From 6fe27036650adc8bc9237d033dd9d18c517892a1 Mon Sep 17 00:00:00 2001
From: Paul Hinze
Date: Fri, 29 Jan 2016 13:53:56 -0600
Subject: [PATCH] Vendor all dependencies w/ Godep

* Remove `make updatedeps` from the Travis build. We'll follow up with
  more specific plans around dependency updating in subsequent PRs.
* Update all `make` targets to set `GO15VENDOREXPERIMENT=1` and to filter
  `/vendor/` out of `./...` where appropriate. (A sketch of this pattern
  follows the diffstat below.)
* Temporarily remove `vet` from the `make test` target until we can figure
  out how to get it to not vet `vendor/`. (Initial experimentation failed
  to yield the proper incantation.)

Everything is pinned to current master, with two exceptions:

* Azure/azure-sdk-for-go, which is pinned just before today's breaking change
* aws/aws-sdk-go, which is pinned to the most recent tag

The documentation still needs to be updated, which we can do in a follow-up
PR. The goal here is to unblock release.
---
 .gitignore | 3 +- .travis.yml | 6 +- Godeps/Godeps.json | 741 + Godeps/Readme | 5 + Makefile | 26 +- scripts/build.sh | 8 +- scripts/gofmtcheck.sh | 2 +- .../Azure/go-autorest/autorest/autorest.go | 163 + .../Azure/go-autorest/autorest/azure/azure.go | 72 + .../autorest/azure/example/README.md | 115 + .../autorest/azure/example/main.go | 130 + .../Azure/go-autorest/autorest/azure/token.go | 298 + .../Azure/go-autorest/autorest/client.go | 288 + .../Azure/go-autorest/autorest/date/date.go | 87 + .../Azure/go-autorest/autorest/date/time.go | 70 + .../Azure/go-autorest/autorest/error.go | 114 + .../go-autorest/autorest/mocks/helpers.go | 104 + .../Azure/go-autorest/autorest/mocks/mocks.go | 165 + .../Azure/go-autorest/autorest/preparer.go | 311 + .../Azure/go-autorest/autorest/responder.go | 169 + .../Azure/go-autorest/autorest/sender.go | 211 + .../Azure/go-autorest/autorest/to/convert.go | 133 + .../Azure/go-autorest/autorest/utility.go | 83 + .../Azure/go-autorest/autorest/version.go | 18 + .../github.com/dgrijalva/jwt-go/.gitignore | 4 + .../github.com/dgrijalva/jwt-go/.travis.yml | 7 + .../src/github.com/dgrijalva/jwt-go/LICENSE | 8 + .../src/github.com/dgrijalva/jwt-go/README.md | 69 + .../dgrijalva/jwt-go/VERSION_HISTORY.md | 67 + .../dgrijalva/jwt-go/cmd/jwt/app.go | 210 + .../src/github.com/dgrijalva/jwt-go/doc.go | 4 + .../src/github.com/dgrijalva/jwt-go/ecdsa.go | 147 + .../dgrijalva/jwt-go/ecdsa_utils.go | 67 + .../src/github.com/dgrijalva/jwt-go/errors.go | 43 + .../src/github.com/dgrijalva/jwt-go/hmac.go | 94 + .../src/github.com/dgrijalva/jwt-go/parser.go | 113 + .../src/github.com/dgrijalva/jwt-go/rsa.go | 114 + .../github.com/dgrijalva/jwt-go/rsa_pss.go | 126 + .../github.com/dgrijalva/jwt-go/rsa_utils.go | 68 + .../dgrijalva/jwt-go/signing_method.go | 24 + .../dgrijalva/jwt-go/test/ec256-private.pem | 5 + .../dgrijalva/jwt-go/test/ec256-public.pem | 4 + .../dgrijalva/jwt-go/test/ec384-private.pem | 6 + .../dgrijalva/jwt-go/test/ec384-public.pem | 5 + .../dgrijalva/jwt-go/test/ec512-private.pem | 7 + .../dgrijalva/jwt-go/test/ec512-public.pem | 6 + .../dgrijalva/jwt-go/test/hmacTestKey | 1 + .../dgrijalva/jwt-go/test/sample_key | 27 + .../dgrijalva/jwt-go/test/sample_key.pub | 9 + .../src/github.com/dgrijalva/jwt-go/token.go | 126 + .../golang.org/x/crypto/pkcs12/bmp-string.go | 50 + .../src/golang.org/x/crypto/pkcs12/crypto.go | 131 + .../src/golang.org/x/crypto/pkcs12/errors.go | 23 + .../x/crypto/pkcs12/internal/rc2/rc2.go | 274 + .../src/golang.org/x/crypto/pkcs12/mac.go | 45 + .../src/golang.org/x/crypto/pkcs12/pbkdf.go | 170 +
.../src/golang.org/x/crypto/pkcs12/pkcs12.go | 342 + .../golang.org/x/crypto/pkcs12/safebags.go | 57 + .../Azure/azure-sdk-for-go/arm/cdn/client.go | 56 + .../azure-sdk-for-go/arm/cdn/customdomains.go | 388 + .../azure-sdk-for-go/arm/cdn/endpoints.go | 711 + .../Azure/azure-sdk-for-go/arm/cdn/models.go | 428 + .../arm/cdn/nameavailability.go | 102 + .../azure-sdk-for-go/arm/cdn/operations.go | 98 + .../Azure/azure-sdk-for-go/arm/cdn/origins.go | 388 + .../azure-sdk-for-go/arm/cdn/profiles.go | 436 + .../Azure/azure-sdk-for-go/arm/cdn/version.go | 43 + .../arm/compute/availabilitysets.go | 410 + .../azure-sdk-for-go/arm/compute/client.go | 52 + .../azure-sdk-for-go/arm/compute/models.go | 1208 + .../arm/compute/usageoperations.go | 128 + .../azure-sdk-for-go/arm/compute/version.go | 43 + .../compute/virtualmachineextensionimages.go | 241 + .../arm/compute/virtualmachineextensions.go | 246 + .../arm/compute/virtualmachineimages.go | 367 + .../arm/compute/virtualmachines.go | 883 + .../arm/compute/virtualmachinescalesets.go | 975 + .../arm/compute/virtualmachinescalesetvms.go | 600 + .../arm/compute/virtualmachinesizes.go | 128 + .../arm/network/applicationgateways.go | 541 + .../azure-sdk-for-go/arm/network/client.go | 126 + .../expressroutecircuitauthorizations.go | 337 + .../network/expressroutecircuitpeerings.go | 335 + .../arm/network/expressroutecircuits.go | 682 + .../network/expressrouteserviceproviders.go | 130 + .../arm/network/interfaces.go | 671 + .../arm/network/loadbalancers.go | 414 + .../arm/network/localnetworkgateways.go | 331 + .../azure-sdk-for-go/arm/network/models.go | 1853 + .../arm/network/publicipaddresses.go | 416 + .../azure-sdk-for-go/arm/network/routes.go | 332 + .../arm/network/routetables.go | 412 + .../arm/network/securitygroups.go | 418 + .../arm/network/securityrules.go | 338 + .../azure-sdk-for-go/arm/network/subnets.go | 336 + .../azure-sdk-for-go/arm/network/usages.go | 130 + .../azure-sdk-for-go/arm/network/version.go | 43 + .../virtualnetworkgatewayconnections.go | 545 + .../arm/network/virtualnetworkgateways.go | 469 + .../arm/network/virtualnetworks.go | 416 + .../arm/resources/resources/client.go | 486 + .../resources/deploymentoperations.go | 201 + .../arm/resources/resources/deployments.go | 522 + .../arm/resources/resources/groups.go | 547 + .../arm/resources/resources/models.go | 460 + .../resources/resources/policyassignments.go | 785 + .../resources/resources/policydefinitions.go | 230 + .../resources/provideroperationdetails.go | 129 + .../arm/resources/resources/providers.go | 316 + .../arm/resources/resources/resources.go | 472 + .../arm/resources/resources/tags.go | 371 + .../arm/resources/resources/version.go | 43 + .../azure-sdk-for-go/arm/scheduler/client.go | 52 + .../arm/scheduler/jobcollections.go | 596 + .../azure-sdk-for-go/arm/scheduler/jobs.go | 566 + .../azure-sdk-for-go/arm/scheduler/models.go | 534 + .../azure-sdk-for-go/arm/scheduler/version.go | 43 + .../azure-sdk-for-go/arm/storage/accounts.go | 648 + .../azure-sdk-for-go/arm/storage/client.go | 52 + .../azure-sdk-for-go/arm/storage/models.go | 217 + .../arm/storage/usageoperations.go | 102 + .../azure-sdk-for-go/arm/storage/version.go | 43 + .../azure-sdk-for-go/core/http/cgi/child.go | 206 + .../core/http/cgi/child_test.go | 131 + .../azure-sdk-for-go/core/http/cgi/host.go | 377 + .../core/http/cgi/host_test.go | 461 + .../core/http/cgi/matryoshka_test.go | 228 + .../core/http/cgi/plan9_test.go | 18 + .../core/http/cgi/posix_test.go | 21 + 
.../core/http/cgi/testdata/test.cgi | 91 + .../azure-sdk-for-go/core/http/chunked.go | 203 + .../core/http/chunked_test.go | 159 + .../azure-sdk-for-go/core/http/client.go | 487 + .../azure-sdk-for-go/core/http/client_test.go | 1038 + .../azure-sdk-for-go/core/http/cookie.go | 363 + .../azure-sdk-for-go/core/http/cookie_test.go | 380 + .../core/http/cookiejar/jar.go | 497 + .../core/http/cookiejar/jar_test.go | 1267 + .../core/http/cookiejar/punycode.go | 159 + .../core/http/cookiejar/punycode_test.go | 161 + .../Azure/azure-sdk-for-go/core/http/doc.go | 80 + .../core/http/example_test.go | 88 + .../azure-sdk-for-go/core/http/export_test.go | 72 + .../azure-sdk-for-go/core/http/fcgi/child.go | 305 + .../azure-sdk-for-go/core/http/fcgi/fcgi.go | 274 + .../core/http/fcgi/fcgi_test.go | 150 + .../core/http/filetransport.go | 123 + .../core/http/filetransport_test.go | 65 + .../Azure/azure-sdk-for-go/core/http/fs.go | 549 + .../azure-sdk-for-go/core/http/fs_test.go | 858 + .../azure-sdk-for-go/core/http/header.go | 211 + .../azure-sdk-for-go/core/http/header_test.go | 212 + .../core/http/httptest/example_test.go | 50 + .../core/http/httptest/recorder.go | 72 + .../core/http/httptest/recorder_test.go | 90 + .../core/http/httptest/server.go | 228 + .../core/http/httptest/server_test.go | 52 + .../core/http/httputil/chunked.go | 203 + .../core/http/httputil/chunked_test.go | 159 + .../core/http/httputil/dump.go | 276 + .../core/http/httputil/dump_test.go | 263 + .../core/http/httputil/httputil.go | 32 + .../core/http/httputil/persist.go | 429 + .../core/http/httputil/reverseproxy.go | 211 + .../core/http/httputil/reverseproxy_test.go | 213 + .../Azure/azure-sdk-for-go/core/http/jar.go | 27 + .../Azure/azure-sdk-for-go/core/http/lex.go | 96 + .../azure-sdk-for-go/core/http/lex_test.go | 31 + .../azure-sdk-for-go/core/http/npn_test.go | 118 + .../azure-sdk-for-go/core/http/pprof/pprof.go | 205 + .../azure-sdk-for-go/core/http/proxy_test.go | 81 + .../Azure/azure-sdk-for-go/core/http/race.go | 11 + .../azure-sdk-for-go/core/http/range_test.go | 79 + .../core/http/readrequest_test.go | 331 + .../azure-sdk-for-go/core/http/request.go | 875 + .../core/http/request_test.go | 610 + .../core/http/requestwrite_test.go | 565 + .../azure-sdk-for-go/core/http/response.go | 291 + .../core/http/response_test.go | 645 + .../core/http/responsewrite_test.go | 226 + .../azure-sdk-for-go/core/http/serve_test.go | 2848 + .../azure-sdk-for-go/core/http/server.go | 2052 + .../Azure/azure-sdk-for-go/core/http/sniff.go | 214 + .../azure-sdk-for-go/core/http/sniff_test.go | 171 + .../azure-sdk-for-go/core/http/status.go | 120 + .../azure-sdk-for-go/core/http/testdata/file | 1 + .../core/http/testdata/index.html | 1 + .../core/http/testdata/style.css | 1 + .../azure-sdk-for-go/core/http/transfer.go | 730 + .../core/http/transfer_test.go | 64 + .../azure-sdk-for-go/core/http/transport.go | 1208 + .../core/http/transport_test.go | 2173 + .../Azure/azure-sdk-for-go/core/http/triv.go | 141 + .../azure-sdk-for-go/core/http/z_last_test.go | 97 + .../Azure/azure-sdk-for-go/core/tls/alert.go | 77 + .../core/tls/cipher_suites.go | 270 + .../Azure/azure-sdk-for-go/core/tls/common.go | 438 + .../Azure/azure-sdk-for-go/core/tls/conn.go | 1026 + .../azure-sdk-for-go/core/tls/conn_test.go | 106 + .../core/tls/generate_cert.go | 118 + .../core/tls/handshake_client.go | 411 + .../core/tls/handshake_client_test.go | 3050 + .../core/tls/handshake_messages.go | 1304 + .../core/tls/handshake_messages_test.go | 246 + 
.../core/tls/handshake_server.go | 638 + .../core/tls/handshake_server_test.go | 3796 ++ .../core/tls/key_agreement.go | 400 + .../Azure/azure-sdk-for-go/core/tls/prf.go | 291 + .../azure-sdk-for-go/core/tls/prf_test.go | 126 + .../Azure/azure-sdk-for-go/core/tls/ticket.go | 182 + .../Azure/azure-sdk-for-go/core/tls/tls.go | 225 + .../azure-sdk-for-go/core/tls/tls_test.go | 107 + .../management/affinitygroup/client.go | 131 + .../management/affinitygroup/entities.go | 80 + .../azure-sdk-for-go/management/client.go | 138 + .../azure-sdk-for-go/management/errors.go | 36 + .../management/errors_test.go | 30 + .../management/hostedservice/client.go | 125 + .../management/hostedservice/entities.go | 58 + .../Azure/azure-sdk-for-go/management/http.go | 187 + .../management/location/client.go | 30 + .../management/location/entities.go | 37 + .../management/networksecuritygroup/client.go | 245 + .../networksecuritygroup/entities.go | 115 + .../azure-sdk-for-go/management/operations.go | 92 + .../management/osimage/client.go | 31 + .../management/osimage/entities.go | 47 + .../management/publishSettings.go | 105 + .../azure-sdk-for-go/management/sql/client.go | 316 + .../management/sql/entities.go | 124 + .../management/storageservice/client.go | 108 + .../management/storageservice/entities.go | 79 + .../storageservice/entities_test.go | 31 + .../management/testutils/managementclient.go | 87 + .../Azure/azure-sdk-for-go/management/util.go | 11 + .../management/virtualmachine/client.go | 327 + .../management/virtualmachine/entities.go | 569 + .../virtualmachine/entities_test.go | 299 + .../virtualmachine/resourceextensions.go | 25 + .../virtualmachine/resourceextensions_test.go | 27 + .../management/virtualmachinedisk/client.go | 230 + .../management/virtualmachinedisk/entities.go | 134 + .../management/virtualmachineimage/client.go | 110 + .../virtualmachineimage/entities.go | 95 + .../virtualmachineimage/entities_test.go | 110 + .../management/virtualnetwork/client.go | 47 + .../management/virtualnetwork/entities.go | 90 + .../management/vmutils/configurationset.go | 28 + .../management/vmutils/datadisks.go | 58 + .../management/vmutils/deployment.go | 91 + .../management/vmutils/examples_test.go | 53 + .../management/vmutils/extensions.go | 88 + .../management/vmutils/extensions_test.go | 42 + .../management/vmutils/integration_test.go | 455 + .../management/vmutils/network.go | 83 + .../management/vmutils/rolesize.go | 76 + .../management/vmutils/rolestate.go | 58 + .../management/vmutils/vmutils.go | 148 + .../management/vmutils/vmutils_test.go | 440 + .../Azure/azure-sdk-for-go/storage/blob.go | 959 + .../azure-sdk-for-go/storage/blob_test.go | 715 + .../Azure/azure-sdk-for-go/storage/client.go | 388 + .../azure-sdk-for-go/storage/client_test.go | 156 + .../Azure/azure-sdk-for-go/storage/file.go | 92 + .../azure-sdk-for-go/storage/file_test.go | 63 + .../Azure/azure-sdk-for-go/storage/queue.go | 308 + .../azure-sdk-for-go/storage/queue_test.go | 132 + .../Azure/azure-sdk-for-go/storage/util.go | 71 + .../azure-sdk-for-go/storage/util_test.go | 69 + .../DreamItGetIT/statuscake/LICENSE | 21 + .../DreamItGetIT/statuscake/README.md | 5 + .../DreamItGetIT/statuscake/client.go | 170 + .../DreamItGetIT/statuscake/client_test.go | 236 + .../statuscake/cmd/statuscake/main.go | 229 + .../github.com/DreamItGetIT/statuscake/doc.go | 34 + .../DreamItGetIT/statuscake/errors.go | 80 + .../statuscake/fixtures/auth_error.json | 4 + .../statuscake/fixtures/tests_all_ok.json | 22 + 
.../fixtures/tests_delete_error.json | 5 + .../statuscake/fixtures/tests_delete_ok.json | 6 + .../statuscake/fixtures/tests_detail_ok.json | 29 + .../fixtures/tests_update_error.json | 9 + .../tests_update_error_slice_of_issues.json | 5 + .../statuscake/fixtures/tests_update_ok.json | 6 + .../DreamItGetIT/statuscake/makefile | 11 + .../DreamItGetIT/statuscake/responses.go | 66 + .../DreamItGetIT/statuscake/tests.go | 298 + .../DreamItGetIT/statuscake/tests_test.go | 327 + .../apparentlymart/go-cidr/cidr/cidr.go | 105 + .../apparentlymart/go-cidr/cidr/cidr_test.go | 238 + .../apparentlymart/go-cidr/cidr/wrangling.go | 38 + .../go-rundeck-api/rundeck/client.go | 302 + .../go-rundeck-api/rundeck/error.go | 21 + .../go-rundeck-api/rundeck/job.go | 328 + .../go-rundeck-api/rundeck/job_test.go | 113 + .../go-rundeck-api/rundeck/key.go | 81 + .../go-rundeck-api/rundeck/project.go | 87 + .../go-rundeck-api/rundeck/system_info.go | 116 + .../go-rundeck-api/rundeck/testing.go | 46 + .../go-rundeck-api/rundeck/util.go | 81 + vendor/github.com/armon/circbuf/.gitignore | 22 + vendor/github.com/armon/circbuf/LICENSE | 20 + vendor/github.com/armon/circbuf/README.md | 28 + vendor/github.com/armon/circbuf/circbuf.go | 92 + .../github.com/armon/circbuf/circbuf_test.go | 198 + vendor/github.com/armon/go-radix/.gitignore | 22 + vendor/github.com/armon/go-radix/.travis.yml | 3 + vendor/github.com/armon/go-radix/LICENSE | 20 + vendor/github.com/armon/go-radix/README.md | 38 + vendor/github.com/armon/go-radix/radix.go | 496 + .../github.com/armon/go-radix/radix_test.go | 319 + .../aws/aws-sdk-go/aws/awserr/error.go | 127 + .../aws/aws-sdk-go/aws/awserr/types.go | 197 + .../aws/aws-sdk-go/aws/awsutil/copy.go | 100 + .../aws/aws-sdk-go/aws/awsutil/copy_test.go | 233 + .../aws/aws-sdk-go/aws/awsutil/equal.go | 27 + .../aws/aws-sdk-go/aws/awsutil/equal_test.go | 29 + .../aws/aws-sdk-go/aws/awsutil/path_value.go | 222 + .../aws-sdk-go/aws/awsutil/path_value_test.go | 142 + .../aws/aws-sdk-go/aws/awsutil/prettify.go | 103 + .../aws-sdk-go/aws/awsutil/string_value.go | 89 + .../aws/aws-sdk-go/aws/client/client.go | 120 + .../aws-sdk-go/aws/client/default_retryer.go | 45 + .../aws/client/metadata/client_info.go | 12 + .../github.com/aws/aws-sdk-go/aws/config.go | 311 + .../aws/aws-sdk-go/aws/config_test.go | 86 + .../aws/aws-sdk-go/aws/convert_types.go | 357 + .../aws/aws-sdk-go/aws/convert_types_test.go | 437 + .../aws-sdk-go/aws/corehandlers/handlers.go | 139 + .../aws/corehandlers/handlers_test.go | 113 + .../aws/corehandlers/param_validator.go | 144 + .../aws/corehandlers/param_validator_test.go | 129 + .../aws/credentials/chain_provider.go | 100 + .../aws/credentials/chain_provider_test.go | 154 + .../aws-sdk-go/aws/credentials/credentials.go | 223 + .../aws/credentials/credentials_test.go | 73 + .../ec2rolecreds/ec2_role_provider.go | 178 + .../ec2rolecreds/ec2_role_provider_test.go | 159 + .../aws/credentials/env_provider.go | 77 + .../aws/credentials/env_provider_test.go | 70 + .../aws-sdk-go/aws/credentials/example.ini | 12 + .../shared_credentials_provider.go | 151 + .../shared_credentials_provider_test.go | 116 + .../aws/credentials/static_provider.go | 48 + .../aws/credentials/static_provider_test.go | 34 + .../stscreds/assume_role_provider.go | 134 + .../stscreds/assume_role_provider_test.go | 56 + .../aws/aws-sdk-go/aws/defaults/defaults.go | 96 + .../aws/aws-sdk-go/aws/ec2metadata/api.go | 43 + .../aws-sdk-go/aws/ec2metadata/api_test.go | 101 + .../aws/aws-sdk-go/aws/ec2metadata/service.go | 117 + 
.../aws/ec2metadata/service_test.go | 79 + .../github.com/aws/aws-sdk-go/aws/errors.go | 17 + .../github.com/aws/aws-sdk-go/aws/logger.go | 98 + .../aws/aws-sdk-go/aws/request/handlers.go | 140 + .../aws-sdk-go/aws/request/handlers_test.go | 47 + .../aws/aws-sdk-go/aws/request/request.go | 294 + .../aws/request/request_pagination.go | 104 + .../aws/request/request_pagination_test.go | 455 + .../aws-sdk-go/aws/request/request_test.go | 261 + .../aws/aws-sdk-go/aws/request/retryer.go | 82 + .../aws/aws-sdk-go/aws/session/session.go | 120 + .../aws-sdk-go/aws/session/session_test.go | 20 + vendor/github.com/aws/aws-sdk-go/aws/types.go | 88 + .../aws/aws-sdk-go/aws/types_test.go | 56 + .../github.com/aws/aws-sdk-go/aws/version.go | 8 + .../aws-sdk-go/private/endpoints/endpoints.go | 65 + .../private/endpoints/endpoints.json | 92 + .../private/endpoints/endpoints_map.go | 104 + .../private/endpoints/endpoints_test.go | 41 + .../private/protocol/ec2query/build.go | 32 + .../protocol/ec2query/build_bench_test.go | 85 + .../private/protocol/ec2query/build_test.go | 983 + .../private/protocol/ec2query/unmarshal.go | 54 + .../protocol/ec2query/unmarshal_test.go | 1056 + .../private/protocol/json/jsonutil/build.go | 243 + .../protocol/json/jsonutil/build_test.go | 100 + .../protocol/json/jsonutil/unmarshal.go | 213 + .../protocol/jsonrpc/build_bench_test.go | 71 + .../private/protocol/jsonrpc/build_test.go | 1152 + .../private/protocol/jsonrpc/jsonrpc.go | 99 + .../protocol/jsonrpc/unmarshal_test.go | 812 + .../private/protocol/query/build.go | 33 + .../private/protocol/query/build_test.go | 1999 + .../protocol/query/queryutil/queryutil.go | 223 + .../private/protocol/query/unmarshal.go | 29 + .../private/protocol/query/unmarshal_error.go | 37 + .../private/protocol/query/unmarshal_test.go | 1746 + .../aws-sdk-go/private/protocol/rest/build.go | 254 + .../private/protocol/rest/payload.go | 45 + .../private/protocol/rest/unmarshal.go | 187 + .../protocol/restjson/build_bench_test.go | 356 + .../private/protocol/restjson/build_test.go | 2371 + .../private/protocol/restjson/restjson.go | 79 + .../protocol/restjson/unmarshal_test.go | 1321 + .../protocol/restxml/build_bench_test.go | 246 + .../private/protocol/restxml/build_test.go | 3338 ++ .../private/protocol/restxml/restxml.go | 57 + .../protocol/restxml/unmarshal_test.go | 1498 + .../private/protocol/xml/xmlutil/build.go | 287 + .../private/protocol/xml/xmlutil/unmarshal.go | 260 + .../protocol/xml/xmlutil/xml_to_struct.go | 105 + .../private/signer/v4/functional_test.go | 77 + .../private/signer/v4/header_rules.go | 82 + .../private/signer/v4/header_rules_test.go | 57 + .../aws/aws-sdk-go/private/signer/v4/v4.go | 438 + .../aws-sdk-go/private/signer/v4/v4_test.go | 255 + .../aws/aws-sdk-go/private/waiter/waiter.go | 134 + .../aws-sdk-go/private/waiter/waiter_test.go | 401 + .../aws/aws-sdk-go/service/autoscaling/api.go | 5108 ++ .../autoscaling/autoscalingiface/interface.go | 226 + .../service/autoscaling/examples_test.go | 1207 + .../aws-sdk-go/service/autoscaling/service.go | 88 + .../aws-sdk-go/service/cloudformation/api.go | 2294 + .../cloudformationiface/interface.go | 98 + .../service/cloudformation/examples_test.go | 465 + .../service/cloudformation/service.go | 103 + .../service/cloudformation/waiters.go | 100 + .../aws/aws-sdk-go/service/cloudtrail/api.go | 1409 + .../cloudtrail/cloudtrailiface/interface.go | 62 + .../service/cloudtrail/examples_test.go | 296 + .../aws-sdk-go/service/cloudtrail/service.go | 106 + 
.../aws/aws-sdk-go/service/cloudwatch/api.go | 1361 + .../cloudwatch/cloudwatchiface/interface.go | 64 + .../service/cloudwatch/examples_test.go | 337 + .../aws-sdk-go/service/cloudwatch/service.go | 125 + .../aws-sdk-go/service/cloudwatchlogs/api.go | 2471 + .../cloudwatchlogsiface/interface.go | 128 + .../service/cloudwatchlogs/examples_test.go | 566 + .../service/cloudwatchlogs/service.go | 119 + .../aws/aws-sdk-go/service/codecommit/api.go | 880 + .../codecommit/codecommitiface/interface.go | 58 + .../service/codecommit/examples_test.go | 238 + .../aws-sdk-go/service/codecommit/service.go | 94 + .../aws/aws-sdk-go/service/codedeploy/api.go | 3263 + .../codedeploy/codedeployiface/interface.go | 154 + .../service/codedeploy/examples_test.go | 787 + .../aws-sdk-go/service/codedeploy/service.go | 136 + .../service/directoryservice/api.go | 2179 + .../directoryserviceiface/interface.go | 102 + .../service/directoryservice/examples_test.go | 532 + .../service/directoryservice/service.go | 90 + .../aws/aws-sdk-go/service/dynamodb/api.go | 5387 ++ .../service/dynamodb/customizations.go | 98 + .../service/dynamodb/customizations_test.go | 106 + .../dynamodb/dynamodbattribute/converter.go | 463 + .../dynamodbattribute/converter_test.go | 488 + .../dynamodbattribute/examples_test.go | 79 + .../dynamodb/dynamodbiface/interface.go | 74 + .../service/dynamodb/examples_test.go | 1336 + .../aws-sdk-go/service/dynamodb/service.go | 190 + .../aws-sdk-go/service/dynamodb/waiters.go | 59 + .../aws/aws-sdk-go/service/ec2/api.go | 24975 ++++++++ .../aws-sdk-go/service/ec2/customizations.go | 55 + .../service/ec2/customizations_test.go | 35 + .../service/ec2/ec2iface/interface.go | 832 + .../aws-sdk-go/service/ec2/examples_test.go | 5695 ++ .../aws/aws-sdk-go/service/ec2/service.go | 89 + .../aws/aws-sdk-go/service/ec2/waiters.go | 761 + .../aws/aws-sdk-go/service/ecr/api.go | 1433 + .../service/ecr/ecriface/interface.go | 78 + .../aws-sdk-go/service/ecr/examples_test.go | 376 + .../aws/aws-sdk-go/service/ecr/service.go | 93 + .../aws/aws-sdk-go/service/ecs/api.go | 3638 ++ .../service/ecs/ecsiface/interface.go | 134 + .../aws-sdk-go/service/ecs/examples_test.go | 791 + .../aws/aws-sdk-go/service/ecs/service.go | 99 + .../aws/aws-sdk-go/service/ecs/waiters.go | 135 + .../aws/aws-sdk-go/service/efs/api.go | 1083 + .../service/efs/efsiface/interface.go | 58 + .../aws-sdk-go/service/efs/examples_test.go | 254 + .../aws/aws-sdk-go/service/efs/service.go | 85 + .../aws/aws-sdk-go/service/elasticache/api.go | 4891 ++ .../elasticache/elasticacheiface/interface.go | 186 + .../service/elasticache/examples_test.go | 943 + .../aws-sdk-go/service/elasticache/service.go | 96 + .../aws-sdk-go/service/elasticache/waiters.go | 183 + .../service/elasticsearchservice/api.go | 1143 + .../elasticsearchserviceiface/interface.go | 54 + .../elasticsearchservice/examples_test.go | 262 + .../service/elasticsearchservice/service.go | 92 + .../aws/aws-sdk-go/service/elb/api.go | 2784 + .../service/elb/elbiface/interface.go | 128 + .../aws-sdk-go/service/elb/examples_test.go | 722 + .../aws/aws-sdk-go/service/elb/service.go | 98 + .../aws/aws-sdk-go/service/elb/waiters.go | 53 + .../aws/aws-sdk-go/service/firehose/api.go | 1163 + .../service/firehose/examples_test.go | 249 + .../firehose/firehoseiface/interface.go | 42 + .../aws-sdk-go/service/firehose/service.go | 89 + .../aws/aws-sdk-go/service/glacier/api.go | 3548 ++ .../service/glacier/customizations.go | 58 + .../service/glacier/customizations_test.go | 77 + 
.../service/glacier/examples_test.go | 706 + .../service/glacier/glacieriface/interface.go | 146 + .../aws/aws-sdk-go/service/glacier/service.go | 116 + .../aws-sdk-go/service/glacier/treehash.go | 71 + .../service/glacier/treehash_test.go | 28 + .../aws/aws-sdk-go/service/glacier/waiters.go | 65 + .../aws/aws-sdk-go/service/iam/api.go | 11168 ++++ .../aws-sdk-go/service/iam/examples_test.go | 2366 + .../service/iam/iamiface/interface.go | 510 + .../aws/aws-sdk-go/service/iam/service.go | 133 + .../aws/aws-sdk-go/service/iam/waiters.go | 65 + .../aws/aws-sdk-go/service/kinesis/api.go | 1747 + .../service/kinesis/examples_test.go | 337 + .../service/kinesis/kinesisiface/interface.go | 78 + .../aws/aws-sdk-go/service/kinesis/service.go | 89 + .../aws/aws-sdk-go/service/kinesis/waiters.go | 30 + .../aws/aws-sdk-go/service/lambda/api.go | 2150 + .../service/lambda/examples_test.go | 539 + .../service/lambda/lambdaiface/interface.go | 114 + .../aws/aws-sdk-go/service/lambda/service.go | 92 + .../aws/aws-sdk-go/service/opsworks/api.go | 7712 +++ .../service/opsworks/examples_test.go | 1890 + .../opsworks/opsworksiface/interface.go | 296 + .../aws-sdk-go/service/opsworks/service.go | 124 + .../aws-sdk-go/service/opsworks/waiters.go | 290 + .../aws/aws-sdk-go/service/rds/api.go | 10572 ++++ .../aws-sdk-go/service/rds/examples_test.go | 2340 + .../service/rds/rdsiface/interface.go | 360 + .../aws/aws-sdk-go/service/rds/service.go | 109 + .../aws/aws-sdk-go/service/rds/waiters.go | 119 + .../aws/aws-sdk-go/service/redshift/api.go | 7171 +++ .../service/redshift/examples_test.go | 1497 + .../redshift/redshiftiface/interface.go | 280 + .../aws-sdk-go/service/redshift/service.go | 107 + .../aws-sdk-go/service/redshift/waiters.go | 141 + .../aws/aws-sdk-go/service/route53/api.go | 5646 ++ .../service/route53/customizations.go | 21 + .../service/route53/customizations_test.go | 22 + .../service/route53/examples_test.go | 1080 + .../service/route53/route53iface/interface.go | 212 + .../aws/aws-sdk-go/service/route53/service.go | 86 + .../aws/aws-sdk-go/service/s3/api.go | 6267 ++ .../aws-sdk-go/service/s3/bucket_location.go | 43 + .../service/s3/bucket_location_test.go | 78 + .../aws/aws-sdk-go/service/s3/content_md5.go | 36 + .../aws-sdk-go/service/s3/customizations.go | 37 + .../service/s3/customizations_test.go | 105 + .../aws-sdk-go/service/s3/examples_test.go | 1599 + .../service/s3/host_style_bucket.go | 60 + .../service/s3/host_style_bucket_test.go | 75 + .../service/s3/s3iface/interface.go | 246 + .../aws-sdk-go/service/s3/s3manager/doc.go | 3 + .../service/s3/s3manager/download.go | 354 + .../service/s3/s3manager/download_test.go | 309 + .../s3/s3manager/s3manageriface/interface.go | 23 + .../service/s3/s3manager/shared_test.go | 4 + .../aws-sdk-go/service/s3/s3manager/upload.go | 661 + .../service/s3/s3manager/upload_test.go | 482 + .../aws/aws-sdk-go/service/s3/service.go | 86 + .../aws/aws-sdk-go/service/s3/sse.go | 44 + .../aws/aws-sdk-go/service/s3/sse_test.go | 79 + .../aws-sdk-go/service/s3/statusok_error.go | 36 + .../service/s3/statusok_error_test.go | 130 + .../aws-sdk-go/service/s3/unmarshal_error.go | 57 + .../service/s3/unmarshal_error_test.go | 165 + .../aws/aws-sdk-go/service/s3/waiters.go | 117 + .../aws/aws-sdk-go/service/sns/api.go | 2076 + .../aws-sdk-go/service/sns/examples_test.go | 542 + .../aws/aws-sdk-go/service/sns/service.go | 98 + .../service/sns/snsiface/interface.go | 124 + .../aws/aws-sdk-go/service/sqs/api.go | 1806 + .../aws/aws-sdk-go/service/sqs/api_test.go | 30 
+ .../aws/aws-sdk-go/service/sqs/checksums.go | 105 + .../aws-sdk-go/service/sqs/checksums_test.go | 207 + .../aws-sdk-go/service/sqs/customizations.go | 9 + .../aws-sdk-go/service/sqs/examples_test.go | 433 + .../aws/aws-sdk-go/service/sqs/service.go | 110 + .../service/sqs/sqsiface/interface.go | 82 + .../github.com/bgentry/speakeasy/.gitignore | 2 + .../bgentry/speakeasy/LICENSE_WINDOWS | 201 + vendor/github.com/bgentry/speakeasy/Readme.md | 30 + .../bgentry/speakeasy/example/main.go | 18 + .../github.com/bgentry/speakeasy/speakeasy.go | 47 + .../bgentry/speakeasy/speakeasy_unix.go | 93 + .../bgentry/speakeasy/speakeasy_windows.go | 43 + .../src/github.com/ugorji/go/codec/0doc.go | 193 + .../src/github.com/ugorji/go/codec/README.md | 148 + .../src/github.com/ugorji/go/codec/binc.go | 918 + .../src/github.com/ugorji/go/codec/cbor.go | 584 + .../github.com/ugorji/go/codec/cbor_test.go | 205 + .../github.com/ugorji/go/codec/codec_test.go | 1185 + .../ugorji/go/codec/codecgen/README.md | 36 + .../ugorji/go/codec/codecgen/gen.go | 273 + .../github.com/ugorji/go/codec/codecgen/z.go | 3 + .../ugorji/go/codec/codecgen_test.go | 24 + .../src/github.com/ugorji/go/codec/decode.go | 2015 + .../src/github.com/ugorji/go/codec/encode.go | 1405 + .../ugorji/go/codec/fast-path.generated.go | 38900 ++++++++++++ .../ugorji/go/codec/fast-path.go.tmpl | 511 + .../ugorji/go/codec/fast-path.not.go | 32 + .../ugorji/go/codec/gen-dec-array.go.tmpl | 101 + .../ugorji/go/codec/gen-dec-map.go.tmpl | 58 + .../ugorji/go/codec/gen-helper.generated.go | 233 + .../ugorji/go/codec/gen-helper.go.tmpl | 364 + .../ugorji/go/codec/gen.generated.go | 172 + .../src/github.com/ugorji/go/codec/gen.go | 1920 + .../src/github.com/ugorji/go/codec/helper.go | 1129 + .../ugorji/go/codec/helper_internal.go | 242 + .../ugorji/go/codec/helper_not_unsafe.go | 20 + .../github.com/ugorji/go/codec/helper_test.go | 242 + .../ugorji/go/codec/helper_unsafe.go | 45 + .../src/github.com/ugorji/go/codec/json.go | 1072 + .../src/github.com/ugorji/go/codec/msgpack.go | 844 + .../src/github.com/ugorji/go/codec/noop.go | 213 + .../github.com/ugorji/go/codec/prebuild.go | 3 + .../github.com/ugorji/go/codec/prebuild.sh | 199 + .../src/github.com/ugorji/go/codec/py_test.go | 30 + .../src/github.com/ugorji/go/codec/rpc.go | 180 + .../src/github.com/ugorji/go/codec/simple.go | 518 + .../ugorji/go/codec/test-cbor-goldens.json | 639 + .../src/github.com/ugorji/go/codec/test.py | 120 + .../src/github.com/ugorji/go/codec/tests.sh | 74 + .../src/github.com/ugorji/go/codec/time.go | 222 + .../github.com/ugorji/go/codec/values_test.go | 203 + .../src/golang.org/x/net/context/context.go | 447 + .../golang.org/x/net/context/context_test.go | 575 + .../x/net/context/ctxhttp/cancelreq.go | 19 + .../x/net/context/ctxhttp/cancelreq_go14.go | 23 + .../x/net/context/ctxhttp/ctxhttp.go | 140 + .../x/net/context/ctxhttp/ctxhttp_test.go | 176 + .../x/net/context/withtimeout_test.go | 26 + .../github.com/coreos/etcd/client/README.md | 110 + .../coreos/etcd/client/auth_role.go | 233 + .../coreos/etcd/client/auth_user.go | 295 + .../coreos/etcd/client/cancelreq.go | 20 + .../coreos/etcd/client/cancelreq_go14.go | 17 + .../github.com/coreos/etcd/client/client.go | 596 + .../coreos/etcd/client/client_test.go | 896 + .../coreos/etcd/client/cluster_error.go | 33 + vendor/github.com/coreos/etcd/client/curl.go | 70 + .../github.com/coreos/etcd/client/discover.go | 21 + vendor/github.com/coreos/etcd/client/doc.go | 71 + .../etcd/client/fake_transport_go14_test.go | 41 + 
.../coreos/etcd/client/fake_transport_test.go | 42 + .../coreos/etcd/client/keys.generated.go | 1000 + vendor/github.com/coreos/etcd/client/keys.go | 651 + .../coreos/etcd/client/keys_bench_test.go | 87 + .../coreos/etcd/client/keys_test.go | 1407 + .../github.com/coreos/etcd/client/members.go | 304 + .../coreos/etcd/client/members_test.go | 599 + vendor/github.com/coreos/etcd/client/srv.go | 65 + .../github.com/coreos/etcd/client/srv_test.go | 102 + vendor/github.com/coreos/etcd/client/util.go | 23 + .../coreos/etcd/pkg/pathutil/path.go | 31 + .../coreos/etcd/pkg/pathutil/path_test.go | 38 + .../github.com/coreos/etcd/pkg/types/doc.go | 17 + vendor/github.com/coreos/etcd/pkg/types/id.go | 41 + .../coreos/etcd/pkg/types/id_test.go | 95 + .../github.com/coreos/etcd/pkg/types/set.go | 178 + .../coreos/etcd/pkg/types/set_test.go | 186 + .../github.com/coreos/etcd/pkg/types/slice.go | 22 + .../coreos/etcd/pkg/types/slice_test.go | 30 + .../github.com/coreos/etcd/pkg/types/urls.go | 74 + .../coreos/etcd/pkg/types/urls_test.go | 169 + .../coreos/etcd/pkg/types/urlsmap.go | 91 + .../etcd/pkg/types/urlsmap_go15_test.go | 40 + .../coreos/etcd/pkg/types/urlsmap_test.go | 99 + .../cyberdelia/heroku-go/v3/heroku.go | 1734 + .../cyberdelia/heroku-go/v3/schema.json | 5864 ++ .../cyberdelia/heroku-go/v3/transport.go | 152 + .../github.com/digitalocean/godo/.travis.yml | 6 + .../digitalocean/godo/CONTRIBUTING.md | 23 + .../github.com/digitalocean/godo/LICENSE.txt | 55 + vendor/github.com/digitalocean/godo/README.md | 136 + .../github.com/digitalocean/godo/account.go | 53 + .../digitalocean/godo/account_test.go | 59 + vendor/github.com/digitalocean/godo/action.go | 100 + .../digitalocean/godo/action_test.go | 136 + vendor/github.com/digitalocean/godo/doc.go | 2 + .../github.com/digitalocean/godo/domains.go | 323 + .../digitalocean/godo/domains_test.go | 340 + .../digitalocean/godo/droplet_actions.go | 238 + .../digitalocean/godo/droplet_actions_test.go | 666 + .../github.com/digitalocean/godo/droplets.go | 447 + .../digitalocean/godo/droplets_test.go | 428 + vendor/github.com/digitalocean/godo/errors.go | 24 + .../digitalocean/godo/errors_test.go | 11 + .../digitalocean/godo/floating_ips.go | 131 + .../digitalocean/godo/floating_ips_actions.go | 105 + .../godo/floating_ips_actions_test.go | 167 + .../digitalocean/godo/floating_ips_test.go | 149 + vendor/github.com/digitalocean/godo/godo.go | 341 + .../github.com/digitalocean/godo/godo_test.go | 455 + .../digitalocean/godo/image_actions.go | 71 + .../digitalocean/godo/image_actions_test.go | 62 + vendor/github.com/digitalocean/godo/images.go | 194 + .../digitalocean/godo/images_test.go | 262 + vendor/github.com/digitalocean/godo/keys.go | 222 + .../github.com/digitalocean/godo/keys_test.go | 265 + vendor/github.com/digitalocean/godo/links.go | 86 + .../digitalocean/godo/links_test.go | 176 + .../github.com/digitalocean/godo/regions.go | 63 + .../digitalocean/godo/regions_test.go | 91 + vendor/github.com/digitalocean/godo/sizes.go | 63 + .../digitalocean/godo/sizes_test.go | 96 + .../github.com/digitalocean/godo/strings.go | 83 + .../github.com/digitalocean/godo/timestamp.go | 35 + .../digitalocean/godo/timestamp_test.go | 176 + .../digitalocean/godo/util/droplet.go | 47 + .../digitalocean/godo/util/droplet_test.go | 26 + vendor/github.com/dylanmei/iso8601/LICENSE | 21 + vendor/github.com/dylanmei/iso8601/README.md | 9 + .../github.com/dylanmei/iso8601/duration.go | 96 + .../dylanmei/iso8601/duration_test.go | 81 + 
vendor/github.com/dylanmei/winrmtest/LICENSE | 22 + .../github.com/dylanmei/winrmtest/README.md | 48 + .../github.com/dylanmei/winrmtest/remote.go | 79 + vendor/github.com/dylanmei/winrmtest/wsman.go | 170 + .../dylanmei/winrmtest/wsman_test.go | 220 + .../fsouza/go-dockerclient/.gitignore | 2 + .../fsouza/go-dockerclient/.travis.yml | 22 + .../github.com/fsouza/go-dockerclient/AUTHORS | 125 + .../fsouza/go-dockerclient/DOCKER-LICENSE | 6 + .../github.com/fsouza/go-dockerclient/LICENSE | 22 + .../fsouza/go-dockerclient/Makefile | 60 + .../fsouza/go-dockerclient/README.markdown | 105 + .../github.com/fsouza/go-dockerclient/auth.go | 136 + .../fsouza/go-dockerclient/auth_test.go | 91 + .../fsouza/go-dockerclient/build_test.go | 154 + .../fsouza/go-dockerclient/change.go | 43 + .../fsouza/go-dockerclient/change_test.go | 24 + .../fsouza/go-dockerclient/client.go | 928 + .../fsouza/go-dockerclient/client_test.go | 502 + .../fsouza/go-dockerclient/container.go | 1180 + .../fsouza/go-dockerclient/container_test.go | 2263 + .../github.com/fsouza/go-dockerclient/env.go | 168 + .../fsouza/go-dockerclient/env_test.go | 351 + .../fsouza/go-dockerclient/event.go | 304 + .../fsouza/go-dockerclient/event_test.go | 132 + .../fsouza/go-dockerclient/example_test.go | 168 + .../github.com/fsouza/go-dockerclient/exec.go | 202 + .../fsouza/go-dockerclient/exec_test.go | 262 + .../github.com/Sirupsen/logrus/CHANGELOG.md | 55 + .../github.com/Sirupsen/logrus/LICENSE | 21 + .../github.com/Sirupsen/logrus/README.md | 365 + .../github.com/Sirupsen/logrus/doc.go | 26 + .../github.com/Sirupsen/logrus/entry.go | 264 + .../github.com/Sirupsen/logrus/entry_test.go | 77 + .../github.com/Sirupsen/logrus/exported.go | 193 + .../github.com/Sirupsen/logrus/formatter.go | 48 + .../Sirupsen/logrus/formatter_bench_test.go | 98 + .../github.com/Sirupsen/logrus/hook_test.go | 122 + .../github.com/Sirupsen/logrus/hooks.go | 34 + .../Sirupsen/logrus/json_formatter.go | 41 + .../Sirupsen/logrus/json_formatter_test.go | 120 + .../github.com/Sirupsen/logrus/logger.go | 212 + .../github.com/Sirupsen/logrus/logrus.go | 98 + .../github.com/Sirupsen/logrus/logrus_test.go | 301 + .../Sirupsen/logrus/terminal_bsd.go | 9 + .../Sirupsen/logrus/terminal_linux.go | 12 + .../Sirupsen/logrus/terminal_notwindows.go | 21 + .../Sirupsen/logrus/terminal_solaris.go | 15 + .../Sirupsen/logrus/terminal_windows.go | 27 + .../Sirupsen/logrus/text_formatter.go | 161 + .../Sirupsen/logrus/text_formatter_test.go | 61 + .../github.com/Sirupsen/logrus/writer.go | 31 + .../github.com/docker/docker/opts/envfile.go | 67 + .../docker/docker/opts/envfile_test.go | 142 + .../github.com/docker/docker/opts/hosts.go | 146 + .../docker/docker/opts/hosts_test.go | 164 + .../docker/docker/opts/hosts_unix.go | 8 + .../docker/docker/opts/hosts_windows.go | 6 + .../github.com/docker/docker/opts/ip.go | 42 + .../github.com/docker/docker/opts/ip_test.go | 54 + .../github.com/docker/docker/opts/opts.go | 252 + .../docker/docker/opts/opts_test.go | 301 + .../docker/docker/opts/opts_unix.go | 6 + .../docker/docker/opts/opts_windows.go | 56 + .../docker/docker/pkg/archive/README.md | 1 + .../docker/docker/pkg/archive/archive.go | 1049 + .../docker/docker/pkg/archive/archive_test.go | 1248 + .../docker/docker/pkg/archive/archive_unix.go | 112 + .../docker/pkg/archive/archive_unix_test.go | 60 + .../docker/pkg/archive/archive_windows.go | 70 + .../pkg/archive/archive_windows_test.go | 87 + .../docker/docker/pkg/archive/changes.go | 416 + .../docker/pkg/archive/changes_linux.go | 
285 + .../docker/pkg/archive/changes_other.go | 97 + .../docker/pkg/archive/changes_posix_test.go | 127 + .../docker/docker/pkg/archive/changes_test.go | 527 + .../docker/docker/pkg/archive/changes_unix.go | 36 + .../docker/pkg/archive/changes_windows.go | 30 + .../docker/docker/pkg/archive/copy.go | 458 + .../docker/docker/pkg/archive/copy_test.go | 974 + .../docker/docker/pkg/archive/copy_unix.go | 11 + .../docker/docker/pkg/archive/copy_windows.go | 9 + .../docker/docker/pkg/archive/diff.go | 279 + .../docker/docker/pkg/archive/diff_test.go | 370 + .../docker/pkg/archive/example_changes.go | 97 + .../docker/docker/pkg/archive/time_linux.go | 16 + .../docker/pkg/archive/time_unsupported.go | 16 + .../docker/docker/pkg/archive/utils_test.go | 166 + .../docker/docker/pkg/archive/whiteouts.go | 23 + .../docker/docker/pkg/archive/wrap.go | 59 + .../docker/docker/pkg/archive/wrap_test.go | 98 + .../docker/docker/pkg/fileutils/fileutils.go | 279 + .../docker/pkg/fileutils/fileutils_test.go | 573 + .../docker/pkg/fileutils/fileutils_unix.go | 22 + .../docker/pkg/fileutils/fileutils_windows.go | 7 + .../docker/docker/pkg/homedir/homedir.go | 39 + .../docker/docker/pkg/homedir/homedir_test.go | 24 + .../docker/docker/pkg/idtools/idtools.go | 195 + .../docker/docker/pkg/idtools/idtools_unix.go | 60 + .../docker/pkg/idtools/idtools_unix_test.go | 243 + .../docker/pkg/idtools/idtools_windows.go | 18 + .../docker/pkg/idtools/usergroupadd_linux.go | 155 + .../pkg/idtools/usergroupadd_unsupported.go | 12 + .../docker/docker/pkg/ioutils/bytespipe.go | 152 + .../docker/pkg/ioutils/bytespipe_test.go | 158 + .../docker/docker/pkg/ioutils/fmt.go | 22 + .../docker/docker/pkg/ioutils/fmt_test.go | 17 + .../docker/docker/pkg/ioutils/multireader.go | 226 + .../docker/pkg/ioutils/multireader_test.go | 149 + .../docker/docker/pkg/ioutils/readers.go | 154 + .../docker/docker/pkg/ioutils/readers_test.go | 94 + .../docker/docker/pkg/ioutils/scheduler.go | 6 + .../docker/pkg/ioutils/scheduler_gccgo.go | 13 + .../docker/docker/pkg/ioutils/temp_unix.go | 10 + .../docker/docker/pkg/ioutils/temp_windows.go | 18 + .../docker/docker/pkg/ioutils/writeflusher.go | 92 + .../docker/docker/pkg/ioutils/writers.go | 66 + .../docker/docker/pkg/ioutils/writers_test.go | 65 + .../docker/docker/pkg/longpath/longpath.go | 26 + .../docker/pkg/longpath/longpath_test.go | 22 + .../docker/docker/pkg/pools/pools.go | 119 + .../docker/docker/pkg/pools/pools_test.go | 162 + .../docker/docker/pkg/promise/promise.go | 11 + .../docker/docker/pkg/stdcopy/stdcopy.go | 175 + .../docker/docker/pkg/stdcopy/stdcopy_test.go | 261 + .../docker/docker/pkg/system/chtimes.go | 47 + .../docker/docker/pkg/system/chtimes_test.go | 94 + .../docker/pkg/system/chtimes_unix_test.go | 91 + .../docker/pkg/system/chtimes_windows_test.go | 86 + .../docker/docker/pkg/system/errors.go | 10 + .../docker/pkg/system/events_windows.go | 83 + .../docker/docker/pkg/system/filesys.go | 19 + .../docker/pkg/system/filesys_windows.go | 82 + .../docker/docker/pkg/system/lstat.go | 19 + .../docker/pkg/system/lstat_unix_test.go | 30 + .../docker/docker/pkg/system/lstat_windows.go | 25 + .../docker/docker/pkg/system/meminfo.go | 17 + .../docker/docker/pkg/system/meminfo_linux.go | 66 + .../docker/pkg/system/meminfo_unix_test.go | 40 + .../docker/pkg/system/meminfo_unsupported.go | 8 + .../docker/pkg/system/meminfo_windows.go | 44 + .../docker/docker/pkg/system/mknod.go | 22 + .../docker/docker/pkg/system/mknod_windows.go | 13 + .../docker/docker/pkg/system/path_unix.go | 8 + 
.../docker/docker/pkg/system/path_windows.go | 7 + .../docker/docker/pkg/system/stat.go | 53 + .../docker/docker/pkg/system/stat_freebsd.go | 27 + .../docker/docker/pkg/system/stat_linux.go | 33 + .../docker/docker/pkg/system/stat_solaris.go | 17 + .../docker/pkg/system/stat_unix_test.go | 39 + .../docker/pkg/system/stat_unsupported.go | 17 + .../docker/docker/pkg/system/stat_windows.go | 43 + .../docker/docker/pkg/system/syscall_unix.go | 11 + .../docker/pkg/system/syscall_windows.go | 36 + .../docker/docker/pkg/system/umask.go | 13 + .../docker/docker/pkg/system/umask_windows.go | 9 + .../docker/docker/pkg/system/utimes_darwin.go | 8 + .../docker/pkg/system/utimes_freebsd.go | 22 + .../docker/docker/pkg/system/utimes_linux.go | 26 + .../docker/pkg/system/utimes_unix_test.go | 68 + .../docker/pkg/system/utimes_unsupported.go | 10 + .../docker/docker/pkg/system/xattrs_linux.go | 63 + .../docker/pkg/system/xattrs_unsupported.go | 13 + .../docker/go-units/CONTRIBUTING.md | 67 + .../github.com/docker/go-units/LICENSE.code | 191 + .../github.com/docker/go-units/LICENSE.docs | 425 + .../github.com/docker/go-units/MAINTAINERS | 27 + .../github.com/docker/go-units/README.md | 18 + .../github.com/docker/go-units/circle.yml | 11 + .../github.com/docker/go-units/duration.go | 33 + .../docker/go-units/duration_test.go | 81 + .../github.com/docker/go-units/size.go | 95 + .../github.com/docker/go-units/size_test.go | 160 + .../github.com/docker/go-units/ulimit.go | 118 + .../github.com/docker/go-units/ulimit_test.go | 74 + .../github.com/gorilla/context/LICENSE | 27 + .../github.com/gorilla/context/README.md | 7 + .../github.com/gorilla/context/context.go | 143 + .../gorilla/context/context_test.go | 161 + .../github.com/gorilla/context/doc.go | 82 + .../external/github.com/gorilla/mux/LICENSE | 27 + .../external/github.com/gorilla/mux/README.md | 240 + .../github.com/gorilla/mux/bench_test.go | 21 + .../external/github.com/gorilla/mux/doc.go | 206 + .../external/github.com/gorilla/mux/mux.go | 481 + .../github.com/gorilla/mux/mux_test.go | 1358 + .../github.com/gorilla/mux/old_test.go | 714 + .../external/github.com/gorilla/mux/regexp.go | 317 + .../external/github.com/gorilla/mux/route.go | 595 + .../github.com/hashicorp/go-cleanhttp/LICENSE | 363 + .../hashicorp/go-cleanhttp/README.md | 30 + .../hashicorp/go-cleanhttp/cleanhttp.go | 40 + .../runc/libcontainer/user/MAINTAINERS | 2 + .../runc/libcontainer/user/lookup.go | 108 + .../runc/libcontainer/user/lookup_unix.go | 30 + .../libcontainer/user/lookup_unsupported.go | 21 + .../runc/libcontainer/user/user.go | 418 + .../runc/libcontainer/user/user_test.go | 472 + .../golang.org/x/net/context/context.go | 447 + .../golang.org/x/net/context/context_test.go | 575 + .../x/net/context/withtimeout_test.go | 26 + .../external/golang.org/x/sys/unix/asm.s | 10 + .../golang.org/x/sys/unix/asm_darwin_386.s | 29 + .../golang.org/x/sys/unix/asm_darwin_amd64.s | 29 + .../golang.org/x/sys/unix/asm_darwin_arm.s | 30 + .../golang.org/x/sys/unix/asm_darwin_arm64.s | 30 + .../golang.org/x/sys/unix/asm_dragonfly_386.s | 29 + .../x/sys/unix/asm_dragonfly_amd64.s | 29 + .../golang.org/x/sys/unix/asm_freebsd_386.s | 29 + .../golang.org/x/sys/unix/asm_freebsd_amd64.s | 29 + .../golang.org/x/sys/unix/asm_freebsd_arm.s | 29 + .../golang.org/x/sys/unix/asm_linux_386.s | 35 + .../golang.org/x/sys/unix/asm_linux_amd64.s | 29 + .../golang.org/x/sys/unix/asm_linux_arm.s | 29 + .../golang.org/x/sys/unix/asm_linux_arm64.s | 24 + .../golang.org/x/sys/unix/asm_linux_ppc64x.s | 28 + 
.../golang.org/x/sys/unix/asm_netbsd_386.s | 29 + .../golang.org/x/sys/unix/asm_netbsd_amd64.s | 29 + .../golang.org/x/sys/unix/asm_netbsd_arm.s | 29 + .../golang.org/x/sys/unix/asm_openbsd_386.s | 29 + .../golang.org/x/sys/unix/asm_openbsd_amd64.s | 29 + .../golang.org/x/sys/unix/asm_solaris_amd64.s | 17 + .../golang.org/x/sys/unix/constants.go | 13 + .../golang.org/x/sys/unix/creds_test.go | 121 + .../golang.org/x/sys/unix/env_unix.go | 27 + .../golang.org/x/sys/unix/env_unset.go | 14 + .../golang.org/x/sys/unix/export_test.go | 9 + .../external/golang.org/x/sys/unix/flock.go | 24 + .../x/sys/unix/flock_linux_32bit.go | 13 + .../external/golang.org/x/sys/unix/gccgo.go | 46 + .../external/golang.org/x/sys/unix/gccgo_c.c | 41 + .../x/sys/unix/gccgo_linux_amd64.go | 20 + .../external/golang.org/x/sys/unix/mkall.sh | 274 + .../golang.org/x/sys/unix/mkerrors.sh | 476 + .../golang.org/x/sys/unix/mksyscall.pl | 323 + .../x/sys/unix/mksyscall_solaris.pl | 294 + .../golang.org/x/sys/unix/mksysctl_openbsd.pl | 264 + .../golang.org/x/sys/unix/mksysnum_darwin.pl | 39 + .../x/sys/unix/mksysnum_dragonfly.pl | 50 + .../golang.org/x/sys/unix/mksysnum_freebsd.pl | 63 + .../golang.org/x/sys/unix/mksysnum_linux.pl | 58 + .../golang.org/x/sys/unix/mksysnum_netbsd.pl | 58 + .../golang.org/x/sys/unix/mksysnum_openbsd.pl | 50 + .../golang.org/x/sys/unix/mmap_unix_test.go | 23 + .../external/golang.org/x/sys/unix/race.go | 30 + .../external/golang.org/x/sys/unix/race0.go | 25 + .../golang.org/x/sys/unix/sockcmsg_linux.go | 36 + .../golang.org/x/sys/unix/sockcmsg_unix.go | 103 + .../external/golang.org/x/sys/unix/str.go | 26 + .../external/golang.org/x/sys/unix/syscall.go | 74 + .../golang.org/x/sys/unix/syscall_bsd.go | 628 + .../golang.org/x/sys/unix/syscall_bsd_test.go | 42 + .../golang.org/x/sys/unix/syscall_darwin.go | 509 + .../x/sys/unix/syscall_darwin_386.go | 79 + .../x/sys/unix/syscall_darwin_amd64.go | 81 + .../x/sys/unix/syscall_darwin_arm.go | 73 + .../x/sys/unix/syscall_darwin_arm64.go | 79 + .../x/sys/unix/syscall_dragonfly.go | 411 + .../x/sys/unix/syscall_dragonfly_386.go | 63 + .../x/sys/unix/syscall_dragonfly_amd64.go | 63 + .../golang.org/x/sys/unix/syscall_freebsd.go | 682 + .../x/sys/unix/syscall_freebsd_386.go | 63 + .../x/sys/unix/syscall_freebsd_amd64.go | 63 + .../x/sys/unix/syscall_freebsd_arm.go | 63 + .../x/sys/unix/syscall_freebsd_test.go | 20 + .../golang.org/x/sys/unix/syscall_linux.go | 1086 + .../x/sys/unix/syscall_linux_386.go | 388 + .../x/sys/unix/syscall_linux_amd64.go | 146 + .../x/sys/unix/syscall_linux_arm.go | 233 + .../x/sys/unix/syscall_linux_arm64.go | 150 + .../x/sys/unix/syscall_linux_ppc64x.go | 96 + .../golang.org/x/sys/unix/syscall_netbsd.go | 492 + .../x/sys/unix/syscall_netbsd_386.go | 44 + .../x/sys/unix/syscall_netbsd_amd64.go | 44 + .../x/sys/unix/syscall_netbsd_arm.go | 44 + .../golang.org/x/sys/unix/syscall_no_getwd.go | 11 + .../golang.org/x/sys/unix/syscall_openbsd.go | 303 + .../x/sys/unix/syscall_openbsd_386.go | 44 + .../x/sys/unix/syscall_openbsd_amd64.go | 44 + .../golang.org/x/sys/unix/syscall_solaris.go | 713 + .../x/sys/unix/syscall_solaris_amd64.go | 37 + .../golang.org/x/sys/unix/syscall_test.go | 50 + .../golang.org/x/sys/unix/syscall_unix.go | 297 + .../x/sys/unix/syscall_unix_test.go | 318 + .../golang.org/x/sys/unix/types_darwin.go | 250 + .../golang.org/x/sys/unix/types_dragonfly.go | 242 + .../golang.org/x/sys/unix/types_freebsd.go | 353 + .../golang.org/x/sys/unix/types_linux.go | 406 + .../golang.org/x/sys/unix/types_netbsd.go | 232 + 
.../golang.org/x/sys/unix/types_openbsd.go | 244 + .../golang.org/x/sys/unix/types_solaris.go | 260 + .../x/sys/unix/zerrors_darwin_386.go | 1576 + .../x/sys/unix/zerrors_darwin_amd64.go | 1576 + .../x/sys/unix/zerrors_darwin_arm.go | 1293 + .../x/sys/unix/zerrors_darwin_arm64.go | 1576 + .../x/sys/unix/zerrors_dragonfly_386.go | 1530 + .../x/sys/unix/zerrors_dragonfly_amd64.go | 1530 + .../x/sys/unix/zerrors_freebsd_386.go | 1743 + .../x/sys/unix/zerrors_freebsd_amd64.go | 1748 + .../x/sys/unix/zerrors_freebsd_arm.go | 1729 + .../x/sys/unix/zerrors_linux_386.go | 1817 + .../x/sys/unix/zerrors_linux_amd64.go | 1818 + .../x/sys/unix/zerrors_linux_arm.go | 1742 + .../x/sys/unix/zerrors_linux_arm64.go | 1896 + .../x/sys/unix/zerrors_linux_ppc64.go | 1969 + .../x/sys/unix/zerrors_linux_ppc64le.go | 1968 + .../x/sys/unix/zerrors_netbsd_386.go | 1712 + .../x/sys/unix/zerrors_netbsd_amd64.go | 1702 + .../x/sys/unix/zerrors_netbsd_arm.go | 1688 + .../x/sys/unix/zerrors_openbsd_386.go | 1584 + .../x/sys/unix/zerrors_openbsd_amd64.go | 1583 + .../x/sys/unix/zerrors_solaris_amd64.go | 1436 + .../x/sys/unix/zsyscall_darwin_386.go | 1426 + .../x/sys/unix/zsyscall_darwin_amd64.go | 1442 + .../x/sys/unix/zsyscall_darwin_arm.go | 1426 + .../x/sys/unix/zsyscall_darwin_arm64.go | 1426 + .../x/sys/unix/zsyscall_dragonfly_386.go | 1412 + .../x/sys/unix/zsyscall_dragonfly_amd64.go | 1412 + .../x/sys/unix/zsyscall_freebsd_386.go | 1664 + .../x/sys/unix/zsyscall_freebsd_amd64.go | 1664 + .../x/sys/unix/zsyscall_freebsd_arm.go | 1664 + .../x/sys/unix/zsyscall_linux_386.go | 1628 + .../x/sys/unix/zsyscall_linux_amd64.go | 1822 + .../x/sys/unix/zsyscall_linux_arm.go | 1756 + .../x/sys/unix/zsyscall_linux_arm64.go | 1750 + .../x/sys/unix/zsyscall_linux_ppc64.go | 1792 + .../x/sys/unix/zsyscall_linux_ppc64le.go | 1792 + .../x/sys/unix/zsyscall_netbsd_386.go | 1326 + .../x/sys/unix/zsyscall_netbsd_amd64.go | 1326 + .../x/sys/unix/zsyscall_netbsd_arm.go | 1326 + .../x/sys/unix/zsyscall_openbsd_386.go | 1386 + .../x/sys/unix/zsyscall_openbsd_amd64.go | 1386 + .../x/sys/unix/zsyscall_solaris_amd64.go | 1559 + .../golang.org/x/sys/unix/zsysctl_openbsd.go | 270 + .../x/sys/unix/zsysnum_darwin_386.go | 398 + .../x/sys/unix/zsysnum_darwin_amd64.go | 398 + .../x/sys/unix/zsysnum_darwin_arm.go | 358 + .../x/sys/unix/zsysnum_darwin_arm64.go | 398 + .../x/sys/unix/zsysnum_dragonfly_386.go | 304 + .../x/sys/unix/zsysnum_dragonfly_amd64.go | 304 + .../x/sys/unix/zsysnum_freebsd_386.go | 351 + .../x/sys/unix/zsysnum_freebsd_amd64.go | 351 + .../x/sys/unix/zsysnum_freebsd_arm.go | 351 + .../x/sys/unix/zsysnum_linux_386.go | 355 + .../x/sys/unix/zsysnum_linux_amd64.go | 321 + .../x/sys/unix/zsysnum_linux_arm.go | 356 + .../x/sys/unix/zsysnum_linux_arm64.go | 272 + .../x/sys/unix/zsysnum_linux_ppc64.go | 360 + .../x/sys/unix/zsysnum_linux_ppc64le.go | 353 + .../x/sys/unix/zsysnum_netbsd_386.go | 273 + .../x/sys/unix/zsysnum_netbsd_amd64.go | 273 + .../x/sys/unix/zsysnum_netbsd_arm.go | 273 + .../x/sys/unix/zsysnum_openbsd_386.go | 207 + .../x/sys/unix/zsysnum_openbsd_amd64.go | 207 + .../x/sys/unix/zsysnum_solaris_amd64.go | 13 + .../x/sys/unix/ztypes_darwin_386.go | 447 + .../x/sys/unix/ztypes_darwin_amd64.go | 462 + .../x/sys/unix/ztypes_darwin_arm.go | 449 + .../x/sys/unix/ztypes_darwin_arm64.go | 457 + .../x/sys/unix/ztypes_dragonfly_386.go | 437 + .../x/sys/unix/ztypes_dragonfly_amd64.go | 443 + .../x/sys/unix/ztypes_freebsd_386.go | 502 + .../x/sys/unix/ztypes_freebsd_amd64.go | 505 + .../x/sys/unix/ztypes_freebsd_arm.go | 497 + 
.../golang.org/x/sys/unix/ztypes_linux_386.go | 590 + .../x/sys/unix/ztypes_linux_amd64.go | 608 + .../golang.org/x/sys/unix/ztypes_linux_arm.go | 579 + .../x/sys/unix/ztypes_linux_arm64.go | 595 + .../x/sys/unix/ztypes_linux_ppc64.go | 605 + .../x/sys/unix/ztypes_linux_ppc64le.go | 605 + .../x/sys/unix/ztypes_netbsd_386.go | 396 + .../x/sys/unix/ztypes_netbsd_amd64.go | 403 + .../x/sys/unix/ztypes_netbsd_arm.go | 401 + .../x/sys/unix/ztypes_openbsd_386.go | 441 + .../x/sys/unix/ztypes_openbsd_amd64.go | 448 + .../x/sys/unix/ztypes_solaris_amd64.go | 422 + .../fsouza/go-dockerclient/image.go | 599 + .../fsouza/go-dockerclient/image_test.go | 1018 + .../go-dockerclient/integration_test.go | 94 + .../github.com/fsouza/go-dockerclient/misc.go | 57 + .../fsouza/go-dockerclient/misc_test.go | 159 + .../fsouza/go-dockerclient/network.go | 215 + .../fsouza/go-dockerclient/network_test.go | 173 + .../fsouza/go-dockerclient/signal.go | 49 + .../github.com/fsouza/go-dockerclient/tar.go | 117 + .../testing/data/.dockerignore | 3 + .../go-dockerclient/testing/data/Dockerfile | 15 + .../go-dockerclient/testing/data/barfile | 0 .../go-dockerclient/testing/data/ca.pem | 18 + .../go-dockerclient/testing/data/cert.pem | 18 + .../testing/data/container.tar | Bin 0 -> 2048 bytes .../testing/data/dockerfile.tar | Bin 0 -> 2560 bytes .../go-dockerclient/testing/data/foofile | 0 .../go-dockerclient/testing/data/key.pem | 27 + .../go-dockerclient/testing/data/server.pem | 18 + .../testing/data/serverkey.pem | 27 + .../fsouza/go-dockerclient/testing/server.go | 1246 + .../go-dockerclient/testing/server_test.go | 2103 + .../github.com/fsouza/go-dockerclient/tls.go | 96 + .../fsouza/go-dockerclient/volume.go | 127 + .../fsouza/go-dockerclient/volume_test.go | 142 + vendor/github.com/go-chef/chef/.gitignore | 25 + vendor/github.com/go-chef/chef/LICENSE | 201 + vendor/github.com/go-chef/chef/README.md | 95 + vendor/github.com/go-chef/chef/acl.go | 52 + vendor/github.com/go-chef/chef/acl_test.go | 102 + .../github.com/go-chef/chef/authentication.go | 122 + vendor/github.com/go-chef/chef/build.sh | 26 + vendor/github.com/go-chef/chef/client.go | 88 + vendor/github.com/go-chef/chef/client_test.go | 102 + vendor/github.com/go-chef/chef/cookbook.go | 144 + .../github.com/go-chef/chef/cookbook_test.go | 139 + vendor/github.com/go-chef/chef/databag.go | 108 + .../github.com/go-chef/chef/databag_test.go | 164 + vendor/github.com/go-chef/chef/debug.go | 9 + vendor/github.com/go-chef/chef/doc.go | 96 + vendor/github.com/go-chef/chef/environment.go | 114 + .../go-chef/chef/environment_test.go | 158 + .../chef/examples/cookbooks/cookbooks.go | 59 + .../go-chef/chef/examples/cookbooks/key.pem | 27 + .../go-chef/chef/examples/nodes/key.pem | 27 + .../go-chef/chef/examples/nodes/nodes.go | 86 + .../go-chef/chef/examples/sandboxes/key.pem | 27 + .../chef/examples/sandboxes/sandboxes.go | 103 + .../go-chef/chef/examples/search/key.pem | 27 + .../go-chef/chef/examples/search/search.go | 89 + vendor/github.com/go-chef/chef/http.go | 306 + vendor/github.com/go-chef/chef/http_test.go | 571 + vendor/github.com/go-chef/chef/node.go | 87 + vendor/github.com/go-chef/chef/node_test.go | 122 + vendor/github.com/go-chef/chef/reader.go | 15 + vendor/github.com/go-chef/chef/reader_test.go | 46 + vendor/github.com/go-chef/chef/release.go | 5 + vendor/github.com/go-chef/chef/role.go | 101 + vendor/github.com/go-chef/chef/role_test.go | 192 + vendor/github.com/go-chef/chef/run_list.go | 3 + .../github.com/go-chef/chef/run_list_test.go | 28 + 
vendor/github.com/go-chef/chef/sandbox.go | 70 + .../github.com/go-chef/chef/sandbox_test.go | 96 + vendor/github.com/go-chef/chef/search.go | 148 + vendor/github.com/go-chef/chef/search_test.go | 89 + .../github.com/go-chef/chef/test/client.json | 7 + .../go-chef/chef/test/cookbooks_response.json | 20 + .../go-chef/chef/test/environment.json | 28 + vendor/github.com/go-chef/chef/test/node.json | 26 + vendor/github.com/go-chef/chef/test/role.json | 23 + vendor/github.com/go-chef/chef/wercker.yml | 56 + vendor/github.com/go-ini/ini/.gitignore | 4 + vendor/github.com/go-ini/ini/LICENSE | 191 + vendor/github.com/go-ini/ini/README.md | 590 + vendor/github.com/go-ini/ini/README_ZH.md | 577 + vendor/github.com/go-ini/ini/ini.go | 1027 + vendor/github.com/go-ini/ini/ini_test.go | 574 + vendor/github.com/go-ini/ini/parser.go | 312 + vendor/github.com/go-ini/ini/struct.go | 351 + vendor/github.com/go-ini/ini/struct_test.go | 239 + .../github.com/go-ini/ini/testdata/conf.ini | 2 + .../google/go-querystring/query/encode.go | 311 + .../go-querystring/query/encode_test.go | 301 + .../hashicorp/atlas-go/archive/archive.go | 500 + .../atlas-go/archive/archive_test.go | 554 + .../test-fixtures/archive-dir-mode/file.txt | 1 + .../archive-file-compressed/file.tar.gz | Bin 0 -> 144 bytes .../test-fixtures/archive-file/foo.txt | 1 + .../test-fixtures/archive-flat/baz.txt | 1 + .../test-fixtures/archive-flat/foo.txt | 1 + .../archive-git/DOTgit/COMMIT_EDITMSG | 12 + .../test-fixtures/archive-git/DOTgit/HEAD | 1 + .../test-fixtures/archive-git/DOTgit/config | 13 + .../archive-git/DOTgit/description | 1 + .../DOTgit/hooks/applypatch-msg.sample | 15 + .../DOTgit/hooks/commit-msg.sample | 24 + .../DOTgit/hooks/post-update.sample | 8 + .../DOTgit/hooks/pre-applypatch.sample | 14 + .../DOTgit/hooks/pre-commit.sample | 49 + .../archive-git/DOTgit/hooks/pre-push.sample | 54 + .../DOTgit/hooks/pre-rebase.sample | 169 + .../DOTgit/hooks/prepare-commit-msg.sample | 36 + .../archive-git/DOTgit/hooks/update.sample | 128 + .../test-fixtures/archive-git/DOTgit/index | Bin 0 -> 256 bytes .../archive-git/DOTgit/info/exclude | 6 + .../archive-git/DOTgit/logs/HEAD | 1 + .../archive-git/DOTgit/logs/refs/heads/master | 1 + .../25/7cc5642cb1a054f08cc83f2d943e56fd3ebe99 | Bin 0 -> 19 bytes .../57/16ca5987cbf97d6bb54920bea6adde242d87e6 | Bin 0 -> 19 bytes .../75/25d17cbbb56f3253a20903ffddc07c6c935c76 | 4 + .../7e/49ea5550b356e32b63c044201f5f7da1e0925f | Bin 0 -> 54 bytes .../7f/7402c7d2a6e71ca3db3e236099771b160b8ad1 | Bin 0 -> 113 bytes .../archive-git/DOTgit/refs/heads/master | 1 + .../archive/test-fixtures/archive-git/bar.txt | 1 + .../archive/test-fixtures/archive-git/foo.txt | 1 + .../archive-git/subdir/hello.txt | 1 + .../test-fixtures/archive-git/untracked.txt | 1 + .../archive/test-fixtures/archive-hg/bar.txt | 1 + .../archive/test-fixtures/archive-hg/foo.txt | 1 + .../test-fixtures/archive-hg/subdir/hello.txt | 1 + .../archive-subdir-splat/bar.txt | 1 + .../build/darwin-amd64/build.txt | 1 + .../build/linux-amd64/build.txt | 1 + .../test-fixtures/archive-subdir/bar.txt | 1 + .../test-fixtures/archive-subdir/foo.txt | 1 + .../archive-subdir/subdir/hello.txt | 1 + .../test-fixtures/archive-symlink/link/link | 1 + .../archive-symlink/real/foo.txt | 0 .../hashicorp/atlas-go/archive/vcs.go | 365 + .../hashicorp/atlas-go/archive/vcs_test.go | 260 + .../hashicorp/atlas-go/v1/application.go | 164 + .../hashicorp/atlas-go/v1/application_test.go | 118 + .../hashicorp/atlas-go/v1/artifact.go | 248 + 
.../hashicorp/atlas-go/v1/artifact_test.go | 167 +
.../hashicorp/atlas-go/v1/atlas_test.go | 453 +
.../hashicorp/atlas-go/v1/authentication.go | 88 +
.../atlas-go/v1/authentication_test.go | 30 +
.../hashicorp/atlas-go/v1/build_config.go | 183 +
.../atlas-go/v1/build_config_test.go | 95 +
.../hashicorp/atlas-go/v1/client.go | 301 +
.../hashicorp/atlas-go/v1/client_test.go | 268 +
.../hashicorp/atlas-go/v1/terraform.go | 97 +
.../hashicorp/atlas-go/v1/terraform_test.go | 57 +
.../github.com/hashicorp/atlas-go/v1/util.go | 22 +
.../hashicorp/atlas-go/v1/util_test.go | 57 +
.../github.com/hashicorp/consul/api/README.md | 43 +
vendor/github.com/hashicorp/consul/api/acl.go | 140 +
.../hashicorp/consul/api/acl_test.go | 128 +
.../github.com/hashicorp/consul/api/agent.go | 337 +
.../hashicorp/consul/api/agent_test.go | 568 +
vendor/github.com/hashicorp/consul/api/api.go | 475 +
.../hashicorp/consul/api/api_test.go | 288 +
.../hashicorp/consul/api/catalog.go | 182 +
.../hashicorp/consul/api/catalog_test.go | 279 +
.../hashicorp/consul/api/coordinate.go | 66 +
.../hashicorp/consul/api/coordinate_test.go | 54 +
.../github.com/hashicorp/consul/api/event.go | 104 +
.../hashicorp/consul/api/event_test.go | 49 +
.../github.com/hashicorp/consul/api/health.go | 136 +
.../hashicorp/consul/api/health_test.go | 125 +
vendor/github.com/hashicorp/consul/api/kv.go | 240 +
.../hashicorp/consul/api/kv_test.go | 447 +
.../github.com/hashicorp/consul/api/lock.go | 380 +
.../hashicorp/consul/api/lock_test.go | 560 +
.../hashicorp/consul/api/prepared_query.go | 173 +
.../consul/api/prepared_query_test.go | 123 +
vendor/github.com/hashicorp/consul/api/raw.go | 24 +
.../hashicorp/consul/api/semaphore.go | 512 +
.../hashicorp/consul/api/semaphore_test.go | 518 +
.../hashicorp/consul/api/session.go | 217 +
.../hashicorp/consul/api/session_test.go | 314 +
.../github.com/hashicorp/consul/api/status.go | 43 +
.../hashicorp/consul/api/status_test.go | 37 +
vendor/github.com/hashicorp/errwrap/LICENSE | 354 +
vendor/github.com/hashicorp/errwrap/README.md | 89 +
.../github.com/hashicorp/errwrap/errwrap.go | 169 +
.../hashicorp/errwrap/errwrap_test.go | 94 +
.../hashicorp/go-checkpoint/LICENSE | 354 +
.../hashicorp/go-checkpoint/README.md | 22 +
.../hashicorp/go-checkpoint/checkpoint.go | 359 +
.../go-checkpoint/checkpoint_test.go | 201 +
.../github.com/hashicorp/go-cleanhttp/LICENSE | 363 +
.../hashicorp/go-cleanhttp/README.md | 30 +
.../hashicorp/go-cleanhttp/cleanhttp.go | 40 +
.../hashicorp/go-getter/.travis.yml | 10 +
vendor/github.com/hashicorp/go-getter/LICENSE | 354 +
.../github.com/hashicorp/go-getter/README.md | 151 +
.../hashicorp/go-getter/Vagrantfile | 18 +
.../github.com/hashicorp/go-getter/client.go | 304 +
.../hashicorp/go-getter/copy_dir.go | 78 +
.../hashicorp/go-getter/decompress.go | 29 +
.../hashicorp/go-getter/decompress_bzip2.go | 45 +
.../go-getter/decompress_bzip2_test.go | 32 +
.../hashicorp/go-getter/decompress_gzip.go | 49 +
.../go-getter/decompress_gzip_test.go | 32 +
.../hashicorp/go-getter/decompress_tbz2.go | 95 +
.../go-getter/decompress_tbz2_test.go | 56 +
.../hashicorp/go-getter/decompress_testing.go | 121 +
.../hashicorp/go-getter/decompress_tgz.go | 99 +
.../go-getter/decompress_tgz_test.go | 56 +
.../hashicorp/go-getter/decompress_zip.go | 87 +
.../go-getter/decompress_zip_test.go | 56 +
.../github.com/hashicorp/go-getter/detect.go | 97 +
.../hashicorp/go-getter/detect_bitbucket.go | 66 +
.../go-getter/detect_bitbucket_test.go | 67 +
.../hashicorp/go-getter/detect_file.go | 60 +
.../hashicorp/go-getter/detect_file_test.go | 88 +
.../hashicorp/go-getter/detect_github.go | 73 +
.../hashicorp/go-getter/detect_github_test.go | 55 +
.../hashicorp/go-getter/detect_s3.go | 61 +
.../hashicorp/go-getter/detect_s3_test.go | 84 +
.../hashicorp/go-getter/detect_test.go | 51 +
.../hashicorp/go-getter/folder_storage.go | 65 +
.../go-getter/folder_storage_test.go | 48 +
vendor/github.com/hashicorp/go-getter/get.go | 120 +
.../hashicorp/go-getter/get_file.go | 98 +
.../hashicorp/go-getter/get_file_test.go | 150 +
.../github.com/hashicorp/go-getter/get_git.go | 120 +
.../hashicorp/go-getter/get_git_test.go | 231 +
.../github.com/hashicorp/go-getter/get_hg.go | 122 +
.../hashicorp/go-getter/get_hg_test.go | 98 +
.../hashicorp/go-getter/get_http.go | 201 +
.../hashicorp/go-getter/get_http_test.go | 184 +
.../hashicorp/go-getter/get_mock.go | 45 +
.../github.com/hashicorp/go-getter/get_s3.go | 194 +
.../hashicorp/go-getter/get_s3_test.go | 108 +
.../hashicorp/go-getter/get_test.go | 245 +
.../hashicorp/go-getter/helper/url/url.go | 14 +
.../go-getter/helper/url/url_test.go | 88 +
.../go-getter/helper/url/url_unix.go | 11 +
.../go-getter/helper/url/url_windows.go | 40 +
.../hashicorp/go-getter/module_test.go | 83 +
.../github.com/hashicorp/go-getter/source.go | 36 +
.../hashicorp/go-getter/source_test.go | 39 +
.../github.com/hashicorp/go-getter/storage.go | 13 +
.../go-getter/test-fixtures/archive.tar.gz | Bin 0 -> 141 bytes
.../go-getter/test-fixtures/basic-dot/main.tf | 5 +
.../basic-file-archive/archive.tar.gz | Bin 0 -> 141 bytes
.../test-fixtures/basic-file/foo.txt | 1 +
.../basic-git/DOTgit/COMMIT_EDITMSG | 7 +
.../test-fixtures/basic-git/DOTgit/HEAD | 1 +
.../test-fixtures/basic-git/DOTgit/config | 7 +
.../basic-git/DOTgit/description | 1 +
.../DOTgit/hooks/applypatch-msg.sample | 15 +
.../basic-git/DOTgit/hooks/commit-msg.sample | 24 +
.../basic-git/DOTgit/hooks/post-update.sample | 8 +
.../DOTgit/hooks/pre-applypatch.sample | 14 +
.../basic-git/DOTgit/hooks/pre-commit.sample | 49 +
.../basic-git/DOTgit/hooks/pre-push.sample | 54 +
.../basic-git/DOTgit/hooks/pre-rebase.sample | 169 +
.../DOTgit/hooks/prepare-commit-msg.sample | 36 +
.../basic-git/DOTgit/hooks/update.sample | 128 +
.../test-fixtures/basic-git/DOTgit/index | Bin 0 -> 320 bytes
.../basic-git/DOTgit/info/exclude | 6 +
.../test-fixtures/basic-git/DOTgit/logs/HEAD | 8 +
.../basic-git/DOTgit/logs/refs/heads/master | 5 +
.../DOTgit/logs/refs/heads/test-branch | 2 +
.../14/6492b04efe0aae2b8288c5c0aef6a951030fde | Bin 0 -> 170 bytes
.../1d/3d6744266642cb7623e2c678c33c77b075c49f | Bin 0 -> 84 bytes
.../1f/31e97f053caeb5d6b7bffa3faf82941c99efa2 | Bin 0 -> 167 bytes
.../24/3f0fc5c4e586d1a3daa54c981b6f34e9ab1085 | Bin 0 -> 164 bytes
.../25/851303ab930fdacfdb7df91adbf626a483df48 | 1 +
.../38/30637158f774a20edcc0bf1c4d07b0bf87c43d | Bin 0 -> 59 bytes
.../40/4618c9d96dfa0a5d365b518e0dfbb5a387c649 | Bin 0 -> 84 bytes
.../49/7bc37401eb3c9b11865b1768725b64066eccee | 2 +
.../7b/7614f8759ac8b5e4b02be65ad8e2667be6dd87 | 2 +
.../8c/1a79ca1f98b6d00f5bf5c6cc9e8d3c092dd3ba | Bin 0 -> 51 bytes
.../96/43088174e25a9bd91c27970a580af0085c9f32 | Bin 0 -> 52 bytes
.../b7/757b6a3696ad036e9aa2f5b4856d09e7f17993 | Bin 0 -> 82 bytes
.../d2/368a7048e1e919eebaad2a9a275b18cc9c4181 | Bin 0 -> 116 bytes
.../e6/9de29bb2d1d6434b8b29ae775ad8c2e48c5391 | Bin 0 -> 15 bytes
.../e9/65047ad7c57865823c7d992b1d046ea66edf78 | Bin 0 -> 21 bytes
.../basic-git/DOTgit/refs/heads/master | 1 +
.../basic-git/DOTgit/refs/heads/test-branch | 1 +
.../basic-git/DOTgit/refs/tags/v1.0 | 1 +
.../go-getter/test-fixtures/basic-git/foo.txt | 1 +
.../go-getter/test-fixtures/basic-git/main.tf | 5 +
.../test-fixtures/basic-git/subdir/sub.tf | 0
.../go-getter/test-fixtures/basic-hg/foo.txt | 1 +
.../go-getter/test-fixtures/basic-hg/main.tf | 5 +
.../test-fixtures/basic-parent/a/a.tf | 3 +
.../test-fixtures/basic-parent/c/c.tf | 1 +
.../test-fixtures/basic-parent/main.tf | 3 +
.../basic-subdir/foo/sub/baz/main.tf | 0
.../basic-subdir/foo/sub/main.tf | 3 +
.../test-fixtures/basic-subdir/main.tf | 3 +
.../go-getter/test-fixtures/basic/foo/main.tf | 1 +
.../go-getter/test-fixtures/basic/main.tf | 5 +
.../test-fixtures/basic/subdir/sub.tf | 0
.../test-fixtures/child/foo/bar/main.tf | 2 +
.../go-getter/test-fixtures/child/foo/main.tf | 5 +
.../go-getter/test-fixtures/child/main.tf | 5 +
.../test-fixtures/decompress-bz2/single.bz2 | Bin 0 -> 40 bytes
.../test-fixtures/decompress-gz/single.gz | Bin 0 -> 29 bytes
.../decompress-tbz2/empty.tar.bz2 | Bin 0 -> 46 bytes
.../decompress-tbz2/multiple.tar.bz2 | Bin 0 -> 166 bytes
.../decompress-tbz2/single.tar.bz2 | Bin 0 -> 135 bytes
.../test-fixtures/decompress-tgz/empty.tar.gz | Bin 0 -> 45 bytes
.../decompress-tgz/multiple.tar.gz | Bin 0 -> 157 bytes
.../decompress-tgz/single.tar.gz | Bin 0 -> 137 bytes
.../test-fixtures/decompress-zip/empty.zip | Bin 0 -> 22 bytes
.../test-fixtures/decompress-zip/multiple.zip | Bin 0 -> 306 bytes
.../test-fixtures/decompress-zip/single.zip | Bin 0 -> 162 bytes
.../go-getter/test-fixtures/dup/foo/main.tf | 0
.../go-getter/test-fixtures/dup/main.tf | 7 +
.../git-branch-update/DOTgit-1/COMMIT_EDITMSG | 7 +
.../git-branch-update/DOTgit-1/HEAD | 1 +
.../git-branch-update/DOTgit-1/config | 7 +
.../git-branch-update/DOTgit-1/description | 1 +
.../DOTgit-1/hooks/applypatch-msg.sample | 15 +
.../DOTgit-1/hooks/commit-msg.sample | 24 +
.../DOTgit-1/hooks/post-update.sample | 8 +
.../DOTgit-1/hooks/pre-applypatch.sample | 14 +
.../DOTgit-1/hooks/pre-commit.sample | 49 +
.../DOTgit-1/hooks/pre-push.sample | 53 +
.../DOTgit-1/hooks/pre-rebase.sample | 169 +
.../DOTgit-1/hooks/prepare-commit-msg.sample | 36 +
.../DOTgit-1/hooks/update.sample | 128 +
.../git-branch-update/DOTgit-1/index | Bin 0 -> 217 bytes
.../git-branch-update/DOTgit-1/info/exclude | 6 +
.../git-branch-update/DOTgit-1/logs/HEAD | 3 +
.../DOTgit-1/logs/refs/heads/master | 1 +
.../DOTgit-1/logs/refs/heads/test-branch | 2 +
.../29/5c5449283c0fe38c75ae73144513240e5626a6 | Bin 0 -> 84 bytes
.../3e/655aa57060b300052ca598ba18b5752f8c0958 | Bin 0 -> 24 bytes
.../9a/311b181c4ec0fe4d8856b9fc935a191855742a | 2 +
.../a9/193e348262522550ebacc037f772c52cbfa5d5 | Bin 0 -> 52 bytes
.../ae/4fc53ed172182cf8ab07d2f087994950a2ccdf | Bin 0 -> 172 bytes
.../be/fe5fe7626f1850e202fb35ac375c61e1f27f95 | Bin 0 -> 22 bytes
.../DOTgit-1/refs/heads/master | 1 +
.../DOTgit-1/refs/heads/test-branch | 1 +
.../git-branch-update/DOTgit-2/COMMIT_EDITMSG | 10 +
.../git-branch-update/DOTgit-2/HEAD | 1 +
.../git-branch-update/DOTgit-2/config | 7 +
.../git-branch-update/DOTgit-2/description | 1 +
.../DOTgit-2/hooks/applypatch-msg.sample | 15 +
.../DOTgit-2/hooks/commit-msg.sample | 24 +
.../DOTgit-2/hooks/post-update.sample | 8 +
.../DOTgit-2/hooks/pre-applypatch.sample | 14 +
.../DOTgit-2/hooks/pre-commit.sample | 49 +
.../DOTgit-2/hooks/pre-push.sample | 53 +
.../DOTgit-2/hooks/pre-rebase.sample | 169 +
.../DOTgit-2/hooks/prepare-commit-msg.sample | 36 +
.../DOTgit-2/hooks/update.sample | 128 +
.../git-branch-update/DOTgit-2/index | Bin 0 -> 305 bytes
.../git-branch-update/DOTgit-2/info/exclude | 6 +
.../git-branch-update/DOTgit-2/logs/HEAD | 4 +
.../DOTgit-2/logs/refs/heads/master | 1 +
.../DOTgit-2/logs/refs/heads/test-branch | 3 +
.../20/9cbd800810a2a1559ba33c48bc75860e967e07 | Bin 0 -> 117 bytes
.../29/5c5449283c0fe38c75ae73144513240e5626a6 | Bin 0 -> 84 bytes
.../34/fa5e1fb706dc77cc0b0cf622c82e67ba84e297 | Bin 0 -> 171 bytes
.../3e/655aa57060b300052ca598ba18b5752f8c0958 | Bin 0 -> 24 bytes
.../8a/2dfe46d4eb9b54f5aa0e4e536346bdafee3467 | Bin 0 -> 32 bytes
.../9a/311b181c4ec0fe4d8856b9fc935a191855742a | 2 +
.../a9/193e348262522550ebacc037f772c52cbfa5d5 | Bin 0 -> 52 bytes
.../ae/4fc53ed172182cf8ab07d2f087994950a2ccdf | Bin 0 -> 172 bytes
.../be/fe5fe7626f1850e202fb35ac375c61e1f27f95 | Bin 0 -> 22 bytes
.../DOTgit-2/refs/heads/master | 1 +
.../DOTgit-2/refs/heads/test-branch | 1 +
.../test-fixtures/git-branch-update/main.tf | 1 +
.../git-branch-update/main_branch.tf | 1 +
.../git-branch-update/main_branch_update.tf | 1 +
.../child/main.tf | 1 +
.../validate-bad-output-to-module/main.tf | 8 +
.../validate-bad-output/child/main.tf | 0
.../test-fixtures/validate-bad-output/main.tf | 7 +
.../validate-bad-var/child/main.tf | 0
.../test-fixtures/validate-bad-var/main.tf | 5 +
.../validate-child-bad/child/main.tf | 3 +
.../test-fixtures/validate-child-bad/main.tf | 3 +
.../validate-child-good/child/main.tf | 3 +
.../test-fixtures/validate-child-good/main.tf | 8 +
.../validate-required-var/child/main.tf | 1 +
.../validate-required-var/main.tf | 3 +
.../test-fixtures/validate-root-bad/main.tf | 3 +
.../hashicorp/go-multierror/LICENSE | 353 +
.../hashicorp/go-multierror/README.md | 91 +
.../hashicorp/go-multierror/append.go | 37 +
.../hashicorp/go-multierror/append_test.go | 64 +
.../hashicorp/go-multierror/flatten.go | 26 +
.../hashicorp/go-multierror/flatten_test.go | 48 +
.../hashicorp/go-multierror/format.go | 23 +
.../hashicorp/go-multierror/format_test.go | 23 +
.../hashicorp/go-multierror/multierror.go | 51 +
.../go-multierror/multierror_test.go | 70 +
.../hashicorp/go-multierror/prefix.go | 37 +
.../hashicorp/go-multierror/prefix_test.go | 33 +
.../hashicorp/go-retryablehttp/.travis.yml | 12 +
.../hashicorp/go-retryablehttp/LICENSE | 363 +
.../hashicorp/go-retryablehttp/Makefile | 11 +
.../hashicorp/go-retryablehttp/README.md | 43 +
.../hashicorp/go-retryablehttp/client.go | 219 +
.../hashicorp/go-retryablehttp/client_test.go | 342 +
.../hashicorp/go-version/.travis.yml | 11 +
.../github.com/hashicorp/go-version/LICENSE | 354 +
.../github.com/hashicorp/go-version/README.md | 65 +
.../hashicorp/go-version/constraint.go | 156 +
.../hashicorp/go-version/constraint_test.go | 103 +
.../hashicorp/go-version/version.go | 251 +
.../go-version/version_collection.go | 17 +
.../go-version/version_collection_test.go | 46 +
.../hashicorp/go-version/version_test.go | 208 +
vendor/github.com/hashicorp/hcl/.gitignore | 7 +
vendor/github.com/hashicorp/hcl/.travis.yml | 3 +
vendor/github.com/hashicorp/hcl/LICENSE | 354 +
vendor/github.com/hashicorp/hcl/Makefile | 17 +
vendor/github.com/hashicorp/hcl/README.md | 94 +
vendor/github.com/hashicorp/hcl/decoder.go | 627 +
.../github.com/hashicorp/hcl/decoder_test.go | 664 +
vendor/github.com/hashicorp/hcl/hcl.go | 11 +
.../github.com/hashicorp/hcl/hcl/ast/ast.go | 211 +
.../hashicorp/hcl/hcl/ast/ast_test.go | 200 +
.../github.com/hashicorp/hcl/hcl/ast/walk.go | 52 +
.../hashicorp/hcl/hcl/parser/error.go | 17 +
.../hashicorp/hcl/hcl/parser/error_test.go | 9 +
.../hashicorp/hcl/hcl/parser/parser.go | 417 +
.../hashicorp/hcl/hcl/parser/parser_test.go | 323 +
.../parser/test-fixtures/array_comment.hcl | 4 +
.../parser/test-fixtures/array_comment_2.hcl | 6 +
.../hcl/parser/test-fixtures/assign_colon.hcl | 6 +
.../hcl/parser/test-fixtures/assign_deep.hcl | 5 +
.../hcl/hcl/parser/test-fixtures/comment.hcl | 15 +
.../parser/test-fixtures/comment_lastline.hcl | 1 +
.../parser/test-fixtures/comment_single.hcl | 1 +
.../hcl/hcl/parser/test-fixtures/complex.hcl | 42 +
.../hcl/parser/test-fixtures/complex_key.hcl | 1 +
.../hcl/hcl/parser/test-fixtures/empty.hcl | 0
.../hcl/hcl/parser/test-fixtures/list.hcl | 1 +
.../hcl/parser/test-fixtures/list_comma.hcl | 1 +
.../parser/test-fixtures/missing_braces.hcl | 4 +
.../hcl/hcl/parser/test-fixtures/multiple.hcl | 2 +
.../hcl/hcl/parser/test-fixtures/old.hcl | 3 +
.../hcl/parser/test-fixtures/structure.hcl | 5 +
.../parser/test-fixtures/structure_basic.hcl | 5 +
.../parser/test-fixtures/structure_empty.hcl | 1 +
.../hcl/hcl/parser/test-fixtures/types.hcl | 7 +
.../hashicorp/hcl/hcl/printer/nodes.go | 573 +
.../hashicorp/hcl/hcl/printer/printer.go | 64 +
.../hashicorp/hcl/hcl/printer/printer_test.go | 143 +
.../hcl/hcl/printer/testdata/comment.golden | 36 +
.../hcl/hcl/printer/testdata/comment.input | 37 +
.../printer/testdata/comment_aligned.golden | 25 +
.../printer/testdata/comment_aligned.input | 21 +
.../testdata/comment_standalone.golden | 16 +
.../printer/testdata/comment_standalone.input | 16 +
.../hcl/printer/testdata/complexhcl.golden | 54 +
.../hcl/hcl/printer/testdata/complexhcl.input | 53 +
.../hcl/hcl/printer/testdata/list.golden | 27 +
.../hcl/hcl/printer/testdata/list.input | 21 +
.../hashicorp/hcl/hcl/scanner/scanner.go | 612 +
.../hashicorp/hcl/hcl/scanner/scanner_test.go | 535 +
.../hashicorp/hcl/hcl/strconv/quote.go | 245 +
.../hashicorp/hcl/hcl/strconv/quote_test.go | 93 +
.../hcl/hcl/test-fixtures/array_comment.hcl | 4 +
.../hcl/hcl/test-fixtures/assign_colon.hcl | 6 +
.../hcl/hcl/test-fixtures/assign_deep.hcl | 5 +
.../hcl/hcl/test-fixtures/comment.hcl | 15 +
.../hcl/hcl/test-fixtures/comment_single.hcl | 1 +
.../hcl/hcl/test-fixtures/complex.hcl | 42 +
.../hcl/hcl/test-fixtures/complex_key.hcl | 1 +
.../hashicorp/hcl/hcl/test-fixtures/empty.hcl | 0
.../hashicorp/hcl/hcl/test-fixtures/list.hcl | 1 +
.../hcl/hcl/test-fixtures/list_comma.hcl | 1 +
.../hcl/hcl/test-fixtures/multiple.hcl | 2 +
.../hashicorp/hcl/hcl/test-fixtures/old.hcl | 3 +
.../hcl/hcl/test-fixtures/structure.hcl | 5 +
.../hcl/hcl/test-fixtures/structure_basic.hcl | 5 +
.../hcl/hcl/test-fixtures/structure_empty.hcl | 1 +
.../hashicorp/hcl/hcl/test-fixtures/types.hcl | 7 +
.../hashicorp/hcl/hcl/token/position.go | 46 +
.../hashicorp/hcl/hcl/token/token.go | 170 +
.../hashicorp/hcl/hcl/token/token_test.go | 63 +
vendor/github.com/hashicorp/hcl/hcl_test.go | 19 +
.../hashicorp/hcl/json/parser/flatten.go | 111 +
.../hashicorp/hcl/json/parser/parser.go | 297 +
.../hashicorp/hcl/json/parser/parser_test.go | 338 +
.../hcl/json/parser/test-fixtures/array.json | 4 +
.../hcl/json/parser/test-fixtures/basic.json | 3 +
.../hcl/json/parser/test-fixtures/object.json | 5 +
.../hcl/json/parser/test-fixtures/types.json | 10 +
.../hashicorp/hcl/json/scanner/scanner.go | 451 +
.../hcl/json/scanner/scanner_test.go | 363 +
.../hcl/json/test-fixtures/array.json | 4 +
.../hcl/json/test-fixtures/basic.json | 3 +
.../hcl/json/test-fixtures/object.json | 5 +
.../hcl/json/test-fixtures/types.json | 10 +
.../hashicorp/hcl/json/token/position.go | 46 +
.../hashicorp/hcl/json/token/token.go | 118 +
.../hashicorp/hcl/json/token/token_test.go | 34 +
vendor/github.com/hashicorp/hcl/lex.go | 31 +
vendor/github.com/hashicorp/hcl/lex_test.go | 37 +
vendor/github.com/hashicorp/hcl/parse.go | 23 +
.../hashicorp/hcl/test-fixtures/basic.hcl | 2 +
.../hashicorp/hcl/test-fixtures/basic.json | 4 +
.../hcl/test-fixtures/basic_int_string.hcl | 1 +
.../hcl/test-fixtures/basic_squish.hcl | 3 +
.../hcl/test-fixtures/decode_policy.hcl | 15 +
.../hcl/test-fixtures/decode_policy.json | 19 +
.../hcl/test-fixtures/decode_tf_variable.hcl | 10 +
.../hcl/test-fixtures/decode_tf_variable.json | 14 +
.../hashicorp/hcl/test-fixtures/empty.hcl | 1 +
.../hashicorp/hcl/test-fixtures/escape.hcl | 1 +
.../hashicorp/hcl/test-fixtures/flat.hcl | 2 +
.../hashicorp/hcl/test-fixtures/float.hcl | 1 +
.../hashicorp/hcl/test-fixtures/float.json | 3 +
.../hcl/test-fixtures/interpolate_escape.hcl | 1 +
.../hashicorp/hcl/test-fixtures/multiline.hcl | 4 +
.../hcl/test-fixtures/multiline.json | 3 +
.../hcl/test-fixtures/multiline_bad.hcl | 4 +
.../hcl/test-fixtures/multiline_no_eof.hcl | 5 +
.../hcl/test-fixtures/multiline_no_marker.hcl | 1 +
.../test-fixtures/nested_block_comment.hcl | 5 +
.../hcl/test-fixtures/object_list.json | 15 +
.../hcl/test-fixtures/scientific.hcl | 6 +
.../hcl/test-fixtures/scientific.json | 8 +
.../hcl/test-fixtures/slice_expand.hcl | 7 +
.../hashicorp/hcl/test-fixtures/structure.hcl | 5 +
.../hcl/test-fixtures/structure.json | 8 +
.../hcl/test-fixtures/structure2.hcl | 9 +
.../hcl/test-fixtures/structure2.json | 10 +
.../hcl/test-fixtures/structure_flat.json | 8 +
.../hcl/test-fixtures/structure_flatmap.hcl | 7 +
.../hcl/test-fixtures/structure_list.hcl | 6 +
.../hcl/test-fixtures/structure_list.json | 7 +
.../test-fixtures/structure_list_deep.json | 16 +
.../hcl/test-fixtures/structure_multi.hcl | 7 +
.../hcl/test-fixtures/structure_multi.json | 11 +
.../hcl/test-fixtures/terraform_heroku.hcl | 5 +
.../hcl/test-fixtures/terraform_heroku.json | 6 +
.../hashicorp/hcl/test-fixtures/tfvars.hcl | 3 +
.../unterminated_block_comment.hcl | 2 +
.../github.com/hashicorp/logutils/.gitignore | 22 +
vendor/github.com/hashicorp/logutils/LICENSE | 354 +
.../github.com/hashicorp/logutils/README.md | 36 +
vendor/github.com/hashicorp/logutils/level.go | 81 +
.../logutils/level_benchmark_test.go | 37 +
.../hashicorp/logutils/level_test.go | 94 +
.../hashicorp/serf/coordinate/README.md | 1 +
.../hashicorp/serf/coordinate/client.go | 180 +
.../hashicorp/serf/coordinate/client_test.go | 109 +
.../hashicorp/serf/coordinate/config.go | 70 +
.../hashicorp/serf/coordinate/coordinate.go | 183 +
.../serf/coordinate/coordinate_test.go | 260 +
.../serf/coordinate/performance_test.go | 182 +
.../hashicorp/serf/coordinate/phantom.go | 187 +
.../hashicorp/serf/coordinate/test_util.go | 27 +
vendor/github.com/hashicorp/yamux/.gitignore | 23 +
vendor/github.com/hashicorp/yamux/LICENSE | 362 +
vendor/github.com/hashicorp/yamux/README.md | 86 +
vendor/github.com/hashicorp/yamux/addr.go | 60 +
.../github.com/hashicorp/yamux/bench_test.go | 81 +
vendor/github.com/hashicorp/yamux/const.go | 157 +
.../github.com/hashicorp/yamux/const_test.go | 72 +
vendor/github.com/hashicorp/yamux/mux.go | 87 +
vendor/github.com/hashicorp/yamux/session.go | 598 +
.../hashicorp/yamux/session_test.go | 1153 +
vendor/github.com/hashicorp/yamux/spec.md | 141 +
vendor/github.com/hashicorp/yamux/stream.go | 452 +
vendor/github.com/hashicorp/yamux/util.go | 28 +
.../github.com/hashicorp/yamux/util_test.go | 50 +
.../hmrc/vmware-govcd/.editorconfig | 26 +
.../github.com/hmrc/vmware-govcd/.gitignore | 25 +
.../github.com/hmrc/vmware-govcd/.travis.yml | 16 +
vendor/github.com/hmrc/vmware-govcd/LICENSE | 569 +
vendor/github.com/hmrc/vmware-govcd/README.md | 59 +
vendor/github.com/hmrc/vmware-govcd/api.go | 107 +
.../github.com/hmrc/vmware-govcd/api_test.go | 3 +
.../github.com/hmrc/vmware-govcd/api_vca.go | 324 +
.../hmrc/vmware-govcd/api_vca_test.go | 749 +
.../github.com/hmrc/vmware-govcd/api_vcd.go | 237 +
.../hmrc/vmware-govcd/api_vcd_test.go | 208 +
.../github.com/hmrc/vmware-govcd/catalog.go | 57 +
.../hmrc/vmware-govcd/catalog_test.go | 68 +
.../hmrc/vmware-govcd/catalogitem.go | 49 +
.../hmrc/vmware-govcd/catalogitem_test.go | 55 +
.../hmrc/vmware-govcd/edgegateway.go | 625 +
.../hmrc/vmware-govcd/edgegateway_test.go | 409 +
vendor/github.com/hmrc/vmware-govcd/org.go | 56 +
.../github.com/hmrc/vmware-govcd/org_test.go | 55 +
.../hmrc/vmware-govcd/orgvdcnetwork.go | 141 +
.../hmrc/vmware-govcd/orgvdcnetwork_test.go | 64 +
.../hmrc/vmware-govcd/script/coverage | 51 +
vendor/github.com/hmrc/vmware-govcd/task.go | 77 +
.../github.com/hmrc/vmware-govcd/task_test.go | 35 +
.../hmrc/vmware-govcd/testutil/http.go | 186 +
.../hmrc/vmware-govcd/types/v56/types.go | 1508 +
vendor/github.com/hmrc/vmware-govcd/vapp.go | 807 +
.../github.com/hmrc/vmware-govcd/vapp_test.go | 692 +
.../hmrc/vmware-govcd/vapptemplate.go | 57 +
.../hmrc/vmware-govcd/vapptemplate_test.go | 183 +
vendor/github.com/hmrc/vmware-govcd/vdc.go | 278 +
.../github.com/hmrc/vmware-govcd/vdc_test.go | 178 +
vendor/github.com/imdario/mergo/.travis.yml | 2 +
vendor/github.com/imdario/mergo/LICENSE | 28 +
vendor/github.com/imdario/mergo/README.md | 122 +
vendor/github.com/imdario/mergo/doc.go | 44 +
.../github.com/imdario/mergo/issue17_test.go | 25 +
vendor/github.com/imdario/mergo/map.go | 154 +
vendor/github.com/imdario/mergo/merge.go | 120 +
vendor/github.com/imdario/mergo/mergo.go | 90 +
vendor/github.com/imdario/mergo/mergo_test.go | 502 +
.../imdario/mergo/testdata/license.yml | 3 +
.../imdario/mergo/testdata/thing.yml | 5 +
.../jmespath/go-jmespath/.gitignore | 4 +
.../jmespath/go-jmespath/.travis.yml | 9 +
.../github.com/jmespath/go-jmespath/LICENSE | 13 +
.../github.com/jmespath/go-jmespath/Makefile | 44 +
.../github.com/jmespath/go-jmespath/README.md | 7 +
vendor/github.com/jmespath/go-jmespath/api.go | 49 +
.../jmespath/go-jmespath/api_test.go | 32 +
.../go-jmespath/astnodetype_string.go | 16 +
.../jmespath/go-jmespath/cmd/jpgo/main.go | 96 +
.../go-jmespath/compliance/basic.json | 96 +
.../go-jmespath/compliance/boolean.json | 257 +
.../go-jmespath/compliance/current.json | 25 +
.../go-jmespath/compliance/escape.json | 46 +
.../go-jmespath/compliance/filters.json | 468 +
.../go-jmespath/compliance/functions.json | 825 +
.../go-jmespath/compliance/identifiers.json | 1377 +
.../go-jmespath/compliance/indices.json | 346 +
.../go-jmespath/compliance/literal.json | 185 +
.../go-jmespath/compliance/multiselect.json | 393 +
.../go-jmespath/compliance/ormatch.json | 59 +
.../jmespath/go-jmespath/compliance/pipe.json | 131 +
.../go-jmespath/compliance/slice.json | 187 +
.../go-jmespath/compliance/syntax.json | 616 +
.../go-jmespath/compliance/unicode.json | 38 +
.../go-jmespath/compliance/wildcard.json | 460 +
.../jmespath/go-jmespath/compliance_test.go | 123 +
.../jmespath/go-jmespath/functions.go | 840 +
.../jmespath/go-jmespath/fuzz/corpus/expr-1 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-10 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-100 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-101 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-102 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-103 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-104 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-105 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-106 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-107 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-108 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-109 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-110 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-112 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-115 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-118 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-119 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-12 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-120 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-121 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-122 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-123 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-126 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-128 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-129 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-13 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-130 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-131 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-132 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-133 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-134 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-135 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-136 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-137 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-138 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-139 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-14 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-140 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-141 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-142 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-143 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-144 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-145 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-146 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-147 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-148 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-149 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-15 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-150 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-151 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-152 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-153 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-155 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-156 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-157 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-158 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-159 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-16 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-160 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-161 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-162 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-163 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-164 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-165 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-166 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-167 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-168 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-169 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-17 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-170 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-171 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-172 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-173 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-174 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-175 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-178 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-179 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-18 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-180 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-181 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-182 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-183 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-184 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-185 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-186 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-187 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-188 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-189 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-19 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-190 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-191 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-192 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-193 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-194 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-195 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-196 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-198 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-199 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-2 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-20 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-200 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-201 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-202 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-203 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-204 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-205 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-206 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-207 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-208 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-209 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-21 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-210 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-211 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-212 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-213 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-214 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-215 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-216 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-217 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-218 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-219 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-22 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-220 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-221 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-222 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-223 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-224 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-225 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-226 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-227 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-228 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-229 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-23 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-230 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-231 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-232 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-233 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-234 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-235 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-236 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-237 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-238 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-239 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-24 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-240 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-241 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-242 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-243 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-244 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-245 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-246 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-247 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-248 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-249 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-25 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-250 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-251 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-252 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-253 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-254 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-255 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-256 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-257 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-258 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-259 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-26 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-260 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-261 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-262 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-263 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-264 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-265 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-266 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-267 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-268 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-269 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-27 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-270 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-271 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-272 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-273 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-274 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-275 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-276 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-277 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-278 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-279 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-28 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-280 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-281 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-282 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-283 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-284 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-285 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-286 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-287 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-288 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-289 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-29 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-290 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-291 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-292 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-293 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-294 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-295 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-296 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-297 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-298 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-299 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-3 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-30 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-300 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-301 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-302 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-303 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-304 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-305 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-306 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-307 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-308 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-309 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-31 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-310 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-311 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-312 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-313 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-314 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-315 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-316 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-317 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-318 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-319 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-32 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-320 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-321 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-322 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-323 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-324 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-325 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-326 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-327 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-328 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-329 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-33 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-330 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-331 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-332 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-333 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-334 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-335 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-336 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-337 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-338 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-339 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-34 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-340 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-341 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-342 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-343 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-344 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-345 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-346 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-347 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-348 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-349 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-35 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-350 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-351 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-352 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-353 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-354 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-355 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-356 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-357 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-358 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-359 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-36 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-360 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-361 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-362 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-363 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-364 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-365 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-366 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-367 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-368 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-369 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-37 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-370 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-371 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-372 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-373 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-374 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-375 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-376 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-377 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-378 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-379 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-38 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-380 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-381 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-382 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-383 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-384 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-385 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-386 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-387 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-388 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-389 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-39 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-390 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-391 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-392 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-393 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-394 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-395 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-396 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-397 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-398 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-399 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-4 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-40 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-400 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-401 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-402 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-403 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-404 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-405 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-406 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-407 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-408 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-409 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-41 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-410 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-411 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-412 | 2 +
.../jmespath/go-jmespath/fuzz/corpus/expr-413 | 2 +
.../jmespath/go-jmespath/fuzz/corpus/expr-414 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-415 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-416 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-417 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-418 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-419 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-42 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-420 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-421 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-422 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-423 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-424 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-425 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-426 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-427 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-428 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-429 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-43 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-430 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-431 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-432 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-433 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-434 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-435 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-436 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-437 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-438 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-439 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-44 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-440 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-441 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-442 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-443 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-444 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-445 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-446 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-447 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-448 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-449 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-45 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-450 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-451 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-452 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-453 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-454 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-455 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-456 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-457 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-458 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-459 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-46 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-460 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-461 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-462 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-463 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-464 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-465 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-466 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-467 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-468 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-469 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-47 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-470 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-471 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-472 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-473 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-474 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-475 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-476 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-477 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-478 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-479 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-48 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-480 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-481 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-482 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-483 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-484 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-485 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-486 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-487 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-488 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-489 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-49 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-490 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-491 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-492 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-493 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-494 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-495 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-496 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-497 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-498 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-499 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-5 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-50 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-500 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-501 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-502 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-503 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-504 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-505 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-506 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-507 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-508 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-509 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-51 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-510 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-511 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-512 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-513 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-514 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-515 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-516 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-517 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-518 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-519 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-52 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-520 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-521 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-522 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-523 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-524 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-525 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-526 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-527 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-528 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-529 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-53 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-530 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-531 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-532 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-533 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-534 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-535 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-536 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-537 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-538 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-539 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-54 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-540 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-541 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-542 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-543 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-544 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-545 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-546 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-547 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-548 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-549 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-55 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-550 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-551 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-552 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-553 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-554 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-555 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-556 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-557 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-558 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-559 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-56 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-560 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-561 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-562 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-563 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-564 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-565 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-566 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-567 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-568 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-569 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-57 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-570 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-571 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-572 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-573 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-574 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-575 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-576 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-577 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-578 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-579 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-58 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-580 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-581 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-582 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-583 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-584 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-585 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-586 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-587 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-588 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-589 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-59 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-590 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-591 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-592 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-593 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-594 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-595 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-596 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-597 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-598 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-599 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-6 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-60 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-600 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-601 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-602 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-603 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-604 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-605 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-606 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-607 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-608 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-609 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-61 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-610 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-611 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-612 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-613 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-614 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-615 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-616 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-617 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-618 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-619 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-62 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-620 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-621 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-622 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-623 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-624 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-625 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-626 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-627 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-628 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-629 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-63 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-630 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-631 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-632 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-633 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-634 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-635 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-636 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-637 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-638 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-639 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-64 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-640 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-641 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-642 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-643 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-644 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-645 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-646 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-647 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-648 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-649 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-65 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-650 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-651 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-652 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-653 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-654 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-655 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-656 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-66 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-67 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-68 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-69 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-7 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-70 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-71 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-72 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-73 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-74 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-75 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-76 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-77 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-78 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-79 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-8 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-80 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-81 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-82 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-83 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-84 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-85 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-86 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-87 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-88 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-89 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-9 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-90 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-91 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-92 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-93 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-94 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-95 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-96 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-97 | 1 +
.../jmespath/go-jmespath/fuzz/corpus/expr-98 | 1 +
.../jmespath/go-jmespath/fuzz/jmespath.go | 13 +
.../jmespath/go-jmespath/interpreter.go | 418 +
.../jmespath/go-jmespath/interpreter_test.go | 213 +
.../github.com/jmespath/go-jmespath/lexer.go | 420 +
.../jmespath/go-jmespath/lexer_test.go | 161 +
.../github.com/jmespath/go-jmespath/parser.go | 603 +
.../jmespath/go-jmespath/parser_test.go | 136 +
.../jmespath/go-jmespath/toktype_string.go | 16 +
.../github.com/jmespath/go-jmespath/util.go | 185 +
.../jmespath/go-jmespath/util_test.go | 73 +
vendor/github.com/kardianos/osext/LICENSE | 27 +
vendor/github.com/kardianos/osext/README.md | 16 +
vendor/github.com/kardianos/osext/osext.go | 33 +
.../github.com/kardianos/osext/osext_plan9.go | 20 +
.../kardianos/osext/osext_procfs.go | 36 +
.../kardianos/osext/osext_sysctl.go | 126 +
.../github.com/kardianos/osext/osext_test.go | 203 +
.../kardianos/osext/osext_windows.go | 34 +
vendor/github.com/lib/pq/.gitignore | 4 +
vendor/github.com/lib/pq/.travis.yml | 71 +
vendor/github.com/lib/pq/CONTRIBUTING.md | 29 +
vendor/github.com/lib/pq/LICENSE.md | 8 +
vendor/github.com/lib/pq/README.md | 105 +
vendor/github.com/lib/pq/bench_test.go | 435 +
vendor/github.com/lib/pq/buf.go | 91 +
vendor/github.com/lib/pq/certs/README | 3 +
vendor/github.com/lib/pq/certs/postgresql.crt | 69 +
vendor/github.com/lib/pq/certs/postgresql.key | 15 +
vendor/github.com/lib/pq/certs/root.crt | 24 +
vendor/github.com/lib/pq/certs/server.crt | 81 +
vendor/github.com/lib/pq/certs/server.key | 27 +
vendor/github.com/lib/pq/conn.go | 1852 +
vendor/github.com/lib/pq/conn_test.go | 1433 +
vendor/github.com/lib/pq/copy.go | 267 +
vendor/github.com/lib/pq/copy_test.go | 465 +
vendor/github.com/lib/pq/doc.go | 212 +
vendor/github.com/lib/pq/encode.go | 538 +
vendor/github.com/lib/pq/encode_test.go | 719 +
vendor/github.com/lib/pq/error.go | 508 +
vendor/github.com/lib/pq/hstore/hstore.go | 118 +
.../github.com/lib/pq/hstore/hstore_test.go | 148 +
.../github.com/lib/pq/listen_example/doc.go | 102 +
vendor/github.com/lib/pq/notify.go | 766 +
vendor/github.com/lib/pq/notify_test.go | 574 +
vendor/github.com/lib/pq/oid/doc.go | 6 +
vendor/github.com/lib/pq/oid/gen.go | 74 +
vendor/github.com/lib/pq/oid/types.go | 161 +
vendor/github.com/lib/pq/ssl_test.go | 226 +
vendor/github.com/lib/pq/url.go | 76 +
vendor/github.com/lib/pq/url_test.go | 54 +
vendor/github.com/lib/pq/user_posix.go | 24 +
vendor/github.com/lib/pq/user_windows.go | 27 +
.../src/artifactory.v401/api.go | 1 +
.../src/artifactory.v401/api_test.go | 1 +
.../src/artifactory.v401/archive.go | 1 +
.../src/artifactory.v401/archive_test.go | 1 +
.../src/artifactory.v401/artifact.go | 67 +
.../src/artifactory.v401/artifact_test.go | 1 +
.../artifactory.v401/assets/test/error.json | 6 +
.../artifactory.v401/assets/test/groups.json | 16 +
.../assets/test/license_information.json | 5 +
.../assets/test/local_repository_config.json | 33 +
.../assets/test/permissions.json | 16 +
.../assets/test/permissions_details.json | 11 +
.../assets/test/remote_repository_config.json | 51 +
.../assets/test/repositories.json | 94 +
.../assets/test/single_group.json | 6 +
.../assets/test/single_user.json | 11 +
.../artifactory.v401/assets/test/users.json | 9 +
.../test/virtual_repository_config.json | 25 +
.../src/artifactory.v401/bintray.go | 1 +
.../src/artifactory.v401/bintray_test.go | 1 +
.../src/artifactory.v401/build.go | 1 +
.../src/artifactory.v401/build_test.go | 1 +
.../src/artifactory.v401/client.go | 64 +
.../src/artifactory.v401/client_test.go | 38 +
.../src/artifactory.v401/compliance.go | 1 +
.../src/artifactory.v401/compliance_test.go | 1 +
.../src/artifactory.v401/errors.go | 10 +
.../src/artifactory.v401/groups.go | 61 +
.../src/artifactory.v401/groups_test.go | 158 +
.../src/artifactory.v401/http.go | 96 +
.../src/artifactory.v401/http_test.go | 1 +
.../src/artifactory.v401/license.go | 23 +
.../src/artifactory.v401/mimetypes.go | 11 +
.../src/artifactory.v401/mimetypes_test.go | 1 +
.../artifactory.v401/permissions_targets.go | 53 +
.../permissions_targets_test.go | 99 +
.../src/artifactory.v401/repos.go | 151 +
.../src/artifactory.v401/repos_test.go | 1 +
.../src/artifactory.v401/responses.go | 9 +
.../src/artifactory.v401/responses_test.go | 1 +
.../src/artifactory.v401/search.go | 47 +
.../src/artifactory.v401/search_test.go | 1 +
.../src/artifactory.v401/security.go | 6 +
.../src/artifactory.v401/security_test.go | 1 +
.../src/artifactory.v401/storage.go | 18 +
.../src/artifactory.v401/storage_test.go | 1 +
.../src/artifactory.v401/system.go | 6 +
.../src/artifactory.v401/system_test.go | 1 +
.../src/artifactory.v401/users.go | 83 +
.../src/artifactory.v401/users_test.go | 188 +
.../src/artifactory.v401/version.go | 3 +
.../src/artifactory.v401/version_test.go | 1 +
.../masterzen/simplexml/dom/document.go | 35 +
.../masterzen/simplexml/dom/dom_test.go | 105 +
.../masterzen/simplexml/dom/element.go | 200 +
.../masterzen/simplexml/dom/namespace.go | 10 +
.../github.com/masterzen/winrm/soap/header.go | 181 +
.../masterzen/winrm/soap/header_test.go | 48 +
.../masterzen/winrm/soap/message.go | 74 +
.../masterzen/winrm/soap/namespaces.go | 37 +
.../masterzen/winrm/soap/namespaces_test.go | 37 +
.../masterzen/winrm/winrm/client.go | 166 +
.../masterzen/winrm/winrm/client_test.go | 51 +
.../masterzen/winrm/winrm/command.go | 210 +
.../masterzen/winrm/winrm/command_test.go | 164 +
.../masterzen/winrm/winrm/endpoint.go | 22 +
.../masterzen/winrm/winrm/endpoint_test.go | 15 +
.../masterzen/winrm/winrm/fixture_test.go | 87 +
.../github.com/masterzen/winrm/winrm/http.go | 60 +
.../masterzen/winrm/winrm/http_test.go | 63 +
.../masterzen/winrm/winrm/parameters.go | 20 +
.../masterzen/winrm/winrm/parameters_test.go | 19 +
.../masterzen/winrm/winrm/powershell.go | 22 +
.../masterzen/winrm/winrm/powershell_test.go | 10 +
.../masterzen/winrm/winrm/request.go | 116 +
.../masterzen/winrm/winrm/request_test.go | 143 +
.../masterzen/winrm/winrm/response.go | 111 +
.../masterzen/winrm/winrm/response_test.go | 70 +
.../github.com/masterzen/winrm/winrm/shell.go | 31 +
.../masterzen/winrm/winrm/shell_test.go | 40 +
vendor/github.com/masterzen/xmlpath/LICENSE | 185 +
.../github.com/masterzen/xmlpath/all_test.go | 536 +
vendor/github.com/masterzen/xmlpath/doc.go | 95 +
vendor/github.com/masterzen/xmlpath/parser.go | 233 +
vendor/github.com/masterzen/xmlpath/path.go | 642 +
vendor/github.com/mattn/go-isatty/LICENSE | 9 +
vendor/github.com/mattn/go-isatty/README.md | 37 +
vendor/github.com/mattn/go-isatty/doc.go | 2 +
.../mattn/go-isatty/isatty_appengine.go | 9 +
.../github.com/mattn/go-isatty/isatty_bsd.go | 18 +
.../mattn/go-isatty/isatty_linux.go | 18 +
.../mattn/go-isatty/isatty_solaris.go | 16 +
.../mattn/go-isatty/isatty_windows.go | 19 +
vendor/github.com/mitchellh/cli/.travis.yml | 14 +
vendor/github.com/mitchellh/cli/LICENSE | 354 +
vendor/github.com/mitchellh/cli/README.md | 61 +
vendor/github.com/mitchellh/cli/cli.go | 390 +
vendor/github.com/mitchellh/cli/cli_test.go | 443 +
vendor/github.com/mitchellh/cli/command.go | 47 +
.../github.com/mitchellh/cli/command_mock.go | 42 +
.../mitchellh/cli/command_mock_test.go | 9 +
vendor/github.com/mitchellh/cli/help.go | 79 +
vendor/github.com/mitchellh/cli/ui.go | 187 +
vendor/github.com/mitchellh/cli/ui_colored.go | 69 +
.../mitchellh/cli/ui_colored_test.go | 74 +
.../github.com/mitchellh/cli/ui_concurrent.go | 54 +
.../mitchellh/cli/ui_concurrent_test.go | 9 +
vendor/github.com/mitchellh/cli/ui_mock.go | 64 +
.../github.com/mitchellh/cli/ui_mock_test.go | 9 +
vendor/github.com/mitchellh/cli/ui_test.go | 162 +
vendor/github.com/mitchellh/cli/ui_writer.go | 18 +
.../mitchellh/cli/ui_writer_test.go | 37 +
.../mitchellh/colorstring/.travis.yml | 15 +
.../github.com/mitchellh/colorstring/LICENSE | 21 +
.../mitchellh/colorstring/README.md | 30 +
.../mitchellh/colorstring/colorstring.go | 244 +
.../mitchellh/colorstring/colorstring_test.go | 185 +
.../mitchellh/copystructure/LICENSE | 21 +
.../mitchellh/copystructure/README.md | 21 +
.../mitchellh/copystructure/copier_time.go | 15 +
.../copystructure/copier_time_test.go | 17 +
.../mitchellh/copystructure/copystructure.go | 281 +
.../copystructure_examples_test.go | 22 +
.../copystructure/copystructure_test.go | 194 +
.../github.com/mitchellh/go-homedir/LICENSE | 21 +
.../github.com/mitchellh/go-homedir/README.md | 14 +
.../mitchellh/go-homedir/homedir.go | 132 +
.../mitchellh/go-homedir/homedir_test.go | 112 +
.../mitchellh/go-linereader/LICENSE.md | 21 +
.../mitchellh/go-linereader/README.md | 30 +
.../mitchellh/go-linereader/linereader.go | 118 +
.../go-linereader/linereader_test.go | 48 +
.../mitchellh/mapstructure/.travis.yml | 7 +
.../github.com/mitchellh/mapstructure/LICENSE | 21 +
.../mitchellh/mapstructure/README.md | 46 +
.../mitchellh/mapstructure/decode_hooks.go | 151 +
.../mapstructure/decode_hooks_test.go | 229 +
.../mitchellh/mapstructure/error.go | 50 +
.../mitchellh/mapstructure/mapstructure.go | 745 +
.../mapstructure_benchmark_test.go | 279 +
.../mapstructure/mapstructure_bugs_test.go | 47 +
.../mapstructure_examples_test.go | 203 +
.../mapstructure/mapstructure_test.go | 999 +
.../mitchellh/packer/common/uuid/uuid.go | 24 +
.../mitchellh/packer/common/uuid/uuid_test.go | 12 +
vendor/github.com/mitchellh/panicwrap/LICENSE | 21 +
.../github.com/mitchellh/panicwrap/README.md | 108 +
.../mitchellh/panicwrap/panicwrap.go | 309 +
.../mitchellh/panicwrap/panicwrap_test.go | 325 +
.../github.com/mitchellh/prefixedio/LICENSE | 19 +
.../github.com/mitchellh/prefixedio/README.md | 30 +
.../github.com/mitchellh/prefixedio/reader.go | 220 +
.../mitchellh/prefixedio/reader_test.go | 96 +
.../github.com/mitchellh/reflectwalk/LICENSE | 21 +
.../mitchellh/reflectwalk/README.md | 6 +
.../mitchellh/reflectwalk/location.go | 17 +
.../mitchellh/reflectwalk/location_string.go | 16 +
.../mitchellh/reflectwalk/reflectwalk.go | 279 +
.../mitchellh/reflectwalk/reflectwalk_test.go | 377 +
.../nesv/go-dynect/dynect/client.go | 153 +
.../nesv/go-dynect/dynect/client_test.go | 114 +
.../go-dynect/dynect/convenient_client.go | 154 +
.../github.com/nesv/go-dynect/dynect/json.go | 58 +
.../nesv/go-dynect/dynect/record.go | 12 +
.../nesv/go-dynect/dynect/records.go | 168 +
.../github.com/nesv/go-dynect/dynect/zones.go | 24 +
vendor/github.com/nu7hatch/gouuid/.gitignore | 11 +
vendor/github.com/nu7hatch/gouuid/COPYING | 19 +
vendor/github.com/nu7hatch/gouuid/README.md | 21 +
.../nu7hatch/gouuid/example_test.go | 33 +
vendor/github.com/nu7hatch/gouuid/uuid.go | 173 +
.../github.com/nu7hatch/gouuid/uuid_test.go | 135 +
.../packer-community/winrmcp/winrmcp/cp.go | 200 +
.../winrmcp/winrmcp/endpoint.go | 42 +
.../winrmcp/winrmcp/endpoint_test.go | 81 +
.../packer-community/winrmcp/winrmcp/ls.go | 71 +
.../packer-community/winrmcp/winrmcp/path.go | 18 +
.../winrmcp/winrmcp/psobject.go | 17 +
.../winrmcp/winrmcp/winrmcp.go | 117 +
.../github.com/packethost/packngo/LICENSE.txt | 56 +
.../github.com/packethost/packngo/devices.go | 236 +
vendor/github.com/packethost/packngo/email.go | 40 +
.../packethost/packngo/facilities.go | 55 +
.../packethost/packngo/operatingsystems.go | 44 +
.../github.com/packethost/packngo/packngo.go | 205 +
vendor/github.com/packethost/packngo/plans.go | 114 +
.../github.com/packethost/packngo/projects.go | 136 +
vendor/github.com/packethost/packngo/rate.go | 11 +
.../github.com/packethost/packngo/sshkeys.go | 138 +
.../packethost/packngo/timestamp.go | 35 +
vendor/github.com/packethost/packngo/user.go | 52 +
vendor/github.com/packethost/packngo/utils.go | 112 +
vendor/github.com/pborman/uuid/CONTRIBUTORS | 1 +
vendor/github.com/pborman/uuid/LICENSE | 27 +
vendor/github.com/pborman/uuid/dce.go | 84 +
vendor/github.com/pborman/uuid/doc.go | 8 +
vendor/github.com/pborman/uuid/hash.go | 53 +
vendor/github.com/pborman/uuid/json.go | 30 +
vendor/github.com/pborman/uuid/json_test.go | 32 +
vendor/github.com/pborman/uuid/node.go | 101 +
vendor/github.com/pborman/uuid/seq_test.go | 66 +
vendor/github.com/pborman/uuid/sql.go | 48 +
vendor/github.com/pborman/uuid/sql_test.go | 58 +
vendor/github.com/pborman/uuid/time.go | 132 +
vendor/github.com/pborman/uuid/util.go | 43 +
vendor/github.com/pborman/uuid/uuid.go | 163 +
vendor/github.com/pborman/uuid/uuid_test.go | 390 +
vendor/github.com/pborman/uuid/version1.go | 41 +
vendor/github.com/pborman/uuid/version4.go | 25 +
.../github.com/pearkes/cloudflare/.gitignore | 23 +
vendor/github.com/pearkes/cloudflare/LICENSE | 362 +
.../github.com/pearkes/cloudflare/README.md | 15 +
vendor/github.com/pearkes/cloudflare/api.go | 119 +
.../github.com/pearkes/cloudflare/api_test.go | 101 +
.../github.com/pearkes/cloudflare/record.go | 334 +
.../pearkes/cloudflare/record_test.go | 409 +
.../pearkes/cloudflare/testutil/http.go | 174 +
.../github.com/pearkes/dnsimple/.travis.yml | 5 +
vendor/github.com/pearkes/dnsimple/LICENSE | 362 +
vendor/github.com/pearkes/dnsimple/README.md | 17 +
vendor/github.com/pearkes/dnsimple/api.go | 175 +
.../github.com/pearkes/dnsimple/api_test.go | 83 +
vendor/github.com/pearkes/dnsimple/domain.go | 62 +
.../pearkes/dnsimple/domain_test.go | 102 +
vendor/github.com/pearkes/dnsimple/record.go | 215 +
.../pearkes/dnsimple/record_test.go | 116 +
.../pearkes/dnsimple/testutil/http.go | 174 +
vendor/github.com/pearkes/mailgun/.gitignore | 23 +
vendor/github.com/pearkes/mailgun/LICENSE | 362 +
vendor/github.com/pearkes/mailgun/README.md | 15 +
vendor/github.com/pearkes/mailgun/api.go | 140 +
vendor/github.com/pearkes/mailgun/api_test.go | 92 +
vendor/github.com/pearkes/mailgun/domain.go | 131 +
.../github.com/pearkes/mailgun/domain_test.go | 109 +
.../pearkes/mailgun/testutil/http.go | 174 +
.../rackspace/gophercloud/.travis.yml | 16 +
.../rackspace/gophercloud/CONTRIBUTING.md | 274 +
.../rackspace/gophercloud/CONTRIBUTORS.md | 13 +
.../github.com/rackspace/gophercloud/LICENSE | 191 +
.../rackspace/gophercloud/README.md | 160 +
.../rackspace/gophercloud/UPGRADING.md | 338 +
.../gophercloud/acceptance/README.md | 57 +
.../blockstorage/v1/snapshots_test.go | 70 +
.../openstack/blockstorage/v1/volumes_test.go | 63 +
.../blockstorage/v1/volumetypes_test.go | 49 +
.../acceptance/openstack/client_test.go | 40 +
.../compute/v2/bootfromvolume_test.go | 55 +
.../openstack/compute/v2/compute_test.go | 104 +
.../openstack/compute/v2/extension_test.go | 47 +
.../openstack/compute/v2/flavors_test.go | 57 +
.../openstack/compute/v2/floatingip_test.go | 168 +
.../openstack/compute/v2/images_test.go | 37 +
.../openstack/compute/v2/keypairs_test.go | 74 +
.../openstack/compute/v2/network_test.go | 78 +
.../acceptance/openstack/compute/v2/pkg.go | 3 +
.../openstack/compute/v2/secdefrules_test.go | 72 +
.../openstack/compute/v2/secgroup_test.go | 177 +
.../openstack/compute/v2/servergroup_test.go | 143 +
.../openstack/compute/v2/servers_test.go | 484 +
.../compute/v2/tenantnetworks_test.go | 109 +
.../openstack/compute/v2/volumeattach_test.go | 125 +
.../acceptance/openstack/db/v1/common.go | 70 +
.../openstack/db/v1/database_test.go | 45 +
.../acceptance/openstack/db/v1/flavor_test.go | 31 +
.../openstack/db/v1/instance_test.go | 138 +
.../acceptance/openstack/db/v1/pkg.go | 1 +
.../acceptance/openstack/db/v1/user_test.go | 70 +
.../openstack/identity/v2/extension_test.go | 46 +
.../openstack/identity/v2/identity_test.go | 47 +
.../acceptance/openstack/identity/v2/pkg.go | 1 +
.../openstack/identity/v2/role_test.go | 58 +
.../openstack/identity/v2/tenant_test.go | 32 +
.../openstack/identity/v2/token_test.go | 54 +
.../openstack/identity/v2/user_test.go | 127 +
.../openstack/identity/v3/endpoint_test.go | 111 +
.../openstack/identity/v3/identity_test.go | 39 +
.../acceptance/openstack/identity/v3/pkg.go | 1 +
.../openstack/identity/v3/service_test.go | 36 +
.../openstack/identity/v3/token_test.go | 42 +
.../networking/v2/apiversion_test.go | 51 +
.../openstack/networking/v2/common.go | 39 +
.../openstack/networking/v2/extension_test.go | 45 +
.../v2/extensions/fwaas/firewall_test.go | 116 +
.../networking/v2/extensions/fwaas/pkg.go | 1 +
.../v2/extensions/fwaas/policy_test.go | 107 +
.../v2/extensions/fwaas/rule_test.go | 84 +
.../networking/v2/extensions/layer3_test.go | 300 +
.../networking/v2/extensions/lbaas/common.go | 78 +
.../v2/extensions/lbaas/member_test.go | 95 +
.../v2/extensions/lbaas/monitor_test.go | 77 +
.../networking/v2/extensions/lbaas/pkg.go | 1 +
.../v2/extensions/lbaas/pool_test.go | 98 +
.../v2/extensions/lbaas/vip_test.go | 101 +
.../openstack/networking/v2/extensions/pkg.go | 1 +
.../networking/v2/extensions/provider_test.go | 68 +
.../networking/v2/extensions/security_test.go | 171 +
.../openstack/networking/v2/network_test.go | 68 +
.../acceptance/openstack/networking/v2/pkg.go | 1 +
.../openstack/networking/v2/port_test.go | 117 +
.../openstack/networking/v2/subnet_test.go | 86 +
.../objectstorage/v1/accounts_test.go | 50 +
.../openstack/objectstorage/v1/common.go | 28 +
.../objectstorage/v1/containers_test.go | 137 +
.../objectstorage/v1/objects_test.go | 119 +
.../orchestration/v1/buildinfo_test.go | 20 +
.../openstack/orchestration/v1/common.go | 44 +
.../orchestration/v1/hello-compute.json | 13 +
.../orchestration/v1/stackevents_test.go | 68 +
.../orchestration/v1/stackresources_test.go | 62 +
.../openstack/orchestration/v1/stacks_test.go | 153 +
.../orchestration/v1/stacktemplates_test.go | 75 +
.../gophercloud/acceptance/openstack/pkg.go | 4 +
.../rackspace/blockstorage/v1/common.go | 38 +
.../blockstorage/v1/snapshot_test.go | 82 +
.../rackspace/blockstorage/v1/volume_test.go | 71 +
.../blockstorage/v1/volume_type_test.go | 46 +
.../acceptance/rackspace/cdn/v1/base_test.go | 32 +
.../acceptance/rackspace/cdn/v1/common.go | 23 +
.../rackspace/cdn/v1/flavor_test.go | 47 +
.../rackspace/cdn/v1/service_test.go | 93 +
.../rackspace/cdn/v1/serviceasset_test.go | 32 +
.../acceptance/rackspace/client_test.go | 28 +
.../compute/v2/bootfromvolume_test.go | 49 +
.../rackspace/compute/v2/compute_test.go | 60 +
.../rackspace/compute/v2/flavors_test.go | 61 +
.../rackspace/compute/v2/images_test.go | 63 +
.../rackspace/compute/v2/keypairs_test.go | 87 +
.../rackspace/compute/v2/networks_test.go | 53 +
.../acceptance/rackspace/compute/v2/pkg.go | 1 +
.../rackspace/compute/v2/servers_test.go | 217 +
.../compute/v2/virtualinterfaces_test.go | 53 +
.../rackspace/compute/v2/volumeattach_test.go | 130 +
.../acceptance/rackspace/db/v1/backup_test.go | 84 +
.../acceptance/rackspace/db/v1/common.go | 73 +
.../rackspace/db/v1/config_group_test.go | 93 +
.../rackspace/db/v1/database_test.go | 54 +
.../acceptance/rackspace/db/v1/flavor_test.go | 32 +
.../rackspace/db/v1/instance_test.go | 169 +
.../acceptance/rackspace/db/v1/pkg.go | 1 +
.../rackspace/db/v1/replica_test.go | 33 +
.../acceptance/rackspace/db/v1/user_test.go | 125 +
.../rackspace/identity/v2/extension_test.go | 54 +
.../rackspace/identity/v2/identity_test.go | 50 +
.../acceptance/rackspace/identity/v2/pkg.go | 1 +
.../rackspace/identity/v2/role_test.go | 59 +
.../rackspace/identity/v2/tenant_test.go | 37 +
.../rackspace/identity/v2/tokens_test.go | 61 +
.../rackspace/identity/v2/user_test.go | 93 +
.../acceptance/rackspace/lb/v1/acl_test.go | 94 +
.../acceptance/rackspace/lb/v1/common.go | 62 +
.../acceptance/rackspace/lb/v1/lb_test.go | 214 +
.../rackspace/lb/v1/monitor_test.go | 60 +
.../acceptance/rackspace/lb/v1/node_test.go | 175 +
.../rackspace/lb/v1/session_test.go | 47 +
.../rackspace/lb/v1/throttle_test.go | 53 +
.../acceptance/rackspace/lb/v1/vip_test.go | 83 +
.../rackspace/networking/v2/common.go | 39 +
.../rackspace/networking/v2/network_test.go | 65 +
.../rackspace/networking/v2/port_test.go | 116 +
.../rackspace/networking/v2/security_test.go | 165 +
.../rackspace/networking/v2/subnet_test.go | 84 +
.../objectstorage/v1/accounts_test.go | 38 +
.../rackspace/objectstorage/v1/bulk_test.go | 23 +
.../objectstorage/v1/cdncontainers_test.go | 66 +
.../objectstorage/v1/cdnobjects_test.go | 50 +
.../rackspace/objectstorage/v1/common.go | 54 +
.../objectstorage/v1/containers_test.go | 90 +
.../objectstorage/v1/objects_test.go | 124 +
.../orchestration/v1/buildinfo_test.go | 20 +
.../rackspace/orchestration/v1/common.go | 45 +
.../orchestration/v1/stackevents_test.go | 70 +
.../orchestration/v1/stackresources_test.go | 64 +
.../rackspace/orchestration/v1/stacks_test.go | 154 +
.../orchestration/v1/stacktemplates_test.go | 77 +
.../gophercloud/acceptance/rackspace/pkg.go | 1 +
.../rackconnect/v3/cloudnetworks_test.go | 36 +
.../rackspace/rackconnect/v3/common.go | 26 +
.../rackspace/rackconnect/v3/lbpools_test.go | 71 +
.../rackconnect/v3/publicips_test.go | 45 +
.../gophercloud/acceptance/tools/pkg.go | 1 +
.../gophercloud/acceptance/tools/tools.go | 89 +
.../rackspace/gophercloud/auth_options.go | 50 +
.../rackspace/gophercloud/auth_results.go | 14 +
.../github.com/rackspace/gophercloud/doc.go | 67 +
.../rackspace/gophercloud/endpoint_search.go | 92 +
.../gophercloud/endpoint_search_test.go | 19 +
.../gophercloud/openstack/auth_env.go | 58 +
.../blockstorage/v1/apiversions/doc.go | 3 +
.../blockstorage/v1/apiversions/requests.go | 21 +
.../v1/apiversions/requests_test.go | 145 +
.../blockstorage/v1/apiversions/results.go | 58 +
.../blockstorage/v1/apiversions/urls.go | 15 +
.../blockstorage/v1/apiversions/urls_test.go | 26 +
.../blockstorage/v1/snapshots/doc.go | 5 +
.../blockstorage/v1/snapshots/fixtures.go | 114 +
.../blockstorage/v1/snapshots/requests.go | 206 +
.../v1/snapshots/requests_test.go | 104 +
.../blockstorage/v1/snapshots/results.go | 123 +
.../blockstorage/v1/snapshots/urls.go | 27 +
.../blockstorage/v1/snapshots/urls_test.go | 50 +
.../blockstorage/v1/snapshots/util.go | 22 +
.../openstack/blockstorage/v1/volumes/doc.go | 5 +
.../blockstorage/v1/volumes/requests.go | 236 +
.../blockstorage/v1/volumes/requests_test.go | 123 +
.../blockstorage/v1/volumes/results.go | 113 +
.../blockstorage/v1/volumes/testing/doc.go | 7 +
.../v1/volumes/testing/fixtures.go | 113 +
.../openstack/blockstorage/v1/volumes/urls.go | 23 +
.../blockstorage/v1/volumes/urls_test.go | 44 +
.../openstack/blockstorage/v1/volumes/util.go | 22 +
.../blockstorage/v1/volumetypes/doc.go | 9 +
.../blockstorage/v1/volumetypes/fixtures.go | 60 +
.../blockstorage/v1/volumetypes/requests.go | 76 +
.../v1/volumetypes/requests_test.go | 118 +
.../blockstorage/v1/volumetypes/results.go | 72 +
.../blockstorage/v1/volumetypes/urls.go | 19 +
.../blockstorage/v1/volumetypes/urls_test.go | 38 +
.../gophercloud/openstack/cdn/v1/base/doc.go | 4 +
.../openstack/cdn/v1/base/fixtures.go | 53 +
.../openstack/cdn/v1/base/requests.go | 21 +
.../openstack/cdn/v1/base/requests_test.go | 43 +
.../openstack/cdn/v1/base/results.go | 35 +
.../gophercloud/openstack/cdn/v1/base/urls.go | 11 +
.../openstack/cdn/v1/flavors/doc.go | 6 +
.../openstack/cdn/v1/flavors/fixtures.go | 82 +
.../openstack/cdn/v1/flavors/requests.go | 22 +
.../openstack/cdn/v1/flavors/requests_test.go | 89 +
.../openstack/cdn/v1/flavors/results.go | 71 +
.../openstack/cdn/v1/flavors/urls.go | 11 +
.../openstack/cdn/v1/serviceassets/doc.go | 7 +
.../cdn/v1/serviceassets/fixtures.go | 19 +
.../cdn/v1/serviceassets/requests.go | 48 +
.../cdn/v1/serviceassets/requests_test.go | 18 +
.../openstack/cdn/v1/serviceassets/results.go | 8 +
.../openstack/cdn/v1/serviceassets/urls.go | 7 +
.../openstack/cdn/v1/services/doc.go | 7 +
.../openstack/cdn/v1/services/errors.go | 7 +
.../openstack/cdn/v1/services/fixtures.go | 372 +
.../openstack/cdn/v1/services/requests.go | 378 +
.../cdn/v1/services/requests_test.go | 358 +
.../openstack/cdn/v1/services/results.go | 316 +
.../openstack/cdn/v1/services/urls.go | 23 +
.../rackspace/gophercloud/openstack/client.go | 274 +
.../gophercloud/openstack/client_test.go | 161 +
.../gophercloud/openstack/common/README.md | 3 +
.../openstack/common/extensions/doc.go | 15 +
.../openstack/common/extensions/errors.go | 1 +
.../openstack/common/extensions/fixtures.go | 91 +
.../openstack/common/extensions/requests.go | 21 +
.../common/extensions/requests_test.go | 38 +
.../openstack/common/extensions/results.go | 65 +
.../openstack/common/extensions/urls.go | 13 +
.../openstack/common/extensions/urls_test.go | 26 +
.../v2/extensions/bootfromvolume/requests.go | 111 +
.../bootfromvolume/requests_test.go | 53 +
.../v2/extensions/bootfromvolume/results.go | 10 +
.../v2/extensions/bootfromvolume/urls.go | 7 +
.../v2/extensions/bootfromvolume/urls_test.go | 16 +
.../compute/v2/extensions/defsecrules/doc.go | 1 +
.../v2/extensions/defsecrules/fixtures.go | 108 +
.../v2/extensions/defsecrules/requests.go | 95 +
.../extensions/defsecrules/requests_test.go | 100 +
.../v2/extensions/defsecrules/results.go | 69 +
.../compute/v2/extensions/defsecrules/urls.go | 13 +
.../compute/v2/extensions/delegate.go | 23 +
.../compute/v2/extensions/delegate_test.go | 96 +
.../compute/v2/extensions/diskconfig/doc.go | 3 +
.../v2/extensions/diskconfig/requests.go | 114 +
.../v2/extensions/diskconfig/requests_test.go | 89 +
.../v2/extensions/diskconfig/results.go | 60 +
.../v2/extensions/diskconfig/results_test.go | 68 +
.../openstack/compute/v2/extensions/doc.go | 3 +
.../compute/v2/extensions/floatingip/doc.go | 3 +
.../v2/extensions/floatingip/fixtures.go | 193 +
.../v2/extensions/floatingip/requests.go | 171 +
.../v2/extensions/floatingip/requests_test.go | 123 +
.../v2/extensions/floatingip/results.go | 99 +
.../compute/v2/extensions/floatingip/urls.go | 37 +
.../v2/extensions/floatingip/urls_test.go | 60 +
.../compute/v2/extensions/keypairs/doc.go | 3 +
.../v2/extensions/keypairs/fixtures.go | 171 +
.../v2/extensions/keypairs/requests.go | 102 +
.../v2/extensions/keypairs/requests_test.go | 71 +
.../compute/v2/extensions/keypairs/results.go | 94 +
.../compute/v2/extensions/keypairs/urls.go | 25 +
.../v2/extensions/keypairs/urls_test.go | 40 +
.../compute/v2/extensions/networks/doc.go | 2 +
.../v2/extensions/networks/fixtures.go | 209 +
.../v2/extensions/networks/requests.go | 22 +
.../v2/extensions/networks/requests_test.go | 37 +
.../compute/v2/extensions/networks/results.go | 222 +
.../compute/v2/extensions/networks/urls.go | 17 +
.../v2/extensions/networks/urls_test.go | 25 +
.../v2/extensions/schedulerhints/doc.go | 3 +
.../v2/extensions/schedulerhints/requests.go | 134 +
.../schedulerhints/requests_test.go | 130 +
.../compute/v2/extensions/secgroups/doc.go | 1 +
.../v2/extensions/secgroups/fixtures.go | 267 +
.../v2/extensions/secgroups/requests.go | 257 +
.../v2/extensions/secgroups/requests_test.go | 248 +
.../v2/extensions/secgroups/results.go | 147 +
.../compute/v2/extensions/secgroups/urls.go | 32 +
.../compute/v2/extensions/servergroups/doc.go | 2 +
.../v2/extensions/servergroups/fixtures.go | 161 +
.../v2/extensions/servergroups/requests.go | 77 +
.../extensions/servergroups/requests_test.go | 59 +
.../v2/extensions/servergroups/results.go | 87 +
.../v2/extensions/servergroups/urls.go | 25 +
.../v2/extensions/servergroups/urls_test.go | 42 +
.../compute/v2/extensions/startstop/doc.go | 5 +
.../v2/extensions/startstop/fixtures.go | 27 +
.../v2/extensions/startstop/requests.go | 23 +
.../v2/extensions/startstop/requests_test.go | 30 +
.../v2/extensions/tenantnetworks/doc.go | 2 +
.../v2/extensions/tenantnetworks/fixtures.go | 84 +
.../v2/extensions/tenantnetworks/requests.go | 22 +
.../tenantnetworks/requests_test.go | 37 +
.../v2/extensions/tenantnetworks/results.go | 68 +
.../v2/extensions/tenantnetworks/urls.go | 17 +
.../v2/extensions/tenantnetworks/urls_test.go | 25 +
.../compute/v2/extensions/volumeattach/doc.go | 3 +
.../v2/extensions/volumeattach/requests.go | 75 +
.../extensions/volumeattach/requests_test.go | 94 +
.../v2/extensions/volumeattach/results.go | 84 +
.../v2/extensions/volumeattach/testing/doc.go | 7 +
.../volumeattach/testing/fixtures.go | 110 +
.../v2/extensions/volumeattach/urls.go | 25 +
.../v2/extensions/volumeattach/urls_test.go | 46 +
.../openstack/compute/v2/flavors/doc.go | 7 +
.../openstack/compute/v2/flavors/requests.go | 103 +
.../compute/v2/flavors/requests_test.go | 129 +
.../openstack/compute/v2/flavors/results.go | 122 +
.../openstack/compute/v2/flavors/urls.go | 13 +
.../openstack/compute/v2/flavors/urls_test.go | 26 +
.../openstack/compute/v2/images/doc.go | 7 +
.../openstack/compute/v2/images/requests.go | 109 +
.../compute/v2/images/requests_test.go | 191 +
.../openstack/compute/v2/images/results.go | 95 +
.../openstack/compute/v2/images/urls.go | 15 +
.../openstack/compute/v2/images/urls_test.go | 26 +
.../openstack/compute/v2/servers/doc.go | 6 +
.../openstack/compute/v2/servers/fixtures.go | 664 +
.../openstack/compute/v2/servers/requests.go | 852 +
.../compute/v2/servers/requests_test.go | 373 +
.../openstack/compute/v2/servers/results.go | 372 +
.../openstack/compute/v2/servers/urls.go | 47 +
.../openstack/compute/v2/servers/urls_test.go | 68 +
.../openstack/compute/v2/servers/util.go | 20 +
.../openstack/db/v1/configurations/doc.go | 11 +
.../db/v1/configurations/fixtures.go | 157 +
.../db/v1/configurations/requests.go | 287 +
.../db/v1/configurations/requests_test.go | 236 +
.../openstack/db/v1/configurations/results.go | 197 +
.../openstack/db/v1/configurations/urls.go | 31 +
.../openstack/db/v1/databases/doc.go | 6 +
.../openstack/db/v1/databases/fixtures.go | 61 +
.../openstack/db/v1/databases/requests.go | 115 +
.../db/v1/databases/requests_test.go | 66 +
.../openstack/db/v1/databases/results.go | 72 +
.../openstack/db/v1/databases/urls.go | 11 +
.../openstack/db/v1/datastores/doc.go | 3 +
.../openstack/db/v1/datastores/fixtures.go | 100 +
.../openstack/db/v1/datastores/requests.go | 47 +
.../db/v1/datastores/requests_test.go | 78 +
.../openstack/db/v1/datastores/results.go | 123 +
.../openstack/db/v1/datastores/urls.go | 19 +
.../openstack/db/v1/flavors/doc.go | 7 +
.../openstack/db/v1/flavors/fixtures.go | 50 +
.../openstack/db/v1/flavors/requests.go | 29 +
.../openstack/db/v1/flavors/requests_test.go | 91 +
.../openstack/db/v1/flavors/results.go | 92 +
.../openstack/db/v1/flavors/urls.go | 11 +
.../openstack/db/v1/instances/doc.go | 7 +
.../openstack/db/v1/instances/fixtures.go | 169 +
.../openstack/db/v1/instances/requests.go | 238 +
.../db/v1/instances/requests_test.go | 133 +
.../openstack/db/v1/instances/results.go | 213 +
.../openstack/db/v1/instances/urls.go | 19 +
.../gophercloud/openstack/db/v1/users/doc.go | 3 +
.../openstack/db/v1/users/fixtures.go | 37 +
.../openstack/db/v1/users/requests.go | 132 +
.../openstack/db/v1/users/requests_test.go | 84 +
.../openstack/db/v1/users/results.go | 73 +
.../gophercloud/openstack/db/v1/users/urls.go | 11 +
.../openstack/endpoint_location.go | 91 +
.../openstack/endpoint_location_test.go | 228 +
.../v2/extensions/admin/roles/docs.go | 16 +
.../v2/extensions/admin/roles/fixtures.go | 48 +
.../v2/extensions/admin/roles/requests.go | 33 +
.../extensions/admin/roles/requests_test.go | 64 +
.../v2/extensions/admin/roles/results.go | 53 +
.../v2/extensions/admin/roles/urls.go | 21 +
.../identity/v2/extensions/delegate.go | 52 +
.../identity/v2/extensions/delegate_test.go | 38 +
.../openstack/identity/v2/extensions/doc.go | 3 +
.../identity/v2/extensions/fixtures.go | 60 +
.../openstack/identity/v2/tenants/doc.go | 7 +
.../openstack/identity/v2/tenants/fixtures.go | 65 +
.../openstack/identity/v2/tenants/requests.go | 33 +
.../identity/v2/tenants/requests_test.go | 29 +
.../openstack/identity/v2/tenants/results.go | 62 +
.../openstack/identity/v2/tenants/urls.go | 7 +
.../openstack/identity/v2/tokens/doc.go | 5 +
.../openstack/identity/v2/tokens/errors.go | 30 +
.../openstack/identity/v2/tokens/fixtures.go | 195 +
.../openstack/identity/v2/tokens/requests.go | 99 +
.../identity/v2/tokens/requests_test.go | 152 +
.../openstack/identity/v2/tokens/results.go | 170 +
.../openstack/identity/v2/tokens/urls.go | 13 +
.../openstack/identity/v2/users/doc.go | 1 +
.../openstack/identity/v2/users/fixtures.go | 163 +
.../openstack/identity/v2/users/requests.go | 161 +
.../identity/v2/users/requests_test.go | 165 +
.../openstack/identity/v2/users/results.go | 128 +
.../openstack/identity/v2/users/urls.go | 21 +
.../openstack/identity/v3/endpoints/doc.go | 6 +
.../openstack/identity/v3/endpoints/errors.go | 21 +
.../identity/v3/endpoints/requests.go | 123 +
.../identity/v3/endpoints/requests_test.go | 226 +
.../identity/v3/endpoints/results.go | 82 +
.../openstack/identity/v3/endpoints/urls.go | 11 +
.../identity/v3/endpoints/urls_test.go | 23 +
.../openstack/identity/v3/roles/doc.go | 3 +
.../openstack/identity/v3/roles/requests.go | 50 +
.../identity/v3/roles/requests_test.go | 104 +
.../openstack/identity/v3/roles/results.go | 81 +
.../openstack/identity/v3/roles/urls.go | 7 +
.../openstack/identity/v3/roles/urls_test.go | 15 +
.../openstack/identity/v3/services/doc.go | 3 +
.../identity/v3/services/requests.go | 77 +
.../identity/v3/services/requests_test.go | 209 +
.../openstack/identity/v3/services/results.go | 80 +
.../openstack/identity/v3/services/urls.go | 11 +
.../identity/v3/services/urls_test.go | 23 +
.../openstack/identity/v3/tokens/doc.go | 6 +
.../openstack/identity/v3/tokens/errors.go | 72 +
.../openstack/identity/v3/tokens/requests.go | 281 +
.../identity/v3/tokens/requests_test.go | 514 +
.../openstack/identity/v3/tokens/results.go | 139 +
.../openstack/identity/v3/tokens/urls.go | 7 +
.../openstack/identity/v3/tokens/urls_test.go | 21 +
.../networking/v2/apiversions/doc.go | 4 +
.../networking/v2/apiversions/errors.go | 1 +
.../networking/v2/apiversions/requests.go | 21 +
.../v2/apiversions/requests_test.go | 182 +
.../networking/v2/apiversions/results.go | 77 +
.../networking/v2/apiversions/urls.go | 15 +
.../networking/v2/apiversions/urls_test.go | 26 +
.../networking/v2/common/common_tests.go | 14 +
.../networking/v2/extensions/delegate.go | 41 +
.../networking/v2/extensions/delegate_test.go | 105 +
.../networking/v2/extensions/external/doc.go | 3 +
.../v2/extensions/external/requests.go | 69 +
.../v2/extensions/external/results.go | 81 +
.../v2/extensions/external/results_test.go | 254 +
.../networking/v2/extensions/fwaas/doc.go | 3 +
.../v2/extensions/fwaas/firewalls/errors.go | 11 +
.../v2/extensions/fwaas/firewalls/requests.go | 216 +
.../fwaas/firewalls/requests_test.go | 246 +
.../v2/extensions/fwaas/firewalls/results.go | 101 +
.../v2/extensions/fwaas/firewalls/urls.go | 16 +
.../v2/extensions/fwaas/policies/requests.go | 243 +
.../fwaas/policies/requests_test.go | 279 +
.../v2/extensions/fwaas/policies/results.go | 101 +
.../v2/extensions/fwaas/policies/urls.go | 26 +
.../v2/extensions/fwaas/rules/errors.go | 12 +
.../v2/extensions/fwaas/rules/requests.go | 285 +
.../extensions/fwaas/rules/requests_test.go | 328 +
.../v2/extensions/fwaas/rules/results.go | 110 +
.../v2/extensions/fwaas/rules/urls.go | 16 +
.../networking/v2/extensions/layer3/doc.go | 5 +
.../extensions/layer3/floatingips/requests.go | 168 +
.../layer3/floatingips/requests_test.go | 355 +
.../extensions/layer3/floatingips/results.go | 127 +
.../v2/extensions/layer3/floatingips/urls.go | 13 +
.../v2/extensions/layer3/routers/requests.go | 230 +
.../layer3/routers/requests_test.go | 405 +
.../v2/extensions/layer3/routers/results.go | 168 +
.../v2/extensions/layer3/routers/urls.go | 21 +
.../networking/v2/extensions/lbaas/doc.go | 3 +
.../v2/extensions/lbaas/members/requests.go | 123 +
.../extensions/lbaas/members/requests_test.go | 243 +
.../v2/extensions/lbaas/members/results.go | 122 +
.../v2/extensions/lbaas/members/urls.go | 16 +
.../v2/extensions/lbaas/monitors/requests.go | 265 +
.../lbaas/monitors/requests_test.go | 312 +
.../v2/extensions/lbaas/monitors/results.go | 147 +
.../v2/extensions/lbaas/monitors/urls.go | 16 +
.../v2/extensions/lbaas/pools/requests.go | 181 +
.../extensions/lbaas/pools/requests_test.go | 318 +
.../v2/extensions/lbaas/pools/results.go | 146 +
.../v2/extensions/lbaas/pools/urls.go | 25 +
.../v2/extensions/lbaas/vips/requests.go | 256 +
.../v2/extensions/lbaas/vips/requests_test.go | 336 +
.../v2/extensions/lbaas/vips/results.go | 166 +
.../v2/extensions/lbaas/vips/urls.go | 16 +
.../networking/v2/extensions/provider/doc.go | 21 +
.../v2/extensions/provider/results.go | 124 +
.../v2/extensions/provider/results_test.go | 253 +
.../networking/v2/extensions/security/doc.go | 32 +
.../v2/extensions/security/groups/requests.go | 131 +
.../security/groups/requests_test.go | 213 +
.../v2/extensions/security/groups/results.go | 108 +
.../v2/extensions/security/groups/urls.go | 13 +
.../v2/extensions/security/rules/requests.go | 174 +
.../security/rules/requests_test.go | 243 +
.../v2/extensions/security/rules/results.go | 133 +
.../v2/extensions/security/rules/urls.go | 13 +
.../openstack/networking/v2/networks/doc.go | 9 +
.../networking/v2/networks/errors.go | 1 +
.../networking/v2/networks/requests.go | 226 +
.../networking/v2/networks/requests_test.go | 276 +
.../networking/v2/networks/results.go | 116 +
.../openstack/networking/v2/networks/urls.go | 31 +
.../networking/v2/networks/urls_test.go | 38 +
.../openstack/networking/v2/ports/doc.go | 8 +
.../openstack/networking/v2/ports/errors.go | 11 +
.../openstack/networking/v2/ports/requests.go | 260 +
.../networking/v2/ports/requests_test.go | 321 +
.../openstack/networking/v2/ports/results.go | 126 +
.../openstack/networking/v2/ports/urls.go | 31 +
.../networking/v2/ports/urls_test.go | 44 +
.../openstack/networking/v2/subnets/doc.go | 10 +
.../openstack/networking/v2/subnets/errors.go | 13 +
.../networking/v2/subnets/requests.go | 271 +
.../networking/v2/subnets/requests_test.go | 362 +
.../networking/v2/subnets/results.go | 132 +
.../networking/v2/subnets/results_test.go | 54 +
.../openstack/networking/v2/subnets/urls.go | 31 +
.../networking/v2/subnets/urls_test.go | 44 +
.../objectstorage/v1/accounts/doc.go | 8 +
.../objectstorage/v1/accounts/fixtures.go | 38 +
.../objectstorage/v1/accounts/requests.go | 107 +
.../v1/accounts/requests_test.go | 32 +
.../objectstorage/v1/accounts/results.go | 102 +
.../objectstorage/v1/accounts/urls.go | 11 +
.../objectstorage/v1/accounts/urls_test.go | 26 +
.../objectstorage/v1/containers/doc.go | 8 +
.../objectstorage/v1/containers/fixtures.go | 143 +
.../objectstorage/v1/containers/requests.go | 205 +
.../v1/containers/requests_test.go | 117 +
.../objectstorage/v1/containers/results.go | 270 +
.../objectstorage/v1/containers/urls.go | 23 +
.../objectstorage/v1/containers/urls_test.go | 43 +
.../openstack/objectstorage/v1/objects/doc.go | 5 +
.../objectstorage/v1/objects/fixtures.go | 195 +
.../objectstorage/v1/objects/requests.go | 501 +
.../objectstorage/v1/objects/requests_test.go | 165 +
.../objectstorage/v1/objects/results.go | 438 +
.../objectstorage/v1/objects/urls.go | 33 +
.../objectstorage/v1/objects/urls_test.go | 56 +
.../orchestration/v1/apiversions/doc.go | 4 +
.../orchestration/v1/apiversions/requests.go | 13 +
.../v1/apiversions/requests_test.go | 89 +
.../orchestration/v1/apiversions/results.go | 42 +
.../orchestration/v1/apiversions/urls.go | 7 +
.../orchestration/v1/buildinfo/doc.go | 2 +
.../orchestration/v1/buildinfo/fixtures.go | 45 +
.../orchestration/v1/buildinfo/requests.go | 10 +
.../v1/buildinfo/requests_test.go | 20 +
.../orchestration/v1/buildinfo/results.go | 37 +
.../orchestration/v1/buildinfo/urls.go | 7 +
.../orchestration/v1/stackevents/doc.go | 4 +
.../orchestration/v1/stackevents/fixtures.go | 446 +
.../orchestration/v1/stackevents/requests.go | 203 +
.../v1/stackevents/requests_test.go | 71 +
.../orchestration/v1/stackevents/results.go | 172 +
.../orchestration/v1/stackevents/urls.go | 19 +
.../orchestration/v1/stackresources/doc.go | 5 +
.../v1/stackresources/fixtures.go | 439 +
.../v1/stackresources/requests.go | 113 +
.../v1/stackresources/requests_test.go | 111 +
.../v1/stackresources/results.go | 284 +
.../orchestration/v1/stackresources/urls.go | 31 +
.../openstack/orchestration/v1/stacks/doc.go | 8 +
.../orchestration/v1/stacks/environment.go | 137 +
.../v1/stacks/environment_test.go | 184 +
.../orchestration/v1/stacks/fixtures.go | 604 +
.../orchestration/v1/stacks/requests.go | 682 +
.../orchestration/v1/stacks/requests_test.go | 358 +
.../orchestration/v1/stacks/results.go | 313 +
.../orchestration/v1/stacks/template.go | 139 +
.../orchestration/v1/stacks/template_test.go | 148 +
.../openstack/orchestration/v1/stacks/urls.go | 35 +
.../orchestration/v1/stacks/utils.go | 161 +
.../orchestration/v1/stacks/utils_test.go | 94 +
.../orchestration/v1/stacktemplates/doc.go | 8 +
.../v1/stacktemplates/fixtures.go | 95 +
.../v1/stacktemplates/requests.go | 58 +
.../v1/stacktemplates/requests_test.go | 57 +
.../v1/stacktemplates/results.go | 51 +
.../orchestration/v1/stacktemplates/urls.go | 11 +
.../openstack/utils/choose_version.go | 114 +
.../openstack/utils/choose_version_test.go | 118 +
.../rackspace/gophercloud/pagination/http.go | 60 +
.../gophercloud/pagination/linked.go | 67 +
.../gophercloud/pagination/linked_test.go | 120 +
.../gophercloud/pagination/marker.go | 40 +
.../gophercloud/pagination/marker_test.go | 126 +
.../rackspace/gophercloud/pagination/null.go | 20 +
.../rackspace/gophercloud/pagination/pager.go | 226 +
.../gophercloud/pagination/pagination_test.go | 13 +
.../rackspace/gophercloud/pagination/pkg.go | 4 +
.../gophercloud/pagination/single.go | 15 +
.../gophercloud/pagination/single_test.go | 84 +
.../rackspace/gophercloud/params.go | 271 +
.../rackspace/gophercloud/params_test.go | 165 +
.../rackspace/gophercloud/provider_client.go | 308 +
.../gophercloud/provider_client_test.go | 35 +
.../gophercloud/rackspace/auth_env.go | 57 +
.../blockstorage/v1/snapshots/delegate.go | 131 +
.../v1/snapshots/delegate_test.go | 97 +
.../blockstorage/v1/snapshots/doc.go | 3 +
.../blockstorage/v1/snapshots/results.go | 147 +
.../blockstorage/v1/volumes/delegate.go | 75 +
.../blockstorage/v1/volumes/delegate_test.go | 107 +
.../rackspace/blockstorage/v1/volumes/doc.go | 3 +
.../blockstorage/v1/volumes/results.go | 66 +
.../blockstorage/v1/volumetypes/delegate.go | 18 +
.../v1/volumetypes/delegate_test.go | 64 +
.../blockstorage/v1/volumetypes/doc.go | 3 +
.../blockstorage/v1/volumetypes/results.go | 37 +
.../rackspace/cdn/v1/base/delegate.go | 18 +
.../rackspace/cdn/v1/base/delegate_test.go | 44 +
.../gophercloud/rackspace/cdn/v1/base/doc.go | 4 +
.../rackspace/cdn/v1/flavors/delegate.go | 18 +
.../rackspace/cdn/v1/flavors/delegate_test.go | 90 +
.../rackspace/cdn/v1/flavors/doc.go | 6 +
.../cdn/v1/serviceassets/delegate.go | 13 +
.../cdn/v1/serviceassets/delegate_test.go | 19 +
.../rackspace/cdn/v1/serviceassets/doc.go | 7 +
.../rackspace/cdn/v1/services/delegate.go | 37 +
.../cdn/v1/services/delegate_test.go | 359 +
.../rackspace/cdn/v1/services/doc.go | 7 +
.../rackspace/gophercloud/rackspace/client.go | 224 +
.../gophercloud/rackspace/client_test.go | 38 +
.../compute/v2/bootfromvolume/delegate.go | 12 +
.../v2/bootfromvolume/delegate_test.go | 54 +
.../rackspace/compute/v2/flavors/delegate.go | 43 +
.../compute/v2/flavors/delegate_test.go | 62 +
.../rackspace/compute/v2/flavors/doc.go | 3 +
.../rackspace/compute/v2/flavors/fixtures.go | 137 +
.../rackspace/compute/v2/flavors/results.go | 104 +
.../rackspace/compute/v2/flavors/urls.go | 9 +
.../rackspace/compute/v2/images/delegate.go | 22 +
.../compute/v2/images/delegate_test.go | 62 +
.../rackspace/compute/v2/images/doc.go | 3 +
.../rackspace/compute/v2/images/fixtures.go | 200 +
.../rackspace/compute/v2/keypairs/delegate.go | 33 +
.../compute/v2/keypairs/delegate_test.go | 72 +
.../rackspace/compute/v2/keypairs/doc.go | 3 +
.../rackspace/compute/v2/networks/doc.go | 3 +
.../rackspace/compute/v2/networks/requests.go | 89 +
.../compute/v2/networks/requests_test.go | 156 +
.../rackspace/compute/v2/networks/results.go | 81 +
.../rackspace/compute/v2/networks/urls.go | 27 +
.../compute/v2/networks/urls_test.go | 38 +
.../rackspace/compute/v2/servers/delegate.go | 116 +
.../compute/v2/servers/delegate_test.go | 182 +
.../rackspace/compute/v2/servers/doc.go | 3 +
.../rackspace/compute/v2/servers/fixtures.go | 574 +
.../rackspace/compute/v2/servers/requests.go | 178 +
.../compute/v2/servers/requests_test.go | 59 +
.../compute/v2/virtualinterfaces/requests.go | 45 +
.../v2/virtualinterfaces/requests_test.go | 165 +
.../compute/v2/virtualinterfaces/results.go | 81 +
.../compute/v2/virtualinterfaces/urls.go | 15 +
.../compute/v2/virtualinterfaces/urls_test.go | 32 +
.../compute/v2/volumeattach/delegate.go | 27 +
.../compute/v2/volumeattach/delegate_test.go | 95 +
.../rackspace/compute/v2/volumeattach/doc.go | 3 +
.../rackspace/db/v1/backups/doc.go | 6 +
.../rackspace/db/v1/backups/fixtures.go | 66 +
.../rackspace/db/v1/backups/requests.go | 138 +
.../rackspace/db/v1/backups/requests_test.go | 131 +
.../rackspace/db/v1/backups/results.go | 149 +
.../rackspace/db/v1/backups/urls.go | 11 +
.../db/v1/configurations/delegate.go | 79 +
.../db/v1/configurations/delegate_test.go | 237 +
.../rackspace/db/v1/configurations/doc.go | 1 +
.../db/v1/configurations/fixtures.go | 159 +
.../rackspace/db/v1/databases/delegate.go | 19 +
.../db/v1/databases/delegate_test.go | 71 +
.../rackspace/db/v1/databases/doc.go | 3 +
.../rackspace/db/v1/databases/urls.go | 1 +
.../rackspace/db/v1/datastores/delegate.go | 28 +
.../db/v1/datastores/delegate_test.go | 79 +
.../rackspace/db/v1/datastores/doc.go | 1 +
.../rackspace/db/v1/flavors/delegate.go | 17 +
.../rackspace/db/v1/flavors/delegate_test.go | 95 +
.../rackspace/db/v1/flavors/doc.go | 3 +
.../rackspace/db/v1/instances/delegate.go | 49 +
.../db/v1/instances/delegate_test.go | 107 +
.../rackspace/db/v1/instances/doc.go | 3 +
.../rackspace/db/v1/instances/fixtures.go | 340 +
.../rackspace/db/v1/instances/requests.go | 199 +
.../db/v1/instances/requests_test.go | 246 +
.../rackspace/db/v1/instances/results.go | 191 +
.../rackspace/db/v1/instances/urls.go | 23 +
.../rackspace/db/v1/users/delegate.go | 16 +
.../rackspace/db/v1/users/delegate_test.go | 48 +
.../gophercloud/rackspace/db/v1/users/doc.go | 3 +
.../rackspace/db/v1/users/fixtures.go | 77 +
.../rackspace/db/v1/users/requests.go | 176 +
.../rackspace/db/v1/users/requests_test.go | 156 +
.../rackspace/db/v1/users/results.go | 149 +
.../gophercloud/rackspace/db/v1/users/urls.go | 19 +
.../identity/v2/extensions/delegate.go | 24 +
.../identity/v2/extensions/delegate_test.go | 39 +
.../rackspace/identity/v2/extensions/doc.go | 3 +
.../rackspace/identity/v2/roles/delegate.go | 50 +
.../identity/v2/roles/delegate_test.go | 66 +
.../rackspace/identity/v2/roles/fixtures.go | 49 +
.../rackspace/identity/v2/tenants/delegate.go | 17 +
.../identity/v2/tenants/delegate_test.go | 28 +
.../rackspace/identity/v2/tenants/doc.go | 3 +
.../rackspace/identity/v2/tokens/delegate.go | 60 +
.../identity/v2/tokens/delegate_test.go | 36 +
.../rackspace/identity/v2/tokens/doc.go | 3 +
.../rackspace/identity/v2/users/delegate.go | 142 +
.../identity/v2/users/delegate_test.go | 111 +
.../rackspace/identity/v2/users/fixtures.go | 154 +
.../rackspace/identity/v2/users/results.go | 129 +
.../rackspace/identity/v2/users/urls.go | 7 +
.../gophercloud/rackspace/lb/v1/acl/doc.go | 12 +
.../rackspace/lb/v1/acl/fixtures.go | 109 +
.../rackspace/lb/v1/acl/requests.go | 111 +
.../rackspace/lb/v1/acl/requests_test.go | 91 +
.../rackspace/lb/v1/acl/results.go | 72 +
.../gophercloud/rackspace/lb/v1/acl/urls.go | 20 +
.../gophercloud/rackspace/lb/v1/lbs/doc.go | 44 +
.../rackspace/lb/v1/lbs/fixtures.go | 584 +
.../rackspace/lb/v1/lbs/requests.go | 497 +
.../rackspace/lb/v1/lbs/requests_test.go | 438 +
.../rackspace/lb/v1/lbs/results.go | 420 +
.../gophercloud/rackspace/lb/v1/lbs/urls.go | 49 +
.../rackspace/lb/v1/monitors/doc.go | 21 +
.../rackspace/lb/v1/monitors/fixtures.go | 87 +
.../rackspace/lb/v1/monitors/requests.go | 160 +
.../rackspace/lb/v1/monitors/requests_test.go | 75 +
.../rackspace/lb/v1/monitors/results.go | 90 +
.../rackspace/lb/v1/monitors/urls.go | 16 +
.../gophercloud/rackspace/lb/v1/nodes/doc.go | 35 +
.../rackspace/lb/v1/nodes/fixtures.go | 243 +
.../rackspace/lb/v1/nodes/requests.go | 251 +
.../rackspace/lb/v1/nodes/requests_test.go | 243 +
.../rackspace/lb/v1/nodes/results.go | 213 +
.../gophercloud/rackspace/lb/v1/nodes/urls.go | 25 +
.../rackspace/lb/v1/sessions/doc.go | 30 +
.../rackspace/lb/v1/sessions/fixtures.go | 59 +
.../rackspace/lb/v1/sessions/requests.go | 63 +
.../rackspace/lb/v1/sessions/requests_test.go | 44 +
.../rackspace/lb/v1/sessions/results.go | 58 +
.../rackspace/lb/v1/sessions/urls.go | 16 +
.../gophercloud/rackspace/lb/v1/ssl/doc.go | 22 +
.../rackspace/lb/v1/ssl/fixtures.go | 196 +
.../rackspace/lb/v1/ssl/requests.go | 247 +
.../rackspace/lb/v1/ssl/requests_test.go | 167 +
.../rackspace/lb/v1/ssl/results.go | 148 +
.../gophercloud/rackspace/lb/v1/ssl/urls.go | 25 +
.../rackspace/lb/v1/throttle/doc.go | 5 +
.../rackspace/lb/v1/throttle/fixtures.go | 62 +
.../rackspace/lb/v1/throttle/requests.go | 76 +
.../rackspace/lb/v1/throttle/requests_test.go | 44 +
.../rackspace/lb/v1/throttle/results.go | 43 +
.../rackspace/lb/v1/throttle/urls.go | 16 +
.../gophercloud/rackspace/lb/v1/vips/doc.go | 13 +
.../rackspace/lb/v1/vips/fixtures.go | 88 +
.../rackspace/lb/v1/vips/requests.go | 97 +
.../rackspace/lb/v1/vips/requests_test.go | 87 +
.../rackspace/lb/v1/vips/results.go | 89 +
.../gophercloud/rackspace/lb/v1/vips/urls.go | 20 +
.../networking/v2/common/common_tests.go | 12 +
.../networking/v2/networks/delegate.go | 41 +
.../networking/v2/networks/delegate_test.go | 285 +
.../rackspace/networking/v2/ports/delegate.go | 43 +
.../networking/v2/ports/delegate_test.go | 322 +
.../rackspace/networking/v2/security/doc.go | 32 +
.../networking/v2/security/groups/delegate.go | 30 +
.../v2/security/groups/delegate_test.go | 206 +
.../networking/v2/security/rules/delegate.go | 30 +
.../v2/security/rules/delegate_test.go | 236 +
.../networking/v2/subnets/delegate.go | 40 +
.../networking/v2/subnets/delegate_test.go | 363 +
.../objectstorage/v1/accounts/delegate.go | 39 +
.../v1/accounts/delegate_test.go | 32 +
.../objectstorage/v1/accounts/doc.go | 3 +
.../rackspace/objectstorage/v1/bulk/doc.go | 3 +
.../objectstorage/v1/bulk/requests.go | 51 +
.../objectstorage/v1/bulk/requests_test.go | 36 +
.../objectstorage/v1/bulk/results.go | 28 +
.../rackspace/objectstorage/v1/bulk/urls.go | 11 +
.../objectstorage/v1/bulk/urls_test.go | 26 +
.../v1/cdncontainers/delegate.go | 38 +
.../v1/cdncontainers/delegate_test.go | 50 +
.../objectstorage/v1/cdncontainers/doc.go | 3 +
.../v1/cdncontainers/requests.go | 161 +
.../v1/cdncontainers/requests_test.go | 29 +
.../objectstorage/v1/cdncontainers/results.go | 149 +
.../objectstorage/v1/cdncontainers/urls.go | 15 +
.../v1/cdncontainers/urls_test.go | 20 +
.../objectstorage/v1/cdnobjects/delegate.go | 11 +
.../v1/cdnobjects/delegate_test.go | 19 +
.../objectstorage/v1/cdnobjects/doc.go | 3 +
.../objectstorage/v1/cdnobjects/request.go | 15 +
.../objectstorage/v1/containers/delegate.go | 93 +
.../v1/containers/delegate_test.go | 91 +
.../objectstorage/v1/containers/doc.go | 3 +
.../objectstorage/v1/objects/delegate.go | 94 +
.../objectstorage/v1/objects/delegate_test.go | 127 +
.../rackspace/objectstorage/v1/objects/doc.go | 3 +
.../orchestration/v1/buildinfo/delegate.go | 11 +
.../v1/buildinfo/delegate_test.go | 21 +
.../orchestration/v1/buildinfo/doc.go | 2 +
.../orchestration/v1/stackevents/delegate.go | 27 +
.../v1/stackevents/delegate_test.go | 72 +
.../orchestration/v1/stackevents/doc.go | 3 +
.../v1/stackresources/delegate.go | 42 +
.../v1/stackresources/delegate_test.go | 108 +
.../orchestration/v1/stackresources/doc.go | 5 +
.../orchestration/v1/stacks/delegate.go | 49 +
.../orchestration/v1/stacks/delegate_test.go | 870 +
.../rackspace/orchestration/v1/stacks/doc.go | 8 +
.../orchestration/v1/stacks/fixtures.go | 32 +
.../v1/stacktemplates/delegate.go | 16 +
.../v1/stacktemplates/delegate_test.go | 47 +
.../orchestration/v1/stacktemplates/doc.go | 8 +
.../rackconnect/v3/cloudnetworks/requests.go | 24 +
.../v3/cloudnetworks/requests_test.go | 87 +
.../rackconnect/v3/cloudnetworks/results.go | 113 +
.../rackconnect/v3/cloudnetworks/urls.go | 11 +
.../rackspace/rackconnect/v3/doc.go | 4 +
.../rackspace/rackconnect/v3/lbpools/doc.go | 14 +
.../rackconnect/v3/lbpools/requests.go | 146 +
.../rackconnect/v3/lbpools/requests_test.go | 876 +
.../rackconnect/v3/lbpools/results.go | 505 +
.../rackspace/rackconnect/v3/lbpools/urls.go | 49 +
.../rackconnect/v3/publicips/requests.go | 50 +
.../rackconnect/v3/publicips/requests_test.go | 378 +
.../rackconnect/v3/publicips/results.go | 221 +
.../rackconnect/v3/publicips/urls.go | 25 +
.../rackspace/gophercloud/results.go | 153 +
.../gophercloud/script/acceptancetest | 5 +
.../rackspace/gophercloud/script/bootstrap | 26 +
.../rackspace/gophercloud/script/cibuild | 5 +
.../rackspace/gophercloud/script/test | 5 +
.../rackspace/gophercloud/script/unittest | 5 +
.../rackspace/gophercloud/service_client.go | 32 +
.../gophercloud/service_client_test.go | 14 +
.../gophercloud/testhelper/client/fake.go | 17 +
.../gophercloud/testhelper/convenience.go | 329 +
.../rackspace/gophercloud/testhelper/doc.go | 4 +
.../gophercloud/testhelper/fixture/helper.go | 31 +
.../gophercloud/testhelper/http_responses.go | 91 +
.../github.com/rackspace/gophercloud/util.go | 82 +
.../rackspace/gophercloud/util_test.go | 85 +
vendor/github.com/satori/go.uuid/.travis.yml | 11 +
vendor/github.com/satori/go.uuid/LICENSE | 20 +
vendor/github.com/satori/go.uuid/README.md | 66 +
.../satori/go.uuid/benchmarks_test.go | 121 +
vendor/github.com/satori/go.uuid/uuid.go | 435 +
vendor/github.com/satori/go.uuid/uuid_test.go | 508 +
.../github.com/soniah/dnsmadeeasy/.gitignore | 28 +
.../github.com/soniah/dnsmadeeasy/.travis.yml | 13 +
.../github.com/soniah/dnsmadeeasy/AUTHORS.md | 6 +
vendor/github.com/soniah/dnsmadeeasy/LICENSE | 363 +
.../github.com/soniah/dnsmadeeasy/README.md | 33 +
vendor/github.com/soniah/dnsmadeeasy/api.go | 144 +
.../github.com/soniah/dnsmadeeasy/api_test.go | 79 +
.../soniah/dnsmadeeasy/examples/README.md | 8 +
.../soniah/dnsmadeeasy/examples/create.go | 45 +
.../soniah/dnsmadeeasy/examples/delete.go | 37 +
.../soniah/dnsmadeeasy/examples/read.go | 38 +
.../soniah/dnsmadeeasy/examples/update.go | 41 +
.../github.com/soniah/dnsmadeeasy/record.go | 191 +
.../soniah/dnsmadeeasy/record_test.go | 173 +
.../soniah/dnsmadeeasy/testutil/http.go | 180 +
.../sthulb/mime/multipart/example_test.go | 53 +
.../sthulb/mime/multipart/formdata.go | 157 +
.../sthulb/mime/multipart/formdata_test.go | 90 +
.../sthulb/mime/multipart/multipart.go | 388 +
.../sthulb/mime/multipart/multipart_test.go | 813 +
.../mime/multipart/testdata/nested-mime | 29 +
.../sthulb/mime/multipart/writer.go | 191 +
.../sthulb/mime/multipart/writer_test.go | 152 +
.../github.com/tent/http-link-go/.gitignore | 1 +
.../github.com/tent/http-link-go/.travis.yml | 6 +
vendor/github.com/tent/http-link-go/LICENSE | 27 +
vendor/github.com/tent/http-link-go/README.md | 12 +
vendor/github.com/tent/http-link-go/link.go | 185 +
.../github.com/tent/http-link-go/link_test.go | 67 +
vendor/github.com/vmware/govmomi/.travis.yml | 10 +
vendor/github.com/vmware/govmomi/CHANGELOG.md | 69 +
vendor/github.com/vmware/govmomi/CONTRIBUTORS | 26 +
vendor/github.com/vmware/govmomi/LICENSE.txt | 202 +
vendor/github.com/vmware/govmomi/Makefile | 20 +
vendor/github.com/vmware/govmomi/README.md | 36 +
vendor/github.com/vmware/govmomi/client.go | 167 +
.../github.com/vmware/govmomi/client_test.go | 138 +
.../vmware/govmomi/event/history_collector.go | 75 +
.../vmware/govmomi/event/manager.go | 161 +
.../github.com/vmware/govmomi/event/sort.go | 45 +
.../govmomi/examples/datastores/main.go | 180 +
.../github.com/vmware/govmomi/find/error.go | 64 +
.../github.com/vmware/govmomi/find/finder.go | 711 +
vendor/github.com/vmware/govmomi/gen/Gemfile | 3 +
.../vmware/govmomi/gen/Gemfile.lock | 12 +
vendor/github.com/vmware/govmomi/gen/gen.sh | 38 +
.../vmware/govmomi/gen/gen_from_vmodl.rb | 216 +
.../vmware/govmomi/gen/gen_from_wsdl.rb | 70 +
.../github.com/vmware/govmomi/gen/vim_wsdl.rb | 745 +
.../github.com/vmware/govmomi/govc/.gitignore | 1 +
.../vmware/govmomi/govc/CHANGELOG.md | 71 +
.../github.com/vmware/govmomi/govc/README.md | 116 +
.../vmware/govmomi/govc/about/command.go | 69 +
.../github.com/vmware/govmomi/govc/build.sh | 32 +
.../vmware/govmomi/govc/cli/command.go | 142 +
.../vmware/govmomi/govc/cli/register.go | 33 +
.../vmware/govmomi/govc/cluster/add.go | 129 +
.../vmware/govmomi/govc/cluster/change.go | 109 +
.../vmware/govmomi/govc/cluster/create.go | 104 +
.../vmware/govmomi/govc/datacenter/create.go | 85 +
.../vmware/govmomi/govc/datacenter/destroy.go | 81 +
.../vmware/govmomi/govc/datastore/cp.go | 99 +
.../vmware/govmomi/govc/datastore/create.go | 273 +
.../vmware/govmomi/govc/datastore/download.go | 73 +
.../vmware/govmomi/govc/datastore/info.go | 153 +
.../vmware/govmomi/govc/datastore/ls.go | 232 +
.../vmware/govmomi/govc/datastore/mkdir.go | 95 +
.../vmware/govmomi/govc/datastore/mv.go | 92 +
.../vmware/govmomi/govc/datastore/remove.go | 90 +
.../vmware/govmomi/govc/datastore/rm.go | 96 +
.../vmware/govmomi/govc/datastore/upload.go | 80 +
.../vmware/govmomi/govc/device/boot.go | 83 +
.../vmware/govmomi/govc/device/cdrom/add.go | 95 +
.../vmware/govmomi/govc/device/cdrom/eject.go | 78 +
.../govmomi/govc/device/cdrom/insert.go | 93 +
.../vmware/govmomi/govc/device/connect.go | 83 +
.../vmware/govmomi/govc/device/disconnect.go | 83 +
.../vmware/govmomi/govc/device/floppy/add.go | 86 +
.../govmomi/govc/device/floppy/eject.go | 78 +
.../govmomi/govc/device/floppy/insert.go | 93 +
.../vmware/govmomi/govc/device/info.go | 160 +
.../vmware/govmomi/govc/device/ls.go | 86 +
.../vmware/govmomi/govc/device/remove.go | 79 +
.../vmware/govmomi/govc/device/scsi/add.go | 107 +
.../vmware/govmomi/govc/device/serial/add.go | 86 +
.../govmomi/govc/device/serial/connect.go | 76 +
.../govmomi/govc/device/serial/disconnect.go | 72 +
.../github.com/vmware/govmomi/govc/dvs/add.go | 145 +
.../vmware/govmomi/govc/dvs/create.go | 119 +
.../vmware/govmomi/govc/dvs/portgroup/add.go | 107 +
.../vmware/govmomi/govc/emacs/.gitignore | 3 +
.../github.com/vmware/govmomi/govc/emacs/Cask | 10 +
.../vmware/govmomi/govc/emacs/Makefile | 27 +
.../vmware/govmomi/govc/emacs/README.md | 224 +
.../vmware/govmomi/govc/emacs/govc.el | 1260 +
.../govmomi/govc/emacs/test/govc-test.el | 137 +
.../vmware/govmomi/govc/emacs/test/make.el | 58 +
.../vmware/govmomi/govc/events/command.go | 124 +
.../vmware/govmomi/govc/examples/lib/ssh.sh | 44 +
.../vmware/govmomi/govc/examples/vcsa.sh | 57 +
.../vmware/govmomi/govc/extension/info.go | 121 +
.../vmware/govmomi/govc/extension/register.go | 81 +
.../vmware/govmomi/govc/extension/setcert.go | 171 +
.../govmomi/govc/extension/unregister.go | 66 +
.../vmware/govmomi/govc/fields/add.go | 78 +
.../vmware/govmomi/govc/fields/ls.go | 74 +
.../vmware/govmomi/govc/fields/rename.go | 75 +
.../vmware/govmomi/govc/fields/rm.go | 75 +
.../vmware/govmomi/govc/fields/set.go | 89 +
.../vmware/govmomi/govc/flags/client.go | 409 +
.../vmware/govmomi/govc/flags/common.go | 23 +
.../vmware/govmomi/govc/flags/datacenter.go | 154 +
.../vmware/govmomi/govc/flags/datastore.go | 114 +
.../vmware/govmomi/govc/flags/debug.go | 93 +
.../vmware/govmomi/govc/flags/empty.go | 32 +
.../vmware/govmomi/govc/flags/host_connect.go | 79 +
.../vmware/govmomi/govc/flags/host_system.go | 141 +
.../vmware/govmomi/govc/flags/network.go | 125 +
.../govmomi/govc/flags/optional_bool.go | 55 +
.../govmomi/govc/flags/optional_bool_test.go | 63 +
.../vmware/govmomi/govc/flags/output.go | 210 +
.../govmomi/govc/flags/resource_pool.go | 85 +
.../vmware/govmomi/govc/flags/search.go | 392 +
.../vmware/govmomi/govc/flags/version.go | 61 +
.../vmware/govmomi/govc/flags/version_test.go | 63 +
.../vmware/govmomi/govc/flags/virtual_app.go | 104 +
.../govmomi/govc/flags/virtual_machine.go | 110 +
.../vmware/govmomi/govc/host/add.go | 153 +
.../vmware/govmomi/govc/host/autostart/add.go | 63 +
.../govmomi/govc/host/autostart/autostart.go | 173 +
.../govmomi/govc/host/autostart/configure.go | 63 +
.../govmomi/govc/host/autostart/info.go | 143 +
.../govmomi/govc/host/autostart/remove.go | 63 +
.../vmware/govmomi/govc/host/disconnect.go | 81 +
.../govmomi/govc/host/esxcli/command.go | 149 +
.../govmomi/govc/host/esxcli/command_test.go | 124 +
.../vmware/govmomi/govc/host/esxcli/esxcli.go | 153 +
.../govmomi/govc/host/esxcli/executor.go | 166 +
.../govmomi/govc/host/esxcli/firewall_info.go | 45 +
.../host/esxcli/fixtures/network_vm_list.xml | 15 +
.../esxcli/fixtures/network_vm_port_list.xml | 12 +
.../esxcli/fixtures/system_hostname_get.xml | 5 +
.../govmomi/govc/host/esxcli/guest_info.go | 116 +
.../govmomi/govc/host/esxcli/response.go | 97 +
.../govmomi/govc/host/esxcli/response_test.go | 105 +
.../vmware/govmomi/govc/host/firewall/find.go | 128 +
.../vmware/govmomi/govc/host/info.go | 159 +
.../govmomi/govc/host/maintenance/enter.go | 94 +
.../govmomi/govc/host/maintenance/exit.go | 95 +
.../vmware/govmomi/govc/host/portgroup/add.go | 66 +
.../govmomi/govc/host/portgroup/remove.go | 58 +
.../vmware/govmomi/govc/host/reconnect.go | 97 +
.../vmware/govmomi/govc/host/remove.go | 101 +
.../vmware/govmomi/govc/host/storage/info.go | 188 +
.../govmomi/govc/host/storage/partition.go | 127 +
.../vmware/govmomi/govc/host/vnic/info.go | 146 +
.../vmware/govmomi/govc/host/vnic/service.go | 112 +
.../vmware/govmomi/govc/host/vswitch/add.go | 72 +
.../vmware/govmomi/govc/host/vswitch/info.go | 106 +
.../govmomi/govc/host/vswitch/remove.go | 58 +
.../vmware/govmomi/govc/importx/archive.go | 150 +
.../vmware/govmomi/govc/importx/folder.go | 91 +
.../vmware/govmomi/govc/importx/importable.go | 59 +
.../govmomi/govc/importx/lease_updater.go | 131 +
.../vmware/govmomi/govc/importx/options.go | 84 +
.../vmware/govmomi/govc/importx/ova.go | 61 +
.../vmware/govmomi/govc/importx/ovf.go | 415 +
.../vmware/govmomi/govc/importx/spec.go | 172 +
.../vmware/govmomi/govc/importx/vmdk.go | 518 +
.../vmware/govmomi/govc/license/add.go | 96 +
.../vmware/govmomi/govc/license/assign.go | 114 +
.../vmware/govmomi/govc/license/assigned.go | 96 +
.../vmware/govmomi/govc/license/decode.go | 85 + .../vmware/govmomi/govc/license/list.go | 78 + .../vmware/govmomi/govc/license/output.go | 41 + .../vmware/govmomi/govc/license/remove.go | 74 + .../vmware/govmomi/govc/logs/command.go | 110 + .../vmware/govmomi/govc/logs/download.go | 133 + .../github.com/vmware/govmomi/govc/logs/ls.go | 81 + .../vmware/govmomi/govc/ls/command.go | 131 + vendor/github.com/vmware/govmomi/govc/main.go | 63 + .../vmware/govmomi/govc/main_test.go | 38 + .../vmware/govmomi/govc/permissions/ls.go | 97 + .../vmware/govmomi/govc/permissions/remove.go | 85 + .../vmware/govmomi/govc/permissions/set.go | 103 + .../vmware/govmomi/govc/pool/change.go | 98 + .../vmware/govmomi/govc/pool/create.go | 101 + .../vmware/govmomi/govc/pool/destroy.go | 101 + .../vmware/govmomi/govc/pool/help.go | 50 + .../vmware/govmomi/govc/pool/info.go | 212 + .../govmomi/govc/pool/resource_config_spec.go | 98 + .../github.com/vmware/govmomi/govc/release.sh | 76 + .../vmware/govmomi/govc/test/.gitignore | 1 + .../vmware/govmomi/govc/test/README.md | 73 + .../govmomi/govc/test/boot_order_test.sh | 75 + .../vmware/govmomi/govc/test/clean.sh | 24 + .../vmware/govmomi/govc/test/cli.bats | 25 + .../vmware/govmomi/govc/test/datacenter.bats | 85 + .../vmware/govmomi/govc/test/datastore.bats | 75 + .../vmware/govmomi/govc/test/device.bats | 199 + .../govmomi/govc/test/esxbox/Vagrantfile | 27 + .../vmware/govmomi/govc/test/esxcli.bats | 43 + .../vmware/govmomi/govc/test/events.bats | 54 + .../vmware/govmomi/govc/test/extension.bats | 75 + .../vmware/govmomi/govc/test/fields.bats | 48 + .../vmware/govmomi/govc/test/firewall.bats | 20 + .../vmware/govmomi/govc/test/govc-sim | 7 + .../vmware/govmomi/govc/test/host.bats | 81 + .../govmomi/govc/test/images/.gitignore | 3 + .../vmware/govmomi/govc/test/images/update.sh | 18 + .../vmware/govmomi/govc/test/import.bats | 60 + .../vmware/govmomi/govc/test/license.bats | 53 + .../vmware/govmomi/govc/test/logs.bats | 38 + .../vmware/govmomi/govc/test/ls.bats | 93 + .../vmware/govmomi/govc/test/network.bats | 144 + .../vmware/govmomi/govc/test/pool.bats | 241 + .../vmware/govmomi/govc/test/test_helper.bash | 248 + .../govmomi/govc/test/vcsim/Vagrantfile | 24 + .../govmomi/govc/test/vcsim/provision.sh | 30 + .../vmware/govmomi/govc/test/vm.bats | 339 + .../vmware/govmomi/govc/vapp/destroy.go | 95 + .../vmware/govmomi/govc/vapp/power.go | 112 + .../vmware/govmomi/govc/version/command.go | 46 + .../vmware/govmomi/govc/vm/change.go | 98 + .../vmware/govmomi/govc/vm/create.go | 274 + .../vmware/govmomi/govc/vm/destroy.go | 82 + .../vmware/govmomi/govc/vm/disk/attach.go | 110 + .../vmware/govmomi/govc/vm/disk/create.go | 114 + .../vmware/govmomi/govc/vm/guest/auth.go | 68 + .../vmware/govmomi/govc/vm/guest/chmod.go | 59 + .../vmware/govmomi/govc/vm/guest/download.go | 82 + .../vmware/govmomi/govc/vm/guest/file_attr.go | 47 + .../vmware/govmomi/govc/vm/guest/getenv.go | 63 + .../vmware/govmomi/govc/vm/guest/guest.go | 113 + .../vmware/govmomi/govc/vm/guest/kill.go | 63 + .../vmware/govmomi/govc/vm/guest/ls.go | 78 + .../vmware/govmomi/govc/vm/guest/mkdir.go | 71 + .../vmware/govmomi/govc/vm/guest/mktemp.go | 74 + .../vmware/govmomi/govc/vm/guest/ps.go | 112 + .../vmware/govmomi/govc/vm/guest/rm.go | 53 + .../vmware/govmomi/govc/vm/guest/rmdir.go | 57 + .../vmware/govmomi/govc/vm/guest/start.go | 87 + .../vmware/govmomi/govc/vm/guest/upload.go | 87 + .../github.com/vmware/govmomi/govc/vm/info.go | 314 + .../github.com/vmware/govmomi/govc/vm/ip.go | 113 + 
.../vmware/govmomi/govc/vm/network/add.go | 75 + .../vmware/govmomi/govc/vm/network/change.go | 107 + .../vmware/govmomi/govc/vm/power.go | 161 + .../vmware/govmomi/govc/vm/question.go | 98 + .../github.com/vmware/govmomi/govc/vm/vnc.go | 480 + .../vmware/govmomi/guest/auth_manager.go | 79 + .../vmware/govmomi/guest/file_manager.go | 202 + .../govmomi/guest/operations_manager.go | 72 + .../vmware/govmomi/guest/process_manager.go | 96 + .../govmomi/license/assignment_manager.go | 69 + .../vmware/govmomi/license/manager.go | 195 + .../github.com/vmware/govmomi/list/lister.go | 571 + vendor/github.com/vmware/govmomi/list/path.go | 44 + .../vmware/govmomi/list/path_test.go | 93 + .../vmware/govmomi/list/recurser.go | 97 + .../govmomi/object/authorization_manager.go | 108 + .../object/cluster_compute_resource.go | 87 + .../object/cluster_compute_resource_test.go | 20 + .../vmware/govmomi/object/common.go | 71 + .../vmware/govmomi/object/compute_resource.go | 126 + .../govmomi/object/compute_resource_test.go | 20 + .../govmomi/object/custom_fields_manager.go | 135 + .../object/customization_spec_manager.go | 165 + .../vmware/govmomi/object/datacenter.go | 90 + .../vmware/govmomi/object/datacenter_test.go | 20 + .../vmware/govmomi/object/datastore.go | 351 + .../vmware/govmomi/object/datastore_test.go | 20 + .../govmomi/object/diagnostic_manager.go | 95 + .../object/distributed_virtual_portgroup.go | 64 + .../distributed_virtual_portgroup_test.go | 23 + .../object/distributed_virtual_switch.go | 68 + .../govmomi/object/extension_manager.go | 112 + .../vmware/govmomi/object/file_manager.go | 125 + .../vmware/govmomi/object/folder.go | 198 + .../vmware/govmomi/object/folder_test.go | 20 + .../govmomi/object/history_collector.go | 71 + .../govmomi/object/host_config_manager.go | 100 + .../govmomi/object/host_datastore_browser.go | 64 + .../govmomi/object/host_datastore_system.go | 103 + .../govmomi/object/host_firewall_system.go | 181 + .../govmomi/object/host_network_system.go | 357 + .../govmomi/object/host_storage_system.go | 80 + .../vmware/govmomi/object/host_system.go | 173 + .../object/host_virtual_nic_manager.go | 92 + .../vmware/govmomi/object/host_vsan_system.go | 87 + .../vmware/govmomi/object/http_nfc_lease.go | 143 + .../vmware/govmomi/object/network.go | 54 + .../govmomi/object/network_reference.go | 30 + .../vmware/govmomi/object/network_test.go | 23 + .../vmware/govmomi/object/ovf_manager.go | 103 + .../vmware/govmomi/object/resource_pool.go | 159 + .../vmware/govmomi/object/search_index.go | 162 + .../govmomi/object/search_index_test.go | 155 + .../vmware/govmomi/object/storage_pod.go | 21 + .../object/storage_resource_manager.go | 178 + .../github.com/vmware/govmomi/object/task.go | 52 + .../github.com/vmware/govmomi/object/types.go | 65 + .../vmware/govmomi/object/virtual_app.go | 126 + .../govmomi/object/virtual_device_list.go | 754 + .../object/virtual_device_list_test.go | 848 + .../govmomi/object/virtual_disk_manager.go | 145 + .../vmware/govmomi/object/virtual_machine.go | 468 + .../govmomi/object/virtual_machine_test.go | 20 + .../vmware_distributed_virtual_switch.go | 21 + vendor/github.com/vmware/govmomi/ovf/cim.go | 78 + vendor/github.com/vmware/govmomi/ovf/doc.go | 25 + vendor/github.com/vmware/govmomi/ovf/env.go | 98 + .../github.com/vmware/govmomi/ovf/env_test.go | 57 + .../github.com/vmware/govmomi/ovf/envelope.go | 191 + vendor/github.com/vmware/govmomi/ovf/ovf.go | 34 + .../github.com/vmware/govmomi/ovf/ovf_test.go | 85 + .../vmware/govmomi/property/collector.go | 
174 + .../vmware/govmomi/property/wait.go | 91 + .../vmware/govmomi/scripts/.gitignore | 1 + .../vmware/govmomi/scripts/contributors.sh | 24 + .../vmware/govmomi/scripts/debug-ls.sh | 43 + .../vmware/govmomi/scripts/govc-env.bash | 62 + .../vmware/govmomi/scripts/headers/go.txt | 15 + .../vmware/govmomi/scripts/headers/rb.txt | 13 + .../vmware/govmomi/scripts/license.sh | 48 + .../govmomi/scripts/vagrant/vcsa/.gitignore | 3 + .../govmomi/scripts/vagrant/vcsa/Vagrantfile | 13 + .../scripts/vagrant/vcsa/create-box.sh | 81 + .../govmomi/scripts/vagrant/vcsa/vagrant.sh | 23 + .../vmware/govmomi/scripts/wireshark-esx.sh | 68 + .../vmware/govmomi/scripts/wireshark-vcsa.sh | 65 + .../vmware/govmomi/session/keep_alive.go | 127 + .../vmware/govmomi/session/keep_alive_test.go | 280 + .../vmware/govmomi/session/manager.go | 163 + .../vmware/govmomi/session/manager_test.go | 106 + .../github.com/vmware/govmomi/task/error.go | 32 + vendor/github.com/vmware/govmomi/task/wait.go | 126 + vendor/github.com/vmware/govmomi/test/doc.go | 22 + .../vmware/govmomi/test/functional/helper.go | 130 + .../govmomi/test/functional/issue_242_test.go | 104 + .../github.com/vmware/govmomi/test/helper.go | 73 + .../github.com/vmware/govmomi/units/size.go | 89 + .../vmware/govmomi/units/size_test.go | 145 + .../github.com/vmware/govmomi/vim25/client.go | 123 + .../vmware/govmomi/vim25/client_test.go | 97 + .../vmware/govmomi/vim25/debug/debug.go | 81 + vendor/github.com/vmware/govmomi/vim25/doc.go | 29 + .../govmomi/vim25/methods/fault_test.go | 62 + .../vmware/govmomi/vim25/methods/internal.go | 123 + .../vmware/govmomi/vim25/methods/methods.go | 14343 +++++ .../govmomi/vim25/methods/service_content.go | 56 + .../vmware/govmomi/vim25/mo/ancestors.go | 92 + .../vmware/govmomi/vim25/mo/extra.go | 57 + .../mo/fixtures/cluster_host_property.xml | 15 + .../hostsystem_list_name_property.xml | 17 + .../vim25/mo/fixtures/nested_property.xml | 14 + .../mo/fixtures/not_authenticated_fault.xml | 42 + .../vim25/mo/fixtures/pointer_property.xml | 15 + .../github.com/vmware/govmomi/vim25/mo/mo.go | 1509 + .../vmware/govmomi/vim25/mo/reference.go | 26 + .../vmware/govmomi/vim25/mo/registry.go | 21 + .../vmware/govmomi/vim25/mo/retrieve.go | 171 + .../vmware/govmomi/vim25/mo/retrieve_test.go | 144 + .../vmware/govmomi/vim25/mo/type_info.go | 239 + .../vmware/govmomi/vim25/mo/type_info_test.go | 37 + .../govmomi/vim25/progress/aggregator.go | 73 + .../govmomi/vim25/progress/aggregator_test.go | 81 + .../govmomi/vim25/progress/common_test.go | 43 + .../vmware/govmomi/vim25/progress/doc.go | 32 + .../vmware/govmomi/vim25/progress/prefix.go | 54 + .../govmomi/vim25/progress/prefix_test.go | 40 + .../vmware/govmomi/vim25/progress/reader.go | 177 + .../govmomi/vim25/progress/reader_test.go | 90 + .../vmware/govmomi/vim25/progress/report.go | 26 + .../vmware/govmomi/vim25/progress/scale.go | 76 + .../govmomi/vim25/progress/scale_test.go | 45 + .../vmware/govmomi/vim25/progress/sinker.go | 33 + .../vmware/govmomi/vim25/progress/tee.go | 41 + .../vmware/govmomi/vim25/progress/tee_test.go | 46 + .../github.com/vmware/govmomi/vim25/retry.go | 105 + .../vmware/govmomi/vim25/retry_test.go | 86 + .../vmware/govmomi/vim25/soap/client.go | 484 + .../vmware/govmomi/vim25/soap/client_test.go | 43 + .../vmware/govmomi/vim25/soap/debug.go | 149 + .../vmware/govmomi/vim25/soap/error.go | 109 + .../vmware/govmomi/vim25/soap/soap.go | 45 + .../vmware/govmomi/vim25/soap/soap_test.go | 55 + .../vmware/govmomi/vim25/types/base.go | 19 + 
.../vmware/govmomi/vim25/types/base_test.go | 65 + .../vmware/govmomi/vim25/types/enum.go | 4097 ++ .../vmware/govmomi/vim25/types/fault.go | 32 + .../vmware/govmomi/vim25/types/helpers.go | 21 + .../vmware/govmomi/vim25/types/if.go | 3287 + .../vmware/govmomi/vim25/types/internal.go | 262 + .../vmware/govmomi/vim25/types/registry.go | 30 + .../govmomi/vim25/types/registry_test.go | 43 + .../vmware/govmomi/vim25/types/types.go | 49930 ++++++++++++++++ .../vmware/govmomi/vim25/types/types_test.go | 92 + .../vmware/govmomi/vim25/xml/LICENSE | 27 + .../vmware/govmomi/vim25/xml/atom_test.go | 56 + .../vmware/govmomi/vim25/xml/example_test.go | 151 + .../vmware/govmomi/vim25/xml/extras.go | 93 + .../vmware/govmomi/vim25/xml/extras_test.go | 222 + .../vmware/govmomi/vim25/xml/marshal.go | 949 + .../vmware/govmomi/vim25/xml/marshal_test.go | 1266 + .../vmware/govmomi/vim25/xml/read.go | 769 + .../vmware/govmomi/vim25/xml/read_test.go | 762 + .../vmware/govmomi/vim25/xml/typeinfo.go | 366 + .../vmware/govmomi/vim25/xml/xml.go | 1939 + .../vmware/govmomi/vim25/xml/xml_test.go | 726 + .../cloudstack/APIDiscoveryService.go | 96 + .../cloudstack/AccountService.go | 1833 + .../cloudstack/AddressService.go | 809 + .../cloudstack/AffinityGroupService.go | 832 + .../go-cloudstack/cloudstack/AlertService.go | 493 + .../cloudstack/AsyncjobService.go | 239 + .../cloudstack/AutoScaleService.go | 2753 + .../cloudstack/BaremetalService.go | 676 + .../cloudstack/BigSwitchVNSService.go | 281 + .../cloudstack/CertificateService.go | 140 + .../cloudstack/CloudIdentifierService.go | 74 + .../cloudstack/ClusterService.go | 1011 + .../cloudstack/ConfigurationService.go | 631 + .../cloudstack/DiskOfferingService.go | 638 + .../go-cloudstack/cloudstack/DomainService.go | 677 + .../go-cloudstack/cloudstack/EventService.go | 504 + .../cloudstack/FirewallService.go | 2462 + .../cloudstack/GuestOSService.go | 1022 + .../go-cloudstack/cloudstack/HostService.go | 2046 + .../cloudstack/HypervisorService.go | 293 + .../go-cloudstack/cloudstack/ISOService.go | 1928 + .../cloudstack/ImageStoreService.go | 861 + .../cloudstack/InternalLBService.go | 1002 + .../go-cloudstack/cloudstack/LDAPService.go | 239 + .../go-cloudstack/cloudstack/LimitService.go | 475 + .../cloudstack/LoadBalancerService.go | 4766 ++ .../go-cloudstack/cloudstack/LoginService.go | 17 + .../go-cloudstack/cloudstack/LogoutService.go | 17 + .../go-cloudstack/cloudstack/NATService.go | 640 + .../cloudstack/NetworkACLService.go | 1480 + .../cloudstack/NetworkDeviceService.go | 245 + .../cloudstack/NetworkOfferingService.go | 939 + .../cloudstack/NetworkService.go | 3711 ++ .../go-cloudstack/cloudstack/NicService.go | 320 + .../cloudstack/NiciraNVPService.go | 332 + .../cloudstack/OvsElementService.go | 262 + .../go-cloudstack/cloudstack/PodService.go | 870 + .../go-cloudstack/cloudstack/PoolService.go | 776 + .../cloudstack/PortableIPService.go | 387 + .../cloudstack/ProjectService.go | 1338 + .../go-cloudstack/cloudstack/RegionService.go | 336 + .../cloudstack/ResourcemetadataService.go | 423 + .../cloudstack/ResourcetagsService.go | 428 + .../go-cloudstack/cloudstack/RouterService.go | 1446 + .../go-cloudstack/cloudstack/S3Service.go | 281 + .../go-cloudstack/cloudstack/SSHService.go | 734 + .../cloudstack/SecurityGroupService.go | 1199 + .../cloudstack/ServiceOfferingService.go | 803 + .../cloudstack/SnapshotService.go | 1784 + .../cloudstack/StoragePoolService.go | 300 + .../cloudstack/StratosphereSSPService.go | 132 + 
.../go-cloudstack/cloudstack/SwiftService.go | 243 + .../cloudstack/SystemCapacityService.go | 178 + .../cloudstack/SystemVMService.go | 1026 + .../cloudstack/TemplateService.go | 1999 + .../go-cloudstack/cloudstack/UCSService.go | 582 + .../go-cloudstack/cloudstack/UsageService.go | 1120 + .../go-cloudstack/cloudstack/UserService.go | 1233 + .../go-cloudstack/cloudstack/VLANService.go | 952 + .../cloudstack/VMGroupService.go | 494 + .../go-cloudstack/cloudstack/VPCService.go | 2834 + .../go-cloudstack/cloudstack/VPNService.go | 2881 + .../cloudstack/VirtualMachineService.go | 5323 ++ .../go-cloudstack/cloudstack/VolumeService.go | 1939 + .../go-cloudstack/cloudstack/ZoneService.go | 1158 + .../go-cloudstack/cloudstack/cloudstack.go | 874 + vendor/github.com/xanzy/ssh-agent/.gitignore | 24 + vendor/github.com/xanzy/ssh-agent/LICENSE | 202 + vendor/github.com/xanzy/ssh-agent/README.md | 23 + .../xanzy/ssh-agent/pageant_windows.go | 146 + vendor/github.com/xanzy/ssh-agent/sshagent.go | 49 + .../xanzy/ssh-agent/sshagent_windows.go | 80 + .../github.com/ziutek/mymysql/mysql/errors.go | 533 + .../github.com/ziutek/mymysql/mysql/field.go | 15 + .../ziutek/mymysql/mysql/interface.go | 106 + vendor/github.com/ziutek/mymysql/mysql/row.go | 475 + .../github.com/ziutek/mymysql/mysql/status.go | 18 + .../github.com/ziutek/mymysql/mysql/types.go | 225 + .../ziutek/mymysql/mysql/types_test.go | 100 + .../github.com/ziutek/mymysql/mysql/utils.go | 302 + .../github.com/ziutek/mymysql/native/LICENSE | 24 + .../ziutek/mymysql/native/addons.go | 17 + .../ziutek/mymysql/native/bind_test.go | 386 + .../ziutek/mymysql/native/binding.go | 150 + .../ziutek/mymysql/native/codecs.go | 458 + .../ziutek/mymysql/native/command.go | 149 + .../ziutek/mymysql/native/common.go | 25 + .../ziutek/mymysql/native/consts.go | 176 + .../github.com/ziutek/mymysql/native/init.go | 84 + .../github.com/ziutek/mymysql/native/mysql.go | 834 + .../ziutek/mymysql/native/native_test.go | 1218 + .../ziutek/mymysql/native/packet.go | 250 + .../ziutek/mymysql/native/paramvalue.go | 161 + .../ziutek/mymysql/native/passwd.go | 108 + .../ziutek/mymysql/native/prepared.go | 163 + .../ziutek/mymysql/native/result.go | 406 + .../ziutek/mymysql/native/unsafe.go-disabled | 116 + .../github.com/ziutek/mymysql/thrsafe/LICENSE | 24 + .../ziutek/mymysql/thrsafe/thrsafe.go | 349 + .../ziutek/mymysql/thrsafe/thrsafe_test.go | 136 + .../x/crypto/curve25519/const_amd64.s | 20 + .../x/crypto/curve25519/cswap_amd64.s | 88 + .../x/crypto/curve25519/curve25519.go | 841 + .../x/crypto/curve25519/curve25519_test.go | 29 + vendor/golang.org/x/crypto/curve25519/doc.go | 23 + .../x/crypto/curve25519/freeze_amd64.s | 94 + .../x/crypto/curve25519/ladderstep_amd64.s | 1398 + .../x/crypto/curve25519/mont25519_amd64.go | 240 + .../x/crypto/curve25519/mul_amd64.s | 191 + .../x/crypto/curve25519/square_amd64.s | 153 + .../golang.org/x/crypto/ssh/agent/client.go | 615 + .../x/crypto/ssh/agent/client_test.go | 287 + .../golang.org/x/crypto/ssh/agent/forward.go | 103 + .../golang.org/x/crypto/ssh/agent/keyring.go | 184 + .../x/crypto/ssh/agent/keyring_test.go | 78 + .../golang.org/x/crypto/ssh/agent/server.go | 209 + .../x/crypto/ssh/agent/server_test.go | 77 + .../x/crypto/ssh/agent/testdata_test.go | 64 + .../golang.org/x/crypto/ssh/benchmark_test.go | 122 + vendor/golang.org/x/crypto/ssh/buffer.go | 98 + vendor/golang.org/x/crypto/ssh/buffer_test.go | 87 + vendor/golang.org/x/crypto/ssh/certs.go | 501 + vendor/golang.org/x/crypto/ssh/certs_test.go | 216 + 
vendor/golang.org/x/crypto/ssh/channel.go | 631 + vendor/golang.org/x/crypto/ssh/cipher.go | 552 + vendor/golang.org/x/crypto/ssh/cipher_test.go | 127 + vendor/golang.org/x/crypto/ssh/client.go | 213 + vendor/golang.org/x/crypto/ssh/client_auth.go | 441 + .../x/crypto/ssh/client_auth_test.go | 393 + vendor/golang.org/x/crypto/ssh/client_test.go | 39 + vendor/golang.org/x/crypto/ssh/common.go | 354 + vendor/golang.org/x/crypto/ssh/connection.go | 144 + vendor/golang.org/x/crypto/ssh/doc.go | 18 + .../golang.org/x/crypto/ssh/example_test.go | 211 + vendor/golang.org/x/crypto/ssh/handshake.go | 412 + .../golang.org/x/crypto/ssh/handshake_test.go | 415 + vendor/golang.org/x/crypto/ssh/kex.go | 526 + vendor/golang.org/x/crypto/ssh/kex_test.go | 50 + vendor/golang.org/x/crypto/ssh/keys.go | 720 + vendor/golang.org/x/crypto/ssh/keys_test.go | 437 + vendor/golang.org/x/crypto/ssh/mac.go | 57 + .../golang.org/x/crypto/ssh/mempipe_test.go | 110 + vendor/golang.org/x/crypto/ssh/messages.go | 725 + .../golang.org/x/crypto/ssh/messages_test.go | 254 + vendor/golang.org/x/crypto/ssh/mux.go | 356 + vendor/golang.org/x/crypto/ssh/mux_test.go | 525 + vendor/golang.org/x/crypto/ssh/server.go | 495 + vendor/golang.org/x/crypto/ssh/session.go | 605 + .../golang.org/x/crypto/ssh/session_test.go | 774 + vendor/golang.org/x/crypto/ssh/tcpip.go | 407 + vendor/golang.org/x/crypto/ssh/tcpip_test.go | 20 + .../x/crypto/ssh/terminal/terminal.go | 892 + .../x/crypto/ssh/terminal/terminal_test.go | 269 + .../golang.org/x/crypto/ssh/terminal/util.go | 128 + .../x/crypto/ssh/terminal/util_bsd.go | 12 + .../x/crypto/ssh/terminal/util_linux.go | 11 + .../x/crypto/ssh/terminal/util_windows.go | 174 + .../x/crypto/ssh/test/agent_unix_test.go | 59 + .../golang.org/x/crypto/ssh/test/cert_test.go | 47 + vendor/golang.org/x/crypto/ssh/test/doc.go | 7 + .../x/crypto/ssh/test/forward_unix_test.go | 160 + .../x/crypto/ssh/test/session_test.go | 340 + .../x/crypto/ssh/test/tcpip_test.go | 46 + .../x/crypto/ssh/test/test_unix_test.go | 261 + .../x/crypto/ssh/test/testdata_test.go | 64 + .../golang.org/x/crypto/ssh/testdata/doc.go | 8 + .../golang.org/x/crypto/ssh/testdata/keys.go | 43 + .../golang.org/x/crypto/ssh/testdata_test.go | 63 + vendor/golang.org/x/crypto/ssh/transport.go | 332 + .../golang.org/x/crypto/ssh/transport_test.go | 109 + vendor/golang.org/x/net/context/context.go | 447 + .../golang.org/x/net/context/context_test.go | 575 + .../x/net/context/ctxhttp/cancelreq.go | 19 + .../x/net/context/ctxhttp/cancelreq_go14.go | 23 + .../x/net/context/ctxhttp/ctxhttp.go | 140 + .../x/net/context/ctxhttp/ctxhttp_test.go | 176 + .../x/net/context/withtimeout_test.go | 26 + vendor/golang.org/x/oauth2/.travis.yml | 14 + vendor/golang.org/x/oauth2/AUTHORS | 3 + vendor/golang.org/x/oauth2/CONTRIBUTING.md | 31 + vendor/golang.org/x/oauth2/CONTRIBUTORS | 3 + vendor/golang.org/x/oauth2/LICENSE | 27 + vendor/golang.org/x/oauth2/README.md | 64 + .../x/oauth2/bitbucket/bitbucket.go | 16 + .../golang.org/x/oauth2/client_appengine.go | 25 + .../clientcredentials/clientcredentials.go | 112 + .../clientcredentials_test.go | 96 + vendor/golang.org/x/oauth2/example_test.go | 45 + .../golang.org/x/oauth2/facebook/facebook.go | 16 + vendor/golang.org/x/oauth2/github/github.go | 16 + .../golang.org/x/oauth2/google/appengine.go | 86 + .../x/oauth2/google/appengine_hook.go | 13 + .../x/oauth2/google/appenginevm_hook.go | 14 + vendor/golang.org/x/oauth2/google/default.go | 155 + .../x/oauth2/google/example_test.go | 150 + 
vendor/golang.org/x/oauth2/google/google.go | 145 + .../golang.org/x/oauth2/google/google_test.go | 67 + vendor/golang.org/x/oauth2/google/jwt.go | 71 + vendor/golang.org/x/oauth2/google/sdk.go | 168 + vendor/golang.org/x/oauth2/google/sdk_test.go | 46 + .../oauth2/google/testdata/gcloud/credentials | 122 + .../oauth2/google/testdata/gcloud/properties | 2 + vendor/golang.org/x/oauth2/internal/oauth2.go | 76 + .../x/oauth2/internal/oauth2_test.go | 62 + vendor/golang.org/x/oauth2/internal/token.go | 221 + .../x/oauth2/internal/token_test.go | 36 + .../golang.org/x/oauth2/internal/transport.go | 69 + .../x/oauth2/internal/transport_test.go | 38 + vendor/golang.org/x/oauth2/jws/jws.go | 172 + .../golang.org/x/oauth2/jwt/example_test.go | 31 + vendor/golang.org/x/oauth2/jwt/jwt.go | 153 + vendor/golang.org/x/oauth2/jwt/jwt_test.go | 134 + .../golang.org/x/oauth2/linkedin/linkedin.go | 16 + .../x/oauth2/microsoft/microsoft.go | 16 + vendor/golang.org/x/oauth2/oauth2.go | 337 + vendor/golang.org/x/oauth2/oauth2_test.go | 471 + .../x/oauth2/odnoklassniki/odnoklassniki.go | 16 + vendor/golang.org/x/oauth2/paypal/paypal.go | 22 + vendor/golang.org/x/oauth2/token.go | 158 + vendor/golang.org/x/oauth2/token_test.go | 72 + vendor/golang.org/x/oauth2/transport.go | 132 + vendor/golang.org/x/oauth2/transport_test.go | 108 + vendor/golang.org/x/oauth2/vk/vk.go | 16 + .../api/compute/v1/compute-api.json | 13967 +++++ .../api/compute/v1/compute-gen.go | 37579 ++++++++++++ .../api/container/v1/container-api.json | 690 + .../api/container/v1/container-gen.go | 1639 + .../google.golang.org/api/dns/v1/dns-api.json | 708 + .../google.golang.org/api/dns/v1/dns-gen.go | 1787 + .../api/gensupport/buffer.go | 77 + .../api/gensupport/buffer_test.go | 296 + .../google.golang.org/api/gensupport/doc.go | 10 + .../google.golang.org/api/gensupport/json.go | 172 + .../api/gensupport/json_test.go | 367 + .../google.golang.org/api/gensupport/media.go | 198 + .../api/gensupport/media_test.go | 142 + .../api/gensupport/params.go | 41 + .../api/gensupport/resumable.go | 134 + .../api/gensupport/resumable_test.go | 219 + .../api/gensupport/util_test.go | 25 + .../api/googleapi/googleapi.go | 418 + .../api/googleapi/googleapi_test.go | 380 + .../googleapi/internal/uritemplates/LICENSE | 18 + .../internal/uritemplates/uritemplates.go | 359 + .../googleapi/internal/uritemplates/utils.go | 13 + .../api/googleapi/transport/apikey.go | 38 + .../google.golang.org/api/googleapi/types.go | 182 + .../api/googleapi/types_test.go | 44 + .../api/pubsub/v1/pubsub-api.json | 1040 + .../api/pubsub/v1/pubsub-gen.go | 3316 + .../api/sqladmin/v1beta4/sqladmin-api.json | 2707 + .../api/sqladmin/v1beta4/sqladmin-gen.go | 6956 +++ .../api/storage/v1/storage-api.json | 2871 + .../api/storage/v1/storage-gen.go | 7632 +++ .../cloud/compute/metadata/metadata.go | 327 + .../google.golang.org/cloud/internal/cloud.go | 128 + .../internal/datastore/datastore_v1.pb.go | 1633 + .../internal/datastore/datastore_v1.proto | 606 + .../cloud/internal/opts/option.go | 25 + .../cloud/internal/testutil/context.go | 71 + .../cloud/internal/transport/cancelreq.go | 29 + .../internal/transport/cancelreq_legacy.go | 31 + .../cloud/internal/transport/dial.go | 135 + .../cloud/internal/transport/proto.go | 80 + 4144 files changed, 974543 insertions(+), 22 deletions(-) create mode 100644 Godeps/Godeps.json create mode 100644 Godeps/Readme create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/autorest.go 
create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/azure/azure.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/azure/example/README.md create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/azure/example/main.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/azure/token.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/client.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/date/date.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/date/time.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/error.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/mocks/helpers.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/mocks/mocks.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/preparer.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/responder.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/sender.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/to/convert.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/utility.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/version.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/dgrijalva/jwt-go/.gitignore create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/dgrijalva/jwt-go/.travis.yml create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/dgrijalva/jwt-go/LICENSE create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/dgrijalva/jwt-go/README.md create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/dgrijalva/jwt-go/VERSION_HISTORY.md create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/dgrijalva/jwt-go/cmd/jwt/app.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/dgrijalva/jwt-go/doc.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/dgrijalva/jwt-go/ecdsa.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/dgrijalva/jwt-go/ecdsa_utils.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/dgrijalva/jwt-go/errors.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/dgrijalva/jwt-go/hmac.go create mode 100644 
vendor/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/dgrijalva/jwt-go/parser.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/dgrijalva/jwt-go/rsa.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/dgrijalva/jwt-go/rsa_pss.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/dgrijalva/jwt-go/rsa_utils.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/dgrijalva/jwt-go/signing_method.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/dgrijalva/jwt-go/test/ec256-private.pem create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/dgrijalva/jwt-go/test/ec256-public.pem create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/dgrijalva/jwt-go/test/ec384-private.pem create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/dgrijalva/jwt-go/test/ec384-public.pem create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/dgrijalva/jwt-go/test/ec512-private.pem create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/dgrijalva/jwt-go/test/ec512-public.pem create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/dgrijalva/jwt-go/test/hmacTestKey create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/dgrijalva/jwt-go/test/sample_key create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/dgrijalva/jwt-go/test/sample_key.pub create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/dgrijalva/jwt-go/token.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/golang.org/x/crypto/pkcs12/bmp-string.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/golang.org/x/crypto/pkcs12/crypto.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/golang.org/x/crypto/pkcs12/errors.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/golang.org/x/crypto/pkcs12/internal/rc2/rc2.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/golang.org/x/crypto/pkcs12/mac.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/golang.org/x/crypto/pkcs12/pbkdf.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/golang.org/x/crypto/pkcs12/pkcs12.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/golang.org/x/crypto/pkcs12/safebags.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/arm/cdn/client.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/arm/cdn/customdomains.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/arm/cdn/endpoints.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/arm/cdn/models.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/arm/cdn/nameavailability.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/arm/cdn/operations.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/arm/cdn/origins.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/arm/cdn/profiles.go create mode 100644 
vendor/github.com/Azure/azure-sdk-for-go/arm/cdn/version.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/arm/compute/availabilitysets.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/arm/compute/client.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/arm/compute/models.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/arm/compute/usageoperations.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/arm/compute/version.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/arm/compute/virtualmachineextensionimages.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/arm/compute/virtualmachineextensions.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/arm/compute/virtualmachineimages.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/arm/compute/virtualmachines.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/arm/compute/virtualmachinescalesets.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/arm/compute/virtualmachinescalesetvms.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/arm/compute/virtualmachinesizes.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/arm/network/applicationgateways.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/arm/network/client.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/arm/network/expressroutecircuitauthorizations.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/arm/network/expressroutecircuitpeerings.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/arm/network/expressroutecircuits.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/arm/network/expressrouteserviceproviders.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/arm/network/interfaces.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/arm/network/loadbalancers.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/arm/network/localnetworkgateways.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/arm/network/models.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/arm/network/publicipaddresses.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/arm/network/routes.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/arm/network/routetables.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/arm/network/securitygroups.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/arm/network/securityrules.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/arm/network/subnets.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/arm/network/usages.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/arm/network/version.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/arm/network/virtualnetworkgatewayconnections.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/arm/network/virtualnetworkgateways.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/arm/network/virtualnetworks.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/arm/resources/resources/client.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/arm/resources/resources/deploymentoperations.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/arm/resources/resources/deployments.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/arm/resources/resources/groups.go create mode 100644 
vendor/github.com/Azure/azure-sdk-for-go/arm/resources/resources/models.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/arm/resources/resources/policyassignments.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/arm/resources/resources/policydefinitions.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/arm/resources/resources/provideroperationdetails.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/arm/resources/resources/providers.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/arm/resources/resources/resources.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/arm/resources/resources/tags.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/arm/resources/resources/version.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/arm/scheduler/client.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/arm/scheduler/jobcollections.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/arm/scheduler/jobs.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/arm/scheduler/models.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/arm/scheduler/version.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/arm/storage/accounts.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/arm/storage/client.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/arm/storage/models.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/arm/storage/usageoperations.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/arm/storage/version.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/core/http/cgi/child.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/core/http/cgi/child_test.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/core/http/cgi/host.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/core/http/cgi/host_test.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/core/http/cgi/matryoshka_test.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/core/http/cgi/plan9_test.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/core/http/cgi/posix_test.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/core/http/cgi/testdata/test.cgi create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/core/http/chunked.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/core/http/chunked_test.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/core/http/client.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/core/http/client_test.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/core/http/cookie.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/core/http/cookie_test.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/core/http/cookiejar/jar.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/core/http/cookiejar/jar_test.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/core/http/cookiejar/punycode.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/core/http/cookiejar/punycode_test.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/core/http/doc.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/core/http/example_test.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/core/http/export_test.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/core/http/fcgi/child.go create mode 100644 
vendor/github.com/Azure/azure-sdk-for-go/core/http/fcgi/fcgi.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/core/http/fcgi/fcgi_test.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/core/http/filetransport.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/core/http/filetransport_test.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/core/http/fs.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/core/http/fs_test.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/core/http/header.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/core/http/header_test.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/core/http/httptest/example_test.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/core/http/httptest/recorder.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/core/http/httptest/recorder_test.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/core/http/httptest/server.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/core/http/httptest/server_test.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/core/http/httputil/chunked.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/core/http/httputil/chunked_test.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/core/http/httputil/dump.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/core/http/httputil/dump_test.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/core/http/httputil/httputil.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/core/http/httputil/persist.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/core/http/httputil/reverseproxy.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/core/http/httputil/reverseproxy_test.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/core/http/jar.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/core/http/lex.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/core/http/lex_test.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/core/http/npn_test.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/core/http/pprof/pprof.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/core/http/proxy_test.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/core/http/race.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/core/http/range_test.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/core/http/readrequest_test.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/core/http/request.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/core/http/request_test.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/core/http/requestwrite_test.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/core/http/response.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/core/http/response_test.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/core/http/responsewrite_test.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/core/http/serve_test.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/core/http/server.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/core/http/sniff.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/core/http/sniff_test.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/core/http/status.go create mode 100644 
vendor/github.com/Azure/azure-sdk-for-go/core/http/testdata/file create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/core/http/testdata/index.html create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/core/http/testdata/style.css create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/core/http/transfer.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/core/http/transfer_test.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/core/http/transport.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/core/http/transport_test.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/core/http/triv.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/core/http/z_last_test.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/core/tls/alert.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/core/tls/cipher_suites.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/core/tls/common.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/core/tls/conn.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/core/tls/conn_test.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/core/tls/generate_cert.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/core/tls/handshake_client.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/core/tls/handshake_client_test.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/core/tls/handshake_messages.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/core/tls/handshake_messages_test.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/core/tls/handshake_server.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/core/tls/handshake_server_test.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/core/tls/key_agreement.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/core/tls/prf.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/core/tls/prf_test.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/core/tls/ticket.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/core/tls/tls.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/core/tls/tls_test.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/management/affinitygroup/client.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/management/affinitygroup/entities.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/management/client.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/management/errors.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/management/errors_test.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/management/hostedservice/client.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/management/hostedservice/entities.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/management/http.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/management/location/client.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/management/location/entities.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/management/networksecuritygroup/client.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/management/networksecuritygroup/entities.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/management/operations.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/management/osimage/client.go create mode 
100644 vendor/github.com/Azure/azure-sdk-for-go/management/osimage/entities.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/management/publishSettings.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/management/sql/client.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/management/sql/entities.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/management/storageservice/client.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/management/storageservice/entities.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/management/storageservice/entities_test.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/management/testutils/managementclient.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/management/util.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/management/virtualmachine/client.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/management/virtualmachine/entities.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/management/virtualmachine/entities_test.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/management/virtualmachine/resourceextensions.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/management/virtualmachine/resourceextensions_test.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/management/virtualmachinedisk/client.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/management/virtualmachinedisk/entities.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/management/virtualmachineimage/client.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/management/virtualmachineimage/entities.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/management/virtualmachineimage/entities_test.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/management/virtualnetwork/client.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/management/virtualnetwork/entities.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/management/vmutils/configurationset.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/management/vmutils/datadisks.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/management/vmutils/deployment.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/management/vmutils/examples_test.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/management/vmutils/extensions.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/management/vmutils/extensions_test.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/management/vmutils/integration_test.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/management/vmutils/network.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/management/vmutils/rolesize.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/management/vmutils/rolestate.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/management/vmutils/vmutils.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/management/vmutils/vmutils_test.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/storage/blob.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/storage/blob_test.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/storage/client.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/storage/client_test.go create mode 100644 
vendor/github.com/Azure/azure-sdk-for-go/storage/file.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/storage/file_test.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/storage/queue.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/storage/queue_test.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/storage/util.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/storage/util_test.go create mode 100644 vendor/github.com/DreamItGetIT/statuscake/LICENSE create mode 100644 vendor/github.com/DreamItGetIT/statuscake/README.md create mode 100644 vendor/github.com/DreamItGetIT/statuscake/client.go create mode 100644 vendor/github.com/DreamItGetIT/statuscake/client_test.go create mode 100644 vendor/github.com/DreamItGetIT/statuscake/cmd/statuscake/main.go create mode 100644 vendor/github.com/DreamItGetIT/statuscake/doc.go create mode 100644 vendor/github.com/DreamItGetIT/statuscake/errors.go create mode 100644 vendor/github.com/DreamItGetIT/statuscake/fixtures/auth_error.json create mode 100644 vendor/github.com/DreamItGetIT/statuscake/fixtures/tests_all_ok.json create mode 100644 vendor/github.com/DreamItGetIT/statuscake/fixtures/tests_delete_error.json create mode 100644 vendor/github.com/DreamItGetIT/statuscake/fixtures/tests_delete_ok.json create mode 100644 vendor/github.com/DreamItGetIT/statuscake/fixtures/tests_detail_ok.json create mode 100644 vendor/github.com/DreamItGetIT/statuscake/fixtures/tests_update_error.json create mode 100644 vendor/github.com/DreamItGetIT/statuscake/fixtures/tests_update_error_slice_of_issues.json create mode 100644 vendor/github.com/DreamItGetIT/statuscake/fixtures/tests_update_ok.json create mode 100644 vendor/github.com/DreamItGetIT/statuscake/makefile create mode 100644 vendor/github.com/DreamItGetIT/statuscake/responses.go create mode 100644 vendor/github.com/DreamItGetIT/statuscake/tests.go create mode 100644 vendor/github.com/DreamItGetIT/statuscake/tests_test.go create mode 100644 vendor/github.com/apparentlymart/go-cidr/cidr/cidr.go create mode 100644 vendor/github.com/apparentlymart/go-cidr/cidr/cidr_test.go create mode 100644 vendor/github.com/apparentlymart/go-cidr/cidr/wrangling.go create mode 100644 vendor/github.com/apparentlymart/go-rundeck-api/rundeck/client.go create mode 100644 vendor/github.com/apparentlymart/go-rundeck-api/rundeck/error.go create mode 100644 vendor/github.com/apparentlymart/go-rundeck-api/rundeck/job.go create mode 100644 vendor/github.com/apparentlymart/go-rundeck-api/rundeck/job_test.go create mode 100644 vendor/github.com/apparentlymart/go-rundeck-api/rundeck/key.go create mode 100644 vendor/github.com/apparentlymart/go-rundeck-api/rundeck/project.go create mode 100644 vendor/github.com/apparentlymart/go-rundeck-api/rundeck/system_info.go create mode 100644 vendor/github.com/apparentlymart/go-rundeck-api/rundeck/testing.go create mode 100644 vendor/github.com/apparentlymart/go-rundeck-api/rundeck/util.go create mode 100644 vendor/github.com/armon/circbuf/.gitignore create mode 100644 vendor/github.com/armon/circbuf/LICENSE create mode 100644 vendor/github.com/armon/circbuf/README.md create mode 100644 vendor/github.com/armon/circbuf/circbuf.go create mode 100644 vendor/github.com/armon/circbuf/circbuf_test.go create mode 100644 vendor/github.com/armon/go-radix/.gitignore create mode 100644 vendor/github.com/armon/go-radix/.travis.yml create mode 100644 vendor/github.com/armon/go-radix/LICENSE create mode 100644 vendor/github.com/armon/go-radix/README.md 
create mode 100644 vendor/github.com/armon/go-radix/radix.go create mode 100644 vendor/github.com/armon/go-radix/radix_test.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/awserr/error.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/awserr/types.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/awsutil/copy.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/awsutil/copy_test.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/awsutil/equal.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/awsutil/equal_test.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/awsutil/path_value.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/awsutil/path_value_test.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/awsutil/prettify.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/awsutil/string_value.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/client/client.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/client/default_retryer.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/client/metadata/client_info.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/config.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/config_test.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/convert_types.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/convert_types_test.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/corehandlers/handlers.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/corehandlers/handlers_test.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/corehandlers/param_validator.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/corehandlers/param_validator_test.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/credentials/chain_provider.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/credentials/chain_provider_test.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/credentials/credentials.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/credentials/credentials_test.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds/ec2_role_provider.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds/ec2_role_provider_test.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/credentials/env_provider.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/credentials/env_provider_test.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/credentials/example.ini create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/credentials/shared_credentials_provider.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/credentials/shared_credentials_provider_test.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/credentials/static_provider.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/credentials/static_provider_test.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/assume_role_provider.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/assume_role_provider_test.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/defaults/defaults.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/api.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/api_test.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/service.go create mode 100644 
vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/service_test.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/errors.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/logger.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/request/handlers.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/request/handlers_test.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/request/request.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/request/request_pagination.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/request/request_pagination_test.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/request/request_test.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/request/retryer.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/session/session.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/session/session_test.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/types.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/types_test.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/version.go create mode 100644 vendor/github.com/aws/aws-sdk-go/private/endpoints/endpoints.go create mode 100644 vendor/github.com/aws/aws-sdk-go/private/endpoints/endpoints.json create mode 100644 vendor/github.com/aws/aws-sdk-go/private/endpoints/endpoints_map.go create mode 100644 vendor/github.com/aws/aws-sdk-go/private/endpoints/endpoints_test.go create mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/ec2query/build.go create mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/ec2query/build_bench_test.go create mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/ec2query/build_test.go create mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/ec2query/unmarshal.go create mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/ec2query/unmarshal_test.go create mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/json/jsonutil/build.go create mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/json/jsonutil/build_test.go create mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/json/jsonutil/unmarshal.go create mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/jsonrpc/build_bench_test.go create mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/jsonrpc/build_test.go create mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/jsonrpc/jsonrpc.go create mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/jsonrpc/unmarshal_test.go create mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/query/build.go create mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/query/build_test.go create mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/query/queryutil/queryutil.go create mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal.go create mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal_error.go create mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal_test.go create mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/rest/build.go create mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/rest/payload.go create mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/rest/unmarshal.go create mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/restjson/build_bench_test.go create mode 100644 
vendor/github.com/aws/aws-sdk-go/private/protocol/restjson/build_test.go create mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/restjson/restjson.go create mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/restjson/unmarshal_test.go create mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/restxml/build_bench_test.go create mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/restxml/build_test.go create mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/restxml/restxml.go create mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/restxml/unmarshal_test.go create mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/build.go create mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/unmarshal.go create mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/xml_to_struct.go create mode 100644 vendor/github.com/aws/aws-sdk-go/private/signer/v4/functional_test.go create mode 100644 vendor/github.com/aws/aws-sdk-go/private/signer/v4/header_rules.go create mode 100644 vendor/github.com/aws/aws-sdk-go/private/signer/v4/header_rules_test.go create mode 100644 vendor/github.com/aws/aws-sdk-go/private/signer/v4/v4.go create mode 100644 vendor/github.com/aws/aws-sdk-go/private/signer/v4/v4_test.go create mode 100644 vendor/github.com/aws/aws-sdk-go/private/waiter/waiter.go create mode 100644 vendor/github.com/aws/aws-sdk-go/private/waiter/waiter_test.go create mode 100644 vendor/github.com/aws/aws-sdk-go/service/autoscaling/api.go create mode 100644 vendor/github.com/aws/aws-sdk-go/service/autoscaling/autoscalingiface/interface.go create mode 100644 vendor/github.com/aws/aws-sdk-go/service/autoscaling/examples_test.go create mode 100644 vendor/github.com/aws/aws-sdk-go/service/autoscaling/service.go create mode 100644 vendor/github.com/aws/aws-sdk-go/service/cloudformation/api.go create mode 100644 vendor/github.com/aws/aws-sdk-go/service/cloudformation/cloudformationiface/interface.go create mode 100644 vendor/github.com/aws/aws-sdk-go/service/cloudformation/examples_test.go create mode 100644 vendor/github.com/aws/aws-sdk-go/service/cloudformation/service.go create mode 100644 vendor/github.com/aws/aws-sdk-go/service/cloudformation/waiters.go create mode 100644 vendor/github.com/aws/aws-sdk-go/service/cloudtrail/api.go create mode 100644 vendor/github.com/aws/aws-sdk-go/service/cloudtrail/cloudtrailiface/interface.go create mode 100644 vendor/github.com/aws/aws-sdk-go/service/cloudtrail/examples_test.go create mode 100644 vendor/github.com/aws/aws-sdk-go/service/cloudtrail/service.go create mode 100644 vendor/github.com/aws/aws-sdk-go/service/cloudwatch/api.go create mode 100644 vendor/github.com/aws/aws-sdk-go/service/cloudwatch/cloudwatchiface/interface.go create mode 100644 vendor/github.com/aws/aws-sdk-go/service/cloudwatch/examples_test.go create mode 100644 vendor/github.com/aws/aws-sdk-go/service/cloudwatch/service.go create mode 100644 vendor/github.com/aws/aws-sdk-go/service/cloudwatchlogs/api.go create mode 100644 vendor/github.com/aws/aws-sdk-go/service/cloudwatchlogs/cloudwatchlogsiface/interface.go create mode 100644 vendor/github.com/aws/aws-sdk-go/service/cloudwatchlogs/examples_test.go create mode 100644 vendor/github.com/aws/aws-sdk-go/service/cloudwatchlogs/service.go create mode 100644 vendor/github.com/aws/aws-sdk-go/service/codecommit/api.go create mode 100644 vendor/github.com/aws/aws-sdk-go/service/codecommit/codecommitiface/interface.go create mode 
100644 vendor/github.com/aws/aws-sdk-go/service/codecommit/examples_test.go create mode 100644 vendor/github.com/aws/aws-sdk-go/service/codecommit/service.go create mode 100644 vendor/github.com/aws/aws-sdk-go/service/codedeploy/api.go create mode 100644 vendor/github.com/aws/aws-sdk-go/service/codedeploy/codedeployiface/interface.go create mode 100644 vendor/github.com/aws/aws-sdk-go/service/codedeploy/examples_test.go create mode 100644 vendor/github.com/aws/aws-sdk-go/service/codedeploy/service.go create mode 100644 vendor/github.com/aws/aws-sdk-go/service/directoryservice/api.go create mode 100644 vendor/github.com/aws/aws-sdk-go/service/directoryservice/directoryserviceiface/interface.go create mode 100644 vendor/github.com/aws/aws-sdk-go/service/directoryservice/examples_test.go create mode 100644 vendor/github.com/aws/aws-sdk-go/service/directoryservice/service.go create mode 100644 vendor/github.com/aws/aws-sdk-go/service/dynamodb/api.go create mode 100644 vendor/github.com/aws/aws-sdk-go/service/dynamodb/customizations.go create mode 100644 vendor/github.com/aws/aws-sdk-go/service/dynamodb/customizations_test.go create mode 100644 vendor/github.com/aws/aws-sdk-go/service/dynamodb/dynamodbattribute/converter.go create mode 100644 vendor/github.com/aws/aws-sdk-go/service/dynamodb/dynamodbattribute/converter_test.go create mode 100644 vendor/github.com/aws/aws-sdk-go/service/dynamodb/dynamodbattribute/examples_test.go create mode 100644 vendor/github.com/aws/aws-sdk-go/service/dynamodb/dynamodbiface/interface.go create mode 100644 vendor/github.com/aws/aws-sdk-go/service/dynamodb/examples_test.go create mode 100644 vendor/github.com/aws/aws-sdk-go/service/dynamodb/service.go create mode 100644 vendor/github.com/aws/aws-sdk-go/service/dynamodb/waiters.go create mode 100644 vendor/github.com/aws/aws-sdk-go/service/ec2/api.go create mode 100644 vendor/github.com/aws/aws-sdk-go/service/ec2/customizations.go create mode 100644 vendor/github.com/aws/aws-sdk-go/service/ec2/customizations_test.go create mode 100644 vendor/github.com/aws/aws-sdk-go/service/ec2/ec2iface/interface.go create mode 100644 vendor/github.com/aws/aws-sdk-go/service/ec2/examples_test.go create mode 100644 vendor/github.com/aws/aws-sdk-go/service/ec2/service.go create mode 100644 vendor/github.com/aws/aws-sdk-go/service/ec2/waiters.go create mode 100644 vendor/github.com/aws/aws-sdk-go/service/ecr/api.go create mode 100644 vendor/github.com/aws/aws-sdk-go/service/ecr/ecriface/interface.go create mode 100644 vendor/github.com/aws/aws-sdk-go/service/ecr/examples_test.go create mode 100644 vendor/github.com/aws/aws-sdk-go/service/ecr/service.go create mode 100644 vendor/github.com/aws/aws-sdk-go/service/ecs/api.go create mode 100644 vendor/github.com/aws/aws-sdk-go/service/ecs/ecsiface/interface.go create mode 100644 vendor/github.com/aws/aws-sdk-go/service/ecs/examples_test.go create mode 100644 vendor/github.com/aws/aws-sdk-go/service/ecs/service.go create mode 100644 vendor/github.com/aws/aws-sdk-go/service/ecs/waiters.go create mode 100644 vendor/github.com/aws/aws-sdk-go/service/efs/api.go create mode 100644 vendor/github.com/aws/aws-sdk-go/service/efs/efsiface/interface.go create mode 100644 vendor/github.com/aws/aws-sdk-go/service/efs/examples_test.go create mode 100644 vendor/github.com/aws/aws-sdk-go/service/efs/service.go create mode 100644 vendor/github.com/aws/aws-sdk-go/service/elasticache/api.go create mode 100644 vendor/github.com/aws/aws-sdk-go/service/elasticache/elasticacheiface/interface.go create mode 
100644 vendor/github.com/aws/aws-sdk-go/service/elasticache/examples_test.go create mode 100644 vendor/github.com/aws/aws-sdk-go/service/elasticache/service.go create mode 100644 vendor/github.com/aws/aws-sdk-go/service/elasticache/waiters.go create mode 100644 vendor/github.com/aws/aws-sdk-go/service/elasticsearchservice/api.go create mode 100644 vendor/github.com/aws/aws-sdk-go/service/elasticsearchservice/elasticsearchserviceiface/interface.go create mode 100644 vendor/github.com/aws/aws-sdk-go/service/elasticsearchservice/examples_test.go create mode 100644 vendor/github.com/aws/aws-sdk-go/service/elasticsearchservice/service.go create mode 100644 vendor/github.com/aws/aws-sdk-go/service/elb/api.go create mode 100644 vendor/github.com/aws/aws-sdk-go/service/elb/elbiface/interface.go create mode 100644 vendor/github.com/aws/aws-sdk-go/service/elb/examples_test.go create mode 100644 vendor/github.com/aws/aws-sdk-go/service/elb/service.go create mode 100644 vendor/github.com/aws/aws-sdk-go/service/elb/waiters.go create mode 100644 vendor/github.com/aws/aws-sdk-go/service/firehose/api.go create mode 100644 vendor/github.com/aws/aws-sdk-go/service/firehose/examples_test.go create mode 100644 vendor/github.com/aws/aws-sdk-go/service/firehose/firehoseiface/interface.go create mode 100644 vendor/github.com/aws/aws-sdk-go/service/firehose/service.go create mode 100644 vendor/github.com/aws/aws-sdk-go/service/glacier/api.go create mode 100644 vendor/github.com/aws/aws-sdk-go/service/glacier/customizations.go create mode 100644 vendor/github.com/aws/aws-sdk-go/service/glacier/customizations_test.go create mode 100644 vendor/github.com/aws/aws-sdk-go/service/glacier/examples_test.go create mode 100644 vendor/github.com/aws/aws-sdk-go/service/glacier/glacieriface/interface.go create mode 100644 vendor/github.com/aws/aws-sdk-go/service/glacier/service.go create mode 100644 vendor/github.com/aws/aws-sdk-go/service/glacier/treehash.go create mode 100644 vendor/github.com/aws/aws-sdk-go/service/glacier/treehash_test.go create mode 100644 vendor/github.com/aws/aws-sdk-go/service/glacier/waiters.go create mode 100644 vendor/github.com/aws/aws-sdk-go/service/iam/api.go create mode 100644 vendor/github.com/aws/aws-sdk-go/service/iam/examples_test.go create mode 100644 vendor/github.com/aws/aws-sdk-go/service/iam/iamiface/interface.go create mode 100644 vendor/github.com/aws/aws-sdk-go/service/iam/service.go create mode 100644 vendor/github.com/aws/aws-sdk-go/service/iam/waiters.go create mode 100644 vendor/github.com/aws/aws-sdk-go/service/kinesis/api.go create mode 100644 vendor/github.com/aws/aws-sdk-go/service/kinesis/examples_test.go create mode 100644 vendor/github.com/aws/aws-sdk-go/service/kinesis/kinesisiface/interface.go create mode 100644 vendor/github.com/aws/aws-sdk-go/service/kinesis/service.go create mode 100644 vendor/github.com/aws/aws-sdk-go/service/kinesis/waiters.go create mode 100644 vendor/github.com/aws/aws-sdk-go/service/lambda/api.go create mode 100644 vendor/github.com/aws/aws-sdk-go/service/lambda/examples_test.go create mode 100644 vendor/github.com/aws/aws-sdk-go/service/lambda/lambdaiface/interface.go create mode 100644 vendor/github.com/aws/aws-sdk-go/service/lambda/service.go create mode 100644 vendor/github.com/aws/aws-sdk-go/service/opsworks/api.go create mode 100644 vendor/github.com/aws/aws-sdk-go/service/opsworks/examples_test.go create mode 100644 vendor/github.com/aws/aws-sdk-go/service/opsworks/opsworksiface/interface.go create mode 100644 
vendor/github.com/aws/aws-sdk-go/service/opsworks/service.go create mode 100644 vendor/github.com/aws/aws-sdk-go/service/opsworks/waiters.go create mode 100644 vendor/github.com/aws/aws-sdk-go/service/rds/api.go create mode 100644 vendor/github.com/aws/aws-sdk-go/service/rds/examples_test.go create mode 100644 vendor/github.com/aws/aws-sdk-go/service/rds/rdsiface/interface.go create mode 100644 vendor/github.com/aws/aws-sdk-go/service/rds/service.go create mode 100644 vendor/github.com/aws/aws-sdk-go/service/rds/waiters.go create mode 100644 vendor/github.com/aws/aws-sdk-go/service/redshift/api.go create mode 100644 vendor/github.com/aws/aws-sdk-go/service/redshift/examples_test.go create mode 100644 vendor/github.com/aws/aws-sdk-go/service/redshift/redshiftiface/interface.go create mode 100644 vendor/github.com/aws/aws-sdk-go/service/redshift/service.go create mode 100644 vendor/github.com/aws/aws-sdk-go/service/redshift/waiters.go create mode 100644 vendor/github.com/aws/aws-sdk-go/service/route53/api.go create mode 100644 vendor/github.com/aws/aws-sdk-go/service/route53/customizations.go create mode 100644 vendor/github.com/aws/aws-sdk-go/service/route53/customizations_test.go create mode 100644 vendor/github.com/aws/aws-sdk-go/service/route53/examples_test.go create mode 100644 vendor/github.com/aws/aws-sdk-go/service/route53/route53iface/interface.go create mode 100644 vendor/github.com/aws/aws-sdk-go/service/route53/service.go create mode 100644 vendor/github.com/aws/aws-sdk-go/service/s3/api.go create mode 100644 vendor/github.com/aws/aws-sdk-go/service/s3/bucket_location.go create mode 100644 vendor/github.com/aws/aws-sdk-go/service/s3/bucket_location_test.go create mode 100644 vendor/github.com/aws/aws-sdk-go/service/s3/content_md5.go create mode 100644 vendor/github.com/aws/aws-sdk-go/service/s3/customizations.go create mode 100644 vendor/github.com/aws/aws-sdk-go/service/s3/customizations_test.go create mode 100644 vendor/github.com/aws/aws-sdk-go/service/s3/examples_test.go create mode 100644 vendor/github.com/aws/aws-sdk-go/service/s3/host_style_bucket.go create mode 100644 vendor/github.com/aws/aws-sdk-go/service/s3/host_style_bucket_test.go create mode 100644 vendor/github.com/aws/aws-sdk-go/service/s3/s3iface/interface.go create mode 100644 vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/doc.go create mode 100644 vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/download.go create mode 100644 vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/download_test.go create mode 100644 vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/s3manageriface/interface.go create mode 100644 vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/shared_test.go create mode 100644 vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/upload.go create mode 100644 vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/upload_test.go create mode 100644 vendor/github.com/aws/aws-sdk-go/service/s3/service.go create mode 100644 vendor/github.com/aws/aws-sdk-go/service/s3/sse.go create mode 100644 vendor/github.com/aws/aws-sdk-go/service/s3/sse_test.go create mode 100644 vendor/github.com/aws/aws-sdk-go/service/s3/statusok_error.go create mode 100644 vendor/github.com/aws/aws-sdk-go/service/s3/statusok_error_test.go create mode 100644 vendor/github.com/aws/aws-sdk-go/service/s3/unmarshal_error.go create mode 100644 vendor/github.com/aws/aws-sdk-go/service/s3/unmarshal_error_test.go create mode 100644 vendor/github.com/aws/aws-sdk-go/service/s3/waiters.go create mode 100644 
vendor/github.com/aws/aws-sdk-go/service/sns/api.go create mode 100644 vendor/github.com/aws/aws-sdk-go/service/sns/examples_test.go create mode 100644 vendor/github.com/aws/aws-sdk-go/service/sns/service.go create mode 100644 vendor/github.com/aws/aws-sdk-go/service/sns/snsiface/interface.go create mode 100644 vendor/github.com/aws/aws-sdk-go/service/sqs/api.go create mode 100644 vendor/github.com/aws/aws-sdk-go/service/sqs/api_test.go create mode 100644 vendor/github.com/aws/aws-sdk-go/service/sqs/checksums.go create mode 100644 vendor/github.com/aws/aws-sdk-go/service/sqs/checksums_test.go create mode 100644 vendor/github.com/aws/aws-sdk-go/service/sqs/customizations.go create mode 100644 vendor/github.com/aws/aws-sdk-go/service/sqs/examples_test.go create mode 100644 vendor/github.com/aws/aws-sdk-go/service/sqs/service.go create mode 100644 vendor/github.com/aws/aws-sdk-go/service/sqs/sqsiface/interface.go create mode 100644 vendor/github.com/bgentry/speakeasy/.gitignore create mode 100644 vendor/github.com/bgentry/speakeasy/LICENSE_WINDOWS create mode 100644 vendor/github.com/bgentry/speakeasy/Readme.md create mode 100644 vendor/github.com/bgentry/speakeasy/example/main.go create mode 100644 vendor/github.com/bgentry/speakeasy/speakeasy.go create mode 100644 vendor/github.com/bgentry/speakeasy/speakeasy_unix.go create mode 100644 vendor/github.com/bgentry/speakeasy/speakeasy_windows.go create mode 100644 vendor/github.com/coreos/etcd/Godeps/_workspace/src/github.com/ugorji/go/codec/0doc.go create mode 100644 vendor/github.com/coreos/etcd/Godeps/_workspace/src/github.com/ugorji/go/codec/README.md create mode 100644 vendor/github.com/coreos/etcd/Godeps/_workspace/src/github.com/ugorji/go/codec/binc.go create mode 100644 vendor/github.com/coreos/etcd/Godeps/_workspace/src/github.com/ugorji/go/codec/cbor.go create mode 100644 vendor/github.com/coreos/etcd/Godeps/_workspace/src/github.com/ugorji/go/codec/cbor_test.go create mode 100644 vendor/github.com/coreos/etcd/Godeps/_workspace/src/github.com/ugorji/go/codec/codec_test.go create mode 100644 vendor/github.com/coreos/etcd/Godeps/_workspace/src/github.com/ugorji/go/codec/codecgen/README.md create mode 100644 vendor/github.com/coreos/etcd/Godeps/_workspace/src/github.com/ugorji/go/codec/codecgen/gen.go create mode 100644 vendor/github.com/coreos/etcd/Godeps/_workspace/src/github.com/ugorji/go/codec/codecgen/z.go create mode 100644 vendor/github.com/coreos/etcd/Godeps/_workspace/src/github.com/ugorji/go/codec/codecgen_test.go create mode 100644 vendor/github.com/coreos/etcd/Godeps/_workspace/src/github.com/ugorji/go/codec/decode.go create mode 100644 vendor/github.com/coreos/etcd/Godeps/_workspace/src/github.com/ugorji/go/codec/encode.go create mode 100644 vendor/github.com/coreos/etcd/Godeps/_workspace/src/github.com/ugorji/go/codec/fast-path.generated.go create mode 100644 vendor/github.com/coreos/etcd/Godeps/_workspace/src/github.com/ugorji/go/codec/fast-path.go.tmpl create mode 100644 vendor/github.com/coreos/etcd/Godeps/_workspace/src/github.com/ugorji/go/codec/fast-path.not.go create mode 100644 vendor/github.com/coreos/etcd/Godeps/_workspace/src/github.com/ugorji/go/codec/gen-dec-array.go.tmpl create mode 100644 vendor/github.com/coreos/etcd/Godeps/_workspace/src/github.com/ugorji/go/codec/gen-dec-map.go.tmpl create mode 100644 vendor/github.com/coreos/etcd/Godeps/_workspace/src/github.com/ugorji/go/codec/gen-helper.generated.go create mode 100644 
vendor/github.com/coreos/etcd/Godeps/_workspace/src/github.com/ugorji/go/codec/gen-helper.go.tmpl create mode 100644 vendor/github.com/coreos/etcd/Godeps/_workspace/src/github.com/ugorji/go/codec/gen.generated.go create mode 100644 vendor/github.com/coreos/etcd/Godeps/_workspace/src/github.com/ugorji/go/codec/gen.go create mode 100644 vendor/github.com/coreos/etcd/Godeps/_workspace/src/github.com/ugorji/go/codec/helper.go create mode 100644 vendor/github.com/coreos/etcd/Godeps/_workspace/src/github.com/ugorji/go/codec/helper_internal.go create mode 100644 vendor/github.com/coreos/etcd/Godeps/_workspace/src/github.com/ugorji/go/codec/helper_not_unsafe.go create mode 100644 vendor/github.com/coreos/etcd/Godeps/_workspace/src/github.com/ugorji/go/codec/helper_test.go create mode 100644 vendor/github.com/coreos/etcd/Godeps/_workspace/src/github.com/ugorji/go/codec/helper_unsafe.go create mode 100644 vendor/github.com/coreos/etcd/Godeps/_workspace/src/github.com/ugorji/go/codec/json.go create mode 100644 vendor/github.com/coreos/etcd/Godeps/_workspace/src/github.com/ugorji/go/codec/msgpack.go create mode 100644 vendor/github.com/coreos/etcd/Godeps/_workspace/src/github.com/ugorji/go/codec/noop.go create mode 100644 vendor/github.com/coreos/etcd/Godeps/_workspace/src/github.com/ugorji/go/codec/prebuild.go create mode 100644 vendor/github.com/coreos/etcd/Godeps/_workspace/src/github.com/ugorji/go/codec/prebuild.sh create mode 100644 vendor/github.com/coreos/etcd/Godeps/_workspace/src/github.com/ugorji/go/codec/py_test.go create mode 100644 vendor/github.com/coreos/etcd/Godeps/_workspace/src/github.com/ugorji/go/codec/rpc.go create mode 100644 vendor/github.com/coreos/etcd/Godeps/_workspace/src/github.com/ugorji/go/codec/simple.go create mode 100644 vendor/github.com/coreos/etcd/Godeps/_workspace/src/github.com/ugorji/go/codec/test-cbor-goldens.json create mode 100644 vendor/github.com/coreos/etcd/Godeps/_workspace/src/github.com/ugorji/go/codec/test.py create mode 100644 vendor/github.com/coreos/etcd/Godeps/_workspace/src/github.com/ugorji/go/codec/tests.sh create mode 100644 vendor/github.com/coreos/etcd/Godeps/_workspace/src/github.com/ugorji/go/codec/time.go create mode 100644 vendor/github.com/coreos/etcd/Godeps/_workspace/src/github.com/ugorji/go/codec/values_test.go create mode 100644 vendor/github.com/coreos/etcd/Godeps/_workspace/src/golang.org/x/net/context/context.go create mode 100644 vendor/github.com/coreos/etcd/Godeps/_workspace/src/golang.org/x/net/context/context_test.go create mode 100644 vendor/github.com/coreos/etcd/Godeps/_workspace/src/golang.org/x/net/context/ctxhttp/cancelreq.go create mode 100644 vendor/github.com/coreos/etcd/Godeps/_workspace/src/golang.org/x/net/context/ctxhttp/cancelreq_go14.go create mode 100644 vendor/github.com/coreos/etcd/Godeps/_workspace/src/golang.org/x/net/context/ctxhttp/ctxhttp.go create mode 100644 vendor/github.com/coreos/etcd/Godeps/_workspace/src/golang.org/x/net/context/ctxhttp/ctxhttp_test.go create mode 100644 vendor/github.com/coreos/etcd/Godeps/_workspace/src/golang.org/x/net/context/withtimeout_test.go create mode 100644 vendor/github.com/coreos/etcd/client/README.md create mode 100644 vendor/github.com/coreos/etcd/client/auth_role.go create mode 100644 vendor/github.com/coreos/etcd/client/auth_user.go create mode 100644 vendor/github.com/coreos/etcd/client/cancelreq.go create mode 100644 vendor/github.com/coreos/etcd/client/cancelreq_go14.go create mode 100644 vendor/github.com/coreos/etcd/client/client.go create mode 100644 
vendor/github.com/coreos/etcd/client/client_test.go create mode 100644 vendor/github.com/coreos/etcd/client/cluster_error.go create mode 100644 vendor/github.com/coreos/etcd/client/curl.go create mode 100644 vendor/github.com/coreos/etcd/client/discover.go create mode 100644 vendor/github.com/coreos/etcd/client/doc.go create mode 100644 vendor/github.com/coreos/etcd/client/fake_transport_go14_test.go create mode 100644 vendor/github.com/coreos/etcd/client/fake_transport_test.go create mode 100644 vendor/github.com/coreos/etcd/client/keys.generated.go create mode 100644 vendor/github.com/coreos/etcd/client/keys.go create mode 100644 vendor/github.com/coreos/etcd/client/keys_bench_test.go create mode 100644 vendor/github.com/coreos/etcd/client/keys_test.go create mode 100644 vendor/github.com/coreos/etcd/client/members.go create mode 100644 vendor/github.com/coreos/etcd/client/members_test.go create mode 100644 vendor/github.com/coreos/etcd/client/srv.go create mode 100644 vendor/github.com/coreos/etcd/client/srv_test.go create mode 100644 vendor/github.com/coreos/etcd/client/util.go create mode 100644 vendor/github.com/coreos/etcd/pkg/pathutil/path.go create mode 100644 vendor/github.com/coreos/etcd/pkg/pathutil/path_test.go create mode 100644 vendor/github.com/coreos/etcd/pkg/types/doc.go create mode 100644 vendor/github.com/coreos/etcd/pkg/types/id.go create mode 100644 vendor/github.com/coreos/etcd/pkg/types/id_test.go create mode 100644 vendor/github.com/coreos/etcd/pkg/types/set.go create mode 100644 vendor/github.com/coreos/etcd/pkg/types/set_test.go create mode 100644 vendor/github.com/coreos/etcd/pkg/types/slice.go create mode 100644 vendor/github.com/coreos/etcd/pkg/types/slice_test.go create mode 100644 vendor/github.com/coreos/etcd/pkg/types/urls.go create mode 100644 vendor/github.com/coreos/etcd/pkg/types/urls_test.go create mode 100644 vendor/github.com/coreos/etcd/pkg/types/urlsmap.go create mode 100644 vendor/github.com/coreos/etcd/pkg/types/urlsmap_go15_test.go create mode 100644 vendor/github.com/coreos/etcd/pkg/types/urlsmap_test.go create mode 100644 vendor/github.com/cyberdelia/heroku-go/v3/heroku.go create mode 100644 vendor/github.com/cyberdelia/heroku-go/v3/schema.json create mode 100644 vendor/github.com/cyberdelia/heroku-go/v3/transport.go create mode 100644 vendor/github.com/digitalocean/godo/.travis.yml create mode 100644 vendor/github.com/digitalocean/godo/CONTRIBUTING.md create mode 100644 vendor/github.com/digitalocean/godo/LICENSE.txt create mode 100644 vendor/github.com/digitalocean/godo/README.md create mode 100644 vendor/github.com/digitalocean/godo/account.go create mode 100644 vendor/github.com/digitalocean/godo/account_test.go create mode 100644 vendor/github.com/digitalocean/godo/action.go create mode 100644 vendor/github.com/digitalocean/godo/action_test.go create mode 100644 vendor/github.com/digitalocean/godo/doc.go create mode 100644 vendor/github.com/digitalocean/godo/domains.go create mode 100644 vendor/github.com/digitalocean/godo/domains_test.go create mode 100644 vendor/github.com/digitalocean/godo/droplet_actions.go create mode 100644 vendor/github.com/digitalocean/godo/droplet_actions_test.go create mode 100644 vendor/github.com/digitalocean/godo/droplets.go create mode 100644 vendor/github.com/digitalocean/godo/droplets_test.go create mode 100644 vendor/github.com/digitalocean/godo/errors.go create mode 100644 vendor/github.com/digitalocean/godo/errors_test.go create mode 100644 vendor/github.com/digitalocean/godo/floating_ips.go create 
mode 100644 vendor/github.com/digitalocean/godo/floating_ips_actions.go create mode 100644 vendor/github.com/digitalocean/godo/floating_ips_actions_test.go create mode 100644 vendor/github.com/digitalocean/godo/floating_ips_test.go create mode 100644 vendor/github.com/digitalocean/godo/godo.go create mode 100644 vendor/github.com/digitalocean/godo/godo_test.go create mode 100644 vendor/github.com/digitalocean/godo/image_actions.go create mode 100644 vendor/github.com/digitalocean/godo/image_actions_test.go create mode 100644 vendor/github.com/digitalocean/godo/images.go create mode 100644 vendor/github.com/digitalocean/godo/images_test.go create mode 100644 vendor/github.com/digitalocean/godo/keys.go create mode 100644 vendor/github.com/digitalocean/godo/keys_test.go create mode 100644 vendor/github.com/digitalocean/godo/links.go create mode 100644 vendor/github.com/digitalocean/godo/links_test.go create mode 100644 vendor/github.com/digitalocean/godo/regions.go create mode 100644 vendor/github.com/digitalocean/godo/regions_test.go create mode 100644 vendor/github.com/digitalocean/godo/sizes.go create mode 100644 vendor/github.com/digitalocean/godo/sizes_test.go create mode 100644 vendor/github.com/digitalocean/godo/strings.go create mode 100644 vendor/github.com/digitalocean/godo/timestamp.go create mode 100644 vendor/github.com/digitalocean/godo/timestamp_test.go create mode 100644 vendor/github.com/digitalocean/godo/util/droplet.go create mode 100644 vendor/github.com/digitalocean/godo/util/droplet_test.go create mode 100644 vendor/github.com/dylanmei/iso8601/LICENSE create mode 100644 vendor/github.com/dylanmei/iso8601/README.md create mode 100644 vendor/github.com/dylanmei/iso8601/duration.go create mode 100644 vendor/github.com/dylanmei/iso8601/duration_test.go create mode 100644 vendor/github.com/dylanmei/winrmtest/LICENSE create mode 100644 vendor/github.com/dylanmei/winrmtest/README.md create mode 100644 vendor/github.com/dylanmei/winrmtest/remote.go create mode 100644 vendor/github.com/dylanmei/winrmtest/wsman.go create mode 100644 vendor/github.com/dylanmei/winrmtest/wsman_test.go create mode 100644 vendor/github.com/fsouza/go-dockerclient/.gitignore create mode 100644 vendor/github.com/fsouza/go-dockerclient/.travis.yml create mode 100644 vendor/github.com/fsouza/go-dockerclient/AUTHORS create mode 100644 vendor/github.com/fsouza/go-dockerclient/DOCKER-LICENSE create mode 100644 vendor/github.com/fsouza/go-dockerclient/LICENSE create mode 100644 vendor/github.com/fsouza/go-dockerclient/Makefile create mode 100644 vendor/github.com/fsouza/go-dockerclient/README.markdown create mode 100644 vendor/github.com/fsouza/go-dockerclient/auth.go create mode 100644 vendor/github.com/fsouza/go-dockerclient/auth_test.go create mode 100644 vendor/github.com/fsouza/go-dockerclient/build_test.go create mode 100644 vendor/github.com/fsouza/go-dockerclient/change.go create mode 100644 vendor/github.com/fsouza/go-dockerclient/change_test.go create mode 100644 vendor/github.com/fsouza/go-dockerclient/client.go create mode 100644 vendor/github.com/fsouza/go-dockerclient/client_test.go create mode 100644 vendor/github.com/fsouza/go-dockerclient/container.go create mode 100644 vendor/github.com/fsouza/go-dockerclient/container_test.go create mode 100644 vendor/github.com/fsouza/go-dockerclient/env.go create mode 100644 vendor/github.com/fsouza/go-dockerclient/env_test.go create mode 100644 vendor/github.com/fsouza/go-dockerclient/event.go create mode 100644 
vendor/github.com/fsouza/go-dockerclient/event_test.go create mode 100644 vendor/github.com/fsouza/go-dockerclient/example_test.go create mode 100644 vendor/github.com/fsouza/go-dockerclient/exec.go create mode 100644 vendor/github.com/fsouza/go-dockerclient/exec_test.go create mode 100644 vendor/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/CHANGELOG.md create mode 100644 vendor/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/LICENSE create mode 100644 vendor/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/README.md create mode 100644 vendor/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/doc.go create mode 100644 vendor/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/entry.go create mode 100644 vendor/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/entry_test.go create mode 100644 vendor/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/exported.go create mode 100644 vendor/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/formatter.go create mode 100644 vendor/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/formatter_bench_test.go create mode 100644 vendor/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/hook_test.go create mode 100644 vendor/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/hooks.go create mode 100644 vendor/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/json_formatter.go create mode 100644 vendor/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/json_formatter_test.go create mode 100644 vendor/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/logger.go create mode 100644 vendor/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/logrus.go create mode 100644 vendor/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/logrus_test.go create mode 100644 vendor/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/terminal_bsd.go create mode 100644 vendor/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/terminal_linux.go create mode 100644 vendor/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/terminal_notwindows.go create mode 100644 vendor/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/terminal_solaris.go create mode 100644 vendor/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/terminal_windows.go create mode 100644 vendor/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/text_formatter.go create mode 100644 vendor/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/text_formatter_test.go create mode 100644 vendor/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/writer.go create mode 100644 vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts/envfile.go create mode 100644 vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts/envfile_test.go create mode 100644 vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts/hosts.go create mode 100644 vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts/hosts_test.go create mode 100644 vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts/hosts_unix.go create mode 100644 
vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts/hosts_windows.go create mode 100644 vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts/ip.go create mode 100644 vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts/ip_test.go create mode 100644 vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts/opts.go create mode 100644 vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts/opts_test.go create mode 100644 vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts/opts_unix.go create mode 100644 vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts/opts_windows.go create mode 100644 vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/README.md create mode 100644 vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/archive.go create mode 100644 vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/archive_test.go create mode 100644 vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/archive_unix.go create mode 100644 vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/archive_unix_test.go create mode 100644 vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/archive_windows.go create mode 100644 vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/archive_windows_test.go create mode 100644 vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/changes.go create mode 100644 vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/changes_linux.go create mode 100644 vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/changes_other.go create mode 100644 vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/changes_posix_test.go create mode 100644 vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/changes_test.go create mode 100644 vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/changes_unix.go create mode 100644 vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/changes_windows.go create mode 100644 vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/copy.go create mode 100644 vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/copy_test.go create mode 100644 vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/copy_unix.go create mode 100644 vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/copy_windows.go create mode 100644 vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/diff.go create mode 100644 vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/diff_test.go create mode 100644 vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/example_changes.go create mode 100644 vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/time_linux.go create mode 100644 vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/time_unsupported.go 
create mode 100644 vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/utils_test.go create mode 100644 vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/whiteouts.go create mode 100644 vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/wrap.go create mode 100644 vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/wrap_test.go create mode 100644 vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/fileutils/fileutils.go create mode 100644 vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/fileutils/fileutils_test.go create mode 100644 vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/fileutils/fileutils_unix.go create mode 100644 vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/fileutils/fileutils_windows.go create mode 100644 vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/homedir/homedir.go create mode 100644 vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/homedir/homedir_test.go create mode 100644 vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/idtools/idtools.go create mode 100644 vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/idtools/idtools_unix.go create mode 100644 vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/idtools/idtools_unix_test.go create mode 100644 vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/idtools/idtools_windows.go create mode 100644 vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/idtools/usergroupadd_linux.go create mode 100644 vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/idtools/usergroupadd_unsupported.go create mode 100644 vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils/bytespipe.go create mode 100644 vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils/bytespipe_test.go create mode 100644 vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils/fmt.go create mode 100644 vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils/fmt_test.go create mode 100644 vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils/multireader.go create mode 100644 vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils/multireader_test.go create mode 100644 vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils/readers.go create mode 100644 vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils/readers_test.go create mode 100644 vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils/scheduler.go create mode 100644 vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils/scheduler_gccgo.go create mode 100644 vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils/temp_unix.go create mode 100644 vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils/temp_windows.go create mode 100644 vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils/writeflusher.go create mode 100644 
vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils/writers.go create mode 100644 vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils/writers_test.go create mode 100644 vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/longpath/longpath.go create mode 100644 vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/longpath/longpath_test.go create mode 100644 vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/pools/pools.go create mode 100644 vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/pools/pools_test.go create mode 100644 vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/promise/promise.go create mode 100644 vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/stdcopy/stdcopy.go create mode 100644 vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/stdcopy/stdcopy_test.go create mode 100644 vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/chtimes.go create mode 100644 vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/chtimes_test.go create mode 100644 vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/chtimes_unix_test.go create mode 100644 vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/chtimes_windows_test.go create mode 100644 vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/errors.go create mode 100644 vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/events_windows.go create mode 100644 vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/filesys.go create mode 100644 vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/filesys_windows.go create mode 100644 vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/lstat.go create mode 100644 vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/lstat_unix_test.go create mode 100644 vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/lstat_windows.go create mode 100644 vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/meminfo.go create mode 100644 vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/meminfo_linux.go create mode 100644 vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/meminfo_unix_test.go create mode 100644 vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/meminfo_unsupported.go create mode 100644 vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/meminfo_windows.go create mode 100644 vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/mknod.go create mode 100644 vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/mknod_windows.go create mode 100644 vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/path_unix.go create mode 100644 vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/path_windows.go create mode 100644 
vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/stat.go create mode 100644 vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/stat_freebsd.go create mode 100644 vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/stat_linux.go create mode 100644 vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/stat_solaris.go create mode 100644 vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/stat_unix_test.go create mode 100644 vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/stat_unsupported.go create mode 100644 vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/stat_windows.go create mode 100644 vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/syscall_unix.go create mode 100644 vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/syscall_windows.go create mode 100644 vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/umask.go create mode 100644 vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/umask_windows.go create mode 100644 vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/utimes_darwin.go create mode 100644 vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/utimes_freebsd.go create mode 100644 vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/utimes_linux.go create mode 100644 vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/utimes_unix_test.go create mode 100644 vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/utimes_unsupported.go create mode 100644 vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/xattrs_linux.go create mode 100644 vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/xattrs_unsupported.go create mode 100644 vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/go-units/CONTRIBUTING.md create mode 100644 vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/go-units/LICENSE.code create mode 100644 vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/go-units/LICENSE.docs create mode 100644 vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/go-units/MAINTAINERS create mode 100644 vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/go-units/README.md create mode 100644 vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/go-units/circle.yml create mode 100644 vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/go-units/duration.go create mode 100644 vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/go-units/duration_test.go create mode 100644 vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/go-units/size.go create mode 100644 vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/go-units/size_test.go create mode 100644 vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/go-units/ulimit.go create mode 100644 vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/go-units/ulimit_test.go create mode 100644 
vendor/github.com/fsouza/go-dockerclient/external/github.com/gorilla/context/LICENSE create mode 100644 vendor/github.com/fsouza/go-dockerclient/external/github.com/gorilla/context/README.md create mode 100644 vendor/github.com/fsouza/go-dockerclient/external/github.com/gorilla/context/context.go create mode 100644 vendor/github.com/fsouza/go-dockerclient/external/github.com/gorilla/context/context_test.go create mode 100644 vendor/github.com/fsouza/go-dockerclient/external/github.com/gorilla/context/doc.go create mode 100644 vendor/github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux/LICENSE create mode 100644 vendor/github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux/README.md create mode 100644 vendor/github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux/bench_test.go create mode 100644 vendor/github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux/doc.go create mode 100644 vendor/github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux/mux.go create mode 100644 vendor/github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux/mux_test.go create mode 100644 vendor/github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux/old_test.go create mode 100644 vendor/github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux/regexp.go create mode 100644 vendor/github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux/route.go create mode 100644 vendor/github.com/fsouza/go-dockerclient/external/github.com/hashicorp/go-cleanhttp/LICENSE create mode 100644 vendor/github.com/fsouza/go-dockerclient/external/github.com/hashicorp/go-cleanhttp/README.md create mode 100644 vendor/github.com/fsouza/go-dockerclient/external/github.com/hashicorp/go-cleanhttp/cleanhttp.go create mode 100644 vendor/github.com/fsouza/go-dockerclient/external/github.com/opencontainers/runc/libcontainer/user/MAINTAINERS create mode 100644 vendor/github.com/fsouza/go-dockerclient/external/github.com/opencontainers/runc/libcontainer/user/lookup.go create mode 100644 vendor/github.com/fsouza/go-dockerclient/external/github.com/opencontainers/runc/libcontainer/user/lookup_unix.go create mode 100644 vendor/github.com/fsouza/go-dockerclient/external/github.com/opencontainers/runc/libcontainer/user/lookup_unsupported.go create mode 100644 vendor/github.com/fsouza/go-dockerclient/external/github.com/opencontainers/runc/libcontainer/user/user.go create mode 100644 vendor/github.com/fsouza/go-dockerclient/external/github.com/opencontainers/runc/libcontainer/user/user_test.go create mode 100644 vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/net/context/context.go create mode 100644 vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/net/context/context_test.go create mode 100644 vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/net/context/withtimeout_test.go create mode 100644 vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/asm.s create mode 100644 vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/asm_darwin_386.s create mode 100644 vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/asm_darwin_amd64.s create mode 100644 vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/asm_darwin_arm.s create mode 100644 vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/asm_darwin_arm64.s create mode 100644 vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/asm_dragonfly_386.s create mode 100644 
vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/asm_dragonfly_amd64.s create mode 100644 vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/asm_freebsd_386.s create mode 100644 vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/asm_freebsd_amd64.s create mode 100644 vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/asm_freebsd_arm.s create mode 100644 vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/asm_linux_386.s create mode 100644 vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/asm_linux_amd64.s create mode 100644 vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/asm_linux_arm.s create mode 100644 vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/asm_linux_arm64.s create mode 100644 vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/asm_linux_ppc64x.s create mode 100644 vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/asm_netbsd_386.s create mode 100644 vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/asm_netbsd_amd64.s create mode 100644 vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/asm_netbsd_arm.s create mode 100644 vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/asm_openbsd_386.s create mode 100644 vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/asm_openbsd_amd64.s create mode 100644 vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/asm_solaris_amd64.s create mode 100644 vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/constants.go create mode 100644 vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/creds_test.go create mode 100644 vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/env_unix.go create mode 100644 vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/env_unset.go create mode 100644 vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/export_test.go create mode 100644 vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/flock.go create mode 100644 vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/flock_linux_32bit.go create mode 100644 vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/gccgo.go create mode 100644 vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/gccgo_c.c create mode 100644 vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/gccgo_linux_amd64.go create mode 100644 vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/mkall.sh create mode 100644 vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/mkerrors.sh create mode 100644 vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/mksyscall.pl create mode 100644 vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/mksyscall_solaris.pl create mode 100644 vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/mksysctl_openbsd.pl create mode 100644 vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/mksysnum_darwin.pl create mode 100644 vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/mksysnum_dragonfly.pl create mode 100644 vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/mksysnum_freebsd.pl create mode 100644 
vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/mksysnum_linux.pl
 create mode 100644 vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/mksysnum_netbsd.pl
 create mode 100644 vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/mksysnum_openbsd.pl
 create mode 100644 vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/mmap_unix_test.go
 create mode 100644 vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/race.go
 create mode 100644 vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/race0.go
 create mode 100644 vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/sockcmsg_linux.go
 create mode 100644 vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/sockcmsg_unix.go
 create mode 100644 vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/str.go
 create mode 100644 vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/syscall.go
 create mode 100644 vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/syscall_bsd.go
 create mode 100644 vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/syscall_bsd_test.go
 create mode 100644 vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/syscall_darwin.go
 create mode 100644 vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/syscall_darwin_386.go
 create mode 100644 vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/syscall_darwin_amd64.go
 create mode 100644 vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/syscall_darwin_arm.go
 create mode 100644 vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/syscall_darwin_arm64.go
 create mode 100644 vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/syscall_dragonfly.go
 create mode 100644 vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/syscall_dragonfly_386.go
 create mode 100644 vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/syscall_dragonfly_amd64.go
 create mode 100644 vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/syscall_freebsd.go
 create mode 100644 vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/syscall_freebsd_386.go
 create mode 100644 vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/syscall_freebsd_amd64.go
 create mode 100644 vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/syscall_freebsd_arm.go
 create mode 100644 vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/syscall_freebsd_test.go
 create mode 100644 vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/syscall_linux.go
 create mode 100644 vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/syscall_linux_386.go
 create mode 100644 vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/syscall_linux_amd64.go
 create mode 100644 vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/syscall_linux_arm.go
 create mode 100644 vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/syscall_linux_arm64.go
 create mode 100644 vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/syscall_linux_ppc64x.go
 create mode 100644 vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/syscall_netbsd.go
 create mode 100644 vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/syscall_netbsd_386.go
 create mode 100644 vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/syscall_netbsd_amd64.go
 create mode 100644 vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/syscall_netbsd_arm.go
 create mode 100644 vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/syscall_no_getwd.go
 create mode 100644 vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/syscall_openbsd.go
 create mode 100644 vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/syscall_openbsd_386.go
 create mode 100644 vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/syscall_openbsd_amd64.go
 create mode 100644 vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/syscall_solaris.go
 create mode 100644 vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/syscall_solaris_amd64.go
 create mode 100644 vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/syscall_test.go
 create mode 100644 vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/syscall_unix.go
 create mode 100644 vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/syscall_unix_test.go
 create mode 100644 vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/types_darwin.go
 create mode 100644 vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/types_dragonfly.go
 create mode 100644 vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/types_freebsd.go
 create mode 100644 vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/types_linux.go
 create mode 100644 vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/types_netbsd.go
 create mode 100644 vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/types_openbsd.go
 create mode 100644 vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/types_solaris.go
 create mode 100644 vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zerrors_darwin_386.go
 create mode 100644 vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zerrors_darwin_amd64.go
 create mode 100644 vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zerrors_darwin_arm.go
 create mode 100644 vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zerrors_darwin_arm64.go
 create mode 100644 vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zerrors_dragonfly_386.go
 create mode 100644 vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zerrors_dragonfly_amd64.go
 create mode 100644 vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zerrors_freebsd_386.go
 create mode 100644 vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zerrors_freebsd_amd64.go
 create mode 100644 vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zerrors_freebsd_arm.go
 create mode 100644 vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zerrors_linux_386.go
 create mode 100644 vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zerrors_linux_amd64.go
 create mode 100644 vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zerrors_linux_arm.go
 create mode 100644 vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zerrors_linux_arm64.go
 create mode 100644 vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zerrors_linux_ppc64.go
 create mode 100644 vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zerrors_linux_ppc64le.go
 create mode 100644 vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zerrors_netbsd_386.go
 create mode 100644 vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zerrors_netbsd_amd64.go
 create mode 100644 vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zerrors_netbsd_arm.go
 create mode 100644 vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zerrors_openbsd_386.go
 create mode 100644 vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zerrors_openbsd_amd64.go
 create mode 100644 vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zerrors_solaris_amd64.go
 create mode 100644 vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zsyscall_darwin_386.go
 create mode 100644 vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zsyscall_darwin_amd64.go
 create mode 100644 vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zsyscall_darwin_arm.go
 create mode 100644 vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zsyscall_darwin_arm64.go
 create mode 100644 vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zsyscall_dragonfly_386.go
 create mode 100644 vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zsyscall_dragonfly_amd64.go
 create mode 100644 vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zsyscall_freebsd_386.go
 create mode 100644 vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zsyscall_freebsd_amd64.go
 create mode 100644 vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zsyscall_freebsd_arm.go
 create mode 100644 vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zsyscall_linux_386.go
 create mode 100644 vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zsyscall_linux_amd64.go
 create mode 100644 vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zsyscall_linux_arm.go
 create mode 100644 vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zsyscall_linux_arm64.go
 create mode 100644 vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zsyscall_linux_ppc64.go
 create mode 100644 vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zsyscall_linux_ppc64le.go
 create mode 100644 vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zsyscall_netbsd_386.go
 create mode 100644 vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zsyscall_netbsd_amd64.go
 create mode 100644 vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zsyscall_netbsd_arm.go
 create mode 100644 vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zsyscall_openbsd_386.go
 create mode 100644 vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go
 create mode 100644 vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zsyscall_solaris_amd64.go
 create mode 100644 vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zsysctl_openbsd.go
 create mode 100644 vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zsysnum_darwin_386.go
 create mode 100644 vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zsysnum_darwin_amd64.go
 create mode 100644 vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zsysnum_darwin_arm.go
 create mode 100644 vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zsysnum_darwin_arm64.go
 create mode 100644 vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zsysnum_dragonfly_386.go
 create mode 100644 vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zsysnum_dragonfly_amd64.go
 create mode 100644 vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zsysnum_freebsd_386.go
 create mode 100644 vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zsysnum_freebsd_amd64.go
 create mode 100644 vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zsysnum_freebsd_arm.go
 create mode 100644 vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zsysnum_linux_386.go
 create mode 100644 vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zsysnum_linux_amd64.go
 create mode 100644 vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zsysnum_linux_arm.go
 create mode 100644 vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zsysnum_linux_arm64.go
 create mode 100644 vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zsysnum_linux_ppc64.go
 create mode 100644 vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go
 create mode 100644 vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zsysnum_netbsd_386.go
 create mode 100644 vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zsysnum_netbsd_amd64.go
 create mode 100644 vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zsysnum_netbsd_arm.go
 create mode 100644 vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zsysnum_openbsd_386.go
 create mode 100644 vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zsysnum_openbsd_amd64.go
 create mode 100644 vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zsysnum_solaris_amd64.go
 create mode 100644 vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/ztypes_darwin_386.go
 create mode 100644 vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/ztypes_darwin_amd64.go
 create mode 100644 vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/ztypes_darwin_arm.go
 create mode 100644 vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/ztypes_darwin_arm64.go
 create mode 100644 vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/ztypes_dragonfly_386.go
 create mode 100644 vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/ztypes_dragonfly_amd64.go
 create mode 100644 vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/ztypes_freebsd_386.go
 create mode 100644 vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/ztypes_freebsd_amd64.go
 create mode 100644 vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/ztypes_freebsd_arm.go
 create mode 100644 vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/ztypes_linux_386.go
 create mode 100644 vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/ztypes_linux_amd64.go
 create mode 100644 vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/ztypes_linux_arm.go
 create mode 100644 vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/ztypes_linux_arm64.go
 create mode 100644 vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/ztypes_linux_ppc64.go
 create mode 100644 vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/ztypes_linux_ppc64le.go
 create mode 100644 vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/ztypes_netbsd_386.go
 create mode 100644 vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/ztypes_netbsd_amd64.go
 create mode 100644 vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/ztypes_netbsd_arm.go
 create mode 100644 vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/ztypes_openbsd_386.go
 create mode 100644 vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/ztypes_openbsd_amd64.go
 create mode 100644 vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/ztypes_solaris_amd64.go
 create mode 100644 vendor/github.com/fsouza/go-dockerclient/image.go
 create mode 100644 vendor/github.com/fsouza/go-dockerclient/image_test.go
 create mode 100644 vendor/github.com/fsouza/go-dockerclient/integration_test.go
 create mode 100644 vendor/github.com/fsouza/go-dockerclient/misc.go
 create mode 100644 vendor/github.com/fsouza/go-dockerclient/misc_test.go
 create mode 100644 vendor/github.com/fsouza/go-dockerclient/network.go
 create mode 100644 vendor/github.com/fsouza/go-dockerclient/network_test.go
 create mode 100644 vendor/github.com/fsouza/go-dockerclient/signal.go
 create mode 100644 vendor/github.com/fsouza/go-dockerclient/tar.go
 create mode 100644 vendor/github.com/fsouza/go-dockerclient/testing/data/.dockerignore
 create mode 100644 vendor/github.com/fsouza/go-dockerclient/testing/data/Dockerfile
 create mode 100644 vendor/github.com/fsouza/go-dockerclient/testing/data/barfile
 create mode 100644 vendor/github.com/fsouza/go-dockerclient/testing/data/ca.pem
 create mode 100644 vendor/github.com/fsouza/go-dockerclient/testing/data/cert.pem
 create mode 100644 vendor/github.com/fsouza/go-dockerclient/testing/data/container.tar
 create mode 100644 vendor/github.com/fsouza/go-dockerclient/testing/data/dockerfile.tar
 create mode 100644 vendor/github.com/fsouza/go-dockerclient/testing/data/foofile
 create mode 100644 vendor/github.com/fsouza/go-dockerclient/testing/data/key.pem
 create mode 100644 vendor/github.com/fsouza/go-dockerclient/testing/data/server.pem
 create mode 100644 vendor/github.com/fsouza/go-dockerclient/testing/data/serverkey.pem
 create mode 100644 vendor/github.com/fsouza/go-dockerclient/testing/server.go
 create mode 100644 vendor/github.com/fsouza/go-dockerclient/testing/server_test.go
 create mode 100644 vendor/github.com/fsouza/go-dockerclient/tls.go
 create mode 100644 vendor/github.com/fsouza/go-dockerclient/volume.go
 create mode 100644 vendor/github.com/fsouza/go-dockerclient/volume_test.go
 create mode 100644 vendor/github.com/go-chef/chef/.gitignore
 create mode 100644 vendor/github.com/go-chef/chef/LICENSE
 create mode 100644 vendor/github.com/go-chef/chef/README.md
 create mode 100644 vendor/github.com/go-chef/chef/acl.go
 create mode 100644 vendor/github.com/go-chef/chef/acl_test.go
 create mode 100644 vendor/github.com/go-chef/chef/authentication.go
 create mode 100644 vendor/github.com/go-chef/chef/build.sh
 create mode 100644 vendor/github.com/go-chef/chef/client.go
 create mode 100644 vendor/github.com/go-chef/chef/client_test.go
 create mode 100644 vendor/github.com/go-chef/chef/cookbook.go
 create mode 100644 vendor/github.com/go-chef/chef/cookbook_test.go
 create mode 100644 vendor/github.com/go-chef/chef/databag.go
 create mode 100644 vendor/github.com/go-chef/chef/databag_test.go
 create mode 100644 vendor/github.com/go-chef/chef/debug.go
 create mode 100644 vendor/github.com/go-chef/chef/doc.go
 create mode 100644 vendor/github.com/go-chef/chef/environment.go
 create mode 100644 vendor/github.com/go-chef/chef/environment_test.go
 create mode 100644 vendor/github.com/go-chef/chef/examples/cookbooks/cookbooks.go
 create mode 100644 vendor/github.com/go-chef/chef/examples/cookbooks/key.pem
 create mode 100644 vendor/github.com/go-chef/chef/examples/nodes/key.pem
 create mode 100644 vendor/github.com/go-chef/chef/examples/nodes/nodes.go
 create mode 100644 vendor/github.com/go-chef/chef/examples/sandboxes/key.pem
 create mode 100644 vendor/github.com/go-chef/chef/examples/sandboxes/sandboxes.go
 create mode 100644 vendor/github.com/go-chef/chef/examples/search/key.pem
 create mode 100644 vendor/github.com/go-chef/chef/examples/search/search.go
 create mode 100644 vendor/github.com/go-chef/chef/http.go
 create mode 100644 vendor/github.com/go-chef/chef/http_test.go
 create mode 100644 vendor/github.com/go-chef/chef/node.go
 create mode 100644 vendor/github.com/go-chef/chef/node_test.go
 create mode 100644 vendor/github.com/go-chef/chef/reader.go
 create mode 100644 vendor/github.com/go-chef/chef/reader_test.go
 create mode 100644 vendor/github.com/go-chef/chef/release.go
 create mode 100644 vendor/github.com/go-chef/chef/role.go
 create mode 100644 vendor/github.com/go-chef/chef/role_test.go
 create mode 100644 vendor/github.com/go-chef/chef/run_list.go
 create mode 100644 vendor/github.com/go-chef/chef/run_list_test.go
 create mode 100644 vendor/github.com/go-chef/chef/sandbox.go
 create mode 100644 vendor/github.com/go-chef/chef/sandbox_test.go
 create mode 100644 vendor/github.com/go-chef/chef/search.go
 create mode 100644 vendor/github.com/go-chef/chef/search_test.go
 create mode 100644 vendor/github.com/go-chef/chef/test/client.json
 create mode 100644 vendor/github.com/go-chef/chef/test/cookbooks_response.json
 create mode 100644 vendor/github.com/go-chef/chef/test/environment.json
 create mode 100644 vendor/github.com/go-chef/chef/test/node.json
 create mode 100644 vendor/github.com/go-chef/chef/test/role.json
 create mode 100644 vendor/github.com/go-chef/chef/wercker.yml
 create mode 100644 vendor/github.com/go-ini/ini/.gitignore
 create mode 100644 vendor/github.com/go-ini/ini/LICENSE
 create mode 100644 vendor/github.com/go-ini/ini/README.md
 create mode 100644 vendor/github.com/go-ini/ini/README_ZH.md
 create mode 100644 vendor/github.com/go-ini/ini/ini.go
 create mode 100644 vendor/github.com/go-ini/ini/ini_test.go
 create mode 100644 vendor/github.com/go-ini/ini/parser.go
 create mode 100644 vendor/github.com/go-ini/ini/struct.go
 create mode 100644 vendor/github.com/go-ini/ini/struct_test.go
 create mode 100644 vendor/github.com/go-ini/ini/testdata/conf.ini
 create mode 100644 vendor/github.com/google/go-querystring/query/encode.go
 create mode 100644 vendor/github.com/google/go-querystring/query/encode_test.go
 create mode 100644 vendor/github.com/hashicorp/atlas-go/archive/archive.go
 create mode 100644 vendor/github.com/hashicorp/atlas-go/archive/archive_test.go
 create mode 100644 vendor/github.com/hashicorp/atlas-go/archive/test-fixtures/archive-dir-mode/file.txt
 create mode 100644 vendor/github.com/hashicorp/atlas-go/archive/test-fixtures/archive-file-compressed/file.tar.gz
 create mode 100644 vendor/github.com/hashicorp/atlas-go/archive/test-fixtures/archive-file/foo.txt
 create mode 100644 vendor/github.com/hashicorp/atlas-go/archive/test-fixtures/archive-flat/baz.txt
 create mode 100644 vendor/github.com/hashicorp/atlas-go/archive/test-fixtures/archive-flat/foo.txt
 create mode 100644 vendor/github.com/hashicorp/atlas-go/archive/test-fixtures/archive-git/DOTgit/COMMIT_EDITMSG
 create mode 100644 vendor/github.com/hashicorp/atlas-go/archive/test-fixtures/archive-git/DOTgit/HEAD
 create mode 100644 vendor/github.com/hashicorp/atlas-go/archive/test-fixtures/archive-git/DOTgit/config
 create mode 100644 vendor/github.com/hashicorp/atlas-go/archive/test-fixtures/archive-git/DOTgit/description
 create mode 100644 vendor/github.com/hashicorp/atlas-go/archive/test-fixtures/archive-git/DOTgit/hooks/applypatch-msg.sample
 create mode 100644 vendor/github.com/hashicorp/atlas-go/archive/test-fixtures/archive-git/DOTgit/hooks/commit-msg.sample
 create mode 100644 vendor/github.com/hashicorp/atlas-go/archive/test-fixtures/archive-git/DOTgit/hooks/post-update.sample
 create mode 100644 vendor/github.com/hashicorp/atlas-go/archive/test-fixtures/archive-git/DOTgit/hooks/pre-applypatch.sample
 create mode 100644 vendor/github.com/hashicorp/atlas-go/archive/test-fixtures/archive-git/DOTgit/hooks/pre-commit.sample
 create mode 100644 vendor/github.com/hashicorp/atlas-go/archive/test-fixtures/archive-git/DOTgit/hooks/pre-push.sample
 create mode 100644 vendor/github.com/hashicorp/atlas-go/archive/test-fixtures/archive-git/DOTgit/hooks/pre-rebase.sample
 create mode 100644 vendor/github.com/hashicorp/atlas-go/archive/test-fixtures/archive-git/DOTgit/hooks/prepare-commit-msg.sample
 create mode 100644 vendor/github.com/hashicorp/atlas-go/archive/test-fixtures/archive-git/DOTgit/hooks/update.sample
 create mode 100644 vendor/github.com/hashicorp/atlas-go/archive/test-fixtures/archive-git/DOTgit/index
 create mode 100644 vendor/github.com/hashicorp/atlas-go/archive/test-fixtures/archive-git/DOTgit/info/exclude
 create mode 100644 vendor/github.com/hashicorp/atlas-go/archive/test-fixtures/archive-git/DOTgit/logs/HEAD
 create mode 100644 vendor/github.com/hashicorp/atlas-go/archive/test-fixtures/archive-git/DOTgit/logs/refs/heads/master
 create mode 100644 vendor/github.com/hashicorp/atlas-go/archive/test-fixtures/archive-git/DOTgit/objects/25/7cc5642cb1a054f08cc83f2d943e56fd3ebe99
 create mode 100644 vendor/github.com/hashicorp/atlas-go/archive/test-fixtures/archive-git/DOTgit/objects/57/16ca5987cbf97d6bb54920bea6adde242d87e6
 create mode 100644 vendor/github.com/hashicorp/atlas-go/archive/test-fixtures/archive-git/DOTgit/objects/75/25d17cbbb56f3253a20903ffddc07c6c935c76
 create mode 100644 vendor/github.com/hashicorp/atlas-go/archive/test-fixtures/archive-git/DOTgit/objects/7e/49ea5550b356e32b63c044201f5f7da1e0925f
 create mode 100644 vendor/github.com/hashicorp/atlas-go/archive/test-fixtures/archive-git/DOTgit/objects/7f/7402c7d2a6e71ca3db3e236099771b160b8ad1
 create mode 100644 vendor/github.com/hashicorp/atlas-go/archive/test-fixtures/archive-git/DOTgit/refs/heads/master
 create mode 100644 vendor/github.com/hashicorp/atlas-go/archive/test-fixtures/archive-git/bar.txt
 create mode 100644 vendor/github.com/hashicorp/atlas-go/archive/test-fixtures/archive-git/foo.txt
 create mode 100644 vendor/github.com/hashicorp/atlas-go/archive/test-fixtures/archive-git/subdir/hello.txt
 create mode 100644 vendor/github.com/hashicorp/atlas-go/archive/test-fixtures/archive-git/untracked.txt
 create mode 100644 vendor/github.com/hashicorp/atlas-go/archive/test-fixtures/archive-hg/bar.txt
 create mode 100644 vendor/github.com/hashicorp/atlas-go/archive/test-fixtures/archive-hg/foo.txt
 create mode 100644 vendor/github.com/hashicorp/atlas-go/archive/test-fixtures/archive-hg/subdir/hello.txt
 create mode 100644 vendor/github.com/hashicorp/atlas-go/archive/test-fixtures/archive-subdir-splat/bar.txt
 create mode 100644 vendor/github.com/hashicorp/atlas-go/archive/test-fixtures/archive-subdir-splat/build/darwin-amd64/build.txt
 create mode 100644 vendor/github.com/hashicorp/atlas-go/archive/test-fixtures/archive-subdir-splat/build/linux-amd64/build.txt
 create mode 100644 vendor/github.com/hashicorp/atlas-go/archive/test-fixtures/archive-subdir/bar.txt
 create mode 100644 vendor/github.com/hashicorp/atlas-go/archive/test-fixtures/archive-subdir/foo.txt
 create mode 100644 vendor/github.com/hashicorp/atlas-go/archive/test-fixtures/archive-subdir/subdir/hello.txt
 create mode 120000 vendor/github.com/hashicorp/atlas-go/archive/test-fixtures/archive-symlink/link/link
 create mode 100644 vendor/github.com/hashicorp/atlas-go/archive/test-fixtures/archive-symlink/real/foo.txt
 create mode 100644 vendor/github.com/hashicorp/atlas-go/archive/vcs.go
 create mode 100644 vendor/github.com/hashicorp/atlas-go/archive/vcs_test.go
 create mode 100644 vendor/github.com/hashicorp/atlas-go/v1/application.go
 create mode 100644 vendor/github.com/hashicorp/atlas-go/v1/application_test.go
 create mode 100644 vendor/github.com/hashicorp/atlas-go/v1/artifact.go
 create mode 100644 vendor/github.com/hashicorp/atlas-go/v1/artifact_test.go
 create mode 100644 vendor/github.com/hashicorp/atlas-go/v1/atlas_test.go
 create mode 100644 vendor/github.com/hashicorp/atlas-go/v1/authentication.go
 create mode 100644 vendor/github.com/hashicorp/atlas-go/v1/authentication_test.go
 create mode 100644 vendor/github.com/hashicorp/atlas-go/v1/build_config.go
 create mode 100644 vendor/github.com/hashicorp/atlas-go/v1/build_config_test.go
 create mode 100644 vendor/github.com/hashicorp/atlas-go/v1/client.go
 create mode 100644 vendor/github.com/hashicorp/atlas-go/v1/client_test.go
 create mode 100644 vendor/github.com/hashicorp/atlas-go/v1/terraform.go
 create mode 100644 vendor/github.com/hashicorp/atlas-go/v1/terraform_test.go
 create mode 100644 vendor/github.com/hashicorp/atlas-go/v1/util.go
 create mode 100644 vendor/github.com/hashicorp/atlas-go/v1/util_test.go
 create mode 100644 vendor/github.com/hashicorp/consul/api/README.md
 create mode 100644 vendor/github.com/hashicorp/consul/api/acl.go
 create mode 100644 vendor/github.com/hashicorp/consul/api/acl_test.go
 create mode 100644 vendor/github.com/hashicorp/consul/api/agent.go
 create mode 100644 vendor/github.com/hashicorp/consul/api/agent_test.go
 create mode 100644 vendor/github.com/hashicorp/consul/api/api.go
 create mode 100644 vendor/github.com/hashicorp/consul/api/api_test.go
 create mode 100644 vendor/github.com/hashicorp/consul/api/catalog.go
 create mode 100644 vendor/github.com/hashicorp/consul/api/catalog_test.go
 create mode 100644 vendor/github.com/hashicorp/consul/api/coordinate.go
 create mode 100644 vendor/github.com/hashicorp/consul/api/coordinate_test.go
 create mode 100644 vendor/github.com/hashicorp/consul/api/event.go
 create mode 100644 vendor/github.com/hashicorp/consul/api/event_test.go
 create mode 100644 vendor/github.com/hashicorp/consul/api/health.go
 create mode 100644 vendor/github.com/hashicorp/consul/api/health_test.go
 create mode 100644 vendor/github.com/hashicorp/consul/api/kv.go
 create mode 100644 vendor/github.com/hashicorp/consul/api/kv_test.go
 create mode 100644 vendor/github.com/hashicorp/consul/api/lock.go
 create mode 100644 vendor/github.com/hashicorp/consul/api/lock_test.go
 create mode 100644 vendor/github.com/hashicorp/consul/api/prepared_query.go
 create mode 100644 vendor/github.com/hashicorp/consul/api/prepared_query_test.go
 create mode 100644 vendor/github.com/hashicorp/consul/api/raw.go
 create mode 100644 vendor/github.com/hashicorp/consul/api/semaphore.go
 create mode 100644 vendor/github.com/hashicorp/consul/api/semaphore_test.go
 create mode 100644 vendor/github.com/hashicorp/consul/api/session.go
 create mode 100644 vendor/github.com/hashicorp/consul/api/session_test.go
 create mode 100644 vendor/github.com/hashicorp/consul/api/status.go
 create mode 100644 vendor/github.com/hashicorp/consul/api/status_test.go
 create mode 100644 vendor/github.com/hashicorp/errwrap/LICENSE
 create mode 100644 vendor/github.com/hashicorp/errwrap/README.md
 create mode 100644 vendor/github.com/hashicorp/errwrap/errwrap.go
 create mode 100644 vendor/github.com/hashicorp/errwrap/errwrap_test.go
 create mode 100644 vendor/github.com/hashicorp/go-checkpoint/LICENSE
 create mode 100644 vendor/github.com/hashicorp/go-checkpoint/README.md
 create mode 100644 vendor/github.com/hashicorp/go-checkpoint/checkpoint.go
 create mode 100644 vendor/github.com/hashicorp/go-checkpoint/checkpoint_test.go
 create mode 100644 vendor/github.com/hashicorp/go-cleanhttp/LICENSE
 create mode 100644 vendor/github.com/hashicorp/go-cleanhttp/README.md
 create mode 100644 vendor/github.com/hashicorp/go-cleanhttp/cleanhttp.go
 create mode 100644 vendor/github.com/hashicorp/go-getter/.travis.yml
 create mode 100644 vendor/github.com/hashicorp/go-getter/LICENSE
 create mode 100644 vendor/github.com/hashicorp/go-getter/README.md
 create mode 100644 vendor/github.com/hashicorp/go-getter/Vagrantfile
 create mode 100644 vendor/github.com/hashicorp/go-getter/client.go
 create mode 100644 vendor/github.com/hashicorp/go-getter/copy_dir.go
 create mode 100644 vendor/github.com/hashicorp/go-getter/decompress.go
 create mode 100644 vendor/github.com/hashicorp/go-getter/decompress_bzip2.go
 create mode 100644 vendor/github.com/hashicorp/go-getter/decompress_bzip2_test.go
 create mode 100644 vendor/github.com/hashicorp/go-getter/decompress_gzip.go
 create mode 100644 vendor/github.com/hashicorp/go-getter/decompress_gzip_test.go
 create mode 100644 vendor/github.com/hashicorp/go-getter/decompress_tbz2.go
 create mode 100644 vendor/github.com/hashicorp/go-getter/decompress_tbz2_test.go
 create mode 100644 vendor/github.com/hashicorp/go-getter/decompress_testing.go
 create mode 100644 vendor/github.com/hashicorp/go-getter/decompress_tgz.go
 create mode 100644 vendor/github.com/hashicorp/go-getter/decompress_tgz_test.go
 create mode 100644 vendor/github.com/hashicorp/go-getter/decompress_zip.go
 create mode 100644 vendor/github.com/hashicorp/go-getter/decompress_zip_test.go
 create mode 100644 vendor/github.com/hashicorp/go-getter/detect.go
 create mode 100644 vendor/github.com/hashicorp/go-getter/detect_bitbucket.go
 create mode 100644 vendor/github.com/hashicorp/go-getter/detect_bitbucket_test.go
 create mode 100644 vendor/github.com/hashicorp/go-getter/detect_file.go
 create mode 100644 vendor/github.com/hashicorp/go-getter/detect_file_test.go
 create mode 100644 vendor/github.com/hashicorp/go-getter/detect_github.go
 create mode 100644 vendor/github.com/hashicorp/go-getter/detect_github_test.go
 create mode 100644 vendor/github.com/hashicorp/go-getter/detect_s3.go
 create mode 100644 vendor/github.com/hashicorp/go-getter/detect_s3_test.go
 create mode 100644 vendor/github.com/hashicorp/go-getter/detect_test.go
 create mode 100644 vendor/github.com/hashicorp/go-getter/folder_storage.go
 create mode 100644 vendor/github.com/hashicorp/go-getter/folder_storage_test.go
 create mode 100644 vendor/github.com/hashicorp/go-getter/get.go
 create mode 100644 vendor/github.com/hashicorp/go-getter/get_file.go
 create mode 100644 vendor/github.com/hashicorp/go-getter/get_file_test.go
 create mode 100644 vendor/github.com/hashicorp/go-getter/get_git.go
 create mode 100644 vendor/github.com/hashicorp/go-getter/get_git_test.go
 create mode 100644 vendor/github.com/hashicorp/go-getter/get_hg.go
 create mode 100644 vendor/github.com/hashicorp/go-getter/get_hg_test.go
 create mode 100644 vendor/github.com/hashicorp/go-getter/get_http.go
 create mode 100644 vendor/github.com/hashicorp/go-getter/get_http_test.go
 create mode 100644 vendor/github.com/hashicorp/go-getter/get_mock.go
 create mode 100644 vendor/github.com/hashicorp/go-getter/get_s3.go
 create mode 100644 vendor/github.com/hashicorp/go-getter/get_s3_test.go
 create mode 100644 vendor/github.com/hashicorp/go-getter/get_test.go
 create mode 100644 vendor/github.com/hashicorp/go-getter/helper/url/url.go
 create mode 100644 vendor/github.com/hashicorp/go-getter/helper/url/url_test.go
 create mode 100644 vendor/github.com/hashicorp/go-getter/helper/url/url_unix.go
 create mode 100644 vendor/github.com/hashicorp/go-getter/helper/url/url_windows.go
 create mode 100644 vendor/github.com/hashicorp/go-getter/module_test.go
 create mode 100644 vendor/github.com/hashicorp/go-getter/source.go
 create mode 100644 vendor/github.com/hashicorp/go-getter/source_test.go
 create mode 100644 vendor/github.com/hashicorp/go-getter/storage.go
 create mode 100644 vendor/github.com/hashicorp/go-getter/test-fixtures/archive.tar.gz
 create mode 100644 vendor/github.com/hashicorp/go-getter/test-fixtures/basic-dot/main.tf
 create mode 100644 vendor/github.com/hashicorp/go-getter/test-fixtures/basic-file-archive/archive.tar.gz
 create mode 100644 vendor/github.com/hashicorp/go-getter/test-fixtures/basic-file/foo.txt
 create mode 100644 vendor/github.com/hashicorp/go-getter/test-fixtures/basic-git/DOTgit/COMMIT_EDITMSG
 create mode 100644 vendor/github.com/hashicorp/go-getter/test-fixtures/basic-git/DOTgit/HEAD
 create mode 100644 vendor/github.com/hashicorp/go-getter/test-fixtures/basic-git/DOTgit/config
 create mode 100644 vendor/github.com/hashicorp/go-getter/test-fixtures/basic-git/DOTgit/description
 create mode 100644 vendor/github.com/hashicorp/go-getter/test-fixtures/basic-git/DOTgit/hooks/applypatch-msg.sample
 create mode 100644 vendor/github.com/hashicorp/go-getter/test-fixtures/basic-git/DOTgit/hooks/commit-msg.sample
 create mode 100644 vendor/github.com/hashicorp/go-getter/test-fixtures/basic-git/DOTgit/hooks/post-update.sample
 create mode 100644 vendor/github.com/hashicorp/go-getter/test-fixtures/basic-git/DOTgit/hooks/pre-applypatch.sample
 create mode 100644 vendor/github.com/hashicorp/go-getter/test-fixtures/basic-git/DOTgit/hooks/pre-commit.sample
 create mode 100644 vendor/github.com/hashicorp/go-getter/test-fixtures/basic-git/DOTgit/hooks/pre-push.sample
 create mode 100644 vendor/github.com/hashicorp/go-getter/test-fixtures/basic-git/DOTgit/hooks/pre-rebase.sample
 create mode 100644 vendor/github.com/hashicorp/go-getter/test-fixtures/basic-git/DOTgit/hooks/prepare-commit-msg.sample
 create mode 100644 vendor/github.com/hashicorp/go-getter/test-fixtures/basic-git/DOTgit/hooks/update.sample
 create mode 100644 vendor/github.com/hashicorp/go-getter/test-fixtures/basic-git/DOTgit/index
 create mode 100644 vendor/github.com/hashicorp/go-getter/test-fixtures/basic-git/DOTgit/info/exclude
 create mode 100644 vendor/github.com/hashicorp/go-getter/test-fixtures/basic-git/DOTgit/logs/HEAD
 create mode 100644 vendor/github.com/hashicorp/go-getter/test-fixtures/basic-git/DOTgit/logs/refs/heads/master
 create mode 100644 vendor/github.com/hashicorp/go-getter/test-fixtures/basic-git/DOTgit/logs/refs/heads/test-branch
 create mode 100644 vendor/github.com/hashicorp/go-getter/test-fixtures/basic-git/DOTgit/objects/14/6492b04efe0aae2b8288c5c0aef6a951030fde
 create mode 100644 vendor/github.com/hashicorp/go-getter/test-fixtures/basic-git/DOTgit/objects/1d/3d6744266642cb7623e2c678c33c77b075c49f
 create mode 100644 vendor/github.com/hashicorp/go-getter/test-fixtures/basic-git/DOTgit/objects/1f/31e97f053caeb5d6b7bffa3faf82941c99efa2
 create mode 100644 vendor/github.com/hashicorp/go-getter/test-fixtures/basic-git/DOTgit/objects/24/3f0fc5c4e586d1a3daa54c981b6f34e9ab1085
 create mode 100644 vendor/github.com/hashicorp/go-getter/test-fixtures/basic-git/DOTgit/objects/25/851303ab930fdacfdb7df91adbf626a483df48
 create mode 100644 vendor/github.com/hashicorp/go-getter/test-fixtures/basic-git/DOTgit/objects/38/30637158f774a20edcc0bf1c4d07b0bf87c43d
 create mode 100644 vendor/github.com/hashicorp/go-getter/test-fixtures/basic-git/DOTgit/objects/40/4618c9d96dfa0a5d365b518e0dfbb5a387c649
 create mode 100644 vendor/github.com/hashicorp/go-getter/test-fixtures/basic-git/DOTgit/objects/49/7bc37401eb3c9b11865b1768725b64066eccee
 create mode 100644 vendor/github.com/hashicorp/go-getter/test-fixtures/basic-git/DOTgit/objects/7b/7614f8759ac8b5e4b02be65ad8e2667be6dd87
 create mode 100644 vendor/github.com/hashicorp/go-getter/test-fixtures/basic-git/DOTgit/objects/8c/1a79ca1f98b6d00f5bf5c6cc9e8d3c092dd3ba
 create mode 100644 vendor/github.com/hashicorp/go-getter/test-fixtures/basic-git/DOTgit/objects/96/43088174e25a9bd91c27970a580af0085c9f32
 create mode 100644 vendor/github.com/hashicorp/go-getter/test-fixtures/basic-git/DOTgit/objects/b7/757b6a3696ad036e9aa2f5b4856d09e7f17993
 create mode 100644 vendor/github.com/hashicorp/go-getter/test-fixtures/basic-git/DOTgit/objects/d2/368a7048e1e919eebaad2a9a275b18cc9c4181
 create mode 100644 vendor/github.com/hashicorp/go-getter/test-fixtures/basic-git/DOTgit/objects/e6/9de29bb2d1d6434b8b29ae775ad8c2e48c5391
 create mode 100644 vendor/github.com/hashicorp/go-getter/test-fixtures/basic-git/DOTgit/objects/e9/65047ad7c57865823c7d992b1d046ea66edf78
 create mode 100644 vendor/github.com/hashicorp/go-getter/test-fixtures/basic-git/DOTgit/refs/heads/master
 create mode 100644 vendor/github.com/hashicorp/go-getter/test-fixtures/basic-git/DOTgit/refs/heads/test-branch
 create mode 100644 vendor/github.com/hashicorp/go-getter/test-fixtures/basic-git/DOTgit/refs/tags/v1.0
 create mode 100644 vendor/github.com/hashicorp/go-getter/test-fixtures/basic-git/foo.txt
 create mode 100644 vendor/github.com/hashicorp/go-getter/test-fixtures/basic-git/main.tf
 create mode 100644 vendor/github.com/hashicorp/go-getter/test-fixtures/basic-git/subdir/sub.tf
 create mode 100644 vendor/github.com/hashicorp/go-getter/test-fixtures/basic-hg/foo.txt
 create mode 100644 vendor/github.com/hashicorp/go-getter/test-fixtures/basic-hg/main.tf
 create mode 100644 vendor/github.com/hashicorp/go-getter/test-fixtures/basic-parent/a/a.tf
 create mode 100644 vendor/github.com/hashicorp/go-getter/test-fixtures/basic-parent/c/c.tf
 create mode 100644 vendor/github.com/hashicorp/go-getter/test-fixtures/basic-parent/main.tf
 create mode 100644 vendor/github.com/hashicorp/go-getter/test-fixtures/basic-subdir/foo/sub/baz/main.tf
 create mode 100644 vendor/github.com/hashicorp/go-getter/test-fixtures/basic-subdir/foo/sub/main.tf
 create mode 100644 vendor/github.com/hashicorp/go-getter/test-fixtures/basic-subdir/main.tf
 create mode 100644 vendor/github.com/hashicorp/go-getter/test-fixtures/basic/foo/main.tf
 create mode 100644 vendor/github.com/hashicorp/go-getter/test-fixtures/basic/main.tf
 create mode 100644 vendor/github.com/hashicorp/go-getter/test-fixtures/basic/subdir/sub.tf
 create mode 100644 vendor/github.com/hashicorp/go-getter/test-fixtures/child/foo/bar/main.tf
 create mode 100644 vendor/github.com/hashicorp/go-getter/test-fixtures/child/foo/main.tf
 create mode 100644 vendor/github.com/hashicorp/go-getter/test-fixtures/child/main.tf
 create mode 100644 vendor/github.com/hashicorp/go-getter/test-fixtures/decompress-bz2/single.bz2
 create mode 100644 vendor/github.com/hashicorp/go-getter/test-fixtures/decompress-gz/single.gz
 create mode 100644 vendor/github.com/hashicorp/go-getter/test-fixtures/decompress-tbz2/empty.tar.bz2
 create mode 100644 vendor/github.com/hashicorp/go-getter/test-fixtures/decompress-tbz2/multiple.tar.bz2
 create mode 100644 vendor/github.com/hashicorp/go-getter/test-fixtures/decompress-tbz2/single.tar.bz2
 create mode 100644 vendor/github.com/hashicorp/go-getter/test-fixtures/decompress-tgz/empty.tar.gz
 create mode 100644 vendor/github.com/hashicorp/go-getter/test-fixtures/decompress-tgz/multiple.tar.gz
 create mode 100644 vendor/github.com/hashicorp/go-getter/test-fixtures/decompress-tgz/single.tar.gz
 create mode 100644 vendor/github.com/hashicorp/go-getter/test-fixtures/decompress-zip/empty.zip
 create mode 100644 vendor/github.com/hashicorp/go-getter/test-fixtures/decompress-zip/multiple.zip
 create mode 100644 vendor/github.com/hashicorp/go-getter/test-fixtures/decompress-zip/single.zip
 create mode 100644 vendor/github.com/hashicorp/go-getter/test-fixtures/dup/foo/main.tf
 create mode 100644 vendor/github.com/hashicorp/go-getter/test-fixtures/dup/main.tf
 create mode 100644 vendor/github.com/hashicorp/go-getter/test-fixtures/git-branch-update/DOTgit-1/COMMIT_EDITMSG
 create mode 100644 vendor/github.com/hashicorp/go-getter/test-fixtures/git-branch-update/DOTgit-1/HEAD
 create mode 100644 vendor/github.com/hashicorp/go-getter/test-fixtures/git-branch-update/DOTgit-1/config
 create mode 100644 vendor/github.com/hashicorp/go-getter/test-fixtures/git-branch-update/DOTgit-1/description
 create mode 100644 vendor/github.com/hashicorp/go-getter/test-fixtures/git-branch-update/DOTgit-1/hooks/applypatch-msg.sample
 create mode 100644 vendor/github.com/hashicorp/go-getter/test-fixtures/git-branch-update/DOTgit-1/hooks/commit-msg.sample
 create mode 100644 vendor/github.com/hashicorp/go-getter/test-fixtures/git-branch-update/DOTgit-1/hooks/post-update.sample
 create mode 100644 vendor/github.com/hashicorp/go-getter/test-fixtures/git-branch-update/DOTgit-1/hooks/pre-applypatch.sample
 create mode 100644 vendor/github.com/hashicorp/go-getter/test-fixtures/git-branch-update/DOTgit-1/hooks/pre-commit.sample
 create mode 100644 vendor/github.com/hashicorp/go-getter/test-fixtures/git-branch-update/DOTgit-1/hooks/pre-push.sample
 create mode 100644 vendor/github.com/hashicorp/go-getter/test-fixtures/git-branch-update/DOTgit-1/hooks/pre-rebase.sample
 create mode 100644 vendor/github.com/hashicorp/go-getter/test-fixtures/git-branch-update/DOTgit-1/hooks/prepare-commit-msg.sample
 create mode 100644 vendor/github.com/hashicorp/go-getter/test-fixtures/git-branch-update/DOTgit-1/hooks/update.sample
 create mode 100644 vendor/github.com/hashicorp/go-getter/test-fixtures/git-branch-update/DOTgit-1/index
 create mode 100644 vendor/github.com/hashicorp/go-getter/test-fixtures/git-branch-update/DOTgit-1/info/exclude
 create mode 100644 vendor/github.com/hashicorp/go-getter/test-fixtures/git-branch-update/DOTgit-1/logs/HEAD
 create mode 100644 vendor/github.com/hashicorp/go-getter/test-fixtures/git-branch-update/DOTgit-1/logs/refs/heads/master
 create mode 100644 vendor/github.com/hashicorp/go-getter/test-fixtures/git-branch-update/DOTgit-1/logs/refs/heads/test-branch
 create mode 100644 vendor/github.com/hashicorp/go-getter/test-fixtures/git-branch-update/DOTgit-1/objects/29/5c5449283c0fe38c75ae73144513240e5626a6
 create mode 100644 vendor/github.com/hashicorp/go-getter/test-fixtures/git-branch-update/DOTgit-1/objects/3e/655aa57060b300052ca598ba18b5752f8c0958
 create mode 100644 vendor/github.com/hashicorp/go-getter/test-fixtures/git-branch-update/DOTgit-1/objects/9a/311b181c4ec0fe4d8856b9fc935a191855742a
 create mode 100644 vendor/github.com/hashicorp/go-getter/test-fixtures/git-branch-update/DOTgit-1/objects/a9/193e348262522550ebacc037f772c52cbfa5d5
 create mode 100644 vendor/github.com/hashicorp/go-getter/test-fixtures/git-branch-update/DOTgit-1/objects/ae/4fc53ed172182cf8ab07d2f087994950a2ccdf
 create mode 100644 vendor/github.com/hashicorp/go-getter/test-fixtures/git-branch-update/DOTgit-1/objects/be/fe5fe7626f1850e202fb35ac375c61e1f27f95
 create mode 100644 vendor/github.com/hashicorp/go-getter/test-fixtures/git-branch-update/DOTgit-1/refs/heads/master
 create mode 100644 vendor/github.com/hashicorp/go-getter/test-fixtures/git-branch-update/DOTgit-1/refs/heads/test-branch
 create mode 100644 vendor/github.com/hashicorp/go-getter/test-fixtures/git-branch-update/DOTgit-2/COMMIT_EDITMSG
 create mode 100644 vendor/github.com/hashicorp/go-getter/test-fixtures/git-branch-update/DOTgit-2/HEAD
 create mode 100644 vendor/github.com/hashicorp/go-getter/test-fixtures/git-branch-update/DOTgit-2/config
 create mode 100644 vendor/github.com/hashicorp/go-getter/test-fixtures/git-branch-update/DOTgit-2/description
 create mode 100644 vendor/github.com/hashicorp/go-getter/test-fixtures/git-branch-update/DOTgit-2/hooks/applypatch-msg.sample
 create mode 100644 vendor/github.com/hashicorp/go-getter/test-fixtures/git-branch-update/DOTgit-2/hooks/commit-msg.sample
 create mode 100644 vendor/github.com/hashicorp/go-getter/test-fixtures/git-branch-update/DOTgit-2/hooks/post-update.sample
 create mode 100644 vendor/github.com/hashicorp/go-getter/test-fixtures/git-branch-update/DOTgit-2/hooks/pre-applypatch.sample
 create mode 100644 vendor/github.com/hashicorp/go-getter/test-fixtures/git-branch-update/DOTgit-2/hooks/pre-commit.sample
 create mode 100644 vendor/github.com/hashicorp/go-getter/test-fixtures/git-branch-update/DOTgit-2/hooks/pre-push.sample
 create mode 100644 vendor/github.com/hashicorp/go-getter/test-fixtures/git-branch-update/DOTgit-2/hooks/pre-rebase.sample
 create mode 100644 vendor/github.com/hashicorp/go-getter/test-fixtures/git-branch-update/DOTgit-2/hooks/prepare-commit-msg.sample
 create mode 100644 vendor/github.com/hashicorp/go-getter/test-fixtures/git-branch-update/DOTgit-2/hooks/update.sample
 create mode 100644 vendor/github.com/hashicorp/go-getter/test-fixtures/git-branch-update/DOTgit-2/index
 create mode 100644 vendor/github.com/hashicorp/go-getter/test-fixtures/git-branch-update/DOTgit-2/info/exclude
 create mode 100644 vendor/github.com/hashicorp/go-getter/test-fixtures/git-branch-update/DOTgit-2/logs/HEAD
 create mode 100644 vendor/github.com/hashicorp/go-getter/test-fixtures/git-branch-update/DOTgit-2/logs/refs/heads/master
 create mode 100644 vendor/github.com/hashicorp/go-getter/test-fixtures/git-branch-update/DOTgit-2/logs/refs/heads/test-branch
 create mode 100644 vendor/github.com/hashicorp/go-getter/test-fixtures/git-branch-update/DOTgit-2/objects/20/9cbd800810a2a1559ba33c48bc75860e967e07
 create mode 100644 vendor/github.com/hashicorp/go-getter/test-fixtures/git-branch-update/DOTgit-2/objects/29/5c5449283c0fe38c75ae73144513240e5626a6
 create mode 100644 vendor/github.com/hashicorp/go-getter/test-fixtures/git-branch-update/DOTgit-2/objects/34/fa5e1fb706dc77cc0b0cf622c82e67ba84e297
 create mode 100644 vendor/github.com/hashicorp/go-getter/test-fixtures/git-branch-update/DOTgit-2/objects/3e/655aa57060b300052ca598ba18b5752f8c0958
 create mode 100644 vendor/github.com/hashicorp/go-getter/test-fixtures/git-branch-update/DOTgit-2/objects/8a/2dfe46d4eb9b54f5aa0e4e536346bdafee3467
 create mode 100644 vendor/github.com/hashicorp/go-getter/test-fixtures/git-branch-update/DOTgit-2/objects/9a/311b181c4ec0fe4d8856b9fc935a191855742a
 create mode 100644 vendor/github.com/hashicorp/go-getter/test-fixtures/git-branch-update/DOTgit-2/objects/a9/193e348262522550ebacc037f772c52cbfa5d5
 create mode 100644 vendor/github.com/hashicorp/go-getter/test-fixtures/git-branch-update/DOTgit-2/objects/ae/4fc53ed172182cf8ab07d2f087994950a2ccdf
 create mode 100644 vendor/github.com/hashicorp/go-getter/test-fixtures/git-branch-update/DOTgit-2/objects/be/fe5fe7626f1850e202fb35ac375c61e1f27f95
 create mode 100644 vendor/github.com/hashicorp/go-getter/test-fixtures/git-branch-update/DOTgit-2/refs/heads/master
 create mode 100644 vendor/github.com/hashicorp/go-getter/test-fixtures/git-branch-update/DOTgit-2/refs/heads/test-branch
 create mode 100644 vendor/github.com/hashicorp/go-getter/test-fixtures/git-branch-update/main.tf
 create mode 100644 vendor/github.com/hashicorp/go-getter/test-fixtures/git-branch-update/main_branch.tf
 create mode 100644 vendor/github.com/hashicorp/go-getter/test-fixtures/git-branch-update/main_branch_update.tf
 create mode 100644 vendor/github.com/hashicorp/go-getter/test-fixtures/validate-bad-output-to-module/child/main.tf
 create mode 100644 vendor/github.com/hashicorp/go-getter/test-fixtures/validate-bad-output-to-module/main.tf
 create mode 100644 vendor/github.com/hashicorp/go-getter/test-fixtures/validate-bad-output/child/main.tf
 create mode 100644 vendor/github.com/hashicorp/go-getter/test-fixtures/validate-bad-output/main.tf
 create mode 100644 vendor/github.com/hashicorp/go-getter/test-fixtures/validate-bad-var/child/main.tf
 create mode 100644 vendor/github.com/hashicorp/go-getter/test-fixtures/validate-bad-var/main.tf
 create mode 100644 vendor/github.com/hashicorp/go-getter/test-fixtures/validate-child-bad/child/main.tf
 create mode 100644 vendor/github.com/hashicorp/go-getter/test-fixtures/validate-child-bad/main.tf
 create mode 100644 vendor/github.com/hashicorp/go-getter/test-fixtures/validate-child-good/child/main.tf
 create mode 100644 vendor/github.com/hashicorp/go-getter/test-fixtures/validate-child-good/main.tf
 create mode 100644 vendor/github.com/hashicorp/go-getter/test-fixtures/validate-required-var/child/main.tf
 create mode 100644 vendor/github.com/hashicorp/go-getter/test-fixtures/validate-required-var/main.tf
 create mode 100644 vendor/github.com/hashicorp/go-getter/test-fixtures/validate-root-bad/main.tf
 create mode 100644 vendor/github.com/hashicorp/go-multierror/LICENSE
 create mode 100644 vendor/github.com/hashicorp/go-multierror/README.md
 create mode 100644 vendor/github.com/hashicorp/go-multierror/append.go
 create mode 100644 vendor/github.com/hashicorp/go-multierror/append_test.go
 create mode 100644 vendor/github.com/hashicorp/go-multierror/flatten.go
 create mode 100644 vendor/github.com/hashicorp/go-multierror/flatten_test.go
 create mode 100644 vendor/github.com/hashicorp/go-multierror/format.go
 create mode 100644 vendor/github.com/hashicorp/go-multierror/format_test.go
 create mode 100644 vendor/github.com/hashicorp/go-multierror/multierror.go
 create mode 100644 vendor/github.com/hashicorp/go-multierror/multierror_test.go
 create mode 100644 vendor/github.com/hashicorp/go-multierror/prefix.go
 create mode 100644 vendor/github.com/hashicorp/go-multierror/prefix_test.go
 create mode 100644 vendor/github.com/hashicorp/go-retryablehttp/.travis.yml
 create mode 100644 vendor/github.com/hashicorp/go-retryablehttp/LICENSE
 create mode 100644 vendor/github.com/hashicorp/go-retryablehttp/Makefile
 create mode 100644 vendor/github.com/hashicorp/go-retryablehttp/README.md
 create mode 100644 vendor/github.com/hashicorp/go-retryablehttp/client.go
 create mode 100644 vendor/github.com/hashicorp/go-retryablehttp/client_test.go
 create mode 100644 vendor/github.com/hashicorp/go-version/.travis.yml
 create mode 100644 vendor/github.com/hashicorp/go-version/LICENSE
 create mode 100644 vendor/github.com/hashicorp/go-version/README.md
 create mode 100644 vendor/github.com/hashicorp/go-version/constraint.go
 create mode 100644 vendor/github.com/hashicorp/go-version/constraint_test.go
 create mode 100644 vendor/github.com/hashicorp/go-version/version.go
 create mode 100644 vendor/github.com/hashicorp/go-version/version_collection.go
 create mode 100644 vendor/github.com/hashicorp/go-version/version_collection_test.go
 create mode 100644 vendor/github.com/hashicorp/go-version/version_test.go
 create mode 100644 vendor/github.com/hashicorp/hcl/.gitignore
 create mode 100644 vendor/github.com/hashicorp/hcl/.travis.yml
 create mode 100644 vendor/github.com/hashicorp/hcl/LICENSE
 create mode 100644 vendor/github.com/hashicorp/hcl/Makefile
 create mode 100644 vendor/github.com/hashicorp/hcl/README.md
 create mode 100644 vendor/github.com/hashicorp/hcl/decoder.go
 create mode 100644 vendor/github.com/hashicorp/hcl/decoder_test.go
 create mode 100644 vendor/github.com/hashicorp/hcl/hcl.go
 create mode 100644 vendor/github.com/hashicorp/hcl/hcl/ast/ast.go
 create mode 100644 vendor/github.com/hashicorp/hcl/hcl/ast/ast_test.go
 create mode 100644 vendor/github.com/hashicorp/hcl/hcl/ast/walk.go
 create mode 100644 vendor/github.com/hashicorp/hcl/hcl/parser/error.go
 create mode 100644 vendor/github.com/hashicorp/hcl/hcl/parser/error_test.go
 create mode 100644 vendor/github.com/hashicorp/hcl/hcl/parser/parser.go
 create mode 100644 vendor/github.com/hashicorp/hcl/hcl/parser/parser_test.go
 create mode 100644 vendor/github.com/hashicorp/hcl/hcl/parser/test-fixtures/array_comment.hcl
 create mode 100644 vendor/github.com/hashicorp/hcl/hcl/parser/test-fixtures/array_comment_2.hcl
 create mode 100644 vendor/github.com/hashicorp/hcl/hcl/parser/test-fixtures/assign_colon.hcl
 create mode 100644 vendor/github.com/hashicorp/hcl/hcl/parser/test-fixtures/assign_deep.hcl
 create mode 100644 vendor/github.com/hashicorp/hcl/hcl/parser/test-fixtures/comment.hcl
 create mode 100644 vendor/github.com/hashicorp/hcl/hcl/parser/test-fixtures/comment_lastline.hcl
 create mode 100644 vendor/github.com/hashicorp/hcl/hcl/parser/test-fixtures/comment_single.hcl
 create mode 100644 vendor/github.com/hashicorp/hcl/hcl/parser/test-fixtures/complex.hcl
 create mode 100644 vendor/github.com/hashicorp/hcl/hcl/parser/test-fixtures/complex_key.hcl
 create mode 100644 vendor/github.com/hashicorp/hcl/hcl/parser/test-fixtures/empty.hcl
 create mode 100644 vendor/github.com/hashicorp/hcl/hcl/parser/test-fixtures/list.hcl
 create mode 100644 vendor/github.com/hashicorp/hcl/hcl/parser/test-fixtures/list_comma.hcl
 create mode 100644 vendor/github.com/hashicorp/hcl/hcl/parser/test-fixtures/missing_braces.hcl
 create mode 100644 vendor/github.com/hashicorp/hcl/hcl/parser/test-fixtures/multiple.hcl
 create mode 100644 vendor/github.com/hashicorp/hcl/hcl/parser/test-fixtures/old.hcl
 create mode 100644 vendor/github.com/hashicorp/hcl/hcl/parser/test-fixtures/structure.hcl
 create mode 100644 vendor/github.com/hashicorp/hcl/hcl/parser/test-fixtures/structure_basic.hcl
 create mode 100644 vendor/github.com/hashicorp/hcl/hcl/parser/test-fixtures/structure_empty.hcl
 create mode 100644 vendor/github.com/hashicorp/hcl/hcl/parser/test-fixtures/types.hcl
 create mode 100644 vendor/github.com/hashicorp/hcl/hcl/printer/nodes.go
 create mode 100644 vendor/github.com/hashicorp/hcl/hcl/printer/printer.go
 create mode 100644 vendor/github.com/hashicorp/hcl/hcl/printer/printer_test.go
 create mode 100644 vendor/github.com/hashicorp/hcl/hcl/printer/testdata/comment.golden
 create mode 100644 vendor/github.com/hashicorp/hcl/hcl/printer/testdata/comment.input
 create mode 100644 vendor/github.com/hashicorp/hcl/hcl/printer/testdata/comment_aligned.golden
 create mode 100644 vendor/github.com/hashicorp/hcl/hcl/printer/testdata/comment_aligned.input
 create mode 100644 vendor/github.com/hashicorp/hcl/hcl/printer/testdata/comment_standalone.golden
 create mode 100644 vendor/github.com/hashicorp/hcl/hcl/printer/testdata/comment_standalone.input
 create mode 100644 vendor/github.com/hashicorp/hcl/hcl/printer/testdata/complexhcl.golden
 create mode 100644 vendor/github.com/hashicorp/hcl/hcl/printer/testdata/complexhcl.input
 create mode 100644 vendor/github.com/hashicorp/hcl/hcl/printer/testdata/list.golden
 create mode 100644 vendor/github.com/hashicorp/hcl/hcl/printer/testdata/list.input
 create mode 100644 vendor/github.com/hashicorp/hcl/hcl/scanner/scanner.go
 create mode 100644 vendor/github.com/hashicorp/hcl/hcl/scanner/scanner_test.go
 create mode 100644 vendor/github.com/hashicorp/hcl/hcl/strconv/quote.go
 create mode 100644 vendor/github.com/hashicorp/hcl/hcl/strconv/quote_test.go
 create mode 100644 vendor/github.com/hashicorp/hcl/hcl/test-fixtures/array_comment.hcl
 create mode 100644 vendor/github.com/hashicorp/hcl/hcl/test-fixtures/assign_colon.hcl
 create mode 100644 vendor/github.com/hashicorp/hcl/hcl/test-fixtures/assign_deep.hcl
 create mode 100644 vendor/github.com/hashicorp/hcl/hcl/test-fixtures/comment.hcl
 create mode 100644 vendor/github.com/hashicorp/hcl/hcl/test-fixtures/comment_single.hcl
 create mode 100644 vendor/github.com/hashicorp/hcl/hcl/test-fixtures/complex.hcl
 create mode 100644 vendor/github.com/hashicorp/hcl/hcl/test-fixtures/complex_key.hcl
 create mode 100644 vendor/github.com/hashicorp/hcl/hcl/test-fixtures/empty.hcl
 create mode 100644 vendor/github.com/hashicorp/hcl/hcl/test-fixtures/list.hcl
 create mode 100644 vendor/github.com/hashicorp/hcl/hcl/test-fixtures/list_comma.hcl
 create mode 100644 vendor/github.com/hashicorp/hcl/hcl/test-fixtures/multiple.hcl
 create mode 100644 vendor/github.com/hashicorp/hcl/hcl/test-fixtures/old.hcl
 create mode 100644 vendor/github.com/hashicorp/hcl/hcl/test-fixtures/structure.hcl
 create mode 100644 vendor/github.com/hashicorp/hcl/hcl/test-fixtures/structure_basic.hcl
 create mode 100644 vendor/github.com/hashicorp/hcl/hcl/test-fixtures/structure_empty.hcl
 create mode 100644 vendor/github.com/hashicorp/hcl/hcl/test-fixtures/types.hcl
 create mode 100644 vendor/github.com/hashicorp/hcl/hcl/token/position.go
 create mode 100644 vendor/github.com/hashicorp/hcl/hcl/token/token.go
 create mode 100644 vendor/github.com/hashicorp/hcl/hcl/token/token_test.go
 create mode 100644 vendor/github.com/hashicorp/hcl/hcl_test.go
 create mode 100644 vendor/github.com/hashicorp/hcl/json/parser/flatten.go
 create mode 100644 vendor/github.com/hashicorp/hcl/json/parser/parser.go
 create mode 100644 vendor/github.com/hashicorp/hcl/json/parser/parser_test.go
 create mode 100644 vendor/github.com/hashicorp/hcl/json/parser/test-fixtures/array.json
 create mode 100644 vendor/github.com/hashicorp/hcl/json/parser/test-fixtures/basic.json
 create mode 100644 vendor/github.com/hashicorp/hcl/json/parser/test-fixtures/object.json
 create mode 100644 vendor/github.com/hashicorp/hcl/json/parser/test-fixtures/types.json
 create mode 100644 vendor/github.com/hashicorp/hcl/json/scanner/scanner.go
 create mode 100644 vendor/github.com/hashicorp/hcl/json/scanner/scanner_test.go
 create mode 100644 vendor/github.com/hashicorp/hcl/json/test-fixtures/array.json
 create mode 100644 vendor/github.com/hashicorp/hcl/json/test-fixtures/basic.json
 create mode 100644 vendor/github.com/hashicorp/hcl/json/test-fixtures/object.json
 create mode 100644 vendor/github.com/hashicorp/hcl/json/test-fixtures/types.json
 create mode 100644 vendor/github.com/hashicorp/hcl/json/token/position.go
 create mode 100644 vendor/github.com/hashicorp/hcl/json/token/token.go
 create mode 100644 vendor/github.com/hashicorp/hcl/json/token/token_test.go
 create mode 100644 vendor/github.com/hashicorp/hcl/lex.go
 create mode 100644 vendor/github.com/hashicorp/hcl/lex_test.go
 create mode 100644 vendor/github.com/hashicorp/hcl/parse.go
 create mode 100644 vendor/github.com/hashicorp/hcl/test-fixtures/basic.hcl
 create mode 100644 vendor/github.com/hashicorp/hcl/test-fixtures/basic.json
 create mode 100644 vendor/github.com/hashicorp/hcl/test-fixtures/basic_int_string.hcl
 create mode 100644 vendor/github.com/hashicorp/hcl/test-fixtures/basic_squish.hcl
 create mode 100644 vendor/github.com/hashicorp/hcl/test-fixtures/decode_policy.hcl
 create mode 100644 vendor/github.com/hashicorp/hcl/test-fixtures/decode_policy.json
 create mode 100644 vendor/github.com/hashicorp/hcl/test-fixtures/decode_tf_variable.hcl
 create mode 100644 vendor/github.com/hashicorp/hcl/test-fixtures/decode_tf_variable.json
 create mode 100644 vendor/github.com/hashicorp/hcl/test-fixtures/empty.hcl
 create mode 100644 vendor/github.com/hashicorp/hcl/test-fixtures/escape.hcl
 create mode 100644 vendor/github.com/hashicorp/hcl/test-fixtures/flat.hcl
 create mode 100644 vendor/github.com/hashicorp/hcl/test-fixtures/float.hcl
 create mode 100644 vendor/github.com/hashicorp/hcl/test-fixtures/float.json
 create mode 100644 vendor/github.com/hashicorp/hcl/test-fixtures/interpolate_escape.hcl
 create mode 100644 vendor/github.com/hashicorp/hcl/test-fixtures/multiline.hcl
 create mode 100644 vendor/github.com/hashicorp/hcl/test-fixtures/multiline.json
 create mode 100644 vendor/github.com/hashicorp/hcl/test-fixtures/multiline_bad.hcl
 create mode 100644 vendor/github.com/hashicorp/hcl/test-fixtures/multiline_no_eof.hcl
 create mode 100644 vendor/github.com/hashicorp/hcl/test-fixtures/multiline_no_marker.hcl
 create mode 100644 vendor/github.com/hashicorp/hcl/test-fixtures/nested_block_comment.hcl
 create mode 100644 vendor/github.com/hashicorp/hcl/test-fixtures/object_list.json
 create mode 100644 vendor/github.com/hashicorp/hcl/test-fixtures/scientific.hcl
 create mode 100644 vendor/github.com/hashicorp/hcl/test-fixtures/scientific.json
 create mode 100644 vendor/github.com/hashicorp/hcl/test-fixtures/slice_expand.hcl
 create mode 100644 vendor/github.com/hashicorp/hcl/test-fixtures/structure.hcl
 create mode 100644 vendor/github.com/hashicorp/hcl/test-fixtures/structure.json
 create mode 100644 vendor/github.com/hashicorp/hcl/test-fixtures/structure2.hcl
 create mode 100644 vendor/github.com/hashicorp/hcl/test-fixtures/structure2.json
 create mode 100644 vendor/github.com/hashicorp/hcl/test-fixtures/structure_flat.json
 create mode 100644 vendor/github.com/hashicorp/hcl/test-fixtures/structure_flatmap.hcl
 create mode 100644 vendor/github.com/hashicorp/hcl/test-fixtures/structure_list.hcl
 create mode 100644 vendor/github.com/hashicorp/hcl/test-fixtures/structure_list.json
 create mode 100644 vendor/github.com/hashicorp/hcl/test-fixtures/structure_list_deep.json
 create mode 100644 vendor/github.com/hashicorp/hcl/test-fixtures/structure_multi.hcl
 create mode 100644 vendor/github.com/hashicorp/hcl/test-fixtures/structure_multi.json
 create mode 100644 vendor/github.com/hashicorp/hcl/test-fixtures/terraform_heroku.hcl
 create mode 100644 vendor/github.com/hashicorp/hcl/test-fixtures/terraform_heroku.json
 create mode 100644 vendor/github.com/hashicorp/hcl/test-fixtures/tfvars.hcl
 create mode 100644 vendor/github.com/hashicorp/hcl/test-fixtures/unterminated_block_comment.hcl
 create mode 100644 vendor/github.com/hashicorp/logutils/.gitignore
 create mode 100644 vendor/github.com/hashicorp/logutils/LICENSE
 create mode 100644 vendor/github.com/hashicorp/logutils/README.md
 create mode 100644 vendor/github.com/hashicorp/logutils/level.go
 create mode 100644 vendor/github.com/hashicorp/logutils/level_benchmark_test.go
 create mode 100644 vendor/github.com/hashicorp/logutils/level_test.go
 create mode 100644 vendor/github.com/hashicorp/serf/coordinate/README.md
 create mode 100644 vendor/github.com/hashicorp/serf/coordinate/client.go
 create mode 100644 vendor/github.com/hashicorp/serf/coordinate/client_test.go
 create mode 100644 vendor/github.com/hashicorp/serf/coordinate/config.go
 create mode 100644 vendor/github.com/hashicorp/serf/coordinate/coordinate.go
 create mode 100644 vendor/github.com/hashicorp/serf/coordinate/coordinate_test.go
 create mode 100644 vendor/github.com/hashicorp/serf/coordinate/performance_test.go
 create mode 100644 vendor/github.com/hashicorp/serf/coordinate/phantom.go
 create mode 100644 vendor/github.com/hashicorp/serf/coordinate/test_util.go
 create mode 100644 vendor/github.com/hashicorp/yamux/.gitignore
 create mode 100644 vendor/github.com/hashicorp/yamux/LICENSE
 create mode 100644 vendor/github.com/hashicorp/yamux/README.md
 create mode 100644 vendor/github.com/hashicorp/yamux/addr.go
 create mode 100644 vendor/github.com/hashicorp/yamux/bench_test.go
 create mode 100644 vendor/github.com/hashicorp/yamux/const.go
 create mode 100644 vendor/github.com/hashicorp/yamux/const_test.go
 create mode 100644 vendor/github.com/hashicorp/yamux/mux.go
 create mode 100644 vendor/github.com/hashicorp/yamux/session.go
 create mode 100644 vendor/github.com/hashicorp/yamux/session_test.go
 create mode 100644 vendor/github.com/hashicorp/yamux/spec.md
 create mode 100644 vendor/github.com/hashicorp/yamux/stream.go
 create mode 100644 vendor/github.com/hashicorp/yamux/util.go
 create mode 100644 vendor/github.com/hashicorp/yamux/util_test.go
 create mode 100644 vendor/github.com/hmrc/vmware-govcd/.editorconfig
 create mode 100644 vendor/github.com/hmrc/vmware-govcd/.gitignore
 create mode 100644 vendor/github.com/hmrc/vmware-govcd/.travis.yml
 create mode 100644 vendor/github.com/hmrc/vmware-govcd/LICENSE
 create mode 100644 vendor/github.com/hmrc/vmware-govcd/README.md
 create mode 100644 vendor/github.com/hmrc/vmware-govcd/api.go
 create mode 100644 vendor/github.com/hmrc/vmware-govcd/api_test.go
 create mode 100644 vendor/github.com/hmrc/vmware-govcd/api_vca.go
 create mode 100644 vendor/github.com/hmrc/vmware-govcd/api_vca_test.go
 create mode 100644 vendor/github.com/hmrc/vmware-govcd/api_vcd.go
 create mode 100644 vendor/github.com/hmrc/vmware-govcd/api_vcd_test.go
 create mode 100644 vendor/github.com/hmrc/vmware-govcd/catalog.go
 create mode 100644 vendor/github.com/hmrc/vmware-govcd/catalog_test.go
 create mode 100644 vendor/github.com/hmrc/vmware-govcd/catalogitem.go
 create mode 100644 vendor/github.com/hmrc/vmware-govcd/catalogitem_test.go
 create mode 100644 vendor/github.com/hmrc/vmware-govcd/edgegateway.go
 create mode 100644 vendor/github.com/hmrc/vmware-govcd/edgegateway_test.go
 create mode 100644 vendor/github.com/hmrc/vmware-govcd/org.go
 create mode 100644 vendor/github.com/hmrc/vmware-govcd/org_test.go
 create mode 100644 vendor/github.com/hmrc/vmware-govcd/orgvdcnetwork.go
 create mode 100644 vendor/github.com/hmrc/vmware-govcd/orgvdcnetwork_test.go
 create mode 100644 vendor/github.com/hmrc/vmware-govcd/script/coverage
 create mode 100644 vendor/github.com/hmrc/vmware-govcd/task.go
 create mode 100644 vendor/github.com/hmrc/vmware-govcd/task_test.go
 create mode 100644 vendor/github.com/hmrc/vmware-govcd/testutil/http.go
 create mode 100644 vendor/github.com/hmrc/vmware-govcd/types/v56/types.go
 create mode 100644 vendor/github.com/hmrc/vmware-govcd/vapp.go
 create mode 100644 vendor/github.com/hmrc/vmware-govcd/vapp_test.go
 create mode 100644 vendor/github.com/hmrc/vmware-govcd/vapptemplate.go
 create mode 100644 vendor/github.com/hmrc/vmware-govcd/vapptemplate_test.go
 create mode 100644 vendor/github.com/hmrc/vmware-govcd/vdc.go
 create mode 100644 vendor/github.com/hmrc/vmware-govcd/vdc_test.go
 create mode 100644 vendor/github.com/imdario/mergo/.travis.yml
 create mode 100644 vendor/github.com/imdario/mergo/LICENSE
 create mode 100644 vendor/github.com/imdario/mergo/README.md
 create mode 100644 vendor/github.com/imdario/mergo/doc.go
 create mode 100644 vendor/github.com/imdario/mergo/issue17_test.go
 create mode 100644 vendor/github.com/imdario/mergo/map.go
 create mode 100644 vendor/github.com/imdario/mergo/merge.go
 create mode 100644 vendor/github.com/imdario/mergo/mergo.go
 create mode 100644 vendor/github.com/imdario/mergo/mergo_test.go
 create mode 100644 vendor/github.com/imdario/mergo/testdata/license.yml
 create mode 100644 vendor/github.com/imdario/mergo/testdata/thing.yml
 create mode 100644 vendor/github.com/jmespath/go-jmespath/.gitignore
 create mode 100644 vendor/github.com/jmespath/go-jmespath/.travis.yml
 create mode 100644 vendor/github.com/jmespath/go-jmespath/LICENSE
 create mode 100644 vendor/github.com/jmespath/go-jmespath/Makefile
 create mode 100644 vendor/github.com/jmespath/go-jmespath/README.md
 create mode 100644 vendor/github.com/jmespath/go-jmespath/api.go
 create mode 100644 vendor/github.com/jmespath/go-jmespath/api_test.go
 create mode 100644 vendor/github.com/jmespath/go-jmespath/astnodetype_string.go
 create mode 100644 vendor/github.com/jmespath/go-jmespath/cmd/jpgo/main.go
 create mode 100644 vendor/github.com/jmespath/go-jmespath/compliance/basic.json
 create mode 100644 vendor/github.com/jmespath/go-jmespath/compliance/boolean.json
 create mode 100644 vendor/github.com/jmespath/go-jmespath/compliance/current.json
 create mode 100644 vendor/github.com/jmespath/go-jmespath/compliance/escape.json
 create mode 100644 vendor/github.com/jmespath/go-jmespath/compliance/filters.json
 create mode 100644 vendor/github.com/jmespath/go-jmespath/compliance/functions.json
 create mode 100644 vendor/github.com/jmespath/go-jmespath/compliance/identifiers.json
 create mode 100644 vendor/github.com/jmespath/go-jmespath/compliance/indices.json
 create mode 100644 vendor/github.com/jmespath/go-jmespath/compliance/literal.json
 create mode 100644 vendor/github.com/jmespath/go-jmespath/compliance/multiselect.json
 create mode 100644 vendor/github.com/jmespath/go-jmespath/compliance/ormatch.json
 create mode 100644 vendor/github.com/jmespath/go-jmespath/compliance/pipe.json
 create mode 100644 vendor/github.com/jmespath/go-jmespath/compliance/slice.json
 create mode 100644 vendor/github.com/jmespath/go-jmespath/compliance/syntax.json
 create mode 100644 vendor/github.com/jmespath/go-jmespath/compliance/unicode.json
 create mode 100644 vendor/github.com/jmespath/go-jmespath/compliance/wildcard.json
 create mode 100644 vendor/github.com/jmespath/go-jmespath/compliance_test.go
 create mode 100644 vendor/github.com/jmespath/go-jmespath/functions.go
 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-1
 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-10
 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-100
 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-101
 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-102
 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-103
 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-104
 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-105
 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-106
 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-107
 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-108
 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-109
 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-110
 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-112
 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-115
 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-118
 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-119
 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-12
 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-120
 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-121
 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-122
 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-123
 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-126
 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-128
 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-129
 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-13
 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-130
 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-131
 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-132
 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-133
 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-134
 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-135
 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-136
 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-137
 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-138
 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-139
 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-14
 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-140
 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-141
 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-142
 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-143
 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-144
 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-145
 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-146
 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-147
 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-148
 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-149
 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-15
 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-150
 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-151
 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-152
 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-153
 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-155
 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-156
 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-157
 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-158
 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-159
 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-16
 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-160
 create mode 100644
vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-161 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-162 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-163 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-164 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-165 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-166 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-167 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-168 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-169 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-17 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-170 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-171 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-172 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-173 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-174 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-175 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-178 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-179 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-18 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-180 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-181 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-182 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-183 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-184 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-185 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-186 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-187 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-188 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-189 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-19 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-190 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-191 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-192 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-193 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-194 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-195 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-196 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-198 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-199 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-2 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-20 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-200 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-201 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-202 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-203 create mode 100644 
vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-204 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-205 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-206 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-207 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-208 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-209 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-21 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-210 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-211 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-212 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-213 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-214 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-215 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-216 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-217 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-218 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-219 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-22 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-220 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-221 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-222 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-223 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-224 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-225 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-226 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-227 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-228 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-229 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-23 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-230 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-231 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-232 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-233 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-234 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-235 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-236 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-237 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-238 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-239 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-24 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-240 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-241 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-242 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-243 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-244 create mode 100644 
vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-245 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-246 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-247 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-248 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-249 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-25 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-250 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-251 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-252 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-253 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-254 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-255 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-256 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-257 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-258 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-259 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-26 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-260 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-261 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-262 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-263 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-264 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-265 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-266 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-267 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-268 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-269 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-27 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-270 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-271 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-272 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-273 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-274 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-275 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-276 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-277 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-278 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-279 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-28 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-280 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-281 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-282 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-283 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-284 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-285 create mode 100644 
vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-286 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-287 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-288 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-289 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-29 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-290 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-291 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-292 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-293 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-294 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-295 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-296 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-297 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-298 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-299 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-3 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-30 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-300 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-301 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-302 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-303 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-304 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-305 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-306 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-307 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-308 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-309 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-31 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-310 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-311 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-312 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-313 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-314 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-315 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-316 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-317 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-318 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-319 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-32 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-320 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-321 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-322 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-323 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-324 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-325 create mode 100644 
vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-326 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-327 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-328 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-329 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-33 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-330 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-331 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-332 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-333 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-334 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-335 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-336 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-337 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-338 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-339 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-34 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-340 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-341 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-342 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-343 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-344 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-345 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-346 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-347 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-348 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-349 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-35 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-350 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-351 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-352 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-353 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-354 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-355 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-356 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-357 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-358 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-359 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-36 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-360 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-361 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-362 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-363 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-364 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-365 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-366 create mode 100644 
vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-367 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-368 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-369 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-37 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-370 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-371 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-372 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-373 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-374 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-375 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-376 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-377 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-378 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-379 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-38 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-380 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-381 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-382 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-383 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-384 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-385 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-386 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-387 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-388 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-389 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-39 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-390 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-391 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-392 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-393 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-394 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-395 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-396 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-397 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-398 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-399 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-4 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-40 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-400 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-401 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-402 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-403 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-404 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-405 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-406 create mode 100644 
vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-407 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-408 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-409 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-41 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-410 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-411 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-412 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-413 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-414 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-415 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-416 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-417 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-418 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-419 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-42 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-420 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-421 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-422 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-423 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-424 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-425 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-426 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-427 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-428 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-429 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-43 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-430 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-431 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-432 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-433 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-434 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-435 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-436 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-437 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-438 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-439 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-44 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-440 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-441 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-442 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-443 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-444 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-445 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-446 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-447 create mode 100644 
vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-448 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-449 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-45 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-450 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-451 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-452 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-453 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-454 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-455 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-456 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-457 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-458 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-459 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-46 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-460 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-461 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-462 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-463 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-464 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-465 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-466 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-467 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-468 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-469 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-47 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-470 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-471 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-472 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-473 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-474 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-475 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-476 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-477 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-478 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-479 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-48 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-480 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-481 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-482 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-483 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-484 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-485 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-486 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-487 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-488 create mode 100644 
vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-489 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-49 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-490 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-491 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-492 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-493 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-494 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-495 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-496 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-497 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-498 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-499 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-5 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-50 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-500 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-501 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-502 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-503 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-504 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-505 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-506 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-507 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-508 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-509 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-51 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-510 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-511 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-512 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-513 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-514 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-515 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-516 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-517 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-518 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-519 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-52 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-520 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-521 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-522 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-523 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-524 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-525 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-526 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-527 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-528 create mode 100644 
vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-529 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-53 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-530 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-531 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-532 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-533 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-534 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-535 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-536 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-537 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-538 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-539 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-54 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-540 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-541 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-542 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-543 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-544 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-545 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-546 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-547 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-548 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-549 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-55 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-550 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-551 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-552 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-553 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-554 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-555 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-556 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-557 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-558 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-559 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-56 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-560 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-561 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-562 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-563 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-564 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-565 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-566 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-567 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-568 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-569 create mode 100644 
vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-57 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-570 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-571 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-572 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-573 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-574 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-575 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-576 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-577 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-578 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-579 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-58 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-580 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-581 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-582 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-583 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-584 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-585 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-586 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-587 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-588 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-589 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-59 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-590 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-591 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-592 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-593 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-594 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-595 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-596 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-597 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-598 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-599 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-6 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-60 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-600 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-601 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-602 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-603 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-604 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-605 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-606 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-607 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-608 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-609 create mode 100644 
vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-61 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-610 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-611 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-612 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-613 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-614 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-615 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-616 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-617 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-618 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-619 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-62 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-620 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-621 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-622 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-623 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-624 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-625 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-626 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-627 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-628 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-629 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-63 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-630 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-631 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-632 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-633 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-634 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-635 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-636 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-637 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-638 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-639 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-64 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-640 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-641 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-642 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-643 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-644 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-645 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-646 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-647 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-648 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-649 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-65 create mode 100644 
vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-650 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-651 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-652 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-653 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-654 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-655 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-656 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-66 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-67 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-68 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-69 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-7 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-70 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-71 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-72 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-73 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-74 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-75 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-76 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-77 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-78 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-79 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-8 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-80 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-81 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-82 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-83 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-84 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-85 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-86 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-87 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-88 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-89 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-9 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-90 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-91 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-92 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-93 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-94 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-95 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-96 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-97 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-98 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/jmespath.go create mode 100644 vendor/github.com/jmespath/go-jmespath/interpreter.go create mode 100644 
vendor/github.com/jmespath/go-jmespath/interpreter_test.go create mode 100644 vendor/github.com/jmespath/go-jmespath/lexer.go create mode 100644 vendor/github.com/jmespath/go-jmespath/lexer_test.go create mode 100644 vendor/github.com/jmespath/go-jmespath/parser.go create mode 100644 vendor/github.com/jmespath/go-jmespath/parser_test.go create mode 100644 vendor/github.com/jmespath/go-jmespath/toktype_string.go create mode 100644 vendor/github.com/jmespath/go-jmespath/util.go create mode 100644 vendor/github.com/jmespath/go-jmespath/util_test.go create mode 100644 vendor/github.com/kardianos/osext/LICENSE create mode 100644 vendor/github.com/kardianos/osext/README.md create mode 100644 vendor/github.com/kardianos/osext/osext.go create mode 100644 vendor/github.com/kardianos/osext/osext_plan9.go create mode 100644 vendor/github.com/kardianos/osext/osext_procfs.go create mode 100644 vendor/github.com/kardianos/osext/osext_sysctl.go create mode 100644 vendor/github.com/kardianos/osext/osext_test.go create mode 100644 vendor/github.com/kardianos/osext/osext_windows.go create mode 100644 vendor/github.com/lib/pq/.gitignore create mode 100644 vendor/github.com/lib/pq/.travis.yml create mode 100644 vendor/github.com/lib/pq/CONTRIBUTING.md create mode 100644 vendor/github.com/lib/pq/LICENSE.md create mode 100644 vendor/github.com/lib/pq/README.md create mode 100644 vendor/github.com/lib/pq/bench_test.go create mode 100644 vendor/github.com/lib/pq/buf.go create mode 100644 vendor/github.com/lib/pq/certs/README create mode 100644 vendor/github.com/lib/pq/certs/postgresql.crt create mode 100644 vendor/github.com/lib/pq/certs/postgresql.key create mode 100644 vendor/github.com/lib/pq/certs/root.crt create mode 100644 vendor/github.com/lib/pq/certs/server.crt create mode 100644 vendor/github.com/lib/pq/certs/server.key create mode 100644 vendor/github.com/lib/pq/conn.go create mode 100644 vendor/github.com/lib/pq/conn_test.go create mode 100644 vendor/github.com/lib/pq/copy.go create mode 100644 vendor/github.com/lib/pq/copy_test.go create mode 100644 vendor/github.com/lib/pq/doc.go create mode 100644 vendor/github.com/lib/pq/encode.go create mode 100644 vendor/github.com/lib/pq/encode_test.go create mode 100644 vendor/github.com/lib/pq/error.go create mode 100644 vendor/github.com/lib/pq/hstore/hstore.go create mode 100644 vendor/github.com/lib/pq/hstore/hstore_test.go create mode 100644 vendor/github.com/lib/pq/listen_example/doc.go create mode 100644 vendor/github.com/lib/pq/notify.go create mode 100644 vendor/github.com/lib/pq/notify_test.go create mode 100644 vendor/github.com/lib/pq/oid/doc.go create mode 100644 vendor/github.com/lib/pq/oid/gen.go create mode 100644 vendor/github.com/lib/pq/oid/types.go create mode 100644 vendor/github.com/lib/pq/ssl_test.go create mode 100644 vendor/github.com/lib/pq/url.go create mode 100644 vendor/github.com/lib/pq/url_test.go create mode 100644 vendor/github.com/lib/pq/user_posix.go create mode 100644 vendor/github.com/lib/pq/user_windows.go create mode 100644 vendor/github.com/lusis/go-artifactory/src/artifactory.v401/api.go create mode 100644 vendor/github.com/lusis/go-artifactory/src/artifactory.v401/api_test.go create mode 100644 vendor/github.com/lusis/go-artifactory/src/artifactory.v401/archive.go create mode 100644 vendor/github.com/lusis/go-artifactory/src/artifactory.v401/archive_test.go create mode 100644 vendor/github.com/lusis/go-artifactory/src/artifactory.v401/artifact.go create mode 100644 
vendor/github.com/lusis/go-artifactory/src/artifactory.v401/artifact_test.go create mode 100644 vendor/github.com/lusis/go-artifactory/src/artifactory.v401/assets/test/error.json create mode 100644 vendor/github.com/lusis/go-artifactory/src/artifactory.v401/assets/test/groups.json create mode 100644 vendor/github.com/lusis/go-artifactory/src/artifactory.v401/assets/test/license_information.json create mode 100644 vendor/github.com/lusis/go-artifactory/src/artifactory.v401/assets/test/local_repository_config.json create mode 100644 vendor/github.com/lusis/go-artifactory/src/artifactory.v401/assets/test/permissions.json create mode 100644 vendor/github.com/lusis/go-artifactory/src/artifactory.v401/assets/test/permissions_details.json create mode 100644 vendor/github.com/lusis/go-artifactory/src/artifactory.v401/assets/test/remote_repository_config.json create mode 100644 vendor/github.com/lusis/go-artifactory/src/artifactory.v401/assets/test/repositories.json create mode 100644 vendor/github.com/lusis/go-artifactory/src/artifactory.v401/assets/test/single_group.json create mode 100644 vendor/github.com/lusis/go-artifactory/src/artifactory.v401/assets/test/single_user.json create mode 100644 vendor/github.com/lusis/go-artifactory/src/artifactory.v401/assets/test/users.json create mode 100644 vendor/github.com/lusis/go-artifactory/src/artifactory.v401/assets/test/virtual_repository_config.json create mode 100644 vendor/github.com/lusis/go-artifactory/src/artifactory.v401/bintray.go create mode 100644 vendor/github.com/lusis/go-artifactory/src/artifactory.v401/bintray_test.go create mode 100644 vendor/github.com/lusis/go-artifactory/src/artifactory.v401/build.go create mode 100644 vendor/github.com/lusis/go-artifactory/src/artifactory.v401/build_test.go create mode 100644 vendor/github.com/lusis/go-artifactory/src/artifactory.v401/client.go create mode 100644 vendor/github.com/lusis/go-artifactory/src/artifactory.v401/client_test.go create mode 100644 vendor/github.com/lusis/go-artifactory/src/artifactory.v401/compliance.go create mode 100644 vendor/github.com/lusis/go-artifactory/src/artifactory.v401/compliance_test.go create mode 100644 vendor/github.com/lusis/go-artifactory/src/artifactory.v401/errors.go create mode 100644 vendor/github.com/lusis/go-artifactory/src/artifactory.v401/groups.go create mode 100644 vendor/github.com/lusis/go-artifactory/src/artifactory.v401/groups_test.go create mode 100644 vendor/github.com/lusis/go-artifactory/src/artifactory.v401/http.go create mode 100644 vendor/github.com/lusis/go-artifactory/src/artifactory.v401/http_test.go create mode 100644 vendor/github.com/lusis/go-artifactory/src/artifactory.v401/license.go create mode 100644 vendor/github.com/lusis/go-artifactory/src/artifactory.v401/mimetypes.go create mode 100644 vendor/github.com/lusis/go-artifactory/src/artifactory.v401/mimetypes_test.go create mode 100644 vendor/github.com/lusis/go-artifactory/src/artifactory.v401/permissions_targets.go create mode 100644 vendor/github.com/lusis/go-artifactory/src/artifactory.v401/permissions_targets_test.go create mode 100644 vendor/github.com/lusis/go-artifactory/src/artifactory.v401/repos.go create mode 100644 vendor/github.com/lusis/go-artifactory/src/artifactory.v401/repos_test.go create mode 100644 vendor/github.com/lusis/go-artifactory/src/artifactory.v401/responses.go create mode 100644 vendor/github.com/lusis/go-artifactory/src/artifactory.v401/responses_test.go create mode 100644 vendor/github.com/lusis/go-artifactory/src/artifactory.v401/search.go 
create mode 100644 vendor/github.com/lusis/go-artifactory/src/artifactory.v401/search_test.go create mode 100644 vendor/github.com/lusis/go-artifactory/src/artifactory.v401/security.go create mode 100644 vendor/github.com/lusis/go-artifactory/src/artifactory.v401/security_test.go create mode 100644 vendor/github.com/lusis/go-artifactory/src/artifactory.v401/storage.go create mode 100644 vendor/github.com/lusis/go-artifactory/src/artifactory.v401/storage_test.go create mode 100644 vendor/github.com/lusis/go-artifactory/src/artifactory.v401/system.go create mode 100644 vendor/github.com/lusis/go-artifactory/src/artifactory.v401/system_test.go create mode 100644 vendor/github.com/lusis/go-artifactory/src/artifactory.v401/users.go create mode 100644 vendor/github.com/lusis/go-artifactory/src/artifactory.v401/users_test.go create mode 100644 vendor/github.com/lusis/go-artifactory/src/artifactory.v401/version.go create mode 100644 vendor/github.com/lusis/go-artifactory/src/artifactory.v401/version_test.go create mode 100644 vendor/github.com/masterzen/simplexml/dom/document.go create mode 100644 vendor/github.com/masterzen/simplexml/dom/dom_test.go create mode 100644 vendor/github.com/masterzen/simplexml/dom/element.go create mode 100644 vendor/github.com/masterzen/simplexml/dom/namespace.go create mode 100644 vendor/github.com/masterzen/winrm/soap/header.go create mode 100644 vendor/github.com/masterzen/winrm/soap/header_test.go create mode 100644 vendor/github.com/masterzen/winrm/soap/message.go create mode 100644 vendor/github.com/masterzen/winrm/soap/namespaces.go create mode 100644 vendor/github.com/masterzen/winrm/soap/namespaces_test.go create mode 100644 vendor/github.com/masterzen/winrm/winrm/client.go create mode 100644 vendor/github.com/masterzen/winrm/winrm/client_test.go create mode 100644 vendor/github.com/masterzen/winrm/winrm/command.go create mode 100644 vendor/github.com/masterzen/winrm/winrm/command_test.go create mode 100644 vendor/github.com/masterzen/winrm/winrm/endpoint.go create mode 100644 vendor/github.com/masterzen/winrm/winrm/endpoint_test.go create mode 100644 vendor/github.com/masterzen/winrm/winrm/fixture_test.go create mode 100644 vendor/github.com/masterzen/winrm/winrm/http.go create mode 100644 vendor/github.com/masterzen/winrm/winrm/http_test.go create mode 100644 vendor/github.com/masterzen/winrm/winrm/parameters.go create mode 100644 vendor/github.com/masterzen/winrm/winrm/parameters_test.go create mode 100644 vendor/github.com/masterzen/winrm/winrm/powershell.go create mode 100644 vendor/github.com/masterzen/winrm/winrm/powershell_test.go create mode 100644 vendor/github.com/masterzen/winrm/winrm/request.go create mode 100644 vendor/github.com/masterzen/winrm/winrm/request_test.go create mode 100644 vendor/github.com/masterzen/winrm/winrm/response.go create mode 100644 vendor/github.com/masterzen/winrm/winrm/response_test.go create mode 100644 vendor/github.com/masterzen/winrm/winrm/shell.go create mode 100644 vendor/github.com/masterzen/winrm/winrm/shell_test.go create mode 100644 vendor/github.com/masterzen/xmlpath/LICENSE create mode 100644 vendor/github.com/masterzen/xmlpath/all_test.go create mode 100644 vendor/github.com/masterzen/xmlpath/doc.go create mode 100644 vendor/github.com/masterzen/xmlpath/parser.go create mode 100644 vendor/github.com/masterzen/xmlpath/path.go create mode 100644 vendor/github.com/mattn/go-isatty/LICENSE create mode 100644 vendor/github.com/mattn/go-isatty/README.md create mode 100644 vendor/github.com/mattn/go-isatty/doc.go 
create mode 100644 vendor/github.com/mattn/go-isatty/isatty_appengine.go create mode 100644 vendor/github.com/mattn/go-isatty/isatty_bsd.go create mode 100644 vendor/github.com/mattn/go-isatty/isatty_linux.go create mode 100644 vendor/github.com/mattn/go-isatty/isatty_solaris.go create mode 100644 vendor/github.com/mattn/go-isatty/isatty_windows.go create mode 100644 vendor/github.com/mitchellh/cli/.travis.yml create mode 100644 vendor/github.com/mitchellh/cli/LICENSE create mode 100644 vendor/github.com/mitchellh/cli/README.md create mode 100644 vendor/github.com/mitchellh/cli/cli.go create mode 100644 vendor/github.com/mitchellh/cli/cli_test.go create mode 100644 vendor/github.com/mitchellh/cli/command.go create mode 100644 vendor/github.com/mitchellh/cli/command_mock.go create mode 100644 vendor/github.com/mitchellh/cli/command_mock_test.go create mode 100644 vendor/github.com/mitchellh/cli/help.go create mode 100644 vendor/github.com/mitchellh/cli/ui.go create mode 100644 vendor/github.com/mitchellh/cli/ui_colored.go create mode 100644 vendor/github.com/mitchellh/cli/ui_colored_test.go create mode 100644 vendor/github.com/mitchellh/cli/ui_concurrent.go create mode 100644 vendor/github.com/mitchellh/cli/ui_concurrent_test.go create mode 100644 vendor/github.com/mitchellh/cli/ui_mock.go create mode 100644 vendor/github.com/mitchellh/cli/ui_mock_test.go create mode 100644 vendor/github.com/mitchellh/cli/ui_test.go create mode 100644 vendor/github.com/mitchellh/cli/ui_writer.go create mode 100644 vendor/github.com/mitchellh/cli/ui_writer_test.go create mode 100644 vendor/github.com/mitchellh/colorstring/.travis.yml create mode 100644 vendor/github.com/mitchellh/colorstring/LICENSE create mode 100644 vendor/github.com/mitchellh/colorstring/README.md create mode 100644 vendor/github.com/mitchellh/colorstring/colorstring.go create mode 100644 vendor/github.com/mitchellh/colorstring/colorstring_test.go create mode 100644 vendor/github.com/mitchellh/copystructure/LICENSE create mode 100644 vendor/github.com/mitchellh/copystructure/README.md create mode 100644 vendor/github.com/mitchellh/copystructure/copier_time.go create mode 100644 vendor/github.com/mitchellh/copystructure/copier_time_test.go create mode 100644 vendor/github.com/mitchellh/copystructure/copystructure.go create mode 100644 vendor/github.com/mitchellh/copystructure/copystructure_examples_test.go create mode 100644 vendor/github.com/mitchellh/copystructure/copystructure_test.go create mode 100644 vendor/github.com/mitchellh/go-homedir/LICENSE create mode 100644 vendor/github.com/mitchellh/go-homedir/README.md create mode 100644 vendor/github.com/mitchellh/go-homedir/homedir.go create mode 100644 vendor/github.com/mitchellh/go-homedir/homedir_test.go create mode 100644 vendor/github.com/mitchellh/go-linereader/LICENSE.md create mode 100644 vendor/github.com/mitchellh/go-linereader/README.md create mode 100644 vendor/github.com/mitchellh/go-linereader/linereader.go create mode 100644 vendor/github.com/mitchellh/go-linereader/linereader_test.go create mode 100644 vendor/github.com/mitchellh/mapstructure/.travis.yml create mode 100644 vendor/github.com/mitchellh/mapstructure/LICENSE create mode 100644 vendor/github.com/mitchellh/mapstructure/README.md create mode 100644 vendor/github.com/mitchellh/mapstructure/decode_hooks.go create mode 100644 vendor/github.com/mitchellh/mapstructure/decode_hooks_test.go create mode 100644 vendor/github.com/mitchellh/mapstructure/error.go create mode 100644 
vendor/github.com/mitchellh/mapstructure/mapstructure.go create mode 100644 vendor/github.com/mitchellh/mapstructure/mapstructure_benchmark_test.go create mode 100644 vendor/github.com/mitchellh/mapstructure/mapstructure_bugs_test.go create mode 100644 vendor/github.com/mitchellh/mapstructure/mapstructure_examples_test.go create mode 100644 vendor/github.com/mitchellh/mapstructure/mapstructure_test.go create mode 100644 vendor/github.com/mitchellh/packer/common/uuid/uuid.go create mode 100644 vendor/github.com/mitchellh/packer/common/uuid/uuid_test.go create mode 100644 vendor/github.com/mitchellh/panicwrap/LICENSE create mode 100644 vendor/github.com/mitchellh/panicwrap/README.md create mode 100644 vendor/github.com/mitchellh/panicwrap/panicwrap.go create mode 100644 vendor/github.com/mitchellh/panicwrap/panicwrap_test.go create mode 100644 vendor/github.com/mitchellh/prefixedio/LICENSE create mode 100644 vendor/github.com/mitchellh/prefixedio/README.md create mode 100644 vendor/github.com/mitchellh/prefixedio/reader.go create mode 100644 vendor/github.com/mitchellh/prefixedio/reader_test.go create mode 100644 vendor/github.com/mitchellh/reflectwalk/LICENSE create mode 100644 vendor/github.com/mitchellh/reflectwalk/README.md create mode 100644 vendor/github.com/mitchellh/reflectwalk/location.go create mode 100644 vendor/github.com/mitchellh/reflectwalk/location_string.go create mode 100644 vendor/github.com/mitchellh/reflectwalk/reflectwalk.go create mode 100644 vendor/github.com/mitchellh/reflectwalk/reflectwalk_test.go create mode 100644 vendor/github.com/nesv/go-dynect/dynect/client.go create mode 100644 vendor/github.com/nesv/go-dynect/dynect/client_test.go create mode 100644 vendor/github.com/nesv/go-dynect/dynect/convenient_client.go create mode 100644 vendor/github.com/nesv/go-dynect/dynect/json.go create mode 100644 vendor/github.com/nesv/go-dynect/dynect/record.go create mode 100644 vendor/github.com/nesv/go-dynect/dynect/records.go create mode 100644 vendor/github.com/nesv/go-dynect/dynect/zones.go create mode 100644 vendor/github.com/nu7hatch/gouuid/.gitignore create mode 100644 vendor/github.com/nu7hatch/gouuid/COPYING create mode 100644 vendor/github.com/nu7hatch/gouuid/README.md create mode 100644 vendor/github.com/nu7hatch/gouuid/example_test.go create mode 100644 vendor/github.com/nu7hatch/gouuid/uuid.go create mode 100644 vendor/github.com/nu7hatch/gouuid/uuid_test.go create mode 100644 vendor/github.com/packer-community/winrmcp/winrmcp/cp.go create mode 100644 vendor/github.com/packer-community/winrmcp/winrmcp/endpoint.go create mode 100644 vendor/github.com/packer-community/winrmcp/winrmcp/endpoint_test.go create mode 100644 vendor/github.com/packer-community/winrmcp/winrmcp/ls.go create mode 100644 vendor/github.com/packer-community/winrmcp/winrmcp/path.go create mode 100644 vendor/github.com/packer-community/winrmcp/winrmcp/psobject.go create mode 100644 vendor/github.com/packer-community/winrmcp/winrmcp/winrmcp.go create mode 100644 vendor/github.com/packethost/packngo/LICENSE.txt create mode 100644 vendor/github.com/packethost/packngo/devices.go create mode 100644 vendor/github.com/packethost/packngo/email.go create mode 100644 vendor/github.com/packethost/packngo/facilities.go create mode 100644 vendor/github.com/packethost/packngo/operatingsystems.go create mode 100644 vendor/github.com/packethost/packngo/packngo.go create mode 100644 vendor/github.com/packethost/packngo/plans.go create mode 100644 vendor/github.com/packethost/packngo/projects.go create mode 
100644 vendor/github.com/packethost/packngo/rate.go create mode 100644 vendor/github.com/packethost/packngo/sshkeys.go create mode 100644 vendor/github.com/packethost/packngo/timestamp.go create mode 100644 vendor/github.com/packethost/packngo/user.go create mode 100644 vendor/github.com/packethost/packngo/utils.go create mode 100644 vendor/github.com/pborman/uuid/CONTRIBUTORS create mode 100644 vendor/github.com/pborman/uuid/LICENSE create mode 100644 vendor/github.com/pborman/uuid/dce.go create mode 100644 vendor/github.com/pborman/uuid/doc.go create mode 100644 vendor/github.com/pborman/uuid/hash.go create mode 100644 vendor/github.com/pborman/uuid/json.go create mode 100644 vendor/github.com/pborman/uuid/json_test.go create mode 100644 vendor/github.com/pborman/uuid/node.go create mode 100644 vendor/github.com/pborman/uuid/seq_test.go create mode 100644 vendor/github.com/pborman/uuid/sql.go create mode 100644 vendor/github.com/pborman/uuid/sql_test.go create mode 100644 vendor/github.com/pborman/uuid/time.go create mode 100644 vendor/github.com/pborman/uuid/util.go create mode 100644 vendor/github.com/pborman/uuid/uuid.go create mode 100644 vendor/github.com/pborman/uuid/uuid_test.go create mode 100644 vendor/github.com/pborman/uuid/version1.go create mode 100644 vendor/github.com/pborman/uuid/version4.go create mode 100644 vendor/github.com/pearkes/cloudflare/.gitignore create mode 100644 vendor/github.com/pearkes/cloudflare/LICENSE create mode 100644 vendor/github.com/pearkes/cloudflare/README.md create mode 100644 vendor/github.com/pearkes/cloudflare/api.go create mode 100644 vendor/github.com/pearkes/cloudflare/api_test.go create mode 100644 vendor/github.com/pearkes/cloudflare/record.go create mode 100644 vendor/github.com/pearkes/cloudflare/record_test.go create mode 100644 vendor/github.com/pearkes/cloudflare/testutil/http.go create mode 100644 vendor/github.com/pearkes/dnsimple/.travis.yml create mode 100644 vendor/github.com/pearkes/dnsimple/LICENSE create mode 100644 vendor/github.com/pearkes/dnsimple/README.md create mode 100644 vendor/github.com/pearkes/dnsimple/api.go create mode 100644 vendor/github.com/pearkes/dnsimple/api_test.go create mode 100644 vendor/github.com/pearkes/dnsimple/domain.go create mode 100644 vendor/github.com/pearkes/dnsimple/domain_test.go create mode 100644 vendor/github.com/pearkes/dnsimple/record.go create mode 100644 vendor/github.com/pearkes/dnsimple/record_test.go create mode 100644 vendor/github.com/pearkes/dnsimple/testutil/http.go create mode 100644 vendor/github.com/pearkes/mailgun/.gitignore create mode 100644 vendor/github.com/pearkes/mailgun/LICENSE create mode 100644 vendor/github.com/pearkes/mailgun/README.md create mode 100644 vendor/github.com/pearkes/mailgun/api.go create mode 100644 vendor/github.com/pearkes/mailgun/api_test.go create mode 100644 vendor/github.com/pearkes/mailgun/domain.go create mode 100644 vendor/github.com/pearkes/mailgun/domain_test.go create mode 100644 vendor/github.com/pearkes/mailgun/testutil/http.go create mode 100644 vendor/github.com/rackspace/gophercloud/.travis.yml create mode 100644 vendor/github.com/rackspace/gophercloud/CONTRIBUTING.md create mode 100644 vendor/github.com/rackspace/gophercloud/CONTRIBUTORS.md create mode 100644 vendor/github.com/rackspace/gophercloud/LICENSE create mode 100644 vendor/github.com/rackspace/gophercloud/README.md create mode 100644 vendor/github.com/rackspace/gophercloud/UPGRADING.md create mode 100644 vendor/github.com/rackspace/gophercloud/acceptance/README.md create 
mode 100644 vendor/github.com/rackspace/gophercloud/acceptance/openstack/blockstorage/v1/snapshots_test.go create mode 100644 vendor/github.com/rackspace/gophercloud/acceptance/openstack/blockstorage/v1/volumes_test.go create mode 100644 vendor/github.com/rackspace/gophercloud/acceptance/openstack/blockstorage/v1/volumetypes_test.go create mode 100644 vendor/github.com/rackspace/gophercloud/acceptance/openstack/client_test.go create mode 100644 vendor/github.com/rackspace/gophercloud/acceptance/openstack/compute/v2/bootfromvolume_test.go create mode 100644 vendor/github.com/rackspace/gophercloud/acceptance/openstack/compute/v2/compute_test.go create mode 100644 vendor/github.com/rackspace/gophercloud/acceptance/openstack/compute/v2/extension_test.go create mode 100644 vendor/github.com/rackspace/gophercloud/acceptance/openstack/compute/v2/flavors_test.go create mode 100644 vendor/github.com/rackspace/gophercloud/acceptance/openstack/compute/v2/floatingip_test.go create mode 100644 vendor/github.com/rackspace/gophercloud/acceptance/openstack/compute/v2/images_test.go create mode 100644 vendor/github.com/rackspace/gophercloud/acceptance/openstack/compute/v2/keypairs_test.go create mode 100644 vendor/github.com/rackspace/gophercloud/acceptance/openstack/compute/v2/network_test.go create mode 100644 vendor/github.com/rackspace/gophercloud/acceptance/openstack/compute/v2/pkg.go create mode 100644 vendor/github.com/rackspace/gophercloud/acceptance/openstack/compute/v2/secdefrules_test.go create mode 100644 vendor/github.com/rackspace/gophercloud/acceptance/openstack/compute/v2/secgroup_test.go create mode 100644 vendor/github.com/rackspace/gophercloud/acceptance/openstack/compute/v2/servergroup_test.go create mode 100644 vendor/github.com/rackspace/gophercloud/acceptance/openstack/compute/v2/servers_test.go create mode 100644 vendor/github.com/rackspace/gophercloud/acceptance/openstack/compute/v2/tenantnetworks_test.go create mode 100644 vendor/github.com/rackspace/gophercloud/acceptance/openstack/compute/v2/volumeattach_test.go create mode 100644 vendor/github.com/rackspace/gophercloud/acceptance/openstack/db/v1/common.go create mode 100644 vendor/github.com/rackspace/gophercloud/acceptance/openstack/db/v1/database_test.go create mode 100644 vendor/github.com/rackspace/gophercloud/acceptance/openstack/db/v1/flavor_test.go create mode 100644 vendor/github.com/rackspace/gophercloud/acceptance/openstack/db/v1/instance_test.go create mode 100644 vendor/github.com/rackspace/gophercloud/acceptance/openstack/db/v1/pkg.go create mode 100644 vendor/github.com/rackspace/gophercloud/acceptance/openstack/db/v1/user_test.go create mode 100644 vendor/github.com/rackspace/gophercloud/acceptance/openstack/identity/v2/extension_test.go create mode 100644 vendor/github.com/rackspace/gophercloud/acceptance/openstack/identity/v2/identity_test.go create mode 100644 vendor/github.com/rackspace/gophercloud/acceptance/openstack/identity/v2/pkg.go create mode 100644 vendor/github.com/rackspace/gophercloud/acceptance/openstack/identity/v2/role_test.go create mode 100644 vendor/github.com/rackspace/gophercloud/acceptance/openstack/identity/v2/tenant_test.go create mode 100644 vendor/github.com/rackspace/gophercloud/acceptance/openstack/identity/v2/token_test.go create mode 100644 vendor/github.com/rackspace/gophercloud/acceptance/openstack/identity/v2/user_test.go create mode 100644 vendor/github.com/rackspace/gophercloud/acceptance/openstack/identity/v3/endpoint_test.go create mode 100644 
vendor/github.com/rackspace/gophercloud/acceptance/openstack/identity/v3/identity_test.go create mode 100644 vendor/github.com/rackspace/gophercloud/acceptance/openstack/identity/v3/pkg.go create mode 100644 vendor/github.com/rackspace/gophercloud/acceptance/openstack/identity/v3/service_test.go create mode 100644 vendor/github.com/rackspace/gophercloud/acceptance/openstack/identity/v3/token_test.go create mode 100644 vendor/github.com/rackspace/gophercloud/acceptance/openstack/networking/v2/apiversion_test.go create mode 100644 vendor/github.com/rackspace/gophercloud/acceptance/openstack/networking/v2/common.go create mode 100644 vendor/github.com/rackspace/gophercloud/acceptance/openstack/networking/v2/extension_test.go create mode 100644 vendor/github.com/rackspace/gophercloud/acceptance/openstack/networking/v2/extensions/fwaas/firewall_test.go create mode 100644 vendor/github.com/rackspace/gophercloud/acceptance/openstack/networking/v2/extensions/fwaas/pkg.go create mode 100644 vendor/github.com/rackspace/gophercloud/acceptance/openstack/networking/v2/extensions/fwaas/policy_test.go create mode 100644 vendor/github.com/rackspace/gophercloud/acceptance/openstack/networking/v2/extensions/fwaas/rule_test.go create mode 100644 vendor/github.com/rackspace/gophercloud/acceptance/openstack/networking/v2/extensions/layer3_test.go create mode 100644 vendor/github.com/rackspace/gophercloud/acceptance/openstack/networking/v2/extensions/lbaas/common.go create mode 100644 vendor/github.com/rackspace/gophercloud/acceptance/openstack/networking/v2/extensions/lbaas/member_test.go create mode 100644 vendor/github.com/rackspace/gophercloud/acceptance/openstack/networking/v2/extensions/lbaas/monitor_test.go create mode 100644 vendor/github.com/rackspace/gophercloud/acceptance/openstack/networking/v2/extensions/lbaas/pkg.go create mode 100644 vendor/github.com/rackspace/gophercloud/acceptance/openstack/networking/v2/extensions/lbaas/pool_test.go create mode 100644 vendor/github.com/rackspace/gophercloud/acceptance/openstack/networking/v2/extensions/lbaas/vip_test.go create mode 100644 vendor/github.com/rackspace/gophercloud/acceptance/openstack/networking/v2/extensions/pkg.go create mode 100644 vendor/github.com/rackspace/gophercloud/acceptance/openstack/networking/v2/extensions/provider_test.go create mode 100644 vendor/github.com/rackspace/gophercloud/acceptance/openstack/networking/v2/extensions/security_test.go create mode 100644 vendor/github.com/rackspace/gophercloud/acceptance/openstack/networking/v2/network_test.go create mode 100644 vendor/github.com/rackspace/gophercloud/acceptance/openstack/networking/v2/pkg.go create mode 100644 vendor/github.com/rackspace/gophercloud/acceptance/openstack/networking/v2/port_test.go create mode 100644 vendor/github.com/rackspace/gophercloud/acceptance/openstack/networking/v2/subnet_test.go create mode 100644 vendor/github.com/rackspace/gophercloud/acceptance/openstack/objectstorage/v1/accounts_test.go create mode 100644 vendor/github.com/rackspace/gophercloud/acceptance/openstack/objectstorage/v1/common.go create mode 100644 vendor/github.com/rackspace/gophercloud/acceptance/openstack/objectstorage/v1/containers_test.go create mode 100644 vendor/github.com/rackspace/gophercloud/acceptance/openstack/objectstorage/v1/objects_test.go create mode 100644 vendor/github.com/rackspace/gophercloud/acceptance/openstack/orchestration/v1/buildinfo_test.go create mode 100644 vendor/github.com/rackspace/gophercloud/acceptance/openstack/orchestration/v1/common.go create mode 
100644 vendor/github.com/rackspace/gophercloud/acceptance/openstack/orchestration/v1/hello-compute.json create mode 100644 vendor/github.com/rackspace/gophercloud/acceptance/openstack/orchestration/v1/stackevents_test.go create mode 100644 vendor/github.com/rackspace/gophercloud/acceptance/openstack/orchestration/v1/stackresources_test.go create mode 100644 vendor/github.com/rackspace/gophercloud/acceptance/openstack/orchestration/v1/stacks_test.go create mode 100644 vendor/github.com/rackspace/gophercloud/acceptance/openstack/orchestration/v1/stacktemplates_test.go create mode 100644 vendor/github.com/rackspace/gophercloud/acceptance/openstack/pkg.go create mode 100644 vendor/github.com/rackspace/gophercloud/acceptance/rackspace/blockstorage/v1/common.go create mode 100644 vendor/github.com/rackspace/gophercloud/acceptance/rackspace/blockstorage/v1/snapshot_test.go create mode 100644 vendor/github.com/rackspace/gophercloud/acceptance/rackspace/blockstorage/v1/volume_test.go create mode 100644 vendor/github.com/rackspace/gophercloud/acceptance/rackspace/blockstorage/v1/volume_type_test.go create mode 100644 vendor/github.com/rackspace/gophercloud/acceptance/rackspace/cdn/v1/base_test.go create mode 100644 vendor/github.com/rackspace/gophercloud/acceptance/rackspace/cdn/v1/common.go create mode 100644 vendor/github.com/rackspace/gophercloud/acceptance/rackspace/cdn/v1/flavor_test.go create mode 100644 vendor/github.com/rackspace/gophercloud/acceptance/rackspace/cdn/v1/service_test.go create mode 100644 vendor/github.com/rackspace/gophercloud/acceptance/rackspace/cdn/v1/serviceasset_test.go create mode 100644 vendor/github.com/rackspace/gophercloud/acceptance/rackspace/client_test.go create mode 100644 vendor/github.com/rackspace/gophercloud/acceptance/rackspace/compute/v2/bootfromvolume_test.go create mode 100644 vendor/github.com/rackspace/gophercloud/acceptance/rackspace/compute/v2/compute_test.go create mode 100644 vendor/github.com/rackspace/gophercloud/acceptance/rackspace/compute/v2/flavors_test.go create mode 100644 vendor/github.com/rackspace/gophercloud/acceptance/rackspace/compute/v2/images_test.go create mode 100644 vendor/github.com/rackspace/gophercloud/acceptance/rackspace/compute/v2/keypairs_test.go create mode 100644 vendor/github.com/rackspace/gophercloud/acceptance/rackspace/compute/v2/networks_test.go create mode 100644 vendor/github.com/rackspace/gophercloud/acceptance/rackspace/compute/v2/pkg.go create mode 100644 vendor/github.com/rackspace/gophercloud/acceptance/rackspace/compute/v2/servers_test.go create mode 100644 vendor/github.com/rackspace/gophercloud/acceptance/rackspace/compute/v2/virtualinterfaces_test.go create mode 100644 vendor/github.com/rackspace/gophercloud/acceptance/rackspace/compute/v2/volumeattach_test.go create mode 100644 vendor/github.com/rackspace/gophercloud/acceptance/rackspace/db/v1/backup_test.go create mode 100644 vendor/github.com/rackspace/gophercloud/acceptance/rackspace/db/v1/common.go create mode 100644 vendor/github.com/rackspace/gophercloud/acceptance/rackspace/db/v1/config_group_test.go create mode 100644 vendor/github.com/rackspace/gophercloud/acceptance/rackspace/db/v1/database_test.go create mode 100644 vendor/github.com/rackspace/gophercloud/acceptance/rackspace/db/v1/flavor_test.go create mode 100644 vendor/github.com/rackspace/gophercloud/acceptance/rackspace/db/v1/instance_test.go create mode 100644 vendor/github.com/rackspace/gophercloud/acceptance/rackspace/db/v1/pkg.go create mode 100644 
vendor/github.com/rackspace/gophercloud/acceptance/rackspace/db/v1/replica_test.go create mode 100644 vendor/github.com/rackspace/gophercloud/acceptance/rackspace/db/v1/user_test.go create mode 100644 vendor/github.com/rackspace/gophercloud/acceptance/rackspace/identity/v2/extension_test.go create mode 100644 vendor/github.com/rackspace/gophercloud/acceptance/rackspace/identity/v2/identity_test.go create mode 100644 vendor/github.com/rackspace/gophercloud/acceptance/rackspace/identity/v2/pkg.go create mode 100644 vendor/github.com/rackspace/gophercloud/acceptance/rackspace/identity/v2/role_test.go create mode 100644 vendor/github.com/rackspace/gophercloud/acceptance/rackspace/identity/v2/tenant_test.go create mode 100644 vendor/github.com/rackspace/gophercloud/acceptance/rackspace/identity/v2/tokens_test.go create mode 100644 vendor/github.com/rackspace/gophercloud/acceptance/rackspace/identity/v2/user_test.go create mode 100644 vendor/github.com/rackspace/gophercloud/acceptance/rackspace/lb/v1/acl_test.go create mode 100644 vendor/github.com/rackspace/gophercloud/acceptance/rackspace/lb/v1/common.go create mode 100644 vendor/github.com/rackspace/gophercloud/acceptance/rackspace/lb/v1/lb_test.go create mode 100644 vendor/github.com/rackspace/gophercloud/acceptance/rackspace/lb/v1/monitor_test.go create mode 100644 vendor/github.com/rackspace/gophercloud/acceptance/rackspace/lb/v1/node_test.go create mode 100644 vendor/github.com/rackspace/gophercloud/acceptance/rackspace/lb/v1/session_test.go create mode 100644 vendor/github.com/rackspace/gophercloud/acceptance/rackspace/lb/v1/throttle_test.go create mode 100644 vendor/github.com/rackspace/gophercloud/acceptance/rackspace/lb/v1/vip_test.go create mode 100644 vendor/github.com/rackspace/gophercloud/acceptance/rackspace/networking/v2/common.go create mode 100644 vendor/github.com/rackspace/gophercloud/acceptance/rackspace/networking/v2/network_test.go create mode 100644 vendor/github.com/rackspace/gophercloud/acceptance/rackspace/networking/v2/port_test.go create mode 100644 vendor/github.com/rackspace/gophercloud/acceptance/rackspace/networking/v2/security_test.go create mode 100644 vendor/github.com/rackspace/gophercloud/acceptance/rackspace/networking/v2/subnet_test.go create mode 100644 vendor/github.com/rackspace/gophercloud/acceptance/rackspace/objectstorage/v1/accounts_test.go create mode 100644 vendor/github.com/rackspace/gophercloud/acceptance/rackspace/objectstorage/v1/bulk_test.go create mode 100644 vendor/github.com/rackspace/gophercloud/acceptance/rackspace/objectstorage/v1/cdncontainers_test.go create mode 100644 vendor/github.com/rackspace/gophercloud/acceptance/rackspace/objectstorage/v1/cdnobjects_test.go create mode 100644 vendor/github.com/rackspace/gophercloud/acceptance/rackspace/objectstorage/v1/common.go create mode 100644 vendor/github.com/rackspace/gophercloud/acceptance/rackspace/objectstorage/v1/containers_test.go create mode 100644 vendor/github.com/rackspace/gophercloud/acceptance/rackspace/objectstorage/v1/objects_test.go create mode 100644 vendor/github.com/rackspace/gophercloud/acceptance/rackspace/orchestration/v1/buildinfo_test.go create mode 100644 vendor/github.com/rackspace/gophercloud/acceptance/rackspace/orchestration/v1/common.go create mode 100644 vendor/github.com/rackspace/gophercloud/acceptance/rackspace/orchestration/v1/stackevents_test.go create mode 100644 vendor/github.com/rackspace/gophercloud/acceptance/rackspace/orchestration/v1/stackresources_test.go create mode 100644 
vendor/github.com/rackspace/gophercloud/acceptance/rackspace/orchestration/v1/stacks_test.go create mode 100644 vendor/github.com/rackspace/gophercloud/acceptance/rackspace/orchestration/v1/stacktemplates_test.go create mode 100644 vendor/github.com/rackspace/gophercloud/acceptance/rackspace/pkg.go create mode 100644 vendor/github.com/rackspace/gophercloud/acceptance/rackspace/rackconnect/v3/cloudnetworks_test.go create mode 100644 vendor/github.com/rackspace/gophercloud/acceptance/rackspace/rackconnect/v3/common.go create mode 100644 vendor/github.com/rackspace/gophercloud/acceptance/rackspace/rackconnect/v3/lbpools_test.go create mode 100644 vendor/github.com/rackspace/gophercloud/acceptance/rackspace/rackconnect/v3/publicips_test.go create mode 100644 vendor/github.com/rackspace/gophercloud/acceptance/tools/pkg.go create mode 100644 vendor/github.com/rackspace/gophercloud/acceptance/tools/tools.go create mode 100644 vendor/github.com/rackspace/gophercloud/auth_options.go create mode 100644 vendor/github.com/rackspace/gophercloud/auth_results.go create mode 100644 vendor/github.com/rackspace/gophercloud/doc.go create mode 100644 vendor/github.com/rackspace/gophercloud/endpoint_search.go create mode 100644 vendor/github.com/rackspace/gophercloud/endpoint_search_test.go create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/auth_env.go create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/blockstorage/v1/apiversions/doc.go create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/blockstorage/v1/apiversions/requests.go create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/blockstorage/v1/apiversions/requests_test.go create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/blockstorage/v1/apiversions/results.go create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/blockstorage/v1/apiversions/urls.go create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/blockstorage/v1/apiversions/urls_test.go create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/blockstorage/v1/snapshots/doc.go create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/blockstorage/v1/snapshots/fixtures.go create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/blockstorage/v1/snapshots/requests.go create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/blockstorage/v1/snapshots/requests_test.go create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/blockstorage/v1/snapshots/results.go create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/blockstorage/v1/snapshots/urls.go create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/blockstorage/v1/snapshots/urls_test.go create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/blockstorage/v1/snapshots/util.go create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/blockstorage/v1/volumes/doc.go create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/blockstorage/v1/volumes/requests.go create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/blockstorage/v1/volumes/requests_test.go create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/blockstorage/v1/volumes/results.go create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/blockstorage/v1/volumes/testing/doc.go create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/blockstorage/v1/volumes/testing/fixtures.go create mode 100644 
vendor/github.com/rackspace/gophercloud/openstack/blockstorage/v1/volumes/urls.go create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/blockstorage/v1/volumes/urls_test.go create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/blockstorage/v1/volumes/util.go create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/blockstorage/v1/volumetypes/doc.go create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/blockstorage/v1/volumetypes/fixtures.go create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/blockstorage/v1/volumetypes/requests.go create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/blockstorage/v1/volumetypes/requests_test.go create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/blockstorage/v1/volumetypes/results.go create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/blockstorage/v1/volumetypes/urls.go create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/blockstorage/v1/volumetypes/urls_test.go create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/cdn/v1/base/doc.go create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/cdn/v1/base/fixtures.go create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/cdn/v1/base/requests.go create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/cdn/v1/base/requests_test.go create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/cdn/v1/base/results.go create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/cdn/v1/base/urls.go create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/cdn/v1/flavors/doc.go create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/cdn/v1/flavors/fixtures.go create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/cdn/v1/flavors/requests.go create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/cdn/v1/flavors/requests_test.go create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/cdn/v1/flavors/results.go create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/cdn/v1/flavors/urls.go create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/cdn/v1/serviceassets/doc.go create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/cdn/v1/serviceassets/fixtures.go create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/cdn/v1/serviceassets/requests.go create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/cdn/v1/serviceassets/requests_test.go create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/cdn/v1/serviceassets/results.go create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/cdn/v1/serviceassets/urls.go create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/cdn/v1/services/doc.go create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/cdn/v1/services/errors.go create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/cdn/v1/services/fixtures.go create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/cdn/v1/services/requests.go create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/cdn/v1/services/requests_test.go create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/cdn/v1/services/results.go create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/cdn/v1/services/urls.go create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/client.go create mode 100644 
vendor/github.com/rackspace/gophercloud/openstack/client_test.go create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/common/README.md create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/common/extensions/doc.go create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/common/extensions/errors.go create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/common/extensions/fixtures.go create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/common/extensions/requests.go create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/common/extensions/requests_test.go create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/common/extensions/results.go create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/common/extensions/urls.go create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/common/extensions/urls_test.go create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/bootfromvolume/requests.go create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/bootfromvolume/requests_test.go create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/bootfromvolume/results.go create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/bootfromvolume/urls.go create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/bootfromvolume/urls_test.go create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/defsecrules/doc.go create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/defsecrules/fixtures.go create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/defsecrules/requests.go create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/defsecrules/requests_test.go create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/defsecrules/results.go create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/defsecrules/urls.go create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/delegate.go create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/delegate_test.go create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/diskconfig/doc.go create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/diskconfig/requests.go create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/diskconfig/requests_test.go create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/diskconfig/results.go create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/diskconfig/results_test.go create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/doc.go create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/floatingip/doc.go create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/floatingip/fixtures.go create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/floatingip/requests.go create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/floatingip/requests_test.go 
create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/floatingip/results.go create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/floatingip/urls.go create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/floatingip/urls_test.go create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/keypairs/doc.go create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/keypairs/fixtures.go create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/keypairs/requests.go create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/keypairs/requests_test.go create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/keypairs/results.go create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/keypairs/urls.go create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/keypairs/urls_test.go create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/networks/doc.go create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/networks/fixtures.go create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/networks/requests.go create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/networks/requests_test.go create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/networks/results.go create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/networks/urls.go create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/networks/urls_test.go create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/schedulerhints/doc.go create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/schedulerhints/requests.go create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/schedulerhints/requests_test.go create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/secgroups/doc.go create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/secgroups/fixtures.go create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/secgroups/requests.go create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/secgroups/requests_test.go create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/secgroups/results.go create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/secgroups/urls.go create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/servergroups/doc.go create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/servergroups/fixtures.go create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/servergroups/requests.go create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/servergroups/requests_test.go create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/servergroups/results.go create mode 100644 
vendor/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/servergroups/urls.go create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/servergroups/urls_test.go create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/startstop/doc.go create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/startstop/fixtures.go create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/startstop/requests.go create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/startstop/requests_test.go create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/tenantnetworks/doc.go create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/tenantnetworks/fixtures.go create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/tenantnetworks/requests.go create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/tenantnetworks/requests_test.go create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/tenantnetworks/results.go create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/tenantnetworks/urls.go create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/tenantnetworks/urls_test.go create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/volumeattach/doc.go create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/volumeattach/requests.go create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/volumeattach/requests_test.go create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/volumeattach/results.go create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/volumeattach/testing/doc.go create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/volumeattach/testing/fixtures.go create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/volumeattach/urls.go create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/volumeattach/urls_test.go create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/compute/v2/flavors/doc.go create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/compute/v2/flavors/requests.go create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/compute/v2/flavors/requests_test.go create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/compute/v2/flavors/results.go create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/compute/v2/flavors/urls.go create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/compute/v2/flavors/urls_test.go create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/compute/v2/images/doc.go create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/compute/v2/images/requests.go create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/compute/v2/images/requests_test.go create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/compute/v2/images/results.go create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/compute/v2/images/urls.go create mode 100644 
vendor/github.com/rackspace/gophercloud/openstack/compute/v2/images/urls_test.go create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/compute/v2/servers/doc.go create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/compute/v2/servers/fixtures.go create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/compute/v2/servers/requests.go create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/compute/v2/servers/requests_test.go create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/compute/v2/servers/results.go create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/compute/v2/servers/urls.go create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/compute/v2/servers/urls_test.go create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/compute/v2/servers/util.go create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/db/v1/configurations/doc.go create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/db/v1/configurations/fixtures.go create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/db/v1/configurations/requests.go create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/db/v1/configurations/requests_test.go create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/db/v1/configurations/results.go create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/db/v1/configurations/urls.go create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/db/v1/databases/doc.go create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/db/v1/databases/fixtures.go create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/db/v1/databases/requests.go create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/db/v1/databases/requests_test.go create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/db/v1/databases/results.go create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/db/v1/databases/urls.go create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/db/v1/datastores/doc.go create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/db/v1/datastores/fixtures.go create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/db/v1/datastores/requests.go create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/db/v1/datastores/requests_test.go create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/db/v1/datastores/results.go create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/db/v1/datastores/urls.go create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/db/v1/flavors/doc.go create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/db/v1/flavors/fixtures.go create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/db/v1/flavors/requests.go create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/db/v1/flavors/requests_test.go create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/db/v1/flavors/results.go create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/db/v1/flavors/urls.go create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/db/v1/instances/doc.go create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/db/v1/instances/fixtures.go create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/db/v1/instances/requests.go create mode 100644 
vendor/github.com/rackspace/gophercloud/openstack/db/v1/instances/requests_test.go create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/db/v1/instances/results.go create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/db/v1/instances/urls.go create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/db/v1/users/doc.go create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/db/v1/users/fixtures.go create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/db/v1/users/requests.go create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/db/v1/users/requests_test.go create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/db/v1/users/results.go create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/db/v1/users/urls.go create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/endpoint_location.go create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/endpoint_location_test.go create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/identity/v2/extensions/admin/roles/docs.go create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/identity/v2/extensions/admin/roles/fixtures.go create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/identity/v2/extensions/admin/roles/requests.go create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/identity/v2/extensions/admin/roles/requests_test.go create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/identity/v2/extensions/admin/roles/results.go create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/identity/v2/extensions/admin/roles/urls.go create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/identity/v2/extensions/delegate.go create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/identity/v2/extensions/delegate_test.go create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/identity/v2/extensions/doc.go create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/identity/v2/extensions/fixtures.go create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/identity/v2/tenants/doc.go create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/identity/v2/tenants/fixtures.go create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/identity/v2/tenants/requests.go create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/identity/v2/tenants/requests_test.go create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/identity/v2/tenants/results.go create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/identity/v2/tenants/urls.go create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/identity/v2/tokens/doc.go create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/identity/v2/tokens/errors.go create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/identity/v2/tokens/fixtures.go create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/identity/v2/tokens/requests.go create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/identity/v2/tokens/requests_test.go create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/identity/v2/tokens/results.go create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/identity/v2/tokens/urls.go create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/identity/v2/users/doc.go create mode 100644 
 vendor/github.com/rackspace/gophercloud/openstack/identity/v2/users/fixtures.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/identity/v2/users/requests.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/identity/v2/users/requests_test.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/identity/v2/users/results.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/identity/v2/users/urls.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/identity/v3/endpoints/doc.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/identity/v3/endpoints/errors.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/identity/v3/endpoints/requests.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/identity/v3/endpoints/requests_test.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/identity/v3/endpoints/results.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/identity/v3/endpoints/urls.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/identity/v3/endpoints/urls_test.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/identity/v3/roles/doc.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/identity/v3/roles/requests.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/identity/v3/roles/requests_test.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/identity/v3/roles/results.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/identity/v3/roles/urls.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/identity/v3/roles/urls_test.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/identity/v3/services/doc.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/identity/v3/services/requests.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/identity/v3/services/requests_test.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/identity/v3/services/results.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/identity/v3/services/urls.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/identity/v3/services/urls_test.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/identity/v3/tokens/doc.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/identity/v3/tokens/errors.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/identity/v3/tokens/requests.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/identity/v3/tokens/requests_test.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/identity/v3/tokens/results.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/identity/v3/tokens/urls.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/identity/v3/tokens/urls_test.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/networking/v2/apiversions/doc.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/networking/v2/apiversions/errors.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/networking/v2/apiversions/requests.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/networking/v2/apiversions/requests_test.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/networking/v2/apiversions/results.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/networking/v2/apiversions/urls.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/networking/v2/apiversions/urls_test.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/networking/v2/common/common_tests.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/delegate.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/delegate_test.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/external/doc.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/external/requests.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/external/results.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/external/results_test.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/fwaas/doc.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/fwaas/firewalls/errors.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/fwaas/firewalls/requests.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/fwaas/firewalls/requests_test.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/fwaas/firewalls/results.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/fwaas/firewalls/urls.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/fwaas/policies/requests.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/fwaas/policies/requests_test.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/fwaas/policies/results.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/fwaas/policies/urls.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/fwaas/rules/errors.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/fwaas/rules/requests.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/fwaas/rules/requests_test.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/fwaas/rules/results.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/fwaas/rules/urls.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/layer3/doc.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/layer3/floatingips/requests.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/layer3/floatingips/requests_test.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/layer3/floatingips/results.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/layer3/floatingips/urls.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/layer3/routers/requests.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/layer3/routers/requests_test.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/layer3/routers/results.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/layer3/routers/urls.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/lbaas/doc.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/lbaas/members/requests.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/lbaas/members/requests_test.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/lbaas/members/results.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/lbaas/members/urls.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/lbaas/monitors/requests.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/lbaas/monitors/requests_test.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/lbaas/monitors/results.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/lbaas/monitors/urls.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/lbaas/pools/requests.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/lbaas/pools/requests_test.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/lbaas/pools/results.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/lbaas/pools/urls.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/lbaas/vips/requests.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/lbaas/vips/requests_test.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/lbaas/vips/results.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/lbaas/vips/urls.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/provider/doc.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/provider/results.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/provider/results_test.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/security/doc.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/security/groups/requests.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/security/groups/requests_test.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/security/groups/results.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/security/groups/urls.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/security/rules/requests.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/security/rules/requests_test.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/security/rules/results.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/security/rules/urls.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/networking/v2/networks/doc.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/networking/v2/networks/errors.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/networking/v2/networks/requests.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/networking/v2/networks/requests_test.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/networking/v2/networks/results.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/networking/v2/networks/urls.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/networking/v2/networks/urls_test.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/networking/v2/ports/doc.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/networking/v2/ports/errors.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/networking/v2/ports/requests.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/networking/v2/ports/requests_test.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/networking/v2/ports/results.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/networking/v2/ports/urls.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/networking/v2/ports/urls_test.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/networking/v2/subnets/doc.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/networking/v2/subnets/errors.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/networking/v2/subnets/requests.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/networking/v2/subnets/requests_test.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/networking/v2/subnets/results.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/networking/v2/subnets/results_test.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/networking/v2/subnets/urls.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/networking/v2/subnets/urls_test.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/objectstorage/v1/accounts/doc.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/objectstorage/v1/accounts/fixtures.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/objectstorage/v1/accounts/requests.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/objectstorage/v1/accounts/requests_test.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/objectstorage/v1/accounts/results.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/objectstorage/v1/accounts/urls.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/objectstorage/v1/accounts/urls_test.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/objectstorage/v1/containers/doc.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/objectstorage/v1/containers/fixtures.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/objectstorage/v1/containers/requests.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/objectstorage/v1/containers/requests_test.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/objectstorage/v1/containers/results.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/objectstorage/v1/containers/urls.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/objectstorage/v1/containers/urls_test.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/objectstorage/v1/objects/doc.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/objectstorage/v1/objects/fixtures.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/objectstorage/v1/objects/requests.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/objectstorage/v1/objects/requests_test.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/objectstorage/v1/objects/results.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/objectstorage/v1/objects/urls.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/objectstorage/v1/objects/urls_test.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/orchestration/v1/apiversions/doc.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/orchestration/v1/apiversions/requests.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/orchestration/v1/apiversions/requests_test.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/orchestration/v1/apiversions/results.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/orchestration/v1/apiversions/urls.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/orchestration/v1/buildinfo/doc.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/orchestration/v1/buildinfo/fixtures.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/orchestration/v1/buildinfo/requests.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/orchestration/v1/buildinfo/requests_test.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/orchestration/v1/buildinfo/results.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/orchestration/v1/buildinfo/urls.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/orchestration/v1/stackevents/doc.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/orchestration/v1/stackevents/fixtures.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/orchestration/v1/stackevents/requests.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/orchestration/v1/stackevents/requests_test.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/orchestration/v1/stackevents/results.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/orchestration/v1/stackevents/urls.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/orchestration/v1/stackresources/doc.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/orchestration/v1/stackresources/fixtures.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/orchestration/v1/stackresources/requests.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/orchestration/v1/stackresources/requests_test.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/orchestration/v1/stackresources/results.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/orchestration/v1/stackresources/urls.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/orchestration/v1/stacks/doc.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/orchestration/v1/stacks/environment.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/orchestration/v1/stacks/environment_test.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/orchestration/v1/stacks/fixtures.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/orchestration/v1/stacks/requests.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/orchestration/v1/stacks/requests_test.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/orchestration/v1/stacks/results.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/orchestration/v1/stacks/template.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/orchestration/v1/stacks/template_test.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/orchestration/v1/stacks/urls.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/orchestration/v1/stacks/utils.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/orchestration/v1/stacks/utils_test.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/orchestration/v1/stacktemplates/doc.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/orchestration/v1/stacktemplates/fixtures.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/orchestration/v1/stacktemplates/requests.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/orchestration/v1/stacktemplates/requests_test.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/orchestration/v1/stacktemplates/results.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/orchestration/v1/stacktemplates/urls.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/utils/choose_version.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/utils/choose_version_test.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/pagination/http.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/pagination/linked.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/pagination/linked_test.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/pagination/marker.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/pagination/marker_test.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/pagination/null.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/pagination/pager.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/pagination/pagination_test.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/pagination/pkg.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/pagination/single.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/pagination/single_test.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/params.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/params_test.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/provider_client.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/provider_client_test.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/rackspace/auth_env.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/rackspace/blockstorage/v1/snapshots/delegate.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/rackspace/blockstorage/v1/snapshots/delegate_test.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/rackspace/blockstorage/v1/snapshots/doc.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/rackspace/blockstorage/v1/snapshots/results.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/rackspace/blockstorage/v1/volumes/delegate.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/rackspace/blockstorage/v1/volumes/delegate_test.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/rackspace/blockstorage/v1/volumes/doc.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/rackspace/blockstorage/v1/volumes/results.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/rackspace/blockstorage/v1/volumetypes/delegate.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/rackspace/blockstorage/v1/volumetypes/delegate_test.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/rackspace/blockstorage/v1/volumetypes/doc.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/rackspace/blockstorage/v1/volumetypes/results.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/rackspace/cdn/v1/base/delegate.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/rackspace/cdn/v1/base/delegate_test.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/rackspace/cdn/v1/base/doc.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/rackspace/cdn/v1/flavors/delegate.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/rackspace/cdn/v1/flavors/delegate_test.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/rackspace/cdn/v1/flavors/doc.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/rackspace/cdn/v1/serviceassets/delegate.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/rackspace/cdn/v1/serviceassets/delegate_test.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/rackspace/cdn/v1/serviceassets/doc.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/rackspace/cdn/v1/services/delegate.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/rackspace/cdn/v1/services/delegate_test.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/rackspace/cdn/v1/services/doc.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/rackspace/client.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/rackspace/client_test.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/rackspace/compute/v2/bootfromvolume/delegate.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/rackspace/compute/v2/bootfromvolume/delegate_test.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/rackspace/compute/v2/flavors/delegate.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/rackspace/compute/v2/flavors/delegate_test.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/rackspace/compute/v2/flavors/doc.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/rackspace/compute/v2/flavors/fixtures.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/rackspace/compute/v2/flavors/results.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/rackspace/compute/v2/flavors/urls.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/rackspace/compute/v2/images/delegate.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/rackspace/compute/v2/images/delegate_test.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/rackspace/compute/v2/images/doc.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/rackspace/compute/v2/images/fixtures.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/rackspace/compute/v2/keypairs/delegate.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/rackspace/compute/v2/keypairs/delegate_test.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/rackspace/compute/v2/keypairs/doc.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/rackspace/compute/v2/networks/doc.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/rackspace/compute/v2/networks/requests.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/rackspace/compute/v2/networks/requests_test.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/rackspace/compute/v2/networks/results.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/rackspace/compute/v2/networks/urls.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/rackspace/compute/v2/networks/urls_test.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/rackspace/compute/v2/servers/delegate.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/rackspace/compute/v2/servers/delegate_test.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/rackspace/compute/v2/servers/doc.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/rackspace/compute/v2/servers/fixtures.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/rackspace/compute/v2/servers/requests.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/rackspace/compute/v2/servers/requests_test.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/rackspace/compute/v2/virtualinterfaces/requests.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/rackspace/compute/v2/virtualinterfaces/requests_test.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/rackspace/compute/v2/virtualinterfaces/results.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/rackspace/compute/v2/virtualinterfaces/urls.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/rackspace/compute/v2/virtualinterfaces/urls_test.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/rackspace/compute/v2/volumeattach/delegate.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/rackspace/compute/v2/volumeattach/delegate_test.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/rackspace/compute/v2/volumeattach/doc.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/rackspace/db/v1/backups/doc.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/rackspace/db/v1/backups/fixtures.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/rackspace/db/v1/backups/requests.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/rackspace/db/v1/backups/requests_test.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/rackspace/db/v1/backups/results.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/rackspace/db/v1/backups/urls.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/rackspace/db/v1/configurations/delegate.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/rackspace/db/v1/configurations/delegate_test.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/rackspace/db/v1/configurations/doc.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/rackspace/db/v1/configurations/fixtures.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/rackspace/db/v1/databases/delegate.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/rackspace/db/v1/databases/delegate_test.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/rackspace/db/v1/databases/doc.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/rackspace/db/v1/databases/urls.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/rackspace/db/v1/datastores/delegate.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/rackspace/db/v1/datastores/delegate_test.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/rackspace/db/v1/datastores/doc.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/rackspace/db/v1/flavors/delegate.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/rackspace/db/v1/flavors/delegate_test.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/rackspace/db/v1/flavors/doc.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/rackspace/db/v1/instances/delegate.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/rackspace/db/v1/instances/delegate_test.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/rackspace/db/v1/instances/doc.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/rackspace/db/v1/instances/fixtures.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/rackspace/db/v1/instances/requests.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/rackspace/db/v1/instances/requests_test.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/rackspace/db/v1/instances/results.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/rackspace/db/v1/instances/urls.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/rackspace/db/v1/users/delegate.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/rackspace/db/v1/users/delegate_test.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/rackspace/db/v1/users/doc.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/rackspace/db/v1/users/fixtures.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/rackspace/db/v1/users/requests.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/rackspace/db/v1/users/requests_test.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/rackspace/db/v1/users/results.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/rackspace/db/v1/users/urls.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/rackspace/identity/v2/extensions/delegate.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/rackspace/identity/v2/extensions/delegate_test.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/rackspace/identity/v2/extensions/doc.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/rackspace/identity/v2/roles/delegate.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/rackspace/identity/v2/roles/delegate_test.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/rackspace/identity/v2/roles/fixtures.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/rackspace/identity/v2/tenants/delegate.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/rackspace/identity/v2/tenants/delegate_test.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/rackspace/identity/v2/tenants/doc.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/rackspace/identity/v2/tokens/delegate.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/rackspace/identity/v2/tokens/delegate_test.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/rackspace/identity/v2/tokens/doc.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/rackspace/identity/v2/users/delegate.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/rackspace/identity/v2/users/delegate_test.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/rackspace/identity/v2/users/fixtures.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/rackspace/identity/v2/users/results.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/rackspace/identity/v2/users/urls.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/rackspace/lb/v1/acl/doc.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/rackspace/lb/v1/acl/fixtures.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/rackspace/lb/v1/acl/requests.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/rackspace/lb/v1/acl/requests_test.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/rackspace/lb/v1/acl/results.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/rackspace/lb/v1/acl/urls.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/rackspace/lb/v1/lbs/doc.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/rackspace/lb/v1/lbs/fixtures.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/rackspace/lb/v1/lbs/requests.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/rackspace/lb/v1/lbs/requests_test.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/rackspace/lb/v1/lbs/results.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/rackspace/lb/v1/lbs/urls.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/rackspace/lb/v1/monitors/doc.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/rackspace/lb/v1/monitors/fixtures.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/rackspace/lb/v1/monitors/requests.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/rackspace/lb/v1/monitors/requests_test.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/rackspace/lb/v1/monitors/results.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/rackspace/lb/v1/monitors/urls.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/rackspace/lb/v1/nodes/doc.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/rackspace/lb/v1/nodes/fixtures.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/rackspace/lb/v1/nodes/requests.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/rackspace/lb/v1/nodes/requests_test.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/rackspace/lb/v1/nodes/results.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/rackspace/lb/v1/nodes/urls.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/rackspace/lb/v1/sessions/doc.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/rackspace/lb/v1/sessions/fixtures.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/rackspace/lb/v1/sessions/requests.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/rackspace/lb/v1/sessions/requests_test.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/rackspace/lb/v1/sessions/results.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/rackspace/lb/v1/sessions/urls.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/rackspace/lb/v1/ssl/doc.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/rackspace/lb/v1/ssl/fixtures.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/rackspace/lb/v1/ssl/requests.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/rackspace/lb/v1/ssl/requests_test.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/rackspace/lb/v1/ssl/results.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/rackspace/lb/v1/ssl/urls.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/rackspace/lb/v1/throttle/doc.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/rackspace/lb/v1/throttle/fixtures.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/rackspace/lb/v1/throttle/requests.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/rackspace/lb/v1/throttle/requests_test.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/rackspace/lb/v1/throttle/results.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/rackspace/lb/v1/throttle/urls.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/rackspace/lb/v1/vips/doc.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/rackspace/lb/v1/vips/fixtures.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/rackspace/lb/v1/vips/requests.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/rackspace/lb/v1/vips/requests_test.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/rackspace/lb/v1/vips/results.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/rackspace/lb/v1/vips/urls.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/rackspace/networking/v2/common/common_tests.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/rackspace/networking/v2/networks/delegate.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/rackspace/networking/v2/networks/delegate_test.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/rackspace/networking/v2/ports/delegate.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/rackspace/networking/v2/ports/delegate_test.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/rackspace/networking/v2/security/doc.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/rackspace/networking/v2/security/groups/delegate.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/rackspace/networking/v2/security/groups/delegate_test.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/rackspace/networking/v2/security/rules/delegate.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/rackspace/networking/v2/security/rules/delegate_test.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/rackspace/networking/v2/subnets/delegate.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/rackspace/networking/v2/subnets/delegate_test.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/rackspace/objectstorage/v1/accounts/delegate.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/rackspace/objectstorage/v1/accounts/delegate_test.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/rackspace/objectstorage/v1/accounts/doc.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/rackspace/objectstorage/v1/bulk/doc.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/rackspace/objectstorage/v1/bulk/requests.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/rackspace/objectstorage/v1/bulk/requests_test.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/rackspace/objectstorage/v1/bulk/results.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/rackspace/objectstorage/v1/bulk/urls.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/rackspace/objectstorage/v1/bulk/urls_test.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/rackspace/objectstorage/v1/cdncontainers/delegate.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/rackspace/objectstorage/v1/cdncontainers/delegate_test.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/rackspace/objectstorage/v1/cdncontainers/doc.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/rackspace/objectstorage/v1/cdncontainers/requests.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/rackspace/objectstorage/v1/cdncontainers/requests_test.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/rackspace/objectstorage/v1/cdncontainers/results.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/rackspace/objectstorage/v1/cdncontainers/urls.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/rackspace/objectstorage/v1/cdncontainers/urls_test.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/rackspace/objectstorage/v1/cdnobjects/delegate.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/rackspace/objectstorage/v1/cdnobjects/delegate_test.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/rackspace/objectstorage/v1/cdnobjects/doc.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/rackspace/objectstorage/v1/cdnobjects/request.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/rackspace/objectstorage/v1/containers/delegate.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/rackspace/objectstorage/v1/containers/delegate_test.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/rackspace/objectstorage/v1/containers/doc.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/rackspace/objectstorage/v1/objects/delegate.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/rackspace/objectstorage/v1/objects/delegate_test.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/rackspace/objectstorage/v1/objects/doc.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/rackspace/orchestration/v1/buildinfo/delegate.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/rackspace/orchestration/v1/buildinfo/delegate_test.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/rackspace/orchestration/v1/buildinfo/doc.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/rackspace/orchestration/v1/stackevents/delegate.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/rackspace/orchestration/v1/stackevents/delegate_test.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/rackspace/orchestration/v1/stackevents/doc.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/rackspace/orchestration/v1/stackresources/delegate.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/rackspace/orchestration/v1/stackresources/delegate_test.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/rackspace/orchestration/v1/stackresources/doc.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/rackspace/orchestration/v1/stacks/delegate.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/rackspace/orchestration/v1/stacks/delegate_test.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/rackspace/orchestration/v1/stacks/doc.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/rackspace/orchestration/v1/stacks/fixtures.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/rackspace/orchestration/v1/stacktemplates/delegate.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/rackspace/orchestration/v1/stacktemplates/delegate_test.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/rackspace/orchestration/v1/stacktemplates/doc.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/rackspace/rackconnect/v3/cloudnetworks/requests.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/rackspace/rackconnect/v3/cloudnetworks/requests_test.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/rackspace/rackconnect/v3/cloudnetworks/results.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/rackspace/rackconnect/v3/cloudnetworks/urls.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/rackspace/rackconnect/v3/doc.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/rackspace/rackconnect/v3/lbpools/doc.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/rackspace/rackconnect/v3/lbpools/requests.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/rackspace/rackconnect/v3/lbpools/requests_test.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/rackspace/rackconnect/v3/lbpools/results.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/rackspace/rackconnect/v3/lbpools/urls.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/rackspace/rackconnect/v3/publicips/requests.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/rackspace/rackconnect/v3/publicips/requests_test.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/rackspace/rackconnect/v3/publicips/results.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/rackspace/rackconnect/v3/publicips/urls.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/results.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/script/acceptancetest
 create mode 100644 vendor/github.com/rackspace/gophercloud/script/bootstrap
 create mode 100644 vendor/github.com/rackspace/gophercloud/script/cibuild
 create mode 100644 vendor/github.com/rackspace/gophercloud/script/test
 create mode 100644 vendor/github.com/rackspace/gophercloud/script/unittest
 create mode 100644 vendor/github.com/rackspace/gophercloud/service_client.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/service_client_test.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/testhelper/client/fake.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/testhelper/convenience.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/testhelper/doc.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/testhelper/fixture/helper.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/testhelper/http_responses.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/util.go
 create mode 100644 vendor/github.com/rackspace/gophercloud/util_test.go
 create mode 100644 vendor/github.com/satori/go.uuid/.travis.yml
 create mode 100644 vendor/github.com/satori/go.uuid/LICENSE
 create mode 100644 vendor/github.com/satori/go.uuid/README.md
 create mode 100644 vendor/github.com/satori/go.uuid/benchmarks_test.go
 create mode 100644 vendor/github.com/satori/go.uuid/uuid.go
 create mode 100644 vendor/github.com/satori/go.uuid/uuid_test.go
 create mode 100644 vendor/github.com/soniah/dnsmadeeasy/.gitignore
 create mode 100644 vendor/github.com/soniah/dnsmadeeasy/.travis.yml
 create mode 100644 vendor/github.com/soniah/dnsmadeeasy/AUTHORS.md
 create mode 100644 vendor/github.com/soniah/dnsmadeeasy/LICENSE
 create mode 100644 vendor/github.com/soniah/dnsmadeeasy/README.md
 create mode 100644 vendor/github.com/soniah/dnsmadeeasy/api.go
 create mode 100644 vendor/github.com/soniah/dnsmadeeasy/api_test.go
 create mode 100644 vendor/github.com/soniah/dnsmadeeasy/examples/README.md
 create mode 100644 vendor/github.com/soniah/dnsmadeeasy/examples/create.go
 create mode 100644 vendor/github.com/soniah/dnsmadeeasy/examples/delete.go
 create mode 100644 vendor/github.com/soniah/dnsmadeeasy/examples/read.go
 create mode 100644 vendor/github.com/soniah/dnsmadeeasy/examples/update.go
 create mode 100644 vendor/github.com/soniah/dnsmadeeasy/record.go
 create mode 100644 vendor/github.com/soniah/dnsmadeeasy/record_test.go
 create mode 100644 vendor/github.com/soniah/dnsmadeeasy/testutil/http.go
 create mode 100644 vendor/github.com/sthulb/mime/multipart/example_test.go
 create mode 100644 vendor/github.com/sthulb/mime/multipart/formdata.go
 create mode 100644 vendor/github.com/sthulb/mime/multipart/formdata_test.go
 create mode 100644 vendor/github.com/sthulb/mime/multipart/multipart.go
 create mode 100644 vendor/github.com/sthulb/mime/multipart/multipart_test.go
 create mode 100644 vendor/github.com/sthulb/mime/multipart/testdata/nested-mime
 create mode 100644 vendor/github.com/sthulb/mime/multipart/writer.go
 create mode 100644 vendor/github.com/sthulb/mime/multipart/writer_test.go
 create mode 100644 vendor/github.com/tent/http-link-go/.gitignore
 create mode 100644 vendor/github.com/tent/http-link-go/.travis.yml
 create mode 100644 vendor/github.com/tent/http-link-go/LICENSE
 create mode 100644 vendor/github.com/tent/http-link-go/README.md
 create mode 100644 vendor/github.com/tent/http-link-go/link.go
 create mode 100644 vendor/github.com/tent/http-link-go/link_test.go
 create mode 100644 vendor/github.com/vmware/govmomi/.travis.yml
 create mode 100644 vendor/github.com/vmware/govmomi/CHANGELOG.md
 create mode 100644 vendor/github.com/vmware/govmomi/CONTRIBUTORS
 create mode 100644 vendor/github.com/vmware/govmomi/LICENSE.txt
 create mode 100644 vendor/github.com/vmware/govmomi/Makefile
 create mode 100644 vendor/github.com/vmware/govmomi/README.md
 create mode 100644 vendor/github.com/vmware/govmomi/client.go
 create mode 100644 vendor/github.com/vmware/govmomi/client_test.go
 create mode 100644 vendor/github.com/vmware/govmomi/event/history_collector.go
 create mode 100644 vendor/github.com/vmware/govmomi/event/manager.go
 create mode 100644 vendor/github.com/vmware/govmomi/event/sort.go
 create mode 100644 vendor/github.com/vmware/govmomi/examples/datastores/main.go
 create mode 100644 vendor/github.com/vmware/govmomi/find/error.go
 create mode 100644 vendor/github.com/vmware/govmomi/find/finder.go
 create mode 100644 vendor/github.com/vmware/govmomi/gen/Gemfile
 create mode 100644 vendor/github.com/vmware/govmomi/gen/Gemfile.lock
 create mode 100644 vendor/github.com/vmware/govmomi/gen/gen.sh
 create mode 100644 vendor/github.com/vmware/govmomi/gen/gen_from_vmodl.rb
 create mode 100644 vendor/github.com/vmware/govmomi/gen/gen_from_wsdl.rb
 create mode 100644 vendor/github.com/vmware/govmomi/gen/vim_wsdl.rb
 create mode 100644 vendor/github.com/vmware/govmomi/govc/.gitignore
 create mode 100644 vendor/github.com/vmware/govmomi/govc/CHANGELOG.md
 create mode 100644 vendor/github.com/vmware/govmomi/govc/README.md
 create mode 100644 vendor/github.com/vmware/govmomi/govc/about/command.go
 create mode 100644 vendor/github.com/vmware/govmomi/govc/build.sh
 create mode 100644 vendor/github.com/vmware/govmomi/govc/cli/command.go
 create mode 100644 vendor/github.com/vmware/govmomi/govc/cli/register.go
 create mode 100644 vendor/github.com/vmware/govmomi/govc/cluster/add.go
 create mode 100644 vendor/github.com/vmware/govmomi/govc/cluster/change.go
 create mode 100644 vendor/github.com/vmware/govmomi/govc/cluster/create.go
 create mode 100644 vendor/github.com/vmware/govmomi/govc/datacenter/create.go
 create mode 100644 vendor/github.com/vmware/govmomi/govc/datacenter/destroy.go
 create mode 100644 vendor/github.com/vmware/govmomi/govc/datastore/cp.go
 create mode 100644 vendor/github.com/vmware/govmomi/govc/datastore/create.go
 create mode 100644 vendor/github.com/vmware/govmomi/govc/datastore/download.go
 create mode 100644 vendor/github.com/vmware/govmomi/govc/datastore/info.go
 create mode 100644 vendor/github.com/vmware/govmomi/govc/datastore/ls.go
 create mode 100644 vendor/github.com/vmware/govmomi/govc/datastore/mkdir.go
 create mode 100644 vendor/github.com/vmware/govmomi/govc/datastore/mv.go
 create mode 100644 vendor/github.com/vmware/govmomi/govc/datastore/remove.go
 create mode 100644 vendor/github.com/vmware/govmomi/govc/datastore/rm.go
 create mode 100644 vendor/github.com/vmware/govmomi/govc/datastore/upload.go
 create mode 100644 vendor/github.com/vmware/govmomi/govc/device/boot.go
 create mode 100644 vendor/github.com/vmware/govmomi/govc/device/cdrom/add.go
 create mode 100644 vendor/github.com/vmware/govmomi/govc/device/cdrom/eject.go
 create mode 100644 vendor/github.com/vmware/govmomi/govc/device/cdrom/insert.go
 create mode 100644 vendor/github.com/vmware/govmomi/govc/device/connect.go
 create mode 100644 vendor/github.com/vmware/govmomi/govc/device/disconnect.go
 create mode 100644 vendor/github.com/vmware/govmomi/govc/device/floppy/add.go
 create mode 100644 vendor/github.com/vmware/govmomi/govc/device/floppy/eject.go
 create mode 100644 vendor/github.com/vmware/govmomi/govc/device/floppy/insert.go
 create mode 100644 vendor/github.com/vmware/govmomi/govc/device/info.go
 create mode 100644 vendor/github.com/vmware/govmomi/govc/device/ls.go
 create mode 100644 vendor/github.com/vmware/govmomi/govc/device/remove.go
 create mode 100644 vendor/github.com/vmware/govmomi/govc/device/scsi/add.go
 create mode 100644 vendor/github.com/vmware/govmomi/govc/device/serial/add.go
 create mode 100644 vendor/github.com/vmware/govmomi/govc/device/serial/connect.go
 create mode 100644 vendor/github.com/vmware/govmomi/govc/device/serial/disconnect.go
 create mode 100644 vendor/github.com/vmware/govmomi/govc/dvs/add.go
 create mode 100644 vendor/github.com/vmware/govmomi/govc/dvs/create.go
 create mode 100644 vendor/github.com/vmware/govmomi/govc/dvs/portgroup/add.go
 create mode 100644 vendor/github.com/vmware/govmomi/govc/emacs/.gitignore
 create mode 100644 vendor/github.com/vmware/govmomi/govc/emacs/Cask
 create mode 100644 vendor/github.com/vmware/govmomi/govc/emacs/Makefile
 create mode 100644 vendor/github.com/vmware/govmomi/govc/emacs/README.md
 create mode 100644 vendor/github.com/vmware/govmomi/govc/emacs/govc.el
 create mode 100644 vendor/github.com/vmware/govmomi/govc/emacs/test/govc-test.el
 create mode 100644 vendor/github.com/vmware/govmomi/govc/emacs/test/make.el
 create mode 100644 vendor/github.com/vmware/govmomi/govc/events/command.go
 create mode 100644 vendor/github.com/vmware/govmomi/govc/examples/lib/ssh.sh
 create mode 100644 vendor/github.com/vmware/govmomi/govc/examples/vcsa.sh
 create mode 100644 vendor/github.com/vmware/govmomi/govc/extension/info.go
 create mode 100644 vendor/github.com/vmware/govmomi/govc/extension/register.go
 create mode 100644 vendor/github.com/vmware/govmomi/govc/extension/setcert.go
 create mode 100644 vendor/github.com/vmware/govmomi/govc/extension/unregister.go
 create mode 100644 vendor/github.com/vmware/govmomi/govc/fields/add.go
 create mode 100644 vendor/github.com/vmware/govmomi/govc/fields/ls.go
 create mode 100644 vendor/github.com/vmware/govmomi/govc/fields/rename.go
 create mode 100644 vendor/github.com/vmware/govmomi/govc/fields/rm.go
 create mode 100644 vendor/github.com/vmware/govmomi/govc/fields/set.go
 create mode 100644 vendor/github.com/vmware/govmomi/govc/flags/client.go
 create mode 100644 vendor/github.com/vmware/govmomi/govc/flags/common.go
 create mode 100644 vendor/github.com/vmware/govmomi/govc/flags/datacenter.go
 create mode 100644 vendor/github.com/vmware/govmomi/govc/flags/datastore.go
 create mode 100644 vendor/github.com/vmware/govmomi/govc/flags/debug.go
 create mode 100644 vendor/github.com/vmware/govmomi/govc/flags/empty.go
 create mode 100644 vendor/github.com/vmware/govmomi/govc/flags/host_connect.go
 create mode 100644 vendor/github.com/vmware/govmomi/govc/flags/host_system.go
 create mode 100644 vendor/github.com/vmware/govmomi/govc/flags/network.go
 create mode 100644 vendor/github.com/vmware/govmomi/govc/flags/optional_bool.go
 create mode 100644 vendor/github.com/vmware/govmomi/govc/flags/optional_bool_test.go
 create mode 100644 vendor/github.com/vmware/govmomi/govc/flags/output.go
 create mode 100644 vendor/github.com/vmware/govmomi/govc/flags/resource_pool.go
 create mode 100644 vendor/github.com/vmware/govmomi/govc/flags/search.go
 create mode 100644 vendor/github.com/vmware/govmomi/govc/flags/version.go
 create mode 100644 vendor/github.com/vmware/govmomi/govc/flags/version_test.go
 create mode 100644 vendor/github.com/vmware/govmomi/govc/flags/virtual_app.go
 create mode 100644 vendor/github.com/vmware/govmomi/govc/flags/virtual_machine.go
 create mode 100644 vendor/github.com/vmware/govmomi/govc/host/add.go
 create mode 100644 vendor/github.com/vmware/govmomi/govc/host/autostart/add.go
 create mode 100644 vendor/github.com/vmware/govmomi/govc/host/autostart/autostart.go
 create mode 100644 vendor/github.com/vmware/govmomi/govc/host/autostart/configure.go
 create mode 100644 vendor/github.com/vmware/govmomi/govc/host/autostart/info.go
 create mode 100644 vendor/github.com/vmware/govmomi/govc/host/autostart/remove.go
 create mode 100644 vendor/github.com/vmware/govmomi/govc/host/disconnect.go
 create mode 100644 vendor/github.com/vmware/govmomi/govc/host/esxcli/command.go
 create mode 100644 vendor/github.com/vmware/govmomi/govc/host/esxcli/command_test.go
 create mode 100644 vendor/github.com/vmware/govmomi/govc/host/esxcli/esxcli.go
 create mode 100644 vendor/github.com/vmware/govmomi/govc/host/esxcli/executor.go
 create mode 100644 vendor/github.com/vmware/govmomi/govc/host/esxcli/firewall_info.go
 create mode 100644 vendor/github.com/vmware/govmomi/govc/host/esxcli/fixtures/network_vm_list.xml
 create mode 100644 vendor/github.com/vmware/govmomi/govc/host/esxcli/fixtures/network_vm_port_list.xml
 create mode 100644 vendor/github.com/vmware/govmomi/govc/host/esxcli/fixtures/system_hostname_get.xml
 create mode 100644 vendor/github.com/vmware/govmomi/govc/host/esxcli/guest_info.go
 create mode 100644 vendor/github.com/vmware/govmomi/govc/host/esxcli/response.go
 create mode 100644 vendor/github.com/vmware/govmomi/govc/host/esxcli/response_test.go
 create mode 100644 vendor/github.com/vmware/govmomi/govc/host/firewall/find.go
 create mode 100644 vendor/github.com/vmware/govmomi/govc/host/info.go
 create mode 100644 vendor/github.com/vmware/govmomi/govc/host/maintenance/enter.go
 create mode 100644 vendor/github.com/vmware/govmomi/govc/host/maintenance/exit.go
 create mode 100644 vendor/github.com/vmware/govmomi/govc/host/portgroup/add.go
 create mode 100644 vendor/github.com/vmware/govmomi/govc/host/portgroup/remove.go
 create mode 100644 vendor/github.com/vmware/govmomi/govc/host/reconnect.go
 create mode 100644 vendor/github.com/vmware/govmomi/govc/host/remove.go
 create mode 100644 vendor/github.com/vmware/govmomi/govc/host/storage/info.go
 create mode 100644 vendor/github.com/vmware/govmomi/govc/host/storage/partition.go
 create mode 100644 vendor/github.com/vmware/govmomi/govc/host/vnic/info.go
 create mode 100644 vendor/github.com/vmware/govmomi/govc/host/vnic/service.go
 create mode 100644 vendor/github.com/vmware/govmomi/govc/host/vswitch/add.go
 create mode 100644 vendor/github.com/vmware/govmomi/govc/host/vswitch/info.go
 create mode 100644 vendor/github.com/vmware/govmomi/govc/host/vswitch/remove.go
 create mode 100644 vendor/github.com/vmware/govmomi/govc/importx/archive.go
 create mode 100644 vendor/github.com/vmware/govmomi/govc/importx/folder.go
 create mode 100644 vendor/github.com/vmware/govmomi/govc/importx/importable.go
 create mode 100644 vendor/github.com/vmware/govmomi/govc/importx/lease_updater.go
 create mode 100644 vendor/github.com/vmware/govmomi/govc/importx/options.go
 create mode 100644 vendor/github.com/vmware/govmomi/govc/importx/ova.go
 create mode 100644 vendor/github.com/vmware/govmomi/govc/importx/ovf.go
 create mode 100644 vendor/github.com/vmware/govmomi/govc/importx/spec.go
 create mode 100644 vendor/github.com/vmware/govmomi/govc/importx/vmdk.go
 create mode 100644 vendor/github.com/vmware/govmomi/govc/license/add.go
 create mode 100644 vendor/github.com/vmware/govmomi/govc/license/assign.go
 create mode 100644 vendor/github.com/vmware/govmomi/govc/license/assigned.go
 create mode 100644 vendor/github.com/vmware/govmomi/govc/license/decode.go
 create mode 100644 vendor/github.com/vmware/govmomi/govc/license/list.go
 create mode 100644 vendor/github.com/vmware/govmomi/govc/license/output.go
 create mode 100644 vendor/github.com/vmware/govmomi/govc/license/remove.go
 create mode 100644 vendor/github.com/vmware/govmomi/govc/logs/command.go
 create mode 100644 vendor/github.com/vmware/govmomi/govc/logs/download.go
 create mode 100644 vendor/github.com/vmware/govmomi/govc/logs/ls.go
 create mode 100644 vendor/github.com/vmware/govmomi/govc/ls/command.go
 create mode 100644 vendor/github.com/vmware/govmomi/govc/main.go
 create mode 100644 vendor/github.com/vmware/govmomi/govc/main_test.go
 create mode 100644 vendor/github.com/vmware/govmomi/govc/permissions/ls.go
 create mode 100644 vendor/github.com/vmware/govmomi/govc/permissions/remove.go
 create mode 100644 vendor/github.com/vmware/govmomi/govc/permissions/set.go
 create mode 100644 vendor/github.com/vmware/govmomi/govc/pool/change.go
 create mode 100644 vendor/github.com/vmware/govmomi/govc/pool/create.go
 create mode 100644 vendor/github.com/vmware/govmomi/govc/pool/destroy.go
 create mode 100644 vendor/github.com/vmware/govmomi/govc/pool/help.go
 create mode 100644 vendor/github.com/vmware/govmomi/govc/pool/info.go
 create mode 100644 vendor/github.com/vmware/govmomi/govc/pool/resource_config_spec.go
 create mode 100644 vendor/github.com/vmware/govmomi/govc/release.sh
 create mode 100644 vendor/github.com/vmware/govmomi/govc/test/.gitignore
 create mode 100644 vendor/github.com/vmware/govmomi/govc/test/README.md
 create mode 100644 vendor/github.com/vmware/govmomi/govc/test/boot_order_test.sh
 create mode 100644 vendor/github.com/vmware/govmomi/govc/test/clean.sh
 create mode 100644 vendor/github.com/vmware/govmomi/govc/test/cli.bats
 create mode 100644 vendor/github.com/vmware/govmomi/govc/test/datacenter.bats
 create mode 100644 vendor/github.com/vmware/govmomi/govc/test/datastore.bats
 create mode 100644 vendor/github.com/vmware/govmomi/govc/test/device.bats
 create mode 100644 vendor/github.com/vmware/govmomi/govc/test/esxbox/Vagrantfile
 create mode 100644 vendor/github.com/vmware/govmomi/govc/test/esxcli.bats
 create mode 100644 vendor/github.com/vmware/govmomi/govc/test/events.bats
 create mode 100644 vendor/github.com/vmware/govmomi/govc/test/extension.bats
 create mode 100644 vendor/github.com/vmware/govmomi/govc/test/fields.bats
 create mode 100644 vendor/github.com/vmware/govmomi/govc/test/firewall.bats
 create mode 100644 vendor/github.com/vmware/govmomi/govc/test/govc-sim
 create mode 100644 vendor/github.com/vmware/govmomi/govc/test/host.bats
 create mode 100644 vendor/github.com/vmware/govmomi/govc/test/images/.gitignore
 create mode 100644 vendor/github.com/vmware/govmomi/govc/test/images/update.sh
 create mode 100644 vendor/github.com/vmware/govmomi/govc/test/import.bats
 create mode 100644 vendor/github.com/vmware/govmomi/govc/test/license.bats
 create mode 100644 vendor/github.com/vmware/govmomi/govc/test/logs.bats
 create mode 100644 vendor/github.com/vmware/govmomi/govc/test/ls.bats
 create mode 100644 vendor/github.com/vmware/govmomi/govc/test/network.bats
 create mode 100644 vendor/github.com/vmware/govmomi/govc/test/pool.bats
 create mode 100644 vendor/github.com/vmware/govmomi/govc/test/test_helper.bash
 create mode 100644 vendor/github.com/vmware/govmomi/govc/test/vcsim/Vagrantfile
 create mode 100644 vendor/github.com/vmware/govmomi/govc/test/vcsim/provision.sh
 create mode 100644 vendor/github.com/vmware/govmomi/govc/test/vm.bats
 create mode 100644 vendor/github.com/vmware/govmomi/govc/vapp/destroy.go
 create mode 100644 vendor/github.com/vmware/govmomi/govc/vapp/power.go
 create mode 100644 vendor/github.com/vmware/govmomi/govc/version/command.go
 create mode 100644 vendor/github.com/vmware/govmomi/govc/vm/change.go
 create mode 100644 vendor/github.com/vmware/govmomi/govc/vm/create.go
 create mode 100644 vendor/github.com/vmware/govmomi/govc/vm/destroy.go
 create mode 100644 vendor/github.com/vmware/govmomi/govc/vm/disk/attach.go
 create mode 100644 vendor/github.com/vmware/govmomi/govc/vm/disk/create.go
 create mode 100644 vendor/github.com/vmware/govmomi/govc/vm/guest/auth.go
 create mode 100644 vendor/github.com/vmware/govmomi/govc/vm/guest/chmod.go
 create mode 100644 vendor/github.com/vmware/govmomi/govc/vm/guest/download.go
 create mode 100644 vendor/github.com/vmware/govmomi/govc/vm/guest/file_attr.go
 create mode 100644 vendor/github.com/vmware/govmomi/govc/vm/guest/getenv.go
 create mode 100644 vendor/github.com/vmware/govmomi/govc/vm/guest/guest.go
 create mode 100644 vendor/github.com/vmware/govmomi/govc/vm/guest/kill.go
 create mode 100644 vendor/github.com/vmware/govmomi/govc/vm/guest/ls.go
 create mode 100644 vendor/github.com/vmware/govmomi/govc/vm/guest/mkdir.go
 create mode 100644 vendor/github.com/vmware/govmomi/govc/vm/guest/mktemp.go
 create mode 100644 vendor/github.com/vmware/govmomi/govc/vm/guest/ps.go
 create mode 100644 vendor/github.com/vmware/govmomi/govc/vm/guest/rm.go
 create mode 100644 vendor/github.com/vmware/govmomi/govc/vm/guest/rmdir.go
 create mode 100644 vendor/github.com/vmware/govmomi/govc/vm/guest/start.go
 create mode 100644 vendor/github.com/vmware/govmomi/govc/vm/guest/upload.go
 create mode 100644 vendor/github.com/vmware/govmomi/govc/vm/info.go
 create mode 100644 vendor/github.com/vmware/govmomi/govc/vm/ip.go
 create mode 100644 vendor/github.com/vmware/govmomi/govc/vm/network/add.go
 create mode 100644 vendor/github.com/vmware/govmomi/govc/vm/network/change.go
 create mode 100644 vendor/github.com/vmware/govmomi/govc/vm/power.go
 create mode 100644 vendor/github.com/vmware/govmomi/govc/vm/question.go
 create mode 100644 vendor/github.com/vmware/govmomi/govc/vm/vnc.go
 create mode 100644 vendor/github.com/vmware/govmomi/guest/auth_manager.go
 create mode 100644 vendor/github.com/vmware/govmomi/guest/file_manager.go
 create mode 100644 vendor/github.com/vmware/govmomi/guest/operations_manager.go
 create mode 100644 vendor/github.com/vmware/govmomi/guest/process_manager.go
 create mode 100644 vendor/github.com/vmware/govmomi/license/assignment_manager.go
 create mode 100644 vendor/github.com/vmware/govmomi/license/manager.go
 create mode 100644 vendor/github.com/vmware/govmomi/list/lister.go
 create mode 100644 vendor/github.com/vmware/govmomi/list/path.go
 create mode 100644 vendor/github.com/vmware/govmomi/list/path_test.go
 create mode 100644 vendor/github.com/vmware/govmomi/list/recurser.go
 create mode 100644 vendor/github.com/vmware/govmomi/object/authorization_manager.go
 create mode 100644 vendor/github.com/vmware/govmomi/object/cluster_compute_resource.go
 create mode 100644 vendor/github.com/vmware/govmomi/object/cluster_compute_resource_test.go
 create mode 100644 vendor/github.com/vmware/govmomi/object/common.go
 create mode 100644 vendor/github.com/vmware/govmomi/object/compute_resource.go
 create mode 100644 vendor/github.com/vmware/govmomi/object/compute_resource_test.go
 create mode 100644 vendor/github.com/vmware/govmomi/object/custom_fields_manager.go
 create mode 100644 vendor/github.com/vmware/govmomi/object/customization_spec_manager.go
 create mode 100644 vendor/github.com/vmware/govmomi/object/datacenter.go
 create mode 100644 vendor/github.com/vmware/govmomi/object/datacenter_test.go
 create mode 100644 vendor/github.com/vmware/govmomi/object/datastore.go
 create mode 100644 vendor/github.com/vmware/govmomi/object/datastore_test.go
 create mode 100644 vendor/github.com/vmware/govmomi/object/diagnostic_manager.go
 create mode 100644 vendor/github.com/vmware/govmomi/object/distributed_virtual_portgroup.go
 create mode 100644 vendor/github.com/vmware/govmomi/object/distributed_virtual_portgroup_test.go
 create mode 100644 vendor/github.com/vmware/govmomi/object/distributed_virtual_switch.go
 create mode 100644 vendor/github.com/vmware/govmomi/object/extension_manager.go
 create mode 100644 vendor/github.com/vmware/govmomi/object/file_manager.go
 create mode 100644 vendor/github.com/vmware/govmomi/object/folder.go
 create mode 100644 vendor/github.com/vmware/govmomi/object/folder_test.go
 create mode 100644 vendor/github.com/vmware/govmomi/object/history_collector.go
 create mode 100644 vendor/github.com/vmware/govmomi/object/host_config_manager.go
 create mode 100644 vendor/github.com/vmware/govmomi/object/host_datastore_browser.go
 create mode 100644 vendor/github.com/vmware/govmomi/object/host_datastore_system.go
 create mode 100644 vendor/github.com/vmware/govmomi/object/host_firewall_system.go
 create mode 100644 vendor/github.com/vmware/govmomi/object/host_network_system.go
 create mode 100644 vendor/github.com/vmware/govmomi/object/host_storage_system.go
 create mode 100644 vendor/github.com/vmware/govmomi/object/host_system.go
 create mode 100644 vendor/github.com/vmware/govmomi/object/host_virtual_nic_manager.go
 create mode 100644 vendor/github.com/vmware/govmomi/object/host_vsan_system.go
 create mode 100644 vendor/github.com/vmware/govmomi/object/http_nfc_lease.go
 create mode 100644 vendor/github.com/vmware/govmomi/object/network.go
 create mode 100644 vendor/github.com/vmware/govmomi/object/network_reference.go
 create mode 100644 vendor/github.com/vmware/govmomi/object/network_test.go
 create mode 100644 vendor/github.com/vmware/govmomi/object/ovf_manager.go
 create mode 100644 vendor/github.com/vmware/govmomi/object/resource_pool.go
 create mode 100644 vendor/github.com/vmware/govmomi/object/search_index.go
 create mode 100644 vendor/github.com/vmware/govmomi/object/search_index_test.go
 create mode 100644 vendor/github.com/vmware/govmomi/object/storage_pod.go
 create mode 100644 vendor/github.com/vmware/govmomi/object/storage_resource_manager.go
 create mode 100644 vendor/github.com/vmware/govmomi/object/task.go
 create mode 100644 vendor/github.com/vmware/govmomi/object/types.go
 create mode 100644 vendor/github.com/vmware/govmomi/object/virtual_app.go
 create mode 100644 vendor/github.com/vmware/govmomi/object/virtual_device_list.go
 create mode 100644 vendor/github.com/vmware/govmomi/object/virtual_device_list_test.go
 create mode 100644 vendor/github.com/vmware/govmomi/object/virtual_disk_manager.go
 create mode 100644 vendor/github.com/vmware/govmomi/object/virtual_machine.go
 create mode 100644 vendor/github.com/vmware/govmomi/object/virtual_machine_test.go
 create mode 100644 vendor/github.com/vmware/govmomi/object/vmware_distributed_virtual_switch.go
 create mode 100644 vendor/github.com/vmware/govmomi/ovf/cim.go
 create mode 100644 vendor/github.com/vmware/govmomi/ovf/doc.go
 create mode 100644 vendor/github.com/vmware/govmomi/ovf/env.go
 create mode 100644 vendor/github.com/vmware/govmomi/ovf/env_test.go
 create mode 100644 vendor/github.com/vmware/govmomi/ovf/envelope.go
 create mode 100644 vendor/github.com/vmware/govmomi/ovf/ovf.go
 create mode 100644 vendor/github.com/vmware/govmomi/ovf/ovf_test.go
 create mode 100644 vendor/github.com/vmware/govmomi/property/collector.go
 create mode 100644 vendor/github.com/vmware/govmomi/property/wait.go
 create mode 100644 vendor/github.com/vmware/govmomi/scripts/.gitignore
 create mode 100644 vendor/github.com/vmware/govmomi/scripts/contributors.sh
 create mode 100644 vendor/github.com/vmware/govmomi/scripts/debug-ls.sh
 create mode 100644 vendor/github.com/vmware/govmomi/scripts/govc-env.bash
 create mode 100644 vendor/github.com/vmware/govmomi/scripts/headers/go.txt
 create mode 100644 vendor/github.com/vmware/govmomi/scripts/headers/rb.txt
 create mode 100644 vendor/github.com/vmware/govmomi/scripts/license.sh
 create mode 100644 vendor/github.com/vmware/govmomi/scripts/vagrant/vcsa/.gitignore
 create mode 100644 vendor/github.com/vmware/govmomi/scripts/vagrant/vcsa/Vagrantfile
 create mode 100644 vendor/github.com/vmware/govmomi/scripts/vagrant/vcsa/create-box.sh
 create mode 100644 vendor/github.com/vmware/govmomi/scripts/vagrant/vcsa/vagrant.sh
 create mode 100644 vendor/github.com/vmware/govmomi/scripts/wireshark-esx.sh
 create mode 100644 vendor/github.com/vmware/govmomi/scripts/wireshark-vcsa.sh
 create mode 100644 vendor/github.com/vmware/govmomi/session/keep_alive.go
 create mode 100644 vendor/github.com/vmware/govmomi/session/keep_alive_test.go
 create mode 100644 vendor/github.com/vmware/govmomi/session/manager.go
 create mode 100644 vendor/github.com/vmware/govmomi/session/manager_test.go
 create mode 100644 vendor/github.com/vmware/govmomi/task/error.go
 create mode 100644 vendor/github.com/vmware/govmomi/task/wait.go
 create mode 100644 vendor/github.com/vmware/govmomi/test/doc.go
 create mode 100644 vendor/github.com/vmware/govmomi/test/functional/helper.go
 create mode 100644 vendor/github.com/vmware/govmomi/test/functional/issue_242_test.go
 create mode 100644 vendor/github.com/vmware/govmomi/test/helper.go
 create mode 100644 vendor/github.com/vmware/govmomi/units/size.go
 create mode 100644 vendor/github.com/vmware/govmomi/units/size_test.go
 create mode 100644 vendor/github.com/vmware/govmomi/vim25/client.go
 create mode 100644 vendor/github.com/vmware/govmomi/vim25/client_test.go
 create mode 100644 vendor/github.com/vmware/govmomi/vim25/debug/debug.go
 create mode 100644 vendor/github.com/vmware/govmomi/vim25/doc.go
 create mode 100644 vendor/github.com/vmware/govmomi/vim25/methods/fault_test.go
 create mode 100644 vendor/github.com/vmware/govmomi/vim25/methods/internal.go
 create mode 100644 vendor/github.com/vmware/govmomi/vim25/methods/methods.go
 create mode 100644 vendor/github.com/vmware/govmomi/vim25/methods/service_content.go
 create mode 100644 vendor/github.com/vmware/govmomi/vim25/mo/ancestors.go
 create mode 100644 vendor/github.com/vmware/govmomi/vim25/mo/extra.go
 create mode 100644 vendor/github.com/vmware/govmomi/vim25/mo/fixtures/cluster_host_property.xml
 create mode 100644 vendor/github.com/vmware/govmomi/vim25/mo/fixtures/hostsystem_list_name_property.xml
 create mode 100644 vendor/github.com/vmware/govmomi/vim25/mo/fixtures/nested_property.xml
 create mode 100644 vendor/github.com/vmware/govmomi/vim25/mo/fixtures/not_authenticated_fault.xml
 create mode 100644 vendor/github.com/vmware/govmomi/vim25/mo/mo.go
 create mode 100644 vendor/github.com/vmware/govmomi/vim25/mo/reference.go
 create mode 100644 vendor/github.com/vmware/govmomi/vim25/mo/registry.go
 create mode 100644 vendor/github.com/vmware/govmomi/vim25/mo/retrieve.go
 create mode 100644 vendor/github.com/vmware/govmomi/vim25/mo/retrieve_test.go
 create mode 100644 vendor/github.com/vmware/govmomi/vim25/mo/type_info.go
 create mode 100644 vendor/github.com/vmware/govmomi/vim25/mo/type_info_test.go
 create mode 100644 vendor/github.com/vmware/govmomi/vim25/progress/aggregator.go
 create mode 100644 vendor/github.com/vmware/govmomi/vim25/progress/aggregator_test.go
 create mode 100644 vendor/github.com/vmware/govmomi/vim25/progress/common_test.go
 create mode 100644 vendor/github.com/vmware/govmomi/vim25/progress/doc.go
 create mode 100644
vendor/github.com/vmware/govmomi/vim25/progress/prefix.go create mode 100644 vendor/github.com/vmware/govmomi/vim25/progress/prefix_test.go create mode 100644 vendor/github.com/vmware/govmomi/vim25/progress/reader.go create mode 100644 vendor/github.com/vmware/govmomi/vim25/progress/reader_test.go create mode 100644 vendor/github.com/vmware/govmomi/vim25/progress/report.go create mode 100644 vendor/github.com/vmware/govmomi/vim25/progress/scale.go create mode 100644 vendor/github.com/vmware/govmomi/vim25/progress/scale_test.go create mode 100644 vendor/github.com/vmware/govmomi/vim25/progress/sinker.go create mode 100644 vendor/github.com/vmware/govmomi/vim25/progress/tee.go create mode 100644 vendor/github.com/vmware/govmomi/vim25/progress/tee_test.go create mode 100644 vendor/github.com/vmware/govmomi/vim25/retry.go create mode 100644 vendor/github.com/vmware/govmomi/vim25/retry_test.go create mode 100644 vendor/github.com/vmware/govmomi/vim25/soap/client.go create mode 100644 vendor/github.com/vmware/govmomi/vim25/soap/client_test.go create mode 100644 vendor/github.com/vmware/govmomi/vim25/soap/debug.go create mode 100644 vendor/github.com/vmware/govmomi/vim25/soap/error.go create mode 100644 vendor/github.com/vmware/govmomi/vim25/soap/soap.go create mode 100644 vendor/github.com/vmware/govmomi/vim25/soap/soap_test.go create mode 100644 vendor/github.com/vmware/govmomi/vim25/types/base.go create mode 100644 vendor/github.com/vmware/govmomi/vim25/types/base_test.go create mode 100644 vendor/github.com/vmware/govmomi/vim25/types/enum.go create mode 100644 vendor/github.com/vmware/govmomi/vim25/types/fault.go create mode 100644 vendor/github.com/vmware/govmomi/vim25/types/helpers.go create mode 100644 vendor/github.com/vmware/govmomi/vim25/types/if.go create mode 100644 vendor/github.com/vmware/govmomi/vim25/types/internal.go create mode 100644 vendor/github.com/vmware/govmomi/vim25/types/registry.go create mode 100644 vendor/github.com/vmware/govmomi/vim25/types/registry_test.go create mode 100644 vendor/github.com/vmware/govmomi/vim25/types/types.go create mode 100644 vendor/github.com/vmware/govmomi/vim25/types/types_test.go create mode 100644 vendor/github.com/vmware/govmomi/vim25/xml/LICENSE create mode 100644 vendor/github.com/vmware/govmomi/vim25/xml/atom_test.go create mode 100644 vendor/github.com/vmware/govmomi/vim25/xml/example_test.go create mode 100644 vendor/github.com/vmware/govmomi/vim25/xml/extras.go create mode 100644 vendor/github.com/vmware/govmomi/vim25/xml/extras_test.go create mode 100644 vendor/github.com/vmware/govmomi/vim25/xml/marshal.go create mode 100644 vendor/github.com/vmware/govmomi/vim25/xml/marshal_test.go create mode 100644 vendor/github.com/vmware/govmomi/vim25/xml/read.go create mode 100644 vendor/github.com/vmware/govmomi/vim25/xml/read_test.go create mode 100644 vendor/github.com/vmware/govmomi/vim25/xml/typeinfo.go create mode 100644 vendor/github.com/vmware/govmomi/vim25/xml/xml.go create mode 100644 vendor/github.com/vmware/govmomi/vim25/xml/xml_test.go create mode 100644 vendor/github.com/xanzy/go-cloudstack/cloudstack/APIDiscoveryService.go create mode 100644 vendor/github.com/xanzy/go-cloudstack/cloudstack/AccountService.go create mode 100644 vendor/github.com/xanzy/go-cloudstack/cloudstack/AddressService.go create mode 100644 vendor/github.com/xanzy/go-cloudstack/cloudstack/AffinityGroupService.go create mode 100644 vendor/github.com/xanzy/go-cloudstack/cloudstack/AlertService.go create mode 100644 
vendor/github.com/xanzy/go-cloudstack/cloudstack/AsyncjobService.go create mode 100644 vendor/github.com/xanzy/go-cloudstack/cloudstack/AutoScaleService.go create mode 100644 vendor/github.com/xanzy/go-cloudstack/cloudstack/BaremetalService.go create mode 100644 vendor/github.com/xanzy/go-cloudstack/cloudstack/BigSwitchVNSService.go create mode 100644 vendor/github.com/xanzy/go-cloudstack/cloudstack/CertificateService.go create mode 100644 vendor/github.com/xanzy/go-cloudstack/cloudstack/CloudIdentifierService.go create mode 100644 vendor/github.com/xanzy/go-cloudstack/cloudstack/ClusterService.go create mode 100644 vendor/github.com/xanzy/go-cloudstack/cloudstack/ConfigurationService.go create mode 100644 vendor/github.com/xanzy/go-cloudstack/cloudstack/DiskOfferingService.go create mode 100644 vendor/github.com/xanzy/go-cloudstack/cloudstack/DomainService.go create mode 100644 vendor/github.com/xanzy/go-cloudstack/cloudstack/EventService.go create mode 100644 vendor/github.com/xanzy/go-cloudstack/cloudstack/FirewallService.go create mode 100644 vendor/github.com/xanzy/go-cloudstack/cloudstack/GuestOSService.go create mode 100644 vendor/github.com/xanzy/go-cloudstack/cloudstack/HostService.go create mode 100644 vendor/github.com/xanzy/go-cloudstack/cloudstack/HypervisorService.go create mode 100644 vendor/github.com/xanzy/go-cloudstack/cloudstack/ISOService.go create mode 100644 vendor/github.com/xanzy/go-cloudstack/cloudstack/ImageStoreService.go create mode 100644 vendor/github.com/xanzy/go-cloudstack/cloudstack/InternalLBService.go create mode 100644 vendor/github.com/xanzy/go-cloudstack/cloudstack/LDAPService.go create mode 100644 vendor/github.com/xanzy/go-cloudstack/cloudstack/LimitService.go create mode 100644 vendor/github.com/xanzy/go-cloudstack/cloudstack/LoadBalancerService.go create mode 100644 vendor/github.com/xanzy/go-cloudstack/cloudstack/LoginService.go create mode 100644 vendor/github.com/xanzy/go-cloudstack/cloudstack/LogoutService.go create mode 100644 vendor/github.com/xanzy/go-cloudstack/cloudstack/NATService.go create mode 100644 vendor/github.com/xanzy/go-cloudstack/cloudstack/NetworkACLService.go create mode 100644 vendor/github.com/xanzy/go-cloudstack/cloudstack/NetworkDeviceService.go create mode 100644 vendor/github.com/xanzy/go-cloudstack/cloudstack/NetworkOfferingService.go create mode 100644 vendor/github.com/xanzy/go-cloudstack/cloudstack/NetworkService.go create mode 100644 vendor/github.com/xanzy/go-cloudstack/cloudstack/NicService.go create mode 100644 vendor/github.com/xanzy/go-cloudstack/cloudstack/NiciraNVPService.go create mode 100644 vendor/github.com/xanzy/go-cloudstack/cloudstack/OvsElementService.go create mode 100644 vendor/github.com/xanzy/go-cloudstack/cloudstack/PodService.go create mode 100644 vendor/github.com/xanzy/go-cloudstack/cloudstack/PoolService.go create mode 100644 vendor/github.com/xanzy/go-cloudstack/cloudstack/PortableIPService.go create mode 100644 vendor/github.com/xanzy/go-cloudstack/cloudstack/ProjectService.go create mode 100644 vendor/github.com/xanzy/go-cloudstack/cloudstack/RegionService.go create mode 100644 vendor/github.com/xanzy/go-cloudstack/cloudstack/ResourcemetadataService.go create mode 100644 vendor/github.com/xanzy/go-cloudstack/cloudstack/ResourcetagsService.go create mode 100644 vendor/github.com/xanzy/go-cloudstack/cloudstack/RouterService.go create mode 100644 vendor/github.com/xanzy/go-cloudstack/cloudstack/S3Service.go create mode 100644 vendor/github.com/xanzy/go-cloudstack/cloudstack/SSHService.go 
create mode 100644 vendor/github.com/xanzy/go-cloudstack/cloudstack/SecurityGroupService.go create mode 100644 vendor/github.com/xanzy/go-cloudstack/cloudstack/ServiceOfferingService.go create mode 100644 vendor/github.com/xanzy/go-cloudstack/cloudstack/SnapshotService.go create mode 100644 vendor/github.com/xanzy/go-cloudstack/cloudstack/StoragePoolService.go create mode 100644 vendor/github.com/xanzy/go-cloudstack/cloudstack/StratosphereSSPService.go create mode 100644 vendor/github.com/xanzy/go-cloudstack/cloudstack/SwiftService.go create mode 100644 vendor/github.com/xanzy/go-cloudstack/cloudstack/SystemCapacityService.go create mode 100644 vendor/github.com/xanzy/go-cloudstack/cloudstack/SystemVMService.go create mode 100644 vendor/github.com/xanzy/go-cloudstack/cloudstack/TemplateService.go create mode 100644 vendor/github.com/xanzy/go-cloudstack/cloudstack/UCSService.go create mode 100644 vendor/github.com/xanzy/go-cloudstack/cloudstack/UsageService.go create mode 100644 vendor/github.com/xanzy/go-cloudstack/cloudstack/UserService.go create mode 100644 vendor/github.com/xanzy/go-cloudstack/cloudstack/VLANService.go create mode 100644 vendor/github.com/xanzy/go-cloudstack/cloudstack/VMGroupService.go create mode 100644 vendor/github.com/xanzy/go-cloudstack/cloudstack/VPCService.go create mode 100644 vendor/github.com/xanzy/go-cloudstack/cloudstack/VPNService.go create mode 100644 vendor/github.com/xanzy/go-cloudstack/cloudstack/VirtualMachineService.go create mode 100644 vendor/github.com/xanzy/go-cloudstack/cloudstack/VolumeService.go create mode 100644 vendor/github.com/xanzy/go-cloudstack/cloudstack/ZoneService.go create mode 100644 vendor/github.com/xanzy/go-cloudstack/cloudstack/cloudstack.go create mode 100644 vendor/github.com/xanzy/ssh-agent/.gitignore create mode 100644 vendor/github.com/xanzy/ssh-agent/LICENSE create mode 100644 vendor/github.com/xanzy/ssh-agent/README.md create mode 100644 vendor/github.com/xanzy/ssh-agent/pageant_windows.go create mode 100644 vendor/github.com/xanzy/ssh-agent/sshagent.go create mode 100644 vendor/github.com/xanzy/ssh-agent/sshagent_windows.go create mode 100644 vendor/github.com/ziutek/mymysql/mysql/errors.go create mode 100644 vendor/github.com/ziutek/mymysql/mysql/field.go create mode 100644 vendor/github.com/ziutek/mymysql/mysql/interface.go create mode 100644 vendor/github.com/ziutek/mymysql/mysql/row.go create mode 100644 vendor/github.com/ziutek/mymysql/mysql/status.go create mode 100644 vendor/github.com/ziutek/mymysql/mysql/types.go create mode 100644 vendor/github.com/ziutek/mymysql/mysql/types_test.go create mode 100644 vendor/github.com/ziutek/mymysql/mysql/utils.go create mode 100644 vendor/github.com/ziutek/mymysql/native/LICENSE create mode 100644 vendor/github.com/ziutek/mymysql/native/addons.go create mode 100644 vendor/github.com/ziutek/mymysql/native/bind_test.go create mode 100644 vendor/github.com/ziutek/mymysql/native/binding.go create mode 100644 vendor/github.com/ziutek/mymysql/native/codecs.go create mode 100644 vendor/github.com/ziutek/mymysql/native/command.go create mode 100644 vendor/github.com/ziutek/mymysql/native/common.go create mode 100644 vendor/github.com/ziutek/mymysql/native/consts.go create mode 100644 vendor/github.com/ziutek/mymysql/native/init.go create mode 100644 vendor/github.com/ziutek/mymysql/native/mysql.go create mode 100644 vendor/github.com/ziutek/mymysql/native/native_test.go create mode 100644 vendor/github.com/ziutek/mymysql/native/packet.go create mode 100644 
vendor/github.com/ziutek/mymysql/native/paramvalue.go create mode 100644 vendor/github.com/ziutek/mymysql/native/passwd.go create mode 100644 vendor/github.com/ziutek/mymysql/native/prepared.go create mode 100644 vendor/github.com/ziutek/mymysql/native/result.go create mode 100644 vendor/github.com/ziutek/mymysql/native/unsafe.go-disabled create mode 100644 vendor/github.com/ziutek/mymysql/thrsafe/LICENSE create mode 100644 vendor/github.com/ziutek/mymysql/thrsafe/thrsafe.go create mode 100644 vendor/github.com/ziutek/mymysql/thrsafe/thrsafe_test.go create mode 100644 vendor/golang.org/x/crypto/curve25519/const_amd64.s create mode 100644 vendor/golang.org/x/crypto/curve25519/cswap_amd64.s create mode 100644 vendor/golang.org/x/crypto/curve25519/curve25519.go create mode 100644 vendor/golang.org/x/crypto/curve25519/curve25519_test.go create mode 100644 vendor/golang.org/x/crypto/curve25519/doc.go create mode 100644 vendor/golang.org/x/crypto/curve25519/freeze_amd64.s create mode 100644 vendor/golang.org/x/crypto/curve25519/ladderstep_amd64.s create mode 100644 vendor/golang.org/x/crypto/curve25519/mont25519_amd64.go create mode 100644 vendor/golang.org/x/crypto/curve25519/mul_amd64.s create mode 100644 vendor/golang.org/x/crypto/curve25519/square_amd64.s create mode 100644 vendor/golang.org/x/crypto/ssh/agent/client.go create mode 100644 vendor/golang.org/x/crypto/ssh/agent/client_test.go create mode 100644 vendor/golang.org/x/crypto/ssh/agent/forward.go create mode 100644 vendor/golang.org/x/crypto/ssh/agent/keyring.go create mode 100644 vendor/golang.org/x/crypto/ssh/agent/keyring_test.go create mode 100644 vendor/golang.org/x/crypto/ssh/agent/server.go create mode 100644 vendor/golang.org/x/crypto/ssh/agent/server_test.go create mode 100644 vendor/golang.org/x/crypto/ssh/agent/testdata_test.go create mode 100644 vendor/golang.org/x/crypto/ssh/benchmark_test.go create mode 100644 vendor/golang.org/x/crypto/ssh/buffer.go create mode 100644 vendor/golang.org/x/crypto/ssh/buffer_test.go create mode 100644 vendor/golang.org/x/crypto/ssh/certs.go create mode 100644 vendor/golang.org/x/crypto/ssh/certs_test.go create mode 100644 vendor/golang.org/x/crypto/ssh/channel.go create mode 100644 vendor/golang.org/x/crypto/ssh/cipher.go create mode 100644 vendor/golang.org/x/crypto/ssh/cipher_test.go create mode 100644 vendor/golang.org/x/crypto/ssh/client.go create mode 100644 vendor/golang.org/x/crypto/ssh/client_auth.go create mode 100644 vendor/golang.org/x/crypto/ssh/client_auth_test.go create mode 100644 vendor/golang.org/x/crypto/ssh/client_test.go create mode 100644 vendor/golang.org/x/crypto/ssh/common.go create mode 100644 vendor/golang.org/x/crypto/ssh/connection.go create mode 100644 vendor/golang.org/x/crypto/ssh/doc.go create mode 100644 vendor/golang.org/x/crypto/ssh/example_test.go create mode 100644 vendor/golang.org/x/crypto/ssh/handshake.go create mode 100644 vendor/golang.org/x/crypto/ssh/handshake_test.go create mode 100644 vendor/golang.org/x/crypto/ssh/kex.go create mode 100644 vendor/golang.org/x/crypto/ssh/kex_test.go create mode 100644 vendor/golang.org/x/crypto/ssh/keys.go create mode 100644 vendor/golang.org/x/crypto/ssh/keys_test.go create mode 100644 vendor/golang.org/x/crypto/ssh/mac.go create mode 100644 vendor/golang.org/x/crypto/ssh/mempipe_test.go create mode 100644 vendor/golang.org/x/crypto/ssh/messages.go create mode 100644 vendor/golang.org/x/crypto/ssh/messages_test.go create mode 100644 vendor/golang.org/x/crypto/ssh/mux.go create mode 100644 
vendor/golang.org/x/crypto/ssh/mux_test.go create mode 100644 vendor/golang.org/x/crypto/ssh/server.go create mode 100644 vendor/golang.org/x/crypto/ssh/session.go create mode 100644 vendor/golang.org/x/crypto/ssh/session_test.go create mode 100644 vendor/golang.org/x/crypto/ssh/tcpip.go create mode 100644 vendor/golang.org/x/crypto/ssh/tcpip_test.go create mode 100644 vendor/golang.org/x/crypto/ssh/terminal/terminal.go create mode 100644 vendor/golang.org/x/crypto/ssh/terminal/terminal_test.go create mode 100644 vendor/golang.org/x/crypto/ssh/terminal/util.go create mode 100644 vendor/golang.org/x/crypto/ssh/terminal/util_bsd.go create mode 100644 vendor/golang.org/x/crypto/ssh/terminal/util_linux.go create mode 100644 vendor/golang.org/x/crypto/ssh/terminal/util_windows.go create mode 100644 vendor/golang.org/x/crypto/ssh/test/agent_unix_test.go create mode 100644 vendor/golang.org/x/crypto/ssh/test/cert_test.go create mode 100644 vendor/golang.org/x/crypto/ssh/test/doc.go create mode 100644 vendor/golang.org/x/crypto/ssh/test/forward_unix_test.go create mode 100644 vendor/golang.org/x/crypto/ssh/test/session_test.go create mode 100644 vendor/golang.org/x/crypto/ssh/test/tcpip_test.go create mode 100644 vendor/golang.org/x/crypto/ssh/test/test_unix_test.go create mode 100644 vendor/golang.org/x/crypto/ssh/test/testdata_test.go create mode 100644 vendor/golang.org/x/crypto/ssh/testdata/doc.go create mode 100644 vendor/golang.org/x/crypto/ssh/testdata/keys.go create mode 100644 vendor/golang.org/x/crypto/ssh/testdata_test.go create mode 100644 vendor/golang.org/x/crypto/ssh/transport.go create mode 100644 vendor/golang.org/x/crypto/ssh/transport_test.go create mode 100644 vendor/golang.org/x/net/context/context.go create mode 100644 vendor/golang.org/x/net/context/context_test.go create mode 100644 vendor/golang.org/x/net/context/ctxhttp/cancelreq.go create mode 100644 vendor/golang.org/x/net/context/ctxhttp/cancelreq_go14.go create mode 100644 vendor/golang.org/x/net/context/ctxhttp/ctxhttp.go create mode 100644 vendor/golang.org/x/net/context/ctxhttp/ctxhttp_test.go create mode 100644 vendor/golang.org/x/net/context/withtimeout_test.go create mode 100644 vendor/golang.org/x/oauth2/.travis.yml create mode 100644 vendor/golang.org/x/oauth2/AUTHORS create mode 100644 vendor/golang.org/x/oauth2/CONTRIBUTING.md create mode 100644 vendor/golang.org/x/oauth2/CONTRIBUTORS create mode 100644 vendor/golang.org/x/oauth2/LICENSE create mode 100644 vendor/golang.org/x/oauth2/README.md create mode 100644 vendor/golang.org/x/oauth2/bitbucket/bitbucket.go create mode 100644 vendor/golang.org/x/oauth2/client_appengine.go create mode 100644 vendor/golang.org/x/oauth2/clientcredentials/clientcredentials.go create mode 100644 vendor/golang.org/x/oauth2/clientcredentials/clientcredentials_test.go create mode 100644 vendor/golang.org/x/oauth2/example_test.go create mode 100644 vendor/golang.org/x/oauth2/facebook/facebook.go create mode 100644 vendor/golang.org/x/oauth2/github/github.go create mode 100644 vendor/golang.org/x/oauth2/google/appengine.go create mode 100644 vendor/golang.org/x/oauth2/google/appengine_hook.go create mode 100644 vendor/golang.org/x/oauth2/google/appenginevm_hook.go create mode 100644 vendor/golang.org/x/oauth2/google/default.go create mode 100644 vendor/golang.org/x/oauth2/google/example_test.go create mode 100644 vendor/golang.org/x/oauth2/google/google.go create mode 100644 vendor/golang.org/x/oauth2/google/google_test.go create mode 100644 vendor/golang.org/x/oauth2/google/jwt.go 
create mode 100644 vendor/golang.org/x/oauth2/google/sdk.go create mode 100644 vendor/golang.org/x/oauth2/google/sdk_test.go create mode 100644 vendor/golang.org/x/oauth2/google/testdata/gcloud/credentials create mode 100644 vendor/golang.org/x/oauth2/google/testdata/gcloud/properties create mode 100644 vendor/golang.org/x/oauth2/internal/oauth2.go create mode 100644 vendor/golang.org/x/oauth2/internal/oauth2_test.go create mode 100644 vendor/golang.org/x/oauth2/internal/token.go create mode 100644 vendor/golang.org/x/oauth2/internal/token_test.go create mode 100644 vendor/golang.org/x/oauth2/internal/transport.go create mode 100644 vendor/golang.org/x/oauth2/internal/transport_test.go create mode 100644 vendor/golang.org/x/oauth2/jws/jws.go create mode 100644 vendor/golang.org/x/oauth2/jwt/example_test.go create mode 100644 vendor/golang.org/x/oauth2/jwt/jwt.go create mode 100644 vendor/golang.org/x/oauth2/jwt/jwt_test.go create mode 100644 vendor/golang.org/x/oauth2/linkedin/linkedin.go create mode 100644 vendor/golang.org/x/oauth2/microsoft/microsoft.go create mode 100644 vendor/golang.org/x/oauth2/oauth2.go create mode 100644 vendor/golang.org/x/oauth2/oauth2_test.go create mode 100644 vendor/golang.org/x/oauth2/odnoklassniki/odnoklassniki.go create mode 100644 vendor/golang.org/x/oauth2/paypal/paypal.go create mode 100644 vendor/golang.org/x/oauth2/token.go create mode 100644 vendor/golang.org/x/oauth2/token_test.go create mode 100644 vendor/golang.org/x/oauth2/transport.go create mode 100644 vendor/golang.org/x/oauth2/transport_test.go create mode 100644 vendor/golang.org/x/oauth2/vk/vk.go create mode 100644 vendor/google.golang.org/api/compute/v1/compute-api.json create mode 100644 vendor/google.golang.org/api/compute/v1/compute-gen.go create mode 100644 vendor/google.golang.org/api/container/v1/container-api.json create mode 100644 vendor/google.golang.org/api/container/v1/container-gen.go create mode 100644 vendor/google.golang.org/api/dns/v1/dns-api.json create mode 100644 vendor/google.golang.org/api/dns/v1/dns-gen.go create mode 100644 vendor/google.golang.org/api/gensupport/buffer.go create mode 100644 vendor/google.golang.org/api/gensupport/buffer_test.go create mode 100644 vendor/google.golang.org/api/gensupport/doc.go create mode 100644 vendor/google.golang.org/api/gensupport/json.go create mode 100644 vendor/google.golang.org/api/gensupport/json_test.go create mode 100644 vendor/google.golang.org/api/gensupport/media.go create mode 100644 vendor/google.golang.org/api/gensupport/media_test.go create mode 100644 vendor/google.golang.org/api/gensupport/params.go create mode 100644 vendor/google.golang.org/api/gensupport/resumable.go create mode 100644 vendor/google.golang.org/api/gensupport/resumable_test.go create mode 100644 vendor/google.golang.org/api/gensupport/util_test.go create mode 100644 vendor/google.golang.org/api/googleapi/googleapi.go create mode 100644 vendor/google.golang.org/api/googleapi/googleapi_test.go create mode 100644 vendor/google.golang.org/api/googleapi/internal/uritemplates/LICENSE create mode 100644 vendor/google.golang.org/api/googleapi/internal/uritemplates/uritemplates.go create mode 100644 vendor/google.golang.org/api/googleapi/internal/uritemplates/utils.go create mode 100644 vendor/google.golang.org/api/googleapi/transport/apikey.go create mode 100644 vendor/google.golang.org/api/googleapi/types.go create mode 100644 vendor/google.golang.org/api/googleapi/types_test.go create mode 100644 vendor/google.golang.org/api/pubsub/v1/pubsub-api.json 
create mode 100644 vendor/google.golang.org/api/pubsub/v1/pubsub-gen.go create mode 100644 vendor/google.golang.org/api/sqladmin/v1beta4/sqladmin-api.json create mode 100644 vendor/google.golang.org/api/sqladmin/v1beta4/sqladmin-gen.go create mode 100644 vendor/google.golang.org/api/storage/v1/storage-api.json create mode 100644 vendor/google.golang.org/api/storage/v1/storage-gen.go create mode 100644 vendor/google.golang.org/cloud/compute/metadata/metadata.go create mode 100644 vendor/google.golang.org/cloud/internal/cloud.go create mode 100644 vendor/google.golang.org/cloud/internal/datastore/datastore_v1.pb.go create mode 100644 vendor/google.golang.org/cloud/internal/datastore/datastore_v1.proto create mode 100644 vendor/google.golang.org/cloud/internal/opts/option.go create mode 100644 vendor/google.golang.org/cloud/internal/testutil/context.go create mode 100644 vendor/google.golang.org/cloud/internal/transport/cancelreq.go create mode 100644 vendor/google.golang.org/cloud/internal/transport/cancelreq_legacy.go create mode 100644 vendor/google.golang.org/cloud/internal/transport/dial.go create mode 100644 vendor/google.golang.org/cloud/internal/transport/proto.go diff --git a/.gitignore b/.gitignore index 1109e1a2f..a4e3dca58 100644 --- a/.gitignore +++ b/.gitignore @@ -6,8 +6,7 @@ terraform.tfplan terraform.tfstate bin/ modules-dev/ -pkg/ -vendor/ +/pkg/ website/.vagrant website/.bundle website/build diff --git a/.travis.yml b/.travis.yml index e60001302..a2d73848e 100644 --- a/.travis.yml +++ b/.travis.yml @@ -5,7 +5,11 @@ language: go go: - 1.5 -install: make updatedeps +# Need to ensure at least something remains in the install stanza to opt out of +# Travis's built-in 'godep restore', which will fail Travis +# See https://docs.travis-ci.com/user/languages/go#godep-support +install: + - echo noop script: - make test diff --git a/Godeps/Godeps.json b/Godeps/Godeps.json new file mode 100644 index 000000000..b8452b058 --- /dev/null +++ b/Godeps/Godeps.json @@ -0,0 +1,741 @@ +{ + "ImportPath": "github.com/hashicorp/terraform", + "GoVersion": "go1.5.2", + "Packages": [ + "github.com/hashicorp/terraform", + "github.com/hashicorp/terraform/builtin/bins/provider-atlas", + "github.com/hashicorp/terraform/builtin/bins/provider-aws", + "github.com/hashicorp/terraform/builtin/bins/provider-azure", + "github.com/hashicorp/terraform/builtin/bins/provider-azurerm", + "github.com/hashicorp/terraform/builtin/bins/provider-chef", + "github.com/hashicorp/terraform/builtin/bins/provider-cloudflare", + "github.com/hashicorp/terraform/builtin/bins/provider-cloudstack", + "github.com/hashicorp/terraform/builtin/bins/provider-consul", + "github.com/hashicorp/terraform/builtin/bins/provider-digitalocean", + "github.com/hashicorp/terraform/builtin/bins/provider-dme", + "github.com/hashicorp/terraform/builtin/bins/provider-dnsimple", + "github.com/hashicorp/terraform/builtin/bins/provider-docker", + "github.com/hashicorp/terraform/builtin/bins/provider-dyn", + "github.com/hashicorp/terraform/builtin/bins/provider-google", + "github.com/hashicorp/terraform/builtin/bins/provider-heroku", + "github.com/hashicorp/terraform/builtin/bins/provider-mailgun", + "github.com/hashicorp/terraform/builtin/bins/provider-mysql", + "github.com/hashicorp/terraform/builtin/bins/provider-null", + "github.com/hashicorp/terraform/builtin/bins/provider-openstack", + "github.com/hashicorp/terraform/builtin/bins/provider-packet", + "github.com/hashicorp/terraform/builtin/bins/provider-postgresql", + 
"github.com/hashicorp/terraform/builtin/bins/provider-powerdns", + "github.com/hashicorp/terraform/builtin/bins/provider-rundeck", + "github.com/hashicorp/terraform/builtin/bins/provider-statuscake", + "github.com/hashicorp/terraform/builtin/bins/provider-template", + "github.com/hashicorp/terraform/builtin/bins/provider-terraform", + "github.com/hashicorp/terraform/builtin/bins/provider-tls", + "github.com/hashicorp/terraform/builtin/bins/provider-vcd", + "github.com/hashicorp/terraform/builtin/bins/provider-vsphere", + "github.com/hashicorp/terraform/builtin/bins/provisioner-chef", + "github.com/hashicorp/terraform/builtin/bins/provisioner-file", + "github.com/hashicorp/terraform/builtin/bins/provisioner-local-exec", + "github.com/hashicorp/terraform/builtin/bins/provisioner-remote-exec", + "github.com/hashicorp/terraform/builtin/providers/atlas", + "github.com/hashicorp/terraform/builtin/providers/aws", + "github.com/hashicorp/terraform/builtin/providers/azure", + "github.com/hashicorp/terraform/builtin/providers/azurerm", + "github.com/hashicorp/terraform/builtin/providers/chef", + "github.com/hashicorp/terraform/builtin/providers/cloudflare", + "github.com/hashicorp/terraform/builtin/providers/cloudstack", + "github.com/hashicorp/terraform/builtin/providers/consul", + "github.com/hashicorp/terraform/builtin/providers/digitalocean", + "github.com/hashicorp/terraform/builtin/providers/dme", + "github.com/hashicorp/terraform/builtin/providers/dnsimple", + "github.com/hashicorp/terraform/builtin/providers/docker", + "github.com/hashicorp/terraform/builtin/providers/dyn", + "github.com/hashicorp/terraform/builtin/providers/google", + "github.com/hashicorp/terraform/builtin/providers/heroku", + "github.com/hashicorp/terraform/builtin/providers/mailgun", + "github.com/hashicorp/terraform/builtin/providers/mysql", + "github.com/hashicorp/terraform/builtin/providers/null", + "github.com/hashicorp/terraform/builtin/providers/openstack", + "github.com/hashicorp/terraform/builtin/providers/packet", + "github.com/hashicorp/terraform/builtin/providers/postgresql", + "github.com/hashicorp/terraform/builtin/providers/powerdns", + "github.com/hashicorp/terraform/builtin/providers/rundeck", + "github.com/hashicorp/terraform/builtin/providers/statuscake", + "github.com/hashicorp/terraform/builtin/providers/template", + "github.com/hashicorp/terraform/builtin/providers/terraform", + "github.com/hashicorp/terraform/builtin/providers/tls", + "github.com/hashicorp/terraform/builtin/providers/vcd", + "github.com/hashicorp/terraform/builtin/providers/vsphere", + "github.com/hashicorp/terraform/builtin/provisioners/chef", + "github.com/hashicorp/terraform/builtin/provisioners/file", + "github.com/hashicorp/terraform/builtin/provisioners/local-exec", + "github.com/hashicorp/terraform/builtin/provisioners/remote-exec", + "github.com/hashicorp/terraform/command", + "github.com/hashicorp/terraform/communicator", + "github.com/hashicorp/terraform/communicator/remote", + "github.com/hashicorp/terraform/communicator/ssh", + "github.com/hashicorp/terraform/communicator/winrm", + "github.com/hashicorp/terraform/config", + "github.com/hashicorp/terraform/config/lang", + "github.com/hashicorp/terraform/config/lang/ast", + "github.com/hashicorp/terraform/config/module", + "github.com/hashicorp/terraform/dag", + "github.com/hashicorp/terraform/digraph", + "github.com/hashicorp/terraform/dot", + "github.com/hashicorp/terraform/flatmap", + "github.com/hashicorp/terraform/helper/acctest", + 
"github.com/hashicorp/terraform/helper/config", + "github.com/hashicorp/terraform/helper/diff", + "github.com/hashicorp/terraform/helper/hashcode", + "github.com/hashicorp/terraform/helper/logging", + "github.com/hashicorp/terraform/helper/mutexkv", + "github.com/hashicorp/terraform/helper/pathorcontents", + "github.com/hashicorp/terraform/helper/resource", + "github.com/hashicorp/terraform/helper/schema", + "github.com/hashicorp/terraform/plugin", + "github.com/hashicorp/terraform/rpc", + "github.com/hashicorp/terraform/state", + "github.com/hashicorp/terraform/state/remote", + "github.com/hashicorp/terraform/terraform" + ], + "Deps": [ + { + "ImportPath": "github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest", + "Comment": "v1.2-315-g1cb9dff", + "Rev": "1cb9dff8c37b2918ad1ebd7b294d01100a153d27" + }, + { + "ImportPath": "github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/dgrijalva/jwt-go", + "Comment": "v1.2-315-g1cb9dff", + "Rev": "1cb9dff8c37b2918ad1ebd7b294d01100a153d27" + }, + { + "ImportPath": "github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/golang.org/x/crypto/pkcs12", + "Comment": "v1.2-315-g1cb9dff", + "Rev": "1cb9dff8c37b2918ad1ebd7b294d01100a153d27" + }, + { + "ImportPath": "github.com/Azure/azure-sdk-for-go/arm/cdn", + "Comment": "v1.2-315-g1cb9dff", + "Rev": "1cb9dff8c37b2918ad1ebd7b294d01100a153d27" + }, + { + "ImportPath": "github.com/Azure/azure-sdk-for-go/arm/compute", + "Comment": "v1.2-315-g1cb9dff", + "Rev": "1cb9dff8c37b2918ad1ebd7b294d01100a153d27" + }, + { + "ImportPath": "github.com/Azure/azure-sdk-for-go/arm/network", + "Comment": "v1.2-315-g1cb9dff", + "Rev": "1cb9dff8c37b2918ad1ebd7b294d01100a153d27" + }, + { + "ImportPath": "github.com/Azure/azure-sdk-for-go/arm/resources/resources", + "Comment": "v1.2-315-g1cb9dff", + "Rev": "1cb9dff8c37b2918ad1ebd7b294d01100a153d27" + }, + { + "ImportPath": "github.com/Azure/azure-sdk-for-go/arm/scheduler", + "Comment": "v1.2-315-g1cb9dff", + "Rev": "1cb9dff8c37b2918ad1ebd7b294d01100a153d27" + }, + { + "ImportPath": "github.com/Azure/azure-sdk-for-go/arm/storage", + "Comment": "v1.2-315-g1cb9dff", + "Rev": "1cb9dff8c37b2918ad1ebd7b294d01100a153d27" + }, + { + "ImportPath": "github.com/Azure/azure-sdk-for-go/core/http", + "Comment": "v1.2-315-g1cb9dff", + "Rev": "1cb9dff8c37b2918ad1ebd7b294d01100a153d27" + }, + { + "ImportPath": "github.com/Azure/azure-sdk-for-go/core/tls", + "Comment": "v1.2-315-g1cb9dff", + "Rev": "1cb9dff8c37b2918ad1ebd7b294d01100a153d27" + }, + { + "ImportPath": "github.com/Azure/azure-sdk-for-go/management", + "Comment": "v1.2-315-g1cb9dff", + "Rev": "1cb9dff8c37b2918ad1ebd7b294d01100a153d27" + }, + { + "ImportPath": "github.com/Azure/azure-sdk-for-go/storage", + "Comment": "v1.2-315-g1cb9dff", + "Rev": "1cb9dff8c37b2918ad1ebd7b294d01100a153d27" + }, + { + "ImportPath": "github.com/DreamItGetIT/statuscake", + "Rev": "8cbe86575f00210a6df2c19cb2f59b00cd181de3" + }, + { + "ImportPath": "github.com/apparentlymart/go-cidr/cidr", + "Rev": "a3ebdb999b831ecb6ab8a226e31b07b2b9061c47" + }, + { + "ImportPath": "github.com/apparentlymart/go-rundeck-api/rundeck", + "Comment": "v0.0.1-1-g43fcd8f", + "Rev": "43fcd8fbcf18fd5929258c044b4e3dd0643f875e" + }, + { + "ImportPath": "github.com/armon/circbuf", + "Rev": "bbbad097214e2918d8543d5201d12bfd7bca254d" + }, + { + "ImportPath": "github.com/armon/go-radix", + "Rev": "4239b77079c7b5d1243b7b4736304ce8ddb6f0f2" + }, + { + "ImportPath": "github.com/aws/aws-sdk-go/aws", + "Comment": "v1.1.0", + "Rev": 
"be2ec39e520e3c4088c0c6288055bdc8184a89ee" + }, + { + "ImportPath": "github.com/aws/aws-sdk-go/private/endpoints", + "Comment": "v1.1.0", + "Rev": "be2ec39e520e3c4088c0c6288055bdc8184a89ee" + }, + { + "ImportPath": "github.com/aws/aws-sdk-go/private/protocol/ec2query", + "Comment": "v1.1.0", + "Rev": "be2ec39e520e3c4088c0c6288055bdc8184a89ee" + }, + { + "ImportPath": "github.com/aws/aws-sdk-go/private/protocol/json/jsonutil", + "Comment": "v1.1.0", + "Rev": "be2ec39e520e3c4088c0c6288055bdc8184a89ee" + }, + { + "ImportPath": "github.com/aws/aws-sdk-go/private/protocol/jsonrpc", + "Comment": "v1.1.0", + "Rev": "be2ec39e520e3c4088c0c6288055bdc8184a89ee" + }, + { + "ImportPath": "github.com/aws/aws-sdk-go/private/protocol/query", + "Comment": "v1.1.0", + "Rev": "be2ec39e520e3c4088c0c6288055bdc8184a89ee" + }, + { + "ImportPath": "github.com/aws/aws-sdk-go/private/protocol/rest", + "Comment": "v1.1.0", + "Rev": "be2ec39e520e3c4088c0c6288055bdc8184a89ee" + }, + { + "ImportPath": "github.com/aws/aws-sdk-go/private/protocol/restjson", + "Comment": "v1.1.0", + "Rev": "be2ec39e520e3c4088c0c6288055bdc8184a89ee" + }, + { + "ImportPath": "github.com/aws/aws-sdk-go/private/protocol/restxml", + "Comment": "v1.1.0", + "Rev": "be2ec39e520e3c4088c0c6288055bdc8184a89ee" + }, + { + "ImportPath": "github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil", + "Comment": "v1.1.0", + "Rev": "be2ec39e520e3c4088c0c6288055bdc8184a89ee" + }, + { + "ImportPath": "github.com/aws/aws-sdk-go/private/signer/v4", + "Comment": "v1.1.0", + "Rev": "be2ec39e520e3c4088c0c6288055bdc8184a89ee" + }, + { + "ImportPath": "github.com/aws/aws-sdk-go/private/waiter", + "Comment": "v1.1.0", + "Rev": "be2ec39e520e3c4088c0c6288055bdc8184a89ee" + }, + { + "ImportPath": "github.com/aws/aws-sdk-go/service/autoscaling", + "Comment": "v1.1.0", + "Rev": "be2ec39e520e3c4088c0c6288055bdc8184a89ee" + }, + { + "ImportPath": "github.com/aws/aws-sdk-go/service/cloudformation", + "Comment": "v1.1.0", + "Rev": "be2ec39e520e3c4088c0c6288055bdc8184a89ee" + }, + { + "ImportPath": "github.com/aws/aws-sdk-go/service/cloudtrail", + "Comment": "v1.1.0", + "Rev": "be2ec39e520e3c4088c0c6288055bdc8184a89ee" + }, + { + "ImportPath": "github.com/aws/aws-sdk-go/service/cloudwatch", + "Comment": "v1.1.0", + "Rev": "be2ec39e520e3c4088c0c6288055bdc8184a89ee" + }, + { + "ImportPath": "github.com/aws/aws-sdk-go/service/cloudwatchlogs", + "Comment": "v1.1.0", + "Rev": "be2ec39e520e3c4088c0c6288055bdc8184a89ee" + }, + { + "ImportPath": "github.com/aws/aws-sdk-go/service/codecommit", + "Comment": "v1.1.0", + "Rev": "be2ec39e520e3c4088c0c6288055bdc8184a89ee" + }, + { + "ImportPath": "github.com/aws/aws-sdk-go/service/codedeploy", + "Comment": "v1.1.0", + "Rev": "be2ec39e520e3c4088c0c6288055bdc8184a89ee" + }, + { + "ImportPath": "github.com/aws/aws-sdk-go/service/directoryservice", + "Comment": "v1.1.0", + "Rev": "be2ec39e520e3c4088c0c6288055bdc8184a89ee" + }, + { + "ImportPath": "github.com/aws/aws-sdk-go/service/dynamodb", + "Comment": "v1.1.0", + "Rev": "be2ec39e520e3c4088c0c6288055bdc8184a89ee" + }, + { + "ImportPath": "github.com/aws/aws-sdk-go/service/ec2", + "Comment": "v1.1.0", + "Rev": "be2ec39e520e3c4088c0c6288055bdc8184a89ee" + }, + { + "ImportPath": "github.com/aws/aws-sdk-go/service/ecr", + "Comment": "v1.1.0", + "Rev": "be2ec39e520e3c4088c0c6288055bdc8184a89ee" + }, + { + "ImportPath": "github.com/aws/aws-sdk-go/service/ecs", + "Comment": "v1.1.0", + "Rev": "be2ec39e520e3c4088c0c6288055bdc8184a89ee" + }, + { + "ImportPath": "github.com/aws/aws-sdk-go/service/efs", 
+ "Comment": "v1.1.0", + "Rev": "be2ec39e520e3c4088c0c6288055bdc8184a89ee" + }, + { + "ImportPath": "github.com/aws/aws-sdk-go/service/elasticache", + "Comment": "v1.1.0", + "Rev": "be2ec39e520e3c4088c0c6288055bdc8184a89ee" + }, + { + "ImportPath": "github.com/aws/aws-sdk-go/service/elasticsearchservice", + "Comment": "v1.1.0", + "Rev": "be2ec39e520e3c4088c0c6288055bdc8184a89ee" + }, + { + "ImportPath": "github.com/aws/aws-sdk-go/service/elb", + "Comment": "v1.1.0", + "Rev": "be2ec39e520e3c4088c0c6288055bdc8184a89ee" + }, + { + "ImportPath": "github.com/aws/aws-sdk-go/service/firehose", + "Comment": "v1.1.0", + "Rev": "be2ec39e520e3c4088c0c6288055bdc8184a89ee" + }, + { + "ImportPath": "github.com/aws/aws-sdk-go/service/glacier", + "Comment": "v1.1.0", + "Rev": "be2ec39e520e3c4088c0c6288055bdc8184a89ee" + }, + { + "ImportPath": "github.com/aws/aws-sdk-go/service/iam", + "Comment": "v1.1.0", + "Rev": "be2ec39e520e3c4088c0c6288055bdc8184a89ee" + }, + { + "ImportPath": "github.com/aws/aws-sdk-go/service/kinesis", + "Comment": "v1.1.0", + "Rev": "be2ec39e520e3c4088c0c6288055bdc8184a89ee" + }, + { + "ImportPath": "github.com/aws/aws-sdk-go/service/lambda", + "Comment": "v1.1.0", + "Rev": "be2ec39e520e3c4088c0c6288055bdc8184a89ee" + }, + { + "ImportPath": "github.com/aws/aws-sdk-go/service/opsworks", + "Comment": "v1.1.0", + "Rev": "be2ec39e520e3c4088c0c6288055bdc8184a89ee" + }, + { + "ImportPath": "github.com/aws/aws-sdk-go/service/rds", + "Comment": "v1.1.0", + "Rev": "be2ec39e520e3c4088c0c6288055bdc8184a89ee" + }, + { + "ImportPath": "github.com/aws/aws-sdk-go/service/redshift", + "Comment": "v1.1.0", + "Rev": "be2ec39e520e3c4088c0c6288055bdc8184a89ee" + }, + { + "ImportPath": "github.com/aws/aws-sdk-go/service/route53", + "Comment": "v1.1.0", + "Rev": "be2ec39e520e3c4088c0c6288055bdc8184a89ee" + }, + { + "ImportPath": "github.com/aws/aws-sdk-go/service/s3", + "Comment": "v1.1.0", + "Rev": "be2ec39e520e3c4088c0c6288055bdc8184a89ee" + }, + { + "ImportPath": "github.com/aws/aws-sdk-go/service/sns", + "Comment": "v1.1.0", + "Rev": "be2ec39e520e3c4088c0c6288055bdc8184a89ee" + }, + { + "ImportPath": "github.com/aws/aws-sdk-go/service/sqs", + "Comment": "v1.1.0", + "Rev": "be2ec39e520e3c4088c0c6288055bdc8184a89ee" + }, + { + "ImportPath": "github.com/bgentry/speakeasy", + "Rev": "36e9cfdd690967f4f690c6edcc9ffacd006014a0" + }, + { + "ImportPath": "github.com/coreos/etcd/Godeps/_workspace/src/github.com/ugorji/go/codec", + "Comment": "v2.3.0-alpha.0-652-ge552791", + "Rev": "e5527914aa42cae3063f52892e1ca4518da0e4ae" + }, + { + "ImportPath": "github.com/coreos/etcd/Godeps/_workspace/src/golang.org/x/net/context", + "Comment": "v2.3.0-alpha.0-652-ge552791", + "Rev": "e5527914aa42cae3063f52892e1ca4518da0e4ae" + }, + { + "ImportPath": "github.com/coreos/etcd/client", + "Comment": "v2.3.0-alpha.0-652-ge552791", + "Rev": "e5527914aa42cae3063f52892e1ca4518da0e4ae" + }, + { + "ImportPath": "github.com/coreos/etcd/pkg/pathutil", + "Comment": "v2.3.0-alpha.0-652-ge552791", + "Rev": "e5527914aa42cae3063f52892e1ca4518da0e4ae" + }, + { + "ImportPath": "github.com/coreos/etcd/pkg/types", + "Comment": "v2.3.0-alpha.0-652-ge552791", + "Rev": "e5527914aa42cae3063f52892e1ca4518da0e4ae" + }, + { + "ImportPath": "github.com/cyberdelia/heroku-go/v3", + "Rev": "81c5afa1abcf69cc18ccc24fa3716b5a455c9208" + }, + { + "ImportPath": "github.com/digitalocean/godo", + "Comment": "v0.9.0-20-gf75d769", + "Rev": "f75d769b07edce8a73682dcf325b4404f366ab3d" + }, + { + "ImportPath": "github.com/dylanmei/iso8601", + "Rev": 
"2075bf119b58e5576c6ed9f867b8f3d17f2e54d4" + }, + { + "ImportPath": "github.com/dylanmei/winrmtest", + "Rev": "025617847eb2cf9bd1d851bc3b22ed28e6245ce5" + }, + { + "ImportPath": "github.com/fsouza/go-dockerclient", + "Rev": "02a8beb401b20e112cff3ea740545960b667eab1" + }, + { + "ImportPath": "github.com/go-chef/chef", + "Comment": "0.0.1-42-gea19666", + "Rev": "ea196660dd8700ad18911681b223fe6bfc29cd69" + }, + { + "ImportPath": "github.com/go-ini/ini", + "Comment": "v1.8.6", + "Rev": "afbd495e5aaea13597b5e14fe514ddeaa4d76fc3" + }, + { + "ImportPath": "github.com/google/go-querystring/query", + "Rev": "2a60fc2ba6c19de80291203597d752e9ba58e4c0" + }, + { + "ImportPath": "github.com/hashicorp/atlas-go/archive", + "Comment": "20141209094003-90-g0008886", + "Rev": "0008886ebfa3b424bed03e2a5cbe4a2568ea0ff6" + }, + { + "ImportPath": "github.com/hashicorp/atlas-go/v1", + "Comment": "20141209094003-90-g0008886", + "Rev": "0008886ebfa3b424bed03e2a5cbe4a2568ea0ff6" + }, + { + "ImportPath": "github.com/hashicorp/consul/api", + "Comment": "v0.6.3-28-g3215b87", + "Rev": "3215b8727f44c778dd7045dcfd5ac42735c581a9" + }, + { + "ImportPath": "github.com/hashicorp/errwrap", + "Rev": "7554cd9344cec97297fa6649b055a8c98c2a1e55" + }, + { + "ImportPath": "github.com/hashicorp/go-checkpoint", + "Rev": "e4b2dc34c0f698ee04750bf2035d8b9384233e1b" + }, + { + "ImportPath": "github.com/hashicorp/go-cleanhttp", + "Rev": "ce617e79981a8fff618bb643d155133a8f38db96" + }, + { + "ImportPath": "github.com/hashicorp/go-getter", + "Rev": "848242c76c346ef0aeb34787753b068f5f6f92fe" + }, + { + "ImportPath": "github.com/hashicorp/go-multierror", + "Rev": "d30f09973e19c1dfcd120b2d9c4f168e68d6b5d5" + }, + { + "ImportPath": "github.com/hashicorp/go-retryablehttp", + "Rev": "24fda80b7c713c52649e57ce20100d453f7bdb24" + }, + { + "ImportPath": "github.com/hashicorp/go-version", + "Rev": "7e3c02b30806fa5779d3bdfc152ce4c6f40e7b38" + }, + { + "ImportPath": "github.com/hashicorp/hcl", + "Rev": "578dd9746824a54637686b51a41bad457a56bcef" + }, + { + "ImportPath": "github.com/hashicorp/logutils", + "Rev": "0dc08b1671f34c4250ce212759ebd880f743d883" + }, + { + "ImportPath": "github.com/hashicorp/serf/coordinate", + "Comment": "v0.7.0-12-ge4ec8cc", + "Rev": "e4ec8cc423bbe20d26584b96efbeb9102e16d05f" + }, + { + "ImportPath": "github.com/hashicorp/yamux", + "Rev": "df949784da9ed028ee76df44652e42d37a09d7e4" + }, + { + "ImportPath": "github.com/hmrc/vmware-govcd", + "Comment": "v0.0.2-37-g5cd82f0", + "Rev": "5cd82f01aa1c97afa9b23ef6f4f42a60f3106003" + }, + { + "ImportPath": "github.com/imdario/mergo", + "Comment": "0.2.1-3-gb1859b1", + "Rev": "b1859b199a7171589445bdea9fa8c19362613f80" + }, + { + "ImportPath": "github.com/jmespath/go-jmespath", + "Comment": "0.2.2-2-gc01cf91", + "Rev": "c01cf91b011868172fdcd9f41838e80c9d716264" + }, + { + "ImportPath": "github.com/kardianos/osext", + "Rev": "29ae4ffbc9a6fe9fb2bc5029050ce6996ea1d3bc" + }, + { + "ImportPath": "github.com/lib/pq", + "Comment": "go1.0-cutoff-74-g8ad2b29", + "Rev": "8ad2b298cadd691a77015666a5372eae5dbfac8f" + }, + { + "ImportPath": "github.com/lusis/go-artifactory/src/artifactory.v401", + "Rev": "7e4ce345df825841661d1b3ffbb1327083d4a22f" + }, + { + "ImportPath": "github.com/masterzen/simplexml/dom", + "Rev": "95ba30457eb1121fa27753627c774c7cd4e90083" + }, + { + "ImportPath": "github.com/masterzen/winrm/soap", + "Rev": "54ea5d01478cfc2afccec1504bd0dfcd8c260cfa" + }, + { + "ImportPath": "github.com/masterzen/winrm/winrm", + "Rev": "54ea5d01478cfc2afccec1504bd0dfcd8c260cfa" + }, + { + "ImportPath": 
"github.com/masterzen/xmlpath", + "Rev": "13f4951698adc0fa9c1dda3e275d489a24201161" + }, + { + "ImportPath": "github.com/mattn/go-isatty", + "Rev": "56b76bdf51f7708750eac80fa38b952bb9f32639" + }, + { + "ImportPath": "github.com/mitchellh/cli", + "Rev": "cb6853d606ea4a12a15ac83cc43503df99fd28fb" + }, + { + "ImportPath": "github.com/mitchellh/colorstring", + "Rev": "8631ce90f28644f54aeedcb3e389a85174e067d1" + }, + { + "ImportPath": "github.com/mitchellh/copystructure", + "Rev": "80adcec1955ee4e97af357c30dee61aadcc02c10" + }, + { + "ImportPath": "github.com/mitchellh/go-homedir", + "Rev": "d682a8f0cf139663a984ff12528da460ca963de9" + }, + { + "ImportPath": "github.com/mitchellh/go-linereader", + "Rev": "07bab5fdd9580500aea6ada0e09df4aa28e68abd" + }, + { + "ImportPath": "github.com/mitchellh/mapstructure", + "Rev": "281073eb9eb092240d33ef253c404f1cca550309" + }, + { + "ImportPath": "github.com/mitchellh/packer/common/uuid", + "Comment": "v0.8.6-411-g314aad3", + "Rev": "314aad379a39f6ad5bcca278e6757d9abbb3a52e" + }, + { + "ImportPath": "github.com/mitchellh/panicwrap", + "Rev": "a1e50bc201f387747a45ffff020f1af2d8759e88" + }, + { + "ImportPath": "github.com/mitchellh/prefixedio", + "Rev": "6e6954073784f7ee67b28f2d22749d6479151ed7" + }, + { + "ImportPath": "github.com/mitchellh/reflectwalk", + "Rev": "eecf4c70c626c7cfbb95c90195bc34d386c74ac6" + }, + { + "ImportPath": "github.com/nesv/go-dynect/dynect", + "Comment": "v0.2.0-8-g841842b", + "Rev": "841842b16b39cf2b5007278956976d7d909bd98b" + }, + { + "ImportPath": "github.com/nu7hatch/gouuid", + "Rev": "179d4d0c4d8d407a32af483c2354df1d2c91e6c3" + }, + { + "ImportPath": "github.com/packer-community/winrmcp/winrmcp", + "Rev": "3d184cea22ee1c41ec1697e0d830ff0c78f7ea97" + }, + { + "ImportPath": "github.com/packethost/packngo", + "Rev": "f03d7dc788a8b57b62d301ccb98c950c325756f8" + }, + { + "ImportPath": "github.com/pborman/uuid", + "Rev": "dee7705ef7b324f27ceb85a121c61f2c2e8ce988" + }, + { + "ImportPath": "github.com/pearkes/cloudflare", + "Rev": "765ac1828a78ba49e6dc48309d56415c61806ac3" + }, + { + "ImportPath": "github.com/pearkes/dnsimple", + "Rev": "78996265f576c7580ff75d0cb2c606a61883ceb8" + }, + { + "ImportPath": "github.com/pearkes/mailgun", + "Rev": "b88605989c4141d22a6d874f78800399e5bb7ac2" + }, + { + "ImportPath": "github.com/rackspace/gophercloud", + "Comment": "v1.0.0-774-g680aa02", + "Rev": "680aa02616313d8399abc91f17a444cf9292f0e1" + }, + { + "ImportPath": "github.com/satori/go.uuid", + "Rev": "d41af8bb6a7704f00bc3b7cba9355ae6a5a80048" + }, + { + "ImportPath": "github.com/soniah/dnsmadeeasy", + "Comment": "v1.1-2-g5578a8c", + "Rev": "5578a8c15e33958c61cf7db720b6181af65f4a9e" + }, + { + "ImportPath": "github.com/sthulb/mime/multipart", + "Rev": "698462dc9685d7743511c26da726c1b0c1cfb111" + }, + { + "ImportPath": "github.com/tent/http-link-go", + "Rev": "ac974c61c2f990f4115b119354b5e0b47550e888" + }, + { + "ImportPath": "github.com/vmware/govmomi", + "Comment": "v0.3.0-18-g3b66976", + "Rev": "3b669760f460befb135048aea80d2f5fa13885ca" + }, + { + "ImportPath": "github.com/xanzy/go-cloudstack/cloudstack", + "Comment": "v1.2.0-61-g252eb1b", + "Rev": "252eb1b665d77aa31dedd435fab0a7da57b2d8c1" + }, + { + "ImportPath": "github.com/xanzy/ssh-agent", + "Rev": "ba9c9e33906f58169366275e3450db66139a31a9" + }, + { + "ImportPath": "github.com/ziutek/mymysql/mysql", + "Comment": "v1.5.4-13-g75ce5fb", + "Rev": "75ce5fbba34b1912a3641adbd58cf317d7315821" + }, + { + "ImportPath": "github.com/ziutek/mymysql/native", + "Comment": "v1.5.4-13-g75ce5fb", + "Rev": 
"75ce5fbba34b1912a3641adbd58cf317d7315821" + }, + { + "ImportPath": "github.com/ziutek/mymysql/thrsafe", + "Comment": "v1.5.4-13-g75ce5fb", + "Rev": "75ce5fbba34b1912a3641adbd58cf317d7315821" + }, + { + "ImportPath": "golang.org/x/crypto/curve25519", + "Rev": "1f22c0103821b9390939b6776727195525381532" + }, + { + "ImportPath": "golang.org/x/crypto/ssh", + "Rev": "1f22c0103821b9390939b6776727195525381532" + }, + { + "ImportPath": "golang.org/x/net/context", + "Rev": "04b9de9b512f58addf28c9853d50ebef61c3953e" + }, + { + "ImportPath": "golang.org/x/oauth2", + "Rev": "8a57ed94ffd43444c0879fe75701732a38afc985" + }, + { + "ImportPath": "google.golang.org/api/compute/v1", + "Rev": "f6ba98773d96b877b246f3a9266493dfc11e276b" + }, + { + "ImportPath": "google.golang.org/api/container/v1", + "Rev": "f6ba98773d96b877b246f3a9266493dfc11e276b" + }, + { + "ImportPath": "google.golang.org/api/dns/v1", + "Rev": "f6ba98773d96b877b246f3a9266493dfc11e276b" + }, + { + "ImportPath": "google.golang.org/api/gensupport", + "Rev": "f6ba98773d96b877b246f3a9266493dfc11e276b" + }, + { + "ImportPath": "google.golang.org/api/googleapi", + "Rev": "f6ba98773d96b877b246f3a9266493dfc11e276b" + }, + { + "ImportPath": "google.golang.org/api/pubsub/v1", + "Rev": "f6ba98773d96b877b246f3a9266493dfc11e276b" + }, + { + "ImportPath": "google.golang.org/api/sqladmin/v1beta4", + "Rev": "f6ba98773d96b877b246f3a9266493dfc11e276b" + }, + { + "ImportPath": "google.golang.org/api/storage/v1", + "Rev": "f6ba98773d96b877b246f3a9266493dfc11e276b" + }, + { + "ImportPath": "google.golang.org/cloud/compute/metadata", + "Rev": "fb10e8da373d97f6ba5e648299a10b3b91f14cd5" + }, + { + "ImportPath": "google.golang.org/cloud/internal", + "Rev": "fb10e8da373d97f6ba5e648299a10b3b91f14cd5" + } + ] +} diff --git a/Godeps/Readme b/Godeps/Readme new file mode 100644 index 000000000..4cdaa53d5 --- /dev/null +++ b/Godeps/Readme @@ -0,0 +1,5 @@ +This directory tree is generated automatically by godep. + +Please do not edit. + +See https://github.com/tools/godep for more information. diff --git a/Makefile b/Makefile index dc5f3cc12..b1cba7070 100644 --- a/Makefile +++ b/Makefile @@ -1,4 +1,4 @@ -TEST?=./... +TEST?=$$(GO15VENDOREXPERIMENT=1 go list ./... | grep -v /vendor/) VETARGS?=-asmdecl -atomic -bool -buildtags -copylocks -methods -nilfunc -printf -rangeloops -shift -structtags -unsafeptr default: test @@ -19,7 +19,7 @@ quickdev: generate # changes will require a rebuild of everything, in which case the dev # target should be used. core-dev: fmtcheck generate - go install github.com/hashicorp/terraform + GO15VENDOREXPERIMENT=1 go install github.com/hashicorp/terraform # Shorthand for quickly testing the core of Terraform (i.e. "not providers") core-test: generate @@ -28,13 +28,12 @@ core-test: generate # Shorthand for building and installing just one plugin for local testing. 
# Run as (for example): make plugin-dev PLUGIN=provider-aws plugin-dev: fmtcheck generate - go install github.com/hashicorp/terraform/builtin/bins/$(PLUGIN) + GO15VENDOREXPERIMENT=1 go install github.com/hashicorp/terraform/builtin/bins/$(PLUGIN) mv $(GOPATH)/bin/$(PLUGIN) $(GOPATH)/bin/terraform-$(PLUGIN) -# test runs the unit tests and vets the code +# test runs the unit tests test: fmtcheck generate - TF_ACC= go test $(TEST) $(TESTARGS) -timeout=30s -parallel=4 - @$(MAKE) vet + TF_ACC= GO15VENDOREXPERIMENT=1 go test $(TEST) $(TESTARGS) -timeout=30s -parallel=4 # testacc runs acceptance tests testacc: fmtcheck generate @@ -43,11 +42,11 @@ testacc: fmtcheck generate echo " make testacc TEST=./builtin/providers/aws"; \ exit 1; \ fi - TF_ACC=1 go test $(TEST) -v $(TESTARGS) -timeout 120m + TF_ACC=1 GO15VENDOREXPERIMENT=1 go test $(TEST) -v $(TESTARGS) -timeout 120m # testrace runs the race checker testrace: fmtcheck generate - TF_ACC= go test -race $(TEST) $(TESTARGS) + TF_ACC= GO15VENDOREXPERIMENT=1 go test -race $(TEST) $(TESTARGS) # updatedeps installs all the dependencies that Terraform needs to run # and build. @@ -65,8 +64,8 @@ cover: @go tool cover 2>/dev/null; if [ $$? -eq 3 ]; then \ go get -u golang.org/x/tools/cmd/cover; \ fi - go test $(TEST) -coverprofile=coverage.out - go tool cover -html=coverage.out + GO15VENDOREXPERIMENT=1 go test $(TEST) -coverprofile=coverage.out + GO15VENDOREXPERIMENT=1 go tool cover -html=coverage.out rm coverage.out # vet runs the Go source code static analysis tool `vet` to find @@ -76,7 +75,7 @@ vet: go get golang.org/x/tools/cmd/vet; \ fi @echo "go tool vet $(VETARGS) ." - @go tool vet $(VETARGS) . ; if [ $$? -eq 1 ]; then \ + @GO15VENDOREXPERIMENT=1 go tool vet $(VETARGS) . ; if [ $$? -eq 1 ]; then \ echo ""; \ echo "Vet found suspicious constructs. Please check the reported constructs"; \ echo "and fix them if necessary before submitting the code for review."; \ @@ -86,7 +85,10 @@ vet: # generate runs `go generate` to build the dynamically generated # source files. generate: - go generate ./... + @which stringer ; if [ $$? -ne 0 ]; then \ + go get -u golang.org/x/tools/cmd/stringer; \ + fi + GO15VENDOREXPERIMENT=1 go generate $$(GO15VENDOREXPERIMENT=1 go list ./... | grep -v /vendor/) fmt: gofmt -w . diff --git a/scripts/build.sh b/scripts/build.sh index a4cf52391..af8904511 100755 --- a/scripts/build.sh +++ b/scripts/build.sh @@ -18,12 +18,8 @@ GIT_DIRTY=$(test -n "`git status --porcelain`" && echo "+CHANGES" || true) XC_ARCH=${XC_ARCH:-"386 amd64 arm"} XC_OS=${XC_OS:-linux darwin windows freebsd openbsd solaris} - -# Get dependencies unless running in quick mode -if [ "${TF_QUICKDEV}x" == "x" ]; then - echo "==> Getting dependencies..." - go get -d ./... -fi +# Use vendored dependencies +export GO15VENDOREXPERIMENT=1 # Delete the old dir echo "==> Removing old directory..." diff --git a/scripts/gofmtcheck.sh b/scripts/gofmtcheck.sh index 315571042..a89f042fd 100755 --- a/scripts/gofmtcheck.sh +++ b/scripts/gofmtcheck.sh @@ -2,7 +2,7 @@ # Check gofmt echo "==> Checking that code complies with gofmt requirements..." -gofmt_files=$(gofmt -l .) +gofmt_files=$(gofmt -l . 
| grep -v vendor) if [[ -n ${gofmt_files} ]]; then echo 'gofmt needs running on the following files:' echo "${gofmt_files}" diff --git a/vendor/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/autorest.go b/vendor/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/autorest.go new file mode 100644 index 000000000..c50d13c9c --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/autorest.go @@ -0,0 +1,163 @@ +/* +Package autorest implements an HTTP request pipeline suitable for use across multiple go-routines +and provides the shared routines relied on by AutoRest (see https://github.com/Azure/autorest/) +generated Go code. + +The package breaks sending and responding to HTTP requests into three phases: Preparing, Sending, +and Responding. A typical pattern is: + + req, err := Prepare(&http.Request{}, + token.WithAuthorization()) + + resp, err := Send(req, + WithLogging(logger), + DoErrorIfStatusCode(http.StatusInternalServerError), + DoCloseIfError(), + DoRetryForAttempts(5, time.Second)) + + err = Respond(resp, + ByClosing()) + +Each phase relies on decorators to modify and / or manage processing. Decorators may first modify +and then pass the data along, pass the data first and then modify the result, or wrap themselves +around passing the data (such as a logger might do). Decorators run in the order provided. For +example, the following: + + req, err := Prepare(&http.Request{}, + WithBaseURL("https://microsoft.com/"), + WithPath("a"), + WithPath("b"), + WithPath("c")) + +will set the URL to: + + https://microsoft.com/a/b/c + +Preparers and Responders may be shared and re-used (assuming the underlying decorators support +sharing and re-use). Performant use is obtained by creating one or more Preparers and Responders +shared among multiple go-routines, and a single Sender shared among multiple sending go-routines, +all bound together by means of input / output channels. + +Decorators hold their passed state within a closure (such as the path components in the example +above). Be careful to share Preparers and Responders only in a context where such held state +applies. For example, it may not make sense to share a Preparer that applies a query string from a +fixed set of values. Similarly, sharing a Responder that reads the response body into a passed +struct (e.g., ByUnmarshallingJson) is likely incorrect. + +Lastly, the Swagger specification (https://swagger.io) that drives AutoRest +(https://github.com/Azure/autorest/) precisely defines two date forms: date and date-time. The +github.com/Azure/go-autorest/autorest/date package provides time.Time derivations to ensure +correct parsing and formatting. + +Errors raised by autorest objects and methods will conform to the autorest.Error interface. + +See the included examples for more detail. For details on the suggested use of this package by +generated clients, see the Client described below. +*/ +package autorest + +import ( + "net/http" + "time" +) + +const ( + headerLocation = "Location" + headerRetryAfter = "Retry-After" +) + +// ResponseHasStatusCode returns true if the status code in the HTTP Response is in the passed set +// and false otherwise. 
+func ResponseHasStatusCode(resp *http.Response, codes ...int) bool { + return containsInt(codes, resp.StatusCode) +} + +// ResponseRequiresPolling returns true if the passed http.Response requires polling follow-up +// request (as determined by the status code being in the passed set, which defaults to HTTP 202 +// Accepted). +func ResponseRequiresPolling(resp *http.Response, codes ...int) bool { + if resp.StatusCode == http.StatusOK { + return false + } + + if len(codes) == 0 { + codes = []int{http.StatusAccepted} + } + + return ResponseHasStatusCode(resp, codes...) +} + +// NewPollingRequest allocates and returns a new http.Request to poll for the passed response. If +// it successfully creates the request, it will also close the body of the passed response, +// otherwise the body remains open. +func NewPollingRequest(resp *http.Response, authorizer Authorizer) (*http.Request, error) { + location := GetPollingLocation(resp) + if location == "" { + return nil, NewErrorWithStatusCode("autorest", "NewPollingRequest", resp.StatusCode, "Location header missing from response that requires polling") + } + + req, err := Prepare(&http.Request{}, + AsGet(), + WithBaseURL(location), + authorizer.WithAuthorization()) + if err != nil { + return nil, NewErrorWithError(err, "autorest", "NewPollingRequest", UndefinedStatusCode, "Failure creating poll request to %s", location) + } + + Respond(resp, + ByClosing()) + + return req, nil +} + +// GetPollingDelay extracts the polling delay from the Retry-After header of the passed response. If +// the header is absent or is malformed, it will return the supplied default delay time.Duration. +func GetPollingDelay(resp *http.Response, defaultDelay time.Duration) time.Duration { + retry := resp.Header.Get(headerRetryAfter) + if retry == "" { + return defaultDelay + } + + d, err := time.ParseDuration(retry + "s") + if err != nil { + return defaultDelay + } + + return d +} + +// GetPollingLocation retrieves the polling URL from the Location header of the passed response. +func GetPollingLocation(resp *http.Response) string { + return resp.Header.Get(headerLocation) +} + +// PollForAttempts will retry the passed http.Request until it receives an HTTP status code outside +// the passed set or has made the specified number of attempts. The set of status codes defaults to +// HTTP 202 Accepted. +func PollForAttempts(s Sender, req *http.Request, defaultDelay time.Duration, attempts int, codes ...int) (*http.Response, error) { + return SendWithSender( + decorateForPolling(s, defaultDelay, codes...), + req, + DoRetryForAttempts(attempts, time.Duration(0))) +} + +// PollForDuration will retry the passed http.Request until it receives an HTTP status code outside +// the passed set or the total time meets or exceeds the specified duration. The set of status codes +// defaults to HTTP 202 Accepted. 
+func PollForDuration(s Sender, req *http.Request, defaultDelay time.Duration, total time.Duration, codes ...int) (*http.Response, error) { + return SendWithSender( + decorateForPolling(s, defaultDelay, codes...), + req, + DoRetryForDuration(total, time.Duration(0))) +} + +func decorateForPolling(s Sender, defaultDelay time.Duration, codes ...int) Sender { + if len(codes) == 0 { + codes = []int{http.StatusAccepted} + } + + return DecorateSender(s, + AfterRetryDelay(defaultDelay), + DoErrorIfStatusCode(codes...), + DoCloseIfError()) +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/azure/azure.go b/vendor/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/azure/azure.go new file mode 100644 index 000000000..e43589302 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/azure/azure.go @@ -0,0 +1,72 @@ +/* +Package azure provides Azure-specific implementations used with AutoRest. + +See the included examples for more detail. +*/ +package azure + +import ( + "net/http" + "strconv" + + "github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest" +) + +const ( + // HeaderClientID is the Azure extension header to set a user-specified request ID. + HeaderClientID = "x-ms-client-request-id" + + // HeaderReturnClientID is the Azure extension header to set if the user-specified request ID + // should be included in the response. + HeaderReturnClientID = "x-ms-return-client-request-id" + + // HeaderRequestID is the Azure extension header of the service generated request ID returned + // in the response. + HeaderRequestID = "x-ms-request-id" +) + +// WithReturningClientID returns a PrepareDecorator that adds an HTTP extension header of +// x-ms-client-request-id whose value is the passed, undecorated UUID (e.g., +// "0F39878C-5F76-4DB8-A25D-61D2C193C3CA"). It also sets the x-ms-return-client-request-id +// header to true such that UUID accompanies the http.Response. +func WithReturningClientID(uuid string) autorest.PrepareDecorator { + preparer := autorest.CreatePreparer( + WithClientID(uuid), + WithReturnClientID(true)) + + return func(p autorest.Preparer) autorest.Preparer { + return autorest.PreparerFunc(func(r *http.Request) (*http.Request, error) { + r, err := p.Prepare(r) + if err != nil { + return r, err + } + return preparer.Prepare(r) + }) + } +} + +// WithClientID returns a PrepareDecorator that adds an HTTP extension header of +// x-ms-client-request-id whose value is passed, undecorated UUID (e.g., +// "0F39878C-5F76-4DB8-A25D-61D2C193C3CA"). +func WithClientID(uuid string) autorest.PrepareDecorator { + return autorest.WithHeader(HeaderClientID, uuid) +} + +// WithReturnClientID returns a PrepareDecorator that adds an HTTP extension header of +// x-ms-return-client-request-id whose boolean value indicates if the value of the +// x-ms-client-request-id header should be included in the http.Response. 
+func WithReturnClientID(b bool) autorest.PrepareDecorator { + return autorest.WithHeader(HeaderReturnClientID, strconv.FormatBool(b)) +} + +// ExtractClientID extracts the client identifier from the x-ms-client-request-id header set on the +// http.Request sent to the service (and returned in the http.Response) +func ExtractClientID(resp *http.Response) string { + return autorest.ExtractHeaderValue(HeaderClientID, resp) +} + +// ExtractRequestID extracts the Azure server generated request identifier from the +// x-ms-request-id header. +func ExtractRequestID(resp *http.Response) string { + return autorest.ExtractHeaderValue(HeaderRequestID, resp) +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/azure/example/README.md b/vendor/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/azure/example/README.md new file mode 100644 index 000000000..49909718b --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/azure/example/README.md @@ -0,0 +1,115 @@ +# autorest azure example + +## Usage + +This example covers how to make an authenticated call to the Azure Resource Manager APIs, using certificate-based authentication. + +0. Export some required variables + + ``` + export SUBSCRIPTION_ID="aff271ee-e9be-4441-b9bb-42f5af4cbaeb" + export TENANT_ID="13de0a15-b5db-44b9-b682-b4ba82afbd29" + export RESOURCE_GROUP="someresourcegroup" + ``` + + * replace both values with your own + +1. Create a private key + + ``` + openssl genrsa -out "example.key" 2048 + ``` + + + +2. Create the certificate + + ``` + openssl req -new -key "example.key" -subj "/CN=example" -out "example.csr" + + openssl x509 -req -in "example.csr" -signkey "example.key" -out "example.crt" -days 10000 + ``` + + + +3. Create the PKCS12 version of the certificate (with no password) + + ``` + openssl pkcs12 -export -out "example.pfx" -inkey "example.key" -in "example.crt" -passout pass: + ``` + + + +4. Register a new Azure AD Application with the certificate contents + + ``` + certificateContents="$(tail -n+2 "example.key" | head -n-1)" + + azure ad app create \ + --name "example-azuread-app" \ + --home-page="http://example-azuread-app/home" \ + --identifier-uris "http://example-azuread-app/app" \ + --key-usage "Verify" \ + --end-date "2020-01-01" \ + --key-value "${certificateContents}" + ``` + + + +5. Create a new service principal using the "Application Id" from the previous step + + ``` + azure ad sp create "APPLICATION_ID" + ``` + + * Replace APPLICATION_ID with the "Application Id" returned in step 4 + + + +6. Grant your service principal necessary permissions + + ``` + azure role assignment create \ + --resource-group "${RESOURCE_GROUP}" \ + --roleName "Contributor" \ + --subscription "${SUBSCRIPTION_ID}" \ + --spn "http://example-azuread-app/app" + ``` + + * Replace SUBSCRIPTION_ID with your subscription id + * Replace RESOURCE_GROUP with the resource group for the assignment + * Ensure that the `spn` parameter matches an `identifier-url` from Step 4 + + + +7. 
Run this example app to see your resource groups + + ``` + go run main.go \ + --tenantId="${TENANT_ID}" \ + --subscriptionId="${SUBSCRIPTION_ID}" \ + --applicationId="http://example-azuread-app/app" \ + --certificatePath="certificate.pfx" + ``` + + +You should see something like this as output: + +``` +2015/11/08 18:28:39 Using these settings: +2015/11/08 18:28:39 * certificatePath: certificate.pfx +2015/11/08 18:28:39 * applicationID: http://example-azuread-app/app +2015/11/08 18:28:39 * tenantID: 13de0a15-b5db-44b9-b682-b4ba82afbd29 +2015/11/08 18:28:39 * subscriptionID: aff271ee-e9be-4441-b9bb-42f5af4cbaeb +2015/11/08 18:28:39 loading certificate... +2015/11/08 18:28:39 retrieve oauth token... +2015/11/08 18:28:39 querying the list of resource groups... +2015/11/08 18:28:50 +2015/11/08 18:28:50 Groups: {"value":[{"id":"/subscriptions/aff271ee-e9be-4441-b9bb-42f5af4cbaeb/resourceGroups/kube-66f30810","name":"kube-66f30810","location":"westus","tags":{},"properties":{"provisioningState":"Succeeded"}}]} +``` + + + +## Notes + +You may need to wait sometime between executing step 4, step 5 and step 6. If you issue those requests too quickly, you might hit an AD server that is not consistent with the server where the resource was created. diff --git a/vendor/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/azure/example/main.go b/vendor/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/azure/example/main.go new file mode 100644 index 000000000..f2b71b299 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/azure/example/main.go @@ -0,0 +1,130 @@ +package main + +import ( + "crypto/rsa" + "crypto/x509" + "flag" + "fmt" + "io/ioutil" + "log" + "net/http" + "strings" + + "github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest" + "github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/azure" + "github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/golang.org/x/crypto/pkcs12" +) + +const resourceGroupURLTemplate = "https://management.azure.com/subscriptions/{subscription-id}/resourcegroups" +const apiVersion = "2015-01-01" + +var ( + certificatePath string + applicationID string + tenantID string + subscriptionID string +) + +func init() { + flag.StringVar(&certificatePath, "certificatePath", "", "path to pk12/pfx certificate") + flag.StringVar(&applicationID, "applicationId", "", "application id") + flag.StringVar(&tenantID, "tenantId", "", "tenant id") + flag.StringVar(&subscriptionID, "subscriptionId", "", "subscription id") + flag.Parse() + + log.Println("Using these settings:") + log.Println("* certificatePath:", certificatePath) + log.Println("* applicationID:", applicationID) + log.Println("* tenantID:", tenantID) + log.Println("* subscriptionID:", subscriptionID) + + if strings.Trim(certificatePath, " ") == "" || + strings.Trim(applicationID, " ") == "" || + strings.Trim(tenantID, " ") == "" || + strings.Trim(subscriptionID, " ") == "" { + log.Fatalln("Bad usage. Please specify all four parameters") + } +} + +func main() { + log.Println("loading certificate... ") + certData, err := ioutil.ReadFile(certificatePath) + if err != nil { + log.Fatalln("failed", err) + } + + certificate, rsaPrivateKey, err := decodePkcs12(certData, "") + if err != nil { + log.Fatalln("failed", err) + } + + log.Println("retrieve oauth token... 
") + spt, err := azure.NewServicePrincipalTokenFromCertificate( + applicationID, + certificate, + rsaPrivateKey, + tenantID, + azure.AzureResourceManagerScope) + if err != nil { + log.Fatalln("failed", err) + panic(err) + } + + client := &autorest.Client{} + client.Authorizer = spt + + log.Println("querying the list of resource groups... ") + groupsAsString, err := getResourceGroups(client) + if err != nil { + log.Fatalln("failed", err) + } + + log.Println("") + log.Println("Groups:", *groupsAsString) +} + +func getResourceGroups(client *autorest.Client) (*string, error) { + var p map[string]interface{} + var req *http.Request + p = map[string]interface{}{ + "subscription-id": subscriptionID, + } + q := map[string]interface{}{ + "api-version": apiVersion, + } + + req, _ = autorest.Prepare(&http.Request{}, + autorest.AsGet(), + autorest.WithBaseURL(resourceGroupURLTemplate), + autorest.WithPathParameters(p), + autorest.WithQueryParameters(q)) + + resp, err := client.Send(req, http.StatusOK) + if err != nil { + return nil, err + } + + defer resp.Body.Close() + contents, err := ioutil.ReadAll(resp.Body) + if err != nil { + return nil, err + } + + contentsString := string(contents) + + return &contentsString, nil +} + +func decodePkcs12(pkcs []byte, password string) (*x509.Certificate, *rsa.PrivateKey, error) { + privateKey, certificate, err := pkcs12.Decode(pkcs, password) + if err != nil { + return nil, nil, err + } + + rsaPrivateKey, isRsaKey := privateKey.(*rsa.PrivateKey) + if !isRsaKey { + return nil, nil, fmt.Errorf("PKCS#12 certificate must contain an RSA private key") + } + + return certificate, rsaPrivateKey, nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/azure/token.go b/vendor/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/azure/token.go new file mode 100644 index 000000000..955b29d8e --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/azure/token.go @@ -0,0 +1,298 @@ +package azure + +import ( + "crypto/rand" + "crypto/rsa" + "crypto/sha1" + "crypto/x509" + "encoding/base64" + "fmt" + "net/http" + "net/url" + "strconv" + "time" + + "github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest" + "github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/dgrijalva/jwt-go" +) + +const ( + defaultRefresh = 5 * time.Minute + oauthURL = "https://login.microsoftonline.com/{tenantID}/oauth2/{requestType}?api-version=1.0" + tokenBaseDate = "1970-01-01T00:00:00Z" + + jwtAudienceTemplate = "https://login.microsoftonline.com/%s/oauth2/token" + + // AzureResourceManagerScope is the OAuth scope for the Azure Resource Manager. + AzureResourceManagerScope = "https://management.azure.com/" +) + +var expirationBase time.Time + +func init() { + expirationBase, _ = time.Parse(time.RFC3339, tokenBaseDate) +} + +// Token encapsulates the access token used to authorize Azure requests. +type Token struct { + AccessToken string `json:"access_token"` + + ExpiresIn string `json:"expires_in"` + ExpiresOn string `json:"expires_on"` + NotBefore string `json:"not_before"` + + Resource string `json:"resource"` + Type string `json:"token_type"` +} + +// Expires returns the time.Time when the Token expires. 
+func (t Token) Expires() time.Time { + s, err := strconv.Atoi(t.ExpiresOn) + if err != nil { + s = -3600 + } + return expirationBase.Add(time.Duration(s) * time.Second).UTC() +} + +// IsExpired returns true if the Token is expired, false otherwise. +func (t Token) IsExpired() bool { + return t.WillExpireIn(0) +} + +// WillExpireIn returns true if the Token will expire after the passed time.Duration interval +// from now, false otherwise. +func (t Token) WillExpireIn(d time.Duration) bool { + return !t.Expires().After(time.Now().Add(d)) +} + +// WithAuthorization returns a PrepareDecorator that adds an HTTP Authorization header whose +// value is "Bearer " followed by the AccessToken of the Token. +func (t *Token) WithAuthorization() autorest.PrepareDecorator { + return func(p autorest.Preparer) autorest.Preparer { + return autorest.PreparerFunc(func(r *http.Request) (*http.Request, error) { + return (autorest.WithBearerAuthorization(t.AccessToken)(p)).Prepare(r) + }) + } +} + +// ServicePrincipalSecret is an interface that allows various secret mechanism to fill the form +// that is submitted when acquiring an oAuth token. +type ServicePrincipalSecret interface { + SetAuthenticationValues(spt *ServicePrincipalToken, values *url.Values) error +} + +// ServicePrincipalTokenSecret implements ServicePrincipalSecret for client_secret type authorization. +type ServicePrincipalTokenSecret struct { + ClientSecret string +} + +// SetAuthenticationValues is a method of the interface ServicePrincipalTokenSecret. +// It will populate the form submitted during oAuth Token Acquisition using the client_secret. +func (tokenSecret *ServicePrincipalTokenSecret) SetAuthenticationValues(spt *ServicePrincipalToken, v *url.Values) error { + v.Set("client_secret", tokenSecret.ClientSecret) + return nil +} + +// ServicePrincipalCertificateSecret implements ServicePrincipalSecret for generic RSA cert auth with signed JWTs. +type ServicePrincipalCertificateSecret struct { + Certificate *x509.Certificate + PrivateKey *rsa.PrivateKey +} + +// SignJwt returns the JWT signed with the certificate's private key. +func (secret *ServicePrincipalCertificateSecret) SignJwt(spt *ServicePrincipalToken) (string, error) { + hasher := sha1.New() + _, err := hasher.Write(secret.Certificate.Raw) + if err != nil { + return "", err + } + + thumbprint := base64.URLEncoding.EncodeToString(hasher.Sum(nil)) + + // The jti (JWT ID) claim provides a unique identifier for the JWT. + jti := make([]byte, 20) + _, err = rand.Read(jti) + if err != nil { + return "", err + } + + token := jwt.New(jwt.SigningMethodRS256) + token.Header["x5t"] = thumbprint + token.Claims = map[string]interface{}{ + "aud": fmt.Sprintf(jwtAudienceTemplate, spt.tenantID), + "iss": spt.clientID, + "sub": spt.clientID, + "jti": base64.URLEncoding.EncodeToString(jti), + "nbf": time.Now().Unix(), + "exp": time.Now().Add(time.Hour * 24).Unix(), + } + + signedString, err := token.SignedString(secret.PrivateKey) + return signedString, nil +} + +// SetAuthenticationValues is a method of the interface ServicePrincipalTokenSecret. +// It will populate the form submitted during oAuth Token Acquisition using a JWT signed with a certificate. 
+func (secret *ServicePrincipalCertificateSecret) SetAuthenticationValues(spt *ServicePrincipalToken, v *url.Values) error { + jwt, err := secret.SignJwt(spt) + if err != nil { + return err + } + + v.Set("client_assertion", jwt) + v.Set("client_assertion_type", "urn:ietf:params:oauth:client-assertion-type:jwt-bearer") + return nil +} + +// ServicePrincipalToken encapsulates a Token created for a Service Principal. +type ServicePrincipalToken struct { + Token + + secret ServicePrincipalSecret + clientID string + tenantID string + resource string + autoRefresh bool + refreshWithin time.Duration + sender autorest.Sender +} + +// NewServicePrincipalTokenWithSecret create a ServicePrincipalToken using the supplied ServicePrincipalSecret implementation. +func NewServicePrincipalTokenWithSecret(id string, tenantID string, resource string, secret ServicePrincipalSecret) (*ServicePrincipalToken, error) { + spt := &ServicePrincipalToken{ + secret: secret, + clientID: id, + resource: resource, + tenantID: tenantID, + autoRefresh: true, + refreshWithin: defaultRefresh, + sender: &http.Client{}, + } + return spt, nil +} + +// NewServicePrincipalToken creates a ServicePrincipalToken from the supplied Service Principal +// credentials scoped to the named resource. +func NewServicePrincipalToken(id string, secret string, tenantID string, resource string) (*ServicePrincipalToken, error) { + return NewServicePrincipalTokenWithSecret( + id, + tenantID, + resource, + &ServicePrincipalTokenSecret{ + ClientSecret: secret, + }, + ) +} + +// NewServicePrincipalTokenFromCertificate create a ServicePrincipalToken from the supplied pkcs12 bytes. +func NewServicePrincipalTokenFromCertificate(id string, certificate *x509.Certificate, privateKey *rsa.PrivateKey, tenantID string, resource string) (*ServicePrincipalToken, error) { + return NewServicePrincipalTokenWithSecret( + id, + tenantID, + resource, + &ServicePrincipalCertificateSecret{ + PrivateKey: privateKey, + Certificate: certificate, + }, + ) +} + +// EnsureFresh will refresh the token if it will expire within the refresh window (as set by +// RefreshWithin). +func (spt *ServicePrincipalToken) EnsureFresh() error { + if spt.WillExpireIn(spt.refreshWithin) { + return spt.Refresh() + } + return nil +} + +// Refresh obtains a fresh token for the Service Principal. 
+func (spt *ServicePrincipalToken) Refresh() error { + p := map[string]interface{}{ + "tenantID": spt.tenantID, + "requestType": "token", + } + + v := url.Values{} + v.Set("client_id", spt.clientID) + v.Set("grant_type", "client_credentials") + v.Set("resource", spt.resource) + + err := spt.secret.SetAuthenticationValues(spt, &v) + if err != nil { + return err + } + + req, err := autorest.Prepare(&http.Request{}, + autorest.AsPost(), + autorest.AsFormURLEncoded(), + autorest.WithBaseURL(oauthURL), + autorest.WithPathParameters(p), + autorest.WithFormData(v)) + if err != nil { + return err + } + + resp, err := autorest.SendWithSender(spt.sender, req) + if err != nil { + return autorest.NewErrorWithError(err, + "azure.ServicePrincipalToken", "Refresh", resp.StatusCode, "Failure sending request for Service Principal %s", + spt.clientID) + } + + var newToken Token + + err = autorest.Respond(resp, + autorest.WithErrorUnlessOK(), + autorest.ByUnmarshallingJSON(&newToken), + autorest.ByClosing()) + if err != nil { + return autorest.NewErrorWithError(err, + "azure.ServicePrincipalToken", "Refresh", resp.StatusCode, "Failure handling response to Service Principal %s request", + spt.clientID) + } + + spt.Token = newToken + + return nil +} + +// SetAutoRefresh enables or disables automatic refreshing of stale tokens. +func (spt *ServicePrincipalToken) SetAutoRefresh(autoRefresh bool) { + spt.autoRefresh = autoRefresh +} + +// SetRefreshWithin sets the interval within which if the token will expire, EnsureFresh will +// refresh the token. +func (spt *ServicePrincipalToken) SetRefreshWithin(d time.Duration) { + spt.refreshWithin = d + return +} + +// SetSender sets the autorest.Sender used when obtaining the Service Principal token. An +// undecorated http.Client is used by default. +func (spt *ServicePrincipalToken) SetSender(s autorest.Sender) { + spt.sender = s +} + +// WithAuthorization returns a PrepareDecorator that adds an HTTP Authorization header whose +// value is "Bearer " followed by the AccessToken of the ServicePrincipalToken. +// +// By default, the token will automatically refresh if nearly expired (as determined by the +// RefreshWithin interval). Use the AutoRefresh method to enable or disable automatically refreshing +// tokens. +func (spt *ServicePrincipalToken) WithAuthorization() autorest.PrepareDecorator { + return func(p autorest.Preparer) autorest.Preparer { + return autorest.PreparerFunc(func(r *http.Request) (*http.Request, error) { + if spt.autoRefresh { + err := spt.EnsureFresh() + if err != nil { + return r, autorest.NewErrorWithError(err, + "azure.ServicePrincipalToken", "WithAuthorization", autorest.UndefinedStatusCode, "Failed to refresh Service Principal Token for request to %s", + r.URL) + } + } + return (autorest.WithBearerAuthorization(spt.AccessToken)(p)).Prepare(r) + }) + } +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/client.go b/vendor/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/client.go new file mode 100644 index 000000000..09202333a --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/client.go @@ -0,0 +1,288 @@ +package autorest + +import ( + "bytes" + "io" + "io/ioutil" + "log" + "net/http" + "time" +) + +const ( + // DefaultPollingDelay is the default delay between polling requests (only used if the + // http.Request lacks a well-formed Retry-After header). 
+ DefaultPollingDelay = 60 * time.Second + + // DefaultPollingDuration is the default total polling duration. + DefaultPollingDuration = 15 * time.Minute +) + +// PollingMode sets how, if at all, clients composed with Client will poll. +type PollingMode string + +const ( + // PollUntilAttempts polling mode polls until reaching a maximum number of attempts. + PollUntilAttempts PollingMode = "poll-until-attempts" + + // PollUntilDuration polling mode polls until a specified time.Duration has passed. + PollUntilDuration PollingMode = "poll-until-duration" + + // DoNotPoll disables polling. + DoNotPoll PollingMode = "not-at-all" +) + +const ( + requestFormat = `HTTP Request Begin =================================================== +%s +===================================================== HTTP Request End +` + responseFormat = `HTTP Response Begin =================================================== +%s +===================================================== HTTP Response End +` +) + +// LoggingInspector implements request and response inspectors that log the full request and +// response to a supplied log. +type LoggingInspector struct { + Logger *log.Logger +} + +// WithInspection returns a PrepareDecorator that emits the http.Request to the supplied logger. The +// body is restored after being emitted. +// +// Note: Since it reads the entire Body, this decorator should not be used where body streaming is +// important. It is best used to trace JSON or similar body values. +func (li LoggingInspector) WithInspection() PrepareDecorator { + return func(p Preparer) Preparer { + return PreparerFunc(func(r *http.Request) (*http.Request, error) { + var body, b bytes.Buffer + + defer r.Body.Close() + + r.Body = ioutil.NopCloser(io.TeeReader(r.Body, &body)) + r.Write(&b) + + li.Logger.Printf(requestFormat, b.String()) + + r.Body = ioutil.NopCloser(&body) + return p.Prepare(r) + }) + } +} + +// ByInspecting returns a RespondDecorator that emits the http.Response to the supplied logger. The +// body is restored after being emitted. +// +// Note: Since it reads the entire Body, this decorator should not be used where body streaming is +// important. It is best used to trace JSON or similar body values. +func (li LoggingInspector) ByInspecting() RespondDecorator { + return func(r Responder) Responder { + return ResponderFunc(func(resp *http.Response) error { + var body, b bytes.Buffer + + defer resp.Body.Close() + + resp.Body = ioutil.NopCloser(io.TeeReader(resp.Body, &body)) + resp.Write(&b) + + li.Logger.Printf(responseFormat, b.String()) + + resp.Body = ioutil.NopCloser(&body) + return r.Respond(resp) + }) + } +} + +var ( + // DefaultClient is the base from which generated clients should create a Client instance. Users + // can then established widely used Client defaults by replacing or modifying the DefaultClient + // before instantiating a generated client. + DefaultClient = Client{PollingMode: PollUntilDuration, PollingDuration: DefaultPollingDuration} +) + +// Client is the base for autorest generated clients. It provides default, "do nothing" +// implementations of an Authorizer, RequestInspector, and ResponseInspector. It also returns the +// standard, undecorated http.Client as a default Sender. Lastly, it supports basic request polling, +// limited to a maximum number of attempts or a specified duration. +// +// Generated clients should also use Error (see NewError and NewErrorWithError) for errors and +// return responses that compose with Response. 
+// +// Most customization of generated clients is best achieved by supplying a custom Authorizer, custom +// RequestInspector, and / or custom ResponseInspector. Users may log requests, implement circuit +// breakers (see https://msdn.microsoft.com/en-us/library/dn589784.aspx) or otherwise influence +// sending the request by providing a decorated Sender. +type Client struct { + Authorizer Authorizer + Sender Sender + RequestInspector PrepareDecorator + ResponseInspector RespondDecorator + + PollingMode PollingMode + PollingAttempts int + PollingDuration time.Duration + + // UserAgent, if not empty, will be set as the HTTP User-Agent header on all requests sent + // through the Do method. + UserAgent string +} + +// NewClientWithUserAgent returns an instance of the DefaultClient with the UserAgent set to the +// passed string. +func NewClientWithUserAgent(ua string) Client { + c := DefaultClient + c.UserAgent = ua + return c +} + +// IsPollingAllowed returns an error if the client allows polling and the passed http.Response +// requires it, otherwise it returns nil. +func (c Client) IsPollingAllowed(resp *http.Response, codes ...int) error { + if c.DoNotPoll() && ResponseRequiresPolling(resp, codes...) { + return NewErrorWithStatusCode("autorest/Client", "IsPollingAllowed", resp.StatusCode, "Response to %s requires polling but polling is disabled", + resp.Request.URL) + } + return nil +} + +// PollAsNeeded is a convenience method that will poll if the passed http.Response requires it. +func (c Client) PollAsNeeded(resp *http.Response, codes ...int) (*http.Response, error) { + if !ResponseRequiresPolling(resp, codes...) { + return resp, nil + } + + if c.DoNotPoll() { + return resp, NewErrorWithStatusCode("autorest/Client", "PollAsNeeded", resp.StatusCode, "Polling for %s is required, but polling is disabled", + resp.Request.URL) + } + + req, err := NewPollingRequest(resp, c) + if err != nil { + return resp, NewErrorWithError(err, "autorest/Client", "PollAsNeeded", resp.StatusCode, "Unable to create polling request for response to %s", + resp.Request.URL) + } + + Prepare(req, + c.WithInspection()) + + if c.PollForAttempts() { + return PollForAttempts(c, req, DefaultPollingDelay, c.PollingAttempts, codes...) + } + return PollForDuration(c, req, DefaultPollingDelay, c.PollingDuration, codes...) +} + +// DoNotPoll returns true if the client should not poll, false otherwise. +func (c Client) DoNotPoll() bool { + return len(c.PollingMode) == 0 || c.PollingMode == DoNotPoll +} + +// PollForAttempts returns true if the PollingMode is set to ForAttempts, false otherwise. +func (c Client) PollForAttempts() bool { + return c.PollingMode == PollUntilAttempts +} + +// PollForDuration return true if the PollingMode is set to ForDuration, false otherwise. +func (c Client) PollForDuration() bool { + return c.PollingMode == PollUntilDuration +} + +// Send sends the passed http.Request after applying authorization. It will poll if the client +// allows polling and the http.Response status code requires it. It will close the http.Response +// Body if the request returns an error. 
+func (c Client) Send(req *http.Request, codes ...int) (*http.Response, error) { + if len(codes) == 0 { + codes = []int{http.StatusOK} + } + + req, err := Prepare(req, + c.WithAuthorization(), + c.WithInspection()) + if err != nil { + return nil, NewErrorWithError(err, "autorest/Client", "Send", UndefinedStatusCode, "Preparing request failed") + } + + resp, err := SendWithSender(c, req, + DoErrorUnlessStatusCode(codes...)) + if err == nil { + err = c.IsPollingAllowed(resp) + if err == nil { + resp, err = c.PollAsNeeded(resp) + } + } + + if err != nil { + Respond(resp, + ByClosing()) + } + + return resp, err +} + +// Do implements the Sender interface by invoking the active Sender. If Sender is not set, it uses +// a new instance of http.Client. In both cases it will, if UserAgent is set, apply set the +// User-Agent header. +func (c Client) Do(r *http.Request) (*http.Response, error) { + if len(c.UserAgent) > 0 { + r, _ = Prepare(r, WithUserAgent(c.UserAgent)) + } + return c.sender().Do(r) +} + +// sender returns the Sender to which to send requests. +func (c Client) sender() Sender { + if c.Sender == nil { + return http.DefaultClient + } + return c.Sender +} + +// WithAuthorization is a convenience method that returns the WithAuthorization PrepareDecorator +// from the current Authorizer. If not Authorizer is set, it uses the NullAuthorizer. +func (c Client) WithAuthorization() PrepareDecorator { + return c.authorizer().WithAuthorization() +} + +// authorizer returns the Authorizer to use. +func (c Client) authorizer() Authorizer { + if c.Authorizer == nil { + return NullAuthorizer{} + } + return c.Authorizer +} + +// WithInspection is a convenience method that passes the request to the supplied RequestInspector, +// if present, or returns the WithNothing PrepareDecorator otherwise. +func (c Client) WithInspection() PrepareDecorator { + if c.RequestInspector == nil { + return WithNothing() + } + return c.RequestInspector +} + +// ByInspecting is a convenience method that passes the response to the supplied ResponseInspector, +// if present, or returns the ByIgnoring RespondDecorator otherwise. +func (c Client) ByInspecting() RespondDecorator { + if c.ResponseInspector == nil { + return ByIgnoring() + } + return c.ResponseInspector +} + +// Response serves as the base for all responses from generated clients. It provides access to the +// last http.Response. +type Response struct { + *http.Response `json:"-"` +} + +// GetPollingDelay extracts the polling delay from the Retry-After header of the response. If +// the header is absent or is malformed, it will return the supplied default delay time.Duration. +func (r Response) GetPollingDelay(defaultDelay time.Duration) time.Duration { + return GetPollingDelay(r.Response, defaultDelay) +} + +// GetPollingLocation retrieves the polling URL from the Location header of the response. 
+func (r Response) GetPollingLocation() string { + return GetPollingLocation(r.Response) +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/date/date.go b/vendor/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/date/date.go new file mode 100644 index 000000000..c0514bc20 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/date/date.go @@ -0,0 +1,87 @@ +/* +Package date provides time.Time derivatives that conform to the Swagger.io (https://swagger.io/) +defined date formats: Date and DateTime. Both types may, in most cases, be used in lieu of +time.Time types. And both convert to time.Time through a ToTime method. +*/ +package date + +import ( + "fmt" + "time" +) + +const ( + rfc3339FullDate = "2006-01-02" + dateFormat = "%4d-%02d-%02d" + jsonFormat = `"%4d-%02d-%02d"` +) + +// Date defines a type similar to time.Time but assumes a layout of RFC3339 full-date (i.e., +// 2006-01-02). +type Date struct { + time.Time +} + +// ParseDate create a new Date from the passed string. +func ParseDate(date string) (d Date, err error) { + d = Date{} + d.Time, err = time.Parse(rfc3339FullDate, date) + return d, err +} + +// MarshalBinary preserves the Date as a byte array conforming to RFC3339 full-date (i.e., +// 2006-01-02). +func (d Date) MarshalBinary() ([]byte, error) { + return d.MarshalText() +} + +// UnmarshalBinary reconstitutes a Date saved as a byte array conforming to RFC3339 full-date (i.e., +// 2006-01-02). +func (d *Date) UnmarshalBinary(data []byte) error { + return d.UnmarshalText(data) +} + +// MarshalJSON preserves the Date as a JSON string conforming to RFC3339 full-date (i.e., +// 2006-01-02). +func (d Date) MarshalJSON() (json []byte, err error) { + return []byte(fmt.Sprintf(jsonFormat, d.Year(), d.Month(), d.Day())), nil +} + +// UnmarshalJSON reconstitutes the Date from a JSON string conforming to RFC3339 full-date (i.e., +// 2006-01-02). +func (d *Date) UnmarshalJSON(data []byte) (err error) { + if data[0] == '"' { + data = data[1 : len(data)-1] + } + d.Time, err = time.Parse(rfc3339FullDate, string(data)) + if err != nil { + return err + } + return nil +} + +// MarshalText preserves the Date as a byte array conforming to RFC3339 full-date (i.e., +// 2006-01-02). +func (d Date) MarshalText() (text []byte, err error) { + return []byte(fmt.Sprintf(dateFormat, d.Year(), d.Month(), d.Day())), nil +} + +// UnmarshalText reconstitutes a Date saved as a byte array conforming to RFC3339 full-date (i.e., +// 2006-01-02). +func (d *Date) UnmarshalText(data []byte) (err error) { + d.Time, err = time.Parse(rfc3339FullDate, string(data)) + if err != nil { + return err + } + return nil +} + +// String returns the Date formatted as an RFC3339 full-date string (i.e., 2006-01-02). 
+func (d Date) String() string { + return fmt.Sprintf(dateFormat, d.Year(), d.Month(), d.Day()) +} + +// ToTime returns a Date as a time.Time +func (d Date) ToTime() time.Time { + return d.Time +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/date/time.go b/vendor/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/date/time.go new file mode 100644 index 000000000..ccac424c1 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/date/time.go @@ -0,0 +1,70 @@ +package date + +import ( + "time" +) + +// Time defines a type similar to time.Time but assumes a layout of RFC3339 date-time (i.e., +// 2006-01-02T15:04:05Z). +type Time struct { + time.Time +} + +// ParseTime creates a new Time from the passed string. +func ParseTime(date string) (d Time, err error) { + d = Time{} + d.Time, err = time.Parse(time.RFC3339, date) + return d, err +} + +// MarshalBinary preserves the Time as a byte array conforming to RFC3339 date-time (i.e., +// 2006-01-02T15:04:05Z). +func (d Time) MarshalBinary() ([]byte, error) { + return d.Time.MarshalText() +} + +// UnmarshalBinary reconstitutes a Time saved as a byte array conforming to RFC3339 date-time +// (i.e., 2006-01-02T15:04:05Z). +func (d *Time) UnmarshalBinary(data []byte) error { + return d.Time.UnmarshalText(data) +} + +// MarshalJSON preserves the Time as a JSON string conforming to RFC3339 date-time (i.e., +// 2006-01-02T15:04:05Z). +func (d Time) MarshalJSON() (json []byte, err error) { + return d.Time.MarshalJSON() +} + +// UnmarshalJSON reconstitutes the Time from a JSON string conforming to RFC3339 date-time +// (i.e., 2006-01-02T15:04:05Z). +func (d *Time) UnmarshalJSON(data []byte) (err error) { + return d.Time.UnmarshalJSON(data) +} + +// MarshalText preserves the Time as a byte array conforming to RFC3339 date-time (i.e., +// 2006-01-02T15:04:05Z). +func (d Time) MarshalText() (text []byte, err error) { + return d.Time.MarshalText() +} + +// UnmarshalText reconstitutes a Time saved as a byte array conforming to RFC3339 date-time +// (i.e., 2006-01-02T15:04:05Z). +func (d *Time) UnmarshalText(data []byte) (err error) { + return d.Time.UnmarshalText(data) +} + +// String returns the Time formatted as an RFC3339 date-time string (i.e., +// 2006-01-02T15:04:05Z). +func (d Time) String() string { + // Note: time.Time.String does not return an RFC3339 compliant string, time.Time.MarshalText does. + b, err := d.Time.MarshalText() + if err != nil { + return "" + } + return string(b) +} + +// ToTime returns a Time as a time.Time +func (d Time) ToTime() time.Time { + return d.Time +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/error.go b/vendor/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/error.go new file mode 100644 index 000000000..1ad8bd07a --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/error.go @@ -0,0 +1,114 @@ +package autorest + +import ( + "fmt" +) + +const ( + // UndefinedStatusCode is used when HTTP status code is not available for an error. + UndefinedStatusCode = 0 +) + +// Error describes the methods implemented by autorest errors. +type Error interface { + error + + // PackageType should return the package type of the object emitting the error. 
For types, the + // value should match that produced the the '%T' format specifier of the fmt package. For other + // elements, such as functions, it returns just the package name (e.g., "autorest"). + PackageType() string + + // Method should return the name of the method raising the error. + Method() string + + // StatusCode returns the HTTP Response StatusCode (if non-zero) that led to the error. + StatusCode() int + + // Message should return the error message. + Message() string + + // String should return a formatted containing all available details (i.e., PackageType, Method, + // Message, and original error (if any)). + String() string + + // Original should return the original error, if any, and nil otherwise. + Original() error +} + +type baseError struct { + packageType string + method string + statusCode int + message string + + original error +} + +// NewError creates a new Error conforming object from the passed packageType, method, and +// message. message is treated as a format string to which the optional args apply. +func NewError(packageType string, method string, message string, args ...interface{}) Error { + return NewErrorWithError(nil, packageType, method, UndefinedStatusCode, message, args...) +} + +// NewErrorWithStatusCode creates a new Error conforming object from the passed packageType, method, +// statusCode, and message. message is treated as a format string to which the optional args apply. +func NewErrorWithStatusCode(packageType string, method string, statusCode int, message string, args ...interface{}) Error { + return NewErrorWithError(nil, packageType, method, statusCode, message, args...) +} + +// NewErrorWithError creates a new Error conforming object from the passed packageType, method, +// statusCode, message, and original error. message is treated as a format string to which the +// optional args apply. +func NewErrorWithError(original error, packageType string, method string, statusCode int, message string, args ...interface{}) Error { + if _, ok := original.(Error); ok { + return original.(Error) + } + return baseError{ + packageType: packageType, + method: method, + statusCode: statusCode, + message: fmt.Sprintf(message, args...), + original: original, + } +} + +// PackageType returns the package type of the object emitting the error. For types, the value +// matches that produced the the '%T' format specifier of the fmt package. For other elements, +// such as functions, it returns just the package name (e.g., "autorest"). +func (be baseError) PackageType() string { + return be.packageType +} + +// Method returns the name of the method raising the error. +func (be baseError) Method() string { + return be.method +} + +// StatusCode returns the HTTP Response StatusCode (if non-zero) that led to the error. +func (be baseError) StatusCode() int { + return be.statusCode +} + +// Message is the error message. +func (be baseError) Message() string { + return be.message +} + +// Original returns the original error, if any, and nil otherwise. +func (be baseError) Original() error { + return be.original +} + +// Error returns the same formatted string as String. +func (be baseError) Error() string { + return be.String() +} + +// String returns a formatted containing all available details (i.e., PackageType, Method, +// StatusCode, Message, and original error (if any)). 
+func (be baseError) String() string { + if be.original == nil { + return fmt.Sprintf("%s:%s %v %s", be.packageType, be.method, be.statusCode, be.message) + } + return fmt.Sprintf("%s:%s %v %s -- Original Error: %v", be.packageType, be.method, be.statusCode, be.message, be.original) +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/mocks/helpers.go b/vendor/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/mocks/helpers.go new file mode 100644 index 000000000..57d6df871 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/mocks/helpers.go @@ -0,0 +1,104 @@ +package mocks + +import ( + "fmt" + "net/http" + "time" +) + +const ( + // TestDelay is the Retry-After delay used in tests. + TestDelay = 0 * time.Second + + // TestHeader is the header used in tests. + TestHeader = "x-test-header" + + // TestURL is the URL used in tests. + TestURL = "https://microsoft.com/a/b/c/" +) + +const ( + headerLocation = "Location" + headerRetryAfter = "Retry-After" +) + +// NewRequest instantiates a new request. +func NewRequest() *http.Request { + return NewRequestWithContent("") +} + +// NewRequestWithContent instantiates a new request using the passed string for the body content. +func NewRequestWithContent(c string) *http.Request { + r, _ := http.NewRequest("GET", "https://microsoft.com/a/b/c/", NewBody(c)) + return r +} + +// NewRequestForURL instantiates a new request using the passed URL. +func NewRequestForURL(u string) *http.Request { + r, err := http.NewRequest("GET", u, NewBody("")) + if err != nil { + panic(fmt.Sprintf("mocks: ERROR (%v) parsing testing URL %s", err, u)) + } + return r +} + +// NewResponse instantiates a new response. +func NewResponse() *http.Response { + return NewResponseWithContent("") +} + +// NewResponseWithContent instantiates a new response with the passed string as the body content. +func NewResponseWithContent(c string) *http.Response { + return &http.Response{ + Status: "200 OK", + StatusCode: 200, + Proto: "HTTP/1.0", + ProtoMajor: 1, + ProtoMinor: 0, + Body: NewBody(c), + Request: NewRequest(), + } +} + +// NewResponseWithStatus instantiates a new response using the passed string and integer as the +// status and status code. +func NewResponseWithStatus(s string, c int) *http.Response { + resp := NewResponse() + resp.Status = s + resp.StatusCode = c + return resp +} + +// SetResponseHeader adds a header to the passed response. +func SetResponseHeader(resp *http.Response, h string, v string) { + if resp.Header == nil { + resp.Header = make(http.Header) + } + resp.Header.Set(h, v) +} + +// SetResponseHeaderValues adds a header containing all the passed string values. +func SetResponseHeaderValues(resp *http.Response, h string, values []string) { + if resp.Header == nil { + resp.Header = make(http.Header) + } + for _, v := range values { + resp.Header.Add(h, v) + } +} + +// SetAcceptedHeaders adds the headers usually associated with a 202 Accepted response. +func SetAcceptedHeaders(resp *http.Response) { + SetLocationHeader(resp, TestURL) + SetRetryHeader(resp, TestDelay) +} + +// SetLocationHeader adds the Location header. +func SetLocationHeader(resp *http.Response, location string) { + SetResponseHeader(resp, http.CanonicalHeaderKey(headerLocation), location) +} + +// SetRetryHeader adds the Retry-After header. 
+func SetRetryHeader(resp *http.Response, delay time.Duration) { + SetResponseHeader(resp, http.CanonicalHeaderKey(headerRetryAfter), fmt.Sprintf("%v", delay.Seconds())) +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/mocks/mocks.go b/vendor/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/mocks/mocks.go new file mode 100644 index 000000000..b20243fc1 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/mocks/mocks.go @@ -0,0 +1,165 @@ +/* +Package mocks provides mocks and helpers used in testing. +*/ +package mocks + +import ( + "fmt" + "io" + "net/http" +) + +// Body implements acceptable body over a string. +type Body struct { + s string + b []byte + isOpen bool + closeAttempts int +} + +// NewBody creates a new instance of Body. +func NewBody(s string) *Body { + return (&Body{s: s}).reset() +} + +// Read reads into the passed byte slice and returns the bytes read. +func (body *Body) Read(b []byte) (n int, err error) { + if !body.IsOpen() { + return 0, fmt.Errorf("ERROR: Body has been closed\n") + } + if len(body.b) == 0 { + return 0, io.EOF + } + n = copy(b, body.b) + body.b = body.b[n:] + return n, nil +} + +// Close closes the body. +func (body *Body) Close() error { + if body.isOpen { + body.isOpen = false + body.closeAttempts++ + } + return nil +} + +// CloseAttempts returns the number of times Close was called. +func (body *Body) CloseAttempts() int { + return body.closeAttempts +} + +// IsOpen returns true if the Body has not been closed, false otherwise. +func (body *Body) IsOpen() bool { + return body.isOpen +} + +func (body *Body) reset() *Body { + body.isOpen = true + body.b = []byte(body.s) + return body +} + +// Sender implements a simple null sender. +type Sender struct { + attempts int + pollAttempts int + content string + reuseResponse bool + resp *http.Response + status string + statusCode int + emitErrors int + err error +} + +// NewSender creates a new instance of Sender. +func NewSender() *Sender { + return &Sender{status: "200 OK", statusCode: 200} +} + +// Do accepts the passed request and, based on settings, emits a response and possible error. +func (c *Sender) Do(r *http.Request) (*http.Response, error) { + c.attempts++ + + if !c.reuseResponse || c.resp == nil { + resp := NewResponse() + resp.Request = r + resp.Body = NewBody(c.content) + resp.Status = c.status + resp.StatusCode = c.statusCode + c.resp = resp + } else { + c.resp.Body.(*Body).reset() + } + + if c.pollAttempts > 0 { + c.pollAttempts-- + c.resp.Status = "Accepted" + c.resp.StatusCode = http.StatusAccepted + SetAcceptedHeaders(c.resp) + } + + if c.emitErrors > 0 || c.emitErrors < 0 { + c.emitErrors-- + if c.err == nil { + return c.resp, fmt.Errorf("Faux Error") + } + return c.resp, c.err + } + return c.resp, nil +} + +// Attempts returns the number of times Do was called. +func (c *Sender) Attempts() int { + return c.attempts +} + +// EmitErrors sets the number times Do should emit an error. +func (c *Sender) EmitErrors(emit int) { + c.emitErrors = emit +} + +// SetError sets the error Do should return. +func (c *Sender) SetError(err error) { + c.err = err +} + +// ClearError clears the error Do emits. +func (c *Sender) ClearError() { + c.SetError(nil) +} + +// EmitContent sets the content to be returned by Do in the response body. 
+func (c *Sender) EmitContent(s string) { + c.content = s +} + +// EmitStatus sets the status of the response Do emits. +func (c *Sender) EmitStatus(status string, code int) { + c.status = status + c.statusCode = code +} + +// SetPollAttempts sets the number of times the returned response emits the default polling +// status code (i.e., 202 Accepted). +func (c *Sender) SetPollAttempts(pa int) { + c.pollAttempts = pa +} + +// ReuseResponse sets if the just one response object should be reused by all calls to Do. +func (c *Sender) ReuseResponse(reuseResponse bool) { + c.reuseResponse = reuseResponse +} + +// SetResponse sets the response from Do. +func (c *Sender) SetResponse(resp *http.Response) { + c.resp = resp + c.reuseResponse = true +} + +// T is a simple testing struct. +type T struct { + Name string `json:"name" xml:"Name"` + Age int `json:"age" xml:"Age"` +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/preparer.go b/vendor/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/preparer.go new file mode 100644 index 000000000..d02b1c84d --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/preparer.go @@ -0,0 +1,311 @@ +package autorest + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "net/url" + "strings" +) + +const ( + mimeTypeJSON = "application/json" + mimeTypeFormPost = "application/x-www-form-urlencoded" + + headerAuthorization = "Authorization" + headerContentType = "Content-Type" + headerUserAgent = "User-Agent" +) + +// Preparer is the interface that wraps the Prepare method. +// +// Prepare accepts and possibly modifies an http.Request (e.g., adding Headers). Implementations +// must ensure to not share or hold per-invocation state since Preparers may be shared and re-used. +type Preparer interface { + Prepare(*http.Request) (*http.Request, error) +} + +// PreparerFunc is a method that implements the Preparer interface. +type PreparerFunc func(*http.Request) (*http.Request, error) + +// Prepare implements the Preparer interface on PreparerFunc. +func (pf PreparerFunc) Prepare(r *http.Request) (*http.Request, error) { + return pf(r) +} + +// PrepareDecorator takes and possibly decorates, by wrapping, a Preparer. Decorators may affect the +// http.Request and pass it along or, first, pass the http.Request along then affect the result. +type PrepareDecorator func(Preparer) Preparer + +// CreatePreparer creates, decorates, and returns a Preparer. +// Without decorators, the returned Preparer returns the passed http.Request unmodified. +// Preparers are safe to share and re-use. +func CreatePreparer(decorators ...PrepareDecorator) Preparer { + return DecoratePreparer( + Preparer(PreparerFunc(func(r *http.Request) (*http.Request, error) { return r, nil })), + decorators...) +} + +// DecoratePreparer accepts a Preparer and a, possibly empty, set of PrepareDecorators, which it +// applies to the Preparer. Decorators are applied in the order received, but their affect upon the +// request depends on whether they are a pre-decorator (change the http.Request and then pass it +// along) or a post-decorator (pass the http.Request along and alter it on return). 
+func DecoratePreparer(p Preparer, decorators ...PrepareDecorator) Preparer { + for _, decorate := range decorators { + p = decorate(p) + } + return p +} + +// Prepare accepts an http.Request and a, possibly empty, set of PrepareDecorators. +// It creates a Preparer from the decorators which it then applies to the passed http.Request. +func Prepare(r *http.Request, decorators ...PrepareDecorator) (*http.Request, error) { + if r == nil { + return nil, NewError("autorest", "Prepare", "Invoked without an http.Request") + } + return CreatePreparer(decorators...).Prepare(r) +} + +// WithNothing returns a "do nothing" PrepareDecorator that makes no changes to the passed +// http.Request. +func WithNothing() PrepareDecorator { + return func(p Preparer) Preparer { + return PreparerFunc(func(r *http.Request) (*http.Request, error) { + return p.Prepare(r) + }) + } +} + +// WithHeader returns a PrepareDecorator that sets the specified HTTP header of the http.Request to +// the passed value. It canonicalizes the passed header name (via http.CanonicalHeaderKey) before +// adding the header. +func WithHeader(header string, value string) PrepareDecorator { + return func(p Preparer) Preparer { + return PreparerFunc(func(r *http.Request) (*http.Request, error) { + r, err := p.Prepare(r) + if err == nil { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set(http.CanonicalHeaderKey(header), value) + } + return r, err + }) + } +} + +// WithBearerAuthorization returns a PrepareDecorator that adds an HTTP Authorization header whose +// value is "Bearer " followed by the supplied token. +func WithBearerAuthorization(token string) PrepareDecorator { + return WithHeader(headerAuthorization, fmt.Sprintf("Bearer %s", token)) +} + +// AsContentType returns a PrepareDecorator that adds an HTTP Content-Type header whose value +// is the passed contentType. +func AsContentType(contentType string) PrepareDecorator { + return WithHeader(headerContentType, contentType) +} + +// WithUserAgent returns a PrepareDecorator that adds an HTTP User-Agent header whose value is the +// passed string. +func WithUserAgent(ua string) PrepareDecorator { + return WithHeader(headerUserAgent, ua) +} + +// AsFormURLEncoded returns a PrepareDecorator that adds an HTTP Content-Type header whose value is +// "application/x-www-form-urlencoded". +func AsFormURLEncoded() PrepareDecorator { + return AsContentType(mimeTypeFormPost) +} + +// AsJSON returns a PrepareDecorator that adds an HTTP Content-Type header whose value is +// "application/json". +func AsJSON() PrepareDecorator { + return AsContentType(mimeTypeJSON) +} + +// WithMethod returns a PrepareDecorator that sets the HTTP method of the passed request. The +// decorator does not validate that the passed method string is a known HTTP method. +func WithMethod(method string) PrepareDecorator { + return func(p Preparer) Preparer { + return PreparerFunc(func(r *http.Request) (*http.Request, error) { + r.Method = method + return p.Prepare(r) + }) + } +} + +// AsDelete returns a PrepareDecorator that sets the HTTP method to DELETE. +func AsDelete() PrepareDecorator { return WithMethod("DELETE") } + +// AsGet returns a PrepareDecorator that sets the HTTP method to GET. +func AsGet() PrepareDecorator { return WithMethod("GET") } + +// AsHead returns a PrepareDecorator that sets the HTTP method to HEAD. +func AsHead() PrepareDecorator { return WithMethod("HEAD") } + +// AsOptions returns a PrepareDecorator that sets the HTTP method to OPTIONS. 
+func AsOptions() PrepareDecorator { return WithMethod("OPTIONS") }
+
+// AsPatch returns a PrepareDecorator that sets the HTTP method to PATCH.
+func AsPatch() PrepareDecorator { return WithMethod("PATCH") }
+
+// AsPost returns a PrepareDecorator that sets the HTTP method to POST.
+func AsPost() PrepareDecorator { return WithMethod("POST") }
+
+// AsPut returns a PrepareDecorator that sets the HTTP method to PUT.
+func AsPut() PrepareDecorator { return WithMethod("PUT") }
+
+// WithBaseURL returns a PrepareDecorator that populates the http.Request with a url.URL constructed
+// from the supplied baseURL.
+func WithBaseURL(baseURL string) PrepareDecorator {
+	return func(p Preparer) Preparer {
+		return PreparerFunc(func(r *http.Request) (*http.Request, error) {
+			r, err := p.Prepare(r)
+			if err == nil {
+				u, err := url.Parse(baseURL)
+				if err == nil {
+					r.URL = u
+				}
+			}
+			return r, err
+		})
+	}
+}
+
+// WithFormData returns a PrepareDecorator that URL-encodes the supplied url.Values (e.g.,
+// bar=baz&foo=quux) into the http.Request body.
+func WithFormData(v url.Values) PrepareDecorator {
+	return func(p Preparer) Preparer {
+		return PreparerFunc(func(r *http.Request) (*http.Request, error) {
+			r, err := p.Prepare(r)
+			if err == nil {
+				s := v.Encode()
+				r.ContentLength = int64(len(s))
+				r.Body = ioutil.NopCloser(strings.NewReader(s))
+			}
+			return r, err
+		})
+	}
+}
+
+// WithJSON returns a PrepareDecorator that encodes the data passed as JSON into the body of the
+// request and sets the Content-Length header.
+func WithJSON(v interface{}) PrepareDecorator {
+	return func(p Preparer) Preparer {
+		return PreparerFunc(func(r *http.Request) (*http.Request, error) {
+			r, err := p.Prepare(r)
+			if err == nil {
+				b, err := json.Marshal(v)
+				if err == nil {
+					r.ContentLength = int64(len(b))
+					r.Body = ioutil.NopCloser(bytes.NewReader(b))
+				}
+			}
+			return r, err
+		})
+	}
+}
+
+// WithPath returns a PrepareDecorator that adds the supplied path to the request URL. If the path
+// is absolute (that is, it begins with a "/"), it replaces the existing path.
+func WithPath(path string) PrepareDecorator {
+	return func(p Preparer) Preparer {
+		return PreparerFunc(func(r *http.Request) (*http.Request, error) {
+			r, err := p.Prepare(r)
+			if err == nil {
+				if r.URL == nil {
+					return r, NewError("autorest", "WithPath", "Invoked with a nil URL")
+				}
+				u := r.URL
+				u.Path = strings.TrimRight(u.Path, "/")
+				if strings.HasPrefix(path, "/") {
+					u.Path = path
+				} else {
+					u.Path += "/" + path
+				}
+			}
+			return r, err
+		})
+	}
+}
+
+// WithEscapedPathParameters returns a PrepareDecorator that replaces brace-enclosed keys within the
+// request path (i.e., http.Request.URL.Path) with the corresponding values from the passed map. The
+// values will be escaped (aka URL encoded) before insertion into the path.
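+//
+// For example (an illustrative sketch with made-up values):
+//
+//	r, err := Prepare(&http.Request{},
+//		WithBaseURL("https://example.com"),
+//		WithPath("/resources/{name}"),
+//		WithEscapedPathParameters(map[string]interface{}{"name": "my disk"}))
+//	// r.URL.Path is now "/resources/my+disk"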
+func WithEscapedPathParameters(pathParameters map[string]interface{}) PrepareDecorator { + parameters := escapeValueStrings(ensureValueStrings(pathParameters)) + return func(p Preparer) Preparer { + return PreparerFunc(func(r *http.Request) (*http.Request, error) { + r, err := p.Prepare(r) + if err == nil { + if r.URL == nil { + return r, NewError("autorest", "WithEscapedPathParameters", "Invoked with a nil URL") + } + for key, value := range parameters { + r.URL.Path = strings.Replace(r.URL.Path, "{"+key+"}", value, -1) + } + } + return r, err + }) + } +} + +// WithPathParameters returns a PrepareDecorator that replaces brace-enclosed keys within the +// request path (i.e., http.Request.URL.Path) with the corresponding values from the passed map. +func WithPathParameters(pathParameters map[string]interface{}) PrepareDecorator { + parameters := ensureValueStrings(pathParameters) + return func(p Preparer) Preparer { + return PreparerFunc(func(r *http.Request) (*http.Request, error) { + r, err := p.Prepare(r) + if err == nil { + if r.URL == nil { + return r, NewError("autorest", "WithPathParameters", "Invoked with a nil URL") + } + for key, value := range parameters { + r.URL.Path = strings.Replace(r.URL.Path, "{"+key+"}", value, -1) + } + } + return r, err + }) + } +} + +// WithQueryParameters returns a PrepareDecorators that encodes and applies the query parameters +// given in the supplied map (i.e., key=value). +func WithQueryParameters(queryParameters map[string]interface{}) PrepareDecorator { + parameters := ensureValueStrings(queryParameters) + return func(p Preparer) Preparer { + return PreparerFunc(func(r *http.Request) (*http.Request, error) { + r, err := p.Prepare(r) + if err == nil { + if r.URL == nil { + return r, NewError("autorest", "WithQueryParameters", "Invoked with a nil URL") + } + v := r.URL.Query() + for key, value := range parameters { + v.Add(key, value) + } + r.URL.RawQuery = v.Encode() + } + return r, err + }) + } +} + +// Authorizer is the interface that provides a PrepareDecorator used to supply request +// authorization. Most often, the Authorizer decorator runs last so it has access to the full +// state of the formed HTTP request. +type Authorizer interface { + WithAuthorization() PrepareDecorator +} + +// NullAuthorizer implements a default, "do nothing" Authorizer. +type NullAuthorizer struct{} + +// WithAuthorization returns a PrepareDecorator that does nothing. +func (na NullAuthorizer) WithAuthorization() PrepareDecorator { + return WithNothing() +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/responder.go b/vendor/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/responder.go new file mode 100644 index 000000000..8b14eb41c --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/responder.go @@ -0,0 +1,169 @@ +package autorest + +import ( + "fmt" + "net/http" +) + +// Responder is the interface that wraps the Respond method. +// +// Respond accepts and reacts to an http.Response. Implementations must ensure to not share or hold +// state since Responders may be shared and re-used. +type Responder interface { + Respond(*http.Response) error +} + +// ResponderFunc is a method that implements the Responder interface. +type ResponderFunc func(*http.Response) error + +// Respond implements the Responder interface on ResponderFunc. 
+func (rf ResponderFunc) Respond(r *http.Response) error { + return rf(r) +} + +// RespondDecorator takes and possibly decorates, by wrapping, a Responder. Decorators may react to +// the http.Response and pass it along or, first, pass the http.Response along then react. +type RespondDecorator func(Responder) Responder + +// CreateResponder creates, decorates, and returns a Responder. Without decorators, the returned +// Responder returns the passed http.Response unmodified. Responders may or may not be safe to share +// and re-used: It depends on the applied decorators. For example, a standard decorator that closes +// the response body is fine to share whereas a decorator that reads the body into a passed struct +// is not. +// +// To prevent memory leaks, ensure that at least one Responder closes the response body. +func CreateResponder(decorators ...RespondDecorator) Responder { + return DecorateResponder( + Responder(ResponderFunc(func(r *http.Response) error { return nil })), + decorators...) +} + +// DecorateResponder accepts a Responder and a, possibly empty, set of RespondDecorators, which it +// applies to the Responder. Decorators are applied in the order received, but their affect upon the +// request depends on whether they are a pre-decorator (react to the http.Response and then pass it +// along) or a post-decorator (pass the http.Response along and then react). +func DecorateResponder(r Responder, decorators ...RespondDecorator) Responder { + for _, decorate := range decorators { + r = decorate(r) + } + return r +} + +// Respond accepts an http.Response and a, possibly empty, set of RespondDecorators. +// It creates a Responder from the decorators it then applies to the passed http.Response. +func Respond(r *http.Response, decorators ...RespondDecorator) error { + if r == nil { + return nil + } + return CreateResponder(decorators...).Respond(r) +} + +// ByIgnoring returns a RespondDecorator that ignores the passed http.Response passing it unexamined +// to the next RespondDecorator. +func ByIgnoring() RespondDecorator { + return func(r Responder) Responder { + return ResponderFunc(func(resp *http.Response) error { + return r.Respond(resp) + }) + } +} + +// ByClosing returns a RespondDecorator that first invokes the passed Responder after which it +// closes the response body. Since the passed Responder is invoked prior to closing the response +// body, the decorator may occur anywhere within the set. +func ByClosing() RespondDecorator { + return func(r Responder) Responder { + return ResponderFunc(func(resp *http.Response) error { + err := r.Respond(resp) + if resp != nil && resp.Body != nil { + resp.Body.Close() + } + return err + }) + } +} + +// ByClosingIfError returns a RespondDecorator that first invokes the passed Responder after which +// it closes the response if the passed Responder returns an error and the response body exists. +func ByClosingIfError() RespondDecorator { + return func(r Responder) Responder { + return ResponderFunc(func(resp *http.Response) error { + err := r.Respond(resp) + if err != nil && resp != nil && resp.Body != nil { + resp.Body.Close() + } + return err + }) + } +} + +// ByUnmarshallingJSON returns a RespondDecorator that decodes a JSON document returned in the +// response Body into the value pointed to by v. 
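+//
+// A typical chain decodes the body and then closes it (an illustrative
+// sketch; v is any JSON-decodable value):
+//
+//	err := Respond(resp,
+//		ByUnmarshallingJSON(&v),
+//		ByClosing())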
+func ByUnmarshallingJSON(v interface{}) RespondDecorator { + return byUnmarshallingAs(EncodedAsJSON, v) +} + +// ByUnmarshallingXML returns a RespondDecorator that decodes a XML document returned in the +// response Body into the value pointed to by v. +func ByUnmarshallingXML(v interface{}) RespondDecorator { + return byUnmarshallingAs(EncodedAsXML, v) +} + +func byUnmarshallingAs(encodedAs EncodedAs, v interface{}) RespondDecorator { + return func(r Responder) Responder { + return ResponderFunc(func(resp *http.Response) error { + err := r.Respond(resp) + if err == nil { + b, errDecode := CopyAndDecode(encodedAs, resp.Body, v) + if errDecode != nil { + err = fmt.Errorf("Error (%v) occurred decoding %s (\"%s\")", errDecode, encodedAs, b.String()) + } + } + return err + }) + } + +} + +// WithErrorUnlessStatusCode returns a RespondDecorator that emits an error unless the response +// StatusCode is among the set passed. Since these are artificial errors, the response body +// may still require closing. +func WithErrorUnlessStatusCode(codes ...int) RespondDecorator { + return func(r Responder) Responder { + return ResponderFunc(func(resp *http.Response) error { + err := r.Respond(resp) + if err == nil && !ResponseHasStatusCode(resp, codes...) { + err = NewErrorWithStatusCode("autorest", "WithErrorUnlessStatusCode", resp.StatusCode, "%v %v failed with %s", + resp.Request.Method, + resp.Request.URL, + resp.Status) + } + return err + }) + } +} + +// WithErrorUnlessOK returns a RespondDecorator that emits an error if the response StatusCode is +// anything other than HTTP 200. +func WithErrorUnlessOK() RespondDecorator { + return WithErrorUnlessStatusCode(http.StatusOK) +} + +// ExtractHeader extracts all values of the specified header from the http.Response. It returns an +// empty string slice if the passed http.Response is nil or the header does not exist. +func ExtractHeader(header string, resp *http.Response) []string { + if resp != nil && resp.Header != nil { + return resp.Header[http.CanonicalHeaderKey(header)] + } + return nil +} + +// ExtractHeaderValue extracts the first value of the specified header from the http.Response. It +// returns an empty string if the passed http.Response is nil or the header does not exist. +func ExtractHeaderValue(header string, resp *http.Response) string { + h := ExtractHeader(header, resp) + if len(h) > 0 { + return h[0] + } + return "" +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/sender.go b/vendor/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/sender.go new file mode 100644 index 000000000..6d6d8f011 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/sender.go @@ -0,0 +1,211 @@ +package autorest + +import ( + "log" + "math" + "net/http" + "time" +) + +// Sender is the interface that wraps the Do method to send HTTP requests. +// +// The standard http.Client conforms to this interface. +type Sender interface { + Do(*http.Request) (*http.Response, error) +} + +// SenderFunc is a method that implements the Sender interface. +type SenderFunc func(*http.Request) (*http.Response, error) + +// Do implements the Sender interface on SenderFunc. +func (sf SenderFunc) Do(r *http.Request) (*http.Response, error) { + return sf(r) +} + +// SendDecorator takes and possibily decorates, by wrapping, a Sender. 
Decorators may affect the
+// http.Request and pass it along or, first, pass the http.Request along then react to the
+// http.Response result.
+type SendDecorator func(Sender) Sender
+
+// CreateSender creates, decorates, and returns, as a Sender, the default http.Client.
+func CreateSender(decorators ...SendDecorator) Sender {
+	return DecorateSender(&http.Client{}, decorators...)
+}
+
+// DecorateSender accepts a Sender and a, possibly empty, set of SendDecorators, which it applies to
+// the Sender. Decorators are applied in the order received, but their effect upon the request
+// depends on whether they are a pre-decorator (change the http.Request and then pass it along) or a
+// post-decorator (pass the http.Request along and react to the results in http.Response).
+func DecorateSender(s Sender, decorators ...SendDecorator) Sender {
+	for _, decorate := range decorators {
+		s = decorate(s)
+	}
+	return s
+}
+
+// Send sends, by means of the default http.Client, the passed http.Request, returning the
+// http.Response and possible error. It also accepts a, possibly empty, set of SendDecorators which
+// it will apply to the http.Client before invoking the Do method.
+//
+// Send is a convenience method and not recommended for production. Advanced users should use
+// SendWithSender, passing and sharing their own Sender (e.g., instance of http.Client).
+//
+// Send will not poll or retry requests.
+func Send(r *http.Request, decorators ...SendDecorator) (*http.Response, error) {
+	return SendWithSender(&http.Client{}, r, decorators...)
+}
+
+// SendWithSender sends the passed http.Request, through the provided Sender, returning the
+// http.Response and possible error. It also accepts a, possibly empty, set of SendDecorators which
+// it will apply to the http.Client before invoking the Do method.
+//
+// SendWithSender will not poll or retry requests.
+func SendWithSender(s Sender, r *http.Request, decorators ...SendDecorator) (*http.Response, error) {
+	return DecorateSender(s, decorators...).Do(r)
+}
+
+// AfterDelay returns a SendDecorator that delays for the passed time.Duration before
+// invoking the Sender.
+func AfterDelay(d time.Duration) SendDecorator {
+	return func(s Sender) Sender {
+		return SenderFunc(func(r *http.Request) (*http.Response, error) {
+			time.Sleep(d)
+			return s.Do(r)
+		})
+	}
+}
+
+// AfterRetryDelay returns a SendDecorator that delays for the number of seconds specified in the
+// Retry-After header of the prior response when polling is required.
+func AfterRetryDelay(defaultDelay time.Duration, codes ...int) SendDecorator {
+	delay := time.Duration(0)
+	return func(s Sender) Sender {
+		return SenderFunc(func(r *http.Request) (*http.Response, error) {
+			if delay > time.Duration(0) {
+				time.Sleep(delay)
+			}
+			resp, err := s.Do(r)
+			if ResponseRequiresPolling(resp, codes...) {
+				delay = GetPollingDelay(resp, defaultDelay)
+			} else {
+				delay = time.Duration(0)
+			}
+			return resp, err
+		})
+	}
+}
+
+// AsIs returns a SendDecorator that invokes the passed Sender without modifying the http.Request.
+func AsIs() SendDecorator {
+	return func(s Sender) Sender {
+		return SenderFunc(func(r *http.Request) (*http.Response, error) {
+			return s.Do(r)
+		})
+	}
+}
+
+// WithLogging returns a SendDecorator that implements simple before and after logging of the
+// request.
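+//
+// For example (an illustrative sketch):
+//
+//	s := CreateSender(WithLogging(log.New(os.Stderr, "autorest: ", log.LstdFlags)))
+//	resp, err := SendWithSender(s, r)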
+func WithLogging(logger *log.Logger) SendDecorator { + return func(s Sender) Sender { + return SenderFunc(func(r *http.Request) (*http.Response, error) { + logger.Printf("Sending %s %s\n", r.Method, r.URL) + resp, err := s.Do(r) + logger.Printf("%s %s received %s\n", r.Method, r.URL, resp.Status) + return resp, err + }) + } +} + +// DoCloseIfError returns a SendDecorator that first invokes the passed Sender after which +// it closes the response if the passed Sender returns an error and the response body exists. +func DoCloseIfError() SendDecorator { + return func(s Sender) Sender { + return SenderFunc(func(r *http.Request) (*http.Response, error) { + resp, err := s.Do(r) + if err != nil { + Respond(resp, ByClosing()) + } + return resp, err + }) + } +} + +// DoErrorIfStatusCode returns a SendDecorator that emits an error if the response StatusCode is +// among the set passed. Since these are artificial errors, the response body may still require +// closing. +func DoErrorIfStatusCode(codes ...int) SendDecorator { + return func(s Sender) Sender { + return SenderFunc(func(r *http.Request) (*http.Response, error) { + resp, err := s.Do(r) + if err == nil && ResponseHasStatusCode(resp, codes...) { + err = NewErrorWithStatusCode("autorest", "DoErrorIfStatusCode", resp.StatusCode, "%v %v failed with %s", + resp.Request.Method, + resp.Request.URL, + resp.Status) + } + return resp, err + }) + } +} + +// DoErrorUnlessStatusCode returns a SendDecorator that emits an error unless the response +// StatusCode is among the set passed. Since these are artificial errors, the response body +// may still require closing. +func DoErrorUnlessStatusCode(codes ...int) SendDecorator { + return func(s Sender) Sender { + return SenderFunc(func(r *http.Request) (*http.Response, error) { + resp, err := s.Do(r) + if err == nil && !ResponseHasStatusCode(resp, codes...) { + err = NewErrorWithStatusCode("autorest", "DoErrorUnlessStatusCode", resp.StatusCode, "%v %v failed with %s", + resp.Request.Method, + resp.Request.URL, + resp.Status) + } + return resp, err + }) + } +} + +// DoRetryForAttempts returns a SendDecorator that retries the request for up to the specified +// number of attempts, exponentially backing off between requests using the supplied backoff +// time.Duration (which may be zero). +func DoRetryForAttempts(attempts int, backoff time.Duration) SendDecorator { + return func(s Sender) Sender { + return SenderFunc(func(r *http.Request) (resp *http.Response, err error) { + for attempt := 0; attempt < attempts; attempt++ { + resp, err = s.Do(r) + if err == nil { + return resp, err + } + DelayForBackoff(backoff, attempt) + } + return resp, err + }) + } +} + +// DoRetryForDuration returns a SendDecorator that retries the request until the total time is equal +// to or greater than the specified duration, exponentially backing off between requests using the +// supplied backoff time.Duration (which may be zero). +func DoRetryForDuration(d time.Duration, backoff time.Duration) SendDecorator { + return func(s Sender) Sender { + return SenderFunc(func(r *http.Request) (resp *http.Response, err error) { + end := time.Now().Add(d) + for attempt := 0; time.Now().Before(end); attempt++ { + resp, err = s.Do(r) + if err == nil { + return resp, err + } + DelayForBackoff(backoff, attempt) + } + return resp, err + }) + } +} + +// DelayForBackoff invokes time.Sleep for the supplied backoff duration raised to the power of +// passed attempt (i.e., an exponential backoff delay). Backoff may be zero. 
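+//
+// Note that the exponentiation is applied to the raw time.Duration value
+// (i.e., nanoseconds), so the delay for attempt 0 is always 1ns and even
+// modest backoff values can overflow after a few attempts.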
+func DelayForBackoff(backoff time.Duration, attempt int) { + time.Sleep(time.Duration(math.Pow(float64(backoff), float64(attempt)))) +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/to/convert.go b/vendor/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/to/convert.go new file mode 100644 index 000000000..7b180b866 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/to/convert.go @@ -0,0 +1,133 @@ +/* +Package to provides helpers to ease working with pointer values of marshalled structures. +*/ +package to + +// String returns a string value for the passed string pointer. It returns the empty string if the +// pointer is nil. +func String(s *string) string { + if s != nil { + return *s + } + return "" +} + +// StringPtr returns a pointer to the passed string. +func StringPtr(s string) *string { + return &s +} + +// StringSlice returns a string slice value for the passed string slice pointer. It returns a nil +// slice if the pointer is nil. +func StringSlice(s *[]string) []string { + if s != nil { + return *s + } + return nil +} + +// StringSlicePtr returns a pointer to the passed string slice. +func StringSlicePtr(s []string) *[]string { + return &s +} + +// StringMap returns a map of strings built from the map of string pointers. The empty string is +// used for nil pointers. +func StringMap(msp map[string]*string) map[string]string { + ms := make(map[string]string, len(msp)) + for k, sp := range msp { + if sp != nil { + ms[k] = *sp + } else { + ms[k] = "" + } + } + return ms +} + +// StringMapPtr returns a pointer to a map of string pointers built from the passed map of strings. +func StringMapPtr(ms map[string]string) *map[string]*string { + msp := make(map[string]*string, len(ms)) + for k, s := range ms { + msp[k] = StringPtr(s) + } + return &msp +} + +// Bool returns a bool value for the passed bool pointer. It returns false if the pointer is nil. +func Bool(b *bool) bool { + if b != nil { + return *b + } + return false +} + +// BoolPtr returns a pointer to the passed bool. +func BoolPtr(b bool) *bool { + return &b +} + +// Int returns an int value for the passed int pointer. It returns 0 if the pointer is nil. +func Int(i *int) int { + if i != nil { + return *i + } + return 0 +} + +// IntPtr returns a pointer to the passed int. +func IntPtr(i int) *int { + return &i +} + +// Int32 returns an int value for the passed int pointer. It returns 0 if the pointer is nil. +func Int32(i *int32) int32 { + if i != nil { + return *i + } + return 0 +} + +// Int32Ptr returns a pointer to the passed int32. +func Int32Ptr(i int32) *int32 { + return &i +} + +// Int64 returns an int value for the passed int pointer. It returns 0 if the pointer is nil. +func Int64(i *int64) int64 { + if i != nil { + return *i + } + return 0 +} + +// Int64Ptr returns a pointer to the passed int64. +func Int64Ptr(i int64) *int64 { + return &i +} + +// Float32 returns an int value for the passed int pointer. It returns 0.0 if the pointer is nil. +func Float32(i *float32) float32 { + if i != nil { + return *i + } + return 0.0 +} + +// Float32Ptr returns a pointer to the passed float32. +func Float32Ptr(i float32) *float32 { + return &i +} + +// Float64 returns an int value for the passed int pointer. It returns 0.0 if the pointer is nil. 
+func Float64(i *float64) float64 { + if i != nil { + return *i + } + return 0.0 +} + +// Float64Ptr returns a pointer to the passed float64. +func Float64Ptr(i float64) *float64 { + return &i +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/utility.go b/vendor/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/utility.go new file mode 100644 index 000000000..c373aa825 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/utility.go @@ -0,0 +1,83 @@ +package autorest + +import ( + "bytes" + "encoding/json" + "encoding/xml" + "fmt" + "io" + "net/url" +) + +// EncodedAs is a series of constants specifying various data encodings +type EncodedAs string + +const ( + // EncodedAsJSON states that data is encoded as JSON + EncodedAsJSON EncodedAs = "JSON" + + // EncodedAsXML states that data is encoded as Xml + EncodedAsXML EncodedAs = "XML" +) + +// Decoder defines the decoding method json.Decoder and xml.Decoder share +type Decoder interface { + Decode(v interface{}) error +} + +// NewDecoder creates a new decoder appropriate to the passed encoding. +// encodedAs specifies the type of encoding and r supplies the io.Reader containing the +// encoded data. +func NewDecoder(encodedAs EncodedAs, r io.Reader) Decoder { + if encodedAs == EncodedAsJSON { + return json.NewDecoder(r) + } else if encodedAs == EncodedAsXML { + return xml.NewDecoder(r) + } + return nil +} + +// CopyAndDecode decodes the data from the passed io.Reader while making a copy. Having a copy +// is especially useful if there is a chance the data will fail to decode. +// encodedAs specifies the expected encoding, r provides the io.Reader to the data, and v +// is the decoding destination. +func CopyAndDecode(encodedAs EncodedAs, r io.Reader, v interface{}) (bytes.Buffer, error) { + b := bytes.Buffer{} + return b, NewDecoder(encodedAs, io.TeeReader(r, &b)).Decode(v) +} + +func containsInt(ints []int, n int) bool { + for _, i := range ints { + if i == n { + return true + } + } + return false +} + +func escapeValueStrings(m map[string]string) map[string]string { + for key, value := range m { + m[key] = url.QueryEscape(value) + } + return m +} + +func ensureValueStrings(mapOfInterface map[string]interface{}) map[string]string { + mapOfStrings := make(map[string]string) + for key, value := range mapOfInterface { + mapOfStrings[key] = ensureValueString(value) + } + return mapOfStrings +} + +func ensureValueString(value interface{}) string { + if value == nil { + return "" + } + switch v := value.(type) { + case string: + return v + default: + return fmt.Sprintf("%v", v) + } +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/version.go b/vendor/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/version.go new file mode 100644 index 000000000..685b1e632 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/version.go @@ -0,0 +1,18 @@ +package autorest + +import ( + "fmt" +) + +const ( + major = "1" + minor = "1" + patch = "1" + tag = "" + semVerFormat = "%s.%s.%s%s" +) + +// Version returns the semantic version (see http://semver.org). 
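+// With the constants above, this yields "1.1.1".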
+func Version() string { + return fmt.Sprintf(semVerFormat, major, minor, patch, tag) +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/dgrijalva/jwt-go/.gitignore b/vendor/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/dgrijalva/jwt-go/.gitignore new file mode 100644 index 000000000..80bed650e --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/dgrijalva/jwt-go/.gitignore @@ -0,0 +1,4 @@ +.DS_Store +bin + + diff --git a/vendor/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/dgrijalva/jwt-go/.travis.yml b/vendor/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/dgrijalva/jwt-go/.travis.yml new file mode 100644 index 000000000..d6089146c --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/dgrijalva/jwt-go/.travis.yml @@ -0,0 +1,7 @@ +language: go + +go: + - 1.3.3 + - 1.4.2 + - 1.5 + - tip diff --git a/vendor/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/dgrijalva/jwt-go/LICENSE b/vendor/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/dgrijalva/jwt-go/LICENSE new file mode 100644 index 000000000..df83a9c2f --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/dgrijalva/jwt-go/LICENSE @@ -0,0 +1,8 @@ +Copyright (c) 2012 Dave Grijalva + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + diff --git a/vendor/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/dgrijalva/jwt-go/README.md b/vendor/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/dgrijalva/jwt-go/README.md new file mode 100644 index 000000000..001c0a338 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/dgrijalva/jwt-go/README.md @@ -0,0 +1,69 @@ +A [go](http://www.golang.org) (or 'golang' for search engine friendliness) implementation of [JSON Web Tokens](http://self-issued.info/docs/draft-jones-json-web-token.html) + +[![Build Status](https://travis-ci.org/dgrijalva/jwt-go.svg?branch=master)](https://travis-ci.org/dgrijalva/jwt-go) + +**NOTICE:** A vulnerability in JWT was [recently published](https://auth0.com/blog/2015/03/31/critical-vulnerabilities-in-json-web-token-libraries/). As this library doesn't force users to validate the `alg` is what they expected, it's possible your usage is effected. There will be an update soon to remedy this, and it will likey require backwards-incompatible changes to the API. 
In the short term, please make sure your implementation verifies the `alg` is what you expect.
+
+## What the heck is a JWT?
+
+In short, it's a signed JSON object that does something useful (for example, authentication). It's commonly used for `Bearer` tokens in OAuth 2. A token is made of three parts, separated by `.`'s. The first two parts are JSON objects that have been [base64url](http://tools.ietf.org/html/rfc4648) encoded. The last part is the signature, encoded the same way.
+
+The first part is called the header. It contains the necessary information for verifying the last part, the signature. For example, which signing algorithm was used and what key was used.
+
+The part in the middle is the interesting bit. It's called the Claims and contains the actual stuff you care about. Refer to [the RFC](http://self-issued.info/docs/draft-jones-json-web-token.html) for information about reserved keys and the proper way to add your own.
+
+## What's in the box?
+
+This library supports the parsing and verification as well as the generation and signing of JWTs. Currently supported signing algorithms are RSA256 and HMAC SHA256, though hooks are present for adding your own.
+
+## Parse and Verify
+
+Parsing and verifying tokens is pretty straightforward. You pass in the token and a function for looking up the key. This is done as a callback since you may need to parse the token to find out what signing method and key was used.
+
+```go
+	token, err := jwt.Parse(myToken, func(token *jwt.Token) (interface{}, error) {
+		// Don't forget to validate the alg is what you expect:
+		if _, ok := token.Method.(*jwt.SigningMethodRSA); !ok {
+			return nil, fmt.Errorf("Unexpected signing method: %v", token.Header["alg"])
+		}
+		return myLookupKey(token.Header["kid"])
+	})
+
+	if err == nil && token.Valid {
+		deliverGoodness("!")
+	} else {
+		deliverUtterRejection(":(")
+	}
+```
+
+## Create a token
+
+```go
+	// Create the token
+	token := jwt.New(jwt.SigningMethodHS256)
+	// Set some claims
+	token.Claims["foo"] = "bar"
+	token.Claims["exp"] = time.Now().Add(time.Hour * 72).Unix()
+	// Sign and get the complete encoded token as a string
+	tokenString, err := token.SignedString(mySigningKey)
+```
+
+## Extensions
+
+This library publishes all the necessary components for adding your own signing methods. Simply implement the `SigningMethod` interface and register a factory method using `RegisterSigningMethod`.
+
+Here's an example of an extension that integrates with the Google App Engine signing tools: https://github.com/someone1/gcp-jwt-go
+
+## Project Status & Versioning
+
+This library is considered production ready. Feedback and feature requests are appreciated. The API should be considered stable. There should be very few backwards-incompatible changes outside of major version updates (and only with good reason).
+
+This project uses [Semantic Versioning 2.0.0](http://semver.org). Accepted pull requests will land on `master`. Periodically, versions will be tagged from `master`. You can find all the releases on [the project releases page](https://github.com/dgrijalva/jwt-go/releases).
+
+While we try to make it obvious when we make breaking changes, there isn't a great mechanism for pushing announcements out to users. You may want to use this alternative import path instead: `gopkg.in/dgrijalva/jwt-go.v2`. It will do the right thing with respect to semantic versioning.
+
+## More
+
+Documentation can be found [on godoc.org](http://godoc.org/github.com/dgrijalva/jwt-go).
+ +The command line utility included in this project (cmd/jwt) provides a straightforward example of token creation and parsing as well as a useful tool for debugging your own integration. For a more http centric example, see [this gist](https://gist.github.com/cryptix/45c33ecf0ae54828e63b). diff --git a/vendor/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/dgrijalva/jwt-go/VERSION_HISTORY.md b/vendor/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/dgrijalva/jwt-go/VERSION_HISTORY.md new file mode 100644 index 000000000..9eb7ff9ca --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/dgrijalva/jwt-go/VERSION_HISTORY.md @@ -0,0 +1,67 @@ +## `jwt-go` Version History + +#### 2.4.0 + +* Added new type, Parser, to allow for configuration of various parsing parameters + * You can now specify a list of valid signing methods. Anything outside this set will be rejected. + * You can now opt to use the `json.Number` type instead of `float64` when parsing token JSON +* Added support for [Travis CI](https://travis-ci.org/dgrijalva/jwt-go) +* Fixed some bugs with ECDSA parsing + +#### 2.3.0 + +* Added support for ECDSA signing methods +* Added support for RSA PSS signing methods (requires go v1.4) + +#### 2.2.0 + +* Gracefully handle a `nil` `Keyfunc` being passed to `Parse`. Result will now be the parsed token and an error, instead of a panic. + +#### 2.1.0 + +Backwards compatible API change that was missed in 2.0.0. + +* The `SignedString` method on `Token` now takes `interface{}` instead of `[]byte` + +#### 2.0.0 + +There were two major reasons for breaking backwards compatibility with this update. The first was a refactor required to expand the width of the RSA and HMAC-SHA signing implementations. There will likely be no required code changes to support this change. + +The second update, while unfortunately requiring a small change in integration, is required to open up this library to other signing methods. Not all keys used for all signing methods have a single standard on-disk representation. Requiring `[]byte` as the type for all keys proved too limiting. Additionally, this implementation allows for pre-parsed tokens to be reused, which might matter in an application that parses a high volume of tokens with a small set of keys. Backwards compatibilty has been maintained for passing `[]byte` to the RSA signing methods, but they will also accept `*rsa.PublicKey` and `*rsa.PrivateKey`. + +It is likely the only integration change required here will be to change `func(t *jwt.Token) ([]byte, error)` to `func(t *jwt.Token) (interface{}, error)` when calling `Parse`. + +* **Compatibility Breaking Changes** + * `SigningMethodHS256` is now `*SigningMethodHMAC` instead of `type struct` + * `SigningMethodRS256` is now `*SigningMethodRSA` instead of `type struct` + * `KeyFunc` now returns `interface{}` instead of `[]byte` + * `SigningMethod.Sign` now takes `interface{}` instead of `[]byte` for the key + * `SigningMethod.Verify` now takes `interface{}` instead of `[]byte` for the key +* Renamed type `SigningMethodHS256` to `SigningMethodHMAC`. Specific sizes are now just instances of this type. + * Added public package global `SigningMethodHS256` + * Added public package global `SigningMethodHS384` + * Added public package global `SigningMethodHS512` +* Renamed type `SigningMethodRS256` to `SigningMethodRSA`. Specific sizes are now just instances of this type. 
+ * Added public package global `SigningMethodRS256` + * Added public package global `SigningMethodRS384` + * Added public package global `SigningMethodRS512` +* Moved sample private key for HMAC tests from an inline value to a file on disk. Value is unchanged. +* Refactored the RSA implementation to be easier to read +* Exposed helper methods `ParseRSAPrivateKeyFromPEM` and `ParseRSAPublicKeyFromPEM` + +#### 1.0.2 + +* Fixed bug in parsing public keys from certificates +* Added more tests around the parsing of keys for RS256 +* Code refactoring in RS256 implementation. No functional changes + +#### 1.0.1 + +* Fixed panic if RS256 signing method was passed an invalid key + +#### 1.0.0 + +* First versioned release +* API stabilized +* Supports creating, signing, parsing, and validating JWT tokens +* Supports RS256 and HS256 signing methods \ No newline at end of file diff --git a/vendor/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/dgrijalva/jwt-go/cmd/jwt/app.go b/vendor/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/dgrijalva/jwt-go/cmd/jwt/app.go new file mode 100644 index 000000000..9b9536124 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/dgrijalva/jwt-go/cmd/jwt/app.go @@ -0,0 +1,210 @@ +// A useful example app. You can use this to debug your tokens on the command line. +// This is also a great place to look at how you might use this library. +// +// Example usage: +// The following will create and sign a token, then verify it and output the original claims. +// echo {\"foo\":\"bar\"} | bin/jwt -key test/sample_key -alg RS256 -sign - | bin/jwt -key test/sample_key.pub -verify - +package main + +import ( + "encoding/json" + "flag" + "fmt" + "io" + "io/ioutil" + "os" + "regexp" + "strings" + + "github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/dgrijalva/jwt-go" +) + +var ( + // Options + flagAlg = flag.String("alg", "", "signing algorithm identifier") + flagKey = flag.String("key", "", "path to key file or '-' to read from stdin") + flagCompact = flag.Bool("compact", false, "output compact JSON") + flagDebug = flag.Bool("debug", false, "print out all kinds of debug data") + + // Modes - exactly one of these is required + flagSign = flag.String("sign", "", "path to claims object to sign or '-' to read from stdin") + flagVerify = flag.String("verify", "", "path to JWT token to verify or '-' to read from stdin") +) + +func main() { + // Usage message if you ask for -help or if you mess up inputs. + flag.Usage = func() { + fmt.Fprintf(os.Stderr, "Usage of %s:\n", os.Args[0]) + fmt.Fprintf(os.Stderr, " One of the following flags is required: sign, verify\n") + flag.PrintDefaults() + } + + // Parse command line options + flag.Parse() + + // Do the thing. If something goes wrong, print error to stderr + // and exit with a non-zero status code + if err := start(); err != nil { + fmt.Fprintf(os.Stderr, "Error: %v\n", err) + os.Exit(1) + } +} + +// Figure out which thing to do and then do that +func start() error { + if *flagSign != "" { + return signToken() + } else if *flagVerify != "" { + return verifyToken() + } else { + flag.Usage() + return fmt.Errorf("None of the required flags are present. 
What do you want me to do?") + } +} + +// Helper func: Read input from specified file or stdin +func loadData(p string) ([]byte, error) { + if p == "" { + return nil, fmt.Errorf("No path specified") + } + + var rdr io.Reader + if p == "-" { + rdr = os.Stdin + } else { + if f, err := os.Open(p); err == nil { + rdr = f + defer f.Close() + } else { + return nil, err + } + } + return ioutil.ReadAll(rdr) +} + +// Print a json object in accordance with the prophecy (or the command line options) +func printJSON(j interface{}) error { + var out []byte + var err error + + if *flagCompact == false { + out, err = json.MarshalIndent(j, "", " ") + } else { + out, err = json.Marshal(j) + } + + if err == nil { + fmt.Println(string(out)) + } + + return err +} + +// Verify a token and output the claims. This is a great example +// of how to verify and view a token. +func verifyToken() error { + // get the token + tokData, err := loadData(*flagVerify) + if err != nil { + return fmt.Errorf("Couldn't read token: %v", err) + } + + // trim possible whitespace from token + tokData = regexp.MustCompile(`\s*$`).ReplaceAll(tokData, []byte{}) + if *flagDebug { + fmt.Fprintf(os.Stderr, "Token len: %v bytes\n", len(tokData)) + } + + // Parse the token. Load the key from command line option + token, err := jwt.Parse(string(tokData), func(t *jwt.Token) (interface{}, error) { + data, err := loadData(*flagKey) + if err != nil { + return nil, err + } + if isEs() { + return jwt.ParseECPublicKeyFromPEM(data) + } + return data, nil + }) + + // Print some debug data + if *flagDebug && token != nil { + fmt.Fprintf(os.Stderr, "Header:\n%v\n", token.Header) + fmt.Fprintf(os.Stderr, "Claims:\n%v\n", token.Claims) + } + + // Print an error if we can't parse for some reason + if err != nil { + return fmt.Errorf("Couldn't parse token: %v", err) + } + + // Is token invalid? + if !token.Valid { + return fmt.Errorf("Token is invalid") + } + + // Print the token details + if err := printJSON(token.Claims); err != nil { + return fmt.Errorf("Failed to output claims: %v", err) + } + + return nil +} + +// Create, sign, and output a token. This is a great, simple example of +// how to use this library to create and sign a token. 
+func signToken() error { + // get the token data from command line arguments + tokData, err := loadData(*flagSign) + if err != nil { + return fmt.Errorf("Couldn't read token: %v", err) + } else if *flagDebug { + fmt.Fprintf(os.Stderr, "Token: %v bytes", len(tokData)) + } + + // parse the JSON of the claims + var claims map[string]interface{} + if err := json.Unmarshal(tokData, &claims); err != nil { + return fmt.Errorf("Couldn't parse claims JSON: %v", err) + } + + // get the key + var key interface{} + key, err = loadData(*flagKey) + if err != nil { + return fmt.Errorf("Couldn't read key: %v", err) + } + + // get the signing alg + alg := jwt.GetSigningMethod(*flagAlg) + if alg == nil { + return fmt.Errorf("Couldn't find signing method: %v", *flagAlg) + } + + // create a new token + token := jwt.New(alg) + token.Claims = claims + + if isEs() { + if k, ok := key.([]byte); !ok { + return fmt.Errorf("Couldn't convert key data to key") + } else { + key, err = jwt.ParseECPrivateKeyFromPEM(k) + if err != nil { + return err + } + } + } + + if out, err := token.SignedString(key); err == nil { + fmt.Println(out) + } else { + return fmt.Errorf("Error signing token: %v", err) + } + + return nil +} + +func isEs() bool { + return strings.HasPrefix(*flagAlg, "ES") +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/dgrijalva/jwt-go/doc.go b/vendor/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/dgrijalva/jwt-go/doc.go new file mode 100644 index 000000000..a86dc1a3b --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/dgrijalva/jwt-go/doc.go @@ -0,0 +1,4 @@ +// Package jwt is a Go implementation of JSON Web Tokens: http://self-issued.info/docs/draft-jones-json-web-token.html +// +// See README.md for more info. 
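+//
+// A minimal parse looks like this (an illustrative sketch; key lookup is
+// application-specific):
+//
+//	token, err := jwt.Parse(tokenString, func(t *jwt.Token) (interface{}, error) {
+//		return myKey, nil // e.g., an HMAC secret as []byte
+//	})
+//	if err == nil && token.Valid {
+//		// use token.Claims
+//	}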
+package jwt diff --git a/vendor/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/dgrijalva/jwt-go/ecdsa.go b/vendor/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/dgrijalva/jwt-go/ecdsa.go new file mode 100644 index 000000000..0518ed106 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/dgrijalva/jwt-go/ecdsa.go @@ -0,0 +1,147 @@ +package jwt + +import ( + "crypto" + "crypto/ecdsa" + "crypto/rand" + "errors" + "math/big" +) + +var ( + // Sadly this is missing from crypto/ecdsa compared to crypto/rsa + ErrECDSAVerification = errors.New("crypto/ecdsa: verification error") +) + +// Implements the ECDSA family of signing methods signing methods +type SigningMethodECDSA struct { + Name string + Hash crypto.Hash + KeySize int + CurveBits int +} + +// Specific instances for EC256 and company +var ( + SigningMethodES256 *SigningMethodECDSA + SigningMethodES384 *SigningMethodECDSA + SigningMethodES512 *SigningMethodECDSA +) + +func init() { + // ES256 + SigningMethodES256 = &SigningMethodECDSA{"ES256", crypto.SHA256, 32, 256} + RegisterSigningMethod(SigningMethodES256.Alg(), func() SigningMethod { + return SigningMethodES256 + }) + + // ES384 + SigningMethodES384 = &SigningMethodECDSA{"ES384", crypto.SHA384, 48, 384} + RegisterSigningMethod(SigningMethodES384.Alg(), func() SigningMethod { + return SigningMethodES384 + }) + + // ES512 + SigningMethodES512 = &SigningMethodECDSA{"ES512", crypto.SHA512, 66, 521} + RegisterSigningMethod(SigningMethodES512.Alg(), func() SigningMethod { + return SigningMethodES512 + }) +} + +func (m *SigningMethodECDSA) Alg() string { + return m.Name +} + +// Implements the Verify method from SigningMethod +// For this verify method, key must be an ecdsa.PublicKey struct +func (m *SigningMethodECDSA) Verify(signingString, signature string, key interface{}) error { + var err error + + // Decode the signature + var sig []byte + if sig, err = DecodeSegment(signature); err != nil { + return err + } + + // Get the key + var ecdsaKey *ecdsa.PublicKey + switch k := key.(type) { + case *ecdsa.PublicKey: + ecdsaKey = k + default: + return ErrInvalidKey + } + + if len(sig) != 2*m.KeySize { + return ErrECDSAVerification + } + + r := big.NewInt(0).SetBytes(sig[:m.KeySize]) + s := big.NewInt(0).SetBytes(sig[m.KeySize:]) + + // Create hasher + if !m.Hash.Available() { + return ErrHashUnavailable + } + hasher := m.Hash.New() + hasher.Write([]byte(signingString)) + + // Verify the signature + if verifystatus := ecdsa.Verify(ecdsaKey, hasher.Sum(nil), r, s); verifystatus == true { + return nil + } else { + return ErrECDSAVerification + } +} + +// Implements the Sign method from SigningMethod +// For this signing method, key must be an ecdsa.PrivateKey struct +func (m *SigningMethodECDSA) Sign(signingString string, key interface{}) (string, error) { + // Get the key + var ecdsaKey *ecdsa.PrivateKey + switch k := key.(type) { + case *ecdsa.PrivateKey: + ecdsaKey = k + default: + return "", ErrInvalidKey + } + + // Create the hasher + if !m.Hash.Available() { + return "", ErrHashUnavailable + } + + hasher := m.Hash.New() + hasher.Write([]byte(signingString)) + + // Sign the string and return r, s + if r, s, err := ecdsa.Sign(rand.Reader, ecdsaKey, hasher.Sum(nil)); err == nil { + curveBits := ecdsaKey.Curve.Params().BitSize + + if m.CurveBits != curveBits { + return "", ErrInvalidKey + } + + keyBytes := curveBits / 8 + if curveBits%8 > 0 { + keyBytes += 1 + } + + // We serialize the outpus (r and s) into 
big-endian byte arrays and pad + // them with zeros on the left to make sure the sizes work out. Both arrays + // must be keyBytes long, and the output must be 2*keyBytes long. + rBytes := r.Bytes() + rBytesPadded := make([]byte, keyBytes) + copy(rBytesPadded[keyBytes-len(rBytes):], rBytes) + + sBytes := s.Bytes() + sBytesPadded := make([]byte, keyBytes) + copy(sBytesPadded[keyBytes-len(sBytes):], sBytes) + + out := append(rBytesPadded, sBytesPadded...) + + return EncodeSegment(out), nil + } else { + return "", err + } +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/dgrijalva/jwt-go/ecdsa_utils.go b/vendor/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/dgrijalva/jwt-go/ecdsa_utils.go new file mode 100644 index 000000000..d19624b72 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/dgrijalva/jwt-go/ecdsa_utils.go @@ -0,0 +1,67 @@ +package jwt + +import ( + "crypto/ecdsa" + "crypto/x509" + "encoding/pem" + "errors" +) + +var ( + ErrNotECPublicKey = errors.New("Key is not a valid ECDSA public key") + ErrNotECPrivateKey = errors.New("Key is not a valid ECDSA private key") +) + +// Parse PEM encoded Elliptic Curve Private Key Structure +func ParseECPrivateKeyFromPEM(key []byte) (*ecdsa.PrivateKey, error) { + var err error + + // Parse PEM block + var block *pem.Block + if block, _ = pem.Decode(key); block == nil { + return nil, ErrKeyMustBePEMEncoded + } + + // Parse the key + var parsedKey interface{} + if parsedKey, err = x509.ParseECPrivateKey(block.Bytes); err != nil { + return nil, err + } + + var pkey *ecdsa.PrivateKey + var ok bool + if pkey, ok = parsedKey.(*ecdsa.PrivateKey); !ok { + return nil, ErrNotECPrivateKey + } + + return pkey, nil +} + +// Parse PEM encoded PKCS1 or PKCS8 public key +func ParseECPublicKeyFromPEM(key []byte) (*ecdsa.PublicKey, error) { + var err error + + // Parse PEM block + var block *pem.Block + if block, _ = pem.Decode(key); block == nil { + return nil, ErrKeyMustBePEMEncoded + } + + // Parse the key + var parsedKey interface{} + if parsedKey, err = x509.ParsePKIXPublicKey(block.Bytes); err != nil { + if cert, err := x509.ParseCertificate(block.Bytes); err == nil { + parsedKey = cert.PublicKey + } else { + return nil, err + } + } + + var pkey *ecdsa.PublicKey + var ok bool + if pkey, ok = parsedKey.(*ecdsa.PublicKey); !ok { + return nil, ErrNotECPublicKey + } + + return pkey, nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/dgrijalva/jwt-go/errors.go b/vendor/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/dgrijalva/jwt-go/errors.go new file mode 100644 index 000000000..e9e788ff9 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/dgrijalva/jwt-go/errors.go @@ -0,0 +1,43 @@ +package jwt + +import ( + "errors" +) + +// Error constants +var ( + ErrInvalidKey = errors.New("key is invalid or of invalid type") + ErrHashUnavailable = errors.New("the requested hash function is unavailable") + ErrNoTokenInRequest = errors.New("no token present in request") +) + +// The errors that might occur when parsing and validating a token +const ( + ValidationErrorMalformed uint32 = 1 << iota // Token is malformed + ValidationErrorUnverifiable // Token could not be verified because of signing problems + ValidationErrorSignatureInvalid // Signature validation failed + ValidationErrorExpired // Exp validation failed + ValidationErrorNotValidYet // NBF validation failed +) 
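+
+// Callers can inspect the bitfield to learn why validation failed (an
+// illustrative sketch):
+//
+//	if ve, ok := err.(*ValidationError); ok && ve.Errors&ValidationErrorExpired != 0 {
+//		// the token has expired
+//	}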
+ +// The error from Parse if token is not valid +type ValidationError struct { + err string + Errors uint32 // bitfield. see ValidationError... constants +} + +// Validation error is an error type +func (e ValidationError) Error() string { + if e.err == "" { + return "token is invalid" + } + return e.err +} + +// No errors +func (e *ValidationError) valid() bool { + if e.Errors > 0 { + return false + } + return true +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/dgrijalva/jwt-go/hmac.go b/vendor/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/dgrijalva/jwt-go/hmac.go new file mode 100644 index 000000000..192e625fd --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/dgrijalva/jwt-go/hmac.go @@ -0,0 +1,94 @@ +package jwt + +import ( + "crypto" + "crypto/hmac" + "errors" +) + +// Implements the HMAC-SHA family of signing methods signing methods +type SigningMethodHMAC struct { + Name string + Hash crypto.Hash +} + +// Specific instances for HS256 and company +var ( + SigningMethodHS256 *SigningMethodHMAC + SigningMethodHS384 *SigningMethodHMAC + SigningMethodHS512 *SigningMethodHMAC + ErrSignatureInvalid = errors.New("signature is invalid") +) + +func init() { + // HS256 + SigningMethodHS256 = &SigningMethodHMAC{"HS256", crypto.SHA256} + RegisterSigningMethod(SigningMethodHS256.Alg(), func() SigningMethod { + return SigningMethodHS256 + }) + + // HS384 + SigningMethodHS384 = &SigningMethodHMAC{"HS384", crypto.SHA384} + RegisterSigningMethod(SigningMethodHS384.Alg(), func() SigningMethod { + return SigningMethodHS384 + }) + + // HS512 + SigningMethodHS512 = &SigningMethodHMAC{"HS512", crypto.SHA512} + RegisterSigningMethod(SigningMethodHS512.Alg(), func() SigningMethod { + return SigningMethodHS512 + }) +} + +func (m *SigningMethodHMAC) Alg() string { + return m.Name +} + +// Verify the signature of HSXXX tokens. Returns nil if the signature is valid. +func (m *SigningMethodHMAC) Verify(signingString, signature string, key interface{}) error { + // Verify the key is the right type + keyBytes, ok := key.([]byte) + if !ok { + return ErrInvalidKey + } + + // Decode signature, for comparison + sig, err := DecodeSegment(signature) + if err != nil { + return err + } + + // Can we use the specified hashing method? + if !m.Hash.Available() { + return ErrHashUnavailable + } + + // This signing method is symmetric, so we validate the signature + // by reproducing the signature from the signing string and key, then + // comparing that against the provided signature. + hasher := hmac.New(m.Hash.New, keyBytes) + hasher.Write([]byte(signingString)) + if !hmac.Equal(sig, hasher.Sum(nil)) { + return ErrSignatureInvalid + } + + // No validation errors. Signature is good. + return nil +} + +// Implements the Sign method from SigningMethod for this signing method. 
+// Key must be []byte +func (m *SigningMethodHMAC) Sign(signingString string, key interface{}) (string, error) { + if keyBytes, ok := key.([]byte); ok { + if !m.Hash.Available() { + return "", ErrHashUnavailable + } + + hasher := hmac.New(m.Hash.New, keyBytes) + hasher.Write([]byte(signingString)) + + return EncodeSegment(hasher.Sum(nil)), nil + } + + return "", ErrInvalidKey +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/dgrijalva/jwt-go/parser.go b/vendor/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/dgrijalva/jwt-go/parser.go new file mode 100644 index 000000000..3fc27bfed --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/dgrijalva/jwt-go/parser.go @@ -0,0 +1,113 @@ +package jwt + +import ( + "bytes" + "encoding/json" + "fmt" + "strings" +) + +type Parser struct { + ValidMethods []string // If populated, only these methods will be considered valid + UseJSONNumber bool // Use JSON Number format in JSON decoder +} + +// Parse, validate, and return a token. +// keyFunc will receive the parsed token and should return the key for validating. +// If everything is kosher, err will be nil +func (p *Parser) Parse(tokenString string, keyFunc Keyfunc) (*Token, error) { + parts := strings.Split(tokenString, ".") + if len(parts) != 3 { + return nil, &ValidationError{err: "token contains an invalid number of segments", Errors: ValidationErrorMalformed} + } + + var err error + token := &Token{Raw: tokenString} + // parse Header + var headerBytes []byte + if headerBytes, err = DecodeSegment(parts[0]); err != nil { + return token, &ValidationError{err: err.Error(), Errors: ValidationErrorMalformed} + } + if err = json.Unmarshal(headerBytes, &token.Header); err != nil { + return token, &ValidationError{err: err.Error(), Errors: ValidationErrorMalformed} + } + + // parse Claims + var claimBytes []byte + if claimBytes, err = DecodeSegment(parts[1]); err != nil { + return token, &ValidationError{err: err.Error(), Errors: ValidationErrorMalformed} + } + dec := json.NewDecoder(bytes.NewBuffer(claimBytes)) + if p.UseJSONNumber { + dec.UseNumber() + } + if err = dec.Decode(&token.Claims); err != nil { + return token, &ValidationError{err: err.Error(), Errors: ValidationErrorMalformed} + } + + // Lookup signature method + if method, ok := token.Header["alg"].(string); ok { + if token.Method = GetSigningMethod(method); token.Method == nil { + return token, &ValidationError{err: "signing method (alg) is unavailable.", Errors: ValidationErrorUnverifiable} + } + } else { + return token, &ValidationError{err: "signing method (alg) is unspecified.", Errors: ValidationErrorUnverifiable} + } + + // Verify signing method is in the required set + if p.ValidMethods != nil { + var signingMethodValid = false + var alg = token.Method.Alg() + for _, m := range p.ValidMethods { + if m == alg { + signingMethodValid = true + break + } + } + if !signingMethodValid { + // signing method is not in the listed set + return token, &ValidationError{err: fmt.Sprintf("signing method %v is invalid", alg), Errors: ValidationErrorSignatureInvalid} + } + } + + // Lookup key + var key interface{} + if keyFunc == nil { + // keyFunc was not provided. 
short circuiting validation + return token, &ValidationError{err: "no Keyfunc was provided.", Errors: ValidationErrorUnverifiable} + } + if key, err = keyFunc(token); err != nil { + // keyFunc returned an error + return token, &ValidationError{err: err.Error(), Errors: ValidationErrorUnverifiable} + } + + // Check expiration times + vErr := &ValidationError{} + now := TimeFunc().Unix() + if exp, ok := token.Claims["exp"].(float64); ok { + if now > int64(exp) { + vErr.err = "token is expired" + vErr.Errors |= ValidationErrorExpired + } + } + if nbf, ok := token.Claims["nbf"].(float64); ok { + if now < int64(nbf) { + vErr.err = "token is not valid yet" + vErr.Errors |= ValidationErrorNotValidYet + } + } + + // Perform validation + token.Signature = parts[2] + if err = token.Method.Verify(strings.Join(parts[0:2], "."), token.Signature, key); err != nil { + vErr.err = err.Error() + vErr.Errors |= ValidationErrorSignatureInvalid + } + + if vErr.valid() { + token.Valid = true + return token, nil + } + + return token, vErr +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/dgrijalva/jwt-go/rsa.go b/vendor/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/dgrijalva/jwt-go/rsa.go new file mode 100644 index 000000000..cddffced5 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/dgrijalva/jwt-go/rsa.go @@ -0,0 +1,114 @@ +package jwt + +import ( + "crypto" + "crypto/rand" + "crypto/rsa" +) + +// Implements the RSA family of signing methods signing methods +type SigningMethodRSA struct { + Name string + Hash crypto.Hash +} + +// Specific instances for RS256 and company +var ( + SigningMethodRS256 *SigningMethodRSA + SigningMethodRS384 *SigningMethodRSA + SigningMethodRS512 *SigningMethodRSA +) + +func init() { + // RS256 + SigningMethodRS256 = &SigningMethodRSA{"RS256", crypto.SHA256} + RegisterSigningMethod(SigningMethodRS256.Alg(), func() SigningMethod { + return SigningMethodRS256 + }) + + // RS384 + SigningMethodRS384 = &SigningMethodRSA{"RS384", crypto.SHA384} + RegisterSigningMethod(SigningMethodRS384.Alg(), func() SigningMethod { + return SigningMethodRS384 + }) + + // RS512 + SigningMethodRS512 = &SigningMethodRSA{"RS512", crypto.SHA512} + RegisterSigningMethod(SigningMethodRS512.Alg(), func() SigningMethod { + return SigningMethodRS512 + }) +} + +func (m *SigningMethodRSA) Alg() string { + return m.Name +} + +// Implements the Verify method from SigningMethod +// For this signing method, must be either a PEM encoded PKCS1 or PKCS8 RSA public key as +// []byte, or an rsa.PublicKey structure. +func (m *SigningMethodRSA) Verify(signingString, signature string, key interface{}) error { + var err error + + // Decode the signature + var sig []byte + if sig, err = DecodeSegment(signature); err != nil { + return err + } + + var rsaKey *rsa.PublicKey + + switch k := key.(type) { + case []byte: + if rsaKey, err = ParseRSAPublicKeyFromPEM(k); err != nil { + return err + } + case *rsa.PublicKey: + rsaKey = k + default: + return ErrInvalidKey + } + + // Create hasher + if !m.Hash.Available() { + return ErrHashUnavailable + } + hasher := m.Hash.New() + hasher.Write([]byte(signingString)) + + // Verify the signature + return rsa.VerifyPKCS1v15(rsaKey, m.Hash, hasher.Sum(nil), sig) +} + +// Implements the Sign method from SigningMethod +// For this signing method, must be either a PEM encoded PKCS1 or PKCS8 RSA private key as +// []byte, or an rsa.PrivateKey structure. 
+func (m *SigningMethodRSA) Sign(signingString string, key interface{}) (string, error) { + var err error + var rsaKey *rsa.PrivateKey + + switch k := key.(type) { + case []byte: + if rsaKey, err = ParseRSAPrivateKeyFromPEM(k); err != nil { + return "", err + } + case *rsa.PrivateKey: + rsaKey = k + default: + return "", ErrInvalidKey + } + + // Create the hasher + if !m.Hash.Available() { + return "", ErrHashUnavailable + } + + hasher := m.Hash.New() + hasher.Write([]byte(signingString)) + + // Sign the string and return the encoded bytes + if sigBytes, err := rsa.SignPKCS1v15(rand.Reader, rsaKey, m.Hash, hasher.Sum(nil)); err == nil { + return EncodeSegment(sigBytes), nil + } else { + return "", err + } +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/dgrijalva/jwt-go/rsa_pss.go b/vendor/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/dgrijalva/jwt-go/rsa_pss.go new file mode 100644 index 000000000..b5b707350 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/dgrijalva/jwt-go/rsa_pss.go @@ -0,0 +1,126 @@ +// +build go1.4 + +package jwt + +import ( + "crypto" + "crypto/rand" + "crypto/rsa" +) + +// Implements the RSAPSS family of signing methods signing methods +type SigningMethodRSAPSS struct { + *SigningMethodRSA + Options *rsa.PSSOptions +} + +// Specific instances for RS/PS and company +var ( + SigningMethodPS256 *SigningMethodRSAPSS + SigningMethodPS384 *SigningMethodRSAPSS + SigningMethodPS512 *SigningMethodRSAPSS +) + +func init() { + // PS256 + SigningMethodPS256 = &SigningMethodRSAPSS{ + &SigningMethodRSA{ + Name: "PS256", + Hash: crypto.SHA256, + }, + &rsa.PSSOptions{ + SaltLength: rsa.PSSSaltLengthAuto, + Hash: crypto.SHA256, + }, + } + RegisterSigningMethod(SigningMethodPS256.Alg(), func() SigningMethod { + return SigningMethodPS256 + }) + + // PS384 + SigningMethodPS384 = &SigningMethodRSAPSS{ + &SigningMethodRSA{ + Name: "PS384", + Hash: crypto.SHA384, + }, + &rsa.PSSOptions{ + SaltLength: rsa.PSSSaltLengthAuto, + Hash: crypto.SHA384, + }, + } + RegisterSigningMethod(SigningMethodPS384.Alg(), func() SigningMethod { + return SigningMethodPS384 + }) + + // PS512 + SigningMethodPS512 = &SigningMethodRSAPSS{ + &SigningMethodRSA{ + Name: "PS512", + Hash: crypto.SHA512, + }, + &rsa.PSSOptions{ + SaltLength: rsa.PSSSaltLengthAuto, + Hash: crypto.SHA512, + }, + } + RegisterSigningMethod(SigningMethodPS512.Alg(), func() SigningMethod { + return SigningMethodPS512 + }) +} + +// Implements the Verify method from SigningMethod +// For this verify method, key must be an rsa.PublicKey struct +func (m *SigningMethodRSAPSS) Verify(signingString, signature string, key interface{}) error { + var err error + + // Decode the signature + var sig []byte + if sig, err = DecodeSegment(signature); err != nil { + return err + } + + var rsaKey *rsa.PublicKey + switch k := key.(type) { + case *rsa.PublicKey: + rsaKey = k + default: + return ErrInvalidKey + } + + // Create hasher + if !m.Hash.Available() { + return ErrHashUnavailable + } + hasher := m.Hash.New() + hasher.Write([]byte(signingString)) + + return rsa.VerifyPSS(rsaKey, m.Hash, hasher.Sum(nil), sig, m.Options) +} + +// Implements the Sign method from SigningMethod +// For this signing method, key must be an rsa.PrivateKey struct +func (m *SigningMethodRSAPSS) Sign(signingString string, key interface{}) (string, error) { + var rsaKey *rsa.PrivateKey + + switch k := key.(type) { + case *rsa.PrivateKey: + rsaKey = k + default: + return "", 
ErrInvalidKey + } + + // Create the hasher + if !m.Hash.Available() { + return "", ErrHashUnavailable + } + + hasher := m.Hash.New() + hasher.Write([]byte(signingString)) + + // Sign the string and return the encoded bytes + if sigBytes, err := rsa.SignPSS(rand.Reader, rsaKey, m.Hash, hasher.Sum(nil), m.Options); err == nil { + return EncodeSegment(sigBytes), nil + } else { + return "", err + } +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/dgrijalva/jwt-go/rsa_utils.go b/vendor/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/dgrijalva/jwt-go/rsa_utils.go new file mode 100644 index 000000000..6f3b6ff04 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/dgrijalva/jwt-go/rsa_utils.go @@ -0,0 +1,68 @@ +package jwt + +import ( + "crypto/rsa" + "crypto/x509" + "encoding/pem" + "errors" +) + +var ( + ErrKeyMustBePEMEncoded = errors.New("Invalid Key: Key must be PEM encoded PKCS1 or PKCS8 private key") + ErrNotRSAPrivateKey = errors.New("Key is not a valid RSA private key") +) + +// Parse PEM encoded PKCS1 or PKCS8 private key +func ParseRSAPrivateKeyFromPEM(key []byte) (*rsa.PrivateKey, error) { + var err error + + // Parse PEM block + var block *pem.Block + if block, _ = pem.Decode(key); block == nil { + return nil, ErrKeyMustBePEMEncoded + } + + var parsedKey interface{} + if parsedKey, err = x509.ParsePKCS1PrivateKey(block.Bytes); err != nil { + if parsedKey, err = x509.ParsePKCS8PrivateKey(block.Bytes); err != nil { + return nil, err + } + } + + var pkey *rsa.PrivateKey + var ok bool + if pkey, ok = parsedKey.(*rsa.PrivateKey); !ok { + return nil, ErrNotRSAPrivateKey + } + + return pkey, nil +} + +// Parse PEM encoded PKCS1 or PKCS8 public key +func ParseRSAPublicKeyFromPEM(key []byte) (*rsa.PublicKey, error) { + var err error + + // Parse PEM block + var block *pem.Block + if block, _ = pem.Decode(key); block == nil { + return nil, ErrKeyMustBePEMEncoded + } + + // Parse the key + var parsedKey interface{} + if parsedKey, err = x509.ParsePKIXPublicKey(block.Bytes); err != nil { + if cert, err := x509.ParseCertificate(block.Bytes); err == nil { + parsedKey = cert.PublicKey + } else { + return nil, err + } + } + + var pkey *rsa.PublicKey + var ok bool + if pkey, ok = parsedKey.(*rsa.PublicKey); !ok { + return nil, ErrNotRSAPrivateKey + } + + return pkey, nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/dgrijalva/jwt-go/signing_method.go b/vendor/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/dgrijalva/jwt-go/signing_method.go new file mode 100644 index 000000000..12cf0f3d0 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/dgrijalva/jwt-go/signing_method.go @@ -0,0 +1,24 @@ +package jwt + +var signingMethods = map[string]func() SigningMethod{} + +// Implement SigningMethod to add new methods for signing or verifying tokens. +type SigningMethod interface { + Verify(signingString, signature string, key interface{}) error // Returns nil if signature is valid + Sign(signingString string, key interface{}) (string, error) // Returns encoded signature or error + Alg() string // returns the alg identifier for this method (example: 'HS256') +} + +// Register the "alg" name and a factory function for signing method. 
+// This is typically done during init() in the method's implementation +func RegisterSigningMethod(alg string, f func() SigningMethod) { + signingMethods[alg] = f +} + +// Get a signing method from an "alg" string +func GetSigningMethod(alg string) (method SigningMethod) { + if methodF, ok := signingMethods[alg]; ok { + method = methodF() + } + return +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/dgrijalva/jwt-go/test/ec256-private.pem b/vendor/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/dgrijalva/jwt-go/test/ec256-private.pem new file mode 100644 index 000000000..a6882b3e5 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/dgrijalva/jwt-go/test/ec256-private.pem @@ -0,0 +1,5 @@ +-----BEGIN EC PRIVATE KEY----- +MHcCAQEEIAh5qA3rmqQQuu0vbKV/+zouz/y/Iy2pLpIcWUSyImSwoAoGCCqGSM49 +AwEHoUQDQgAEYD54V/vp+54P9DXarYqx4MPcm+HKRIQzNasYSoRQHQ/6S6Ps8tpM +cT+KvIIC8W/e9k0W7Cm72M1P9jU7SLf/vg== +-----END EC PRIVATE KEY----- diff --git a/vendor/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/dgrijalva/jwt-go/test/ec256-public.pem b/vendor/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/dgrijalva/jwt-go/test/ec256-public.pem new file mode 100644 index 000000000..7191361e7 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/dgrijalva/jwt-go/test/ec256-public.pem @@ -0,0 +1,4 @@ +-----BEGIN PUBLIC KEY----- +MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEYD54V/vp+54P9DXarYqx4MPcm+HK +RIQzNasYSoRQHQ/6S6Ps8tpMcT+KvIIC8W/e9k0W7Cm72M1P9jU7SLf/vg== +-----END PUBLIC KEY----- diff --git a/vendor/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/dgrijalva/jwt-go/test/ec384-private.pem b/vendor/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/dgrijalva/jwt-go/test/ec384-private.pem new file mode 100644 index 000000000..a86c823e5 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/dgrijalva/jwt-go/test/ec384-private.pem @@ -0,0 +1,6 @@ +-----BEGIN EC PRIVATE KEY----- +MIGkAgEBBDCaCvMHKhcG/qT7xsNLYnDT7sE/D+TtWIol1ROdaK1a564vx5pHbsRy +SEKcIxISi1igBwYFK4EEACKhZANiAATYa7rJaU7feLMqrAx6adZFNQOpaUH/Uylb +ZLriOLON5YFVwtVUpO1FfEXZUIQpptRPtc5ixIPY658yhBSb6irfIJUSP9aYTflJ +GKk/mDkK4t8mWBzhiD5B6jg9cEGhGgA= +-----END EC PRIVATE KEY----- diff --git a/vendor/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/dgrijalva/jwt-go/test/ec384-public.pem b/vendor/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/dgrijalva/jwt-go/test/ec384-public.pem new file mode 100644 index 000000000..e80d00564 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/dgrijalva/jwt-go/test/ec384-public.pem @@ -0,0 +1,5 @@ +-----BEGIN PUBLIC KEY----- +MHYwEAYHKoZIzj0CAQYFK4EEACIDYgAE2Gu6yWlO33izKqwMemnWRTUDqWlB/1Mp +W2S64jizjeWBVcLVVKTtRXxF2VCEKabUT7XOYsSD2OufMoQUm+oq3yCVEj/WmE35 +SRipP5g5CuLfJlgc4Yg+Qeo4PXBBoRoA +-----END PUBLIC KEY----- diff --git a/vendor/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/dgrijalva/jwt-go/test/ec512-private.pem b/vendor/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/dgrijalva/jwt-go/test/ec512-private.pem new file mode 100644 index 000000000..213afaf13 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/dgrijalva/jwt-go/test/ec512-private.pem @@ -0,0 +1,7 @@ +-----BEGIN EC PRIVATE KEY----- 
+MIHcAgEBBEIB0pE4uFaWRx7t03BsYlYvF1YvKaBGyvoakxnodm9ou0R9wC+sJAjH +QZZJikOg4SwNqgQ/hyrOuDK2oAVHhgVGcYmgBwYFK4EEACOhgYkDgYYABAAJXIuw +12MUzpHggia9POBFYXSxaOGKGbMjIyDI+6q7wi7LMw3HgbaOmgIqFG72o8JBQwYN +4IbXHf+f86CRY1AA2wHzbHvt6IhkCXTNxBEffa1yMUgu8n9cKKF2iLgyQKcKqW33 +8fGOw/n3Rm2Yd/EB56u2rnD29qS+nOM9eGS+gy39OQ== +-----END EC PRIVATE KEY----- diff --git a/vendor/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/dgrijalva/jwt-go/test/ec512-public.pem b/vendor/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/dgrijalva/jwt-go/test/ec512-public.pem new file mode 100644 index 000000000..02ea02203 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/dgrijalva/jwt-go/test/ec512-public.pem @@ -0,0 +1,6 @@ +-----BEGIN PUBLIC KEY----- +MIGbMBAGByqGSM49AgEGBSuBBAAjA4GGAAQACVyLsNdjFM6R4IImvTzgRWF0sWjh +ihmzIyMgyPuqu8IuyzMNx4G2jpoCKhRu9qPCQUMGDeCG1x3/n/OgkWNQANsB82x7 +7eiIZAl0zcQRH32tcjFILvJ/XCihdoi4MkCnCqlt9/HxjsP590ZtmHfxAeertq5w +9vakvpzjPXhkvoMt/Tk= +-----END PUBLIC KEY----- diff --git a/vendor/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/dgrijalva/jwt-go/test/hmacTestKey b/vendor/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/dgrijalva/jwt-go/test/hmacTestKey new file mode 100644 index 000000000..435b8ddb3 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/dgrijalva/jwt-go/test/hmacTestKey @@ -0,0 +1 @@ +#5K+¥¼ƒ~ew{¦Z³(æðTÉ(©„²ÒP.¿ÓûZ’ÒGï–Š´Ãwb="=.!r.OÀÍšõgЀ£ \ No newline at end of file diff --git a/vendor/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/dgrijalva/jwt-go/test/sample_key b/vendor/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/dgrijalva/jwt-go/test/sample_key new file mode 100644 index 000000000..abdbade31 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/dgrijalva/jwt-go/test/sample_key @@ -0,0 +1,27 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIEowIBAAKCAQEA4f5wg5l2hKsTeNem/V41fGnJm6gOdrj8ym3rFkEU/wT8RDtn +SgFEZOQpHEgQ7JL38xUfU0Y3g6aYw9QT0hJ7mCpz9Er5qLaMXJwZxzHzAahlfA0i +cqabvJOMvQtzD6uQv6wPEyZtDTWiQi9AXwBpHssPnpYGIn20ZZuNlX2BrClciHhC +PUIIZOQn/MmqTD31jSyjoQoV7MhhMTATKJx2XrHhR+1DcKJzQBSTAGnpYVaqpsAR +ap+nwRipr3nUTuxyGohBTSmjJ2usSeQXHI3bODIRe1AuTyHceAbewn8b462yEWKA +Rdpd9AjQW5SIVPfdsz5B6GlYQ5LdYKtznTuy7wIDAQABAoIBAQCwia1k7+2oZ2d3 +n6agCAbqIE1QXfCmh41ZqJHbOY3oRQG3X1wpcGH4Gk+O+zDVTV2JszdcOt7E5dAy +MaomETAhRxB7hlIOnEN7WKm+dGNrKRvV0wDU5ReFMRHg31/Lnu8c+5BvGjZX+ky9 +POIhFFYJqwCRlopGSUIxmVj5rSgtzk3iWOQXr+ah1bjEXvlxDOWkHN6YfpV5ThdE +KdBIPGEVqa63r9n2h+qazKrtiRqJqGnOrHzOECYbRFYhexsNFz7YT02xdfSHn7gM +IvabDDP/Qp0PjE1jdouiMaFHYnLBbgvlnZW9yuVf/rpXTUq/njxIXMmvmEyyvSDn +FcFikB8pAoGBAPF77hK4m3/rdGT7X8a/gwvZ2R121aBcdPwEaUhvj/36dx596zvY +mEOjrWfZhF083/nYWE2kVquj2wjs+otCLfifEEgXcVPTnEOPO9Zg3uNSL0nNQghj +FuD3iGLTUBCtM66oTe0jLSslHe8gLGEQqyMzHOzYxNqibxcOZIe8Qt0NAoGBAO+U +I5+XWjWEgDmvyC3TrOSf/KCGjtu0TSv30ipv27bDLMrpvPmD/5lpptTFwcxvVhCs +2b+chCjlghFSWFbBULBrfci2FtliClOVMYrlNBdUSJhf3aYSG2Doe6Bgt1n2CpNn +/iu37Y3NfemZBJA7hNl4dYe+f+uzM87cdQ214+jrAoGAXA0XxX8ll2+ToOLJsaNT +OvNB9h9Uc5qK5X5w+7G7O998BN2PC/MWp8H+2fVqpXgNENpNXttkRm1hk1dych86 +EunfdPuqsX+as44oCyJGFHVBnWpm33eWQw9YqANRI+pCJzP08I5WK3osnPiwshd+ +hR54yjgfYhBFNI7B95PmEQkCgYBzFSz7h1+s34Ycr8SvxsOBWxymG5zaCsUbPsL0 +4aCgLScCHb9J+E86aVbbVFdglYa5Id7DPTL61ixhl7WZjujspeXZGSbmq0Kcnckb +mDgqkLECiOJW2NHP/j0McAkDLL4tysF8TLDO8gvuvzNC+WQ6drO2ThrypLVZQ+ry +eBIPmwKBgEZxhqa0gVvHQG/7Od69KWj4eJP28kq13RhKay8JOoN0vPmspXJo1HY3 
+CKuHRG+AP579dncdUnOMvfXOtkdM4vk0+hWASBQzM9xzVcztCa+koAugjVaLS9A+ +9uQoqEeVNTckxx0S2bYevRy7hGQmUJTyQm3j1zEUR5jpdbL83Fbq +-----END RSA PRIVATE KEY----- diff --git a/vendor/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/dgrijalva/jwt-go/test/sample_key.pub b/vendor/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/dgrijalva/jwt-go/test/sample_key.pub new file mode 100644 index 000000000..03dc982ac --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/dgrijalva/jwt-go/test/sample_key.pub @@ -0,0 +1,9 @@ +-----BEGIN PUBLIC KEY----- +MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA4f5wg5l2hKsTeNem/V41 +fGnJm6gOdrj8ym3rFkEU/wT8RDtnSgFEZOQpHEgQ7JL38xUfU0Y3g6aYw9QT0hJ7 +mCpz9Er5qLaMXJwZxzHzAahlfA0icqabvJOMvQtzD6uQv6wPEyZtDTWiQi9AXwBp +HssPnpYGIn20ZZuNlX2BrClciHhCPUIIZOQn/MmqTD31jSyjoQoV7MhhMTATKJx2 +XrHhR+1DcKJzQBSTAGnpYVaqpsARap+nwRipr3nUTuxyGohBTSmjJ2usSeQXHI3b +ODIRe1AuTyHceAbewn8b462yEWKARdpd9AjQW5SIVPfdsz5B6GlYQ5LdYKtznTuy +7wIDAQAB +-----END PUBLIC KEY----- diff --git a/vendor/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/dgrijalva/jwt-go/token.go b/vendor/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/dgrijalva/jwt-go/token.go new file mode 100644 index 000000000..d35aaa4a9 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/dgrijalva/jwt-go/token.go @@ -0,0 +1,126 @@ +package jwt + +import ( + "encoding/base64" + "encoding/json" + "net/http" + "strings" + "time" +) + +// TimeFunc provides the current time when parsing token to validate "exp" claim (expiration time). +// You can override it to use another time value. This is useful for testing or if your +// server uses a different time zone than your tokens. +var TimeFunc = time.Now + +// Parse methods use this callback function to supply +// the key for verification. The function receives the parsed, +// but unverified Token. This allows you to use propries in the +// Header of the token (such as `kid`) to identify which key to use. +type Keyfunc func(*Token) (interface{}, error) + +// A JWT Token. Different fields will be used depending on whether you're +// creating or parsing/verifying a token. +type Token struct { + Raw string // The raw token. Populated when you Parse a token + Method SigningMethod // The signing method used or to be used + Header map[string]interface{} // The first segment of the token + Claims map[string]interface{} // The second segment of the token + Signature string // The third segment of the token. Populated when you Parse a token + Valid bool // Is the token valid? Populated when you Parse/Verify a token +} + +// Create a new Token. Takes a signing method +func New(method SigningMethod) *Token { + return &Token{ + Header: map[string]interface{}{ + "typ": "JWT", + "alg": method.Alg(), + }, + Claims: make(map[string]interface{}), + Method: method, + } +} + +// Get the complete, signed token +func (t *Token) SignedString(key interface{}) (string, error) { + var sig, sstr string + var err error + if sstr, err = t.SigningString(); err != nil { + return "", err + } + if sig, err = t.Method.Sign(sstr, key); err != nil { + return "", err + } + return strings.Join([]string{sstr, sig}, "."), nil +} + +// Generate the signing string. This is the +// most expensive part of the whole deal. Unless you +// need this for something special, just go straight for +// the SignedString. 
+func (t *Token) SigningString() (string, error) { + var err error + parts := make([]string, 2) + for i, _ := range parts { + var source map[string]interface{} + if i == 0 { + source = t.Header + } else { + source = t.Claims + } + + var jsonValue []byte + if jsonValue, err = json.Marshal(source); err != nil { + return "", err + } + + parts[i] = EncodeSegment(jsonValue) + } + return strings.Join(parts, "."), nil +} + +// Parse, validate, and return a token. +// keyFunc will receive the parsed token and should return the key for validating. +// If everything is kosher, err will be nil +func Parse(tokenString string, keyFunc Keyfunc) (*Token, error) { + return new(Parser).Parse(tokenString, keyFunc) +} + +// Try to find the token in an http.Request. +// This method will call ParseMultipartForm if there's no token in the header. +// Currently, it looks in the Authorization header as well as +// looking for an 'access_token' request parameter in req.Form. +func ParseFromRequest(req *http.Request, keyFunc Keyfunc) (token *Token, err error) { + + // Look for an Authorization header + if ah := req.Header.Get("Authorization"); ah != "" { + // Should be a bearer token + if len(ah) > 6 && strings.ToUpper(ah[0:6]) == "BEARER" { + return Parse(ah[7:], keyFunc) + } + } + + // Look for "access_token" parameter + req.ParseMultipartForm(10e6) + if tokStr := req.Form.Get("access_token"); tokStr != "" { + return Parse(tokStr, keyFunc) + } + + return nil, ErrNoTokenInRequest + +} + +// Encode JWT specific base64url encoding with padding stripped +func EncodeSegment(seg []byte) string { + return strings.TrimRight(base64.URLEncoding.EncodeToString(seg), "=") +} + +// Decode JWT specific base64url encoding with padding stripped +func DecodeSegment(seg string) ([]byte, error) { + if l := len(seg) % 4; l > 0 { + seg += strings.Repeat("=", 4-l) + } + + return base64.URLEncoding.DecodeString(seg) +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/golang.org/x/crypto/pkcs12/bmp-string.go b/vendor/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/golang.org/x/crypto/pkcs12/bmp-string.go new file mode 100644 index 000000000..284d2a68f --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/golang.org/x/crypto/pkcs12/bmp-string.go @@ -0,0 +1,50 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package pkcs12 + +import ( + "errors" + "unicode/utf16" +) + +// bmpString returns s encoded in UCS-2 with a zero terminator. +func bmpString(s string) ([]byte, error) { + // References: + // https://tools.ietf.org/html/rfc7292#appendix-B.1 + // http://en.wikipedia.org/wiki/Plane_(Unicode)#Basic_Multilingual_Plane + // - non-BMP characters are encoded in UTF 16 by using a surrogate pair of 16-bit codes + // EncodeRune returns 0xfffd if the rune does not need special encoding + // - the above RFC provides the info that BMPStrings are NULL terminated. 
+ + ret := make([]byte, 0, 2*len(s)+2) + + for _, r := range s { + if t, _ := utf16.EncodeRune(r); t != 0xfffd { + return nil, errors.New("pkcs12: string contains characters that cannot be encoded in UCS-2") + } + ret = append(ret, byte(r/256), byte(r%256)) + } + + return append(ret, 0, 0), nil +} + +func decodeBMPString(bmpString []byte) (string, error) { + if len(bmpString)%2 != 0 { + return "", errors.New("pkcs12: odd-length BMP string") + } + + // strip terminator if present + if l := len(bmpString); l >= 2 && bmpString[l-1] == 0 && bmpString[l-2] == 0 { + bmpString = bmpString[:l-2] + } + + s := make([]uint16, 0, len(bmpString)/2) + for len(bmpString) > 0 { + s = append(s, uint16(bmpString[0])<<8+uint16(bmpString[1])) + bmpString = bmpString[2:] + } + + return string(utf16.Decode(s)), nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/golang.org/x/crypto/pkcs12/crypto.go b/vendor/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/golang.org/x/crypto/pkcs12/crypto.go new file mode 100644 index 000000000..2bb8e2470 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/golang.org/x/crypto/pkcs12/crypto.go @@ -0,0 +1,131 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package pkcs12 + +import ( + "bytes" + "crypto/cipher" + "crypto/des" + "crypto/x509/pkix" + "encoding/asn1" + "errors" + + "github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/golang.org/x/crypto/pkcs12/internal/rc2" +) + +var ( + oidPBEWithSHAAnd3KeyTripleDESCBC = asn1.ObjectIdentifier([]int{1, 2, 840, 113549, 1, 12, 1, 3}) + oidPBEWithSHAAnd40BitRC2CBC = asn1.ObjectIdentifier([]int{1, 2, 840, 113549, 1, 12, 1, 6}) +) + +// pbeCipher is an abstraction of a PKCS#12 cipher. +type pbeCipher interface { + // create returns a cipher.Block given a key. + create(key []byte) (cipher.Block, error) + // deriveKey returns a key derived from the given password and salt. + deriveKey(salt, password []byte, iterations int) []byte + // deriveKey returns an IV derived from the given password and salt. 
+ deriveIV(salt, password []byte, iterations int) []byte +} + +type shaWithTripleDESCBC struct{} + +func (shaWithTripleDESCBC) create(key []byte) (cipher.Block, error) { + return des.NewTripleDESCipher(key) +} + +func (shaWithTripleDESCBC) deriveKey(salt, password []byte, iterations int) []byte { + return pbkdf(sha1Sum, 20, 64, salt, password, iterations, 1, 24) +} + +func (shaWithTripleDESCBC) deriveIV(salt, password []byte, iterations int) []byte { + return pbkdf(sha1Sum, 20, 64, salt, password, iterations, 2, 8) +} + +type shaWith40BitRC2CBC struct{} + +func (shaWith40BitRC2CBC) create(key []byte) (cipher.Block, error) { + return rc2.New(key, len(key)*8) +} + +func (shaWith40BitRC2CBC) deriveKey(salt, password []byte, iterations int) []byte { + return pbkdf(sha1Sum, 20, 64, salt, password, iterations, 1, 5) +} + +func (shaWith40BitRC2CBC) deriveIV(salt, password []byte, iterations int) []byte { + return pbkdf(sha1Sum, 20, 64, salt, password, iterations, 2, 8) +} + +type pbeParams struct { + Salt []byte + Iterations int +} + +func pbDecrypterFor(algorithm pkix.AlgorithmIdentifier, password []byte) (cipher.BlockMode, int, error) { + var cipherType pbeCipher + + switch { + case algorithm.Algorithm.Equal(oidPBEWithSHAAnd3KeyTripleDESCBC): + cipherType = shaWithTripleDESCBC{} + case algorithm.Algorithm.Equal(oidPBEWithSHAAnd40BitRC2CBC): + cipherType = shaWith40BitRC2CBC{} + default: + return nil, 0, NotImplementedError("algorithm " + algorithm.Algorithm.String() + " is not supported") + } + + var params pbeParams + if err := unmarshal(algorithm.Parameters.FullBytes, ¶ms); err != nil { + return nil, 0, err + } + + key := cipherType.deriveKey(params.Salt, password, params.Iterations) + iv := cipherType.deriveIV(params.Salt, password, params.Iterations) + + block, err := cipherType.create(key) + if err != nil { + return nil, 0, err + } + + return cipher.NewCBCDecrypter(block, iv), block.BlockSize(), nil +} + +func pbDecrypt(info decryptable, password []byte) (decrypted []byte, err error) { + cbc, blockSize, err := pbDecrypterFor(info.Algorithm(), password) + if err != nil { + return nil, err + } + + encrypted := info.Data() + if len(encrypted) == 0 { + return nil, errors.New("pkcs12: empty encrypted data") + } + if len(encrypted)%blockSize != 0 { + return nil, errors.New("pkcs12: input is not a multiple of the block size") + } + decrypted = make([]byte, len(encrypted)) + cbc.CryptBlocks(decrypted, encrypted) + + psLen := int(decrypted[len(decrypted)-1]) + if psLen == 0 || psLen > blockSize { + return nil, ErrDecryption + } + + if len(decrypted) < psLen { + return nil, ErrDecryption + } + ps := decrypted[len(decrypted)-psLen:] + decrypted = decrypted[:len(decrypted)-psLen] + if bytes.Compare(ps, bytes.Repeat([]byte{byte(psLen)}, psLen)) != 0 { + return nil, ErrDecryption + } + + return +} + +// decryptable abstracts a object that contains ciphertext. +type decryptable interface { + Algorithm() pkix.AlgorithmIdentifier + Data() []byte +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/golang.org/x/crypto/pkcs12/errors.go b/vendor/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/golang.org/x/crypto/pkcs12/errors.go new file mode 100644 index 000000000..7377ce6fb --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/golang.org/x/crypto/pkcs12/errors.go @@ -0,0 +1,23 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package pkcs12 + +import "errors" + +var ( + // ErrDecryption represents a failure to decrypt the input. + ErrDecryption = errors.New("pkcs12: decryption error, incorrect padding") + + // ErrIncorrectPassword is returned when an incorrect password is detected. + // Usually, P12/PFX data is signed to be able to verify the password. + ErrIncorrectPassword = errors.New("pkcs12: decryption password incorrect") +) + +// NotImplementedError indicates that the input is not currently supported. +type NotImplementedError string + +func (e NotImplementedError) Error() string { + return "pkcs12: " + string(e) +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/golang.org/x/crypto/pkcs12/internal/rc2/rc2.go b/vendor/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/golang.org/x/crypto/pkcs12/internal/rc2/rc2.go new file mode 100644 index 000000000..8c7090258 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/golang.org/x/crypto/pkcs12/internal/rc2/rc2.go @@ -0,0 +1,274 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package rc2 implements the RC2 cipher +/* +https://www.ietf.org/rfc/rfc2268.txt +http://people.csail.mit.edu/rivest/pubs/KRRR98.pdf + +This code is licensed under the MIT license. +*/ +package rc2 + +import ( + "crypto/cipher" + "encoding/binary" +) + +// The rc2 block size in bytes +const BlockSize = 8 + +type rc2Cipher struct { + k [64]uint16 +} + +// New returns a new rc2 cipher with the given key and effective key length t1 +func New(key []byte, t1 int) (cipher.Block, error) { + // TODO(dgryski): error checking for key length + return &rc2Cipher{ + k: expandKey(key, t1), + }, nil +} + +func (*rc2Cipher) BlockSize() int { return BlockSize } + +var piTable = [256]byte{ + 0xd9, 0x78, 0xf9, 0xc4, 0x19, 0xdd, 0xb5, 0xed, 0x28, 0xe9, 0xfd, 0x79, 0x4a, 0xa0, 0xd8, 0x9d, + 0xc6, 0x7e, 0x37, 0x83, 0x2b, 0x76, 0x53, 0x8e, 0x62, 0x4c, 0x64, 0x88, 0x44, 0x8b, 0xfb, 0xa2, + 0x17, 0x9a, 0x59, 0xf5, 0x87, 0xb3, 0x4f, 0x13, 0x61, 0x45, 0x6d, 0x8d, 0x09, 0x81, 0x7d, 0x32, + 0xbd, 0x8f, 0x40, 0xeb, 0x86, 0xb7, 0x7b, 0x0b, 0xf0, 0x95, 0x21, 0x22, 0x5c, 0x6b, 0x4e, 0x82, + 0x54, 0xd6, 0x65, 0x93, 0xce, 0x60, 0xb2, 0x1c, 0x73, 0x56, 0xc0, 0x14, 0xa7, 0x8c, 0xf1, 0xdc, + 0x12, 0x75, 0xca, 0x1f, 0x3b, 0xbe, 0xe4, 0xd1, 0x42, 0x3d, 0xd4, 0x30, 0xa3, 0x3c, 0xb6, 0x26, + 0x6f, 0xbf, 0x0e, 0xda, 0x46, 0x69, 0x07, 0x57, 0x27, 0xf2, 0x1d, 0x9b, 0xbc, 0x94, 0x43, 0x03, + 0xf8, 0x11, 0xc7, 0xf6, 0x90, 0xef, 0x3e, 0xe7, 0x06, 0xc3, 0xd5, 0x2f, 0xc8, 0x66, 0x1e, 0xd7, + 0x08, 0xe8, 0xea, 0xde, 0x80, 0x52, 0xee, 0xf7, 0x84, 0xaa, 0x72, 0xac, 0x35, 0x4d, 0x6a, 0x2a, + 0x96, 0x1a, 0xd2, 0x71, 0x5a, 0x15, 0x49, 0x74, 0x4b, 0x9f, 0xd0, 0x5e, 0x04, 0x18, 0xa4, 0xec, + 0xc2, 0xe0, 0x41, 0x6e, 0x0f, 0x51, 0xcb, 0xcc, 0x24, 0x91, 0xaf, 0x50, 0xa1, 0xf4, 0x70, 0x39, + 0x99, 0x7c, 0x3a, 0x85, 0x23, 0xb8, 0xb4, 0x7a, 0xfc, 0x02, 0x36, 0x5b, 0x25, 0x55, 0x97, 0x31, + 0x2d, 0x5d, 0xfa, 0x98, 0xe3, 0x8a, 0x92, 0xae, 0x05, 0xdf, 0x29, 0x10, 0x67, 0x6c, 0xba, 0xc9, + 0xd3, 0x00, 0xe6, 0xcf, 0xe1, 0x9e, 0xa8, 0x2c, 0x63, 0x16, 0x01, 0x3f, 0x58, 0xe2, 0x89, 0xa9, + 0x0d, 0x38, 0x34, 0x1b, 0xab, 0x33, 0xff, 0xb0, 0xbb, 0x48, 0x0c, 0x5f, 0xb9, 0xb1, 0xcd, 0x2e, + 0xc5, 0xf3, 0xdb, 0x47, 0xe5, 0xa5, 0x9c, 0x77, 0x0a, 0xa6, 0x20, 0x68, 0xfe, 0x7f, 0xc1, 0xad, +} + +func expandKey(key []byte, t1 int) [64]uint16 { + + l := make([]byte, 128) + copy(l, key) 
+ + var t = len(key) + var t8 = (t1 + 7) / 8 + var tm = byte(255 % uint(1<<(8+uint(t1)-8*uint(t8)))) + + for i := len(key); i < 128; i++ { + l[i] = piTable[l[i-1]+l[uint8(i-t)]] + } + + l[128-t8] = piTable[l[128-t8]&tm] + + for i := 127 - t8; i >= 0; i-- { + l[i] = piTable[l[i+1]^l[i+t8]] + } + + var k [64]uint16 + + for i := range k { + k[i] = uint16(l[2*i]) + uint16(l[2*i+1])*256 + } + + return k +} + +func rotl16(x uint16, b uint) uint16 { + return (x >> (16 - b)) | (x << b) +} + +func (c *rc2Cipher) Encrypt(dst, src []byte) { + + r0 := binary.LittleEndian.Uint16(src[0:]) + r1 := binary.LittleEndian.Uint16(src[2:]) + r2 := binary.LittleEndian.Uint16(src[4:]) + r3 := binary.LittleEndian.Uint16(src[6:]) + + var j int + + for j <= 16 { + // mix r0 + r0 = r0 + c.k[j] + (r3 & r2) + ((^r3) & r1) + r0 = rotl16(r0, 1) + j++ + + // mix r1 + r1 = r1 + c.k[j] + (r0 & r3) + ((^r0) & r2) + r1 = rotl16(r1, 2) + j++ + + // mix r2 + r2 = r2 + c.k[j] + (r1 & r0) + ((^r1) & r3) + r2 = rotl16(r2, 3) + j++ + + // mix r3 + r3 = r3 + c.k[j] + (r2 & r1) + ((^r2) & r0) + r3 = rotl16(r3, 5) + j++ + + } + + r0 = r0 + c.k[r3&63] + r1 = r1 + c.k[r0&63] + r2 = r2 + c.k[r1&63] + r3 = r3 + c.k[r2&63] + + for j <= 40 { + + // mix r0 + r0 = r0 + c.k[j] + (r3 & r2) + ((^r3) & r1) + r0 = rotl16(r0, 1) + j++ + + // mix r1 + r1 = r1 + c.k[j] + (r0 & r3) + ((^r0) & r2) + r1 = rotl16(r1, 2) + j++ + + // mix r2 + r2 = r2 + c.k[j] + (r1 & r0) + ((^r1) & r3) + r2 = rotl16(r2, 3) + j++ + + // mix r3 + r3 = r3 + c.k[j] + (r2 & r1) + ((^r2) & r0) + r3 = rotl16(r3, 5) + j++ + + } + + r0 = r0 + c.k[r3&63] + r1 = r1 + c.k[r0&63] + r2 = r2 + c.k[r1&63] + r3 = r3 + c.k[r2&63] + + for j <= 60 { + + // mix r0 + r0 = r0 + c.k[j] + (r3 & r2) + ((^r3) & r1) + r0 = rotl16(r0, 1) + j++ + + // mix r1 + r1 = r1 + c.k[j] + (r0 & r3) + ((^r0) & r2) + r1 = rotl16(r1, 2) + j++ + + // mix r2 + r2 = r2 + c.k[j] + (r1 & r0) + ((^r1) & r3) + r2 = rotl16(r2, 3) + j++ + + // mix r3 + r3 = r3 + c.k[j] + (r2 & r1) + ((^r2) & r0) + r3 = rotl16(r3, 5) + j++ + } + + binary.LittleEndian.PutUint16(dst[0:], r0) + binary.LittleEndian.PutUint16(dst[2:], r1) + binary.LittleEndian.PutUint16(dst[4:], r2) + binary.LittleEndian.PutUint16(dst[6:], r3) +} + +func (c *rc2Cipher) Decrypt(dst, src []byte) { + + r0 := binary.LittleEndian.Uint16(src[0:]) + r1 := binary.LittleEndian.Uint16(src[2:]) + r2 := binary.LittleEndian.Uint16(src[4:]) + r3 := binary.LittleEndian.Uint16(src[6:]) + + j := 63 + + for j >= 44 { + // unmix r3 + r3 = rotl16(r3, 16-5) + r3 = r3 - c.k[j] - (r2 & r1) - ((^r2) & r0) + j-- + + // unmix r2 + r2 = rotl16(r2, 16-3) + r2 = r2 - c.k[j] - (r1 & r0) - ((^r1) & r3) + j-- + + // unmix r1 + r1 = rotl16(r1, 16-2) + r1 = r1 - c.k[j] - (r0 & r3) - ((^r0) & r2) + j-- + + // unmix r0 + r0 = rotl16(r0, 16-1) + r0 = r0 - c.k[j] - (r3 & r2) - ((^r3) & r1) + j-- + } + + r3 = r3 - c.k[r2&63] + r2 = r2 - c.k[r1&63] + r1 = r1 - c.k[r0&63] + r0 = r0 - c.k[r3&63] + + for j >= 20 { + // unmix r3 + r3 = rotl16(r3, 16-5) + r3 = r3 - c.k[j] - (r2 & r1) - ((^r2) & r0) + j-- + + // unmix r2 + r2 = rotl16(r2, 16-3) + r2 = r2 - c.k[j] - (r1 & r0) - ((^r1) & r3) + j-- + + // unmix r1 + r1 = rotl16(r1, 16-2) + r1 = r1 - c.k[j] - (r0 & r3) - ((^r0) & r2) + j-- + + // unmix r0 + r0 = rotl16(r0, 16-1) + r0 = r0 - c.k[j] - (r3 & r2) - ((^r3) & r1) + j-- + + } + + r3 = r3 - c.k[r2&63] + r2 = r2 - c.k[r1&63] + r1 = r1 - c.k[r0&63] + r0 = r0 - c.k[r3&63] + + for j >= 0 { + + // unmix r3 + r3 = rotl16(r3, 16-5) + r3 = r3 - c.k[j] - (r2 & r1) - ((^r2) & r0) + j-- + + // unmix r2 + r2 = 
rotl16(r2, 16-3) + r2 = r2 - c.k[j] - (r1 & r0) - ((^r1) & r3) + j-- + + // unmix r1 + r1 = rotl16(r1, 16-2) + r1 = r1 - c.k[j] - (r0 & r3) - ((^r0) & r2) + j-- + + // unmix r0 + r0 = rotl16(r0, 16-1) + r0 = r0 - c.k[j] - (r3 & r2) - ((^r3) & r1) + j-- + + } + + binary.LittleEndian.PutUint16(dst[0:], r0) + binary.LittleEndian.PutUint16(dst[2:], r1) + binary.LittleEndian.PutUint16(dst[4:], r2) + binary.LittleEndian.PutUint16(dst[6:], r3) +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/golang.org/x/crypto/pkcs12/mac.go b/vendor/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/golang.org/x/crypto/pkcs12/mac.go new file mode 100644 index 000000000..5f38aa7de --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/golang.org/x/crypto/pkcs12/mac.go @@ -0,0 +1,45 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package pkcs12 + +import ( + "crypto/hmac" + "crypto/sha1" + "crypto/x509/pkix" + "encoding/asn1" +) + +type macData struct { + Mac digestInfo + MacSalt []byte + Iterations int `asn1:"optional,default:1"` +} + +// from PKCS#7: +type digestInfo struct { + Algorithm pkix.AlgorithmIdentifier + Digest []byte +} + +var ( + oidSHA1 = asn1.ObjectIdentifier([]int{1, 3, 14, 3, 2, 26}) +) + +func verifyMac(macData *macData, message, password []byte) error { + if !macData.Mac.Algorithm.Algorithm.Equal(oidSHA1) { + return NotImplementedError("unknown digest algorithm: " + macData.Mac.Algorithm.Algorithm.String()) + } + + key := pbkdf(sha1Sum, 20, 64, macData.MacSalt, password, macData.Iterations, 3, 20) + + mac := hmac.New(sha1.New, key) + mac.Write(message) + expectedMAC := mac.Sum(nil) + + if !hmac.Equal(macData.Mac.Digest, expectedMAC) { + return ErrIncorrectPassword + } + return nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/golang.org/x/crypto/pkcs12/pbkdf.go b/vendor/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/golang.org/x/crypto/pkcs12/pbkdf.go new file mode 100644 index 000000000..5c419d41e --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/golang.org/x/crypto/pkcs12/pbkdf.go @@ -0,0 +1,170 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package pkcs12 + +import ( + "bytes" + "crypto/sha1" + "math/big" +) + +var ( + one = big.NewInt(1) +) + +// sha1Sum returns the SHA-1 hash of in. +func sha1Sum(in []byte) []byte { + sum := sha1.Sum(in) + return sum[:] +} + +// fillWithRepeats returns v*ceiling(len(pattern) / v) bytes consisting of +// repeats of pattern. +func fillWithRepeats(pattern []byte, v int) []byte { + if len(pattern) == 0 { + return nil + } + outputLen := v * ((len(pattern) + v - 1) / v) + return bytes.Repeat(pattern, (outputLen+len(pattern)-1)/len(pattern))[:outputLen] +} + +func pbkdf(hash func([]byte) []byte, u, v int, salt, password []byte, r int, ID byte, size int) (key []byte) { + // implementation of https://tools.ietf.org/html/rfc7292#appendix-B.2 , RFC text verbatim in comments + + // Let H be a hash function built around a compression function f: + + // Z_2^u x Z_2^v -> Z_2^u + + // (that is, H has a chaining variable and output of length u bits, and + // the message input to the compression function of H is v bits). 
The + // values for u and v are as follows: + + // HASH FUNCTION VALUE u VALUE v + // MD2, MD5 128 512 + // SHA-1 160 512 + // SHA-224 224 512 + // SHA-256 256 512 + // SHA-384 384 1024 + // SHA-512 512 1024 + // SHA-512/224 224 1024 + // SHA-512/256 256 1024 + + // Furthermore, let r be the iteration count. + + // We assume here that u and v are both multiples of 8, as are the + // lengths of the password and salt strings (which we denote by p and s, + // respectively) and the number n of pseudorandom bits required. In + // addition, u and v are of course non-zero. + + // For information on security considerations for MD5 [19], see [25] and + // [1], and on those for MD2, see [18]. + + // The following procedure can be used to produce pseudorandom bits for + // a particular "purpose" that is identified by a byte called "ID". + // This standard specifies 3 different values for the ID byte: + + // 1. If ID=1, then the pseudorandom bits being produced are to be used + // as key material for performing encryption or decryption. + + // 2. If ID=2, then the pseudorandom bits being produced are to be used + // as an IV (Initial Value) for encryption or decryption. + + // 3. If ID=3, then the pseudorandom bits being produced are to be used + // as an integrity key for MACing. + + // 1. Construct a string, D (the "diversifier"), by concatenating v/8 + // copies of ID. + var D []byte + for i := 0; i < v; i++ { + D = append(D, ID) + } + + // 2. Concatenate copies of the salt together to create a string S of + // length v(ceiling(s/v)) bits (the final copy of the salt may be + // truncated to create S). Note that if the salt is the empty + // string, then so is S. + + S := fillWithRepeats(salt, v) + + // 3. Concatenate copies of the password together to create a string P + // of length v(ceiling(p/v)) bits (the final copy of the password + // may be truncated to create P). Note that if the password is the + // empty string, then so is P. + + P := fillWithRepeats(password, v) + + // 4. Set I=S||P to be the concatenation of S and P. + I := append(S, P...) + + // 5. Set c=ceiling(n/u). + c := (size + u - 1) / u + + // 6. For i=1, 2, ..., c, do the following: + A := make([]byte, c*20) + var IjBuf []byte + for i := 0; i < c; i++ { + // A. Set A2=H^r(D||I). (i.e., the r-th hash of D||1, + // H(H(H(... H(D||I)))) + Ai := hash(append(D, I...)) + for j := 1; j < r; j++ { + Ai = hash(Ai) + } + copy(A[i*20:], Ai[:]) + + if i < c-1 { // skip on last iteration + // B. Concatenate copies of Ai to create a string B of length v + // bits (the final copy of Ai may be truncated to create B). + var B []byte + for len(B) < v { + B = append(B, Ai[:]...) + } + B = B[:v] + + // C. Treating I as a concatenation I_0, I_1, ..., I_(k-1) of v-bit + // blocks, where k=ceiling(s/v)+ceiling(p/v), modify I by + // setting I_j=(I_j+B+1) mod 2^v for each j. + { + Bbi := new(big.Int).SetBytes(B) + Ij := new(big.Int) + + for j := 0; j < len(I)/v; j++ { + Ij.SetBytes(I[j*v : (j+1)*v]) + Ij.Add(Ij, Bbi) + Ij.Add(Ij, one) + Ijb := Ij.Bytes() + // We expect Ijb to be exactly v bytes, + // if it is longer or shorter we must + // adjust it accordingly. + if len(Ijb) > v { + Ijb = Ijb[len(Ijb)-v:] + } + if len(Ijb) < v { + if IjBuf == nil { + IjBuf = make([]byte, v) + } + bytesShort := v - len(Ijb) + for i := 0; i < bytesShort; i++ { + IjBuf[i] = 0 + } + copy(IjBuf[bytesShort:], Ijb) + Ijb = IjBuf + } + copy(I[j*v:(j+1)*v], Ijb) + } + } + } + } + // 7. Concatenate A_1, A_2, ..., A_c together to form a pseudorandom + // bit string, A. 
+ + // 8. Use the first n bits of A as the output of this entire process. + return A[:size] + + // If the above process is being used to generate a DES key, the process + // should be used to create 64 random bits, and the key's parity bits + // should be set after the 64 bits have been produced. Similar concerns + // hold for 2-key and 3-key triple-DES keys, for CDMF keys, and for any + // similar keys with parity bits "built into them". +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/golang.org/x/crypto/pkcs12/pkcs12.go b/vendor/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/golang.org/x/crypto/pkcs12/pkcs12.go new file mode 100644 index 000000000..e8e179988 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/golang.org/x/crypto/pkcs12/pkcs12.go @@ -0,0 +1,342 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package pkcs12 implements some of PKCS#12. +// +// This implementation is distilled from https://tools.ietf.org/html/rfc7292 +// and referenced documents. It is intended for decoding P12/PFX-stored +// certificates and keys for use with the crypto/tls package. +package pkcs12 + +import ( + "crypto/ecdsa" + "crypto/rsa" + "crypto/x509" + "crypto/x509/pkix" + "encoding/asn1" + "encoding/hex" + "encoding/pem" + "errors" +) + +var ( + oidDataContentType = asn1.ObjectIdentifier([]int{1, 2, 840, 113549, 1, 7, 1}) + oidEncryptedDataContentType = asn1.ObjectIdentifier([]int{1, 2, 840, 113549, 1, 7, 6}) + + oidFriendlyName = asn1.ObjectIdentifier([]int{1, 2, 840, 113549, 1, 9, 20}) + oidLocalKeyID = asn1.ObjectIdentifier([]int{1, 2, 840, 113549, 1, 9, 21}) + oidMicrosoftCSPName = asn1.ObjectIdentifier([]int{1, 3, 6, 1, 4, 1, 311, 17, 1}) +) + +type pfxPdu struct { + Version int + AuthSafe contentInfo + MacData macData `asn1:"optional"` +} + +type contentInfo struct { + ContentType asn1.ObjectIdentifier + Content asn1.RawValue `asn1:"tag:0,explicit,optional"` +} + +type encryptedData struct { + Version int + EncryptedContentInfo encryptedContentInfo +} + +type encryptedContentInfo struct { + ContentType asn1.ObjectIdentifier + ContentEncryptionAlgorithm pkix.AlgorithmIdentifier + EncryptedContent []byte `asn1:"tag:0,optional"` +} + +func (i encryptedContentInfo) Algorithm() pkix.AlgorithmIdentifier { + return i.ContentEncryptionAlgorithm +} + +func (i encryptedContentInfo) Data() []byte { return i.EncryptedContent } + +type safeBag struct { + Id asn1.ObjectIdentifier + Value asn1.RawValue `asn1:"tag:0,explicit"` + Attributes []pkcs12Attribute `asn1:"set,optional"` +} + +type pkcs12Attribute struct { + Id asn1.ObjectIdentifier + Value asn1.RawValue `ans1:"set"` +} + +type encryptedPrivateKeyInfo struct { + AlgorithmIdentifier pkix.AlgorithmIdentifier + EncryptedData []byte +} + +func (i encryptedPrivateKeyInfo) Algorithm() pkix.AlgorithmIdentifier { + return i.AlgorithmIdentifier +} + +func (i encryptedPrivateKeyInfo) Data() []byte { + return i.EncryptedData +} + +// PEM block types +const ( + certificateType = "CERTIFICATE" + privateKeyType = "PRIVATE KEY" +) + +// unmarshal calls asn1.Unmarshal, but also returns an error if there is any +// trailing data after unmarshaling. 
+func unmarshal(in []byte, out interface{}) error { + trailing, err := asn1.Unmarshal(in, out) + if err != nil { + return err + } + if len(trailing) != 0 { + return errors.New("pkcs12: trailing data found") + } + return nil +} + +// ConvertToPEM converts all "safe bags" contained in pfxData to PEM blocks. +func ToPEM(pfxData []byte, password string) ([]*pem.Block, error) { + encodedPassword, err := bmpString(password) + if err != nil { + return nil, ErrIncorrectPassword + } + + bags, encodedPassword, err := getSafeContents(pfxData, encodedPassword) + + blocks := make([]*pem.Block, 0, len(bags)) + for _, bag := range bags { + block, err := convertBag(&bag, encodedPassword) + if err != nil { + return nil, err + } + blocks = append(blocks, block) + } + + return blocks, nil +} + +func convertBag(bag *safeBag, password []byte) (*pem.Block, error) { + block := &pem.Block{ + Headers: make(map[string]string), + } + + for _, attribute := range bag.Attributes { + k, v, err := convertAttribute(&attribute) + if err != nil { + return nil, err + } + block.Headers[k] = v + } + + switch { + case bag.Id.Equal(oidCertBag): + block.Type = certificateType + certsData, err := decodeCertBag(bag.Value.Bytes) + if err != nil { + return nil, err + } + block.Bytes = certsData + case bag.Id.Equal(oidPKCS8ShroundedKeyBag): + block.Type = privateKeyType + + key, err := decodePkcs8ShroudedKeyBag(bag.Value.Bytes, password) + if err != nil { + return nil, err + } + + switch key := key.(type) { + case *rsa.PrivateKey: + block.Bytes = x509.MarshalPKCS1PrivateKey(key) + case *ecdsa.PrivateKey: + block.Bytes, err = x509.MarshalECPrivateKey(key) + if err != nil { + return nil, err + } + default: + return nil, errors.New("found unknown private key type in PKCS#8 wrapping") + } + default: + return nil, errors.New("don't know how to convert a safe bag of type " + bag.Id.String()) + } + return block, nil +} + +func convertAttribute(attribute *pkcs12Attribute) (key, value string, err error) { + isString := false + + switch { + case attribute.Id.Equal(oidFriendlyName): + key = "friendlyName" + isString = true + case attribute.Id.Equal(oidLocalKeyID): + key = "localKeyId" + case attribute.Id.Equal(oidMicrosoftCSPName): + // This key is chosen to match OpenSSL. + key = "Microsoft CSP Name" + isString = true + default: + return "", "", errors.New("pkcs12: unknown attribute with OID " + attribute.Id.String()) + } + + if isString { + if err := unmarshal(attribute.Value.Bytes, &attribute.Value); err != nil { + return "", "", err + } + if value, err = decodeBMPString(attribute.Value.Bytes); err != nil { + return "", "", err + } + } else { + var id []byte + if err := unmarshal(attribute.Value.Bytes, &id); err != nil { + return "", "", err + } + value = hex.EncodeToString(id) + } + + return key, value, nil +} + +// Decode extracts a certificate and private key from pfxData. This function +// assumes that there is only one certificate and only one private key in the +// pfxData. 
+func Decode(pfxData []byte, password string) (privateKey interface{}, certificate *x509.Certificate, err error) {
+	encodedPassword, err := bmpString(password)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	bags, encodedPassword, err := getSafeContents(pfxData, encodedPassword)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	if len(bags) != 2 {
+		err = errors.New("pkcs12: expected exactly two safe bags in the PFX PDU")
+		return
+	}
+
+	for _, bag := range bags {
+		switch {
+		case bag.Id.Equal(oidCertBag):
+			if certificate != nil {
+				return nil, nil, errors.New("pkcs12: expected exactly one certificate bag")
+			}
+
+			certsData, err := decodeCertBag(bag.Value.Bytes)
+			if err != nil {
+				return nil, nil, err
+			}
+			certs, err := x509.ParseCertificates(certsData)
+			if err != nil {
+				return nil, nil, err
+			}
+			if len(certs) != 1 {
+				err = errors.New("pkcs12: expected exactly one certificate in the certBag")
+				return nil, nil, err
+			}
+			certificate = certs[0]
+
+		case bag.Id.Equal(oidPKCS8ShroundedKeyBag):
+			if privateKey != nil {
+				return nil, nil, errors.New("pkcs12: expected exactly one key bag")
+			}
+
+			if privateKey, err = decodePkcs8ShroudedKeyBag(bag.Value.Bytes, encodedPassword); err != nil {
+				return nil, nil, err
+			}
+		}
+	}
+
+	if certificate == nil {
+		return nil, nil, errors.New("pkcs12: certificate missing")
+	}
+	if privateKey == nil {
+		return nil, nil, errors.New("pkcs12: private key missing")
+	}
+
+	return
+}
+
+func getSafeContents(p12Data, password []byte) (bags []safeBag, updatedPassword []byte, err error) {
+	pfx := new(pfxPdu)
+	if err := unmarshal(p12Data, pfx); err != nil {
+		return nil, nil, errors.New("pkcs12: error reading P12 data: " + err.Error())
+	}
+
+	if pfx.Version != 3 {
+		return nil, nil, NotImplementedError("can only decode v3 PFX PDUs")
+	}
+
+	if !pfx.AuthSafe.ContentType.Equal(oidDataContentType) {
+		return nil, nil, NotImplementedError("only password-protected PFX is implemented")
+	}
+
+	// unmarshal the explicit bytes in the content for type 'data'
+	if err := unmarshal(pfx.AuthSafe.Content.Bytes, &pfx.AuthSafe.Content); err != nil {
+		return nil, nil, err
+	}
+
+	if len(pfx.MacData.Mac.Algorithm.Algorithm) == 0 {
+		return nil, nil, errors.New("pkcs12: no MAC in data")
+	}
+
+	if err := verifyMac(&pfx.MacData, pfx.AuthSafe.Content.Bytes, password); err != nil {
+		if err == ErrIncorrectPassword && len(password) == 2 && password[0] == 0 && password[1] == 0 {
+			// Some implementations use an empty byte array for the
+			// empty string password; try one more time with a nil
+			// password.
+			password = nil
+			err = verifyMac(&pfx.MacData, pfx.AuthSafe.Content.Bytes, password)
+		}
+		if err != nil {
+			return nil, nil, err
+		}
+	}
+
+	var authenticatedSafe []contentInfo
+	if err := unmarshal(pfx.AuthSafe.Content.Bytes, &authenticatedSafe); err != nil {
+		return nil, nil, err
+	}
+
+	if len(authenticatedSafe) != 2 {
+		return nil, nil, NotImplementedError("expected exactly two items in the authenticated safe")
+	}
+
+	for _, ci := range authenticatedSafe {
+		var data []byte
+
+		switch {
+		case ci.ContentType.Equal(oidDataContentType):
+			if err := unmarshal(ci.Content.Bytes, &data); err != nil {
+				return nil, nil, err
+			}
+		case ci.ContentType.Equal(oidEncryptedDataContentType):
+			var encryptedData encryptedData
+			if err := unmarshal(ci.Content.Bytes, &encryptedData); err != nil {
+				return nil, nil, err
+			}
+			if encryptedData.Version != 0 {
+				return nil, nil, NotImplementedError("only version 0 of EncryptedData is supported")
+			}
+			if data, err = pbDecrypt(encryptedData.EncryptedContentInfo, password); err != nil {
+				return nil, nil, err
+			}
+		default:
+			return nil, nil, NotImplementedError("only data and encryptedData content types are supported in authenticated safe")
+		}
+
+		var safeContents []safeBag
+		if err := unmarshal(data, &safeContents); err != nil {
+			return nil, nil, err
+		}
+		bags = append(bags, safeContents...)
+	}
+
+	return bags, password, nil
+}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/golang.org/x/crypto/pkcs12/safebags.go b/vendor/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/golang.org/x/crypto/pkcs12/safebags.go
new file mode 100644
index 000000000..def1f7b98
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/golang.org/x/crypto/pkcs12/safebags.go
@@ -0,0 +1,57 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package pkcs12
+
+import (
+	"crypto/x509"
+	"encoding/asn1"
+	"errors"
+)
+
+var (
+	// see https://tools.ietf.org/html/rfc7292#appendix-D
+	oidCertTypeX509Certificate = asn1.ObjectIdentifier([]int{1, 2, 840, 113549, 1, 9, 22, 1})
+	oidPKCS8ShroundedKeyBag    = asn1.ObjectIdentifier([]int{1, 2, 840, 113549, 1, 12, 10, 1, 2})
+	oidCertBag                 = asn1.ObjectIdentifier([]int{1, 2, 840, 113549, 1, 12, 10, 1, 3})
+)
+
+type certBag struct {
+	Id   asn1.ObjectIdentifier
+	Data []byte `asn1:"tag:0,explicit"`
+}
+
+func decodePkcs8ShroudedKeyBag(asn1Data, password []byte) (privateKey interface{}, err error) {
+	pkinfo := new(encryptedPrivateKeyInfo)
+	if err = unmarshal(asn1Data, pkinfo); err != nil {
+		return nil, errors.New("pkcs12: error decoding PKCS#8 shrouded key bag: " + err.Error())
+	}
+
+	pkData, err := pbDecrypt(pkinfo, password)
+	if err != nil {
+		return nil, errors.New("pkcs12: error decrypting PKCS#8 shrouded key bag: " + err.Error())
+	}
+
+	ret := new(asn1.RawValue)
+	if err = unmarshal(pkData, ret); err != nil {
+		return nil, errors.New("pkcs12: error unmarshaling decrypted private key: " + err.Error())
+	}
+
+	if privateKey, err = x509.ParsePKCS8PrivateKey(pkData); err != nil {
+		return nil, errors.New("pkcs12: error parsing PKCS#8 private key: " + err.Error())
+	}
+
+	return privateKey, nil
+}
+
+func decodeCertBag(asn1Data []byte) (x509Certificates []byte, err error) {
+	bag := new(certBag)
+	if err := unmarshal(asn1Data, bag); err != nil {
+		return nil, errors.New("pkcs12: error decoding cert bag: " + err.Error())
+	}
+	if !bag.Id.Equal(oidCertTypeX509Certificate) {
+		return nil, NotImplementedError("only X509 certificates are supported")
+	}
+	return bag.Data, nil
+}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/cdn/client.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/cdn/client.go
new file mode 100644
index 000000000..7b9275709
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/cdn/client.go
@@ -0,0 +1,56 @@
+package cdn
+
+// Copyright (c) Microsoft and contributors. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Code generated by Microsoft (R) AutoRest Code Generator 0.12.0.0
+// Changes may cause incorrect behavior and will be lost if the code is
+// regenerated.
+
+import (
+	"github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest"
+)
+
+const (
+	// APIVersion is the version of the Cdn API.
+	APIVersion = "2015-06-01"
+
+	// DefaultBaseURI is the default base URI for the Cdn service.
+	DefaultBaseURI = "https://management.azure.com"
+)
+
+// ManagementClient is the base client for the Cdn package. Use these APIs
+// to manage Azure CDN resources through the Azure Resource Manager. You must
+// make sure that requests made to these resources are secure. For more
+// information, see Authenticating Azure Resource Manager requests.
+type ManagementClient struct {
+	autorest.Client
+	BaseURI        string
+	SubscriptionID string
+}
+
+// New creates an instance of the ManagementClient client.
+func New(subscriptionID string) ManagementClient {
+	return NewWithBaseURI(DefaultBaseURI, subscriptionID)
+}
+
+// NewWithBaseURI creates an instance of the ManagementClient client.
+func NewWithBaseURI(baseURI string, subscriptionID string) ManagementClient {
+	return ManagementClient{
+		Client:         autorest.NewClientWithUserAgent(UserAgent()),
+		BaseURI:        baseURI,
+		SubscriptionID: subscriptionID,
+	}
+}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/cdn/customdomains.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/cdn/customdomains.go
new file mode 100644
index 000000000..a81838580
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/cdn/customdomains.go
@@ -0,0 +1,388 @@
+package cdn
+
+// Copyright (c) Microsoft and contributors. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Code generated by Microsoft (R) AutoRest Code Generator 0.12.0.0
+// Changes may cause incorrect behavior and will be lost if the code is
+// regenerated.
+
+import (
+	"github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest"
+	"net/http"
+	"net/url"
+)
+
+// CustomDomainsClient is the client for custom domain operations. Use these
+// APIs to manage Azure CDN resources through the Azure Resource Manager. You
+// must make sure that requests made to these resources are secure. For more
+// information, see Authenticating Azure Resource Manager requests.
+type CustomDomainsClient struct {
+	ManagementClient
+}
+
+// NewCustomDomainsClient creates an instance of the CustomDomainsClient
+// client.
+func NewCustomDomainsClient(subscriptionID string) CustomDomainsClient {
+	return NewCustomDomainsClientWithBaseURI(DefaultBaseURI, subscriptionID)
+}
+
+// NewCustomDomainsClientWithBaseURI creates an instance of the
+// CustomDomainsClient client.
+func NewCustomDomainsClientWithBaseURI(baseURI string, subscriptionID string) CustomDomainsClient {
+	return CustomDomainsClient{NewWithBaseURI(baseURI, subscriptionID)}
+}
+
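Each generated operation below follows the same three-stage shape: a Preparer builds the http.Request, a Sender executes it, and a Responder decodes and closes the body, with the top-level method composing the three. A minimal sketch of both calling styles (the subscription ID and resource names are hypothetical, and a real call additionally needs client.Authorizer set to a go-autorest authorizer, which is omitted here):

	package main

	import (
		"log"

		"github.com/Azure/azure-sdk-for-go/arm/cdn"
	)

	func main() {
		// Hypothetical subscription ID and resource names.
		client := cdn.NewCustomDomainsClient("11111111-2222-3333-4444-555555555555")

		// One-shot call.
		result, err := client.Get("my-domain", "my-endpoint", "my-profile", "my-rg")
		if err != nil {
			log.Fatal(err)
		}

		// Equivalent manual composition of the three generated stages,
		// which is the hook for customizing preparation or transport.
		req, err := client.GetPreparer("my-domain", "my-endpoint", "my-profile", "my-rg")
		if err != nil {
			log.Fatal(err)
		}
		resp, err := client.GetSender(req)
		if err != nil {
			log.Fatal(err)
		}
		result, err = client.GetResponder(resp)
		if err != nil {
			log.Fatal(err)
		}
		_ = result
	}

+// Create sends the create request.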
+// +// customDomainName is name of the custom domain within an endpoint +// customDomainProperties is custom domain properties required for creation +// endpointName is name of the endpoint within the CDN profile profileName is +// name of the CDN profile within the resource group resourceGroupName is +// name of the resource group within the Azure subscription +func (client CustomDomainsClient) Create(customDomainName string, customDomainProperties CustomDomainParameters, endpointName string, profileName string, resourceGroupName string) (result CustomDomain, ae error) { + req, err := client.CreatePreparer(customDomainName, customDomainProperties, endpointName, profileName, resourceGroupName) + if err != nil { + return result, autorest.NewErrorWithError(err, "cdn/CustomDomainsClient", "Create", autorest.UndefinedStatusCode, "Failure preparing request") + } + + resp, err := client.CreateSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "cdn/CustomDomainsClient", "Create", resp.StatusCode, "Failure sending request") + } + + result, err = client.CreateResponder(resp) + if err != nil { + ae = autorest.NewErrorWithError(err, "cdn/CustomDomainsClient", "Create", resp.StatusCode, "Failure responding to request") + } + + return +} + +// CreatePreparer prepares the Create request. +func (client CustomDomainsClient) CreatePreparer(customDomainName string, customDomainProperties CustomDomainParameters, endpointName string, profileName string, resourceGroupName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "customDomainName": url.QueryEscape(customDomainName), + "endpointName": url.QueryEscape(endpointName), + "profileName": url.QueryEscape(profileName), + "resourceGroupName": url.QueryEscape(resourceGroupName), + "subscriptionId": url.QueryEscape(client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsPut(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Cdn/profiles/{profileName}/endpoints/{endpointName}/customDomains/{customDomainName}"), + autorest.WithJSON(customDomainProperties), + autorest.WithPathParameters(pathParameters), + autorest.WithQueryParameters(queryParameters)) +} + +// CreateSender sends the Create request. The method will close the +// http.Response Body if it receives an error. +func (client CustomDomainsClient) CreateSender(req *http.Request) (*http.Response, error) { + return client.Send(req, http.StatusOK, http.StatusCreated, http.StatusAccepted) +} + +// CreateResponder handles the response to the Create request. The method always +// closes the http.Response Body. +func (client CustomDomainsClient) CreateResponder(resp *http.Response) (result CustomDomain, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + autorest.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated, http.StatusAccepted), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// DeleteIfExists sends the delete if exists request. 
+// +// customDomainName is name of the custom domain within an endpoint +// endpointName is name of the endpoint within the CDN profile profileName is +// name of the CDN profile within the resource group resourceGroupName is +// name of the resource group within the Azure subscription +func (client CustomDomainsClient) DeleteIfExists(customDomainName string, endpointName string, profileName string, resourceGroupName string) (result CustomDomain, ae error) { + req, err := client.DeleteIfExistsPreparer(customDomainName, endpointName, profileName, resourceGroupName) + if err != nil { + return result, autorest.NewErrorWithError(err, "cdn/CustomDomainsClient", "DeleteIfExists", autorest.UndefinedStatusCode, "Failure preparing request") + } + + resp, err := client.DeleteIfExistsSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "cdn/CustomDomainsClient", "DeleteIfExists", resp.StatusCode, "Failure sending request") + } + + result, err = client.DeleteIfExistsResponder(resp) + if err != nil { + ae = autorest.NewErrorWithError(err, "cdn/CustomDomainsClient", "DeleteIfExists", resp.StatusCode, "Failure responding to request") + } + + return +} + +// DeleteIfExistsPreparer prepares the DeleteIfExists request. +func (client CustomDomainsClient) DeleteIfExistsPreparer(customDomainName string, endpointName string, profileName string, resourceGroupName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "customDomainName": url.QueryEscape(customDomainName), + "endpointName": url.QueryEscape(endpointName), + "profileName": url.QueryEscape(profileName), + "resourceGroupName": url.QueryEscape(resourceGroupName), + "subscriptionId": url.QueryEscape(client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsDelete(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Cdn/profiles/{profileName}/endpoints/{endpointName}/customDomains/{customDomainName}"), + autorest.WithPathParameters(pathParameters), + autorest.WithQueryParameters(queryParameters)) +} + +// DeleteIfExistsSender sends the DeleteIfExists request. The method will close the +// http.Response Body if it receives an error. +func (client CustomDomainsClient) DeleteIfExistsSender(req *http.Request) (*http.Response, error) { + return client.Send(req, http.StatusOK, http.StatusAccepted, http.StatusNoContent) +} + +// DeleteIfExistsResponder handles the response to the DeleteIfExists request. The method always +// closes the http.Response Body. +func (client CustomDomainsClient) DeleteIfExistsResponder(resp *http.Response) (result CustomDomain, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + autorest.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted, http.StatusNoContent), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// Get sends the get request. 
+// +// customDomainName is name of the custom domain within an endpoint +// endpointName is name of the endpoint within the CDN profile profileName is +// name of the CDN profile within the resource group resourceGroupName is +// name of the resource group within the Azure subscription +func (client CustomDomainsClient) Get(customDomainName string, endpointName string, profileName string, resourceGroupName string) (result CustomDomain, ae error) { + req, err := client.GetPreparer(customDomainName, endpointName, profileName, resourceGroupName) + if err != nil { + return result, autorest.NewErrorWithError(err, "cdn/CustomDomainsClient", "Get", autorest.UndefinedStatusCode, "Failure preparing request") + } + + resp, err := client.GetSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "cdn/CustomDomainsClient", "Get", resp.StatusCode, "Failure sending request") + } + + result, err = client.GetResponder(resp) + if err != nil { + ae = autorest.NewErrorWithError(err, "cdn/CustomDomainsClient", "Get", resp.StatusCode, "Failure responding to request") + } + + return +} + +// GetPreparer prepares the Get request. +func (client CustomDomainsClient) GetPreparer(customDomainName string, endpointName string, profileName string, resourceGroupName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "customDomainName": url.QueryEscape(customDomainName), + "endpointName": url.QueryEscape(endpointName), + "profileName": url.QueryEscape(profileName), + "resourceGroupName": url.QueryEscape(resourceGroupName), + "subscriptionId": url.QueryEscape(client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Cdn/profiles/{profileName}/endpoints/{endpointName}/customDomains/{customDomainName}"), + autorest.WithPathParameters(pathParameters), + autorest.WithQueryParameters(queryParameters)) +} + +// GetSender sends the Get request. The method will close the +// http.Response Body if it receives an error. +func (client CustomDomainsClient) GetSender(req *http.Request) (*http.Response, error) { + return client.Send(req, http.StatusOK) +} + +// GetResponder handles the response to the Get request. The method always +// closes the http.Response Body. +func (client CustomDomainsClient) GetResponder(resp *http.Response) (result CustomDomain, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + autorest.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// ListByEndpoint sends the list by endpoint request. 
+// +// endpointName is name of the endpoint within the CDN profile profileName is +// name of the CDN profile within the resource group resourceGroupName is +// name of the resource group within the Azure subscription +func (client CustomDomainsClient) ListByEndpoint(endpointName string, profileName string, resourceGroupName string) (result CustomDomainListResult, ae error) { + req, err := client.ListByEndpointPreparer(endpointName, profileName, resourceGroupName) + if err != nil { + return result, autorest.NewErrorWithError(err, "cdn/CustomDomainsClient", "ListByEndpoint", autorest.UndefinedStatusCode, "Failure preparing request") + } + + resp, err := client.ListByEndpointSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "cdn/CustomDomainsClient", "ListByEndpoint", resp.StatusCode, "Failure sending request") + } + + result, err = client.ListByEndpointResponder(resp) + if err != nil { + ae = autorest.NewErrorWithError(err, "cdn/CustomDomainsClient", "ListByEndpoint", resp.StatusCode, "Failure responding to request") + } + + return +} + +// ListByEndpointPreparer prepares the ListByEndpoint request. +func (client CustomDomainsClient) ListByEndpointPreparer(endpointName string, profileName string, resourceGroupName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "endpointName": url.QueryEscape(endpointName), + "profileName": url.QueryEscape(profileName), + "resourceGroupName": url.QueryEscape(resourceGroupName), + "subscriptionId": url.QueryEscape(client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Cdn/profiles/{profileName}/endpoints/{endpointName}/customDomains"), + autorest.WithPathParameters(pathParameters), + autorest.WithQueryParameters(queryParameters)) +} + +// ListByEndpointSender sends the ListByEndpoint request. The method will close the +// http.Response Body if it receives an error. +func (client CustomDomainsClient) ListByEndpointSender(req *http.Request) (*http.Response, error) { + return client.Send(req, http.StatusOK) +} + +// ListByEndpointResponder handles the response to the ListByEndpoint request. The method always +// closes the http.Response Body. +func (client CustomDomainsClient) ListByEndpointResponder(resp *http.Response) (result CustomDomainListResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + autorest.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// Update sends the update request. 
+// +// customDomainName is name of the custom domain within an endpoint +// customDomainProperties is custom domain properties to update endpointName +// is name of the endpoint within the CDN profile profileName is name of the +// CDN profile within the resource group resourceGroupName is name of the +// resource group within the Azure subscription +func (client CustomDomainsClient) Update(customDomainName string, customDomainProperties CustomDomainParameters, endpointName string, profileName string, resourceGroupName string) (result ErrorResponse, ae error) { + req, err := client.UpdatePreparer(customDomainName, customDomainProperties, endpointName, profileName, resourceGroupName) + if err != nil { + return result, autorest.NewErrorWithError(err, "cdn/CustomDomainsClient", "Update", autorest.UndefinedStatusCode, "Failure preparing request") + } + + resp, err := client.UpdateSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "cdn/CustomDomainsClient", "Update", resp.StatusCode, "Failure sending request") + } + + result, err = client.UpdateResponder(resp) + if err != nil { + ae = autorest.NewErrorWithError(err, "cdn/CustomDomainsClient", "Update", resp.StatusCode, "Failure responding to request") + } + + return +} + +// UpdatePreparer prepares the Update request. +func (client CustomDomainsClient) UpdatePreparer(customDomainName string, customDomainProperties CustomDomainParameters, endpointName string, profileName string, resourceGroupName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "customDomainName": url.QueryEscape(customDomainName), + "endpointName": url.QueryEscape(endpointName), + "profileName": url.QueryEscape(profileName), + "resourceGroupName": url.QueryEscape(resourceGroupName), + "subscriptionId": url.QueryEscape(client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsPatch(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Cdn/profiles/{profileName}/endpoints/{endpointName}/customDomains/{customDomainName}"), + autorest.WithJSON(customDomainProperties), + autorest.WithPathParameters(pathParameters), + autorest.WithQueryParameters(queryParameters)) +} + +// UpdateSender sends the Update request. The method will close the +// http.Response Body if it receives an error. +func (client CustomDomainsClient) UpdateSender(req *http.Request) (*http.Response, error) { + return client.Send(req, http.StatusOK) +} + +// UpdateResponder handles the response to the Update request. The method always +// closes the http.Response Body. +func (client CustomDomainsClient) UpdateResponder(resp *http.Response) (result ErrorResponse, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + autorest.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/cdn/endpoints.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/cdn/endpoints.go new file mode 100644 index 000000000..57658911b --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/cdn/endpoints.go @@ -0,0 +1,711 @@ +package cdn + +// Copyright (c) Microsoft and contributors. All rights reserved. 
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Code generated by Microsoft (R) AutoRest Code Generator 0.12.0.0
+// Changes may cause incorrect behavior and will be lost if the code is
+// regenerated.
+
+import (
+	"github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest"
+	"net/http"
+	"net/url"
+)
+
+// EndpointsClient is the client for endpoint operations. Use these APIs to
+// manage Azure CDN resources through the Azure Resource Manager. You must
+// make sure that requests made to these resources are secure. For more
+// information, see Authenticating Azure Resource Manager requests.
+type EndpointsClient struct {
+	ManagementClient
+}
+
+// NewEndpointsClient creates an instance of the EndpointsClient client.
+func NewEndpointsClient(subscriptionID string) EndpointsClient {
+	return NewEndpointsClientWithBaseURI(DefaultBaseURI, subscriptionID)
+}
+
+// NewEndpointsClientWithBaseURI creates an instance of the EndpointsClient
+// client.
+func NewEndpointsClientWithBaseURI(baseURI string, subscriptionID string) EndpointsClient {
+	return EndpointsClient{NewWithBaseURI(baseURI, subscriptionID)}
+}
+
+// Create sends the create request.
+//
+// endpointName is name of the endpoint within the CDN profile
+// endpointProperties is endpoint properties profileName is name of the CDN
+// profile within the resource group resourceGroupName is name of the
+// resource group within the Azure subscription
+func (client EndpointsClient) Create(endpointName string, endpointProperties EndpointCreateParameters, profileName string, resourceGroupName string) (result TrackedResource, ae error) {
+	req, err := client.CreatePreparer(endpointName, endpointProperties, profileName, resourceGroupName)
+	if err != nil {
+		return result, autorest.NewErrorWithError(err, "cdn/EndpointsClient", "Create", autorest.UndefinedStatusCode, "Failure preparing request")
+	}
+
+	resp, err := client.CreateSender(req)
+	if err != nil {
+		result.Response = autorest.Response{Response: resp}
+		return result, autorest.NewErrorWithError(err, "cdn/EndpointsClient", "Create", resp.StatusCode, "Failure sending request")
+	}
+
+	result, err = client.CreateResponder(resp)
+	if err != nil {
+		ae = autorest.NewErrorWithError(err, "cdn/EndpointsClient", "Create", resp.StatusCode, "Failure responding to request")
+	}
+
+	return
+}
+
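In the same spirit, a sketch of driving an endpoint lifecycle operation; Start takes only the endpoint, profile, and resource group names (all hypothetical here, and client.Authorizer is again required in practice):

	package main

	import (
		"log"

		"github.com/Azure/azure-sdk-for-go/arm/cdn"
	)

	func main() {
		// Hypothetical subscription ID; Authorizer setup omitted.
		client := cdn.NewEndpointsClient("11111111-2222-3333-4444-555555555555")

		// Start returns the raw autorest.Response; the service may complete
		// the operation asynchronously (202 Accepted).
		if _, err := client.Start("my-endpoint", "my-profile", "my-rg"); err != nil {
			log.Fatal(err)
		}
	}

+// CreatePreparer prepares the Create request.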
+func (client EndpointsClient) CreatePreparer(endpointName string, endpointProperties EndpointCreateParameters, profileName string, resourceGroupName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "endpointName": url.QueryEscape(endpointName), + "profileName": url.QueryEscape(profileName), + "resourceGroupName": url.QueryEscape(resourceGroupName), + "subscriptionId": url.QueryEscape(client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsPut(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Cdn/profiles/{profileName}/endpoints/{endpointName}"), + autorest.WithJSON(endpointProperties), + autorest.WithPathParameters(pathParameters), + autorest.WithQueryParameters(queryParameters)) +} + +// CreateSender sends the Create request. The method will close the +// http.Response Body if it receives an error. +func (client EndpointsClient) CreateSender(req *http.Request) (*http.Response, error) { + return client.Send(req, http.StatusOK, http.StatusCreated, http.StatusAccepted) +} + +// CreateResponder handles the response to the Create request. The method always +// closes the http.Response Body. +func (client EndpointsClient) CreateResponder(resp *http.Response) (result TrackedResource, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + autorest.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated, http.StatusAccepted), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// DeleteIfExists sends the delete if exists request. +// +// endpointName is name of the endpoint within the CDN profile profileName is +// name of the CDN profile within the resource group resourceGroupName is +// name of the resource group within the Azure subscription +func (client EndpointsClient) DeleteIfExists(endpointName string, profileName string, resourceGroupName string) (result autorest.Response, ae error) { + req, err := client.DeleteIfExistsPreparer(endpointName, profileName, resourceGroupName) + if err != nil { + return result, autorest.NewErrorWithError(err, "cdn/EndpointsClient", "DeleteIfExists", autorest.UndefinedStatusCode, "Failure preparing request") + } + + resp, err := client.DeleteIfExistsSender(req) + if err != nil { + result.Response = resp + return result, autorest.NewErrorWithError(err, "cdn/EndpointsClient", "DeleteIfExists", resp.StatusCode, "Failure sending request") + } + + result, err = client.DeleteIfExistsResponder(resp) + if err != nil { + ae = autorest.NewErrorWithError(err, "cdn/EndpointsClient", "DeleteIfExists", resp.StatusCode, "Failure responding to request") + } + + return +} + +// DeleteIfExistsPreparer prepares the DeleteIfExists request. 
+func (client EndpointsClient) DeleteIfExistsPreparer(endpointName string, profileName string, resourceGroupName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "endpointName": url.QueryEscape(endpointName), + "profileName": url.QueryEscape(profileName), + "resourceGroupName": url.QueryEscape(resourceGroupName), + "subscriptionId": url.QueryEscape(client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsDelete(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Cdn/profiles/{profileName}/endpoints/{endpointName}"), + autorest.WithPathParameters(pathParameters), + autorest.WithQueryParameters(queryParameters)) +} + +// DeleteIfExistsSender sends the DeleteIfExists request. The method will close the +// http.Response Body if it receives an error. +func (client EndpointsClient) DeleteIfExistsSender(req *http.Request) (*http.Response, error) { + return client.Send(req, http.StatusOK, http.StatusAccepted, http.StatusNoContent) +} + +// DeleteIfExistsResponder handles the response to the DeleteIfExists request. The method always +// closes the http.Response Body. +func (client EndpointsClient) DeleteIfExistsResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + autorest.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted, http.StatusNoContent), + autorest.ByClosing()) + result.Response = resp + return +} + +// Get sends the get request. +// +// endpointName is name of the endpoint within the CDN profile profileName is +// name of the CDN profile within the resource group resourceGroupName is +// name of the resource group within the Azure subscription +func (client EndpointsClient) Get(endpointName string, profileName string, resourceGroupName string) (result Endpoint, ae error) { + req, err := client.GetPreparer(endpointName, profileName, resourceGroupName) + if err != nil { + return result, autorest.NewErrorWithError(err, "cdn/EndpointsClient", "Get", autorest.UndefinedStatusCode, "Failure preparing request") + } + + resp, err := client.GetSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "cdn/EndpointsClient", "Get", resp.StatusCode, "Failure sending request") + } + + result, err = client.GetResponder(resp) + if err != nil { + ae = autorest.NewErrorWithError(err, "cdn/EndpointsClient", "Get", resp.StatusCode, "Failure responding to request") + } + + return +} + +// GetPreparer prepares the Get request. 
+func (client EndpointsClient) GetPreparer(endpointName string, profileName string, resourceGroupName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "endpointName": url.QueryEscape(endpointName), + "profileName": url.QueryEscape(profileName), + "resourceGroupName": url.QueryEscape(resourceGroupName), + "subscriptionId": url.QueryEscape(client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Cdn/profiles/{profileName}/endpoints/{endpointName}"), + autorest.WithPathParameters(pathParameters), + autorest.WithQueryParameters(queryParameters)) +} + +// GetSender sends the Get request. The method will close the +// http.Response Body if it receives an error. +func (client EndpointsClient) GetSender(req *http.Request) (*http.Response, error) { + return client.Send(req, http.StatusOK) +} + +// GetResponder handles the response to the Get request. The method always +// closes the http.Response Body. +func (client EndpointsClient) GetResponder(resp *http.Response) (result Endpoint, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + autorest.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// ListByProfile sends the list by profile request. +// +// profileName is name of the CDN profile within the resource group +// resourceGroupName is name of the resource group within the Azure +// subscription +func (client EndpointsClient) ListByProfile(profileName string, resourceGroupName string) (result EndpointListResult, ae error) { + req, err := client.ListByProfilePreparer(profileName, resourceGroupName) + if err != nil { + return result, autorest.NewErrorWithError(err, "cdn/EndpointsClient", "ListByProfile", autorest.UndefinedStatusCode, "Failure preparing request") + } + + resp, err := client.ListByProfileSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "cdn/EndpointsClient", "ListByProfile", resp.StatusCode, "Failure sending request") + } + + result, err = client.ListByProfileResponder(resp) + if err != nil { + ae = autorest.NewErrorWithError(err, "cdn/EndpointsClient", "ListByProfile", resp.StatusCode, "Failure responding to request") + } + + return +} + +// ListByProfilePreparer prepares the ListByProfile request. +func (client EndpointsClient) ListByProfilePreparer(profileName string, resourceGroupName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "profileName": url.QueryEscape(profileName), + "resourceGroupName": url.QueryEscape(resourceGroupName), + "subscriptionId": url.QueryEscape(client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Cdn/profiles/{profileName}/endpoints"), + autorest.WithPathParameters(pathParameters), + autorest.WithQueryParameters(queryParameters)) +} + +// ListByProfileSender sends the ListByProfile request. 
The method will close the +// http.Response Body if it receives an error. +func (client EndpointsClient) ListByProfileSender(req *http.Request) (*http.Response, error) { + return client.Send(req, http.StatusOK) +} + +// ListByProfileResponder handles the response to the ListByProfile request. The method always +// closes the http.Response Body. +func (client EndpointsClient) ListByProfileResponder(resp *http.Response) (result EndpointListResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + autorest.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// LoadContent sends the load content request. +// +// endpointName is name of the endpoint within the CDN profile +// contentFilePaths is the path to the content to be loaded. Path should +// describe a file. profileName is name of the CDN profile within the +// resource group resourceGroupName is name of the resource group within the +// Azure subscription +func (client EndpointsClient) LoadContent(endpointName string, contentFilePaths LoadParameters, profileName string, resourceGroupName string) (result autorest.Response, ae error) { + req, err := client.LoadContentPreparer(endpointName, contentFilePaths, profileName, resourceGroupName) + if err != nil { + return result, autorest.NewErrorWithError(err, "cdn/EndpointsClient", "LoadContent", autorest.UndefinedStatusCode, "Failure preparing request") + } + + resp, err := client.LoadContentSender(req) + if err != nil { + result.Response = resp + return result, autorest.NewErrorWithError(err, "cdn/EndpointsClient", "LoadContent", resp.StatusCode, "Failure sending request") + } + + result, err = client.LoadContentResponder(resp) + if err != nil { + ae = autorest.NewErrorWithError(err, "cdn/EndpointsClient", "LoadContent", resp.StatusCode, "Failure responding to request") + } + + return +} + +// LoadContentPreparer prepares the LoadContent request. +func (client EndpointsClient) LoadContentPreparer(endpointName string, contentFilePaths LoadParameters, profileName string, resourceGroupName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "endpointName": url.QueryEscape(endpointName), + "profileName": url.QueryEscape(profileName), + "resourceGroupName": url.QueryEscape(resourceGroupName), + "subscriptionId": url.QueryEscape(client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Cdn/profiles/{profileName}/endpoints/{endpointName}/load"), + autorest.WithJSON(contentFilePaths), + autorest.WithPathParameters(pathParameters), + autorest.WithQueryParameters(queryParameters)) +} + +// LoadContentSender sends the LoadContent request. The method will close the +// http.Response Body if it receives an error. +func (client EndpointsClient) LoadContentSender(req *http.Request) (*http.Response, error) { + return client.Send(req, http.StatusOK, http.StatusAccepted) +} + +// LoadContentResponder handles the response to the LoadContent request. The method always +// closes the http.Response Body. 
+func (client EndpointsClient) LoadContentResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + autorest.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), + autorest.ByClosing()) + result.Response = resp + return +} + +// PurgeContent sends the purge content request. +// +// endpointName is name of the endpoint within the CDN profile +// contentFilePaths is the path to the content to be purged. Path can +// describe a file or directory. profileName is name of the CDN profile +// within the resource group resourceGroupName is name of the resource group +// within the Azure subscription +func (client EndpointsClient) PurgeContent(endpointName string, contentFilePaths PurgeParameters, profileName string, resourceGroupName string) (result autorest.Response, ae error) { + req, err := client.PurgeContentPreparer(endpointName, contentFilePaths, profileName, resourceGroupName) + if err != nil { + return result, autorest.NewErrorWithError(err, "cdn/EndpointsClient", "PurgeContent", autorest.UndefinedStatusCode, "Failure preparing request") + } + + resp, err := client.PurgeContentSender(req) + if err != nil { + result.Response = resp + return result, autorest.NewErrorWithError(err, "cdn/EndpointsClient", "PurgeContent", resp.StatusCode, "Failure sending request") + } + + result, err = client.PurgeContentResponder(resp) + if err != nil { + ae = autorest.NewErrorWithError(err, "cdn/EndpointsClient", "PurgeContent", resp.StatusCode, "Failure responding to request") + } + + return +} + +// PurgeContentPreparer prepares the PurgeContent request. +func (client EndpointsClient) PurgeContentPreparer(endpointName string, contentFilePaths PurgeParameters, profileName string, resourceGroupName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "endpointName": url.QueryEscape(endpointName), + "profileName": url.QueryEscape(profileName), + "resourceGroupName": url.QueryEscape(resourceGroupName), + "subscriptionId": url.QueryEscape(client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Cdn/profiles/{profileName}/endpoints/{endpointName}/purge"), + autorest.WithJSON(contentFilePaths), + autorest.WithPathParameters(pathParameters), + autorest.WithQueryParameters(queryParameters)) +} + +// PurgeContentSender sends the PurgeContent request. The method will close the +// http.Response Body if it receives an error. +func (client EndpointsClient) PurgeContentSender(req *http.Request) (*http.Response, error) { + return client.Send(req, http.StatusOK, http.StatusAccepted) +} + +// PurgeContentResponder handles the response to the PurgeContent request. The method always +// closes the http.Response Body. +func (client EndpointsClient) PurgeContentResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + autorest.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), + autorest.ByClosing()) + result.Response = resp + return +} + +// Start sends the start request. 
+// +// endpointName is name of the endpoint within the CDN profile profileName is +// name of the CDN profile within the resource group resourceGroupName is +// name of the resource group within the Azure subscription +func (client EndpointsClient) Start(endpointName string, profileName string, resourceGroupName string) (result autorest.Response, ae error) { + req, err := client.StartPreparer(endpointName, profileName, resourceGroupName) + if err != nil { + return result, autorest.NewErrorWithError(err, "cdn/EndpointsClient", "Start", autorest.UndefinedStatusCode, "Failure preparing request") + } + + resp, err := client.StartSender(req) + if err != nil { + result.Response = resp + return result, autorest.NewErrorWithError(err, "cdn/EndpointsClient", "Start", resp.StatusCode, "Failure sending request") + } + + result, err = client.StartResponder(resp) + if err != nil { + ae = autorest.NewErrorWithError(err, "cdn/EndpointsClient", "Start", resp.StatusCode, "Failure responding to request") + } + + return +} + +// StartPreparer prepares the Start request. +func (client EndpointsClient) StartPreparer(endpointName string, profileName string, resourceGroupName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "endpointName": url.QueryEscape(endpointName), + "profileName": url.QueryEscape(profileName), + "resourceGroupName": url.QueryEscape(resourceGroupName), + "subscriptionId": url.QueryEscape(client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Cdn/profiles/{profileName}/endpoints/{endpointName}/start"), + autorest.WithPathParameters(pathParameters), + autorest.WithQueryParameters(queryParameters)) +} + +// StartSender sends the Start request. The method will close the +// http.Response Body if it receives an error. +func (client EndpointsClient) StartSender(req *http.Request) (*http.Response, error) { + return client.Send(req, http.StatusOK, http.StatusAccepted) +} + +// StartResponder handles the response to the Start request. The method always +// closes the http.Response Body. +func (client EndpointsClient) StartResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + autorest.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), + autorest.ByClosing()) + result.Response = resp + return +} + +// Stop sends the stop request. 
+// +// endpointName is name of the endpoint within the CDN profile profileName is +// name of the CDN profile within the resource group resourceGroupName is +// name of the resource group within the Azure subscription +func (client EndpointsClient) Stop(endpointName string, profileName string, resourceGroupName string) (result autorest.Response, ae error) { + req, err := client.StopPreparer(endpointName, profileName, resourceGroupName) + if err != nil { + return result, autorest.NewErrorWithError(err, "cdn/EndpointsClient", "Stop", autorest.UndefinedStatusCode, "Failure preparing request") + } + + resp, err := client.StopSender(req) + if err != nil { + result.Response = resp + return result, autorest.NewErrorWithError(err, "cdn/EndpointsClient", "Stop", resp.StatusCode, "Failure sending request") + } + + result, err = client.StopResponder(resp) + if err != nil { + ae = autorest.NewErrorWithError(err, "cdn/EndpointsClient", "Stop", resp.StatusCode, "Failure responding to request") + } + + return +} + +// StopPreparer prepares the Stop request. +func (client EndpointsClient) StopPreparer(endpointName string, profileName string, resourceGroupName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "endpointName": url.QueryEscape(endpointName), + "profileName": url.QueryEscape(profileName), + "resourceGroupName": url.QueryEscape(resourceGroupName), + "subscriptionId": url.QueryEscape(client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Cdn/profiles/{profileName}/endpoints/{endpointName}/stop"), + autorest.WithPathParameters(pathParameters), + autorest.WithQueryParameters(queryParameters)) +} + +// StopSender sends the Stop request. The method will close the +// http.Response Body if it receives an error. +func (client EndpointsClient) StopSender(req *http.Request) (*http.Response, error) { + return client.Send(req, http.StatusOK, http.StatusAccepted) +} + +// StopResponder handles the response to the Stop request. The method always +// closes the http.Response Body. +func (client EndpointsClient) StopResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + autorest.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), + autorest.ByClosing()) + result.Response = resp + return +} + +// Update sends the update request. 
+// +// endpointName is name of the endpoint within the CDN profile +// endpointProperties is endpoint properties profileName is name of the CDN +// profile within the resource group resourceGroupName is name of the +// resource group within the Azure subscription +func (client EndpointsClient) Update(endpointName string, endpointProperties EndpointUpdateParameters, profileName string, resourceGroupName string) (result Endpoint, ae error) { + req, err := client.UpdatePreparer(endpointName, endpointProperties, profileName, resourceGroupName) + if err != nil { + return result, autorest.NewErrorWithError(err, "cdn/EndpointsClient", "Update", autorest.UndefinedStatusCode, "Failure preparing request") + } + + resp, err := client.UpdateSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "cdn/EndpointsClient", "Update", resp.StatusCode, "Failure sending request") + } + + result, err = client.UpdateResponder(resp) + if err != nil { + ae = autorest.NewErrorWithError(err, "cdn/EndpointsClient", "Update", resp.StatusCode, "Failure responding to request") + } + + return +} + +// UpdatePreparer prepares the Update request. +func (client EndpointsClient) UpdatePreparer(endpointName string, endpointProperties EndpointUpdateParameters, profileName string, resourceGroupName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "endpointName": url.QueryEscape(endpointName), + "profileName": url.QueryEscape(profileName), + "resourceGroupName": url.QueryEscape(resourceGroupName), + "subscriptionId": url.QueryEscape(client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsPatch(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Cdn/profiles/{profileName}/endpoints/{endpointName}"), + autorest.WithJSON(endpointProperties), + autorest.WithPathParameters(pathParameters), + autorest.WithQueryParameters(queryParameters)) +} + +// UpdateSender sends the Update request. The method will close the +// http.Response Body if it receives an error. +func (client EndpointsClient) UpdateSender(req *http.Request) (*http.Response, error) { + return client.Send(req, http.StatusOK, http.StatusAccepted) +} + +// UpdateResponder handles the response to the Update request. The method always +// closes the http.Response Body. +func (client EndpointsClient) UpdateResponder(resp *http.Response) (result Endpoint, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + autorest.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// ValidateCustomDomain sends the validate custom domain request. 
+// +// endpointName is name of the endpoint within the CDN profile +// customDomainProperties is custom domain to validate profileName is name of +// the CDN profile within the resource group resourceGroupName is name of the +// resource group within the Azure subscription +func (client EndpointsClient) ValidateCustomDomain(endpointName string, customDomainProperties ValidateCustomDomainInput, profileName string, resourceGroupName string) (result ValidateCustomDomainOutput, ae error) { + req, err := client.ValidateCustomDomainPreparer(endpointName, customDomainProperties, profileName, resourceGroupName) + if err != nil { + return result, autorest.NewErrorWithError(err, "cdn/EndpointsClient", "ValidateCustomDomain", autorest.UndefinedStatusCode, "Failure preparing request") + } + + resp, err := client.ValidateCustomDomainSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "cdn/EndpointsClient", "ValidateCustomDomain", resp.StatusCode, "Failure sending request") + } + + result, err = client.ValidateCustomDomainResponder(resp) + if err != nil { + ae = autorest.NewErrorWithError(err, "cdn/EndpointsClient", "ValidateCustomDomain", resp.StatusCode, "Failure responding to request") + } + + return +} + +// ValidateCustomDomainPreparer prepares the ValidateCustomDomain request. +func (client EndpointsClient) ValidateCustomDomainPreparer(endpointName string, customDomainProperties ValidateCustomDomainInput, profileName string, resourceGroupName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "endpointName": url.QueryEscape(endpointName), + "profileName": url.QueryEscape(profileName), + "resourceGroupName": url.QueryEscape(resourceGroupName), + "subscriptionId": url.QueryEscape(client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Cdn/profiles/{profileName}/endpoints/{endpointName}/validateCustomDomain"), + autorest.WithJSON(customDomainProperties), + autorest.WithPathParameters(pathParameters), + autorest.WithQueryParameters(queryParameters)) +} + +// ValidateCustomDomainSender sends the ValidateCustomDomain request. The method will close the +// http.Response Body if it receives an error. +func (client EndpointsClient) ValidateCustomDomainSender(req *http.Request) (*http.Response, error) { + return client.Send(req, http.StatusOK) +} + +// ValidateCustomDomainResponder handles the response to the ValidateCustomDomain request. The method always +// closes the http.Response Body. +func (client EndpointsClient) ValidateCustomDomainResponder(resp *http.Response) (result ValidateCustomDomainOutput, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + autorest.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/cdn/models.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/cdn/models.go new file mode 100644 index 000000000..ad3940323 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/cdn/models.go @@ -0,0 +1,428 @@ +package cdn + +// Copyright (c) Microsoft and contributors. All rights reserved. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator 0.12.0.0 +// Changes may cause incorrect behavior and will be lost if the code is +// regenerated. + +import ( + "github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest" +) + +// CustomDomainResourceState enumerates the values for custom domain resource +// state. +type CustomDomainResourceState string + +const ( + // Active specifies the active state for custom domain resource state. + Active CustomDomainResourceState = "Active" + // Creating specifies the creating state for custom domain resource state. + Creating CustomDomainResourceState = "Creating" + // Deleting specifies the deleting state for custom domain resource state. + Deleting CustomDomainResourceState = "Deleting" +) + +// EndpointResourceState enumerates the values for endpoint resource state. +type EndpointResourceState string + +const ( + // EndpointResourceStateCreating specifies the endpoint resource state + // creating state for endpoint resource state. + EndpointResourceStateCreating EndpointResourceState = "Creating" + // EndpointResourceStateDeleting specifies the endpoint resource state + // deleting state for endpoint resource state. + EndpointResourceStateDeleting EndpointResourceState = "Deleting" + // EndpointResourceStateRunning specifies the endpoint resource state + // running state for endpoint resource state. + EndpointResourceStateRunning EndpointResourceState = "Running" + // EndpointResourceStateStarting specifies the endpoint resource state + // starting state for endpoint resource state. + EndpointResourceStateStarting EndpointResourceState = "Starting" + // EndpointResourceStateStopped specifies the endpoint resource state + // stopped state for endpoint resource state. + EndpointResourceStateStopped EndpointResourceState = "Stopped" + // EndpointResourceStateStopping specifies the endpoint resource state + // stopping state for endpoint resource state. + EndpointResourceStateStopping EndpointResourceState = "Stopping" +) + +// OriginResourceState enumerates the values for origin resource state. +type OriginResourceState string + +const ( + // OriginResourceStateActive specifies the origin resource state active + // state for origin resource state. + OriginResourceStateActive OriginResourceState = "Active" + // OriginResourceStateCreating specifies the origin resource state + // creating state for origin resource state. + OriginResourceStateCreating OriginResourceState = "Creating" + // OriginResourceStateDeleting specifies the origin resource state + // deleting state for origin resource state. + OriginResourceStateDeleting OriginResourceState = "Deleting" +) + +// ProfileResourceState enumerates the values for profile resource state. +type ProfileResourceState string + +const ( + // ProfileResourceStateActive specifies the profile resource state active + // state for profile resource state. 
+ ProfileResourceStateActive ProfileResourceState = "Active" + // ProfileResourceStateCreating specifies the profile resource state + // creating state for profile resource state. + ProfileResourceStateCreating ProfileResourceState = "Creating" + // ProfileResourceStateDeleting specifies the profile resource state + // deleting state for profile resource state. + ProfileResourceStateDeleting ProfileResourceState = "Deleting" + // ProfileResourceStateDisabled specifies the profile resource state + // disabled state for profile resource state. + ProfileResourceStateDisabled ProfileResourceState = "Disabled" +) + +// ProvisioningState enumerates the values for provisioning state. +type ProvisioningState string + +const ( + // ProvisioningStateCreating specifies the provisioning state creating + // state for provisioning state. + ProvisioningStateCreating ProvisioningState = "Creating" + // ProvisioningStateFailed specifies the provisioning state failed state + // for provisioning state. + ProvisioningStateFailed ProvisioningState = "Failed" + // ProvisioningStateSucceeded specifies the provisioning state succeeded + // state for provisioning state. + ProvisioningStateSucceeded ProvisioningState = "Succeeded" +) + +// QueryStringCachingBehavior enumerates the values for query string caching +// behavior. +type QueryStringCachingBehavior string + +const ( + // BypassCaching specifies the bypass caching state for query string + // caching behavior. + BypassCaching QueryStringCachingBehavior = "BypassCaching" + // IgnoreQueryString specifies the ignore query string state for query + // string caching behavior. + IgnoreQueryString QueryStringCachingBehavior = "IgnoreQueryString" + // UseQueryString specifies the use query string state for query string + // caching behavior. + UseQueryString QueryStringCachingBehavior = "UseQueryString" +) + +// ResourceType enumerates the values for resource type. +type ResourceType string + +const ( + // MicrosoftCdnProfilesEndpoints specifies the microsoft cdn profiles + // endpoints state for resource type. + MicrosoftCdnProfilesEndpoints ResourceType = "Microsoft.Cdn/Profiles/Endpoints" +) + +// SkuName enumerates the values for sku name. +type SkuName string + +const ( + // Premium specifies the premium state for sku name. + Premium SkuName = "Premium" + // Standard specifies the standard state for sku name. + Standard SkuName = "Standard" +) + +// CheckNameAvailabilityInput is input of check name availability API +type CheckNameAvailabilityInput struct { + Name *string `json:"name,omitempty"` + Type ResourceType `json:"type,omitempty"` +} + +// CheckNameAvailabilityOutput is output of check name availability API +type CheckNameAvailabilityOutput struct { + autorest.Response `json:"-"` + NameAvailable *bool `json:"NameAvailable,omitempty"` + Reason *string `json:"Reason,omitempty"` + Message *string `json:"Message,omitempty"` +} + +// CustomDomain is cDN CustomDomain represents a mapping between a user +// specified domain name and an Endpoint. It is a common practice to use +// custom domain names to represent the URLs for branding purposes. 
+type CustomDomain struct { + autorest.Response `json:"-"` + ID *string `json:"id,omitempty"` + Name *string `json:"name,omitempty"` + Type *string `json:"type,omitempty"` + Properties *CustomDomainProperties `json:"properties,omitempty"` +} + +// CustomDomainListResult is +type CustomDomainListResult struct { + autorest.Response `json:"-"` + Value *[]CustomDomain `json:"value,omitempty"` +} + +// CustomDomainParameters is customDomain properties required for custom +// domain creation or update +type CustomDomainParameters struct { + Properties *CustomDomainPropertiesParameters `json:"properties,omitempty"` +} + +// CustomDomainProperties is +type CustomDomainProperties struct { + HostName *string `json:"hostName,omitempty"` + ResourceState CustomDomainResourceState `json:"resourceState,omitempty"` + ProvisioningState ProvisioningState `json:"provisioningState,omitempty"` +} + +// CustomDomainPropertiesParameters is +type CustomDomainPropertiesParameters struct { + HostName *string `json:"hostName,omitempty"` +} + +// DeepCreatedOrigin is deep created origins within a CDN endpoint +type DeepCreatedOrigin struct { + Name *string `json:"name,omitempty"` + Properties *DeepCreatedOriginProperties `json:"properties,omitempty"` +} + +// DeepCreatedOriginProperties is properties of deep created origin on a CDN +// endpoint +type DeepCreatedOriginProperties struct { + HostName *string `json:"hostName,omitempty"` + HTTPPort *int `json:"httpPort,omitempty"` + HTTPSPort *int `json:"httpsPort,omitempty"` +} + +// Endpoint is cDN Endpoint is the entity within a CDN Profile containing +// configuration information regarding caching behaviors and origins. The CDN +// Endpoint is exposed using the URL format .azureedge.net by +// default, but custom domains can also be created. 
+type Endpoint struct { + autorest.Response `json:"-"` + ID *string `json:"id,omitempty"` + Name *string `json:"name,omitempty"` + Type *string `json:"type,omitempty"` + Location *string `json:"location,omitempty"` + Tags *map[string]*string `json:"tags,omitempty"` + Properties *EndpointProperties `json:"properties,omitempty"` +} + +// EndpointCreateParameters is endpoint properties required for new endpoint +// creation +type EndpointCreateParameters struct { + Location *string `json:"location,omitempty"` + Tags *map[string]*string `json:"tags,omitempty"` + Properties *EndpointPropertiesCreateUpdateParameters `json:"properties,omitempty"` +} + +// EndpointListResult is +type EndpointListResult struct { + autorest.Response `json:"-"` + Value *[]Endpoint `json:"value,omitempty"` +} + +// EndpointProperties is +type EndpointProperties struct { + HostName *string `json:"hostName,omitempty"` + OriginHostHeader *string `json:"originHostHeader,omitempty"` + OriginPath *string `json:"originPath,omitempty"` + ContentTypesToCompress *[]string `json:"contentTypesToCompress,omitempty"` + IsCompressionEnabled *bool `json:"isCompressionEnabled,omitempty"` + IsHTTPAllowed *bool `json:"isHttpAllowed,omitempty"` + IsHTTPSAllowed *bool `json:"isHttpsAllowed,omitempty"` + QueryStringCachingBehavior QueryStringCachingBehavior `json:"queryStringCachingBehavior,omitempty"` + Origins *[]DeepCreatedOrigin `json:"origins,omitempty"` + ResourceState EndpointResourceState `json:"resourceState,omitempty"` + ProvisioningState ProvisioningState `json:"provisioningState,omitempty"` +} + +// EndpointPropertiesCreateUpdateParameters is +type EndpointPropertiesCreateUpdateParameters struct { + OriginHostHeader *string `json:"originHostHeader,omitempty"` + OriginPath *string `json:"originPath,omitempty"` + ContentTypesToCompress *[]string `json:"contentTypesToCompress,omitempty"` + IsCompressionEnabled *bool `json:"isCompressionEnabled,omitempty"` + IsHTTPAllowed *bool `json:"isHttpAllowed,omitempty"` + IsHTTPSAllowed *bool `json:"isHttpsAllowed,omitempty"` + QueryStringCachingBehavior QueryStringCachingBehavior `json:"queryStringCachingBehavior,omitempty"` + Origins *[]DeepCreatedOrigin `json:"origins,omitempty"` +} + +// EndpointUpdateParameters is endpoint properties required for new endpoint +// creation +type EndpointUpdateParameters struct { + Tags *map[string]*string `json:"tags,omitempty"` + Properties *EndpointPropertiesCreateUpdateParameters `json:"properties,omitempty"` +} + +// ErrorResponse is +type ErrorResponse struct { + autorest.Response `json:"-"` + Code *string `json:"code,omitempty"` + Message *string `json:"message,omitempty"` +} + +// LoadParameters is parameters required for endpoint load +type LoadParameters struct { + ContentPaths *[]string `json:"contentPaths,omitempty"` +} + +// Operation is cDN REST API operation +type Operation struct { + Name *string `json:"name,omitempty"` + Display *OperationDisplay `json:"display,omitempty"` +} + +// OperationDisplay is +type OperationDisplay struct { + Provider *string `json:"provider,omitempty"` + Resource *string `json:"resource,omitempty"` + Operation *string `json:"operation,omitempty"` +} + +// OperationListResult is +type OperationListResult struct { + autorest.Response `json:"-"` + Value *[]Operation `json:"value,omitempty"` +} + +// Origin is cDN Origin is the source of the content being delivered via CDN. 
+// When the edge nodes represented by an Endpoint do not have the requested +// content cached, they attempt to fetch it from one or more of the +// configured Origins. +type Origin struct { + autorest.Response `json:"-"` + ID *string `json:"id,omitempty"` + Name *string `json:"name,omitempty"` + Type *string `json:"type,omitempty"` + Properties *OriginProperties `json:"properties,omitempty"` +} + +// OriginListResult is +type OriginListResult struct { + autorest.Response `json:"-"` + Value *[]Origin `json:"value,omitempty"` +} + +// OriginParameters is origin properties needed for origin creation or update +type OriginParameters struct { + Properties *OriginPropertiesParameters `json:"properties,omitempty"` +} + +// OriginProperties is +type OriginProperties struct { + HostName *string `json:"hostName,omitempty"` + HTTPPort *int `json:"httpPort,omitempty"` + HTTPSPort *int `json:"httpsPort,omitempty"` + ResourceState OriginResourceState `json:"resourceState,omitempty"` + ProvisioningState ProvisioningState `json:"provisioningState,omitempty"` +} + +// OriginPropertiesParameters is +type OriginPropertiesParameters struct { + HostName *string `json:"hostName,omitempty"` + HTTPPort *int `json:"httpPort,omitempty"` + HTTPSPort *int `json:"httpsPort,omitempty"` +} + +// Profile is cDN profile represents the top level resource and the entry +// point into the CDN API. This allows users to set up a logical grouping of +// endpoints in addition to creating shared configuration settings and +// selecting pricing tiers and providers. +type Profile struct { + autorest.Response `json:"-"` + ID *string `json:"id,omitempty"` + Name *string `json:"name,omitempty"` + Type *string `json:"type,omitempty"` + Location *string `json:"location,omitempty"` + Tags *map[string]*string `json:"tags,omitempty"` + Properties *ProfileProperties `json:"properties,omitempty"` +} + +// ProfileCreateParameters is profile properties required for profile creation +type ProfileCreateParameters struct { + Location *string `json:"location,omitempty"` + Tags *map[string]*string `json:"tags,omitempty"` + Properties *ProfilePropertiesCreateParameters `json:"properties,omitempty"` +} + +// ProfileListResult is +type ProfileListResult struct { + autorest.Response `json:"-"` + Value *[]Profile `json:"value,omitempty"` +} + +// ProfileProperties is +type ProfileProperties struct { + Sku *Sku `json:"sku,omitempty"` + ResourceState ProfileResourceState `json:"resourceState,omitempty"` + ProvisioningState ProvisioningState `json:"provisioningState,omitempty"` +} + +// ProfilePropertiesCreateParameters is +type ProfilePropertiesCreateParameters struct { + Sku *Sku `json:"sku,omitempty"` +} + +// ProfileUpdateParameters is profile properties required for profile update +type ProfileUpdateParameters struct { + Tags *map[string]*string `json:"tags,omitempty"` +} + +// PurgeParameters is parameters required for endpoint purge +type PurgeParameters struct { + ContentPaths *[]string `json:"contentPaths,omitempty"` +} + +// Resource is +type Resource struct { + ID *string `json:"id,omitempty"` + Name *string `json:"name,omitempty"` + Type *string `json:"type,omitempty"` +} + +// Sku is defines a pricing tier for a profile +type Sku struct { + Name SkuName `json:"name,omitempty"` +} + +// SsoURI is sso uri required to login to third party web portal +type SsoURI struct { + autorest.Response `json:"-"` + SsoURIProperty *string `json:"ssoUri,omitempty"` +} + +// TrackedResource is aRM tracked resource +type TrackedResource struct { + 
autorest.Response `json:"-"` + ID *string `json:"id,omitempty"` + Name *string `json:"name,omitempty"` + Type *string `json:"type,omitempty"` + Location *string `json:"location,omitempty"` + Tags *map[string]*string `json:"tags,omitempty"` +} + +// ValidateCustomDomainInput is input of the custom domain to be validated +type ValidateCustomDomainInput struct { + HostName *string `json:"hostName,omitempty"` +} + +// ValidateCustomDomainOutput is output of custom domain validation +type ValidateCustomDomainOutput struct { + autorest.Response `json:"-"` + CustomDomainValidated *bool `json:"CustomDomainValidated,omitempty"` + Reason *string `json:"Reason,omitempty"` + Message *string `json:"Message,omitempty"` +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/cdn/nameavailability.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/cdn/nameavailability.go new file mode 100644 index 000000000..4da9228cc --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/cdn/nameavailability.go @@ -0,0 +1,102 @@ +package cdn + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator 0.12.0.0 +// Changes may cause incorrect behavior and will be lost if the code is +// regenerated. + +import ( + "github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest" + "net/http" +) + +// NameAvailabilityClient is the use these APIs to manage Azure CDN resources +// through the Azure Resource Manager. You must make sure that requests made +// to these resources are secure. For more information, see Authenticating +// Azure Resource Manager requests. +type NameAvailabilityClient struct { + ManagementClient +} + +// NewNameAvailabilityClient creates an instance of the NameAvailabilityClient +// client. +func NewNameAvailabilityClient(subscriptionID string) NameAvailabilityClient { + return NewNameAvailabilityClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewNameAvailabilityClientWithBaseURI creates an instance of the +// NameAvailabilityClient client. +func NewNameAvailabilityClientWithBaseURI(baseURI string, subscriptionID string) NameAvailabilityClient { + return NameAvailabilityClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// CheckNameAvailability sends the check name availability request. 
+// +// checkNameAvailabilityInput is input to check +func (client NameAvailabilityClient) CheckNameAvailability(checkNameAvailabilityInput CheckNameAvailabilityInput) (result CheckNameAvailabilityOutput, ae error) { + req, err := client.CheckNameAvailabilityPreparer(checkNameAvailabilityInput) + if err != nil { + return result, autorest.NewErrorWithError(err, "cdn/NameAvailabilityClient", "CheckNameAvailability", autorest.UndefinedStatusCode, "Failure preparing request") + } + + resp, err := client.CheckNameAvailabilitySender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "cdn/NameAvailabilityClient", "CheckNameAvailability", resp.StatusCode, "Failure sending request") + } + + result, err = client.CheckNameAvailabilityResponder(resp) + if err != nil { + ae = autorest.NewErrorWithError(err, "cdn/NameAvailabilityClient", "CheckNameAvailability", resp.StatusCode, "Failure responding to request") + } + + return +} + +// CheckNameAvailabilityPreparer prepares the CheckNameAvailability request. +func (client NameAvailabilityClient) CheckNameAvailabilityPreparer(checkNameAvailabilityInput CheckNameAvailabilityInput) (*http.Request, error) { + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPath("/providers/Microsoft.Cdn/checkNameAvailability"), + autorest.WithJSON(checkNameAvailabilityInput), + autorest.WithQueryParameters(queryParameters)) +} + +// CheckNameAvailabilitySender sends the CheckNameAvailability request. The method will close the +// http.Response Body if it receives an error. +func (client NameAvailabilityClient) CheckNameAvailabilitySender(req *http.Request) (*http.Response, error) { + return client.Send(req, http.StatusOK) +} + +// CheckNameAvailabilityResponder handles the response to the CheckNameAvailability request. The method always +// closes the http.Response Body. +func (client NameAvailabilityClient) CheckNameAvailabilityResponder(resp *http.Response) (result CheckNameAvailabilityOutput, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + autorest.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/cdn/operations.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/cdn/operations.go new file mode 100644 index 000000000..ac2e33f58 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/cdn/operations.go @@ -0,0 +1,98 @@ +package cdn + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator 0.12.0.0 +// Changes may cause incorrect behavior and will be lost if the code is +// regenerated. 
+ +import ( + "github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest" + "net/http" +) + +// OperationsClient is the use these APIs to manage Azure CDN resources +// through the Azure Resource Manager. You must make sure that requests made +// to these resources are secure. For more information, see Authenticating +// Azure Resource Manager requests. +type OperationsClient struct { + ManagementClient +} + +// NewOperationsClient creates an instance of the OperationsClient client. +func NewOperationsClient(subscriptionID string) OperationsClient { + return NewOperationsClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewOperationsClientWithBaseURI creates an instance of the OperationsClient +// client. +func NewOperationsClientWithBaseURI(baseURI string, subscriptionID string) OperationsClient { + return OperationsClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// List sends the list request. +func (client OperationsClient) List() (result OperationListResult, ae error) { + req, err := client.ListPreparer() + if err != nil { + return result, autorest.NewErrorWithError(err, "cdn/OperationsClient", "List", autorest.UndefinedStatusCode, "Failure preparing request") + } + + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "cdn/OperationsClient", "List", resp.StatusCode, "Failure sending request") + } + + result, err = client.ListResponder(resp) + if err != nil { + ae = autorest.NewErrorWithError(err, "cdn/OperationsClient", "List", resp.StatusCode, "Failure responding to request") + } + + return +} + +// ListPreparer prepares the List request. +func (client OperationsClient) ListPreparer() (*http.Request, error) { + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPath("/providers/Microsoft.Cdn/operations"), + autorest.WithQueryParameters(queryParameters)) +} + +// ListSender sends the List request. The method will close the +// http.Response Body if it receives an error. +func (client OperationsClient) ListSender(req *http.Request) (*http.Response, error) { + return client.Send(req, http.StatusOK) +} + +// ListResponder handles the response to the List request. The method always +// closes the http.Response Body. +func (client OperationsClient) ListResponder(resp *http.Response) (result OperationListResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + autorest.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/cdn/origins.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/cdn/origins.go new file mode 100644 index 000000000..9bf1ddc74 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/cdn/origins.go @@ -0,0 +1,388 @@ +package cdn + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator 0.12.0.0 +// Changes may cause incorrect behavior and will be lost if the code is +// regenerated. + +import ( + "github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest" + "net/http" + "net/url" +) + +// OriginsClient is the use these APIs to manage Azure CDN resources through +// the Azure Resource Manager. You must make sure that requests made to these +// resources are secure. For more information, see Authenticating +// Azure Resource Manager requests. +type OriginsClient struct { + ManagementClient +} + +// NewOriginsClient creates an instance of the OriginsClient client. +func NewOriginsClient(subscriptionID string) OriginsClient { + return NewOriginsClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewOriginsClientWithBaseURI creates an instance of the OriginsClient client. +func NewOriginsClientWithBaseURI(baseURI string, subscriptionID string) OriginsClient { + return OriginsClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// Create sends the create request. +// +// originName is name of the origin, an arbitrary value but it needs to be +// unique under endpoint originProperties is origin properties endpointName +// is name of the endpoint within the CDN profile profileName is name of the +// CDN profile within the resource group resourceGroupName is name of the +// resource group within the Azure subscription +func (client OriginsClient) Create(originName string, originProperties OriginParameters, endpointName string, profileName string, resourceGroupName string) (result Origin, ae error) { + req, err := client.CreatePreparer(originName, originProperties, endpointName, profileName, resourceGroupName) + if err != nil { + return result, autorest.NewErrorWithError(err, "cdn/OriginsClient", "Create", autorest.UndefinedStatusCode, "Failure preparing request") + } + + resp, err := client.CreateSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "cdn/OriginsClient", "Create", resp.StatusCode, "Failure sending request") + } + + result, err = client.CreateResponder(resp) + if err != nil { + ae = autorest.NewErrorWithError(err, "cdn/OriginsClient", "Create", resp.StatusCode, "Failure responding to request") + } + + return +} + +// CreatePreparer prepares the Create request. 
+func (client OriginsClient) CreatePreparer(originName string, originProperties OriginParameters, endpointName string, profileName string, resourceGroupName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "endpointName": url.QueryEscape(endpointName), + "originName": url.QueryEscape(originName), + "profileName": url.QueryEscape(profileName), + "resourceGroupName": url.QueryEscape(resourceGroupName), + "subscriptionId": url.QueryEscape(client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsPut(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Cdn/profiles/{profileName}/endpoints/{endpointName}/origins/{originName}"), + autorest.WithJSON(originProperties), + autorest.WithPathParameters(pathParameters), + autorest.WithQueryParameters(queryParameters)) +} + +// CreateSender sends the Create request. The method will close the +// http.Response Body if it receives an error. +func (client OriginsClient) CreateSender(req *http.Request) (*http.Response, error) { + return client.Send(req, http.StatusOK, http.StatusCreated, http.StatusAccepted) +} + +// CreateResponder handles the response to the Create request. The method always +// closes the http.Response Body. +func (client OriginsClient) CreateResponder(resp *http.Response) (result Origin, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + autorest.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated, http.StatusAccepted), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// DeleteIfExists sends the delete if exists request. +// +// originName is name of the origin, an arbitrary value but it needs to be +// unique under endpoint endpointName is name of the endpoint within the CDN +// profile profileName is name of the CDN profile within the resource group +// resourceGroupName is name of the resource group within the Azure +// subscription +func (client OriginsClient) DeleteIfExists(originName string, endpointName string, profileName string, resourceGroupName string) (result Origin, ae error) { + req, err := client.DeleteIfExistsPreparer(originName, endpointName, profileName, resourceGroupName) + if err != nil { + return result, autorest.NewErrorWithError(err, "cdn/OriginsClient", "DeleteIfExists", autorest.UndefinedStatusCode, "Failure preparing request") + } + + resp, err := client.DeleteIfExistsSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "cdn/OriginsClient", "DeleteIfExists", resp.StatusCode, "Failure sending request") + } + + result, err = client.DeleteIfExistsResponder(resp) + if err != nil { + ae = autorest.NewErrorWithError(err, "cdn/OriginsClient", "DeleteIfExists", resp.StatusCode, "Failure responding to request") + } + + return +} + +// DeleteIfExistsPreparer prepares the DeleteIfExists request. 
+func (client OriginsClient) DeleteIfExistsPreparer(originName string, endpointName string, profileName string, resourceGroupName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "endpointName": url.QueryEscape(endpointName), + "originName": url.QueryEscape(originName), + "profileName": url.QueryEscape(profileName), + "resourceGroupName": url.QueryEscape(resourceGroupName), + "subscriptionId": url.QueryEscape(client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsDelete(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Cdn/profiles/{profileName}/endpoints/{endpointName}/origins/{originName}"), + autorest.WithPathParameters(pathParameters), + autorest.WithQueryParameters(queryParameters)) +} + +// DeleteIfExistsSender sends the DeleteIfExists request. The method will close the +// http.Response Body if it receives an error. +func (client OriginsClient) DeleteIfExistsSender(req *http.Request) (*http.Response, error) { + return client.Send(req, http.StatusOK, http.StatusAccepted, http.StatusNoContent) +} + +// DeleteIfExistsResponder handles the response to the DeleteIfExists request. The method always +// closes the http.Response Body. +func (client OriginsClient) DeleteIfExistsResponder(resp *http.Response) (result Origin, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + autorest.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted, http.StatusNoContent), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// Get sends the get request. +// +// originName is name of the origin, an arbitrary value but it needs to be +// unique under endpoint endpointName is name of the endpoint within the CDN +// profile profileName is name of the CDN profile within the resource group +// resourceGroupName is name of the resource group within the Azure +// subscription +func (client OriginsClient) Get(originName string, endpointName string, profileName string, resourceGroupName string) (result Origin, ae error) { + req, err := client.GetPreparer(originName, endpointName, profileName, resourceGroupName) + if err != nil { + return result, autorest.NewErrorWithError(err, "cdn/OriginsClient", "Get", autorest.UndefinedStatusCode, "Failure preparing request") + } + + resp, err := client.GetSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "cdn/OriginsClient", "Get", resp.StatusCode, "Failure sending request") + } + + result, err = client.GetResponder(resp) + if err != nil { + ae = autorest.NewErrorWithError(err, "cdn/OriginsClient", "Get", resp.StatusCode, "Failure responding to request") + } + + return +} + +// GetPreparer prepares the Get request. 
+func (client OriginsClient) GetPreparer(originName string, endpointName string, profileName string, resourceGroupName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "endpointName": url.QueryEscape(endpointName), + "originName": url.QueryEscape(originName), + "profileName": url.QueryEscape(profileName), + "resourceGroupName": url.QueryEscape(resourceGroupName), + "subscriptionId": url.QueryEscape(client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Cdn/profiles/{profileName}/endpoints/{endpointName}/origins/{originName}"), + autorest.WithPathParameters(pathParameters), + autorest.WithQueryParameters(queryParameters)) +} + +// GetSender sends the Get request. The method will close the +// http.Response Body if it receives an error. +func (client OriginsClient) GetSender(req *http.Request) (*http.Response, error) { + return client.Send(req, http.StatusOK) +} + +// GetResponder handles the response to the Get request. The method always +// closes the http.Response Body. +func (client OriginsClient) GetResponder(resp *http.Response) (result Origin, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + autorest.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// ListByEndpoint sends the list by endpoint request. +// +// endpointName is name of the endpoint within the CDN profile profileName is +// name of the CDN profile within the resource group resourceGroupName is +// name of the resource group within the Azure subscription +func (client OriginsClient) ListByEndpoint(endpointName string, profileName string, resourceGroupName string) (result OriginListResult, ae error) { + req, err := client.ListByEndpointPreparer(endpointName, profileName, resourceGroupName) + if err != nil { + return result, autorest.NewErrorWithError(err, "cdn/OriginsClient", "ListByEndpoint", autorest.UndefinedStatusCode, "Failure preparing request") + } + + resp, err := client.ListByEndpointSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "cdn/OriginsClient", "ListByEndpoint", resp.StatusCode, "Failure sending request") + } + + result, err = client.ListByEndpointResponder(resp) + if err != nil { + ae = autorest.NewErrorWithError(err, "cdn/OriginsClient", "ListByEndpoint", resp.StatusCode, "Failure responding to request") + } + + return +} + +// ListByEndpointPreparer prepares the ListByEndpoint request. 
+func (client OriginsClient) ListByEndpointPreparer(endpointName string, profileName string, resourceGroupName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "endpointName": url.QueryEscape(endpointName), + "profileName": url.QueryEscape(profileName), + "resourceGroupName": url.QueryEscape(resourceGroupName), + "subscriptionId": url.QueryEscape(client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Cdn/profiles/{profileName}/endpoints/{endpointName}/origins"), + autorest.WithPathParameters(pathParameters), + autorest.WithQueryParameters(queryParameters)) +} + +// ListByEndpointSender sends the ListByEndpoint request. The method will close the +// http.Response Body if it receives an error. +func (client OriginsClient) ListByEndpointSender(req *http.Request) (*http.Response, error) { + return client.Send(req, http.StatusOK) +} + +// ListByEndpointResponder handles the response to the ListByEndpoint request. The method always +// closes the http.Response Body. +func (client OriginsClient) ListByEndpointResponder(resp *http.Response) (result OriginListResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + autorest.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// Update sends the update request. +// +// originName is name of the origin, an arbitrary value but it needs to be +// unique under endpoint originProperties is origin properties endpointName +// is name of the endpoint within the CDN profile profileName is name of the +// CDN profile within the resource group resourceGroupName is name of the +// resource group within the Azure subscription +func (client OriginsClient) Update(originName string, originProperties OriginParameters, endpointName string, profileName string, resourceGroupName string) (result Origin, ae error) { + req, err := client.UpdatePreparer(originName, originProperties, endpointName, profileName, resourceGroupName) + if err != nil { + return result, autorest.NewErrorWithError(err, "cdn/OriginsClient", "Update", autorest.UndefinedStatusCode, "Failure preparing request") + } + + resp, err := client.UpdateSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "cdn/OriginsClient", "Update", resp.StatusCode, "Failure sending request") + } + + result, err = client.UpdateResponder(resp) + if err != nil { + ae = autorest.NewErrorWithError(err, "cdn/OriginsClient", "Update", resp.StatusCode, "Failure responding to request") + } + + return +} + +// UpdatePreparer prepares the Update request. 
+func (client OriginsClient) UpdatePreparer(originName string, originProperties OriginParameters, endpointName string, profileName string, resourceGroupName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "endpointName": url.QueryEscape(endpointName), + "originName": url.QueryEscape(originName), + "profileName": url.QueryEscape(profileName), + "resourceGroupName": url.QueryEscape(resourceGroupName), + "subscriptionId": url.QueryEscape(client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsPatch(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Cdn/profiles/{profileName}/endpoints/{endpointName}/origins/{originName}"), + autorest.WithJSON(originProperties), + autorest.WithPathParameters(pathParameters), + autorest.WithQueryParameters(queryParameters)) +} + +// UpdateSender sends the Update request. The method will close the +// http.Response Body if it receives an error. +func (client OriginsClient) UpdateSender(req *http.Request) (*http.Response, error) { + return client.Send(req, http.StatusOK, http.StatusAccepted) +} + +// UpdateResponder handles the response to the Update request. The method always +// closes the http.Response Body. +func (client OriginsClient) UpdateResponder(resp *http.Response) (result Origin, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + autorest.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/cdn/profiles.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/cdn/profiles.go new file mode 100644 index 000000000..10b73e027 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/cdn/profiles.go @@ -0,0 +1,436 @@ +package cdn + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator 0.12.0.0 +// Changes may cause incorrect behavior and will be lost if the code is +// regenerated. + +import ( + "github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest" + "net/http" + "net/url" +) + +// ProfilesClient is the use these APIs to manage Azure CDN resources through +// the Azure Resource Manager. You must make sure that requests made to these +// resources are secure. For more information, see Authenticating +// Azure Resource Manager requests. +type ProfilesClient struct { + ManagementClient +} + +// NewProfilesClient creates an instance of the ProfilesClient client. 
+func NewProfilesClient(subscriptionID string) ProfilesClient { + return NewProfilesClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewProfilesClientWithBaseURI creates an instance of the ProfilesClient +// client. +func NewProfilesClientWithBaseURI(baseURI string, subscriptionID string) ProfilesClient { + return ProfilesClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// Create sends the create request. +// +// profileName is name of the CDN profile within the resource group +// profileProperties is profile properties needed for creation +// resourceGroupName is name of the resource group within the Azure +// subscription +func (client ProfilesClient) Create(profileName string, profileProperties ProfileCreateParameters, resourceGroupName string) (result Profile, ae error) { + req, err := client.CreatePreparer(profileName, profileProperties, resourceGroupName) + if err != nil { + return result, autorest.NewErrorWithError(err, "cdn/ProfilesClient", "Create", autorest.UndefinedStatusCode, "Failure preparing request") + } + + resp, err := client.CreateSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "cdn/ProfilesClient", "Create", resp.StatusCode, "Failure sending request") + } + + result, err = client.CreateResponder(resp) + if err != nil { + ae = autorest.NewErrorWithError(err, "cdn/ProfilesClient", "Create", resp.StatusCode, "Failure responding to request") + } + + return +} + +// CreatePreparer prepares the Create request. +func (client ProfilesClient) CreatePreparer(profileName string, profileProperties ProfileCreateParameters, resourceGroupName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "profileName": url.QueryEscape(profileName), + "resourceGroupName": url.QueryEscape(resourceGroupName), + "subscriptionId": url.QueryEscape(client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsPut(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Cdn/profiles/{profileName}"), + autorest.WithJSON(profileProperties), + autorest.WithPathParameters(pathParameters), + autorest.WithQueryParameters(queryParameters)) +} + +// CreateSender sends the Create request. The method will close the +// http.Response Body if it receives an error. +func (client ProfilesClient) CreateSender(req *http.Request) (*http.Response, error) { + return client.Send(req, http.StatusOK, http.StatusCreated, http.StatusAccepted) +} + +// CreateResponder handles the response to the Create request. The method always +// closes the http.Response Body. +func (client ProfilesClient) CreateResponder(resp *http.Response) (result Profile, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + autorest.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated, http.StatusAccepted), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// DeleteIfExists sends the delete if exists request. 
+// +// profileName is name of the CDN profile within the resource group +// resourceGroupName is name of the resource group within the Azure +// subscription +func (client ProfilesClient) DeleteIfExists(profileName string, resourceGroupName string) (result autorest.Response, ae error) { + req, err := client.DeleteIfExistsPreparer(profileName, resourceGroupName) + if err != nil { + return result, autorest.NewErrorWithError(err, "cdn/ProfilesClient", "DeleteIfExists", autorest.UndefinedStatusCode, "Failure preparing request") + } + + resp, err := client.DeleteIfExistsSender(req) + if err != nil { + result.Response = resp + return result, autorest.NewErrorWithError(err, "cdn/ProfilesClient", "DeleteIfExists", resp.StatusCode, "Failure sending request") + } + + result, err = client.DeleteIfExistsResponder(resp) + if err != nil { + ae = autorest.NewErrorWithError(err, "cdn/ProfilesClient", "DeleteIfExists", resp.StatusCode, "Failure responding to request") + } + + return +} + +// DeleteIfExistsPreparer prepares the DeleteIfExists request. +func (client ProfilesClient) DeleteIfExistsPreparer(profileName string, resourceGroupName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "profileName": url.QueryEscape(profileName), + "resourceGroupName": url.QueryEscape(resourceGroupName), + "subscriptionId": url.QueryEscape(client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsDelete(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Cdn/profiles/{profileName}"), + autorest.WithPathParameters(pathParameters), + autorest.WithQueryParameters(queryParameters)) +} + +// DeleteIfExistsSender sends the DeleteIfExists request. The method will close the +// http.Response Body if it receives an error. +func (client ProfilesClient) DeleteIfExistsSender(req *http.Request) (*http.Response, error) { + return client.Send(req, http.StatusOK, http.StatusAccepted, http.StatusNoContent) +} + +// DeleteIfExistsResponder handles the response to the DeleteIfExists request. The method always +// closes the http.Response Body. +func (client ProfilesClient) DeleteIfExistsResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + autorest.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted, http.StatusNoContent), + autorest.ByClosing()) + result.Response = resp + return +} + +// GenerateSsoURI sends the generate sso uri request. 
+// +// profileName is name of the CDN profile within the resource group +// resourceGroupName is name of the resource group within the Azure +// subscription +func (client ProfilesClient) GenerateSsoURI(profileName string, resourceGroupName string) (result SsoURI, ae error) { + req, err := client.GenerateSsoURIPreparer(profileName, resourceGroupName) + if err != nil { + return result, autorest.NewErrorWithError(err, "cdn/ProfilesClient", "GenerateSsoURI", autorest.UndefinedStatusCode, "Failure preparing request") + } + + resp, err := client.GenerateSsoURISender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "cdn/ProfilesClient", "GenerateSsoURI", resp.StatusCode, "Failure sending request") + } + + result, err = client.GenerateSsoURIResponder(resp) + if err != nil { + ae = autorest.NewErrorWithError(err, "cdn/ProfilesClient", "GenerateSsoURI", resp.StatusCode, "Failure responding to request") + } + + return +} + +// GenerateSsoURIPreparer prepares the GenerateSsoURI request. +func (client ProfilesClient) GenerateSsoURIPreparer(profileName string, resourceGroupName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "profileName": url.QueryEscape(profileName), + "resourceGroupName": url.QueryEscape(resourceGroupName), + "subscriptionId": url.QueryEscape(client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Cdn/profiles/{profileName}/generateSsoUri"), + autorest.WithPathParameters(pathParameters), + autorest.WithQueryParameters(queryParameters)) +} + +// GenerateSsoURISender sends the GenerateSsoURI request. The method will close the +// http.Response Body if it receives an error. +func (client ProfilesClient) GenerateSsoURISender(req *http.Request) (*http.Response, error) { + return client.Send(req, http.StatusOK) +} + +// GenerateSsoURIResponder handles the response to the GenerateSsoURI request. The method always +// closes the http.Response Body. +func (client ProfilesClient) GenerateSsoURIResponder(resp *http.Response) (result SsoURI, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + autorest.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// Get sends the get request. 
+// +// profileName is name of the CDN profile within the resource group +// resourceGroupName is name of the resource group within the Azure +// subscription +func (client ProfilesClient) Get(profileName string, resourceGroupName string) (result Profile, ae error) { + req, err := client.GetPreparer(profileName, resourceGroupName) + if err != nil { + return result, autorest.NewErrorWithError(err, "cdn/ProfilesClient", "Get", autorest.UndefinedStatusCode, "Failure preparing request") + } + + resp, err := client.GetSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "cdn/ProfilesClient", "Get", resp.StatusCode, "Failure sending request") + } + + result, err = client.GetResponder(resp) + if err != nil { + ae = autorest.NewErrorWithError(err, "cdn/ProfilesClient", "Get", resp.StatusCode, "Failure responding to request") + } + + return +} + +// GetPreparer prepares the Get request. +func (client ProfilesClient) GetPreparer(profileName string, resourceGroupName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "profileName": url.QueryEscape(profileName), + "resourceGroupName": url.QueryEscape(resourceGroupName), + "subscriptionId": url.QueryEscape(client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Cdn/profiles/{profileName}"), + autorest.WithPathParameters(pathParameters), + autorest.WithQueryParameters(queryParameters)) +} + +// GetSender sends the Get request. The method will close the +// http.Response Body if it receives an error. +func (client ProfilesClient) GetSender(req *http.Request) (*http.Response, error) { + return client.Send(req, http.StatusOK) +} + +// GetResponder handles the response to the Get request. The method always +// closes the http.Response Body. +func (client ProfilesClient) GetResponder(resp *http.Response) (result Profile, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + autorest.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// ListByResourceGroup sends the list by resource group request. +// +// resourceGroupName is name of the resource group within the Azure +// subscription +func (client ProfilesClient) ListByResourceGroup(resourceGroupName string) (result ProfileListResult, ae error) { + req, err := client.ListByResourceGroupPreparer(resourceGroupName) + if err != nil { + return result, autorest.NewErrorWithError(err, "cdn/ProfilesClient", "ListByResourceGroup", autorest.UndefinedStatusCode, "Failure preparing request") + } + + resp, err := client.ListByResourceGroupSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "cdn/ProfilesClient", "ListByResourceGroup", resp.StatusCode, "Failure sending request") + } + + result, err = client.ListByResourceGroupResponder(resp) + if err != nil { + ae = autorest.NewErrorWithError(err, "cdn/ProfilesClient", "ListByResourceGroup", resp.StatusCode, "Failure responding to request") + } + + return +} + +// ListByResourceGroupPreparer prepares the ListByResourceGroup request. 
+func (client ProfilesClient) ListByResourceGroupPreparer(resourceGroupName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": url.QueryEscape(resourceGroupName), + "subscriptionId": url.QueryEscape(client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Cdn/profiles"), + autorest.WithPathParameters(pathParameters), + autorest.WithQueryParameters(queryParameters)) +} + +// ListByResourceGroupSender sends the ListByResourceGroup request. The method will close the +// http.Response Body if it receives an error. +func (client ProfilesClient) ListByResourceGroupSender(req *http.Request) (*http.Response, error) { + return client.Send(req, http.StatusOK) +} + +// ListByResourceGroupResponder handles the response to the ListByResourceGroup request. The method always +// closes the http.Response Body. +func (client ProfilesClient) ListByResourceGroupResponder(resp *http.Response) (result ProfileListResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + autorest.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// Update sends the update request. +// +// profileName is name of the CDN profile within the resource group +// profileProperties is profile properties needed for update +// resourceGroupName is name of the resource group within the Azure +// subscription +func (client ProfilesClient) Update(profileName string, profileProperties ProfileUpdateParameters, resourceGroupName string) (result Profile, ae error) { + req, err := client.UpdatePreparer(profileName, profileProperties, resourceGroupName) + if err != nil { + return result, autorest.NewErrorWithError(err, "cdn/ProfilesClient", "Update", autorest.UndefinedStatusCode, "Failure preparing request") + } + + resp, err := client.UpdateSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "cdn/ProfilesClient", "Update", resp.StatusCode, "Failure sending request") + } + + result, err = client.UpdateResponder(resp) + if err != nil { + ae = autorest.NewErrorWithError(err, "cdn/ProfilesClient", "Update", resp.StatusCode, "Failure responding to request") + } + + return +} + +// UpdatePreparer prepares the Update request. 
+func (client ProfilesClient) UpdatePreparer(profileName string, profileProperties ProfileUpdateParameters, resourceGroupName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "profileName": url.QueryEscape(profileName), + "resourceGroupName": url.QueryEscape(resourceGroupName), + "subscriptionId": url.QueryEscape(client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsPatch(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Cdn/profiles/{profileName}"), + autorest.WithJSON(profileProperties), + autorest.WithPathParameters(pathParameters), + autorest.WithQueryParameters(queryParameters)) +} + +// UpdateSender sends the Update request. The method will close the +// http.Response Body if it receives an error. +func (client ProfilesClient) UpdateSender(req *http.Request) (*http.Response, error) { + return client.Send(req, http.StatusOK, http.StatusAccepted) +} + +// UpdateResponder handles the response to the Update request. The method always +// closes the http.Response Body. +func (client ProfilesClient) UpdateResponder(resp *http.Response) (result Profile, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + autorest.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/cdn/version.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/cdn/version.go new file mode 100644 index 000000000..2fafc41e5 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/cdn/version.go @@ -0,0 +1,43 @@ +package cdn + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator 0.12.0.0 +// Changes may cause incorrect behavior and will be lost if the code is +// regenerated. + +import ( + "fmt" +) + +const ( + major = "0" + minor = "4" + patch = "0" + // Always begin a "tag" with a dash (as per http://semver.org) + tag = "-beta" + semVerFormat = "%s.%s.%s%s" + userAgentFormat = "Azure-SDK-for-Go/%s;Package arm/%s;API %s" +) + +// UserAgent returns the UserAgent string to use when sending http.Requests. +func UserAgent() string { + return fmt.Sprintf(userAgentFormat, Version(), "cdn", "2015-06-01") +} + +// Version returns the semantic version (see http://semver.org) of the client. 
+func Version() string { + return fmt.Sprintf(semVerFormat, major, minor, patch, tag) +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/availabilitysets.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/availabilitysets.go new file mode 100644 index 000000000..4e740ff40 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/availabilitysets.go @@ -0,0 +1,410 @@ +package compute + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator 0.12.0.0 +// Changes may cause incorrect behavior and will be lost if the code is +// regenerated. + +import ( + "github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest" + "net/http" + "net/url" +) + +// AvailabilitySetsClient is the the Compute Management Client. +type AvailabilitySetsClient struct { + ManagementClient +} + +// NewAvailabilitySetsClient creates an instance of the AvailabilitySetsClient +// client. +func NewAvailabilitySetsClient(subscriptionID string) AvailabilitySetsClient { + return NewAvailabilitySetsClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewAvailabilitySetsClientWithBaseURI creates an instance of the +// AvailabilitySetsClient client. +func NewAvailabilitySetsClientWithBaseURI(baseURI string, subscriptionID string) AvailabilitySetsClient { + return AvailabilitySetsClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// CreateOrUpdate the operation to create or update the availability set. +// +// resourceGroupName is the name of the resource group. name is parameters +// supplied to the Create Availability Set operation. parameters is +// parameters supplied to the Create Availability Set operation. +func (client AvailabilitySetsClient) CreateOrUpdate(resourceGroupName string, name string, parameters AvailabilitySet) (result AvailabilitySet, ae error) { + req, err := client.CreateOrUpdatePreparer(resourceGroupName, name, parameters) + if err != nil { + return result, autorest.NewErrorWithError(err, "compute/AvailabilitySetsClient", "CreateOrUpdate", autorest.UndefinedStatusCode, "Failure preparing request") + } + + resp, err := client.CreateOrUpdateSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "compute/AvailabilitySetsClient", "CreateOrUpdate", resp.StatusCode, "Failure sending request") + } + + result, err = client.CreateOrUpdateResponder(resp) + if err != nil { + ae = autorest.NewErrorWithError(err, "compute/AvailabilitySetsClient", "CreateOrUpdate", resp.StatusCode, "Failure responding to request") + } + + return +} + +// CreateOrUpdatePreparer prepares the CreateOrUpdate request. 
+func (client AvailabilitySetsClient) CreateOrUpdatePreparer(resourceGroupName string, name string, parameters AvailabilitySet) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "name": url.QueryEscape(name), + "resourceGroupName": url.QueryEscape(resourceGroupName), + "subscriptionId": url.QueryEscape(client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsPut(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/availabilitySets/{name}"), + autorest.WithJSON(parameters), + autorest.WithPathParameters(pathParameters), + autorest.WithQueryParameters(queryParameters)) +} + +// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the +// http.Response Body if it receives an error. +func (client AvailabilitySetsClient) CreateOrUpdateSender(req *http.Request) (*http.Response, error) { + return client.Send(req, http.StatusOK) +} + +// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always +// closes the http.Response Body. +func (client AvailabilitySetsClient) CreateOrUpdateResponder(resp *http.Response) (result AvailabilitySet, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + autorest.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// Delete the operation to delete the availability set. +// +// resourceGroupName is the name of the resource group. availabilitySetName is +// the name of the availability set. +func (client AvailabilitySetsClient) Delete(resourceGroupName string, availabilitySetName string) (result autorest.Response, ae error) { + req, err := client.DeletePreparer(resourceGroupName, availabilitySetName) + if err != nil { + return result, autorest.NewErrorWithError(err, "compute/AvailabilitySetsClient", "Delete", autorest.UndefinedStatusCode, "Failure preparing request") + } + + resp, err := client.DeleteSender(req) + if err != nil { + result.Response = resp + return result, autorest.NewErrorWithError(err, "compute/AvailabilitySetsClient", "Delete", resp.StatusCode, "Failure sending request") + } + + result, err = client.DeleteResponder(resp) + if err != nil { + ae = autorest.NewErrorWithError(err, "compute/AvailabilitySetsClient", "Delete", resp.StatusCode, "Failure responding to request") + } + + return +} + +// DeletePreparer prepares the Delete request. +func (client AvailabilitySetsClient) DeletePreparer(resourceGroupName string, availabilitySetName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "availabilitySetName": url.QueryEscape(availabilitySetName), + "resourceGroupName": url.QueryEscape(resourceGroupName), + "subscriptionId": url.QueryEscape(client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsDelete(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/availabilitySets/{availabilitySetName}"), + autorest.WithPathParameters(pathParameters), + autorest.WithQueryParameters(queryParameters)) +} + +// DeleteSender sends the Delete request. 
The method will close the +// http.Response Body if it receives an error. +func (client AvailabilitySetsClient) DeleteSender(req *http.Request) (*http.Response, error) { + return client.Send(req, http.StatusNoContent, http.StatusOK) +} + +// DeleteResponder handles the response to the Delete request. The method always +// closes the http.Response Body. +func (client AvailabilitySetsClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + autorest.WithErrorUnlessStatusCode(http.StatusNoContent, http.StatusOK), + autorest.ByClosing()) + result.Response = resp + return +} + +// Get the operation to get the availability set. +// +// resourceGroupName is the name of the resource group. availabilitySetName is +// the name of the availability set. +func (client AvailabilitySetsClient) Get(resourceGroupName string, availabilitySetName string) (result AvailabilitySet, ae error) { + req, err := client.GetPreparer(resourceGroupName, availabilitySetName) + if err != nil { + return result, autorest.NewErrorWithError(err, "compute/AvailabilitySetsClient", "Get", autorest.UndefinedStatusCode, "Failure preparing request") + } + + resp, err := client.GetSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "compute/AvailabilitySetsClient", "Get", resp.StatusCode, "Failure sending request") + } + + result, err = client.GetResponder(resp) + if err != nil { + ae = autorest.NewErrorWithError(err, "compute/AvailabilitySetsClient", "Get", resp.StatusCode, "Failure responding to request") + } + + return +} + +// GetPreparer prepares the Get request. +func (client AvailabilitySetsClient) GetPreparer(resourceGroupName string, availabilitySetName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "availabilitySetName": url.QueryEscape(availabilitySetName), + "resourceGroupName": url.QueryEscape(resourceGroupName), + "subscriptionId": url.QueryEscape(client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/availabilitySets/{availabilitySetName}"), + autorest.WithPathParameters(pathParameters), + autorest.WithQueryParameters(queryParameters)) +} + +// GetSender sends the Get request. The method will close the +// http.Response Body if it receives an error. +func (client AvailabilitySetsClient) GetSender(req *http.Request) (*http.Response, error) { + return client.Send(req, http.StatusOK) +} + +// GetResponder handles the response to the Get request. The method always +// closes the http.Response Body. +func (client AvailabilitySetsClient) GetResponder(resp *http.Response) (result AvailabilitySet, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + autorest.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// List the operation to list the availability sets. +// +// resourceGroupName is the name of the resource group. 
+func (client AvailabilitySetsClient) List(resourceGroupName string) (result AvailabilitySetListResult, ae error) { + req, err := client.ListPreparer(resourceGroupName) + if err != nil { + return result, autorest.NewErrorWithError(err, "compute/AvailabilitySetsClient", "List", autorest.UndefinedStatusCode, "Failure preparing request") + } + + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "compute/AvailabilitySetsClient", "List", resp.StatusCode, "Failure sending request") + } + + result, err = client.ListResponder(resp) + if err != nil { + ae = autorest.NewErrorWithError(err, "compute/AvailabilitySetsClient", "List", resp.StatusCode, "Failure responding to request") + } + + return +} + +// ListPreparer prepares the List request. +func (client AvailabilitySetsClient) ListPreparer(resourceGroupName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": url.QueryEscape(resourceGroupName), + "subscriptionId": url.QueryEscape(client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/availabilitySets"), + autorest.WithPathParameters(pathParameters), + autorest.WithQueryParameters(queryParameters)) +} + +// ListSender sends the List request. The method will close the +// http.Response Body if it receives an error. +func (client AvailabilitySetsClient) ListSender(req *http.Request) (*http.Response, error) { + return client.Send(req, http.StatusOK) +} + +// ListResponder handles the response to the List request. The method always +// closes the http.Response Body. +func (client AvailabilitySetsClient) ListResponder(resp *http.Response) (result AvailabilitySetListResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + autorest.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// ListNextResults retrieves the next set of results, if any. +func (client AvailabilitySetsClient) ListNextResults(lastResults AvailabilitySetListResult) (result AvailabilitySetListResult, ae error) { + req, err := lastResults.AvailabilitySetListResultPreparer() + if err != nil { + return result, autorest.NewErrorWithError(err, "compute/AvailabilitySetsClient", "List", autorest.UndefinedStatusCode, "Failure preparing next results request request") + } + if req == nil { + return + } + + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "compute/AvailabilitySetsClient", "List", resp.StatusCode, "Failure sending next results request request") + } + + result, err = client.ListResponder(resp) + if err != nil { + ae = autorest.NewErrorWithError(err, "compute/AvailabilitySetsClient", "List", resp.StatusCode, "Failure responding to next results request request") + } + + return +} + +// ListAvailableSizes lists virtual-machine-sizes available to be used for an +// availability set. +// +// resourceGroupName is the name of the resource group. availabilitySetName is +// the name of the availability set. 
+func (client AvailabilitySetsClient) ListAvailableSizes(resourceGroupName string, availabilitySetName string) (result VirtualMachineSizeListResult, ae error) { + req, err := client.ListAvailableSizesPreparer(resourceGroupName, availabilitySetName) + if err != nil { + return result, autorest.NewErrorWithError(err, "compute/AvailabilitySetsClient", "ListAvailableSizes", autorest.UndefinedStatusCode, "Failure preparing request") + } + + resp, err := client.ListAvailableSizesSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "compute/AvailabilitySetsClient", "ListAvailableSizes", resp.StatusCode, "Failure sending request") + } + + result, err = client.ListAvailableSizesResponder(resp) + if err != nil { + ae = autorest.NewErrorWithError(err, "compute/AvailabilitySetsClient", "ListAvailableSizes", resp.StatusCode, "Failure responding to request") + } + + return +} + +// ListAvailableSizesPreparer prepares the ListAvailableSizes request. +func (client AvailabilitySetsClient) ListAvailableSizesPreparer(resourceGroupName string, availabilitySetName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "availabilitySetName": url.QueryEscape(availabilitySetName), + "resourceGroupName": url.QueryEscape(resourceGroupName), + "subscriptionId": url.QueryEscape(client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/availabilitySets/{availabilitySetName}/vmSizes"), + autorest.WithPathParameters(pathParameters), + autorest.WithQueryParameters(queryParameters)) +} + +// ListAvailableSizesSender sends the ListAvailableSizes request. The method will close the +// http.Response Body if it receives an error. +func (client AvailabilitySetsClient) ListAvailableSizesSender(req *http.Request) (*http.Response, error) { + return client.Send(req, http.StatusOK) +} + +// ListAvailableSizesResponder handles the response to the ListAvailableSizes request. The method always +// closes the http.Response Body. +func (client AvailabilitySetsClient) ListAvailableSizesResponder(resp *http.Response) (result VirtualMachineSizeListResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + autorest.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// ListAvailableSizesNextResults retrieves the next set of results, if any. 
+func (client AvailabilitySetsClient) ListAvailableSizesNextResults(lastResults VirtualMachineSizeListResult) (result VirtualMachineSizeListResult, ae error) { + req, err := lastResults.VirtualMachineSizeListResultPreparer() + if err != nil { + return result, autorest.NewErrorWithError(err, "compute/AvailabilitySetsClient", "ListAvailableSizes", autorest.UndefinedStatusCode, "Failure preparing next results request request") + } + if req == nil { + return + } + + resp, err := client.ListAvailableSizesSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "compute/AvailabilitySetsClient", "ListAvailableSizes", resp.StatusCode, "Failure sending next results request request") + } + + result, err = client.ListAvailableSizesResponder(resp) + if err != nil { + ae = autorest.NewErrorWithError(err, "compute/AvailabilitySetsClient", "ListAvailableSizes", resp.StatusCode, "Failure responding to next results request request") + } + + return +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/client.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/client.go new file mode 100644 index 000000000..537fd1196 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/client.go @@ -0,0 +1,52 @@ +package compute + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator 0.12.0.0 +// Changes may cause incorrect behavior and will be lost if the code is +// regenerated. + +import ( + "github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest" +) + +const ( + // APIVersion is the version of the Compute + APIVersion = "2015-06-15" + + // DefaultBaseURI is the default URI used for the service Compute + DefaultBaseURI = "https://management.azure.com" +) + +// ManagementClient is the the Compute Management Client. +type ManagementClient struct { + autorest.Client + BaseURI string + SubscriptionID string +} + +// New creates an instance of the ManagementClient client. +func New(subscriptionID string) ManagementClient { + return NewWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewWithBaseURI creates an instance of the ManagementClient client. +func NewWithBaseURI(baseURI string, subscriptionID string) ManagementClient { + return ManagementClient{ + Client: autorest.NewClientWithUserAgent(UserAgent()), + BaseURI: baseURI, + SubscriptionID: subscriptionID, + } +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/models.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/models.go new file mode 100644 index 000000000..2936a2bc5 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/models.go @@ -0,0 +1,1208 @@ +package compute + +// Copyright (c) Microsoft and contributors. All rights reserved. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator 0.12.0.0 +// Changes may cause incorrect behavior and will be lost if the code is +// regenerated. + +import ( + "github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest" + "github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/date" + "github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/to" + "net/http" +) + +// CachingTypes enumerates the values for caching types. +type CachingTypes string + +const ( + // None specifies the none state for caching types. + None CachingTypes = "None" + // ReadOnly specifies the read only state for caching types. + ReadOnly CachingTypes = "ReadOnly" + // ReadWrite specifies the read write state for caching types. + ReadWrite CachingTypes = "ReadWrite" +) + +// ComponentNames enumerates the values for component names. +type ComponentNames string + +const ( + // MicrosoftWindowsShellSetup specifies the microsoft windows shell setup + // state for component names. + MicrosoftWindowsShellSetup ComponentNames = "Microsoft-Windows-Shell-Setup" +) + +// DiskCreateOptionTypes enumerates the values for disk create option types. +type DiskCreateOptionTypes string + +const ( + // Attach specifies the attach state for disk create option types. + Attach DiskCreateOptionTypes = "attach" + // Empty specifies the empty state for disk create option types. + Empty DiskCreateOptionTypes = "empty" + // FromImage specifies the from image state for disk create option types. + FromImage DiskCreateOptionTypes = "fromImage" +) + +// OperatingSystemTypes enumerates the values for operating system types. +type OperatingSystemTypes string + +const ( + // Linux specifies the linux state for operating system types. + Linux OperatingSystemTypes = "Linux" + // Windows specifies the windows state for operating system types. + Windows OperatingSystemTypes = "Windows" +) + +// OperationStatus enumerates the values for operation status. +type OperationStatus string + +const ( + // Failed specifies the failed state for operation status. + Failed OperationStatus = "Failed" + // InProgress specifies the in progress state for operation status. + InProgress OperationStatus = "InProgress" + // Succeeded specifies the succeeded state for operation status. + Succeeded OperationStatus = "Succeeded" +) + +// OperationStatusEnum enumerates the values for operation status enum. +type OperationStatusEnum string + +const ( + // OperationStatusEnumFailed specifies the operation status enum failed + // state for operation status enum. + OperationStatusEnumFailed OperationStatusEnum = "Failed" + // OperationStatusEnumInProgress specifies the operation status enum in + // progress state for operation status enum. 
+ OperationStatusEnumInProgress OperationStatusEnum = "InProgress" + // OperationStatusEnumPreempted specifies the operation status enum + // preempted state for operation status enum. + OperationStatusEnumPreempted OperationStatusEnum = "Preempted" + // OperationStatusEnumSucceeded specifies the operation status enum + // succeeded state for operation status enum. + OperationStatusEnumSucceeded OperationStatusEnum = "Succeeded" +) + +// PassNames enumerates the values for pass names. +type PassNames string + +const ( + // OobeSystem specifies the oobe system state for pass names. + OobeSystem PassNames = "oobeSystem" +) + +// ProtocolTypes enumerates the values for protocol types. +type ProtocolTypes string + +const ( + // HTTP specifies the http state for protocol types. + HTTP ProtocolTypes = "Http" + // HTTPS specifies the https state for protocol types. + HTTPS ProtocolTypes = "Https" +) + +// SettingNames enumerates the values for setting names. +type SettingNames string + +const ( + // AutoLogon specifies the auto logon state for setting names. + AutoLogon SettingNames = "AutoLogon" + // FirstLogonCommands specifies the first logon commands state for setting + // names. + FirstLogonCommands SettingNames = "FirstLogonCommands" +) + +// StatusLevelTypes enumerates the values for status level types. +type StatusLevelTypes string + +const ( + // Error specifies the error state for status level types. + Error StatusLevelTypes = "Error" + // Info specifies the info state for status level types. + Info StatusLevelTypes = "Info" + // Warning specifies the warning state for status level types. + Warning StatusLevelTypes = "Warning" +) + +// UpgradeMode enumerates the values for upgrade mode. +type UpgradeMode string + +const ( + // Automatic specifies the automatic state for upgrade mode. + Automatic UpgradeMode = "Automatic" + // Manual specifies the manual state for upgrade mode. + Manual UpgradeMode = "Manual" +) + +// UsageUnit enumerates the values for usage unit. +type UsageUnit string + +const ( + // Count specifies the count state for usage unit. + Count UsageUnit = "Count" +) + +// VirtualMachineScaleSetSkuScaleType enumerates the values for virtual +// machine scale set sku scale type. +type VirtualMachineScaleSetSkuScaleType string + +const ( + // VirtualMachineScaleSetSkuScaleTypeAutomatic specifies the virtual + // machine scale set sku scale type automatic state for virtual machine + // scale set sku scale type. + VirtualMachineScaleSetSkuScaleTypeAutomatic VirtualMachineScaleSetSkuScaleType = "Automatic" + // VirtualMachineScaleSetSkuScaleTypeNone specifies the virtual machine + // scale set sku scale type none state for virtual machine scale set sku + // scale type. + VirtualMachineScaleSetSkuScaleTypeNone VirtualMachineScaleSetSkuScaleType = "None" +) + +// VirtualMachineSizeTypes enumerates the values for virtual machine size +// types. +type VirtualMachineSizeTypes string + +const ( + // BasicA0 specifies the basic a0 state for virtual machine size types. + BasicA0 VirtualMachineSizeTypes = "Basic_A0" + // BasicA1 specifies the basic a1 state for virtual machine size types. + BasicA1 VirtualMachineSizeTypes = "Basic_A1" + // BasicA2 specifies the basic a2 state for virtual machine size types. + BasicA2 VirtualMachineSizeTypes = "Basic_A2" + // BasicA3 specifies the basic a3 state for virtual machine size types. + BasicA3 VirtualMachineSizeTypes = "Basic_A3" + // BasicA4 specifies the basic a4 state for virtual machine size types. 
+ BasicA4 VirtualMachineSizeTypes = "Basic_A4" + // StandardA0 specifies the standard a0 state for virtual machine size + // types. + StandardA0 VirtualMachineSizeTypes = "Standard_A0" + // StandardA1 specifies the standard a1 state for virtual machine size + // types. + StandardA1 VirtualMachineSizeTypes = "Standard_A1" + // StandardA10 specifies the standard a10 state for virtual machine size + // types. + StandardA10 VirtualMachineSizeTypes = "Standard_A10" + // StandardA11 specifies the standard a11 state for virtual machine size + // types. + StandardA11 VirtualMachineSizeTypes = "Standard_A11" + // StandardA2 specifies the standard a2 state for virtual machine size + // types. + StandardA2 VirtualMachineSizeTypes = "Standard_A2" + // StandardA3 specifies the standard a3 state for virtual machine size + // types. + StandardA3 VirtualMachineSizeTypes = "Standard_A3" + // StandardA4 specifies the standard a4 state for virtual machine size + // types. + StandardA4 VirtualMachineSizeTypes = "Standard_A4" + // StandardA5 specifies the standard a5 state for virtual machine size + // types. + StandardA5 VirtualMachineSizeTypes = "Standard_A5" + // StandardA6 specifies the standard a6 state for virtual machine size + // types. + StandardA6 VirtualMachineSizeTypes = "Standard_A6" + // StandardA7 specifies the standard a7 state for virtual machine size + // types. + StandardA7 VirtualMachineSizeTypes = "Standard_A7" + // StandardA8 specifies the standard a8 state for virtual machine size + // types. + StandardA8 VirtualMachineSizeTypes = "Standard_A8" + // StandardA9 specifies the standard a9 state for virtual machine size + // types. + StandardA9 VirtualMachineSizeTypes = "Standard_A9" + // StandardD1 specifies the standard d1 state for virtual machine size + // types. + StandardD1 VirtualMachineSizeTypes = "Standard_D1" + // StandardD11 specifies the standard d11 state for virtual machine size + // types. + StandardD11 VirtualMachineSizeTypes = "Standard_D11" + // StandardD11V2 specifies the standard d11v2 state for virtual machine + // size types. + StandardD11V2 VirtualMachineSizeTypes = "Standard_D11_v2" + // StandardD12 specifies the standard d12 state for virtual machine size + // types. + StandardD12 VirtualMachineSizeTypes = "Standard_D12" + // StandardD12V2 specifies the standard d12v2 state for virtual machine + // size types. + StandardD12V2 VirtualMachineSizeTypes = "Standard_D12_v2" + // StandardD13 specifies the standard d13 state for virtual machine size + // types. + StandardD13 VirtualMachineSizeTypes = "Standard_D13" + // StandardD13V2 specifies the standard d13v2 state for virtual machine + // size types. + StandardD13V2 VirtualMachineSizeTypes = "Standard_D13_v2" + // StandardD14 specifies the standard d14 state for virtual machine size + // types. + StandardD14 VirtualMachineSizeTypes = "Standard_D14" + // StandardD14V2 specifies the standard d14v2 state for virtual machine + // size types. + StandardD14V2 VirtualMachineSizeTypes = "Standard_D14_v2" + // StandardD1V2 specifies the standard d1v2 state for virtual machine size + // types. + StandardD1V2 VirtualMachineSizeTypes = "Standard_D1_v2" + // StandardD2 specifies the standard d2 state for virtual machine size + // types. + StandardD2 VirtualMachineSizeTypes = "Standard_D2" + // StandardD2V2 specifies the standard d2v2 state for virtual machine size + // types. + StandardD2V2 VirtualMachineSizeTypes = "Standard_D2_v2" + // StandardD3 specifies the standard d3 state for virtual machine size + // types. 
+ StandardD3 VirtualMachineSizeTypes = "Standard_D3" + // StandardD3V2 specifies the standard d3v2 state for virtual machine size + // types. + StandardD3V2 VirtualMachineSizeTypes = "Standard_D3_v2" + // StandardD4 specifies the standard d4 state for virtual machine size + // types. + StandardD4 VirtualMachineSizeTypes = "Standard_D4" + // StandardD4V2 specifies the standard d4v2 state for virtual machine size + // types. + StandardD4V2 VirtualMachineSizeTypes = "Standard_D4_v2" + // StandardD5V2 specifies the standard d5v2 state for virtual machine size + // types. + StandardD5V2 VirtualMachineSizeTypes = "Standard_D5_v2" + // StandardDS1 specifies the standard ds1 state for virtual machine size + // types. + StandardDS1 VirtualMachineSizeTypes = "Standard_DS1" + // StandardDS11 specifies the standard ds11 state for virtual machine size + // types. + StandardDS11 VirtualMachineSizeTypes = "Standard_DS11" + // StandardDS12 specifies the standard ds12 state for virtual machine size + // types. + StandardDS12 VirtualMachineSizeTypes = "Standard_DS12" + // StandardDS13 specifies the standard ds13 state for virtual machine size + // types. + StandardDS13 VirtualMachineSizeTypes = "Standard_DS13" + // StandardDS14 specifies the standard ds14 state for virtual machine size + // types. + StandardDS14 VirtualMachineSizeTypes = "Standard_DS14" + // StandardDS2 specifies the standard ds2 state for virtual machine size + // types. + StandardDS2 VirtualMachineSizeTypes = "Standard_DS2" + // StandardDS3 specifies the standard ds3 state for virtual machine size + // types. + StandardDS3 VirtualMachineSizeTypes = "Standard_DS3" + // StandardDS4 specifies the standard ds4 state for virtual machine size + // types. + StandardDS4 VirtualMachineSizeTypes = "Standard_DS4" + // StandardG1 specifies the standard g1 state for virtual machine size + // types. + StandardG1 VirtualMachineSizeTypes = "Standard_G1" + // StandardG2 specifies the standard g2 state for virtual machine size + // types. + StandardG2 VirtualMachineSizeTypes = "Standard_G2" + // StandardG3 specifies the standard g3 state for virtual machine size + // types. + StandardG3 VirtualMachineSizeTypes = "Standard_G3" + // StandardG4 specifies the standard g4 state for virtual machine size + // types. + StandardG4 VirtualMachineSizeTypes = "Standard_G4" + // StandardG5 specifies the standard g5 state for virtual machine size + // types. + StandardG5 VirtualMachineSizeTypes = "Standard_G5" + // StandardGS1 specifies the standard gs1 state for virtual machine size + // types. + StandardGS1 VirtualMachineSizeTypes = "Standard_GS1" + // StandardGS2 specifies the standard gs2 state for virtual machine size + // types. + StandardGS2 VirtualMachineSizeTypes = "Standard_GS2" + // StandardGS3 specifies the standard gs3 state for virtual machine size + // types. + StandardGS3 VirtualMachineSizeTypes = "Standard_GS3" + // StandardGS4 specifies the standard gs4 state for virtual machine size + // types. + StandardGS4 VirtualMachineSizeTypes = "Standard_GS4" + // StandardGS5 specifies the standard gs5 state for virtual machine size + // types. + StandardGS5 VirtualMachineSizeTypes = "Standard_GS5" +) + +// AdditionalUnattendContent is gets or sets additional XML formatted +// information that can be included in the Unattend.xml file, which is used +// by Windows Setup. Contents are defined by setting name, component name, +// and the pass in which the content is a applied. 
+type AdditionalUnattendContent struct { + PassName PassNames `json:"passName,omitempty"` + ComponentName ComponentNames `json:"componentName,omitempty"` + SettingName SettingNames `json:"settingName,omitempty"` + Content *string `json:"content,omitempty"` +} + +// APIEntityReference is the API entity reference. +type APIEntityReference struct { + ID *string `json:"id,omitempty"` +} + +// APIError is api error. +type APIError struct { + Details *[]APIErrorBase `json:"details,omitempty"` + Innererror *InnerError `json:"innererror,omitempty"` + Code *string `json:"code,omitempty"` + Target *string `json:"target,omitempty"` + Message *string `json:"message,omitempty"` +} + +// APIErrorBase is api error base. +type APIErrorBase struct { + Code *string `json:"code,omitempty"` + Target *string `json:"target,omitempty"` + Message *string `json:"message,omitempty"` +} + +// AvailabilitySet is create or update Availability Set parameters. +type AvailabilitySet struct { + autorest.Response `json:"-"` + ID *string `json:"id,omitempty"` + Name *string `json:"name,omitempty"` + Type *string `json:"type,omitempty"` + Location *string `json:"location,omitempty"` + Tags *map[string]*string `json:"tags,omitempty"` + Properties *AvailabilitySetProperties `json:"properties,omitempty"` +} + +// AvailabilitySetListResult is the List Availability Set operation response. +type AvailabilitySetListResult struct { + autorest.Response `json:"-"` + Value *[]AvailabilitySet `json:"value,omitempty"` + NextLink *string `json:",omitempty"` +} + +// AvailabilitySetListResultPreparer prepares a request to retrieve the next set of results. It returns +// nil if no more results exist. +func (client AvailabilitySetListResult) AvailabilitySetListResultPreparer() (*http.Request, error) { + if client.NextLink == nil || len(to.String(client.NextLink)) <= 0 { + return nil, nil + } + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(to.String(client.NextLink))) +} + +// AvailabilitySetProperties is the instance view of a resource. +type AvailabilitySetProperties struct { + PlatformUpdateDomainCount *int `json:"platformUpdateDomainCount,omitempty"` + PlatformFaultDomainCount *int `json:"platformFaultDomainCount,omitempty"` + VirtualMachines *[]SubResource `json:"virtualMachines,omitempty"` + Statuses *[]InstanceViewStatus `json:"statuses,omitempty"` +} + +// BootDiagnostics is describes Boot Diagnostics. +type BootDiagnostics struct { + Enabled *bool `json:"enabled,omitempty"` + StorageURI *string `json:"storageUri,omitempty"` +} + +// BootDiagnosticsInstanceView is the instance view of a virtual machine boot +// diagnostics. +type BootDiagnosticsInstanceView struct { + ConsoleScreenshotBlobURI *string `json:"consoleScreenshotBlobUri,omitempty"` + SerialConsoleLogBlobURI *string `json:"serialConsoleLogBlobUri,omitempty"` +} + +// DataDisk is describes a data disk. +type DataDisk struct { + Lun *int `json:"lun,omitempty"` + Name *string `json:"name,omitempty"` + Vhd *VirtualHardDisk `json:"vhd,omitempty"` + Image *VirtualHardDisk `json:"image,omitempty"` + Caching CachingTypes `json:"caching,omitempty"` + CreateOption DiskCreateOptionTypes `json:"createOption,omitempty"` + DiskSizeGB *int `json:"diskSizeGB,omitempty"` +} + +// DataDiskImage is contains the data disk images information. +type DataDiskImage struct { + Lun *int `json:"lun,omitempty"` +} + +// DeleteOperationResult is the compute long running operation response. 
+type DeleteOperationResult struct { + OperationID *string `json:"operationId,omitempty"` + Status OperationStatus `json:"status,omitempty"` + StartTime *date.Time `json:"startTime,omitempty"` + EndTime *date.Time `json:"endTime,omitempty"` + Error *APIError `json:"error,omitempty"` +} + +// DiagnosticsProfile is describes a diagnostics profile. +type DiagnosticsProfile struct { + BootDiagnostics *BootDiagnostics `json:"bootDiagnostics,omitempty"` +} + +// DiskEncryptionSettings is describes a Encryption Settings for a Disk +type DiskEncryptionSettings struct { + DiskEncryptionKey *KeyVaultSecretReference `json:"diskEncryptionKey,omitempty"` + KeyEncryptionKey *KeyVaultKeyReference `json:"keyEncryptionKey,omitempty"` +} + +// DiskInstanceView is the instance view of the disk. +type DiskInstanceView struct { + Name *string `json:"name,omitempty"` + Statuses *[]InstanceViewStatus `json:"statuses,omitempty"` +} + +// HardwareProfile is describes a hardware profile. +type HardwareProfile struct { + VMSize VirtualMachineSizeTypes `json:"vmSize,omitempty"` +} + +// ImageReference is the image reference. +type ImageReference struct { + Publisher *string `json:"publisher,omitempty"` + Offer *string `json:"offer,omitempty"` + Sku *string `json:"sku,omitempty"` + Version *string `json:"version,omitempty"` +} + +// InnerError is inner error details. +type InnerError struct { + Exceptiontype *string `json:"exceptiontype,omitempty"` + Errordetail *string `json:"errordetail,omitempty"` +} + +// InstanceViewStatus is instance view status. +type InstanceViewStatus struct { + Code *string `json:"code,omitempty"` + Level StatusLevelTypes `json:"level,omitempty"` + DisplayStatus *string `json:"displayStatus,omitempty"` + Message *string `json:"message,omitempty"` + Time *date.Time `json:"time,omitempty"` +} + +// KeyVaultKeyReference is describes a reference to Key Vault Key +type KeyVaultKeyReference struct { + KeyURL *string `json:"keyUrl,omitempty"` + SourceVault *SubResource `json:"sourceVault,omitempty"` +} + +// KeyVaultSecretReference is describes a reference to Key Vault Secret +type KeyVaultSecretReference struct { + SecretURL *string `json:"secretUrl,omitempty"` + SourceVault *SubResource `json:"sourceVault,omitempty"` +} + +// LinuxConfiguration is describes Windows Configuration of the OS Profile. +type LinuxConfiguration struct { + DisablePasswordAuthentication *bool `json:"disablePasswordAuthentication,omitempty"` + SSH *SSHConfiguration `json:"ssh,omitempty"` +} + +// ListUsagesResult is the List Usages operation response. +type ListUsagesResult struct { + autorest.Response `json:"-"` + Value *[]Usage `json:"value,omitempty"` + NextLink *string `json:",omitempty"` +} + +// ListUsagesResultPreparer prepares a request to retrieve the next set of results. It returns +// nil if no more results exist. +func (client ListUsagesResult) ListUsagesResultPreparer() (*http.Request, error) { + if client.NextLink == nil || len(to.String(client.NextLink)) <= 0 { + return nil, nil + } + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(to.String(client.NextLink))) +} + +// LongRunningOperationProperties is compute-specific operation properties, +// including output +type LongRunningOperationProperties struct { + Output *map[string]interface{} `json:"output,omitempty"` +} + +// LongRunningOperationResult is the Compute service response for long-running +// operations. 
+type LongRunningOperationResult struct { + OperationID *string `json:"operationId,omitempty"` + Status OperationStatusEnum `json:"status,omitempty"` + StartTime *date.Time `json:"startTime,omitempty"` + EndTime *date.Time `json:"endTime,omitempty"` + Properties *LongRunningOperationProperties `json:"properties,omitempty"` + Error *APIError `json:"error,omitempty"` +} + +// NetworkInterfaceReference is describes a network interface reference. +type NetworkInterfaceReference struct { + ID *string `json:"id,omitempty"` + Properties *NetworkInterfaceReferenceProperties `json:"properties,omitempty"` +} + +// NetworkInterfaceReferenceProperties is describes a network interface +// reference properties. +type NetworkInterfaceReferenceProperties struct { + Primary *bool `json:"primary,omitempty"` +} + +// NetworkProfile is describes a network profile. +type NetworkProfile struct { + NetworkInterfaces *[]NetworkInterfaceReference `json:"networkInterfaces,omitempty"` +} + +// OSDisk is describes an Operating System disk. +type OSDisk struct { + OsType OperatingSystemTypes `json:"osType,omitempty"` + EncryptionSettings *DiskEncryptionSettings `json:"encryptionSettings,omitempty"` + Name *string `json:"name,omitempty"` + Vhd *VirtualHardDisk `json:"vhd,omitempty"` + Image *VirtualHardDisk `json:"image,omitempty"` + Caching CachingTypes `json:"caching,omitempty"` + CreateOption DiskCreateOptionTypes `json:"createOption,omitempty"` + DiskSizeGB *int `json:"diskSizeGB,omitempty"` +} + +// OSDiskImage is contains the os disk image information. +type OSDiskImage struct { + OperatingSystem OperatingSystemTypes `json:"operatingSystem,omitempty"` +} + +// OSProfile is describes an OS profile. +type OSProfile struct { + ComputerName *string `json:"computerName,omitempty"` + AdminUsername *string `json:"adminUsername,omitempty"` + AdminPassword *string `json:"adminPassword,omitempty"` + CustomData *string `json:"customData,omitempty"` + WindowsConfiguration *WindowsConfiguration `json:"windowsConfiguration,omitempty"` + LinuxConfiguration *LinuxConfiguration `json:"linuxConfiguration,omitempty"` + Secrets *[]VaultSecretGroup `json:"secrets,omitempty"` +} + +// Plan is plan for the resource. +type Plan struct { + Name *string `json:"name,omitempty"` + Publisher *string `json:"publisher,omitempty"` + Product *string `json:"product,omitempty"` + PromotionCode *string `json:"promotionCode,omitempty"` +} + +// PurchasePlan is used for establishing the purchase context of any 3rd Party +// artifact through MarketPlace. +type PurchasePlan struct { + Publisher *string `json:"publisher,omitempty"` + Name *string `json:"name,omitempty"` + Product *string `json:"product,omitempty"` +} + +// Resource is +type Resource struct { + ID *string `json:"id,omitempty"` + Name *string `json:"name,omitempty"` + Type *string `json:"type,omitempty"` + Location *string `json:"location,omitempty"` + Tags *map[string]*string `json:"tags,omitempty"` +} + +// Sku is describes a virtual machine scale set sku. +type Sku struct { + Name *string `json:"name,omitempty"` + Tier *string `json:"tier,omitempty"` + Capacity *int32 `json:"capacity,omitempty"` +} + +// SSHConfiguration is sSH configuration for Linux based VMs running on Azure +type SSHConfiguration struct { + PublicKeys *[]SSHPublicKey `json:"publicKeys,omitempty"` +} + +// SSHPublicKey is contains information about SSH certificate public key and +// the path on the Linux VM where the public key is placed. 
+type SSHPublicKey struct { + Path *string `json:"path,omitempty"` + KeyData *string `json:"keyData,omitempty"` +} + +// StorageProfile is describes a storage profile. +type StorageProfile struct { + ImageReference *ImageReference `json:"imageReference,omitempty"` + OsDisk *OSDisk `json:"osDisk,omitempty"` + DataDisks *[]DataDisk `json:"dataDisks,omitempty"` +} + +// SubResource is +type SubResource struct { + ID *string `json:"id,omitempty"` +} + +// UpgradePolicy is describes an upgrade policy - automatic or manual. +type UpgradePolicy struct { + Mode UpgradeMode `json:"mode,omitempty"` +} + +// Usage is describes Compute Resource Usage. +type Usage struct { + Unit UsageUnit `json:"unit,omitempty"` + CurrentValue *int `json:"currentValue,omitempty"` + Limit *int32 `json:"limit,omitempty"` + Name *UsageName `json:"name,omitempty"` +} + +// UsageName is the Usage Names. +type UsageName struct { + Value *string `json:"value,omitempty"` + LocalizedValue *string `json:"localizedValue,omitempty"` +} + +// VaultCertificate is describes a single certificate reference in a Key +// Vault, and where the certificate should reside on the VM. +type VaultCertificate struct { + CertificateURL *string `json:"certificateUrl,omitempty"` + CertificateStore *string `json:"certificateStore,omitempty"` +} + +// VaultSecretGroup is describes a set of certificates which are all in the +// same Key Vault. +type VaultSecretGroup struct { + SourceVault *SubResource `json:"sourceVault,omitempty"` + VaultCertificates *[]VaultCertificate `json:"vaultCertificates,omitempty"` +} + +// VirtualHardDisk is describes the uri of a disk. +type VirtualHardDisk struct { + URI *string `json:"uri,omitempty"` +} + +// VirtualMachine is describes a Virtual Machine. +type VirtualMachine struct { + autorest.Response `json:"-"` + ID *string `json:"id,omitempty"` + Name *string `json:"name,omitempty"` + Type *string `json:"type,omitempty"` + Location *string `json:"location,omitempty"` + Tags *map[string]*string `json:"tags,omitempty"` + Plan *Plan `json:"plan,omitempty"` + Properties *VirtualMachineProperties `json:"properties,omitempty"` + Resources *[]VirtualMachineExtension `json:"resources,omitempty"` +} + +// VirtualMachineAgentInstanceView is the instance view of the VM Agent +// running on the virtual machine. +type VirtualMachineAgentInstanceView struct { + VMAgentVersion *string `json:"vmAgentVersion,omitempty"` + ExtensionHandlers *[]VirtualMachineExtensionHandlerInstanceView `json:"extensionHandlers,omitempty"` + Statuses *[]InstanceViewStatus `json:"statuses,omitempty"` +} + +// VirtualMachineCaptureParameters is capture Virtual Machine parameters. +type VirtualMachineCaptureParameters struct { + VhdPrefix *string `json:"vhdPrefix,omitempty"` + DestinationContainerName *string `json:"destinationContainerName,omitempty"` + OverwriteVhds *bool `json:"overwriteVhds,omitempty"` +} + +// VirtualMachineCaptureResult is resource Id. +type VirtualMachineCaptureResult struct { + autorest.Response `json:"-"` + ID *string `json:"id,omitempty"` + Properties *VirtualMachineCaptureResultProperties `json:"properties,omitempty"` +} + +// VirtualMachineCaptureResultProperties is compute-specific operation +// properties, including output +type VirtualMachineCaptureResultProperties struct { + Output *map[string]interface{} `json:"output,omitempty"` +} + +// VirtualMachineExtension is describes a Virtual Machine Extension. 
+type VirtualMachineExtension struct { + autorest.Response `json:"-"` + ID *string `json:"id,omitempty"` + Name *string `json:"name,omitempty"` + Type *string `json:"type,omitempty"` + Location *string `json:"location,omitempty"` + Tags *map[string]*string `json:"tags,omitempty"` + Properties *VirtualMachineExtensionProperties `json:"properties,omitempty"` +} + +// VirtualMachineExtensionHandlerInstanceView is the instance view of a +// virtual machine extension handler. +type VirtualMachineExtensionHandlerInstanceView struct { + Type *string `json:"type,omitempty"` + TypeHandlerVersion *string `json:"typeHandlerVersion,omitempty"` + Status *InstanceViewStatus `json:"status,omitempty"` +} + +// VirtualMachineExtensionImage is describes a Virtual Machine Extension Image. +type VirtualMachineExtensionImage struct { + autorest.Response `json:"-"` + ID *string `json:"id,omitempty"` + Properties *VirtualMachineExtensionImageProperties `json:"properties,omitempty"` + Name *string `json:"name,omitempty"` + Location *string `json:"location,omitempty"` + Tags *map[string]*string `json:"tags,omitempty"` +} + +// VirtualMachineExtensionImageProperties is describes the properties of a +// Virtual Machine Extension Image. +type VirtualMachineExtensionImageProperties struct { + OperatingSystem *string `json:"operatingSystem,omitempty"` + ComputeRole *string `json:"computeRole,omitempty"` + HandlerSchema *string `json:"handlerSchema,omitempty"` + VMScaleSetEnabled *bool `json:"vmScaleSetEnabled,omitempty"` + SupportsMultipleExtensions *bool `json:"supportsMultipleExtensions,omitempty"` +} + +// VirtualMachineExtensionInstanceView is the instance view of a virtual +// machine extension. +type VirtualMachineExtensionInstanceView struct { + Name *string `json:"name,omitempty"` + Type *string `json:"type,omitempty"` + TypeHandlerVersion *string `json:"typeHandlerVersion,omitempty"` + Substatuses *[]InstanceViewStatus `json:"substatuses,omitempty"` + Statuses *[]InstanceViewStatus `json:"statuses,omitempty"` +} + +// VirtualMachineExtensionProperties is describes the properties of a Virtual +// Machine Extension. +type VirtualMachineExtensionProperties struct { + Publisher *string `json:"publisher,omitempty"` + Type *string `json:"type,omitempty"` + TypeHandlerVersion *string `json:"typeHandlerVersion,omitempty"` + AutoUpgradeMinorVersion *bool `json:"autoUpgradeMinorVersion,omitempty"` + Settings *map[string]interface{} `json:"settings,omitempty"` + ProtectedSettings *map[string]interface{} `json:"protectedSettings,omitempty"` + ProvisioningState *string `json:"provisioningState,omitempty"` + InstanceView *VirtualMachineExtensionInstanceView `json:"instanceView,omitempty"` +} + +// VirtualMachineImage is describes a Virtual Machine Image. +type VirtualMachineImage struct { + autorest.Response `json:"-"` + ID *string `json:"id,omitempty"` + Properties *VirtualMachineImageProperties `json:"properties,omitempty"` + Name *string `json:"name,omitempty"` + Location *string `json:"location,omitempty"` + Tags *map[string]*string `json:"tags,omitempty"` +} + +// VirtualMachineImageProperties is describes the properties of a Virtual +// Machine Image. +type VirtualMachineImageProperties struct { + Plan *PurchasePlan `json:"plan,omitempty"` + OsDiskImage *OSDiskImage `json:"osDiskImage,omitempty"` + DataDiskImages *[]DataDiskImage `json:"dataDiskImages,omitempty"` +} + +// VirtualMachineImageResource is virtual machine image resource information. 
+type VirtualMachineImageResource struct { + ID *string `json:"id,omitempty"` + Name *string `json:"name,omitempty"` + Location *string `json:"location,omitempty"` + Tags *map[string]*string `json:"tags,omitempty"` +} + +// VirtualMachineImageResourceList is +type VirtualMachineImageResourceList struct { + autorest.Response `json:"-"` + Value *VirtualMachineImageResource `json:"value,omitempty"` +} + +// VirtualMachineInstanceView is the instance view of a virtual machine. +type VirtualMachineInstanceView struct { + PlatformUpdateDomain *int `json:"platformUpdateDomain,omitempty"` + PlatformFaultDomain *int `json:"platformFaultDomain,omitempty"` + RdpThumbPrint *string `json:"rdpThumbPrint,omitempty"` + VMAgent *VirtualMachineAgentInstanceView `json:"vmAgent,omitempty"` + Disks *[]DiskInstanceView `json:"disks,omitempty"` + Extensions *[]VirtualMachineExtensionInstanceView `json:"extensions,omitempty"` + BootDiagnostics *BootDiagnosticsInstanceView `json:"bootDiagnostics,omitempty"` + Statuses *[]InstanceViewStatus `json:"statuses,omitempty"` +} + +// VirtualMachineListResult is the List Virtual Machine operation response. +type VirtualMachineListResult struct { + autorest.Response `json:"-"` + Value *[]VirtualMachine `json:"value,omitempty"` + NextLink *string `json:"nextLink,omitempty"` +} + +// VirtualMachineListResultPreparer prepares a request to retrieve the next set of results. It returns +// nil if no more results exist. +func (client VirtualMachineListResult) VirtualMachineListResultPreparer() (*http.Request, error) { + if client.NextLink == nil || len(to.String(client.NextLink)) <= 0 { + return nil, nil + } + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(to.String(client.NextLink))) +} + +// VirtualMachineProperties is describes the properties of a Virtual Machine. +type VirtualMachineProperties struct { + HardwareProfile *HardwareProfile `json:"hardwareProfile,omitempty"` + StorageProfile *StorageProfile `json:"storageProfile,omitempty"` + OsProfile *OSProfile `json:"osProfile,omitempty"` + NetworkProfile *NetworkProfile `json:"networkProfile,omitempty"` + DiagnosticsProfile *DiagnosticsProfile `json:"diagnosticsProfile,omitempty"` + AvailabilitySet *SubResource `json:"availabilitySet,omitempty"` + ProvisioningState *string `json:"provisioningState,omitempty"` + InstanceView *VirtualMachineInstanceView `json:"instanceView,omitempty"` +} + +// VirtualMachineScaleSet is describes a Virtual Machine Scale Set. +type VirtualMachineScaleSet struct { + autorest.Response `json:"-"` + ID *string `json:"id,omitempty"` + Name *string `json:"name,omitempty"` + Type *string `json:"type,omitempty"` + Location *string `json:"location,omitempty"` + Tags *map[string]*string `json:"tags,omitempty"` + Sku *Sku `json:"sku,omitempty"` + Properties *VirtualMachineScaleSetProperties `json:"properties,omitempty"` +} + +// VirtualMachineScaleSetExtension is describes a Virtual Machine Scale Set +// Extension. +type VirtualMachineScaleSetExtension struct { + ID *string `json:"id,omitempty"` + Name *string `json:"name,omitempty"` + Properties *VirtualMachineScaleSetExtensionProperties `json:"properties,omitempty"` +} + +// VirtualMachineScaleSetExtensionProfile is describes a virtual machine scale +// set extension profile. 
+type VirtualMachineScaleSetExtensionProfile struct { + Extensions *[]VirtualMachineScaleSetExtension `json:"extensions,omitempty"` +} + +// VirtualMachineScaleSetExtensionProperties is describes the properties of a +// Virtual Machine Scale Set Extension. +type VirtualMachineScaleSetExtensionProperties struct { + Publisher *string `json:"publisher,omitempty"` + Type *string `json:"type,omitempty"` + TypeHandlerVersion *string `json:"typeHandlerVersion,omitempty"` + AutoUpgradeMinorVersion *bool `json:"autoUpgradeMinorVersion,omitempty"` + Settings *map[string]interface{} `json:"settings,omitempty"` + ProtectedSettings *map[string]interface{} `json:"protectedSettings,omitempty"` + ProvisioningState *string `json:"provisioningState,omitempty"` +} + +// VirtualMachineScaleSetInstanceView is the instance view of a virtual +// machine scale set. +type VirtualMachineScaleSetInstanceView struct { + autorest.Response `json:"-"` + VirtualMachine *VirtualMachineScaleSetInstanceViewStatusesSummary `json:"virtualMachine,omitempty"` + Extensions *[]VirtualMachineScaleSetVMExtensionsSummary `json:"extensions,omitempty"` + Statuses *[]InstanceViewStatus `json:"statuses,omitempty"` +} + +// VirtualMachineScaleSetInstanceViewStatusesSummary is instance view statuses +// summary for virtual machines of a virtual machine scale set. +type VirtualMachineScaleSetInstanceViewStatusesSummary struct { + StatusesSummary *[]VirtualMachineStatusCodeCount `json:"statusesSummary,omitempty"` +} + +// VirtualMachineScaleSetIPConfiguration is describes a virtual machine scale +// set network profile's IP configuration. +type VirtualMachineScaleSetIPConfiguration struct { + ID *string `json:"id,omitempty"` + Name *string `json:"name,omitempty"` + Properties *VirtualMachineScaleSetIPConfigurationProperties `json:"properties,omitempty"` +} + +// VirtualMachineScaleSetIPConfigurationProperties is describes a virtual +// machine scale set network profile's IP configuration properties. +type VirtualMachineScaleSetIPConfigurationProperties struct { + Subnet *APIEntityReference `json:"subnet,omitempty"` + LoadBalancerBackendAddressPools *[]SubResource `json:"loadBalancerBackendAddressPools,omitempty"` +} + +// VirtualMachineScaleSetListResult is the List Virtual Machine operation +// response. +type VirtualMachineScaleSetListResult struct { + autorest.Response `json:"-"` + Value *[]VirtualMachineScaleSet `json:"value,omitempty"` + NextLink *string `json:",omitempty"` +} + +// VirtualMachineScaleSetListResultPreparer prepares a request to retrieve the next set of results. It returns +// nil if no more results exist. +func (client VirtualMachineScaleSetListResult) VirtualMachineScaleSetListResultPreparer() (*http.Request, error) { + if client.NextLink == nil || len(to.String(client.NextLink)) <= 0 { + return nil, nil + } + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(to.String(client.NextLink))) +} + +// VirtualMachineScaleSetListSkusResult is the Virtual Machine Scale Set List +// Skus operation response. +type VirtualMachineScaleSetListSkusResult struct { + autorest.Response `json:"-"` + Value *[]VirtualMachineScaleSetSku `json:"value,omitempty"` + NextLink *string `json:",omitempty"` +} + +// VirtualMachineScaleSetListSkusResultPreparer prepares a request to retrieve the next set of results. It returns +// nil if no more results exist. 
+func (client VirtualMachineScaleSetListSkusResult) VirtualMachineScaleSetListSkusResultPreparer() (*http.Request, error) { + if client.NextLink == nil || len(to.String(client.NextLink)) <= 0 { + return nil, nil + } + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(to.String(client.NextLink))) +} + +// VirtualMachineScaleSetListWithLinkResult is the List Virtual Machine +// operation response. +type VirtualMachineScaleSetListWithLinkResult struct { + autorest.Response `json:"-"` + Value *[]VirtualMachineScaleSet `json:"value,omitempty"` + NextLink *string `json:"nextLink,omitempty"` +} + +// VirtualMachineScaleSetListWithLinkResultPreparer prepares a request to retrieve the next set of results. It returns +// nil if no more results exist. +func (client VirtualMachineScaleSetListWithLinkResult) VirtualMachineScaleSetListWithLinkResultPreparer() (*http.Request, error) { + if client.NextLink == nil || len(to.String(client.NextLink)) <= 0 { + return nil, nil + } + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(to.String(client.NextLink))) +} + +// VirtualMachineScaleSetNetworkConfiguration is describes a virtual machine +// scale set network profile's network configurations. +type VirtualMachineScaleSetNetworkConfiguration struct { + ID *string `json:"id,omitempty"` + Name *string `json:"name,omitempty"` + Properties *VirtualMachineScaleSetNetworkConfigurationProperties `json:"properties,omitempty"` +} + +// VirtualMachineScaleSetNetworkConfigurationProperties is describes a virtual +// machine scale set network profile's IP configuration. +type VirtualMachineScaleSetNetworkConfigurationProperties struct { + Primary *bool `json:"primary,omitempty"` + IPConfigurations *[]VirtualMachineScaleSetIPConfiguration `json:"ipConfigurations,omitempty"` +} + +// VirtualMachineScaleSetNetworkProfile is describes a virtual machine scale +// set network profile. +type VirtualMachineScaleSetNetworkProfile struct { + NetworkInterfaceConfigurations *[]VirtualMachineScaleSetNetworkConfiguration `json:"networkInterfaceConfigurations,omitempty"` +} + +// VirtualMachineScaleSetOSDisk is describes a virtual machine scale set +// operating system disk. +type VirtualMachineScaleSetOSDisk struct { + Name *string `json:"name,omitempty"` + Caching CachingTypes `json:"caching,omitempty"` + CreateOption DiskCreateOptionTypes `json:"createOption,omitempty"` + OsType OperatingSystemTypes `json:"osType,omitempty"` + Image *VirtualHardDisk `json:"image,omitempty"` + VhdContainers *[]string `json:"vhdContainers,omitempty"` +} + +// VirtualMachineScaleSetOSProfile is describes a virtual machine scale set OS +// profile. +type VirtualMachineScaleSetOSProfile struct { + ComputerNamePrefix *string `json:"computerNamePrefix,omitempty"` + AdminUsername *string `json:"adminUsername,omitempty"` + AdminPassword *string `json:"adminPassword,omitempty"` + CustomData *string `json:"customData,omitempty"` + WindowsConfiguration *WindowsConfiguration `json:"windowsConfiguration,omitempty"` + LinuxConfiguration *LinuxConfiguration `json:"linuxConfiguration,omitempty"` + Secrets *[]VaultSecretGroup `json:"secrets,omitempty"` +} + +// VirtualMachineScaleSetProperties is describes the properties of a Virtual +// Machine Scale Set. 
+type VirtualMachineScaleSetProperties struct { + UpgradePolicy *UpgradePolicy `json:"upgradePolicy,omitempty"` + VirtualMachineProfile *VirtualMachineScaleSetVMProfile `json:"virtualMachineProfile,omitempty"` + ProvisioningState *string `json:"provisioningState,omitempty"` +} + +// VirtualMachineScaleSetSku is describes an available virtual machine scale +// set sku. +type VirtualMachineScaleSetSku struct { + ResourceType *string `json:"resourceType,omitempty"` + Sku *Sku `json:"sku,omitempty"` + Capacity *VirtualMachineScaleSetSkuCapacity `json:"capacity,omitempty"` +} + +// VirtualMachineScaleSetSkuCapacity is describes scaling information of a sku. +type VirtualMachineScaleSetSkuCapacity struct { + Minimum *int32 `json:"minimum,omitempty"` + Maximum *int32 `json:"maximum,omitempty"` + DefaultCapacity *int32 `json:"defaultCapacity,omitempty"` + ScaleType VirtualMachineScaleSetSkuScaleType `json:"scaleType,omitempty"` +} + +// VirtualMachineScaleSetStorageProfile is describes a virtual machine scale +// set storage profile. +type VirtualMachineScaleSetStorageProfile struct { + ImageReference *ImageReference `json:"imageReference,omitempty"` + OsDisk *VirtualMachineScaleSetOSDisk `json:"osDisk,omitempty"` +} + +// VirtualMachineScaleSetVM is describes a virtual machine scale set virtual +// machine. +type VirtualMachineScaleSetVM struct { + autorest.Response `json:"-"` + ID *string `json:"id,omitempty"` + Name *string `json:"name,omitempty"` + Type *string `json:"type,omitempty"` + Location *string `json:"location,omitempty"` + Tags *map[string]*string `json:"tags,omitempty"` + InstanceID *string `json:"instanceId,omitempty"` + Sku *Sku `json:"sku,omitempty"` + Properties *VirtualMachineScaleSetVMProperties `json:"properties,omitempty"` + Plan *Plan `json:"plan,omitempty"` + Resources *[]VirtualMachineExtension `json:"resources,omitempty"` +} + +// VirtualMachineScaleSetVMExtensionsSummary is extensions summary for virtual +// machines of a virtual machine scale set. +type VirtualMachineScaleSetVMExtensionsSummary struct { + Name *string `json:"name,omitempty"` + StatusesSummary *[]VirtualMachineStatusCodeCount `json:"statusesSummary,omitempty"` +} + +// VirtualMachineScaleSetVMInstanceIDs is specifies the list of virtual +// machine scale set instance IDs. +type VirtualMachineScaleSetVMInstanceIDs struct { + InstanceIds *[]string `json:"instanceIds,omitempty"` +} + +// VirtualMachineScaleSetVMInstanceRequiredIDs is specifies the list of +// virtual machine scale set instance IDs. +type VirtualMachineScaleSetVMInstanceRequiredIDs struct { + InstanceIds *[]string `json:"instanceIds,omitempty"` +} + +// VirtualMachineScaleSetVMInstanceView is the instance view of a virtual +// machine scale set VM. +type VirtualMachineScaleSetVMInstanceView struct { + autorest.Response `json:"-"` + PlatformUpdateDomain *int `json:"platformUpdateDomain,omitempty"` + PlatformFaultDomain *int `json:"platformFaultDomain,omitempty"` + RdpThumbPrint *string `json:"rdpThumbPrint,omitempty"` + VMAgent *VirtualMachineAgentInstanceView `json:"vmAgent,omitempty"` + Disks *[]DiskInstanceView `json:"disks,omitempty"` + Extensions *[]VirtualMachineExtensionInstanceView `json:"extensions,omitempty"` + BootDiagnostics *BootDiagnosticsInstanceView `json:"bootDiagnostics,omitempty"` + Statuses *[]InstanceViewStatus `json:"statuses,omitempty"` +} + +// VirtualMachineScaleSetVMListResult is the List Virtual Machine Scale Set +// VMs operation response. 
+type VirtualMachineScaleSetVMListResult struct { + autorest.Response `json:"-"` + Value *[]VirtualMachineScaleSetVM `json:"value,omitempty"` + NextLink *string `json:",omitempty"` +} + +// VirtualMachineScaleSetVMListResultPreparer prepares a request to retrieve the next set of results. It returns +// nil if no more results exist. +func (client VirtualMachineScaleSetVMListResult) VirtualMachineScaleSetVMListResultPreparer() (*http.Request, error) { + if client.NextLink == nil || len(to.String(client.NextLink)) <= 0 { + return nil, nil + } + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(to.String(client.NextLink))) +} + +// VirtualMachineScaleSetVMProfile is describes a virtual machine scale set +// virtual machine profile. +type VirtualMachineScaleSetVMProfile struct { + OsProfile *VirtualMachineScaleSetOSProfile `json:"osProfile,omitempty"` + StorageProfile *VirtualMachineScaleSetStorageProfile `json:"storageProfile,omitempty"` + NetworkProfile *VirtualMachineScaleSetNetworkProfile `json:"networkProfile,omitempty"` + ExtensionProfile *VirtualMachineScaleSetExtensionProfile `json:"extensionProfile,omitempty"` +} + +// VirtualMachineScaleSetVMProperties is describes the properties of a virtual +// machine scale set virtual machine. +type VirtualMachineScaleSetVMProperties struct { + LatestModelApplied *bool `json:"latestModelApplied,omitempty"` + InstanceView *VirtualMachineInstanceView `json:"instanceView,omitempty"` + HardwareProfile *HardwareProfile `json:"hardwareProfile,omitempty"` + StorageProfile *StorageProfile `json:"storageProfile,omitempty"` + OsProfile *OSProfile `json:"osProfile,omitempty"` + NetworkProfile *NetworkProfile `json:"networkProfile,omitempty"` + DiagnosticsProfile *DiagnosticsProfile `json:"diagnosticsProfile,omitempty"` + AvailabilitySet *SubResource `json:"availabilitySet,omitempty"` + ProvisioningState *string `json:"provisioningState,omitempty"` +} + +// VirtualMachineSize is describes the properties of a VM size. +type VirtualMachineSize struct { + Name *string `json:"name,omitempty"` + NumberOfCores *int `json:"numberOfCores,omitempty"` + OsDiskSizeInMB *int `json:"osDiskSizeInMB,omitempty"` + ResourceDiskSizeInMB *int `json:"resourceDiskSizeInMB,omitempty"` + MemoryInMB *int `json:"memoryInMB,omitempty"` + MaxDataDiskCount *int `json:"maxDataDiskCount,omitempty"` +} + +// VirtualMachineSizeListResult is the List Virtual Machine operation response. +type VirtualMachineSizeListResult struct { + autorest.Response `json:"-"` + Value *[]VirtualMachineSize `json:"value,omitempty"` + NextLink *string `json:",omitempty"` +} + +// VirtualMachineSizeListResultPreparer prepares a request to retrieve the next set of results. It returns +// nil if no more results exist. +func (client VirtualMachineSizeListResult) VirtualMachineSizeListResultPreparer() (*http.Request, error) { + if client.NextLink == nil || len(to.String(client.NextLink)) <= 0 { + return nil, nil + } + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(to.String(client.NextLink))) +} + +// VirtualMachineStatusCodeCount is the status code and count of the virtual +// machine scale set instance view status summary. +type VirtualMachineStatusCodeCount struct { + Code *string `json:"code,omitempty"` + Count *int `json:"count,omitempty"` +} + +// WindowsConfiguration is describes Windows Configuration of the OS Profile. 
+type WindowsConfiguration struct { + ProvisionVMAgent *bool `json:"provisionVMAgent,omitempty"` + EnableAutomaticUpdates *bool `json:"enableAutomaticUpdates,omitempty"` + TimeZone *string `json:"timeZone,omitempty"` + AdditionalUnattendContent *[]AdditionalUnattendContent `json:"additionalUnattendContent,omitempty"` + WinRM *WinRMConfiguration `json:"winRM,omitempty"` +} + +// WinRMConfiguration is describes Windows Remote Management configuration of +// the VM +type WinRMConfiguration struct { + Listeners *[]WinRMListener `json:"listeners,omitempty"` +} + +// WinRMListener is describes Protocol and thumbprint of Windows Remote +// Management listener +type WinRMListener struct { + Protocol ProtocolTypes `json:"protocol,omitempty"` + CertificateURL *string `json:"certificateUrl,omitempty"` +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/usageoperations.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/usageoperations.go new file mode 100644 index 000000000..93ff10c33 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/usageoperations.go @@ -0,0 +1,128 @@ +package compute + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator 0.12.0.0 +// Changes may cause incorrect behavior and will be lost if the code is +// regenerated. + +import ( + "github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest" + "net/http" + "net/url" +) + +// UsageOperationsClient is the the Compute Management Client. +type UsageOperationsClient struct { + ManagementClient +} + +// NewUsageOperationsClient creates an instance of the UsageOperationsClient +// client. +func NewUsageOperationsClient(subscriptionID string) UsageOperationsClient { + return NewUsageOperationsClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewUsageOperationsClientWithBaseURI creates an instance of the +// UsageOperationsClient client. +func NewUsageOperationsClientWithBaseURI(baseURI string, subscriptionID string) UsageOperationsClient { + return UsageOperationsClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// List lists compute usages for a subscription. +// +// location is the location upon which resource usage is queried. 
+func (client UsageOperationsClient) List(location string) (result ListUsagesResult, ae error) { + req, err := client.ListPreparer(location) + if err != nil { + return result, autorest.NewErrorWithError(err, "compute/UsageOperationsClient", "List", autorest.UndefinedStatusCode, "Failure preparing request") + } + + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "compute/UsageOperationsClient", "List", resp.StatusCode, "Failure sending request") + } + + result, err = client.ListResponder(resp) + if err != nil { + ae = autorest.NewErrorWithError(err, "compute/UsageOperationsClient", "List", resp.StatusCode, "Failure responding to request") + } + + return +} + +// ListPreparer prepares the List request. +func (client UsageOperationsClient) ListPreparer(location string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "location": url.QueryEscape(location), + "subscriptionId": url.QueryEscape(client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPath("/subscriptions/{subscriptionId}/providers/Microsoft.Compute/locations/{location}/usages"), + autorest.WithPathParameters(pathParameters), + autorest.WithQueryParameters(queryParameters)) +} + +// ListSender sends the List request. The method will close the +// http.Response Body if it receives an error. +func (client UsageOperationsClient) ListSender(req *http.Request) (*http.Response, error) { + return client.Send(req, http.StatusOK) +} + +// ListResponder handles the response to the List request. The method always +// closes the http.Response Body. +func (client UsageOperationsClient) ListResponder(resp *http.Response) (result ListUsagesResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + autorest.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// ListNextResults retrieves the next set of results, if any. +func (client UsageOperationsClient) ListNextResults(lastResults ListUsagesResult) (result ListUsagesResult, ae error) { + req, err := lastResults.ListUsagesResultPreparer() + if err != nil { + return result, autorest.NewErrorWithError(err, "compute/UsageOperationsClient", "List", autorest.UndefinedStatusCode, "Failure preparing next results request request") + } + if req == nil { + return + } + + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "compute/UsageOperationsClient", "List", resp.StatusCode, "Failure sending next results request request") + } + + result, err = client.ListResponder(resp) + if err != nil { + ae = autorest.NewErrorWithError(err, "compute/UsageOperationsClient", "List", resp.StatusCode, "Failure responding to next results request request") + } + + return +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/version.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/version.go new file mode 100644 index 000000000..93d1fde86 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/version.go @@ -0,0 +1,43 @@ +package compute + +// Copyright (c) Microsoft and contributors. All rights reserved. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator 0.12.0.0 +// Changes may cause incorrect behavior and will be lost if the code is +// regenerated. + +import ( + "fmt" +) + +const ( + major = "0" + minor = "4" + patch = "0" + // Always begin a "tag" with a dash (as per http://semver.org) + tag = "-beta" + semVerFormat = "%s.%s.%s%s" + userAgentFormat = "Azure-SDK-for-Go/%s;Package arm/%s;API %s" +) + +// UserAgent returns the UserAgent string to use when sending http.Requests. +func UserAgent() string { + return fmt.Sprintf(userAgentFormat, Version(), "compute", "2015-06-15") +} + +// Version returns the semantic version (see http://semver.org) of the client. +func Version() string { + return fmt.Sprintf(semVerFormat, major, minor, patch, tag) +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/virtualmachineextensionimages.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/virtualmachineextensionimages.go new file mode 100644 index 000000000..97355ee7c --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/virtualmachineextensionimages.go @@ -0,0 +1,241 @@ +package compute + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator 0.12.0.0 +// Changes may cause incorrect behavior and will be lost if the code is +// regenerated. + +import ( + "github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest" + "net/http" + "net/url" +) + +// VirtualMachineExtensionImagesClient is the the Compute Management Client. +type VirtualMachineExtensionImagesClient struct { + ManagementClient +} + +// NewVirtualMachineExtensionImagesClient creates an instance of the +// VirtualMachineExtensionImagesClient client. +func NewVirtualMachineExtensionImagesClient(subscriptionID string) VirtualMachineExtensionImagesClient { + return NewVirtualMachineExtensionImagesClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewVirtualMachineExtensionImagesClientWithBaseURI creates an instance of +// the VirtualMachineExtensionImagesClient client. +func NewVirtualMachineExtensionImagesClientWithBaseURI(baseURI string, subscriptionID string) VirtualMachineExtensionImagesClient { + return VirtualMachineExtensionImagesClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// Get gets a virtual machine extension image. 
+// +func (client VirtualMachineExtensionImagesClient) Get(location string, publisherName string, typeParameter string, version string) (result VirtualMachineExtensionImage, ae error) { + req, err := client.GetPreparer(location, publisherName, typeParameter, version) + if err != nil { + return result, autorest.NewErrorWithError(err, "compute/VirtualMachineExtensionImagesClient", "Get", autorest.UndefinedStatusCode, "Failure preparing request") + } + + resp, err := client.GetSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "compute/VirtualMachineExtensionImagesClient", "Get", resp.StatusCode, "Failure sending request") + } + + result, err = client.GetResponder(resp) + if err != nil { + ae = autorest.NewErrorWithError(err, "compute/VirtualMachineExtensionImagesClient", "Get", resp.StatusCode, "Failure responding to request") + } + + return +} + +// GetPreparer prepares the Get request. +func (client VirtualMachineExtensionImagesClient) GetPreparer(location string, publisherName string, typeParameter string, version string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "location": url.QueryEscape(location), + "publisherName": url.QueryEscape(publisherName), + "subscriptionId": url.QueryEscape(client.SubscriptionID), + "type": url.QueryEscape(typeParameter), + "version": url.QueryEscape(version), + } + + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPath("/subscriptions/{subscriptionId}/providers/Microsoft.Compute/locations/{location}/publishers/{publisherName}/artifacttypes/vmextension/types/{type}/versions/{version}"), + autorest.WithPathParameters(pathParameters), + autorest.WithQueryParameters(queryParameters)) +} + +// GetSender sends the Get request. The method will close the +// http.Response Body if it receives an error. +func (client VirtualMachineExtensionImagesClient) GetSender(req *http.Request) (*http.Response, error) { + return client.Send(req, http.StatusOK) +} + +// GetResponder handles the response to the Get request. The method always +// closes the http.Response Body. +func (client VirtualMachineExtensionImagesClient) GetResponder(resp *http.Response) (result VirtualMachineExtensionImage, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + autorest.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// ListTypes gets a list of virtual machine extension image types. 
+// +func (client VirtualMachineExtensionImagesClient) ListTypes(location string, publisherName string) (result VirtualMachineImageResourceList, ae error) { + req, err := client.ListTypesPreparer(location, publisherName) + if err != nil { + return result, autorest.NewErrorWithError(err, "compute/VirtualMachineExtensionImagesClient", "ListTypes", autorest.UndefinedStatusCode, "Failure preparing request") + } + + resp, err := client.ListTypesSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "compute/VirtualMachineExtensionImagesClient", "ListTypes", resp.StatusCode, "Failure sending request") + } + + result, err = client.ListTypesResponder(resp) + if err != nil { + ae = autorest.NewErrorWithError(err, "compute/VirtualMachineExtensionImagesClient", "ListTypes", resp.StatusCode, "Failure responding to request") + } + + return +} + +// ListTypesPreparer prepares the ListTypes request. +func (client VirtualMachineExtensionImagesClient) ListTypesPreparer(location string, publisherName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "location": url.QueryEscape(location), + "publisherName": url.QueryEscape(publisherName), + "subscriptionId": url.QueryEscape(client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPath("/subscriptions/{subscriptionId}/providers/Microsoft.Compute/locations/{location}/publishers/{publisherName}/artifacttypes/vmextension/types"), + autorest.WithPathParameters(pathParameters), + autorest.WithQueryParameters(queryParameters)) +} + +// ListTypesSender sends the ListTypes request. The method will close the +// http.Response Body if it receives an error. +func (client VirtualMachineExtensionImagesClient) ListTypesSender(req *http.Request) (*http.Response, error) { + return client.Send(req, http.StatusOK) +} + +// ListTypesResponder handles the response to the ListTypes request. The method always +// closes the http.Response Body. +func (client VirtualMachineExtensionImagesClient) ListTypesResponder(resp *http.Response) (result VirtualMachineImageResourceList, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + autorest.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result.Value), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// ListVersions gets a list of virtual machine extension image versions. +// +// filter is the filter to apply on the operation. 
+func (client VirtualMachineExtensionImagesClient) ListVersions(location string, publisherName string, typeParameter string, filter string, top *int, orderBy string) (result VirtualMachineImageResourceList, ae error) { + req, err := client.ListVersionsPreparer(location, publisherName, typeParameter, filter, top, orderBy) + if err != nil { + return result, autorest.NewErrorWithError(err, "compute/VirtualMachineExtensionImagesClient", "ListVersions", autorest.UndefinedStatusCode, "Failure preparing request") + } + + resp, err := client.ListVersionsSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "compute/VirtualMachineExtensionImagesClient", "ListVersions", resp.StatusCode, "Failure sending request") + } + + result, err = client.ListVersionsResponder(resp) + if err != nil { + ae = autorest.NewErrorWithError(err, "compute/VirtualMachineExtensionImagesClient", "ListVersions", resp.StatusCode, "Failure responding to request") + } + + return +} + +// ListVersionsPreparer prepares the ListVersions request. +func (client VirtualMachineExtensionImagesClient) ListVersionsPreparer(location string, publisherName string, typeParameter string, filter string, top *int, orderBy string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "location": url.QueryEscape(location), + "publisherName": url.QueryEscape(publisherName), + "subscriptionId": url.QueryEscape(client.SubscriptionID), + "type": url.QueryEscape(typeParameter), + } + + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + if len(filter) > 0 { + queryParameters["$filter"] = filter + } + if top != nil { + queryParameters["$top"] = top + } + if len(orderBy) > 0 { + queryParameters["$orderBy"] = orderBy + } + + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPath("/subscriptions/{subscriptionId}/providers/Microsoft.Compute/locations/{location}/publishers/{publisherName}/artifacttypes/vmextension/types/{type}/versions"), + autorest.WithPathParameters(pathParameters), + autorest.WithQueryParameters(queryParameters)) +} + +// ListVersionsSender sends the ListVersions request. The method will close the +// http.Response Body if it receives an error. +func (client VirtualMachineExtensionImagesClient) ListVersionsSender(req *http.Request) (*http.Response, error) { + return client.Send(req, http.StatusOK) +} + +// ListVersionsResponder handles the response to the ListVersions request. The method always +// closes the http.Response Body. +func (client VirtualMachineExtensionImagesClient) ListVersionsResponder(resp *http.Response) (result VirtualMachineImageResourceList, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + autorest.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result.Value), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/virtualmachineextensions.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/virtualmachineextensions.go new file mode 100644 index 000000000..97255c5e5 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/virtualmachineextensions.go @@ -0,0 +1,246 @@ +package compute + +// Copyright (c) Microsoft and contributors. All rights reserved. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator 0.12.0.0 +// Changes may cause incorrect behavior and will be lost if the code is +// regenerated. + +import ( + "github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest" + "net/http" + "net/url" +) + +// VirtualMachineExtensionsClient is the the Compute Management Client. +type VirtualMachineExtensionsClient struct { + ManagementClient +} + +// NewVirtualMachineExtensionsClient creates an instance of the +// VirtualMachineExtensionsClient client. +func NewVirtualMachineExtensionsClient(subscriptionID string) VirtualMachineExtensionsClient { + return NewVirtualMachineExtensionsClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewVirtualMachineExtensionsClientWithBaseURI creates an instance of the +// VirtualMachineExtensionsClient client. +func NewVirtualMachineExtensionsClientWithBaseURI(baseURI string, subscriptionID string) VirtualMachineExtensionsClient { + return VirtualMachineExtensionsClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// CreateOrUpdate the operation to create or update the extension. +// +// resourceGroupName is the name of the resource group. vmName is the name of +// the virtual machine where the extension should be create or updated. +// vmExtensionName is the name of the virtual machine extension. +// extensionParameters is parameters supplied to the Create Virtual Machine +// Extension operation. +func (client VirtualMachineExtensionsClient) CreateOrUpdate(resourceGroupName string, vmName string, vmExtensionName string, extensionParameters VirtualMachineExtension) (result VirtualMachineExtension, ae error) { + req, err := client.CreateOrUpdatePreparer(resourceGroupName, vmName, vmExtensionName, extensionParameters) + if err != nil { + return result, autorest.NewErrorWithError(err, "compute/VirtualMachineExtensionsClient", "CreateOrUpdate", autorest.UndefinedStatusCode, "Failure preparing request") + } + + resp, err := client.CreateOrUpdateSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "compute/VirtualMachineExtensionsClient", "CreateOrUpdate", resp.StatusCode, "Failure sending request") + } + + result, err = client.CreateOrUpdateResponder(resp) + if err != nil { + ae = autorest.NewErrorWithError(err, "compute/VirtualMachineExtensionsClient", "CreateOrUpdate", resp.StatusCode, "Failure responding to request") + } + + return +} + +// CreateOrUpdatePreparer prepares the CreateOrUpdate request. 
+func (client VirtualMachineExtensionsClient) CreateOrUpdatePreparer(resourceGroupName string, vmName string, vmExtensionName string, extensionParameters VirtualMachineExtension) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": url.QueryEscape(resourceGroupName), + "subscriptionId": url.QueryEscape(client.SubscriptionID), + "vmExtensionName": url.QueryEscape(vmExtensionName), + "vmName": url.QueryEscape(vmName), + } + + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsPut(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/extensions/{vmExtensionName}"), + autorest.WithJSON(extensionParameters), + autorest.WithPathParameters(pathParameters), + autorest.WithQueryParameters(queryParameters)) +} + +// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the +// http.Response Body if it receives an error. +func (client VirtualMachineExtensionsClient) CreateOrUpdateSender(req *http.Request) (*http.Response, error) { + return client.Send(req, http.StatusCreated, http.StatusOK) +} + +// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always +// closes the http.Response Body. +func (client VirtualMachineExtensionsClient) CreateOrUpdateResponder(resp *http.Response) (result VirtualMachineExtension, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + autorest.WithErrorUnlessStatusCode(http.StatusCreated, http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// Delete the operation to delete the extension. +// +// resourceGroupName is the name of the resource group. vmName is the name of +// the virtual machine where the extension should be deleted. vmExtensionName +// is the name of the virtual machine extension. +func (client VirtualMachineExtensionsClient) Delete(resourceGroupName string, vmName string, vmExtensionName string) (result autorest.Response, ae error) { + req, err := client.DeletePreparer(resourceGroupName, vmName, vmExtensionName) + if err != nil { + return result, autorest.NewErrorWithError(err, "compute/VirtualMachineExtensionsClient", "Delete", autorest.UndefinedStatusCode, "Failure preparing request") + } + + resp, err := client.DeleteSender(req) + if err != nil { + result.Response = resp + return result, autorest.NewErrorWithError(err, "compute/VirtualMachineExtensionsClient", "Delete", resp.StatusCode, "Failure sending request") + } + + result, err = client.DeleteResponder(resp) + if err != nil { + ae = autorest.NewErrorWithError(err, "compute/VirtualMachineExtensionsClient", "Delete", resp.StatusCode, "Failure responding to request") + } + + return +} + +// DeletePreparer prepares the Delete request. 
+func (client VirtualMachineExtensionsClient) DeletePreparer(resourceGroupName string, vmName string, vmExtensionName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": url.QueryEscape(resourceGroupName), + "subscriptionId": url.QueryEscape(client.SubscriptionID), + "vmExtensionName": url.QueryEscape(vmExtensionName), + "vmName": url.QueryEscape(vmName), + } + + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsDelete(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/extensions/{vmExtensionName}"), + autorest.WithPathParameters(pathParameters), + autorest.WithQueryParameters(queryParameters)) +} + +// DeleteSender sends the Delete request. The method will close the +// http.Response Body if it receives an error. +func (client VirtualMachineExtensionsClient) DeleteSender(req *http.Request) (*http.Response, error) { + return client.Send(req, http.StatusAccepted, http.StatusNoContent, http.StatusOK) +} + +// DeleteResponder handles the response to the Delete request. The method always +// closes the http.Response Body. +func (client VirtualMachineExtensionsClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + autorest.WithErrorUnlessStatusCode(http.StatusAccepted, http.StatusNoContent, http.StatusOK), + autorest.ByClosing()) + result.Response = resp + return +} + +// Get the operation to get the extension. +// +// resourceGroupName is the name of the resource group. vmName is the name of +// the virtual machine containing the extension. vmExtensionName is the name +// of the virtual machine extension. expand is the expand expression to apply +// on the operation. +func (client VirtualMachineExtensionsClient) Get(resourceGroupName string, vmName string, vmExtensionName string, expand string) (result VirtualMachineExtension, ae error) { + req, err := client.GetPreparer(resourceGroupName, vmName, vmExtensionName, expand) + if err != nil { + return result, autorest.NewErrorWithError(err, "compute/VirtualMachineExtensionsClient", "Get", autorest.UndefinedStatusCode, "Failure preparing request") + } + + resp, err := client.GetSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "compute/VirtualMachineExtensionsClient", "Get", resp.StatusCode, "Failure sending request") + } + + result, err = client.GetResponder(resp) + if err != nil { + ae = autorest.NewErrorWithError(err, "compute/VirtualMachineExtensionsClient", "Get", resp.StatusCode, "Failure responding to request") + } + + return +} + +// GetPreparer prepares the Get request. 
+func (client VirtualMachineExtensionsClient) GetPreparer(resourceGroupName string, vmName string, vmExtensionName string, expand string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": url.QueryEscape(resourceGroupName), + "subscriptionId": url.QueryEscape(client.SubscriptionID), + "vmExtensionName": url.QueryEscape(vmExtensionName), + "vmName": url.QueryEscape(vmName), + } + + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + if len(expand) > 0 { + queryParameters["$expand"] = expand + } + + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/extensions/{vmExtensionName}"), + autorest.WithPathParameters(pathParameters), + autorest.WithQueryParameters(queryParameters)) +} + +// GetSender sends the Get request. The method will close the +// http.Response Body if it receives an error. +func (client VirtualMachineExtensionsClient) GetSender(req *http.Request) (*http.Response, error) { + return client.Send(req, http.StatusOK) +} + +// GetResponder handles the response to the Get request. The method always +// closes the http.Response Body. +func (client VirtualMachineExtensionsClient) GetResponder(resp *http.Response) (result VirtualMachineExtension, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + autorest.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/virtualmachineimages.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/virtualmachineimages.go new file mode 100644 index 000000000..0b3c23ce5 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/virtualmachineimages.go @@ -0,0 +1,367 @@ +package compute + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator 0.12.0.0 +// Changes may cause incorrect behavior and will be lost if the code is +// regenerated. + +import ( + "github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest" + "net/http" + "net/url" +) + +// VirtualMachineImagesClient is the the Compute Management Client. +type VirtualMachineImagesClient struct { + ManagementClient +} + +// NewVirtualMachineImagesClient creates an instance of the +// VirtualMachineImagesClient client. +func NewVirtualMachineImagesClient(subscriptionID string) VirtualMachineImagesClient { + return NewVirtualMachineImagesClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewVirtualMachineImagesClientWithBaseURI creates an instance of the +// VirtualMachineImagesClient client. 
+func NewVirtualMachineImagesClientWithBaseURI(baseURI string, subscriptionID string) VirtualMachineImagesClient { + return VirtualMachineImagesClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// Get gets a virtual machine image. +// +func (client VirtualMachineImagesClient) Get(location string, publisherName string, offer string, skus string, version string) (result VirtualMachineImage, ae error) { + req, err := client.GetPreparer(location, publisherName, offer, skus, version) + if err != nil { + return result, autorest.NewErrorWithError(err, "compute/VirtualMachineImagesClient", "Get", autorest.UndefinedStatusCode, "Failure preparing request") + } + + resp, err := client.GetSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "compute/VirtualMachineImagesClient", "Get", resp.StatusCode, "Failure sending request") + } + + result, err = client.GetResponder(resp) + if err != nil { + ae = autorest.NewErrorWithError(err, "compute/VirtualMachineImagesClient", "Get", resp.StatusCode, "Failure responding to request") + } + + return +} + +// GetPreparer prepares the Get request. +func (client VirtualMachineImagesClient) GetPreparer(location string, publisherName string, offer string, skus string, version string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "location": url.QueryEscape(location), + "offer": url.QueryEscape(offer), + "publisherName": url.QueryEscape(publisherName), + "skus": url.QueryEscape(skus), + "subscriptionId": url.QueryEscape(client.SubscriptionID), + "version": url.QueryEscape(version), + } + + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPath("/subscriptions/{subscriptionId}/providers/Microsoft.Compute/locations/{location}/publishers/{publisherName}/artifacttypes/vmimage/offers/{offer}/skus/{skus}/versions/{version}"), + autorest.WithPathParameters(pathParameters), + autorest.WithQueryParameters(queryParameters)) +} + +// GetSender sends the Get request. The method will close the +// http.Response Body if it receives an error. +func (client VirtualMachineImagesClient) GetSender(req *http.Request) (*http.Response, error) { + return client.Send(req, http.StatusOK) +} + +// GetResponder handles the response to the Get request. The method always +// closes the http.Response Body. +func (client VirtualMachineImagesClient) GetResponder(resp *http.Response) (result VirtualMachineImage, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + autorest.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// List gets a list of virtual machine images. +// +// filter is the filter to apply on the operation. 
+func (client VirtualMachineImagesClient) List(location string, publisherName string, offer string, skus string, filter string, top *int, orderby string) (result VirtualMachineImageResourceList, ae error) { + req, err := client.ListPreparer(location, publisherName, offer, skus, filter, top, orderby) + if err != nil { + return result, autorest.NewErrorWithError(err, "compute/VirtualMachineImagesClient", "List", autorest.UndefinedStatusCode, "Failure preparing request") + } + + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "compute/VirtualMachineImagesClient", "List", resp.StatusCode, "Failure sending request") + } + + result, err = client.ListResponder(resp) + if err != nil { + ae = autorest.NewErrorWithError(err, "compute/VirtualMachineImagesClient", "List", resp.StatusCode, "Failure responding to request") + } + + return +} + +// ListPreparer prepares the List request. +func (client VirtualMachineImagesClient) ListPreparer(location string, publisherName string, offer string, skus string, filter string, top *int, orderby string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "location": url.QueryEscape(location), + "offer": url.QueryEscape(offer), + "publisherName": url.QueryEscape(publisherName), + "skus": url.QueryEscape(skus), + "subscriptionId": url.QueryEscape(client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + if len(filter) > 0 { + queryParameters["$filter"] = filter + } + if top != nil { + queryParameters["$top"] = top + } + if len(orderby) > 0 { + queryParameters["$orderby"] = orderby + } + + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPath("/subscriptions/{subscriptionId}/providers/Microsoft.Compute/locations/{location}/publishers/{publisherName}/artifacttypes/vmimage/offers/{offer}/skus/{skus}/versions"), + autorest.WithPathParameters(pathParameters), + autorest.WithQueryParameters(queryParameters)) +} + +// ListSender sends the List request. The method will close the +// http.Response Body if it receives an error. +func (client VirtualMachineImagesClient) ListSender(req *http.Request) (*http.Response, error) { + return client.Send(req, http.StatusOK) +} + +// ListResponder handles the response to the List request. The method always +// closes the http.Response Body. +func (client VirtualMachineImagesClient) ListResponder(resp *http.Response) (result VirtualMachineImageResourceList, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + autorest.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result.Value), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// ListOffers gets a list of virtual machine image offers. 
+// +func (client VirtualMachineImagesClient) ListOffers(location string, publisherName string) (result VirtualMachineImageResourceList, ae error) { + req, err := client.ListOffersPreparer(location, publisherName) + if err != nil { + return result, autorest.NewErrorWithError(err, "compute/VirtualMachineImagesClient", "ListOffers", autorest.UndefinedStatusCode, "Failure preparing request") + } + + resp, err := client.ListOffersSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "compute/VirtualMachineImagesClient", "ListOffers", resp.StatusCode, "Failure sending request") + } + + result, err = client.ListOffersResponder(resp) + if err != nil { + ae = autorest.NewErrorWithError(err, "compute/VirtualMachineImagesClient", "ListOffers", resp.StatusCode, "Failure responding to request") + } + + return +} + +// ListOffersPreparer prepares the ListOffers request. +func (client VirtualMachineImagesClient) ListOffersPreparer(location string, publisherName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "location": url.QueryEscape(location), + "publisherName": url.QueryEscape(publisherName), + "subscriptionId": url.QueryEscape(client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPath("/subscriptions/{subscriptionId}/providers/Microsoft.Compute/locations/{location}/publishers/{publisherName}/artifacttypes/vmimage/offers"), + autorest.WithPathParameters(pathParameters), + autorest.WithQueryParameters(queryParameters)) +} + +// ListOffersSender sends the ListOffers request. The method will close the +// http.Response Body if it receives an error. +func (client VirtualMachineImagesClient) ListOffersSender(req *http.Request) (*http.Response, error) { + return client.Send(req, http.StatusOK) +} + +// ListOffersResponder handles the response to the ListOffers request. The method always +// closes the http.Response Body. +func (client VirtualMachineImagesClient) ListOffersResponder(resp *http.Response) (result VirtualMachineImageResourceList, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + autorest.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result.Value), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// ListPublishers gets a list of virtual machine image publishers. +// +func (client VirtualMachineImagesClient) ListPublishers(location string) (result VirtualMachineImageResourceList, ae error) { + req, err := client.ListPublishersPreparer(location) + if err != nil { + return result, autorest.NewErrorWithError(err, "compute/VirtualMachineImagesClient", "ListPublishers", autorest.UndefinedStatusCode, "Failure preparing request") + } + + resp, err := client.ListPublishersSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "compute/VirtualMachineImagesClient", "ListPublishers", resp.StatusCode, "Failure sending request") + } + + result, err = client.ListPublishersResponder(resp) + if err != nil { + ae = autorest.NewErrorWithError(err, "compute/VirtualMachineImagesClient", "ListPublishers", resp.StatusCode, "Failure responding to request") + } + + return +} + +// ListPublishersPreparer prepares the ListPublishers request. 
+func (client VirtualMachineImagesClient) ListPublishersPreparer(location string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "location": url.QueryEscape(location), + "subscriptionId": url.QueryEscape(client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPath("/subscriptions/{subscriptionId}/providers/Microsoft.Compute/locations/{location}/publishers"), + autorest.WithPathParameters(pathParameters), + autorest.WithQueryParameters(queryParameters)) +} + +// ListPublishersSender sends the ListPublishers request. The method will close the +// http.Response Body if it receives an error. +func (client VirtualMachineImagesClient) ListPublishersSender(req *http.Request) (*http.Response, error) { + return client.Send(req, http.StatusOK) +} + +// ListPublishersResponder handles the response to the ListPublishers request. The method always +// closes the http.Response Body. +func (client VirtualMachineImagesClient) ListPublishersResponder(resp *http.Response) (result VirtualMachineImageResourceList, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + autorest.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result.Value), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// ListSkus gets a list of virtual machine image skus. +// +func (client VirtualMachineImagesClient) ListSkus(location string, publisherName string, offer string) (result VirtualMachineImageResourceList, ae error) { + req, err := client.ListSkusPreparer(location, publisherName, offer) + if err != nil { + return result, autorest.NewErrorWithError(err, "compute/VirtualMachineImagesClient", "ListSkus", autorest.UndefinedStatusCode, "Failure preparing request") + } + + resp, err := client.ListSkusSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "compute/VirtualMachineImagesClient", "ListSkus", resp.StatusCode, "Failure sending request") + } + + result, err = client.ListSkusResponder(resp) + if err != nil { + ae = autorest.NewErrorWithError(err, "compute/VirtualMachineImagesClient", "ListSkus", resp.StatusCode, "Failure responding to request") + } + + return +} + +// ListSkusPreparer prepares the ListSkus request. +func (client VirtualMachineImagesClient) ListSkusPreparer(location string, publisherName string, offer string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "location": url.QueryEscape(location), + "offer": url.QueryEscape(offer), + "publisherName": url.QueryEscape(publisherName), + "subscriptionId": url.QueryEscape(client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPath("/subscriptions/{subscriptionId}/providers/Microsoft.Compute/locations/{location}/publishers/{publisherName}/artifacttypes/vmimage/offers/{offer}/skus"), + autorest.WithPathParameters(pathParameters), + autorest.WithQueryParameters(queryParameters)) +} + +// ListSkusSender sends the ListSkus request. The method will close the +// http.Response Body if it receives an error. 
+func (client VirtualMachineImagesClient) ListSkusSender(req *http.Request) (*http.Response, error) { + return client.Send(req, http.StatusOK) +} + +// ListSkusResponder handles the response to the ListSkus request. The method always +// closes the http.Response Body. +func (client VirtualMachineImagesClient) ListSkusResponder(resp *http.Response) (result VirtualMachineImageResourceList, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + autorest.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result.Value), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/virtualmachines.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/virtualmachines.go new file mode 100644 index 000000000..8e60d5074 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/virtualmachines.go @@ -0,0 +1,883 @@ +package compute + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator 0.12.0.0 +// Changes may cause incorrect behavior and will be lost if the code is +// regenerated. + +import ( + "github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest" + "net/http" + "net/url" +) + +// VirtualMachinesClient is the the Compute Management Client. +type VirtualMachinesClient struct { + ManagementClient +} + +// NewVirtualMachinesClient creates an instance of the VirtualMachinesClient +// client. +func NewVirtualMachinesClient(subscriptionID string) VirtualMachinesClient { + return NewVirtualMachinesClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewVirtualMachinesClientWithBaseURI creates an instance of the +// VirtualMachinesClient client. +func NewVirtualMachinesClientWithBaseURI(baseURI string, subscriptionID string) VirtualMachinesClient { + return VirtualMachinesClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// Capture captures the VM by copying VirtualHardDisks of the VM and outputs a +// template that can be used to create similar VMs. +// +// resourceGroupName is the name of the resource group. vmName is the name of +// the virtual machine. parameters is parameters supplied to the Capture +// Virtual Machine operation. 
+func (client VirtualMachinesClient) Capture(resourceGroupName string, vmName string, parameters VirtualMachineCaptureParameters) (result VirtualMachineCaptureResult, ae error) { + req, err := client.CapturePreparer(resourceGroupName, vmName, parameters) + if err != nil { + return result, autorest.NewErrorWithError(err, "compute/VirtualMachinesClient", "Capture", autorest.UndefinedStatusCode, "Failure preparing request") + } + + resp, err := client.CaptureSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "compute/VirtualMachinesClient", "Capture", resp.StatusCode, "Failure sending request") + } + + result, err = client.CaptureResponder(resp) + if err != nil { + ae = autorest.NewErrorWithError(err, "compute/VirtualMachinesClient", "Capture", resp.StatusCode, "Failure responding to request") + } + + return +} + +// CapturePreparer prepares the Capture request. +func (client VirtualMachinesClient) CapturePreparer(resourceGroupName string, vmName string, parameters VirtualMachineCaptureParameters) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": url.QueryEscape(resourceGroupName), + "subscriptionId": url.QueryEscape(client.SubscriptionID), + "vmName": url.QueryEscape(vmName), + } + + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/capture"), + autorest.WithJSON(parameters), + autorest.WithPathParameters(pathParameters), + autorest.WithQueryParameters(queryParameters)) +} + +// CaptureSender sends the Capture request. The method will close the +// http.Response Body if it receives an error. +func (client VirtualMachinesClient) CaptureSender(req *http.Request) (*http.Response, error) { + return client.Send(req, http.StatusOK, http.StatusAccepted) +} + +// CaptureResponder handles the response to the Capture request. The method always +// closes the http.Response Body. +func (client VirtualMachinesClient) CaptureResponder(resp *http.Response) (result VirtualMachineCaptureResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + autorest.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// CreateOrUpdate the operation to create or update a virtual machine. +// +// resourceGroupName is the name of the resource group. vmName is the name of +// the virtual machine. parameters is parameters supplied to the Create +// Virtual Machine operation. 
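+//
+// Illustrative call (a sketch, not generated code; vm is a
+// compute.VirtualMachine assembled elsewhere and the names are placeholders):
+//
+//	created, err := client.CreateOrUpdate("myGroup", "myVM", vm)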
+func (client VirtualMachinesClient) CreateOrUpdate(resourceGroupName string, vmName string, parameters VirtualMachine) (result VirtualMachine, ae error) { + req, err := client.CreateOrUpdatePreparer(resourceGroupName, vmName, parameters) + if err != nil { + return result, autorest.NewErrorWithError(err, "compute/VirtualMachinesClient", "CreateOrUpdate", autorest.UndefinedStatusCode, "Failure preparing request") + } + + resp, err := client.CreateOrUpdateSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "compute/VirtualMachinesClient", "CreateOrUpdate", resp.StatusCode, "Failure sending request") + } + + result, err = client.CreateOrUpdateResponder(resp) + if err != nil { + ae = autorest.NewErrorWithError(err, "compute/VirtualMachinesClient", "CreateOrUpdate", resp.StatusCode, "Failure responding to request") + } + + return +} + +// CreateOrUpdatePreparer prepares the CreateOrUpdate request. +func (client VirtualMachinesClient) CreateOrUpdatePreparer(resourceGroupName string, vmName string, parameters VirtualMachine) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": url.QueryEscape(resourceGroupName), + "subscriptionId": url.QueryEscape(client.SubscriptionID), + "vmName": url.QueryEscape(vmName), + } + + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsPut(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}"), + autorest.WithJSON(parameters), + autorest.WithPathParameters(pathParameters), + autorest.WithQueryParameters(queryParameters)) +} + +// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the +// http.Response Body if it receives an error. +func (client VirtualMachinesClient) CreateOrUpdateSender(req *http.Request) (*http.Response, error) { + return client.Send(req, http.StatusCreated, http.StatusOK) +} + +// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always +// closes the http.Response Body. +func (client VirtualMachinesClient) CreateOrUpdateResponder(resp *http.Response) (result VirtualMachine, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + autorest.WithErrorUnlessStatusCode(http.StatusCreated, http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// Deallocate shuts down the Virtual Machine and releases the compute +// resources. You are not billed for the compute resources that this Virtual +// Machine uses. +// +// resourceGroupName is the name of the resource group. vmName is the name of +// the virtual machine. 
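+//
+// Illustrative call (names are placeholders); a 202 Accepted response means
+// the deallocation completes asynchronously:
+//
+//	res, err := client.Deallocate("myGroup", "myVM")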
+func (client VirtualMachinesClient) Deallocate(resourceGroupName string, vmName string) (result autorest.Response, ae error) { + req, err := client.DeallocatePreparer(resourceGroupName, vmName) + if err != nil { + return result, autorest.NewErrorWithError(err, "compute/VirtualMachinesClient", "Deallocate", autorest.UndefinedStatusCode, "Failure preparing request") + } + + resp, err := client.DeallocateSender(req) + if err != nil { + result.Response = resp + return result, autorest.NewErrorWithError(err, "compute/VirtualMachinesClient", "Deallocate", resp.StatusCode, "Failure sending request") + } + + result, err = client.DeallocateResponder(resp) + if err != nil { + ae = autorest.NewErrorWithError(err, "compute/VirtualMachinesClient", "Deallocate", resp.StatusCode, "Failure responding to request") + } + + return +} + +// DeallocatePreparer prepares the Deallocate request. +func (client VirtualMachinesClient) DeallocatePreparer(resourceGroupName string, vmName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": url.QueryEscape(resourceGroupName), + "subscriptionId": url.QueryEscape(client.SubscriptionID), + "vmName": url.QueryEscape(vmName), + } + + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/deallocate"), + autorest.WithPathParameters(pathParameters), + autorest.WithQueryParameters(queryParameters)) +} + +// DeallocateSender sends the Deallocate request. The method will close the +// http.Response Body if it receives an error. +func (client VirtualMachinesClient) DeallocateSender(req *http.Request) (*http.Response, error) { + return client.Send(req, http.StatusOK, http.StatusAccepted) +} + +// DeallocateResponder handles the response to the Deallocate request. The method always +// closes the http.Response Body. +func (client VirtualMachinesClient) DeallocateResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + autorest.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), + autorest.ByClosing()) + result.Response = resp + return +} + +// Delete the operation to delete a virtual machine. +// +// resourceGroupName is the name of the resource group. vmName is the name of +// the virtual machine. +func (client VirtualMachinesClient) Delete(resourceGroupName string, vmName string) (result autorest.Response, ae error) { + req, err := client.DeletePreparer(resourceGroupName, vmName) + if err != nil { + return result, autorest.NewErrorWithError(err, "compute/VirtualMachinesClient", "Delete", autorest.UndefinedStatusCode, "Failure preparing request") + } + + resp, err := client.DeleteSender(req) + if err != nil { + result.Response = resp + return result, autorest.NewErrorWithError(err, "compute/VirtualMachinesClient", "Delete", resp.StatusCode, "Failure sending request") + } + + result, err = client.DeleteResponder(resp) + if err != nil { + ae = autorest.NewErrorWithError(err, "compute/VirtualMachinesClient", "Delete", resp.StatusCode, "Failure responding to request") + } + + return +} + +// DeletePreparer prepares the Delete request. 
+func (client VirtualMachinesClient) DeletePreparer(resourceGroupName string, vmName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": url.QueryEscape(resourceGroupName), + "subscriptionId": url.QueryEscape(client.SubscriptionID), + "vmName": url.QueryEscape(vmName), + } + + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsDelete(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}"), + autorest.WithPathParameters(pathParameters), + autorest.WithQueryParameters(queryParameters)) +} + +// DeleteSender sends the Delete request. The method will close the +// http.Response Body if it receives an error. +func (client VirtualMachinesClient) DeleteSender(req *http.Request) (*http.Response, error) { + return client.Send(req, http.StatusOK, http.StatusAccepted, http.StatusNoContent) +} + +// DeleteResponder handles the response to the Delete request. The method always +// closes the http.Response Body. +func (client VirtualMachinesClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + autorest.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted, http.StatusNoContent), + autorest.ByClosing()) + result.Response = resp + return +} + +// Generalize sets the state of the VM as Generalized. +// +// resourceGroupName is the name of the resource group. vmName is the name of +// the virtual machine. +func (client VirtualMachinesClient) Generalize(resourceGroupName string, vmName string) (result autorest.Response, ae error) { + req, err := client.GeneralizePreparer(resourceGroupName, vmName) + if err != nil { + return result, autorest.NewErrorWithError(err, "compute/VirtualMachinesClient", "Generalize", autorest.UndefinedStatusCode, "Failure preparing request") + } + + resp, err := client.GeneralizeSender(req) + if err != nil { + result.Response = resp + return result, autorest.NewErrorWithError(err, "compute/VirtualMachinesClient", "Generalize", resp.StatusCode, "Failure sending request") + } + + result, err = client.GeneralizeResponder(resp) + if err != nil { + ae = autorest.NewErrorWithError(err, "compute/VirtualMachinesClient", "Generalize", resp.StatusCode, "Failure responding to request") + } + + return +} + +// GeneralizePreparer prepares the Generalize request. +func (client VirtualMachinesClient) GeneralizePreparer(resourceGroupName string, vmName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": url.QueryEscape(resourceGroupName), + "subscriptionId": url.QueryEscape(client.SubscriptionID), + "vmName": url.QueryEscape(vmName), + } + + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/generalize"), + autorest.WithPathParameters(pathParameters), + autorest.WithQueryParameters(queryParameters)) +} + +// GeneralizeSender sends the Generalize request. The method will close the +// http.Response Body if it receives an error. 
+func (client VirtualMachinesClient) GeneralizeSender(req *http.Request) (*http.Response, error) { + return client.Send(req, http.StatusOK) +} + +// GeneralizeResponder handles the response to the Generalize request. The method always +// closes the http.Response Body. +func (client VirtualMachinesClient) GeneralizeResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + autorest.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByClosing()) + result.Response = resp + return +} + +// Get the operation to get a virtual machine. +// +// resourceGroupName is the name of the resource group. vmName is the name of +// the virtual machine. expand is the expand expression to apply on the +// operation. +func (client VirtualMachinesClient) Get(resourceGroupName string, vmName string, expand string) (result VirtualMachine, ae error) { + req, err := client.GetPreparer(resourceGroupName, vmName, expand) + if err != nil { + return result, autorest.NewErrorWithError(err, "compute/VirtualMachinesClient", "Get", autorest.UndefinedStatusCode, "Failure preparing request") + } + + resp, err := client.GetSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "compute/VirtualMachinesClient", "Get", resp.StatusCode, "Failure sending request") + } + + result, err = client.GetResponder(resp) + if err != nil { + ae = autorest.NewErrorWithError(err, "compute/VirtualMachinesClient", "Get", resp.StatusCode, "Failure responding to request") + } + + return +} + +// GetPreparer prepares the Get request. +func (client VirtualMachinesClient) GetPreparer(resourceGroupName string, vmName string, expand string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": url.QueryEscape(resourceGroupName), + "subscriptionId": url.QueryEscape(client.SubscriptionID), + "vmName": url.QueryEscape(vmName), + } + + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + if len(expand) > 0 { + queryParameters["$expand"] = expand + } + + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}"), + autorest.WithPathParameters(pathParameters), + autorest.WithQueryParameters(queryParameters)) +} + +// GetSender sends the Get request. The method will close the +// http.Response Body if it receives an error. +func (client VirtualMachinesClient) GetSender(req *http.Request) (*http.Response, error) { + return client.Send(req, http.StatusOK) +} + +// GetResponder handles the response to the Get request. The method always +// closes the http.Response Body. +func (client VirtualMachinesClient) GetResponder(resp *http.Response) (result VirtualMachine, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + autorest.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// List the operation to list virtual machines under a resource group. +// +// resourceGroupName is the name of the resource group. 
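+//
+// Illustrative call (group name is a placeholder); page through larger
+// result sets with ListNextResults below:
+//
+//	page, err := client.List("myGroup")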
+func (client VirtualMachinesClient) List(resourceGroupName string) (result VirtualMachineListResult, ae error) {
+	req, err := client.ListPreparer(resourceGroupName)
+	if err != nil {
+		return result, autorest.NewErrorWithError(err, "compute/VirtualMachinesClient", "List", autorest.UndefinedStatusCode, "Failure preparing request")
+	}
+
+	resp, err := client.ListSender(req)
+	if err != nil {
+		result.Response = autorest.Response{Response: resp}
+		return result, autorest.NewErrorWithError(err, "compute/VirtualMachinesClient", "List", resp.StatusCode, "Failure sending request")
+	}
+
+	result, err = client.ListResponder(resp)
+	if err != nil {
+		ae = autorest.NewErrorWithError(err, "compute/VirtualMachinesClient", "List", resp.StatusCode, "Failure responding to request")
+	}
+
+	return
+}
+
+// ListPreparer prepares the List request.
+func (client VirtualMachinesClient) ListPreparer(resourceGroupName string) (*http.Request, error) {
+	pathParameters := map[string]interface{}{
+		"resourceGroupName": url.QueryEscape(resourceGroupName),
+		"subscriptionId": url.QueryEscape(client.SubscriptionID),
+	}
+
+	queryParameters := map[string]interface{}{
+		"api-version": APIVersion,
+	}
+
+	return autorest.Prepare(&http.Request{},
+		autorest.AsJSON(),
+		autorest.AsGet(),
+		autorest.WithBaseURL(client.BaseURI),
+		autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines"),
+		autorest.WithPathParameters(pathParameters),
+		autorest.WithQueryParameters(queryParameters))
+}
+
+// ListSender sends the List request. The method will close the
+// http.Response Body if it receives an error.
+func (client VirtualMachinesClient) ListSender(req *http.Request) (*http.Response, error) {
+	return client.Send(req, http.StatusOK)
+}
+
+// ListResponder handles the response to the List request. The method always
+// closes the http.Response Body.
+func (client VirtualMachinesClient) ListResponder(resp *http.Response) (result VirtualMachineListResult, err error) {
+	err = autorest.Respond(
+		resp,
+		client.ByInspecting(),
+		autorest.WithErrorUnlessStatusCode(http.StatusOK),
+		autorest.ByUnmarshallingJSON(&result),
+		autorest.ByClosing())
+	result.Response = autorest.Response{Response: resp}
+	return
+}
+
+// ListNextResults retrieves the next set of results, if any.
+func (client VirtualMachinesClient) ListNextResults(lastResults VirtualMachineListResult) (result VirtualMachineListResult, ae error) {
+	req, err := lastResults.VirtualMachineListResultPreparer()
+	if err != nil {
+		return result, autorest.NewErrorWithError(err, "compute/VirtualMachinesClient", "List", autorest.UndefinedStatusCode, "Failure preparing next results request")
+	}
+	if req == nil {
+		return
+	}
+
+	resp, err := client.ListSender(req)
+	if err != nil {
+		result.Response = autorest.Response{Response: resp}
+		return result, autorest.NewErrorWithError(err, "compute/VirtualMachinesClient", "List", resp.StatusCode, "Failure sending next results request")
+	}
+
+	result, err = client.ListResponder(resp)
+	if err != nil {
+		ae = autorest.NewErrorWithError(err, "compute/VirtualMachinesClient", "List", resp.StatusCode, "Failure responding to next results request")
+	}
+
+	return
+}
+
+// ListAll gets the list of Virtual Machines in the subscription. Use the
+// nextLink property in the response to get the next page of Virtual
+// Machines; repeat until nextLink is null to fetch them all.
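+//
+// A paging sketch using the methods below (the NextLink field is assumed to
+// carry the nextLink property described above):
+//
+//	res, err := client.ListAll()
+//	for err == nil {
+//		// ... consume *res.Value ...
+//		if res.NextLink == nil || *res.NextLink == "" {
+//			break
+//		}
+//		res, err = client.ListAllNextResults(res)
+//	}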
+func (client VirtualMachinesClient) ListAll() (result VirtualMachineListResult, ae error) {
+	req, err := client.ListAllPreparer()
+	if err != nil {
+		return result, autorest.NewErrorWithError(err, "compute/VirtualMachinesClient", "ListAll", autorest.UndefinedStatusCode, "Failure preparing request")
+	}
+
+	resp, err := client.ListAllSender(req)
+	if err != nil {
+		result.Response = autorest.Response{Response: resp}
+		return result, autorest.NewErrorWithError(err, "compute/VirtualMachinesClient", "ListAll", resp.StatusCode, "Failure sending request")
+	}
+
+	result, err = client.ListAllResponder(resp)
+	if err != nil {
+		ae = autorest.NewErrorWithError(err, "compute/VirtualMachinesClient", "ListAll", resp.StatusCode, "Failure responding to request")
+	}
+
+	return
+}
+
+// ListAllPreparer prepares the ListAll request.
+func (client VirtualMachinesClient) ListAllPreparer() (*http.Request, error) {
+	pathParameters := map[string]interface{}{
+		"subscriptionId": url.QueryEscape(client.SubscriptionID),
+	}
+
+	queryParameters := map[string]interface{}{
+		"api-version": APIVersion,
+	}
+
+	return autorest.Prepare(&http.Request{},
+		autorest.AsJSON(),
+		autorest.AsGet(),
+		autorest.WithBaseURL(client.BaseURI),
+		autorest.WithPath("/subscriptions/{subscriptionId}/providers/Microsoft.Compute/virtualMachines"),
+		autorest.WithPathParameters(pathParameters),
+		autorest.WithQueryParameters(queryParameters))
+}
+
+// ListAllSender sends the ListAll request. The method will close the
+// http.Response Body if it receives an error.
+func (client VirtualMachinesClient) ListAllSender(req *http.Request) (*http.Response, error) {
+	return client.Send(req, http.StatusOK)
+}
+
+// ListAllResponder handles the response to the ListAll request. The method always
+// closes the http.Response Body.
+func (client VirtualMachinesClient) ListAllResponder(resp *http.Response) (result VirtualMachineListResult, err error) {
+	err = autorest.Respond(
+		resp,
+		client.ByInspecting(),
+		autorest.WithErrorUnlessStatusCode(http.StatusOK),
+		autorest.ByUnmarshallingJSON(&result),
+		autorest.ByClosing())
+	result.Response = autorest.Response{Response: resp}
+	return
+}
+
+// ListAllNextResults retrieves the next set of results, if any.
+func (client VirtualMachinesClient) ListAllNextResults(lastResults VirtualMachineListResult) (result VirtualMachineListResult, ae error) {
+	req, err := lastResults.VirtualMachineListResultPreparer()
+	if err != nil {
+		return result, autorest.NewErrorWithError(err, "compute/VirtualMachinesClient", "ListAll", autorest.UndefinedStatusCode, "Failure preparing next results request")
+	}
+	if req == nil {
+		return
+	}
+
+	resp, err := client.ListAllSender(req)
+	if err != nil {
+		result.Response = autorest.Response{Response: resp}
+		return result, autorest.NewErrorWithError(err, "compute/VirtualMachinesClient", "ListAll", resp.StatusCode, "Failure sending next results request")
+	}
+
+	result, err = client.ListAllResponder(resp)
+	if err != nil {
+		ae = autorest.NewErrorWithError(err, "compute/VirtualMachinesClient", "ListAll", resp.StatusCode, "Failure responding to next results request")
+	}
+
+	return
+}
+
+// ListAvailableSizes lists the virtual machine sizes available for a
+// virtual machine.
+//
+// resourceGroupName is the name of the resource group. vmName is the name of
+// the virtual machine.
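+//
+// Illustrative call (names are placeholders):
+//
+//	sizes, err := client.ListAvailableSizes("myGroup", "myVM")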
+func (client VirtualMachinesClient) ListAvailableSizes(resourceGroupName string, vmName string) (result VirtualMachineSizeListResult, ae error) { + req, err := client.ListAvailableSizesPreparer(resourceGroupName, vmName) + if err != nil { + return result, autorest.NewErrorWithError(err, "compute/VirtualMachinesClient", "ListAvailableSizes", autorest.UndefinedStatusCode, "Failure preparing request") + } + + resp, err := client.ListAvailableSizesSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "compute/VirtualMachinesClient", "ListAvailableSizes", resp.StatusCode, "Failure sending request") + } + + result, err = client.ListAvailableSizesResponder(resp) + if err != nil { + ae = autorest.NewErrorWithError(err, "compute/VirtualMachinesClient", "ListAvailableSizes", resp.StatusCode, "Failure responding to request") + } + + return +} + +// ListAvailableSizesPreparer prepares the ListAvailableSizes request. +func (client VirtualMachinesClient) ListAvailableSizesPreparer(resourceGroupName string, vmName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": url.QueryEscape(resourceGroupName), + "subscriptionId": url.QueryEscape(client.SubscriptionID), + "vmName": url.QueryEscape(vmName), + } + + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/vmSizes"), + autorest.WithPathParameters(pathParameters), + autorest.WithQueryParameters(queryParameters)) +} + +// ListAvailableSizesSender sends the ListAvailableSizes request. The method will close the +// http.Response Body if it receives an error. +func (client VirtualMachinesClient) ListAvailableSizesSender(req *http.Request) (*http.Response, error) { + return client.Send(req, http.StatusOK) +} + +// ListAvailableSizesResponder handles the response to the ListAvailableSizes request. The method always +// closes the http.Response Body. +func (client VirtualMachinesClient) ListAvailableSizesResponder(resp *http.Response) (result VirtualMachineSizeListResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + autorest.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// ListAvailableSizesNextResults retrieves the next set of results, if any. 
+func (client VirtualMachinesClient) ListAvailableSizesNextResults(lastResults VirtualMachineSizeListResult) (result VirtualMachineSizeListResult, ae error) {
+	req, err := lastResults.VirtualMachineSizeListResultPreparer()
+	if err != nil {
+		return result, autorest.NewErrorWithError(err, "compute/VirtualMachinesClient", "ListAvailableSizes", autorest.UndefinedStatusCode, "Failure preparing next results request")
+	}
+	if req == nil {
+		return
+	}
+
+	resp, err := client.ListAvailableSizesSender(req)
+	if err != nil {
+		result.Response = autorest.Response{Response: resp}
+		return result, autorest.NewErrorWithError(err, "compute/VirtualMachinesClient", "ListAvailableSizes", resp.StatusCode, "Failure sending next results request")
+	}
+
+	result, err = client.ListAvailableSizesResponder(resp)
+	if err != nil {
+		ae = autorest.NewErrorWithError(err, "compute/VirtualMachinesClient", "ListAvailableSizes", resp.StatusCode, "Failure responding to next results request")
+	}
+
+	return
+}
+
+// PowerOff the operation to power off (stop) a virtual machine.
+//
+// resourceGroupName is the name of the resource group. vmName is the name of
+// the virtual machine.
+func (client VirtualMachinesClient) PowerOff(resourceGroupName string, vmName string) (result autorest.Response, ae error) {
+	req, err := client.PowerOffPreparer(resourceGroupName, vmName)
+	if err != nil {
+		return result, autorest.NewErrorWithError(err, "compute/VirtualMachinesClient", "PowerOff", autorest.UndefinedStatusCode, "Failure preparing request")
+	}
+
+	resp, err := client.PowerOffSender(req)
+	if err != nil {
+		result.Response = resp
+		return result, autorest.NewErrorWithError(err, "compute/VirtualMachinesClient", "PowerOff", resp.StatusCode, "Failure sending request")
+	}
+
+	result, err = client.PowerOffResponder(resp)
+	if err != nil {
+		ae = autorest.NewErrorWithError(err, "compute/VirtualMachinesClient", "PowerOff", resp.StatusCode, "Failure responding to request")
+	}
+
+	return
+}
+
+// PowerOffPreparer prepares the PowerOff request.
+func (client VirtualMachinesClient) PowerOffPreparer(resourceGroupName string, vmName string) (*http.Request, error) {
+	pathParameters := map[string]interface{}{
+		"resourceGroupName": url.QueryEscape(resourceGroupName),
+		"subscriptionId": url.QueryEscape(client.SubscriptionID),
+		"vmName": url.QueryEscape(vmName),
+	}
+
+	queryParameters := map[string]interface{}{
+		"api-version": APIVersion,
+	}
+
+	return autorest.Prepare(&http.Request{},
+		autorest.AsJSON(),
+		autorest.AsPost(),
+		autorest.WithBaseURL(client.BaseURI),
+		autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/powerOff"),
+		autorest.WithPathParameters(pathParameters),
+		autorest.WithQueryParameters(queryParameters))
+}
+
+// PowerOffSender sends the PowerOff request. The method will close the
+// http.Response Body if it receives an error.
+func (client VirtualMachinesClient) PowerOffSender(req *http.Request) (*http.Response, error) {
+	return client.Send(req, http.StatusOK, http.StatusAccepted)
+}
+
+// PowerOffResponder handles the response to the PowerOff request. The method always
+// closes the http.Response Body.
+func (client VirtualMachinesClient) PowerOffResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + autorest.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), + autorest.ByClosing()) + result.Response = resp + return +} + +// Restart the operation to restart a virtual machine. +// +// resourceGroupName is the name of the resource group. vmName is the name of +// the virtual machine. +func (client VirtualMachinesClient) Restart(resourceGroupName string, vmName string) (result autorest.Response, ae error) { + req, err := client.RestartPreparer(resourceGroupName, vmName) + if err != nil { + return result, autorest.NewErrorWithError(err, "compute/VirtualMachinesClient", "Restart", autorest.UndefinedStatusCode, "Failure preparing request") + } + + resp, err := client.RestartSender(req) + if err != nil { + result.Response = resp + return result, autorest.NewErrorWithError(err, "compute/VirtualMachinesClient", "Restart", resp.StatusCode, "Failure sending request") + } + + result, err = client.RestartResponder(resp) + if err != nil { + ae = autorest.NewErrorWithError(err, "compute/VirtualMachinesClient", "Restart", resp.StatusCode, "Failure responding to request") + } + + return +} + +// RestartPreparer prepares the Restart request. +func (client VirtualMachinesClient) RestartPreparer(resourceGroupName string, vmName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": url.QueryEscape(resourceGroupName), + "subscriptionId": url.QueryEscape(client.SubscriptionID), + "vmName": url.QueryEscape(vmName), + } + + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/restart"), + autorest.WithPathParameters(pathParameters), + autorest.WithQueryParameters(queryParameters)) +} + +// RestartSender sends the Restart request. The method will close the +// http.Response Body if it receives an error. +func (client VirtualMachinesClient) RestartSender(req *http.Request) (*http.Response, error) { + return client.Send(req, http.StatusOK, http.StatusAccepted) +} + +// RestartResponder handles the response to the Restart request. The method always +// closes the http.Response Body. +func (client VirtualMachinesClient) RestartResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + autorest.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), + autorest.ByClosing()) + result.Response = resp + return +} + +// Start the operation to start a virtual machine. +// +// resourceGroupName is the name of the resource group. vmName is the name of +// the virtual machine. 
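+//
+// Illustrative call (names are placeholders); as with PowerOff and Restart,
+// a 202 Accepted response means the operation continues asynchronously:
+//
+//	res, err := client.Start("myGroup", "myVM")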
+func (client VirtualMachinesClient) Start(resourceGroupName string, vmName string) (result autorest.Response, ae error) { + req, err := client.StartPreparer(resourceGroupName, vmName) + if err != nil { + return result, autorest.NewErrorWithError(err, "compute/VirtualMachinesClient", "Start", autorest.UndefinedStatusCode, "Failure preparing request") + } + + resp, err := client.StartSender(req) + if err != nil { + result.Response = resp + return result, autorest.NewErrorWithError(err, "compute/VirtualMachinesClient", "Start", resp.StatusCode, "Failure sending request") + } + + result, err = client.StartResponder(resp) + if err != nil { + ae = autorest.NewErrorWithError(err, "compute/VirtualMachinesClient", "Start", resp.StatusCode, "Failure responding to request") + } + + return +} + +// StartPreparer prepares the Start request. +func (client VirtualMachinesClient) StartPreparer(resourceGroupName string, vmName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": url.QueryEscape(resourceGroupName), + "subscriptionId": url.QueryEscape(client.SubscriptionID), + "vmName": url.QueryEscape(vmName), + } + + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/start"), + autorest.WithPathParameters(pathParameters), + autorest.WithQueryParameters(queryParameters)) +} + +// StartSender sends the Start request. The method will close the +// http.Response Body if it receives an error. +func (client VirtualMachinesClient) StartSender(req *http.Request) (*http.Response, error) { + return client.Send(req, http.StatusOK, http.StatusAccepted) +} + +// StartResponder handles the response to the Start request. The method always +// closes the http.Response Body. +func (client VirtualMachinesClient) StartResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + autorest.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), + autorest.ByClosing()) + result.Response = resp + return +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/virtualmachinescalesets.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/virtualmachinescalesets.go new file mode 100644 index 000000000..462121ee2 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/virtualmachinescalesets.go @@ -0,0 +1,975 @@ +package compute + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator 0.12.0.0 +// Changes may cause incorrect behavior and will be lost if the code is +// regenerated. 
+
+import (
+	"github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest"
+	"net/http"
+	"net/url"
+)
+
+// VirtualMachineScaleSetsClient is the Compute Management Client.
+type VirtualMachineScaleSetsClient struct {
+	ManagementClient
+}
+
+// NewVirtualMachineScaleSetsClient creates an instance of the
+// VirtualMachineScaleSetsClient client.
+func NewVirtualMachineScaleSetsClient(subscriptionID string) VirtualMachineScaleSetsClient {
+	return NewVirtualMachineScaleSetsClientWithBaseURI(DefaultBaseURI, subscriptionID)
+}
+
+// NewVirtualMachineScaleSetsClientWithBaseURI creates an instance of the
+// VirtualMachineScaleSetsClient client.
+func NewVirtualMachineScaleSetsClientWithBaseURI(baseURI string, subscriptionID string) VirtualMachineScaleSetsClient {
+	return VirtualMachineScaleSetsClient{NewWithBaseURI(baseURI, subscriptionID)}
+}
+
+// CreateOrUpdate the operation to create or update a virtual machine scale
+// set.
+//
+// resourceGroupName is the name of the resource group. name is the name of
+// the virtual machine scale set. parameters is parameters supplied to the
+// Create Virtual Machine Scale Set operation.
+func (client VirtualMachineScaleSetsClient) CreateOrUpdate(resourceGroupName string, name string, parameters VirtualMachineScaleSet) (result VirtualMachineScaleSet, ae error) {
+	req, err := client.CreateOrUpdatePreparer(resourceGroupName, name, parameters)
+	if err != nil {
+		return result, autorest.NewErrorWithError(err, "compute/VirtualMachineScaleSetsClient", "CreateOrUpdate", autorest.UndefinedStatusCode, "Failure preparing request")
+	}
+
+	resp, err := client.CreateOrUpdateSender(req)
+	if err != nil {
+		result.Response = autorest.Response{Response: resp}
+		return result, autorest.NewErrorWithError(err, "compute/VirtualMachineScaleSetsClient", "CreateOrUpdate", resp.StatusCode, "Failure sending request")
+	}
+
+	result, err = client.CreateOrUpdateResponder(resp)
+	if err != nil {
+		ae = autorest.NewErrorWithError(err, "compute/VirtualMachineScaleSetsClient", "CreateOrUpdate", resp.StatusCode, "Failure responding to request")
+	}
+
+	return
+}
+
+// CreateOrUpdatePreparer prepares the CreateOrUpdate request.
+func (client VirtualMachineScaleSetsClient) CreateOrUpdatePreparer(resourceGroupName string, name string, parameters VirtualMachineScaleSet) (*http.Request, error) {
+	pathParameters := map[string]interface{}{
+		"name": url.QueryEscape(name),
+		"resourceGroupName": url.QueryEscape(resourceGroupName),
+		"subscriptionId": url.QueryEscape(client.SubscriptionID),
+	}
+
+	queryParameters := map[string]interface{}{
+		"api-version": APIVersion,
+	}
+
+	return autorest.Prepare(&http.Request{},
+		autorest.AsJSON(),
+		autorest.AsPut(),
+		autorest.WithBaseURL(client.BaseURI),
+		autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{name}"),
+		autorest.WithJSON(parameters),
+		autorest.WithPathParameters(pathParameters),
+		autorest.WithQueryParameters(queryParameters))
+}
+
+// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the
+// http.Response Body if it receives an error.
+func (client VirtualMachineScaleSetsClient) CreateOrUpdateSender(req *http.Request) (*http.Response, error) {
+	return client.Send(req, http.StatusCreated, http.StatusOK)
+}
+
+// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always
+// closes the http.Response Body.
+func (client VirtualMachineScaleSetsClient) CreateOrUpdateResponder(resp *http.Response) (result VirtualMachineScaleSet, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + autorest.WithErrorUnlessStatusCode(http.StatusCreated, http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// Deallocate the operation to deallocate virtual machines in a virtual +// machine scale set. +// +// resourceGroupName is the name of the resource group. vmScaleSetName is the +// name of the virtual machine scale set. vmInstanceIDs is the list of +// virtual machine scale set instance IDs. +func (client VirtualMachineScaleSetsClient) Deallocate(resourceGroupName string, vmScaleSetName string, vmInstanceIDs *VirtualMachineScaleSetVMInstanceIDs) (result autorest.Response, ae error) { + req, err := client.DeallocatePreparer(resourceGroupName, vmScaleSetName, vmInstanceIDs) + if err != nil { + return result, autorest.NewErrorWithError(err, "compute/VirtualMachineScaleSetsClient", "Deallocate", autorest.UndefinedStatusCode, "Failure preparing request") + } + + resp, err := client.DeallocateSender(req) + if err != nil { + result.Response = resp + return result, autorest.NewErrorWithError(err, "compute/VirtualMachineScaleSetsClient", "Deallocate", resp.StatusCode, "Failure sending request") + } + + result, err = client.DeallocateResponder(resp) + if err != nil { + ae = autorest.NewErrorWithError(err, "compute/VirtualMachineScaleSetsClient", "Deallocate", resp.StatusCode, "Failure responding to request") + } + + return +} + +// DeallocatePreparer prepares the Deallocate request. +func (client VirtualMachineScaleSetsClient) DeallocatePreparer(resourceGroupName string, vmScaleSetName string, vmInstanceIDs *VirtualMachineScaleSetVMInstanceIDs) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": url.QueryEscape(resourceGroupName), + "subscriptionId": url.QueryEscape(client.SubscriptionID), + "vmScaleSetName": url.QueryEscape(vmScaleSetName), + } + + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsJSON(), + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/deallocate"), + autorest.WithPathParameters(pathParameters), + autorest.WithQueryParameters(queryParameters)) + if vmInstanceIDs != nil { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithJSON(vmInstanceIDs)) + } + return preparer.Prepare(&http.Request{}) +} + +// DeallocateSender sends the Deallocate request. The method will close the +// http.Response Body if it receives an error. +func (client VirtualMachineScaleSetsClient) DeallocateSender(req *http.Request) (*http.Response, error) { + return client.Send(req, http.StatusOK, http.StatusAccepted) +} + +// DeallocateResponder handles the response to the Deallocate request. The method always +// closes the http.Response Body. +func (client VirtualMachineScaleSetsClient) DeallocateResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + autorest.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), + autorest.ByClosing()) + result.Response = resp + return +} + +// Delete the operation to delete a virtual machine scale set. 
+// +// resourceGroupName is the name of the resource group. vmScaleSetName is the +// name of the virtual machine scale set. +func (client VirtualMachineScaleSetsClient) Delete(resourceGroupName string, vmScaleSetName string) (result autorest.Response, ae error) { + req, err := client.DeletePreparer(resourceGroupName, vmScaleSetName) + if err != nil { + return result, autorest.NewErrorWithError(err, "compute/VirtualMachineScaleSetsClient", "Delete", autorest.UndefinedStatusCode, "Failure preparing request") + } + + resp, err := client.DeleteSender(req) + if err != nil { + result.Response = resp + return result, autorest.NewErrorWithError(err, "compute/VirtualMachineScaleSetsClient", "Delete", resp.StatusCode, "Failure sending request") + } + + result, err = client.DeleteResponder(resp) + if err != nil { + ae = autorest.NewErrorWithError(err, "compute/VirtualMachineScaleSetsClient", "Delete", resp.StatusCode, "Failure responding to request") + } + + return +} + +// DeletePreparer prepares the Delete request. +func (client VirtualMachineScaleSetsClient) DeletePreparer(resourceGroupName string, vmScaleSetName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": url.QueryEscape(resourceGroupName), + "subscriptionId": url.QueryEscape(client.SubscriptionID), + "vmScaleSetName": url.QueryEscape(vmScaleSetName), + } + + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsDelete(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}"), + autorest.WithPathParameters(pathParameters), + autorest.WithQueryParameters(queryParameters)) +} + +// DeleteSender sends the Delete request. The method will close the +// http.Response Body if it receives an error. +func (client VirtualMachineScaleSetsClient) DeleteSender(req *http.Request) (*http.Response, error) { + return client.Send(req, http.StatusAccepted, http.StatusOK, http.StatusNoContent) +} + +// DeleteResponder handles the response to the Delete request. The method always +// closes the http.Response Body. +func (client VirtualMachineScaleSetsClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + autorest.WithErrorUnlessStatusCode(http.StatusAccepted, http.StatusOK, http.StatusNoContent), + autorest.ByClosing()) + result.Response = resp + return +} + +// DeleteInstances the operation to delete virtual machines in a virtual +// machine scale set. +// +// resourceGroupName is the name of the resource group. vmScaleSetName is the +// name of the virtual machine scale set. vmInstanceIDs is the list of +// virtual machine scale set instance IDs. 
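+//
+// Illustrative call (names are placeholders; ids is a
+// VirtualMachineScaleSetVMInstanceRequiredIDs value naming the instances to
+// remove):
+//
+//	res, err := client.DeleteInstances("myGroup", "myScaleSet", ids)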
+func (client VirtualMachineScaleSetsClient) DeleteInstances(resourceGroupName string, vmScaleSetName string, vmInstanceIDs VirtualMachineScaleSetVMInstanceRequiredIDs) (result autorest.Response, ae error) { + req, err := client.DeleteInstancesPreparer(resourceGroupName, vmScaleSetName, vmInstanceIDs) + if err != nil { + return result, autorest.NewErrorWithError(err, "compute/VirtualMachineScaleSetsClient", "DeleteInstances", autorest.UndefinedStatusCode, "Failure preparing request") + } + + resp, err := client.DeleteInstancesSender(req) + if err != nil { + result.Response = resp + return result, autorest.NewErrorWithError(err, "compute/VirtualMachineScaleSetsClient", "DeleteInstances", resp.StatusCode, "Failure sending request") + } + + result, err = client.DeleteInstancesResponder(resp) + if err != nil { + ae = autorest.NewErrorWithError(err, "compute/VirtualMachineScaleSetsClient", "DeleteInstances", resp.StatusCode, "Failure responding to request") + } + + return +} + +// DeleteInstancesPreparer prepares the DeleteInstances request. +func (client VirtualMachineScaleSetsClient) DeleteInstancesPreparer(resourceGroupName string, vmScaleSetName string, vmInstanceIDs VirtualMachineScaleSetVMInstanceRequiredIDs) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": url.QueryEscape(resourceGroupName), + "subscriptionId": url.QueryEscape(client.SubscriptionID), + "vmScaleSetName": url.QueryEscape(vmScaleSetName), + } + + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/delete"), + autorest.WithJSON(vmInstanceIDs), + autorest.WithPathParameters(pathParameters), + autorest.WithQueryParameters(queryParameters)) +} + +// DeleteInstancesSender sends the DeleteInstances request. The method will close the +// http.Response Body if it receives an error. +func (client VirtualMachineScaleSetsClient) DeleteInstancesSender(req *http.Request) (*http.Response, error) { + return client.Send(req, http.StatusOK, http.StatusAccepted) +} + +// DeleteInstancesResponder handles the response to the DeleteInstances request. The method always +// closes the http.Response Body. +func (client VirtualMachineScaleSetsClient) DeleteInstancesResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + autorest.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), + autorest.ByClosing()) + result.Response = resp + return +} + +// Get the operation to get a virtual machine scale set. +// +// resourceGroupName is the name of the resource group. vmScaleSetName is the +// name of the virtual machine scale set. 
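+//
+// Illustrative call (names are placeholders):
+//
+//	ss, err := client.Get("myGroup", "myScaleSet")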
+func (client VirtualMachineScaleSetsClient) Get(resourceGroupName string, vmScaleSetName string) (result VirtualMachineScaleSet, ae error) { + req, err := client.GetPreparer(resourceGroupName, vmScaleSetName) + if err != nil { + return result, autorest.NewErrorWithError(err, "compute/VirtualMachineScaleSetsClient", "Get", autorest.UndefinedStatusCode, "Failure preparing request") + } + + resp, err := client.GetSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "compute/VirtualMachineScaleSetsClient", "Get", resp.StatusCode, "Failure sending request") + } + + result, err = client.GetResponder(resp) + if err != nil { + ae = autorest.NewErrorWithError(err, "compute/VirtualMachineScaleSetsClient", "Get", resp.StatusCode, "Failure responding to request") + } + + return +} + +// GetPreparer prepares the Get request. +func (client VirtualMachineScaleSetsClient) GetPreparer(resourceGroupName string, vmScaleSetName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": url.QueryEscape(resourceGroupName), + "subscriptionId": url.QueryEscape(client.SubscriptionID), + "vmScaleSetName": url.QueryEscape(vmScaleSetName), + } + + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}"), + autorest.WithPathParameters(pathParameters), + autorest.WithQueryParameters(queryParameters)) +} + +// GetSender sends the Get request. The method will close the +// http.Response Body if it receives an error. +func (client VirtualMachineScaleSetsClient) GetSender(req *http.Request) (*http.Response, error) { + return client.Send(req, http.StatusOK) +} + +// GetResponder handles the response to the Get request. The method always +// closes the http.Response Body. +func (client VirtualMachineScaleSetsClient) GetResponder(resp *http.Response) (result VirtualMachineScaleSet, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + autorest.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// GetInstanceView the operation to get a virtual machine scale set instance +// view. +// +// resourceGroupName is the name of the resource group. vmScaleSetName is the +// name of the virtual machine scale set. 
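+//
+// Illustrative call (names are placeholders):
+//
+//	iv, err := client.GetInstanceView("myGroup", "myScaleSet")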
+func (client VirtualMachineScaleSetsClient) GetInstanceView(resourceGroupName string, vmScaleSetName string) (result VirtualMachineScaleSetInstanceView, ae error) { + req, err := client.GetInstanceViewPreparer(resourceGroupName, vmScaleSetName) + if err != nil { + return result, autorest.NewErrorWithError(err, "compute/VirtualMachineScaleSetsClient", "GetInstanceView", autorest.UndefinedStatusCode, "Failure preparing request") + } + + resp, err := client.GetInstanceViewSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "compute/VirtualMachineScaleSetsClient", "GetInstanceView", resp.StatusCode, "Failure sending request") + } + + result, err = client.GetInstanceViewResponder(resp) + if err != nil { + ae = autorest.NewErrorWithError(err, "compute/VirtualMachineScaleSetsClient", "GetInstanceView", resp.StatusCode, "Failure responding to request") + } + + return +} + +// GetInstanceViewPreparer prepares the GetInstanceView request. +func (client VirtualMachineScaleSetsClient) GetInstanceViewPreparer(resourceGroupName string, vmScaleSetName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": url.QueryEscape(resourceGroupName), + "subscriptionId": url.QueryEscape(client.SubscriptionID), + "vmScaleSetName": url.QueryEscape(vmScaleSetName), + } + + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/instanceView"), + autorest.WithPathParameters(pathParameters), + autorest.WithQueryParameters(queryParameters)) +} + +// GetInstanceViewSender sends the GetInstanceView request. The method will close the +// http.Response Body if it receives an error. +func (client VirtualMachineScaleSetsClient) GetInstanceViewSender(req *http.Request) (*http.Response, error) { + return client.Send(req, http.StatusOK) +} + +// GetInstanceViewResponder handles the response to the GetInstanceView request. The method always +// closes the http.Response Body. +func (client VirtualMachineScaleSetsClient) GetInstanceViewResponder(resp *http.Response) (result VirtualMachineScaleSetInstanceView, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + autorest.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// List the operation to list virtual machine scale sets under a resource +// group. +// +// resourceGroupName is the name of the resource group. 
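+//
+// Illustrative call (group name is a placeholder); page through larger
+// result sets with ListNextResults below:
+//
+//	page, err := client.List("myGroup")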
+func (client VirtualMachineScaleSetsClient) List(resourceGroupName string) (result VirtualMachineScaleSetListResult, ae error) {
+	req, err := client.ListPreparer(resourceGroupName)
+	if err != nil {
+		return result, autorest.NewErrorWithError(err, "compute/VirtualMachineScaleSetsClient", "List", autorest.UndefinedStatusCode, "Failure preparing request")
+	}
+
+	resp, err := client.ListSender(req)
+	if err != nil {
+		result.Response = autorest.Response{Response: resp}
+		return result, autorest.NewErrorWithError(err, "compute/VirtualMachineScaleSetsClient", "List", resp.StatusCode, "Failure sending request")
+	}
+
+	result, err = client.ListResponder(resp)
+	if err != nil {
+		ae = autorest.NewErrorWithError(err, "compute/VirtualMachineScaleSetsClient", "List", resp.StatusCode, "Failure responding to request")
+	}
+
+	return
+}
+
+// ListPreparer prepares the List request.
+func (client VirtualMachineScaleSetsClient) ListPreparer(resourceGroupName string) (*http.Request, error) {
+	pathParameters := map[string]interface{}{
+		"resourceGroupName": url.QueryEscape(resourceGroupName),
+		"subscriptionId": url.QueryEscape(client.SubscriptionID),
+	}
+
+	queryParameters := map[string]interface{}{
+		"api-version": APIVersion,
+	}
+
+	return autorest.Prepare(&http.Request{},
+		autorest.AsJSON(),
+		autorest.AsGet(),
+		autorest.WithBaseURL(client.BaseURI),
+		autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets"),
+		autorest.WithPathParameters(pathParameters),
+		autorest.WithQueryParameters(queryParameters))
+}
+
+// ListSender sends the List request. The method will close the
+// http.Response Body if it receives an error.
+func (client VirtualMachineScaleSetsClient) ListSender(req *http.Request) (*http.Response, error) {
+	return client.Send(req, http.StatusOK)
+}
+
+// ListResponder handles the response to the List request. The method always
+// closes the http.Response Body.
+func (client VirtualMachineScaleSetsClient) ListResponder(resp *http.Response) (result VirtualMachineScaleSetListResult, err error) {
+	err = autorest.Respond(
+		resp,
+		client.ByInspecting(),
+		autorest.WithErrorUnlessStatusCode(http.StatusOK),
+		autorest.ByUnmarshallingJSON(&result),
+		autorest.ByClosing())
+	result.Response = autorest.Response{Response: resp}
+	return
+}
+
+// ListNextResults retrieves the next set of results, if any.
+func (client VirtualMachineScaleSetsClient) ListNextResults(lastResults VirtualMachineScaleSetListResult) (result VirtualMachineScaleSetListResult, ae error) {
+	req, err := lastResults.VirtualMachineScaleSetListResultPreparer()
+	if err != nil {
+		return result, autorest.NewErrorWithError(err, "compute/VirtualMachineScaleSetsClient", "List", autorest.UndefinedStatusCode, "Failure preparing next results request")
+	}
+	if req == nil {
+		return
+	}
+
+	resp, err := client.ListSender(req)
+	if err != nil {
+		result.Response = autorest.Response{Response: resp}
+		return result, autorest.NewErrorWithError(err, "compute/VirtualMachineScaleSetsClient", "List", resp.StatusCode, "Failure sending next results request")
+	}
+
+	result, err = client.ListResponder(resp)
+	if err != nil {
+		ae = autorest.NewErrorWithError(err, "compute/VirtualMachineScaleSetsClient", "List", resp.StatusCode, "Failure responding to next results request")
+	}
+
+	return
+}
+
+// ListAll gets the list of Virtual Machine Scale Sets in the subscription.
+// Use nextLink property in the response to get the next page of Virtual +// Machine Scale Sets. Do this till nextLink is not null to fetch all the +// Virtual Machine Scale Sets. +func (client VirtualMachineScaleSetsClient) ListAll() (result VirtualMachineScaleSetListWithLinkResult, ae error) { + req, err := client.ListAllPreparer() + if err != nil { + return result, autorest.NewErrorWithError(err, "compute/VirtualMachineScaleSetsClient", "ListAll", autorest.UndefinedStatusCode, "Failure preparing request") + } + + resp, err := client.ListAllSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "compute/VirtualMachineScaleSetsClient", "ListAll", resp.StatusCode, "Failure sending request") + } + + result, err = client.ListAllResponder(resp) + if err != nil { + ae = autorest.NewErrorWithError(err, "compute/VirtualMachineScaleSetsClient", "ListAll", resp.StatusCode, "Failure responding to request") + } + + return +} + +// ListAllPreparer prepares the ListAll request. +func (client VirtualMachineScaleSetsClient) ListAllPreparer() (*http.Request, error) { + pathParameters := map[string]interface{}{ + "subscriptionId": url.QueryEscape(client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPath("/subscriptions/{subscriptionId}/providers/Microsoft.Compute/virtualMachineScaleSets"), + autorest.WithPathParameters(pathParameters), + autorest.WithQueryParameters(queryParameters)) +} + +// ListAllSender sends the ListAll request. The method will close the +// http.Response Body if it receives an error. +func (client VirtualMachineScaleSetsClient) ListAllSender(req *http.Request) (*http.Response, error) { + return client.Send(req, http.StatusOK) +} + +// ListAllResponder handles the response to the ListAll request. The method always +// closes the http.Response Body. +func (client VirtualMachineScaleSetsClient) ListAllResponder(resp *http.Response) (result VirtualMachineScaleSetListWithLinkResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + autorest.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// ListAllNextResults retrieves the next set of results, if any. 
+func (client VirtualMachineScaleSetsClient) ListAllNextResults(lastResults VirtualMachineScaleSetListWithLinkResult) (result VirtualMachineScaleSetListWithLinkResult, ae error) { + req, err := lastResults.VirtualMachineScaleSetListWithLinkResultPreparer() + if err != nil { + return result, autorest.NewErrorWithError(err, "compute/VirtualMachineScaleSetsClient", "ListAll", autorest.UndefinedStatusCode, "Failure preparing next results request request") + } + if req == nil { + return + } + + resp, err := client.ListAllSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "compute/VirtualMachineScaleSetsClient", "ListAll", resp.StatusCode, "Failure sending next results request request") + } + + result, err = client.ListAllResponder(resp) + if err != nil { + ae = autorest.NewErrorWithError(err, "compute/VirtualMachineScaleSetsClient", "ListAll", resp.StatusCode, "Failure responding to next results request request") + } + + return +} + +// ListSkus the operation to list available skus for a virtual machine scale +// set. +// +// resourceGroupName is the name of the resource group. vmScaleSetName is the +// name of the virtual machine scale set. +func (client VirtualMachineScaleSetsClient) ListSkus(resourceGroupName string, vmScaleSetName string) (result VirtualMachineScaleSetListSkusResult, ae error) { + req, err := client.ListSkusPreparer(resourceGroupName, vmScaleSetName) + if err != nil { + return result, autorest.NewErrorWithError(err, "compute/VirtualMachineScaleSetsClient", "ListSkus", autorest.UndefinedStatusCode, "Failure preparing request") + } + + resp, err := client.ListSkusSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "compute/VirtualMachineScaleSetsClient", "ListSkus", resp.StatusCode, "Failure sending request") + } + + result, err = client.ListSkusResponder(resp) + if err != nil { + ae = autorest.NewErrorWithError(err, "compute/VirtualMachineScaleSetsClient", "ListSkus", resp.StatusCode, "Failure responding to request") + } + + return +} + +// ListSkusPreparer prepares the ListSkus request. +func (client VirtualMachineScaleSetsClient) ListSkusPreparer(resourceGroupName string, vmScaleSetName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": url.QueryEscape(resourceGroupName), + "subscriptionId": url.QueryEscape(client.SubscriptionID), + "vmScaleSetName": url.QueryEscape(vmScaleSetName), + } + + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/skus"), + autorest.WithPathParameters(pathParameters), + autorest.WithQueryParameters(queryParameters)) +} + +// ListSkusSender sends the ListSkus request. The method will close the +// http.Response Body if it receives an error. +func (client VirtualMachineScaleSetsClient) ListSkusSender(req *http.Request) (*http.Response, error) { + return client.Send(req, http.StatusOK) +} + +// ListSkusResponder handles the response to the ListSkus request. The method always +// closes the http.Response Body. 
+func (client VirtualMachineScaleSetsClient) ListSkusResponder(resp *http.Response) (result VirtualMachineScaleSetListSkusResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + autorest.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// ListSkusNextResults retrieves the next set of results, if any. +func (client VirtualMachineScaleSetsClient) ListSkusNextResults(lastResults VirtualMachineScaleSetListSkusResult) (result VirtualMachineScaleSetListSkusResult, ae error) { + req, err := lastResults.VirtualMachineScaleSetListSkusResultPreparer() + if err != nil { + return result, autorest.NewErrorWithError(err, "compute/VirtualMachineScaleSetsClient", "ListSkus", autorest.UndefinedStatusCode, "Failure preparing next results request request") + } + if req == nil { + return + } + + resp, err := client.ListSkusSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "compute/VirtualMachineScaleSetsClient", "ListSkus", resp.StatusCode, "Failure sending next results request request") + } + + result, err = client.ListSkusResponder(resp) + if err != nil { + ae = autorest.NewErrorWithError(err, "compute/VirtualMachineScaleSetsClient", "ListSkus", resp.StatusCode, "Failure responding to next results request request") + } + + return +} + +// PowerOff the operation to power off (stop) virtual machines in a virtual +// machine scale set. +// +// resourceGroupName is the name of the resource group. vmScaleSetName is the +// name of the virtual machine scale set. vmInstanceIDs is the list of +// virtual machine scale set instance IDs. +func (client VirtualMachineScaleSetsClient) PowerOff(resourceGroupName string, vmScaleSetName string, vmInstanceIDs *VirtualMachineScaleSetVMInstanceIDs) (result autorest.Response, ae error) { + req, err := client.PowerOffPreparer(resourceGroupName, vmScaleSetName, vmInstanceIDs) + if err != nil { + return result, autorest.NewErrorWithError(err, "compute/VirtualMachineScaleSetsClient", "PowerOff", autorest.UndefinedStatusCode, "Failure preparing request") + } + + resp, err := client.PowerOffSender(req) + if err != nil { + result.Response = resp + return result, autorest.NewErrorWithError(err, "compute/VirtualMachineScaleSetsClient", "PowerOff", resp.StatusCode, "Failure sending request") + } + + result, err = client.PowerOffResponder(resp) + if err != nil { + ae = autorest.NewErrorWithError(err, "compute/VirtualMachineScaleSetsClient", "PowerOff", resp.StatusCode, "Failure responding to request") + } + + return +} + +// PowerOffPreparer prepares the PowerOff request. 
+func (client VirtualMachineScaleSetsClient) PowerOffPreparer(resourceGroupName string, vmScaleSetName string, vmInstanceIDs *VirtualMachineScaleSetVMInstanceIDs) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": url.QueryEscape(resourceGroupName), + "subscriptionId": url.QueryEscape(client.SubscriptionID), + "vmScaleSetName": url.QueryEscape(vmScaleSetName), + } + + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsJSON(), + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/poweroff"), + autorest.WithPathParameters(pathParameters), + autorest.WithQueryParameters(queryParameters)) + if vmInstanceIDs != nil { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithJSON(vmInstanceIDs)) + } + return preparer.Prepare(&http.Request{}) +} + +// PowerOffSender sends the PowerOff request. The method will close the +// http.Response Body if it receives an error. +func (client VirtualMachineScaleSetsClient) PowerOffSender(req *http.Request) (*http.Response, error) { + return client.Send(req, http.StatusOK, http.StatusAccepted) +} + +// PowerOffResponder handles the response to the PowerOff request. The method always +// closes the http.Response Body. +func (client VirtualMachineScaleSetsClient) PowerOffResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + autorest.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), + autorest.ByClosing()) + result.Response = resp + return +} + +// Restart the operation to restart virtual machines in a virtual machine +// scale set. +// +// resourceGroupName is the name of the resource group. vmScaleSetName is the +// name of the virtual machine scale set. vmInstanceIDs is the list of +// virtual machine scale set instance IDs. +func (client VirtualMachineScaleSetsClient) Restart(resourceGroupName string, vmScaleSetName string, vmInstanceIDs *VirtualMachineScaleSetVMInstanceIDs) (result autorest.Response, ae error) { + req, err := client.RestartPreparer(resourceGroupName, vmScaleSetName, vmInstanceIDs) + if err != nil { + return result, autorest.NewErrorWithError(err, "compute/VirtualMachineScaleSetsClient", "Restart", autorest.UndefinedStatusCode, "Failure preparing request") + } + + resp, err := client.RestartSender(req) + if err != nil { + result.Response = resp + return result, autorest.NewErrorWithError(err, "compute/VirtualMachineScaleSetsClient", "Restart", resp.StatusCode, "Failure sending request") + } + + result, err = client.RestartResponder(resp) + if err != nil { + ae = autorest.NewErrorWithError(err, "compute/VirtualMachineScaleSetsClient", "Restart", resp.StatusCode, "Failure responding to request") + } + + return +} + +// RestartPreparer prepares the Restart request. 
+func (client VirtualMachineScaleSetsClient) RestartPreparer(resourceGroupName string, vmScaleSetName string, vmInstanceIDs *VirtualMachineScaleSetVMInstanceIDs) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": url.QueryEscape(resourceGroupName), + "subscriptionId": url.QueryEscape(client.SubscriptionID), + "vmScaleSetName": url.QueryEscape(vmScaleSetName), + } + + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsJSON(), + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/restart"), + autorest.WithPathParameters(pathParameters), + autorest.WithQueryParameters(queryParameters)) + if vmInstanceIDs != nil { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithJSON(vmInstanceIDs)) + } + return preparer.Prepare(&http.Request{}) +} + +// RestartSender sends the Restart request. The method will close the +// http.Response Body if it receives an error. +func (client VirtualMachineScaleSetsClient) RestartSender(req *http.Request) (*http.Response, error) { + return client.Send(req, http.StatusOK, http.StatusAccepted) +} + +// RestartResponder handles the response to the Restart request. The method always +// closes the http.Response Body. +func (client VirtualMachineScaleSetsClient) RestartResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + autorest.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), + autorest.ByClosing()) + result.Response = resp + return +} + +// Start the operation to start virtual machines in a virtual machine scale +// set. +// +// resourceGroupName is the name of the resource group. vmScaleSetName is the +// name of the virtual machine scale set. vmInstanceIDs is the list of +// virtual machine scale set instance IDs. +func (client VirtualMachineScaleSetsClient) Start(resourceGroupName string, vmScaleSetName string, vmInstanceIDs *VirtualMachineScaleSetVMInstanceIDs) (result autorest.Response, ae error) { + req, err := client.StartPreparer(resourceGroupName, vmScaleSetName, vmInstanceIDs) + if err != nil { + return result, autorest.NewErrorWithError(err, "compute/VirtualMachineScaleSetsClient", "Start", autorest.UndefinedStatusCode, "Failure preparing request") + } + + resp, err := client.StartSender(req) + if err != nil { + result.Response = resp + return result, autorest.NewErrorWithError(err, "compute/VirtualMachineScaleSetsClient", "Start", resp.StatusCode, "Failure sending request") + } + + result, err = client.StartResponder(resp) + if err != nil { + ae = autorest.NewErrorWithError(err, "compute/VirtualMachineScaleSetsClient", "Start", resp.StatusCode, "Failure responding to request") + } + + return +} + +// StartPreparer prepares the Start request. 
+func (client VirtualMachineScaleSetsClient) StartPreparer(resourceGroupName string, vmScaleSetName string, vmInstanceIDs *VirtualMachineScaleSetVMInstanceIDs) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": url.QueryEscape(resourceGroupName), + "subscriptionId": url.QueryEscape(client.SubscriptionID), + "vmScaleSetName": url.QueryEscape(vmScaleSetName), + } + + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsJSON(), + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/start"), + autorest.WithPathParameters(pathParameters), + autorest.WithQueryParameters(queryParameters)) + if vmInstanceIDs != nil { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithJSON(vmInstanceIDs)) + } + return preparer.Prepare(&http.Request{}) +} + +// StartSender sends the Start request. The method will close the +// http.Response Body if it receives an error. +func (client VirtualMachineScaleSetsClient) StartSender(req *http.Request) (*http.Response, error) { + return client.Send(req, http.StatusOK, http.StatusAccepted) +} + +// StartResponder handles the response to the Start request. The method always +// closes the http.Response Body. +func (client VirtualMachineScaleSetsClient) StartResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + autorest.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), + autorest.ByClosing()) + result.Response = resp + return +} + +// UpdateInstances the operation to manually upgrade virtual machines in a +// virtual machine scale set. +// +// resourceGroupName is the name of the resource group. vmScaleSetName is the +// name of the virtual machine scale set. vmInstanceIDs is the list of +// virtual machine scale set instance IDs. +func (client VirtualMachineScaleSetsClient) UpdateInstances(resourceGroupName string, vmScaleSetName string, vmInstanceIDs VirtualMachineScaleSetVMInstanceRequiredIDs) (result autorest.Response, ae error) { + req, err := client.UpdateInstancesPreparer(resourceGroupName, vmScaleSetName, vmInstanceIDs) + if err != nil { + return result, autorest.NewErrorWithError(err, "compute/VirtualMachineScaleSetsClient", "UpdateInstances", autorest.UndefinedStatusCode, "Failure preparing request") + } + + resp, err := client.UpdateInstancesSender(req) + if err != nil { + result.Response = resp + return result, autorest.NewErrorWithError(err, "compute/VirtualMachineScaleSetsClient", "UpdateInstances", resp.StatusCode, "Failure sending request") + } + + result, err = client.UpdateInstancesResponder(resp) + if err != nil { + ae = autorest.NewErrorWithError(err, "compute/VirtualMachineScaleSetsClient", "UpdateInstances", resp.StatusCode, "Failure responding to request") + } + + return +} + +// UpdateInstancesPreparer prepares the UpdateInstances request. 
+func (client VirtualMachineScaleSetsClient) UpdateInstancesPreparer(resourceGroupName string, vmScaleSetName string, vmInstanceIDs VirtualMachineScaleSetVMInstanceRequiredIDs) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": url.QueryEscape(resourceGroupName), + "subscriptionId": url.QueryEscape(client.SubscriptionID), + "vmScaleSetName": url.QueryEscape(vmScaleSetName), + } + + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/manualupgrade"), + autorest.WithJSON(vmInstanceIDs), + autorest.WithPathParameters(pathParameters), + autorest.WithQueryParameters(queryParameters)) +} + +// UpdateInstancesSender sends the UpdateInstances request. The method will close the +// http.Response Body if it receives an error. +func (client VirtualMachineScaleSetsClient) UpdateInstancesSender(req *http.Request) (*http.Response, error) { + return client.Send(req, http.StatusOK, http.StatusAccepted) +} + +// UpdateInstancesResponder handles the response to the UpdateInstances request. The method always +// closes the http.Response Body. +func (client VirtualMachineScaleSetsClient) UpdateInstancesResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + autorest.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), + autorest.ByClosing()) + result.Response = resp + return +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/virtualmachinescalesetvms.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/virtualmachinescalesetvms.go new file mode 100644 index 000000000..ec432f64e --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/virtualmachinescalesetvms.go @@ -0,0 +1,600 @@ +package compute + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator 0.12.0.0 +// Changes may cause incorrect behavior and will be lost if the code is +// regenerated. + +import ( + "github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest" + "net/http" + "net/url" +) + +// VirtualMachineScaleSetVMsClient is the the Compute Management Client. +type VirtualMachineScaleSetVMsClient struct { + ManagementClient +} + +// NewVirtualMachineScaleSetVMsClient creates an instance of the +// VirtualMachineScaleSetVMsClient client. 
+func NewVirtualMachineScaleSetVMsClient(subscriptionID string) VirtualMachineScaleSetVMsClient { + return NewVirtualMachineScaleSetVMsClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewVirtualMachineScaleSetVMsClientWithBaseURI creates an instance of the +// VirtualMachineScaleSetVMsClient client. +func NewVirtualMachineScaleSetVMsClientWithBaseURI(baseURI string, subscriptionID string) VirtualMachineScaleSetVMsClient { + return VirtualMachineScaleSetVMsClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// Deallocate the operation to deallocate a virtual machine scale set. +// +// resourceGroupName is the name of the resource group. vmScaleSetName is the +// name of the virtual machine scale set. instanceID is the instance id of +// the virtual machine. +func (client VirtualMachineScaleSetVMsClient) Deallocate(resourceGroupName string, vmScaleSetName string, instanceID string) (result autorest.Response, ae error) { + req, err := client.DeallocatePreparer(resourceGroupName, vmScaleSetName, instanceID) + if err != nil { + return result, autorest.NewErrorWithError(err, "compute/VirtualMachineScaleSetVMsClient", "Deallocate", autorest.UndefinedStatusCode, "Failure preparing request") + } + + resp, err := client.DeallocateSender(req) + if err != nil { + result.Response = resp + return result, autorest.NewErrorWithError(err, "compute/VirtualMachineScaleSetVMsClient", "Deallocate", resp.StatusCode, "Failure sending request") + } + + result, err = client.DeallocateResponder(resp) + if err != nil { + ae = autorest.NewErrorWithError(err, "compute/VirtualMachineScaleSetVMsClient", "Deallocate", resp.StatusCode, "Failure responding to request") + } + + return +} + +// DeallocatePreparer prepares the Deallocate request. +func (client VirtualMachineScaleSetVMsClient) DeallocatePreparer(resourceGroupName string, vmScaleSetName string, instanceID string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "instanceId": url.QueryEscape(instanceID), + "resourceGroupName": url.QueryEscape(resourceGroupName), + "subscriptionId": url.QueryEscape(client.SubscriptionID), + "vmScaleSetName": url.QueryEscape(vmScaleSetName), + } + + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/virtualmachines/{instanceId}/deallocate"), + autorest.WithPathParameters(pathParameters), + autorest.WithQueryParameters(queryParameters)) +} + +// DeallocateSender sends the Deallocate request. The method will close the +// http.Response Body if it receives an error. +func (client VirtualMachineScaleSetVMsClient) DeallocateSender(req *http.Request) (*http.Response, error) { + return client.Send(req, http.StatusOK, http.StatusAccepted) +} + +// DeallocateResponder handles the response to the Deallocate request. The method always +// closes the http.Response Body. +func (client VirtualMachineScaleSetVMsClient) DeallocateResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + autorest.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), + autorest.ByClosing()) + result.Response = resp + return +} + +// Delete the operation to delete a virtual machine scale set. 
+// +// resourceGroupName is the name of the resource group. vmScaleSetName is the +// name of the virtual machine scale set. instanceID is the instance id of +// the virtual machine. +func (client VirtualMachineScaleSetVMsClient) Delete(resourceGroupName string, vmScaleSetName string, instanceID string) (result autorest.Response, ae error) { + req, err := client.DeletePreparer(resourceGroupName, vmScaleSetName, instanceID) + if err != nil { + return result, autorest.NewErrorWithError(err, "compute/VirtualMachineScaleSetVMsClient", "Delete", autorest.UndefinedStatusCode, "Failure preparing request") + } + + resp, err := client.DeleteSender(req) + if err != nil { + result.Response = resp + return result, autorest.NewErrorWithError(err, "compute/VirtualMachineScaleSetVMsClient", "Delete", resp.StatusCode, "Failure sending request") + } + + result, err = client.DeleteResponder(resp) + if err != nil { + ae = autorest.NewErrorWithError(err, "compute/VirtualMachineScaleSetVMsClient", "Delete", resp.StatusCode, "Failure responding to request") + } + + return +} + +// DeletePreparer prepares the Delete request. +func (client VirtualMachineScaleSetVMsClient) DeletePreparer(resourceGroupName string, vmScaleSetName string, instanceID string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "instanceId": url.QueryEscape(instanceID), + "resourceGroupName": url.QueryEscape(resourceGroupName), + "subscriptionId": url.QueryEscape(client.SubscriptionID), + "vmScaleSetName": url.QueryEscape(vmScaleSetName), + } + + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsDelete(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/virtualmachines/{instanceId}"), + autorest.WithPathParameters(pathParameters), + autorest.WithQueryParameters(queryParameters)) +} + +// DeleteSender sends the Delete request. The method will close the +// http.Response Body if it receives an error. +func (client VirtualMachineScaleSetVMsClient) DeleteSender(req *http.Request) (*http.Response, error) { + return client.Send(req, http.StatusOK, http.StatusNoContent, http.StatusAccepted) +} + +// DeleteResponder handles the response to the Delete request. The method always +// closes the http.Response Body. +func (client VirtualMachineScaleSetVMsClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + autorest.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNoContent, http.StatusAccepted), + autorest.ByClosing()) + result.Response = resp + return +} + +// Get the operation to get a virtual machine scale set virtual machine. +// +// resourceGroupName is the name of the resource group. vmScaleSetName is the +// name of the virtual machine scale set. instanceID is the instance id of +// the virtual machine. 
+func (client VirtualMachineScaleSetVMsClient) Get(resourceGroupName string, vmScaleSetName string, instanceID string) (result VirtualMachineScaleSetVM, ae error) { + req, err := client.GetPreparer(resourceGroupName, vmScaleSetName, instanceID) + if err != nil { + return result, autorest.NewErrorWithError(err, "compute/VirtualMachineScaleSetVMsClient", "Get", autorest.UndefinedStatusCode, "Failure preparing request") + } + + resp, err := client.GetSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "compute/VirtualMachineScaleSetVMsClient", "Get", resp.StatusCode, "Failure sending request") + } + + result, err = client.GetResponder(resp) + if err != nil { + ae = autorest.NewErrorWithError(err, "compute/VirtualMachineScaleSetVMsClient", "Get", resp.StatusCode, "Failure responding to request") + } + + return +} + +// GetPreparer prepares the Get request. +func (client VirtualMachineScaleSetVMsClient) GetPreparer(resourceGroupName string, vmScaleSetName string, instanceID string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "instanceId": url.QueryEscape(instanceID), + "resourceGroupName": url.QueryEscape(resourceGroupName), + "subscriptionId": url.QueryEscape(client.SubscriptionID), + "vmScaleSetName": url.QueryEscape(vmScaleSetName), + } + + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/virtualmachines/{instanceId}"), + autorest.WithPathParameters(pathParameters), + autorest.WithQueryParameters(queryParameters)) +} + +// GetSender sends the Get request. The method will close the +// http.Response Body if it receives an error. +func (client VirtualMachineScaleSetVMsClient) GetSender(req *http.Request) (*http.Response, error) { + return client.Send(req, http.StatusOK) +} + +// GetResponder handles the response to the Get request. The method always +// closes the http.Response Body. +func (client VirtualMachineScaleSetVMsClient) GetResponder(resp *http.Response) (result VirtualMachineScaleSetVM, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + autorest.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// GetInstanceView the operation to get a virtual machine scale set virtual +// machine. +// +// resourceGroupName is the name of the resource group. vmScaleSetName is the +// name of the virtual machine scale set. instanceID is the instance id of +// the virtual machine. 
+func (client VirtualMachineScaleSetVMsClient) GetInstanceView(resourceGroupName string, vmScaleSetName string, instanceID string) (result VirtualMachineScaleSetVMInstanceView, ae error) { + req, err := client.GetInstanceViewPreparer(resourceGroupName, vmScaleSetName, instanceID) + if err != nil { + return result, autorest.NewErrorWithError(err, "compute/VirtualMachineScaleSetVMsClient", "GetInstanceView", autorest.UndefinedStatusCode, "Failure preparing request") + } + + resp, err := client.GetInstanceViewSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "compute/VirtualMachineScaleSetVMsClient", "GetInstanceView", resp.StatusCode, "Failure sending request") + } + + result, err = client.GetInstanceViewResponder(resp) + if err != nil { + ae = autorest.NewErrorWithError(err, "compute/VirtualMachineScaleSetVMsClient", "GetInstanceView", resp.StatusCode, "Failure responding to request") + } + + return +} + +// GetInstanceViewPreparer prepares the GetInstanceView request. +func (client VirtualMachineScaleSetVMsClient) GetInstanceViewPreparer(resourceGroupName string, vmScaleSetName string, instanceID string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "instanceId": url.QueryEscape(instanceID), + "resourceGroupName": url.QueryEscape(resourceGroupName), + "subscriptionId": url.QueryEscape(client.SubscriptionID), + "vmScaleSetName": url.QueryEscape(vmScaleSetName), + } + + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/virtualmachines/{instanceId}/instanceView"), + autorest.WithPathParameters(pathParameters), + autorest.WithQueryParameters(queryParameters)) +} + +// GetInstanceViewSender sends the GetInstanceView request. The method will close the +// http.Response Body if it receives an error. +func (client VirtualMachineScaleSetVMsClient) GetInstanceViewSender(req *http.Request) (*http.Response, error) { + return client.Send(req, http.StatusOK) +} + +// GetInstanceViewResponder handles the response to the GetInstanceView request. The method always +// closes the http.Response Body. +func (client VirtualMachineScaleSetVMsClient) GetInstanceViewResponder(resp *http.Response) (result VirtualMachineScaleSetVMInstanceView, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + autorest.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// List the operation to list virtual machine scale sets VMs. +// +// resourceGroupName is the name of the resource group. +// virtualMachineScaleSetName is the name of the virtual machine scale set. +// filter is the filter to apply on the operation. selectParameter is the +// list parameters. expand is the expand expression to apply on the +// operation. 
+func (client VirtualMachineScaleSetVMsClient) List(resourceGroupName string, virtualMachineScaleSetName string, filter string, selectParameter string, expand string) (result VirtualMachineScaleSetVMListResult, ae error) { + req, err := client.ListPreparer(resourceGroupName, virtualMachineScaleSetName, filter, selectParameter, expand) + if err != nil { + return result, autorest.NewErrorWithError(err, "compute/VirtualMachineScaleSetVMsClient", "List", autorest.UndefinedStatusCode, "Failure preparing request") + } + + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "compute/VirtualMachineScaleSetVMsClient", "List", resp.StatusCode, "Failure sending request") + } + + result, err = client.ListResponder(resp) + if err != nil { + ae = autorest.NewErrorWithError(err, "compute/VirtualMachineScaleSetVMsClient", "List", resp.StatusCode, "Failure responding to request") + } + + return +} + +// ListPreparer prepares the List request. +func (client VirtualMachineScaleSetVMsClient) ListPreparer(resourceGroupName string, virtualMachineScaleSetName string, filter string, selectParameter string, expand string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": url.QueryEscape(resourceGroupName), + "subscriptionId": url.QueryEscape(client.SubscriptionID), + "virtualMachineScaleSetName": url.QueryEscape(virtualMachineScaleSetName), + } + + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + if len(filter) > 0 { + queryParameters["$filter"] = filter + } + if len(selectParameter) > 0 { + queryParameters["$select"] = selectParameter + } + if len(expand) > 0 { + queryParameters["$expand"] = expand + } + + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{virtualMachineScaleSetName}/virtualMachines"), + autorest.WithPathParameters(pathParameters), + autorest.WithQueryParameters(queryParameters)) +} + +// ListSender sends the List request. The method will close the +// http.Response Body if it receives an error. +func (client VirtualMachineScaleSetVMsClient) ListSender(req *http.Request) (*http.Response, error) { + return client.Send(req, http.StatusOK) +} + +// ListResponder handles the response to the List request. The method always +// closes the http.Response Body. +func (client VirtualMachineScaleSetVMsClient) ListResponder(resp *http.Response) (result VirtualMachineScaleSetVMListResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + autorest.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// ListNextResults retrieves the next set of results, if any. 
+func (client VirtualMachineScaleSetVMsClient) ListNextResults(lastResults VirtualMachineScaleSetVMListResult) (result VirtualMachineScaleSetVMListResult, ae error) { + req, err := lastResults.VirtualMachineScaleSetVMListResultPreparer() + if err != nil { + return result, autorest.NewErrorWithError(err, "compute/VirtualMachineScaleSetVMsClient", "List", autorest.UndefinedStatusCode, "Failure preparing next results request request") + } + if req == nil { + return + } + + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "compute/VirtualMachineScaleSetVMsClient", "List", resp.StatusCode, "Failure sending next results request request") + } + + result, err = client.ListResponder(resp) + if err != nil { + ae = autorest.NewErrorWithError(err, "compute/VirtualMachineScaleSetVMsClient", "List", resp.StatusCode, "Failure responding to next results request request") + } + + return +} + +// PowerOff the operation to power off (stop) a virtual machine scale set. +// +// resourceGroupName is the name of the resource group. vmScaleSetName is the +// name of the virtual machine scale set. instanceID is the instance id of +// the virtual machine. +func (client VirtualMachineScaleSetVMsClient) PowerOff(resourceGroupName string, vmScaleSetName string, instanceID string) (result autorest.Response, ae error) { + req, err := client.PowerOffPreparer(resourceGroupName, vmScaleSetName, instanceID) + if err != nil { + return result, autorest.NewErrorWithError(err, "compute/VirtualMachineScaleSetVMsClient", "PowerOff", autorest.UndefinedStatusCode, "Failure preparing request") + } + + resp, err := client.PowerOffSender(req) + if err != nil { + result.Response = resp + return result, autorest.NewErrorWithError(err, "compute/VirtualMachineScaleSetVMsClient", "PowerOff", resp.StatusCode, "Failure sending request") + } + + result, err = client.PowerOffResponder(resp) + if err != nil { + ae = autorest.NewErrorWithError(err, "compute/VirtualMachineScaleSetVMsClient", "PowerOff", resp.StatusCode, "Failure responding to request") + } + + return +} + +// PowerOffPreparer prepares the PowerOff request. +func (client VirtualMachineScaleSetVMsClient) PowerOffPreparer(resourceGroupName string, vmScaleSetName string, instanceID string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "instanceId": url.QueryEscape(instanceID), + "resourceGroupName": url.QueryEscape(resourceGroupName), + "subscriptionId": url.QueryEscape(client.SubscriptionID), + "vmScaleSetName": url.QueryEscape(vmScaleSetName), + } + + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/virtualmachines/{instanceId}/poweroff"), + autorest.WithPathParameters(pathParameters), + autorest.WithQueryParameters(queryParameters)) +} + +// PowerOffSender sends the PowerOff request. The method will close the +// http.Response Body if it receives an error. +func (client VirtualMachineScaleSetVMsClient) PowerOffSender(req *http.Request) (*http.Response, error) { + return client.Send(req, http.StatusOK, http.StatusAccepted) +} + +// PowerOffResponder handles the response to the PowerOff request. The method always +// closes the http.Response Body. 
+func (client VirtualMachineScaleSetVMsClient) PowerOffResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + autorest.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), + autorest.ByClosing()) + result.Response = resp + return +} + +// Restart the operation to restart a virtual machine scale set. +// +// resourceGroupName is the name of the resource group. vmScaleSetName is the +// name of the virtual machine scale set. instanceID is the instance id of +// the virtual machine. +func (client VirtualMachineScaleSetVMsClient) Restart(resourceGroupName string, vmScaleSetName string, instanceID string) (result autorest.Response, ae error) { + req, err := client.RestartPreparer(resourceGroupName, vmScaleSetName, instanceID) + if err != nil { + return result, autorest.NewErrorWithError(err, "compute/VirtualMachineScaleSetVMsClient", "Restart", autorest.UndefinedStatusCode, "Failure preparing request") + } + + resp, err := client.RestartSender(req) + if err != nil { + result.Response = resp + return result, autorest.NewErrorWithError(err, "compute/VirtualMachineScaleSetVMsClient", "Restart", resp.StatusCode, "Failure sending request") + } + + result, err = client.RestartResponder(resp) + if err != nil { + ae = autorest.NewErrorWithError(err, "compute/VirtualMachineScaleSetVMsClient", "Restart", resp.StatusCode, "Failure responding to request") + } + + return +} + +// RestartPreparer prepares the Restart request. +func (client VirtualMachineScaleSetVMsClient) RestartPreparer(resourceGroupName string, vmScaleSetName string, instanceID string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "instanceId": url.QueryEscape(instanceID), + "resourceGroupName": url.QueryEscape(resourceGroupName), + "subscriptionId": url.QueryEscape(client.SubscriptionID), + "vmScaleSetName": url.QueryEscape(vmScaleSetName), + } + + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/virtualmachines/{instanceId}/restart"), + autorest.WithPathParameters(pathParameters), + autorest.WithQueryParameters(queryParameters)) +} + +// RestartSender sends the Restart request. The method will close the +// http.Response Body if it receives an error. +func (client VirtualMachineScaleSetVMsClient) RestartSender(req *http.Request) (*http.Response, error) { + return client.Send(req, http.StatusOK, http.StatusAccepted) +} + +// RestartResponder handles the response to the Restart request. The method always +// closes the http.Response Body. +func (client VirtualMachineScaleSetVMsClient) RestartResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + autorest.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), + autorest.ByClosing()) + result.Response = resp + return +} + +// Start the operation to start a virtual machine scale set. +// +// resourceGroupName is the name of the resource group. vmScaleSetName is the +// name of the virtual machine scale set. instanceID is the instance id of +// the virtual machine. 
+func (client VirtualMachineScaleSetVMsClient) Start(resourceGroupName string, vmScaleSetName string, instanceID string) (result autorest.Response, ae error) { + req, err := client.StartPreparer(resourceGroupName, vmScaleSetName, instanceID) + if err != nil { + return result, autorest.NewErrorWithError(err, "compute/VirtualMachineScaleSetVMsClient", "Start", autorest.UndefinedStatusCode, "Failure preparing request") + } + + resp, err := client.StartSender(req) + if err != nil { + result.Response = resp + return result, autorest.NewErrorWithError(err, "compute/VirtualMachineScaleSetVMsClient", "Start", resp.StatusCode, "Failure sending request") + } + + result, err = client.StartResponder(resp) + if err != nil { + ae = autorest.NewErrorWithError(err, "compute/VirtualMachineScaleSetVMsClient", "Start", resp.StatusCode, "Failure responding to request") + } + + return +} + +// StartPreparer prepares the Start request. +func (client VirtualMachineScaleSetVMsClient) StartPreparer(resourceGroupName string, vmScaleSetName string, instanceID string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "instanceId": url.QueryEscape(instanceID), + "resourceGroupName": url.QueryEscape(resourceGroupName), + "subscriptionId": url.QueryEscape(client.SubscriptionID), + "vmScaleSetName": url.QueryEscape(vmScaleSetName), + } + + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/virtualmachines/{instanceId}/start"), + autorest.WithPathParameters(pathParameters), + autorest.WithQueryParameters(queryParameters)) +} + +// StartSender sends the Start request. The method will close the +// http.Response Body if it receives an error. +func (client VirtualMachineScaleSetVMsClient) StartSender(req *http.Request) (*http.Response, error) { + return client.Send(req, http.StatusOK, http.StatusAccepted) +} + +// StartResponder handles the response to the Start request. The method always +// closes the http.Response Body. +func (client VirtualMachineScaleSetVMsClient) StartResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + autorest.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), + autorest.ByClosing()) + result.Response = resp + return +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/virtualmachinesizes.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/virtualmachinesizes.go new file mode 100644 index 000000000..dca91c6fb --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/virtualmachinesizes.go @@ -0,0 +1,128 @@ +package compute + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. 
+// +// Code generated by Microsoft (R) AutoRest Code Generator 0.12.0.0 +// Changes may cause incorrect behavior and will be lost if the code is +// regenerated. + +import ( + "github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest" + "net/http" + "net/url" +) + +// VirtualMachineSizesClient is the the Compute Management Client. +type VirtualMachineSizesClient struct { + ManagementClient +} + +// NewVirtualMachineSizesClient creates an instance of the +// VirtualMachineSizesClient client. +func NewVirtualMachineSizesClient(subscriptionID string) VirtualMachineSizesClient { + return NewVirtualMachineSizesClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewVirtualMachineSizesClientWithBaseURI creates an instance of the +// VirtualMachineSizesClient client. +func NewVirtualMachineSizesClientWithBaseURI(baseURI string, subscriptionID string) VirtualMachineSizesClient { + return VirtualMachineSizesClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// List lists virtual-machine-sizes available in a location for a subscription. +// +// location is the location upon which virtual-machine-sizes is queried. +func (client VirtualMachineSizesClient) List(location string) (result VirtualMachineSizeListResult, ae error) { + req, err := client.ListPreparer(location) + if err != nil { + return result, autorest.NewErrorWithError(err, "compute/VirtualMachineSizesClient", "List", autorest.UndefinedStatusCode, "Failure preparing request") + } + + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "compute/VirtualMachineSizesClient", "List", resp.StatusCode, "Failure sending request") + } + + result, err = client.ListResponder(resp) + if err != nil { + ae = autorest.NewErrorWithError(err, "compute/VirtualMachineSizesClient", "List", resp.StatusCode, "Failure responding to request") + } + + return +} + +// ListPreparer prepares the List request. +func (client VirtualMachineSizesClient) ListPreparer(location string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "location": url.QueryEscape(location), + "subscriptionId": url.QueryEscape(client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPath("/subscriptions/{subscriptionId}/providers/Microsoft.Compute/locations/{location}/vmSizes"), + autorest.WithPathParameters(pathParameters), + autorest.WithQueryParameters(queryParameters)) +} + +// ListSender sends the List request. The method will close the +// http.Response Body if it receives an error. +func (client VirtualMachineSizesClient) ListSender(req *http.Request) (*http.Response, error) { + return client.Send(req, http.StatusOK) +} + +// ListResponder handles the response to the List request. The method always +// closes the http.Response Body. +func (client VirtualMachineSizesClient) ListResponder(resp *http.Response) (result VirtualMachineSizeListResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + autorest.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// ListNextResults retrieves the next set of results, if any. 
+func (client VirtualMachineSizesClient) ListNextResults(lastResults VirtualMachineSizeListResult) (result VirtualMachineSizeListResult, ae error) { + req, err := lastResults.VirtualMachineSizeListResultPreparer() + if err != nil { + return result, autorest.NewErrorWithError(err, "compute/VirtualMachineSizesClient", "List", autorest.UndefinedStatusCode, "Failure preparing next results request request") + } + if req == nil { + return + } + + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "compute/VirtualMachineSizesClient", "List", resp.StatusCode, "Failure sending next results request request") + } + + result, err = client.ListResponder(resp) + if err != nil { + ae = autorest.NewErrorWithError(err, "compute/VirtualMachineSizesClient", "List", resp.StatusCode, "Failure responding to next results request request") + } + + return +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/network/applicationgateways.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/applicationgateways.go new file mode 100644 index 000000000..40adba140 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/applicationgateways.go @@ -0,0 +1,541 @@ +package network + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator 0.12.0.0 +// Changes may cause incorrect behavior and will be lost if the code is +// regenerated. + +import ( + "github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest" + "net/http" + "net/url" +) + +// ApplicationGatewaysClient is the the Microsoft Azure Network management API +// provides a RESTful set of web services that interact with Microsoft Azure +// Networks service to manage your network resrources. The API has entities +// that capture the relationship between an end user and the Microsoft Azure +// Networks service. +type ApplicationGatewaysClient struct { + ManagementClient +} + +// NewApplicationGatewaysClient creates an instance of the +// ApplicationGatewaysClient client. +func NewApplicationGatewaysClient(subscriptionID string) ApplicationGatewaysClient { + return NewApplicationGatewaysClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewApplicationGatewaysClientWithBaseURI creates an instance of the +// ApplicationGatewaysClient client. +func NewApplicationGatewaysClientWithBaseURI(baseURI string, subscriptionID string) ApplicationGatewaysClient { + return ApplicationGatewaysClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// CreateOrUpdate the Put ApplicationGateway operation creates/updates a +// ApplicationGateway +// +// resourceGroupName is the name of the resource group. applicationGatewayName +// is the name of the ApplicationGateway. 
parameters is parameters supplied +// to the create/delete ApplicationGateway operation +func (client ApplicationGatewaysClient) CreateOrUpdate(resourceGroupName string, applicationGatewayName string, parameters ApplicationGateway) (result ApplicationGateway, ae error) { + req, err := client.CreateOrUpdatePreparer(resourceGroupName, applicationGatewayName, parameters) + if err != nil { + return result, autorest.NewErrorWithError(err, "network/ApplicationGatewaysClient", "CreateOrUpdate", autorest.UndefinedStatusCode, "Failure preparing request") + } + + resp, err := client.CreateOrUpdateSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "network/ApplicationGatewaysClient", "CreateOrUpdate", resp.StatusCode, "Failure sending request") + } + + result, err = client.CreateOrUpdateResponder(resp) + if err != nil { + ae = autorest.NewErrorWithError(err, "network/ApplicationGatewaysClient", "CreateOrUpdate", resp.StatusCode, "Failure responding to request") + } + + return +} + +// CreateOrUpdatePreparer prepares the CreateOrUpdate request. +func (client ApplicationGatewaysClient) CreateOrUpdatePreparer(resourceGroupName string, applicationGatewayName string, parameters ApplicationGateway) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "applicationGatewayName": url.QueryEscape(applicationGatewayName), + "resourceGroupName": url.QueryEscape(resourceGroupName), + "subscriptionId": url.QueryEscape(client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsPut(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/applicationGateways/{applicationGatewayName}"), + autorest.WithJSON(parameters), + autorest.WithPathParameters(pathParameters), + autorest.WithQueryParameters(queryParameters)) +} + +// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the +// http.Response Body if it receives an error. +func (client ApplicationGatewaysClient) CreateOrUpdateSender(req *http.Request) (*http.Response, error) { + return client.Send(req, http.StatusCreated, http.StatusOK) +} + +// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always +// closes the http.Response Body. +func (client ApplicationGatewaysClient) CreateOrUpdateResponder(resp *http.Response) (result ApplicationGateway, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + autorest.WithErrorUnlessStatusCode(http.StatusCreated, http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// Delete the delete applicationgateway operation deletes the specified +// applicationgateway. +// +// resourceGroupName is the name of the resource group. applicationGatewayName +// is the name of the applicationgateway. 
+func (client ApplicationGatewaysClient) Delete(resourceGroupName string, applicationGatewayName string) (result autorest.Response, ae error) {
+	req, err := client.DeletePreparer(resourceGroupName, applicationGatewayName)
+	if err != nil {
+		return result, autorest.NewErrorWithError(err, "network/ApplicationGatewaysClient", "Delete", autorest.UndefinedStatusCode, "Failure preparing request")
+	}
+
+	resp, err := client.DeleteSender(req)
+	if err != nil {
+		result.Response = resp
+		return result, autorest.NewErrorWithError(err, "network/ApplicationGatewaysClient", "Delete", resp.StatusCode, "Failure sending request")
+	}
+
+	result, err = client.DeleteResponder(resp)
+	if err != nil {
+		ae = autorest.NewErrorWithError(err, "network/ApplicationGatewaysClient", "Delete", resp.StatusCode, "Failure responding to request")
+	}
+
+	return
+}
+
+// DeletePreparer prepares the Delete request.
+func (client ApplicationGatewaysClient) DeletePreparer(resourceGroupName string, applicationGatewayName string) (*http.Request, error) {
+	pathParameters := map[string]interface{}{
+		"applicationGatewayName": url.QueryEscape(applicationGatewayName),
+		"resourceGroupName":      url.QueryEscape(resourceGroupName),
+		"subscriptionId":         url.QueryEscape(client.SubscriptionID),
+	}
+
+	queryParameters := map[string]interface{}{
+		"api-version": APIVersion,
+	}
+
+	return autorest.Prepare(&http.Request{},
+		autorest.AsJSON(),
+		autorest.AsDelete(),
+		autorest.WithBaseURL(client.BaseURI),
+		autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/applicationGateways/{applicationGatewayName}"),
+		autorest.WithPathParameters(pathParameters),
+		autorest.WithQueryParameters(queryParameters))
+}
+
+// DeleteSender sends the Delete request. The method will close the
+// http.Response Body if it receives an error.
+func (client ApplicationGatewaysClient) DeleteSender(req *http.Request) (*http.Response, error) {
+	return client.Send(req, http.StatusAccepted, http.StatusNoContent, http.StatusOK)
+}
+
+// DeleteResponder handles the response to the Delete request. The method always
+// closes the http.Response Body.
+func (client ApplicationGatewaysClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) {
+	err = autorest.Respond(
+		resp,
+		client.ByInspecting(),
+		autorest.WithErrorUnlessStatusCode(http.StatusAccepted, http.StatusNoContent, http.StatusOK),
+		autorest.ByClosing())
+	result.Response = resp
+	return
+}
+
+// Get the Get applicationgateway operation retrieves information about the
+// specified applicationgateway.
+//
+// resourceGroupName is the name of the resource group. applicationGatewayName
+// is the name of the applicationgateway.
+func (client ApplicationGatewaysClient) Get(resourceGroupName string, applicationGatewayName string) (result ApplicationGateway, ae error) {
+	req, err := client.GetPreparer(resourceGroupName, applicationGatewayName)
+	if err != nil {
+		return result, autorest.NewErrorWithError(err, "network/ApplicationGatewaysClient", "Get", autorest.UndefinedStatusCode, "Failure preparing request")
+	}
+
+	resp, err := client.GetSender(req)
+	if err != nil {
+		result.Response = autorest.Response{Response: resp}
+		return result, autorest.NewErrorWithError(err, "network/ApplicationGatewaysClient", "Get", resp.StatusCode, "Failure sending request")
+	}
+
+	result, err = client.GetResponder(resp)
+	if err != nil {
+		ae = autorest.NewErrorWithError(err, "network/ApplicationGatewaysClient", "Get", resp.StatusCode, "Failure responding to request")
+	}
+
+	return
+}
+
+// GetPreparer prepares the Get request.
+func (client ApplicationGatewaysClient) GetPreparer(resourceGroupName string, applicationGatewayName string) (*http.Request, error) {
+	pathParameters := map[string]interface{}{
+		"applicationGatewayName": url.QueryEscape(applicationGatewayName),
+		"resourceGroupName":      url.QueryEscape(resourceGroupName),
+		"subscriptionId":         url.QueryEscape(client.SubscriptionID),
+	}
+
+	queryParameters := map[string]interface{}{
+		"api-version": APIVersion,
+	}
+
+	return autorest.Prepare(&http.Request{},
+		autorest.AsJSON(),
+		autorest.AsGet(),
+		autorest.WithBaseURL(client.BaseURI),
+		autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/applicationGateways/{applicationGatewayName}"),
+		autorest.WithPathParameters(pathParameters),
+		autorest.WithQueryParameters(queryParameters))
+}
+
+// GetSender sends the Get request. The method will close the
+// http.Response Body if it receives an error.
+func (client ApplicationGatewaysClient) GetSender(req *http.Request) (*http.Response, error) {
+	return client.Send(req, http.StatusOK)
+}
+
+// GetResponder handles the response to the Get request. The method always
+// closes the http.Response Body.
+func (client ApplicationGatewaysClient) GetResponder(resp *http.Response) (result ApplicationGateway, err error) {
+	err = autorest.Respond(
+		resp,
+		client.ByInspecting(),
+		autorest.WithErrorUnlessStatusCode(http.StatusOK),
+		autorest.ByUnmarshallingJSON(&result),
+		autorest.ByClosing())
+	result.Response = autorest.Response{Response: resp}
+	return
+}
+
+// List the List ApplicationGateway operation retrieves all the
+// applicationgateways in a resource group.
+//
+// resourceGroupName is the name of the resource group.
+func (client ApplicationGatewaysClient) List(resourceGroupName string) (result ApplicationGatewayListResult, ae error) {
+	req, err := client.ListPreparer(resourceGroupName)
+	if err != nil {
+		return result, autorest.NewErrorWithError(err, "network/ApplicationGatewaysClient", "List", autorest.UndefinedStatusCode, "Failure preparing request")
+	}
+
+	resp, err := client.ListSender(req)
+	if err != nil {
+		result.Response = autorest.Response{Response: resp}
+		return result, autorest.NewErrorWithError(err, "network/ApplicationGatewaysClient", "List", resp.StatusCode, "Failure sending request")
+	}
+
+	result, err = client.ListResponder(resp)
+	if err != nil {
+		ae = autorest.NewErrorWithError(err, "network/ApplicationGatewaysClient", "List", resp.StatusCode, "Failure responding to request")
+	}
+
+	return
+}
+
+// ListPreparer prepares the List request.
+func (client ApplicationGatewaysClient) ListPreparer(resourceGroupName string) (*http.Request, error) {
+	pathParameters := map[string]interface{}{
+		"resourceGroupName": url.QueryEscape(resourceGroupName),
+		"subscriptionId":    url.QueryEscape(client.SubscriptionID),
+	}
+
+	queryParameters := map[string]interface{}{
+		"api-version": APIVersion,
+	}
+
+	return autorest.Prepare(&http.Request{},
+		autorest.AsJSON(),
+		autorest.AsGet(),
+		autorest.WithBaseURL(client.BaseURI),
+		autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/applicationGateways"),
+		autorest.WithPathParameters(pathParameters),
+		autorest.WithQueryParameters(queryParameters))
+}
+
+// ListSender sends the List request. The method will close the
+// http.Response Body if it receives an error.
+func (client ApplicationGatewaysClient) ListSender(req *http.Request) (*http.Response, error) {
+	return client.Send(req, http.StatusOK)
+}
+
+// ListResponder handles the response to the List request. The method always
+// closes the http.Response Body.
+func (client ApplicationGatewaysClient) ListResponder(resp *http.Response) (result ApplicationGatewayListResult, err error) {
+	err = autorest.Respond(
+		resp,
+		client.ByInspecting(),
+		autorest.WithErrorUnlessStatusCode(http.StatusOK),
+		autorest.ByUnmarshallingJSON(&result),
+		autorest.ByClosing())
+	result.Response = autorest.Response{Response: resp}
+	return
+}
+
+// ListNextResults retrieves the next set of results, if any.
+func (client ApplicationGatewaysClient) ListNextResults(lastResults ApplicationGatewayListResult) (result ApplicationGatewayListResult, ae error) {
+	req, err := lastResults.ApplicationGatewayListResultPreparer()
+	if err != nil {
+		return result, autorest.NewErrorWithError(err, "network/ApplicationGatewaysClient", "List", autorest.UndefinedStatusCode, "Failure preparing next results request")
+	}
+	if req == nil {
+		return
+	}
+
+	resp, err := client.ListSender(req)
+	if err != nil {
+		result.Response = autorest.Response{Response: resp}
+		return result, autorest.NewErrorWithError(err, "network/ApplicationGatewaysClient", "List", resp.StatusCode, "Failure sending next results request")
+	}
+
+	result, err = client.ListResponder(resp)
+	if err != nil {
+		ae = autorest.NewErrorWithError(err, "network/ApplicationGatewaysClient", "List", resp.StatusCode, "Failure responding to next results request")
+	}
+
+	return
+}
+
+// ListAll the List applicationgateway operation retrieves all the
+// applicationgateways in a subscription.
+func (client ApplicationGatewaysClient) ListAll() (result ApplicationGatewayListResult, ae error) {
+	req, err := client.ListAllPreparer()
+	if err != nil {
+		return result, autorest.NewErrorWithError(err, "network/ApplicationGatewaysClient", "ListAll", autorest.UndefinedStatusCode, "Failure preparing request")
+	}
+
+	resp, err := client.ListAllSender(req)
+	if err != nil {
+		result.Response = autorest.Response{Response: resp}
+		return result, autorest.NewErrorWithError(err, "network/ApplicationGatewaysClient", "ListAll", resp.StatusCode, "Failure sending request")
+	}
+
+	result, err = client.ListAllResponder(resp)
+	if err != nil {
+		ae = autorest.NewErrorWithError(err, "network/ApplicationGatewaysClient", "ListAll", resp.StatusCode, "Failure responding to request")
+	}
+
+	return
+}
+
+// ListAllPreparer prepares the ListAll request.
+func (client ApplicationGatewaysClient) ListAllPreparer() (*http.Request, error) {
+	pathParameters := map[string]interface{}{
+		"subscriptionId": url.QueryEscape(client.SubscriptionID),
+	}
+
+	queryParameters := map[string]interface{}{
+		"api-version": APIVersion,
+	}
+
+	return autorest.Prepare(&http.Request{},
+		autorest.AsJSON(),
+		autorest.AsGet(),
+		autorest.WithBaseURL(client.BaseURI),
+		autorest.WithPath("/subscriptions/{subscriptionId}/providers/Microsoft.Network/applicationGateways"),
+		autorest.WithPathParameters(pathParameters),
+		autorest.WithQueryParameters(queryParameters))
+}
+
+// ListAllSender sends the ListAll request. The method will close the
+// http.Response Body if it receives an error.
+func (client ApplicationGatewaysClient) ListAllSender(req *http.Request) (*http.Response, error) {
+	return client.Send(req, http.StatusOK)
+}
+
+// ListAllResponder handles the response to the ListAll request. The method always
+// closes the http.Response Body.
+func (client ApplicationGatewaysClient) ListAllResponder(resp *http.Response) (result ApplicationGatewayListResult, err error) {
+	err = autorest.Respond(
+		resp,
+		client.ByInspecting(),
+		autorest.WithErrorUnlessStatusCode(http.StatusOK),
+		autorest.ByUnmarshallingJSON(&result),
+		autorest.ByClosing())
+	result.Response = autorest.Response{Response: resp}
+	return
+}
+
+// ListAllNextResults retrieves the next set of results, if any.
+func (client ApplicationGatewaysClient) ListAllNextResults(lastResults ApplicationGatewayListResult) (result ApplicationGatewayListResult, ae error) {
+	req, err := lastResults.ApplicationGatewayListResultPreparer()
+	if err != nil {
+		return result, autorest.NewErrorWithError(err, "network/ApplicationGatewaysClient", "ListAll", autorest.UndefinedStatusCode, "Failure preparing next results request")
+	}
+	if req == nil {
+		return
+	}
+
+	resp, err := client.ListAllSender(req)
+	if err != nil {
+		result.Response = autorest.Response{Response: resp}
+		return result, autorest.NewErrorWithError(err, "network/ApplicationGatewaysClient", "ListAll", resp.StatusCode, "Failure sending next results request")
+	}
+
+	result, err = client.ListAllResponder(resp)
+	if err != nil {
+		ae = autorest.NewErrorWithError(err, "network/ApplicationGatewaysClient", "ListAll", resp.StatusCode, "Failure responding to next results request")
+	}
+
+	return
+}
+
+// Start the Start ApplicationGateway operation starts the application
+// gateway in the specified resource group through the Network resource
+// provider.
+//
+// resourceGroupName is the name of the resource group. applicationGatewayName
+// is the name of the application gateway.
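+//
+// A minimal usage sketch (hypothetical names; credential setup omitted):
+//
+//	client := network.NewApplicationGatewaysClient("subscription-id")
+//	if _, err := client.Start("my-resource-group", "my-gateway"); err != nil {
+//		// handle the error
+//	}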
+func (client ApplicationGatewaysClient) Start(resourceGroupName string, applicationGatewayName string) (result autorest.Response, ae error) {
+	req, err := client.StartPreparer(resourceGroupName, applicationGatewayName)
+	if err != nil {
+		return result, autorest.NewErrorWithError(err, "network/ApplicationGatewaysClient", "Start", autorest.UndefinedStatusCode, "Failure preparing request")
+	}
+
+	resp, err := client.StartSender(req)
+	if err != nil {
+		result.Response = resp
+		return result, autorest.NewErrorWithError(err, "network/ApplicationGatewaysClient", "Start", resp.StatusCode, "Failure sending request")
+	}
+
+	result, err = client.StartResponder(resp)
+	if err != nil {
+		ae = autorest.NewErrorWithError(err, "network/ApplicationGatewaysClient", "Start", resp.StatusCode, "Failure responding to request")
+	}
+
+	return
+}
+
+// StartPreparer prepares the Start request.
+func (client ApplicationGatewaysClient) StartPreparer(resourceGroupName string, applicationGatewayName string) (*http.Request, error) {
+	pathParameters := map[string]interface{}{
+		"applicationGatewayName": url.QueryEscape(applicationGatewayName),
+		"resourceGroupName":      url.QueryEscape(resourceGroupName),
+		"subscriptionId":         url.QueryEscape(client.SubscriptionID),
+	}
+
+	queryParameters := map[string]interface{}{
+		"api-version": APIVersion,
+	}
+
+	return autorest.Prepare(&http.Request{},
+		autorest.AsJSON(),
+		autorest.AsPost(),
+		autorest.WithBaseURL(client.BaseURI),
+		autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/applicationGateways/{applicationGatewayName}/start"),
+		autorest.WithPathParameters(pathParameters),
+		autorest.WithQueryParameters(queryParameters))
+}
+
+// StartSender sends the Start request. The method will close the
+// http.Response Body if it receives an error.
+func (client ApplicationGatewaysClient) StartSender(req *http.Request) (*http.Response, error) {
+	return client.Send(req, http.StatusOK, http.StatusAccepted)
+}
+
+// StartResponder handles the response to the Start request. The method always
+// closes the http.Response Body.
+func (client ApplicationGatewaysClient) StartResponder(resp *http.Response) (result autorest.Response, err error) {
+	err = autorest.Respond(
+		resp,
+		client.ByInspecting(),
+		autorest.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted),
+		autorest.ByClosing())
+	result.Response = resp
+	return
+}
+
+// Stop the Stop ApplicationGateway operation stops the application gateway
+// in the specified resource group through the Network resource provider.
+//
+// resourceGroupName is the name of the resource group. applicationGatewayName
+// is the name of the application gateway.
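+//
+// A minimal usage sketch (hypothetical names; credential setup omitted):
+//
+//	client := network.NewApplicationGatewaysClient("subscription-id")
+//	if _, err := client.Stop("my-resource-group", "my-gateway"); err != nil {
+//		// handle the error
+//	}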
+func (client ApplicationGatewaysClient) Stop(resourceGroupName string, applicationGatewayName string) (result autorest.Response, ae error) { + req, err := client.StopPreparer(resourceGroupName, applicationGatewayName) + if err != nil { + return result, autorest.NewErrorWithError(err, "network/ApplicationGatewaysClient", "Stop", autorest.UndefinedStatusCode, "Failure preparing request") + } + + resp, err := client.StopSender(req) + if err != nil { + result.Response = resp + return result, autorest.NewErrorWithError(err, "network/ApplicationGatewaysClient", "Stop", resp.StatusCode, "Failure sending request") + } + + result, err = client.StopResponder(resp) + if err != nil { + ae = autorest.NewErrorWithError(err, "network/ApplicationGatewaysClient", "Stop", resp.StatusCode, "Failure responding to request") + } + + return +} + +// StopPreparer prepares the Stop request. +func (client ApplicationGatewaysClient) StopPreparer(resourceGroupName string, applicationGatewayName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "applicationGatewayName": url.QueryEscape(applicationGatewayName), + "resourceGroupName": url.QueryEscape(resourceGroupName), + "subscriptionId": url.QueryEscape(client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/applicationGateways/{applicationGatewayName}/stop"), + autorest.WithPathParameters(pathParameters), + autorest.WithQueryParameters(queryParameters)) +} + +// StopSender sends the Stop request. The method will close the +// http.Response Body if it receives an error. +func (client ApplicationGatewaysClient) StopSender(req *http.Request) (*http.Response, error) { + return client.Send(req, http.StatusOK, http.StatusAccepted) +} + +// StopResponder handles the response to the Stop request. The method always +// closes the http.Response Body. +func (client ApplicationGatewaysClient) StopResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + autorest.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), + autorest.ByClosing()) + result.Response = resp + return +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/network/client.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/client.go new file mode 100644 index 000000000..542b00757 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/client.go @@ -0,0 +1,126 @@ +package network + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator 0.12.0.0 +// Changes may cause incorrect behavior and will be lost if the code is +// regenerated. 
+
+import (
+	"github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest"
+	"net/http"
+	"net/url"
+)
+
+const (
+	// APIVersion is the version of the Network API.
+	APIVersion = "2015-06-15"
+
+	// DefaultBaseURI is the default URI used for the Network service.
+	DefaultBaseURI = "https://management.azure.com"
+)
+
+// ManagementClient is the base client for the Microsoft Azure Network
+// management API, which provides a RESTful set of web services that interact
+// with the Microsoft Azure Networks service to manage your network
+// resources. The API has entities that capture the relationship between an
+// end user and the Microsoft Azure Networks service.
+type ManagementClient struct {
+	autorest.Client
+	BaseURI        string
+	SubscriptionID string
+}
+
+// New creates an instance of the ManagementClient client.
+func New(subscriptionID string) ManagementClient {
+	return NewWithBaseURI(DefaultBaseURI, subscriptionID)
+}
+
+// NewWithBaseURI creates an instance of the ManagementClient client.
+func NewWithBaseURI(baseURI string, subscriptionID string) ManagementClient {
+	return ManagementClient{
+		Client:         autorest.NewClientWithUserAgent(UserAgent()),
+		BaseURI:        baseURI,
+		SubscriptionID: subscriptionID,
+	}
+}
+
+// CheckDNSNameAvailability checks whether a domain name in the cloudapp.net
+// zone is available for use.
+//
+// location is the location of the domain name. domainNameLabel is the domain
+// name to be verified. It must conform to the following regular expression:
+// ^[a-z][a-z0-9-]{1,61}[a-z0-9]$.
+func (client ManagementClient) CheckDNSNameAvailability(location string, domainNameLabel string) (result DNSNameAvailabilityResult, ae error) {
+	req, err := client.CheckDNSNameAvailabilityPreparer(location, domainNameLabel)
+	if err != nil {
+		return result, autorest.NewErrorWithError(err, "network/ManagementClient", "CheckDNSNameAvailability", autorest.UndefinedStatusCode, "Failure preparing request")
+	}
+
+	resp, err := client.CheckDNSNameAvailabilitySender(req)
+	if err != nil {
+		result.Response = autorest.Response{Response: resp}
+		return result, autorest.NewErrorWithError(err, "network/ManagementClient", "CheckDNSNameAvailability", resp.StatusCode, "Failure sending request")
+	}
+
+	result, err = client.CheckDNSNameAvailabilityResponder(resp)
+	if err != nil {
+		ae = autorest.NewErrorWithError(err, "network/ManagementClient", "CheckDNSNameAvailability", resp.StatusCode, "Failure responding to request")
+	}
+
+	return
+}
+
+// CheckDNSNameAvailabilityPreparer prepares the CheckDNSNameAvailability request.
+func (client ManagementClient) CheckDNSNameAvailabilityPreparer(location string, domainNameLabel string) (*http.Request, error) {
+	pathParameters := map[string]interface{}{
+		"location":       url.QueryEscape(location),
+		"subscriptionId": url.QueryEscape(client.SubscriptionID),
+	}
+
+	queryParameters := map[string]interface{}{
+		"api-version": APIVersion,
+	}
+	if len(domainNameLabel) > 0 {
+		queryParameters["domainNameLabel"] = domainNameLabel
+	}
+
+	return autorest.Prepare(&http.Request{},
+		autorest.AsJSON(),
+		autorest.AsGet(),
+		autorest.WithBaseURL(client.BaseURI),
+		autorest.WithPath("/subscriptions/{subscriptionId}/providers/Microsoft.Network/locations/{location}/CheckDnsNameAvailability"),
+		autorest.WithPathParameters(pathParameters),
+		autorest.WithQueryParameters(queryParameters))
+}
+
+// CheckDNSNameAvailabilitySender sends the CheckDNSNameAvailability request. The method will close the
+// http.Response Body if it receives an error.
+func (client ManagementClient) CheckDNSNameAvailabilitySender(req *http.Request) (*http.Response, error) {
+	return client.Send(req, http.StatusOK)
+}
+
+// CheckDNSNameAvailabilityResponder handles the response to the CheckDNSNameAvailability request. The method always
+// closes the http.Response Body.
+func (client ManagementClient) CheckDNSNameAvailabilityResponder(resp *http.Response) (result DNSNameAvailabilityResult, err error) {
+	err = autorest.Respond(
+		resp,
+		client.ByInspecting(),
+		autorest.WithErrorUnlessStatusCode(http.StatusOK),
+		autorest.ByUnmarshallingJSON(&result),
+		autorest.ByClosing())
+	result.Response = autorest.Response{Response: resp}
+	return
+}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/network/expressroutecircuitauthorizations.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/expressroutecircuitauthorizations.go
new file mode 100644
index 000000000..a5ac72e67
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/expressroutecircuitauthorizations.go
@@ -0,0 +1,337 @@
+package network
+
+// Copyright (c) Microsoft and contributors. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Code generated by Microsoft (R) AutoRest Code Generator 0.12.0.0
+// Changes may cause incorrect behavior and will be lost if the code is
+// regenerated.
+
+import (
+	"github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest"
+	"net/http"
+	"net/url"
+)
+
+// ExpressRouteCircuitAuthorizationsClient is the client for the Microsoft
+// Azure Network management API, which provides a RESTful set of web services
+// that interact with the Microsoft Azure Networks service to manage your
+// network resources. The API has entities that capture the relationship
+// between an end user and the Microsoft Azure Networks service.
+type ExpressRouteCircuitAuthorizationsClient struct {
+	ManagementClient
+}
+
+// NewExpressRouteCircuitAuthorizationsClient creates an instance of the
+// ExpressRouteCircuitAuthorizationsClient client.
+func NewExpressRouteCircuitAuthorizationsClient(subscriptionID string) ExpressRouteCircuitAuthorizationsClient {
+	return NewExpressRouteCircuitAuthorizationsClientWithBaseURI(DefaultBaseURI, subscriptionID)
+}
+
+// NewExpressRouteCircuitAuthorizationsClientWithBaseURI creates an instance
+// of the ExpressRouteCircuitAuthorizationsClient client.
+func NewExpressRouteCircuitAuthorizationsClientWithBaseURI(baseURI string, subscriptionID string) ExpressRouteCircuitAuthorizationsClient {
+	return ExpressRouteCircuitAuthorizationsClient{NewWithBaseURI(baseURI, subscriptionID)}
+}
+
+// CreateOrUpdate the Put Authorization operation creates/updates an
+// authorization in the specified ExpressRouteCircuit.
+//
+// resourceGroupName is the name of the resource group. circuitName is the
+// name of the express route circuit. authorizationName is the name of the
+// authorization.
authorizationParameters is parameters supplied to the +// create/update ExpressRouteCircuitAuthorization operation +func (client ExpressRouteCircuitAuthorizationsClient) CreateOrUpdate(resourceGroupName string, circuitName string, authorizationName string, authorizationParameters ExpressRouteCircuitAuthorization) (result ExpressRouteCircuitAuthorization, ae error) { + req, err := client.CreateOrUpdatePreparer(resourceGroupName, circuitName, authorizationName, authorizationParameters) + if err != nil { + return result, autorest.NewErrorWithError(err, "network/ExpressRouteCircuitAuthorizationsClient", "CreateOrUpdate", autorest.UndefinedStatusCode, "Failure preparing request") + } + + resp, err := client.CreateOrUpdateSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "network/ExpressRouteCircuitAuthorizationsClient", "CreateOrUpdate", resp.StatusCode, "Failure sending request") + } + + result, err = client.CreateOrUpdateResponder(resp) + if err != nil { + ae = autorest.NewErrorWithError(err, "network/ExpressRouteCircuitAuthorizationsClient", "CreateOrUpdate", resp.StatusCode, "Failure responding to request") + } + + return +} + +// CreateOrUpdatePreparer prepares the CreateOrUpdate request. +func (client ExpressRouteCircuitAuthorizationsClient) CreateOrUpdatePreparer(resourceGroupName string, circuitName string, authorizationName string, authorizationParameters ExpressRouteCircuitAuthorization) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "authorizationName": url.QueryEscape(authorizationName), + "circuitName": url.QueryEscape(circuitName), + "resourceGroupName": url.QueryEscape(resourceGroupName), + "subscriptionId": url.QueryEscape(client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsPut(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}/authorizations/{authorizationName}"), + autorest.WithJSON(authorizationParameters), + autorest.WithPathParameters(pathParameters), + autorest.WithQueryParameters(queryParameters)) +} + +// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the +// http.Response Body if it receives an error. +func (client ExpressRouteCircuitAuthorizationsClient) CreateOrUpdateSender(req *http.Request) (*http.Response, error) { + return client.Send(req, http.StatusCreated, http.StatusOK) +} + +// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always +// closes the http.Response Body. +func (client ExpressRouteCircuitAuthorizationsClient) CreateOrUpdateResponder(resp *http.Response) (result ExpressRouteCircuitAuthorization, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + autorest.WithErrorUnlessStatusCode(http.StatusCreated, http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// Delete the delete authorization operation deletes the specified +// authorization from the specified ExpressRouteCircuit. +// +// resourceGroupName is the name of the resource group. circuitName is the +// name of the express route circuit. authorizationName is the name of the +// authorization. 
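+//
+// A minimal usage sketch (hypothetical names; credential setup omitted):
+//
+//	client := network.NewExpressRouteCircuitAuthorizationsClient("subscription-id")
+//	if _, err := client.Delete("my-rg", "my-circuit", "my-authorization"); err != nil {
+//		// handle the error
+//	}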
+func (client ExpressRouteCircuitAuthorizationsClient) Delete(resourceGroupName string, circuitName string, authorizationName string) (result autorest.Response, ae error) { + req, err := client.DeletePreparer(resourceGroupName, circuitName, authorizationName) + if err != nil { + return result, autorest.NewErrorWithError(err, "network/ExpressRouteCircuitAuthorizationsClient", "Delete", autorest.UndefinedStatusCode, "Failure preparing request") + } + + resp, err := client.DeleteSender(req) + if err != nil { + result.Response = resp + return result, autorest.NewErrorWithError(err, "network/ExpressRouteCircuitAuthorizationsClient", "Delete", resp.StatusCode, "Failure sending request") + } + + result, err = client.DeleteResponder(resp) + if err != nil { + ae = autorest.NewErrorWithError(err, "network/ExpressRouteCircuitAuthorizationsClient", "Delete", resp.StatusCode, "Failure responding to request") + } + + return +} + +// DeletePreparer prepares the Delete request. +func (client ExpressRouteCircuitAuthorizationsClient) DeletePreparer(resourceGroupName string, circuitName string, authorizationName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "authorizationName": url.QueryEscape(authorizationName), + "circuitName": url.QueryEscape(circuitName), + "resourceGroupName": url.QueryEscape(resourceGroupName), + "subscriptionId": url.QueryEscape(client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsDelete(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}/authorizations/{authorizationName}"), + autorest.WithPathParameters(pathParameters), + autorest.WithQueryParameters(queryParameters)) +} + +// DeleteSender sends the Delete request. The method will close the +// http.Response Body if it receives an error. +func (client ExpressRouteCircuitAuthorizationsClient) DeleteSender(req *http.Request) (*http.Response, error) { + return client.Send(req, http.StatusAccepted, http.StatusOK, http.StatusNoContent) +} + +// DeleteResponder handles the response to the Delete request. The method always +// closes the http.Response Body. +func (client ExpressRouteCircuitAuthorizationsClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + autorest.WithErrorUnlessStatusCode(http.StatusAccepted, http.StatusOK, http.StatusNoContent), + autorest.ByClosing()) + result.Response = resp + return +} + +// Get the GET authorization operation retrieves the specified authorization +// from the specified ExpressRouteCircuit. +// +// resourceGroupName is the name of the resource group. circuitName is the +// name of the express route circuit. authorizationName is the name of the +// authorization. 
+func (client ExpressRouteCircuitAuthorizationsClient) Get(resourceGroupName string, circuitName string, authorizationName string) (result ExpressRouteCircuitAuthorization, ae error) {
+	req, err := client.GetPreparer(resourceGroupName, circuitName, authorizationName)
+	if err != nil {
+		return result, autorest.NewErrorWithError(err, "network/ExpressRouteCircuitAuthorizationsClient", "Get", autorest.UndefinedStatusCode, "Failure preparing request")
+	}
+
+	resp, err := client.GetSender(req)
+	if err != nil {
+		result.Response = autorest.Response{Response: resp}
+		return result, autorest.NewErrorWithError(err, "network/ExpressRouteCircuitAuthorizationsClient", "Get", resp.StatusCode, "Failure sending request")
+	}
+
+	result, err = client.GetResponder(resp)
+	if err != nil {
+		ae = autorest.NewErrorWithError(err, "network/ExpressRouteCircuitAuthorizationsClient", "Get", resp.StatusCode, "Failure responding to request")
+	}
+
+	return
+}
+
+// GetPreparer prepares the Get request.
+func (client ExpressRouteCircuitAuthorizationsClient) GetPreparer(resourceGroupName string, circuitName string, authorizationName string) (*http.Request, error) {
+	pathParameters := map[string]interface{}{
+		"authorizationName": url.QueryEscape(authorizationName),
+		"circuitName":       url.QueryEscape(circuitName),
+		"resourceGroupName": url.QueryEscape(resourceGroupName),
+		"subscriptionId":    url.QueryEscape(client.SubscriptionID),
+	}
+
+	queryParameters := map[string]interface{}{
+		"api-version": APIVersion,
+	}
+
+	return autorest.Prepare(&http.Request{},
+		autorest.AsJSON(),
+		autorest.AsGet(),
+		autorest.WithBaseURL(client.BaseURI),
+		autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}/authorizations/{authorizationName}"),
+		autorest.WithPathParameters(pathParameters),
+		autorest.WithQueryParameters(queryParameters))
+}
+
+// GetSender sends the Get request. The method will close the
+// http.Response Body if it receives an error.
+func (client ExpressRouteCircuitAuthorizationsClient) GetSender(req *http.Request) (*http.Response, error) {
+	return client.Send(req, http.StatusOK)
+}
+
+// GetResponder handles the response to the Get request. The method always
+// closes the http.Response Body.
+func (client ExpressRouteCircuitAuthorizationsClient) GetResponder(resp *http.Response) (result ExpressRouteCircuitAuthorization, err error) {
+	err = autorest.Respond(
+		resp,
+		client.ByInspecting(),
+		autorest.WithErrorUnlessStatusCode(http.StatusOK),
+		autorest.ByUnmarshallingJSON(&result),
+		autorest.ByClosing())
+	result.Response = autorest.Response{Response: resp}
+	return
+}
+
+// List the List authorization operation retrieves all the authorizations in
+// an ExpressRouteCircuit.
+//
+// resourceGroupName is the name of the resource group. circuitName is the
+// name of the circuit.
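+//
+// A minimal paging sketch (hypothetical names; credential setup omitted).
+// ListNextResults returns an empty result when there is no next page:
+//
+//	client := network.NewExpressRouteCircuitAuthorizationsClient("subscription-id")
+//	page, err := client.List("my-rg", "my-circuit")
+//	if err == nil {
+//		page, err = client.ListNextResults(page) // next page, if any
+//	}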
+func (client ExpressRouteCircuitAuthorizationsClient) List(resourceGroupName string, circuitName string) (result AuthorizationListResult, ae error) { + req, err := client.ListPreparer(resourceGroupName, circuitName) + if err != nil { + return result, autorest.NewErrorWithError(err, "network/ExpressRouteCircuitAuthorizationsClient", "List", autorest.UndefinedStatusCode, "Failure preparing request") + } + + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "network/ExpressRouteCircuitAuthorizationsClient", "List", resp.StatusCode, "Failure sending request") + } + + result, err = client.ListResponder(resp) + if err != nil { + ae = autorest.NewErrorWithError(err, "network/ExpressRouteCircuitAuthorizationsClient", "List", resp.StatusCode, "Failure responding to request") + } + + return +} + +// ListPreparer prepares the List request. +func (client ExpressRouteCircuitAuthorizationsClient) ListPreparer(resourceGroupName string, circuitName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "circuitName": url.QueryEscape(circuitName), + "resourceGroupName": url.QueryEscape(resourceGroupName), + "subscriptionId": url.QueryEscape(client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}/authorizations"), + autorest.WithPathParameters(pathParameters), + autorest.WithQueryParameters(queryParameters)) +} + +// ListSender sends the List request. The method will close the +// http.Response Body if it receives an error. +func (client ExpressRouteCircuitAuthorizationsClient) ListSender(req *http.Request) (*http.Response, error) { + return client.Send(req, http.StatusOK) +} + +// ListResponder handles the response to the List request. The method always +// closes the http.Response Body. +func (client ExpressRouteCircuitAuthorizationsClient) ListResponder(resp *http.Response) (result AuthorizationListResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + autorest.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// ListNextResults retrieves the next set of results, if any. 
+func (client ExpressRouteCircuitAuthorizationsClient) ListNextResults(lastResults AuthorizationListResult) (result AuthorizationListResult, ae error) {
+	req, err := lastResults.AuthorizationListResultPreparer()
+	if err != nil {
+		return result, autorest.NewErrorWithError(err, "network/ExpressRouteCircuitAuthorizationsClient", "List", autorest.UndefinedStatusCode, "Failure preparing next results request")
+	}
+	if req == nil {
+		return
+	}
+
+	resp, err := client.ListSender(req)
+	if err != nil {
+		result.Response = autorest.Response{Response: resp}
+		return result, autorest.NewErrorWithError(err, "network/ExpressRouteCircuitAuthorizationsClient", "List", resp.StatusCode, "Failure sending next results request")
+	}
+
+	result, err = client.ListResponder(resp)
+	if err != nil {
+		ae = autorest.NewErrorWithError(err, "network/ExpressRouteCircuitAuthorizationsClient", "List", resp.StatusCode, "Failure responding to next results request")
+	}
+
+	return
+}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/network/expressroutecircuitpeerings.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/expressroutecircuitpeerings.go
new file mode 100644
index 000000000..5fcc14392
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/expressroutecircuitpeerings.go
@@ -0,0 +1,335 @@
+package network
+
+// Copyright (c) Microsoft and contributors. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Code generated by Microsoft (R) AutoRest Code Generator 0.12.0.0
+// Changes may cause incorrect behavior and will be lost if the code is
+// regenerated.
+
+import (
+	"github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest"
+	"net/http"
+	"net/url"
+)
+
+// ExpressRouteCircuitPeeringsClient is the client for the Microsoft Azure
+// Network management API, which provides a RESTful set of web services that
+// interact with the Microsoft Azure Networks service to manage your network
+// resources. The API has entities that capture the relationship between an
+// end user and the Microsoft Azure Networks service.
+type ExpressRouteCircuitPeeringsClient struct {
+	ManagementClient
+}
+
+// NewExpressRouteCircuitPeeringsClient creates an instance of the
+// ExpressRouteCircuitPeeringsClient client.
+func NewExpressRouteCircuitPeeringsClient(subscriptionID string) ExpressRouteCircuitPeeringsClient {
+	return NewExpressRouteCircuitPeeringsClientWithBaseURI(DefaultBaseURI, subscriptionID)
+}
+
+// NewExpressRouteCircuitPeeringsClientWithBaseURI creates an instance of the
+// ExpressRouteCircuitPeeringsClient client.
+func NewExpressRouteCircuitPeeringsClientWithBaseURI(baseURI string, subscriptionID string) ExpressRouteCircuitPeeringsClient {
+	return ExpressRouteCircuitPeeringsClient{NewWithBaseURI(baseURI, subscriptionID)}
+}
+
+// CreateOrUpdate the Put Peering operation creates/updates a peering in the
+// specified ExpressRouteCircuit.
+//
+// resourceGroupName is the name of the resource group.
circuitName is the +// name of the express route circuit. peeringName is the name of the peering. +// peeringParameters is parameters supplied to the create/update +// ExpressRouteCircuit Peering operation +func (client ExpressRouteCircuitPeeringsClient) CreateOrUpdate(resourceGroupName string, circuitName string, peeringName string, peeringParameters ExpressRouteCircuitPeering) (result ExpressRouteCircuitPeering, ae error) { + req, err := client.CreateOrUpdatePreparer(resourceGroupName, circuitName, peeringName, peeringParameters) + if err != nil { + return result, autorest.NewErrorWithError(err, "network/ExpressRouteCircuitPeeringsClient", "CreateOrUpdate", autorest.UndefinedStatusCode, "Failure preparing request") + } + + resp, err := client.CreateOrUpdateSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "network/ExpressRouteCircuitPeeringsClient", "CreateOrUpdate", resp.StatusCode, "Failure sending request") + } + + result, err = client.CreateOrUpdateResponder(resp) + if err != nil { + ae = autorest.NewErrorWithError(err, "network/ExpressRouteCircuitPeeringsClient", "CreateOrUpdate", resp.StatusCode, "Failure responding to request") + } + + return +} + +// CreateOrUpdatePreparer prepares the CreateOrUpdate request. +func (client ExpressRouteCircuitPeeringsClient) CreateOrUpdatePreparer(resourceGroupName string, circuitName string, peeringName string, peeringParameters ExpressRouteCircuitPeering) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "circuitName": url.QueryEscape(circuitName), + "peeringName": url.QueryEscape(peeringName), + "resourceGroupName": url.QueryEscape(resourceGroupName), + "subscriptionId": url.QueryEscape(client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsPut(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}/peerings/{peeringName}"), + autorest.WithJSON(peeringParameters), + autorest.WithPathParameters(pathParameters), + autorest.WithQueryParameters(queryParameters)) +} + +// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the +// http.Response Body if it receives an error. +func (client ExpressRouteCircuitPeeringsClient) CreateOrUpdateSender(req *http.Request) (*http.Response, error) { + return client.Send(req, http.StatusOK, http.StatusCreated) +} + +// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always +// closes the http.Response Body. +func (client ExpressRouteCircuitPeeringsClient) CreateOrUpdateResponder(resp *http.Response) (result ExpressRouteCircuitPeering, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + autorest.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// Delete the delete peering operation deletes the specified peering from the +// ExpressRouteCircuit. +// +// resourceGroupName is the name of the resource group. circuitName is the +// name of the express route circuit. peeringName is the name of the peering. 
+func (client ExpressRouteCircuitPeeringsClient) Delete(resourceGroupName string, circuitName string, peeringName string) (result autorest.Response, ae error) { + req, err := client.DeletePreparer(resourceGroupName, circuitName, peeringName) + if err != nil { + return result, autorest.NewErrorWithError(err, "network/ExpressRouteCircuitPeeringsClient", "Delete", autorest.UndefinedStatusCode, "Failure preparing request") + } + + resp, err := client.DeleteSender(req) + if err != nil { + result.Response = resp + return result, autorest.NewErrorWithError(err, "network/ExpressRouteCircuitPeeringsClient", "Delete", resp.StatusCode, "Failure sending request") + } + + result, err = client.DeleteResponder(resp) + if err != nil { + ae = autorest.NewErrorWithError(err, "network/ExpressRouteCircuitPeeringsClient", "Delete", resp.StatusCode, "Failure responding to request") + } + + return +} + +// DeletePreparer prepares the Delete request. +func (client ExpressRouteCircuitPeeringsClient) DeletePreparer(resourceGroupName string, circuitName string, peeringName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "circuitName": url.QueryEscape(circuitName), + "peeringName": url.QueryEscape(peeringName), + "resourceGroupName": url.QueryEscape(resourceGroupName), + "subscriptionId": url.QueryEscape(client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsDelete(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}/peerings/{peeringName}"), + autorest.WithPathParameters(pathParameters), + autorest.WithQueryParameters(queryParameters)) +} + +// DeleteSender sends the Delete request. The method will close the +// http.Response Body if it receives an error. +func (client ExpressRouteCircuitPeeringsClient) DeleteSender(req *http.Request) (*http.Response, error) { + return client.Send(req, http.StatusOK, http.StatusAccepted, http.StatusNoContent) +} + +// DeleteResponder handles the response to the Delete request. The method always +// closes the http.Response Body. +func (client ExpressRouteCircuitPeeringsClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + autorest.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted, http.StatusNoContent), + autorest.ByClosing()) + result.Response = resp + return +} + +// Get the GET peering operation retrieves the specified authorization from +// the ExpressRouteCircuit. +// +// resourceGroupName is the name of the resource group. circuitName is the +// name of the express route circuit. peeringName is the name of the peering. 
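+//
+// A minimal usage sketch (hypothetical names, including the peering name;
+// credential setup omitted):
+//
+//	client := network.NewExpressRouteCircuitPeeringsClient("subscription-id")
+//	peering, err := client.Get("my-rg", "my-circuit", "AzurePrivatePeering")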
+func (client ExpressRouteCircuitPeeringsClient) Get(resourceGroupName string, circuitName string, peeringName string) (result ExpressRouteCircuitPeering, ae error) {
+	req, err := client.GetPreparer(resourceGroupName, circuitName, peeringName)
+	if err != nil {
+		return result, autorest.NewErrorWithError(err, "network/ExpressRouteCircuitPeeringsClient", "Get", autorest.UndefinedStatusCode, "Failure preparing request")
+	}
+
+	resp, err := client.GetSender(req)
+	if err != nil {
+		result.Response = autorest.Response{Response: resp}
+		return result, autorest.NewErrorWithError(err, "network/ExpressRouteCircuitPeeringsClient", "Get", resp.StatusCode, "Failure sending request")
+	}
+
+	result, err = client.GetResponder(resp)
+	if err != nil {
+		ae = autorest.NewErrorWithError(err, "network/ExpressRouteCircuitPeeringsClient", "Get", resp.StatusCode, "Failure responding to request")
+	}
+
+	return
+}
+
+// GetPreparer prepares the Get request.
+func (client ExpressRouteCircuitPeeringsClient) GetPreparer(resourceGroupName string, circuitName string, peeringName string) (*http.Request, error) {
+	pathParameters := map[string]interface{}{
+		"circuitName":       url.QueryEscape(circuitName),
+		"peeringName":       url.QueryEscape(peeringName),
+		"resourceGroupName": url.QueryEscape(resourceGroupName),
+		"subscriptionId":    url.QueryEscape(client.SubscriptionID),
+	}
+
+	queryParameters := map[string]interface{}{
+		"api-version": APIVersion,
+	}
+
+	return autorest.Prepare(&http.Request{},
+		autorest.AsJSON(),
+		autorest.AsGet(),
+		autorest.WithBaseURL(client.BaseURI),
+		autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}/peerings/{peeringName}"),
+		autorest.WithPathParameters(pathParameters),
+		autorest.WithQueryParameters(queryParameters))
+}
+
+// GetSender sends the Get request. The method will close the
+// http.Response Body if it receives an error.
+func (client ExpressRouteCircuitPeeringsClient) GetSender(req *http.Request) (*http.Response, error) {
+	return client.Send(req, http.StatusOK)
+}
+
+// GetResponder handles the response to the Get request. The method always
+// closes the http.Response Body.
+func (client ExpressRouteCircuitPeeringsClient) GetResponder(resp *http.Response) (result ExpressRouteCircuitPeering, err error) {
+	err = autorest.Respond(
+		resp,
+		client.ByInspecting(),
+		autorest.WithErrorUnlessStatusCode(http.StatusOK),
+		autorest.ByUnmarshallingJSON(&result),
+		autorest.ByClosing())
+	result.Response = autorest.Response{Response: resp}
+	return
+}
+
+// List the List peering operation retrieves all the peerings in an
+// ExpressRouteCircuit.
+//
+// resourceGroupName is the name of the resource group. circuitName is the
+// name of the circuit.
+func (client ExpressRouteCircuitPeeringsClient) List(resourceGroupName string, circuitName string) (result ExpressRouteCircuitPeeringListResult, ae error) { + req, err := client.ListPreparer(resourceGroupName, circuitName) + if err != nil { + return result, autorest.NewErrorWithError(err, "network/ExpressRouteCircuitPeeringsClient", "List", autorest.UndefinedStatusCode, "Failure preparing request") + } + + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "network/ExpressRouteCircuitPeeringsClient", "List", resp.StatusCode, "Failure sending request") + } + + result, err = client.ListResponder(resp) + if err != nil { + ae = autorest.NewErrorWithError(err, "network/ExpressRouteCircuitPeeringsClient", "List", resp.StatusCode, "Failure responding to request") + } + + return +} + +// ListPreparer prepares the List request. +func (client ExpressRouteCircuitPeeringsClient) ListPreparer(resourceGroupName string, circuitName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "circuitName": url.QueryEscape(circuitName), + "resourceGroupName": url.QueryEscape(resourceGroupName), + "subscriptionId": url.QueryEscape(client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}/peerings"), + autorest.WithPathParameters(pathParameters), + autorest.WithQueryParameters(queryParameters)) +} + +// ListSender sends the List request. The method will close the +// http.Response Body if it receives an error. +func (client ExpressRouteCircuitPeeringsClient) ListSender(req *http.Request) (*http.Response, error) { + return client.Send(req, http.StatusOK) +} + +// ListResponder handles the response to the List request. The method always +// closes the http.Response Body. +func (client ExpressRouteCircuitPeeringsClient) ListResponder(resp *http.Response) (result ExpressRouteCircuitPeeringListResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + autorest.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// ListNextResults retrieves the next set of results, if any. 
+func (client ExpressRouteCircuitPeeringsClient) ListNextResults(lastResults ExpressRouteCircuitPeeringListResult) (result ExpressRouteCircuitPeeringListResult, ae error) {
+	req, err := lastResults.ExpressRouteCircuitPeeringListResultPreparer()
+	if err != nil {
+		return result, autorest.NewErrorWithError(err, "network/ExpressRouteCircuitPeeringsClient", "List", autorest.UndefinedStatusCode, "Failure preparing next results request")
+	}
+	if req == nil {
+		return
+	}
+
+	resp, err := client.ListSender(req)
+	if err != nil {
+		result.Response = autorest.Response{Response: resp}
+		return result, autorest.NewErrorWithError(err, "network/ExpressRouteCircuitPeeringsClient", "List", resp.StatusCode, "Failure sending next results request")
+	}
+
+	result, err = client.ListResponder(resp)
+	if err != nil {
+		ae = autorest.NewErrorWithError(err, "network/ExpressRouteCircuitPeeringsClient", "List", resp.StatusCode, "Failure responding to next results request")
+	}
+
+	return
+}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/network/expressroutecircuits.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/expressroutecircuits.go
new file mode 100644
index 000000000..8b1ad1fee
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/expressroutecircuits.go
@@ -0,0 +1,682 @@
+package network
+
+// Copyright (c) Microsoft and contributors. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Code generated by Microsoft (R) AutoRest Code Generator 0.12.0.0
+// Changes may cause incorrect behavior and will be lost if the code is
+// regenerated.
+
+import (
+	"github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest"
+	"net/http"
+	"net/url"
+)
+
+// ExpressRouteCircuitsClient is the client for the Microsoft Azure Network
+// management API, which provides a RESTful set of web services that interact
+// with the Microsoft Azure Networks service to manage your network
+// resources. The API has entities that capture the relationship between an
+// end user and the Microsoft Azure Networks service.
+type ExpressRouteCircuitsClient struct {
+	ManagementClient
+}
+
+// NewExpressRouteCircuitsClient creates an instance of the
+// ExpressRouteCircuitsClient client.
+func NewExpressRouteCircuitsClient(subscriptionID string) ExpressRouteCircuitsClient {
+	return NewExpressRouteCircuitsClientWithBaseURI(DefaultBaseURI, subscriptionID)
+}
+
+// NewExpressRouteCircuitsClientWithBaseURI creates an instance of the
+// ExpressRouteCircuitsClient client.
+func NewExpressRouteCircuitsClientWithBaseURI(baseURI string, subscriptionID string) ExpressRouteCircuitsClient {
+	return ExpressRouteCircuitsClient{NewWithBaseURI(baseURI, subscriptionID)}
+}
+
+// CreateOrUpdate the Put ExpressRouteCircuit operation creates/updates an
+// ExpressRouteCircuit.
+//
+// resourceGroupName is the name of the resource group. circuitName is the
+// name of the circuit.
parameters is parameters supplied to the +// create/delete ExpressRouteCircuit operation +func (client ExpressRouteCircuitsClient) CreateOrUpdate(resourceGroupName string, circuitName string, parameters ExpressRouteCircuit) (result ExpressRouteCircuit, ae error) { + req, err := client.CreateOrUpdatePreparer(resourceGroupName, circuitName, parameters) + if err != nil { + return result, autorest.NewErrorWithError(err, "network/ExpressRouteCircuitsClient", "CreateOrUpdate", autorest.UndefinedStatusCode, "Failure preparing request") + } + + resp, err := client.CreateOrUpdateSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "network/ExpressRouteCircuitsClient", "CreateOrUpdate", resp.StatusCode, "Failure sending request") + } + + result, err = client.CreateOrUpdateResponder(resp) + if err != nil { + ae = autorest.NewErrorWithError(err, "network/ExpressRouteCircuitsClient", "CreateOrUpdate", resp.StatusCode, "Failure responding to request") + } + + return +} + +// CreateOrUpdatePreparer prepares the CreateOrUpdate request. +func (client ExpressRouteCircuitsClient) CreateOrUpdatePreparer(resourceGroupName string, circuitName string, parameters ExpressRouteCircuit) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "circuitName": url.QueryEscape(circuitName), + "resourceGroupName": url.QueryEscape(resourceGroupName), + "subscriptionId": url.QueryEscape(client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsPut(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}/"), + autorest.WithJSON(parameters), + autorest.WithPathParameters(pathParameters), + autorest.WithQueryParameters(queryParameters)) +} + +// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the +// http.Response Body if it receives an error. +func (client ExpressRouteCircuitsClient) CreateOrUpdateSender(req *http.Request) (*http.Response, error) { + return client.Send(req, http.StatusCreated, http.StatusOK) +} + +// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always +// closes the http.Response Body. +func (client ExpressRouteCircuitsClient) CreateOrUpdateResponder(resp *http.Response) (result ExpressRouteCircuit, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + autorest.WithErrorUnlessStatusCode(http.StatusCreated, http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// Delete the delete ExpressRouteCircuit operation deletes the specified +// ExpressRouteCircuit. +// +// resourceGroupName is the name of the resource group. circuitName is the +// name of the express route Circuit. 
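+//
+// A minimal usage sketch (hypothetical names; credential setup omitted):
+//
+//	client := network.NewExpressRouteCircuitsClient("subscription-id")
+//	if _, err := client.Delete("my-rg", "my-circuit"); err != nil {
+//		// handle the error
+//	}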
+func (client ExpressRouteCircuitsClient) Delete(resourceGroupName string, circuitName string) (result autorest.Response, ae error) { + req, err := client.DeletePreparer(resourceGroupName, circuitName) + if err != nil { + return result, autorest.NewErrorWithError(err, "network/ExpressRouteCircuitsClient", "Delete", autorest.UndefinedStatusCode, "Failure preparing request") + } + + resp, err := client.DeleteSender(req) + if err != nil { + result.Response = resp + return result, autorest.NewErrorWithError(err, "network/ExpressRouteCircuitsClient", "Delete", resp.StatusCode, "Failure sending request") + } + + result, err = client.DeleteResponder(resp) + if err != nil { + ae = autorest.NewErrorWithError(err, "network/ExpressRouteCircuitsClient", "Delete", resp.StatusCode, "Failure responding to request") + } + + return +} + +// DeletePreparer prepares the Delete request. +func (client ExpressRouteCircuitsClient) DeletePreparer(resourceGroupName string, circuitName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "circuitName": url.QueryEscape(circuitName), + "resourceGroupName": url.QueryEscape(resourceGroupName), + "subscriptionId": url.QueryEscape(client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsDelete(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}/"), + autorest.WithPathParameters(pathParameters), + autorest.WithQueryParameters(queryParameters)) +} + +// DeleteSender sends the Delete request. The method will close the +// http.Response Body if it receives an error. +func (client ExpressRouteCircuitsClient) DeleteSender(req *http.Request) (*http.Response, error) { + return client.Send(req, http.StatusNoContent, http.StatusAccepted, http.StatusOK) +} + +// DeleteResponder handles the response to the Delete request. The method always +// closes the http.Response Body. +func (client ExpressRouteCircuitsClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + autorest.WithErrorUnlessStatusCode(http.StatusNoContent, http.StatusAccepted, http.StatusOK), + autorest.ByClosing()) + result.Response = resp + return +} + +// Get the Get ExpressRouteCircuit operation retreives information about the +// specified ExpressRouteCircuit. +// +// resourceGroupName is the name of the resource group. circuitName is the +// name of the circuit. +func (client ExpressRouteCircuitsClient) Get(resourceGroupName string, circuitName string) (result ExpressRouteCircuit, ae error) { + req, err := client.GetPreparer(resourceGroupName, circuitName) + if err != nil { + return result, autorest.NewErrorWithError(err, "network/ExpressRouteCircuitsClient", "Get", autorest.UndefinedStatusCode, "Failure preparing request") + } + + resp, err := client.GetSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "network/ExpressRouteCircuitsClient", "Get", resp.StatusCode, "Failure sending request") + } + + result, err = client.GetResponder(resp) + if err != nil { + ae = autorest.NewErrorWithError(err, "network/ExpressRouteCircuitsClient", "Get", resp.StatusCode, "Failure responding to request") + } + + return +} + +// GetPreparer prepares the Get request. 
+func (client ExpressRouteCircuitsClient) GetPreparer(resourceGroupName string, circuitName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "circuitName": url.QueryEscape(circuitName), + "resourceGroupName": url.QueryEscape(resourceGroupName), + "subscriptionId": url.QueryEscape(client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}/"), + autorest.WithPathParameters(pathParameters), + autorest.WithQueryParameters(queryParameters)) +} + +// GetSender sends the Get request. The method will close the +// http.Response Body if it receives an error. +func (client ExpressRouteCircuitsClient) GetSender(req *http.Request) (*http.Response, error) { + return client.Send(req, http.StatusOK) +} + +// GetResponder handles the response to the Get request. The method always +// closes the http.Response Body. +func (client ExpressRouteCircuitsClient) GetResponder(resp *http.Response) (result ExpressRouteCircuit, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + autorest.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// List the List ExpressRouteCircuit opertion retrieves all the +// ExpressRouteCircuits in a resource group. +// +// resourceGroupName is the name of the resource group. +func (client ExpressRouteCircuitsClient) List(resourceGroupName string) (result ExpressRouteCircuitListResult, ae error) { + req, err := client.ListPreparer(resourceGroupName) + if err != nil { + return result, autorest.NewErrorWithError(err, "network/ExpressRouteCircuitsClient", "List", autorest.UndefinedStatusCode, "Failure preparing request") + } + + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "network/ExpressRouteCircuitsClient", "List", resp.StatusCode, "Failure sending request") + } + + result, err = client.ListResponder(resp) + if err != nil { + ae = autorest.NewErrorWithError(err, "network/ExpressRouteCircuitsClient", "List", resp.StatusCode, "Failure responding to request") + } + + return +} + +// ListPreparer prepares the List request. +func (client ExpressRouteCircuitsClient) ListPreparer(resourceGroupName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": url.QueryEscape(resourceGroupName), + "subscriptionId": url.QueryEscape(client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits"), + autorest.WithPathParameters(pathParameters), + autorest.WithQueryParameters(queryParameters)) +} + +// ListSender sends the List request. The method will close the +// http.Response Body if it receives an error. 
+func (client ExpressRouteCircuitsClient) ListSender(req *http.Request) (*http.Response, error) { + return client.Send(req, http.StatusOK) +} + +// ListResponder handles the response to the List request. The method always +// closes the http.Response Body. +func (client ExpressRouteCircuitsClient) ListResponder(resp *http.Response) (result ExpressRouteCircuitListResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + autorest.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// ListNextResults retrieves the next set of results, if any. +func (client ExpressRouteCircuitsClient) ListNextResults(lastResults ExpressRouteCircuitListResult) (result ExpressRouteCircuitListResult, ae error) { + req, err := lastResults.ExpressRouteCircuitListResultPreparer() + if err != nil { + return result, autorest.NewErrorWithError(err, "network/ExpressRouteCircuitsClient", "List", autorest.UndefinedStatusCode, "Failure preparing next results request request") + } + if req == nil { + return + } + + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "network/ExpressRouteCircuitsClient", "List", resp.StatusCode, "Failure sending next results request request") + } + + result, err = client.ListResponder(resp) + if err != nil { + ae = autorest.NewErrorWithError(err, "network/ExpressRouteCircuitsClient", "List", resp.StatusCode, "Failure responding to next results request request") + } + + return +} + +// ListAll the List ExpressRouteCircuit opertion retrieves all the +// ExpressRouteCircuits in a subscription. +func (client ExpressRouteCircuitsClient) ListAll() (result ExpressRouteCircuitListResult, ae error) { + req, err := client.ListAllPreparer() + if err != nil { + return result, autorest.NewErrorWithError(err, "network/ExpressRouteCircuitsClient", "ListAll", autorest.UndefinedStatusCode, "Failure preparing request") + } + + resp, err := client.ListAllSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "network/ExpressRouteCircuitsClient", "ListAll", resp.StatusCode, "Failure sending request") + } + + result, err = client.ListAllResponder(resp) + if err != nil { + ae = autorest.NewErrorWithError(err, "network/ExpressRouteCircuitsClient", "ListAll", resp.StatusCode, "Failure responding to request") + } + + return +} + +// ListAllPreparer prepares the ListAll request. +func (client ExpressRouteCircuitsClient) ListAllPreparer() (*http.Request, error) { + pathParameters := map[string]interface{}{ + "subscriptionId": url.QueryEscape(client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPath("/subscriptions/{subscriptionId}/providers/Microsoft.Network/expressRouteCircuits"), + autorest.WithPathParameters(pathParameters), + autorest.WithQueryParameters(queryParameters)) +} + +// ListAllSender sends the ListAll request. The method will close the +// http.Response Body if it receives an error. 
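+//
+// Callers normally use ListAll, but because every operation here is split
+// into a Preparer/Sender/Responder triple, the stages can also be driven by
+// hand, e.g. to inspect or retry the raw request. A minimal sketch under
+// that assumption:
+//
+//	req, err := client.ListAllPreparer()
+//	if err == nil {
+//		var resp *http.Response
+//		if resp, err = client.ListAllSender(req); err == nil {
+//			_, err = client.ListAllResponder(resp)
+//		}
+//	}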
+func (client ExpressRouteCircuitsClient) ListAllSender(req *http.Request) (*http.Response, error) { + return client.Send(req, http.StatusOK) +} + +// ListAllResponder handles the response to the ListAll request. The method always +// closes the http.Response Body. +func (client ExpressRouteCircuitsClient) ListAllResponder(resp *http.Response) (result ExpressRouteCircuitListResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + autorest.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// ListAllNextResults retrieves the next set of results, if any. +func (client ExpressRouteCircuitsClient) ListAllNextResults(lastResults ExpressRouteCircuitListResult) (result ExpressRouteCircuitListResult, ae error) { + req, err := lastResults.ExpressRouteCircuitListResultPreparer() + if err != nil { + return result, autorest.NewErrorWithError(err, "network/ExpressRouteCircuitsClient", "ListAll", autorest.UndefinedStatusCode, "Failure preparing next results request request") + } + if req == nil { + return + } + + resp, err := client.ListAllSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "network/ExpressRouteCircuitsClient", "ListAll", resp.StatusCode, "Failure sending next results request request") + } + + result, err = client.ListAllResponder(resp) + if err != nil { + ae = autorest.NewErrorWithError(err, "network/ExpressRouteCircuitsClient", "ListAll", resp.StatusCode, "Failure responding to next results request request") + } + + return +} + +// ListArpTable the ListArpTable from ExpressRouteCircuit opertion retrieves +// the currently advertised arp table associated with the +// ExpressRouteCircuits in a resource group. +// +// resourceGroupName is the name of the resource group. circuitName is the +// name of the circuit. +func (client ExpressRouteCircuitsClient) ListArpTable(resourceGroupName string, circuitName string) (result ExpressRouteCircuitsArpTableListResult, ae error) { + req, err := client.ListArpTablePreparer(resourceGroupName, circuitName) + if err != nil { + return result, autorest.NewErrorWithError(err, "network/ExpressRouteCircuitsClient", "ListArpTable", autorest.UndefinedStatusCode, "Failure preparing request") + } + + resp, err := client.ListArpTableSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "network/ExpressRouteCircuitsClient", "ListArpTable", resp.StatusCode, "Failure sending request") + } + + result, err = client.ListArpTableResponder(resp) + if err != nil { + ae = autorest.NewErrorWithError(err, "network/ExpressRouteCircuitsClient", "ListArpTable", resp.StatusCode, "Failure responding to request") + } + + return +} + +// ListArpTablePreparer prepares the ListArpTable request. 
+func (client ExpressRouteCircuitsClient) ListArpTablePreparer(resourceGroupName string, circuitName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "circuitName": url.QueryEscape(circuitName), + "resourceGroupName": url.QueryEscape(resourceGroupName), + "subscriptionId": url.QueryEscape(client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}/arpTable"), + autorest.WithPathParameters(pathParameters), + autorest.WithQueryParameters(queryParameters)) +} + +// ListArpTableSender sends the ListArpTable request. The method will close the +// http.Response Body if it receives an error. +func (client ExpressRouteCircuitsClient) ListArpTableSender(req *http.Request) (*http.Response, error) { + return client.Send(req, http.StatusOK) +} + +// ListArpTableResponder handles the response to the ListArpTable request. The method always +// closes the http.Response Body. +func (client ExpressRouteCircuitsClient) ListArpTableResponder(resp *http.Response) (result ExpressRouteCircuitsArpTableListResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + autorest.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// ListArpTableNextResults retrieves the next set of results, if any. +func (client ExpressRouteCircuitsClient) ListArpTableNextResults(lastResults ExpressRouteCircuitsArpTableListResult) (result ExpressRouteCircuitsArpTableListResult, ae error) { + req, err := lastResults.ExpressRouteCircuitsArpTableListResultPreparer() + if err != nil { + return result, autorest.NewErrorWithError(err, "network/ExpressRouteCircuitsClient", "ListArpTable", autorest.UndefinedStatusCode, "Failure preparing next results request request") + } + if req == nil { + return + } + + resp, err := client.ListArpTableSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "network/ExpressRouteCircuitsClient", "ListArpTable", resp.StatusCode, "Failure sending next results request request") + } + + result, err = client.ListArpTableResponder(resp) + if err != nil { + ae = autorest.NewErrorWithError(err, "network/ExpressRouteCircuitsClient", "ListArpTable", resp.StatusCode, "Failure responding to next results request request") + } + + return +} + +// ListRoutesTable the ListRoutesTable from ExpressRouteCircuit opertion +// retrieves the currently advertised routes table associated with the +// ExpressRouteCircuits in a resource group. +// +// resourceGroupName is the name of the resource group. circuitName is the +// name of the circuit. 
+func (client ExpressRouteCircuitsClient) ListRoutesTable(resourceGroupName string, circuitName string) (result ExpressRouteCircuitsRoutesTableListResult, ae error) { + req, err := client.ListRoutesTablePreparer(resourceGroupName, circuitName) + if err != nil { + return result, autorest.NewErrorWithError(err, "network/ExpressRouteCircuitsClient", "ListRoutesTable", autorest.UndefinedStatusCode, "Failure preparing request") + } + + resp, err := client.ListRoutesTableSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "network/ExpressRouteCircuitsClient", "ListRoutesTable", resp.StatusCode, "Failure sending request") + } + + result, err = client.ListRoutesTableResponder(resp) + if err != nil { + ae = autorest.NewErrorWithError(err, "network/ExpressRouteCircuitsClient", "ListRoutesTable", resp.StatusCode, "Failure responding to request") + } + + return +} + +// ListRoutesTablePreparer prepares the ListRoutesTable request. +func (client ExpressRouteCircuitsClient) ListRoutesTablePreparer(resourceGroupName string, circuitName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "circuitName": url.QueryEscape(circuitName), + "resourceGroupName": url.QueryEscape(resourceGroupName), + "subscriptionId": url.QueryEscape(client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}/routesTable"), + autorest.WithPathParameters(pathParameters), + autorest.WithQueryParameters(queryParameters)) +} + +// ListRoutesTableSender sends the ListRoutesTable request. The method will close the +// http.Response Body if it receives an error. +func (client ExpressRouteCircuitsClient) ListRoutesTableSender(req *http.Request) (*http.Response, error) { + return client.Send(req, http.StatusOK) +} + +// ListRoutesTableResponder handles the response to the ListRoutesTable request. The method always +// closes the http.Response Body. +func (client ExpressRouteCircuitsClient) ListRoutesTableResponder(resp *http.Response) (result ExpressRouteCircuitsRoutesTableListResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + autorest.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// ListRoutesTableNextResults retrieves the next set of results, if any. 
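+// When the previous page carries no next link, the preparer yields a nil
+// request and the walk ends. A minimal paging sketch, assuming the Value and
+// NextLink fields declared in this package's models.go:
+//
+//	page, err := client.ListRoutesTable("my-resource-group", "my-circuit")
+//	for err == nil && page.Value != nil {
+//		for _, route := range *page.Value {
+//			_ = route // inspect each advertised route
+//		}
+//		if page.NextLink == nil || *page.NextLink == "" {
+//			break
+//		}
+//		page, err = client.ListRoutesTableNextResults(page)
+//	}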
+func (client ExpressRouteCircuitsClient) ListRoutesTableNextResults(lastResults ExpressRouteCircuitsRoutesTableListResult) (result ExpressRouteCircuitsRoutesTableListResult, ae error) { + req, err := lastResults.ExpressRouteCircuitsRoutesTableListResultPreparer() + if err != nil { + return result, autorest.NewErrorWithError(err, "network/ExpressRouteCircuitsClient", "ListRoutesTable", autorest.UndefinedStatusCode, "Failure preparing next results request request") + } + if req == nil { + return + } + + resp, err := client.ListRoutesTableSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "network/ExpressRouteCircuitsClient", "ListRoutesTable", resp.StatusCode, "Failure sending next results request request") + } + + result, err = client.ListRoutesTableResponder(resp) + if err != nil { + ae = autorest.NewErrorWithError(err, "network/ExpressRouteCircuitsClient", "ListRoutesTable", resp.StatusCode, "Failure responding to next results request request") + } + + return +} + +// ListStats the Liststats ExpressRouteCircuit opertion retrieves all the +// stats from a ExpressRouteCircuits in a resource group. +// +// resourceGroupName is the name of the resource group. circuitName is the +// name of the loadBalancer. +func (client ExpressRouteCircuitsClient) ListStats(resourceGroupName string, circuitName string) (result ExpressRouteCircuitsStatsListResult, ae error) { + req, err := client.ListStatsPreparer(resourceGroupName, circuitName) + if err != nil { + return result, autorest.NewErrorWithError(err, "network/ExpressRouteCircuitsClient", "ListStats", autorest.UndefinedStatusCode, "Failure preparing request") + } + + resp, err := client.ListStatsSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "network/ExpressRouteCircuitsClient", "ListStats", resp.StatusCode, "Failure sending request") + } + + result, err = client.ListStatsResponder(resp) + if err != nil { + ae = autorest.NewErrorWithError(err, "network/ExpressRouteCircuitsClient", "ListStats", resp.StatusCode, "Failure responding to request") + } + + return +} + +// ListStatsPreparer prepares the ListStats request. +func (client ExpressRouteCircuitsClient) ListStatsPreparer(resourceGroupName string, circuitName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "circuitName": url.QueryEscape(circuitName), + "resourceGroupName": url.QueryEscape(resourceGroupName), + "subscriptionId": url.QueryEscape(client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}/stats"), + autorest.WithPathParameters(pathParameters), + autorest.WithQueryParameters(queryParameters)) +} + +// ListStatsSender sends the ListStats request. The method will close the +// http.Response Body if it receives an error. +func (client ExpressRouteCircuitsClient) ListStatsSender(req *http.Request) (*http.Response, error) { + return client.Send(req, http.StatusOK) +} + +// ListStatsResponder handles the response to the ListStats request. The method always +// closes the http.Response Body. 
+func (client ExpressRouteCircuitsClient) ListStatsResponder(resp *http.Response) (result ExpressRouteCircuitsStatsListResult, err error) {
+	err = autorest.Respond(
+		resp,
+		client.ByInspecting(),
+		autorest.WithErrorUnlessStatusCode(http.StatusOK),
+		autorest.ByUnmarshallingJSON(&result),
+		autorest.ByClosing())
+	result.Response = autorest.Response{Response: resp}
+	return
+}
+
+// ListStatsNextResults retrieves the next set of results, if any.
+func (client ExpressRouteCircuitsClient) ListStatsNextResults(lastResults ExpressRouteCircuitsStatsListResult) (result ExpressRouteCircuitsStatsListResult, ae error) {
+	req, err := lastResults.ExpressRouteCircuitsStatsListResultPreparer()
+	if err != nil {
+		return result, autorest.NewErrorWithError(err, "network/ExpressRouteCircuitsClient", "ListStats", autorest.UndefinedStatusCode, "Failure preparing next results request")
+	}
+	if req == nil {
+		return
+	}
+
+	resp, err := client.ListStatsSender(req)
+	if err != nil {
+		result.Response = autorest.Response{Response: resp}
+		return result, autorest.NewErrorWithError(err, "network/ExpressRouteCircuitsClient", "ListStats", resp.StatusCode, "Failure sending next results request")
+	}
+
+	result, err = client.ListStatsResponder(resp)
+	if err != nil {
+		ae = autorest.NewErrorWithError(err, "network/ExpressRouteCircuitsClient", "ListStats", resp.StatusCode, "Failure responding to next results request")
+	}
+
+	return
+}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/network/expressrouteserviceproviders.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/expressrouteserviceproviders.go
new file mode 100644
index 000000000..6250e16b4
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/expressrouteserviceproviders.go
@@ -0,0 +1,130 @@
+package network
+
+// Copyright (c) Microsoft and contributors. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Code generated by Microsoft (R) AutoRest Code Generator 0.12.0.0
+// Changes may cause incorrect behavior and will be lost if the code is
+// regenerated.
+
+import (
+	"github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest"
+	"net/http"
+	"net/url"
+)
+
+// ExpressRouteServiceProvidersClient is the client for the Microsoft Azure
+// Network management API, which provides a RESTful set of web services that
+// interact with the Microsoft Azure Networks service to manage your network
+// resources. The API has entities that capture the relationship between an
+// end user and the Microsoft Azure Networks service.
+type ExpressRouteServiceProvidersClient struct {
+	ManagementClient
+}
+
+// NewExpressRouteServiceProvidersClient creates an instance of the
+// ExpressRouteServiceProvidersClient client.
+func NewExpressRouteServiceProvidersClient(subscriptionID string) ExpressRouteServiceProvidersClient { + return NewExpressRouteServiceProvidersClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewExpressRouteServiceProvidersClientWithBaseURI creates an instance of the +// ExpressRouteServiceProvidersClient client. +func NewExpressRouteServiceProvidersClientWithBaseURI(baseURI string, subscriptionID string) ExpressRouteServiceProvidersClient { + return ExpressRouteServiceProvidersClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// List the List ExpressRouteServiceProvider opertion retrieves all the +// available ExpressRouteServiceProviders. +func (client ExpressRouteServiceProvidersClient) List() (result ExpressRouteServiceProviderListResult, ae error) { + req, err := client.ListPreparer() + if err != nil { + return result, autorest.NewErrorWithError(err, "network/ExpressRouteServiceProvidersClient", "List", autorest.UndefinedStatusCode, "Failure preparing request") + } + + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "network/ExpressRouteServiceProvidersClient", "List", resp.StatusCode, "Failure sending request") + } + + result, err = client.ListResponder(resp) + if err != nil { + ae = autorest.NewErrorWithError(err, "network/ExpressRouteServiceProvidersClient", "List", resp.StatusCode, "Failure responding to request") + } + + return +} + +// ListPreparer prepares the List request. +func (client ExpressRouteServiceProvidersClient) ListPreparer() (*http.Request, error) { + pathParameters := map[string]interface{}{ + "subscriptionId": url.QueryEscape(client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPath("/subscriptions/{subscriptionId}/providers/Microsoft.Network/expressRouteServiceProviders"), + autorest.WithPathParameters(pathParameters), + autorest.WithQueryParameters(queryParameters)) +} + +// ListSender sends the List request. The method will close the +// http.Response Body if it receives an error. +func (client ExpressRouteServiceProvidersClient) ListSender(req *http.Request) (*http.Response, error) { + return client.Send(req, http.StatusOK) +} + +// ListResponder handles the response to the List request. The method always +// closes the http.Response Body. +func (client ExpressRouteServiceProvidersClient) ListResponder(resp *http.Response) (result ExpressRouteServiceProviderListResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + autorest.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// ListNextResults retrieves the next set of results, if any. 
+func (client ExpressRouteServiceProvidersClient) ListNextResults(lastResults ExpressRouteServiceProviderListResult) (result ExpressRouteServiceProviderListResult, ae error) { + req, err := lastResults.ExpressRouteServiceProviderListResultPreparer() + if err != nil { + return result, autorest.NewErrorWithError(err, "network/ExpressRouteServiceProvidersClient", "List", autorest.UndefinedStatusCode, "Failure preparing next results request request") + } + if req == nil { + return + } + + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "network/ExpressRouteServiceProvidersClient", "List", resp.StatusCode, "Failure sending next results request request") + } + + result, err = client.ListResponder(resp) + if err != nil { + ae = autorest.NewErrorWithError(err, "network/ExpressRouteServiceProvidersClient", "List", resp.StatusCode, "Failure responding to next results request request") + } + + return +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/network/interfaces.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/interfaces.go new file mode 100644 index 000000000..0bf08a736 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/interfaces.go @@ -0,0 +1,671 @@ +package network + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator 0.12.0.0 +// Changes may cause incorrect behavior and will be lost if the code is +// regenerated. + +import ( + "github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest" + "net/http" + "net/url" +) + +// InterfacesClient is the the Microsoft Azure Network management API provides +// a RESTful set of web services that interact with Microsoft Azure Networks +// service to manage your network resrources. The API has entities that +// capture the relationship between an end user and the Microsoft Azure +// Networks service. +type InterfacesClient struct { + ManagementClient +} + +// NewInterfacesClient creates an instance of the InterfacesClient client. +func NewInterfacesClient(subscriptionID string) InterfacesClient { + return NewInterfacesClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewInterfacesClientWithBaseURI creates an instance of the InterfacesClient +// client. +func NewInterfacesClientWithBaseURI(baseURI string, subscriptionID string) InterfacesClient { + return InterfacesClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// CreateOrUpdate the Put NetworkInterface operation creates/updates a +// networkInterface +// +// resourceGroupName is the name of the resource group. networkInterfaceName +// is the name of the network interface. 
parameters is parameters supplied to +// the create/update NetworkInterface operation +func (client InterfacesClient) CreateOrUpdate(resourceGroupName string, networkInterfaceName string, parameters Interface) (result Interface, ae error) { + req, err := client.CreateOrUpdatePreparer(resourceGroupName, networkInterfaceName, parameters) + if err != nil { + return result, autorest.NewErrorWithError(err, "network/InterfacesClient", "CreateOrUpdate", autorest.UndefinedStatusCode, "Failure preparing request") + } + + resp, err := client.CreateOrUpdateSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "network/InterfacesClient", "CreateOrUpdate", resp.StatusCode, "Failure sending request") + } + + result, err = client.CreateOrUpdateResponder(resp) + if err != nil { + ae = autorest.NewErrorWithError(err, "network/InterfacesClient", "CreateOrUpdate", resp.StatusCode, "Failure responding to request") + } + + return +} + +// CreateOrUpdatePreparer prepares the CreateOrUpdate request. +func (client InterfacesClient) CreateOrUpdatePreparer(resourceGroupName string, networkInterfaceName string, parameters Interface) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "networkInterfaceName": url.QueryEscape(networkInterfaceName), + "resourceGroupName": url.QueryEscape(resourceGroupName), + "subscriptionId": url.QueryEscape(client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsPut(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkInterfaces/{networkInterfaceName}"), + autorest.WithJSON(parameters), + autorest.WithPathParameters(pathParameters), + autorest.WithQueryParameters(queryParameters)) +} + +// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the +// http.Response Body if it receives an error. +func (client InterfacesClient) CreateOrUpdateSender(req *http.Request) (*http.Response, error) { + return client.Send(req, http.StatusCreated, http.StatusOK) +} + +// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always +// closes the http.Response Body. +func (client InterfacesClient) CreateOrUpdateResponder(resp *http.Response) (result Interface, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + autorest.WithErrorUnlessStatusCode(http.StatusCreated, http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// Delete the delete netwokInterface operation deletes the specified +// netwokInterface. +// +// resourceGroupName is the name of the resource group. networkInterfaceName +// is the name of the network interface. 
+func (client InterfacesClient) Delete(resourceGroupName string, networkInterfaceName string) (result autorest.Response, ae error) { + req, err := client.DeletePreparer(resourceGroupName, networkInterfaceName) + if err != nil { + return result, autorest.NewErrorWithError(err, "network/InterfacesClient", "Delete", autorest.UndefinedStatusCode, "Failure preparing request") + } + + resp, err := client.DeleteSender(req) + if err != nil { + result.Response = resp + return result, autorest.NewErrorWithError(err, "network/InterfacesClient", "Delete", resp.StatusCode, "Failure sending request") + } + + result, err = client.DeleteResponder(resp) + if err != nil { + ae = autorest.NewErrorWithError(err, "network/InterfacesClient", "Delete", resp.StatusCode, "Failure responding to request") + } + + return +} + +// DeletePreparer prepares the Delete request. +func (client InterfacesClient) DeletePreparer(resourceGroupName string, networkInterfaceName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "networkInterfaceName": url.QueryEscape(networkInterfaceName), + "resourceGroupName": url.QueryEscape(resourceGroupName), + "subscriptionId": url.QueryEscape(client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsDelete(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkInterfaces/{networkInterfaceName}"), + autorest.WithPathParameters(pathParameters), + autorest.WithQueryParameters(queryParameters)) +} + +// DeleteSender sends the Delete request. The method will close the +// http.Response Body if it receives an error. +func (client InterfacesClient) DeleteSender(req *http.Request) (*http.Response, error) { + return client.Send(req, http.StatusNoContent, http.StatusAccepted, http.StatusOK) +} + +// DeleteResponder handles the response to the Delete request. The method always +// closes the http.Response Body. +func (client InterfacesClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + autorest.WithErrorUnlessStatusCode(http.StatusNoContent, http.StatusAccepted, http.StatusOK), + autorest.ByClosing()) + result.Response = resp + return +} + +// Get the Get ntework interface operation retreives information about the +// specified network interface. +// +// resourceGroupName is the name of the resource group. networkInterfaceName +// is the name of the network interface. expand is expand references +// resources. 
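+//
+// GetPreparer adds the $expand query parameter only when expand is
+// non-empty, so a plain read passes "". A minimal sketch; the expand value
+// shown is a placeholder:
+//
+//	nic, err := client.Get("my-resource-group", "my-nic", "")
+//	nic, err = client.Get("my-resource-group", "my-nic", "ipConfigurations")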
+func (client InterfacesClient) Get(resourceGroupName string, networkInterfaceName string, expand string) (result Interface, ae error) { + req, err := client.GetPreparer(resourceGroupName, networkInterfaceName, expand) + if err != nil { + return result, autorest.NewErrorWithError(err, "network/InterfacesClient", "Get", autorest.UndefinedStatusCode, "Failure preparing request") + } + + resp, err := client.GetSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "network/InterfacesClient", "Get", resp.StatusCode, "Failure sending request") + } + + result, err = client.GetResponder(resp) + if err != nil { + ae = autorest.NewErrorWithError(err, "network/InterfacesClient", "Get", resp.StatusCode, "Failure responding to request") + } + + return +} + +// GetPreparer prepares the Get request. +func (client InterfacesClient) GetPreparer(resourceGroupName string, networkInterfaceName string, expand string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "networkInterfaceName": url.QueryEscape(networkInterfaceName), + "resourceGroupName": url.QueryEscape(resourceGroupName), + "subscriptionId": url.QueryEscape(client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + if len(expand) > 0 { + queryParameters["$expand"] = expand + } + + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkInterfaces/{networkInterfaceName}"), + autorest.WithPathParameters(pathParameters), + autorest.WithQueryParameters(queryParameters)) +} + +// GetSender sends the Get request. The method will close the +// http.Response Body if it receives an error. +func (client InterfacesClient) GetSender(req *http.Request) (*http.Response, error) { + return client.Send(req, http.StatusOK) +} + +// GetResponder handles the response to the Get request. The method always +// closes the http.Response Body. +func (client InterfacesClient) GetResponder(resp *http.Response) (result Interface, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + autorest.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// GetVirtualMachineScaleSetNetworkInterface the Get ntework interface +// operation retreives information about the specified network interface in a +// virtual machine scale set. +// +// resourceGroupName is the name of the resource group. +// virtualMachineScaleSetName is the name of the virtual machine scale set. +// virtualmachineIndex is the virtual machine index. networkInterfaceName is +// the name of the network interface. expand is expand references resources. 
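+//
+// Note that virtualmachineIndex travels as a string path parameter even
+// though it is numeric. A minimal sketch with placeholder names:
+//
+//	nic, err := client.GetVirtualMachineScaleSetNetworkInterface(
+//		"my-resource-group", "my-scale-set", "0", "my-nic", "")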
+func (client InterfacesClient) GetVirtualMachineScaleSetNetworkInterface(resourceGroupName string, virtualMachineScaleSetName string, virtualmachineIndex string, networkInterfaceName string, expand string) (result Interface, ae error) { + req, err := client.GetVirtualMachineScaleSetNetworkInterfacePreparer(resourceGroupName, virtualMachineScaleSetName, virtualmachineIndex, networkInterfaceName, expand) + if err != nil { + return result, autorest.NewErrorWithError(err, "network/InterfacesClient", "GetVirtualMachineScaleSetNetworkInterface", autorest.UndefinedStatusCode, "Failure preparing request") + } + + resp, err := client.GetVirtualMachineScaleSetNetworkInterfaceSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "network/InterfacesClient", "GetVirtualMachineScaleSetNetworkInterface", resp.StatusCode, "Failure sending request") + } + + result, err = client.GetVirtualMachineScaleSetNetworkInterfaceResponder(resp) + if err != nil { + ae = autorest.NewErrorWithError(err, "network/InterfacesClient", "GetVirtualMachineScaleSetNetworkInterface", resp.StatusCode, "Failure responding to request") + } + + return +} + +// GetVirtualMachineScaleSetNetworkInterfacePreparer prepares the GetVirtualMachineScaleSetNetworkInterface request. +func (client InterfacesClient) GetVirtualMachineScaleSetNetworkInterfacePreparer(resourceGroupName string, virtualMachineScaleSetName string, virtualmachineIndex string, networkInterfaceName string, expand string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "networkInterfaceName": url.QueryEscape(networkInterfaceName), + "resourceGroupName": url.QueryEscape(resourceGroupName), + "subscriptionId": url.QueryEscape(client.SubscriptionID), + "virtualmachineIndex": url.QueryEscape(virtualmachineIndex), + "virtualMachineScaleSetName": url.QueryEscape(virtualMachineScaleSetName), + } + + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + if len(expand) > 0 { + queryParameters["$expand"] = expand + } + + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/microsoft.Compute/virtualMachineScaleSets/{virtualMachineScaleSetName}/virtualMachines/{virtualmachineIndex}/networkInterfaces/{networkInterfaceName}"), + autorest.WithPathParameters(pathParameters), + autorest.WithQueryParameters(queryParameters)) +} + +// GetVirtualMachineScaleSetNetworkInterfaceSender sends the GetVirtualMachineScaleSetNetworkInterface request. The method will close the +// http.Response Body if it receives an error. +func (client InterfacesClient) GetVirtualMachineScaleSetNetworkInterfaceSender(req *http.Request) (*http.Response, error) { + return client.Send(req, http.StatusOK) +} + +// GetVirtualMachineScaleSetNetworkInterfaceResponder handles the response to the GetVirtualMachineScaleSetNetworkInterface request. The method always +// closes the http.Response Body. 
+func (client InterfacesClient) GetVirtualMachineScaleSetNetworkInterfaceResponder(resp *http.Response) (result Interface, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + autorest.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// List the List networkInterfaces opertion retrieves all the +// networkInterfaces in a resource group. +// +// resourceGroupName is the name of the resource group. +func (client InterfacesClient) List(resourceGroupName string) (result InterfaceListResult, ae error) { + req, err := client.ListPreparer(resourceGroupName) + if err != nil { + return result, autorest.NewErrorWithError(err, "network/InterfacesClient", "List", autorest.UndefinedStatusCode, "Failure preparing request") + } + + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "network/InterfacesClient", "List", resp.StatusCode, "Failure sending request") + } + + result, err = client.ListResponder(resp) + if err != nil { + ae = autorest.NewErrorWithError(err, "network/InterfacesClient", "List", resp.StatusCode, "Failure responding to request") + } + + return +} + +// ListPreparer prepares the List request. +func (client InterfacesClient) ListPreparer(resourceGroupName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": url.QueryEscape(resourceGroupName), + "subscriptionId": url.QueryEscape(client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkInterfaces"), + autorest.WithPathParameters(pathParameters), + autorest.WithQueryParameters(queryParameters)) +} + +// ListSender sends the List request. The method will close the +// http.Response Body if it receives an error. +func (client InterfacesClient) ListSender(req *http.Request) (*http.Response, error) { + return client.Send(req, http.StatusOK) +} + +// ListResponder handles the response to the List request. The method always +// closes the http.Response Body. +func (client InterfacesClient) ListResponder(resp *http.Response) (result InterfaceListResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + autorest.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// ListNextResults retrieves the next set of results, if any. 
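+// A typical accumulation loop over every page, assuming the Value and
+// NextLink fields declared in this package's models.go:
+//
+//	var nics []Interface
+//	page, err := client.List("my-resource-group")
+//	for err == nil && page.Value != nil {
+//		nics = append(nics, *page.Value...)
+//		if page.NextLink == nil || *page.NextLink == "" {
+//			break
+//		}
+//		page, err = client.ListNextResults(page)
+//	}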
+func (client InterfacesClient) ListNextResults(lastResults InterfaceListResult) (result InterfaceListResult, ae error) { + req, err := lastResults.InterfaceListResultPreparer() + if err != nil { + return result, autorest.NewErrorWithError(err, "network/InterfacesClient", "List", autorest.UndefinedStatusCode, "Failure preparing next results request request") + } + if req == nil { + return + } + + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "network/InterfacesClient", "List", resp.StatusCode, "Failure sending next results request request") + } + + result, err = client.ListResponder(resp) + if err != nil { + ae = autorest.NewErrorWithError(err, "network/InterfacesClient", "List", resp.StatusCode, "Failure responding to next results request request") + } + + return +} + +// ListAll the List networkInterfaces opertion retrieves all the +// networkInterfaces in a subscription. +func (client InterfacesClient) ListAll() (result InterfaceListResult, ae error) { + req, err := client.ListAllPreparer() + if err != nil { + return result, autorest.NewErrorWithError(err, "network/InterfacesClient", "ListAll", autorest.UndefinedStatusCode, "Failure preparing request") + } + + resp, err := client.ListAllSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "network/InterfacesClient", "ListAll", resp.StatusCode, "Failure sending request") + } + + result, err = client.ListAllResponder(resp) + if err != nil { + ae = autorest.NewErrorWithError(err, "network/InterfacesClient", "ListAll", resp.StatusCode, "Failure responding to request") + } + + return +} + +// ListAllPreparer prepares the ListAll request. +func (client InterfacesClient) ListAllPreparer() (*http.Request, error) { + pathParameters := map[string]interface{}{ + "subscriptionId": url.QueryEscape(client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPath("/subscriptions/{subscriptionId}/providers/Microsoft.Network/networkInterfaces"), + autorest.WithPathParameters(pathParameters), + autorest.WithQueryParameters(queryParameters)) +} + +// ListAllSender sends the ListAll request. The method will close the +// http.Response Body if it receives an error. +func (client InterfacesClient) ListAllSender(req *http.Request) (*http.Response, error) { + return client.Send(req, http.StatusOK) +} + +// ListAllResponder handles the response to the ListAll request. The method always +// closes the http.Response Body. +func (client InterfacesClient) ListAllResponder(resp *http.Response) (result InterfaceListResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + autorest.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// ListAllNextResults retrieves the next set of results, if any. 
+func (client InterfacesClient) ListAllNextResults(lastResults InterfaceListResult) (result InterfaceListResult, ae error) { + req, err := lastResults.InterfaceListResultPreparer() + if err != nil { + return result, autorest.NewErrorWithError(err, "network/InterfacesClient", "ListAll", autorest.UndefinedStatusCode, "Failure preparing next results request request") + } + if req == nil { + return + } + + resp, err := client.ListAllSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "network/InterfacesClient", "ListAll", resp.StatusCode, "Failure sending next results request request") + } + + result, err = client.ListAllResponder(resp) + if err != nil { + ae = autorest.NewErrorWithError(err, "network/InterfacesClient", "ListAll", resp.StatusCode, "Failure responding to next results request request") + } + + return +} + +// ListVirtualMachineScaleSetNetworkInterfaces the list network interface +// operation retrieves information about all network interfaces in a virtual +// machine scale set. +// +// resourceGroupName is the name of the resource group. +// virtualMachineScaleSetName is the name of the virtual machine scale set. +func (client InterfacesClient) ListVirtualMachineScaleSetNetworkInterfaces(resourceGroupName string, virtualMachineScaleSetName string) (result InterfaceListResult, ae error) { + req, err := client.ListVirtualMachineScaleSetNetworkInterfacesPreparer(resourceGroupName, virtualMachineScaleSetName) + if err != nil { + return result, autorest.NewErrorWithError(err, "network/InterfacesClient", "ListVirtualMachineScaleSetNetworkInterfaces", autorest.UndefinedStatusCode, "Failure preparing request") + } + + resp, err := client.ListVirtualMachineScaleSetNetworkInterfacesSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "network/InterfacesClient", "ListVirtualMachineScaleSetNetworkInterfaces", resp.StatusCode, "Failure sending request") + } + + result, err = client.ListVirtualMachineScaleSetNetworkInterfacesResponder(resp) + if err != nil { + ae = autorest.NewErrorWithError(err, "network/InterfacesClient", "ListVirtualMachineScaleSetNetworkInterfaces", resp.StatusCode, "Failure responding to request") + } + + return +} + +// ListVirtualMachineScaleSetNetworkInterfacesPreparer prepares the ListVirtualMachineScaleSetNetworkInterfaces request. +func (client InterfacesClient) ListVirtualMachineScaleSetNetworkInterfacesPreparer(resourceGroupName string, virtualMachineScaleSetName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": url.QueryEscape(resourceGroupName), + "subscriptionId": url.QueryEscape(client.SubscriptionID), + "virtualMachineScaleSetName": url.QueryEscape(virtualMachineScaleSetName), + } + + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/microsoft.Compute/virtualMachineScaleSets/{virtualMachineScaleSetName}/networkInterfaces"), + autorest.WithPathParameters(pathParameters), + autorest.WithQueryParameters(queryParameters)) +} + +// ListVirtualMachineScaleSetNetworkInterfacesSender sends the ListVirtualMachineScaleSetNetworkInterfaces request. 
The method will close the +// http.Response Body if it receives an error. +func (client InterfacesClient) ListVirtualMachineScaleSetNetworkInterfacesSender(req *http.Request) (*http.Response, error) { + return client.Send(req, http.StatusOK) +} + +// ListVirtualMachineScaleSetNetworkInterfacesResponder handles the response to the ListVirtualMachineScaleSetNetworkInterfaces request. The method always +// closes the http.Response Body. +func (client InterfacesClient) ListVirtualMachineScaleSetNetworkInterfacesResponder(resp *http.Response) (result InterfaceListResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + autorest.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// ListVirtualMachineScaleSetNetworkInterfacesNextResults retrieves the next set of results, if any. +func (client InterfacesClient) ListVirtualMachineScaleSetNetworkInterfacesNextResults(lastResults InterfaceListResult) (result InterfaceListResult, ae error) { + req, err := lastResults.InterfaceListResultPreparer() + if err != nil { + return result, autorest.NewErrorWithError(err, "network/InterfacesClient", "ListVirtualMachineScaleSetNetworkInterfaces", autorest.UndefinedStatusCode, "Failure preparing next results request request") + } + if req == nil { + return + } + + resp, err := client.ListVirtualMachineScaleSetNetworkInterfacesSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "network/InterfacesClient", "ListVirtualMachineScaleSetNetworkInterfaces", resp.StatusCode, "Failure sending next results request request") + } + + result, err = client.ListVirtualMachineScaleSetNetworkInterfacesResponder(resp) + if err != nil { + ae = autorest.NewErrorWithError(err, "network/InterfacesClient", "ListVirtualMachineScaleSetNetworkInterfaces", resp.StatusCode, "Failure responding to next results request request") + } + + return +} + +// ListVirtualMachineScaleSetVMNetworkInterfaces the list network interface +// operation retrieves information about all network interfaces in a virtual +// machine from a virtual machine scale set. +// +// resourceGroupName is the name of the resource group. +// virtualMachineScaleSetName is the name of the virtual machine scale set. +// virtualmachineIndex is the virtual machine index. 
+func (client InterfacesClient) ListVirtualMachineScaleSetVMNetworkInterfaces(resourceGroupName string, virtualMachineScaleSetName string, virtualmachineIndex string) (result InterfaceListResult, ae error) { + req, err := client.ListVirtualMachineScaleSetVMNetworkInterfacesPreparer(resourceGroupName, virtualMachineScaleSetName, virtualmachineIndex) + if err != nil { + return result, autorest.NewErrorWithError(err, "network/InterfacesClient", "ListVirtualMachineScaleSetVMNetworkInterfaces", autorest.UndefinedStatusCode, "Failure preparing request") + } + + resp, err := client.ListVirtualMachineScaleSetVMNetworkInterfacesSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "network/InterfacesClient", "ListVirtualMachineScaleSetVMNetworkInterfaces", resp.StatusCode, "Failure sending request") + } + + result, err = client.ListVirtualMachineScaleSetVMNetworkInterfacesResponder(resp) + if err != nil { + ae = autorest.NewErrorWithError(err, "network/InterfacesClient", "ListVirtualMachineScaleSetVMNetworkInterfaces", resp.StatusCode, "Failure responding to request") + } + + return +} + +// ListVirtualMachineScaleSetVMNetworkInterfacesPreparer prepares the ListVirtualMachineScaleSetVMNetworkInterfaces request. +func (client InterfacesClient) ListVirtualMachineScaleSetVMNetworkInterfacesPreparer(resourceGroupName string, virtualMachineScaleSetName string, virtualmachineIndex string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": url.QueryEscape(resourceGroupName), + "subscriptionId": url.QueryEscape(client.SubscriptionID), + "virtualmachineIndex": url.QueryEscape(virtualmachineIndex), + "virtualMachineScaleSetName": url.QueryEscape(virtualMachineScaleSetName), + } + + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/microsoft.Compute/virtualMachineScaleSets/{virtualMachineScaleSetName}/virtualMachines/{virtualmachineIndex}/networkInterfaces"), + autorest.WithPathParameters(pathParameters), + autorest.WithQueryParameters(queryParameters)) +} + +// ListVirtualMachineScaleSetVMNetworkInterfacesSender sends the ListVirtualMachineScaleSetVMNetworkInterfaces request. The method will close the +// http.Response Body if it receives an error. +func (client InterfacesClient) ListVirtualMachineScaleSetVMNetworkInterfacesSender(req *http.Request) (*http.Response, error) { + return client.Send(req, http.StatusOK) +} + +// ListVirtualMachineScaleSetVMNetworkInterfacesResponder handles the response to the ListVirtualMachineScaleSetVMNetworkInterfaces request. The method always +// closes the http.Response Body. +func (client InterfacesClient) ListVirtualMachineScaleSetVMNetworkInterfacesResponder(resp *http.Response) (result InterfaceListResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + autorest.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// ListVirtualMachineScaleSetVMNetworkInterfacesNextResults retrieves the next set of results, if any. 
+func (client InterfacesClient) ListVirtualMachineScaleSetVMNetworkInterfacesNextResults(lastResults InterfaceListResult) (result InterfaceListResult, ae error) {
+	req, err := lastResults.InterfaceListResultPreparer()
+	if err != nil {
+		return result, autorest.NewErrorWithError(err, "network/InterfacesClient", "ListVirtualMachineScaleSetVMNetworkInterfaces", autorest.UndefinedStatusCode, "Failure preparing next results request")
+	}
+	if req == nil {
+		return
+	}
+
+	resp, err := client.ListVirtualMachineScaleSetVMNetworkInterfacesSender(req)
+	if err != nil {
+		result.Response = autorest.Response{Response: resp}
+		return result, autorest.NewErrorWithError(err, "network/InterfacesClient", "ListVirtualMachineScaleSetVMNetworkInterfaces", resp.StatusCode, "Failure sending next results request")
+	}
+
+	result, err = client.ListVirtualMachineScaleSetVMNetworkInterfacesResponder(resp)
+	if err != nil {
+		ae = autorest.NewErrorWithError(err, "network/InterfacesClient", "ListVirtualMachineScaleSetVMNetworkInterfaces", resp.StatusCode, "Failure responding to next results request")
+	}
+
+	return
+}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/network/loadbalancers.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/loadbalancers.go
new file mode 100644
index 000000000..a36a34f46
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/loadbalancers.go
@@ -0,0 +1,414 @@
+package network
+
+// Copyright (c) Microsoft and contributors. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Code generated by Microsoft (R) AutoRest Code Generator 0.12.0.0
+// Changes may cause incorrect behavior and will be lost if the code is
+// regenerated.
+
+import (
+	"github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest"
+	"net/http"
+	"net/url"
+)
+
+// LoadBalancersClient is the Microsoft Azure Network management API, which
+// provides a RESTful set of web services that interact with the Microsoft
+// Azure Networks service to manage your network resources. The API has
+// entities that capture the relationship between an end user and the
+// Microsoft Azure Networks service.
+type LoadBalancersClient struct {
+	ManagementClient
+}
+
+// NewLoadBalancersClient creates an instance of the LoadBalancersClient
+// client.
+func NewLoadBalancersClient(subscriptionID string) LoadBalancersClient {
+	return NewLoadBalancersClientWithBaseURI(DefaultBaseURI, subscriptionID)
+}
+
+// NewLoadBalancersClientWithBaseURI creates an instance of the
+// LoadBalancersClient client.
+func NewLoadBalancersClientWithBaseURI(baseURI string, subscriptionID string) LoadBalancersClient {
+	return LoadBalancersClient{NewWithBaseURI(baseURI, subscriptionID)}
+}
+
+// CreateOrUpdate the Put LoadBalancer operation creates/updates a
+// LoadBalancer.
+//
+// resourceGroupName is the name of the resource group. loadBalancerName is
+// the name of the loadBalancer. parameters is parameters supplied to the
+// create/update LoadBalancer operation.
+func (client LoadBalancersClient) CreateOrUpdate(resourceGroupName string, loadBalancerName string, parameters LoadBalancer) (result LoadBalancer, ae error) {
+	req, err := client.CreateOrUpdatePreparer(resourceGroupName, loadBalancerName, parameters)
+	if err != nil {
+		return result, autorest.NewErrorWithError(err, "network/LoadBalancersClient", "CreateOrUpdate", autorest.UndefinedStatusCode, "Failure preparing request")
+	}
+
+	resp, err := client.CreateOrUpdateSender(req)
+	if err != nil {
+		result.Response = autorest.Response{Response: resp}
+		return result, autorest.NewErrorWithError(err, "network/LoadBalancersClient", "CreateOrUpdate", resp.StatusCode, "Failure sending request")
+	}
+
+	result, err = client.CreateOrUpdateResponder(resp)
+	if err != nil {
+		ae = autorest.NewErrorWithError(err, "network/LoadBalancersClient", "CreateOrUpdate", resp.StatusCode, "Failure responding to request")
+	}
+
+	return
+}
+
+// CreateOrUpdatePreparer prepares the CreateOrUpdate request.
+func (client LoadBalancersClient) CreateOrUpdatePreparer(resourceGroupName string, loadBalancerName string, parameters LoadBalancer) (*http.Request, error) {
+	pathParameters := map[string]interface{}{
+		"loadBalancerName":  url.QueryEscape(loadBalancerName),
+		"resourceGroupName": url.QueryEscape(resourceGroupName),
+		"subscriptionId":    url.QueryEscape(client.SubscriptionID),
+	}
+
+	queryParameters := map[string]interface{}{
+		"api-version": APIVersion,
+	}
+
+	return autorest.Prepare(&http.Request{},
+		autorest.AsJSON(),
+		autorest.AsPut(),
+		autorest.WithBaseURL(client.BaseURI),
+		autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/loadBalancers/{loadBalancerName}"),
+		autorest.WithJSON(parameters),
+		autorest.WithPathParameters(pathParameters),
+		autorest.WithQueryParameters(queryParameters))
+}
+
+// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the
+// http.Response Body if it receives an error.
+func (client LoadBalancersClient) CreateOrUpdateSender(req *http.Request) (*http.Response, error) {
+	return client.Send(req, http.StatusCreated, http.StatusOK)
+}
+
+// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always
+// closes the http.Response Body.
+func (client LoadBalancersClient) CreateOrUpdateResponder(resp *http.Response) (result LoadBalancer, err error) {
+	err = autorest.Respond(
+		resp,
+		client.ByInspecting(),
+		autorest.WithErrorUnlessStatusCode(http.StatusCreated, http.StatusOK),
+		autorest.ByUnmarshallingJSON(&result),
+		autorest.ByClosing())
+	result.Response = autorest.Response{Response: resp}
+	return
+}
+
+// Delete the Delete LoadBalancer operation deletes the specified load
+// balancer.
+//
+// resourceGroupName is the name of the resource group. loadBalancerName is
+// the name of the loadBalancer.
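+//
+// Illustrative call (an editorial sketch, not generated code; the names are
+// hypothetical):
+//
+//	client := NewLoadBalancersClient("00000000-0000-0000-0000-000000000000")
+//	if _, err := client.Delete("my-rg", "my-lb"); err != nil {
+//		// handle the error
+//	}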
+func (client LoadBalancersClient) Delete(resourceGroupName string, loadBalancerName string) (result autorest.Response, ae error) {
+	req, err := client.DeletePreparer(resourceGroupName, loadBalancerName)
+	if err != nil {
+		return result, autorest.NewErrorWithError(err, "network/LoadBalancersClient", "Delete", autorest.UndefinedStatusCode, "Failure preparing request")
+	}
+
+	resp, err := client.DeleteSender(req)
+	if err != nil {
+		result.Response = resp
+		return result, autorest.NewErrorWithError(err, "network/LoadBalancersClient", "Delete", resp.StatusCode, "Failure sending request")
+	}
+
+	result, err = client.DeleteResponder(resp)
+	if err != nil {
+		ae = autorest.NewErrorWithError(err, "network/LoadBalancersClient", "Delete", resp.StatusCode, "Failure responding to request")
+	}
+
+	return
+}
+
+// DeletePreparer prepares the Delete request.
+func (client LoadBalancersClient) DeletePreparer(resourceGroupName string, loadBalancerName string) (*http.Request, error) {
+	pathParameters := map[string]interface{}{
+		"loadBalancerName":  url.QueryEscape(loadBalancerName),
+		"resourceGroupName": url.QueryEscape(resourceGroupName),
+		"subscriptionId":    url.QueryEscape(client.SubscriptionID),
+	}
+
+	queryParameters := map[string]interface{}{
+		"api-version": APIVersion,
+	}
+
+	return autorest.Prepare(&http.Request{},
+		autorest.AsJSON(),
+		autorest.AsDelete(),
+		autorest.WithBaseURL(client.BaseURI),
+		autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/loadBalancers/{loadBalancerName}"),
+		autorest.WithPathParameters(pathParameters),
+		autorest.WithQueryParameters(queryParameters))
+}
+
+// DeleteSender sends the Delete request. The method will close the
+// http.Response Body if it receives an error.
+func (client LoadBalancersClient) DeleteSender(req *http.Request) (*http.Response, error) {
+	return client.Send(req, http.StatusNoContent, http.StatusAccepted, http.StatusOK)
+}
+
+// DeleteResponder handles the response to the Delete request. The method always
+// closes the http.Response Body.
+func (client LoadBalancersClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) {
+	err = autorest.Respond(
+		resp,
+		client.ByInspecting(),
+		autorest.WithErrorUnlessStatusCode(http.StatusNoContent, http.StatusAccepted, http.StatusOK),
+		autorest.ByClosing())
+	result.Response = resp
+	return
+}
+
+// Get the Get LoadBalancer operation retrieves information about the
+// specified load balancer.
+//
+// resourceGroupName is the name of the resource group. loadBalancerName is
+// the name of the loadBalancer. expand is expand references resources.
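+//
+// Illustrative call (an editorial sketch, not generated code; the names are
+// hypothetical, and expand may be "" when no expansion is wanted):
+//
+//	lb, err := client.Get("my-rg", "my-lb", "")
+//	_ = lb // use the retrieved load balancer when err == nil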
+func (client LoadBalancersClient) Get(resourceGroupName string, loadBalancerName string, expand string) (result LoadBalancer, ae error) {
+	req, err := client.GetPreparer(resourceGroupName, loadBalancerName, expand)
+	if err != nil {
+		return result, autorest.NewErrorWithError(err, "network/LoadBalancersClient", "Get", autorest.UndefinedStatusCode, "Failure preparing request")
+	}
+
+	resp, err := client.GetSender(req)
+	if err != nil {
+		result.Response = autorest.Response{Response: resp}
+		return result, autorest.NewErrorWithError(err, "network/LoadBalancersClient", "Get", resp.StatusCode, "Failure sending request")
+	}
+
+	result, err = client.GetResponder(resp)
+	if err != nil {
+		ae = autorest.NewErrorWithError(err, "network/LoadBalancersClient", "Get", resp.StatusCode, "Failure responding to request")
+	}
+
+	return
+}
+
+// GetPreparer prepares the Get request.
+func (client LoadBalancersClient) GetPreparer(resourceGroupName string, loadBalancerName string, expand string) (*http.Request, error) {
+	pathParameters := map[string]interface{}{
+		"loadBalancerName":  url.QueryEscape(loadBalancerName),
+		"resourceGroupName": url.QueryEscape(resourceGroupName),
+		"subscriptionId":    url.QueryEscape(client.SubscriptionID),
+	}
+
+	queryParameters := map[string]interface{}{
+		"api-version": APIVersion,
+	}
+	if len(expand) > 0 {
+		queryParameters["$expand"] = expand
+	}
+
+	return autorest.Prepare(&http.Request{},
+		autorest.AsJSON(),
+		autorest.AsGet(),
+		autorest.WithBaseURL(client.BaseURI),
+		autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/loadBalancers/{loadBalancerName}"),
+		autorest.WithPathParameters(pathParameters),
+		autorest.WithQueryParameters(queryParameters))
+}
+
+// GetSender sends the Get request. The method will close the
+// http.Response Body if it receives an error.
+func (client LoadBalancersClient) GetSender(req *http.Request) (*http.Response, error) {
+	return client.Send(req, http.StatusOK)
+}
+
+// GetResponder handles the response to the Get request. The method always
+// closes the http.Response Body.
+func (client LoadBalancersClient) GetResponder(resp *http.Response) (result LoadBalancer, err error) {
+	err = autorest.Respond(
+		resp,
+		client.ByInspecting(),
+		autorest.WithErrorUnlessStatusCode(http.StatusOK),
+		autorest.ByUnmarshallingJSON(&result),
+		autorest.ByClosing())
+	result.Response = autorest.Response{Response: resp}
+	return
+}
+
+// List the List loadBalancer operation retrieves all the load balancers in a
+// resource group.
+//
+// resourceGroupName is the name of the resource group.
+func (client LoadBalancersClient) List(resourceGroupName string) (result LoadBalancerListResult, ae error) {
+	req, err := client.ListPreparer(resourceGroupName)
+	if err != nil {
+		return result, autorest.NewErrorWithError(err, "network/LoadBalancersClient", "List", autorest.UndefinedStatusCode, "Failure preparing request")
+	}
+
+	resp, err := client.ListSender(req)
+	if err != nil {
+		result.Response = autorest.Response{Response: resp}
+		return result, autorest.NewErrorWithError(err, "network/LoadBalancersClient", "List", resp.StatusCode, "Failure sending request")
+	}
+
+	result, err = client.ListResponder(resp)
+	if err != nil {
+		ae = autorest.NewErrorWithError(err, "network/LoadBalancersClient", "List", resp.StatusCode, "Failure responding to request")
+	}
+
+	return
+}
+
+// ListPreparer prepares the List request.
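+//
+// A sketch of listing with manual pagination (editorial, not generated code;
+// it assumes LoadBalancerListResult exposes NextLink like the other generated
+// list results; ListNextResults is defined later in this file):
+//
+//	page, err := client.List("my-rg")
+//	for err == nil && page.NextLink != nil {
+//		page, err = client.ListNextResults(page)
+//	}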
+func (client LoadBalancersClient) ListPreparer(resourceGroupName string) (*http.Request, error) {
+	pathParameters := map[string]interface{}{
+		"resourceGroupName": url.QueryEscape(resourceGroupName),
+		"subscriptionId":    url.QueryEscape(client.SubscriptionID),
+	}
+
+	queryParameters := map[string]interface{}{
+		"api-version": APIVersion,
+	}
+
+	return autorest.Prepare(&http.Request{},
+		autorest.AsJSON(),
+		autorest.AsGet(),
+		autorest.WithBaseURL(client.BaseURI),
+		autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/loadBalancers"),
+		autorest.WithPathParameters(pathParameters),
+		autorest.WithQueryParameters(queryParameters))
+}
+
+// ListSender sends the List request. The method will close the
+// http.Response Body if it receives an error.
+func (client LoadBalancersClient) ListSender(req *http.Request) (*http.Response, error) {
+	return client.Send(req, http.StatusOK)
+}
+
+// ListResponder handles the response to the List request. The method always
+// closes the http.Response Body.
+func (client LoadBalancersClient) ListResponder(resp *http.Response) (result LoadBalancerListResult, err error) {
+	err = autorest.Respond(
+		resp,
+		client.ByInspecting(),
+		autorest.WithErrorUnlessStatusCode(http.StatusOK),
+		autorest.ByUnmarshallingJSON(&result),
+		autorest.ByClosing())
+	result.Response = autorest.Response{Response: resp}
+	return
+}
+
+// ListNextResults retrieves the next set of results, if any.
+func (client LoadBalancersClient) ListNextResults(lastResults LoadBalancerListResult) (result LoadBalancerListResult, ae error) {
+	req, err := lastResults.LoadBalancerListResultPreparer()
+	if err != nil {
+		return result, autorest.NewErrorWithError(err, "network/LoadBalancersClient", "List", autorest.UndefinedStatusCode, "Failure preparing next results request")
+	}
+	if req == nil {
+		return
+	}
+
+	resp, err := client.ListSender(req)
+	if err != nil {
+		result.Response = autorest.Response{Response: resp}
+		return result, autorest.NewErrorWithError(err, "network/LoadBalancersClient", "List", resp.StatusCode, "Failure sending next results request")
+	}
+
+	result, err = client.ListResponder(resp)
+	if err != nil {
+		ae = autorest.NewErrorWithError(err, "network/LoadBalancersClient", "List", resp.StatusCode, "Failure responding to next results request")
+	}
+
+	return
+}
+
+// ListAll the List loadBalancer operation retrieves all the load balancers in
+// a subscription.
+func (client LoadBalancersClient) ListAll() (result LoadBalancerListResult, ae error) {
+	req, err := client.ListAllPreparer()
+	if err != nil {
+		return result, autorest.NewErrorWithError(err, "network/LoadBalancersClient", "ListAll", autorest.UndefinedStatusCode, "Failure preparing request")
+	}
+
+	resp, err := client.ListAllSender(req)
+	if err != nil {
+		result.Response = autorest.Response{Response: resp}
+		return result, autorest.NewErrorWithError(err, "network/LoadBalancersClient", "ListAll", resp.StatusCode, "Failure sending request")
+	}
+
+	result, err = client.ListAllResponder(resp)
+	if err != nil {
+		ae = autorest.NewErrorWithError(err, "network/LoadBalancersClient", "ListAll", resp.StatusCode, "Failure responding to request")
+	}
+
+	return
+}
+
+// ListAllPreparer prepares the ListAll request.
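+//
+// Sketch: paging across the whole subscription (editorial, not generated
+// code; same NextLink assumption as above):
+//
+//	all, err := client.ListAll()
+//	for err == nil && all.NextLink != nil {
+//		all, err = client.ListAllNextResults(all)
+//	}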
+func (client LoadBalancersClient) ListAllPreparer() (*http.Request, error) {
+	pathParameters := map[string]interface{}{
+		"subscriptionId": url.QueryEscape(client.SubscriptionID),
+	}
+
+	queryParameters := map[string]interface{}{
+		"api-version": APIVersion,
+	}
+
+	return autorest.Prepare(&http.Request{},
+		autorest.AsJSON(),
+		autorest.AsGet(),
+		autorest.WithBaseURL(client.BaseURI),
+		autorest.WithPath("/subscriptions/{subscriptionId}/providers/Microsoft.Network/loadBalancers"),
+		autorest.WithPathParameters(pathParameters),
+		autorest.WithQueryParameters(queryParameters))
+}
+
+// ListAllSender sends the ListAll request. The method will close the
+// http.Response Body if it receives an error.
+func (client LoadBalancersClient) ListAllSender(req *http.Request) (*http.Response, error) {
+	return client.Send(req, http.StatusOK)
+}
+
+// ListAllResponder handles the response to the ListAll request. The method always
+// closes the http.Response Body.
+func (client LoadBalancersClient) ListAllResponder(resp *http.Response) (result LoadBalancerListResult, err error) {
+	err = autorest.Respond(
+		resp,
+		client.ByInspecting(),
+		autorest.WithErrorUnlessStatusCode(http.StatusOK),
+		autorest.ByUnmarshallingJSON(&result),
+		autorest.ByClosing())
+	result.Response = autorest.Response{Response: resp}
+	return
+}
+
+// ListAllNextResults retrieves the next set of results, if any.
+func (client LoadBalancersClient) ListAllNextResults(lastResults LoadBalancerListResult) (result LoadBalancerListResult, ae error) {
+	req, err := lastResults.LoadBalancerListResultPreparer()
+	if err != nil {
+		return result, autorest.NewErrorWithError(err, "network/LoadBalancersClient", "ListAll", autorest.UndefinedStatusCode, "Failure preparing next results request")
+	}
+	if req == nil {
+		return
+	}
+
+	resp, err := client.ListAllSender(req)
+	if err != nil {
+		result.Response = autorest.Response{Response: resp}
+		return result, autorest.NewErrorWithError(err, "network/LoadBalancersClient", "ListAll", resp.StatusCode, "Failure sending next results request")
+	}
+
+	result, err = client.ListAllResponder(resp)
+	if err != nil {
+		ae = autorest.NewErrorWithError(err, "network/LoadBalancersClient", "ListAll", resp.StatusCode, "Failure responding to next results request")
+	}
+
+	return
+}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/network/localnetworkgateways.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/localnetworkgateways.go
new file mode 100644
index 000000000..56116bf34
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/localnetworkgateways.go
@@ -0,0 +1,331 @@
+package network
+
+// Copyright (c) Microsoft and contributors. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Code generated by Microsoft (R) AutoRest Code Generator 0.12.0.0
+// Changes may cause incorrect behavior and will be lost if the code is
+// regenerated.
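+
+// Editorial note, not generated code: every operation in this file follows
+// the same Preparer/Sender/Responder pipeline. A hedged usage sketch with
+// hypothetical names:
+//
+//	client := NewLocalNetworkGatewaysClient("00000000-0000-0000-0000-000000000000")
+//	gw, err := client.Get("my-rg", "my-gateway") // Preparer -> Sender -> Responder
+//	_, _ = gw, err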
+
+import (
+	"github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest"
+	"net/http"
+	"net/url"
+)
+
+// LocalNetworkGatewaysClient is the Microsoft Azure Network management API,
+// which provides a RESTful set of web services that interact with the
+// Microsoft Azure Networks service to manage your network resources. The API
+// has entities that capture the relationship between an end user and the
+// Microsoft Azure Networks service.
+type LocalNetworkGatewaysClient struct {
+	ManagementClient
+}
+
+// NewLocalNetworkGatewaysClient creates an instance of the
+// LocalNetworkGatewaysClient client.
+func NewLocalNetworkGatewaysClient(subscriptionID string) LocalNetworkGatewaysClient {
+	return NewLocalNetworkGatewaysClientWithBaseURI(DefaultBaseURI, subscriptionID)
+}
+
+// NewLocalNetworkGatewaysClientWithBaseURI creates an instance of the
+// LocalNetworkGatewaysClient client.
+func NewLocalNetworkGatewaysClientWithBaseURI(baseURI string, subscriptionID string) LocalNetworkGatewaysClient {
+	return LocalNetworkGatewaysClient{NewWithBaseURI(baseURI, subscriptionID)}
+}
+
+// CreateOrUpdate the Put LocalNetworkGateway operation creates/updates a
+// local network gateway in the specified resource group through the Network
+// resource provider.
+//
+// resourceGroupName is the name of the resource group.
+// localNetworkGatewayName is the name of the local network gateway.
+// parameters is parameters supplied to the Begin Create or update Local
+// Network Gateway operation through the Network resource provider.
+func (client LocalNetworkGatewaysClient) CreateOrUpdate(resourceGroupName string, localNetworkGatewayName string, parameters LocalNetworkGateway) (result LocalNetworkGateway, ae error) {
+	req, err := client.CreateOrUpdatePreparer(resourceGroupName, localNetworkGatewayName, parameters)
+	if err != nil {
+		return result, autorest.NewErrorWithError(err, "network/LocalNetworkGatewaysClient", "CreateOrUpdate", autorest.UndefinedStatusCode, "Failure preparing request")
+	}
+
+	resp, err := client.CreateOrUpdateSender(req)
+	if err != nil {
+		result.Response = autorest.Response{Response: resp}
+		return result, autorest.NewErrorWithError(err, "network/LocalNetworkGatewaysClient", "CreateOrUpdate", resp.StatusCode, "Failure sending request")
+	}
+
+	result, err = client.CreateOrUpdateResponder(resp)
+	if err != nil {
+		ae = autorest.NewErrorWithError(err, "network/LocalNetworkGatewaysClient", "CreateOrUpdate", resp.StatusCode, "Failure responding to request")
+	}
+
+	return
+}
+
+// CreateOrUpdatePreparer prepares the CreateOrUpdate request.
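+//
+// Sketch of using the preparer directly (editorial, not generated code;
+// normally CreateOrUpdate drives this for you, and gw is a hypothetical
+// LocalNetworkGateway value):
+//
+//	req, err := client.CreateOrUpdatePreparer("my-rg", "my-gateway", gw)
+//	if err == nil {
+//		resp, sendErr := client.CreateOrUpdateSender(req)
+//		_, _ = resp, sendErr
+//	}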
+func (client LocalNetworkGatewaysClient) CreateOrUpdatePreparer(resourceGroupName string, localNetworkGatewayName string, parameters LocalNetworkGateway) (*http.Request, error) {
+	pathParameters := map[string]interface{}{
+		"localNetworkGatewayName": url.QueryEscape(localNetworkGatewayName),
+		"resourceGroupName":       url.QueryEscape(resourceGroupName),
+		"subscriptionId":          url.QueryEscape(client.SubscriptionID),
+	}
+
+	queryParameters := map[string]interface{}{
+		"api-version": APIVersion,
+	}
+
+	return autorest.Prepare(&http.Request{},
+		autorest.AsJSON(),
+		autorest.AsPut(),
+		autorest.WithBaseURL(client.BaseURI),
+		autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/localNetworkGateways/{localNetworkGatewayName}"),
+		autorest.WithJSON(parameters),
+		autorest.WithPathParameters(pathParameters),
+		autorest.WithQueryParameters(queryParameters))
+}
+
+// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the
+// http.Response Body if it receives an error.
+func (client LocalNetworkGatewaysClient) CreateOrUpdateSender(req *http.Request) (*http.Response, error) {
+	return client.Send(req, http.StatusCreated, http.StatusOK)
+}
+
+// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always
+// closes the http.Response Body.
+func (client LocalNetworkGatewaysClient) CreateOrUpdateResponder(resp *http.Response) (result LocalNetworkGateway, err error) {
+	err = autorest.Respond(
+		resp,
+		client.ByInspecting(),
+		autorest.WithErrorUnlessStatusCode(http.StatusCreated, http.StatusOK),
+		autorest.ByUnmarshallingJSON(&result),
+		autorest.ByClosing())
+	result.Response = autorest.Response{Response: resp}
+	return
+}
+
+// Delete the Delete LocalNetworkGateway operation deletes the specified local
+// network gateway through the Network resource provider.
+//
+// resourceGroupName is the name of the resource group.
+// localNetworkGatewayName is the name of the local network gateway.
+func (client LocalNetworkGatewaysClient) Delete(resourceGroupName string, localNetworkGatewayName string) (result autorest.Response, ae error) {
+	req, err := client.DeletePreparer(resourceGroupName, localNetworkGatewayName)
+	if err != nil {
+		return result, autorest.NewErrorWithError(err, "network/LocalNetworkGatewaysClient", "Delete", autorest.UndefinedStatusCode, "Failure preparing request")
+	}
+
+	resp, err := client.DeleteSender(req)
+	if err != nil {
+		result.Response = resp
+		return result, autorest.NewErrorWithError(err, "network/LocalNetworkGatewaysClient", "Delete", resp.StatusCode, "Failure sending request")
+	}
+
+	result, err = client.DeleteResponder(resp)
+	if err != nil {
+		ae = autorest.NewErrorWithError(err, "network/LocalNetworkGatewaysClient", "Delete", resp.StatusCode, "Failure responding to request")
+	}
+
+	return
+}
+
+// DeletePreparer prepares the Delete request.
+func (client LocalNetworkGatewaysClient) DeletePreparer(resourceGroupName string, localNetworkGatewayName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "localNetworkGatewayName": url.QueryEscape(localNetworkGatewayName), + "resourceGroupName": url.QueryEscape(resourceGroupName), + "subscriptionId": url.QueryEscape(client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsDelete(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/localNetworkGateways/{localNetworkGatewayName}/"), + autorest.WithPathParameters(pathParameters), + autorest.WithQueryParameters(queryParameters)) +} + +// DeleteSender sends the Delete request. The method will close the +// http.Response Body if it receives an error. +func (client LocalNetworkGatewaysClient) DeleteSender(req *http.Request) (*http.Response, error) { + return client.Send(req, http.StatusNoContent, http.StatusOK, http.StatusAccepted) +} + +// DeleteResponder handles the response to the Delete request. The method always +// closes the http.Response Body. +func (client LocalNetworkGatewaysClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + autorest.WithErrorUnlessStatusCode(http.StatusNoContent, http.StatusOK, http.StatusAccepted), + autorest.ByClosing()) + result.Response = resp + return +} + +// Get the Get LocalNetworkGateway operation retrieves information about the +// specified local network gateway through Network resource provider. +// +// resourceGroupName is the name of the resource group. +// localNetworkGatewayName is the name of the local network gateway. +func (client LocalNetworkGatewaysClient) Get(resourceGroupName string, localNetworkGatewayName string) (result LocalNetworkGateway, ae error) { + req, err := client.GetPreparer(resourceGroupName, localNetworkGatewayName) + if err != nil { + return result, autorest.NewErrorWithError(err, "network/LocalNetworkGatewaysClient", "Get", autorest.UndefinedStatusCode, "Failure preparing request") + } + + resp, err := client.GetSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "network/LocalNetworkGatewaysClient", "Get", resp.StatusCode, "Failure sending request") + } + + result, err = client.GetResponder(resp) + if err != nil { + ae = autorest.NewErrorWithError(err, "network/LocalNetworkGatewaysClient", "Get", resp.StatusCode, "Failure responding to request") + } + + return +} + +// GetPreparer prepares the Get request. 
+func (client LocalNetworkGatewaysClient) GetPreparer(resourceGroupName string, localNetworkGatewayName string) (*http.Request, error) {
+	pathParameters := map[string]interface{}{
+		"localNetworkGatewayName": url.QueryEscape(localNetworkGatewayName),
+		"resourceGroupName":       url.QueryEscape(resourceGroupName),
+		"subscriptionId":          url.QueryEscape(client.SubscriptionID),
+	}
+
+	queryParameters := map[string]interface{}{
+		"api-version": APIVersion,
+	}
+
+	return autorest.Prepare(&http.Request{},
+		autorest.AsJSON(),
+		autorest.AsGet(),
+		autorest.WithBaseURL(client.BaseURI),
+		autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/localNetworkGateways/{localNetworkGatewayName}"),
+		autorest.WithPathParameters(pathParameters),
+		autorest.WithQueryParameters(queryParameters))
+}
+
+// GetSender sends the Get request. The method will close the
+// http.Response Body if it receives an error.
+func (client LocalNetworkGatewaysClient) GetSender(req *http.Request) (*http.Response, error) {
+	return client.Send(req, http.StatusOK)
+}
+
+// GetResponder handles the response to the Get request. The method always
+// closes the http.Response Body.
+func (client LocalNetworkGatewaysClient) GetResponder(resp *http.Response) (result LocalNetworkGateway, err error) {
+	err = autorest.Respond(
+		resp,
+		client.ByInspecting(),
+		autorest.WithErrorUnlessStatusCode(http.StatusOK),
+		autorest.ByUnmarshallingJSON(&result),
+		autorest.ByClosing())
+	result.Response = autorest.Response{Response: resp}
+	return
+}
+
+// List the List LocalNetworkGateways operation retrieves all the local
+// network gateways stored in a resource group.
+//
+// resourceGroupName is the name of the resource group.
+func (client LocalNetworkGatewaysClient) List(resourceGroupName string) (result LocalNetworkGatewayListResult, ae error) {
+	req, err := client.ListPreparer(resourceGroupName)
+	if err != nil {
+		return result, autorest.NewErrorWithError(err, "network/LocalNetworkGatewaysClient", "List", autorest.UndefinedStatusCode, "Failure preparing request")
+	}
+
+	resp, err := client.ListSender(req)
+	if err != nil {
+		result.Response = autorest.Response{Response: resp}
+		return result, autorest.NewErrorWithError(err, "network/LocalNetworkGatewaysClient", "List", resp.StatusCode, "Failure sending request")
+	}
+
+	result, err = client.ListResponder(resp)
+	if err != nil {
+		ae = autorest.NewErrorWithError(err, "network/LocalNetworkGatewaysClient", "List", resp.StatusCode, "Failure responding to request")
+	}
+
+	return
+}
+
+// ListPreparer prepares the List request.
+func (client LocalNetworkGatewaysClient) ListPreparer(resourceGroupName string) (*http.Request, error) {
+	pathParameters := map[string]interface{}{
+		"resourceGroupName": url.QueryEscape(resourceGroupName),
+		"subscriptionId":    url.QueryEscape(client.SubscriptionID),
+	}
+
+	queryParameters := map[string]interface{}{
+		"api-version": APIVersion,
+	}
+
+	return autorest.Prepare(&http.Request{},
+		autorest.AsJSON(),
+		autorest.AsGet(),
+		autorest.WithBaseURL(client.BaseURI),
+		autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/localNetworkGateways"),
+		autorest.WithPathParameters(pathParameters),
+		autorest.WithQueryParameters(queryParameters))
+}
+
+// ListSender sends the List request. The method will close the
+// http.Response Body if it receives an error.
+func (client LocalNetworkGatewaysClient) ListSender(req *http.Request) (*http.Response, error) {
+	return client.Send(req, http.StatusOK)
+}
+
+// ListResponder handles the response to the List request. The method always
+// closes the http.Response Body.
+func (client LocalNetworkGatewaysClient) ListResponder(resp *http.Response) (result LocalNetworkGatewayListResult, err error) {
+	err = autorest.Respond(
+		resp,
+		client.ByInspecting(),
+		autorest.WithErrorUnlessStatusCode(http.StatusOK),
+		autorest.ByUnmarshallingJSON(&result),
+		autorest.ByClosing())
+	result.Response = autorest.Response{Response: resp}
+	return
+}
+
+// ListNextResults retrieves the next set of results, if any.
+func (client LocalNetworkGatewaysClient) ListNextResults(lastResults LocalNetworkGatewayListResult) (result LocalNetworkGatewayListResult, ae error) {
+	req, err := lastResults.LocalNetworkGatewayListResultPreparer()
+	if err != nil {
+		return result, autorest.NewErrorWithError(err, "network/LocalNetworkGatewaysClient", "List", autorest.UndefinedStatusCode, "Failure preparing next results request")
+	}
+	if req == nil {
+		return
+	}
+
+	resp, err := client.ListSender(req)
+	if err != nil {
+		result.Response = autorest.Response{Response: resp}
+		return result, autorest.NewErrorWithError(err, "network/LocalNetworkGatewaysClient", "List", resp.StatusCode, "Failure sending next results request")
+	}
+
+	result, err = client.ListResponder(resp)
+	if err != nil {
+		ae = autorest.NewErrorWithError(err, "network/LocalNetworkGatewaysClient", "List", resp.StatusCode, "Failure responding to next results request")
+	}
+
+	return
+}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/network/models.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/models.go
new file mode 100644
index 000000000..5e23a26e2
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/models.go
@@ -0,0 +1,1853 @@
+package network
+
+// Copyright (c) Microsoft and contributors. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Code generated by Microsoft (R) AutoRest Code Generator 0.12.0.0
+// Changes may cause incorrect behavior and will be lost if the code is
+// regenerated.
+
+import (
+	"github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest"
+	"github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/to"
+	"net/http"
+)
+
+// ApplicationGatewayCookieBasedAffinity enumerates the values for application
+// gateway cookie based affinity.
+type ApplicationGatewayCookieBasedAffinity string
+
+const (
+	// Disabled specifies the disabled state for application gateway cookie
+	// based affinity.
+	Disabled ApplicationGatewayCookieBasedAffinity = "Disabled"
+	// Enabled specifies the enabled state for application gateway cookie
+	// based affinity.
+ Enabled ApplicationGatewayCookieBasedAffinity = "Enabled" +) + +// ApplicationGatewayOperationalState enumerates the values for application +// gateway operational state. +type ApplicationGatewayOperationalState string + +const ( + // Running specifies the running state for application gateway operational + // state. + Running ApplicationGatewayOperationalState = "Running" + // Starting specifies the starting state for application gateway + // operational state. + Starting ApplicationGatewayOperationalState = "Starting" + // Stopped specifies the stopped state for application gateway operational + // state. + Stopped ApplicationGatewayOperationalState = "Stopped" + // Stopping specifies the stopping state for application gateway + // operational state. + Stopping ApplicationGatewayOperationalState = "Stopping" +) + +// ApplicationGatewayProtocol enumerates the values for application gateway +// protocol. +type ApplicationGatewayProtocol string + +const ( + // HTTP specifies the http state for application gateway protocol. + HTTP ApplicationGatewayProtocol = "Http" + // HTTPS specifies the https state for application gateway protocol. + HTTPS ApplicationGatewayProtocol = "Https" +) + +// ApplicationGatewayRequestRoutingRuleType enumerates the values for +// application gateway request routing rule type. +type ApplicationGatewayRequestRoutingRuleType string + +const ( + // Basic specifies the basic state for application gateway request routing + // rule type. + Basic ApplicationGatewayRequestRoutingRuleType = "Basic" + // PathBasedRouting specifies the path based routing state for application + // gateway request routing rule type. + PathBasedRouting ApplicationGatewayRequestRoutingRuleType = "PathBasedRouting" +) + +// ApplicationGatewaySkuName enumerates the values for application gateway sku +// name. +type ApplicationGatewaySkuName string + +const ( + // StandardLarge specifies the standard large state for application + // gateway sku name. + StandardLarge ApplicationGatewaySkuName = "Standard_Large" + // StandardMedium specifies the standard medium state for application + // gateway sku name. + StandardMedium ApplicationGatewaySkuName = "Standard_Medium" + // StandardSmall specifies the standard small state for application + // gateway sku name. + StandardSmall ApplicationGatewaySkuName = "Standard_Small" +) + +// ApplicationGatewayTier enumerates the values for application gateway tier. +type ApplicationGatewayTier string + +const ( + // Standard specifies the standard state for application gateway tier. + Standard ApplicationGatewayTier = "Standard" +) + +// AuthorizationUseStatus enumerates the values for authorization use status. +type AuthorizationUseStatus string + +const ( + // Available specifies the available state for authorization use status. + Available AuthorizationUseStatus = "Available" + // InUse specifies the in use state for authorization use status. + InUse AuthorizationUseStatus = "InUse" +) + +// ExpressRouteCircuitPeeringAdvertisedPublicPrefixState enumerates the values +// for express route circuit peering advertised public prefix state. +type ExpressRouteCircuitPeeringAdvertisedPublicPrefixState string + +const ( + // Configured specifies the configured state for express route circuit + // peering advertised public prefix state. + Configured ExpressRouteCircuitPeeringAdvertisedPublicPrefixState = "Configured" + // Configuring specifies the configuring state for express route circuit + // peering advertised public prefix state. 
+ Configuring ExpressRouteCircuitPeeringAdvertisedPublicPrefixState = "Configuring" + // NotConfigured specifies the not configured state for express route + // circuit peering advertised public prefix state. + NotConfigured ExpressRouteCircuitPeeringAdvertisedPublicPrefixState = "NotConfigured" + // ValidationNeeded specifies the validation needed state for express + // route circuit peering advertised public prefix state. + ValidationNeeded ExpressRouteCircuitPeeringAdvertisedPublicPrefixState = "ValidationNeeded" +) + +// ExpressRouteCircuitPeeringState enumerates the values for express route +// circuit peering state. +type ExpressRouteCircuitPeeringState string + +const ( + // ExpressRouteCircuitPeeringStateDisabled specifies the express route + // circuit peering state disabled state for express route circuit peering + // state. + ExpressRouteCircuitPeeringStateDisabled ExpressRouteCircuitPeeringState = "Disabled" + // ExpressRouteCircuitPeeringStateEnabled specifies the express route + // circuit peering state enabled state for express route circuit peering + // state. + ExpressRouteCircuitPeeringStateEnabled ExpressRouteCircuitPeeringState = "Enabled" +) + +// ExpressRouteCircuitPeeringType enumerates the values for express route +// circuit peering type. +type ExpressRouteCircuitPeeringType string + +const ( + // AzurePrivatePeering specifies the azure private peering state for + // express route circuit peering type. + AzurePrivatePeering ExpressRouteCircuitPeeringType = "AzurePrivatePeering" + // AzurePublicPeering specifies the azure public peering state for express + // route circuit peering type. + AzurePublicPeering ExpressRouteCircuitPeeringType = "AzurePublicPeering" + // MicrosoftPeering specifies the microsoft peering state for express + // route circuit peering type. + MicrosoftPeering ExpressRouteCircuitPeeringType = "MicrosoftPeering" +) + +// ExpressRouteCircuitSkuFamily enumerates the values for express route +// circuit sku family. +type ExpressRouteCircuitSkuFamily string + +const ( + // MeteredData specifies the metered data state for express route circuit + // sku family. + MeteredData ExpressRouteCircuitSkuFamily = "MeteredData" + // UnlimitedData specifies the unlimited data state for express route + // circuit sku family. + UnlimitedData ExpressRouteCircuitSkuFamily = "UnlimitedData" +) + +// ExpressRouteCircuitSkuTier enumerates the values for express route circuit +// sku tier. +type ExpressRouteCircuitSkuTier string + +const ( + // ExpressRouteCircuitSkuTierPremium specifies the express route circuit + // sku tier premium state for express route circuit sku tier. + ExpressRouteCircuitSkuTierPremium ExpressRouteCircuitSkuTier = "Premium" + // ExpressRouteCircuitSkuTierStandard specifies the express route circuit + // sku tier standard state for express route circuit sku tier. + ExpressRouteCircuitSkuTierStandard ExpressRouteCircuitSkuTier = "Standard" +) + +// IPAllocationMethod enumerates the values for ip allocation method. +type IPAllocationMethod string + +const ( + // Dynamic specifies the dynamic state for ip allocation method. + Dynamic IPAllocationMethod = "Dynamic" + // Static specifies the static state for ip allocation method. + Static IPAllocationMethod = "Static" +) + +// LoadDistribution enumerates the values for load distribution. +type LoadDistribution string + +const ( + // Default specifies the default state for load distribution. + Default LoadDistribution = "Default" + // SourceIP specifies the source ip state for load distribution. 
+ SourceIP LoadDistribution = "SourceIP" + // SourceIPProtocol specifies the source ip protocol state for load + // distribution. + SourceIPProtocol LoadDistribution = "SourceIPProtocol" +) + +// OperationStatus enumerates the values for operation status. +type OperationStatus string + +const ( + // Failed specifies the failed state for operation status. + Failed OperationStatus = "Failed" + // InProgress specifies the in progress state for operation status. + InProgress OperationStatus = "InProgress" + // Succeeded specifies the succeeded state for operation status. + Succeeded OperationStatus = "Succeeded" +) + +// ProbeProtocol enumerates the values for probe protocol. +type ProbeProtocol string + +const ( + // ProbeProtocolHTTP specifies the probe protocol http state for probe + // protocol. + ProbeProtocolHTTP ProbeProtocol = "Http" + // ProbeProtocolTCP specifies the probe protocol tcp state for probe + // protocol. + ProbeProtocolTCP ProbeProtocol = "Tcp" +) + +// ProcessorArchitecture enumerates the values for processor architecture. +type ProcessorArchitecture string + +const ( + // Amd64 specifies the amd 64 state for processor architecture. + Amd64 ProcessorArchitecture = "Amd64" + // X86 specifies the x86 state for processor architecture. + X86 ProcessorArchitecture = "X86" +) + +// RouteNextHopType enumerates the values for route next hop type. +type RouteNextHopType string + +const ( + // RouteNextHopTypeInternet specifies the route next hop type internet + // state for route next hop type. + RouteNextHopTypeInternet RouteNextHopType = "Internet" + // RouteNextHopTypeNone specifies the route next hop type none state for + // route next hop type. + RouteNextHopTypeNone RouteNextHopType = "None" + // RouteNextHopTypeVirtualAppliance specifies the route next hop type + // virtual appliance state for route next hop type. + RouteNextHopTypeVirtualAppliance RouteNextHopType = "VirtualAppliance" + // RouteNextHopTypeVirtualNetworkGateway specifies the route next hop type + // virtual network gateway state for route next hop type. + RouteNextHopTypeVirtualNetworkGateway RouteNextHopType = "VirtualNetworkGateway" + // RouteNextHopTypeVnetLocal specifies the route next hop type vnet local + // state for route next hop type. + RouteNextHopTypeVnetLocal RouteNextHopType = "VnetLocal" +) + +// SecurityRuleAccess enumerates the values for security rule access. +type SecurityRuleAccess string + +const ( + // Allow specifies the allow state for security rule access. + Allow SecurityRuleAccess = "Allow" + // Deny specifies the deny state for security rule access. + Deny SecurityRuleAccess = "Deny" +) + +// SecurityRuleDirection enumerates the values for security rule direction. +type SecurityRuleDirection string + +const ( + // Inbound specifies the inbound state for security rule direction. + Inbound SecurityRuleDirection = "Inbound" + // Outbound specifies the outbound state for security rule direction. + Outbound SecurityRuleDirection = "Outbound" +) + +// SecurityRuleProtocol enumerates the values for security rule protocol. +type SecurityRuleProtocol string + +const ( + // Asterisk specifies the asterisk state for security rule protocol. + Asterisk SecurityRuleProtocol = "*" + // TCP specifies the tcp state for security rule protocol. + TCP SecurityRuleProtocol = "Tcp" + // UDP specifies the udp state for security rule protocol. + UDP SecurityRuleProtocol = "Udp" +) + +// ServiceProviderProvisioningState enumerates the values for service provider +// provisioning state. 
+type ServiceProviderProvisioningState string + +const ( + // Deprovisioning specifies the deprovisioning state for service provider + // provisioning state. + Deprovisioning ServiceProviderProvisioningState = "Deprovisioning" + // NotProvisioned specifies the not provisioned state for service provider + // provisioning state. + NotProvisioned ServiceProviderProvisioningState = "NotProvisioned" + // Provisioned specifies the provisioned state for service provider + // provisioning state. + Provisioned ServiceProviderProvisioningState = "Provisioned" + // Provisioning specifies the provisioning state for service provider + // provisioning state. + Provisioning ServiceProviderProvisioningState = "Provisioning" +) + +// TransportProtocol enumerates the values for transport protocol. +type TransportProtocol string + +const ( + // TransportProtocolTCP specifies the transport protocol tcp state for + // transport protocol. + TransportProtocolTCP TransportProtocol = "Tcp" + // TransportProtocolUDP specifies the transport protocol udp state for + // transport protocol. + TransportProtocolUDP TransportProtocol = "Udp" +) + +// UsageUnit enumerates the values for usage unit. +type UsageUnit string + +const ( + // Count specifies the count state for usage unit. + Count UsageUnit = "Count" +) + +// VirtualNetworkGatewayConnectionStatus enumerates the values for virtual +// network gateway connection status. +type VirtualNetworkGatewayConnectionStatus string + +const ( + // Connected specifies the connected state for virtual network gateway + // connection status. + Connected VirtualNetworkGatewayConnectionStatus = "Connected" + // Connecting specifies the connecting state for virtual network gateway + // connection status. + Connecting VirtualNetworkGatewayConnectionStatus = "Connecting" + // NotConnected specifies the not connected state for virtual network + // gateway connection status. + NotConnected VirtualNetworkGatewayConnectionStatus = "NotConnected" + // Unknown specifies the unknown state for virtual network gateway + // connection status. + Unknown VirtualNetworkGatewayConnectionStatus = "Unknown" +) + +// VirtualNetworkGatewayConnectionType enumerates the values for virtual +// network gateway connection type. +type VirtualNetworkGatewayConnectionType string + +const ( + // ExpressRoute specifies the express route state for virtual network + // gateway connection type. + ExpressRoute VirtualNetworkGatewayConnectionType = "ExpressRoute" + // IPsec specifies the i psec state for virtual network gateway connection + // type. + IPsec VirtualNetworkGatewayConnectionType = "IPsec" + // Vnet2Vnet specifies the vnet 2 vnet state for virtual network gateway + // connection type. + Vnet2Vnet VirtualNetworkGatewayConnectionType = "Vnet2Vnet" + // VPNClient specifies the vpn client state for virtual network gateway + // connection type. + VPNClient VirtualNetworkGatewayConnectionType = "VPNClient" +) + +// VirtualNetworkGatewaySkuName enumerates the values for virtual network +// gateway sku name. +type VirtualNetworkGatewaySkuName string + +const ( + // VirtualNetworkGatewaySkuNameBasic specifies the virtual network gateway + // sku name basic state for virtual network gateway sku name. + VirtualNetworkGatewaySkuNameBasic VirtualNetworkGatewaySkuName = "Basic" + // VirtualNetworkGatewaySkuNameHighPerformance specifies the virtual + // network gateway sku name high performance state for virtual network + // gateway sku name. 
+ VirtualNetworkGatewaySkuNameHighPerformance VirtualNetworkGatewaySkuName = "HighPerformance" + // VirtualNetworkGatewaySkuNameStandard specifies the virtual network + // gateway sku name standard state for virtual network gateway sku name. + VirtualNetworkGatewaySkuNameStandard VirtualNetworkGatewaySkuName = "Standard" +) + +// VirtualNetworkGatewaySkuTier enumerates the values for virtual network +// gateway sku tier. +type VirtualNetworkGatewaySkuTier string + +const ( + // VirtualNetworkGatewaySkuTierBasic specifies the virtual network gateway + // sku tier basic state for virtual network gateway sku tier. + VirtualNetworkGatewaySkuTierBasic VirtualNetworkGatewaySkuTier = "Basic" + // VirtualNetworkGatewaySkuTierHighPerformance specifies the virtual + // network gateway sku tier high performance state for virtual network + // gateway sku tier. + VirtualNetworkGatewaySkuTierHighPerformance VirtualNetworkGatewaySkuTier = "HighPerformance" + // VirtualNetworkGatewaySkuTierStandard specifies the virtual network + // gateway sku tier standard state for virtual network gateway sku tier. + VirtualNetworkGatewaySkuTierStandard VirtualNetworkGatewaySkuTier = "Standard" +) + +// VirtualNetworkGatewayType enumerates the values for virtual network gateway +// type. +type VirtualNetworkGatewayType string + +const ( + // VirtualNetworkGatewayTypeExpressRoute specifies the virtual network + // gateway type express route state for virtual network gateway type. + VirtualNetworkGatewayTypeExpressRoute VirtualNetworkGatewayType = "ExpressRoute" + // VirtualNetworkGatewayTypeVpn specifies the virtual network gateway type + // vpn state for virtual network gateway type. + VirtualNetworkGatewayTypeVpn VirtualNetworkGatewayType = "Vpn" +) + +// VpnType enumerates the values for vpn type. +type VpnType string + +const ( + // PolicyBased specifies the policy based state for vpn type. + PolicyBased VpnType = "PolicyBased" + // RouteBased specifies the route based state for vpn type. 
+	RouteBased VpnType = "RouteBased"
+)
+
+// AddressSpace is addressSpace contains an array of IP address ranges that
+// can be used by subnets
+type AddressSpace struct {
+	AddressPrefixes *[]string `json:"addressPrefixes,omitempty"`
+}
+
+// ApplicationGateway is applicationGateways resource
+type ApplicationGateway struct {
+	autorest.Response `json:"-"`
+	ID                *string                             `json:"id,omitempty"`
+	Name              *string                             `json:"name,omitempty"`
+	Type              *string                             `json:"type,omitempty"`
+	Location          *string                             `json:"location,omitempty"`
+	Tags              *map[string]*string                 `json:"tags,omitempty"`
+	Properties        *ApplicationGatewayPropertiesFormat `json:"properties,omitempty"`
+	Etag              *string                             `json:"etag,omitempty"`
+}
+
+// ApplicationGatewayBackendAddress is backend Address of application gateway
+type ApplicationGatewayBackendAddress struct {
+	Fqdn      *string `json:"fqdn,omitempty"`
+	IPAddress *string `json:"ipAddress,omitempty"`
+}
+
+// ApplicationGatewayBackendAddressPool is backend Address Pool of application
+// gateway
+type ApplicationGatewayBackendAddressPool struct {
+	ID         *string                                               `json:"id,omitempty"`
+	Properties *ApplicationGatewayBackendAddressPoolPropertiesFormat `json:"properties,omitempty"`
+	Name       *string                                               `json:"name,omitempty"`
+	Etag       *string                                               `json:"etag,omitempty"`
+}
+
+// ApplicationGatewayBackendAddressPoolPropertiesFormat is properties of
+// Backend Address Pool of application gateway
+type ApplicationGatewayBackendAddressPoolPropertiesFormat struct {
+	BackendIPConfigurations *[]SubResource                      `json:"backendIPConfigurations,omitempty"`
+	BackendAddresses        *[]ApplicationGatewayBackendAddress `json:"backendAddresses,omitempty"`
+	ProvisioningState       *string                             `json:"provisioningState,omitempty"`
+}
+
+// ApplicationGatewayBackendHTTPSettings is backend address pool settings of
+// application gateway
+type ApplicationGatewayBackendHTTPSettings struct {
+	ID         *string                                                `json:"id,omitempty"`
+	Properties *ApplicationGatewayBackendHTTPSettingsPropertiesFormat `json:"properties,omitempty"`
+	Name       *string                                                `json:"name,omitempty"`
+	Etag       *string                                                `json:"etag,omitempty"`
+}
+
+// ApplicationGatewayBackendHTTPSettingsPropertiesFormat is properties of
+// Backend address pool settings of application gateway
+type ApplicationGatewayBackendHTTPSettingsPropertiesFormat struct {
+	Port                *int                                  `json:"port,omitempty"`
+	Protocol            ApplicationGatewayProtocol            `json:"protocol,omitempty"`
+	CookieBasedAffinity ApplicationGatewayCookieBasedAffinity `json:"cookieBasedAffinity,omitempty"`
+	RequestTimeout      *int                                  `json:"requestTimeout,omitempty"`
+	Probe               *SubResource                          `json:"probe,omitempty"`
+	ProvisioningState   *string                               `json:"provisioningState,omitempty"`
+}
+
+// ApplicationGatewayFrontendIPConfiguration is frontend IP configuration of
+// application gateway
+type ApplicationGatewayFrontendIPConfiguration struct {
+	ID         *string                                                    `json:"id,omitempty"`
+	Properties *ApplicationGatewayFrontendIPConfigurationPropertiesFormat `json:"properties,omitempty"`
+	Name       *string                                                    `json:"name,omitempty"`
+	Etag       *string                                                    `json:"etag,omitempty"`
+}
+
+// ApplicationGatewayFrontendIPConfigurationPropertiesFormat is properties of
+// Frontend IP configuration of application gateway
+type ApplicationGatewayFrontendIPConfigurationPropertiesFormat struct {
+	PrivateIPAddress          *string            `json:"privateIPAddress,omitempty"`
+	PrivateIPAllocationMethod IPAllocationMethod `json:"privateIPAllocationMethod,omitempty"`
+	Subnet                    *SubResource       `json:"subnet,omitempty"`
+	PublicIPAddress           *SubResource       `json:"publicIPAddress,omitempty"`
+	ProvisioningState         *string            `json:"provisioningState,omitempty"`
+}
+
+// ApplicationGatewayFrontendPort is frontend Port of application gateway
+type ApplicationGatewayFrontendPort struct {
+	ID         *string                                         `json:"id,omitempty"`
+	Properties *ApplicationGatewayFrontendPortPropertiesFormat `json:"properties,omitempty"`
+	Name       *string                                         `json:"name,omitempty"`
+	Etag       *string                                         `json:"etag,omitempty"`
+}
+
+// ApplicationGatewayFrontendPortPropertiesFormat is properties of Frontend
+// Port of application gateway
+type ApplicationGatewayFrontendPortPropertiesFormat struct {
+	Port              *int    `json:"port,omitempty"`
+	ProvisioningState *string `json:"provisioningState,omitempty"`
+}
+
+// ApplicationGatewayHTTPListener is http listener of application gateway
+type ApplicationGatewayHTTPListener struct {
+	ID         *string                                         `json:"id,omitempty"`
+	Properties *ApplicationGatewayHTTPListenerPropertiesFormat `json:"properties,omitempty"`
+	Name       *string                                         `json:"name,omitempty"`
+	Etag       *string                                         `json:"etag,omitempty"`
+}
+
+// ApplicationGatewayHTTPListenerPropertiesFormat is properties of Http
+// listener of application gateway
+type ApplicationGatewayHTTPListenerPropertiesFormat struct {
+	FrontendIPConfiguration     *SubResource               `json:"frontendIPConfiguration,omitempty"`
+	FrontendPort                *SubResource               `json:"frontendPort,omitempty"`
+	Protocol                    ApplicationGatewayProtocol `json:"protocol,omitempty"`
+	HostName                    *string                    `json:"hostName,omitempty"`
+	SslCertificate              *SubResource               `json:"sslCertificate,omitempty"`
+	RequireServerNameIndication *string                    `json:"requireServerNameIndication,omitempty"`
+	ProvisioningState           *string                    `json:"provisioningState,omitempty"`
+}
+
+// ApplicationGatewayIPConfiguration is iP configuration of application gateway
+type ApplicationGatewayIPConfiguration struct {
+	ID         *string                                            `json:"id,omitempty"`
+	Properties *ApplicationGatewayIPConfigurationPropertiesFormat `json:"properties,omitempty"`
+	Name       *string                                            `json:"name,omitempty"`
+	Etag       *string                                            `json:"etag,omitempty"`
+}
+
+// ApplicationGatewayIPConfigurationPropertiesFormat is properties of IP
+// configuration of application gateway
+type ApplicationGatewayIPConfigurationPropertiesFormat struct {
+	Subnet            *SubResource `json:"subnet,omitempty"`
+	ProvisioningState *string      `json:"provisioningState,omitempty"`
+}
+
+// ApplicationGatewayListResult is response for ListApplicationGateways Api
+// service call
+type ApplicationGatewayListResult struct {
+	autorest.Response `json:"-"`
+	Value             *[]ApplicationGateway `json:"value,omitempty"`
+	NextLink          *string               `json:"nextLink,omitempty"`
+}
+
+// ApplicationGatewayListResultPreparer prepares a request to retrieve the next set of results. It returns
+// nil if no more results exist.
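+//
+// A sketch of the nil-request convention (editorial, not generated code;
+// result is a hypothetical ApplicationGatewayListResult value):
+//
+//	req, err := result.ApplicationGatewayListResultPreparer()
+//	if err == nil && req == nil {
+//		// no NextLink: the listing is complete
+//	}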
+func (client ApplicationGatewayListResult) ApplicationGatewayListResultPreparer() (*http.Request, error) { + if client.NextLink == nil || len(to.String(client.NextLink)) <= 0 { + return nil, nil + } + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(to.String(client.NextLink))) +} + +// ApplicationGatewayPathRule is path rule of URL path map of application +// gateway +type ApplicationGatewayPathRule struct { + ID *string `json:"id,omitempty"` + Properties *ApplicationGatewayPathRulePropertiesFormat `json:"properties,omitempty"` + Name *string `json:"name,omitempty"` + Etag *string `json:"etag,omitempty"` +} + +// ApplicationGatewayPathRulePropertiesFormat is properties of probe of +// application gateway +type ApplicationGatewayPathRulePropertiesFormat struct { + Paths *[]string `json:"paths,omitempty"` + BackendAddressPool *SubResource `json:"backendAddressPool,omitempty"` + BackendHTTPSettings *SubResource `json:"backendHttpSettings,omitempty"` + ProvisioningState *string `json:"provisioningState,omitempty"` +} + +// ApplicationGatewayProbe is probe of application gateway +type ApplicationGatewayProbe struct { + ID *string `json:"id,omitempty"` + Properties *ApplicationGatewayProbePropertiesFormat `json:"properties,omitempty"` + Name *string `json:"name,omitempty"` + Etag *string `json:"etag,omitempty"` +} + +// ApplicationGatewayProbePropertiesFormat is properties of probe of +// application gateway +type ApplicationGatewayProbePropertiesFormat struct { + Protocol ApplicationGatewayProtocol `json:"protocol,omitempty"` + Host *string `json:"host,omitempty"` + Path *string `json:"path,omitempty"` + Interval *int `json:"interval,omitempty"` + Timeout *int `json:"timeout,omitempty"` + UnhealthyThreshold *int `json:"unhealthyThreshold,omitempty"` + ProvisioningState *string `json:"provisioningState,omitempty"` +} + +// ApplicationGatewayPropertiesFormat is properties of Application Gateway +type ApplicationGatewayPropertiesFormat struct { + Sku *ApplicationGatewaySku `json:"sku,omitempty"` + OperationalState ApplicationGatewayOperationalState `json:"operationalState,omitempty"` + GatewayIPConfigurations *[]ApplicationGatewayIPConfiguration `json:"gatewayIPConfigurations,omitempty"` + SslCertificates *[]ApplicationGatewaySslCertificate `json:"sslCertificates,omitempty"` + FrontendIPConfigurations *[]ApplicationGatewayFrontendIPConfiguration `json:"frontendIPConfigurations,omitempty"` + FrontendPorts *[]ApplicationGatewayFrontendPort `json:"frontendPorts,omitempty"` + Probes *[]ApplicationGatewayProbe `json:"probes,omitempty"` + BackendAddressPools *[]ApplicationGatewayBackendAddressPool `json:"backendAddressPools,omitempty"` + BackendHTTPSettingsCollection *[]ApplicationGatewayBackendHTTPSettings `json:"backendHttpSettingsCollection,omitempty"` + HTTPListeners *[]ApplicationGatewayHTTPListener `json:"httpListeners,omitempty"` + URLPathMaps *[]ApplicationGatewayURLPathMap `json:"urlPathMaps,omitempty"` + RequestRoutingRules *[]ApplicationGatewayRequestRoutingRule `json:"requestRoutingRules,omitempty"` + ResourceGUID *string `json:"resourceGuid,omitempty"` + ProvisioningState *string `json:"provisioningState,omitempty"` +} + +// ApplicationGatewayRequestRoutingRule is request routing rule of application +// gateway +type ApplicationGatewayRequestRoutingRule struct { + ID *string `json:"id,omitempty"` + Properties *ApplicationGatewayRequestRoutingRulePropertiesFormat `json:"properties,omitempty"` + Name *string `json:"name,omitempty"` + 
Etag *string `json:"etag,omitempty"` +} + +// ApplicationGatewayRequestRoutingRulePropertiesFormat is properties of +// Request routing rule of application gateway +type ApplicationGatewayRequestRoutingRulePropertiesFormat struct { + RuleType ApplicationGatewayRequestRoutingRuleType `json:"ruleType,omitempty"` + BackendAddressPool *SubResource `json:"backendAddressPool,omitempty"` + BackendHTTPSettings *SubResource `json:"backendHttpSettings,omitempty"` + HTTPListener *SubResource `json:"httpListener,omitempty"` + URLPathMap *SubResource `json:"urlPathMap,omitempty"` + ProvisioningState *string `json:"provisioningState,omitempty"` +} + +// ApplicationGatewaySku is sKU of application gateway +type ApplicationGatewaySku struct { + Name ApplicationGatewaySkuName `json:"name,omitempty"` + Tier ApplicationGatewayTier `json:"tier,omitempty"` + Capacity *int `json:"capacity,omitempty"` +} + +// ApplicationGatewaySslCertificate is sSL certificates of application gateway +type ApplicationGatewaySslCertificate struct { + ID *string `json:"id,omitempty"` + Properties *ApplicationGatewaySslCertificatePropertiesFormat `json:"properties,omitempty"` + Name *string `json:"name,omitempty"` + Etag *string `json:"etag,omitempty"` +} + +// ApplicationGatewaySslCertificatePropertiesFormat is properties of SSL +// certificates of application gateway +type ApplicationGatewaySslCertificatePropertiesFormat struct { + Data *string `json:"data,omitempty"` + Password *string `json:"password,omitempty"` + PublicCertData *string `json:"publicCertData,omitempty"` + ProvisioningState *string `json:"provisioningState,omitempty"` +} + +// ApplicationGatewayURLPathMap is urlPathMap of application gateway +type ApplicationGatewayURLPathMap struct { + ID *string `json:"id,omitempty"` + Properties *ApplicationGatewayURLPathMapPropertiesFormat `json:"properties,omitempty"` + Name *string `json:"name,omitempty"` + Etag *string `json:"etag,omitempty"` +} + +// ApplicationGatewayURLPathMapPropertiesFormat is properties of probe of +// application gateway +type ApplicationGatewayURLPathMapPropertiesFormat struct { + DefaultBackendAddressPool *SubResource `json:"defaultBackendAddressPool,omitempty"` + DefaultBackendHTTPSettings *SubResource `json:"defaultBackendHttpSettings,omitempty"` + PathRules *[]ApplicationGatewayPathRule `json:"pathRules,omitempty"` + ProvisioningState *string `json:"provisioningState,omitempty"` +} + +// AuthorizationListResult is response for ListAuthorizations Api service +// callRetrieves all authorizations that belongs to an ExpressRouteCircuit +type AuthorizationListResult struct { + autorest.Response `json:"-"` + Value *[]ExpressRouteCircuitAuthorization `json:"value,omitempty"` + NextLink *string `json:"nextLink,omitempty"` +} + +// AuthorizationListResultPreparer prepares a request to retrieve the next set of results. It returns +// nil if no more results exist. 
+func (client AuthorizationListResult) AuthorizationListResultPreparer() (*http.Request, error) { + if client.NextLink == nil || len(to.String(client.NextLink)) <= 0 { + return nil, nil + } + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(to.String(client.NextLink))) +} + +// AuthorizationPropertiesFormat is +type AuthorizationPropertiesFormat struct { + AuthorizationKey *string `json:"authorizationKey,omitempty"` + AuthorizationUseStatus AuthorizationUseStatus `json:"authorizationUseStatus,omitempty"` + ProvisioningState *string `json:"provisioningState,omitempty"` +} + +// AzureAsyncOperationResult is the response body contains the status of the +// specified asynchronous operation, indicating whether it has succeeded, is +// inprogress, or has failed. Note that this status is distinct from the HTTP +// status code returned for the Get Operation Status operation itself. If the +// asynchronous operation succeeded, the response body includes the HTTP +// status code for the successful request. If the asynchronous operation +// failed, the response body includes the HTTP status code for the failed +// request and error information regarding the failure. +type AzureAsyncOperationResult struct { + Status OperationStatus `json:"status,omitempty"` + Error *Error `json:"error,omitempty"` +} + +// BackendAddressPool is pool of backend IP addresseses +type BackendAddressPool struct { + ID *string `json:"id,omitempty"` + Properties *BackendAddressPoolPropertiesFormat `json:"properties,omitempty"` + Name *string `json:"name,omitempty"` + Etag *string `json:"etag,omitempty"` +} + +// BackendAddressPoolPropertiesFormat is properties of BackendAddressPool +type BackendAddressPoolPropertiesFormat struct { + BackendIPConfigurations *[]InterfaceIPConfiguration `json:"backendIPConfigurations,omitempty"` + LoadBalancingRules *[]SubResource `json:"loadBalancingRules,omitempty"` + OutboundNatRule *SubResource `json:"outboundNatRule,omitempty"` + ProvisioningState *string `json:"provisioningState,omitempty"` +} + +// ConnectionResetSharedKey is +type ConnectionResetSharedKey struct { + autorest.Response `json:"-"` + KeyLength *int32 `json:"keyLength,omitempty"` +} + +// ConnectionSharedKey is response for GetConnectionSharedKey Api servive call +type ConnectionSharedKey struct { + autorest.Response `json:"-"` + Value *string `json:"value,omitempty"` +} + +// ConnectionSharedKeyResult is response for CheckConnectionSharedKey Api +// servive call +type ConnectionSharedKeyResult struct { + autorest.Response `json:"-"` + Value *string `json:"value,omitempty"` +} + +// DhcpOptions is dHCPOptions contains an array of DNS servers available to +// VMs deployed in the virtual networkStandard DHCP option for a subnet +// overrides VNET DHCP options. 
+type DhcpOptions struct { + DNSServers *[]string `json:"dnsServers,omitempty"` +} + +// DNSNameAvailabilityResult is response for CheckDnsNameAvailability Api +// servive call +type DNSNameAvailabilityResult struct { + autorest.Response `json:"-"` + Available *bool `json:"available,omitempty"` +} + +// Error is +type Error struct { + Code *string `json:"code,omitempty"` + Message *string `json:"message,omitempty"` + Target *string `json:"target,omitempty"` + Details *[]ErrorDetails `json:"details,omitempty"` + InnerError *string `json:"innerError,omitempty"` +} + +// ErrorDetails is +type ErrorDetails struct { + Code *string `json:"code,omitempty"` + Target *string `json:"target,omitempty"` + Message *string `json:"message,omitempty"` +} + +// ExpressRouteCircuit is expressRouteCircuit resource +type ExpressRouteCircuit struct { + autorest.Response `json:"-"` + ID *string `json:"id,omitempty"` + Name *string `json:"name,omitempty"` + Type *string `json:"type,omitempty"` + Location *string `json:"location,omitempty"` + Tags *map[string]*string `json:"tags,omitempty"` + Sku *ExpressRouteCircuitSku `json:"sku,omitempty"` + Properties *ExpressRouteCircuitPropertiesFormat `json:"properties,omitempty"` + Etag *string `json:"etag,omitempty"` +} + +// ExpressRouteCircuitArpTable is the arp table associated with the +// ExpressRouteCircuit +type ExpressRouteCircuitArpTable struct { + IPAddress *string `json:"ipAddress,omitempty"` + MacAddress *string `json:"macAddress,omitempty"` +} + +// ExpressRouteCircuitAuthorization is authorization in a ExpressRouteCircuit +// resource +type ExpressRouteCircuitAuthorization struct { + autorest.Response `json:"-"` + ID *string `json:"id,omitempty"` + Properties *AuthorizationPropertiesFormat `json:"properties,omitempty"` + Name *string `json:"name,omitempty"` + Etag *string `json:"etag,omitempty"` +} + +// ExpressRouteCircuitListResult is response for ListExpressRouteCircuit Api +// service call +type ExpressRouteCircuitListResult struct { + autorest.Response `json:"-"` + Value *[]ExpressRouteCircuit `json:"value,omitempty"` + NextLink *string `json:"nextLink,omitempty"` +} + +// ExpressRouteCircuitListResultPreparer prepares a request to retrieve the next set of results. It returns +// nil if no more results exist. 
+func (client ExpressRouteCircuitListResult) ExpressRouteCircuitListResultPreparer() (*http.Request, error) { + if client.NextLink == nil || len(to.String(client.NextLink)) <= 0 { + return nil, nil + } + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(to.String(client.NextLink))) +} + +// ExpressRouteCircuitPeering is peering in a ExpressRouteCircuit resource +type ExpressRouteCircuitPeering struct { + autorest.Response `json:"-"` + ID *string `json:"id,omitempty"` + Properties *ExpressRouteCircuitPeeringPropertiesFormat `json:"properties,omitempty"` + Name *string `json:"name,omitempty"` + Etag *string `json:"etag,omitempty"` +} + +// ExpressRouteCircuitPeeringConfig is specfies the peering config +type ExpressRouteCircuitPeeringConfig struct { + AdvertisedPublicPrefixes *[]string `json:"advertisedPublicPrefixes,omitempty"` + AdvertisedPublicPrefixesState ExpressRouteCircuitPeeringAdvertisedPublicPrefixState `json:"advertisedPublicPrefixesState,omitempty"` + CustomerASN *int `json:"customerASN,omitempty"` + RoutingRegistryName *string `json:"routingRegistryName,omitempty"` +} + +// ExpressRouteCircuitPeeringListResult is response for ListPeering Api +// service callRetrieves all Peerings that belongs to an ExpressRouteCircuit +type ExpressRouteCircuitPeeringListResult struct { + autorest.Response `json:"-"` + Value *[]ExpressRouteCircuitPeering `json:"value,omitempty"` + NextLink *string `json:"nextLink,omitempty"` +} + +// ExpressRouteCircuitPeeringListResultPreparer prepares a request to retrieve the next set of results. It returns +// nil if no more results exist. +func (client ExpressRouteCircuitPeeringListResult) ExpressRouteCircuitPeeringListResultPreparer() (*http.Request, error) { + if client.NextLink == nil || len(to.String(client.NextLink)) <= 0 { + return nil, nil + } + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(to.String(client.NextLink))) +} + +// ExpressRouteCircuitPeeringPropertiesFormat is +type ExpressRouteCircuitPeeringPropertiesFormat struct { + PeeringType ExpressRouteCircuitPeeringType `json:"peeringType,omitempty"` + State ExpressRouteCircuitPeeringState `json:"state,omitempty"` + AzureASN *int `json:"azureASN,omitempty"` + PeerASN *int `json:"peerASN,omitempty"` + PrimaryPeerAddressPrefix *string `json:"primaryPeerAddressPrefix,omitempty"` + SecondaryPeerAddressPrefix *string `json:"secondaryPeerAddressPrefix,omitempty"` + PrimaryAzurePort *string `json:"primaryAzurePort,omitempty"` + SecondaryAzurePort *string `json:"secondaryAzurePort,omitempty"` + SharedKey *string `json:"sharedKey,omitempty"` + VlanID *int `json:"vlanId,omitempty"` + MicrosoftPeeringConfig *ExpressRouteCircuitPeeringConfig `json:"microsoftPeeringConfig,omitempty"` + Stats *ExpressRouteCircuitStats `json:"stats,omitempty"` + ProvisioningState *string `json:"provisioningState,omitempty"` +} + +// ExpressRouteCircuitPropertiesFormat is properties of ExpressRouteCircuit +type ExpressRouteCircuitPropertiesFormat struct { + CircuitProvisioningState *string `json:"circuitProvisioningState,omitempty"` + ServiceProviderProvisioningState ServiceProviderProvisioningState `json:"serviceProviderProvisioningState,omitempty"` + Authorizations *[]ExpressRouteCircuitAuthorization `json:"authorizations,omitempty"` + Peerings *[]ExpressRouteCircuitPeering `json:"peerings,omitempty"` + ServiceKey *string `json:"serviceKey,omitempty"` + ServiceProviderNotes *string 
`json:"serviceProviderNotes,omitempty"` + ServiceProviderProperties *ExpressRouteCircuitServiceProviderProperties `json:"serviceProviderProperties,omitempty"` + ProvisioningState *string `json:"provisioningState,omitempty"` +} + +// ExpressRouteCircuitRoutesTable is the routes table associated with the +// ExpressRouteCircuit +type ExpressRouteCircuitRoutesTable struct { + AddressPrefix *string `json:"addressPrefix,omitempty"` + NextHopType RouteNextHopType `json:"nextHopType,omitempty"` + NextHopIP *string `json:"nextHopIP,omitempty"` + AsPath *string `json:"asPath,omitempty"` +} + +// ExpressRouteCircuitsArpTableListResult is response for ListArpTable +// associated with the Express Route Circuits Api +type ExpressRouteCircuitsArpTableListResult struct { + autorest.Response `json:"-"` + Value *[]ExpressRouteCircuitArpTable `json:"value,omitempty"` + NextLink *string `json:"nextLink,omitempty"` +} + +// ExpressRouteCircuitsArpTableListResultPreparer prepares a request to retrieve the next set of results. It returns +// nil if no more results exist. +func (client ExpressRouteCircuitsArpTableListResult) ExpressRouteCircuitsArpTableListResultPreparer() (*http.Request, error) { + if client.NextLink == nil || len(to.String(client.NextLink)) <= 0 { + return nil, nil + } + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(to.String(client.NextLink))) +} + +// ExpressRouteCircuitServiceProviderProperties is contains +// ServiceProviderProperties in an ExpressRouteCircuit +type ExpressRouteCircuitServiceProviderProperties struct { + ServiceProviderName *string `json:"serviceProviderName,omitempty"` + PeeringLocation *string `json:"peeringLocation,omitempty"` + BandwidthInMbps *int `json:"bandwidthInMbps,omitempty"` +} + +// ExpressRouteCircuitSku is contains sku in an ExpressRouteCircuit +type ExpressRouteCircuitSku struct { + Name *string `json:"name,omitempty"` + Tier ExpressRouteCircuitSkuTier `json:"tier,omitempty"` + Family ExpressRouteCircuitSkuFamily `json:"family,omitempty"` +} + +// ExpressRouteCircuitsRoutesTableListResult is response for ListRoutesTable +// associated with the Express Route Circuits Api +type ExpressRouteCircuitsRoutesTableListResult struct { + autorest.Response `json:"-"` + Value *[]ExpressRouteCircuitRoutesTable `json:"value,omitempty"` + NextLink *string `json:"nextLink,omitempty"` +} + +// ExpressRouteCircuitsRoutesTableListResultPreparer prepares a request to retrieve the next set of results. It returns +// nil if no more results exist. +func (client ExpressRouteCircuitsRoutesTableListResult) ExpressRouteCircuitsRoutesTableListResultPreparer() (*http.Request, error) { + if client.NextLink == nil || len(to.String(client.NextLink)) <= 0 { + return nil, nil + } + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(to.String(client.NextLink))) +} + +// ExpressRouteCircuitsStatsListResult is response for ListStats from Express +// Route Circuits Api service call +type ExpressRouteCircuitsStatsListResult struct { + autorest.Response `json:"-"` + Value *[]ExpressRouteCircuitStats `json:"value,omitempty"` + NextLink *string `json:"nextLink,omitempty"` +} + +// ExpressRouteCircuitsStatsListResultPreparer prepares a request to retrieve the next set of results. It returns +// nil if no more results exist. 
+func (client ExpressRouteCircuitsStatsListResult) ExpressRouteCircuitsStatsListResultPreparer() (*http.Request, error) { + if client.NextLink == nil || len(to.String(client.NextLink)) <= 0 { + return nil, nil + } + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(to.String(client.NextLink))) +} + +// ExpressRouteCircuitStats is contains Stats associated with the peering +type ExpressRouteCircuitStats struct { + BytesIn *int `json:"bytesIn,omitempty"` + BytesOut *int `json:"bytesOut,omitempty"` +} + +// ExpressRouteServiceProvider is expressRouteResourceProvider object +type ExpressRouteServiceProvider struct { + ID *string `json:"id,omitempty"` + Name *string `json:"name,omitempty"` + Type *string `json:"type,omitempty"` + Properties *ExpressRouteServiceProviderPropertiesFormat `json:"properties,omitempty"` +} + +// ExpressRouteServiceProviderBandwidthsOffered is contains Bandwidths offered +// in ExpressRouteServiceProviders +type ExpressRouteServiceProviderBandwidthsOffered struct { + OfferName *string `json:"offerName,omitempty"` + ValueInMbps *int `json:"valueInMbps,omitempty"` +} + +// ExpressRouteServiceProviderListResult is response for +// ListExpressRouteServiceProvider Api service call +type ExpressRouteServiceProviderListResult struct { + autorest.Response `json:"-"` + Value *[]ExpressRouteServiceProvider `json:"value,omitempty"` + NextLink *string `json:"nextLink,omitempty"` +} + +// ExpressRouteServiceProviderListResultPreparer prepares a request to retrieve the next set of results. It returns +// nil if no more results exist. +func (client ExpressRouteServiceProviderListResult) ExpressRouteServiceProviderListResultPreparer() (*http.Request, error) { + if client.NextLink == nil || len(to.String(client.NextLink)) <= 0 { + return nil, nil + } + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(to.String(client.NextLink))) +} + +// ExpressRouteServiceProviderPropertiesFormat is properties of +// ExpressRouteServiceProvider +type ExpressRouteServiceProviderPropertiesFormat struct { + PeeringLocations *[]string `json:"peeringLocations,omitempty"` + BandwidthsOffered *[]ExpressRouteServiceProviderBandwidthsOffered `json:"bandwidthsOffered,omitempty"` + ProvisioningState *string `json:"provisioningState,omitempty"` +} + +// FrontendIPConfiguration is frontend IP address of the load balancer +type FrontendIPConfiguration struct { + ID *string `json:"id,omitempty"` + Properties *FrontendIPConfigurationPropertiesFormat `json:"properties,omitempty"` + Name *string `json:"name,omitempty"` + Etag *string `json:"etag,omitempty"` +} + +// FrontendIPConfigurationPropertiesFormat is properties of Frontend IP +// Configuration of the load balancer +type FrontendIPConfigurationPropertiesFormat struct { + InboundNatRules *[]SubResource `json:"inboundNatRules,omitempty"` + InboundNatPools *[]SubResource `json:"inboundNatPools,omitempty"` + OutboundNatRules *[]SubResource `json:"outboundNatRules,omitempty"` + LoadBalancingRules *[]SubResource `json:"loadBalancingRules,omitempty"` + PrivateIPAddress *string `json:"privateIPAddress,omitempty"` + PrivateIPAllocationMethod IPAllocationMethod `json:"privateIPAllocationMethod,omitempty"` + Subnet *Subnet `json:"subnet,omitempty"` + PublicIPAddress *PublicIPAddress `json:"publicIPAddress,omitempty"` + ProvisioningState *string `json:"provisioningState,omitempty"` +} + +// InboundNatPool is inbound NAT pool of the loadbalancer +type 
InboundNatPool struct { + ID *string `json:"id,omitempty"` + Properties *InboundNatPoolPropertiesFormat `json:"properties,omitempty"` + Name *string `json:"name,omitempty"` + Etag *string `json:"etag,omitempty"` +} + +// InboundNatPoolPropertiesFormat is properties of Inbound NAT pool +type InboundNatPoolPropertiesFormat struct { + FrontendIPConfiguration *SubResource `json:"frontendIPConfiguration,omitempty"` + Protocol TransportProtocol `json:"protocol,omitempty"` + FrontendPortRangeStart *int `json:"frontendPortRangeStart,omitempty"` + FrontendPortRangeEnd *int `json:"frontendPortRangeEnd,omitempty"` + BackendPort *int `json:"backendPort,omitempty"` + ProvisioningState *string `json:"provisioningState,omitempty"` +} + +// InboundNatRule is inbound NAT rule of the loadbalancer +type InboundNatRule struct { + ID *string `json:"id,omitempty"` + Properties *InboundNatRulePropertiesFormat `json:"properties,omitempty"` + Name *string `json:"name,omitempty"` + Etag *string `json:"etag,omitempty"` +} + +// InboundNatRulePropertiesFormat is properties of Inbound NAT rule +type InboundNatRulePropertiesFormat struct { + FrontendIPConfiguration *SubResource `json:"frontendIPConfiguration,omitempty"` + BackendIPConfiguration *InterfaceIPConfiguration `json:"backendIPConfiguration,omitempty"` + Protocol TransportProtocol `json:"protocol,omitempty"` + FrontendPort *int `json:"frontendPort,omitempty"` + BackendPort *int `json:"backendPort,omitempty"` + IdleTimeoutInMinutes *int `json:"idleTimeoutInMinutes,omitempty"` + EnableFloatingIP *bool `json:"enableFloatingIP,omitempty"` + ProvisioningState *string `json:"provisioningState,omitempty"` +} + +// Interface is a NetworkInterface in a resource group +type Interface struct { + autorest.Response `json:"-"` + ID *string `json:"id,omitempty"` + Name *string `json:"name,omitempty"` + Type *string `json:"type,omitempty"` + Location *string `json:"location,omitempty"` + Tags *map[string]*string `json:"tags,omitempty"` + Properties *InterfacePropertiesFormat `json:"properties,omitempty"` + Etag *string `json:"etag,omitempty"` +} + +// InterfaceDNSSettings is dns Settings of a network interface +type InterfaceDNSSettings struct { + DNSServers *[]string `json:"dnsServers,omitempty"` + AppliedDNSServers *[]string `json:"appliedDnsServers,omitempty"` + InternalDNSNameLabel *string `json:"internalDnsNameLabel,omitempty"` + InternalFqdn *string `json:"internalFqdn,omitempty"` +} + +// InterfaceIPConfiguration is iPConfiguration in a NetworkInterface +type InterfaceIPConfiguration struct { + ID *string `json:"id,omitempty"` + Properties *InterfaceIPConfigurationPropertiesFormat `json:"properties,omitempty"` + Name *string `json:"name,omitempty"` + Etag *string `json:"etag,omitempty"` +} + +// InterfaceIPConfigurationPropertiesFormat is properties of IPConfiguration +type InterfaceIPConfigurationPropertiesFormat struct { + LoadBalancerBackendAddressPools *[]BackendAddressPool `json:"loadBalancerBackendAddressPools,omitempty"` + LoadBalancerInboundNatRules *[]InboundNatRule `json:"loadBalancerInboundNatRules,omitempty"` + PrivateIPAddress *string `json:"privateIPAddress,omitempty"` + PrivateIPAllocationMethod *string `json:"privateIPAllocationMethod,omitempty"` + Subnet *Subnet `json:"subnet,omitempty"` + PublicIPAddress *PublicIPAddress `json:"publicIPAddress,omitempty"` + ProvisioningState *string `json:"provisioningState,omitempty"` +} + +// InterfaceListResult is response for ListNetworkInterface Api service call +type InterfaceListResult struct { + 
autorest.Response `json:"-"` + Value *[]Interface `json:"value,omitempty"` + NextLink *string `json:"nextLink,omitempty"` +} + +// InterfaceListResultPreparer prepares a request to retrieve the next set of results. It returns +// nil if no more results exist. +func (client InterfaceListResult) InterfaceListResultPreparer() (*http.Request, error) { + if client.NextLink == nil || len(to.String(client.NextLink)) <= 0 { + return nil, nil + } + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(to.String(client.NextLink))) +} + +// InterfacePropertiesFormat is networkInterface properties. +type InterfacePropertiesFormat struct { + VirtualMachine *SubResource `json:"virtualMachine,omitempty"` + NetworkSecurityGroup *SecurityGroup `json:"networkSecurityGroup,omitempty"` + IPConfigurations *[]InterfaceIPConfiguration `json:"ipConfigurations,omitempty"` + DNSSettings *InterfaceDNSSettings `json:"dnsSettings,omitempty"` + MacAddress *string `json:"macAddress,omitempty"` + Primary *bool `json:"primary,omitempty"` + EnableIPForwarding *bool `json:"enableIPForwarding,omitempty"` + ResourceGUID *string `json:"resourceGuid,omitempty"` + ProvisioningState *string `json:"provisioningState,omitempty"` +} + +// IPConfiguration is iPConfiguration +type IPConfiguration struct { + ID *string `json:"id,omitempty"` + Properties *IPConfigurationPropertiesFormat `json:"properties,omitempty"` + Name *string `json:"name,omitempty"` + Etag *string `json:"etag,omitempty"` +} + +// IPConfigurationPropertiesFormat is properties of IPConfiguration +type IPConfigurationPropertiesFormat struct { + PrivateIPAddress *string `json:"privateIPAddress,omitempty"` + PrivateIPAllocationMethod IPAllocationMethod `json:"privateIPAllocationMethod,omitempty"` + Subnet *Subnet `json:"subnet,omitempty"` + PublicIPAddress *PublicIPAddress `json:"publicIPAddress,omitempty"` + ProvisioningState *string `json:"provisioningState,omitempty"` +} + +// LoadBalancer is loadBalancer resource +type LoadBalancer struct { + autorest.Response `json:"-"` + ID *string `json:"id,omitempty"` + Name *string `json:"name,omitempty"` + Type *string `json:"type,omitempty"` + Location *string `json:"location,omitempty"` + Tags *map[string]*string `json:"tags,omitempty"` + Properties *LoadBalancerPropertiesFormat `json:"properties,omitempty"` + Etag *string `json:"etag,omitempty"` +} + +// LoadBalancerListResult is response for ListLoadBalancers Api service call +type LoadBalancerListResult struct { + autorest.Response `json:"-"` + Value *[]LoadBalancer `json:"value,omitempty"` + NextLink *string `json:"nextLink,omitempty"` +} + +// LoadBalancerListResultPreparer prepares a request to retrieve the next set of results. It returns +// nil if no more results exist. 
+func (client LoadBalancerListResult) LoadBalancerListResultPreparer() (*http.Request, error) { + if client.NextLink == nil || len(to.String(client.NextLink)) <= 0 { + return nil, nil + } + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(to.String(client.NextLink))) +} + +// LoadBalancerPropertiesFormat is properties of Load Balancer +type LoadBalancerPropertiesFormat struct { + FrontendIPConfigurations *[]FrontendIPConfiguration `json:"frontendIPConfigurations,omitempty"` + BackendAddressPools *[]BackendAddressPool `json:"backendAddressPools,omitempty"` + LoadBalancingRules *[]LoadBalancingRule `json:"loadBalancingRules,omitempty"` + Probes *[]Probe `json:"probes,omitempty"` + InboundNatRules *[]InboundNatRule `json:"inboundNatRules,omitempty"` + InboundNatPools *[]InboundNatPool `json:"inboundNatPools,omitempty"` + OutboundNatRules *[]OutboundNatRule `json:"outboundNatRules,omitempty"` + ResourceGUID *string `json:"resourceGuid,omitempty"` + ProvisioningState *string `json:"provisioningState,omitempty"` +} + +// LoadBalancingRule is rules of the load balancer +type LoadBalancingRule struct { + ID *string `json:"id,omitempty"` + Properties *LoadBalancingRulePropertiesFormat `json:"properties,omitempty"` + Name *string `json:"name,omitempty"` + Etag *string `json:"etag,omitempty"` +} + +// LoadBalancingRulePropertiesFormat is properties of the load balancer +type LoadBalancingRulePropertiesFormat struct { + FrontendIPConfiguration *SubResource `json:"frontendIPConfiguration,omitempty"` + BackendAddressPool *SubResource `json:"backendAddressPool,omitempty"` + Probe *SubResource `json:"probe,omitempty"` + Protocol TransportProtocol `json:"protocol,omitempty"` + LoadDistribution LoadDistribution `json:"loadDistribution,omitempty"` + FrontendPort *int `json:"frontendPort,omitempty"` + BackendPort *int `json:"backendPort,omitempty"` + IdleTimeoutInMinutes *int `json:"idleTimeoutInMinutes,omitempty"` + EnableFloatingIP *bool `json:"enableFloatingIP,omitempty"` + ProvisioningState *string `json:"provisioningState,omitempty"` +} + +// LocalNetworkGateway is a common class for general resource information +type LocalNetworkGateway struct { + autorest.Response `json:"-"` + ID *string `json:"id,omitempty"` + Name *string `json:"name,omitempty"` + Type *string `json:"type,omitempty"` + Location *string `json:"location,omitempty"` + Tags *map[string]*string `json:"tags,omitempty"` + Properties *LocalNetworkGatewayPropertiesFormat `json:"properties,omitempty"` + Etag *string `json:"etag,omitempty"` +} + +// LocalNetworkGatewayListResult is response for ListLocalNetworkGateways Api +// service call +type LocalNetworkGatewayListResult struct { + autorest.Response `json:"-"` + Value *[]LocalNetworkGateway `json:"value,omitempty"` + NextLink *string `json:"nextLink,omitempty"` +} + +// LocalNetworkGatewayListResultPreparer prepares a request to retrieve the next set of results. It returns +// nil if no more results exist. 
+func (client LocalNetworkGatewayListResult) LocalNetworkGatewayListResultPreparer() (*http.Request, error) { + if client.NextLink == nil || len(to.String(client.NextLink)) <= 0 { + return nil, nil + } + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(to.String(client.NextLink))) +} + +// LocalNetworkGatewayPropertiesFormat is localNetworkGateway properties +type LocalNetworkGatewayPropertiesFormat struct { + LocalNetworkAddressSpace *AddressSpace `json:"localNetworkAddressSpace,omitempty"` + GatewayIPAddress *string `json:"gatewayIpAddress,omitempty"` + ResourceGUID *string `json:"resourceGuid,omitempty"` + ProvisioningState *string `json:"provisioningState,omitempty"` +} + +// OutboundNatRule is outbound NAT pool of the loadbalancer +type OutboundNatRule struct { + ID *string `json:"id,omitempty"` + Properties *OutboundNatRulePropertiesFormat `json:"properties,omitempty"` + Name *string `json:"name,omitempty"` + Etag *string `json:"etag,omitempty"` +} + +// OutboundNatRulePropertiesFormat is outbound NAT pool of the loadbalancer +type OutboundNatRulePropertiesFormat struct { + AllocatedOutboundPorts *int `json:"allocatedOutboundPorts,omitempty"` + FrontendIPConfigurations *[]SubResource `json:"frontendIPConfigurations,omitempty"` + BackendAddressPool *SubResource `json:"backendAddressPool,omitempty"` + ProvisioningState *string `json:"provisioningState,omitempty"` +} + +// Probe is load balancer Probe +type Probe struct { + ID *string `json:"id,omitempty"` + Properties *ProbePropertiesFormat `json:"properties,omitempty"` + Name *string `json:"name,omitempty"` + Etag *string `json:"etag,omitempty"` +} + +// ProbePropertiesFormat is +type ProbePropertiesFormat struct { + LoadBalancingRules *[]SubResource `json:"loadBalancingRules,omitempty"` + Protocol ProbeProtocol `json:"protocol,omitempty"` + Port *int `json:"port,omitempty"` + IntervalInSeconds *int `json:"intervalInSeconds,omitempty"` + NumberOfProbes *int `json:"numberOfProbes,omitempty"` + RequestPath *string `json:"requestPath,omitempty"` + ProvisioningState *string `json:"provisioningState,omitempty"` +} + +// PublicIPAddress is publicIPAddress resource +type PublicIPAddress struct { + autorest.Response `json:"-"` + ID *string `json:"id,omitempty"` + Name *string `json:"name,omitempty"` + Type *string `json:"type,omitempty"` + Location *string `json:"location,omitempty"` + Tags *map[string]*string `json:"tags,omitempty"` + Properties *PublicIPAddressPropertiesFormat `json:"properties,omitempty"` + Etag *string `json:"etag,omitempty"` +} + +// PublicIPAddressDNSSettings is contains FQDN of the DNS record associated +// with the public IP address +type PublicIPAddressDNSSettings struct { + DomainNameLabel *string `json:"domainNameLabel,omitempty"` + Fqdn *string `json:"fqdn,omitempty"` + ReverseFqdn *string `json:"reverseFqdn,omitempty"` +} + +// PublicIPAddressListResult is response for ListPublicIpAddresses Api service +// call +type PublicIPAddressListResult struct { + autorest.Response `json:"-"` + Value *[]PublicIPAddress `json:"value,omitempty"` + NextLink *string `json:"nextLink,omitempty"` +} + +// PublicIPAddressListResultPreparer prepares a request to retrieve the next set of results. It returns +// nil if no more results exist. 
+func (client PublicIPAddressListResult) PublicIPAddressListResultPreparer() (*http.Request, error) { + if client.NextLink == nil || len(to.String(client.NextLink)) <= 0 { + return nil, nil + } + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(to.String(client.NextLink))) +} + +// PublicIPAddressPropertiesFormat is publicIpAddress properties +type PublicIPAddressPropertiesFormat struct { + PublicIPAllocationMethod IPAllocationMethod `json:"publicIPAllocationMethod,omitempty"` + IPConfiguration *IPConfiguration `json:"ipConfiguration,omitempty"` + DNSSettings *PublicIPAddressDNSSettings `json:"dnsSettings,omitempty"` + IPAddress *string `json:"ipAddress,omitempty"` + IdleTimeoutInMinutes *int `json:"idleTimeoutInMinutes,omitempty"` + ResourceGUID *string `json:"resourceGuid,omitempty"` + ProvisioningState *string `json:"provisioningState,omitempty"` +} + +// Resource is +type Resource struct { + ID *string `json:"id,omitempty"` + Name *string `json:"name,omitempty"` + Type *string `json:"type,omitempty"` + Location *string `json:"location,omitempty"` + Tags *map[string]*string `json:"tags,omitempty"` +} + +// Route is route resource +type Route struct { + autorest.Response `json:"-"` + ID *string `json:"id,omitempty"` + Properties *RoutePropertiesFormat `json:"properties,omitempty"` + Name *string `json:"name,omitempty"` + Etag *string `json:"etag,omitempty"` +} + +// RouteListResult is response for ListRoute Api servive call +type RouteListResult struct { + autorest.Response `json:"-"` + Value *[]Route `json:"value,omitempty"` + NextLink *string `json:"nextLink,omitempty"` +} + +// RouteListResultPreparer prepares a request to retrieve the next set of results. It returns +// nil if no more results exist. +func (client RouteListResult) RouteListResultPreparer() (*http.Request, error) { + if client.NextLink == nil || len(to.String(client.NextLink)) <= 0 { + return nil, nil + } + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(to.String(client.NextLink))) +} + +// RoutePropertiesFormat is route resource +type RoutePropertiesFormat struct { + AddressPrefix *string `json:"addressPrefix,omitempty"` + NextHopType RouteNextHopType `json:"nextHopType,omitempty"` + NextHopIPAddress *string `json:"nextHopIpAddress,omitempty"` + ProvisioningState *string `json:"provisioningState,omitempty"` +} + +// RouteTable is routeTable resource +type RouteTable struct { + autorest.Response `json:"-"` + ID *string `json:"id,omitempty"` + Name *string `json:"name,omitempty"` + Type *string `json:"type,omitempty"` + Location *string `json:"location,omitempty"` + Tags *map[string]*string `json:"tags,omitempty"` + Properties *RouteTablePropertiesFormat `json:"properties,omitempty"` + Etag *string `json:"etag,omitempty"` +} + +// RouteTableListResult is response for ListRouteTable Api servive call +type RouteTableListResult struct { + autorest.Response `json:"-"` + Value *[]RouteTable `json:"value,omitempty"` + NextLink *string `json:"nextLink,omitempty"` +} + +// RouteTableListResultPreparer prepares a request to retrieve the next set of results. It returns +// nil if no more results exist. 
+func (client RouteTableListResult) RouteTableListResultPreparer() (*http.Request, error) { + if client.NextLink == nil || len(to.String(client.NextLink)) <= 0 { + return nil, nil + } + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(to.String(client.NextLink))) +} + +// RouteTablePropertiesFormat is route Table resource +type RouteTablePropertiesFormat struct { + Routes *[]Route `json:"routes,omitempty"` + Subnets *[]Subnet `json:"subnets,omitempty"` + ProvisioningState *string `json:"provisioningState,omitempty"` +} + +// SecurityGroup is networkSecurityGroup resource +type SecurityGroup struct { + autorest.Response `json:"-"` + ID *string `json:"id,omitempty"` + Name *string `json:"name,omitempty"` + Type *string `json:"type,omitempty"` + Location *string `json:"location,omitempty"` + Tags *map[string]*string `json:"tags,omitempty"` + Properties *SecurityGroupPropertiesFormat `json:"properties,omitempty"` + Etag *string `json:"etag,omitempty"` +} + +// SecurityGroupListResult is response for ListNetworkSecurityGroups Api +// servive call +type SecurityGroupListResult struct { + autorest.Response `json:"-"` + Value *[]SecurityGroup `json:"value,omitempty"` + NextLink *string `json:"nextLink,omitempty"` +} + +// SecurityGroupListResultPreparer prepares a request to retrieve the next set of results. It returns +// nil if no more results exist. +func (client SecurityGroupListResult) SecurityGroupListResultPreparer() (*http.Request, error) { + if client.NextLink == nil || len(to.String(client.NextLink)) <= 0 { + return nil, nil + } + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(to.String(client.NextLink))) +} + +// SecurityGroupPropertiesFormat is network Security Group resource +type SecurityGroupPropertiesFormat struct { + SecurityRules *[]SecurityRule `json:"securityRules,omitempty"` + DefaultSecurityRules *[]SecurityRule `json:"defaultSecurityRules,omitempty"` + NetworkInterfaces *[]Interface `json:"networkInterfaces,omitempty"` + Subnets *[]Subnet `json:"subnets,omitempty"` + ResourceGUID *string `json:"resourceGuid,omitempty"` + ProvisioningState *string `json:"provisioningState,omitempty"` +} + +// SecurityRule is network security rule +type SecurityRule struct { + autorest.Response `json:"-"` + ID *string `json:"id,omitempty"` + Properties *SecurityRulePropertiesFormat `json:"properties,omitempty"` + Name *string `json:"name,omitempty"` + Etag *string `json:"etag,omitempty"` +} + +// SecurityRuleListResult is response for ListSecurityRule Api service +// callRetrieves all security rules that belongs to a network security group +type SecurityRuleListResult struct { + autorest.Response `json:"-"` + Value *[]SecurityRule `json:"value,omitempty"` + NextLink *string `json:"nextLink,omitempty"` +} + +// SecurityRuleListResultPreparer prepares a request to retrieve the next set of results. It returns +// nil if no more results exist. 
+func (client SecurityRuleListResult) SecurityRuleListResultPreparer() (*http.Request, error) { + if client.NextLink == nil || len(to.String(client.NextLink)) <= 0 { + return nil, nil + } + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(to.String(client.NextLink))) +} + +// SecurityRulePropertiesFormat is +type SecurityRulePropertiesFormat struct { + Description *string `json:"description,omitempty"` + Protocol SecurityRuleProtocol `json:"protocol,omitempty"` + SourcePortRange *string `json:"sourcePortRange,omitempty"` + DestinationPortRange *string `json:"destinationPortRange,omitempty"` + SourceAddressPrefix *string `json:"sourceAddressPrefix,omitempty"` + DestinationAddressPrefix *string `json:"destinationAddressPrefix,omitempty"` + Access SecurityRuleAccess `json:"access,omitempty"` + Priority *int `json:"priority,omitempty"` + Direction SecurityRuleDirection `json:"direction,omitempty"` + ProvisioningState *string `json:"provisioningState,omitempty"` +} + +// String is +type String struct { + autorest.Response `json:"-"` + Value *string `json:"value,omitempty"` +} + +// Subnet is subnet in a VirtualNework resource +type Subnet struct { + autorest.Response `json:"-"` + ID *string `json:"id,omitempty"` + Properties *SubnetPropertiesFormat `json:"properties,omitempty"` + Name *string `json:"name,omitempty"` + Etag *string `json:"etag,omitempty"` +} + +// SubnetListResult is response for ListSubnets Api service callRetrieves all +// subnet that belongs to a virtual network +type SubnetListResult struct { + autorest.Response `json:"-"` + Value *[]Subnet `json:"value,omitempty"` + NextLink *string `json:"nextLink,omitempty"` +} + +// SubnetListResultPreparer prepares a request to retrieve the next set of results. It returns +// nil if no more results exist. +func (client SubnetListResult) SubnetListResultPreparer() (*http.Request, error) { + if client.NextLink == nil || len(to.String(client.NextLink)) <= 0 { + return nil, nil + } + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(to.String(client.NextLink))) +} + +// SubnetPropertiesFormat is +type SubnetPropertiesFormat struct { + AddressPrefix *string `json:"addressPrefix,omitempty"` + NetworkSecurityGroup *SecurityGroup `json:"networkSecurityGroup,omitempty"` + RouteTable *RouteTable `json:"routeTable,omitempty"` + IPConfigurations *[]IPConfiguration `json:"ipConfigurations,omitempty"` + ProvisioningState *string `json:"provisioningState,omitempty"` +} + +// SubResource is +type SubResource struct { + ID *string `json:"id,omitempty"` +} + +// Usage is describes Network Resource Usage. +type Usage struct { + Unit UsageUnit `json:"unit,omitempty"` + CurrentValue *int32 `json:"currentValue,omitempty"` + Limit *int32 `json:"limit,omitempty"` + Name *UsageName `json:"name,omitempty"` +} + +// UsageName is the Usage Names. +type UsageName struct { + Value *string `json:"value,omitempty"` + LocalizedValue *string `json:"localizedValue,omitempty"` +} + +// UsagesListResult is the List Usages operation response. +type UsagesListResult struct { + autorest.Response `json:"-"` + Value *[]Usage `json:"value,omitempty"` + NextLink *string `json:",omitempty"` +} + +// UsagesListResultPreparer prepares a request to retrieve the next set of results. It returns +// nil if no more results exist. 
+func (client UsagesListResult) UsagesListResultPreparer() (*http.Request, error) { + if client.NextLink == nil || len(to.String(client.NextLink)) <= 0 { + return nil, nil + } + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(to.String(client.NextLink))) +} + +// VirtualNetwork is virtual Network resource +type VirtualNetwork struct { + autorest.Response `json:"-"` + ID *string `json:"id,omitempty"` + Name *string `json:"name,omitempty"` + Type *string `json:"type,omitempty"` + Location *string `json:"location,omitempty"` + Tags *map[string]*string `json:"tags,omitempty"` + Properties *VirtualNetworkPropertiesFormat `json:"properties,omitempty"` + Etag *string `json:"etag,omitempty"` +} + +// VirtualNetworkGateway is a common class for general resource information +type VirtualNetworkGateway struct { + autorest.Response `json:"-"` + ID *string `json:"id,omitempty"` + Name *string `json:"name,omitempty"` + Type *string `json:"type,omitempty"` + Location *string `json:"location,omitempty"` + Tags *map[string]*string `json:"tags,omitempty"` + Properties *VirtualNetworkGatewayPropertiesFormat `json:"properties,omitempty"` + Etag *string `json:"etag,omitempty"` +} + +// VirtualNetworkGatewayConnection is a common class for general resource +// information +type VirtualNetworkGatewayConnection struct { + autorest.Response `json:"-"` + ID *string `json:"id,omitempty"` + Name *string `json:"name,omitempty"` + Type *string `json:"type,omitempty"` + Location *string `json:"location,omitempty"` + Tags *map[string]*string `json:"tags,omitempty"` + Properties *VirtualNetworkGatewayConnectionPropertiesFormat `json:"properties,omitempty"` + Etag *string `json:"etag,omitempty"` +} + +// VirtualNetworkGatewayConnectionListResult is response for +// ListVirtualNetworkGatewayConnections Api service call +type VirtualNetworkGatewayConnectionListResult struct { + autorest.Response `json:"-"` + Value *[]VirtualNetworkGatewayConnection `json:"value,omitempty"` + NextLink *string `json:"nextLink,omitempty"` +} + +// VirtualNetworkGatewayConnectionListResultPreparer prepares a request to retrieve the next set of results. It returns +// nil if no more results exist. 
+func (client VirtualNetworkGatewayConnectionListResult) VirtualNetworkGatewayConnectionListResultPreparer() (*http.Request, error) { + if client.NextLink == nil || len(to.String(client.NextLink)) <= 0 { + return nil, nil + } + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(to.String(client.NextLink))) +} + +// VirtualNetworkGatewayConnectionPropertiesFormat is +// virtualNeworkGatewayConnection properties +type VirtualNetworkGatewayConnectionPropertiesFormat struct { + AuthorizationKey *string `json:"authorizationKey,omitempty"` + VirtualNetworkGateway1 *VirtualNetworkGateway `json:"virtualNetworkGateway1,omitempty"` + VirtualNetworkGateway2 *VirtualNetworkGateway `json:"virtualNetworkGateway2,omitempty"` + LocalNetworkGateway2 *LocalNetworkGateway `json:"localNetworkGateway2,omitempty"` + ConnectionType VirtualNetworkGatewayConnectionType `json:"connectionType,omitempty"` + RoutingWeight *int `json:"routingWeight,omitempty"` + SharedKey *string `json:"sharedKey,omitempty"` + ConnectionStatus VirtualNetworkGatewayConnectionStatus `json:"connectionStatus,omitempty"` + EgressBytesTransferred *int32 `json:"egressBytesTransferred,omitempty"` + IngressBytesTransferred *int32 `json:"ingressBytesTransferred,omitempty"` + Peer *SubResource `json:"peer,omitempty"` + ResourceGUID *string `json:"resourceGuid,omitempty"` + ProvisioningState *string `json:"provisioningState,omitempty"` +} + +// VirtualNetworkGatewayIPConfiguration is ipConfiguration for Virtual network +// gateway +type VirtualNetworkGatewayIPConfiguration struct { + ID *string `json:"id,omitempty"` + Properties *VirtualNetworkGatewayIPConfigurationPropertiesFormat `json:"properties,omitempty"` + Name *string `json:"name,omitempty"` + Etag *string `json:"etag,omitempty"` +} + +// VirtualNetworkGatewayIPConfigurationPropertiesFormat is properties of +// VirtualNetworkGatewayIPConfiguration +type VirtualNetworkGatewayIPConfigurationPropertiesFormat struct { + PrivateIPAddress *string `json:"privateIPAddress,omitempty"` + PrivateIPAllocationMethod IPAllocationMethod `json:"privateIPAllocationMethod,omitempty"` + Subnet *SubResource `json:"subnet,omitempty"` + PublicIPAddress *SubResource `json:"publicIPAddress,omitempty"` + ProvisioningState *string `json:"provisioningState,omitempty"` +} + +// VirtualNetworkGatewayListResult is response for ListVirtualNetworkGateways +// Api service call +type VirtualNetworkGatewayListResult struct { + autorest.Response `json:"-"` + Value *[]VirtualNetworkGateway `json:"value,omitempty"` + NextLink *string `json:"nextLink,omitempty"` +} + +// VirtualNetworkGatewayListResultPreparer prepares a request to retrieve the next set of results. It returns +// nil if no more results exist. 
+func (client VirtualNetworkGatewayListResult) VirtualNetworkGatewayListResultPreparer() (*http.Request, error) { + if client.NextLink == nil || len(to.String(client.NextLink)) <= 0 { + return nil, nil + } + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(to.String(client.NextLink))) +} + +// VirtualNetworkGatewayPropertiesFormat is virtualNeworkGateay properties +type VirtualNetworkGatewayPropertiesFormat struct { + IPConfigurations *[]VirtualNetworkGatewayIPConfiguration `json:"ipConfigurations,omitempty"` + GatewayType VirtualNetworkGatewayType `json:"gatewayType,omitempty"` + VpnType VpnType `json:"vpnType,omitempty"` + EnableBgp *bool `json:"enableBgp,omitempty"` + GatewayDefaultSite *SubResource `json:"gatewayDefaultSite,omitempty"` + Sku *VirtualNetworkGatewaySku `json:"sku,omitempty"` + VpnClientConfiguration *VpnClientConfiguration `json:"vpnClientConfiguration,omitempty"` + ResourceGUID *string `json:"resourceGuid,omitempty"` + ProvisioningState *string `json:"provisioningState,omitempty"` +} + +// VirtualNetworkGatewaySku is virtualNetworkGatewaySku details +type VirtualNetworkGatewaySku struct { + Name VirtualNetworkGatewaySkuName `json:"name,omitempty"` + Tier VirtualNetworkGatewaySkuTier `json:"tier,omitempty"` + Capacity *int `json:"capacity,omitempty"` +} + +// VirtualNetworkListResult is response for ListVirtualNetworks Api servive +// call +type VirtualNetworkListResult struct { + autorest.Response `json:"-"` + Value *[]VirtualNetwork `json:"value,omitempty"` + NextLink *string `json:"nextLink,omitempty"` +} + +// VirtualNetworkListResultPreparer prepares a request to retrieve the next set of results. It returns +// nil if no more results exist. +func (client VirtualNetworkListResult) VirtualNetworkListResultPreparer() (*http.Request, error) { + if client.NextLink == nil || len(to.String(client.NextLink)) <= 0 { + return nil, nil + } + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(to.String(client.NextLink))) +} + +// VirtualNetworkPropertiesFormat is +type VirtualNetworkPropertiesFormat struct { + AddressSpace *AddressSpace `json:"addressSpace,omitempty"` + DhcpOptions *DhcpOptions `json:"dhcpOptions,omitempty"` + Subnets *[]Subnet `json:"subnets,omitempty"` + ResourceGUID *string `json:"resourceGuid,omitempty"` + ProvisioningState *string `json:"provisioningState,omitempty"` +} + +// VpnClientConfiguration is vpnClientConfiguration for P2S client +type VpnClientConfiguration struct { + VpnClientAddressPool *AddressSpace `json:"vpnClientAddressPool,omitempty"` + VpnClientRootCertificates *[]VpnClientRootCertificate `json:"vpnClientRootCertificates,omitempty"` + VpnClientRevokedCertificates *[]VpnClientRevokedCertificate `json:"vpnClientRevokedCertificates,omitempty"` +} + +// VpnClientParameters is vpnClientParameters +type VpnClientParameters struct { + ProcessorArchitecture ProcessorArchitecture `json:"ProcessorArchitecture,omitempty"` +} + +// VpnClientRevokedCertificate is vPN client revoked certificate of virtual +// network gateway +type VpnClientRevokedCertificate struct { + ID *string `json:"id,omitempty"` + Properties *VpnClientRevokedCertificatePropertiesFormat `json:"properties,omitempty"` + Name *string `json:"name,omitempty"` + Etag *string `json:"etag,omitempty"` +} + +// VpnClientRevokedCertificatePropertiesFormat is properties of the revoked +// VPN client certificate of virtual network gateway +type 
VpnClientRevokedCertificatePropertiesFormat struct { + Thumbprint *string `json:"thumbprint,omitempty"` + ProvisioningState *string `json:"provisioningState,omitempty"` +} + +// VpnClientRootCertificate is vPN client root certificate of virtual network +// gateway +type VpnClientRootCertificate struct { + ID *string `json:"id,omitempty"` + Properties *VpnClientRootCertificatePropertiesFormat `json:"properties,omitempty"` + Name *string `json:"name,omitempty"` + Etag *string `json:"etag,omitempty"` +} + +// VpnClientRootCertificatePropertiesFormat is properties of SSL certificates +// of application gateway +type VpnClientRootCertificatePropertiesFormat struct { + PublicCertData *string `json:"publicCertData,omitempty"` + ProvisioningState *string `json:"provisioningState,omitempty"` +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/network/publicipaddresses.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/publicipaddresses.go new file mode 100644 index 000000000..70cfd4efe --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/publicipaddresses.go @@ -0,0 +1,416 @@ +package network + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator 0.12.0.0 +// Changes may cause incorrect behavior and will be lost if the code is +// regenerated. + +import ( + "github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest" + "net/http" + "net/url" +) + +// PublicIPAddressesClient is the the Microsoft Azure Network management API +// provides a RESTful set of web services that interact with Microsoft Azure +// Networks service to manage your network resrources. The API has entities +// that capture the relationship between an end user and the Microsoft Azure +// Networks service. +type PublicIPAddressesClient struct { + ManagementClient +} + +// NewPublicIPAddressesClient creates an instance of the +// PublicIPAddressesClient client. +func NewPublicIPAddressesClient(subscriptionID string) PublicIPAddressesClient { + return NewPublicIPAddressesClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewPublicIPAddressesClientWithBaseURI creates an instance of the +// PublicIPAddressesClient client. +func NewPublicIPAddressesClientWithBaseURI(baseURI string, subscriptionID string) PublicIPAddressesClient { + return PublicIPAddressesClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// CreateOrUpdate the Put PublicIPAddress operation creates/updates a +// stable/dynamic PublicIP address +// +// resourceGroupName is the name of the resource group. publicIPAddressName is +// the name of the publicIpAddress. 
parameters is parameters supplied to the +// create/update PublicIPAddress operation +func (client PublicIPAddressesClient) CreateOrUpdate(resourceGroupName string, publicIPAddressName string, parameters PublicIPAddress) (result PublicIPAddress, ae error) { + req, err := client.CreateOrUpdatePreparer(resourceGroupName, publicIPAddressName, parameters) + if err != nil { + return result, autorest.NewErrorWithError(err, "network/PublicIPAddressesClient", "CreateOrUpdate", autorest.UndefinedStatusCode, "Failure preparing request") + } + + resp, err := client.CreateOrUpdateSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "network/PublicIPAddressesClient", "CreateOrUpdate", resp.StatusCode, "Failure sending request") + } + + result, err = client.CreateOrUpdateResponder(resp) + if err != nil { + ae = autorest.NewErrorWithError(err, "network/PublicIPAddressesClient", "CreateOrUpdate", resp.StatusCode, "Failure responding to request") + } + + return +} + +// CreateOrUpdatePreparer prepares the CreateOrUpdate request. +func (client PublicIPAddressesClient) CreateOrUpdatePreparer(resourceGroupName string, publicIPAddressName string, parameters PublicIPAddress) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "publicIpAddressName": url.QueryEscape(publicIPAddressName), + "resourceGroupName": url.QueryEscape(resourceGroupName), + "subscriptionId": url.QueryEscape(client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsPut(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/publicIPAddresses/{publicIpAddressName}/"), + autorest.WithJSON(parameters), + autorest.WithPathParameters(pathParameters), + autorest.WithQueryParameters(queryParameters)) +} + +// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the +// http.Response Body if it receives an error. +func (client PublicIPAddressesClient) CreateOrUpdateSender(req *http.Request) (*http.Response, error) { + return client.Send(req, http.StatusCreated, http.StatusOK) +} + +// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always +// closes the http.Response Body. +func (client PublicIPAddressesClient) CreateOrUpdateResponder(resp *http.Response) (result PublicIPAddress, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + autorest.WithErrorUnlessStatusCode(http.StatusCreated, http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// Delete the delete publicIpAddress operation deletes the specified +// publicIpAddress. +// +// resourceGroupName is the name of the resource group. publicIPAddressName is +// the name of the subnet. 
+func (client PublicIPAddressesClient) Delete(resourceGroupName string, publicIPAddressName string) (result autorest.Response, ae error) { + req, err := client.DeletePreparer(resourceGroupName, publicIPAddressName) + if err != nil { + return result, autorest.NewErrorWithError(err, "network/PublicIPAddressesClient", "Delete", autorest.UndefinedStatusCode, "Failure preparing request") + } + + resp, err := client.DeleteSender(req) + if err != nil { + result.Response = resp + return result, autorest.NewErrorWithError(err, "network/PublicIPAddressesClient", "Delete", resp.StatusCode, "Failure sending request") + } + + result, err = client.DeleteResponder(resp) + if err != nil { + ae = autorest.NewErrorWithError(err, "network/PublicIPAddressesClient", "Delete", resp.StatusCode, "Failure responding to request") + } + + return +} + +// DeletePreparer prepares the Delete request. +func (client PublicIPAddressesClient) DeletePreparer(resourceGroupName string, publicIPAddressName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "publicIpAddressName": url.QueryEscape(publicIPAddressName), + "resourceGroupName": url.QueryEscape(resourceGroupName), + "subscriptionId": url.QueryEscape(client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsDelete(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/publicIPAddresses/{publicIpAddressName}/"), + autorest.WithPathParameters(pathParameters), + autorest.WithQueryParameters(queryParameters)) +} + +// DeleteSender sends the Delete request. The method will close the +// http.Response Body if it receives an error. +func (client PublicIPAddressesClient) DeleteSender(req *http.Request) (*http.Response, error) { + return client.Send(req, http.StatusNoContent, http.StatusAccepted, http.StatusOK) +} + +// DeleteResponder handles the response to the Delete request. The method always +// closes the http.Response Body. +func (client PublicIPAddressesClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + autorest.WithErrorUnlessStatusCode(http.StatusNoContent, http.StatusAccepted, http.StatusOK), + autorest.ByClosing()) + result.Response = resp + return +} + +// Get the Get publicIpAddress operation retreives information about the +// specified pubicIpAddress +// +// resourceGroupName is the name of the resource group. publicIPAddressName is +// the name of the subnet. expand is expand references resources. 
+func (client PublicIPAddressesClient) Get(resourceGroupName string, publicIPAddressName string, expand string) (result PublicIPAddress, ae error) { + req, err := client.GetPreparer(resourceGroupName, publicIPAddressName, expand) + if err != nil { + return result, autorest.NewErrorWithError(err, "network/PublicIPAddressesClient", "Get", autorest.UndefinedStatusCode, "Failure preparing request") + } + + resp, err := client.GetSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "network/PublicIPAddressesClient", "Get", resp.StatusCode, "Failure sending request") + } + + result, err = client.GetResponder(resp) + if err != nil { + ae = autorest.NewErrorWithError(err, "network/PublicIPAddressesClient", "Get", resp.StatusCode, "Failure responding to request") + } + + return +} + +// GetPreparer prepares the Get request. +func (client PublicIPAddressesClient) GetPreparer(resourceGroupName string, publicIPAddressName string, expand string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "publicIpAddressName": url.QueryEscape(publicIPAddressName), + "resourceGroupName": url.QueryEscape(resourceGroupName), + "subscriptionId": url.QueryEscape(client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + if len(expand) > 0 { + queryParameters["$expand"] = expand + } + + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/publicIPAddresses/{publicIpAddressName}/"), + autorest.WithPathParameters(pathParameters), + autorest.WithQueryParameters(queryParameters)) +} + +// GetSender sends the Get request. The method will close the +// http.Response Body if it receives an error. +func (client PublicIPAddressesClient) GetSender(req *http.Request) (*http.Response, error) { + return client.Send(req, http.StatusOK) +} + +// GetResponder handles the response to the Get request. The method always +// closes the http.Response Body. +func (client PublicIPAddressesClient) GetResponder(resp *http.Response) (result PublicIPAddress, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + autorest.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// List the List publicIpAddress opertion retrieves all the publicIpAddresses +// in a resource group. +// +// resourceGroupName is the name of the resource group. +func (client PublicIPAddressesClient) List(resourceGroupName string) (result PublicIPAddressListResult, ae error) { + req, err := client.ListPreparer(resourceGroupName) + if err != nil { + return result, autorest.NewErrorWithError(err, "network/PublicIPAddressesClient", "List", autorest.UndefinedStatusCode, "Failure preparing request") + } + + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "network/PublicIPAddressesClient", "List", resp.StatusCode, "Failure sending request") + } + + result, err = client.ListResponder(resp) + if err != nil { + ae = autorest.NewErrorWithError(err, "network/PublicIPAddressesClient", "List", resp.StatusCode, "Failure responding to request") + } + + return +} + +// ListPreparer prepares the List request. 
+func (client PublicIPAddressesClient) ListPreparer(resourceGroupName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": url.QueryEscape(resourceGroupName), + "subscriptionId": url.QueryEscape(client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/publicIPAddresses"), + autorest.WithPathParameters(pathParameters), + autorest.WithQueryParameters(queryParameters)) +} + +// ListSender sends the List request. The method will close the +// http.Response Body if it receives an error. +func (client PublicIPAddressesClient) ListSender(req *http.Request) (*http.Response, error) { + return client.Send(req, http.StatusOK) +} + +// ListResponder handles the response to the List request. The method always +// closes the http.Response Body. +func (client PublicIPAddressesClient) ListResponder(resp *http.Response) (result PublicIPAddressListResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + autorest.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// ListNextResults retrieves the next set of results, if any. +func (client PublicIPAddressesClient) ListNextResults(lastResults PublicIPAddressListResult) (result PublicIPAddressListResult, ae error) { + req, err := lastResults.PublicIPAddressListResultPreparer() + if err != nil { + return result, autorest.NewErrorWithError(err, "network/PublicIPAddressesClient", "List", autorest.UndefinedStatusCode, "Failure preparing next results request request") + } + if req == nil { + return + } + + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "network/PublicIPAddressesClient", "List", resp.StatusCode, "Failure sending next results request request") + } + + result, err = client.ListResponder(resp) + if err != nil { + ae = autorest.NewErrorWithError(err, "network/PublicIPAddressesClient", "List", resp.StatusCode, "Failure responding to next results request request") + } + + return +} + +// ListAll the List publicIpAddress opertion retrieves all the +// publicIpAddresses in a subscription. +func (client PublicIPAddressesClient) ListAll() (result PublicIPAddressListResult, ae error) { + req, err := client.ListAllPreparer() + if err != nil { + return result, autorest.NewErrorWithError(err, "network/PublicIPAddressesClient", "ListAll", autorest.UndefinedStatusCode, "Failure preparing request") + } + + resp, err := client.ListAllSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "network/PublicIPAddressesClient", "ListAll", resp.StatusCode, "Failure sending request") + } + + result, err = client.ListAllResponder(resp) + if err != nil { + ae = autorest.NewErrorWithError(err, "network/PublicIPAddressesClient", "ListAll", resp.StatusCode, "Failure responding to request") + } + + return +} + +// ListAllPreparer prepares the ListAll request. 
+func (client PublicIPAddressesClient) ListAllPreparer() (*http.Request, error) { + pathParameters := map[string]interface{}{ + "subscriptionId": url.QueryEscape(client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPath("/subscriptions/{subscriptionId}/providers/Microsoft.Network/publicIPAddresses"), + autorest.WithPathParameters(pathParameters), + autorest.WithQueryParameters(queryParameters)) +} + +// ListAllSender sends the ListAll request. The method will close the +// http.Response Body if it receives an error. +func (client PublicIPAddressesClient) ListAllSender(req *http.Request) (*http.Response, error) { + return client.Send(req, http.StatusOK) +} + +// ListAllResponder handles the response to the ListAll request. The method always +// closes the http.Response Body. +func (client PublicIPAddressesClient) ListAllResponder(resp *http.Response) (result PublicIPAddressListResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + autorest.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// ListAllNextResults retrieves the next set of results, if any. +func (client PublicIPAddressesClient) ListAllNextResults(lastResults PublicIPAddressListResult) (result PublicIPAddressListResult, ae error) { + req, err := lastResults.PublicIPAddressListResultPreparer() + if err != nil { + return result, autorest.NewErrorWithError(err, "network/PublicIPAddressesClient", "ListAll", autorest.UndefinedStatusCode, "Failure preparing next results request request") + } + if req == nil { + return + } + + resp, err := client.ListAllSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "network/PublicIPAddressesClient", "ListAll", resp.StatusCode, "Failure sending next results request request") + } + + result, err = client.ListAllResponder(resp) + if err != nil { + ae = autorest.NewErrorWithError(err, "network/PublicIPAddressesClient", "ListAll", resp.StatusCode, "Failure responding to next results request request") + } + + return +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/network/routes.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/routes.go new file mode 100644 index 000000000..751b8a6b2 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/routes.go @@ -0,0 +1,332 @@ +package network + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator 0.12.0.0 +// Changes may cause incorrect behavior and will be lost if the code is +// regenerated. 
+ +import ( + "github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest" + "net/http" + "net/url" +) + +// RoutesClient is the the Microsoft Azure Network management API provides a +// RESTful set of web services that interact with Microsoft Azure Networks +// service to manage your network resrources. The API has entities that +// capture the relationship between an end user and the Microsoft Azure +// Networks service. +type RoutesClient struct { + ManagementClient +} + +// NewRoutesClient creates an instance of the RoutesClient client. +func NewRoutesClient(subscriptionID string) RoutesClient { + return NewRoutesClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewRoutesClientWithBaseURI creates an instance of the RoutesClient client. +func NewRoutesClientWithBaseURI(baseURI string, subscriptionID string) RoutesClient { + return RoutesClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// CreateOrUpdate the Put route operation creates/updates a route in the +// specified route table +// +// resourceGroupName is the name of the resource group. routeTableName is the +// name of the route table. routeName is the name of the route. +// routeParameters is parameters supplied to the create/update routeoperation +func (client RoutesClient) CreateOrUpdate(resourceGroupName string, routeTableName string, routeName string, routeParameters Route) (result Route, ae error) { + req, err := client.CreateOrUpdatePreparer(resourceGroupName, routeTableName, routeName, routeParameters) + if err != nil { + return result, autorest.NewErrorWithError(err, "network/RoutesClient", "CreateOrUpdate", autorest.UndefinedStatusCode, "Failure preparing request") + } + + resp, err := client.CreateOrUpdateSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "network/RoutesClient", "CreateOrUpdate", resp.StatusCode, "Failure sending request") + } + + result, err = client.CreateOrUpdateResponder(resp) + if err != nil { + ae = autorest.NewErrorWithError(err, "network/RoutesClient", "CreateOrUpdate", resp.StatusCode, "Failure responding to request") + } + + return +} + +// CreateOrUpdatePreparer prepares the CreateOrUpdate request. +func (client RoutesClient) CreateOrUpdatePreparer(resourceGroupName string, routeTableName string, routeName string, routeParameters Route) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": url.QueryEscape(resourceGroupName), + "routeName": url.QueryEscape(routeName), + "routeTableName": url.QueryEscape(routeTableName), + "subscriptionId": url.QueryEscape(client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsPut(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeTables/{routeTableName}/routes/{routeName}"), + autorest.WithJSON(routeParameters), + autorest.WithPathParameters(pathParameters), + autorest.WithQueryParameters(queryParameters)) +} + +// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the +// http.Response Body if it receives an error. 
+func (client RoutesClient) CreateOrUpdateSender(req *http.Request) (*http.Response, error) { + return client.Send(req, http.StatusOK, http.StatusCreated) +} + +// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always +// closes the http.Response Body. +func (client RoutesClient) CreateOrUpdateResponder(resp *http.Response) (result Route, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + autorest.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// Delete the delete route operation deletes the specified route from a route +// table. +// +// resourceGroupName is the name of the resource group. routeTableName is the +// name of the route table. routeName is the name of the route. +func (client RoutesClient) Delete(resourceGroupName string, routeTableName string, routeName string) (result autorest.Response, ae error) { + req, err := client.DeletePreparer(resourceGroupName, routeTableName, routeName) + if err != nil { + return result, autorest.NewErrorWithError(err, "network/RoutesClient", "Delete", autorest.UndefinedStatusCode, "Failure preparing request") + } + + resp, err := client.DeleteSender(req) + if err != nil { + result.Response = resp + return result, autorest.NewErrorWithError(err, "network/RoutesClient", "Delete", resp.StatusCode, "Failure sending request") + } + + result, err = client.DeleteResponder(resp) + if err != nil { + ae = autorest.NewErrorWithError(err, "network/RoutesClient", "Delete", resp.StatusCode, "Failure responding to request") + } + + return +} + +// DeletePreparer prepares the Delete request. +func (client RoutesClient) DeletePreparer(resourceGroupName string, routeTableName string, routeName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": url.QueryEscape(resourceGroupName), + "routeName": url.QueryEscape(routeName), + "routeTableName": url.QueryEscape(routeTableName), + "subscriptionId": url.QueryEscape(client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsDelete(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeTables/{routeTableName}/routes/{routeName}"), + autorest.WithPathParameters(pathParameters), + autorest.WithQueryParameters(queryParameters)) +} + +// DeleteSender sends the Delete request. The method will close the +// http.Response Body if it receives an error. +func (client RoutesClient) DeleteSender(req *http.Request) (*http.Response, error) { + return client.Send(req, http.StatusAccepted, http.StatusOK, http.StatusNoContent) +} + +// DeleteResponder handles the response to the Delete request. The method always +// closes the http.Response Body. +func (client RoutesClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + autorest.WithErrorUnlessStatusCode(http.StatusAccepted, http.StatusOK, http.StatusNoContent), + autorest.ByClosing()) + result.Response = resp + return +} + +// Get the Get route operation retreives information about the specified route +// from the route table. +// +// resourceGroupName is the name of the resource group. 
routeTableName is the +// name of the route table. routeName is the name of the route. +func (client RoutesClient) Get(resourceGroupName string, routeTableName string, routeName string) (result Route, ae error) { + req, err := client.GetPreparer(resourceGroupName, routeTableName, routeName) + if err != nil { + return result, autorest.NewErrorWithError(err, "network/RoutesClient", "Get", autorest.UndefinedStatusCode, "Failure preparing request") + } + + resp, err := client.GetSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "network/RoutesClient", "Get", resp.StatusCode, "Failure sending request") + } + + result, err = client.GetResponder(resp) + if err != nil { + ae = autorest.NewErrorWithError(err, "network/RoutesClient", "Get", resp.StatusCode, "Failure responding to request") + } + + return +} + +// GetPreparer prepares the Get request. +func (client RoutesClient) GetPreparer(resourceGroupName string, routeTableName string, routeName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": url.QueryEscape(resourceGroupName), + "routeName": url.QueryEscape(routeName), + "routeTableName": url.QueryEscape(routeTableName), + "subscriptionId": url.QueryEscape(client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeTables/{routeTableName}/routes/{routeName}"), + autorest.WithPathParameters(pathParameters), + autorest.WithQueryParameters(queryParameters)) +} + +// GetSender sends the Get request. The method will close the +// http.Response Body if it receives an error. +func (client RoutesClient) GetSender(req *http.Request) (*http.Response, error) { + return client.Send(req, http.StatusOK) +} + +// GetResponder handles the response to the Get request. The method always +// closes the http.Response Body. +func (client RoutesClient) GetResponder(resp *http.Response) (result Route, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + autorest.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// List the List network security rule opertion retrieves all the routes in a +// route table. +// +// resourceGroupName is the name of the resource group. routeTableName is the +// name of the route table. +func (client RoutesClient) List(resourceGroupName string, routeTableName string) (result RouteListResult, ae error) { + req, err := client.ListPreparer(resourceGroupName, routeTableName) + if err != nil { + return result, autorest.NewErrorWithError(err, "network/RoutesClient", "List", autorest.UndefinedStatusCode, "Failure preparing request") + } + + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "network/RoutesClient", "List", resp.StatusCode, "Failure sending request") + } + + result, err = client.ListResponder(resp) + if err != nil { + ae = autorest.NewErrorWithError(err, "network/RoutesClient", "List", resp.StatusCode, "Failure responding to request") + } + + return +} + +// ListPreparer prepares the List request. 
+func (client RoutesClient) ListPreparer(resourceGroupName string, routeTableName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": url.QueryEscape(resourceGroupName), + "routeTableName": url.QueryEscape(routeTableName), + "subscriptionId": url.QueryEscape(client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeTables/{routeTableName}/routes"), + autorest.WithPathParameters(pathParameters), + autorest.WithQueryParameters(queryParameters)) +} + +// ListSender sends the List request. The method will close the +// http.Response Body if it receives an error. +func (client RoutesClient) ListSender(req *http.Request) (*http.Response, error) { + return client.Send(req, http.StatusOK) +} + +// ListResponder handles the response to the List request. The method always +// closes the http.Response Body. +func (client RoutesClient) ListResponder(resp *http.Response) (result RouteListResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + autorest.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// ListNextResults retrieves the next set of results, if any. +func (client RoutesClient) ListNextResults(lastResults RouteListResult) (result RouteListResult, ae error) { + req, err := lastResults.RouteListResultPreparer() + if err != nil { + return result, autorest.NewErrorWithError(err, "network/RoutesClient", "List", autorest.UndefinedStatusCode, "Failure preparing next results request request") + } + if req == nil { + return + } + + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "network/RoutesClient", "List", resp.StatusCode, "Failure sending next results request request") + } + + result, err = client.ListResponder(resp) + if err != nil { + ae = autorest.NewErrorWithError(err, "network/RoutesClient", "List", resp.StatusCode, "Failure responding to next results request request") + } + + return +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/network/routetables.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/routetables.go new file mode 100644 index 000000000..363e6a5a1 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/routetables.go @@ -0,0 +1,412 @@ +package network + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator 0.12.0.0 +// Changes may cause incorrect behavior and will be lost if the code is +// regenerated. 
+ +import ( + "github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest" + "net/http" + "net/url" +) + +// RouteTablesClient is the the Microsoft Azure Network management API +// provides a RESTful set of web services that interact with Microsoft Azure +// Networks service to manage your network resrources. The API has entities +// that capture the relationship between an end user and the Microsoft Azure +// Networks service. +type RouteTablesClient struct { + ManagementClient +} + +// NewRouteTablesClient creates an instance of the RouteTablesClient client. +func NewRouteTablesClient(subscriptionID string) RouteTablesClient { + return NewRouteTablesClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewRouteTablesClientWithBaseURI creates an instance of the +// RouteTablesClient client. +func NewRouteTablesClientWithBaseURI(baseURI string, subscriptionID string) RouteTablesClient { + return RouteTablesClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// CreateOrUpdate the Put RouteTable operation creates/updates a route tablein +// the specified resource group. +// +// resourceGroupName is the name of the resource group. routeTableName is the +// name of the route table. parameters is parameters supplied to the +// create/update Route Table operation +func (client RouteTablesClient) CreateOrUpdate(resourceGroupName string, routeTableName string, parameters RouteTable) (result RouteTable, ae error) { + req, err := client.CreateOrUpdatePreparer(resourceGroupName, routeTableName, parameters) + if err != nil { + return result, autorest.NewErrorWithError(err, "network/RouteTablesClient", "CreateOrUpdate", autorest.UndefinedStatusCode, "Failure preparing request") + } + + resp, err := client.CreateOrUpdateSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "network/RouteTablesClient", "CreateOrUpdate", resp.StatusCode, "Failure sending request") + } + + result, err = client.CreateOrUpdateResponder(resp) + if err != nil { + ae = autorest.NewErrorWithError(err, "network/RouteTablesClient", "CreateOrUpdate", resp.StatusCode, "Failure responding to request") + } + + return +} + +// CreateOrUpdatePreparer prepares the CreateOrUpdate request. +func (client RouteTablesClient) CreateOrUpdatePreparer(resourceGroupName string, routeTableName string, parameters RouteTable) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": url.QueryEscape(resourceGroupName), + "routeTableName": url.QueryEscape(routeTableName), + "subscriptionId": url.QueryEscape(client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsPut(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeTables/{routeTableName}"), + autorest.WithJSON(parameters), + autorest.WithPathParameters(pathParameters), + autorest.WithQueryParameters(queryParameters)) +} + +// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the +// http.Response Body if it receives an error. +func (client RouteTablesClient) CreateOrUpdateSender(req *http.Request) (*http.Response, error) { + return client.Send(req, http.StatusOK, http.StatusCreated) +} + +// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. 
The method always +// closes the http.Response Body. +func (client RouteTablesClient) CreateOrUpdateResponder(resp *http.Response) (result RouteTable, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + autorest.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// Delete the Delete RouteTable operation deletes the specifed Route Table +// +// resourceGroupName is the name of the resource group. routeTableName is the +// name of the route table. +func (client RouteTablesClient) Delete(resourceGroupName string, routeTableName string) (result autorest.Response, ae error) { + req, err := client.DeletePreparer(resourceGroupName, routeTableName) + if err != nil { + return result, autorest.NewErrorWithError(err, "network/RouteTablesClient", "Delete", autorest.UndefinedStatusCode, "Failure preparing request") + } + + resp, err := client.DeleteSender(req) + if err != nil { + result.Response = resp + return result, autorest.NewErrorWithError(err, "network/RouteTablesClient", "Delete", resp.StatusCode, "Failure sending request") + } + + result, err = client.DeleteResponder(resp) + if err != nil { + ae = autorest.NewErrorWithError(err, "network/RouteTablesClient", "Delete", resp.StatusCode, "Failure responding to request") + } + + return +} + +// DeletePreparer prepares the Delete request. +func (client RouteTablesClient) DeletePreparer(resourceGroupName string, routeTableName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": url.QueryEscape(resourceGroupName), + "routeTableName": url.QueryEscape(routeTableName), + "subscriptionId": url.QueryEscape(client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsDelete(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeTables/{routeTableName}"), + autorest.WithPathParameters(pathParameters), + autorest.WithQueryParameters(queryParameters)) +} + +// DeleteSender sends the Delete request. The method will close the +// http.Response Body if it receives an error. +func (client RouteTablesClient) DeleteSender(req *http.Request) (*http.Response, error) { + return client.Send(req, http.StatusNoContent, http.StatusOK, http.StatusAccepted) +} + +// DeleteResponder handles the response to the Delete request. The method always +// closes the http.Response Body. +func (client RouteTablesClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + autorest.WithErrorUnlessStatusCode(http.StatusNoContent, http.StatusOK, http.StatusAccepted), + autorest.ByClosing()) + result.Response = resp + return +} + +// Get the Get RouteTables operation retrieves information about the specified +// route table. +// +// resourceGroupName is the name of the resource group. routeTableName is the +// name of the route table. expand is expand references resources. 
+func (client RouteTablesClient) Get(resourceGroupName string, routeTableName string, expand string) (result RouteTable, ae error) { + req, err := client.GetPreparer(resourceGroupName, routeTableName, expand) + if err != nil { + return result, autorest.NewErrorWithError(err, "network/RouteTablesClient", "Get", autorest.UndefinedStatusCode, "Failure preparing request") + } + + resp, err := client.GetSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "network/RouteTablesClient", "Get", resp.StatusCode, "Failure sending request") + } + + result, err = client.GetResponder(resp) + if err != nil { + ae = autorest.NewErrorWithError(err, "network/RouteTablesClient", "Get", resp.StatusCode, "Failure responding to request") + } + + return +} + +// GetPreparer prepares the Get request. +func (client RouteTablesClient) GetPreparer(resourceGroupName string, routeTableName string, expand string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": url.QueryEscape(resourceGroupName), + "routeTableName": url.QueryEscape(routeTableName), + "subscriptionId": url.QueryEscape(client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + if len(expand) > 0 { + queryParameters["$expand"] = expand + } + + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeTables/{routeTableName}"), + autorest.WithPathParameters(pathParameters), + autorest.WithQueryParameters(queryParameters)) +} + +// GetSender sends the Get request. The method will close the +// http.Response Body if it receives an error. +func (client RouteTablesClient) GetSender(req *http.Request) (*http.Response, error) { + return client.Send(req, http.StatusOK) +} + +// GetResponder handles the response to the Get request. The method always +// closes the http.Response Body. +func (client RouteTablesClient) GetResponder(resp *http.Response) (result RouteTable, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + autorest.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// List the list RouteTables returns all route tables in a resource group +// +// resourceGroupName is the name of the resource group. +func (client RouteTablesClient) List(resourceGroupName string) (result RouteTableListResult, ae error) { + req, err := client.ListPreparer(resourceGroupName) + if err != nil { + return result, autorest.NewErrorWithError(err, "network/RouteTablesClient", "List", autorest.UndefinedStatusCode, "Failure preparing request") + } + + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "network/RouteTablesClient", "List", resp.StatusCode, "Failure sending request") + } + + result, err = client.ListResponder(resp) + if err != nil { + ae = autorest.NewErrorWithError(err, "network/RouteTablesClient", "List", resp.StatusCode, "Failure responding to request") + } + + return +} + +// ListPreparer prepares the List request. 
+func (client RouteTablesClient) ListPreparer(resourceGroupName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": url.QueryEscape(resourceGroupName), + "subscriptionId": url.QueryEscape(client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeTables"), + autorest.WithPathParameters(pathParameters), + autorest.WithQueryParameters(queryParameters)) +} + +// ListSender sends the List request. The method will close the +// http.Response Body if it receives an error. +func (client RouteTablesClient) ListSender(req *http.Request) (*http.Response, error) { + return client.Send(req, http.StatusOK) +} + +// ListResponder handles the response to the List request. The method always +// closes the http.Response Body. +func (client RouteTablesClient) ListResponder(resp *http.Response) (result RouteTableListResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + autorest.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// ListNextResults retrieves the next set of results, if any. +func (client RouteTablesClient) ListNextResults(lastResults RouteTableListResult) (result RouteTableListResult, ae error) { + req, err := lastResults.RouteTableListResultPreparer() + if err != nil { + return result, autorest.NewErrorWithError(err, "network/RouteTablesClient", "List", autorest.UndefinedStatusCode, "Failure preparing next results request request") + } + if req == nil { + return + } + + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "network/RouteTablesClient", "List", resp.StatusCode, "Failure sending next results request request") + } + + result, err = client.ListResponder(resp) + if err != nil { + ae = autorest.NewErrorWithError(err, "network/RouteTablesClient", "List", resp.StatusCode, "Failure responding to next results request request") + } + + return +} + +// ListAll the list RouteTables returns all route tables in a subscription +func (client RouteTablesClient) ListAll() (result RouteTableListResult, ae error) { + req, err := client.ListAllPreparer() + if err != nil { + return result, autorest.NewErrorWithError(err, "network/RouteTablesClient", "ListAll", autorest.UndefinedStatusCode, "Failure preparing request") + } + + resp, err := client.ListAllSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "network/RouteTablesClient", "ListAll", resp.StatusCode, "Failure sending request") + } + + result, err = client.ListAllResponder(resp) + if err != nil { + ae = autorest.NewErrorWithError(err, "network/RouteTablesClient", "ListAll", resp.StatusCode, "Failure responding to request") + } + + return +} + +// ListAllPreparer prepares the ListAll request. 
+func (client RouteTablesClient) ListAllPreparer() (*http.Request, error) { + pathParameters := map[string]interface{}{ + "subscriptionId": url.QueryEscape(client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPath("/subscriptions/{subscriptionId}/providers/Microsoft.Network/routeTables"), + autorest.WithPathParameters(pathParameters), + autorest.WithQueryParameters(queryParameters)) +} + +// ListAllSender sends the ListAll request. The method will close the +// http.Response Body if it receives an error. +func (client RouteTablesClient) ListAllSender(req *http.Request) (*http.Response, error) { + return client.Send(req, http.StatusOK) +} + +// ListAllResponder handles the response to the ListAll request. The method always +// closes the http.Response Body. +func (client RouteTablesClient) ListAllResponder(resp *http.Response) (result RouteTableListResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + autorest.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// ListAllNextResults retrieves the next set of results, if any. +func (client RouteTablesClient) ListAllNextResults(lastResults RouteTableListResult) (result RouteTableListResult, ae error) { + req, err := lastResults.RouteTableListResultPreparer() + if err != nil { + return result, autorest.NewErrorWithError(err, "network/RouteTablesClient", "ListAll", autorest.UndefinedStatusCode, "Failure preparing next results request request") + } + if req == nil { + return + } + + resp, err := client.ListAllSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "network/RouteTablesClient", "ListAll", resp.StatusCode, "Failure sending next results request request") + } + + result, err = client.ListAllResponder(resp) + if err != nil { + ae = autorest.NewErrorWithError(err, "network/RouteTablesClient", "ListAll", resp.StatusCode, "Failure responding to next results request request") + } + + return +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/network/securitygroups.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/securitygroups.go new file mode 100644 index 000000000..1d3b3f176 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/securitygroups.go @@ -0,0 +1,418 @@ +package network + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator 0.12.0.0 +// Changes may cause incorrect behavior and will be lost if the code is +// regenerated. 
+ +import ( + "github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest" + "net/http" + "net/url" +) + +// SecurityGroupsClient is the the Microsoft Azure Network management API +// provides a RESTful set of web services that interact with Microsoft Azure +// Networks service to manage your network resrources. The API has entities +// that capture the relationship between an end user and the Microsoft Azure +// Networks service. +type SecurityGroupsClient struct { + ManagementClient +} + +// NewSecurityGroupsClient creates an instance of the SecurityGroupsClient +// client. +func NewSecurityGroupsClient(subscriptionID string) SecurityGroupsClient { + return NewSecurityGroupsClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewSecurityGroupsClientWithBaseURI creates an instance of the +// SecurityGroupsClient client. +func NewSecurityGroupsClientWithBaseURI(baseURI string, subscriptionID string) SecurityGroupsClient { + return SecurityGroupsClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// CreateOrUpdate the Put NetworkSecurityGroup operation creates/updates a +// network security groupin the specified resource group. +// +// resourceGroupName is the name of the resource group. +// networkSecurityGroupName is the name of the network security group. +// parameters is parameters supplied to the create/update Network Security +// Group operation +func (client SecurityGroupsClient) CreateOrUpdate(resourceGroupName string, networkSecurityGroupName string, parameters SecurityGroup) (result SecurityGroup, ae error) { + req, err := client.CreateOrUpdatePreparer(resourceGroupName, networkSecurityGroupName, parameters) + if err != nil { + return result, autorest.NewErrorWithError(err, "network/SecurityGroupsClient", "CreateOrUpdate", autorest.UndefinedStatusCode, "Failure preparing request") + } + + resp, err := client.CreateOrUpdateSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "network/SecurityGroupsClient", "CreateOrUpdate", resp.StatusCode, "Failure sending request") + } + + result, err = client.CreateOrUpdateResponder(resp) + if err != nil { + ae = autorest.NewErrorWithError(err, "network/SecurityGroupsClient", "CreateOrUpdate", resp.StatusCode, "Failure responding to request") + } + + return +} + +// CreateOrUpdatePreparer prepares the CreateOrUpdate request. +func (client SecurityGroupsClient) CreateOrUpdatePreparer(resourceGroupName string, networkSecurityGroupName string, parameters SecurityGroup) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "networkSecurityGroupName": url.QueryEscape(networkSecurityGroupName), + "resourceGroupName": url.QueryEscape(resourceGroupName), + "subscriptionId": url.QueryEscape(client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsPut(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkSecurityGroups/{networkSecurityGroupName}"), + autorest.WithJSON(parameters), + autorest.WithPathParameters(pathParameters), + autorest.WithQueryParameters(queryParameters)) +} + +// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the +// http.Response Body if it receives an error. 
+func (client SecurityGroupsClient) CreateOrUpdateSender(req *http.Request) (*http.Response, error) { + return client.Send(req, http.StatusCreated, http.StatusOK) +} + +// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always +// closes the http.Response Body. +func (client SecurityGroupsClient) CreateOrUpdateResponder(resp *http.Response) (result SecurityGroup, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + autorest.WithErrorUnlessStatusCode(http.StatusCreated, http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// Delete the Delete NetworkSecurityGroup operation deletes the specifed +// network security group +// +// resourceGroupName is the name of the resource group. +// networkSecurityGroupName is the name of the network security group. +func (client SecurityGroupsClient) Delete(resourceGroupName string, networkSecurityGroupName string) (result autorest.Response, ae error) { + req, err := client.DeletePreparer(resourceGroupName, networkSecurityGroupName) + if err != nil { + return result, autorest.NewErrorWithError(err, "network/SecurityGroupsClient", "Delete", autorest.UndefinedStatusCode, "Failure preparing request") + } + + resp, err := client.DeleteSender(req) + if err != nil { + result.Response = resp + return result, autorest.NewErrorWithError(err, "network/SecurityGroupsClient", "Delete", resp.StatusCode, "Failure sending request") + } + + result, err = client.DeleteResponder(resp) + if err != nil { + ae = autorest.NewErrorWithError(err, "network/SecurityGroupsClient", "Delete", resp.StatusCode, "Failure responding to request") + } + + return +} + +// DeletePreparer prepares the Delete request. +func (client SecurityGroupsClient) DeletePreparer(resourceGroupName string, networkSecurityGroupName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "networkSecurityGroupName": url.QueryEscape(networkSecurityGroupName), + "resourceGroupName": url.QueryEscape(resourceGroupName), + "subscriptionId": url.QueryEscape(client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsDelete(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkSecurityGroups/{networkSecurityGroupName}"), + autorest.WithPathParameters(pathParameters), + autorest.WithQueryParameters(queryParameters)) +} + +// DeleteSender sends the Delete request. The method will close the +// http.Response Body if it receives an error. +func (client SecurityGroupsClient) DeleteSender(req *http.Request) (*http.Response, error) { + return client.Send(req, http.StatusAccepted, http.StatusOK, http.StatusNoContent) +} + +// DeleteResponder handles the response to the Delete request. The method always +// closes the http.Response Body. +func (client SecurityGroupsClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + autorest.WithErrorUnlessStatusCode(http.StatusAccepted, http.StatusOK, http.StatusNoContent), + autorest.ByClosing()) + result.Response = resp + return +} + +// Get the Get NetworkSecurityGroups operation retrieves information about the +// specified network security group. 
+// +// resourceGroupName is the name of the resource group. +// networkSecurityGroupName is the name of the network security group. expand +// is expand references resources. +func (client SecurityGroupsClient) Get(resourceGroupName string, networkSecurityGroupName string, expand string) (result SecurityGroup, ae error) { + req, err := client.GetPreparer(resourceGroupName, networkSecurityGroupName, expand) + if err != nil { + return result, autorest.NewErrorWithError(err, "network/SecurityGroupsClient", "Get", autorest.UndefinedStatusCode, "Failure preparing request") + } + + resp, err := client.GetSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "network/SecurityGroupsClient", "Get", resp.StatusCode, "Failure sending request") + } + + result, err = client.GetResponder(resp) + if err != nil { + ae = autorest.NewErrorWithError(err, "network/SecurityGroupsClient", "Get", resp.StatusCode, "Failure responding to request") + } + + return +} + +// GetPreparer prepares the Get request. +func (client SecurityGroupsClient) GetPreparer(resourceGroupName string, networkSecurityGroupName string, expand string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "networkSecurityGroupName": url.QueryEscape(networkSecurityGroupName), + "resourceGroupName": url.QueryEscape(resourceGroupName), + "subscriptionId": url.QueryEscape(client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + if len(expand) > 0 { + queryParameters["$expand"] = expand + } + + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkSecurityGroups/{networkSecurityGroupName}"), + autorest.WithPathParameters(pathParameters), + autorest.WithQueryParameters(queryParameters)) +} + +// GetSender sends the Get request. The method will close the +// http.Response Body if it receives an error. +func (client SecurityGroupsClient) GetSender(req *http.Request) (*http.Response, error) { + return client.Send(req, http.StatusOK) +} + +// GetResponder handles the response to the Get request. The method always +// closes the http.Response Body. +func (client SecurityGroupsClient) GetResponder(resp *http.Response) (result SecurityGroup, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + autorest.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// List the list NetworkSecurityGroups returns all network security groups in +// a resource group +// +// resourceGroupName is the name of the resource group. 
+func (client SecurityGroupsClient) List(resourceGroupName string) (result SecurityGroupListResult, ae error) { + req, err := client.ListPreparer(resourceGroupName) + if err != nil { + return result, autorest.NewErrorWithError(err, "network/SecurityGroupsClient", "List", autorest.UndefinedStatusCode, "Failure preparing request") + } + + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "network/SecurityGroupsClient", "List", resp.StatusCode, "Failure sending request") + } + + result, err = client.ListResponder(resp) + if err != nil { + ae = autorest.NewErrorWithError(err, "network/SecurityGroupsClient", "List", resp.StatusCode, "Failure responding to request") + } + + return +} + +// ListPreparer prepares the List request. +func (client SecurityGroupsClient) ListPreparer(resourceGroupName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": url.QueryEscape(resourceGroupName), + "subscriptionId": url.QueryEscape(client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkSecurityGroups"), + autorest.WithPathParameters(pathParameters), + autorest.WithQueryParameters(queryParameters)) +} + +// ListSender sends the List request. The method will close the +// http.Response Body if it receives an error. +func (client SecurityGroupsClient) ListSender(req *http.Request) (*http.Response, error) { + return client.Send(req, http.StatusOK) +} + +// ListResponder handles the response to the List request. The method always +// closes the http.Response Body. +func (client SecurityGroupsClient) ListResponder(resp *http.Response) (result SecurityGroupListResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + autorest.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// ListNextResults retrieves the next set of results, if any. 
+func (client SecurityGroupsClient) ListNextResults(lastResults SecurityGroupListResult) (result SecurityGroupListResult, ae error) {
+	req, err := lastResults.SecurityGroupListResultPreparer()
+	if err != nil {
+		return result, autorest.NewErrorWithError(err, "network/SecurityGroupsClient", "List", autorest.UndefinedStatusCode, "Failure preparing next results request")
+	}
+	if req == nil {
+		return
+	}
+
+	resp, err := client.ListSender(req)
+	if err != nil {
+		result.Response = autorest.Response{Response: resp}
+		return result, autorest.NewErrorWithError(err, "network/SecurityGroupsClient", "List", resp.StatusCode, "Failure sending next results request")
+	}
+
+	result, err = client.ListResponder(resp)
+	if err != nil {
+		ae = autorest.NewErrorWithError(err, "network/SecurityGroupsClient", "List", resp.StatusCode, "Failure responding to next results request")
+	}
+
+	return
+}
+
+// ListAll the list NetworkSecurityGroups operation returns all network
+// security groups in a subscription.
+func (client SecurityGroupsClient) ListAll() (result SecurityGroupListResult, ae error) {
+	req, err := client.ListAllPreparer()
+	if err != nil {
+		return result, autorest.NewErrorWithError(err, "network/SecurityGroupsClient", "ListAll", autorest.UndefinedStatusCode, "Failure preparing request")
+	}
+
+	resp, err := client.ListAllSender(req)
+	if err != nil {
+		result.Response = autorest.Response{Response: resp}
+		return result, autorest.NewErrorWithError(err, "network/SecurityGroupsClient", "ListAll", resp.StatusCode, "Failure sending request")
+	}
+
+	result, err = client.ListAllResponder(resp)
+	if err != nil {
+		ae = autorest.NewErrorWithError(err, "network/SecurityGroupsClient", "ListAll", resp.StatusCode, "Failure responding to request")
+	}
+
+	return
+}
+
+// ListAllPreparer prepares the ListAll request.
+func (client SecurityGroupsClient) ListAllPreparer() (*http.Request, error) {
+	pathParameters := map[string]interface{}{
+		"subscriptionId": url.QueryEscape(client.SubscriptionID),
+	}
+
+	queryParameters := map[string]interface{}{
+		"api-version": APIVersion,
+	}
+
+	return autorest.Prepare(&http.Request{},
+		autorest.AsJSON(),
+		autorest.AsGet(),
+		autorest.WithBaseURL(client.BaseURI),
+		autorest.WithPath("/subscriptions/{subscriptionId}/providers/Microsoft.Network/networkSecurityGroups"),
+		autorest.WithPathParameters(pathParameters),
+		autorest.WithQueryParameters(queryParameters))
+}
+
+// ListAllSender sends the ListAll request. The method will close the
+// http.Response Body if it receives an error.
+func (client SecurityGroupsClient) ListAllSender(req *http.Request) (*http.Response, error) {
+	return client.Send(req, http.StatusOK)
+}
+
+// ListAllResponder handles the response to the ListAll request. The method always
+// closes the http.Response Body.
+func (client SecurityGroupsClient) ListAllResponder(resp *http.Response) (result SecurityGroupListResult, err error) {
+	err = autorest.Respond(
+		resp,
+		client.ByInspecting(),
+		autorest.WithErrorUnlessStatusCode(http.StatusOK),
+		autorest.ByUnmarshallingJSON(&result),
+		autorest.ByClosing())
+	result.Response = autorest.Response{Response: resp}
+	return
+}
+
+// ListAllNextResults retrieves the next set of results, if any.
+func (client SecurityGroupsClient) ListAllNextResults(lastResults SecurityGroupListResult) (result SecurityGroupListResult, ae error) {
+	req, err := lastResults.SecurityGroupListResultPreparer()
+	if err != nil {
+		return result, autorest.NewErrorWithError(err, "network/SecurityGroupsClient", "ListAll", autorest.UndefinedStatusCode, "Failure preparing next results request")
+	}
+	if req == nil {
+		return
+	}
+
+	resp, err := client.ListAllSender(req)
+	if err != nil {
+		result.Response = autorest.Response{Response: resp}
+		return result, autorest.NewErrorWithError(err, "network/SecurityGroupsClient", "ListAll", resp.StatusCode, "Failure sending next results request")
+	}
+
+	result, err = client.ListAllResponder(resp)
+	if err != nil {
+		ae = autorest.NewErrorWithError(err, "network/SecurityGroupsClient", "ListAll", resp.StatusCode, "Failure responding to next results request")
+	}
+
+	return
+}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/network/securityrules.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/securityrules.go
new file mode 100644
index 000000000..b009f0439
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/securityrules.go
@@ -0,0 +1,338 @@
+package network
+
+// Copyright (c) Microsoft and contributors. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Code generated by Microsoft (R) AutoRest Code Generator 0.12.0.0
+// Changes may cause incorrect behavior and will be lost if the code is
+// regenerated.
+
+import (
+	"github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest"
+	"net/http"
+	"net/url"
+)
+
+// SecurityRulesClient is the client for the Microsoft Azure Network
+// management API, which provides a RESTful set of web services that
+// interact with the Microsoft Azure Networks service to manage your
+// network resources. The API has entities that capture the relationship
+// between an end user and the Microsoft Azure Networks service.
+type SecurityRulesClient struct {
+	ManagementClient
+}
+
+// NewSecurityRulesClient creates an instance of the SecurityRulesClient
+// client.
+func NewSecurityRulesClient(subscriptionID string) SecurityRulesClient {
+	return NewSecurityRulesClientWithBaseURI(DefaultBaseURI, subscriptionID)
+}
+
+// NewSecurityRulesClientWithBaseURI creates an instance of the
+// SecurityRulesClient client.
+func NewSecurityRulesClientWithBaseURI(baseURI string, subscriptionID string) SecurityRulesClient {
+	return SecurityRulesClient{NewWithBaseURI(baseURI, subscriptionID)}
+}
+
+// CreateOrUpdate the Put network security rule operation creates/updates a
+// security rule in the specified network security group.
+//
+// resourceGroupName is the name of the resource group.
+// networkSecurityGroupName is the name of the network security group.
+// securityRuleName is the name of the security rule.
securityRuleParameters +// is parameters supplied to the create/update network security rule +// operation +func (client SecurityRulesClient) CreateOrUpdate(resourceGroupName string, networkSecurityGroupName string, securityRuleName string, securityRuleParameters SecurityRule) (result SecurityRule, ae error) { + req, err := client.CreateOrUpdatePreparer(resourceGroupName, networkSecurityGroupName, securityRuleName, securityRuleParameters) + if err != nil { + return result, autorest.NewErrorWithError(err, "network/SecurityRulesClient", "CreateOrUpdate", autorest.UndefinedStatusCode, "Failure preparing request") + } + + resp, err := client.CreateOrUpdateSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "network/SecurityRulesClient", "CreateOrUpdate", resp.StatusCode, "Failure sending request") + } + + result, err = client.CreateOrUpdateResponder(resp) + if err != nil { + ae = autorest.NewErrorWithError(err, "network/SecurityRulesClient", "CreateOrUpdate", resp.StatusCode, "Failure responding to request") + } + + return +} + +// CreateOrUpdatePreparer prepares the CreateOrUpdate request. +func (client SecurityRulesClient) CreateOrUpdatePreparer(resourceGroupName string, networkSecurityGroupName string, securityRuleName string, securityRuleParameters SecurityRule) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "networkSecurityGroupName": url.QueryEscape(networkSecurityGroupName), + "resourceGroupName": url.QueryEscape(resourceGroupName), + "securityRuleName": url.QueryEscape(securityRuleName), + "subscriptionId": url.QueryEscape(client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsPut(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkSecurityGroups/{networkSecurityGroupName}/securityRules/{securityRuleName}"), + autorest.WithJSON(securityRuleParameters), + autorest.WithPathParameters(pathParameters), + autorest.WithQueryParameters(queryParameters)) +} + +// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the +// http.Response Body if it receives an error. +func (client SecurityRulesClient) CreateOrUpdateSender(req *http.Request) (*http.Response, error) { + return client.Send(req, http.StatusOK, http.StatusCreated) +} + +// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always +// closes the http.Response Body. +func (client SecurityRulesClient) CreateOrUpdateResponder(resp *http.Response) (result SecurityRule, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + autorest.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// Delete the delete network security rule operation deletes the specified +// network security rule. +// +// resourceGroupName is the name of the resource group. +// networkSecurityGroupName is the name of the network security group. +// securityRuleName is the name of the security rule. 
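As a hedged sketch of the CreateOrUpdate operation above in use: the rule settings nest under Properties in the generated models. The model and field names, the string-backed enum literals, and the `to.StringPtr` helper from the vendored autorest/to package are assumptions based on the 2015-06-15 model shape, not verified against this exact snapshot.

```go
// Illustrative only; assumes imports:
//   "github.com/Azure/azure-sdk-for-go/arm/network"
//   "github.com/Azure/go-autorest/autorest/to"
func allowSSH(rulesClient network.SecurityRulesClient) error {
	priority := int32(100) // lower numbers take precedence
	rule := network.SecurityRule{
		Properties: &network.SecurityRulePropertiesFormat{ // assumed model name
			Protocol:                 "Tcp", // untyped constants satisfy the string-backed enums
			SourcePortRange:          to.StringPtr("*"),
			DestinationPortRange:     to.StringPtr("22"),
			SourceAddressPrefix:      to.StringPtr("*"),
			DestinationAddressPrefix: to.StringPtr("*"),
			Access:                   "Allow",
			Direction:                "Inbound",
			Priority:                 &priority,
		},
	}
	// Hypothetical resource names; 200 and 201 both count as success here.
	_, err := rulesClient.CreateOrUpdate("my-rg", "my-nsg", "allow-ssh", rule)
	return err
}
```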
+func (client SecurityRulesClient) Delete(resourceGroupName string, networkSecurityGroupName string, securityRuleName string) (result autorest.Response, ae error) { + req, err := client.DeletePreparer(resourceGroupName, networkSecurityGroupName, securityRuleName) + if err != nil { + return result, autorest.NewErrorWithError(err, "network/SecurityRulesClient", "Delete", autorest.UndefinedStatusCode, "Failure preparing request") + } + + resp, err := client.DeleteSender(req) + if err != nil { + result.Response = resp + return result, autorest.NewErrorWithError(err, "network/SecurityRulesClient", "Delete", resp.StatusCode, "Failure sending request") + } + + result, err = client.DeleteResponder(resp) + if err != nil { + ae = autorest.NewErrorWithError(err, "network/SecurityRulesClient", "Delete", resp.StatusCode, "Failure responding to request") + } + + return +} + +// DeletePreparer prepares the Delete request. +func (client SecurityRulesClient) DeletePreparer(resourceGroupName string, networkSecurityGroupName string, securityRuleName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "networkSecurityGroupName": url.QueryEscape(networkSecurityGroupName), + "resourceGroupName": url.QueryEscape(resourceGroupName), + "securityRuleName": url.QueryEscape(securityRuleName), + "subscriptionId": url.QueryEscape(client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsDelete(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkSecurityGroups/{networkSecurityGroupName}/securityRules/{securityRuleName}"), + autorest.WithPathParameters(pathParameters), + autorest.WithQueryParameters(queryParameters)) +} + +// DeleteSender sends the Delete request. The method will close the +// http.Response Body if it receives an error. +func (client SecurityRulesClient) DeleteSender(req *http.Request) (*http.Response, error) { + return client.Send(req, http.StatusNoContent, http.StatusAccepted, http.StatusOK) +} + +// DeleteResponder handles the response to the Delete request. The method always +// closes the http.Response Body. +func (client SecurityRulesClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + autorest.WithErrorUnlessStatusCode(http.StatusNoContent, http.StatusAccepted, http.StatusOK), + autorest.ByClosing()) + result.Response = resp + return +} + +// Get the Get NetworkSecurityRule operation retreives information about the +// specified network security rule. +// +// resourceGroupName is the name of the resource group. +// networkSecurityGroupName is the name of the network security group. +// securityRuleName is the name of the security rule. 
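Delete returns only the raw autorest.Response, and DeleteSender treats 204, 202 and 200 as success; a 202 means the service accepted the request and completes the deletion asynchronously. A short sketch, assuming an authenticated client:

```go
// Illustrative only; assumes imports "log", "net/http" and the network package.
func deleteRule(rulesClient network.SecurityRulesClient) error {
	resp, err := rulesClient.Delete("my-rg", "my-nsg", "allow-ssh") // hypothetical names
	if err != nil {
		return err
	}
	if resp.Response != nil && resp.StatusCode == http.StatusAccepted {
		// 202: the delete was accepted and finishes asynchronously server-side.
		log.Println("delete accepted; still in progress")
	}
	return nil
}
```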
+func (client SecurityRulesClient) Get(resourceGroupName string, networkSecurityGroupName string, securityRuleName string) (result SecurityRule, ae error) { + req, err := client.GetPreparer(resourceGroupName, networkSecurityGroupName, securityRuleName) + if err != nil { + return result, autorest.NewErrorWithError(err, "network/SecurityRulesClient", "Get", autorest.UndefinedStatusCode, "Failure preparing request") + } + + resp, err := client.GetSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "network/SecurityRulesClient", "Get", resp.StatusCode, "Failure sending request") + } + + result, err = client.GetResponder(resp) + if err != nil { + ae = autorest.NewErrorWithError(err, "network/SecurityRulesClient", "Get", resp.StatusCode, "Failure responding to request") + } + + return +} + +// GetPreparer prepares the Get request. +func (client SecurityRulesClient) GetPreparer(resourceGroupName string, networkSecurityGroupName string, securityRuleName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "networkSecurityGroupName": url.QueryEscape(networkSecurityGroupName), + "resourceGroupName": url.QueryEscape(resourceGroupName), + "securityRuleName": url.QueryEscape(securityRuleName), + "subscriptionId": url.QueryEscape(client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkSecurityGroups/{networkSecurityGroupName}/securityRules/{securityRuleName}"), + autorest.WithPathParameters(pathParameters), + autorest.WithQueryParameters(queryParameters)) +} + +// GetSender sends the Get request. The method will close the +// http.Response Body if it receives an error. +func (client SecurityRulesClient) GetSender(req *http.Request) (*http.Response, error) { + return client.Send(req, http.StatusOK) +} + +// GetResponder handles the response to the Get request. The method always +// closes the http.Response Body. +func (client SecurityRulesClient) GetResponder(resp *http.Response) (result SecurityRule, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + autorest.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// List the List network security rule opertion retrieves all the security +// rules in a network security group. +// +// resourceGroupName is the name of the resource group. +// networkSecurityGroupName is the name of the network security group. 
+func (client SecurityRulesClient) List(resourceGroupName string, networkSecurityGroupName string) (result SecurityRuleListResult, ae error) { + req, err := client.ListPreparer(resourceGroupName, networkSecurityGroupName) + if err != nil { + return result, autorest.NewErrorWithError(err, "network/SecurityRulesClient", "List", autorest.UndefinedStatusCode, "Failure preparing request") + } + + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "network/SecurityRulesClient", "List", resp.StatusCode, "Failure sending request") + } + + result, err = client.ListResponder(resp) + if err != nil { + ae = autorest.NewErrorWithError(err, "network/SecurityRulesClient", "List", resp.StatusCode, "Failure responding to request") + } + + return +} + +// ListPreparer prepares the List request. +func (client SecurityRulesClient) ListPreparer(resourceGroupName string, networkSecurityGroupName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "networkSecurityGroupName": url.QueryEscape(networkSecurityGroupName), + "resourceGroupName": url.QueryEscape(resourceGroupName), + "subscriptionId": url.QueryEscape(client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkSecurityGroups/{networkSecurityGroupName}/securityRules"), + autorest.WithPathParameters(pathParameters), + autorest.WithQueryParameters(queryParameters)) +} + +// ListSender sends the List request. The method will close the +// http.Response Body if it receives an error. +func (client SecurityRulesClient) ListSender(req *http.Request) (*http.Response, error) { + return client.Send(req, http.StatusOK) +} + +// ListResponder handles the response to the List request. The method always +// closes the http.Response Body. +func (client SecurityRulesClient) ListResponder(resp *http.Response) (result SecurityRuleListResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + autorest.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// ListNextResults retrieves the next set of results, if any. 
+func (client SecurityRulesClient) ListNextResults(lastResults SecurityRuleListResult) (result SecurityRuleListResult, ae error) {
+	req, err := lastResults.SecurityRuleListResultPreparer()
+	if err != nil {
+		return result, autorest.NewErrorWithError(err, "network/SecurityRulesClient", "List", autorest.UndefinedStatusCode, "Failure preparing next results request")
+	}
+	if req == nil {
+		return
+	}
+
+	resp, err := client.ListSender(req)
+	if err != nil {
+		result.Response = autorest.Response{Response: resp}
+		return result, autorest.NewErrorWithError(err, "network/SecurityRulesClient", "List", resp.StatusCode, "Failure sending next results request")
+	}
+
+	result, err = client.ListResponder(resp)
+	if err != nil {
+		ae = autorest.NewErrorWithError(err, "network/SecurityRulesClient", "List", resp.StatusCode, "Failure responding to next results request")
+	}
+
+	return
+}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/network/subnets.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/subnets.go
new file mode 100644
index 000000000..94ecf759d
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/subnets.go
@@ -0,0 +1,336 @@
+package network
+
+// Copyright (c) Microsoft and contributors. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Code generated by Microsoft (R) AutoRest Code Generator 0.12.0.0
+// Changes may cause incorrect behavior and will be lost if the code is
+// regenerated.
+
+import (
+	"github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest"
+	"net/http"
+	"net/url"
+)
+
+// SubnetsClient is the client for the Microsoft Azure Network management
+// API, which provides a RESTful set of web services that interact with the
+// Microsoft Azure Networks service to manage your network resources. The
+// API has entities that capture the relationship between an end user and
+// the Microsoft Azure Networks service.
+type SubnetsClient struct {
+	ManagementClient
+}
+
+// NewSubnetsClient creates an instance of the SubnetsClient client.
+func NewSubnetsClient(subscriptionID string) SubnetsClient {
+	return NewSubnetsClientWithBaseURI(DefaultBaseURI, subscriptionID)
+}
+
+// NewSubnetsClientWithBaseURI creates an instance of the SubnetsClient client.
+func NewSubnetsClientWithBaseURI(baseURI string, subscriptionID string) SubnetsClient {
+	return SubnetsClient{NewWithBaseURI(baseURI, subscriptionID)}
+}
+
+// CreateOrUpdate the Put Subnet operation creates/updates a subnet in
+// the specified virtual network.
+//
+// resourceGroupName is the name of the resource group. virtualNetworkName is
+// the name of the virtual network. subnetName is the name of the subnet.
+// subnetParameters is parameters supplied to the create/update Subnet +// operation +func (client SubnetsClient) CreateOrUpdate(resourceGroupName string, virtualNetworkName string, subnetName string, subnetParameters Subnet) (result Subnet, ae error) { + req, err := client.CreateOrUpdatePreparer(resourceGroupName, virtualNetworkName, subnetName, subnetParameters) + if err != nil { + return result, autorest.NewErrorWithError(err, "network/SubnetsClient", "CreateOrUpdate", autorest.UndefinedStatusCode, "Failure preparing request") + } + + resp, err := client.CreateOrUpdateSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "network/SubnetsClient", "CreateOrUpdate", resp.StatusCode, "Failure sending request") + } + + result, err = client.CreateOrUpdateResponder(resp) + if err != nil { + ae = autorest.NewErrorWithError(err, "network/SubnetsClient", "CreateOrUpdate", resp.StatusCode, "Failure responding to request") + } + + return +} + +// CreateOrUpdatePreparer prepares the CreateOrUpdate request. +func (client SubnetsClient) CreateOrUpdatePreparer(resourceGroupName string, virtualNetworkName string, subnetName string, subnetParameters Subnet) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": url.QueryEscape(resourceGroupName), + "subnetName": url.QueryEscape(subnetName), + "subscriptionId": url.QueryEscape(client.SubscriptionID), + "virtualNetworkName": url.QueryEscape(virtualNetworkName), + } + + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsPut(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualnetworks/{virtualNetworkName}/subnets/{subnetName}"), + autorest.WithJSON(subnetParameters), + autorest.WithPathParameters(pathParameters), + autorest.WithQueryParameters(queryParameters)) +} + +// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the +// http.Response Body if it receives an error. +func (client SubnetsClient) CreateOrUpdateSender(req *http.Request) (*http.Response, error) { + return client.Send(req, http.StatusOK, http.StatusCreated) +} + +// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always +// closes the http.Response Body. +func (client SubnetsClient) CreateOrUpdateResponder(resp *http.Response) (result Subnet, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + autorest.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// Delete the delete subnet operation deletes the specified subnet. +// +// resourceGroupName is the name of the resource group. virtualNetworkName is +// the name of the virtual network. subnetName is the name of the subnet. 
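The subnet payload follows the same shape: settings nest under Properties. A hedged sketch, with SubnetPropertiesFormat and its AddressPrefix field assumed from the 2015-06-15 generated models:

```go
// Illustrative only; assumes the network and autorest/to imports as above.
func createSubnet(subnetsClient network.SubnetsClient) (network.Subnet, error) {
	subnet := network.Subnet{
		Properties: &network.SubnetPropertiesFormat{ // assumed model name
			AddressPrefix: to.StringPtr("10.0.1.0/24"), // hypothetical CIDR
		},
	}
	// Hypothetical resource names; 200 and 201 both count as success.
	return subnetsClient.CreateOrUpdate("my-rg", "my-vnet", "my-subnet", subnet)
}
```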
+func (client SubnetsClient) Delete(resourceGroupName string, virtualNetworkName string, subnetName string) (result autorest.Response, ae error) { + req, err := client.DeletePreparer(resourceGroupName, virtualNetworkName, subnetName) + if err != nil { + return result, autorest.NewErrorWithError(err, "network/SubnetsClient", "Delete", autorest.UndefinedStatusCode, "Failure preparing request") + } + + resp, err := client.DeleteSender(req) + if err != nil { + result.Response = resp + return result, autorest.NewErrorWithError(err, "network/SubnetsClient", "Delete", resp.StatusCode, "Failure sending request") + } + + result, err = client.DeleteResponder(resp) + if err != nil { + ae = autorest.NewErrorWithError(err, "network/SubnetsClient", "Delete", resp.StatusCode, "Failure responding to request") + } + + return +} + +// DeletePreparer prepares the Delete request. +func (client SubnetsClient) DeletePreparer(resourceGroupName string, virtualNetworkName string, subnetName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": url.QueryEscape(resourceGroupName), + "subnetName": url.QueryEscape(subnetName), + "subscriptionId": url.QueryEscape(client.SubscriptionID), + "virtualNetworkName": url.QueryEscape(virtualNetworkName), + } + + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsDelete(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualnetworks/{virtualNetworkName}/subnets/{subnetName}"), + autorest.WithPathParameters(pathParameters), + autorest.WithQueryParameters(queryParameters)) +} + +// DeleteSender sends the Delete request. The method will close the +// http.Response Body if it receives an error. +func (client SubnetsClient) DeleteSender(req *http.Request) (*http.Response, error) { + return client.Send(req, http.StatusOK, http.StatusNoContent, http.StatusAccepted) +} + +// DeleteResponder handles the response to the Delete request. The method always +// closes the http.Response Body. +func (client SubnetsClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + autorest.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNoContent, http.StatusAccepted), + autorest.ByClosing()) + result.Response = resp + return +} + +// Get the Get subnet operation retreives information about the specified +// subnet. +// +// resourceGroupName is the name of the resource group. virtualNetworkName is +// the name of the virtual network. subnetName is the name of the subnet. +// expand is expand references resources. 
+func (client SubnetsClient) Get(resourceGroupName string, virtualNetworkName string, subnetName string, expand string) (result Subnet, ae error) { + req, err := client.GetPreparer(resourceGroupName, virtualNetworkName, subnetName, expand) + if err != nil { + return result, autorest.NewErrorWithError(err, "network/SubnetsClient", "Get", autorest.UndefinedStatusCode, "Failure preparing request") + } + + resp, err := client.GetSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "network/SubnetsClient", "Get", resp.StatusCode, "Failure sending request") + } + + result, err = client.GetResponder(resp) + if err != nil { + ae = autorest.NewErrorWithError(err, "network/SubnetsClient", "Get", resp.StatusCode, "Failure responding to request") + } + + return +} + +// GetPreparer prepares the Get request. +func (client SubnetsClient) GetPreparer(resourceGroupName string, virtualNetworkName string, subnetName string, expand string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": url.QueryEscape(resourceGroupName), + "subnetName": url.QueryEscape(subnetName), + "subscriptionId": url.QueryEscape(client.SubscriptionID), + "virtualNetworkName": url.QueryEscape(virtualNetworkName), + } + + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + if len(expand) > 0 { + queryParameters["$expand"] = expand + } + + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualnetworks/{virtualNetworkName}/subnets/{subnetName}"), + autorest.WithPathParameters(pathParameters), + autorest.WithQueryParameters(queryParameters)) +} + +// GetSender sends the Get request. The method will close the +// http.Response Body if it receives an error. +func (client SubnetsClient) GetSender(req *http.Request) (*http.Response, error) { + return client.Send(req, http.StatusOK) +} + +// GetResponder handles the response to the Get request. The method always +// closes the http.Response Body. +func (client SubnetsClient) GetResponder(resp *http.Response) (result Subnet, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + autorest.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// List the List subnets opertion retrieves all the subnets in a virtual +// network. +// +// resourceGroupName is the name of the resource group. virtualNetworkName is +// the name of the virtual network. 
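Get is the only subnet operation with an optional expand argument: GetPreparer adds it as the `$expand` query parameter only when it is non-empty, which asks the service to inline the referenced sub-resources instead of returning bare IDs. A sketch (the "ipConfigurations" reference name is an assumption):

```go
// Illustrative only: fetch a subnet with referenced sub-resources inlined.
func getSubnetExpanded(subnetsClient network.SubnetsClient) (network.Subnet, error) {
	// An empty final argument would omit $expand from the query string entirely.
	return subnetsClient.Get("my-rg", "my-vnet", "my-subnet", "ipConfigurations")
}
```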
+func (client SubnetsClient) List(resourceGroupName string, virtualNetworkName string) (result SubnetListResult, ae error) { + req, err := client.ListPreparer(resourceGroupName, virtualNetworkName) + if err != nil { + return result, autorest.NewErrorWithError(err, "network/SubnetsClient", "List", autorest.UndefinedStatusCode, "Failure preparing request") + } + + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "network/SubnetsClient", "List", resp.StatusCode, "Failure sending request") + } + + result, err = client.ListResponder(resp) + if err != nil { + ae = autorest.NewErrorWithError(err, "network/SubnetsClient", "List", resp.StatusCode, "Failure responding to request") + } + + return +} + +// ListPreparer prepares the List request. +func (client SubnetsClient) ListPreparer(resourceGroupName string, virtualNetworkName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": url.QueryEscape(resourceGroupName), + "subscriptionId": url.QueryEscape(client.SubscriptionID), + "virtualNetworkName": url.QueryEscape(virtualNetworkName), + } + + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualnetworks/{virtualNetworkName}/subnets"), + autorest.WithPathParameters(pathParameters), + autorest.WithQueryParameters(queryParameters)) +} + +// ListSender sends the List request. The method will close the +// http.Response Body if it receives an error. +func (client SubnetsClient) ListSender(req *http.Request) (*http.Response, error) { + return client.Send(req, http.StatusOK) +} + +// ListResponder handles the response to the List request. The method always +// closes the http.Response Body. +func (client SubnetsClient) ListResponder(resp *http.Response) (result SubnetListResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + autorest.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// ListNextResults retrieves the next set of results, if any. 
+func (client SubnetsClient) ListNextResults(lastResults SubnetListResult) (result SubnetListResult, ae error) {
+	req, err := lastResults.SubnetListResultPreparer()
+	if err != nil {
+		return result, autorest.NewErrorWithError(err, "network/SubnetsClient", "List", autorest.UndefinedStatusCode, "Failure preparing next results request")
+	}
+	if req == nil {
+		return
+	}
+
+	resp, err := client.ListSender(req)
+	if err != nil {
+		result.Response = autorest.Response{Response: resp}
+		return result, autorest.NewErrorWithError(err, "network/SubnetsClient", "List", resp.StatusCode, "Failure sending next results request")
+	}
+
+	result, err = client.ListResponder(resp)
+	if err != nil {
+		ae = autorest.NewErrorWithError(err, "network/SubnetsClient", "List", resp.StatusCode, "Failure responding to next results request")
+	}
+
+	return
+}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/network/usages.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/usages.go
new file mode 100644
index 000000000..f3a536533
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/usages.go
@@ -0,0 +1,130 @@
+package network
+
+// Copyright (c) Microsoft and contributors. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Code generated by Microsoft (R) AutoRest Code Generator 0.12.0.0
+// Changes may cause incorrect behavior and will be lost if the code is
+// regenerated.
+
+import (
+	"github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest"
+	"net/http"
+	"net/url"
+)
+
+// UsagesClient is the client for the Microsoft Azure Network management
+// API, which provides a RESTful set of web services that interact with the
+// Microsoft Azure Networks service to manage your network resources. The
+// API has entities that capture the relationship between an end user and
+// the Microsoft Azure Networks service.
+type UsagesClient struct {
+	ManagementClient
+}
+
+// NewUsagesClient creates an instance of the UsagesClient client.
+func NewUsagesClient(subscriptionID string) UsagesClient {
+	return NewUsagesClientWithBaseURI(DefaultBaseURI, subscriptionID)
+}
+
+// NewUsagesClientWithBaseURI creates an instance of the UsagesClient client.
+func NewUsagesClientWithBaseURI(baseURI string, subscriptionID string) UsagesClient {
+	return UsagesClient{NewWithBaseURI(baseURI, subscriptionID)}
+}
+
+// List lists network resource usages for a subscription.
+//
+// location is the location upon which resource usage is queried.
+func (client UsagesClient) List(location string) (result UsagesListResult, ae error) {
+	req, err := client.ListPreparer(location)
+	if err != nil {
+		return result, autorest.NewErrorWithError(err, "network/UsagesClient", "List", autorest.UndefinedStatusCode, "Failure preparing request")
+	}
+
+	resp, err := client.ListSender(req)
+	if err != nil {
+		result.Response = autorest.Response{Response: resp}
+		return result, autorest.NewErrorWithError(err, "network/UsagesClient", "List", resp.StatusCode, "Failure sending request")
+	}
+
+	result, err = client.ListResponder(resp)
+	if err != nil {
+		ae = autorest.NewErrorWithError(err, "network/UsagesClient", "List", resp.StatusCode, "Failure responding to request")
+	}
+
+	return
+}
+
+// ListPreparer prepares the List request.
+func (client UsagesClient) ListPreparer(location string) (*http.Request, error) {
+	pathParameters := map[string]interface{}{
+		"location":       url.QueryEscape(location),
+		"subscriptionId": url.QueryEscape(client.SubscriptionID),
+	}
+
+	queryParameters := map[string]interface{}{
+		"api-version": APIVersion,
+	}
+
+	return autorest.Prepare(&http.Request{},
+		autorest.AsJSON(),
+		autorest.AsGet(),
+		autorest.WithBaseURL(client.BaseURI),
+		autorest.WithPath("/subscriptions/{subscriptionId}/providers/Microsoft.Network/locations/{location}/usages"),
+		autorest.WithPathParameters(pathParameters),
+		autorest.WithQueryParameters(queryParameters))
+}
+
+// ListSender sends the List request. The method will close the
+// http.Response Body if it receives an error.
+func (client UsagesClient) ListSender(req *http.Request) (*http.Response, error) {
+	return client.Send(req, http.StatusOK)
+}
+
+// ListResponder handles the response to the List request. The method always
+// closes the http.Response Body.
+func (client UsagesClient) ListResponder(resp *http.Response) (result UsagesListResult, err error) {
+	err = autorest.Respond(
+		resp,
+		client.ByInspecting(),
+		autorest.WithErrorUnlessStatusCode(http.StatusOK),
+		autorest.ByUnmarshallingJSON(&result),
+		autorest.ByClosing())
+	result.Response = autorest.Response{Response: resp}
+	return
+}
+
+// ListNextResults retrieves the next set of results, if any.
+func (client UsagesClient) ListNextResults(lastResults UsagesListResult) (result UsagesListResult, ae error) {
+	req, err := lastResults.UsagesListResultPreparer()
+	if err != nil {
+		return result, autorest.NewErrorWithError(err, "network/UsagesClient", "List", autorest.UndefinedStatusCode, "Failure preparing next results request")
+	}
+	if req == nil {
+		return
+	}
+
+	resp, err := client.ListSender(req)
+	if err != nil {
+		result.Response = autorest.Response{Response: resp}
+		return result, autorest.NewErrorWithError(err, "network/UsagesClient", "List", resp.StatusCode, "Failure sending next results request")
+	}
+
+	result, err = client.ListResponder(resp)
+	if err != nil {
+		ae = autorest.NewErrorWithError(err, "network/UsagesClient", "List", resp.StatusCode, "Failure responding to next results request")
+	}
+
+	return
+}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/network/version.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/version.go
new file mode 100644
index 000000000..2ebbc3b37
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/version.go
@@ -0,0 +1,43 @@
+package network
+
+// Copyright (c) Microsoft and contributors. All rights reserved.
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator 0.12.0.0 +// Changes may cause incorrect behavior and will be lost if the code is +// regenerated. + +import ( + "fmt" +) + +const ( + major = "0" + minor = "4" + patch = "0" + // Always begin a "tag" with a dash (as per http://semver.org) + tag = "-beta" + semVerFormat = "%s.%s.%s%s" + userAgentFormat = "Azure-SDK-for-Go/%s;Package arm/%s;API %s" +) + +// UserAgent returns the UserAgent string to use when sending http.Requests. +func UserAgent() string { + return fmt.Sprintf(userAgentFormat, Version(), "network", "2015-06-15") +} + +// Version returns the semantic version (see http://semver.org) of the client. +func Version() string { + return fmt.Sprintf(semVerFormat, major, minor, patch, tag) +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/network/virtualnetworkgatewayconnections.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/virtualnetworkgatewayconnections.go new file mode 100644 index 000000000..50fed8751 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/virtualnetworkgatewayconnections.go @@ -0,0 +1,545 @@ +package network + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator 0.12.0.0 +// Changes may cause incorrect behavior and will be lost if the code is +// regenerated. + +import ( + "github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest" + "net/http" + "net/url" +) + +// VirtualNetworkGatewayConnectionsClient is the the Microsoft Azure Network +// management API provides a RESTful set of web services that interact with +// Microsoft Azure Networks service to manage your network resrources. The +// API has entities that capture the relationship between an end user and the +// Microsoft Azure Networks service. +type VirtualNetworkGatewayConnectionsClient struct { + ManagementClient +} + +// NewVirtualNetworkGatewayConnectionsClient creates an instance of the +// VirtualNetworkGatewayConnectionsClient client. 
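Stepping back to version.go above: the two format strings compose deterministically from the pinned constants, so the emitted values can be read straight off the source:

```go
fmt.Println(network.Version())   // "0.4.0-beta"
fmt.Println(network.UserAgent()) // "Azure-SDK-for-Go/0.4.0-beta;Package arm/network;API 2015-06-15"
```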
+func NewVirtualNetworkGatewayConnectionsClient(subscriptionID string) VirtualNetworkGatewayConnectionsClient { + return NewVirtualNetworkGatewayConnectionsClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewVirtualNetworkGatewayConnectionsClientWithBaseURI creates an instance of +// the VirtualNetworkGatewayConnectionsClient client. +func NewVirtualNetworkGatewayConnectionsClientWithBaseURI(baseURI string, subscriptionID string) VirtualNetworkGatewayConnectionsClient { + return VirtualNetworkGatewayConnectionsClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// CreateOrUpdate the Put VirtualNetworkGatewayConnection operation +// creates/updates a virtual network gateway connection in the specified +// resource group through Network resource provider. +// +// resourceGroupName is the name of the resource group. +// virtualNetworkGatewayConnectionName is the name of the virtual network +// gateway conenction. parameters is parameters supplied to the Begin Create +// or update Virtual Network Gateway connection operation through Network +// resource provider. +func (client VirtualNetworkGatewayConnectionsClient) CreateOrUpdate(resourceGroupName string, virtualNetworkGatewayConnectionName string, parameters VirtualNetworkGatewayConnection) (result VirtualNetworkGatewayConnection, ae error) { + req, err := client.CreateOrUpdatePreparer(resourceGroupName, virtualNetworkGatewayConnectionName, parameters) + if err != nil { + return result, autorest.NewErrorWithError(err, "network/VirtualNetworkGatewayConnectionsClient", "CreateOrUpdate", autorest.UndefinedStatusCode, "Failure preparing request") + } + + resp, err := client.CreateOrUpdateSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "network/VirtualNetworkGatewayConnectionsClient", "CreateOrUpdate", resp.StatusCode, "Failure sending request") + } + + result, err = client.CreateOrUpdateResponder(resp) + if err != nil { + ae = autorest.NewErrorWithError(err, "network/VirtualNetworkGatewayConnectionsClient", "CreateOrUpdate", resp.StatusCode, "Failure responding to request") + } + + return +} + +// CreateOrUpdatePreparer prepares the CreateOrUpdate request. +func (client VirtualNetworkGatewayConnectionsClient) CreateOrUpdatePreparer(resourceGroupName string, virtualNetworkGatewayConnectionName string, parameters VirtualNetworkGatewayConnection) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": url.QueryEscape(resourceGroupName), + "subscriptionId": url.QueryEscape(client.SubscriptionID), + "virtualNetworkGatewayConnectionName": url.QueryEscape(virtualNetworkGatewayConnectionName), + } + + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsPut(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/connections/{virtualNetworkGatewayConnectionName}"), + autorest.WithJSON(parameters), + autorest.WithPathParameters(pathParameters), + autorest.WithQueryParameters(queryParameters)) +} + +// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the +// http.Response Body if it receives an error. 
+func (client VirtualNetworkGatewayConnectionsClient) CreateOrUpdateSender(req *http.Request) (*http.Response, error) { + return client.Send(req, http.StatusOK, http.StatusCreated) +} + +// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always +// closes the http.Response Body. +func (client VirtualNetworkGatewayConnectionsClient) CreateOrUpdateResponder(resp *http.Response) (result VirtualNetworkGatewayConnection, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + autorest.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// Delete the Delete VirtualNetworkGatewayConnection operation deletes the +// specifed virtual network Gateway connection through Network resource +// provider. +// +// resourceGroupName is the name of the resource group. +// virtualNetworkGatewayConnectionName is the name of the virtual network +// gateway connection. +func (client VirtualNetworkGatewayConnectionsClient) Delete(resourceGroupName string, virtualNetworkGatewayConnectionName string) (result autorest.Response, ae error) { + req, err := client.DeletePreparer(resourceGroupName, virtualNetworkGatewayConnectionName) + if err != nil { + return result, autorest.NewErrorWithError(err, "network/VirtualNetworkGatewayConnectionsClient", "Delete", autorest.UndefinedStatusCode, "Failure preparing request") + } + + resp, err := client.DeleteSender(req) + if err != nil { + result.Response = resp + return result, autorest.NewErrorWithError(err, "network/VirtualNetworkGatewayConnectionsClient", "Delete", resp.StatusCode, "Failure sending request") + } + + result, err = client.DeleteResponder(resp) + if err != nil { + ae = autorest.NewErrorWithError(err, "network/VirtualNetworkGatewayConnectionsClient", "Delete", resp.StatusCode, "Failure responding to request") + } + + return +} + +// DeletePreparer prepares the Delete request. +func (client VirtualNetworkGatewayConnectionsClient) DeletePreparer(resourceGroupName string, virtualNetworkGatewayConnectionName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": url.QueryEscape(resourceGroupName), + "subscriptionId": url.QueryEscape(client.SubscriptionID), + "virtualNetworkGatewayConnectionName": url.QueryEscape(virtualNetworkGatewayConnectionName), + } + + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsDelete(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/connections/{virtualNetworkGatewayConnectionName}/"), + autorest.WithPathParameters(pathParameters), + autorest.WithQueryParameters(queryParameters)) +} + +// DeleteSender sends the Delete request. The method will close the +// http.Response Body if it receives an error. +func (client VirtualNetworkGatewayConnectionsClient) DeleteSender(req *http.Request) (*http.Response, error) { + return client.Send(req, http.StatusOK, http.StatusAccepted, http.StatusNoContent) +} + +// DeleteResponder handles the response to the Delete request. The method always +// closes the http.Response Body. 
+func (client VirtualNetworkGatewayConnectionsClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + autorest.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted, http.StatusNoContent), + autorest.ByClosing()) + result.Response = resp + return +} + +// Get the Get VirtualNetworkGatewayConnection operation retrieves information +// about the specified virtual network gateway connection through Network +// resource provider. +// +// resourceGroupName is the name of the resource group. +// virtualNetworkGatewayConnectionName is the name of the virtual network +// gateway connection. +func (client VirtualNetworkGatewayConnectionsClient) Get(resourceGroupName string, virtualNetworkGatewayConnectionName string) (result VirtualNetworkGatewayConnection, ae error) { + req, err := client.GetPreparer(resourceGroupName, virtualNetworkGatewayConnectionName) + if err != nil { + return result, autorest.NewErrorWithError(err, "network/VirtualNetworkGatewayConnectionsClient", "Get", autorest.UndefinedStatusCode, "Failure preparing request") + } + + resp, err := client.GetSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "network/VirtualNetworkGatewayConnectionsClient", "Get", resp.StatusCode, "Failure sending request") + } + + result, err = client.GetResponder(resp) + if err != nil { + ae = autorest.NewErrorWithError(err, "network/VirtualNetworkGatewayConnectionsClient", "Get", resp.StatusCode, "Failure responding to request") + } + + return +} + +// GetPreparer prepares the Get request. +func (client VirtualNetworkGatewayConnectionsClient) GetPreparer(resourceGroupName string, virtualNetworkGatewayConnectionName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": url.QueryEscape(resourceGroupName), + "subscriptionId": url.QueryEscape(client.SubscriptionID), + "virtualNetworkGatewayConnectionName": url.QueryEscape(virtualNetworkGatewayConnectionName), + } + + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/connections/{virtualNetworkGatewayConnectionName}"), + autorest.WithPathParameters(pathParameters), + autorest.WithQueryParameters(queryParameters)) +} + +// GetSender sends the Get request. The method will close the +// http.Response Body if it receives an error. +func (client VirtualNetworkGatewayConnectionsClient) GetSender(req *http.Request) (*http.Response, error) { + return client.Send(req, http.StatusOK) +} + +// GetResponder handles the response to the Get request. The method always +// closes the http.Response Body. +func (client VirtualNetworkGatewayConnectionsClient) GetResponder(resp *http.Response) (result VirtualNetworkGatewayConnection, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + autorest.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// GetSharedKey the Get VirtualNetworkGatewayConnectionSharedKey operation +// retrieves information about the specified virtual network gateway +// connection shared key through Network resource provider. 
+// +// resourceGroupName is the name of the resource group. +// connectionSharedKeyName is the virtual network gateway connection shared +// key name. +func (client VirtualNetworkGatewayConnectionsClient) GetSharedKey(resourceGroupName string, connectionSharedKeyName string) (result ConnectionSharedKeyResult, ae error) { + req, err := client.GetSharedKeyPreparer(resourceGroupName, connectionSharedKeyName) + if err != nil { + return result, autorest.NewErrorWithError(err, "network/VirtualNetworkGatewayConnectionsClient", "GetSharedKey", autorest.UndefinedStatusCode, "Failure preparing request") + } + + resp, err := client.GetSharedKeySender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "network/VirtualNetworkGatewayConnectionsClient", "GetSharedKey", resp.StatusCode, "Failure sending request") + } + + result, err = client.GetSharedKeyResponder(resp) + if err != nil { + ae = autorest.NewErrorWithError(err, "network/VirtualNetworkGatewayConnectionsClient", "GetSharedKey", resp.StatusCode, "Failure responding to request") + } + + return +} + +// GetSharedKeyPreparer prepares the GetSharedKey request. +func (client VirtualNetworkGatewayConnectionsClient) GetSharedKeyPreparer(resourceGroupName string, connectionSharedKeyName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "connectionSharedKeyName": url.QueryEscape(connectionSharedKeyName), + "resourceGroupName": url.QueryEscape(resourceGroupName), + "subscriptionId": url.QueryEscape(client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/connections/{connectionSharedKeyName}/sharedkey"), + autorest.WithPathParameters(pathParameters), + autorest.WithQueryParameters(queryParameters)) +} + +// GetSharedKeySender sends the GetSharedKey request. The method will close the +// http.Response Body if it receives an error. +func (client VirtualNetworkGatewayConnectionsClient) GetSharedKeySender(req *http.Request) (*http.Response, error) { + return client.Send(req, http.StatusOK) +} + +// GetSharedKeyResponder handles the response to the GetSharedKey request. The method always +// closes the http.Response Body. +func (client VirtualNetworkGatewayConnectionsClient) GetSharedKeyResponder(resp *http.Response) (result ConnectionSharedKeyResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + autorest.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// List the List VirtualNetworkGatewayConnections operation retrieves all the +// virtual network gateways connections created. +// +// resourceGroupName is the name of the resource group. 
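A hedged sketch of GetSharedKey in use; the Value field on ConnectionSharedKeyResult is an assumption from the generated models, not verified against this snapshot:

```go
// Illustrative only: read the current IPsec shared key for a connection.
func showSharedKey(connClient network.VirtualNetworkGatewayConnectionsClient) error {
	key, err := connClient.GetSharedKey("my-rg", "my-connection") // hypothetical names
	if err != nil {
		return err
	}
	fmt.Println(*key.Value) // assumed field name
	return nil
}
```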
+func (client VirtualNetworkGatewayConnectionsClient) List(resourceGroupName string) (result VirtualNetworkGatewayConnectionListResult, ae error) { + req, err := client.ListPreparer(resourceGroupName) + if err != nil { + return result, autorest.NewErrorWithError(err, "network/VirtualNetworkGatewayConnectionsClient", "List", autorest.UndefinedStatusCode, "Failure preparing request") + } + + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "network/VirtualNetworkGatewayConnectionsClient", "List", resp.StatusCode, "Failure sending request") + } + + result, err = client.ListResponder(resp) + if err != nil { + ae = autorest.NewErrorWithError(err, "network/VirtualNetworkGatewayConnectionsClient", "List", resp.StatusCode, "Failure responding to request") + } + + return +} + +// ListPreparer prepares the List request. +func (client VirtualNetworkGatewayConnectionsClient) ListPreparer(resourceGroupName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": url.QueryEscape(resourceGroupName), + "subscriptionId": url.QueryEscape(client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/connections"), + autorest.WithPathParameters(pathParameters), + autorest.WithQueryParameters(queryParameters)) +} + +// ListSender sends the List request. The method will close the +// http.Response Body if it receives an error. +func (client VirtualNetworkGatewayConnectionsClient) ListSender(req *http.Request) (*http.Response, error) { + return client.Send(req, http.StatusOK) +} + +// ListResponder handles the response to the List request. The method always +// closes the http.Response Body. +func (client VirtualNetworkGatewayConnectionsClient) ListResponder(resp *http.Response) (result VirtualNetworkGatewayConnectionListResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + autorest.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// ListNextResults retrieves the next set of results, if any. 
+func (client VirtualNetworkGatewayConnectionsClient) ListNextResults(lastResults VirtualNetworkGatewayConnectionListResult) (result VirtualNetworkGatewayConnectionListResult, ae error) {
+	req, err := lastResults.VirtualNetworkGatewayConnectionListResultPreparer()
+	if err != nil {
+		return result, autorest.NewErrorWithError(err, "network/VirtualNetworkGatewayConnectionsClient", "List", autorest.UndefinedStatusCode, "Failure preparing next results request")
+	}
+	if req == nil {
+		return
+	}
+
+	resp, err := client.ListSender(req)
+	if err != nil {
+		result.Response = autorest.Response{Response: resp}
+		return result, autorest.NewErrorWithError(err, "network/VirtualNetworkGatewayConnectionsClient", "List", resp.StatusCode, "Failure sending next results request")
+	}
+
+	result, err = client.ListResponder(resp)
+	if err != nil {
+		ae = autorest.NewErrorWithError(err, "network/VirtualNetworkGatewayConnectionsClient", "List", resp.StatusCode, "Failure responding to next results request")
+	}
+
+	return
+}
+
+// ResetSharedKey the VirtualNetworkGatewayConnectionResetSharedKey operation
+// resets the virtual network gateway connection shared key for the passed
+// virtual network gateway connection in the specified resource group through
+// the Network resource provider.
+//
+// resourceGroupName is the name of the resource group.
+// virtualNetworkGatewayConnectionName is the name of the virtual network
+// gateway connection. parameters is parameters supplied to the Begin Reset
+// Virtual Network Gateway connection shared key operation through the
+// Network resource provider.
+func (client VirtualNetworkGatewayConnectionsClient) ResetSharedKey(resourceGroupName string, virtualNetworkGatewayConnectionName string, parameters ConnectionResetSharedKey) (result ConnectionResetSharedKey, ae error) {
+	req, err := client.ResetSharedKeyPreparer(resourceGroupName, virtualNetworkGatewayConnectionName, parameters)
+	if err != nil {
+		return result, autorest.NewErrorWithError(err, "network/VirtualNetworkGatewayConnectionsClient", "ResetSharedKey", autorest.UndefinedStatusCode, "Failure preparing request")
+	}
+
+	resp, err := client.ResetSharedKeySender(req)
+	if err != nil {
+		result.Response = autorest.Response{Response: resp}
+		return result, autorest.NewErrorWithError(err, "network/VirtualNetworkGatewayConnectionsClient", "ResetSharedKey", resp.StatusCode, "Failure sending request")
+	}
+
+	result, err = client.ResetSharedKeyResponder(resp)
+	if err != nil {
+		ae = autorest.NewErrorWithError(err, "network/VirtualNetworkGatewayConnectionsClient", "ResetSharedKey", resp.StatusCode, "Failure responding to request")
+	}
+
+	return
+}
+
+// ResetSharedKeyPreparer prepares the ResetSharedKey request.
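ResetSharedKey asks the service to generate a fresh key of a requested length, and the sender that follows accepts 202 while the reset completes asynchronously. A sketch; the KeyLength field name and its *int32 type are assumptions from the generated models:

```go
// Illustrative only: rotate a connection's shared key server-side.
func rotateSharedKey(connClient network.VirtualNetworkGatewayConnectionsClient) error {
	keyLength := int32(128) // assumed field type
	params := network.ConnectionResetSharedKey{KeyLength: &keyLength}
	_, err := connClient.ResetSharedKey("my-rg", "my-connection", params) // hypothetical names
	return err
}
```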
+// ResetSharedKey the VirtualNetworkGatewayConnectionResetSharedKey operation
+// resets the virtual network gateway connection shared key for the passed
+// virtual network gateway connection in the specified resource group through
+// Network resource provider.
+//
+// resourceGroupName is the name of the resource group.
+// virtualNetworkGatewayConnectionName is the virtual network gateway
+// connection reset shared key name. parameters is parameters supplied to the
+// Begin Reset Virtual Network Gateway connection shared key operation
+// through Network resource provider.
+func (client VirtualNetworkGatewayConnectionsClient) ResetSharedKey(resourceGroupName string, virtualNetworkGatewayConnectionName string, parameters ConnectionResetSharedKey) (result ConnectionResetSharedKey, ae error) {
+ req, err := client.ResetSharedKeyPreparer(resourceGroupName, virtualNetworkGatewayConnectionName, parameters)
+ if err != nil {
+ return result, autorest.NewErrorWithError(err, "network/VirtualNetworkGatewayConnectionsClient", "ResetSharedKey", autorest.UndefinedStatusCode, "Failure preparing request")
+ }
+
+ resp, err := client.ResetSharedKeySender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ return result, autorest.NewErrorWithError(err, "network/VirtualNetworkGatewayConnectionsClient", "ResetSharedKey", resp.StatusCode, "Failure sending request")
+ }
+
+ result, err = client.ResetSharedKeyResponder(resp)
+ if err != nil {
+ ae = autorest.NewErrorWithError(err, "network/VirtualNetworkGatewayConnectionsClient", "ResetSharedKey", resp.StatusCode, "Failure responding to request")
+ }
+
+ return
+}
+
+// ResetSharedKeyPreparer prepares the ResetSharedKey request.
+func (client VirtualNetworkGatewayConnectionsClient) ResetSharedKeyPreparer(resourceGroupName string, virtualNetworkGatewayConnectionName string, parameters ConnectionResetSharedKey) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "resourceGroupName": url.QueryEscape(resourceGroupName),
+ "subscriptionId": url.QueryEscape(client.SubscriptionID),
+ "virtualNetworkGatewayConnectionName": url.QueryEscape(virtualNetworkGatewayConnectionName),
+ }
+
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ return autorest.Prepare(&http.Request{},
+ autorest.AsJSON(),
+ autorest.AsPost(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/connections/{virtualNetworkGatewayConnectionName}/sharedkey/reset"),
+ autorest.WithJSON(parameters),
+ autorest.WithPathParameters(pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+}
+
+// ResetSharedKeySender sends the ResetSharedKey request. The method will close the
+// http.Response Body if it receives an error.
+func (client VirtualNetworkGatewayConnectionsClient) ResetSharedKeySender(req *http.Request) (*http.Response, error) {
+ return client.Send(req, http.StatusOK, http.StatusAccepted)
+}
+
+// ResetSharedKeyResponder handles the response to the ResetSharedKey request. The method always
+// closes the http.Response Body.
+func (client VirtualNetworkGatewayConnectionsClient) ResetSharedKeyResponder(resp *http.Response) (result ConnectionResetSharedKey, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ autorest.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
+
+// SetSharedKey the Put VirtualNetworkGatewayConnectionSharedKey operation
+// sets the virtual network gateway connection shared key for the passed
+// virtual network gateway connection in the specified resource group through
+// Network resource provider.
+//
+// resourceGroupName is the name of the resource group.
+// virtualNetworkGatewayConnectionName is the virtual network gateway
+// connection name. parameters is parameters supplied to the Begin Set
+// Virtual Network Gateway connection Shared key operation through Network
+// resource provider.
+func (client VirtualNetworkGatewayConnectionsClient) SetSharedKey(resourceGroupName string, virtualNetworkGatewayConnectionName string, parameters ConnectionSharedKey) (result ConnectionSharedKey, ae error) {
+ req, err := client.SetSharedKeyPreparer(resourceGroupName, virtualNetworkGatewayConnectionName, parameters)
+ if err != nil {
+ return result, autorest.NewErrorWithError(err, "network/VirtualNetworkGatewayConnectionsClient", "SetSharedKey", autorest.UndefinedStatusCode, "Failure preparing request")
+ }
+
+ resp, err := client.SetSharedKeySender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ return result, autorest.NewErrorWithError(err, "network/VirtualNetworkGatewayConnectionsClient", "SetSharedKey", resp.StatusCode, "Failure sending request")
+ }
+
+ result, err = client.SetSharedKeyResponder(resp)
+ if err != nil {
+ ae = autorest.NewErrorWithError(err, "network/VirtualNetworkGatewayConnectionsClient", "SetSharedKey", resp.StatusCode, "Failure responding to request")
+ }
+
+ return
+}
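Editorial aside: each wrapper above funnels failures through autorest.NewErrorWithError, which records the package type ("network/VirtualNetworkGatewayConnectionsClient"), the method name, and the HTTP status code alongside the underlying error. A hedged sketch of recovering those fields, assuming the vendored autorest.Error interface exposes PackageType/Method/StatusCode accessors as in this snapshot's error.go:

    if _, err := connClient.ResetSharedKey("example-rg", "example-conn", params); err != nil {
        if ae, ok := err.(autorest.Error); ok {
            // e.g. "network/VirtualNetworkGatewayConnectionsClient" "ResetSharedKey" 409
            fmt.Println(ae.PackageType(), ae.Method(), ae.StatusCode())
        }
    }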
+// SetSharedKeyPreparer prepares the SetSharedKey request.
+func (client VirtualNetworkGatewayConnectionsClient) SetSharedKeyPreparer(resourceGroupName string, virtualNetworkGatewayConnectionName string, parameters ConnectionSharedKey) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "resourceGroupName": url.QueryEscape(resourceGroupName),
+ "subscriptionId": url.QueryEscape(client.SubscriptionID),
+ "virtualNetworkGatewayConnectionName": url.QueryEscape(virtualNetworkGatewayConnectionName),
+ }
+
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ return autorest.Prepare(&http.Request{},
+ autorest.AsJSON(),
+ autorest.AsPut(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/connections/{virtualNetworkGatewayConnectionName}/sharedkey"),
+ autorest.WithJSON(parameters),
+ autorest.WithPathParameters(pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+}
+
+// SetSharedKeySender sends the SetSharedKey request. The method will close the
+// http.Response Body if it receives an error.
+func (client VirtualNetworkGatewayConnectionsClient) SetSharedKeySender(req *http.Request) (*http.Response, error) {
+ return client.Send(req, http.StatusCreated, http.StatusOK)
+}
+
+// SetSharedKeyResponder handles the response to the SetSharedKey request. The method always
+// closes the http.Response Body.
+func (client VirtualNetworkGatewayConnectionsClient) SetSharedKeyResponder(resp *http.Response) (result ConnectionSharedKey, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ autorest.WithErrorUnlessStatusCode(http.StatusCreated, http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/network/virtualnetworkgateways.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/virtualnetworkgateways.go
new file mode 100644
index 000000000..d5e69686a
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/virtualnetworkgateways.go
@@ -0,0 +1,469 @@
+package network
+
+// Copyright (c) Microsoft and contributors. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Code generated by Microsoft (R) AutoRest Code Generator 0.12.0.0
+// Changes may cause incorrect behavior and will be lost if the code is
+// regenerated.
+
+import (
+ "github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest"
+ "net/http"
+ "net/url"
+)
+
+// VirtualNetworkGatewaysClient is the client for the Microsoft Azure Network
+// management API, which provides a RESTful set of web services that interact
+// with the Microsoft Azure Networks service to manage your network
+// resources. The API has entities that capture the relationship between an
+// end user and the Microsoft Azure Networks service.
+type VirtualNetworkGatewaysClient struct { + ManagementClient +} + +// NewVirtualNetworkGatewaysClient creates an instance of the +// VirtualNetworkGatewaysClient client. +func NewVirtualNetworkGatewaysClient(subscriptionID string) VirtualNetworkGatewaysClient { + return NewVirtualNetworkGatewaysClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewVirtualNetworkGatewaysClientWithBaseURI creates an instance of the +// VirtualNetworkGatewaysClient client. +func NewVirtualNetworkGatewaysClientWithBaseURI(baseURI string, subscriptionID string) VirtualNetworkGatewaysClient { + return VirtualNetworkGatewaysClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// CreateOrUpdate the Put VirtualNetworkGateway operation creates/updates a +// virtual network gateway in the specified resource group through Network +// resource provider. +// +// resourceGroupName is the name of the resource group. +// virtualNetworkGatewayName is the name of the virtual network gateway. +// parameters is parameters supplied to the Begin Create or update Virtual +// Network Gateway operation through Network resource provider. +func (client VirtualNetworkGatewaysClient) CreateOrUpdate(resourceGroupName string, virtualNetworkGatewayName string, parameters VirtualNetworkGateway) (result VirtualNetworkGateway, ae error) { + req, err := client.CreateOrUpdatePreparer(resourceGroupName, virtualNetworkGatewayName, parameters) + if err != nil { + return result, autorest.NewErrorWithError(err, "network/VirtualNetworkGatewaysClient", "CreateOrUpdate", autorest.UndefinedStatusCode, "Failure preparing request") + } + + resp, err := client.CreateOrUpdateSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "network/VirtualNetworkGatewaysClient", "CreateOrUpdate", resp.StatusCode, "Failure sending request") + } + + result, err = client.CreateOrUpdateResponder(resp) + if err != nil { + ae = autorest.NewErrorWithError(err, "network/VirtualNetworkGatewaysClient", "CreateOrUpdate", resp.StatusCode, "Failure responding to request") + } + + return +} + +// CreateOrUpdatePreparer prepares the CreateOrUpdate request. +func (client VirtualNetworkGatewaysClient) CreateOrUpdatePreparer(resourceGroupName string, virtualNetworkGatewayName string, parameters VirtualNetworkGateway) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": url.QueryEscape(resourceGroupName), + "subscriptionId": url.QueryEscape(client.SubscriptionID), + "virtualNetworkGatewayName": url.QueryEscape(virtualNetworkGatewayName), + } + + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsPut(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualnetworkgateways/{virtualNetworkGatewayName}"), + autorest.WithJSON(parameters), + autorest.WithPathParameters(pathParameters), + autorest.WithQueryParameters(queryParameters)) +} + +// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the +// http.Response Body if it receives an error. +func (client VirtualNetworkGatewaysClient) CreateOrUpdateSender(req *http.Request) (*http.Response, error) { + return client.Send(req, http.StatusOK, http.StatusCreated) +} + +// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. 
The method always
+// closes the http.Response Body.
+func (client VirtualNetworkGatewaysClient) CreateOrUpdateResponder(resp *http.Response) (result VirtualNetworkGateway, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ autorest.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
+
+// Delete the Delete VirtualNetworkGateway operation deletes the specified
+// virtual network gateway through Network resource provider.
+//
+// resourceGroupName is the name of the resource group.
+// virtualNetworkGatewayName is the name of the virtual network gateway.
+func (client VirtualNetworkGatewaysClient) Delete(resourceGroupName string, virtualNetworkGatewayName string) (result autorest.Response, ae error) {
+ req, err := client.DeletePreparer(resourceGroupName, virtualNetworkGatewayName)
+ if err != nil {
+ return result, autorest.NewErrorWithError(err, "network/VirtualNetworkGatewaysClient", "Delete", autorest.UndefinedStatusCode, "Failure preparing request")
+ }
+
+ resp, err := client.DeleteSender(req)
+ if err != nil {
+ result.Response = resp
+ return result, autorest.NewErrorWithError(err, "network/VirtualNetworkGatewaysClient", "Delete", resp.StatusCode, "Failure sending request")
+ }
+
+ result, err = client.DeleteResponder(resp)
+ if err != nil {
+ ae = autorest.NewErrorWithError(err, "network/VirtualNetworkGatewaysClient", "Delete", resp.StatusCode, "Failure responding to request")
+ }
+
+ return
+}
+
+// DeletePreparer prepares the Delete request.
+func (client VirtualNetworkGatewaysClient) DeletePreparer(resourceGroupName string, virtualNetworkGatewayName string) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "resourceGroupName": url.QueryEscape(resourceGroupName),
+ "subscriptionId": url.QueryEscape(client.SubscriptionID),
+ "virtualNetworkGatewayName": url.QueryEscape(virtualNetworkGatewayName),
+ }
+
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ return autorest.Prepare(&http.Request{},
+ autorest.AsJSON(),
+ autorest.AsDelete(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkGateways/{virtualNetworkGatewayName}/"),
+ autorest.WithPathParameters(pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+}
+
+// DeleteSender sends the Delete request. The method will close the
+// http.Response Body if it receives an error.
+func (client VirtualNetworkGatewaysClient) DeleteSender(req *http.Request) (*http.Response, error) {
+ return client.Send(req, http.StatusNoContent, http.StatusAccepted, http.StatusOK)
+}
+
+// DeleteResponder handles the response to the Delete request. The method always
+// closes the http.Response Body.
+func (client VirtualNetworkGatewaysClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ autorest.WithErrorUnlessStatusCode(http.StatusNoContent, http.StatusAccepted, http.StatusOK),
+ autorest.ByClosing())
+ result.Response = resp
+ return
+}
+
+// Generatevpnclientpackage the Generatevpnclientpackage operation generates
+// the Vpn client package for the P2S client of the virtual network gateway
+// in the specified resource group through Network resource provider.
+//
+// resourceGroupName is the name of the resource group.
+// virtualNetworkGatewayName is the name of the virtual network gateway. +// parameters is parameters supplied to the Begin Generating Virtual Network +// Gateway Vpn client package operation through Network resource provider. +func (client VirtualNetworkGatewaysClient) Generatevpnclientpackage(resourceGroupName string, virtualNetworkGatewayName string, parameters VpnClientParameters) (result String, ae error) { + req, err := client.GeneratevpnclientpackagePreparer(resourceGroupName, virtualNetworkGatewayName, parameters) + if err != nil { + return result, autorest.NewErrorWithError(err, "network/VirtualNetworkGatewaysClient", "Generatevpnclientpackage", autorest.UndefinedStatusCode, "Failure preparing request") + } + + resp, err := client.GeneratevpnclientpackageSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "network/VirtualNetworkGatewaysClient", "Generatevpnclientpackage", resp.StatusCode, "Failure sending request") + } + + result, err = client.GeneratevpnclientpackageResponder(resp) + if err != nil { + ae = autorest.NewErrorWithError(err, "network/VirtualNetworkGatewaysClient", "Generatevpnclientpackage", resp.StatusCode, "Failure responding to request") + } + + return +} + +// GeneratevpnclientpackagePreparer prepares the Generatevpnclientpackage request. +func (client VirtualNetworkGatewaysClient) GeneratevpnclientpackagePreparer(resourceGroupName string, virtualNetworkGatewayName string, parameters VpnClientParameters) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": url.QueryEscape(resourceGroupName), + "subscriptionId": url.QueryEscape(client.SubscriptionID), + "virtualNetworkGatewayName": url.QueryEscape(virtualNetworkGatewayName), + } + + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualnetworkgateways/{virtualNetworkGatewayName}/generatevpnclientpackage"), + autorest.WithJSON(parameters), + autorest.WithPathParameters(pathParameters), + autorest.WithQueryParameters(queryParameters)) +} + +// GeneratevpnclientpackageSender sends the Generatevpnclientpackage request. The method will close the +// http.Response Body if it receives an error. +func (client VirtualNetworkGatewaysClient) GeneratevpnclientpackageSender(req *http.Request) (*http.Response, error) { + return client.Send(req, http.StatusOK, http.StatusAccepted) +} + +// GeneratevpnclientpackageResponder handles the response to the Generatevpnclientpackage request. The method always +// closes the http.Response Body. +func (client VirtualNetworkGatewaysClient) GeneratevpnclientpackageResponder(resp *http.Response) (result String, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + autorest.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), + autorest.ByUnmarshallingJSON(&result.Value), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// Get the Get VirtualNetworkGateway operation retrieves information about the +// specified virtual network gateway through Network resource provider. +// +// resourceGroupName is the name of the resource group. +// virtualNetworkGatewayName is the name of the virtual network gateway. 
+func (client VirtualNetworkGatewaysClient) Get(resourceGroupName string, virtualNetworkGatewayName string) (result VirtualNetworkGateway, ae error) {
+ req, err := client.GetPreparer(resourceGroupName, virtualNetworkGatewayName)
+ if err != nil {
+ return result, autorest.NewErrorWithError(err, "network/VirtualNetworkGatewaysClient", "Get", autorest.UndefinedStatusCode, "Failure preparing request")
+ }
+
+ resp, err := client.GetSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ return result, autorest.NewErrorWithError(err, "network/VirtualNetworkGatewaysClient", "Get", resp.StatusCode, "Failure sending request")
+ }
+
+ result, err = client.GetResponder(resp)
+ if err != nil {
+ ae = autorest.NewErrorWithError(err, "network/VirtualNetworkGatewaysClient", "Get", resp.StatusCode, "Failure responding to request")
+ }
+
+ return
+}
+
+// GetPreparer prepares the Get request.
+func (client VirtualNetworkGatewaysClient) GetPreparer(resourceGroupName string, virtualNetworkGatewayName string) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "resourceGroupName": url.QueryEscape(resourceGroupName),
+ "subscriptionId": url.QueryEscape(client.SubscriptionID),
+ "virtualNetworkGatewayName": url.QueryEscape(virtualNetworkGatewayName),
+ }
+
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ return autorest.Prepare(&http.Request{},
+ autorest.AsJSON(),
+ autorest.AsGet(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualnetworkgateways/{virtualNetworkGatewayName}"),
+ autorest.WithPathParameters(pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+}
+
+// GetSender sends the Get request. The method will close the
+// http.Response Body if it receives an error.
+func (client VirtualNetworkGatewaysClient) GetSender(req *http.Request) (*http.Response, error) {
+ return client.Send(req, http.StatusOK)
+}
+
+// GetResponder handles the response to the Get request. The method always
+// closes the http.Response Body.
+func (client VirtualNetworkGatewaysClient) GetResponder(resp *http.Response) (result VirtualNetworkGateway, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ autorest.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
+
+// List the List VirtualNetworkGateways operation retrieves all the virtual
+// network gateways stored.
+//
+// resourceGroupName is the name of the resource group.
+func (client VirtualNetworkGatewaysClient) List(resourceGroupName string) (result VirtualNetworkGatewayListResult, ae error) { + req, err := client.ListPreparer(resourceGroupName) + if err != nil { + return result, autorest.NewErrorWithError(err, "network/VirtualNetworkGatewaysClient", "List", autorest.UndefinedStatusCode, "Failure preparing request") + } + + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "network/VirtualNetworkGatewaysClient", "List", resp.StatusCode, "Failure sending request") + } + + result, err = client.ListResponder(resp) + if err != nil { + ae = autorest.NewErrorWithError(err, "network/VirtualNetworkGatewaysClient", "List", resp.StatusCode, "Failure responding to request") + } + + return +} + +// ListPreparer prepares the List request. +func (client VirtualNetworkGatewaysClient) ListPreparer(resourceGroupName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": url.QueryEscape(resourceGroupName), + "subscriptionId": url.QueryEscape(client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkGateways"), + autorest.WithPathParameters(pathParameters), + autorest.WithQueryParameters(queryParameters)) +} + +// ListSender sends the List request. The method will close the +// http.Response Body if it receives an error. +func (client VirtualNetworkGatewaysClient) ListSender(req *http.Request) (*http.Response, error) { + return client.Send(req, http.StatusOK) +} + +// ListResponder handles the response to the List request. The method always +// closes the http.Response Body. +func (client VirtualNetworkGatewaysClient) ListResponder(resp *http.Response) (result VirtualNetworkGatewayListResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + autorest.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// ListNextResults retrieves the next set of results, if any. 
+func (client VirtualNetworkGatewaysClient) ListNextResults(lastResults VirtualNetworkGatewayListResult) (result VirtualNetworkGatewayListResult, ae error) {
+ req, err := lastResults.VirtualNetworkGatewayListResultPreparer()
+ if err != nil {
+ return result, autorest.NewErrorWithError(err, "network/VirtualNetworkGatewaysClient", "List", autorest.UndefinedStatusCode, "Failure preparing next results request")
+ }
+ if req == nil {
+ return
+ }
+
+ resp, err := client.ListSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ return result, autorest.NewErrorWithError(err, "network/VirtualNetworkGatewaysClient", "List", resp.StatusCode, "Failure sending next results request")
+ }
+
+ result, err = client.ListResponder(resp)
+ if err != nil {
+ ae = autorest.NewErrorWithError(err, "network/VirtualNetworkGatewaysClient", "List", resp.StatusCode, "Failure responding to next results request")
+ }
+
+ return
+}
+
+// Reset the Reset VirtualNetworkGateway operation resets the primary of the
+// virtual network gateway in the specified resource group through Network
+// resource provider.
+//
+// resourceGroupName is the name of the resource group.
+// virtualNetworkGatewayName is the name of the virtual network gateway.
+// parameters is parameters supplied to the Begin Reset Virtual Network
+// Gateway operation through Network resource provider.
+func (client VirtualNetworkGatewaysClient) Reset(resourceGroupName string, virtualNetworkGatewayName string, parameters VirtualNetworkGateway) (result VirtualNetworkGateway, ae error) {
+ req, err := client.ResetPreparer(resourceGroupName, virtualNetworkGatewayName, parameters)
+ if err != nil {
+ return result, autorest.NewErrorWithError(err, "network/VirtualNetworkGatewaysClient", "Reset", autorest.UndefinedStatusCode, "Failure preparing request")
+ }
+
+ resp, err := client.ResetSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ return result, autorest.NewErrorWithError(err, "network/VirtualNetworkGatewaysClient", "Reset", resp.StatusCode, "Failure sending request")
+ }
+
+ result, err = client.ResetResponder(resp)
+ if err != nil {
+ ae = autorest.NewErrorWithError(err, "network/VirtualNetworkGatewaysClient", "Reset", resp.StatusCode, "Failure responding to request")
+ }
+
+ return
+}
+
+// ResetPreparer prepares the Reset request.
+func (client VirtualNetworkGatewaysClient) ResetPreparer(resourceGroupName string, virtualNetworkGatewayName string, parameters VirtualNetworkGateway) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "resourceGroupName": url.QueryEscape(resourceGroupName),
+ "subscriptionId": url.QueryEscape(client.SubscriptionID),
+ "virtualNetworkGatewayName": url.QueryEscape(virtualNetworkGatewayName),
+ }
+
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ return autorest.Prepare(&http.Request{},
+ autorest.AsJSON(),
+ autorest.AsPost(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualnetworkgateways/{virtualNetworkGatewayName}/reset"),
+ autorest.WithJSON(parameters),
+ autorest.WithPathParameters(pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+}
+
+// ResetSender sends the Reset request. The method will close the
+// http.Response Body if it receives an error.
+func (client VirtualNetworkGatewaysClient) ResetSender(req *http.Request) (*http.Response, error) {
+ return client.Send(req, http.StatusAccepted, http.StatusOK)
+}
+
+// ResetResponder handles the response to the Reset request. The method always
+// closes the http.Response Body.
+func (client VirtualNetworkGatewaysClient) ResetResponder(resp *http.Response) (result VirtualNetworkGateway, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ autorest.WithErrorUnlessStatusCode(http.StatusAccepted, http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/network/virtualnetworks.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/virtualnetworks.go
new file mode 100644
index 000000000..148681a5c
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/virtualnetworks.go
@@ -0,0 +1,416 @@
+package network
+
+// Copyright (c) Microsoft and contributors. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Code generated by Microsoft (R) AutoRest Code Generator 0.12.0.0
+// Changes may cause incorrect behavior and will be lost if the code is
+// regenerated.
+
+import (
+ "github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest"
+ "net/http"
+ "net/url"
+)
+
+// VirtualNetworksClient is the client for the Microsoft Azure Network
+// management API, which provides a RESTful set of web services that interact
+// with the Microsoft Azure Networks service to manage your network
+// resources. The API has entities that capture the relationship between an
+// end user and the Microsoft Azure Networks service.
+type VirtualNetworksClient struct {
+ ManagementClient
+}
+
+// NewVirtualNetworksClient creates an instance of the VirtualNetworksClient
+// client.
+func NewVirtualNetworksClient(subscriptionID string) VirtualNetworksClient {
+ return NewVirtualNetworksClientWithBaseURI(DefaultBaseURI, subscriptionID)
+}
+
+// NewVirtualNetworksClientWithBaseURI creates an instance of the
+// VirtualNetworksClient client.
+func NewVirtualNetworksClientWithBaseURI(baseURI string, subscriptionID string) VirtualNetworksClient {
+ return VirtualNetworksClient{NewWithBaseURI(baseURI, subscriptionID)}
+}
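Editorial aside: none of the operations below will succeed until the embedded autorest.Client carries an Authorizer that injects credentials into each prepared request. A construction sketch, assuming the vendored azure.NewServicePrincipalToken helper (its exact signature may differ in this snapshot) and hypothetical credentials:

    vnets := network.NewVirtualNetworksClient("subscription-id")
    spt, err := azure.NewServicePrincipalToken(
        "client-id", "client-secret", "tenant-id",
        "https://management.azure.com/") // ARM token scope
    if err == nil {
        vnets.Authorizer = spt // the token type satisfies autorest.Authorizer
    }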
+// CreateOrUpdate the Put VirtualNetwork operation creates/updates a virtual
+// network in the specified resource group.
+//
+// resourceGroupName is the name of the resource group. virtualNetworkName is
+// the name of the virtual network. parameters is parameters supplied to the
+// create/update Virtual Network operation.
+func (client VirtualNetworksClient) CreateOrUpdate(resourceGroupName string, virtualNetworkName string, parameters VirtualNetwork) (result VirtualNetwork, ae error) {
+ req, err := client.CreateOrUpdatePreparer(resourceGroupName, virtualNetworkName, parameters)
+ if err != nil {
+ return result, autorest.NewErrorWithError(err, "network/VirtualNetworksClient", "CreateOrUpdate", autorest.UndefinedStatusCode, "Failure preparing request")
+ }
+
+ resp, err := client.CreateOrUpdateSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ return result, autorest.NewErrorWithError(err, "network/VirtualNetworksClient", "CreateOrUpdate", resp.StatusCode, "Failure sending request")
+ }
+
+ result, err = client.CreateOrUpdateResponder(resp)
+ if err != nil {
+ ae = autorest.NewErrorWithError(err, "network/VirtualNetworksClient", "CreateOrUpdate", resp.StatusCode, "Failure responding to request")
+ }
+
+ return
+}
+
+// CreateOrUpdatePreparer prepares the CreateOrUpdate request.
+func (client VirtualNetworksClient) CreateOrUpdatePreparer(resourceGroupName string, virtualNetworkName string, parameters VirtualNetwork) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "resourceGroupName": url.QueryEscape(resourceGroupName),
+ "subscriptionId": url.QueryEscape(client.SubscriptionID),
+ "virtualNetworkName": url.QueryEscape(virtualNetworkName),
+ }
+
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ return autorest.Prepare(&http.Request{},
+ autorest.AsJSON(),
+ autorest.AsPut(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualnetworks/{virtualNetworkName}"),
+ autorest.WithJSON(parameters),
+ autorest.WithPathParameters(pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+}
+
+// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the
+// http.Response Body if it receives an error.
+func (client VirtualNetworksClient) CreateOrUpdateSender(req *http.Request) (*http.Response, error) {
+ return client.Send(req, http.StatusOK, http.StatusCreated)
+}
+
+// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always
+// closes the http.Response Body.
+func (client VirtualNetworksClient) CreateOrUpdateResponder(resp *http.Response) (result VirtualNetwork, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ autorest.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
+
+// Delete the Delete VirtualNetwork operation deletes the specified virtual
+// network.
+//
+// resourceGroupName is the name of the resource group. virtualNetworkName is
+// the name of the virtual network.
+func (client VirtualNetworksClient) Delete(resourceGroupName string, virtualNetworkName string) (result autorest.Response, ae error) {
+ req, err := client.DeletePreparer(resourceGroupName, virtualNetworkName)
+ if err != nil {
+ return result, autorest.NewErrorWithError(err, "network/VirtualNetworksClient", "Delete", autorest.UndefinedStatusCode, "Failure preparing request")
+ }
+
+ resp, err := client.DeleteSender(req)
+ if err != nil {
+ result.Response = resp
+ return result, autorest.NewErrorWithError(err, "network/VirtualNetworksClient", "Delete", resp.StatusCode, "Failure sending request")
+ }
+
+ result, err = client.DeleteResponder(resp)
+ if err != nil {
+ ae = autorest.NewErrorWithError(err, "network/VirtualNetworksClient", "Delete", resp.StatusCode, "Failure responding to request")
+ }
+
+ return
+}
+
+// DeletePreparer prepares the Delete request.
+func (client VirtualNetworksClient) DeletePreparer(resourceGroupName string, virtualNetworkName string) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "resourceGroupName": url.QueryEscape(resourceGroupName),
+ "subscriptionId": url.QueryEscape(client.SubscriptionID),
+ "virtualNetworkName": url.QueryEscape(virtualNetworkName),
+ }
+
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ return autorest.Prepare(&http.Request{},
+ autorest.AsJSON(),
+ autorest.AsDelete(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualnetworks/{virtualNetworkName}"),
+ autorest.WithPathParameters(pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+}
+
+// DeleteSender sends the Delete request. The method will close the
+// http.Response Body if it receives an error.
+func (client VirtualNetworksClient) DeleteSender(req *http.Request) (*http.Response, error) {
+ return client.Send(req, http.StatusAccepted, http.StatusNoContent, http.StatusOK)
+}
+
+// DeleteResponder handles the response to the Delete request. The method always
+// closes the http.Response Body.
+func (client VirtualNetworksClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ autorest.WithErrorUnlessStatusCode(http.StatusAccepted, http.StatusNoContent, http.StatusOK),
+ autorest.ByClosing())
+ result.Response = resp
+ return
+}
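Editorial aside: the Get wrapper below only attaches an $expand query parameter when its expand argument is non-empty, so callers opt in to inlining referenced child resources. A usage sketch with hypothetical names:

    // Inline the subnets of the virtual network in the same response;
    // pass "" to omit $expand entirely.
    vnet, err := vnets.Get("example-rg", "example-vnet", "subnets")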
+// Get the Get VirtualNetwork operation retrieves information about the
+// specified virtual network.
+//
+// resourceGroupName is the name of the resource group. virtualNetworkName is
+// the name of the virtual network. expand is used to expand referenced
+// resources.
+func (client VirtualNetworksClient) Get(resourceGroupName string, virtualNetworkName string, expand string) (result VirtualNetwork, ae error) {
+ req, err := client.GetPreparer(resourceGroupName, virtualNetworkName, expand)
+ if err != nil {
+ return result, autorest.NewErrorWithError(err, "network/VirtualNetworksClient", "Get", autorest.UndefinedStatusCode, "Failure preparing request")
+ }
+
+ resp, err := client.GetSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ return result, autorest.NewErrorWithError(err, "network/VirtualNetworksClient", "Get", resp.StatusCode, "Failure sending request")
+ }
+
+ result, err = client.GetResponder(resp)
+ if err != nil {
+ ae = autorest.NewErrorWithError(err, "network/VirtualNetworksClient", "Get", resp.StatusCode, "Failure responding to request")
+ }
+
+ return
+}
+
+// GetPreparer prepares the Get request.
+func (client VirtualNetworksClient) GetPreparer(resourceGroupName string, virtualNetworkName string, expand string) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "resourceGroupName": url.QueryEscape(resourceGroupName),
+ "subscriptionId": url.QueryEscape(client.SubscriptionID),
+ "virtualNetworkName": url.QueryEscape(virtualNetworkName),
+ }
+
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+ if len(expand) > 0 {
+ queryParameters["$expand"] = expand
+ }
+
+ return autorest.Prepare(&http.Request{},
+ autorest.AsJSON(),
+ autorest.AsGet(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualnetworks/{virtualNetworkName}"),
+ autorest.WithPathParameters(pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+}
+
+// GetSender sends the Get request. The method will close the
+// http.Response Body if it receives an error.
+func (client VirtualNetworksClient) GetSender(req *http.Request) (*http.Response, error) {
+ return client.Send(req, http.StatusOK)
+}
+
+// GetResponder handles the response to the Get request. The method always
+// closes the http.Response Body.
+func (client VirtualNetworksClient) GetResponder(resp *http.Response) (result VirtualNetwork, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ autorest.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
+
+// List the List VirtualNetwork operation returns all Virtual Networks in a
+// resource group.
+//
+// resourceGroupName is the name of the resource group.
+func (client VirtualNetworksClient) List(resourceGroupName string) (result VirtualNetworkListResult, ae error) {
+ req, err := client.ListPreparer(resourceGroupName)
+ if err != nil {
+ return result, autorest.NewErrorWithError(err, "network/VirtualNetworksClient", "List", autorest.UndefinedStatusCode, "Failure preparing request")
+ }
+
+ resp, err := client.ListSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ return result, autorest.NewErrorWithError(err, "network/VirtualNetworksClient", "List", resp.StatusCode, "Failure sending request")
+ }
+
+ result, err = client.ListResponder(resp)
+ if err != nil {
+ ae = autorest.NewErrorWithError(err, "network/VirtualNetworksClient", "List", resp.StatusCode, "Failure responding to request")
+ }
+
+ return
+}
+
+// ListPreparer prepares the List request.
+func (client VirtualNetworksClient) ListPreparer(resourceGroupName string) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "resourceGroupName": url.QueryEscape(resourceGroupName),
+ "subscriptionId": url.QueryEscape(client.SubscriptionID),
+ }
+
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ return autorest.Prepare(&http.Request{},
+ autorest.AsJSON(),
+ autorest.AsGet(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualnetworks"),
+ autorest.WithPathParameters(pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+}
+
+// ListSender sends the List request. The method will close the
+// http.Response Body if it receives an error.
+func (client VirtualNetworksClient) ListSender(req *http.Request) (*http.Response, error) {
+ return client.Send(req, http.StatusOK)
+}
+
+// ListResponder handles the response to the List request. The method always
+// closes the http.Response Body.
+func (client VirtualNetworksClient) ListResponder(resp *http.Response) (result VirtualNetworkListResult, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ autorest.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
+
+// ListNextResults retrieves the next set of results, if any.
+func (client VirtualNetworksClient) ListNextResults(lastResults VirtualNetworkListResult) (result VirtualNetworkListResult, ae error) {
+ req, err := lastResults.VirtualNetworkListResultPreparer()
+ if err != nil {
+ return result, autorest.NewErrorWithError(err, "network/VirtualNetworksClient", "List", autorest.UndefinedStatusCode, "Failure preparing next results request")
+ }
+ if req == nil {
+ return
+ }
+
+ resp, err := client.ListSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ return result, autorest.NewErrorWithError(err, "network/VirtualNetworksClient", "List", resp.StatusCode, "Failure sending next results request")
+ }
+
+ result, err = client.ListResponder(resp)
+ if err != nil {
+ ae = autorest.NewErrorWithError(err, "network/VirtualNetworksClient", "List", resp.StatusCode, "Failure responding to next results request")
+ }
+
+ return
+}
+
+// ListAll the ListAll VirtualNetwork operation returns all Virtual Networks
+// in a subscription.
+func (client VirtualNetworksClient) ListAll() (result VirtualNetworkListResult, ae error) {
+ req, err := client.ListAllPreparer()
+ if err != nil {
+ return result, autorest.NewErrorWithError(err, "network/VirtualNetworksClient", "ListAll", autorest.UndefinedStatusCode, "Failure preparing request")
+ }
+
+ resp, err := client.ListAllSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ return result, autorest.NewErrorWithError(err, "network/VirtualNetworksClient", "ListAll", resp.StatusCode, "Failure sending request")
+ }
+
+ result, err = client.ListAllResponder(resp)
+ if err != nil {
+ ae = autorest.NewErrorWithError(err, "network/VirtualNetworksClient", "ListAll", resp.StatusCode, "Failure responding to request")
+ }
+
+ return
+}
+
+// ListAllPreparer prepares the ListAll request.
+func (client VirtualNetworksClient) ListAllPreparer() (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "subscriptionId": url.QueryEscape(client.SubscriptionID),
+ }
+
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ return autorest.Prepare(&http.Request{},
+ autorest.AsJSON(),
+ autorest.AsGet(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPath("/subscriptions/{subscriptionId}/providers/Microsoft.Network/virtualnetworks"),
+ autorest.WithPathParameters(pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+}
+
+// ListAllSender sends the ListAll request. The method will close the
+// http.Response Body if it receives an error.
+func (client VirtualNetworksClient) ListAllSender(req *http.Request) (*http.Response, error) {
+ return client.Send(req, http.StatusOK)
+}
+
+// ListAllResponder handles the response to the ListAll request. The method always
+// closes the http.Response Body.
+func (client VirtualNetworksClient) ListAllResponder(resp *http.Response) (result VirtualNetworkListResult, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ autorest.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
+
+// ListAllNextResults retrieves the next set of results, if any.
+func (client VirtualNetworksClient) ListAllNextResults(lastResults VirtualNetworkListResult) (result VirtualNetworkListResult, ae error) {
+ req, err := lastResults.VirtualNetworkListResultPreparer()
+ if err != nil {
+ return result, autorest.NewErrorWithError(err, "network/VirtualNetworksClient", "ListAll", autorest.UndefinedStatusCode, "Failure preparing next results request")
+ }
+ if req == nil {
+ return
+ }
+
+ resp, err := client.ListAllSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ return result, autorest.NewErrorWithError(err, "network/VirtualNetworksClient", "ListAll", resp.StatusCode, "Failure sending next results request")
+ }
+
+ result, err = client.ListAllResponder(resp)
+ if err != nil {
+ ae = autorest.NewErrorWithError(err, "network/VirtualNetworksClient", "ListAll", resp.StatusCode, "Failure responding to next results request")
+ }
+
+ return
+}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/resources/resources/client.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/resources/resources/client.go
new file mode 100644
index 000000000..81bd73568
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/resources/resources/client.go
@@ -0,0 +1,486 @@
+package resources
+
+// Copyright (c) Microsoft and contributors. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Code generated by Microsoft (R) AutoRest Code Generator 0.12.0.0
+// Changes may cause incorrect behavior and will be lost if the code is
+// regenerated.
+
+import (
+ "github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest"
+ "net/http"
+ "net/url"
+)
+
+const (
+ // APIVersion is the version of the Resources API.
+ APIVersion = "2014-04-01-preview"
+
+ // DefaultBaseURI is the default URI used for the Resources service.
+ DefaultBaseURI = "https://management.azure.com"
+)
+
+// ManagementClient is the base client for Resources.
+type ManagementClient struct {
+ autorest.Client
+ BaseURI string
+ SubscriptionID string
+}
+
+// New creates an instance of the ManagementClient client.
+func New(subscriptionID string) ManagementClient {
+ return NewWithBaseURI(DefaultBaseURI, subscriptionID)
+}
+
+// NewWithBaseURI creates an instance of the ManagementClient client.
+func NewWithBaseURI(baseURI string, subscriptionID string) ManagementClient {
+ return ManagementClient{
+ Client: autorest.NewClientWithUserAgent(UserAgent()),
+ BaseURI: baseURI,
+ SubscriptionID: subscriptionID,
+ }
+}
+
+// CheckExistence checks whether a resource exists.
+//
+// resourceGroupName is the name of the resource group. The name is case
+// insensitive. resourceProviderNamespace is resource identity.
+// parentResourcePath is resource identity. resourceType is resource
+// identity. resourceName is resource identity. apiVersion is the API version
+// to use (note that the generated preparer currently pins the package-level
+// APIVersion constant instead).
+func (client ManagementClient) CheckExistence(resourceGroupName string, resourceProviderNamespace string, parentResourcePath string, resourceType string, resourceName string, apiVersion string) (result autorest.Response, ae error) {
+ req, err := client.CheckExistencePreparer(resourceGroupName, resourceProviderNamespace, parentResourcePath, resourceType, resourceName, apiVersion)
+ if err != nil {
+ return result, autorest.NewErrorWithError(err, "resources/ManagementClient", "CheckExistence", autorest.UndefinedStatusCode, "Failure preparing request")
+ }
+
+ resp, err := client.CheckExistenceSender(req)
+ if err != nil {
+ result.Response = resp
+ return result, autorest.NewErrorWithError(err, "resources/ManagementClient", "CheckExistence", resp.StatusCode, "Failure sending request")
+ }
+
+ result, err = client.CheckExistenceResponder(resp)
+ if err != nil {
+ ae = autorest.NewErrorWithError(err, "resources/ManagementClient", "CheckExistence", resp.StatusCode, "Failure responding to request")
+ }
+
+ return
+}
+
+// CheckExistencePreparer prepares the CheckExistence request.
+func (client ManagementClient) CheckExistencePreparer(resourceGroupName string, resourceProviderNamespace string, parentResourcePath string, resourceType string, resourceName string, apiVersion string) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "parentResourcePath": parentResourcePath,
+ "resourceGroupName": url.QueryEscape(resourceGroupName),
+ "resourceName": url.QueryEscape(resourceName),
+ "resourceProviderNamespace": url.QueryEscape(resourceProviderNamespace),
+ "resourceType": resourceType,
+ "subscriptionId": url.QueryEscape(client.SubscriptionID),
+ }
+
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ return autorest.Prepare(&http.Request{},
+ autorest.AsJSON(),
+ autorest.AsHead(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPath("/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{parentResourcePath}/{resourceType}/{resourceName}"),
+ autorest.WithPathParameters(pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+}
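Editorial aside: the CheckExistenceSender defined next whitelists 404 alongside 200 and 204, so a missing resource surfaces through the status code rather than as an error. A hedged sketch (identifiers hypothetical; as noted above, the apiVersion argument is currently ignored):

    res, err := rc.CheckExistence("example-rg", "Microsoft.Network", "",
        "virtualNetworks", "example-vnet", "2015-06-15")
    if err == nil {
        // autorest.Response embeds *http.Response, so StatusCode is available
        fmt.Println("exists:", res.StatusCode != http.StatusNotFound)
    }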
+// CheckExistenceSender sends the CheckExistence request. The method will close the
+// http.Response Body if it receives an error.
+func (client ManagementClient) CheckExistenceSender(req *http.Request) (*http.Response, error) {
+ return client.Send(req, http.StatusOK, http.StatusNoContent, http.StatusNotFound)
+}
+
+// CheckExistenceResponder handles the response to the CheckExistence request. The method always
+// closes the http.Response Body.
+func (client ManagementClient) CheckExistenceResponder(resp *http.Response) (result autorest.Response, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ autorest.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNoContent, http.StatusNotFound),
+ autorest.ByClosing())
+ result.Response = resp
+ return
+}
+
+// CreateOrUpdate creates a resource.
+//
+// resourceGroupName is the name of the resource group. The name is case
+// insensitive. resourceProviderNamespace is resource identity.
+// parentResourcePath is resource identity. resourceType is resource
+// identity. resourceName is resource identity. parameters is create or
+// update resource parameters.
+func (client ManagementClient) CreateOrUpdate(resourceGroupName string, resourceProviderNamespace string, parentResourcePath string, resourceType string, resourceName string, apiVersion string, parameters GenericResource) (result GenericResource, ae error) {
+ req, err := client.CreateOrUpdatePreparer(resourceGroupName, resourceProviderNamespace, parentResourcePath, resourceType, resourceName, apiVersion, parameters)
+ if err != nil {
+ return result, autorest.NewErrorWithError(err, "resources/ManagementClient", "CreateOrUpdate", autorest.UndefinedStatusCode, "Failure preparing request")
+ }
+
+ resp, err := client.CreateOrUpdateSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ return result, autorest.NewErrorWithError(err, "resources/ManagementClient", "CreateOrUpdate", resp.StatusCode, "Failure sending request")
+ }
+
+ result, err = client.CreateOrUpdateResponder(resp)
+ if err != nil {
+ ae = autorest.NewErrorWithError(err, "resources/ManagementClient", "CreateOrUpdate", resp.StatusCode, "Failure responding to request")
+ }
+
+ return
+}
+
+// CreateOrUpdatePreparer prepares the CreateOrUpdate request.
+func (client ManagementClient) CreateOrUpdatePreparer(resourceGroupName string, resourceProviderNamespace string, parentResourcePath string, resourceType string, resourceName string, apiVersion string, parameters GenericResource) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "parentResourcePath": parentResourcePath,
+ "resourceGroupName": url.QueryEscape(resourceGroupName),
+ "resourceName": url.QueryEscape(resourceName),
+ "resourceProviderNamespace": url.QueryEscape(resourceProviderNamespace),
+ "resourceType": resourceType,
+ "subscriptionId": url.QueryEscape(client.SubscriptionID),
+ }
+
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ return autorest.Prepare(&http.Request{},
+ autorest.AsJSON(),
+ autorest.AsPut(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPath("/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{parentResourcePath}/{resourceType}/{resourceName}"),
+ autorest.WithJSON(parameters),
+ autorest.WithPathParameters(pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+}
+
+// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the
+// http.Response Body if it receives an error.
+func (client ManagementClient) CreateOrUpdateSender(req *http.Request) (*http.Response, error) {
+ return client.Send(req, http.StatusCreated, http.StatusOK)
+}
+
+// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always
+// closes the http.Response Body.
+func (client ManagementClient) CreateOrUpdateResponder(resp *http.Response) (result GenericResource, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ autorest.WithErrorUnlessStatusCode(http.StatusCreated, http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
+
+// Delete deletes a resource and all of its resources.
+//
+// resourceGroupName is the name of the resource group. The name is case
+// insensitive. resourceProviderNamespace is resource identity.
+// parentResourcePath is resource identity. resourceType is resource
+// identity. resourceName is resource identity.
+func (client ManagementClient) Delete(resourceGroupName string, resourceProviderNamespace string, parentResourcePath string, resourceType string, resourceName string, apiVersion string) (result autorest.Response, ae error) {
+ req, err := client.DeletePreparer(resourceGroupName, resourceProviderNamespace, parentResourcePath, resourceType, resourceName, apiVersion)
+ if err != nil {
+ return result, autorest.NewErrorWithError(err, "resources/ManagementClient", "Delete", autorest.UndefinedStatusCode, "Failure preparing request")
+ }
+
+ resp, err := client.DeleteSender(req)
+ if err != nil {
+ result.Response = resp
+ return result, autorest.NewErrorWithError(err, "resources/ManagementClient", "Delete", resp.StatusCode, "Failure sending request")
+ }
+
+ result, err = client.DeleteResponder(resp)
+ if err != nil {
+ ae = autorest.NewErrorWithError(err, "resources/ManagementClient", "Delete", resp.StatusCode, "Failure responding to request")
+ }
+
+ return
+}
+
+// DeletePreparer prepares the Delete request.
+func (client ManagementClient) DeletePreparer(resourceGroupName string, resourceProviderNamespace string, parentResourcePath string, resourceType string, resourceName string, apiVersion string) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "parentResourcePath": parentResourcePath,
+ "resourceGroupName": url.QueryEscape(resourceGroupName),
+ "resourceName": url.QueryEscape(resourceName),
+ "resourceProviderNamespace": url.QueryEscape(resourceProviderNamespace),
+ "resourceType": resourceType,
+ "subscriptionId": url.QueryEscape(client.SubscriptionID),
+ }
+
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ return autorest.Prepare(&http.Request{},
+ autorest.AsJSON(),
+ autorest.AsDelete(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPath("/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{parentResourcePath}/{resourceType}/{resourceName}"),
+ autorest.WithPathParameters(pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+}
+
+// DeleteSender sends the Delete request. The method will close the
+// http.Response Body if it receives an error.
+func (client ManagementClient) DeleteSender(req *http.Request) (*http.Response, error) {
+ return client.Send(req, http.StatusOK, http.StatusNoContent, http.StatusAccepted)
+}
+
+// DeleteResponder handles the response to the Delete request. The method always
+// closes the http.Response Body.
+func (client ManagementClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ autorest.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNoContent, http.StatusAccepted),
+ autorest.ByClosing())
+ result.Response = resp
+ return
+}
+
+// Get returns a resource belonging to a resource group.
+//
+// resourceGroupName is the name of the resource group. The name is case
+// insensitive. resourceProviderNamespace is resource identity.
+// parentResourcePath is resource identity. resourceType is resource
+// identity. resourceName is resource identity.
+func (client ManagementClient) Get(resourceGroupName string, resourceProviderNamespace string, parentResourcePath string, resourceType string, resourceName string, apiVersion string) (result GenericResource, ae error) {
+ req, err := client.GetPreparer(resourceGroupName, resourceProviderNamespace, parentResourcePath, resourceType, resourceName, apiVersion)
+ if err != nil {
+ return result, autorest.NewErrorWithError(err, "resources/ManagementClient", "Get", autorest.UndefinedStatusCode, "Failure preparing request")
+ }
+
+ resp, err := client.GetSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ return result, autorest.NewErrorWithError(err, "resources/ManagementClient", "Get", resp.StatusCode, "Failure sending request")
+ }
+
+ result, err = client.GetResponder(resp)
+ if err != nil {
+ ae = autorest.NewErrorWithError(err, "resources/ManagementClient", "Get", resp.StatusCode, "Failure responding to request")
+ }
+
+ return
+}
+
+// GetPreparer prepares the Get request.
+func (client ManagementClient) GetPreparer(resourceGroupName string, resourceProviderNamespace string, parentResourcePath string, resourceType string, resourceName string, apiVersion string) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "parentResourcePath": parentResourcePath,
+ "resourceGroupName": url.QueryEscape(resourceGroupName),
+ "resourceName": url.QueryEscape(resourceName),
+ "resourceProviderNamespace": url.QueryEscape(resourceProviderNamespace),
+ "resourceType": resourceType,
+ "subscriptionId": url.QueryEscape(client.SubscriptionID),
+ }
+
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ return autorest.Prepare(&http.Request{},
+ autorest.AsJSON(),
+ autorest.AsGet(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPath("/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{parentResourcePath}/{resourceType}/{resourceName}"),
+ autorest.WithPathParameters(pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+}
+
+// GetSender sends the Get request. The method will close the
+// http.Response Body if it receives an error.
+func (client ManagementClient) GetSender(req *http.Request) (*http.Response, error) {
+ return client.Send(req, http.StatusOK, http.StatusNoContent)
+}
+
+// GetResponder handles the response to the Get request. The method always
+// closes the http.Response Body.
+func (client ManagementClient) GetResponder(resp *http.Response) (result GenericResource, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ autorest.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNoContent),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
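Editorial aside: the subscription-wide List below adds $filter and $top to the query string only when they are set, so a nil top returns every resource. A usage sketch with a hypothetical OData filter, assuming a configured ManagementClient named rc and the Value pointer field the generated ResourceListResult conventionally carries:

    top := 10
    page, err := rc.List("resourceType eq 'Microsoft.Network/virtualNetworks'", &top)
    if err == nil && page.Value != nil {
        fmt.Println(len(*page.Value), "resources in the first page")
    }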
+// +// filter is the filter to apply on the operation. top is query parameters. If +// null is passed returns all resource groups. +func (client ManagementClient) List(filter string, top *int) (result ResourceListResult, ae error) { + req, err := client.ListPreparer(filter, top) + if err != nil { + return result, autorest.NewErrorWithError(err, "resources/ManagementClient", "List", autorest.UndefinedStatusCode, "Failure preparing request") + } + + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "resources/ManagementClient", "List", resp.StatusCode, "Failure sending request") + } + + result, err = client.ListResponder(resp) + if err != nil { + ae = autorest.NewErrorWithError(err, "resources/ManagementClient", "List", resp.StatusCode, "Failure responding to request") + } + + return +} + +// ListPreparer prepares the List request. +func (client ManagementClient) ListPreparer(filter string, top *int) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "subscriptionId": url.QueryEscape(client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + if len(filter) > 0 { + queryParameters["$filter"] = filter + } + if top != nil { + queryParameters["$top"] = top + } + + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPath("/subscriptions/{subscriptionId}/resources"), + autorest.WithPathParameters(pathParameters), + autorest.WithQueryParameters(queryParameters)) +} + +// ListSender sends the List request. The method will close the +// http.Response Body if it receives an error. +func (client ManagementClient) ListSender(req *http.Request) (*http.Response, error) { + return client.Send(req, http.StatusOK) +} + +// ListResponder handles the response to the List request. The method always +// closes the http.Response Body. +func (client ManagementClient) ListResponder(resp *http.Response) (result ResourceListResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + autorest.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// ListNextResults retrieves the next set of results, if any. +func (client ManagementClient) ListNextResults(lastResults ResourceListResult) (result ResourceListResult, ae error) { + req, err := lastResults.ResourceListResultPreparer() + if err != nil { + return result, autorest.NewErrorWithError(err, "resources/ManagementClient", "List", autorest.UndefinedStatusCode, "Failure preparing next results request request") + } + if req == nil { + return + } + + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "resources/ManagementClient", "List", resp.StatusCode, "Failure sending next results request request") + } + + result, err = client.ListResponder(resp) + if err != nil { + ae = autorest.NewErrorWithError(err, "resources/ManagementClient", "List", resp.StatusCode, "Failure responding to next results request request") + } + + return +} + +// MoveResources begin moving resources.To determine whether the operation has +// finished processing the request, call GetLongRunningOperationStatus. +// +// sourceResourceGroupName is source resource group name. 
parameters is move +// resources' parameters. +func (client ManagementClient) MoveResources(sourceResourceGroupName string, parameters MoveInfo) (result autorest.Response, ae error) { + req, err := client.MoveResourcesPreparer(sourceResourceGroupName, parameters) + if err != nil { + return result, autorest.NewErrorWithError(err, "resources/ManagementClient", "MoveResources", autorest.UndefinedStatusCode, "Failure preparing request") + } + + resp, err := client.MoveResourcesSender(req) + if err != nil { + result.Response = resp + return result, autorest.NewErrorWithError(err, "resources/ManagementClient", "MoveResources", resp.StatusCode, "Failure sending request") + } + + result, err = client.MoveResourcesResponder(resp) + if err != nil { + ae = autorest.NewErrorWithError(err, "resources/ManagementClient", "MoveResources", resp.StatusCode, "Failure responding to request") + } + + return +} + +// MoveResourcesPreparer prepares the MoveResources request. +func (client ManagementClient) MoveResourcesPreparer(sourceResourceGroupName string, parameters MoveInfo) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "sourceResourceGroupName": url.QueryEscape(sourceResourceGroupName), + "subscriptionId": url.QueryEscape(client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{sourceResourceGroupName}/moveResources"), + autorest.WithJSON(parameters), + autorest.WithPathParameters(pathParameters), + autorest.WithQueryParameters(queryParameters)) +} + +// MoveResourcesSender sends the MoveResources request. The method will close the +// http.Response Body if it receives an error. +func (client ManagementClient) MoveResourcesSender(req *http.Request) (*http.Response, error) { + return client.Send(req, http.StatusOK, http.StatusAccepted, http.StatusNoContent) +} + +// MoveResourcesResponder handles the response to the MoveResources request. The method always +// closes the http.Response Body. +func (client ManagementClient) MoveResourcesResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + autorest.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted, http.StatusNoContent), + autorest.ByClosing()) + result.Response = resp + return +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/resources/resources/deploymentoperations.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/resources/resources/deploymentoperations.go new file mode 100644 index 000000000..3c042f089 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/resources/resources/deploymentoperations.go @@ -0,0 +1,201 @@ +package resources + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. 
+// +// Code generated by Microsoft (R) AutoRest Code Generator 0.12.0.0 +// Changes may cause incorrect behavior and will be lost if the code is +// regenerated. + +import ( + "github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest" + "net/http" + "net/url" +) + +// DeploymentOperationsClient is the client for the DeploymentOperations +// methods of the Resources service. +type DeploymentOperationsClient struct { + ManagementClient +} + +// NewDeploymentOperationsClient creates an instance of the +// DeploymentOperationsClient client. +func NewDeploymentOperationsClient(subscriptionID string) DeploymentOperationsClient { + return NewDeploymentOperationsClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewDeploymentOperationsClientWithBaseURI creates an instance of the +// DeploymentOperationsClient client. +func NewDeploymentOperationsClientWithBaseURI(baseURI string, subscriptionID string) DeploymentOperationsClient { + return DeploymentOperationsClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// Get get a list of deployments operations. +// +// resourceGroupName is the name of the resource group. The name is case +// insensitive. deploymentName is the name of the deployment. operationID is +// operation Id. +func (client DeploymentOperationsClient) Get(resourceGroupName string, deploymentName string, operationID string) (result DeploymentOperation, ae error) { + req, err := client.GetPreparer(resourceGroupName, deploymentName, operationID) + if err != nil { + return result, autorest.NewErrorWithError(err, "resources/DeploymentOperationsClient", "Get", autorest.UndefinedStatusCode, "Failure preparing request") + } + + resp, err := client.GetSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "resources/DeploymentOperationsClient", "Get", resp.StatusCode, "Failure sending request") + } + + result, err = client.GetResponder(resp) + if err != nil { + ae = autorest.NewErrorWithError(err, "resources/DeploymentOperationsClient", "Get", resp.StatusCode, "Failure responding to request") + } + + return +} + +// GetPreparer prepares the Get request. +func (client DeploymentOperationsClient) GetPreparer(resourceGroupName string, deploymentName string, operationID string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "deploymentName": url.QueryEscape(deploymentName), + "operationId": url.QueryEscape(operationID), + "resourceGroupName": url.QueryEscape(resourceGroupName), + "subscriptionId": url.QueryEscape(client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPath("/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/deployments/{deploymentName}/operations/{operationId}"), + autorest.WithPathParameters(pathParameters), + autorest.WithQueryParameters(queryParameters)) +} + +// GetSender sends the Get request. The method will close the +// http.Response Body if it receives an error. +func (client DeploymentOperationsClient) GetSender(req *http.Request) (*http.Response, error) { + return client.Send(req, http.StatusOK) +} + +// GetResponder handles the response to the Get request. The method always +// closes the http.Response Body. 
+func (client DeploymentOperationsClient) GetResponder(resp *http.Response) (result DeploymentOperation, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + autorest.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// List gets a list of deployments operations. +// +// resourceGroupName is the name of the resource group. The name is case +// insensitive. deploymentName is the name of the deployment. top is query +// parameters. +func (client DeploymentOperationsClient) List(resourceGroupName string, deploymentName string, top *int) (result DeploymentOperationsListResult, ae error) { + req, err := client.ListPreparer(resourceGroupName, deploymentName, top) + if err != nil { + return result, autorest.NewErrorWithError(err, "resources/DeploymentOperationsClient", "List", autorest.UndefinedStatusCode, "Failure preparing request") + } + + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "resources/DeploymentOperationsClient", "List", resp.StatusCode, "Failure sending request") + } + + result, err = client.ListResponder(resp) + if err != nil { + ae = autorest.NewErrorWithError(err, "resources/DeploymentOperationsClient", "List", resp.StatusCode, "Failure responding to request") + } + + return +} + +// ListPreparer prepares the List request. +func (client DeploymentOperationsClient) ListPreparer(resourceGroupName string, deploymentName string, top *int) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "deploymentName": url.QueryEscape(deploymentName), + "resourceGroupName": url.QueryEscape(resourceGroupName), + "subscriptionId": url.QueryEscape(client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + if top != nil { + queryParameters["$top"] = top + } + + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPath("/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/deployments/{deploymentName}/operations"), + autorest.WithPathParameters(pathParameters), + autorest.WithQueryParameters(queryParameters)) +} + +// ListSender sends the List request. The method will close the +// http.Response Body if it receives an error. +func (client DeploymentOperationsClient) ListSender(req *http.Request) (*http.Response, error) { + return client.Send(req, http.StatusOK) +} + +// ListResponder handles the response to the List request. The method always +// closes the http.Response Body. +func (client DeploymentOperationsClient) ListResponder(resp *http.Response) (result DeploymentOperationsListResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + autorest.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// ListNextResults retrieves the next set of results, if any. 
+func (client DeploymentOperationsClient) ListNextResults(lastResults DeploymentOperationsListResult) (result DeploymentOperationsListResult, ae error) { + req, err := lastResults.DeploymentOperationsListResultPreparer() + if err != nil { + return result, autorest.NewErrorWithError(err, "resources/DeploymentOperationsClient", "List", autorest.UndefinedStatusCode, "Failure preparing next results request request") + } + if req == nil { + return + } + + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "resources/DeploymentOperationsClient", "List", resp.StatusCode, "Failure sending next results request request") + } + + result, err = client.ListResponder(resp) + if err != nil { + ae = autorest.NewErrorWithError(err, "resources/DeploymentOperationsClient", "List", resp.StatusCode, "Failure responding to next results request request") + } + + return +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/resources/resources/deployments.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/resources/resources/deployments.go new file mode 100644 index 000000000..b7f940726 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/resources/resources/deployments.go @@ -0,0 +1,522 @@ +package resources + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator 0.12.0.0 +// Changes may cause incorrect behavior and will be lost if the code is +// regenerated. + +import ( + "github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest" + "net/http" + "net/url" +) + +// DeploymentsClient is the client for the Deployments methods of the +// Resources service. +type DeploymentsClient struct { + ManagementClient +} + +// NewDeploymentsClient creates an instance of the DeploymentsClient client. +func NewDeploymentsClient(subscriptionID string) DeploymentsClient { + return NewDeploymentsClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewDeploymentsClientWithBaseURI creates an instance of the +// DeploymentsClient client. +func NewDeploymentsClientWithBaseURI(baseURI string, subscriptionID string) DeploymentsClient { + return DeploymentsClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// Cancel cancel a currently running template deployment. +// +// resourceGroupName is the name of the resource group. The name is case +// insensitive. deploymentName is the name of the deployment. 
+func (client DeploymentsClient) Cancel(resourceGroupName string, deploymentName string) (result autorest.Response, ae error) { + req, err := client.CancelPreparer(resourceGroupName, deploymentName) + if err != nil { + return result, autorest.NewErrorWithError(err, "resources/DeploymentsClient", "Cancel", autorest.UndefinedStatusCode, "Failure preparing request") + } + + resp, err := client.CancelSender(req) + if err != nil { + result.Response = resp + return result, autorest.NewErrorWithError(err, "resources/DeploymentsClient", "Cancel", resp.StatusCode, "Failure sending request") + } + + result, err = client.CancelResponder(resp) + if err != nil { + ae = autorest.NewErrorWithError(err, "resources/DeploymentsClient", "Cancel", resp.StatusCode, "Failure responding to request") + } + + return +} + +// CancelPreparer prepares the Cancel request. +func (client DeploymentsClient) CancelPreparer(resourceGroupName string, deploymentName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "deploymentName": url.QueryEscape(deploymentName), + "resourceGroupName": url.QueryEscape(resourceGroupName), + "subscriptionId": url.QueryEscape(client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPath("/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.Resources/deployments/{deploymentName}/cancel"), + autorest.WithPathParameters(pathParameters), + autorest.WithQueryParameters(queryParameters)) +} + +// CancelSender sends the Cancel request. The method will close the +// http.Response Body if it receives an error. +func (client DeploymentsClient) CancelSender(req *http.Request) (*http.Response, error) { + return client.Send(req, http.StatusOK, http.StatusNoContent) +} + +// CancelResponder handles the response to the Cancel request. The method always +// closes the http.Response Body. +func (client DeploymentsClient) CancelResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + autorest.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNoContent), + autorest.ByClosing()) + result.Response = resp + return +} + +// CheckExistence checks whether deployment exists. +// +// resourceGroupName is the name of the resource group to check. The name is +// case insensitive. deploymentName is the name of the deployment. +func (client DeploymentsClient) CheckExistence(resourceGroupName string, deploymentName string) (result autorest.Response, ae error) { + req, err := client.CheckExistencePreparer(resourceGroupName, deploymentName) + if err != nil { + return result, autorest.NewErrorWithError(err, "resources/DeploymentsClient", "CheckExistence", autorest.UndefinedStatusCode, "Failure preparing request") + } + + resp, err := client.CheckExistenceSender(req) + if err != nil { + result.Response = resp + return result, autorest.NewErrorWithError(err, "resources/DeploymentsClient", "CheckExistence", resp.StatusCode, "Failure sending request") + } + + result, err = client.CheckExistenceResponder(resp) + if err != nil { + ae = autorest.NewErrorWithError(err, "resources/DeploymentsClient", "CheckExistence", resp.StatusCode, "Failure responding to request") + } + + return +} + +// CheckExistencePreparer prepares the CheckExistence request. 
+func (client DeploymentsClient) CheckExistencePreparer(resourceGroupName string, deploymentName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "deploymentName": url.QueryEscape(deploymentName), + "resourceGroupName": url.QueryEscape(resourceGroupName), + "subscriptionId": url.QueryEscape(client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsHead(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPath("/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.Resources/deployments/{deploymentName}"), + autorest.WithPathParameters(pathParameters), + autorest.WithQueryParameters(queryParameters)) +} + +// CheckExistenceSender sends the CheckExistence request. The method will close the +// http.Response Body if it receives an error. +func (client DeploymentsClient) CheckExistenceSender(req *http.Request) (*http.Response, error) { + return client.Send(req, http.StatusOK, http.StatusNoContent, http.StatusNotFound) +} + +// CheckExistenceResponder handles the response to the CheckExistence request. The method always +// closes the http.Response Body. +func (client DeploymentsClient) CheckExistenceResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + autorest.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNoContent, http.StatusNotFound), + autorest.ByClosing()) + result.Response = resp + return +} + +// CreateOrUpdate create a named template deployment using a template. +// +// resourceGroupName is the name of the resource group. The name is case +// insensitive. deploymentName is the name of the deployment. parameters is +// additional parameters supplied to the operation. +func (client DeploymentsClient) CreateOrUpdate(resourceGroupName string, deploymentName string, parameters Deployment) (result DeploymentExtended, ae error) { + req, err := client.CreateOrUpdatePreparer(resourceGroupName, deploymentName, parameters) + if err != nil { + return result, autorest.NewErrorWithError(err, "resources/DeploymentsClient", "CreateOrUpdate", autorest.UndefinedStatusCode, "Failure preparing request") + } + + resp, err := client.CreateOrUpdateSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "resources/DeploymentsClient", "CreateOrUpdate", resp.StatusCode, "Failure sending request") + } + + result, err = client.CreateOrUpdateResponder(resp) + if err != nil { + ae = autorest.NewErrorWithError(err, "resources/DeploymentsClient", "CreateOrUpdate", resp.StatusCode, "Failure responding to request") + } + + return +} + +// CreateOrUpdatePreparer prepares the CreateOrUpdate request. 
+func (client DeploymentsClient) CreateOrUpdatePreparer(resourceGroupName string, deploymentName string, parameters Deployment) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "deploymentName": url.QueryEscape(deploymentName), + "resourceGroupName": url.QueryEscape(resourceGroupName), + "subscriptionId": url.QueryEscape(client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsPut(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPath("/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.Resources/deployments/{deploymentName}"), + autorest.WithJSON(parameters), + autorest.WithPathParameters(pathParameters), + autorest.WithQueryParameters(queryParameters)) +} + +// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the +// http.Response Body if it receives an error. +func (client DeploymentsClient) CreateOrUpdateSender(req *http.Request) (*http.Response, error) { + return client.Send(req, http.StatusOK, http.StatusCreated) +} + +// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always +// closes the http.Response Body. +func (client DeploymentsClient) CreateOrUpdateResponder(resp *http.Response) (result DeploymentExtended, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + autorest.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// Delete begin deleting deployment.To determine whether the operation has +// finished processing the request, call GetLongRunningOperationStatus. +// +// resourceGroupName is the name of the resource group. The name is case +// insensitive. deploymentName is the name of the deployment to be deleted. +func (client DeploymentsClient) Delete(resourceGroupName string, deploymentName string) (result autorest.Response, ae error) { + req, err := client.DeletePreparer(resourceGroupName, deploymentName) + if err != nil { + return result, autorest.NewErrorWithError(err, "resources/DeploymentsClient", "Delete", autorest.UndefinedStatusCode, "Failure preparing request") + } + + resp, err := client.DeleteSender(req) + if err != nil { + result.Response = resp + return result, autorest.NewErrorWithError(err, "resources/DeploymentsClient", "Delete", resp.StatusCode, "Failure sending request") + } + + result, err = client.DeleteResponder(resp) + if err != nil { + ae = autorest.NewErrorWithError(err, "resources/DeploymentsClient", "Delete", resp.StatusCode, "Failure responding to request") + } + + return +} + +// DeletePreparer prepares the Delete request. 
+func (client DeploymentsClient) DeletePreparer(resourceGroupName string, deploymentName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "deploymentName": url.QueryEscape(deploymentName), + "resourceGroupName": url.QueryEscape(resourceGroupName), + "subscriptionId": url.QueryEscape(client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsDelete(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPath("/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.Resources/deployments/{deploymentName}"), + autorest.WithPathParameters(pathParameters), + autorest.WithQueryParameters(queryParameters)) +} + +// DeleteSender sends the Delete request. The method will close the +// http.Response Body if it receives an error. +func (client DeploymentsClient) DeleteSender(req *http.Request) (*http.Response, error) { + return client.Send(req, http.StatusOK, http.StatusAccepted, http.StatusNoContent) +} + +// DeleteResponder handles the response to the Delete request. The method always +// closes the http.Response Body. +func (client DeploymentsClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + autorest.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted, http.StatusNoContent), + autorest.ByClosing()) + result.Response = resp + return +} + +// Get get a deployment. +// +// resourceGroupName is the name of the resource group to get. The name is +// case insensitive. deploymentName is the name of the deployment. +func (client DeploymentsClient) Get(resourceGroupName string, deploymentName string) (result DeploymentExtended, ae error) { + req, err := client.GetPreparer(resourceGroupName, deploymentName) + if err != nil { + return result, autorest.NewErrorWithError(err, "resources/DeploymentsClient", "Get", autorest.UndefinedStatusCode, "Failure preparing request") + } + + resp, err := client.GetSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "resources/DeploymentsClient", "Get", resp.StatusCode, "Failure sending request") + } + + result, err = client.GetResponder(resp) + if err != nil { + ae = autorest.NewErrorWithError(err, "resources/DeploymentsClient", "Get", resp.StatusCode, "Failure responding to request") + } + + return +} + +// GetPreparer prepares the Get request. +func (client DeploymentsClient) GetPreparer(resourceGroupName string, deploymentName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "deploymentName": url.QueryEscape(deploymentName), + "resourceGroupName": url.QueryEscape(resourceGroupName), + "subscriptionId": url.QueryEscape(client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPath("/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.Resources/deployments/{deploymentName}"), + autorest.WithPathParameters(pathParameters), + autorest.WithQueryParameters(queryParameters)) +} + +// GetSender sends the Get request. The method will close the +// http.Response Body if it receives an error. 
+func (client DeploymentsClient) GetSender(req *http.Request) (*http.Response, error) { + return client.Send(req, http.StatusOK) +} + +// GetResponder handles the response to the Get request. The method always +// closes the http.Response Body. +func (client DeploymentsClient) GetResponder(resp *http.Response) (result DeploymentExtended, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + autorest.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// List get a list of deployments. +// +// resourceGroupName is the name of the resource group to filter by. The name +// is case insensitive. filter is the filter to apply on the operation. top +// is query parameters. If null is passed returns all deployments. +func (client DeploymentsClient) List(resourceGroupName string, filter string, top *int) (result DeploymentListResult, ae error) { + req, err := client.ListPreparer(resourceGroupName, filter, top) + if err != nil { + return result, autorest.NewErrorWithError(err, "resources/DeploymentsClient", "List", autorest.UndefinedStatusCode, "Failure preparing request") + } + + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "resources/DeploymentsClient", "List", resp.StatusCode, "Failure sending request") + } + + result, err = client.ListResponder(resp) + if err != nil { + ae = autorest.NewErrorWithError(err, "resources/DeploymentsClient", "List", resp.StatusCode, "Failure responding to request") + } + + return +} + +// ListPreparer prepares the List request. +func (client DeploymentsClient) ListPreparer(resourceGroupName string, filter string, top *int) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": url.QueryEscape(resourceGroupName), + "subscriptionId": url.QueryEscape(client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + if len(filter) > 0 { + queryParameters["$filter"] = filter + } + if top != nil { + queryParameters["$top"] = top + } + + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPath("/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.Resources/deployments/"), + autorest.WithPathParameters(pathParameters), + autorest.WithQueryParameters(queryParameters)) +} + +// ListSender sends the List request. The method will close the +// http.Response Body if it receives an error. +func (client DeploymentsClient) ListSender(req *http.Request) (*http.Response, error) { + return client.Send(req, http.StatusOK) +} + +// ListResponder handles the response to the List request. The method always +// closes the http.Response Body. +func (client DeploymentsClient) ListResponder(resp *http.Response) (result DeploymentListResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + autorest.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// ListNextResults retrieves the next set of results, if any. 
+func (client DeploymentsClient) ListNextResults(lastResults DeploymentListResult) (result DeploymentListResult, ae error) { + req, err := lastResults.DeploymentListResultPreparer() + if err != nil { + return result, autorest.NewErrorWithError(err, "resources/DeploymentsClient", "List", autorest.UndefinedStatusCode, "Failure preparing next results request request") + } + if req == nil { + return + } + + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "resources/DeploymentsClient", "List", resp.StatusCode, "Failure sending next results request request") + } + + result, err = client.ListResponder(resp) + if err != nil { + ae = autorest.NewErrorWithError(err, "resources/DeploymentsClient", "List", resp.StatusCode, "Failure responding to next results request request") + } + + return +} + +// Validate validate a deployment template. +// +// resourceGroupName is the name of the resource group. The name is case +// insensitive. deploymentName is the name of the deployment. parameters is +// deployment to validate. +func (client DeploymentsClient) Validate(resourceGroupName string, deploymentName string, parameters Deployment) (result DeploymentValidateResult, ae error) { + req, err := client.ValidatePreparer(resourceGroupName, deploymentName, parameters) + if err != nil { + return result, autorest.NewErrorWithError(err, "resources/DeploymentsClient", "Validate", autorest.UndefinedStatusCode, "Failure preparing request") + } + + resp, err := client.ValidateSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "resources/DeploymentsClient", "Validate", resp.StatusCode, "Failure sending request") + } + + result, err = client.ValidateResponder(resp) + if err != nil { + ae = autorest.NewErrorWithError(err, "resources/DeploymentsClient", "Validate", resp.StatusCode, "Failure responding to request") + } + + return +} + +// ValidatePreparer prepares the Validate request. +func (client DeploymentsClient) ValidatePreparer(resourceGroupName string, deploymentName string, parameters Deployment) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "deploymentName": url.QueryEscape(deploymentName), + "resourceGroupName": url.QueryEscape(resourceGroupName), + "subscriptionId": url.QueryEscape(client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPath("/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.Resources/deployments/{deploymentName}/validate"), + autorest.WithJSON(parameters), + autorest.WithPathParameters(pathParameters), + autorest.WithQueryParameters(queryParameters)) +} + +// ValidateSender sends the Validate request. The method will close the +// http.Response Body if it receives an error. +func (client DeploymentsClient) ValidateSender(req *http.Request) (*http.Response, error) { + return client.Send(req, http.StatusOK, http.StatusBadRequest) +} + +// ValidateResponder handles the response to the Validate request. The method always +// closes the http.Response Body. 
+func (client DeploymentsClient) ValidateResponder(resp *http.Response) (result DeploymentValidateResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + autorest.WithErrorUnlessStatusCode(http.StatusOK, http.StatusBadRequest), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/resources/resources/groups.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/resources/resources/groups.go new file mode 100644 index 000000000..785cf0dff --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/resources/resources/groups.go @@ -0,0 +1,547 @@ +package resources + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator 0.12.0.0 +// Changes may cause incorrect behavior and will be lost if the code is +// regenerated. + +import ( + "github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest" + "net/http" + "net/url" +) + +// GroupsClient is the client for the Groups methods of the Resources service. +type GroupsClient struct { + ManagementClient +} + +// NewGroupsClient creates an instance of the GroupsClient client. +func NewGroupsClient(subscriptionID string) GroupsClient { + return NewGroupsClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewGroupsClientWithBaseURI creates an instance of the GroupsClient client. +func NewGroupsClientWithBaseURI(baseURI string, subscriptionID string) GroupsClient { + return GroupsClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// CheckExistence checks whether resource group exists. +// +// resourceGroupName is the name of the resource group to check. The name is +// case insensitive. +func (client GroupsClient) CheckExistence(resourceGroupName string) (result autorest.Response, ae error) { + req, err := client.CheckExistencePreparer(resourceGroupName) + if err != nil { + return result, autorest.NewErrorWithError(err, "resources/GroupsClient", "CheckExistence", autorest.UndefinedStatusCode, "Failure preparing request") + } + + resp, err := client.CheckExistenceSender(req) + if err != nil { + result.Response = resp + return result, autorest.NewErrorWithError(err, "resources/GroupsClient", "CheckExistence", resp.StatusCode, "Failure sending request") + } + + result, err = client.CheckExistenceResponder(resp) + if err != nil { + ae = autorest.NewErrorWithError(err, "resources/GroupsClient", "CheckExistence", resp.StatusCode, "Failure responding to request") + } + + return +} + +// CheckExistencePreparer prepares the CheckExistence request. 
+func (client GroupsClient) CheckExistencePreparer(resourceGroupName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": url.QueryEscape(resourceGroupName), + "subscriptionId": url.QueryEscape(client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsHead(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPath("/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}"), + autorest.WithPathParameters(pathParameters), + autorest.WithQueryParameters(queryParameters)) +} + +// CheckExistenceSender sends the CheckExistence request. The method will close the +// http.Response Body if it receives an error. +func (client GroupsClient) CheckExistenceSender(req *http.Request) (*http.Response, error) { + return client.Send(req, http.StatusOK, http.StatusNoContent, http.StatusNotFound) +} + +// CheckExistenceResponder handles the response to the CheckExistence request. The method always +// closes the http.Response Body. +func (client GroupsClient) CheckExistenceResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + autorest.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNoContent, http.StatusNotFound), + autorest.ByClosing()) + result.Response = resp + return +} + +// CreateOrUpdate create a resource group. +// +// resourceGroupName is the name of the resource group to be created or +// updated. parameters is parameters supplied to the create or update +// resource group service operation. +func (client GroupsClient) CreateOrUpdate(resourceGroupName string, parameters ResourceGroup) (result ResourceGroup, ae error) { + req, err := client.CreateOrUpdatePreparer(resourceGroupName, parameters) + if err != nil { + return result, autorest.NewErrorWithError(err, "resources/GroupsClient", "CreateOrUpdate", autorest.UndefinedStatusCode, "Failure preparing request") + } + + resp, err := client.CreateOrUpdateSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "resources/GroupsClient", "CreateOrUpdate", resp.StatusCode, "Failure sending request") + } + + result, err = client.CreateOrUpdateResponder(resp) + if err != nil { + ae = autorest.NewErrorWithError(err, "resources/GroupsClient", "CreateOrUpdate", resp.StatusCode, "Failure responding to request") + } + + return +} + +// CreateOrUpdatePreparer prepares the CreateOrUpdate request. +func (client GroupsClient) CreateOrUpdatePreparer(resourceGroupName string, parameters ResourceGroup) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": url.QueryEscape(resourceGroupName), + "subscriptionId": url.QueryEscape(client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsPut(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPath("/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}"), + autorest.WithJSON(parameters), + autorest.WithPathParameters(pathParameters), + autorest.WithQueryParameters(queryParameters)) +} + +// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the +// http.Response Body if it receives an error. 
+func (client GroupsClient) CreateOrUpdateSender(req *http.Request) (*http.Response, error) { + return client.Send(req, http.StatusCreated, http.StatusOK) +} + +// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always +// closes the http.Response Body. +func (client GroupsClient) CreateOrUpdateResponder(resp *http.Response) (result ResourceGroup, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + autorest.WithErrorUnlessStatusCode(http.StatusCreated, http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// Delete begin deleting resource group.To determine whether the operation has +// finished processing the request, call GetLongRunningOperationStatus. +// +// resourceGroupName is the name of the resource group to be deleted. The name +// is case insensitive. +func (client GroupsClient) Delete(resourceGroupName string) (result autorest.Response, ae error) { + req, err := client.DeletePreparer(resourceGroupName) + if err != nil { + return result, autorest.NewErrorWithError(err, "resources/GroupsClient", "Delete", autorest.UndefinedStatusCode, "Failure preparing request") + } + + resp, err := client.DeleteSender(req) + if err != nil { + result.Response = resp + return result, autorest.NewErrorWithError(err, "resources/GroupsClient", "Delete", resp.StatusCode, "Failure sending request") + } + + result, err = client.DeleteResponder(resp) + if err != nil { + ae = autorest.NewErrorWithError(err, "resources/GroupsClient", "Delete", resp.StatusCode, "Failure responding to request") + } + + return +} + +// DeletePreparer prepares the Delete request. +func (client GroupsClient) DeletePreparer(resourceGroupName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": url.QueryEscape(resourceGroupName), + "subscriptionId": url.QueryEscape(client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsDelete(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPath("/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}"), + autorest.WithPathParameters(pathParameters), + autorest.WithQueryParameters(queryParameters)) +} + +// DeleteSender sends the Delete request. The method will close the +// http.Response Body if it receives an error. +func (client GroupsClient) DeleteSender(req *http.Request) (*http.Response, error) { + return client.Send(req, http.StatusAccepted, http.StatusOK) +} + +// DeleteResponder handles the response to the Delete request. The method always +// closes the http.Response Body. +func (client GroupsClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + autorest.WithErrorUnlessStatusCode(http.StatusAccepted, http.StatusOK), + autorest.ByClosing()) + result.Response = resp + return +} + +// Get get a resource group. +// +// resourceGroupName is the name of the resource group to get. The name is +// case insensitive. 
+func (client GroupsClient) Get(resourceGroupName string) (result ResourceGroup, ae error) { + req, err := client.GetPreparer(resourceGroupName) + if err != nil { + return result, autorest.NewErrorWithError(err, "resources/GroupsClient", "Get", autorest.UndefinedStatusCode, "Failure preparing request") + } + + resp, err := client.GetSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "resources/GroupsClient", "Get", resp.StatusCode, "Failure sending request") + } + + result, err = client.GetResponder(resp) + if err != nil { + ae = autorest.NewErrorWithError(err, "resources/GroupsClient", "Get", resp.StatusCode, "Failure responding to request") + } + + return +} + +// GetPreparer prepares the Get request. +func (client GroupsClient) GetPreparer(resourceGroupName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": url.QueryEscape(resourceGroupName), + "subscriptionId": url.QueryEscape(client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPath("/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}"), + autorest.WithPathParameters(pathParameters), + autorest.WithQueryParameters(queryParameters)) +} + +// GetSender sends the Get request. The method will close the +// http.Response Body if it receives an error. +func (client GroupsClient) GetSender(req *http.Request) (*http.Response, error) { + return client.Send(req, http.StatusOK) +} + +// GetResponder handles the response to the Get request. The method always +// closes the http.Response Body. +func (client GroupsClient) GetResponder(resp *http.Response) (result ResourceGroup, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + autorest.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// List gets a collection of resource groups. +// +// filter is the filter to apply on the operation. top is query parameters. If +// null is passed returns all resource groups. +func (client GroupsClient) List(filter string, top *int) (result ResourceGroupListResult, ae error) { + req, err := client.ListPreparer(filter, top) + if err != nil { + return result, autorest.NewErrorWithError(err, "resources/GroupsClient", "List", autorest.UndefinedStatusCode, "Failure preparing request") + } + + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "resources/GroupsClient", "List", resp.StatusCode, "Failure sending request") + } + + result, err = client.ListResponder(resp) + if err != nil { + ae = autorest.NewErrorWithError(err, "resources/GroupsClient", "List", resp.StatusCode, "Failure responding to request") + } + + return +} + +// ListPreparer prepares the List request. 
+func (client GroupsClient) ListPreparer(filter string, top *int) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "subscriptionId": url.QueryEscape(client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + if len(filter) > 0 { + queryParameters["$filter"] = filter + } + if top != nil { + queryParameters["$top"] = top + } + + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPath("/subscriptions/{subscriptionId}/resourcegroups"), + autorest.WithPathParameters(pathParameters), + autorest.WithQueryParameters(queryParameters)) +} + +// ListSender sends the List request. The method will close the +// http.Response Body if it receives an error. +func (client GroupsClient) ListSender(req *http.Request) (*http.Response, error) { + return client.Send(req, http.StatusOK) +} + +// ListResponder handles the response to the List request. The method always +// closes the http.Response Body. +func (client GroupsClient) ListResponder(resp *http.Response) (result ResourceGroupListResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + autorest.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// ListNextResults retrieves the next set of results, if any. +func (client GroupsClient) ListNextResults(lastResults ResourceGroupListResult) (result ResourceGroupListResult, ae error) { + req, err := lastResults.ResourceGroupListResultPreparer() + if err != nil { + return result, autorest.NewErrorWithError(err, "resources/GroupsClient", "List", autorest.UndefinedStatusCode, "Failure preparing next results request request") + } + if req == nil { + return + } + + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "resources/GroupsClient", "List", resp.StatusCode, "Failure sending next results request request") + } + + result, err = client.ListResponder(resp) + if err != nil { + ae = autorest.NewErrorWithError(err, "resources/GroupsClient", "List", resp.StatusCode, "Failure responding to next results request request") + } + + return +} + +// ListResources get all of the resources under a subscription. +// +// resourceGroupName is query parameters. If null is passed returns all +// resource groups. filter is the filter to apply on the operation. top is +// query parameters. If null is passed returns all resource groups. 
+func (client GroupsClient) ListResources(resourceGroupName string, filter string, top *int) (result ResourceListResult, ae error) { + req, err := client.ListResourcesPreparer(resourceGroupName, filter, top) + if err != nil { + return result, autorest.NewErrorWithError(err, "resources/GroupsClient", "ListResources", autorest.UndefinedStatusCode, "Failure preparing request") + } + + resp, err := client.ListResourcesSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "resources/GroupsClient", "ListResources", resp.StatusCode, "Failure sending request") + } + + result, err = client.ListResourcesResponder(resp) + if err != nil { + ae = autorest.NewErrorWithError(err, "resources/GroupsClient", "ListResources", resp.StatusCode, "Failure responding to request") + } + + return +} + +// ListResourcesPreparer prepares the ListResources request. +func (client GroupsClient) ListResourcesPreparer(resourceGroupName string, filter string, top *int) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": url.QueryEscape(resourceGroupName), + "subscriptionId": url.QueryEscape(client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + if len(filter) > 0 { + queryParameters["$filter"] = filter + } + if top != nil { + queryParameters["$top"] = top + } + + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/resources"), + autorest.WithPathParameters(pathParameters), + autorest.WithQueryParameters(queryParameters)) +} + +// ListResourcesSender sends the ListResources request. The method will close the +// http.Response Body if it receives an error. +func (client GroupsClient) ListResourcesSender(req *http.Request) (*http.Response, error) { + return client.Send(req, http.StatusOK) +} + +// ListResourcesResponder handles the response to the ListResources request. The method always +// closes the http.Response Body. +func (client GroupsClient) ListResourcesResponder(resp *http.Response) (result ResourceListResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + autorest.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// ListResourcesNextResults retrieves the next set of results, if any. 
+func (client GroupsClient) ListResourcesNextResults(lastResults ResourceListResult) (result ResourceListResult, ae error) { + req, err := lastResults.ResourceListResultPreparer() + if err != nil { + return result, autorest.NewErrorWithError(err, "resources/GroupsClient", "ListResources", autorest.UndefinedStatusCode, "Failure preparing next results request request") + } + if req == nil { + return + } + + resp, err := client.ListResourcesSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "resources/GroupsClient", "ListResources", resp.StatusCode, "Failure sending next results request request") + } + + result, err = client.ListResourcesResponder(resp) + if err != nil { + ae = autorest.NewErrorWithError(err, "resources/GroupsClient", "ListResources", resp.StatusCode, "Failure responding to next results request request") + } + + return +} + +// Patch resource groups can be updated through a simple PATCH operation to a +// group address. The format of the request is the same as that for creating +// a resource groups, though if a field is unspecified current value will be +// carried over. +// +// resourceGroupName is the name of the resource group to be created or +// updated. The name is case insensitive. parameters is parameters supplied +// to the update state resource group service operation. +func (client GroupsClient) Patch(resourceGroupName string, parameters ResourceGroup) (result ResourceGroup, ae error) { + req, err := client.PatchPreparer(resourceGroupName, parameters) + if err != nil { + return result, autorest.NewErrorWithError(err, "resources/GroupsClient", "Patch", autorest.UndefinedStatusCode, "Failure preparing request") + } + + resp, err := client.PatchSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "resources/GroupsClient", "Patch", resp.StatusCode, "Failure sending request") + } + + result, err = client.PatchResponder(resp) + if err != nil { + ae = autorest.NewErrorWithError(err, "resources/GroupsClient", "Patch", resp.StatusCode, "Failure responding to request") + } + + return +} + +// PatchPreparer prepares the Patch request. +func (client GroupsClient) PatchPreparer(resourceGroupName string, parameters ResourceGroup) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": url.QueryEscape(resourceGroupName), + "subscriptionId": url.QueryEscape(client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsPatch(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPath("/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}"), + autorest.WithJSON(parameters), + autorest.WithPathParameters(pathParameters), + autorest.WithQueryParameters(queryParameters)) +} + +// PatchSender sends the Patch request. The method will close the +// http.Response Body if it receives an error. +func (client GroupsClient) PatchSender(req *http.Request) (*http.Response, error) { + return client.Send(req, http.StatusOK) +} + +// PatchResponder handles the response to the Patch request. The method always +// closes the http.Response Body. 
+func (client GroupsClient) PatchResponder(resp *http.Response) (result ResourceGroup, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + autorest.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/resources/resources/models.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/resources/resources/models.go new file mode 100644 index 000000000..754edbb5e --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/resources/resources/models.go @@ -0,0 +1,460 @@ +package resources + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator 0.12.0.0 +// Changes may cause incorrect behavior and will be lost if the code is +// regenerated. + +import ( + "github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest" + "github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/date" + "github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/to" + "net/http" +) + +// DeploymentMode enumerates the values for deployment mode. +type DeploymentMode string + +const ( + // Complete specifies the complete state for deployment mode. + Complete DeploymentMode = "Complete" + // Incremental specifies the incremental state for deployment mode. + Incremental DeploymentMode = "Incremental" +) + +// BasicDependency is deployment dependency information. +type BasicDependency struct { + ID *string `json:"id,omitempty"` + ResourceType *string `json:"resourceType,omitempty"` + ResourceName *string `json:"resourceName,omitempty"` +} + +// Dependency is deployment dependency information. +type Dependency struct { + DependsOn *[]BasicDependency `json:"dependsOn,omitempty"` + ID *string `json:"id,omitempty"` + ResourceType *string `json:"resourceType,omitempty"` + ResourceName *string `json:"resourceName,omitempty"` +} + +// Deployment is deployment operation parameters. +type Deployment struct { + Properties *DeploymentProperties `json:"properties,omitempty"` +} + +// DeploymentExtended is deployment information. +type DeploymentExtended struct { + autorest.Response `json:"-"` + ID *string `json:"id,omitempty"` + Name *string `json:"name,omitempty"` + Properties *DeploymentPropertiesExtended `json:"properties,omitempty"` +} + +// DeploymentExtendedFilter is deployment filter. +type DeploymentExtendedFilter struct { + ProvisioningState *string `json:"provisioningState,omitempty"` +} + +// DeploymentListResult is list of deployments. 
+type DeploymentListResult struct { + autorest.Response `json:"-"` + Value *[]DeploymentExtended `json:"value,omitempty"` + NextLink *string `json:"nextLink,omitempty"` +} + +// DeploymentListResultPreparer prepares a request to retrieve the next set of results. It returns +// nil if no more results exist. +func (client DeploymentListResult) DeploymentListResultPreparer() (*http.Request, error) { + if client.NextLink == nil || len(to.String(client.NextLink)) <= 0 { + return nil, nil + } + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(to.String(client.NextLink))) +} + +// DeploymentOperation is deployment operation information. +type DeploymentOperation struct { + autorest.Response `json:"-"` + ID *string `json:"id,omitempty"` + OperationID *string `json:"operationId,omitempty"` + Properties *DeploymentOperationProperties `json:"properties,omitempty"` +} + +// DeploymentOperationProperties is deployment operation properties. +type DeploymentOperationProperties struct { + ProvisioningState *string `json:"provisioningState,omitempty"` + Timestamp *date.Time `json:"timestamp,omitempty"` + StatusCode *string `json:"statusCode,omitempty"` + StatusMessage *map[string]interface{} `json:"statusMessage,omitempty"` + TargetResource *TargetResource `json:"targetResource,omitempty"` +} + +// DeploymentOperationsListResult is list of deployment operations. +type DeploymentOperationsListResult struct { + autorest.Response `json:"-"` + Value *[]DeploymentOperation `json:"value,omitempty"` + NextLink *string `json:"nextLink,omitempty"` +} + +// DeploymentOperationsListResultPreparer prepares a request to retrieve the next set of results. It returns +// nil if no more results exist. +func (client DeploymentOperationsListResult) DeploymentOperationsListResultPreparer() (*http.Request, error) { + if client.NextLink == nil || len(to.String(client.NextLink)) <= 0 { + return nil, nil + } + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(to.String(client.NextLink))) +} + +// DeploymentProperties is deployment properties. +type DeploymentProperties struct { + Template *map[string]interface{} `json:"template,omitempty"` + TemplateLink *TemplateLink `json:"templateLink,omitempty"` + Parameters *map[string]interface{} `json:"parameters,omitempty"` + ParametersLink *ParametersLink `json:"parametersLink,omitempty"` + Mode DeploymentMode `json:"mode,omitempty"` +} + +// DeploymentPropertiesExtended is deployment properties with additional +// details. +type DeploymentPropertiesExtended struct { + ProvisioningState *string `json:"provisioningState,omitempty"` + CorrelationID *string `json:"correlationId,omitempty"` + Timestamp *date.Time `json:"timestamp,omitempty"` + Outputs *map[string]interface{} `json:"outputs,omitempty"` + Providers *[]Provider `json:"providers,omitempty"` + Dependencies *[]Dependency `json:"dependencies,omitempty"` + Template *map[string]interface{} `json:"template,omitempty"` + TemplateLink *TemplateLink `json:"templateLink,omitempty"` + Parameters *map[string]interface{} `json:"parameters,omitempty"` + ParametersLink *ParametersLink `json:"parametersLink,omitempty"` + Mode DeploymentMode `json:"mode,omitempty"` +} + +// DeploymentValidateResult is information from validate template deployment +// response. 
+type DeploymentValidateResult struct {
+	autorest.Response `json:"-"`
+	Error *ResourceManagementErrorWithDetails `json:"error,omitempty"`
+	Properties *DeploymentPropertiesExtended `json:"properties,omitempty"`
+}
+
+// GenericResource is resource information.
+type GenericResource struct {
+	autorest.Response `json:"-"`
+	ID *string `json:"id,omitempty"`
+	Name *string `json:"name,omitempty"`
+	Type *string `json:"type,omitempty"`
+	Location *string `json:"location,omitempty"`
+	Tags *map[string]*string `json:"tags,omitempty"`
+	Plan *Plan `json:"plan,omitempty"`
+	Properties *map[string]interface{} `json:"properties,omitempty"`
+}
+
+// GenericResourceFilter is resource filter.
+type GenericResourceFilter struct {
+	ResourceType *string `json:"resourceType,omitempty"`
+	Tagname *string `json:"tagname,omitempty"`
+	Tagvalue *string `json:"tagvalue,omitempty"`
+}
+
+// MoveInfo is parameters of move resources.
+type MoveInfo struct {
+	Resources *[]string `json:"resources,omitempty"`
+	TargetResourceGroup *string `json:"targetResourceGroup,omitempty"`
+}
+
+// ParametersLink is entity representing the reference to the deployment
+// parameters.
+type ParametersLink struct {
+	URI *string `json:"uri,omitempty"`
+	ContentVersion *string `json:"contentVersion,omitempty"`
+}
+
+// Plan is plan for the resource.
+type Plan struct {
+	Name *string `json:"name,omitempty"`
+	Publisher *string `json:"publisher,omitempty"`
+	Product *string `json:"product,omitempty"`
+	PromotionCode *string `json:"promotionCode,omitempty"`
+}
+
+// PolicyAssignment is policy assignment.
+type PolicyAssignment struct {
+	autorest.Response `json:"-"`
+	Properties *PolicyAssignmentProperties `json:"properties,omitempty"`
+	Name *string `json:"name,omitempty"`
+}
+
+// PolicyAssignmentListResult is policy assignment list operation result.
+type PolicyAssignmentListResult struct {
+	autorest.Response `json:"-"`
+	Value *[]PolicyAssignment `json:"value,omitempty"`
+	NextLink *string `json:"nextLink,omitempty"`
+}
+
+// PolicyAssignmentListResultPreparer prepares a request to retrieve the next set of results. It returns
+// nil if no more results exist.
+func (client PolicyAssignmentListResult) PolicyAssignmentListResultPreparer() (*http.Request, error) {
+	if client.NextLink == nil || len(to.String(client.NextLink)) <= 0 {
+		return nil, nil
+	}
+	return autorest.Prepare(&http.Request{},
+		autorest.AsJSON(),
+		autorest.AsGet(),
+		autorest.WithBaseURL(to.String(client.NextLink)))
+}
+
+// PolicyAssignmentProperties is policy assignment properties.
+type PolicyAssignmentProperties struct {
+	Scope *string `json:"scope,omitempty"`
+	DisplayName *string `json:"displayName,omitempty"`
+	PolicyDefinitionID *string `json:"policyDefinitionId,omitempty"`
+}
+
+// PolicyDefinition is policy definition.
+type PolicyDefinition struct {
+	autorest.Response `json:"-"`
+	Properties *PolicyDefinitionProperties `json:"properties,omitempty"`
+	Name *string `json:"name,omitempty"`
+}
+
+// PolicyDefinitionProperties is policy definition properties.
+type PolicyDefinitionProperties struct {
+	Description *string `json:"description,omitempty"`
+	DisplayName *string `json:"displayName,omitempty"`
+	PolicyRule *map[string]interface{} `json:"policyRule,omitempty"`
+}
+
+// Provider is resource provider information.
+type Provider struct {
+	autorest.Response `json:"-"`
+	ID *string `json:"id,omitempty"`
+	Namespace *string `json:"namespace,omitempty"`
+	RegistrationState *string `json:"registrationState,omitempty"`
+	ResourceTypes *[]ProviderResourceType `json:"resourceTypes,omitempty"`
+}
+
+// ProviderListResult is list of resource providers.
+type ProviderListResult struct {
+	autorest.Response `json:"-"`
+	Value *[]Provider `json:"value,omitempty"`
+	NextLink *string `json:"nextLink,omitempty"`
+}
+
+// ProviderListResultPreparer prepares a request to retrieve the next set of results. It returns
+// nil if no more results exist.
+func (client ProviderListResult) ProviderListResultPreparer() (*http.Request, error) {
+	if client.NextLink == nil || len(to.String(client.NextLink)) <= 0 {
+		return nil, nil
+	}
+	return autorest.Prepare(&http.Request{},
+		autorest.AsJSON(),
+		autorest.AsGet(),
+		autorest.WithBaseURL(to.String(client.NextLink)))
+}
+
+// ProviderResourceType is resource type managed by the resource provider.
+type ProviderResourceType struct {
+	ResourceType *string `json:"resourceType,omitempty"`
+	Locations *[]string `json:"locations,omitempty"`
+	APIVersions *[]string `json:"apiVersions,omitempty"`
+	Properties *map[string]*string `json:"properties,omitempty"`
+}
+
+// Resource is basic resource information.
+type Resource struct {
+	ID *string `json:"id,omitempty"`
+	Name *string `json:"name,omitempty"`
+	Type *string `json:"type,omitempty"`
+	Location *string `json:"location,omitempty"`
+	Tags *map[string]*string `json:"tags,omitempty"`
+}
+
+// ResourceGroup is resource group information.
+type ResourceGroup struct {
+	autorest.Response `json:"-"`
+	ID *string `json:"id,omitempty"`
+	Name *string `json:"name,omitempty"`
+	Properties *ResourceGroupProperties `json:"properties,omitempty"`
+	Location *string `json:"location,omitempty"`
+	Tags *map[string]*string `json:"tags,omitempty"`
+}
+
+// ResourceGroupFilter is resource group filter.
+type ResourceGroupFilter struct {
+	TagName *string `json:"tagName,omitempty"`
+	TagValue *string `json:"tagValue,omitempty"`
+}
+
+// ResourceGroupListResult is list of resource groups.
+type ResourceGroupListResult struct {
+	autorest.Response `json:"-"`
+	Value *[]ResourceGroup `json:"value,omitempty"`
+	NextLink *string `json:"nextLink,omitempty"`
+}
+
+// ResourceGroupListResultPreparer prepares a request to retrieve the next set of results. It returns
+// nil if no more results exist.
+func (client ResourceGroupListResult) ResourceGroupListResultPreparer() (*http.Request, error) {
+	if client.NextLink == nil || len(to.String(client.NextLink)) <= 0 {
+		return nil, nil
+	}
+	return autorest.Prepare(&http.Request{},
+		autorest.AsJSON(),
+		autorest.AsGet(),
+		autorest.WithBaseURL(to.String(client.NextLink)))
+}
+
+// ResourceGroupProperties is the resource group properties.
+type ResourceGroupProperties struct {
+	ProvisioningState *string `json:"provisioningState,omitempty"`
+}
+
+// ResourceListResult is list of resources.
+type ResourceListResult struct {
+	autorest.Response `json:"-"`
+	Value *[]GenericResource `json:"value,omitempty"`
+	NextLink *string `json:"nextLink,omitempty"`
+}
+
+// ResourceListResultPreparer prepares a request to retrieve the next set of results. It returns
+// nil if no more results exist.
+func (client ResourceListResult) ResourceListResultPreparer() (*http.Request, error) {
+	if client.NextLink == nil || len(to.String(client.NextLink)) <= 0 {
+		return nil, nil
+	}
+	return autorest.Prepare(&http.Request{},
+		autorest.AsJSON(),
+		autorest.AsGet(),
+		autorest.WithBaseURL(to.String(client.NextLink)))
+}
+
+// ResourceManagementError is a resource management error.
+type ResourceManagementError struct {
+	Code *string `json:"code,omitempty"`
+	Message *string `json:"message,omitempty"`
+	Target *string `json:"target,omitempty"`
+}
+
+// ResourceManagementErrorWithDetails is a resource management error with
+// nested error details.
+type ResourceManagementErrorWithDetails struct {
+	Code *string `json:"code,omitempty"`
+	Message *string `json:"message,omitempty"`
+	Target *string `json:"target,omitempty"`
+	Details *[]ResourceManagementError `json:"details,omitempty"`
+}
+
+// ResourceProviderOperationDefinition is resource provider operation
+// information.
+type ResourceProviderOperationDefinition struct {
+	Name *string `json:"name,omitempty"`
+	Display *ResourceProviderOperationDisplayProperties `json:"display,omitempty"`
+}
+
+// ResourceProviderOperationDetailListResult is list of resource provider
+// operations.
+type ResourceProviderOperationDetailListResult struct {
+	autorest.Response `json:"-"`
+	Value *[]ResourceProviderOperationDefinition `json:"value,omitempty"`
+	NextLink *string `json:"nextLink,omitempty"`
+}
+
+// ResourceProviderOperationDetailListResultPreparer prepares a request to retrieve the next set of results. It returns
+// nil if no more results exist.
+func (client ResourceProviderOperationDetailListResult) ResourceProviderOperationDetailListResultPreparer() (*http.Request, error) {
+	if client.NextLink == nil || len(to.String(client.NextLink)) <= 0 {
+		return nil, nil
+	}
+	return autorest.Prepare(&http.Request{},
+		autorest.AsJSON(),
+		autorest.AsGet(),
+		autorest.WithBaseURL(to.String(client.NextLink)))
+}
+
+// ResourceProviderOperationDisplayProperties is resource provider operation's
+// display properties.
+type ResourceProviderOperationDisplayProperties struct {
+	Publisher *string `json:"publisher,omitempty"`
+	Provider *string `json:"provider,omitempty"`
+	Resource *string `json:"resource,omitempty"`
+	Operation *string `json:"operation,omitempty"`
+	Description *string `json:"description,omitempty"`
+}
+
+// SubResource is a reference to a resource by ID.
+type SubResource struct {
+	ID *string `json:"id,omitempty"`
+}
+
+// TagCount is tag count.
+type TagCount struct {
+	Type *string `json:"type,omitempty"`
+	Value *string `json:"value,omitempty"`
+}
+
+// TagDetails is tag details.
+type TagDetails struct {
+	autorest.Response `json:"-"`
+	ID *string `json:"id,omitempty"`
+	TagName *string `json:"tagName,omitempty"`
+	Count *TagCount `json:"count,omitempty"`
+	Values *[]TagValue `json:"values,omitempty"`
+}
+
+// TagsListResult is list of subscription tags.
+type TagsListResult struct {
+	autorest.Response `json:"-"`
+	Value *[]TagDetails `json:"value,omitempty"`
+	NextLink *string `json:"nextLink,omitempty"`
+}
+
+// TagsListResultPreparer prepares a request to retrieve the next set of results. It returns
+// nil if no more results exist.
+func (client TagsListResult) TagsListResultPreparer() (*http.Request, error) {
+	if client.NextLink == nil || len(to.String(client.NextLink)) <= 0 {
+		return nil, nil
+	}
+	return autorest.Prepare(&http.Request{},
+		autorest.AsJSON(),
+		autorest.AsGet(),
+		autorest.WithBaseURL(to.String(client.NextLink)))
+}
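+
+// The sketch below is an editorial illustration and is not part of the
+// generated SDK. Every list result in this package pairs a NextLink field
+// with a *Preparer method like the ones above; the matching client method
+// (here GroupsClient.ListResourcesNextResults) uses that preparer and
+// returns an empty result once the preparer yields a nil request. A caller
+// can therefore drain all pages like this:
+func listAllResources(client GroupsClient, resourceGroupName string) ([]GenericResource, error) {
+	var all []GenericResource
+	page, err := client.ListResources(resourceGroupName, "", nil)
+	for err == nil {
+		if page.Value != nil {
+			all = append(all, *page.Value...)
+		}
+		// Stop when the service reports no further pages.
+		if page.NextLink == nil || *page.NextLink == "" {
+			break
+		}
+		page, err = client.ListResourcesNextResults(page)
+	}
+	return all, err
+}
+
+// TagValue is tag information.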
+type TagValue struct {
+	autorest.Response `json:"-"`
+	ID *string `json:"id,omitempty"`
+	TagValueProperty *string `json:"tagValue,omitempty"`
+	Count *TagCount `json:"count,omitempty"`
+}
+
+// TargetResource is target resource.
+type TargetResource struct {
+	ID *string `json:"id,omitempty"`
+	ResourceName *string `json:"resourceName,omitempty"`
+	ResourceType *string `json:"resourceType,omitempty"`
+}
+
+// TemplateLink is entity representing the reference to the template.
+type TemplateLink struct {
+	URI *string `json:"uri,omitempty"`
+	ContentVersion *string `json:"contentVersion,omitempty"`
+}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/resources/resources/policyassignments.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/resources/resources/policyassignments.go
new file mode 100644
index 000000000..e45d2d7f1
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/resources/resources/policyassignments.go
@@ -0,0 +1,785 @@
+package resources
+
+// Copyright (c) Microsoft and contributors. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Code generated by Microsoft (R) AutoRest Code Generator 0.12.0.0
+// Changes may cause incorrect behavior and will be lost if the code is
+// regenerated.
+
+import (
+	"github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest"
+	"net/http"
+	"net/url"
+)
+
+// PolicyAssignmentsClient is the client for the PolicyAssignments methods of
+// the Resources service.
+type PolicyAssignmentsClient struct {
+	ManagementClient
+}
+
+// NewPolicyAssignmentsClient creates an instance of the
+// PolicyAssignmentsClient client.
+func NewPolicyAssignmentsClient(subscriptionID string) PolicyAssignmentsClient {
+	return NewPolicyAssignmentsClientWithBaseURI(DefaultBaseURI, subscriptionID)
+}
+
+// NewPolicyAssignmentsClientWithBaseURI creates an instance of the
+// PolicyAssignmentsClient client.
+func NewPolicyAssignmentsClientWithBaseURI(baseURI string, subscriptionID string) PolicyAssignmentsClient {
+	return PolicyAssignmentsClient{NewWithBaseURI(baseURI, subscriptionID)}
+}
+
+// Create creates a policy assignment.
+//
+// scope is scope. policyAssignmentName is policy assignment name. parameters
+// is policy assignment.
+func (client PolicyAssignmentsClient) Create(scope string, policyAssignmentName string, parameters PolicyAssignment) (result PolicyAssignment, ae error) {
+	req, err := client.CreatePreparer(scope, policyAssignmentName, parameters)
+	if err != nil {
+		return result, autorest.NewErrorWithError(err, "resources/PolicyAssignmentsClient", "Create", autorest.UndefinedStatusCode, "Failure preparing request")
+	}
+
+	resp, err := client.CreateSender(req)
+	if err != nil {
+		result.Response = autorest.Response{Response: resp}
+		return result, autorest.NewErrorWithError(err, "resources/PolicyAssignmentsClient", "Create", resp.StatusCode, "Failure sending request")
+	}
+
+	result, err = client.CreateResponder(resp)
+	if err != nil {
+		ae = autorest.NewErrorWithError(err, "resources/PolicyAssignmentsClient", "Create", resp.StatusCode, "Failure responding to request")
+	}
+
+	return
+}
+
+// CreatePreparer prepares the Create request.
+func (client PolicyAssignmentsClient) CreatePreparer(scope string, policyAssignmentName string, parameters PolicyAssignment) (*http.Request, error) {
+	pathParameters := map[string]interface{}{
+		"policyAssignmentName": url.QueryEscape(policyAssignmentName),
+		"scope": scope,
+		"subscriptionId": url.QueryEscape(client.SubscriptionID),
+	}
+
+	queryParameters := map[string]interface{}{
+		"api-version": APIVersion,
+	}
+
+	return autorest.Prepare(&http.Request{},
+		autorest.AsJSON(),
+		autorest.AsPut(),
+		autorest.WithBaseURL(client.BaseURI),
+		autorest.WithPath("/{scope}/providers/Microsoft.Authorization/policyAssignments/{policyAssignmentName}"),
+		autorest.WithJSON(parameters),
+		autorest.WithPathParameters(pathParameters),
+		autorest.WithQueryParameters(queryParameters))
+}
+
+// CreateSender sends the Create request. The method will close the
+// http.Response Body if it receives an error.
+func (client PolicyAssignmentsClient) CreateSender(req *http.Request) (*http.Response, error) {
+	return client.Send(req, http.StatusOK, http.StatusCreated)
+}
+
+// CreateResponder handles the response to the Create request. The method always
+// closes the http.Response Body.
+func (client PolicyAssignmentsClient) CreateResponder(resp *http.Response) (result PolicyAssignment, err error) {
+	err = autorest.Respond(
+		resp,
+		client.ByInspecting(),
+		autorest.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated),
+		autorest.ByUnmarshallingJSON(&result),
+		autorest.ByClosing())
+	result.Response = autorest.Response{Response: resp}
+	return
+}
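+
+// The sketch below is an editorial illustration and is not part of the
+// generated SDK. It assigns an existing policy definition at subscription
+// scope; the scope and definition ID strings follow the path templates used
+// by this client, and the names are hypothetical.
+func assignPolicyToSubscription(client PolicyAssignmentsClient, subscriptionID string) (PolicyAssignment, error) {
+	scope := "subscriptions/" + subscriptionID
+	definitionID := "/subscriptions/" + subscriptionID +
+		"/providers/Microsoft.Authorization/policyDefinitions/my-definition"
+	properties := PolicyAssignmentProperties{
+		Scope: &scope,
+		PolicyDefinitionID: &definitionID,
+	}
+	return client.Create(scope, "my-assignment", PolicyAssignment{Properties: &properties})
+}
+
+// CreateByID creates a policy assignment by ID.
+//
+// policyAssignmentID is the policy assignment ID. parameters is the policy
+// assignment.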
+func (client PolicyAssignmentsClient) CreateByID(policyAssignmentID string, parameters PolicyAssignment) (result PolicyAssignment, ae error) {
+	req, err := client.CreateByIDPreparer(policyAssignmentID, parameters)
+	if err != nil {
+		return result, autorest.NewErrorWithError(err, "resources/PolicyAssignmentsClient", "CreateByID", autorest.UndefinedStatusCode, "Failure preparing request")
+	}
+
+	resp, err := client.CreateByIDSender(req)
+	if err != nil {
+		result.Response = autorest.Response{Response: resp}
+		return result, autorest.NewErrorWithError(err, "resources/PolicyAssignmentsClient", "CreateByID", resp.StatusCode, "Failure sending request")
+	}
+
+	result, err = client.CreateByIDResponder(resp)
+	if err != nil {
+		ae = autorest.NewErrorWithError(err, "resources/PolicyAssignmentsClient", "CreateByID", resp.StatusCode, "Failure responding to request")
+	}
+
+	return
+}
+
+// CreateByIDPreparer prepares the CreateByID request.
+func (client PolicyAssignmentsClient) CreateByIDPreparer(policyAssignmentID string, parameters PolicyAssignment) (*http.Request, error) {
+	pathParameters := map[string]interface{}{
+		"policyAssignmentId": policyAssignmentID,
+		"subscriptionId": url.QueryEscape(client.SubscriptionID),
+	}
+
+	queryParameters := map[string]interface{}{
+		"api-version": APIVersion,
+	}
+
+	return autorest.Prepare(&http.Request{},
+		autorest.AsJSON(),
+		autorest.AsPut(),
+		autorest.WithBaseURL(client.BaseURI),
+		autorest.WithPath("/{policyAssignmentId}"),
+		autorest.WithJSON(parameters),
+		autorest.WithPathParameters(pathParameters),
+		autorest.WithQueryParameters(queryParameters))
+}
+
+// CreateByIDSender sends the CreateByID request. The method will close the
+// http.Response Body if it receives an error.
+func (client PolicyAssignmentsClient) CreateByIDSender(req *http.Request) (*http.Response, error) {
+	return client.Send(req, http.StatusOK, http.StatusCreated)
+}
+
+// CreateByIDResponder handles the response to the CreateByID request. The method always
+// closes the http.Response Body.
+func (client PolicyAssignmentsClient) CreateByIDResponder(resp *http.Response) (result PolicyAssignment, err error) {
+	err = autorest.Respond(
+		resp,
+		client.ByInspecting(),
+		autorest.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated),
+		autorest.ByUnmarshallingJSON(&result),
+		autorest.ByClosing())
+	result.Response = autorest.Response{Response: resp}
+	return
+}
+
+// Delete deletes a policy assignment.
+//
+// scope is scope. policyAssignmentName is policy assignment name.
+func (client PolicyAssignmentsClient) Delete(scope string, policyAssignmentName string) (result PolicyAssignment, ae error) {
+	req, err := client.DeletePreparer(scope, policyAssignmentName)
+	if err != nil {
+		return result, autorest.NewErrorWithError(err, "resources/PolicyAssignmentsClient", "Delete", autorest.UndefinedStatusCode, "Failure preparing request")
+	}
+
+	resp, err := client.DeleteSender(req)
+	if err != nil {
+		result.Response = autorest.Response{Response: resp}
+		return result, autorest.NewErrorWithError(err, "resources/PolicyAssignmentsClient", "Delete", resp.StatusCode, "Failure sending request")
+	}
+
+	result, err = client.DeleteResponder(resp)
+	if err != nil {
+		ae = autorest.NewErrorWithError(err, "resources/PolicyAssignmentsClient", "Delete", resp.StatusCode, "Failure responding to request")
+	}
+
+	return
+}
+
+// DeletePreparer prepares the Delete request.
+func (client PolicyAssignmentsClient) DeletePreparer(scope string, policyAssignmentName string) (*http.Request, error) {
+	pathParameters := map[string]interface{}{
+		"policyAssignmentName": url.QueryEscape(policyAssignmentName),
+		"scope": scope,
+		"subscriptionId": url.QueryEscape(client.SubscriptionID),
+	}
+
+	queryParameters := map[string]interface{}{
+		"api-version": APIVersion,
+	}
+
+	return autorest.Prepare(&http.Request{},
+		autorest.AsJSON(),
+		autorest.AsDelete(),
+		autorest.WithBaseURL(client.BaseURI),
+		autorest.WithPath("/{scope}/providers/Microsoft.Authorization/policyAssignments/{policyAssignmentName}"),
+		autorest.WithPathParameters(pathParameters),
+		autorest.WithQueryParameters(queryParameters))
+}
+
+// DeleteSender sends the Delete request. The method will close the
+// http.Response Body if it receives an error.
+func (client PolicyAssignmentsClient) DeleteSender(req *http.Request) (*http.Response, error) {
+	return client.Send(req, http.StatusOK)
+}
+
+// DeleteResponder handles the response to the Delete request. The method always
+// closes the http.Response Body.
+func (client PolicyAssignmentsClient) DeleteResponder(resp *http.Response) (result PolicyAssignment, err error) {
+	err = autorest.Respond(
+		resp,
+		client.ByInspecting(),
+		autorest.WithErrorUnlessStatusCode(http.StatusOK),
+		autorest.ByUnmarshallingJSON(&result),
+		autorest.ByClosing())
+	result.Response = autorest.Response{Response: resp}
+	return
+}
+
+// DeleteByID deletes a policy assignment by ID.
+//
+// policyAssignmentID is the policy assignment ID.
+func (client PolicyAssignmentsClient) DeleteByID(policyAssignmentID string) (result PolicyAssignment, ae error) {
+	req, err := client.DeleteByIDPreparer(policyAssignmentID)
+	if err != nil {
+		return result, autorest.NewErrorWithError(err, "resources/PolicyAssignmentsClient", "DeleteByID", autorest.UndefinedStatusCode, "Failure preparing request")
+	}
+
+	resp, err := client.DeleteByIDSender(req)
+	if err != nil {
+		result.Response = autorest.Response{Response: resp}
+		return result, autorest.NewErrorWithError(err, "resources/PolicyAssignmentsClient", "DeleteByID", resp.StatusCode, "Failure sending request")
+	}
+
+	result, err = client.DeleteByIDResponder(resp)
+	if err != nil {
+		ae = autorest.NewErrorWithError(err, "resources/PolicyAssignmentsClient", "DeleteByID", resp.StatusCode, "Failure responding to request")
+	}
+
+	return
+}
+
+// DeleteByIDPreparer prepares the DeleteByID request.
+func (client PolicyAssignmentsClient) DeleteByIDPreparer(policyAssignmentID string) (*http.Request, error) {
+	pathParameters := map[string]interface{}{
+		"policyAssignmentId": policyAssignmentID,
+		"subscriptionId": url.QueryEscape(client.SubscriptionID),
+	}
+
+	queryParameters := map[string]interface{}{
+		"api-version": APIVersion,
+	}
+
+	return autorest.Prepare(&http.Request{},
+		autorest.AsJSON(),
+		autorest.AsDelete(),
+		autorest.WithBaseURL(client.BaseURI),
+		autorest.WithPath("/{policyAssignmentId}"),
+		autorest.WithPathParameters(pathParameters),
+		autorest.WithQueryParameters(queryParameters))
+}
+
+// DeleteByIDSender sends the DeleteByID request. The method will close the
+// http.Response Body if it receives an error.
+func (client PolicyAssignmentsClient) DeleteByIDSender(req *http.Request) (*http.Response, error) {
+	return client.Send(req, http.StatusOK)
+}
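+
+// The sketch below is an editorial illustration and is not part of the
+// generated SDK. The ByID variants take a fully qualified assignment ID,
+// which already encodes the scope, matching the "/{policyAssignmentId}"
+// path template above; the ID shown here is hypothetical.
+func deleteAssignmentByID(client PolicyAssignmentsClient, subscriptionID string) (PolicyAssignment, error) {
+	id := "subscriptions/" + subscriptionID +
+		"/providers/Microsoft.Authorization/policyAssignments/my-assignment"
+	return client.DeleteByID(id)
+}
+
+// DeleteByIDResponder handles the response to the DeleteByID request. The method always
+// closes the http.Response Body.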
+func (client PolicyAssignmentsClient) DeleteByIDResponder(resp *http.Response) (result PolicyAssignment, err error) {
+	err = autorest.Respond(
+		resp,
+		client.ByInspecting(),
+		autorest.WithErrorUnlessStatusCode(http.StatusOK),
+		autorest.ByUnmarshallingJSON(&result),
+		autorest.ByClosing())
+	result.Response = autorest.Response{Response: resp}
+	return
+}
+
+// Get gets a single policy assignment.
+//
+// scope is scope. policyAssignmentName is policy assignment name.
+func (client PolicyAssignmentsClient) Get(scope string, policyAssignmentName string) (result PolicyAssignment, ae error) {
+	req, err := client.GetPreparer(scope, policyAssignmentName)
+	if err != nil {
+		return result, autorest.NewErrorWithError(err, "resources/PolicyAssignmentsClient", "Get", autorest.UndefinedStatusCode, "Failure preparing request")
+	}
+
+	resp, err := client.GetSender(req)
+	if err != nil {
+		result.Response = autorest.Response{Response: resp}
+		return result, autorest.NewErrorWithError(err, "resources/PolicyAssignmentsClient", "Get", resp.StatusCode, "Failure sending request")
+	}
+
+	result, err = client.GetResponder(resp)
+	if err != nil {
+		ae = autorest.NewErrorWithError(err, "resources/PolicyAssignmentsClient", "Get", resp.StatusCode, "Failure responding to request")
+	}
+
+	return
+}
+
+// GetPreparer prepares the Get request.
+func (client PolicyAssignmentsClient) GetPreparer(scope string, policyAssignmentName string) (*http.Request, error) {
+	pathParameters := map[string]interface{}{
+		"policyAssignmentName": url.QueryEscape(policyAssignmentName),
+		"scope": scope,
+		"subscriptionId": url.QueryEscape(client.SubscriptionID),
+	}
+
+	queryParameters := map[string]interface{}{
+		"api-version": APIVersion,
+	}
+
+	return autorest.Prepare(&http.Request{},
+		autorest.AsJSON(),
+		autorest.AsGet(),
+		autorest.WithBaseURL(client.BaseURI),
+		autorest.WithPath("/{scope}/providers/Microsoft.Authorization/policyAssignments/{policyAssignmentName}"),
+		autorest.WithPathParameters(pathParameters),
+		autorest.WithQueryParameters(queryParameters))
+}
+
+// GetSender sends the Get request. The method will close the
+// http.Response Body if it receives an error.
+func (client PolicyAssignmentsClient) GetSender(req *http.Request) (*http.Response, error) {
+	return client.Send(req, http.StatusOK)
+}
+
+// GetResponder handles the response to the Get request. The method always
+// closes the http.Response Body.
+func (client PolicyAssignmentsClient) GetResponder(resp *http.Response) (result PolicyAssignment, err error) {
+	err = autorest.Respond(
+		resp,
+		client.ByInspecting(),
+		autorest.WithErrorUnlessStatusCode(http.StatusOK),
+		autorest.ByUnmarshallingJSON(&result),
+		autorest.ByClosing())
+	result.Response = autorest.Response{Response: resp}
+	return
+}
+
+// GetByID gets a single policy assignment.
+// +// policyAssignmentID is policy assignment Id +func (client PolicyAssignmentsClient) GetByID(policyAssignmentID string) (result PolicyAssignment, ae error) { + req, err := client.GetByIDPreparer(policyAssignmentID) + if err != nil { + return result, autorest.NewErrorWithError(err, "resources/PolicyAssignmentsClient", "GetByID", autorest.UndefinedStatusCode, "Failure preparing request") + } + + resp, err := client.GetByIDSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "resources/PolicyAssignmentsClient", "GetByID", resp.StatusCode, "Failure sending request") + } + + result, err = client.GetByIDResponder(resp) + if err != nil { + ae = autorest.NewErrorWithError(err, "resources/PolicyAssignmentsClient", "GetByID", resp.StatusCode, "Failure responding to request") + } + + return +} + +// GetByIDPreparer prepares the GetByID request. +func (client PolicyAssignmentsClient) GetByIDPreparer(policyAssignmentID string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "policyAssignmentId": policyAssignmentID, + "subscriptionId": url.QueryEscape(client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPath("/{policyAssignmentId}"), + autorest.WithPathParameters(pathParameters), + autorest.WithQueryParameters(queryParameters)) +} + +// GetByIDSender sends the GetByID request. The method will close the +// http.Response Body if it receives an error. +func (client PolicyAssignmentsClient) GetByIDSender(req *http.Request) (*http.Response, error) { + return client.Send(req, http.StatusOK) +} + +// GetByIDResponder handles the response to the GetByID request. The method always +// closes the http.Response Body. +func (client PolicyAssignmentsClient) GetByIDResponder(resp *http.Response) (result PolicyAssignment, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + autorest.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// List gets policy assignments of the subscription. +// +// filter is the filter to apply on the operation. +func (client PolicyAssignmentsClient) List(filter string) (result PolicyAssignmentListResult, ae error) { + req, err := client.ListPreparer(filter) + if err != nil { + return result, autorest.NewErrorWithError(err, "resources/PolicyAssignmentsClient", "List", autorest.UndefinedStatusCode, "Failure preparing request") + } + + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "resources/PolicyAssignmentsClient", "List", resp.StatusCode, "Failure sending request") + } + + result, err = client.ListResponder(resp) + if err != nil { + ae = autorest.NewErrorWithError(err, "resources/PolicyAssignmentsClient", "List", resp.StatusCode, "Failure responding to request") + } + + return +} + +// ListPreparer prepares the List request. 
+func (client PolicyAssignmentsClient) ListPreparer(filter string) (*http.Request, error) {
+	pathParameters := map[string]interface{}{
+		"subscriptionId": url.QueryEscape(client.SubscriptionID),
+	}
+
+	queryParameters := map[string]interface{}{
+		"api-version": APIVersion,
+	}
+	if len(filter) > 0 {
+		queryParameters["$filter"] = filter
+	}
+
+	return autorest.Prepare(&http.Request{},
+		autorest.AsJSON(),
+		autorest.AsGet(),
+		autorest.WithBaseURL(client.BaseURI),
+		autorest.WithPath("/subscriptions/{subscriptionId}/providers/Microsoft.Authorization/policyAssignments"),
+		autorest.WithPathParameters(pathParameters),
+		autorest.WithQueryParameters(queryParameters))
+}
+
+// ListSender sends the List request. The method will close the
+// http.Response Body if it receives an error.
+func (client PolicyAssignmentsClient) ListSender(req *http.Request) (*http.Response, error) {
+	return client.Send(req, http.StatusOK)
+}
+
+// ListResponder handles the response to the List request. The method always
+// closes the http.Response Body.
+func (client PolicyAssignmentsClient) ListResponder(resp *http.Response) (result PolicyAssignmentListResult, err error) {
+	err = autorest.Respond(
+		resp,
+		client.ByInspecting(),
+		autorest.WithErrorUnlessStatusCode(http.StatusOK),
+		autorest.ByUnmarshallingJSON(&result),
+		autorest.ByClosing())
+	result.Response = autorest.Response{Response: resp}
+	return
+}
+
+// ListNextResults retrieves the next set of results, if any.
+func (client PolicyAssignmentsClient) ListNextResults(lastResults PolicyAssignmentListResult) (result PolicyAssignmentListResult, ae error) {
+	req, err := lastResults.PolicyAssignmentListResultPreparer()
+	if err != nil {
+		return result, autorest.NewErrorWithError(err, "resources/PolicyAssignmentsClient", "List", autorest.UndefinedStatusCode, "Failure preparing next results request")
+	}
+	if req == nil {
+		return
+	}
+
+	resp, err := client.ListSender(req)
+	if err != nil {
+		result.Response = autorest.Response{Response: resp}
+		return result, autorest.NewErrorWithError(err, "resources/PolicyAssignmentsClient", "List", resp.StatusCode, "Failure sending next results request")
+	}
+
+	result, err = client.ListResponder(resp)
+	if err != nil {
+		ae = autorest.NewErrorWithError(err, "resources/PolicyAssignmentsClient", "List", resp.StatusCode, "Failure responding to next results request")
+	}
+
+	return
+}
+
+// ListForResource gets policy assignments of the resource.
+//
+// resourceGroupName is the name of the resource group.
+// resourceProviderNamespace is the name of the resource provider.
+// parentResourcePath is the parent resource path. resourceType is the
+// resource type. resourceName is the resource name. filter is the filter to
+// apply on the operation.
+func (client PolicyAssignmentsClient) ListForResource(resourceGroupName string, resourceProviderNamespace string, parentResourcePath string, resourceType string, resourceName string, filter string) (result PolicyAssignmentListResult, ae error) {
+	req, err := client.ListForResourcePreparer(resourceGroupName, resourceProviderNamespace, parentResourcePath, resourceType, resourceName, filter)
+	if err != nil {
+		return result, autorest.NewErrorWithError(err, "resources/PolicyAssignmentsClient", "ListForResource", autorest.UndefinedStatusCode, "Failure preparing request")
+	}
+
+	resp, err := client.ListForResourceSender(req)
+	if err != nil {
+		result.Response = autorest.Response{Response: resp}
+		return result, autorest.NewErrorWithError(err, "resources/PolicyAssignmentsClient", "ListForResource", resp.StatusCode, "Failure sending request")
+	}
+
+	result, err = client.ListForResourceResponder(resp)
+	if err != nil {
+		ae = autorest.NewErrorWithError(err, "resources/PolicyAssignmentsClient", "ListForResource", resp.StatusCode, "Failure responding to request")
+	}
+
+	return
+}
+
+// ListForResourcePreparer prepares the ListForResource request.
+func (client PolicyAssignmentsClient) ListForResourcePreparer(resourceGroupName string, resourceProviderNamespace string, parentResourcePath string, resourceType string, resourceName string, filter string) (*http.Request, error) {
+	pathParameters := map[string]interface{}{
+		"parentResourcePath": url.QueryEscape(parentResourcePath),
+		"resourceGroupName": url.QueryEscape(resourceGroupName),
+		"resourceName": url.QueryEscape(resourceName),
+		"resourceProviderNamespace": url.QueryEscape(resourceProviderNamespace),
+		"resourceType": url.QueryEscape(resourceType),
+		"subscriptionId": url.QueryEscape(client.SubscriptionID),
+	}
+
+	queryParameters := map[string]interface{}{
+		"api-version": APIVersion,
+	}
+	if len(filter) > 0 {
+		queryParameters["$filter"] = filter
+	}
+
+	return autorest.Prepare(&http.Request{},
+		autorest.AsJSON(),
+		autorest.AsGet(),
+		autorest.WithBaseURL(client.BaseURI),
+		autorest.WithPath("/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{parentResourcePath}/{resourceType}/{resourceName}/providers/Microsoft.Authorization/policyAssignments"),
+		autorest.WithPathParameters(pathParameters),
+		autorest.WithQueryParameters(queryParameters))
+}
+
+// ListForResourceSender sends the ListForResource request. The method will close the
+// http.Response Body if it receives an error.
+func (client PolicyAssignmentsClient) ListForResourceSender(req *http.Request) (*http.Response, error) {
+	return client.Send(req, http.StatusOK)
+}
+
+// ListForResourceResponder handles the response to the ListForResource request. The method always
+// closes the http.Response Body.
+func (client PolicyAssignmentsClient) ListForResourceResponder(resp *http.Response) (result PolicyAssignmentListResult, err error) {
+	err = autorest.Respond(
+		resp,
+		client.ByInspecting(),
+		autorest.WithErrorUnlessStatusCode(http.StatusOK),
+		autorest.ByUnmarshallingJSON(&result),
+		autorest.ByClosing())
+	result.Response = autorest.Response{Response: resp}
+	return
+}
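+
+// The sketch below is an editorial illustration and is not part of the
+// generated SDK. It lists the policy assignments that apply to a single
+// virtual machine; the group, type, and name values are hypothetical, and
+// parentResourcePath is empty because the VM is a top-level resource.
+func listAssignmentsForVM(client PolicyAssignmentsClient) (PolicyAssignmentListResult, error) {
+	return client.ListForResource(
+		"my-resource-group", // resourceGroupName
+		"Microsoft.Compute", // resourceProviderNamespace
+		"",                  // parentResourcePath
+		"virtualMachines",   // resourceType
+		"my-vm",             // resourceName
+		"",                  // filter
+	)
+}
+
+// ListForResourceNextResults retrieves the next set of results, if any.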
+func (client PolicyAssignmentsClient) ListForResourceNextResults(lastResults PolicyAssignmentListResult) (result PolicyAssignmentListResult, ae error) {
+	req, err := lastResults.PolicyAssignmentListResultPreparer()
+	if err != nil {
+		return result, autorest.NewErrorWithError(err, "resources/PolicyAssignmentsClient", "ListForResource", autorest.UndefinedStatusCode, "Failure preparing next results request")
+	}
+	if req == nil {
+		return
+	}
+
+	resp, err := client.ListForResourceSender(req)
+	if err != nil {
+		result.Response = autorest.Response{Response: resp}
+		return result, autorest.NewErrorWithError(err, "resources/PolicyAssignmentsClient", "ListForResource", resp.StatusCode, "Failure sending next results request")
+	}
+
+	result, err = client.ListForResourceResponder(resp)
+	if err != nil {
+		ae = autorest.NewErrorWithError(err, "resources/PolicyAssignmentsClient", "ListForResource", resp.StatusCode, "Failure responding to next results request")
+	}
+
+	return
+}
+
+// ListForResourceGroup gets policy assignments of the resource group.
+//
+// resourceGroupName is resource group name. filter is the filter to apply on
+// the operation.
+func (client PolicyAssignmentsClient) ListForResourceGroup(resourceGroupName string, filter string) (result PolicyAssignmentListResult, ae error) {
+	req, err := client.ListForResourceGroupPreparer(resourceGroupName, filter)
+	if err != nil {
+		return result, autorest.NewErrorWithError(err, "resources/PolicyAssignmentsClient", "ListForResourceGroup", autorest.UndefinedStatusCode, "Failure preparing request")
+	}
+
+	resp, err := client.ListForResourceGroupSender(req)
+	if err != nil {
+		result.Response = autorest.Response{Response: resp}
+		return result, autorest.NewErrorWithError(err, "resources/PolicyAssignmentsClient", "ListForResourceGroup", resp.StatusCode, "Failure sending request")
+	}
+
+	result, err = client.ListForResourceGroupResponder(resp)
+	if err != nil {
+		ae = autorest.NewErrorWithError(err, "resources/PolicyAssignmentsClient", "ListForResourceGroup", resp.StatusCode, "Failure responding to request")
+	}
+
+	return
+}
+
+// ListForResourceGroupPreparer prepares the ListForResourceGroup request.
+func (client PolicyAssignmentsClient) ListForResourceGroupPreparer(resourceGroupName string, filter string) (*http.Request, error) {
+	pathParameters := map[string]interface{}{
+		"resourceGroupName": url.QueryEscape(resourceGroupName),
+		"subscriptionId": url.QueryEscape(client.SubscriptionID),
+	}
+
+	queryParameters := map[string]interface{}{
+		"api-version": APIVersion,
+	}
+	if len(filter) > 0 {
+		queryParameters["$filter"] = filter
+	}
+
+	return autorest.Prepare(&http.Request{},
+		autorest.AsJSON(),
+		autorest.AsGet(),
+		autorest.WithBaseURL(client.BaseURI),
+		autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Authorization/policyAssignments"),
+		autorest.WithPathParameters(pathParameters),
+		autorest.WithQueryParameters(queryParameters))
+}
+
+// ListForResourceGroupSender sends the ListForResourceGroup request. The method will close the
+// http.Response Body if it receives an error.
+func (client PolicyAssignmentsClient) ListForResourceGroupSender(req *http.Request) (*http.Response, error) {
+	return client.Send(req, http.StatusOK)
+}
+
+// ListForResourceGroupResponder handles the response to the ListForResourceGroup request. The method always
+// closes the http.Response Body.
+func (client PolicyAssignmentsClient) ListForResourceGroupResponder(resp *http.Response) (result PolicyAssignmentListResult, err error) {
+	err = autorest.Respond(
+		resp,
+		client.ByInspecting(),
+		autorest.WithErrorUnlessStatusCode(http.StatusOK),
+		autorest.ByUnmarshallingJSON(&result),
+		autorest.ByClosing())
+	result.Response = autorest.Response{Response: resp}
+	return
+}
+
+// ListForResourceGroupNextResults retrieves the next set of results, if any.
+func (client PolicyAssignmentsClient) ListForResourceGroupNextResults(lastResults PolicyAssignmentListResult) (result PolicyAssignmentListResult, ae error) {
+	req, err := lastResults.PolicyAssignmentListResultPreparer()
+	if err != nil {
+		return result, autorest.NewErrorWithError(err, "resources/PolicyAssignmentsClient", "ListForResourceGroup", autorest.UndefinedStatusCode, "Failure preparing next results request")
+	}
+	if req == nil {
+		return
+	}
+
+	resp, err := client.ListForResourceGroupSender(req)
+	if err != nil {
+		result.Response = autorest.Response{Response: resp}
+		return result, autorest.NewErrorWithError(err, "resources/PolicyAssignmentsClient", "ListForResourceGroup", resp.StatusCode, "Failure sending next results request")
+	}
+
+	result, err = client.ListForResourceGroupResponder(resp)
+	if err != nil {
+		ae = autorest.NewErrorWithError(err, "resources/PolicyAssignmentsClient", "ListForResourceGroup", resp.StatusCode, "Failure responding to next results request")
+	}
+
+	return
+}
+
+// ListForScope gets policy assignments of the scope.
+//
+// scope is scope. filter is the filter to apply on the operation.
+func (client PolicyAssignmentsClient) ListForScope(scope string, filter string) (result PolicyAssignmentListResult, ae error) {
+	req, err := client.ListForScopePreparer(scope, filter)
+	if err != nil {
+		return result, autorest.NewErrorWithError(err, "resources/PolicyAssignmentsClient", "ListForScope", autorest.UndefinedStatusCode, "Failure preparing request")
+	}
+
+	resp, err := client.ListForScopeSender(req)
+	if err != nil {
+		result.Response = autorest.Response{Response: resp}
+		return result, autorest.NewErrorWithError(err, "resources/PolicyAssignmentsClient", "ListForScope", resp.StatusCode, "Failure sending request")
+	}
+
+	result, err = client.ListForScopeResponder(resp)
+	if err != nil {
+		ae = autorest.NewErrorWithError(err, "resources/PolicyAssignmentsClient", "ListForScope", resp.StatusCode, "Failure responding to request")
+	}
+
+	return
+}
+
+// ListForScopePreparer prepares the ListForScope request.
+func (client PolicyAssignmentsClient) ListForScopePreparer(scope string, filter string) (*http.Request, error) {
+	pathParameters := map[string]interface{}{
+		"scope": scope,
+		"subscriptionId": url.QueryEscape(client.SubscriptionID),
+	}
+
+	queryParameters := map[string]interface{}{
+		"api-version": APIVersion,
+	}
+	if len(filter) > 0 {
+		queryParameters["$filter"] = filter
+	}
+
+	return autorest.Prepare(&http.Request{},
+		autorest.AsJSON(),
+		autorest.AsGet(),
+		autorest.WithBaseURL(client.BaseURI),
+		autorest.WithPath("/{scope}/providers/Microsoft.Authorization/policyAssignments"),
+		autorest.WithPathParameters(pathParameters),
+		autorest.WithQueryParameters(queryParameters))
+}
+
+// ListForScopeSender sends the ListForScope request. The method will close the
+// http.Response Body if it receives an error.
+func (client PolicyAssignmentsClient) ListForScopeSender(req *http.Request) (*http.Response, error) {
+	return client.Send(req, http.StatusOK)
+}
+
+// ListForScopeResponder handles the response to the ListForScope request. The method always
+// closes the http.Response Body.
+func (client PolicyAssignmentsClient) ListForScopeResponder(resp *http.Response) (result PolicyAssignmentListResult, err error) {
+	err = autorest.Respond(
+		resp,
+		client.ByInspecting(),
+		autorest.WithErrorUnlessStatusCode(http.StatusOK),
+		autorest.ByUnmarshallingJSON(&result),
+		autorest.ByClosing())
+	result.Response = autorest.Response{Response: resp}
+	return
+}
+
+// ListForScopeNextResults retrieves the next set of results, if any.
+func (client PolicyAssignmentsClient) ListForScopeNextResults(lastResults PolicyAssignmentListResult) (result PolicyAssignmentListResult, ae error) {
+	req, err := lastResults.PolicyAssignmentListResultPreparer()
+	if err != nil {
+		return result, autorest.NewErrorWithError(err, "resources/PolicyAssignmentsClient", "ListForScope", autorest.UndefinedStatusCode, "Failure preparing next results request")
+	}
+	if req == nil {
+		return
+	}
+
+	resp, err := client.ListForScopeSender(req)
+	if err != nil {
+		result.Response = autorest.Response{Response: resp}
+		return result, autorest.NewErrorWithError(err, "resources/PolicyAssignmentsClient", "ListForScope", resp.StatusCode, "Failure sending next results request")
+	}
+
+	result, err = client.ListForScopeResponder(resp)
+	if err != nil {
+		ae = autorest.NewErrorWithError(err, "resources/PolicyAssignmentsClient", "ListForScope", resp.StatusCode, "Failure responding to next results request")
+	}
+
+	return
+}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/resources/resources/policydefinitions.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/resources/resources/policydefinitions.go
new file mode 100644
index 000000000..25f740d2e
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/resources/resources/policydefinitions.go
@@ -0,0 +1,230 @@
+package resources
+
+// Copyright (c) Microsoft and contributors. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Code generated by Microsoft (R) AutoRest Code Generator 0.12.0.0
+// Changes may cause incorrect behavior and will be lost if the code is
+// regenerated.
+
+import (
+	"github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest"
+	"net/http"
+	"net/url"
+)
+
+// PolicyDefinitionsClient is the client for the PolicyDefinitions methods of
+// the Resources service.
+type PolicyDefinitionsClient struct {
+	ManagementClient
+}
+
+// NewPolicyDefinitionsClient creates an instance of the
+// PolicyDefinitionsClient client.
+func NewPolicyDefinitionsClient(subscriptionID string) PolicyDefinitionsClient {
+	return NewPolicyDefinitionsClientWithBaseURI(DefaultBaseURI, subscriptionID)
+}
+
+// NewPolicyDefinitionsClientWithBaseURI creates an instance of the
+// PolicyDefinitionsClient client.
+func NewPolicyDefinitionsClientWithBaseURI(baseURI string, subscriptionID string) PolicyDefinitionsClient {
+	return PolicyDefinitionsClient{NewWithBaseURI(baseURI, subscriptionID)}
+}
+
+// CreateOrUpdate creates or updates a policy definition.
+//
+// policyDefinitionName is the policy definition name. parameters is the
+// policy definition properties.
+func (client PolicyDefinitionsClient) CreateOrUpdate(policyDefinitionName string, parameters PolicyDefinition) (result PolicyDefinition, ae error) {
+	req, err := client.CreateOrUpdatePreparer(policyDefinitionName, parameters)
+	if err != nil {
+		return result, autorest.NewErrorWithError(err, "resources/PolicyDefinitionsClient", "CreateOrUpdate", autorest.UndefinedStatusCode, "Failure preparing request")
+	}
+
+	resp, err := client.CreateOrUpdateSender(req)
+	if err != nil {
+		result.Response = autorest.Response{Response: resp}
+		return result, autorest.NewErrorWithError(err, "resources/PolicyDefinitionsClient", "CreateOrUpdate", resp.StatusCode, "Failure sending request")
+	}
+
+	result, err = client.CreateOrUpdateResponder(resp)
+	if err != nil {
+		ae = autorest.NewErrorWithError(err, "resources/PolicyDefinitionsClient", "CreateOrUpdate", resp.StatusCode, "Failure responding to request")
+	}
+
+	return
+}
+
+// CreateOrUpdatePreparer prepares the CreateOrUpdate request.
+func (client PolicyDefinitionsClient) CreateOrUpdatePreparer(policyDefinitionName string, parameters PolicyDefinition) (*http.Request, error) {
+	pathParameters := map[string]interface{}{
+		"policyDefinitionName": url.QueryEscape(policyDefinitionName),
+		"subscriptionId": url.QueryEscape(client.SubscriptionID),
+	}
+
+	queryParameters := map[string]interface{}{
+		"api-version": APIVersion,
+	}
+
+	return autorest.Prepare(&http.Request{},
+		autorest.AsJSON(),
+		autorest.AsPut(),
+		autorest.WithBaseURL(client.BaseURI),
+		autorest.WithPath("/subscriptions/{subscriptionId}/providers/Microsoft.Authorization/policydefinitions/{policyDefinitionName}"),
+		autorest.WithJSON(parameters),
+		autorest.WithPathParameters(pathParameters),
+		autorest.WithQueryParameters(queryParameters))
+}
+
+// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the
+// http.Response Body if it receives an error.
+func (client PolicyDefinitionsClient) CreateOrUpdateSender(req *http.Request) (*http.Response, error) {
+	return client.Send(req, http.StatusOK, http.StatusCreated)
+}
+
+// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always
+// closes the http.Response Body.
+func (client PolicyDefinitionsClient) CreateOrUpdateResponder(resp *http.Response) (result PolicyDefinition, err error) {
+	err = autorest.Respond(
+		resp,
+		client.ByInspecting(),
+		autorest.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated),
+		autorest.ByUnmarshallingJSON(&result),
+		autorest.ByClosing())
+	result.Response = autorest.Response{Response: resp}
+	return
+}
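+
+// The sketch below is an editorial illustration and is not part of the
+// generated SDK. The policy rule is passed as an untyped document through
+// PolicyRule; the rule body shown is a plausible location restriction, not
+// a verified Azure policy document, and the names are hypothetical.
+func createLocationPolicy(client PolicyDefinitionsClient) (PolicyDefinition, error) {
+	name := "allowed-locations"
+	rule := map[string]interface{}{
+		"if": map[string]interface{}{
+			"not": map[string]interface{}{"field": "location", "equals": "westus"},
+		},
+		"then": map[string]interface{}{"effect": "deny"},
+	}
+	properties := PolicyDefinitionProperties{PolicyRule: &rule}
+	return client.CreateOrUpdate(name, PolicyDefinition{Name: &name, Properties: &properties})
+}
+
+// Delete deletes a policy definition.
+//
+// policyDefinitionName is the policy definition name.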
+func (client PolicyDefinitionsClient) Delete(policyDefinitionName string) (result autorest.Response, ae error) { + req, err := client.DeletePreparer(policyDefinitionName) + if err != nil { + return result, autorest.NewErrorWithError(err, "resources/PolicyDefinitionsClient", "Delete", autorest.UndefinedStatusCode, "Failure preparing request") + } + + resp, err := client.DeleteSender(req) + if err != nil { + result.Response = resp + return result, autorest.NewErrorWithError(err, "resources/PolicyDefinitionsClient", "Delete", resp.StatusCode, "Failure sending request") + } + + result, err = client.DeleteResponder(resp) + if err != nil { + ae = autorest.NewErrorWithError(err, "resources/PolicyDefinitionsClient", "Delete", resp.StatusCode, "Failure responding to request") + } + + return +} + +// DeletePreparer prepares the Delete request. +func (client PolicyDefinitionsClient) DeletePreparer(policyDefinitionName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "policyDefinitionName": url.QueryEscape(policyDefinitionName), + "subscriptionId": url.QueryEscape(client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsDelete(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPath("/subscriptions/{subscriptionId}/providers/Microsoft.Authorization/policydefinitions/{policyDefinitionName}"), + autorest.WithPathParameters(pathParameters), + autorest.WithQueryParameters(queryParameters)) +} + +// DeleteSender sends the Delete request. The method will close the +// http.Response Body if it receives an error. +func (client PolicyDefinitionsClient) DeleteSender(req *http.Request) (*http.Response, error) { + return client.Send(req, http.StatusOK) +} + +// DeleteResponder handles the response to the Delete request. The method always +// closes the http.Response Body. +func (client PolicyDefinitionsClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + autorest.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByClosing()) + result.Response = resp + return +} + +// Get gets policy definition. +// +// policyDefinitionName is the policy definition name. +func (client PolicyDefinitionsClient) Get(policyDefinitionName string) (result PolicyDefinition, ae error) { + req, err := client.GetPreparer(policyDefinitionName) + if err != nil { + return result, autorest.NewErrorWithError(err, "resources/PolicyDefinitionsClient", "Get", autorest.UndefinedStatusCode, "Failure preparing request") + } + + resp, err := client.GetSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "resources/PolicyDefinitionsClient", "Get", resp.StatusCode, "Failure sending request") + } + + result, err = client.GetResponder(resp) + if err != nil { + ae = autorest.NewErrorWithError(err, "resources/PolicyDefinitionsClient", "Get", resp.StatusCode, "Failure responding to request") + } + + return +} + +// GetPreparer prepares the Get request. 
+func (client PolicyDefinitionsClient) GetPreparer(policyDefinitionName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "policyDefinitionName": url.QueryEscape(policyDefinitionName), + "subscriptionId": url.QueryEscape(client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPath("/subscriptions/{subscriptionId}/providers/Microsoft.Authorization/policydefinitions/{policyDefinitionName}"), + autorest.WithPathParameters(pathParameters), + autorest.WithQueryParameters(queryParameters)) +} + +// GetSender sends the Get request. The method will close the +// http.Response Body if it receives an error. +func (client PolicyDefinitionsClient) GetSender(req *http.Request) (*http.Response, error) { + return client.Send(req, http.StatusOK) +} + +// GetResponder handles the response to the Get request. The method always +// closes the http.Response Body. +func (client PolicyDefinitionsClient) GetResponder(resp *http.Response) (result PolicyDefinition, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + autorest.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/resources/resources/provideroperationdetails.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/resources/resources/provideroperationdetails.go new file mode 100644 index 000000000..ecf62fd2a --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/resources/resources/provideroperationdetails.go @@ -0,0 +1,129 @@ +package resources + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator 0.12.0.0 +// Changes may cause incorrect behavior and will be lost if the code is +// regenerated. + +import ( + "github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest" + "net/http" + "net/url" +) + +// ProviderOperationDetailsClient is the client for the +// ProviderOperationDetails methods of the Resources service. +type ProviderOperationDetailsClient struct { + ManagementClient +} + +// NewProviderOperationDetailsClient creates an instance of the +// ProviderOperationDetailsClient client. +func NewProviderOperationDetailsClient(subscriptionID string) ProviderOperationDetailsClient { + return NewProviderOperationDetailsClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewProviderOperationDetailsClientWithBaseURI creates an instance of the +// ProviderOperationDetailsClient client. 
+func NewProviderOperationDetailsClientWithBaseURI(baseURI string, subscriptionID string) ProviderOperationDetailsClient { + return ProviderOperationDetailsClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// List gets a list of resource providers. +// +// resourceProviderNamespace is resource identity. +func (client ProviderOperationDetailsClient) List(resourceProviderNamespace string, apiVersion string) (result ResourceProviderOperationDetailListResult, ae error) { + req, err := client.ListPreparer(resourceProviderNamespace, apiVersion) + if err != nil { + return result, autorest.NewErrorWithError(err, "resources/ProviderOperationDetailsClient", "List", autorest.UndefinedStatusCode, "Failure preparing request") + } + + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "resources/ProviderOperationDetailsClient", "List", resp.StatusCode, "Failure sending request") + } + + result, err = client.ListResponder(resp) + if err != nil { + ae = autorest.NewErrorWithError(err, "resources/ProviderOperationDetailsClient", "List", resp.StatusCode, "Failure responding to request") + } + + return +} + +// ListPreparer prepares the List request. +func (client ProviderOperationDetailsClient) ListPreparer(resourceProviderNamespace string, apiVersion string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceProviderNamespace": url.QueryEscape(resourceProviderNamespace), + "subscriptionId": url.QueryEscape(client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPath("/providers/{resourceProviderNamespace}/operations"), + autorest.WithPathParameters(pathParameters), + autorest.WithQueryParameters(queryParameters)) +} + +// ListSender sends the List request. The method will close the +// http.Response Body if it receives an error. +func (client ProviderOperationDetailsClient) ListSender(req *http.Request) (*http.Response, error) { + return client.Send(req, http.StatusOK, http.StatusNoContent) +} + +// ListResponder handles the response to the List request. The method always +// closes the http.Response Body. +func (client ProviderOperationDetailsClient) ListResponder(resp *http.Response) (result ResourceProviderOperationDetailListResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + autorest.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNoContent), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// ListNextResults retrieves the next set of results, if any. 
+func (client ProviderOperationDetailsClient) ListNextResults(lastResults ResourceProviderOperationDetailListResult) (result ResourceProviderOperationDetailListResult, ae error) { + req, err := lastResults.ResourceProviderOperationDetailListResultPreparer() + if err != nil { + return result, autorest.NewErrorWithError(err, "resources/ProviderOperationDetailsClient", "List", autorest.UndefinedStatusCode, "Failure preparing next results request request") + } + if req == nil { + return + } + + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "resources/ProviderOperationDetailsClient", "List", resp.StatusCode, "Failure sending next results request request") + } + + result, err = client.ListResponder(resp) + if err != nil { + ae = autorest.NewErrorWithError(err, "resources/ProviderOperationDetailsClient", "List", resp.StatusCode, "Failure responding to next results request request") + } + + return +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/resources/resources/providers.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/resources/resources/providers.go new file mode 100644 index 000000000..2d030fc5a --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/resources/resources/providers.go @@ -0,0 +1,316 @@ +package resources + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator 0.12.0.0 +// Changes may cause incorrect behavior and will be lost if the code is +// regenerated. + +import ( + "github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest" + "net/http" + "net/url" +) + +// ProvidersClient is the client for the Providers methods of the Resources +// service. +type ProvidersClient struct { + ManagementClient +} + +// NewProvidersClient creates an instance of the ProvidersClient client. +func NewProvidersClient(subscriptionID string) ProvidersClient { + return NewProvidersClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewProvidersClientWithBaseURI creates an instance of the ProvidersClient +// client. +func NewProvidersClientWithBaseURI(baseURI string, subscriptionID string) ProvidersClient { + return ProvidersClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// Get gets a resource provider. +// +// resourceProviderNamespace is namespace of the resource provider. 
+func (client ProvidersClient) Get(resourceProviderNamespace string) (result Provider, ae error) { + req, err := client.GetPreparer(resourceProviderNamespace) + if err != nil { + return result, autorest.NewErrorWithError(err, "resources/ProvidersClient", "Get", autorest.UndefinedStatusCode, "Failure preparing request") + } + + resp, err := client.GetSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "resources/ProvidersClient", "Get", resp.StatusCode, "Failure sending request") + } + + result, err = client.GetResponder(resp) + if err != nil { + ae = autorest.NewErrorWithError(err, "resources/ProvidersClient", "Get", resp.StatusCode, "Failure responding to request") + } + + return +} + +// GetPreparer prepares the Get request. +func (client ProvidersClient) GetPreparer(resourceProviderNamespace string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceProviderNamespace": url.QueryEscape(resourceProviderNamespace), + "subscriptionId": url.QueryEscape(client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPath("/subscriptions/{subscriptionId}/providers/{resourceProviderNamespace}"), + autorest.WithPathParameters(pathParameters), + autorest.WithQueryParameters(queryParameters)) +} + +// GetSender sends the Get request. The method will close the +// http.Response Body if it receives an error. +func (client ProvidersClient) GetSender(req *http.Request) (*http.Response, error) { + return client.Send(req, http.StatusOK) +} + +// GetResponder handles the response to the Get request. The method always +// closes the http.Response Body. +func (client ProvidersClient) GetResponder(resp *http.Response) (result Provider, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + autorest.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// List gets a list of resource providers. +// +// top is query parameters. If null is passed returns all deployments. +func (client ProvidersClient) List(top *int) (result ProviderListResult, ae error) { + req, err := client.ListPreparer(top) + if err != nil { + return result, autorest.NewErrorWithError(err, "resources/ProvidersClient", "List", autorest.UndefinedStatusCode, "Failure preparing request") + } + + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "resources/ProvidersClient", "List", resp.StatusCode, "Failure sending request") + } + + result, err = client.ListResponder(resp) + if err != nil { + ae = autorest.NewErrorWithError(err, "resources/ProvidersClient", "List", resp.StatusCode, "Failure responding to request") + } + + return +} + +// ListPreparer prepares the List request. 
+func (client ProvidersClient) ListPreparer(top *int) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "subscriptionId": url.QueryEscape(client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + if top != nil { + queryParameters["$top"] = top + } + + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPath("/subscriptions/{subscriptionId}/providers"), + autorest.WithPathParameters(pathParameters), + autorest.WithQueryParameters(queryParameters)) +} + +// ListSender sends the List request. The method will close the +// http.Response Body if it receives an error. +func (client ProvidersClient) ListSender(req *http.Request) (*http.Response, error) { + return client.Send(req, http.StatusOK) +} + +// ListResponder handles the response to the List request. The method always +// closes the http.Response Body. +func (client ProvidersClient) ListResponder(resp *http.Response) (result ProviderListResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + autorest.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// ListNextResults retrieves the next set of results, if any. +func (client ProvidersClient) ListNextResults(lastResults ProviderListResult) (result ProviderListResult, ae error) { + req, err := lastResults.ProviderListResultPreparer() + if err != nil { + return result, autorest.NewErrorWithError(err, "resources/ProvidersClient", "List", autorest.UndefinedStatusCode, "Failure preparing next results request request") + } + if req == nil { + return + } + + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "resources/ProvidersClient", "List", resp.StatusCode, "Failure sending next results request request") + } + + result, err = client.ListResponder(resp) + if err != nil { + ae = autorest.NewErrorWithError(err, "resources/ProvidersClient", "List", resp.StatusCode, "Failure responding to next results request request") + } + + return +} + +// Register registers provider to be used with a subscription. +// +// resourceProviderNamespace is namespace of the resource provider. +func (client ProvidersClient) Register(resourceProviderNamespace string) (result Provider, ae error) { + req, err := client.RegisterPreparer(resourceProviderNamespace) + if err != nil { + return result, autorest.NewErrorWithError(err, "resources/ProvidersClient", "Register", autorest.UndefinedStatusCode, "Failure preparing request") + } + + resp, err := client.RegisterSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "resources/ProvidersClient", "Register", resp.StatusCode, "Failure sending request") + } + + result, err = client.RegisterResponder(resp) + if err != nil { + ae = autorest.NewErrorWithError(err, "resources/ProvidersClient", "Register", resp.StatusCode, "Failure responding to request") + } + + return +} + +// RegisterPreparer prepares the Register request. 
+func (client ProvidersClient) RegisterPreparer(resourceProviderNamespace string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceProviderNamespace": url.QueryEscape(resourceProviderNamespace), + "subscriptionId": url.QueryEscape(client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPath("/subscriptions/{subscriptionId}/providers/{resourceProviderNamespace}/register"), + autorest.WithPathParameters(pathParameters), + autorest.WithQueryParameters(queryParameters)) +} + +// RegisterSender sends the Register request. The method will close the +// http.Response Body if it receives an error. +func (client ProvidersClient) RegisterSender(req *http.Request) (*http.Response, error) { + return client.Send(req, http.StatusOK) +} + +// RegisterResponder handles the response to the Register request. The method always +// closes the http.Response Body. +func (client ProvidersClient) RegisterResponder(resp *http.Response) (result Provider, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + autorest.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// Unregister unregisters provider from a subscription. +// +// resourceProviderNamespace is namespace of the resource provider. +func (client ProvidersClient) Unregister(resourceProviderNamespace string) (result Provider, ae error) { + req, err := client.UnregisterPreparer(resourceProviderNamespace) + if err != nil { + return result, autorest.NewErrorWithError(err, "resources/ProvidersClient", "Unregister", autorest.UndefinedStatusCode, "Failure preparing request") + } + + resp, err := client.UnregisterSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "resources/ProvidersClient", "Unregister", resp.StatusCode, "Failure sending request") + } + + result, err = client.UnregisterResponder(resp) + if err != nil { + ae = autorest.NewErrorWithError(err, "resources/ProvidersClient", "Unregister", resp.StatusCode, "Failure responding to request") + } + + return +} + +// UnregisterPreparer prepares the Unregister request. +func (client ProvidersClient) UnregisterPreparer(resourceProviderNamespace string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceProviderNamespace": url.QueryEscape(resourceProviderNamespace), + "subscriptionId": url.QueryEscape(client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPath("/subscriptions/{subscriptionId}/providers/{resourceProviderNamespace}/unregister"), + autorest.WithPathParameters(pathParameters), + autorest.WithQueryParameters(queryParameters)) +} + +// UnregisterSender sends the Unregister request. The method will close the +// http.Response Body if it receives an error. +func (client ProvidersClient) UnregisterSender(req *http.Request) (*http.Response, error) { + return client.Send(req, http.StatusOK) +} + +// UnregisterResponder handles the response to the Unregister request. The method always +// closes the http.Response Body. 
+func (client ProvidersClient) UnregisterResponder(resp *http.Response) (result Provider, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + autorest.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/resources/resources/resources.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/resources/resources/resources.go new file mode 100644 index 000000000..dcdc32bf0 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/resources/resources/resources.go @@ -0,0 +1,472 @@ +package resources + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator 0.12.0.0 +// Changes may cause incorrect behavior and will be lost if the code is +// regenerated. + +import ( + "github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest" + "net/http" + "net/url" +) + +// Client is the client for the Resources methods of the Resources service. +type Client struct { + ManagementClient +} + +// NewClient creates an instance of the Client client. +func NewClient(subscriptionID string) Client { + return NewClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewClientWithBaseURI creates an instance of the Client client. +func NewClientWithBaseURI(baseURI string, subscriptionID string) Client { + return Client{NewWithBaseURI(baseURI, subscriptionID)} +} + +// CheckExistence checks whether resource exists. +// +// resourceGroupName is the name of the resource group. The name is case +// insensitive. resourceProviderNamespace is resource identity. +// parentResourcePath is resource identity. resourceType is resource +// identity. resourceName is resource identity. +func (client Client) CheckExistence(resourceGroupName string, resourceProviderNamespace string, parentResourcePath string, resourceType string, resourceName string, apiVersion string) (result autorest.Response, ae error) { + req, err := client.CheckExistencePreparer(resourceGroupName, resourceProviderNamespace, parentResourcePath, resourceType, resourceName, apiVersion) + if err != nil { + return result, autorest.NewErrorWithError(err, "resources/Client", "CheckExistence", autorest.UndefinedStatusCode, "Failure preparing request") + } + + resp, err := client.CheckExistenceSender(req) + if err != nil { + result.Response = resp + return result, autorest.NewErrorWithError(err, "resources/Client", "CheckExistence", resp.StatusCode, "Failure sending request") + } + + result, err = client.CheckExistenceResponder(resp) + if err != nil { + ae = autorest.NewErrorWithError(err, "resources/Client", "CheckExistence", resp.StatusCode, "Failure responding to request") + } + + return +} + +// CheckExistencePreparer prepares the CheckExistence request. 
+func (client Client) CheckExistencePreparer(resourceGroupName string, resourceProviderNamespace string, parentResourcePath string, resourceType string, resourceName string, apiVersion string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "parentResourcePath": parentResourcePath, + "resourceGroupName": url.QueryEscape(resourceGroupName), + "resourceName": url.QueryEscape(resourceName), + "resourceProviderNamespace": url.QueryEscape(resourceProviderNamespace), + "resourceType": resourceType, + "subscriptionId": url.QueryEscape(client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsHead(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPath("/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{parentResourcePath}/{resourceType}/{resourceName}"), + autorest.WithPathParameters(pathParameters), + autorest.WithQueryParameters(queryParameters)) +} + +// CheckExistenceSender sends the CheckExistence request. The method will close the +// http.Response Body if it receives an error. +func (client Client) CheckExistenceSender(req *http.Request) (*http.Response, error) { + return client.Send(req, http.StatusOK, http.StatusNoContent, http.StatusNotFound) +} + +// CheckExistenceResponder handles the response to the CheckExistence request. The method always +// closes the http.Response Body. +func (client Client) CheckExistenceResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + autorest.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNoContent, http.StatusNotFound), + autorest.ByClosing()) + result.Response = resp + return +} + +// CreateOrUpdate create a resource. +// +// resourceGroupName is the name of the resource group. The name is case +// insensitive. resourceProviderNamespace is resource identity. +// parentResourcePath is resource identity. resourceType is resource +// identity. resourceName is resource identity. parameters is create or +// update resource parameters. +func (client Client) CreateOrUpdate(resourceGroupName string, resourceProviderNamespace string, parentResourcePath string, resourceType string, resourceName string, apiVersion string, parameters GenericResource) (result GenericResource, ae error) { + req, err := client.CreateOrUpdatePreparer(resourceGroupName, resourceProviderNamespace, parentResourcePath, resourceType, resourceName, apiVersion, parameters) + if err != nil { + return result, autorest.NewErrorWithError(err, "resources/Client", "CreateOrUpdate", autorest.UndefinedStatusCode, "Failure preparing request") + } + + resp, err := client.CreateOrUpdateSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "resources/Client", "CreateOrUpdate", resp.StatusCode, "Failure sending request") + } + + result, err = client.CreateOrUpdateResponder(resp) + if err != nil { + ae = autorest.NewErrorWithError(err, "resources/Client", "CreateOrUpdate", resp.StatusCode, "Failure responding to request") + } + + return +} + +// CreateOrUpdatePreparer prepares the CreateOrUpdate request. 
+func (client Client) CreateOrUpdatePreparer(resourceGroupName string, resourceProviderNamespace string, parentResourcePath string, resourceType string, resourceName string, apiVersion string, parameters GenericResource) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "parentResourcePath": parentResourcePath, + "resourceGroupName": url.QueryEscape(resourceGroupName), + "resourceName": url.QueryEscape(resourceName), + "resourceProviderNamespace": url.QueryEscape(resourceProviderNamespace), + "resourceType": resourceType, + "subscriptionId": url.QueryEscape(client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsPut(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPath("/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{parentResourcePath}/{resourceType}/{resourceName}"), + autorest.WithJSON(parameters), + autorest.WithPathParameters(pathParameters), + autorest.WithQueryParameters(queryParameters)) +} + +// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the +// http.Response Body if it receives an error. +func (client Client) CreateOrUpdateSender(req *http.Request) (*http.Response, error) { + return client.Send(req, http.StatusCreated, http.StatusOK) +} + +// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always +// closes the http.Response Body. +func (client Client) CreateOrUpdateResponder(resp *http.Response) (result GenericResource, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + autorest.WithErrorUnlessStatusCode(http.StatusCreated, http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// Delete delete resource and all of its resources. +// +// resourceGroupName is the name of the resource group. The name is case +// insensitive. resourceProviderNamespace is resource identity. +// parentResourcePath is resource identity. resourceType is resource +// identity. resourceName is resource identity. +func (client Client) Delete(resourceGroupName string, resourceProviderNamespace string, parentResourcePath string, resourceType string, resourceName string, apiVersion string) (result autorest.Response, ae error) { + req, err := client.DeletePreparer(resourceGroupName, resourceProviderNamespace, parentResourcePath, resourceType, resourceName, apiVersion) + if err != nil { + return result, autorest.NewErrorWithError(err, "resources/Client", "Delete", autorest.UndefinedStatusCode, "Failure preparing request") + } + + resp, err := client.DeleteSender(req) + if err != nil { + result.Response = resp + return result, autorest.NewErrorWithError(err, "resources/Client", "Delete", resp.StatusCode, "Failure sending request") + } + + result, err = client.DeleteResponder(resp) + if err != nil { + ae = autorest.NewErrorWithError(err, "resources/Client", "Delete", resp.StatusCode, "Failure responding to request") + } + + return +} + +// DeletePreparer prepares the Delete request. 
+func (client Client) DeletePreparer(resourceGroupName string, resourceProviderNamespace string, parentResourcePath string, resourceType string, resourceName string, apiVersion string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "parentResourcePath": parentResourcePath, + "resourceGroupName": url.QueryEscape(resourceGroupName), + "resourceName": url.QueryEscape(resourceName), + "resourceProviderNamespace": url.QueryEscape(resourceProviderNamespace), + "resourceType": resourceType, + "subscriptionId": url.QueryEscape(client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsDelete(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPath("/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{parentResourcePath}/{resourceType}/{resourceName}"), + autorest.WithPathParameters(pathParameters), + autorest.WithQueryParameters(queryParameters)) +} + +// DeleteSender sends the Delete request. The method will close the +// http.Response Body if it receives an error. +func (client Client) DeleteSender(req *http.Request) (*http.Response, error) { + return client.Send(req, http.StatusOK, http.StatusNoContent, http.StatusAccepted) +} + +// DeleteResponder handles the response to the Delete request. The method always +// closes the http.Response Body. +func (client Client) DeleteResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + autorest.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNoContent, http.StatusAccepted), + autorest.ByClosing()) + result.Response = resp + return +} + +// Get returns a resource belonging to a resource group. +// +// resourceGroupName is the name of the resource group. The name is case +// insensitive. resourceProviderNamespace is resource identity. +// parentResourcePath is resource identity. resourceType is resource +// identity. resourceName is resource identity. +func (client Client) Get(resourceGroupName string, resourceProviderNamespace string, parentResourcePath string, resourceType string, resourceName string, apiVersion string) (result GenericResource, ae error) { + req, err := client.GetPreparer(resourceGroupName, resourceProviderNamespace, parentResourcePath, resourceType, resourceName, apiVersion) + if err != nil { + return result, autorest.NewErrorWithError(err, "resources/Client", "Get", autorest.UndefinedStatusCode, "Failure preparing request") + } + + resp, err := client.GetSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "resources/Client", "Get", resp.StatusCode, "Failure sending request") + } + + result, err = client.GetResponder(resp) + if err != nil { + ae = autorest.NewErrorWithError(err, "resources/Client", "Get", resp.StatusCode, "Failure responding to request") + } + + return +} + +// GetPreparer prepares the Get request. 
+func (client Client) GetPreparer(resourceGroupName string, resourceProviderNamespace string, parentResourcePath string, resourceType string, resourceName string, apiVersion string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "parentResourcePath": parentResourcePath, + "resourceGroupName": url.QueryEscape(resourceGroupName), + "resourceName": url.QueryEscape(resourceName), + "resourceProviderNamespace": url.QueryEscape(resourceProviderNamespace), + "resourceType": resourceType, + "subscriptionId": url.QueryEscape(client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPath("/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{parentResourcePath}/{resourceType}/{resourceName}"), + autorest.WithPathParameters(pathParameters), + autorest.WithQueryParameters(queryParameters)) +} + +// GetSender sends the Get request. The method will close the +// http.Response Body if it receives an error. +func (client Client) GetSender(req *http.Request) (*http.Response, error) { + return client.Send(req, http.StatusOK, http.StatusNoContent) +} + +// GetResponder handles the response to the Get request. The method always +// closes the http.Response Body. +func (client Client) GetResponder(resp *http.Response) (result GenericResource, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + autorest.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNoContent), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// List get all of the resources under a subscription. +// +// filter is the filter to apply on the operation. top is query parameters. If +// null is passed returns all resource groups. +func (client Client) List(filter string, top *int) (result ResourceListResult, ae error) { + req, err := client.ListPreparer(filter, top) + if err != nil { + return result, autorest.NewErrorWithError(err, "resources/Client", "List", autorest.UndefinedStatusCode, "Failure preparing request") + } + + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "resources/Client", "List", resp.StatusCode, "Failure sending request") + } + + result, err = client.ListResponder(resp) + if err != nil { + ae = autorest.NewErrorWithError(err, "resources/Client", "List", resp.StatusCode, "Failure responding to request") + } + + return +} + +// ListPreparer prepares the List request. +func (client Client) ListPreparer(filter string, top *int) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "subscriptionId": url.QueryEscape(client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + if len(filter) > 0 { + queryParameters["$filter"] = filter + } + if top != nil { + queryParameters["$top"] = top + } + + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPath("/subscriptions/{subscriptionId}/resources"), + autorest.WithPathParameters(pathParameters), + autorest.WithQueryParameters(queryParameters)) +} + +// ListSender sends the List request. The method will close the +// http.Response Body if it receives an error. 
+func (client Client) ListSender(req *http.Request) (*http.Response, error) { + return client.Send(req, http.StatusOK) +} + +// ListResponder handles the response to the List request. The method always +// closes the http.Response Body. +func (client Client) ListResponder(resp *http.Response) (result ResourceListResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + autorest.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// ListNextResults retrieves the next set of results, if any. +func (client Client) ListNextResults(lastResults ResourceListResult) (result ResourceListResult, ae error) { + req, err := lastResults.ResourceListResultPreparer() + if err != nil { + return result, autorest.NewErrorWithError(err, "resources/Client", "List", autorest.UndefinedStatusCode, "Failure preparing next results request request") + } + if req == nil { + return + } + + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "resources/Client", "List", resp.StatusCode, "Failure sending next results request request") + } + + result, err = client.ListResponder(resp) + if err != nil { + ae = autorest.NewErrorWithError(err, "resources/Client", "List", resp.StatusCode, "Failure responding to next results request request") + } + + return +} + +// MoveResources begin moving resources.To determine whether the operation has +// finished processing the request, call GetLongRunningOperationStatus. +// +// sourceResourceGroupName is source resource group name. parameters is move +// resources' parameters. +func (client Client) MoveResources(sourceResourceGroupName string, parameters MoveInfo) (result autorest.Response, ae error) { + req, err := client.MoveResourcesPreparer(sourceResourceGroupName, parameters) + if err != nil { + return result, autorest.NewErrorWithError(err, "resources/Client", "MoveResources", autorest.UndefinedStatusCode, "Failure preparing request") + } + + resp, err := client.MoveResourcesSender(req) + if err != nil { + result.Response = resp + return result, autorest.NewErrorWithError(err, "resources/Client", "MoveResources", resp.StatusCode, "Failure sending request") + } + + result, err = client.MoveResourcesResponder(resp) + if err != nil { + ae = autorest.NewErrorWithError(err, "resources/Client", "MoveResources", resp.StatusCode, "Failure responding to request") + } + + return +} + +// MoveResourcesPreparer prepares the MoveResources request. +func (client Client) MoveResourcesPreparer(sourceResourceGroupName string, parameters MoveInfo) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "sourceResourceGroupName": url.QueryEscape(sourceResourceGroupName), + "subscriptionId": url.QueryEscape(client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{sourceResourceGroupName}/moveResources"), + autorest.WithJSON(parameters), + autorest.WithPathParameters(pathParameters), + autorest.WithQueryParameters(queryParameters)) +} + +// MoveResourcesSender sends the MoveResources request. The method will close the +// http.Response Body if it receives an error. 
+func (client Client) MoveResourcesSender(req *http.Request) (*http.Response, error) { + return client.Send(req, http.StatusOK, http.StatusAccepted, http.StatusNoContent) +} + +// MoveResourcesResponder handles the response to the MoveResources request. The method always +// closes the http.Response Body. +func (client Client) MoveResourcesResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + autorest.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted, http.StatusNoContent), + autorest.ByClosing()) + result.Response = resp + return +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/resources/resources/tags.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/resources/resources/tags.go new file mode 100644 index 000000000..aec33be2c --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/resources/resources/tags.go @@ -0,0 +1,371 @@ +package resources + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator 0.12.0.0 +// Changes may cause incorrect behavior and will be lost if the code is +// regenerated. + +import ( + "github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest" + "net/http" + "net/url" +) + +// TagsClient is the client for the Tags methods of the Resources service. +type TagsClient struct { + ManagementClient +} + +// NewTagsClient creates an instance of the TagsClient client. +func NewTagsClient(subscriptionID string) TagsClient { + return NewTagsClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewTagsClientWithBaseURI creates an instance of the TagsClient client. +func NewTagsClientWithBaseURI(baseURI string, subscriptionID string) TagsClient { + return TagsClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// CreateOrUpdate create a subscription resource tag. +// +// tagName is the name of the tag. +func (client TagsClient) CreateOrUpdate(tagName string) (result TagDetails, ae error) { + req, err := client.CreateOrUpdatePreparer(tagName) + if err != nil { + return result, autorest.NewErrorWithError(err, "resources/TagsClient", "CreateOrUpdate", autorest.UndefinedStatusCode, "Failure preparing request") + } + + resp, err := client.CreateOrUpdateSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "resources/TagsClient", "CreateOrUpdate", resp.StatusCode, "Failure sending request") + } + + result, err = client.CreateOrUpdateResponder(resp) + if err != nil { + ae = autorest.NewErrorWithError(err, "resources/TagsClient", "CreateOrUpdate", resp.StatusCode, "Failure responding to request") + } + + return +} + +// CreateOrUpdatePreparer prepares the CreateOrUpdate request. 
+func (client TagsClient) CreateOrUpdatePreparer(tagName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "subscriptionId": url.QueryEscape(client.SubscriptionID), + "tagName": url.QueryEscape(tagName), + } + + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsPut(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPath("/subscriptions/{subscriptionId}/tagNames/{tagName}"), + autorest.WithPathParameters(pathParameters), + autorest.WithQueryParameters(queryParameters)) +} + +// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the +// http.Response Body if it receives an error. +func (client TagsClient) CreateOrUpdateSender(req *http.Request) (*http.Response, error) { + return client.Send(req, http.StatusOK, http.StatusCreated) +} + +// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always +// closes the http.Response Body. +func (client TagsClient) CreateOrUpdateResponder(resp *http.Response) (result TagDetails, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + autorest.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// CreateOrUpdateValue create a subscription resource tag value. +// +// tagName is the name of the tag. tagValue is the value of the tag. +func (client TagsClient) CreateOrUpdateValue(tagName string, tagValue string) (result TagValue, ae error) { + req, err := client.CreateOrUpdateValuePreparer(tagName, tagValue) + if err != nil { + return result, autorest.NewErrorWithError(err, "resources/TagsClient", "CreateOrUpdateValue", autorest.UndefinedStatusCode, "Failure preparing request") + } + + resp, err := client.CreateOrUpdateValueSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "resources/TagsClient", "CreateOrUpdateValue", resp.StatusCode, "Failure sending request") + } + + result, err = client.CreateOrUpdateValueResponder(resp) + if err != nil { + ae = autorest.NewErrorWithError(err, "resources/TagsClient", "CreateOrUpdateValue", resp.StatusCode, "Failure responding to request") + } + + return +} + +// CreateOrUpdateValuePreparer prepares the CreateOrUpdateValue request. +func (client TagsClient) CreateOrUpdateValuePreparer(tagName string, tagValue string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "subscriptionId": url.QueryEscape(client.SubscriptionID), + "tagName": url.QueryEscape(tagName), + "tagValue": url.QueryEscape(tagValue), + } + + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsPut(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPath("/subscriptions/{subscriptionId}/tagNames/{tagName}/tagValues/{tagValue}"), + autorest.WithPathParameters(pathParameters), + autorest.WithQueryParameters(queryParameters)) +} + +// CreateOrUpdateValueSender sends the CreateOrUpdateValue request. The method will close the +// http.Response Body if it receives an error. 
+func (client TagsClient) CreateOrUpdateValueSender(req *http.Request) (*http.Response, error) { + return client.Send(req, http.StatusOK, http.StatusCreated) +} + +// CreateOrUpdateValueResponder handles the response to the CreateOrUpdateValue request. The method always +// closes the http.Response Body. +func (client TagsClient) CreateOrUpdateValueResponder(resp *http.Response) (result TagValue, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + autorest.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// Delete delete a subscription resource tag. +// +// tagName is the name of the tag. +func (client TagsClient) Delete(tagName string) (result autorest.Response, ae error) { + req, err := client.DeletePreparer(tagName) + if err != nil { + return result, autorest.NewErrorWithError(err, "resources/TagsClient", "Delete", autorest.UndefinedStatusCode, "Failure preparing request") + } + + resp, err := client.DeleteSender(req) + if err != nil { + result.Response = resp + return result, autorest.NewErrorWithError(err, "resources/TagsClient", "Delete", resp.StatusCode, "Failure sending request") + } + + result, err = client.DeleteResponder(resp) + if err != nil { + ae = autorest.NewErrorWithError(err, "resources/TagsClient", "Delete", resp.StatusCode, "Failure responding to request") + } + + return +} + +// DeletePreparer prepares the Delete request. +func (client TagsClient) DeletePreparer(tagName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "subscriptionId": url.QueryEscape(client.SubscriptionID), + "tagName": url.QueryEscape(tagName), + } + + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsDelete(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPath("/subscriptions/{subscriptionId}/tagNames/{tagName}"), + autorest.WithPathParameters(pathParameters), + autorest.WithQueryParameters(queryParameters)) +} + +// DeleteSender sends the Delete request. The method will close the +// http.Response Body if it receives an error. +func (client TagsClient) DeleteSender(req *http.Request) (*http.Response, error) { + return client.Send(req, http.StatusOK, http.StatusNoContent) +} + +// DeleteResponder handles the response to the Delete request. The method always +// closes the http.Response Body. +func (client TagsClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + autorest.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNoContent), + autorest.ByClosing()) + result.Response = resp + return +} + +// DeleteValue delete a subscription resource tag value. +// +// tagName is the name of the tag. tagValue is the value of the tag. 
+func (client TagsClient) DeleteValue(tagName string, tagValue string) (result autorest.Response, ae error) { + req, err := client.DeleteValuePreparer(tagName, tagValue) + if err != nil { + return result, autorest.NewErrorWithError(err, "resources/TagsClient", "DeleteValue", autorest.UndefinedStatusCode, "Failure preparing request") + } + + resp, err := client.DeleteValueSender(req) + if err != nil { + result.Response = resp + return result, autorest.NewErrorWithError(err, "resources/TagsClient", "DeleteValue", resp.StatusCode, "Failure sending request") + } + + result, err = client.DeleteValueResponder(resp) + if err != nil { + ae = autorest.NewErrorWithError(err, "resources/TagsClient", "DeleteValue", resp.StatusCode, "Failure responding to request") + } + + return +} + +// DeleteValuePreparer prepares the DeleteValue request. +func (client TagsClient) DeleteValuePreparer(tagName string, tagValue string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "subscriptionId": url.QueryEscape(client.SubscriptionID), + "tagName": url.QueryEscape(tagName), + "tagValue": url.QueryEscape(tagValue), + } + + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsDelete(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPath("/subscriptions/{subscriptionId}/tagNames/{tagName}/tagValues/{tagValue}"), + autorest.WithPathParameters(pathParameters), + autorest.WithQueryParameters(queryParameters)) +} + +// DeleteValueSender sends the DeleteValue request. The method will close the +// http.Response Body if it receives an error. +func (client TagsClient) DeleteValueSender(req *http.Request) (*http.Response, error) { + return client.Send(req, http.StatusOK, http.StatusNoContent) +} + +// DeleteValueResponder handles the response to the DeleteValue request. The method always +// closes the http.Response Body. +func (client TagsClient) DeleteValueResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + autorest.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNoContent), + autorest.ByClosing()) + result.Response = resp + return +} + +// List get a list of subscription resource tags. +func (client TagsClient) List() (result TagsListResult, ae error) { + req, err := client.ListPreparer() + if err != nil { + return result, autorest.NewErrorWithError(err, "resources/TagsClient", "List", autorest.UndefinedStatusCode, "Failure preparing request") + } + + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "resources/TagsClient", "List", resp.StatusCode, "Failure sending request") + } + + result, err = client.ListResponder(resp) + if err != nil { + ae = autorest.NewErrorWithError(err, "resources/TagsClient", "List", resp.StatusCode, "Failure responding to request") + } + + return +} + +// ListPreparer prepares the List request. 
+func (client TagsClient) ListPreparer() (*http.Request, error) { + pathParameters := map[string]interface{}{ + "subscriptionId": url.QueryEscape(client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPath("/subscriptions/{subscriptionId}/tagNames"), + autorest.WithPathParameters(pathParameters), + autorest.WithQueryParameters(queryParameters)) +} + +// ListSender sends the List request. The method will close the +// http.Response Body if it receives an error. +func (client TagsClient) ListSender(req *http.Request) (*http.Response, error) { + return client.Send(req, http.StatusOK) +} + +// ListResponder handles the response to the List request. The method always +// closes the http.Response Body. +func (client TagsClient) ListResponder(resp *http.Response) (result TagsListResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + autorest.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// ListNextResults retrieves the next set of results, if any. +func (client TagsClient) ListNextResults(lastResults TagsListResult) (result TagsListResult, ae error) { + req, err := lastResults.TagsListResultPreparer() + if err != nil { + return result, autorest.NewErrorWithError(err, "resources/TagsClient", "List", autorest.UndefinedStatusCode, "Failure preparing next results request request") + } + if req == nil { + return + } + + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "resources/TagsClient", "List", resp.StatusCode, "Failure sending next results request request") + } + + result, err = client.ListResponder(resp) + if err != nil { + ae = autorest.NewErrorWithError(err, "resources/TagsClient", "List", resp.StatusCode, "Failure responding to next results request request") + } + + return +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/resources/resources/version.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/resources/resources/version.go new file mode 100644 index 000000000..695898636 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/resources/resources/version.go @@ -0,0 +1,43 @@ +package resources + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator 0.12.0.0 +// Changes may cause incorrect behavior and will be lost if the code is +// regenerated. 
+ +import ( + "fmt" +) + +const ( + major = "0" + minor = "4" + patch = "0" + // Always begin a "tag" with a dash (as per http://semver.org) + tag = "-beta" + semVerFormat = "%s.%s.%s%s" + userAgentFormat = "Azure-SDK-for-Go/%s;Package arm/%s;API %s" +) + +// UserAgent returns the UserAgent string to use when sending http.Requests. +func UserAgent() string { + return fmt.Sprintf(userAgentFormat, Version(), "resources", "2014-04-01-preview") +} + +// Version returns the semantic version (see http://semver.org) of the client. +func Version() string { + return fmt.Sprintf(semVerFormat, major, minor, patch, tag) +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/scheduler/client.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/scheduler/client.go new file mode 100644 index 000000000..e1095c7c1 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/scheduler/client.go @@ -0,0 +1,52 @@ +package scheduler + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator 0.12.0.0 +// Changes may cause incorrect behavior and will be lost if the code is +// regenerated. + +import ( + "github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest" +) + +const ( + // APIVersion is the version of the Scheduler + APIVersion = "2016-01-01" + + // DefaultBaseURI is the default URI used for the service Scheduler + DefaultBaseURI = "https://management.azure.com" +) + +// ManagementClient is the base client for Scheduler. +type ManagementClient struct { + autorest.Client + BaseURI string + SubscriptionID string +} + +// New creates an instance of the ManagementClient client. +func New(subscriptionID string) ManagementClient { + return NewWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewWithBaseURI creates an instance of the ManagementClient client. +func NewWithBaseURI(baseURI string, subscriptionID string) ManagementClient { + return ManagementClient{ + Client: autorest.NewClientWithUserAgent(UserAgent()), + BaseURI: baseURI, + SubscriptionID: subscriptionID, + } +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/scheduler/jobcollections.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/scheduler/jobcollections.go new file mode 100644 index 000000000..b25b64526 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/scheduler/jobcollections.go @@ -0,0 +1,596 @@ +package scheduler + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator 0.12.0.0 +// Changes may cause incorrect behavior and will be lost if the code is +// regenerated. + +import ( + "github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest" + "net/http" + "net/url" +) + +// JobCollectionsClient is the client for the JobCollections methods of the +// Scheduler service. +type JobCollectionsClient struct { + ManagementClient +} + +// NewJobCollectionsClient creates an instance of the JobCollectionsClient +// client. +func NewJobCollectionsClient(subscriptionID string) JobCollectionsClient { + return NewJobCollectionsClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewJobCollectionsClientWithBaseURI creates an instance of the +// JobCollectionsClient client. +func NewJobCollectionsClientWithBaseURI(baseURI string, subscriptionID string) JobCollectionsClient { + return JobCollectionsClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// CreateOrUpdate provisions a new job collection or updates an existing job +// collection. +// +// resourceGroupName is the resource group name. jobCollectionName is the job +// collection name. jobCollection is the job collection definition. +func (client JobCollectionsClient) CreateOrUpdate(resourceGroupName string, jobCollectionName string, jobCollection JobCollectionDefinition) (result JobCollectionDefinition, ae error) { + req, err := client.CreateOrUpdatePreparer(resourceGroupName, jobCollectionName, jobCollection) + if err != nil { + return result, autorest.NewErrorWithError(err, "scheduler/JobCollectionsClient", "CreateOrUpdate", autorest.UndefinedStatusCode, "Failure preparing request") + } + + resp, err := client.CreateOrUpdateSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "scheduler/JobCollectionsClient", "CreateOrUpdate", resp.StatusCode, "Failure sending request") + } + + result, err = client.CreateOrUpdateResponder(resp) + if err != nil { + ae = autorest.NewErrorWithError(err, "scheduler/JobCollectionsClient", "CreateOrUpdate", resp.StatusCode, "Failure responding to request") + } + + return +} + +// CreateOrUpdatePreparer prepares the CreateOrUpdate request. +func (client JobCollectionsClient) CreateOrUpdatePreparer(resourceGroupName string, jobCollectionName string, jobCollection JobCollectionDefinition) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "jobCollectionName": url.QueryEscape(jobCollectionName), + "resourceGroupName": url.QueryEscape(resourceGroupName), + "subscriptionId": url.QueryEscape(client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsPut(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Scheduler/jobCollections/{jobCollectionName}"), + autorest.WithJSON(jobCollection), + autorest.WithPathParameters(pathParameters), + autorest.WithQueryParameters(queryParameters)) +} + +// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the +// http.Response Body if it receives an error. 
+func (client JobCollectionsClient) CreateOrUpdateSender(req *http.Request) (*http.Response, error) { + return client.Send(req, http.StatusOK, http.StatusCreated) +} + +// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always +// closes the http.Response Body. +func (client JobCollectionsClient) CreateOrUpdateResponder(resp *http.Response) (result JobCollectionDefinition, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + autorest.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// Delete deletes a job collection. +// +// resourceGroupName is the resource group name. jobCollectionName is the job +// collection name. +func (client JobCollectionsClient) Delete(resourceGroupName string, jobCollectionName string) (result autorest.Response, ae error) { + req, err := client.DeletePreparer(resourceGroupName, jobCollectionName) + if err != nil { + return result, autorest.NewErrorWithError(err, "scheduler/JobCollectionsClient", "Delete", autorest.UndefinedStatusCode, "Failure preparing request") + } + + resp, err := client.DeleteSender(req) + if err != nil { + result.Response = resp + return result, autorest.NewErrorWithError(err, "scheduler/JobCollectionsClient", "Delete", resp.StatusCode, "Failure sending request") + } + + result, err = client.DeleteResponder(resp) + if err != nil { + ae = autorest.NewErrorWithError(err, "scheduler/JobCollectionsClient", "Delete", resp.StatusCode, "Failure responding to request") + } + + return +} + +// DeletePreparer prepares the Delete request. +func (client JobCollectionsClient) DeletePreparer(resourceGroupName string, jobCollectionName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "jobCollectionName": url.QueryEscape(jobCollectionName), + "resourceGroupName": url.QueryEscape(resourceGroupName), + "subscriptionId": url.QueryEscape(client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsDelete(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Scheduler/jobCollections/{jobCollectionName}"), + autorest.WithPathParameters(pathParameters), + autorest.WithQueryParameters(queryParameters)) +} + +// DeleteSender sends the Delete request. The method will close the +// http.Response Body if it receives an error. +func (client JobCollectionsClient) DeleteSender(req *http.Request) (*http.Response, error) { + return client.Send(req, http.StatusOK) +} + +// DeleteResponder handles the response to the Delete request. The method always +// closes the http.Response Body. +func (client JobCollectionsClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + autorest.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByClosing()) + result.Response = resp + return +} + +// Disable disables all of the jobs in the job collection. +// +// resourceGroupName is the resource group name. jobCollectionName is the job +// collection name. 
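+//
+// An illustrative call, using a hypothetical subscription ID and resource
+// names (not taken from the generated code):
+//
+//	client := scheduler.NewJobCollectionsClient("00000000-0000-0000-0000-000000000000")
+//	if _, err := client.Disable("example-rg", "example-collection"); err != nil {
+//		// err wraps the autorest error detail
+//	}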
+func (client JobCollectionsClient) Disable(resourceGroupName string, jobCollectionName string) (result autorest.Response, ae error) { + req, err := client.DisablePreparer(resourceGroupName, jobCollectionName) + if err != nil { + return result, autorest.NewErrorWithError(err, "scheduler/JobCollectionsClient", "Disable", autorest.UndefinedStatusCode, "Failure preparing request") + } + + resp, err := client.DisableSender(req) + if err != nil { + result.Response = resp + return result, autorest.NewErrorWithError(err, "scheduler/JobCollectionsClient", "Disable", resp.StatusCode, "Failure sending request") + } + + result, err = client.DisableResponder(resp) + if err != nil { + ae = autorest.NewErrorWithError(err, "scheduler/JobCollectionsClient", "Disable", resp.StatusCode, "Failure responding to request") + } + + return +} + +// DisablePreparer prepares the Disable request. +func (client JobCollectionsClient) DisablePreparer(resourceGroupName string, jobCollectionName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "jobCollectionName": url.QueryEscape(jobCollectionName), + "resourceGroupName": url.QueryEscape(resourceGroupName), + "subscriptionId": url.QueryEscape(client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Scheduler/jobCollections/{jobCollectionName}/disable"), + autorest.WithPathParameters(pathParameters), + autorest.WithQueryParameters(queryParameters)) +} + +// DisableSender sends the Disable request. The method will close the +// http.Response Body if it receives an error. +func (client JobCollectionsClient) DisableSender(req *http.Request) (*http.Response, error) { + return client.Send(req, http.StatusOK) +} + +// DisableResponder handles the response to the Disable request. The method always +// closes the http.Response Body. +func (client JobCollectionsClient) DisableResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + autorest.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByClosing()) + result.Response = resp + return +} + +// Enable enables all of the jobs in the job collection. +// +// resourceGroupName is the resource group name. jobCollectionName is the job +// collection name. +func (client JobCollectionsClient) Enable(resourceGroupName string, jobCollectionName string) (result autorest.Response, ae error) { + req, err := client.EnablePreparer(resourceGroupName, jobCollectionName) + if err != nil { + return result, autorest.NewErrorWithError(err, "scheduler/JobCollectionsClient", "Enable", autorest.UndefinedStatusCode, "Failure preparing request") + } + + resp, err := client.EnableSender(req) + if err != nil { + result.Response = resp + return result, autorest.NewErrorWithError(err, "scheduler/JobCollectionsClient", "Enable", resp.StatusCode, "Failure sending request") + } + + result, err = client.EnableResponder(resp) + if err != nil { + ae = autorest.NewErrorWithError(err, "scheduler/JobCollectionsClient", "Enable", resp.StatusCode, "Failure responding to request") + } + + return +} + +// EnablePreparer prepares the Enable request. 
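+//
+// Enable wires this Preparer to the Sender and Responder below; the three can
+// also be composed by hand when more control over the request is needed. A
+// rough sketch with hypothetical names:
+//
+//	req, err := client.EnablePreparer("example-rg", "example-collection")
+//	if err == nil {
+//		var resp *http.Response
+//		if resp, err = client.EnableSender(req); err == nil {
+//			_, err = client.EnableResponder(resp)
+//		}
+//	}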
+func (client JobCollectionsClient) EnablePreparer(resourceGroupName string, jobCollectionName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "jobCollectionName": url.QueryEscape(jobCollectionName), + "resourceGroupName": url.QueryEscape(resourceGroupName), + "subscriptionId": url.QueryEscape(client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Scheduler/jobCollections/{jobCollectionName}/enable"), + autorest.WithPathParameters(pathParameters), + autorest.WithQueryParameters(queryParameters)) +} + +// EnableSender sends the Enable request. The method will close the +// http.Response Body if it receives an error. +func (client JobCollectionsClient) EnableSender(req *http.Request) (*http.Response, error) { + return client.Send(req, http.StatusOK) +} + +// EnableResponder handles the response to the Enable request. The method always +// closes the http.Response Body. +func (client JobCollectionsClient) EnableResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + autorest.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByClosing()) + result.Response = resp + return +} + +// Get gets a job collection. +// +// resourceGroupName is the resource group name. jobCollectionName is the job +// collection name. +func (client JobCollectionsClient) Get(resourceGroupName string, jobCollectionName string) (result JobCollectionDefinition, ae error) { + req, err := client.GetPreparer(resourceGroupName, jobCollectionName) + if err != nil { + return result, autorest.NewErrorWithError(err, "scheduler/JobCollectionsClient", "Get", autorest.UndefinedStatusCode, "Failure preparing request") + } + + resp, err := client.GetSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "scheduler/JobCollectionsClient", "Get", resp.StatusCode, "Failure sending request") + } + + result, err = client.GetResponder(resp) + if err != nil { + ae = autorest.NewErrorWithError(err, "scheduler/JobCollectionsClient", "Get", resp.StatusCode, "Failure responding to request") + } + + return +} + +// GetPreparer prepares the Get request. +func (client JobCollectionsClient) GetPreparer(resourceGroupName string, jobCollectionName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "jobCollectionName": url.QueryEscape(jobCollectionName), + "resourceGroupName": url.QueryEscape(resourceGroupName), + "subscriptionId": url.QueryEscape(client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Scheduler/jobCollections/{jobCollectionName}"), + autorest.WithPathParameters(pathParameters), + autorest.WithQueryParameters(queryParameters)) +} + +// GetSender sends the Get request. The method will close the +// http.Response Body if it receives an error. 
+func (client JobCollectionsClient) GetSender(req *http.Request) (*http.Response, error) {
+	return client.Send(req, http.StatusOK)
+}
+
+// GetResponder handles the response to the Get request. The method always
+// closes the http.Response Body.
+func (client JobCollectionsClient) GetResponder(resp *http.Response) (result JobCollectionDefinition, err error) {
+	err = autorest.Respond(
+		resp,
+		client.ByInspecting(),
+		autorest.WithErrorUnlessStatusCode(http.StatusOK),
+		autorest.ByUnmarshallingJSON(&result),
+		autorest.ByClosing())
+	result.Response = autorest.Response{Response: resp}
+	return
+}
+
+// ListByResourceGroup gets all job collections under the specified resource
+// group.
+//
+// resourceGroupName is the resource group name.
+func (client JobCollectionsClient) ListByResourceGroup(resourceGroupName string) (result JobCollectionListResult, ae error) {
+	req, err := client.ListByResourceGroupPreparer(resourceGroupName)
+	if err != nil {
+		return result, autorest.NewErrorWithError(err, "scheduler/JobCollectionsClient", "ListByResourceGroup", autorest.UndefinedStatusCode, "Failure preparing request")
+	}
+
+	resp, err := client.ListByResourceGroupSender(req)
+	if err != nil {
+		result.Response = autorest.Response{Response: resp}
+		return result, autorest.NewErrorWithError(err, "scheduler/JobCollectionsClient", "ListByResourceGroup", resp.StatusCode, "Failure sending request")
+	}
+
+	result, err = client.ListByResourceGroupResponder(resp)
+	if err != nil {
+		ae = autorest.NewErrorWithError(err, "scheduler/JobCollectionsClient", "ListByResourceGroup", resp.StatusCode, "Failure responding to request")
+	}
+
+	return
+}
+
+// ListByResourceGroupPreparer prepares the ListByResourceGroup request.
+func (client JobCollectionsClient) ListByResourceGroupPreparer(resourceGroupName string) (*http.Request, error) {
+	pathParameters := map[string]interface{}{
+		"resourceGroupName": url.QueryEscape(resourceGroupName),
+		"subscriptionId":    url.QueryEscape(client.SubscriptionID),
+	}
+
+	queryParameters := map[string]interface{}{
+		"api-version": APIVersion,
+	}
+
+	return autorest.Prepare(&http.Request{},
+		autorest.AsJSON(),
+		autorest.AsGet(),
+		autorest.WithBaseURL(client.BaseURI),
+		autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Scheduler/jobCollections"),
+		autorest.WithPathParameters(pathParameters),
+		autorest.WithQueryParameters(queryParameters))
+}
+
+// ListByResourceGroupSender sends the ListByResourceGroup request. The method will close the
+// http.Response Body if it receives an error.
+func (client JobCollectionsClient) ListByResourceGroupSender(req *http.Request) (*http.Response, error) {
+	return client.Send(req, http.StatusOK)
+}
+
+// ListByResourceGroupResponder handles the response to the ListByResourceGroup request. The method always
+// closes the http.Response Body.
+func (client JobCollectionsClient) ListByResourceGroupResponder(resp *http.Response) (result JobCollectionListResult, err error) {
+	err = autorest.Respond(
+		resp,
+		client.ByInspecting(),
+		autorest.WithErrorUnlessStatusCode(http.StatusOK),
+		autorest.ByUnmarshallingJSON(&result),
+		autorest.ByClosing())
+	result.Response = autorest.Response{Response: resp}
+	return
+}
+
+// ListByResourceGroupNextResults retrieves the next set of results, if any.
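+//
+// A typical pagination loop over job collections (illustrative sketch;
+// "example-rg" is a hypothetical resource group):
+//
+//	page, err := client.ListByResourceGroup("example-rg")
+//	for err == nil && page.NextLink != nil {
+//		page, err = client.ListByResourceGroupNextResults(page)
+//	}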
+func (client JobCollectionsClient) ListByResourceGroupNextResults(lastResults JobCollectionListResult) (result JobCollectionListResult, ae error) {
+	req, err := lastResults.JobCollectionListResultPreparer()
+	if err != nil {
+		return result, autorest.NewErrorWithError(err, "scheduler/JobCollectionsClient", "ListByResourceGroup", autorest.UndefinedStatusCode, "Failure preparing next results request")
+	}
+	if req == nil {
+		return
+	}
+
+	resp, err := client.ListByResourceGroupSender(req)
+	if err != nil {
+		result.Response = autorest.Response{Response: resp}
+		return result, autorest.NewErrorWithError(err, "scheduler/JobCollectionsClient", "ListByResourceGroup", resp.StatusCode, "Failure sending next results request")
+	}
+
+	result, err = client.ListByResourceGroupResponder(resp)
+	if err != nil {
+		ae = autorest.NewErrorWithError(err, "scheduler/JobCollectionsClient", "ListByResourceGroup", resp.StatusCode, "Failure responding to next results request")
+	}
+
+	return
+}
+
+// ListBySubscription gets all job collections under the specified
+// subscription.
+func (client JobCollectionsClient) ListBySubscription() (result JobCollectionListResult, ae error) {
+	req, err := client.ListBySubscriptionPreparer()
+	if err != nil {
+		return result, autorest.NewErrorWithError(err, "scheduler/JobCollectionsClient", "ListBySubscription", autorest.UndefinedStatusCode, "Failure preparing request")
+	}
+
+	resp, err := client.ListBySubscriptionSender(req)
+	if err != nil {
+		result.Response = autorest.Response{Response: resp}
+		return result, autorest.NewErrorWithError(err, "scheduler/JobCollectionsClient", "ListBySubscription", resp.StatusCode, "Failure sending request")
+	}
+
+	result, err = client.ListBySubscriptionResponder(resp)
+	if err != nil {
+		ae = autorest.NewErrorWithError(err, "scheduler/JobCollectionsClient", "ListBySubscription", resp.StatusCode, "Failure responding to request")
+	}
+
+	return
+}
+
+// ListBySubscriptionPreparer prepares the ListBySubscription request.
+func (client JobCollectionsClient) ListBySubscriptionPreparer() (*http.Request, error) {
+	pathParameters := map[string]interface{}{
+		"subscriptionId": url.QueryEscape(client.SubscriptionID),
+	}
+
+	queryParameters := map[string]interface{}{
+		"api-version": APIVersion,
+	}
+
+	return autorest.Prepare(&http.Request{},
+		autorest.AsJSON(),
+		autorest.AsGet(),
+		autorest.WithBaseURL(client.BaseURI),
+		autorest.WithPath("/subscriptions/{subscriptionId}/providers/Microsoft.Scheduler/jobCollections"),
+		autorest.WithPathParameters(pathParameters),
+		autorest.WithQueryParameters(queryParameters))
+}
+
+// ListBySubscriptionSender sends the ListBySubscription request. The method will close the
+// http.Response Body if it receives an error.
+func (client JobCollectionsClient) ListBySubscriptionSender(req *http.Request) (*http.Response, error) {
+	return client.Send(req, http.StatusOK)
+}
+
+// ListBySubscriptionResponder handles the response to the ListBySubscription request. The method always
+// closes the http.Response Body.
+func (client JobCollectionsClient) ListBySubscriptionResponder(resp *http.Response) (result JobCollectionListResult, err error) {
+	err = autorest.Respond(
+		resp,
+		client.ByInspecting(),
+		autorest.WithErrorUnlessStatusCode(http.StatusOK),
+		autorest.ByUnmarshallingJSON(&result),
+		autorest.ByClosing())
+	result.Response = autorest.Response{Response: resp}
+	return
+}
+
+// ListBySubscriptionNextResults retrieves the next set of results, if any.
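+//
+// When JobCollectionListResultPreparer returns a nil request, no further
+// pages exist and the zero-value result is returned with a nil error.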
+func (client JobCollectionsClient) ListBySubscriptionNextResults(lastResults JobCollectionListResult) (result JobCollectionListResult, ae error) {
+	req, err := lastResults.JobCollectionListResultPreparer()
+	if err != nil {
+		return result, autorest.NewErrorWithError(err, "scheduler/JobCollectionsClient", "ListBySubscription", autorest.UndefinedStatusCode, "Failure preparing next results request")
+	}
+	if req == nil {
+		return
+	}
+
+	resp, err := client.ListBySubscriptionSender(req)
+	if err != nil {
+		result.Response = autorest.Response{Response: resp}
+		return result, autorest.NewErrorWithError(err, "scheduler/JobCollectionsClient", "ListBySubscription", resp.StatusCode, "Failure sending next results request")
+	}
+
+	result, err = client.ListBySubscriptionResponder(resp)
+	if err != nil {
+		ae = autorest.NewErrorWithError(err, "scheduler/JobCollectionsClient", "ListBySubscription", resp.StatusCode, "Failure responding to next results request")
+	}
+
+	return
+}
+
+// Patch patches an existing job collection.
+//
+// resourceGroupName is the resource group name. jobCollectionName is the job
+// collection name. jobCollection is the job collection definition.
+func (client JobCollectionsClient) Patch(resourceGroupName string, jobCollectionName string, jobCollection JobCollectionDefinition) (result JobCollectionDefinition, ae error) {
+	req, err := client.PatchPreparer(resourceGroupName, jobCollectionName, jobCollection)
+	if err != nil {
+		return result, autorest.NewErrorWithError(err, "scheduler/JobCollectionsClient", "Patch", autorest.UndefinedStatusCode, "Failure preparing request")
+	}
+
+	resp, err := client.PatchSender(req)
+	if err != nil {
+		result.Response = autorest.Response{Response: resp}
+		return result, autorest.NewErrorWithError(err, "scheduler/JobCollectionsClient", "Patch", resp.StatusCode, "Failure sending request")
+	}
+
+	result, err = client.PatchResponder(resp)
+	if err != nil {
+		ae = autorest.NewErrorWithError(err, "scheduler/JobCollectionsClient", "Patch", resp.StatusCode, "Failure responding to request")
+	}
+
+	return
+}
+
+// PatchPreparer prepares the Patch request.
+func (client JobCollectionsClient) PatchPreparer(resourceGroupName string, jobCollectionName string, jobCollection JobCollectionDefinition) (*http.Request, error) {
+	pathParameters := map[string]interface{}{
+		"jobCollectionName": url.QueryEscape(jobCollectionName),
+		"resourceGroupName": url.QueryEscape(resourceGroupName),
+		"subscriptionId":    url.QueryEscape(client.SubscriptionID),
+	}
+
+	queryParameters := map[string]interface{}{
+		"api-version": APIVersion,
+	}
+
+	return autorest.Prepare(&http.Request{},
+		autorest.AsJSON(),
+		autorest.AsPatch(),
+		autorest.WithBaseURL(client.BaseURI),
+		autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Scheduler/jobCollections/{jobCollectionName}"),
+		autorest.WithJSON(jobCollection),
+		autorest.WithPathParameters(pathParameters),
+		autorest.WithQueryParameters(queryParameters))
+}
+
+// PatchSender sends the Patch request. The method will close the
+// http.Response Body if it receives an error.
+func (client JobCollectionsClient) PatchSender(req *http.Request) (*http.Response, error) {
+	return client.Send(req, http.StatusOK)
+}
+
+// PatchResponder handles the response to the Patch request. The method always
+// closes the http.Response Body.
+func (client JobCollectionsClient) PatchResponder(resp *http.Response) (result JobCollectionDefinition, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + autorest.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/scheduler/jobs.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/scheduler/jobs.go new file mode 100644 index 000000000..9d27c6d20 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/scheduler/jobs.go @@ -0,0 +1,566 @@ +package scheduler + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator 0.12.0.0 +// Changes may cause incorrect behavior and will be lost if the code is +// regenerated. + +import ( + "github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest" + "net/http" + "net/url" +) + +// JobsClient is the client for the Jobs methods of the Scheduler service. +type JobsClient struct { + ManagementClient +} + +// NewJobsClient creates an instance of the JobsClient client. +func NewJobsClient(subscriptionID string) JobsClient { + return NewJobsClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewJobsClientWithBaseURI creates an instance of the JobsClient client. +func NewJobsClientWithBaseURI(baseURI string, subscriptionID string) JobsClient { + return JobsClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// CreateOrUpdate provisions a new job or updates an existing job. +// +// resourceGroupName is the resource group name. jobCollectionName is the job +// collection name. jobName is the job name. job is the job definition. +func (client JobsClient) CreateOrUpdate(resourceGroupName string, jobCollectionName string, jobName string, job JobDefinition) (result JobDefinition, ae error) { + req, err := client.CreateOrUpdatePreparer(resourceGroupName, jobCollectionName, jobName, job) + if err != nil { + return result, autorest.NewErrorWithError(err, "scheduler/JobsClient", "CreateOrUpdate", autorest.UndefinedStatusCode, "Failure preparing request") + } + + resp, err := client.CreateOrUpdateSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "scheduler/JobsClient", "CreateOrUpdate", resp.StatusCode, "Failure sending request") + } + + result, err = client.CreateOrUpdateResponder(resp) + if err != nil { + ae = autorest.NewErrorWithError(err, "scheduler/JobsClient", "CreateOrUpdate", resp.StatusCode, "Failure responding to request") + } + + return +} + +// CreateOrUpdatePreparer prepares the CreateOrUpdate request. 
+func (client JobsClient) CreateOrUpdatePreparer(resourceGroupName string, jobCollectionName string, jobName string, job JobDefinition) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "jobCollectionName": url.QueryEscape(jobCollectionName), + "jobName": url.QueryEscape(jobName), + "resourceGroupName": url.QueryEscape(resourceGroupName), + "subscriptionId": url.QueryEscape(client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsPut(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Scheduler/jobCollections/{jobCollectionName}/jobs/{jobName}"), + autorest.WithJSON(job), + autorest.WithPathParameters(pathParameters), + autorest.WithQueryParameters(queryParameters)) +} + +// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the +// http.Response Body if it receives an error. +func (client JobsClient) CreateOrUpdateSender(req *http.Request) (*http.Response, error) { + return client.Send(req, http.StatusOK, http.StatusCreated) +} + +// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always +// closes the http.Response Body. +func (client JobsClient) CreateOrUpdateResponder(resp *http.Response) (result JobDefinition, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + autorest.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// Delete deletes a job. +// +// resourceGroupName is the resource group name. jobCollectionName is the job +// collection name. jobName is the job name. +func (client JobsClient) Delete(resourceGroupName string, jobCollectionName string, jobName string) (result autorest.Response, ae error) { + req, err := client.DeletePreparer(resourceGroupName, jobCollectionName, jobName) + if err != nil { + return result, autorest.NewErrorWithError(err, "scheduler/JobsClient", "Delete", autorest.UndefinedStatusCode, "Failure preparing request") + } + + resp, err := client.DeleteSender(req) + if err != nil { + result.Response = resp + return result, autorest.NewErrorWithError(err, "scheduler/JobsClient", "Delete", resp.StatusCode, "Failure sending request") + } + + result, err = client.DeleteResponder(resp) + if err != nil { + ae = autorest.NewErrorWithError(err, "scheduler/JobsClient", "Delete", resp.StatusCode, "Failure responding to request") + } + + return +} + +// DeletePreparer prepares the Delete request. 
+func (client JobsClient) DeletePreparer(resourceGroupName string, jobCollectionName string, jobName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "jobCollectionName": url.QueryEscape(jobCollectionName), + "jobName": url.QueryEscape(jobName), + "resourceGroupName": url.QueryEscape(resourceGroupName), + "subscriptionId": url.QueryEscape(client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsDelete(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Scheduler/jobCollections/{jobCollectionName}/jobs/{jobName}"), + autorest.WithPathParameters(pathParameters), + autorest.WithQueryParameters(queryParameters)) +} + +// DeleteSender sends the Delete request. The method will close the +// http.Response Body if it receives an error. +func (client JobsClient) DeleteSender(req *http.Request) (*http.Response, error) { + return client.Send(req, http.StatusOK) +} + +// DeleteResponder handles the response to the Delete request. The method always +// closes the http.Response Body. +func (client JobsClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + autorest.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByClosing()) + result.Response = resp + return +} + +// Get gets a job. +// +// resourceGroupName is the resource group name. jobCollectionName is the job +// collection name. jobName is the job name. +func (client JobsClient) Get(resourceGroupName string, jobCollectionName string, jobName string) (result JobDefinition, ae error) { + req, err := client.GetPreparer(resourceGroupName, jobCollectionName, jobName) + if err != nil { + return result, autorest.NewErrorWithError(err, "scheduler/JobsClient", "Get", autorest.UndefinedStatusCode, "Failure preparing request") + } + + resp, err := client.GetSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "scheduler/JobsClient", "Get", resp.StatusCode, "Failure sending request") + } + + result, err = client.GetResponder(resp) + if err != nil { + ae = autorest.NewErrorWithError(err, "scheduler/JobsClient", "Get", resp.StatusCode, "Failure responding to request") + } + + return +} + +// GetPreparer prepares the Get request. +func (client JobsClient) GetPreparer(resourceGroupName string, jobCollectionName string, jobName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "jobCollectionName": url.QueryEscape(jobCollectionName), + "jobName": url.QueryEscape(jobName), + "resourceGroupName": url.QueryEscape(resourceGroupName), + "subscriptionId": url.QueryEscape(client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Scheduler/jobCollections/{jobCollectionName}/jobs/{jobName}"), + autorest.WithPathParameters(pathParameters), + autorest.WithQueryParameters(queryParameters)) +} + +// GetSender sends the Get request. The method will close the +// http.Response Body if it receives an error. 
+func (client JobsClient) GetSender(req *http.Request) (*http.Response, error) {
+	return client.Send(req, http.StatusOK)
+}
+
+// GetResponder handles the response to the Get request. The method always
+// closes the http.Response Body.
+func (client JobsClient) GetResponder(resp *http.Response) (result JobDefinition, err error) {
+	err = autorest.Respond(
+		resp,
+		client.ByInspecting(),
+		autorest.WithErrorUnlessStatusCode(http.StatusOK),
+		autorest.ByUnmarshallingJSON(&result),
+		autorest.ByClosing())
+	result.Response = autorest.Response{Response: resp}
+	return
+}
+
+// List lists all jobs under the specified job collection.
+//
+// resourceGroupName is the resource group name. jobCollectionName is the job
+// collection name. top is the number of jobs to request, in the range
+// [1..100]. skip is the (0-based) index of the job list from which to begin
+// requesting entries. filter is the filter to apply on the job state.
+func (client JobsClient) List(resourceGroupName string, jobCollectionName string, top *int, skip *int, filter string) (result JobListResult, ae error) {
+	req, err := client.ListPreparer(resourceGroupName, jobCollectionName, top, skip, filter)
+	if err != nil {
+		return result, autorest.NewErrorWithError(err, "scheduler/JobsClient", "List", autorest.UndefinedStatusCode, "Failure preparing request")
+	}
+
+	resp, err := client.ListSender(req)
+	if err != nil {
+		result.Response = autorest.Response{Response: resp}
+		return result, autorest.NewErrorWithError(err, "scheduler/JobsClient", "List", resp.StatusCode, "Failure sending request")
+	}
+
+	result, err = client.ListResponder(resp)
+	if err != nil {
+		ae = autorest.NewErrorWithError(err, "scheduler/JobsClient", "List", resp.StatusCode, "Failure responding to request")
+	}
+
+	return
+}
+
+// ListPreparer prepares the List request.
+func (client JobsClient) ListPreparer(resourceGroupName string, jobCollectionName string, top *int, skip *int, filter string) (*http.Request, error) {
+	pathParameters := map[string]interface{}{
+		"jobCollectionName": url.QueryEscape(jobCollectionName),
+		"resourceGroupName": url.QueryEscape(resourceGroupName),
+		"subscriptionId":    url.QueryEscape(client.SubscriptionID),
+	}
+
+	queryParameters := map[string]interface{}{
+		"api-version": APIVersion,
+	}
+	if top != nil {
+		queryParameters["$top"] = top
+	}
+	if skip != nil {
+		queryParameters["$skip"] = skip
+	}
+	if len(filter) > 0 {
+		queryParameters["$filter"] = filter
+	}
+
+	return autorest.Prepare(&http.Request{},
+		autorest.AsJSON(),
+		autorest.AsGet(),
+		autorest.WithBaseURL(client.BaseURI),
+		autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Scheduler/jobCollections/{jobCollectionName}/jobs"),
+		autorest.WithPathParameters(pathParameters),
+		autorest.WithQueryParameters(queryParameters))
+}
+
+// ListSender sends the List request. The method will close the
+// http.Response Body if it receives an error.
+func (client JobsClient) ListSender(req *http.Request) (*http.Response, error) {
+	return client.Send(req, http.StatusOK)
+}
+
+// ListResponder handles the response to the List request. The method always
+// closes the http.Response Body.
+func (client JobsClient) ListResponder(resp *http.Response) (result JobListResult, err error) {
+	err = autorest.Respond(
+		resp,
+		client.ByInspecting(),
+		autorest.WithErrorUnlessStatusCode(http.StatusOK),
+		autorest.ByUnmarshallingJSON(&result),
+		autorest.ByClosing())
+	result.Response = autorest.Response{Response: resp}
+	return
+}
+
+// ListNextResults retrieves the next set of results, if any.
+func (client JobsClient) ListNextResults(lastResults JobListResult) (result JobListResult, ae error) {
+	req, err := lastResults.JobListResultPreparer()
+	if err != nil {
+		return result, autorest.NewErrorWithError(err, "scheduler/JobsClient", "List", autorest.UndefinedStatusCode, "Failure preparing next results request")
+	}
+	if req == nil {
+		return
+	}
+
+	resp, err := client.ListSender(req)
+	if err != nil {
+		result.Response = autorest.Response{Response: resp}
+		return result, autorest.NewErrorWithError(err, "scheduler/JobsClient", "List", resp.StatusCode, "Failure sending next results request")
+	}
+
+	result, err = client.ListResponder(resp)
+	if err != nil {
+		ae = autorest.NewErrorWithError(err, "scheduler/JobsClient", "List", resp.StatusCode, "Failure responding to next results request")
+	}
+
+	return
+}
+
+// ListJobHistory lists job history.
+//
+// resourceGroupName is the resource group name. jobCollectionName is the job
+// collection name. jobName is the job name. top is the number of job history
+// records to request, in the range [1..100]. skip is the (0-based) index of
+// the job history list from which to begin requesting entries. filter is the
+// filter to apply on the job state.
+func (client JobsClient) ListJobHistory(resourceGroupName string, jobCollectionName string, jobName string, top *int, skip *int, filter string) (result JobHistoryListResult, ae error) {
+	req, err := client.ListJobHistoryPreparer(resourceGroupName, jobCollectionName, jobName, top, skip, filter)
+	if err != nil {
+		return result, autorest.NewErrorWithError(err, "scheduler/JobsClient", "ListJobHistory", autorest.UndefinedStatusCode, "Failure preparing request")
+	}
+
+	resp, err := client.ListJobHistorySender(req)
+	if err != nil {
+		result.Response = autorest.Response{Response: resp}
+		return result, autorest.NewErrorWithError(err, "scheduler/JobsClient", "ListJobHistory", resp.StatusCode, "Failure sending request")
+	}
+
+	result, err = client.ListJobHistoryResponder(resp)
+	if err != nil {
+		ae = autorest.NewErrorWithError(err, "scheduler/JobsClient", "ListJobHistory", resp.StatusCode, "Failure responding to request")
+	}
+
+	return
+}
+
+// ListJobHistoryPreparer prepares the ListJobHistory request.
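+//
+// The optional top and skip arguments are passed as pointers and omitted from
+// the query string when nil, as is an empty filter; for example (hypothetical
+// names):
+//
+//	top := 50
+//	history, err := client.ListJobHistory("example-rg", "example-collection", "example-job", &top, nil, "")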
+func (client JobsClient) ListJobHistoryPreparer(resourceGroupName string, jobCollectionName string, jobName string, top *int, skip *int, filter string) (*http.Request, error) {
+	pathParameters := map[string]interface{}{
+		"jobCollectionName": url.QueryEscape(jobCollectionName),
+		"jobName":           url.QueryEscape(jobName),
+		"resourceGroupName": url.QueryEscape(resourceGroupName),
+		"subscriptionId":    url.QueryEscape(client.SubscriptionID),
+	}
+
+	queryParameters := map[string]interface{}{
+		"api-version": APIVersion,
+	}
+	if top != nil {
+		queryParameters["$top"] = top
+	}
+	if skip != nil {
+		queryParameters["$skip"] = skip
+	}
+	if len(filter) > 0 {
+		queryParameters["$filter"] = filter
+	}
+
+	return autorest.Prepare(&http.Request{},
+		autorest.AsJSON(),
+		autorest.AsGet(),
+		autorest.WithBaseURL(client.BaseURI),
+		autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Scheduler/jobCollections/{jobCollectionName}/jobs/{jobName}/history"),
+		autorest.WithPathParameters(pathParameters),
+		autorest.WithQueryParameters(queryParameters))
+}
+
+// ListJobHistorySender sends the ListJobHistory request. The method will close the
+// http.Response Body if it receives an error.
+func (client JobsClient) ListJobHistorySender(req *http.Request) (*http.Response, error) {
+	return client.Send(req, http.StatusOK)
+}
+
+// ListJobHistoryResponder handles the response to the ListJobHistory request. The method always
+// closes the http.Response Body.
+func (client JobsClient) ListJobHistoryResponder(resp *http.Response) (result JobHistoryListResult, err error) {
+	err = autorest.Respond(
+		resp,
+		client.ByInspecting(),
+		autorest.WithErrorUnlessStatusCode(http.StatusOK),
+		autorest.ByUnmarshallingJSON(&result),
+		autorest.ByClosing())
+	result.Response = autorest.Response{Response: resp}
+	return
+}
+
+// ListJobHistoryNextResults retrieves the next set of results, if any.
+func (client JobsClient) ListJobHistoryNextResults(lastResults JobHistoryListResult) (result JobHistoryListResult, ae error) {
+	req, err := lastResults.JobHistoryListResultPreparer()
+	if err != nil {
+		return result, autorest.NewErrorWithError(err, "scheduler/JobsClient", "ListJobHistory", autorest.UndefinedStatusCode, "Failure preparing next results request")
+	}
+	if req == nil {
+		return
+	}
+
+	resp, err := client.ListJobHistorySender(req)
+	if err != nil {
+		result.Response = autorest.Response{Response: resp}
+		return result, autorest.NewErrorWithError(err, "scheduler/JobsClient", "ListJobHistory", resp.StatusCode, "Failure sending next results request")
+	}
+
+	result, err = client.ListJobHistoryResponder(resp)
+	if err != nil {
+		ae = autorest.NewErrorWithError(err, "scheduler/JobsClient", "ListJobHistory", resp.StatusCode, "Failure responding to next results request")
+	}
+
+	return
+}
+
+// Patch patches an existing job.
+//
+// resourceGroupName is the resource group name. jobCollectionName is the job
+// collection name. jobName is the job name. job is the job definition.
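+//
+// Because the model fields are pointers (or omitempty values), only the
+// populated fields of job are serialized into the PATCH body. A minimal
+// sketch that disables a job, with hypothetical names:
+//
+//	patched, err := client.Patch("example-rg", "example-collection", "example-job",
+//		scheduler.JobDefinition{Properties: &scheduler.JobProperties{State: scheduler.JobStateDisabled}})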
+func (client JobsClient) Patch(resourceGroupName string, jobCollectionName string, jobName string, job JobDefinition) (result JobDefinition, ae error) { + req, err := client.PatchPreparer(resourceGroupName, jobCollectionName, jobName, job) + if err != nil { + return result, autorest.NewErrorWithError(err, "scheduler/JobsClient", "Patch", autorest.UndefinedStatusCode, "Failure preparing request") + } + + resp, err := client.PatchSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "scheduler/JobsClient", "Patch", resp.StatusCode, "Failure sending request") + } + + result, err = client.PatchResponder(resp) + if err != nil { + ae = autorest.NewErrorWithError(err, "scheduler/JobsClient", "Patch", resp.StatusCode, "Failure responding to request") + } + + return +} + +// PatchPreparer prepares the Patch request. +func (client JobsClient) PatchPreparer(resourceGroupName string, jobCollectionName string, jobName string, job JobDefinition) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "jobCollectionName": url.QueryEscape(jobCollectionName), + "jobName": url.QueryEscape(jobName), + "resourceGroupName": url.QueryEscape(resourceGroupName), + "subscriptionId": url.QueryEscape(client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsPatch(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Scheduler/jobCollections/{jobCollectionName}/jobs/{jobName}"), + autorest.WithJSON(job), + autorest.WithPathParameters(pathParameters), + autorest.WithQueryParameters(queryParameters)) +} + +// PatchSender sends the Patch request. The method will close the +// http.Response Body if it receives an error. +func (client JobsClient) PatchSender(req *http.Request) (*http.Response, error) { + return client.Send(req, http.StatusOK) +} + +// PatchResponder handles the response to the Patch request. The method always +// closes the http.Response Body. +func (client JobsClient) PatchResponder(resp *http.Response) (result JobDefinition, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + autorest.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// Run runs a job. +// +// resourceGroupName is the resource group name. jobCollectionName is the job +// collection name. jobName is the job name. +func (client JobsClient) Run(resourceGroupName string, jobCollectionName string, jobName string) (result autorest.Response, ae error) { + req, err := client.RunPreparer(resourceGroupName, jobCollectionName, jobName) + if err != nil { + return result, autorest.NewErrorWithError(err, "scheduler/JobsClient", "Run", autorest.UndefinedStatusCode, "Failure preparing request") + } + + resp, err := client.RunSender(req) + if err != nil { + result.Response = resp + return result, autorest.NewErrorWithError(err, "scheduler/JobsClient", "Run", resp.StatusCode, "Failure sending request") + } + + result, err = client.RunResponder(resp) + if err != nil { + ae = autorest.NewErrorWithError(err, "scheduler/JobsClient", "Run", resp.StatusCode, "Failure responding to request") + } + + return +} + +// RunPreparer prepares the Run request. 
+func (client JobsClient) RunPreparer(resourceGroupName string, jobCollectionName string, jobName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "jobCollectionName": url.QueryEscape(jobCollectionName), + "jobName": url.QueryEscape(jobName), + "resourceGroupName": url.QueryEscape(resourceGroupName), + "subscriptionId": url.QueryEscape(client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Scheduler/jobCollections/{jobCollectionName}/jobs/{jobName}/run"), + autorest.WithPathParameters(pathParameters), + autorest.WithQueryParameters(queryParameters)) +} + +// RunSender sends the Run request. The method will close the +// http.Response Body if it receives an error. +func (client JobsClient) RunSender(req *http.Request) (*http.Response, error) { + return client.Send(req, http.StatusOK) +} + +// RunResponder handles the response to the Run request. The method always +// closes the http.Response Body. +func (client JobsClient) RunResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + autorest.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByClosing()) + result.Response = resp + return +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/scheduler/models.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/scheduler/models.go new file mode 100644 index 000000000..37ea1bd32 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/scheduler/models.go @@ -0,0 +1,534 @@ +package scheduler + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator 0.12.0.0 +// Changes may cause incorrect behavior and will be lost if the code is +// regenerated. + +import ( + "github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest" + "github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/date" + "github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/to" + "net/http" +) + +// DayOfWeek enumerates the values for day of week. +type DayOfWeek string + +const ( + // Friday specifies the friday state for day of week. + Friday DayOfWeek = "Friday" + // Monday specifies the monday state for day of week. + Monday DayOfWeek = "Monday" + // Saturday specifies the saturday state for day of week. + Saturday DayOfWeek = "Saturday" + // Sunday specifies the sunday state for day of week. + Sunday DayOfWeek = "Sunday" + // Thursday specifies the thursday state for day of week. + Thursday DayOfWeek = "Thursday" + // Tuesday specifies the tuesday state for day of week. 
+ Tuesday DayOfWeek = "Tuesday" + // Wednesday specifies the wednesday state for day of week. + Wednesday DayOfWeek = "Wednesday" +) + +// HTTPAuthenticationType enumerates the values for http authentication type. +type HTTPAuthenticationType string + +const ( + // ActiveDirectoryOAuth specifies the active directory o auth state for + // http authentication type. + ActiveDirectoryOAuth HTTPAuthenticationType = "ActiveDirectoryOAuth" + // Basic specifies the basic state for http authentication type. + Basic HTTPAuthenticationType = "Basic" + // ClientCertificate specifies the client certificate state for http + // authentication type. + ClientCertificate HTTPAuthenticationType = "ClientCertificate" + // NotSpecified specifies the not specified state for http authentication + // type. + NotSpecified HTTPAuthenticationType = "NotSpecified" +) + +// JobActionType enumerates the values for job action type. +type JobActionType string + +const ( + // HTTP specifies the http state for job action type. + HTTP JobActionType = "Http" + // HTTPS specifies the https state for job action type. + HTTPS JobActionType = "Https" + // ServiceBusQueue specifies the service bus queue state for job action + // type. + ServiceBusQueue JobActionType = "ServiceBusQueue" + // ServiceBusTopic specifies the service bus topic state for job action + // type. + ServiceBusTopic JobActionType = "ServiceBusTopic" + // StorageQueue specifies the storage queue state for job action type. + StorageQueue JobActionType = "StorageQueue" +) + +// JobCollectionState enumerates the values for job collection state. +type JobCollectionState string + +const ( + // Deleted specifies the deleted state for job collection state. + Deleted JobCollectionState = "Deleted" + // Disabled specifies the disabled state for job collection state. + Disabled JobCollectionState = "Disabled" + // Enabled specifies the enabled state for job collection state. + Enabled JobCollectionState = "Enabled" + // Suspended specifies the suspended state for job collection state. + Suspended JobCollectionState = "Suspended" +) + +// JobExecutionStatus enumerates the values for job execution status. +type JobExecutionStatus string + +const ( + // Completed specifies the completed state for job execution status. + Completed JobExecutionStatus = "Completed" + // Failed specifies the failed state for job execution status. + Failed JobExecutionStatus = "Failed" + // Postponed specifies the postponed state for job execution status. + Postponed JobExecutionStatus = "Postponed" +) + +// JobHistoryActionName enumerates the values for job history action name. +type JobHistoryActionName string + +const ( + // ErrorAction specifies the error action state for job history action + // name. + ErrorAction JobHistoryActionName = "ErrorAction" + // MainAction specifies the main action state for job history action name. + MainAction JobHistoryActionName = "MainAction" +) + +// JobScheduleDay enumerates the values for job schedule day. +type JobScheduleDay string + +const ( + // JobScheduleDayFriday specifies the job schedule day friday state for + // job schedule day. + JobScheduleDayFriday JobScheduleDay = "Friday" + // JobScheduleDayMonday specifies the job schedule day monday state for + // job schedule day. + JobScheduleDayMonday JobScheduleDay = "Monday" + // JobScheduleDaySaturday specifies the job schedule day saturday state + // for job schedule day. 
+ JobScheduleDaySaturday JobScheduleDay = "Saturday" + // JobScheduleDaySunday specifies the job schedule day sunday state for + // job schedule day. + JobScheduleDaySunday JobScheduleDay = "Sunday" + // JobScheduleDayThursday specifies the job schedule day thursday state + // for job schedule day. + JobScheduleDayThursday JobScheduleDay = "Thursday" + // JobScheduleDayTuesday specifies the job schedule day tuesday state for + // job schedule day. + JobScheduleDayTuesday JobScheduleDay = "Tuesday" + // JobScheduleDayWednesday specifies the job schedule day wednesday state + // for job schedule day. + JobScheduleDayWednesday JobScheduleDay = "Wednesday" +) + +// JobState enumerates the values for job state. +type JobState string + +const ( + // JobStateCompleted specifies the job state completed state for job state. + JobStateCompleted JobState = "Completed" + // JobStateDisabled specifies the job state disabled state for job state. + JobStateDisabled JobState = "Disabled" + // JobStateEnabled specifies the job state enabled state for job state. + JobStateEnabled JobState = "Enabled" + // JobStateFaulted specifies the job state faulted state for job state. + JobStateFaulted JobState = "Faulted" +) + +// RecurrenceFrequency enumerates the values for recurrence frequency. +type RecurrenceFrequency string + +const ( + // Day specifies the day state for recurrence frequency. + Day RecurrenceFrequency = "Day" + // Hour specifies the hour state for recurrence frequency. + Hour RecurrenceFrequency = "Hour" + // Minute specifies the minute state for recurrence frequency. + Minute RecurrenceFrequency = "Minute" + // Month specifies the month state for recurrence frequency. + Month RecurrenceFrequency = "Month" + // Week specifies the week state for recurrence frequency. + Week RecurrenceFrequency = "Week" +) + +// RetryType enumerates the values for retry type. +type RetryType string + +const ( + // Fixed specifies the fixed state for retry type. + Fixed RetryType = "Fixed" + // None specifies the none state for retry type. + None RetryType = "None" +) + +// ServiceBusAuthenticationType enumerates the values for service bus +// authentication type. +type ServiceBusAuthenticationType string + +const ( + // ServiceBusAuthenticationTypeNotSpecified specifies the service bus + // authentication type not specified state for service bus authentication + // type. + ServiceBusAuthenticationTypeNotSpecified ServiceBusAuthenticationType = "NotSpecified" + // ServiceBusAuthenticationTypeSharedAccessKey specifies the service bus + // authentication type shared access key state for service bus + // authentication type. + ServiceBusAuthenticationTypeSharedAccessKey ServiceBusAuthenticationType = "SharedAccessKey" +) + +// ServiceBusTransportType enumerates the values for service bus transport +// type. +type ServiceBusTransportType string + +const ( + // ServiceBusTransportTypeAMQP specifies the service bus transport type + // amqp state for service bus transport type. + ServiceBusTransportTypeAMQP ServiceBusTransportType = "AMQP" + // ServiceBusTransportTypeNetMessaging specifies the service bus transport + // type net messaging state for service bus transport type. + ServiceBusTransportTypeNetMessaging ServiceBusTransportType = "NetMessaging" + // ServiceBusTransportTypeNotSpecified specifies the service bus transport + // type not specified state for service bus transport type. 
+ ServiceBusTransportTypeNotSpecified ServiceBusTransportType = "NotSpecified" +) + +// SkuDefinition enumerates the values for sku definition. +type SkuDefinition string + +const ( + // Free specifies the free state for sku definition. + Free SkuDefinition = "Free" + // Premium specifies the premium state for sku definition. + Premium SkuDefinition = "Premium" + // Standard specifies the standard state for sku definition. + Standard SkuDefinition = "Standard" +) + +// BasicAuthentication is +type BasicAuthentication struct { + Type HTTPAuthenticationType `json:"type,omitempty"` + Username *string `json:"username,omitempty"` + Password *string `json:"password,omitempty"` +} + +// ClientCertAuthentication is +type ClientCertAuthentication struct { + Type HTTPAuthenticationType `json:"type,omitempty"` + Password *string `json:"password,omitempty"` + Pfx *string `json:"pfx,omitempty"` + CertificateThumbprint *string `json:"certificateThumbprint,omitempty"` + CertificateExpirationDate *date.Time `json:"certificateExpirationDate,omitempty"` + CertificateSubjectName *string `json:"certificateSubjectName,omitempty"` +} + +// HTTPAuthentication is +type HTTPAuthentication struct { + Type HTTPAuthenticationType `json:"type,omitempty"` +} + +// HTTPRequest is +type HTTPRequest struct { + Authentication *HTTPAuthentication `json:"authentication,omitempty"` + URI *string `json:"uri,omitempty"` + Method *string `json:"method,omitempty"` + Body *string `json:"body,omitempty"` + Headers *map[string]*string `json:"headers,omitempty"` +} + +// JobAction is +type JobAction struct { + Type JobActionType `json:"type,omitempty"` + Request *HTTPRequest `json:"request,omitempty"` + QueueMessage *StorageQueueMessage `json:"queueMessage,omitempty"` + ServiceBusQueueMessage *ServiceBusQueueMessage `json:"serviceBusQueueMessage,omitempty"` + ServiceBusTopicMessage *ServiceBusTopicMessage `json:"serviceBusTopicMessage,omitempty"` + RetryPolicy *RetryPolicy `json:"retryPolicy,omitempty"` + ErrorAction *JobErrorAction `json:"errorAction,omitempty"` +} + +// JobCollectionDefinition is +type JobCollectionDefinition struct { + autorest.Response `json:"-"` + ID *string `json:"id,omitempty"` + Type *string `json:"type,omitempty"` + Name *string `json:"name,omitempty"` + Location *string `json:"location,omitempty"` + Tags *map[string]*string `json:"tags,omitempty"` + Properties *JobCollectionProperties `json:"properties,omitempty"` +} + +// JobCollectionListResult is +type JobCollectionListResult struct { + autorest.Response `json:"-"` + Value *[]JobCollectionDefinition `json:"value,omitempty"` + NextLink *string `json:"nextLink,omitempty"` +} + +// JobCollectionListResultPreparer prepares a request to retrieve the next set of results. It returns +// nil if no more results exist. 
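+//
+// This is the hook used by ListByResourceGroupNextResults and
+// ListBySubscriptionNextResults: when NextLink is empty it returns nil and
+// pagination stops.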
+func (client JobCollectionListResult) JobCollectionListResultPreparer() (*http.Request, error) { + if client.NextLink == nil || len(to.String(client.NextLink)) <= 0 { + return nil, nil + } + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(to.String(client.NextLink))) +} + +// JobCollectionProperties is +type JobCollectionProperties struct { + Sku *Sku `json:"sku,omitempty"` + State JobCollectionState `json:"state,omitempty"` + Quota *JobCollectionQuota `json:"quota,omitempty"` +} + +// JobCollectionQuota is +type JobCollectionQuota struct { + MaxJobCount *int `json:"maxJobCount,omitempty"` + MaxJobOccurrence *int `json:"maxJobOccurrence,omitempty"` + MaxRecurrence *JobMaxRecurrence `json:"maxRecurrence,omitempty"` +} + +// JobDefinition is +type JobDefinition struct { + autorest.Response `json:"-"` + ID *string `json:"id,omitempty"` + Type *string `json:"type,omitempty"` + Name *string `json:"name,omitempty"` + Properties *JobProperties `json:"properties,omitempty"` +} + +// JobErrorAction is +type JobErrorAction struct { + Type JobActionType `json:"type,omitempty"` + Request *HTTPRequest `json:"request,omitempty"` + QueueMessage *StorageQueueMessage `json:"queueMessage,omitempty"` + ServiceBusQueueMessage *ServiceBusQueueMessage `json:"serviceBusQueueMessage,omitempty"` + ServiceBusTopicMessage *ServiceBusTopicMessage `json:"serviceBusTopicMessage,omitempty"` + RetryPolicy *RetryPolicy `json:"retryPolicy,omitempty"` +} + +// JobHistoryDefinition is +type JobHistoryDefinition struct { + ID *string `json:"id,omitempty"` + Type *string `json:"type,omitempty"` + Name *string `json:"name,omitempty"` + Properties *JobHistoryDefinitionProperties `json:"properties,omitempty"` +} + +// JobHistoryDefinitionProperties is +type JobHistoryDefinitionProperties struct { + StartTime *date.Time `json:"startTime,omitempty"` + EndTime *date.Time `json:"endTime,omitempty"` + ExpectedExecutionTime *date.Time `json:"expectedExecutionTime,omitempty"` + ActionName JobHistoryActionName `json:"actionName,omitempty"` + Status JobExecutionStatus `json:"status,omitempty"` + Message *string `json:"message,omitempty"` + RetryCount *int `json:"retryCount,omitempty"` + RepeatCount *int `json:"repeatCount,omitempty"` +} + +// JobHistoryFilter is +type JobHistoryFilter struct { + Status JobExecutionStatus `json:"status,omitempty"` +} + +// JobHistoryListResult is +type JobHistoryListResult struct { + autorest.Response `json:"-"` + Value *[]JobHistoryDefinition `json:"value,omitempty"` + NextLink *string `json:"nextLink,omitempty"` +} + +// JobHistoryListResultPreparer prepares a request to retrieve the next set of results. It returns +// nil if no more results exist. +func (client JobHistoryListResult) JobHistoryListResultPreparer() (*http.Request, error) { + if client.NextLink == nil || len(to.String(client.NextLink)) <= 0 { + return nil, nil + } + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(to.String(client.NextLink))) +} + +// JobListResult is +type JobListResult struct { + autorest.Response `json:"-"` + Value *[]JobDefinition `json:"value,omitempty"` + NextLink *string `json:"nextLink,omitempty"` +} + +// JobListResultPreparer prepares a request to retrieve the next set of results. It returns +// nil if no more results exist. 
+func (client JobListResult) JobListResultPreparer() (*http.Request, error) { + if client.NextLink == nil || len(to.String(client.NextLink)) <= 0 { + return nil, nil + } + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(to.String(client.NextLink))) +} + +// JobMaxRecurrence is +type JobMaxRecurrence struct { + Frequency RecurrenceFrequency `json:"frequency,omitempty"` + Interval *int `json:"interval,omitempty"` +} + +// JobProperties is +type JobProperties struct { + StartTime *date.Time `json:"startTime,omitempty"` + Action *JobAction `json:"action,omitempty"` + Recurrence *JobRecurrence `json:"recurrence,omitempty"` + State JobState `json:"state,omitempty"` + Status *JobStatus `json:"status,omitempty"` +} + +// JobRecurrence is +type JobRecurrence struct { + Frequency RecurrenceFrequency `json:"frequency,omitempty"` + Interval *int `json:"interval,omitempty"` + Count *int `json:"count,omitempty"` + EndTime *date.Time `json:"endTime,omitempty"` + Schedule *JobRecurrenceSchedule `json:"schedule,omitempty"` +} + +// JobRecurrenceSchedule is +type JobRecurrenceSchedule struct { + WeekDays *[]DayOfWeek `json:"weekDays,omitempty"` + Hours *[]int `json:"hours,omitempty"` + Minutes *[]int `json:"minutes,omitempty"` + MonthDays *[]int `json:"monthDays,omitempty"` + MonthlyOccurrences *[]JobRecurrenceScheduleMonthlyOccurrence `json:"monthlyOccurrences,omitempty"` +} + +// JobRecurrenceScheduleMonthlyOccurrence is +type JobRecurrenceScheduleMonthlyOccurrence struct { + Day JobScheduleDay `json:"day,omitempty"` + Occurrence *int `json:"Occurrence,omitempty"` +} + +// JobStateFilter is +type JobStateFilter struct { + State JobState `json:"state,omitempty"` +} + +// JobStatus is +type JobStatus struct { + ExecutionCount *int `json:"executionCount,omitempty"` + FailureCount *int `json:"failureCount,omitempty"` + FaultedCount *int `json:"faultedCount,omitempty"` + LastExecutionTime *date.Time `json:"lastExecutionTime,omitempty"` + NextExecutionTime *date.Time `json:"nextExecutionTime,omitempty"` +} + +// OAuthAuthentication is +type OAuthAuthentication struct { + Type HTTPAuthenticationType `json:"type,omitempty"` + Secret *string `json:"secret,omitempty"` + Tenant *string `json:"tenant,omitempty"` + Audience *string `json:"audience,omitempty"` + ClientID *string `json:"clientId,omitempty"` +} + +// RetryPolicy is +type RetryPolicy struct { + RetryType RetryType `json:"retryType,omitempty"` + RetryInterval *string `json:"retryInterval,omitempty"` + RetryCount *int `json:"retryCount,omitempty"` +} + +// ServiceBusAuthentication is +type ServiceBusAuthentication struct { + SasKey *string `json:"sasKey,omitempty"` + SasKeyName *string `json:"sasKeyName,omitempty"` + Type ServiceBusAuthenticationType `json:"type,omitempty"` +} + +// ServiceBusBrokeredMessageProperties is +type ServiceBusBrokeredMessageProperties struct { + ContentType *string `json:"contentType,omitempty"` + CorrelationID *string `json:"correlationId,omitempty"` + ForcePersistence *bool `json:"forcePersistence,omitempty"` + Label *string `json:"label,omitempty"` + MessageID *string `json:"messageId,omitempty"` + PartitionKey *string `json:"partitionKey,omitempty"` + ReplyTo *string `json:"replyTo,omitempty"` + ReplyToSessionID *string `json:"replyToSessionId,omitempty"` + ScheduledEnqueueTimeUtc *date.Time `json:"scheduledEnqueueTimeUtc,omitempty"` + SessionID *string `json:"sessionId,omitempty"` + TimeToLive *date.Time `json:"timeToLive,omitempty"` + To *string `json:"to,omitempty"` 
+ ViaPartitionKey *string `json:"viaPartitionKey,omitempty"` +} + +// ServiceBusMessage is +type ServiceBusMessage struct { + Authentication *ServiceBusAuthentication `json:"authentication,omitempty"` + BrokeredMessageProperties *ServiceBusBrokeredMessageProperties `json:"brokeredMessageProperties,omitempty"` + CustomMessageProperties *map[string]*string `json:"customMessageProperties,omitempty"` + Message *string `json:"message,omitempty"` + Namespace *string `json:"namespace,omitempty"` + TransportType ServiceBusTransportType `json:"transportType,omitempty"` +} + +// ServiceBusQueueMessage is +type ServiceBusQueueMessage struct { + Authentication *ServiceBusAuthentication `json:"authentication,omitempty"` + BrokeredMessageProperties *ServiceBusBrokeredMessageProperties `json:"brokeredMessageProperties,omitempty"` + CustomMessageProperties *map[string]*string `json:"customMessageProperties,omitempty"` + Message *string `json:"message,omitempty"` + Namespace *string `json:"namespace,omitempty"` + TransportType ServiceBusTransportType `json:"transportType,omitempty"` + QueueName *string `json:"queueName,omitempty"` +} + +// ServiceBusTopicMessage is +type ServiceBusTopicMessage struct { + Authentication *ServiceBusAuthentication `json:"authentication,omitempty"` + BrokeredMessageProperties *ServiceBusBrokeredMessageProperties `json:"brokeredMessageProperties,omitempty"` + CustomMessageProperties *map[string]*string `json:"customMessageProperties,omitempty"` + Message *string `json:"message,omitempty"` + Namespace *string `json:"namespace,omitempty"` + TransportType ServiceBusTransportType `json:"transportType,omitempty"` + TopicPath *string `json:"topicPath,omitempty"` +} + +// Sku is +type Sku struct { + Name SkuDefinition `json:"name,omitempty"` +} + +// StorageQueueMessage is +type StorageQueueMessage struct { + StorageAccount *string `json:"storageAccount,omitempty"` + QueueName *string `json:"queueName,omitempty"` + SasToken *string `json:"sasToken,omitempty"` + Message *string `json:"message,omitempty"` +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/scheduler/version.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/scheduler/version.go new file mode 100644 index 000000000..ff0772f51 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/scheduler/version.go @@ -0,0 +1,43 @@ +package scheduler + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator 0.12.0.0 +// Changes may cause incorrect behavior and will be lost if the code is +// regenerated. + +import ( + "fmt" +) + +const ( + major = "0" + minor = "4" + patch = "0" + // Always begin a "tag" with a dash (as per http://semver.org) + tag = "-beta" + semVerFormat = "%s.%s.%s%s" + userAgentFormat = "Azure-SDK-for-Go/%s;Package arm/%s;API %s" +) + +// UserAgent returns the UserAgent string to use when sending http.Requests. 
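+//
+// With the constants above, the result has the form
+// "Azure-SDK-for-Go/0.4.0-beta;Package arm/scheduler;API 2016-01-01"
+// (illustrative; composed from userAgentFormat, Version(), and the package
+// and API version literals below).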
+func UserAgent() string {
+	return fmt.Sprintf(userAgentFormat, Version(), "scheduler", "2016-01-01")
+}
+
+// Version returns the semantic version (see http://semver.org) of the client.
+func Version() string {
+	return fmt.Sprintf(semVerFormat, major, minor, patch, tag)
+}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/storage/accounts.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/storage/accounts.go
new file mode 100644
index 000000000..38315a840
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/storage/accounts.go
@@ -0,0 +1,648 @@
+package storage
+
+// Copyright (c) Microsoft and contributors. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Code generated by Microsoft (R) AutoRest Code Generator 0.12.0.0
+// Changes may cause incorrect behavior and will be lost if the code is
+// regenerated.
+
+import (
+	"github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest"
+	"net/http"
+	"net/url"
+)
+
+// AccountsClient is the Storage Management Client.
+type AccountsClient struct {
+	ManagementClient
+}
+
+// NewAccountsClient creates an instance of the AccountsClient client.
+func NewAccountsClient(subscriptionID string) AccountsClient {
+	return NewAccountsClientWithBaseURI(DefaultBaseURI, subscriptionID)
+}
+
+// NewAccountsClientWithBaseURI creates an instance of the AccountsClient
+// client.
+func NewAccountsClientWithBaseURI(baseURI string, subscriptionID string) AccountsClient {
+	return AccountsClient{NewWithBaseURI(baseURI, subscriptionID)}
+}
+
+// CheckNameAvailability checks that the account name is valid and is not in
+// use.
+//
+// accountName is the name of the storage account within the specified
+// resource group. Storage account names must be between 3 and 24 characters
+// in length and use numbers and lower-case letters only.
+func (client AccountsClient) CheckNameAvailability(accountName AccountCheckNameAvailabilityParameters) (result CheckNameAvailabilityResult, ae error) {
+	req, err := client.CheckNameAvailabilityPreparer(accountName)
+	if err != nil {
+		return result, autorest.NewErrorWithError(err, "storage/AccountsClient", "CheckNameAvailability", autorest.UndefinedStatusCode, "Failure preparing request")
+	}
+
+	resp, err := client.CheckNameAvailabilitySender(req)
+	if err != nil {
+		result.Response = autorest.Response{Response: resp}
+		return result, autorest.NewErrorWithError(err, "storage/AccountsClient", "CheckNameAvailability", resp.StatusCode, "Failure sending request")
+	}
+
+	result, err = client.CheckNameAvailabilityResponder(resp)
+	if err != nil {
+		ae = autorest.NewErrorWithError(err, "storage/AccountsClient", "CheckNameAvailability", resp.StatusCode, "Failure responding to request")
+	}
+
+	return
+}
+
+// CheckNameAvailabilityPreparer prepares the CheckNameAvailability request.
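+//
+// Sketch of the higher-level call this prepares (illustrative; the Type
+// value is an assumed resource-type string, not taken from this file):
+//
+//	name := "mystorageacct"
+//	typ := "Microsoft.Storage/storageAccounts"
+//	res, err := client.CheckNameAvailability(
+//		AccountCheckNameAvailabilityParameters{Name: &name, Type: &typ})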
+func (client AccountsClient) CheckNameAvailabilityPreparer(accountName AccountCheckNameAvailabilityParameters) (*http.Request, error) {
+	pathParameters := map[string]interface{}{
+		"subscriptionId": url.QueryEscape(client.SubscriptionID),
+	}
+
+	queryParameters := map[string]interface{}{
+		"api-version": APIVersion,
+	}
+
+	return autorest.Prepare(&http.Request{},
+		autorest.AsJSON(),
+		autorest.AsPost(),
+		autorest.WithBaseURL(client.BaseURI),
+		autorest.WithPath("/subscriptions/{subscriptionId}/providers/Microsoft.Storage/checkNameAvailability"),
+		autorest.WithJSON(accountName),
+		autorest.WithPathParameters(pathParameters),
+		autorest.WithQueryParameters(queryParameters))
+}
+
+// CheckNameAvailabilitySender sends the CheckNameAvailability request. The method will close the
+// http.Response Body if it receives an error.
+func (client AccountsClient) CheckNameAvailabilitySender(req *http.Request) (*http.Response, error) {
+	return client.Send(req, http.StatusOK)
+}
+
+// CheckNameAvailabilityResponder handles the response to the CheckNameAvailability request. The method always
+// closes the http.Response Body.
+func (client AccountsClient) CheckNameAvailabilityResponder(resp *http.Response) (result CheckNameAvailabilityResult, err error) {
+	err = autorest.Respond(
+		resp,
+		client.ByInspecting(),
+		autorest.WithErrorUnlessStatusCode(http.StatusOK),
+		autorest.ByUnmarshallingJSON(&result),
+		autorest.ByClosing())
+	result.Response = autorest.Response{Response: resp}
+	return
+}
+
+// Create asynchronously creates a new storage account with the specified
+// parameters. Existing accounts cannot be updated with this API; use the
+// Update Storage Account API instead. If an account is already created and a
+// subsequent PUT request is issued with the exact same set of properties,
+// then HTTP 200 is returned.
+//
+// resourceGroupName is the name of the resource group within the user's
+// subscription. accountName is the name of the storage account within the
+// specified resource group. Storage account names must be between 3 and 24
+// characters in length and use numbers and lower-case letters only.
+// parameters is the parameters to provide for the created account.
+func (client AccountsClient) Create(resourceGroupName string, accountName string, parameters AccountCreateParameters) (result Account, ae error) {
+	req, err := client.CreatePreparer(resourceGroupName, accountName, parameters)
+	if err != nil {
+		return result, autorest.NewErrorWithError(err, "storage/AccountsClient", "Create", autorest.UndefinedStatusCode, "Failure preparing request")
+	}
+
+	resp, err := client.CreateSender(req)
+	if err != nil {
+		result.Response = autorest.Response{Response: resp}
+		return result, autorest.NewErrorWithError(err, "storage/AccountsClient", "Create", resp.StatusCode, "Failure sending request")
+	}
+
+	result, err = client.CreateResponder(resp)
+	if err != nil {
+		ae = autorest.NewErrorWithError(err, "storage/AccountsClient", "Create", resp.StatusCode, "Failure responding to request")
+	}
+
+	return
+}
+
+// CreatePreparer prepares the Create request.
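+//
+// Sketch of the parameters this request carries (types and the StandardLRS
+// constant come from models.go; the concrete values are illustrative):
+//
+//	loc := "westus"
+//	params := AccountCreateParameters{
+//		Location:   &loc,
+//		Properties: &AccountPropertiesCreateParameters{AccountType: StandardLRS},
+//	}
+//	account, err := client.Create(resourceGroupName, accountName, params)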
+func (client AccountsClient) CreatePreparer(resourceGroupName string, accountName string, parameters AccountCreateParameters) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "accountName": url.QueryEscape(accountName), + "resourceGroupName": url.QueryEscape(resourceGroupName), + "subscriptionId": url.QueryEscape(client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsPut(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}"), + autorest.WithJSON(parameters), + autorest.WithPathParameters(pathParameters), + autorest.WithQueryParameters(queryParameters)) +} + +// CreateSender sends the Create request. The method will close the +// http.Response Body if it receives an error. +func (client AccountsClient) CreateSender(req *http.Request) (*http.Response, error) { + return client.Send(req, http.StatusOK, http.StatusAccepted) +} + +// CreateResponder handles the response to the Create request. The method always +// closes the http.Response Body. +func (client AccountsClient) CreateResponder(resp *http.Response) (result Account, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + autorest.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// Delete deletes a storage account in Microsoft Azure. +// +// resourceGroupName is the name of the resource group within the user's +// subscription. accountName is the name of the storage account within the +// specified resource group. Storage account names must be between 3 and 24 +// characters in length and use numbers and lower-case letters only. +func (client AccountsClient) Delete(resourceGroupName string, accountName string) (result autorest.Response, ae error) { + req, err := client.DeletePreparer(resourceGroupName, accountName) + if err != nil { + return result, autorest.NewErrorWithError(err, "storage/AccountsClient", "Delete", autorest.UndefinedStatusCode, "Failure preparing request") + } + + resp, err := client.DeleteSender(req) + if err != nil { + result.Response = resp + return result, autorest.NewErrorWithError(err, "storage/AccountsClient", "Delete", resp.StatusCode, "Failure sending request") + } + + result, err = client.DeleteResponder(resp) + if err != nil { + ae = autorest.NewErrorWithError(err, "storage/AccountsClient", "Delete", resp.StatusCode, "Failure responding to request") + } + + return +} + +// DeletePreparer prepares the Delete request. 
+func (client AccountsClient) DeletePreparer(resourceGroupName string, accountName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "accountName": url.QueryEscape(accountName), + "resourceGroupName": url.QueryEscape(resourceGroupName), + "subscriptionId": url.QueryEscape(client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsDelete(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}"), + autorest.WithPathParameters(pathParameters), + autorest.WithQueryParameters(queryParameters)) +} + +// DeleteSender sends the Delete request. The method will close the +// http.Response Body if it receives an error. +func (client AccountsClient) DeleteSender(req *http.Request) (*http.Response, error) { + return client.Send(req, http.StatusOK, http.StatusNoContent) +} + +// DeleteResponder handles the response to the Delete request. The method always +// closes the http.Response Body. +func (client AccountsClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + autorest.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNoContent), + autorest.ByClosing()) + result.Response = resp + return +} + +// GetProperties returns the properties for the specified storage account +// including but not limited to name, account type, location, and account +// status. The ListKeys operation should be used to retrieve storage keys. +// +// resourceGroupName is the name of the resource group within the user's +// subscription. accountName is the name of the storage account within the +// specified resource group. Storage account names must be between 3 and 24 +// characters in length and use numbers and lower-case letters only. +func (client AccountsClient) GetProperties(resourceGroupName string, accountName string) (result Account, ae error) { + req, err := client.GetPropertiesPreparer(resourceGroupName, accountName) + if err != nil { + return result, autorest.NewErrorWithError(err, "storage/AccountsClient", "GetProperties", autorest.UndefinedStatusCode, "Failure preparing request") + } + + resp, err := client.GetPropertiesSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "storage/AccountsClient", "GetProperties", resp.StatusCode, "Failure sending request") + } + + result, err = client.GetPropertiesResponder(resp) + if err != nil { + ae = autorest.NewErrorWithError(err, "storage/AccountsClient", "GetProperties", resp.StatusCode, "Failure responding to request") + } + + return +} + +// GetPropertiesPreparer prepares the GetProperties request. 
+func (client AccountsClient) GetPropertiesPreparer(resourceGroupName string, accountName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "accountName": url.QueryEscape(accountName), + "resourceGroupName": url.QueryEscape(resourceGroupName), + "subscriptionId": url.QueryEscape(client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}"), + autorest.WithPathParameters(pathParameters), + autorest.WithQueryParameters(queryParameters)) +} + +// GetPropertiesSender sends the GetProperties request. The method will close the +// http.Response Body if it receives an error. +func (client AccountsClient) GetPropertiesSender(req *http.Request) (*http.Response, error) { + return client.Send(req, http.StatusOK) +} + +// GetPropertiesResponder handles the response to the GetProperties request. The method always +// closes the http.Response Body. +func (client AccountsClient) GetPropertiesResponder(resp *http.Response) (result Account, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + autorest.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// List lists all the storage accounts available under the subscription. Note +// that storage keys are not returned; use the ListKeys operation for this. +func (client AccountsClient) List() (result AccountListResult, ae error) { + req, err := client.ListPreparer() + if err != nil { + return result, autorest.NewErrorWithError(err, "storage/AccountsClient", "List", autorest.UndefinedStatusCode, "Failure preparing request") + } + + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "storage/AccountsClient", "List", resp.StatusCode, "Failure sending request") + } + + result, err = client.ListResponder(resp) + if err != nil { + ae = autorest.NewErrorWithError(err, "storage/AccountsClient", "List", resp.StatusCode, "Failure responding to request") + } + + return +} + +// ListPreparer prepares the List request. +func (client AccountsClient) ListPreparer() (*http.Request, error) { + pathParameters := map[string]interface{}{ + "subscriptionId": url.QueryEscape(client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPath("/subscriptions/{subscriptionId}/providers/Microsoft.Storage/storageAccounts"), + autorest.WithPathParameters(pathParameters), + autorest.WithQueryParameters(queryParameters)) +} + +// ListSender sends the List request. The method will close the +// http.Response Body if it receives an error. +func (client AccountsClient) ListSender(req *http.Request) (*http.Response, error) { + return client.Send(req, http.StatusOK) +} + +// ListResponder handles the response to the List request. The method always +// closes the http.Response Body. 
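+//
+// The Preparer/Sender/Responder triplet is normally driven by the List
+// wrapper above; a manual sketch (error handling elided):
+//
+//	req, _ := client.ListPreparer()
+//	resp, _ := client.ListSender(req)
+//	result, err := client.ListResponder(resp)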
+func (client AccountsClient) ListResponder(resp *http.Response) (result AccountListResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + autorest.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// ListByResourceGroup lists all the storage accounts available under the +// given resource group. Note that storage keys are not returned; use the +// ListKeys operation for this. +// +// resourceGroupName is the name of the resource group within the user's +// subscription. +func (client AccountsClient) ListByResourceGroup(resourceGroupName string) (result AccountListResult, ae error) { + req, err := client.ListByResourceGroupPreparer(resourceGroupName) + if err != nil { + return result, autorest.NewErrorWithError(err, "storage/AccountsClient", "ListByResourceGroup", autorest.UndefinedStatusCode, "Failure preparing request") + } + + resp, err := client.ListByResourceGroupSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "storage/AccountsClient", "ListByResourceGroup", resp.StatusCode, "Failure sending request") + } + + result, err = client.ListByResourceGroupResponder(resp) + if err != nil { + ae = autorest.NewErrorWithError(err, "storage/AccountsClient", "ListByResourceGroup", resp.StatusCode, "Failure responding to request") + } + + return +} + +// ListByResourceGroupPreparer prepares the ListByResourceGroup request. +func (client AccountsClient) ListByResourceGroupPreparer(resourceGroupName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": url.QueryEscape(resourceGroupName), + "subscriptionId": url.QueryEscape(client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts"), + autorest.WithPathParameters(pathParameters), + autorest.WithQueryParameters(queryParameters)) +} + +// ListByResourceGroupSender sends the ListByResourceGroup request. The method will close the +// http.Response Body if it receives an error. +func (client AccountsClient) ListByResourceGroupSender(req *http.Request) (*http.Response, error) { + return client.Send(req, http.StatusOK) +} + +// ListByResourceGroupResponder handles the response to the ListByResourceGroup request. The method always +// closes the http.Response Body. +func (client AccountsClient) ListByResourceGroupResponder(resp *http.Response) (result AccountListResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + autorest.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// ListKeys lists the access keys for the specified storage account. +// +// resourceGroupName is the name of the resource group. accountName is the +// name of the storage account. 
+func (client AccountsClient) ListKeys(resourceGroupName string, accountName string) (result AccountKeys, ae error) {
+	req, err := client.ListKeysPreparer(resourceGroupName, accountName)
+	if err != nil {
+		return result, autorest.NewErrorWithError(err, "storage/AccountsClient", "ListKeys", autorest.UndefinedStatusCode, "Failure preparing request")
+	}
+
+	resp, err := client.ListKeysSender(req)
+	if err != nil {
+		result.Response = autorest.Response{Response: resp}
+		return result, autorest.NewErrorWithError(err, "storage/AccountsClient", "ListKeys", resp.StatusCode, "Failure sending request")
+	}
+
+	result, err = client.ListKeysResponder(resp)
+	if err != nil {
+		ae = autorest.NewErrorWithError(err, "storage/AccountsClient", "ListKeys", resp.StatusCode, "Failure responding to request")
+	}
+
+	return
+}
+
+// ListKeysPreparer prepares the ListKeys request.
+func (client AccountsClient) ListKeysPreparer(resourceGroupName string, accountName string) (*http.Request, error) {
+	pathParameters := map[string]interface{}{
+		"accountName":       url.QueryEscape(accountName),
+		"resourceGroupName": url.QueryEscape(resourceGroupName),
+		"subscriptionId":    url.QueryEscape(client.SubscriptionID),
+	}
+
+	queryParameters := map[string]interface{}{
+		"api-version": APIVersion,
+	}
+
+	return autorest.Prepare(&http.Request{},
+		autorest.AsJSON(),
+		autorest.AsPost(),
+		autorest.WithBaseURL(client.BaseURI),
+		autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/listKeys"),
+		autorest.WithPathParameters(pathParameters),
+		autorest.WithQueryParameters(queryParameters))
+}
+
+// ListKeysSender sends the ListKeys request. The method will close the
+// http.Response Body if it receives an error.
+func (client AccountsClient) ListKeysSender(req *http.Request) (*http.Response, error) {
+	return client.Send(req, http.StatusOK)
+}
+
+// ListKeysResponder handles the response to the ListKeys request. The method always
+// closes the http.Response Body.
+func (client AccountsClient) ListKeysResponder(resp *http.Response) (result AccountKeys, err error) {
+	err = autorest.Respond(
+		resp,
+		client.ByInspecting(),
+		autorest.WithErrorUnlessStatusCode(http.StatusOK),
+		autorest.ByUnmarshallingJSON(&result),
+		autorest.ByClosing())
+	result.Response = autorest.Response{Response: resp}
+	return
+}
+
+// RegenerateKey regenerates the access keys for the specified storage account.
+//
+// resourceGroupName is the name of the resource group within the user's
+// subscription. accountName is the name of the storage account within the
+// specified resource group. Storage account names must be between 3 and 24
+// characters in length and use numbers and lower-case letters only.
+// regenerateKey specifies the name of the key which should be regenerated:
+// key1 or key2 for the default keys.
+func (client AccountsClient) RegenerateKey(resourceGroupName string, accountName string, regenerateKey AccountRegenerateKeyParameters) (result AccountKeys, ae error) {
+	req, err := client.RegenerateKeyPreparer(resourceGroupName, accountName, regenerateKey)
+	if err != nil {
+		return result, autorest.NewErrorWithError(err, "storage/AccountsClient", "RegenerateKey", autorest.UndefinedStatusCode, "Failure preparing request")
+	}
+
+	resp, err := client.RegenerateKeySender(req)
+	if err != nil {
+		result.Response = autorest.Response{Response: resp}
+		return result, autorest.NewErrorWithError(err, "storage/AccountsClient", "RegenerateKey", resp.StatusCode, "Failure sending request")
+	}
+
+	result, err = client.RegenerateKeyResponder(resp)
+	if err != nil {
+		ae = autorest.NewErrorWithError(err, "storage/AccountsClient", "RegenerateKey", resp.StatusCode, "Failure responding to request")
+	}
+
+	return
+}
+
+// RegenerateKeyPreparer prepares the RegenerateKey request.
+func (client AccountsClient) RegenerateKeyPreparer(resourceGroupName string, accountName string, regenerateKey AccountRegenerateKeyParameters) (*http.Request, error) {
+	pathParameters := map[string]interface{}{
+		"accountName":       url.QueryEscape(accountName),
+		"resourceGroupName": url.QueryEscape(resourceGroupName),
+		"subscriptionId":    url.QueryEscape(client.SubscriptionID),
+	}
+
+	queryParameters := map[string]interface{}{
+		"api-version": APIVersion,
+	}
+
+	return autorest.Prepare(&http.Request{},
+		autorest.AsJSON(),
+		autorest.AsPost(),
+		autorest.WithBaseURL(client.BaseURI),
+		autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/regenerateKey"),
+		autorest.WithJSON(regenerateKey),
+		autorest.WithPathParameters(pathParameters),
+		autorest.WithQueryParameters(queryParameters))
+}
+
+// RegenerateKeySender sends the RegenerateKey request. The method will close the
+// http.Response Body if it receives an error.
+func (client AccountsClient) RegenerateKeySender(req *http.Request) (*http.Response, error) {
+	return client.Send(req, http.StatusOK)
+}
+
+// RegenerateKeyResponder handles the response to the RegenerateKey request. The method always
+// closes the http.Response Body.
+func (client AccountsClient) RegenerateKeyResponder(resp *http.Response) (result AccountKeys, err error) {
+	err = autorest.Respond(
+		resp,
+		client.ByInspecting(),
+		autorest.WithErrorUnlessStatusCode(http.StatusOK),
+		autorest.ByUnmarshallingJSON(&result),
+		autorest.ByClosing())
+	result.Response = autorest.Response{Response: resp}
+	return
+}
+
+// Update updates the account type or tags for a storage account. It can also
+// be used to add a custom domain (note that custom domains cannot be added
+// via the Create operation). Only one custom domain is supported per storage
+// account. In order to replace a custom domain, the old value must be
+// cleared before a new value may be set. To clear a custom domain, simply
+// update the custom domain with an empty string, then call update again with
+// the new custom domain name. The update API can only be used to update one
+// of tags, accountType, or customDomain per call. To update multiple of
+// these properties, call the API multiple times with one change per call.
+// This call does not change the storage keys for the account. If you want to
+// change storage account keys, use the RegenerateKey operation.
The location +// and name of the storage account cannot be changed after creation. +// +// resourceGroupName is the name of the resource group within the user's +// subscription. accountName is the name of the storage account within the +// specified resource group. Storage account names must be between 3 and 24 +// characters in length and use numbers and lower-case letters only. +// parameters is the parameters to update on the account. Note that only one +// property can be changed at a time using this API. +func (client AccountsClient) Update(resourceGroupName string, accountName string, parameters AccountUpdateParameters) (result Account, ae error) { + req, err := client.UpdatePreparer(resourceGroupName, accountName, parameters) + if err != nil { + return result, autorest.NewErrorWithError(err, "storage/AccountsClient", "Update", autorest.UndefinedStatusCode, "Failure preparing request") + } + + resp, err := client.UpdateSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "storage/AccountsClient", "Update", resp.StatusCode, "Failure sending request") + } + + result, err = client.UpdateResponder(resp) + if err != nil { + ae = autorest.NewErrorWithError(err, "storage/AccountsClient", "Update", resp.StatusCode, "Failure responding to request") + } + + return +} + +// UpdatePreparer prepares the Update request. +func (client AccountsClient) UpdatePreparer(resourceGroupName string, accountName string, parameters AccountUpdateParameters) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "accountName": url.QueryEscape(accountName), + "resourceGroupName": url.QueryEscape(resourceGroupName), + "subscriptionId": url.QueryEscape(client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsPatch(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}"), + autorest.WithJSON(parameters), + autorest.WithPathParameters(pathParameters), + autorest.WithQueryParameters(queryParameters)) +} + +// UpdateSender sends the Update request. The method will close the +// http.Response Body if it receives an error. +func (client AccountsClient) UpdateSender(req *http.Request) (*http.Response, error) { + return client.Send(req, http.StatusOK) +} + +// UpdateResponder handles the response to the Update request. The method always +// closes the http.Response Body. +func (client AccountsClient) UpdateResponder(resp *http.Response) (result Account, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + autorest.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/storage/client.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/storage/client.go new file mode 100644 index 000000000..c468e5d79 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/storage/client.go @@ -0,0 +1,52 @@ +package storage + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Code generated by Microsoft (R) AutoRest Code Generator 0.12.0.0
+// Changes may cause incorrect behavior and will be lost if the code is
+// regenerated.
+
+import (
+	"github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest"
+)
+
+const (
+	// APIVersion is the version of the Storage API.
+	APIVersion = "2015-06-15"
+
+	// DefaultBaseURI is the default URI used for the service Storage
+	DefaultBaseURI = "https://management.azure.com"
+)
+
+// ManagementClient is the Storage Management Client.
+type ManagementClient struct {
+	autorest.Client
+	BaseURI        string
+	SubscriptionID string
+}
+
+// New creates an instance of the ManagementClient client.
+func New(subscriptionID string) ManagementClient {
+	return NewWithBaseURI(DefaultBaseURI, subscriptionID)
+}
+
+// NewWithBaseURI creates an instance of the ManagementClient client.
+func NewWithBaseURI(baseURI string, subscriptionID string) ManagementClient {
+	return ManagementClient{
+		Client:         autorest.NewClientWithUserAgent(UserAgent()),
+		BaseURI:        baseURI,
+		SubscriptionID: subscriptionID,
+	}
+}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/storage/models.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/storage/models.go
new file mode 100644
index 000000000..fca182c96
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/storage/models.go
@@ -0,0 +1,217 @@
+package storage
+
+// Copyright (c) Microsoft and contributors. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Code generated by Microsoft (R) AutoRest Code Generator 0.12.0.0
+// Changes may cause incorrect behavior and will be lost if the code is
+// regenerated.
+
+import (
+	"github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest"
+	"github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/date"
+)
+
+// AccountStatus enumerates the values for account status.
+type AccountStatus string
+
+const (
+	// Available specifies the available state for account status.
+	Available AccountStatus = "Available"
+	// Unavailable specifies the unavailable state for account status.
+	Unavailable AccountStatus = "Unavailable"
+)
+
+// AccountType enumerates the values for account type.
+type AccountType string
+
+const (
+	// PremiumLRS specifies the premium lrs state for account type.
+	PremiumLRS AccountType = "Premium_LRS"
+	// StandardGRS specifies the standard grs state for account type.
+	StandardGRS AccountType = "Standard_GRS"
+	// StandardLRS specifies the standard lrs state for account type.
+ StandardLRS AccountType = "Standard_LRS" + // StandardRAGRS specifies the standard ragrs state for account type. + StandardRAGRS AccountType = "Standard_RAGRS" + // StandardZRS specifies the standard zrs state for account type. + StandardZRS AccountType = "Standard_ZRS" +) + +// ProvisioningState enumerates the values for provisioning state. +type ProvisioningState string + +const ( + // Creating specifies the creating state for provisioning state. + Creating ProvisioningState = "Creating" + // ResolvingDNS specifies the resolving dns state for provisioning state. + ResolvingDNS ProvisioningState = "ResolvingDNS" + // Succeeded specifies the succeeded state for provisioning state. + Succeeded ProvisioningState = "Succeeded" +) + +// Reason enumerates the values for reason. +type Reason string + +const ( + // AccountNameInvalid specifies the account name invalid state for reason. + AccountNameInvalid Reason = "AccountNameInvalid" + // AlreadyExists specifies the already exists state for reason. + AlreadyExists Reason = "AlreadyExists" +) + +// UsageUnit enumerates the values for usage unit. +type UsageUnit string + +const ( + // Bytes specifies the bytes state for usage unit. + Bytes UsageUnit = "Bytes" + // BytesPerSecond specifies the bytes per second state for usage unit. + BytesPerSecond UsageUnit = "BytesPerSecond" + // Count specifies the count state for usage unit. + Count UsageUnit = "Count" + // CountsPerSecond specifies the counts per second state for usage unit. + CountsPerSecond UsageUnit = "CountsPerSecond" + // Percent specifies the percent state for usage unit. + Percent UsageUnit = "Percent" + // Seconds specifies the seconds state for usage unit. + Seconds UsageUnit = "Seconds" +) + +// Account is the storage account. +type Account struct { + autorest.Response `json:"-"` + ID *string `json:"id,omitempty"` + Name *string `json:"name,omitempty"` + Type *string `json:"type,omitempty"` + Location *string `json:"location,omitempty"` + Tags *map[string]*string `json:"tags,omitempty"` + Properties *AccountProperties `json:"properties,omitempty"` +} + +// AccountCheckNameAvailabilityParameters is +type AccountCheckNameAvailabilityParameters struct { + Name *string `json:"name,omitempty"` + Type *string `json:"type,omitempty"` +} + +// AccountCreateParameters is the parameters to provide for the account. +type AccountCreateParameters struct { + Location *string `json:"location,omitempty"` + Tags *map[string]*string `json:"tags,omitempty"` + Properties *AccountPropertiesCreateParameters `json:"properties,omitempty"` +} + +// AccountKeys is the access keys for the storage account. +type AccountKeys struct { + autorest.Response `json:"-"` + Key1 *string `json:"key1,omitempty"` + Key2 *string `json:"key2,omitempty"` +} + +// AccountListResult is the list storage accounts operation response. 
+type AccountListResult struct {
+	autorest.Response `json:"-"`
+	Value             *[]Account `json:"value,omitempty"`
+}
+
+// AccountProperties is
+type AccountProperties struct {
+	ProvisioningState   ProvisioningState `json:"provisioningState,omitempty"`
+	AccountType         AccountType       `json:"accountType,omitempty"`
+	PrimaryEndpoints    *Endpoints        `json:"primaryEndpoints,omitempty"`
+	PrimaryLocation     *string           `json:"primaryLocation,omitempty"`
+	StatusOfPrimary     AccountStatus     `json:"statusOfPrimary,omitempty"`
+	LastGeoFailoverTime *date.Time        `json:"lastGeoFailoverTime,omitempty"`
+	SecondaryLocation   *string           `json:"secondaryLocation,omitempty"`
+	StatusOfSecondary   AccountStatus     `json:"statusOfSecondary,omitempty"`
+	CreationTime        *date.Time        `json:"creationTime,omitempty"`
+	CustomDomain        *CustomDomain     `json:"customDomain,omitempty"`
+	SecondaryEndpoints  *Endpoints        `json:"secondaryEndpoints,omitempty"`
+}
+
+// AccountPropertiesCreateParameters is
+type AccountPropertiesCreateParameters struct {
+	AccountType AccountType `json:"accountType,omitempty"`
+}
+
+// AccountPropertiesUpdateParameters is
+type AccountPropertiesUpdateParameters struct {
+	AccountType  AccountType   `json:"accountType,omitempty"`
+	CustomDomain *CustomDomain `json:"customDomain,omitempty"`
+}
+
+// AccountRegenerateKeyParameters is
+type AccountRegenerateKeyParameters struct {
+	KeyName *string `json:"keyName,omitempty"`
+}
+
+// AccountUpdateParameters is the parameters to update on the account.
+type AccountUpdateParameters struct {
+	Tags       *map[string]*string                `json:"tags,omitempty"`
+	Properties *AccountPropertiesUpdateParameters `json:"properties,omitempty"`
+}
+
+// CheckNameAvailabilityResult is the CheckNameAvailability operation response.
+type CheckNameAvailabilityResult struct {
+	autorest.Response `json:"-"`
+	NameAvailable     *bool   `json:"nameAvailable,omitempty"`
+	Reason            Reason  `json:"reason,omitempty"`
+	Message           *string `json:"message,omitempty"`
+}
+
+// CustomDomain is the custom domain assigned to this storage account. This
+// can be set via Update.
+type CustomDomain struct {
+	Name         *string `json:"name,omitempty"`
+	UseSubDomain *bool   `json:"useSubDomain,omitempty"`
+}
+
+// Endpoints is the URIs that are used to perform a retrieval of a public
+// blob, queue or table object.
+type Endpoints struct {
+	Blob  *string `json:"blob,omitempty"`
+	Queue *string `json:"queue,omitempty"`
+	Table *string `json:"table,omitempty"`
+	File  *string `json:"file,omitempty"`
+}
+
+// Resource is
+type Resource struct {
+	ID       *string             `json:"id,omitempty"`
+	Name     *string             `json:"name,omitempty"`
+	Type     *string             `json:"type,omitempty"`
+	Location *string             `json:"location,omitempty"`
+	Tags     *map[string]*string `json:"tags,omitempty"`
+}
+
+// Usage describes Storage Resource Usage.
+type Usage struct {
+	Unit         UsageUnit  `json:"unit,omitempty"`
+	CurrentValue *int       `json:"currentValue,omitempty"`
+	Limit        *int       `json:"limit,omitempty"`
+	Name         *UsageName `json:"name,omitempty"`
+}
+
+// UsageListResult is the List Usages operation response.
+type UsageListResult struct {
+	autorest.Response `json:"-"`
+	Value             *[]Usage `json:"value,omitempty"`
+}
+
+// UsageName is the usage names.
+type UsageName struct {
+	Value          *string `json:"value,omitempty"`
+	LocalizedValue *string `json:"localizedValue,omitempty"`
+}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/storage/usageoperations.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/storage/usageoperations.go
new file mode 100644
index 000000000..f99018a1b
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/storage/usageoperations.go
@@ -0,0 +1,102 @@
+package storage
+
+// Copyright (c) Microsoft and contributors. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Code generated by Microsoft (R) AutoRest Code Generator 0.12.0.0
+// Changes may cause incorrect behavior and will be lost if the code is
+// regenerated.
+
+import (
+	"github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest"
+	"net/http"
+	"net/url"
+)
+
+// UsageOperationsClient is the Storage Management Client.
+type UsageOperationsClient struct {
+	ManagementClient
+}
+
+// NewUsageOperationsClient creates an instance of the UsageOperationsClient
+// client.
+func NewUsageOperationsClient(subscriptionID string) UsageOperationsClient {
+	return NewUsageOperationsClientWithBaseURI(DefaultBaseURI, subscriptionID)
+}
+
+// NewUsageOperationsClientWithBaseURI creates an instance of the
+// UsageOperationsClient client.
+func NewUsageOperationsClientWithBaseURI(baseURI string, subscriptionID string) UsageOperationsClient {
+	return UsageOperationsClient{NewWithBaseURI(baseURI, subscriptionID)}
+}
+
+// List gets the current usage count and the limit for the resources under the
+// subscription.
+func (client UsageOperationsClient) List() (result UsageListResult, ae error) {
+	req, err := client.ListPreparer()
+	if err != nil {
+		return result, autorest.NewErrorWithError(err, "storage/UsageOperationsClient", "List", autorest.UndefinedStatusCode, "Failure preparing request")
+	}
+
+	resp, err := client.ListSender(req)
+	if err != nil {
+		result.Response = autorest.Response{Response: resp}
+		return result, autorest.NewErrorWithError(err, "storage/UsageOperationsClient", "List", resp.StatusCode, "Failure sending request")
+	}
+
+	result, err = client.ListResponder(resp)
+	if err != nil {
+		ae = autorest.NewErrorWithError(err, "storage/UsageOperationsClient", "List", resp.StatusCode, "Failure responding to request")
+	}
+
+	return
+}
+
+// ListPreparer prepares the List request.
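+//
+// The usual entry point is the List wrapper above; a sketch (the
+// subscription ID is a placeholder):
+//
+//	c := NewUsageOperationsClient("00000000-0000-0000-0000-000000000000")
+//	usages, err := c.List()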
+func (client UsageOperationsClient) ListPreparer() (*http.Request, error) { + pathParameters := map[string]interface{}{ + "subscriptionId": url.QueryEscape(client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPath("/subscriptions/{subscriptionId}/providers/Microsoft.Storage/usages"), + autorest.WithPathParameters(pathParameters), + autorest.WithQueryParameters(queryParameters)) +} + +// ListSender sends the List request. The method will close the +// http.Response Body if it receives an error. +func (client UsageOperationsClient) ListSender(req *http.Request) (*http.Response, error) { + return client.Send(req, http.StatusOK) +} + +// ListResponder handles the response to the List request. The method always +// closes the http.Response Body. +func (client UsageOperationsClient) ListResponder(resp *http.Response) (result UsageListResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + autorest.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/storage/version.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/storage/version.go new file mode 100644 index 000000000..8d4883fa6 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/storage/version.go @@ -0,0 +1,43 @@ +package storage + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator 0.12.0.0 +// Changes may cause incorrect behavior and will be lost if the code is +// regenerated. + +import ( + "fmt" +) + +const ( + major = "0" + minor = "4" + patch = "0" + // Always begin a "tag" with a dash (as per http://semver.org) + tag = "-beta" + semVerFormat = "%s.%s.%s%s" + userAgentFormat = "Azure-SDK-for-Go/%s;Package arm/%s;API %s" +) + +// UserAgent returns the UserAgent string to use when sending http.Requests. +func UserAgent() string { + return fmt.Sprintf(userAgentFormat, Version(), "storage", "2015-06-15") +} + +// Version returns the semantic version (see http://semver.org) of the client. +func Version() string { + return fmt.Sprintf(semVerFormat, major, minor, patch, tag) +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/core/http/cgi/child.go b/vendor/github.com/Azure/azure-sdk-for-go/core/http/cgi/child.go new file mode 100644 index 000000000..45fc2e57c --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/core/http/cgi/child.go @@ -0,0 +1,206 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// This file implements CGI from the perspective of a child +// process. 
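+//
+// A minimal child-side sketch (illustrative; assumes the program is run by
+// a CGI-aware web server):
+//
+//	err := cgi.Serve(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+//		fmt.Fprintln(w, "hello from CGI")
+//	}))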
+ +package cgi + +import ( + "bufio" + "crypto/tls" + "errors" + "fmt" + "io" + "io/ioutil" + "net" + "net/http" + "net/url" + "os" + "strconv" + "strings" +) + +// Request returns the HTTP request as represented in the current +// environment. This assumes the current program is being run +// by a web server in a CGI environment. +// The returned Request's Body is populated, if applicable. +func Request() (*http.Request, error) { + r, err := RequestFromMap(envMap(os.Environ())) + if err != nil { + return nil, err + } + if r.ContentLength > 0 { + r.Body = ioutil.NopCloser(io.LimitReader(os.Stdin, r.ContentLength)) + } + return r, nil +} + +func envMap(env []string) map[string]string { + m := make(map[string]string) + for _, kv := range env { + if idx := strings.Index(kv, "="); idx != -1 { + m[kv[:idx]] = kv[idx+1:] + } + } + return m +} + +// RequestFromMap creates an http.Request from CGI variables. +// The returned Request's Body field is not populated. +func RequestFromMap(params map[string]string) (*http.Request, error) { + r := new(http.Request) + r.Method = params["REQUEST_METHOD"] + if r.Method == "" { + return nil, errors.New("cgi: no REQUEST_METHOD in environment") + } + + r.Proto = params["SERVER_PROTOCOL"] + var ok bool + r.ProtoMajor, r.ProtoMinor, ok = http.ParseHTTPVersion(r.Proto) + if !ok { + return nil, errors.New("cgi: invalid SERVER_PROTOCOL version") + } + + r.Close = true + r.Trailer = http.Header{} + r.Header = http.Header{} + + r.Host = params["HTTP_HOST"] + + if lenstr := params["CONTENT_LENGTH"]; lenstr != "" { + clen, err := strconv.ParseInt(lenstr, 10, 64) + if err != nil { + return nil, errors.New("cgi: bad CONTENT_LENGTH in environment: " + lenstr) + } + r.ContentLength = clen + } + + if ct := params["CONTENT_TYPE"]; ct != "" { + r.Header.Set("Content-Type", ct) + } + + // Copy "HTTP_FOO_BAR" variables to "Foo-Bar" Headers + for k, v := range params { + if !strings.HasPrefix(k, "HTTP_") || k == "HTTP_HOST" { + continue + } + r.Header.Add(strings.Replace(k[5:], "_", "-", -1), v) + } + + // TODO: cookies. parsing them isn't exported, though. + + uriStr := params["REQUEST_URI"] + if uriStr == "" { + // Fallback to SCRIPT_NAME, PATH_INFO and QUERY_STRING. + uriStr = params["SCRIPT_NAME"] + params["PATH_INFO"] + s := params["QUERY_STRING"] + if s != "" { + uriStr += "?" + s + } + } + + // There's apparently a de-facto standard for this. + // http://docstore.mik.ua/orelly/linux/cgi/ch03_02.htm#ch03-35636 + if s := params["HTTPS"]; s == "on" || s == "ON" || s == "1" { + r.TLS = &tls.ConnectionState{HandshakeComplete: true} + } + + if r.Host != "" { + // Hostname is provided, so we can reasonably construct a URL. + rawurl := r.Host + uriStr + if r.TLS == nil { + rawurl = "http://" + rawurl + } else { + rawurl = "https://" + rawurl + } + url, err := url.Parse(rawurl) + if err != nil { + return nil, errors.New("cgi: failed to parse host and REQUEST_URI into a URL: " + rawurl) + } + r.URL = url + } + // Fallback logic if we don't have a Host header or the URL + // failed to parse + if r.URL == nil { + url, err := url.Parse(uriStr) + if err != nil { + return nil, errors.New("cgi: failed to parse REQUEST_URI into a URL: " + uriStr) + } + r.URL = url + } + + // Request.RemoteAddr has its port set by Go's standard http + // server, so we do here too. We don't have one, though, so we + // use a dummy one. + r.RemoteAddr = net.JoinHostPort(params["REMOTE_ADDR"], "0") + + return r, nil +} + +// Serve executes the provided Handler on the currently active CGI +// request, if any. 
If there's no current CGI environment +// an error is returned. The provided handler may be nil to use +// http.DefaultServeMux. +func Serve(handler http.Handler) error { + req, err := Request() + if err != nil { + return err + } + if handler == nil { + handler = http.DefaultServeMux + } + rw := &response{ + req: req, + header: make(http.Header), + bufw: bufio.NewWriter(os.Stdout), + } + handler.ServeHTTP(rw, req) + rw.Write(nil) // make sure a response is sent + if err = rw.bufw.Flush(); err != nil { + return err + } + return nil +} + +type response struct { + req *http.Request + header http.Header + bufw *bufio.Writer + headerSent bool +} + +func (r *response) Flush() { + r.bufw.Flush() +} + +func (r *response) Header() http.Header { + return r.header +} + +func (r *response) Write(p []byte) (n int, err error) { + if !r.headerSent { + r.WriteHeader(http.StatusOK) + } + return r.bufw.Write(p) +} + +func (r *response) WriteHeader(code int) { + if r.headerSent { + // Note: explicitly using Stderr, as Stdout is our HTTP output. + fmt.Fprintf(os.Stderr, "CGI attempted to write header twice on request for %s", r.req.URL) + return + } + r.headerSent = true + fmt.Fprintf(r.bufw, "Status: %d %s\r\n", code, http.StatusText(code)) + + // Set a default Content-Type + if _, hasType := r.header["Content-Type"]; !hasType { + r.header.Add("Content-Type", "text/html; charset=utf-8") + } + + r.header.Write(r.bufw) + r.bufw.WriteString("\r\n") + r.bufw.Flush() +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/core/http/cgi/child_test.go b/vendor/github.com/Azure/azure-sdk-for-go/core/http/cgi/child_test.go new file mode 100644 index 000000000..075d8411b --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/core/http/cgi/child_test.go @@ -0,0 +1,131 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
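+
+// For orientation when reading these tests: a child-side CGI program
+// built on this package would look roughly like the sketch below. The
+// sketch is illustrative only (it is not part of the vendored source,
+// and is shown with the standard library's net/http/cgi import path,
+// of which this package is a copy); it assumes the binary is executed
+// by a CGI-capable web server, which is what populates the environment
+// that Request reads.
+//
+//	package main
+//
+//	import (
+//		"fmt"
+//		"log"
+//		"net/http"
+//		"net/http/cgi"
+//	)
+//
+//	func main() {
+//		handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+//			// Headers must be set before the first Write.
+//			w.Header().Set("Content-Type", "text/plain")
+//			fmt.Fprintf(w, "method=%s path=%s\n", r.Method, r.URL.Path)
+//		})
+//		if err := cgi.Serve(handler); err != nil {
+//			log.Fatal(err)
+//		}
+//	}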
+ +// Tests for CGI (the child process perspective) + +package cgi + +import ( + "testing" +) + +func TestRequest(t *testing.T) { + env := map[string]string{ + "SERVER_PROTOCOL": "HTTP/1.1", + "REQUEST_METHOD": "GET", + "HTTP_HOST": "example.com", + "HTTP_REFERER": "elsewhere", + "HTTP_USER_AGENT": "goclient", + "HTTP_FOO_BAR": "baz", + "REQUEST_URI": "/path?a=b", + "CONTENT_LENGTH": "123", + "CONTENT_TYPE": "text/xml", + "REMOTE_ADDR": "5.6.7.8", + } + req, err := RequestFromMap(env) + if err != nil { + t.Fatalf("RequestFromMap: %v", err) + } + if g, e := req.UserAgent(), "goclient"; e != g { + t.Errorf("expected UserAgent %q; got %q", e, g) + } + if g, e := req.Method, "GET"; e != g { + t.Errorf("expected Method %q; got %q", e, g) + } + if g, e := req.Header.Get("Content-Type"), "text/xml"; e != g { + t.Errorf("expected Content-Type %q; got %q", e, g) + } + if g, e := req.ContentLength, int64(123); e != g { + t.Errorf("expected ContentLength %d; got %d", e, g) + } + if g, e := req.Referer(), "elsewhere"; e != g { + t.Errorf("expected Referer %q; got %q", e, g) + } + if req.Header == nil { + t.Fatalf("unexpected nil Header") + } + if g, e := req.Header.Get("Foo-Bar"), "baz"; e != g { + t.Errorf("expected Foo-Bar %q; got %q", e, g) + } + if g, e := req.URL.String(), "http://example.com/path?a=b"; e != g { + t.Errorf("expected URL %q; got %q", e, g) + } + if g, e := req.FormValue("a"), "b"; e != g { + t.Errorf("expected FormValue(a) %q; got %q", e, g) + } + if req.Trailer == nil { + t.Errorf("unexpected nil Trailer") + } + if req.TLS != nil { + t.Errorf("expected nil TLS") + } + if e, g := "5.6.7.8:0", req.RemoteAddr; e != g { + t.Errorf("RemoteAddr: got %q; want %q", g, e) + } +} + +func TestRequestWithTLS(t *testing.T) { + env := map[string]string{ + "SERVER_PROTOCOL": "HTTP/1.1", + "REQUEST_METHOD": "GET", + "HTTP_HOST": "example.com", + "HTTP_REFERER": "elsewhere", + "REQUEST_URI": "/path?a=b", + "CONTENT_TYPE": "text/xml", + "HTTPS": "1", + "REMOTE_ADDR": "5.6.7.8", + } + req, err := RequestFromMap(env) + if err != nil { + t.Fatalf("RequestFromMap: %v", err) + } + if g, e := req.URL.String(), "https://example.com/path?a=b"; e != g { + t.Errorf("expected URL %q; got %q", e, g) + } + if req.TLS == nil { + t.Errorf("expected non-nil TLS") + } +} + +func TestRequestWithoutHost(t *testing.T) { + env := map[string]string{ + "SERVER_PROTOCOL": "HTTP/1.1", + "HTTP_HOST": "", + "REQUEST_METHOD": "GET", + "REQUEST_URI": "/path?a=b", + "CONTENT_LENGTH": "123", + } + req, err := RequestFromMap(env) + if err != nil { + t.Fatalf("RequestFromMap: %v", err) + } + if req.URL == nil { + t.Fatalf("unexpected nil URL") + } + if g, e := req.URL.String(), "/path?a=b"; e != g { + t.Errorf("URL = %q; want %q", g, e) + } +} + +func TestRequestWithoutRequestURI(t *testing.T) { + env := map[string]string{ + "SERVER_PROTOCOL": "HTTP/1.1", + "HTTP_HOST": "example.com", + "REQUEST_METHOD": "GET", + "SCRIPT_NAME": "/dir/scriptname", + "PATH_INFO": "/p1/p2", + "QUERY_STRING": "a=1&b=2", + "CONTENT_LENGTH": "123", + } + req, err := RequestFromMap(env) + if err != nil { + t.Fatalf("RequestFromMap: %v", err) + } + if req.URL == nil { + t.Fatalf("unexpected nil URL") + } + if g, e := req.URL.String(), "http://example.com/dir/scriptname/p1/p2?a=1&b=2"; e != g { + t.Errorf("URL = %q; want %q", g, e) + } +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/core/http/cgi/host.go b/vendor/github.com/Azure/azure-sdk-for-go/core/http/cgi/host.go new file mode 100644 index 000000000..ec95a972c --- /dev/null +++ 
b/vendor/github.com/Azure/azure-sdk-for-go/core/http/cgi/host.go @@ -0,0 +1,377 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// This file implements the host side of CGI (being the webserver +// parent process). + +// Package cgi implements CGI (Common Gateway Interface) as specified +// in RFC 3875. +// +// Note that using CGI means starting a new process to handle each +// request, which is typically less efficient than using a +// long-running server. This package is intended primarily for +// compatibility with existing systems. +package cgi + +import ( + "bufio" + "fmt" + "io" + "log" + "net/http" + "os" + "os/exec" + "path/filepath" + "regexp" + "runtime" + "strconv" + "strings" +) + +var trailingPort = regexp.MustCompile(`:([0-9]+)$`) + +var osDefaultInheritEnv = map[string][]string{ + "darwin": {"DYLD_LIBRARY_PATH"}, + "freebsd": {"LD_LIBRARY_PATH"}, + "hpux": {"LD_LIBRARY_PATH", "SHLIB_PATH"}, + "irix": {"LD_LIBRARY_PATH", "LD_LIBRARYN32_PATH", "LD_LIBRARY64_PATH"}, + "linux": {"LD_LIBRARY_PATH"}, + "openbsd": {"LD_LIBRARY_PATH"}, + "solaris": {"LD_LIBRARY_PATH", "LD_LIBRARY_PATH_32", "LD_LIBRARY_PATH_64"}, + "windows": {"SystemRoot", "COMSPEC", "PATHEXT", "WINDIR"}, +} + +// Handler runs an executable in a subprocess with a CGI environment. +type Handler struct { + Path string // path to the CGI executable + Root string // root URI prefix of handler or empty for "/" + + // Dir specifies the CGI executable's working directory. + // If Dir is empty, the base directory of Path is used. + // If Path has no base directory, the current working + // directory is used. + Dir string + + Env []string // extra environment variables to set, if any, as "key=value" + InheritEnv []string // environment variables to inherit from host, as "key" + Logger *log.Logger // optional log for errors or nil to use log.Print + Args []string // optional arguments to pass to child process + + // PathLocationHandler specifies the root http Handler that + // should handle internal redirects when the CGI process + // returns a Location header value starting with a "/", as + // specified in RFC 3875 § 6.3.2. This will likely be + // http.DefaultServeMux. + // + // If nil, a CGI response with a local URI path is instead sent + // back to the client and not redirected internally. + PathLocationHandler http.Handler +} + +// removeLeadingDuplicates remove leading duplicate in environments. +// It's possible to override environment like following. +// cgi.Handler{ +// ... 
+// Env: []string{"SCRIPT_FILENAME=foo.php"}, +// } +func removeLeadingDuplicates(env []string) (ret []string) { + n := len(env) + for i := 0; i < n; i++ { + e := env[i] + s := strings.SplitN(e, "=", 2)[0] + found := false + for j := i + 1; j < n; j++ { + if s == strings.SplitN(env[j], "=", 2)[0] { + found = true + break + } + } + if !found { + ret = append(ret, e) + } + } + return +} + +func (h *Handler) ServeHTTP(rw http.ResponseWriter, req *http.Request) { + root := h.Root + if root == "" { + root = "/" + } + + if len(req.TransferEncoding) > 0 && req.TransferEncoding[0] == "chunked" { + rw.WriteHeader(http.StatusBadRequest) + rw.Write([]byte("Chunked request bodies are not supported by CGI.")) + return + } + + pathInfo := req.URL.Path + if root != "/" && strings.HasPrefix(pathInfo, root) { + pathInfo = pathInfo[len(root):] + } + + port := "80" + if matches := trailingPort.FindStringSubmatch(req.Host); len(matches) != 0 { + port = matches[1] + } + + env := []string{ + "SERVER_SOFTWARE=go", + "SERVER_NAME=" + req.Host, + "SERVER_PROTOCOL=HTTP/1.1", + "HTTP_HOST=" + req.Host, + "GATEWAY_INTERFACE=CGI/1.1", + "REQUEST_METHOD=" + req.Method, + "QUERY_STRING=" + req.URL.RawQuery, + "REQUEST_URI=" + req.URL.RequestURI(), + "PATH_INFO=" + pathInfo, + "SCRIPT_NAME=" + root, + "SCRIPT_FILENAME=" + h.Path, + "REMOTE_ADDR=" + req.RemoteAddr, + "REMOTE_HOST=" + req.RemoteAddr, + "SERVER_PORT=" + port, + } + + if req.TLS != nil { + env = append(env, "HTTPS=on") + } + + for k, v := range req.Header { + k = strings.Map(upperCaseAndUnderscore, k) + joinStr := ", " + if k == "COOKIE" { + joinStr = "; " + } + env = append(env, "HTTP_"+k+"="+strings.Join(v, joinStr)) + } + + if req.ContentLength > 0 { + env = append(env, fmt.Sprintf("CONTENT_LENGTH=%d", req.ContentLength)) + } + if ctype := req.Header.Get("Content-Type"); ctype != "" { + env = append(env, "CONTENT_TYPE="+ctype) + } + + if h.Env != nil { + env = append(env, h.Env...) + } + + envPath := os.Getenv("PATH") + if envPath == "" { + envPath = "/bin:/usr/bin:/usr/ucb:/usr/bsd:/usr/local/bin" + } + env = append(env, "PATH="+envPath) + + for _, e := range h.InheritEnv { + if v := os.Getenv(e); v != "" { + env = append(env, e+"="+v) + } + } + + for _, e := range osDefaultInheritEnv[runtime.GOOS] { + if v := os.Getenv(e); v != "" { + env = append(env, e+"="+v) + } + } + + env = removeLeadingDuplicates(env) + + var cwd, path string + if h.Dir != "" { + path = h.Path + cwd = h.Dir + } else { + cwd, path = filepath.Split(h.Path) + } + if cwd == "" { + cwd = "." 
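+		// (cwd is empty only when h.Path is a bare file name with no
+		// directory component; "." then matches how exec resolves a
+		// relative Path against Dir.)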
+ } + + internalError := func(err error) { + rw.WriteHeader(http.StatusInternalServerError) + h.printf("CGI error: %v", err) + } + + cmd := &exec.Cmd{ + Path: path, + Args: append([]string{h.Path}, h.Args...), + Dir: cwd, + Env: env, + Stderr: os.Stderr, // for now + } + if req.ContentLength != 0 { + cmd.Stdin = req.Body + } + stdoutRead, err := cmd.StdoutPipe() + if err != nil { + internalError(err) + return + } + + err = cmd.Start() + if err != nil { + internalError(err) + return + } + if hook := testHookStartProcess; hook != nil { + hook(cmd.Process) + } + defer cmd.Wait() + defer stdoutRead.Close() + + linebody := bufio.NewReaderSize(stdoutRead, 1024) + headers := make(http.Header) + statusCode := 0 + headerLines := 0 + sawBlankLine := false + for { + line, isPrefix, err := linebody.ReadLine() + if isPrefix { + rw.WriteHeader(http.StatusInternalServerError) + h.printf("cgi: long header line from subprocess.") + return + } + if err == io.EOF { + break + } + if err != nil { + rw.WriteHeader(http.StatusInternalServerError) + h.printf("cgi: error reading headers: %v", err) + return + } + if len(line) == 0 { + sawBlankLine = true + break + } + headerLines++ + parts := strings.SplitN(string(line), ":", 2) + if len(parts) < 2 { + h.printf("cgi: bogus header line: %s", string(line)) + continue + } + header, val := parts[0], parts[1] + header = strings.TrimSpace(header) + val = strings.TrimSpace(val) + switch { + case header == "Status": + if len(val) < 3 { + h.printf("cgi: bogus status (short): %q", val) + return + } + code, err := strconv.Atoi(val[0:3]) + if err != nil { + h.printf("cgi: bogus status: %q", val) + h.printf("cgi: line was %q", line) + return + } + statusCode = code + default: + headers.Add(header, val) + } + } + if headerLines == 0 || !sawBlankLine { + rw.WriteHeader(http.StatusInternalServerError) + h.printf("cgi: no headers") + return + } + + if loc := headers.Get("Location"); loc != "" { + if strings.HasPrefix(loc, "/") && h.PathLocationHandler != nil { + h.handleInternalRedirect(rw, req, loc) + return + } + if statusCode == 0 { + statusCode = http.StatusFound + } + } + + if statusCode == 0 && headers.Get("Content-Type") == "" { + rw.WriteHeader(http.StatusInternalServerError) + h.printf("cgi: missing required Content-Type in headers") + return + } + + if statusCode == 0 { + statusCode = http.StatusOK + } + + // Copy headers to rw's headers, after we've decided not to + // go into handleInternalRedirect, which won't want its rw + // headers to have been touched. + for k, vv := range headers { + for _, v := range vv { + rw.Header().Add(k, v) + } + } + + rw.WriteHeader(statusCode) + + _, err = io.Copy(rw, linebody) + if err != nil { + h.printf("cgi: copy error: %v", err) + // And kill the child CGI process so we don't hang on + // the deferred cmd.Wait above if the error was just + // the client (rw) going away. If it was a read error + // (because the child died itself), then the extra + // kill of an already-dead process is harmless (the PID + // won't be reused until the Wait above). + cmd.Process.Kill() + } +} + +func (h *Handler) printf(format string, v ...interface{}) { + if h.Logger != nil { + h.Logger.Printf(format, v...) + } else { + log.Printf(format, v...) 
+ } +} + +func (h *Handler) handleInternalRedirect(rw http.ResponseWriter, req *http.Request, path string) { + url, err := req.URL.Parse(path) + if err != nil { + rw.WriteHeader(http.StatusInternalServerError) + h.printf("cgi: error resolving local URI path %q: %v", path, err) + return + } + // TODO: RFC 3875 isn't clear if only GET is supported, but it + // suggests so: "Note that any message-body attached to the + // request (such as for a POST request) may not be available + // to the resource that is the target of the redirect." We + // should do some tests against Apache to see how it handles + // POST, HEAD, etc. Does the internal redirect get the same + // method or just GET? What about incoming headers? + // (e.g. Cookies) Which headers, if any, are copied into the + // second request? + newReq := &http.Request{ + Method: "GET", + URL: url, + Proto: "HTTP/1.1", + ProtoMajor: 1, + ProtoMinor: 1, + Header: make(http.Header), + Host: url.Host, + RemoteAddr: req.RemoteAddr, + TLS: req.TLS, + } + h.PathLocationHandler.ServeHTTP(rw, newReq) +} + +func upperCaseAndUnderscore(r rune) rune { + switch { + case r >= 'a' && r <= 'z': + return r - ('a' - 'A') + case r == '-': + return '_' + case r == '=': + // Maybe not part of the CGI 'spec' but would mess up + // the environment in any case, as Go represents the + // environment as a slice of "key=value" strings. + return '_' + } + // TODO: other transformations in spec or practice? + return r +} + +var testHookStartProcess func(*os.Process) // nil except for some tests diff --git a/vendor/github.com/Azure/azure-sdk-for-go/core/http/cgi/host_test.go b/vendor/github.com/Azure/azure-sdk-for-go/core/http/cgi/host_test.go new file mode 100644 index 000000000..8c16e6897 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/core/http/cgi/host_test.go @@ -0,0 +1,461 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Tests for package cgi + +package cgi + +import ( + "bufio" + "fmt" + "io" + "net" + "net/http" + "net/http/httptest" + "os" + "os/exec" + "path/filepath" + "runtime" + "strconv" + "strings" + "testing" + "time" +) + +func newRequest(httpreq string) *http.Request { + buf := bufio.NewReader(strings.NewReader(httpreq)) + req, err := http.ReadRequest(buf) + if err != nil { + panic("cgi: bogus http request in test: " + httpreq) + } + req.RemoteAddr = "1.2.3.4" + return req +} + +func runCgiTest(t *testing.T, h *Handler, httpreq string, expectedMap map[string]string) *httptest.ResponseRecorder { + rw := httptest.NewRecorder() + req := newRequest(httpreq) + h.ServeHTTP(rw, req) + + // Make a map to hold the test map that the CGI returns. + m := make(map[string]string) + m["_body"] = rw.Body.String() + linesRead := 0 +readlines: + for { + line, err := rw.Body.ReadString('\n') + switch { + case err == io.EOF: + break readlines + case err != nil: + t.Fatalf("unexpected error reading from CGI: %v", err) + } + linesRead++ + trimmedLine := strings.TrimRight(line, "\r\n") + split := strings.SplitN(trimmedLine, "=", 2) + if len(split) != 2 { + t.Fatalf("Unexpected %d parts from invalid line number %v: %q; existing map=%v", + len(split), linesRead, line, m) + } + m[split[0]] = split[1] + } + + for key, expected := range expectedMap { + got := m[key] + if key == "cwd" { + // For Windows. golang.org/issue/4645. 
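+			// The same directory can print under different
+			// spellings there, so compare file identity with
+			// os.SameFile rather than the raw strings.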
+ fi1, _ := os.Stat(got) + fi2, _ := os.Stat(expected) + if os.SameFile(fi1, fi2) { + got = expected + } + } + if got != expected { + t.Errorf("for key %q got %q; expected %q", key, got, expected) + } + } + return rw +} + +var cgiTested, cgiWorks bool + +func check(t *testing.T) { + if !cgiTested { + cgiTested = true + cgiWorks = exec.Command("./testdata/test.cgi").Run() == nil + } + if !cgiWorks { + // No Perl on Windows, needed by test.cgi + // TODO: make the child process be Go, not Perl. + t.Skip("Skipping test: test.cgi failed.") + } +} + +func TestCGIBasicGet(t *testing.T) { + check(t) + h := &Handler{ + Path: "testdata/test.cgi", + Root: "/test.cgi", + } + expectedMap := map[string]string{ + "test": "Hello CGI", + "param-a": "b", + "param-foo": "bar", + "env-GATEWAY_INTERFACE": "CGI/1.1", + "env-HTTP_HOST": "example.com", + "env-PATH_INFO": "", + "env-QUERY_STRING": "foo=bar&a=b", + "env-REMOTE_ADDR": "1.2.3.4", + "env-REMOTE_HOST": "1.2.3.4", + "env-REQUEST_METHOD": "GET", + "env-REQUEST_URI": "/test.cgi?foo=bar&a=b", + "env-SCRIPT_FILENAME": "testdata/test.cgi", + "env-SCRIPT_NAME": "/test.cgi", + "env-SERVER_NAME": "example.com", + "env-SERVER_PORT": "80", + "env-SERVER_SOFTWARE": "go", + } + replay := runCgiTest(t, h, "GET /test.cgi?foo=bar&a=b HTTP/1.0\nHost: example.com\n\n", expectedMap) + + if expected, got := "text/html", replay.Header().Get("Content-Type"); got != expected { + t.Errorf("got a Content-Type of %q; expected %q", got, expected) + } + if expected, got := "X-Test-Value", replay.Header().Get("X-Test-Header"); got != expected { + t.Errorf("got a X-Test-Header of %q; expected %q", got, expected) + } +} + +func TestCGIBasicGetAbsPath(t *testing.T) { + check(t) + pwd, err := os.Getwd() + if err != nil { + t.Fatalf("getwd error: %v", err) + } + h := &Handler{ + Path: pwd + "/testdata/test.cgi", + Root: "/test.cgi", + } + expectedMap := map[string]string{ + "env-REQUEST_URI": "/test.cgi?foo=bar&a=b", + "env-SCRIPT_FILENAME": pwd + "/testdata/test.cgi", + "env-SCRIPT_NAME": "/test.cgi", + } + runCgiTest(t, h, "GET /test.cgi?foo=bar&a=b HTTP/1.0\nHost: example.com\n\n", expectedMap) +} + +func TestPathInfo(t *testing.T) { + check(t) + h := &Handler{ + Path: "testdata/test.cgi", + Root: "/test.cgi", + } + expectedMap := map[string]string{ + "param-a": "b", + "env-PATH_INFO": "/extrapath", + "env-QUERY_STRING": "a=b", + "env-REQUEST_URI": "/test.cgi/extrapath?a=b", + "env-SCRIPT_FILENAME": "testdata/test.cgi", + "env-SCRIPT_NAME": "/test.cgi", + } + runCgiTest(t, h, "GET /test.cgi/extrapath?a=b HTTP/1.0\nHost: example.com\n\n", expectedMap) +} + +func TestPathInfoDirRoot(t *testing.T) { + check(t) + h := &Handler{ + Path: "testdata/test.cgi", + Root: "/myscript/", + } + expectedMap := map[string]string{ + "env-PATH_INFO": "bar", + "env-QUERY_STRING": "a=b", + "env-REQUEST_URI": "/myscript/bar?a=b", + "env-SCRIPT_FILENAME": "testdata/test.cgi", + "env-SCRIPT_NAME": "/myscript/", + } + runCgiTest(t, h, "GET /myscript/bar?a=b HTTP/1.0\nHost: example.com\n\n", expectedMap) +} + +func TestDupHeaders(t *testing.T) { + check(t) + h := &Handler{ + Path: "testdata/test.cgi", + } + expectedMap := map[string]string{ + "env-REQUEST_URI": "/myscript/bar?a=b", + "env-SCRIPT_FILENAME": "testdata/test.cgi", + "env-HTTP_COOKIE": "nom=NOM; yum=YUM", + "env-HTTP_X_FOO": "val1, val2", + } + runCgiTest(t, h, "GET /myscript/bar?a=b HTTP/1.0\n"+ + "Cookie: nom=NOM\n"+ + "Cookie: yum=YUM\n"+ + "X-Foo: val1\n"+ + "X-Foo: val2\n"+ + "Host: example.com\n\n", + expectedMap) +} + +func 
TestPathInfoNoRoot(t *testing.T) { + check(t) + h := &Handler{ + Path: "testdata/test.cgi", + Root: "", + } + expectedMap := map[string]string{ + "env-PATH_INFO": "/bar", + "env-QUERY_STRING": "a=b", + "env-REQUEST_URI": "/bar?a=b", + "env-SCRIPT_FILENAME": "testdata/test.cgi", + "env-SCRIPT_NAME": "/", + } + runCgiTest(t, h, "GET /bar?a=b HTTP/1.0\nHost: example.com\n\n", expectedMap) +} + +func TestCGIBasicPost(t *testing.T) { + check(t) + postReq := `POST /test.cgi?a=b HTTP/1.0 +Host: example.com +Content-Type: application/x-www-form-urlencoded +Content-Length: 15 + +postfoo=postbar` + h := &Handler{ + Path: "testdata/test.cgi", + Root: "/test.cgi", + } + expectedMap := map[string]string{ + "test": "Hello CGI", + "param-postfoo": "postbar", + "env-REQUEST_METHOD": "POST", + "env-CONTENT_LENGTH": "15", + "env-REQUEST_URI": "/test.cgi?a=b", + } + runCgiTest(t, h, postReq, expectedMap) +} + +func chunk(s string) string { + return fmt.Sprintf("%x\r\n%s\r\n", len(s), s) +} + +// The CGI spec doesn't allow chunked requests. +func TestCGIPostChunked(t *testing.T) { + check(t) + postReq := `POST /test.cgi?a=b HTTP/1.1 +Host: example.com +Content-Type: application/x-www-form-urlencoded +Transfer-Encoding: chunked + +` + chunk("postfoo") + chunk("=") + chunk("postbar") + chunk("") + + h := &Handler{ + Path: "testdata/test.cgi", + Root: "/test.cgi", + } + expectedMap := map[string]string{} + resp := runCgiTest(t, h, postReq, expectedMap) + if got, expected := resp.Code, http.StatusBadRequest; got != expected { + t.Fatalf("Expected %v response code from chunked request body; got %d", + expected, got) + } +} + +func TestRedirect(t *testing.T) { + check(t) + h := &Handler{ + Path: "testdata/test.cgi", + Root: "/test.cgi", + } + rec := runCgiTest(t, h, "GET /test.cgi?loc=http://foo.com/ HTTP/1.0\nHost: example.com\n\n", nil) + if e, g := 302, rec.Code; e != g { + t.Errorf("expected status code %d; got %d", e, g) + } + if e, g := "http://foo.com/", rec.Header().Get("Location"); e != g { + t.Errorf("expected Location header of %q; got %q", e, g) + } +} + +func TestInternalRedirect(t *testing.T) { + check(t) + baseHandler := http.HandlerFunc(func(rw http.ResponseWriter, req *http.Request) { + fmt.Fprintf(rw, "basepath=%s\n", req.URL.Path) + fmt.Fprintf(rw, "remoteaddr=%s\n", req.RemoteAddr) + }) + h := &Handler{ + Path: "testdata/test.cgi", + Root: "/test.cgi", + PathLocationHandler: baseHandler, + } + expectedMap := map[string]string{ + "basepath": "/foo", + "remoteaddr": "1.2.3.4", + } + runCgiTest(t, h, "GET /test.cgi?loc=/foo HTTP/1.0\nHost: example.com\n\n", expectedMap) +} + +// TestCopyError tests that we kill the process if there's an error copying +// its output. 
(for example, from the client having gone away) +func TestCopyError(t *testing.T) { + check(t) + if runtime.GOOS == "windows" { + t.Skipf("skipping test on %q", runtime.GOOS) + } + h := &Handler{ + Path: "testdata/test.cgi", + Root: "/test.cgi", + } + ts := httptest.NewServer(h) + defer ts.Close() + + conn, err := net.Dial("tcp", ts.Listener.Addr().String()) + if err != nil { + t.Fatal(err) + } + req, _ := http.NewRequest("GET", "http://example.com/test.cgi?bigresponse=1", nil) + err = req.Write(conn) + if err != nil { + t.Fatalf("Write: %v", err) + } + + res, err := http.ReadResponse(bufio.NewReader(conn), req) + if err != nil { + t.Fatalf("ReadResponse: %v", err) + } + + pidstr := res.Header.Get("X-CGI-Pid") + if pidstr == "" { + t.Fatalf("expected an X-CGI-Pid header in response") + } + pid, err := strconv.Atoi(pidstr) + if err != nil { + t.Fatalf("invalid X-CGI-Pid value") + } + + var buf [5000]byte + n, err := io.ReadFull(res.Body, buf[:]) + if err != nil { + t.Fatalf("ReadFull: %d bytes, %v", n, err) + } + + childRunning := func() bool { + return isProcessRunning(t, pid) + } + + if !childRunning() { + t.Fatalf("pre-conn.Close, expected child to be running") + } + conn.Close() + + tries := 0 + for tries < 25 && childRunning() { + time.Sleep(50 * time.Millisecond * time.Duration(tries)) + tries++ + } + if childRunning() { + t.Fatalf("post-conn.Close, expected child to be gone") + } +} + +func TestDirUnix(t *testing.T) { + check(t) + if runtime.GOOS == "windows" { + t.Skipf("skipping test on %q", runtime.GOOS) + } + cwd, _ := os.Getwd() + h := &Handler{ + Path: "testdata/test.cgi", + Root: "/test.cgi", + Dir: cwd, + } + expectedMap := map[string]string{ + "cwd": cwd, + } + runCgiTest(t, h, "GET /test.cgi HTTP/1.0\nHost: example.com\n\n", expectedMap) + + cwd, _ = os.Getwd() + cwd = filepath.Join(cwd, "testdata") + h = &Handler{ + Path: "testdata/test.cgi", + Root: "/test.cgi", + } + expectedMap = map[string]string{ + "cwd": cwd, + } + runCgiTest(t, h, "GET /test.cgi HTTP/1.0\nHost: example.com\n\n", expectedMap) +} + +func TestDirWindows(t *testing.T) { + if runtime.GOOS != "windows" { + t.Skip("Skipping windows specific test.") + } + + cgifile, _ := filepath.Abs("testdata/test.cgi") + + var perl string + var err error + perl, err = exec.LookPath("perl") + if err != nil { + t.Skip("Skipping test: perl not found.") + } + perl, _ = filepath.Abs(perl) + + cwd, _ := os.Getwd() + h := &Handler{ + Path: perl, + Root: "/test.cgi", + Dir: cwd, + Args: []string{cgifile}, + Env: []string{"SCRIPT_FILENAME=" + cgifile}, + } + expectedMap := map[string]string{ + "cwd": cwd, + } + runCgiTest(t, h, "GET /test.cgi HTTP/1.0\nHost: example.com\n\n", expectedMap) + + // If not specify Dir on windows, working directory should be + // base directory of perl. 
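+	// (This mirrors the fallback in Handler.ServeHTTP: with Dir empty,
+	// the directory half of filepath.Split(h.Path) becomes the working
+	// directory.)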
+ cwd, _ = filepath.Split(perl) + if cwd != "" && cwd[len(cwd)-1] == filepath.Separator { + cwd = cwd[:len(cwd)-1] + } + h = &Handler{ + Path: perl, + Root: "/test.cgi", + Args: []string{cgifile}, + Env: []string{"SCRIPT_FILENAME=" + cgifile}, + } + expectedMap = map[string]string{ + "cwd": cwd, + } + runCgiTest(t, h, "GET /test.cgi HTTP/1.0\nHost: example.com\n\n", expectedMap) +} + +func TestEnvOverride(t *testing.T) { + cgifile, _ := filepath.Abs("testdata/test.cgi") + + var perl string + var err error + perl, err = exec.LookPath("perl") + if err != nil { + t.Skipf("Skipping test: perl not found.") + } + perl, _ = filepath.Abs(perl) + + cwd, _ := os.Getwd() + h := &Handler{ + Path: perl, + Root: "/test.cgi", + Dir: cwd, + Args: []string{cgifile}, + Env: []string{ + "SCRIPT_FILENAME=" + cgifile, + "REQUEST_URI=/foo/bar"}, + } + expectedMap := map[string]string{ + "cwd": cwd, + "env-SCRIPT_FILENAME": cgifile, + "env-REQUEST_URI": "/foo/bar", + } + runCgiTest(t, h, "GET /test.cgi HTTP/1.0\nHost: example.com\n\n", expectedMap) +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/core/http/cgi/matryoshka_test.go b/vendor/github.com/Azure/azure-sdk-for-go/core/http/cgi/matryoshka_test.go new file mode 100644 index 000000000..18c4803e7 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/core/http/cgi/matryoshka_test.go @@ -0,0 +1,228 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Tests a Go CGI program running under a Go CGI host process. +// Further, the two programs are the same binary, just checking +// their environment to figure out what mode to run in. + +package cgi + +import ( + "bytes" + "errors" + "fmt" + "io" + "net/http" + "net/http/httptest" + "os" + "runtime" + "testing" + "time" +) + +// This test is a CGI host (testing host.go) that runs its own binary +// as a child process testing the other half of CGI (child.go). 
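+// The child invocation is os.Args[0] with -test.run pinned to
+// TestBeChildCGIProcess, so the re-executed test binary runs exactly
+// one "test" function, and that function only acts as a CGI child when
+// the host side has set REQUEST_METHOD in its environment.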
+func TestHostingOurselves(t *testing.T) { + if runtime.GOOS == "nacl" { + t.Skip("skipping on nacl") + } + + h := &Handler{ + Path: os.Args[0], + Root: "/test.go", + Args: []string{"-test.run=TestBeChildCGIProcess"}, + } + expectedMap := map[string]string{ + "test": "Hello CGI-in-CGI", + "param-a": "b", + "param-foo": "bar", + "env-GATEWAY_INTERFACE": "CGI/1.1", + "env-HTTP_HOST": "example.com", + "env-PATH_INFO": "", + "env-QUERY_STRING": "foo=bar&a=b", + "env-REMOTE_ADDR": "1.2.3.4", + "env-REMOTE_HOST": "1.2.3.4", + "env-REQUEST_METHOD": "GET", + "env-REQUEST_URI": "/test.go?foo=bar&a=b", + "env-SCRIPT_FILENAME": os.Args[0], + "env-SCRIPT_NAME": "/test.go", + "env-SERVER_NAME": "example.com", + "env-SERVER_PORT": "80", + "env-SERVER_SOFTWARE": "go", + } + replay := runCgiTest(t, h, "GET /test.go?foo=bar&a=b HTTP/1.0\nHost: example.com\n\n", expectedMap) + + if expected, got := "text/html; charset=utf-8", replay.Header().Get("Content-Type"); got != expected { + t.Errorf("got a Content-Type of %q; expected %q", got, expected) + } + if expected, got := "X-Test-Value", replay.Header().Get("X-Test-Header"); got != expected { + t.Errorf("got a X-Test-Header of %q; expected %q", got, expected) + } +} + +type customWriterRecorder struct { + w io.Writer + *httptest.ResponseRecorder +} + +func (r *customWriterRecorder) Write(p []byte) (n int, err error) { + return r.w.Write(p) +} + +type limitWriter struct { + w io.Writer + n int +} + +func (w *limitWriter) Write(p []byte) (n int, err error) { + if len(p) > w.n { + p = p[:w.n] + } + if len(p) > 0 { + n, err = w.w.Write(p) + w.n -= n + } + if w.n == 0 { + err = errors.New("past write limit") + } + return +} + +// If there's an error copying the child's output to the parent, test +// that we kill the child. +func TestKillChildAfterCopyError(t *testing.T) { + if runtime.GOOS == "nacl" { + t.Skip("skipping on nacl") + } + + defer func() { testHookStartProcess = nil }() + proc := make(chan *os.Process, 1) + testHookStartProcess = func(p *os.Process) { + proc <- p + } + + h := &Handler{ + Path: os.Args[0], + Root: "/test.go", + Args: []string{"-test.run=TestBeChildCGIProcess"}, + } + req, _ := http.NewRequest("GET", "http://example.com/test.cgi?write-forever=1", nil) + rec := httptest.NewRecorder() + var out bytes.Buffer + const writeLen = 50 << 10 + rw := &customWriterRecorder{&limitWriter{&out, writeLen}, rec} + + donec := make(chan bool, 1) + go func() { + h.ServeHTTP(rw, req) + donec <- true + }() + + select { + case <-donec: + if out.Len() != writeLen || out.Bytes()[0] != 'a' { + t.Errorf("unexpected output: %q", out.Bytes()) + } + case <-time.After(5 * time.Second): + t.Errorf("timeout. ServeHTTP hung and didn't kill the child process?") + select { + case p := <-proc: + p.Kill() + t.Logf("killed process") + default: + t.Logf("didn't kill process") + } + } +} + +// Test that a child handler writing only headers works. 
+// golang.org/issue/7196 +func TestChildOnlyHeaders(t *testing.T) { + if runtime.GOOS == "nacl" { + t.Skip("skipping on nacl") + } + + h := &Handler{ + Path: os.Args[0], + Root: "/test.go", + Args: []string{"-test.run=TestBeChildCGIProcess"}, + } + expectedMap := map[string]string{ + "_body": "", + } + replay := runCgiTest(t, h, "GET /test.go?no-body=1 HTTP/1.0\nHost: example.com\n\n", expectedMap) + if expected, got := "X-Test-Value", replay.Header().Get("X-Test-Header"); got != expected { + t.Errorf("got a X-Test-Header of %q; expected %q", got, expected) + } +} + +// golang.org/issue/7198 +func Test500WithNoHeaders(t *testing.T) { want500Test(t, "/immediate-disconnect") } +func Test500WithNoContentType(t *testing.T) { want500Test(t, "/no-content-type") } +func Test500WithEmptyHeaders(t *testing.T) { want500Test(t, "/empty-headers") } + +func want500Test(t *testing.T, path string) { + h := &Handler{ + Path: os.Args[0], + Root: "/test.go", + Args: []string{"-test.run=TestBeChildCGIProcess"}, + } + expectedMap := map[string]string{ + "_body": "", + } + replay := runCgiTest(t, h, "GET "+path+" HTTP/1.0\nHost: example.com\n\n", expectedMap) + if replay.Code != 500 { + t.Errorf("Got code %d; want 500", replay.Code) + } +} + +type neverEnding byte + +func (b neverEnding) Read(p []byte) (n int, err error) { + for i := range p { + p[i] = byte(b) + } + return len(p), nil +} + +// Note: not actually a test. +func TestBeChildCGIProcess(t *testing.T) { + if os.Getenv("REQUEST_METHOD") == "" { + // Not in a CGI environment; skipping test. + return + } + switch os.Getenv("REQUEST_URI") { + case "/immediate-disconnect": + os.Exit(0) + case "/no-content-type": + fmt.Printf("Content-Length: 6\n\nHello\n") + os.Exit(0) + case "/empty-headers": + fmt.Printf("\nHello") + os.Exit(0) + } + Serve(http.HandlerFunc(func(rw http.ResponseWriter, req *http.Request) { + rw.Header().Set("X-Test-Header", "X-Test-Value") + req.ParseForm() + if req.FormValue("no-body") == "1" { + return + } + if req.FormValue("write-forever") == "1" { + io.Copy(rw, neverEnding('a')) + for { + time.Sleep(5 * time.Second) // hang forever, until killed + } + } + fmt.Fprintf(rw, "test=Hello CGI-in-CGI\n") + for k, vv := range req.Form { + for _, v := range vv { + fmt.Fprintf(rw, "param-%s=%s\n", k, v) + } + } + for _, kv := range os.Environ() { + fmt.Fprintf(rw, "env-%s\n", kv) + } + })) + os.Exit(0) +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/core/http/cgi/plan9_test.go b/vendor/github.com/Azure/azure-sdk-for-go/core/http/cgi/plan9_test.go new file mode 100644 index 000000000..c8235831b --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/core/http/cgi/plan9_test.go @@ -0,0 +1,18 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build plan9 + +package cgi + +import ( + "os" + "strconv" + "testing" +) + +func isProcessRunning(t *testing.T, pid int) bool { + _, err := os.Stat("/proc/" + strconv.Itoa(pid)) + return err == nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/core/http/cgi/posix_test.go b/vendor/github.com/Azure/azure-sdk-for-go/core/http/cgi/posix_test.go new file mode 100644 index 000000000..5ff9e7d5e --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/core/http/cgi/posix_test.go @@ -0,0 +1,21 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
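+
+// This file and plan9_test.go supply per-platform implementations of
+// isProcessRunning for the host tests: everywhere but Plan 9, sending
+// signal 0 probes whether a PID exists without delivering a signal,
+// while the Plan 9 variant stats /proc/<pid> instead.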
+
+// +build !plan9
+
+package cgi
+
+import (
+	"os"
+	"syscall"
+	"testing"
+)
+
+func isProcessRunning(t *testing.T, pid int) bool {
+	p, err := os.FindProcess(pid)
+	if err != nil {
+		return false
+	}
+	return p.Signal(syscall.Signal(0)) == nil
+}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/core/http/cgi/testdata/test.cgi b/vendor/github.com/Azure/azure-sdk-for-go/core/http/cgi/testdata/test.cgi
new file mode 100644
index 000000000..3214df6f0
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/core/http/cgi/testdata/test.cgi
@@ -0,0 +1,91 @@
+#!/usr/bin/perl
+# Copyright 2011 The Go Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style
+# license that can be found in the LICENSE file.
+#
+# Test script run as a child process under cgi_test.go
+
+use strict;
+use Cwd;
+
+binmode STDOUT;
+
+my $q = MiniCGI->new;
+my $params = $q->Vars;
+
+if ($params->{"loc"}) {
+    print "Location: $params->{loc}\r\n\r\n";
+    exit(0);
+}
+
+print "Content-Type: text/html\r\n";
+print "X-CGI-Pid: $$\r\n";
+print "X-Test-Header: X-Test-Value\r\n";
+print "\r\n";
+
+if ($params->{"bigresponse"}) {
+    # 17 MB, for OS X: golang.org/issue/4958
+    for (1..(17 * 1024)) {
+        print "A" x 1024, "\r\n";
+    }
+    exit 0;
+}
+
+print "test=Hello CGI\r\n";
+
+foreach my $k (sort keys %$params) {
+    print "param-$k=$params->{$k}\r\n";
+}
+
+foreach my $k (sort keys %ENV) {
+    my $clean_env = $ENV{$k};
+    $clean_env =~ s/[\n\r]//g;
+    print "env-$k=$clean_env\r\n";
+}
+
+# NOTE: msys perl returns /c/go/src/... not C:\go\....
+my $dir = getcwd();
+if ($^O eq 'MSWin32' || $^O eq 'msys') {
+    if ($dir =~ /^.:/) {
+        $dir =~ s!/!\\!g;
+    } else {
+        my $cmd = $ENV{'COMSPEC'} || 'c:\\windows\\system32\\cmd.exe';
+        $cmd =~ s!\\!/!g;
+        $dir = `$cmd /c cd`;
+        chomp $dir;
+    }
+}
+print "cwd=$dir\r\n";
+
+# A minimal version of CGI.pm, for people without the perl-modules
+# package installed. (CGI.pm used to be part of the Perl core, but
+# some distros now bundle perl-base and perl-modules separately...)
+package MiniCGI;
+
+sub new {
+    my $class = shift;
+    return bless {}, $class;
+}
+
+sub Vars {
+    my $self = shift;
+    my $pairs;
+    if ($ENV{CONTENT_LENGTH}) {
+        $pairs = do { local $/; <STDIN> };
+    } else {
+        $pairs = $ENV{QUERY_STRING};
+    }
+    my $vars = {};
+    foreach my $kv (split(/&/, $pairs)) {
+        my ($k, $v) = split(/=/, $kv, 2);
+        $vars->{_urldecode($k)} = _urldecode($v);
+    }
+    return $vars;
+}
+
+sub _urldecode {
+    my $v = shift;
+    $v =~ tr/+/ /;
+    $v =~ s/%([a-fA-F0-9][a-fA-F0-9])/pack("C", hex($1))/eg;
+    return $v;
+}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/core/http/chunked.go b/vendor/github.com/Azure/azure-sdk-for-go/core/http/chunked.go
new file mode 100644
index 000000000..749f29d32
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/core/http/chunked.go
@@ -0,0 +1,203 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// The wire protocol for HTTP's "chunked" Transfer-Encoding.
+
+// This code is duplicated in net/http and net/http/httputil.
+// Please make any changes in both files.
+
+package http
+
+import (
+	"bufio"
+	"bytes"
+	"errors"
+	"fmt"
+	"io"
+)
+
+const maxLineLength = 4096 // assumed <= bufio.defaultBufSize
+
+var ErrLineTooLong = errors.New("header line too long")
+
+// newChunkedReader returns a new chunkedReader that translates the data read from r
+// out of HTTP "chunked" format before returning it.
+// The chunkedReader returns io.EOF when the final 0-length chunk is read. +// +// newChunkedReader is not needed by normal applications. The http package +// automatically decodes chunking when reading response bodies. +func newChunkedReader(r io.Reader) io.Reader { + br, ok := r.(*bufio.Reader) + if !ok { + br = bufio.NewReader(r) + } + return &chunkedReader{r: br} +} + +type chunkedReader struct { + r *bufio.Reader + n uint64 // unread bytes in chunk + err error + buf [2]byte +} + +func (cr *chunkedReader) beginChunk() { + // chunk-size CRLF + var line []byte + line, cr.err = readLine(cr.r) + if cr.err != nil { + return + } + cr.n, cr.err = parseHexUint(line) + if cr.err != nil { + return + } + if cr.n == 0 { + cr.err = io.EOF + } +} + +func (cr *chunkedReader) chunkHeaderAvailable() bool { + n := cr.r.Buffered() + if n > 0 { + peek, _ := cr.r.Peek(n) + return bytes.IndexByte(peek, '\n') >= 0 + } + return false +} + +func (cr *chunkedReader) Read(b []uint8) (n int, err error) { + for cr.err == nil { + if cr.n == 0 { + if n > 0 && !cr.chunkHeaderAvailable() { + // We've read enough. Don't potentially block + // reading a new chunk header. + break + } + cr.beginChunk() + continue + } + if len(b) == 0 { + break + } + rbuf := b + if uint64(len(rbuf)) > cr.n { + rbuf = rbuf[:cr.n] + } + var n0 int + n0, cr.err = cr.r.Read(rbuf) + n += n0 + b = b[n0:] + cr.n -= uint64(n0) + // If we're at the end of a chunk, read the next two + // bytes to verify they are "\r\n". + if cr.n == 0 && cr.err == nil { + if _, cr.err = io.ReadFull(cr.r, cr.buf[:2]); cr.err == nil { + if cr.buf[0] != '\r' || cr.buf[1] != '\n' { + cr.err = errors.New("malformed chunked encoding") + } + } + } + } + return n, cr.err +} + +// Read a line of bytes (up to \n) from b. +// Give up if the line exceeds maxLineLength. +// The returned bytes are a pointer into storage in +// the bufio, so they are only valid until the next bufio read. +func readLine(b *bufio.Reader) (p []byte, err error) { + if p, err = b.ReadSlice('\n'); err != nil { + // We always know when EOF is coming. + // If the caller asked for a line, there should be a line. + if err == io.EOF { + err = io.ErrUnexpectedEOF + } else if err == bufio.ErrBufferFull { + err = ErrLineTooLong + } + return nil, err + } + if len(p) >= maxLineLength { + return nil, ErrLineTooLong + } + return trimTrailingWhitespace(p), nil +} + +func trimTrailingWhitespace(b []byte) []byte { + for len(b) > 0 && isASCIISpace(b[len(b)-1]) { + b = b[:len(b)-1] + } + return b +} + +func isASCIISpace(b byte) bool { + return b == ' ' || b == '\t' || b == '\n' || b == '\r' +} + +// newChunkedWriter returns a new chunkedWriter that translates writes into HTTP +// "chunked" format before writing them to w. Closing the returned chunkedWriter +// sends the final 0-length chunk that marks the end of the stream. +// +// newChunkedWriter is not needed by normal applications. The http +// package adds chunking automatically if handlers don't set a +// Content-Length header. Using newChunkedWriter inside a handler +// would result in double chunking or chunking with a Content-Length +// length, both of which are wrong. +func newChunkedWriter(w io.Writer) io.WriteCloser { + return &chunkedWriter{w} +} + +// Writing to chunkedWriter translates to writing in HTTP chunked Transfer +// Encoding wire format to the underlying Wire chunkedWriter. +type chunkedWriter struct { + Wire io.Writer +} + +// Write the contents of data as one chunk to Wire. 
+// NOTE: Note that the corresponding chunk-writing procedure in Conn.Write has +// a bug since it does not check for success of io.WriteString +func (cw *chunkedWriter) Write(data []byte) (n int, err error) { + + // Don't send 0-length data. It looks like EOF for chunked encoding. + if len(data) == 0 { + return 0, nil + } + + if _, err = fmt.Fprintf(cw.Wire, "%x\r\n", len(data)); err != nil { + return 0, err + } + if n, err = cw.Wire.Write(data); err != nil { + return + } + if n != len(data) { + err = io.ErrShortWrite + return + } + _, err = io.WriteString(cw.Wire, "\r\n") + + return +} + +func (cw *chunkedWriter) Close() error { + _, err := io.WriteString(cw.Wire, "0\r\n") + return err +} + +func parseHexUint(v []byte) (n uint64, err error) { + for _, b := range v { + n <<= 4 + switch { + case '0' <= b && b <= '9': + b = b - '0' + case 'a' <= b && b <= 'f': + b = b - 'a' + 10 + case 'A' <= b && b <= 'F': + b = b - 'A' + 10 + default: + return 0, errors.New("invalid byte in chunk length") + } + n |= uint64(b) + } + return +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/core/http/chunked_test.go b/vendor/github.com/Azure/azure-sdk-for-go/core/http/chunked_test.go new file mode 100644 index 000000000..34544790a --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/core/http/chunked_test.go @@ -0,0 +1,159 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// This code is duplicated in net/http and net/http/httputil. +// Please make any changes in both files. + +package http + +import ( + "bufio" + "bytes" + "fmt" + "io" + "io/ioutil" + "strings" + "testing" +) + +func TestChunk(t *testing.T) { + var b bytes.Buffer + + w := newChunkedWriter(&b) + const chunk1 = "hello, " + const chunk2 = "world! 0123456789abcdef" + w.Write([]byte(chunk1)) + w.Write([]byte(chunk2)) + w.Close() + + if g, e := b.String(), "7\r\nhello, \r\n17\r\nworld! 0123456789abcdef\r\n0\r\n"; g != e { + t.Fatalf("chunk writer wrote %q; want %q", g, e) + } + + r := newChunkedReader(&b) + data, err := ioutil.ReadAll(r) + if err != nil { + t.Logf(`data: "%s"`, data) + t.Fatalf("ReadAll from reader: %v", err) + } + if g, e := string(data), chunk1+chunk2; g != e { + t.Errorf("chunk reader read %q; want %q", g, e) + } +} + +func TestChunkReadMultiple(t *testing.T) { + // Bunch of small chunks, all read together. + { + var b bytes.Buffer + w := newChunkedWriter(&b) + w.Write([]byte("foo")) + w.Write([]byte("bar")) + w.Close() + + r := newChunkedReader(&b) + buf := make([]byte, 10) + n, err := r.Read(buf) + if n != 6 || err != io.EOF { + t.Errorf("Read = %d, %v; want 6, EOF", n, err) + } + buf = buf[:n] + if string(buf) != "foobar" { + t.Errorf("Read = %q; want %q", buf, "foobar") + } + } + + // One big chunk followed by a little chunk, but the small bufio.Reader size + // should prevent the second chunk header from being read. + { + var b bytes.Buffer + w := newChunkedWriter(&b) + // fillBufChunk is 11 bytes + 3 bytes header + 2 bytes footer = 16 bytes, + // the same as the bufio ReaderSize below (the minimum), so even + // though we're going to try to Read with a buffer larger enough to also + // receive "foo", the second chunk header won't be read yet. 
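+		// On the wire, fillBufChunk arrives as "b\r\n0123456789a\r\n":
+		// 3 header bytes + 11 data bytes + 2 trailing bytes = 16,
+		// exactly the bufio.Reader size used below.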
+ const fillBufChunk = "0123456789a" + const shortChunk = "foo" + w.Write([]byte(fillBufChunk)) + w.Write([]byte(shortChunk)) + w.Close() + + r := newChunkedReader(bufio.NewReaderSize(&b, 16)) + buf := make([]byte, len(fillBufChunk)+len(shortChunk)) + n, err := r.Read(buf) + if n != len(fillBufChunk) || err != nil { + t.Errorf("Read = %d, %v; want %d, nil", n, err, len(fillBufChunk)) + } + buf = buf[:n] + if string(buf) != fillBufChunk { + t.Errorf("Read = %q; want %q", buf, fillBufChunk) + } + + n, err = r.Read(buf) + if n != len(shortChunk) || err != io.EOF { + t.Errorf("Read = %d, %v; want %d, EOF", n, err, len(shortChunk)) + } + } + + // And test that we see an EOF chunk, even though our buffer is already full: + { + r := newChunkedReader(bufio.NewReader(strings.NewReader("3\r\nfoo\r\n0\r\n"))) + buf := make([]byte, 3) + n, err := r.Read(buf) + if n != 3 || err != io.EOF { + t.Errorf("Read = %d, %v; want 3, EOF", n, err) + } + if string(buf) != "foo" { + t.Errorf("buf = %q; want foo", buf) + } + } +} + +func TestChunkReaderAllocs(t *testing.T) { + if testing.Short() { + t.Skip("skipping in short mode") + } + var buf bytes.Buffer + w := newChunkedWriter(&buf) + a, b, c := []byte("aaaaaa"), []byte("bbbbbbbbbbbb"), []byte("cccccccccccccccccccccccc") + w.Write(a) + w.Write(b) + w.Write(c) + w.Close() + + readBuf := make([]byte, len(a)+len(b)+len(c)+1) + byter := bytes.NewReader(buf.Bytes()) + bufr := bufio.NewReader(byter) + mallocs := testing.AllocsPerRun(100, func() { + byter.Seek(0, 0) + bufr.Reset(byter) + r := newChunkedReader(bufr) + n, err := io.ReadFull(r, readBuf) + if n != len(readBuf)-1 { + t.Fatalf("read %d bytes; want %d", n, len(readBuf)-1) + } + if err != io.ErrUnexpectedEOF { + t.Fatalf("read error = %v; want ErrUnexpectedEOF", err) + } + }) + if mallocs > 1.5 { + t.Errorf("mallocs = %v; want 1", mallocs) + } +} + +func TestParseHexUint(t *testing.T) { + for i := uint64(0); i <= 1234; i++ { + line := []byte(fmt.Sprintf("%x", i)) + got, err := parseHexUint(line) + if err != nil { + t.Fatalf("on %d: %v", i, err) + } + if got != i { + t.Errorf("for input %q = %d; want %d", line, got, i) + } + } + _, err := parseHexUint([]byte("bogus")) + if err == nil { + t.Error("expected error on bogus input") + } +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/core/http/client.go b/vendor/github.com/Azure/azure-sdk-for-go/core/http/client.go new file mode 100644 index 000000000..a5a3abe61 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/core/http/client.go @@ -0,0 +1,487 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// HTTP client. See RFC 2616. +// +// This is the high-level Client interface. +// The low-level implementation is in transport.go. + +package http + +import ( + "encoding/base64" + "errors" + "fmt" + "io" + "io/ioutil" + "log" + "net/url" + "strings" + "sync" + "time" +) + +// A Client is an HTTP client. Its zero value (DefaultClient) is a +// usable client that uses DefaultTransport. +// +// The Client's Transport typically has internal state (cached TCP +// connections), so Clients should be reused instead of created as +// needed. Clients are safe for concurrent use by multiple goroutines. +// +// A Client is higher-level than a RoundTripper (such as Transport) +// and additionally handles HTTP details such as cookies and +// redirects. 
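+//
+// A typical reuse pattern looks roughly like the following sketch
+// (illustrative only, not from the original source; error handling
+// elided to log.Fatal):
+//
+//	client := &Client{Timeout: 10 * time.Second}
+//	resp, err := client.Get("http://example.com/")
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	defer resp.Body.Close()
+//	body, err := ioutil.ReadAll(resp.Body)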
+type Client struct { + // Transport specifies the mechanism by which individual + // HTTP requests are made. + // If nil, DefaultTransport is used. + Transport RoundTripper + + // CheckRedirect specifies the policy for handling redirects. + // If CheckRedirect is not nil, the client calls it before + // following an HTTP redirect. The arguments req and via are + // the upcoming request and the requests made already, oldest + // first. If CheckRedirect returns an error, the Client's Get + // method returns both the previous Response and + // CheckRedirect's error (wrapped in a url.Error) instead of + // issuing the Request req. + // + // If CheckRedirect is nil, the Client uses its default policy, + // which is to stop after 10 consecutive requests. + CheckRedirect func(req *Request, via []*Request) error + + // Jar specifies the cookie jar. + // If Jar is nil, cookies are not sent in requests and ignored + // in responses. + Jar CookieJar + + // Timeout specifies a time limit for requests made by this + // Client. The timeout includes connection time, any + // redirects, and reading the response body. The timer remains + // running after Get, Head, Post, or Do return and will + // interrupt reading of the Response.Body. + // + // A Timeout of zero means no timeout. + // + // The Client's Transport must support the CancelRequest + // method or Client will return errors when attempting to make + // a request with Get, Head, Post, or Do. Client's default + // Transport (DefaultTransport) supports CancelRequest. + Timeout time.Duration +} + +// DefaultClient is the default Client and is used by Get, Head, and Post. +var DefaultClient = &Client{} + +// RoundTripper is an interface representing the ability to execute a +// single HTTP transaction, obtaining the Response for a given Request. +// +// A RoundTripper must be safe for concurrent use by multiple +// goroutines. +type RoundTripper interface { + // RoundTrip executes a single HTTP transaction, returning + // the Response for the request req. RoundTrip should not + // attempt to interpret the response. In particular, + // RoundTrip must return err == nil if it obtained a response, + // regardless of the response's HTTP status code. A non-nil + // err should be reserved for failure to obtain a response. + // Similarly, RoundTrip should not attempt to handle + // higher-level protocol details such as redirects, + // authentication, or cookies. + // + // RoundTrip should not modify the request, except for + // consuming and closing the Body, including on errors. The + // request's URL and Header fields are guaranteed to be + // initialized. + RoundTrip(*Request) (*Response, error) +} + +// Given a string of the form "host", "host:port", or "[ipv6::address]:port", +// return true if the string includes a port. +func hasPort(s string) bool { return strings.LastIndex(s, ":") > strings.LastIndex(s, "]") } + +// Used in Send to implement io.ReadCloser by bundling together the +// bufio.Reader through which we read the response, and the underlying +// network connection. 
+type readClose struct { + io.Reader + io.Closer +} + +func (c *Client) send(req *Request) (*Response, error) { + if c.Jar != nil { + for _, cookie := range c.Jar.Cookies(req.URL) { + req.AddCookie(cookie) + } + } + resp, err := send(req, c.transport()) + if err != nil { + return nil, err + } + if c.Jar != nil { + if rc := resp.Cookies(); len(rc) > 0 { + c.Jar.SetCookies(req.URL, rc) + } + } + return resp, err +} + +// Do sends an HTTP request and returns an HTTP response, following +// policy (e.g. redirects, cookies, auth) as configured on the client. +// +// An error is returned if caused by client policy (such as +// CheckRedirect), or if there was an HTTP protocol error. +// A non-2xx response doesn't cause an error. +// +// When err is nil, resp always contains a non-nil resp.Body. +// +// Callers should close resp.Body when done reading from it. If +// resp.Body is not closed, the Client's underlying RoundTripper +// (typically Transport) may not be able to re-use a persistent TCP +// connection to the server for a subsequent "keep-alive" request. +// +// The request Body, if non-nil, will be closed by the underlying +// Transport, even on errors. +// +// Generally Get, Post, or PostForm will be used instead of Do. +func (c *Client) Do(req *Request) (resp *Response, err error) { + if req.Method == "GET" || req.Method == "HEAD" { + return c.doFollowingRedirects(req, shouldRedirectGet) + } + if req.Method == "POST" || req.Method == "PUT" { + return c.doFollowingRedirects(req, shouldRedirectPost) + } + return c.send(req) +} + +func (c *Client) transport() RoundTripper { + if c.Transport != nil { + return c.Transport + } + return DefaultTransport +} + +// send issues an HTTP request. +// Caller should close resp.Body when done reading from it. +func send(req *Request, t RoundTripper) (resp *Response, err error) { + if t == nil { + req.closeBody() + return nil, errors.New("http: no Client.Transport or DefaultTransport") + } + + if req.URL == nil { + req.closeBody() + return nil, errors.New("http: nil Request.URL") + } + + if req.RequestURI != "" { + req.closeBody() + return nil, errors.New("http: Request.RequestURI can't be set in client requests.") + } + + // Most the callers of send (Get, Post, et al) don't need + // Headers, leaving it uninitialized. We guarantee to the + // Transport that this has been initialized, though. + if req.Header == nil { + req.Header = make(Header) + } + + if u := req.URL.User; u != nil { + username := u.Username() + password, _ := u.Password() + req.Header.Set("Authorization", "Basic "+basicAuth(username, password)) + } + resp, err = t.RoundTrip(req) + if err != nil { + if resp != nil { + log.Printf("RoundTripper returned a response & error; ignoring response") + } + return nil, err + } + return resp, nil +} + +// See 2 (end of page 4) http://www.ietf.org/rfc/rfc2617.txt +// "To receive authorization, the client sends the userid and password, +// separated by a single colon (":") character, within a base64 +// encoded string in the credentials." +// It is not meant to be urlencoded. +func basicAuth(username, password string) string { + auth := username + ":" + password + return base64.StdEncoding.EncodeToString([]byte(auth)) +} + +// True if the specified HTTP status code is one for which the Get utility should +// automatically redirect. 
+func shouldRedirectGet(statusCode int) bool { + switch statusCode { + case StatusMovedPermanently, StatusFound, StatusSeeOther, StatusTemporaryRedirect: + return true + } + return false +} + +// True if the specified HTTP status code is one for which the Post utility should +// automatically redirect. +func shouldRedirectPost(statusCode int) bool { + switch statusCode { + case StatusFound, StatusSeeOther: + return true + } + return false +} + +// Get issues a GET to the specified URL. If the response is one of the following +// redirect codes, Get follows the redirect, up to a maximum of 10 redirects: +// +// 301 (Moved Permanently) +// 302 (Found) +// 303 (See Other) +// 307 (Temporary Redirect) +// +// An error is returned if there were too many redirects or if there +// was an HTTP protocol error. A non-2xx response doesn't cause an +// error. +// +// When err is nil, resp always contains a non-nil resp.Body. +// Caller should close resp.Body when done reading from it. +// +// Get is a wrapper around DefaultClient.Get. +func Get(url string) (resp *Response, err error) { + return DefaultClient.Get(url) +} + +// Get issues a GET to the specified URL. If the response is one of the +// following redirect codes, Get follows the redirect after calling the +// Client's CheckRedirect function. +// +// 301 (Moved Permanently) +// 302 (Found) +// 303 (See Other) +// 307 (Temporary Redirect) +// +// An error is returned if the Client's CheckRedirect function fails +// or if there was an HTTP protocol error. A non-2xx response doesn't +// cause an error. +// +// When err is nil, resp always contains a non-nil resp.Body. +// Caller should close resp.Body when done reading from it. +func (c *Client) Get(url string) (resp *Response, err error) { + req, err := NewRequest("GET", url, nil) + if err != nil { + return nil, err + } + return c.doFollowingRedirects(req, shouldRedirectGet) +} + +func (c *Client) doFollowingRedirects(ireq *Request, shouldRedirect func(int) bool) (resp *Response, err error) { + var base *url.URL + redirectChecker := c.CheckRedirect + if redirectChecker == nil { + redirectChecker = defaultCheckRedirect + } + var via []*Request + + if ireq.URL == nil { + ireq.closeBody() + return nil, errors.New("http: nil Request.URL") + } + + var reqmu sync.Mutex // guards req + req := ireq + + var timer *time.Timer + if c.Timeout > 0 { + type canceler interface { + CancelRequest(*Request) + } + tr, ok := c.transport().(canceler) + if !ok { + return nil, fmt.Errorf("net/http: Client Transport of type %T doesn't support CancelRequest; Timeout not supported", c.transport()) + } + timer = time.AfterFunc(c.Timeout, func() { + reqmu.Lock() + defer reqmu.Unlock() + tr.CancelRequest(req) + }) + } + + urlStr := "" // next relative or absolute URL to fetch (after first request) + redirectFailed := false + for redirect := 0; ; redirect++ { + if redirect != 0 { + nreq := new(Request) + nreq.Method = ireq.Method + if ireq.Method == "POST" || ireq.Method == "PUT" { + nreq.Method = "GET" + } + nreq.Header = make(Header) + nreq.URL, err = base.Parse(urlStr) + if err != nil { + break + } + if len(via) > 0 { + // Add the Referer header. 
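+				// ...but only when the previous request was not
+				// https, so that a secure URL never leaks into a
+				// Referer header (cf. RFC 2616 section 15.1.3).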
+ lastReq := via[len(via)-1] + if lastReq.URL.Scheme != "https" { + nreq.Header.Set("Referer", lastReq.URL.String()) + } + + err = redirectChecker(nreq, via) + if err != nil { + redirectFailed = true + break + } + } + reqmu.Lock() + req = nreq + reqmu.Unlock() + } + + urlStr = req.URL.String() + if resp, err = c.send(req); err != nil { + break + } + + if shouldRedirect(resp.StatusCode) { + // Read the body if small so underlying TCP connection will be re-used. + // No need to check for errors: if it fails, Transport won't reuse it anyway. + const maxBodySlurpSize = 2 << 10 + if resp.ContentLength == -1 || resp.ContentLength <= maxBodySlurpSize { + io.CopyN(ioutil.Discard, resp.Body, maxBodySlurpSize) + } + resp.Body.Close() + if urlStr = resp.Header.Get("Location"); urlStr == "" { + err = errors.New(fmt.Sprintf("%d response missing Location header", resp.StatusCode)) + break + } + base = req.URL + via = append(via, req) + continue + } + if timer != nil { + resp.Body = &cancelTimerBody{timer, resp.Body} + } + return resp, nil + } + + method := ireq.Method + urlErr := &url.Error{ + Op: method[0:1] + strings.ToLower(method[1:]), + URL: urlStr, + Err: err, + } + + if redirectFailed { + // Special case for Go 1 compatibility: return both the response + // and an error if the CheckRedirect function failed. + // See http://golang.org/issue/3795 + return resp, urlErr + } + + if resp != nil { + resp.Body.Close() + } + return nil, urlErr +} + +func defaultCheckRedirect(req *Request, via []*Request) error { + if len(via) >= 10 { + return errors.New("stopped after 10 redirects") + } + return nil +} + +// Post issues a POST to the specified URL. +// +// Caller should close resp.Body when done reading from it. +// +// Post is a wrapper around DefaultClient.Post +func Post(url string, bodyType string, body io.Reader) (resp *Response, err error) { + return DefaultClient.Post(url, bodyType, body) +} + +// Post issues a POST to the specified URL. +// +// Caller should close resp.Body when done reading from it. +// +// If the provided body is also an io.Closer, it is closed after the +// request. +func (c *Client) Post(url string, bodyType string, body io.Reader) (resp *Response, err error) { + req, err := NewRequest("POST", url, body) + if err != nil { + return nil, err + } + req.Header.Set("Content-Type", bodyType) + return c.doFollowingRedirects(req, shouldRedirectPost) +} + +// PostForm issues a POST to the specified URL, with data's keys and +// values URL-encoded as the request body. +// +// When err is nil, resp always contains a non-nil resp.Body. +// Caller should close resp.Body when done reading from it. +// +// PostForm is a wrapper around DefaultClient.PostForm +func PostForm(url string, data url.Values) (resp *Response, err error) { + return DefaultClient.PostForm(url, data) +} + +// PostForm issues a POST to the specified URL, +// with data's keys and values urlencoded as the request body. +// +// When err is nil, resp always contains a non-nil resp.Body. +// Caller should close resp.Body when done reading from it. +func (c *Client) PostForm(url string, data url.Values) (resp *Response, err error) { + return c.Post(url, "application/x-www-form-urlencoded", strings.NewReader(data.Encode())) +} + +// Head issues a HEAD to the specified URL. If the response is one of the +// following redirect codes, Head follows the redirect after calling the +// Client's CheckRedirect function. 
+// +// 301 (Moved Permanently) +// 302 (Found) +// 303 (See Other) +// 307 (Temporary Redirect) +// +// Head is a wrapper around DefaultClient.Head +func Head(url string) (resp *Response, err error) { + return DefaultClient.Head(url) +} + +// Head issues a HEAD to the specified URL. If the response is one of the +// following redirect codes, Head follows the redirect after calling the +// Client's CheckRedirect function. +// +// 301 (Moved Permanently) +// 302 (Found) +// 303 (See Other) +// 307 (Temporary Redirect) +func (c *Client) Head(url string) (resp *Response, err error) { + req, err := NewRequest("HEAD", url, nil) + if err != nil { + return nil, err + } + return c.doFollowingRedirects(req, shouldRedirectGet) +} + +type cancelTimerBody struct { + t *time.Timer + rc io.ReadCloser +} + +func (b *cancelTimerBody) Read(p []byte) (n int, err error) { + n, err = b.rc.Read(p) + if err == io.EOF { + b.t.Stop() + } + return +} + +func (b *cancelTimerBody) Close() error { + err := b.rc.Close() + b.t.Stop() + return err +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/core/http/client_test.go b/vendor/github.com/Azure/azure-sdk-for-go/core/http/client_test.go new file mode 100644 index 000000000..6392c1baf --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/core/http/client_test.go @@ -0,0 +1,1038 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Tests for client.go + +package http_test + +import ( + "bytes" + "crypto/tls" + "crypto/x509" + "encoding/base64" + "errors" + "fmt" + "io" + "io/ioutil" + "log" + "net" + . "net/http" + "net/http/httptest" + "net/url" + "reflect" + "sort" + "strconv" + "strings" + "sync" + "testing" + "time" +) + +var robotsTxtHandler = HandlerFunc(func(w ResponseWriter, r *Request) { + w.Header().Set("Last-Modified", "sometime") + fmt.Fprintf(w, "User-agent: go\nDisallow: /something/") +}) + +// pedanticReadAll works like ioutil.ReadAll but additionally +// verifies that r obeys the documented io.Reader contract. +func pedanticReadAll(r io.Reader) (b []byte, err error) { + var bufa [64]byte + buf := bufa[:] + for { + n, err := r.Read(buf) + if n == 0 && err == nil { + return nil, fmt.Errorf("Read: n=0 with err=nil") + } + b = append(b, buf[:n]...) 
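+		// Per the io.Reader contract, the n bytes just read are
+		// consumed before err is examined.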
+ if err == io.EOF { + n, err := r.Read(buf) + if n != 0 || err != io.EOF { + return nil, fmt.Errorf("Read: n=%d err=%#v after EOF", n, err) + } + return b, nil + } + if err != nil { + return b, err + } + } +} + +type chanWriter chan string + +func (w chanWriter) Write(p []byte) (n int, err error) { + w <- string(p) + return len(p), nil +} + +func TestClient(t *testing.T) { + defer afterTest(t) + ts := httptest.NewServer(robotsTxtHandler) + defer ts.Close() + + r, err := Get(ts.URL) + var b []byte + if err == nil { + b, err = pedanticReadAll(r.Body) + r.Body.Close() + } + if err != nil { + t.Error(err) + } else if s := string(b); !strings.HasPrefix(s, "User-agent:") { + t.Errorf("Incorrect page body (did not begin with User-agent): %q", s) + } +} + +func TestClientHead(t *testing.T) { + defer afterTest(t) + ts := httptest.NewServer(robotsTxtHandler) + defer ts.Close() + + r, err := Head(ts.URL) + if err != nil { + t.Fatal(err) + } + if _, ok := r.Header["Last-Modified"]; !ok { + t.Error("Last-Modified header not found.") + } +} + +type recordingTransport struct { + req *Request +} + +func (t *recordingTransport) RoundTrip(req *Request) (resp *Response, err error) { + t.req = req + return nil, errors.New("dummy impl") +} + +func TestGetRequestFormat(t *testing.T) { + defer afterTest(t) + tr := &recordingTransport{} + client := &Client{Transport: tr} + url := "http://dummy.faketld/" + client.Get(url) // Note: doesn't hit network + if tr.req.Method != "GET" { + t.Errorf("expected method %q; got %q", "GET", tr.req.Method) + } + if tr.req.URL.String() != url { + t.Errorf("expected URL %q; got %q", url, tr.req.URL.String()) + } + if tr.req.Header == nil { + t.Errorf("expected non-nil request Header") + } +} + +func TestPostRequestFormat(t *testing.T) { + defer afterTest(t) + tr := &recordingTransport{} + client := &Client{Transport: tr} + + url := "http://dummy.faketld/" + json := `{"key":"value"}` + b := strings.NewReader(json) + client.Post(url, "application/json", b) // Note: doesn't hit network + + if tr.req.Method != "POST" { + t.Errorf("got method %q, want %q", tr.req.Method, "POST") + } + if tr.req.URL.String() != url { + t.Errorf("got URL %q, want %q", tr.req.URL.String(), url) + } + if tr.req.Header == nil { + t.Fatalf("expected non-nil request Header") + } + if tr.req.Close { + t.Error("got Close true, want false") + } + if g, e := tr.req.ContentLength, int64(len(json)); g != e { + t.Errorf("got ContentLength %d, want %d", g, e) + } +} + +func TestPostFormRequestFormat(t *testing.T) { + defer afterTest(t) + tr := &recordingTransport{} + client := &Client{Transport: tr} + + urlStr := "http://dummy.faketld/" + form := make(url.Values) + form.Set("foo", "bar") + form.Add("foo", "bar2") + form.Set("bar", "baz") + client.PostForm(urlStr, form) // Note: doesn't hit network + + if tr.req.Method != "POST" { + t.Errorf("got method %q, want %q", tr.req.Method, "POST") + } + if tr.req.URL.String() != urlStr { + t.Errorf("got URL %q, want %q", tr.req.URL.String(), urlStr) + } + if tr.req.Header == nil { + t.Fatalf("expected non-nil request Header") + } + if g, e := tr.req.Header.Get("Content-Type"), "application/x-www-form-urlencoded"; g != e { + t.Errorf("got Content-Type %q, want %q", g, e) + } + if tr.req.Close { + t.Error("got Close true, want false") + } + // Depending on map iteration, body can be either of these. 
+ expectedBody := "foo=bar&foo=bar2&bar=baz" + expectedBody1 := "bar=baz&foo=bar&foo=bar2" + if g, e := tr.req.ContentLength, int64(len(expectedBody)); g != e { + t.Errorf("got ContentLength %d, want %d", g, e) + } + bodyb, err := ioutil.ReadAll(tr.req.Body) + if err != nil { + t.Fatalf("ReadAll on req.Body: %v", err) + } + if g := string(bodyb); g != expectedBody && g != expectedBody1 { + t.Errorf("got body %q, want %q or %q", g, expectedBody, expectedBody1) + } +} + +func TestClientRedirects(t *testing.T) { + defer afterTest(t) + var ts *httptest.Server + ts = httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) { + n, _ := strconv.Atoi(r.FormValue("n")) + // Test Referer header. (7 is arbitrary position to test at) + if n == 7 { + if g, e := r.Referer(), ts.URL+"/?n=6"; e != g { + t.Errorf("on request ?n=7, expected referer of %q; got %q", e, g) + } + } + if n < 15 { + Redirect(w, r, fmt.Sprintf("/?n=%d", n+1), StatusFound) + return + } + fmt.Fprintf(w, "n=%d", n) + })) + defer ts.Close() + + c := &Client{} + _, err := c.Get(ts.URL) + if e, g := "Get /?n=10: stopped after 10 redirects", fmt.Sprintf("%v", err); e != g { + t.Errorf("with default client Get, expected error %q, got %q", e, g) + } + + // HEAD request should also have the ability to follow redirects. + _, err = c.Head(ts.URL) + if e, g := "Head /?n=10: stopped after 10 redirects", fmt.Sprintf("%v", err); e != g { + t.Errorf("with default client Head, expected error %q, got %q", e, g) + } + + // Do should also follow redirects. + greq, _ := NewRequest("GET", ts.URL, nil) + _, err = c.Do(greq) + if e, g := "Get /?n=10: stopped after 10 redirects", fmt.Sprintf("%v", err); e != g { + t.Errorf("with default client Do, expected error %q, got %q", e, g) + } + + var checkErr error + var lastVia []*Request + c = &Client{CheckRedirect: func(_ *Request, via []*Request) error { + lastVia = via + return checkErr + }} + res, err := c.Get(ts.URL) + if err != nil { + t.Fatalf("Get error: %v", err) + } + res.Body.Close() + finalUrl := res.Request.URL.String() + if e, g := "", fmt.Sprintf("%v", err); e != g { + t.Errorf("with custom client, expected error %q, got %q", e, g) + } + if !strings.HasSuffix(finalUrl, "/?n=15") { + t.Errorf("expected final url to end in /?n=15; got url %q", finalUrl) + } + if e, g := 15, len(lastVia); e != g { + t.Errorf("expected lastVia to have contained %d elements; got %d", e, g) + } + + checkErr = errors.New("no redirects allowed") + res, err = c.Get(ts.URL) + if urlError, ok := err.(*url.Error); !ok || urlError.Err != checkErr { + t.Errorf("with redirects forbidden, expected a *url.Error with our 'no redirects allowed' error inside; got %#v (%q)", err, err) + } + if res == nil { + t.Fatalf("Expected a non-nil Response on CheckRedirect failure (http://golang.org/issue/3795)") + } + res.Body.Close() + if res.Header.Get("Location") == "" { + t.Errorf("no Location header in Response") + } +} + +func TestPostRedirects(t *testing.T) { + defer afterTest(t) + var log struct { + sync.Mutex + bytes.Buffer + } + var ts *httptest.Server + ts = httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) { + log.Lock() + fmt.Fprintf(&log.Buffer, "%s %s ", r.Method, r.RequestURI) + log.Unlock() + if v := r.URL.Query().Get("code"); v != "" { + code, _ := strconv.Atoi(v) + if code/100 == 3 { + w.Header().Set("Location", ts.URL) + } + w.WriteHeader(code) + } + })) + defer ts.Close() + tests := []struct { + suffix string + want int // response code + }{ + {"/", 200}, + {"/?code=301", 301}, + {"/?code=302", 
200}, + {"/?code=303", 200}, + {"/?code=404", 404}, + } + for _, tt := range tests { + res, err := Post(ts.URL+tt.suffix, "text/plain", strings.NewReader("Some content")) + if err != nil { + t.Fatal(err) + } + if res.StatusCode != tt.want { + t.Errorf("POST %s: status code = %d; want %d", tt.suffix, res.StatusCode, tt.want) + } + } + log.Lock() + got := log.String() + log.Unlock() + want := "POST / POST /?code=301 POST /?code=302 GET / POST /?code=303 GET / POST /?code=404 " + if got != want { + t.Errorf("Log differs.\n Got: %q\nWant: %q", got, want) + } +} + +var expectedCookies = []*Cookie{ + {Name: "ChocolateChip", Value: "tasty"}, + {Name: "First", Value: "Hit"}, + {Name: "Second", Value: "Hit"}, +} + +var echoCookiesRedirectHandler = HandlerFunc(func(w ResponseWriter, r *Request) { + for _, cookie := range r.Cookies() { + SetCookie(w, cookie) + } + if r.URL.Path == "/" { + SetCookie(w, expectedCookies[1]) + Redirect(w, r, "/second", StatusMovedPermanently) + } else { + SetCookie(w, expectedCookies[2]) + w.Write([]byte("hello")) + } +}) + +func TestClientSendsCookieFromJar(t *testing.T) { + tr := &recordingTransport{} + client := &Client{Transport: tr} + client.Jar = &TestJar{perURL: make(map[string][]*Cookie)} + us := "http://dummy.faketld/" + u, _ := url.Parse(us) + client.Jar.SetCookies(u, expectedCookies) + + client.Get(us) // Note: doesn't hit network + matchReturnedCookies(t, expectedCookies, tr.req.Cookies()) + + client.Head(us) // Note: doesn't hit network + matchReturnedCookies(t, expectedCookies, tr.req.Cookies()) + + client.Post(us, "text/plain", strings.NewReader("body")) // Note: doesn't hit network + matchReturnedCookies(t, expectedCookies, tr.req.Cookies()) + + client.PostForm(us, url.Values{}) // Note: doesn't hit network + matchReturnedCookies(t, expectedCookies, tr.req.Cookies()) + + req, _ := NewRequest("GET", us, nil) + client.Do(req) // Note: doesn't hit network + matchReturnedCookies(t, expectedCookies, tr.req.Cookies()) + + req, _ = NewRequest("POST", us, nil) + client.Do(req) // Note: doesn't hit network + matchReturnedCookies(t, expectedCookies, tr.req.Cookies()) +} + +// Just enough correctness for our redirect tests. Uses the URL.Host as the +// scope of all cookies. 
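+// (A production jar would implement RFC 6265 domain and path matching,
+// as the cookiejar package vendored alongside this one does.)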
+type TestJar struct {
+	m      sync.Mutex
+	perURL map[string][]*Cookie
+}
+
+func (j *TestJar) SetCookies(u *url.URL, cookies []*Cookie) {
+	j.m.Lock()
+	defer j.m.Unlock()
+	if j.perURL == nil {
+		j.perURL = make(map[string][]*Cookie)
+	}
+	j.perURL[u.Host] = cookies
+}
+
+func (j *TestJar) Cookies(u *url.URL) []*Cookie {
+	j.m.Lock()
+	defer j.m.Unlock()
+	return j.perURL[u.Host]
+}
+
+func TestRedirectCookiesJar(t *testing.T) {
+	defer afterTest(t)
+	var ts *httptest.Server
+	ts = httptest.NewServer(echoCookiesRedirectHandler)
+	defer ts.Close()
+	c := &Client{
+		Jar: new(TestJar),
+	}
+	u, _ := url.Parse(ts.URL)
+	c.Jar.SetCookies(u, []*Cookie{expectedCookies[0]})
+	resp, err := c.Get(ts.URL)
+	if err != nil {
+		t.Fatalf("Get: %v", err)
+	}
+	resp.Body.Close()
+	matchReturnedCookies(t, expectedCookies, resp.Cookies())
+}
+
+func matchReturnedCookies(t *testing.T, expected, given []*Cookie) {
+	if len(given) != len(expected) {
+		t.Logf("Received cookies: %v", given)
+		t.Errorf("Expected %d cookies, got %d", len(expected), len(given))
+	}
+	for _, ec := range expected {
+		foundC := false
+		for _, c := range given {
+			if ec.Name == c.Name && ec.Value == c.Value {
+				foundC = true
+				break
+			}
+		}
+		if !foundC {
+			t.Errorf("Missing cookie %v", ec)
+		}
+	}
+}
+
+func TestJarCalls(t *testing.T) {
+	defer afterTest(t)
+	ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) {
+		pathSuffix := r.RequestURI[1:]
+		if r.RequestURI == "/nosetcookie" {
+			return // don't set cookies for this path
+		}
+		SetCookie(w, &Cookie{Name: "name" + pathSuffix, Value: "val" + pathSuffix})
+		if r.RequestURI == "/" {
+			Redirect(w, r, "http://secondhost.fake/secondpath", 302)
+		}
+	}))
+	defer ts.Close()
+	jar := new(RecordingJar)
+	c := &Client{
+		Jar: jar,
+		Transport: &Transport{
+			Dial: func(_ string, _ string) (net.Conn, error) {
+				return net.Dial("tcp", ts.Listener.Addr().String())
+			},
+		},
+	}
+	_, err := c.Get("http://firsthost.fake/")
+	if err != nil {
+		t.Fatal(err)
+	}
+	_, err = c.Get("http://firsthost.fake/nosetcookie")
+	if err != nil {
+		t.Fatal(err)
+	}
+	got := jar.log.String()
+	want := `Cookies("http://firsthost.fake/")
+SetCookie("http://firsthost.fake/", [name=val])
+Cookies("http://secondhost.fake/secondpath")
+SetCookie("http://secondhost.fake/secondpath", [namesecondpath=valsecondpath])
+Cookies("http://firsthost.fake/nosetcookie")
+`
+	if got != want {
+		t.Errorf("Got Jar calls:\n%s\nWant:\n%s", got, want)
+	}
+}
+
+// RecordingJar keeps a log of calls made to it, without
+// tracking any cookies.
+type RecordingJar struct {
+	mu  sync.Mutex
+	log bytes.Buffer
+}
+
+func (j *RecordingJar) SetCookies(u *url.URL, cookies []*Cookie) {
+	j.logf("SetCookie(%q, %v)\n", u, cookies)
+}
+
+func (j *RecordingJar) Cookies(u *url.URL) []*Cookie {
+	j.logf("Cookies(%q)\n", u)
+	return nil
+}
+
+func (j *RecordingJar) logf(format string, args ...interface{}) {
+	j.mu.Lock()
+	defer j.mu.Unlock()
+	fmt.Fprintf(&j.log, format, args...)
+} + +func TestStreamingGet(t *testing.T) { + defer afterTest(t) + say := make(chan string) + ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) { + w.(Flusher).Flush() + for str := range say { + w.Write([]byte(str)) + w.(Flusher).Flush() + } + })) + defer ts.Close() + + c := &Client{} + res, err := c.Get(ts.URL) + if err != nil { + t.Fatal(err) + } + var buf [10]byte + for _, str := range []string{"i", "am", "also", "known", "as", "comet"} { + say <- str + n, err := io.ReadFull(res.Body, buf[0:len(str)]) + if err != nil { + t.Fatalf("ReadFull on %q: %v", str, err) + } + if n != len(str) { + t.Fatalf("Receiving %q, only read %d bytes", str, n) + } + got := string(buf[0:n]) + if got != str { + t.Fatalf("Expected %q, got %q", str, got) + } + } + close(say) + _, err = io.ReadFull(res.Body, buf[0:1]) + if err != io.EOF { + t.Fatalf("at end expected EOF, got %v", err) + } +} + +type writeCountingConn struct { + net.Conn + count *int +} + +func (c *writeCountingConn) Write(p []byte) (int, error) { + *c.count++ + return c.Conn.Write(p) +} + +// TestClientWrites verifies that client requests are buffered and we +// don't send a TCP packet per line of the http request + body. +func TestClientWrites(t *testing.T) { + defer afterTest(t) + ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) { + })) + defer ts.Close() + + writes := 0 + dialer := func(netz string, addr string) (net.Conn, error) { + c, err := net.Dial(netz, addr) + if err == nil { + c = &writeCountingConn{c, &writes} + } + return c, err + } + c := &Client{Transport: &Transport{Dial: dialer}} + + _, err := c.Get(ts.URL) + if err != nil { + t.Fatal(err) + } + if writes != 1 { + t.Errorf("Get request did %d Write calls, want 1", writes) + } + + writes = 0 + _, err = c.PostForm(ts.URL, url.Values{"foo": {"bar"}}) + if err != nil { + t.Fatal(err) + } + if writes != 1 { + t.Errorf("Post request did %d Write calls, want 1", writes) + } +} + +func TestClientInsecureTransport(t *testing.T) { + defer afterTest(t) + ts := httptest.NewTLSServer(HandlerFunc(func(w ResponseWriter, r *Request) { + w.Write([]byte("Hello")) + })) + errc := make(chanWriter, 10) // but only expecting 1 + ts.Config.ErrorLog = log.New(errc, "", 0) + defer ts.Close() + + // TODO(bradfitz): add tests for skipping hostname checks too? + // would require a new cert for testing, and probably + // redundant with these tests. 
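+	// Exercise both modes: with InsecureSkipVerify the request against
+	// the test server's self-signed certificate should succeed; without
+	// it, the request should fail.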
+ for _, insecure := range []bool{true, false} { + tr := &Transport{ + TLSClientConfig: &tls.Config{ + InsecureSkipVerify: insecure, + }, + } + defer tr.CloseIdleConnections() + c := &Client{Transport: tr} + res, err := c.Get(ts.URL) + if (err == nil) != insecure { + t.Errorf("insecure=%v: got unexpected err=%v", insecure, err) + } + if res != nil { + res.Body.Close() + } + } + + select { + case v := <-errc: + if !strings.Contains(v, "TLS handshake error") { + t.Errorf("expected an error log message containing 'TLS handshake error'; got %q", v) + } + case <-time.After(5 * time.Second): + t.Errorf("timeout waiting for logged error") + } + +} + +func TestClientErrorWithRequestURI(t *testing.T) { + defer afterTest(t) + req, _ := NewRequest("GET", "http://localhost:1234/", nil) + req.RequestURI = "/this/field/is/illegal/and/should/error/" + _, err := DefaultClient.Do(req) + if err == nil { + t.Fatalf("expected an error") + } + if !strings.Contains(err.Error(), "RequestURI") { + t.Errorf("wanted error mentioning RequestURI; got error: %v", err) + } +} + +func newTLSTransport(t *testing.T, ts *httptest.Server) *Transport { + certs := x509.NewCertPool() + for _, c := range ts.TLS.Certificates { + roots, err := x509.ParseCertificates(c.Certificate[len(c.Certificate)-1]) + if err != nil { + t.Fatalf("error parsing server's root cert: %v", err) + } + for _, root := range roots { + certs.AddCert(root) + } + } + return &Transport{ + TLSClientConfig: &tls.Config{RootCAs: certs}, + } +} + +func TestClientWithCorrectTLSServerName(t *testing.T) { + defer afterTest(t) + ts := httptest.NewTLSServer(HandlerFunc(func(w ResponseWriter, r *Request) { + if r.TLS.ServerName != "127.0.0.1" { + t.Errorf("expected client to set ServerName 127.0.0.1, got: %q", r.TLS.ServerName) + } + })) + defer ts.Close() + + c := &Client{Transport: newTLSTransport(t, ts)} + if _, err := c.Get(ts.URL); err != nil { + t.Fatalf("expected successful TLS connection, got error: %v", err) + } +} + +func TestClientWithIncorrectTLSServerName(t *testing.T) { + defer afterTest(t) + ts := httptest.NewTLSServer(HandlerFunc(func(w ResponseWriter, r *Request) {})) + defer ts.Close() + errc := make(chanWriter, 10) // but only expecting 1 + ts.Config.ErrorLog = log.New(errc, "", 0) + + trans := newTLSTransport(t, ts) + trans.TLSClientConfig.ServerName = "badserver" + c := &Client{Transport: trans} + _, err := c.Get(ts.URL) + if err == nil { + t.Fatalf("expected an error") + } + if !strings.Contains(err.Error(), "127.0.0.1") || !strings.Contains(err.Error(), "badserver") { + t.Errorf("wanted error mentioning 127.0.0.1 and badserver; got error: %v", err) + } + select { + case v := <-errc: + if !strings.Contains(v, "TLS handshake error") { + t.Errorf("expected an error log message containing 'TLS handshake error'; got %q", v) + } + case <-time.After(5 * time.Second): + t.Errorf("timeout waiting for logged error") + } +} + +// Test for golang.org/issue/5829; the Transport should respect TLSClientConfig.ServerName +// when not empty. +// +// tls.Config.ServerName (non-empty, set to "example.com") takes +// precedence over "some-other-host.tld" which previously incorrectly +// took precedence. We don't actually connect to (or even resolve) +// "some-other-host.tld", though, because of the Transport.Dial hook. +// +// The httptest.Server has a cert with "example.com" as its name. 
+func TestTransportUsesTLSConfigServerName(t *testing.T) { + defer afterTest(t) + ts := httptest.NewTLSServer(HandlerFunc(func(w ResponseWriter, r *Request) { + w.Write([]byte("Hello")) + })) + defer ts.Close() + + tr := newTLSTransport(t, ts) + tr.TLSClientConfig.ServerName = "example.com" // one of httptest's Server cert names + tr.Dial = func(netw, addr string) (net.Conn, error) { + return net.Dial(netw, ts.Listener.Addr().String()) + } + defer tr.CloseIdleConnections() + c := &Client{Transport: tr} + res, err := c.Get("https://some-other-host.tld/") + if err != nil { + t.Fatal(err) + } + res.Body.Close() +} + +func TestResponseSetsTLSConnectionState(t *testing.T) { + defer afterTest(t) + ts := httptest.NewTLSServer(HandlerFunc(func(w ResponseWriter, r *Request) { + w.Write([]byte("Hello")) + })) + defer ts.Close() + + tr := newTLSTransport(t, ts) + tr.TLSClientConfig.CipherSuites = []uint16{tls.TLS_RSA_WITH_3DES_EDE_CBC_SHA} + tr.Dial = func(netw, addr string) (net.Conn, error) { + return net.Dial(netw, ts.Listener.Addr().String()) + } + defer tr.CloseIdleConnections() + c := &Client{Transport: tr} + res, err := c.Get("https://example.com/") + if err != nil { + t.Fatal(err) + } + defer res.Body.Close() + if res.TLS == nil { + t.Fatal("Response didn't set TLS Connection State.") + } + if got, want := res.TLS.CipherSuite, tls.TLS_RSA_WITH_3DES_EDE_CBC_SHA; got != want { + t.Errorf("TLS Cipher Suite = %d; want %d", got, want) + } +} + +// Verify Response.ContentLength is populated. http://golang.org/issue/4126 +func TestClientHeadContentLength(t *testing.T) { + defer afterTest(t) + ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) { + if v := r.FormValue("cl"); v != "" { + w.Header().Set("Content-Length", v) + } + })) + defer ts.Close() + tests := []struct { + suffix string + want int64 + }{ + {"/?cl=1234", 1234}, + {"/?cl=0", 0}, + {"", -1}, + } + for _, tt := range tests { + req, _ := NewRequest("HEAD", ts.URL+tt.suffix, nil) + res, err := DefaultClient.Do(req) + if err != nil { + t.Fatal(err) + } + if res.ContentLength != tt.want { + t.Errorf("Content-Length = %d; want %d", res.ContentLength, tt.want) + } + bs, err := ioutil.ReadAll(res.Body) + if err != nil { + t.Fatal(err) + } + if len(bs) != 0 { + t.Errorf("Unexpected content: %q", bs) + } + } +} + +func TestEmptyPasswordAuth(t *testing.T) { + defer afterTest(t) + gopher := "gopher" + ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) { + auth := r.Header.Get("Authorization") + if strings.HasPrefix(auth, "Basic ") { + encoded := auth[6:] + decoded, err := base64.StdEncoding.DecodeString(encoded) + if err != nil { + t.Fatal(err) + } + expected := gopher + ":" + s := string(decoded) + if expected != s { + t.Errorf("Invalid Authorization header. 
Got %q, wanted %q", s, expected) + } + } else { + t.Errorf("Invalid auth %q", auth) + } + })) + defer ts.Close() + c := &Client{} + req, err := NewRequest("GET", ts.URL, nil) + if err != nil { + t.Fatal(err) + } + req.URL.User = url.User(gopher) + resp, err := c.Do(req) + if err != nil { + t.Fatal(err) + } + defer resp.Body.Close() +} + +func TestBasicAuth(t *testing.T) { + defer afterTest(t) + tr := &recordingTransport{} + client := &Client{Transport: tr} + + url := "http://My%20User:My%20Pass@dummy.faketld/" + expected := "My User:My Pass" + client.Get(url) + + if tr.req.Method != "GET" { + t.Errorf("got method %q, want %q", tr.req.Method, "GET") + } + if tr.req.URL.String() != url { + t.Errorf("got URL %q, want %q", tr.req.URL.String(), url) + } + if tr.req.Header == nil { + t.Fatalf("expected non-nil request Header") + } + auth := tr.req.Header.Get("Authorization") + if strings.HasPrefix(auth, "Basic ") { + encoded := auth[6:] + decoded, err := base64.StdEncoding.DecodeString(encoded) + if err != nil { + t.Fatal(err) + } + s := string(decoded) + if expected != s { + t.Errorf("Invalid Authorization header. Got %q, wanted %q", s, expected) + } + } else { + t.Errorf("Invalid auth %q", auth) + } +} + +func TestClientTimeout(t *testing.T) { + if testing.Short() { + t.Skip("skipping in short mode") + } + defer afterTest(t) + sawRoot := make(chan bool, 1) + sawSlow := make(chan bool, 1) + ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) { + if r.URL.Path == "/" { + sawRoot <- true + Redirect(w, r, "/slow", StatusFound) + return + } + if r.URL.Path == "/slow" { + w.Write([]byte("Hello")) + w.(Flusher).Flush() + sawSlow <- true + time.Sleep(2 * time.Second) + return + } + })) + defer ts.Close() + const timeout = 500 * time.Millisecond + c := &Client{ + Timeout: timeout, + } + + res, err := c.Get(ts.URL) + if err != nil { + t.Fatal(err) + } + + select { + case <-sawRoot: + // good. + default: + t.Fatal("handler never got / request") + } + + select { + case <-sawSlow: + // good. + default: + t.Fatal("handler never got /slow request") + } + + errc := make(chan error, 1) + go func() { + _, err := ioutil.ReadAll(res.Body) + errc <- err + res.Body.Close() + }() + + const failTime = timeout * 2 + select { + case err := <-errc: + if err == nil { + t.Error("expected error from ReadAll") + } + // Expected error. + case <-time.After(failTime): + t.Errorf("timeout after %v waiting for timeout of %v", failTime, timeout) + } +} + +func TestClientRedirectEatsBody(t *testing.T) { + defer afterTest(t) + saw := make(chan string, 2) + ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) { + saw <- r.RemoteAddr + if r.URL.Path == "/" { + Redirect(w, r, "/foo", StatusFound) // which includes a body + } + })) + defer ts.Close() + + res, err := Get(ts.URL) + if err != nil { + t.Fatal(err) + } + _, err = ioutil.ReadAll(res.Body) + if err != nil { + t.Fatal(err) + } + res.Body.Close() + + var first string + select { + case first = <-saw: + default: + t.Fatal("server didn't see a request") + } + + var second string + select { + case second = <-saw: + default: + t.Fatal("server didn't see a second request") + } + + if first != second { + t.Fatal("server saw different client ports before & after the redirect") + } +} + +// eofReaderFunc is an io.Reader that runs itself, and then returns io.EOF. 
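+// It lets TestClientTrailers below mutate req.Trailer at precise points
+// while the request body is being read.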
+type eofReaderFunc func() + +func (f eofReaderFunc) Read(p []byte) (n int, err error) { + f() + return 0, io.EOF +} + +func TestClientTrailers(t *testing.T) { + defer afterTest(t) + ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) { + w.Header().Set("Connection", "close") + w.Header().Set("Trailer", "Server-Trailer-A, Server-Trailer-B") + w.Header().Add("Trailer", "Server-Trailer-C") + + var decl []string + for k := range r.Trailer { + decl = append(decl, k) + } + sort.Strings(decl) + + slurp, err := ioutil.ReadAll(r.Body) + if err != nil { + t.Errorf("Server reading request body: %v", err) + } + if string(slurp) != "foo" { + t.Errorf("Server read request body %q; want foo", slurp) + } + if r.Trailer == nil { + io.WriteString(w, "nil Trailer") + } else { + fmt.Fprintf(w, "decl: %v, vals: %s, %s", + decl, + r.Trailer.Get("Client-Trailer-A"), + r.Trailer.Get("Client-Trailer-B")) + } + + // TODO: golang.org/issue/7759: there's no way yet for + // the server to set trailers without hijacking, so do + // that for now, just to test the client. Later, in + // Go 1.4, it should be implicit that any mutations + // to w.Header() after the initial write are the + // trailers to be sent, if and only if they were + // previously declared with w.Header().Set("Trailer", + // ..keys..) + w.(Flusher).Flush() + conn, buf, _ := w.(Hijacker).Hijack() + t := Header{} + t.Set("Server-Trailer-A", "valuea") + t.Set("Server-Trailer-C", "valuec") // skipping B + buf.WriteString("0\r\n") // eof + t.Write(buf) + buf.WriteString("\r\n") // end of trailers + buf.Flush() + conn.Close() + })) + defer ts.Close() + + var req *Request + req, _ = NewRequest("POST", ts.URL, io.MultiReader( + eofReaderFunc(func() { + req.Trailer["Client-Trailer-A"] = []string{"valuea"} + }), + strings.NewReader("foo"), + eofReaderFunc(func() { + req.Trailer["Client-Trailer-B"] = []string{"valueb"} + }), + )) + req.Trailer = Header{ + "Client-Trailer-A": nil, // to be set later + "Client-Trailer-B": nil, // to be set later + } + req.ContentLength = -1 + res, err := DefaultClient.Do(req) + if err != nil { + t.Fatal(err) + } + if err := wantBody(res, err, "decl: [Client-Trailer-A Client-Trailer-B], vals: valuea, valueb"); err != nil { + t.Error(err) + } + want := Header{ + "Server-Trailer-A": []string{"valuea"}, + "Server-Trailer-B": nil, + "Server-Trailer-C": []string{"valuec"}, + } + if !reflect.DeepEqual(res.Trailer, want) { + t.Errorf("Response trailers = %#v; want %#v", res.Trailer, want) + } +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/core/http/cookie.go b/vendor/github.com/Azure/azure-sdk-for-go/core/http/cookie.go new file mode 100644 index 000000000..dc60ba87f --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/core/http/cookie.go @@ -0,0 +1,363 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package http + +import ( + "bytes" + "fmt" + "log" + "net" + "strconv" + "strings" + "time" +) + +// This implementation is done according to RFC 6265: +// +// http://tools.ietf.org/html/rfc6265 + +// A Cookie represents an HTTP cookie as sent in the Set-Cookie header of an +// HTTP response or the Cookie header of an HTTP request. +type Cookie struct { + Name string + Value string + Path string + Domain string + Expires time.Time + RawExpires string + + // MaxAge=0 means no 'Max-Age' attribute specified. 
+ // MaxAge<0 means delete cookie now, equivalently 'Max-Age: 0' + // MaxAge>0 means Max-Age attribute present and given in seconds + MaxAge int + Secure bool + HttpOnly bool + Raw string + Unparsed []string // Raw text of unparsed attribute-value pairs +} + +// readSetCookies parses all "Set-Cookie" values from +// the header h and returns the successfully parsed Cookies. +func readSetCookies(h Header) []*Cookie { + cookies := []*Cookie{} + for _, line := range h["Set-Cookie"] { + parts := strings.Split(strings.TrimSpace(line), ";") + if len(parts) == 1 && parts[0] == "" { + continue + } + parts[0] = strings.TrimSpace(parts[0]) + j := strings.Index(parts[0], "=") + if j < 0 { + continue + } + name, value := parts[0][:j], parts[0][j+1:] + if !isCookieNameValid(name) { + continue + } + value, success := parseCookieValue(value) + if !success { + continue + } + c := &Cookie{ + Name: name, + Value: value, + Raw: line, + } + for i := 1; i < len(parts); i++ { + parts[i] = strings.TrimSpace(parts[i]) + if len(parts[i]) == 0 { + continue + } + + attr, val := parts[i], "" + if j := strings.Index(attr, "="); j >= 0 { + attr, val = attr[:j], attr[j+1:] + } + lowerAttr := strings.ToLower(attr) + val, success = parseCookieValue(val) + if !success { + c.Unparsed = append(c.Unparsed, parts[i]) + continue + } + switch lowerAttr { + case "secure": + c.Secure = true + continue + case "httponly": + c.HttpOnly = true + continue + case "domain": + c.Domain = val + continue + case "max-age": + secs, err := strconv.Atoi(val) + if err != nil || secs != 0 && val[0] == '0' { + break + } + if secs <= 0 { + c.MaxAge = -1 + } else { + c.MaxAge = secs + } + continue + case "expires": + c.RawExpires = val + exptime, err := time.Parse(time.RFC1123, val) + if err != nil { + exptime, err = time.Parse("Mon, 02-Jan-2006 15:04:05 MST", val) + if err != nil { + c.Expires = time.Time{} + break + } + } + c.Expires = exptime.UTC() + continue + case "path": + c.Path = val + continue + } + c.Unparsed = append(c.Unparsed, parts[i]) + } + cookies = append(cookies, c) + } + return cookies +} + +// SetCookie adds a Set-Cookie header to the provided ResponseWriter's headers. +func SetCookie(w ResponseWriter, cookie *Cookie) { + w.Header().Add("Set-Cookie", cookie.String()) +} + +// String returns the serialization of the cookie for use in a Cookie +// header (if only Name and Value are set) or a Set-Cookie response +// header (if other fields are set). +func (c *Cookie) String() string { + var b bytes.Buffer + fmt.Fprintf(&b, "%s=%s", sanitizeCookieName(c.Name), sanitizeCookieValue(c.Value)) + if len(c.Path) > 0 { + fmt.Fprintf(&b, "; Path=%s", sanitizeCookiePath(c.Path)) + } + if len(c.Domain) > 0 { + if validCookieDomain(c.Domain) { + // A c.Domain containing illegal characters is not + // sanitized but simply dropped which turns the cookie + // into a host-only cookie. A leading dot is okay + // but won't be sent. + d := c.Domain + if d[0] == '.' 
{
+				d = d[1:]
+			}
+			fmt.Fprintf(&b, "; Domain=%s", d)
+		} else {
+			log.Printf("net/http: invalid Cookie.Domain %q; dropping domain attribute",
+				c.Domain)
+		}
+	}
+	if c.Expires.Unix() > 0 {
+		fmt.Fprintf(&b, "; Expires=%s", c.Expires.UTC().Format(time.RFC1123))
+	}
+	if c.MaxAge > 0 {
+		fmt.Fprintf(&b, "; Max-Age=%d", c.MaxAge)
+	} else if c.MaxAge < 0 {
+		fmt.Fprintf(&b, "; Max-Age=0")
+	}
+	if c.HttpOnly {
+		fmt.Fprintf(&b, "; HttpOnly")
+	}
+	if c.Secure {
+		fmt.Fprintf(&b, "; Secure")
+	}
+	return b.String()
+}
+
+// readCookies parses all "Cookie" values from the header h and
+// returns the successfully parsed Cookies.
+//
+// if filter isn't empty, only cookies of that name are returned
+func readCookies(h Header, filter string) []*Cookie {
+	cookies := []*Cookie{}
+	lines, ok := h["Cookie"]
+	if !ok {
+		return cookies
+	}
+
+	for _, line := range lines {
+		parts := strings.Split(strings.TrimSpace(line), ";")
+		if len(parts) == 1 && parts[0] == "" {
+			continue
+		}
+		// Per-line attributes
+		parsedPairs := 0
+		for i := 0; i < len(parts); i++ {
+			parts[i] = strings.TrimSpace(parts[i])
+			if len(parts[i]) == 0 {
+				continue
+			}
+			name, val := parts[i], ""
+			if j := strings.Index(name, "="); j >= 0 {
+				name, val = name[:j], name[j+1:]
+			}
+			if !isCookieNameValid(name) {
+				continue
+			}
+			if filter != "" && filter != name {
+				continue
+			}
+			val, success := parseCookieValue(val)
+			if !success {
+				continue
+			}
+			cookies = append(cookies, &Cookie{Name: name, Value: val})
+			parsedPairs++
+		}
+	}
+	return cookies
+}
+
+// validCookieDomain returns whether v is a valid cookie domain-value.
+func validCookieDomain(v string) bool {
+	if isCookieDomainName(v) {
+		return true
+	}
+	if net.ParseIP(v) != nil && !strings.Contains(v, ":") {
+		return true
+	}
+	return false
+}
+
+// isCookieDomainName returns whether s is a valid domain name or a valid
+// domain name with a leading dot '.'. It is almost a direct copy of
+// package net's isDomainName.
+func isCookieDomainName(s string) bool {
+	if len(s) == 0 {
+		return false
+	}
+	if len(s) > 255 {
+		return false
+	}
+
+	if s[0] == '.' {
+		// A cookie domain attribute may start with a leading dot.
+		s = s[1:]
+	}
+	last := byte('.')
+	ok := false // Ok once we've seen a letter.
+	partlen := 0
+	for i := 0; i < len(s); i++ {
+		c := s[i]
+		switch {
+		default:
+			return false
+		case 'a' <= c && c <= 'z' || 'A' <= c && c <= 'Z':
+			// No '_' allowed here (in contrast to package net).
+			ok = true
+			partlen++
+		case '0' <= c && c <= '9':
+			// fine
+			partlen++
+		case c == '-':
+			// Byte before dash cannot be dot.
+			if last == '.' {
+				return false
+			}
+			partlen++
+		case c == '.':
+			// Byte before dot cannot be dot, dash.
+			if last == '.' || last == '-' {
+				return false
+			}
+			if partlen > 63 || partlen == 0 {
+				return false
+			}
+			partlen = 0
+		}
+		last = c
+	}
+	if last == '-' || partlen > 63 {
+		return false
+	}
+
+	return ok
+}
+
+var cookieNameSanitizer = strings.NewReplacer("\n", "-", "\r", "-")
+
+func sanitizeCookieName(n string) string {
+	return cookieNameSanitizer.Replace(n)
+}
+
+// http://tools.ietf.org/html/rfc6265#section-4.1.1
+// cookie-value = *cookie-octet / ( DQUOTE *cookie-octet DQUOTE )
+// cookie-octet = %x21 / %x23-2B / %x2D-3A / %x3C-5B / %x5D-7E
+//           ; US-ASCII characters excluding CTLs,
+//           ; whitespace DQUOTE, comma, semicolon,
+//           ; and backslash
+// We loosen this as spaces and commas are common in cookie values
+// but we produce a quoted cookie-value when the value starts or ends
+// with a comma or space.
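+// For example, `a z` round-trips unchanged, while ` z` is emitted
+// quoted as `" z"`.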
+// See http://golang.org/issue/7243 for the discussion.
+func sanitizeCookieValue(v string) string {
+	v = sanitizeOrWarn("Cookie.Value", validCookieValueByte, v)
+	if len(v) == 0 {
+		return v
+	}
+	if v[0] == ' ' || v[0] == ',' || v[len(v)-1] == ' ' || v[len(v)-1] == ',' {
+		return `"` + v + `"`
+	}
+	return v
+}
+
+func validCookieValueByte(b byte) bool {
+	return 0x20 <= b && b < 0x7f && b != '"' && b != ';' && b != '\\'
+}
+
+// path-av    = "Path=" path-value
+// path-value = <any CHAR except CTLs or ";">
+func sanitizeCookiePath(v string) string {
+	return sanitizeOrWarn("Cookie.Path", validCookiePathByte, v)
+}
+
+func validCookiePathByte(b byte) bool {
+	return 0x20 <= b && b < 0x7f && b != ';'
+}
+
+func sanitizeOrWarn(fieldName string, valid func(byte) bool, v string) string {
+	ok := true
+	for i := 0; i < len(v); i++ {
+		if valid(v[i]) {
+			continue
+		}
+		log.Printf("net/http: invalid byte %q in %s; dropping invalid bytes", v[i], fieldName)
+		ok = false
+		break
+	}
+	if ok {
+		return v
+	}
+	buf := make([]byte, 0, len(v))
+	for i := 0; i < len(v); i++ {
+		if b := v[i]; valid(b) {
+			buf = append(buf, b)
+		}
+	}
+	return string(buf)
+}
+
+func parseCookieValue(raw string) (string, bool) {
+	// Strip the quotes, if present.
+	if len(raw) > 1 && raw[0] == '"' && raw[len(raw)-1] == '"' {
+		raw = raw[1 : len(raw)-1]
+	}
+	for i := 0; i < len(raw); i++ {
+		if !validCookieValueByte(raw[i]) {
+			return "", false
+		}
+	}
+	return raw, true
+}
+
+func isCookieNameValid(raw string) bool {
+	return strings.IndexFunc(raw, isNotToken) < 0
+}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/core/http/cookie_test.go b/vendor/github.com/Azure/azure-sdk-for-go/core/http/cookie_test.go
new file mode 100644
index 000000000..f78f37299
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/core/http/cookie_test.go
@@ -0,0 +1,380 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package http
+
+import (
+	"bytes"
+	"encoding/json"
+	"fmt"
+	"log"
+	"os"
+	"reflect"
+	"strings"
+	"testing"
+	"time"
+)
+
+var writeSetCookiesTests = []struct {
+	Cookie *Cookie
+	Raw    string
+}{
+	{
+		&Cookie{Name: "cookie-1", Value: "v$1"},
+		"cookie-1=v$1",
+	},
+	{
+		&Cookie{Name: "cookie-2", Value: "two", MaxAge: 3600},
+		"cookie-2=two; Max-Age=3600",
+	},
+	{
+		&Cookie{Name: "cookie-3", Value: "three", Domain: ".example.com"},
+		"cookie-3=three; Domain=example.com",
+	},
+	{
+		&Cookie{Name: "cookie-4", Value: "four", Path: "/restricted/"},
+		"cookie-4=four; Path=/restricted/",
+	},
+	{
+		&Cookie{Name: "cookie-5", Value: "five", Domain: "wrong;bad.abc"},
+		"cookie-5=five",
+	},
+	{
+		&Cookie{Name: "cookie-6", Value: "six", Domain: "bad-.abc"},
+		"cookie-6=six",
+	},
+	{
+		&Cookie{Name: "cookie-7", Value: "seven", Domain: "127.0.0.1"},
+		"cookie-7=seven; Domain=127.0.0.1",
+	},
+	{
+		&Cookie{Name: "cookie-8", Value: "eight", Domain: "::1"},
+		"cookie-8=eight",
+	},
+	// The "special" cookies have values containing commas or spaces which
+	// are disallowed by RFC 6265 but are common in the wild.
+ { + &Cookie{Name: "special-1", Value: "a z"}, + `special-1=a z`, + }, + { + &Cookie{Name: "special-2", Value: " z"}, + `special-2=" z"`, + }, + { + &Cookie{Name: "special-3", Value: "a "}, + `special-3="a "`, + }, + { + &Cookie{Name: "special-4", Value: " "}, + `special-4=" "`, + }, + { + &Cookie{Name: "special-5", Value: "a,z"}, + `special-5=a,z`, + }, + { + &Cookie{Name: "special-6", Value: ",z"}, + `special-6=",z"`, + }, + { + &Cookie{Name: "special-7", Value: "a,"}, + `special-7="a,"`, + }, + { + &Cookie{Name: "special-8", Value: ","}, + `special-8=","`, + }, + { + &Cookie{Name: "empty-value", Value: ""}, + `empty-value=`, + }, +} + +func TestWriteSetCookies(t *testing.T) { + defer log.SetOutput(os.Stderr) + var logbuf bytes.Buffer + log.SetOutput(&logbuf) + + for i, tt := range writeSetCookiesTests { + if g, e := tt.Cookie.String(), tt.Raw; g != e { + t.Errorf("Test %d, expecting:\n%s\nGot:\n%s\n", i, e, g) + continue + } + } + + if got, sub := logbuf.String(), "dropping domain attribute"; !strings.Contains(got, sub) { + t.Errorf("Expected substring %q in log output. Got:\n%s", sub, got) + } +} + +type headerOnlyResponseWriter Header + +func (ho headerOnlyResponseWriter) Header() Header { + return Header(ho) +} + +func (ho headerOnlyResponseWriter) Write([]byte) (int, error) { + panic("NOIMPL") +} + +func (ho headerOnlyResponseWriter) WriteHeader(int) { + panic("NOIMPL") +} + +func TestSetCookie(t *testing.T) { + m := make(Header) + SetCookie(headerOnlyResponseWriter(m), &Cookie{Name: "cookie-1", Value: "one", Path: "/restricted/"}) + SetCookie(headerOnlyResponseWriter(m), &Cookie{Name: "cookie-2", Value: "two", MaxAge: 3600}) + if l := len(m["Set-Cookie"]); l != 2 { + t.Fatalf("expected %d cookies, got %d", 2, l) + } + if g, e := m["Set-Cookie"][0], "cookie-1=one; Path=/restricted/"; g != e { + t.Errorf("cookie #1: want %q, got %q", e, g) + } + if g, e := m["Set-Cookie"][1], "cookie-2=two; Max-Age=3600"; g != e { + t.Errorf("cookie #2: want %q, got %q", e, g) + } +} + +var addCookieTests = []struct { + Cookies []*Cookie + Raw string +}{ + { + []*Cookie{}, + "", + }, + { + []*Cookie{{Name: "cookie-1", Value: "v$1"}}, + "cookie-1=v$1", + }, + { + []*Cookie{ + {Name: "cookie-1", Value: "v$1"}, + {Name: "cookie-2", Value: "v$2"}, + {Name: "cookie-3", Value: "v$3"}, + }, + "cookie-1=v$1; cookie-2=v$2; cookie-3=v$3", + }, +} + +func TestAddCookie(t *testing.T) { + for i, tt := range addCookieTests { + req, _ := NewRequest("GET", "http://example.com/", nil) + for _, c := range tt.Cookies { + req.AddCookie(c) + } + if g := req.Header.Get("Cookie"); g != tt.Raw { + t.Errorf("Test %d:\nwant: %s\n got: %s\n", i, tt.Raw, g) + continue + } + } +} + +var readSetCookiesTests = []struct { + Header Header + Cookies []*Cookie +}{ + { + Header{"Set-Cookie": {"Cookie-1=v$1"}}, + []*Cookie{{Name: "Cookie-1", Value: "v$1", Raw: "Cookie-1=v$1"}}, + }, + { + Header{"Set-Cookie": {"NID=99=YsDT5i3E-CXax-; expires=Wed, 23-Nov-2011 01:05:03 GMT; path=/; domain=.google.ch; HttpOnly"}}, + []*Cookie{{ + Name: "NID", + Value: "99=YsDT5i3E-CXax-", + Path: "/", + Domain: ".google.ch", + HttpOnly: true, + Expires: time.Date(2011, 11, 23, 1, 5, 3, 0, time.UTC), + RawExpires: "Wed, 23-Nov-2011 01:05:03 GMT", + Raw: "NID=99=YsDT5i3E-CXax-; expires=Wed, 23-Nov-2011 01:05:03 GMT; path=/; domain=.google.ch; HttpOnly", + }}, + }, + { + Header{"Set-Cookie": {".ASPXAUTH=7E3AA; expires=Wed, 07-Mar-2012 14:25:06 GMT; path=/; HttpOnly"}}, + []*Cookie{{ + Name: ".ASPXAUTH", + Value: "7E3AA", + Path: "/", + Expires: 
time.Date(2012, 3, 7, 14, 25, 6, 0, time.UTC), + RawExpires: "Wed, 07-Mar-2012 14:25:06 GMT", + HttpOnly: true, + Raw: ".ASPXAUTH=7E3AA; expires=Wed, 07-Mar-2012 14:25:06 GMT; path=/; HttpOnly", + }}, + }, + { + Header{"Set-Cookie": {"ASP.NET_SessionId=foo; path=/; HttpOnly"}}, + []*Cookie{{ + Name: "ASP.NET_SessionId", + Value: "foo", + Path: "/", + HttpOnly: true, + Raw: "ASP.NET_SessionId=foo; path=/; HttpOnly", + }}, + }, + // Make sure we can properly read back the Set-Cookie headers we create + // for values containing spaces or commas: + { + Header{"Set-Cookie": {`special-1=a z`}}, + []*Cookie{{Name: "special-1", Value: "a z", Raw: `special-1=a z`}}, + }, + { + Header{"Set-Cookie": {`special-2=" z"`}}, + []*Cookie{{Name: "special-2", Value: " z", Raw: `special-2=" z"`}}, + }, + { + Header{"Set-Cookie": {`special-3="a "`}}, + []*Cookie{{Name: "special-3", Value: "a ", Raw: `special-3="a "`}}, + }, + { + Header{"Set-Cookie": {`special-4=" "`}}, + []*Cookie{{Name: "special-4", Value: " ", Raw: `special-4=" "`}}, + }, + { + Header{"Set-Cookie": {`special-5=a,z`}}, + []*Cookie{{Name: "special-5", Value: "a,z", Raw: `special-5=a,z`}}, + }, + { + Header{"Set-Cookie": {`special-6=",z"`}}, + []*Cookie{{Name: "special-6", Value: ",z", Raw: `special-6=",z"`}}, + }, + { + Header{"Set-Cookie": {`special-7=a,`}}, + []*Cookie{{Name: "special-7", Value: "a,", Raw: `special-7=a,`}}, + }, + { + Header{"Set-Cookie": {`special-8=","`}}, + []*Cookie{{Name: "special-8", Value: ",", Raw: `special-8=","`}}, + }, + + // TODO(bradfitz): users have reported seeing this in the + // wild, but do browsers handle it? RFC 6265 just says "don't + // do that" (section 3) and then never mentions header folding + // again. + // Header{"Set-Cookie": {"ASP.NET_SessionId=foo; path=/; HttpOnly, .ASPXAUTH=7E3AA; expires=Wed, 07-Mar-2012 14:25:06 GMT; path=/; HttpOnly"}}, +} + +func toJSON(v interface{}) string { + b, err := json.Marshal(v) + if err != nil { + return fmt.Sprintf("%#v", v) + } + return string(b) +} + +func TestReadSetCookies(t *testing.T) { + for i, tt := range readSetCookiesTests { + for n := 0; n < 2; n++ { // to verify readSetCookies doesn't mutate its input + c := readSetCookies(tt.Header) + if !reflect.DeepEqual(c, tt.Cookies) { + t.Errorf("#%d readSetCookies: have\n%s\nwant\n%s\n", i, toJSON(c), toJSON(tt.Cookies)) + continue + } + } + } +} + +var readCookiesTests = []struct { + Header Header + Filter string + Cookies []*Cookie +}{ + { + Header{"Cookie": {"Cookie-1=v$1", "c2=v2"}}, + "", + []*Cookie{ + {Name: "Cookie-1", Value: "v$1"}, + {Name: "c2", Value: "v2"}, + }, + }, + { + Header{"Cookie": {"Cookie-1=v$1", "c2=v2"}}, + "c2", + []*Cookie{ + {Name: "c2", Value: "v2"}, + }, + }, + { + Header{"Cookie": {"Cookie-1=v$1; c2=v2"}}, + "", + []*Cookie{ + {Name: "Cookie-1", Value: "v$1"}, + {Name: "c2", Value: "v2"}, + }, + }, + { + Header{"Cookie": {"Cookie-1=v$1; c2=v2"}}, + "c2", + []*Cookie{ + {Name: "c2", Value: "v2"}, + }, + }, +} + +func TestReadCookies(t *testing.T) { + for i, tt := range readCookiesTests { + for n := 0; n < 2; n++ { // to verify readCookies doesn't mutate its input + c := readCookies(tt.Header, tt.Filter) + if !reflect.DeepEqual(c, tt.Cookies) { + t.Errorf("#%d readCookies:\nhave: %s\nwant: %s\n", i, toJSON(c), toJSON(tt.Cookies)) + continue + } + } + } +} + +func TestCookieSanitizeValue(t *testing.T) { + defer log.SetOutput(os.Stderr) + var logbuf bytes.Buffer + log.SetOutput(&logbuf) + + tests := []struct { + in, want string + }{ + {"foo", "foo"}, + {"foo;bar", "foobar"}, + 
{"foo\\bar", "foobar"}, + {"foo\"bar", "foobar"}, + {"\x00\x7e\x7f\x80", "\x7e"}, + {`"withquotes"`, "withquotes"}, + {"a z", "a z"}, + {" z", `" z"`}, + {"a ", `"a "`}, + } + for _, tt := range tests { + if got := sanitizeCookieValue(tt.in); got != tt.want { + t.Errorf("sanitizeCookieValue(%q) = %q; want %q", tt.in, got, tt.want) + } + } + + if got, sub := logbuf.String(), "dropping invalid bytes"; !strings.Contains(got, sub) { + t.Errorf("Expected substring %q in log output. Got:\n%s", sub, got) + } +} + +func TestCookieSanitizePath(t *testing.T) { + defer log.SetOutput(os.Stderr) + var logbuf bytes.Buffer + log.SetOutput(&logbuf) + + tests := []struct { + in, want string + }{ + {"/path", "/path"}, + {"/path with space/", "/path with space/"}, + {"/just;no;semicolon\x00orstuff/", "/justnosemicolonorstuff/"}, + } + for _, tt := range tests { + if got := sanitizeCookiePath(tt.in); got != tt.want { + t.Errorf("sanitizeCookiePath(%q) = %q; want %q", tt.in, got, tt.want) + } + } + + if got, sub := logbuf.String(), "dropping invalid bytes"; !strings.Contains(got, sub) { + t.Errorf("Expected substring %q in log output. Got:\n%s", sub, got) + } +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/core/http/cookiejar/jar.go b/vendor/github.com/Azure/azure-sdk-for-go/core/http/cookiejar/jar.go new file mode 100644 index 000000000..389ab58e4 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/core/http/cookiejar/jar.go @@ -0,0 +1,497 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package cookiejar implements an in-memory RFC 6265-compliant http.CookieJar. +package cookiejar + +import ( + "errors" + "fmt" + "net" + "net/http" + "net/url" + "sort" + "strings" + "sync" + "time" +) + +// PublicSuffixList provides the public suffix of a domain. For example: +// - the public suffix of "example.com" is "com", +// - the public suffix of "foo1.foo2.foo3.co.uk" is "co.uk", and +// - the public suffix of "bar.pvt.k12.ma.us" is "pvt.k12.ma.us". +// +// Implementations of PublicSuffixList must be safe for concurrent use by +// multiple goroutines. +// +// An implementation that always returns "" is valid and may be useful for +// testing but it is not secure: it means that the HTTP server for foo.com can +// set a cookie for bar.com. +// +// A public suffix list implementation is in the package +// code.google.com/p/go.net/publicsuffix. +type PublicSuffixList interface { + // PublicSuffix returns the public suffix of domain. + // + // TODO: specify which of the caller and callee is responsible for IP + // addresses, for leading and trailing dots, for case sensitivity, and + // for IDN/Punycode. + PublicSuffix(domain string) string + + // String returns a description of the source of this public suffix + // list. The description will typically contain something like a time + // stamp or version number. + String() string +} + +// Options are the options for creating a new Jar. +type Options struct { + // PublicSuffixList is the public suffix list that determines whether + // an HTTP server can set a cookie for a domain. + // + // A nil value is valid and may be useful for testing but it is not + // secure: it means that the HTTP server for foo.co.uk can set a cookie + // for bar.co.uk. + PublicSuffixList PublicSuffixList +} + +// Jar implements the http.CookieJar interface from the net/http package. 
+type Jar struct {
+	psList PublicSuffixList
+
+	// mu locks the remaining fields.
+	mu sync.Mutex
+
+	// entries is a set of entries, keyed by their eTLD+1 and subkeyed by
+	// their name/domain/path.
+	entries map[string]map[string]entry
+
+	// nextSeqNum is the next sequence number assigned to a new cookie
+	// created by SetCookies.
+	nextSeqNum uint64
+}
+
+// New returns a new cookie jar. A nil *Options is equivalent to a zero
+// Options.
+func New(o *Options) (*Jar, error) {
+	jar := &Jar{
+		entries: make(map[string]map[string]entry),
+	}
+	if o != nil {
+		jar.psList = o.PublicSuffixList
+	}
+	return jar, nil
+}
+
+// entry is the internal representation of a cookie.
+//
+// This struct type is not used outside of this package per se, but the exported
+// fields are those of RFC 6265.
+type entry struct {
+	Name       string
+	Value      string
+	Domain     string
+	Path       string
+	Secure     bool
+	HttpOnly   bool
+	Persistent bool
+	HostOnly   bool
+	Expires    time.Time
+	Creation   time.Time
+	LastAccess time.Time
+
+	// seqNum is a sequence number so that Cookies returns cookies in a
+	// deterministic order, even for cookies that have equal Path length and
+	// equal Creation time. This simplifies testing.
+	seqNum uint64
+}
+
+// id returns the domain;path;name triple of e as an id.
+func (e *entry) id() string {
+	return fmt.Sprintf("%s;%s;%s", e.Domain, e.Path, e.Name)
+}
+
+// shouldSend determines whether e's cookie qualifies to be included in a
+// request to host/path. It is the caller's responsibility to check if the
+// cookie is expired.
+func (e *entry) shouldSend(https bool, host, path string) bool {
+	return e.domainMatch(host) && e.pathMatch(path) && (https || !e.Secure)
+}
+
+// domainMatch implements "domain-match" of RFC 6265 section 5.1.3.
+func (e *entry) domainMatch(host string) bool {
+	if e.Domain == host {
+		return true
+	}
+	return !e.HostOnly && hasDotSuffix(host, e.Domain)
+}
+
+// pathMatch implements "path-match" according to RFC 6265 section 5.1.4.
+func (e *entry) pathMatch(requestPath string) bool {
+	if requestPath == e.Path {
+		return true
+	}
+	if strings.HasPrefix(requestPath, e.Path) {
+		if e.Path[len(e.Path)-1] == '/' {
+			return true // The "/any/" matches "/any/path" case.
+		} else if requestPath[len(e.Path)] == '/' {
+			return true // The "/any" matches "/any/path" case.
+		}
+	}
+	return false
+}
+
+// hasDotSuffix reports whether s ends in "."+suffix.
+func hasDotSuffix(s, suffix string) bool {
+	return len(s) > len(suffix) && s[len(s)-len(suffix)-1] == '.' && s[len(s)-len(suffix):] == suffix
+}
+
+// byPathLength is a []entry sort.Interface that sorts according to RFC 6265
+// section 5.4 point 2: by longest path and then by earliest creation time.
+type byPathLength []entry
+
+func (s byPathLength) Len() int { return len(s) }
+
+func (s byPathLength) Less(i, j int) bool {
+	if len(s[i].Path) != len(s[j].Path) {
+		return len(s[i].Path) > len(s[j].Path)
+	}
+	if !s[i].Creation.Equal(s[j].Creation) {
+		return s[i].Creation.Before(s[j].Creation)
+	}
+	return s[i].seqNum < s[j].seqNum
+}
+
+func (s byPathLength) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
+
+// Cookies implements the Cookies method of the http.CookieJar interface.
+//
+// It returns an empty slice if the URL's scheme is not HTTP or HTTPS.
+func (j *Jar) Cookies(u *url.URL) (cookies []*http.Cookie) {
+	return j.cookies(u, time.Now())
+}
+
+// cookies is like Cookies but takes the current time as a parameter.
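+// (This lets tests exercise cookie expiry deterministically.)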
+func (j *Jar) cookies(u *url.URL, now time.Time) (cookies []*http.Cookie) { + if u.Scheme != "http" && u.Scheme != "https" { + return cookies + } + host, err := canonicalHost(u.Host) + if err != nil { + return cookies + } + key := jarKey(host, j.psList) + + j.mu.Lock() + defer j.mu.Unlock() + + submap := j.entries[key] + if submap == nil { + return cookies + } + + https := u.Scheme == "https" + path := u.Path + if path == "" { + path = "/" + } + + modified := false + var selected []entry + for id, e := range submap { + if e.Persistent && !e.Expires.After(now) { + delete(submap, id) + modified = true + continue + } + if !e.shouldSend(https, host, path) { + continue + } + e.LastAccess = now + submap[id] = e + selected = append(selected, e) + modified = true + } + if modified { + if len(submap) == 0 { + delete(j.entries, key) + } else { + j.entries[key] = submap + } + } + + sort.Sort(byPathLength(selected)) + for _, e := range selected { + cookies = append(cookies, &http.Cookie{Name: e.Name, Value: e.Value}) + } + + return cookies +} + +// SetCookies implements the SetCookies method of the http.CookieJar interface. +// +// It does nothing if the URL's scheme is not HTTP or HTTPS. +func (j *Jar) SetCookies(u *url.URL, cookies []*http.Cookie) { + j.setCookies(u, cookies, time.Now()) +} + +// setCookies is like SetCookies but takes the current time as parameter. +func (j *Jar) setCookies(u *url.URL, cookies []*http.Cookie, now time.Time) { + if len(cookies) == 0 { + return + } + if u.Scheme != "http" && u.Scheme != "https" { + return + } + host, err := canonicalHost(u.Host) + if err != nil { + return + } + key := jarKey(host, j.psList) + defPath := defaultPath(u.Path) + + j.mu.Lock() + defer j.mu.Unlock() + + submap := j.entries[key] + + modified := false + for _, cookie := range cookies { + e, remove, err := j.newEntry(cookie, now, defPath, host) + if err != nil { + continue + } + id := e.id() + if remove { + if submap != nil { + if _, ok := submap[id]; ok { + delete(submap, id) + modified = true + } + } + continue + } + if submap == nil { + submap = make(map[string]entry) + } + + if old, ok := submap[id]; ok { + e.Creation = old.Creation + e.seqNum = old.seqNum + } else { + e.Creation = now + e.seqNum = j.nextSeqNum + j.nextSeqNum++ + } + e.LastAccess = now + submap[id] = e + modified = true + } + + if modified { + if len(submap) == 0 { + delete(j.entries, key) + } else { + j.entries[key] = submap + } + } +} + +// canonicalHost strips port from host if present and returns the canonicalized +// host name. +func canonicalHost(host string) (string, error) { + var err error + host = strings.ToLower(host) + if hasPort(host) { + host, _, err = net.SplitHostPort(host) + if err != nil { + return "", err + } + } + if strings.HasSuffix(host, ".") { + // Strip trailing dot from fully qualified domain names. + host = host[:len(host)-1] + } + return toASCII(host) +} + +// hasPort reports whether host contains a port number. host may be a host +// name, an IPv4 or an IPv6 address. +func hasPort(host string) bool { + colons := strings.Count(host, ":") + if colons == 0 { + return false + } + if colons == 1 { + return true + } + return host[0] == '[' && strings.Contains(host, "]:") +} + +// jarKey returns the key to use for a jar. 
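+// The key is effectively the eTLD+1 of host, e.g. (from jarKeyTests below):
+//
+//	jarKey("foo.www.bbc.co.uk", testPSL{}) == "bbc.co.uk"
+//	jarKey("192.168.0.5", testPSL{})       == "192.168.0.5" // IPs key as themselves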
+func jarKey(host string, psl PublicSuffixList) string { + if isIP(host) { + return host + } + + var i int + if psl == nil { + i = strings.LastIndex(host, ".") + if i == -1 { + return host + } + } else { + suffix := psl.PublicSuffix(host) + if suffix == host { + return host + } + i = len(host) - len(suffix) + if i <= 0 || host[i-1] != '.' { + // The provided public suffix list psl is broken. + // Storing cookies under host is a safe stopgap. + return host + } + } + prevDot := strings.LastIndex(host[:i-1], ".") + return host[prevDot+1:] +} + +// isIP reports whether host is an IP address. +func isIP(host string) bool { + return net.ParseIP(host) != nil +} + +// defaultPath returns the directory part of an URL's path according to +// RFC 6265 section 5.1.4. +func defaultPath(path string) string { + if len(path) == 0 || path[0] != '/' { + return "/" // Path is empty or malformed. + } + + i := strings.LastIndex(path, "/") // Path starts with "/", so i != -1. + if i == 0 { + return "/" // Path has the form "/abc". + } + return path[:i] // Path is either of form "/abc/xyz" or "/abc/xyz/". +} + +// newEntry creates an entry from a http.Cookie c. now is the current time and +// is compared to c.Expires to determine deletion of c. defPath and host are the +// default-path and the canonical host name of the URL c was received from. +// +// remove records whether the jar should delete this cookie, as it has already +// expired with respect to now. In this case, e may be incomplete, but it will +// be valid to call e.id (which depends on e's Name, Domain and Path). +// +// A malformed c.Domain will result in an error. +func (j *Jar) newEntry(c *http.Cookie, now time.Time, defPath, host string) (e entry, remove bool, err error) { + e.Name = c.Name + + if c.Path == "" || c.Path[0] != '/' { + e.Path = defPath + } else { + e.Path = c.Path + } + + e.Domain, e.HostOnly, err = j.domainAndType(host, c.Domain) + if err != nil { + return e, false, err + } + + // MaxAge takes precedence over Expires. + if c.MaxAge < 0 { + return e, true, nil + } else if c.MaxAge > 0 { + e.Expires = now.Add(time.Duration(c.MaxAge) * time.Second) + e.Persistent = true + } else { + if c.Expires.IsZero() { + e.Expires = endOfTime + e.Persistent = false + } else { + if !c.Expires.After(now) { + return e, true, nil + } + e.Expires = c.Expires + e.Persistent = true + } + } + + e.Value = c.Value + e.Secure = c.Secure + e.HttpOnly = c.HttpOnly + + return e, false, nil +} + +var ( + errIllegalDomain = errors.New("cookiejar: illegal cookie domain attribute") + errMalformedDomain = errors.New("cookiejar: malformed cookie domain attribute") + errNoHostname = errors.New("cookiejar: no host name available (IP only)") +) + +// endOfTime is the time when session (non-persistent) cookies expire. +// This instant is representable in most date/time formats (not just +// Go's time.Time) and should be far enough in the future. +var endOfTime = time.Date(9999, 12, 31, 23, 59, 59, 0, time.UTC) + +// domainAndType determines the cookie's domain and hostOnly attribute. +func (j *Jar) domainAndType(host, domain string) (string, bool, error) { + if domain == "" { + // No domain attribute in the SetCookie header indicates a + // host cookie. + return host, true, nil + } + + if isIP(host) { + // According to RFC 6265 domain-matching includes not being + // an IP address. + // TODO: This might be relaxed as in common browsers. 
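+		// For example, a response from http://127.0.0.1/ carrying
+		// "Set-Cookie: a=1; domain=127.0.0.1" is rejected here, while the
+		// same cookie without a domain attribute is stored as a host
+		// cookie (see domainAndTypeTests in jar_test.go).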
+ return "", false, errNoHostname + } + + // From here on: If the cookie is valid, it is a domain cookie (with + // the one exception of a public suffix below). + // See RFC 6265 section 5.2.3. + if domain[0] == '.' { + domain = domain[1:] + } + + if len(domain) == 0 || domain[0] == '.' { + // Received either "Domain=." or "Domain=..some.thing", + // both are illegal. + return "", false, errMalformedDomain + } + domain = strings.ToLower(domain) + + if domain[len(domain)-1] == '.' { + // We received stuff like "Domain=www.example.com.". + // Browsers do handle such stuff (actually differently) but + // RFC 6265 seems to be clear here (e.g. section 4.1.2.3) in + // requiring a reject. 4.1.2.3 is not normative, but + // "Domain Matching" (5.1.3) and "Canonicalized Host Names" + // (5.1.2) are. + return "", false, errMalformedDomain + } + + // See RFC 6265 section 5.3 #5. + if j.psList != nil { + if ps := j.psList.PublicSuffix(domain); ps != "" && !hasDotSuffix(domain, ps) { + if host == domain { + // This is the one exception in which a cookie + // with a domain attribute is a host cookie. + return host, true, nil + } + return "", false, errIllegalDomain + } + } + + // The domain must domain-match host: www.mycompany.com cannot + // set cookies for .ourcompetitors.com. + if host != domain && !hasDotSuffix(host, domain) { + return "", false, errIllegalDomain + } + + return domain, false, nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/core/http/cookiejar/jar_test.go b/vendor/github.com/Azure/azure-sdk-for-go/core/http/cookiejar/jar_test.go new file mode 100644 index 000000000..3aa601586 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/core/http/cookiejar/jar_test.go @@ -0,0 +1,1267 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cookiejar + +import ( + "fmt" + "net/http" + "net/url" + "sort" + "strings" + "testing" + "time" +) + +// tNow is the synthetic current time used as now during testing. +var tNow = time.Date(2013, 1, 1, 12, 0, 0, 0, time.UTC) + +// testPSL implements PublicSuffixList with just two rules: "co.uk" +// and the default rule "*". +type testPSL struct{} + +func (testPSL) String() string { + return "testPSL" +} +func (testPSL) PublicSuffix(d string) string { + if d == "co.uk" || strings.HasSuffix(d, ".co.uk") { + return "co.uk" + } + return d[strings.LastIndex(d, ".")+1:] +} + +// newTestJar creates an empty Jar with testPSL as the public suffix list. 
+func newTestJar() *Jar { + jar, err := New(&Options{PublicSuffixList: testPSL{}}) + if err != nil { + panic(err) + } + return jar +} + +var hasDotSuffixTests = [...]struct { + s, suffix string +}{ + {"", ""}, + {"", "."}, + {"", "x"}, + {".", ""}, + {".", "."}, + {".", ".."}, + {".", "x"}, + {".", "x."}, + {".", ".x"}, + {".", ".x."}, + {"x", ""}, + {"x", "."}, + {"x", ".."}, + {"x", "x"}, + {"x", "x."}, + {"x", ".x"}, + {"x", ".x."}, + {".x", ""}, + {".x", "."}, + {".x", ".."}, + {".x", "x"}, + {".x", "x."}, + {".x", ".x"}, + {".x", ".x."}, + {"x.", ""}, + {"x.", "."}, + {"x.", ".."}, + {"x.", "x"}, + {"x.", "x."}, + {"x.", ".x"}, + {"x.", ".x."}, + {"com", ""}, + {"com", "m"}, + {"com", "om"}, + {"com", "com"}, + {"com", ".com"}, + {"com", "x.com"}, + {"com", "xcom"}, + {"com", "xorg"}, + {"com", "org"}, + {"com", "rg"}, + {"foo.com", ""}, + {"foo.com", "m"}, + {"foo.com", "om"}, + {"foo.com", "com"}, + {"foo.com", ".com"}, + {"foo.com", "o.com"}, + {"foo.com", "oo.com"}, + {"foo.com", "foo.com"}, + {"foo.com", ".foo.com"}, + {"foo.com", "x.foo.com"}, + {"foo.com", "xfoo.com"}, + {"foo.com", "xfoo.org"}, + {"foo.com", "foo.org"}, + {"foo.com", "oo.org"}, + {"foo.com", "o.org"}, + {"foo.com", ".org"}, + {"foo.com", "org"}, + {"foo.com", "rg"}, +} + +func TestHasDotSuffix(t *testing.T) { + for _, tc := range hasDotSuffixTests { + got := hasDotSuffix(tc.s, tc.suffix) + want := strings.HasSuffix(tc.s, "."+tc.suffix) + if got != want { + t.Errorf("s=%q, suffix=%q: got %v, want %v", tc.s, tc.suffix, got, want) + } + } +} + +var canonicalHostTests = map[string]string{ + "www.example.com": "www.example.com", + "WWW.EXAMPLE.COM": "www.example.com", + "wWw.eXAmple.CoM": "www.example.com", + "www.example.com:80": "www.example.com", + "192.168.0.10": "192.168.0.10", + "192.168.0.5:8080": "192.168.0.5", + "2001:4860:0:2001::68": "2001:4860:0:2001::68", + "[2001:4860:0:::68]:8080": "2001:4860:0:::68", + "www.bücher.de": "www.xn--bcher-kva.de", + "www.example.com.": "www.example.com", + "[bad.unmatched.bracket:": "error", +} + +func TestCanonicalHost(t *testing.T) { + for h, want := range canonicalHostTests { + got, err := canonicalHost(h) + if want == "error" { + if err == nil { + t.Errorf("%q: got nil error, want non-nil", h) + } + continue + } + if err != nil { + t.Errorf("%q: %v", h, err) + continue + } + if got != want { + t.Errorf("%q: got %q, want %q", h, got, want) + continue + } + } +} + +var hasPortTests = map[string]bool{ + "www.example.com": false, + "www.example.com:80": true, + "127.0.0.1": false, + "127.0.0.1:8080": true, + "2001:4860:0:2001::68": false, + "[2001::0:::68]:80": true, +} + +func TestHasPort(t *testing.T) { + for host, want := range hasPortTests { + if got := hasPort(host); got != want { + t.Errorf("%q: got %t, want %t", host, got, want) + } + } +} + +var jarKeyTests = map[string]string{ + "foo.www.example.com": "example.com", + "www.example.com": "example.com", + "example.com": "example.com", + "com": "com", + "foo.www.bbc.co.uk": "bbc.co.uk", + "www.bbc.co.uk": "bbc.co.uk", + "bbc.co.uk": "bbc.co.uk", + "co.uk": "co.uk", + "uk": "uk", + "192.168.0.5": "192.168.0.5", +} + +func TestJarKey(t *testing.T) { + for host, want := range jarKeyTests { + if got := jarKey(host, testPSL{}); got != want { + t.Errorf("%q: got %q, want %q", host, got, want) + } + } +} + +var jarKeyNilPSLTests = map[string]string{ + "foo.www.example.com": "example.com", + "www.example.com": "example.com", + "example.com": "example.com", + "com": "com", + "foo.www.bbc.co.uk": "co.uk", + "www.bbc.co.uk": 
"co.uk", + "bbc.co.uk": "co.uk", + "co.uk": "co.uk", + "uk": "uk", + "192.168.0.5": "192.168.0.5", +} + +func TestJarKeyNilPSL(t *testing.T) { + for host, want := range jarKeyNilPSLTests { + if got := jarKey(host, nil); got != want { + t.Errorf("%q: got %q, want %q", host, got, want) + } + } +} + +var isIPTests = map[string]bool{ + "127.0.0.1": true, + "1.2.3.4": true, + "2001:4860:0:2001::68": true, + "example.com": false, + "1.1.1.300": false, + "www.foo.bar.net": false, + "123.foo.bar.net": false, +} + +func TestIsIP(t *testing.T) { + for host, want := range isIPTests { + if got := isIP(host); got != want { + t.Errorf("%q: got %t, want %t", host, got, want) + } + } +} + +var defaultPathTests = map[string]string{ + "/": "/", + "/abc": "/", + "/abc/": "/abc", + "/abc/xyz": "/abc", + "/abc/xyz/": "/abc/xyz", + "/a/b/c.html": "/a/b", + "": "/", + "strange": "/", + "//": "/", + "/a//b": "/a/", + "/a/./b": "/a/.", + "/a/../b": "/a/..", +} + +func TestDefaultPath(t *testing.T) { + for path, want := range defaultPathTests { + if got := defaultPath(path); got != want { + t.Errorf("%q: got %q, want %q", path, got, want) + } + } +} + +var domainAndTypeTests = [...]struct { + host string // host Set-Cookie header was received from + domain string // domain attribute in Set-Cookie header + wantDomain string // expected domain of cookie + wantHostOnly bool // expected host-cookie flag + wantErr error // expected error +}{ + {"www.example.com", "", "www.example.com", true, nil}, + {"127.0.0.1", "", "127.0.0.1", true, nil}, + {"2001:4860:0:2001::68", "", "2001:4860:0:2001::68", true, nil}, + {"www.example.com", "example.com", "example.com", false, nil}, + {"www.example.com", ".example.com", "example.com", false, nil}, + {"www.example.com", "www.example.com", "www.example.com", false, nil}, + {"www.example.com", ".www.example.com", "www.example.com", false, nil}, + {"foo.sso.example.com", "sso.example.com", "sso.example.com", false, nil}, + {"bar.co.uk", "bar.co.uk", "bar.co.uk", false, nil}, + {"foo.bar.co.uk", ".bar.co.uk", "bar.co.uk", false, nil}, + {"127.0.0.1", "127.0.0.1", "", false, errNoHostname}, + {"2001:4860:0:2001::68", "2001:4860:0:2001::68", "2001:4860:0:2001::68", false, errNoHostname}, + {"www.example.com", ".", "", false, errMalformedDomain}, + {"www.example.com", "..", "", false, errMalformedDomain}, + {"www.example.com", "other.com", "", false, errIllegalDomain}, + {"www.example.com", "com", "", false, errIllegalDomain}, + {"www.example.com", ".com", "", false, errIllegalDomain}, + {"foo.bar.co.uk", ".co.uk", "", false, errIllegalDomain}, + {"127.www.0.0.1", "127.0.0.1", "", false, errIllegalDomain}, + {"com", "", "com", true, nil}, + {"com", "com", "com", true, nil}, + {"com", ".com", "com", true, nil}, + {"co.uk", "", "co.uk", true, nil}, + {"co.uk", "co.uk", "co.uk", true, nil}, + {"co.uk", ".co.uk", "co.uk", true, nil}, +} + +func TestDomainAndType(t *testing.T) { + jar := newTestJar() + for _, tc := range domainAndTypeTests { + domain, hostOnly, err := jar.domainAndType(tc.host, tc.domain) + if err != tc.wantErr { + t.Errorf("%q/%q: got %q error, want %q", + tc.host, tc.domain, err, tc.wantErr) + continue + } + if err != nil { + continue + } + if domain != tc.wantDomain || hostOnly != tc.wantHostOnly { + t.Errorf("%q/%q: got %q/%t want %q/%t", + tc.host, tc.domain, domain, hostOnly, + tc.wantDomain, tc.wantHostOnly) + } + } +} + +// expiresIn creates an expires attribute delta seconds from tNow. 
+func expiresIn(delta int) string { + t := tNow.Add(time.Duration(delta) * time.Second) + return "expires=" + t.Format(time.RFC1123) +} + +// mustParseURL parses s to an URL and panics on error. +func mustParseURL(s string) *url.URL { + u, err := url.Parse(s) + if err != nil || u.Scheme == "" || u.Host == "" { + panic(fmt.Sprintf("Unable to parse URL %s.", s)) + } + return u +} + +// jarTest encapsulates the following actions on a jar: +// 1. Perform SetCookies with fromURL and the cookies from setCookies. +// (Done at time tNow + 0 ms.) +// 2. Check that the entries in the jar matches content. +// (Done at time tNow + 1001 ms.) +// 3. For each query in tests: Check that Cookies with toURL yields the +// cookies in want. +// (Query n done at tNow + (n+2)*1001 ms.) +type jarTest struct { + description string // The description of what this test is supposed to test + fromURL string // The full URL of the request from which Set-Cookie headers where received + setCookies []string // All the cookies received from fromURL + content string // The whole (non-expired) content of the jar + queries []query // Queries to test the Jar.Cookies method +} + +// query contains one test of the cookies returned from Jar.Cookies. +type query struct { + toURL string // the URL in the Cookies call + want string // the expected list of cookies (order matters) +} + +// run runs the jarTest. +func (test jarTest) run(t *testing.T, jar *Jar) { + now := tNow + + // Populate jar with cookies. + setCookies := make([]*http.Cookie, len(test.setCookies)) + for i, cs := range test.setCookies { + cookies := (&http.Response{Header: http.Header{"Set-Cookie": {cs}}}).Cookies() + if len(cookies) != 1 { + panic(fmt.Sprintf("Wrong cookie line %q: %#v", cs, cookies)) + } + setCookies[i] = cookies[0] + } + jar.setCookies(mustParseURL(test.fromURL), setCookies, now) + now = now.Add(1001 * time.Millisecond) + + // Serialize non-expired entries in the form "name1=val1 name2=val2". + var cs []string + for _, submap := range jar.entries { + for _, cookie := range submap { + if !cookie.Expires.After(now) { + continue + } + cs = append(cs, cookie.Name+"="+cookie.Value) + } + } + sort.Strings(cs) + got := strings.Join(cs, " ") + + // Make sure jar content matches our expectations. + if got != test.content { + t.Errorf("Test %q Content\ngot %q\nwant %q", + test.description, got, test.content) + } + + // Test different calls to Cookies. + for i, query := range test.queries { + now = now.Add(1001 * time.Millisecond) + var s []string + for _, c := range jar.cookies(mustParseURL(query.toURL), now) { + s = append(s, c.Name+"="+c.Value) + } + if got := strings.Join(s, " "); got != query.want { + t.Errorf("Test %q #%d\ngot %q\nwant %q", test.description, i, got, query.want) + } + } +} + +// basicsTests contains fundamental tests. Each jarTest has to be performed on +// a fresh, empty Jar. 
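+// (The 1001 ms clock steps in run are presumably chosen so that every step
+// crosses a whole-second boundary, keeping max-age comparisons unambiguous.)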
+var basicsTests = [...]jarTest{ + { + "Retrieval of a plain host cookie.", + "http://www.host.test/", + []string{"A=a"}, + "A=a", + []query{ + {"http://www.host.test", "A=a"}, + {"http://www.host.test/", "A=a"}, + {"http://www.host.test/some/path", "A=a"}, + {"https://www.host.test", "A=a"}, + {"https://www.host.test/", "A=a"}, + {"https://www.host.test/some/path", "A=a"}, + {"ftp://www.host.test", ""}, + {"ftp://www.host.test/", ""}, + {"ftp://www.host.test/some/path", ""}, + {"http://www.other.org", ""}, + {"http://sibling.host.test", ""}, + {"http://deep.www.host.test", ""}, + }, + }, + { + "Secure cookies are not returned to http.", + "http://www.host.test/", + []string{"A=a; secure"}, + "A=a", + []query{ + {"http://www.host.test", ""}, + {"http://www.host.test/", ""}, + {"http://www.host.test/some/path", ""}, + {"https://www.host.test", "A=a"}, + {"https://www.host.test/", "A=a"}, + {"https://www.host.test/some/path", "A=a"}, + }, + }, + { + "Explicit path.", + "http://www.host.test/", + []string{"A=a; path=/some/path"}, + "A=a", + []query{ + {"http://www.host.test", ""}, + {"http://www.host.test/", ""}, + {"http://www.host.test/some", ""}, + {"http://www.host.test/some/", ""}, + {"http://www.host.test/some/path", "A=a"}, + {"http://www.host.test/some/paths", ""}, + {"http://www.host.test/some/path/foo", "A=a"}, + {"http://www.host.test/some/path/foo/", "A=a"}, + }, + }, + { + "Implicit path #1: path is a directory.", + "http://www.host.test/some/path/", + []string{"A=a"}, + "A=a", + []query{ + {"http://www.host.test", ""}, + {"http://www.host.test/", ""}, + {"http://www.host.test/some", ""}, + {"http://www.host.test/some/", ""}, + {"http://www.host.test/some/path", "A=a"}, + {"http://www.host.test/some/paths", ""}, + {"http://www.host.test/some/path/foo", "A=a"}, + {"http://www.host.test/some/path/foo/", "A=a"}, + }, + }, + { + "Implicit path #2: path is not a directory.", + "http://www.host.test/some/path/index.html", + []string{"A=a"}, + "A=a", + []query{ + {"http://www.host.test", ""}, + {"http://www.host.test/", ""}, + {"http://www.host.test/some", ""}, + {"http://www.host.test/some/", ""}, + {"http://www.host.test/some/path", "A=a"}, + {"http://www.host.test/some/paths", ""}, + {"http://www.host.test/some/path/foo", "A=a"}, + {"http://www.host.test/some/path/foo/", "A=a"}, + }, + }, + { + "Implicit path #3: no path in URL at all.", + "http://www.host.test", + []string{"A=a"}, + "A=a", + []query{ + {"http://www.host.test", "A=a"}, + {"http://www.host.test/", "A=a"}, + {"http://www.host.test/some/path", "A=a"}, + }, + }, + { + "Cookies are sorted by path length.", + "http://www.host.test/", + []string{ + "A=a; path=/foo/bar", + "B=b; path=/foo/bar/baz/qux", + "C=c; path=/foo/bar/baz", + "D=d; path=/foo"}, + "A=a B=b C=c D=d", + []query{ + {"http://www.host.test/foo/bar/baz/qux", "B=b C=c A=a D=d"}, + {"http://www.host.test/foo/bar/baz/", "C=c A=a D=d"}, + {"http://www.host.test/foo/bar", "A=a D=d"}, + }, + }, + { + "Creation time determines sorting on same length paths.", + "http://www.host.test/", + []string{ + "A=a; path=/foo/bar", + "X=x; path=/foo/bar", + "Y=y; path=/foo/bar/baz/qux", + "B=b; path=/foo/bar/baz/qux", + "C=c; path=/foo/bar/baz", + "W=w; path=/foo/bar/baz", + "Z=z; path=/foo", + "D=d; path=/foo"}, + "A=a B=b C=c D=d W=w X=x Y=y Z=z", + []query{ + {"http://www.host.test/foo/bar/baz/qux", "Y=y B=b C=c W=w A=a X=x Z=z D=d"}, + {"http://www.host.test/foo/bar/baz/", "C=c W=w A=a X=x Z=z D=d"}, + {"http://www.host.test/foo/bar", "A=a X=x Z=z D=d"}, + }, + }, + { + 
"Sorting of same-name cookies.", + "http://www.host.test/", + []string{ + "A=1; path=/", + "A=2; path=/path", + "A=3; path=/quux", + "A=4; path=/path/foo", + "A=5; domain=.host.test; path=/path", + "A=6; domain=.host.test; path=/quux", + "A=7; domain=.host.test; path=/path/foo", + }, + "A=1 A=2 A=3 A=4 A=5 A=6 A=7", + []query{ + {"http://www.host.test/path", "A=2 A=5 A=1"}, + {"http://www.host.test/path/foo", "A=4 A=7 A=2 A=5 A=1"}, + }, + }, + { + "Disallow domain cookie on public suffix.", + "http://www.bbc.co.uk", + []string{ + "a=1", + "b=2; domain=co.uk", + }, + "a=1", + []query{{"http://www.bbc.co.uk", "a=1"}}, + }, + { + "Host cookie on IP.", + "http://192.168.0.10", + []string{"a=1"}, + "a=1", + []query{{"http://192.168.0.10", "a=1"}}, + }, + { + "Port is ignored #1.", + "http://www.host.test/", + []string{"a=1"}, + "a=1", + []query{ + {"http://www.host.test", "a=1"}, + {"http://www.host.test:8080/", "a=1"}, + }, + }, + { + "Port is ignored #2.", + "http://www.host.test:8080/", + []string{"a=1"}, + "a=1", + []query{ + {"http://www.host.test", "a=1"}, + {"http://www.host.test:8080/", "a=1"}, + {"http://www.host.test:1234/", "a=1"}, + }, + }, +} + +func TestBasics(t *testing.T) { + for _, test := range basicsTests { + jar := newTestJar() + test.run(t, jar) + } +} + +// updateAndDeleteTests contains jarTests which must be performed on the same +// Jar. +var updateAndDeleteTests = [...]jarTest{ + { + "Set initial cookies.", + "http://www.host.test", + []string{ + "a=1", + "b=2; secure", + "c=3; httponly", + "d=4; secure; httponly"}, + "a=1 b=2 c=3 d=4", + []query{ + {"http://www.host.test", "a=1 c=3"}, + {"https://www.host.test", "a=1 b=2 c=3 d=4"}, + }, + }, + { + "Update value via http.", + "http://www.host.test", + []string{ + "a=w", + "b=x; secure", + "c=y; httponly", + "d=z; secure; httponly"}, + "a=w b=x c=y d=z", + []query{ + {"http://www.host.test", "a=w c=y"}, + {"https://www.host.test", "a=w b=x c=y d=z"}, + }, + }, + { + "Clear Secure flag from a http.", + "http://www.host.test/", + []string{ + "b=xx", + "d=zz; httponly"}, + "a=w b=xx c=y d=zz", + []query{{"http://www.host.test", "a=w b=xx c=y d=zz"}}, + }, + { + "Delete all.", + "http://www.host.test/", + []string{ + "a=1; max-Age=-1", // delete via MaxAge + "b=2; " + expiresIn(-10), // delete via Expires + "c=2; max-age=-1; " + expiresIn(-10), // delete via both + "d=4; max-age=-1; " + expiresIn(10)}, // MaxAge takes precedence + "", + []query{{"http://www.host.test", ""}}, + }, + { + "Refill #1.", + "http://www.host.test", + []string{ + "A=1", + "A=2; path=/foo", + "A=3; domain=.host.test", + "A=4; path=/foo; domain=.host.test"}, + "A=1 A=2 A=3 A=4", + []query{{"http://www.host.test/foo", "A=2 A=4 A=1 A=3"}}, + }, + { + "Refill #2.", + "http://www.google.com", + []string{ + "A=6", + "A=7; path=/foo", + "A=8; domain=.google.com", + "A=9; path=/foo; domain=.google.com"}, + "A=1 A=2 A=3 A=4 A=6 A=7 A=8 A=9", + []query{ + {"http://www.host.test/foo", "A=2 A=4 A=1 A=3"}, + {"http://www.google.com/foo", "A=7 A=9 A=6 A=8"}, + }, + }, + { + "Delete A7.", + "http://www.google.com", + []string{"A=; path=/foo; max-age=-1"}, + "A=1 A=2 A=3 A=4 A=6 A=8 A=9", + []query{ + {"http://www.host.test/foo", "A=2 A=4 A=1 A=3"}, + {"http://www.google.com/foo", "A=9 A=6 A=8"}, + }, + }, + { + "Delete A4.", + "http://www.host.test", + []string{"A=; path=/foo; domain=host.test; max-age=-1"}, + "A=1 A=2 A=3 A=6 A=8 A=9", + []query{ + {"http://www.host.test/foo", "A=2 A=1 A=3"}, + {"http://www.google.com/foo", "A=9 A=6 A=8"}, + }, + }, + { + "Delete 
A6.", + "http://www.google.com", + []string{"A=; max-age=-1"}, + "A=1 A=2 A=3 A=8 A=9", + []query{ + {"http://www.host.test/foo", "A=2 A=1 A=3"}, + {"http://www.google.com/foo", "A=9 A=8"}, + }, + }, + { + "Delete A3.", + "http://www.host.test", + []string{"A=; domain=host.test; max-age=-1"}, + "A=1 A=2 A=8 A=9", + []query{ + {"http://www.host.test/foo", "A=2 A=1"}, + {"http://www.google.com/foo", "A=9 A=8"}, + }, + }, + { + "No cross-domain delete.", + "http://www.host.test", + []string{ + "A=; domain=google.com; max-age=-1", + "A=; path=/foo; domain=google.com; max-age=-1"}, + "A=1 A=2 A=8 A=9", + []query{ + {"http://www.host.test/foo", "A=2 A=1"}, + {"http://www.google.com/foo", "A=9 A=8"}, + }, + }, + { + "Delete A8 and A9.", + "http://www.google.com", + []string{ + "A=; domain=google.com; max-age=-1", + "A=; path=/foo; domain=google.com; max-age=-1"}, + "A=1 A=2", + []query{ + {"http://www.host.test/foo", "A=2 A=1"}, + {"http://www.google.com/foo", ""}, + }, + }, +} + +func TestUpdateAndDelete(t *testing.T) { + jar := newTestJar() + for _, test := range updateAndDeleteTests { + test.run(t, jar) + } +} + +func TestExpiration(t *testing.T) { + jar := newTestJar() + jarTest{ + "Expiration.", + "http://www.host.test", + []string{ + "a=1", + "b=2; max-age=3", + "c=3; " + expiresIn(3), + "d=4; max-age=5", + "e=5; " + expiresIn(5), + "f=6; max-age=100", + }, + "a=1 b=2 c=3 d=4 e=5 f=6", // executed at t0 + 1001 ms + []query{ + {"http://www.host.test", "a=1 b=2 c=3 d=4 e=5 f=6"}, // t0 + 2002 ms + {"http://www.host.test", "a=1 d=4 e=5 f=6"}, // t0 + 3003 ms + {"http://www.host.test", "a=1 d=4 e=5 f=6"}, // t0 + 4004 ms + {"http://www.host.test", "a=1 f=6"}, // t0 + 5005 ms + {"http://www.host.test", "a=1 f=6"}, // t0 + 6006 ms + }, + }.run(t, jar) +} + +// +// Tests derived from Chromium's cookie_store_unittest.h. +// + +// See http://src.chromium.org/viewvc/chrome/trunk/src/net/cookies/cookie_store_unittest.h?revision=159685&content-type=text/plain +// Some of the original tests are in a bad condition (e.g. +// DomainWithTrailingDotTest) or are not RFC 6265 conforming (e.g. +// TestNonDottedAndTLD #1 and #6) and have not been ported. + +// chromiumBasicsTests contains fundamental tests. Each jarTest has to be +// performed on a fresh, empty Jar. 
+var chromiumBasicsTests = [...]jarTest{ + { + "DomainWithTrailingDotTest.", + "http://www.google.com/", + []string{ + "a=1; domain=.www.google.com.", + "b=2; domain=.www.google.com.."}, + "", + []query{ + {"http://www.google.com", ""}, + }, + }, + { + "ValidSubdomainTest #1.", + "http://a.b.c.d.com", + []string{ + "a=1; domain=.a.b.c.d.com", + "b=2; domain=.b.c.d.com", + "c=3; domain=.c.d.com", + "d=4; domain=.d.com"}, + "a=1 b=2 c=3 d=4", + []query{ + {"http://a.b.c.d.com", "a=1 b=2 c=3 d=4"}, + {"http://b.c.d.com", "b=2 c=3 d=4"}, + {"http://c.d.com", "c=3 d=4"}, + {"http://d.com", "d=4"}, + }, + }, + { + "ValidSubdomainTest #2.", + "http://a.b.c.d.com", + []string{ + "a=1; domain=.a.b.c.d.com", + "b=2; domain=.b.c.d.com", + "c=3; domain=.c.d.com", + "d=4; domain=.d.com", + "X=bcd; domain=.b.c.d.com", + "X=cd; domain=.c.d.com"}, + "X=bcd X=cd a=1 b=2 c=3 d=4", + []query{ + {"http://b.c.d.com", "b=2 c=3 d=4 X=bcd X=cd"}, + {"http://c.d.com", "c=3 d=4 X=cd"}, + }, + }, + { + "InvalidDomainTest #1.", + "http://foo.bar.com", + []string{ + "a=1; domain=.yo.foo.bar.com", + "b=2; domain=.foo.com", + "c=3; domain=.bar.foo.com", + "d=4; domain=.foo.bar.com.net", + "e=5; domain=ar.com", + "f=6; domain=.", + "g=7; domain=/", + "h=8; domain=http://foo.bar.com", + "i=9; domain=..foo.bar.com", + "j=10; domain=..bar.com", + "k=11; domain=.foo.bar.com?blah", + "l=12; domain=.foo.bar.com/blah", + "m=12; domain=.foo.bar.com:80", + "n=14; domain=.foo.bar.com:", + "o=15; domain=.foo.bar.com#sup", + }, + "", // Jar is empty. + []query{{"http://foo.bar.com", ""}}, + }, + { + "InvalidDomainTest #2.", + "http://foo.com.com", + []string{"a=1; domain=.foo.com.com.com"}, + "", + []query{{"http://foo.bar.com", ""}}, + }, + { + "DomainWithoutLeadingDotTest #1.", + "http://manage.hosted.filefront.com", + []string{"a=1; domain=filefront.com"}, + "a=1", + []query{{"http://www.filefront.com", "a=1"}}, + }, + { + "DomainWithoutLeadingDotTest #2.", + "http://www.google.com", + []string{"a=1; domain=www.google.com"}, + "a=1", + []query{ + {"http://www.google.com", "a=1"}, + {"http://sub.www.google.com", "a=1"}, + {"http://something-else.com", ""}, + }, + }, + { + "CaseInsensitiveDomainTest.", + "http://www.google.com", + []string{ + "a=1; domain=.GOOGLE.COM", + "b=2; domain=.www.gOOgLE.coM"}, + "a=1 b=2", + []query{{"http://www.google.com", "a=1 b=2"}}, + }, + { + "TestIpAddress #1.", + "http://1.2.3.4/foo", + []string{"a=1; path=/"}, + "a=1", + []query{{"http://1.2.3.4/foo", "a=1"}}, + }, + { + "TestIpAddress #2.", + "http://1.2.3.4/foo", + []string{ + "a=1; domain=.1.2.3.4", + "b=2; domain=.3.4"}, + "", + []query{{"http://1.2.3.4/foo", ""}}, + }, + { + "TestIpAddress #3.", + "http://1.2.3.4/foo", + []string{"a=1; domain=1.2.3.4"}, + "", + []query{{"http://1.2.3.4/foo", ""}}, + }, + { + "TestNonDottedAndTLD #2.", + "http://com./index.html", + []string{"a=1"}, + "a=1", + []query{ + {"http://com./index.html", "a=1"}, + {"http://no-cookies.com./index.html", ""}, + }, + }, + { + "TestNonDottedAndTLD #3.", + "http://a.b", + []string{ + "a=1; domain=.b", + "b=2; domain=b"}, + "", + []query{{"http://bar.foo", ""}}, + }, + { + "TestNonDottedAndTLD #4.", + "http://google.com", + []string{ + "a=1; domain=.com", + "b=2; domain=com"}, + "", + []query{{"http://google.com", ""}}, + }, + { + "TestNonDottedAndTLD #5.", + "http://google.co.uk", + []string{ + "a=1; domain=.co.uk", + "b=2; domain=.uk"}, + "", + []query{ + {"http://google.co.uk", ""}, + {"http://else.co.com", ""}, + {"http://else.uk", ""}, + }, + }, + { + 
"TestHostEndsWithDot.", + "http://www.google.com", + []string{ + "a=1", + "b=2; domain=.www.google.com."}, + "a=1", + []query{{"http://www.google.com", "a=1"}}, + }, + { + "PathTest", + "http://www.google.izzle", + []string{"a=1; path=/wee"}, + "a=1", + []query{ + {"http://www.google.izzle/wee", "a=1"}, + {"http://www.google.izzle/wee/", "a=1"}, + {"http://www.google.izzle/wee/war", "a=1"}, + {"http://www.google.izzle/wee/war/more/more", "a=1"}, + {"http://www.google.izzle/weehee", ""}, + {"http://www.google.izzle/", ""}, + }, + }, +} + +func TestChromiumBasics(t *testing.T) { + for _, test := range chromiumBasicsTests { + jar := newTestJar() + test.run(t, jar) + } +} + +// chromiumDomainTests contains jarTests which must be executed all on the +// same Jar. +var chromiumDomainTests = [...]jarTest{ + { + "Fill #1.", + "http://www.google.izzle", + []string{"A=B"}, + "A=B", + []query{{"http://www.google.izzle", "A=B"}}, + }, + { + "Fill #2.", + "http://www.google.izzle", + []string{"C=D; domain=.google.izzle"}, + "A=B C=D", + []query{{"http://www.google.izzle", "A=B C=D"}}, + }, + { + "Verify A is a host cookie and not accessible from subdomain.", + "http://unused.nil", + []string{}, + "A=B C=D", + []query{{"http://foo.www.google.izzle", "C=D"}}, + }, + { + "Verify domain cookies are found on proper domain.", + "http://www.google.izzle", + []string{"E=F; domain=.www.google.izzle"}, + "A=B C=D E=F", + []query{{"http://www.google.izzle", "A=B C=D E=F"}}, + }, + { + "Leading dots in domain attributes are optional.", + "http://www.google.izzle", + []string{"G=H; domain=www.google.izzle"}, + "A=B C=D E=F G=H", + []query{{"http://www.google.izzle", "A=B C=D E=F G=H"}}, + }, + { + "Verify domain enforcement works #1.", + "http://www.google.izzle", + []string{"K=L; domain=.bar.www.google.izzle"}, + "A=B C=D E=F G=H", + []query{{"http://bar.www.google.izzle", "C=D E=F G=H"}}, + }, + { + "Verify domain enforcement works #2.", + "http://unused.nil", + []string{}, + "A=B C=D E=F G=H", + []query{{"http://www.google.izzle", "A=B C=D E=F G=H"}}, + }, +} + +func TestChromiumDomain(t *testing.T) { + jar := newTestJar() + for _, test := range chromiumDomainTests { + test.run(t, jar) + } + +} + +// chromiumDeletionTests must be performed all on the same Jar. 
+var chromiumDeletionTests = [...]jarTest{ + { + "Create session cookie a1.", + "http://www.google.com", + []string{"a=1"}, + "a=1", + []query{{"http://www.google.com", "a=1"}}, + }, + { + "Delete sc a1 via MaxAge.", + "http://www.google.com", + []string{"a=1; max-age=-1"}, + "", + []query{{"http://www.google.com", ""}}, + }, + { + "Create session cookie b2.", + "http://www.google.com", + []string{"b=2"}, + "b=2", + []query{{"http://www.google.com", "b=2"}}, + }, + { + "Delete sc b2 via Expires.", + "http://www.google.com", + []string{"b=2; " + expiresIn(-10)}, + "", + []query{{"http://www.google.com", ""}}, + }, + { + "Create persistent cookie c3.", + "http://www.google.com", + []string{"c=3; max-age=3600"}, + "c=3", + []query{{"http://www.google.com", "c=3"}}, + }, + { + "Delete pc c3 via MaxAge.", + "http://www.google.com", + []string{"c=3; max-age=-1"}, + "", + []query{{"http://www.google.com", ""}}, + }, + { + "Create persistent cookie d4.", + "http://www.google.com", + []string{"d=4; max-age=3600"}, + "d=4", + []query{{"http://www.google.com", "d=4"}}, + }, + { + "Delete pc d4 via Expires.", + "http://www.google.com", + []string{"d=4; " + expiresIn(-10)}, + "", + []query{{"http://www.google.com", ""}}, + }, +} + +func TestChromiumDeletion(t *testing.T) { + jar := newTestJar() + for _, test := range chromiumDeletionTests { + test.run(t, jar) + } +} + +// domainHandlingTests tests and documents the rules for domain handling. +// Each test must be performed on an empty new Jar. +var domainHandlingTests = [...]jarTest{ + { + "Host cookie", + "http://www.host.test", + []string{"a=1"}, + "a=1", + []query{ + {"http://www.host.test", "a=1"}, + {"http://host.test", ""}, + {"http://bar.host.test", ""}, + {"http://foo.www.host.test", ""}, + {"http://other.test", ""}, + {"http://test", ""}, + }, + }, + { + "Domain cookie #1", + "http://www.host.test", + []string{"a=1; domain=host.test"}, + "a=1", + []query{ + {"http://www.host.test", "a=1"}, + {"http://host.test", "a=1"}, + {"http://bar.host.test", "a=1"}, + {"http://foo.www.host.test", "a=1"}, + {"http://other.test", ""}, + {"http://test", ""}, + }, + }, + { + "Domain cookie #2", + "http://www.host.test", + []string{"a=1; domain=.host.test"}, + "a=1", + []query{ + {"http://www.host.test", "a=1"}, + {"http://host.test", "a=1"}, + {"http://bar.host.test", "a=1"}, + {"http://foo.www.host.test", "a=1"}, + {"http://other.test", ""}, + {"http://test", ""}, + }, + }, + { + "Host cookie on IDNA domain #1", + "http://www.bücher.test", + []string{"a=1"}, + "a=1", + []query{ + {"http://www.bücher.test", "a=1"}, + {"http://www.xn--bcher-kva.test", "a=1"}, + {"http://bücher.test", ""}, + {"http://xn--bcher-kva.test", ""}, + {"http://bar.bücher.test", ""}, + {"http://bar.xn--bcher-kva.test", ""}, + {"http://foo.www.bücher.test", ""}, + {"http://foo.www.xn--bcher-kva.test", ""}, + {"http://other.test", ""}, + {"http://test", ""}, + }, + }, + { + "Host cookie on IDNA domain #2", + "http://www.xn--bcher-kva.test", + []string{"a=1"}, + "a=1", + []query{ + {"http://www.bücher.test", "a=1"}, + {"http://www.xn--bcher-kva.test", "a=1"}, + {"http://bücher.test", ""}, + {"http://xn--bcher-kva.test", ""}, + {"http://bar.bücher.test", ""}, + {"http://bar.xn--bcher-kva.test", ""}, + {"http://foo.www.bücher.test", ""}, + {"http://foo.www.xn--bcher-kva.test", ""}, + {"http://other.test", ""}, + {"http://test", ""}, + }, + }, + { + "Domain cookie on IDNA domain #1", + "http://www.bücher.test", + []string{"a=1; domain=xn--bcher-kva.test"}, + "a=1", + []query{ + 
{"http://www.bücher.test", "a=1"}, + {"http://www.xn--bcher-kva.test", "a=1"}, + {"http://bücher.test", "a=1"}, + {"http://xn--bcher-kva.test", "a=1"}, + {"http://bar.bücher.test", "a=1"}, + {"http://bar.xn--bcher-kva.test", "a=1"}, + {"http://foo.www.bücher.test", "a=1"}, + {"http://foo.www.xn--bcher-kva.test", "a=1"}, + {"http://other.test", ""}, + {"http://test", ""}, + }, + }, + { + "Domain cookie on IDNA domain #2", + "http://www.xn--bcher-kva.test", + []string{"a=1; domain=xn--bcher-kva.test"}, + "a=1", + []query{ + {"http://www.bücher.test", "a=1"}, + {"http://www.xn--bcher-kva.test", "a=1"}, + {"http://bücher.test", "a=1"}, + {"http://xn--bcher-kva.test", "a=1"}, + {"http://bar.bücher.test", "a=1"}, + {"http://bar.xn--bcher-kva.test", "a=1"}, + {"http://foo.www.bücher.test", "a=1"}, + {"http://foo.www.xn--bcher-kva.test", "a=1"}, + {"http://other.test", ""}, + {"http://test", ""}, + }, + }, + { + "Host cookie on TLD.", + "http://com", + []string{"a=1"}, + "a=1", + []query{ + {"http://com", "a=1"}, + {"http://any.com", ""}, + {"http://any.test", ""}, + }, + }, + { + "Domain cookie on TLD becomes a host cookie.", + "http://com", + []string{"a=1; domain=com"}, + "a=1", + []query{ + {"http://com", "a=1"}, + {"http://any.com", ""}, + {"http://any.test", ""}, + }, + }, + { + "Host cookie on public suffix.", + "http://co.uk", + []string{"a=1"}, + "a=1", + []query{ + {"http://co.uk", "a=1"}, + {"http://uk", ""}, + {"http://some.co.uk", ""}, + {"http://foo.some.co.uk", ""}, + {"http://any.uk", ""}, + }, + }, + { + "Domain cookie on public suffix is ignored.", + "http://some.co.uk", + []string{"a=1; domain=co.uk"}, + "", + []query{ + {"http://co.uk", ""}, + {"http://uk", ""}, + {"http://some.co.uk", ""}, + {"http://foo.some.co.uk", ""}, + {"http://any.uk", ""}, + }, + }, +} + +func TestDomainHandling(t *testing.T) { + for _, test := range domainHandlingTests { + jar := newTestJar() + test.run(t, jar) + } +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/core/http/cookiejar/punycode.go b/vendor/github.com/Azure/azure-sdk-for-go/core/http/cookiejar/punycode.go new file mode 100644 index 000000000..ea7ceb5ef --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/core/http/cookiejar/punycode.go @@ -0,0 +1,159 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cookiejar + +// This file implements the Punycode algorithm from RFC 3492. + +import ( + "fmt" + "strings" + "unicode/utf8" +) + +// These parameter values are specified in section 5. +// +// All computation is done with int32s, so that overflow behavior is identical +// regardless of whether int is 32-bit or 64-bit. +const ( + base int32 = 36 + damp int32 = 700 + initialBias int32 = 72 + initialN int32 = 128 + skew int32 = 38 + tmax int32 = 26 + tmin int32 = 1 +) + +// encode encodes a string as specified in section 6.3 and prepends prefix to +// the result. +// +// The "while h < length(input)" line in the specification becomes "for +// remaining != 0" in the Go code, because len(s) in Go is in bytes, not runes. 
+func encode(prefix, s string) (string, error) { + output := make([]byte, len(prefix), len(prefix)+1+2*len(s)) + copy(output, prefix) + delta, n, bias := int32(0), initialN, initialBias + b, remaining := int32(0), int32(0) + for _, r := range s { + if r < 0x80 { + b++ + output = append(output, byte(r)) + } else { + remaining++ + } + } + h := b + if b > 0 { + output = append(output, '-') + } + for remaining != 0 { + m := int32(0x7fffffff) + for _, r := range s { + if m > r && r >= n { + m = r + } + } + delta += (m - n) * (h + 1) + if delta < 0 { + return "", fmt.Errorf("cookiejar: invalid label %q", s) + } + n = m + for _, r := range s { + if r < n { + delta++ + if delta < 0 { + return "", fmt.Errorf("cookiejar: invalid label %q", s) + } + continue + } + if r > n { + continue + } + q := delta + for k := base; ; k += base { + t := k - bias + if t < tmin { + t = tmin + } else if t > tmax { + t = tmax + } + if q < t { + break + } + output = append(output, encodeDigit(t+(q-t)%(base-t))) + q = (q - t) / (base - t) + } + output = append(output, encodeDigit(q)) + bias = adapt(delta, h+1, h == b) + delta = 0 + h++ + remaining-- + } + delta++ + n++ + } + return string(output), nil +} + +func encodeDigit(digit int32) byte { + switch { + case 0 <= digit && digit < 26: + return byte(digit + 'a') + case 26 <= digit && digit < 36: + return byte(digit + ('0' - 26)) + } + panic("cookiejar: internal error in punycode encoding") +} + +// adapt is the bias adaptation function specified in section 6.1. +func adapt(delta, numPoints int32, firstTime bool) int32 { + if firstTime { + delta /= damp + } else { + delta /= 2 + } + delta += delta / numPoints + k := int32(0) + for delta > ((base-tmin)*tmax)/2 { + delta /= base - tmin + k += base + } + return k + (base-tmin+1)*delta/(delta+skew) +} + +// Strictly speaking, the remaining code below deals with IDNA (RFC 5890 and +// friends) and not Punycode (RFC 3492) per se. + +// acePrefix is the ASCII Compatible Encoding prefix. +const acePrefix = "xn--" + +// toASCII converts a domain or domain label to its ASCII form. For example, +// toASCII("bücher.example.com") is "xn--bcher-kva.example.com", and +// toASCII("golang") is "golang". +func toASCII(s string) (string, error) { + if ascii(s) { + return s, nil + } + labels := strings.Split(s, ".") + for i, label := range labels { + if !ascii(label) { + a, err := encode(acePrefix, label) + if err != nil { + return "", err + } + labels[i] = a + } + } + return strings.Join(labels, "."), nil +} + +func ascii(s string) bool { + for i := 0; i < len(s); i++ { + if s[i] >= utf8.RuneSelf { + return false + } + } + return true +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/core/http/cookiejar/punycode_test.go b/vendor/github.com/Azure/azure-sdk-for-go/core/http/cookiejar/punycode_test.go new file mode 100644 index 000000000..0301de14e --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/core/http/cookiejar/punycode_test.go @@ -0,0 +1,161 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cookiejar + +import ( + "testing" +) + +var punycodeTestCases = [...]struct { + s, encoded string +}{ + {"", ""}, + {"-", "--"}, + {"-a", "-a-"}, + {"-a-", "-a--"}, + {"a", "a-"}, + {"a-", "a--"}, + {"a-b", "a-b-"}, + {"books", "books-"}, + {"bücher", "bcher-kva"}, + {"Hello世界", "Hello-ck1hg65u"}, + {"ü", "tda"}, + {"üý", "tdac"}, + + // The test cases below come from RFC 3492 section 7.1 with Errata 3026. 
+ { + // (A) Arabic (Egyptian). + "\u0644\u064A\u0647\u0645\u0627\u0628\u062A\u0643\u0644" + + "\u0645\u0648\u0634\u0639\u0631\u0628\u064A\u061F", + "egbpdaj6bu4bxfgehfvwxn", + }, + { + // (B) Chinese (simplified). + "\u4ED6\u4EEC\u4E3A\u4EC0\u4E48\u4E0D\u8BF4\u4E2D\u6587", + "ihqwcrb4cv8a8dqg056pqjye", + }, + { + // (C) Chinese (traditional). + "\u4ED6\u5011\u7232\u4EC0\u9EBD\u4E0D\u8AAA\u4E2D\u6587", + "ihqwctvzc91f659drss3x8bo0yb", + }, + { + // (D) Czech. + "\u0050\u0072\u006F\u010D\u0070\u0072\u006F\u0073\u0074" + + "\u011B\u006E\u0065\u006D\u006C\u0075\u0076\u00ED\u010D" + + "\u0065\u0073\u006B\u0079", + "Proprostnemluvesky-uyb24dma41a", + }, + { + // (E) Hebrew. + "\u05DC\u05DE\u05D4\u05D4\u05DD\u05E4\u05E9\u05D5\u05D8" + + "\u05DC\u05D0\u05DE\u05D3\u05D1\u05E8\u05D9\u05DD\u05E2" + + "\u05D1\u05E8\u05D9\u05EA", + "4dbcagdahymbxekheh6e0a7fei0b", + }, + { + // (F) Hindi (Devanagari). + "\u092F\u0939\u0932\u094B\u0917\u0939\u093F\u0928\u094D" + + "\u0926\u0940\u0915\u094D\u092F\u094B\u0902\u0928\u0939" + + "\u0940\u0902\u092C\u094B\u0932\u0938\u0915\u0924\u0947" + + "\u0939\u0948\u0902", + "i1baa7eci9glrd9b2ae1bj0hfcgg6iyaf8o0a1dig0cd", + }, + { + // (G) Japanese (kanji and hiragana). + "\u306A\u305C\u307F\u3093\u306A\u65E5\u672C\u8A9E\u3092" + + "\u8A71\u3057\u3066\u304F\u308C\u306A\u3044\u306E\u304B", + "n8jok5ay5dzabd5bym9f0cm5685rrjetr6pdxa", + }, + { + // (H) Korean (Hangul syllables). + "\uC138\uACC4\uC758\uBAA8\uB4E0\uC0AC\uB78C\uB4E4\uC774" + + "\uD55C\uAD6D\uC5B4\uB97C\uC774\uD574\uD55C\uB2E4\uBA74" + + "\uC5BC\uB9C8\uB098\uC88B\uC744\uAE4C", + "989aomsvi5e83db1d2a355cv1e0vak1dwrv93d5xbh15a0dt30a5j" + + "psd879ccm6fea98c", + }, + { + // (I) Russian (Cyrillic). + "\u043F\u043E\u0447\u0435\u043C\u0443\u0436\u0435\u043E" + + "\u043D\u0438\u043D\u0435\u0433\u043E\u0432\u043E\u0440" + + "\u044F\u0442\u043F\u043E\u0440\u0443\u0441\u0441\u043A" + + "\u0438", + "b1abfaaepdrnnbgefbadotcwatmq2g4l", + }, + { + // (J) Spanish. + "\u0050\u006F\u0072\u0071\u0075\u00E9\u006E\u006F\u0070" + + "\u0075\u0065\u0064\u0065\u006E\u0073\u0069\u006D\u0070" + + "\u006C\u0065\u006D\u0065\u006E\u0074\u0065\u0068\u0061" + + "\u0062\u006C\u0061\u0072\u0065\u006E\u0045\u0073\u0070" + + "\u0061\u00F1\u006F\u006C", + "PorqunopuedensimplementehablarenEspaol-fmd56a", + }, + { + // (K) Vietnamese. + "\u0054\u1EA1\u0069\u0073\u0061\u006F\u0068\u1ECD\u006B" + + "\u0068\u00F4\u006E\u0067\u0074\u0068\u1EC3\u0063\u0068" + + "\u1EC9\u006E\u00F3\u0069\u0074\u0069\u1EBF\u006E\u0067" + + "\u0056\u0069\u1EC7\u0074", + "TisaohkhngthchnitingVit-kjcr8268qyxafd2f1b9g", + }, + { + // (L) 3B. + "\u0033\u5E74\u0042\u7D44\u91D1\u516B\u5148\u751F", + "3B-ww4c5e180e575a65lsy2b", + }, + { + // (M) -with-SUPER-MONKEYS. + "\u5B89\u5BA4\u5948\u7F8E\u6075\u002D\u0077\u0069\u0074" + + "\u0068\u002D\u0053\u0055\u0050\u0045\u0052\u002D\u004D" + + "\u004F\u004E\u004B\u0045\u0059\u0053", + "-with-SUPER-MONKEYS-pc58ag80a8qai00g7n9n", + }, + { + // (N) Hello-Another-Way-. + "\u0048\u0065\u006C\u006C\u006F\u002D\u0041\u006E\u006F" + + "\u0074\u0068\u0065\u0072\u002D\u0057\u0061\u0079\u002D" + + "\u305D\u308C\u305E\u308C\u306E\u5834\u6240", + "Hello-Another-Way--fc4qua05auwb3674vfr0b", + }, + { + // (O) 2. 
+ "\u3072\u3068\u3064\u5C4B\u6839\u306E\u4E0B\u0032", + "2-u9tlzr9756bt3uc0v", + }, + { + // (P) MajiKoi5 + "\u004D\u0061\u006A\u0069\u3067\u004B\u006F\u0069\u3059" + + "\u308B\u0035\u79D2\u524D", + "MajiKoi5-783gue6qz075azm5e", + }, + { + // (Q) de + "\u30D1\u30D5\u30A3\u30FC\u0064\u0065\u30EB\u30F3\u30D0", + "de-jg4avhby1noc0d", + }, + { + // (R) + "\u305D\u306E\u30B9\u30D4\u30FC\u30C9\u3067", + "d9juau41awczczp", + }, + { + // (S) -> $1.00 <- + "\u002D\u003E\u0020\u0024\u0031\u002E\u0030\u0030\u0020" + + "\u003C\u002D", + "-> $1.00 <--", + }, +} + +func TestPunycode(t *testing.T) { + for _, tc := range punycodeTestCases { + if got, err := encode("", tc.s); err != nil { + t.Errorf(`encode("", %q): %v`, tc.s, err) + } else if got != tc.encoded { + t.Errorf(`encode("", %q): got %q, want %q`, tc.s, got, tc.encoded) + } + } +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/core/http/doc.go b/vendor/github.com/Azure/azure-sdk-for-go/core/http/doc.go new file mode 100644 index 000000000..b1216e8da --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/core/http/doc.go @@ -0,0 +1,80 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +/* +Package http provides HTTP client and server implementations. + +Get, Head, Post, and PostForm make HTTP (or HTTPS) requests: + + resp, err := http.Get("http://example.com/") + ... + resp, err := http.Post("http://example.com/upload", "image/jpeg", &buf) + ... + resp, err := http.PostForm("http://example.com/form", + url.Values{"key": {"Value"}, "id": {"123"}}) + +The client must close the response body when finished with it: + + resp, err := http.Get("http://example.com/") + if err != nil { + // handle error + } + defer resp.Body.Close() + body, err := ioutil.ReadAll(resp.Body) + // ... + +For control over HTTP client headers, redirect policy, and other +settings, create a Client: + + client := &http.Client{ + CheckRedirect: redirectPolicyFunc, + } + + resp, err := client.Get("http://example.com") + // ... + + req, err := http.NewRequest("GET", "http://example.com", nil) + // ... + req.Header.Add("If-None-Match", `W/"wyzzy"`) + resp, err := client.Do(req) + // ... + +For control over proxies, TLS configuration, keep-alives, +compression, and other settings, create a Transport: + + tr := &http.Transport{ + TLSClientConfig: &tls.Config{RootCAs: pool}, + DisableCompression: true, + } + client := &http.Client{Transport: tr} + resp, err := client.Get("https://example.com") + +Clients and Transports are safe for concurrent use by multiple +goroutines and for efficiency should only be created once and re-used. + +ListenAndServe starts an HTTP server with a given address and handler. +The handler is usually nil, which means to use DefaultServeMux. 
+Handle and HandleFunc add handlers to DefaultServeMux: + + http.Handle("/foo", fooHandler) + + http.HandleFunc("/bar", func(w http.ResponseWriter, r *http.Request) { + fmt.Fprintf(w, "Hello, %q", html.EscapeString(r.URL.Path)) + }) + + log.Fatal(http.ListenAndServe(":8080", nil)) + +More control over the server's behavior is available by creating a +custom Server: + + s := &http.Server{ + Addr: ":8080", + Handler: myHandler, + ReadTimeout: 10 * time.Second, + WriteTimeout: 10 * time.Second, + MaxHeaderBytes: 1 << 20, + } + log.Fatal(s.ListenAndServe()) +*/ +package http diff --git a/vendor/github.com/Azure/azure-sdk-for-go/core/http/example_test.go b/vendor/github.com/Azure/azure-sdk-for-go/core/http/example_test.go new file mode 100644 index 000000000..88b97d9e3 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/core/http/example_test.go @@ -0,0 +1,88 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package http_test + +import ( + "fmt" + "io/ioutil" + "log" + "net/http" +) + +func ExampleHijacker() { + http.HandleFunc("/hijack", func(w http.ResponseWriter, r *http.Request) { + hj, ok := w.(http.Hijacker) + if !ok { + http.Error(w, "webserver doesn't support hijacking", http.StatusInternalServerError) + return + } + conn, bufrw, err := hj.Hijack() + if err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + // Don't forget to close the connection: + defer conn.Close() + bufrw.WriteString("Now we're speaking raw TCP. Say hi: ") + bufrw.Flush() + s, err := bufrw.ReadString('\n') + if err != nil { + log.Printf("error reading string: %v", err) + return + } + fmt.Fprintf(bufrw, "You said: %q\nBye.\n", s) + bufrw.Flush() + }) +} + +func ExampleGet() { + res, err := http.Get("http://www.google.com/robots.txt") + if err != nil { + log.Fatal(err) + } + robots, err := ioutil.ReadAll(res.Body) + res.Body.Close() + if err != nil { + log.Fatal(err) + } + fmt.Printf("%s", robots) +} + +func ExampleFileServer() { + // Simple static webserver: + log.Fatal(http.ListenAndServe(":8080", http.FileServer(http.Dir("/usr/share/doc")))) +} + +func ExampleFileServer_stripPrefix() { + // To serve a directory on disk (/tmp) under an alternate URL + // path (/tmpfiles/), use StripPrefix to modify the request + // URL's path before the FileServer sees it: + http.Handle("/tmpfiles/", http.StripPrefix("/tmpfiles/", http.FileServer(http.Dir("/tmp")))) +} + +func ExampleStripPrefix() { + // To serve a directory on disk (/tmp) under an alternate URL + // path (/tmpfiles/), use StripPrefix to modify the request + // URL's path before the FileServer sees it: + http.Handle("/tmpfiles/", http.StripPrefix("/tmpfiles/", http.FileServer(http.Dir("/tmp")))) +} + +type apiHandler struct{} + +func (apiHandler) ServeHTTP(http.ResponseWriter, *http.Request) {} + +func ExampleServeMux_Handle() { + mux := http.NewServeMux() + mux.Handle("/api/", apiHandler{}) + mux.HandleFunc("/", func(w http.ResponseWriter, req *http.Request) { + // The "/" pattern matches everything, so we need to check + // that we're at the root here. 
+ if req.URL.Path != "/" { + http.NotFound(w, req) + return + } + fmt.Fprintf(w, "Welcome to the home page!") + }) +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/core/http/export_test.go b/vendor/github.com/Azure/azure-sdk-for-go/core/http/export_test.go new file mode 100644 index 000000000..960563b24 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/core/http/export_test.go @@ -0,0 +1,72 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Bridge package to expose http internals to tests in the http_test +// package. + +package http + +import ( + "net" + "time" +) + +func NewLoggingConn(baseName string, c net.Conn) net.Conn { + return newLoggingConn(baseName, c) +} + +var ExportAppendTime = appendTime + +func (t *Transport) NumPendingRequestsForTesting() int { + t.reqMu.Lock() + defer t.reqMu.Unlock() + return len(t.reqCanceler) +} + +func (t *Transport) IdleConnKeysForTesting() (keys []string) { + keys = make([]string, 0) + t.idleMu.Lock() + defer t.idleMu.Unlock() + if t.idleConn == nil { + return + } + for key := range t.idleConn { + keys = append(keys, key.String()) + } + return +} + +func (t *Transport) IdleConnCountForTesting(cacheKey string) int { + t.idleMu.Lock() + defer t.idleMu.Unlock() + if t.idleConn == nil { + return 0 + } + for k, conns := range t.idleConn { + if k.String() == cacheKey { + return len(conns) + } + } + return 0 +} + +func (t *Transport) IdleConnChMapSizeForTesting() int { + t.idleMu.Lock() + defer t.idleMu.Unlock() + return len(t.idleConnCh) +} + +func NewTestTimeoutHandler(handler Handler, ch <-chan time.Time) Handler { + f := func() <-chan time.Time { + return ch + } + return &timeoutHandler{handler, f, ""} +} + +func ResetCachedEnvironment() { + httpProxyEnv.reset() + noProxyEnv.reset() +} + +var DefaultUserAgent = defaultUserAgent diff --git a/vendor/github.com/Azure/azure-sdk-for-go/core/http/fcgi/child.go b/vendor/github.com/Azure/azure-sdk-for-go/core/http/fcgi/child.go new file mode 100644 index 000000000..a3beaa33a --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/core/http/fcgi/child.go @@ -0,0 +1,305 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package fcgi + +// This file implements FastCGI from the perspective of a child process. + +import ( + "errors" + "fmt" + "io" + "io/ioutil" + "net" + "net/http" + "net/http/cgi" + "os" + "strings" + "sync" + "time" +) + +// request holds the state for an in-progress request. As soon as it's complete, +// it's converted to an http.Request. +type request struct { + pw *io.PipeWriter + reqId uint16 + params map[string]string + buf [1024]byte + rawParams []byte + keepConn bool +} + +func newRequest(reqId uint16, flags uint8) *request { + r := &request{ + reqId: reqId, + params: map[string]string{}, + keepConn: flags&flagKeepConn != 0, + } + r.rawParams = r.buf[:0] + return r +} + +// parseParams reads an encoded []byte into Params. 
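+// Each pair is a length-prefixed key followed by a length-prefixed value;
+// in the FastCGI encoding a length below 128 occupies one byte and larger
+// lengths four (decoded here by readSize).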
+func (r *request) parseParams() { + text := r.rawParams + r.rawParams = nil + for len(text) > 0 { + keyLen, n := readSize(text) + if n == 0 { + return + } + text = text[n:] + valLen, n := readSize(text) + if n == 0 { + return + } + text = text[n:] + key := readString(text, keyLen) + text = text[keyLen:] + val := readString(text, valLen) + text = text[valLen:] + r.params[key] = val + } +} + +// response implements http.ResponseWriter. +type response struct { + req *request + header http.Header + w *bufWriter + wroteHeader bool +} + +func newResponse(c *child, req *request) *response { + return &response{ + req: req, + header: http.Header{}, + w: newWriter(c.conn, typeStdout, req.reqId), + } +} + +func (r *response) Header() http.Header { + return r.header +} + +func (r *response) Write(data []byte) (int, error) { + if !r.wroteHeader { + r.WriteHeader(http.StatusOK) + } + return r.w.Write(data) +} + +func (r *response) WriteHeader(code int) { + if r.wroteHeader { + return + } + r.wroteHeader = true + if code == http.StatusNotModified { + // Must not have body. + r.header.Del("Content-Type") + r.header.Del("Content-Length") + r.header.Del("Transfer-Encoding") + } else if r.header.Get("Content-Type") == "" { + r.header.Set("Content-Type", "text/html; charset=utf-8") + } + + if r.header.Get("Date") == "" { + r.header.Set("Date", time.Now().UTC().Format(http.TimeFormat)) + } + + fmt.Fprintf(r.w, "Status: %d %s\r\n", code, http.StatusText(code)) + r.header.Write(r.w) + r.w.WriteString("\r\n") +} + +func (r *response) Flush() { + if !r.wroteHeader { + r.WriteHeader(http.StatusOK) + } + r.w.Flush() +} + +func (r *response) Close() error { + r.Flush() + return r.w.Close() +} + +type child struct { + conn *conn + handler http.Handler + + mu sync.Mutex // protects requests: + requests map[uint16]*request // keyed by request ID +} + +func newChild(rwc io.ReadWriteCloser, handler http.Handler) *child { + return &child{ + conn: newConn(rwc), + handler: handler, + requests: make(map[uint16]*request), + } +} + +func (c *child) serve() { + defer c.conn.Close() + var rec record + for { + if err := rec.read(c.conn.rwc); err != nil { + return + } + if err := c.handleRecord(&rec); err != nil { + return + } + } +} + +var errCloseConn = errors.New("fcgi: connection should be closed") + +var emptyBody = ioutil.NopCloser(strings.NewReader("")) + +func (c *child) handleRecord(rec *record) error { + c.mu.Lock() + req, ok := c.requests[rec.h.Id] + c.mu.Unlock() + if !ok && rec.h.Type != typeBeginRequest && rec.h.Type != typeGetValues { + // The spec says to ignore unknown request IDs. + return nil + } + + switch rec.h.Type { + case typeBeginRequest: + if req != nil { + // The server is trying to begin a request with the same ID + // as an in-progress request. This is an error. + return errors.New("fcgi: received ID that is already in-flight") + } + + var br beginRequest + if err := br.read(rec.content()); err != nil { + return err + } + if br.role != roleResponder { + c.conn.writeEndRequest(rec.h.Id, 0, statusUnknownRole) + return nil + } + req = newRequest(rec.h.Id, br.flags) + c.mu.Lock() + c.requests[rec.h.Id] = req + c.mu.Unlock() + return nil + case typeParams: + // NOTE(eds): Technically a key-value pair can straddle the boundary + // between two packets. We buffer until we've received all parameters. + if len(rec.content()) > 0 { + req.rawParams = append(req.rawParams, rec.content()...) 
+ return nil + } + req.parseParams() + return nil + case typeStdin: + content := rec.content() + if req.pw == nil { + var body io.ReadCloser + if len(content) > 0 { + // body could be an io.LimitReader, but it shouldn't matter + // as long as both sides are behaving. + body, req.pw = io.Pipe() + } else { + body = emptyBody + } + go c.serveRequest(req, body) + } + if len(content) > 0 { + // TODO(eds): This blocks until the handler reads from the pipe. + // If the handler takes a long time, it might be a problem. + req.pw.Write(content) + } else if req.pw != nil { + req.pw.Close() + } + return nil + case typeGetValues: + values := map[string]string{"FCGI_MPXS_CONNS": "1"} + c.conn.writePairs(typeGetValuesResult, 0, values) + return nil + case typeData: + // If the filter role is implemented, read the data stream here. + return nil + case typeAbortRequest: + println("abort") + c.mu.Lock() + delete(c.requests, rec.h.Id) + c.mu.Unlock() + c.conn.writeEndRequest(rec.h.Id, 0, statusRequestComplete) + if !req.keepConn { + // connection will close upon return + return errCloseConn + } + return nil + default: + b := make([]byte, 8) + b[0] = byte(rec.h.Type) + c.conn.writeRecord(typeUnknownType, 0, b) + return nil + } +} + +func (c *child) serveRequest(req *request, body io.ReadCloser) { + r := newResponse(c, req) + httpReq, err := cgi.RequestFromMap(req.params) + if err != nil { + // there was an error reading the request + r.WriteHeader(http.StatusInternalServerError) + c.conn.writeRecord(typeStderr, req.reqId, []byte(err.Error())) + } else { + httpReq.Body = body + c.handler.ServeHTTP(r, httpReq) + } + r.Close() + c.mu.Lock() + delete(c.requests, req.reqId) + c.mu.Unlock() + c.conn.writeEndRequest(req.reqId, 0, statusRequestComplete) + + // Consume the entire body, so the host isn't still writing to + // us when we close the socket below in the !keepConn case, + // otherwise we'd send a RST. (golang.org/issue/4183) + // TODO(bradfitz): also bound this copy in time. Or send + // some sort of abort request to the host, so the host + // can properly cut off the client sending all the data. + // For now just bound it a little and + io.CopyN(ioutil.Discard, body, 100<<20) + body.Close() + + if !req.keepConn { + c.conn.Close() + } +} + +// Serve accepts incoming FastCGI connections on the listener l, creating a new +// goroutine for each. The goroutine reads requests and then calls handler +// to reply to them. +// If l is nil, Serve accepts connections from os.Stdin. +// If handler is nil, http.DefaultServeMux is used. +func Serve(l net.Listener, handler http.Handler) error { + if l == nil { + var err error + l, err = net.FileListener(os.Stdin) + if err != nil { + return err + } + defer l.Close() + } + if handler == nil { + handler = http.DefaultServeMux + } + for { + rw, err := l.Accept() + if err != nil { + return err + } + c := newChild(rw, handler) + go c.serve() + } +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/core/http/fcgi/fcgi.go b/vendor/github.com/Azure/azure-sdk-for-go/core/http/fcgi/fcgi.go new file mode 100644 index 000000000..06bba0488 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/core/http/fcgi/fcgi.go @@ -0,0 +1,274 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package fcgi implements the FastCGI protocol. +// Currently only the responder role is supported. 
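+//
+// A minimal responder sketch (the handler body here is illustrative,
+// not part of the vendored source); passing a nil listener makes Serve
+// accept connections on stdin, as when the process is spawned by a
+// web server:
+//
+//	err := fcgi.Serve(nil, http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+//		fmt.Fprintln(w, "Hello from FastCGI")
+//	}))
+//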
+// The protocol is defined at http://www.fastcgi.com/drupal/node/6?q=node/22 +package fcgi + +// This file defines the raw protocol and some utilities used by the child and +// the host. + +import ( + "bufio" + "bytes" + "encoding/binary" + "errors" + "io" + "sync" +) + +// recType is a record type, as defined by +// http://www.fastcgi.com/devkit/doc/fcgi-spec.html#S8 +type recType uint8 + +const ( + typeBeginRequest recType = 1 + typeAbortRequest recType = 2 + typeEndRequest recType = 3 + typeParams recType = 4 + typeStdin recType = 5 + typeStdout recType = 6 + typeStderr recType = 7 + typeData recType = 8 + typeGetValues recType = 9 + typeGetValuesResult recType = 10 + typeUnknownType recType = 11 +) + +// keep the connection between web-server and responder open after request +const flagKeepConn = 1 + +const ( + maxWrite = 65535 // maximum record body + maxPad = 255 +) + +const ( + roleResponder = iota + 1 // only Responders are implemented. + roleAuthorizer + roleFilter +) + +const ( + statusRequestComplete = iota + statusCantMultiplex + statusOverloaded + statusUnknownRole +) + +const headerLen = 8 + +type header struct { + Version uint8 + Type recType + Id uint16 + ContentLength uint16 + PaddingLength uint8 + Reserved uint8 +} + +type beginRequest struct { + role uint16 + flags uint8 + reserved [5]uint8 +} + +func (br *beginRequest) read(content []byte) error { + if len(content) != 8 { + return errors.New("fcgi: invalid begin request record") + } + br.role = binary.BigEndian.Uint16(content) + br.flags = content[2] + return nil +} + +// for padding so we don't have to allocate all the time +// not synchronized because we don't care what the contents are +var pad [maxPad]byte + +func (h *header) init(recType recType, reqId uint16, contentLength int) { + h.Version = 1 + h.Type = recType + h.Id = reqId + h.ContentLength = uint16(contentLength) + h.PaddingLength = uint8(-contentLength & 7) +} + +// conn sends records over rwc +type conn struct { + mutex sync.Mutex + rwc io.ReadWriteCloser + + // to avoid allocations + buf bytes.Buffer + h header +} + +func newConn(rwc io.ReadWriteCloser) *conn { + return &conn{rwc: rwc} +} + +func (c *conn) Close() error { + c.mutex.Lock() + defer c.mutex.Unlock() + return c.rwc.Close() +} + +type record struct { + h header + buf [maxWrite + maxPad]byte +} + +func (rec *record) read(r io.Reader) (err error) { + if err = binary.Read(r, binary.BigEndian, &rec.h); err != nil { + return err + } + if rec.h.Version != 1 { + return errors.New("fcgi: invalid header version") + } + n := int(rec.h.ContentLength) + int(rec.h.PaddingLength) + if _, err = io.ReadFull(r, rec.buf[:n]); err != nil { + return err + } + return nil +} + +func (r *record) content() []byte { + return r.buf[:r.h.ContentLength] +} + +// writeRecord writes and sends a single record. 
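+//
+// Every record is framed by the 8-byte header defined above: version,
+// type, big-endian request ID, big-endian content length, padding
+// length, and a reserved byte, with the content padded to a multiple
+// of 8 bytes. For example (illustrative values), an end-request record
+// for request ID 1 with an 8-byte body needs no padding, so its header
+// is encoded as:
+//
+//	{1, 3, 0, 1, 0, 8, 0, 0}  // version=1, typeEndRequest, id=1, contentLength=8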
+func (c *conn) writeRecord(recType recType, reqId uint16, b []byte) error { + c.mutex.Lock() + defer c.mutex.Unlock() + c.buf.Reset() + c.h.init(recType, reqId, len(b)) + if err := binary.Write(&c.buf, binary.BigEndian, c.h); err != nil { + return err + } + if _, err := c.buf.Write(b); err != nil { + return err + } + if _, err := c.buf.Write(pad[:c.h.PaddingLength]); err != nil { + return err + } + _, err := c.rwc.Write(c.buf.Bytes()) + return err +} + +func (c *conn) writeBeginRequest(reqId uint16, role uint16, flags uint8) error { + b := [8]byte{byte(role >> 8), byte(role), flags} + return c.writeRecord(typeBeginRequest, reqId, b[:]) +} + +func (c *conn) writeEndRequest(reqId uint16, appStatus int, protocolStatus uint8) error { + b := make([]byte, 8) + binary.BigEndian.PutUint32(b, uint32(appStatus)) + b[4] = protocolStatus + return c.writeRecord(typeEndRequest, reqId, b) +} + +func (c *conn) writePairs(recType recType, reqId uint16, pairs map[string]string) error { + w := newWriter(c, recType, reqId) + b := make([]byte, 8) + for k, v := range pairs { + n := encodeSize(b, uint32(len(k))) + n += encodeSize(b[n:], uint32(len(v))) + if _, err := w.Write(b[:n]); err != nil { + return err + } + if _, err := w.WriteString(k); err != nil { + return err + } + if _, err := w.WriteString(v); err != nil { + return err + } + } + w.Close() + return nil +} + +func readSize(s []byte) (uint32, int) { + if len(s) == 0 { + return 0, 0 + } + size, n := uint32(s[0]), 1 + if size&(1<<7) != 0 { + if len(s) < 4 { + return 0, 0 + } + n = 4 + size = binary.BigEndian.Uint32(s) + size &^= 1 << 31 + } + return size, n +} + +func readString(s []byte, size uint32) string { + if size > uint32(len(s)) { + return "" + } + return string(s[:size]) +} + +func encodeSize(b []byte, size uint32) int { + if size > 127 { + size |= 1 << 31 + binary.BigEndian.PutUint32(b, size) + return 4 + } + b[0] = byte(size) + return 1 +} + +// bufWriter encapsulates bufio.Writer but also closes the underlying stream when +// Closed. +type bufWriter struct { + closer io.Closer + *bufio.Writer +} + +func (w *bufWriter) Close() error { + if err := w.Writer.Flush(); err != nil { + w.closer.Close() + return err + } + return w.closer.Close() +} + +func newWriter(c *conn, recType recType, reqId uint16) *bufWriter { + s := &streamWriter{c: c, recType: recType, reqId: reqId} + w := bufio.NewWriterSize(s, maxWrite) + return &bufWriter{s, w} +} + +// streamWriter abstracts out the separation of a stream into discrete records. +// It only writes maxWrite bytes at a time. +type streamWriter struct { + c *conn + recType recType + reqId uint16 +} + +func (w *streamWriter) Write(p []byte) (int, error) { + nn := 0 + for len(p) > 0 { + n := len(p) + if n > maxWrite { + n = maxWrite + } + if err := w.c.writeRecord(w.recType, w.reqId, p[:n]); err != nil { + return nn, err + } + nn += n + p = p[n:] + } + return nn, nil +} + +func (w *streamWriter) Close() error { + // send empty record to close the stream + return w.c.writeRecord(w.recType, w.reqId, nil) +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/core/http/fcgi/fcgi_test.go b/vendor/github.com/Azure/azure-sdk-for-go/core/http/fcgi/fcgi_test.go new file mode 100644 index 000000000..6c7e1a9ce --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/core/http/fcgi/fcgi_test.go @@ -0,0 +1,150 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package fcgi + +import ( + "bytes" + "errors" + "io" + "testing" +) + +var sizeTests = []struct { + size uint32 + bytes []byte +}{ + {0, []byte{0x00}}, + {127, []byte{0x7F}}, + {128, []byte{0x80, 0x00, 0x00, 0x80}}, + {1000, []byte{0x80, 0x00, 0x03, 0xE8}}, + {33554431, []byte{0x81, 0xFF, 0xFF, 0xFF}}, +} + +func TestSize(t *testing.T) { + b := make([]byte, 4) + for i, test := range sizeTests { + n := encodeSize(b, test.size) + if !bytes.Equal(b[:n], test.bytes) { + t.Errorf("%d expected %x, encoded %x", i, test.bytes, b) + } + size, n := readSize(test.bytes) + if size != test.size { + t.Errorf("%d expected %d, read %d", i, test.size, size) + } + if len(test.bytes) != n { + t.Errorf("%d did not consume all the bytes", i) + } + } +} + +var streamTests = []struct { + desc string + recType recType + reqId uint16 + content []byte + raw []byte +}{ + {"single record", typeStdout, 1, nil, + []byte{1, byte(typeStdout), 0, 1, 0, 0, 0, 0}, + }, + // this data will have to be split into two records + {"two records", typeStdin, 300, make([]byte, 66000), + bytes.Join([][]byte{ + // header for the first record + {1, byte(typeStdin), 0x01, 0x2C, 0xFF, 0xFF, 1, 0}, + make([]byte, 65536), + // header for the second + {1, byte(typeStdin), 0x01, 0x2C, 0x01, 0xD1, 7, 0}, + make([]byte, 472), + // header for the empty record + {1, byte(typeStdin), 0x01, 0x2C, 0, 0, 0, 0}, + }, + nil), + }, +} + +type nilCloser struct { + io.ReadWriter +} + +func (c *nilCloser) Close() error { return nil } + +func TestStreams(t *testing.T) { + var rec record +outer: + for _, test := range streamTests { + buf := bytes.NewBuffer(test.raw) + var content []byte + for buf.Len() > 0 { + if err := rec.read(buf); err != nil { + t.Errorf("%s: error reading record: %v", test.desc, err) + continue outer + } + content = append(content, rec.content()...) + } + if rec.h.Type != test.recType { + t.Errorf("%s: got type %d expected %d", test.desc, rec.h.Type, test.recType) + continue + } + if rec.h.Id != test.reqId { + t.Errorf("%s: got request ID %d expected %d", test.desc, rec.h.Id, test.reqId) + continue + } + if !bytes.Equal(content, test.content) { + t.Errorf("%s: read wrong content", test.desc) + continue + } + buf.Reset() + c := newConn(&nilCloser{buf}) + w := newWriter(c, test.recType, test.reqId) + if _, err := w.Write(test.content); err != nil { + t.Errorf("%s: error writing record: %v", test.desc, err) + continue + } + if err := w.Close(); err != nil { + t.Errorf("%s: error closing stream: %v", test.desc, err) + continue + } + if !bytes.Equal(buf.Bytes(), test.raw) { + t.Errorf("%s: wrote wrong content", test.desc) + } + } +} + +type writeOnlyConn struct { + buf []byte +} + +func (c *writeOnlyConn) Write(p []byte) (int, error) { + c.buf = append(c.buf, p...) 
+ return len(p), nil +} + +func (c *writeOnlyConn) Read(p []byte) (int, error) { + return 0, errors.New("conn is write-only") +} + +func (c *writeOnlyConn) Close() error { + return nil +} + +func TestGetValues(t *testing.T) { + var rec record + rec.h.Type = typeGetValues + + wc := new(writeOnlyConn) + c := newChild(wc, nil) + err := c.handleRecord(&rec) + if err != nil { + t.Fatalf("handleRecord: %v", err) + } + + const want = "\x01\n\x00\x00\x00\x12\x06\x00" + + "\x0f\x01FCGI_MPXS_CONNS1" + + "\x00\x00\x00\x00\x00\x00\x01\n\x00\x00\x00\x00\x00\x00" + if got := string(wc.buf); got != want { + t.Errorf(" got: %q\nwant: %q\n", got, want) + } +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/core/http/filetransport.go b/vendor/github.com/Azure/azure-sdk-for-go/core/http/filetransport.go new file mode 100644 index 000000000..821787e0c --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/core/http/filetransport.go @@ -0,0 +1,123 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package http + +import ( + "fmt" + "io" +) + +// fileTransport implements RoundTripper for the 'file' protocol. +type fileTransport struct { + fh fileHandler +} + +// NewFileTransport returns a new RoundTripper, serving the provided +// FileSystem. The returned RoundTripper ignores the URL host in its +// incoming requests, as well as most other properties of the +// request. +// +// The typical use case for NewFileTransport is to register the "file" +// protocol with a Transport, as in: +// +// t := &http.Transport{} +// t.RegisterProtocol("file", http.NewFileTransport(http.Dir("/"))) +// c := &http.Client{Transport: t} +// res, err := c.Get("file:///etc/passwd") +// ... +func NewFileTransport(fs FileSystem) RoundTripper { + return fileTransport{fileHandler{fs}} +} + +func (t fileTransport) RoundTrip(req *Request) (resp *Response, err error) { + // We start ServeHTTP in a goroutine, which may take a long + // time if the file is large. The newPopulateResponseWriter + // call returns a channel which either ServeHTTP or finish() + // sends our *Response on, once the *Response itself has been + // populated (even if the body itself is still being + // written to the res.Body, a pipe) + rw, resc := newPopulateResponseWriter() + go func() { + t.fh.ServeHTTP(rw, req) + rw.finish() + }() + return <-resc, nil +} + +func newPopulateResponseWriter() (*populateResponse, <-chan *Response) { + pr, pw := io.Pipe() + rw := &populateResponse{ + ch: make(chan *Response), + pw: pw, + res: &Response{ + Proto: "HTTP/1.0", + ProtoMajor: 1, + Header: make(Header), + Close: true, + Body: pr, + }, + } + return rw, rw.ch +} + +// populateResponse is a ResponseWriter that populates the *Response +// in res, and writes its body to a pipe connected to the response +// body. Once writes begin or finish() is called, the response is sent +// on ch. 
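+//
+// Note that RoundTrip above returns as soon as the headers are known;
+// a caller reading res.Body is really reading from the pipe that
+// ServeHTTP is still writing to from its goroutine.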
+type populateResponse struct { + res *Response + ch chan *Response + wroteHeader bool + hasContent bool + sentResponse bool + pw *io.PipeWriter +} + +func (pr *populateResponse) finish() { + if !pr.wroteHeader { + pr.WriteHeader(500) + } + if !pr.sentResponse { + pr.sendResponse() + } + pr.pw.Close() +} + +func (pr *populateResponse) sendResponse() { + if pr.sentResponse { + return + } + pr.sentResponse = true + + if pr.hasContent { + pr.res.ContentLength = -1 + } + pr.ch <- pr.res +} + +func (pr *populateResponse) Header() Header { + return pr.res.Header +} + +func (pr *populateResponse) WriteHeader(code int) { + if pr.wroteHeader { + return + } + pr.wroteHeader = true + + pr.res.StatusCode = code + pr.res.Status = fmt.Sprintf("%d %s", code, StatusText(code)) +} + +func (pr *populateResponse) Write(p []byte) (n int, err error) { + if !pr.wroteHeader { + pr.WriteHeader(StatusOK) + } + pr.hasContent = true + if !pr.sentResponse { + pr.sendResponse() + } + return pr.pw.Write(p) +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/core/http/filetransport_test.go b/vendor/github.com/Azure/azure-sdk-for-go/core/http/filetransport_test.go new file mode 100644 index 000000000..6f1a537e2 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/core/http/filetransport_test.go @@ -0,0 +1,65 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package http + +import ( + "io/ioutil" + "os" + "path/filepath" + "testing" +) + +func checker(t *testing.T) func(string, error) { + return func(call string, err error) { + if err == nil { + return + } + t.Fatalf("%s: %v", call, err) + } +} + +func TestFileTransport(t *testing.T) { + check := checker(t) + + dname, err := ioutil.TempDir("", "") + check("TempDir", err) + fname := filepath.Join(dname, "foo.txt") + err = ioutil.WriteFile(fname, []byte("Bar"), 0644) + check("WriteFile", err) + defer os.Remove(dname) + defer os.Remove(fname) + + tr := &Transport{} + tr.RegisterProtocol("file", NewFileTransport(Dir(dname))) + c := &Client{Transport: tr} + + fooURLs := []string{"file:///foo.txt", "file://../foo.txt"} + for _, urlstr := range fooURLs { + res, err := c.Get(urlstr) + check("Get "+urlstr, err) + if res.StatusCode != 200 { + t.Errorf("for %s, StatusCode = %d, want 200", urlstr, res.StatusCode) + } + if res.ContentLength != -1 { + t.Errorf("for %s, ContentLength = %d, want -1", urlstr, res.ContentLength) + } + if res.Body == nil { + t.Fatalf("for %s, nil Body", urlstr) + } + slurp, err := ioutil.ReadAll(res.Body) + check("ReadAll "+urlstr, err) + if string(slurp) != "Bar" { + t.Errorf("for %s, got content %q, want %q", urlstr, string(slurp), "Bar") + } + } + + const badURL = "file://../no-exist.txt" + res, err := c.Get(badURL) + check("Get "+badURL, err) + if res.StatusCode != 404 { + t.Errorf("for %s, StatusCode = %d, want 404", badURL, res.StatusCode) + } + res.Body.Close() +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/core/http/fs.go b/vendor/github.com/Azure/azure-sdk-for-go/core/http/fs.go new file mode 100644 index 000000000..8576cf844 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/core/http/fs.go @@ -0,0 +1,549 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// HTTP file system request handler + +package http + +import ( + "errors" + "fmt" + "io" + "mime" + "mime/multipart" + "net/textproto" + "net/url" + "os" + "path" + "path/filepath" + "strconv" + "strings" + "time" +) + +// A Dir implements http.FileSystem using the native file +// system restricted to a specific directory tree. +// +// An empty Dir is treated as ".". +type Dir string + +func (d Dir) Open(name string) (File, error) { + if filepath.Separator != '/' && strings.IndexRune(name, filepath.Separator) >= 0 || + strings.Contains(name, "\x00") { + return nil, errors.New("http: invalid character in file path") + } + dir := string(d) + if dir == "" { + dir = "." + } + f, err := os.Open(filepath.Join(dir, filepath.FromSlash(path.Clean("/"+name)))) + if err != nil { + return nil, err + } + return f, nil +} + +// A FileSystem implements access to a collection of named files. +// The elements in a file path are separated by slash ('/', U+002F) +// characters, regardless of host operating system convention. +type FileSystem interface { + Open(name string) (File, error) +} + +// A File is returned by a FileSystem's Open method and can be +// served by the FileServer implementation. +// +// The methods should behave the same as those on an *os.File. +type File interface { + io.Closer + io.Reader + Readdir(count int) ([]os.FileInfo, error) + Seek(offset int64, whence int) (int64, error) + Stat() (os.FileInfo, error) +} + +func dirList(w ResponseWriter, f File) { + w.Header().Set("Content-Type", "text/html; charset=utf-8") + fmt.Fprintf(w, "
\n")
+	for {
+		dirs, err := f.Readdir(100)
+		if err != nil || len(dirs) == 0 {
+			break
+		}
+		for _, d := range dirs {
+			name := d.Name()
+			if d.IsDir() {
+				name += "/"
+			}
+			// name may contain '?' or '#', which must be escaped to remain
+			// part of the URL path, and not indicate the start of a query
+			// string or fragment.
+			url := url.URL{Path: name}
+			fmt.Fprintf(w, "<a href=\"%s\">%s</a>\n", url.String(), htmlReplacer.Replace(name))
+		}
+	}
+	fmt.Fprintf(w, "
\n") +} + +// ServeContent replies to the request using the content in the +// provided ReadSeeker. The main benefit of ServeContent over io.Copy +// is that it handles Range requests properly, sets the MIME type, and +// handles If-Modified-Since requests. +// +// If the response's Content-Type header is not set, ServeContent +// first tries to deduce the type from name's file extension and, +// if that fails, falls back to reading the first block of the content +// and passing it to DetectContentType. +// The name is otherwise unused; in particular it can be empty and is +// never sent in the response. +// +// If modtime is not the zero time, ServeContent includes it in a +// Last-Modified header in the response. If the request includes an +// If-Modified-Since header, ServeContent uses modtime to decide +// whether the content needs to be sent at all. +// +// The content's Seek method must work: ServeContent uses +// a seek to the end of the content to determine its size. +// +// If the caller has set w's ETag header, ServeContent uses it to +// handle requests using If-Range and If-None-Match. +// +// Note that *os.File implements the io.ReadSeeker interface. +func ServeContent(w ResponseWriter, req *Request, name string, modtime time.Time, content io.ReadSeeker) { + sizeFunc := func() (int64, error) { + size, err := content.Seek(0, os.SEEK_END) + if err != nil { + return 0, errSeeker + } + _, err = content.Seek(0, os.SEEK_SET) + if err != nil { + return 0, errSeeker + } + return size, nil + } + serveContent(w, req, name, modtime, sizeFunc, content) +} + +// errSeeker is returned by ServeContent's sizeFunc when the content +// doesn't seek properly. The underlying Seeker's error text isn't +// included in the sizeFunc reply so it's not sent over HTTP to end +// users. +var errSeeker = errors.New("seeker can't seek") + +// if name is empty, filename is unknown. (used for mime type, before sniffing) +// if modtime.IsZero(), modtime is unknown. +// content must be seeked to the beginning of the file. +// The sizeFunc is called at most once. Its error, if any, is sent in the HTTP response. +func serveContent(w ResponseWriter, r *Request, name string, modtime time.Time, sizeFunc func() (int64, error), content io.ReadSeeker) { + if checkLastModified(w, r, modtime) { + return + } + rangeReq, done := checkETag(w, r) + if done { + return + } + + code := StatusOK + + // If Content-Type isn't set, use the file's extension to find it, but + // if the Content-Type is unset explicitly, do not sniff the type. + ctypes, haveType := w.Header()["Content-Type"] + var ctype string + if !haveType { + ctype = mime.TypeByExtension(filepath.Ext(name)) + if ctype == "" { + // read a chunk to decide between utf-8 text and binary + var buf [sniffLen]byte + n, _ := io.ReadFull(content, buf[:]) + ctype = DetectContentType(buf[:n]) + _, err := content.Seek(0, os.SEEK_SET) // rewind to output whole file + if err != nil { + Error(w, "seeker can't seek", StatusInternalServerError) + return + } + } + w.Header().Set("Content-Type", ctype) + } else if len(ctypes) > 0 { + ctype = ctypes[0] + } + + size, err := sizeFunc() + if err != nil { + Error(w, err.Error(), StatusInternalServerError) + return + } + + // handle Content-Range header. 
+ sendSize := size + var sendContent io.Reader = content + if size >= 0 { + ranges, err := parseRange(rangeReq, size) + if err != nil { + Error(w, err.Error(), StatusRequestedRangeNotSatisfiable) + return + } + if sumRangesSize(ranges) > size { + // The total number of bytes in all the ranges + // is larger than the size of the file by + // itself, so this is probably an attack, or a + // dumb client. Ignore the range request. + ranges = nil + } + switch { + case len(ranges) == 1: + // RFC 2616, Section 14.16: + // "When an HTTP message includes the content of a single + // range (for example, a response to a request for a + // single range, or to a request for a set of ranges + // that overlap without any holes), this content is + // transmitted with a Content-Range header, and a + // Content-Length header showing the number of bytes + // actually transferred. + // ... + // A response to a request for a single range MUST NOT + // be sent using the multipart/byteranges media type." + ra := ranges[0] + if _, err := content.Seek(ra.start, os.SEEK_SET); err != nil { + Error(w, err.Error(), StatusRequestedRangeNotSatisfiable) + return + } + sendSize = ra.length + code = StatusPartialContent + w.Header().Set("Content-Range", ra.contentRange(size)) + case len(ranges) > 1: + for _, ra := range ranges { + if ra.start > size { + Error(w, err.Error(), StatusRequestedRangeNotSatisfiable) + return + } + } + sendSize = rangesMIMESize(ranges, ctype, size) + code = StatusPartialContent + + pr, pw := io.Pipe() + mw := multipart.NewWriter(pw) + w.Header().Set("Content-Type", "multipart/byteranges; boundary="+mw.Boundary()) + sendContent = pr + defer pr.Close() // cause writing goroutine to fail and exit if CopyN doesn't finish. + go func() { + for _, ra := range ranges { + part, err := mw.CreatePart(ra.mimeHeader(ctype, size)) + if err != nil { + pw.CloseWithError(err) + return + } + if _, err := content.Seek(ra.start, os.SEEK_SET); err != nil { + pw.CloseWithError(err) + return + } + if _, err := io.CopyN(part, content, ra.length); err != nil { + pw.CloseWithError(err) + return + } + } + mw.Close() + pw.Close() + }() + } + + w.Header().Set("Accept-Ranges", "bytes") + if w.Header().Get("Content-Encoding") == "" { + w.Header().Set("Content-Length", strconv.FormatInt(sendSize, 10)) + } + } + + w.WriteHeader(code) + + if r.Method != "HEAD" { + io.CopyN(w, sendContent, sendSize) + } +} + +// modtime is the modification time of the resource to be served, or IsZero(). +// return value is whether this request is now complete. +func checkLastModified(w ResponseWriter, r *Request, modtime time.Time) bool { + if modtime.IsZero() { + return false + } + + // The Date-Modified header truncates sub-second precision, so + // use mtime < t+1s instead of mtime <= t to check for unmodified. + if t, err := time.Parse(TimeFormat, r.Header.Get("If-Modified-Since")); err == nil && modtime.Before(t.Add(1*time.Second)) { + h := w.Header() + delete(h, "Content-Type") + delete(h, "Content-Length") + w.WriteHeader(StatusNotModified) + return true + } + w.Header().Set("Last-Modified", modtime.UTC().Format(TimeFormat)) + return false +} + +// checkETag implements If-None-Match and If-Range checks. +// The ETag must have been previously set in the ResponseWriter's headers. +// +// The return value is the effective request "Range" header to use and +// whether this request is now considered done. 
+func checkETag(w ResponseWriter, r *Request) (rangeReq string, done bool) { + etag := w.Header().get("Etag") + rangeReq = r.Header.get("Range") + + // Invalidate the range request if the entity doesn't match the one + // the client was expecting. + // "If-Range: version" means "ignore the Range: header unless version matches the + // current file." + // We only support ETag versions. + // The caller must have set the ETag on the response already. + if ir := r.Header.get("If-Range"); ir != "" && ir != etag { + // TODO(bradfitz): handle If-Range requests with Last-Modified + // times instead of ETags? I'd rather not, at least for + // now. That seems like a bug/compromise in the RFC 2616, and + // I've never heard of anybody caring about that (yet). + rangeReq = "" + } + + if inm := r.Header.get("If-None-Match"); inm != "" { + // Must know ETag. + if etag == "" { + return rangeReq, false + } + + // TODO(bradfitz): non-GET/HEAD requests require more work: + // sending a different status code on matches, and + // also can't use weak cache validators (those with a "W/ + // prefix). But most users of ServeContent will be using + // it on GET or HEAD, so only support those for now. + if r.Method != "GET" && r.Method != "HEAD" { + return rangeReq, false + } + + // TODO(bradfitz): deal with comma-separated or multiple-valued + // list of If-None-match values. For now just handle the common + // case of a single item. + if inm == etag || inm == "*" { + h := w.Header() + delete(h, "Content-Type") + delete(h, "Content-Length") + w.WriteHeader(StatusNotModified) + return "", true + } + } + return rangeReq, false +} + +// name is '/'-separated, not filepath.Separator. +func serveFile(w ResponseWriter, r *Request, fs FileSystem, name string, redirect bool) { + const indexPage = "/index.html" + + // redirect .../index.html to .../ + // can't use Redirect() because that would make the path absolute, + // which would be a problem running under StripPrefix + if strings.HasSuffix(r.URL.Path, indexPage) { + localRedirect(w, r, "./") + return + } + + f, err := fs.Open(name) + if err != nil { + // TODO expose actual error? + NotFound(w, r) + return + } + defer f.Close() + + d, err1 := f.Stat() + if err1 != nil { + // TODO expose actual error? + NotFound(w, r) + return + } + + if redirect { + // redirect to canonical path: / at end of directory url + // r.URL.Path always begins with / + url := r.URL.Path + if d.IsDir() { + if url[len(url)-1] != '/' { + localRedirect(w, r, path.Base(url)+"/") + return + } + } else { + if url[len(url)-1] == '/' { + localRedirect(w, r, "../"+path.Base(url)) + return + } + } + } + + // use contents of index.html for directory, if present + if d.IsDir() { + index := name + indexPage + ff, err := fs.Open(index) + if err == nil { + defer ff.Close() + dd, err := ff.Stat() + if err == nil { + name = index + d = dd + f = ff + } + } + } + + // Still a directory? (we didn't find an index.html file) + if d.IsDir() { + if checkLastModified(w, r, d.ModTime()) { + return + } + dirList(w, f) + return + } + + // serverContent will check modification time + sizeFunc := func() (int64, error) { return d.Size(), nil } + serveContent(w, r, d.Name(), d.ModTime(), sizeFunc, f) +} + +// localRedirect gives a Moved Permanently response. +// It does not convert relative paths to absolute paths like Redirect does. +func localRedirect(w ResponseWriter, r *Request, newPath string) { + if q := r.URL.RawQuery; q != "" { + newPath += "?" 
+ q + } + w.Header().Set("Location", newPath) + w.WriteHeader(StatusMovedPermanently) +} + +// ServeFile replies to the request with the contents of the named file or directory. +func ServeFile(w ResponseWriter, r *Request, name string) { + dir, file := filepath.Split(name) + serveFile(w, r, Dir(dir), file, false) +} + +type fileHandler struct { + root FileSystem +} + +// FileServer returns a handler that serves HTTP requests +// with the contents of the file system rooted at root. +// +// To use the operating system's file system implementation, +// use http.Dir: +// +// http.Handle("/", http.FileServer(http.Dir("/tmp"))) +func FileServer(root FileSystem) Handler { + return &fileHandler{root} +} + +func (f *fileHandler) ServeHTTP(w ResponseWriter, r *Request) { + upath := r.URL.Path + if !strings.HasPrefix(upath, "/") { + upath = "/" + upath + r.URL.Path = upath + } + serveFile(w, r, f.root, path.Clean(upath), true) +} + +// httpRange specifies the byte range to be sent to the client. +type httpRange struct { + start, length int64 +} + +func (r httpRange) contentRange(size int64) string { + return fmt.Sprintf("bytes %d-%d/%d", r.start, r.start+r.length-1, size) +} + +func (r httpRange) mimeHeader(contentType string, size int64) textproto.MIMEHeader { + return textproto.MIMEHeader{ + "Content-Range": {r.contentRange(size)}, + "Content-Type": {contentType}, + } +} + +// parseRange parses a Range header string as per RFC 2616. +func parseRange(s string, size int64) ([]httpRange, error) { + if s == "" { + return nil, nil // header not present + } + const b = "bytes=" + if !strings.HasPrefix(s, b) { + return nil, errors.New("invalid range") + } + var ranges []httpRange + for _, ra := range strings.Split(s[len(b):], ",") { + ra = strings.TrimSpace(ra) + if ra == "" { + continue + } + i := strings.Index(ra, "-") + if i < 0 { + return nil, errors.New("invalid range") + } + start, end := strings.TrimSpace(ra[:i]), strings.TrimSpace(ra[i+1:]) + var r httpRange + if start == "" { + // If no start is specified, end specifies the + // range start relative to the end of the file. + i, err := strconv.ParseInt(end, 10, 64) + if err != nil { + return nil, errors.New("invalid range") + } + if i > size { + i = size + } + r.start = size - i + r.length = size - r.start + } else { + i, err := strconv.ParseInt(start, 10, 64) + if err != nil || i > size || i < 0 { + return nil, errors.New("invalid range") + } + r.start = i + if end == "" { + // If no end is specified, range extends to end of the file. + r.length = size - r.start + } else { + i, err := strconv.ParseInt(end, 10, 64) + if err != nil || r.start > i { + return nil, errors.New("invalid range") + } + if i >= size { + i = size - 1 + } + r.length = i - r.start + 1 + } + } + ranges = append(ranges, r) + } + return ranges, nil +} + +// countingWriter counts how many bytes have been written to it. +type countingWriter int64 + +func (w *countingWriter) Write(p []byte) (n int, err error) { + *w += countingWriter(len(p)) + return len(p), nil +} + +// rangesMIMESize returns the number of bytes it takes to encode the +// provided ranges as a multipart response. 
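+//
+// It does so by rendering the part headers into a countingWriter,
+// which merely tallies the bytes written, and adding each range's raw
+// length, so Content-Length can be announced without buffering the
+// multipart body itself.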
+func rangesMIMESize(ranges []httpRange, contentType string, contentSize int64) (encSize int64) { + var w countingWriter + mw := multipart.NewWriter(&w) + for _, ra := range ranges { + mw.CreatePart(ra.mimeHeader(contentType, contentSize)) + encSize += ra.length + } + mw.Close() + encSize += int64(w) + return +} + +func sumRangesSize(ranges []httpRange) (size int64) { + for _, ra := range ranges { + size += ra.length + } + return +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/core/http/fs_test.go b/vendor/github.com/Azure/azure-sdk-for-go/core/http/fs_test.go new file mode 100644 index 000000000..f968565f9 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/core/http/fs_test.go @@ -0,0 +1,858 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package http_test + +import ( + "bytes" + "errors" + "fmt" + "io" + "io/ioutil" + "mime" + "mime/multipart" + "net" + . "net/http" + "net/http/httptest" + "net/url" + "os" + "os/exec" + "path" + "path/filepath" + "reflect" + "regexp" + "runtime" + "strconv" + "strings" + "testing" + "time" +) + +const ( + testFile = "testdata/file" + testFileLen = 11 +) + +type wantRange struct { + start, end int64 // range [start,end) +} + +var itoa = strconv.Itoa + +var ServeFileRangeTests = []struct { + r string + code int + ranges []wantRange +}{ + {r: "", code: StatusOK}, + {r: "bytes=0-4", code: StatusPartialContent, ranges: []wantRange{{0, 5}}}, + {r: "bytes=2-", code: StatusPartialContent, ranges: []wantRange{{2, testFileLen}}}, + {r: "bytes=-5", code: StatusPartialContent, ranges: []wantRange{{testFileLen - 5, testFileLen}}}, + {r: "bytes=3-7", code: StatusPartialContent, ranges: []wantRange{{3, 8}}}, + {r: "bytes=20-", code: StatusRequestedRangeNotSatisfiable}, + {r: "bytes=0-0,-2", code: StatusPartialContent, ranges: []wantRange{{0, 1}, {testFileLen - 2, testFileLen}}}, + {r: "bytes=0-1,5-8", code: StatusPartialContent, ranges: []wantRange{{0, 2}, {5, 9}}}, + {r: "bytes=0-1,5-", code: StatusPartialContent, ranges: []wantRange{{0, 2}, {5, testFileLen}}}, + {r: "bytes=5-1000", code: StatusPartialContent, ranges: []wantRange{{5, testFileLen}}}, + {r: "bytes=0-,1-,2-,3-,4-", code: StatusOK}, // ignore wasteful range request + {r: "bytes=0-" + itoa(testFileLen-2), code: StatusPartialContent, ranges: []wantRange{{0, testFileLen - 1}}}, + {r: "bytes=0-" + itoa(testFileLen-1), code: StatusPartialContent, ranges: []wantRange{{0, testFileLen}}}, + {r: "bytes=0-" + itoa(testFileLen), code: StatusPartialContent, ranges: []wantRange{{0, testFileLen}}}, +} + +func TestServeFile(t *testing.T) { + defer afterTest(t) + ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) { + ServeFile(w, r, "testdata/file") + })) + defer ts.Close() + + var err error + + file, err := ioutil.ReadFile(testFile) + if err != nil { + t.Fatal("reading file:", err) + } + + // set up the Request (re-used for all tests) + var req Request + req.Header = make(Header) + if req.URL, err = url.Parse(ts.URL); err != nil { + t.Fatal("ParseURL:", err) + } + req.Method = "GET" + + // straight GET + _, body := getBody(t, "straight get", req) + if !bytes.Equal(body, file) { + t.Fatalf("body mismatch: got %q, want %q", body, file) + } + + // Range tests +Cases: + for _, rt := range ServeFileRangeTests { + if rt.r != "" { + req.Header.Set("Range", rt.r) + } + resp, body := getBody(t, fmt.Sprintf("range test %q", rt.r), req) + if resp.StatusCode != rt.code { + 
t.Errorf("range=%q: StatusCode=%d, want %d", rt.r, resp.StatusCode, rt.code) + } + if rt.code == StatusRequestedRangeNotSatisfiable { + continue + } + wantContentRange := "" + if len(rt.ranges) == 1 { + rng := rt.ranges[0] + wantContentRange = fmt.Sprintf("bytes %d-%d/%d", rng.start, rng.end-1, testFileLen) + } + cr := resp.Header.Get("Content-Range") + if cr != wantContentRange { + t.Errorf("range=%q: Content-Range = %q, want %q", rt.r, cr, wantContentRange) + } + ct := resp.Header.Get("Content-Type") + if len(rt.ranges) == 1 { + rng := rt.ranges[0] + wantBody := file[rng.start:rng.end] + if !bytes.Equal(body, wantBody) { + t.Errorf("range=%q: body = %q, want %q", rt.r, body, wantBody) + } + if strings.HasPrefix(ct, "multipart/byteranges") { + t.Errorf("range=%q content-type = %q; unexpected multipart/byteranges", rt.r, ct) + } + } + if len(rt.ranges) > 1 { + typ, params, err := mime.ParseMediaType(ct) + if err != nil { + t.Errorf("range=%q content-type = %q; %v", rt.r, ct, err) + continue + } + if typ != "multipart/byteranges" { + t.Errorf("range=%q content-type = %q; want multipart/byteranges", rt.r, typ) + continue + } + if params["boundary"] == "" { + t.Errorf("range=%q content-type = %q; lacks boundary", rt.r, ct) + continue + } + if g, w := resp.ContentLength, int64(len(body)); g != w { + t.Errorf("range=%q Content-Length = %d; want %d", rt.r, g, w) + continue + } + mr := multipart.NewReader(bytes.NewReader(body), params["boundary"]) + for ri, rng := range rt.ranges { + part, err := mr.NextPart() + if err != nil { + t.Errorf("range=%q, reading part index %d: %v", rt.r, ri, err) + continue Cases + } + wantContentRange = fmt.Sprintf("bytes %d-%d/%d", rng.start, rng.end-1, testFileLen) + if g, w := part.Header.Get("Content-Range"), wantContentRange; g != w { + t.Errorf("range=%q: part Content-Range = %q; want %q", rt.r, g, w) + } + body, err := ioutil.ReadAll(part) + if err != nil { + t.Errorf("range=%q, reading part index %d body: %v", rt.r, ri, err) + continue Cases + } + wantBody := file[rng.start:rng.end] + if !bytes.Equal(body, wantBody) { + t.Errorf("range=%q: body = %q, want %q", rt.r, body, wantBody) + } + } + _, err = mr.NextPart() + if err != io.EOF { + t.Errorf("range=%q; expected final error io.EOF; got %v", rt.r, err) + } + } + } +} + +var fsRedirectTestData = []struct { + original, redirect string +}{ + {"/test/index.html", "/test/"}, + {"/test/testdata", "/test/testdata/"}, + {"/test/testdata/file/", "/test/testdata/file"}, +} + +func TestFSRedirect(t *testing.T) { + defer afterTest(t) + ts := httptest.NewServer(StripPrefix("/test", FileServer(Dir(".")))) + defer ts.Close() + + for _, data := range fsRedirectTestData { + res, err := Get(ts.URL + data.original) + if err != nil { + t.Fatal(err) + } + res.Body.Close() + if g, e := res.Request.URL.Path, data.redirect; g != e { + t.Errorf("redirect from %s: got %s, want %s", data.original, g, e) + } + } +} + +type testFileSystem struct { + open func(name string) (File, error) +} + +func (fs *testFileSystem) Open(name string) (File, error) { + return fs.open(name) +} + +func TestFileServerCleans(t *testing.T) { + defer afterTest(t) + ch := make(chan string, 1) + fs := FileServer(&testFileSystem{func(name string) (File, error) { + ch <- name + return nil, errors.New("file does not exist") + }}) + tests := []struct { + reqPath, openArg string + }{ + {"/foo.txt", "/foo.txt"}, + {"//foo.txt", "/foo.txt"}, + {"/../foo.txt", "/foo.txt"}, + } + req, _ := NewRequest("GET", "http://example.com", nil) + for n, test := range tests { + 
rec := httptest.NewRecorder() + req.URL.Path = test.reqPath + fs.ServeHTTP(rec, req) + if got := <-ch; got != test.openArg { + t.Errorf("test %d: got %q, want %q", n, got, test.openArg) + } + } +} + +func TestFileServerEscapesNames(t *testing.T) { + defer afterTest(t) + const dirListPrefix = "
\n"
+	const dirListSuffix = "\n</pre>\n"
\n" + tests := []struct { + name, escaped string + }{ + {`simple_name`, `simple_name`}, + {`"'<>&`, `"'<>&`}, + {`?foo=bar#baz`, `?foo=bar#baz`}, + {`?foo`, `<combo>?foo`}, + } + + // We put each test file in its own directory in the fakeFS so we can look at it in isolation. + fs := make(fakeFS) + for i, test := range tests { + testFile := &fakeFileInfo{basename: test.name} + fs[fmt.Sprintf("/%d", i)] = &fakeFileInfo{ + dir: true, + modtime: time.Unix(1000000000, 0).UTC(), + ents: []*fakeFileInfo{testFile}, + } + fs[fmt.Sprintf("/%d/%s", i, test.name)] = testFile + } + + ts := httptest.NewServer(FileServer(&fs)) + defer ts.Close() + for i, test := range tests { + url := fmt.Sprintf("%s/%d", ts.URL, i) + res, err := Get(url) + if err != nil { + t.Fatalf("test %q: Get: %v", test.name, err) + } + b, err := ioutil.ReadAll(res.Body) + if err != nil { + t.Fatalf("test %q: read Body: %v", test.name, err) + } + s := string(b) + if !strings.HasPrefix(s, dirListPrefix) || !strings.HasSuffix(s, dirListSuffix) { + t.Errorf("test %q: listing dir, full output is %q, want prefix %q and suffix %q", test.name, s, dirListPrefix, dirListSuffix) + } + if trimmed := strings.TrimSuffix(strings.TrimPrefix(s, dirListPrefix), dirListSuffix); trimmed != test.escaped { + t.Errorf("test %q: listing dir, filename escaped to %q, want %q", test.name, trimmed, test.escaped) + } + res.Body.Close() + } +} + +func mustRemoveAll(dir string) { + err := os.RemoveAll(dir) + if err != nil { + panic(err) + } +} + +func TestFileServerImplicitLeadingSlash(t *testing.T) { + defer afterTest(t) + tempDir, err := ioutil.TempDir("", "") + if err != nil { + t.Fatalf("TempDir: %v", err) + } + defer mustRemoveAll(tempDir) + if err := ioutil.WriteFile(filepath.Join(tempDir, "foo.txt"), []byte("Hello world"), 0644); err != nil { + t.Fatalf("WriteFile: %v", err) + } + ts := httptest.NewServer(StripPrefix("/bar/", FileServer(Dir(tempDir)))) + defer ts.Close() + get := func(suffix string) string { + res, err := Get(ts.URL + suffix) + if err != nil { + t.Fatalf("Get %s: %v", suffix, err) + } + b, err := ioutil.ReadAll(res.Body) + if err != nil { + t.Fatalf("ReadAll %s: %v", suffix, err) + } + res.Body.Close() + return string(b) + } + if s := get("/bar/"); !strings.Contains(s, ">foo.txt<") { + t.Logf("expected a directory listing with foo.txt, got %q", s) + } + if s := get("/bar/foo.txt"); s != "Hello world" { + t.Logf("expected %q, got %q", "Hello world", s) + } +} + +func TestDirJoin(t *testing.T) { + if runtime.GOOS == "windows" { + t.Skip("skipping test on windows") + } + wfi, err := os.Stat("/etc/hosts") + if err != nil { + t.Skip("skipping test; no /etc/hosts file") + } + test := func(d Dir, name string) { + f, err := d.Open(name) + if err != nil { + t.Fatalf("open of %s: %v", name, err) + } + defer f.Close() + gfi, err := f.Stat() + if err != nil { + t.Fatalf("stat of %s: %v", name, err) + } + if !os.SameFile(gfi, wfi) { + t.Errorf("%s got different file", name) + } + } + test(Dir("/etc/"), "/hosts") + test(Dir("/etc/"), "hosts") + test(Dir("/etc/"), "../../../../hosts") + test(Dir("/etc"), "/hosts") + test(Dir("/etc"), "hosts") + test(Dir("/etc"), "../../../../hosts") + + // Not really directories, but since we use this trick in + // ServeFile, test it: + test(Dir("/etc/hosts"), "") + test(Dir("/etc/hosts"), "/") + test(Dir("/etc/hosts"), "../") +} + +func TestEmptyDirOpenCWD(t *testing.T) { + test := func(d Dir) { + name := "fs_test.go" + f, err := d.Open(name) + if err != nil { + t.Fatalf("open of %s: %v", name, err) + } + defer 
f.Close() + } + test(Dir("")) + test(Dir(".")) + test(Dir("./")) +} + +func TestServeFileContentType(t *testing.T) { + defer afterTest(t) + const ctype = "icecream/chocolate" + ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) { + switch r.FormValue("override") { + case "1": + w.Header().Set("Content-Type", ctype) + case "2": + // Explicitly inhibit sniffing. + w.Header()["Content-Type"] = []string{} + } + ServeFile(w, r, "testdata/file") + })) + defer ts.Close() + get := func(override string, want []string) { + resp, err := Get(ts.URL + "?override=" + override) + if err != nil { + t.Fatal(err) + } + if h := resp.Header["Content-Type"]; !reflect.DeepEqual(h, want) { + t.Errorf("Content-Type mismatch: got %v, want %v", h, want) + } + resp.Body.Close() + } + get("0", []string{"text/plain; charset=utf-8"}) + get("1", []string{ctype}) + get("2", nil) +} + +func TestServeFileMimeType(t *testing.T) { + defer afterTest(t) + ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) { + ServeFile(w, r, "testdata/style.css") + })) + defer ts.Close() + resp, err := Get(ts.URL) + if err != nil { + t.Fatal(err) + } + resp.Body.Close() + want := "text/css; charset=utf-8" + if h := resp.Header.Get("Content-Type"); h != want { + t.Errorf("Content-Type mismatch: got %q, want %q", h, want) + } +} + +func TestServeFileFromCWD(t *testing.T) { + defer afterTest(t) + ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) { + ServeFile(w, r, "fs_test.go") + })) + defer ts.Close() + r, err := Get(ts.URL) + if err != nil { + t.Fatal(err) + } + r.Body.Close() + if r.StatusCode != 200 { + t.Fatalf("expected 200 OK, got %s", r.Status) + } +} + +func TestServeFileWithContentEncoding(t *testing.T) { + defer afterTest(t) + ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) { + w.Header().Set("Content-Encoding", "foo") + ServeFile(w, r, "testdata/file") + })) + defer ts.Close() + resp, err := Get(ts.URL) + if err != nil { + t.Fatal(err) + } + resp.Body.Close() + if g, e := resp.ContentLength, int64(-1); g != e { + t.Errorf("Content-Length mismatch: got %d, want %d", g, e) + } +} + +func TestServeIndexHtml(t *testing.T) { + defer afterTest(t) + const want = "index.html says hello\n" + ts := httptest.NewServer(FileServer(Dir("."))) + defer ts.Close() + + for _, path := range []string{"/testdata/", "/testdata/index.html"} { + res, err := Get(ts.URL + path) + if err != nil { + t.Fatal(err) + } + b, err := ioutil.ReadAll(res.Body) + if err != nil { + t.Fatal("reading Body:", err) + } + if s := string(b); s != want { + t.Errorf("for path %q got %q, want %q", path, s, want) + } + res.Body.Close() + } +} + +func TestFileServerZeroByte(t *testing.T) { + defer afterTest(t) + ts := httptest.NewServer(FileServer(Dir("."))) + defer ts.Close() + + res, err := Get(ts.URL + "/..\x00") + if err != nil { + t.Fatal(err) + } + b, err := ioutil.ReadAll(res.Body) + if err != nil { + t.Fatal("reading Body:", err) + } + if res.StatusCode == 200 { + t.Errorf("got status 200; want an error. 
Body is:\n%s", string(b)) + } +} + +type fakeFileInfo struct { + dir bool + basename string + modtime time.Time + ents []*fakeFileInfo + contents string +} + +func (f *fakeFileInfo) Name() string { return f.basename } +func (f *fakeFileInfo) Sys() interface{} { return nil } +func (f *fakeFileInfo) ModTime() time.Time { return f.modtime } +func (f *fakeFileInfo) IsDir() bool { return f.dir } +func (f *fakeFileInfo) Size() int64 { return int64(len(f.contents)) } +func (f *fakeFileInfo) Mode() os.FileMode { + if f.dir { + return 0755 | os.ModeDir + } + return 0644 +} + +type fakeFile struct { + io.ReadSeeker + fi *fakeFileInfo + path string // as opened + entpos int +} + +func (f *fakeFile) Close() error { return nil } +func (f *fakeFile) Stat() (os.FileInfo, error) { return f.fi, nil } +func (f *fakeFile) Readdir(count int) ([]os.FileInfo, error) { + if !f.fi.dir { + return nil, os.ErrInvalid + } + var fis []os.FileInfo + + limit := f.entpos + count + if count <= 0 || limit > len(f.fi.ents) { + limit = len(f.fi.ents) + } + for ; f.entpos < limit; f.entpos++ { + fis = append(fis, f.fi.ents[f.entpos]) + } + + if len(fis) == 0 && count > 0 { + return fis, io.EOF + } else { + return fis, nil + } +} + +type fakeFS map[string]*fakeFileInfo + +func (fs fakeFS) Open(name string) (File, error) { + name = path.Clean(name) + f, ok := fs[name] + if !ok { + return nil, os.ErrNotExist + } + return &fakeFile{ReadSeeker: strings.NewReader(f.contents), fi: f, path: name}, nil +} + +func TestDirectoryIfNotModified(t *testing.T) { + defer afterTest(t) + const indexContents = "I am a fake index.html file" + fileMod := time.Unix(1000000000, 0).UTC() + fileModStr := fileMod.Format(TimeFormat) + dirMod := time.Unix(123, 0).UTC() + indexFile := &fakeFileInfo{ + basename: "index.html", + modtime: fileMod, + contents: indexContents, + } + fs := fakeFS{ + "/": &fakeFileInfo{ + dir: true, + modtime: dirMod, + ents: []*fakeFileInfo{indexFile}, + }, + "/index.html": indexFile, + } + + ts := httptest.NewServer(FileServer(fs)) + defer ts.Close() + + res, err := Get(ts.URL) + if err != nil { + t.Fatal(err) + } + b, err := ioutil.ReadAll(res.Body) + if err != nil { + t.Fatal(err) + } + if string(b) != indexContents { + t.Fatalf("Got body %q; want %q", b, indexContents) + } + res.Body.Close() + + lastMod := res.Header.Get("Last-Modified") + if lastMod != fileModStr { + t.Fatalf("initial Last-Modified = %q; want %q", lastMod, fileModStr) + } + + req, _ := NewRequest("GET", ts.URL, nil) + req.Header.Set("If-Modified-Since", lastMod) + + res, err = DefaultClient.Do(req) + if err != nil { + t.Fatal(err) + } + if res.StatusCode != 304 { + t.Fatalf("Code after If-Modified-Since request = %v; want 304", res.StatusCode) + } + res.Body.Close() + + // Advance the index.html file's modtime, but not the directory's. 
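+	// (serveFile switches to the index file's FileInfo once it finds
+	// index.html, so Last-Modified tracks the file rather than the
+	// directory; the now-stale If-Modified-Since header must therefore
+	// produce a fresh 200 rather than a 304.)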
+ indexFile.modtime = indexFile.modtime.Add(1 * time.Hour) + + res, err = DefaultClient.Do(req) + if err != nil { + t.Fatal(err) + } + if res.StatusCode != 200 { + t.Fatalf("Code after second If-Modified-Since request = %v; want 200; res is %#v", res.StatusCode, res) + } + res.Body.Close() +} + +func mustStat(t *testing.T, fileName string) os.FileInfo { + fi, err := os.Stat(fileName) + if err != nil { + t.Fatal(err) + } + return fi +} + +func TestServeContent(t *testing.T) { + defer afterTest(t) + type serveParam struct { + name string + modtime time.Time + content io.ReadSeeker + contentType string + etag string + } + servec := make(chan serveParam, 1) + ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) { + p := <-servec + if p.etag != "" { + w.Header().Set("ETag", p.etag) + } + if p.contentType != "" { + w.Header().Set("Content-Type", p.contentType) + } + ServeContent(w, r, p.name, p.modtime, p.content) + })) + defer ts.Close() + + type testCase struct { + // One of file or content must be set: + file string + content io.ReadSeeker + + modtime time.Time + serveETag string // optional + serveContentType string // optional + reqHeader map[string]string + wantLastMod string + wantContentType string + wantStatus int + } + htmlModTime := mustStat(t, "testdata/index.html").ModTime() + tests := map[string]testCase{ + "no_last_modified": { + file: "testdata/style.css", + wantContentType: "text/css; charset=utf-8", + wantStatus: 200, + }, + "with_last_modified": { + file: "testdata/index.html", + wantContentType: "text/html; charset=utf-8", + modtime: htmlModTime, + wantLastMod: htmlModTime.UTC().Format(TimeFormat), + wantStatus: 200, + }, + "not_modified_modtime": { + file: "testdata/style.css", + modtime: htmlModTime, + reqHeader: map[string]string{ + "If-Modified-Since": htmlModTime.UTC().Format(TimeFormat), + }, + wantStatus: 304, + }, + "not_modified_modtime_with_contenttype": { + file: "testdata/style.css", + serveContentType: "text/css", // explicit content type + modtime: htmlModTime, + reqHeader: map[string]string{ + "If-Modified-Since": htmlModTime.UTC().Format(TimeFormat), + }, + wantStatus: 304, + }, + "not_modified_etag": { + file: "testdata/style.css", + serveETag: `"foo"`, + reqHeader: map[string]string{ + "If-None-Match": `"foo"`, + }, + wantStatus: 304, + }, + "not_modified_etag_no_seek": { + content: panicOnSeek{nil}, // should never be called + serveETag: `"foo"`, + reqHeader: map[string]string{ + "If-None-Match": `"foo"`, + }, + wantStatus: 304, + }, + "range_good": { + file: "testdata/style.css", + serveETag: `"A"`, + reqHeader: map[string]string{ + "Range": "bytes=0-4", + }, + wantStatus: StatusPartialContent, + wantContentType: "text/css; charset=utf-8", + }, + // An If-Range resource for entity "A", but entity "B" is now current. + // The Range request should be ignored. 
+ "range_no_match": { + file: "testdata/style.css", + serveETag: `"A"`, + reqHeader: map[string]string{ + "Range": "bytes=0-4", + "If-Range": `"B"`, + }, + wantStatus: 200, + wantContentType: "text/css; charset=utf-8", + }, + } + for testName, tt := range tests { + var content io.ReadSeeker + if tt.file != "" { + f, err := os.Open(tt.file) + if err != nil { + t.Fatalf("test %q: %v", testName, err) + } + defer f.Close() + content = f + } else { + content = tt.content + } + + servec <- serveParam{ + name: filepath.Base(tt.file), + content: content, + modtime: tt.modtime, + etag: tt.serveETag, + contentType: tt.serveContentType, + } + req, err := NewRequest("GET", ts.URL, nil) + if err != nil { + t.Fatal(err) + } + for k, v := range tt.reqHeader { + req.Header.Set(k, v) + } + res, err := DefaultClient.Do(req) + if err != nil { + t.Fatal(err) + } + io.Copy(ioutil.Discard, res.Body) + res.Body.Close() + if res.StatusCode != tt.wantStatus { + t.Errorf("test %q: status = %d; want %d", testName, res.StatusCode, tt.wantStatus) + } + if g, e := res.Header.Get("Content-Type"), tt.wantContentType; g != e { + t.Errorf("test %q: content-type = %q, want %q", testName, g, e) + } + if g, e := res.Header.Get("Last-Modified"), tt.wantLastMod; g != e { + t.Errorf("test %q: last-modified = %q, want %q", testName, g, e) + } + } +} + +// verifies that sendfile is being used on Linux +func TestLinuxSendfile(t *testing.T) { + defer afterTest(t) + if runtime.GOOS != "linux" { + t.Skip("skipping; linux-only test") + } + if _, err := exec.LookPath("strace"); err != nil { + t.Skip("skipping; strace not found in path") + } + + ln, err := net.Listen("tcp", "127.0.0.1:0") + if err != nil { + t.Fatal(err) + } + lnf, err := ln.(*net.TCPListener).File() + if err != nil { + t.Fatal(err) + } + defer ln.Close() + + var buf bytes.Buffer + child := exec.Command("strace", "-f", "-q", "-e", "trace=sendfile,sendfile64", os.Args[0], "-test.run=TestLinuxSendfileChild") + child.ExtraFiles = append(child.ExtraFiles, lnf) + child.Env = append([]string{"GO_WANT_HELPER_PROCESS=1"}, os.Environ()...) + child.Stdout = &buf + child.Stderr = &buf + if err := child.Start(); err != nil { + t.Skipf("skipping; failed to start straced child: %v", err) + } + + res, err := Get(fmt.Sprintf("http://%s/", ln.Addr())) + if err != nil { + t.Fatalf("http client error: %v", err) + } + _, err = io.Copy(ioutil.Discard, res.Body) + if err != nil { + t.Fatalf("client body read error: %v", err) + } + res.Body.Close() + + // Force child to exit cleanly. + Get(fmt.Sprintf("http://%s/quit", ln.Addr())) + child.Wait() + + rx := regexp.MustCompile(`sendfile(64)?\(\d+,\s*\d+,\s*NULL,\s*\d+\)\s*=\s*\d+\s*\n`) + rxResume := regexp.MustCompile(`<\.\.\. sendfile(64)? resumed> \)\s*=\s*\d+\s*\n`) + out := buf.String() + if !rx.MatchString(out) && !rxResume.MatchString(out) { + t.Errorf("no sendfile system call found in:\n%s", out) + } +} + +func getBody(t *testing.T, testName string, req Request) (*Response, []byte) { + r, err := DefaultClient.Do(&req) + if err != nil { + t.Fatalf("%s: for URL %q, send error: %v", testName, req.URL.String(), err) + } + b, err := ioutil.ReadAll(r.Body) + if err != nil { + t.Fatalf("%s: for URL %q, reading body: %v", testName, req.URL.String(), err) + } + return r, b +} + +// TestLinuxSendfileChild isn't a real test. It's used as a helper process +// for TestLinuxSendfile. 
+func TestLinuxSendfileChild(*testing.T) { + if os.Getenv("GO_WANT_HELPER_PROCESS") != "1" { + return + } + defer os.Exit(0) + fd3 := os.NewFile(3, "ephemeral-port-listener") + ln, err := net.FileListener(fd3) + if err != nil { + panic(err) + } + mux := NewServeMux() + mux.Handle("/", FileServer(Dir("testdata"))) + mux.HandleFunc("/quit", func(ResponseWriter, *Request) { + os.Exit(0) + }) + s := &Server{Handler: mux} + err = s.Serve(ln) + if err != nil { + panic(err) + } +} + +type panicOnSeek struct{ io.ReadSeeker } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/core/http/header.go b/vendor/github.com/Azure/azure-sdk-for-go/core/http/header.go new file mode 100644 index 000000000..153b94370 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/core/http/header.go @@ -0,0 +1,211 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package http + +import ( + "io" + "net/textproto" + "sort" + "strings" + "sync" + "time" +) + +var raceEnabled = false // set by race.go + +// A Header represents the key-value pairs in an HTTP header. +type Header map[string][]string + +// Add adds the key, value pair to the header. +// It appends to any existing values associated with key. +func (h Header) Add(key, value string) { + textproto.MIMEHeader(h).Add(key, value) +} + +// Set sets the header entries associated with key to +// the single element value. It replaces any existing +// values associated with key. +func (h Header) Set(key, value string) { + textproto.MIMEHeader(h).Set(key, value) +} + +// Get gets the first value associated with the given key. +// If there are no values associated with the key, Get returns "". +// To access multiple values of a key, access the map directly +// with CanonicalHeaderKey. +func (h Header) Get(key string) string { + return textproto.MIMEHeader(h).Get(key) +} + +// get is like Get, but key must already be in CanonicalHeaderKey form. +func (h Header) get(key string) string { + if v := h[key]; len(v) > 0 { + return v[0] + } + return "" +} + +// Del deletes the values associated with key. +func (h Header) Del(key string) { + textproto.MIMEHeader(h).Del(key) +} + +// Write writes a header in wire format. +func (h Header) Write(w io.Writer) error { + return h.WriteSubset(w, nil) +} + +func (h Header) clone() Header { + h2 := make(Header, len(h)) + for k, vv := range h { + vv2 := make([]string, len(vv)) + copy(vv2, vv) + h2[k] = vv2 + } + return h2 +} + +var timeFormats = []string{ + TimeFormat, + time.RFC850, + time.ANSIC, +} + +// ParseTime parses a time header (such as the Date: header), +// trying each of the three formats allowed by HTTP/1.1: +// TimeFormat, time.RFC850, and time.ANSIC. +func ParseTime(text string) (t time.Time, err error) { + for _, layout := range timeFormats { + t, err = time.Parse(layout, text) + if err == nil { + return + } + } + return +} + +var headerNewlineToSpace = strings.NewReplacer("\n", " ", "\r", " ") + +type writeStringer interface { + WriteString(string) (int, error) +} + +// stringWriter implements WriteString on a Writer. +type stringWriter struct { + w io.Writer +} + +func (w stringWriter) WriteString(s string) (n int, err error) { + return w.w.Write([]byte(s)) +} + +type keyValues struct { + key string + values []string +} + +// A headerSorter implements sort.Interface by sorting a []keyValues +// by key. It's used as a pointer, so it can fit in a sort.Interface +// interface value without allocation. 
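+//
+// The intended call pattern, a sketch of what sortedKeyValues below
+// does:
+//
+//	hs := headerSorterPool.Get().(*headerSorter)
+//	hs.kvs = kvs
+//	sort.Sort(hs) // *headerSorter is already a sort.Interface; no boxing allocation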
+type headerSorter struct {
+	kvs []keyValues
+}
+
+func (s *headerSorter) Len() int { return len(s.kvs) }
+func (s *headerSorter) Swap(i, j int) { s.kvs[i], s.kvs[j] = s.kvs[j], s.kvs[i] }
+func (s *headerSorter) Less(i, j int) bool { return s.kvs[i].key < s.kvs[j].key }
+
+var headerSorterPool = sync.Pool{
+	New: func() interface{} { return new(headerSorter) },
+}
+
+// sortedKeyValues returns h's keys sorted in the returned kvs
+// slice. The headerSorter used to sort is also returned, for possible
+// return to headerSorterPool.
+func (h Header) sortedKeyValues(exclude map[string]bool) (kvs []keyValues, hs *headerSorter) {
+	hs = headerSorterPool.Get().(*headerSorter)
+	if cap(hs.kvs) < len(h) {
+		hs.kvs = make([]keyValues, 0, len(h))
+	}
+	kvs = hs.kvs[:0]
+	for k, vv := range h {
+		if !exclude[k] {
+			kvs = append(kvs, keyValues{k, vv})
+		}
+	}
+	hs.kvs = kvs
+	sort.Sort(hs)
+	return kvs, hs
+}
+
+// WriteSubset writes a header in wire format.
+// If exclude is not nil, keys where exclude[key] == true are not written.
+func (h Header) WriteSubset(w io.Writer, exclude map[string]bool) error {
+	ws, ok := w.(writeStringer)
+	if !ok {
+		ws = stringWriter{w}
+	}
+	kvs, sorter := h.sortedKeyValues(exclude)
+	for _, kv := range kvs {
+		for _, v := range kv.values {
+			v = headerNewlineToSpace.Replace(v)
+			v = textproto.TrimString(v)
+			for _, s := range []string{kv.key, ": ", v, "\r\n"} {
+				if _, err := ws.WriteString(s); err != nil {
+					return err
+				}
+			}
+		}
+	}
+	headerSorterPool.Put(sorter)
+	return nil
+}
+
+// CanonicalHeaderKey returns the canonical format of the
+// header key s. The canonicalization converts the first
+// letter and any letter following a hyphen to upper case;
+// the rest are converted to lowercase. For example, the
+// canonical key for "accept-encoding" is "Accept-Encoding".
+func CanonicalHeaderKey(s string) string { return textproto.CanonicalMIMEHeaderKey(s) }
+
+// hasToken reports whether token appears within v, ASCII
+// case-insensitive, with space or comma boundaries.
+// token must be all lowercase.
+// v may contain mixed case.
+func hasToken(v, token string) bool {
+	if len(token) > len(v) || token == "" {
+		return false
+	}
+	if v == token {
+		return true
+	}
+	for sp := 0; sp <= len(v)-len(token); sp++ {
+		// Check that first character is good.
+		// The token is ASCII, so checking only a single byte
+		// is sufficient. We skip this potential starting
+		// position if both the first byte and its potential
+		// ASCII uppercase equivalent (b|0x20) don't match.
+		// False positives ('^' => '~') are caught by EqualFold.
+		if b := v[sp]; b != token[0] && b|0x20 != token[0] {
+			continue
+		}
+		// Check that start pos is on a valid token boundary.
+		if sp > 0 && !isTokenBoundary(v[sp-1]) {
+			continue
+		}
+		// Check that end pos is on a valid token boundary.
+		if endPos := sp + len(token); endPos != len(v) && !isTokenBoundary(v[endPos]) {
+			continue
+		}
+		if strings.EqualFold(v[sp:sp+len(token)], token) {
+			return true
+		}
+	}
+	return false
+}
+
+func isTokenBoundary(b byte) bool {
+	return b == ' ' || b == ',' || b == '\t'
+}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/core/http/header_test.go b/vendor/github.com/Azure/azure-sdk-for-go/core/http/header_test.go
new file mode 100644
index 000000000..9dcd591fa
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/core/http/header_test.go
@@ -0,0 +1,212 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package http + +import ( + "bytes" + "runtime" + "testing" + "time" +) + +var headerWriteTests = []struct { + h Header + exclude map[string]bool + expected string +}{ + {Header{}, nil, ""}, + { + Header{ + "Content-Type": {"text/html; charset=UTF-8"}, + "Content-Length": {"0"}, + }, + nil, + "Content-Length: 0\r\nContent-Type: text/html; charset=UTF-8\r\n", + }, + { + Header{ + "Content-Length": {"0", "1", "2"}, + }, + nil, + "Content-Length: 0\r\nContent-Length: 1\r\nContent-Length: 2\r\n", + }, + { + Header{ + "Expires": {"-1"}, + "Content-Length": {"0"}, + "Content-Encoding": {"gzip"}, + }, + map[string]bool{"Content-Length": true}, + "Content-Encoding: gzip\r\nExpires: -1\r\n", + }, + { + Header{ + "Expires": {"-1"}, + "Content-Length": {"0", "1", "2"}, + "Content-Encoding": {"gzip"}, + }, + map[string]bool{"Content-Length": true}, + "Content-Encoding: gzip\r\nExpires: -1\r\n", + }, + { + Header{ + "Expires": {"-1"}, + "Content-Length": {"0"}, + "Content-Encoding": {"gzip"}, + }, + map[string]bool{"Content-Length": true, "Expires": true, "Content-Encoding": true}, + "", + }, + { + Header{ + "Nil": nil, + "Empty": {}, + "Blank": {""}, + "Double-Blank": {"", ""}, + }, + nil, + "Blank: \r\nDouble-Blank: \r\nDouble-Blank: \r\n", + }, + // Tests header sorting when over the insertion sort threshold side: + { + Header{ + "k1": {"1a", "1b"}, + "k2": {"2a", "2b"}, + "k3": {"3a", "3b"}, + "k4": {"4a", "4b"}, + "k5": {"5a", "5b"}, + "k6": {"6a", "6b"}, + "k7": {"7a", "7b"}, + "k8": {"8a", "8b"}, + "k9": {"9a", "9b"}, + }, + map[string]bool{"k5": true}, + "k1: 1a\r\nk1: 1b\r\nk2: 2a\r\nk2: 2b\r\nk3: 3a\r\nk3: 3b\r\n" + + "k4: 4a\r\nk4: 4b\r\nk6: 6a\r\nk6: 6b\r\n" + + "k7: 7a\r\nk7: 7b\r\nk8: 8a\r\nk8: 8b\r\nk9: 9a\r\nk9: 9b\r\n", + }, +} + +func TestHeaderWrite(t *testing.T) { + var buf bytes.Buffer + for i, test := range headerWriteTests { + test.h.WriteSubset(&buf, test.exclude) + if buf.String() != test.expected { + t.Errorf("#%d:\n got: %q\nwant: %q", i, buf.String(), test.expected) + } + buf.Reset() + } +} + +var parseTimeTests = []struct { + h Header + err bool +}{ + {Header{"Date": {""}}, true}, + {Header{"Date": {"invalid"}}, true}, + {Header{"Date": {"1994-11-06T08:49:37Z00:00"}}, true}, + {Header{"Date": {"Sun, 06 Nov 1994 08:49:37 GMT"}}, false}, + {Header{"Date": {"Sunday, 06-Nov-94 08:49:37 GMT"}}, false}, + {Header{"Date": {"Sun Nov 6 08:49:37 1994"}}, false}, +} + +func TestParseTime(t *testing.T) { + expect := time.Date(1994, 11, 6, 8, 49, 37, 0, time.UTC) + for i, test := range parseTimeTests { + d, err := ParseTime(test.h.Get("Date")) + if err != nil { + if !test.err { + t.Errorf("#%d:\n got err: %v", i, err) + } + continue + } + if test.err { + t.Errorf("#%d:\n should err", i) + continue + } + if !expect.Equal(d) { + t.Errorf("#%d:\n got: %v\nwant: %v", i, d, expect) + } + } +} + +type hasTokenTest struct { + header string + token string + want bool +} + +var hasTokenTests = []hasTokenTest{ + {"", "", false}, + {"", "foo", false}, + {"foo", "foo", true}, + {"foo ", "foo", true}, + {" foo", "foo", true}, + {" foo ", "foo", true}, + {"foo,bar", "foo", true}, + {"bar,foo", "foo", true}, + {"bar, foo", "foo", true}, + {"bar,foo, baz", "foo", true}, + {"bar, foo,baz", "foo", true}, + {"bar,foo, baz", "foo", true}, + {"bar, foo, baz", "foo", true}, + {"FOO", "foo", true}, + {"FOO ", "foo", true}, + {" FOO", "foo", true}, + {" FOO ", "foo", true}, + {"FOO,BAR", "foo", true}, + 
{"BAR,FOO", "foo", true}, + {"BAR, FOO", "foo", true}, + {"BAR,FOO, baz", "foo", true}, + {"BAR, FOO,BAZ", "foo", true}, + {"BAR,FOO, BAZ", "foo", true}, + {"BAR, FOO, BAZ", "foo", true}, + {"foobar", "foo", false}, + {"barfoo ", "foo", false}, +} + +func TestHasToken(t *testing.T) { + for _, tt := range hasTokenTests { + if hasToken(tt.header, tt.token) != tt.want { + t.Errorf("hasToken(%q, %q) = %v; want %v", tt.header, tt.token, !tt.want, tt.want) + } + } +} + +var testHeader = Header{ + "Content-Length": {"123"}, + "Content-Type": {"text/plain"}, + "Date": {"some date at some time Z"}, + "Server": {DefaultUserAgent}, +} + +var buf bytes.Buffer + +func BenchmarkHeaderWriteSubset(b *testing.B) { + b.ReportAllocs() + for i := 0; i < b.N; i++ { + buf.Reset() + testHeader.WriteSubset(&buf, nil) + } +} + +func TestHeaderWriteSubsetAllocs(t *testing.T) { + if testing.Short() { + t.Skip("skipping alloc test in short mode") + } + if raceEnabled { + t.Skip("skipping test under race detector") + } + if runtime.GOMAXPROCS(0) > 1 { + t.Skip("skipping; GOMAXPROCS>1") + } + n := testing.AllocsPerRun(100, func() { + buf.Reset() + testHeader.WriteSubset(&buf, nil) + }) + if n > 0 { + t.Errorf("allocs = %g; want 0", n) + } +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/core/http/httptest/example_test.go b/vendor/github.com/Azure/azure-sdk-for-go/core/http/httptest/example_test.go new file mode 100644 index 000000000..42a0ec953 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/core/http/httptest/example_test.go @@ -0,0 +1,50 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package httptest_test + +import ( + "fmt" + "io/ioutil" + "log" + "net/http" + "net/http/httptest" +) + +func ExampleResponseRecorder() { + handler := func(w http.ResponseWriter, r *http.Request) { + http.Error(w, "something failed", http.StatusInternalServerError) + } + + req, err := http.NewRequest("GET", "http://example.com/foo", nil) + if err != nil { + log.Fatal(err) + } + + w := httptest.NewRecorder() + handler(w, req) + + fmt.Printf("%d - %s", w.Code, w.Body.String()) + // Output: 500 - something failed +} + +func ExampleServer() { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + fmt.Fprintln(w, "Hello, client") + })) + defer ts.Close() + + res, err := http.Get(ts.URL) + if err != nil { + log.Fatal(err) + } + greeting, err := ioutil.ReadAll(res.Body) + res.Body.Close() + if err != nil { + log.Fatal(err) + } + + fmt.Printf("%s", greeting) + // Output: Hello, client +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/core/http/httptest/recorder.go b/vendor/github.com/Azure/azure-sdk-for-go/core/http/httptest/recorder.go new file mode 100644 index 000000000..5451f5423 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/core/http/httptest/recorder.go @@ -0,0 +1,72 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package httptest provides utilities for HTTP testing. +package httptest + +import ( + "bytes" + "net/http" +) + +// ResponseRecorder is an implementation of http.ResponseWriter that +// records its mutations for later inspection in tests. 
+type ResponseRecorder struct { + Code int // the HTTP response code from WriteHeader + HeaderMap http.Header // the HTTP response headers + Body *bytes.Buffer // if non-nil, the bytes.Buffer to append written data to + Flushed bool + + wroteHeader bool +} + +// NewRecorder returns an initialized ResponseRecorder. +func NewRecorder() *ResponseRecorder { + return &ResponseRecorder{ + HeaderMap: make(http.Header), + Body: new(bytes.Buffer), + Code: 200, + } +} + +// DefaultRemoteAddr is the default remote address to return in RemoteAddr if +// an explicit DefaultRemoteAddr isn't set on ResponseRecorder. +const DefaultRemoteAddr = "1.2.3.4" + +// Header returns the response headers. +func (rw *ResponseRecorder) Header() http.Header { + m := rw.HeaderMap + if m == nil { + m = make(http.Header) + rw.HeaderMap = m + } + return m +} + +// Write always succeeds and writes to rw.Body, if not nil. +func (rw *ResponseRecorder) Write(buf []byte) (int, error) { + if !rw.wroteHeader { + rw.WriteHeader(200) + } + if rw.Body != nil { + rw.Body.Write(buf) + } + return len(buf), nil +} + +// WriteHeader sets rw.Code. +func (rw *ResponseRecorder) WriteHeader(code int) { + if !rw.wroteHeader { + rw.Code = code + } + rw.wroteHeader = true +} + +// Flush sets rw.Flushed to true. +func (rw *ResponseRecorder) Flush() { + if !rw.wroteHeader { + rw.WriteHeader(200) + } + rw.Flushed = true +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/core/http/httptest/recorder_test.go b/vendor/github.com/Azure/azure-sdk-for-go/core/http/httptest/recorder_test.go new file mode 100644 index 000000000..2b563260c --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/core/http/httptest/recorder_test.go @@ -0,0 +1,90 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package httptest + +import ( + "fmt" + "net/http" + "testing" +) + +func TestRecorder(t *testing.T) { + type checkFunc func(*ResponseRecorder) error + check := func(fns ...checkFunc) []checkFunc { return fns } + + hasStatus := func(wantCode int) checkFunc { + return func(rec *ResponseRecorder) error { + if rec.Code != wantCode { + return fmt.Errorf("Status = %d; want %d", rec.Code, wantCode) + } + return nil + } + } + hasContents := func(want string) checkFunc { + return func(rec *ResponseRecorder) error { + if rec.Body.String() != want { + return fmt.Errorf("wrote = %q; want %q", rec.Body.String(), want) + } + return nil + } + } + hasFlush := func(want bool) checkFunc { + return func(rec *ResponseRecorder) error { + if rec.Flushed != want { + return fmt.Errorf("Flushed = %v; want %v", rec.Flushed, want) + } + return nil + } + } + + tests := []struct { + name string + h func(w http.ResponseWriter, r *http.Request) + checks []checkFunc + }{ + { + "200 default", + func(w http.ResponseWriter, r *http.Request) {}, + check(hasStatus(200), hasContents("")), + }, + { + "first code only", + func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(201) + w.WriteHeader(202) + w.Write([]byte("hi")) + }, + check(hasStatus(201), hasContents("hi")), + }, + { + "write sends 200", + func(w http.ResponseWriter, r *http.Request) { + w.Write([]byte("hi first")) + w.WriteHeader(201) + w.WriteHeader(202) + }, + check(hasStatus(200), hasContents("hi first"), hasFlush(false)), + }, + { + "flush", + func(w http.ResponseWriter, r *http.Request) { + w.(http.Flusher).Flush() // also sends a 200 + w.WriteHeader(201) + }, + check(hasStatus(200), hasFlush(true)), + }, + } + r, _ := http.NewRequest("GET", "http://foo.com/", nil) + for _, tt := range tests { + h := http.HandlerFunc(tt.h) + rec := NewRecorder() + h.ServeHTTP(rec, r) + for _, check := range tt.checks { + if err := check(rec); err != nil { + t.Errorf("%s: %v", tt.name, err) + } + } + } +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/core/http/httptest/server.go b/vendor/github.com/Azure/azure-sdk-for-go/core/http/httptest/server.go new file mode 100644 index 000000000..7f265552f --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/core/http/httptest/server.go @@ -0,0 +1,228 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Implementation of Server + +package httptest + +import ( + "crypto/tls" + "flag" + "fmt" + "net" + "net/http" + "os" + "sync" +) + +// A Server is an HTTP server listening on a system-chosen port on the +// local loopback interface, for use in end-to-end HTTP tests. +type Server struct { + URL string // base URL of form http://ipaddr:port with no trailing slash + Listener net.Listener + + // TLS is the optional TLS configuration, populated with a new config + // after TLS is started. If set on an unstarted server before StartTLS + // is called, existing fields are copied into the new config. + TLS *tls.Config + + // Config may be changed after calling NewUnstartedServer and + // before Start or StartTLS. + Config *http.Server + + // wg counts the number of outstanding HTTP requests on this server. + // Close blocks until all requests are finished. + wg sync.WaitGroup +} + +// historyListener keeps track of all connections that it's ever +// accepted. 
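+// The recorded connections are what CloseClientConnections walks when
+// it force-closes clients.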
+type historyListener struct { + net.Listener + sync.Mutex // protects history + history []net.Conn +} + +func (hs *historyListener) Accept() (c net.Conn, err error) { + c, err = hs.Listener.Accept() + if err == nil { + hs.Lock() + hs.history = append(hs.history, c) + hs.Unlock() + } + return +} + +func newLocalListener() net.Listener { + if *serve != "" { + l, err := net.Listen("tcp", *serve) + if err != nil { + panic(fmt.Sprintf("httptest: failed to listen on %v: %v", *serve, err)) + } + return l + } + l, err := net.Listen("tcp", "127.0.0.1:0") + if err != nil { + if l, err = net.Listen("tcp6", "[::1]:0"); err != nil { + panic(fmt.Sprintf("httptest: failed to listen on a port: %v", err)) + } + } + return l +} + +// When debugging a particular http server-based test, +// this flag lets you run +// go test -run=BrokenTest -httptest.serve=127.0.0.1:8000 +// to start the broken server so you can interact with it manually. +var serve = flag.String("httptest.serve", "", "if non-empty, httptest.NewServer serves on this address and blocks") + +// NewServer starts and returns a new Server. +// The caller should call Close when finished, to shut it down. +func NewServer(handler http.Handler) *Server { + ts := NewUnstartedServer(handler) + ts.Start() + return ts +} + +// NewUnstartedServer returns a new Server but doesn't start it. +// +// After changing its configuration, the caller should call Start or +// StartTLS. +// +// The caller should call Close when finished, to shut it down. +func NewUnstartedServer(handler http.Handler) *Server { + return &Server{ + Listener: newLocalListener(), + Config: &http.Server{Handler: handler}, + } +} + +// Start starts a server from NewUnstartedServer. +func (s *Server) Start() { + if s.URL != "" { + panic("Server already started") + } + s.Listener = &historyListener{Listener: s.Listener} + s.URL = "http://" + s.Listener.Addr().String() + s.wrapHandler() + go s.Config.Serve(s.Listener) + if *serve != "" { + fmt.Fprintln(os.Stderr, "httptest: serving on", s.URL) + select {} + } +} + +// StartTLS starts TLS on a server from NewUnstartedServer. +func (s *Server) StartTLS() { + if s.URL != "" { + panic("Server already started") + } + cert, err := tls.X509KeyPair(localhostCert, localhostKey) + if err != nil { + panic(fmt.Sprintf("httptest: NewTLSServer: %v", err)) + } + + existingConfig := s.TLS + s.TLS = new(tls.Config) + if existingConfig != nil { + *s.TLS = *existingConfig + } + if s.TLS.NextProtos == nil { + s.TLS.NextProtos = []string{"http/1.1"} + } + if len(s.TLS.Certificates) == 0 { + s.TLS.Certificates = []tls.Certificate{cert} + } + tlsListener := tls.NewListener(s.Listener, s.TLS) + + s.Listener = &historyListener{Listener: tlsListener} + s.URL = "https://" + s.Listener.Addr().String() + s.wrapHandler() + go s.Config.Serve(s.Listener) +} + +func (s *Server) wrapHandler() { + h := s.Config.Handler + if h == nil { + h = http.DefaultServeMux + } + s.Config.Handler = &waitGroupHandler{ + s: s, + h: h, + } +} + +// NewTLSServer starts and returns a new Server using TLS. +// The caller should call Close when finished, to shut it down. +func NewTLSServer(handler http.Handler) *Server { + ts := NewUnstartedServer(handler) + ts.StartTLS() + return ts +} + +// Close shuts down the server and blocks until all outstanding +// requests on this server have completed. 
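+//
+// Typical usage pairs construction with a deferred Close (a sketch;
+// handler is the caller's http.Handler):
+//
+//	ts := NewServer(handler)
+//	defer ts.Close()
+//	res, err := http.Get(ts.URL)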
+func (s *Server) Close() { + s.Listener.Close() + s.wg.Wait() + s.CloseClientConnections() + if t, ok := http.DefaultTransport.(*http.Transport); ok { + t.CloseIdleConnections() + } +} + +// CloseClientConnections closes any currently open HTTP connections +// to the test Server. +func (s *Server) CloseClientConnections() { + hl, ok := s.Listener.(*historyListener) + if !ok { + return + } + hl.Lock() + for _, conn := range hl.history { + conn.Close() + } + hl.Unlock() +} + +// waitGroupHandler wraps a handler, incrementing and decrementing a +// sync.WaitGroup on each request, to enable Server.Close to block +// until outstanding requests are finished. +type waitGroupHandler struct { + s *Server + h http.Handler // non-nil +} + +func (h *waitGroupHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { + h.s.wg.Add(1) + defer h.s.wg.Done() // a defer, in case ServeHTTP below panics + h.h.ServeHTTP(w, r) +} + +// localhostCert is a PEM-encoded TLS cert with SAN IPs +// "127.0.0.1" and "[::1]", expiring at the last second of 2049 (the end +// of ASN.1 time). +// generated from src/pkg/crypto/tls: +// go run generate_cert.go --rsa-bits 512 --host 127.0.0.1,::1,example.com --ca --start-date "Jan 1 00:00:00 1970" --duration=1000000h +var localhostCert = []byte(`-----BEGIN CERTIFICATE----- +MIIBdzCCASOgAwIBAgIBADALBgkqhkiG9w0BAQUwEjEQMA4GA1UEChMHQWNtZSBD +bzAeFw03MDAxMDEwMDAwMDBaFw00OTEyMzEyMzU5NTlaMBIxEDAOBgNVBAoTB0Fj +bWUgQ28wWjALBgkqhkiG9w0BAQEDSwAwSAJBAN55NcYKZeInyTuhcCwFMhDHCmwa +IUSdtXdcbItRB/yfXGBhiex00IaLXQnSU+QZPRZWYqeTEbFSgihqi1PUDy8CAwEA +AaNoMGYwDgYDVR0PAQH/BAQDAgCkMBMGA1UdJQQMMAoGCCsGAQUFBwMBMA8GA1Ud +EwEB/wQFMAMBAf8wLgYDVR0RBCcwJYILZXhhbXBsZS5jb22HBH8AAAGHEAAAAAAA +AAAAAAAAAAAAAAEwCwYJKoZIhvcNAQEFA0EAAoQn/ytgqpiLcZu9XKbCJsJcvkgk +Se6AbGXgSlq+ZCEVo0qIwSgeBqmsJxUu7NCSOwVJLYNEBO2DtIxoYVk+MA== +-----END CERTIFICATE-----`) + +// localhostKey is the private key for localhostCert. +var localhostKey = []byte(`-----BEGIN RSA PRIVATE KEY----- +MIIBPAIBAAJBAN55NcYKZeInyTuhcCwFMhDHCmwaIUSdtXdcbItRB/yfXGBhiex0 +0IaLXQnSU+QZPRZWYqeTEbFSgihqi1PUDy8CAwEAAQJBAQdUx66rfh8sYsgfdcvV +NoafYpnEcB5s4m/vSVe6SU7dCK6eYec9f9wpT353ljhDUHq3EbmE4foNzJngh35d +AekCIQDhRQG5Li0Wj8TM4obOnnXUXf1jRv0UkzE9AHWLG5q3AwIhAPzSjpYUDjVW +MCUXgckTpKCuGwbJk7424Nb8bLzf3kllAiA5mUBgjfr/WtFSJdWcPQ4Zt9KTMNKD +EUO0ukpTwEIl6wIhAMbGqZK3zAAFdq8DD2jPx+UJXnh0rnOkZBzDtJ6/iN69AiEA +1Aq8MJgTaYsDQWyU/hDq5YkDJc9e9DSCvUIzqxQWMQE= +-----END RSA PRIVATE KEY-----`) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/core/http/httptest/server_test.go b/vendor/github.com/Azure/azure-sdk-for-go/core/http/httptest/server_test.go new file mode 100644 index 000000000..501cc8a99 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/core/http/httptest/server_test.go @@ -0,0 +1,52 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package httptest + +import ( + "io/ioutil" + "net/http" + "testing" + "time" +) + +func TestServer(t *testing.T) { + ts := NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Write([]byte("hello")) + })) + defer ts.Close() + res, err := http.Get(ts.URL) + if err != nil { + t.Fatal(err) + } + got, err := ioutil.ReadAll(res.Body) + if err != nil { + t.Fatal(err) + } + if string(got) != "hello" { + t.Errorf("got %q, want hello", string(got)) + } +} + +func TestIssue7264(t *testing.T) { + for i := 0; i < 1000; i++ { + func() { + inHandler := make(chan bool, 1) + ts := NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + inHandler <- true + })) + defer ts.Close() + tr := &http.Transport{ + ResponseHeaderTimeout: time.Nanosecond, + } + defer tr.CloseIdleConnections() + c := &http.Client{Transport: tr} + res, err := c.Get(ts.URL) + <-inHandler + if err == nil { + res.Body.Close() + } + }() + } +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/core/http/httputil/chunked.go b/vendor/github.com/Azure/azure-sdk-for-go/core/http/httputil/chunked.go new file mode 100644 index 000000000..9632bfd19 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/core/http/httputil/chunked.go @@ -0,0 +1,203 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// The wire protocol for HTTP's "chunked" Transfer-Encoding. + +// This code is duplicated in net/http and net/http/httputil. +// Please make any changes in both files. + +package httputil + +import ( + "bufio" + "bytes" + "errors" + "fmt" + "io" +) + +const maxLineLength = 4096 // assumed <= bufio.defaultBufSize + +var ErrLineTooLong = errors.New("header line too long") + +// newChunkedReader returns a new chunkedReader that translates the data read from r +// out of HTTP "chunked" format before returning it. +// The chunkedReader returns io.EOF when the final 0-length chunk is read. +// +// newChunkedReader is not needed by normal applications. The http package +// automatically decodes chunking when reading response bodies. +func newChunkedReader(r io.Reader) io.Reader { + br, ok := r.(*bufio.Reader) + if !ok { + br = bufio.NewReader(r) + } + return &chunkedReader{r: br} +} + +type chunkedReader struct { + r *bufio.Reader + n uint64 // unread bytes in chunk + err error + buf [2]byte +} + +func (cr *chunkedReader) beginChunk() { + // chunk-size CRLF + var line []byte + line, cr.err = readLine(cr.r) + if cr.err != nil { + return + } + cr.n, cr.err = parseHexUint(line) + if cr.err != nil { + return + } + if cr.n == 0 { + cr.err = io.EOF + } +} + +func (cr *chunkedReader) chunkHeaderAvailable() bool { + n := cr.r.Buffered() + if n > 0 { + peek, _ := cr.r.Peek(n) + return bytes.IndexByte(peek, '\n') >= 0 + } + return false +} + +func (cr *chunkedReader) Read(b []uint8) (n int, err error) { + for cr.err == nil { + if cr.n == 0 { + if n > 0 && !cr.chunkHeaderAvailable() { + // We've read enough. Don't potentially block + // reading a new chunk header. + break + } + cr.beginChunk() + continue + } + if len(b) == 0 { + break + } + rbuf := b + if uint64(len(rbuf)) > cr.n { + rbuf = rbuf[:cr.n] + } + var n0 int + n0, cr.err = cr.r.Read(rbuf) + n += n0 + b = b[n0:] + cr.n -= uint64(n0) + // If we're at the end of a chunk, read the next two + // bytes to verify they are "\r\n". 
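+		// (Wire-format reminder: each chunk is "<size in hex>\r\n<data>\r\n",
+		// so after the cr.n data bytes a bare CRLF must follow.)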
+ if cr.n == 0 && cr.err == nil { + if _, cr.err = io.ReadFull(cr.r, cr.buf[:2]); cr.err == nil { + if cr.buf[0] != '\r' || cr.buf[1] != '\n' { + cr.err = errors.New("malformed chunked encoding") + } + } + } + } + return n, cr.err +} + +// Read a line of bytes (up to \n) from b. +// Give up if the line exceeds maxLineLength. +// The returned bytes are a pointer into storage in +// the bufio, so they are only valid until the next bufio read. +func readLine(b *bufio.Reader) (p []byte, err error) { + if p, err = b.ReadSlice('\n'); err != nil { + // We always know when EOF is coming. + // If the caller asked for a line, there should be a line. + if err == io.EOF { + err = io.ErrUnexpectedEOF + } else if err == bufio.ErrBufferFull { + err = ErrLineTooLong + } + return nil, err + } + if len(p) >= maxLineLength { + return nil, ErrLineTooLong + } + return trimTrailingWhitespace(p), nil +} + +func trimTrailingWhitespace(b []byte) []byte { + for len(b) > 0 && isASCIISpace(b[len(b)-1]) { + b = b[:len(b)-1] + } + return b +} + +func isASCIISpace(b byte) bool { + return b == ' ' || b == '\t' || b == '\n' || b == '\r' +} + +// newChunkedWriter returns a new chunkedWriter that translates writes into HTTP +// "chunked" format before writing them to w. Closing the returned chunkedWriter +// sends the final 0-length chunk that marks the end of the stream. +// +// newChunkedWriter is not needed by normal applications. The http +// package adds chunking automatically if handlers don't set a +// Content-Length header. Using newChunkedWriter inside a handler +// would result in double chunking or chunking with a Content-Length +// length, both of which are wrong. +func newChunkedWriter(w io.Writer) io.WriteCloser { + return &chunkedWriter{w} +} + +// Writing to chunkedWriter translates to writing in HTTP chunked Transfer +// Encoding wire format to the underlying Wire chunkedWriter. +type chunkedWriter struct { + Wire io.Writer +} + +// Write the contents of data as one chunk to Wire. +// NOTE: Note that the corresponding chunk-writing procedure in Conn.Write has +// a bug since it does not check for success of io.WriteString +func (cw *chunkedWriter) Write(data []byte) (n int, err error) { + + // Don't send 0-length data. It looks like EOF for chunked encoding. + if len(data) == 0 { + return 0, nil + } + + if _, err = fmt.Fprintf(cw.Wire, "%x\r\n", len(data)); err != nil { + return 0, err + } + if n, err = cw.Wire.Write(data); err != nil { + return + } + if n != len(data) { + err = io.ErrShortWrite + return + } + _, err = io.WriteString(cw.Wire, "\r\n") + + return +} + +func (cw *chunkedWriter) Close() error { + _, err := io.WriteString(cw.Wire, "0\r\n") + return err +} + +func parseHexUint(v []byte) (n uint64, err error) { + for _, b := range v { + n <<= 4 + switch { + case '0' <= b && b <= '9': + b = b - '0' + case 'a' <= b && b <= 'f': + b = b - 'a' + 10 + case 'A' <= b && b <= 'F': + b = b - 'A' + 10 + default: + return 0, errors.New("invalid byte in chunk length") + } + n |= uint64(b) + } + return +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/core/http/httputil/chunked_test.go b/vendor/github.com/Azure/azure-sdk-for-go/core/http/httputil/chunked_test.go new file mode 100644 index 000000000..a7a577468 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/core/http/httputil/chunked_test.go @@ -0,0 +1,159 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+
+// This code is duplicated in net/http and net/http/httputil.
+// Please make any changes in both files.
+
+package httputil
+
+import (
+	"bufio"
+	"bytes"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"strings"
+	"testing"
+)
+
+func TestChunk(t *testing.T) {
+	var b bytes.Buffer
+
+	w := newChunkedWriter(&b)
+	const chunk1 = "hello, "
+	const chunk2 = "world! 0123456789abcdef"
+	w.Write([]byte(chunk1))
+	w.Write([]byte(chunk2))
+	w.Close()
+
+	if g, e := b.String(), "7\r\nhello, \r\n17\r\nworld! 0123456789abcdef\r\n0\r\n"; g != e {
+		t.Fatalf("chunk writer wrote %q; want %q", g, e)
+	}
+
+	r := newChunkedReader(&b)
+	data, err := ioutil.ReadAll(r)
+	if err != nil {
+		t.Logf(`data: "%s"`, data)
+		t.Fatalf("ReadAll from reader: %v", err)
+	}
+	if g, e := string(data), chunk1+chunk2; g != e {
+		t.Errorf("chunk reader read %q; want %q", g, e)
+	}
+}
+
+func TestChunkReadMultiple(t *testing.T) {
+	// Bunch of small chunks, all read together.
+	{
+		var b bytes.Buffer
+		w := newChunkedWriter(&b)
+		w.Write([]byte("foo"))
+		w.Write([]byte("bar"))
+		w.Close()
+
+		r := newChunkedReader(&b)
+		buf := make([]byte, 10)
+		n, err := r.Read(buf)
+		if n != 6 || err != io.EOF {
+			t.Errorf("Read = %d, %v; want 6, EOF", n, err)
+		}
+		buf = buf[:n]
+		if string(buf) != "foobar" {
+			t.Errorf("Read = %q; want %q", buf, "foobar")
+		}
+	}
+
+	// One big chunk followed by a little chunk, but the small bufio.Reader size
+	// should prevent the second chunk header from being read.
+	{
+		var b bytes.Buffer
+		w := newChunkedWriter(&b)
+		// fillBufChunk is 11 bytes + 3 bytes header + 2 bytes footer = 16 bytes,
+		// the same as the bufio ReaderSize below (the minimum), so even
+		// though we're going to try to Read with a buffer large enough to also
+		// receive "foo", the second chunk header won't be read yet.
+ const fillBufChunk = "0123456789a" + const shortChunk = "foo" + w.Write([]byte(fillBufChunk)) + w.Write([]byte(shortChunk)) + w.Close() + + r := newChunkedReader(bufio.NewReaderSize(&b, 16)) + buf := make([]byte, len(fillBufChunk)+len(shortChunk)) + n, err := r.Read(buf) + if n != len(fillBufChunk) || err != nil { + t.Errorf("Read = %d, %v; want %d, nil", n, err, len(fillBufChunk)) + } + buf = buf[:n] + if string(buf) != fillBufChunk { + t.Errorf("Read = %q; want %q", buf, fillBufChunk) + } + + n, err = r.Read(buf) + if n != len(shortChunk) || err != io.EOF { + t.Errorf("Read = %d, %v; want %d, EOF", n, err, len(shortChunk)) + } + } + + // And test that we see an EOF chunk, even though our buffer is already full: + { + r := newChunkedReader(bufio.NewReader(strings.NewReader("3\r\nfoo\r\n0\r\n"))) + buf := make([]byte, 3) + n, err := r.Read(buf) + if n != 3 || err != io.EOF { + t.Errorf("Read = %d, %v; want 3, EOF", n, err) + } + if string(buf) != "foo" { + t.Errorf("buf = %q; want foo", buf) + } + } +} + +func TestChunkReaderAllocs(t *testing.T) { + if testing.Short() { + t.Skip("skipping in short mode") + } + var buf bytes.Buffer + w := newChunkedWriter(&buf) + a, b, c := []byte("aaaaaa"), []byte("bbbbbbbbbbbb"), []byte("cccccccccccccccccccccccc") + w.Write(a) + w.Write(b) + w.Write(c) + w.Close() + + readBuf := make([]byte, len(a)+len(b)+len(c)+1) + byter := bytes.NewReader(buf.Bytes()) + bufr := bufio.NewReader(byter) + mallocs := testing.AllocsPerRun(100, func() { + byter.Seek(0, 0) + bufr.Reset(byter) + r := newChunkedReader(bufr) + n, err := io.ReadFull(r, readBuf) + if n != len(readBuf)-1 { + t.Fatalf("read %d bytes; want %d", n, len(readBuf)-1) + } + if err != io.ErrUnexpectedEOF { + t.Fatalf("read error = %v; want ErrUnexpectedEOF", err) + } + }) + if mallocs > 1.5 { + t.Errorf("mallocs = %v; want 1", mallocs) + } +} + +func TestParseHexUint(t *testing.T) { + for i := uint64(0); i <= 1234; i++ { + line := []byte(fmt.Sprintf("%x", i)) + got, err := parseHexUint(line) + if err != nil { + t.Fatalf("on %d: %v", i, err) + } + if got != i { + t.Errorf("for input %q = %d; want %d", line, got, i) + } + } + _, err := parseHexUint([]byte("bogus")) + if err == nil { + t.Error("expected error on bogus input") + } +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/core/http/httputil/dump.go b/vendor/github.com/Azure/azure-sdk-for-go/core/http/httputil/dump.go new file mode 100644 index 000000000..2a7a413d0 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/core/http/httputil/dump.go @@ -0,0 +1,276 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package httputil + +import ( + "bufio" + "bytes" + "errors" + "fmt" + "io" + "io/ioutil" + "net" + "net/http" + "net/url" + "strings" + "time" +) + +// One of the copies, say from b to r2, could be avoided by using a more +// elaborate trick where the other copy is made during Request/Response.Write. +// This would complicate things too much, given that these functions are for +// debugging only. 
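+// drainBody reads all of b into memory and then returns two equivalent
+// ReadClosers yielding the same bytes.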
+func drainBody(b io.ReadCloser) (r1, r2 io.ReadCloser, err error) { + var buf bytes.Buffer + if _, err = buf.ReadFrom(b); err != nil { + return nil, nil, err + } + if err = b.Close(); err != nil { + return nil, nil, err + } + return ioutil.NopCloser(&buf), ioutil.NopCloser(bytes.NewReader(buf.Bytes())), nil +} + +// dumpConn is a net.Conn which writes to Writer and reads from Reader +type dumpConn struct { + io.Writer + io.Reader +} + +func (c *dumpConn) Close() error { return nil } +func (c *dumpConn) LocalAddr() net.Addr { return nil } +func (c *dumpConn) RemoteAddr() net.Addr { return nil } +func (c *dumpConn) SetDeadline(t time.Time) error { return nil } +func (c *dumpConn) SetReadDeadline(t time.Time) error { return nil } +func (c *dumpConn) SetWriteDeadline(t time.Time) error { return nil } + +type neverEnding byte + +func (b neverEnding) Read(p []byte) (n int, err error) { + for i := range p { + p[i] = byte(b) + } + return len(p), nil +} + +// DumpRequestOut is like DumpRequest but includes +// headers that the standard http.Transport adds, +// such as User-Agent. +func DumpRequestOut(req *http.Request, body bool) ([]byte, error) { + save := req.Body + dummyBody := false + if !body || req.Body == nil { + req.Body = nil + if req.ContentLength != 0 { + req.Body = ioutil.NopCloser(io.LimitReader(neverEnding('x'), req.ContentLength)) + dummyBody = true + } + } else { + var err error + save, req.Body, err = drainBody(req.Body) + if err != nil { + return nil, err + } + } + + // Since we're using the actual Transport code to write the request, + // switch to http so the Transport doesn't try to do an SSL + // negotiation with our dumpConn and its bytes.Buffer & pipe. + // The wire format for https and http are the same, anyway. + reqSend := req + if req.URL.Scheme == "https" { + reqSend = new(http.Request) + *reqSend = *req + reqSend.URL = new(url.URL) + *reqSend.URL = *req.URL + reqSend.URL.Scheme = "http" + } + + // Use the actual Transport code to record what we would send + // on the wire, but not using TCP. Use a Transport with a + // custom dialer that returns a fake net.Conn that waits + // for the full input (and recording it), and then responds + // with a dummy response. + var buf bytes.Buffer // records the output + pr, pw := io.Pipe() + dr := &delegateReader{c: make(chan io.Reader)} + // Wait for the request before replying with a dummy response: + go func() { + http.ReadRequest(bufio.NewReader(pr)) + dr.c <- strings.NewReader("HTTP/1.1 204 No Content\r\n\r\n") + }() + + t := &http.Transport{ + Dial: func(net, addr string) (net.Conn, error) { + return &dumpConn{io.MultiWriter(&buf, pw), dr}, nil + }, + } + defer t.CloseIdleConnections() + + _, err := t.RoundTrip(reqSend) + + req.Body = save + if err != nil { + return nil, err + } + dump := buf.Bytes() + + // If we used a dummy body above, remove it now. + // TODO: if the req.ContentLength is large, we allocate memory + // unnecessarily just to slice it off here. But this is just + // a debug function, so this is acceptable for now. We could + // discard the body earlier if this matters. + if dummyBody { + if i := bytes.Index(dump, []byte("\r\n\r\n")); i >= 0 { + dump = dump[:i+4] + } + } + return dump, nil +} + +// delegateReader is a reader that delegates to another reader, +// once it arrives on a channel. 
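+// DumpRequestOut uses it to hold back the fake connection's read side
+// until the request has been written in full.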
+type delegateReader struct { + c chan io.Reader + r io.Reader // nil until received from c +} + +func (r *delegateReader) Read(p []byte) (int, error) { + if r.r == nil { + r.r = <-r.c + } + return r.r.Read(p) +} + +// Return value if nonempty, def otherwise. +func valueOrDefault(value, def string) string { + if value != "" { + return value + } + return def +} + +var reqWriteExcludeHeaderDump = map[string]bool{ + "Host": true, // not in Header map anyway + "Content-Length": true, + "Transfer-Encoding": true, + "Trailer": true, +} + +// dumpAsReceived writes req to w in the form as it was received, or +// at least as accurately as possible from the information retained in +// the request. +func dumpAsReceived(req *http.Request, w io.Writer) error { + return nil +} + +// DumpRequest returns the as-received wire representation of req, +// optionally including the request body, for debugging. +// DumpRequest is semantically a no-op, but in order to +// dump the body, it reads the body data into memory and +// changes req.Body to refer to the in-memory copy. +// The documentation for http.Request.Write details which fields +// of req are used. +func DumpRequest(req *http.Request, body bool) (dump []byte, err error) { + save := req.Body + if !body || req.Body == nil { + req.Body = nil + } else { + save, req.Body, err = drainBody(req.Body) + if err != nil { + return + } + } + + var b bytes.Buffer + + fmt.Fprintf(&b, "%s %s HTTP/%d.%d\r\n", valueOrDefault(req.Method, "GET"), + req.URL.RequestURI(), req.ProtoMajor, req.ProtoMinor) + + host := req.Host + if host == "" && req.URL != nil { + host = req.URL.Host + } + if host != "" { + fmt.Fprintf(&b, "Host: %s\r\n", host) + } + + chunked := len(req.TransferEncoding) > 0 && req.TransferEncoding[0] == "chunked" + if len(req.TransferEncoding) > 0 { + fmt.Fprintf(&b, "Transfer-Encoding: %s\r\n", strings.Join(req.TransferEncoding, ",")) + } + if req.Close { + fmt.Fprintf(&b, "Connection: close\r\n") + } + + err = req.Header.WriteSubset(&b, reqWriteExcludeHeaderDump) + if err != nil { + return + } + + io.WriteString(&b, "\r\n") + + if req.Body != nil { + var dest io.Writer = &b + if chunked { + dest = NewChunkedWriter(dest) + } + _, err = io.Copy(dest, req.Body) + if chunked { + dest.(io.Closer).Close() + io.WriteString(&b, "\r\n") + } + } + + req.Body = save + if err != nil { + return + } + dump = b.Bytes() + return +} + +// errNoBody is a sentinel error value used by failureToReadBody so we can detect +// that the lack of body was intentional. +var errNoBody = errors.New("sentinel error value") + +// failureToReadBody is a io.ReadCloser that just returns errNoBody on +// Read. It's swapped in when we don't actually want to consume the +// body, but need a non-nil one, and want to distinguish the error +// from reading the dummy body. +type failureToReadBody struct{} + +func (failureToReadBody) Read([]byte) (int, error) { return 0, errNoBody } +func (failureToReadBody) Close() error { return nil } + +var emptyBody = ioutil.NopCloser(strings.NewReader("")) + +// DumpResponse is like DumpRequest but dumps a response. 
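+// A minimal usage sketch (illustrative only):
+//
+//	dump, err := DumpResponse(res, true)
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	fmt.Printf("%s", dump)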
+func DumpResponse(resp *http.Response, body bool) (dump []byte, err error) { + var b bytes.Buffer + save := resp.Body + savecl := resp.ContentLength + + if !body { + resp.Body = failureToReadBody{} + } else if resp.Body == nil { + resp.Body = emptyBody + } else { + save, resp.Body, err = drainBody(resp.Body) + if err != nil { + return + } + } + err = resp.Write(&b) + if err == errNoBody { + err = nil + } + resp.Body = save + resp.ContentLength = savecl + if err != nil { + return nil, err + } + return b.Bytes(), nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/core/http/httputil/dump_test.go b/vendor/github.com/Azure/azure-sdk-for-go/core/http/httputil/dump_test.go new file mode 100644 index 000000000..e1ffb3935 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/core/http/httputil/dump_test.go @@ -0,0 +1,263 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package httputil + +import ( + "bytes" + "fmt" + "io" + "io/ioutil" + "net/http" + "net/url" + "runtime" + "strings" + "testing" +) + +type dumpTest struct { + Req http.Request + Body interface{} // optional []byte or func() io.ReadCloser to populate Req.Body + + WantDump string + WantDumpOut string + NoBody bool // if true, set DumpRequest{,Out} body to false +} + +var dumpTests = []dumpTest{ + + // HTTP/1.1 => chunked coding; body; empty trailer + { + Req: http.Request{ + Method: "GET", + URL: &url.URL{ + Scheme: "http", + Host: "www.google.com", + Path: "/search", + }, + ProtoMajor: 1, + ProtoMinor: 1, + TransferEncoding: []string{"chunked"}, + }, + + Body: []byte("abcdef"), + + WantDump: "GET /search HTTP/1.1\r\n" + + "Host: www.google.com\r\n" + + "Transfer-Encoding: chunked\r\n\r\n" + + chunk("abcdef") + chunk(""), + }, + + // Verify that DumpRequest preserves the HTTP version number, doesn't add a Host, + // and doesn't add a User-Agent. + { + Req: http.Request{ + Method: "GET", + URL: mustParseURL("/foo"), + ProtoMajor: 1, + ProtoMinor: 0, + Header: http.Header{ + "X-Foo": []string{"X-Bar"}, + }, + }, + + WantDump: "GET /foo HTTP/1.0\r\n" + + "X-Foo: X-Bar\r\n\r\n", + }, + + { + Req: *mustNewRequest("GET", "http://example.com/foo", nil), + + WantDumpOut: "GET /foo HTTP/1.1\r\n" + + "Host: example.com\r\n" + + "User-Agent: Go 1.1 package http\r\n" + + "Accept-Encoding: gzip\r\n\r\n", + }, + + // Test that an https URL doesn't try to do an SSL negotiation + // with a bytes.Buffer and hang with all goroutines not + // runnable. + { + Req: *mustNewRequest("GET", "https://example.com/foo", nil), + + WantDumpOut: "GET /foo HTTP/1.1\r\n" + + "Host: example.com\r\n" + + "User-Agent: Go 1.1 package http\r\n" + + "Accept-Encoding: gzip\r\n\r\n", + }, + + // Request with Body, but Dump requested without it. 
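+	// (With NoBody set below, DumpRequestOut substitutes ContentLength
+	// dummy 'x' bytes for the body and strips them from the dump, which
+	// is why WantDumpOut has headers only.)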
+ { + Req: http.Request{ + Method: "POST", + URL: &url.URL{ + Scheme: "http", + Host: "post.tld", + Path: "/", + }, + ContentLength: 6, + ProtoMajor: 1, + ProtoMinor: 1, + }, + + Body: []byte("abcdef"), + + WantDumpOut: "POST / HTTP/1.1\r\n" + + "Host: post.tld\r\n" + + "User-Agent: Go 1.1 package http\r\n" + + "Content-Length: 6\r\n" + + "Accept-Encoding: gzip\r\n\r\n", + + NoBody: true, + }, +} + +func TestDumpRequest(t *testing.T) { + numg0 := runtime.NumGoroutine() + for i, tt := range dumpTests { + setBody := func() { + if tt.Body == nil { + return + } + switch b := tt.Body.(type) { + case []byte: + tt.Req.Body = ioutil.NopCloser(bytes.NewReader(b)) + case func() io.ReadCloser: + tt.Req.Body = b() + } + } + setBody() + if tt.Req.Header == nil { + tt.Req.Header = make(http.Header) + } + + if tt.WantDump != "" { + setBody() + dump, err := DumpRequest(&tt.Req, !tt.NoBody) + if err != nil { + t.Errorf("DumpRequest #%d: %s", i, err) + continue + } + if string(dump) != tt.WantDump { + t.Errorf("DumpRequest %d, expecting:\n%s\nGot:\n%s\n", i, tt.WantDump, string(dump)) + continue + } + } + + if tt.WantDumpOut != "" { + setBody() + dump, err := DumpRequestOut(&tt.Req, !tt.NoBody) + if err != nil { + t.Errorf("DumpRequestOut #%d: %s", i, err) + continue + } + if string(dump) != tt.WantDumpOut { + t.Errorf("DumpRequestOut %d, expecting:\n%s\nGot:\n%s\n", i, tt.WantDumpOut, string(dump)) + continue + } + } + } + if dg := runtime.NumGoroutine() - numg0; dg > 4 { + t.Errorf("Unexpectedly large number of new goroutines: %d new", dg) + } +} + +func chunk(s string) string { + return fmt.Sprintf("%x\r\n%s\r\n", len(s), s) +} + +func mustParseURL(s string) *url.URL { + u, err := url.Parse(s) + if err != nil { + panic(fmt.Sprintf("Error parsing URL %q: %v", s, err)) + } + return u +} + +func mustNewRequest(method, url string, body io.Reader) *http.Request { + req, err := http.NewRequest(method, url, body) + if err != nil { + panic(fmt.Sprintf("NewRequest(%q, %q, %p) err = %v", method, url, body, err)) + } + return req +} + +var dumpResTests = []struct { + res *http.Response + body bool + want string +}{ + { + res: &http.Response{ + Status: "200 OK", + StatusCode: 200, + Proto: "HTTP/1.1", + ProtoMajor: 1, + ProtoMinor: 1, + ContentLength: 50, + Header: http.Header{ + "Foo": []string{"Bar"}, + }, + Body: ioutil.NopCloser(strings.NewReader("foo")), // shouldn't be used + }, + body: false, // to verify we see 50, not empty or 3. + want: `HTTP/1.1 200 OK +Content-Length: 50 +Foo: Bar`, + }, + + { + res: &http.Response{ + Status: "200 OK", + StatusCode: 200, + Proto: "HTTP/1.1", + ProtoMajor: 1, + ProtoMinor: 1, + ContentLength: 3, + Body: ioutil.NopCloser(strings.NewReader("foo")), + }, + body: true, + want: `HTTP/1.1 200 OK +Content-Length: 3 + +foo`, + }, + + { + res: &http.Response{ + Status: "200 OK", + StatusCode: 200, + Proto: "HTTP/1.1", + ProtoMajor: 1, + ProtoMinor: 1, + ContentLength: -1, + Body: ioutil.NopCloser(strings.NewReader("foo")), + TransferEncoding: []string{"chunked"}, + }, + body: true, + want: `HTTP/1.1 200 OK +Transfer-Encoding: chunked + +3 +foo +0`, + }, +} + +func TestDumpResponse(t *testing.T) { + for i, tt := range dumpResTests { + gotb, err := DumpResponse(tt.res, tt.body) + if err != nil { + t.Errorf("%d. 
DumpResponse = %v", i, err) + continue + } + got := string(gotb) + got = strings.TrimSpace(got) + got = strings.Replace(got, "\r", "", -1) + + if got != tt.want { + t.Errorf("%d.\nDumpResponse got:\n%s\n\nWant:\n%s\n", i, got, tt.want) + } + } +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/core/http/httputil/httputil.go b/vendor/github.com/Azure/azure-sdk-for-go/core/http/httputil/httputil.go new file mode 100644 index 000000000..74fb6c655 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/core/http/httputil/httputil.go @@ -0,0 +1,32 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package httputil provides HTTP utility functions, complementing the +// more common ones in the net/http package. +package httputil + +import "io" + +// NewChunkedReader returns a new chunkedReader that translates the data read from r +// out of HTTP "chunked" format before returning it. +// The chunkedReader returns io.EOF when the final 0-length chunk is read. +// +// NewChunkedReader is not needed by normal applications. The http package +// automatically decodes chunking when reading response bodies. +func NewChunkedReader(r io.Reader) io.Reader { + return newChunkedReader(r) +} + +// NewChunkedWriter returns a new chunkedWriter that translates writes into HTTP +// "chunked" format before writing them to w. Closing the returned chunkedWriter +// sends the final 0-length chunk that marks the end of the stream. +// +// NewChunkedWriter is not needed by normal applications. The http +// package adds chunking automatically if handlers don't set a +// Content-Length header. Using NewChunkedWriter inside a handler +// would result in double chunking or chunking with a Content-Length +// length, both of which are wrong. +func NewChunkedWriter(w io.Writer) io.WriteCloser { + return newChunkedWriter(w) +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/core/http/httputil/persist.go b/vendor/github.com/Azure/azure-sdk-for-go/core/http/httputil/persist.go new file mode 100644 index 000000000..987bcc96b --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/core/http/httputil/persist.go @@ -0,0 +1,429 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package httputil + +import ( + "bufio" + "errors" + "io" + "net" + "net/http" + "net/textproto" + "sync" +) + +var ( + ErrPersistEOF = &http.ProtocolError{ErrorString: "persistent connection closed"} + ErrClosed = &http.ProtocolError{ErrorString: "connection closed by user"} + ErrPipeline = &http.ProtocolError{ErrorString: "pipeline error"} +) + +// This is an API usage error - the local side is closed. +// ErrPersistEOF (above) reports that the remote side is closed. +var errClosed = errors.New("i/o operation on closed connection") + +// A ServerConn reads requests and sends responses over an underlying +// connection, until the HTTP keepalive logic commands an end. ServerConn +// also allows hijacking the underlying connection by calling Hijack +// to regain control over the connection. ServerConn supports pipe-lining, +// i.e. requests can be read out of sync (but in the same order) while the +// respective responses are sent. +// +// ServerConn is low-level and old. Applications should instead use Server +// in the net/http package. 
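+//
+// A minimal serve loop (an illustrative sketch; conn is an accepted
+// net.Conn and resp is an *http.Response built by the caller):
+//
+//	sc := NewServerConn(conn, nil)
+//	for {
+//		req, err := sc.Read()
+//		if err != nil {
+//			break
+//		}
+//		sc.Write(req, resp)
+//	}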
+type ServerConn struct { + lk sync.Mutex // read-write protects the following fields + c net.Conn + r *bufio.Reader + re, we error // read/write errors + lastbody io.ReadCloser + nread, nwritten int + pipereq map[*http.Request]uint + + pipe textproto.Pipeline +} + +// NewServerConn returns a new ServerConn reading and writing c. If r is not +// nil, it is the buffer to use when reading c. +// +// ServerConn is low-level and old. Applications should instead use Server +// in the net/http package. +func NewServerConn(c net.Conn, r *bufio.Reader) *ServerConn { + if r == nil { + r = bufio.NewReader(c) + } + return &ServerConn{c: c, r: r, pipereq: make(map[*http.Request]uint)} +} + +// Hijack detaches the ServerConn and returns the underlying connection as well +// as the read-side bufio which may have some left over data. Hijack may be +// called before Read has signaled the end of the keep-alive logic. The user +// should not call Hijack while Read or Write is in progress. +func (sc *ServerConn) Hijack() (c net.Conn, r *bufio.Reader) { + sc.lk.Lock() + defer sc.lk.Unlock() + c = sc.c + r = sc.r + sc.c = nil + sc.r = nil + return +} + +// Close calls Hijack and then also closes the underlying connection +func (sc *ServerConn) Close() error { + c, _ := sc.Hijack() + if c != nil { + return c.Close() + } + return nil +} + +// Read returns the next request on the wire. An ErrPersistEOF is returned if +// it is gracefully determined that there are no more requests (e.g. after the +// first request on an HTTP/1.0 connection, or after a Connection:close on a +// HTTP/1.1 connection). +func (sc *ServerConn) Read() (req *http.Request, err error) { + + // Ensure ordered execution of Reads and Writes + id := sc.pipe.Next() + sc.pipe.StartRequest(id) + defer func() { + sc.pipe.EndRequest(id) + if req == nil { + sc.pipe.StartResponse(id) + sc.pipe.EndResponse(id) + } else { + // Remember the pipeline id of this request + sc.lk.Lock() + sc.pipereq[req] = id + sc.lk.Unlock() + } + }() + + sc.lk.Lock() + if sc.we != nil { // no point receiving if write-side broken or closed + defer sc.lk.Unlock() + return nil, sc.we + } + if sc.re != nil { + defer sc.lk.Unlock() + return nil, sc.re + } + if sc.r == nil { // connection closed by user in the meantime + defer sc.lk.Unlock() + return nil, errClosed + } + r := sc.r + lastbody := sc.lastbody + sc.lastbody = nil + sc.lk.Unlock() + + // Make sure body is fully consumed, even if user does not call body.Close + if lastbody != nil { + // body.Close is assumed to be idempotent and multiple calls to + // it should return the error that its first invocation + // returned. + err = lastbody.Close() + if err != nil { + sc.lk.Lock() + defer sc.lk.Unlock() + sc.re = err + return nil, err + } + } + + req, err = http.ReadRequest(r) + sc.lk.Lock() + defer sc.lk.Unlock() + if err != nil { + if err == io.ErrUnexpectedEOF { + // A close from the opposing client is treated as a + // graceful close, even if there was some unparse-able + // data before the close. + sc.re = ErrPersistEOF + return nil, sc.re + } else { + sc.re = err + return req, err + } + } + sc.lastbody = req.Body + sc.nread++ + if req.Close { + sc.re = ErrPersistEOF + return req, sc.re + } + return req, err +} + +// Pending returns the number of unanswered requests +// that have been received on the connection. +func (sc *ServerConn) Pending() int { + sc.lk.Lock() + defer sc.lk.Unlock() + return sc.nread - sc.nwritten +} + +// Write writes resp in response to req. 
To close the connection gracefully, set the +// Response.Close field to true. Write should be considered operational until +// it returns an error, regardless of any errors returned on the Read side. +func (sc *ServerConn) Write(req *http.Request, resp *http.Response) error { + + // Retrieve the pipeline ID of this request/response pair + sc.lk.Lock() + id, ok := sc.pipereq[req] + delete(sc.pipereq, req) + if !ok { + sc.lk.Unlock() + return ErrPipeline + } + sc.lk.Unlock() + + // Ensure pipeline order + sc.pipe.StartResponse(id) + defer sc.pipe.EndResponse(id) + + sc.lk.Lock() + if sc.we != nil { + defer sc.lk.Unlock() + return sc.we + } + if sc.c == nil { // connection closed by user in the meantime + defer sc.lk.Unlock() + return ErrClosed + } + c := sc.c + if sc.nread <= sc.nwritten { + defer sc.lk.Unlock() + return errors.New("persist server pipe count") + } + if resp.Close { + // After signaling a keep-alive close, any pipelined unread + // requests will be lost. It is up to the user to drain them + // before signaling. + sc.re = ErrPersistEOF + } + sc.lk.Unlock() + + err := resp.Write(c) + sc.lk.Lock() + defer sc.lk.Unlock() + if err != nil { + sc.we = err + return err + } + sc.nwritten++ + + return nil +} + +// A ClientConn sends request and receives headers over an underlying +// connection, while respecting the HTTP keepalive logic. ClientConn +// supports hijacking the connection calling Hijack to +// regain control of the underlying net.Conn and deal with it as desired. +// +// ClientConn is low-level and old. Applications should instead use +// Client or Transport in the net/http package. +type ClientConn struct { + lk sync.Mutex // read-write protects the following fields + c net.Conn + r *bufio.Reader + re, we error // read/write errors + lastbody io.ReadCloser + nread, nwritten int + pipereq map[*http.Request]uint + + pipe textproto.Pipeline + writeReq func(*http.Request, io.Writer) error +} + +// NewClientConn returns a new ClientConn reading and writing c. If r is not +// nil, it is the buffer to use when reading c. +// +// ClientConn is low-level and old. Applications should use Client or +// Transport in the net/http package. +func NewClientConn(c net.Conn, r *bufio.Reader) *ClientConn { + if r == nil { + r = bufio.NewReader(c) + } + return &ClientConn{ + c: c, + r: r, + pipereq: make(map[*http.Request]uint), + writeReq: (*http.Request).Write, + } +} + +// NewProxyClientConn works like NewClientConn but writes Requests +// using Request's WriteProxy method. +// +// New code should not use NewProxyClientConn. See Client or +// Transport in the net/http package instead. +func NewProxyClientConn(c net.Conn, r *bufio.Reader) *ClientConn { + cc := NewClientConn(c, r) + cc.writeReq = (*http.Request).WriteProxy + return cc +} + +// Hijack detaches the ClientConn and returns the underlying connection as well +// as the read-side bufio which may have some left over data. Hijack may be +// called before the user or Read have signaled the end of the keep-alive +// logic. The user should not call Hijack while Read or Write is in progress. +func (cc *ClientConn) Hijack() (c net.Conn, r *bufio.Reader) { + cc.lk.Lock() + defer cc.lk.Unlock() + c = cc.c + r = cc.r + cc.c = nil + cc.r = nil + return +} + +// Close calls Hijack and then also closes the underlying connection +func (cc *ClientConn) Close() error { + c, _ := cc.Hijack() + if c != nil { + return c.Close() + } + return nil +} + +// Write writes a request. 
An ErrPersistEOF error is returned if the connection +// has been closed in an HTTP keepalive sense. If req.Close equals true, the +// keepalive connection is logically closed after this request and the opposing +// server is informed. An ErrUnexpectedEOF indicates the remote closed the +// underlying TCP connection, which is usually considered as graceful close. +func (cc *ClientConn) Write(req *http.Request) (err error) { + + // Ensure ordered execution of Writes + id := cc.pipe.Next() + cc.pipe.StartRequest(id) + defer func() { + cc.pipe.EndRequest(id) + if err != nil { + cc.pipe.StartResponse(id) + cc.pipe.EndResponse(id) + } else { + // Remember the pipeline id of this request + cc.lk.Lock() + cc.pipereq[req] = id + cc.lk.Unlock() + } + }() + + cc.lk.Lock() + if cc.re != nil { // no point sending if read-side closed or broken + defer cc.lk.Unlock() + return cc.re + } + if cc.we != nil { + defer cc.lk.Unlock() + return cc.we + } + if cc.c == nil { // connection closed by user in the meantime + defer cc.lk.Unlock() + return errClosed + } + c := cc.c + if req.Close { + // We write the EOF to the write-side error, because there + // still might be some pipelined reads + cc.we = ErrPersistEOF + } + cc.lk.Unlock() + + err = cc.writeReq(req, c) + cc.lk.Lock() + defer cc.lk.Unlock() + if err != nil { + cc.we = err + return err + } + cc.nwritten++ + + return nil +} + +// Pending returns the number of unanswered requests +// that have been sent on the connection. +func (cc *ClientConn) Pending() int { + cc.lk.Lock() + defer cc.lk.Unlock() + return cc.nwritten - cc.nread +} + +// Read reads the next response from the wire. A valid response might be +// returned together with an ErrPersistEOF, which means that the remote +// requested that this be the last request serviced. Read can be called +// concurrently with Write, but not with another Read. +func (cc *ClientConn) Read(req *http.Request) (resp *http.Response, err error) { + // Retrieve the pipeline ID of this request/response pair + cc.lk.Lock() + id, ok := cc.pipereq[req] + delete(cc.pipereq, req) + if !ok { + cc.lk.Unlock() + return nil, ErrPipeline + } + cc.lk.Unlock() + + // Ensure pipeline order + cc.pipe.StartResponse(id) + defer cc.pipe.EndResponse(id) + + cc.lk.Lock() + if cc.re != nil { + defer cc.lk.Unlock() + return nil, cc.re + } + if cc.r == nil { // connection closed by user in the meantime + defer cc.lk.Unlock() + return nil, errClosed + } + r := cc.r + lastbody := cc.lastbody + cc.lastbody = nil + cc.lk.Unlock() + + // Make sure body is fully consumed, even if user does not call body.Close + if lastbody != nil { + // body.Close is assumed to be idempotent and multiple calls to + // it should return the error that its first invocation + // returned. + err = lastbody.Close() + if err != nil { + cc.lk.Lock() + defer cc.lk.Unlock() + cc.re = err + return nil, err + } + } + + resp, err = http.ReadResponse(r, req) + cc.lk.Lock() + defer cc.lk.Unlock() + if err != nil { + cc.re = err + return resp, err + } + cc.lastbody = resp.Body + + cc.nread++ + + if resp.Close { + cc.re = ErrPersistEOF // don't send any more requests + return resp, cc.re + } + return resp, err +} + +// Do is convenience method that writes a request and reads a response. 
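+//
+// A minimal, illustrative sketch (example.com is a placeholder and error
+// handling is elided):
+//
+//	conn, _ := net.Dial("tcp", "example.com:80")
+//	cc := httputil.NewClientConn(conn, nil)
+//	req, _ := http.NewRequest("GET", "http://example.com/", nil)
+//	resp, _ := cc.Do(req)
+//	defer resp.Body.Close()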
+func (cc *ClientConn) Do(req *http.Request) (resp *http.Response, err error) { + err = cc.Write(req) + if err != nil { + return + } + return cc.Read(req) +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/core/http/httputil/reverseproxy.go b/vendor/github.com/Azure/azure-sdk-for-go/core/http/httputil/reverseproxy.go new file mode 100644 index 000000000..48ada5f5f --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/core/http/httputil/reverseproxy.go @@ -0,0 +1,211 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// HTTP reverse proxy handler + +package httputil + +import ( + "io" + "log" + "net" + "net/http" + "net/url" + "strings" + "sync" + "time" +) + +// onExitFlushLoop is a callback set by tests to detect the state of the +// flushLoop() goroutine. +var onExitFlushLoop func() + +// ReverseProxy is an HTTP Handler that takes an incoming request and +// sends it to another server, proxying the response back to the +// client. +type ReverseProxy struct { + // Director must be a function which modifies + // the request into a new request to be sent + // using Transport. Its response is then copied + // back to the original client unmodified. + Director func(*http.Request) + + // The transport used to perform proxy requests. + // If nil, http.DefaultTransport is used. + Transport http.RoundTripper + + // FlushInterval specifies the flush interval + // to flush to the client while copying the + // response body. + // If zero, no periodic flushing is done. + FlushInterval time.Duration +} + +func singleJoiningSlash(a, b string) string { + aslash := strings.HasSuffix(a, "/") + bslash := strings.HasPrefix(b, "/") + switch { + case aslash && bslash: + return a + b[1:] + case !aslash && !bslash: + return a + "/" + b + } + return a + b +} + +// NewSingleHostReverseProxy returns a new ReverseProxy that rewrites +// URLs to the scheme, host, and base path provided in target. If the +// target's path is "/base" and the incoming request was for "/dir", +// the target request will be for /base/dir. +func NewSingleHostReverseProxy(target *url.URL) *ReverseProxy { + targetQuery := target.RawQuery + director := func(req *http.Request) { + req.URL.Scheme = target.Scheme + req.URL.Host = target.Host + req.URL.Path = singleJoiningSlash(target.Path, req.URL.Path) + if targetQuery == "" || req.URL.RawQuery == "" { + req.URL.RawQuery = targetQuery + req.URL.RawQuery + } else { + req.URL.RawQuery = targetQuery + "&" + req.URL.RawQuery + } + } + return &ReverseProxy{Director: director} +} + +func copyHeader(dst, src http.Header) { + for k, vv := range src { + for _, v := range vv { + dst.Add(k, v) + } + } +} + +// Hop-by-hop headers. These are removed when sent to the backend. +// http://www.w3.org/Protocols/rfc2616/rfc2616-sec13.html +var hopHeaders = []string{ + "Connection", + "Keep-Alive", + "Proxy-Authenticate", + "Proxy-Authorization", + "Te", // canonicalized version of "TE" + "Trailers", + "Transfer-Encoding", + "Upgrade", +} + +func (p *ReverseProxy) ServeHTTP(rw http.ResponseWriter, req *http.Request) { + transport := p.Transport + if transport == nil { + transport = http.DefaultTransport + } + + outreq := new(http.Request) + *outreq = *req // includes shallow copies of maps, but okay + + p.Director(outreq) + outreq.Proto = "HTTP/1.1" + outreq.ProtoMajor = 1 + outreq.ProtoMinor = 1 + outreq.Close = false + + // Remove hop-by-hop headers to the backend. 
Especially + // important is "Connection" because we want a persistent + // connection, regardless of what the client sent to us. This + // is modifying the same underlying map from req (shallow + // copied above) so we only copy it if necessary. + copiedHeaders := false + for _, h := range hopHeaders { + if outreq.Header.Get(h) != "" { + if !copiedHeaders { + outreq.Header = make(http.Header) + copyHeader(outreq.Header, req.Header) + copiedHeaders = true + } + outreq.Header.Del(h) + } + } + + if clientIP, _, err := net.SplitHostPort(req.RemoteAddr); err == nil { + // If we aren't the first proxy retain prior + // X-Forwarded-For information as a comma+space + // separated list and fold multiple headers into one. + if prior, ok := outreq.Header["X-Forwarded-For"]; ok { + clientIP = strings.Join(prior, ", ") + ", " + clientIP + } + outreq.Header.Set("X-Forwarded-For", clientIP) + } + + res, err := transport.RoundTrip(outreq) + if err != nil { + log.Printf("http: proxy error: %v", err) + rw.WriteHeader(http.StatusInternalServerError) + return + } + defer res.Body.Close() + + for _, h := range hopHeaders { + res.Header.Del(h) + } + + copyHeader(rw.Header(), res.Header) + + rw.WriteHeader(res.StatusCode) + p.copyResponse(rw, res.Body) +} + +func (p *ReverseProxy) copyResponse(dst io.Writer, src io.Reader) { + if p.FlushInterval != 0 { + if wf, ok := dst.(writeFlusher); ok { + mlw := &maxLatencyWriter{ + dst: wf, + latency: p.FlushInterval, + done: make(chan bool), + } + go mlw.flushLoop() + defer mlw.stop() + dst = mlw + } + } + + io.Copy(dst, src) +} + +type writeFlusher interface { + io.Writer + http.Flusher +} + +type maxLatencyWriter struct { + dst writeFlusher + latency time.Duration + + lk sync.Mutex // protects Write + Flush + done chan bool +} + +func (m *maxLatencyWriter) Write(p []byte) (int, error) { + m.lk.Lock() + defer m.lk.Unlock() + return m.dst.Write(p) +} + +func (m *maxLatencyWriter) flushLoop() { + t := time.NewTicker(m.latency) + defer t.Stop() + for { + select { + case <-m.done: + if onExitFlushLoop != nil { + onExitFlushLoop() + } + return + case <-t.C: + m.lk.Lock() + m.dst.Flush() + m.lk.Unlock() + } + } +} + +func (m *maxLatencyWriter) stop() { m.done <- true } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/core/http/httputil/reverseproxy_test.go b/vendor/github.com/Azure/azure-sdk-for-go/core/http/httputil/reverseproxy_test.go new file mode 100644 index 000000000..e9539b44b --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/core/http/httputil/reverseproxy_test.go @@ -0,0 +1,213 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Reverse proxy tests. 
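+//
+// For reference, the proxy under test is normally wired up along these
+// lines (a hedged sketch; the backend address is a placeholder):
+//
+//	target, _ := url.Parse("http://localhost:9000")
+//	proxy := httputil.NewSingleHostReverseProxy(target)
+//	http.ListenAndServe(":8080", proxy)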
+ +package httputil + +import ( + "io/ioutil" + "net/http" + "net/http/httptest" + "net/url" + "strings" + "testing" + "time" +) + +const fakeHopHeader = "X-Fake-Hop-Header-For-Test" + +func init() { + hopHeaders = append(hopHeaders, fakeHopHeader) +} + +func TestReverseProxy(t *testing.T) { + const backendResponse = "I am the backend" + const backendStatus = 404 + backend := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if len(r.TransferEncoding) > 0 { + t.Errorf("backend got unexpected TransferEncoding: %v", r.TransferEncoding) + } + if r.Header.Get("X-Forwarded-For") == "" { + t.Errorf("didn't get X-Forwarded-For header") + } + if c := r.Header.Get("Connection"); c != "" { + t.Errorf("handler got Connection header value %q", c) + } + if c := r.Header.Get("Upgrade"); c != "" { + t.Errorf("handler got Upgrade header value %q", c) + } + if g, e := r.Host, "some-name"; g != e { + t.Errorf("backend got Host header %q, want %q", g, e) + } + w.Header().Set("X-Foo", "bar") + w.Header().Set("Upgrade", "foo") + w.Header().Set(fakeHopHeader, "foo") + w.Header().Add("X-Multi-Value", "foo") + w.Header().Add("X-Multi-Value", "bar") + http.SetCookie(w, &http.Cookie{Name: "flavor", Value: "chocolateChip"}) + w.WriteHeader(backendStatus) + w.Write([]byte(backendResponse)) + })) + defer backend.Close() + backendURL, err := url.Parse(backend.URL) + if err != nil { + t.Fatal(err) + } + proxyHandler := NewSingleHostReverseProxy(backendURL) + frontend := httptest.NewServer(proxyHandler) + defer frontend.Close() + + getReq, _ := http.NewRequest("GET", frontend.URL, nil) + getReq.Host = "some-name" + getReq.Header.Set("Connection", "close") + getReq.Header.Set("Upgrade", "foo") + getReq.Close = true + res, err := http.DefaultClient.Do(getReq) + if err != nil { + t.Fatalf("Get: %v", err) + } + if g, e := res.StatusCode, backendStatus; g != e { + t.Errorf("got res.StatusCode %d; expected %d", g, e) + } + if g, e := res.Header.Get("X-Foo"), "bar"; g != e { + t.Errorf("got X-Foo %q; expected %q", g, e) + } + if c := res.Header.Get(fakeHopHeader); c != "" { + t.Errorf("got %s header value %q", fakeHopHeader, c) + } + if g, e := len(res.Header["X-Multi-Value"]), 2; g != e { + t.Errorf("got %d X-Multi-Value header values; expected %d", g, e) + } + if g, e := len(res.Header["Set-Cookie"]), 1; g != e { + t.Fatalf("got %d SetCookies, want %d", g, e) + } + if cookie := res.Cookies()[0]; cookie.Name != "flavor" { + t.Errorf("unexpected cookie %q", cookie.Name) + } + bodyBytes, _ := ioutil.ReadAll(res.Body) + if g, e := string(bodyBytes), backendResponse; g != e { + t.Errorf("got body %q; expected %q", g, e) + } +} + +func TestXForwardedFor(t *testing.T) { + const prevForwardedFor = "client ip" + const backendResponse = "I am the backend" + const backendStatus = 404 + backend := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if r.Header.Get("X-Forwarded-For") == "" { + t.Errorf("didn't get X-Forwarded-For header") + } + if !strings.Contains(r.Header.Get("X-Forwarded-For"), prevForwardedFor) { + t.Errorf("X-Forwarded-For didn't contain prior data") + } + w.WriteHeader(backendStatus) + w.Write([]byte(backendResponse)) + })) + defer backend.Close() + backendURL, err := url.Parse(backend.URL) + if err != nil { + t.Fatal(err) + } + proxyHandler := NewSingleHostReverseProxy(backendURL) + frontend := httptest.NewServer(proxyHandler) + defer frontend.Close() + + getReq, _ := http.NewRequest("GET", frontend.URL, nil) + getReq.Host = "some-name" + 
getReq.Header.Set("Connection", "close") + getReq.Header.Set("X-Forwarded-For", prevForwardedFor) + getReq.Close = true + res, err := http.DefaultClient.Do(getReq) + if err != nil { + t.Fatalf("Get: %v", err) + } + if g, e := res.StatusCode, backendStatus; g != e { + t.Errorf("got res.StatusCode %d; expected %d", g, e) + } + bodyBytes, _ := ioutil.ReadAll(res.Body) + if g, e := string(bodyBytes), backendResponse; g != e { + t.Errorf("got body %q; expected %q", g, e) + } +} + +var proxyQueryTests = []struct { + baseSuffix string // suffix to add to backend URL + reqSuffix string // suffix to add to frontend's request URL + want string // what backend should see for final request URL (without ?) +}{ + {"", "", ""}, + {"?sta=tic", "?us=er", "sta=tic&us=er"}, + {"", "?us=er", "us=er"}, + {"?sta=tic", "", "sta=tic"}, +} + +func TestReverseProxyQuery(t *testing.T) { + backend := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("X-Got-Query", r.URL.RawQuery) + w.Write([]byte("hi")) + })) + defer backend.Close() + + for i, tt := range proxyQueryTests { + backendURL, err := url.Parse(backend.URL + tt.baseSuffix) + if err != nil { + t.Fatal(err) + } + frontend := httptest.NewServer(NewSingleHostReverseProxy(backendURL)) + req, _ := http.NewRequest("GET", frontend.URL+tt.reqSuffix, nil) + req.Close = true + res, err := http.DefaultClient.Do(req) + if err != nil { + t.Fatalf("%d. Get: %v", i, err) + } + if g, e := res.Header.Get("X-Got-Query"), tt.want; g != e { + t.Errorf("%d. got query %q; expected %q", i, g, e) + } + res.Body.Close() + frontend.Close() + } +} + +func TestReverseProxyFlushInterval(t *testing.T) { + const expected = "hi" + backend := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Write([]byte(expected)) + })) + defer backend.Close() + + backendURL, err := url.Parse(backend.URL) + if err != nil { + t.Fatal(err) + } + + proxyHandler := NewSingleHostReverseProxy(backendURL) + proxyHandler.FlushInterval = time.Microsecond + + done := make(chan bool) + onExitFlushLoop = func() { done <- true } + defer func() { onExitFlushLoop = nil }() + + frontend := httptest.NewServer(proxyHandler) + defer frontend.Close() + + req, _ := http.NewRequest("GET", frontend.URL, nil) + req.Close = true + res, err := http.DefaultClient.Do(req) + if err != nil { + t.Fatalf("Get: %v", err) + } + defer res.Body.Close() + if bodyBytes, _ := ioutil.ReadAll(res.Body); string(bodyBytes) != expected { + t.Errorf("got body %q; expected %q", bodyBytes, expected) + } + + select { + case <-done: + // OK + case <-time.After(5 * time.Second): + t.Error("maxLatencyWriter flushLoop() never exited") + } +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/core/http/jar.go b/vendor/github.com/Azure/azure-sdk-for-go/core/http/jar.go new file mode 100644 index 000000000..5c3de0dad --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/core/http/jar.go @@ -0,0 +1,27 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package http + +import ( + "net/url" +) + +// A CookieJar manages storage and use of cookies in HTTP requests. +// +// Implementations of CookieJar must be safe for concurrent use by multiple +// goroutines. +// +// The net/http/cookiejar package provides a CookieJar implementation. +type CookieJar interface { + // SetCookies handles the receipt of the cookies in a reply for the + // given URL. 
It may or may not choose to save the cookies, depending + // on the jar's policy and implementation. + SetCookies(u *url.URL, cookies []*Cookie) + + // Cookies returns the cookies to send in a request for the given URL. + // It is up to the implementation to honor the standard cookie use + // restrictions such as in RFC 6265. + Cookies(u *url.URL) []*Cookie +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/core/http/lex.go b/vendor/github.com/Azure/azure-sdk-for-go/core/http/lex.go new file mode 100644 index 000000000..cb33318f4 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/core/http/lex.go @@ -0,0 +1,96 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package http + +// This file deals with lexical matters of HTTP + +var isTokenTable = [127]bool{ + '!': true, + '#': true, + '$': true, + '%': true, + '&': true, + '\'': true, + '*': true, + '+': true, + '-': true, + '.': true, + '0': true, + '1': true, + '2': true, + '3': true, + '4': true, + '5': true, + '6': true, + '7': true, + '8': true, + '9': true, + 'A': true, + 'B': true, + 'C': true, + 'D': true, + 'E': true, + 'F': true, + 'G': true, + 'H': true, + 'I': true, + 'J': true, + 'K': true, + 'L': true, + 'M': true, + 'N': true, + 'O': true, + 'P': true, + 'Q': true, + 'R': true, + 'S': true, + 'T': true, + 'U': true, + 'W': true, + 'V': true, + 'X': true, + 'Y': true, + 'Z': true, + '^': true, + '_': true, + '`': true, + 'a': true, + 'b': true, + 'c': true, + 'd': true, + 'e': true, + 'f': true, + 'g': true, + 'h': true, + 'i': true, + 'j': true, + 'k': true, + 'l': true, + 'm': true, + 'n': true, + 'o': true, + 'p': true, + 'q': true, + 'r': true, + 's': true, + 't': true, + 'u': true, + 'v': true, + 'w': true, + 'x': true, + 'y': true, + 'z': true, + '|': true, + '~': true, +} + +func isToken(r rune) bool { + i := int(r) + return i < len(isTokenTable) && isTokenTable[i] +} + +func isNotToken(r rune) bool { + return !isToken(r) +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/core/http/lex_test.go b/vendor/github.com/Azure/azure-sdk-for-go/core/http/lex_test.go new file mode 100644 index 000000000..6d9d294f7 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/core/http/lex_test.go @@ -0,0 +1,31 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package http + +import ( + "testing" +) + +func isChar(c rune) bool { return c <= 127 } + +func isCtl(c rune) bool { return c <= 31 || c == 127 } + +func isSeparator(c rune) bool { + switch c { + case '(', ')', '<', '>', '@', ',', ';', ':', '\\', '"', '/', '[', ']', '?', '=', '{', '}', ' ', '\t': + return true + } + return false +} + +func TestIsToken(t *testing.T) { + for i := 0; i <= 130; i++ { + r := rune(i) + expected := isChar(r) && !isCtl(r) && !isSeparator(r) + if isToken(r) != expected { + t.Errorf("isToken(0x%x) = %v", r, !expected) + } + } +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/core/http/npn_test.go b/vendor/github.com/Azure/azure-sdk-for-go/core/http/npn_test.go new file mode 100644 index 000000000..98b8930d0 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/core/http/npn_test.go @@ -0,0 +1,118 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
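+//
+// Orientation for the test below: the server hands the connection off to a
+// registered handler once TLS negotiates that protocol. Registration looks
+// roughly like this (a sketch; handleProto is a hypothetical function):
+//
+//	srv := &http.Server{Addr: ":443"}
+//	srv.TLSNextProto = map[string]func(*http.Server, *tls.Conn, http.Handler){
+//		"my-proto": handleProto,
+//	}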
+ +package http_test + +import ( + "bufio" + "crypto/tls" + "fmt" + "io" + "io/ioutil" + . "net/http" + "net/http/httptest" + "strings" + "testing" +) + +func TestNextProtoUpgrade(t *testing.T) { + ts := httptest.NewUnstartedServer(HandlerFunc(func(w ResponseWriter, r *Request) { + fmt.Fprintf(w, "path=%s,proto=", r.URL.Path) + if r.TLS != nil { + w.Write([]byte(r.TLS.NegotiatedProtocol)) + } + if r.RemoteAddr == "" { + t.Error("request with no RemoteAddr") + } + if r.Body == nil { + t.Errorf("request with nil Body") + } + })) + ts.TLS = &tls.Config{ + NextProtos: []string{"unhandled-proto", "tls-0.9"}, + } + ts.Config.TLSNextProto = map[string]func(*Server, *tls.Conn, Handler){ + "tls-0.9": handleTLSProtocol09, + } + ts.StartTLS() + defer ts.Close() + + tr := newTLSTransport(t, ts) + defer tr.CloseIdleConnections() + c := &Client{Transport: tr} + + // Normal request, without NPN. + { + res, err := c.Get(ts.URL) + if err != nil { + t.Fatal(err) + } + body, err := ioutil.ReadAll(res.Body) + if err != nil { + t.Fatal(err) + } + if want := "path=/,proto="; string(body) != want { + t.Errorf("plain request = %q; want %q", body, want) + } + } + + // Request to an advertised but unhandled NPN protocol. + // Server will hang up. + { + tr.CloseIdleConnections() + tr.TLSClientConfig.NextProtos = []string{"unhandled-proto"} + _, err := c.Get(ts.URL) + if err == nil { + t.Errorf("expected error on unhandled-proto request") + } + } + + // Request using the "tls-0.9" protocol, which we register here. + // It is HTTP/0.9 over TLS. + { + tlsConfig := newTLSTransport(t, ts).TLSClientConfig + tlsConfig.NextProtos = []string{"tls-0.9"} + conn, err := tls.Dial("tcp", ts.Listener.Addr().String(), tlsConfig) + if err != nil { + t.Fatal(err) + } + conn.Write([]byte("GET /foo\n")) + body, err := ioutil.ReadAll(conn) + if err != nil { + t.Fatal(err) + } + if want := "path=/foo,proto=tls-0.9"; string(body) != want { + t.Errorf("plain request = %q; want %q", body, want) + } + } +} + +// handleTLSProtocol09 implements the HTTP/0.9 protocol over TLS, for the +// TestNextProtoUpgrade test. +func handleTLSProtocol09(srv *Server, conn *tls.Conn, h Handler) { + br := bufio.NewReader(conn) + line, err := br.ReadString('\n') + if err != nil { + return + } + line = strings.TrimSpace(line) + path := strings.TrimPrefix(line, "GET ") + if path == line { + return + } + req, _ := NewRequest("GET", path, nil) + req.Proto = "HTTP/0.9" + req.ProtoMajor = 0 + req.ProtoMinor = 9 + rw := &http09Writer{conn, make(Header)} + h.ServeHTTP(rw, req) +} + +type http09Writer struct { + io.Writer + h Header +} + +func (w http09Writer) Header() Header { return w.h } +func (w http09Writer) WriteHeader(int) {} // no headers diff --git a/vendor/github.com/Azure/azure-sdk-for-go/core/http/pprof/pprof.go b/vendor/github.com/Azure/azure-sdk-for-go/core/http/pprof/pprof.go new file mode 100644 index 000000000..0c7548e3e --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/core/http/pprof/pprof.go @@ -0,0 +1,205 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package pprof serves via its HTTP server runtime profiling data +// in the format expected by the pprof visualization tool. +// For more information about pprof, see +// http://code.google.com/p/google-perftools/. +// +// The package is typically only imported for the side effect of +// registering its HTTP handlers. +// The handled paths all begin with /debug/pprof/. 
+// +// To use pprof, link this package into your program: +// import _ "net/http/pprof" +// +// If your application is not already running an http server, you +// need to start one. Add "net/http" and "log" to your imports and +// the following code to your main function: +// +// go func() { +// log.Println(http.ListenAndServe("localhost:6060", nil)) +// }() +// +// Then use the pprof tool to look at the heap profile: +// +// go tool pprof http://localhost:6060/debug/pprof/heap +// +// Or to look at a 30-second CPU profile: +// +// go tool pprof http://localhost:6060/debug/pprof/profile +// +// Or to look at the goroutine blocking profile: +// +// go tool pprof http://localhost:6060/debug/pprof/block +// +// To view all available profiles, open http://localhost:6060/debug/pprof/ +// in your browser. +// +// For a study of the facility in action, visit +// +// http://blog.golang.org/2011/06/profiling-go-programs.html +// +package pprof + +import ( + "bufio" + "bytes" + "fmt" + "html/template" + "io" + "log" + "net/http" + "os" + "runtime" + "runtime/pprof" + "strconv" + "strings" + "time" +) + +func init() { + http.Handle("/debug/pprof/", http.HandlerFunc(Index)) + http.Handle("/debug/pprof/cmdline", http.HandlerFunc(Cmdline)) + http.Handle("/debug/pprof/profile", http.HandlerFunc(Profile)) + http.Handle("/debug/pprof/symbol", http.HandlerFunc(Symbol)) +} + +// Cmdline responds with the running program's +// command line, with arguments separated by NUL bytes. +// The package initialization registers it as /debug/pprof/cmdline. +func Cmdline(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "text/plain; charset=utf-8") + fmt.Fprintf(w, strings.Join(os.Args, "\x00")) +} + +// Profile responds with the pprof-formatted cpu profile. +// The package initialization registers it as /debug/pprof/profile. +func Profile(w http.ResponseWriter, r *http.Request) { + sec, _ := strconv.ParseInt(r.FormValue("seconds"), 10, 64) + if sec == 0 { + sec = 30 + } + + // Set Content Type assuming StartCPUProfile will work, + // because if it does it starts writing. + w.Header().Set("Content-Type", "application/octet-stream") + if err := pprof.StartCPUProfile(w); err != nil { + // StartCPUProfile failed, so no writes yet. + // Can change header back to text content + // and send error code. + w.Header().Set("Content-Type", "text/plain; charset=utf-8") + w.WriteHeader(http.StatusInternalServerError) + fmt.Fprintf(w, "Could not enable CPU profiling: %s\n", err) + return + } + time.Sleep(time.Duration(sec) * time.Second) + pprof.StopCPUProfile() +} + +// Symbol looks up the program counters listed in the request, +// responding with a table mapping program counters to function names. +// The package initialization registers it as /debug/pprof/symbol. +func Symbol(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "text/plain; charset=utf-8") + + // We have to read the whole POST body before + // writing any output. Buffer the output here. + var buf bytes.Buffer + + // We don't know how many symbols we have, but we + // do have symbol information. Pprof only cares whether + // this number is 0 (no symbols available) or > 0. 
+	fmt.Fprintf(&buf, "num_symbols: 1\n")
+
+	var b *bufio.Reader
+	if r.Method == "POST" {
+		b = bufio.NewReader(r.Body)
+	} else {
+		b = bufio.NewReader(strings.NewReader(r.URL.RawQuery))
+	}
+
+	for {
+		word, err := b.ReadSlice('+')
+		if err == nil {
+			word = word[0 : len(word)-1] // trim +
+		}
+		pc, _ := strconv.ParseUint(string(word), 0, 64)
+		if pc != 0 {
+			f := runtime.FuncForPC(uintptr(pc))
+			if f != nil {
+				fmt.Fprintf(&buf, "%#x %s\n", pc, f.Name())
+			}
+		}
+
+		// Wait until here to check for err; the last
+		// symbol will have an err because it doesn't end in +.
+		if err != nil {
+			if err != io.EOF {
+				fmt.Fprintf(&buf, "reading request: %v\n", err)
+			}
+			break
+		}
+	}
+
+	w.Write(buf.Bytes())
+}
+
+// Handler returns an HTTP handler that serves the named profile.
+func Handler(name string) http.Handler {
+	return handler(name)
+}
+
+type handler string
+
+func (name handler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+	w.Header().Set("Content-Type", "text/plain; charset=utf-8")
+	debug, _ := strconv.Atoi(r.FormValue("debug"))
+	p := pprof.Lookup(string(name))
+	if p == nil {
+		w.WriteHeader(404)
+		fmt.Fprintf(w, "Unknown profile: %s\n", name)
+		return
+	}
+	p.WriteTo(w, debug)
+	return
+}
+
+// Index responds with the pprof-formatted profile named by the request.
+// For example, "/debug/pprof/heap" serves the "heap" profile.
+// Index responds to a request for "/debug/pprof/" with an HTML page
+// listing the available profiles.
+func Index(w http.ResponseWriter, r *http.Request) {
+	if strings.HasPrefix(r.URL.Path, "/debug/pprof/") {
+		name := strings.TrimPrefix(r.URL.Path, "/debug/pprof/")
+		if name != "" {
+			handler(name).ServeHTTP(w, r)
+			return
+		}
+	}
+
+	profiles := pprof.Profiles()
+	if err := indexTmpl.Execute(w, profiles); err != nil {
+		log.Print(err)
+	}
+}
+
+var indexTmpl = template.Must(template.New("index").Parse(`<html>
+<head>
+<title>/debug/pprof/</title>
+</head>
+<body>
+/debug/pprof/<br>
+<br>
+profiles:<br>
+<table>
+{{range .}}
+<tr><td align=right>{{.Count}}<td><a href="{{.Name}}?debug=1">{{.Name}}</a>
+{{end}}
+</table>
+<br>
+<a href="goroutine?debug=2">full goroutine stack dump</a><br>
+</body>
+</html>
+ + +`)) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/core/http/proxy_test.go b/vendor/github.com/Azure/azure-sdk-for-go/core/http/proxy_test.go new file mode 100644 index 000000000..b6aed3792 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/core/http/proxy_test.go @@ -0,0 +1,81 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package http + +import ( + "net/url" + "os" + "testing" +) + +// TODO(mattn): +// test ProxyAuth + +var UseProxyTests = []struct { + host string + match bool +}{ + // Never proxy localhost: + {"localhost:80", false}, + {"127.0.0.1", false}, + {"127.0.0.2", false}, + {"[::1]", false}, + {"[::2]", true}, // not a loopback address + + {"barbaz.net", false}, // match as .barbaz.net + {"foobar.com", false}, // have a port but match + {"foofoobar.com", true}, // not match as a part of foobar.com + {"baz.com", true}, // not match as a part of barbaz.com + {"localhost.net", true}, // not match as suffix of address + {"local.localhost", true}, // not match as prefix as address + {"barbarbaz.net", true}, // not match because NO_PROXY have a '.' + {"www.foobar.com", false}, // match because NO_PROXY includes "foobar.com" +} + +func TestUseProxy(t *testing.T) { + ResetProxyEnv() + os.Setenv("NO_PROXY", "foobar.com, .barbaz.net") + for _, test := range UseProxyTests { + if useProxy(test.host+":80") != test.match { + t.Errorf("useProxy(%v) = %v, want %v", test.host, !test.match, test.match) + } + } +} + +var cacheKeysTests = []struct { + proxy string + scheme string + addr string + key string +}{ + {"", "http", "foo.com", "|http|foo.com"}, + {"", "https", "foo.com", "|https|foo.com"}, + {"http://foo.com", "http", "foo.com", "http://foo.com|http|"}, + {"http://foo.com", "https", "foo.com", "http://foo.com|https|foo.com"}, +} + +func TestCacheKeys(t *testing.T) { + for _, tt := range cacheKeysTests { + var proxy *url.URL + if tt.proxy != "" { + u, err := url.Parse(tt.proxy) + if err != nil { + t.Fatal(err) + } + proxy = u + } + cm := connectMethod{proxy, tt.scheme, tt.addr} + if got := cm.key().String(); got != tt.key { + t.Fatalf("{%q, %q, %q} cache key = %q; want %q", tt.proxy, tt.scheme, tt.addr, got, tt.key) + } + } +} + +func ResetProxyEnv() { + for _, v := range []string{"HTTP_PROXY", "http_proxy", "NO_PROXY", "no_proxy"} { + os.Setenv(v, "") + } + ResetCachedEnvironment() +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/core/http/race.go b/vendor/github.com/Azure/azure-sdk-for-go/core/http/race.go new file mode 100644 index 000000000..766503967 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/core/http/race.go @@ -0,0 +1,11 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build race + +package http + +func init() { + raceEnabled = true +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/core/http/range_test.go b/vendor/github.com/Azure/azure-sdk-for-go/core/http/range_test.go new file mode 100644 index 000000000..ef911af7b --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/core/http/range_test.go @@ -0,0 +1,79 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
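+//
+// A reading aid for the cases below (hypothetical call; error handling
+// elided): for a 10000-byte entity,
+//
+//	ranges, _ := parseRange("bytes=-500", 10000)
+//	// ranges[0] == httpRange{start: 9500, length: 500}, i.e. the last 500 bytes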
+ +package http + +import ( + "testing" +) + +var ParseRangeTests = []struct { + s string + length int64 + r []httpRange +}{ + {"", 0, nil}, + {"", 1000, nil}, + {"foo", 0, nil}, + {"bytes=", 0, nil}, + {"bytes=7", 10, nil}, + {"bytes= 7 ", 10, nil}, + {"bytes=1-", 0, nil}, + {"bytes=5-4", 10, nil}, + {"bytes=0-2,5-4", 10, nil}, + {"bytes=2-5,4-3", 10, nil}, + {"bytes=--5,4--3", 10, nil}, + {"bytes=A-", 10, nil}, + {"bytes=A- ", 10, nil}, + {"bytes=A-Z", 10, nil}, + {"bytes= -Z", 10, nil}, + {"bytes=5-Z", 10, nil}, + {"bytes=Ran-dom, garbage", 10, nil}, + {"bytes=0x01-0x02", 10, nil}, + {"bytes= ", 10, nil}, + {"bytes= , , , ", 10, nil}, + + {"bytes=0-9", 10, []httpRange{{0, 10}}}, + {"bytes=0-", 10, []httpRange{{0, 10}}}, + {"bytes=5-", 10, []httpRange{{5, 5}}}, + {"bytes=0-20", 10, []httpRange{{0, 10}}}, + {"bytes=15-,0-5", 10, nil}, + {"bytes=1-2,5-", 10, []httpRange{{1, 2}, {5, 5}}}, + {"bytes=-2 , 7-", 11, []httpRange{{9, 2}, {7, 4}}}, + {"bytes=0-0 ,2-2, 7-", 11, []httpRange{{0, 1}, {2, 1}, {7, 4}}}, + {"bytes=-5", 10, []httpRange{{5, 5}}}, + {"bytes=-15", 10, []httpRange{{0, 10}}}, + {"bytes=0-499", 10000, []httpRange{{0, 500}}}, + {"bytes=500-999", 10000, []httpRange{{500, 500}}}, + {"bytes=-500", 10000, []httpRange{{9500, 500}}}, + {"bytes=9500-", 10000, []httpRange{{9500, 500}}}, + {"bytes=0-0,-1", 10000, []httpRange{{0, 1}, {9999, 1}}}, + {"bytes=500-600,601-999", 10000, []httpRange{{500, 101}, {601, 399}}}, + {"bytes=500-700,601-999", 10000, []httpRange{{500, 201}, {601, 399}}}, + + // Match Apache laxity: + {"bytes= 1 -2 , 4- 5, 7 - 8 , ,,", 11, []httpRange{{1, 2}, {4, 2}, {7, 2}}}, +} + +func TestParseRange(t *testing.T) { + for _, test := range ParseRangeTests { + r := test.r + ranges, err := parseRange(test.s, test.length) + if err != nil && r != nil { + t.Errorf("parseRange(%q) returned error %q", test.s, err) + } + if len(ranges) != len(r) { + t.Errorf("len(parseRange(%q)) = %d, want %d", test.s, len(ranges), len(r)) + continue + } + for i := range r { + if ranges[i].start != r[i].start { + t.Errorf("parseRange(%q)[%d].start = %d, want %d", test.s, i, ranges[i].start, r[i].start) + } + if ranges[i].length != r[i].length { + t.Errorf("parseRange(%q)[%d].length = %d, want %d", test.s, i, ranges[i].length, r[i].length) + } + } + } +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/core/http/readrequest_test.go b/vendor/github.com/Azure/azure-sdk-for-go/core/http/readrequest_test.go new file mode 100644 index 000000000..ffdd6a892 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/core/http/readrequest_test.go @@ -0,0 +1,331 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
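+//
+// The tests below drive ReadRequest directly; the basic call shape is
+// (a sketch; raw is a placeholder for a wire-format request string):
+//
+//	br := bufio.NewReader(strings.NewReader(raw))
+//	req, err := ReadRequest(br)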
+ +package http + +import ( + "bufio" + "bytes" + "fmt" + "io" + "net/url" + "reflect" + "testing" +) + +type reqTest struct { + Raw string + Req *Request + Body string + Trailer Header + Error string +} + +var noError = "" +var noBody = "" +var noTrailer Header = nil + +var reqTests = []reqTest{ + // Baseline test; All Request fields included for template use + { + "GET http://www.techcrunch.com/ HTTP/1.1\r\n" + + "Host: www.techcrunch.com\r\n" + + "User-Agent: Fake\r\n" + + "Accept: text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8\r\n" + + "Accept-Language: en-us,en;q=0.5\r\n" + + "Accept-Encoding: gzip,deflate\r\n" + + "Accept-Charset: ISO-8859-1,utf-8;q=0.7,*;q=0.7\r\n" + + "Keep-Alive: 300\r\n" + + "Content-Length: 7\r\n" + + "Proxy-Connection: keep-alive\r\n\r\n" + + "abcdef\n???", + + &Request{ + Method: "GET", + URL: &url.URL{ + Scheme: "http", + Host: "www.techcrunch.com", + Path: "/", + }, + Proto: "HTTP/1.1", + ProtoMajor: 1, + ProtoMinor: 1, + Header: Header{ + "Accept": {"text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8"}, + "Accept-Language": {"en-us,en;q=0.5"}, + "Accept-Encoding": {"gzip,deflate"}, + "Accept-Charset": {"ISO-8859-1,utf-8;q=0.7,*;q=0.7"}, + "Keep-Alive": {"300"}, + "Proxy-Connection": {"keep-alive"}, + "Content-Length": {"7"}, + "User-Agent": {"Fake"}, + }, + Close: false, + ContentLength: 7, + Host: "www.techcrunch.com", + RequestURI: "http://www.techcrunch.com/", + }, + + "abcdef\n", + + noTrailer, + noError, + }, + + // GET request with no body (the normal case) + { + "GET / HTTP/1.1\r\n" + + "Host: foo.com\r\n\r\n", + + &Request{ + Method: "GET", + URL: &url.URL{ + Path: "/", + }, + Proto: "HTTP/1.1", + ProtoMajor: 1, + ProtoMinor: 1, + Header: Header{}, + Close: false, + ContentLength: 0, + Host: "foo.com", + RequestURI: "/", + }, + + noBody, + noTrailer, + noError, + }, + + // Tests that we don't parse a path that looks like a + // scheme-relative URI as a scheme-relative URI. 
+ { + "GET //user@host/is/actually/a/path/ HTTP/1.1\r\n" + + "Host: test\r\n\r\n", + + &Request{ + Method: "GET", + URL: &url.URL{ + Path: "//user@host/is/actually/a/path/", + }, + Proto: "HTTP/1.1", + ProtoMajor: 1, + ProtoMinor: 1, + Header: Header{}, + Close: false, + ContentLength: 0, + Host: "test", + RequestURI: "//user@host/is/actually/a/path/", + }, + + noBody, + noTrailer, + noError, + }, + + // Tests a bogus abs_path on the Request-Line (RFC 2616 section 5.1.2) + { + "GET ../../../../etc/passwd HTTP/1.1\r\n" + + "Host: test\r\n\r\n", + nil, + noBody, + noTrailer, + "parse ../../../../etc/passwd: invalid URI for request", + }, + + // Tests missing URL: + { + "GET HTTP/1.1\r\n" + + "Host: test\r\n\r\n", + nil, + noBody, + noTrailer, + "parse : empty url", + }, + + // Tests chunked body with trailer: + { + "POST / HTTP/1.1\r\n" + + "Host: foo.com\r\n" + + "Transfer-Encoding: chunked\r\n\r\n" + + "3\r\nfoo\r\n" + + "3\r\nbar\r\n" + + "0\r\n" + + "Trailer-Key: Trailer-Value\r\n" + + "\r\n", + &Request{ + Method: "POST", + URL: &url.URL{ + Path: "/", + }, + TransferEncoding: []string{"chunked"}, + Proto: "HTTP/1.1", + ProtoMajor: 1, + ProtoMinor: 1, + Header: Header{}, + ContentLength: -1, + Host: "foo.com", + RequestURI: "/", + }, + + "foobar", + Header{ + "Trailer-Key": {"Trailer-Value"}, + }, + noError, + }, + + // CONNECT request with domain name: + { + "CONNECT www.google.com:443 HTTP/1.1\r\n\r\n", + + &Request{ + Method: "CONNECT", + URL: &url.URL{ + Host: "www.google.com:443", + }, + Proto: "HTTP/1.1", + ProtoMajor: 1, + ProtoMinor: 1, + Header: Header{}, + Close: false, + ContentLength: 0, + Host: "www.google.com:443", + RequestURI: "www.google.com:443", + }, + + noBody, + noTrailer, + noError, + }, + + // CONNECT request with IP address: + { + "CONNECT 127.0.0.1:6060 HTTP/1.1\r\n\r\n", + + &Request{ + Method: "CONNECT", + URL: &url.URL{ + Host: "127.0.0.1:6060", + }, + Proto: "HTTP/1.1", + ProtoMajor: 1, + ProtoMinor: 1, + Header: Header{}, + Close: false, + ContentLength: 0, + Host: "127.0.0.1:6060", + RequestURI: "127.0.0.1:6060", + }, + + noBody, + noTrailer, + noError, + }, + + // CONNECT request for RPC: + { + "CONNECT /_goRPC_ HTTP/1.1\r\n\r\n", + + &Request{ + Method: "CONNECT", + URL: &url.URL{ + Path: "/_goRPC_", + }, + Proto: "HTTP/1.1", + ProtoMajor: 1, + ProtoMinor: 1, + Header: Header{}, + Close: false, + ContentLength: 0, + Host: "", + RequestURI: "/_goRPC_", + }, + + noBody, + noTrailer, + noError, + }, + + // SSDP Notify request. golang.org/issue/3692 + { + "NOTIFY * HTTP/1.1\r\nServer: foo\r\n\r\n", + &Request{ + Method: "NOTIFY", + URL: &url.URL{ + Path: "*", + }, + Proto: "HTTP/1.1", + ProtoMajor: 1, + ProtoMinor: 1, + Header: Header{ + "Server": []string{"foo"}, + }, + Close: false, + ContentLength: 0, + RequestURI: "*", + }, + + noBody, + noTrailer, + noError, + }, + + // OPTIONS request. 
Similar to golang.org/issue/3692 + { + "OPTIONS * HTTP/1.1\r\nServer: foo\r\n\r\n", + &Request{ + Method: "OPTIONS", + URL: &url.URL{ + Path: "*", + }, + Proto: "HTTP/1.1", + ProtoMajor: 1, + ProtoMinor: 1, + Header: Header{ + "Server": []string{"foo"}, + }, + Close: false, + ContentLength: 0, + RequestURI: "*", + }, + + noBody, + noTrailer, + noError, + }, +} + +func TestReadRequest(t *testing.T) { + for i := range reqTests { + tt := &reqTests[i] + var braw bytes.Buffer + braw.WriteString(tt.Raw) + req, err := ReadRequest(bufio.NewReader(&braw)) + if err != nil { + if err.Error() != tt.Error { + t.Errorf("#%d: error %q, want error %q", i, err.Error(), tt.Error) + } + continue + } + rbody := req.Body + req.Body = nil + diff(t, fmt.Sprintf("#%d Request", i), req, tt.Req) + var bout bytes.Buffer + if rbody != nil { + _, err := io.Copy(&bout, rbody) + if err != nil { + t.Fatalf("#%d. copying body: %v", i, err) + } + rbody.Close() + } + body := bout.String() + if body != tt.Body { + t.Errorf("#%d: Body = %q want %q", i, body, tt.Body) + } + if !reflect.DeepEqual(tt.Trailer, req.Trailer) { + t.Errorf("#%d. Trailers differ.\n got: %v\nwant: %v", i, req.Trailer, tt.Trailer) + } + } +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/core/http/request.go b/vendor/github.com/Azure/azure-sdk-for-go/core/http/request.go new file mode 100644 index 000000000..a67092066 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/core/http/request.go @@ -0,0 +1,875 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// HTTP Request reading and parsing. + +package http + +import ( + "bufio" + "bytes" + "crypto/tls" + "errors" + "fmt" + "io" + "io/ioutil" + "mime" + "mime/multipart" + "net/textproto" + "net/url" + "strconv" + "strings" + "sync" +) + +const ( + maxValueLength = 4096 + maxHeaderLines = 1024 + chunkSize = 4 << 10 // 4 KB chunks + defaultMaxMemory = 32 << 20 // 32 MB +) + +// ErrMissingFile is returned by FormFile when the provided file field name +// is either not present in the request or not a file field. +var ErrMissingFile = errors.New("http: no such file") + +// HTTP request parsing errors. +type ProtocolError struct { + ErrorString string +} + +func (err *ProtocolError) Error() string { return err.ErrorString } + +var ( + ErrHeaderTooLong = &ProtocolError{"header too long"} + ErrShortBody = &ProtocolError{"entity body too short"} + ErrNotSupported = &ProtocolError{"feature not supported"} + ErrUnexpectedTrailer = &ProtocolError{"trailer header without chunked transfer encoding"} + ErrMissingContentLength = &ProtocolError{"missing ContentLength in HEAD response"} + ErrNotMultipart = &ProtocolError{"request Content-Type isn't multipart/form-data"} + ErrMissingBoundary = &ProtocolError{"no multipart boundary param in Content-Type"} +) + +type badStringError struct { + what string + str string +} + +func (e *badStringError) Error() string { return fmt.Sprintf("%s %q", e.what, e.str) } + +// Headers that Request.Write handles itself and should be skipped. +var reqWriteExcludeHeader = map[string]bool{ + "Host": true, // not in Header map anyway + "User-Agent": true, + "Content-Length": true, + "Transfer-Encoding": true, + "Trailer": true, +} + +// A Request represents an HTTP request received by a server +// or to be sent by a client. +// +// The field semantics differ slightly between client and server +// usage. 
In addition to the notes on the fields below, see the +// documentation for Request.Write and RoundTripper. +type Request struct { + // Method specifies the HTTP method (GET, POST, PUT, etc.). + // For client requests an empty string means GET. + Method string + + // URL specifies either the URI being requested (for server + // requests) or the URL to access (for client requests). + // + // For server requests the URL is parsed from the URI + // supplied on the Request-Line as stored in RequestURI. For + // most requests, fields other than Path and RawQuery will be + // empty. (See RFC 2616, Section 5.1.2) + // + // For client requests, the URL's Host specifies the server to + // connect to, while the Request's Host field optionally + // specifies the Host header value to send in the HTTP + // request. + URL *url.URL + + // The protocol version for incoming requests. + // Client requests always use HTTP/1.1. + Proto string // "HTTP/1.0" + ProtoMajor int // 1 + ProtoMinor int // 0 + + // A header maps request lines to their values. + // If the header says + // + // accept-encoding: gzip, deflate + // Accept-Language: en-us + // Connection: keep-alive + // + // then + // + // Header = map[string][]string{ + // "Accept-Encoding": {"gzip, deflate"}, + // "Accept-Language": {"en-us"}, + // "Connection": {"keep-alive"}, + // } + // + // HTTP defines that header names are case-insensitive. + // The request parser implements this by canonicalizing the + // name, making the first character and any characters + // following a hyphen uppercase and the rest lowercase. + // + // For client requests certain headers are automatically + // added and may override values in Header. + // + // See the documentation for the Request.Write method. + Header Header + + // Body is the request's body. + // + // For client requests a nil body means the request has no + // body, such as a GET request. The HTTP Client's Transport + // is responsible for calling the Close method. + // + // For server requests the Request Body is always non-nil + // but will return EOF immediately when no body is present. + // The Server will close the request body. The ServeHTTP + // Handler does not need to. + Body io.ReadCloser + + // ContentLength records the length of the associated content. + // The value -1 indicates that the length is unknown. + // Values >= 0 indicate that the given number of bytes may + // be read from Body. + // For client requests, a value of 0 means unknown if Body is not nil. + ContentLength int64 + + // TransferEncoding lists the transfer encodings from outermost to + // innermost. An empty list denotes the "identity" encoding. + // TransferEncoding can usually be ignored; chunked encoding is + // automatically added and removed as necessary when sending and + // receiving requests. + TransferEncoding []string + + // Close indicates whether to close the connection after + // replying to this request (for servers) or after sending + // the request (for clients). + Close bool + + // For server requests Host specifies the host on which the + // URL is sought. Per RFC 2616, this is either the value of + // the "Host" header or the host name given in the URL itself. + // It may be of the form "host:port". + // + // For client requests Host optionally overrides the Host + // header to send. If empty, the Request.Write method uses + // the value of URL.Host. + Host string + + // Form contains the parsed form data, including both the URL + // field's query parameters and the POST or PUT form data. 
+ // This field is only available after ParseForm is called. + // The HTTP client ignores Form and uses Body instead. + Form url.Values + + // PostForm contains the parsed form data from POST or PUT + // body parameters. + // This field is only available after ParseForm is called. + // The HTTP client ignores PostForm and uses Body instead. + PostForm url.Values + + // MultipartForm is the parsed multipart form, including file uploads. + // This field is only available after ParseMultipartForm is called. + // The HTTP client ignores MultipartForm and uses Body instead. + MultipartForm *multipart.Form + + // Trailer specifies additional headers that are sent after the request + // body. + // + // For server requests the Trailer map initially contains only the + // trailer keys, with nil values. (The client declares which trailers it + // will later send.) While the handler is reading from Body, it must + // not reference Trailer. After reading from Body returns EOF, Trailer + // can be read again and will contain non-nil values, if they were sent + // by the client. + // + // For client requests Trailer must be initialized to a map containing + // the trailer keys to later send. The values may be nil or their final + // values. The ContentLength must be 0 or -1, to send a chunked request. + // After the HTTP request is sent the map values can be updated while + // the request body is read. Once the body returns EOF, the caller must + // not mutate Trailer. + // + // Few HTTP clients, servers, or proxies support HTTP trailers. + Trailer Header + + // RemoteAddr allows HTTP servers and other software to record + // the network address that sent the request, usually for + // logging. This field is not filled in by ReadRequest and + // has no defined format. The HTTP server in this package + // sets RemoteAddr to an "IP:port" address before invoking a + // handler. + // This field is ignored by the HTTP client. + RemoteAddr string + + // RequestURI is the unmodified Request-URI of the + // Request-Line (RFC 2616, Section 5.1) as sent by the client + // to a server. Usually the URL field should be used instead. + // It is an error to set this field in an HTTP client request. + RequestURI string + + // TLS allows HTTP servers and other software to record + // information about the TLS connection on which the request + // was received. This field is not filled in by ReadRequest. + // The HTTP server in this package sets the field for + // TLS-enabled connections before invoking a handler; + // otherwise it leaves the field nil. + // This field is ignored by the HTTP client. + TLS *tls.ConnectionState +} + +// ProtoAtLeast reports whether the HTTP protocol used +// in the request is at least major.minor. +func (r *Request) ProtoAtLeast(major, minor int) bool { + return r.ProtoMajor > major || + r.ProtoMajor == major && r.ProtoMinor >= minor +} + +// UserAgent returns the client's User-Agent, if sent in the request. +func (r *Request) UserAgent() string { + return r.Header.Get("User-Agent") +} + +// Cookies parses and returns the HTTP cookies sent with the request. +func (r *Request) Cookies() []*Cookie { + return readCookies(r.Header, "") +} + +var ErrNoCookie = errors.New("http: named cookie not present") + +// Cookie returns the named cookie provided in the request or +// ErrNoCookie if not found. +func (r *Request) Cookie(name string) (*Cookie, error) { + for _, c := range readCookies(r.Header, name) { + return c, nil + } + return nil, ErrNoCookie +} + +// AddCookie adds a cookie to the request. 
Per RFC 6265 section 5.4, +// AddCookie does not attach more than one Cookie header field. That +// means all cookies, if any, are written into the same line, +// separated by semicolon. +func (r *Request) AddCookie(c *Cookie) { + s := fmt.Sprintf("%s=%s", sanitizeCookieName(c.Name), sanitizeCookieValue(c.Value)) + if c := r.Header.Get("Cookie"); c != "" { + r.Header.Set("Cookie", c+"; "+s) + } else { + r.Header.Set("Cookie", s) + } +} + +// Referer returns the referring URL, if sent in the request. +// +// Referer is misspelled as in the request itself, a mistake from the +// earliest days of HTTP. This value can also be fetched from the +// Header map as Header["Referer"]; the benefit of making it available +// as a method is that the compiler can diagnose programs that use the +// alternate (correct English) spelling req.Referrer() but cannot +// diagnose programs that use Header["Referrer"]. +func (r *Request) Referer() string { + return r.Header.Get("Referer") +} + +// multipartByReader is a sentinel value. +// Its presence in Request.MultipartForm indicates that parsing of the request +// body has been handed off to a MultipartReader instead of ParseMultipartFrom. +var multipartByReader = &multipart.Form{ + Value: make(map[string][]string), + File: make(map[string][]*multipart.FileHeader), +} + +// MultipartReader returns a MIME multipart reader if this is a +// multipart/form-data POST request, else returns nil and an error. +// Use this function instead of ParseMultipartForm to +// process the request body as a stream. +func (r *Request) MultipartReader() (*multipart.Reader, error) { + if r.MultipartForm == multipartByReader { + return nil, errors.New("http: MultipartReader called twice") + } + if r.MultipartForm != nil { + return nil, errors.New("http: multipart handled by ParseMultipartForm") + } + r.MultipartForm = multipartByReader + return r.multipartReader() +} + +func (r *Request) multipartReader() (*multipart.Reader, error) { + v := r.Header.Get("Content-Type") + if v == "" { + return nil, ErrNotMultipart + } + d, params, err := mime.ParseMediaType(v) + if err != nil || d != "multipart/form-data" { + return nil, ErrNotMultipart + } + boundary, ok := params["boundary"] + if !ok { + return nil, ErrMissingBoundary + } + return multipart.NewReader(r.Body, boundary), nil +} + +// Return value if nonempty, def otherwise. +func valueOrDefault(value, def string) string { + if value != "" { + return value + } + return def +} + +// NOTE: This is not intended to reflect the actual Go version being used. +// It was changed from "Go http package" to "Go 1.1 package http" at the +// time of the Go 1.1 release because the former User-Agent had ended up +// on a blacklist for some intrusion detection systems. +// See https://codereview.appspot.com/7532043. +const defaultUserAgent = "Go 1.1 package http" + +// Write writes an HTTP/1.1 request -- header and body -- in wire format. +// This method consults the following fields of the request: +// Host +// URL +// Method (defaults to "GET") +// Header +// ContentLength +// TransferEncoding +// Body +// +// If Body is present, Content-Length is <= 0 and TransferEncoding +// hasn't been set to "identity", Write adds "Transfer-Encoding: +// chunked" to the header. Body is closed after it is sent. +func (r *Request) Write(w io.Writer) error { + return r.write(w, false, nil) +} + +// WriteProxy is like Write but writes the request in the form +// expected by an HTTP proxy. 
In particular, WriteProxy writes the +// initial Request-URI line of the request with an absolute URI, per +// section 5.1.2 of RFC 2616, including the scheme and host. +// In either case, WriteProxy also writes a Host header, using +// either r.Host or r.URL.Host. +func (r *Request) WriteProxy(w io.Writer) error { + return r.write(w, true, nil) +} + +// extraHeaders may be nil +func (req *Request) write(w io.Writer, usingProxy bool, extraHeaders Header) error { + host := req.Host + if host == "" { + if req.URL == nil { + return errors.New("http: Request.Write on Request with no Host or URL set") + } + host = req.URL.Host + } + + ruri := req.URL.RequestURI() + if usingProxy && req.URL.Scheme != "" && req.URL.Opaque == "" { + ruri = req.URL.Scheme + "://" + host + ruri + } else if req.Method == "CONNECT" && req.URL.Path == "" { + // CONNECT requests normally give just the host and port, not a full URL. + ruri = host + } + // TODO(bradfitz): escape at least newlines in ruri? + + // Wrap the writer in a bufio Writer if it's not already buffered. + // Don't always call NewWriter, as that forces a bytes.Buffer + // and other small bufio Writers to have a minimum 4k buffer + // size. + var bw *bufio.Writer + if _, ok := w.(io.ByteWriter); !ok { + bw = bufio.NewWriter(w) + w = bw + } + + fmt.Fprintf(w, "%s %s HTTP/1.1\r\n", valueOrDefault(req.Method, "GET"), ruri) + + // Header lines + fmt.Fprintf(w, "Host: %s\r\n", host) + + // Use the defaultUserAgent unless the Header contains one, which + // may be blank to not send the header. + userAgent := defaultUserAgent + if req.Header != nil { + if ua := req.Header["User-Agent"]; len(ua) > 0 { + userAgent = ua[0] + } + } + if userAgent != "" { + fmt.Fprintf(w, "User-Agent: %s\r\n", userAgent) + } + + // Process Body,ContentLength,Close,Trailer + tw, err := newTransferWriter(req) + if err != nil { + return err + } + err = tw.WriteHeader(w) + if err != nil { + return err + } + + err = req.Header.WriteSubset(w, reqWriteExcludeHeader) + if err != nil { + return err + } + + if extraHeaders != nil { + err = extraHeaders.Write(w) + if err != nil { + return err + } + } + + io.WriteString(w, "\r\n") + + // Write body and trailer + err = tw.WriteBody(w) + if err != nil { + return err + } + + if bw != nil { + return bw.Flush() + } + return nil +} + +// ParseHTTPVersion parses a HTTP version string. +// "HTTP/1.0" returns (1, 0, true). +func ParseHTTPVersion(vers string) (major, minor int, ok bool) { + const Big = 1000000 // arbitrary upper bound + switch vers { + case "HTTP/1.1": + return 1, 1, true + case "HTTP/1.0": + return 1, 0, true + } + if !strings.HasPrefix(vers, "HTTP/") { + return 0, 0, false + } + dot := strings.Index(vers, ".") + if dot < 0 { + return 0, 0, false + } + major, err := strconv.Atoi(vers[5:dot]) + if err != nil || major < 0 || major > Big { + return 0, 0, false + } + minor, err = strconv.Atoi(vers[dot+1:]) + if err != nil || minor < 0 || minor > Big { + return 0, 0, false + } + return major, minor, true +} + +// NewRequest returns a new Request given a method, URL, and optional body. +// +// If the provided body is also an io.Closer, the returned +// Request.Body is set to body and will be closed by the Client +// methods Do, Post, and PostForm, and Transport.RoundTrip. 
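+//
+// A minimal usage sketch (editor's illustration, not part of the upstream
+// documentation; the URL and JSON payload are placeholders):
+//
+//	body := strings.NewReader(`{"name":"demo"}`)
+//	req, err := NewRequest("POST", "http://example.com/items", body)
+//	if err != nil {
+//		// handle the error
+//	}
+//	req.Header.Set("Content-Type", "application/json")
+//	// body is a *strings.Reader, so req.ContentLength is set automatically
+//	// (see the type switch in the implementation below).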
+func NewRequest(method, urlStr string, body io.Reader) (*Request, error) { + u, err := url.Parse(urlStr) + if err != nil { + return nil, err + } + rc, ok := body.(io.ReadCloser) + if !ok && body != nil { + rc = ioutil.NopCloser(body) + } + req := &Request{ + Method: method, + URL: u, + Proto: "HTTP/1.1", + ProtoMajor: 1, + ProtoMinor: 1, + Header: make(Header), + Body: rc, + Host: u.Host, + } + if body != nil { + switch v := body.(type) { + case *bytes.Buffer: + req.ContentLength = int64(v.Len()) + case *bytes.Reader: + req.ContentLength = int64(v.Len()) + case *strings.Reader: + req.ContentLength = int64(v.Len()) + } + } + + return req, nil +} + +// SetBasicAuth sets the request's Authorization header to use HTTP +// Basic Authentication with the provided username and password. +// +// With HTTP Basic Authentication the provided username and password +// are not encrypted. +func (r *Request) SetBasicAuth(username, password string) { + r.Header.Set("Authorization", "Basic "+basicAuth(username, password)) +} + +// parseRequestLine parses "GET /foo HTTP/1.1" into its three parts. +func parseRequestLine(line string) (method, requestURI, proto string, ok bool) { + s1 := strings.Index(line, " ") + s2 := strings.Index(line[s1+1:], " ") + if s1 < 0 || s2 < 0 { + return + } + s2 += s1 + 1 + return line[:s1], line[s1+1 : s2], line[s2+1:], true +} + +var textprotoReaderPool sync.Pool + +func newTextprotoReader(br *bufio.Reader) *textproto.Reader { + if v := textprotoReaderPool.Get(); v != nil { + tr := v.(*textproto.Reader) + tr.R = br + return tr + } + return textproto.NewReader(br) +} + +func putTextprotoReader(r *textproto.Reader) { + r.R = nil + textprotoReaderPool.Put(r) +} + +// ReadRequest reads and parses a request from b. +func ReadRequest(b *bufio.Reader) (req *Request, err error) { + + tp := newTextprotoReader(b) + req = new(Request) + + // First line: GET /index.html HTTP/1.0 + var s string + if s, err = tp.ReadLine(); err != nil { + return nil, err + } + defer func() { + putTextprotoReader(tp) + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + }() + + var ok bool + req.Method, req.RequestURI, req.Proto, ok = parseRequestLine(s) + if !ok { + return nil, &badStringError{"malformed HTTP request", s} + } + rawurl := req.RequestURI + if req.ProtoMajor, req.ProtoMinor, ok = ParseHTTPVersion(req.Proto); !ok { + return nil, &badStringError{"malformed HTTP version", req.Proto} + } + + // CONNECT requests are used two different ways, and neither uses a full URL: + // The standard use is to tunnel HTTPS through an HTTP proxy. + // It looks like "CONNECT www.google.com:443 HTTP/1.1", and the parameter is + // just the authority section of a URL. This information should go in req.URL.Host. + // + // The net/rpc package also uses CONNECT, but there the parameter is a path + // that starts with a slash. It can be parsed with the regular URL parser, + // and the path will end up in req.URL.Path, where it needs to be in order for + // RPC to work. + justAuthority := req.Method == "CONNECT" && !strings.HasPrefix(rawurl, "/") + if justAuthority { + rawurl = "http://" + rawurl + } + + if req.URL, err = url.ParseRequestURI(rawurl); err != nil { + return nil, err + } + + if justAuthority { + // Strip the bogus "http://" back off. + req.URL.Scheme = "" + } + + // Subsequent lines: Key: value. 
+	mimeHeader, err := tp.ReadMIMEHeader()
+	if err != nil {
+		return nil, err
+	}
+	req.Header = Header(mimeHeader)
+
+	// RFC2616: Must treat
+	//	GET /index.html HTTP/1.1
+	//	Host: www.google.com
+	// and
+	//	GET http://www.google.com/index.html HTTP/1.1
+	//	Host: doesntmatter
+	// the same. In the second case, any Host line is ignored.
+	req.Host = req.URL.Host
+	if req.Host == "" {
+		req.Host = req.Header.get("Host")
+	}
+	delete(req.Header, "Host")
+
+	fixPragmaCacheControl(req.Header)
+
+	err = readTransfer(req, b)
+	if err != nil {
+		return nil, err
+	}
+
+	return req, nil
+}
+
+// MaxBytesReader is similar to io.LimitReader but is intended for
+// limiting the size of incoming request bodies. In contrast to
+// io.LimitReader, MaxBytesReader's result is a ReadCloser, returns a
+// non-EOF error for a Read beyond the limit, and closes the
+// underlying reader when its Close method is called.
+//
+// MaxBytesReader prevents clients from accidentally or maliciously
+// sending a large request and wasting server resources.
+func MaxBytesReader(w ResponseWriter, r io.ReadCloser, n int64) io.ReadCloser {
+	return &maxBytesReader{w: w, r: r, n: n}
+}
+
+type maxBytesReader struct {
+	w       ResponseWriter
+	r       io.ReadCloser // underlying reader
+	n       int64         // max bytes remaining
+	stopped bool
+}
+
+func (l *maxBytesReader) Read(p []byte) (n int, err error) {
+	if l.n <= 0 {
+		if !l.stopped {
+			l.stopped = true
+			if res, ok := l.w.(*response); ok {
+				res.requestTooLarge()
+			}
+		}
+		return 0, errors.New("http: request body too large")
+	}
+	if int64(len(p)) > l.n {
+		p = p[:l.n]
+	}
+	n, err = l.r.Read(p)
+	l.n -= int64(n)
+	return
+}
+
+func (l *maxBytesReader) Close() error {
+	return l.r.Close()
+}
+
+func copyValues(dst, src url.Values) {
+	for k, vs := range src {
+		for _, value := range vs {
+			dst.Add(k, value)
+		}
+	}
+}
+
+func parsePostForm(r *Request) (vs url.Values, err error) {
+	if r.Body == nil {
+		err = errors.New("missing form body")
+		return
+	}
+	ct := r.Header.Get("Content-Type")
+	// RFC 2616, section 7.2.1 - empty type
+	// SHOULD be treated as application/octet-stream
+	if ct == "" {
+		ct = "application/octet-stream"
+	}
+	ct, _, err = mime.ParseMediaType(ct)
+	switch {
+	case ct == "application/x-www-form-urlencoded":
+		var reader io.Reader = r.Body
+		maxFormSize := int64(1<<63 - 1)
+		if _, ok := r.Body.(*maxBytesReader); !ok {
+			maxFormSize = int64(10 << 20) // 10 MB is a lot of text.
+			reader = io.LimitReader(r.Body, maxFormSize+1)
+		}
+		b, e := ioutil.ReadAll(reader)
+		if e != nil {
+			if err == nil {
+				err = e
+			}
+			break
+		}
+		if int64(len(b)) > maxFormSize {
+			err = errors.New("http: POST too large")
+			return
+		}
+		vs, e = url.ParseQuery(string(b))
+		if err == nil {
+			err = e
+		}
+	case ct == "multipart/form-data":
+		// handled by ParseMultipartForm (which is calling us, or should be)
+		// TODO(bradfitz): there are too many possible
+		// orders to call too many functions here.
+		// Clean this up and write more tests.
+		// request_test.go contains the start of this,
+		// in TestParseMultipartFormOrder and others.
+	}
+	return
+}
+
+// ParseForm parses the raw query from the URL and updates r.Form.
+//
+// For POST or PUT requests, it also parses the request body as a form and
+// puts the results into both r.PostForm and r.Form.
+// POST and PUT body parameters take precedence over URL query string values
+// in r.Form.
+//
+// If the request Body's size has not already been limited by MaxBytesReader,
+// the size is capped at 10MB.
+// +// ParseMultipartForm calls ParseForm automatically. +// It is idempotent. +func (r *Request) ParseForm() error { + var err error + if r.PostForm == nil { + if r.Method == "POST" || r.Method == "PUT" || r.Method == "PATCH" { + r.PostForm, err = parsePostForm(r) + } + if r.PostForm == nil { + r.PostForm = make(url.Values) + } + } + if r.Form == nil { + if len(r.PostForm) > 0 { + r.Form = make(url.Values) + copyValues(r.Form, r.PostForm) + } + var newValues url.Values + if r.URL != nil { + var e error + newValues, e = url.ParseQuery(r.URL.RawQuery) + if err == nil { + err = e + } + } + if newValues == nil { + newValues = make(url.Values) + } + if r.Form == nil { + r.Form = newValues + } else { + copyValues(r.Form, newValues) + } + } + return err +} + +// ParseMultipartForm parses a request body as multipart/form-data. +// The whole request body is parsed and up to a total of maxMemory bytes of +// its file parts are stored in memory, with the remainder stored on +// disk in temporary files. +// ParseMultipartForm calls ParseForm if necessary. +// After one call to ParseMultipartForm, subsequent calls have no effect. +func (r *Request) ParseMultipartForm(maxMemory int64) error { + if r.MultipartForm == multipartByReader { + return errors.New("http: multipart handled by MultipartReader") + } + if r.Form == nil { + err := r.ParseForm() + if err != nil { + return err + } + } + if r.MultipartForm != nil { + return nil + } + + mr, err := r.multipartReader() + if err != nil { + return err + } + + f, err := mr.ReadForm(maxMemory) + if err != nil { + return err + } + for k, v := range f.Value { + r.Form[k] = append(r.Form[k], v...) + } + r.MultipartForm = f + + return nil +} + +// FormValue returns the first value for the named component of the query. +// POST and PUT body parameters take precedence over URL query string values. +// FormValue calls ParseMultipartForm and ParseForm if necessary. +// To access multiple values of the same key use ParseForm. +func (r *Request) FormValue(key string) string { + if r.Form == nil { + r.ParseMultipartForm(defaultMaxMemory) + } + if vs := r.Form[key]; len(vs) > 0 { + return vs[0] + } + return "" +} + +// PostFormValue returns the first value for the named component of the POST +// or PUT request body. URL query parameters are ignored. +// PostFormValue calls ParseMultipartForm and ParseForm if necessary. +func (r *Request) PostFormValue(key string) string { + if r.PostForm == nil { + r.ParseMultipartForm(defaultMaxMemory) + } + if vs := r.PostForm[key]; len(vs) > 0 { + return vs[0] + } + return "" +} + +// FormFile returns the first file for the provided form key. +// FormFile calls ParseMultipartForm and ParseForm if necessary. 
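+//
+// A usage sketch (editor's illustration; the multipart field name "upload"
+// is hypothetical):
+//
+//	f, fh, err := req.FormFile("upload")
+//	if err != nil {
+//		// err is ErrMissingFile when the field is absent
+//		return
+//	}
+//	defer f.Close()
+//	log.Println("received file:", fh.Filename)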
+func (r *Request) FormFile(key string) (multipart.File, *multipart.FileHeader, error) { + if r.MultipartForm == multipartByReader { + return nil, nil, errors.New("http: multipart handled by MultipartReader") + } + if r.MultipartForm == nil { + err := r.ParseMultipartForm(defaultMaxMemory) + if err != nil { + return nil, nil, err + } + } + if r.MultipartForm != nil && r.MultipartForm.File != nil { + if fhs := r.MultipartForm.File[key]; len(fhs) > 0 { + f, err := fhs[0].Open() + return f, fhs[0], err + } + } + return nil, nil, ErrMissingFile +} + +func (r *Request) expectsContinue() bool { + return hasToken(r.Header.get("Expect"), "100-continue") +} + +func (r *Request) wantsHttp10KeepAlive() bool { + if r.ProtoMajor != 1 || r.ProtoMinor != 0 { + return false + } + return hasToken(r.Header.get("Connection"), "keep-alive") +} + +func (r *Request) wantsClose() bool { + return hasToken(r.Header.get("Connection"), "close") +} + +func (r *Request) closeBody() { + if r.Body != nil { + r.Body.Close() + } +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/core/http/request_test.go b/vendor/github.com/Azure/azure-sdk-for-go/core/http/request_test.go new file mode 100644 index 000000000..b9fa3c2bf --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/core/http/request_test.go @@ -0,0 +1,610 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package http_test + +import ( + "bufio" + "bytes" + "fmt" + "io" + "io/ioutil" + "mime/multipart" + . "net/http" + "net/http/httptest" + "net/url" + "os" + "reflect" + "regexp" + "strings" + "testing" +) + +func TestQuery(t *testing.T) { + req := &Request{Method: "GET"} + req.URL, _ = url.Parse("http://www.google.com/search?q=foo&q=bar") + if q := req.FormValue("q"); q != "foo" { + t.Errorf(`req.FormValue("q") = %q, want "foo"`, q) + } +} + +func TestPostQuery(t *testing.T) { + req, _ := NewRequest("POST", "http://www.google.com/search?q=foo&q=bar&both=x&prio=1&empty=not", + strings.NewReader("z=post&both=y&prio=2&empty=")) + req.Header.Set("Content-Type", "application/x-www-form-urlencoded; param=value") + + if q := req.FormValue("q"); q != "foo" { + t.Errorf(`req.FormValue("q") = %q, want "foo"`, q) + } + if z := req.FormValue("z"); z != "post" { + t.Errorf(`req.FormValue("z") = %q, want "post"`, z) + } + if bq, found := req.PostForm["q"]; found { + t.Errorf(`req.PostForm["q"] = %q, want no entry in map`, bq) + } + if bz := req.PostFormValue("z"); bz != "post" { + t.Errorf(`req.PostFormValue("z") = %q, want "post"`, bz) + } + if qs := req.Form["q"]; !reflect.DeepEqual(qs, []string{"foo", "bar"}) { + t.Errorf(`req.Form["q"] = %q, want ["foo", "bar"]`, qs) + } + if both := req.Form["both"]; !reflect.DeepEqual(both, []string{"y", "x"}) { + t.Errorf(`req.Form["both"] = %q, want ["y", "x"]`, both) + } + if prio := req.FormValue("prio"); prio != "2" { + t.Errorf(`req.FormValue("prio") = %q, want "2" (from body)`, prio) + } + if empty := req.FormValue("empty"); empty != "" { + t.Errorf(`req.FormValue("empty") = %q, want "" (from body)`, empty) + } +} + +func TestPatchQuery(t *testing.T) { + req, _ := NewRequest("PATCH", "http://www.google.com/search?q=foo&q=bar&both=x&prio=1&empty=not", + strings.NewReader("z=post&both=y&prio=2&empty=")) + req.Header.Set("Content-Type", "application/x-www-form-urlencoded; param=value") + + if q := req.FormValue("q"); q != "foo" { + t.Errorf(`req.FormValue("q") = %q, want "foo"`, q) + } + if z := 
req.FormValue("z"); z != "post" { + t.Errorf(`req.FormValue("z") = %q, want "post"`, z) + } + if bq, found := req.PostForm["q"]; found { + t.Errorf(`req.PostForm["q"] = %q, want no entry in map`, bq) + } + if bz := req.PostFormValue("z"); bz != "post" { + t.Errorf(`req.PostFormValue("z") = %q, want "post"`, bz) + } + if qs := req.Form["q"]; !reflect.DeepEqual(qs, []string{"foo", "bar"}) { + t.Errorf(`req.Form["q"] = %q, want ["foo", "bar"]`, qs) + } + if both := req.Form["both"]; !reflect.DeepEqual(both, []string{"y", "x"}) { + t.Errorf(`req.Form["both"] = %q, want ["y", "x"]`, both) + } + if prio := req.FormValue("prio"); prio != "2" { + t.Errorf(`req.FormValue("prio") = %q, want "2" (from body)`, prio) + } + if empty := req.FormValue("empty"); empty != "" { + t.Errorf(`req.FormValue("empty") = %q, want "" (from body)`, empty) + } +} + +type stringMap map[string][]string +type parseContentTypeTest struct { + shouldError bool + contentType stringMap +} + +var parseContentTypeTests = []parseContentTypeTest{ + {false, stringMap{"Content-Type": {"text/plain"}}}, + // Empty content type is legal - shoult be treated as + // application/octet-stream (RFC 2616, section 7.2.1) + {false, stringMap{}}, + {true, stringMap{"Content-Type": {"text/plain; boundary="}}}, + {false, stringMap{"Content-Type": {"application/unknown"}}}, +} + +func TestParseFormUnknownContentType(t *testing.T) { + for i, test := range parseContentTypeTests { + req := &Request{ + Method: "POST", + Header: Header(test.contentType), + Body: ioutil.NopCloser(strings.NewReader("body")), + } + err := req.ParseForm() + switch { + case err == nil && test.shouldError: + t.Errorf("test %d should have returned error", i) + case err != nil && !test.shouldError: + t.Errorf("test %d should not have returned error, got %v", i, err) + } + } +} + +func TestParseFormInitializeOnError(t *testing.T) { + nilBody, _ := NewRequest("POST", "http://www.google.com/search?q=foo", nil) + tests := []*Request{ + nilBody, + {Method: "GET", URL: nil}, + } + for i, req := range tests { + err := req.ParseForm() + if req.Form == nil { + t.Errorf("%d. Form not initialized, error %v", i, err) + } + if req.PostForm == nil { + t.Errorf("%d. 
PostForm not initialized, error %v", i, err) + } + } +} + +func TestMultipartReader(t *testing.T) { + req := &Request{ + Method: "POST", + Header: Header{"Content-Type": {`multipart/form-data; boundary="foo123"`}}, + Body: ioutil.NopCloser(new(bytes.Buffer)), + } + multipart, err := req.MultipartReader() + if multipart == nil { + t.Errorf("expected multipart; error: %v", err) + } + + req.Header = Header{"Content-Type": {"text/plain"}} + multipart, err = req.MultipartReader() + if multipart != nil { + t.Error("unexpected multipart for text/plain") + } +} + +func TestParseMultipartForm(t *testing.T) { + req := &Request{ + Method: "POST", + Header: Header{"Content-Type": {`multipart/form-data; boundary="foo123"`}}, + Body: ioutil.NopCloser(new(bytes.Buffer)), + } + err := req.ParseMultipartForm(25) + if err == nil { + t.Error("expected multipart EOF, got nil") + } + + req.Header = Header{"Content-Type": {"text/plain"}} + err = req.ParseMultipartForm(25) + if err != ErrNotMultipart { + t.Error("expected ErrNotMultipart for text/plain") + } +} + +func TestRedirect(t *testing.T) { + ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) { + switch r.URL.Path { + case "/": + w.Header().Set("Location", "/foo/") + w.WriteHeader(StatusSeeOther) + case "/foo/": + fmt.Fprintf(w, "foo") + default: + w.WriteHeader(StatusBadRequest) + } + })) + defer ts.Close() + + var end = regexp.MustCompile("/foo/$") + r, err := Get(ts.URL) + if err != nil { + t.Fatal(err) + } + r.Body.Close() + url := r.Request.URL.String() + if r.StatusCode != 200 || !end.MatchString(url) { + t.Fatalf("Get got status %d at %q, want 200 matching /foo/$", r.StatusCode, url) + } +} + +func TestSetBasicAuth(t *testing.T) { + r, _ := NewRequest("GET", "http://example.com/", nil) + r.SetBasicAuth("Aladdin", "open sesame") + if g, e := r.Header.Get("Authorization"), "Basic QWxhZGRpbjpvcGVuIHNlc2FtZQ=="; g != e { + t.Errorf("got header %q, want %q", g, e) + } +} + +func TestMultipartRequest(t *testing.T) { + // Test that we can read the values and files of a + // multipart request with FormValue and FormFile, + // and that ParseMultipartForm can be called multiple times. + req := newTestMultipartRequest(t) + if err := req.ParseMultipartForm(25); err != nil { + t.Fatal("ParseMultipartForm first call:", err) + } + defer req.MultipartForm.RemoveAll() + validateTestMultipartContents(t, req, false) + if err := req.ParseMultipartForm(25); err != nil { + t.Fatal("ParseMultipartForm second call:", err) + } + validateTestMultipartContents(t, req, false) +} + +func TestMultipartRequestAuto(t *testing.T) { + // Test that FormValue and FormFile automatically invoke + // ParseMultipartForm and return the right values. + req := newTestMultipartRequest(t) + defer func() { + if req.MultipartForm != nil { + req.MultipartForm.RemoveAll() + } + }() + validateTestMultipartContents(t, req, true) +} + +func TestMissingFileMultipartRequest(t *testing.T) { + // Test that FormFile returns an error if + // the named file is missing. + req := newTestMultipartRequest(t) + testMissingFile(t, req) +} + +// Test that FormValue invokes ParseMultipartForm. 
+func TestFormValueCallsParseMultipartForm(t *testing.T) { + req, _ := NewRequest("POST", "http://www.google.com/", strings.NewReader("z=post")) + req.Header.Set("Content-Type", "application/x-www-form-urlencoded; param=value") + if req.Form != nil { + t.Fatal("Unexpected request Form, want nil") + } + req.FormValue("z") + if req.Form == nil { + t.Fatal("ParseMultipartForm not called by FormValue") + } +} + +// Test that FormFile invokes ParseMultipartForm. +func TestFormFileCallsParseMultipartForm(t *testing.T) { + req := newTestMultipartRequest(t) + if req.Form != nil { + t.Fatal("Unexpected request Form, want nil") + } + req.FormFile("") + if req.Form == nil { + t.Fatal("ParseMultipartForm not called by FormFile") + } +} + +// Test that ParseMultipartForm errors if called +// after MultipartReader on the same request. +func TestParseMultipartFormOrder(t *testing.T) { + req := newTestMultipartRequest(t) + if _, err := req.MultipartReader(); err != nil { + t.Fatalf("MultipartReader: %v", err) + } + if err := req.ParseMultipartForm(1024); err == nil { + t.Fatal("expected an error from ParseMultipartForm after call to MultipartReader") + } +} + +// Test that MultipartReader errors if called +// after ParseMultipartForm on the same request. +func TestMultipartReaderOrder(t *testing.T) { + req := newTestMultipartRequest(t) + if err := req.ParseMultipartForm(25); err != nil { + t.Fatalf("ParseMultipartForm: %v", err) + } + defer req.MultipartForm.RemoveAll() + if _, err := req.MultipartReader(); err == nil { + t.Fatal("expected an error from MultipartReader after call to ParseMultipartForm") + } +} + +// Test that FormFile errors if called after +// MultipartReader on the same request. +func TestFormFileOrder(t *testing.T) { + req := newTestMultipartRequest(t) + if _, err := req.MultipartReader(); err != nil { + t.Fatalf("MultipartReader: %v", err) + } + if _, _, err := req.FormFile(""); err == nil { + t.Fatal("expected an error from FormFile after call to MultipartReader") + } +} + +var readRequestErrorTests = []struct { + in string + err error +}{ + {"GET / HTTP/1.1\r\nheader:foo\r\n\r\n", nil}, + {"GET / HTTP/1.1\r\nheader:foo\r\n", io.ErrUnexpectedEOF}, + {"", io.EOF}, +} + +func TestReadRequestErrors(t *testing.T) { + for i, tt := range readRequestErrorTests { + _, err := ReadRequest(bufio.NewReader(strings.NewReader(tt.in))) + if err != tt.err { + t.Errorf("%d. 
got error = %v; want %v", i, err, tt.err) + } + } +} + +func TestNewRequestHost(t *testing.T) { + req, err := NewRequest("GET", "http://localhost:1234/", nil) + if err != nil { + t.Fatal(err) + } + if req.Host != "localhost:1234" { + t.Errorf("Host = %q; want localhost:1234", req.Host) + } +} + +func TestNewRequestContentLength(t *testing.T) { + readByte := func(r io.Reader) io.Reader { + var b [1]byte + r.Read(b[:]) + return r + } + tests := []struct { + r io.Reader + want int64 + }{ + {bytes.NewReader([]byte("123")), 3}, + {bytes.NewBuffer([]byte("1234")), 4}, + {strings.NewReader("12345"), 5}, + // Not detected: + {struct{ io.Reader }{strings.NewReader("xyz")}, 0}, + {io.NewSectionReader(strings.NewReader("x"), 0, 6), 0}, + {readByte(io.NewSectionReader(strings.NewReader("xy"), 0, 6)), 0}, + } + for _, tt := range tests { + req, err := NewRequest("POST", "http://localhost/", tt.r) + if err != nil { + t.Fatal(err) + } + if req.ContentLength != tt.want { + t.Errorf("ContentLength(%T) = %d; want %d", tt.r, req.ContentLength, tt.want) + } + } +} + +var parseHTTPVersionTests = []struct { + vers string + major, minor int + ok bool +}{ + {"HTTP/0.9", 0, 9, true}, + {"HTTP/1.0", 1, 0, true}, + {"HTTP/1.1", 1, 1, true}, + {"HTTP/3.14", 3, 14, true}, + + {"HTTP", 0, 0, false}, + {"HTTP/one.one", 0, 0, false}, + {"HTTP/1.1/", 0, 0, false}, + {"HTTP/-1,0", 0, 0, false}, + {"HTTP/0,-1", 0, 0, false}, + {"HTTP/", 0, 0, false}, + {"HTTP/1,1", 0, 0, false}, +} + +func TestParseHTTPVersion(t *testing.T) { + for _, tt := range parseHTTPVersionTests { + major, minor, ok := ParseHTTPVersion(tt.vers) + if ok != tt.ok || major != tt.major || minor != tt.minor { + type version struct { + major, minor int + ok bool + } + t.Errorf("failed to parse %q, expected: %#v, got %#v", tt.vers, version{tt.major, tt.minor, tt.ok}, version{major, minor, ok}) + } + } +} + +type logWrites struct { + t *testing.T + dst *[]string +} + +func (l logWrites) WriteByte(c byte) error { + l.t.Fatalf("unexpected WriteByte call") + return nil +} + +func (l logWrites) Write(p []byte) (n int, err error) { + *l.dst = append(*l.dst, string(p)) + return len(p), nil +} + +func TestRequestWriteBufferedWriter(t *testing.T) { + got := []string{} + req, _ := NewRequest("GET", "http://foo.com/", nil) + req.Write(logWrites{t, &got}) + want := []string{ + "GET / HTTP/1.1\r\n", + "Host: foo.com\r\n", + "User-Agent: " + DefaultUserAgent + "\r\n", + "\r\n", + } + if !reflect.DeepEqual(got, want) { + t.Errorf("Writes = %q\n Want = %q", got, want) + } +} + +func testMissingFile(t *testing.T, req *Request) { + f, fh, err := req.FormFile("missing") + if f != nil { + t.Errorf("FormFile file = %v, want nil", f) + } + if fh != nil { + t.Errorf("FormFile file header = %q, want nil", fh) + } + if err != ErrMissingFile { + t.Errorf("FormFile err = %q, want ErrMissingFile", err) + } +} + +func newTestMultipartRequest(t *testing.T) *Request { + b := strings.NewReader(strings.Replace(message, "\n", "\r\n", -1)) + req, err := NewRequest("POST", "/", b) + if err != nil { + t.Fatal("NewRequest:", err) + } + ctype := fmt.Sprintf(`multipart/form-data; boundary="%s"`, boundary) + req.Header.Set("Content-type", ctype) + return req +} + +func validateTestMultipartContents(t *testing.T, req *Request, allMem bool) { + if g, e := req.FormValue("texta"), textaValue; g != e { + t.Errorf("texta value = %q, want %q", g, e) + } + if g, e := req.FormValue("textb"), textbValue; g != e { + t.Errorf("textb value = %q, want %q", g, e) + } + if g := req.FormValue("missing"); g != "" { 
+ t.Errorf("missing value = %q, want empty string", g) + } + + assertMem := func(n string, fd multipart.File) { + if _, ok := fd.(*os.File); ok { + t.Error(n, " is *os.File, should not be") + } + } + fda := testMultipartFile(t, req, "filea", "filea.txt", fileaContents) + defer fda.Close() + assertMem("filea", fda) + fdb := testMultipartFile(t, req, "fileb", "fileb.txt", filebContents) + defer fdb.Close() + if allMem { + assertMem("fileb", fdb) + } else { + if _, ok := fdb.(*os.File); !ok { + t.Errorf("fileb has unexpected underlying type %T", fdb) + } + } + + testMissingFile(t, req) +} + +func testMultipartFile(t *testing.T, req *Request, key, expectFilename, expectContent string) multipart.File { + f, fh, err := req.FormFile(key) + if err != nil { + t.Fatalf("FormFile(%q): %q", key, err) + } + if fh.Filename != expectFilename { + t.Errorf("filename = %q, want %q", fh.Filename, expectFilename) + } + var b bytes.Buffer + _, err = io.Copy(&b, f) + if err != nil { + t.Fatal("copying contents:", err) + } + if g := b.String(); g != expectContent { + t.Errorf("contents = %q, want %q", g, expectContent) + } + return f +} + +const ( + fileaContents = "This is a test file." + filebContents = "Another test file." + textaValue = "foo" + textbValue = "bar" + boundary = `MyBoundary` +) + +const message = ` +--MyBoundary +Content-Disposition: form-data; name="filea"; filename="filea.txt" +Content-Type: text/plain + +` + fileaContents + ` +--MyBoundary +Content-Disposition: form-data; name="fileb"; filename="fileb.txt" +Content-Type: text/plain + +` + filebContents + ` +--MyBoundary +Content-Disposition: form-data; name="texta" + +` + textaValue + ` +--MyBoundary +Content-Disposition: form-data; name="textb" + +` + textbValue + ` +--MyBoundary-- +` + +func benchmarkReadRequest(b *testing.B, request string) { + request = request + "\n" // final \n + request = strings.Replace(request, "\n", "\r\n", -1) // expand \n to \r\n + b.SetBytes(int64(len(request))) + r := bufio.NewReader(&infiniteReader{buf: []byte(request)}) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + _, err := ReadRequest(r) + if err != nil { + b.Fatalf("failed to read request: %v", err) + } + } +} + +// infiniteReader satisfies Read requests as if the contents of buf +// loop indefinitely. 
+type infiniteReader struct { + buf []byte + offset int +} + +func (r *infiniteReader) Read(b []byte) (int, error) { + n := copy(b, r.buf[r.offset:]) + r.offset = (r.offset + n) % len(r.buf) + return n, nil +} + +func BenchmarkReadRequestChrome(b *testing.B) { + // https://github.com/felixge/node-http-perf/blob/master/fixtures/get.http + benchmarkReadRequest(b, `GET / HTTP/1.1 +Host: localhost:8080 +Connection: keep-alive +Accept: text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8 +User-Agent: Mozilla/5.0 (Macintosh; Intel Mac OS X 10_8_2) AppleWebKit/537.17 (KHTML, like Gecko) Chrome/24.0.1312.52 Safari/537.17 +Accept-Encoding: gzip,deflate,sdch +Accept-Language: en-US,en;q=0.8 +Accept-Charset: ISO-8859-1,utf-8;q=0.7,*;q=0.3 +Cookie: __utma=1.1978842379.1323102373.1323102373.1323102373.1; EPi:NumberOfVisits=1,2012-02-28T13:42:18; CrmSession=5b707226b9563e1bc69084d07a107c98; plushContainerWidth=100%25; plushNoTopMenu=0; hudson_auto_refresh=false +`) +} + +func BenchmarkReadRequestCurl(b *testing.B) { + // curl http://localhost:8080/ + benchmarkReadRequest(b, `GET / HTTP/1.1 +User-Agent: curl/7.27.0 +Host: localhost:8080 +Accept: */* +`) +} + +func BenchmarkReadRequestApachebench(b *testing.B) { + // ab -n 1 -c 1 http://localhost:8080/ + benchmarkReadRequest(b, `GET / HTTP/1.0 +Host: localhost:8080 +User-Agent: ApacheBench/2.3 +Accept: */* +`) +} + +func BenchmarkReadRequestSiege(b *testing.B) { + // siege -r 1 -c 1 http://localhost:8080/ + benchmarkReadRequest(b, `GET / HTTP/1.1 +Host: localhost:8080 +Accept: */* +Accept-Encoding: gzip +User-Agent: JoeDog/1.00 [en] (X11; I; Siege 2.70) +Connection: keep-alive +`) +} + +func BenchmarkReadRequestWrk(b *testing.B) { + // wrk -t 1 -r 1 -c 1 http://localhost:8080/ + benchmarkReadRequest(b, `GET / HTTP/1.1 +Host: localhost:8080 +`) +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/core/http/requestwrite_test.go b/vendor/github.com/Azure/azure-sdk-for-go/core/http/requestwrite_test.go new file mode 100644 index 000000000..dc0e204ca --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/core/http/requestwrite_test.go @@ -0,0 +1,565 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package http + +import ( + "bytes" + "errors" + "fmt" + "io" + "io/ioutil" + "net/url" + "strings" + "testing" +) + +type reqWriteTest struct { + Req Request + Body interface{} // optional []byte or func() io.ReadCloser to populate Req.Body + + // Any of these three may be empty to skip that test. 
+ WantWrite string // Request.Write + WantProxy string // Request.WriteProxy + + WantError error // wanted error from Request.Write +} + +var reqWriteTests = []reqWriteTest{ + // HTTP/1.1 => chunked coding; no body; no trailer + { + Req: Request{ + Method: "GET", + URL: &url.URL{ + Scheme: "http", + Host: "www.techcrunch.com", + Path: "/", + }, + Proto: "HTTP/1.1", + ProtoMajor: 1, + ProtoMinor: 1, + Header: Header{ + "Accept": {"text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8"}, + "Accept-Charset": {"ISO-8859-1,utf-8;q=0.7,*;q=0.7"}, + "Accept-Encoding": {"gzip,deflate"}, + "Accept-Language": {"en-us,en;q=0.5"}, + "Keep-Alive": {"300"}, + "Proxy-Connection": {"keep-alive"}, + "User-Agent": {"Fake"}, + }, + Body: nil, + Close: false, + Host: "www.techcrunch.com", + Form: map[string][]string{}, + }, + + WantWrite: "GET / HTTP/1.1\r\n" + + "Host: www.techcrunch.com\r\n" + + "User-Agent: Fake\r\n" + + "Accept: text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8\r\n" + + "Accept-Charset: ISO-8859-1,utf-8;q=0.7,*;q=0.7\r\n" + + "Accept-Encoding: gzip,deflate\r\n" + + "Accept-Language: en-us,en;q=0.5\r\n" + + "Keep-Alive: 300\r\n" + + "Proxy-Connection: keep-alive\r\n\r\n", + + WantProxy: "GET http://www.techcrunch.com/ HTTP/1.1\r\n" + + "Host: www.techcrunch.com\r\n" + + "User-Agent: Fake\r\n" + + "Accept: text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8\r\n" + + "Accept-Charset: ISO-8859-1,utf-8;q=0.7,*;q=0.7\r\n" + + "Accept-Encoding: gzip,deflate\r\n" + + "Accept-Language: en-us,en;q=0.5\r\n" + + "Keep-Alive: 300\r\n" + + "Proxy-Connection: keep-alive\r\n\r\n", + }, + // HTTP/1.1 => chunked coding; body; empty trailer + { + Req: Request{ + Method: "GET", + URL: &url.URL{ + Scheme: "http", + Host: "www.google.com", + Path: "/search", + }, + ProtoMajor: 1, + ProtoMinor: 1, + Header: Header{}, + TransferEncoding: []string{"chunked"}, + }, + + Body: []byte("abcdef"), + + WantWrite: "GET /search HTTP/1.1\r\n" + + "Host: www.google.com\r\n" + + "User-Agent: Go 1.1 package http\r\n" + + "Transfer-Encoding: chunked\r\n\r\n" + + chunk("abcdef") + chunk(""), + + WantProxy: "GET http://www.google.com/search HTTP/1.1\r\n" + + "Host: www.google.com\r\n" + + "User-Agent: Go 1.1 package http\r\n" + + "Transfer-Encoding: chunked\r\n\r\n" + + chunk("abcdef") + chunk(""), + }, + // HTTP/1.1 POST => chunked coding; body; empty trailer + { + Req: Request{ + Method: "POST", + URL: &url.URL{ + Scheme: "http", + Host: "www.google.com", + Path: "/search", + }, + ProtoMajor: 1, + ProtoMinor: 1, + Header: Header{}, + Close: true, + TransferEncoding: []string{"chunked"}, + }, + + Body: []byte("abcdef"), + + WantWrite: "POST /search HTTP/1.1\r\n" + + "Host: www.google.com\r\n" + + "User-Agent: Go 1.1 package http\r\n" + + "Connection: close\r\n" + + "Transfer-Encoding: chunked\r\n\r\n" + + chunk("abcdef") + chunk(""), + + WantProxy: "POST http://www.google.com/search HTTP/1.1\r\n" + + "Host: www.google.com\r\n" + + "User-Agent: Go 1.1 package http\r\n" + + "Connection: close\r\n" + + "Transfer-Encoding: chunked\r\n\r\n" + + chunk("abcdef") + chunk(""), + }, + + // HTTP/1.1 POST with Content-Length, no chunking + { + Req: Request{ + Method: "POST", + URL: &url.URL{ + Scheme: "http", + Host: "www.google.com", + Path: "/search", + }, + ProtoMajor: 1, + ProtoMinor: 1, + Header: Header{}, + Close: true, + ContentLength: 6, + }, + + Body: []byte("abcdef"), + + WantWrite: "POST /search HTTP/1.1\r\n" + + "Host: www.google.com\r\n" + + "User-Agent: Go 1.1 package http\r\n" + + 
"Connection: close\r\n" + + "Content-Length: 6\r\n" + + "\r\n" + + "abcdef", + + WantProxy: "POST http://www.google.com/search HTTP/1.1\r\n" + + "Host: www.google.com\r\n" + + "User-Agent: Go 1.1 package http\r\n" + + "Connection: close\r\n" + + "Content-Length: 6\r\n" + + "\r\n" + + "abcdef", + }, + + // HTTP/1.1 POST with Content-Length in headers + { + Req: Request{ + Method: "POST", + URL: mustParseURL("http://example.com/"), + Host: "example.com", + Header: Header{ + "Content-Length": []string{"10"}, // ignored + }, + ContentLength: 6, + }, + + Body: []byte("abcdef"), + + WantWrite: "POST / HTTP/1.1\r\n" + + "Host: example.com\r\n" + + "User-Agent: Go 1.1 package http\r\n" + + "Content-Length: 6\r\n" + + "\r\n" + + "abcdef", + + WantProxy: "POST http://example.com/ HTTP/1.1\r\n" + + "Host: example.com\r\n" + + "User-Agent: Go 1.1 package http\r\n" + + "Content-Length: 6\r\n" + + "\r\n" + + "abcdef", + }, + + // default to HTTP/1.1 + { + Req: Request{ + Method: "GET", + URL: mustParseURL("/search"), + Host: "www.google.com", + }, + + WantWrite: "GET /search HTTP/1.1\r\n" + + "Host: www.google.com\r\n" + + "User-Agent: Go 1.1 package http\r\n" + + "\r\n", + }, + + // Request with a 0 ContentLength and a 0 byte body. + { + Req: Request{ + Method: "POST", + URL: mustParseURL("/"), + Host: "example.com", + ProtoMajor: 1, + ProtoMinor: 1, + ContentLength: 0, // as if unset by user + }, + + Body: func() io.ReadCloser { return ioutil.NopCloser(io.LimitReader(strings.NewReader("xx"), 0)) }, + + // RFC 2616 Section 14.13 says Content-Length should be specified + // unless body is prohibited by the request method. + // Also, nginx expects it for POST and PUT. + WantWrite: "POST / HTTP/1.1\r\n" + + "Host: example.com\r\n" + + "User-Agent: Go 1.1 package http\r\n" + + "Content-Length: 0\r\n" + + "\r\n", + + WantProxy: "POST / HTTP/1.1\r\n" + + "Host: example.com\r\n" + + "User-Agent: Go 1.1 package http\r\n" + + "Content-Length: 0\r\n" + + "\r\n", + }, + + // Request with a 0 ContentLength and a 1 byte body. + { + Req: Request{ + Method: "POST", + URL: mustParseURL("/"), + Host: "example.com", + ProtoMajor: 1, + ProtoMinor: 1, + ContentLength: 0, // as if unset by user + }, + + Body: func() io.ReadCloser { return ioutil.NopCloser(io.LimitReader(strings.NewReader("xx"), 1)) }, + + WantWrite: "POST / HTTP/1.1\r\n" + + "Host: example.com\r\n" + + "User-Agent: Go 1.1 package http\r\n" + + "Transfer-Encoding: chunked\r\n\r\n" + + chunk("x") + chunk(""), + + WantProxy: "POST / HTTP/1.1\r\n" + + "Host: example.com\r\n" + + "User-Agent: Go 1.1 package http\r\n" + + "Transfer-Encoding: chunked\r\n\r\n" + + chunk("x") + chunk(""), + }, + + // Request with a ContentLength of 10 but a 5 byte body. + { + Req: Request{ + Method: "POST", + URL: mustParseURL("/"), + Host: "example.com", + ProtoMajor: 1, + ProtoMinor: 1, + ContentLength: 10, // but we're going to send only 5 bytes + }, + Body: []byte("12345"), + WantError: errors.New("http: Request.ContentLength=10 with Body length 5"), + }, + + // Request with a ContentLength of 4 but an 8 byte body. + { + Req: Request{ + Method: "POST", + URL: mustParseURL("/"), + Host: "example.com", + ProtoMajor: 1, + ProtoMinor: 1, + ContentLength: 4, // but we're going to try to send 8 bytes + }, + Body: []byte("12345678"), + WantError: errors.New("http: Request.ContentLength=4 with Body length 8"), + }, + + // Request with a 5 ContentLength and nil body. 
+ { + Req: Request{ + Method: "POST", + URL: mustParseURL("/"), + Host: "example.com", + ProtoMajor: 1, + ProtoMinor: 1, + ContentLength: 5, // but we'll omit the body + }, + WantError: errors.New("http: Request.ContentLength=5 with nil Body"), + }, + + // Request with a 0 ContentLength and a body with 1 byte content and an error. + { + Req: Request{ + Method: "POST", + URL: mustParseURL("/"), + Host: "example.com", + ProtoMajor: 1, + ProtoMinor: 1, + ContentLength: 0, // as if unset by user + }, + + Body: func() io.ReadCloser { + err := errors.New("Custom reader error") + errReader := &errorReader{err} + return ioutil.NopCloser(io.MultiReader(strings.NewReader("x"), errReader)) + }, + + WantError: errors.New("Custom reader error"), + }, + + // Request with a 0 ContentLength and a body without content and an error. + { + Req: Request{ + Method: "POST", + URL: mustParseURL("/"), + Host: "example.com", + ProtoMajor: 1, + ProtoMinor: 1, + ContentLength: 0, // as if unset by user + }, + + Body: func() io.ReadCloser { + err := errors.New("Custom reader error") + errReader := &errorReader{err} + return ioutil.NopCloser(errReader) + }, + + WantError: errors.New("Custom reader error"), + }, + + // Verify that DumpRequest preserves the HTTP version number, doesn't add a Host, + // and doesn't add a User-Agent. + { + Req: Request{ + Method: "GET", + URL: mustParseURL("/foo"), + ProtoMajor: 1, + ProtoMinor: 0, + Header: Header{ + "X-Foo": []string{"X-Bar"}, + }, + }, + + WantWrite: "GET /foo HTTP/1.1\r\n" + + "Host: \r\n" + + "User-Agent: Go 1.1 package http\r\n" + + "X-Foo: X-Bar\r\n\r\n", + }, + + // If no Request.Host and no Request.URL.Host, we send + // an empty Host header, and don't use + // Request.Header["Host"]. This is just testing that + // we don't change Go 1.0 behavior. + { + Req: Request{ + Method: "GET", + Host: "", + URL: &url.URL{ + Scheme: "http", + Host: "", + Path: "/search", + }, + ProtoMajor: 1, + ProtoMinor: 1, + Header: Header{ + "Host": []string{"bad.example.com"}, + }, + }, + + WantWrite: "GET /search HTTP/1.1\r\n" + + "Host: \r\n" + + "User-Agent: Go 1.1 package http\r\n\r\n", + }, + + // Opaque test #1 from golang.org/issue/4860 + { + Req: Request{ + Method: "GET", + URL: &url.URL{ + Scheme: "http", + Host: "www.google.com", + Opaque: "/%2F/%2F/", + }, + ProtoMajor: 1, + ProtoMinor: 1, + Header: Header{}, + }, + + WantWrite: "GET /%2F/%2F/ HTTP/1.1\r\n" + + "Host: www.google.com\r\n" + + "User-Agent: Go 1.1 package http\r\n\r\n", + }, + + // Opaque test #2 from golang.org/issue/4860 + { + Req: Request{ + Method: "GET", + URL: &url.URL{ + Scheme: "http", + Host: "x.google.com", + Opaque: "//y.google.com/%2F/%2F/", + }, + ProtoMajor: 1, + ProtoMinor: 1, + Header: Header{}, + }, + + WantWrite: "GET http://y.google.com/%2F/%2F/ HTTP/1.1\r\n" + + "Host: x.google.com\r\n" + + "User-Agent: Go 1.1 package http\r\n\r\n", + }, + + // Testing custom case in header keys. Issue 5022. 
+	{
+		Req: Request{
+			Method: "GET",
+			URL: &url.URL{
+				Scheme: "http",
+				Host:   "www.google.com",
+				Path:   "/",
+			},
+			Proto:      "HTTP/1.1",
+			ProtoMajor: 1,
+			ProtoMinor: 1,
+			Header: Header{
+				"ALL-CAPS": {"x"},
+			},
+		},
+
+		WantWrite: "GET / HTTP/1.1\r\n" +
+			"Host: www.google.com\r\n" +
+			"User-Agent: Go 1.1 package http\r\n" +
+			"ALL-CAPS: x\r\n" +
+			"\r\n",
+	},
+}
+
+func TestRequestWrite(t *testing.T) {
+	for i := range reqWriteTests {
+		tt := &reqWriteTests[i]
+
+		setBody := func() {
+			if tt.Body == nil {
+				return
+			}
+			switch b := tt.Body.(type) {
+			case []byte:
+				tt.Req.Body = ioutil.NopCloser(bytes.NewReader(b))
+			case func() io.ReadCloser:
+				tt.Req.Body = b()
+			}
+		}
+		setBody()
+		if tt.Req.Header == nil {
+			tt.Req.Header = make(Header)
+		}
+
+		var braw bytes.Buffer
+		err := tt.Req.Write(&braw)
+		if g, e := fmt.Sprintf("%v", err), fmt.Sprintf("%v", tt.WantError); g != e {
+			t.Errorf("writing #%d, err = %q, want %q", i, g, e)
+			continue
+		}
+		if err != nil {
+			continue
+		}
+
+		if tt.WantWrite != "" {
+			sraw := braw.String()
+			if sraw != tt.WantWrite {
+				t.Errorf("Test %d, expecting:\n%s\nGot:\n%s\n", i, tt.WantWrite, sraw)
+				continue
+			}
+		}
+
+		if tt.WantProxy != "" {
+			setBody()
+			var praw bytes.Buffer
+			err = tt.Req.WriteProxy(&praw)
+			if err != nil {
+				t.Errorf("WriteProxy #%d: %s", i, err)
+				continue
+			}
+			sraw := praw.String()
+			if sraw != tt.WantProxy {
+				t.Errorf("Test Proxy %d, expecting:\n%s\nGot:\n%s\n", i, tt.WantProxy, sraw)
+				continue
+			}
+		}
+	}
+}
+
+type closeChecker struct {
+	io.Reader
+	closed bool
+}
+
+func (rc *closeChecker) Close() error {
+	rc.closed = true
+	return nil
+}
+
+// TestRequestWriteClosesBody tests that Request.Write does close its request.Body.
+// It also indirectly tests NewRequest and that it doesn't wrap an existing Closer
+// inside a NopCloser, and that it serializes it correctly.
+func TestRequestWriteClosesBody(t *testing.T) {
+	rc := &closeChecker{Reader: strings.NewReader("my body")}
+	req, _ := NewRequest("POST", "http://foo.com/", rc)
+	if req.ContentLength != 0 {
+		t.Errorf("got req.ContentLength %d, want 0", req.ContentLength)
+	}
+	buf := new(bytes.Buffer)
+	req.Write(buf)
+	if !rc.closed {
+		t.Error("body not closed after write")
+	}
+	expected := "POST / HTTP/1.1\r\n" +
+		"Host: foo.com\r\n" +
+		"User-Agent: Go 1.1 package http\r\n" +
+		"Transfer-Encoding: chunked\r\n\r\n" +
+		// TODO: currently we don't buffer before chunking, so we get a
+		// single "m" chunk before the other chunks, as this was the 1-byte
+		// read from our MultiReader where we stitched the Body back together
+		// after sniffing whether the Body was 0 bytes or not.
+		chunk("m") +
+		chunk("y body") +
+		chunk("")
+	if buf.String() != expected {
+		t.Errorf("write:\n got: %s\nwant: %s", buf.String(), expected)
+	}
+}
+
+func chunk(s string) string {
+	return fmt.Sprintf("%x\r\n%s\r\n", len(s), s)
+}
+
+func mustParseURL(s string) *url.URL {
+	u, err := url.Parse(s)
+	if err != nil {
+		panic(fmt.Sprintf("Error parsing URL %q: %v", s, err))
+	}
+	return u
+}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/core/http/response.go b/vendor/github.com/Azure/azure-sdk-for-go/core/http/response.go
new file mode 100644
index 000000000..e4cfe802b
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/core/http/response.go
@@ -0,0 +1,291 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// HTTP Response reading and parsing.
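+//
+// Editor's sketch of the typical read path through this file (assumes an
+// established net.Conn "conn" and a previously written *Request "req"):
+//
+//	br := bufio.NewReader(conn)
+//	resp, err := ReadResponse(br, req) // req may be nil; a GET is then assumed
+//	if err != nil {
+//		// handle the error
+//	}
+//	defer resp.Body.Close()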
+ +package http + +import ( + "bufio" + "bytes" + "errors" + "github.com/Azure/azure-sdk-for-go/core/tls" + "io" + "net/textproto" + "net/url" + "strconv" + "strings" +) + +var respExcludeHeader = map[string]bool{ + "Content-Length": true, + "Transfer-Encoding": true, + "Trailer": true, +} + +// Response represents the response from an HTTP request. +// +type Response struct { + Status string // e.g. "200 OK" + StatusCode int // e.g. 200 + Proto string // e.g. "HTTP/1.0" + ProtoMajor int // e.g. 1 + ProtoMinor int // e.g. 0 + + // Header maps header keys to values. If the response had multiple + // headers with the same key, they may be concatenated, with comma + // delimiters. (Section 4.2 of RFC 2616 requires that multiple headers + // be semantically equivalent to a comma-delimited sequence.) Values + // duplicated by other fields in this struct (e.g., ContentLength) are + // omitted from Header. + // + // Keys in the map are canonicalized (see CanonicalHeaderKey). + Header Header + + // Body represents the response body. + // + // The http Client and Transport guarantee that Body is always + // non-nil, even on responses without a body or responses with + // a zero-length body. It is the caller's responsibility to + // close Body. + // + // The Body is automatically dechunked if the server replied + // with a "chunked" Transfer-Encoding. + Body io.ReadCloser + + // ContentLength records the length of the associated content. The + // value -1 indicates that the length is unknown. Unless Request.Method + // is "HEAD", values >= 0 indicate that the given number of bytes may + // be read from Body. + ContentLength int64 + + // Contains transfer encodings from outer-most to inner-most. Value is + // nil, means that "identity" encoding is used. + TransferEncoding []string + + // Close records whether the header directed that the connection be + // closed after reading Body. The value is advice for clients: neither + // ReadResponse nor Response.Write ever closes a connection. + Close bool + + // Trailer maps trailer keys to values, in the same + // format as the header. + Trailer Header + + // The Request that was sent to obtain this Response. + // Request's Body is nil (having already been consumed). + // This is only populated for Client requests. + Request *Request + + // TLS contains information about the TLS connection on which the + // response was received. It is nil for unencrypted responses. + // The pointer is shared between responses and should not be + // modified. + TLS *tls.ConnectionState +} + +// Cookies parses and returns the cookies set in the Set-Cookie headers. +func (r *Response) Cookies() []*Cookie { + return readSetCookies(r.Header) +} + +var ErrNoLocation = errors.New("http: no Location header in response") + +// Location returns the URL of the response's "Location" header, +// if present. Relative redirects are resolved relative to +// the Response's Request. ErrNoLocation is returned if no +// Location header is present. +func (r *Response) Location() (*url.URL, error) { + lv := r.Header.Get("Location") + if lv == "" { + return nil, ErrNoLocation + } + if r.Request != nil && r.Request.URL != nil { + return r.Request.URL.Parse(lv) + } + return url.Parse(lv) +} + +// ReadResponse reads and returns an HTTP response from r. +// The req parameter optionally specifies the Request that corresponds +// to this Response. If nil, a GET request is assumed. +// Clients must call resp.Body.Close when finished reading resp.Body. 
+// After that call, clients can inspect resp.Trailer to find key/value +// pairs included in the response trailer. +func ReadResponse(r *bufio.Reader, req *Request) (*Response, error) { + tp := textproto.NewReader(r) + resp := &Response{ + Request: req, + } + + // Parse the first line of the response. + line, err := tp.ReadLine() + if err != nil { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + return nil, err + } + f := strings.SplitN(line, " ", 3) + if len(f) < 2 { + return nil, &badStringError{"malformed HTTP response", line} + } + reasonPhrase := "" + if len(f) > 2 { + reasonPhrase = f[2] + } + resp.Status = f[1] + " " + reasonPhrase + resp.StatusCode, err = strconv.Atoi(f[1]) + if err != nil { + return nil, &badStringError{"malformed HTTP status code", f[1]} + } + + resp.Proto = f[0] + var ok bool + if resp.ProtoMajor, resp.ProtoMinor, ok = ParseHTTPVersion(resp.Proto); !ok { + return nil, &badStringError{"malformed HTTP version", resp.Proto} + } + + // Parse the response headers. + mimeHeader, err := tp.ReadMIMEHeader() + if err != nil { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + return nil, err + } + resp.Header = Header(mimeHeader) + + fixPragmaCacheControl(resp.Header) + + err = readTransfer(resp, r) + if err != nil { + return nil, err + } + + return resp, nil +} + +// RFC2616: Should treat +// Pragma: no-cache +// like +// Cache-Control: no-cache +func fixPragmaCacheControl(header Header) { + if hp, ok := header["Pragma"]; ok && len(hp) > 0 && hp[0] == "no-cache" { + if _, presentcc := header["Cache-Control"]; !presentcc { + header["Cache-Control"] = []string{"no-cache"} + } + } +} + +// ProtoAtLeast reports whether the HTTP protocol used +// in the response is at least major.minor. +func (r *Response) ProtoAtLeast(major, minor int) bool { + return r.ProtoMajor > major || + r.ProtoMajor == major && r.ProtoMinor >= minor +} + +// Writes the response (header, body and trailer) in wire format. This method +// consults the following fields of the response: +// +// StatusCode +// ProtoMajor +// ProtoMinor +// Request.Method +// TransferEncoding +// Trailer +// Body +// ContentLength +// Header, values for non-canonical keys will have unpredictable behavior +// +// Body is closed after it is sent. +func (r *Response) Write(w io.Writer) error { + // Status line + text := r.Status + if text == "" { + var ok bool + text, ok = statusText[r.StatusCode] + if !ok { + text = "status code " + strconv.Itoa(r.StatusCode) + } + } + protoMajor, protoMinor := strconv.Itoa(r.ProtoMajor), strconv.Itoa(r.ProtoMinor) + statusCode := strconv.Itoa(r.StatusCode) + " " + text = strings.TrimPrefix(text, statusCode) + if _, err := io.WriteString(w, "HTTP/"+protoMajor+"."+protoMinor+" "+statusCode+text+"\r\n"); err != nil { + return err + } + + // Clone it, so we can modify r1 as needed. + r1 := new(Response) + *r1 = *r + if r1.ContentLength == 0 && r1.Body != nil { + // Is it actually 0 length? Or just unknown? + var buf [1]byte + n, err := r1.Body.Read(buf[:]) + if err != nil && err != io.EOF { + return err + } + if n == 0 { + // Reset it to a known zero reader, in case underlying one + // is unhappy being read repeatedly. 
+ r1.Body = eofReader + } else { + r1.ContentLength = -1 + r1.Body = struct { + io.Reader + io.Closer + }{ + io.MultiReader(bytes.NewReader(buf[:1]), r.Body), + r.Body, + } + } + } + // If we're sending a non-chunked HTTP/1.1 response without a + // content-length, the only way to do that is the old HTTP/1.0 + // way, by noting the EOF with a connection close, so we need + // to set Close. + if r1.ContentLength == -1 && !r1.Close && r1.ProtoAtLeast(1, 1) && !chunked(r1.TransferEncoding) { + r1.Close = true + } + + // Process Body,ContentLength,Close,Trailer + tw, err := newTransferWriter(r1) + if err != nil { + return err + } + err = tw.WriteHeader(w) + if err != nil { + return err + } + + // Rest of header + err = r.Header.WriteSubset(w, respExcludeHeader) + if err != nil { + return err + } + + // contentLengthAlreadySent may have been already sent for + // POST/PUT requests, even if zero length. See Issue 8180. + contentLengthAlreadySent := tw.shouldSendContentLength() + if r1.ContentLength == 0 && !chunked(r1.TransferEncoding) && !contentLengthAlreadySent { + if _, err := io.WriteString(w, "Content-Length: 0\r\n"); err != nil { + return err + } + } + + // End-of-header + if _, err := io.WriteString(w, "\r\n"); err != nil { + return err + } + + // Write body and trailer + err = tw.WriteBody(w) + if err != nil { + return err + } + + // Success + return nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/core/http/response_test.go b/vendor/github.com/Azure/azure-sdk-for-go/core/http/response_test.go new file mode 100644 index 000000000..4b8946f7a --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/core/http/response_test.go @@ -0,0 +1,645 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package http + +import ( + "bufio" + "bytes" + "compress/gzip" + "crypto/rand" + "fmt" + "io" + "io/ioutil" + "net/url" + "reflect" + "regexp" + "strings" + "testing" +) + +type respTest struct { + Raw string + Resp Response + Body string +} + +func dummyReq(method string) *Request { + return &Request{Method: method} +} + +func dummyReq11(method string) *Request { + return &Request{Method: method, Proto: "HTTP/1.1", ProtoMajor: 1, ProtoMinor: 1} +} + +var respTests = []respTest{ + // Unchunked response without Content-Length. + { + "HTTP/1.0 200 OK\r\n" + + "Connection: close\r\n" + + "\r\n" + + "Body here\n", + + Response{ + Status: "200 OK", + StatusCode: 200, + Proto: "HTTP/1.0", + ProtoMajor: 1, + ProtoMinor: 0, + Request: dummyReq("GET"), + Header: Header{ + "Connection": {"close"}, // TODO(rsc): Delete? + }, + Close: true, + ContentLength: -1, + }, + + "Body here\n", + }, + + // Unchunked HTTP/1.1 response without Content-Length or + // Connection headers. + { + "HTTP/1.1 200 OK\r\n" + + "\r\n" + + "Body here\n", + + Response{ + Status: "200 OK", + StatusCode: 200, + Proto: "HTTP/1.1", + ProtoMajor: 1, + ProtoMinor: 1, + Header: Header{}, + Request: dummyReq("GET"), + Close: true, + ContentLength: -1, + }, + + "Body here\n", + }, + + // Unchunked HTTP/1.1 204 response without Content-Length. + { + "HTTP/1.1 204 No Content\r\n" + + "\r\n" + + "Body should not be read!\n", + + Response{ + Status: "204 No Content", + StatusCode: 204, + Proto: "HTTP/1.1", + ProtoMajor: 1, + ProtoMinor: 1, + Header: Header{}, + Request: dummyReq("GET"), + Close: false, + ContentLength: 0, + }, + + "", + }, + + // Unchunked response with Content-Length. 
+ { + "HTTP/1.0 200 OK\r\n" + + "Content-Length: 10\r\n" + + "Connection: close\r\n" + + "\r\n" + + "Body here\n", + + Response{ + Status: "200 OK", + StatusCode: 200, + Proto: "HTTP/1.0", + ProtoMajor: 1, + ProtoMinor: 0, + Request: dummyReq("GET"), + Header: Header{ + "Connection": {"close"}, + "Content-Length": {"10"}, + }, + Close: true, + ContentLength: 10, + }, + + "Body here\n", + }, + + // Chunked response without Content-Length. + { + "HTTP/1.1 200 OK\r\n" + + "Transfer-Encoding: chunked\r\n" + + "\r\n" + + "0a\r\n" + + "Body here\n\r\n" + + "09\r\n" + + "continued\r\n" + + "0\r\n" + + "\r\n", + + Response{ + Status: "200 OK", + StatusCode: 200, + Proto: "HTTP/1.1", + ProtoMajor: 1, + ProtoMinor: 1, + Request: dummyReq("GET"), + Header: Header{}, + Close: false, + ContentLength: -1, + TransferEncoding: []string{"chunked"}, + }, + + "Body here\ncontinued", + }, + + // Chunked response with Content-Length. + { + "HTTP/1.1 200 OK\r\n" + + "Transfer-Encoding: chunked\r\n" + + "Content-Length: 10\r\n" + + "\r\n" + + "0a\r\n" + + "Body here\n\r\n" + + "0\r\n" + + "\r\n", + + Response{ + Status: "200 OK", + StatusCode: 200, + Proto: "HTTP/1.1", + ProtoMajor: 1, + ProtoMinor: 1, + Request: dummyReq("GET"), + Header: Header{}, + Close: false, + ContentLength: -1, + TransferEncoding: []string{"chunked"}, + }, + + "Body here\n", + }, + + // Chunked response in response to a HEAD request + { + "HTTP/1.1 200 OK\r\n" + + "Transfer-Encoding: chunked\r\n" + + "\r\n", + + Response{ + Status: "200 OK", + StatusCode: 200, + Proto: "HTTP/1.1", + ProtoMajor: 1, + ProtoMinor: 1, + Request: dummyReq("HEAD"), + Header: Header{}, + TransferEncoding: []string{"chunked"}, + Close: false, + ContentLength: -1, + }, + + "", + }, + + // Content-Length in response to a HEAD request + { + "HTTP/1.0 200 OK\r\n" + + "Content-Length: 256\r\n" + + "\r\n", + + Response{ + Status: "200 OK", + StatusCode: 200, + Proto: "HTTP/1.0", + ProtoMajor: 1, + ProtoMinor: 0, + Request: dummyReq("HEAD"), + Header: Header{"Content-Length": {"256"}}, + TransferEncoding: nil, + Close: true, + ContentLength: 256, + }, + + "", + }, + + // Content-Length in response to a HEAD request with HTTP/1.1 + { + "HTTP/1.1 200 OK\r\n" + + "Content-Length: 256\r\n" + + "\r\n", + + Response{ + Status: "200 OK", + StatusCode: 200, + Proto: "HTTP/1.1", + ProtoMajor: 1, + ProtoMinor: 1, + Request: dummyReq("HEAD"), + Header: Header{"Content-Length": {"256"}}, + TransferEncoding: nil, + Close: false, + ContentLength: 256, + }, + + "", + }, + + // No Content-Length or Chunked in response to a HEAD request + { + "HTTP/1.0 200 OK\r\n" + + "\r\n", + + Response{ + Status: "200 OK", + StatusCode: 200, + Proto: "HTTP/1.0", + ProtoMajor: 1, + ProtoMinor: 0, + Request: dummyReq("HEAD"), + Header: Header{}, + TransferEncoding: nil, + Close: true, + ContentLength: -1, + }, + + "", + }, + + // explicit Content-Length of 0. + { + "HTTP/1.1 200 OK\r\n" + + "Content-Length: 0\r\n" + + "\r\n", + + Response{ + Status: "200 OK", + StatusCode: 200, + Proto: "HTTP/1.1", + ProtoMajor: 1, + ProtoMinor: 1, + Request: dummyReq("GET"), + Header: Header{ + "Content-Length": {"0"}, + }, + Close: false, + ContentLength: 0, + }, + + "", + }, + + // Status line without a Reason-Phrase, but trailing space. 
+ // (permitted by RFC 2616) + { + "HTTP/1.0 303 \r\n\r\n", + Response{ + Status: "303 ", + StatusCode: 303, + Proto: "HTTP/1.0", + ProtoMajor: 1, + ProtoMinor: 0, + Request: dummyReq("GET"), + Header: Header{}, + Close: true, + ContentLength: -1, + }, + + "", + }, + + // Status line without a Reason-Phrase, and no trailing space. + // (not permitted by RFC 2616, but we'll accept it anyway) + { + "HTTP/1.0 303\r\n\r\n", + Response{ + Status: "303 ", + StatusCode: 303, + Proto: "HTTP/1.0", + ProtoMajor: 1, + ProtoMinor: 0, + Request: dummyReq("GET"), + Header: Header{}, + Close: true, + ContentLength: -1, + }, + + "", + }, + + // golang.org/issue/4767: don't special-case multipart/byteranges responses + { + `HTTP/1.1 206 Partial Content +Connection: close +Content-Type: multipart/byteranges; boundary=18a75608c8f47cef + +some body`, + Response{ + Status: "206 Partial Content", + StatusCode: 206, + Proto: "HTTP/1.1", + ProtoMajor: 1, + ProtoMinor: 1, + Request: dummyReq("GET"), + Header: Header{ + "Content-Type": []string{"multipart/byteranges; boundary=18a75608c8f47cef"}, + }, + Close: true, + ContentLength: -1, + }, + + "some body", + }, + + // Unchunked response without Content-Length, Request is nil + { + "HTTP/1.0 200 OK\r\n" + + "Connection: close\r\n" + + "\r\n" + + "Body here\n", + + Response{ + Status: "200 OK", + StatusCode: 200, + Proto: "HTTP/1.0", + ProtoMajor: 1, + ProtoMinor: 0, + Header: Header{ + "Connection": {"close"}, // TODO(rsc): Delete? + }, + Close: true, + ContentLength: -1, + }, + + "Body here\n", + }, +} + +func TestReadResponse(t *testing.T) { + for i, tt := range respTests { + resp, err := ReadResponse(bufio.NewReader(strings.NewReader(tt.Raw)), tt.Resp.Request) + if err != nil { + t.Errorf("#%d: %v", i, err) + continue + } + rbody := resp.Body + resp.Body = nil + diff(t, fmt.Sprintf("#%d Response", i), resp, &tt.Resp) + var bout bytes.Buffer + if rbody != nil { + _, err = io.Copy(&bout, rbody) + if err != nil { + t.Errorf("#%d: %v", i, err) + continue + } + rbody.Close() + } + body := bout.String() + if body != tt.Body { + t.Errorf("#%d: Body = %q want %q", i, body, tt.Body) + } + } +} + +func TestWriteResponse(t *testing.T) { + for i, tt := range respTests { + resp, err := ReadResponse(bufio.NewReader(strings.NewReader(tt.Raw)), tt.Resp.Request) + if err != nil { + t.Errorf("#%d: %v", i, err) + continue + } + err = resp.Write(ioutil.Discard) + if err != nil { + t.Errorf("#%d: %v", i, err) + continue + } + } +} + +var readResponseCloseInMiddleTests = []struct { + chunked, compressed bool +}{ + {false, false}, + {true, false}, + {true, true}, +} + +// TestReadResponseCloseInMiddle tests that closing a body after +// reading only part of its contents advances the read to the end of +// the request, right up until the next request. +func TestReadResponseCloseInMiddle(t *testing.T) { + for _, test := range readResponseCloseInMiddleTests { + fatalf := func(format string, args ...interface{}) { + args = append([]interface{}{test.chunked, test.compressed}, args...) + t.Fatalf("on test chunked=%v, compressed=%v: "+format, args...) 
+ } + checkErr := func(err error, msg string) { + if err == nil { + return + } + fatalf(msg+": %v", err) + } + var buf bytes.Buffer + buf.WriteString("HTTP/1.1 200 OK\r\n") + if test.chunked { + buf.WriteString("Transfer-Encoding: chunked\r\n") + } else { + buf.WriteString("Content-Length: 1000000\r\n") + } + var wr io.Writer = &buf + if test.chunked { + wr = newChunkedWriter(wr) + } + if test.compressed { + buf.WriteString("Content-Encoding: gzip\r\n") + wr = gzip.NewWriter(wr) + } + buf.WriteString("\r\n") + + chunk := bytes.Repeat([]byte{'x'}, 1000) + for i := 0; i < 1000; i++ { + if test.compressed { + // Otherwise this compresses too well. + _, err := io.ReadFull(rand.Reader, chunk) + checkErr(err, "rand.Reader ReadFull") + } + wr.Write(chunk) + } + if test.compressed { + err := wr.(*gzip.Writer).Close() + checkErr(err, "compressor close") + } + if test.chunked { + buf.WriteString("0\r\n\r\n") + } + buf.WriteString("Next Request Here") + + bufr := bufio.NewReader(&buf) + resp, err := ReadResponse(bufr, dummyReq("GET")) + checkErr(err, "ReadResponse") + expectedLength := int64(-1) + if !test.chunked { + expectedLength = 1000000 + } + if resp.ContentLength != expectedLength { + fatalf("expected response length %d, got %d", expectedLength, resp.ContentLength) + } + if resp.Body == nil { + fatalf("nil body") + } + if test.compressed { + gzReader, err := gzip.NewReader(resp.Body) + checkErr(err, "gzip.NewReader") + resp.Body = &readerAndCloser{gzReader, resp.Body} + } + + rbuf := make([]byte, 2500) + n, err := io.ReadFull(resp.Body, rbuf) + checkErr(err, "2500 byte ReadFull") + if n != 2500 { + fatalf("ReadFull only read %d bytes", n) + } + if test.compressed == false && !bytes.Equal(bytes.Repeat([]byte{'x'}, 2500), rbuf) { + fatalf("ReadFull didn't read 2500 'x'; got %q", string(rbuf)) + } + resp.Body.Close() + + rest, err := ioutil.ReadAll(bufr) + checkErr(err, "ReadAll on remainder") + if e, g := "Next Request Here", string(rest); e != g { + g = regexp.MustCompile(`(xx+)`).ReplaceAllStringFunc(g, func(match string) string { + return fmt.Sprintf("x(repeated x%d)", len(match)) + }) + fatalf("remainder = %q, expected %q", g, e) + } + } +} + +func diff(t *testing.T, prefix string, have, want interface{}) { + hv := reflect.ValueOf(have).Elem() + wv := reflect.ValueOf(want).Elem() + if hv.Type() != wv.Type() { + t.Errorf("%s: type mismatch %v want %v", prefix, hv.Type(), wv.Type()) + } + for i := 0; i < hv.NumField(); i++ { + hf := hv.Field(i).Interface() + wf := wv.Field(i).Interface() + if !reflect.DeepEqual(hf, wf) { + t.Errorf("%s: %s = %v want %v", prefix, hv.Type().Field(i).Name, hf, wf) + } + } +} + +type responseLocationTest struct { + location string // Response's Location header or "" + requrl string // Response.Request.URL or "" + want string + wantErr error +} + +var responseLocationTests = []responseLocationTest{ + {"/foo", "http://bar.com/baz", "http://bar.com/foo", nil}, + {"http://foo.com/", "http://bar.com/baz", "http://foo.com/", nil}, + {"", "http://bar.com/baz", "", ErrNoLocation}, +} + +func TestLocationResponse(t *testing.T) { + for i, tt := range responseLocationTests { + res := new(Response) + res.Header = make(Header) + res.Header.Set("Location", tt.location) + if tt.requrl != "" { + res.Request = &Request{} + var err error + res.Request.URL, err = url.Parse(tt.requrl) + if err != nil { + t.Fatalf("bad test URL %q: %v", tt.requrl, err) + } + } + + got, err := res.Location() + if tt.wantErr != nil { + if err == nil { + t.Errorf("%d. 
err=nil; want %q", i, tt.wantErr) + continue + } + if g, e := err.Error(), tt.wantErr.Error(); g != e { + t.Errorf("%d. err=%q; want %q", i, g, e) + continue + } + continue + } + if err != nil { + t.Errorf("%d. err=%q", i, err) + continue + } + if g, e := got.String(), tt.want; g != e { + t.Errorf("%d. Location=%q; want %q", i, g, e) + } + } +} + +func TestResponseStatusStutter(t *testing.T) { + r := &Response{ + Status: "123 some status", + StatusCode: 123, + ProtoMajor: 1, + ProtoMinor: 3, + } + var buf bytes.Buffer + r.Write(&buf) + if strings.Contains(buf.String(), "123 123") { + t.Errorf("stutter in status: %s", buf.String()) + } +} + +func TestResponseContentLengthShortBody(t *testing.T) { + const shortBody = "Short body, not 123 bytes." + br := bufio.NewReader(strings.NewReader("HTTP/1.1 200 OK\r\n" + + "Content-Length: 123\r\n" + + "\r\n" + + shortBody)) + res, err := ReadResponse(br, &Request{Method: "GET"}) + if err != nil { + t.Fatal(err) + } + if res.ContentLength != 123 { + t.Fatalf("Content-Length = %d; want 123", res.ContentLength) + } + var buf bytes.Buffer + n, err := io.Copy(&buf, res.Body) + if n != int64(len(shortBody)) { + t.Errorf("Copied %d bytes; want %d, len(%q)", n, len(shortBody), shortBody) + } + if buf.String() != shortBody { + t.Errorf("Read body %q; want %q", buf.String(), shortBody) + } + if err != io.ErrUnexpectedEOF { + t.Errorf("io.Copy error = %#v; want io.ErrUnexpectedEOF", err) + } +} + +func TestReadResponseUnexpectedEOF(t *testing.T) { + br := bufio.NewReader(strings.NewReader("HTTP/1.1 301 Moved Permanently\r\n" + + "Location: http://example.com")) + _, err := ReadResponse(br, nil) + if err != io.ErrUnexpectedEOF { + t.Errorf("ReadResponse = %v; want io.ErrUnexpectedEOF", err) + } +} + +func TestNeedsSniff(t *testing.T) { + // needsSniff returns true with an empty response. + r := &response{} + if got, want := r.needsSniff(), true; got != want { + t.Errorf("needsSniff = %t; want %t", got, want) + } + // needsSniff returns false when Content-Type = nil. + r.handlerHeader = Header{"Content-Type": nil} + if got, want := r.needsSniff(), false; got != want { + t.Errorf("needsSniff empty Content-Type = %t; want %t", got, want) + } +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/core/http/responsewrite_test.go b/vendor/github.com/Azure/azure-sdk-for-go/core/http/responsewrite_test.go new file mode 100644 index 000000000..585b13b85 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/core/http/responsewrite_test.go @@ -0,0 +1,226 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package http + +import ( + "bytes" + "io/ioutil" + "strings" + "testing" +) + +type respWriteTest struct { + Resp Response + Raw string +} + +func TestResponseWrite(t *testing.T) { + respWriteTests := []respWriteTest{ + // HTTP/1.0, identity coding; no trailer + { + Response{ + StatusCode: 503, + ProtoMajor: 1, + ProtoMinor: 0, + Request: dummyReq("GET"), + Header: Header{}, + Body: ioutil.NopCloser(strings.NewReader("abcdef")), + ContentLength: 6, + }, + + "HTTP/1.0 503 Service Unavailable\r\n" + + "Content-Length: 6\r\n\r\n" + + "abcdef", + }, + // Unchunked response without Content-Length. 
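+		// (With ContentLength -1 on an HTTP/1.0 response, Write emits no
+		// framing header at all; the reader detects the end of the body
+		// only at connection EOF.)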
+ { + Response{ + StatusCode: 200, + ProtoMajor: 1, + ProtoMinor: 0, + Request: dummyReq("GET"), + Header: Header{}, + Body: ioutil.NopCloser(strings.NewReader("abcdef")), + ContentLength: -1, + }, + "HTTP/1.0 200 OK\r\n" + + "\r\n" + + "abcdef", + }, + // HTTP/1.1 response with unknown length and Connection: close + { + Response{ + StatusCode: 200, + ProtoMajor: 1, + ProtoMinor: 1, + Request: dummyReq("GET"), + Header: Header{}, + Body: ioutil.NopCloser(strings.NewReader("abcdef")), + ContentLength: -1, + Close: true, + }, + "HTTP/1.1 200 OK\r\n" + + "Connection: close\r\n" + + "\r\n" + + "abcdef", + }, + // HTTP/1.1 response with unknown length and not setting connection: close + { + Response{ + StatusCode: 200, + ProtoMajor: 1, + ProtoMinor: 1, + Request: dummyReq11("GET"), + Header: Header{}, + Body: ioutil.NopCloser(strings.NewReader("abcdef")), + ContentLength: -1, + Close: false, + }, + "HTTP/1.1 200 OK\r\n" + + "Connection: close\r\n" + + "\r\n" + + "abcdef", + }, + // HTTP/1.1 response with unknown length and not setting connection: close, but + // setting chunked. + { + Response{ + StatusCode: 200, + ProtoMajor: 1, + ProtoMinor: 1, + Request: dummyReq11("GET"), + Header: Header{}, + Body: ioutil.NopCloser(strings.NewReader("abcdef")), + ContentLength: -1, + TransferEncoding: []string{"chunked"}, + Close: false, + }, + "HTTP/1.1 200 OK\r\n" + + "Transfer-Encoding: chunked\r\n\r\n" + + "6\r\nabcdef\r\n0\r\n\r\n", + }, + // HTTP/1.1 response 0 content-length, and nil body + { + Response{ + StatusCode: 200, + ProtoMajor: 1, + ProtoMinor: 1, + Request: dummyReq11("GET"), + Header: Header{}, + Body: nil, + ContentLength: 0, + Close: false, + }, + "HTTP/1.1 200 OK\r\n" + + "Content-Length: 0\r\n" + + "\r\n", + }, + // HTTP/1.1 response 0 content-length, and non-nil empty body + { + Response{ + StatusCode: 200, + ProtoMajor: 1, + ProtoMinor: 1, + Request: dummyReq11("GET"), + Header: Header{}, + Body: ioutil.NopCloser(strings.NewReader("")), + ContentLength: 0, + Close: false, + }, + "HTTP/1.1 200 OK\r\n" + + "Content-Length: 0\r\n" + + "\r\n", + }, + // HTTP/1.1 response 0 content-length, and non-nil non-empty body + { + Response{ + StatusCode: 200, + ProtoMajor: 1, + ProtoMinor: 1, + Request: dummyReq11("GET"), + Header: Header{}, + Body: ioutil.NopCloser(strings.NewReader("foo")), + ContentLength: 0, + Close: false, + }, + "HTTP/1.1 200 OK\r\n" + + "Connection: close\r\n" + + "\r\nfoo", + }, + // HTTP/1.1, chunked coding; empty trailer; close + { + Response{ + StatusCode: 200, + ProtoMajor: 1, + ProtoMinor: 1, + Request: dummyReq("GET"), + Header: Header{}, + Body: ioutil.NopCloser(strings.NewReader("abcdef")), + ContentLength: 6, + TransferEncoding: []string{"chunked"}, + Close: true, + }, + + "HTTP/1.1 200 OK\r\n" + + "Connection: close\r\n" + + "Transfer-Encoding: chunked\r\n\r\n" + + "6\r\nabcdef\r\n0\r\n\r\n", + }, + + // Header value with a newline character (Issue 914). + // Also tests removal of leading and trailing whitespace. + { + Response{ + StatusCode: 204, + ProtoMajor: 1, + ProtoMinor: 1, + Request: dummyReq("GET"), + Header: Header{ + "Foo": []string{" Bar\nBaz "}, + }, + Body: nil, + ContentLength: 0, + TransferEncoding: []string{"chunked"}, + Close: true, + }, + + "HTTP/1.1 204 No Content\r\n" + + "Connection: close\r\n" + + "Foo: Bar Baz\r\n" + + "\r\n", + }, + + // Want a single Content-Length header. Fixing issue 8180 where + // there were two. 
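+		// (Response.Write asks its transferWriter whether a Content-Length
+		// header was already emitted and only falls back to writing
+		// "Content-Length: 0" itself when it was not; see
+		// contentLengthAlreadySent in Response.Write.)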
+ { + Response{ + StatusCode: StatusOK, + ProtoMajor: 1, + ProtoMinor: 1, + Request: &Request{Method: "POST"}, + Header: Header{}, + ContentLength: 0, + TransferEncoding: nil, + Body: nil, + }, + "HTTP/1.1 200 OK\r\nContent-Length: 0\r\n\r\n", + }, + } + + for i := range respWriteTests { + tt := &respWriteTests[i] + var braw bytes.Buffer + err := tt.Resp.Write(&braw) + if err != nil { + t.Errorf("error writing #%d: %s", i, err) + continue + } + sraw := braw.String() + if sraw != tt.Raw { + t.Errorf("Test %d, expecting:\n%q\nGot:\n%q\n", i, tt.Raw, sraw) + continue + } + } +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/core/http/serve_test.go b/vendor/github.com/Azure/azure-sdk-for-go/core/http/serve_test.go new file mode 100644 index 000000000..9e4d226bf --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/core/http/serve_test.go @@ -0,0 +1,2848 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// End-to-end serving tests + +package http_test + +import ( + "bufio" + "bytes" + "crypto/tls" + "errors" + "fmt" + "io" + "io/ioutil" + "log" + "net" + . "net/http" + "net/http/httptest" + "net/http/httputil" + "net/url" + "os" + "os/exec" + "reflect" + "runtime" + "strconv" + "strings" + "sync" + "sync/atomic" + "syscall" + "testing" + "time" +) + +type dummyAddr string +type oneConnListener struct { + conn net.Conn +} + +func (l *oneConnListener) Accept() (c net.Conn, err error) { + c = l.conn + if c == nil { + err = io.EOF + return + } + err = nil + l.conn = nil + return +} + +func (l *oneConnListener) Close() error { + return nil +} + +func (l *oneConnListener) Addr() net.Addr { + return dummyAddr("test-address") +} + +func (a dummyAddr) Network() string { + return string(a) +} + +func (a dummyAddr) String() string { + return string(a) +} + +type noopConn struct{} + +func (noopConn) LocalAddr() net.Addr { return dummyAddr("local-addr") } +func (noopConn) RemoteAddr() net.Addr { return dummyAddr("remote-addr") } +func (noopConn) SetDeadline(t time.Time) error { return nil } +func (noopConn) SetReadDeadline(t time.Time) error { return nil } +func (noopConn) SetWriteDeadline(t time.Time) error { return nil } + +type rwTestConn struct { + io.Reader + io.Writer + noopConn + + closeFunc func() error // called if non-nil + closec chan bool // else, if non-nil, send value to it on close +} + +func (c *rwTestConn) Close() error { + if c.closeFunc != nil { + return c.closeFunc() + } + select { + case c.closec <- true: + default: + } + return nil +} + +type testConn struct { + readBuf bytes.Buffer + writeBuf bytes.Buffer + closec chan bool // if non-nil, send value to it on close + noopConn +} + +func (c *testConn) Read(b []byte) (int, error) { + return c.readBuf.Read(b) +} + +func (c *testConn) Write(b []byte) (int, error) { + return c.writeBuf.Write(b) +} + +func (c *testConn) Close() error { + select { + case c.closec <- true: + default: + } + return nil +} + +// reqBytes treats req as a request (with \n delimiters) and returns it with \r\n delimiters, +// ending in \r\n\r\n +func reqBytes(req string) []byte { + return []byte(strings.Replace(strings.TrimSpace(req), "\n", "\r\n", -1) + "\r\n\r\n") +} + +type handlerTest struct { + handler Handler +} + +func newHandlerTest(h Handler) handlerTest { + return handlerTest{h} +} + +func (ht handlerTest) rawResponse(req string) string { + reqb := reqBytes(req) + var output bytes.Buffer + conn := &rwTestConn{ + Reader: 
bytes.NewReader(reqb), + Writer: &output, + closec: make(chan bool, 1), + } + ln := &oneConnListener{conn: conn} + go Serve(ln, ht.handler) + <-conn.closec + return output.String() +} + +func TestConsumingBodyOnNextConn(t *testing.T) { + conn := new(testConn) + for i := 0; i < 2; i++ { + conn.readBuf.Write([]byte( + "POST / HTTP/1.1\r\n" + + "Host: test\r\n" + + "Content-Length: 11\r\n" + + "\r\n" + + "foo=1&bar=1")) + } + + reqNum := 0 + ch := make(chan *Request) + servech := make(chan error) + listener := &oneConnListener{conn} + handler := func(res ResponseWriter, req *Request) { + reqNum++ + ch <- req + } + + go func() { + servech <- Serve(listener, HandlerFunc(handler)) + }() + + var req *Request + req = <-ch + if req == nil { + t.Fatal("Got nil first request.") + } + if req.Method != "POST" { + t.Errorf("For request #1's method, got %q; expected %q", + req.Method, "POST") + } + + req = <-ch + if req == nil { + t.Fatal("Got nil first request.") + } + if req.Method != "POST" { + t.Errorf("For request #2's method, got %q; expected %q", + req.Method, "POST") + } + + if serveerr := <-servech; serveerr != io.EOF { + t.Errorf("Serve returned %q; expected EOF", serveerr) + } +} + +type stringHandler string + +func (s stringHandler) ServeHTTP(w ResponseWriter, r *Request) { + w.Header().Set("Result", string(s)) +} + +var handlers = []struct { + pattern string + msg string +}{ + {"/", "Default"}, + {"/someDir/", "someDir"}, + {"someHost.com/someDir/", "someHost.com/someDir"}, +} + +var vtests = []struct { + url string + expected string +}{ + {"http://localhost/someDir/apage", "someDir"}, + {"http://localhost/otherDir/apage", "Default"}, + {"http://someHost.com/someDir/apage", "someHost.com/someDir"}, + {"http://otherHost.com/someDir/apage", "someDir"}, + {"http://otherHost.com/aDir/apage", "Default"}, + // redirections for trees + {"http://localhost/someDir", "/someDir/"}, + {"http://someHost.com/someDir", "/someDir/"}, +} + +func TestHostHandlers(t *testing.T) { + defer afterTest(t) + mux := NewServeMux() + for _, h := range handlers { + mux.Handle(h.pattern, stringHandler(h.msg)) + } + ts := httptest.NewServer(mux) + defer ts.Close() + + conn, err := net.Dial("tcp", ts.Listener.Addr().String()) + if err != nil { + t.Fatal(err) + } + defer conn.Close() + cc := httputil.NewClientConn(conn, nil) + for _, vt := range vtests { + var r *Response + var req Request + if req.URL, err = url.Parse(vt.url); err != nil { + t.Errorf("cannot parse url: %v", err) + continue + } + if err := cc.Write(&req); err != nil { + t.Errorf("writing request: %v", err) + continue + } + r, err := cc.Read(&req) + if err != nil { + t.Errorf("reading response: %v", err) + continue + } + switch r.StatusCode { + case StatusOK: + s := r.Header.Get("Result") + if s != vt.expected { + t.Errorf("Get(%q) = %q, want %q", vt.url, s, vt.expected) + } + case StatusMovedPermanently: + s := r.Header.Get("Location") + if s != vt.expected { + t.Errorf("Get(%q) = %q, want %q", vt.url, s, vt.expected) + } + default: + t.Errorf("Get(%q) unhandled status code %d", vt.url, r.StatusCode) + } + } +} + +var serveMuxRegister = []struct { + pattern string + h Handler +}{ + {"/dir/", serve(200)}, + {"/search", serve(201)}, + {"codesearch.google.com/search", serve(202)}, + {"codesearch.google.com/", serve(203)}, + {"example.com/", HandlerFunc(checkQueryStringHandler)}, +} + +// serve returns a handler that sends a response with the given code. 
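+// For example, the serveMuxRegister table above pairs "/search" with
+// serve(201), so any request routed there gets an empty 201 reply.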
+func serve(code int) HandlerFunc { + return func(w ResponseWriter, r *Request) { + w.WriteHeader(code) + } +} + +// checkQueryStringHandler checks if r.URL.RawQuery has the same value +// as the URL excluding the scheme and the query string and sends 200 +// response code if it is, 500 otherwise. +func checkQueryStringHandler(w ResponseWriter, r *Request) { + u := *r.URL + u.Scheme = "http" + u.Host = r.Host + u.RawQuery = "" + if "http://"+r.URL.RawQuery == u.String() { + w.WriteHeader(200) + } else { + w.WriteHeader(500) + } +} + +var serveMuxTests = []struct { + method string + host string + path string + code int + pattern string +}{ + {"GET", "google.com", "/", 404, ""}, + {"GET", "google.com", "/dir", 301, "/dir/"}, + {"GET", "google.com", "/dir/", 200, "/dir/"}, + {"GET", "google.com", "/dir/file", 200, "/dir/"}, + {"GET", "google.com", "/search", 201, "/search"}, + {"GET", "google.com", "/search/", 404, ""}, + {"GET", "google.com", "/search/foo", 404, ""}, + {"GET", "codesearch.google.com", "/search", 202, "codesearch.google.com/search"}, + {"GET", "codesearch.google.com", "/search/", 203, "codesearch.google.com/"}, + {"GET", "codesearch.google.com", "/search/foo", 203, "codesearch.google.com/"}, + {"GET", "codesearch.google.com", "/", 203, "codesearch.google.com/"}, + {"GET", "images.google.com", "/search", 201, "/search"}, + {"GET", "images.google.com", "/search/", 404, ""}, + {"GET", "images.google.com", "/search/foo", 404, ""}, + {"GET", "google.com", "/../search", 301, "/search"}, + {"GET", "google.com", "/dir/..", 301, ""}, + {"GET", "google.com", "/dir/..", 301, ""}, + {"GET", "google.com", "/dir/./file", 301, "/dir/"}, + + // The /foo -> /foo/ redirect applies to CONNECT requests + // but the path canonicalization does not. + {"CONNECT", "google.com", "/dir", 301, "/dir/"}, + {"CONNECT", "google.com", "/../search", 404, ""}, + {"CONNECT", "google.com", "/dir/..", 200, "/dir/"}, + {"CONNECT", "google.com", "/dir/..", 200, "/dir/"}, + {"CONNECT", "google.com", "/dir/./file", 200, "/dir/"}, +} + +func TestServeMuxHandler(t *testing.T) { + mux := NewServeMux() + for _, e := range serveMuxRegister { + mux.Handle(e.pattern, e.h) + } + + for _, tt := range serveMuxTests { + r := &Request{ + Method: tt.method, + Host: tt.host, + URL: &url.URL{ + Path: tt.path, + }, + } + h, pattern := mux.Handler(r) + rr := httptest.NewRecorder() + h.ServeHTTP(rr, r) + if pattern != tt.pattern || rr.Code != tt.code { + t.Errorf("%s %s %s = %d, %q, want %d, %q", tt.method, tt.host, tt.path, rr.Code, pattern, tt.code, tt.pattern) + } + } +} + +var serveMuxTests2 = []struct { + method string + host string + url string + code int + redirOk bool +}{ + {"GET", "google.com", "/", 404, false}, + {"GET", "example.com", "/test/?example.com/test/", 200, false}, + {"GET", "example.com", "test/?example.com/test/", 200, true}, +} + +// TestServeMuxHandlerRedirects tests that automatic redirects generated by +// mux.Handler() shouldn't clear the request's query string. 
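+// The "test/?example.com/test/" entry in serveMuxTests2 drives this: it is
+// 301-redirected once, and the Location that gets followed must still
+// carry the query string for checkQueryStringHandler to answer 200.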
+func TestServeMuxHandlerRedirects(t *testing.T) { + mux := NewServeMux() + for _, e := range serveMuxRegister { + mux.Handle(e.pattern, e.h) + } + + for _, tt := range serveMuxTests2 { + tries := 1 + turl := tt.url + for tries > 0 { + u, e := url.Parse(turl) + if e != nil { + t.Fatal(e) + } + r := &Request{ + Method: tt.method, + Host: tt.host, + URL: u, + } + h, _ := mux.Handler(r) + rr := httptest.NewRecorder() + h.ServeHTTP(rr, r) + if rr.Code != 301 { + if rr.Code != tt.code { + t.Errorf("%s %s %s = %d, want %d", tt.method, tt.host, tt.url, rr.Code, tt.code) + } + break + } + if !tt.redirOk { + t.Errorf("%s %s %s, unexpected redirect", tt.method, tt.host, tt.url) + break + } + turl = rr.HeaderMap.Get("Location") + tries-- + } + if tries < 0 { + t.Errorf("%s %s %s, too many redirects", tt.method, tt.host, tt.url) + } + } +} + +// Tests for http://code.google.com/p/go/issues/detail?id=900 +func TestMuxRedirectLeadingSlashes(t *testing.T) { + paths := []string{"//foo.txt", "///foo.txt", "/../../foo.txt"} + for _, path := range paths { + req, err := ReadRequest(bufio.NewReader(strings.NewReader("GET " + path + " HTTP/1.1\r\nHost: test\r\n\r\n"))) + if err != nil { + t.Errorf("%s", err) + } + mux := NewServeMux() + resp := httptest.NewRecorder() + + mux.ServeHTTP(resp, req) + + if loc, expected := resp.Header().Get("Location"), "/foo.txt"; loc != expected { + t.Errorf("Expected Location header set to %q; got %q", expected, loc) + return + } + + if code, expected := resp.Code, StatusMovedPermanently; code != expected { + t.Errorf("Expected response code of StatusMovedPermanently; got %d", code) + return + } + } +} + +func TestServerTimeouts(t *testing.T) { + if runtime.GOOS == "plan9" { + t.Skip("skipping test; see http://golang.org/issue/7237") + } + defer afterTest(t) + reqNum := 0 + ts := httptest.NewUnstartedServer(HandlerFunc(func(res ResponseWriter, req *Request) { + reqNum++ + fmt.Fprintf(res, "req=%d", reqNum) + })) + ts.Config.ReadTimeout = 250 * time.Millisecond + ts.Config.WriteTimeout = 250 * time.Millisecond + ts.Start() + defer ts.Close() + + // Hit the HTTP server successfully. + tr := &Transport{DisableKeepAlives: true} // they interfere with this test + defer tr.CloseIdleConnections() + c := &Client{Transport: tr} + r, err := c.Get(ts.URL) + if err != nil { + t.Fatalf("http Get #1: %v", err) + } + got, _ := ioutil.ReadAll(r.Body) + expected := "req=1" + if string(got) != expected { + t.Errorf("Unexpected response for request #1; got %q; expected %q", + string(got), expected) + } + + // Slow client that should timeout. + t1 := time.Now() + conn, err := net.Dial("tcp", ts.Listener.Addr().String()) + if err != nil { + t.Fatalf("Dial: %v", err) + } + buf := make([]byte, 1) + n, err := conn.Read(buf) + latency := time.Since(t1) + if n != 0 || err != io.EOF { + t.Errorf("Read = %v, %v, wanted %v, %v", n, err, 0, io.EOF) + } + if latency < 200*time.Millisecond /* fudge from 250 ms above */ { + t.Errorf("got EOF after %s, want >= %s", latency, 200*time.Millisecond) + } + + // Hit the HTTP server successfully again, verifying that the + // previous slow connection didn't run our handler. 
(that we
+	// get "req=2", not "req=3")
+	r, err = Get(ts.URL)
+	if err != nil {
+		t.Fatalf("http Get #2: %v", err)
+	}
+	got, _ = ioutil.ReadAll(r.Body)
+	expected = "req=2"
+	if string(got) != expected {
+		t.Errorf("Get #2 got %q, want %q", string(got), expected)
+	}
+
+	if !testing.Short() {
+		conn, err := net.Dial("tcp", ts.Listener.Addr().String())
+		if err != nil {
+			t.Fatalf("Dial: %v", err)
+		}
+		defer conn.Close()
+		go io.Copy(ioutil.Discard, conn)
+		for i := 0; i < 5; i++ {
+			_, err := conn.Write([]byte("GET / HTTP/1.1\r\nHost: foo\r\n\r\n"))
+			if err != nil {
+				t.Fatalf("on write %d: %v", i, err)
+			}
+			time.Sleep(ts.Config.ReadTimeout / 2)
+		}
+	}
+}
+
+// golang.org/issue/4741 -- setting only a write timeout that triggers
+// shouldn't cause a handler to block forever on reads (next HTTP
+// request) that will never happen.
+func TestOnlyWriteTimeout(t *testing.T) {
+	if runtime.GOOS == "plan9" {
+		t.Skip("skipping test; see http://golang.org/issue/7237")
+	}
+	defer afterTest(t)
+	var conn net.Conn
+	var afterTimeoutErrc = make(chan error, 1)
+	ts := httptest.NewUnstartedServer(HandlerFunc(func(w ResponseWriter, req *Request) {
+		buf := make([]byte, 512<<10)
+		_, err := w.Write(buf)
+		if err != nil {
+			t.Errorf("handler Write error: %v", err)
+			return
+		}
+		conn.SetWriteDeadline(time.Now().Add(-30 * time.Second))
+		_, err = w.Write(buf)
+		afterTimeoutErrc <- err
+	}))
+	ts.Listener = trackLastConnListener{ts.Listener, &conn}
+	ts.Start()
+	defer ts.Close()
+
+	tr := &Transport{DisableKeepAlives: false}
+	defer tr.CloseIdleConnections()
+	c := &Client{Transport: tr}
+
+	errc := make(chan error)
+	go func() {
+		res, err := c.Get(ts.URL)
+		if err != nil {
+			errc <- err
+			return
+		}
+		_, err = io.Copy(ioutil.Discard, res.Body)
+		errc <- err
+	}()
+	select {
+	case err := <-errc:
+		if err == nil {
+			t.Errorf("expected an error from Get request")
+		}
+	case <-time.After(5 * time.Second):
+		t.Fatal("timeout waiting for Get error")
+	}
+	if err := <-afterTimeoutErrc; err == nil {
+		t.Error("expected write error after timeout")
+	}
+}
+
+// trackLastConnListener tracks the last net.Conn that was accepted.
+type trackLastConnListener struct {
+	net.Listener
+	last *net.Conn // destination
+}
+
+func (l trackLastConnListener) Accept() (c net.Conn, err error) {
+	c, err = l.Listener.Accept()
+	*l.last = c
+	return
+}
+
+// TestIdentityResponse verifies that a handler can unset chunked
+// transfer-encoding on a response by declaring an explicit Content-Length
+// and an identity Transfer-Encoding.
+func TestIdentityResponse(t *testing.T) {
+	defer afterTest(t)
+	handler := HandlerFunc(func(rw ResponseWriter, req *Request) {
+		rw.Header().Set("Content-Length", "3")
+		rw.Header().Set("Transfer-Encoding", req.FormValue("te"))
+		switch {
+		case req.FormValue("overwrite") == "1":
+			_, err := rw.Write([]byte("foo TOO LONG"))
+			if err != ErrContentLength {
+				t.Errorf("expected ErrContentLength; got %v", err)
+			}
+		case req.FormValue("underwrite") == "1":
+			rw.Header().Set("Content-Length", "500")
+			rw.Write([]byte("too short"))
+		default:
+			rw.Write([]byte("foo"))
+		}
+	})
+
+	ts := httptest.NewServer(handler)
+	defer ts.Close()
+
+	// Note: this relies on the assumption (which is true) that
+	// Get sends HTTP/1.1 or greater requests. Otherwise the
+	// server wouldn't have the choice to send back chunked
+	// responses.
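+	// (Chunked encoding does not exist in HTTP/1.0, so with a 1.0 client
+	// there would be no chunked fallback for these settings to suppress.)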
+ for _, te := range []string{"", "identity"} { + url := ts.URL + "/?te=" + te + res, err := Get(url) + if err != nil { + t.Fatalf("error with Get of %s: %v", url, err) + } + if cl, expected := res.ContentLength, int64(3); cl != expected { + t.Errorf("for %s expected res.ContentLength of %d; got %d", url, expected, cl) + } + if cl, expected := res.Header.Get("Content-Length"), "3"; cl != expected { + t.Errorf("for %s expected Content-Length header of %q; got %q", url, expected, cl) + } + if tl, expected := len(res.TransferEncoding), 0; tl != expected { + t.Errorf("for %s expected len(res.TransferEncoding) of %d; got %d (%v)", + url, expected, tl, res.TransferEncoding) + } + res.Body.Close() + } + + // Verify that ErrContentLength is returned + url := ts.URL + "/?overwrite=1" + res, err := Get(url) + if err != nil { + t.Fatalf("error with Get of %s: %v", url, err) + } + res.Body.Close() + + // Verify that the connection is closed when the declared Content-Length + // is larger than what the handler wrote. + conn, err := net.Dial("tcp", ts.Listener.Addr().String()) + if err != nil { + t.Fatalf("error dialing: %v", err) + } + _, err = conn.Write([]byte("GET /?underwrite=1 HTTP/1.1\r\nHost: foo\r\n\r\n")) + if err != nil { + t.Fatalf("error writing: %v", err) + } + + // The ReadAll will hang for a failing test, so use a Timer to + // fail explicitly. + goTimeout(t, 2*time.Second, func() { + got, _ := ioutil.ReadAll(conn) + expectedSuffix := "\r\n\r\ntoo short" + if !strings.HasSuffix(string(got), expectedSuffix) { + t.Errorf("Expected output to end with %q; got response body %q", + expectedSuffix, string(got)) + } + }) +} + +func testTCPConnectionCloses(t *testing.T, req string, h Handler) { + defer afterTest(t) + s := httptest.NewServer(h) + defer s.Close() + + conn, err := net.Dial("tcp", s.Listener.Addr().String()) + if err != nil { + t.Fatal("dial error:", err) + } + defer conn.Close() + + _, err = fmt.Fprint(conn, req) + if err != nil { + t.Fatal("print error:", err) + } + + r := bufio.NewReader(conn) + res, err := ReadResponse(r, &Request{Method: "GET"}) + if err != nil { + t.Fatal("ReadResponse error:", err) + } + + didReadAll := make(chan bool, 1) + go func() { + select { + case <-time.After(5 * time.Second): + t.Error("body not closed after 5s") + return + case <-didReadAll: + } + }() + + _, err = ioutil.ReadAll(r) + if err != nil { + t.Fatal("read error:", err) + } + didReadAll <- true + + if !res.Close { + t.Errorf("Response.Close = false; want true") + } +} + +// TestServeHTTP10Close verifies that HTTP/1.0 requests won't be kept alive. +func TestServeHTTP10Close(t *testing.T) { + testTCPConnectionCloses(t, "GET / HTTP/1.0\r\n\r\n", HandlerFunc(func(w ResponseWriter, r *Request) { + ServeFile(w, r, "testdata/file") + })) +} + +// TestClientCanClose verifies that clients can also force a connection to close. +func TestClientCanClose(t *testing.T) { + testTCPConnectionCloses(t, "GET / HTTP/1.1\r\nConnection: close\r\n\r\n", HandlerFunc(func(w ResponseWriter, r *Request) { + // Nothing. + })) +} + +// TestHandlersCanSetConnectionClose verifies that handlers can force a connection to close, +// even for HTTP/1.1 requests. 
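+// The handlers below rely on a single call, made before the first write
+// (sketch of the mechanism):
+//
+//	w.Header().Set("Connection", "close")
+//
+// which the server turns into closing the TCP connection once the
+// response has been written.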
+func TestHandlersCanSetConnectionClose11(t *testing.T) { + testTCPConnectionCloses(t, "GET / HTTP/1.1\r\n\r\n", HandlerFunc(func(w ResponseWriter, r *Request) { + w.Header().Set("Connection", "close") + })) +} + +func TestHandlersCanSetConnectionClose10(t *testing.T) { + testTCPConnectionCloses(t, "GET / HTTP/1.0\r\nConnection: keep-alive\r\n\r\n", HandlerFunc(func(w ResponseWriter, r *Request) { + w.Header().Set("Connection", "close") + })) +} + +func TestSetsRemoteAddr(t *testing.T) { + defer afterTest(t) + ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) { + fmt.Fprintf(w, "%s", r.RemoteAddr) + })) + defer ts.Close() + + res, err := Get(ts.URL) + if err != nil { + t.Fatalf("Get error: %v", err) + } + body, err := ioutil.ReadAll(res.Body) + if err != nil { + t.Fatalf("ReadAll error: %v", err) + } + ip := string(body) + if !strings.HasPrefix(ip, "127.0.0.1:") && !strings.HasPrefix(ip, "[::1]:") { + t.Fatalf("Expected local addr; got %q", ip) + } +} + +func TestChunkedResponseHeaders(t *testing.T) { + defer afterTest(t) + log.SetOutput(ioutil.Discard) // is noisy otherwise + defer log.SetOutput(os.Stderr) + + ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) { + w.Header().Set("Content-Length", "intentional gibberish") // we check that this is deleted + w.(Flusher).Flush() + fmt.Fprintf(w, "I am a chunked response.") + })) + defer ts.Close() + + res, err := Get(ts.URL) + if err != nil { + t.Fatalf("Get error: %v", err) + } + defer res.Body.Close() + if g, e := res.ContentLength, int64(-1); g != e { + t.Errorf("expected ContentLength of %d; got %d", e, g) + } + if g, e := res.TransferEncoding, []string{"chunked"}; !reflect.DeepEqual(g, e) { + t.Errorf("expected TransferEncoding of %v; got %v", e, g) + } + if _, haveCL := res.Header["Content-Length"]; haveCL { + t.Errorf("Unexpected Content-Length") + } +} + +// Test304Responses verifies that 304s don't declare that they're +// chunking in their response headers and aren't allowed to produce +// output. +func Test304Responses(t *testing.T) { + defer afterTest(t) + ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) { + w.WriteHeader(StatusNotModified) + _, err := w.Write([]byte("illegal body")) + if err != ErrBodyNotAllowed { + t.Errorf("on Write, expected ErrBodyNotAllowed, got %v", err) + } + })) + defer ts.Close() + res, err := Get(ts.URL) + if err != nil { + t.Error(err) + } + if len(res.TransferEncoding) > 0 { + t.Errorf("expected no TransferEncoding; got %v", res.TransferEncoding) + } + body, err := ioutil.ReadAll(res.Body) + if err != nil { + t.Error(err) + } + if len(body) > 0 { + t.Errorf("got unexpected body %q", string(body)) + } +} + +// TestHeadResponses verifies that all MIME type sniffing and Content-Length +// counting of GET requests also happens on HEAD requests. 
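+// Concretely, the handler below writes "<html>" and then "789a" (10 bytes
+// in all), so a HEAD must come back with Content-Length: 10 and the
+// sniffed Content-Type "text/html; charset=utf-8", yet an empty body.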
+func TestHeadResponses(t *testing.T) {
+	defer afterTest(t)
+	ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) {
+		_, err := w.Write([]byte("<html>"))
+		if err != nil {
+			t.Errorf("ResponseWriter.Write: %v", err)
+		}
+
+		// Also exercise the ReaderFrom path
+		_, err = io.Copy(w, strings.NewReader("789a"))
+		if err != nil {
+			t.Errorf("Copy(ResponseWriter, ...): %v", err)
+		}
+	}))
+	defer ts.Close()
+	res, err := Head(ts.URL)
+	if err != nil {
+		t.Error(err)
+	}
+	if len(res.TransferEncoding) > 0 {
+		t.Errorf("expected no TransferEncoding; got %v", res.TransferEncoding)
+	}
+	if ct := res.Header.Get("Content-Type"); ct != "text/html; charset=utf-8" {
+		t.Errorf("Content-Type: %q; want text/html; charset=utf-8", ct)
+	}
+	if v := res.ContentLength; v != 10 {
+		t.Errorf("Content-Length: %d; want 10", v)
+	}
+	body, err := ioutil.ReadAll(res.Body)
+	if err != nil {
+		t.Error(err)
+	}
+	if len(body) > 0 {
+		t.Errorf("got unexpected body %q", string(body))
+	}
+}
+
+func TestTLSHandshakeTimeout(t *testing.T) {
+	if runtime.GOOS == "plan9" {
+		t.Skip("skipping test; see http://golang.org/issue/7237")
+	}
+	defer afterTest(t)
+	ts := httptest.NewUnstartedServer(HandlerFunc(func(w ResponseWriter, r *Request) {}))
+	errc := make(chanWriter, 10) // but only expecting 1
+	ts.Config.ReadTimeout = 250 * time.Millisecond
+	ts.Config.ErrorLog = log.New(errc, "", 0)
+	ts.StartTLS()
+	defer ts.Close()
+	conn, err := net.Dial("tcp", ts.Listener.Addr().String())
+	if err != nil {
+		t.Fatalf("Dial: %v", err)
+	}
+	defer conn.Close()
+	goTimeout(t, 10*time.Second, func() {
+		var buf [1]byte
+		n, err := conn.Read(buf[:])
+		if err == nil || n != 0 {
+			t.Errorf("Read = %d, %v; want an error and no bytes", n, err)
+		}
+	})
+	select {
+	case v := <-errc:
+		if !strings.Contains(v, "timeout") && !strings.Contains(v, "TLS handshake") {
+			t.Errorf("expected a TLS handshake timeout error; got %q", v)
+		}
+	case <-time.After(5 * time.Second):
+		t.Errorf("timeout waiting for logged error")
+	}
+}
+
+func TestTLSServer(t *testing.T) {
+	defer afterTest(t)
+	ts := httptest.NewTLSServer(HandlerFunc(func(w ResponseWriter, r *Request) {
+		if r.TLS != nil {
+			w.Header().Set("X-TLS-Set", "true")
+			if r.TLS.HandshakeComplete {
+				w.Header().Set("X-TLS-HandshakeComplete", "true")
+			}
+		}
+	}))
+	ts.Config.ErrorLog = log.New(ioutil.Discard, "", 0)
+	defer ts.Close()
+
+	// Connect an idle TCP connection to this server before we run
+	// our real tests. This idle connection used to block forever
+	// in the TLS handshake, preventing future connections from
+	// being accepted. It may prevent future accidental blocking
+	// in newConn.
+ idleConn, err := net.Dial("tcp", ts.Listener.Addr().String()) + if err != nil { + t.Fatalf("Dial: %v", err) + } + defer idleConn.Close() + goTimeout(t, 10*time.Second, func() { + if !strings.HasPrefix(ts.URL, "https://") { + t.Errorf("expected test TLS server to start with https://, got %q", ts.URL) + return + } + noVerifyTransport := &Transport{ + TLSClientConfig: &tls.Config{ + InsecureSkipVerify: true, + }, + } + client := &Client{Transport: noVerifyTransport} + res, err := client.Get(ts.URL) + if err != nil { + t.Error(err) + return + } + if res == nil { + t.Errorf("got nil Response") + return + } + defer res.Body.Close() + if res.Header.Get("X-TLS-Set") != "true" { + t.Errorf("expected X-TLS-Set response header") + return + } + if res.Header.Get("X-TLS-HandshakeComplete") != "true" { + t.Errorf("expected X-TLS-HandshakeComplete header") + } + }) +} + +type serverExpectTest struct { + contentLength int // of request body + chunked bool + expectation string // e.g. "100-continue" + readBody bool // whether handler should read the body (if false, sends StatusUnauthorized) + expectedResponse string // expected substring in first line of http response +} + +func expectTest(contentLength int, expectation string, readBody bool, expectedResponse string) serverExpectTest { + return serverExpectTest{ + contentLength: contentLength, + expectation: expectation, + readBody: readBody, + expectedResponse: expectedResponse, + } +} + +var serverExpectTests = []serverExpectTest{ + // Normal 100-continues, case-insensitive. + expectTest(100, "100-continue", true, "100 Continue"), + expectTest(100, "100-cOntInUE", true, "100 Continue"), + + // No 100-continue. + expectTest(100, "", true, "200 OK"), + + // 100-continue but requesting client to deny us, + // so it never reads the body. + expectTest(100, "100-continue", false, "401 Unauthorized"), + // Likewise without 100-continue: + expectTest(100, "", false, "401 Unauthorized"), + + // Non-standard expectations are failures + expectTest(0, "a-pony", false, "417 Expectation Failed"), + + // Expect-100 requested but no body (is apparently okay: Issue 7625) + expectTest(0, "100-continue", true, "200 OK"), + // Expect-100 requested but handler doesn't read the body + expectTest(0, "100-continue", false, "401 Unauthorized"), + // Expect-100 continue with no body, but a chunked body. + { + expectation: "100-continue", + readBody: true, + chunked: true, + expectedResponse: "100 Continue", + }, +} + +// Tests that the server responds to the "Expect" request header +// correctly. +func TestServerExpect(t *testing.T) { + defer afterTest(t) + ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) { + // Note using r.FormValue("readbody") because for POST + // requests that would read from r.Body, which we only + // conditionally want to do. + if strings.Contains(r.URL.RawQuery, "readbody=true") { + ioutil.ReadAll(r.Body) + w.Write([]byte("Hi")) + } else { + w.WriteHeader(StatusUnauthorized) + } + })) + defer ts.Close() + + runTest := func(test serverExpectTest) { + conn, err := net.Dial("tcp", ts.Listener.Addr().String()) + if err != nil { + t.Fatalf("Dial: %v", err) + } + defer conn.Close() + + // Only send the body immediately if we're acting like an HTTP client + // that doesn't send 100-continue expectations. 
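+		// (A client that did send "Expect: 100-continue" would hold the body
+		// back until the server answered with an interim "100 Continue".)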
+ writeBody := test.contentLength != 0 && strings.ToLower(test.expectation) != "100-continue" + + go func() { + contentLen := fmt.Sprintf("Content-Length: %d", test.contentLength) + if test.chunked { + contentLen = "Transfer-Encoding: chunked" + } + _, err := fmt.Fprintf(conn, "POST /?readbody=%v HTTP/1.1\r\n"+ + "Connection: close\r\n"+ + "%s\r\n"+ + "Expect: %s\r\nHost: foo\r\n\r\n", + test.readBody, contentLen, test.expectation) + if err != nil { + t.Errorf("On test %#v, error writing request headers: %v", test, err) + return + } + if writeBody { + var targ io.WriteCloser = struct { + io.Writer + io.Closer + }{ + conn, + ioutil.NopCloser(nil), + } + if test.chunked { + targ = httputil.NewChunkedWriter(conn) + } + body := strings.Repeat("A", test.contentLength) + _, err = fmt.Fprint(targ, body) + if err == nil { + err = targ.Close() + } + if err != nil { + if !test.readBody { + // Server likely already hung up on us. + // See larger comment below. + t.Logf("On test %#v, acceptable error writing request body: %v", test, err) + return + } + t.Errorf("On test %#v, error writing request body: %v", test, err) + } + } + }() + bufr := bufio.NewReader(conn) + line, err := bufr.ReadString('\n') + if err != nil { + if writeBody && !test.readBody { + // This is an acceptable failure due to a possible TCP race: + // We were still writing data and the server hung up on us. A TCP + // implementation may send a RST if our request body data was known + // to be lost, which may trigger our reads to fail. + // See RFC 1122 page 88. + t.Logf("On test %#v, acceptable error from ReadString: %v", test, err) + return + } + t.Fatalf("On test %#v, ReadString: %v", test, err) + } + if !strings.Contains(line, test.expectedResponse) { + t.Errorf("On test %#v, got first line = %q; want %q", test, line, test.expectedResponse) + } + } + + for _, test := range serverExpectTests { + runTest(test) + } +} + +// Under a ~256KB (maxPostHandlerReadBytes) threshold, the server +// should consume client request bodies that a handler didn't read. +func TestServerUnreadRequestBodyLittle(t *testing.T) { + conn := new(testConn) + body := strings.Repeat("x", 100<<10) + conn.readBuf.Write([]byte(fmt.Sprintf( + "POST / HTTP/1.1\r\n"+ + "Host: test\r\n"+ + "Content-Length: %d\r\n"+ + "\r\n", len(body)))) + conn.readBuf.Write([]byte(body)) + + done := make(chan bool) + + ls := &oneConnListener{conn} + go Serve(ls, HandlerFunc(func(rw ResponseWriter, req *Request) { + defer close(done) + if conn.readBuf.Len() < len(body)/2 { + t.Errorf("on request, read buffer length is %d; expected about 100 KB", conn.readBuf.Len()) + } + rw.WriteHeader(200) + rw.(Flusher).Flush() + if g, e := conn.readBuf.Len(), 0; g != e { + t.Errorf("after WriteHeader, read buffer length is %d; want %d", g, e) + } + if c := rw.Header().Get("Connection"); c != "" { + t.Errorf(`Connection header = %q; want ""`, c) + } + })) + <-done +} + +// Over a ~256KB (maxPostHandlerReadBytes) threshold, the server +// should ignore client request bodies that a handler didn't read +// and close the connection. 
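+// The "Connection: close" response header asserted below is the visible
+// signal that the server dropped the unread ~1MB body instead of draining
+// it to keep the connection alive.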
+func TestServerUnreadRequestBodyLarge(t *testing.T) { + conn := new(testConn) + body := strings.Repeat("x", 1<<20) + conn.readBuf.Write([]byte(fmt.Sprintf( + "POST / HTTP/1.1\r\n"+ + "Host: test\r\n"+ + "Content-Length: %d\r\n"+ + "\r\n", len(body)))) + conn.readBuf.Write([]byte(body)) + conn.closec = make(chan bool, 1) + + ls := &oneConnListener{conn} + go Serve(ls, HandlerFunc(func(rw ResponseWriter, req *Request) { + if conn.readBuf.Len() < len(body)/2 { + t.Errorf("on request, read buffer length is %d; expected about 1MB", conn.readBuf.Len()) + } + rw.WriteHeader(200) + rw.(Flusher).Flush() + if conn.readBuf.Len() < len(body)/2 { + t.Errorf("post-WriteHeader, read buffer length is %d; expected about 1MB", conn.readBuf.Len()) + } + })) + <-conn.closec + + if res := conn.writeBuf.String(); !strings.Contains(res, "Connection: close") { + t.Errorf("Expected a Connection: close header; got response: %s", res) + } +} + +func TestTimeoutHandler(t *testing.T) { + defer afterTest(t) + sendHi := make(chan bool, 1) + writeErrors := make(chan error, 1) + sayHi := HandlerFunc(func(w ResponseWriter, r *Request) { + <-sendHi + _, werr := w.Write([]byte("hi")) + writeErrors <- werr + }) + timeout := make(chan time.Time, 1) // write to this to force timeouts + ts := httptest.NewServer(NewTestTimeoutHandler(sayHi, timeout)) + defer ts.Close() + + // Succeed without timing out: + sendHi <- true + res, err := Get(ts.URL) + if err != nil { + t.Error(err) + } + if g, e := res.StatusCode, StatusOK; g != e { + t.Errorf("got res.StatusCode %d; expected %d", g, e) + } + body, _ := ioutil.ReadAll(res.Body) + if g, e := string(body), "hi"; g != e { + t.Errorf("got body %q; expected %q", g, e) + } + if g := <-writeErrors; g != nil { + t.Errorf("got unexpected Write error on first request: %v", g) + } + + // Times out: + timeout <- time.Time{} + res, err = Get(ts.URL) + if err != nil { + t.Error(err) + } + if g, e := res.StatusCode, StatusServiceUnavailable; g != e { + t.Errorf("got res.StatusCode %d; expected %d", g, e) + } + body, _ = ioutil.ReadAll(res.Body) + if !strings.Contains(string(body), "Timeout") { + t.Errorf("expected timeout body; got %q", string(body)) + } + + // Now make the previously-timed out handler speak again, + // which verifies the panic is handled: + sendHi <- true + if g, e := <-writeErrors, ErrHandlerTimeout; g != e { + t.Errorf("expected Write error of %v; got %v", e, g) + } +} + +// Verifies we don't path.Clean() on the wrong parts in redirects. +func TestRedirectMunging(t *testing.T) { + req, _ := NewRequest("GET", "http://example.com/", nil) + + resp := httptest.NewRecorder() + Redirect(resp, req, "/foo?next=http://bar.com/", 302) + if g, e := resp.Header().Get("Location"), "/foo?next=http://bar.com/"; g != e { + t.Errorf("Location header was %q; want %q", g, e) + } + + resp = httptest.NewRecorder() + Redirect(resp, req, "http://localhost:8080/_ah/login?continue=http://localhost:8080/", 302) + if g, e := resp.Header().Get("Location"), "http://localhost:8080/_ah/login?continue=http://localhost:8080/"; g != e { + t.Errorf("Location header was %q; want %q", g, e) + } +} + +func TestRedirectBadPath(t *testing.T) { + // This used to crash. It's not valid input (bad path), but it + // shouldn't crash. 
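+	// (The bogus part is a non-empty URL Path without a leading slash,
+	// which Redirect's relative-path cleaning previously tripped over.)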
+ rr := httptest.NewRecorder() + req := &Request{ + Method: "GET", + URL: &url.URL{ + Scheme: "http", + Path: "not-empty-but-no-leading-slash", // bogus + }, + } + Redirect(rr, req, "", 304) + if rr.Code != 304 { + t.Errorf("Code = %d; want 304", rr.Code) + } +} + +// TestZeroLengthPostAndResponse exercises an optimization done by the Transport: +// when there is no body (either because the method doesn't permit a body, or an +// explicit Content-Length of zero is present), then the transport can re-use the +// connection immediately. But when it re-uses the connection, it typically closes +// the previous request's body, which is not optimal for zero-lengthed bodies, +// as the client would then see http.ErrBodyReadAfterClose and not 0, io.EOF. +func TestZeroLengthPostAndResponse(t *testing.T) { + defer afterTest(t) + ts := httptest.NewServer(HandlerFunc(func(rw ResponseWriter, r *Request) { + all, err := ioutil.ReadAll(r.Body) + if err != nil { + t.Fatalf("handler ReadAll: %v", err) + } + if len(all) != 0 { + t.Errorf("handler got %d bytes; expected 0", len(all)) + } + rw.Header().Set("Content-Length", "0") + })) + defer ts.Close() + + req, err := NewRequest("POST", ts.URL, strings.NewReader("")) + if err != nil { + t.Fatal(err) + } + req.ContentLength = 0 + + var resp [5]*Response + for i := range resp { + resp[i], err = DefaultClient.Do(req) + if err != nil { + t.Fatalf("client post #%d: %v", i, err) + } + } + + for i := range resp { + all, err := ioutil.ReadAll(resp[i].Body) + if err != nil { + t.Fatalf("req #%d: client ReadAll: %v", i, err) + } + if len(all) != 0 { + t.Errorf("req #%d: client got %d bytes; expected 0", i, len(all)) + } + } +} + +func TestHandlerPanicNil(t *testing.T) { + testHandlerPanic(t, false, nil) +} + +func TestHandlerPanic(t *testing.T) { + testHandlerPanic(t, false, "intentional death for testing") +} + +func TestHandlerPanicWithHijack(t *testing.T) { + testHandlerPanic(t, true, "intentional death for testing") +} + +func testHandlerPanic(t *testing.T, withHijack bool, panicValue interface{}) { + defer afterTest(t) + // Unlike the other tests that set the log output to ioutil.Discard + // to quiet the output, this test uses a pipe. The pipe serves three + // purposes: + // + // 1) The log.Print from the http server (generated by the caught + // panic) will go to the pipe instead of stderr, making the + // output quiet. + // + // 2) We read from the pipe to verify that the handler + // actually caught the panic and logged something. + // + // 3) The blocking Read call prevents this TestHandlerPanic + // function from exiting before the HTTP server handler + // finishes crashing. If this text function exited too + // early (and its defer log.SetOutput(os.Stderr) ran), + // then the crash output could spill into the next test. + pr, pw := io.Pipe() + log.SetOutput(pw) + defer log.SetOutput(os.Stderr) + defer pw.Close() + + ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) { + if withHijack { + rwc, _, err := w.(Hijacker).Hijack() + if err != nil { + t.Logf("unexpected error: %v", err) + } + defer rwc.Close() + } + panic(panicValue) + })) + defer ts.Close() + + // Do a blocking read on the log output pipe so its logging + // doesn't bleed into the next test. But wait only 5 seconds + // for it. 
+ done := make(chan bool, 1) + go func() { + buf := make([]byte, 4<<10) + _, err := pr.Read(buf) + pr.Close() + if err != nil && err != io.EOF { + t.Error(err) + } + done <- true + }() + + _, err := Get(ts.URL) + if err == nil { + t.Logf("expected an error") + } + + if panicValue == nil { + return + } + + select { + case <-done: + return + case <-time.After(5 * time.Second): + t.Fatal("expected server handler to log an error") + } +} + +func TestNoDate(t *testing.T) { + defer afterTest(t) + ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) { + w.Header()["Date"] = nil + })) + defer ts.Close() + res, err := Get(ts.URL) + if err != nil { + t.Fatal(err) + } + _, present := res.Header["Date"] + if present { + t.Fatalf("Expected no Date header; got %v", res.Header["Date"]) + } +} + +func TestStripPrefix(t *testing.T) { + defer afterTest(t) + h := HandlerFunc(func(w ResponseWriter, r *Request) { + w.Header().Set("X-Path", r.URL.Path) + }) + ts := httptest.NewServer(StripPrefix("/foo", h)) + defer ts.Close() + + res, err := Get(ts.URL + "/foo/bar") + if err != nil { + t.Fatal(err) + } + if g, e := res.Header.Get("X-Path"), "/bar"; g != e { + t.Errorf("test 1: got %s, want %s", g, e) + } + res.Body.Close() + + res, err = Get(ts.URL + "/bar") + if err != nil { + t.Fatal(err) + } + if g, e := res.StatusCode, 404; g != e { + t.Errorf("test 2: got status %v, want %v", g, e) + } + res.Body.Close() +} + +func TestRequestLimit(t *testing.T) { + defer afterTest(t) + ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) { + t.Fatalf("didn't expect to get request in Handler") + })) + defer ts.Close() + req, _ := NewRequest("GET", ts.URL, nil) + var bytesPerHeader = len("header12345: val12345\r\n") + for i := 0; i < ((DefaultMaxHeaderBytes+4096)/bytesPerHeader)+1; i++ { + req.Header.Set(fmt.Sprintf("header%05d", i), fmt.Sprintf("val%05d", i)) + } + res, err := DefaultClient.Do(req) + if err != nil { + // Some HTTP clients may fail on this undefined behavior (server replying and + // closing the connection while the request is still being written), but + // we do support it (at least currently), so we expect a response below. + t.Fatalf("Do: %v", err) + } + defer res.Body.Close() + if res.StatusCode != 413 { + t.Fatalf("expected 413 response status; got: %d %s", res.StatusCode, res.Status) + } +} + +type neverEnding byte + +func (b neverEnding) Read(p []byte) (n int, err error) { + for i := range p { + p[i] = byte(b) + } + return len(p), nil +} + +type countReader struct { + r io.Reader + n *int64 +} + +func (cr countReader) Read(p []byte) (n int, err error) { + n, err = cr.r.Read(p) + atomic.AddInt64(cr.n, int64(n)) + return +} + +func TestRequestBodyLimit(t *testing.T) { + defer afterTest(t) + const limit = 1 << 20 + ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) { + r.Body = MaxBytesReader(w, r.Body, limit) + n, err := io.Copy(ioutil.Discard, r.Body) + if err == nil { + t.Errorf("expected error from io.Copy") + } + if n != limit { + t.Errorf("io.Copy = %d, want %d", n, limit) + } + })) + defer ts.Close() + + nWritten := new(int64) + req, _ := NewRequest("POST", ts.URL, io.LimitReader(countReader{neverEnding('a'), nWritten}, limit*200)) + + // Send the POST, but don't care it succeeds or not. The + // remote side is going to reply and then close the TCP + // connection, and HTTP doesn't really define if that's + // allowed or not. 
Some HTTP clients will get the response + // and some (like ours, currently) will complain that the + // request write failed, without reading the response. + // + // But that's okay, since what we're really testing is that + // the remote side hung up on us before we wrote too much. + _, _ = DefaultClient.Do(req) + + if atomic.LoadInt64(nWritten) > limit*100 { + t.Errorf("handler restricted the request body to %d bytes, but client managed to write %d", + limit, nWritten) + } +} + +// TestClientWriteShutdown tests that if the client shuts down the write +// side of their TCP connection, the server doesn't send a 400 Bad Request. +func TestClientWriteShutdown(t *testing.T) { + if runtime.GOOS == "plan9" { + t.Skip("skipping test; see http://golang.org/issue/7237") + } + defer afterTest(t) + ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) {})) + defer ts.Close() + conn, err := net.Dial("tcp", ts.Listener.Addr().String()) + if err != nil { + t.Fatalf("Dial: %v", err) + } + err = conn.(*net.TCPConn).CloseWrite() + if err != nil { + t.Fatalf("Dial: %v", err) + } + donec := make(chan bool) + go func() { + defer close(donec) + bs, err := ioutil.ReadAll(conn) + if err != nil { + t.Fatalf("ReadAll: %v", err) + } + got := string(bs) + if got != "" { + t.Errorf("read %q from server; want nothing", got) + } + }() + select { + case <-donec: + case <-time.After(10 * time.Second): + t.Fatalf("timeout") + } +} + +// Tests that chunked server responses that write 1 byte at a time are +// buffered before chunk headers are added, not after chunk headers. +func TestServerBufferedChunking(t *testing.T) { + conn := new(testConn) + conn.readBuf.Write([]byte("GET / HTTP/1.1\r\n\r\n")) + conn.closec = make(chan bool, 1) + ls := &oneConnListener{conn} + go Serve(ls, HandlerFunc(func(rw ResponseWriter, req *Request) { + rw.(Flusher).Flush() // force the Header to be sent, in chunking mode, not counting the length + rw.Write([]byte{'x'}) + rw.Write([]byte{'y'}) + rw.Write([]byte{'z'}) + })) + <-conn.closec + if !bytes.HasSuffix(conn.writeBuf.Bytes(), []byte("\r\n\r\n3\r\nxyz\r\n0\r\n\r\n")) { + t.Errorf("response didn't end with a single 3 byte 'xyz' chunk; got:\n%q", + conn.writeBuf.Bytes()) + } +} + +// Tests that the server flushes its response headers out when it's +// ignoring the response body and waits a bit before forcefully +// closing the TCP connection, causing the client to get a RST. +// See http://golang.org/issue/3595 +func TestServerGracefulClose(t *testing.T) { + defer afterTest(t) + ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) { + Error(w, "bye", StatusUnauthorized) + })) + defer ts.Close() + + conn, err := net.Dial("tcp", ts.Listener.Addr().String()) + if err != nil { + t.Fatal(err) + } + defer conn.Close() + const bodySize = 5 << 20 + req := []byte(fmt.Sprintf("POST / HTTP/1.1\r\nHost: foo.com\r\nContent-Length: %d\r\n\r\n", bodySize)) + for i := 0; i < bodySize; i++ { + req = append(req, 'x') + } + writeErr := make(chan error) + go func() { + _, err := conn.Write(req) + writeErr <- err + }() + br := bufio.NewReader(conn) + lineNum := 0 + for { + line, err := br.ReadString('\n') + if err == io.EOF { + break + } + if err != nil { + t.Fatalf("ReadLine: %v", err) + } + lineNum++ + if lineNum == 1 && !strings.Contains(line, "401 Unauthorized") { + t.Errorf("Response line = %q; want a 401", line) + } + } + // Wait for write to finish. This is a broken pipe on both + // Darwin and Linux, but checking this isn't the point of + // the test. 
+ <-writeErr +} + +func TestCaseSensitiveMethod(t *testing.T) { + defer afterTest(t) + ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) { + if r.Method != "get" { + t.Errorf(`Got method %q; want "get"`, r.Method) + } + })) + defer ts.Close() + req, _ := NewRequest("get", ts.URL, nil) + res, err := DefaultClient.Do(req) + if err != nil { + t.Error(err) + return + } + res.Body.Close() +} + +// TestContentLengthZero tests that for both an HTTP/1.0 and HTTP/1.1 +// request (both keep-alive), when a Handler never writes any +// response, the net/http package adds a "Content-Length: 0" response +// header. +func TestContentLengthZero(t *testing.T) { + ts := httptest.NewServer(HandlerFunc(func(rw ResponseWriter, req *Request) {})) + defer ts.Close() + + for _, version := range []string{"HTTP/1.0", "HTTP/1.1"} { + conn, err := net.Dial("tcp", ts.Listener.Addr().String()) + if err != nil { + t.Fatalf("error dialing: %v", err) + } + _, err = fmt.Fprintf(conn, "GET / %v\r\nConnection: keep-alive\r\nHost: foo\r\n\r\n", version) + if err != nil { + t.Fatalf("error writing: %v", err) + } + req, _ := NewRequest("GET", "/", nil) + res, err := ReadResponse(bufio.NewReader(conn), req) + if err != nil { + t.Fatalf("error reading response: %v", err) + } + if te := res.TransferEncoding; len(te) > 0 { + t.Errorf("For version %q, Transfer-Encoding = %q; want none", version, te) + } + if cl := res.ContentLength; cl != 0 { + t.Errorf("For version %q, Content-Length = %v; want 0", version, cl) + } + conn.Close() + } +} + +func TestCloseNotifier(t *testing.T) { + defer afterTest(t) + gotReq := make(chan bool, 1) + sawClose := make(chan bool, 1) + ts := httptest.NewServer(HandlerFunc(func(rw ResponseWriter, req *Request) { + gotReq <- true + cc := rw.(CloseNotifier).CloseNotify() + <-cc + sawClose <- true + })) + conn, err := net.Dial("tcp", ts.Listener.Addr().String()) + if err != nil { + t.Fatalf("error dialing: %v", err) + } + diec := make(chan bool) + go func() { + _, err = fmt.Fprintf(conn, "GET / HTTP/1.1\r\nConnection: keep-alive\r\nHost: foo\r\n\r\n") + if err != nil { + t.Fatal(err) + } + <-diec + conn.Close() + }() +For: + for { + select { + case <-gotReq: + diec <- true + case <-sawClose: + break For + case <-time.After(5 * time.Second): + t.Fatal("timeout") + } + } + ts.Close() +} + +func TestCloseNotifierChanLeak(t *testing.T) { + defer afterTest(t) + req := reqBytes("GET / HTTP/1.0\nHost: golang.org") + for i := 0; i < 20; i++ { + var output bytes.Buffer + conn := &rwTestConn{ + Reader: bytes.NewReader(req), + Writer: &output, + closec: make(chan bool, 1), + } + ln := &oneConnListener{conn: conn} + handler := HandlerFunc(func(rw ResponseWriter, r *Request) { + // Ignore the return value and never read from + // it, testing that we don't leak goroutines + // on the sending side: + _ = rw.(CloseNotifier).CloseNotify() + }) + go Serve(ln, handler) + <-conn.closec + } +} + +func TestOptions(t *testing.T) { + uric := make(chan string, 2) // only expect 1, but leave space for 2 + mux := NewServeMux() + mux.HandleFunc("/", func(w ResponseWriter, r *Request) { + uric <- r.RequestURI + }) + ts := httptest.NewServer(mux) + defer ts.Close() + + conn, err := net.Dial("tcp", ts.Listener.Addr().String()) + if err != nil { + t.Fatal(err) + } + defer conn.Close() + + // An OPTIONS * request should succeed. 
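+	// ("*" is the asterisk form of the Request-URI from RFC 2616
+	// section 5.1.2: the request applies to the server as a whole
+	// rather than to a particular resource, and it is only allowed
+	// for a method like OPTIONS that need not name a resource.)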
+ _, err = conn.Write([]byte("OPTIONS * HTTP/1.1\r\nHost: foo.com\r\n\r\n")) + if err != nil { + t.Fatal(err) + } + br := bufio.NewReader(conn) + res, err := ReadResponse(br, &Request{Method: "OPTIONS"}) + if err != nil { + t.Fatal(err) + } + if res.StatusCode != 200 { + t.Errorf("Got non-200 response to OPTIONS *: %#v", res) + } + + // A GET * request on a ServeMux should fail. + _, err = conn.Write([]byte("GET * HTTP/1.1\r\nHost: foo.com\r\n\r\n")) + if err != nil { + t.Fatal(err) + } + res, err = ReadResponse(br, &Request{Method: "GET"}) + if err != nil { + t.Fatal(err) + } + if res.StatusCode != 400 { + t.Errorf("Got non-400 response to GET *: %#v", res) + } + + res, err = Get(ts.URL + "/second") + if err != nil { + t.Fatal(err) + } + res.Body.Close() + if got := <-uric; got != "/second" { + t.Errorf("Handler saw request for %q; want /second", got) + } +} + +// Tests regarding the ordering of Write, WriteHeader, Header, and +// Flush calls. In Go 1.0, rw.WriteHeader immediately flushed the +// (*response).header to the wire. In Go 1.1, the actual wire flush is +// delayed, so we could maybe tack on a Content-Length and better +// Content-Type after we see more (or all) of the output. To preserve +// compatibility with Go 1, we need to be careful to track which +// headers were live at the time of WriteHeader, so we write the same +// ones, even if the handler modifies them (~erroneously) after the +// first Write. +func TestHeaderToWire(t *testing.T) { + tests := []struct { + name string + handler func(ResponseWriter, *Request) + check func(output string) error + }{ + { + name: "write without Header", + handler: func(rw ResponseWriter, r *Request) { + rw.Write([]byte("hello world")) + }, + check: func(got string) error { + if !strings.Contains(got, "Content-Length:") { + return errors.New("no content-length") + } + if !strings.Contains(got, "Content-Type: text/plain") { + return errors.New("no content-length") + } + return nil + }, + }, + { + name: "Header mutation before write", + handler: func(rw ResponseWriter, r *Request) { + h := rw.Header() + h.Set("Content-Type", "some/type") + rw.Write([]byte("hello world")) + h.Set("Too-Late", "bogus") + }, + check: func(got string) error { + if !strings.Contains(got, "Content-Length:") { + return errors.New("no content-length") + } + if !strings.Contains(got, "Content-Type: some/type") { + return errors.New("wrong content-type") + } + if strings.Contains(got, "Too-Late") { + return errors.New("don't want too-late header") + } + return nil + }, + }, + { + name: "write then useless Header mutation", + handler: func(rw ResponseWriter, r *Request) { + rw.Write([]byte("hello world")) + rw.Header().Set("Too-Late", "Write already wrote headers") + }, + check: func(got string) error { + if strings.Contains(got, "Too-Late") { + return errors.New("header appeared from after WriteHeader") + } + return nil + }, + }, + { + name: "flush then write", + handler: func(rw ResponseWriter, r *Request) { + rw.(Flusher).Flush() + rw.Write([]byte("post-flush")) + rw.Header().Set("Too-Late", "Write already wrote headers") + }, + check: func(got string) error { + if !strings.Contains(got, "Transfer-Encoding: chunked") { + return errors.New("not chunked") + } + if strings.Contains(got, "Too-Late") { + return errors.New("header appeared from after WriteHeader") + } + return nil + }, + }, + { + name: "header then flush", + handler: func(rw ResponseWriter, r *Request) { + rw.Header().Set("Content-Type", "some/type") + rw.(Flusher).Flush() + rw.Write([]byte("post-flush")) 
+ rw.Header().Set("Too-Late", "Write already wrote headers") + }, + check: func(got string) error { + if !strings.Contains(got, "Transfer-Encoding: chunked") { + return errors.New("not chunked") + } + if strings.Contains(got, "Too-Late") { + return errors.New("header appeared from after WriteHeader") + } + if !strings.Contains(got, "Content-Type: some/type") { + return errors.New("wrong content-length") + } + return nil + }, + }, + { + name: "sniff-on-first-write content-type", + handler: func(rw ResponseWriter, r *Request) { + rw.Write([]byte("some html")) + rw.Header().Set("Content-Type", "x/wrong") + }, + check: func(got string) error { + if !strings.Contains(got, "Content-Type: text/html") { + return errors.New("wrong content-length; want html") + } + return nil + }, + }, + { + name: "explicit content-type wins", + handler: func(rw ResponseWriter, r *Request) { + rw.Header().Set("Content-Type", "some/type") + rw.Write([]byte("some html")) + }, + check: func(got string) error { + if !strings.Contains(got, "Content-Type: some/type") { + return errors.New("wrong content-length; want html") + } + return nil + }, + }, + { + name: "empty handler", + handler: func(rw ResponseWriter, r *Request) { + }, + check: func(got string) error { + if !strings.Contains(got, "Content-Type: text/plain") { + return errors.New("wrong content-length; want text/plain") + } + if !strings.Contains(got, "Content-Length: 0") { + return errors.New("want 0 content-length") + } + return nil + }, + }, + { + name: "only Header, no write", + handler: func(rw ResponseWriter, r *Request) { + rw.Header().Set("Some-Header", "some-value") + }, + check: func(got string) error { + if !strings.Contains(got, "Some-Header") { + return errors.New("didn't get header") + } + return nil + }, + }, + { + name: "WriteHeader call", + handler: func(rw ResponseWriter, r *Request) { + rw.WriteHeader(404) + rw.Header().Set("Too-Late", "some-value") + }, + check: func(got string) error { + if !strings.Contains(got, "404") { + return errors.New("wrong status") + } + if strings.Contains(got, "Some-Header") { + return errors.New("shouldn't have seen Too-Late") + } + return nil + }, + }, + } + for _, tc := range tests { + ht := newHandlerTest(HandlerFunc(tc.handler)) + got := ht.rawResponse("GET / HTTP/1.1\nHost: golang.org") + if err := tc.check(got); err != nil { + t.Errorf("%s: %v\nGot response:\n%s", tc.name, err, got) + } + } +} + +// goTimeout runs f, failing t if f takes more than ns to complete. 
+func goTimeout(t *testing.T, d time.Duration, f func()) { + ch := make(chan bool, 2) + timer := time.AfterFunc(d, func() { + t.Errorf("Timeout expired after %v", d) + ch <- true + }) + defer timer.Stop() + go func() { + defer func() { ch <- true }() + f() + }() + <-ch +} + +type errorListener struct { + errs []error +} + +func (l *errorListener) Accept() (c net.Conn, err error) { + if len(l.errs) == 0 { + return nil, io.EOF + } + err = l.errs[0] + l.errs = l.errs[1:] + return +} + +func (l *errorListener) Close() error { + return nil +} + +func (l *errorListener) Addr() net.Addr { + return dummyAddr("test-address") +} + +func TestAcceptMaxFds(t *testing.T) { + log.SetOutput(ioutil.Discard) // is noisy otherwise + defer log.SetOutput(os.Stderr) + + ln := &errorListener{[]error{ + &net.OpError{ + Op: "accept", + Err: syscall.EMFILE, + }}} + err := Serve(ln, HandlerFunc(HandlerFunc(func(ResponseWriter, *Request) {}))) + if err != io.EOF { + t.Errorf("got error %v, want EOF", err) + } +} + +func TestWriteAfterHijack(t *testing.T) { + req := reqBytes("GET / HTTP/1.1\nHost: golang.org") + var buf bytes.Buffer + wrotec := make(chan bool, 1) + conn := &rwTestConn{ + Reader: bytes.NewReader(req), + Writer: &buf, + closec: make(chan bool, 1), + } + handler := HandlerFunc(func(rw ResponseWriter, r *Request) { + conn, bufrw, err := rw.(Hijacker).Hijack() + if err != nil { + t.Error(err) + return + } + go func() { + bufrw.Write([]byte("[hijack-to-bufw]")) + bufrw.Flush() + conn.Write([]byte("[hijack-to-conn]")) + conn.Close() + wrotec <- true + }() + }) + ln := &oneConnListener{conn: conn} + go Serve(ln, handler) + <-conn.closec + <-wrotec + if g, w := buf.String(), "[hijack-to-bufw][hijack-to-conn]"; g != w { + t.Errorf("wrote %q; want %q", g, w) + } +} + +func TestDoubleHijack(t *testing.T) { + req := reqBytes("GET / HTTP/1.1\nHost: golang.org") + var buf bytes.Buffer + conn := &rwTestConn{ + Reader: bytes.NewReader(req), + Writer: &buf, + closec: make(chan bool, 1), + } + handler := HandlerFunc(func(rw ResponseWriter, r *Request) { + conn, _, err := rw.(Hijacker).Hijack() + if err != nil { + t.Error(err) + return + } + _, _, err = rw.(Hijacker).Hijack() + if err == nil { + t.Errorf("got err = nil; want err != nil") + } + conn.Close() + }) + ln := &oneConnListener{conn: conn} + go Serve(ln, handler) + <-conn.closec +} + +// http://code.google.com/p/go/issues/detail?id=5955 +// Note that this does not test the "request too large" +// exit path from the http server. This is intentional; +// not sending Connection: close is just a minor wire +// optimization and is pointless if dealing with a +// badly behaved client. 
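+// (Recall that HTTP/1.0 connections are non-persistent by default:
+// the server only holds the connection open when the client opts in
+// with "Connection: keep-alive", which is what the request table in
+// this test exercises.)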
+func TestHTTP10ConnectionHeader(t *testing.T) { + defer afterTest(t) + + mux := NewServeMux() + mux.Handle("/", HandlerFunc(func(resp ResponseWriter, req *Request) {})) + ts := httptest.NewServer(mux) + defer ts.Close() + + // net/http uses HTTP/1.1 for requests, so write requests manually + tests := []struct { + req string // raw http request + expect []string // expected Connection header(s) + }{ + { + req: "GET / HTTP/1.0\r\n\r\n", + expect: nil, + }, + { + req: "OPTIONS * HTTP/1.0\r\n\r\n", + expect: nil, + }, + { + req: "GET / HTTP/1.0\r\nConnection: keep-alive\r\n\r\n", + expect: []string{"keep-alive"}, + }, + } + + for _, tt := range tests { + conn, err := net.Dial("tcp", ts.Listener.Addr().String()) + if err != nil { + t.Fatal("dial err:", err) + } + + _, err = fmt.Fprint(conn, tt.req) + if err != nil { + t.Fatal("conn write err:", err) + } + + resp, err := ReadResponse(bufio.NewReader(conn), &Request{Method: "GET"}) + if err != nil { + t.Fatal("ReadResponse err:", err) + } + conn.Close() + resp.Body.Close() + + got := resp.Header["Connection"] + if !reflect.DeepEqual(got, tt.expect) { + t.Errorf("wrong Connection headers for request %q. Got %q expect %q", tt.req, got, tt.expect) + } + } +} + +// See golang.org/issue/5660 +func TestServerReaderFromOrder(t *testing.T) { + defer afterTest(t) + pr, pw := io.Pipe() + const size = 3 << 20 + ts := httptest.NewServer(HandlerFunc(func(rw ResponseWriter, req *Request) { + rw.Header().Set("Content-Type", "text/plain") // prevent sniffing path + done := make(chan bool) + go func() { + io.Copy(rw, pr) + close(done) + }() + time.Sleep(25 * time.Millisecond) // give Copy a chance to break things + n, err := io.Copy(ioutil.Discard, req.Body) + if err != nil { + t.Errorf("handler Copy: %v", err) + return + } + if n != size { + t.Errorf("handler Copy = %d; want %d", n, size) + } + pw.Write([]byte("hi")) + pw.Close() + <-done + })) + defer ts.Close() + + req, err := NewRequest("POST", ts.URL, io.LimitReader(neverEnding('a'), size)) + if err != nil { + t.Fatal(err) + } + res, err := DefaultClient.Do(req) + if err != nil { + t.Fatal(err) + } + all, err := ioutil.ReadAll(res.Body) + if err != nil { + t.Fatal(err) + } + res.Body.Close() + if string(all) != "hi" { + t.Errorf("Body = %q; want hi", all) + } +} + +// Issue 6157, Issue 6685 +func TestCodesPreventingContentTypeAndBody(t *testing.T) { + for _, code := range []int{StatusNotModified, StatusNoContent, StatusContinue} { + ht := newHandlerTest(HandlerFunc(func(w ResponseWriter, r *Request) { + if r.URL.Path == "/header" { + w.Header().Set("Content-Length", "123") + } + w.WriteHeader(code) + if r.URL.Path == "/more" { + w.Write([]byte("stuff")) + } + })) + for _, req := range []string{ + "GET / HTTP/1.0", + "GET /header HTTP/1.0", + "GET /more HTTP/1.0", + "GET / HTTP/1.1", + "GET /header HTTP/1.1", + "GET /more HTTP/1.1", + } { + got := ht.rawResponse(req) + wantStatus := fmt.Sprintf("%d %s", code, StatusText(code)) + if !strings.Contains(got, wantStatus) { + t.Errorf("Code %d: Wanted %q Modified for %q: %s", code, wantStatus, req, got) + } else if strings.Contains(got, "Content-Length") { + t.Errorf("Code %d: Got a Content-Length from %q: %s", code, req, got) + } else if strings.Contains(got, "stuff") { + t.Errorf("Code %d: Response contains a body from %q: %s", code, req, got) + } + } + } +} + +func TestContentTypeOkayOn204(t *testing.T) { + ht := newHandlerTest(HandlerFunc(func(w ResponseWriter, r *Request) { + w.Header().Set("Content-Length", "123") // suppressed + 
w.Header().Set("Content-Type", "foo/bar") + w.WriteHeader(204) + })) + got := ht.rawResponse("GET / HTTP/1.1") + if !strings.Contains(got, "Content-Type: foo/bar") { + t.Errorf("Response = %q; want Content-Type: foo/bar", got) + } + if strings.Contains(got, "Content-Length: 123") { + t.Errorf("Response = %q; don't want a Content-Length", got) + } +} + +// Issue 6995 +// A server Handler can receive a Request, and then turn around and +// give a copy of that Request.Body out to the Transport (e.g. any +// proxy). So then two people own that Request.Body (both the server +// and the http client), and both think they can close it on failure. +// Therefore, all incoming server requests Bodies need to be thread-safe. +func TestTransportAndServerSharedBodyRace(t *testing.T) { + defer afterTest(t) + + const bodySize = 1 << 20 + + unblockBackend := make(chan bool) + backend := httptest.NewServer(HandlerFunc(func(rw ResponseWriter, req *Request) { + io.CopyN(rw, req.Body, bodySize/2) + <-unblockBackend + })) + defer backend.Close() + + backendRespc := make(chan *Response, 1) + proxy := httptest.NewServer(HandlerFunc(func(rw ResponseWriter, req *Request) { + if req.RequestURI == "/foo" { + rw.Write([]byte("bar")) + return + } + req2, _ := NewRequest("POST", backend.URL, req.Body) + req2.ContentLength = bodySize + + bresp, err := DefaultClient.Do(req2) + if err != nil { + t.Errorf("Proxy outbound request: %v", err) + return + } + _, err = io.CopyN(ioutil.Discard, bresp.Body, bodySize/4) + if err != nil { + t.Errorf("Proxy copy error: %v", err) + return + } + backendRespc <- bresp // to close later + + // Try to cause a race: Both the DefaultTransport and the proxy handler's Server + // will try to read/close req.Body (aka req2.Body) + DefaultTransport.(*Transport).CancelRequest(req2) + rw.Write([]byte("OK")) + })) + defer proxy.Close() + + req, _ := NewRequest("POST", proxy.URL, io.LimitReader(neverEnding('a'), bodySize)) + res, err := DefaultClient.Do(req) + if err != nil { + t.Fatalf("Original request: %v", err) + } + + // Cleanup, so we don't leak goroutines. + res.Body.Close() + close(unblockBackend) + (<-backendRespc).Body.Close() +} + +// Test that a hanging Request.Body.Read from another goroutine can't +// cause the Handler goroutine's Request.Body.Close to block. +func TestRequestBodyCloseDoesntBlock(t *testing.T) { + t.Skipf("Skipping known issue; see golang.org/issue/7121") + if testing.Short() { + t.Skip("skipping in -short mode") + } + defer afterTest(t) + + readErrCh := make(chan error, 1) + errCh := make(chan error, 2) + + server := httptest.NewServer(HandlerFunc(func(rw ResponseWriter, req *Request) { + go func(body io.Reader) { + _, err := body.Read(make([]byte, 100)) + readErrCh <- err + }(req.Body) + time.Sleep(500 * time.Millisecond) + })) + defer server.Close() + + closeConn := make(chan bool) + defer close(closeConn) + go func() { + conn, err := net.Dial("tcp", server.Listener.Addr().String()) + if err != nil { + errCh <- err + return + } + defer conn.Close() + _, err = conn.Write([]byte("POST / HTTP/1.1\r\nConnection: close\r\nHost: foo\r\nContent-Length: 100000\r\n\r\n")) + if err != nil { + errCh <- err + return + } + // And now just block, making the server block on our + // 100000 bytes of body that will never arrive. + <-closeConn + }() + select { + case err := <-readErrCh: + if err == nil { + t.Error("Read was nil. 
Expected error.") + } + case err := <-errCh: + t.Error(err) + case <-time.After(5 * time.Second): + t.Error("timeout") + } +} + +func TestResponseWriterWriteStringAllocs(t *testing.T) { + ht := newHandlerTest(HandlerFunc(func(w ResponseWriter, r *Request) { + if r.URL.Path == "/s" { + io.WriteString(w, "Hello world") + } else { + w.Write([]byte("Hello world")) + } + })) + before := testing.AllocsPerRun(50, func() { ht.rawResponse("GET / HTTP/1.0") }) + after := testing.AllocsPerRun(50, func() { ht.rawResponse("GET /s HTTP/1.0") }) + if int(after) >= int(before) { + t.Errorf("WriteString allocs of %v >= Write allocs of %v", after, before) + } +} + +func TestAppendTime(t *testing.T) { + var b [len(TimeFormat)]byte + t1 := time.Date(2013, 9, 21, 15, 41, 0, 0, time.FixedZone("CEST", 2*60*60)) + res := ExportAppendTime(b[:0], t1) + t2, err := ParseTime(string(res)) + if err != nil { + t.Fatalf("Error parsing time: %s", err) + } + if !t1.Equal(t2) { + t.Fatalf("Times differ; expected: %v, got %v (%s)", t1, t2, string(res)) + } +} + +func TestServerConnState(t *testing.T) { + defer afterTest(t) + handler := map[string]func(w ResponseWriter, r *Request){ + "/": func(w ResponseWriter, r *Request) { + fmt.Fprintf(w, "Hello.") + }, + "/close": func(w ResponseWriter, r *Request) { + w.Header().Set("Connection", "close") + fmt.Fprintf(w, "Hello.") + }, + "/hijack": func(w ResponseWriter, r *Request) { + c, _, _ := w.(Hijacker).Hijack() + c.Write([]byte("HTTP/1.0 200 OK\r\nConnection: close\r\n\r\nHello.")) + c.Close() + }, + "/hijack-panic": func(w ResponseWriter, r *Request) { + c, _, _ := w.(Hijacker).Hijack() + c.Write([]byte("HTTP/1.0 200 OK\r\nConnection: close\r\n\r\nHello.")) + c.Close() + panic("intentional panic") + }, + } + ts := httptest.NewUnstartedServer(HandlerFunc(func(w ResponseWriter, r *Request) { + handler[r.URL.Path](w, r) + })) + defer ts.Close() + + var mu sync.Mutex // guard stateLog and connID + var stateLog = map[int][]ConnState{} + var connID = map[net.Conn]int{} + + ts.Config.ErrorLog = log.New(ioutil.Discard, "", 0) + ts.Config.ConnState = func(c net.Conn, state ConnState) { + if c == nil { + t.Errorf("nil conn seen in state %s", state) + return + } + mu.Lock() + defer mu.Unlock() + id, ok := connID[c] + if !ok { + id = len(connID) + 1 + connID[c] = id + } + stateLog[id] = append(stateLog[id], state) + } + ts.Start() + + mustGet(t, ts.URL+"/") + mustGet(t, ts.URL+"/close") + + mustGet(t, ts.URL+"/") + mustGet(t, ts.URL+"/", "Connection", "close") + + mustGet(t, ts.URL+"/hijack") + mustGet(t, ts.URL+"/hijack-panic") + + // New->Closed + { + c, err := net.Dial("tcp", ts.Listener.Addr().String()) + if err != nil { + t.Fatal(err) + } + c.Close() + } + + // New->Active->Closed + { + c, err := net.Dial("tcp", ts.Listener.Addr().String()) + if err != nil { + t.Fatal(err) + } + if _, err := io.WriteString(c, "BOGUS REQUEST\r\n\r\n"); err != nil { + t.Fatal(err) + } + c.Close() + } + + // New->Idle->Closed + { + c, err := net.Dial("tcp", ts.Listener.Addr().String()) + if err != nil { + t.Fatal(err) + } + if _, err := io.WriteString(c, "GET / HTTP/1.1\r\nHost: foo\r\n\r\n"); err != nil { + t.Fatal(err) + } + res, err := ReadResponse(bufio.NewReader(c), nil) + if err != nil { + t.Fatal(err) + } + if _, err := io.Copy(ioutil.Discard, res.Body); err != nil { + t.Fatal(err) + } + c.Close() + } + + want := map[int][]ConnState{ + 1: []ConnState{StateNew, StateActive, StateIdle, StateActive, StateClosed}, + 2: []ConnState{StateNew, StateActive, StateIdle, StateActive, StateClosed}, + 3: 
[]ConnState{StateNew, StateActive, StateHijacked}, + 4: []ConnState{StateNew, StateActive, StateHijacked}, + 5: []ConnState{StateNew, StateClosed}, + 6: []ConnState{StateNew, StateActive, StateClosed}, + 7: []ConnState{StateNew, StateActive, StateIdle, StateClosed}, + } + logString := func(m map[int][]ConnState) string { + var b bytes.Buffer + for id, l := range m { + fmt.Fprintf(&b, "Conn %d: ", id) + for _, s := range l { + fmt.Fprintf(&b, "%s ", s) + } + b.WriteString("\n") + } + return b.String() + } + + for i := 0; i < 5; i++ { + time.Sleep(time.Duration(i) * 50 * time.Millisecond) + mu.Lock() + match := reflect.DeepEqual(stateLog, want) + mu.Unlock() + if match { + return + } + } + + mu.Lock() + t.Errorf("Unexpected events.\nGot log: %s\n Want: %s\n", logString(stateLog), logString(want)) + mu.Unlock() +} + +func mustGet(t *testing.T, url string, headers ...string) { + req, err := NewRequest("GET", url, nil) + if err != nil { + t.Fatal(err) + } + for len(headers) > 0 { + req.Header.Add(headers[0], headers[1]) + headers = headers[2:] + } + res, err := DefaultClient.Do(req) + if err != nil { + t.Errorf("Error fetching %s: %v", url, err) + return + } + _, err = ioutil.ReadAll(res.Body) + defer res.Body.Close() + if err != nil { + t.Errorf("Error reading %s: %v", url, err) + } +} + +func TestServerKeepAlivesEnabled(t *testing.T) { + defer afterTest(t) + ts := httptest.NewUnstartedServer(HandlerFunc(func(w ResponseWriter, r *Request) {})) + ts.Config.SetKeepAlivesEnabled(false) + ts.Start() + defer ts.Close() + res, err := Get(ts.URL) + if err != nil { + t.Fatal(err) + } + defer res.Body.Close() + if !res.Close { + t.Errorf("Body.Close == false; want true") + } +} + +// golang.org/issue/7856 +func TestServerEmptyBodyRace(t *testing.T) { + defer afterTest(t) + var n int32 + ts := httptest.NewServer(HandlerFunc(func(rw ResponseWriter, req *Request) { + atomic.AddInt32(&n, 1) + })) + defer ts.Close() + var wg sync.WaitGroup + const reqs = 20 + for i := 0; i < reqs; i++ { + wg.Add(1) + go func() { + defer wg.Done() + res, err := Get(ts.URL) + if err != nil { + t.Error(err) + return + } + defer res.Body.Close() + _, err = io.Copy(ioutil.Discard, res.Body) + if err != nil { + t.Error(err) + return + } + }() + } + wg.Wait() + if got := atomic.LoadInt32(&n); got != reqs { + t.Errorf("handler ran %d times; want %d", got, reqs) + } +} + +func TestServerConnStateNew(t *testing.T) { + sawNew := false // if the test is buggy, we'll race on this variable. 
+ srv := &Server{ + ConnState: func(c net.Conn, state ConnState) { + if state == StateNew { + sawNew = true // testing that this write isn't racy + } + }, + Handler: HandlerFunc(func(w ResponseWriter, r *Request) {}), // irrelevant + } + srv.Serve(&oneConnListener{ + conn: &rwTestConn{ + Reader: strings.NewReader("GET / HTTP/1.1\r\nHost: foo\r\n\r\n"), + Writer: ioutil.Discard, + }, + }) + if !sawNew { // testing that this read isn't racy + t.Error("StateNew not seen") + } +} + +func BenchmarkClientServer(b *testing.B) { + b.ReportAllocs() + b.StopTimer() + ts := httptest.NewServer(HandlerFunc(func(rw ResponseWriter, r *Request) { + fmt.Fprintf(rw, "Hello world.\n") + })) + defer ts.Close() + b.StartTimer() + + for i := 0; i < b.N; i++ { + res, err := Get(ts.URL) + if err != nil { + b.Fatal("Get:", err) + } + all, err := ioutil.ReadAll(res.Body) + res.Body.Close() + if err != nil { + b.Fatal("ReadAll:", err) + } + body := string(all) + if body != "Hello world.\n" { + b.Fatal("Got body:", body) + } + } + + b.StopTimer() +} + +func BenchmarkClientServerParallel4(b *testing.B) { + benchmarkClientServerParallel(b, 4) +} + +func BenchmarkClientServerParallel64(b *testing.B) { + benchmarkClientServerParallel(b, 64) +} + +func benchmarkClientServerParallel(b *testing.B, parallelism int) { + b.ReportAllocs() + ts := httptest.NewServer(HandlerFunc(func(rw ResponseWriter, r *Request) { + fmt.Fprintf(rw, "Hello world.\n") + })) + defer ts.Close() + b.ResetTimer() + b.SetParallelism(parallelism) + b.RunParallel(func(pb *testing.PB) { + for pb.Next() { + res, err := Get(ts.URL) + if err != nil { + b.Logf("Get: %v", err) + continue + } + all, err := ioutil.ReadAll(res.Body) + res.Body.Close() + if err != nil { + b.Logf("ReadAll: %v", err) + continue + } + body := string(all) + if body != "Hello world.\n" { + panic("Got body: " + body) + } + } + }) +} + +// A benchmark for profiling the server without the HTTP client code. +// The client code runs in a subprocess. +// +// For use like: +// $ go test -c +// $ ./http.test -test.run=XX -test.bench=BenchmarkServer -test.benchtime=15s -test.cpuprofile=http.prof +// $ go tool pprof http.test http.prof +// (pprof) web +func BenchmarkServer(b *testing.B) { + b.ReportAllocs() + // Child process mode; + if url := os.Getenv("TEST_BENCH_SERVER_URL"); url != "" { + n, err := strconv.Atoi(os.Getenv("TEST_BENCH_CLIENT_N")) + if err != nil { + panic(err) + } + for i := 0; i < n; i++ { + res, err := Get(url) + if err != nil { + log.Panicf("Get: %v", err) + } + all, err := ioutil.ReadAll(res.Body) + res.Body.Close() + if err != nil { + log.Panicf("ReadAll: %v", err) + } + body := string(all) + if body != "Hello world.\n" { + log.Panicf("Got body: %q", body) + } + } + os.Exit(0) + return + } + + var res = []byte("Hello world.\n") + b.StopTimer() + ts := httptest.NewServer(HandlerFunc(func(rw ResponseWriter, r *Request) { + rw.Header().Set("Content-Type", "text/html; charset=utf-8") + rw.Write(res) + })) + defer ts.Close() + b.StartTimer() + + cmd := exec.Command(os.Args[0], "-test.run=XXXX", "-test.bench=BenchmarkServer") + cmd.Env = append([]string{ + fmt.Sprintf("TEST_BENCH_CLIENT_N=%d", b.N), + fmt.Sprintf("TEST_BENCH_SERVER_URL=%s", ts.URL), + }, os.Environ()...) 
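+	// (The child is this same test binary re-executed; the
+	// TEST_BENCH_SERVER_URL variable switches it into the client loop
+	// above, keeping the client's CPU time out of the server profile.)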
+ out, err := cmd.CombinedOutput() + if err != nil { + b.Errorf("Test failure: %v, with output: %s", err, out) + } +} + +func BenchmarkServerFakeConnNoKeepAlive(b *testing.B) { + b.ReportAllocs() + req := reqBytes(`GET / HTTP/1.0 +Host: golang.org +Accept: text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8 +User-Agent: Mozilla/5.0 (Macintosh; Intel Mac OS X 10_8_2) AppleWebKit/537.17 (KHTML, like Gecko) Chrome/24.0.1312.52 Safari/537.17 +Accept-Encoding: gzip,deflate,sdch +Accept-Language: en-US,en;q=0.8 +Accept-Charset: ISO-8859-1,utf-8;q=0.7,*;q=0.3 +`) + res := []byte("Hello world!\n") + + conn := &testConn{ + // testConn.Close will not push into the channel + // if it's full. + closec: make(chan bool, 1), + } + handler := HandlerFunc(func(rw ResponseWriter, r *Request) { + rw.Header().Set("Content-Type", "text/html; charset=utf-8") + rw.Write(res) + }) + ln := new(oneConnListener) + for i := 0; i < b.N; i++ { + conn.readBuf.Reset() + conn.writeBuf.Reset() + conn.readBuf.Write(req) + ln.conn = conn + Serve(ln, handler) + <-conn.closec + } +} + +// repeatReader reads content count times, then EOFs. +type repeatReader struct { + content []byte + count int + off int +} + +func (r *repeatReader) Read(p []byte) (n int, err error) { + if r.count <= 0 { + return 0, io.EOF + } + n = copy(p, r.content[r.off:]) + r.off += n + if r.off == len(r.content) { + r.count-- + r.off = 0 + } + return +} + +func BenchmarkServerFakeConnWithKeepAlive(b *testing.B) { + b.ReportAllocs() + + req := reqBytes(`GET / HTTP/1.1 +Host: golang.org +Accept: text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8 +User-Agent: Mozilla/5.0 (Macintosh; Intel Mac OS X 10_8_2) AppleWebKit/537.17 (KHTML, like Gecko) Chrome/24.0.1312.52 Safari/537.17 +Accept-Encoding: gzip,deflate,sdch +Accept-Language: en-US,en;q=0.8 +Accept-Charset: ISO-8859-1,utf-8;q=0.7,*;q=0.3 +`) + res := []byte("Hello world!\n") + + conn := &rwTestConn{ + Reader: &repeatReader{content: req, count: b.N}, + Writer: ioutil.Discard, + closec: make(chan bool, 1), + } + handled := 0 + handler := HandlerFunc(func(rw ResponseWriter, r *Request) { + handled++ + rw.Header().Set("Content-Type", "text/html; charset=utf-8") + rw.Write(res) + }) + ln := &oneConnListener{conn: conn} + go Serve(ln, handler) + <-conn.closec + if b.N != handled { + b.Errorf("b.N=%d but handled %d", b.N, handled) + } +} + +// same as above, but representing the most simple possible request +// and handler. Notably: the handler does not call rw.Header(). +func BenchmarkServerFakeConnWithKeepAliveLite(b *testing.B) { + b.ReportAllocs() + + req := reqBytes(`GET / HTTP/1.1 +Host: golang.org +`) + res := []byte("Hello world!\n") + + conn := &rwTestConn{ + Reader: &repeatReader{content: req, count: b.N}, + Writer: ioutil.Discard, + closec: make(chan bool, 1), + } + handled := 0 + handler := HandlerFunc(func(rw ResponseWriter, r *Request) { + handled++ + rw.Write(res) + }) + ln := &oneConnListener{conn: conn} + go Serve(ln, handler) + <-conn.closec + if b.N != handled { + b.Errorf("b.N=%d but handled %d", b.N, handled) + } +} + +const someResponse = "some response" + +// A Response that's just no bigger than 2KB, the buffer-before-chunking threshold. +var response = bytes.Repeat([]byte(someResponse), 2<<10/len(someResponse)) + +// Both Content-Type and Content-Length set. Should be no buffering. 
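+// (With both headers present the server has nothing left to compute:
+// no sniffing for a Content-Type and no counting for a Content-Length,
+// so writes can pass straight through. The three benchmarks after this
+// one each drop one of those headers, or both, to measure what that
+// extra work costs.)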
+func BenchmarkServerHandlerTypeLen(b *testing.B) { + benchmarkHandler(b, HandlerFunc(func(w ResponseWriter, r *Request) { + w.Header().Set("Content-Type", "text/html") + w.Header().Set("Content-Length", strconv.Itoa(len(response))) + w.Write(response) + })) +} + +// A Content-Type is set, but no length. No sniffing, but will count the Content-Length. +func BenchmarkServerHandlerNoLen(b *testing.B) { + benchmarkHandler(b, HandlerFunc(func(w ResponseWriter, r *Request) { + w.Header().Set("Content-Type", "text/html") + w.Write(response) + })) +} + +// A Content-Length is set, but the Content-Type will be sniffed. +func BenchmarkServerHandlerNoType(b *testing.B) { + benchmarkHandler(b, HandlerFunc(func(w ResponseWriter, r *Request) { + w.Header().Set("Content-Length", strconv.Itoa(len(response))) + w.Write(response) + })) +} + +// Neither a Content-Type or Content-Length, so sniffed and counted. +func BenchmarkServerHandlerNoHeader(b *testing.B) { + benchmarkHandler(b, HandlerFunc(func(w ResponseWriter, r *Request) { + w.Write(response) + })) +} + +func benchmarkHandler(b *testing.B, h Handler) { + b.ReportAllocs() + req := reqBytes(`GET / HTTP/1.1 +Host: golang.org +`) + conn := &rwTestConn{ + Reader: &repeatReader{content: req, count: b.N}, + Writer: ioutil.Discard, + closec: make(chan bool, 1), + } + handled := 0 + handler := HandlerFunc(func(rw ResponseWriter, r *Request) { + handled++ + h.ServeHTTP(rw, r) + }) + ln := &oneConnListener{conn: conn} + go Serve(ln, handler) + <-conn.closec + if b.N != handled { + b.Errorf("b.N=%d but handled %d", b.N, handled) + } +} + +func BenchmarkServerHijack(b *testing.B) { + b.ReportAllocs() + req := reqBytes(`GET / HTTP/1.1 +Host: golang.org +`) + h := HandlerFunc(func(w ResponseWriter, r *Request) { + conn, _, err := w.(Hijacker).Hijack() + if err != nil { + panic(err) + } + conn.Close() + }) + conn := &rwTestConn{ + Writer: ioutil.Discard, + closec: make(chan bool, 1), + } + ln := &oneConnListener{conn: conn} + for i := 0; i < b.N; i++ { + conn.Reader = bytes.NewReader(req) + ln.conn = conn + Serve(ln, h) + <-conn.closec + } +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/core/http/server.go b/vendor/github.com/Azure/azure-sdk-for-go/core/http/server.go new file mode 100644 index 000000000..eae097eb8 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/core/http/server.go @@ -0,0 +1,2052 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// HTTP server. See RFC 2616. + +package http + +import ( + "bufio" + "crypto/tls" + "errors" + "fmt" + "io" + "io/ioutil" + "log" + "net" + "net/url" + "os" + "path" + "runtime" + "strconv" + "strings" + "sync" + "sync/atomic" + "time" +) + +// Errors introduced by the HTTP server. +var ( + ErrWriteAfterFlush = errors.New("Conn.Write called after Flush") + ErrBodyNotAllowed = errors.New("http: request method or response status code does not allow body") + ErrHijacked = errors.New("Conn has been hijacked") + ErrContentLength = errors.New("Conn.Write wrote more than the declared Content-Length") +) + +// Objects implementing the Handler interface can be +// registered to serve a particular path or subtree +// in the HTTP server. +// +// ServeHTTP should write reply headers and data to the ResponseWriter +// and then return. Returning signals that the request is finished +// and that the HTTP server can move on to the next request on +// the connection. 
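+//
+// A minimal implementation, for illustration (the greeter name is
+// hypothetical):
+//
+//	type greeter struct{}
+//
+//	func (greeter) ServeHTTP(w ResponseWriter, r *Request) {
+//		io.WriteString(w, "hello")
+//	}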
+type Handler interface { + ServeHTTP(ResponseWriter, *Request) +} + +// A ResponseWriter interface is used by an HTTP handler to +// construct an HTTP response. +type ResponseWriter interface { + // Header returns the header map that will be sent by WriteHeader. + // Changing the header after a call to WriteHeader (or Write) has + // no effect. + Header() Header + + // Write writes the data to the connection as part of an HTTP reply. + // If WriteHeader has not yet been called, Write calls WriteHeader(http.StatusOK) + // before writing the data. If the Header does not contain a + // Content-Type line, Write adds a Content-Type set to the result of passing + // the initial 512 bytes of written data to DetectContentType. + Write([]byte) (int, error) + + // WriteHeader sends an HTTP response header with status code. + // If WriteHeader is not called explicitly, the first call to Write + // will trigger an implicit WriteHeader(http.StatusOK). + // Thus explicit calls to WriteHeader are mainly used to + // send error codes. + WriteHeader(int) +} + +// The Flusher interface is implemented by ResponseWriters that allow +// an HTTP handler to flush buffered data to the client. +// +// Note that even for ResponseWriters that support Flush, +// if the client is connected through an HTTP proxy, +// the buffered data may not reach the client until the response +// completes. +type Flusher interface { + // Flush sends any buffered data to the client. + Flush() +} + +// The Hijacker interface is implemented by ResponseWriters that allow +// an HTTP handler to take over the connection. +type Hijacker interface { + // Hijack lets the caller take over the connection. + // After a call to Hijack(), the HTTP server library + // will not do anything else with the connection. + // It becomes the caller's responsibility to manage + // and close the connection. + Hijack() (net.Conn, *bufio.ReadWriter, error) +} + +// The CloseNotifier interface is implemented by ResponseWriters which +// allow detecting when the underlying connection has gone away. +// +// This mechanism can be used to cancel long operations on the server +// if the client has disconnected before the response is ready. +type CloseNotifier interface { + // CloseNotify returns a channel that receives a single value + // when the client connection has gone away. + CloseNotify() <-chan bool +} + +// A conn represents the server side of an HTTP connection. 
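+//
+// Reads from the client flow through the chain buf -> lr -> sr -> rwc,
+// which lets the server cap the size of request headers (by adjusting
+// lr.N in readRequest) and swap in a pipe for CloseNotify (by switching
+// sr.r) without touching the underlying net.Conn.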
+type conn struct { + remoteAddr string // network address of remote side + server *Server // the Server on which the connection arrived + rwc net.Conn // i/o connection + sr liveSwitchReader // where the LimitReader reads from; usually the rwc + lr *io.LimitedReader // io.LimitReader(sr) + buf *bufio.ReadWriter // buffered(lr,rwc), reading from bufio->limitReader->sr->rwc + tlsState *tls.ConnectionState // or nil when not using TLS + + mu sync.Mutex // guards the following + clientGone bool // if client has disconnected mid-request + closeNotifyc chan bool // made lazily + hijackedv bool // connection has been hijacked by handler +} + +func (c *conn) hijacked() bool { + c.mu.Lock() + defer c.mu.Unlock() + return c.hijackedv +} + +func (c *conn) hijack() (rwc net.Conn, buf *bufio.ReadWriter, err error) { + c.mu.Lock() + defer c.mu.Unlock() + if c.hijackedv { + return nil, nil, ErrHijacked + } + if c.closeNotifyc != nil { + return nil, nil, errors.New("http: Hijack is incompatible with use of CloseNotifier") + } + c.hijackedv = true + rwc = c.rwc + buf = c.buf + c.rwc = nil + c.buf = nil + c.setState(rwc, StateHijacked) + return +} + +func (c *conn) closeNotify() <-chan bool { + c.mu.Lock() + defer c.mu.Unlock() + if c.closeNotifyc == nil { + c.closeNotifyc = make(chan bool, 1) + if c.hijackedv { + // to obey the function signature, even though + // it'll never receive a value. + return c.closeNotifyc + } + pr, pw := io.Pipe() + + readSource := c.sr.r + c.sr.Lock() + c.sr.r = pr + c.sr.Unlock() + go func() { + _, err := io.Copy(pw, readSource) + if err == nil { + err = io.EOF + } + pw.CloseWithError(err) + c.noteClientGone() + }() + } + return c.closeNotifyc +} + +func (c *conn) noteClientGone() { + c.mu.Lock() + defer c.mu.Unlock() + if c.closeNotifyc != nil && !c.clientGone { + c.closeNotifyc <- true + } + c.clientGone = true +} + +// A switchReader can have its Reader changed at runtime. +// It's not safe for concurrent Reads and switches. +type switchReader struct { + io.Reader +} + +// A switchWriter can have its Writer changed at runtime. +// It's not safe for concurrent Writes and switches. +type switchWriter struct { + io.Writer +} + +// A liveSwitchReader is a switchReader that's safe for concurrent +// reads and switches, if its mutex is held. +type liveSwitchReader struct { + sync.Mutex + r io.Reader +} + +func (sr *liveSwitchReader) Read(p []byte) (n int, err error) { + sr.Lock() + r := sr.r + sr.Unlock() + return r.Read(p) +} + +// This should be >= 512 bytes for DetectContentType, +// but otherwise it's somewhat arbitrary. +const bufferBeforeChunkingSize = 2048 + +// chunkWriter writes to a response's conn buffer, and is the writer +// wrapped by the response.bufw buffered writer. +// +// chunkWriter also is responsible for finalizing the Header, including +// conditionally setting the Content-Type and setting a Content-Length +// in cases where the handler's final output is smaller than the buffer +// size. It also conditionally adds chunk headers, when in chunking mode. +// +// See the comment above (*response).Write for the entire write flow. +type chunkWriter struct { + res *response + + // header is either nil or a deep clone of res.handlerHeader + // at the time of res.WriteHeader, if res.WriteHeader is + // called and extra buffering is being done to calculate + // Content-Type and/or Content-Length. + header Header + + // wroteHeader tells whether the header's been written to "the + // wire" (or rather: w.conn.buf). 
this is unlike + // (*response).wroteHeader, which tells only whether it was + // logically written. + wroteHeader bool + + // set by the writeHeader method: + chunking bool // using chunked transfer encoding for reply body +} + +var ( + crlf = []byte("\r\n") + colonSpace = []byte(": ") +) + +func (cw *chunkWriter) Write(p []byte) (n int, err error) { + if !cw.wroteHeader { + cw.writeHeader(p) + } + if cw.res.req.Method == "HEAD" { + // Eat writes. + return len(p), nil + } + if cw.chunking { + _, err = fmt.Fprintf(cw.res.conn.buf, "%x\r\n", len(p)) + if err != nil { + cw.res.conn.rwc.Close() + return + } + } + n, err = cw.res.conn.buf.Write(p) + if cw.chunking && err == nil { + _, err = cw.res.conn.buf.Write(crlf) + } + if err != nil { + cw.res.conn.rwc.Close() + } + return +} + +func (cw *chunkWriter) flush() { + if !cw.wroteHeader { + cw.writeHeader(nil) + } + cw.res.conn.buf.Flush() +} + +func (cw *chunkWriter) close() { + if !cw.wroteHeader { + cw.writeHeader(nil) + } + if cw.chunking { + // zero EOF chunk, trailer key/value pairs (currently + // unsupported in Go's server), followed by a blank + // line. + cw.res.conn.buf.WriteString("0\r\n\r\n") + } +} + +// A response represents the server side of an HTTP response. +type response struct { + conn *conn + req *Request // request for this response + wroteHeader bool // reply header has been (logically) written + wroteContinue bool // 100 Continue response was written + + w *bufio.Writer // buffers output in chunks to chunkWriter + cw chunkWriter + sw *switchWriter // of the bufio.Writer, for return to putBufioWriter + + // handlerHeader is the Header that Handlers get access to, + // which may be retained and mutated even after WriteHeader. + // handlerHeader is copied into cw.header at WriteHeader + // time, and privately mutated thereafter. + handlerHeader Header + calledHeader bool // handler accessed handlerHeader via Header + + written int64 // number of bytes written in body + contentLength int64 // explicitly-declared Content-Length; or -1 + status int // status code passed to WriteHeader + + // close connection after this reply. set on request and + // updated after response from handler if there's a + // "Connection: keep-alive" response header and a + // Content-Length. + closeAfterReply bool + + // requestBodyLimitHit is set by requestTooLarge when + // maxBytesReader hits its max size. It is checked in + // WriteHeader, to make sure we don't consume the + // remaining request body to try to advance to the next HTTP + // request. Instead, when this is set, we stop reading + // subsequent requests on this connection and stop reading + // input from it. + requestBodyLimitHit bool + + handlerDone bool // set true when the handler exits + + // Buffers for Date and Content-Length + dateBuf [len(TimeFormat)]byte + clenBuf [10]byte +} + +// requestTooLarge is called by maxBytesReader when too much input has +// been read from the client. +func (w *response) requestTooLarge() { + w.closeAfterReply = true + w.requestBodyLimitHit = true + if !w.wroteHeader { + w.Header().Set("Connection", "close") + } +} + +// needsSniff reports whether a Content-Type still needs to be sniffed. +func (w *response) needsSniff() bool { + _, haveType := w.handlerHeader["Content-Type"] + return !w.cw.wroteHeader && !haveType && w.written < sniffLen +} + +// writerOnly hides an io.Writer value's optional ReadFrom method +// from io.Copy. 
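+//
+// If the response were handed to io.Copy directly, io.Copy would find
+// the (*response).ReadFrom method below and call straight back into it,
+// recursing forever; hiding ReadFrom forces io.Copy's generic read/write
+// loop instead.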
+type writerOnly struct { + io.Writer +} + +func srcIsRegularFile(src io.Reader) (isRegular bool, err error) { + switch v := src.(type) { + case *os.File: + fi, err := v.Stat() + if err != nil { + return false, err + } + return fi.Mode().IsRegular(), nil + case *io.LimitedReader: + return srcIsRegularFile(v.R) + default: + return + } +} + +// ReadFrom is here to optimize copying from an *os.File regular file +// to a *net.TCPConn with sendfile. +func (w *response) ReadFrom(src io.Reader) (n int64, err error) { + // Our underlying w.conn.rwc is usually a *TCPConn (with its + // own ReadFrom method). If not, or if our src isn't a regular + // file, just fall back to the normal copy method. + rf, ok := w.conn.rwc.(io.ReaderFrom) + regFile, err := srcIsRegularFile(src) + if err != nil { + return 0, err + } + if !ok || !regFile { + return io.Copy(writerOnly{w}, src) + } + + // sendfile path: + + if !w.wroteHeader { + w.WriteHeader(StatusOK) + } + + if w.needsSniff() { + n0, err := io.Copy(writerOnly{w}, io.LimitReader(src, sniffLen)) + n += n0 + if err != nil { + return n, err + } + } + + w.w.Flush() // get rid of any previous writes + w.cw.flush() // make sure Header is written; flush data to rwc + + // Now that cw has been flushed, its chunking field is guaranteed initialized. + if !w.cw.chunking && w.bodyAllowed() { + n0, err := rf.ReadFrom(src) + n += n0 + w.written += n0 + return n, err + } + + n0, err := io.Copy(writerOnly{w}, src) + n += n0 + return n, err +} + +// noLimit is an effective infinite upper bound for io.LimitedReader +const noLimit int64 = (1 << 63) - 1 + +// debugServerConnections controls whether all server connections are wrapped +// with a verbose logging wrapper. +const debugServerConnections = false + +// Create new connection from rwc. +func (srv *Server) newConn(rwc net.Conn) (c *conn, err error) { + c = new(conn) + c.remoteAddr = rwc.RemoteAddr().String() + c.server = srv + c.rwc = rwc + if debugServerConnections { + c.rwc = newLoggingConn("server", c.rwc) + } + c.sr = liveSwitchReader{r: c.rwc} + c.lr = io.LimitReader(&c.sr, noLimit).(*io.LimitedReader) + br := newBufioReader(c.lr) + bw := newBufioWriterSize(c.rwc, 4<<10) + c.buf = bufio.NewReadWriter(br, bw) + return c, nil +} + +var ( + bufioReaderPool sync.Pool + bufioWriter2kPool sync.Pool + bufioWriter4kPool sync.Pool +) + +func bufioWriterPool(size int) *sync.Pool { + switch size { + case 2 << 10: + return &bufioWriter2kPool + case 4 << 10: + return &bufioWriter4kPool + } + return nil +} + +func newBufioReader(r io.Reader) *bufio.Reader { + if v := bufioReaderPool.Get(); v != nil { + br := v.(*bufio.Reader) + br.Reset(r) + return br + } + return bufio.NewReader(r) +} + +func putBufioReader(br *bufio.Reader) { + br.Reset(nil) + bufioReaderPool.Put(br) +} + +func newBufioWriterSize(w io.Writer, size int) *bufio.Writer { + pool := bufioWriterPool(size) + if pool != nil { + if v := pool.Get(); v != nil { + bw := v.(*bufio.Writer) + bw.Reset(w) + return bw + } + } + return bufio.NewWriterSize(w, size) +} + +func putBufioWriter(bw *bufio.Writer) { + bw.Reset(nil) + if pool := bufioWriterPool(bw.Available()); pool != nil { + pool.Put(bw) + } +} + +// DefaultMaxHeaderBytes is the maximum permitted size of the headers +// in an HTTP request. +// This can be overridden by setting Server.MaxHeaderBytes. 
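+//
+// A sketch of overriding it, with hypothetical values and assuming the
+// usual ListenAndServe entry point:
+//
+//	srv := &Server{Addr: ":8080", MaxHeaderBytes: 2 << 20}
+//	log.Fatal(srv.ListenAndServe())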
+const DefaultMaxHeaderBytes = 1 << 20 // 1 MB + +func (srv *Server) maxHeaderBytes() int { + if srv.MaxHeaderBytes > 0 { + return srv.MaxHeaderBytes + } + return DefaultMaxHeaderBytes +} + +func (srv *Server) initialLimitedReaderSize() int64 { + return int64(srv.maxHeaderBytes()) + 4096 // bufio slop +} + +// wrapper around io.ReaderCloser which on first read, sends an +// HTTP/1.1 100 Continue header +type expectContinueReader struct { + resp *response + readCloser io.ReadCloser + closed bool +} + +func (ecr *expectContinueReader) Read(p []byte) (n int, err error) { + if ecr.closed { + return 0, ErrBodyReadAfterClose + } + if !ecr.resp.wroteContinue && !ecr.resp.conn.hijacked() { + ecr.resp.wroteContinue = true + ecr.resp.conn.buf.WriteString("HTTP/1.1 100 Continue\r\n\r\n") + ecr.resp.conn.buf.Flush() + } + return ecr.readCloser.Read(p) +} + +func (ecr *expectContinueReader) Close() error { + ecr.closed = true + return ecr.readCloser.Close() +} + +// TimeFormat is the time format to use with +// time.Parse and time.Time.Format when parsing +// or generating times in HTTP headers. +// It is like time.RFC1123 but hard codes GMT as the time zone. +const TimeFormat = "Mon, 02 Jan 2006 15:04:05 GMT" + +// appendTime is a non-allocating version of []byte(t.UTC().Format(TimeFormat)) +func appendTime(b []byte, t time.Time) []byte { + const days = "SunMonTueWedThuFriSat" + const months = "JanFebMarAprMayJunJulAugSepOctNovDec" + + t = t.UTC() + yy, mm, dd := t.Date() + hh, mn, ss := t.Clock() + day := days[3*t.Weekday():] + mon := months[3*(mm-1):] + + return append(b, + day[0], day[1], day[2], ',', ' ', + byte('0'+dd/10), byte('0'+dd%10), ' ', + mon[0], mon[1], mon[2], ' ', + byte('0'+yy/1000), byte('0'+(yy/100)%10), byte('0'+(yy/10)%10), byte('0'+yy%10), ' ', + byte('0'+hh/10), byte('0'+hh%10), ':', + byte('0'+mn/10), byte('0'+mn%10), ':', + byte('0'+ss/10), byte('0'+ss%10), ' ', + 'G', 'M', 'T') +} + +var errTooLarge = errors.New("http: request too large") + +// Read next request from connection. +func (c *conn) readRequest() (w *response, err error) { + if c.hijacked() { + return nil, ErrHijacked + } + + if d := c.server.ReadTimeout; d != 0 { + c.rwc.SetReadDeadline(time.Now().Add(d)) + } + if d := c.server.WriteTimeout; d != 0 { + defer func() { + c.rwc.SetWriteDeadline(time.Now().Add(d)) + }() + } + + c.lr.N = c.server.initialLimitedReaderSize() + var req *Request + if req, err = ReadRequest(c.buf.Reader); err != nil { + if c.lr.N == 0 { + return nil, errTooLarge + } + return nil, err + } + c.lr.N = noLimit + + req.RemoteAddr = c.remoteAddr + req.TLS = c.tlsState + + w = &response{ + conn: c, + req: req, + handlerHeader: make(Header), + contentLength: -1, + } + w.cw.res = w + w.w = newBufioWriterSize(&w.cw, bufferBeforeChunkingSize) + return w, nil +} + +func (w *response) Header() Header { + if w.cw.header == nil && w.wroteHeader && !w.cw.wroteHeader { + // Accessing the header between logically writing it + // and physically writing it means we need to allocate + // a clone to snapshot the logically written state. + w.cw.header = w.handlerHeader.clone() + } + w.calledHeader = true + return w.handlerHeader +} + +// maxPostHandlerReadBytes is the max number of Request.Body bytes not +// consumed by a handler that the server will read from the client +// in order to keep a connection alive. If there are more bytes than +// this then the server to be paranoid instead sends a "Connection: +// close" response. 
+// +// This number is approximately what a typical machine's TCP buffer +// size is anyway. (if we have the bytes on the machine, we might as +// well read them) +const maxPostHandlerReadBytes = 256 << 10 + +func (w *response) WriteHeader(code int) { + if w.conn.hijacked() { + w.conn.server.logf("http: response.WriteHeader on hijacked connection") + return + } + if w.wroteHeader { + w.conn.server.logf("http: multiple response.WriteHeader calls") + return + } + w.wroteHeader = true + w.status = code + + if w.calledHeader && w.cw.header == nil { + w.cw.header = w.handlerHeader.clone() + } + + if cl := w.handlerHeader.get("Content-Length"); cl != "" { + v, err := strconv.ParseInt(cl, 10, 64) + if err == nil && v >= 0 { + w.contentLength = v + } else { + w.conn.server.logf("http: invalid Content-Length of %q", cl) + w.handlerHeader.Del("Content-Length") + } + } +} + +// extraHeader is the set of headers sometimes added by chunkWriter.writeHeader. +// This type is used to avoid extra allocations from cloning and/or populating +// the response Header map and all its 1-element slices. +type extraHeader struct { + contentType string + connection string + transferEncoding string + date []byte // written if not nil + contentLength []byte // written if not nil +} + +// Sorted the same as extraHeader.Write's loop. +var extraHeaderKeys = [][]byte{ + []byte("Content-Type"), + []byte("Connection"), + []byte("Transfer-Encoding"), +} + +var ( + headerContentLength = []byte("Content-Length: ") + headerDate = []byte("Date: ") +) + +// Write writes the headers described in h to w. +// +// This method has a value receiver, despite the somewhat large size +// of h, because it prevents an allocation. The escape analysis isn't +// smart enough to realize this function doesn't mutate h. +func (h extraHeader) Write(w *bufio.Writer) { + if h.date != nil { + w.Write(headerDate) + w.Write(h.date) + w.Write(crlf) + } + if h.contentLength != nil { + w.Write(headerContentLength) + w.Write(h.contentLength) + w.Write(crlf) + } + for i, v := range []string{h.contentType, h.connection, h.transferEncoding} { + if v != "" { + w.Write(extraHeaderKeys[i]) + w.Write(colonSpace) + w.WriteString(v) + w.Write(crlf) + } + } +} + +// writeHeader finalizes the header sent to the client and writes it +// to cw.res.conn.buf. +// +// p is not written by writeHeader, but is the first chunk of the body +// that will be written. It is sniffed for a Content-Type if none is +// set explicitly. It's also used to set the Content-Length, if the +// total body size was small and the handler has already finished +// running. +func (cw *chunkWriter) writeHeader(p []byte) { + if cw.wroteHeader { + return + } + cw.wroteHeader = true + + w := cw.res + keepAlivesEnabled := w.conn.server.doKeepAlives() + isHEAD := w.req.Method == "HEAD" + + // header is written out to w.conn.buf below. Depending on the + // state of the handler, we either own the map or not. If we + // don't own it, the exclude map is created lazily for + // WriteSubset to remove headers. The setHeader struct holds + // headers we need to add. 
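+	// (When WriteHeader already snapshotted the headers, cw.header is
+	// non-nil and owned by us, so delHeader below can delete from it
+	// directly; otherwise deletions are recorded in excludeHeader and
+	// applied by WriteSubset when the map is finally written out.)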
+ header := cw.header + owned := header != nil + if !owned { + header = w.handlerHeader + } + var excludeHeader map[string]bool + delHeader := func(key string) { + if owned { + header.Del(key) + return + } + if _, ok := header[key]; !ok { + return + } + if excludeHeader == nil { + excludeHeader = make(map[string]bool) + } + excludeHeader[key] = true + } + var setHeader extraHeader + + // If the handler is done but never sent a Content-Length + // response header and this is our first (and last) write, set + // it, even to zero. This helps HTTP/1.0 clients keep their + // "keep-alive" connections alive. + // Exceptions: 304/204/1xx responses never get Content-Length, and if + // it was a HEAD request, we don't know the difference between + // 0 actual bytes and 0 bytes because the handler noticed it + // was a HEAD request and chose not to write anything. So for + // HEAD, the handler should either write the Content-Length or + // write non-zero bytes. If it's actually 0 bytes and the + // handler never looked at the Request.Method, we just don't + // send a Content-Length header. + if w.handlerDone && bodyAllowedForStatus(w.status) && header.get("Content-Length") == "" && (!isHEAD || len(p) > 0) { + w.contentLength = int64(len(p)) + setHeader.contentLength = strconv.AppendInt(cw.res.clenBuf[:0], int64(len(p)), 10) + } + + // If this was an HTTP/1.0 request with keep-alive and we sent a + // Content-Length back, we can make this a keep-alive response ... + if w.req.wantsHttp10KeepAlive() && keepAlivesEnabled { + sentLength := header.get("Content-Length") != "" + if sentLength && header.get("Connection") == "keep-alive" { + w.closeAfterReply = false + } + } + + // Check for a explicit (and valid) Content-Length header. + hasCL := w.contentLength != -1 + + if w.req.wantsHttp10KeepAlive() && (isHEAD || hasCL) { + _, connectionHeaderSet := header["Connection"] + if !connectionHeaderSet { + setHeader.connection = "keep-alive" + } + } else if !w.req.ProtoAtLeast(1, 1) || w.req.wantsClose() { + w.closeAfterReply = true + } + + if header.get("Connection") == "close" || !keepAlivesEnabled { + w.closeAfterReply = true + } + + // Per RFC 2616, we should consume the request body before + // replying, if the handler hasn't already done so. But we + // don't want to do an unbounded amount of reading here for + // DoS reasons, so we only try up to a threshold. + if w.req.ContentLength != 0 && !w.closeAfterReply { + ecr, isExpecter := w.req.Body.(*expectContinueReader) + if !isExpecter || ecr.resp.wroteContinue { + n, _ := io.CopyN(ioutil.Discard, w.req.Body, maxPostHandlerReadBytes+1) + if n >= maxPostHandlerReadBytes { + w.requestTooLarge() + delHeader("Connection") + setHeader.connection = "close" + } else { + w.req.Body.Close() + } + } + } + + code := w.status + if bodyAllowedForStatus(code) { + // If no content type, apply sniffing algorithm to body. + _, haveType := header["Content-Type"] + if !haveType { + setHeader.contentType = DetectContentType(p) + } + } else { + for _, k := range suppressedHeaders(code) { + delHeader(k) + } + } + + if _, ok := header["Date"]; !ok { + setHeader.date = appendTime(cw.res.dateBuf[:0], time.Now()) + } + + te := header.get("Transfer-Encoding") + hasTE := te != "" + if hasCL && hasTE && te != "identity" { + // TODO: return an error if WriteHeader gets a return parameter + // For now just ignore the Content-Length. 
+ w.conn.server.logf("http: WriteHeader called with both Transfer-Encoding of %q and a Content-Length of %d", + te, w.contentLength) + delHeader("Content-Length") + hasCL = false + } + + if w.req.Method == "HEAD" || !bodyAllowedForStatus(code) { + // do nothing + } else if code == StatusNoContent { + delHeader("Transfer-Encoding") + } else if hasCL { + delHeader("Transfer-Encoding") + } else if w.req.ProtoAtLeast(1, 1) { + // HTTP/1.1 or greater: use chunked transfer encoding + // to avoid closing the connection at EOF. + // TODO: this blows away any custom or stacked Transfer-Encoding they + // might have set. Deal with that as need arises once we have a valid + // use case. + cw.chunking = true + setHeader.transferEncoding = "chunked" + } else { + // HTTP version < 1.1: cannot do chunked transfer + // encoding and we don't know the Content-Length so + // signal EOF by closing connection. + w.closeAfterReply = true + delHeader("Transfer-Encoding") // in case already set + } + + // Cannot use Content-Length with non-identity Transfer-Encoding. + if cw.chunking { + delHeader("Content-Length") + } + if !w.req.ProtoAtLeast(1, 0) { + return + } + + if w.closeAfterReply && (!keepAlivesEnabled || !hasToken(cw.header.get("Connection"), "close")) { + delHeader("Connection") + if w.req.ProtoAtLeast(1, 1) { + setHeader.connection = "close" + } + } + + w.conn.buf.WriteString(statusLine(w.req, code)) + cw.header.WriteSubset(w.conn.buf, excludeHeader) + setHeader.Write(w.conn.buf.Writer) + w.conn.buf.Write(crlf) +} + +// statusLines is a cache of Status-Line strings, keyed by code (for +// HTTP/1.1) or negative code (for HTTP/1.0). This is faster than a +// map keyed by struct of two fields. This map's max size is bounded +// by 2*len(statusText), two protocol types for each known official +// status code in the statusText map. +var ( + statusMu sync.RWMutex + statusLines = make(map[int]string) +) + +// statusLine returns a response Status-Line (RFC 2616 Section 6.1) +// for the given request and response status code. +func statusLine(req *Request, code int) string { + // Fast path: + key := code + proto11 := req.ProtoAtLeast(1, 1) + if !proto11 { + key = -key + } + statusMu.RLock() + line, ok := statusLines[key] + statusMu.RUnlock() + if ok { + return line + } + + // Slow path: + proto := "HTTP/1.0" + if proto11 { + proto = "HTTP/1.1" + } + codestring := strconv.Itoa(code) + text, ok := statusText[code] + if !ok { + text = "status code " + codestring + } + line = proto + " " + codestring + " " + text + "\r\n" + if ok { + statusMu.Lock() + defer statusMu.Unlock() + statusLines[key] = line + } + return line +} + +// bodyAllowed returns true if a Write is allowed for this response type. +// It's illegal to call this before the header has been flushed. +func (w *response) bodyAllowed() bool { + if !w.wroteHeader { + panic("") + } + return bodyAllowedForStatus(w.status) +} + +// The Life Of A Write is like this: +// +// Handler starts. No header has been sent. The handler can either +// write a header, or just start writing. Writing before sending a header +// sends an implicitly empty 200 OK header. +// +// If the handler didn't declare a Content-Length up front, we either +// go into chunking mode or, if the handler finishes running before +// the chunking buffer size, we compute a Content-Length and send that +// in the header instead. +// +// Likewise, if the handler didn't set a Content-Type, we sniff that +// from the initial chunk of output. +// +// The Writers are wired together like: +// +// 1. 
*response (the ResponseWriter) -> +// 2. (*response).w, a *bufio.Writer of bufferBeforeChunkingSize bytes +// 3. chunkWriter.Writer (whose writeHeader finalizes Content-Length/Type) +// and which writes the chunk headers, if needed. +// 4. conn.buf, a bufio.Writer of default (4kB) bytes +// 5. the rwc, the net.Conn. +// +// TODO(bradfitz): short-circuit some of the buffering when the +// initial header contains both a Content-Type and Content-Length. +// Also short-circuit in (1) when the header's been sent and not in +// chunking mode, writing directly to (4) instead, if (2) has no +// buffered data. More generally, we could short-circuit from (1) to +// (3) even in chunking mode if the write size from (1) is over some +// threshold and nothing is in (2). The answer might be mostly making +// bufferBeforeChunkingSize smaller and having bufio's fast-paths deal +// with this instead. +func (w *response) Write(data []byte) (n int, err error) { + return w.write(len(data), data, "") +} + +func (w *response) WriteString(data string) (n int, err error) { + return w.write(len(data), nil, data) +} + +// either dataB or dataS is non-zero. +func (w *response) write(lenData int, dataB []byte, dataS string) (n int, err error) { + if w.conn.hijacked() { + w.conn.server.logf("http: response.Write on hijacked connection") + return 0, ErrHijacked + } + if !w.wroteHeader { + w.WriteHeader(StatusOK) + } + if lenData == 0 { + return 0, nil + } + if !w.bodyAllowed() { + return 0, ErrBodyNotAllowed + } + + w.written += int64(lenData) // ignoring errors, for errorKludge + if w.contentLength != -1 && w.written > w.contentLength { + return 0, ErrContentLength + } + if dataB != nil { + return w.w.Write(dataB) + } else { + return w.w.WriteString(dataS) + } +} + +func (w *response) finishRequest() { + w.handlerDone = true + + if !w.wroteHeader { + w.WriteHeader(StatusOK) + } + + w.w.Flush() + putBufioWriter(w.w) + w.cw.close() + w.conn.buf.Flush() + + // Close the body (regardless of w.closeAfterReply) so we can + // re-use its bufio.Reader later safely. + w.req.Body.Close() + + if w.req.MultipartForm != nil { + w.req.MultipartForm.RemoveAll() + } + + if w.req.Method != "HEAD" && w.contentLength != -1 && w.bodyAllowed() && w.contentLength != w.written { + // Did not write enough. Avoid getting out of sync. + w.closeAfterReply = true + } +} + +func (w *response) Flush() { + if !w.wroteHeader { + w.WriteHeader(StatusOK) + } + w.w.Flush() + w.cw.flush() +} + +func (c *conn) finalFlush() { + if c.buf != nil { + c.buf.Flush() + + // Steal the bufio.Reader (~4KB worth of memory) and its associated + // reader for a future connection. + putBufioReader(c.buf.Reader) + + // Steal the bufio.Writer (~4KB worth of memory) and its associated + // writer for a future connection. + putBufioWriter(c.buf.Writer) + + c.buf = nil + } +} + +// Close the connection. +func (c *conn) close() { + c.finalFlush() + if c.rwc != nil { + c.rwc.Close() + c.rwc = nil + } +} + +// rstAvoidanceDelay is the amount of time we sleep after closing the +// write side of a TCP connection before closing the entire socket. +// By sleeping, we increase the chances that the client sees our FIN +// and processes its final data before they process the subsequent RST +// from closing a connection with known unread data. +// This RST seems to occur mostly on BSD systems. (And Windows?) +// This timeout is somewhat arbitrary (~latency around the planet). 
+const rstAvoidanceDelay = 500 * time.Millisecond + +// closeWrite flushes any outstanding data and sends a FIN packet (if +// client is connected via TCP), signalling that we're done. We then +// pause for a bit, hoping the client processes it before `any +// subsequent RST. +// +// See http://golang.org/issue/3595 +func (c *conn) closeWriteAndWait() { + c.finalFlush() + if tcp, ok := c.rwc.(*net.TCPConn); ok { + tcp.CloseWrite() + } + time.Sleep(rstAvoidanceDelay) +} + +// validNPN reports whether the proto is not a blacklisted Next +// Protocol Negotiation protocol. Empty and built-in protocol types +// are blacklisted and can't be overridden with alternate +// implementations. +func validNPN(proto string) bool { + switch proto { + case "", "http/1.1", "http/1.0": + return false + } + return true +} + +func (c *conn) setState(nc net.Conn, state ConnState) { + if hook := c.server.ConnState; hook != nil { + hook(nc, state) + } +} + +// Serve a new connection. +func (c *conn) serve() { + origConn := c.rwc // copy it before it's set nil on Close or Hijack + defer func() { + if err := recover(); err != nil { + const size = 64 << 10 + buf := make([]byte, size) + buf = buf[:runtime.Stack(buf, false)] + c.server.logf("http: panic serving %v: %v\n%s", c.remoteAddr, err, buf) + } + if !c.hijacked() { + c.close() + c.setState(origConn, StateClosed) + } + }() + + if tlsConn, ok := c.rwc.(*tls.Conn); ok { + if d := c.server.ReadTimeout; d != 0 { + c.rwc.SetReadDeadline(time.Now().Add(d)) + } + if d := c.server.WriteTimeout; d != 0 { + c.rwc.SetWriteDeadline(time.Now().Add(d)) + } + if err := tlsConn.Handshake(); err != nil { + c.server.logf("http: TLS handshake error from %s: %v", c.rwc.RemoteAddr(), err) + return + } + c.tlsState = new(tls.ConnectionState) + *c.tlsState = tlsConn.ConnectionState() + if proto := c.tlsState.NegotiatedProtocol; validNPN(proto) { + if fn := c.server.TLSNextProto[proto]; fn != nil { + h := initNPNRequest{tlsConn, serverHandler{c.server}} + fn(c.server, tlsConn, h) + } + return + } + } + + for { + w, err := c.readRequest() + if c.lr.N != c.server.initialLimitedReaderSize() { + // If we read any bytes off the wire, we're active. + c.setState(c.rwc, StateActive) + } + if err != nil { + if err == errTooLarge { + // Their HTTP client may or may not be + // able to read this if we're + // responding to them and hanging up + // while they're still writing their + // request. Undefined behavior. + io.WriteString(c.rwc, "HTTP/1.1 413 Request Entity Too Large\r\n\r\n") + c.closeWriteAndWait() + break + } else if err == io.EOF { + break // Don't reply + } else if neterr, ok := err.(net.Error); ok && neterr.Timeout() { + break // Don't reply + } + io.WriteString(c.rwc, "HTTP/1.1 400 Bad Request\r\n\r\n") + break + } + + // Expect 100 Continue support + req := w.req + if req.expectsContinue() { + if req.ProtoAtLeast(1, 1) && req.ContentLength != 0 { + // Wrap the Body reader with one that replies on the connection + req.Body = &expectContinueReader{readCloser: req.Body, resp: w} + } + req.Header.Del("Expect") + } else if req.Header.get("Expect") != "" { + w.sendExpectationFailed() + break + } + + // HTTP cannot have multiple simultaneous active requests.[*] + // Until the server replies to this request, it can't read another, + // so we might as well run the handler in this goroutine. + // [*] Not strictly true: HTTP pipelining. We could let them all process + // in parallel even if their responses need to be serialized. 
+ serverHandler{c.server}.ServeHTTP(w, w.req) + if c.hijacked() { + return + } + w.finishRequest() + if w.closeAfterReply { + if w.requestBodyLimitHit { + c.closeWriteAndWait() + } + break + } + c.setState(c.rwc, StateIdle) + } +} + +func (w *response) sendExpectationFailed() { + // TODO(bradfitz): let ServeHTTP handlers handle + // requests with non-standard expectation[s]? Seems + // theoretical at best, and doesn't fit into the + // current ServeHTTP model anyway. We'd need to + // make the ResponseWriter an optional + // "ExpectReplier" interface or something. + // + // For now we'll just obey RFC 2616 14.20 which says + // "If a server receives a request containing an + // Expect field that includes an expectation- + // extension that it does not support, it MUST + // respond with a 417 (Expectation Failed) status." + w.Header().Set("Connection", "close") + w.WriteHeader(StatusExpectationFailed) + w.finishRequest() +} + +// Hijack implements the Hijacker.Hijack method. Our response is both a ResponseWriter +// and a Hijacker. +func (w *response) Hijack() (rwc net.Conn, buf *bufio.ReadWriter, err error) { + if w.wroteHeader { + w.cw.flush() + } + // Release the bufioWriter that writes to the chunk writer, it is not + // used after a connection has been hijacked. + rwc, buf, err = w.conn.hijack() + if err == nil { + putBufioWriter(w.w) + w.w = nil + } + return rwc, buf, err +} + +func (w *response) CloseNotify() <-chan bool { + return w.conn.closeNotify() +} + +// The HandlerFunc type is an adapter to allow the use of +// ordinary functions as HTTP handlers. If f is a function +// with the appropriate signature, HandlerFunc(f) is a +// Handler object that calls f. +type HandlerFunc func(ResponseWriter, *Request) + +// ServeHTTP calls f(w, r). +func (f HandlerFunc) ServeHTTP(w ResponseWriter, r *Request) { + f(w, r) +} + +// Helper handlers + +// Error replies to the request with the specified error message and HTTP code. +// The error message should be plain text. +func Error(w ResponseWriter, error string, code int) { + w.Header().Set("Content-Type", "text/plain; charset=utf-8") + w.WriteHeader(code) + fmt.Fprintln(w, error) +} + +// NotFound replies to the request with an HTTP 404 not found error. +func NotFound(w ResponseWriter, r *Request) { Error(w, "404 page not found", StatusNotFound) } + +// NotFoundHandler returns a simple request handler +// that replies to each request with a ``404 page not found'' reply. +func NotFoundHandler() Handler { return HandlerFunc(NotFound) } + +// StripPrefix returns a handler that serves HTTP requests +// by removing the given prefix from the request URL's Path +// and invoking the handler h. StripPrefix handles a +// request for a path that doesn't begin with prefix by +// replying with an HTTP 404 not found error. +func StripPrefix(prefix string, h Handler) Handler { + if prefix == "" { + return h + } + return HandlerFunc(func(w ResponseWriter, r *Request) { + if p := strings.TrimPrefix(r.URL.Path, prefix); len(p) < len(r.URL.Path) { + r.URL.Path = p + h.ServeHTTP(w, r) + } else { + NotFound(w, r) + } + }) +} + +// Redirect replies to the request with a redirect to url, +// which may be a path relative to the request path. +func Redirect(w ResponseWriter, r *Request, urlStr string, code int) { + if u, err := url.Parse(urlStr); err == nil { + // If url was relative, make absolute by + // combining with request path. + // The browser would probably do this for us, + // but doing it ourselves is more reliable. 
+ + // NOTE(rsc): RFC 2616 says that the Location + // line must be an absolute URI, like + // "http://www.google.com/redirect/", + // not a path like "/redirect/". + // Unfortunately, we don't know what to + // put in the host name section to get the + // client to connect to us again, so we can't + // know the right absolute URI to send back. + // Because of this problem, no one pays attention + // to the RFC; they all send back just a new path. + // So do we. + oldpath := r.URL.Path + if oldpath == "" { // should not happen, but avoid a crash if it does + oldpath = "/" + } + if u.Scheme == "" { + // no leading http://server + if urlStr == "" || urlStr[0] != '/' { + // make relative path absolute + olddir, _ := path.Split(oldpath) + urlStr = olddir + urlStr + } + + var query string + if i := strings.Index(urlStr, "?"); i != -1 { + urlStr, query = urlStr[:i], urlStr[i:] + } + + // clean up but preserve trailing slash + trailing := strings.HasSuffix(urlStr, "/") + urlStr = path.Clean(urlStr) + if trailing && !strings.HasSuffix(urlStr, "/") { + urlStr += "/" + } + urlStr += query + } + } + + w.Header().Set("Location", urlStr) + w.WriteHeader(code) + + // RFC2616 recommends that a short note "SHOULD" be included in the + // response because older user agents may not understand 301/307. + // Shouldn't send the response for POST or HEAD; that leaves GET. + if r.Method == "GET" { + note := "" + statusText[code] + ".\n" + fmt.Fprintln(w, note) + } +} + +var htmlReplacer = strings.NewReplacer( + "&", "&", + "<", "<", + ">", ">", + // """ is shorter than """. + `"`, """, + // "'" is shorter than "'" and apos was not in HTML until HTML5. + "'", "'", +) + +func htmlEscape(s string) string { + return htmlReplacer.Replace(s) +} + +// Redirect to a fixed URL +type redirectHandler struct { + url string + code int +} + +func (rh *redirectHandler) ServeHTTP(w ResponseWriter, r *Request) { + Redirect(w, r, rh.url, rh.code) +} + +// RedirectHandler returns a request handler that redirects +// each request it receives to the given url using the given +// status code. +func RedirectHandler(url string, code int) Handler { + return &redirectHandler{url, code} +} + +// ServeMux is an HTTP request multiplexer. +// It matches the URL of each incoming request against a list of registered +// patterns and calls the handler for the pattern that +// most closely matches the URL. +// +// Patterns name fixed, rooted paths, like "/favicon.ico", +// or rooted subtrees, like "/images/" (note the trailing slash). +// Longer patterns take precedence over shorter ones, so that +// if there are handlers registered for both "/images/" +// and "/images/thumbnails/", the latter handler will be +// called for paths beginning "/images/thumbnails/" and the +// former will receive requests for any other paths in the +// "/images/" subtree. +// +// Note that since a pattern ending in a slash names a rooted subtree, +// the pattern "/" matches all paths not matched by other registered +// patterns, not just the URL with Path == "/". +// +// Patterns may optionally begin with a host name, restricting matches to +// URLs on that host only. Host-specific patterns take precedence over +// general patterns, so that a handler might register for the two patterns +// "/codesearch" and "codesearch.google.com/" without also taking over +// requests for "http://www.google.com/". +// +// ServeMux also takes care of sanitizing the URL request path, +// redirecting any request containing . or .. elements to an +// equivalent .- and ..-free URL. 
+type ServeMux struct { + mu sync.RWMutex + m map[string]muxEntry + hosts bool // whether any patterns contain hostnames +} + +type muxEntry struct { + explicit bool + h Handler + pattern string +} + +// NewServeMux allocates and returns a new ServeMux. +func NewServeMux() *ServeMux { return &ServeMux{m: make(map[string]muxEntry)} } + +// DefaultServeMux is the default ServeMux used by Serve. +var DefaultServeMux = NewServeMux() + +// Does path match pattern? +func pathMatch(pattern, path string) bool { + if len(pattern) == 0 { + // should not happen + return false + } + n := len(pattern) + if pattern[n-1] != '/' { + return pattern == path + } + return len(path) >= n && path[0:n] == pattern +} + +// Return the canonical path for p, eliminating . and .. elements. +func cleanPath(p string) string { + if p == "" { + return "/" + } + if p[0] != '/' { + p = "/" + p + } + np := path.Clean(p) + // path.Clean removes trailing slash except for root; + // put the trailing slash back if necessary. + if p[len(p)-1] == '/' && np != "/" { + np += "/" + } + return np +} + +// Find a handler on a handler map given a path string +// Most-specific (longest) pattern wins +func (mux *ServeMux) match(path string) (h Handler, pattern string) { + var n = 0 + for k, v := range mux.m { + if !pathMatch(k, path) { + continue + } + if h == nil || len(k) > n { + n = len(k) + h = v.h + pattern = v.pattern + } + } + return +} + +// Handler returns the handler to use for the given request, +// consulting r.Method, r.Host, and r.URL.Path. It always returns +// a non-nil handler. If the path is not in its canonical form, the +// handler will be an internally-generated handler that redirects +// to the canonical path. +// +// Handler also returns the registered pattern that matches the +// request or, in the case of internally-generated redirects, +// the pattern that will match after following the redirect. +// +// If there is no registered handler that applies to the request, +// Handler returns a ``page not found'' handler and an empty pattern. +func (mux *ServeMux) Handler(r *Request) (h Handler, pattern string) { + if r.Method != "CONNECT" { + if p := cleanPath(r.URL.Path); p != r.URL.Path { + _, pattern = mux.handler(r.Host, p) + url := *r.URL + url.Path = p + return RedirectHandler(url.String(), StatusMovedPermanently), pattern + } + } + + return mux.handler(r.Host, r.URL.Path) +} + +// handler is the main implementation of Handler. +// The path is known to be in canonical form, except for CONNECT methods. +func (mux *ServeMux) handler(host, path string) (h Handler, pattern string) { + mux.mu.RLock() + defer mux.mu.RUnlock() + + // Host-specific pattern takes precedence over generic ones + if mux.hosts { + h, pattern = mux.match(host + path) + } + if h == nil { + h, pattern = mux.match(path) + } + if h == nil { + h, pattern = NotFoundHandler(), "" + } + return +} + +// ServeHTTP dispatches the request to the handler whose +// pattern most closely matches the request URL. +func (mux *ServeMux) ServeHTTP(w ResponseWriter, r *Request) { + if r.RequestURI == "*" { + if r.ProtoAtLeast(1, 1) { + w.Header().Set("Connection", "close") + } + w.WriteHeader(StatusBadRequest) + return + } + h, _ := mux.Handler(r) + h.ServeHTTP(w, r) +} + +// Handle registers the handler for the given pattern. +// If a handler already exists for pattern, Handle panics. 
+func (mux *ServeMux) Handle(pattern string, handler Handler) { + mux.mu.Lock() + defer mux.mu.Unlock() + + if pattern == "" { + panic("http: invalid pattern " + pattern) + } + if handler == nil { + panic("http: nil handler") + } + if mux.m[pattern].explicit { + panic("http: multiple registrations for " + pattern) + } + + mux.m[pattern] = muxEntry{explicit: true, h: handler, pattern: pattern} + + if pattern[0] != '/' { + mux.hosts = true + } + + // Helpful behavior: + // If pattern is /tree/, insert an implicit permanent redirect for /tree. + // It can be overridden by an explicit registration. + n := len(pattern) + if n > 0 && pattern[n-1] == '/' && !mux.m[pattern[0:n-1]].explicit { + // If pattern contains a host name, strip it and use remaining + // path for redirect. + path := pattern + if pattern[0] != '/' { + // In pattern, at least the last character is a '/', so + // strings.Index can't be -1. + path = pattern[strings.Index(pattern, "/"):] + } + mux.m[pattern[0:n-1]] = muxEntry{h: RedirectHandler(path, StatusMovedPermanently), pattern: pattern} + } +} + +// HandleFunc registers the handler function for the given pattern. +func (mux *ServeMux) HandleFunc(pattern string, handler func(ResponseWriter, *Request)) { + mux.Handle(pattern, HandlerFunc(handler)) +} + +// Handle registers the handler for the given pattern +// in the DefaultServeMux. +// The documentation for ServeMux explains how patterns are matched. +func Handle(pattern string, handler Handler) { DefaultServeMux.Handle(pattern, handler) } + +// HandleFunc registers the handler function for the given pattern +// in the DefaultServeMux. +// The documentation for ServeMux explains how patterns are matched. +func HandleFunc(pattern string, handler func(ResponseWriter, *Request)) { + DefaultServeMux.HandleFunc(pattern, handler) +} + +// Serve accepts incoming HTTP connections on the listener l, +// creating a new service goroutine for each. The service goroutines +// read requests and then call handler to reply to them. +// Handler is typically nil, in which case the DefaultServeMux is used. +func Serve(l net.Listener, handler Handler) error { + srv := &Server{Handler: handler} + return srv.Serve(l) +} + +// A Server defines parameters for running an HTTP server. +// The zero value for Server is a valid configuration. +type Server struct { + Addr string // TCP address to listen on, ":http" if empty + Handler Handler // handler to invoke, http.DefaultServeMux if nil + ReadTimeout time.Duration // maximum duration before timing out read of the request + WriteTimeout time.Duration // maximum duration before timing out write of the response + MaxHeaderBytes int // maximum size of request headers, DefaultMaxHeaderBytes if 0 + TLSConfig *tls.Config // optional TLS config, used by ListenAndServeTLS + + // TLSNextProto optionally specifies a function to take over + // ownership of the provided TLS connection when an NPN + // protocol upgrade has occurred. The map key is the protocol + // name negotiated. The Handler argument should be used to + // handle HTTP requests and will initialize the Request's TLS + // and RemoteAddr if not already set. The connection is + // automatically closed when the function returns. + TLSNextProto map[string]func(*Server, *tls.Conn, Handler) + + // ConnState specifies an optional callback function that is + // called when a client connection changes state. See the + // ConnState type and associated constants for details. 
+ ConnState func(net.Conn, ConnState) + + // ErrorLog specifies an optional logger for errors accepting + // connections and unexpected behavior from handlers. + // If nil, logging goes to os.Stderr via the log package's + // standard logger. + ErrorLog *log.Logger + + disableKeepAlives int32 // accessed atomically. +} + +// A ConnState represents the state of a client connection to a server. +// It's used by the optional Server.ConnState hook. +type ConnState int + +const ( + // StateNew represents a new connection that is expected to + // send a request immediately. Connections begin at this + // state and then transition to either StateActive or + // StateClosed. + StateNew ConnState = iota + + // StateActive represents a connection that has read 1 or more + // bytes of a request. The Server.ConnState hook for + // StateActive fires before the request has entered a handler + // and doesn't fire again until the request has been + // handled. After the request is handled, the state + // transitions to StateClosed, StateHijacked, or StateIdle. + StateActive + + // StateIdle represents a connection that has finished + // handling a request and is in the keep-alive state, waiting + // for a new request. Connections transition from StateIdle + // to either StateActive or StateClosed. + StateIdle + + // StateHijacked represents a hijacked connection. + // This is a terminal state. It does not transition to StateClosed. + StateHijacked + + // StateClosed represents a closed connection. + // This is a terminal state. Hijacked connections do not + // transition to StateClosed. + StateClosed +) + +var stateName = map[ConnState]string{ + StateNew: "new", + StateActive: "active", + StateIdle: "idle", + StateHijacked: "hijacked", + StateClosed: "closed", +} + +func (c ConnState) String() string { + return stateName[c] +} + +// serverHandler delegates to either the server's Handler or +// DefaultServeMux and also handles "OPTIONS *" requests. +type serverHandler struct { + srv *Server +} + +func (sh serverHandler) ServeHTTP(rw ResponseWriter, req *Request) { + handler := sh.srv.Handler + if handler == nil { + handler = DefaultServeMux + } + if req.RequestURI == "*" && req.Method == "OPTIONS" { + handler = globalOptionsHandler{} + } + handler.ServeHTTP(rw, req) +} + +// ListenAndServe listens on the TCP network address srv.Addr and then +// calls Serve to handle requests on incoming connections. If +// srv.Addr is blank, ":http" is used. +func (srv *Server) ListenAndServe() error { + addr := srv.Addr + if addr == "" { + addr = ":http" + } + ln, err := net.Listen("tcp", addr) + if err != nil { + return err + } + return srv.Serve(tcpKeepAliveListener{ln.(*net.TCPListener)}) +} + +// Serve accepts incoming connections on the Listener l, creating a +// new service goroutine for each. The service goroutines read requests and +// then call srv.Handler to reply to them. 
+func (srv *Server) Serve(l net.Listener) error { + defer l.Close() + var tempDelay time.Duration // how long to sleep on accept failure + for { + rw, e := l.Accept() + if e != nil { + if ne, ok := e.(net.Error); ok && ne.Temporary() { + if tempDelay == 0 { + tempDelay = 5 * time.Millisecond + } else { + tempDelay *= 2 + } + if max := 1 * time.Second; tempDelay > max { + tempDelay = max + } + srv.logf("http: Accept error: %v; retrying in %v", e, tempDelay) + time.Sleep(tempDelay) + continue + } + return e + } + tempDelay = 0 + c, err := srv.newConn(rw) + if err != nil { + continue + } + c.setState(c.rwc, StateNew) // before Serve can return + go c.serve() + } +} + +func (s *Server) doKeepAlives() bool { + return atomic.LoadInt32(&s.disableKeepAlives) == 0 +} + +// SetKeepAlivesEnabled controls whether HTTP keep-alives are enabled. +// By default, keep-alives are always enabled. Only very +// resource-constrained environments or servers in the process of +// shutting down should disable them. +func (s *Server) SetKeepAlivesEnabled(v bool) { + if v { + atomic.StoreInt32(&s.disableKeepAlives, 0) + } else { + atomic.StoreInt32(&s.disableKeepAlives, 1) + } +} + +func (s *Server) logf(format string, args ...interface{}) { + if s.ErrorLog != nil { + s.ErrorLog.Printf(format, args...) + } else { + log.Printf(format, args...) + } +} + +// ListenAndServe listens on the TCP network address addr +// and then calls Serve with handler to handle requests +// on incoming connections. Handler is typically nil, +// in which case the DefaultServeMux is used. +// +// A trivial example server is: +// +// package main +// +// import ( +// "io" +// "net/http" +// "log" +// ) +// +// // hello world, the web server +// func HelloServer(w http.ResponseWriter, req *http.Request) { +// io.WriteString(w, "hello, world!\n") +// } +// +// func main() { +// http.HandleFunc("/hello", HelloServer) +// err := http.ListenAndServe(":12345", nil) +// if err != nil { +// log.Fatal("ListenAndServe: ", err) +// } +// } +func ListenAndServe(addr string, handler Handler) error { + server := &Server{Addr: addr, Handler: handler} + return server.ListenAndServe() +} + +// ListenAndServeTLS acts identically to ListenAndServe, except that it +// expects HTTPS connections. Additionally, files containing a certificate and +// matching private key for the server must be provided. If the certificate +// is signed by a certificate authority, the certFile should be the concatenation +// of the server's certificate followed by the CA's certificate. +// +// A trivial example server is: +// +// import ( +// "log" +// "net/http" +// ) +// +// func handler(w http.ResponseWriter, req *http.Request) { +// w.Header().Set("Content-Type", "text/plain") +// w.Write([]byte("This is an example server.\n")) +// } +// +// func main() { +// http.HandleFunc("/", handler) +// log.Printf("About to listen on 10443. Go to https://127.0.0.1:10443/") +// err := http.ListenAndServeTLS(":10443", "cert.pem", "key.pem", nil) +// if err != nil { +// log.Fatal(err) +// } +// } +// +// One can use generate_cert.go in crypto/tls to generate cert.pem and key.pem. +func ListenAndServeTLS(addr string, certFile string, keyFile string, handler Handler) error { + server := &Server{Addr: addr, Handler: handler} + return server.ListenAndServeTLS(certFile, keyFile) +} + +// ListenAndServeTLS listens on the TCP network address srv.Addr and +// then calls Serve to handle requests on incoming TLS connections. 
+// +// Filenames containing a certificate and matching private key for +// the server must be provided. If the certificate is signed by a +// certificate authority, the certFile should be the concatenation +// of the server's certificate followed by the CA's certificate. +// +// If srv.Addr is blank, ":https" is used. +func (srv *Server) ListenAndServeTLS(certFile, keyFile string) error { + addr := srv.Addr + if addr == "" { + addr = ":https" + } + config := &tls.Config{} + if srv.TLSConfig != nil { + *config = *srv.TLSConfig + } + if config.NextProtos == nil { + config.NextProtos = []string{"http/1.1"} + } + + var err error + config.Certificates = make([]tls.Certificate, 1) + config.Certificates[0], err = tls.LoadX509KeyPair(certFile, keyFile) + if err != nil { + return err + } + + ln, err := net.Listen("tcp", addr) + if err != nil { + return err + } + + tlsListener := tls.NewListener(tcpKeepAliveListener{ln.(*net.TCPListener)}, config) + return srv.Serve(tlsListener) +} + +// TimeoutHandler returns a Handler that runs h with the given time limit. +// +// The new Handler calls h.ServeHTTP to handle each request, but if a +// call runs for longer than its time limit, the handler responds with +// a 503 Service Unavailable error and the given message in its body. +// (If msg is empty, a suitable default message will be sent.) +// After such a timeout, writes by h to its ResponseWriter will return +// ErrHandlerTimeout. +func TimeoutHandler(h Handler, dt time.Duration, msg string) Handler { + f := func() <-chan time.Time { + return time.After(dt) + } + return &timeoutHandler{h, f, msg} +} + +// ErrHandlerTimeout is returned on ResponseWriter Write calls +// in handlers which have timed out. +var ErrHandlerTimeout = errors.New("http: Handler timeout") + +type timeoutHandler struct { + handler Handler + timeout func() <-chan time.Time // returns channel producing a timeout + body string +} + +func (h *timeoutHandler) errorBody() string { + if h.body != "" { + return h.body + } + return "Timeout

Timeout

" +} + +func (h *timeoutHandler) ServeHTTP(w ResponseWriter, r *Request) { + done := make(chan bool, 1) + tw := &timeoutWriter{w: w} + go func() { + h.handler.ServeHTTP(tw, r) + done <- true + }() + select { + case <-done: + return + case <-h.timeout(): + tw.mu.Lock() + defer tw.mu.Unlock() + if !tw.wroteHeader { + tw.w.WriteHeader(StatusServiceUnavailable) + tw.w.Write([]byte(h.errorBody())) + } + tw.timedOut = true + } +} + +type timeoutWriter struct { + w ResponseWriter + + mu sync.Mutex + timedOut bool + wroteHeader bool +} + +func (tw *timeoutWriter) Header() Header { + return tw.w.Header() +} + +func (tw *timeoutWriter) Write(p []byte) (int, error) { + tw.mu.Lock() + timedOut := tw.timedOut + tw.mu.Unlock() + if timedOut { + return 0, ErrHandlerTimeout + } + return tw.w.Write(p) +} + +func (tw *timeoutWriter) WriteHeader(code int) { + tw.mu.Lock() + if tw.timedOut || tw.wroteHeader { + tw.mu.Unlock() + return + } + tw.wroteHeader = true + tw.mu.Unlock() + tw.w.WriteHeader(code) +} + +// tcpKeepAliveListener sets TCP keep-alive timeouts on accepted +// connections. It's used by ListenAndServe and ListenAndServeTLS so +// dead TCP connections (e.g. closing laptop mid-download) eventually +// go away. +type tcpKeepAliveListener struct { + *net.TCPListener +} + +func (ln tcpKeepAliveListener) Accept() (c net.Conn, err error) { + tc, err := ln.AcceptTCP() + if err != nil { + return + } + tc.SetKeepAlive(true) + tc.SetKeepAlivePeriod(3 * time.Minute) + return tc, nil +} + +// globalOptionsHandler responds to "OPTIONS *" requests. +type globalOptionsHandler struct{} + +func (globalOptionsHandler) ServeHTTP(w ResponseWriter, r *Request) { + w.Header().Set("Content-Length", "0") + if r.ContentLength != 0 { + // Read up to 4KB of OPTIONS body (as mentioned in the + // spec as being reserved for future use), but anything + // over that is considered a waste of server resources + // (or an attack) and we abort and close the connection, + // courtesy of MaxBytesReader's EOF behavior. + mb := MaxBytesReader(w, r.Body, 4<<10) + io.Copy(ioutil.Discard, mb) + } +} + +type eofReaderWithWriteTo struct{} + +func (eofReaderWithWriteTo) WriteTo(io.Writer) (int64, error) { return 0, nil } +func (eofReaderWithWriteTo) Read([]byte) (int, error) { return 0, io.EOF } + +// eofReader is a non-nil io.ReadCloser that always returns EOF. +// It has a WriteTo method so io.Copy won't need a buffer. +var eofReader = &struct { + eofReaderWithWriteTo + io.Closer +}{ + eofReaderWithWriteTo{}, + ioutil.NopCloser(nil), +} + +// Verify that an io.Copy from an eofReader won't require a buffer. +var _ io.WriterTo = eofReader + +// initNPNRequest is an HTTP handler that initializes certain +// uninitialized fields in its *Request. Such partially-initialized +// Requests come from NPN protocol handlers. +type initNPNRequest struct { + c *tls.Conn + h serverHandler +} + +func (h initNPNRequest) ServeHTTP(rw ResponseWriter, req *Request) { + if req.TLS == nil { + req.TLS = &tls.ConnectionState{} + *req.TLS = h.c.ConnectionState() + } + if req.Body == nil { + req.Body = eofReader + } + if req.RemoteAddr == "" { + req.RemoteAddr = h.c.RemoteAddr().String() + } + h.h.ServeHTTP(rw, req) +} + +// loggingConn is used for debugging. 
+type loggingConn struct { + name string + net.Conn +} + +var ( + uniqNameMu sync.Mutex + uniqNameNext = make(map[string]int) +) + +func newLoggingConn(baseName string, c net.Conn) net.Conn { + uniqNameMu.Lock() + defer uniqNameMu.Unlock() + uniqNameNext[baseName]++ + return &loggingConn{ + name: fmt.Sprintf("%s-%d", baseName, uniqNameNext[baseName]), + Conn: c, + } +} + +func (c *loggingConn) Write(p []byte) (n int, err error) { + log.Printf("%s.Write(%d) = ....", c.name, len(p)) + n, err = c.Conn.Write(p) + log.Printf("%s.Write(%d) = %d, %v", c.name, len(p), n, err) + return +} + +func (c *loggingConn) Read(p []byte) (n int, err error) { + log.Printf("%s.Read(%d) = ....", c.name, len(p)) + n, err = c.Conn.Read(p) + log.Printf("%s.Read(%d) = %d, %v", c.name, len(p), n, err) + return +} + +func (c *loggingConn) Close() (err error) { + log.Printf("%s.Close() = ...", c.name) + err = c.Conn.Close() + log.Printf("%s.Close() = %v", c.name, err) + return +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/core/http/sniff.go b/vendor/github.com/Azure/azure-sdk-for-go/core/http/sniff.go new file mode 100644 index 000000000..68f519b05 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/core/http/sniff.go @@ -0,0 +1,214 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package http + +import ( + "bytes" + "encoding/binary" +) + +// The algorithm uses at most sniffLen bytes to make its decision. +const sniffLen = 512 + +// DetectContentType implements the algorithm described +// at http://mimesniff.spec.whatwg.org/ to determine the +// Content-Type of the given data. It considers at most the +// first 512 bytes of data. DetectContentType always returns +// a valid MIME type: if it cannot determine a more specific one, it +// returns "application/octet-stream". +func DetectContentType(data []byte) string { + if len(data) > sniffLen { + data = data[:sniffLen] + } + + // Index of the first non-whitespace byte in data. + firstNonWS := 0 + for ; firstNonWS < len(data) && isWS(data[firstNonWS]); firstNonWS++ { + } + + for _, sig := range sniffSignatures { + if ct := sig.match(data, firstNonWS); ct != "" { + return ct + } + } + + return "application/octet-stream" // fallback +} + +func isWS(b byte) bool { + return bytes.IndexByte([]byte("\t\n\x0C\r "), b) != -1 +} + +type sniffSig interface { + // match returns the MIME type of the data, or "" if unknown. + match(data []byte, firstNonWS int) string +} + +// Data matching the table in section 6. +var sniffSignatures = []sniffSig{ + htmlSig("' { + return "" + } + return "text/html; charset=utf-8" +} + +type mp4Sig int + +func (mp4Sig) match(data []byte, firstNonWS int) string { + // c.f. section 6.1. + if len(data) < 8 { + return "" + } + boxSize := int(binary.BigEndian.Uint32(data[:4])) + if boxSize%4 != 0 || len(data) < boxSize { + return "" + } + if !bytes.Equal(data[4:8], []byte("ftyp")) { + return "" + } + for st := 8; st < boxSize; st += 4 { + if st == 12 { + // minor version number + continue + } + seg := string(data[st : st+3]) + switch seg { + case "mp4", "iso", "M4V", "M4P", "M4B": + return "video/mp4" + /* The remainder are not in the spec. + case "M4A": + return "audio/mp4" + case "3gp": + return "video/3gpp" + case "jp2": + return "image/jp2" // JPEG 2000 + */ + } + } + return "" +} + +type textSig int + +func (textSig) match(data []byte, firstNonWS int) string { + // c.f. section 5, step 4. 
+ for _, b := range data[firstNonWS:] { + switch { + case 0x00 <= b && b <= 0x08, + b == 0x0B, + 0x0E <= b && b <= 0x1A, + 0x1C <= b && b <= 0x1F: + return "" + } + } + return "text/plain; charset=utf-8" +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/core/http/sniff_test.go b/vendor/github.com/Azure/azure-sdk-for-go/core/http/sniff_test.go new file mode 100644 index 000000000..24ca27afc --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/core/http/sniff_test.go @@ -0,0 +1,171 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package http_test + +import ( + "bytes" + "fmt" + "io" + "io/ioutil" + "log" + . "net/http" + "net/http/httptest" + "reflect" + "strconv" + "strings" + "testing" +) + +var sniffTests = []struct { + desc string + data []byte + contentType string +}{ + // Some nonsense. + {"Empty", []byte{}, "text/plain; charset=utf-8"}, + {"Binary", []byte{1, 2, 3}, "application/octet-stream"}, + + {"HTML document #1", []byte(`blah blah blah`), "text/html; charset=utf-8"}, + {"HTML document #2", []byte(``), "text/html; charset=utf-8"}, + {"HTML document #3 (leading whitespace)", []byte(` ...`), "text/html; charset=utf-8"}, + {"HTML document #4 (leading CRLF)", []byte("\r\n..."), "text/html; charset=utf-8"}, + + {"Plain text", []byte(`This is not HTML. It has ☃ though.`), "text/plain; charset=utf-8"}, + + {"XML", []byte("\nhi") + })) + defer ts.Close() + + resp, err := Get(ts.URL) + if err != nil { + t.Fatal(err) + } + + got := resp.Header["Content-Type"] + want := []string{""} + if !reflect.DeepEqual(got, want) { + t.Errorf("Content-Type = %q; want %q", got, want) + } + resp.Body.Close() +} + +func TestContentTypeWithCopy(t *testing.T) { + defer afterTest(t) + + const ( + input = "\n\n\t\n" + expected = "text/html; charset=utf-8" + ) + + ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) { + // Use io.Copy from a bytes.Buffer to trigger ReadFrom. 
+ buf := bytes.NewBuffer([]byte(input)) + n, err := io.Copy(w, buf) + if int(n) != len(input) || err != nil { + t.Errorf("io.Copy(w, %q) = %v, %v want %d, nil", input, n, err, len(input)) + } + })) + defer ts.Close() + + resp, err := Get(ts.URL) + if err != nil { + t.Fatalf("Get: %v", err) + } + if ct := resp.Header.Get("Content-Type"); ct != expected { + t.Errorf("Content-Type = %q, want %q", ct, expected) + } + data, err := ioutil.ReadAll(resp.Body) + if err != nil { + t.Errorf("reading body: %v", err) + } else if !bytes.Equal(data, []byte(input)) { + t.Errorf("data is %q, want %q", data, input) + } + resp.Body.Close() +} + +func TestSniffWriteSize(t *testing.T) { + defer afterTest(t) + ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) { + size, _ := strconv.Atoi(r.FormValue("size")) + written, err := io.WriteString(w, strings.Repeat("a", size)) + if err != nil { + t.Errorf("write of %d bytes: %v", size, err) + return + } + if written != size { + t.Errorf("write of %d bytes wrote %d bytes", size, written) + } + })) + defer ts.Close() + for _, size := range []int{0, 1, 200, 600, 999, 1000, 1023, 1024, 512 << 10, 1 << 20} { + res, err := Get(fmt.Sprintf("%s/?size=%d", ts.URL, size)) + if err != nil { + t.Fatalf("size %d: %v", size, err) + } + if _, err := io.Copy(ioutil.Discard, res.Body); err != nil { + t.Fatalf("size %d: io.Copy of body = %v", size, err) + } + if err := res.Body.Close(); err != nil { + t.Fatalf("size %d: body Close = %v", size, err) + } + } +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/core/http/status.go b/vendor/github.com/Azure/azure-sdk-for-go/core/http/status.go new file mode 100644 index 000000000..d253bd5cb --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/core/http/status.go @@ -0,0 +1,120 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package http + +// HTTP status codes, defined in RFC 2616. +const ( + StatusContinue = 100 + StatusSwitchingProtocols = 101 + + StatusOK = 200 + StatusCreated = 201 + StatusAccepted = 202 + StatusNonAuthoritativeInfo = 203 + StatusNoContent = 204 + StatusResetContent = 205 + StatusPartialContent = 206 + + StatusMultipleChoices = 300 + StatusMovedPermanently = 301 + StatusFound = 302 + StatusSeeOther = 303 + StatusNotModified = 304 + StatusUseProxy = 305 + StatusTemporaryRedirect = 307 + + StatusBadRequest = 400 + StatusUnauthorized = 401 + StatusPaymentRequired = 402 + StatusForbidden = 403 + StatusNotFound = 404 + StatusMethodNotAllowed = 405 + StatusNotAcceptable = 406 + StatusProxyAuthRequired = 407 + StatusRequestTimeout = 408 + StatusConflict = 409 + StatusGone = 410 + StatusLengthRequired = 411 + StatusPreconditionFailed = 412 + StatusRequestEntityTooLarge = 413 + StatusRequestURITooLong = 414 + StatusUnsupportedMediaType = 415 + StatusRequestedRangeNotSatisfiable = 416 + StatusExpectationFailed = 417 + StatusTeapot = 418 + + StatusInternalServerError = 500 + StatusNotImplemented = 501 + StatusBadGateway = 502 + StatusServiceUnavailable = 503 + StatusGatewayTimeout = 504 + StatusHTTPVersionNotSupported = 505 + + // New HTTP status codes from RFC 6585. Not exported yet in Go 1.1. 
+ // See discussion at https://codereview.appspot.com/7678043/ + statusPreconditionRequired = 428 + statusTooManyRequests = 429 + statusRequestHeaderFieldsTooLarge = 431 + statusNetworkAuthenticationRequired = 511 +) + +var statusText = map[int]string{ + StatusContinue: "Continue", + StatusSwitchingProtocols: "Switching Protocols", + + StatusOK: "OK", + StatusCreated: "Created", + StatusAccepted: "Accepted", + StatusNonAuthoritativeInfo: "Non-Authoritative Information", + StatusNoContent: "No Content", + StatusResetContent: "Reset Content", + StatusPartialContent: "Partial Content", + + StatusMultipleChoices: "Multiple Choices", + StatusMovedPermanently: "Moved Permanently", + StatusFound: "Found", + StatusSeeOther: "See Other", + StatusNotModified: "Not Modified", + StatusUseProxy: "Use Proxy", + StatusTemporaryRedirect: "Temporary Redirect", + + StatusBadRequest: "Bad Request", + StatusUnauthorized: "Unauthorized", + StatusPaymentRequired: "Payment Required", + StatusForbidden: "Forbidden", + StatusNotFound: "Not Found", + StatusMethodNotAllowed: "Method Not Allowed", + StatusNotAcceptable: "Not Acceptable", + StatusProxyAuthRequired: "Proxy Authentication Required", + StatusRequestTimeout: "Request Timeout", + StatusConflict: "Conflict", + StatusGone: "Gone", + StatusLengthRequired: "Length Required", + StatusPreconditionFailed: "Precondition Failed", + StatusRequestEntityTooLarge: "Request Entity Too Large", + StatusRequestURITooLong: "Request URI Too Long", + StatusUnsupportedMediaType: "Unsupported Media Type", + StatusRequestedRangeNotSatisfiable: "Requested Range Not Satisfiable", + StatusExpectationFailed: "Expectation Failed", + StatusTeapot: "I'm a teapot", + + StatusInternalServerError: "Internal Server Error", + StatusNotImplemented: "Not Implemented", + StatusBadGateway: "Bad Gateway", + StatusServiceUnavailable: "Service Unavailable", + StatusGatewayTimeout: "Gateway Timeout", + StatusHTTPVersionNotSupported: "HTTP Version Not Supported", + + statusPreconditionRequired: "Precondition Required", + statusTooManyRequests: "Too Many Requests", + statusRequestHeaderFieldsTooLarge: "Request Header Fields Too Large", + statusNetworkAuthenticationRequired: "Network Authentication Required", +} + +// StatusText returns a text for the HTTP status code. It returns the empty +// string if the code is unknown. 
+func StatusText(code int) string { + return statusText[code] +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/core/http/testdata/file b/vendor/github.com/Azure/azure-sdk-for-go/core/http/testdata/file new file mode 100644 index 000000000..11f11f9be --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/core/http/testdata/file @@ -0,0 +1 @@ +0123456789 diff --git a/vendor/github.com/Azure/azure-sdk-for-go/core/http/testdata/index.html b/vendor/github.com/Azure/azure-sdk-for-go/core/http/testdata/index.html new file mode 100644 index 000000000..da8e1e93d --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/core/http/testdata/index.html @@ -0,0 +1 @@ +index.html says hello diff --git a/vendor/github.com/Azure/azure-sdk-for-go/core/http/testdata/style.css b/vendor/github.com/Azure/azure-sdk-for-go/core/http/testdata/style.css new file mode 100644 index 000000000..208d16d42 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/core/http/testdata/style.css @@ -0,0 +1 @@ +body {} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/core/http/transfer.go b/vendor/github.com/Azure/azure-sdk-for-go/core/http/transfer.go new file mode 100644 index 000000000..7f6368652 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/core/http/transfer.go @@ -0,0 +1,730 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package http + +import ( + "bufio" + "bytes" + "errors" + "fmt" + "io" + "io/ioutil" + "net/textproto" + "sort" + "strconv" + "strings" + "sync" +) + +type errorReader struct { + err error +} + +func (r *errorReader) Read(p []byte) (n int, err error) { + return 0, r.err +} + +// transferWriter inspects the fields of a user-supplied Request or Response, +// sanitizes them without changing the user object and provides methods for +// writing the respective header, body and trailer in wire format. +type transferWriter struct { + Method string + Body io.Reader + BodyCloser io.Closer + ResponseToHEAD bool + ContentLength int64 // -1 means unknown, 0 means exactly none + Close bool + TransferEncoding []string + Trailer Header +} + +func newTransferWriter(r interface{}) (t *transferWriter, err error) { + t = &transferWriter{} + + // Extract relevant fields + atLeastHTTP11 := false + switch rr := r.(type) { + case *Request: + if rr.ContentLength != 0 && rr.Body == nil { + return nil, fmt.Errorf("http: Request.ContentLength=%d with nil Body", rr.ContentLength) + } + t.Method = rr.Method + t.Body = rr.Body + t.BodyCloser = rr.Body + t.ContentLength = rr.ContentLength + t.Close = rr.Close + t.TransferEncoding = rr.TransferEncoding + t.Trailer = rr.Trailer + atLeastHTTP11 = rr.ProtoAtLeast(1, 1) + if t.Body != nil && len(t.TransferEncoding) == 0 && atLeastHTTP11 { + if t.ContentLength == 0 { + // Test to see if it's actually zero or just unset. + var buf [1]byte + n, rerr := io.ReadFull(t.Body, buf[:]) + if rerr != nil && rerr != io.EOF { + t.ContentLength = -1 + t.Body = &errorReader{rerr} + } else if n == 1 { + // Oh, guess there is data in this Body Reader after all. + // The ContentLength field just wasn't set. + // Stich the Body back together again, re-attaching our + // consumed byte. + t.ContentLength = -1 + t.Body = io.MultiReader(bytes.NewReader(buf[:]), t.Body) + } else { + // Body is actually empty. 
+ t.Body = nil + t.BodyCloser = nil + } + } + if t.ContentLength < 0 { + t.TransferEncoding = []string{"chunked"} + } + } + case *Response: + if rr.Request != nil { + t.Method = rr.Request.Method + } + t.Body = rr.Body + t.BodyCloser = rr.Body + t.ContentLength = rr.ContentLength + t.Close = rr.Close + t.TransferEncoding = rr.TransferEncoding + t.Trailer = rr.Trailer + atLeastHTTP11 = rr.ProtoAtLeast(1, 1) + t.ResponseToHEAD = noBodyExpected(t.Method) + } + + // Sanitize Body,ContentLength,TransferEncoding + if t.ResponseToHEAD { + t.Body = nil + if chunked(t.TransferEncoding) { + t.ContentLength = -1 + } + } else { + if !atLeastHTTP11 || t.Body == nil { + t.TransferEncoding = nil + } + if chunked(t.TransferEncoding) { + t.ContentLength = -1 + } else if t.Body == nil { // no chunking, no body + t.ContentLength = 0 + } + } + + // Sanitize Trailer + if !chunked(t.TransferEncoding) { + t.Trailer = nil + } + + return t, nil +} + +func noBodyExpected(requestMethod string) bool { + return requestMethod == "HEAD" +} + +func (t *transferWriter) shouldSendContentLength() bool { + if chunked(t.TransferEncoding) { + return false + } + if t.ContentLength > 0 { + return true + } + // Many servers expect a Content-Length for these methods + if t.Method == "POST" || t.Method == "PUT" { + return true + } + if t.ContentLength == 0 && isIdentity(t.TransferEncoding) { + return true + } + + return false +} + +func (t *transferWriter) WriteHeader(w io.Writer) error { + if t.Close { + if _, err := io.WriteString(w, "Connection: close\r\n"); err != nil { + return err + } + } + + // Write Content-Length and/or Transfer-Encoding whose values are a + // function of the sanitized field triple (Body, ContentLength, + // TransferEncoding) + if t.shouldSendContentLength() { + if _, err := io.WriteString(w, "Content-Length: "); err != nil { + return err + } + if _, err := io.WriteString(w, strconv.FormatInt(t.ContentLength, 10)+"\r\n"); err != nil { + return err + } + } else if chunked(t.TransferEncoding) { + if _, err := io.WriteString(w, "Transfer-Encoding: chunked\r\n"); err != nil { + return err + } + } + + // Write Trailer header + if t.Trailer != nil { + keys := make([]string, 0, len(t.Trailer)) + for k := range t.Trailer { + k = CanonicalHeaderKey(k) + switch k { + case "Transfer-Encoding", "Trailer", "Content-Length": + return &badStringError{"invalid Trailer key", k} + } + keys = append(keys, k) + } + if len(keys) > 0 { + sort.Strings(keys) + // TODO: could do better allocation-wise here, but trailers are rare, + // so being lazy for now. 
+ if _, err := io.WriteString(w, "Trailer: "+strings.Join(keys, ",")+"\r\n"); err != nil { + return err + } + } + } + + return nil +} + +func (t *transferWriter) WriteBody(w io.Writer) error { + var err error + var ncopy int64 + + // Write body + if t.Body != nil { + if chunked(t.TransferEncoding) { + cw := newChunkedWriter(w) + _, err = io.Copy(cw, t.Body) + if err == nil { + err = cw.Close() + } + } else if t.ContentLength == -1 { + ncopy, err = io.Copy(w, t.Body) + } else { + ncopy, err = io.Copy(w, io.LimitReader(t.Body, t.ContentLength)) + if err != nil { + return err + } + var nextra int64 + nextra, err = io.Copy(ioutil.Discard, t.Body) + ncopy += nextra + } + if err != nil { + return err + } + if err = t.BodyCloser.Close(); err != nil { + return err + } + } + + if !t.ResponseToHEAD && t.ContentLength != -1 && t.ContentLength != ncopy { + return fmt.Errorf("http: Request.ContentLength=%d with Body length %d", + t.ContentLength, ncopy) + } + + // TODO(petar): Place trailer writer code here. + if chunked(t.TransferEncoding) { + // Write Trailer header + if t.Trailer != nil { + if err := t.Trailer.Write(w); err != nil { + return err + } + } + // Last chunk, empty trailer + _, err = io.WriteString(w, "\r\n") + } + return err +} + +type transferReader struct { + // Input + Header Header + StatusCode int + RequestMethod string + ProtoMajor int + ProtoMinor int + // Output + Body io.ReadCloser + ContentLength int64 + TransferEncoding []string + Close bool + Trailer Header +} + +// bodyAllowedForStatus reports whether a given response status code +// permits a body. See RFC2616, section 4.4. +func bodyAllowedForStatus(status int) bool { + switch { + case status >= 100 && status <= 199: + return false + case status == 204: + return false + case status == 304: + return false + } + return true +} + +var ( + suppressedHeaders304 = []string{"Content-Type", "Content-Length", "Transfer-Encoding"} + suppressedHeadersNoBody = []string{"Content-Length", "Transfer-Encoding"} +) + +func suppressedHeaders(status int) []string { + switch { + case status == 304: + // RFC 2616 section 10.3.5: "the response MUST NOT include other entity-headers" + return suppressedHeaders304 + case !bodyAllowedForStatus(status): + return suppressedHeadersNoBody + } + return nil +} + +// msg is *Request or *Response. 
+func readTransfer(msg interface{}, r *bufio.Reader) (err error) { + t := &transferReader{RequestMethod: "GET"} + + // Unify input + isResponse := false + switch rr := msg.(type) { + case *Response: + t.Header = rr.Header + t.StatusCode = rr.StatusCode + t.ProtoMajor = rr.ProtoMajor + t.ProtoMinor = rr.ProtoMinor + t.Close = shouldClose(t.ProtoMajor, t.ProtoMinor, t.Header) + isResponse = true + if rr.Request != nil { + t.RequestMethod = rr.Request.Method + } + case *Request: + t.Header = rr.Header + t.ProtoMajor = rr.ProtoMajor + t.ProtoMinor = rr.ProtoMinor + // Transfer semantics for Requests are exactly like those for + // Responses with status code 200, responding to a GET method + t.StatusCode = 200 + default: + panic("unexpected type") + } + + // Default to HTTP/1.1 + if t.ProtoMajor == 0 && t.ProtoMinor == 0 { + t.ProtoMajor, t.ProtoMinor = 1, 1 + } + + // Transfer encoding, content length + t.TransferEncoding, err = fixTransferEncoding(t.RequestMethod, t.Header) + if err != nil { + return err + } + + realLength, err := fixLength(isResponse, t.StatusCode, t.RequestMethod, t.Header, t.TransferEncoding) + if err != nil { + return err + } + if isResponse && t.RequestMethod == "HEAD" { + if n, err := parseContentLength(t.Header.get("Content-Length")); err != nil { + return err + } else { + t.ContentLength = n + } + } else { + t.ContentLength = realLength + } + + // Trailer + t.Trailer, err = fixTrailer(t.Header, t.TransferEncoding) + if err != nil { + return err + } + + // If there is no Content-Length or chunked Transfer-Encoding on a *Response + // and the status is not 1xx, 204 or 304, then the body is unbounded. + // See RFC2616, section 4.4. + switch msg.(type) { + case *Response: + if realLength == -1 && + !chunked(t.TransferEncoding) && + bodyAllowedForStatus(t.StatusCode) { + // Unbounded body. + t.Close = true + } + } + + // Prepare body reader. ContentLength < 0 means chunked encoding + // or close connection when finished, since multipart is not supported yet + switch { + case chunked(t.TransferEncoding): + if noBodyExpected(t.RequestMethod) { + t.Body = eofReader + } else { + t.Body = &body{src: newChunkedReader(r), hdr: msg, r: r, closing: t.Close} + } + case realLength == 0: + t.Body = eofReader + case realLength > 0: + t.Body = &body{src: io.LimitReader(r, realLength), closing: t.Close} + default: + // realLength < 0, i.e. "Content-Length" not mentioned in header + if t.Close { + // Close semantics (i.e. HTTP/1.0) + t.Body = &body{src: r, closing: t.Close} + } else { + // Persistent connection (i.e. HTTP/1.1) + t.Body = eofReader + } + } + + // Unify output + switch rr := msg.(type) { + case *Request: + rr.Body = t.Body + rr.ContentLength = t.ContentLength + rr.TransferEncoding = t.TransferEncoding + rr.Close = t.Close + rr.Trailer = t.Trailer + case *Response: + rr.Body = t.Body + rr.ContentLength = t.ContentLength + rr.TransferEncoding = t.TransferEncoding + rr.Close = t.Close + rr.Trailer = t.Trailer + } + + return nil +} + +// Checks whether chunked is part of the encodings stack +func chunked(te []string) bool { return len(te) > 0 && te[0] == "chunked" } + +// Checks whether the encoding is explicitly "identity". 
+func isIdentity(te []string) bool { return len(te) == 1 && te[0] == "identity" } + +// Sanitize transfer encoding +func fixTransferEncoding(requestMethod string, header Header) ([]string, error) { + raw, present := header["Transfer-Encoding"] + if !present { + return nil, nil + } + + delete(header, "Transfer-Encoding") + + encodings := strings.Split(raw[0], ",") + te := make([]string, 0, len(encodings)) + // TODO: Even though we only support "identity" and "chunked" + // encodings, the loop below is designed with foresight. One + // invariant that must be maintained is that, if present, + // chunked encoding must always come first. + for _, encoding := range encodings { + encoding = strings.ToLower(strings.TrimSpace(encoding)) + // "identity" encoding is not recorded + if encoding == "identity" { + break + } + if encoding != "chunked" { + return nil, &badStringError{"unsupported transfer encoding", encoding} + } + te = te[0 : len(te)+1] + te[len(te)-1] = encoding + } + if len(te) > 1 { + return nil, &badStringError{"too many transfer encodings", strings.Join(te, ",")} + } + if len(te) > 0 { + // Chunked encoding trumps Content-Length. See RFC 2616 + // Section 4.4. Currently len(te) > 0 implies chunked + // encoding. + delete(header, "Content-Length") + return te, nil + } + + return nil, nil +} + +// Determine the expected body length, using RFC 2616 Section 4.4. This +// function is not a method, because ultimately it should be shared by +// ReadResponse and ReadRequest. +func fixLength(isResponse bool, status int, requestMethod string, header Header, te []string) (int64, error) { + + // Logic based on response type or status + if noBodyExpected(requestMethod) { + return 0, nil + } + if status/100 == 1 { + return 0, nil + } + switch status { + case 204, 304: + return 0, nil + } + + // Logic based on Transfer-Encoding + if chunked(te) { + return -1, nil + } + + // Logic based on Content-Length + cl := strings.TrimSpace(header.get("Content-Length")) + if cl != "" { + n, err := parseContentLength(cl) + if err != nil { + return -1, err + } + return n, nil + } else { + header.Del("Content-Length") + } + + if !isResponse && requestMethod == "GET" { + // RFC 2616 doesn't explicitly permit nor forbid an + // entity-body on a GET request so we permit one if + // declared, but we default to 0 here (not -1 below) + // if there's no mention of a body. + return 0, nil + } + + // Body-EOF logic based on other methods (like closing, or chunked coding) + return -1, nil +} + +// Determine whether to hang up after sending a request and body, or +// receiving a response and body +// 'header' is the request headers +func shouldClose(major, minor int, header Header) bool { + if major < 1 { + return true + } else if major == 1 && minor == 0 { + if !strings.Contains(strings.ToLower(header.get("Connection")), "keep-alive") { + return true + } + return false + } else { + // TODO: Should split on commas, toss surrounding white space, + // and check each field. 
+ if strings.ToLower(header.get("Connection")) == "close" { + header.Del("Connection") + return true + } + } + return false +} + +// Parse the trailer header +func fixTrailer(header Header, te []string) (Header, error) { + raw := header.get("Trailer") + if raw == "" { + return nil, nil + } + + header.Del("Trailer") + trailer := make(Header) + keys := strings.Split(raw, ",") + for _, key := range keys { + key = CanonicalHeaderKey(strings.TrimSpace(key)) + switch key { + case "Transfer-Encoding", "Trailer", "Content-Length": + return nil, &badStringError{"bad trailer key", key} + } + trailer[key] = nil + } + if len(trailer) == 0 { + return nil, nil + } + if !chunked(te) { + // Trailer and no chunking + return nil, ErrUnexpectedTrailer + } + return trailer, nil +} + +// body turns a Reader into a ReadCloser. +// Close ensures that the body has been fully read +// and then reads the trailer if necessary. +type body struct { + src io.Reader + hdr interface{} // non-nil (Response or Request) value means read trailer + r *bufio.Reader // underlying wire-format reader for the trailer + closing bool // is the connection to be closed after reading body? + + mu sync.Mutex // guards closed, and calls to Read and Close + closed bool +} + +// ErrBodyReadAfterClose is returned when reading a Request or Response +// Body after the body has been closed. This typically happens when the body is +// read after an HTTP Handler calls WriteHeader or Write on its +// ResponseWriter. +var ErrBodyReadAfterClose = errors.New("http: invalid Read on closed Body") + +func (b *body) Read(p []byte) (n int, err error) { + b.mu.Lock() + defer b.mu.Unlock() + if b.closed { + return 0, ErrBodyReadAfterClose + } + return b.readLocked(p) +} + +// Must hold b.mu. +func (b *body) readLocked(p []byte) (n int, err error) { + n, err = b.src.Read(p) + + if err == io.EOF { + // Chunked case. Read the trailer. + if b.hdr != nil { + if e := b.readTrailer(); e != nil { + err = e + } + b.hdr = nil + } else { + // If the server declared the Content-Length, our body is a LimitedReader + // and we need to check whether this EOF arrived early. + if lr, ok := b.src.(*io.LimitedReader); ok && lr.N > 0 { + err = io.ErrUnexpectedEOF + } + } + } + + // If we can return an EOF here along with the read data, do + // so. This is optional per the io.Reader contract, but doing + // so helps the HTTP transport code recycle its connection + // earlier (since it will see this EOF itself), even if the + // client doesn't do future reads or Close. + if err == nil && n > 0 { + if lr, ok := b.src.(*io.LimitedReader); ok && lr.N == 0 { + err = io.EOF + } + } + + return n, err +} + +var ( + singleCRLF = []byte("\r\n") + doubleCRLF = []byte("\r\n\r\n") +) + +func seeUpcomingDoubleCRLF(r *bufio.Reader) bool { + for peekSize := 4; ; peekSize++ { + // This loop stops when Peek returns an error, + // which it does when r's buffer has been filled. + buf, err := r.Peek(peekSize) + if bytes.HasSuffix(buf, doubleCRLF) { + return true + } + if err != nil { + break + } + } + return false +} + +var errTrailerEOF = errors.New("http: unexpected EOF reading trailer") + +func (b *body) readTrailer() error { + // The common case, since nobody uses trailers. + buf, err := b.r.Peek(2) + if bytes.Equal(buf, singleCRLF) { + b.r.ReadByte() + b.r.ReadByte() + return nil + } + if len(buf) < 2 { + return errTrailerEOF + } + if err != nil { + return err + } + + // Make sure there's a header terminator coming up, to prevent + // a DoS with an unbounded size Trailer. 
It's not easy to + // slip in a LimitReader here, as textproto.NewReader requires + // a concrete *bufio.Reader. Also, we can't get all the way + // back up to our conn's LimitedReader that *might* be backing + // this bufio.Reader. Instead, a hack: we iteratively Peek up + // to the bufio.Reader's max size, looking for a double CRLF. + // This limits the trailer to the underlying buffer size, typically 4kB. + if !seeUpcomingDoubleCRLF(b.r) { + return errors.New("http: suspiciously long trailer after chunked body") + } + + hdr, err := textproto.NewReader(b.r).ReadMIMEHeader() + if err != nil { + if err == io.EOF { + return errTrailerEOF + } + return err + } + switch rr := b.hdr.(type) { + case *Request: + mergeSetHeader(&rr.Trailer, Header(hdr)) + case *Response: + mergeSetHeader(&rr.Trailer, Header(hdr)) + } + return nil +} + +func mergeSetHeader(dst *Header, src Header) { + if *dst == nil { + *dst = src + return + } + for k, vv := range src { + (*dst)[k] = vv + } +} + +func (b *body) Close() error { + b.mu.Lock() + defer b.mu.Unlock() + if b.closed { + return nil + } + var err error + switch { + case b.hdr == nil && b.closing: + // no trailer and closing the connection next. + // no point in reading to EOF. + default: + // Fully consume the body, which will also lead to us reading + // the trailer headers after the body, if present. + _, err = io.Copy(ioutil.Discard, bodyLocked{b}) + } + b.closed = true + return err +} + +// bodyLocked is a io.Reader reading from a *body when its mutex is +// already held. +type bodyLocked struct { + b *body +} + +func (bl bodyLocked) Read(p []byte) (n int, err error) { + if bl.b.closed { + return 0, ErrBodyReadAfterClose + } + return bl.b.readLocked(p) +} + +// parseContentLength trims whitespace from s and returns -1 if no value +// is set, or the value if it's >= 0. +func parseContentLength(cl string) (int64, error) { + cl = strings.TrimSpace(cl) + if cl == "" { + return -1, nil + } + n, err := strconv.ParseInt(cl, 10, 64) + if err != nil || n < 0 { + return 0, &badStringError{"bad Content-Length", cl} + } + return n, nil + +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/core/http/transfer_test.go b/vendor/github.com/Azure/azure-sdk-for-go/core/http/transfer_test.go new file mode 100644 index 000000000..48cd540b9 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/core/http/transfer_test.go @@ -0,0 +1,64 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package http + +import ( + "bufio" + "io" + "strings" + "testing" +) + +func TestBodyReadBadTrailer(t *testing.T) { + b := &body{ + src: strings.NewReader("foobar"), + hdr: true, // force reading the trailer + r: bufio.NewReader(strings.NewReader("")), + } + buf := make([]byte, 7) + n, err := b.Read(buf[:3]) + got := string(buf[:n]) + if got != "foo" || err != nil { + t.Fatalf(`first Read = %d (%q), %v; want 3 ("foo")`, n, got, err) + } + + n, err = b.Read(buf[:]) + got = string(buf[:n]) + if got != "bar" || err != nil { + t.Fatalf(`second Read = %d (%q), %v; want 3 ("bar")`, n, got, err) + } + + n, err = b.Read(buf[:]) + got = string(buf[:n]) + if err == nil { + t.Errorf("final Read was successful (%q), expected error from trailer read", got) + } +} + +func TestFinalChunkedBodyReadEOF(t *testing.T) { + res, err := ReadResponse(bufio.NewReader(strings.NewReader( + "HTTP/1.1 200 OK\r\n"+ + "Transfer-Encoding: chunked\r\n"+ + "\r\n"+ + "0a\r\n"+ + "Body here\n\r\n"+ + "09\r\n"+ + "continued\r\n"+ + "0\r\n"+ + "\r\n")), nil) + if err != nil { + t.Fatal(err) + } + want := "Body here\ncontinued" + buf := make([]byte, len(want)) + n, err := res.Body.Read(buf) + if n != len(want) || err != io.EOF { + t.Logf("body = %#v", res.Body) + t.Errorf("Read = %v, %v; want %d, EOF", n, err, len(want)) + } + if string(buf) != want { + t.Errorf("buf = %q; want %q", buf, want) + } +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/core/http/transport.go b/vendor/github.com/Azure/azure-sdk-for-go/core/http/transport.go new file mode 100644 index 000000000..525885901 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/core/http/transport.go @@ -0,0 +1,1208 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// HTTP client implementation. See RFC 2616. +// +// This is the low-level Transport implementation of RoundTripper. +// The high-level interface is in client.go. + +package http + +import ( + "bufio" + "compress/gzip" + "errors" + "fmt" + "github.com/Azure/azure-sdk-for-go/core/tls" + "io" + "log" + "net" + "net/url" + "os" + "strings" + "sync" + "time" +) + +// DefaultTransport is the default implementation of Transport and is +// used by DefaultClient. It establishes network connections as needed +// and caches them for reuse by subsequent calls. It uses HTTP proxies +// as directed by the $HTTP_PROXY and $NO_PROXY (or $http_proxy and +// $no_proxy) environment variables. +var DefaultTransport RoundTripper = &Transport{ + Proxy: ProxyFromEnvironment, + Dial: (&net.Dialer{ + Timeout: 30 * time.Second, + KeepAlive: 30 * time.Second, + }).Dial, + TLSHandshakeTimeout: 10 * time.Second, +} + +// DefaultMaxIdleConnsPerHost is the default value of Transport's +// MaxIdleConnsPerHost. +const DefaultMaxIdleConnsPerHost = 2 + +// Transport is an implementation of RoundTripper that supports http, +// https, and http proxies (for either http or https with CONNECT). +// Transport can also cache connections for future re-use. +type Transport struct { + idleMu sync.Mutex + idleConn map[connectMethodKey][]*persistConn + idleConnCh map[connectMethodKey]chan *persistConn + reqMu sync.Mutex + reqCanceler map[*Request]func() + altMu sync.RWMutex + altProto map[string]RoundTripper // nil or map of URI scheme => RoundTripper + + // Proxy specifies a function to return a proxy for a given + // Request. If the function returns a non-nil error, the + // request is aborted with the provided error. 
+ // If Proxy is nil or returns a nil *URL, no proxy is used. + Proxy func(*Request) (*url.URL, error) + + // Dial specifies the dial function for creating TCP + // connections. + // If Dial is nil, net.Dial is used. + Dial func(network, addr string) (net.Conn, error) + + // TLSClientConfig specifies the TLS configuration to use with + // tls.Client. If nil, the default configuration is used. + TLSClientConfig *tls.Config + + // TLSHandshakeTimeout specifies the maximum amount of time waiting to + // wait for a TLS handshake. Zero means no timeout. + TLSHandshakeTimeout time.Duration + + // DisableKeepAlives, if true, prevents re-use of TCP connections + // between different HTTP requests. + DisableKeepAlives bool + + // DisableCompression, if true, prevents the Transport from + // requesting compression with an "Accept-Encoding: gzip" + // request header when the Request contains no existing + // Accept-Encoding value. If the Transport requests gzip on + // its own and gets a gzipped response, it's transparently + // decoded in the Response.Body. However, if the user + // explicitly requested gzip it is not automatically + // uncompressed. + DisableCompression bool + + // MaxIdleConnsPerHost, if non-zero, controls the maximum idle + // (keep-alive) to keep per-host. If zero, + // DefaultMaxIdleConnsPerHost is used. + MaxIdleConnsPerHost int + + // ResponseHeaderTimeout, if non-zero, specifies the amount of + // time to wait for a server's response headers after fully + // writing the request (including its body, if any). This + // time does not include the time to read the response body. + ResponseHeaderTimeout time.Duration + + // TODO: tunable on global max cached connections + // TODO: tunable on timeout on cached connections +} + +// ProxyFromEnvironment returns the URL of the proxy to use for a +// given request, as indicated by the environment variables +// $HTTP_PROXY and $NO_PROXY (or $http_proxy and $no_proxy). +// An error is returned if the proxy environment is invalid. +// A nil URL and nil error are returned if no proxy is defined in the +// environment, or a proxy should not be used for the given request. +// +// As a special case, if req.URL.Host is "localhost" (with or without +// a port number), then a nil URL and nil error will be returned. +func ProxyFromEnvironment(req *Request) (*url.URL, error) { + proxy := httpProxyEnv.Get() + if proxy == "" { + return nil, nil + } + if !useProxy(canonicalAddr(req.URL)) { + return nil, nil + } + proxyURL, err := url.Parse(proxy) + if err != nil || !strings.HasPrefix(proxyURL.Scheme, "http") { + // proxy was bogus. Try prepending "http://" to it and + // see if that parses correctly. If not, we fall + // through and complain about the original one. + if proxyURL, err := url.Parse("http://" + proxy); err == nil { + return proxyURL, nil + } + } + if err != nil { + return nil, fmt.Errorf("invalid proxy address %q: %v", proxy, err) + } + return proxyURL, nil +} + +// ProxyURL returns a proxy function (for use in a Transport) +// that always returns the same URL. +func ProxyURL(fixedURL *url.URL) func(*Request) (*url.URL, error) { + return func(*Request) (*url.URL, error) { + return fixedURL, nil + } +} + +// transportRequest is a wrapper around a *Request that adds +// optional extra headers to write. 
+type transportRequest struct { + *Request // original request, not to be mutated + extra Header // extra headers to write, or nil +} + +func (tr *transportRequest) extraHeaders() Header { + if tr.extra == nil { + tr.extra = make(Header) + } + return tr.extra +} + +// RoundTrip implements the RoundTripper interface. +// +// For higher-level HTTP client support (such as handling of cookies +// and redirects), see Get, Post, and the Client type. +func (t *Transport) RoundTrip(req *Request) (resp *Response, err error) { + if req.URL == nil { + req.closeBody() + return nil, errors.New("http: nil Request.URL") + } + if req.Header == nil { + req.closeBody() + return nil, errors.New("http: nil Request.Header") + } + if req.URL.Scheme != "http" && req.URL.Scheme != "https" { + t.altMu.RLock() + var rt RoundTripper + if t.altProto != nil { + rt = t.altProto[req.URL.Scheme] + } + t.altMu.RUnlock() + if rt == nil { + req.closeBody() + return nil, &badStringError{"unsupported protocol scheme", req.URL.Scheme} + } + return rt.RoundTrip(req) + } + if req.URL.Host == "" { + req.closeBody() + return nil, errors.New("http: no Host in request URL") + } + treq := &transportRequest{Request: req} + cm, err := t.connectMethodForRequest(treq) + if err != nil { + req.closeBody() + return nil, err + } + + // Get the cached or newly-created connection to either the + // host (for http or https), the http proxy, or the http proxy + // pre-CONNECTed to https server. In any case, we'll be ready + // to send it requests. + pconn, err := t.getConn(req, cm) + if err != nil { + t.setReqCanceler(req, nil) + req.closeBody() + return nil, err + } + + return pconn.roundTrip(treq) +} + +// RegisterProtocol registers a new protocol with scheme. +// The Transport will pass requests using the given scheme to rt. +// It is rt's responsibility to simulate HTTP request semantics. +// +// RegisterProtocol can be used by other packages to provide +// implementations of protocol schemes like "ftp" or "file". +func (t *Transport) RegisterProtocol(scheme string, rt RoundTripper) { + if scheme == "http" || scheme == "https" { + panic("protocol " + scheme + " already registered") + } + t.altMu.Lock() + defer t.altMu.Unlock() + if t.altProto == nil { + t.altProto = make(map[string]RoundTripper) + } + if _, exists := t.altProto[scheme]; exists { + panic("protocol " + scheme + " already registered") + } + t.altProto[scheme] = rt +} + +// CloseIdleConnections closes any connections which were previously +// connected from previous requests but are now sitting idle in +// a "keep-alive" state. It does not interrupt any connections currently +// in use. +func (t *Transport) CloseIdleConnections() { + t.idleMu.Lock() + m := t.idleConn + t.idleConn = nil + t.idleConnCh = nil + t.idleMu.Unlock() + for _, conns := range m { + for _, pconn := range conns { + pconn.close() + } + } +} + +// CancelRequest cancels an in-flight request by closing its +// connection. +func (t *Transport) CancelRequest(req *Request) { + t.reqMu.Lock() + cancel := t.reqCanceler[req] + t.reqMu.Unlock() + if cancel != nil { + cancel() + } +} + +// +// Private implementation past this point. +// + +var ( + httpProxyEnv = &envOnce{ + names: []string{"HTTP_PROXY", "http_proxy"}, + } + noProxyEnv = &envOnce{ + names: []string{"NO_PROXY", "no_proxy"}, + } +) + +// envOnce looks up an environment variable (optionally by multiple +// names) once. It mitigates expensive lookups on some platforms +// (e.g. Windows). 
+type envOnce struct { + names []string + once sync.Once + val string +} + +func (e *envOnce) Get() string { + e.once.Do(e.init) + return e.val +} + +func (e *envOnce) init() { + for _, n := range e.names { + e.val = os.Getenv(n) + if e.val != "" { + return + } + } +} + +// reset is used by tests +func (e *envOnce) reset() { + e.once = sync.Once{} + e.val = "" +} + +func (t *Transport) connectMethodForRequest(treq *transportRequest) (cm connectMethod, err error) { + cm.targetScheme = treq.URL.Scheme + cm.targetAddr = canonicalAddr(treq.URL) + if t.Proxy != nil { + cm.proxyURL, err = t.Proxy(treq.Request) + } + return cm, nil +} + +// proxyAuth returns the Proxy-Authorization header to set +// on requests, if applicable. +func (cm *connectMethod) proxyAuth() string { + if cm.proxyURL == nil { + return "" + } + if u := cm.proxyURL.User; u != nil { + username := u.Username() + password, _ := u.Password() + return "Basic " + basicAuth(username, password) + } + return "" +} + +// putIdleConn adds pconn to the list of idle persistent connections awaiting +// a new request. +// If pconn is no longer needed or not in a good state, putIdleConn +// returns false. +func (t *Transport) putIdleConn(pconn *persistConn) bool { + if t.DisableKeepAlives || t.MaxIdleConnsPerHost < 0 { + pconn.close() + return false + } + if pconn.isBroken() { + return false + } + key := pconn.cacheKey + max := t.MaxIdleConnsPerHost + if max == 0 { + max = DefaultMaxIdleConnsPerHost + } + t.idleMu.Lock() + + waitingDialer := t.idleConnCh[key] + select { + case waitingDialer <- pconn: + // We're done with this pconn and somebody else is + // currently waiting for a conn of this type (they're + // actively dialing, but this conn is ready + // first). Chrome calls this socket late binding. See + // https://insouciant.org/tech/connection-management-in-chromium/ + t.idleMu.Unlock() + return true + default: + if waitingDialer != nil { + // They had populated this, but their dial won + // first, so we can clean up this map entry. + delete(t.idleConnCh, key) + } + } + if t.idleConn == nil { + t.idleConn = make(map[connectMethodKey][]*persistConn) + } + if len(t.idleConn[key]) >= max { + t.idleMu.Unlock() + pconn.close() + return false + } + for _, exist := range t.idleConn[key] { + if exist == pconn { + log.Fatalf("dup idle pconn %p in freelist", pconn) + } + } + t.idleConn[key] = append(t.idleConn[key], pconn) + t.idleMu.Unlock() + return true +} + +// getIdleConnCh returns a channel to receive and return idle +// persistent connection for the given connectMethod. +// It may return nil, if persistent connections are not being used. +func (t *Transport) getIdleConnCh(cm connectMethod) chan *persistConn { + if t.DisableKeepAlives { + return nil + } + key := cm.key() + t.idleMu.Lock() + defer t.idleMu.Unlock() + if t.idleConnCh == nil { + t.idleConnCh = make(map[connectMethodKey]chan *persistConn) + } + ch, ok := t.idleConnCh[key] + if !ok { + ch = make(chan *persistConn) + t.idleConnCh[key] = ch + } + return ch +} + +func (t *Transport) getIdleConn(cm connectMethod) (pconn *persistConn) { + key := cm.key() + t.idleMu.Lock() + defer t.idleMu.Unlock() + if t.idleConn == nil { + return nil + } + for { + pconns, ok := t.idleConn[key] + if !ok { + return nil + } + if len(pconns) == 1 { + pconn = pconns[0] + delete(t.idleConn, key) + } else { + // 2 or more cached connections; pop last + // TODO: queue? 
+ pconn = pconns[len(pconns)-1] + t.idleConn[key] = pconns[:len(pconns)-1] + } + if !pconn.isBroken() { + return + } + } +} + +func (t *Transport) setReqCanceler(r *Request, fn func()) { + t.reqMu.Lock() + defer t.reqMu.Unlock() + if t.reqCanceler == nil { + t.reqCanceler = make(map[*Request]func()) + } + if fn != nil { + t.reqCanceler[r] = fn + } else { + delete(t.reqCanceler, r) + } +} + +func (t *Transport) dial(network, addr string) (c net.Conn, err error) { + if t.Dial != nil { + return t.Dial(network, addr) + } + return net.Dial(network, addr) +} + +// getConn dials and creates a new persistConn to the target as +// specified in the connectMethod. This includes doing a proxy CONNECT +// and/or setting up TLS. If this doesn't return an error, the persistConn +// is ready to write requests to. +func (t *Transport) getConn(req *Request, cm connectMethod) (*persistConn, error) { + if pc := t.getIdleConn(cm); pc != nil { + return pc, nil + } + + type dialRes struct { + pc *persistConn + err error + } + dialc := make(chan dialRes) + + handlePendingDial := func() { + if v := <-dialc; v.err == nil { + t.putIdleConn(v.pc) + } + } + + cancelc := make(chan struct{}) + t.setReqCanceler(req, func() { close(cancelc) }) + + go func() { + pc, err := t.dialConn(cm) + dialc <- dialRes{pc, err} + }() + + idleConnCh := t.getIdleConnCh(cm) + select { + case v := <-dialc: + // Our dial finished. + return v.pc, v.err + case pc := <-idleConnCh: + // Another request finished first and its net.Conn + // became available before our dial. Or somebody + // else's dial that they didn't use. + // But our dial is still going, so give it away + // when it finishes: + go handlePendingDial() + return pc, nil + case <-cancelc: + go handlePendingDial() + return nil, errors.New("net/http: request canceled while waiting for connection") + } +} + +func (t *Transport) dialConn(cm connectMethod) (*persistConn, error) { + conn, err := t.dial("tcp", cm.addr()) + if err != nil { + if cm.proxyURL != nil { + err = fmt.Errorf("http: error connecting to proxy %s: %v", cm.proxyURL, err) + } + return nil, err + } + + pa := cm.proxyAuth() + + pconn := &persistConn{ + t: t, + cacheKey: cm.key(), + conn: conn, + reqch: make(chan requestAndChan, 1), + writech: make(chan writeRequest, 1), + closech: make(chan struct{}), + writeErrCh: make(chan error, 1), + } + + switch { + case cm.proxyURL == nil: + // Do nothing. + case cm.targetScheme == "http": + pconn.isProxy = true + if pa != "" { + pconn.mutateHeaderFunc = func(h Header) { + h.Set("Proxy-Authorization", pa) + } + } + case cm.targetScheme == "https": + connectReq := &Request{ + Method: "CONNECT", + URL: &url.URL{Opaque: cm.targetAddr}, + Host: cm.targetAddr, + Header: make(Header), + } + if pa != "" { + connectReq.Header.Set("Proxy-Authorization", pa) + } + connectReq.Write(conn) + + // Read response. + // Okay to use and discard buffered reader here, because + // TLS server will not speak until spoken to. + br := bufio.NewReader(conn) + resp, err := ReadResponse(br, connectReq) + if err != nil { + conn.Close() + return nil, err + } + if resp.StatusCode != 200 { + f := strings.SplitN(resp.Status, " ", 2) + conn.Close() + return nil, errors.New(f[1]) + } + } + + if cm.targetScheme == "https" { + // Initiate TLS and check remote host name against certificate. 
+ cfg := t.TLSClientConfig + if cfg == nil || cfg.ServerName == "" { + host := cm.tlsHost() + if cfg == nil { + cfg = &tls.Config{ServerName: host} + } else { + clone := *cfg // shallow clone + clone.ServerName = host + cfg = &clone + } + } + plainConn := conn + tlsConn := tls.Client(plainConn, cfg) + errc := make(chan error, 2) + var timer *time.Timer // for canceling TLS handshake + if d := t.TLSHandshakeTimeout; d != 0 { + timer = time.AfterFunc(d, func() { + errc <- tlsHandshakeTimeoutError{} + }) + } + go func() { + err := tlsConn.Handshake() + if timer != nil { + timer.Stop() + } + errc <- err + }() + if err := <-errc; err != nil { + plainConn.Close() + return nil, err + } + if !cfg.InsecureSkipVerify { + if err := tlsConn.VerifyHostname(cfg.ServerName); err != nil { + plainConn.Close() + return nil, err + } + } + cs := tlsConn.ConnectionState() + pconn.tlsState = &cs + pconn.conn = tlsConn + } + + pconn.br = bufio.NewReader(noteEOFReader{pconn.conn, &pconn.sawEOF}) + pconn.bw = bufio.NewWriter(pconn.conn) + go pconn.readLoop() + go pconn.writeLoop() + return pconn, nil +} + +// useProxy returns true if requests to addr should use a proxy, +// according to the NO_PROXY or no_proxy environment variable. +// addr is always a canonicalAddr with a host and port. +func useProxy(addr string) bool { + if len(addr) == 0 { + return true + } + host, _, err := net.SplitHostPort(addr) + if err != nil { + return false + } + if host == "localhost" { + return false + } + if ip := net.ParseIP(host); ip != nil { + if ip.IsLoopback() { + return false + } + } + + no_proxy := noProxyEnv.Get() + if no_proxy == "*" { + return false + } + + addr = strings.ToLower(strings.TrimSpace(addr)) + if hasPort(addr) { + addr = addr[:strings.LastIndex(addr, ":")] + } + + for _, p := range strings.Split(no_proxy, ",") { + p = strings.ToLower(strings.TrimSpace(p)) + if len(p) == 0 { + continue + } + if hasPort(p) { + p = p[:strings.LastIndex(p, ":")] + } + if addr == p { + return false + } + if p[0] == '.' && (strings.HasSuffix(addr, p) || addr == p[1:]) { + // no_proxy ".foo.com" matches "bar.foo.com" or "foo.com" + return false + } + if p[0] != '.' && strings.HasSuffix(addr, p) && addr[len(addr)-len(p)-1] == '.' { + // no_proxy "foo.com" matches "bar.foo.com" + return false + } + } + return true +} + +// connectMethod is the map key (in its String form) for keeping persistent +// TCP connections alive for subsequent HTTP requests. +// +// A connect method may be of the following types: +// +// Cache key form Description +// ----------------- ------------------------- +// |http|foo.com http directly to server, no proxy +// |https|foo.com https directly to server, no proxy +// http://proxy.com|https|foo.com http to proxy, then CONNECT to foo.com +// http://proxy.com|http http to proxy, http to anywhere after that +// +// Note: no support to https to the proxy yet. +// +type connectMethod struct { + proxyURL *url.URL // nil for no proxy, else full proxy URL + targetScheme string // "http" or "https" + targetAddr string // Not used if proxy + http targetScheme (4th example in table) +} + +func (cm *connectMethod) key() connectMethodKey { + proxyStr := "" + targetAddr := cm.targetAddr + if cm.proxyURL != nil { + proxyStr = cm.proxyURL.String() + if cm.targetScheme == "http" { + targetAddr = "" + } + } + return connectMethodKey{ + proxy: proxyStr, + scheme: cm.targetScheme, + addr: targetAddr, + } +} + +// addr returns the first hop "host:port" to which we need to TCP connect. 
+func (cm *connectMethod) addr() string { + if cm.proxyURL != nil { + return canonicalAddr(cm.proxyURL) + } + return cm.targetAddr +} + +// tlsHost returns the host name to match against the peer's +// TLS certificate. +func (cm *connectMethod) tlsHost() string { + h := cm.targetAddr + if hasPort(h) { + h = h[:strings.LastIndex(h, ":")] + } + return h +} + +// connectMethodKey is the map key version of connectMethod, with a +// stringified proxy URL (or the empty string) instead of a pointer to +// a URL. +type connectMethodKey struct { + proxy, scheme, addr string +} + +func (k connectMethodKey) String() string { + // Only used by tests. + return fmt.Sprintf("%s|%s|%s", k.proxy, k.scheme, k.addr) +} + +// persistConn wraps a connection, usually a persistent one +// (but may be used for non-keep-alive requests as well) +type persistConn struct { + t *Transport + cacheKey connectMethodKey + conn net.Conn + tlsState *tls.ConnectionState + br *bufio.Reader // from conn + sawEOF bool // whether we've seen EOF from conn; owned by readLoop + bw *bufio.Writer // to conn + reqch chan requestAndChan // written by roundTrip; read by readLoop + writech chan writeRequest // written by roundTrip; read by writeLoop + closech chan struct{} // closed when conn closed + isProxy bool + // writeErrCh passes the request write error (usually nil) + // from the writeLoop goroutine to the readLoop which passes + // it off to the res.Body reader, which then uses it to decide + // whether or not a connection can be reused. Issue 7569. + writeErrCh chan error + + lk sync.Mutex // guards following fields + numExpectedResponses int + closed bool // whether conn has been closed + broken bool // an error has happened on this connection; marked broken so it's not reused. + // mutateHeaderFunc is an optional func to modify extra + // headers on each outbound request before it's written. (the + // original Request given to RoundTrip is not modified) + mutateHeaderFunc func(Header) +} + +// isBroken reports whether this connection is in a known broken state. +func (pc *persistConn) isBroken() bool { + pc.lk.Lock() + b := pc.broken + pc.lk.Unlock() + return b +} + +func (pc *persistConn) cancelRequest() { + pc.conn.Close() +} + +var remoteSideClosedFunc func(error) bool // or nil to use default + +func remoteSideClosed(err error) bool { + if err == io.EOF { + return true + } + if remoteSideClosedFunc != nil { + return remoteSideClosedFunc(err) + } + return false +} + +func (pc *persistConn) readLoop() { + alive := true + + for alive { + pb, err := pc.br.Peek(1) + + pc.lk.Lock() + if pc.numExpectedResponses == 0 { + if !pc.closed { + pc.closeLocked() + if len(pb) > 0 { + log.Printf("Unsolicited response received on idle HTTP channel starting with %q; err=%v", + string(pb), err) + } + } + pc.lk.Unlock() + return + } + pc.lk.Unlock() + + rc := <-pc.reqch + + var resp *Response + if err == nil { + resp, err = ReadResponse(pc.br, rc.req) + if err == nil && resp.StatusCode == 100 { + // Skip any 100-continue for now. + // TODO(bradfitz): if rc.req had "Expect: 100-continue", + // actually block the request body write and signal the + // writeLoop now to begin sending it. (Issue 2184) For now we + // eat it, since we're never expecting one. 
+ resp, err = ReadResponse(pc.br, rc.req) + } + } + + if resp != nil { + resp.TLS = pc.tlsState + } + + hasBody := resp != nil && rc.req.Method != "HEAD" && resp.ContentLength != 0 + + if err != nil { + pc.close() + } else { + if rc.addedGzip && hasBody && resp.Header.Get("Content-Encoding") == "gzip" { + resp.Header.Del("Content-Encoding") + resp.Header.Del("Content-Length") + resp.ContentLength = -1 + resp.Body = &gzipReader{body: resp.Body} + } + resp.Body = &bodyEOFSignal{body: resp.Body} + } + + if err != nil || resp.Close || rc.req.Close || resp.StatusCode <= 199 { + // Don't do keep-alive on error if either party requested a close + // or we get an unexpected informational (1xx) response. + // StatusCode 100 is already handled above. + alive = false + } + + var waitForBodyRead chan bool + if hasBody { + waitForBodyRead = make(chan bool, 2) + resp.Body.(*bodyEOFSignal).earlyCloseFn = func() error { + // Sending false here sets alive to + // false and closes the connection + // below. + waitForBodyRead <- false + return nil + } + resp.Body.(*bodyEOFSignal).fn = func(err error) { + waitForBodyRead <- alive && + err == nil && + !pc.sawEOF && + pc.wroteRequest() && + pc.t.putIdleConn(pc) + } + } + + if alive && !hasBody { + alive = !pc.sawEOF && + pc.wroteRequest() && + pc.t.putIdleConn(pc) + } + + rc.ch <- responseAndError{resp, err} + + // Wait for the just-returned response body to be fully consumed + // before we race and peek on the underlying bufio reader. + if waitForBodyRead != nil { + select { + case alive = <-waitForBodyRead: + case <-pc.closech: + alive = false + } + } + + pc.t.setReqCanceler(rc.req, nil) + + if !alive { + pc.close() + } + } +} + +func (pc *persistConn) writeLoop() { + for { + select { + case wr := <-pc.writech: + if pc.isBroken() { + wr.ch <- errors.New("http: can't write HTTP request on broken connection") + continue + } + err := wr.req.Request.write(pc.bw, pc.isProxy, wr.req.extra) + if err == nil { + err = pc.bw.Flush() + } + if err != nil { + pc.markBroken() + wr.req.Request.closeBody() + } + pc.writeErrCh <- err // to the body reader, which might recycle us + wr.ch <- err // to the roundTrip function + case <-pc.closech: + return + } + } +} + +// wroteRequest is a check before recycling a connection that the previous write +// (from writeLoop above) happened and was successful. +func (pc *persistConn) wroteRequest() bool { + select { + case err := <-pc.writeErrCh: + // Common case: the write happened well before the response, so + // avoid creating a timer. + return err == nil + default: + // Rare case: the request was written in writeLoop above but + // before it could send to pc.writeErrCh, the reader read it + // all, processed it, and called us here. In this case, give the + // write goroutine a bit of time to finish its send. + // + // Less rare case: We also get here in the legitimate case of + // Issue 7569, where the writer is still writing (or stalled), + // but the server has already replied. In this case, we don't + // want to wait too long, and we want to return false so this + // connection isn't re-used. + select { + case err := <-pc.writeErrCh: + return err == nil + case <-time.After(50 * time.Millisecond): + return false + } + } +} + +type responseAndError struct { + res *Response + err error +} + +type requestAndChan struct { + req *Request + ch chan responseAndError + + // did the Transport (as opposed to the client code) add an + // Accept-Encoding gzip header? only if it we set it do + // we transparently decode the gzip. 
+ addedGzip bool +} + +// A writeRequest is sent by the readLoop's goroutine to the +// writeLoop's goroutine to write a request while the read loop +// concurrently waits on both the write response and the server's +// reply. +type writeRequest struct { + req *transportRequest + ch chan<- error +} + +type httpError struct { + err string + timeout bool +} + +func (e *httpError) Error() string { return e.err } +func (e *httpError) Timeout() bool { return e.timeout } +func (e *httpError) Temporary() bool { return true } + +var errTimeout error = &httpError{err: "net/http: timeout awaiting response headers", timeout: true} +var errClosed error = &httpError{err: "net/http: transport closed before response was received"} + +func (pc *persistConn) roundTrip(req *transportRequest) (resp *Response, err error) { + pc.t.setReqCanceler(req.Request, pc.cancelRequest) + pc.lk.Lock() + pc.numExpectedResponses++ + headerFn := pc.mutateHeaderFunc + pc.lk.Unlock() + + if headerFn != nil { + headerFn(req.extraHeaders()) + } + + // Ask for a compressed version if the caller didn't set their + // own value for Accept-Encoding. We only attempted to + // uncompress the gzip stream if we were the layer that + // requested it. + requestedGzip := false + if !pc.t.DisableCompression && req.Header.Get("Accept-Encoding") == "" && req.Method != "HEAD" { + // Request gzip only, not deflate. Deflate is ambiguous and + // not as universally supported anyway. + // See: http://www.gzip.org/zlib/zlib_faq.html#faq38 + // + // Note that we don't request this for HEAD requests, + // due to a bug in nginx: + // http://trac.nginx.org/nginx/ticket/358 + // http://golang.org/issue/5522 + requestedGzip = true + req.extraHeaders().Set("Accept-Encoding", "gzip") + } + + // Write the request concurrently with waiting for a response, + // in case the server decides to reply before reading our full + // request body. + writeErrCh := make(chan error, 1) + pc.writech <- writeRequest{req, writeErrCh} + + resc := make(chan responseAndError, 1) + pc.reqch <- requestAndChan{req.Request, resc, requestedGzip} + + var re responseAndError + var pconnDeadCh = pc.closech + var failTicker <-chan time.Time + var respHeaderTimer <-chan time.Time +WaitResponse: + for { + select { + case err := <-writeErrCh: + if err != nil { + re = responseAndError{nil, err} + pc.close() + break WaitResponse + } + if d := pc.t.ResponseHeaderTimeout; d > 0 { + respHeaderTimer = time.After(d) + } + case <-pconnDeadCh: + // The persist connection is dead. This shouldn't + // usually happen (only with Connection: close responses + // with no response bodies), but if it does happen it + // means either a) the remote server hung up on us + // prematurely, or b) the readLoop sent us a response & + // closed its closech at roughly the same time, and we + // selected this case first, in which case a response + // might still be coming soon. + // + // We can't avoid the select race in b) by using a unbuffered + // resc channel instead, because then goroutines can + // leak if we exit due to other errors. 
+ pconnDeadCh = nil // avoid spinning + failTicker = time.After(100 * time.Millisecond) // arbitrary time to wait for resc + case <-failTicker: + re = responseAndError{err: errClosed} + break WaitResponse + case <-respHeaderTimer: + pc.close() + re = responseAndError{err: errTimeout} + break WaitResponse + case re = <-resc: + break WaitResponse + } + } + + pc.lk.Lock() + pc.numExpectedResponses-- + pc.lk.Unlock() + + if re.err != nil { + pc.t.setReqCanceler(req.Request, nil) + } + return re.res, re.err +} + +// markBroken marks a connection as broken (so it's not reused). +// It differs from close in that it doesn't close the underlying +// connection for use when it's still being read. +func (pc *persistConn) markBroken() { + pc.lk.Lock() + defer pc.lk.Unlock() + pc.broken = true +} + +func (pc *persistConn) close() { + pc.lk.Lock() + defer pc.lk.Unlock() + pc.closeLocked() +} + +func (pc *persistConn) closeLocked() { + pc.broken = true + if !pc.closed { + pc.conn.Close() + pc.closed = true + close(pc.closech) + } + pc.mutateHeaderFunc = nil +} + +var portMap = map[string]string{ + "http": "80", + "https": "443", +} + +// canonicalAddr returns url.Host but always with a ":port" suffix +func canonicalAddr(url *url.URL) string { + addr := url.Host + if !hasPort(addr) { + return addr + ":" + portMap[url.Scheme] + } + return addr +} + +// bodyEOFSignal wraps a ReadCloser but runs fn (if non-nil) at most +// once, right before its final (error-producing) Read or Close call +// returns. If earlyCloseFn is non-nil and Close is called before +// io.EOF is seen, earlyCloseFn is called instead of fn, and its +// return value is the return value from Close. +type bodyEOFSignal struct { + body io.ReadCloser + mu sync.Mutex // guards following 4 fields + closed bool // whether Close has been called + rerr error // sticky Read error + fn func(error) // error will be nil on Read io.EOF + earlyCloseFn func() error // optional alt Close func used if io.EOF not seen +} + +func (es *bodyEOFSignal) Read(p []byte) (n int, err error) { + es.mu.Lock() + closed, rerr := es.closed, es.rerr + es.mu.Unlock() + if closed { + return 0, errors.New("http: read on closed response body") + } + if rerr != nil { + return 0, rerr + } + + n, err = es.body.Read(p) + if err != nil { + es.mu.Lock() + defer es.mu.Unlock() + if es.rerr == nil { + es.rerr = err + } + es.condfn(err) + } + return +} + +func (es *bodyEOFSignal) Close() error { + es.mu.Lock() + defer es.mu.Unlock() + if es.closed { + return nil + } + es.closed = true + if es.earlyCloseFn != nil && es.rerr != io.EOF { + return es.earlyCloseFn() + } + err := es.body.Close() + es.condfn(err) + return err +} + +// caller must hold es.mu. 
+func (es *bodyEOFSignal) condfn(err error) { + if es.fn == nil { + return + } + if err == io.EOF { + err = nil + } + es.fn(err) + es.fn = nil +} + +// gzipReader wraps a response body so it can lazily +// call gzip.NewReader on the first call to Read +type gzipReader struct { + body io.ReadCloser // underlying Response.Body + zr io.Reader // lazily-initialized gzip reader +} + +func (gz *gzipReader) Read(p []byte) (n int, err error) { + if gz.zr == nil { + gz.zr, err = gzip.NewReader(gz.body) + if err != nil { + return 0, err + } + } + return gz.zr.Read(p) +} + +func (gz *gzipReader) Close() error { + return gz.body.Close() +} + +type readerAndCloser struct { + io.Reader + io.Closer +} + +type tlsHandshakeTimeoutError struct{} + +func (tlsHandshakeTimeoutError) Timeout() bool { return true } +func (tlsHandshakeTimeoutError) Temporary() bool { return true } +func (tlsHandshakeTimeoutError) Error() string { return "net/http: TLS handshake timeout" } + +type noteEOFReader struct { + r io.Reader + sawEOF *bool +} + +func (nr noteEOFReader) Read(p []byte) (n int, err error) { + n, err = nr.r.Read(p) + if err == io.EOF { + *nr.sawEOF = true + } + return +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/core/http/transport_test.go b/vendor/github.com/Azure/azure-sdk-for-go/core/http/transport_test.go new file mode 100644 index 000000000..afa2e4a60 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/core/http/transport_test.go @@ -0,0 +1,2173 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Tests for transport.go + +package http_test + +import ( + "bufio" + "bytes" + "compress/gzip" + "crypto/rand" + "crypto/tls" + "errors" + "fmt" + "io" + "io/ioutil" + "log" + "net" + "net/http" + . "net/http" + "net/http/httptest" + "net/url" + "os" + "runtime" + "strconv" + "strings" + "sync" + "testing" + "time" +) + +// TODO: test 5 pipelined requests with responses: 1) OK, 2) OK, Connection: Close +// and then verify that the final 2 responses get errors back. + +// hostPortHandler writes back the client's "host:port". +var hostPortHandler = HandlerFunc(func(w ResponseWriter, r *Request) { + if r.FormValue("close") == "true" { + w.Header().Set("Connection", "close") + } + w.Write([]byte(r.RemoteAddr)) +}) + +// testCloseConn is a net.Conn tracked by a testConnSet. +type testCloseConn struct { + net.Conn + set *testConnSet +} + +func (c *testCloseConn) Close() error { + c.set.remove(c) + return c.Conn.Close() +} + +// testConnSet tracks a set of TCP connections and whether they've +// been closed. 
+type testConnSet struct { + t *testing.T + mu sync.Mutex // guards closed and list + closed map[net.Conn]bool + list []net.Conn // in order created +} + +func (tcs *testConnSet) insert(c net.Conn) { + tcs.mu.Lock() + defer tcs.mu.Unlock() + tcs.closed[c] = false + tcs.list = append(tcs.list, c) +} + +func (tcs *testConnSet) remove(c net.Conn) { + tcs.mu.Lock() + defer tcs.mu.Unlock() + tcs.closed[c] = true +} + +// some tests use this to manage raw tcp connections for later inspection +func makeTestDial(t *testing.T) (*testConnSet, func(n, addr string) (net.Conn, error)) { + connSet := &testConnSet{ + t: t, + closed: make(map[net.Conn]bool), + } + dial := func(n, addr string) (net.Conn, error) { + c, err := net.Dial(n, addr) + if err != nil { + return nil, err + } + tc := &testCloseConn{c, connSet} + connSet.insert(tc) + return tc, nil + } + return connSet, dial +} + +func (tcs *testConnSet) check(t *testing.T) { + tcs.mu.Lock() + defer tcs.mu.Unlock() + for i := 4; i >= 0; i-- { + for i, c := range tcs.list { + if tcs.closed[c] { + continue + } + if i != 0 { + tcs.mu.Unlock() + time.Sleep(50 * time.Millisecond) + tcs.mu.Lock() + continue + } + t.Errorf("TCP connection #%d, %+v (of %d total) was not closed", i+1, c, len(tcs.list)) + } + } +} + +// Two subsequent requests and verify their response is the same. +// The response from the server is our own IP:port +func TestTransportKeepAlives(t *testing.T) { + defer afterTest(t) + ts := httptest.NewServer(hostPortHandler) + defer ts.Close() + + for _, disableKeepAlive := range []bool{false, true} { + tr := &Transport{DisableKeepAlives: disableKeepAlive} + defer tr.CloseIdleConnections() + c := &Client{Transport: tr} + + fetch := func(n int) string { + res, err := c.Get(ts.URL) + if err != nil { + t.Fatalf("error in disableKeepAlive=%v, req #%d, GET: %v", disableKeepAlive, n, err) + } + body, err := ioutil.ReadAll(res.Body) + if err != nil { + t.Fatalf("error in disableKeepAlive=%v, req #%d, ReadAll: %v", disableKeepAlive, n, err) + } + return string(body) + } + + body1 := fetch(1) + body2 := fetch(2) + + bodiesDiffer := body1 != body2 + if bodiesDiffer != disableKeepAlive { + t.Errorf("error in disableKeepAlive=%v. unexpected bodiesDiffer=%v; body1=%q; body2=%q", + disableKeepAlive, bodiesDiffer, body1, body2) + } + } +} + +func TestTransportConnectionCloseOnResponse(t *testing.T) { + defer afterTest(t) + ts := httptest.NewServer(hostPortHandler) + defer ts.Close() + + connSet, testDial := makeTestDial(t) + + for _, connectionClose := range []bool{false, true} { + tr := &Transport{ + Dial: testDial, + } + c := &Client{Transport: tr} + + fetch := func(n int) string { + req := new(Request) + var err error + req.URL, err = url.Parse(ts.URL + fmt.Sprintf("/?close=%v", connectionClose)) + if err != nil { + t.Fatalf("URL parse error: %v", err) + } + req.Method = "GET" + req.Proto = "HTTP/1.1" + req.ProtoMajor = 1 + req.ProtoMinor = 1 + + res, err := c.Do(req) + if err != nil { + t.Fatalf("error in connectionClose=%v, req #%d, Do: %v", connectionClose, n, err) + } + defer res.Body.Close() + body, err := ioutil.ReadAll(res.Body) + if err != nil { + t.Fatalf("error in connectionClose=%v, req #%d, ReadAll: %v", connectionClose, n, err) + } + return string(body) + } + + body1 := fetch(1) + body2 := fetch(2) + bodiesDiffer := body1 != body2 + if bodiesDiffer != connectionClose { + t.Errorf("error in connectionClose=%v. 
unexpected bodiesDiffer=%v; body1=%q; body2=%q", + connectionClose, bodiesDiffer, body1, body2) + } + + tr.CloseIdleConnections() + } + + connSet.check(t) +} + +func TestTransportConnectionCloseOnRequest(t *testing.T) { + defer afterTest(t) + ts := httptest.NewServer(hostPortHandler) + defer ts.Close() + + connSet, testDial := makeTestDial(t) + + for _, connectionClose := range []bool{false, true} { + tr := &Transport{ + Dial: testDial, + } + c := &Client{Transport: tr} + + fetch := func(n int) string { + req := new(Request) + var err error + req.URL, err = url.Parse(ts.URL) + if err != nil { + t.Fatalf("URL parse error: %v", err) + } + req.Method = "GET" + req.Proto = "HTTP/1.1" + req.ProtoMajor = 1 + req.ProtoMinor = 1 + req.Close = connectionClose + + res, err := c.Do(req) + if err != nil { + t.Fatalf("error in connectionClose=%v, req #%d, Do: %v", connectionClose, n, err) + } + body, err := ioutil.ReadAll(res.Body) + if err != nil { + t.Fatalf("error in connectionClose=%v, req #%d, ReadAll: %v", connectionClose, n, err) + } + return string(body) + } + + body1 := fetch(1) + body2 := fetch(2) + bodiesDiffer := body1 != body2 + if bodiesDiffer != connectionClose { + t.Errorf("error in connectionClose=%v. unexpected bodiesDiffer=%v; body1=%q; body2=%q", + connectionClose, bodiesDiffer, body1, body2) + } + + tr.CloseIdleConnections() + } + + connSet.check(t) +} + +func TestTransportIdleCacheKeys(t *testing.T) { + defer afterTest(t) + ts := httptest.NewServer(hostPortHandler) + defer ts.Close() + + tr := &Transport{DisableKeepAlives: false} + c := &Client{Transport: tr} + + if e, g := 0, len(tr.IdleConnKeysForTesting()); e != g { + t.Errorf("After CloseIdleConnections expected %d idle conn cache keys; got %d", e, g) + } + + resp, err := c.Get(ts.URL) + if err != nil { + t.Error(err) + } + ioutil.ReadAll(resp.Body) + + keys := tr.IdleConnKeysForTesting() + if e, g := 1, len(keys); e != g { + t.Fatalf("After Get expected %d idle conn cache keys; got %d", e, g) + } + + if e := "|http|" + ts.Listener.Addr().String(); keys[0] != e { + t.Errorf("Expected idle cache key %q; got %q", e, keys[0]) + } + + tr.CloseIdleConnections() + if e, g := 0, len(tr.IdleConnKeysForTesting()); e != g { + t.Errorf("After CloseIdleConnections expected %d idle conn cache keys; got %d", e, g) + } +} + +// Tests that the HTTP transport re-uses connections when a client +// reads to the end of a response Body without closing it. +func TestTransportReadToEndReusesConn(t *testing.T) { + defer afterTest(t) + const msg = "foobar" + + var addrSeen map[string]int + ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) { + addrSeen[r.RemoteAddr]++ + if r.URL.Path == "/chunked/" { + w.WriteHeader(200) + w.(http.Flusher).Flush() + } else { + w.Header().Set("Content-Type", strconv.Itoa(len(msg))) + w.WriteHeader(200) + } + w.Write([]byte(msg)) + })) + defer ts.Close() + + buf := make([]byte, len(msg)) + + for pi, path := range []string{"/content-length/", "/chunked/"} { + wantLen := []int{len(msg), -1}[pi] + addrSeen = make(map[string]int) + for i := 0; i < 3; i++ { + res, err := http.Get(ts.URL + path) + if err != nil { + t.Errorf("Get %s: %v", path, err) + continue + } + // We want to close this body eventually (before the + // defer afterTest at top runs), but not before the + // len(addrSeen) check at the bottom of this test, + // since Closing this early in the loop would risk + // making connections be re-used for the wrong reason. 
+ defer res.Body.Close() + + if res.ContentLength != int64(wantLen) { + t.Errorf("%s res.ContentLength = %d; want %d", path, res.ContentLength, wantLen) + } + n, err := res.Body.Read(buf) + if n != len(msg) || err != io.EOF { + t.Errorf("%s Read = %v, %v; want %d, EOF", path, n, err, len(msg)) + } + } + if len(addrSeen) != 1 { + t.Errorf("for %s, server saw %d distinct client addresses; want 1", path, len(addrSeen)) + } + } +} + +func TestTransportMaxPerHostIdleConns(t *testing.T) { + defer afterTest(t) + resch := make(chan string) + gotReq := make(chan bool) + ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) { + gotReq <- true + msg := <-resch + _, err := w.Write([]byte(msg)) + if err != nil { + t.Fatalf("Write: %v", err) + } + })) + defer ts.Close() + maxIdleConns := 2 + tr := &Transport{DisableKeepAlives: false, MaxIdleConnsPerHost: maxIdleConns} + c := &Client{Transport: tr} + + // Start 3 outstanding requests and wait for the server to get them. + // Their responses will hang until we write to resch, though. + donech := make(chan bool) + doReq := func() { + resp, err := c.Get(ts.URL) + if err != nil { + t.Error(err) + return + } + if _, err := ioutil.ReadAll(resp.Body); err != nil { + t.Errorf("ReadAll: %v", err) + return + } + donech <- true + } + go doReq() + <-gotReq + go doReq() + <-gotReq + go doReq() + <-gotReq + + if e, g := 0, len(tr.IdleConnKeysForTesting()); e != g { + t.Fatalf("Before writes, expected %d idle conn cache keys; got %d", e, g) + } + + resch <- "res1" + <-donech + keys := tr.IdleConnKeysForTesting() + if e, g := 1, len(keys); e != g { + t.Fatalf("after first response, expected %d idle conn cache keys; got %d", e, g) + } + cacheKey := "|http|" + ts.Listener.Addr().String() + if keys[0] != cacheKey { + t.Fatalf("Expected idle cache key %q; got %q", cacheKey, keys[0]) + } + if e, g := 1, tr.IdleConnCountForTesting(cacheKey); e != g { + t.Errorf("after first response, expected %d idle conns; got %d", e, g) + } + + resch <- "res2" + <-donech + if e, g := 2, tr.IdleConnCountForTesting(cacheKey); e != g { + t.Errorf("after second response, expected %d idle conns; got %d", e, g) + } + + resch <- "res3" + <-donech + if e, g := maxIdleConns, tr.IdleConnCountForTesting(cacheKey); e != g { + t.Errorf("after third response, still expected %d idle conns; got %d", e, g) + } +} + +func TestTransportServerClosingUnexpectedly(t *testing.T) { + defer afterTest(t) + ts := httptest.NewServer(hostPortHandler) + defer ts.Close() + + tr := &Transport{} + c := &Client{Transport: tr} + + fetch := func(n, retries int) string { + condFatalf := func(format string, arg ...interface{}) { + if retries <= 0 { + t.Fatalf(format, arg...) + } + t.Logf("retrying shortly after expected error: "+format, arg...) + time.Sleep(time.Second / time.Duration(retries)) + } + for retries >= 0 { + retries-- + res, err := c.Get(ts.URL) + if err != nil { + condFatalf("error in req #%d, GET: %v", n, err) + continue + } + body, err := ioutil.ReadAll(res.Body) + if err != nil { + condFatalf("error in req #%d, ReadAll: %v", n, err) + continue + } + res.Body.Close() + return string(body) + } + panic("unreachable") + } + + body1 := fetch(1, 0) + body2 := fetch(2, 0) + + ts.CloseClientConnections() // surprise! + + // This test has an expected race. Sleeping for 25 ms prevents + // it on most fast machines, causing the next fetch() call to + // succeed quickly. But if we do get errors, fetch() will retry 5 + // times with some delays between. 
+ time.Sleep(25 * time.Millisecond) + + body3 := fetch(3, 5) + + if body1 != body2 { + t.Errorf("expected body1 and body2 to be equal") + } + if body2 == body3 { + t.Errorf("expected body2 and body3 to be different") + } +} + +// Test for http://golang.org/issue/2616 (appropriate issue number) +// This fails pretty reliably with GOMAXPROCS=100 or something high. +func TestStressSurpriseServerCloses(t *testing.T) { + defer afterTest(t) + if testing.Short() { + t.Skip("skipping test in short mode") + } + ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) { + w.Header().Set("Content-Length", "5") + w.Header().Set("Content-Type", "text/plain") + w.Write([]byte("Hello")) + w.(Flusher).Flush() + conn, buf, _ := w.(Hijacker).Hijack() + buf.Flush() + conn.Close() + })) + defer ts.Close() + + tr := &Transport{DisableKeepAlives: false} + c := &Client{Transport: tr} + + // Do a bunch of traffic from different goroutines. Send to activityc + // after each request completes, regardless of whether it failed. + const ( + numClients = 50 + reqsPerClient = 250 + ) + activityc := make(chan bool) + for i := 0; i < numClients; i++ { + go func() { + for i := 0; i < reqsPerClient; i++ { + res, err := c.Get(ts.URL) + if err == nil { + // We expect errors since the server is + // hanging up on us after telling us to + // send more requests, so we don't + // actually care what the error is. + // But we want to close the body in cases + // where we won the race. + res.Body.Close() + } + activityc <- true + } + }() + } + + // Make sure all the request come back, one way or another. + for i := 0; i < numClients*reqsPerClient; i++ { + select { + case <-activityc: + case <-time.After(5 * time.Second): + t.Fatalf("presumed deadlock; no HTTP client activity seen in awhile") + } + } +} + +// TestTransportHeadResponses verifies that we deal with Content-Lengths +// with no bodies properly +func TestTransportHeadResponses(t *testing.T) { + defer afterTest(t) + ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) { + if r.Method != "HEAD" { + panic("expected HEAD; got " + r.Method) + } + w.Header().Set("Content-Length", "123") + w.WriteHeader(200) + })) + defer ts.Close() + + tr := &Transport{DisableKeepAlives: false} + c := &Client{Transport: tr} + for i := 0; i < 2; i++ { + res, err := c.Head(ts.URL) + if err != nil { + t.Errorf("error on loop %d: %v", i, err) + continue + } + if e, g := "123", res.Header.Get("Content-Length"); e != g { + t.Errorf("loop %d: expected Content-Length header of %q, got %q", i, e, g) + } + if e, g := int64(123), res.ContentLength; e != g { + t.Errorf("loop %d: expected res.ContentLength of %v, got %v", i, e, g) + } + if all, err := ioutil.ReadAll(res.Body); err != nil { + t.Errorf("loop %d: Body ReadAll: %v", i, err) + } else if len(all) != 0 { + t.Errorf("Bogus body %q", all) + } + } +} + +// TestTransportHeadChunkedResponse verifies that we ignore chunked transfer-encoding +// on responses to HEAD requests. 
+func TestTransportHeadChunkedResponse(t *testing.T) { + defer afterTest(t) + ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) { + if r.Method != "HEAD" { + panic("expected HEAD; got " + r.Method) + } + w.Header().Set("Transfer-Encoding", "chunked") // client should ignore + w.Header().Set("x-client-ipport", r.RemoteAddr) + w.WriteHeader(200) + })) + defer ts.Close() + + tr := &Transport{DisableKeepAlives: false} + c := &Client{Transport: tr} + + res1, err := c.Head(ts.URL) + if err != nil { + t.Fatalf("request 1 error: %v", err) + } + res2, err := c.Head(ts.URL) + if err != nil { + t.Fatalf("request 2 error: %v", err) + } + if v1, v2 := res1.Header.Get("x-client-ipport"), res2.Header.Get("x-client-ipport"); v1 != v2 { + t.Errorf("ip/ports differed between head requests: %q vs %q", v1, v2) + } +} + +var roundTripTests = []struct { + accept string + expectAccept string + compressed bool +}{ + // Requests with no accept-encoding header use transparent compression + {"", "gzip", false}, + // Requests with other accept-encoding should pass through unmodified + {"foo", "foo", false}, + // Requests with accept-encoding == gzip should be passed through + {"gzip", "gzip", true}, +} + +// Test that the modification made to the Request by the RoundTripper is cleaned up +func TestRoundTripGzip(t *testing.T) { + defer afterTest(t) + const responseBody = "test response body" + ts := httptest.NewServer(HandlerFunc(func(rw ResponseWriter, req *Request) { + accept := req.Header.Get("Accept-Encoding") + if expect := req.FormValue("expect_accept"); accept != expect { + t.Errorf("in handler, test %v: Accept-Encoding = %q, want %q", + req.FormValue("testnum"), accept, expect) + } + if accept == "gzip" { + rw.Header().Set("Content-Encoding", "gzip") + gz := gzip.NewWriter(rw) + gz.Write([]byte(responseBody)) + gz.Close() + } else { + rw.Header().Set("Content-Encoding", accept) + rw.Write([]byte(responseBody)) + } + })) + defer ts.Close() + + for i, test := range roundTripTests { + // Test basic request (no accept-encoding) + req, _ := NewRequest("GET", fmt.Sprintf("%s/?testnum=%d&expect_accept=%s", ts.URL, i, test.expectAccept), nil) + if test.accept != "" { + req.Header.Set("Accept-Encoding", test.accept) + } + res, err := DefaultTransport.RoundTrip(req) + var body []byte + if test.compressed { + var r *gzip.Reader + r, err = gzip.NewReader(res.Body) + if err != nil { + t.Errorf("%d. gzip NewReader: %v", i, err) + continue + } + body, err = ioutil.ReadAll(r) + res.Body.Close() + } else { + body, err = ioutil.ReadAll(res.Body) + } + if err != nil { + t.Errorf("%d. Error: %q", i, err) + continue + } + if g, e := string(body), responseBody; g != e { + t.Errorf("%d. body = %q; want %q", i, g, e) + } + if g, e := req.Header.Get("Accept-Encoding"), test.accept; g != e { + t.Errorf("%d. Accept-Encoding = %q; want %q (it was mutated, in violation of RoundTrip contract)", i, g, e) + } + if g, e := res.Header.Get("Content-Encoding"), test.accept; g != e { + t.Errorf("%d. 
Content-Encoding = %q; want %q", i, g, e) + } + } + +} + +func TestTransportGzip(t *testing.T) { + defer afterTest(t) + const testString = "The test string aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" + const nRandBytes = 1024 * 1024 + ts := httptest.NewServer(HandlerFunc(func(rw ResponseWriter, req *Request) { + if req.Method == "HEAD" { + if g := req.Header.Get("Accept-Encoding"); g != "" { + t.Errorf("HEAD request sent with Accept-Encoding of %q; want none", g) + } + return + } + if g, e := req.Header.Get("Accept-Encoding"), "gzip"; g != e { + t.Errorf("Accept-Encoding = %q, want %q", g, e) + } + rw.Header().Set("Content-Encoding", "gzip") + + var w io.Writer = rw + var buf bytes.Buffer + if req.FormValue("chunked") == "0" { + w = &buf + defer io.Copy(rw, &buf) + defer func() { + rw.Header().Set("Content-Length", strconv.Itoa(buf.Len())) + }() + } + gz := gzip.NewWriter(w) + gz.Write([]byte(testString)) + if req.FormValue("body") == "large" { + io.CopyN(gz, rand.Reader, nRandBytes) + } + gz.Close() + })) + defer ts.Close() + + for _, chunked := range []string{"1", "0"} { + c := &Client{Transport: &Transport{}} + + // First fetch something large, but only read some of it. + res, err := c.Get(ts.URL + "/?body=large&chunked=" + chunked) + if err != nil { + t.Fatalf("large get: %v", err) + } + buf := make([]byte, len(testString)) + n, err := io.ReadFull(res.Body, buf) + if err != nil { + t.Fatalf("partial read of large response: size=%d, %v", n, err) + } + if e, g := testString, string(buf); e != g { + t.Errorf("partial read got %q, expected %q", g, e) + } + res.Body.Close() + // Read on the body, even though it's closed + n, err = res.Body.Read(buf) + if n != 0 || err == nil { + t.Errorf("expected error post-closed large Read; got = %d, %v", n, err) + } + + // Then something small. + res, err = c.Get(ts.URL + "/?chunked=" + chunked) + if err != nil { + t.Fatal(err) + } + body, err := ioutil.ReadAll(res.Body) + if err != nil { + t.Fatal(err) + } + if g, e := string(body), testString; g != e { + t.Fatalf("body = %q; want %q", g, e) + } + if g, e := res.Header.Get("Content-Encoding"), ""; g != e { + t.Fatalf("Content-Encoding = %q; want %q", g, e) + } + + // Read on the body after it's been fully read: + n, err = res.Body.Read(buf) + if n != 0 || err == nil { + t.Errorf("expected Read error after exhausted reads; got %d, %v", n, err) + } + res.Body.Close() + n, err = res.Body.Read(buf) + if n != 0 || err == nil { + t.Errorf("expected Read error after Close; got %d, %v", n, err) + } + } + + // And a HEAD request too, because they're always weird. + c := &Client{Transport: &Transport{}} + res, err := c.Head(ts.URL) + if err != nil { + t.Fatalf("Head: %v", err) + } + if res.StatusCode != 200 { + t.Errorf("Head status=%d; want=200", res.StatusCode) + } +} + +func TestTransportProxy(t *testing.T) { + defer afterTest(t) + ch := make(chan string, 1) + ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) { + ch <- "real server" + })) + defer ts.Close() + proxy := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) { + ch <- "proxy for " + r.URL.String() + })) + defer proxy.Close() + + pu, err := url.Parse(proxy.URL) + if err != nil { + t.Fatal(err) + } + c := &Client{Transport: &Transport{Proxy: ProxyURL(pu)}} + c.Head(ts.URL) + got := <-ch + want := "proxy for " + ts.URL + "/" + if got != want { + t.Errorf("want %q, got %q", want, got) + } +} + +// TestTransportGzipRecursive sends a gzip quine and checks that the +// client gets the same value back. 
This is more cute than anything, +// but checks that we don't recurse forever, and checks that +// Content-Encoding is removed. +func TestTransportGzipRecursive(t *testing.T) { + defer afterTest(t) + ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) { + w.Header().Set("Content-Encoding", "gzip") + w.Write(rgz) + })) + defer ts.Close() + + c := &Client{Transport: &Transport{}} + res, err := c.Get(ts.URL) + if err != nil { + t.Fatal(err) + } + body, err := ioutil.ReadAll(res.Body) + if err != nil { + t.Fatal(err) + } + if !bytes.Equal(body, rgz) { + t.Fatalf("Incorrect result from recursive gz:\nhave=%x\nwant=%x", + body, rgz) + } + if g, e := res.Header.Get("Content-Encoding"), ""; g != e { + t.Fatalf("Content-Encoding = %q; want %q", g, e) + } +} + +// golang.org/issue/7750: request fails when server replies with +// a short gzip body +func TestTransportGzipShort(t *testing.T) { + defer afterTest(t) + ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) { + w.Header().Set("Content-Encoding", "gzip") + w.Write([]byte{0x1f, 0x8b}) + })) + defer ts.Close() + + tr := &Transport{} + defer tr.CloseIdleConnections() + c := &Client{Transport: tr} + res, err := c.Get(ts.URL) + if err != nil { + t.Fatal(err) + } + defer res.Body.Close() + _, err = ioutil.ReadAll(res.Body) + if err == nil { + t.Fatal("Expect an error from reading a body.") + } + if err != io.ErrUnexpectedEOF { + t.Errorf("ReadAll error = %v; want io.ErrUnexpectedEOF", err) + } +} + +// tests that persistent goroutine connections shut down when no longer desired. +func TestTransportPersistConnLeak(t *testing.T) { + if runtime.GOOS == "plan9" { + t.Skip("skipping test; see http://golang.org/issue/7237") + } + defer afterTest(t) + gotReqCh := make(chan bool) + unblockCh := make(chan bool) + ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) { + gotReqCh <- true + <-unblockCh + w.Header().Set("Content-Length", "0") + w.WriteHeader(204) + })) + defer ts.Close() + + tr := &Transport{} + c := &Client{Transport: tr} + + n0 := runtime.NumGoroutine() + + const numReq = 25 + didReqCh := make(chan bool) + for i := 0; i < numReq; i++ { + go func() { + res, err := c.Get(ts.URL) + didReqCh <- true + if err != nil { + t.Errorf("client fetch error: %v", err) + return + } + res.Body.Close() + }() + } + + // Wait for all goroutines to be stuck in the Handler. + for i := 0; i < numReq; i++ { + <-gotReqCh + } + + nhigh := runtime.NumGoroutine() + + // Tell all handlers to unblock and reply. + for i := 0; i < numReq; i++ { + unblockCh <- true + } + + // Wait for all HTTP clients to be done. + for i := 0; i < numReq; i++ { + <-didReqCh + } + + tr.CloseIdleConnections() + time.Sleep(100 * time.Millisecond) + runtime.GC() + runtime.GC() // even more. + nfinal := runtime.NumGoroutine() + + growth := nfinal - n0 + + // We expect 0 or 1 extra goroutine, empirically. Allow up to 5. + // Previously we were leaking one per numReq. 
+ if int(growth) > 5 { + t.Logf("goroutine growth: %d -> %d -> %d (delta: %d)", n0, nhigh, nfinal, growth) + t.Error("too many new goroutines") + } +} + +// golang.org/issue/4531: Transport leaks goroutines when +// request.ContentLength is explicitly short +func TestTransportPersistConnLeakShortBody(t *testing.T) { + if runtime.GOOS == "plan9" { + t.Skip("skipping test; see http://golang.org/issue/7237") + } + defer afterTest(t) + ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) { + })) + defer ts.Close() + + tr := &Transport{} + c := &Client{Transport: tr} + + n0 := runtime.NumGoroutine() + body := []byte("Hello") + for i := 0; i < 20; i++ { + req, err := NewRequest("POST", ts.URL, bytes.NewReader(body)) + if err != nil { + t.Fatal(err) + } + req.ContentLength = int64(len(body) - 2) // explicitly short + _, err = c.Do(req) + if err == nil { + t.Fatal("Expect an error from writing too long of a body.") + } + } + nhigh := runtime.NumGoroutine() + tr.CloseIdleConnections() + time.Sleep(400 * time.Millisecond) + runtime.GC() + nfinal := runtime.NumGoroutine() + + growth := nfinal - n0 + + // We expect 0 or 1 extra goroutine, empirically. Allow up to 5. + // Previously we were leaking one per numReq. + t.Logf("goroutine growth: %d -> %d -> %d (delta: %d)", n0, nhigh, nfinal, growth) + if int(growth) > 5 { + t.Error("too many new goroutines") + } +} + +// This used to crash; http://golang.org/issue/3266 +func TestTransportIdleConnCrash(t *testing.T) { + defer afterTest(t) + tr := &Transport{} + c := &Client{Transport: tr} + + unblockCh := make(chan bool, 1) + ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) { + <-unblockCh + tr.CloseIdleConnections() + })) + defer ts.Close() + + didreq := make(chan bool) + go func() { + res, err := c.Get(ts.URL) + if err != nil { + t.Error(err) + } else { + res.Body.Close() // returns idle conn + } + didreq <- true + }() + unblockCh <- true + <-didreq +} + +// Test that the transport doesn't close the TCP connection early, +// before the response body has been read. This was a regression +// which sadly lacked a triggering test. The large response body made +// the old race easier to trigger. +func TestIssue3644(t *testing.T) { + defer afterTest(t) + const numFoos = 5000 + ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) { + w.Header().Set("Connection", "close") + for i := 0; i < numFoos; i++ { + w.Write([]byte("foo ")) + } + })) + defer ts.Close() + tr := &Transport{} + c := &Client{Transport: tr} + res, err := c.Get(ts.URL) + if err != nil { + t.Fatal(err) + } + defer res.Body.Close() + bs, err := ioutil.ReadAll(res.Body) + if err != nil { + t.Fatal(err) + } + if len(bs) != numFoos*len("foo ") { + t.Errorf("unexpected response length") + } +} + +// Test that a client receives a server's reply, even if the server doesn't read +// the entire request body. +func TestIssue3595(t *testing.T) { + defer afterTest(t) + const deniedMsg = "sorry, denied." 
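+ // The handler below replies 401 without ever reading the request body;
+ // the client POSTs a never-ending body, so it must see the early reply
+ // instead of blocking on the write.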
+ ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) { + Error(w, deniedMsg, StatusUnauthorized) + })) + defer ts.Close() + tr := &Transport{} + c := &Client{Transport: tr} + res, err := c.Post(ts.URL, "application/octet-stream", neverEnding('a')) + if err != nil { + t.Errorf("Post: %v", err) + return + } + got, err := ioutil.ReadAll(res.Body) + if err != nil { + t.Fatalf("Body ReadAll: %v", err) + } + if !strings.Contains(string(got), deniedMsg) { + t.Errorf("Known bug: response %q does not contain %q", got, deniedMsg) + } +} + +// From http://golang.org/issue/4454 , +// "client fails to handle requests with no body and chunked encoding" +func TestChunkedNoContent(t *testing.T) { + defer afterTest(t) + ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) { + w.WriteHeader(StatusNoContent) + })) + defer ts.Close() + + for _, closeBody := range []bool{true, false} { + c := &Client{Transport: &Transport{}} + const n = 4 + for i := 1; i <= n; i++ { + res, err := c.Get(ts.URL) + if err != nil { + t.Errorf("closingBody=%v, req %d/%d: %v", closeBody, i, n, err) + } else { + if closeBody { + res.Body.Close() + } + } + } + } +} + +func TestTransportConcurrency(t *testing.T) { + defer afterTest(t) + maxProcs, numReqs := 16, 500 + if testing.Short() { + maxProcs, numReqs = 4, 50 + } + defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(maxProcs)) + ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) { + fmt.Fprintf(w, "%v", r.FormValue("echo")) + })) + defer ts.Close() + + var wg sync.WaitGroup + wg.Add(numReqs) + + tr := &Transport{ + Dial: func(netw, addr string) (c net.Conn, err error) { + // Due to the Transport's "socket late + // binding" (see idleConnCh in transport.go), + // the numReqs HTTP requests below can finish + // with a dial still outstanding. So count + // our dials as work too so the leak checker + // doesn't complain at us. 
+ wg.Add(1) + defer wg.Done() + return net.Dial(netw, addr) + }, + } + defer tr.CloseIdleConnections() + c := &Client{Transport: tr} + reqs := make(chan string) + defer close(reqs) + + for i := 0; i < maxProcs*2; i++ { + go func() { + for req := range reqs { + res, err := c.Get(ts.URL + "/?echo=" + req) + if err != nil { + t.Errorf("error on req %s: %v", req, err) + wg.Done() + continue + } + all, err := ioutil.ReadAll(res.Body) + if err != nil { + t.Errorf("read error on req %s: %v", req, err) + wg.Done() + continue + } + if string(all) != req { + t.Errorf("body of req %s = %q; want %q", req, all, req) + } + res.Body.Close() + wg.Done() + } + }() + } + for i := 0; i < numReqs; i++ { + reqs <- fmt.Sprintf("request-%d", i) + } + wg.Wait() +} + +func TestIssue4191_InfiniteGetTimeout(t *testing.T) { + if runtime.GOOS == "plan9" { + t.Skip("skipping test; see http://golang.org/issue/7237") + } + defer afterTest(t) + const debug = false + mux := NewServeMux() + mux.HandleFunc("/get", func(w ResponseWriter, r *Request) { + io.Copy(w, neverEnding('a')) + }) + ts := httptest.NewServer(mux) + timeout := 100 * time.Millisecond + + client := &Client{ + Transport: &Transport{ + Dial: func(n, addr string) (net.Conn, error) { + conn, err := net.Dial(n, addr) + if err != nil { + return nil, err + } + conn.SetDeadline(time.Now().Add(timeout)) + if debug { + conn = NewLoggingConn("client", conn) + } + return conn, nil + }, + DisableKeepAlives: true, + }, + } + + getFailed := false + nRuns := 5 + if testing.Short() { + nRuns = 1 + } + for i := 0; i < nRuns; i++ { + if debug { + println("run", i+1, "of", nRuns) + } + sres, err := client.Get(ts.URL + "/get") + if err != nil { + if !getFailed { + // Make the timeout longer, once. + getFailed = true + t.Logf("increasing timeout") + i-- + timeout *= 10 + continue + } + t.Errorf("Error issuing GET: %v", err) + break + } + _, err = io.Copy(ioutil.Discard, sres.Body) + if err == nil { + t.Errorf("Unexpected successful copy") + break + } + } + if debug { + println("tests complete; waiting for handlers to finish") + } + ts.Close() +} + +func TestIssue4191_InfiniteGetToPutTimeout(t *testing.T) { + if runtime.GOOS == "plan9" { + t.Skip("skipping test; see http://golang.org/issue/7237") + } + defer afterTest(t) + const debug = false + mux := NewServeMux() + mux.HandleFunc("/get", func(w ResponseWriter, r *Request) { + io.Copy(w, neverEnding('a')) + }) + mux.HandleFunc("/put", func(w ResponseWriter, r *Request) { + defer r.Body.Close() + io.Copy(ioutil.Discard, r.Body) + }) + ts := httptest.NewServer(mux) + timeout := 100 * time.Millisecond + + client := &Client{ + Transport: &Transport{ + Dial: func(n, addr string) (net.Conn, error) { + conn, err := net.Dial(n, addr) + if err != nil { + return nil, err + } + conn.SetDeadline(time.Now().Add(timeout)) + if debug { + conn = NewLoggingConn("client", conn) + } + return conn, nil + }, + DisableKeepAlives: true, + }, + } + + getFailed := false + nRuns := 5 + if testing.Short() { + nRuns = 1 + } + for i := 0; i < nRuns; i++ { + if debug { + println("run", i+1, "of", nRuns) + } + sres, err := client.Get(ts.URL + "/get") + if err != nil { + if !getFailed { + // Make the timeout longer, once. 
+ getFailed = true + t.Logf("increasing timeout") + i-- + timeout *= 10 + continue + } + t.Errorf("Error issuing GET: %v", err) + break + } + req, _ := NewRequest("PUT", ts.URL+"/put", sres.Body) + _, err = client.Do(req) + if err == nil { + sres.Body.Close() + t.Errorf("Unexpected successful PUT") + break + } + sres.Body.Close() + } + if debug { + println("tests complete; waiting for handlers to finish") + } + ts.Close() +} + +func TestTransportResponseHeaderTimeout(t *testing.T) { + defer afterTest(t) + if testing.Short() { + t.Skip("skipping timeout test in -short mode") + } + inHandler := make(chan bool, 1) + mux := NewServeMux() + mux.HandleFunc("/fast", func(w ResponseWriter, r *Request) { + inHandler <- true + }) + mux.HandleFunc("/slow", func(w ResponseWriter, r *Request) { + inHandler <- true + time.Sleep(2 * time.Second) + }) + ts := httptest.NewServer(mux) + defer ts.Close() + + tr := &Transport{ + ResponseHeaderTimeout: 500 * time.Millisecond, + } + defer tr.CloseIdleConnections() + c := &Client{Transport: tr} + + tests := []struct { + path string + want int + wantErr string + }{ + {path: "/fast", want: 200}, + {path: "/slow", wantErr: "timeout awaiting response headers"}, + {path: "/fast", want: 200}, + } + for i, tt := range tests { + res, err := c.Get(ts.URL + tt.path) + select { + case <-inHandler: + case <-time.After(5 * time.Second): + t.Errorf("never entered handler for test index %d, %s", i, tt.path) + continue + } + if err != nil { + uerr, ok := err.(*url.Error) + if !ok { + t.Errorf("error is not an url.Error; got: %#v", err) + continue + } + nerr, ok := uerr.Err.(net.Error) + if !ok { + t.Errorf("error does not satisfy net.Error interface; got: %#v", err) + continue + } + if !nerr.Timeout() { + t.Errorf("want timeout error; got: %q", nerr) + continue + } + if strings.Contains(err.Error(), tt.wantErr) { + continue + } + t.Errorf("%d. unexpected error: %v", i, err) + continue + } + if tt.wantErr != "" { + t.Errorf("%d. no error. expected error: %v", i, tt.wantErr) + continue + } + if res.StatusCode != tt.want { + t.Errorf("%d for path %q status = %d; want %d", i, tt.path, res.StatusCode, tt.want) + } + } +} + +func TestTransportCancelRequest(t *testing.T) { + defer afterTest(t) + if testing.Short() { + t.Skip("skipping test in -short mode") + } + unblockc := make(chan bool) + ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) { + fmt.Fprintf(w, "Hello") + w.(Flusher).Flush() // send headers and some body + <-unblockc + })) + defer ts.Close() + defer close(unblockc) + + tr := &Transport{} + defer tr.CloseIdleConnections() + c := &Client{Transport: tr} + + req, _ := NewRequest("GET", ts.URL, nil) + res, err := c.Do(req) + if err != nil { + t.Fatal(err) + } + go func() { + time.Sleep(1 * time.Second) + tr.CancelRequest(req) + }() + t0 := time.Now() + body, err := ioutil.ReadAll(res.Body) + d := time.Since(t0) + + if err == nil { + t.Error("expected an error reading the body") + } + if string(body) != "Hello" { + t.Errorf("Body = %q; want Hello", body) + } + if d < 500*time.Millisecond { + t.Errorf("expected ~1 second delay; got %v", d) + } + // Verify no outstanding requests after readLoop/writeLoop + // goroutines shut down. 
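+ // They exit asynchronously after CancelRequest, so poll briefly
+ // before declaring a leak.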
+ for tries := 3; tries > 0; tries-- { + n := tr.NumPendingRequestsForTesting() + if n == 0 { + break + } + time.Sleep(100 * time.Millisecond) + if tries == 1 { + t.Errorf("pending requests = %d; want 0", n) + } + } +} + +func TestTransportCancelRequestInDial(t *testing.T) { + defer afterTest(t) + if testing.Short() { + t.Skip("skipping test in -short mode") + } + var logbuf bytes.Buffer + eventLog := log.New(&logbuf, "", 0) + + unblockDial := make(chan bool) + defer close(unblockDial) + + inDial := make(chan bool) + tr := &Transport{ + Dial: func(network, addr string) (net.Conn, error) { + eventLog.Println("dial: blocking") + inDial <- true + <-unblockDial + return nil, errors.New("nope") + }, + } + cl := &Client{Transport: tr} + gotres := make(chan bool) + req, _ := NewRequest("GET", "http://something.no-network.tld/", nil) + go func() { + _, err := cl.Do(req) + eventLog.Printf("Get = %v", err) + gotres <- true + }() + + select { + case <-inDial: + case <-time.After(5 * time.Second): + t.Fatal("timeout; never saw blocking dial") + } + + eventLog.Printf("canceling") + tr.CancelRequest(req) + + select { + case <-gotres: + case <-time.After(5 * time.Second): + panic("hang. events are: " + logbuf.String()) + } + + got := logbuf.String() + want := `dial: blocking +canceling +Get = Get http://something.no-network.tld/: net/http: request canceled while waiting for connection +` + if got != want { + t.Errorf("Got events:\n%s\nWant:\n%s", got, want) + } +} + +// golang.org/issue/3672 -- Client can't close HTTP stream +// Calling Close on a Response.Body used to just read until EOF. +// Now it actually closes the TCP connection. +func TestTransportCloseResponseBody(t *testing.T) { + defer afterTest(t) + writeErr := make(chan error, 1) + msg := []byte("young\n") + ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) { + for { + _, err := w.Write(msg) + if err != nil { + writeErr <- err + return + } + w.(Flusher).Flush() + } + })) + defer ts.Close() + + tr := &Transport{} + defer tr.CloseIdleConnections() + c := &Client{Transport: tr} + + req, _ := NewRequest("GET", ts.URL, nil) + defer tr.CancelRequest(req) + + res, err := c.Do(req) + if err != nil { + t.Fatal(err) + } + + const repeats = 3 + buf := make([]byte, len(msg)*repeats) + want := bytes.Repeat(msg, repeats) + + _, err = io.ReadFull(res.Body, buf) + if err != nil { + t.Fatal(err) + } + if !bytes.Equal(buf, want) { + t.Fatalf("read %q; want %q", buf, want) + } + didClose := make(chan error, 1) + go func() { + didClose <- res.Body.Close() + }() + select { + case err := <-didClose: + if err != nil { + t.Errorf("Close = %v", err) + } + case <-time.After(10 * time.Second): + t.Fatal("too long waiting for close") + } + select { + case err := <-writeErr: + if err == nil { + t.Errorf("expected non-nil write error") + } + case <-time.After(10 * time.Second): + t.Fatal("too long waiting for write error") + } +} + +type fooProto struct{} + +func (fooProto) RoundTrip(req *Request) (*Response, error) { + res := &Response{ + Status: "200 OK", + StatusCode: 200, + Header: make(Header), + Body: ioutil.NopCloser(strings.NewReader("You wanted " + req.URL.String())), + } + return res, nil +} + +func TestTransportAltProto(t *testing.T) { + defer afterTest(t) + tr := &Transport{} + c := &Client{Transport: tr} + tr.RegisterProtocol("foo", fooProto{}) + res, err := c.Get("foo://bar.com/path") + if err != nil { + t.Fatal(err) + } + bodyb, err := ioutil.ReadAll(res.Body) + if err != nil { + t.Fatal(err) + } + body := string(bodyb) + if e := "You 
wanted foo://bar.com/path"; body != e { + t.Errorf("got response %q, want %q", body, e) + } +} + +func TestTransportNoHost(t *testing.T) { + defer afterTest(t) + tr := &Transport{} + _, err := tr.RoundTrip(&Request{ + Header: make(Header), + URL: &url.URL{ + Scheme: "http", + }, + }) + want := "http: no Host in request URL" + if got := fmt.Sprint(err); got != want { + t.Errorf("error = %v; want %q", err, want) + } +} + +func TestTransportSocketLateBinding(t *testing.T) { + defer afterTest(t) + + mux := NewServeMux() + fooGate := make(chan bool, 1) + mux.HandleFunc("/foo", func(w ResponseWriter, r *Request) { + w.Header().Set("foo-ipport", r.RemoteAddr) + w.(Flusher).Flush() + <-fooGate + }) + mux.HandleFunc("/bar", func(w ResponseWriter, r *Request) { + w.Header().Set("bar-ipport", r.RemoteAddr) + }) + ts := httptest.NewServer(mux) + defer ts.Close() + + dialGate := make(chan bool, 1) + tr := &Transport{ + Dial: func(n, addr string) (net.Conn, error) { + if <-dialGate { + return net.Dial(n, addr) + } + return nil, errors.New("manually closed") + }, + DisableKeepAlives: false, + } + defer tr.CloseIdleConnections() + c := &Client{ + Transport: tr, + } + + dialGate <- true // only allow one dial + fooRes, err := c.Get(ts.URL + "/foo") + if err != nil { + t.Fatal(err) + } + fooAddr := fooRes.Header.Get("foo-ipport") + if fooAddr == "" { + t.Fatal("No addr on /foo request") + } + time.AfterFunc(200*time.Millisecond, func() { + // let the foo response finish so we can use its + // connection for /bar + fooGate <- true + io.Copy(ioutil.Discard, fooRes.Body) + fooRes.Body.Close() + }) + + barRes, err := c.Get(ts.URL + "/bar") + if err != nil { + t.Fatal(err) + } + barAddr := barRes.Header.Get("bar-ipport") + if barAddr != fooAddr { + t.Fatalf("/foo came from conn %q; /bar came from %q instead", fooAddr, barAddr) + } + barRes.Body.Close() + dialGate <- false +} + +// Issue 2184 +func TestTransportReading100Continue(t *testing.T) { + defer afterTest(t) + + const numReqs = 5 + reqBody := func(n int) string { return fmt.Sprintf("request body %d", n) } + reqID := func(n int) string { return fmt.Sprintf("REQ-ID-%d", n) } + + send100Response := func(w *io.PipeWriter, r *io.PipeReader) { + defer w.Close() + defer r.Close() + br := bufio.NewReader(r) + n := 0 + for { + n++ + req, err := ReadRequest(br) + if err == io.EOF { + return + } + if err != nil { + t.Error(err) + return + } + slurp, err := ioutil.ReadAll(req.Body) + if err != nil { + t.Errorf("Server request body slurp: %v", err) + return + } + id := req.Header.Get("Request-Id") + resCode := req.Header.Get("X-Want-Response-Code") + if resCode == "" { + resCode = "100 Continue" + if string(slurp) != reqBody(n) { + t.Errorf("Server got %q, %v; want %q", slurp, err, reqBody(n)) + } + } + body := fmt.Sprintf("Response number %d", n) + v := []byte(strings.Replace(fmt.Sprintf(`HTTP/1.1 %s +Date: Thu, 28 Feb 2013 17:55:41 GMT + +HTTP/1.1 200 OK +Content-Type: text/html +Echo-Request-Id: %s +Content-Length: %d + +%s`, resCode, id, len(body), body), "\n", "\r\n", -1)) + w.Write(v) + if id == reqID(numReqs) { + return + } + } + + } + + tr := &Transport{ + Dial: func(n, addr string) (net.Conn, error) { + sr, sw := io.Pipe() // server read/write + cr, cw := io.Pipe() // client read/write + conn := &rwTestConn{ + Reader: cr, + Writer: sw, + closeFunc: func() error { + sw.Close() + cw.Close() + return nil + }, + } + go send100Response(cw, sr) + return conn, nil + }, + DisableKeepAlives: false, + } + defer tr.CloseIdleConnections() + c := &Client{Transport: tr} + 
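+ // testResponse issues req and checks the status code and that any
+ // Request-Id we sent comes back in the Echo-Request-Id header.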
+ testResponse := func(req *Request, name string, wantCode int) { + res, err := c.Do(req) + if err != nil { + t.Fatalf("%s: Do: %v", name, err) + } + if res.StatusCode != wantCode { + t.Fatalf("%s: Response Statuscode=%d; want %d", name, res.StatusCode, wantCode) + } + if id, idBack := req.Header.Get("Request-Id"), res.Header.Get("Echo-Request-Id"); id != "" && id != idBack { + t.Errorf("%s: response id %q != request id %q", name, idBack, id) + } + _, err = ioutil.ReadAll(res.Body) + if err != nil { + t.Fatalf("%s: Slurp error: %v", name, err) + } + } + + // Few 100 responses, making sure we're not off-by-one. + for i := 1; i <= numReqs; i++ { + req, _ := NewRequest("POST", "http://dummy.tld/", strings.NewReader(reqBody(i))) + req.Header.Set("Request-Id", reqID(i)) + testResponse(req, fmt.Sprintf("100, %d/%d", i, numReqs), 200) + } + + // And some other informational 1xx but non-100 responses, to test + // we return them but don't re-use the connection. + for i := 1; i <= numReqs; i++ { + req, _ := NewRequest("POST", "http://other.tld/", strings.NewReader(reqBody(i))) + req.Header.Set("X-Want-Response-Code", "123 Sesame Street") + testResponse(req, fmt.Sprintf("123, %d/%d", i, numReqs), 123) + } +} + +type proxyFromEnvTest struct { + req string // URL to fetch; blank means "http://example.com" + env string + noenv string + want string + wanterr error +} + +func (t proxyFromEnvTest) String() string { + var buf bytes.Buffer + if t.env != "" { + fmt.Fprintf(&buf, "http_proxy=%q", t.env) + } + if t.noenv != "" { + fmt.Fprintf(&buf, " no_proxy=%q", t.noenv) + } + req := "http://example.com" + if t.req != "" { + req = t.req + } + fmt.Fprintf(&buf, " req=%q", req) + return strings.TrimSpace(buf.String()) +} + +var proxyFromEnvTests = []proxyFromEnvTest{ + {env: "127.0.0.1:8080", want: "http://127.0.0.1:8080"}, + {env: "cache.corp.example.com:1234", want: "http://cache.corp.example.com:1234"}, + {env: "cache.corp.example.com", want: "http://cache.corp.example.com"}, + {env: "https://cache.corp.example.com", want: "https://cache.corp.example.com"}, + {env: "http://127.0.0.1:8080", want: "http://127.0.0.1:8080"}, + {env: "https://127.0.0.1:8080", want: "https://127.0.0.1:8080"}, + {want: ""}, + {noenv: "example.com", req: "http://example.com/", env: "proxy", want: ""}, + {noenv: ".example.com", req: "http://example.com/", env: "proxy", want: ""}, + {noenv: "ample.com", req: "http://example.com/", env: "proxy", want: "http://proxy"}, + {noenv: "example.com", req: "http://foo.example.com/", env: "proxy", want: ""}, + {noenv: ".foo.com", req: "http://example.com/", env: "proxy", want: "http://proxy"}, +} + +func TestProxyFromEnvironment(t *testing.T) { + ResetProxyEnv() + for _, tt := range proxyFromEnvTests { + os.Setenv("HTTP_PROXY", tt.env) + os.Setenv("NO_PROXY", tt.noenv) + ResetCachedEnvironment() + reqURL := tt.req + if reqURL == "" { + reqURL = "http://example.com" + } + req, _ := NewRequest("GET", reqURL, nil) + url, err := ProxyFromEnvironment(req) + if g, e := fmt.Sprintf("%v", err), fmt.Sprintf("%v", tt.wanterr); g != e { + t.Errorf("%v: got error = %q, want %q", tt, g, e) + continue + } + if got := fmt.Sprintf("%s", url); got != tt.want { + t.Errorf("%v: got URL = %q, want %q", tt, url, tt.want) + } + } +} + +func TestIdleConnChannelLeak(t *testing.T) { + var mu sync.Mutex + var n int + + ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) { + mu.Lock() + n++ + mu.Unlock() + })) + defer ts.Close() + + tr := &Transport{ + Dial: func(netw, addr string) (net.Conn, error) { 
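+ // Ignore addr: every URL, whatever its host, dials the one test
+ // server, so each distinct host gets its own idle-conn channel key
+ // against a single backend.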
+ return net.Dial(netw, ts.Listener.Addr().String()) + }, + } + defer tr.CloseIdleConnections() + + c := &Client{Transport: tr} + + // First, without keep-alives. + for _, disableKeep := range []bool{true, false} { + tr.DisableKeepAlives = disableKeep + for i := 0; i < 5; i++ { + _, err := c.Get(fmt.Sprintf("http://foo-host-%d.tld/", i)) + if err != nil { + t.Fatal(err) + } + } + if got := tr.IdleConnChMapSizeForTesting(); got != 0 { + t.Fatalf("ForDisableKeepAlives = %v, map size = %d; want 0", disableKeep, got) + } + } +} + +// Verify the status quo: that the Client.Post function coerces its +// body into a ReadCloser if it's a Closer, and that the Transport +// then closes it. +func TestTransportClosesRequestBody(t *testing.T) { + defer afterTest(t) + ts := httptest.NewServer(http.HandlerFunc(func(w ResponseWriter, r *Request) { + io.Copy(ioutil.Discard, r.Body) + })) + defer ts.Close() + + tr := &Transport{} + defer tr.CloseIdleConnections() + cl := &Client{Transport: tr} + + closes := 0 + + res, err := cl.Post(ts.URL, "text/plain", countCloseReader{&closes, strings.NewReader("hello")}) + if err != nil { + t.Fatal(err) + } + res.Body.Close() + if closes != 1 { + t.Errorf("closes = %d; want 1", closes) + } +} + +func TestTransportTLSHandshakeTimeout(t *testing.T) { + defer afterTest(t) + if testing.Short() { + t.Skip("skipping in short mode") + } + ln := newLocalListener(t) + defer ln.Close() + testdonec := make(chan struct{}) + defer close(testdonec) + + go func() { + c, err := ln.Accept() + if err != nil { + t.Error(err) + return + } + <-testdonec + c.Close() + }() + + getdonec := make(chan struct{}) + go func() { + defer close(getdonec) + tr := &Transport{ + Dial: func(_, _ string) (net.Conn, error) { + return net.Dial("tcp", ln.Addr().String()) + }, + TLSHandshakeTimeout: 250 * time.Millisecond, + } + cl := &Client{Transport: tr} + _, err := cl.Get("https://dummy.tld/") + if err == nil { + t.Error("expected error") + return + } + ue, ok := err.(*url.Error) + if !ok { + t.Errorf("expected url.Error; got %#v", err) + return + } + ne, ok := ue.Err.(net.Error) + if !ok { + t.Errorf("expected net.Error; got %#v", err) + return + } + if !ne.Timeout() { + t.Errorf("expected timeout error; got %v", err) + } + if !strings.Contains(err.Error(), "handshake timeout") { + t.Errorf("expected 'handshake timeout' in error; got %v", err) + } + }() + select { + case <-getdonec: + case <-time.After(5 * time.Second): + t.Error("test timeout; TLS handshake hung?") + } +} + +// Trying to repro golang.org/issue/3514 +func TestTLSServerClosesConnection(t *testing.T) { + defer afterTest(t) + if runtime.GOOS == "windows" { + t.Skip("skipping flaky test on Windows; golang.org/issue/7634") + } + closedc := make(chan bool, 1) + ts := httptest.NewTLSServer(HandlerFunc(func(w ResponseWriter, r *Request) { + if strings.Contains(r.URL.Path, "/keep-alive-then-die") { + conn, _, _ := w.(Hijacker).Hijack() + conn.Write([]byte("HTTP/1.1 200 OK\r\nContent-Length: 3\r\n\r\nfoo")) + conn.Close() + closedc <- true + return + } + fmt.Fprintf(w, "hello") + })) + defer ts.Close() + tr := &Transport{ + TLSClientConfig: &tls.Config{ + InsecureSkipVerify: true, + }, + } + defer tr.CloseIdleConnections() + client := &Client{Transport: tr} + + var nSuccess = 0 + var errs []error + const trials = 20 + for i := 0; i < trials; i++ { + tr.CloseIdleConnections() + res, err := client.Get(ts.URL + "/keep-alive-then-die") + if err != nil { + t.Fatal(err) + } + <-closedc + slurp, err := ioutil.ReadAll(res.Body) + if err != nil { + 
t.Fatal(err) + } + if string(slurp) != "foo" { + t.Errorf("Got %q, want foo", slurp) + } + + // Now try again and see if we successfully + // pick a new connection. + res, err = client.Get(ts.URL + "/") + if err != nil { + errs = append(errs, err) + continue + } + slurp, err = ioutil.ReadAll(res.Body) + if err != nil { + errs = append(errs, err) + continue + } + nSuccess++ + } + if nSuccess > 0 { + t.Logf("successes = %d of %d", nSuccess, trials) + } else { + t.Errorf("All runs failed:") + } + for _, err := range errs { + t.Logf(" err: %v", err) + } +} + +// byteFromChanReader is an io.Reader that reads a single byte at a +// time from the channel. When the channel is closed, the reader +// returns io.EOF. +type byteFromChanReader chan byte + +func (c byteFromChanReader) Read(p []byte) (n int, err error) { + if len(p) == 0 { + return + } + b, ok := <-c + if !ok { + return 0, io.EOF + } + p[0] = b + return 1, nil +} + +// Verifies that the Transport doesn't reuse a connection in the case +// where the server replies before the request has been fully +// written. We still honor that reply (see TestIssue3595), but don't +// send future requests on the connection because it's then in a +// questionable state. +// golang.org/issue/7569 +func TestTransportNoReuseAfterEarlyResponse(t *testing.T) { + defer afterTest(t) + var sconn struct { + sync.Mutex + c net.Conn + } + var getOkay bool + closeConn := func() { + sconn.Lock() + defer sconn.Unlock() + if sconn.c != nil { + sconn.c.Close() + sconn.c = nil + if !getOkay { + t.Logf("Closed server connection") + } + } + } + defer closeConn() + + ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) { + if r.Method == "GET" { + io.WriteString(w, "bar") + return + } + conn, _, _ := w.(Hijacker).Hijack() + sconn.Lock() + sconn.c = conn + sconn.Unlock() + conn.Write([]byte("HTTP/1.1 200 OK\r\nContent-Length: 3\r\n\r\nfoo")) // keep-alive + go io.Copy(ioutil.Discard, conn) + })) + defer ts.Close() + tr := &Transport{} + defer tr.CloseIdleConnections() + client := &Client{Transport: tr} + + const bodySize = 256 << 10 + finalBit := make(byteFromChanReader, 1) + req, _ := NewRequest("POST", ts.URL, io.MultiReader(io.LimitReader(neverEnding('x'), bodySize-1), finalBit)) + req.ContentLength = bodySize + res, err := client.Do(req) + if err := wantBody(res, err, "foo"); err != nil { + t.Errorf("POST response: %v", err) + } + donec := make(chan bool) + go func() { + defer close(donec) + res, err = client.Get(ts.URL) + if err := wantBody(res, err, "bar"); err != nil { + t.Errorf("GET response: %v", err) + return + } + getOkay = true // suppress test noise + }() + time.AfterFunc(5*time.Second, closeConn) + select { + case <-donec: + finalBit <- 'x' // unblock the writeloop of the first Post + close(finalBit) + case <-time.After(7 * time.Second): + t.Fatal("timeout waiting for GET request to finish") + } +} + +type errorReader struct { + err error +} + +func (e errorReader) Read(p []byte) (int, error) { return 0, e.err } + +type closerFunc func() error + +func (f closerFunc) Close() error { return f() } + +// Issue 6981 +func TestTransportClosesBodyOnError(t *testing.T) { + if runtime.GOOS == "plan9" { + t.Skip("skipping test; see http://golang.org/issue/7782") + } + defer afterTest(t) + readBody := make(chan error, 1) + ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) { + _, err := ioutil.ReadAll(r.Body) + readBody <- err + })) + defer ts.Close() + fakeErr := errors.New("fake error") + didClose := make(chan bool, 1) + req, _ 
:= NewRequest("POST", ts.URL, struct { + io.Reader + io.Closer + }{ + io.MultiReader(io.LimitReader(neverEnding('x'), 1<<20), errorReader{fakeErr}), + closerFunc(func() error { + select { + case didClose <- true: + default: + } + return nil + }), + }) + res, err := DefaultClient.Do(req) + if res != nil { + defer res.Body.Close() + } + if err == nil || !strings.Contains(err.Error(), fakeErr.Error()) { + t.Fatalf("Do error = %v; want something containing %q", err, fakeErr.Error()) + } + select { + case err := <-readBody: + if err == nil { + t.Errorf("Unexpected success reading request body from handler; want 'unexpected EOF reading trailer'") + } + case <-time.After(5 * time.Second): + t.Error("timeout waiting for server handler to complete") + } + select { + case <-didClose: + default: + t.Errorf("didn't see Body.Close") + } +} + +func wantBody(res *http.Response, err error, want string) error { + if err != nil { + return err + } + slurp, err := ioutil.ReadAll(res.Body) + if err != nil { + return fmt.Errorf("error reading body: %v", err) + } + if string(slurp) != want { + return fmt.Errorf("body = %q; want %q", slurp, want) + } + if err := res.Body.Close(); err != nil { + return fmt.Errorf("body Close = %v", err) + } + return nil +} + +func newLocalListener(t *testing.T) net.Listener { + ln, err := net.Listen("tcp", "127.0.0.1:0") + if err != nil { + ln, err = net.Listen("tcp6", "[::1]:0") + } + if err != nil { + t.Fatal(err) + } + return ln +} + +type countCloseReader struct { + n *int + io.Reader +} + +func (cr countCloseReader) Close() error { + (*cr.n)++ + return nil +} + +// rgz is a gzip quine that uncompresses to itself. +var rgz = []byte{ + 0x1f, 0x8b, 0x08, 0x08, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x72, 0x65, 0x63, 0x75, 0x72, 0x73, + 0x69, 0x76, 0x65, 0x00, 0x92, 0xef, 0xe6, 0xe0, + 0x60, 0x00, 0x83, 0xa2, 0xd4, 0xe4, 0xd2, 0xa2, + 0xe2, 0xcc, 0xb2, 0x54, 0x06, 0x00, 0x00, 0x17, + 0x00, 0xe8, 0xff, 0x92, 0xef, 0xe6, 0xe0, 0x60, + 0x00, 0x83, 0xa2, 0xd4, 0xe4, 0xd2, 0xa2, 0xe2, + 0xcc, 0xb2, 0x54, 0x06, 0x00, 0x00, 0x17, 0x00, + 0xe8, 0xff, 0x42, 0x12, 0x46, 0x16, 0x06, 0x00, + 0x05, 0x00, 0xfa, 0xff, 0x42, 0x12, 0x46, 0x16, + 0x06, 0x00, 0x05, 0x00, 0xfa, 0xff, 0x00, 0x05, + 0x00, 0xfa, 0xff, 0x00, 0x14, 0x00, 0xeb, 0xff, + 0x42, 0x12, 0x46, 0x16, 0x06, 0x00, 0x05, 0x00, + 0xfa, 0xff, 0x00, 0x05, 0x00, 0xfa, 0xff, 0x00, + 0x14, 0x00, 0xeb, 0xff, 0x42, 0x88, 0x21, 0xc4, + 0x00, 0x00, 0x14, 0x00, 0xeb, 0xff, 0x42, 0x88, + 0x21, 0xc4, 0x00, 0x00, 0x14, 0x00, 0xeb, 0xff, + 0x42, 0x88, 0x21, 0xc4, 0x00, 0x00, 0x14, 0x00, + 0xeb, 0xff, 0x42, 0x88, 0x21, 0xc4, 0x00, 0x00, + 0x14, 0x00, 0xeb, 0xff, 0x42, 0x88, 0x21, 0xc4, + 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0x00, 0x00, + 0x00, 0xff, 0xff, 0x00, 0x17, 0x00, 0xe8, 0xff, + 0x42, 0x88, 0x21, 0xc4, 0x00, 0x00, 0x00, 0x00, + 0xff, 0xff, 0x00, 0x00, 0x00, 0xff, 0xff, 0x00, + 0x17, 0x00, 0xe8, 0xff, 0x42, 0x12, 0x46, 0x16, + 0x06, 0x00, 0x00, 0x00, 0xff, 0xff, 0x01, 0x08, + 0x00, 0xf7, 0xff, 0x3d, 0xb1, 0x20, 0x85, 0xfa, + 0x00, 0x00, 0x00, 0x42, 0x12, 0x46, 0x16, 0x06, + 0x00, 0x00, 0x00, 0xff, 0xff, 0x01, 0x08, 0x00, + 0xf7, 0xff, 0x3d, 0xb1, 0x20, 0x85, 0xfa, 0x00, + 0x00, 0x00, 0x3d, 0xb1, 0x20, 0x85, 0xfa, 0x00, + 0x00, 0x00, +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/core/http/triv.go b/vendor/github.com/Azure/azure-sdk-for-go/core/http/triv.go new file mode 100644 index 000000000..232d65089 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/core/http/triv.go @@ -0,0 +1,141 @@ +// Copyright 2009 The Go Authors. 
All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build ignore + +package main + +import ( + "bytes" + "expvar" + "flag" + "fmt" + "io" + "log" + "net/http" + "os" + "os/exec" + "strconv" + "sync" +) + +// hello world, the web server +var helloRequests = expvar.NewInt("hello-requests") + +func HelloServer(w http.ResponseWriter, req *http.Request) { + helloRequests.Add(1) + io.WriteString(w, "hello, world!\n") +} + +// Simple counter server. POSTing to it will set the value. +type Counter struct { + mu sync.Mutex // protects n + n int +} + +// This makes Counter satisfy the expvar.Var interface, so we can export +// it directly. +func (ctr *Counter) String() string { + ctr.mu.Lock() + defer ctr.mu.Unlock() + return fmt.Sprintf("%d", ctr.n) +} + +func (ctr *Counter) ServeHTTP(w http.ResponseWriter, req *http.Request) { + ctr.mu.Lock() + defer ctr.mu.Unlock() + switch req.Method { + case "GET": + ctr.n++ + case "POST": + buf := new(bytes.Buffer) + io.Copy(buf, req.Body) + body := buf.String() + if n, err := strconv.Atoi(body); err != nil { + fmt.Fprintf(w, "bad POST: %v\nbody: [%v]\n", err, body) + } else { + ctr.n = n + fmt.Fprint(w, "counter reset\n") + } + } + fmt.Fprintf(w, "counter = %d\n", ctr.n) +} + +// simple flag server +var booleanflag = flag.Bool("boolean", true, "another flag for testing") + +func FlagServer(w http.ResponseWriter, req *http.Request) { + w.Header().Set("Content-Type", "text/plain; charset=utf-8") + fmt.Fprint(w, "Flags:\n") + flag.VisitAll(func(f *flag.Flag) { + if f.Value.String() != f.DefValue { + fmt.Fprintf(w, "%s = %s [default = %s]\n", f.Name, f.Value.String(), f.DefValue) + } else { + fmt.Fprintf(w, "%s = %s\n", f.Name, f.Value.String()) + } + }) +} + +// simple argument server +func ArgServer(w http.ResponseWriter, req *http.Request) { + for _, s := range os.Args { + fmt.Fprint(w, s, " ") + } +} + +// a channel (just for the fun of it) +type Chan chan int + +func ChanCreate() Chan { + c := make(Chan) + go func(c Chan) { + for x := 0; ; x++ { + c <- x + } + }(c) + return c +} + +func (ch Chan) ServeHTTP(w http.ResponseWriter, req *http.Request) { + io.WriteString(w, fmt.Sprintf("channel send #%d\n", <-ch)) +} + +// exec a program, redirecting output +func DateServer(rw http.ResponseWriter, req *http.Request) { + rw.Header().Set("Content-Type", "text/plain; charset=utf-8") + + date, err := exec.Command("/bin/date").Output() + if err != nil { + http.Error(rw, err.Error(), 500) + return + } + rw.Write(date) +} + +func Logger(w http.ResponseWriter, req *http.Request) { + log.Print(req.URL) + http.Error(w, "oops", 404) +} + +var webroot = flag.String("root", os.Getenv("HOME"), "web root directory") + +func main() { + flag.Parse() + + // The counter is published as a variable directly. 
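+ // (Publishing it via expvar also makes it readable at /debug/vars.)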
+ ctr := new(Counter) + expvar.Publish("counter", ctr) + http.Handle("/counter", ctr) + http.Handle("/", http.HandlerFunc(Logger)) + http.Handle("/go/", http.StripPrefix("/go/", http.FileServer(http.Dir(*webroot)))) + http.Handle("/chan", ChanCreate()) + http.HandleFunc("/flags", FlagServer) + http.HandleFunc("/args", ArgServer) + http.HandleFunc("/go/hello", HelloServer) + http.HandleFunc("/date", DateServer) + err := http.ListenAndServe(":12345", nil) + if err != nil { + log.Panicln("ListenAndServe:", err) + } +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/core/http/z_last_test.go b/vendor/github.com/Azure/azure-sdk-for-go/core/http/z_last_test.go new file mode 100644 index 000000000..5a0cc1198 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/core/http/z_last_test.go @@ -0,0 +1,97 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package http_test + +import ( + "net/http" + "runtime" + "sort" + "strings" + "testing" + "time" +) + +func interestingGoroutines() (gs []string) { + buf := make([]byte, 2<<20) + buf = buf[:runtime.Stack(buf, true)] + for _, g := range strings.Split(string(buf), "\n\n") { + sl := strings.SplitN(g, "\n", 2) + if len(sl) != 2 { + continue + } + stack := strings.TrimSpace(sl[1]) + if stack == "" || + strings.Contains(stack, "created by net.startServer") || + strings.Contains(stack, "created by testing.RunTests") || + strings.Contains(stack, "closeWriteAndWait") || + strings.Contains(stack, "testing.Main(") || + // These only show up with GOTRACEBACK=2; Issue 5005 (comment 28) + strings.Contains(stack, "runtime.goexit") || + strings.Contains(stack, "created by runtime.gc") || + strings.Contains(stack, "runtime.MHeap_Scavenger") { + continue + } + gs = append(gs, stack) + } + sort.Strings(gs) + return +} + +// Verify the other tests didn't leave any goroutines running. +// This is in a file named z_last_test.go so it sorts at the end. +func TestGoroutinesRunning(t *testing.T) { + if testing.Short() { + t.Skip("not counting goroutines for leakage in -short mode") + } + gs := interestingGoroutines() + + n := 0 + stackCount := make(map[string]int) + for _, g := range gs { + stackCount[g]++ + n++ + } + + t.Logf("num goroutines = %d", n) + if n > 0 { + t.Error("Too many goroutines.") + for stack, count := range stackCount { + t.Logf("%d instances of:\n%s", count, stack) + } + } +} + +func afterTest(t *testing.T) { + http.DefaultTransport.(*http.Transport).CloseIdleConnections() + if testing.Short() { + return + } + var bad string + badSubstring := map[string]string{ + ").readLoop(": "a Transport", + ").writeLoop(": "a Transport", + "created by net/http/httptest.(*Server).Start": "an httptest.Server", + "timeoutHandler": "a TimeoutHandler", + "net.(*netFD).connect(": "a timing out dial", + ").noteClientGone(": "a closenotifier sender", + } + var stacks string + for i := 0; i < 4; i++ { + bad = "" + stacks = strings.Join(interestingGoroutines(), "\n\n") + for substr, what := range badSubstring { + if strings.Contains(stacks, substr) { + bad = what + } + } + if bad == "" { + return + } + // Bad stuff found, but goroutines might just still be + // shutting down, so give it some time. 
+ time.Sleep(250 * time.Millisecond) + } + t.Errorf("Test appears to have leaked %s:\n%s", bad, stacks) +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/core/tls/alert.go b/vendor/github.com/Azure/azure-sdk-for-go/core/tls/alert.go new file mode 100644 index 000000000..0856311e4 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/core/tls/alert.go @@ -0,0 +1,77 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package tls + +import "strconv" + +type alert uint8 + +const ( + // alert level + alertLevelWarning = 1 + alertLevelError = 2 +) + +const ( + alertCloseNotify alert = 0 + alertUnexpectedMessage alert = 10 + alertBadRecordMAC alert = 20 + alertDecryptionFailed alert = 21 + alertRecordOverflow alert = 22 + alertDecompressionFailure alert = 30 + alertHandshakeFailure alert = 40 + alertBadCertificate alert = 42 + alertUnsupportedCertificate alert = 43 + alertCertificateRevoked alert = 44 + alertCertificateExpired alert = 45 + alertCertificateUnknown alert = 46 + alertIllegalParameter alert = 47 + alertUnknownCA alert = 48 + alertAccessDenied alert = 49 + alertDecodeError alert = 50 + alertDecryptError alert = 51 + alertProtocolVersion alert = 70 + alertInsufficientSecurity alert = 71 + alertInternalError alert = 80 + alertUserCanceled alert = 90 + alertNoRenegotiation alert = 100 +) + +var alertText = map[alert]string{ + alertCloseNotify: "close notify", + alertUnexpectedMessage: "unexpected message", + alertBadRecordMAC: "bad record MAC", + alertDecryptionFailed: "decryption failed", + alertRecordOverflow: "record overflow", + alertDecompressionFailure: "decompression failure", + alertHandshakeFailure: "handshake failure", + alertBadCertificate: "bad certificate", + alertUnsupportedCertificate: "unsupported certificate", + alertCertificateRevoked: "revoked certificate", + alertCertificateExpired: "expired certificate", + alertCertificateUnknown: "unknown certificate", + alertIllegalParameter: "illegal parameter", + alertUnknownCA: "unknown certificate authority", + alertAccessDenied: "access denied", + alertDecodeError: "error decoding message", + alertDecryptError: "error decrypting message", + alertProtocolVersion: "protocol version not supported", + alertInsufficientSecurity: "insufficient security level", + alertInternalError: "internal error", + alertUserCanceled: "user canceled", + alertNoRenegotiation: "no renegotiation", +} + +func (e alert) String() string { + s, ok := alertText[e] + if ok { + return s + } + return "alert(" + strconv.Itoa(int(e)) + ")" +} + +func (e alert) Error() string { + return e.String() +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/core/tls/cipher_suites.go b/vendor/github.com/Azure/azure-sdk-for-go/core/tls/cipher_suites.go new file mode 100644 index 000000000..39a51459d --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/core/tls/cipher_suites.go @@ -0,0 +1,270 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package tls + +import ( + "crypto/aes" + "crypto/cipher" + "crypto/des" + "crypto/hmac" + "crypto/rc4" + "crypto/sha1" + "crypto/x509" + "hash" +) + +// a keyAgreement implements the client and server side of a TLS key agreement +// protocol by generating and processing key exchange messages. 
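+// The concrete implementations are rsaKeyAgreement and ecdheKeyAgreement,
+// chosen per suite by the ka constructors in the cipherSuites table below.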
+type keyAgreement interface { + // On the server side, the first two methods are called in order. + + // In the case that the key agreement protocol doesn't use a + // ServerKeyExchange message, generateServerKeyExchange can return nil, + // nil. + generateServerKeyExchange(*Config, *Certificate, *clientHelloMsg, *serverHelloMsg) (*serverKeyExchangeMsg, error) + processClientKeyExchange(*Config, *Certificate, *clientKeyExchangeMsg, uint16) ([]byte, error) + + // On the client side, the next two methods are called in order. + + // This method may not be called if the server doesn't send a + // ServerKeyExchange message. + processServerKeyExchange(*Config, *clientHelloMsg, *serverHelloMsg, *x509.Certificate, *serverKeyExchangeMsg) error + generateClientKeyExchange(*Config, *clientHelloMsg, *x509.Certificate) ([]byte, *clientKeyExchangeMsg, error) +} + +const ( + // suiteECDH indicates that the cipher suite involves elliptic curve + // Diffie-Hellman. This means that it should only be selected when the + // client indicates that it supports ECC with a curve and point format + // that we're happy with. + suiteECDHE = 1 << iota + // suiteECDSA indicates that the cipher suite involves an ECDSA + // signature and therefore may only be selected when the server's + // certificate is ECDSA. If this is not set then the cipher suite is + // RSA based. + suiteECDSA + // suiteTLS12 indicates that the cipher suite should only be advertised + // and accepted when using TLS 1.2. + suiteTLS12 +) + +// A cipherSuite is a specific combination of key agreement, cipher and MAC +// function. All cipher suites currently assume RSA key agreement. +type cipherSuite struct { + id uint16 + // the lengths, in bytes, of the key material needed for each component. + keyLen int + macLen int + ivLen int + ka func(version uint16) keyAgreement + // flags is a bitmask of the suite* values, above. + flags int + cipher func(key, iv []byte, isRead bool) interface{} + mac func(version uint16, macKey []byte) macFunction + aead func(key, fixedNonce []byte) cipher.AEAD +} + +var cipherSuites = []*cipherSuite{ + // Ciphersuite order is chosen so that ECDHE comes before plain RSA + // and RC4 comes before AES (because of the Lucky13 attack). 
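+ // Each entry lists, in order: id, keyLen, macLen, ivLen, ka, flags,
+ // cipher, mac, aead.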
+ {TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, 16, 0, 4, ecdheRSAKA, suiteECDHE | suiteTLS12, nil, nil, aeadAESGCM}, + {TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, 16, 0, 4, ecdheECDSAKA, suiteECDHE | suiteECDSA | suiteTLS12, nil, nil, aeadAESGCM}, + {TLS_ECDHE_RSA_WITH_RC4_128_SHA, 16, 20, 0, ecdheRSAKA, suiteECDHE, cipherRC4, macSHA1, nil}, + {TLS_ECDHE_ECDSA_WITH_RC4_128_SHA, 16, 20, 0, ecdheECDSAKA, suiteECDHE | suiteECDSA, cipherRC4, macSHA1, nil}, + {TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA, 16, 20, 16, ecdheRSAKA, suiteECDHE, cipherAES, macSHA1, nil}, + {TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA, 16, 20, 16, ecdheECDSAKA, suiteECDHE | suiteECDSA, cipherAES, macSHA1, nil}, + {TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA, 32, 20, 16, ecdheRSAKA, suiteECDHE, cipherAES, macSHA1, nil}, + {TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA, 32, 20, 16, ecdheECDSAKA, suiteECDHE | suiteECDSA, cipherAES, macSHA1, nil}, + {TLS_RSA_WITH_RC4_128_SHA, 16, 20, 0, rsaKA, 0, cipherRC4, macSHA1, nil}, + {TLS_RSA_WITH_AES_128_CBC_SHA, 16, 20, 16, rsaKA, 0, cipherAES, macSHA1, nil}, + {TLS_RSA_WITH_AES_256_CBC_SHA, 32, 20, 16, rsaKA, 0, cipherAES, macSHA1, nil}, + {TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA, 24, 20, 8, ecdheRSAKA, suiteECDHE, cipher3DES, macSHA1, nil}, + {TLS_RSA_WITH_3DES_EDE_CBC_SHA, 24, 20, 8, rsaKA, 0, cipher3DES, macSHA1, nil}, +} + +func cipherRC4(key, iv []byte, isRead bool) interface{} { + cipher, _ := rc4.NewCipher(key) + return cipher +} + +func cipher3DES(key, iv []byte, isRead bool) interface{} { + block, _ := des.NewTripleDESCipher(key) + if isRead { + return cipher.NewCBCDecrypter(block, iv) + } + return cipher.NewCBCEncrypter(block, iv) +} + +func cipherAES(key, iv []byte, isRead bool) interface{} { + block, _ := aes.NewCipher(key) + if isRead { + return cipher.NewCBCDecrypter(block, iv) + } + return cipher.NewCBCEncrypter(block, iv) +} + +// macSHA1 returns a macFunction for the given protocol version. +func macSHA1(version uint16, key []byte) macFunction { + if version == VersionSSL30 { + mac := ssl30MAC{ + h: sha1.New(), + key: make([]byte, len(key)), + } + copy(mac.key, key) + return mac + } + return tls10MAC{hmac.New(sha1.New, key)} +} + +type macFunction interface { + Size() int + MAC(digestBuf, seq, header, data []byte) []byte +} + +// fixedNonceAEAD wraps an AEAD and prefixes a fixed portion of the nonce to +// each call. +type fixedNonceAEAD struct { + // sealNonce and openNonce are buffers where the larger nonce will be + // constructed. Since a seal and open operation may be running + // concurrently, there is a separate buffer for each. 
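+ // As built by aeadAESGCM below, each is 12 bytes: the 4-byte fixed
+ // prefix followed by the 8 explicit nonce bytes passed to Seal or Open.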
+ sealNonce, openNonce []byte + aead cipher.AEAD +} + +func (f *fixedNonceAEAD) NonceSize() int { return 8 } +func (f *fixedNonceAEAD) Overhead() int { return f.aead.Overhead() } + +func (f *fixedNonceAEAD) Seal(out, nonce, plaintext, additionalData []byte) []byte { + copy(f.sealNonce[len(f.sealNonce)-8:], nonce) + return f.aead.Seal(out, f.sealNonce, plaintext, additionalData) +} + +func (f *fixedNonceAEAD) Open(out, nonce, plaintext, additionalData []byte) ([]byte, error) { + copy(f.openNonce[len(f.openNonce)-8:], nonce) + return f.aead.Open(out, f.openNonce, plaintext, additionalData) +} + +func aeadAESGCM(key, fixedNonce []byte) cipher.AEAD { + aes, err := aes.NewCipher(key) + if err != nil { + panic(err) + } + aead, err := cipher.NewGCM(aes) + if err != nil { + panic(err) + } + + nonce1, nonce2 := make([]byte, 12), make([]byte, 12) + copy(nonce1, fixedNonce) + copy(nonce2, fixedNonce) + + return &fixedNonceAEAD{nonce1, nonce2, aead} +} + +// ssl30MAC implements the SSLv3 MAC function, as defined in +// www.mozilla.org/projects/security/pki/nss/ssl/draft302.txt section 5.2.3.1 +type ssl30MAC struct { + h hash.Hash + key []byte +} + +func (s ssl30MAC) Size() int { + return s.h.Size() +} + +var ssl30Pad1 = [48]byte{0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36} + +var ssl30Pad2 = [48]byte{0x5c, 0x5c, 0x5c, 0x5c, 0x5c, 0x5c, 0x5c, 0x5c, 0x5c, 0x5c, 0x5c, 0x5c, 0x5c, 0x5c, 0x5c, 0x5c, 0x5c, 0x5c, 0x5c, 0x5c, 0x5c, 0x5c, 0x5c, 0x5c, 0x5c, 0x5c, 0x5c, 0x5c, 0x5c, 0x5c, 0x5c, 0x5c, 0x5c, 0x5c, 0x5c, 0x5c, 0x5c, 0x5c, 0x5c, 0x5c, 0x5c, 0x5c, 0x5c, 0x5c, 0x5c, 0x5c, 0x5c, 0x5c} + +func (s ssl30MAC) MAC(digestBuf, seq, header, data []byte) []byte { + padLength := 48 + if s.h.Size() == 20 { + padLength = 40 + } + + s.h.Reset() + s.h.Write(s.key) + s.h.Write(ssl30Pad1[:padLength]) + s.h.Write(seq) + s.h.Write(header[:1]) + s.h.Write(header[3:5]) + s.h.Write(data) + digestBuf = s.h.Sum(digestBuf[:0]) + + s.h.Reset() + s.h.Write(s.key) + s.h.Write(ssl30Pad2[:padLength]) + s.h.Write(digestBuf) + return s.h.Sum(digestBuf[:0]) +} + +// tls10MAC implements the TLS 1.0 MAC function. RFC 2246, section 6.2.3. +type tls10MAC struct { + h hash.Hash +} + +func (s tls10MAC) Size() int { + return s.h.Size() +} + +func (s tls10MAC) MAC(digestBuf, seq, header, data []byte) []byte { + s.h.Reset() + s.h.Write(seq) + s.h.Write(header) + s.h.Write(data) + return s.h.Sum(digestBuf[:0]) +} + +func rsaKA(version uint16) keyAgreement { + return rsaKeyAgreement{} +} + +func ecdheECDSAKA(version uint16) keyAgreement { + return &ecdheKeyAgreement{ + sigType: signatureECDSA, + version: version, + } +} + +func ecdheRSAKA(version uint16) keyAgreement { + return &ecdheKeyAgreement{ + sigType: signatureRSA, + version: version, + } +} + +// mutualCipherSuite returns a cipherSuite given a list of supported +// ciphersuites and the id requested by the peer. +func mutualCipherSuite(have []uint16, want uint16) *cipherSuite { + for _, id := range have { + if id == want { + for _, suite := range cipherSuites { + if suite.id == want { + return suite + } + } + return nil + } + } + return nil +} + +// A list of the possible cipher suite ids. 
Taken from +// http://www.iana.org/assignments/tls-parameters/tls-parameters.xml +const ( + TLS_RSA_WITH_RC4_128_SHA uint16 = 0x0005 + TLS_RSA_WITH_3DES_EDE_CBC_SHA uint16 = 0x000a + TLS_RSA_WITH_AES_128_CBC_SHA uint16 = 0x002f + TLS_RSA_WITH_AES_256_CBC_SHA uint16 = 0x0035 + TLS_ECDHE_ECDSA_WITH_RC4_128_SHA uint16 = 0xc007 + TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA uint16 = 0xc009 + TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA uint16 = 0xc00a + TLS_ECDHE_RSA_WITH_RC4_128_SHA uint16 = 0xc011 + TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA uint16 = 0xc012 + TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA uint16 = 0xc013 + TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA uint16 = 0xc014 + TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 uint16 = 0xc02f + TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256 uint16 = 0xc02b +) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/core/tls/common.go b/vendor/github.com/Azure/azure-sdk-for-go/core/tls/common.go new file mode 100644 index 000000000..7eb283c5a --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/core/tls/common.go @@ -0,0 +1,438 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package tls + +import ( + "crypto" + "crypto/rand" + "crypto/x509" + "io" + "math/big" + "strings" + "sync" + "time" +) + +const ( + VersionSSL30 = 0x0300 + VersionTLS10 = 0x0301 + VersionTLS11 = 0x0302 + VersionTLS12 = 0x0303 +) + +const ( + maxPlaintext = 16384 // maximum plaintext payload length + maxCiphertext = 16384 + 2048 // maximum ciphertext payload length + recordHeaderLen = 5 // record header length + maxHandshake = 65536 // maximum handshake we support (protocol max is 16 MB) + + minVersion = VersionSSL30 + maxVersion = VersionTLS12 +) + +// TLS record types. +type recordType uint8 + +const ( + recordTypeChangeCipherSpec recordType = 20 + recordTypeAlert recordType = 21 + recordTypeHandshake recordType = 22 + recordTypeApplicationData recordType = 23 +) + +// TLS handshake message types. +const ( + typeHelloRequest uint8 = 0 + typeClientHello uint8 = 1 + typeServerHello uint8 = 2 + typeNewSessionTicket uint8 = 4 + typeCertificate uint8 = 11 + typeServerKeyExchange uint8 = 12 + typeCertificateRequest uint8 = 13 + typeServerHelloDone uint8 = 14 + typeCertificateVerify uint8 = 15 + typeClientKeyExchange uint8 = 16 + typeFinished uint8 = 20 + typeCertificateStatus uint8 = 22 + typeNextProtocol uint8 = 67 // Not IANA assigned +) + +// TLS compression types. 
+const ( + compressionNone uint8 = 0 +) + +// TLS extension numbers +var ( + extensionServerName uint16 = 0 + extensionStatusRequest uint16 = 5 + extensionSupportedCurves uint16 = 10 + extensionSupportedPoints uint16 = 11 + extensionSignatureAlgorithms uint16 = 13 + extensionSessionTicket uint16 = 35 + extensionNextProtoNeg uint16 = 13172 // not IANA assigned +) + +// TLS Elliptic Curves +// http://www.iana.org/assignments/tls-parameters/tls-parameters.xml#tls-parameters-8 +var ( + curveP256 uint16 = 23 + curveP384 uint16 = 24 + curveP521 uint16 = 25 +) + +// TLS Elliptic Curve Point Formats +// http://www.iana.org/assignments/tls-parameters/tls-parameters.xml#tls-parameters-9 +var ( + pointFormatUncompressed uint8 = 0 +) + +// TLS CertificateStatusType (RFC 3546) +const ( + statusTypeOCSP uint8 = 1 +) + +// Certificate types (for certificateRequestMsg) +const ( + certTypeRSASign = 1 // A certificate containing an RSA key + certTypeDSSSign = 2 // A certificate containing a DSA key + certTypeRSAFixedDH = 3 // A certificate containing a static DH key + certTypeDSSFixedDH = 4 // A certificate containing a static DH key + + // See RFC4492 sections 3 and 5.5. + certTypeECDSASign = 64 // A certificate containing an ECDSA-capable public key, signed with ECDSA. + certTypeRSAFixedECDH = 65 // A certificate containing an ECDH-capable public key, signed with RSA. + certTypeECDSAFixedECDH = 66 // A certificate containing an ECDH-capable public key, signed with ECDSA. + + // Rest of these are reserved by the TLS spec +) + +// Hash functions for TLS 1.2 (See RFC 5246, section A.4.1) +const ( + hashSHA1 uint8 = 2 + hashSHA256 uint8 = 4 +) + +// Signature algorithms for TLS 1.2 (See RFC 5246, section A.4.1) +const ( + signatureRSA uint8 = 1 + signatureECDSA uint8 = 3 +) + +// signatureAndHash mirrors the TLS 1.2, SignatureAndHashAlgorithm struct. See +// RFC 5246, section A.4.1. +type signatureAndHash struct { + hash, signature uint8 +} + +// supportedSKXSignatureAlgorithms contains the signature and hash algorithms +// that the code advertises as supported in a TLS 1.2 ClientHello. +var supportedSKXSignatureAlgorithms = []signatureAndHash{ + {hashSHA256, signatureRSA}, + {hashSHA256, signatureECDSA}, + {hashSHA1, signatureRSA}, + {hashSHA1, signatureECDSA}, +} + +// supportedClientCertSignatureAlgorithms contains the signature and hash +// algorithms that the code advertises as supported in a TLS 1.2 +// CertificateRequest. +var supportedClientCertSignatureAlgorithms = []signatureAndHash{ + {hashSHA256, signatureRSA}, + {hashSHA256, signatureECDSA}, +} + +// ConnectionState records basic TLS details about the connection. +type ConnectionState struct { + HandshakeComplete bool // TLS handshake is complete + DidResume bool // connection resumes a previous TLS connection + CipherSuite uint16 // cipher suite in use (TLS_RSA_WITH_RC4_128_SHA, ...) + NegotiatedProtocol string // negotiated next protocol (from Config.NextProtos) + NegotiatedProtocolIsMutual bool // negotiated protocol was advertised by server + ServerName string // server name requested by client, if any (server side only) + PeerCertificates []*x509.Certificate // certificate chain presented by remote peer + VerifiedChains [][]*x509.Certificate // verified chains built from PeerCertificates +} + +// ClientAuthType declares the policy the server will follow for +// TLS Client Authentication. 
+type ClientAuthType int + +const ( + NoClientCert ClientAuthType = iota + RequestClientCert + RequireAnyClientCert + VerifyClientCertIfGiven + RequireAndVerifyClientCert +) + +// A Config structure is used to configure a TLS client or server. After one +// has been passed to a TLS function it must not be modified. +type Config struct { + // Rand provides the source of entropy for nonces and RSA blinding. + // If Rand is nil, TLS uses the cryptographic random reader in package + // crypto/rand. + Rand io.Reader + + // Time returns the current time as the number of seconds since the epoch. + // If Time is nil, TLS uses time.Now. + Time func() time.Time + + // Certificates contains one or more certificate chains + // to present to the other side of the connection. + // Server configurations must include at least one certificate. + Certificates []Certificate + + // NameToCertificate maps from a certificate name to an element of + // Certificates. Note that a certificate name can be of the form + // '*.example.com' and so doesn't have to be a domain name as such. + // See Config.BuildNameToCertificate + // The nil value causes the first element of Certificates to be used + // for all connections. + NameToCertificate map[string]*Certificate + + // RootCAs defines the set of root certificate authorities + // that clients use when verifying server certificates. + // If RootCAs is nil, TLS uses the host's root CA set. + RootCAs *x509.CertPool + + // NextProtos is a list of supported, application level protocols. + NextProtos []string + + // ServerName is included in the client's handshake to support virtual + // hosting. + ServerName string + + // ClientAuth determines the server's policy for + // TLS Client Authentication. The default is NoClientCert. + ClientAuth ClientAuthType + + // ClientCAs defines the set of root certificate authorities + // that servers use if required to verify a client certificate + // by the policy in ClientAuth. + ClientCAs *x509.CertPool + + // InsecureSkipVerify controls whether a client verifies the + // server's certificate chain and host name. + // If InsecureSkipVerify is true, TLS accepts any certificate + // presented by the server and any host name in that certificate. + // In this mode, TLS is susceptible to man-in-the-middle attacks. + // This should be used only for testing. + InsecureSkipVerify bool + + // CipherSuites is a list of supported cipher suites. If CipherSuites + // is nil, TLS uses a list of suites supported by the implementation. + CipherSuites []uint16 + + // PreferServerCipherSuites controls whether the server selects the + // client's most preferred ciphersuite, or the server's most preferred + // ciphersuite. If true then the server's preference, as expressed in + // the order of elements in CipherSuites, is used. + PreferServerCipherSuites bool + + // SessionTicketsDisabled may be set to true to disable session ticket + // (resumption) support. + SessionTicketsDisabled bool + + // SessionTicketKey is used by TLS servers to provide session + // resumption. See RFC 5077. If zero, it will be filled with + // random data before the first server handshake. + // + // If multiple servers are terminating connections for the same host + // they should all have the same SessionTicketKey. If the + // SessionTicketKey leaks, previously recorded and future TLS + // connections using that key are compromised. + SessionTicketKey [32]byte + + // MinVersion contains the minimum SSL/TLS version that is acceptable. 
+ // If zero, then SSLv3 is taken as the minimum. + MinVersion uint16 + + // MaxVersion contains the maximum SSL/TLS version that is acceptable. + // If zero, then the maximum version supported by this package is used, + // which is currently TLS 1.2. + MaxVersion uint16 + + serverInitOnce sync.Once // guards calling (*Config).serverInit +} + +func (c *Config) serverInit() { + if c.SessionTicketsDisabled { + return + } + + // If the key has already been set then we have nothing to do. + for _, b := range c.SessionTicketKey { + if b != 0 { + return + } + } + + if _, err := io.ReadFull(c.rand(), c.SessionTicketKey[:]); err != nil { + c.SessionTicketsDisabled = true + } +} + +func (c *Config) rand() io.Reader { + r := c.Rand + if r == nil { + return rand.Reader + } + return r +} + +func (c *Config) time() time.Time { + t := c.Time + if t == nil { + t = time.Now + } + return t() +} + +func (c *Config) cipherSuites() []uint16 { + s := c.CipherSuites + if s == nil { + s = defaultCipherSuites() + } + return s +} + +func (c *Config) minVersion() uint16 { + if c == nil || c.MinVersion == 0 { + return minVersion + } + return c.MinVersion +} + +func (c *Config) maxVersion() uint16 { + if c == nil || c.MaxVersion == 0 { + return maxVersion + } + return c.MaxVersion +} + +// mutualVersion returns the protocol version to use given the advertised +// version of the peer. +func (c *Config) mutualVersion(vers uint16) (uint16, bool) { + minVersion := c.minVersion() + maxVersion := c.maxVersion() + + if vers < minVersion { + return 0, false + } + if vers > maxVersion { + vers = maxVersion + } + return vers, true +} + +// getCertificateForName returns the best certificate for the given name, +// defaulting to the first element of c.Certificates if there are no good +// options. +func (c *Config) getCertificateForName(name string) *Certificate { + if len(c.Certificates) == 1 || c.NameToCertificate == nil { + // There's only one choice, so no point doing any work. + return &c.Certificates[0] + } + + name = strings.ToLower(name) + for len(name) > 0 && name[len(name)-1] == '.' { + name = name[:len(name)-1] + } + + if cert, ok := c.NameToCertificate[name]; ok { + return cert + } + + // try replacing labels in the name with wildcards until we get a + // match. + labels := strings.Split(name, ".") + for i := range labels { + labels[i] = "*" + candidate := strings.Join(labels, ".") + if cert, ok := c.NameToCertificate[candidate]; ok { + return cert + } + } + + // If nothing matches, return the first certificate. + return &c.Certificates[0] +} + +// BuildNameToCertificate parses c.Certificates and builds c.NameToCertificate +// from the CommonName and SubjectAlternateName fields of each of the leaf +// certificates. +func (c *Config) BuildNameToCertificate() { + c.NameToCertificate = make(map[string]*Certificate) + for i := range c.Certificates { + cert := &c.Certificates[i] + x509Cert, err := x509.ParseCertificate(cert.Certificate[0]) + if err != nil { + continue + } + if len(x509Cert.Subject.CommonName) > 0 { + c.NameToCertificate[x509Cert.Subject.CommonName] = cert + } + for _, san := range x509Cert.DNSNames { + c.NameToCertificate[san] = cert + } + } +} + +// A Certificate is a chain of one or more certificates, leaf first. +type Certificate struct { + Certificate [][]byte + PrivateKey crypto.PrivateKey // supported types: *rsa.PrivateKey, *ecdsa.PrivateKey + // OCSPStaple contains an optional OCSP response which will be served + // to clients that request it. 
+ OCSPStaple []byte + // Leaf is the parsed form of the leaf certificate, which may be + // initialized using x509.ParseCertificate to reduce per-handshake + // processing for TLS clients doing client authentication. If nil, the + // leaf certificate will be parsed as needed. + Leaf *x509.Certificate +} + +// A TLS record. +type record struct { + contentType recordType + major, minor uint8 + payload []byte +} + +type handshakeMessage interface { + marshal() []byte + unmarshal([]byte) bool +} + +// TODO(jsing): Make these available to both crypto/x509 and crypto/tls. +type dsaSignature struct { + R, S *big.Int +} + +type ecdsaSignature dsaSignature + +var emptyConfig Config + +func defaultConfig() *Config { + return &emptyConfig +} + +var ( + once sync.Once + varDefaultCipherSuites []uint16 +) + +func defaultCipherSuites() []uint16 { + once.Do(initDefaultCipherSuites) + return varDefaultCipherSuites +} + +func initDefaultCipherSuites() { + varDefaultCipherSuites = make([]uint16, len(cipherSuites)) + for i, suite := range cipherSuites { + varDefaultCipherSuites[i] = suite.id + } +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/core/tls/conn.go b/vendor/github.com/Azure/azure-sdk-for-go/core/tls/conn.go new file mode 100644 index 000000000..db036f967 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/core/tls/conn.go @@ -0,0 +1,1026 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// TLS low level connection and record layer + +package tls + +import ( + "bytes" + "crypto/cipher" + "crypto/subtle" + "crypto/x509" + "errors" + "io" + "net" + "sync" + "time" +) + +// A Conn represents a secured connection. +// It implements the net.Conn interface. +type Conn struct { + // constant + conn net.Conn + isClient bool + + // constant after handshake; protected by handshakeMutex + handshakeMutex sync.Mutex // handshakeMutex < in.Mutex, out.Mutex, errMutex + vers uint16 // TLS version + haveVers bool // version has been negotiated + config *Config // configuration passed to constructor + handshakeComplete bool + didResume bool // whether this connection was a session resumption + cipherSuite uint16 + ocspResponse []byte // stapled OCSP response + peerCertificates []*x509.Certificate + // verifiedChains contains the certificate chains that we built, as + // opposed to the ones presented by the server. + verifiedChains [][]*x509.Certificate + // serverName contains the server name indicated by the client, if any. + serverName string + + clientProtocol string + clientProtocolFallback bool + + // first permanent error + connErr + + // input/output + in, out halfConn // in.Mutex < out.Mutex + rawInput *block // raw input, right off the wire + input *block // application data waiting to be read + hand bytes.Buffer // handshake data waiting to be read + + tmp [16]byte +} + +type connErr struct { + mu sync.Mutex + value error +} + +func (e *connErr) setError(err error) error { + e.mu.Lock() + defer e.mu.Unlock() + + if e.value == nil { + e.value = err + } + return err +} + +func (e *connErr) error() error { + e.mu.Lock() + defer e.mu.Unlock() + return e.value +} + +// Access to net.Conn methods. +// Cannot just embed net.Conn because that would +// export the struct field too. + +// LocalAddr returns the local network address. +func (c *Conn) LocalAddr() net.Addr { + return c.conn.LocalAddr() +} + +// RemoteAddr returns the remote network address. 
+func (c *Conn) RemoteAddr() net.Addr {
+	return c.conn.RemoteAddr()
+}
+
+// SetDeadline sets the read and write deadlines associated with the connection.
+// A zero value for t means Read and Write will not time out.
+// After a Write has timed out, the TLS state is corrupt and all future writes will return the same error.
+func (c *Conn) SetDeadline(t time.Time) error {
+	return c.conn.SetDeadline(t)
+}
+
+// SetReadDeadline sets the read deadline on the underlying connection.
+// A zero value for t means Read will not time out.
+func (c *Conn) SetReadDeadline(t time.Time) error {
+	return c.conn.SetReadDeadline(t)
+}
+
+// SetWriteDeadline sets the write deadline on the underlying connection.
+// A zero value for t means Write will not time out.
+// After a Write has timed out, the TLS state is corrupt and all future writes will return the same error.
+func (c *Conn) SetWriteDeadline(t time.Time) error {
+	return c.conn.SetWriteDeadline(t)
+}
+
+// A halfConn represents one direction of the record layer
+// connection, either sending or receiving.
+type halfConn struct {
+	sync.Mutex
+	version uint16      // protocol version
+	cipher  interface{} // cipher algorithm
+	mac     macFunction
+	seq     [8]byte // 64-bit sequence number
+	bfree   *block  // list of free blocks
+
+	nextCipher interface{} // next encryption state
+	nextMac    macFunction // next MAC algorithm
+
+	// used to save allocating a new buffer for each MAC.
+	inDigestBuf, outDigestBuf []byte
+}
+
+// prepareCipherSpec sets the encryption and MAC states
+// that a subsequent changeCipherSpec will use.
+func (hc *halfConn) prepareCipherSpec(version uint16, cipher interface{}, mac macFunction) {
+	hc.version = version
+	hc.nextCipher = cipher
+	hc.nextMac = mac
+}
+
+// changeCipherSpec changes the encryption and MAC states
+// to the ones previously passed to prepareCipherSpec.
+func (hc *halfConn) changeCipherSpec() error {
+	if hc.nextCipher == nil {
+		return alertInternalError
+	}
+	hc.cipher = hc.nextCipher
+	hc.mac = hc.nextMac
+	hc.nextCipher = nil
+	hc.nextMac = nil
+	for i := range hc.seq {
+		hc.seq[i] = 0
+	}
+	return nil
+}
+
+// incSeq increments the sequence number.
+func (hc *halfConn) incSeq() {
+	for i := 7; i >= 0; i-- {
+		hc.seq[i]++
+		if hc.seq[i] != 0 {
+			return
+		}
+	}
+
+	// Not allowed to let sequence number wrap.
+	// Instead, must renegotiate before it does.
+	// Not likely enough to bother.
+	panic("TLS: sequence number wraparound")
+}
+
+// resetSeq resets the sequence number to zero.
+func (hc *halfConn) resetSeq() {
+	for i := range hc.seq {
+		hc.seq[i] = 0
+	}
+}
+
+// removePadding returns an unpadded slice, in constant time, which is a prefix
+// of the input. It also returns a byte which is equal to 255 if the padding
+// was valid and 0 otherwise.
See RFC 2246, section 6.2.3.2 +func removePadding(payload []byte) ([]byte, byte) { + if len(payload) < 1 { + return payload, 0 + } + + paddingLen := payload[len(payload)-1] + t := uint(len(payload)-1) - uint(paddingLen) + // if len(payload) >= (paddingLen - 1) then the MSB of t is zero + good := byte(int32(^t) >> 31) + + toCheck := 255 // the maximum possible padding length + // The length of the padded data is public, so we can use an if here + if toCheck+1 > len(payload) { + toCheck = len(payload) - 1 + } + + for i := 0; i < toCheck; i++ { + t := uint(paddingLen) - uint(i) + // if i <= paddingLen then the MSB of t is zero + mask := byte(int32(^t) >> 31) + b := payload[len(payload)-1-i] + good &^= mask&paddingLen ^ mask&b + } + + // We AND together the bits of good and replicate the result across + // all the bits. + good &= good << 4 + good &= good << 2 + good &= good << 1 + good = uint8(int8(good) >> 7) + + toRemove := good&paddingLen + 1 + return payload[:len(payload)-int(toRemove)], good +} + +// removePaddingSSL30 is a replacement for removePadding in the case that the +// protocol version is SSLv3. In this version, the contents of the padding +// are random and cannot be checked. +func removePaddingSSL30(payload []byte) ([]byte, byte) { + if len(payload) < 1 { + return payload, 0 + } + + paddingLen := int(payload[len(payload)-1]) + 1 + if paddingLen > len(payload) { + return payload, 0 + } + + return payload[:len(payload)-paddingLen], 255 +} + +func roundUp(a, b int) int { + return a + (b-a%b)%b +} + +// cbcMode is an interface for block ciphers using cipher block chaining. +type cbcMode interface { + cipher.BlockMode + SetIV([]byte) +} + +// decrypt checks and strips the mac and decrypts the data in b. Returns a +// success boolean, the number of bytes to skip from the start of the record in +// order to get the application payload, and an optional alert value. +func (hc *halfConn) decrypt(b *block) (ok bool, prefixLen int, alertValue alert) { + // pull out payload + payload := b.data[recordHeaderLen:] + + macSize := 0 + if hc.mac != nil { + macSize = hc.mac.Size() + } + + paddingGood := byte(255) + explicitIVLen := 0 + + // decrypt + if hc.cipher != nil { + switch c := hc.cipher.(type) { + case cipher.Stream: + c.XORKeyStream(payload, payload) + case cipher.AEAD: + explicitIVLen = 8 + if len(payload) < explicitIVLen { + return false, 0, alertBadRecordMAC + } + nonce := payload[:8] + payload = payload[8:] + + var additionalData [13]byte + copy(additionalData[:], hc.seq[:]) + copy(additionalData[8:], b.data[:3]) + n := len(payload) - c.Overhead() + additionalData[11] = byte(n >> 8) + additionalData[12] = byte(n) + var err error + payload, err = c.Open(payload[:0], nonce, payload, additionalData[:]) + if err != nil { + return false, 0, alertBadRecordMAC + } + b.resize(recordHeaderLen + explicitIVLen + len(payload)) + case cbcMode: + blockSize := c.BlockSize() + if hc.version >= VersionTLS11 { + explicitIVLen = blockSize + } + + if len(payload)%blockSize != 0 || len(payload) < roundUp(explicitIVLen+macSize+1, blockSize) { + return false, 0, alertBadRecordMAC + } + + if explicitIVLen > 0 { + c.SetIV(payload[:explicitIVLen]) + payload = payload[explicitIVLen:] + } + c.CryptBlocks(payload, payload) + if hc.version == VersionSSL30 { + payload, paddingGood = removePaddingSSL30(payload) + } else { + payload, paddingGood = removePadding(payload) + } + b.resize(recordHeaderLen + explicitIVLen + len(payload)) + + // note that we still have a timing side-channel in the + // MAC check, below. 
An attacker can align the record + // so that a correct padding will cause one less hash + // block to be calculated. Then they can iteratively + // decrypt a record by breaking each byte. See + // "Password Interception in a SSL/TLS Channel", Brice + // Canvel et al. + // + // However, our behavior matches OpenSSL, so we leak + // only as much as they do. + default: + panic("unknown cipher type") + } + } + + // check, strip mac + if hc.mac != nil { + if len(payload) < macSize { + return false, 0, alertBadRecordMAC + } + + // strip mac off payload, b.data + n := len(payload) - macSize + b.data[3] = byte(n >> 8) + b.data[4] = byte(n) + b.resize(recordHeaderLen + explicitIVLen + n) + remoteMAC := payload[n:] + localMAC := hc.mac.MAC(hc.inDigestBuf, hc.seq[0:], b.data[:recordHeaderLen], payload[:n]) + + if subtle.ConstantTimeCompare(localMAC, remoteMAC) != 1 || paddingGood != 255 { + return false, 0, alertBadRecordMAC + } + hc.inDigestBuf = localMAC + } + hc.incSeq() + + return true, recordHeaderLen + explicitIVLen, 0 +} + +// padToBlockSize calculates the needed padding block, if any, for a payload. +// On exit, prefix aliases payload and extends to the end of the last full +// block of payload. finalBlock is a fresh slice which contains the contents of +// any suffix of payload as well as the needed padding to make finalBlock a +// full block. +func padToBlockSize(payload []byte, blockSize int) (prefix, finalBlock []byte) { + overrun := len(payload) % blockSize + paddingLen := blockSize - overrun + prefix = payload[:len(payload)-overrun] + finalBlock = make([]byte, blockSize) + copy(finalBlock, payload[len(payload)-overrun:]) + for i := overrun; i < blockSize; i++ { + finalBlock[i] = byte(paddingLen - 1) + } + return +} + +// encrypt encrypts and macs the data in b. +func (hc *halfConn) encrypt(b *block, explicitIVLen int) (bool, alert) { + // mac + if hc.mac != nil { + mac := hc.mac.MAC(hc.outDigestBuf, hc.seq[0:], b.data[:recordHeaderLen], b.data[recordHeaderLen+explicitIVLen:]) + + n := len(b.data) + b.resize(n + len(mac)) + copy(b.data[n:], mac) + hc.outDigestBuf = mac + } + + payload := b.data[recordHeaderLen:] + + // encrypt + if hc.cipher != nil { + switch c := hc.cipher.(type) { + case cipher.Stream: + c.XORKeyStream(payload, payload) + case cipher.AEAD: + payloadLen := len(b.data) - recordHeaderLen - explicitIVLen + b.resize(len(b.data) + c.Overhead()) + nonce := b.data[recordHeaderLen : recordHeaderLen+explicitIVLen] + payload := b.data[recordHeaderLen+explicitIVLen:] + payload = payload[:payloadLen] + + var additionalData [13]byte + copy(additionalData[:], hc.seq[:]) + copy(additionalData[8:], b.data[:3]) + additionalData[11] = byte(payloadLen >> 8) + additionalData[12] = byte(payloadLen) + + c.Seal(payload[:0], nonce, payload, additionalData[:]) + case cbcMode: + blockSize := c.BlockSize() + if explicitIVLen > 0 { + c.SetIV(payload[:explicitIVLen]) + payload = payload[explicitIVLen:] + } + prefix, finalBlock := padToBlockSize(payload, blockSize) + b.resize(recordHeaderLen + explicitIVLen + len(prefix) + len(finalBlock)) + c.CryptBlocks(b.data[recordHeaderLen+explicitIVLen:], prefix) + c.CryptBlocks(b.data[recordHeaderLen+explicitIVLen+len(prefix):], finalBlock) + default: + panic("unknown cipher type") + } + } + + // update length to include MAC and any block padding needed. + n := len(b.data) - recordHeaderLen + b.data[3] = byte(n >> 8) + b.data[4] = byte(n) + hc.incSeq() + + return true, 0 +} + +// A block is a simple data buffer. 
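+// Blocks are recycled through the owning halfConn's free list (see newBlock
+// and freeBlock below), so steady-state reads and writes avoid allocating a
+// fresh buffer for every record.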
+type block struct { + data []byte + off int // index for Read + link *block +} + +// resize resizes block to be n bytes, growing if necessary. +func (b *block) resize(n int) { + if n > cap(b.data) { + b.reserve(n) + } + b.data = b.data[0:n] +} + +// reserve makes sure that block contains a capacity of at least n bytes. +func (b *block) reserve(n int) { + if cap(b.data) >= n { + return + } + m := cap(b.data) + if m == 0 { + m = 1024 + } + for m < n { + m *= 2 + } + data := make([]byte, len(b.data), m) + copy(data, b.data) + b.data = data +} + +// readFromUntil reads from r into b until b contains at least n bytes +// or else returns an error. +func (b *block) readFromUntil(r io.Reader, n int) error { + // quick case + if len(b.data) >= n { + return nil + } + + // read until have enough. + b.reserve(n) + for { + m, err := r.Read(b.data[len(b.data):cap(b.data)]) + b.data = b.data[0 : len(b.data)+m] + if len(b.data) >= n { + break + } + if err != nil { + return err + } + } + return nil +} + +func (b *block) Read(p []byte) (n int, err error) { + n = copy(p, b.data[b.off:]) + b.off += n + return +} + +// newBlock allocates a new block, from hc's free list if possible. +func (hc *halfConn) newBlock() *block { + b := hc.bfree + if b == nil { + return new(block) + } + hc.bfree = b.link + b.link = nil + b.resize(0) + return b +} + +// freeBlock returns a block to hc's free list. +// The protocol is such that each side only has a block or two on +// its free list at a time, so there's no need to worry about +// trimming the list, etc. +func (hc *halfConn) freeBlock(b *block) { + b.link = hc.bfree + hc.bfree = b +} + +// splitBlock splits a block after the first n bytes, +// returning a block with those n bytes and a +// block with the remainder. the latter may be nil. +func (hc *halfConn) splitBlock(b *block, n int) (*block, *block) { + if len(b.data) <= n { + return b, nil + } + bb := hc.newBlock() + bb.resize(len(b.data) - n) + copy(bb.data, b.data[n:]) + b.data = b.data[0:n] + return b, bb +} + +// readRecord reads the next TLS record from the connection +// and updates the record layer state. +// c.in.Mutex <= L; c.input == nil. +func (c *Conn) readRecord(want recordType) error { + // Caller must be in sync with connection: + // handshake data if handshake not yet completed, + // else application data. (We don't support renegotiation.) + switch want { + default: + return c.sendAlert(alertInternalError) + case recordTypeHandshake, recordTypeChangeCipherSpec: + if c.handshakeComplete { + return c.sendAlert(alertInternalError) + } + case recordTypeApplicationData: + if !c.handshakeComplete { + return c.sendAlert(alertInternalError) + } + } + +Again: + if c.rawInput == nil { + c.rawInput = c.in.newBlock() + } + b := c.rawInput + + // Read header, payload. + if err := b.readFromUntil(c.conn, recordHeaderLen); err != nil { + // RFC suggests that EOF without an alertCloseNotify is + // an error, but popular web sites seem to do this, + // so we can't make it an error. + // if err == io.EOF { + // err = io.ErrUnexpectedEOF + // } + if e, ok := err.(net.Error); !ok || !e.Temporary() { + c.setError(err) + } + return err + } + typ := recordType(b.data[0]) + + // No valid TLS record has a type of 0x80, however SSLv2 handshakes + // start with a uint16 length where the MSB is set and the first record + // is always < 256 bytes long. Therefore typ == 0x80 strongly suggests + // an SSLv2 client. 
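+	// The five-byte record header, parsed here and below, is laid out as:
+	//
+	//	b.data[0]    content type (typ, above)
+	//	b.data[1:3]  protocol version, major then minor
+	//	b.data[3:5]  payload length, big-endian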
+ if want == recordTypeHandshake && typ == 0x80 { + c.sendAlert(alertProtocolVersion) + return errors.New("tls: unsupported SSLv2 handshake received") + } + + vers := uint16(b.data[1])<<8 | uint16(b.data[2]) + n := int(b.data[3])<<8 | int(b.data[4]) + if c.haveVers && vers != c.vers { + return c.sendAlert(alertProtocolVersion) + } + if n > maxCiphertext { + return c.sendAlert(alertRecordOverflow) + } + if !c.haveVers { + // First message, be extra suspicious: + // this might not be a TLS client. + // Bail out before reading a full 'body', if possible. + // The current max version is 3.1. + // If the version is >= 16.0, it's probably not real. + // Similarly, a clientHello message encodes in + // well under a kilobyte. If the length is >= 12 kB, + // it's probably not real. + if (typ != recordTypeAlert && typ != want) || vers >= 0x1000 || n >= 0x3000 { + return c.sendAlert(alertUnexpectedMessage) + } + } + if err := b.readFromUntil(c.conn, recordHeaderLen+n); err != nil { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + if e, ok := err.(net.Error); !ok || !e.Temporary() { + c.setError(err) + } + return err + } + + // Process message. + b, c.rawInput = c.in.splitBlock(b, recordHeaderLen+n) + ok, off, err := c.in.decrypt(b) + if !ok { + return c.sendAlert(err) + } + b.off = off + data := b.data[b.off:] + if len(data) > maxPlaintext { + c.sendAlert(alertRecordOverflow) + c.in.freeBlock(b) + return c.error() + } + + switch typ { + default: + c.sendAlert(alertUnexpectedMessage) + + case recordTypeAlert: + if len(data) != 2 { + c.sendAlert(alertUnexpectedMessage) + break + } + if alert(data[1]) == alertCloseNotify { + c.setError(io.EOF) + break + } + switch data[0] { + case alertLevelWarning: + // drop on the floor + c.in.freeBlock(b) + goto Again + case alertLevelError: + c.setError(&net.OpError{Op: "remote error", Err: alert(data[1])}) + default: + c.sendAlert(alertUnexpectedMessage) + } + + case recordTypeChangeCipherSpec: + if typ != want || len(data) != 1 || data[0] != 1 { + c.sendAlert(alertUnexpectedMessage) + break + } + err := c.in.changeCipherSpec() + if err != nil { + c.sendAlert(err.(alert)) + } + + case recordTypeApplicationData: + if typ != want { + c.sendAlert(alertUnexpectedMessage) + break + } + c.input = b + b = nil + + case recordTypeHandshake: + // TODO(rsc): Should at least pick off connection close. + if typ != want && !c.isClient { + return c.sendAlert(alertNoRenegotiation) + } + c.hand.Write(data) + } + + if b != nil { + c.in.freeBlock(b) + } + return c.error() +} + +// sendAlert sends a TLS alert message. +// c.out.Mutex <= L. +func (c *Conn) sendAlertLocked(err alert) error { + switch err { + case alertNoRenegotiation, alertCloseNotify: + c.tmp[0] = alertLevelWarning + default: + c.tmp[0] = alertLevelError + } + c.tmp[1] = byte(err) + c.writeRecord(recordTypeAlert, c.tmp[0:2]) + // closeNotify is a special case in that it isn't an error: + if err != alertCloseNotify { + return c.setError(&net.OpError{Op: "local error", Err: err}) + } + return nil +} + +// sendAlert sends a TLS alert message. +// L < c.out.Mutex. +func (c *Conn) sendAlert(err alert) error { + c.out.Lock() + defer c.out.Unlock() + return c.sendAlertLocked(err) +} + +// writeRecord writes a TLS record with the given type and payload +// to the connection and updates the record layer state. +// c.out.Mutex <= L. 
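+//
+// For CBC suites at TLS 1.1 and later, each record is prefixed with a fresh
+// random IV of one cipher block; for the AES-GCM suites, the 8-byte explicit
+// nonce is the record sequence number. Both cases fall out of the
+// explicitIVLen handling below.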
+func (c *Conn) writeRecord(typ recordType, data []byte) (n int, err error) { + b := c.out.newBlock() + for len(data) > 0 { + m := len(data) + if m > maxPlaintext { + m = maxPlaintext + } + explicitIVLen := 0 + explicitIVIsSeq := false + + var cbc cbcMode + if c.out.version >= VersionTLS11 { + var ok bool + if cbc, ok = c.out.cipher.(cbcMode); ok { + explicitIVLen = cbc.BlockSize() + } + } + if explicitIVLen == 0 { + if _, ok := c.out.cipher.(cipher.AEAD); ok { + explicitIVLen = 8 + // The AES-GCM construction in TLS has an + // explicit nonce so that the nonce can be + // random. However, the nonce is only 8 bytes + // which is too small for a secure, random + // nonce. Therefore we use the sequence number + // as the nonce. + explicitIVIsSeq = true + } + } + b.resize(recordHeaderLen + explicitIVLen + m) + b.data[0] = byte(typ) + vers := c.vers + if vers == 0 { + // Some TLS servers fail if the record version is + // greater than TLS 1.0 for the initial ClientHello. + vers = VersionTLS10 + } + b.data[1] = byte(vers >> 8) + b.data[2] = byte(vers) + b.data[3] = byte(m >> 8) + b.data[4] = byte(m) + if explicitIVLen > 0 { + explicitIV := b.data[recordHeaderLen : recordHeaderLen+explicitIVLen] + if explicitIVIsSeq { + copy(explicitIV, c.out.seq[:]) + } else { + if _, err = io.ReadFull(c.config.rand(), explicitIV); err != nil { + break + } + } + } + copy(b.data[recordHeaderLen+explicitIVLen:], data) + c.out.encrypt(b, explicitIVLen) + _, err = c.conn.Write(b.data) + if err != nil { + break + } + n += m + data = data[m:] + } + c.out.freeBlock(b) + + if typ == recordTypeChangeCipherSpec { + err = c.out.changeCipherSpec() + if err != nil { + // Cannot call sendAlert directly, + // because we already hold c.out.Mutex. + c.tmp[0] = alertLevelError + c.tmp[1] = byte(err.(alert)) + c.writeRecord(recordTypeAlert, c.tmp[0:2]) + return n, c.setError(&net.OpError{Op: "local error", Err: err}) + } + } + return +} + +// readHandshake reads the next handshake message from +// the record layer. +// c.in.Mutex < L; c.out.Mutex < L. 
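+//
+// Handshake messages are framed as a one-byte type followed by a three-byte
+// big-endian length, which is why the loops below wait for at least four
+// bytes of handshake data before parsing.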
+func (c *Conn) readHandshake() (interface{}, error) { + for c.hand.Len() < 4 { + if err := c.error(); err != nil { + return nil, err + } + if err := c.readRecord(recordTypeHandshake); err != nil { + return nil, err + } + } + + data := c.hand.Bytes() + n := int(data[1])<<16 | int(data[2])<<8 | int(data[3]) + if n > maxHandshake { + c.sendAlert(alertInternalError) + return nil, c.error() + } + for c.hand.Len() < 4+n { + if err := c.error(); err != nil { + return nil, err + } + if err := c.readRecord(recordTypeHandshake); err != nil { + return nil, err + } + } + data = c.hand.Next(4 + n) + var m handshakeMessage + switch data[0] { + case typeHelloRequest: + m = new(helloRequestMsg) + case typeClientHello: + m = new(clientHelloMsg) + case typeServerHello: + m = new(serverHelloMsg) + case typeCertificate: + m = new(certificateMsg) + case typeCertificateRequest: + m = &certificateRequestMsg{ + hasSignatureAndHash: c.vers >= VersionTLS12, + } + case typeCertificateStatus: + m = new(certificateStatusMsg) + case typeServerKeyExchange: + m = new(serverKeyExchangeMsg) + case typeServerHelloDone: + m = new(serverHelloDoneMsg) + case typeClientKeyExchange: + m = new(clientKeyExchangeMsg) + case typeCertificateVerify: + m = &certificateVerifyMsg{ + hasSignatureAndHash: c.vers >= VersionTLS12, + } + case typeNextProtocol: + m = new(nextProtoMsg) + case typeFinished: + m = new(finishedMsg) + default: + c.sendAlert(alertUnexpectedMessage) + return nil, alertUnexpectedMessage + } + + // The handshake message unmarshallers + // expect to be able to keep references to data, + // so pass in a fresh copy that won't be overwritten. + data = append([]byte(nil), data...) + + if !m.unmarshal(data) { + c.sendAlert(alertUnexpectedMessage) + return nil, alertUnexpectedMessage + } + return m, nil +} + +// Write writes data to the connection. +func (c *Conn) Write(b []byte) (int, error) { + if err := c.error(); err != nil { + return 0, err + } + + if err := c.Handshake(); err != nil { + return 0, c.setError(err) + } + + c.out.Lock() + defer c.out.Unlock() + + if !c.handshakeComplete { + return 0, alertInternalError + } + + // SSL 3.0 and TLS 1.0 are susceptible to a chosen-plaintext + // attack when using block mode ciphers due to predictable IVs. + // This can be prevented by splitting each Application Data + // record into two records, effectively randomizing the IV. + // + // http://www.openssl.org/~bodo/tls-cbc.txt + // https://bugzilla.mozilla.org/show_bug.cgi?id=665814 + // http://www.imperialviolet.org/2012/01/15/beastfollowup.html + + var m int + if len(b) > 1 && c.vers <= VersionTLS10 { + if _, ok := c.out.cipher.(cipher.BlockMode); ok { + n, err := c.writeRecord(recordTypeApplicationData, b[:1]) + if err != nil { + return n, c.setError(err) + } + m, b = 1, b[1:] + } + } + + n, err := c.writeRecord(recordTypeApplicationData, b) + return n + m, c.setError(err) +} + +func (c *Conn) handleRenegotiation() error { + c.handshakeComplete = false + if !c.isClient { + panic("renegotiation should only happen for a client") + } + + msg, err := c.readHandshake() + if err != nil { + return err + } + _, ok := msg.(*helloRequestMsg) + if !ok { + c.sendAlert(alertUnexpectedMessage) + return alertUnexpectedMessage + } + + return c.Handshake() +} + +// Read can be made to time out and return a net.Error with Timeout() == true +// after a fixed time limit; see SetDeadline and SetReadDeadline. 
+func (c *Conn) Read(b []byte) (n int, err error) { + if err = c.Handshake(); err != nil { + return + } + + c.in.Lock() + defer c.in.Unlock() + + // Some OpenSSL servers send empty records in order to randomize the + // CBC IV. So this loop ignores a limited number of empty records. + const maxConsecutiveEmptyRecords = 100 + for emptyRecordCount := 0; emptyRecordCount <= maxConsecutiveEmptyRecords; emptyRecordCount++ { + for c.input == nil && c.error() == nil { + if err := c.readRecord(recordTypeApplicationData); err != nil { + // Soft error, like EAGAIN + return 0, err + } + if c.hand.Len() > 0 { + // We received handshake bytes, indicating the start of + // a renegotiation. + if err := c.handleRenegotiation(); err != nil { + return 0, err + } + continue + } + } + if err := c.error(); err != nil { + return 0, err + } + + n, err = c.input.Read(b) + if c.input.off >= len(c.input.data) { + c.in.freeBlock(c.input) + c.input = nil + } + + if n != 0 || err != nil { + return n, err + } + } + + return 0, io.ErrNoProgress +} + +// Close closes the connection. +func (c *Conn) Close() error { + var alertErr error + + c.handshakeMutex.Lock() + defer c.handshakeMutex.Unlock() + if c.handshakeComplete { + alertErr = c.sendAlert(alertCloseNotify) + } + + if err := c.conn.Close(); err != nil { + return err + } + return alertErr +} + +// Handshake runs the client or server handshake +// protocol if it has not yet been run. +// Most uses of this package need not call Handshake +// explicitly: the first Read or Write will call it automatically. +func (c *Conn) Handshake() error { + c.handshakeMutex.Lock() + defer c.handshakeMutex.Unlock() + if err := c.error(); err != nil { + return err + } + if c.handshakeComplete { + return nil + } + if c.isClient { + return c.clientHandshake() + } + return c.serverHandshake() +} + +// ConnectionState returns basic TLS details about the connection. +func (c *Conn) ConnectionState() ConnectionState { + c.handshakeMutex.Lock() + defer c.handshakeMutex.Unlock() + + var state ConnectionState + state.HandshakeComplete = c.handshakeComplete + if c.handshakeComplete { + state.NegotiatedProtocol = c.clientProtocol + state.DidResume = c.didResume + state.NegotiatedProtocolIsMutual = !c.clientProtocolFallback + state.CipherSuite = c.cipherSuite + state.PeerCertificates = c.peerCertificates + state.VerifiedChains = c.verifiedChains + state.ServerName = c.serverName + } + + return state +} + +// OCSPResponse returns the stapled OCSP response from the TLS server, if +// any. (Only valid for client connections.) +func (c *Conn) OCSPResponse() []byte { + c.handshakeMutex.Lock() + defer c.handshakeMutex.Unlock() + + return c.ocspResponse +} + +// VerifyHostname checks that the peer certificate chain is valid for +// connecting to host. If so, it returns nil; if not, it returns an error +// describing the problem. +func (c *Conn) VerifyHostname(host string) error { + c.handshakeMutex.Lock() + defer c.handshakeMutex.Unlock() + if !c.isClient { + return errors.New("VerifyHostname called on TLS server connection") + } + if !c.handshakeComplete { + return errors.New("TLS handshake has not yet been performed") + } + return c.peerCertificates[0].VerifyHostname(host) +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/core/tls/conn_test.go b/vendor/github.com/Azure/azure-sdk-for-go/core/tls/conn_test.go new file mode 100644 index 000000000..5c555147c --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/core/tls/conn_test.go @@ -0,0 +1,106 @@ +// Copyright 2010 The Go Authors. 
All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package tls + +import ( + "testing" +) + +func TestRoundUp(t *testing.T) { + if roundUp(0, 16) != 0 || + roundUp(1, 16) != 16 || + roundUp(15, 16) != 16 || + roundUp(16, 16) != 16 || + roundUp(17, 16) != 32 { + t.Error("roundUp broken") + } +} + +var paddingTests = []struct { + in []byte + good bool + expectedLen int +}{ + {[]byte{1, 2, 3, 4, 0}, true, 4}, + {[]byte{1, 2, 3, 4, 0, 1}, false, 0}, + {[]byte{1, 2, 3, 4, 99, 99}, false, 0}, + {[]byte{1, 2, 3, 4, 1, 1}, true, 4}, + {[]byte{1, 2, 3, 2, 2, 2}, true, 3}, + {[]byte{1, 2, 3, 3, 3, 3}, true, 2}, + {[]byte{1, 2, 3, 4, 3, 3}, false, 0}, + {[]byte{1, 4, 4, 4, 4, 4}, true, 1}, + {[]byte{5, 5, 5, 5, 5, 5}, true, 0}, + {[]byte{6, 6, 6, 6, 6, 6}, false, 0}, +} + +func TestRemovePadding(t *testing.T) { + for i, test := range paddingTests { + payload, good := removePadding(test.in) + expectedGood := byte(255) + if !test.good { + expectedGood = 0 + } + if good != expectedGood { + t.Errorf("#%d: wrong validity, want:%d got:%d", i, expectedGood, good) + } + if good == 255 && len(payload) != test.expectedLen { + t.Errorf("#%d: got %d, want %d", i, len(payload), test.expectedLen) + } + } +} + +var certExampleCom = `308201403081eda003020102020101300b06092a864886f70d010105301e311c301a060355040a131354657374696e67204365727469666963617465301e170d3131313030313138353835325a170d3132303933303138353835325a301e311c301a060355040a131354657374696e67204365727469666963617465305a300b06092a864886f70d010101034b003048024100bced6e32368599eeddf18796bfd03958a154f87e5b084f96e85136a56b886733592f493f0fc68b0d6b3551781cb95e13c5de458b28d6fb60d20a9129313261410203010001a31a301830160603551d11040f300d820b6578616d706c652e636f6d300b06092a864886f70d0101050341001a0b419d2c74474c6450654e5f10b32bf426ffdf55cad1c52602e7a9151513a3424c70f5960dcd682db0c33769cc1daa3fcdd3db10809d2392ed4a1bf50ced18` + +var certWildcardExampleCom = `308201423081efa003020102020101300b06092a864886f70d010105301e311c301a060355040a131354657374696e67204365727469666963617465301e170d3131313030313139303034365a170d3132303933303139303034365a301e311c301a060355040a131354657374696e67204365727469666963617465305a300b06092a864886f70d010101034b003048024100bced6e32368599eeddf18796bfd03958a154f87e5b084f96e85136a56b886733592f493f0fc68b0d6b3551781cb95e13c5de458b28d6fb60d20a9129313261410203010001a31c301a30180603551d110411300f820d2a2e6578616d706c652e636f6d300b06092a864886f70d0101050341001676f0c9e7c33c1b656ed5a6476c4e2ee9ec8e62df7407accb1875272b2edd0a22096cb2c22598d11604104d604f810eb4b5987ca6bb319c7e6ce48725c54059` + +var certFooExampleCom = `308201443081f1a003020102020101300b06092a864886f70d010105301e311c301a060355040a131354657374696e67204365727469666963617465301e170d3131313030313139303131345a170d3132303933303139303131345a301e311c301a060355040a131354657374696e67204365727469666963617465305a300b06092a864886f70d010101034b003048024100bced6e32368599eeddf18796bfd03958a154f87e5b084f96e85136a56b886733592f493f0fc68b0d6b3551781cb95e13c5de458b28d6fb60d20a9129313261410203010001a31e301c301a0603551d1104133011820f666f6f2e6578616d706c652e636f6d300b06092a864886f70d010105034100646a2a51f2aa2477add854b462cf5207ba16d3213ffb5d3d0eed473fbf09935019192d1d5b8ca6a2407b424cf04d97c4cd9197c83ecf81f0eab9464a1109d09f` + +var certDoubleWildcardExampleCom = 
`308201443081f1a003020102020101300b06092a864886f70d010105301e311c301a060355040a131354657374696e67204365727469666963617465301e170d3131313030313139303134315a170d3132303933303139303134315a301e311c301a060355040a131354657374696e67204365727469666963617465305a300b06092a864886f70d010101034b003048024100bced6e32368599eeddf18796bfd03958a154f87e5b084f96e85136a56b886733592f493f0fc68b0d6b3551781cb95e13c5de458b28d6fb60d20a9129313261410203010001a31e301c301a0603551d1104133011820f2a2e2a2e6578616d706c652e636f6d300b06092a864886f70d0101050341001c3de267975f56ef57771c6218ef95ecc65102e57bd1defe6f7efea90d9b26cf40de5bd7ad75e46201c7f2a92aaa3e907451e9409f65e28ddb6db80d726290f6` + +func TestCertificateSelection(t *testing.T) { + config := Config{ + Certificates: []Certificate{ + { + Certificate: [][]byte{fromHex(certExampleCom)}, + }, + { + Certificate: [][]byte{fromHex(certWildcardExampleCom)}, + }, + { + Certificate: [][]byte{fromHex(certFooExampleCom)}, + }, + { + Certificate: [][]byte{fromHex(certDoubleWildcardExampleCom)}, + }, + }, + } + + config.BuildNameToCertificate() + + pointerToIndex := func(c *Certificate) int { + for i := range config.Certificates { + if c == &config.Certificates[i] { + return i + } + } + return -1 + } + + if n := pointerToIndex(config.getCertificateForName("example.com")); n != 0 { + t.Errorf("example.com returned certificate %d, not 0", n) + } + if n := pointerToIndex(config.getCertificateForName("bar.example.com")); n != 1 { + t.Errorf("bar.example.com returned certificate %d, not 1", n) + } + if n := pointerToIndex(config.getCertificateForName("foo.example.com")); n != 2 { + t.Errorf("foo.example.com returned certificate %d, not 2", n) + } + if n := pointerToIndex(config.getCertificateForName("foo.bar.example.com")); n != 3 { + t.Errorf("foo.bar.example.com returned certificate %d, not 3", n) + } + if n := pointerToIndex(config.getCertificateForName("foo.bar.baz.example.com")); n != 0 { + t.Errorf("foo.bar.baz.example.com returned certificate %d, not 0", n) + } +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/core/tls/generate_cert.go b/vendor/github.com/Azure/azure-sdk-for-go/core/tls/generate_cert.go new file mode 100644 index 000000000..b417ea464 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/core/tls/generate_cert.go @@ -0,0 +1,118 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build ignore + +// Generate a self-signed X.509 certificate for a TLS server. Outputs to +// 'cert.pem' and 'key.pem' and will overwrite existing files. 
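+//
+// Example invocation (illustrative host values):
+//
+//	go run generate_cert.go --host example.com,192.168.0.1 --ca --rsa-bits 2048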
+ +package main + +import ( + "crypto/rand" + "crypto/rsa" + "crypto/x509" + "crypto/x509/pkix" + "encoding/pem" + "flag" + "fmt" + "log" + "math/big" + "net" + "os" + "strings" + "time" +) + +var ( + host = flag.String("host", "", "Comma-separated hostnames and IPs to generate a certificate for") + validFrom = flag.String("start-date", "", "Creation date formatted as Jan 1 15:04:05 2011") + validFor = flag.Duration("duration", 365*24*time.Hour, "Duration that certificate is valid for") + isCA = flag.Bool("ca", false, "whether this cert should be its own Certificate Authority") + rsaBits = flag.Int("rsa-bits", 2048, "Size of RSA key to generate") +) + +func main() { + flag.Parse() + + if len(*host) == 0 { + log.Fatalf("Missing required --host parameter") + } + + priv, err := rsa.GenerateKey(rand.Reader, *rsaBits) + if err != nil { + log.Fatalf("failed to generate private key: %s", err) + return + } + + var notBefore time.Time + if len(*validFrom) == 0 { + notBefore = time.Now() + } else { + notBefore, err = time.Parse("Jan 2 15:04:05 2006", *validFrom) + if err != nil { + fmt.Fprintf(os.Stderr, "Failed to parse creation date: %s\n", err) + os.Exit(1) + } + } + + notAfter := notBefore.Add(*validFor) + + // end of ASN.1 time + endOfTime := time.Date(2049, 12, 31, 23, 59, 59, 0, time.UTC) + if notAfter.After(endOfTime) { + notAfter = endOfTime + } + + template := x509.Certificate{ + SerialNumber: new(big.Int).SetInt64(0), + Subject: pkix.Name{ + Organization: []string{"Acme Co"}, + }, + NotBefore: notBefore, + NotAfter: notAfter, + + KeyUsage: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature, + ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth}, + BasicConstraintsValid: true, + } + + hosts := strings.Split(*host, ",") + for _, h := range hosts { + if ip := net.ParseIP(h); ip != nil { + template.IPAddresses = append(template.IPAddresses, ip) + } else { + template.DNSNames = append(template.DNSNames, h) + } + } + + if *isCA { + template.IsCA = true + template.KeyUsage |= x509.KeyUsageCertSign + } + + derBytes, err := x509.CreateCertificate(rand.Reader, &template, &template, &priv.PublicKey, priv) + if err != nil { + log.Fatalf("Failed to create certificate: %s", err) + return + } + + certOut, err := os.Create("cert.pem") + if err != nil { + log.Fatalf("failed to open cert.pem for writing: %s", err) + return + } + pem.Encode(certOut, &pem.Block{Type: "CERTIFICATE", Bytes: derBytes}) + certOut.Close() + log.Print("written cert.pem\n") + + keyOut, err := os.OpenFile("key.pem", os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0600) + if err != nil { + log.Print("failed to open key.pem for writing:", err) + return + } + pem.Encode(keyOut, &pem.Block{Type: "RSA PRIVATE KEY", Bytes: x509.MarshalPKCS1PrivateKey(priv)}) + keyOut.Close() + log.Print("written key.pem\n") +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/core/tls/handshake_client.go b/vendor/github.com/Azure/azure-sdk-for-go/core/tls/handshake_client.go new file mode 100644 index 000000000..85e4adefc --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/core/tls/handshake_client.go @@ -0,0 +1,411 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package tls + +import ( + "bytes" + "crypto/ecdsa" + "crypto/rsa" + "crypto/subtle" + "crypto/x509" + "encoding/asn1" + "errors" + "io" + "strconv" +) + +func (c *Conn) clientHandshake() error { + if c.config == nil { + c.config = defaultConfig() + } + + hello := &clientHelloMsg{ + vers: c.config.maxVersion(), + compressionMethods: []uint8{compressionNone}, + random: make([]byte, 32), + ocspStapling: true, + serverName: c.config.ServerName, + supportedCurves: []uint16{curveP256, curveP384, curveP521}, + supportedPoints: []uint8{pointFormatUncompressed}, + nextProtoNeg: len(c.config.NextProtos) > 0, + } + + possibleCipherSuites := c.config.cipherSuites() + hello.cipherSuites = make([]uint16, 0, len(possibleCipherSuites)) + +NextCipherSuite: + for _, suiteId := range possibleCipherSuites { + for _, suite := range cipherSuites { + if suite.id != suiteId { + continue + } + // Don't advertise TLS 1.2-only cipher suites unless + // we're attempting TLS 1.2. + if hello.vers < VersionTLS12 && suite.flags&suiteTLS12 != 0 { + continue + } + hello.cipherSuites = append(hello.cipherSuites, suiteId) + continue NextCipherSuite + } + } + + t := uint32(c.config.time().Unix()) + hello.random[0] = byte(t >> 24) + hello.random[1] = byte(t >> 16) + hello.random[2] = byte(t >> 8) + hello.random[3] = byte(t) + _, err := io.ReadFull(c.config.rand(), hello.random[4:]) + if err != nil { + c.sendAlert(alertInternalError) + return errors.New("short read from Rand") + } + + if hello.vers >= VersionTLS12 { + hello.signatureAndHashes = supportedSKXSignatureAlgorithms + } + + c.writeRecord(recordTypeHandshake, hello.marshal()) + + msg, err := c.readHandshake() + if err != nil { + return err + } + serverHello, ok := msg.(*serverHelloMsg) + if !ok { + return c.sendAlert(alertUnexpectedMessage) + } + + vers, ok := c.config.mutualVersion(serverHello.vers) + if !ok || vers < VersionTLS10 { + // TLS 1.0 is the minimum version supported as a client. 
+ return c.sendAlert(alertProtocolVersion) + } + c.vers = vers + c.haveVers = true + + finishedHash := newFinishedHash(c.vers) + finishedHash.Write(hello.marshal()) + finishedHash.Write(serverHello.marshal()) + + if serverHello.compressionMethod != compressionNone { + return c.sendAlert(alertUnexpectedMessage) + } + + if !hello.nextProtoNeg && serverHello.nextProtoNeg { + c.sendAlert(alertHandshakeFailure) + return errors.New("server advertised unrequested NPN") + } + + suite := mutualCipherSuite(c.config.cipherSuites(), serverHello.cipherSuite) + if suite == nil { + return c.sendAlert(alertHandshakeFailure) + } + + msg, err = c.readHandshake() + if err != nil { + return err + } + certMsg, ok := msg.(*certificateMsg) + if !ok || len(certMsg.certificates) == 0 { + return c.sendAlert(alertUnexpectedMessage) + } + finishedHash.Write(certMsg.marshal()) + + certs := make([]*x509.Certificate, len(certMsg.certificates)) + for i, asn1Data := range certMsg.certificates { + cert, err := x509.ParseCertificate(asn1Data) + if err != nil { + c.sendAlert(alertBadCertificate) + return errors.New("failed to parse certificate from server: " + err.Error()) + } + certs[i] = cert + } + + if !c.config.InsecureSkipVerify { + opts := x509.VerifyOptions{ + Roots: c.config.RootCAs, + CurrentTime: c.config.time(), + DNSName: c.config.ServerName, + Intermediates: x509.NewCertPool(), + } + + for i, cert := range certs { + if i == 0 { + continue + } + opts.Intermediates.AddCert(cert) + } + c.verifiedChains, err = certs[0].Verify(opts) + if err != nil { + c.sendAlert(alertBadCertificate) + return err + } + } + + switch certs[0].PublicKey.(type) { + case *rsa.PublicKey, *ecdsa.PublicKey: + break + default: + return c.sendAlert(alertUnsupportedCertificate) + } + + c.peerCertificates = certs + + if serverHello.ocspStapling { + msg, err = c.readHandshake() + if err != nil { + return err + } + cs, ok := msg.(*certificateStatusMsg) + if !ok { + return c.sendAlert(alertUnexpectedMessage) + } + finishedHash.Write(cs.marshal()) + + if cs.statusType == statusTypeOCSP { + c.ocspResponse = cs.response + } + } + + msg, err = c.readHandshake() + if err != nil { + return err + } + + keyAgreement := suite.ka(c.vers) + + skx, ok := msg.(*serverKeyExchangeMsg) + if ok { + finishedHash.Write(skx.marshal()) + err = keyAgreement.processServerKeyExchange(c.config, hello, serverHello, certs[0], skx) + if err != nil { + c.sendAlert(alertUnexpectedMessage) + return err + } + + msg, err = c.readHandshake() + if err != nil { + return err + } + } + + var chainToSend *Certificate + var certRequested bool + certReq, ok := msg.(*certificateRequestMsg) + if ok { + certRequested = true + + // RFC 4346 on the certificateAuthorities field: + // A list of the distinguished names of acceptable certificate + // authorities. These distinguished names may specify a desired + // distinguished name for a root CA or for a subordinate CA; + // thus, this message can be used to describe both known roots + // and a desired authorization space. If the + // certificate_authorities list is empty then the client MAY + // send any certificate of the appropriate + // ClientCertificateType, unless there is some external + // arrangement to the contrary. 
+ + finishedHash.Write(certReq.marshal()) + + var rsaAvail, ecdsaAvail bool + for _, certType := range certReq.certificateTypes { + switch certType { + case certTypeRSASign: + rsaAvail = true + case certTypeECDSASign: + ecdsaAvail = true + } + } + + // We need to search our list of client certs for one + // where SignatureAlgorithm is RSA and the Issuer is in + // certReq.certificateAuthorities + findCert: + for i, chain := range c.config.Certificates { + if !rsaAvail && !ecdsaAvail { + continue + } + + for j, cert := range chain.Certificate { + x509Cert := chain.Leaf + // parse the certificate if this isn't the leaf + // node, or if chain.Leaf was nil + if j != 0 || x509Cert == nil { + if x509Cert, err = x509.ParseCertificate(cert); err != nil { + c.sendAlert(alertInternalError) + return errors.New("tls: failed to parse client certificate #" + strconv.Itoa(i) + ": " + err.Error()) + } + } + + switch { + case rsaAvail && x509Cert.PublicKeyAlgorithm == x509.RSA: + case ecdsaAvail && x509Cert.PublicKeyAlgorithm == x509.ECDSA: + default: + continue findCert + } + + if len(certReq.certificateAuthorities) == 0 { + // they gave us an empty list, so just take the + // first RSA cert from c.config.Certificates + chainToSend = &chain + break findCert + } + + for _, ca := range certReq.certificateAuthorities { + if bytes.Equal(x509Cert.RawIssuer, ca) { + chainToSend = &chain + break findCert + } + } + } + } + + msg, err = c.readHandshake() + if err != nil { + return err + } + } + + shd, ok := msg.(*serverHelloDoneMsg) + if !ok { + return c.sendAlert(alertUnexpectedMessage) + } + finishedHash.Write(shd.marshal()) + + // If the server requested a certificate then we have to send a + // Certificate message, even if it's empty because we don't have a + // certificate to send. 
+	if certRequested {
+		certMsg = new(certificateMsg)
+		if chainToSend != nil {
+			certMsg.certificates = chainToSend.Certificate
+		}
+		finishedHash.Write(certMsg.marshal())
+		c.writeRecord(recordTypeHandshake, certMsg.marshal())
+	}
+
+	preMasterSecret, ckx, err := keyAgreement.generateClientKeyExchange(c.config, hello, certs[0])
+	if err != nil {
+		c.sendAlert(alertInternalError)
+		return err
+	}
+	if ckx != nil {
+		finishedHash.Write(ckx.marshal())
+		c.writeRecord(recordTypeHandshake, ckx.marshal())
+	}
+
+	if chainToSend != nil {
+		var signed []byte
+		certVerify := &certificateVerifyMsg{
+			hasSignatureAndHash: c.vers >= VersionTLS12,
+		}
+
+		switch key := c.config.Certificates[0].PrivateKey.(type) {
+		case *ecdsa.PrivateKey:
+			digest, _, hashId := finishedHash.hashForClientCertificate(signatureECDSA)
+			// Assign to the enclosing err rather than shadowing it with
+			// :=, so a failed ecdsa.Sign is caught by the error check
+			// after this switch instead of being silently dropped.
+			r, s, signErr := ecdsa.Sign(c.config.rand(), key, digest)
+			err = signErr
+			if err == nil {
+				signed, err = asn1.Marshal(ecdsaSignature{r, s})
+			}
+			certVerify.signatureAndHash.signature = signatureECDSA
+			certVerify.signatureAndHash.hash = hashId
+		case *rsa.PrivateKey:
+			digest, hashFunc, hashId := finishedHash.hashForClientCertificate(signatureRSA)
+			signed, err = rsa.SignPKCS1v15(c.config.rand(), key, hashFunc, digest)
+			certVerify.signatureAndHash.signature = signatureRSA
+			certVerify.signatureAndHash.hash = hashId
+		default:
+			err = errors.New("unknown private key type")
+		}
+		if err != nil {
+			return c.sendAlert(alertInternalError)
+		}
+		certVerify.signature = signed
+
+		finishedHash.Write(certVerify.marshal())
+		c.writeRecord(recordTypeHandshake, certVerify.marshal())
+	}
+
+	masterSecret := masterFromPreMasterSecret(c.vers, preMasterSecret, hello.random, serverHello.random)
+	clientMAC, serverMAC, clientKey, serverKey, clientIV, serverIV :=
+		keysFromMasterSecret(c.vers, masterSecret, hello.random, serverHello.random, suite.macLen, suite.keyLen, suite.ivLen)
+
+	var clientCipher interface{}
+	var clientHash macFunction
+	if suite.cipher != nil {
+		clientCipher = suite.cipher(clientKey, clientIV, false /* not for reading */)
+		clientHash = suite.mac(c.vers, clientMAC)
+	} else {
+		clientCipher = suite.aead(clientKey, clientIV)
+	}
+	c.out.prepareCipherSpec(c.vers, clientCipher, clientHash)
+	c.writeRecord(recordTypeChangeCipherSpec, []byte{1})
+
+	if serverHello.nextProtoNeg {
+		nextProto := new(nextProtoMsg)
+		proto, fallback := mutualProtocol(c.config.NextProtos, serverHello.nextProtos)
+		nextProto.proto = proto
+		c.clientProtocol = proto
+		c.clientProtocolFallback = fallback
+
+		finishedHash.Write(nextProto.marshal())
+		c.writeRecord(recordTypeHandshake, nextProto.marshal())
+	}
+
+	finished := new(finishedMsg)
+	finished.verifyData = finishedHash.clientSum(masterSecret)
+	finishedHash.Write(finished.marshal())
+	c.writeRecord(recordTypeHandshake, finished.marshal())
+
+	var serverCipher interface{}
+	var serverHash macFunction
+	if suite.cipher != nil {
+		serverCipher = suite.cipher(serverKey, serverIV, true /* for reading */)
+		serverHash = suite.mac(c.vers, serverMAC)
+	} else {
+		serverCipher = suite.aead(serverKey, serverIV)
+	}
+	c.in.prepareCipherSpec(c.vers, serverCipher, serverHash)
+	c.readRecord(recordTypeChangeCipherSpec)
+	if err := c.error(); err != nil {
+		return err
+	}
+
+	msg, err = c.readHandshake()
+	if err != nil {
+		return err
+	}
+	serverFinished, ok := msg.(*finishedMsg)
+	if !ok {
+		return c.sendAlert(alertUnexpectedMessage)
+	}
+
+	verify := finishedHash.serverSum(masterSecret)
+	if len(verify) != len(serverFinished.verifyData) ||
+		subtle.ConstantTimeCompare(verify, serverFinished.verifyData) != 1 {
+		return c.sendAlert(alertHandshakeFailure)
+	}
+
+	c.handshakeComplete = true
+	c.cipherSuite = suite.id
+	return nil
+}
+
+// mutualProtocol finds the mutual Next Protocol Negotiation protocol given the
+// set of client and server supported protocols. The set of client supported
+// protocols must not be empty. It returns the resulting protocol and a flag
+// indicating whether the fallback case was reached.
+func mutualProtocol(clientProtos, serverProtos []string) (string, bool) {
+	for _, s := range serverProtos {
+		for _, c := range clientProtos {
+			if s == c {
+				return s, false
+			}
+		}
+	}
+
+	return clientProtos[0], true
+}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/core/tls/handshake_client_test.go b/vendor/github.com/Azure/azure-sdk-for-go/core/tls/handshake_client_test.go
new file mode 100644
index 000000000..6c564001b
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/core/tls/handshake_client_test.go
@@ -0,0 +1,3050 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package tls
+
+import (
+	"bytes"
+	"flag"
+	"io"
+	"net"
+	"os"
+	"testing"
+)
+
+func testClientScript(t *testing.T, name string, clientScript [][]byte, config *Config) {
+	c, s := net.Pipe()
+	cli := Client(c, config)
+	go func() {
+		cli.Write([]byte("hello\n"))
+		cli.Close()
+		c.Close()
+	}()
+
+	defer c.Close()
+	for i, b := range clientScript {
+		if i%2 == 1 {
+			s.Write(b)
+			continue
+		}
+		bb := make([]byte, len(b))
+		_, err := io.ReadFull(s, bb)
+		if err != nil {
+			t.Fatalf("%s #%d: %s", name, i, err)
+		}
+		if !bytes.Equal(b, bb) {
+			t.Fatalf("%s #%d: mismatch on read: got:%x want:%x", name, i, bb, b)
+		}
+	}
+}
+
+func TestHandshakeClientRSARC4(t *testing.T) {
+	var config = *testConfig
+	config.CipherSuites = []uint16{TLS_RSA_WITH_RC4_128_SHA}
+	testClientScript(t, "RSA-RC4", rsaRC4ClientScript, &config)
+}
+
+func TestHandshakeClientECDHERSAAES(t *testing.T) {
+	var config = *testConfig
+	config.CipherSuites = []uint16{TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA}
+	testClientScript(t, "ECDHE-RSA-AES", ecdheRSAAESClientScript, &config)
+}
+
+func TestHandshakeClientECDHECDSAAES(t *testing.T) {
+	var config = *testConfig
+	config.CipherSuites = []uint16{TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA}
+	config.Certificates = nil
+	config.BuildNameToCertificate()
+	testClientScript(t, "ECDHE-ECDSA-AES", ecdheECDSAAESClientScript, &config)
+}
+
+func TestLongClientCertificateChain(t *testing.T) {
+	config := *testConfig
+	cert, _ := X509KeyPair(testClientChainCertificate, testClientChainCertificate)
+	config.Certificates = []Certificate{cert}
+	testClientScript(t, "Long client certificate chains", clientChainCertificateScript, &config)
+}
+
+func TestHandshakeClientTLS11(t *testing.T) {
+	var config = *testConfig
+	config.MaxVersion = VersionTLS11
+	config.CipherSuites = []uint16{TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA}
+	testClientScript(t, "TLS11-ECDHE-AES", tls11ECDHEAESClientScript, &config)
+}
+
+func TestHandshakeClientTLS12(t *testing.T) {
+	config := *testConfig
+	config.MaxVersion = VersionTLS12
+	config.CipherSuites = []uint16{TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA}
+	cert, _ := X509KeyPair(testClientChainCertificate, testClientChainCertificate)
+	config.Certificates = []Certificate{cert}
+	testClientScript(t, "TLS12", clientTLS12Script, &config)
+}
+
+func TestHandshakeClientTLS12ClientCert(t *testing.T) {
+	config := *testConfig
+	config.MaxVersion = VersionTLS12
+	config.CipherSuites = []uint16{TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256}
+	cert, _ := X509KeyPair(testClientChainCertificate, testClientChainCertificate)
+	config.Certificates = []Certificate{cert}
+	testClientScript(t, "TLS12ClientCert", clientTLS12ClientCertScript, &config)
+}
+
+var connect = flag.Bool("connect", false, "connect to a TLS server on :10443")
+
+func TestRunClient(t *testing.T) {
+	if !*connect {
+		return
+	}
+
+	tcpConn, err := net.Dial("tcp", "127.0.0.1:10443")
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	record := &recordingConn{
+		Conn: tcpConn,
+	}
+
+	config := GetTestConfig()
+	conn := Client(record, config)
+	if err := conn.Handshake(); err != nil {
+		t.Fatalf("error from TLS handshake: %s", err)
+	}
+
+	conn.Write([]byte("hello\n"))
+	conn.Close()
+
+	record.WriteTo(os.Stdout)
+}
+
+func TestEmptyRecords(t *testing.T) {
+	// emptyRecordScript contains a TLS connection with an empty record as
+	// the first application data from the server. This test ensures that
+	// the empty record doesn't cause (0, nil) to be returned from
+	// Conn.Read.
+	config := *testConfig
+	config.CipherSuites = []uint16{TLS_RSA_WITH_AES_256_CBC_SHA}
+
+	c, s := net.Pipe()
+	cli := Client(c, &config)
+	go func() {
+		defer c.Close()
+		defer cli.Close()
+
+		buf := make([]byte, 1024)
+		n, err := cli.Read(buf)
+		if err != nil {
+			// t.Fatalf must not be called from a goroutine other than
+			// the one running the test; report the failure with Errorf
+			// and let the goroutine exit instead.
+			t.Errorf("error reading from tls.Client: %s", err)
+			return
+		}
+		const expectedLength = 197
+		if n != expectedLength {
+			t.Errorf("incorrect length reading from tls.Client, got %d, want %d", n, expectedLength)
+		}
+	}()
+
+	defer c.Close()
+	for i, b := range emptyRecordScript {
+		if i%2 == 1 {
+			s.Write(b)
+			continue
+		}
+		bb := make([]byte, len(b))
+		_, err := io.ReadFull(s, bb)
+		if err != nil {
+			t.Fatalf("#%d: %s", i, err)
+		}
+		if !bytes.Equal(b, bb) {
+			t.Fatalf("#%d: mismatch on read: got:%x want:%x", i, bb, b)
+		}
+	}
+}
+
+// Script of an interaction with a GnuTLS server.
+// The values for this test are obtained by building and running in client mode:
+//   % go test -test.run "TestRunClient" -connect
+// The recorded bytes are written to stdout.
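+// Each script below is a [][]byte in which even-indexed entries are the
+// flows the client is expected to produce (testClientScript reads them
+// back from the pipe and compares) and odd-indexed entries are replayed
+// verbatim as the server's responses.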
+// +// The server private key is: +// -----BEGIN RSA PRIVATE KEY----- +// MIIBPAIBAAJBAJ+zw4Qnlf8SMVIPFe9GEcStgOY2Ww/dgNdhjeD8ckUJNP5VZkVD +// TGiXav6ooKXfX3j/7tdkuD8Ey2//Kv7+ue0CAwEAAQJAN6W31vDEP2DjdqhzCDDu +// OA4NACqoiFqyblo7yc2tM4h4xMbC3Yx5UKMN9ZkCtX0gzrz6DyF47bdKcWBzNWCj +// gQIhANEoojVt7hq+SQ6MCN6FTAysGgQf56Q3TYoJMoWvdiXVAiEAw3e3rc+VJpOz +// rHuDo6bgpjUAAXM+v3fcpsfZSNO6V7kCIQCtbVjanpUwvZkMI9by02oUk9taki3b +// PzPfAfNPYAbCJQIhAJXNQDWyqwn/lGmR11cqY2y9nZ1+5w3yHGatLrcDnQHxAiEA +// vnlEGo8K85u+KwIOimM48ZG8oTk7iFdkqLJR1utT3aU= +// -----END RSA PRIVATE KEY----- +// +// and certificate is: +// -----BEGIN CERTIFICATE----- +// MIICKzCCAdWgAwIBAgIJALE1E2URIMWSMA0GCSqGSIb3DQEBBQUAMEUxCzAJBgNV +// BAYTAkFVMRMwEQYDVQQIEwpTb21lLVN0YXRlMSEwHwYDVQQKExhJbnRlcm5ldCBX +// aWRnaXRzIFB0eSBMdGQwHhcNMTIwNDA2MTcxMDEzWhcNMTUwNDA2MTcxMDEzWjBF +// MQswCQYDVQQGEwJBVTETMBEGA1UECBMKU29tZS1TdGF0ZTEhMB8GA1UEChMYSW50 +// ZXJuZXQgV2lkZ2l0cyBQdHkgTHRkMFwwDQYJKoZIhvcNAQEBBQADSwAwSAJBAJ+z +// w4Qnlf8SMVIPFe9GEcStgOY2Ww/dgNdhjeD8ckUJNP5VZkVDTGiXav6ooKXfX3j/ +// 7tdkuD8Ey2//Kv7+ue0CAwEAAaOBpzCBpDAdBgNVHQ4EFgQUeKaXmmO1xaGlM7oi +// fCNuWxt6zCswdQYDVR0jBG4wbIAUeKaXmmO1xaGlM7oifCNuWxt6zCuhSaRHMEUx +// CzAJBgNVBAYTAkFVMRMwEQYDVQQIEwpTb21lLVN0YXRlMSEwHwYDVQQKExhJbnRl +// cm5ldCBXaWRnaXRzIFB0eSBMdGSCCQCxNRNlESDFkjAMBgNVHRMEBTADAQH/MA0G +// CSqGSIb3DQEBBQUAA0EAhTZAc8G7GtrUWZ8tonAxRnTsg26oyDxRrzms7EC86CJG +// HZnWRiok1IsFCEv7NRFukrt3uuQSu/TIXpyBqJdgTA== +// -----END CERTIFICATE----- +var rsaRC4ClientScript = [][]byte{ + { + 0x16, 0x03, 0x01, 0x00, 0x4a, 0x01, 0x00, 0x00, + 0x46, 0x03, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x05, + 0x01, 0x00, 0x00, 0x1b, 0x00, 0x05, 0x00, 0x05, + 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0a, 0x00, + 0x08, 0x00, 0x06, 0x00, 0x17, 0x00, 0x18, 0x00, + 0x19, 0x00, 0x0b, 0x00, 0x02, 0x01, 0x00, + }, + + { + 0x16, 0x03, 0x01, 0x00, 0x4a, 0x02, 0x00, 0x00, + 0x46, 0x03, 0x01, 0x4d, 0x0a, 0x56, 0x16, 0xb5, + 0x91, 0xd1, 0xcb, 0x80, 0x4d, 0xc7, 0x46, 0xf3, + 0x37, 0x0c, 0xef, 0xea, 0x64, 0x11, 0x14, 0x56, + 0x97, 0x9b, 0xc5, 0x67, 0x08, 0xb7, 0x13, 0xea, + 0xf8, 0xc9, 0xb3, 0x20, 0xe2, 0xfc, 0x41, 0xf6, + 0x96, 0x90, 0x9d, 0x43, 0x9b, 0xe9, 0x6e, 0xf8, + 0x41, 0x16, 0xcc, 0xf3, 0xc7, 0xde, 0xda, 0x5a, + 0xa1, 0x33, 0x69, 0xe2, 0xde, 0x5b, 0xaf, 0x2a, + 0x92, 0xe7, 0xd4, 0xa0, 0x00, 0x05, 0x00, 0x16, + 0x03, 0x01, 0x01, 0xf7, 0x0b, 0x00, 0x01, 0xf3, + 0x00, 0x01, 0xf0, 0x00, 0x01, 0xed, 0x30, 0x82, + 0x01, 0xe9, 0x30, 0x82, 0x01, 0x52, 0x02, 0x01, + 0x06, 0x30, 0x0d, 0x06, 0x09, 0x2a, 0x86, 0x48, + 0x86, 0xf7, 0x0d, 0x01, 0x01, 0x04, 0x05, 0x00, + 0x30, 0x5b, 0x31, 0x0b, 0x30, 0x09, 0x06, 0x03, + 0x55, 0x04, 0x06, 0x13, 0x02, 0x41, 0x55, 0x31, + 0x13, 0x30, 0x11, 0x06, 0x03, 0x55, 0x04, 0x08, + 0x13, 0x0a, 0x51, 0x75, 0x65, 0x65, 0x6e, 0x73, + 0x6c, 0x61, 0x6e, 0x64, 0x31, 0x1a, 0x30, 0x18, + 0x06, 0x03, 0x55, 0x04, 0x0a, 0x13, 0x11, 0x43, + 0x72, 0x79, 0x70, 0x74, 0x53, 0x6f, 0x66, 0x74, + 0x20, 0x50, 0x74, 0x79, 0x20, 0x4c, 0x74, 0x64, + 0x31, 0x1b, 0x30, 0x19, 0x06, 0x03, 0x55, 0x04, + 0x03, 0x13, 0x12, 0x54, 0x65, 0x73, 0x74, 0x20, + 0x43, 0x41, 0x20, 0x28, 0x31, 0x30, 0x32, 0x34, + 0x20, 0x62, 0x69, 0x74, 0x29, 0x30, 0x1e, 0x17, + 0x0d, 0x30, 0x30, 0x31, 0x30, 0x31, 0x36, 0x32, + 0x32, 0x33, 0x31, 0x30, 0x33, 0x5a, 0x17, 0x0d, + 0x30, 0x33, 0x30, 0x31, 0x31, 0x34, 0x32, 0x32, + 0x33, 0x31, 0x30, 0x33, 0x5a, 0x30, 0x63, 
0x31, + 0x0b, 0x30, 0x09, 0x06, 0x03, 0x55, 0x04, 0x06, + 0x13, 0x02, 0x41, 0x55, 0x31, 0x13, 0x30, 0x11, + 0x06, 0x03, 0x55, 0x04, 0x08, 0x13, 0x0a, 0x51, + 0x75, 0x65, 0x65, 0x6e, 0x73, 0x6c, 0x61, 0x6e, + 0x64, 0x31, 0x1a, 0x30, 0x18, 0x06, 0x03, 0x55, + 0x04, 0x0a, 0x13, 0x11, 0x43, 0x72, 0x79, 0x70, + 0x74, 0x53, 0x6f, 0x66, 0x74, 0x20, 0x50, 0x74, + 0x79, 0x20, 0x4c, 0x74, 0x64, 0x31, 0x23, 0x30, + 0x21, 0x06, 0x03, 0x55, 0x04, 0x03, 0x13, 0x1a, + 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x20, 0x74, + 0x65, 0x73, 0x74, 0x20, 0x63, 0x65, 0x72, 0x74, + 0x20, 0x28, 0x35, 0x31, 0x32, 0x20, 0x62, 0x69, + 0x74, 0x29, 0x30, 0x5c, 0x30, 0x0d, 0x06, 0x09, + 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x01, + 0x01, 0x05, 0x00, 0x03, 0x4b, 0x00, 0x30, 0x48, + 0x02, 0x41, 0x00, 0x9f, 0xb3, 0xc3, 0x84, 0x27, + 0x95, 0xff, 0x12, 0x31, 0x52, 0x0f, 0x15, 0xef, + 0x46, 0x11, 0xc4, 0xad, 0x80, 0xe6, 0x36, 0x5b, + 0x0f, 0xdd, 0x80, 0xd7, 0x61, 0x8d, 0xe0, 0xfc, + 0x72, 0x45, 0x09, 0x34, 0xfe, 0x55, 0x66, 0x45, + 0x43, 0x4c, 0x68, 0x97, 0x6a, 0xfe, 0xa8, 0xa0, + 0xa5, 0xdf, 0x5f, 0x78, 0xff, 0xee, 0xd7, 0x64, + 0xb8, 0x3f, 0x04, 0xcb, 0x6f, 0xff, 0x2a, 0xfe, + 0xfe, 0xb9, 0xed, 0x02, 0x03, 0x01, 0x00, 0x01, + 0x30, 0x0d, 0x06, 0x09, 0x2a, 0x86, 0x48, 0x86, + 0xf7, 0x0d, 0x01, 0x01, 0x04, 0x05, 0x00, 0x03, + 0x81, 0x81, 0x00, 0x93, 0xd2, 0x0a, 0xc5, 0x41, + 0xe6, 0x5a, 0xa9, 0x86, 0xf9, 0x11, 0x87, 0xe4, + 0xdb, 0x45, 0xe2, 0xc5, 0x95, 0x78, 0x1a, 0x6c, + 0x80, 0x6d, 0x73, 0x1f, 0xb4, 0x6d, 0x44, 0xa3, + 0xba, 0x86, 0x88, 0xc8, 0x58, 0xcd, 0x1c, 0x06, + 0x35, 0x6c, 0x44, 0x62, 0x88, 0xdf, 0xe4, 0xf6, + 0x64, 0x61, 0x95, 0xef, 0x4a, 0xa6, 0x7f, 0x65, + 0x71, 0xd7, 0x6b, 0x88, 0x39, 0xf6, 0x32, 0xbf, + 0xac, 0x93, 0x67, 0x69, 0x51, 0x8c, 0x93, 0xec, + 0x48, 0x5f, 0xc9, 0xb1, 0x42, 0xf9, 0x55, 0xd2, + 0x7e, 0x4e, 0xf4, 0xf2, 0x21, 0x6b, 0x90, 0x57, + 0xe6, 0xd7, 0x99, 0x9e, 0x41, 0xca, 0x80, 0xbf, + 0x1a, 0x28, 0xa2, 0xca, 0x5b, 0x50, 0x4a, 0xed, + 0x84, 0xe7, 0x82, 0xc7, 0xd2, 0xcf, 0x36, 0x9e, + 0x6a, 0x67, 0xb9, 0x88, 0xa7, 0xf3, 0x8a, 0xd0, + 0x04, 0xf8, 0xe8, 0xc6, 0x17, 0xe3, 0xc5, 0x29, + 0xbc, 0x17, 0xf1, 0x16, 0x03, 0x01, 0x00, 0x04, + 0x0e, 0x00, 0x00, 0x00, + }, + + { + 0x16, 0x03, 0x01, 0x00, 0x46, 0x10, 0x00, 0x00, + 0x42, 0x00, 0x40, 0x87, 0xa1, 0x1f, 0x14, 0xe1, + 0xfb, 0x91, 0xac, 0x58, 0x2e, 0xf3, 0x71, 0xce, + 0x01, 0x85, 0x2c, 0xc7, 0xfe, 0x84, 0x87, 0x82, + 0xb7, 0x57, 0xdb, 0x37, 0x4d, 0x46, 0x83, 0x67, + 0x52, 0x82, 0x51, 0x01, 0x95, 0x23, 0x68, 0x69, + 0x6b, 0xd0, 0xa7, 0xa7, 0xe5, 0x88, 0xd0, 0x47, + 0x71, 0xb8, 0xd2, 0x03, 0x05, 0x25, 0x56, 0x5c, + 0x10, 0x08, 0xc6, 0x9b, 0xd4, 0x67, 0xcd, 0x28, + 0xbe, 0x9c, 0x48, 0x14, 0x03, 0x01, 0x00, 0x01, + 0x01, 0x16, 0x03, 0x01, 0x00, 0x24, 0xc1, 0xb8, + 0xd3, 0x7f, 0xc5, 0xc2, 0x5a, 0x1d, 0x6d, 0x5b, + 0x2d, 0x5c, 0x82, 0x87, 0xc2, 0x6f, 0x0d, 0x63, + 0x7b, 0x72, 0x2b, 0xda, 0x69, 0xc4, 0xfe, 0x3c, + 0x84, 0xa1, 0x5a, 0x62, 0x38, 0x37, 0xc6, 0x54, + 0x25, 0x2a, + }, + + { + 0x14, 0x03, 0x01, 0x00, 0x01, 0x01, 0x16, 0x03, + 0x01, 0x00, 0x24, 0xea, 0x88, 0x9c, 0x00, 0xf6, + 0x35, 0xb8, 0x42, 0x7f, 0x15, 0x17, 0x76, 0x5e, + 0x4b, 0x24, 0xcb, 0x7e, 0xa0, 0x7b, 0xc3, 0x70, + 0x52, 0x0a, 0x88, 0x2a, 0x7a, 0x45, 0x59, 0x90, + 0x59, 0xac, 0xc6, 0xb5, 0x56, 0x55, 0x96, + }, +} + +var ecdheRSAAESClientScript = [][]byte{ + { + 0x16, 0x03, 0x01, 0x00, 0x4a, 0x01, 0x00, 0x00, + 0x46, 0x03, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 
0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xc0, 0x13, + 0x01, 0x00, 0x00, 0x1b, 0x00, 0x05, 0x00, 0x05, + 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0a, 0x00, + 0x08, 0x00, 0x06, 0x00, 0x17, 0x00, 0x18, 0x00, + 0x19, 0x00, 0x0b, 0x00, 0x02, 0x01, 0x00, + }, + { + 0x16, 0x03, 0x01, 0x00, 0x52, 0x02, 0x00, 0x00, + 0x4e, 0x03, 0x01, 0x50, 0xad, 0x72, 0xb1, 0x14, + 0x45, 0xce, 0x0a, 0x95, 0xf9, 0x63, 0xef, 0xa8, + 0xe5, 0x07, 0x34, 0x04, 0xe9, 0x08, 0x0f, 0x38, + 0xe4, 0x28, 0x27, 0x91, 0x07, 0x03, 0xe2, 0xfe, + 0xe3, 0x25, 0xf7, 0x20, 0x08, 0x42, 0xa2, 0x01, + 0x69, 0x53, 0xf0, 0xd9, 0x4c, 0xfa, 0x01, 0xa1, + 0xce, 0x4b, 0xf8, 0x28, 0x21, 0xad, 0x06, 0xbe, + 0xe0, 0x1b, 0x3b, 0xf7, 0xec, 0xd2, 0x52, 0xae, + 0x2a, 0x57, 0xb7, 0xa8, 0xc0, 0x13, 0x00, 0x00, + 0x06, 0x00, 0x0b, 0x00, 0x02, 0x01, 0x00, 0x16, + 0x03, 0x01, 0x02, 0x39, 0x0b, 0x00, 0x02, 0x35, + 0x00, 0x02, 0x32, 0x00, 0x02, 0x2f, 0x30, 0x82, + 0x02, 0x2b, 0x30, 0x82, 0x01, 0xd5, 0xa0, 0x03, + 0x02, 0x01, 0x02, 0x02, 0x09, 0x00, 0xb1, 0x35, + 0x13, 0x65, 0x11, 0x20, 0xc5, 0x92, 0x30, 0x0d, + 0x06, 0x09, 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, + 0x01, 0x01, 0x05, 0x05, 0x00, 0x30, 0x45, 0x31, + 0x0b, 0x30, 0x09, 0x06, 0x03, 0x55, 0x04, 0x06, + 0x13, 0x02, 0x41, 0x55, 0x31, 0x13, 0x30, 0x11, + 0x06, 0x03, 0x55, 0x04, 0x08, 0x13, 0x0a, 0x53, + 0x6f, 0x6d, 0x65, 0x2d, 0x53, 0x74, 0x61, 0x74, + 0x65, 0x31, 0x21, 0x30, 0x1f, 0x06, 0x03, 0x55, + 0x04, 0x0a, 0x13, 0x18, 0x49, 0x6e, 0x74, 0x65, + 0x72, 0x6e, 0x65, 0x74, 0x20, 0x57, 0x69, 0x64, + 0x67, 0x69, 0x74, 0x73, 0x20, 0x50, 0x74, 0x79, + 0x20, 0x4c, 0x74, 0x64, 0x30, 0x1e, 0x17, 0x0d, + 0x31, 0x32, 0x30, 0x34, 0x30, 0x36, 0x31, 0x37, + 0x31, 0x30, 0x31, 0x33, 0x5a, 0x17, 0x0d, 0x31, + 0x35, 0x30, 0x34, 0x30, 0x36, 0x31, 0x37, 0x31, + 0x30, 0x31, 0x33, 0x5a, 0x30, 0x45, 0x31, 0x0b, + 0x30, 0x09, 0x06, 0x03, 0x55, 0x04, 0x06, 0x13, + 0x02, 0x41, 0x55, 0x31, 0x13, 0x30, 0x11, 0x06, + 0x03, 0x55, 0x04, 0x08, 0x13, 0x0a, 0x53, 0x6f, + 0x6d, 0x65, 0x2d, 0x53, 0x74, 0x61, 0x74, 0x65, + 0x31, 0x21, 0x30, 0x1f, 0x06, 0x03, 0x55, 0x04, + 0x0a, 0x13, 0x18, 0x49, 0x6e, 0x74, 0x65, 0x72, + 0x6e, 0x65, 0x74, 0x20, 0x57, 0x69, 0x64, 0x67, + 0x69, 0x74, 0x73, 0x20, 0x50, 0x74, 0x79, 0x20, + 0x4c, 0x74, 0x64, 0x30, 0x5c, 0x30, 0x0d, 0x06, + 0x09, 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, + 0x01, 0x01, 0x05, 0x00, 0x03, 0x4b, 0x00, 0x30, + 0x48, 0x02, 0x41, 0x00, 0x9f, 0xb3, 0xc3, 0x84, + 0x27, 0x95, 0xff, 0x12, 0x31, 0x52, 0x0f, 0x15, + 0xef, 0x46, 0x11, 0xc4, 0xad, 0x80, 0xe6, 0x36, + 0x5b, 0x0f, 0xdd, 0x80, 0xd7, 0x61, 0x8d, 0xe0, + 0xfc, 0x72, 0x45, 0x09, 0x34, 0xfe, 0x55, 0x66, + 0x45, 0x43, 0x4c, 0x68, 0x97, 0x6a, 0xfe, 0xa8, + 0xa0, 0xa5, 0xdf, 0x5f, 0x78, 0xff, 0xee, 0xd7, + 0x64, 0xb8, 0x3f, 0x04, 0xcb, 0x6f, 0xff, 0x2a, + 0xfe, 0xfe, 0xb9, 0xed, 0x02, 0x03, 0x01, 0x00, + 0x01, 0xa3, 0x81, 0xa7, 0x30, 0x81, 0xa4, 0x30, + 0x1d, 0x06, 0x03, 0x55, 0x1d, 0x0e, 0x04, 0x16, + 0x04, 0x14, 0x78, 0xa6, 0x97, 0x9a, 0x63, 0xb5, + 0xc5, 0xa1, 0xa5, 0x33, 0xba, 0x22, 0x7c, 0x23, + 0x6e, 0x5b, 0x1b, 0x7a, 0xcc, 0x2b, 0x30, 0x75, + 0x06, 0x03, 0x55, 0x1d, 0x23, 0x04, 0x6e, 0x30, + 0x6c, 0x80, 0x14, 0x78, 0xa6, 0x97, 0x9a, 0x63, + 0xb5, 0xc5, 0xa1, 0xa5, 0x33, 0xba, 0x22, 0x7c, + 0x23, 0x6e, 0x5b, 0x1b, 0x7a, 0xcc, 0x2b, 0xa1, + 0x49, 0xa4, 0x47, 0x30, 0x45, 0x31, 0x0b, 0x30, + 0x09, 0x06, 0x03, 0x55, 0x04, 0x06, 0x13, 0x02, + 0x41, 0x55, 0x31, 0x13, 0x30, 0x11, 0x06, 0x03, + 0x55, 0x04, 0x08, 0x13, 0x0a, 0x53, 0x6f, 0x6d, + 0x65, 0x2d, 0x53, 0x74, 0x61, 0x74, 0x65, 0x31, + 0x21, 0x30, 0x1f, 0x06, 0x03, 0x55, 
0x04, 0x0a, + 0x13, 0x18, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x6e, + 0x65, 0x74, 0x20, 0x57, 0x69, 0x64, 0x67, 0x69, + 0x74, 0x73, 0x20, 0x50, 0x74, 0x79, 0x20, 0x4c, + 0x74, 0x64, 0x82, 0x09, 0x00, 0xb1, 0x35, 0x13, + 0x65, 0x11, 0x20, 0xc5, 0x92, 0x30, 0x0c, 0x06, + 0x03, 0x55, 0x1d, 0x13, 0x04, 0x05, 0x30, 0x03, + 0x01, 0x01, 0xff, 0x30, 0x0d, 0x06, 0x09, 0x2a, + 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x01, 0x05, + 0x05, 0x00, 0x03, 0x41, 0x00, 0x85, 0x36, 0x40, + 0x73, 0xc1, 0xbb, 0x1a, 0xda, 0xd4, 0x59, 0x9f, + 0x2d, 0xa2, 0x70, 0x31, 0x46, 0x74, 0xec, 0x83, + 0x6e, 0xa8, 0xc8, 0x3c, 0x51, 0xaf, 0x39, 0xac, + 0xec, 0x40, 0xbc, 0xe8, 0x22, 0x46, 0x1d, 0x99, + 0xd6, 0x46, 0x2a, 0x24, 0xd4, 0x8b, 0x05, 0x08, + 0x4b, 0xfb, 0x35, 0x11, 0x6e, 0x92, 0xbb, 0x77, + 0xba, 0xe4, 0x12, 0xbb, 0xf4, 0xc8, 0x5e, 0x9c, + 0x81, 0xa8, 0x97, 0x60, 0x4c, 0x16, 0x03, 0x01, + 0x00, 0x8b, 0x0c, 0x00, 0x00, 0x87, 0x03, 0x00, + 0x17, 0x41, 0x04, 0x1c, 0x8f, 0x9c, 0x6d, 0xe7, + 0xab, 0x3e, 0xf8, 0x0a, 0x5d, 0xe1, 0x86, 0xb4, + 0xe2, 0x8e, 0xb2, 0x1c, 0x3b, 0xd9, 0xb6, 0x08, + 0x80, 0x58, 0x21, 0xe9, 0x0e, 0xc6, 0x66, 0x67, + 0x97, 0xcb, 0xb9, 0x92, 0x07, 0x00, 0xc4, 0xe5, + 0xec, 0x5f, 0xb4, 0xe2, 0x20, 0xa9, 0xc9, 0x62, + 0xd0, 0x98, 0xd5, 0xe3, 0x53, 0xff, 0xd0, 0x0a, + 0x6e, 0x29, 0x69, 0x39, 0x2a, 0x4b, 0x5c, 0xd8, + 0x6c, 0xf5, 0xfe, 0x00, 0x40, 0x35, 0xa7, 0x26, + 0x2e, 0xc2, 0x48, 0x93, 0x32, 0xf7, 0x7d, 0x0f, + 0x0d, 0x77, 0x56, 0x9a, 0x85, 0x0c, 0xa6, 0x74, + 0x06, 0xb8, 0x3d, 0x90, 0x56, 0x12, 0x63, 0xff, + 0x00, 0x5e, 0x0f, 0xf7, 0x24, 0xf7, 0xdb, 0x48, + 0x71, 0xe9, 0x2e, 0x03, 0xd3, 0xfa, 0x3a, 0xae, + 0xa0, 0xc1, 0x77, 0x3c, 0x4c, 0x59, 0xce, 0x33, + 0x1a, 0xd2, 0x47, 0x83, 0xfa, 0xea, 0xd8, 0x1e, + 0x06, 0xe7, 0x7d, 0xa0, 0x9b, 0x16, 0x03, 0x01, + 0x00, 0x04, 0x0e, 0x00, 0x00, 0x00, + }, + { + 0x16, 0x03, 0x01, 0x00, 0x46, 0x10, 0x00, 0x00, + 0x42, 0x41, 0x04, 0x1e, 0x18, 0x37, 0xef, 0x0d, + 0x19, 0x51, 0x88, 0x35, 0x75, 0x71, 0xb5, 0xe5, + 0x54, 0x5b, 0x12, 0x2e, 0x8f, 0x09, 0x67, 0xfd, + 0xa7, 0x24, 0x20, 0x3e, 0xb2, 0x56, 0x1c, 0xce, + 0x97, 0x28, 0x5e, 0xf8, 0x2b, 0x2d, 0x4f, 0x9e, + 0xf1, 0x07, 0x9f, 0x6c, 0x4b, 0x5b, 0x83, 0x56, + 0xe2, 0x32, 0x42, 0xe9, 0x58, 0xb6, 0xd7, 0x49, + 0xa6, 0xb5, 0x68, 0x1a, 0x41, 0x03, 0x56, 0x6b, + 0xdc, 0x5a, 0x89, 0x14, 0x03, 0x01, 0x00, 0x01, + 0x01, 0x16, 0x03, 0x01, 0x00, 0x30, 0xd9, 0xa7, + 0x80, 0x56, 0x3f, 0xa3, 0x8f, 0x96, 0x72, 0x4e, + 0x4e, 0x6e, 0x23, 0x41, 0x8f, 0xda, 0x91, 0xb2, + 0x9e, 0x63, 0x23, 0x82, 0x64, 0xcd, 0x07, 0x24, + 0xd3, 0x40, 0x20, 0x22, 0x4c, 0xe3, 0xff, 0x38, + 0xbb, 0x43, 0x9d, 0x57, 0x11, 0xd5, 0x46, 0xa5, + 0x05, 0x29, 0x92, 0x02, 0xce, 0xdf, + }, + { + 0x14, 0x03, 0x01, 0x00, 0x01, 0x01, 0x16, 0x03, + 0x01, 0x00, 0x90, 0xe7, 0xba, 0x0e, 0xb1, 0xda, + 0x92, 0xb5, 0x77, 0x56, 0x38, 0xa6, 0x22, 0xc1, + 0x72, 0xeb, 0x8a, 0x68, 0x09, 0xb6, 0x74, 0xad, + 0xb3, 0x4a, 0xf2, 0xdd, 0x09, 0x9b, 0xc9, 0x4f, + 0x84, 0x73, 0x8b, 0xd6, 0x97, 0x50, 0x23, 0x1c, + 0xa0, 0xc2, 0x0c, 0x25, 0x18, 0xdd, 0x5e, 0x15, + 0x4d, 0xd9, 0xef, 0x4f, 0x6a, 0x43, 0x61, 0x9c, + 0x95, 0xde, 0x3c, 0x66, 0xc4, 0xc1, 0x33, 0x56, + 0xdd, 0x2f, 0x90, 0xaf, 0x68, 0x5c, 0x9c, 0xa4, + 0x90, 0x6d, 0xbf, 0x51, 0x1d, 0x68, 0xcb, 0x81, + 0x77, 0x52, 0xa0, 0x93, 0x2a, 0xf8, 0xc7, 0x61, + 0x87, 0x76, 0xca, 0x93, 0x9e, 0xd6, 0xee, 0x6f, + 0x3f, 0xeb, 0x7d, 0x06, 0xdd, 0x73, 0x4e, 0x27, + 0x16, 0x63, 0x92, 0xe4, 0xb2, 0x3f, 0x91, 0x23, + 0x21, 0x97, 0x90, 0xce, 0x53, 0xb8, 0xb0, 0x9d, + 0xbd, 0xbd, 0x33, 0x84, 0xad, 0x6b, 0x2e, 0x7b, + 0xf5, 0xeb, 0x1d, 0x64, 0x37, 0x2e, 0x29, 
0x4e, + 0xb0, 0x93, 0xdb, 0x92, 0xc7, 0xaa, 0x94, 0xa5, + 0x3b, 0x64, 0xd0, + }, + { + 0x17, 0x03, 0x01, 0x00, 0x20, 0x11, 0xd8, 0x6b, + 0x3c, 0xf6, 0xbe, 0xf4, 0x54, 0x87, 0xec, 0x75, + 0x0c, 0x44, 0xdb, 0x92, 0xfc, 0xde, 0x7e, 0x0f, + 0x9f, 0x87, 0x87, 0x9c, 0x03, 0xd5, 0x07, 0x84, + 0xe0, 0x3a, 0xf8, 0xae, 0x14, 0x17, 0x03, 0x01, + 0x00, 0x20, 0xba, 0x54, 0xef, 0x5b, 0xce, 0xfd, + 0x47, 0x76, 0x6d, 0xa1, 0x8b, 0xfd, 0x48, 0xde, + 0x6e, 0x26, 0xc1, 0x0c, 0x9d, 0x54, 0xbf, 0x98, + 0xf6, 0x1c, 0x80, 0xb9, 0xca, 0x93, 0x81, 0x0a, + 0x2e, 0x06, 0x15, 0x03, 0x01, 0x00, 0x20, 0x93, + 0x3e, 0x38, 0x17, 0xc9, 0x0a, 0xc3, 0xea, 0xd3, + 0x92, 0x75, 0xa6, 0x53, 0x37, 0x4d, 0x74, 0x94, + 0xbe, 0x01, 0xdc, 0x5c, 0x5a, 0x0f, 0x09, 0xf6, + 0x57, 0x33, 0xc3, 0xbc, 0x3f, 0x7a, 0x4d, + }, +} + +var emptyRecordScript = [][]byte{ + { + 0x16, 0x03, 0x01, 0x00, 0x4a, 0x01, 0x00, 0x00, + 0x46, 0x03, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x35, + 0x01, 0x00, 0x00, 0x1b, 0x00, 0x05, 0x00, 0x05, + 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0a, 0x00, + 0x08, 0x00, 0x06, 0x00, 0x17, 0x00, 0x18, 0x00, + 0x19, 0x00, 0x0b, 0x00, 0x02, 0x01, 0x00, + }, + { + 0x16, 0x03, 0x01, 0x00, 0x4a, 0x02, 0x00, 0x00, + 0x46, 0x03, 0x01, 0x51, 0x71, 0x8e, 0x03, 0x02, + 0xef, 0x09, 0xf2, 0x0e, 0xf5, 0x3b, 0x29, 0x9a, + 0xa8, 0x8b, 0x46, 0xa3, 0xd4, 0xb4, 0xc1, 0x14, + 0xc3, 0x19, 0x99, 0xba, 0x3d, 0x78, 0xcf, 0x50, + 0xd1, 0xe7, 0x26, 0x20, 0xa0, 0x37, 0x6d, 0xc9, + 0xae, 0x93, 0x33, 0x81, 0x20, 0xe3, 0xc1, 0x90, + 0x64, 0x6e, 0x67, 0x93, 0xdb, 0xb4, 0x04, 0x16, + 0xc4, 0x25, 0xdd, 0x10, 0x79, 0x3c, 0x18, 0x0a, + 0x7c, 0xfd, 0x28, 0x65, 0x00, 0x35, 0x00, 0x16, + 0x03, 0x01, 0x09, 0x9e, 0x0b, 0x00, 0x09, 0x9a, + 0x00, 0x09, 0x97, 0x00, 0x04, 0xea, 0x30, 0x82, + 0x04, 0xe6, 0x30, 0x82, 0x03, 0xce, 0xa0, 0x03, + 0x02, 0x01, 0x02, 0x02, 0x11, 0x00, 0xff, 0xab, + 0x02, 0x93, 0xe0, 0x72, 0x99, 0x18, 0x6c, 0x9e, + 0x96, 0xb8, 0xb9, 0xf7, 0x47, 0xcb, 0x30, 0x0d, + 0x06, 0x09, 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, + 0x01, 0x01, 0x05, 0x05, 0x00, 0x30, 0x41, 0x31, + 0x0b, 0x30, 0x09, 0x06, 0x03, 0x55, 0x04, 0x06, + 0x13, 0x02, 0x46, 0x52, 0x31, 0x12, 0x30, 0x10, + 0x06, 0x03, 0x55, 0x04, 0x0a, 0x13, 0x09, 0x47, + 0x41, 0x4e, 0x44, 0x49, 0x20, 0x53, 0x41, 0x53, + 0x31, 0x1e, 0x30, 0x1c, 0x06, 0x03, 0x55, 0x04, + 0x03, 0x13, 0x15, 0x47, 0x61, 0x6e, 0x64, 0x69, + 0x20, 0x53, 0x74, 0x61, 0x6e, 0x64, 0x61, 0x72, + 0x64, 0x20, 0x53, 0x53, 0x4c, 0x20, 0x43, 0x41, + 0x30, 0x1e, 0x17, 0x0d, 0x31, 0x33, 0x30, 0x31, + 0x31, 0x34, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, + 0x5a, 0x17, 0x0d, 0x31, 0x34, 0x30, 0x31, 0x31, + 0x34, 0x32, 0x33, 0x35, 0x39, 0x35, 0x39, 0x5a, + 0x30, 0x62, 0x31, 0x21, 0x30, 0x1f, 0x06, 0x03, + 0x55, 0x04, 0x0b, 0x13, 0x18, 0x44, 0x6f, 0x6d, + 0x61, 0x69, 0x6e, 0x20, 0x43, 0x6f, 0x6e, 0x74, + 0x72, 0x6f, 0x6c, 0x20, 0x56, 0x61, 0x6c, 0x69, + 0x64, 0x61, 0x74, 0x65, 0x64, 0x31, 0x24, 0x30, + 0x22, 0x06, 0x03, 0x55, 0x04, 0x0b, 0x13, 0x1b, + 0x47, 0x61, 0x6e, 0x64, 0x69, 0x20, 0x53, 0x74, + 0x61, 0x6e, 0x64, 0x61, 0x72, 0x64, 0x20, 0x57, + 0x69, 0x6c, 0x64, 0x63, 0x61, 0x72, 0x64, 0x20, + 0x53, 0x53, 0x4c, 0x31, 0x17, 0x30, 0x15, 0x06, + 0x03, 0x55, 0x04, 0x03, 0x14, 0x0e, 0x2a, 0x2e, + 0x66, 0x72, 0x65, 0x65, 0x6e, 0x6f, 0x64, 0x65, + 0x2e, 0x6e, 0x65, 0x74, 0x30, 0x82, 0x01, 0x22, + 0x30, 0x0d, 0x06, 0x09, 0x2a, 0x86, 0x48, 0x86, + 0xf7, 0x0d, 0x01, 
0x01, 0x01, 0x05, 0x00, 0x03, + 0x82, 0x01, 0x0f, 0x00, 0x30, 0x82, 0x01, 0x0a, + 0x02, 0x82, 0x01, 0x01, 0x00, 0xdc, 0xe3, 0xfd, + 0xce, 0xc1, 0x66, 0x62, 0x28, 0x8b, 0x99, 0x65, + 0x72, 0x52, 0x88, 0x93, 0x5b, 0x3f, 0x8d, 0xde, + 0x2b, 0xb0, 0xa0, 0xf4, 0xbd, 0xb4, 0x07, 0x5f, + 0x9e, 0x01, 0x47, 0x60, 0x57, 0x5f, 0xdf, 0xdc, + 0x63, 0x28, 0x1c, 0x1e, 0x5b, 0xc8, 0xe6, 0x29, + 0xdd, 0xeb, 0x26, 0x63, 0xd5, 0xbf, 0x83, 0xb2, + 0x2d, 0xcd, 0x2c, 0xa0, 0xb6, 0x91, 0xad, 0xaf, + 0x95, 0x21, 0x1d, 0x1f, 0x39, 0x8d, 0x3e, 0x17, + 0xd6, 0xbd, 0x99, 0xf5, 0x6c, 0xd4, 0xcb, 0x79, + 0x12, 0x3e, 0x11, 0xb9, 0x7e, 0x62, 0xbc, 0x2d, + 0xbf, 0xe0, 0x55, 0x1b, 0x5c, 0x1e, 0xce, 0x31, + 0xd9, 0xf8, 0x56, 0x68, 0x95, 0x2b, 0x15, 0x84, + 0x35, 0xae, 0x98, 0x2c, 0x63, 0x01, 0xb2, 0x0d, + 0xab, 0xa8, 0x61, 0xef, 0x7f, 0x15, 0x2c, 0x6d, + 0xf7, 0x67, 0x1d, 0xb8, 0x8d, 0xf6, 0xa2, 0x1c, + 0x4e, 0x85, 0xf0, 0xea, 0x1a, 0x2b, 0xc8, 0xac, + 0x70, 0x86, 0x9a, 0xbb, 0x9e, 0x9d, 0xbd, 0xc9, + 0x87, 0x2b, 0x9f, 0x5e, 0x40, 0x44, 0x9b, 0xba, + 0x96, 0x45, 0x24, 0xbc, 0x49, 0xb8, 0xfe, 0x26, + 0x3a, 0x1d, 0x1a, 0x0a, 0x3a, 0x90, 0x9c, 0x75, + 0x51, 0x59, 0x89, 0x98, 0x1a, 0x56, 0xe1, 0x3a, + 0x1a, 0xba, 0xff, 0xb4, 0x37, 0x7d, 0xd8, 0x99, + 0xe2, 0xeb, 0x45, 0x27, 0xe2, 0x42, 0x42, 0x46, + 0xbb, 0x00, 0x29, 0x9f, 0x30, 0xc9, 0x1e, 0x6c, + 0xce, 0x59, 0x0e, 0xbe, 0x16, 0x03, 0x31, 0xec, + 0x10, 0xc1, 0x6d, 0xca, 0x9d, 0x5f, 0x6d, 0xf1, + 0x26, 0x11, 0xe5, 0x50, 0xa1, 0xbb, 0x67, 0xb2, + 0xe0, 0x2b, 0xed, 0x76, 0x5b, 0xc7, 0x68, 0xc0, + 0x18, 0xad, 0x91, 0x9e, 0xb5, 0xd4, 0x4d, 0x21, + 0xcd, 0x98, 0xd9, 0xe0, 0x05, 0x0a, 0x4d, 0x24, + 0xa3, 0xe6, 0x12, 0x04, 0xdd, 0x50, 0xe6, 0xc8, + 0x7a, 0x69, 0xb9, 0x32, 0x43, 0x02, 0x03, 0x01, + 0x00, 0x01, 0xa3, 0x82, 0x01, 0xb6, 0x30, 0x82, + 0x01, 0xb2, 0x30, 0x1f, 0x06, 0x03, 0x55, 0x1d, + 0x23, 0x04, 0x18, 0x30, 0x16, 0x80, 0x14, 0xb6, + 0xa8, 0xff, 0xa2, 0xa8, 0x2f, 0xd0, 0xa6, 0xcd, + 0x4b, 0xb1, 0x68, 0xf3, 0xe7, 0x50, 0x10, 0x31, + 0xa7, 0x79, 0x21, 0x30, 0x1d, 0x06, 0x03, 0x55, + 0x1d, 0x0e, 0x04, 0x16, 0x04, 0x14, 0x62, 0x37, + 0xd4, 0x3c, 0xbf, 0xd9, 0xc2, 0x99, 0xf3, 0x28, + 0x3e, 0xdb, 0xca, 0xee, 0xf3, 0xb3, 0xc8, 0x73, + 0xb0, 0x3c, 0x30, 0x0e, 0x06, 0x03, 0x55, 0x1d, + 0x0f, 0x01, 0x01, 0xff, 0x04, 0x04, 0x03, 0x02, + 0x05, 0xa0, 0x30, 0x0c, 0x06, 0x03, 0x55, 0x1d, + 0x13, 0x01, 0x01, 0xff, 0x04, 0x02, 0x30, 0x00, + 0x30, 0x1d, 0x06, 0x03, 0x55, 0x1d, 0x25, 0x04, + 0x16, 0x30, 0x14, 0x06, 0x08, 0x2b, 0x06, 0x01, + 0x05, 0x05, 0x07, 0x03, 0x01, 0x06, 0x08, 0x2b, + 0x06, 0x01, 0x05, 0x05, 0x07, 0x03, 0x02, 0x30, + 0x60, 0x06, 0x03, 0x55, 0x1d, 0x20, 0x04, 0x59, + 0x30, 0x57, 0x30, 0x4b, 0x06, 0x0b, 0x2b, 0x06, + 0x01, 0x04, 0x01, 0xb2, 0x31, 0x01, 0x02, 0x02, + 0x1a, 0x30, 0x3c, 0x30, 0x3a, 0x06, 0x08, 0x2b, + 0x06, 0x01, 0x05, 0x05, 0x07, 0x02, 0x01, 0x16, + 0x2e, 0x68, 0x74, 0x74, 0x70, 0x3a, 0x2f, 0x2f, + 0x77, 0x77, 0x77, 0x2e, 0x67, 0x61, 0x6e, 0x64, + 0x69, 0x2e, 0x6e, 0x65, 0x74, 0x2f, 0x63, 0x6f, + 0x6e, 0x74, 0x72, 0x61, 0x63, 0x74, 0x73, 0x2f, + 0x66, 0x72, 0x2f, 0x73, 0x73, 0x6c, 0x2f, 0x63, + 0x70, 0x73, 0x2f, 0x70, 0x64, 0x66, 0x2f, 0x30, + 0x08, 0x06, 0x06, 0x67, 0x81, 0x0c, 0x01, 0x02, + 0x01, 0x30, 0x3c, 0x06, 0x03, 0x55, 0x1d, 0x1f, + 0x04, 0x35, 0x30, 0x33, 0x30, 0x31, 0xa0, 0x2f, + 0xa0, 0x2d, 0x86, 0x2b, 0x68, 0x74, 0x74, 0x70, + 0x3a, 0x2f, 0x2f, 0x63, 0x72, 0x6c, 0x2e, 0x67, + 0x61, 0x6e, 0x64, 0x69, 0x2e, 0x6e, 0x65, 0x74, + 0x2f, 0x47, 0x61, 0x6e, 0x64, 0x69, 0x53, 0x74, + 0x61, 0x6e, 0x64, 0x61, 0x72, 0x64, 0x53, 0x53, + 0x4c, 0x43, 0x41, 
0x2e, 0x63, 0x72, 0x6c, 0x30, + 0x6a, 0x06, 0x08, 0x2b, 0x06, 0x01, 0x05, 0x05, + 0x07, 0x01, 0x01, 0x04, 0x5e, 0x30, 0x5c, 0x30, + 0x37, 0x06, 0x08, 0x2b, 0x06, 0x01, 0x05, 0x05, + 0x07, 0x30, 0x02, 0x86, 0x2b, 0x68, 0x74, 0x74, + 0x70, 0x3a, 0x2f, 0x2f, 0x63, 0x72, 0x74, 0x2e, + 0x67, 0x61, 0x6e, 0x64, 0x69, 0x2e, 0x6e, 0x65, + 0x74, 0x2f, 0x47, 0x61, 0x6e, 0x64, 0x69, 0x53, + 0x74, 0x61, 0x6e, 0x64, 0x61, 0x72, 0x64, 0x53, + 0x53, 0x4c, 0x43, 0x41, 0x2e, 0x63, 0x72, 0x74, + 0x30, 0x21, 0x06, 0x08, 0x2b, 0x06, 0x01, 0x05, + 0x05, 0x07, 0x30, 0x01, 0x86, 0x15, 0x68, 0x74, + 0x74, 0x70, 0x3a, 0x2f, 0x2f, 0x6f, 0x63, 0x73, + 0x70, 0x2e, 0x67, 0x61, 0x6e, 0x64, 0x69, 0x2e, + 0x6e, 0x65, 0x74, 0x30, 0x27, 0x06, 0x03, 0x55, + 0x1d, 0x11, 0x04, 0x20, 0x30, 0x1e, 0x82, 0x0e, + 0x2a, 0x2e, 0x66, 0x72, 0x65, 0x65, 0x6e, 0x6f, + 0x64, 0x65, 0x2e, 0x6e, 0x65, 0x74, 0x82, 0x0c, + 0x66, 0x72, 0x65, 0x65, 0x6e, 0x6f, 0x64, 0x65, + 0x2e, 0x6e, 0x65, 0x74, 0x30, 0x0d, 0x06, 0x09, + 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x01, + 0x05, 0x05, 0x00, 0x03, 0x82, 0x01, 0x01, 0x00, + 0x5b, 0x4a, 0x3a, 0x1d, 0x75, 0xe0, 0xc0, 0x9e, + 0xc9, 0x16, 0x66, 0x7f, 0x73, 0x95, 0x6e, 0x35, + 0xe4, 0x27, 0xfa, 0x8c, 0x9d, 0xee, 0xb1, 0x37, + 0x42, 0x3f, 0x54, 0x6a, 0x9d, 0x41, 0x84, 0x57, + 0xe1, 0x03, 0x3d, 0x69, 0x61, 0x77, 0x3b, 0x91, + 0xa2, 0x70, 0x94, 0xb6, 0x8e, 0x41, 0x63, 0x70, + 0xf2, 0x16, 0x04, 0x50, 0x05, 0x14, 0xfb, 0x59, + 0x7d, 0x89, 0x09, 0x3f, 0xb6, 0xef, 0xca, 0x3c, + 0x89, 0x88, 0x08, 0xe9, 0xa1, 0xf3, 0x33, 0x31, + 0x05, 0x4d, 0x70, 0xff, 0xdd, 0xa7, 0xd2, 0xe2, + 0xa0, 0x94, 0x3a, 0xf7, 0xc2, 0x9f, 0xad, 0x2b, + 0x2e, 0x20, 0xfa, 0x6c, 0xe1, 0xfc, 0xe6, 0x62, + 0x22, 0xa1, 0x38, 0x93, 0xec, 0x3e, 0xce, 0xfd, + 0x1f, 0xdd, 0xd4, 0x7c, 0x39, 0x46, 0x8b, 0xb4, + 0x64, 0xfa, 0xa1, 0x46, 0x87, 0x78, 0x2c, 0xd7, + 0x9c, 0xdd, 0x60, 0xd6, 0xda, 0x8e, 0xd8, 0x29, + 0x6d, 0x61, 0xa7, 0x29, 0x07, 0x76, 0xfc, 0xf9, + 0xbd, 0xfd, 0x14, 0xeb, 0x44, 0x70, 0xff, 0xd0, + 0x23, 0x99, 0x83, 0xc5, 0x5c, 0x56, 0x88, 0xaa, + 0x34, 0xda, 0xa6, 0xb3, 0x9a, 0xbf, 0xda, 0x58, + 0x1e, 0xa4, 0xb8, 0xc0, 0x40, 0x9d, 0xf0, 0xfc, + 0xf1, 0x23, 0xc2, 0xbc, 0x59, 0xe1, 0x82, 0xed, + 0x5d, 0xfb, 0x99, 0xaf, 0xf5, 0xf5, 0x15, 0xb8, + 0x8b, 0x59, 0xce, 0xaa, 0xca, 0xdf, 0xdc, 0x94, + 0x11, 0xe0, 0x96, 0xbf, 0x9f, 0x54, 0xa4, 0x9f, + 0x54, 0x36, 0x4a, 0xe8, 0x93, 0xda, 0xf4, 0x8c, + 0xb0, 0x6b, 0x8d, 0x4a, 0x9e, 0x11, 0xae, 0xcb, + 0xcb, 0x33, 0x8a, 0x4d, 0xcd, 0x4e, 0xa5, 0x9b, + 0xe9, 0x14, 0x46, 0x43, 0x9b, 0x96, 0x5f, 0x6d, + 0xf2, 0xea, 0x40, 0xef, 0x14, 0xc3, 0x99, 0x9f, + 0x23, 0x1e, 0xa5, 0x13, 0xab, 0x08, 0xea, 0x8f, + 0x68, 0x5b, 0x7d, 0x71, 0xdf, 0x18, 0xd1, 0x57, + 0x00, 0x04, 0xa7, 0x30, 0x82, 0x04, 0xa3, 0x30, + 0x82, 0x03, 0x8b, 0xa0, 0x03, 0x02, 0x01, 0x02, + 0x02, 0x10, 0x5a, 0xb6, 0x1d, 0xac, 0x1e, 0x4d, + 0xa2, 0x06, 0x14, 0xc7, 0x55, 0x3d, 0x3d, 0xa9, + 0xb2, 0xdc, 0x30, 0x0d, 0x06, 0x09, 0x2a, 0x86, + 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x01, 0x05, 0x05, + 0x00, 0x30, 0x81, 0x97, 0x31, 0x0b, 0x30, 0x09, + 0x06, 0x03, 0x55, 0x04, 0x06, 0x13, 0x02, 0x55, + 0x53, 0x31, 0x0b, 0x30, 0x09, 0x06, 0x03, 0x55, + 0x04, 0x08, 0x13, 0x02, 0x55, 0x54, 0x31, 0x17, + 0x30, 0x15, 0x06, 0x03, 0x55, 0x04, 0x07, 0x13, + 0x0e, 0x53, 0x61, 0x6c, 0x74, 0x20, 0x4c, 0x61, + 0x6b, 0x65, 0x20, 0x43, 0x69, 0x74, 0x79, 0x31, + 0x1e, 0x30, 0x1c, 0x06, 0x03, 0x55, 0x04, 0x0a, + 0x13, 0x15, 0x54, 0x68, 0x65, 0x20, 0x55, 0x53, + 0x45, 0x52, 0x54, 0x52, 0x55, 0x53, 0x54, 0x20, + 0x4e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x31, + 0x21, 0x30, 0x1f, 
0x06, 0x03, 0x55, 0x04, 0x0b, + 0x13, 0x18, 0x68, 0x74, 0x74, 0x70, 0x3a, 0x2f, + 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x75, 0x73, 0x65, + 0x72, 0x74, 0x72, 0x75, 0x73, 0x74, 0x2e, 0x63, + 0x6f, 0x6d, 0x31, 0x1f, 0x30, 0x1d, 0x06, 0x03, + 0x55, 0x04, 0x03, 0x13, 0x16, 0x55, 0x54, 0x4e, + 0x2d, 0x55, 0x53, 0x45, 0x52, 0x46, 0x69, 0x72, + 0x73, 0x74, 0x2d, 0x48, 0x61, 0x72, 0x64, 0x77, + 0x61, 0x72, 0x65, 0x30, 0x1e, 0x17, 0x0d, 0x30, + 0x38, 0x31, 0x30, 0x32, 0x33, 0x30, 0x30, 0x30, + 0x30, 0x30, 0x30, 0x5a, 0x17, 0x0d, 0x32, 0x30, + 0x30, 0x35, 0x33, 0x30, 0x31, 0x30, 0x34, 0x38, + 0x33, 0x38, 0x5a, 0x30, 0x41, 0x31, 0x0b, 0x30, + 0x09, 0x06, 0x03, 0x55, 0x04, 0x06, 0x13, 0x02, + 0x46, 0x52, 0x31, 0x12, 0x30, 0x10, 0x06, 0x03, + 0x55, 0x04, 0x0a, 0x13, 0x09, 0x47, 0x41, 0x4e, + 0x44, 0x49, 0x20, 0x53, 0x41, 0x53, 0x31, 0x1e, + 0x30, 0x1c, 0x06, 0x03, 0x55, 0x04, 0x03, 0x13, + 0x15, 0x47, 0x61, 0x6e, 0x64, 0x69, 0x20, 0x53, + 0x74, 0x61, 0x6e, 0x64, 0x61, 0x72, 0x64, 0x20, + 0x53, 0x53, 0x4c, 0x20, 0x43, 0x41, 0x30, 0x82, + 0x01, 0x22, 0x30, 0x0d, 0x06, 0x09, 0x2a, 0x86, + 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x01, 0x01, 0x05, + 0x00, 0x03, 0x82, 0x01, 0x0f, 0x00, 0x30, 0x82, + 0x01, 0x0a, 0x02, 0x82, 0x01, 0x01, 0x00, 0xb6, + 0x54, 0x3d, 0xa5, 0xdb, 0x0d, 0x22, 0x78, 0x50, + 0x6a, 0x5a, 0x23, 0x89, 0x3f, 0x97, 0xa1, 0xd4, + 0x07, 0x1a, 0xa9, 0x58, 0x08, 0x9b, 0xa0, 0x15, + 0xc3, 0x32, 0xb6, 0xb7, 0xf1, 0xe8, 0xb9, 0xa5, + 0x6f, 0xad, 0x37, 0xf6, 0x6e, 0x71, 0x1b, 0xb4, + 0x75, 0x2d, 0x48, 0x5e, 0x9f, 0xc6, 0x15, 0xaa, + 0x81, 0xef, 0xe5, 0xc4, 0x88, 0x95, 0x8a, 0x3a, + 0x6c, 0x77, 0xcc, 0xb5, 0xcd, 0x65, 0xe4, 0x67, + 0xe5, 0x73, 0xc9, 0x50, 0x52, 0x94, 0xc1, 0x27, + 0x49, 0x3e, 0xa0, 0x6b, 0x41, 0x16, 0x41, 0xb6, + 0x94, 0x99, 0x41, 0xae, 0x3e, 0xcb, 0xe2, 0x06, + 0x46, 0x09, 0xe9, 0x4d, 0xbe, 0xc9, 0x4c, 0x55, + 0xa9, 0x18, 0x7e, 0xa6, 0xdf, 0x6e, 0xfd, 0x4a, + 0xb2, 0xcc, 0x6c, 0x4e, 0xd9, 0xc8, 0x50, 0x15, + 0x93, 0xb3, 0xf2, 0xe9, 0xe3, 0xc2, 0x6a, 0xad, + 0x3a, 0xd5, 0xfb, 0xc3, 0x79, 0x50, 0x9f, 0x25, + 0x79, 0x29, 0xb2, 0x47, 0x64, 0x7c, 0x20, 0x3e, + 0xe2, 0x08, 0x4d, 0x93, 0x29, 0x14, 0xb6, 0x34, + 0x6e, 0xcf, 0x71, 0x46, 0x7e, 0x76, 0x10, 0xf4, + 0xfd, 0x6c, 0xaa, 0x01, 0xd2, 0xc2, 0x06, 0xde, + 0x92, 0x83, 0xcc, 0x58, 0x90, 0x2e, 0x92, 0xde, + 0x1e, 0x65, 0xb7, 0x63, 0x2f, 0x3d, 0xb2, 0xeb, + 0x70, 0x8c, 0x4c, 0xe0, 0xbe, 0x15, 0x9d, 0xde, + 0xc1, 0x4d, 0x56, 0xf8, 0x0b, 0xc6, 0x8e, 0x07, + 0xb9, 0x5d, 0xdf, 0x95, 0xf0, 0x7b, 0x40, 0x1f, + 0x1a, 0x2c, 0xd7, 0x9c, 0x2b, 0x4b, 0x76, 0xf4, + 0x59, 0xf5, 0x43, 0xc1, 0x2c, 0x66, 0x10, 0x9e, + 0x9e, 0x66, 0x96, 0x60, 0x9d, 0x1c, 0x74, 0x1b, + 0x4e, 0x18, 0x5c, 0x08, 0xb0, 0x6e, 0x6c, 0xca, + 0x69, 0x1a, 0x02, 0xe9, 0xbb, 0xca, 0x78, 0xef, + 0x66, 0x2e, 0xe3, 0x32, 0xfd, 0x41, 0x5c, 0x95, + 0x74, 0x81, 0x4d, 0xf4, 0xda, 0xfe, 0x4b, 0x02, + 0x03, 0x01, 0x00, 0x01, 0xa3, 0x82, 0x01, 0x3e, + 0x30, 0x82, 0x01, 0x3a, 0x30, 0x1f, 0x06, 0x03, + 0x55, 0x1d, 0x23, 0x04, 0x18, 0x30, 0x16, 0x80, + 0x14, 0xa1, 0x72, 0x5f, 0x26, 0x1b, 0x28, 0x98, + 0x43, 0x95, 0x5d, 0x07, 0x37, 0xd5, 0x85, 0x96, + 0x9d, 0x4b, 0xd2, 0xc3, 0x45, 0x30, 0x1d, 0x06, + 0x03, 0x55, 0x1d, 0x0e, 0x04, 0x16, 0x04, 0x14, + 0xb6, 0xa8, 0xff, 0xa2, 0xa8, 0x2f, 0xd0, 0xa6, + 0xcd, 0x4b, 0xb1, 0x68, 0xf3, 0xe7, 0x50, 0x10, + 0x31, 0xa7, 0x79, 0x21, 0x30, 0x0e, 0x06, 0x03, + 0x55, 0x1d, 0x0f, 0x01, 0x01, 0xff, 0x04, 0x04, + 0x03, 0x02, 0x01, 0x06, 0x30, 0x12, 0x06, 0x03, + 0x55, 0x1d, 0x13, 0x01, 0x01, 0xff, 0x04, 0x08, + 0x30, 0x06, 0x01, 0x01, 0xff, 0x02, 0x01, 0x00, + 0x30, 0x18, 0x06, 
0x03, 0x55, 0x1d, 0x20, 0x04, + 0x11, 0x30, 0x0f, 0x30, 0x0d, 0x06, 0x0b, 0x2b, + 0x06, 0x01, 0x04, 0x01, 0xb2, 0x31, 0x01, 0x02, + 0x02, 0x1a, 0x30, 0x44, 0x06, 0x03, 0x55, 0x1d, + 0x1f, 0x04, 0x3d, 0x30, 0x3b, 0x30, 0x39, 0xa0, + 0x37, 0xa0, 0x35, 0x86, 0x33, 0x68, 0x74, 0x74, + 0x70, 0x3a, 0x2f, 0x2f, 0x63, 0x72, 0x6c, 0x2e, + 0x75, 0x73, 0x65, 0x72, 0x74, 0x72, 0x75, 0x73, + 0x74, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x55, 0x54, + 0x4e, 0x2d, 0x55, 0x53, 0x45, 0x52, 0x46, 0x69, + 0x72, 0x73, 0x74, 0x2d, 0x48, 0x61, 0x72, 0x64, + 0x77, 0x61, 0x72, 0x65, 0x2e, 0x63, 0x72, 0x6c, + 0x30, 0x74, 0x06, 0x08, 0x2b, 0x06, 0x01, 0x05, + 0x05, 0x07, 0x01, 0x01, 0x04, 0x68, 0x30, 0x66, + 0x30, 0x3d, 0x06, 0x08, 0x2b, 0x06, 0x01, 0x05, + 0x05, 0x07, 0x30, 0x02, 0x86, 0x31, 0x68, 0x74, + 0x74, 0x70, 0x3a, 0x2f, 0x2f, 0x63, 0x72, 0x74, + 0x2e, 0x75, 0x73, 0x65, 0x72, 0x74, 0x72, 0x75, + 0x73, 0x74, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x55, + 0x54, 0x4e, 0x41, 0x64, 0x64, 0x54, 0x72, 0x75, + 0x73, 0x74, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, + 0x5f, 0x43, 0x41, 0x2e, 0x63, 0x72, 0x74, 0x30, + 0x25, 0x06, 0x08, 0x2b, 0x06, 0x01, 0x05, 0x05, + 0x07, 0x30, 0x01, 0x86, 0x19, 0x68, 0x74, 0x74, + 0x70, 0x3a, 0x2f, 0x2f, 0x6f, 0x63, 0x73, 0x70, + 0x2e, 0x75, 0x73, 0x65, 0x72, 0x74, 0x72, 0x75, + 0x73, 0x74, 0x2e, 0x63, 0x6f, 0x6d, 0x30, 0x0d, + 0x06, 0x09, 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, + 0x01, 0x01, 0x05, 0x05, 0x00, 0x03, 0x82, 0x01, + 0x01, 0x00, 0x19, 0x53, 0xbf, 0x03, 0x3d, 0x9b, + 0xe2, 0x6b, 0x5a, 0xfd, 0xba, 0x49, 0x1f, 0x4f, + 0xec, 0xe1, 0xc6, 0x82, 0x39, 0x3c, 0xd2, 0x03, + 0x04, 0x0f, 0xab, 0x7b, 0x3e, 0x82, 0xa9, 0x85, + 0x10, 0x1f, 0xf4, 0xde, 0x32, 0xaf, 0x58, 0x3f, + 0xff, 0x70, 0xf3, 0x30, 0x1d, 0x97, 0x2d, 0x4c, + 0x9a, 0xe2, 0xec, 0x0c, 0x3e, 0x14, 0x2d, 0x2f, + 0x98, 0x48, 0x9d, 0xae, 0x16, 0x6a, 0xac, 0x2d, + 0x42, 0xaa, 0xb5, 0x64, 0xa4, 0x70, 0xbb, 0xeb, + 0x73, 0x94, 0x7b, 0x46, 0x4c, 0xe7, 0x7a, 0x14, + 0x76, 0x5b, 0x4c, 0x1d, 0x84, 0xa1, 0x20, 0x74, + 0x1f, 0x2e, 0x4b, 0x5c, 0x70, 0x88, 0xdc, 0xbd, + 0xf7, 0x19, 0x3d, 0xed, 0x59, 0x0d, 0xe2, 0x3f, + 0x26, 0xe2, 0x9c, 0xac, 0xa4, 0x3c, 0x95, 0x1c, + 0xf8, 0xbe, 0x8c, 0x03, 0xae, 0xf0, 0xe5, 0x9c, + 0x4d, 0xbc, 0xc7, 0x9b, 0x58, 0x00, 0xbf, 0xaf, + 0xad, 0xfa, 0x37, 0x6e, 0x71, 0x6d, 0x18, 0x34, + 0x0e, 0xc1, 0xea, 0x6a, 0xf8, 0x0d, 0xdf, 0x69, + 0x54, 0x56, 0x15, 0xf2, 0x28, 0xb3, 0xfe, 0xa4, + 0x63, 0xec, 0xc5, 0x04, 0x64, 0x60, 0xbb, 0xfe, + 0x2a, 0xf0, 0xf4, 0x87, 0xa1, 0xb0, 0xae, 0xbd, + 0xaa, 0xe4, 0x2f, 0xe3, 0x03, 0x0b, 0x2f, 0x66, + 0x5f, 0x85, 0xa4, 0x32, 0x7b, 0x46, 0xed, 0x25, + 0x0c, 0xe7, 0xf1, 0xb7, 0xe7, 0x19, 0xfd, 0x60, + 0xba, 0x5f, 0x87, 0x77, 0xde, 0x98, 0x07, 0x96, + 0xe4, 0x5e, 0xea, 0x63, 0x7d, 0xa8, 0xde, 0x55, + 0xda, 0x61, 0x5c, 0x3c, 0x90, 0x83, 0x43, 0x04, + 0x07, 0x3c, 0xdd, 0xf3, 0xf8, 0x9f, 0x06, 0x52, + 0x0a, 0xde, 0xc7, 0xb6, 0x7b, 0x8f, 0xe1, 0x11, + 0xf7, 0x04, 0x7a, 0x35, 0xff, 0x6a, 0xbc, 0x5b, + 0xc7, 0x50, 0x49, 0x08, 0x70, 0x6f, 0x94, 0x43, + 0xcd, 0x9e, 0xc7, 0x70, 0xf1, 0xdb, 0xd0, 0x6d, + 0xda, 0x8f, 0x16, 0x03, 0x01, 0x00, 0x0e, 0x0d, + 0x00, 0x00, 0x06, 0x03, 0x01, 0x02, 0x40, 0x00, + 0x00, 0x0e, 0x00, 0x00, 0x00, + }, + { + 0x16, 0x03, 0x01, 0x02, 0xbe, 0x0b, 0x00, 0x02, + 0xba, 0x00, 0x02, 0xb7, 0x00, 0x02, 0xb4, 0x30, + 0x82, 0x02, 0xb0, 0x30, 0x82, 0x02, 0x19, 0xa0, + 0x03, 0x02, 0x01, 0x02, 0x02, 0x09, 0x00, 0x85, + 0xb0, 0xbb, 0xa4, 0x8a, 0x7f, 0xb8, 0xca, 0x30, + 0x0d, 0x06, 0x09, 0x2a, 0x86, 0x48, 0x86, 0xf7, + 0x0d, 0x01, 0x01, 0x05, 0x05, 0x00, 0x30, 0x45, + 0x31, 0x0b, 0x30, 0x09, 0x06, 
0x03, 0x55, 0x04, + 0x06, 0x13, 0x02, 0x41, 0x55, 0x31, 0x13, 0x30, + 0x11, 0x06, 0x03, 0x55, 0x04, 0x08, 0x13, 0x0a, + 0x53, 0x6f, 0x6d, 0x65, 0x2d, 0x53, 0x74, 0x61, + 0x74, 0x65, 0x31, 0x21, 0x30, 0x1f, 0x06, 0x03, + 0x55, 0x04, 0x0a, 0x13, 0x18, 0x49, 0x6e, 0x74, + 0x65, 0x72, 0x6e, 0x65, 0x74, 0x20, 0x57, 0x69, + 0x64, 0x67, 0x69, 0x74, 0x73, 0x20, 0x50, 0x74, + 0x79, 0x20, 0x4c, 0x74, 0x64, 0x30, 0x1e, 0x17, + 0x0d, 0x31, 0x30, 0x30, 0x34, 0x32, 0x34, 0x30, + 0x39, 0x30, 0x39, 0x33, 0x38, 0x5a, 0x17, 0x0d, + 0x31, 0x31, 0x30, 0x34, 0x32, 0x34, 0x30, 0x39, + 0x30, 0x39, 0x33, 0x38, 0x5a, 0x30, 0x45, 0x31, + 0x0b, 0x30, 0x09, 0x06, 0x03, 0x55, 0x04, 0x06, + 0x13, 0x02, 0x41, 0x55, 0x31, 0x13, 0x30, 0x11, + 0x06, 0x03, 0x55, 0x04, 0x08, 0x13, 0x0a, 0x53, + 0x6f, 0x6d, 0x65, 0x2d, 0x53, 0x74, 0x61, 0x74, + 0x65, 0x31, 0x21, 0x30, 0x1f, 0x06, 0x03, 0x55, + 0x04, 0x0a, 0x13, 0x18, 0x49, 0x6e, 0x74, 0x65, + 0x72, 0x6e, 0x65, 0x74, 0x20, 0x57, 0x69, 0x64, + 0x67, 0x69, 0x74, 0x73, 0x20, 0x50, 0x74, 0x79, + 0x20, 0x4c, 0x74, 0x64, 0x30, 0x81, 0x9f, 0x30, + 0x0d, 0x06, 0x09, 0x2a, 0x86, 0x48, 0x86, 0xf7, + 0x0d, 0x01, 0x01, 0x01, 0x05, 0x00, 0x03, 0x81, + 0x8d, 0x00, 0x30, 0x81, 0x89, 0x02, 0x81, 0x81, + 0x00, 0xbb, 0x79, 0xd6, 0xf5, 0x17, 0xb5, 0xe5, + 0xbf, 0x46, 0x10, 0xd0, 0xdc, 0x69, 0xbe, 0xe6, + 0x2b, 0x07, 0x43, 0x5a, 0xd0, 0x03, 0x2d, 0x8a, + 0x7a, 0x43, 0x85, 0xb7, 0x14, 0x52, 0xe7, 0xa5, + 0x65, 0x4c, 0x2c, 0x78, 0xb8, 0x23, 0x8c, 0xb5, + 0xb4, 0x82, 0xe5, 0xde, 0x1f, 0x95, 0x3b, 0x7e, + 0x62, 0xa5, 0x2c, 0xa5, 0x33, 0xd6, 0xfe, 0x12, + 0x5c, 0x7a, 0x56, 0xfc, 0xf5, 0x06, 0xbf, 0xfa, + 0x58, 0x7b, 0x26, 0x3f, 0xb5, 0xcd, 0x04, 0xd3, + 0xd0, 0xc9, 0x21, 0x96, 0x4a, 0xc7, 0xf4, 0x54, + 0x9f, 0x5a, 0xbf, 0xef, 0x42, 0x71, 0x00, 0xfe, + 0x18, 0x99, 0x07, 0x7f, 0x7e, 0x88, 0x7d, 0x7d, + 0xf1, 0x04, 0x39, 0xc4, 0xa2, 0x2e, 0xdb, 0x51, + 0xc9, 0x7c, 0xe3, 0xc0, 0x4c, 0x3b, 0x32, 0x66, + 0x01, 0xcf, 0xaf, 0xb1, 0x1d, 0xb8, 0x71, 0x9a, + 0x1d, 0xdb, 0xdb, 0x89, 0x6b, 0xae, 0xda, 0x2d, + 0x79, 0x02, 0x03, 0x01, 0x00, 0x01, 0xa3, 0x81, + 0xa7, 0x30, 0x81, 0xa4, 0x30, 0x1d, 0x06, 0x03, + 0x55, 0x1d, 0x0e, 0x04, 0x16, 0x04, 0x14, 0xb1, + 0xad, 0xe2, 0x85, 0x5a, 0xcf, 0xcb, 0x28, 0xdb, + 0x69, 0xce, 0x23, 0x69, 0xde, 0xd3, 0x26, 0x8e, + 0x18, 0x88, 0x39, 0x30, 0x75, 0x06, 0x03, 0x55, + 0x1d, 0x23, 0x04, 0x6e, 0x30, 0x6c, 0x80, 0x14, + 0xb1, 0xad, 0xe2, 0x85, 0x5a, 0xcf, 0xcb, 0x28, + 0xdb, 0x69, 0xce, 0x23, 0x69, 0xde, 0xd3, 0x26, + 0x8e, 0x18, 0x88, 0x39, 0xa1, 0x49, 0xa4, 0x47, + 0x30, 0x45, 0x31, 0x0b, 0x30, 0x09, 0x06, 0x03, + 0x55, 0x04, 0x06, 0x13, 0x02, 0x41, 0x55, 0x31, + 0x13, 0x30, 0x11, 0x06, 0x03, 0x55, 0x04, 0x08, + 0x13, 0x0a, 0x53, 0x6f, 0x6d, 0x65, 0x2d, 0x53, + 0x74, 0x61, 0x74, 0x65, 0x31, 0x21, 0x30, 0x1f, + 0x06, 0x03, 0x55, 0x04, 0x0a, 0x13, 0x18, 0x49, + 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x65, 0x74, 0x20, + 0x57, 0x69, 0x64, 0x67, 0x69, 0x74, 0x73, 0x20, + 0x50, 0x74, 0x79, 0x20, 0x4c, 0x74, 0x64, 0x82, + 0x09, 0x00, 0x85, 0xb0, 0xbb, 0xa4, 0x8a, 0x7f, + 0xb8, 0xca, 0x30, 0x0c, 0x06, 0x03, 0x55, 0x1d, + 0x13, 0x04, 0x05, 0x30, 0x03, 0x01, 0x01, 0xff, + 0x30, 0x0d, 0x06, 0x09, 0x2a, 0x86, 0x48, 0x86, + 0xf7, 0x0d, 0x01, 0x01, 0x05, 0x05, 0x00, 0x03, + 0x81, 0x81, 0x00, 0x08, 0x6c, 0x45, 0x24, 0xc7, + 0x6b, 0xb1, 0x59, 0xab, 0x0c, 0x52, 0xcc, 0xf2, + 0xb0, 0x14, 0xd7, 0x87, 0x9d, 0x7a, 0x64, 0x75, + 0xb5, 0x5a, 0x95, 0x66, 0xe4, 0xc5, 0x2b, 0x8e, + 0xae, 0x12, 0x66, 0x1f, 0xeb, 0x4f, 0x38, 0xb3, + 0x6e, 0x60, 0xd3, 0x92, 0xfd, 0xf7, 0x41, 0x08, + 0xb5, 0x25, 0x13, 0xb1, 0x18, 
0x7a, 0x24, 0xfb, + 0x30, 0x1d, 0xba, 0xed, 0x98, 0xb9, 0x17, 0xec, + 0xe7, 0xd7, 0x31, 0x59, 0xdb, 0x95, 0xd3, 0x1d, + 0x78, 0xea, 0x50, 0x56, 0x5c, 0xd5, 0x82, 0x5a, + 0x2d, 0x5a, 0x5f, 0x33, 0xc4, 0xb6, 0xd8, 0xc9, + 0x75, 0x90, 0x96, 0x8c, 0x0f, 0x52, 0x98, 0xb5, + 0xcd, 0x98, 0x1f, 0x89, 0x20, 0x5f, 0xf2, 0xa0, + 0x1c, 0xa3, 0x1b, 0x96, 0x94, 0xdd, 0xa9, 0xfd, + 0x57, 0xe9, 0x70, 0xe8, 0x26, 0x6d, 0x71, 0x99, + 0x9b, 0x26, 0x6e, 0x38, 0x50, 0x29, 0x6c, 0x90, + 0xa7, 0xbd, 0xd9, 0x16, 0x03, 0x01, 0x01, 0x06, + 0x10, 0x00, 0x01, 0x02, 0x01, 0x00, 0x25, 0x48, + 0x6c, 0x0a, 0xde, 0x9d, 0x3a, 0x57, 0xe4, 0x2e, + 0xb9, 0xfc, 0xb4, 0x46, 0x1f, 0x20, 0x4f, 0x58, + 0x4d, 0x12, 0x08, 0xb4, 0x3e, 0x4c, 0xf5, 0xa8, + 0xa5, 0x16, 0x40, 0x29, 0x19, 0x04, 0x4d, 0xf9, + 0x54, 0x3a, 0x32, 0xd7, 0x79, 0xf2, 0x0e, 0xc1, + 0x7b, 0x0c, 0x62, 0x71, 0xbb, 0xb4, 0x8c, 0xe7, + 0x84, 0xd5, 0xf8, 0x11, 0x77, 0x7f, 0x87, 0x6c, + 0xfc, 0x25, 0xf3, 0x2d, 0x97, 0x3d, 0x1f, 0xf5, + 0xfc, 0x64, 0x94, 0x9f, 0xdd, 0x90, 0x82, 0xdd, + 0x11, 0x74, 0x74, 0x59, 0xa2, 0x1a, 0x71, 0xb2, + 0x55, 0x6d, 0x18, 0xca, 0x85, 0x47, 0x8b, 0x79, + 0x73, 0x06, 0x24, 0x38, 0xc3, 0x34, 0x98, 0x84, + 0x62, 0x81, 0xd8, 0xad, 0x54, 0xad, 0x13, 0xa5, + 0xf4, 0xe4, 0x82, 0x85, 0xd3, 0xe3, 0x9e, 0xeb, + 0xb5, 0xf5, 0x95, 0x83, 0x0e, 0xb9, 0x7d, 0xb6, + 0xda, 0x0c, 0xf6, 0x14, 0x6a, 0x60, 0x8c, 0x75, + 0x56, 0xf0, 0xe9, 0x60, 0xe0, 0x4c, 0xf4, 0x4e, + 0x84, 0x8b, 0x4f, 0xf4, 0x2f, 0xde, 0xb7, 0xec, + 0x61, 0xd3, 0x77, 0x07, 0x6e, 0x41, 0x57, 0xc9, + 0xd9, 0x1d, 0x75, 0xee, 0x42, 0x63, 0xdc, 0x58, + 0xad, 0xfc, 0xc7, 0xe1, 0x77, 0x49, 0xb1, 0x58, + 0x21, 0x96, 0x00, 0x55, 0x90, 0x6b, 0xf6, 0x2a, + 0x5a, 0x19, 0x25, 0x93, 0x59, 0x9d, 0xaf, 0x79, + 0x9b, 0x18, 0x5d, 0xf6, 0x5d, 0x64, 0x4b, 0x9a, + 0xf4, 0xde, 0xf2, 0x7f, 0xbd, 0x93, 0x7e, 0x45, + 0x3e, 0x17, 0xae, 0xbf, 0x52, 0xe1, 0xba, 0x8e, + 0x0b, 0xbc, 0x1e, 0x91, 0x9d, 0xf1, 0x4e, 0x0b, + 0xab, 0x9e, 0x5c, 0x4c, 0x6f, 0xf7, 0xf3, 0x8d, + 0x8c, 0x6d, 0xeb, 0x46, 0x05, 0x36, 0x7e, 0x2f, + 0x9c, 0xa1, 0x86, 0x15, 0xe1, 0xe4, 0xb4, 0x20, + 0x06, 0x44, 0x7b, 0x3c, 0x8b, 0x13, 0x96, 0xf5, + 0x02, 0xb1, 0x4f, 0x3c, 0x2d, 0x4a, 0x16, 0x03, + 0x01, 0x00, 0x86, 0x0f, 0x00, 0x00, 0x82, 0x00, + 0x80, 0x52, 0xb1, 0x0d, 0xfc, 0x85, 0x34, 0x56, + 0xb9, 0xdf, 0xa7, 0x8e, 0xf4, 0xfd, 0x02, 0x46, + 0x8a, 0x23, 0xcc, 0x53, 0x3b, 0x0f, 0xa7, 0x61, + 0xf3, 0xb5, 0xbf, 0xfe, 0x59, 0x77, 0x10, 0xd6, + 0x56, 0x93, 0x19, 0x6b, 0x2c, 0xf1, 0x35, 0x71, + 0xe3, 0x36, 0x2f, 0xa0, 0x90, 0x4e, 0x5a, 0xdf, + 0x8d, 0x06, 0x88, 0xcf, 0xb1, 0x06, 0x56, 0x8b, + 0x74, 0x8f, 0x02, 0x8e, 0x10, 0xd2, 0xab, 0x8d, + 0x3f, 0x3e, 0x02, 0xf1, 0x1a, 0x80, 0x6d, 0x0f, + 0x9e, 0x77, 0xd8, 0xfa, 0x92, 0xb3, 0x16, 0x40, + 0xeb, 0x9e, 0xca, 0xd7, 0xe4, 0x31, 0xcc, 0x63, + 0x5f, 0xe2, 0x4c, 0x85, 0x0e, 0xf2, 0xdd, 0xd3, + 0xfe, 0x7e, 0xa7, 0x60, 0x1c, 0xb4, 0x00, 0xd8, + 0xbe, 0x4b, 0x9b, 0x66, 0x78, 0x0f, 0xfb, 0x3b, + 0x52, 0x30, 0x2b, 0x8b, 0xd9, 0xef, 0x82, 0x0a, + 0xa4, 0x18, 0x1d, 0xb0, 0xb5, 0xbf, 0x54, 0x97, + 0x0c, 0x14, 0x03, 0x01, 0x00, 0x01, 0x01, 0x16, + 0x03, 0x01, 0x00, 0x30, 0xa1, 0x74, 0x22, 0xd8, + 0x86, 0x6a, 0xbe, 0x53, 0x34, 0x1d, 0xb3, 0x73, + 0xff, 0x51, 0xc0, 0xce, 0x8e, 0x7d, 0x9b, 0xab, + 0xcb, 0x8b, 0x79, 0xae, 0x04, 0x01, 0xa7, 0xf2, + 0x8e, 0x9d, 0xab, 0xa3, 0x73, 0x80, 0x5c, 0xff, + 0x96, 0x20, 0xbb, 0x8d, 0xc0, 0x02, 0x66, 0x6c, + 0x83, 0x4b, 0x78, 0x20, + }, + { + 0x14, 0x03, 0x01, 0x00, 0x01, 0x01, 0x16, 0x03, + 0x01, 0x00, 0x30, 0x29, 0xd4, 0xfd, 0x03, 0x8b, + 0x30, 0x20, 0xf7, 0xca, 0xc0, 0x6c, 0x83, 0x5d, + 
0x73, 0xcb, 0x81, 0x60, 0xe0, 0x9a, 0x09, 0xcb, + 0x33, 0x03, 0x80, 0x81, 0x4e, 0x84, 0x47, 0xd5, + 0x74, 0x6c, 0x3b, 0xb5, 0xc0, 0x48, 0x0d, 0x52, + 0xdd, 0xbe, 0xc2, 0x06, 0xf5, 0x79, 0x2b, 0x3e, + 0x99, 0x56, 0x94, 0x17, 0x03, 0x01, 0x00, 0x20, + 0x26, 0x46, 0x90, 0x9d, 0xef, 0x59, 0x00, 0xb6, + 0x70, 0xe8, 0x1e, 0x1a, 0x80, 0x8b, 0x04, 0xb2, + 0xfc, 0x51, 0xf8, 0x93, 0xbe, 0x00, 0x28, 0xba, + 0xb8, 0xdc, 0x51, 0x7e, 0x92, 0x80, 0xfa, 0xf2, + 0x17, 0x03, 0x01, 0x00, 0xe0, 0xb8, 0x2e, 0xc4, + 0x6b, 0x3f, 0xda, 0x39, 0x87, 0x7f, 0x03, 0x43, + 0x28, 0xdd, 0xb9, 0xf9, 0x9e, 0x16, 0xf5, 0xce, + 0x3f, 0x7e, 0x6a, 0x7b, 0xb3, 0x60, 0x14, 0xe1, + 0xea, 0x54, 0xc5, 0xe6, 0x05, 0x0a, 0x6c, 0xe0, + 0xef, 0x58, 0x29, 0x8a, 0x77, 0x64, 0x77, 0x5d, + 0x9c, 0xe2, 0xe0, 0x3c, 0x6d, 0x87, 0x82, 0xbe, + 0x47, 0x63, 0xd4, 0xfd, 0x0c, 0x25, 0xc4, 0xb1, + 0xfe, 0x29, 0x6f, 0x84, 0xfb, 0xab, 0x6e, 0xa7, + 0xf9, 0x22, 0x89, 0x97, 0x5b, 0x91, 0x0a, 0x07, + 0xe0, 0xef, 0x3d, 0x67, 0xee, 0x87, 0xa8, 0x33, + 0x02, 0x64, 0x33, 0xca, 0x15, 0x10, 0xb9, 0x57, + 0xd8, 0xe5, 0x1a, 0x4b, 0xe3, 0x45, 0xc1, 0x62, + 0x85, 0x50, 0xf1, 0x79, 0x54, 0xe1, 0x2e, 0x25, + 0x01, 0x3c, 0xdb, 0x2d, 0x39, 0x14, 0x2f, 0x9b, + 0xd0, 0x1d, 0xc1, 0xac, 0x73, 0x7d, 0xa4, 0xed, + 0x89, 0x98, 0xb1, 0xae, 0x8a, 0x9e, 0xc8, 0xa7, + 0xfe, 0x55, 0x27, 0xb5, 0xb5, 0xa2, 0xec, 0x7e, + 0xe3, 0x6b, 0x45, 0x19, 0xfa, 0x20, 0x1c, 0x33, + 0x83, 0x22, 0x33, 0x97, 0xd2, 0x5a, 0xc4, 0xf8, + 0x9a, 0x03, 0x13, 0x85, 0xf2, 0x2b, 0x04, 0x59, + 0x27, 0xd7, 0x0b, 0x42, 0x47, 0x9b, 0x7d, 0x4d, + 0xb2, 0x1a, 0x85, 0x7f, 0x97, 0xc2, 0xf2, 0x10, + 0xf0, 0xfa, 0x4e, 0x4b, 0x62, 0x43, 0x3a, 0x09, + 0x2e, 0xcd, 0x8f, 0xa8, 0xb6, 0x0b, 0x5f, 0x34, + 0xd7, 0x3b, 0xba, 0xd9, 0xe5, 0x01, 0x2d, 0x35, + 0xae, 0xc5, 0x4c, 0xab, 0x40, 0x64, 0xc2, 0xc9, + 0x8c, 0x69, 0x44, 0xf4, 0xb8, 0xb5, 0x3a, 0x05, + 0x3c, 0x29, 0x19, 0xb4, 0x09, 0x17, 0x03, 0x01, + 0x00, 0x20, 0xc8, 0xc5, 0xb7, 0xe3, 0xd2, 0x3e, + 0x27, 0xb5, 0x71, 0x8f, 0x52, 0x0b, 0xce, 0x17, + 0x64, 0x86, 0xa4, 0x34, 0x16, 0x1b, 0x61, 0x64, + 0x7c, 0xb3, 0xf2, 0xe5, 0x3e, 0xfd, 0xdd, 0xfb, + 0x40, 0x78, 0x17, 0x03, 0x01, 0x00, 0x50, 0x8e, + 0x79, 0xf0, 0x8e, 0x76, 0x5d, 0x34, 0x09, 0xdc, + 0xec, 0x6d, 0xc3, 0x43, 0x1d, 0xcb, 0x2d, 0xaa, + 0x08, 0x7a, 0x51, 0x94, 0x4e, 0xc5, 0x26, 0xe4, + 0x0b, 0x8e, 0x8f, 0x51, 0xf2, 0x9f, 0xeb, 0xc3, + 0x18, 0x43, 0x95, 0x15, 0xfc, 0x59, 0x18, 0x25, + 0x47, 0xb6, 0x4a, 0x6e, 0xa3, 0xa4, 0x3b, 0xa3, + 0x47, 0x34, 0x74, 0x6b, 0xc5, 0x3d, 0x41, 0x14, + 0x64, 0xd5, 0x69, 0x5f, 0x77, 0xf3, 0x7c, 0x41, + 0xc6, 0xed, 0x2e, 0xcf, 0xff, 0x40, 0xf2, 0xce, + 0xbb, 0xa7, 0x4e, 0x73, 0x88, 0x98, 0x10, + }, + { + 0x15, 0x03, 0x01, 0x00, 0x20, 0x1a, 0xbc, 0x70, + 0x24, 0xf8, 0xfb, 0xf2, 0x4a, 0xf9, 0x44, 0x1e, + 0x58, 0xf8, 0xaa, 0x41, 0x24, 0xe8, 0x80, 0x33, + 0x45, 0x18, 0xa1, 0x5d, 0xee, 0x16, 0x80, 0xae, + 0x40, 0x41, 0x8e, 0x41, 0x9b, + }, +} + +var tls11ECDHEAESClientScript = [][]byte{ + { + 0x16, 0x03, 0x01, 0x00, 0x4a, 0x01, 0x00, 0x00, + 0x46, 0x03, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xc0, 0x13, + 0x01, 0x00, 0x00, 0x1b, 0x00, 0x05, 0x00, 0x05, + 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0a, 0x00, + 0x08, 0x00, 0x06, 0x00, 0x17, 0x00, 0x18, 0x00, + 0x19, 0x00, 0x0b, 0x00, 0x02, 0x01, 0x00, + }, + { + 0x16, 0x03, 0x02, 0x00, 0x54, 0x02, 0x00, 0x00, + 0x50, 0x03, 0x02, 0x51, 0x9f, 0xa2, 0x21, 0x1a, + 0xb7, 
0x75, 0x42, 0x69, 0xd3, 0x14, 0xdd, 0x05, + 0x1e, 0xda, 0x13, 0x71, 0x8d, 0x6a, 0x45, 0x97, + 0xcb, 0xee, 0x0e, 0x77, 0x01, 0x0d, 0x6e, 0xe5, + 0x22, 0x70, 0x16, 0x20, 0x69, 0xfc, 0xa6, 0x9a, + 0xe8, 0x21, 0xcc, 0x46, 0x65, 0x05, 0xb4, 0x48, + 0x0f, 0x34, 0x63, 0x2c, 0xac, 0xa4, 0xf5, 0x4b, + 0x64, 0xd1, 0x07, 0x13, 0xa7, 0xe4, 0x5b, 0xa3, + 0x4d, 0x31, 0x41, 0x53, 0xc0, 0x13, 0x00, 0x00, + 0x08, 0x00, 0x0b, 0x00, 0x04, 0x03, 0x00, 0x01, + 0x02, 0x16, 0x03, 0x02, 0x02, 0x39, 0x0b, 0x00, + 0x02, 0x35, 0x00, 0x02, 0x32, 0x00, 0x02, 0x2f, + 0x30, 0x82, 0x02, 0x2b, 0x30, 0x82, 0x01, 0xd5, + 0xa0, 0x03, 0x02, 0x01, 0x02, 0x02, 0x09, 0x00, + 0xb1, 0x35, 0x13, 0x65, 0x11, 0x20, 0xc5, 0x92, + 0x30, 0x0d, 0x06, 0x09, 0x2a, 0x86, 0x48, 0x86, + 0xf7, 0x0d, 0x01, 0x01, 0x05, 0x05, 0x00, 0x30, + 0x45, 0x31, 0x0b, 0x30, 0x09, 0x06, 0x03, 0x55, + 0x04, 0x06, 0x13, 0x02, 0x41, 0x55, 0x31, 0x13, + 0x30, 0x11, 0x06, 0x03, 0x55, 0x04, 0x08, 0x13, + 0x0a, 0x53, 0x6f, 0x6d, 0x65, 0x2d, 0x53, 0x74, + 0x61, 0x74, 0x65, 0x31, 0x21, 0x30, 0x1f, 0x06, + 0x03, 0x55, 0x04, 0x0a, 0x13, 0x18, 0x49, 0x6e, + 0x74, 0x65, 0x72, 0x6e, 0x65, 0x74, 0x20, 0x57, + 0x69, 0x64, 0x67, 0x69, 0x74, 0x73, 0x20, 0x50, + 0x74, 0x79, 0x20, 0x4c, 0x74, 0x64, 0x30, 0x1e, + 0x17, 0x0d, 0x31, 0x32, 0x30, 0x34, 0x30, 0x36, + 0x31, 0x37, 0x31, 0x30, 0x31, 0x33, 0x5a, 0x17, + 0x0d, 0x31, 0x35, 0x30, 0x34, 0x30, 0x36, 0x31, + 0x37, 0x31, 0x30, 0x31, 0x33, 0x5a, 0x30, 0x45, + 0x31, 0x0b, 0x30, 0x09, 0x06, 0x03, 0x55, 0x04, + 0x06, 0x13, 0x02, 0x41, 0x55, 0x31, 0x13, 0x30, + 0x11, 0x06, 0x03, 0x55, 0x04, 0x08, 0x13, 0x0a, + 0x53, 0x6f, 0x6d, 0x65, 0x2d, 0x53, 0x74, 0x61, + 0x74, 0x65, 0x31, 0x21, 0x30, 0x1f, 0x06, 0x03, + 0x55, 0x04, 0x0a, 0x13, 0x18, 0x49, 0x6e, 0x74, + 0x65, 0x72, 0x6e, 0x65, 0x74, 0x20, 0x57, 0x69, + 0x64, 0x67, 0x69, 0x74, 0x73, 0x20, 0x50, 0x74, + 0x79, 0x20, 0x4c, 0x74, 0x64, 0x30, 0x5c, 0x30, + 0x0d, 0x06, 0x09, 0x2a, 0x86, 0x48, 0x86, 0xf7, + 0x0d, 0x01, 0x01, 0x01, 0x05, 0x00, 0x03, 0x4b, + 0x00, 0x30, 0x48, 0x02, 0x41, 0x00, 0x9f, 0xb3, + 0xc3, 0x84, 0x27, 0x95, 0xff, 0x12, 0x31, 0x52, + 0x0f, 0x15, 0xef, 0x46, 0x11, 0xc4, 0xad, 0x80, + 0xe6, 0x36, 0x5b, 0x0f, 0xdd, 0x80, 0xd7, 0x61, + 0x8d, 0xe0, 0xfc, 0x72, 0x45, 0x09, 0x34, 0xfe, + 0x55, 0x66, 0x45, 0x43, 0x4c, 0x68, 0x97, 0x6a, + 0xfe, 0xa8, 0xa0, 0xa5, 0xdf, 0x5f, 0x78, 0xff, + 0xee, 0xd7, 0x64, 0xb8, 0x3f, 0x04, 0xcb, 0x6f, + 0xff, 0x2a, 0xfe, 0xfe, 0xb9, 0xed, 0x02, 0x03, + 0x01, 0x00, 0x01, 0xa3, 0x81, 0xa7, 0x30, 0x81, + 0xa4, 0x30, 0x1d, 0x06, 0x03, 0x55, 0x1d, 0x0e, + 0x04, 0x16, 0x04, 0x14, 0x78, 0xa6, 0x97, 0x9a, + 0x63, 0xb5, 0xc5, 0xa1, 0xa5, 0x33, 0xba, 0x22, + 0x7c, 0x23, 0x6e, 0x5b, 0x1b, 0x7a, 0xcc, 0x2b, + 0x30, 0x75, 0x06, 0x03, 0x55, 0x1d, 0x23, 0x04, + 0x6e, 0x30, 0x6c, 0x80, 0x14, 0x78, 0xa6, 0x97, + 0x9a, 0x63, 0xb5, 0xc5, 0xa1, 0xa5, 0x33, 0xba, + 0x22, 0x7c, 0x23, 0x6e, 0x5b, 0x1b, 0x7a, 0xcc, + 0x2b, 0xa1, 0x49, 0xa4, 0x47, 0x30, 0x45, 0x31, + 0x0b, 0x30, 0x09, 0x06, 0x03, 0x55, 0x04, 0x06, + 0x13, 0x02, 0x41, 0x55, 0x31, 0x13, 0x30, 0x11, + 0x06, 0x03, 0x55, 0x04, 0x08, 0x13, 0x0a, 0x53, + 0x6f, 0x6d, 0x65, 0x2d, 0x53, 0x74, 0x61, 0x74, + 0x65, 0x31, 0x21, 0x30, 0x1f, 0x06, 0x03, 0x55, + 0x04, 0x0a, 0x13, 0x18, 0x49, 0x6e, 0x74, 0x65, + 0x72, 0x6e, 0x65, 0x74, 0x20, 0x57, 0x69, 0x64, + 0x67, 0x69, 0x74, 0x73, 0x20, 0x50, 0x74, 0x79, + 0x20, 0x4c, 0x74, 0x64, 0x82, 0x09, 0x00, 0xb1, + 0x35, 0x13, 0x65, 0x11, 0x20, 0xc5, 0x92, 0x30, + 0x0c, 0x06, 0x03, 0x55, 0x1d, 0x13, 0x04, 0x05, + 0x30, 0x03, 0x01, 0x01, 0xff, 0x30, 0x0d, 0x06, + 0x09, 
0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, + 0x01, 0x05, 0x05, 0x00, 0x03, 0x41, 0x00, 0x85, + 0x36, 0x40, 0x73, 0xc1, 0xbb, 0x1a, 0xda, 0xd4, + 0x59, 0x9f, 0x2d, 0xa2, 0x70, 0x31, 0x46, 0x74, + 0xec, 0x83, 0x6e, 0xa8, 0xc8, 0x3c, 0x51, 0xaf, + 0x39, 0xac, 0xec, 0x40, 0xbc, 0xe8, 0x22, 0x46, + 0x1d, 0x99, 0xd6, 0x46, 0x2a, 0x24, 0xd4, 0x8b, + 0x05, 0x08, 0x4b, 0xfb, 0x35, 0x11, 0x6e, 0x92, + 0xbb, 0x77, 0xba, 0xe4, 0x12, 0xbb, 0xf4, 0xc8, + 0x5e, 0x9c, 0x81, 0xa8, 0x97, 0x60, 0x4c, 0x16, + 0x03, 0x02, 0x00, 0x8b, 0x0c, 0x00, 0x00, 0x87, + 0x03, 0x00, 0x17, 0x41, 0x04, 0x34, 0xde, 0x50, + 0x32, 0x8f, 0x25, 0x6b, 0x37, 0x2c, 0x36, 0x24, + 0x27, 0x0e, 0xf9, 0x67, 0xb4, 0xf8, 0x29, 0x1c, + 0xa5, 0xa4, 0x59, 0x9a, 0xca, 0x40, 0x26, 0x15, + 0x61, 0x72, 0x34, 0x4a, 0xd3, 0x0c, 0xac, 0x69, + 0xcb, 0x2a, 0x9e, 0xf8, 0x80, 0xfb, 0x7a, 0xc4, + 0xd4, 0x4b, 0x91, 0x1b, 0xbe, 0x24, 0x26, 0xad, + 0x19, 0x24, 0xbe, 0x32, 0x58, 0xfb, 0xc7, 0x77, + 0xce, 0x7e, 0x71, 0x51, 0x1a, 0x00, 0x40, 0x1a, + 0x0b, 0xe8, 0x91, 0x84, 0x64, 0x54, 0xb6, 0x19, + 0xe8, 0xd4, 0x43, 0x7c, 0x09, 0x0c, 0x2e, 0xba, + 0x42, 0xb9, 0x74, 0xc3, 0x6c, 0x06, 0x9b, 0xa6, + 0x7e, 0x92, 0xe9, 0xee, 0x7c, 0x74, 0xa9, 0xd3, + 0x63, 0xf0, 0x16, 0x20, 0x60, 0x71, 0x8e, 0x24, + 0xc7, 0x7f, 0xc5, 0x5b, 0x9c, 0x19, 0x0c, 0x80, + 0x15, 0x61, 0xbf, 0xb6, 0xed, 0x5b, 0x7b, 0x90, + 0xc5, 0x05, 0x13, 0x72, 0x45, 0x79, 0xdf, 0x16, + 0x03, 0x02, 0x00, 0x04, 0x0e, 0x00, 0x00, 0x00, + }, + { + 0x16, 0x03, 0x02, 0x00, 0x46, 0x10, 0x00, 0x00, + 0x42, 0x41, 0x04, 0x1e, 0x18, 0x37, 0xef, 0x0d, + 0x19, 0x51, 0x88, 0x35, 0x75, 0x71, 0xb5, 0xe5, + 0x54, 0x5b, 0x12, 0x2e, 0x8f, 0x09, 0x67, 0xfd, + 0xa7, 0x24, 0x20, 0x3e, 0xb2, 0x56, 0x1c, 0xce, + 0x97, 0x28, 0x5e, 0xf8, 0x2b, 0x2d, 0x4f, 0x9e, + 0xf1, 0x07, 0x9f, 0x6c, 0x4b, 0x5b, 0x83, 0x56, + 0xe2, 0x32, 0x42, 0xe9, 0x58, 0xb6, 0xd7, 0x49, + 0xa6, 0xb5, 0x68, 0x1a, 0x41, 0x03, 0x56, 0x6b, + 0xdc, 0x5a, 0x89, 0x14, 0x03, 0x02, 0x00, 0x01, + 0x01, 0x16, 0x03, 0x02, 0x00, 0x40, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf8, 0x50, + 0x32, 0x26, 0x51, 0xbd, 0xbd, 0x3c, 0x4f, 0x72, + 0xbf, 0xbc, 0x91, 0x70, 0x4b, 0x5d, 0x43, 0x4a, + 0x65, 0x26, 0x0d, 0xaa, 0xed, 0x00, 0x91, 0xaf, + 0x4f, 0x47, 0x09, 0xaa, 0x79, 0xc4, 0x47, 0x21, + 0x71, 0xd8, 0x2b, 0xc1, 0x51, 0xc8, 0xef, 0xed, + 0x67, 0xde, 0x97, 0xef, 0x18, 0x53, + }, + { + 0x14, 0x03, 0x02, 0x00, 0x01, 0x01, 0x16, 0x03, + 0x02, 0x00, 0x40, 0x72, 0x20, 0xbf, 0xd1, 0xbd, + 0x83, 0x53, 0x57, 0xb0, 0x4e, 0xac, 0xba, 0x1a, + 0x2b, 0x2d, 0xeb, 0x8a, 0x48, 0x17, 0xfa, 0x69, + 0xf9, 0xb5, 0x94, 0x8e, 0x6f, 0x9c, 0xda, 0x59, + 0xba, 0x6c, 0x7c, 0x82, 0xe2, 0x53, 0xa9, 0x46, + 0xdc, 0x33, 0xa0, 0x9b, 0xf0, 0x1e, 0xf1, 0x53, + 0x83, 0x48, 0xbf, 0x5e, 0xef, 0x03, 0x2b, 0x50, + 0x7a, 0xa6, 0xf8, 0xc3, 0x9e, 0x24, 0x43, 0x3a, + 0xdf, 0x44, 0x3e, + }, + { + 0x17, 0x03, 0x02, 0x00, 0x30, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x22, 0x0b, 0x8f, + 0x6b, 0xf9, 0xd3, 0x9f, 0x2b, 0x49, 0xe0, 0x62, + 0x9a, 0x0b, 0x3e, 0xa2, 0x72, 0x8b, 0x96, 0x0c, + 0x41, 0x09, 0x95, 0x9e, 0x6b, 0x26, 0xa1, 0x46, + 0xca, 0xb8, 0xb6, 0xd2, 0xd4, 0x15, 0x03, 0x02, + 0x00, 0x30, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0xa0, 0xd4, 0x84, 0xc6, 0x7e, 0x1c, + 0x2f, 0xbd, 0x6b, 0x45, 0x31, 0x1d, 0x7d, 0x8f, + 0x31, 0x39, 0x5a, 0x4e, 0xaa, 0xf1, 0x0a, 0x8a, + 0x6c, 0x33, 0x59, 0x19, 0xd8, 0x75, 0x80, 0xab, + 0x93, 0x81, + }, +} + 
+var clientChainCertificateScript = [][]byte{ + { + 0x16, 0x03, 0x01, 0x00, 0x4a, 0x01, 0x00, 0x00, + 0x46, 0x03, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x05, + 0x01, 0x00, 0x00, 0x1b, 0x00, 0x05, 0x00, 0x05, + 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0a, 0x00, + 0x08, 0x00, 0x06, 0x00, 0x17, 0x00, 0x18, 0x00, + 0x19, 0x00, 0x0b, 0x00, 0x02, 0x01, 0x00, + }, + { + 0x16, 0x03, 0x01, 0x00, 0x4a, 0x02, 0x00, 0x00, + 0x46, 0x03, 0x01, 0x51, 0xa2, 0x9b, 0x8b, 0xd4, + 0xe6, 0x33, 0xa2, 0x70, 0x38, 0x37, 0xba, 0x55, + 0x86, 0xcf, 0x87, 0xea, 0x6d, 0x2c, 0x3e, 0x17, + 0xc2, 0x09, 0xf8, 0x4d, 0xb0, 0x5d, 0x93, 0x2b, + 0x15, 0x99, 0x0c, 0x20, 0x5d, 0x61, 0x21, 0x2c, + 0xed, 0x49, 0x32, 0x29, 0x08, 0x6e, 0x21, 0x58, + 0x00, 0xdb, 0x34, 0xb7, 0x37, 0xcd, 0x27, 0x75, + 0x31, 0x1e, 0x6c, 0x74, 0xa6, 0xef, 0xa2, 0xc4, + 0x2b, 0x6c, 0xc3, 0x03, 0x00, 0x05, 0x00, 0x16, + 0x03, 0x01, 0x03, 0xef, 0x0b, 0x00, 0x03, 0xeb, + 0x00, 0x03, 0xe8, 0x00, 0x03, 0xe5, 0x30, 0x82, + 0x03, 0xe1, 0x30, 0x82, 0x02, 0xc9, 0xa0, 0x03, + 0x02, 0x01, 0x02, 0x02, 0x09, 0x00, 0xcc, 0x22, + 0x4c, 0x4b, 0x98, 0xa2, 0x88, 0xfc, 0x30, 0x0d, + 0x06, 0x09, 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, + 0x01, 0x01, 0x05, 0x05, 0x00, 0x30, 0x81, 0x86, + 0x31, 0x0b, 0x30, 0x09, 0x06, 0x03, 0x55, 0x04, + 0x06, 0x13, 0x02, 0x55, 0x53, 0x31, 0x0b, 0x30, + 0x09, 0x06, 0x03, 0x55, 0x04, 0x08, 0x0c, 0x02, + 0x4e, 0x59, 0x31, 0x11, 0x30, 0x0f, 0x06, 0x03, + 0x55, 0x04, 0x07, 0x0c, 0x08, 0x42, 0x72, 0x6f, + 0x6f, 0x6b, 0x6c, 0x79, 0x6e, 0x31, 0x21, 0x30, + 0x1f, 0x06, 0x03, 0x55, 0x04, 0x0a, 0x0c, 0x18, + 0x4d, 0x79, 0x20, 0x43, 0x65, 0x72, 0x74, 0x69, + 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x20, 0x41, + 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x74, 0x79, + 0x31, 0x11, 0x30, 0x0f, 0x06, 0x03, 0x55, 0x04, + 0x03, 0x0c, 0x08, 0x6d, 0x79, 0x63, 0x61, 0x2e, + 0x6f, 0x72, 0x67, 0x31, 0x21, 0x30, 0x1f, 0x06, + 0x09, 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, + 0x09, 0x01, 0x16, 0x12, 0x6a, 0x76, 0x73, 0x68, + 0x61, 0x68, 0x69, 0x64, 0x40, 0x67, 0x6d, 0x61, + 0x69, 0x6c, 0x2e, 0x63, 0x6f, 0x6d, 0x30, 0x1e, + 0x17, 0x0d, 0x31, 0x33, 0x30, 0x35, 0x32, 0x36, + 0x32, 0x31, 0x30, 0x35, 0x30, 0x31, 0x5a, 0x17, + 0x0d, 0x32, 0x33, 0x30, 0x35, 0x32, 0x34, 0x32, + 0x31, 0x30, 0x35, 0x30, 0x31, 0x5a, 0x30, 0x81, + 0x86, 0x31, 0x0b, 0x30, 0x09, 0x06, 0x03, 0x55, + 0x04, 0x06, 0x13, 0x02, 0x55, 0x53, 0x31, 0x0b, + 0x30, 0x09, 0x06, 0x03, 0x55, 0x04, 0x08, 0x0c, + 0x02, 0x4e, 0x59, 0x31, 0x11, 0x30, 0x0f, 0x06, + 0x03, 0x55, 0x04, 0x07, 0x0c, 0x08, 0x42, 0x72, + 0x6f, 0x6f, 0x6b, 0x6c, 0x79, 0x6e, 0x31, 0x21, + 0x30, 0x1f, 0x06, 0x03, 0x55, 0x04, 0x0a, 0x0c, + 0x18, 0x4d, 0x79, 0x20, 0x43, 0x65, 0x72, 0x74, + 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x20, + 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x74, + 0x79, 0x31, 0x11, 0x30, 0x0f, 0x06, 0x03, 0x55, + 0x04, 0x03, 0x0c, 0x08, 0x6d, 0x79, 0x63, 0x61, + 0x2e, 0x6f, 0x72, 0x67, 0x31, 0x21, 0x30, 0x1f, + 0x06, 0x09, 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, + 0x01, 0x09, 0x01, 0x16, 0x12, 0x6a, 0x76, 0x73, + 0x68, 0x61, 0x68, 0x69, 0x64, 0x40, 0x67, 0x6d, + 0x61, 0x69, 0x6c, 0x2e, 0x63, 0x6f, 0x6d, 0x30, + 0x82, 0x01, 0x22, 0x30, 0x0d, 0x06, 0x09, 0x2a, + 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x01, 0x01, + 0x05, 0x00, 0x03, 0x82, 0x01, 0x0f, 0x00, 0x30, + 0x82, 0x01, 0x0a, 0x02, 0x82, 0x01, 0x01, 0x00, + 0xf0, 0xfb, 0xad, 0x80, 0x5e, 0x37, 0xd3, 0x6d, + 
0xee, 0x2e, 0xcc, 0xbc, 0x0c, 0xd7, 0x56, 0x4b, + 0x56, 0x45, 0xcd, 0x28, 0xb6, 0x22, 0xe9, 0xe2, + 0x0f, 0xd1, 0x87, 0x2a, 0x27, 0xce, 0x77, 0x8d, + 0x6e, 0x0e, 0x0f, 0xfb, 0x66, 0xe1, 0xb5, 0x0e, + 0x9a, 0xb6, 0x05, 0x8e, 0xb3, 0xe1, 0xc5, 0x77, + 0x86, 0x5b, 0x46, 0xd2, 0x0b, 0x92, 0x03, 0x1b, + 0x89, 0x0c, 0x1b, 0x10, 0x0e, 0x99, 0x8f, 0xe2, + 0x17, 0xe8, 0xc2, 0x30, 0x00, 0x47, 0xd6, 0xfc, + 0xf9, 0x0f, 0x3b, 0x75, 0x34, 0x8d, 0x4d, 0xb0, + 0x99, 0xb7, 0xa0, 0x6d, 0xa0, 0xb6, 0xad, 0xda, + 0x07, 0x5e, 0x38, 0x2e, 0x02, 0xe4, 0x30, 0x6d, + 0xae, 0x13, 0x72, 0xd4, 0xc8, 0xce, 0x14, 0x07, + 0xae, 0x23, 0x8c, 0x8f, 0x9e, 0x8c, 0x60, 0xd6, + 0x06, 0xb9, 0xef, 0x00, 0x18, 0xc0, 0x1d, 0x25, + 0x1e, 0xda, 0x3e, 0x2f, 0xcf, 0x2b, 0x56, 0x84, + 0x9e, 0x30, 0x21, 0xc7, 0x29, 0xf6, 0x03, 0x8a, + 0x24, 0xf9, 0x34, 0xac, 0x65, 0x9d, 0x80, 0x36, + 0xc8, 0x3b, 0x15, 0x10, 0xbd, 0x51, 0xe9, 0xbc, + 0x02, 0xe1, 0xe9, 0xb3, 0x5a, 0x9a, 0x99, 0x41, + 0x1b, 0x27, 0xa0, 0x4d, 0x50, 0x9e, 0x27, 0x7f, + 0xa1, 0x7d, 0x09, 0x87, 0xbd, 0x8a, 0xca, 0x5f, + 0xb1, 0xa5, 0x08, 0xb8, 0x04, 0xd4, 0x52, 0x89, + 0xaa, 0xe0, 0x7d, 0x42, 0x2e, 0x2f, 0x15, 0xee, + 0x66, 0x57, 0x0f, 0x13, 0x19, 0x45, 0xa8, 0x4b, + 0x5d, 0x81, 0x66, 0xcc, 0x12, 0x37, 0x94, 0x5e, + 0xfd, 0x3c, 0x10, 0x81, 0x51, 0x3f, 0xfa, 0x0f, + 0xdd, 0xa1, 0x89, 0x03, 0xa9, 0x78, 0x91, 0xf5, + 0x3b, 0xf3, 0xbc, 0xac, 0xbe, 0x93, 0x30, 0x2e, + 0xbe, 0xca, 0x7f, 0x46, 0xd3, 0x28, 0xb4, 0x4e, + 0x91, 0x7b, 0x5b, 0x43, 0x6c, 0xaf, 0x9b, 0x5c, + 0x6a, 0x6d, 0x5a, 0xdb, 0x79, 0x5e, 0x6a, 0x6b, + 0x02, 0x03, 0x01, 0x00, 0x01, 0xa3, 0x50, 0x30, + 0x4e, 0x30, 0x1d, 0x06, 0x03, 0x55, 0x1d, 0x0e, + 0x04, 0x16, 0x04, 0x14, 0x6b, 0x1e, 0x00, 0xa8, + 0x9f, 0xfa, 0x7d, 0x00, 0xf9, 0xe0, 0x9d, 0x0f, + 0x90, 0x8c, 0x90, 0xa8, 0xa1, 0x37, 0x6b, 0xda, + 0x30, 0x1f, 0x06, 0x03, 0x55, 0x1d, 0x23, 0x04, + 0x18, 0x30, 0x16, 0x80, 0x14, 0x6b, 0x1e, 0x00, + 0xa8, 0x9f, 0xfa, 0x7d, 0x00, 0xf9, 0xe0, 0x9d, + 0x0f, 0x90, 0x8c, 0x90, 0xa8, 0xa1, 0x37, 0x6b, + 0xda, 0x30, 0x0c, 0x06, 0x03, 0x55, 0x1d, 0x13, + 0x04, 0x05, 0x30, 0x03, 0x01, 0x01, 0xff, 0x30, + 0x0d, 0x06, 0x09, 0x2a, 0x86, 0x48, 0x86, 0xf7, + 0x0d, 0x01, 0x01, 0x05, 0x05, 0x00, 0x03, 0x82, + 0x01, 0x01, 0x00, 0xcd, 0x6f, 0x73, 0x4d, 0x56, + 0x0b, 0xf3, 0x2e, 0x1c, 0xe2, 0x02, 0x0c, 0x14, + 0xbb, 0x2f, 0xdd, 0x3c, 0x43, 0xfe, 0xdf, 0x94, + 0x2d, 0xa9, 0x89, 0x81, 0x51, 0xf8, 0x5f, 0xa7, + 0xa0, 0x13, 0xaa, 0xcc, 0xb0, 0x18, 0xe2, 0x57, + 0x3e, 0x0d, 0x29, 0x93, 0xe8, 0x95, 0xd5, 0x1b, + 0x53, 0xd2, 0x51, 0xf2, 0xbd, 0xf5, 0x9e, 0x7b, + 0x22, 0x65, 0x62, 0x5c, 0xc4, 0x4c, 0x1d, 0xe8, + 0xe9, 0xc3, 0xd4, 0x2b, 0xe7, 0x78, 0xcb, 0x10, + 0xf3, 0xfe, 0x06, 0x83, 0xdc, 0x3a, 0x1e, 0x62, + 0x10, 0xc0, 0x46, 0x77, 0xc6, 0x9d, 0x9f, 0xab, + 0x96, 0x25, 0x5c, 0xfb, 0x26, 0xc1, 0x15, 0x1f, + 0xa5, 0x33, 0xee, 0x4f, 0x9a, 0x14, 0x6a, 0x14, + 0x97, 0x93, 0x2b, 0x95, 0x0b, 0xdc, 0xa8, 0xd7, + 0x69, 0x2e, 0xf0, 0x01, 0x0e, 0xfd, 0x4e, 0xd0, + 0xd9, 0xa8, 0xe5, 0x65, 0xde, 0xfb, 0xca, 0xca, + 0x1c, 0x5f, 0xf9, 0x53, 0xa0, 0x87, 0xe7, 0x33, + 0x9b, 0x2f, 0xcf, 0xe4, 0x13, 0xfc, 0xec, 0x7a, + 0x6c, 0xb0, 0x90, 0x13, 0x9b, 0xb6, 0xc5, 0x03, + 0xf6, 0x0e, 0x5e, 0xe2, 0xe4, 0x26, 0xc1, 0x7e, + 0x53, 0xfe, 0x69, 0xa3, 0xc7, 0xd8, 0x8e, 0x6e, + 0x94, 0x32, 0xa0, 0xde, 0xca, 0xb6, 0xcc, 0xd6, + 0x01, 0xd5, 0x78, 0x40, 0x28, 0x63, 0x9b, 0xee, + 0xcf, 0x09, 0x3b, 0x35, 0x04, 0xf0, 0x14, 0x02, + 0xf6, 0x80, 0x0e, 0x90, 0xb2, 0x94, 0xd2, 0x25, + 0x16, 0xb8, 0x7a, 0x76, 0x87, 0x84, 0x9f, 0x84, + 0xc5, 0xaf, 0xc2, 0x6d, 0x68, 0x7a, 0x84, 0x9c, + 
0xc6, 0x8a, 0x63, 0x60, 0x87, 0x6a, 0x25, 0xc1, + 0xa1, 0x78, 0x0f, 0xba, 0xe8, 0x5f, 0xe1, 0xba, + 0xac, 0xa4, 0x6f, 0xdd, 0x09, 0x3f, 0x12, 0xcb, + 0x1d, 0xf3, 0xcf, 0x48, 0xd7, 0xd3, 0x26, 0xe8, + 0x9c, 0xc3, 0x53, 0xb3, 0xba, 0xdc, 0x32, 0x99, + 0x98, 0x96, 0xd6, 0x16, 0x03, 0x01, 0x00, 0x99, + 0x0d, 0x00, 0x00, 0x91, 0x03, 0x01, 0x02, 0x40, + 0x00, 0x8b, 0x00, 0x89, 0x30, 0x81, 0x86, 0x31, + 0x0b, 0x30, 0x09, 0x06, 0x03, 0x55, 0x04, 0x06, + 0x13, 0x02, 0x55, 0x53, 0x31, 0x0b, 0x30, 0x09, + 0x06, 0x03, 0x55, 0x04, 0x08, 0x0c, 0x02, 0x4e, + 0x59, 0x31, 0x11, 0x30, 0x0f, 0x06, 0x03, 0x55, + 0x04, 0x07, 0x0c, 0x08, 0x42, 0x72, 0x6f, 0x6f, + 0x6b, 0x6c, 0x79, 0x6e, 0x31, 0x21, 0x30, 0x1f, + 0x06, 0x03, 0x55, 0x04, 0x0a, 0x0c, 0x18, 0x4d, + 0x79, 0x20, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, + 0x69, 0x63, 0x61, 0x74, 0x65, 0x20, 0x41, 0x75, + 0x74, 0x68, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x31, + 0x11, 0x30, 0x0f, 0x06, 0x03, 0x55, 0x04, 0x03, + 0x0c, 0x08, 0x6d, 0x79, 0x63, 0x61, 0x2e, 0x6f, + 0x72, 0x67, 0x31, 0x21, 0x30, 0x1f, 0x06, 0x09, + 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x09, + 0x01, 0x16, 0x12, 0x6a, 0x76, 0x73, 0x68, 0x61, + 0x68, 0x69, 0x64, 0x40, 0x67, 0x6d, 0x61, 0x69, + 0x6c, 0x2e, 0x63, 0x6f, 0x6d, 0x0e, 0x00, 0x00, + 0x00, + }, + { + 0x16, 0x03, 0x01, 0x0a, 0xfb, 0x0b, 0x00, 0x0a, + 0xf7, 0x00, 0x0a, 0xf4, 0x00, 0x03, 0x7e, 0x30, + 0x82, 0x03, 0x7a, 0x30, 0x82, 0x02, 0x62, 0x02, + 0x09, 0x00, 0xb4, 0x47, 0x58, 0x57, 0x2b, 0x67, + 0xc8, 0xc2, 0x30, 0x0d, 0x06, 0x09, 0x2a, 0x86, + 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x01, 0x05, 0x05, + 0x00, 0x30, 0x81, 0x80, 0x31, 0x0b, 0x30, 0x09, + 0x06, 0x03, 0x55, 0x04, 0x06, 0x13, 0x02, 0x55, + 0x53, 0x31, 0x0b, 0x30, 0x09, 0x06, 0x03, 0x55, + 0x04, 0x08, 0x0c, 0x02, 0x4e, 0x59, 0x31, 0x11, + 0x30, 0x0f, 0x06, 0x03, 0x55, 0x04, 0x07, 0x0c, + 0x08, 0x42, 0x72, 0x6f, 0x6f, 0x6b, 0x6c, 0x79, + 0x6e, 0x31, 0x15, 0x30, 0x13, 0x06, 0x03, 0x55, + 0x04, 0x0a, 0x0c, 0x0c, 0x4d, 0x79, 0x20, 0x43, + 0x41, 0x20, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, + 0x31, 0x17, 0x30, 0x15, 0x06, 0x03, 0x55, 0x04, + 0x03, 0x0c, 0x0e, 0x6d, 0x79, 0x63, 0x61, 0x63, + 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x63, 0x6f, + 0x6d, 0x31, 0x21, 0x30, 0x1f, 0x06, 0x09, 0x2a, + 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x09, 0x01, + 0x16, 0x12, 0x6a, 0x76, 0x73, 0x68, 0x61, 0x68, + 0x69, 0x64, 0x40, 0x67, 0x6d, 0x61, 0x69, 0x6c, + 0x2e, 0x63, 0x6f, 0x6d, 0x30, 0x1e, 0x17, 0x0d, + 0x31, 0x33, 0x30, 0x35, 0x32, 0x36, 0x32, 0x31, + 0x34, 0x34, 0x30, 0x30, 0x5a, 0x17, 0x0d, 0x31, + 0x33, 0x30, 0x36, 0x32, 0x35, 0x32, 0x31, 0x34, + 0x34, 0x30, 0x30, 0x5a, 0x30, 0x7d, 0x31, 0x0b, + 0x30, 0x09, 0x06, 0x03, 0x55, 0x04, 0x06, 0x13, + 0x02, 0x55, 0x53, 0x31, 0x11, 0x30, 0x0f, 0x06, + 0x03, 0x55, 0x04, 0x08, 0x0c, 0x08, 0x4e, 0x65, + 0x77, 0x20, 0x59, 0x6f, 0x72, 0x6b, 0x31, 0x11, + 0x30, 0x0f, 0x06, 0x03, 0x55, 0x04, 0x07, 0x0c, + 0x08, 0x42, 0x72, 0x6f, 0x6f, 0x6b, 0x6c, 0x79, + 0x6e, 0x31, 0x10, 0x30, 0x0e, 0x06, 0x03, 0x55, + 0x04, 0x0a, 0x0c, 0x07, 0x4d, 0x79, 0x20, 0x4c, + 0x65, 0x61, 0x66, 0x31, 0x13, 0x30, 0x11, 0x06, + 0x03, 0x55, 0x04, 0x03, 0x0c, 0x0a, 0x6d, 0x79, + 0x6c, 0x65, 0x61, 0x66, 0x2e, 0x63, 0x6f, 0x6d, + 0x31, 0x21, 0x30, 0x1f, 0x06, 0x09, 0x2a, 0x86, + 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x09, 0x01, 0x16, + 0x12, 0x6a, 0x76, 0x73, 0x68, 0x61, 0x68, 0x69, + 0x64, 0x40, 0x67, 0x6d, 0x61, 0x69, 0x6c, 0x2e, + 0x63, 0x6f, 0x6d, 0x30, 0x82, 0x01, 0x22, 0x30, + 0x0d, 0x06, 0x09, 0x2a, 0x86, 0x48, 0x86, 0xf7, + 0x0d, 0x01, 0x01, 0x01, 0x05, 0x00, 0x03, 0x82, + 0x01, 0x0f, 0x00, 0x30, 0x82, 0x01, 
0x0a, 0x02, + 0x82, 0x01, 0x01, 0x00, 0xa0, 0xa3, 0xef, 0xc1, + 0x44, 0x7d, 0xa2, 0xe3, 0x71, 0x98, 0x27, 0x63, + 0xb3, 0x1d, 0x71, 0x50, 0xa6, 0x34, 0x15, 0xcb, + 0xc9, 0x2a, 0xc3, 0xea, 0xe4, 0x9e, 0x9c, 0x49, + 0xa6, 0x01, 0x9b, 0x7e, 0xa9, 0xb5, 0x7a, 0xff, + 0x15, 0x92, 0x71, 0xc8, 0x97, 0x9c, 0x25, 0xb7, + 0x79, 0x2b, 0xff, 0xab, 0xc6, 0xb1, 0xa7, 0x00, + 0x90, 0xb2, 0x8b, 0xd7, 0x71, 0xd5, 0xc2, 0x3a, + 0xe6, 0x82, 0x42, 0x37, 0x89, 0x41, 0x04, 0xb0, + 0xba, 0xc7, 0x5b, 0x8a, 0x43, 0x9f, 0x97, 0x39, + 0x0c, 0x0f, 0xd5, 0x6d, 0x9e, 0x8d, 0xeb, 0xc0, + 0x26, 0xc5, 0x18, 0xe8, 0x7a, 0x3d, 0x32, 0x2e, + 0x38, 0x90, 0x40, 0x5b, 0x39, 0x2c, 0x07, 0xcb, + 0x24, 0x10, 0xc5, 0xc9, 0x3b, 0xe3, 0x66, 0x47, + 0x57, 0xb9, 0x6a, 0xad, 0x44, 0xf8, 0xd0, 0x70, + 0x62, 0x3b, 0x8e, 0xed, 0x60, 0x5f, 0x22, 0xf8, + 0xb8, 0x0c, 0xc9, 0x41, 0x2b, 0xc9, 0x80, 0x6e, + 0x4e, 0x1b, 0xe1, 0x20, 0xfc, 0x47, 0xa4, 0xac, + 0xc3, 0x3f, 0xe6, 0xc2, 0x81, 0x79, 0x03, 0x37, + 0x25, 0x89, 0xca, 0xd6, 0xa5, 0x46, 0x91, 0x63, + 0x41, 0xc5, 0x3e, 0xd5, 0xed, 0x7f, 0x4f, 0x8d, + 0x06, 0xc0, 0x89, 0x00, 0xbe, 0x37, 0x7b, 0x7e, + 0x73, 0xca, 0x70, 0x00, 0x14, 0x34, 0xbe, 0x47, + 0xbc, 0xb2, 0x6a, 0x28, 0xa5, 0x29, 0x84, 0xa8, + 0x9d, 0xc8, 0x1e, 0x77, 0x66, 0x1f, 0x9f, 0xaa, + 0x2b, 0x47, 0xdb, 0xdd, 0x6b, 0x9c, 0xa8, 0xfc, + 0x82, 0x36, 0x94, 0x62, 0x0d, 0x5c, 0x3f, 0xb2, + 0x01, 0xb4, 0xa5, 0xb8, 0xc6, 0x0e, 0x94, 0x5b, + 0xec, 0x5e, 0xbb, 0x7a, 0x63, 0x24, 0xf1, 0xf9, + 0xd6, 0x50, 0x08, 0xc1, 0xa3, 0xcc, 0x90, 0x07, + 0x5b, 0x04, 0x04, 0x42, 0x74, 0xcf, 0x37, 0xfa, + 0xf0, 0xa5, 0xd9, 0xd3, 0x86, 0x89, 0x89, 0x18, + 0xf3, 0x4c, 0xe2, 0x11, 0x02, 0x03, 0x01, 0x00, + 0x01, 0x30, 0x0d, 0x06, 0x09, 0x2a, 0x86, 0x48, + 0x86, 0xf7, 0x0d, 0x01, 0x01, 0x05, 0x05, 0x00, + 0x03, 0x82, 0x01, 0x01, 0x00, 0x90, 0xbb, 0xf9, + 0x5e, 0xba, 0x17, 0x1f, 0xac, 0x21, 0x9f, 0x6b, + 0x4a, 0x46, 0xd0, 0x6d, 0x3c, 0x8f, 0x3d, 0xf8, + 0x5e, 0x3e, 0x72, 0xaf, 0xa0, 0x1a, 0xf3, 0xff, + 0x89, 0xac, 0x5b, 0x7a, 0xe2, 0x91, 0x2a, 0x23, + 0x85, 0xc6, 0x4d, 0x47, 0x67, 0x01, 0x08, 0xa8, + 0x05, 0x1d, 0x01, 0x60, 0x50, 0x5f, 0x59, 0xad, + 0xfe, 0x7b, 0xc6, 0x0c, 0x54, 0x90, 0x68, 0x70, + 0x67, 0x2e, 0xed, 0x87, 0xf8, 0x69, 0x8a, 0xac, + 0x32, 0xfe, 0x6f, 0x90, 0x19, 0x2a, 0x64, 0x8d, + 0x82, 0x66, 0x05, 0x43, 0x88, 0xee, 0xf2, 0x30, + 0xed, 0xa4, 0x8f, 0xbf, 0xd6, 0x57, 0x20, 0xd4, + 0x43, 0x1d, 0x52, 0x96, 0x6f, 0xae, 0x09, 0x96, + 0x01, 0x52, 0x38, 0xe3, 0xaf, 0x99, 0xd7, 0xdc, + 0x14, 0x99, 0xc4, 0x8b, 0x0e, 0x04, 0x0f, 0xb3, + 0x14, 0x14, 0xd4, 0xa5, 0x93, 0xe1, 0xc9, 0x8a, + 0x81, 0xef, 0x63, 0xfc, 0x36, 0x77, 0x05, 0x06, + 0xf0, 0x2a, 0x04, 0x0a, 0xbe, 0x2e, 0xce, 0x81, + 0x3d, 0x23, 0xa1, 0xda, 0xd8, 0xeb, 0xc6, 0xea, + 0x5e, 0xcf, 0x28, 0x36, 0x51, 0x31, 0x95, 0x5e, + 0x40, 0x04, 0xed, 0xac, 0xc1, 0xc8, 0x56, 0x69, + 0x87, 0xec, 0x3b, 0x03, 0x3e, 0x9d, 0x0f, 0x4c, + 0x4c, 0xeb, 0xd7, 0xba, 0x26, 0xdf, 0xe3, 0xde, + 0x10, 0xee, 0x93, 0x62, 0x8d, 0x73, 0x52, 0x6e, + 0xff, 0x37, 0x36, 0x98, 0x7b, 0x2d, 0x56, 0x4c, + 0xba, 0x09, 0xb8, 0xa7, 0xf0, 0x3b, 0x16, 0x81, + 0xca, 0xdb, 0x43, 0xab, 0xec, 0x4c, 0x6e, 0x7c, + 0xc1, 0x0b, 0x22, 0x22, 0x43, 0x1d, 0xb6, 0x0c, + 0xc1, 0xb9, 0xcf, 0xe4, 0x53, 0xee, 0x1d, 0x3e, + 0x88, 0xa7, 0x13, 0xbe, 0x7f, 0xbd, 0xae, 0x72, + 0xcf, 0xcd, 0x63, 0xd2, 0xc3, 0x18, 0x58, 0x92, + 0xa2, 0xad, 0xb5, 0x09, 0x9d, 0x91, 0x03, 0xdd, + 0x3c, 0xe2, 0x1c, 0xde, 0x78, 0x00, 0x03, 0x88, + 0x30, 0x82, 0x03, 0x84, 0x30, 0x82, 0x02, 0x6c, + 0x02, 0x09, 0x00, 0xab, 0xed, 0xa6, 0xe4, 0x4a, + 0x2b, 0x2b, 0xf8, 0x30, 0x0d, 0x06, 
0x09, 0x2a, + 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x01, 0x05, + 0x05, 0x00, 0x30, 0x81, 0x86, 0x31, 0x0b, 0x30, + 0x09, 0x06, 0x03, 0x55, 0x04, 0x06, 0x13, 0x02, + 0x55, 0x53, 0x31, 0x0b, 0x30, 0x09, 0x06, 0x03, + 0x55, 0x04, 0x08, 0x0c, 0x02, 0x4e, 0x59, 0x31, + 0x11, 0x30, 0x0f, 0x06, 0x03, 0x55, 0x04, 0x07, + 0x0c, 0x08, 0x42, 0x72, 0x6f, 0x6f, 0x6b, 0x6c, + 0x79, 0x6e, 0x31, 0x21, 0x30, 0x1f, 0x06, 0x03, + 0x55, 0x04, 0x0a, 0x0c, 0x18, 0x4d, 0x79, 0x20, + 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, + 0x61, 0x74, 0x65, 0x20, 0x41, 0x75, 0x74, 0x68, + 0x6f, 0x72, 0x69, 0x74, 0x79, 0x31, 0x11, 0x30, + 0x0f, 0x06, 0x03, 0x55, 0x04, 0x03, 0x0c, 0x08, + 0x6d, 0x79, 0x63, 0x61, 0x2e, 0x6f, 0x72, 0x67, + 0x31, 0x21, 0x30, 0x1f, 0x06, 0x09, 0x2a, 0x86, + 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x09, 0x01, 0x16, + 0x12, 0x6a, 0x76, 0x73, 0x68, 0x61, 0x68, 0x69, + 0x64, 0x40, 0x67, 0x6d, 0x61, 0x69, 0x6c, 0x2e, + 0x63, 0x6f, 0x6d, 0x30, 0x1e, 0x17, 0x0d, 0x31, + 0x33, 0x30, 0x35, 0x32, 0x36, 0x32, 0x31, 0x31, + 0x38, 0x34, 0x30, 0x5a, 0x17, 0x0d, 0x31, 0x33, + 0x30, 0x36, 0x32, 0x35, 0x32, 0x31, 0x31, 0x38, + 0x34, 0x30, 0x5a, 0x30, 0x81, 0x80, 0x31, 0x0b, + 0x30, 0x09, 0x06, 0x03, 0x55, 0x04, 0x06, 0x13, + 0x02, 0x55, 0x53, 0x31, 0x0b, 0x30, 0x09, 0x06, + 0x03, 0x55, 0x04, 0x08, 0x0c, 0x02, 0x4e, 0x59, + 0x31, 0x11, 0x30, 0x0f, 0x06, 0x03, 0x55, 0x04, + 0x07, 0x0c, 0x08, 0x42, 0x72, 0x6f, 0x6f, 0x6b, + 0x6c, 0x79, 0x6e, 0x31, 0x15, 0x30, 0x13, 0x06, + 0x03, 0x55, 0x04, 0x0a, 0x0c, 0x0c, 0x4d, 0x79, + 0x20, 0x43, 0x41, 0x20, 0x43, 0x6c, 0x69, 0x65, + 0x6e, 0x74, 0x31, 0x17, 0x30, 0x15, 0x06, 0x03, + 0x55, 0x04, 0x03, 0x0c, 0x0e, 0x6d, 0x79, 0x63, + 0x61, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, + 0x63, 0x6f, 0x6d, 0x31, 0x21, 0x30, 0x1f, 0x06, + 0x09, 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, + 0x09, 0x01, 0x16, 0x12, 0x6a, 0x76, 0x73, 0x68, + 0x61, 0x68, 0x69, 0x64, 0x40, 0x67, 0x6d, 0x61, + 0x69, 0x6c, 0x2e, 0x63, 0x6f, 0x6d, 0x30, 0x82, + 0x01, 0x22, 0x30, 0x0d, 0x06, 0x09, 0x2a, 0x86, + 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x01, 0x01, 0x05, + 0x00, 0x03, 0x82, 0x01, 0x0f, 0x00, 0x30, 0x82, + 0x01, 0x0a, 0x02, 0x82, 0x01, 0x01, 0x00, 0xce, + 0x13, 0xf0, 0x72, 0xb0, 0x61, 0xc8, 0x18, 0x37, + 0x8a, 0x41, 0x3d, 0x20, 0xa1, 0x1c, 0xcb, 0xbf, + 0xf6, 0x3b, 0x74, 0x26, 0x2a, 0x96, 0x11, 0xec, + 0x53, 0xa1, 0xcc, 0x7d, 0x77, 0x56, 0x45, 0x0f, + 0x36, 0xb7, 0xf2, 0x48, 0x92, 0x1a, 0x62, 0xcc, + 0xb6, 0xc0, 0xa1, 0x2f, 0x44, 0x2b, 0xc1, 0x89, + 0xcb, 0x6e, 0x1e, 0xdb, 0x57, 0x92, 0xd5, 0x97, + 0x60, 0x8c, 0x41, 0x2c, 0xd9, 0x20, 0xfe, 0xe9, + 0x1f, 0x8e, 0xfc, 0x7f, 0x02, 0x44, 0x0f, 0x28, + 0x81, 0xd6, 0x0c, 0xcd, 0xbc, 0xf0, 0x57, 0x6c, + 0xcc, 0xa7, 0xba, 0x06, 0xa0, 0xa6, 0x91, 0xda, + 0xef, 0x46, 0x8a, 0x60, 0x0f, 0x52, 0x6c, 0x90, + 0x6c, 0x8c, 0x44, 0xaf, 0xb0, 0x9d, 0x90, 0xba, + 0x21, 0x58, 0xa0, 0x3c, 0xee, 0x54, 0xb5, 0x29, + 0x26, 0x1f, 0x0a, 0xac, 0xef, 0x48, 0x68, 0x33, + 0xd0, 0x33, 0xd0, 0x8b, 0x1a, 0xec, 0x6e, 0x2f, + 0xb5, 0x4a, 0x53, 0xc2, 0x1a, 0xd2, 0xf1, 0x50, + 0x05, 0x59, 0x5c, 0xd9, 0xda, 0x03, 0x0a, 0x47, + 0xb7, 0xdd, 0xf7, 0x3a, 0x69, 0xf5, 0x4e, 0xea, + 0x4a, 0xc2, 0xca, 0x54, 0xb0, 0x8b, 0x76, 0xe1, + 0x02, 0x2d, 0x52, 0x67, 0xb9, 0xdd, 0x50, 0xc9, + 0x3b, 0x07, 0x24, 0x22, 0x6a, 0x00, 0x1d, 0x58, + 0x83, 0xa8, 0xec, 0x95, 0xf1, 0xda, 0xe2, 0x73, + 0xa0, 0xa1, 0x72, 0x60, 0x9e, 0x86, 0x53, 0xcb, + 0x45, 0xa8, 0xc2, 0xa0, 0x50, 0xa0, 0x53, 0xd6, + 0xfc, 0x18, 0x84, 0xb5, 0x4a, 0x26, 0xd0, 0xa2, + 0xaa, 0xd0, 0xff, 0xb6, 0xfe, 0x3a, 0x9c, 0xb5, + 0x19, 0x3b, 0x3f, 0xe1, 0x48, 0x0d, 
0xa4, 0x09, + 0x4f, 0x83, 0xc9, 0xc0, 0xc9, 0xa6, 0x0b, 0x58, + 0x1f, 0x1c, 0x7b, 0xac, 0xa2, 0x42, 0xbc, 0x61, + 0xf4, 0x21, 0x8a, 0x00, 0xda, 0x14, 0xa0, 0x60, + 0x03, 0xfe, 0x93, 0x12, 0x6c, 0x56, 0xcd, 0x02, + 0x03, 0x01, 0x00, 0x01, 0x30, 0x0d, 0x06, 0x09, + 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x01, + 0x05, 0x05, 0x00, 0x03, 0x82, 0x01, 0x01, 0x00, + 0x25, 0x29, 0x3b, 0x1e, 0xc3, 0x58, 0x32, 0xe6, + 0x23, 0xc8, 0xee, 0x18, 0xf0, 0x1d, 0x62, 0x6d, + 0x3b, 0x59, 0x99, 0x3a, 0xfe, 0x49, 0x72, 0x07, + 0x3f, 0x58, 0x93, 0xdb, 0xc0, 0xaf, 0xb0, 0xb3, + 0x5c, 0xd1, 0x5c, 0x98, 0xc8, 0xea, 0x4a, 0xe4, + 0x58, 0x73, 0x0d, 0x57, 0xc5, 0x13, 0x7c, 0x5c, + 0x79, 0x66, 0xda, 0x04, 0x1d, 0xe5, 0x98, 0xda, + 0x35, 0x47, 0x44, 0xb0, 0xd2, 0x7a, 0x66, 0x9d, + 0xcd, 0x41, 0xa5, 0x8f, 0xa1, 0x11, 0xb2, 0x1a, + 0x87, 0xc0, 0xcd, 0x55, 0xed, 0xb4, 0x7b, 0x33, + 0x72, 0xeb, 0xf7, 0xe3, 0x7b, 0x8b, 0x02, 0x86, + 0xe9, 0x2b, 0x26, 0x32, 0x9f, 0x99, 0xf1, 0xcb, + 0x93, 0xab, 0xb9, 0x16, 0xb3, 0x9a, 0xb2, 0x22, + 0x13, 0x21, 0x1f, 0x5b, 0xcc, 0xa2, 0x59, 0xbb, + 0x69, 0xf2, 0xb8, 0x07, 0x80, 0xce, 0x0c, 0xf7, + 0x98, 0x4c, 0x85, 0xc2, 0x96, 0x6a, 0x22, 0x05, + 0xe9, 0xbe, 0x48, 0xb0, 0x02, 0x5b, 0x69, 0x28, + 0x18, 0x88, 0x96, 0xe3, 0xd7, 0xc6, 0x7a, 0xd3, + 0xe9, 0x99, 0xff, 0x9d, 0xc3, 0x61, 0x4d, 0x9a, + 0x96, 0xf2, 0xc6, 0x33, 0x4d, 0xe5, 0x5d, 0x5a, + 0x68, 0x64, 0x5a, 0x82, 0x35, 0x65, 0x25, 0xe3, + 0x8c, 0x5b, 0xb0, 0xf6, 0x96, 0x56, 0xbc, 0xbf, + 0x97, 0x76, 0x4b, 0x66, 0x44, 0x81, 0xa4, 0xc4, + 0xa7, 0x31, 0xc5, 0xa1, 0x4f, 0xe8, 0xa4, 0xca, + 0x20, 0xf5, 0x01, 0x5b, 0x99, 0x4f, 0x5a, 0xf4, + 0xf0, 0x78, 0xbf, 0x71, 0x49, 0xd5, 0xf1, 0xc1, + 0xa2, 0x18, 0xfd, 0x72, 0x5b, 0x16, 0xe8, 0x92, + 0xc7, 0x37, 0x48, 0xaf, 0xee, 0x24, 0xfc, 0x35, + 0x0b, 0xc2, 0xdd, 0x05, 0xc7, 0x6e, 0xa3, 0x29, + 0xbb, 0x29, 0x7d, 0xd3, 0x2b, 0x94, 0x80, 0xc3, + 0x40, 0x53, 0x0e, 0x03, 0x54, 0x3d, 0x7b, 0x8b, + 0xce, 0xf9, 0xa4, 0x03, 0x27, 0x63, 0xec, 0x51, + 0x00, 0x03, 0xe5, 0x30, 0x82, 0x03, 0xe1, 0x30, + 0x82, 0x02, 0xc9, 0xa0, 0x03, 0x02, 0x01, 0x02, + 0x02, 0x09, 0x00, 0xcc, 0x22, 0x4c, 0x4b, 0x98, + 0xa2, 0x88, 0xfc, 0x30, 0x0d, 0x06, 0x09, 0x2a, + 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x01, 0x05, + 0x05, 0x00, 0x30, 0x81, 0x86, 0x31, 0x0b, 0x30, + 0x09, 0x06, 0x03, 0x55, 0x04, 0x06, 0x13, 0x02, + 0x55, 0x53, 0x31, 0x0b, 0x30, 0x09, 0x06, 0x03, + 0x55, 0x04, 0x08, 0x0c, 0x02, 0x4e, 0x59, 0x31, + 0x11, 0x30, 0x0f, 0x06, 0x03, 0x55, 0x04, 0x07, + 0x0c, 0x08, 0x42, 0x72, 0x6f, 0x6f, 0x6b, 0x6c, + 0x79, 0x6e, 0x31, 0x21, 0x30, 0x1f, 0x06, 0x03, + 0x55, 0x04, 0x0a, 0x0c, 0x18, 0x4d, 0x79, 0x20, + 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, + 0x61, 0x74, 0x65, 0x20, 0x41, 0x75, 0x74, 0x68, + 0x6f, 0x72, 0x69, 0x74, 0x79, 0x31, 0x11, 0x30, + 0x0f, 0x06, 0x03, 0x55, 0x04, 0x03, 0x0c, 0x08, + 0x6d, 0x79, 0x63, 0x61, 0x2e, 0x6f, 0x72, 0x67, + 0x31, 0x21, 0x30, 0x1f, 0x06, 0x09, 0x2a, 0x86, + 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x09, 0x01, 0x16, + 0x12, 0x6a, 0x76, 0x73, 0x68, 0x61, 0x68, 0x69, + 0x64, 0x40, 0x67, 0x6d, 0x61, 0x69, 0x6c, 0x2e, + 0x63, 0x6f, 0x6d, 0x30, 0x1e, 0x17, 0x0d, 0x31, + 0x33, 0x30, 0x35, 0x32, 0x36, 0x32, 0x31, 0x30, + 0x35, 0x30, 0x31, 0x5a, 0x17, 0x0d, 0x32, 0x33, + 0x30, 0x35, 0x32, 0x34, 0x32, 0x31, 0x30, 0x35, + 0x30, 0x31, 0x5a, 0x30, 0x81, 0x86, 0x31, 0x0b, + 0x30, 0x09, 0x06, 0x03, 0x55, 0x04, 0x06, 0x13, + 0x02, 0x55, 0x53, 0x31, 0x0b, 0x30, 0x09, 0x06, + 0x03, 0x55, 0x04, 0x08, 0x0c, 0x02, 0x4e, 0x59, + 0x31, 0x11, 0x30, 0x0f, 0x06, 0x03, 0x55, 0x04, + 0x07, 0x0c, 0x08, 0x42, 0x72, 0x6f, 
0x6f, 0x6b, + 0x6c, 0x79, 0x6e, 0x31, 0x21, 0x30, 0x1f, 0x06, + 0x03, 0x55, 0x04, 0x0a, 0x0c, 0x18, 0x4d, 0x79, + 0x20, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, + 0x63, 0x61, 0x74, 0x65, 0x20, 0x41, 0x75, 0x74, + 0x68, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x31, 0x11, + 0x30, 0x0f, 0x06, 0x03, 0x55, 0x04, 0x03, 0x0c, + 0x08, 0x6d, 0x79, 0x63, 0x61, 0x2e, 0x6f, 0x72, + 0x67, 0x31, 0x21, 0x30, 0x1f, 0x06, 0x09, 0x2a, + 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x09, 0x01, + 0x16, 0x12, 0x6a, 0x76, 0x73, 0x68, 0x61, 0x68, + 0x69, 0x64, 0x40, 0x67, 0x6d, 0x61, 0x69, 0x6c, + 0x2e, 0x63, 0x6f, 0x6d, 0x30, 0x82, 0x01, 0x22, + 0x30, 0x0d, 0x06, 0x09, 0x2a, 0x86, 0x48, 0x86, + 0xf7, 0x0d, 0x01, 0x01, 0x01, 0x05, 0x00, 0x03, + 0x82, 0x01, 0x0f, 0x00, 0x30, 0x82, 0x01, 0x0a, + 0x02, 0x82, 0x01, 0x01, 0x00, 0xf0, 0xfb, 0xad, + 0x80, 0x5e, 0x37, 0xd3, 0x6d, 0xee, 0x2e, 0xcc, + 0xbc, 0x0c, 0xd7, 0x56, 0x4b, 0x56, 0x45, 0xcd, + 0x28, 0xb6, 0x22, 0xe9, 0xe2, 0x0f, 0xd1, 0x87, + 0x2a, 0x27, 0xce, 0x77, 0x8d, 0x6e, 0x0e, 0x0f, + 0xfb, 0x66, 0xe1, 0xb5, 0x0e, 0x9a, 0xb6, 0x05, + 0x8e, 0xb3, 0xe1, 0xc5, 0x77, 0x86, 0x5b, 0x46, + 0xd2, 0x0b, 0x92, 0x03, 0x1b, 0x89, 0x0c, 0x1b, + 0x10, 0x0e, 0x99, 0x8f, 0xe2, 0x17, 0xe8, 0xc2, + 0x30, 0x00, 0x47, 0xd6, 0xfc, 0xf9, 0x0f, 0x3b, + 0x75, 0x34, 0x8d, 0x4d, 0xb0, 0x99, 0xb7, 0xa0, + 0x6d, 0xa0, 0xb6, 0xad, 0xda, 0x07, 0x5e, 0x38, + 0x2e, 0x02, 0xe4, 0x30, 0x6d, 0xae, 0x13, 0x72, + 0xd4, 0xc8, 0xce, 0x14, 0x07, 0xae, 0x23, 0x8c, + 0x8f, 0x9e, 0x8c, 0x60, 0xd6, 0x06, 0xb9, 0xef, + 0x00, 0x18, 0xc0, 0x1d, 0x25, 0x1e, 0xda, 0x3e, + 0x2f, 0xcf, 0x2b, 0x56, 0x84, 0x9e, 0x30, 0x21, + 0xc7, 0x29, 0xf6, 0x03, 0x8a, 0x24, 0xf9, 0x34, + 0xac, 0x65, 0x9d, 0x80, 0x36, 0xc8, 0x3b, 0x15, + 0x10, 0xbd, 0x51, 0xe9, 0xbc, 0x02, 0xe1, 0xe9, + 0xb3, 0x5a, 0x9a, 0x99, 0x41, 0x1b, 0x27, 0xa0, + 0x4d, 0x50, 0x9e, 0x27, 0x7f, 0xa1, 0x7d, 0x09, + 0x87, 0xbd, 0x8a, 0xca, 0x5f, 0xb1, 0xa5, 0x08, + 0xb8, 0x04, 0xd4, 0x52, 0x89, 0xaa, 0xe0, 0x7d, + 0x42, 0x2e, 0x2f, 0x15, 0xee, 0x66, 0x57, 0x0f, + 0x13, 0x19, 0x45, 0xa8, 0x4b, 0x5d, 0x81, 0x66, + 0xcc, 0x12, 0x37, 0x94, 0x5e, 0xfd, 0x3c, 0x10, + 0x81, 0x51, 0x3f, 0xfa, 0x0f, 0xdd, 0xa1, 0x89, + 0x03, 0xa9, 0x78, 0x91, 0xf5, 0x3b, 0xf3, 0xbc, + 0xac, 0xbe, 0x93, 0x30, 0x2e, 0xbe, 0xca, 0x7f, + 0x46, 0xd3, 0x28, 0xb4, 0x4e, 0x91, 0x7b, 0x5b, + 0x43, 0x6c, 0xaf, 0x9b, 0x5c, 0x6a, 0x6d, 0x5a, + 0xdb, 0x79, 0x5e, 0x6a, 0x6b, 0x02, 0x03, 0x01, + 0x00, 0x01, 0xa3, 0x50, 0x30, 0x4e, 0x30, 0x1d, + 0x06, 0x03, 0x55, 0x1d, 0x0e, 0x04, 0x16, 0x04, + 0x14, 0x6b, 0x1e, 0x00, 0xa8, 0x9f, 0xfa, 0x7d, + 0x00, 0xf9, 0xe0, 0x9d, 0x0f, 0x90, 0x8c, 0x90, + 0xa8, 0xa1, 0x37, 0x6b, 0xda, 0x30, 0x1f, 0x06, + 0x03, 0x55, 0x1d, 0x23, 0x04, 0x18, 0x30, 0x16, + 0x80, 0x14, 0x6b, 0x1e, 0x00, 0xa8, 0x9f, 0xfa, + 0x7d, 0x00, 0xf9, 0xe0, 0x9d, 0x0f, 0x90, 0x8c, + 0x90, 0xa8, 0xa1, 0x37, 0x6b, 0xda, 0x30, 0x0c, + 0x06, 0x03, 0x55, 0x1d, 0x13, 0x04, 0x05, 0x30, + 0x03, 0x01, 0x01, 0xff, 0x30, 0x0d, 0x06, 0x09, + 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x01, + 0x05, 0x05, 0x00, 0x03, 0x82, 0x01, 0x01, 0x00, + 0xcd, 0x6f, 0x73, 0x4d, 0x56, 0x0b, 0xf3, 0x2e, + 0x1c, 0xe2, 0x02, 0x0c, 0x14, 0xbb, 0x2f, 0xdd, + 0x3c, 0x43, 0xfe, 0xdf, 0x94, 0x2d, 0xa9, 0x89, + 0x81, 0x51, 0xf8, 0x5f, 0xa7, 0xa0, 0x13, 0xaa, + 0xcc, 0xb0, 0x18, 0xe2, 0x57, 0x3e, 0x0d, 0x29, + 0x93, 0xe8, 0x95, 0xd5, 0x1b, 0x53, 0xd2, 0x51, + 0xf2, 0xbd, 0xf5, 0x9e, 0x7b, 0x22, 0x65, 0x62, + 0x5c, 0xc4, 0x4c, 0x1d, 0xe8, 0xe9, 0xc3, 0xd4, + 0x2b, 0xe7, 0x78, 0xcb, 0x10, 0xf3, 0xfe, 0x06, + 0x83, 0xdc, 0x3a, 0x1e, 0x62, 0x10, 
0xc0, 0x46, + 0x77, 0xc6, 0x9d, 0x9f, 0xab, 0x96, 0x25, 0x5c, + 0xfb, 0x26, 0xc1, 0x15, 0x1f, 0xa5, 0x33, 0xee, + 0x4f, 0x9a, 0x14, 0x6a, 0x14, 0x97, 0x93, 0x2b, + 0x95, 0x0b, 0xdc, 0xa8, 0xd7, 0x69, 0x2e, 0xf0, + 0x01, 0x0e, 0xfd, 0x4e, 0xd0, 0xd9, 0xa8, 0xe5, + 0x65, 0xde, 0xfb, 0xca, 0xca, 0x1c, 0x5f, 0xf9, + 0x53, 0xa0, 0x87, 0xe7, 0x33, 0x9b, 0x2f, 0xcf, + 0xe4, 0x13, 0xfc, 0xec, 0x7a, 0x6c, 0xb0, 0x90, + 0x13, 0x9b, 0xb6, 0xc5, 0x03, 0xf6, 0x0e, 0x5e, + 0xe2, 0xe4, 0x26, 0xc1, 0x7e, 0x53, 0xfe, 0x69, + 0xa3, 0xc7, 0xd8, 0x8e, 0x6e, 0x94, 0x32, 0xa0, + 0xde, 0xca, 0xb6, 0xcc, 0xd6, 0x01, 0xd5, 0x78, + 0x40, 0x28, 0x63, 0x9b, 0xee, 0xcf, 0x09, 0x3b, + 0x35, 0x04, 0xf0, 0x14, 0x02, 0xf6, 0x80, 0x0e, + 0x90, 0xb2, 0x94, 0xd2, 0x25, 0x16, 0xb8, 0x7a, + 0x76, 0x87, 0x84, 0x9f, 0x84, 0xc5, 0xaf, 0xc2, + 0x6d, 0x68, 0x7a, 0x84, 0x9c, 0xc6, 0x8a, 0x63, + 0x60, 0x87, 0x6a, 0x25, 0xc1, 0xa1, 0x78, 0x0f, + 0xba, 0xe8, 0x5f, 0xe1, 0xba, 0xac, 0xa4, 0x6f, + 0xdd, 0x09, 0x3f, 0x12, 0xcb, 0x1d, 0xf3, 0xcf, + 0x48, 0xd7, 0xd3, 0x26, 0xe8, 0x9c, 0xc3, 0x53, + 0xb3, 0xba, 0xdc, 0x32, 0x99, 0x98, 0x96, 0xd6, + 0x16, 0x03, 0x01, 0x01, 0x06, 0x10, 0x00, 0x01, + 0x02, 0x01, 0x00, 0x6e, 0xea, 0x15, 0x6f, 0x21, + 0xbd, 0x2d, 0x14, 0xde, 0x9d, 0x02, 0xeb, 0xdf, + 0x3b, 0x09, 0x75, 0xaf, 0x32, 0x80, 0x0c, 0xe2, + 0xc2, 0x7b, 0x0d, 0xca, 0x24, 0x96, 0xf6, 0x3e, + 0xa5, 0x97, 0xba, 0x0c, 0x50, 0x7e, 0xb3, 0x68, + 0x58, 0xc6, 0xd8, 0xec, 0xab, 0xa9, 0xd9, 0x3a, + 0xb1, 0x49, 0xea, 0x2f, 0xd7, 0xdb, 0x15, 0x1b, + 0xb5, 0xaf, 0xec, 0xcc, 0x40, 0x5c, 0xe6, 0x0f, + 0xc4, 0x33, 0x71, 0xe7, 0x41, 0xc0, 0x04, 0x89, + 0x60, 0x3e, 0xb7, 0xe6, 0xda, 0x38, 0x62, 0x27, + 0x6a, 0xd9, 0xfb, 0x93, 0x94, 0x9d, 0xc1, 0x63, + 0x92, 0x5c, 0x88, 0x19, 0x38, 0x81, 0x79, 0x9d, + 0x59, 0x48, 0x5e, 0xd3, 0xc8, 0xea, 0xcb, 0x6e, + 0x66, 0x66, 0x03, 0xdc, 0x0c, 0x2d, 0x95, 0xb1, + 0x4d, 0x68, 0xc7, 0xc5, 0x6e, 0xfa, 0x94, 0x14, + 0xdf, 0x2c, 0x70, 0x69, 0x04, 0xf4, 0x69, 0xf1, + 0xf0, 0x07, 0xbd, 0x23, 0x53, 0x63, 0xb3, 0x41, + 0xec, 0xa7, 0x10, 0xa5, 0x04, 0x84, 0x24, 0xb5, + 0xf5, 0x0c, 0x0f, 0x5d, 0x02, 0x47, 0x79, 0x60, + 0x76, 0xbb, 0xdf, 0x60, 0xa6, 0xd7, 0x4d, 0x08, + 0x7d, 0xa6, 0x85, 0x4f, 0x61, 0xac, 0x96, 0x3d, + 0xbc, 0xaf, 0x07, 0xb0, 0x7c, 0xb6, 0x23, 0x3e, + 0x1f, 0x0a, 0x62, 0x77, 0x97, 0x77, 0xae, 0x33, + 0x55, 0x0f, 0x85, 0xdf, 0xdc, 0xbe, 0xc6, 0xe0, + 0xe0, 0x14, 0x83, 0x4c, 0x50, 0xf0, 0xe5, 0x2d, + 0xdc, 0x0b, 0x74, 0x7f, 0xc3, 0x28, 0x98, 0x16, + 0xda, 0x74, 0xe6, 0x40, 0xc2, 0xf0, 0xea, 0xc0, + 0x00, 0xd5, 0xfc, 0x16, 0xe4, 0x43, 0xa1, 0xfc, + 0x31, 0x19, 0x81, 0x62, 0xec, 0x2b, 0xfe, 0xcc, + 0xe8, 0x19, 0xed, 0xa1, 0x1e, 0x6a, 0x49, 0x73, + 0xde, 0xc4, 0xe9, 0x22, 0x0a, 0x21, 0xde, 0x45, + 0x1e, 0x55, 0x12, 0xd9, 0x44, 0xef, 0x4e, 0xaa, + 0x5e, 0x26, 0x57, 0x16, 0x03, 0x01, 0x01, 0x06, + 0x0f, 0x00, 0x01, 0x02, 0x01, 0x00, 0x23, 0xde, + 0xb0, 0x39, 0x60, 0xe9, 0x82, 0xb8, 0xed, 0x17, + 0x78, 0xd2, 0x37, 0x0e, 0x85, 0x69, 0xda, 0xcc, + 0x9f, 0x54, 0x4d, 0xda, 0xce, 0xe8, 0x5a, 0xeb, + 0x3c, 0x61, 0x4c, 0x7a, 0x84, 0x1f, 0x21, 0x03, + 0xb3, 0x8a, 0x74, 0x3b, 0x6a, 0x9e, 0x4f, 0x44, + 0xd9, 0x75, 0x0a, 0xd8, 0x7e, 0x56, 0xa3, 0xef, + 0x5a, 0xfe, 0x8a, 0x35, 0xce, 0x29, 0x18, 0xfe, + 0xa6, 0x61, 0x8e, 0x8f, 0x00, 0x90, 0x2d, 0x85, + 0xe3, 0x6c, 0x0e, 0x8d, 0x8c, 0x27, 0x80, 0x8c, + 0x9f, 0x51, 0xe9, 0xd3, 0xe6, 0x7d, 0x70, 0xe9, + 0xfb, 0xcb, 0xb8, 0x24, 0x94, 0x30, 0x9b, 0xba, + 0x01, 0x14, 0x49, 0x9f, 0xaf, 0x09, 0xd8, 0x26, + 0x1b, 0x23, 0xa4, 0xb8, 0xd9, 0x44, 0x0a, 0xdc, + 0x4e, 0x27, 0xe7, 0x32, 0xf5, 0x9c, 
0xf3, 0x8d, + 0xa0, 0xc5, 0xc4, 0xbe, 0x92, 0x02, 0x85, 0x4f, + 0x33, 0x8f, 0xa7, 0xf7, 0x87, 0xa9, 0x44, 0xf3, + 0x64, 0xbd, 0x32, 0x04, 0xeb, 0xc5, 0xc3, 0x62, + 0xe9, 0xda, 0x2f, 0x95, 0x5c, 0xf7, 0x58, 0x3e, + 0xad, 0x35, 0xd7, 0x7e, 0xad, 0xdd, 0x32, 0x8d, + 0xce, 0x81, 0x08, 0xad, 0x49, 0xf7, 0xdb, 0xf7, + 0xaf, 0xe3, 0xc6, 0xb2, 0xdd, 0x76, 0x0c, 0xcf, + 0x0f, 0x87, 0x79, 0x90, 0x10, 0x79, 0xc6, 0xc8, + 0x7b, 0xe6, 0x23, 0xf2, 0xda, 0x33, 0xca, 0xe1, + 0xf0, 0x59, 0x42, 0x43, 0x03, 0x56, 0x19, 0xe3, + 0x8b, 0xe6, 0xa8, 0x70, 0xbc, 0x80, 0xfa, 0x24, + 0xae, 0x03, 0x13, 0x30, 0x0d, 0x1f, 0xab, 0xb7, + 0x82, 0xd9, 0x24, 0x90, 0x80, 0xbf, 0x75, 0xe1, + 0x0d, 0x1c, 0xb2, 0xfe, 0x92, 0x2c, 0x4d, 0x21, + 0xe9, 0x5d, 0xa1, 0x68, 0xf3, 0x16, 0xd8, 0x3f, + 0xb2, 0xc3, 0x00, 0x3e, 0xd8, 0x42, 0x25, 0x5c, + 0x90, 0x11, 0xc0, 0x1b, 0xd4, 0x26, 0x5c, 0x37, + 0x47, 0xbd, 0xf8, 0x1e, 0x34, 0xa9, 0x14, 0x03, + 0x01, 0x00, 0x01, 0x01, 0x16, 0x03, 0x01, 0x00, + 0x24, 0x8f, 0x94, 0x7e, 0x01, 0xee, 0xd5, 0x4f, + 0x83, 0x41, 0x31, 0xc0, 0x36, 0x81, 0x46, 0xc3, + 0xc0, 0xcc, 0x9c, 0xea, 0x0f, 0x29, 0x04, 0x10, + 0x43, 0x1e, 0x08, 0x6e, 0x08, 0xce, 0xb2, 0x62, + 0xa6, 0x0f, 0x68, 0x9f, 0x99, + }, + { + 0x14, 0x03, 0x01, 0x00, 0x01, 0x01, 0x16, 0x03, + 0x01, 0x00, 0x24, 0xd9, 0x46, 0x5b, 0xbf, 0xfd, + 0x8a, 0xa1, 0x08, 0xd5, 0xf3, 0x0c, 0x1c, 0xd8, + 0xa8, 0xb3, 0xe5, 0x89, 0x83, 0x9e, 0x23, 0x47, + 0x81, 0x66, 0x77, 0x11, 0x98, 0xe5, 0xf4, 0xac, + 0x06, 0xe9, 0x4c, 0x05, 0x8b, 0xc4, 0x16, + }, + { + 0x17, 0x03, 0x01, 0x00, 0x1a, 0xc5, 0x28, 0xfd, + 0x71, 0xc0, 0xe6, 0x89, 0xb8, 0x82, 0x92, 0x1b, + 0xdd, 0x39, 0xe5, 0xbf, 0x41, 0x82, 0x1f, 0xc1, + 0xbc, 0x85, 0xe5, 0x32, 0x1b, 0x93, 0x46, 0x15, + 0x03, 0x01, 0x00, 0x16, 0x1a, 0x8b, 0x10, 0x42, + 0x12, 0xb2, 0xbd, 0xd3, 0xf1, 0x74, 0x1f, 0xc2, + 0x10, 0x08, 0xc2, 0x79, 0x99, 0x2c, 0x55, 0xef, + 0x4a, 0xbd, + }, +} + +// $ openssl s_server -tls1_2 -cert server.crt -key server.key \ +// -cipher ECDHE-RSA-AES128-SHA -port 10443 +// $ go test -test.run "TestRunClient" -connect -ciphersuites=0xc013 \ +// -minversion=0x0303 -maxversion=0x0303 +var clientTLS12Script = [][]byte{ + { + 0x16, 0x03, 0x01, 0x00, 0x58, 0x01, 0x00, 0x00, + 0x54, 0x03, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xc0, 0x13, + 0x01, 0x00, 0x00, 0x29, 0x00, 0x05, 0x00, 0x05, + 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0a, 0x00, + 0x08, 0x00, 0x06, 0x00, 0x17, 0x00, 0x18, 0x00, + 0x19, 0x00, 0x0b, 0x00, 0x02, 0x01, 0x00, 0x00, + 0x0d, 0x00, 0x0a, 0x00, 0x08, 0x04, 0x01, 0x04, + 0x03, 0x02, 0x01, 0x02, 0x03, + }, + { + 0x16, 0x03, 0x03, 0x00, 0x54, 0x02, 0x00, 0x00, + 0x50, 0x03, 0x03, 0x52, 0x65, 0x67, 0xbd, 0xe8, + 0x72, 0x03, 0x6a, 0x52, 0x8d, 0x28, 0x2c, 0x9a, + 0x53, 0xff, 0xc2, 0xa1, 0x62, 0x5f, 0x54, 0xfb, + 0x73, 0x00, 0xcf, 0x4d, 0x28, 0x36, 0xc2, 0xee, + 0xfd, 0x78, 0xf0, 0x20, 0x6f, 0xbe, 0x49, 0xec, + 0x5b, 0x6f, 0xf9, 0x53, 0x42, 0x69, 0x0d, 0x6d, + 0x8b, 0x68, 0x2e, 0xca, 0x3c, 0x3c, 0x88, 0x9e, + 0x8b, 0xf9, 0x32, 0x65, 0x09, 0xd6, 0xa0, 0x7d, + 0xea, 0xc6, 0xd5, 0xc4, 0xc0, 0x13, 0x00, 0x00, + 0x08, 0x00, 0x0b, 0x00, 0x04, 0x03, 0x00, 0x01, + 0x02, 0x16, 0x03, 0x03, 0x02, 0x39, 0x0b, 0x00, + 0x02, 0x35, 0x00, 0x02, 0x32, 0x00, 0x02, 0x2f, + 0x30, 0x82, 0x02, 0x2b, 0x30, 0x82, 0x01, 0xd5, + 0xa0, 0x03, 0x02, 0x01, 0x02, 0x02, 0x09, 0x00, + 0xb1, 0x35, 0x13, 0x65, 0x11, 0x20, 0xc5, 0x92, + 0x30, 0x0d, 0x06, 
0x09, 0x2a, 0x86, 0x48, 0x86, + 0xf7, 0x0d, 0x01, 0x01, 0x05, 0x05, 0x00, 0x30, + 0x45, 0x31, 0x0b, 0x30, 0x09, 0x06, 0x03, 0x55, + 0x04, 0x06, 0x13, 0x02, 0x41, 0x55, 0x31, 0x13, + 0x30, 0x11, 0x06, 0x03, 0x55, 0x04, 0x08, 0x13, + 0x0a, 0x53, 0x6f, 0x6d, 0x65, 0x2d, 0x53, 0x74, + 0x61, 0x74, 0x65, 0x31, 0x21, 0x30, 0x1f, 0x06, + 0x03, 0x55, 0x04, 0x0a, 0x13, 0x18, 0x49, 0x6e, + 0x74, 0x65, 0x72, 0x6e, 0x65, 0x74, 0x20, 0x57, + 0x69, 0x64, 0x67, 0x69, 0x74, 0x73, 0x20, 0x50, + 0x74, 0x79, 0x20, 0x4c, 0x74, 0x64, 0x30, 0x1e, + 0x17, 0x0d, 0x31, 0x32, 0x30, 0x34, 0x30, 0x36, + 0x31, 0x37, 0x31, 0x30, 0x31, 0x33, 0x5a, 0x17, + 0x0d, 0x31, 0x35, 0x30, 0x34, 0x30, 0x36, 0x31, + 0x37, 0x31, 0x30, 0x31, 0x33, 0x5a, 0x30, 0x45, + 0x31, 0x0b, 0x30, 0x09, 0x06, 0x03, 0x55, 0x04, + 0x06, 0x13, 0x02, 0x41, 0x55, 0x31, 0x13, 0x30, + 0x11, 0x06, 0x03, 0x55, 0x04, 0x08, 0x13, 0x0a, + 0x53, 0x6f, 0x6d, 0x65, 0x2d, 0x53, 0x74, 0x61, + 0x74, 0x65, 0x31, 0x21, 0x30, 0x1f, 0x06, 0x03, + 0x55, 0x04, 0x0a, 0x13, 0x18, 0x49, 0x6e, 0x74, + 0x65, 0x72, 0x6e, 0x65, 0x74, 0x20, 0x57, 0x69, + 0x64, 0x67, 0x69, 0x74, 0x73, 0x20, 0x50, 0x74, + 0x79, 0x20, 0x4c, 0x74, 0x64, 0x30, 0x5c, 0x30, + 0x0d, 0x06, 0x09, 0x2a, 0x86, 0x48, 0x86, 0xf7, + 0x0d, 0x01, 0x01, 0x01, 0x05, 0x00, 0x03, 0x4b, + 0x00, 0x30, 0x48, 0x02, 0x41, 0x00, 0x9f, 0xb3, + 0xc3, 0x84, 0x27, 0x95, 0xff, 0x12, 0x31, 0x52, + 0x0f, 0x15, 0xef, 0x46, 0x11, 0xc4, 0xad, 0x80, + 0xe6, 0x36, 0x5b, 0x0f, 0xdd, 0x80, 0xd7, 0x61, + 0x8d, 0xe0, 0xfc, 0x72, 0x45, 0x09, 0x34, 0xfe, + 0x55, 0x66, 0x45, 0x43, 0x4c, 0x68, 0x97, 0x6a, + 0xfe, 0xa8, 0xa0, 0xa5, 0xdf, 0x5f, 0x78, 0xff, + 0xee, 0xd7, 0x64, 0xb8, 0x3f, 0x04, 0xcb, 0x6f, + 0xff, 0x2a, 0xfe, 0xfe, 0xb9, 0xed, 0x02, 0x03, + 0x01, 0x00, 0x01, 0xa3, 0x81, 0xa7, 0x30, 0x81, + 0xa4, 0x30, 0x1d, 0x06, 0x03, 0x55, 0x1d, 0x0e, + 0x04, 0x16, 0x04, 0x14, 0x78, 0xa6, 0x97, 0x9a, + 0x63, 0xb5, 0xc5, 0xa1, 0xa5, 0x33, 0xba, 0x22, + 0x7c, 0x23, 0x6e, 0x5b, 0x1b, 0x7a, 0xcc, 0x2b, + 0x30, 0x75, 0x06, 0x03, 0x55, 0x1d, 0x23, 0x04, + 0x6e, 0x30, 0x6c, 0x80, 0x14, 0x78, 0xa6, 0x97, + 0x9a, 0x63, 0xb5, 0xc5, 0xa1, 0xa5, 0x33, 0xba, + 0x22, 0x7c, 0x23, 0x6e, 0x5b, 0x1b, 0x7a, 0xcc, + 0x2b, 0xa1, 0x49, 0xa4, 0x47, 0x30, 0x45, 0x31, + 0x0b, 0x30, 0x09, 0x06, 0x03, 0x55, 0x04, 0x06, + 0x13, 0x02, 0x41, 0x55, 0x31, 0x13, 0x30, 0x11, + 0x06, 0x03, 0x55, 0x04, 0x08, 0x13, 0x0a, 0x53, + 0x6f, 0x6d, 0x65, 0x2d, 0x53, 0x74, 0x61, 0x74, + 0x65, 0x31, 0x21, 0x30, 0x1f, 0x06, 0x03, 0x55, + 0x04, 0x0a, 0x13, 0x18, 0x49, 0x6e, 0x74, 0x65, + 0x72, 0x6e, 0x65, 0x74, 0x20, 0x57, 0x69, 0x64, + 0x67, 0x69, 0x74, 0x73, 0x20, 0x50, 0x74, 0x79, + 0x20, 0x4c, 0x74, 0x64, 0x82, 0x09, 0x00, 0xb1, + 0x35, 0x13, 0x65, 0x11, 0x20, 0xc5, 0x92, 0x30, + 0x0c, 0x06, 0x03, 0x55, 0x1d, 0x13, 0x04, 0x05, + 0x30, 0x03, 0x01, 0x01, 0xff, 0x30, 0x0d, 0x06, + 0x09, 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, + 0x01, 0x05, 0x05, 0x00, 0x03, 0x41, 0x00, 0x85, + 0x36, 0x40, 0x73, 0xc1, 0xbb, 0x1a, 0xda, 0xd4, + 0x59, 0x9f, 0x2d, 0xa2, 0x70, 0x31, 0x46, 0x74, + 0xec, 0x83, 0x6e, 0xa8, 0xc8, 0x3c, 0x51, 0xaf, + 0x39, 0xac, 0xec, 0x40, 0xbc, 0xe8, 0x22, 0x46, + 0x1d, 0x99, 0xd6, 0x46, 0x2a, 0x24, 0xd4, 0x8b, + 0x05, 0x08, 0x4b, 0xfb, 0x35, 0x11, 0x6e, 0x92, + 0xbb, 0x77, 0xba, 0xe4, 0x12, 0xbb, 0xf4, 0xc8, + 0x5e, 0x9c, 0x81, 0xa8, 0x97, 0x60, 0x4c, 0x16, + 0x03, 0x03, 0x00, 0x8d, 0x0c, 0x00, 0x00, 0x89, + 0x03, 0x00, 0x17, 0x41, 0x04, 0x48, 0x93, 0x62, + 0x6a, 0xf8, 0x7c, 0x94, 0xcc, 0xcc, 0x0a, 0x9b, + 0x5e, 0x11, 0xad, 0x0b, 0x30, 0xc4, 0x5d, 0xf7, + 0x63, 0x24, 0xc1, 
0xb0, 0x40, 0x5f, 0xff, 0x9f, + 0x0d, 0x7e, 0xd5, 0xa5, 0xd0, 0x4f, 0x80, 0x16, + 0xa8, 0x66, 0x18, 0x31, 0x1f, 0x81, 0xb2, 0x9a, + 0x41, 0x62, 0x5b, 0xcf, 0x73, 0xac, 0x4a, 0x64, + 0xb5, 0xc1, 0x46, 0x4d, 0x8a, 0xac, 0x25, 0xba, + 0x81, 0x7f, 0xbe, 0x64, 0x68, 0x04, 0x01, 0x00, + 0x40, 0x4e, 0x3f, 0x1e, 0x04, 0x4c, 0xef, 0xd2, + 0xa6, 0x82, 0xe6, 0x7c, 0x76, 0x23, 0x17, 0xb9, + 0xe7, 0x52, 0x15, 0x6b, 0x3d, 0xb2, 0xb1, 0x17, + 0x7d, 0xe6, 0xde, 0x06, 0x87, 0x30, 0xb0, 0xb5, + 0x57, 0xae, 0xdf, 0xb2, 0xdc, 0x8d, 0xab, 0x76, + 0x9c, 0xaa, 0x45, 0x6d, 0x23, 0x5d, 0xc1, 0xa8, + 0x7b, 0x79, 0x79, 0xb1, 0x3c, 0xdc, 0xf5, 0x33, + 0x2c, 0xa1, 0x62, 0x3e, 0xbd, 0xf5, 0x5d, 0x6c, + 0x87, 0x16, 0x03, 0x03, 0x00, 0x04, 0x0e, 0x00, + 0x00, 0x00, + }, + { + 0x16, 0x03, 0x03, 0x00, 0x46, 0x10, 0x00, 0x00, + 0x42, 0x41, 0x04, 0x1e, 0x18, 0x37, 0xef, 0x0d, + 0x19, 0x51, 0x88, 0x35, 0x75, 0x71, 0xb5, 0xe5, + 0x54, 0x5b, 0x12, 0x2e, 0x8f, 0x09, 0x67, 0xfd, + 0xa7, 0x24, 0x20, 0x3e, 0xb2, 0x56, 0x1c, 0xce, + 0x97, 0x28, 0x5e, 0xf8, 0x2b, 0x2d, 0x4f, 0x9e, + 0xf1, 0x07, 0x9f, 0x6c, 0x4b, 0x5b, 0x83, 0x56, + 0xe2, 0x32, 0x42, 0xe9, 0x58, 0xb6, 0xd7, 0x49, + 0xa6, 0xb5, 0x68, 0x1a, 0x41, 0x03, 0x56, 0x6b, + 0xdc, 0x5a, 0x89, 0x14, 0x03, 0x03, 0x00, 0x01, + 0x01, 0x16, 0x03, 0x03, 0x00, 0x40, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xee, 0x17, + 0x54, 0x51, 0xb6, 0x1d, 0x8e, 0xe4, 0x6b, 0xed, + 0x5b, 0xa1, 0x27, 0x7f, 0xdc, 0xa9, 0xa5, 0xcf, + 0x38, 0xe6, 0x5d, 0x17, 0x34, 0xf9, 0xc0, 0x07, + 0xb8, 0xbe, 0x56, 0xe6, 0xd6, 0x6a, 0xb6, 0x26, + 0x4e, 0x45, 0x8d, 0x48, 0xe9, 0xc6, 0xb1, 0xa1, + 0xea, 0xdc, 0xb1, 0x37, 0xd9, 0xf6, + }, + { + 0x14, 0x03, 0x03, 0x00, 0x01, 0x01, 0x16, 0x03, + 0x03, 0x00, 0x40, 0x00, 0x68, 0xc5, 0x27, 0xd5, + 0x3d, 0xba, 0x04, 0xde, 0x63, 0xf1, 0x5b, 0xc3, + 0x86, 0xb9, 0x82, 0xc7, 0xb3, 0x90, 0x31, 0xea, + 0x15, 0xe1, 0x42, 0x76, 0x7d, 0x90, 0xcb, 0xc9, + 0xd1, 0x05, 0xe6, 0x8c, 0x76, 0xc7, 0x9a, 0x35, + 0x67, 0xa2, 0x70, 0x9a, 0x8a, 0x6c, 0xb5, 0x6b, + 0xc7, 0x87, 0xf3, 0x65, 0x0a, 0xa0, 0x98, 0xba, + 0x57, 0xbb, 0x31, 0x7b, 0x1f, 0x1a, 0xf7, 0x2a, + 0xf3, 0x12, 0xf6, + }, + { + 0x17, 0x03, 0x03, 0x00, 0x30, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x14, 0x10, 0x80, + 0x54, 0x1e, 0x72, 0xd3, 0x1a, 0x86, 0x1c, 0xc4, + 0x4a, 0x9b, 0xd4, 0x80, 0xd2, 0x03, 0x35, 0x0d, + 0xe4, 0x12, 0xc2, 0x3d, 0x79, 0x4a, 0x2c, 0xba, + 0xc2, 0xad, 0xf3, 0xd2, 0x16, 0x15, 0x03, 0x03, + 0x00, 0x30, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x04, 0x9b, 0x68, 0x78, 0x92, 0x28, + 0x62, 0x02, 0x65, 0x87, 0x90, 0xe4, 0x32, 0xd7, + 0x72, 0x08, 0x70, 0xb8, 0x52, 0x32, 0x1f, 0x97, + 0xd4, 0x6a, 0xc6, 0x28, 0x83, 0xb0, 0x1d, 0x6e, + 0x16, 0xd5, + }, +} + +// $ openssl s_server -tls1_2 -cert server.crt -key server.key \ +// -port 10443 -verify 0 +// $ go test -test.run "TestRunClient" -connect -ciphersuites=0xc02f \ +// -maxversion=0x0303 +var clientTLS12ClientCertScript = [][]byte{ + { + 0x16, 0x03, 0x01, 0x00, 0x58, 0x01, 0x00, 0x00, + 0x54, 0x03, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xc0, 0x2f, + 0x01, 0x00, 0x00, 0x29, 0x00, 0x05, 0x00, 0x05, + 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0a, 0x00, + 0x08, 0x00, 0x06, 0x00, 0x17, 0x00, 0x18, 0x00, + 0x19, 0x00, 
0x0b, 0x00, 0x02, 0x01, 0x00, 0x00, + 0x0d, 0x00, 0x0a, 0x00, 0x08, 0x04, 0x01, 0x04, + 0x03, 0x02, 0x01, 0x02, 0x03, + }, + { + 0x16, 0x03, 0x03, 0x00, 0x54, 0x02, 0x00, 0x00, + 0x50, 0x03, 0x03, 0x52, 0x65, 0x67, 0xe0, 0xe8, + 0xf1, 0x13, 0x2a, 0x83, 0x28, 0xa8, 0x2e, 0x76, + 0x69, 0xe6, 0x89, 0x55, 0x6c, 0x48, 0x49, 0x2e, + 0x00, 0xf6, 0x87, 0x6c, 0x13, 0xa1, 0xd4, 0xaa, + 0xd0, 0x76, 0x3b, 0x20, 0xe4, 0xd6, 0x5b, 0x1d, + 0x11, 0xf2, 0x42, 0xf2, 0x82, 0x0c, 0x0d, 0x66, + 0x6d, 0xec, 0x52, 0xf8, 0x4a, 0xd9, 0x45, 0xcf, + 0xe4, 0x4a, 0xba, 0x8b, 0xf1, 0xab, 0x55, 0xe4, + 0x57, 0x18, 0xa9, 0x36, 0xc0, 0x2f, 0x00, 0x00, + 0x08, 0x00, 0x0b, 0x00, 0x04, 0x03, 0x00, 0x01, + 0x02, 0x16, 0x03, 0x03, 0x02, 0x39, 0x0b, 0x00, + 0x02, 0x35, 0x00, 0x02, 0x32, 0x00, 0x02, 0x2f, + 0x30, 0x82, 0x02, 0x2b, 0x30, 0x82, 0x01, 0xd5, + 0xa0, 0x03, 0x02, 0x01, 0x02, 0x02, 0x09, 0x00, + 0xb1, 0x35, 0x13, 0x65, 0x11, 0x20, 0xc5, 0x92, + 0x30, 0x0d, 0x06, 0x09, 0x2a, 0x86, 0x48, 0x86, + 0xf7, 0x0d, 0x01, 0x01, 0x05, 0x05, 0x00, 0x30, + 0x45, 0x31, 0x0b, 0x30, 0x09, 0x06, 0x03, 0x55, + 0x04, 0x06, 0x13, 0x02, 0x41, 0x55, 0x31, 0x13, + 0x30, 0x11, 0x06, 0x03, 0x55, 0x04, 0x08, 0x13, + 0x0a, 0x53, 0x6f, 0x6d, 0x65, 0x2d, 0x53, 0x74, + 0x61, 0x74, 0x65, 0x31, 0x21, 0x30, 0x1f, 0x06, + 0x03, 0x55, 0x04, 0x0a, 0x13, 0x18, 0x49, 0x6e, + 0x74, 0x65, 0x72, 0x6e, 0x65, 0x74, 0x20, 0x57, + 0x69, 0x64, 0x67, 0x69, 0x74, 0x73, 0x20, 0x50, + 0x74, 0x79, 0x20, 0x4c, 0x74, 0x64, 0x30, 0x1e, + 0x17, 0x0d, 0x31, 0x32, 0x30, 0x34, 0x30, 0x36, + 0x31, 0x37, 0x31, 0x30, 0x31, 0x33, 0x5a, 0x17, + 0x0d, 0x31, 0x35, 0x30, 0x34, 0x30, 0x36, 0x31, + 0x37, 0x31, 0x30, 0x31, 0x33, 0x5a, 0x30, 0x45, + 0x31, 0x0b, 0x30, 0x09, 0x06, 0x03, 0x55, 0x04, + 0x06, 0x13, 0x02, 0x41, 0x55, 0x31, 0x13, 0x30, + 0x11, 0x06, 0x03, 0x55, 0x04, 0x08, 0x13, 0x0a, + 0x53, 0x6f, 0x6d, 0x65, 0x2d, 0x53, 0x74, 0x61, + 0x74, 0x65, 0x31, 0x21, 0x30, 0x1f, 0x06, 0x03, + 0x55, 0x04, 0x0a, 0x13, 0x18, 0x49, 0x6e, 0x74, + 0x65, 0x72, 0x6e, 0x65, 0x74, 0x20, 0x57, 0x69, + 0x64, 0x67, 0x69, 0x74, 0x73, 0x20, 0x50, 0x74, + 0x79, 0x20, 0x4c, 0x74, 0x64, 0x30, 0x5c, 0x30, + 0x0d, 0x06, 0x09, 0x2a, 0x86, 0x48, 0x86, 0xf7, + 0x0d, 0x01, 0x01, 0x01, 0x05, 0x00, 0x03, 0x4b, + 0x00, 0x30, 0x48, 0x02, 0x41, 0x00, 0x9f, 0xb3, + 0xc3, 0x84, 0x27, 0x95, 0xff, 0x12, 0x31, 0x52, + 0x0f, 0x15, 0xef, 0x46, 0x11, 0xc4, 0xad, 0x80, + 0xe6, 0x36, 0x5b, 0x0f, 0xdd, 0x80, 0xd7, 0x61, + 0x8d, 0xe0, 0xfc, 0x72, 0x45, 0x09, 0x34, 0xfe, + 0x55, 0x66, 0x45, 0x43, 0x4c, 0x68, 0x97, 0x6a, + 0xfe, 0xa8, 0xa0, 0xa5, 0xdf, 0x5f, 0x78, 0xff, + 0xee, 0xd7, 0x64, 0xb8, 0x3f, 0x04, 0xcb, 0x6f, + 0xff, 0x2a, 0xfe, 0xfe, 0xb9, 0xed, 0x02, 0x03, + 0x01, 0x00, 0x01, 0xa3, 0x81, 0xa7, 0x30, 0x81, + 0xa4, 0x30, 0x1d, 0x06, 0x03, 0x55, 0x1d, 0x0e, + 0x04, 0x16, 0x04, 0x14, 0x78, 0xa6, 0x97, 0x9a, + 0x63, 0xb5, 0xc5, 0xa1, 0xa5, 0x33, 0xba, 0x22, + 0x7c, 0x23, 0x6e, 0x5b, 0x1b, 0x7a, 0xcc, 0x2b, + 0x30, 0x75, 0x06, 0x03, 0x55, 0x1d, 0x23, 0x04, + 0x6e, 0x30, 0x6c, 0x80, 0x14, 0x78, 0xa6, 0x97, + 0x9a, 0x63, 0xb5, 0xc5, 0xa1, 0xa5, 0x33, 0xba, + 0x22, 0x7c, 0x23, 0x6e, 0x5b, 0x1b, 0x7a, 0xcc, + 0x2b, 0xa1, 0x49, 0xa4, 0x47, 0x30, 0x45, 0x31, + 0x0b, 0x30, 0x09, 0x06, 0x03, 0x55, 0x04, 0x06, + 0x13, 0x02, 0x41, 0x55, 0x31, 0x13, 0x30, 0x11, + 0x06, 0x03, 0x55, 0x04, 0x08, 0x13, 0x0a, 0x53, + 0x6f, 0x6d, 0x65, 0x2d, 0x53, 0x74, 0x61, 0x74, + 0x65, 0x31, 0x21, 0x30, 0x1f, 0x06, 0x03, 0x55, + 0x04, 0x0a, 0x13, 0x18, 0x49, 0x6e, 0x74, 0x65, + 0x72, 0x6e, 0x65, 0x74, 0x20, 0x57, 0x69, 0x64, + 0x67, 0x69, 0x74, 0x73, 
0x20, 0x50, 0x74, 0x79, + 0x20, 0x4c, 0x74, 0x64, 0x82, 0x09, 0x00, 0xb1, + 0x35, 0x13, 0x65, 0x11, 0x20, 0xc5, 0x92, 0x30, + 0x0c, 0x06, 0x03, 0x55, 0x1d, 0x13, 0x04, 0x05, + 0x30, 0x03, 0x01, 0x01, 0xff, 0x30, 0x0d, 0x06, + 0x09, 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, + 0x01, 0x05, 0x05, 0x00, 0x03, 0x41, 0x00, 0x85, + 0x36, 0x40, 0x73, 0xc1, 0xbb, 0x1a, 0xda, 0xd4, + 0x59, 0x9f, 0x2d, 0xa2, 0x70, 0x31, 0x46, 0x74, + 0xec, 0x83, 0x6e, 0xa8, 0xc8, 0x3c, 0x51, 0xaf, + 0x39, 0xac, 0xec, 0x40, 0xbc, 0xe8, 0x22, 0x46, + 0x1d, 0x99, 0xd6, 0x46, 0x2a, 0x24, 0xd4, 0x8b, + 0x05, 0x08, 0x4b, 0xfb, 0x35, 0x11, 0x6e, 0x92, + 0xbb, 0x77, 0xba, 0xe4, 0x12, 0xbb, 0xf4, 0xc8, + 0x5e, 0x9c, 0x81, 0xa8, 0x97, 0x60, 0x4c, 0x16, + 0x03, 0x03, 0x00, 0x8d, 0x0c, 0x00, 0x00, 0x89, + 0x03, 0x00, 0x17, 0x41, 0x04, 0xaa, 0xf0, 0x0c, + 0xa3, 0x60, 0xcf, 0x69, 0x1e, 0xad, 0x16, 0x9a, + 0x01, 0x40, 0xc6, 0x22, 0xc4, 0xbb, 0x06, 0x3b, + 0x84, 0x65, 0xea, 0xc7, 0xa2, 0x96, 0x79, 0x17, + 0x2f, 0xc7, 0xbe, 0x56, 0x39, 0xe4, 0x79, 0xf3, + 0xad, 0x17, 0xf3, 0x7e, 0xe2, 0x7b, 0xa2, 0x6f, + 0x3f, 0x96, 0xea, 0xe5, 0x0e, 0xea, 0x39, 0x79, + 0x77, 0xeb, 0x14, 0x18, 0xbb, 0x7c, 0x95, 0xda, + 0xa7, 0x51, 0x09, 0xba, 0xd7, 0x04, 0x01, 0x00, + 0x40, 0x82, 0x3e, 0xce, 0xee, 0x7e, 0xba, 0x3b, + 0x51, 0xb1, 0xba, 0x71, 0x2e, 0x54, 0xa9, 0xb9, + 0xe2, 0xb1, 0x59, 0x17, 0xa1, 0xac, 0x76, 0xb4, + 0x4e, 0xf1, 0xae, 0x65, 0x17, 0x2b, 0x43, 0x06, + 0x31, 0x29, 0x0b, 0xa0, 0x1e, 0xb6, 0xfa, 0x35, + 0xe8, 0x63, 0x06, 0xde, 0x13, 0x89, 0x83, 0x69, + 0x3b, 0xc2, 0x15, 0x73, 0x1c, 0xc5, 0x07, 0xe9, + 0x38, 0x9b, 0x06, 0x81, 0x1b, 0x97, 0x7c, 0xa6, + 0x89, 0x16, 0x03, 0x03, 0x00, 0x30, 0x0d, 0x00, + 0x00, 0x28, 0x03, 0x01, 0x02, 0x40, 0x00, 0x20, + 0x06, 0x01, 0x06, 0x02, 0x06, 0x03, 0x05, 0x01, + 0x05, 0x02, 0x05, 0x03, 0x04, 0x01, 0x04, 0x02, + 0x04, 0x03, 0x03, 0x01, 0x03, 0x02, 0x03, 0x03, + 0x02, 0x01, 0x02, 0x02, 0x02, 0x03, 0x01, 0x01, + 0x00, 0x00, 0x0e, 0x00, 0x00, 0x00, + }, + { + 0x16, 0x03, 0x03, 0x0a, 0xfb, 0x0b, 0x00, 0x0a, + 0xf7, 0x00, 0x0a, 0xf4, 0x00, 0x03, 0x7e, 0x30, + 0x82, 0x03, 0x7a, 0x30, 0x82, 0x02, 0x62, 0x02, + 0x09, 0x00, 0xb4, 0x47, 0x58, 0x57, 0x2b, 0x67, + 0xc8, 0xc2, 0x30, 0x0d, 0x06, 0x09, 0x2a, 0x86, + 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x01, 0x05, 0x05, + 0x00, 0x30, 0x81, 0x80, 0x31, 0x0b, 0x30, 0x09, + 0x06, 0x03, 0x55, 0x04, 0x06, 0x13, 0x02, 0x55, + 0x53, 0x31, 0x0b, 0x30, 0x09, 0x06, 0x03, 0x55, + 0x04, 0x08, 0x0c, 0x02, 0x4e, 0x59, 0x31, 0x11, + 0x30, 0x0f, 0x06, 0x03, 0x55, 0x04, 0x07, 0x0c, + 0x08, 0x42, 0x72, 0x6f, 0x6f, 0x6b, 0x6c, 0x79, + 0x6e, 0x31, 0x15, 0x30, 0x13, 0x06, 0x03, 0x55, + 0x04, 0x0a, 0x0c, 0x0c, 0x4d, 0x79, 0x20, 0x43, + 0x41, 0x20, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, + 0x31, 0x17, 0x30, 0x15, 0x06, 0x03, 0x55, 0x04, + 0x03, 0x0c, 0x0e, 0x6d, 0x79, 0x63, 0x61, 0x63, + 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x63, 0x6f, + 0x6d, 0x31, 0x21, 0x30, 0x1f, 0x06, 0x09, 0x2a, + 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x09, 0x01, + 0x16, 0x12, 0x6a, 0x76, 0x73, 0x68, 0x61, 0x68, + 0x69, 0x64, 0x40, 0x67, 0x6d, 0x61, 0x69, 0x6c, + 0x2e, 0x63, 0x6f, 0x6d, 0x30, 0x1e, 0x17, 0x0d, + 0x31, 0x33, 0x30, 0x35, 0x32, 0x36, 0x32, 0x31, + 0x34, 0x34, 0x30, 0x30, 0x5a, 0x17, 0x0d, 0x31, + 0x33, 0x30, 0x36, 0x32, 0x35, 0x32, 0x31, 0x34, + 0x34, 0x30, 0x30, 0x5a, 0x30, 0x7d, 0x31, 0x0b, + 0x30, 0x09, 0x06, 0x03, 0x55, 0x04, 0x06, 0x13, + 0x02, 0x55, 0x53, 0x31, 0x11, 0x30, 0x0f, 0x06, + 0x03, 0x55, 0x04, 0x08, 0x0c, 0x08, 0x4e, 0x65, + 0x77, 0x20, 0x59, 0x6f, 0x72, 0x6b, 0x31, 0x11, + 0x30, 0x0f, 0x06, 0x03, 0x55, 
0x04, 0x07, 0x0c, + 0x08, 0x42, 0x72, 0x6f, 0x6f, 0x6b, 0x6c, 0x79, + 0x6e, 0x31, 0x10, 0x30, 0x0e, 0x06, 0x03, 0x55, + 0x04, 0x0a, 0x0c, 0x07, 0x4d, 0x79, 0x20, 0x4c, + 0x65, 0x61, 0x66, 0x31, 0x13, 0x30, 0x11, 0x06, + 0x03, 0x55, 0x04, 0x03, 0x0c, 0x0a, 0x6d, 0x79, + 0x6c, 0x65, 0x61, 0x66, 0x2e, 0x63, 0x6f, 0x6d, + 0x31, 0x21, 0x30, 0x1f, 0x06, 0x09, 0x2a, 0x86, + 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x09, 0x01, 0x16, + 0x12, 0x6a, 0x76, 0x73, 0x68, 0x61, 0x68, 0x69, + 0x64, 0x40, 0x67, 0x6d, 0x61, 0x69, 0x6c, 0x2e, + 0x63, 0x6f, 0x6d, 0x30, 0x82, 0x01, 0x22, 0x30, + 0x0d, 0x06, 0x09, 0x2a, 0x86, 0x48, 0x86, 0xf7, + 0x0d, 0x01, 0x01, 0x01, 0x05, 0x00, 0x03, 0x82, + 0x01, 0x0f, 0x00, 0x30, 0x82, 0x01, 0x0a, 0x02, + 0x82, 0x01, 0x01, 0x00, 0xa0, 0xa3, 0xef, 0xc1, + 0x44, 0x7d, 0xa2, 0xe3, 0x71, 0x98, 0x27, 0x63, + 0xb3, 0x1d, 0x71, 0x50, 0xa6, 0x34, 0x15, 0xcb, + 0xc9, 0x2a, 0xc3, 0xea, 0xe4, 0x9e, 0x9c, 0x49, + 0xa6, 0x01, 0x9b, 0x7e, 0xa9, 0xb5, 0x7a, 0xff, + 0x15, 0x92, 0x71, 0xc8, 0x97, 0x9c, 0x25, 0xb7, + 0x79, 0x2b, 0xff, 0xab, 0xc6, 0xb1, 0xa7, 0x00, + 0x90, 0xb2, 0x8b, 0xd7, 0x71, 0xd5, 0xc2, 0x3a, + 0xe6, 0x82, 0x42, 0x37, 0x89, 0x41, 0x04, 0xb0, + 0xba, 0xc7, 0x5b, 0x8a, 0x43, 0x9f, 0x97, 0x39, + 0x0c, 0x0f, 0xd5, 0x6d, 0x9e, 0x8d, 0xeb, 0xc0, + 0x26, 0xc5, 0x18, 0xe8, 0x7a, 0x3d, 0x32, 0x2e, + 0x38, 0x90, 0x40, 0x5b, 0x39, 0x2c, 0x07, 0xcb, + 0x24, 0x10, 0xc5, 0xc9, 0x3b, 0xe3, 0x66, 0x47, + 0x57, 0xb9, 0x6a, 0xad, 0x44, 0xf8, 0xd0, 0x70, + 0x62, 0x3b, 0x8e, 0xed, 0x60, 0x5f, 0x22, 0xf8, + 0xb8, 0x0c, 0xc9, 0x41, 0x2b, 0xc9, 0x80, 0x6e, + 0x4e, 0x1b, 0xe1, 0x20, 0xfc, 0x47, 0xa4, 0xac, + 0xc3, 0x3f, 0xe6, 0xc2, 0x81, 0x79, 0x03, 0x37, + 0x25, 0x89, 0xca, 0xd6, 0xa5, 0x46, 0x91, 0x63, + 0x41, 0xc5, 0x3e, 0xd5, 0xed, 0x7f, 0x4f, 0x8d, + 0x06, 0xc0, 0x89, 0x00, 0xbe, 0x37, 0x7b, 0x7e, + 0x73, 0xca, 0x70, 0x00, 0x14, 0x34, 0xbe, 0x47, + 0xbc, 0xb2, 0x6a, 0x28, 0xa5, 0x29, 0x84, 0xa8, + 0x9d, 0xc8, 0x1e, 0x77, 0x66, 0x1f, 0x9f, 0xaa, + 0x2b, 0x47, 0xdb, 0xdd, 0x6b, 0x9c, 0xa8, 0xfc, + 0x82, 0x36, 0x94, 0x62, 0x0d, 0x5c, 0x3f, 0xb2, + 0x01, 0xb4, 0xa5, 0xb8, 0xc6, 0x0e, 0x94, 0x5b, + 0xec, 0x5e, 0xbb, 0x7a, 0x63, 0x24, 0xf1, 0xf9, + 0xd6, 0x50, 0x08, 0xc1, 0xa3, 0xcc, 0x90, 0x07, + 0x5b, 0x04, 0x04, 0x42, 0x74, 0xcf, 0x37, 0xfa, + 0xf0, 0xa5, 0xd9, 0xd3, 0x86, 0x89, 0x89, 0x18, + 0xf3, 0x4c, 0xe2, 0x11, 0x02, 0x03, 0x01, 0x00, + 0x01, 0x30, 0x0d, 0x06, 0x09, 0x2a, 0x86, 0x48, + 0x86, 0xf7, 0x0d, 0x01, 0x01, 0x05, 0x05, 0x00, + 0x03, 0x82, 0x01, 0x01, 0x00, 0x90, 0xbb, 0xf9, + 0x5e, 0xba, 0x17, 0x1f, 0xac, 0x21, 0x9f, 0x6b, + 0x4a, 0x46, 0xd0, 0x6d, 0x3c, 0x8f, 0x3d, 0xf8, + 0x5e, 0x3e, 0x72, 0xaf, 0xa0, 0x1a, 0xf3, 0xff, + 0x89, 0xac, 0x5b, 0x7a, 0xe2, 0x91, 0x2a, 0x23, + 0x85, 0xc6, 0x4d, 0x47, 0x67, 0x01, 0x08, 0xa8, + 0x05, 0x1d, 0x01, 0x60, 0x50, 0x5f, 0x59, 0xad, + 0xfe, 0x7b, 0xc6, 0x0c, 0x54, 0x90, 0x68, 0x70, + 0x67, 0x2e, 0xed, 0x87, 0xf8, 0x69, 0x8a, 0xac, + 0x32, 0xfe, 0x6f, 0x90, 0x19, 0x2a, 0x64, 0x8d, + 0x82, 0x66, 0x05, 0x43, 0x88, 0xee, 0xf2, 0x30, + 0xed, 0xa4, 0x8f, 0xbf, 0xd6, 0x57, 0x20, 0xd4, + 0x43, 0x1d, 0x52, 0x96, 0x6f, 0xae, 0x09, 0x96, + 0x01, 0x52, 0x38, 0xe3, 0xaf, 0x99, 0xd7, 0xdc, + 0x14, 0x99, 0xc4, 0x8b, 0x0e, 0x04, 0x0f, 0xb3, + 0x14, 0x14, 0xd4, 0xa5, 0x93, 0xe1, 0xc9, 0x8a, + 0x81, 0xef, 0x63, 0xfc, 0x36, 0x77, 0x05, 0x06, + 0xf0, 0x2a, 0x04, 0x0a, 0xbe, 0x2e, 0xce, 0x81, + 0x3d, 0x23, 0xa1, 0xda, 0xd8, 0xeb, 0xc6, 0xea, + 0x5e, 0xcf, 0x28, 0x36, 0x51, 0x31, 0x95, 0x5e, + 0x40, 0x04, 0xed, 0xac, 0xc1, 0xc8, 0x56, 0x69, + 0x87, 0xec, 0x3b, 0x03, 0x3e, 
0x9d, 0x0f, 0x4c, + 0x4c, 0xeb, 0xd7, 0xba, 0x26, 0xdf, 0xe3, 0xde, + 0x10, 0xee, 0x93, 0x62, 0x8d, 0x73, 0x52, 0x6e, + 0xff, 0x37, 0x36, 0x98, 0x7b, 0x2d, 0x56, 0x4c, + 0xba, 0x09, 0xb8, 0xa7, 0xf0, 0x3b, 0x16, 0x81, + 0xca, 0xdb, 0x43, 0xab, 0xec, 0x4c, 0x6e, 0x7c, + 0xc1, 0x0b, 0x22, 0x22, 0x43, 0x1d, 0xb6, 0x0c, + 0xc1, 0xb9, 0xcf, 0xe4, 0x53, 0xee, 0x1d, 0x3e, + 0x88, 0xa7, 0x13, 0xbe, 0x7f, 0xbd, 0xae, 0x72, + 0xcf, 0xcd, 0x63, 0xd2, 0xc3, 0x18, 0x58, 0x92, + 0xa2, 0xad, 0xb5, 0x09, 0x9d, 0x91, 0x03, 0xdd, + 0x3c, 0xe2, 0x1c, 0xde, 0x78, 0x00, 0x03, 0x88, + 0x30, 0x82, 0x03, 0x84, 0x30, 0x82, 0x02, 0x6c, + 0x02, 0x09, 0x00, 0xab, 0xed, 0xa6, 0xe4, 0x4a, + 0x2b, 0x2b, 0xf8, 0x30, 0x0d, 0x06, 0x09, 0x2a, + 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x01, 0x05, + 0x05, 0x00, 0x30, 0x81, 0x86, 0x31, 0x0b, 0x30, + 0x09, 0x06, 0x03, 0x55, 0x04, 0x06, 0x13, 0x02, + 0x55, 0x53, 0x31, 0x0b, 0x30, 0x09, 0x06, 0x03, + 0x55, 0x04, 0x08, 0x0c, 0x02, 0x4e, 0x59, 0x31, + 0x11, 0x30, 0x0f, 0x06, 0x03, 0x55, 0x04, 0x07, + 0x0c, 0x08, 0x42, 0x72, 0x6f, 0x6f, 0x6b, 0x6c, + 0x79, 0x6e, 0x31, 0x21, 0x30, 0x1f, 0x06, 0x03, + 0x55, 0x04, 0x0a, 0x0c, 0x18, 0x4d, 0x79, 0x20, + 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, + 0x61, 0x74, 0x65, 0x20, 0x41, 0x75, 0x74, 0x68, + 0x6f, 0x72, 0x69, 0x74, 0x79, 0x31, 0x11, 0x30, + 0x0f, 0x06, 0x03, 0x55, 0x04, 0x03, 0x0c, 0x08, + 0x6d, 0x79, 0x63, 0x61, 0x2e, 0x6f, 0x72, 0x67, + 0x31, 0x21, 0x30, 0x1f, 0x06, 0x09, 0x2a, 0x86, + 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x09, 0x01, 0x16, + 0x12, 0x6a, 0x76, 0x73, 0x68, 0x61, 0x68, 0x69, + 0x64, 0x40, 0x67, 0x6d, 0x61, 0x69, 0x6c, 0x2e, + 0x63, 0x6f, 0x6d, 0x30, 0x1e, 0x17, 0x0d, 0x31, + 0x33, 0x30, 0x35, 0x32, 0x36, 0x32, 0x31, 0x31, + 0x38, 0x34, 0x30, 0x5a, 0x17, 0x0d, 0x31, 0x33, + 0x30, 0x36, 0x32, 0x35, 0x32, 0x31, 0x31, 0x38, + 0x34, 0x30, 0x5a, 0x30, 0x81, 0x80, 0x31, 0x0b, + 0x30, 0x09, 0x06, 0x03, 0x55, 0x04, 0x06, 0x13, + 0x02, 0x55, 0x53, 0x31, 0x0b, 0x30, 0x09, 0x06, + 0x03, 0x55, 0x04, 0x08, 0x0c, 0x02, 0x4e, 0x59, + 0x31, 0x11, 0x30, 0x0f, 0x06, 0x03, 0x55, 0x04, + 0x07, 0x0c, 0x08, 0x42, 0x72, 0x6f, 0x6f, 0x6b, + 0x6c, 0x79, 0x6e, 0x31, 0x15, 0x30, 0x13, 0x06, + 0x03, 0x55, 0x04, 0x0a, 0x0c, 0x0c, 0x4d, 0x79, + 0x20, 0x43, 0x41, 0x20, 0x43, 0x6c, 0x69, 0x65, + 0x6e, 0x74, 0x31, 0x17, 0x30, 0x15, 0x06, 0x03, + 0x55, 0x04, 0x03, 0x0c, 0x0e, 0x6d, 0x79, 0x63, + 0x61, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, + 0x63, 0x6f, 0x6d, 0x31, 0x21, 0x30, 0x1f, 0x06, + 0x09, 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, + 0x09, 0x01, 0x16, 0x12, 0x6a, 0x76, 0x73, 0x68, + 0x61, 0x68, 0x69, 0x64, 0x40, 0x67, 0x6d, 0x61, + 0x69, 0x6c, 0x2e, 0x63, 0x6f, 0x6d, 0x30, 0x82, + 0x01, 0x22, 0x30, 0x0d, 0x06, 0x09, 0x2a, 0x86, + 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x01, 0x01, 0x05, + 0x00, 0x03, 0x82, 0x01, 0x0f, 0x00, 0x30, 0x82, + 0x01, 0x0a, 0x02, 0x82, 0x01, 0x01, 0x00, 0xce, + 0x13, 0xf0, 0x72, 0xb0, 0x61, 0xc8, 0x18, 0x37, + 0x8a, 0x41, 0x3d, 0x20, 0xa1, 0x1c, 0xcb, 0xbf, + 0xf6, 0x3b, 0x74, 0x26, 0x2a, 0x96, 0x11, 0xec, + 0x53, 0xa1, 0xcc, 0x7d, 0x77, 0x56, 0x45, 0x0f, + 0x36, 0xb7, 0xf2, 0x48, 0x92, 0x1a, 0x62, 0xcc, + 0xb6, 0xc0, 0xa1, 0x2f, 0x44, 0x2b, 0xc1, 0x89, + 0xcb, 0x6e, 0x1e, 0xdb, 0x57, 0x92, 0xd5, 0x97, + 0x60, 0x8c, 0x41, 0x2c, 0xd9, 0x20, 0xfe, 0xe9, + 0x1f, 0x8e, 0xfc, 0x7f, 0x02, 0x44, 0x0f, 0x28, + 0x81, 0xd6, 0x0c, 0xcd, 0xbc, 0xf0, 0x57, 0x6c, + 0xcc, 0xa7, 0xba, 0x06, 0xa0, 0xa6, 0x91, 0xda, + 0xef, 0x46, 0x8a, 0x60, 0x0f, 0x52, 0x6c, 0x90, + 0x6c, 0x8c, 0x44, 0xaf, 0xb0, 0x9d, 0x90, 0xba, + 0x21, 0x58, 0xa0, 0x3c, 0xee, 
0x54, 0xb5, 0x29, + 0x26, 0x1f, 0x0a, 0xac, 0xef, 0x48, 0x68, 0x33, + 0xd0, 0x33, 0xd0, 0x8b, 0x1a, 0xec, 0x6e, 0x2f, + 0xb5, 0x4a, 0x53, 0xc2, 0x1a, 0xd2, 0xf1, 0x50, + 0x05, 0x59, 0x5c, 0xd9, 0xda, 0x03, 0x0a, 0x47, + 0xb7, 0xdd, 0xf7, 0x3a, 0x69, 0xf5, 0x4e, 0xea, + 0x4a, 0xc2, 0xca, 0x54, 0xb0, 0x8b, 0x76, 0xe1, + 0x02, 0x2d, 0x52, 0x67, 0xb9, 0xdd, 0x50, 0xc9, + 0x3b, 0x07, 0x24, 0x22, 0x6a, 0x00, 0x1d, 0x58, + 0x83, 0xa8, 0xec, 0x95, 0xf1, 0xda, 0xe2, 0x73, + 0xa0, 0xa1, 0x72, 0x60, 0x9e, 0x86, 0x53, 0xcb, + 0x45, 0xa8, 0xc2, 0xa0, 0x50, 0xa0, 0x53, 0xd6, + 0xfc, 0x18, 0x84, 0xb5, 0x4a, 0x26, 0xd0, 0xa2, + 0xaa, 0xd0, 0xff, 0xb6, 0xfe, 0x3a, 0x9c, 0xb5, + 0x19, 0x3b, 0x3f, 0xe1, 0x48, 0x0d, 0xa4, 0x09, + 0x4f, 0x83, 0xc9, 0xc0, 0xc9, 0xa6, 0x0b, 0x58, + 0x1f, 0x1c, 0x7b, 0xac, 0xa2, 0x42, 0xbc, 0x61, + 0xf4, 0x21, 0x8a, 0x00, 0xda, 0x14, 0xa0, 0x60, + 0x03, 0xfe, 0x93, 0x12, 0x6c, 0x56, 0xcd, 0x02, + 0x03, 0x01, 0x00, 0x01, 0x30, 0x0d, 0x06, 0x09, + 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x01, + 0x05, 0x05, 0x00, 0x03, 0x82, 0x01, 0x01, 0x00, + 0x25, 0x29, 0x3b, 0x1e, 0xc3, 0x58, 0x32, 0xe6, + 0x23, 0xc8, 0xee, 0x18, 0xf0, 0x1d, 0x62, 0x6d, + 0x3b, 0x59, 0x99, 0x3a, 0xfe, 0x49, 0x72, 0x07, + 0x3f, 0x58, 0x93, 0xdb, 0xc0, 0xaf, 0xb0, 0xb3, + 0x5c, 0xd1, 0x5c, 0x98, 0xc8, 0xea, 0x4a, 0xe4, + 0x58, 0x73, 0x0d, 0x57, 0xc5, 0x13, 0x7c, 0x5c, + 0x79, 0x66, 0xda, 0x04, 0x1d, 0xe5, 0x98, 0xda, + 0x35, 0x47, 0x44, 0xb0, 0xd2, 0x7a, 0x66, 0x9d, + 0xcd, 0x41, 0xa5, 0x8f, 0xa1, 0x11, 0xb2, 0x1a, + 0x87, 0xc0, 0xcd, 0x55, 0xed, 0xb4, 0x7b, 0x33, + 0x72, 0xeb, 0xf7, 0xe3, 0x7b, 0x8b, 0x02, 0x86, + 0xe9, 0x2b, 0x26, 0x32, 0x9f, 0x99, 0xf1, 0xcb, + 0x93, 0xab, 0xb9, 0x16, 0xb3, 0x9a, 0xb2, 0x22, + 0x13, 0x21, 0x1f, 0x5b, 0xcc, 0xa2, 0x59, 0xbb, + 0x69, 0xf2, 0xb8, 0x07, 0x80, 0xce, 0x0c, 0xf7, + 0x98, 0x4c, 0x85, 0xc2, 0x96, 0x6a, 0x22, 0x05, + 0xe9, 0xbe, 0x48, 0xb0, 0x02, 0x5b, 0x69, 0x28, + 0x18, 0x88, 0x96, 0xe3, 0xd7, 0xc6, 0x7a, 0xd3, + 0xe9, 0x99, 0xff, 0x9d, 0xc3, 0x61, 0x4d, 0x9a, + 0x96, 0xf2, 0xc6, 0x33, 0x4d, 0xe5, 0x5d, 0x5a, + 0x68, 0x64, 0x5a, 0x82, 0x35, 0x65, 0x25, 0xe3, + 0x8c, 0x5b, 0xb0, 0xf6, 0x96, 0x56, 0xbc, 0xbf, + 0x97, 0x76, 0x4b, 0x66, 0x44, 0x81, 0xa4, 0xc4, + 0xa7, 0x31, 0xc5, 0xa1, 0x4f, 0xe8, 0xa4, 0xca, + 0x20, 0xf5, 0x01, 0x5b, 0x99, 0x4f, 0x5a, 0xf4, + 0xf0, 0x78, 0xbf, 0x71, 0x49, 0xd5, 0xf1, 0xc1, + 0xa2, 0x18, 0xfd, 0x72, 0x5b, 0x16, 0xe8, 0x92, + 0xc7, 0x37, 0x48, 0xaf, 0xee, 0x24, 0xfc, 0x35, + 0x0b, 0xc2, 0xdd, 0x05, 0xc7, 0x6e, 0xa3, 0x29, + 0xbb, 0x29, 0x7d, 0xd3, 0x2b, 0x94, 0x80, 0xc3, + 0x40, 0x53, 0x0e, 0x03, 0x54, 0x3d, 0x7b, 0x8b, + 0xce, 0xf9, 0xa4, 0x03, 0x27, 0x63, 0xec, 0x51, + 0x00, 0x03, 0xe5, 0x30, 0x82, 0x03, 0xe1, 0x30, + 0x82, 0x02, 0xc9, 0xa0, 0x03, 0x02, 0x01, 0x02, + 0x02, 0x09, 0x00, 0xcc, 0x22, 0x4c, 0x4b, 0x98, + 0xa2, 0x88, 0xfc, 0x30, 0x0d, 0x06, 0x09, 0x2a, + 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x01, 0x05, + 0x05, 0x00, 0x30, 0x81, 0x86, 0x31, 0x0b, 0x30, + 0x09, 0x06, 0x03, 0x55, 0x04, 0x06, 0x13, 0x02, + 0x55, 0x53, 0x31, 0x0b, 0x30, 0x09, 0x06, 0x03, + 0x55, 0x04, 0x08, 0x0c, 0x02, 0x4e, 0x59, 0x31, + 0x11, 0x30, 0x0f, 0x06, 0x03, 0x55, 0x04, 0x07, + 0x0c, 0x08, 0x42, 0x72, 0x6f, 0x6f, 0x6b, 0x6c, + 0x79, 0x6e, 0x31, 0x21, 0x30, 0x1f, 0x06, 0x03, + 0x55, 0x04, 0x0a, 0x0c, 0x18, 0x4d, 0x79, 0x20, + 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, + 0x61, 0x74, 0x65, 0x20, 0x41, 0x75, 0x74, 0x68, + 0x6f, 0x72, 0x69, 0x74, 0x79, 0x31, 0x11, 0x30, + 0x0f, 0x06, 0x03, 0x55, 0x04, 0x03, 0x0c, 0x08, + 0x6d, 0x79, 0x63, 0x61, 0x2e, 
0x6f, 0x72, 0x67, + 0x31, 0x21, 0x30, 0x1f, 0x06, 0x09, 0x2a, 0x86, + 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x09, 0x01, 0x16, + 0x12, 0x6a, 0x76, 0x73, 0x68, 0x61, 0x68, 0x69, + 0x64, 0x40, 0x67, 0x6d, 0x61, 0x69, 0x6c, 0x2e, + 0x63, 0x6f, 0x6d, 0x30, 0x1e, 0x17, 0x0d, 0x31, + 0x33, 0x30, 0x35, 0x32, 0x36, 0x32, 0x31, 0x30, + 0x35, 0x30, 0x31, 0x5a, 0x17, 0x0d, 0x32, 0x33, + 0x30, 0x35, 0x32, 0x34, 0x32, 0x31, 0x30, 0x35, + 0x30, 0x31, 0x5a, 0x30, 0x81, 0x86, 0x31, 0x0b, + 0x30, 0x09, 0x06, 0x03, 0x55, 0x04, 0x06, 0x13, + 0x02, 0x55, 0x53, 0x31, 0x0b, 0x30, 0x09, 0x06, + 0x03, 0x55, 0x04, 0x08, 0x0c, 0x02, 0x4e, 0x59, + 0x31, 0x11, 0x30, 0x0f, 0x06, 0x03, 0x55, 0x04, + 0x07, 0x0c, 0x08, 0x42, 0x72, 0x6f, 0x6f, 0x6b, + 0x6c, 0x79, 0x6e, 0x31, 0x21, 0x30, 0x1f, 0x06, + 0x03, 0x55, 0x04, 0x0a, 0x0c, 0x18, 0x4d, 0x79, + 0x20, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, + 0x63, 0x61, 0x74, 0x65, 0x20, 0x41, 0x75, 0x74, + 0x68, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x31, 0x11, + 0x30, 0x0f, 0x06, 0x03, 0x55, 0x04, 0x03, 0x0c, + 0x08, 0x6d, 0x79, 0x63, 0x61, 0x2e, 0x6f, 0x72, + 0x67, 0x31, 0x21, 0x30, 0x1f, 0x06, 0x09, 0x2a, + 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x09, 0x01, + 0x16, 0x12, 0x6a, 0x76, 0x73, 0x68, 0x61, 0x68, + 0x69, 0x64, 0x40, 0x67, 0x6d, 0x61, 0x69, 0x6c, + 0x2e, 0x63, 0x6f, 0x6d, 0x30, 0x82, 0x01, 0x22, + 0x30, 0x0d, 0x06, 0x09, 0x2a, 0x86, 0x48, 0x86, + 0xf7, 0x0d, 0x01, 0x01, 0x01, 0x05, 0x00, 0x03, + 0x82, 0x01, 0x0f, 0x00, 0x30, 0x82, 0x01, 0x0a, + 0x02, 0x82, 0x01, 0x01, 0x00, 0xf0, 0xfb, 0xad, + 0x80, 0x5e, 0x37, 0xd3, 0x6d, 0xee, 0x2e, 0xcc, + 0xbc, 0x0c, 0xd7, 0x56, 0x4b, 0x56, 0x45, 0xcd, + 0x28, 0xb6, 0x22, 0xe9, 0xe2, 0x0f, 0xd1, 0x87, + 0x2a, 0x27, 0xce, 0x77, 0x8d, 0x6e, 0x0e, 0x0f, + 0xfb, 0x66, 0xe1, 0xb5, 0x0e, 0x9a, 0xb6, 0x05, + 0x8e, 0xb3, 0xe1, 0xc5, 0x77, 0x86, 0x5b, 0x46, + 0xd2, 0x0b, 0x92, 0x03, 0x1b, 0x89, 0x0c, 0x1b, + 0x10, 0x0e, 0x99, 0x8f, 0xe2, 0x17, 0xe8, 0xc2, + 0x30, 0x00, 0x47, 0xd6, 0xfc, 0xf9, 0x0f, 0x3b, + 0x75, 0x34, 0x8d, 0x4d, 0xb0, 0x99, 0xb7, 0xa0, + 0x6d, 0xa0, 0xb6, 0xad, 0xda, 0x07, 0x5e, 0x38, + 0x2e, 0x02, 0xe4, 0x30, 0x6d, 0xae, 0x13, 0x72, + 0xd4, 0xc8, 0xce, 0x14, 0x07, 0xae, 0x23, 0x8c, + 0x8f, 0x9e, 0x8c, 0x60, 0xd6, 0x06, 0xb9, 0xef, + 0x00, 0x18, 0xc0, 0x1d, 0x25, 0x1e, 0xda, 0x3e, + 0x2f, 0xcf, 0x2b, 0x56, 0x84, 0x9e, 0x30, 0x21, + 0xc7, 0x29, 0xf6, 0x03, 0x8a, 0x24, 0xf9, 0x34, + 0xac, 0x65, 0x9d, 0x80, 0x36, 0xc8, 0x3b, 0x15, + 0x10, 0xbd, 0x51, 0xe9, 0xbc, 0x02, 0xe1, 0xe9, + 0xb3, 0x5a, 0x9a, 0x99, 0x41, 0x1b, 0x27, 0xa0, + 0x4d, 0x50, 0x9e, 0x27, 0x7f, 0xa1, 0x7d, 0x09, + 0x87, 0xbd, 0x8a, 0xca, 0x5f, 0xb1, 0xa5, 0x08, + 0xb8, 0x04, 0xd4, 0x52, 0x89, 0xaa, 0xe0, 0x7d, + 0x42, 0x2e, 0x2f, 0x15, 0xee, 0x66, 0x57, 0x0f, + 0x13, 0x19, 0x45, 0xa8, 0x4b, 0x5d, 0x81, 0x66, + 0xcc, 0x12, 0x37, 0x94, 0x5e, 0xfd, 0x3c, 0x10, + 0x81, 0x51, 0x3f, 0xfa, 0x0f, 0xdd, 0xa1, 0x89, + 0x03, 0xa9, 0x78, 0x91, 0xf5, 0x3b, 0xf3, 0xbc, + 0xac, 0xbe, 0x93, 0x30, 0x2e, 0xbe, 0xca, 0x7f, + 0x46, 0xd3, 0x28, 0xb4, 0x4e, 0x91, 0x7b, 0x5b, + 0x43, 0x6c, 0xaf, 0x9b, 0x5c, 0x6a, 0x6d, 0x5a, + 0xdb, 0x79, 0x5e, 0x6a, 0x6b, 0x02, 0x03, 0x01, + 0x00, 0x01, 0xa3, 0x50, 0x30, 0x4e, 0x30, 0x1d, + 0x06, 0x03, 0x55, 0x1d, 0x0e, 0x04, 0x16, 0x04, + 0x14, 0x6b, 0x1e, 0x00, 0xa8, 0x9f, 0xfa, 0x7d, + 0x00, 0xf9, 0xe0, 0x9d, 0x0f, 0x90, 0x8c, 0x90, + 0xa8, 0xa1, 0x37, 0x6b, 0xda, 0x30, 0x1f, 0x06, + 0x03, 0x55, 0x1d, 0x23, 0x04, 0x18, 0x30, 0x16, + 0x80, 0x14, 0x6b, 0x1e, 0x00, 0xa8, 0x9f, 0xfa, + 0x7d, 0x00, 0xf9, 0xe0, 0x9d, 0x0f, 0x90, 0x8c, + 0x90, 0xa8, 0xa1, 0x37, 0x6b, 
0xda, 0x30, 0x0c, + 0x06, 0x03, 0x55, 0x1d, 0x13, 0x04, 0x05, 0x30, + 0x03, 0x01, 0x01, 0xff, 0x30, 0x0d, 0x06, 0x09, + 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x01, + 0x05, 0x05, 0x00, 0x03, 0x82, 0x01, 0x01, 0x00, + 0xcd, 0x6f, 0x73, 0x4d, 0x56, 0x0b, 0xf3, 0x2e, + 0x1c, 0xe2, 0x02, 0x0c, 0x14, 0xbb, 0x2f, 0xdd, + 0x3c, 0x43, 0xfe, 0xdf, 0x94, 0x2d, 0xa9, 0x89, + 0x81, 0x51, 0xf8, 0x5f, 0xa7, 0xa0, 0x13, 0xaa, + 0xcc, 0xb0, 0x18, 0xe2, 0x57, 0x3e, 0x0d, 0x29, + 0x93, 0xe8, 0x95, 0xd5, 0x1b, 0x53, 0xd2, 0x51, + 0xf2, 0xbd, 0xf5, 0x9e, 0x7b, 0x22, 0x65, 0x62, + 0x5c, 0xc4, 0x4c, 0x1d, 0xe8, 0xe9, 0xc3, 0xd4, + 0x2b, 0xe7, 0x78, 0xcb, 0x10, 0xf3, 0xfe, 0x06, + 0x83, 0xdc, 0x3a, 0x1e, 0x62, 0x10, 0xc0, 0x46, + 0x77, 0xc6, 0x9d, 0x9f, 0xab, 0x96, 0x25, 0x5c, + 0xfb, 0x26, 0xc1, 0x15, 0x1f, 0xa5, 0x33, 0xee, + 0x4f, 0x9a, 0x14, 0x6a, 0x14, 0x97, 0x93, 0x2b, + 0x95, 0x0b, 0xdc, 0xa8, 0xd7, 0x69, 0x2e, 0xf0, + 0x01, 0x0e, 0xfd, 0x4e, 0xd0, 0xd9, 0xa8, 0xe5, + 0x65, 0xde, 0xfb, 0xca, 0xca, 0x1c, 0x5f, 0xf9, + 0x53, 0xa0, 0x87, 0xe7, 0x33, 0x9b, 0x2f, 0xcf, + 0xe4, 0x13, 0xfc, 0xec, 0x7a, 0x6c, 0xb0, 0x90, + 0x13, 0x9b, 0xb6, 0xc5, 0x03, 0xf6, 0x0e, 0x5e, + 0xe2, 0xe4, 0x26, 0xc1, 0x7e, 0x53, 0xfe, 0x69, + 0xa3, 0xc7, 0xd8, 0x8e, 0x6e, 0x94, 0x32, 0xa0, + 0xde, 0xca, 0xb6, 0xcc, 0xd6, 0x01, 0xd5, 0x78, + 0x40, 0x28, 0x63, 0x9b, 0xee, 0xcf, 0x09, 0x3b, + 0x35, 0x04, 0xf0, 0x14, 0x02, 0xf6, 0x80, 0x0e, + 0x90, 0xb2, 0x94, 0xd2, 0x25, 0x16, 0xb8, 0x7a, + 0x76, 0x87, 0x84, 0x9f, 0x84, 0xc5, 0xaf, 0xc2, + 0x6d, 0x68, 0x7a, 0x84, 0x9c, 0xc6, 0x8a, 0x63, + 0x60, 0x87, 0x6a, 0x25, 0xc1, 0xa1, 0x78, 0x0f, + 0xba, 0xe8, 0x5f, 0xe1, 0xba, 0xac, 0xa4, 0x6f, + 0xdd, 0x09, 0x3f, 0x12, 0xcb, 0x1d, 0xf3, 0xcf, + 0x48, 0xd7, 0xd3, 0x26, 0xe8, 0x9c, 0xc3, 0x53, + 0xb3, 0xba, 0xdc, 0x32, 0x99, 0x98, 0x96, 0xd6, + 0x16, 0x03, 0x03, 0x00, 0x46, 0x10, 0x00, 0x00, + 0x42, 0x41, 0x04, 0x1e, 0x18, 0x37, 0xef, 0x0d, + 0x19, 0x51, 0x88, 0x35, 0x75, 0x71, 0xb5, 0xe5, + 0x54, 0x5b, 0x12, 0x2e, 0x8f, 0x09, 0x67, 0xfd, + 0xa7, 0x24, 0x20, 0x3e, 0xb2, 0x56, 0x1c, 0xce, + 0x97, 0x28, 0x5e, 0xf8, 0x2b, 0x2d, 0x4f, 0x9e, + 0xf1, 0x07, 0x9f, 0x6c, 0x4b, 0x5b, 0x83, 0x56, + 0xe2, 0x32, 0x42, 0xe9, 0x58, 0xb6, 0xd7, 0x49, + 0xa6, 0xb5, 0x68, 0x1a, 0x41, 0x03, 0x56, 0x6b, + 0xdc, 0x5a, 0x89, 0x16, 0x03, 0x03, 0x01, 0x08, + 0x0f, 0x00, 0x01, 0x04, 0x04, 0x01, 0x01, 0x00, + 0x7e, 0xe4, 0x65, 0x02, 0x8e, 0xb3, 0x34, 0x6a, + 0x47, 0x71, 0xd1, 0xb0, 0x8d, 0x3c, 0x0c, 0xe1, + 0xde, 0x7e, 0x5f, 0xb4, 0x15, 0x2d, 0x32, 0x0a, + 0x2a, 0xdb, 0x9b, 0x40, 0xba, 0xce, 0x8b, 0xf5, + 0x74, 0xc1, 0x68, 0x20, 0x7c, 0x87, 0x23, 0x13, + 0xc3, 0x13, 0xa7, 0xdb, 0xec, 0x59, 0xa0, 0x40, + 0x9e, 0x64, 0x03, 0x60, 0xac, 0x76, 0xff, 0x01, + 0x34, 0x7b, 0x32, 0x26, 0xd9, 0x41, 0x31, 0x93, + 0xaa, 0x30, 0x51, 0x83, 0x85, 0x40, 0xeb, 0x4e, + 0x66, 0x39, 0x83, 0xb1, 0x30, 0x0d, 0x96, 0x01, + 0xee, 0x81, 0x53, 0x5e, 0xec, 0xa9, 0xc9, 0xdf, + 0x7e, 0xc1, 0x09, 0x47, 0x8b, 0x35, 0xdb, 0x10, + 0x15, 0xd4, 0xc7, 0x5a, 0x39, 0xe3, 0xc0, 0xf3, + 0x93, 0x38, 0x11, 0xdc, 0x71, 0xbb, 0xc7, 0x62, + 0x2b, 0x85, 0xad, 0x6b, 0x4f, 0x09, 0xb3, 0x31, + 0xa8, 0xe5, 0xd1, 0xb3, 0xa9, 0x21, 0x37, 0x50, + 0xc8, 0x7d, 0xc3, 0xd2, 0xf7, 0x00, 0xd3, 0xdb, + 0x0f, 0x82, 0xf2, 0x43, 0xcf, 0x36, 0x6c, 0x98, + 0x63, 0xd8, 0x1d, 0xb3, 0xf3, 0xde, 0x63, 0x79, + 0x64, 0xf0, 0xdb, 0x46, 0x04, 0xe1, 0x1c, 0x57, + 0x0f, 0x9e, 0x96, 0xb9, 0x93, 0x45, 0x71, 0x1c, + 0x8b, 0x65, 0x7d, 0x1e, 0xad, 0xbd, 0x03, 0x51, + 0xae, 0x44, 0xef, 0x97, 0x45, 0x0d, 0x8d, 0x41, + 0x5c, 0x80, 0x7b, 0xe6, 0xe0, 
0xbc, 0xa6, 0x72, + 0x95, 0xa0, 0x97, 0xe1, 0xbb, 0xc0, 0xcc, 0xe5, + 0x1e, 0xc3, 0xbe, 0xd7, 0x42, 0x2a, 0xf3, 0x75, + 0x8a, 0x44, 0x67, 0x3c, 0xe5, 0x68, 0x78, 0xe5, + 0x40, 0x1f, 0xf0, 0x89, 0x57, 0xda, 0xee, 0x45, + 0xf4, 0x44, 0x81, 0x01, 0x77, 0xf0, 0x4a, 0x14, + 0xb1, 0x3f, 0x60, 0x2b, 0xeb, 0x42, 0x38, 0xa6, + 0xfb, 0xe5, 0x4d, 0x71, 0xdc, 0x7d, 0x0a, 0x72, + 0x56, 0x28, 0x9d, 0xa6, 0x8e, 0x74, 0x2d, 0xbd, + 0x14, 0x03, 0x03, 0x00, 0x01, 0x01, 0x16, 0x03, + 0x03, 0x00, 0x28, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x31, 0x4d, 0x58, 0x94, 0x0b, + 0x0b, 0x06, 0x5f, 0xae, 0x57, 0x17, 0x98, 0x86, + 0xaa, 0x49, 0x17, 0x7f, 0xbd, 0x41, 0x05, 0xa5, + 0x74, 0x1c, 0x58, 0xc8, 0x38, 0x2d, 0x99, 0x5d, + 0xe5, 0x12, 0x43, + }, + { + 0x14, 0x03, 0x03, 0x00, 0x01, 0x01, 0x16, 0x03, + 0x03, 0x00, 0x28, 0xf2, 0x60, 0xc2, 0x75, 0x27, + 0x64, 0xf4, 0x05, 0x98, 0xc9, 0xd3, 0xa8, 0x00, + 0x4c, 0xa0, 0x49, 0x82, 0x68, 0xf1, 0x21, 0x05, + 0x7b, 0x4b, 0x25, 0x3e, 0xe1, 0x5f, 0x0f, 0x84, + 0x26, 0x2d, 0x16, 0x2e, 0xc0, 0xfd, 0xdf, 0x0a, + 0xf4, 0xba, 0x19, + }, + { + 0x17, 0x03, 0x03, 0x00, 0x1e, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x01, 0x35, 0xef, 0x9d, + 0x6a, 0x86, 0x98, 0xc5, 0xca, 0x55, 0xca, 0x89, + 0x29, 0xb4, 0x55, 0xd4, 0x41, 0x08, 0x96, 0xe0, + 0xf3, 0x39, 0xfc, 0x15, 0x03, 0x03, 0x00, 0x1a, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, + 0x02, 0x63, 0x1b, 0xaa, 0xc6, 0xc9, 0x6d, 0x72, + 0x24, 0x10, 0x55, 0xa9, 0x8c, 0x3b, 0x23, 0xce, + 0xd8, 0x4a, + }, +} + +var testClientChainCertificate = fromHex( + "2d2d2d2d2d424547494e2050524956415445204b" + + "45592d2d2d2d2d0a4d494945766749424144414e" + + "42676b71686b6947397730424151454641415343" + + "424b67776767536b41674541416f494241514367" + + "6f2b2f4252483269343347590a4a324f7a485846" + + "51706a515679386b71772b726b6e70784a706747" + + "6266716d31657638566b6e48496c35776c74336b" + + "722f367647736163416b4c4b4c313348560a776a" + + "726d676b493369554545734c7248573470446e35" + + "633544412f56625a364e3638416d78526a6f656a" + + "30794c6a6951514673354c41664c4a4244467954" + + "766a0a5a6b64587557717452506a51634749376a" + + "75316758794c3475417a4a5153764a6747354f47" + + "2b45672f45656b724d4d2f35734b4265514d334a" + + "596e4b317156470a6b574e427854375637583950" + + "6a5162416951432b4e33742b6338707741425130" + + "766b6538736d6f6f70536d45714a3349486e646d" + + "48352b714b306662335775630a715079434e7052" + + "694456772f7367473070626a4744705262374636" + + "37656d4d6b38666e5755416a426f387951423173" + + "4542454a307a7a6636384b585a3034614a0a6952" + + "6a7a544f495241674d424141454367674542414a" + + "4b613676326b5a3144596146786e586d7369624c" + + "386734426f67514c6a42307362524a6d746b6b4d" + + "54370a685343325873537551522f446c654d7148" + + "664555786731784a717579597643544d44585972" + + "473667354a5051744d4432465a424a7239626c65" + + "467138386c706a0a543766514e793571354c2b4f" + + "682f6b62433835436e623641753641656978776d" + + "2b6e77665a4f3766726b6278306d35516b715975" + + "5739392f452b69502b454e570a76396a68773436" + + "76515065563236494b79717656462b4f7362722f" + + "6152316138707948336361566e3579594a433346" + + "5855756c6f5a77516331714a6b4c434c4c0a375a" + + "49744f525a78514c486d4d4a654d44722f5a4942" + + "34675467645650636145375a4d5141714d6d3066" + + "4c6b6d7671723149526b77642f6831455a645650" + + "79320a742f6b6b43413039566336663749556575" + + "6f67706d705a50303130564e376b6277394a6348" + + "75544561564543675945417a47395679426e6d62" + + "6858496c57764f0a71583747524f2f5231636a2b" + + "6b564e35377876674b54756b35592b7a4d774a48" + + 
"32626c57435945513251753974446c476854756b" + + "664273385746772b6e6263460a7a6f706d535245" + + "6c6d464d2f6141536d464733574e5a7072696a68" + + "504b77726338376470636b31703131635a415478" + + "5a413168566d43743457616343673634690a4d74" + + "64507a334e2f34416147664956794d2b69624949" + + "35332f515543675945417953693556735a356f6a" + + "644a795077426e6c6142554231686f2b336b7068" + + "70770a7264572b2b4d796b51494a345564534437" + + "3052486e5a315839754359713978616671746c51" + + "664c44395963442f436d665264706461586c5673" + + "5249467a5a556c0a454630557149644e77337046" + + "68634f4a6d6e5a3241434470434342476f763542" + + "6e3068302b3137686a4b376f69315833716e4542" + + "7857326c7462593476556a500a44394c5330666e" + + "4a76703043675942504a527330714c4a4a464333" + + "6669796b712f57574d38727474354b364a584b50" + + "734b674b53644144577a7463316645434d0a7a65" + + "2b394a6a5a376b4d77557063666a644c2b745047" + + "3455563048326c524375635735414131396d7058" + + "50367454494733713737655a6b416e65516f6163" + + "41340a716c3073583051476c6a5763414e30464b" + + "6f4759733975582b6378445a6e7265362f52392f" + + "3930567766443237454c57546373677734633463" + + "514b42675143420a6f5432326e745a5a59396d6e" + + "72455a36752f492f4a332f35664e396737783733" + + "3177746e463745745a5361575453587364597256" + + "466b564f6362505135494a6f0a714a6a7249372b" + + "474a4d69376f6a4c69642f4c45656f31764f3163" + + "454158334f43723236554e38612f6c7434394f5a" + + "69354c337348556b756c475951755671650a6737" + + "6e6e4632437749544c34503645486443575a4461" + + "7a4136626d7375524f2b6462536e335a6c567651" + + "4b42674859524c5a665458536c44755264776977" + + "746b0a513148546b6d6b57694156726c4f577864" + + "5858456d546130303045574c46446145797a7358" + + "7834424863357166776b5a4e746b634a56396e58" + + "63536e647441530a35767a427a676e797a4f7962" + + "68315878484a3966427472414f3847555878446c" + + "6634394457616753393449763072596e616b7656" + + "2f673039786875415763366e0a5365757230576b" + + "5376453847666653734d485149584c456b0a2d2d" + + "2d2d2d454e442050524956415445204b45592d2d" + + "2d2d2d0a2d2d2d2d2d424547494e204345525449" + + "4649434154452d2d2d2d2d0a4d494944656a4343" + + "416d494343514330523168584b326649776a414e" + + "42676b71686b6947397730424151554641444342" + + "6744454c4d416b474131554542684d430a56564d" + + "78437a414a42674e564241674d416b355a4d5245" + + "77447759445651514844416843636d3976613278" + + "35626a45564d424d47413155454367774d54586b" + + "670a51304567513278705a5735304d5263774651" + + "5944565151444441357465574e68593278705a57" + + "35304c6d4e76625445684d423847435371475349" + + "62334451454a0a41525953616e5a7a6147466f61" + + "5752415a32316861577775593239744d42345844" + + "54457a4d4455794e6a49784e4451774d466f5844" + + "54457a4d4459794e5449780a4e4451774d466f77" + + "6654454c4d416b474131554542684d4356564d78" + + "4554415042674e564241674d4345356c6479425a" + + "62334a724d52457744775944565151480a444168" + + "43636d397661327835626a45514d413447413155" + + "454367774854586b67544756685a6a45544d4245" + + "47413155454177774b62586c735a57466d4c6d4e" + + "760a625445684d42384743537147534962334451" + + "454a41525953616e5a7a6147466f615752415a32" + + "316861577775593239744d494942496a414e4267" + + "6b71686b69470a397730424151454641414f4341" + + "5138414d49494243674b43415145416f4b507677" + + "5552396f754e786d43646a73783178554b593046" + + "63764a4b735071354a36630a536159426d333670" + + "7458722f465a4a78794a65634a6264354b2f2b72" + + "7872476e414a43796939647831634936356f4a43" + + "4e346c42424c43367831754b51352b580a4f5177" + + "50315732656a6576414a73555936486f394d6934" + + 
"346b4542624f5377487979515178636b3734325a" + + "4856376c7172555434304842694f343774594638" + + "690a2b4c674d7955457279594275546876684950" + + "7848704b7a44502b624367586b444e79574a7974" + + "616c5270466a5163552b3165312f543430477749" + + "6b41766a64370a666e504b634141554e4c354876" + + "4c4a714b4b5570684b6964794235335a682b6671" + + "697448323931726e4b6a38676a61555967316350" + + "374942744b5734786736550a572b78657533706a" + + "4a504835316c41497761504d6b41646242415243" + + "644d38332b76436c32644f4769596b5938307a69" + + "45514944415141424d413047435371470a534962" + + "3344514542425155414134494241514351752f6c" + + "65756863667243476661307047304730386a7a33" + + "34586a357972364161382f2b4a72467436347045" + + "710a493458475455646e4151696f425230425946" + + "42665761332b6538594d564a426f634763753759" + + "6634615971734d7635766b426b715a4932435a67" + + "5644694f37790a4d4f326b6a372f575679445551" + + "7831536c6d2b75435a5942556a6a6a72356e5833" + + "42535a7849734f42412b7a46425455705a506879" + + "597142373250384e6e63460a427641714241712b" + + "4c73364250534f6832746a72787570657a796732" + + "55544756586b414537617a4279465a70682b7737" + + "417a36644430784d363965364a742f6a0a336844" + + "756b324b4e63314a752f7a63326d487374566b79" + + "364362696e384473576763726251367673544735" + + "3877517369496b4d6474677a4275632f6b552b34" + + "640a506f696e4537352f766135797a38316a3073" + + "4d59574a4b697262554a6e5a454433547a69484e" + + "35340a2d2d2d2d2d454e44204345525449464943" + + "4154452d2d2d2d2d0a2d2d2d2d2d424547494e20" + + "43455254494649434154452d2d2d2d2d0a4d4949" + + "4468444343416d7743435143723761626b536973" + + "722b44414e42676b71686b694739773042415155" + + "4641444342686a454c4d416b474131554542684d" + + "430a56564d78437a414a42674e564241674d416b" + + "355a4d524577447759445651514844416843636d" + + "397661327835626a45684d423847413155454367" + + "775954586b670a5132567964476c6d61574e6864" + + "4755675158563061473979615852354d52457744" + + "775944565151444441687465574e684c6d39795a" + + "7a45684d423847435371470a534962334451454a" + + "41525953616e5a7a6147466f615752415a323168" + + "61577775593239744d4234584454457a4d445579" + + "4e6a49784d5467304d466f584454457a0a4d4459" + + "794e5449784d5467304d466f7767594178437a41" + + "4a42674e5642415954416c56544d517377435159" + + "445651514944414a4f575445524d413847413155" + + "450a42777749516e4a7662327473655734784654" + + "415442674e5642416f4d4445313549454e424945" + + "4e7361575675644445584d425547413155454177" + + "774f62586c6a0a59574e73615756756443356a62" + + "3230784954416642676b71686b69473977304243" + + "514557456d70326332686861476c6b5147647459" + + "576c734c6d4e76625443430a415349774451594a" + + "4b6f5a496876634e415145424251414467674550" + + "4144434341516f4367674542414d345438484b77" + + "596367594e34704250534368484d752f0a396a74" + + "304a697157456578546f63783964315a46447a61" + + "33386b6953476d4c4d747343684c30517277596e" + + "4c6268376256354c566c32434d51537a5a495037" + + "700a4834373866774a454479694231677a4e7650" + + "4258624d796e75676167707048613730614b5941" + + "3953624a42736a455376734a3251756946596f44" + + "7a75564c55700a4a68384b724f3949614450514d" + + "39434c477578754c37564b553849613076465142" + + "566c6332646f44436b6533336663366166564f36" + + "6b7243796c5377693362680a416931535a376e64" + + "554d6b37427951696167416457494f6f374a5878" + + "32754a7a6f4b4679594a364755387446714d4b67" + + "554b425431767759684c564b4a7443690a717444" + + "2f747634366e4c555a4f7a2f685341326b43552b" + + "447963444a7067745948787837724b4a43764748" + + "3049596f41326853675941502b6b784a73567330" + + 
"430a417745414154414e42676b71686b69473977" + + "30424151554641414f43415145414a536b374873" + + "4e594d75596a794f3459384231696254745a6d54" + + "722b535849480a5031695432384376734c4e6330" + + "567959794f704b3546687a445666464533786365" + + "5762614242336c6d4e6f3152305377306e706d6e" + + "63314270592b68456249610a6838444e56653230" + + "657a4e79362f666a6534734368756b724a6a4b66" + + "6d66484c6b36753546724f617369495449523962" + + "7a4b4a5a75326e79754165417a677a330a6d4579" + + "4677705a7149675870766b6977416c74704b4269" + + "496c755058786e7254365a6e2f6e634e68545a71" + + "573873597a54655664576d686b576f49315a5358" + + "6a0a6a46757739705a57764c2b58646b746d5249" + + "476b784b637878614650364b544b495055425735" + + "6c5057765477654c397853645878776149592f58" + + "4a62467569530a787a6449722b346b2f44554c77" + + "7430467832366a4b62737066644d726c49444451" + + "464d4f413151396534764f2b6151444a32507355" + + "513d3d0a2d2d2d2d2d454e442043455254494649" + + "434154452d2d2d2d2d0a2d2d2d2d2d424547494e" + + "2043455254494649434154452d2d2d2d2d0a4d49" + + "49443454434341736d67417749424167494a414d" + + "7769544575596f6f6a384d413047435371475349" + + "623344514542425155414d4947474d5173774351" + + "59440a5651514745774a56557a454c4d416b4741" + + "31554543417743546c6b784554415042674e5642" + + "41634d43454a796232397262486c754d53457748" + + "7759445651514b0a4442684e655342445a584a30" + + "61575a70593246305a5342426458526f62334a70" + + "64486b784554415042674e5642414d4d43473135" + + "5932457562334a6e4d5345770a4877594a4b6f5a" + + "496876634e41516b4246684a71646e4e6f595768" + + "705a45426e625746706243356a62323077486863" + + "4e4d544d774e5449324d6a45774e5441780a5768" + + "634e4d6a4d774e5449304d6a45774e544178576a" + + "4342686a454c4d416b474131554542684d435656" + + "4d78437a414a42674e564241674d416b355a4d52" + + "45770a447759445651514844416843636d397661" + + "327835626a45684d423847413155454367775954" + + "586b675132567964476c6d61574e686447556751" + + "585630614739790a615852354d52457744775944" + + "565151444441687465574e684c6d39795a7a4568" + + "4d42384743537147534962334451454a41525953" + + "616e5a7a6147466f615752410a5a323168615777" + + "75593239744d494942496a414e42676b71686b69" + + "47397730424151454641414f43415138414d4949" + + "4243674b434151454138507574674634330a3032" + + "33754c737938444e645753315a467a5369324975" + + "6e69443947484b69664f6434317544672f375a75" + + "4731447071324259367a34635633686c74473067" + + "75530a4178754a4442735144706d503468666f77" + + "6a4141523962382b5138376454534e5462435a74" + + "3642746f4c6174326764654f4334433544427472" + + "684e79314d6a4f0a46416575493479506e6f7867" + + "31676135377741597742306c48746f2b4c383872" + + "566f53654d4348484b665944696954354e4b786c" + + "6e59413279447356454c31520a3662774334656d" + + "7a5770715a5152736e6f4531516e69642f6f5830" + + "4a6837324b796c2b7870516934424e5253696172" + + "67665549754c7858755a6c635045786c460a7145" + + "74646757624d456a65555876303845494652502f" + + "6f503361474a41366c346b665537383779737670" + + "4d774c72374b663062544b4c524f6b5874625132" + + "79760a6d31787162567262655635716177494441" + + "5141426f314177546a416442674e564851344546" + + "67515561783441714a2f3666514435344a30506b" + + "497951714b45330a61396f77487759445652306a" + + "42426777466f415561783441714a2f3666514435" + + "344a30506b497951714b453361396f7744415944" + + "5652305442415577417745420a2f7a414e42676b" + + "71686b6947397730424151554641414f43415145" + + "417a57397a5456594c387934633467494d464c73" + + "76335478442f742b554c616d4a675648340a5836" + + "65674536724d73426a69567a344e4b5a506f6c64" + + 
"556255394a52387233316e6e73695a574a637845" + + "7764364f6e443143766e654d7351382f34476739" + + "77360a486d495177455a33787032667135596c58" + + "50736d775255667054507554356f55616853586b" + + "7975564339796f31326b753841454f2f55375132" + + "616a6c5a6437370a79736f63582f6c546f49666e" + + "4d3573767a2b51542f4f7836624c435145357532" + + "78515032446c376935436242666c502b61615048" + + "324935756c444b67337371320a7a4e5942315868" + + "414b474f623773384a4f7a554538425143396f41" + + "4f6b4c4b55306955577548703268345366684d57" + + "76776d316f656f5363786f706a594964710a4a63" + + "476865412b3636462f687571796b6239304a5078" + + "4c4c48665050534e66544a75696377314f7a7574" + + "77796d5a695731673d3d0a2d2d2d2d2d454e4420" + + "43455254494649434154452d2d2d2d2d0a", +) + +// Script of interaction with openssl implementation: +// +// openssl s_server -cipher ECDHE-ECDSA-AES128-SHA \ +// -key server.key -cert server.crt -port 10443 +// +// The values for this test are obtained by building and running in client mode: +// % go test -test.run "TestRunClient" -connect -ciphersuites=0xc009 +// The recorded bytes are written to stdout. +// +// The server private key is: +// +// -----BEGIN EC PARAMETERS----- +// BgUrgQQAIw== +// -----END EC PARAMETERS----- +// -----BEGIN EC PRIVATE KEY----- +// MIHcAgEBBEIBmIPpCa0Kyeo9M/nq5mHxeFIGlw+MqakWcvHu3Keo7xK9ZWG7JG3a +// XfS01efjqSZJvF2DoL+Sly4A5iBn0Me9mdegBwYFK4EEACOhgYkDgYYABADEoe2+ +// mPkLSHM2fsMWVhEi8j1TwztNIT3Na3Xm9rDcmt8mwbyyh/ByMnyzZC8ckLzqaCMQ +// fv7jJcBIOmngKG3TNwDvBGLdDaCccGKD2IHTZDGqnpcxvZawaMCbI952ZD8aXH/p +// Eg5YWLZfcN2b2OrV1/XVzLm2nzBmW2aaIOIn5b/+Ow== +// -----END EC PRIVATE KEY----- +// +// and certificate is: +// +// -----BEGIN CERTIFICATE----- +// MIICADCCAWICCQC4vy1HoNLr9DAJBgcqhkjOPQQBMEUxCzAJBgNVBAYTAkFVMRMw +// EQYDVQQIEwpTb21lLVN0YXRlMSEwHwYDVQQKExhJbnRlcm5ldCBXaWRnaXRzIFB0 +// eSBMdGQwHhcNMTIxMTIyMTUwNjMyWhcNMjIxMTIwMTUwNjMyWjBFMQswCQYDVQQG +// EwJBVTETMBEGA1UECBMKU29tZS1TdGF0ZTEhMB8GA1UEChMYSW50ZXJuZXQgV2lk +// Z2l0cyBQdHkgTHRkMIGbMBAGByqGSM49AgEGBSuBBAAjA4GGAAQAxKHtvpj5C0hz +// Nn7DFlYRIvI9U8M7TSE9zWt15vaw3JrfJsG8sofwcjJ8s2QvHJC86mgjEH7+4yXA +// SDpp4Cht0zcA7wRi3Q2gnHBig9iB02Qxqp6XMb2WsGjAmyPedmQ/Glx/6RIOWFi2 +// X3Ddm9jq1df11cy5tp8wZltmmiDiJ+W//jswCQYHKoZIzj0EAQOBjAAwgYgCQgGI +// ok/r4kXFSH0brPXtmJ2uR3DAXhu2L73xtk23YUDTEaLO7gt+kn7/dp3DO36lP876 +// EOJZ7EctfKzaTpcOFaBv0AJCAU38vmcTnC0FDr0/o4wlwTMTgw2UBrvUN3r27HrJ +// hi7d1xFpf4V8Vt77MXgr5Md4Da7Lvp5ONiQxe2oPOZUSB48q +// -----END CERTIFICATE----- +var ecdheECDSAAESClientScript = [][]byte{ + { + 0x16, 0x03, 0x01, 0x00, 0x4a, 0x01, 0x00, 0x00, + 0x46, 0x03, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xc0, 0x09, + 0x01, 0x00, 0x00, 0x1b, 0x00, 0x05, 0x00, 0x05, + 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0a, 0x00, + 0x08, 0x00, 0x06, 0x00, 0x17, 0x00, 0x18, 0x00, + 0x19, 0x00, 0x0b, 0x00, 0x02, 0x01, 0x00, + }, + { + 0x16, 0x03, 0x01, 0x00, 0x54, 0x02, 0x00, 0x00, + 0x50, 0x03, 0x01, 0x50, 0xd7, 0x19, 0xc9, 0x03, + 0xc2, 0x3a, 0xc6, 0x1f, 0x0a, 0x84, 0x9e, 0xd7, + 0xf4, 0x7e, 0x07, 0x6d, 0xa8, 0xe4, 0xa9, 0x4f, + 0x22, 0x50, 0xa2, 0x19, 0x24, 0x44, 0x42, 0x65, + 0xaa, 0xba, 0x3a, 0x20, 0x90, 0x70, 0xb7, 0xe5, + 0x57, 0xed, 0xb1, 0xb1, 0x43, 0x4b, 0xa1, 0x4e, + 0xee, 0x7a, 0x5b, 0x88, 0xf6, 0xa6, 0x73, 0x3b, + 0xcb, 0xa7, 0xbd, 0x57, 0x50, 0xf2, 0x72, 0x8c, + 0xbc, 0x45, 0x73, 0xaa, 0xc0, 0x09, 0x00, 0x00, + 0x08, 0x00, 0x0b, 0x00, 0x04, 0x03, 
0x00, 0x01, + 0x02, 0x16, 0x03, 0x01, 0x02, 0x0e, 0x0b, 0x00, + 0x02, 0x0a, 0x00, 0x02, 0x07, 0x00, 0x02, 0x04, + 0x30, 0x82, 0x02, 0x00, 0x30, 0x82, 0x01, 0x62, + 0x02, 0x09, 0x00, 0xb8, 0xbf, 0x2d, 0x47, 0xa0, + 0xd2, 0xeb, 0xf4, 0x30, 0x09, 0x06, 0x07, 0x2a, + 0x86, 0x48, 0xce, 0x3d, 0x04, 0x01, 0x30, 0x45, + 0x31, 0x0b, 0x30, 0x09, 0x06, 0x03, 0x55, 0x04, + 0x06, 0x13, 0x02, 0x41, 0x55, 0x31, 0x13, 0x30, + 0x11, 0x06, 0x03, 0x55, 0x04, 0x08, 0x13, 0x0a, + 0x53, 0x6f, 0x6d, 0x65, 0x2d, 0x53, 0x74, 0x61, + 0x74, 0x65, 0x31, 0x21, 0x30, 0x1f, 0x06, 0x03, + 0x55, 0x04, 0x0a, 0x13, 0x18, 0x49, 0x6e, 0x74, + 0x65, 0x72, 0x6e, 0x65, 0x74, 0x20, 0x57, 0x69, + 0x64, 0x67, 0x69, 0x74, 0x73, 0x20, 0x50, 0x74, + 0x79, 0x20, 0x4c, 0x74, 0x64, 0x30, 0x1e, 0x17, + 0x0d, 0x31, 0x32, 0x31, 0x31, 0x32, 0x32, 0x31, + 0x35, 0x30, 0x36, 0x33, 0x32, 0x5a, 0x17, 0x0d, + 0x32, 0x32, 0x31, 0x31, 0x32, 0x30, 0x31, 0x35, + 0x30, 0x36, 0x33, 0x32, 0x5a, 0x30, 0x45, 0x31, + 0x0b, 0x30, 0x09, 0x06, 0x03, 0x55, 0x04, 0x06, + 0x13, 0x02, 0x41, 0x55, 0x31, 0x13, 0x30, 0x11, + 0x06, 0x03, 0x55, 0x04, 0x08, 0x13, 0x0a, 0x53, + 0x6f, 0x6d, 0x65, 0x2d, 0x53, 0x74, 0x61, 0x74, + 0x65, 0x31, 0x21, 0x30, 0x1f, 0x06, 0x03, 0x55, + 0x04, 0x0a, 0x13, 0x18, 0x49, 0x6e, 0x74, 0x65, + 0x72, 0x6e, 0x65, 0x74, 0x20, 0x57, 0x69, 0x64, + 0x67, 0x69, 0x74, 0x73, 0x20, 0x50, 0x74, 0x79, + 0x20, 0x4c, 0x74, 0x64, 0x30, 0x81, 0x9b, 0x30, + 0x10, 0x06, 0x07, 0x2a, 0x86, 0x48, 0xce, 0x3d, + 0x02, 0x01, 0x06, 0x05, 0x2b, 0x81, 0x04, 0x00, + 0x23, 0x03, 0x81, 0x86, 0x00, 0x04, 0x00, 0xc4, + 0xa1, 0xed, 0xbe, 0x98, 0xf9, 0x0b, 0x48, 0x73, + 0x36, 0x7e, 0xc3, 0x16, 0x56, 0x11, 0x22, 0xf2, + 0x3d, 0x53, 0xc3, 0x3b, 0x4d, 0x21, 0x3d, 0xcd, + 0x6b, 0x75, 0xe6, 0xf6, 0xb0, 0xdc, 0x9a, 0xdf, + 0x26, 0xc1, 0xbc, 0xb2, 0x87, 0xf0, 0x72, 0x32, + 0x7c, 0xb3, 0x64, 0x2f, 0x1c, 0x90, 0xbc, 0xea, + 0x68, 0x23, 0x10, 0x7e, 0xfe, 0xe3, 0x25, 0xc0, + 0x48, 0x3a, 0x69, 0xe0, 0x28, 0x6d, 0xd3, 0x37, + 0x00, 0xef, 0x04, 0x62, 0xdd, 0x0d, 0xa0, 0x9c, + 0x70, 0x62, 0x83, 0xd8, 0x81, 0xd3, 0x64, 0x31, + 0xaa, 0x9e, 0x97, 0x31, 0xbd, 0x96, 0xb0, 0x68, + 0xc0, 0x9b, 0x23, 0xde, 0x76, 0x64, 0x3f, 0x1a, + 0x5c, 0x7f, 0xe9, 0x12, 0x0e, 0x58, 0x58, 0xb6, + 0x5f, 0x70, 0xdd, 0x9b, 0xd8, 0xea, 0xd5, 0xd7, + 0xf5, 0xd5, 0xcc, 0xb9, 0xb6, 0x9f, 0x30, 0x66, + 0x5b, 0x66, 0x9a, 0x20, 0xe2, 0x27, 0xe5, 0xbf, + 0xfe, 0x3b, 0x30, 0x09, 0x06, 0x07, 0x2a, 0x86, + 0x48, 0xce, 0x3d, 0x04, 0x01, 0x03, 0x81, 0x8c, + 0x00, 0x30, 0x81, 0x88, 0x02, 0x42, 0x01, 0x88, + 0xa2, 0x4f, 0xeb, 0xe2, 0x45, 0xc5, 0x48, 0x7d, + 0x1b, 0xac, 0xf5, 0xed, 0x98, 0x9d, 0xae, 0x47, + 0x70, 0xc0, 0x5e, 0x1b, 0xb6, 0x2f, 0xbd, 0xf1, + 0xb6, 0x4d, 0xb7, 0x61, 0x40, 0xd3, 0x11, 0xa2, + 0xce, 0xee, 0x0b, 0x7e, 0x92, 0x7e, 0xff, 0x76, + 0x9d, 0xc3, 0x3b, 0x7e, 0xa5, 0x3f, 0xce, 0xfa, + 0x10, 0xe2, 0x59, 0xec, 0x47, 0x2d, 0x7c, 0xac, + 0xda, 0x4e, 0x97, 0x0e, 0x15, 0xa0, 0x6f, 0xd0, + 0x02, 0x42, 0x01, 0x4d, 0xfc, 0xbe, 0x67, 0x13, + 0x9c, 0x2d, 0x05, 0x0e, 0xbd, 0x3f, 0xa3, 0x8c, + 0x25, 0xc1, 0x33, 0x13, 0x83, 0x0d, 0x94, 0x06, + 0xbb, 0xd4, 0x37, 0x7a, 0xf6, 0xec, 0x7a, 0xc9, + 0x86, 0x2e, 0xdd, 0xd7, 0x11, 0x69, 0x7f, 0x85, + 0x7c, 0x56, 0xde, 0xfb, 0x31, 0x78, 0x2b, 0xe4, + 0xc7, 0x78, 0x0d, 0xae, 0xcb, 0xbe, 0x9e, 0x4e, + 0x36, 0x24, 0x31, 0x7b, 0x6a, 0x0f, 0x39, 0x95, + 0x12, 0x07, 0x8f, 0x2a, 0x16, 0x03, 0x01, 0x00, + 0xd6, 0x0c, 0x00, 0x00, 0xd2, 0x03, 0x00, 0x17, + 0x41, 0x04, 0x33, 0xed, 0xe1, 0x10, 0x3d, 0xe2, + 0xb0, 0x81, 0x5e, 0x01, 0x1b, 0x00, 0x4a, 0x7d, + 0xdc, 0xc5, 0x78, 0x02, 0xb1, 0x9a, 
0x78, 0x92, + 0x34, 0xd9, 0x23, 0xcc, 0x01, 0xfb, 0x0c, 0x49, + 0x1c, 0x4a, 0x59, 0x8a, 0x80, 0x1b, 0x34, 0xf0, + 0xe8, 0x87, 0x1b, 0x7c, 0xfb, 0x72, 0xf5, 0xea, + 0xf9, 0xf3, 0xff, 0xa6, 0x3e, 0x4e, 0xac, 0xbc, + 0xee, 0x14, 0x2b, 0x87, 0xd4, 0x0b, 0xda, 0x19, + 0x60, 0x2b, 0x00, 0x8b, 0x30, 0x81, 0x88, 0x02, + 0x42, 0x01, 0x75, 0x46, 0x4f, 0x97, 0x9f, 0xc5, + 0xf9, 0x4c, 0x38, 0xcf, 0x3b, 0x37, 0x1a, 0x6b, + 0x53, 0xfc, 0x05, 0x73, 0x7d, 0x98, 0x2c, 0x5b, + 0x76, 0xd4, 0x37, 0x1f, 0x50, 0x6d, 0xad, 0xc6, + 0x0f, 0x8f, 0x7b, 0xcc, 0x60, 0x8e, 0x04, 0x00, + 0x21, 0x80, 0xa8, 0xa5, 0x98, 0xf2, 0x42, 0xf2, + 0xc3, 0xf6, 0x44, 0x50, 0xc4, 0x7a, 0xae, 0x6f, + 0x74, 0xa0, 0x7f, 0x07, 0x7a, 0x0b, 0xbb, 0x41, + 0x9e, 0x3c, 0x0b, 0x02, 0x42, 0x01, 0xbe, 0x64, + 0xaa, 0x12, 0x03, 0xfb, 0xd8, 0x4f, 0x93, 0xf9, + 0x92, 0x54, 0x0d, 0x9c, 0x9d, 0x53, 0x88, 0x19, + 0x69, 0x94, 0xfc, 0xd6, 0xf7, 0x60, 0xcf, 0x70, + 0x64, 0x15, 0x1b, 0x02, 0x22, 0x56, 0xb0, 0x2c, + 0xb1, 0x72, 0x4c, 0x9e, 0x7b, 0xf0, 0x53, 0x97, + 0x43, 0xac, 0x11, 0x62, 0xe5, 0x5a, 0xf1, 0x7e, + 0x87, 0x8f, 0x5c, 0x43, 0x1d, 0xae, 0x56, 0x28, + 0xdb, 0x76, 0x15, 0xd8, 0x1c, 0x73, 0xce, 0x16, + 0x03, 0x01, 0x00, 0x04, 0x0e, 0x00, 0x00, 0x00, + }, + { + 0x16, 0x03, 0x01, 0x00, 0x46, 0x10, 0x00, 0x00, + 0x42, 0x41, 0x04, 0x1e, 0x18, 0x37, 0xef, 0x0d, + 0x19, 0x51, 0x88, 0x35, 0x75, 0x71, 0xb5, 0xe5, + 0x54, 0x5b, 0x12, 0x2e, 0x8f, 0x09, 0x67, 0xfd, + 0xa7, 0x24, 0x20, 0x3e, 0xb2, 0x56, 0x1c, 0xce, + 0x97, 0x28, 0x5e, 0xf8, 0x2b, 0x2d, 0x4f, 0x9e, + 0xf1, 0x07, 0x9f, 0x6c, 0x4b, 0x5b, 0x83, 0x56, + 0xe2, 0x32, 0x42, 0xe9, 0x58, 0xb6, 0xd7, 0x49, + 0xa6, 0xb5, 0x68, 0x1a, 0x41, 0x03, 0x56, 0x6b, + 0xdc, 0x5a, 0x89, 0x14, 0x03, 0x01, 0x00, 0x01, + 0x01, 0x16, 0x03, 0x01, 0x00, 0x30, 0x1a, 0x45, + 0x92, 0x3b, 0xac, 0x8d, 0x91, 0x89, 0xd3, 0x2c, + 0xf4, 0x3c, 0x5f, 0x70, 0xf1, 0x79, 0xa5, 0x6a, + 0xcf, 0x97, 0x8f, 0x3f, 0x73, 0x08, 0xca, 0x3f, + 0x55, 0xb0, 0x28, 0xd1, 0x6f, 0xcd, 0x9b, 0xca, + 0xb6, 0xb7, 0xd0, 0xa5, 0x21, 0x5b, 0x08, 0xf8, + 0x42, 0xe2, 0xdf, 0x25, 0x6a, 0x16, + }, + { + 0x14, 0x03, 0x01, 0x00, 0x01, 0x01, 0x16, 0x03, + 0x01, 0x00, 0x30, 0x30, 0x83, 0xb6, 0x51, 0x8a, + 0x85, 0x4a, 0xee, 0xe4, 0xb6, 0xae, 0xf3, 0xc1, + 0xdc, 0xd2, 0x04, 0xb3, 0xd0, 0x25, 0x47, 0x5f, + 0xac, 0x83, 0xa3, 0x7d, 0xcf, 0x47, 0x92, 0xed, + 0x92, 0x6c, 0xd1, 0x6e, 0xfd, 0x63, 0xf5, 0x2d, + 0x89, 0xd8, 0x04, 0x8c, 0x62, 0x71, 0xae, 0x5e, + 0x32, 0x48, 0xf8, + }, + { + 0x17, 0x03, 0x01, 0x00, 0x20, 0xcf, 0x5e, 0xba, + 0xf4, 0x47, 0x32, 0x35, 0x9b, 0x85, 0xdc, 0xb3, + 0xff, 0x77, 0x90, 0xd9, 0x2b, 0xbd, 0x59, 0x2a, + 0x33, 0xe4, 0x6e, 0x9b, 0xfc, 0x1c, 0x73, 0x3f, + 0x5e, 0x1e, 0xe3, 0xa4, 0xc2, 0x17, 0x03, 0x01, + 0x00, 0x20, 0x05, 0xdf, 0x2d, 0x9b, 0x29, 0x7f, + 0x97, 0xcd, 0x49, 0x04, 0x53, 0x22, 0x1a, 0xa1, + 0xa1, 0xe6, 0x38, 0x3a, 0x56, 0x37, 0x1f, 0xd8, + 0x3a, 0x12, 0x2c, 0xf0, 0xeb, 0x61, 0x35, 0x76, + 0xe5, 0xf0, 0x15, 0x03, 0x01, 0x00, 0x20, 0xa5, + 0x56, 0xb5, 0x49, 0x4b, 0xc2, 0xd4, 0x4c, 0xf6, + 0x95, 0x15, 0x7d, 0x41, 0x1d, 0x5c, 0x00, 0x0e, + 0x20, 0xb1, 0x0a, 0xbc, 0xc9, 0x2a, 0x09, 0x17, + 0xb4, 0xaa, 0x1c, 0x79, 0xda, 0x79, 0x27, + }, +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/core/tls/handshake_messages.go b/vendor/github.com/Azure/azure-sdk-for-go/core/tls/handshake_messages.go new file mode 100644 index 000000000..8197391e3 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/core/tls/handshake_messages.go @@ -0,0 +1,1304 @@ +// Copyright 2009 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package tls + +import "bytes" + +type clientHelloMsg struct { + raw []byte + vers uint16 + random []byte + sessionId []byte + cipherSuites []uint16 + compressionMethods []uint8 + nextProtoNeg bool + serverName string + ocspStapling bool + supportedCurves []uint16 + supportedPoints []uint8 + ticketSupported bool + sessionTicket []uint8 + signatureAndHashes []signatureAndHash +} + +func (m *clientHelloMsg) equal(i interface{}) bool { + m1, ok := i.(*clientHelloMsg) + if !ok { + return false + } + + return bytes.Equal(m.raw, m1.raw) && + m.vers == m1.vers && + bytes.Equal(m.random, m1.random) && + bytes.Equal(m.sessionId, m1.sessionId) && + eqUint16s(m.cipherSuites, m1.cipherSuites) && + bytes.Equal(m.compressionMethods, m1.compressionMethods) && + m.nextProtoNeg == m1.nextProtoNeg && + m.serverName == m1.serverName && + m.ocspStapling == m1.ocspStapling && + eqUint16s(m.supportedCurves, m1.supportedCurves) && + bytes.Equal(m.supportedPoints, m1.supportedPoints) && + m.ticketSupported == m1.ticketSupported && + bytes.Equal(m.sessionTicket, m1.sessionTicket) && + eqSignatureAndHashes(m.signatureAndHashes, m1.signatureAndHashes) +} + +func (m *clientHelloMsg) marshal() []byte { + if m.raw != nil { + return m.raw + } + + length := 2 + 32 + 1 + len(m.sessionId) + 2 + len(m.cipherSuites)*2 + 1 + len(m.compressionMethods) + numExtensions := 0 + extensionsLength := 0 + if m.nextProtoNeg { + numExtensions++ + } + if m.ocspStapling { + extensionsLength += 1 + 2 + 2 + numExtensions++ + } + if len(m.serverName) > 0 { + extensionsLength += 5 + len(m.serverName) + numExtensions++ + } + if len(m.supportedCurves) > 0 { + extensionsLength += 2 + 2*len(m.supportedCurves) + numExtensions++ + } + if len(m.supportedPoints) > 0 { + extensionsLength += 1 + len(m.supportedPoints) + numExtensions++ + } + if m.ticketSupported { + extensionsLength += len(m.sessionTicket) + numExtensions++ + } + if len(m.signatureAndHashes) > 0 { + extensionsLength += 2 + 2*len(m.signatureAndHashes) + numExtensions++ + } + if numExtensions > 0 { + extensionsLength += 4 * numExtensions + length += 2 + extensionsLength + } + + x := make([]byte, 4+length) + x[0] = typeClientHello + x[1] = uint8(length >> 16) + x[2] = uint8(length >> 8) + x[3] = uint8(length) + x[4] = uint8(m.vers >> 8) + x[5] = uint8(m.vers) + copy(x[6:38], m.random) + x[38] = uint8(len(m.sessionId)) + copy(x[39:39+len(m.sessionId)], m.sessionId) + y := x[39+len(m.sessionId):] + y[0] = uint8(len(m.cipherSuites) >> 7) + y[1] = uint8(len(m.cipherSuites) << 1) + for i, suite := range m.cipherSuites { + y[2+i*2] = uint8(suite >> 8) + y[3+i*2] = uint8(suite) + } + z := y[2+len(m.cipherSuites)*2:] + z[0] = uint8(len(m.compressionMethods)) + copy(z[1:], m.compressionMethods) + + z = z[1+len(m.compressionMethods):] + if numExtensions > 0 { + z[0] = byte(extensionsLength >> 8) + z[1] = byte(extensionsLength) + z = z[2:] + } + if m.nextProtoNeg { + z[0] = byte(extensionNextProtoNeg >> 8) + z[1] = byte(extensionNextProtoNeg) + // The length is always 0 + z = z[4:] + } + if len(m.serverName) > 0 { + z[0] = byte(extensionServerName >> 8) + z[1] = byte(extensionServerName) + l := len(m.serverName) + 5 + z[2] = byte(l >> 8) + z[3] = byte(l) + z = z[4:] + + // RFC 3546, section 3.1 + // + // struct { + // NameType name_type; + // select (name_type) { + // case host_name: HostName; + // } name; + // } ServerName; + // + // enum { + // host_name(0), (255) + // } NameType; + // + // 
opaque HostName<1..2^16-1>; + // + // struct { + // ServerName server_name_list<1..2^16-1> + // } ServerNameList; + + z[0] = byte((len(m.serverName) + 3) >> 8) + z[1] = byte(len(m.serverName) + 3) + z[3] = byte(len(m.serverName) >> 8) + z[4] = byte(len(m.serverName)) + copy(z[5:], []byte(m.serverName)) + z = z[l:] + } + if m.ocspStapling { + // RFC 4366, section 3.6 + z[0] = byte(extensionStatusRequest >> 8) + z[1] = byte(extensionStatusRequest) + z[2] = 0 + z[3] = 5 + z[4] = 1 // OCSP type + // Two zero valued uint16s for the two lengths. + z = z[9:] + } + if len(m.supportedCurves) > 0 { + // http://tools.ietf.org/html/rfc4492#section-5.5.1 + z[0] = byte(extensionSupportedCurves >> 8) + z[1] = byte(extensionSupportedCurves) + l := 2 + 2*len(m.supportedCurves) + z[2] = byte(l >> 8) + z[3] = byte(l) + l -= 2 + z[4] = byte(l >> 8) + z[5] = byte(l) + z = z[6:] + for _, curve := range m.supportedCurves { + z[0] = byte(curve >> 8) + z[1] = byte(curve) + z = z[2:] + } + } + if len(m.supportedPoints) > 0 { + // http://tools.ietf.org/html/rfc4492#section-5.5.2 + z[0] = byte(extensionSupportedPoints >> 8) + z[1] = byte(extensionSupportedPoints) + l := 1 + len(m.supportedPoints) + z[2] = byte(l >> 8) + z[3] = byte(l) + l-- + z[4] = byte(l) + z = z[5:] + for _, pointFormat := range m.supportedPoints { + z[0] = byte(pointFormat) + z = z[1:] + } + } + if m.ticketSupported { + // http://tools.ietf.org/html/rfc5077#section-3.2 + z[0] = byte(extensionSessionTicket >> 8) + z[1] = byte(extensionSessionTicket) + l := len(m.sessionTicket) + z[2] = byte(l >> 8) + z[3] = byte(l) + z = z[4:] + copy(z, m.sessionTicket) + z = z[len(m.sessionTicket):] + } + if len(m.signatureAndHashes) > 0 { + // https://tools.ietf.org/html/rfc5246#section-7.4.1.4.1 + z[0] = byte(extensionSignatureAlgorithms >> 8) + z[1] = byte(extensionSignatureAlgorithms) + l := 2 + 2*len(m.signatureAndHashes) + z[2] = byte(l >> 8) + z[3] = byte(l) + z = z[4:] + + l -= 2 + z[0] = byte(l >> 8) + z[1] = byte(l) + z = z[2:] + for _, sigAndHash := range m.signatureAndHashes { + z[0] = sigAndHash.hash + z[1] = sigAndHash.signature + z = z[2:] + } + } + + m.raw = x + + return x +} + +func (m *clientHelloMsg) unmarshal(data []byte) bool { + if len(data) < 42 { + return false + } + m.raw = data + m.vers = uint16(data[4])<<8 | uint16(data[5]) + m.random = data[6:38] + sessionIdLen := int(data[38]) + if sessionIdLen > 32 || len(data) < 39+sessionIdLen { + return false + } + m.sessionId = data[39 : 39+sessionIdLen] + data = data[39+sessionIdLen:] + if len(data) < 2 { + return false + } + // cipherSuiteLen is the number of bytes of cipher suite numbers. Since + // they are uint16s, the number must be even. 
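+	// Both length bytes are in network byte order (big-endian), as are
+	// all multi-byte integers in TLS.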
+ cipherSuiteLen := int(data[0])<<8 | int(data[1]) + if cipherSuiteLen%2 == 1 || len(data) < 2+cipherSuiteLen { + return false + } + numCipherSuites := cipherSuiteLen / 2 + m.cipherSuites = make([]uint16, numCipherSuites) + for i := 0; i < numCipherSuites; i++ { + m.cipherSuites[i] = uint16(data[2+2*i])<<8 | uint16(data[3+2*i]) + } + data = data[2+cipherSuiteLen:] + if len(data) < 1 { + return false + } + compressionMethodsLen := int(data[0]) + if len(data) < 1+compressionMethodsLen { + return false + } + m.compressionMethods = data[1 : 1+compressionMethodsLen] + + data = data[1+compressionMethodsLen:] + + m.nextProtoNeg = false + m.serverName = "" + m.ocspStapling = false + m.ticketSupported = false + m.sessionTicket = nil + m.signatureAndHashes = nil + + if len(data) == 0 { + // ClientHello is optionally followed by extension data + return true + } + if len(data) < 2 { + return false + } + + extensionsLength := int(data[0])<<8 | int(data[1]) + data = data[2:] + if extensionsLength != len(data) { + return false + } + + for len(data) != 0 { + if len(data) < 4 { + return false + } + extension := uint16(data[0])<<8 | uint16(data[1]) + length := int(data[2])<<8 | int(data[3]) + data = data[4:] + if len(data) < length { + return false + } + + switch extension { + case extensionServerName: + if length < 2 { + return false + } + numNames := int(data[0])<<8 | int(data[1]) + d := data[2:] + for i := 0; i < numNames; i++ { + if len(d) < 3 { + return false + } + nameType := d[0] + nameLen := int(d[1])<<8 | int(d[2]) + d = d[3:] + if len(d) < nameLen { + return false + } + if nameType == 0 { + m.serverName = string(d[0:nameLen]) + break + } + d = d[nameLen:] + } + case extensionNextProtoNeg: + if length > 0 { + return false + } + m.nextProtoNeg = true + case extensionStatusRequest: + m.ocspStapling = length > 0 && data[0] == statusTypeOCSP + case extensionSupportedCurves: + // http://tools.ietf.org/html/rfc4492#section-5.5.1 + if length < 2 { + return false + } + l := int(data[0])<<8 | int(data[1]) + if l%2 == 1 || length != l+2 { + return false + } + numCurves := l / 2 + m.supportedCurves = make([]uint16, numCurves) + d := data[2:] + for i := 0; i < numCurves; i++ { + m.supportedCurves[i] = uint16(d[0])<<8 | uint16(d[1]) + d = d[2:] + } + case extensionSupportedPoints: + // http://tools.ietf.org/html/rfc4492#section-5.5.2 + if length < 1 { + return false + } + l := int(data[0]) + if length != l+1 { + return false + } + m.supportedPoints = make([]uint8, l) + copy(m.supportedPoints, data[1:]) + case extensionSessionTicket: + // http://tools.ietf.org/html/rfc5077#section-3.2 + m.ticketSupported = true + m.sessionTicket = data[:length] + case extensionSignatureAlgorithms: + // https://tools.ietf.org/html/rfc5246#section-7.4.1.4.1 + if length < 2 || length&1 != 0 { + return false + } + l := int(data[0])<<8 | int(data[1]) + if l != length-2 { + return false + } + n := l / 2 + d := data[2:] + m.signatureAndHashes = make([]signatureAndHash, n) + for i := range m.signatureAndHashes { + m.signatureAndHashes[i].hash = d[0] + m.signatureAndHashes[i].signature = d[1] + d = d[2:] + } + } + data = data[length:] + } + + return true +} + +type serverHelloMsg struct { + raw []byte + vers uint16 + random []byte + sessionId []byte + cipherSuite uint16 + compressionMethod uint8 + nextProtoNeg bool + nextProtos []string + ocspStapling bool + ticketSupported bool +} + +func (m *serverHelloMsg) equal(i interface{}) bool { + m1, ok := i.(*serverHelloMsg) + if !ok { + return false + } + + return bytes.Equal(m.raw, m1.raw) && 
+ m.vers == m1.vers && + bytes.Equal(m.random, m1.random) && + bytes.Equal(m.sessionId, m1.sessionId) && + m.cipherSuite == m1.cipherSuite && + m.compressionMethod == m1.compressionMethod && + m.nextProtoNeg == m1.nextProtoNeg && + eqStrings(m.nextProtos, m1.nextProtos) && + m.ocspStapling == m1.ocspStapling && + m.ticketSupported == m1.ticketSupported +} + +func (m *serverHelloMsg) marshal() []byte { + if m.raw != nil { + return m.raw + } + + length := 38 + len(m.sessionId) + numExtensions := 0 + extensionsLength := 0 + + nextProtoLen := 0 + if m.nextProtoNeg { + numExtensions++ + for _, v := range m.nextProtos { + nextProtoLen += len(v) + } + nextProtoLen += len(m.nextProtos) + extensionsLength += nextProtoLen + } + if m.ocspStapling { + numExtensions++ + } + if m.ticketSupported { + numExtensions++ + } + if numExtensions > 0 { + extensionsLength += 4 * numExtensions + length += 2 + extensionsLength + } + + x := make([]byte, 4+length) + x[0] = typeServerHello + x[1] = uint8(length >> 16) + x[2] = uint8(length >> 8) + x[3] = uint8(length) + x[4] = uint8(m.vers >> 8) + x[5] = uint8(m.vers) + copy(x[6:38], m.random) + x[38] = uint8(len(m.sessionId)) + copy(x[39:39+len(m.sessionId)], m.sessionId) + z := x[39+len(m.sessionId):] + z[0] = uint8(m.cipherSuite >> 8) + z[1] = uint8(m.cipherSuite) + z[2] = uint8(m.compressionMethod) + + z = z[3:] + if numExtensions > 0 { + z[0] = byte(extensionsLength >> 8) + z[1] = byte(extensionsLength) + z = z[2:] + } + if m.nextProtoNeg { + z[0] = byte(extensionNextProtoNeg >> 8) + z[1] = byte(extensionNextProtoNeg) + z[2] = byte(nextProtoLen >> 8) + z[3] = byte(nextProtoLen) + z = z[4:] + + for _, v := range m.nextProtos { + l := len(v) + if l > 255 { + l = 255 + } + z[0] = byte(l) + copy(z[1:], []byte(v[0:l])) + z = z[1+l:] + } + } + if m.ocspStapling { + z[0] = byte(extensionStatusRequest >> 8) + z[1] = byte(extensionStatusRequest) + z = z[4:] + } + if m.ticketSupported { + z[0] = byte(extensionSessionTicket >> 8) + z[1] = byte(extensionSessionTicket) + z = z[4:] + } + + m.raw = x + + return x +} + +func (m *serverHelloMsg) unmarshal(data []byte) bool { + if len(data) < 42 { + return false + } + m.raw = data + m.vers = uint16(data[4])<<8 | uint16(data[5]) + m.random = data[6:38] + sessionIdLen := int(data[38]) + if sessionIdLen > 32 || len(data) < 39+sessionIdLen { + return false + } + m.sessionId = data[39 : 39+sessionIdLen] + data = data[39+sessionIdLen:] + if len(data) < 3 { + return false + } + m.cipherSuite = uint16(data[0])<<8 | uint16(data[1]) + m.compressionMethod = data[2] + data = data[3:] + + m.nextProtoNeg = false + m.nextProtos = nil + m.ocspStapling = false + m.ticketSupported = false + + if len(data) == 0 { + // ServerHello is optionally followed by extension data + return true + } + if len(data) < 2 { + return false + } + + extensionsLength := int(data[0])<<8 | int(data[1]) + data = data[2:] + if len(data) != extensionsLength { + return false + } + + for len(data) != 0 { + if len(data) < 4 { + return false + } + extension := uint16(data[0])<<8 | uint16(data[1]) + length := int(data[2])<<8 | int(data[3]) + data = data[4:] + if len(data) < length { + return false + } + + switch extension { + case extensionNextProtoNeg: + m.nextProtoNeg = true + d := data[:length] + for len(d) > 0 { + l := int(d[0]) + d = d[1:] + if l == 0 || l > len(d) { + return false + } + m.nextProtos = append(m.nextProtos, string(d[:l])) + d = d[l:] + } + case extensionStatusRequest: + if length > 0 { + return false + } + m.ocspStapling = true + case extensionSessionTicket: 
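+			// http://tools.ietf.org/html/rfc5077#section-3.2
+			// An empty SessionTicket extension from the server signals
+			// that it will send a NewSessionTicket handshake message.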
+ if length > 0 { + return false + } + m.ticketSupported = true + } + data = data[length:] + } + + return true +} + +type certificateMsg struct { + raw []byte + certificates [][]byte +} + +func (m *certificateMsg) equal(i interface{}) bool { + m1, ok := i.(*certificateMsg) + if !ok { + return false + } + + return bytes.Equal(m.raw, m1.raw) && + eqByteSlices(m.certificates, m1.certificates) +} + +func (m *certificateMsg) marshal() (x []byte) { + if m.raw != nil { + return m.raw + } + + var i int + for _, slice := range m.certificates { + i += len(slice) + } + + length := 3 + 3*len(m.certificates) + i + x = make([]byte, 4+length) + x[0] = typeCertificate + x[1] = uint8(length >> 16) + x[2] = uint8(length >> 8) + x[3] = uint8(length) + + certificateOctets := length - 3 + x[4] = uint8(certificateOctets >> 16) + x[5] = uint8(certificateOctets >> 8) + x[6] = uint8(certificateOctets) + + y := x[7:] + for _, slice := range m.certificates { + y[0] = uint8(len(slice) >> 16) + y[1] = uint8(len(slice) >> 8) + y[2] = uint8(len(slice)) + copy(y[3:], slice) + y = y[3+len(slice):] + } + + m.raw = x + return +} + +func (m *certificateMsg) unmarshal(data []byte) bool { + if len(data) < 7 { + return false + } + + m.raw = data + certsLen := uint32(data[4])<<16 | uint32(data[5])<<8 | uint32(data[6]) + if uint32(len(data)) != certsLen+7 { + return false + } + + numCerts := 0 + d := data[7:] + for certsLen > 0 { + if len(d) < 4 { + return false + } + certLen := uint32(d[0])<<16 | uint32(d[1])<<8 | uint32(d[2]) + if uint32(len(d)) < 3+certLen { + return false + } + d = d[3+certLen:] + certsLen -= 3 + certLen + numCerts++ + } + + m.certificates = make([][]byte, numCerts) + d = data[7:] + for i := 0; i < numCerts; i++ { + certLen := uint32(d[0])<<16 | uint32(d[1])<<8 | uint32(d[2]) + m.certificates[i] = d[3 : 3+certLen] + d = d[3+certLen:] + } + + return true +} + +type serverKeyExchangeMsg struct { + raw []byte + key []byte +} + +func (m *serverKeyExchangeMsg) equal(i interface{}) bool { + m1, ok := i.(*serverKeyExchangeMsg) + if !ok { + return false + } + + return bytes.Equal(m.raw, m1.raw) && + bytes.Equal(m.key, m1.key) +} + +func (m *serverKeyExchangeMsg) marshal() []byte { + if m.raw != nil { + return m.raw + } + length := len(m.key) + x := make([]byte, length+4) + x[0] = typeServerKeyExchange + x[1] = uint8(length >> 16) + x[2] = uint8(length >> 8) + x[3] = uint8(length) + copy(x[4:], m.key) + + m.raw = x + return x +} + +func (m *serverKeyExchangeMsg) unmarshal(data []byte) bool { + m.raw = data + if len(data) < 4 { + return false + } + m.key = data[4:] + return true +} + +type certificateStatusMsg struct { + raw []byte + statusType uint8 + response []byte +} + +func (m *certificateStatusMsg) equal(i interface{}) bool { + m1, ok := i.(*certificateStatusMsg) + if !ok { + return false + } + + return bytes.Equal(m.raw, m1.raw) && + m.statusType == m1.statusType && + bytes.Equal(m.response, m1.response) +} + +func (m *certificateStatusMsg) marshal() []byte { + if m.raw != nil { + return m.raw + } + + var x []byte + if m.statusType == statusTypeOCSP { + x = make([]byte, 4+4+len(m.response)) + x[0] = typeCertificateStatus + l := len(m.response) + 4 + x[1] = byte(l >> 16) + x[2] = byte(l >> 8) + x[3] = byte(l) + x[4] = statusTypeOCSP + + l -= 4 + x[5] = byte(l >> 16) + x[6] = byte(l >> 8) + x[7] = byte(l) + copy(x[8:], m.response) + } else { + x = []byte{typeCertificateStatus, 0, 0, 1, m.statusType} + } + + m.raw = x + return x +} + +func (m *certificateStatusMsg) unmarshal(data []byte) bool { + m.raw = data + if 
len(data) < 5 { + return false + } + m.statusType = data[4] + + m.response = nil + if m.statusType == statusTypeOCSP { + if len(data) < 8 { + return false + } + respLen := uint32(data[5])<<16 | uint32(data[6])<<8 | uint32(data[7]) + if uint32(len(data)) != 4+4+respLen { + return false + } + m.response = data[8:] + } + return true +} + +type serverHelloDoneMsg struct{} + +func (m *serverHelloDoneMsg) equal(i interface{}) bool { + _, ok := i.(*serverHelloDoneMsg) + return ok +} + +func (m *serverHelloDoneMsg) marshal() []byte { + x := make([]byte, 4) + x[0] = typeServerHelloDone + return x +} + +func (m *serverHelloDoneMsg) unmarshal(data []byte) bool { + return len(data) == 4 +} + +type clientKeyExchangeMsg struct { + raw []byte + ciphertext []byte +} + +func (m *clientKeyExchangeMsg) equal(i interface{}) bool { + m1, ok := i.(*clientKeyExchangeMsg) + if !ok { + return false + } + + return bytes.Equal(m.raw, m1.raw) && + bytes.Equal(m.ciphertext, m1.ciphertext) +} + +func (m *clientKeyExchangeMsg) marshal() []byte { + if m.raw != nil { + return m.raw + } + length := len(m.ciphertext) + x := make([]byte, length+4) + x[0] = typeClientKeyExchange + x[1] = uint8(length >> 16) + x[2] = uint8(length >> 8) + x[3] = uint8(length) + copy(x[4:], m.ciphertext) + + m.raw = x + return x +} + +func (m *clientKeyExchangeMsg) unmarshal(data []byte) bool { + m.raw = data + if len(data) < 4 { + return false + } + l := int(data[1])<<16 | int(data[2])<<8 | int(data[3]) + if l != len(data)-4 { + return false + } + m.ciphertext = data[4:] + return true +} + +type finishedMsg struct { + raw []byte + verifyData []byte +} + +func (m *finishedMsg) equal(i interface{}) bool { + m1, ok := i.(*finishedMsg) + if !ok { + return false + } + + return bytes.Equal(m.raw, m1.raw) && + bytes.Equal(m.verifyData, m1.verifyData) +} + +func (m *finishedMsg) marshal() (x []byte) { + if m.raw != nil { + return m.raw + } + + x = make([]byte, 4+len(m.verifyData)) + x[0] = typeFinished + x[3] = byte(len(m.verifyData)) + copy(x[4:], m.verifyData) + m.raw = x + return +} + +func (m *finishedMsg) unmarshal(data []byte) bool { + m.raw = data + if len(data) < 4 { + return false + } + m.verifyData = data[4:] + return true +} + +type nextProtoMsg struct { + raw []byte + proto string +} + +func (m *nextProtoMsg) equal(i interface{}) bool { + m1, ok := i.(*nextProtoMsg) + if !ok { + return false + } + + return bytes.Equal(m.raw, m1.raw) && + m.proto == m1.proto +} + +func (m *nextProtoMsg) marshal() []byte { + if m.raw != nil { + return m.raw + } + l := len(m.proto) + if l > 255 { + l = 255 + } + + padding := 32 - (l+2)%32 + length := l + padding + 2 + x := make([]byte, length+4) + x[0] = typeNextProtocol + x[1] = uint8(length >> 16) + x[2] = uint8(length >> 8) + x[3] = uint8(length) + + y := x[4:] + y[0] = byte(l) + copy(y[1:], []byte(m.proto[0:l])) + y = y[1+l:] + y[0] = byte(padding) + + m.raw = x + + return x +} + +func (m *nextProtoMsg) unmarshal(data []byte) bool { + m.raw = data + + if len(data) < 5 { + return false + } + data = data[4:] + protoLen := int(data[0]) + data = data[1:] + if len(data) < protoLen { + return false + } + m.proto = string(data[0:protoLen]) + data = data[protoLen:] + + if len(data) < 1 { + return false + } + paddingLen := int(data[0]) + data = data[1:] + if len(data) != paddingLen { + return false + } + + return true +} + +type certificateRequestMsg struct { + raw []byte + // hasSignatureAndHash indicates whether this message includes a list + // of signature and hash functions. 
This change was introduced with TLS + // 1.2. + hasSignatureAndHash bool + + certificateTypes []byte + signatureAndHashes []signatureAndHash + certificateAuthorities [][]byte +} + +func (m *certificateRequestMsg) equal(i interface{}) bool { + m1, ok := i.(*certificateRequestMsg) + if !ok { + return false + } + + return bytes.Equal(m.raw, m1.raw) && + bytes.Equal(m.certificateTypes, m1.certificateTypes) && + eqByteSlices(m.certificateAuthorities, m1.certificateAuthorities) && + eqSignatureAndHashes(m.signatureAndHashes, m1.signatureAndHashes) +} + +func (m *certificateRequestMsg) marshal() (x []byte) { + if m.raw != nil { + return m.raw + } + + // See http://tools.ietf.org/html/rfc4346#section-7.4.4 + length := 1 + len(m.certificateTypes) + 2 + casLength := 0 + for _, ca := range m.certificateAuthorities { + casLength += 2 + len(ca) + } + length += casLength + + if m.hasSignatureAndHash { + length += 2 + 2*len(m.signatureAndHashes) + } + + x = make([]byte, 4+length) + x[0] = typeCertificateRequest + x[1] = uint8(length >> 16) + x[2] = uint8(length >> 8) + x[3] = uint8(length) + + x[4] = uint8(len(m.certificateTypes)) + + copy(x[5:], m.certificateTypes) + y := x[5+len(m.certificateTypes):] + + if m.hasSignatureAndHash { + n := len(m.signatureAndHashes) * 2 + y[0] = uint8(n >> 8) + y[1] = uint8(n) + y = y[2:] + for _, sigAndHash := range m.signatureAndHashes { + y[0] = sigAndHash.hash + y[1] = sigAndHash.signature + y = y[2:] + } + } + + y[0] = uint8(casLength >> 8) + y[1] = uint8(casLength) + y = y[2:] + for _, ca := range m.certificateAuthorities { + y[0] = uint8(len(ca) >> 8) + y[1] = uint8(len(ca)) + y = y[2:] + copy(y, ca) + y = y[len(ca):] + } + + m.raw = x + return +} + +func (m *certificateRequestMsg) unmarshal(data []byte) bool { + m.raw = data + + if len(data) < 5 { + return false + } + + length := uint32(data[1])<<16 | uint32(data[2])<<8 | uint32(data[3]) + if uint32(len(data))-4 != length { + return false + } + + numCertTypes := int(data[4]) + data = data[5:] + if numCertTypes == 0 || len(data) <= numCertTypes { + return false + } + + m.certificateTypes = make([]byte, numCertTypes) + if copy(m.certificateTypes, data) != numCertTypes { + return false + } + + data = data[numCertTypes:] + + if m.hasSignatureAndHash { + if len(data) < 2 { + return false + } + sigAndHashLen := uint16(data[0])<<8 | uint16(data[1]) + data = data[2:] + if sigAndHashLen&1 != 0 { + return false + } + if len(data) < int(sigAndHashLen) { + return false + } + numSigAndHash := sigAndHashLen / 2 + m.signatureAndHashes = make([]signatureAndHash, numSigAndHash) + for i := range m.signatureAndHashes { + m.signatureAndHashes[i].hash = data[0] + m.signatureAndHashes[i].signature = data[1] + data = data[2:] + } + } + + if len(data) < 2 { + return false + } + casLength := uint16(data[0])<<8 | uint16(data[1]) + data = data[2:] + if len(data) < int(casLength) { + return false + } + cas := make([]byte, casLength) + copy(cas, data) + data = data[casLength:] + + m.certificateAuthorities = nil + for len(cas) > 0 { + if len(cas) < 2 { + return false + } + caLen := uint16(cas[0])<<8 | uint16(cas[1]) + cas = cas[2:] + + if len(cas) < int(caLen) { + return false + } + + m.certificateAuthorities = append(m.certificateAuthorities, cas[:caLen]) + cas = cas[caLen:] + } + if len(data) > 0 { + return false + } + + return true +} + +type certificateVerifyMsg struct { + raw []byte + hasSignatureAndHash bool + signatureAndHash signatureAndHash + signature []byte +} + +func (m *certificateVerifyMsg) equal(i interface{}) bool { + m1, ok := 
i.(*certificateVerifyMsg) + if !ok { + return false + } + + return bytes.Equal(m.raw, m1.raw) && + m.hasSignatureAndHash == m1.hasSignatureAndHash && + m.signatureAndHash.hash == m1.signatureAndHash.hash && + m.signatureAndHash.signature == m1.signatureAndHash.signature && + bytes.Equal(m.signature, m1.signature) +} + +func (m *certificateVerifyMsg) marshal() (x []byte) { + if m.raw != nil { + return m.raw + } + + // See http://tools.ietf.org/html/rfc4346#section-7.4.8 + siglength := len(m.signature) + length := 2 + siglength + if m.hasSignatureAndHash { + length += 2 + } + x = make([]byte, 4+length) + x[0] = typeCertificateVerify + x[1] = uint8(length >> 16) + x[2] = uint8(length >> 8) + x[3] = uint8(length) + y := x[4:] + if m.hasSignatureAndHash { + y[0] = m.signatureAndHash.hash + y[1] = m.signatureAndHash.signature + y = y[2:] + } + y[0] = uint8(siglength >> 8) + y[1] = uint8(siglength) + copy(y[2:], m.signature) + + m.raw = x + + return +} + +func (m *certificateVerifyMsg) unmarshal(data []byte) bool { + m.raw = data + + if len(data) < 6 { + return false + } + + length := uint32(data[1])<<16 | uint32(data[2])<<8 | uint32(data[3]) + if uint32(len(data))-4 != length { + return false + } + + data = data[4:] + if m.hasSignatureAndHash { + m.signatureAndHash.hash = data[0] + m.signatureAndHash.signature = data[1] + data = data[2:] + } + + if len(data) < 2 { + return false + } + siglength := int(data[0])<<8 + int(data[1]) + data = data[2:] + if len(data) != siglength { + return false + } + + m.signature = data + + return true +} + +type newSessionTicketMsg struct { + raw []byte + ticket []byte +} + +func (m *newSessionTicketMsg) equal(i interface{}) bool { + m1, ok := i.(*newSessionTicketMsg) + if !ok { + return false + } + + return bytes.Equal(m.raw, m1.raw) && + bytes.Equal(m.ticket, m1.ticket) +} + +func (m *newSessionTicketMsg) marshal() (x []byte) { + if m.raw != nil { + return m.raw + } + + // See http://tools.ietf.org/html/rfc5077#section-3.3 + ticketLen := len(m.ticket) + length := 2 + 4 + ticketLen + x = make([]byte, 4+length) + x[0] = typeNewSessionTicket + x[1] = uint8(length >> 16) + x[2] = uint8(length >> 8) + x[3] = uint8(length) + x[8] = uint8(ticketLen >> 8) + x[9] = uint8(ticketLen) + copy(x[10:], m.ticket) + + m.raw = x + + return +} + +func (m *newSessionTicketMsg) unmarshal(data []byte) bool { + m.raw = data + + if len(data) < 10 { + return false + } + + length := uint32(data[1])<<16 | uint32(data[2])<<8 | uint32(data[3]) + if uint32(len(data))-4 != length { + return false + } + + ticketLen := int(data[8])<<8 + int(data[9]) + if len(data)-10 != ticketLen { + return false + } + + m.ticket = data[10:] + + return true +} + +type helloRequestMsg struct { +} + +func (*helloRequestMsg) marshal() []byte { + return []byte{typeHelloRequest, 0, 0, 0} +} + +func (*helloRequestMsg) unmarshal(data []byte) bool { + return len(data) == 4 +} + +func eqUint16s(x, y []uint16) bool { + if len(x) != len(y) { + return false + } + for i, v := range x { + if y[i] != v { + return false + } + } + return true +} + +func eqStrings(x, y []string) bool { + if len(x) != len(y) { + return false + } + for i, v := range x { + if y[i] != v { + return false + } + } + return true +} + +func eqByteSlices(x, y [][]byte) bool { + if len(x) != len(y) { + return false + } + for i, v := range x { + if !bytes.Equal(v, y[i]) { + return false + } + } + return true +} + +func eqSignatureAndHashes(x, y []signatureAndHash) bool { + if len(x) != len(y) { + return false + } + for i, v := range x { + v2 := y[i] + if 
v.hash != v2.hash || v.signature != v2.signature { + return false + } + } + return true +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/core/tls/handshake_messages_test.go b/vendor/github.com/Azure/azure-sdk-for-go/core/tls/handshake_messages_test.go new file mode 100644 index 000000000..4f569eeb1 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/core/tls/handshake_messages_test.go @@ -0,0 +1,246 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package tls + +import ( + "math/rand" + "reflect" + "testing" + "testing/quick" +) + +var tests = []interface{}{ + &clientHelloMsg{}, + &serverHelloMsg{}, + &finishedMsg{}, + + &certificateMsg{}, + &certificateRequestMsg{}, + &certificateVerifyMsg{}, + &certificateStatusMsg{}, + &clientKeyExchangeMsg{}, + &nextProtoMsg{}, + &newSessionTicketMsg{}, + &sessionState{}, +} + +type testMessage interface { + marshal() []byte + unmarshal([]byte) bool + equal(interface{}) bool +} + +func TestMarshalUnmarshal(t *testing.T) { + rand := rand.New(rand.NewSource(0)) + + for i, iface := range tests { + ty := reflect.ValueOf(iface).Type() + + n := 100 + if testing.Short() { + n = 5 + } + for j := 0; j < n; j++ { + v, ok := quick.Value(ty, rand) + if !ok { + t.Errorf("#%d: failed to create value", i) + break + } + + m1 := v.Interface().(testMessage) + marshaled := m1.marshal() + m2 := iface.(testMessage) + if !m2.unmarshal(marshaled) { + t.Errorf("#%d failed to unmarshal %#v %x", i, m1, marshaled) + break + } + m2.marshal() // to fill any marshal cache in the message + + if !m1.equal(m2) { + t.Errorf("#%d got:%#v want:%#v %x", i, m2, m1, marshaled) + break + } + + if i >= 3 { + // The first three message types (ClientHello, + // ServerHello and Finished) are allowed to + // have parsable prefixes because the extension + // data is optional and the length of the + // Finished varies across versions. + for j := 0; j < len(marshaled); j++ { + if m2.unmarshal(marshaled[0:j]) { + t.Errorf("#%d unmarshaled a prefix of length %d of %#v", i, j, m1) + break + } + } + } + } + } +} + +func TestFuzz(t *testing.T) { + rand := rand.New(rand.NewSource(0)) + for _, iface := range tests { + m := iface.(testMessage) + + for j := 0; j < 1000; j++ { + len := rand.Intn(100) + bytes := randomBytes(len, rand) + // This just looks for crashes due to bounds errors etc. 
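+			// The return value is deliberately ignored: random input is
+			// expected to fail to parse, and a false result here is not
+			// a test failure.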
+ m.unmarshal(bytes) + } + } +} + +func randomBytes(n int, rand *rand.Rand) []byte { + r := make([]byte, n) + for i := 0; i < n; i++ { + r[i] = byte(rand.Int31()) + } + return r +} + +func randomString(n int, rand *rand.Rand) string { + b := randomBytes(n, rand) + return string(b) +} + +func (*clientHelloMsg) Generate(rand *rand.Rand, size int) reflect.Value { + m := &clientHelloMsg{} + m.vers = uint16(rand.Intn(65536)) + m.random = randomBytes(32, rand) + m.sessionId = randomBytes(rand.Intn(32), rand) + m.cipherSuites = make([]uint16, rand.Intn(63)+1) + for i := 0; i < len(m.cipherSuites); i++ { + m.cipherSuites[i] = uint16(rand.Int31()) + } + m.compressionMethods = randomBytes(rand.Intn(63)+1, rand) + if rand.Intn(10) > 5 { + m.nextProtoNeg = true + } + if rand.Intn(10) > 5 { + m.serverName = randomString(rand.Intn(255), rand) + } + m.ocspStapling = rand.Intn(10) > 5 + m.supportedPoints = randomBytes(rand.Intn(5)+1, rand) + m.supportedCurves = make([]uint16, rand.Intn(5)+1) + for i := range m.supportedCurves { + m.supportedCurves[i] = uint16(rand.Intn(30000)) + } + if rand.Intn(10) > 5 { + m.ticketSupported = true + if rand.Intn(10) > 5 { + m.sessionTicket = randomBytes(rand.Intn(300), rand) + } + } + if rand.Intn(10) > 5 { + m.signatureAndHashes = supportedSKXSignatureAlgorithms + } + + return reflect.ValueOf(m) +} + +func (*serverHelloMsg) Generate(rand *rand.Rand, size int) reflect.Value { + m := &serverHelloMsg{} + m.vers = uint16(rand.Intn(65536)) + m.random = randomBytes(32, rand) + m.sessionId = randomBytes(rand.Intn(32), rand) + m.cipherSuite = uint16(rand.Int31()) + m.compressionMethod = uint8(rand.Intn(256)) + + if rand.Intn(10) > 5 { + m.nextProtoNeg = true + + n := rand.Intn(10) + m.nextProtos = make([]string, n) + for i := 0; i < n; i++ { + m.nextProtos[i] = randomString(20, rand) + } + } + + if rand.Intn(10) > 5 { + m.ocspStapling = true + } + if rand.Intn(10) > 5 { + m.ticketSupported = true + } + + return reflect.ValueOf(m) +} + +func (*certificateMsg) Generate(rand *rand.Rand, size int) reflect.Value { + m := &certificateMsg{} + numCerts := rand.Intn(20) + m.certificates = make([][]byte, numCerts) + for i := 0; i < numCerts; i++ { + m.certificates[i] = randomBytes(rand.Intn(10)+1, rand) + } + return reflect.ValueOf(m) +} + +func (*certificateRequestMsg) Generate(rand *rand.Rand, size int) reflect.Value { + m := &certificateRequestMsg{} + m.certificateTypes = randomBytes(rand.Intn(5)+1, rand) + numCAs := rand.Intn(100) + m.certificateAuthorities = make([][]byte, numCAs) + for i := 0; i < numCAs; i++ { + m.certificateAuthorities[i] = randomBytes(rand.Intn(15)+1, rand) + } + return reflect.ValueOf(m) +} + +func (*certificateVerifyMsg) Generate(rand *rand.Rand, size int) reflect.Value { + m := &certificateVerifyMsg{} + m.signature = randomBytes(rand.Intn(15)+1, rand) + return reflect.ValueOf(m) +} + +func (*certificateStatusMsg) Generate(rand *rand.Rand, size int) reflect.Value { + m := &certificateStatusMsg{} + if rand.Intn(10) > 5 { + m.statusType = statusTypeOCSP + m.response = randomBytes(rand.Intn(10)+1, rand) + } else { + m.statusType = 42 + } + return reflect.ValueOf(m) +} + +func (*clientKeyExchangeMsg) Generate(rand *rand.Rand, size int) reflect.Value { + m := &clientKeyExchangeMsg{} + m.ciphertext = randomBytes(rand.Intn(1000)+1, rand) + return reflect.ValueOf(m) +} + +func (*finishedMsg) Generate(rand *rand.Rand, size int) reflect.Value { + m := &finishedMsg{} + m.verifyData = randomBytes(12, rand) + return reflect.ValueOf(m) +} + +func (*nextProtoMsg) 
Generate(rand *rand.Rand, size int) reflect.Value { + m := &nextProtoMsg{} + m.proto = randomString(rand.Intn(255), rand) + return reflect.ValueOf(m) +} + +func (*newSessionTicketMsg) Generate(rand *rand.Rand, size int) reflect.Value { + m := &newSessionTicketMsg{} + m.ticket = randomBytes(rand.Intn(4), rand) + return reflect.ValueOf(m) +} + +func (*sessionState) Generate(rand *rand.Rand, size int) reflect.Value { + s := &sessionState{} + s.vers = uint16(rand.Intn(10000)) + s.cipherSuite = uint16(rand.Intn(10000)) + s.masterSecret = randomBytes(rand.Intn(100), rand) + numCerts := rand.Intn(20) + s.certificates = make([][]byte, numCerts) + for i := 0; i < numCerts; i++ { + s.certificates[i] = randomBytes(rand.Intn(10)+1, rand) + } + return reflect.ValueOf(s) +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/core/tls/handshake_server.go b/vendor/github.com/Azure/azure-sdk-for-go/core/tls/handshake_server.go new file mode 100644 index 000000000..c9ccf675c --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/core/tls/handshake_server.go @@ -0,0 +1,638 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package tls + +import ( + "crypto" + "crypto/ecdsa" + "crypto/rsa" + "crypto/subtle" + "crypto/x509" + "encoding/asn1" + "errors" + "io" +) + +// serverHandshakeState contains details of a server handshake in progress. +// It's discarded once the handshake has completed. +type serverHandshakeState struct { + c *Conn + clientHello *clientHelloMsg + hello *serverHelloMsg + suite *cipherSuite + ellipticOk bool + ecdsaOk bool + sessionState *sessionState + finishedHash finishedHash + masterSecret []byte + certsFromClient [][]byte + cert *Certificate +} + +// serverHandshake performs a TLS handshake as a server. +func (c *Conn) serverHandshake() error { + config := c.config + + // If this is the first server handshake, we generate a random key to + // encrypt the tickets with. + config.serverInitOnce.Do(config.serverInit) + + hs := serverHandshakeState{ + c: c, + } + isResume, err := hs.readClientHello() + if err != nil { + return err + } + + // For an overview of TLS handshaking, see https://tools.ietf.org/html/rfc5246#section-7.3 + if isResume { + // The client has included a session ticket and so we do an abbreviated handshake. + if err := hs.doResumeHandshake(); err != nil { + return err + } + if err := hs.establishKeys(); err != nil { + return err + } + if err := hs.sendFinished(); err != nil { + return err + } + if err := hs.readFinished(); err != nil { + return err + } + c.didResume = true + } else { + // The client didn't include a session ticket, or it wasn't + // valid so we do a full handshake. + if err := hs.doFullHandshake(); err != nil { + return err + } + if err := hs.establishKeys(); err != nil { + return err + } + if err := hs.readFinished(); err != nil { + return err + } + if err := hs.sendSessionTicket(); err != nil { + return err + } + if err := hs.sendFinished(); err != nil { + return err + } + } + c.handshakeComplete = true + + return nil +} + +// readClientHello reads a ClientHello message from the client and decides +// whether we will perform session resumption. 
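// Editor's note, not part of the vendored upstream file: readClientHello
// below defers version selection to config.mutualVersion, which is defined
// elsewhere in this package. A minimal self-contained sketch of the rule it
// applies, under a hypothetical helper name: clamp the client's offer into
// the server's supported window, and fail if there is no overlap.

func sketchNegotiateVersion(clientVers, serverMin, serverMax uint16) (uint16, bool) {
	vers := clientVers
	if vers > serverMax {
		// Never negotiate higher than the server supports.
		vers = serverMax
	}
	if vers < serverMin {
		// No overlap; the caller answers with alertProtocolVersion.
		return 0, false
	}
	return vers, true
}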
+func (hs *serverHandshakeState) readClientHello() (isResume bool, err error) { + config := hs.c.config + c := hs.c + + msg, err := c.readHandshake() + if err != nil { + return false, err + } + var ok bool + hs.clientHello, ok = msg.(*clientHelloMsg) + if !ok { + return false, c.sendAlert(alertUnexpectedMessage) + } + c.vers, ok = config.mutualVersion(hs.clientHello.vers) + if !ok { + return false, c.sendAlert(alertProtocolVersion) + } + c.haveVers = true + + hs.finishedHash = newFinishedHash(c.vers) + hs.finishedHash.Write(hs.clientHello.marshal()) + + hs.hello = new(serverHelloMsg) + + supportedCurve := false +Curves: + for _, curve := range hs.clientHello.supportedCurves { + switch curve { + case curveP256, curveP384, curveP521: + supportedCurve = true + break Curves + } + } + + supportedPointFormat := false + for _, pointFormat := range hs.clientHello.supportedPoints { + if pointFormat == pointFormatUncompressed { + supportedPointFormat = true + break + } + } + hs.ellipticOk = supportedCurve && supportedPointFormat + + foundCompression := false + // We only support null compression, so check that the client offered it. + for _, compression := range hs.clientHello.compressionMethods { + if compression == compressionNone { + foundCompression = true + break + } + } + + if !foundCompression { + return false, c.sendAlert(alertHandshakeFailure) + } + + hs.hello.vers = c.vers + t := uint32(config.time().Unix()) + hs.hello.random = make([]byte, 32) + hs.hello.random[0] = byte(t >> 24) + hs.hello.random[1] = byte(t >> 16) + hs.hello.random[2] = byte(t >> 8) + hs.hello.random[3] = byte(t) + _, err = io.ReadFull(config.rand(), hs.hello.random[4:]) + if err != nil { + return false, c.sendAlert(alertInternalError) + } + hs.hello.compressionMethod = compressionNone + if len(hs.clientHello.serverName) > 0 { + c.serverName = hs.clientHello.serverName + } + // Although sending an empty NPN extension is reasonable, Firefox has + // had a bug around this. Best to send nothing at all if + // config.NextProtos is empty. See + // https://code.google.com/p/go/issues/detail?id=5445. + if hs.clientHello.nextProtoNeg && len(config.NextProtos) > 0 { + hs.hello.nextProtoNeg = true + hs.hello.nextProtos = config.NextProtos + } + + if len(config.Certificates) == 0 { + return false, c.sendAlert(alertInternalError) + } + hs.cert = &config.Certificates[0] + if len(hs.clientHello.serverName) > 0 { + hs.cert = config.getCertificateForName(hs.clientHello.serverName) + } + + _, hs.ecdsaOk = hs.cert.PrivateKey.(*ecdsa.PrivateKey) + + if hs.checkForResumption() { + return true, nil + } + + var preferenceList, supportedList []uint16 + if c.config.PreferServerCipherSuites { + preferenceList = c.config.cipherSuites() + supportedList = hs.clientHello.cipherSuites + } else { + preferenceList = hs.clientHello.cipherSuites + supportedList = c.config.cipherSuites() + } + + for _, id := range preferenceList { + if hs.suite = c.tryCipherSuite(id, supportedList, c.vers, hs.ellipticOk, hs.ecdsaOk); hs.suite != nil { + break + } + } + + if hs.suite == nil { + return false, c.sendAlert(alertHandshakeFailure) + } + + return false, nil +} + +// checkForResumption returns true if we should perform resumption on this connection. 
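// Editor's note, not part of the vendored upstream file: the checks below
// gate resumption on four independent conditions: the session ticket must
// decrypt under the server's ticket key, the ticket's protocol version must
// still be mutually acceptable, the client must still be offering the
// ticket's cipher suite (and the server must still support it), and the
// client-certificate state recorded in the session must be consistent with
// the current ClientAuth policy. Failing any one of them silently falls
// back to a full handshake rather than aborting the connection.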
+func (hs *serverHandshakeState) checkForResumption() bool { + c := hs.c + + var ok bool + if hs.sessionState, ok = c.decryptTicket(hs.clientHello.sessionTicket); !ok { + return false + } + + if hs.sessionState.vers > hs.clientHello.vers { + return false + } + if vers, ok := c.config.mutualVersion(hs.sessionState.vers); !ok || vers != hs.sessionState.vers { + return false + } + + cipherSuiteOk := false + // Check that the client is still offering the ciphersuite in the session. + for _, id := range hs.clientHello.cipherSuites { + if id == hs.sessionState.cipherSuite { + cipherSuiteOk = true + break + } + } + if !cipherSuiteOk { + return false + } + + // Check that we also support the ciphersuite from the session. + hs.suite = c.tryCipherSuite(hs.sessionState.cipherSuite, c.config.cipherSuites(), hs.sessionState.vers, hs.ellipticOk, hs.ecdsaOk) + if hs.suite == nil { + return false + } + + sessionHasClientCerts := len(hs.sessionState.certificates) != 0 + needClientCerts := c.config.ClientAuth == RequireAnyClientCert || c.config.ClientAuth == RequireAndVerifyClientCert + if needClientCerts && !sessionHasClientCerts { + return false + } + if sessionHasClientCerts && c.config.ClientAuth == NoClientCert { + return false + } + + return true +} + +func (hs *serverHandshakeState) doResumeHandshake() error { + c := hs.c + + hs.hello.cipherSuite = hs.suite.id + // We echo the client's session ID in the ServerHello to let it know + // that we're doing a resumption. + hs.hello.sessionId = hs.clientHello.sessionId + hs.finishedHash.Write(hs.hello.marshal()) + c.writeRecord(recordTypeHandshake, hs.hello.marshal()) + + if len(hs.sessionState.certificates) > 0 { + if _, err := hs.processCertsFromClient(hs.sessionState.certificates); err != nil { + return err + } + } + + hs.masterSecret = hs.sessionState.masterSecret + + return nil +} + +func (hs *serverHandshakeState) doFullHandshake() error { + config := hs.c.config + c := hs.c + + if hs.clientHello.ocspStapling && len(hs.cert.OCSPStaple) > 0 { + hs.hello.ocspStapling = true + } + + hs.hello.ticketSupported = hs.clientHello.ticketSupported && !config.SessionTicketsDisabled + hs.hello.cipherSuite = hs.suite.id + hs.finishedHash.Write(hs.hello.marshal()) + c.writeRecord(recordTypeHandshake, hs.hello.marshal()) + + certMsg := new(certificateMsg) + certMsg.certificates = hs.cert.Certificate + hs.finishedHash.Write(certMsg.marshal()) + c.writeRecord(recordTypeHandshake, certMsg.marshal()) + + if hs.hello.ocspStapling { + certStatus := new(certificateStatusMsg) + certStatus.statusType = statusTypeOCSP + certStatus.response = hs.cert.OCSPStaple + hs.finishedHash.Write(certStatus.marshal()) + c.writeRecord(recordTypeHandshake, certStatus.marshal()) + } + + keyAgreement := hs.suite.ka(c.vers) + skx, err := keyAgreement.generateServerKeyExchange(config, hs.cert, hs.clientHello, hs.hello) + if err != nil { + c.sendAlert(alertHandshakeFailure) + return err + } + if skx != nil { + hs.finishedHash.Write(skx.marshal()) + c.writeRecord(recordTypeHandshake, skx.marshal()) + } + + if config.ClientAuth >= RequestClientCert { + // Request a client certificate + certReq := new(certificateRequestMsg) + certReq.certificateTypes = []byte{ + byte(certTypeRSASign), + byte(certTypeECDSASign), + } + if c.vers >= VersionTLS12 { + certReq.hasSignatureAndHash = true + certReq.signatureAndHashes = supportedClientCertSignatureAlgorithms + } + + // An empty list of certificateAuthorities signals to + // the client that it may send any certificate in response + // to our request. 
When we know the CAs we trust, then + // we can send them down, so that the client can choose + // an appropriate certificate to give to us. + if config.ClientCAs != nil { + certReq.certificateAuthorities = config.ClientCAs.Subjects() + } + hs.finishedHash.Write(certReq.marshal()) + c.writeRecord(recordTypeHandshake, certReq.marshal()) + } + + helloDone := new(serverHelloDoneMsg) + hs.finishedHash.Write(helloDone.marshal()) + c.writeRecord(recordTypeHandshake, helloDone.marshal()) + + var pub crypto.PublicKey // public key for client auth, if any + + msg, err := c.readHandshake() + if err != nil { + return err + } + + var ok bool + // If we requested a client certificate, then the client must send a + // certificate message, even if it's empty. + if config.ClientAuth >= RequestClientCert { + if certMsg, ok = msg.(*certificateMsg); !ok { + return c.sendAlert(alertHandshakeFailure) + } + hs.finishedHash.Write(certMsg.marshal()) + + if len(certMsg.certificates) == 0 { + // The client didn't actually send a certificate + switch config.ClientAuth { + case RequireAnyClientCert, RequireAndVerifyClientCert: + c.sendAlert(alertBadCertificate) + return errors.New("tls: client didn't provide a certificate") + } + } + + pub, err = hs.processCertsFromClient(certMsg.certificates) + if err != nil { + return err + } + + msg, err = c.readHandshake() + if err != nil { + return err + } + } + + // Get client key exchange + ckx, ok := msg.(*clientKeyExchangeMsg) + if !ok { + return c.sendAlert(alertUnexpectedMessage) + } + hs.finishedHash.Write(ckx.marshal()) + + // If we received a client cert in response to our certificate request message, + // the client will send us a certificateVerifyMsg immediately after the + // clientKeyExchangeMsg. This message is a digest of all preceding + // handshake-layer messages that is signed using the private key corresponding + // to the client's certificate. This allows us to verify that the client is in + // possession of the private key of the certificate. 
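	// Editor's note, not part of the vendored upstream file: the signature
	// checked below covers the running hash of every handshake message so
	// far, which is why certVerify.marshal() is only written into
	// finishedHash after verification succeeds. The ECDSA arm additionally
	// rejects zero or negative R/S values, which asn1.Unmarshal would
	// otherwise accept, before calling ecdsa.Verify.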
+ if len(c.peerCertificates) > 0 { + msg, err = c.readHandshake() + if err != nil { + return err + } + certVerify, ok := msg.(*certificateVerifyMsg) + if !ok { + return c.sendAlert(alertUnexpectedMessage) + } + + switch key := pub.(type) { + case *ecdsa.PublicKey: + ecdsaSig := new(ecdsaSignature) + if _, err = asn1.Unmarshal(certVerify.signature, ecdsaSig); err != nil { + break + } + if ecdsaSig.R.Sign() <= 0 || ecdsaSig.S.Sign() <= 0 { + err = errors.New("ECDSA signature contained zero or negative values") + break + } + digest, _, _ := hs.finishedHash.hashForClientCertificate(signatureECDSA) + if !ecdsa.Verify(key, digest, ecdsaSig.R, ecdsaSig.S) { + err = errors.New("ECDSA verification failure") + break + } + case *rsa.PublicKey: + digest, hashFunc, _ := hs.finishedHash.hashForClientCertificate(signatureRSA) + err = rsa.VerifyPKCS1v15(key, hashFunc, digest, certVerify.signature) + } + if err != nil { + c.sendAlert(alertBadCertificate) + return errors.New("could not validate signature of connection nonces: " + err.Error()) + } + + hs.finishedHash.Write(certVerify.marshal()) + } + + preMasterSecret, err := keyAgreement.processClientKeyExchange(config, hs.cert, ckx, c.vers) + if err != nil { + c.sendAlert(alertHandshakeFailure) + return err + } + hs.masterSecret = masterFromPreMasterSecret(c.vers, preMasterSecret, hs.clientHello.random, hs.hello.random) + + return nil +} + +func (hs *serverHandshakeState) establishKeys() error { + c := hs.c + + clientMAC, serverMAC, clientKey, serverKey, clientIV, serverIV := + keysFromMasterSecret(c.vers, hs.masterSecret, hs.clientHello.random, hs.hello.random, hs.suite.macLen, hs.suite.keyLen, hs.suite.ivLen) + + var clientCipher, serverCipher interface{} + var clientHash, serverHash macFunction + + if hs.suite.aead == nil { + clientCipher = hs.suite.cipher(clientKey, clientIV, true /* for reading */) + clientHash = hs.suite.mac(c.vers, clientMAC) + serverCipher = hs.suite.cipher(serverKey, serverIV, false /* not for reading */) + serverHash = hs.suite.mac(c.vers, serverMAC) + } else { + clientCipher = hs.suite.aead(clientKey, clientIV) + serverCipher = hs.suite.aead(serverKey, serverIV) + } + + c.in.prepareCipherSpec(c.vers, clientCipher, clientHash) + c.out.prepareCipherSpec(c.vers, serverCipher, serverHash) + + return nil +} + +func (hs *serverHandshakeState) readFinished() error { + c := hs.c + + c.readRecord(recordTypeChangeCipherSpec) + if err := c.error(); err != nil { + return err + } + + if hs.hello.nextProtoNeg { + msg, err := c.readHandshake() + if err != nil { + return err + } + nextProto, ok := msg.(*nextProtoMsg) + if !ok { + return c.sendAlert(alertUnexpectedMessage) + } + hs.finishedHash.Write(nextProto.marshal()) + c.clientProtocol = nextProto.proto + } + + msg, err := c.readHandshake() + if err != nil { + return err + } + clientFinished, ok := msg.(*finishedMsg) + if !ok { + return c.sendAlert(alertUnexpectedMessage) + } + + verify := hs.finishedHash.clientSum(hs.masterSecret) + if len(verify) != len(clientFinished.verifyData) || + subtle.ConstantTimeCompare(verify, clientFinished.verifyData) != 1 { + return c.sendAlert(alertHandshakeFailure) + } + + hs.finishedHash.Write(clientFinished.marshal()) + return nil +} + +func (hs *serverHandshakeState) sendSessionTicket() error { + if !hs.hello.ticketSupported { + return nil + } + + c := hs.c + m := new(newSessionTicketMsg) + + var err error + state := sessionState{ + vers: c.vers, + cipherSuite: hs.suite.id, + masterSecret: hs.masterSecret, + certificates: hs.certsFromClient, + } + 
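	// Editor's note, not part of the vendored upstream file: per RFC 5077
	// the ticket is this sessionState encrypted and authenticated under the
	// server's session-ticket key (set up via config.serverInit at the top
	// of serverHandshake), so the server keeps no per-session state. When
	// the client later presents the ticket, checkForResumption recovers
	// masterSecret from it and the key exchange is skipped entirely.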
m.ticket, err = c.encryptTicket(&state) + if err != nil { + return err + } + + hs.finishedHash.Write(m.marshal()) + c.writeRecord(recordTypeHandshake, m.marshal()) + + return nil +} + +func (hs *serverHandshakeState) sendFinished() error { + c := hs.c + + c.writeRecord(recordTypeChangeCipherSpec, []byte{1}) + + finished := new(finishedMsg) + finished.verifyData = hs.finishedHash.serverSum(hs.masterSecret) + hs.finishedHash.Write(finished.marshal()) + c.writeRecord(recordTypeHandshake, finished.marshal()) + + c.cipherSuite = hs.suite.id + + return nil +} + +// processCertsFromClient takes a chain of client certificates either from a +// Certificates message or from a sessionState and verifies them. It returns +// the public key of the leaf certificate. +func (hs *serverHandshakeState) processCertsFromClient(certificates [][]byte) (crypto.PublicKey, error) { + c := hs.c + + hs.certsFromClient = certificates + certs := make([]*x509.Certificate, len(certificates)) + var err error + for i, asn1Data := range certificates { + if certs[i], err = x509.ParseCertificate(asn1Data); err != nil { + c.sendAlert(alertBadCertificate) + return nil, errors.New("tls: failed to parse client certificate: " + err.Error()) + } + } + + if c.config.ClientAuth >= VerifyClientCertIfGiven && len(certs) > 0 { + opts := x509.VerifyOptions{ + Roots: c.config.ClientCAs, + CurrentTime: c.config.time(), + Intermediates: x509.NewCertPool(), + KeyUsages: []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth}, + } + + for _, cert := range certs[1:] { + opts.Intermediates.AddCert(cert) + } + + chains, err := certs[0].Verify(opts) + if err != nil { + c.sendAlert(alertBadCertificate) + return nil, errors.New("tls: failed to verify client's certificate: " + err.Error()) + } + + ok := false + for _, ku := range certs[0].ExtKeyUsage { + if ku == x509.ExtKeyUsageClientAuth { + ok = true + break + } + } + if !ok { + c.sendAlert(alertHandshakeFailure) + return nil, errors.New("tls: client's certificate's extended key usage doesn't permit it to be used for client authentication") + } + + c.verifiedChains = chains + } + + if len(certs) > 0 { + var pub crypto.PublicKey + switch key := certs[0].PublicKey.(type) { + case *ecdsa.PublicKey, *rsa.PublicKey: + pub = key + default: + return nil, c.sendAlert(alertUnsupportedCertificate) + } + c.peerCertificates = certs + return pub, nil + } + + return nil, nil +} + +// tryCipherSuite returns a cipherSuite with the given id if that cipher suite +// is acceptable to use. +func (c *Conn) tryCipherSuite(id uint16, supportedCipherSuites []uint16, version uint16, ellipticOk, ecdsaOk bool) *cipherSuite { + for _, supported := range supportedCipherSuites { + if id == supported { + var candidate *cipherSuite + + for _, s := range cipherSuites { + if s.id == id { + candidate = s + break + } + } + if candidate == nil { + continue + } + // Don't select a ciphersuite which we can't + // support for this client. 
+ if (candidate.flags&suiteECDHE != 0) && !ellipticOk { + continue + } + if (candidate.flags&suiteECDSA != 0) != ecdsaOk { + continue + } + if version < VersionTLS12 && candidate.flags&suiteTLS12 != 0 { + continue + } + return candidate + } + } + + return nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/core/tls/handshake_server_test.go b/vendor/github.com/Azure/azure-sdk-for-go/core/tls/handshake_server_test.go new file mode 100644 index 000000000..c08eba7f1 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/core/tls/handshake_server_test.go @@ -0,0 +1,3796 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package tls + +import ( + "bytes" + "crypto/ecdsa" + "crypto/elliptic" + "crypto/rsa" + "crypto/x509" + "encoding/hex" + "encoding/pem" + "flag" + "fmt" + "io" + "log" + "math/big" + "net" + "os" + "strconv" + "strings" + "sync" + "testing" + "time" +) + +type zeroSource struct{} + +func (zeroSource) Read(b []byte) (n int, err error) { + for i := range b { + b[i] = 0 + } + + return len(b), nil +} + +var testConfig *Config + +func init() { + testConfig = new(Config) + testConfig.Time = func() time.Time { return time.Unix(0, 0) } + testConfig.Rand = zeroSource{} + testConfig.Certificates = make([]Certificate, 2) + testConfig.Certificates[0].Certificate = [][]byte{testRSACertificate} + testConfig.Certificates[0].PrivateKey = testRSAPrivateKey + testConfig.Certificates[1].Certificate = [][]byte{testSNICertificate} + testConfig.Certificates[1].PrivateKey = testRSAPrivateKey + testConfig.BuildNameToCertificate() + testConfig.CipherSuites = []uint16{TLS_RSA_WITH_RC4_128_SHA} + testConfig.InsecureSkipVerify = true + testConfig.MinVersion = VersionSSL30 + testConfig.MaxVersion = VersionTLS10 +} + +func testClientHelloFailure(t *testing.T, m handshakeMessage, expected error) { + // Create in-memory network connection, + // send message to server. Should return + // expected error. + c, s := net.Pipe() + go func() { + cli := Client(c, testConfig) + if ch, ok := m.(*clientHelloMsg); ok { + cli.vers = ch.vers + } + cli.writeRecord(recordTypeHandshake, m.marshal()) + c.Close() + }() + err := Server(s, testConfig).Handshake() + s.Close() + if e, ok := err.(*net.OpError); !ok || e.Err != expected { + t.Errorf("Got error: %s; expected: %s", err, expected) + } +} + +func TestSimpleError(t *testing.T) { + testClientHelloFailure(t, &serverHelloDoneMsg{}, alertUnexpectedMessage) +} + +var badProtocolVersions = []uint16{0x0000, 0x0005, 0x0100, 0x0105, 0x0200, 0x0205} + +func TestRejectBadProtocolVersion(t *testing.T) { + for _, v := range badProtocolVersions { + testClientHelloFailure(t, &clientHelloMsg{vers: v}, alertProtocolVersion) + } +} + +func TestNoSuiteOverlap(t *testing.T) { + clientHello := &clientHelloMsg{ + vers: 0x0301, + cipherSuites: []uint16{0xff00}, + compressionMethods: []uint8{0}, + } + testClientHelloFailure(t, clientHello, alertHandshakeFailure) +} + +func TestNoCompressionOverlap(t *testing.T) { + clientHello := &clientHelloMsg{ + vers: 0x0301, + cipherSuites: []uint16{TLS_RSA_WITH_RC4_128_SHA}, + compressionMethods: []uint8{0xff}, + } + testClientHelloFailure(t, clientHello, alertHandshakeFailure) +} + +func TestTLS12OnlyCipherSuites(t *testing.T) { + // Test that a Server doesn't select a TLS 1.2-only cipher suite when + // the client negotiates TLS 1.1. 
+ var zeros [32]byte + + clientHello := &clientHelloMsg{ + vers: VersionTLS11, + random: zeros[:], + cipherSuites: []uint16{ + // The Server, by default, will use the client's + // preference order. So the GCM cipher suite + // will be selected unless it's excluded because + // of the version in this ClientHello. + TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, + TLS_RSA_WITH_RC4_128_SHA, + }, + compressionMethods: []uint8{compressionNone}, + supportedCurves: []uint16{curveP256, curveP384, curveP521}, + supportedPoints: []uint8{pointFormatUncompressed}, + } + + c, s := net.Pipe() + var reply interface{} + var clientErr error + go func() { + cli := Client(c, testConfig) + cli.vers = clientHello.vers + cli.writeRecord(recordTypeHandshake, clientHello.marshal()) + reply, clientErr = cli.readHandshake() + c.Close() + }() + config := *testConfig + config.CipherSuites = clientHello.cipherSuites + Server(s, &config).Handshake() + s.Close() + if clientErr != nil { + t.Fatal(clientErr) + } + serverHello, ok := reply.(*serverHelloMsg) + if !ok { + t.Fatalf("didn't get ServerHello message in reply. Got %v\n", reply) + } + if s := serverHello.cipherSuite; s != TLS_RSA_WITH_RC4_128_SHA { + t.Fatalf("bad cipher suite from server: %x", s) + } +} + +func TestAlertForwarding(t *testing.T) { + c, s := net.Pipe() + go func() { + Client(c, testConfig).sendAlert(alertUnknownCA) + c.Close() + }() + + err := Server(s, testConfig).Handshake() + s.Close() + if e, ok := err.(*net.OpError); !ok || e.Err != error(alertUnknownCA) { + t.Errorf("Got error: %s; expected: %s", err, error(alertUnknownCA)) + } +} + +func TestClose(t *testing.T) { + c, s := net.Pipe() + go c.Close() + + err := Server(s, testConfig).Handshake() + s.Close() + if err != io.EOF { + t.Errorf("Got error: %s; expected: %s", err, io.EOF) + } +} + +func testHandshake(clientConfig, serverConfig *Config) (state ConnectionState, err error) { + c, s := net.Pipe() + go func() { + cli := Client(c, clientConfig) + cli.Handshake() + c.Close() + }() + server := Server(s, serverConfig) + err = server.Handshake() + if err == nil { + state = server.ConnectionState() + } + s.Close() + return +} + +func TestCipherSuitePreference(t *testing.T) { + serverConfig := &Config{ + CipherSuites: []uint16{TLS_RSA_WITH_RC4_128_SHA, TLS_RSA_WITH_AES_128_CBC_SHA, TLS_ECDHE_RSA_WITH_RC4_128_SHA}, + Certificates: testConfig.Certificates, + MaxVersion: VersionTLS11, + } + clientConfig := &Config{ + CipherSuites: []uint16{TLS_RSA_WITH_AES_128_CBC_SHA, TLS_RSA_WITH_RC4_128_SHA}, + InsecureSkipVerify: true, + } + state, err := testHandshake(clientConfig, serverConfig) + if err != nil { + t.Fatalf("handshake failed: %s", err) + } + if state.CipherSuite != TLS_RSA_WITH_AES_128_CBC_SHA { + // By default the server should use the client's preference. 
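		// Editor's note, not part of the vendored upstream file: with
		// PreferServerCipherSuites unset, the client's first choice
		// (AES-128-CBC) must win here; the second half of this test flips
		// the flag and expects the server's first choice (RC4-128) instead.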
+ t.Fatalf("Client's preference was not used, got %x", state.CipherSuite) + } + + serverConfig.PreferServerCipherSuites = true + state, err = testHandshake(clientConfig, serverConfig) + if err != nil { + t.Fatalf("handshake failed: %s", err) + } + if state.CipherSuite != TLS_RSA_WITH_RC4_128_SHA { + t.Fatalf("Server's preference was not used, got %x", state.CipherSuite) + } +} + +func testServerScript(t *testing.T, name string, serverScript [][]byte, config *Config, peers []*x509.Certificate) { + c, s := net.Pipe() + srv := Server(s, config) + pchan := make(chan []*x509.Certificate, 1) + go func() { + srv.Write([]byte("hello, world\n")) + srv.Close() + s.Close() + st := srv.ConnectionState() + pchan <- st.PeerCertificates + }() + + for i, b := range serverScript { + if i%2 == 0 { + c.Write(b) + continue + } + bb := make([]byte, len(b)) + n, err := io.ReadFull(c, bb) + if err != nil { + t.Fatalf("%s #%d: %s\nRead %d, wanted %d, got %x, wanted %x\n", name, i, err, n, len(bb), bb[:n], b) + } + if !bytes.Equal(b, bb) { + t.Fatalf("%s #%d: mismatch on read: got:%x want:%x", name, i, bb, b) + } + } + c.Close() + + if peers != nil { + gotpeers := <-pchan + if len(peers) == len(gotpeers) { + for i := range peers { + if !peers[i].Equal(gotpeers[i]) { + t.Fatalf("%s: mismatch on peer cert %d", name, i) + } + } + } else { + t.Fatalf("%s: mismatch on peer list length: %d (wanted) != %d (got)", name, len(peers), len(gotpeers)) + } + } +} + +func TestHandshakeServerRSARC4(t *testing.T) { + testServerScript(t, "RSA-RC4", rsaRC4ServerScript, testConfig, nil) +} + +func TestHandshakeServerRSA3DES(t *testing.T) { + des3Config := new(Config) + *des3Config = *testConfig + des3Config.CipherSuites = []uint16{TLS_RSA_WITH_3DES_EDE_CBC_SHA} + testServerScript(t, "RSA-3DES", rsaDES3ServerScript, des3Config, nil) +} + +func TestHandshakeServerRSAAES(t *testing.T) { + aesConfig := new(Config) + *aesConfig = *testConfig + aesConfig.CipherSuites = []uint16{TLS_RSA_WITH_AES_128_CBC_SHA} + testServerScript(t, "RSA-AES", rsaAESServerScript, aesConfig, nil) +} + +func TestHandshakeServerECDHEECDSAAES(t *testing.T) { + ecdsaConfig := new(Config) + *ecdsaConfig = *testConfig + ecdsaConfig.Certificates = make([]Certificate, 1) + ecdsaConfig.Certificates[0].Certificate = [][]byte{testECDSACertificate} + ecdsaConfig.Certificates[0].PrivateKey = testECDSAPrivateKey + ecdsaConfig.BuildNameToCertificate() + ecdsaConfig.CipherSuites = []uint16{TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA} + testServerScript(t, "ECDHE-ECDSA-AES", ecdheECDSAAESServerScript, ecdsaConfig, nil) +} + +func TestHandshakeServerSSLv3(t *testing.T) { + testServerScript(t, "SSLv3", sslv3ServerScript, testConfig, nil) +} + +// TestHandshakeServerSNI involves a client sending an SNI extension of +// "snitest.com", which happens to match the CN of testSNICertificate. The test +// verifies that the server correctly selects that certificate. 
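// Editor's note, not part of the vendored upstream file: the selection this
// test exercises happens in readClientHello via config.getCertificateForName.
// Setting aside the wildcard matching the real lookup also performs, the rule
// reduces to the map built by BuildNameToCertificate with the first
// certificate as the fallback. A rough sketch under those assumptions, with
// a hypothetical helper name (readClientHello has already ensured that
// Certificates is non-empty):

func sketchCertificateForName(c *Config, name string) *Certificate {
	if c.NameToCertificate == nil {
		// No lookup table was built; use the default certificate.
		return &c.Certificates[0]
	}
	if cert, ok := c.NameToCertificate[name]; ok {
		return cert
	}
	// No exact match for the SNI name; fall back to the default.
	return &c.Certificates[0]
}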
+func TestHandshakeServerSNI(t *testing.T) { + testServerScript(t, "SNI", selectCertificateBySNIScript, testConfig, nil) +} + +func TestResumption(t *testing.T) { + testServerScript(t, "IssueTicket", issueSessionTicketTest, testConfig, nil) + testServerScript(t, "Resume", serverResumeTest, testConfig, nil) +} + +func TestTLS12ClientCertServer(t *testing.T) { + config := *testConfig + config.MaxVersion = VersionTLS12 + config.ClientAuth = RequireAnyClientCert + config.CipherSuites = []uint16{TLS_ECDHE_RSA_WITH_RC4_128_SHA} + + testServerScript(t, "TLS12", tls12ServerScript, &config, nil) +} + +type clientauthTest struct { + name string + clientauth ClientAuthType + peers []*x509.Certificate + script [][]byte +} + +func TestClientAuthRSA(t *testing.T) { + for _, cat := range clientauthRSATests { + t.Log("running", cat.name) + cfg := new(Config) + *cfg = *testConfig + cfg.ClientAuth = cat.clientauth + testServerScript(t, cat.name, cat.script, cfg, cat.peers) + } +} + +func TestClientAuthECDSA(t *testing.T) { + for _, cat := range clientauthECDSATests { + t.Log("running", cat.name) + cfg := new(Config) + *cfg = *testConfig + cfg.Certificates = make([]Certificate, 1) + cfg.Certificates[0].Certificate = [][]byte{testECDSACertificate} + cfg.Certificates[0].PrivateKey = testECDSAPrivateKey + cfg.BuildNameToCertificate() + cfg.CipherSuites = []uint16{TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA} + cfg.ClientAuth = cat.clientauth + testServerScript(t, cat.name, cat.script, cfg, cat.peers) + } +} + +// TestCipherSuiteCertPreferance ensures that we select an RSA ciphersuite with +// an RSA certificate and an ECDSA ciphersuite with an ECDSA certificate. +func TestCipherSuiteCertPreferance(t *testing.T) { + var config = *testConfig + config.CipherSuites = []uint16{TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA, TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA} + config.MaxVersion = VersionTLS11 + config.PreferServerCipherSuites = true + testServerScript(t, "CipherSuiteCertPreference", tls11ECDHEAESServerScript, &config, nil) + + config = *testConfig + config.CipherSuites = []uint16{TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA, TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA} + config.Certificates = []Certificate{ + Certificate{ + Certificate: [][]byte{testECDSACertificate}, + PrivateKey: testECDSAPrivateKey, + }, + } + config.BuildNameToCertificate() + config.PreferServerCipherSuites = true + testServerScript(t, "CipherSuiteCertPreference2", ecdheECDSAAESServerScript, &config, nil) +} + +func TestTLS11Server(t *testing.T) { + var config = *testConfig + config.CipherSuites = []uint16{TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA} + config.MaxVersion = VersionTLS11 + testServerScript(t, "TLS11", tls11ECDHEAESServerScript, &config, nil) +} + +func TestAESGCM(t *testing.T) { + var config = *testConfig + config.CipherSuites = []uint16{TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256} + config.MaxVersion = VersionTLS12 + testServerScript(t, "AES-GCM", aesGCMServerScript, &config, nil) +} + +// recordingConn is a net.Conn that records the traffic that passes through it. +// WriteTo can be used to produce Go code that contains the recorded traffic. 
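// Editor's note, not part of the vendored upstream file: the Read and Write
// overrides below coalesce consecutive traffic in one direction into a
// single flow and start a new flow whenever the direction flips (tracked by
// currentlyReading). The recorded [][]byte therefore alternates client-sent
// and server-sent chunks, which is exactly the shape testServerScript above
// consumes: even indices are replayed toward the server, odd indices are
// read back and compared.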
+type recordingConn struct { + net.Conn + lock sync.Mutex + flows [][]byte + currentlyReading bool +} + +func (r *recordingConn) Read(b []byte) (n int, err error) { + if n, err = r.Conn.Read(b); n == 0 { + return + } + b = b[:n] + + r.lock.Lock() + defer r.lock.Unlock() + + if l := len(r.flows); l == 0 || !r.currentlyReading { + buf := make([]byte, len(b)) + copy(buf, b) + r.flows = append(r.flows, buf) + } else { + r.flows[l-1] = append(r.flows[l-1], b[:n]...) + } + r.currentlyReading = true + return +} + +func (r *recordingConn) Write(b []byte) (n int, err error) { + if n, err = r.Conn.Write(b); n == 0 { + return + } + b = b[:n] + + r.lock.Lock() + defer r.lock.Unlock() + + if l := len(r.flows); l == 0 || r.currentlyReading { + buf := make([]byte, len(b)) + copy(buf, b) + r.flows = append(r.flows, buf) + } else { + r.flows[l-1] = append(r.flows[l-1], b[:n]...) + } + r.currentlyReading = false + return +} + +// WriteTo writes Go source code to w that contains the recorded traffic. +func (r *recordingConn) WriteTo(w io.Writer) { + fmt.Fprintf(w, "var changeMe = [][]byte {\n") + for _, buf := range r.flows { + fmt.Fprintf(w, "\t{") + for i, b := range buf { + if i%8 == 0 { + fmt.Fprintf(w, "\n\t\t") + } + fmt.Fprintf(w, "0x%02x, ", b) + } + fmt.Fprintf(w, "\n\t},\n") + } + fmt.Fprintf(w, "}\n") +} + +var serve = flag.Bool("serve", false, "run a TLS server on :10443") +var testCipherSuites = flag.String("ciphersuites", + "0x"+strconv.FormatInt(int64(TLS_RSA_WITH_RC4_128_SHA), 16), + "cipher suites to accept in serving mode") +var testMinVersion = flag.String("minversion", + "0x"+strconv.FormatInt(int64(VersionSSL30), 16), + "minimum version to negotiate") +var testMaxVersion = flag.String("maxversion", + "0x"+strconv.FormatInt(int64(VersionTLS10), 16), + "maximum version to negotiate") +var testClientAuth = flag.Int("clientauth", 0, "value for tls.Config.ClientAuth") + +func GetTestConfig() *Config { + var config = *testConfig + + minVersion, err := strconv.ParseUint(*testMinVersion, 0, 64) + if err != nil { + panic(err) + } + config.MinVersion = uint16(minVersion) + maxVersion, err := strconv.ParseUint(*testMaxVersion, 0, 64) + if err != nil { + panic(err) + } + config.MaxVersion = uint16(maxVersion) + + suites := strings.Split(*testCipherSuites, ",") + config.CipherSuites = make([]uint16, len(suites)) + for i := range suites { + suite, err := strconv.ParseUint(suites[i], 0, 64) + if err != nil { + panic(err) + } + config.CipherSuites[i] = uint16(suite) + } + + ecdsa := false + for _, suite := range config.CipherSuites { + switch suite { + case TLS_ECDHE_ECDSA_WITH_RC4_128_SHA, + TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA, + TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA: + ecdsa = true + } + } + if ecdsa { + config.Certificates = nil + if !*connect { + config.Certificates = make([]Certificate, 1) + config.Certificates[0].Certificate = [][]byte{testECDSACertificate} + config.Certificates[0].PrivateKey = testECDSAPrivateKey + } + config.BuildNameToCertificate() + } + + config.ClientAuth = ClientAuthType(*testClientAuth) + return &config +} + +func TestRunServer(t *testing.T) { + if !*serve { + return + } + + config := GetTestConfig() + + const addr = ":10443" + l, err := net.Listen("tcp", addr) + if err != nil { + t.Fatal(err) + } + log.Printf("Now listening for connections on %s", addr) + + for { + tcpConn, err := l.Accept() + if err != nil { + log.Printf("error accepting connection: %s", err) + break + } + + record := &recordingConn{ + Conn: tcpConn, + } + + conn := Server(record, config) + if err := 
conn.Handshake(); err != nil { + log.Printf("error from TLS handshake: %s", err) + break + } + + _, err = conn.Write([]byte("hello, world\n")) + if err != nil { + log.Printf("error from Write: %s", err) + continue + } + + conn.Close() + + record.WriteTo(os.Stdout) + } +} + +func bigFromString(s string) *big.Int { + ret := new(big.Int) + ret.SetString(s, 10) + return ret +} + +func fromHex(s string) []byte { + b, _ := hex.DecodeString(s) + return b +} + +var testRSACertificate = fromHex("308202b030820219a00302010202090085b0bba48a7fb8ca300d06092a864886f70d01010505003045310b3009060355040613024155311330110603550408130a536f6d652d53746174653121301f060355040a1318496e7465726e6574205769646769747320507479204c7464301e170d3130303432343039303933385a170d3131303432343039303933385a3045310b3009060355040613024155311330110603550408130a536f6d652d53746174653121301f060355040a1318496e7465726e6574205769646769747320507479204c746430819f300d06092a864886f70d010101050003818d0030818902818100bb79d6f517b5e5bf4610d0dc69bee62b07435ad0032d8a7a4385b71452e7a5654c2c78b8238cb5b482e5de1f953b7e62a52ca533d6fe125c7a56fcf506bffa587b263fb5cd04d3d0c921964ac7f4549f5abfef427100fe1899077f7e887d7df10439c4a22edb51c97ce3c04c3b326601cfafb11db8719a1ddbdb896baeda2d790203010001a381a73081a4301d0603551d0e04160414b1ade2855acfcb28db69ce2369ded3268e18883930750603551d23046e306c8014b1ade2855acfcb28db69ce2369ded3268e188839a149a4473045310b3009060355040613024155311330110603550408130a536f6d652d53746174653121301f060355040a1318496e7465726e6574205769646769747320507479204c746482090085b0bba48a7fb8ca300c0603551d13040530030101ff300d06092a864886f70d010105050003818100086c4524c76bb159ab0c52ccf2b014d7879d7a6475b55a9566e4c52b8eae12661feb4f38b36e60d392fdf74108b52513b1187a24fb301dbaed98b917ece7d73159db95d31d78ea50565cd5825a2d5a5f33c4b6d8c97590968c0f5298b5cd981f89205ff2a01ca31b9694dda9fd57e970e8266d71999b266e3850296c90a7bdd9") + +var testECDSACertificate = fromHex("3082020030820162020900b8bf2d47a0d2ebf4300906072a8648ce3d04013045310b3009060355040613024155311330110603550408130a536f6d652d53746174653121301f060355040a1318496e7465726e6574205769646769747320507479204c7464301e170d3132313132323135303633325a170d3232313132303135303633325a3045310b3009060355040613024155311330110603550408130a536f6d652d53746174653121301f060355040a1318496e7465726e6574205769646769747320507479204c746430819b301006072a8648ce3d020106052b81040023038186000400c4a1edbe98f90b4873367ec316561122f23d53c33b4d213dcd6b75e6f6b0dc9adf26c1bcb287f072327cb3642f1c90bcea6823107efee325c0483a69e0286dd33700ef0462dd0da09c706283d881d36431aa9e9731bd96b068c09b23de76643f1a5c7fe9120e5858b65f70dd9bd8ead5d7f5d5ccb9b69f30665b669a20e227e5bffe3b300906072a8648ce3d040103818c0030818802420188a24febe245c5487d1bacf5ed989dae4770c05e1bb62fbdf1b64db76140d311a2ceee0b7e927eff769dc33b7ea53fcefa10e259ec472d7cacda4e970e15a06fd00242014dfcbe67139c2d050ebd3fa38c25c13313830d9406bbd4377af6ec7ac9862eddd711697f857c56defb31782be4c7780daecbbe9e4e3624317b6a0f399512078f2a") + +var testSNICertificate = 
fromHex("308201f23082015da003020102020100300b06092a864886f70d01010530283110300e060355040a130741636d6520436f311430120603550403130b736e69746573742e636f6d301e170d3132303431313137343033355a170d3133303431313137343533355a30283110300e060355040a130741636d6520436f311430120603550403130b736e69746573742e636f6d30819d300b06092a864886f70d01010103818d0030818902818100bb79d6f517b5e5bf4610d0dc69bee62b07435ad0032d8a7a4385b71452e7a5654c2c78b8238cb5b482e5de1f953b7e62a52ca533d6fe125c7a56fcf506bffa587b263fb5cd04d3d0c921964ac7f4549f5abfef427100fe1899077f7e887d7df10439c4a22edb51c97ce3c04c3b326601cfafb11db8719a1ddbdb896baeda2d790203010001a3323030300e0603551d0f0101ff0404030200a0300d0603551d0e0406040401020304300f0603551d2304083006800401020304300b06092a864886f70d0101050381810089c6455f1c1f5ef8eb1ab174ee2439059f5c4259bb1a8d86cdb1d056f56a717da40e95ab90f59e8deaf627c157995094db0802266eb34fc6842dea8a4b68d9c1389103ab84fb9e1f85d9b5d23ff2312c8670fbb540148245a4ebafe264d90c8a4cf4f85b0fac12ac2fc4a3154bad52462868af96c62c6525d652b6e31845bdcc") + +var testRSAPrivateKey = &rsa.PrivateKey{ + PublicKey: rsa.PublicKey{ + N: bigFromString("131650079503776001033793877885499001334664249354723305978524647182322416328664556247316495448366990052837680518067798333412266673813370895702118944398081598789828837447552603077848001020611640547221687072142537202428102790818451901395596882588063427854225330436740647715202971973145151161964464812406232198521"), + E: 65537, + }, + D: bigFromString("29354450337804273969007277378287027274721892607543397931919078829901848876371746653677097639302788129485893852488285045793268732234230875671682624082413996177431586734171663258657462237320300610850244186316880055243099640544518318093544057213190320837094958164973959123058337475052510833916491060913053867729"), + Primes: []*big.Int{ + bigFromString("11969277782311800166562047708379380720136961987713178380670422671426759650127150688426177829077494755200794297055316163155755835813760102405344560929062149"), + bigFromString("10998999429884441391899182616418192492905073053684657075974935218461686523870125521822756579792315215543092255516093840728890783887287417039645833477273829"), + }, +} + +var testECDSAPrivateKey = &ecdsa.PrivateKey{ + PublicKey: ecdsa.PublicKey{ + Curve: &elliptic.CurveParams{ + P: bigFromString("6864797660130609714981900799081393217269435300143305409394463459185543183397656052122559640661454554977296311391480858037121987999716643812574028291115057151"), + N: bigFromString("6864797660130609714981900799081393217269435300143305409394463459185543183397655394245057746333217197532963996371363321113864768612440380340372808892707005449"), + B: bigFromString("1093849038073734274511112390766805569936207598951683748994586394495953116150735016013708737573759623248592132296706313309438452531591012912142327488478985984"), + Gx: bigFromString("2661740802050217063228768716723360960729859168756973147706671368418802944996427808491545080627771902352094241225065558662157113545570916814161637315895999846"), + Gy: bigFromString("3757180025770020463545507224491183603594455134769762486694567779615544477440556316691234405012945539562144444537289428522585666729196580810124344277578376784"), + BitSize: 521, + }, + X: bigFromString("2636411247892461147287360222306590634450676461695221912739908880441342231985950069527906976759812296359387337367668045707086543273113073382714101597903639351"), + Y: bigFromString("3204695818431246682253994090650952614555094516658732116404513121125038617915183037601737180082382202488628239201196033284060130040574800684774115478859677243"), + 
}, + D: bigFromString("5477294338614160138026852784385529180817726002953041720191098180813046231640184669647735805135001309477695746518160084669446643325196003346204701381388769751"), +} + +func loadPEMCert(in string) *x509.Certificate { + block, _ := pem.Decode([]byte(in)) + if block.Type == "CERTIFICATE" && len(block.Headers) == 0 { + cert, err := x509.ParseCertificate(block.Bytes) + if err == nil { + return cert + } + panic("error parsing cert") + } + panic("error parsing PEM") +} + +// Script of interaction with gnutls implementation. +// The values for this test are obtained by building and running in server mode: +// % go test -test.run "TestRunServer" -serve +// The recorded bytes are written to stdout. +var rsaRC4ServerScript = [][]byte{ + { + 0x16, 0x03, 0x01, 0x00, 0x54, 0x01, 0x00, 0x00, + 0x50, 0x03, 0x01, 0x50, 0x77, 0x3d, 0xbd, 0x32, + 0x13, 0xd7, 0xea, 0x33, 0x65, 0x02, 0xb8, 0x70, + 0xb7, 0x84, 0xc4, 0x05, 0x1f, 0xa4, 0x24, 0xc4, + 0x91, 0x69, 0x04, 0x32, 0x96, 0xfe, 0x5b, 0x49, + 0x71, 0x60, 0x9a, 0x00, 0x00, 0x28, 0x00, 0x39, + 0x00, 0x38, 0x00, 0x35, 0x00, 0x16, 0x00, 0x13, + 0x00, 0x0a, 0x00, 0x33, 0x00, 0x32, 0x00, 0x2f, + 0x00, 0x05, 0x00, 0x04, 0x00, 0x15, 0x00, 0x12, + 0x00, 0x09, 0x00, 0x14, 0x00, 0x11, 0x00, 0x08, + 0x00, 0x06, 0x00, 0x03, 0x00, 0xff, 0x02, 0x01, + 0x00, + }, + { + 0x16, 0x03, 0x01, 0x00, 0x2a, 0x02, 0x00, 0x00, + 0x26, 0x03, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x05, 0x00, 0x16, + 0x03, 0x01, 0x02, 0xbe, 0x0b, 0x00, 0x02, 0xba, + 0x00, 0x02, 0xb7, 0x00, 0x02, 0xb4, 0x30, 0x82, + 0x02, 0xb0, 0x30, 0x82, 0x02, 0x19, 0xa0, 0x03, + 0x02, 0x01, 0x02, 0x02, 0x09, 0x00, 0x85, 0xb0, + 0xbb, 0xa4, 0x8a, 0x7f, 0xb8, 0xca, 0x30, 0x0d, + 0x06, 0x09, 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, + 0x01, 0x01, 0x05, 0x05, 0x00, 0x30, 0x45, 0x31, + 0x0b, 0x30, 0x09, 0x06, 0x03, 0x55, 0x04, 0x06, + 0x13, 0x02, 0x41, 0x55, 0x31, 0x13, 0x30, 0x11, + 0x06, 0x03, 0x55, 0x04, 0x08, 0x13, 0x0a, 0x53, + 0x6f, 0x6d, 0x65, 0x2d, 0x53, 0x74, 0x61, 0x74, + 0x65, 0x31, 0x21, 0x30, 0x1f, 0x06, 0x03, 0x55, + 0x04, 0x0a, 0x13, 0x18, 0x49, 0x6e, 0x74, 0x65, + 0x72, 0x6e, 0x65, 0x74, 0x20, 0x57, 0x69, 0x64, + 0x67, 0x69, 0x74, 0x73, 0x20, 0x50, 0x74, 0x79, + 0x20, 0x4c, 0x74, 0x64, 0x30, 0x1e, 0x17, 0x0d, + 0x31, 0x30, 0x30, 0x34, 0x32, 0x34, 0x30, 0x39, + 0x30, 0x39, 0x33, 0x38, 0x5a, 0x17, 0x0d, 0x31, + 0x31, 0x30, 0x34, 0x32, 0x34, 0x30, 0x39, 0x30, + 0x39, 0x33, 0x38, 0x5a, 0x30, 0x45, 0x31, 0x0b, + 0x30, 0x09, 0x06, 0x03, 0x55, 0x04, 0x06, 0x13, + 0x02, 0x41, 0x55, 0x31, 0x13, 0x30, 0x11, 0x06, + 0x03, 0x55, 0x04, 0x08, 0x13, 0x0a, 0x53, 0x6f, + 0x6d, 0x65, 0x2d, 0x53, 0x74, 0x61, 0x74, 0x65, + 0x31, 0x21, 0x30, 0x1f, 0x06, 0x03, 0x55, 0x04, + 0x0a, 0x13, 0x18, 0x49, 0x6e, 0x74, 0x65, 0x72, + 0x6e, 0x65, 0x74, 0x20, 0x57, 0x69, 0x64, 0x67, + 0x69, 0x74, 0x73, 0x20, 0x50, 0x74, 0x79, 0x20, + 0x4c, 0x74, 0x64, 0x30, 0x81, 0x9f, 0x30, 0x0d, + 0x06, 0x09, 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, + 0x01, 0x01, 0x01, 0x05, 0x00, 0x03, 0x81, 0x8d, + 0x00, 0x30, 0x81, 0x89, 0x02, 0x81, 0x81, 0x00, + 0xbb, 0x79, 0xd6, 0xf5, 0x17, 0xb5, 0xe5, 0xbf, + 0x46, 0x10, 0xd0, 0xdc, 0x69, 0xbe, 0xe6, 0x2b, + 0x07, 0x43, 0x5a, 0xd0, 0x03, 0x2d, 0x8a, 0x7a, + 0x43, 0x85, 0xb7, 0x14, 0x52, 0xe7, 0xa5, 0x65, + 0x4c, 0x2c, 0x78, 0xb8, 0x23, 0x8c, 0xb5, 0xb4, + 0x82, 0xe5, 0xde, 0x1f, 0x95, 0x3b, 0x7e, 0x62, + 0xa5, 0x2c, 0xa5, 0x33, 
0xd6, 0xfe, 0x12, 0x5c, + 0x7a, 0x56, 0xfc, 0xf5, 0x06, 0xbf, 0xfa, 0x58, + 0x7b, 0x26, 0x3f, 0xb5, 0xcd, 0x04, 0xd3, 0xd0, + 0xc9, 0x21, 0x96, 0x4a, 0xc7, 0xf4, 0x54, 0x9f, + 0x5a, 0xbf, 0xef, 0x42, 0x71, 0x00, 0xfe, 0x18, + 0x99, 0x07, 0x7f, 0x7e, 0x88, 0x7d, 0x7d, 0xf1, + 0x04, 0x39, 0xc4, 0xa2, 0x2e, 0xdb, 0x51, 0xc9, + 0x7c, 0xe3, 0xc0, 0x4c, 0x3b, 0x32, 0x66, 0x01, + 0xcf, 0xaf, 0xb1, 0x1d, 0xb8, 0x71, 0x9a, 0x1d, + 0xdb, 0xdb, 0x89, 0x6b, 0xae, 0xda, 0x2d, 0x79, + 0x02, 0x03, 0x01, 0x00, 0x01, 0xa3, 0x81, 0xa7, + 0x30, 0x81, 0xa4, 0x30, 0x1d, 0x06, 0x03, 0x55, + 0x1d, 0x0e, 0x04, 0x16, 0x04, 0x14, 0xb1, 0xad, + 0xe2, 0x85, 0x5a, 0xcf, 0xcb, 0x28, 0xdb, 0x69, + 0xce, 0x23, 0x69, 0xde, 0xd3, 0x26, 0x8e, 0x18, + 0x88, 0x39, 0x30, 0x75, 0x06, 0x03, 0x55, 0x1d, + 0x23, 0x04, 0x6e, 0x30, 0x6c, 0x80, 0x14, 0xb1, + 0xad, 0xe2, 0x85, 0x5a, 0xcf, 0xcb, 0x28, 0xdb, + 0x69, 0xce, 0x23, 0x69, 0xde, 0xd3, 0x26, 0x8e, + 0x18, 0x88, 0x39, 0xa1, 0x49, 0xa4, 0x47, 0x30, + 0x45, 0x31, 0x0b, 0x30, 0x09, 0x06, 0x03, 0x55, + 0x04, 0x06, 0x13, 0x02, 0x41, 0x55, 0x31, 0x13, + 0x30, 0x11, 0x06, 0x03, 0x55, 0x04, 0x08, 0x13, + 0x0a, 0x53, 0x6f, 0x6d, 0x65, 0x2d, 0x53, 0x74, + 0x61, 0x74, 0x65, 0x31, 0x21, 0x30, 0x1f, 0x06, + 0x03, 0x55, 0x04, 0x0a, 0x13, 0x18, 0x49, 0x6e, + 0x74, 0x65, 0x72, 0x6e, 0x65, 0x74, 0x20, 0x57, + 0x69, 0x64, 0x67, 0x69, 0x74, 0x73, 0x20, 0x50, + 0x74, 0x79, 0x20, 0x4c, 0x74, 0x64, 0x82, 0x09, + 0x00, 0x85, 0xb0, 0xbb, 0xa4, 0x8a, 0x7f, 0xb8, + 0xca, 0x30, 0x0c, 0x06, 0x03, 0x55, 0x1d, 0x13, + 0x04, 0x05, 0x30, 0x03, 0x01, 0x01, 0xff, 0x30, + 0x0d, 0x06, 0x09, 0x2a, 0x86, 0x48, 0x86, 0xf7, + 0x0d, 0x01, 0x01, 0x05, 0x05, 0x00, 0x03, 0x81, + 0x81, 0x00, 0x08, 0x6c, 0x45, 0x24, 0xc7, 0x6b, + 0xb1, 0x59, 0xab, 0x0c, 0x52, 0xcc, 0xf2, 0xb0, + 0x14, 0xd7, 0x87, 0x9d, 0x7a, 0x64, 0x75, 0xb5, + 0x5a, 0x95, 0x66, 0xe4, 0xc5, 0x2b, 0x8e, 0xae, + 0x12, 0x66, 0x1f, 0xeb, 0x4f, 0x38, 0xb3, 0x6e, + 0x60, 0xd3, 0x92, 0xfd, 0xf7, 0x41, 0x08, 0xb5, + 0x25, 0x13, 0xb1, 0x18, 0x7a, 0x24, 0xfb, 0x30, + 0x1d, 0xba, 0xed, 0x98, 0xb9, 0x17, 0xec, 0xe7, + 0xd7, 0x31, 0x59, 0xdb, 0x95, 0xd3, 0x1d, 0x78, + 0xea, 0x50, 0x56, 0x5c, 0xd5, 0x82, 0x5a, 0x2d, + 0x5a, 0x5f, 0x33, 0xc4, 0xb6, 0xd8, 0xc9, 0x75, + 0x90, 0x96, 0x8c, 0x0f, 0x52, 0x98, 0xb5, 0xcd, + 0x98, 0x1f, 0x89, 0x20, 0x5f, 0xf2, 0xa0, 0x1c, + 0xa3, 0x1b, 0x96, 0x94, 0xdd, 0xa9, 0xfd, 0x57, + 0xe9, 0x70, 0xe8, 0x26, 0x6d, 0x71, 0x99, 0x9b, + 0x26, 0x6e, 0x38, 0x50, 0x29, 0x6c, 0x90, 0xa7, + 0xbd, 0xd9, 0x16, 0x03, 0x01, 0x00, 0x04, 0x0e, + 0x00, 0x00, 0x00, + }, + { + 0x16, 0x03, 0x01, 0x00, 0x86, 0x10, 0x00, 0x00, + 0x82, 0x00, 0x80, 0x2d, 0x09, 0x7c, 0x7f, 0xfc, + 0x84, 0xce, 0xb3, 0x30, 0x9b, 0xf9, 0xb7, 0xc8, + 0xc3, 0xff, 0xee, 0x6f, 0x20, 0x8a, 0xf4, 0xfb, + 0x86, 0x55, 0x1f, 0x6a, 0xb4, 0x81, 0x50, 0x3a, + 0x46, 0x1b, 0xd3, 0xca, 0x4b, 0x11, 0xff, 0xef, + 0x02, 0xbc, 0x18, 0xb8, 0x4a, 0x7d, 0x43, 0x23, + 0x96, 0x92, 0x27, 0x7c, 0xca, 0xcf, 0xe6, 0x91, + 0xe8, 0x14, 0x97, 0x68, 0xb4, 0xe5, 0xc0, 0xc9, + 0x23, 0xdd, 0x54, 0x07, 0xa6, 0x2e, 0x8c, 0x98, + 0xfc, 0xc6, 0x8c, 0x04, 0x6b, 0x1b, 0x5f, 0xd5, + 0x3d, 0x8b, 0x6c, 0x55, 0x4f, 0x7a, 0xe6, 0x6c, + 0x74, 0x2c, 0x1e, 0x34, 0xdb, 0xfb, 0x00, 0xb1, + 0x4e, 0x10, 0x21, 0x16, 0xe0, 0x3e, 0xc5, 0x64, + 0x84, 0x28, 0x2b, 0x2b, 0x29, 0x47, 0x51, 0x34, + 0x76, 0x15, 0x20, 0x71, 0x0b, 0x30, 0xa1, 0x85, + 0xd5, 0x15, 0x18, 0x14, 0x64, 0x4b, 0x40, 0x7c, + 0x4f, 0xb3, 0x7b, 0x14, 0x03, 0x01, 0x00, 0x01, + 0x01, 0x16, 0x03, 0x01, 0x00, 0x24, 0xab, 0xee, + 0xf5, 0x97, 0x5f, 0xc6, 0x78, 0xf3, 0xc6, 0x83, + 
0x5b, 0x55, 0x4f, 0xcb, 0x45, 0x3f, 0xfa, 0xf7, + 0x05, 0x02, 0xc2, 0x63, 0x87, 0x18, 0xb5, 0x9a, + 0x62, 0xe2, 0x3f, 0x88, 0x5a, 0x60, 0x61, 0x72, + 0xfa, 0x9c, + }, + { + 0x14, 0x03, 0x01, 0x00, 0x01, 0x01, 0x16, 0x03, + 0x01, 0x00, 0x24, 0x72, 0xa4, 0xe4, 0xaa, 0xd2, + 0xc4, 0x39, 0x7e, 0x2a, 0xc1, 0x6f, 0x34, 0x42, + 0x28, 0xcb, 0x9d, 0x7a, 0x09, 0xca, 0x96, 0xad, + 0x0e, 0x11, 0x51, 0x8a, 0x06, 0xb0, 0xe9, 0xca, + 0xeb, 0xce, 0xe2, 0xd5, 0x2e, 0xc1, 0x8d, 0x17, + 0x03, 0x01, 0x00, 0x21, 0x2e, 0x61, 0x86, 0x17, + 0xdb, 0xa6, 0x30, 0xe2, 0x62, 0x06, 0x2a, 0x8b, + 0x75, 0x2c, 0x2d, 0xcf, 0xf5, 0x01, 0x11, 0x52, + 0x81, 0x38, 0xcf, 0xd5, 0xf7, 0xdc, 0x52, 0x31, + 0x1f, 0x97, 0x43, 0xc2, 0x71, 0x15, 0x03, 0x01, + 0x00, 0x16, 0xe0, 0x21, 0xfe, 0x36, 0x2e, 0x68, + 0x2c, 0xf1, 0xbe, 0x04, 0xec, 0xd4, 0xc6, 0xdd, + 0xac, 0x6f, 0x4c, 0x85, 0x32, 0x3f, 0x87, 0x1b, + }, +} + +var rsaDES3ServerScript = [][]byte{ + { + 0x16, 0x03, 0x00, 0x00, 0xc5, 0x01, 0x00, 0x00, + 0xc1, 0x03, 0x03, 0x50, 0xae, 0x5d, 0x38, 0xec, + 0xaa, 0x2f, 0x41, 0xf9, 0xd2, 0x7b, 0xa1, 0xfd, + 0x0f, 0xff, 0x4e, 0x54, 0x0e, 0x15, 0x57, 0xaf, + 0x2c, 0x91, 0xb5, 0x35, 0x5b, 0x2e, 0xb0, 0xec, + 0x20, 0xe5, 0xd2, 0x00, 0x00, 0x50, 0xc0, 0x09, + 0xc0, 0x23, 0xc0, 0x2b, 0xc0, 0x0a, 0xc0, 0x24, + 0xc0, 0x2c, 0xc0, 0x08, 0xc0, 0x13, 0xc0, 0x27, + 0xc0, 0x2f, 0xc0, 0x14, 0xc0, 0x30, 0xc0, 0x12, + 0x00, 0x33, 0x00, 0x67, 0x00, 0x45, 0x00, 0x9e, + 0x00, 0x39, 0x00, 0x6b, 0x00, 0x88, 0x00, 0x16, + 0x00, 0x32, 0x00, 0x40, 0x00, 0x44, 0x00, 0xa2, + 0x00, 0x38, 0x00, 0x6a, 0x00, 0x87, 0x00, 0x13, + 0x00, 0x66, 0x00, 0x2f, 0x00, 0x3c, 0x00, 0x41, + 0x00, 0x9c, 0x00, 0x35, 0x00, 0x3d, 0x00, 0x84, + 0x00, 0x0a, 0x00, 0x05, 0x00, 0x04, 0x01, 0x00, + 0x00, 0x48, 0x00, 0x05, 0x00, 0x05, 0x01, 0x00, + 0x00, 0x00, 0x00, 0xff, 0x01, 0x00, 0x01, 0x00, + 0x00, 0x23, 0x00, 0x00, 0x00, 0x0a, 0x00, 0x0c, + 0x00, 0x0a, 0x00, 0x13, 0x00, 0x15, 0x00, 0x17, + 0x00, 0x18, 0x00, 0x19, 0x00, 0x0b, 0x00, 0x02, + 0x01, 0x00, 0x00, 0x0d, 0x00, 0x1c, 0x00, 0x1a, + 0x04, 0x01, 0x04, 0x02, 0x04, 0x03, 0x05, 0x01, + 0x05, 0x03, 0x06, 0x01, 0x06, 0x03, 0x03, 0x01, + 0x03, 0x02, 0x03, 0x03, 0x02, 0x01, 0x02, 0x02, + 0x02, 0x03, + }, + { + 0x16, 0x03, 0x01, 0x00, 0x30, 0x02, 0x00, 0x00, + 0x2c, 0x03, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x0a, 0x00, 0x00, + 0x04, 0x00, 0x23, 0x00, 0x00, 0x16, 0x03, 0x01, + 0x02, 0xbe, 0x0b, 0x00, 0x02, 0xba, 0x00, 0x02, + 0xb7, 0x00, 0x02, 0xb4, 0x30, 0x82, 0x02, 0xb0, + 0x30, 0x82, 0x02, 0x19, 0xa0, 0x03, 0x02, 0x01, + 0x02, 0x02, 0x09, 0x00, 0x85, 0xb0, 0xbb, 0xa4, + 0x8a, 0x7f, 0xb8, 0xca, 0x30, 0x0d, 0x06, 0x09, + 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x01, + 0x05, 0x05, 0x00, 0x30, 0x45, 0x31, 0x0b, 0x30, + 0x09, 0x06, 0x03, 0x55, 0x04, 0x06, 0x13, 0x02, + 0x41, 0x55, 0x31, 0x13, 0x30, 0x11, 0x06, 0x03, + 0x55, 0x04, 0x08, 0x13, 0x0a, 0x53, 0x6f, 0x6d, + 0x65, 0x2d, 0x53, 0x74, 0x61, 0x74, 0x65, 0x31, + 0x21, 0x30, 0x1f, 0x06, 0x03, 0x55, 0x04, 0x0a, + 0x13, 0x18, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x6e, + 0x65, 0x74, 0x20, 0x57, 0x69, 0x64, 0x67, 0x69, + 0x74, 0x73, 0x20, 0x50, 0x74, 0x79, 0x20, 0x4c, + 0x74, 0x64, 0x30, 0x1e, 0x17, 0x0d, 0x31, 0x30, + 0x30, 0x34, 0x32, 0x34, 0x30, 0x39, 0x30, 0x39, + 0x33, 0x38, 0x5a, 0x17, 0x0d, 0x31, 0x31, 0x30, + 0x34, 0x32, 0x34, 0x30, 0x39, 0x30, 0x39, 0x33, + 0x38, 0x5a, 0x30, 0x45, 0x31, 0x0b, 0x30, 0x09, + 0x06, 
0x03, 0x55, 0x04, 0x06, 0x13, 0x02, 0x41, + 0x55, 0x31, 0x13, 0x30, 0x11, 0x06, 0x03, 0x55, + 0x04, 0x08, 0x13, 0x0a, 0x53, 0x6f, 0x6d, 0x65, + 0x2d, 0x53, 0x74, 0x61, 0x74, 0x65, 0x31, 0x21, + 0x30, 0x1f, 0x06, 0x03, 0x55, 0x04, 0x0a, 0x13, + 0x18, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x65, + 0x74, 0x20, 0x57, 0x69, 0x64, 0x67, 0x69, 0x74, + 0x73, 0x20, 0x50, 0x74, 0x79, 0x20, 0x4c, 0x74, + 0x64, 0x30, 0x81, 0x9f, 0x30, 0x0d, 0x06, 0x09, + 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x01, + 0x01, 0x05, 0x00, 0x03, 0x81, 0x8d, 0x00, 0x30, + 0x81, 0x89, 0x02, 0x81, 0x81, 0x00, 0xbb, 0x79, + 0xd6, 0xf5, 0x17, 0xb5, 0xe5, 0xbf, 0x46, 0x10, + 0xd0, 0xdc, 0x69, 0xbe, 0xe6, 0x2b, 0x07, 0x43, + 0x5a, 0xd0, 0x03, 0x2d, 0x8a, 0x7a, 0x43, 0x85, + 0xb7, 0x14, 0x52, 0xe7, 0xa5, 0x65, 0x4c, 0x2c, + 0x78, 0xb8, 0x23, 0x8c, 0xb5, 0xb4, 0x82, 0xe5, + 0xde, 0x1f, 0x95, 0x3b, 0x7e, 0x62, 0xa5, 0x2c, + 0xa5, 0x33, 0xd6, 0xfe, 0x12, 0x5c, 0x7a, 0x56, + 0xfc, 0xf5, 0x06, 0xbf, 0xfa, 0x58, 0x7b, 0x26, + 0x3f, 0xb5, 0xcd, 0x04, 0xd3, 0xd0, 0xc9, 0x21, + 0x96, 0x4a, 0xc7, 0xf4, 0x54, 0x9f, 0x5a, 0xbf, + 0xef, 0x42, 0x71, 0x00, 0xfe, 0x18, 0x99, 0x07, + 0x7f, 0x7e, 0x88, 0x7d, 0x7d, 0xf1, 0x04, 0x39, + 0xc4, 0xa2, 0x2e, 0xdb, 0x51, 0xc9, 0x7c, 0xe3, + 0xc0, 0x4c, 0x3b, 0x32, 0x66, 0x01, 0xcf, 0xaf, + 0xb1, 0x1d, 0xb8, 0x71, 0x9a, 0x1d, 0xdb, 0xdb, + 0x89, 0x6b, 0xae, 0xda, 0x2d, 0x79, 0x02, 0x03, + 0x01, 0x00, 0x01, 0xa3, 0x81, 0xa7, 0x30, 0x81, + 0xa4, 0x30, 0x1d, 0x06, 0x03, 0x55, 0x1d, 0x0e, + 0x04, 0x16, 0x04, 0x14, 0xb1, 0xad, 0xe2, 0x85, + 0x5a, 0xcf, 0xcb, 0x28, 0xdb, 0x69, 0xce, 0x23, + 0x69, 0xde, 0xd3, 0x26, 0x8e, 0x18, 0x88, 0x39, + 0x30, 0x75, 0x06, 0x03, 0x55, 0x1d, 0x23, 0x04, + 0x6e, 0x30, 0x6c, 0x80, 0x14, 0xb1, 0xad, 0xe2, + 0x85, 0x5a, 0xcf, 0xcb, 0x28, 0xdb, 0x69, 0xce, + 0x23, 0x69, 0xde, 0xd3, 0x26, 0x8e, 0x18, 0x88, + 0x39, 0xa1, 0x49, 0xa4, 0x47, 0x30, 0x45, 0x31, + 0x0b, 0x30, 0x09, 0x06, 0x03, 0x55, 0x04, 0x06, + 0x13, 0x02, 0x41, 0x55, 0x31, 0x13, 0x30, 0x11, + 0x06, 0x03, 0x55, 0x04, 0x08, 0x13, 0x0a, 0x53, + 0x6f, 0x6d, 0x65, 0x2d, 0x53, 0x74, 0x61, 0x74, + 0x65, 0x31, 0x21, 0x30, 0x1f, 0x06, 0x03, 0x55, + 0x04, 0x0a, 0x13, 0x18, 0x49, 0x6e, 0x74, 0x65, + 0x72, 0x6e, 0x65, 0x74, 0x20, 0x57, 0x69, 0x64, + 0x67, 0x69, 0x74, 0x73, 0x20, 0x50, 0x74, 0x79, + 0x20, 0x4c, 0x74, 0x64, 0x82, 0x09, 0x00, 0x85, + 0xb0, 0xbb, 0xa4, 0x8a, 0x7f, 0xb8, 0xca, 0x30, + 0x0c, 0x06, 0x03, 0x55, 0x1d, 0x13, 0x04, 0x05, + 0x30, 0x03, 0x01, 0x01, 0xff, 0x30, 0x0d, 0x06, + 0x09, 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, + 0x01, 0x05, 0x05, 0x00, 0x03, 0x81, 0x81, 0x00, + 0x08, 0x6c, 0x45, 0x24, 0xc7, 0x6b, 0xb1, 0x59, + 0xab, 0x0c, 0x52, 0xcc, 0xf2, 0xb0, 0x14, 0xd7, + 0x87, 0x9d, 0x7a, 0x64, 0x75, 0xb5, 0x5a, 0x95, + 0x66, 0xe4, 0xc5, 0x2b, 0x8e, 0xae, 0x12, 0x66, + 0x1f, 0xeb, 0x4f, 0x38, 0xb3, 0x6e, 0x60, 0xd3, + 0x92, 0xfd, 0xf7, 0x41, 0x08, 0xb5, 0x25, 0x13, + 0xb1, 0x18, 0x7a, 0x24, 0xfb, 0x30, 0x1d, 0xba, + 0xed, 0x98, 0xb9, 0x17, 0xec, 0xe7, 0xd7, 0x31, + 0x59, 0xdb, 0x95, 0xd3, 0x1d, 0x78, 0xea, 0x50, + 0x56, 0x5c, 0xd5, 0x82, 0x5a, 0x2d, 0x5a, 0x5f, + 0x33, 0xc4, 0xb6, 0xd8, 0xc9, 0x75, 0x90, 0x96, + 0x8c, 0x0f, 0x52, 0x98, 0xb5, 0xcd, 0x98, 0x1f, + 0x89, 0x20, 0x5f, 0xf2, 0xa0, 0x1c, 0xa3, 0x1b, + 0x96, 0x94, 0xdd, 0xa9, 0xfd, 0x57, 0xe9, 0x70, + 0xe8, 0x26, 0x6d, 0x71, 0x99, 0x9b, 0x26, 0x6e, + 0x38, 0x50, 0x29, 0x6c, 0x90, 0xa7, 0xbd, 0xd9, + 0x16, 0x03, 0x01, 0x00, 0x04, 0x0e, 0x00, 0x00, + 0x00, + }, + { + 0x16, 0x03, 0x01, 0x00, 0x86, 0x10, 0x00, 0x00, + 0x82, 0x00, 0x80, 0x51, 0x04, 0xf1, 0x7a, 
0xbf, + 0xe8, 0xa5, 0x86, 0x09, 0xa7, 0xf3, 0xcc, 0x93, + 0x00, 0x10, 0x5b, 0xb8, 0xc1, 0x51, 0x0d, 0x5b, + 0xcd, 0xed, 0x26, 0x01, 0x69, 0x73, 0xf4, 0x05, + 0x8a, 0x6a, 0xc3, 0xb1, 0x9e, 0x84, 0x4e, 0x39, + 0xcf, 0x5e, 0x55, 0xa9, 0x70, 0x19, 0x96, 0x91, + 0xcd, 0x2c, 0x78, 0x3c, 0xa2, 0x6d, 0xb0, 0x49, + 0x86, 0xf6, 0xd1, 0x3a, 0xde, 0x00, 0x4b, 0xa6, + 0x25, 0xbf, 0x85, 0x39, 0xce, 0xb1, 0xcf, 0xbc, + 0x16, 0xc7, 0x66, 0xac, 0xf8, 0xd2, 0x3b, 0xd1, + 0xcc, 0x16, 0xac, 0x63, 0x3c, 0xbe, 0xd9, 0xb6, + 0x6a, 0xe4, 0x13, 0x8a, 0xf4, 0x56, 0x2f, 0x92, + 0x54, 0xd8, 0xf0, 0x84, 0x01, 0x32, 0x1a, 0xa9, + 0x2d, 0xaf, 0x82, 0x0e, 0x00, 0xfa, 0x07, 0x88, + 0xd9, 0x87, 0xe7, 0xdc, 0x9e, 0xe9, 0x72, 0x49, + 0xb8, 0xfa, 0x8c, 0x7b, 0x07, 0x0b, 0x03, 0x7c, + 0x10, 0x8c, 0x8a, 0x14, 0x03, 0x01, 0x00, 0x01, + 0x01, 0x16, 0x03, 0x01, 0x00, 0xa8, 0x61, 0xa4, + 0xf4, 0x5f, 0x8a, 0x1f, 0x5c, 0x92, 0x3f, 0x8c, + 0xdb, 0xd6, 0x10, 0xcd, 0x9e, 0xe7, 0xf0, 0xc4, + 0x3c, 0xb6, 0x1c, 0x9a, 0x56, 0x73, 0x7f, 0xa6, + 0x14, 0x24, 0xcb, 0x96, 0x1f, 0xe0, 0xaf, 0xcd, + 0x3c, 0x66, 0x43, 0xb7, 0x37, 0x65, 0x34, 0x47, + 0xf8, 0x43, 0xf1, 0xcc, 0x15, 0xb8, 0xdc, 0x35, + 0xe0, 0xa4, 0x2d, 0x78, 0x94, 0xe0, 0x02, 0xf3, + 0x76, 0x46, 0xf7, 0x9b, 0x8d, 0x0d, 0x5d, 0x0b, + 0xd3, 0xdd, 0x9a, 0x9e, 0x62, 0x2e, 0xc5, 0x98, + 0x75, 0x63, 0x0c, 0xbf, 0x8e, 0x49, 0x33, 0x23, + 0x7c, 0x00, 0xcf, 0xfb, 0xcf, 0xba, 0x0f, 0x41, + 0x39, 0x89, 0xb9, 0xcc, 0x59, 0xd0, 0x2b, 0xb6, + 0xec, 0x04, 0xe2, 0xc0, 0x52, 0xc7, 0xcf, 0x71, + 0x47, 0xff, 0x70, 0x7e, 0xa9, 0xbd, 0x1c, 0xdd, + 0x17, 0xa5, 0x6c, 0xb7, 0x10, 0x4f, 0x42, 0x18, + 0x37, 0x69, 0xa9, 0xd2, 0xb3, 0x18, 0x84, 0x92, + 0xa7, 0x47, 0x21, 0xf6, 0x95, 0x63, 0x29, 0xd6, + 0xa5, 0xb6, 0xda, 0x65, 0x67, 0x69, 0xc4, 0x26, + 0xac, 0x8b, 0x08, 0x58, 0xdd, 0x3c, 0x31, 0x20, + 0xd5, 0x0c, 0x88, 0x72, 0x18, 0x16, 0x88, 0x1e, + 0x4a, 0x0f, 0xe1, 0xcf, 0x95, 0x24, + }, + { + 0x16, 0x03, 0x01, 0x00, 0x72, 0x04, 0x00, 0x00, + 0x6e, 0x00, 0x00, 0x00, 0x00, 0x00, 0x68, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x65, + 0xe8, 0x4b, 0xde, 0xef, 0xba, 0x3e, 0x18, 0x1c, + 0x1e, 0x5e, 0xbc, 0x87, 0xf1, 0x87, 0x8d, 0x72, + 0xe3, 0xbe, 0x0f, 0xdf, 0xfd, 0xd0, 0xb2, 0x89, + 0xf8, 0x05, 0x9a, 0x52, 0x47, 0x77, 0x9e, 0xe8, + 0xb1, 0x1d, 0x18, 0xed, 0x6a, 0x4b, 0x63, 0x1d, + 0xf1, 0x62, 0xd2, 0x65, 0x21, 0x26, 0x73, 0xd4, + 0x35, 0x5b, 0x95, 0x89, 0x12, 0x59, 0x23, 0x8c, + 0xc3, 0xfc, 0xf9, 0x4d, 0x21, 0x79, 0xa0, 0xbd, + 0xff, 0x33, 0xa2, 0x3d, 0x0b, 0x6f, 0x89, 0xc9, + 0x23, 0xe4, 0xe7, 0x9f, 0x1d, 0x98, 0xf6, 0xed, + 0x02, 0x8d, 0xac, 0x1a, 0xf9, 0xcb, 0xa5, 0x14, + 0x03, 0x01, 0x00, 0x01, 0x01, 0x16, 0x03, 0x01, + 0x00, 0x28, 0x91, 0x56, 0x80, 0xe2, 0x6d, 0x51, + 0x88, 0x03, 0xf8, 0x49, 0xe6, 0x6a, 0x5a, 0xfb, + 0x2f, 0x0b, 0xb5, 0xa1, 0x0d, 0x63, 0x83, 0xae, + 0xb9, 0xbc, 0x05, 0xf0, 0x81, 0x00, 0x61, 0x83, + 0x38, 0xda, 0x14, 0xf6, 0xea, 0xd8, 0x78, 0x65, + 0xc7, 0x26, 0x17, 0x03, 0x01, 0x00, 0x18, 0x81, + 0x30, 0x8b, 0x22, 0x5a, 0xd3, 0x7f, 0xc8, 0xf2, + 0x8a, 0x6b, 0xa3, 0xba, 0x4d, 0xe7, 0x6e, 0xd2, + 0xfd, 0xbf, 0xf2, 0xc5, 0x28, 0xa0, 0x62, 0x17, + 0x03, 0x01, 0x00, 0x28, 0x17, 0x83, 0x3c, 0x78, + 0x18, 0xfa, 0x8d, 0x58, 0x5c, 0xaa, 0x05, 0x7d, + 0x67, 0x96, 0x11, 0x60, 0x11, 0xc0, 0x1e, 0x0d, + 0x6a, 0x6e, 0x5f, 0x1d, 0x98, 0x4b, 0xff, 0x82, + 0xee, 0x21, 0x06, 0x29, 0xd3, 0x8b, 0x80, 0x78, + 0x39, 0x05, 0x34, 0x9b, 0x15, 0x03, 0x01, 0x00, + 0x18, 0xa9, 0x38, 0x18, 0x4f, 0x9d, 0x84, 0x75, + 0x88, 0x53, 0xd6, 0x85, 0xc2, 0x15, 0x4b, 0xe3, + 
0xe3, 0x35, 0x9a, 0x74, 0xc9, 0x3e, 0x13, 0xc1, + 0x8c, + }, +} + +var rsaAESServerScript = [][]byte{ + { + 0x16, 0x03, 0x00, 0x00, 0xc5, 0x01, 0x00, 0x00, + 0xc1, 0x03, 0x03, 0x50, 0xae, 0x5c, 0xe9, 0x5e, + 0x31, 0x93, 0x82, 0xa5, 0x6f, 0x51, 0x82, 0xc8, + 0x55, 0x4f, 0x1f, 0x2e, 0x90, 0x98, 0x81, 0x13, + 0x27, 0x80, 0x68, 0xb4, 0x2d, 0xba, 0x3a, 0x76, + 0xd8, 0xd7, 0x2c, 0x00, 0x00, 0x50, 0xc0, 0x09, + 0xc0, 0x23, 0xc0, 0x2b, 0xc0, 0x0a, 0xc0, 0x24, + 0xc0, 0x2c, 0xc0, 0x08, 0xc0, 0x13, 0xc0, 0x27, + 0xc0, 0x2f, 0xc0, 0x14, 0xc0, 0x30, 0xc0, 0x12, + 0x00, 0x33, 0x00, 0x67, 0x00, 0x45, 0x00, 0x9e, + 0x00, 0x39, 0x00, 0x6b, 0x00, 0x88, 0x00, 0x16, + 0x00, 0x32, 0x00, 0x40, 0x00, 0x44, 0x00, 0xa2, + 0x00, 0x38, 0x00, 0x6a, 0x00, 0x87, 0x00, 0x13, + 0x00, 0x66, 0x00, 0x2f, 0x00, 0x3c, 0x00, 0x41, + 0x00, 0x9c, 0x00, 0x35, 0x00, 0x3d, 0x00, 0x84, + 0x00, 0x0a, 0x00, 0x05, 0x00, 0x04, 0x01, 0x00, + 0x00, 0x48, 0x00, 0x05, 0x00, 0x05, 0x01, 0x00, + 0x00, 0x00, 0x00, 0xff, 0x01, 0x00, 0x01, 0x00, + 0x00, 0x23, 0x00, 0x00, 0x00, 0x0a, 0x00, 0x0c, + 0x00, 0x0a, 0x00, 0x13, 0x00, 0x15, 0x00, 0x17, + 0x00, 0x18, 0x00, 0x19, 0x00, 0x0b, 0x00, 0x02, + 0x01, 0x00, 0x00, 0x0d, 0x00, 0x1c, 0x00, 0x1a, + 0x04, 0x01, 0x04, 0x02, 0x04, 0x03, 0x05, 0x01, + 0x05, 0x03, 0x06, 0x01, 0x06, 0x03, 0x03, 0x01, + 0x03, 0x02, 0x03, 0x03, 0x02, 0x01, 0x02, 0x02, + 0x02, 0x03, + }, + { + 0x16, 0x03, 0x01, 0x00, 0x30, 0x02, 0x00, 0x00, + 0x2c, 0x03, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x2f, 0x00, 0x00, + 0x04, 0x00, 0x23, 0x00, 0x00, 0x16, 0x03, 0x01, + 0x02, 0xbe, 0x0b, 0x00, 0x02, 0xba, 0x00, 0x02, + 0xb7, 0x00, 0x02, 0xb4, 0x30, 0x82, 0x02, 0xb0, + 0x30, 0x82, 0x02, 0x19, 0xa0, 0x03, 0x02, 0x01, + 0x02, 0x02, 0x09, 0x00, 0x85, 0xb0, 0xbb, 0xa4, + 0x8a, 0x7f, 0xb8, 0xca, 0x30, 0x0d, 0x06, 0x09, + 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x01, + 0x05, 0x05, 0x00, 0x30, 0x45, 0x31, 0x0b, 0x30, + 0x09, 0x06, 0x03, 0x55, 0x04, 0x06, 0x13, 0x02, + 0x41, 0x55, 0x31, 0x13, 0x30, 0x11, 0x06, 0x03, + 0x55, 0x04, 0x08, 0x13, 0x0a, 0x53, 0x6f, 0x6d, + 0x65, 0x2d, 0x53, 0x74, 0x61, 0x74, 0x65, 0x31, + 0x21, 0x30, 0x1f, 0x06, 0x03, 0x55, 0x04, 0x0a, + 0x13, 0x18, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x6e, + 0x65, 0x74, 0x20, 0x57, 0x69, 0x64, 0x67, 0x69, + 0x74, 0x73, 0x20, 0x50, 0x74, 0x79, 0x20, 0x4c, + 0x74, 0x64, 0x30, 0x1e, 0x17, 0x0d, 0x31, 0x30, + 0x30, 0x34, 0x32, 0x34, 0x30, 0x39, 0x30, 0x39, + 0x33, 0x38, 0x5a, 0x17, 0x0d, 0x31, 0x31, 0x30, + 0x34, 0x32, 0x34, 0x30, 0x39, 0x30, 0x39, 0x33, + 0x38, 0x5a, 0x30, 0x45, 0x31, 0x0b, 0x30, 0x09, + 0x06, 0x03, 0x55, 0x04, 0x06, 0x13, 0x02, 0x41, + 0x55, 0x31, 0x13, 0x30, 0x11, 0x06, 0x03, 0x55, + 0x04, 0x08, 0x13, 0x0a, 0x53, 0x6f, 0x6d, 0x65, + 0x2d, 0x53, 0x74, 0x61, 0x74, 0x65, 0x31, 0x21, + 0x30, 0x1f, 0x06, 0x03, 0x55, 0x04, 0x0a, 0x13, + 0x18, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x65, + 0x74, 0x20, 0x57, 0x69, 0x64, 0x67, 0x69, 0x74, + 0x73, 0x20, 0x50, 0x74, 0x79, 0x20, 0x4c, 0x74, + 0x64, 0x30, 0x81, 0x9f, 0x30, 0x0d, 0x06, 0x09, + 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x01, + 0x01, 0x05, 0x00, 0x03, 0x81, 0x8d, 0x00, 0x30, + 0x81, 0x89, 0x02, 0x81, 0x81, 0x00, 0xbb, 0x79, + 0xd6, 0xf5, 0x17, 0xb5, 0xe5, 0xbf, 0x46, 0x10, + 0xd0, 0xdc, 0x69, 0xbe, 0xe6, 0x2b, 0x07, 0x43, + 0x5a, 0xd0, 0x03, 0x2d, 0x8a, 0x7a, 0x43, 0x85, + 0xb7, 0x14, 0x52, 0xe7, 0xa5, 0x65, 0x4c, 0x2c, + 0x78, 0xb8, 0x23, 0x8c, 
0xb5, 0xb4, 0x82, 0xe5, + 0xde, 0x1f, 0x95, 0x3b, 0x7e, 0x62, 0xa5, 0x2c, + 0xa5, 0x33, 0xd6, 0xfe, 0x12, 0x5c, 0x7a, 0x56, + 0xfc, 0xf5, 0x06, 0xbf, 0xfa, 0x58, 0x7b, 0x26, + 0x3f, 0xb5, 0xcd, 0x04, 0xd3, 0xd0, 0xc9, 0x21, + 0x96, 0x4a, 0xc7, 0xf4, 0x54, 0x9f, 0x5a, 0xbf, + 0xef, 0x42, 0x71, 0x00, 0xfe, 0x18, 0x99, 0x07, + 0x7f, 0x7e, 0x88, 0x7d, 0x7d, 0xf1, 0x04, 0x39, + 0xc4, 0xa2, 0x2e, 0xdb, 0x51, 0xc9, 0x7c, 0xe3, + 0xc0, 0x4c, 0x3b, 0x32, 0x66, 0x01, 0xcf, 0xaf, + 0xb1, 0x1d, 0xb8, 0x71, 0x9a, 0x1d, 0xdb, 0xdb, + 0x89, 0x6b, 0xae, 0xda, 0x2d, 0x79, 0x02, 0x03, + 0x01, 0x00, 0x01, 0xa3, 0x81, 0xa7, 0x30, 0x81, + 0xa4, 0x30, 0x1d, 0x06, 0x03, 0x55, 0x1d, 0x0e, + 0x04, 0x16, 0x04, 0x14, 0xb1, 0xad, 0xe2, 0x85, + 0x5a, 0xcf, 0xcb, 0x28, 0xdb, 0x69, 0xce, 0x23, + 0x69, 0xde, 0xd3, 0x26, 0x8e, 0x18, 0x88, 0x39, + 0x30, 0x75, 0x06, 0x03, 0x55, 0x1d, 0x23, 0x04, + 0x6e, 0x30, 0x6c, 0x80, 0x14, 0xb1, 0xad, 0xe2, + 0x85, 0x5a, 0xcf, 0xcb, 0x28, 0xdb, 0x69, 0xce, + 0x23, 0x69, 0xde, 0xd3, 0x26, 0x8e, 0x18, 0x88, + 0x39, 0xa1, 0x49, 0xa4, 0x47, 0x30, 0x45, 0x31, + 0x0b, 0x30, 0x09, 0x06, 0x03, 0x55, 0x04, 0x06, + 0x13, 0x02, 0x41, 0x55, 0x31, 0x13, 0x30, 0x11, + 0x06, 0x03, 0x55, 0x04, 0x08, 0x13, 0x0a, 0x53, + 0x6f, 0x6d, 0x65, 0x2d, 0x53, 0x74, 0x61, 0x74, + 0x65, 0x31, 0x21, 0x30, 0x1f, 0x06, 0x03, 0x55, + 0x04, 0x0a, 0x13, 0x18, 0x49, 0x6e, 0x74, 0x65, + 0x72, 0x6e, 0x65, 0x74, 0x20, 0x57, 0x69, 0x64, + 0x67, 0x69, 0x74, 0x73, 0x20, 0x50, 0x74, 0x79, + 0x20, 0x4c, 0x74, 0x64, 0x82, 0x09, 0x00, 0x85, + 0xb0, 0xbb, 0xa4, 0x8a, 0x7f, 0xb8, 0xca, 0x30, + 0x0c, 0x06, 0x03, 0x55, 0x1d, 0x13, 0x04, 0x05, + 0x30, 0x03, 0x01, 0x01, 0xff, 0x30, 0x0d, 0x06, + 0x09, 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, + 0x01, 0x05, 0x05, 0x00, 0x03, 0x81, 0x81, 0x00, + 0x08, 0x6c, 0x45, 0x24, 0xc7, 0x6b, 0xb1, 0x59, + 0xab, 0x0c, 0x52, 0xcc, 0xf2, 0xb0, 0x14, 0xd7, + 0x87, 0x9d, 0x7a, 0x64, 0x75, 0xb5, 0x5a, 0x95, + 0x66, 0xe4, 0xc5, 0x2b, 0x8e, 0xae, 0x12, 0x66, + 0x1f, 0xeb, 0x4f, 0x38, 0xb3, 0x6e, 0x60, 0xd3, + 0x92, 0xfd, 0xf7, 0x41, 0x08, 0xb5, 0x25, 0x13, + 0xb1, 0x18, 0x7a, 0x24, 0xfb, 0x30, 0x1d, 0xba, + 0xed, 0x98, 0xb9, 0x17, 0xec, 0xe7, 0xd7, 0x31, + 0x59, 0xdb, 0x95, 0xd3, 0x1d, 0x78, 0xea, 0x50, + 0x56, 0x5c, 0xd5, 0x82, 0x5a, 0x2d, 0x5a, 0x5f, + 0x33, 0xc4, 0xb6, 0xd8, 0xc9, 0x75, 0x90, 0x96, + 0x8c, 0x0f, 0x52, 0x98, 0xb5, 0xcd, 0x98, 0x1f, + 0x89, 0x20, 0x5f, 0xf2, 0xa0, 0x1c, 0xa3, 0x1b, + 0x96, 0x94, 0xdd, 0xa9, 0xfd, 0x57, 0xe9, 0x70, + 0xe8, 0x26, 0x6d, 0x71, 0x99, 0x9b, 0x26, 0x6e, + 0x38, 0x50, 0x29, 0x6c, 0x90, 0xa7, 0xbd, 0xd9, + 0x16, 0x03, 0x01, 0x00, 0x04, 0x0e, 0x00, 0x00, + 0x00, + }, + { + 0x16, 0x03, 0x01, 0x00, 0x86, 0x10, 0x00, 0x00, + 0x82, 0x00, 0x80, 0x51, 0x2e, 0xec, 0x0d, 0x86, + 0xf3, 0x9f, 0xf2, 0x77, 0x04, 0x27, 0x2b, 0x0e, + 0x9c, 0xab, 0x35, 0x84, 0x65, 0xff, 0x36, 0xef, + 0xc0, 0x08, 0xc9, 0x1d, 0x9f, 0x29, 0xae, 0x8d, + 0xc5, 0x66, 0x81, 0x31, 0x92, 0x5e, 0x3d, 0xac, + 0xaa, 0x37, 0x28, 0x2c, 0x06, 0x91, 0xa6, 0xc2, + 0xd0, 0x83, 0x34, 0x24, 0x1c, 0x88, 0xfc, 0x0a, + 0xcf, 0xbf, 0xc2, 0x94, 0xe2, 0xed, 0xa7, 0x6a, + 0xa8, 0x8d, 0x3d, 0xf7, 0x06, 0x7d, 0x69, 0xf8, + 0x0d, 0xb2, 0xf7, 0xe4, 0x45, 0xcb, 0x0a, 0x25, + 0xcb, 0xb2, 0x2e, 0x38, 0x9a, 0x84, 0x75, 0xe8, + 0xe1, 0x42, 0x39, 0xa2, 0x18, 0x0e, 0x48, 0xca, + 0x33, 0x16, 0x4e, 0xf6, 0x2f, 0xec, 0x07, 0xe7, + 0x57, 0xe1, 0x20, 0x40, 0x40, 0x6d, 0x4e, 0x29, + 0x04, 0x1a, 0x8c, 0x99, 0xfb, 0x19, 0x3c, 0xaa, + 0x75, 0x64, 0xd3, 0xa6, 0xe6, 0xed, 0x3f, 0x5a, + 0xd2, 0xc9, 0x80, 0x14, 0x03, 0x01, 0x00, 0x01, + 0x01, 0x16, 
0x03, 0x01, 0x01, 0x10, 0xe9, 0x9e, + 0x06, 0x92, 0x18, 0xbf, 0x5e, 0xaf, 0x33, 0xc1, + 0xbf, 0x0e, 0x12, 0x07, 0x48, 0x4f, 0x6b, 0x6c, + 0xf5, 0x23, 0x5e, 0x87, 0xa7, 0xd3, 0x50, 0x79, + 0x38, 0xdc, 0xe0, 0x49, 0xd3, 0x81, 0x21, 0x12, + 0xd0, 0x3d, 0x9a, 0xfb, 0x83, 0xc1, 0x8b, 0xfc, + 0x14, 0xd5, 0xd5, 0xa7, 0xa3, 0x34, 0x14, 0x71, + 0xbe, 0xea, 0x37, 0x18, 0x12, 0x7f, 0x41, 0xfb, + 0xc5, 0x51, 0x17, 0x9d, 0x96, 0x58, 0x14, 0xfb, + 0x4f, 0xd7, 0xd3, 0x15, 0x0f, 0xec, 0x5a, 0x0d, + 0x35, 0xbb, 0x3c, 0x81, 0x5b, 0x3f, 0xdf, 0x52, + 0xa4, 0x4c, 0xcd, 0x13, 0xe1, 0x10, 0x37, 0x34, + 0xbf, 0xb4, 0x80, 0x1e, 0x8d, 0xe2, 0xc3, 0x7a, + 0x0f, 0x7b, 0x7d, 0x23, 0xeb, 0xd0, 0x99, 0x69, + 0xad, 0x0a, 0x2d, 0xb3, 0x6c, 0xd6, 0x80, 0x11, + 0x7f, 0x6c, 0xed, 0x1b, 0xcd, 0x08, 0x22, 0x56, + 0x90, 0x0e, 0xa4, 0xc3, 0x29, 0x33, 0x96, 0x30, + 0x34, 0x94, 0xa1, 0xeb, 0x9c, 0x1b, 0x5a, 0xd1, + 0x03, 0x61, 0xf9, 0xdd, 0xf3, 0x64, 0x8a, 0xfd, + 0x5f, 0x44, 0xdb, 0x2e, 0xa7, 0xfd, 0xe1, 0x1a, + 0x66, 0xc5, 0x01, 0x9c, 0xc7, 0xd1, 0xc4, 0xd3, + 0xea, 0x14, 0x3c, 0xed, 0x74, 0xbb, 0x1b, 0x97, + 0x8f, 0xf1, 0x29, 0x39, 0x33, 0x92, 0x93, 0x4e, + 0xf5, 0x87, 0x91, 0x61, 0x65, 0x8d, 0x27, 0x8d, + 0x76, 0xc1, 0xfa, 0x6a, 0x99, 0x80, 0xb1, 0x9b, + 0x29, 0x53, 0xce, 0x3e, 0xb6, 0x9a, 0xce, 0x3c, + 0x19, 0x5e, 0x48, 0x83, 0xaa, 0xa7, 0x66, 0x98, + 0x59, 0xf4, 0xbb, 0xf2, 0xbc, 0xd9, 0xc5, 0x9a, + 0xc8, 0x2c, 0x63, 0x58, 0xd5, 0xd4, 0xbc, 0x03, + 0xa9, 0x06, 0xa9, 0x80, 0x0d, 0xb3, 0x46, 0x2d, + 0xe3, 0xc6, 0xaf, 0x1a, 0x39, 0x18, 0x7e, 0x1e, + 0x83, 0x80, 0x46, 0x11, 0xd2, 0x13, 0x9f, 0xda, + 0xfc, 0x2d, 0x42, 0xaa, 0x5a, 0x1d, 0x4c, 0x31, + 0xe5, 0x58, 0x78, 0x5e, 0xe2, 0x04, 0xd6, 0x23, + 0x7f, 0x3f, 0x06, 0xc0, 0x54, 0xf8, + }, + { + 0x16, 0x03, 0x01, 0x00, 0x72, 0x04, 0x00, 0x00, + 0x6e, 0x00, 0x00, 0x00, 0x00, 0x00, 0x68, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x65, + 0xe8, 0x4b, 0xfb, 0xef, 0xba, 0xed, 0xc5, 0x36, + 0xc8, 0x5a, 0x41, 0x3f, 0x05, 0xfa, 0xfe, 0x48, + 0xc3, 0x91, 0x12, 0x8b, 0xe8, 0x32, 0x6a, 0x9f, + 0xdc, 0x97, 0xe2, 0x77, 0xb9, 0x96, 0x2d, 0xd4, + 0xe5, 0xbd, 0xa1, 0xfd, 0x94, 0xbb, 0x74, 0x63, + 0xb1, 0x0c, 0x38, 0xbc, 0x6f, 0x69, 0xaf, 0xa3, + 0x46, 0x9c, 0x96, 0x41, 0xde, 0x59, 0x23, 0xff, + 0x15, 0x6b, 0x3a, 0xef, 0x91, 0x6d, 0x92, 0x44, + 0xdc, 0x72, 0x1f, 0x40, 0x3d, 0xb5, 0x34, 0x8f, + 0x2a, 0xac, 0x21, 0x69, 0x05, 0x6f, 0xb2, 0x60, + 0x32, 0x5d, 0x3d, 0x97, 0xb4, 0x24, 0x99, 0x14, + 0x03, 0x01, 0x00, 0x01, 0x01, 0x16, 0x03, 0x01, + 0x00, 0x30, 0x68, 0x27, 0x97, 0xca, 0x63, 0x09, + 0x22, 0xed, 0x0e, 0x61, 0x7c, 0x76, 0x31, 0x9c, + 0xbe, 0x27, 0xc9, 0xe6, 0x09, 0xc3, 0xc3, 0xc2, + 0xf4, 0xa2, 0x32, 0xba, 0x7c, 0xf2, 0x0f, 0xb8, + 0x3d, 0xcb, 0xe2, 0x4c, 0xc0, 0x7d, 0x8e, 0x5b, + 0x5a, 0xed, 0x05, 0x5c, 0x15, 0x96, 0x69, 0xc2, + 0x6f, 0x5f, 0x17, 0x03, 0x01, 0x00, 0x20, 0x5a, + 0xfe, 0x0b, 0xe1, 0x6f, 0xa8, 0x54, 0x19, 0x78, + 0xca, 0xba, 0x2e, 0x1e, 0x2e, 0xe1, 0x5d, 0x17, + 0xe5, 0x97, 0x05, 0x2c, 0x08, 0x0c, 0xff, 0xa8, + 0x59, 0xa9, 0xde, 0x5e, 0x21, 0x34, 0x04, 0x17, + 0x03, 0x01, 0x00, 0x30, 0x86, 0xb1, 0x3f, 0x88, + 0x43, 0xf0, 0x07, 0xee, 0xa8, 0xf4, 0xbc, 0xe7, + 0x5f, 0xc6, 0x8c, 0x86, 0x4c, 0xca, 0x70, 0x88, + 0xcc, 0x6a, 0xb4, 0x3d, 0x40, 0xe8, 0x54, 0x89, + 0x19, 0x43, 0x1f, 0x76, 0xe2, 0xac, 0xb2, 0x5b, + 0x92, 0xf8, 0x57, 0x39, 0x2a, 0xc3, 0x6d, 0x13, + 0x45, 0xfa, 0x36, 0x9e, 0x15, 0x03, 0x01, 0x00, + 0x20, 0x6d, 0xed, 0x7b, 0x59, 0x28, 0x2a, 0x27, + 0x04, 0x15, 0x07, 0x4e, 0xeb, 0x13, 0x00, 0xe3, + 0x3a, 0x3f, 0xf8, 
0xaa, 0x2b, 0x3b, 0x1a, 0x8c, + 0x12, 0xd6, 0x4c, 0xec, 0x2a, 0xaf, 0x33, 0x60, + 0xaf, + }, +} + +// Generated using: +// $ go test -test.run TestRunServer -serve -ciphersuites=0xc00a +// $ openssl s_client -host 127.0.0.1 -port 10443 -cipher ECDHE-ECDSA-AES256-SHA +var ecdheECDSAAESServerScript = [][]byte{ + { + 0x16, 0x03, 0x01, 0x00, 0xa0, 0x01, 0x00, 0x00, + 0x9c, 0x03, 0x03, 0x50, 0xd7, 0x18, 0x31, 0x49, + 0xde, 0x19, 0x8d, 0x08, 0x5c, 0x4b, 0x60, 0x67, + 0x0f, 0xfe, 0xd0, 0x62, 0xf9, 0x31, 0x48, 0x17, + 0x9e, 0x50, 0xc1, 0xd8, 0x35, 0x24, 0x0e, 0xa6, + 0x09, 0x06, 0x51, 0x00, 0x00, 0x04, 0xc0, 0x0a, + 0x00, 0xff, 0x01, 0x00, 0x00, 0x6f, 0x00, 0x0b, + 0x00, 0x04, 0x03, 0x00, 0x01, 0x02, 0x00, 0x0a, + 0x00, 0x34, 0x00, 0x32, 0x00, 0x0e, 0x00, 0x0d, + 0x00, 0x19, 0x00, 0x0b, 0x00, 0x0c, 0x00, 0x18, + 0x00, 0x09, 0x00, 0x0a, 0x00, 0x16, 0x00, 0x17, + 0x00, 0x08, 0x00, 0x06, 0x00, 0x07, 0x00, 0x14, + 0x00, 0x15, 0x00, 0x04, 0x00, 0x05, 0x00, 0x12, + 0x00, 0x13, 0x00, 0x01, 0x00, 0x02, 0x00, 0x03, + 0x00, 0x0f, 0x00, 0x10, 0x00, 0x11, 0x00, 0x23, + 0x00, 0x00, 0x00, 0x0d, 0x00, 0x22, 0x00, 0x20, + 0x06, 0x01, 0x06, 0x02, 0x06, 0x03, 0x05, 0x01, + 0x05, 0x02, 0x05, 0x03, 0x04, 0x01, 0x04, 0x02, + 0x04, 0x03, 0x03, 0x01, 0x03, 0x02, 0x03, 0x03, + 0x02, 0x01, 0x02, 0x02, 0x02, 0x03, 0x01, 0x01, + 0x00, 0x0f, 0x00, 0x01, 0x01, + }, + { + 0x16, 0x03, 0x01, 0x00, 0x30, 0x02, 0x00, 0x00, + 0x2c, 0x03, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0xc0, 0x0a, 0x00, 0x00, + 0x04, 0x00, 0x23, 0x00, 0x00, 0x16, 0x03, 0x01, + 0x02, 0x0e, 0x0b, 0x00, 0x02, 0x0a, 0x00, 0x02, + 0x07, 0x00, 0x02, 0x04, 0x30, 0x82, 0x02, 0x00, + 0x30, 0x82, 0x01, 0x62, 0x02, 0x09, 0x00, 0xb8, + 0xbf, 0x2d, 0x47, 0xa0, 0xd2, 0xeb, 0xf4, 0x30, + 0x09, 0x06, 0x07, 0x2a, 0x86, 0x48, 0xce, 0x3d, + 0x04, 0x01, 0x30, 0x45, 0x31, 0x0b, 0x30, 0x09, + 0x06, 0x03, 0x55, 0x04, 0x06, 0x13, 0x02, 0x41, + 0x55, 0x31, 0x13, 0x30, 0x11, 0x06, 0x03, 0x55, + 0x04, 0x08, 0x13, 0x0a, 0x53, 0x6f, 0x6d, 0x65, + 0x2d, 0x53, 0x74, 0x61, 0x74, 0x65, 0x31, 0x21, + 0x30, 0x1f, 0x06, 0x03, 0x55, 0x04, 0x0a, 0x13, + 0x18, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x65, + 0x74, 0x20, 0x57, 0x69, 0x64, 0x67, 0x69, 0x74, + 0x73, 0x20, 0x50, 0x74, 0x79, 0x20, 0x4c, 0x74, + 0x64, 0x30, 0x1e, 0x17, 0x0d, 0x31, 0x32, 0x31, + 0x31, 0x32, 0x32, 0x31, 0x35, 0x30, 0x36, 0x33, + 0x32, 0x5a, 0x17, 0x0d, 0x32, 0x32, 0x31, 0x31, + 0x32, 0x30, 0x31, 0x35, 0x30, 0x36, 0x33, 0x32, + 0x5a, 0x30, 0x45, 0x31, 0x0b, 0x30, 0x09, 0x06, + 0x03, 0x55, 0x04, 0x06, 0x13, 0x02, 0x41, 0x55, + 0x31, 0x13, 0x30, 0x11, 0x06, 0x03, 0x55, 0x04, + 0x08, 0x13, 0x0a, 0x53, 0x6f, 0x6d, 0x65, 0x2d, + 0x53, 0x74, 0x61, 0x74, 0x65, 0x31, 0x21, 0x30, + 0x1f, 0x06, 0x03, 0x55, 0x04, 0x0a, 0x13, 0x18, + 0x49, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x65, 0x74, + 0x20, 0x57, 0x69, 0x64, 0x67, 0x69, 0x74, 0x73, + 0x20, 0x50, 0x74, 0x79, 0x20, 0x4c, 0x74, 0x64, + 0x30, 0x81, 0x9b, 0x30, 0x10, 0x06, 0x07, 0x2a, + 0x86, 0x48, 0xce, 0x3d, 0x02, 0x01, 0x06, 0x05, + 0x2b, 0x81, 0x04, 0x00, 0x23, 0x03, 0x81, 0x86, + 0x00, 0x04, 0x00, 0xc4, 0xa1, 0xed, 0xbe, 0x98, + 0xf9, 0x0b, 0x48, 0x73, 0x36, 0x7e, 0xc3, 0x16, + 0x56, 0x11, 0x22, 0xf2, 0x3d, 0x53, 0xc3, 0x3b, + 0x4d, 0x21, 0x3d, 0xcd, 0x6b, 0x75, 0xe6, 0xf6, + 0xb0, 0xdc, 0x9a, 0xdf, 0x26, 0xc1, 0xbc, 0xb2, + 0x87, 0xf0, 0x72, 0x32, 0x7c, 0xb3, 0x64, 0x2f, + 0x1c, 0x90, 0xbc, 0xea, 0x68, 0x23, 0x10, 0x7e, 
+ 0xfe, 0xe3, 0x25, 0xc0, 0x48, 0x3a, 0x69, 0xe0, + 0x28, 0x6d, 0xd3, 0x37, 0x00, 0xef, 0x04, 0x62, + 0xdd, 0x0d, 0xa0, 0x9c, 0x70, 0x62, 0x83, 0xd8, + 0x81, 0xd3, 0x64, 0x31, 0xaa, 0x9e, 0x97, 0x31, + 0xbd, 0x96, 0xb0, 0x68, 0xc0, 0x9b, 0x23, 0xde, + 0x76, 0x64, 0x3f, 0x1a, 0x5c, 0x7f, 0xe9, 0x12, + 0x0e, 0x58, 0x58, 0xb6, 0x5f, 0x70, 0xdd, 0x9b, + 0xd8, 0xea, 0xd5, 0xd7, 0xf5, 0xd5, 0xcc, 0xb9, + 0xb6, 0x9f, 0x30, 0x66, 0x5b, 0x66, 0x9a, 0x20, + 0xe2, 0x27, 0xe5, 0xbf, 0xfe, 0x3b, 0x30, 0x09, + 0x06, 0x07, 0x2a, 0x86, 0x48, 0xce, 0x3d, 0x04, + 0x01, 0x03, 0x81, 0x8c, 0x00, 0x30, 0x81, 0x88, + 0x02, 0x42, 0x01, 0x88, 0xa2, 0x4f, 0xeb, 0xe2, + 0x45, 0xc5, 0x48, 0x7d, 0x1b, 0xac, 0xf5, 0xed, + 0x98, 0x9d, 0xae, 0x47, 0x70, 0xc0, 0x5e, 0x1b, + 0xb6, 0x2f, 0xbd, 0xf1, 0xb6, 0x4d, 0xb7, 0x61, + 0x40, 0xd3, 0x11, 0xa2, 0xce, 0xee, 0x0b, 0x7e, + 0x92, 0x7e, 0xff, 0x76, 0x9d, 0xc3, 0x3b, 0x7e, + 0xa5, 0x3f, 0xce, 0xfa, 0x10, 0xe2, 0x59, 0xec, + 0x47, 0x2d, 0x7c, 0xac, 0xda, 0x4e, 0x97, 0x0e, + 0x15, 0xa0, 0x6f, 0xd0, 0x02, 0x42, 0x01, 0x4d, + 0xfc, 0xbe, 0x67, 0x13, 0x9c, 0x2d, 0x05, 0x0e, + 0xbd, 0x3f, 0xa3, 0x8c, 0x25, 0xc1, 0x33, 0x13, + 0x83, 0x0d, 0x94, 0x06, 0xbb, 0xd4, 0x37, 0x7a, + 0xf6, 0xec, 0x7a, 0xc9, 0x86, 0x2e, 0xdd, 0xd7, + 0x11, 0x69, 0x7f, 0x85, 0x7c, 0x56, 0xde, 0xfb, + 0x31, 0x78, 0x2b, 0xe4, 0xc7, 0x78, 0x0d, 0xae, + 0xcb, 0xbe, 0x9e, 0x4e, 0x36, 0x24, 0x31, 0x7b, + 0x6a, 0x0f, 0x39, 0x95, 0x12, 0x07, 0x8f, 0x2a, + 0x16, 0x03, 0x01, 0x01, 0x1a, 0x0c, 0x00, 0x01, + 0x16, 0x03, 0x00, 0x19, 0x85, 0x04, 0x01, 0x39, + 0xdc, 0xee, 0x44, 0x17, 0x5e, 0xdb, 0xd7, 0x27, + 0xaf, 0xb6, 0x56, 0xd9, 0xb4, 0x43, 0x5a, 0x99, + 0xcf, 0xaa, 0x31, 0x37, 0x0c, 0x6f, 0x3a, 0xa0, + 0xf8, 0x53, 0xc4, 0x74, 0xd1, 0x91, 0x0a, 0x46, + 0xf5, 0x38, 0x3b, 0x5c, 0x09, 0xd8, 0x97, 0xdc, + 0x4b, 0xaa, 0x70, 0x26, 0x48, 0xf2, 0xd6, 0x0b, + 0x31, 0xc9, 0xf8, 0xd4, 0x98, 0x43, 0xe1, 0x6c, + 0xd5, 0xc7, 0xb2, 0x8e, 0x0b, 0x01, 0xe6, 0xb6, + 0x00, 0x28, 0x80, 0x7b, 0xfc, 0x96, 0x8f, 0x0d, + 0xa2, 0x4f, 0xb0, 0x79, 0xaf, 0xdc, 0x61, 0x28, + 0x63, 0x33, 0x78, 0xf6, 0x31, 0x39, 0xfd, 0x8a, + 0xf4, 0x15, 0x18, 0x11, 0xfe, 0xdb, 0xd5, 0x07, + 0xda, 0x2c, 0xed, 0x49, 0xa0, 0x23, 0xbf, 0xd0, + 0x3a, 0x38, 0x1d, 0x54, 0xae, 0x1c, 0x7b, 0xea, + 0x29, 0xee, 0xd0, 0x38, 0xc1, 0x76, 0xa7, 0x7f, + 0x2a, 0xf4, 0xce, 0x1e, 0xac, 0xcc, 0x94, 0x79, + 0x90, 0x33, 0x00, 0x8b, 0x30, 0x81, 0x88, 0x02, + 0x42, 0x00, 0xc6, 0x85, 0x8e, 0x06, 0xb7, 0x04, + 0x04, 0xe9, 0xcd, 0x9e, 0x3e, 0xcb, 0x66, 0x23, + 0x95, 0xb4, 0x42, 0x9c, 0x64, 0x81, 0x39, 0x05, + 0x3f, 0xb5, 0x21, 0xf8, 0x28, 0xaf, 0x60, 0x6b, + 0x4d, 0x3d, 0xba, 0xa1, 0x4b, 0x5e, 0x77, 0xef, + 0xe7, 0x59, 0x28, 0xfe, 0x1d, 0xc1, 0x27, 0xa2, + 0xff, 0xa8, 0xde, 0x33, 0x48, 0xb3, 0xc1, 0x85, + 0x6a, 0x42, 0x9b, 0xf9, 0x7e, 0x7e, 0x31, 0xc2, + 0xe5, 0xbd, 0x66, 0x02, 0x42, 0x00, 0xad, 0x7d, + 0x06, 0x35, 0xab, 0xec, 0x8d, 0xac, 0xd4, 0xba, + 0x1b, 0x49, 0x5e, 0x05, 0x5f, 0xf0, 0x97, 0x93, + 0x82, 0xb8, 0x2b, 0x8d, 0x91, 0x98, 0x63, 0x8e, + 0xb4, 0x14, 0x62, 0xdb, 0x1e, 0xc9, 0x2b, 0x30, + 0xf8, 0x41, 0x9b, 0xa6, 0xe6, 0xbc, 0xde, 0x0e, + 0x68, 0x30, 0x22, 0x50, 0xe6, 0x98, 0x97, 0x7b, + 0x69, 0xf7, 0x93, 0xed, 0xcd, 0x19, 0x2f, 0x44, + 0x6c, 0x2e, 0xdf, 0x25, 0xee, 0xcc, 0x46, 0x16, + 0x03, 0x01, 0x00, 0x04, 0x0e, 0x00, 0x00, 0x00, + }, + { + 0x16, 0x03, 0x01, 0x00, 0x8a, 0x10, 0x00, 0x00, + 0x86, 0x85, 0x04, 0x00, 0x1c, 0xc5, 0xe8, 0xb3, + 0x42, 0xb4, 0xad, 0xca, 0x45, 0xcd, 0x42, 0x7b, + 0xfb, 0x0c, 0xea, 0x32, 0x26, 0xd4, 0x8a, 0xef, + 0xdf, 0xc9, 0xff, 0xd2, 0xe0, 0x36, 0xea, 
0x4e, + 0xbb, 0x3e, 0xf4, 0x9c, 0x76, 0x4f, 0x44, 0xbd, + 0x84, 0x72, 0xdd, 0xcb, 0xe5, 0x28, 0x8d, 0x31, + 0x72, 0x3b, 0xd3, 0xf2, 0x9a, 0x13, 0xfb, 0x8a, + 0xa7, 0x72, 0xca, 0x21, 0x6c, 0xea, 0xbf, 0xe9, + 0x8c, 0x0a, 0xcc, 0x8f, 0xd6, 0x00, 0x20, 0x87, + 0xf3, 0x7d, 0x18, 0xc5, 0xfd, 0x9e, 0xdd, 0x6b, + 0x06, 0xdc, 0x52, 0xeb, 0x14, 0xc0, 0x67, 0x5a, + 0x06, 0xd8, 0x98, 0x19, 0x14, 0xe7, 0xd4, 0x36, + 0x32, 0xee, 0xb7, 0xfa, 0xe2, 0x85, 0x4a, 0x16, + 0x42, 0x0c, 0xa6, 0x21, 0xcf, 0x1f, 0xae, 0x10, + 0x8b, 0x28, 0x32, 0x19, 0xa4, 0x0a, 0xd7, 0xce, + 0xe6, 0xe1, 0x93, 0xfb, 0x5f, 0x08, 0x8b, 0x42, + 0xa2, 0x20, 0xed, 0x0d, 0x62, 0xca, 0xed, 0x14, + 0x03, 0x01, 0x00, 0x01, 0x01, 0x16, 0x03, 0x01, + 0x00, 0x30, 0x2e, 0x33, 0xc0, 0x57, 0x6c, 0xb4, + 0x1b, 0xd2, 0x63, 0xe8, 0x67, 0x10, 0x2d, 0x87, + 0x71, 0x6e, 0x19, 0x60, 0xf4, 0xa4, 0x10, 0x52, + 0x73, 0x2d, 0x09, 0x5e, 0xdb, 0x6c, 0xdc, 0xcf, + 0x2d, 0xff, 0x03, 0x11, 0x95, 0x76, 0x90, 0xd7, + 0x87, 0x54, 0x43, 0xed, 0xc2, 0x36, 0x69, 0x14, + 0x72, 0x4a, + }, + { + 0x16, 0x03, 0x01, 0x00, 0x72, 0x04, 0x00, 0x00, + 0x6e, 0x00, 0x00, 0x00, 0x00, 0x00, 0x68, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x65, + 0xe8, 0x8b, 0xde, 0xef, 0xba, 0xc5, 0x7e, 0x04, + 0xab, 0xfd, 0x79, 0x56, 0xf3, 0xe1, 0xa5, 0x3e, + 0x02, 0xdf, 0x69, 0x6d, 0x1f, 0x41, 0x9f, 0xbc, + 0x93, 0xe2, 0x6c, 0xf1, 0xb1, 0x38, 0xf5, 0x2b, + 0x8c, 0x4c, 0xf4, 0x74, 0xe1, 0x79, 0x35, 0x34, + 0x97, 0x9b, 0xd5, 0xba, 0xfd, 0xf7, 0x2f, 0x2d, + 0x9e, 0x84, 0x54, 0xee, 0x77, 0x59, 0x23, 0x8f, + 0xc8, 0x84, 0xb4, 0xd6, 0xea, 0x4c, 0x44, 0x8a, + 0xc6, 0x9c, 0xf9, 0x9b, 0x27, 0xea, 0x4f, 0x28, + 0x72, 0x33, 0x12, 0x20, 0x7c, 0xd7, 0x3f, 0x56, + 0xa6, 0x76, 0xc7, 0x48, 0xe4, 0x2d, 0x6f, 0x14, + 0x03, 0x01, 0x00, 0x01, 0x01, 0x16, 0x03, 0x01, + 0x00, 0x30, 0x36, 0xe3, 0xd4, 0xf7, 0xb1, 0x69, + 0x18, 0x8d, 0x09, 0xba, 0x52, 0x1e, 0xd5, 0x7d, + 0x2c, 0x15, 0x3a, 0xd6, 0xe3, 0x99, 0x30, 0x2c, + 0x99, 0x97, 0xbc, 0x19, 0x3c, 0x63, 0xa1, 0x25, + 0x68, 0xbc, 0x8a, 0x16, 0x47, 0xec, 0xae, 0x13, + 0xa4, 0x03, 0x96, 0x29, 0x11, 0x92, 0x90, 0x1a, + 0xc8, 0xa4, 0x17, 0x03, 0x01, 0x00, 0x20, 0xc1, + 0x10, 0x1d, 0xa6, 0xf1, 0xe2, 0x8a, 0xcc, 0x37, + 0x7d, 0x8e, 0x05, 0x00, 0xfb, 0xd1, 0x9f, 0xc7, + 0x11, 0xd2, 0x00, 0xb4, 0x27, 0x0a, 0x25, 0x14, + 0xd9, 0x79, 0x1b, 0xcb, 0x4d, 0x81, 0x61, 0x17, + 0x03, 0x01, 0x00, 0x30, 0x5c, 0x7c, 0x2d, 0xc0, + 0x9e, 0xa6, 0xc4, 0x8e, 0xfd, 0xf4, 0xe2, 0xe5, + 0xe4, 0xe6, 0x56, 0x9f, 0x7d, 0x4c, 0x4c, 0x2d, + 0xb7, 0xa9, 0xac, 0xfa, 0x9f, 0x12, 0x7f, 0x2d, + 0x30, 0x57, 0xe4, 0x8e, 0x30, 0x86, 0x65, 0x59, + 0xcd, 0x24, 0xda, 0xe2, 0x8a, 0x7b, 0x0c, 0x5e, + 0x86, 0x05, 0x06, 0x2a, 0x15, 0x03, 0x01, 0x00, + 0x20, 0xd6, 0xb7, 0x70, 0xf8, 0x47, 0xbc, 0x0f, + 0xf4, 0x66, 0x98, 0x1b, 0x1e, 0x8a, 0x8c, 0x0b, + 0xa1, 0x4a, 0x04, 0x29, 0x60, 0x72, 0x8b, 0xc4, + 0x73, 0xc1, 0xd6, 0x41, 0x72, 0xb7, 0x17, 0x39, + 0xda, + }, +} + +var sslv3ServerScript = [][]byte{ + { + 0x16, 0x03, 0x00, 0x00, 0x54, 0x01, 0x00, 0x00, + 0x50, 0x03, 0x00, 0x50, 0x77, 0x3d, 0x42, 0xae, + 0x84, 0xbd, 0xc5, 0x07, 0xa5, 0xc4, 0xd6, 0x16, + 0x4e, 0xd5, 0xc5, 0xfa, 0x02, 0x7a, 0x0f, 0x1d, + 0xc1, 0xe1, 0xaa, 0xe3, 0x3b, 0x4b, 0x6f, 0x11, + 0xfa, 0x1a, 0xa4, 0x00, 0x00, 0x28, 0x00, 0x39, + 0x00, 0x38, 0x00, 0x35, 0x00, 0x16, 0x00, 0x13, + 0x00, 0x0a, 0x00, 0x33, 0x00, 0x32, 0x00, 0x2f, + 0x00, 0x05, 0x00, 0x04, 0x00, 0x15, 0x00, 0x12, + 0x00, 0x09, 0x00, 0x14, 0x00, 0x11, 0x00, 0x08, + 0x00, 0x06, 0x00, 0x03, 0x00, 0xff, 0x02, 0x01, + 0x00, + }, + { + 
0x16, 0x03, 0x00, 0x00, 0x2a, 0x02, 0x00, 0x00, + 0x26, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x05, 0x00, 0x16, + 0x03, 0x00, 0x02, 0xbe, 0x0b, 0x00, 0x02, 0xba, + 0x00, 0x02, 0xb7, 0x00, 0x02, 0xb4, 0x30, 0x82, + 0x02, 0xb0, 0x30, 0x82, 0x02, 0x19, 0xa0, 0x03, + 0x02, 0x01, 0x02, 0x02, 0x09, 0x00, 0x85, 0xb0, + 0xbb, 0xa4, 0x8a, 0x7f, 0xb8, 0xca, 0x30, 0x0d, + 0x06, 0x09, 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, + 0x01, 0x01, 0x05, 0x05, 0x00, 0x30, 0x45, 0x31, + 0x0b, 0x30, 0x09, 0x06, 0x03, 0x55, 0x04, 0x06, + 0x13, 0x02, 0x41, 0x55, 0x31, 0x13, 0x30, 0x11, + 0x06, 0x03, 0x55, 0x04, 0x08, 0x13, 0x0a, 0x53, + 0x6f, 0x6d, 0x65, 0x2d, 0x53, 0x74, 0x61, 0x74, + 0x65, 0x31, 0x21, 0x30, 0x1f, 0x06, 0x03, 0x55, + 0x04, 0x0a, 0x13, 0x18, 0x49, 0x6e, 0x74, 0x65, + 0x72, 0x6e, 0x65, 0x74, 0x20, 0x57, 0x69, 0x64, + 0x67, 0x69, 0x74, 0x73, 0x20, 0x50, 0x74, 0x79, + 0x20, 0x4c, 0x74, 0x64, 0x30, 0x1e, 0x17, 0x0d, + 0x31, 0x30, 0x30, 0x34, 0x32, 0x34, 0x30, 0x39, + 0x30, 0x39, 0x33, 0x38, 0x5a, 0x17, 0x0d, 0x31, + 0x31, 0x30, 0x34, 0x32, 0x34, 0x30, 0x39, 0x30, + 0x39, 0x33, 0x38, 0x5a, 0x30, 0x45, 0x31, 0x0b, + 0x30, 0x09, 0x06, 0x03, 0x55, 0x04, 0x06, 0x13, + 0x02, 0x41, 0x55, 0x31, 0x13, 0x30, 0x11, 0x06, + 0x03, 0x55, 0x04, 0x08, 0x13, 0x0a, 0x53, 0x6f, + 0x6d, 0x65, 0x2d, 0x53, 0x74, 0x61, 0x74, 0x65, + 0x31, 0x21, 0x30, 0x1f, 0x06, 0x03, 0x55, 0x04, + 0x0a, 0x13, 0x18, 0x49, 0x6e, 0x74, 0x65, 0x72, + 0x6e, 0x65, 0x74, 0x20, 0x57, 0x69, 0x64, 0x67, + 0x69, 0x74, 0x73, 0x20, 0x50, 0x74, 0x79, 0x20, + 0x4c, 0x74, 0x64, 0x30, 0x81, 0x9f, 0x30, 0x0d, + 0x06, 0x09, 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, + 0x01, 0x01, 0x01, 0x05, 0x00, 0x03, 0x81, 0x8d, + 0x00, 0x30, 0x81, 0x89, 0x02, 0x81, 0x81, 0x00, + 0xbb, 0x79, 0xd6, 0xf5, 0x17, 0xb5, 0xe5, 0xbf, + 0x46, 0x10, 0xd0, 0xdc, 0x69, 0xbe, 0xe6, 0x2b, + 0x07, 0x43, 0x5a, 0xd0, 0x03, 0x2d, 0x8a, 0x7a, + 0x43, 0x85, 0xb7, 0x14, 0x52, 0xe7, 0xa5, 0x65, + 0x4c, 0x2c, 0x78, 0xb8, 0x23, 0x8c, 0xb5, 0xb4, + 0x82, 0xe5, 0xde, 0x1f, 0x95, 0x3b, 0x7e, 0x62, + 0xa5, 0x2c, 0xa5, 0x33, 0xd6, 0xfe, 0x12, 0x5c, + 0x7a, 0x56, 0xfc, 0xf5, 0x06, 0xbf, 0xfa, 0x58, + 0x7b, 0x26, 0x3f, 0xb5, 0xcd, 0x04, 0xd3, 0xd0, + 0xc9, 0x21, 0x96, 0x4a, 0xc7, 0xf4, 0x54, 0x9f, + 0x5a, 0xbf, 0xef, 0x42, 0x71, 0x00, 0xfe, 0x18, + 0x99, 0x07, 0x7f, 0x7e, 0x88, 0x7d, 0x7d, 0xf1, + 0x04, 0x39, 0xc4, 0xa2, 0x2e, 0xdb, 0x51, 0xc9, + 0x7c, 0xe3, 0xc0, 0x4c, 0x3b, 0x32, 0x66, 0x01, + 0xcf, 0xaf, 0xb1, 0x1d, 0xb8, 0x71, 0x9a, 0x1d, + 0xdb, 0xdb, 0x89, 0x6b, 0xae, 0xda, 0x2d, 0x79, + 0x02, 0x03, 0x01, 0x00, 0x01, 0xa3, 0x81, 0xa7, + 0x30, 0x81, 0xa4, 0x30, 0x1d, 0x06, 0x03, 0x55, + 0x1d, 0x0e, 0x04, 0x16, 0x04, 0x14, 0xb1, 0xad, + 0xe2, 0x85, 0x5a, 0xcf, 0xcb, 0x28, 0xdb, 0x69, + 0xce, 0x23, 0x69, 0xde, 0xd3, 0x26, 0x8e, 0x18, + 0x88, 0x39, 0x30, 0x75, 0x06, 0x03, 0x55, 0x1d, + 0x23, 0x04, 0x6e, 0x30, 0x6c, 0x80, 0x14, 0xb1, + 0xad, 0xe2, 0x85, 0x5a, 0xcf, 0xcb, 0x28, 0xdb, + 0x69, 0xce, 0x23, 0x69, 0xde, 0xd3, 0x26, 0x8e, + 0x18, 0x88, 0x39, 0xa1, 0x49, 0xa4, 0x47, 0x30, + 0x45, 0x31, 0x0b, 0x30, 0x09, 0x06, 0x03, 0x55, + 0x04, 0x06, 0x13, 0x02, 0x41, 0x55, 0x31, 0x13, + 0x30, 0x11, 0x06, 0x03, 0x55, 0x04, 0x08, 0x13, + 0x0a, 0x53, 0x6f, 0x6d, 0x65, 0x2d, 0x53, 0x74, + 0x61, 0x74, 0x65, 0x31, 0x21, 0x30, 0x1f, 0x06, + 0x03, 0x55, 0x04, 0x0a, 0x13, 0x18, 0x49, 0x6e, + 0x74, 0x65, 0x72, 0x6e, 0x65, 0x74, 0x20, 0x57, + 
0x69, 0x64, 0x67, 0x69, 0x74, 0x73, 0x20, 0x50, + 0x74, 0x79, 0x20, 0x4c, 0x74, 0x64, 0x82, 0x09, + 0x00, 0x85, 0xb0, 0xbb, 0xa4, 0x8a, 0x7f, 0xb8, + 0xca, 0x30, 0x0c, 0x06, 0x03, 0x55, 0x1d, 0x13, + 0x04, 0x05, 0x30, 0x03, 0x01, 0x01, 0xff, 0x30, + 0x0d, 0x06, 0x09, 0x2a, 0x86, 0x48, 0x86, 0xf7, + 0x0d, 0x01, 0x01, 0x05, 0x05, 0x00, 0x03, 0x81, + 0x81, 0x00, 0x08, 0x6c, 0x45, 0x24, 0xc7, 0x6b, + 0xb1, 0x59, 0xab, 0x0c, 0x52, 0xcc, 0xf2, 0xb0, + 0x14, 0xd7, 0x87, 0x9d, 0x7a, 0x64, 0x75, 0xb5, + 0x5a, 0x95, 0x66, 0xe4, 0xc5, 0x2b, 0x8e, 0xae, + 0x12, 0x66, 0x1f, 0xeb, 0x4f, 0x38, 0xb3, 0x6e, + 0x60, 0xd3, 0x92, 0xfd, 0xf7, 0x41, 0x08, 0xb5, + 0x25, 0x13, 0xb1, 0x18, 0x7a, 0x24, 0xfb, 0x30, + 0x1d, 0xba, 0xed, 0x98, 0xb9, 0x17, 0xec, 0xe7, + 0xd7, 0x31, 0x59, 0xdb, 0x95, 0xd3, 0x1d, 0x78, + 0xea, 0x50, 0x56, 0x5c, 0xd5, 0x82, 0x5a, 0x2d, + 0x5a, 0x5f, 0x33, 0xc4, 0xb6, 0xd8, 0xc9, 0x75, + 0x90, 0x96, 0x8c, 0x0f, 0x52, 0x98, 0xb5, 0xcd, + 0x98, 0x1f, 0x89, 0x20, 0x5f, 0xf2, 0xa0, 0x1c, + 0xa3, 0x1b, 0x96, 0x94, 0xdd, 0xa9, 0xfd, 0x57, + 0xe9, 0x70, 0xe8, 0x26, 0x6d, 0x71, 0x99, 0x9b, + 0x26, 0x6e, 0x38, 0x50, 0x29, 0x6c, 0x90, 0xa7, + 0xbd, 0xd9, 0x16, 0x03, 0x00, 0x00, 0x04, 0x0e, + 0x00, 0x00, 0x00, + }, + { + 0x16, 0x03, 0x00, 0x00, 0x84, 0x10, 0x00, 0x00, + 0x80, 0x4a, 0x8d, 0xc4, 0x38, 0x7a, 0x9c, 0xd6, + 0xe8, 0x72, 0x9e, 0xa3, 0xdf, 0x37, 0xb4, 0x6c, + 0x58, 0x33, 0x59, 0xd9, 0xc9, 0x4b, 0x50, 0x33, + 0x6c, 0xed, 0x73, 0x38, 0x2a, 0x46, 0x55, 0x31, + 0xa9, 0x8e, 0x8e, 0xfc, 0x0b, 0x5d, 0x5f, 0x3c, + 0x88, 0x28, 0x3f, 0x60, 0x51, 0x13, 0xf1, 0x59, + 0x0c, 0xa3, 0x5e, 0xe0, 0xa3, 0x35, 0x06, 0xb1, + 0x71, 0x59, 0x24, 0x4e, 0xed, 0x07, 0x15, 0x88, + 0x50, 0xef, 0xc2, 0xb2, 0x2a, 0x52, 0x30, 0x6a, + 0x7c, 0xbe, 0x2f, 0xc6, 0x8f, 0xa8, 0x83, 0xc5, + 0x80, 0x14, 0x62, 0x74, 0x7f, 0x96, 0x9f, 0x41, + 0x32, 0x74, 0xdd, 0x76, 0x2d, 0x7b, 0xeb, 0x7b, + 0xea, 0xd0, 0x4f, 0x0c, 0xcf, 0x9a, 0x9c, 0xc5, + 0x7a, 0xe4, 0xbc, 0xf8, 0xa6, 0xe1, 0x09, 0x8e, + 0x7c, 0x53, 0x3a, 0xe3, 0x30, 0x8f, 0x76, 0xee, + 0x58, 0xbb, 0xfd, 0x0b, 0x06, 0xb8, 0xdf, 0xb7, + 0x31, 0x14, 0x03, 0x00, 0x00, 0x01, 0x01, 0x16, + 0x03, 0x00, 0x00, 0x3c, 0x13, 0x91, 0xc6, 0x4a, + 0x0c, 0x59, 0x25, 0xce, 0x54, 0xc0, 0x1d, 0xb9, + 0x2a, 0xff, 0x4d, 0xca, 0x26, 0x0c, 0x8c, 0x04, + 0x98, 0x7c, 0x7c, 0x38, 0xa3, 0xf5, 0xf9, 0x36, + 0x1c, 0x04, 0x32, 0x47, 0x2d, 0x48, 0x0e, 0x96, + 0xe8, 0x2b, 0x5e, 0x5a, 0xc6, 0x0a, 0x48, 0x41, + 0x34, 0x5e, 0x62, 0xd5, 0x68, 0x4e, 0x44, 0x1d, + 0xb2, 0xa1, 0x11, 0xad, 0x6e, 0x14, 0x85, 0x61, + }, + { + 0x14, 0x03, 0x00, 0x00, 0x01, 0x01, 0x16, 0x03, + 0x00, 0x00, 0x3c, 0x88, 0xae, 0xa9, 0xd4, 0xa8, + 0x10, 0x8d, 0x65, 0xa6, 0x3e, 0x1e, 0xed, 0xd2, + 0xfc, 0xc4, 0x7c, 0xa8, 0x94, 0x4f, 0x11, 0xaf, + 0xa6, 0x87, 0x09, 0x37, 0x54, 0xf7, 0x69, 0xd1, + 0xb5, 0x25, 0x6b, 0xb5, 0xed, 0xcb, 0x25, 0x39, + 0x73, 0xeb, 0x53, 0x6c, 0xc7, 0xb4, 0x29, 0x8f, + 0xd6, 0x49, 0xd1, 0x95, 0x59, 0x80, 0x9a, 0x67, + 0x5c, 0xb2, 0xe0, 0xbd, 0x1e, 0xff, 0xaa, 0x17, + 0x03, 0x00, 0x00, 0x21, 0x65, 0x7b, 0x99, 0x09, + 0x02, 0xc3, 0x9d, 0x54, 0xd6, 0xe7, 0x32, 0x62, + 0xab, 0xc1, 0x09, 0x91, 0x30, 0x0a, 0xc9, 0xfa, + 0x70, 0xec, 0x06, 0x7b, 0xa3, 0xe1, 0x5f, 0xb4, + 0x63, 0xe6, 0x5c, 0xba, 0x1f, 0x15, 0x03, 0x00, + 0x00, 0x16, 0x40, 0x70, 0xbe, 0xe6, 0xa6, 0xee, + 0x8f, 0xd0, 0x87, 0xa0, 0x43, 0xa1, 0x92, 0xd7, + 0xd0, 0x1a, 0x0c, 0x20, 0x7c, 0xbf, 0xa2, 0xb5, + }, +} + +var selectCertificateBySNIScript = [][]byte{ + { + 0x16, 0x03, 0x01, 0x00, 0x6a, 0x01, 0x00, 0x00, + 0x66, 0x03, 0x01, 0x50, 0x77, 0x3d, 0xfe, 0xfb, + 0x8d, 
0xc2, 0x68, 0xeb, 0xf9, 0xfa, 0x54, 0x97, + 0x86, 0x45, 0xa2, 0xa3, 0xed, 0xb1, 0x91, 0xb8, + 0x28, 0xc0, 0x47, 0xaf, 0xfb, 0xcd, 0xdc, 0x0e, + 0xb3, 0xea, 0xa5, 0x00, 0x00, 0x28, 0x00, 0x39, + 0x00, 0x38, 0x00, 0x35, 0x00, 0x16, 0x00, 0x13, + 0x00, 0x0a, 0x00, 0x33, 0x00, 0x32, 0x00, 0x2f, + 0x00, 0x05, 0x00, 0x04, 0x00, 0x15, 0x00, 0x12, + 0x00, 0x09, 0x00, 0x14, 0x00, 0x11, 0x00, 0x08, + 0x00, 0x06, 0x00, 0x03, 0x00, 0xff, 0x02, 0x01, + 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x10, 0x00, + 0x0e, 0x00, 0x00, 0x0b, 0x73, 0x6e, 0x69, 0x74, + 0x65, 0x73, 0x74, 0x2e, 0x63, 0x6f, 0x6d, + }, + { + 0x16, 0x03, 0x01, 0x00, 0x2a, 0x02, 0x00, 0x00, + 0x26, 0x03, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x05, 0x00, 0x16, + 0x03, 0x01, 0x02, 0x00, 0x0b, 0x00, 0x01, 0xfc, + 0x00, 0x01, 0xf9, 0x00, 0x01, 0xf6, 0x30, 0x82, + 0x01, 0xf2, 0x30, 0x82, 0x01, 0x5d, 0xa0, 0x03, + 0x02, 0x01, 0x02, 0x02, 0x01, 0x00, 0x30, 0x0b, + 0x06, 0x09, 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, + 0x01, 0x01, 0x05, 0x30, 0x28, 0x31, 0x10, 0x30, + 0x0e, 0x06, 0x03, 0x55, 0x04, 0x0a, 0x13, 0x07, + 0x41, 0x63, 0x6d, 0x65, 0x20, 0x43, 0x6f, 0x31, + 0x14, 0x30, 0x12, 0x06, 0x03, 0x55, 0x04, 0x03, + 0x13, 0x0b, 0x73, 0x6e, 0x69, 0x74, 0x65, 0x73, + 0x74, 0x2e, 0x63, 0x6f, 0x6d, 0x30, 0x1e, 0x17, + 0x0d, 0x31, 0x32, 0x30, 0x34, 0x31, 0x31, 0x31, + 0x37, 0x34, 0x30, 0x33, 0x35, 0x5a, 0x17, 0x0d, + 0x31, 0x33, 0x30, 0x34, 0x31, 0x31, 0x31, 0x37, + 0x34, 0x35, 0x33, 0x35, 0x5a, 0x30, 0x28, 0x31, + 0x10, 0x30, 0x0e, 0x06, 0x03, 0x55, 0x04, 0x0a, + 0x13, 0x07, 0x41, 0x63, 0x6d, 0x65, 0x20, 0x43, + 0x6f, 0x31, 0x14, 0x30, 0x12, 0x06, 0x03, 0x55, + 0x04, 0x03, 0x13, 0x0b, 0x73, 0x6e, 0x69, 0x74, + 0x65, 0x73, 0x74, 0x2e, 0x63, 0x6f, 0x6d, 0x30, + 0x81, 0x9d, 0x30, 0x0b, 0x06, 0x09, 0x2a, 0x86, + 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x01, 0x01, 0x03, + 0x81, 0x8d, 0x00, 0x30, 0x81, 0x89, 0x02, 0x81, + 0x81, 0x00, 0xbb, 0x79, 0xd6, 0xf5, 0x17, 0xb5, + 0xe5, 0xbf, 0x46, 0x10, 0xd0, 0xdc, 0x69, 0xbe, + 0xe6, 0x2b, 0x07, 0x43, 0x5a, 0xd0, 0x03, 0x2d, + 0x8a, 0x7a, 0x43, 0x85, 0xb7, 0x14, 0x52, 0xe7, + 0xa5, 0x65, 0x4c, 0x2c, 0x78, 0xb8, 0x23, 0x8c, + 0xb5, 0xb4, 0x82, 0xe5, 0xde, 0x1f, 0x95, 0x3b, + 0x7e, 0x62, 0xa5, 0x2c, 0xa5, 0x33, 0xd6, 0xfe, + 0x12, 0x5c, 0x7a, 0x56, 0xfc, 0xf5, 0x06, 0xbf, + 0xfa, 0x58, 0x7b, 0x26, 0x3f, 0xb5, 0xcd, 0x04, + 0xd3, 0xd0, 0xc9, 0x21, 0x96, 0x4a, 0xc7, 0xf4, + 0x54, 0x9f, 0x5a, 0xbf, 0xef, 0x42, 0x71, 0x00, + 0xfe, 0x18, 0x99, 0x07, 0x7f, 0x7e, 0x88, 0x7d, + 0x7d, 0xf1, 0x04, 0x39, 0xc4, 0xa2, 0x2e, 0xdb, + 0x51, 0xc9, 0x7c, 0xe3, 0xc0, 0x4c, 0x3b, 0x32, + 0x66, 0x01, 0xcf, 0xaf, 0xb1, 0x1d, 0xb8, 0x71, + 0x9a, 0x1d, 0xdb, 0xdb, 0x89, 0x6b, 0xae, 0xda, + 0x2d, 0x79, 0x02, 0x03, 0x01, 0x00, 0x01, 0xa3, + 0x32, 0x30, 0x30, 0x30, 0x0e, 0x06, 0x03, 0x55, + 0x1d, 0x0f, 0x01, 0x01, 0xff, 0x04, 0x04, 0x03, + 0x02, 0x00, 0xa0, 0x30, 0x0d, 0x06, 0x03, 0x55, + 0x1d, 0x0e, 0x04, 0x06, 0x04, 0x04, 0x01, 0x02, + 0x03, 0x04, 0x30, 0x0f, 0x06, 0x03, 0x55, 0x1d, + 0x23, 0x04, 0x08, 0x30, 0x06, 0x80, 0x04, 0x01, + 0x02, 0x03, 0x04, 0x30, 0x0b, 0x06, 0x09, 0x2a, + 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x01, 0x05, + 0x03, 0x81, 0x81, 0x00, 0x89, 0xc6, 0x45, 0x5f, + 0x1c, 0x1f, 0x5e, 0xf8, 0xeb, 0x1a, 0xb1, 0x74, + 0xee, 0x24, 0x39, 0x05, 0x9f, 0x5c, 0x42, 0x59, + 0xbb, 0x1a, 0x8d, 0x86, 0xcd, 0xb1, 0xd0, 0x56, + 0xf5, 0x6a, 0x71, 0x7d, 0xa4, 0x0e, 0x95, 0xab, + 0x90, 
0xf5, 0x9e, 0x8d, 0xea, 0xf6, 0x27, 0xc1, + 0x57, 0x99, 0x50, 0x94, 0xdb, 0x08, 0x02, 0x26, + 0x6e, 0xb3, 0x4f, 0xc6, 0x84, 0x2d, 0xea, 0x8a, + 0x4b, 0x68, 0xd9, 0xc1, 0x38, 0x91, 0x03, 0xab, + 0x84, 0xfb, 0x9e, 0x1f, 0x85, 0xd9, 0xb5, 0xd2, + 0x3f, 0xf2, 0x31, 0x2c, 0x86, 0x70, 0xfb, 0xb5, + 0x40, 0x14, 0x82, 0x45, 0xa4, 0xeb, 0xaf, 0xe2, + 0x64, 0xd9, 0x0c, 0x8a, 0x4c, 0xf4, 0xf8, 0x5b, + 0x0f, 0xac, 0x12, 0xac, 0x2f, 0xc4, 0xa3, 0x15, + 0x4b, 0xad, 0x52, 0x46, 0x28, 0x68, 0xaf, 0x96, + 0xc6, 0x2c, 0x65, 0x25, 0xd6, 0x52, 0xb6, 0xe3, + 0x18, 0x45, 0xbd, 0xcc, 0x16, 0x03, 0x01, 0x00, + 0x04, 0x0e, 0x00, 0x00, 0x00, + }, + { + 0x16, 0x03, 0x01, 0x00, 0x86, 0x10, 0x00, 0x00, + 0x82, 0x00, 0x80, 0x69, 0xc3, 0xd4, 0x0e, 0xcc, + 0xdc, 0xbc, 0x5e, 0xc2, 0x64, 0xa6, 0xde, 0x3c, + 0x0c, 0x7e, 0x0c, 0x6b, 0x80, 0x0f, 0xd4, 0x8f, + 0x02, 0x4b, 0xb2, 0xba, 0x8d, 0x01, 0xeb, 0x6b, + 0xa1, 0x2e, 0x79, 0x37, 0xba, 0xae, 0x24, 0xc2, + 0x26, 0x72, 0x51, 0xe1, 0x82, 0x8e, 0x51, 0x41, + 0x1c, 0x54, 0xa4, 0x26, 0xbe, 0x13, 0xcd, 0x1b, + 0xc6, 0xed, 0x3d, 0x1f, 0xfd, 0x72, 0x80, 0x90, + 0xdb, 0xbf, 0xd6, 0x39, 0x94, 0x5f, 0x48, 0xfb, + 0x25, 0x5a, 0xc9, 0x60, 0x9b, 0xd7, 0xc6, 0x20, + 0xa8, 0x66, 0x64, 0x13, 0xf3, 0x65, 0xc8, 0xb1, + 0xd5, 0x33, 0x21, 0x0e, 0x73, 0x41, 0xc0, 0x18, + 0x1a, 0x37, 0xfe, 0xcf, 0x28, 0x2a, 0xcd, 0xe4, + 0x0b, 0xac, 0xdd, 0x25, 0x5e, 0xcb, 0x17, 0x51, + 0x69, 0xd5, 0x8c, 0xf4, 0xb6, 0x21, 0x98, 0xef, + 0x20, 0xdb, 0x14, 0x67, 0xf3, 0x7c, 0x95, 0x6a, + 0x48, 0x2a, 0x6a, 0x14, 0x03, 0x01, 0x00, 0x01, + 0x01, 0x16, 0x03, 0x01, 0x00, 0x24, 0x36, 0x1b, + 0x09, 0xe5, 0xb9, 0xb9, 0x4d, 0x7d, 0xae, 0x87, + 0xb6, 0x0f, 0xaf, 0xec, 0x22, 0xba, 0x0d, 0xa5, + 0x96, 0x5e, 0x64, 0x65, 0xe7, 0xfb, 0xe3, 0xf3, + 0x6b, 0x72, 0xa8, 0xdb, 0xed, 0xd8, 0x69, 0x9c, + 0x08, 0xd8, + }, + { + 0x14, 0x03, 0x01, 0x00, 0x01, 0x01, 0x16, 0x03, + 0x01, 0x00, 0x24, 0x60, 0xf7, 0x09, 0x5f, 0xd1, + 0xcb, 0xc9, 0xe1, 0x22, 0xb5, 0x2a, 0xcc, 0xde, + 0x7c, 0xa7, 0xb8, 0x85, 0x00, 0xbc, 0xfd, 0x85, + 0xe1, 0x91, 0x36, 0xbb, 0x07, 0x42, 0xad, 0x3d, + 0x29, 0x62, 0x69, 0xc1, 0x45, 0x92, 0x6f, 0x17, + 0x03, 0x01, 0x00, 0x21, 0x0d, 0xf9, 0xd5, 0x87, + 0xb9, 0x57, 0x3c, 0x50, 0x19, 0xe4, 0x3a, 0x50, + 0x45, 0xcc, 0x86, 0x89, 0xd4, 0x32, 0x79, 0x45, + 0x7c, 0x9f, 0x96, 0xd4, 0x54, 0x56, 0x0c, 0x63, + 0x72, 0x81, 0xc3, 0xd3, 0xe3, 0x15, 0x03, 0x01, + 0x00, 0x16, 0x84, 0xec, 0x2e, 0xf6, 0xaf, 0x4f, + 0xee, 0x48, 0x0f, 0xbe, 0xcd, 0x82, 0x5c, 0x56, + 0x16, 0xe4, 0xfb, 0x89, 0xc5, 0x57, 0x3e, 0x91, + }, +} + +var issueSessionTicketTest = [][]byte{ + { + 0x16, 0x03, 0x01, 0x00, 0x5a, 0x01, 0x00, 0x00, + 0x56, 0x03, 0x01, 0x50, 0x77, 0x3e, 0x49, 0x7a, + 0xb7, 0x86, 0x5c, 0x27, 0xd2, 0x97, 0x61, 0xe3, + 0x49, 0x41, 0x48, 0xe7, 0x0e, 0xaa, 0x7e, 0x4d, + 0xb8, 0xdc, 0x01, 0x97, 0xfb, 0xab, 0x53, 0xb2, + 0x5e, 0x36, 0xf6, 0x00, 0x00, 0x28, 0x00, 0x39, + 0x00, 0x38, 0x00, 0x35, 0x00, 0x16, 0x00, 0x13, + 0x00, 0x0a, 0x00, 0x33, 0x00, 0x32, 0x00, 0x2f, + 0x00, 0x05, 0x00, 0x04, 0x00, 0x15, 0x00, 0x12, + 0x00, 0x09, 0x00, 0x14, 0x00, 0x11, 0x00, 0x08, + 0x00, 0x06, 0x00, 0x03, 0x00, 0xff, 0x02, 0x01, + 0x00, 0x00, 0x04, 0x00, 0x23, 0x00, 0x00, + }, + { + 0x16, 0x03, 0x01, 0x00, 0x30, 0x02, 0x00, 0x00, + 0x2c, 0x03, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x05, 0x00, 0x00, + 0x04, 0x00, 0x23, 0x00, 0x00, 0x16, 0x03, 0x01, + 0x02, 0xbe, 0x0b, 0x00, 0x02, 0xba, 
0x00, 0x02, + 0xb7, 0x00, 0x02, 0xb4, 0x30, 0x82, 0x02, 0xb0, + 0x30, 0x82, 0x02, 0x19, 0xa0, 0x03, 0x02, 0x01, + 0x02, 0x02, 0x09, 0x00, 0x85, 0xb0, 0xbb, 0xa4, + 0x8a, 0x7f, 0xb8, 0xca, 0x30, 0x0d, 0x06, 0x09, + 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x01, + 0x05, 0x05, 0x00, 0x30, 0x45, 0x31, 0x0b, 0x30, + 0x09, 0x06, 0x03, 0x55, 0x04, 0x06, 0x13, 0x02, + 0x41, 0x55, 0x31, 0x13, 0x30, 0x11, 0x06, 0x03, + 0x55, 0x04, 0x08, 0x13, 0x0a, 0x53, 0x6f, 0x6d, + 0x65, 0x2d, 0x53, 0x74, 0x61, 0x74, 0x65, 0x31, + 0x21, 0x30, 0x1f, 0x06, 0x03, 0x55, 0x04, 0x0a, + 0x13, 0x18, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x6e, + 0x65, 0x74, 0x20, 0x57, 0x69, 0x64, 0x67, 0x69, + 0x74, 0x73, 0x20, 0x50, 0x74, 0x79, 0x20, 0x4c, + 0x74, 0x64, 0x30, 0x1e, 0x17, 0x0d, 0x31, 0x30, + 0x30, 0x34, 0x32, 0x34, 0x30, 0x39, 0x30, 0x39, + 0x33, 0x38, 0x5a, 0x17, 0x0d, 0x31, 0x31, 0x30, + 0x34, 0x32, 0x34, 0x30, 0x39, 0x30, 0x39, 0x33, + 0x38, 0x5a, 0x30, 0x45, 0x31, 0x0b, 0x30, 0x09, + 0x06, 0x03, 0x55, 0x04, 0x06, 0x13, 0x02, 0x41, + 0x55, 0x31, 0x13, 0x30, 0x11, 0x06, 0x03, 0x55, + 0x04, 0x08, 0x13, 0x0a, 0x53, 0x6f, 0x6d, 0x65, + 0x2d, 0x53, 0x74, 0x61, 0x74, 0x65, 0x31, 0x21, + 0x30, 0x1f, 0x06, 0x03, 0x55, 0x04, 0x0a, 0x13, + 0x18, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x65, + 0x74, 0x20, 0x57, 0x69, 0x64, 0x67, 0x69, 0x74, + 0x73, 0x20, 0x50, 0x74, 0x79, 0x20, 0x4c, 0x74, + 0x64, 0x30, 0x81, 0x9f, 0x30, 0x0d, 0x06, 0x09, + 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x01, + 0x01, 0x05, 0x00, 0x03, 0x81, 0x8d, 0x00, 0x30, + 0x81, 0x89, 0x02, 0x81, 0x81, 0x00, 0xbb, 0x79, + 0xd6, 0xf5, 0x17, 0xb5, 0xe5, 0xbf, 0x46, 0x10, + 0xd0, 0xdc, 0x69, 0xbe, 0xe6, 0x2b, 0x07, 0x43, + 0x5a, 0xd0, 0x03, 0x2d, 0x8a, 0x7a, 0x43, 0x85, + 0xb7, 0x14, 0x52, 0xe7, 0xa5, 0x65, 0x4c, 0x2c, + 0x78, 0xb8, 0x23, 0x8c, 0xb5, 0xb4, 0x82, 0xe5, + 0xde, 0x1f, 0x95, 0x3b, 0x7e, 0x62, 0xa5, 0x2c, + 0xa5, 0x33, 0xd6, 0xfe, 0x12, 0x5c, 0x7a, 0x56, + 0xfc, 0xf5, 0x06, 0xbf, 0xfa, 0x58, 0x7b, 0x26, + 0x3f, 0xb5, 0xcd, 0x04, 0xd3, 0xd0, 0xc9, 0x21, + 0x96, 0x4a, 0xc7, 0xf4, 0x54, 0x9f, 0x5a, 0xbf, + 0xef, 0x42, 0x71, 0x00, 0xfe, 0x18, 0x99, 0x07, + 0x7f, 0x7e, 0x88, 0x7d, 0x7d, 0xf1, 0x04, 0x39, + 0xc4, 0xa2, 0x2e, 0xdb, 0x51, 0xc9, 0x7c, 0xe3, + 0xc0, 0x4c, 0x3b, 0x32, 0x66, 0x01, 0xcf, 0xaf, + 0xb1, 0x1d, 0xb8, 0x71, 0x9a, 0x1d, 0xdb, 0xdb, + 0x89, 0x6b, 0xae, 0xda, 0x2d, 0x79, 0x02, 0x03, + 0x01, 0x00, 0x01, 0xa3, 0x81, 0xa7, 0x30, 0x81, + 0xa4, 0x30, 0x1d, 0x06, 0x03, 0x55, 0x1d, 0x0e, + 0x04, 0x16, 0x04, 0x14, 0xb1, 0xad, 0xe2, 0x85, + 0x5a, 0xcf, 0xcb, 0x28, 0xdb, 0x69, 0xce, 0x23, + 0x69, 0xde, 0xd3, 0x26, 0x8e, 0x18, 0x88, 0x39, + 0x30, 0x75, 0x06, 0x03, 0x55, 0x1d, 0x23, 0x04, + 0x6e, 0x30, 0x6c, 0x80, 0x14, 0xb1, 0xad, 0xe2, + 0x85, 0x5a, 0xcf, 0xcb, 0x28, 0xdb, 0x69, 0xce, + 0x23, 0x69, 0xde, 0xd3, 0x26, 0x8e, 0x18, 0x88, + 0x39, 0xa1, 0x49, 0xa4, 0x47, 0x30, 0x45, 0x31, + 0x0b, 0x30, 0x09, 0x06, 0x03, 0x55, 0x04, 0x06, + 0x13, 0x02, 0x41, 0x55, 0x31, 0x13, 0x30, 0x11, + 0x06, 0x03, 0x55, 0x04, 0x08, 0x13, 0x0a, 0x53, + 0x6f, 0x6d, 0x65, 0x2d, 0x53, 0x74, 0x61, 0x74, + 0x65, 0x31, 0x21, 0x30, 0x1f, 0x06, 0x03, 0x55, + 0x04, 0x0a, 0x13, 0x18, 0x49, 0x6e, 0x74, 0x65, + 0x72, 0x6e, 0x65, 0x74, 0x20, 0x57, 0x69, 0x64, + 0x67, 0x69, 0x74, 0x73, 0x20, 0x50, 0x74, 0x79, + 0x20, 0x4c, 0x74, 0x64, 0x82, 0x09, 0x00, 0x85, + 0xb0, 0xbb, 0xa4, 0x8a, 0x7f, 0xb8, 0xca, 0x30, + 0x0c, 0x06, 0x03, 0x55, 0x1d, 0x13, 0x04, 0x05, + 0x30, 0x03, 0x01, 0x01, 0xff, 0x30, 0x0d, 0x06, + 0x09, 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, + 0x01, 0x05, 0x05, 0x00, 0x03, 0x81, 
0x81, 0x00, + 0x08, 0x6c, 0x45, 0x24, 0xc7, 0x6b, 0xb1, 0x59, + 0xab, 0x0c, 0x52, 0xcc, 0xf2, 0xb0, 0x14, 0xd7, + 0x87, 0x9d, 0x7a, 0x64, 0x75, 0xb5, 0x5a, 0x95, + 0x66, 0xe4, 0xc5, 0x2b, 0x8e, 0xae, 0x12, 0x66, + 0x1f, 0xeb, 0x4f, 0x38, 0xb3, 0x6e, 0x60, 0xd3, + 0x92, 0xfd, 0xf7, 0x41, 0x08, 0xb5, 0x25, 0x13, + 0xb1, 0x18, 0x7a, 0x24, 0xfb, 0x30, 0x1d, 0xba, + 0xed, 0x98, 0xb9, 0x17, 0xec, 0xe7, 0xd7, 0x31, + 0x59, 0xdb, 0x95, 0xd3, 0x1d, 0x78, 0xea, 0x50, + 0x56, 0x5c, 0xd5, 0x82, 0x5a, 0x2d, 0x5a, 0x5f, + 0x33, 0xc4, 0xb6, 0xd8, 0xc9, 0x75, 0x90, 0x96, + 0x8c, 0x0f, 0x52, 0x98, 0xb5, 0xcd, 0x98, 0x1f, + 0x89, 0x20, 0x5f, 0xf2, 0xa0, 0x1c, 0xa3, 0x1b, + 0x96, 0x94, 0xdd, 0xa9, 0xfd, 0x57, 0xe9, 0x70, + 0xe8, 0x26, 0x6d, 0x71, 0x99, 0x9b, 0x26, 0x6e, + 0x38, 0x50, 0x29, 0x6c, 0x90, 0xa7, 0xbd, 0xd9, + 0x16, 0x03, 0x01, 0x00, 0x04, 0x0e, 0x00, 0x00, + 0x00, + }, + { + 0x16, 0x03, 0x01, 0x00, 0x86, 0x10, 0x00, 0x00, + 0x82, 0x00, 0x80, 0x68, 0x10, 0xdc, 0x80, 0xbc, + 0xb3, 0x5a, 0x10, 0x75, 0x89, 0xcc, 0xe5, 0x9f, + 0xbf, 0xe2, 0xce, 0xa4, 0x9f, 0x7f, 0x60, 0xc4, + 0xfe, 0x5c, 0xb5, 0x02, 0x2d, 0xa5, 0xa9, 0x1e, + 0x2c, 0x10, 0x79, 0x15, 0x0f, 0xed, 0x96, 0xb3, + 0xa8, 0x5e, 0x21, 0xbc, 0x5b, 0xdc, 0x58, 0x04, + 0x7d, 0x37, 0xdb, 0xa0, 0x31, 0xe8, 0x4f, 0x04, + 0xbc, 0x46, 0x7c, 0xdb, 0x2e, 0x93, 0x07, 0xaf, + 0xa6, 0x36, 0xd3, 0x39, 0x8d, 0x1d, 0x95, 0xa8, + 0x50, 0x4b, 0xc4, 0x2b, 0xde, 0xd7, 0x04, 0x6d, + 0x77, 0x6c, 0x4d, 0x70, 0x51, 0x88, 0x16, 0x31, + 0x40, 0xb5, 0xba, 0x90, 0x47, 0x64, 0x0c, 0x87, + 0xa5, 0x19, 0xf9, 0x89, 0x24, 0x3c, 0x5e, 0x4b, + 0xaa, 0xe0, 0x60, 0x47, 0x0f, 0x2e, 0xcc, 0xc2, + 0xd5, 0x21, 0xed, 0x72, 0xd0, 0xa9, 0xdd, 0x2a, + 0x2b, 0xef, 0x08, 0x3a, 0x65, 0xea, 0x8b, 0x52, + 0x77, 0x2d, 0xcc, 0x14, 0x03, 0x01, 0x00, 0x01, + 0x01, 0x16, 0x03, 0x01, 0x00, 0x24, 0xe2, 0x95, + 0x62, 0x3c, 0x18, 0xe5, 0xc7, 0x2c, 0xda, 0x16, + 0x9b, 0x28, 0x0d, 0xf7, 0x88, 0x7b, 0x5d, 0x33, + 0x55, 0x3b, 0x01, 0x73, 0xf2, 0xc6, 0x4e, 0x96, + 0x01, 0x01, 0x83, 0x65, 0xd4, 0xef, 0x12, 0x13, + 0x1d, 0x42, + }, + { + 0x16, 0x03, 0x01, 0x00, 0x72, 0x04, 0x00, 0x00, + 0x6e, 0x00, 0x00, 0x00, 0x00, 0x00, 0x68, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x65, + 0xe8, 0x4b, 0xd1, 0xef, 0xba, 0xfb, 0x41, 0x92, + 0x6d, 0x37, 0x5f, 0xf8, 0x7d, 0x90, 0x0f, 0x01, + 0xf8, 0x8c, 0xee, 0xbc, 0xd9, 0x0c, 0x97, 0x7e, + 0x23, 0x46, 0xe2, 0x6b, 0x52, 0xc6, 0xc6, 0x97, + 0x1d, 0xab, 0xde, 0xa0, 0x86, 0x94, 0xc8, 0x2e, + 0x8b, 0x2e, 0x42, 0x5f, 0xc2, 0x70, 0x35, 0xc9, + 0xee, 0x37, 0xeb, 0x70, 0xaa, 0x59, 0x23, 0x6c, + 0xc8, 0xc1, 0x84, 0x89, 0x39, 0x87, 0x73, 0x0a, + 0x7e, 0xba, 0xca, 0xed, 0x63, 0xba, 0x4e, 0x4f, + 0xf3, 0x31, 0x4b, 0xf0, 0xee, 0x91, 0xa5, 0xb4, + 0x62, 0x01, 0x9e, 0xbd, 0xbc, 0xb3, 0x35, 0x14, + 0x03, 0x01, 0x00, 0x01, 0x01, 0x16, 0x03, 0x01, + 0x00, 0x24, 0x3f, 0x66, 0xe4, 0x98, 0xc1, 0x3f, + 0xc6, 0x2c, 0x81, 0xfb, 0xa9, 0x9f, 0x27, 0xe9, + 0x63, 0x20, 0x1e, 0x0e, 0x4f, 0xfc, 0x5d, 0x12, + 0xee, 0x77, 0x73, 0xc6, 0x96, 0x51, 0xf2, 0x26, + 0x35, 0x3f, 0xce, 0x6a, 0xa9, 0xfd, 0x17, 0x03, + 0x01, 0x00, 0x21, 0x8d, 0xd5, 0x67, 0x60, 0x5d, + 0xa7, 0x93, 0xcc, 0x39, 0x78, 0x59, 0xab, 0xdb, + 0x10, 0x96, 0xf2, 0xad, 0xa2, 0x85, 0xe2, 0x93, + 0x43, 0x43, 0xcf, 0x82, 0xbd, 0x1f, 0xdc, 0x7a, + 0x72, 0xd6, 0x83, 0x3b, 0x15, 0x03, 0x01, 0x00, + 0x16, 0x89, 0x55, 0xf6, 0x42, 0x71, 0xa9, 0xe9, + 0x05, 0x68, 0xe8, 0xce, 0x0d, 0x21, 0xe9, 0xec, + 0xf2, 0x27, 0x67, 0xa7, 0x94, 0xf8, 0x34, + }, +} +var serverResumeTest = [][]byte{ + { + 0x16, 
0x03, 0x01, 0x00, 0xc2, 0x01, 0x00, 0x00, + 0xbe, 0x03, 0x01, 0x50, 0x77, 0x3e, 0x4f, 0x1f, + 0x6f, 0xa5, 0x81, 0xeb, 0xb8, 0x80, 0x55, 0xa4, + 0x76, 0xc2, 0x7f, 0x27, 0xf2, 0xe7, 0xc9, 0x7a, + 0x01, 0x3c, 0xd8, 0xc1, 0xde, 0x99, 0x1f, 0x7c, + 0xab, 0x35, 0x98, 0x00, 0x00, 0x28, 0x00, 0x39, + 0x00, 0x38, 0x00, 0x35, 0x00, 0x16, 0x00, 0x13, + 0x00, 0x0a, 0x00, 0x33, 0x00, 0x32, 0x00, 0x2f, + 0x00, 0x05, 0x00, 0x04, 0x00, 0x15, 0x00, 0x12, + 0x00, 0x09, 0x00, 0x14, 0x00, 0x11, 0x00, 0x08, + 0x00, 0x06, 0x00, 0x03, 0x00, 0xff, 0x02, 0x01, + 0x00, 0x00, 0x6c, 0x00, 0x23, 0x00, 0x68, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x65, + 0xe8, 0x4b, 0xd1, 0xef, 0xba, 0xfb, 0x41, 0x92, + 0x6d, 0x37, 0x5f, 0xf8, 0x7d, 0x90, 0x0f, 0x01, + 0xf8, 0x8c, 0xee, 0xbc, 0xd9, 0x0c, 0x97, 0x7e, + 0x23, 0x46, 0xe2, 0x6b, 0x52, 0xc6, 0xc6, 0x97, + 0x1d, 0xab, 0xde, 0xa0, 0x86, 0x94, 0xc8, 0x2e, + 0x8b, 0x2e, 0x42, 0x5f, 0xc2, 0x70, 0x35, 0xc9, + 0xee, 0x37, 0xeb, 0x70, 0xaa, 0x59, 0x23, 0x6c, + 0xc8, 0xc1, 0x84, 0x89, 0x39, 0x87, 0x73, 0x0a, + 0x7e, 0xba, 0xca, 0xed, 0x63, 0xba, 0x4e, 0x4f, + 0xf3, 0x31, 0x4b, 0xf0, 0xee, 0x91, 0xa5, 0xb4, + 0x62, 0x01, 0x9e, 0xbd, 0xbc, 0xb3, 0x35, + }, + { + 0x16, 0x03, 0x01, 0x00, 0x2a, 0x02, 0x00, 0x00, + 0x26, 0x03, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x05, 0x00, 0x14, + 0x03, 0x01, 0x00, 0x01, 0x01, 0x16, 0x03, 0x01, + 0x00, 0x24, 0xc5, 0x35, 0x74, 0x19, 0x05, 0xc5, + 0x85, 0x68, 0x48, 0xe8, 0xb5, 0xe9, 0xaf, 0x78, + 0xbd, 0x35, 0x6f, 0xe9, 0x79, 0x34, 0x1b, 0xf0, + 0x35, 0xd4, 0x4e, 0x55, 0x2e, 0x3c, 0xd5, 0xaf, + 0xfc, 0xba, 0xf5, 0x1e, 0x83, 0x32, + }, + { + 0x14, 0x03, 0x01, 0x00, 0x01, 0x01, 0x16, 0x03, + 0x01, 0x00, 0x24, 0x27, 0x28, 0x88, 0xe1, 0x7e, + 0x0d, 0x9c, 0x12, 0x50, 0xf6, 0x7a, 0xa7, 0x32, + 0x21, 0x68, 0xba, 0xd8, 0x0a, 0xdc, 0x39, 0xef, + 0x68, 0x95, 0x82, 0xae, 0xbd, 0x12, 0x79, 0xa1, + 0x99, 0xfd, 0xd0, 0x10, 0x8e, 0x4b, 0xd8, + }, + { + 0x17, 0x03, 0x01, 0x00, 0x21, 0xc5, 0x7e, 0x0a, + 0x52, 0x6a, 0xb9, 0xaa, 0x1d, 0xae, 0x9e, 0x24, + 0x9c, 0x34, 0x1e, 0xdb, 0x50, 0x95, 0xee, 0x76, + 0xd7, 0x28, 0x88, 0x08, 0xe3, 0x2e, 0x58, 0xf7, + 0xdb, 0x34, 0x75, 0xa5, 0x7f, 0x9d, 0x15, 0x03, + 0x01, 0x00, 0x16, 0x2c, 0xc1, 0x29, 0x5f, 0x12, + 0x1d, 0x19, 0xab, 0xb3, 0xf4, 0x35, 0x1c, 0x62, + 0x6a, 0x80, 0x29, 0x0d, 0x0e, 0xef, 0x7d, 0x6e, + 0x50, + }, +} + +var clientauthRSATests = []clientauthTest{ + // Server asks for cert with empty CA list, client doesn't give it. 
+ // go test -run "TestRunServer" -serve -clientauth 1 + {"RequestClientCert, none given", RequestClientCert, nil, [][]byte{ + { + 0x16, 0x03, 0x01, 0x01, 0x1e, 0x01, 0x00, 0x01, + 0x1a, 0x03, 0x03, 0x51, 0xe5, 0x6c, 0xb5, 0x5a, + 0xc2, 0xf5, 0xf0, 0x92, 0x94, 0x8a, 0x64, 0x18, + 0xa4, 0x2b, 0x82, 0x07, 0xbc, 0xd9, 0xd9, 0xf9, + 0x7b, 0xd2, 0xd0, 0xee, 0xa2, 0x70, 0x4e, 0x23, + 0x88, 0x7c, 0x95, 0x00, 0x00, 0x82, 0xc0, 0x30, + 0xc0, 0x2c, 0xc0, 0x28, 0xc0, 0x24, 0xc0, 0x14, + 0xc0, 0x0a, 0x00, 0xa3, 0x00, 0x9f, 0x00, 0x6b, + 0x00, 0x6a, 0x00, 0x39, 0x00, 0x38, 0xc0, 0x32, + 0xc0, 0x2e, 0xc0, 0x2a, 0xc0, 0x26, 0xc0, 0x0f, + 0xc0, 0x05, 0x00, 0x9d, 0x00, 0x3d, 0x00, 0x35, + 0xc0, 0x12, 0xc0, 0x08, 0x00, 0x16, 0x00, 0x13, + 0xc0, 0x0d, 0xc0, 0x03, 0x00, 0x0a, 0xc0, 0x2f, + 0xc0, 0x2b, 0xc0, 0x27, 0xc0, 0x23, 0xc0, 0x13, + 0xc0, 0x09, 0x00, 0xa2, 0x00, 0x9e, 0x00, 0x67, + 0x00, 0x40, 0x00, 0x33, 0x00, 0x32, 0xc0, 0x31, + 0xc0, 0x2d, 0xc0, 0x29, 0xc0, 0x25, 0xc0, 0x0e, + 0xc0, 0x04, 0x00, 0x9c, 0x00, 0x3c, 0x00, 0x2f, + 0x00, 0x07, 0xc0, 0x11, 0xc0, 0x07, 0xc0, 0x0c, + 0xc0, 0x02, 0x00, 0x05, 0x00, 0x04, 0x00, 0x15, + 0x00, 0x12, 0x00, 0x09, 0x00, 0x14, 0x00, 0x11, + 0x00, 0x08, 0x00, 0x06, 0x00, 0x03, 0x00, 0xff, + 0x01, 0x00, 0x00, 0x6f, 0x00, 0x0b, 0x00, 0x04, + 0x03, 0x00, 0x01, 0x02, 0x00, 0x0a, 0x00, 0x34, + 0x00, 0x32, 0x00, 0x0e, 0x00, 0x0d, 0x00, 0x19, + 0x00, 0x0b, 0x00, 0x0c, 0x00, 0x18, 0x00, 0x09, + 0x00, 0x0a, 0x00, 0x16, 0x00, 0x17, 0x00, 0x08, + 0x00, 0x06, 0x00, 0x07, 0x00, 0x14, 0x00, 0x15, + 0x00, 0x04, 0x00, 0x05, 0x00, 0x12, 0x00, 0x13, + 0x00, 0x01, 0x00, 0x02, 0x00, 0x03, 0x00, 0x0f, + 0x00, 0x10, 0x00, 0x11, 0x00, 0x23, 0x00, 0x00, + 0x00, 0x0d, 0x00, 0x22, 0x00, 0x20, 0x06, 0x01, + 0x06, 0x02, 0x06, 0x03, 0x05, 0x01, 0x05, 0x02, + 0x05, 0x03, 0x04, 0x01, 0x04, 0x02, 0x04, 0x03, + 0x03, 0x01, 0x03, 0x02, 0x03, 0x03, 0x02, 0x01, + 0x02, 0x02, 0x02, 0x03, 0x01, 0x01, 0x00, 0x0f, + 0x00, 0x01, 0x01, + }, + { + 0x16, 0x03, 0x01, 0x00, 0x30, 0x02, 0x00, 0x00, + 0x2c, 0x03, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x05, 0x00, 0x00, + 0x04, 0x00, 0x23, 0x00, 0x00, 0x16, 0x03, 0x01, + 0x02, 0xbe, 0x0b, 0x00, 0x02, 0xba, 0x00, 0x02, + 0xb7, 0x00, 0x02, 0xb4, 0x30, 0x82, 0x02, 0xb0, + 0x30, 0x82, 0x02, 0x19, 0xa0, 0x03, 0x02, 0x01, + 0x02, 0x02, 0x09, 0x00, 0x85, 0xb0, 0xbb, 0xa4, + 0x8a, 0x7f, 0xb8, 0xca, 0x30, 0x0d, 0x06, 0x09, + 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x01, + 0x05, 0x05, 0x00, 0x30, 0x45, 0x31, 0x0b, 0x30, + 0x09, 0x06, 0x03, 0x55, 0x04, 0x06, 0x13, 0x02, + 0x41, 0x55, 0x31, 0x13, 0x30, 0x11, 0x06, 0x03, + 0x55, 0x04, 0x08, 0x13, 0x0a, 0x53, 0x6f, 0x6d, + 0x65, 0x2d, 0x53, 0x74, 0x61, 0x74, 0x65, 0x31, + 0x21, 0x30, 0x1f, 0x06, 0x03, 0x55, 0x04, 0x0a, + 0x13, 0x18, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x6e, + 0x65, 0x74, 0x20, 0x57, 0x69, 0x64, 0x67, 0x69, + 0x74, 0x73, 0x20, 0x50, 0x74, 0x79, 0x20, 0x4c, + 0x74, 0x64, 0x30, 0x1e, 0x17, 0x0d, 0x31, 0x30, + 0x30, 0x34, 0x32, 0x34, 0x30, 0x39, 0x30, 0x39, + 0x33, 0x38, 0x5a, 0x17, 0x0d, 0x31, 0x31, 0x30, + 0x34, 0x32, 0x34, 0x30, 0x39, 0x30, 0x39, 0x33, + 0x38, 0x5a, 0x30, 0x45, 0x31, 0x0b, 0x30, 0x09, + 0x06, 0x03, 0x55, 0x04, 0x06, 0x13, 0x02, 0x41, + 0x55, 0x31, 0x13, 0x30, 0x11, 0x06, 0x03, 0x55, + 0x04, 0x08, 0x13, 0x0a, 0x53, 0x6f, 0x6d, 0x65, + 0x2d, 0x53, 0x74, 0x61, 0x74, 0x65, 0x31, 0x21, + 0x30, 0x1f, 0x06, 0x03, 0x55, 0x04, 0x0a, 
0x13, + 0x18, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x65, + 0x74, 0x20, 0x57, 0x69, 0x64, 0x67, 0x69, 0x74, + 0x73, 0x20, 0x50, 0x74, 0x79, 0x20, 0x4c, 0x74, + 0x64, 0x30, 0x81, 0x9f, 0x30, 0x0d, 0x06, 0x09, + 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x01, + 0x01, 0x05, 0x00, 0x03, 0x81, 0x8d, 0x00, 0x30, + 0x81, 0x89, 0x02, 0x81, 0x81, 0x00, 0xbb, 0x79, + 0xd6, 0xf5, 0x17, 0xb5, 0xe5, 0xbf, 0x46, 0x10, + 0xd0, 0xdc, 0x69, 0xbe, 0xe6, 0x2b, 0x07, 0x43, + 0x5a, 0xd0, 0x03, 0x2d, 0x8a, 0x7a, 0x43, 0x85, + 0xb7, 0x14, 0x52, 0xe7, 0xa5, 0x65, 0x4c, 0x2c, + 0x78, 0xb8, 0x23, 0x8c, 0xb5, 0xb4, 0x82, 0xe5, + 0xde, 0x1f, 0x95, 0x3b, 0x7e, 0x62, 0xa5, 0x2c, + 0xa5, 0x33, 0xd6, 0xfe, 0x12, 0x5c, 0x7a, 0x56, + 0xfc, 0xf5, 0x06, 0xbf, 0xfa, 0x58, 0x7b, 0x26, + 0x3f, 0xb5, 0xcd, 0x04, 0xd3, 0xd0, 0xc9, 0x21, + 0x96, 0x4a, 0xc7, 0xf4, 0x54, 0x9f, 0x5a, 0xbf, + 0xef, 0x42, 0x71, 0x00, 0xfe, 0x18, 0x99, 0x07, + 0x7f, 0x7e, 0x88, 0x7d, 0x7d, 0xf1, 0x04, 0x39, + 0xc4, 0xa2, 0x2e, 0xdb, 0x51, 0xc9, 0x7c, 0xe3, + 0xc0, 0x4c, 0x3b, 0x32, 0x66, 0x01, 0xcf, 0xaf, + 0xb1, 0x1d, 0xb8, 0x71, 0x9a, 0x1d, 0xdb, 0xdb, + 0x89, 0x6b, 0xae, 0xda, 0x2d, 0x79, 0x02, 0x03, + 0x01, 0x00, 0x01, 0xa3, 0x81, 0xa7, 0x30, 0x81, + 0xa4, 0x30, 0x1d, 0x06, 0x03, 0x55, 0x1d, 0x0e, + 0x04, 0x16, 0x04, 0x14, 0xb1, 0xad, 0xe2, 0x85, + 0x5a, 0xcf, 0xcb, 0x28, 0xdb, 0x69, 0xce, 0x23, + 0x69, 0xde, 0xd3, 0x26, 0x8e, 0x18, 0x88, 0x39, + 0x30, 0x75, 0x06, 0x03, 0x55, 0x1d, 0x23, 0x04, + 0x6e, 0x30, 0x6c, 0x80, 0x14, 0xb1, 0xad, 0xe2, + 0x85, 0x5a, 0xcf, 0xcb, 0x28, 0xdb, 0x69, 0xce, + 0x23, 0x69, 0xde, 0xd3, 0x26, 0x8e, 0x18, 0x88, + 0x39, 0xa1, 0x49, 0xa4, 0x47, 0x30, 0x45, 0x31, + 0x0b, 0x30, 0x09, 0x06, 0x03, 0x55, 0x04, 0x06, + 0x13, 0x02, 0x41, 0x55, 0x31, 0x13, 0x30, 0x11, + 0x06, 0x03, 0x55, 0x04, 0x08, 0x13, 0x0a, 0x53, + 0x6f, 0x6d, 0x65, 0x2d, 0x53, 0x74, 0x61, 0x74, + 0x65, 0x31, 0x21, 0x30, 0x1f, 0x06, 0x03, 0x55, + 0x04, 0x0a, 0x13, 0x18, 0x49, 0x6e, 0x74, 0x65, + 0x72, 0x6e, 0x65, 0x74, 0x20, 0x57, 0x69, 0x64, + 0x67, 0x69, 0x74, 0x73, 0x20, 0x50, 0x74, 0x79, + 0x20, 0x4c, 0x74, 0x64, 0x82, 0x09, 0x00, 0x85, + 0xb0, 0xbb, 0xa4, 0x8a, 0x7f, 0xb8, 0xca, 0x30, + 0x0c, 0x06, 0x03, 0x55, 0x1d, 0x13, 0x04, 0x05, + 0x30, 0x03, 0x01, 0x01, 0xff, 0x30, 0x0d, 0x06, + 0x09, 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, + 0x01, 0x05, 0x05, 0x00, 0x03, 0x81, 0x81, 0x00, + 0x08, 0x6c, 0x45, 0x24, 0xc7, 0x6b, 0xb1, 0x59, + 0xab, 0x0c, 0x52, 0xcc, 0xf2, 0xb0, 0x14, 0xd7, + 0x87, 0x9d, 0x7a, 0x64, 0x75, 0xb5, 0x5a, 0x95, + 0x66, 0xe4, 0xc5, 0x2b, 0x8e, 0xae, 0x12, 0x66, + 0x1f, 0xeb, 0x4f, 0x38, 0xb3, 0x6e, 0x60, 0xd3, + 0x92, 0xfd, 0xf7, 0x41, 0x08, 0xb5, 0x25, 0x13, + 0xb1, 0x18, 0x7a, 0x24, 0xfb, 0x30, 0x1d, 0xba, + 0xed, 0x98, 0xb9, 0x17, 0xec, 0xe7, 0xd7, 0x31, + 0x59, 0xdb, 0x95, 0xd3, 0x1d, 0x78, 0xea, 0x50, + 0x56, 0x5c, 0xd5, 0x82, 0x5a, 0x2d, 0x5a, 0x5f, + 0x33, 0xc4, 0xb6, 0xd8, 0xc9, 0x75, 0x90, 0x96, + 0x8c, 0x0f, 0x52, 0x98, 0xb5, 0xcd, 0x98, 0x1f, + 0x89, 0x20, 0x5f, 0xf2, 0xa0, 0x1c, 0xa3, 0x1b, + 0x96, 0x94, 0xdd, 0xa9, 0xfd, 0x57, 0xe9, 0x70, + 0xe8, 0x26, 0x6d, 0x71, 0x99, 0x9b, 0x26, 0x6e, + 0x38, 0x50, 0x29, 0x6c, 0x90, 0xa7, 0xbd, 0xd9, + 0x16, 0x03, 0x01, 0x00, 0x09, 0x0d, 0x00, 0x00, + 0x05, 0x02, 0x01, 0x40, 0x00, 0x00, 0x16, 0x03, + 0x01, 0x00, 0x04, 0x0e, 0x00, 0x00, 0x00, + }, + { + 0x16, 0x03, 0x01, 0x00, 0x07, 0x0b, 0x00, 0x00, + 0x03, 0x00, 0x00, 0x00, 0x16, 0x03, 0x01, 0x00, + 0x86, 0x10, 0x00, 0x00, 0x82, 0x00, 0x80, 0x36, + 0xfc, 0xd8, 0xc8, 0xa2, 0x67, 0xc8, 0xc6, 0xf4, + 0x28, 0x70, 0xe1, 0x5a, 0x02, 0x8f, 0xef, 
0x42, + 0xe0, 0xd3, 0xb8, 0xd6, 0x6b, 0xe4, 0xee, 0x5c, + 0xcf, 0x42, 0xc4, 0xfa, 0xcd, 0x0f, 0xfe, 0xf4, + 0x76, 0x76, 0x47, 0x73, 0xa8, 0x72, 0x8f, 0xa2, + 0x56, 0x81, 0x83, 0xb8, 0x84, 0x72, 0x67, 0xdd, + 0xbe, 0x05, 0x4b, 0x84, 0xd9, 0xd2, 0xb6, 0xc2, + 0xe7, 0x20, 0xac, 0x1f, 0x46, 0x9d, 0x05, 0x47, + 0x8e, 0x89, 0xc0, 0x42, 0x57, 0x4a, 0xa2, 0x98, + 0xe5, 0x39, 0x4f, 0xc4, 0x27, 0x6d, 0x43, 0xa8, + 0x83, 0x76, 0xe6, 0xad, 0xe3, 0x17, 0x68, 0x31, + 0xcb, 0x7e, 0xfc, 0xe7, 0x4b, 0x76, 0x3d, 0x3c, + 0xfa, 0x77, 0x65, 0xc9, 0x4c, 0x5b, 0xce, 0x5e, + 0xf7, 0x8b, 0xa8, 0xa6, 0xdd, 0xb2, 0xef, 0x0b, + 0x46, 0x83, 0xdf, 0x0a, 0x8c, 0x22, 0x12, 0x6e, + 0xe1, 0x45, 0x54, 0x88, 0xd1, 0xe8, 0xd2, 0x14, + 0x03, 0x01, 0x00, 0x01, 0x01, 0x16, 0x03, 0x01, + 0x00, 0x24, 0x30, 0x8c, 0x7d, 0x40, 0xfc, 0x5e, + 0x80, 0x9c, 0xc4, 0x7c, 0x62, 0x01, 0xa1, 0x37, + 0xcf, 0x1a, 0x75, 0x28, 0x8d, 0xeb, 0x63, 0xcc, + 0x02, 0xa6, 0x66, 0xdf, 0x36, 0x01, 0xb3, 0x9d, + 0x38, 0x42, 0x16, 0x91, 0xf0, 0x02, + }, + { + 0x16, 0x03, 0x01, 0x00, 0x72, 0x04, 0x00, 0x00, + 0x6e, 0x00, 0x00, 0x00, 0x00, 0x00, 0x68, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x65, + 0xe8, 0x4b, 0xd1, 0xef, 0xba, 0x96, 0x9a, 0x2a, + 0x6c, 0x8c, 0x7e, 0x38, 0x10, 0x46, 0x86, 0x1d, + 0x19, 0x1d, 0x62, 0x29, 0x3f, 0x58, 0xfb, 0x6d, + 0x89, 0xd2, 0x81, 0x9a, 0x1c, 0xb3, 0x58, 0xb3, + 0x19, 0x39, 0x17, 0x47, 0x49, 0xc9, 0xfe, 0x4a, + 0x7a, 0x32, 0xac, 0x2c, 0x43, 0xf9, 0xa9, 0xea, + 0xec, 0x51, 0x46, 0xf1, 0xb8, 0x59, 0x23, 0x70, + 0xce, 0x7c, 0xb9, 0x47, 0x70, 0xa3, 0xc9, 0xae, + 0x47, 0x7b, 0x7e, 0xc7, 0xcf, 0x76, 0x12, 0x76, + 0x18, 0x90, 0x12, 0xcd, 0xf3, 0xd4, 0x27, 0x81, + 0xfc, 0x46, 0x03, 0x3e, 0x05, 0x87, 0x6f, 0x14, + 0x03, 0x01, 0x00, 0x01, 0x01, 0x16, 0x03, 0x01, + 0x00, 0x24, 0xc3, 0xa0, 0x29, 0xb1, 0x52, 0x82, + 0xef, 0x85, 0xa1, 0x64, 0x0f, 0xe4, 0xa3, 0xfb, + 0xa7, 0x1d, 0x22, 0x4c, 0xcb, 0xd6, 0x5b, 0x18, + 0x61, 0xc7, 0x7c, 0xf2, 0x67, 0x4a, 0xc7, 0x11, + 0x9d, 0x8e, 0x0e, 0x15, 0x22, 0xcf, 0x17, 0x03, + 0x01, 0x00, 0x21, 0xfd, 0xbb, 0xf1, 0xa9, 0x7c, + 0xbf, 0x92, 0xb3, 0xfa, 0x2c, 0x08, 0x6f, 0x22, + 0x78, 0x80, 0xf2, 0x2e, 0x86, 0x26, 0x21, 0x36, + 0x3f, 0x32, 0xdf, 0xb6, 0x47, 0xa5, 0xf8, 0x27, + 0xc1, 0xe9, 0x53, 0x90, 0x15, 0x03, 0x01, 0x00, + 0x16, 0xfe, 0xef, 0x2e, 0xa0, 0x5d, 0xe0, 0xce, + 0x94, 0x20, 0x56, 0x61, 0x6e, 0xe5, 0x62, 0xce, + 0x27, 0x57, 0x3e, 0x30, 0x32, 0x77, 0x53, + }, + }}, + + // Server asks for cert with empty CA list, client gives one + // go test -run "TestRunServer" -serve -clientauth 1 + {"RequestClientCert, client gives it", RequestClientCert, []*x509.Certificate{clientCertificate}, [][]byte{ + { + 0x16, 0x03, 0x01, 0x01, 0x1e, 0x01, 0x00, 0x01, + 0x1a, 0x03, 0x03, 0x51, 0xe5, 0x74, 0x0e, 0x95, + 0x6f, 0x4f, 0x4a, 0xbf, 0xb7, 0xc0, 0x6c, 0xac, + 0xd9, 0xfe, 0x7d, 0xd0, 0x51, 0x19, 0x62, 0x62, + 0x1c, 0x6e, 0x57, 0x77, 0xd2, 0x31, 0xaf, 0x88, + 0xb9, 0xc0, 0x1d, 0x00, 0x00, 0x82, 0xc0, 0x30, + 0xc0, 0x2c, 0xc0, 0x28, 0xc0, 0x24, 0xc0, 0x14, + 0xc0, 0x0a, 0x00, 0xa3, 0x00, 0x9f, 0x00, 0x6b, + 0x00, 0x6a, 0x00, 0x39, 0x00, 0x38, 0xc0, 0x32, + 0xc0, 0x2e, 0xc0, 0x2a, 0xc0, 0x26, 0xc0, 0x0f, + 0xc0, 0x05, 0x00, 0x9d, 0x00, 0x3d, 0x00, 0x35, + 0xc0, 0x12, 0xc0, 0x08, 0x00, 0x16, 0x00, 0x13, + 0xc0, 0x0d, 0xc0, 0x03, 0x00, 0x0a, 0xc0, 0x2f, + 0xc0, 0x2b, 0xc0, 0x27, 0xc0, 0x23, 0xc0, 0x13, + 0xc0, 0x09, 0x00, 0xa2, 0x00, 0x9e, 0x00, 0x67, + 0x00, 0x40, 0x00, 0x33, 0x00, 0x32, 0xc0, 0x31, + 0xc0, 0x2d, 0xc0, 0x29, 0xc0, 0x25, 0xc0, 0x0e, + 0xc0, 
0x04, 0x00, 0x9c, 0x00, 0x3c, 0x00, 0x2f, + 0x00, 0x07, 0xc0, 0x11, 0xc0, 0x07, 0xc0, 0x0c, + 0xc0, 0x02, 0x00, 0x05, 0x00, 0x04, 0x00, 0x15, + 0x00, 0x12, 0x00, 0x09, 0x00, 0x14, 0x00, 0x11, + 0x00, 0x08, 0x00, 0x06, 0x00, 0x03, 0x00, 0xff, + 0x01, 0x00, 0x00, 0x6f, 0x00, 0x0b, 0x00, 0x04, + 0x03, 0x00, 0x01, 0x02, 0x00, 0x0a, 0x00, 0x34, + 0x00, 0x32, 0x00, 0x0e, 0x00, 0x0d, 0x00, 0x19, + 0x00, 0x0b, 0x00, 0x0c, 0x00, 0x18, 0x00, 0x09, + 0x00, 0x0a, 0x00, 0x16, 0x00, 0x17, 0x00, 0x08, + 0x00, 0x06, 0x00, 0x07, 0x00, 0x14, 0x00, 0x15, + 0x00, 0x04, 0x00, 0x05, 0x00, 0x12, 0x00, 0x13, + 0x00, 0x01, 0x00, 0x02, 0x00, 0x03, 0x00, 0x0f, + 0x00, 0x10, 0x00, 0x11, 0x00, 0x23, 0x00, 0x00, + 0x00, 0x0d, 0x00, 0x22, 0x00, 0x20, 0x06, 0x01, + 0x06, 0x02, 0x06, 0x03, 0x05, 0x01, 0x05, 0x02, + 0x05, 0x03, 0x04, 0x01, 0x04, 0x02, 0x04, 0x03, + 0x03, 0x01, 0x03, 0x02, 0x03, 0x03, 0x02, 0x01, + 0x02, 0x02, 0x02, 0x03, 0x01, 0x01, 0x00, 0x0f, + 0x00, 0x01, 0x01, + }, + { + 0x16, 0x03, 0x01, 0x00, 0x30, 0x02, 0x00, 0x00, + 0x2c, 0x03, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x05, 0x00, 0x00, + 0x04, 0x00, 0x23, 0x00, 0x00, 0x16, 0x03, 0x01, + 0x02, 0xbe, 0x0b, 0x00, 0x02, 0xba, 0x00, 0x02, + 0xb7, 0x00, 0x02, 0xb4, 0x30, 0x82, 0x02, 0xb0, + 0x30, 0x82, 0x02, 0x19, 0xa0, 0x03, 0x02, 0x01, + 0x02, 0x02, 0x09, 0x00, 0x85, 0xb0, 0xbb, 0xa4, + 0x8a, 0x7f, 0xb8, 0xca, 0x30, 0x0d, 0x06, 0x09, + 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x01, + 0x05, 0x05, 0x00, 0x30, 0x45, 0x31, 0x0b, 0x30, + 0x09, 0x06, 0x03, 0x55, 0x04, 0x06, 0x13, 0x02, + 0x41, 0x55, 0x31, 0x13, 0x30, 0x11, 0x06, 0x03, + 0x55, 0x04, 0x08, 0x13, 0x0a, 0x53, 0x6f, 0x6d, + 0x65, 0x2d, 0x53, 0x74, 0x61, 0x74, 0x65, 0x31, + 0x21, 0x30, 0x1f, 0x06, 0x03, 0x55, 0x04, 0x0a, + 0x13, 0x18, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x6e, + 0x65, 0x74, 0x20, 0x57, 0x69, 0x64, 0x67, 0x69, + 0x74, 0x73, 0x20, 0x50, 0x74, 0x79, 0x20, 0x4c, + 0x74, 0x64, 0x30, 0x1e, 0x17, 0x0d, 0x31, 0x30, + 0x30, 0x34, 0x32, 0x34, 0x30, 0x39, 0x30, 0x39, + 0x33, 0x38, 0x5a, 0x17, 0x0d, 0x31, 0x31, 0x30, + 0x34, 0x32, 0x34, 0x30, 0x39, 0x30, 0x39, 0x33, + 0x38, 0x5a, 0x30, 0x45, 0x31, 0x0b, 0x30, 0x09, + 0x06, 0x03, 0x55, 0x04, 0x06, 0x13, 0x02, 0x41, + 0x55, 0x31, 0x13, 0x30, 0x11, 0x06, 0x03, 0x55, + 0x04, 0x08, 0x13, 0x0a, 0x53, 0x6f, 0x6d, 0x65, + 0x2d, 0x53, 0x74, 0x61, 0x74, 0x65, 0x31, 0x21, + 0x30, 0x1f, 0x06, 0x03, 0x55, 0x04, 0x0a, 0x13, + 0x18, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x65, + 0x74, 0x20, 0x57, 0x69, 0x64, 0x67, 0x69, 0x74, + 0x73, 0x20, 0x50, 0x74, 0x79, 0x20, 0x4c, 0x74, + 0x64, 0x30, 0x81, 0x9f, 0x30, 0x0d, 0x06, 0x09, + 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x01, + 0x01, 0x05, 0x00, 0x03, 0x81, 0x8d, 0x00, 0x30, + 0x81, 0x89, 0x02, 0x81, 0x81, 0x00, 0xbb, 0x79, + 0xd6, 0xf5, 0x17, 0xb5, 0xe5, 0xbf, 0x46, 0x10, + 0xd0, 0xdc, 0x69, 0xbe, 0xe6, 0x2b, 0x07, 0x43, + 0x5a, 0xd0, 0x03, 0x2d, 0x8a, 0x7a, 0x43, 0x85, + 0xb7, 0x14, 0x52, 0xe7, 0xa5, 0x65, 0x4c, 0x2c, + 0x78, 0xb8, 0x23, 0x8c, 0xb5, 0xb4, 0x82, 0xe5, + 0xde, 0x1f, 0x95, 0x3b, 0x7e, 0x62, 0xa5, 0x2c, + 0xa5, 0x33, 0xd6, 0xfe, 0x12, 0x5c, 0x7a, 0x56, + 0xfc, 0xf5, 0x06, 0xbf, 0xfa, 0x58, 0x7b, 0x26, + 0x3f, 0xb5, 0xcd, 0x04, 0xd3, 0xd0, 0xc9, 0x21, + 0x96, 0x4a, 0xc7, 0xf4, 0x54, 0x9f, 0x5a, 0xbf, + 0xef, 0x42, 0x71, 0x00, 0xfe, 0x18, 0x99, 0x07, + 0x7f, 0x7e, 0x88, 0x7d, 0x7d, 0xf1, 0x04, 0x39, + 0xc4, 0xa2, 0x2e, 0xdb, 0x51, 
0xc9, 0x7c, 0xe3, + 0xc0, 0x4c, 0x3b, 0x32, 0x66, 0x01, 0xcf, 0xaf, + 0xb1, 0x1d, 0xb8, 0x71, 0x9a, 0x1d, 0xdb, 0xdb, + 0x89, 0x6b, 0xae, 0xda, 0x2d, 0x79, 0x02, 0x03, + 0x01, 0x00, 0x01, 0xa3, 0x81, 0xa7, 0x30, 0x81, + 0xa4, 0x30, 0x1d, 0x06, 0x03, 0x55, 0x1d, 0x0e, + 0x04, 0x16, 0x04, 0x14, 0xb1, 0xad, 0xe2, 0x85, + 0x5a, 0xcf, 0xcb, 0x28, 0xdb, 0x69, 0xce, 0x23, + 0x69, 0xde, 0xd3, 0x26, 0x8e, 0x18, 0x88, 0x39, + 0x30, 0x75, 0x06, 0x03, 0x55, 0x1d, 0x23, 0x04, + 0x6e, 0x30, 0x6c, 0x80, 0x14, 0xb1, 0xad, 0xe2, + 0x85, 0x5a, 0xcf, 0xcb, 0x28, 0xdb, 0x69, 0xce, + 0x23, 0x69, 0xde, 0xd3, 0x26, 0x8e, 0x18, 0x88, + 0x39, 0xa1, 0x49, 0xa4, 0x47, 0x30, 0x45, 0x31, + 0x0b, 0x30, 0x09, 0x06, 0x03, 0x55, 0x04, 0x06, + 0x13, 0x02, 0x41, 0x55, 0x31, 0x13, 0x30, 0x11, + 0x06, 0x03, 0x55, 0x04, 0x08, 0x13, 0x0a, 0x53, + 0x6f, 0x6d, 0x65, 0x2d, 0x53, 0x74, 0x61, 0x74, + 0x65, 0x31, 0x21, 0x30, 0x1f, 0x06, 0x03, 0x55, + 0x04, 0x0a, 0x13, 0x18, 0x49, 0x6e, 0x74, 0x65, + 0x72, 0x6e, 0x65, 0x74, 0x20, 0x57, 0x69, 0x64, + 0x67, 0x69, 0x74, 0x73, 0x20, 0x50, 0x74, 0x79, + 0x20, 0x4c, 0x74, 0x64, 0x82, 0x09, 0x00, 0x85, + 0xb0, 0xbb, 0xa4, 0x8a, 0x7f, 0xb8, 0xca, 0x30, + 0x0c, 0x06, 0x03, 0x55, 0x1d, 0x13, 0x04, 0x05, + 0x30, 0x03, 0x01, 0x01, 0xff, 0x30, 0x0d, 0x06, + 0x09, 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, + 0x01, 0x05, 0x05, 0x00, 0x03, 0x81, 0x81, 0x00, + 0x08, 0x6c, 0x45, 0x24, 0xc7, 0x6b, 0xb1, 0x59, + 0xab, 0x0c, 0x52, 0xcc, 0xf2, 0xb0, 0x14, 0xd7, + 0x87, 0x9d, 0x7a, 0x64, 0x75, 0xb5, 0x5a, 0x95, + 0x66, 0xe4, 0xc5, 0x2b, 0x8e, 0xae, 0x12, 0x66, + 0x1f, 0xeb, 0x4f, 0x38, 0xb3, 0x6e, 0x60, 0xd3, + 0x92, 0xfd, 0xf7, 0x41, 0x08, 0xb5, 0x25, 0x13, + 0xb1, 0x18, 0x7a, 0x24, 0xfb, 0x30, 0x1d, 0xba, + 0xed, 0x98, 0xb9, 0x17, 0xec, 0xe7, 0xd7, 0x31, + 0x59, 0xdb, 0x95, 0xd3, 0x1d, 0x78, 0xea, 0x50, + 0x56, 0x5c, 0xd5, 0x82, 0x5a, 0x2d, 0x5a, 0x5f, + 0x33, 0xc4, 0xb6, 0xd8, 0xc9, 0x75, 0x90, 0x96, + 0x8c, 0x0f, 0x52, 0x98, 0xb5, 0xcd, 0x98, 0x1f, + 0x89, 0x20, 0x5f, 0xf2, 0xa0, 0x1c, 0xa3, 0x1b, + 0x96, 0x94, 0xdd, 0xa9, 0xfd, 0x57, 0xe9, 0x70, + 0xe8, 0x26, 0x6d, 0x71, 0x99, 0x9b, 0x26, 0x6e, + 0x38, 0x50, 0x29, 0x6c, 0x90, 0xa7, 0xbd, 0xd9, + 0x16, 0x03, 0x01, 0x00, 0x09, 0x0d, 0x00, 0x00, + 0x05, 0x02, 0x01, 0x40, 0x00, 0x00, 0x16, 0x03, + 0x01, 0x00, 0x04, 0x0e, 0x00, 0x00, 0x00, + }, + { + 0x16, 0x03, 0x01, 0x01, 0xfb, 0x0b, 0x00, 0x01, + 0xf7, 0x00, 0x01, 0xf4, 0x00, 0x01, 0xf1, 0x30, + 0x82, 0x01, 0xed, 0x30, 0x82, 0x01, 0x58, 0xa0, + 0x03, 0x02, 0x01, 0x02, 0x02, 0x01, 0x00, 0x30, + 0x0b, 0x06, 0x09, 0x2a, 0x86, 0x48, 0x86, 0xf7, + 0x0d, 0x01, 0x01, 0x05, 0x30, 0x26, 0x31, 0x10, + 0x30, 0x0e, 0x06, 0x03, 0x55, 0x04, 0x0a, 0x13, + 0x07, 0x41, 0x63, 0x6d, 0x65, 0x20, 0x43, 0x6f, + 0x31, 0x12, 0x30, 0x10, 0x06, 0x03, 0x55, 0x04, + 0x03, 0x13, 0x09, 0x31, 0x32, 0x37, 0x2e, 0x30, + 0x2e, 0x30, 0x2e, 0x31, 0x30, 0x1e, 0x17, 0x0d, + 0x31, 0x31, 0x31, 0x32, 0x30, 0x38, 0x30, 0x37, + 0x35, 0x35, 0x31, 0x32, 0x5a, 0x17, 0x0d, 0x31, + 0x32, 0x31, 0x32, 0x30, 0x37, 0x30, 0x38, 0x30, + 0x30, 0x31, 0x32, 0x5a, 0x30, 0x26, 0x31, 0x10, + 0x30, 0x0e, 0x06, 0x03, 0x55, 0x04, 0x0a, 0x13, + 0x07, 0x41, 0x63, 0x6d, 0x65, 0x20, 0x43, 0x6f, + 0x31, 0x12, 0x30, 0x10, 0x06, 0x03, 0x55, 0x04, + 0x03, 0x13, 0x09, 0x31, 0x32, 0x37, 0x2e, 0x30, + 0x2e, 0x30, 0x2e, 0x31, 0x30, 0x81, 0x9c, 0x30, + 0x0b, 0x06, 0x09, 0x2a, 0x86, 0x48, 0x86, 0xf7, + 0x0d, 0x01, 0x01, 0x01, 0x03, 0x81, 0x8c, 0x00, + 0x30, 0x81, 0x88, 0x02, 0x81, 0x80, 0x4e, 0xd0, + 0x7b, 0x31, 0xe3, 0x82, 0x64, 0xd9, 0x59, 0xc0, + 0xc2, 0x87, 0xa4, 0x5e, 0x1e, 
0x8b, 0x73, 0x33, + 0xc7, 0x63, 0x53, 0xdf, 0x66, 0x92, 0x06, 0x84, + 0xf6, 0x64, 0xd5, 0x8f, 0xe4, 0x36, 0xa7, 0x1d, + 0x2b, 0xe8, 0xb3, 0x20, 0x36, 0x45, 0x23, 0xb5, + 0xe3, 0x95, 0xae, 0xed, 0xe0, 0xf5, 0x20, 0x9c, + 0x8d, 0x95, 0xdf, 0x7f, 0x5a, 0x12, 0xef, 0x87, + 0xe4, 0x5b, 0x68, 0xe4, 0xe9, 0x0e, 0x74, 0xec, + 0x04, 0x8a, 0x7f, 0xde, 0x93, 0x27, 0xc4, 0x01, + 0x19, 0x7a, 0xbd, 0xf2, 0xdc, 0x3d, 0x14, 0xab, + 0xd0, 0x54, 0xca, 0x21, 0x0c, 0xd0, 0x4d, 0x6e, + 0x87, 0x2e, 0x5c, 0xc5, 0xd2, 0xbb, 0x4d, 0x4b, + 0x4f, 0xce, 0xb6, 0x2c, 0xf7, 0x7e, 0x88, 0xec, + 0x7c, 0xd7, 0x02, 0x91, 0x74, 0xa6, 0x1e, 0x0c, + 0x1a, 0xda, 0xe3, 0x4a, 0x5a, 0x2e, 0xde, 0x13, + 0x9c, 0x4c, 0x40, 0x88, 0x59, 0x93, 0x02, 0x03, + 0x01, 0x00, 0x01, 0xa3, 0x32, 0x30, 0x30, 0x30, + 0x0e, 0x06, 0x03, 0x55, 0x1d, 0x0f, 0x01, 0x01, + 0xff, 0x04, 0x04, 0x03, 0x02, 0x00, 0xa0, 0x30, + 0x0d, 0x06, 0x03, 0x55, 0x1d, 0x0e, 0x04, 0x06, + 0x04, 0x04, 0x01, 0x02, 0x03, 0x04, 0x30, 0x0f, + 0x06, 0x03, 0x55, 0x1d, 0x23, 0x04, 0x08, 0x30, + 0x06, 0x80, 0x04, 0x01, 0x02, 0x03, 0x04, 0x30, + 0x0b, 0x06, 0x09, 0x2a, 0x86, 0x48, 0x86, 0xf7, + 0x0d, 0x01, 0x01, 0x05, 0x03, 0x81, 0x81, 0x00, + 0x36, 0x1f, 0xb3, 0x7a, 0x0c, 0x75, 0xc9, 0x6e, + 0x37, 0x46, 0x61, 0x2b, 0xd5, 0xbd, 0xc0, 0xa7, + 0x4b, 0xcc, 0x46, 0x9a, 0x81, 0x58, 0x7c, 0x85, + 0x79, 0x29, 0xc8, 0xc8, 0xc6, 0x67, 0xdd, 0x32, + 0x56, 0x45, 0x2b, 0x75, 0xb6, 0xe9, 0x24, 0xa9, + 0x50, 0x9a, 0xbe, 0x1f, 0x5a, 0xfa, 0x1a, 0x15, + 0xd9, 0xcc, 0x55, 0x95, 0x72, 0x16, 0x83, 0xb9, + 0xc2, 0xb6, 0x8f, 0xfd, 0x88, 0x8c, 0x38, 0x84, + 0x1d, 0xab, 0x5d, 0x92, 0x31, 0x13, 0x4f, 0xfd, + 0x83, 0x3b, 0xc6, 0x9d, 0xf1, 0x11, 0x62, 0xb6, + 0x8b, 0xec, 0xab, 0x67, 0xbe, 0xc8, 0x64, 0xb0, + 0x11, 0x50, 0x46, 0x58, 0x17, 0x6b, 0x99, 0x1c, + 0xd3, 0x1d, 0xfc, 0x06, 0xf1, 0x0e, 0xe5, 0x96, + 0xa8, 0x0c, 0xf9, 0x78, 0x20, 0xb7, 0x44, 0x18, + 0x51, 0x8d, 0x10, 0x7e, 0x4f, 0x94, 0x67, 0xdf, + 0xa3, 0x4e, 0x70, 0x73, 0x8e, 0x90, 0x91, 0x85, + 0x16, 0x03, 0x01, 0x00, 0x86, 0x10, 0x00, 0x00, + 0x82, 0x00, 0x80, 0x0a, 0x4e, 0x89, 0xdf, 0x3a, + 0x3f, 0xf0, 0x4f, 0xef, 0x1a, 0x90, 0xd4, 0x3c, + 0xaf, 0x10, 0x57, 0xb0, 0xa1, 0x5f, 0xcd, 0x62, + 0x01, 0xe9, 0x0c, 0x36, 0x42, 0xfd, 0xaf, 0x23, + 0xf9, 0x14, 0xa6, 0x72, 0x26, 0x4e, 0x01, 0xdb, + 0xac, 0xb7, 0x4c, 0xe6, 0xa9, 0x52, 0xe2, 0xec, + 0x26, 0x8c, 0x7a, 0x64, 0xf8, 0x0b, 0x4c, 0x2f, + 0xa9, 0xcb, 0x75, 0xaf, 0x60, 0xd4, 0xb4, 0xe6, + 0xe8, 0xdb, 0x78, 0x78, 0x85, 0xf6, 0x0c, 0x95, + 0xcc, 0xb6, 0x55, 0xb9, 0xba, 0x9e, 0x91, 0xbc, + 0x66, 0xdb, 0x1e, 0x28, 0xab, 0x73, 0xce, 0x8b, + 0xd0, 0xd3, 0xe8, 0xbc, 0xd0, 0x21, 0x28, 0xbd, + 0xfb, 0x74, 0x64, 0xde, 0x3b, 0x3b, 0xd3, 0x4c, + 0x32, 0x40, 0x82, 0xba, 0x91, 0x1e, 0xe8, 0x47, + 0xc2, 0x09, 0xb7, 0x16, 0xaa, 0x25, 0xa9, 0x3c, + 0x6c, 0xa7, 0xf8, 0xc9, 0x54, 0x84, 0xc6, 0xf7, + 0x56, 0x05, 0xa4, 0x16, 0x03, 0x01, 0x00, 0x86, + 0x0f, 0x00, 0x00, 0x82, 0x00, 0x80, 0x4b, 0xab, + 0xda, 0xac, 0x2a, 0xb3, 0xe6, 0x34, 0x55, 0xcd, + 0xf2, 0x4b, 0x67, 0xe3, 0xd3, 0xff, 0xa3, 0xf4, + 0x79, 0x82, 0x01, 0x47, 0x8a, 0xe3, 0x9f, 0x89, + 0x70, 0xbe, 0x24, 0x24, 0xb7, 0x69, 0x60, 0xed, + 0x55, 0xa0, 0xca, 0x72, 0xb6, 0x4a, 0xbc, 0x1d, + 0xe2, 0x3f, 0xb5, 0x31, 0xda, 0x02, 0xf6, 0x37, + 0x51, 0xf8, 0x4c, 0x88, 0x2e, 0xb3, 0x8a, 0xe8, + 0x7b, 0x4a, 0x90, 0x36, 0xe4, 0xa6, 0x31, 0x95, + 0x8b, 0xa0, 0xc6, 0x91, 0x12, 0xb9, 0x35, 0x4e, + 0x72, 0xeb, 0x5c, 0xa2, 0xe8, 0x4c, 0x68, 0xf9, + 0x69, 0xfa, 0x70, 0x60, 0x6c, 0x7f, 0x32, 0x99, + 0xf1, 0xc3, 0x2d, 0xb4, 0x59, 0x58, 0x87, 0xaf, + 0x67, 0x62, 0x90, 0xe7, 0x8d, 
0xd0, 0xa3, 0x77, + 0x33, 0xc2, 0x9b, 0xd5, 0x9c, 0xc7, 0xea, 0x25, + 0x98, 0x76, 0x9c, 0xe0, 0x6a, 0x03, 0x3a, 0x10, + 0xfd, 0x10, 0x3d, 0x55, 0x53, 0xa0, 0x14, 0x03, + 0x01, 0x00, 0x01, 0x01, 0x16, 0x03, 0x01, 0x00, + 0x24, 0xd5, 0x12, 0xfc, 0xb9, 0x5a, 0xe3, 0x27, + 0x01, 0xbe, 0xc3, 0x77, 0x17, 0x1a, 0xbb, 0x4f, + 0xae, 0xd5, 0xa7, 0xee, 0x56, 0x61, 0x0d, 0x40, + 0xf4, 0xa4, 0xb5, 0xcc, 0x76, 0xfd, 0xbd, 0x13, + 0x04, 0xe1, 0xb8, 0xc7, 0x36, + }, + { + 0x16, 0x03, 0x01, 0x02, 0x67, 0x04, 0x00, 0x02, + 0x63, 0x00, 0x00, 0x00, 0x00, 0x02, 0x5d, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x65, + 0xe8, 0x4b, 0xd1, 0xef, 0xba, 0x1f, 0xe2, 0x69, + 0x07, 0x7f, 0x85, 0x2d, 0x4e, 0x2a, 0x2e, 0xbd, + 0x05, 0xe9, 0xc1, 0x6c, 0x9e, 0xbf, 0x47, 0x18, + 0x91, 0x77, 0xf7, 0xe8, 0xb6, 0x27, 0x37, 0xa6, + 0x6b, 0x87, 0x29, 0xbb, 0x3b, 0xe5, 0x68, 0x62, + 0x04, 0x3e, 0xad, 0x4d, 0xff, 0xad, 0xf1, 0x22, + 0x87, 0x8d, 0xf6, 0x04, 0x3b, 0x59, 0x22, 0xf7, + 0xfd, 0x88, 0x0e, 0xa4, 0x09, 0xc0, 0x0d, 0x10, + 0x80, 0x10, 0x79, 0xee, 0x70, 0x96, 0xdb, 0x22, + 0x8b, 0xb7, 0xac, 0xe0, 0x98, 0xad, 0xe9, 0xe3, + 0xcb, 0xea, 0x9f, 0xe6, 0x83, 0x28, 0x7c, 0x7e, + 0x4e, 0x9a, 0x8d, 0xd9, 0xf3, 0x86, 0xf4, 0x89, + 0x8b, 0x79, 0x8f, 0xbb, 0xe9, 0x74, 0x02, 0x02, + 0x14, 0x04, 0xea, 0xba, 0x16, 0x10, 0xa1, 0x85, + 0xbe, 0x4e, 0x4e, 0x92, 0xc5, 0x83, 0xf6, 0x1e, + 0x1f, 0xd4, 0x25, 0xc2, 0xc2, 0xb9, 0xce, 0x33, + 0x63, 0x66, 0x79, 0x1f, 0x54, 0x35, 0xc1, 0xe8, + 0x89, 0x34, 0x78, 0x94, 0x36, 0x14, 0xef, 0x01, + 0x1f, 0xf1, 0xbd, 0x77, 0x2c, 0x4d, 0xac, 0x5c, + 0x5c, 0x4a, 0xc6, 0xed, 0xd8, 0x0e, 0x72, 0x84, + 0x83, 0xdc, 0x56, 0x84, 0xc8, 0xf3, 0x89, 0x56, + 0xfd, 0x89, 0xc1, 0xc9, 0x9a, 0x29, 0x91, 0x7e, + 0x19, 0xe9, 0x8b, 0x5b, 0x11, 0x15, 0x4e, 0x6c, + 0xf4, 0x89, 0xe7, 0x6d, 0x68, 0x1e, 0xf9, 0x6c, + 0x23, 0x72, 0x05, 0x68, 0x82, 0x60, 0x84, 0x1f, + 0x83, 0x20, 0x09, 0x86, 0x10, 0x81, 0xec, 0xec, + 0xdc, 0x25, 0x53, 0x20, 0xfa, 0xa9, 0x41, 0x64, + 0xd6, 0x20, 0xf3, 0xf4, 0x52, 0xf2, 0x80, 0x62, + 0x83, 0xc9, 0x23, 0x66, 0x44, 0x95, 0x5a, 0x99, + 0x8a, 0xe1, 0x26, 0x63, 0xc1, 0x8b, 0x31, 0xf9, + 0x21, 0x06, 0x77, 0x04, 0x27, 0xf2, 0x0c, 0x63, + 0x83, 0x45, 0xa0, 0xa9, 0x7b, 0xcf, 0xdf, 0xd7, + 0x56, 0x75, 0xbc, 0xdd, 0x95, 0x36, 0xb1, 0x75, + 0x39, 0x05, 0x00, 0x3c, 0x8a, 0x79, 0xd6, 0xe9, + 0xf0, 0x4b, 0xdc, 0x51, 0x6b, 0x01, 0x94, 0x16, + 0x87, 0x12, 0x92, 0x6c, 0x07, 0xc1, 0xf5, 0x58, + 0xb7, 0x2a, 0x81, 0xf5, 0xa0, 0x37, 0x8b, 0xa6, + 0x22, 0xfe, 0x28, 0x0a, 0x7e, 0x68, 0xe2, 0xda, + 0x6c, 0x53, 0xee, 0x0e, 0x8d, 0x2d, 0x8b, 0x0b, + 0xda, 0xf8, 0x99, 0x3e, 0x0e, 0xed, 0x9f, 0xc1, + 0x2b, 0xf6, 0xfe, 0xe9, 0x52, 0x38, 0x7b, 0x83, + 0x9a, 0x50, 0xa6, 0xd7, 0x49, 0x83, 0x43, 0x7e, + 0x82, 0xec, 0xc7, 0x09, 0x3d, 0x3d, 0xb1, 0xee, + 0xe8, 0xc5, 0x6a, 0xc3, 0x3d, 0x4b, 0x4c, 0x6a, + 0xbb, 0x0b, 0x2c, 0x24, 0x2e, 0xdb, 0x7d, 0x57, + 0x87, 0xb4, 0x80, 0xa5, 0xae, 0xff, 0x54, 0xa8, + 0xa5, 0x27, 0x69, 0x95, 0xc8, 0xe7, 0x79, 0xc7, + 0x89, 0x2a, 0x73, 0x49, 0xcb, 0xf5, 0xc5, 0xbc, + 0x4a, 0xe0, 0x73, 0xa9, 0xbc, 0x88, 0x64, 0x96, + 0x98, 0xa5, 0x1e, 0xe3, 0x43, 0xc1, 0x7d, 0x78, + 0xc7, 0x94, 0x72, 0xd4, 0x2c, 0x6e, 0x85, 0x39, + 0x9a, 0xaf, 0xdb, 0xa1, 0xe9, 0xe2, 0xcb, 0x37, + 0x04, 0xc6, 0x8c, 0x81, 0xd3, 0x2a, 0xb7, 0xbe, + 0x6c, 0x07, 0x1f, 0x5e, 0xd9, 0x00, 0xd2, 0xf7, + 0xe1, 0xa7, 0xbc, 0x0c, 0xb6, 0x6d, 0xfb, 0x3f, + 0x3d, 0x24, 0xaa, 0xfb, 0x7e, 0xe1, 0xb5, 0x1b, + 0xff, 0x38, 0xaa, 0x69, 0x59, 0x38, 0x52, 0x9a, + 0x0e, 0x6d, 0xbc, 0xde, 0x4f, 0x13, 0x09, 
0x17, + 0xc4, 0xa9, 0x05, 0x84, 0xbc, 0x50, 0xef, 0x40, + 0xb0, 0x4c, 0x24, 0x32, 0xed, 0x94, 0x2c, 0xdd, + 0xda, 0x20, 0x24, 0x67, 0xe2, 0xea, 0x71, 0x3d, + 0x4a, 0x04, 0x0d, 0x98, 0x29, 0x20, 0x4c, 0xeb, + 0x70, 0xce, 0x45, 0x9e, 0x5a, 0xaf, 0xb6, 0xa3, + 0x92, 0xc8, 0x28, 0xf2, 0xe3, 0xe8, 0x8a, 0x5d, + 0x0a, 0x33, 0x79, 0x9b, 0x6a, 0xf3, 0x30, 0x01, + 0x1d, 0x47, 0xbd, 0x01, 0xcc, 0x4d, 0x71, 0xc0, + 0x56, 0xfa, 0xfd, 0x37, 0xed, 0x0f, 0x27, 0xc0, + 0xbb, 0xa0, 0xee, 0xc3, 0x79, 0x8b, 0xe7, 0x41, + 0x8f, 0xfa, 0x3a, 0xcb, 0x45, 0x3b, 0x85, 0x9f, + 0x06, 0x90, 0xb2, 0x51, 0x7a, 0xc3, 0x11, 0x41, + 0x4b, 0xe3, 0x26, 0x94, 0x3e, 0xa2, 0xfd, 0x0a, + 0xda, 0x50, 0xf6, 0x50, 0x78, 0x19, 0x6c, 0x52, + 0xd1, 0x12, 0x76, 0xc2, 0x50, 0x2f, 0x0b, 0xca, + 0x33, 0xe5, 0x79, 0x93, 0x14, 0x03, 0x01, 0x00, + 0x01, 0x01, 0x16, 0x03, 0x01, 0x00, 0x24, 0x2b, + 0x51, 0x42, 0x95, 0x6b, 0xca, 0x9f, 0x42, 0x5d, + 0xd2, 0xd9, 0x67, 0xf9, 0x49, 0x30, 0xfd, 0x2a, + 0x46, 0xd3, 0x04, 0xf4, 0x86, 0xf9, 0x11, 0x34, + 0x82, 0xac, 0xe2, 0xc2, 0x2d, 0xc4, 0xd0, 0xfe, + 0xa9, 0xc9, 0x4b, 0x17, 0x03, 0x01, 0x00, 0x21, + 0x65, 0x1c, 0xe9, 0x5c, 0xb6, 0xe2, 0x7c, 0x8e, + 0x49, 0x12, 0x1b, 0xe6, 0x40, 0xd3, 0x97, 0x21, + 0x76, 0x01, 0xe5, 0x80, 0x5e, 0xf3, 0x11, 0x47, + 0x25, 0x02, 0x78, 0x8e, 0x6b, 0xae, 0xb3, 0xf3, + 0x59, 0x15, 0x03, 0x01, 0x00, 0x16, 0x38, 0xc1, + 0x99, 0x2e, 0xf8, 0x6f, 0x45, 0xa4, 0x10, 0x79, + 0x5b, 0xc1, 0x47, 0x9a, 0xf6, 0x5c, 0x90, 0xeb, + 0xa6, 0xe3, 0x1a, 0x24, + }, + }}, +} + +var tls11ECDHEAESServerScript = [][]byte{ + { + 0x16, 0x03, 0x01, 0x01, 0x46, 0x01, 0x00, 0x01, + 0x42, 0x03, 0x03, 0x51, 0x9f, 0xa3, 0xb0, 0xb7, + 0x1d, 0x26, 0x93, 0x36, 0xc0, 0x8d, 0x7e, 0xf8, + 0x4f, 0x6f, 0xc9, 0x3c, 0x31, 0x1e, 0x7f, 0xb1, + 0xf0, 0xc1, 0x0f, 0xf9, 0x0c, 0xa2, 0xd5, 0xca, + 0x48, 0xe5, 0x35, 0x00, 0x00, 0xd0, 0xc0, 0x30, + 0xc0, 0x2c, 0xc0, 0x28, 0xc0, 0x24, 0xc0, 0x14, + 0xc0, 0x0a, 0xc0, 0x22, 0xc0, 0x21, 0x00, 0xa5, + 0x00, 0xa3, 0x00, 0xa1, 0x00, 0x9f, 0x00, 0x6b, + 0x00, 0x6a, 0x00, 0x69, 0x00, 0x68, 0x00, 0x39, + 0x00, 0x38, 0x00, 0x37, 0x00, 0x36, 0x00, 0x88, + 0x00, 0x87, 0x00, 0x86, 0x00, 0x85, 0xc0, 0x32, + 0xc0, 0x2e, 0xc0, 0x2a, 0xc0, 0x26, 0xc0, 0x0f, + 0xc0, 0x05, 0x00, 0x9d, 0x00, 0x3d, 0x00, 0x35, + 0x00, 0x84, 0xc0, 0x12, 0xc0, 0x08, 0xc0, 0x1c, + 0xc0, 0x1b, 0x00, 0x16, 0x00, 0x13, 0x00, 0x10, + 0x00, 0x0d, 0xc0, 0x0d, 0xc0, 0x03, 0x00, 0x0a, + 0xc0, 0x2f, 0xc0, 0x2b, 0xc0, 0x27, 0xc0, 0x23, + 0xc0, 0x13, 0xc0, 0x09, 0xc0, 0x1f, 0xc0, 0x1e, + 0x00, 0xa4, 0x00, 0xa2, 0x00, 0xa0, 0x00, 0x9e, + 0x00, 0x67, 0x00, 0x40, 0x00, 0x3f, 0x00, 0x3e, + 0x00, 0x33, 0x00, 0x32, 0x00, 0x31, 0x00, 0x30, + 0x00, 0x9a, 0x00, 0x99, 0x00, 0x98, 0x00, 0x97, + 0x00, 0x45, 0x00, 0x44, 0x00, 0x43, 0x00, 0x42, + 0xc0, 0x31, 0xc0, 0x2d, 0xc0, 0x29, 0xc0, 0x25, + 0xc0, 0x0e, 0xc0, 0x04, 0x00, 0x9c, 0x00, 0x3c, + 0x00, 0x2f, 0x00, 0x96, 0x00, 0x41, 0x00, 0x07, + 0xc0, 0x11, 0xc0, 0x07, 0xc0, 0x0c, 0xc0, 0x02, + 0x00, 0x05, 0x00, 0x04, 0x00, 0x15, 0x00, 0x12, + 0x00, 0x0f, 0x00, 0x0c, 0x00, 0x09, 0x00, 0x14, + 0x00, 0x11, 0x00, 0x0e, 0x00, 0x0b, 0x00, 0x08, + 0x00, 0x06, 0x00, 0x03, 0x00, 0xff, 0x01, 0x00, + 0x00, 0x49, 0x00, 0x0b, 0x00, 0x04, 0x03, 0x00, + 0x01, 0x02, 0x00, 0x0a, 0x00, 0x34, 0x00, 0x32, + 0x00, 0x0e, 0x00, 0x0d, 0x00, 0x19, 0x00, 0x0b, + 0x00, 0x0c, 0x00, 0x18, 0x00, 0x09, 0x00, 0x0a, + 0x00, 0x16, 0x00, 0x17, 0x00, 0x08, 0x00, 0x06, + 0x00, 0x07, 0x00, 0x14, 0x00, 0x15, 0x00, 0x04, + 0x00, 0x05, 0x00, 0x12, 0x00, 0x13, 0x00, 0x01, + 0x00, 0x02, 0x00, 0x03, 0x00, 0x0f, 0x00, 0x10, + 0x00, 
0x11, 0x00, 0x23, 0x00, 0x00, 0x00, 0x0f, + 0x00, 0x01, 0x01, + }, + { + 0x16, 0x03, 0x02, 0x00, 0x30, 0x02, 0x00, 0x00, + 0x2c, 0x03, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0xc0, 0x13, 0x00, 0x00, + 0x04, 0x00, 0x23, 0x00, 0x00, 0x16, 0x03, 0x02, + 0x02, 0xbe, 0x0b, 0x00, 0x02, 0xba, 0x00, 0x02, + 0xb7, 0x00, 0x02, 0xb4, 0x30, 0x82, 0x02, 0xb0, + 0x30, 0x82, 0x02, 0x19, 0xa0, 0x03, 0x02, 0x01, + 0x02, 0x02, 0x09, 0x00, 0x85, 0xb0, 0xbb, 0xa4, + 0x8a, 0x7f, 0xb8, 0xca, 0x30, 0x0d, 0x06, 0x09, + 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x01, + 0x05, 0x05, 0x00, 0x30, 0x45, 0x31, 0x0b, 0x30, + 0x09, 0x06, 0x03, 0x55, 0x04, 0x06, 0x13, 0x02, + 0x41, 0x55, 0x31, 0x13, 0x30, 0x11, 0x06, 0x03, + 0x55, 0x04, 0x08, 0x13, 0x0a, 0x53, 0x6f, 0x6d, + 0x65, 0x2d, 0x53, 0x74, 0x61, 0x74, 0x65, 0x31, + 0x21, 0x30, 0x1f, 0x06, 0x03, 0x55, 0x04, 0x0a, + 0x13, 0x18, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x6e, + 0x65, 0x74, 0x20, 0x57, 0x69, 0x64, 0x67, 0x69, + 0x74, 0x73, 0x20, 0x50, 0x74, 0x79, 0x20, 0x4c, + 0x74, 0x64, 0x30, 0x1e, 0x17, 0x0d, 0x31, 0x30, + 0x30, 0x34, 0x32, 0x34, 0x30, 0x39, 0x30, 0x39, + 0x33, 0x38, 0x5a, 0x17, 0x0d, 0x31, 0x31, 0x30, + 0x34, 0x32, 0x34, 0x30, 0x39, 0x30, 0x39, 0x33, + 0x38, 0x5a, 0x30, 0x45, 0x31, 0x0b, 0x30, 0x09, + 0x06, 0x03, 0x55, 0x04, 0x06, 0x13, 0x02, 0x41, + 0x55, 0x31, 0x13, 0x30, 0x11, 0x06, 0x03, 0x55, + 0x04, 0x08, 0x13, 0x0a, 0x53, 0x6f, 0x6d, 0x65, + 0x2d, 0x53, 0x74, 0x61, 0x74, 0x65, 0x31, 0x21, + 0x30, 0x1f, 0x06, 0x03, 0x55, 0x04, 0x0a, 0x13, + 0x18, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x65, + 0x74, 0x20, 0x57, 0x69, 0x64, 0x67, 0x69, 0x74, + 0x73, 0x20, 0x50, 0x74, 0x79, 0x20, 0x4c, 0x74, + 0x64, 0x30, 0x81, 0x9f, 0x30, 0x0d, 0x06, 0x09, + 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x01, + 0x01, 0x05, 0x00, 0x03, 0x81, 0x8d, 0x00, 0x30, + 0x81, 0x89, 0x02, 0x81, 0x81, 0x00, 0xbb, 0x79, + 0xd6, 0xf5, 0x17, 0xb5, 0xe5, 0xbf, 0x46, 0x10, + 0xd0, 0xdc, 0x69, 0xbe, 0xe6, 0x2b, 0x07, 0x43, + 0x5a, 0xd0, 0x03, 0x2d, 0x8a, 0x7a, 0x43, 0x85, + 0xb7, 0x14, 0x52, 0xe7, 0xa5, 0x65, 0x4c, 0x2c, + 0x78, 0xb8, 0x23, 0x8c, 0xb5, 0xb4, 0x82, 0xe5, + 0xde, 0x1f, 0x95, 0x3b, 0x7e, 0x62, 0xa5, 0x2c, + 0xa5, 0x33, 0xd6, 0xfe, 0x12, 0x5c, 0x7a, 0x56, + 0xfc, 0xf5, 0x06, 0xbf, 0xfa, 0x58, 0x7b, 0x26, + 0x3f, 0xb5, 0xcd, 0x04, 0xd3, 0xd0, 0xc9, 0x21, + 0x96, 0x4a, 0xc7, 0xf4, 0x54, 0x9f, 0x5a, 0xbf, + 0xef, 0x42, 0x71, 0x00, 0xfe, 0x18, 0x99, 0x07, + 0x7f, 0x7e, 0x88, 0x7d, 0x7d, 0xf1, 0x04, 0x39, + 0xc4, 0xa2, 0x2e, 0xdb, 0x51, 0xc9, 0x7c, 0xe3, + 0xc0, 0x4c, 0x3b, 0x32, 0x66, 0x01, 0xcf, 0xaf, + 0xb1, 0x1d, 0xb8, 0x71, 0x9a, 0x1d, 0xdb, 0xdb, + 0x89, 0x6b, 0xae, 0xda, 0x2d, 0x79, 0x02, 0x03, + 0x01, 0x00, 0x01, 0xa3, 0x81, 0xa7, 0x30, 0x81, + 0xa4, 0x30, 0x1d, 0x06, 0x03, 0x55, 0x1d, 0x0e, + 0x04, 0x16, 0x04, 0x14, 0xb1, 0xad, 0xe2, 0x85, + 0x5a, 0xcf, 0xcb, 0x28, 0xdb, 0x69, 0xce, 0x23, + 0x69, 0xde, 0xd3, 0x26, 0x8e, 0x18, 0x88, 0x39, + 0x30, 0x75, 0x06, 0x03, 0x55, 0x1d, 0x23, 0x04, + 0x6e, 0x30, 0x6c, 0x80, 0x14, 0xb1, 0xad, 0xe2, + 0x85, 0x5a, 0xcf, 0xcb, 0x28, 0xdb, 0x69, 0xce, + 0x23, 0x69, 0xde, 0xd3, 0x26, 0x8e, 0x18, 0x88, + 0x39, 0xa1, 0x49, 0xa4, 0x47, 0x30, 0x45, 0x31, + 0x0b, 0x30, 0x09, 0x06, 0x03, 0x55, 0x04, 0x06, + 0x13, 0x02, 0x41, 0x55, 0x31, 0x13, 0x30, 0x11, + 0x06, 0x03, 0x55, 0x04, 0x08, 0x13, 0x0a, 0x53, + 0x6f, 0x6d, 0x65, 0x2d, 0x53, 0x74, 0x61, 0x74, + 0x65, 0x31, 0x21, 0x30, 0x1f, 
0x06, 0x03, 0x55, + 0x04, 0x0a, 0x13, 0x18, 0x49, 0x6e, 0x74, 0x65, + 0x72, 0x6e, 0x65, 0x74, 0x20, 0x57, 0x69, 0x64, + 0x67, 0x69, 0x74, 0x73, 0x20, 0x50, 0x74, 0x79, + 0x20, 0x4c, 0x74, 0x64, 0x82, 0x09, 0x00, 0x85, + 0xb0, 0xbb, 0xa4, 0x8a, 0x7f, 0xb8, 0xca, 0x30, + 0x0c, 0x06, 0x03, 0x55, 0x1d, 0x13, 0x04, 0x05, + 0x30, 0x03, 0x01, 0x01, 0xff, 0x30, 0x0d, 0x06, + 0x09, 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, + 0x01, 0x05, 0x05, 0x00, 0x03, 0x81, 0x81, 0x00, + 0x08, 0x6c, 0x45, 0x24, 0xc7, 0x6b, 0xb1, 0x59, + 0xab, 0x0c, 0x52, 0xcc, 0xf2, 0xb0, 0x14, 0xd7, + 0x87, 0x9d, 0x7a, 0x64, 0x75, 0xb5, 0x5a, 0x95, + 0x66, 0xe4, 0xc5, 0x2b, 0x8e, 0xae, 0x12, 0x66, + 0x1f, 0xeb, 0x4f, 0x38, 0xb3, 0x6e, 0x60, 0xd3, + 0x92, 0xfd, 0xf7, 0x41, 0x08, 0xb5, 0x25, 0x13, + 0xb1, 0x18, 0x7a, 0x24, 0xfb, 0x30, 0x1d, 0xba, + 0xed, 0x98, 0xb9, 0x17, 0xec, 0xe7, 0xd7, 0x31, + 0x59, 0xdb, 0x95, 0xd3, 0x1d, 0x78, 0xea, 0x50, + 0x56, 0x5c, 0xd5, 0x82, 0x5a, 0x2d, 0x5a, 0x5f, + 0x33, 0xc4, 0xb6, 0xd8, 0xc9, 0x75, 0x90, 0x96, + 0x8c, 0x0f, 0x52, 0x98, 0xb5, 0xcd, 0x98, 0x1f, + 0x89, 0x20, 0x5f, 0xf2, 0xa0, 0x1c, 0xa3, 0x1b, + 0x96, 0x94, 0xdd, 0xa9, 0xfd, 0x57, 0xe9, 0x70, + 0xe8, 0x26, 0x6d, 0x71, 0x99, 0x9b, 0x26, 0x6e, + 0x38, 0x50, 0x29, 0x6c, 0x90, 0xa7, 0xbd, 0xd9, + 0x16, 0x03, 0x02, 0x01, 0x0f, 0x0c, 0x00, 0x01, + 0x0b, 0x03, 0x00, 0x19, 0x85, 0x04, 0x01, 0x39, + 0xdc, 0xee, 0x44, 0x17, 0x5e, 0xdb, 0xd7, 0x27, + 0xaf, 0xb6, 0x56, 0xd9, 0xb4, 0x43, 0x5a, 0x99, + 0xcf, 0xaa, 0x31, 0x37, 0x0c, 0x6f, 0x3a, 0xa0, + 0xf8, 0x53, 0xc4, 0x74, 0xd1, 0x91, 0x0a, 0x46, + 0xf5, 0x38, 0x3b, 0x5c, 0x09, 0xd8, 0x97, 0xdc, + 0x4b, 0xaa, 0x70, 0x26, 0x48, 0xf2, 0xd6, 0x0b, + 0x31, 0xc9, 0xf8, 0xd4, 0x98, 0x43, 0xe1, 0x6c, + 0xd5, 0xc7, 0xb2, 0x8e, 0x0b, 0x01, 0xe6, 0xb6, + 0x00, 0x28, 0x80, 0x7b, 0xfc, 0x96, 0x8f, 0x0d, + 0xa2, 0x4f, 0xb0, 0x79, 0xaf, 0xdc, 0x61, 0x28, + 0x63, 0x33, 0x78, 0xf6, 0x31, 0x39, 0xfd, 0x8a, + 0xf4, 0x15, 0x18, 0x11, 0xfe, 0xdb, 0xd5, 0x07, + 0xda, 0x2c, 0xed, 0x49, 0xa0, 0x23, 0xbf, 0xd0, + 0x3a, 0x38, 0x1d, 0x54, 0xae, 0x1c, 0x7b, 0xea, + 0x29, 0xee, 0xd0, 0x38, 0xc1, 0x76, 0xa7, 0x7f, + 0x2a, 0xf4, 0xce, 0x1e, 0xac, 0xcc, 0x94, 0x79, + 0x90, 0x33, 0x00, 0x80, 0x16, 0x83, 0x9b, 0xf9, + 0x72, 0xdb, 0x9f, 0x55, 0x02, 0xe1, 0x04, 0xf7, + 0xb5, 0x3f, 0x4c, 0x71, 0x13, 0x5a, 0x91, 0xe9, + 0x1d, 0xeb, 0x9d, 0x9c, 0xfb, 0x88, 0xef, 0xca, + 0xec, 0x7d, 0x9b, 0xdd, 0xd9, 0xee, 0x2b, 0x8e, + 0xef, 0xf8, 0xb6, 0xc7, 0x7d, 0xfe, 0xda, 0x7f, + 0x90, 0x2e, 0x53, 0xf1, 0x64, 0x95, 0xfc, 0x66, + 0xfc, 0x87, 0x27, 0xb6, 0x9f, 0xc8, 0x3a, 0x95, + 0x68, 0x17, 0xe1, 0x7d, 0xf1, 0x88, 0xe8, 0x17, + 0x5f, 0x99, 0x90, 0x3f, 0x47, 0x47, 0x81, 0x06, + 0xe2, 0x8e, 0x22, 0x56, 0x8f, 0xc2, 0x14, 0xe5, + 0x62, 0xa7, 0x0d, 0x41, 0x3c, 0xc7, 0x4a, 0x0a, + 0x74, 0x4b, 0xda, 0x00, 0x8e, 0x4f, 0x90, 0xe6, + 0xd7, 0x68, 0xe5, 0x8b, 0xf2, 0x3f, 0x53, 0x1d, + 0x7a, 0xe6, 0xb3, 0xe9, 0x8a, 0xc9, 0x4d, 0x19, + 0xa6, 0xcf, 0xf9, 0xed, 0x5e, 0x26, 0xdc, 0x90, + 0x1c, 0x41, 0xad, 0x7c, 0x16, 0x03, 0x02, 0x00, + 0x04, 0x0e, 0x00, 0x00, 0x00, + }, + { + 0x16, 0x03, 0x02, 0x00, 0x8a, 0x10, 0x00, 0x00, + 0x86, 0x85, 0x04, 0x01, 0x11, 0xf2, 0xa4, 0x2d, + 0x1a, 0x75, 0x6c, 0xbc, 0x2d, 0x91, 0x95, 0x07, + 0xbe, 0xd6, 0x41, 0x7a, 0xbb, 0xc2, 0x7b, 0xa6, + 0x9b, 0xe3, 0xdc, 0x41, 0x7f, 0x1e, 0x2e, 0xcc, + 0x6d, 0xa3, 0x85, 0x53, 0x98, 0x9f, 0x2d, 0xe6, + 0x3c, 0xb9, 0x82, 0xa6, 0x80, 0x53, 0x9b, 0x71, + 0xfd, 0x27, 0xe5, 0xe5, 0xdf, 0x13, 0xba, 0x56, + 0x62, 0x30, 0x4a, 0x57, 0x27, 0xa7, 0xcc, 0x26, + 0x54, 0xe8, 0x65, 0x6e, 0x4d, 0x00, 0xbf, 
0x8a, + 0xcc, 0x89, 0x6a, 0x6c, 0x88, 0xda, 0x79, 0x4f, + 0xc5, 0xad, 0x6d, 0x1d, 0x7c, 0x53, 0x7b, 0x1a, + 0x96, 0xf2, 0xf8, 0x30, 0x01, 0x0b, 0xc2, 0xf0, + 0x78, 0x41, 0xf4, 0x0d, 0xe0, 0xbe, 0xb9, 0x36, + 0xe0, 0xb7, 0xee, 0x16, 0xeb, 0x25, 0x67, 0x04, + 0xc0, 0x2e, 0xd8, 0x34, 0x4a, 0x65, 0xa5, 0xf1, + 0x95, 0x75, 0xc7, 0x39, 0xa9, 0x68, 0xa9, 0x53, + 0x93, 0x5b, 0xca, 0x7b, 0x7f, 0xc0, 0x63, 0x14, + 0x03, 0x02, 0x00, 0x01, 0x01, 0x16, 0x03, 0x02, + 0x00, 0x40, 0x01, 0xb1, 0xae, 0x1b, 0x8a, 0x65, + 0xf8, 0x37, 0x50, 0x39, 0x76, 0xef, 0xaa, 0xda, + 0x84, 0xc9, 0x5f, 0x80, 0xdc, 0xfa, 0xe0, 0x46, + 0x5a, 0xc7, 0x77, 0x9d, 0x76, 0x03, 0xa6, 0xd5, + 0x0e, 0xbf, 0x25, 0x30, 0x5c, 0x99, 0x7d, 0xcd, + 0x2b, 0xaa, 0x2e, 0x8c, 0xdd, 0xda, 0xaa, 0xd7, + 0xf1, 0xf6, 0x33, 0x47, 0x51, 0x1e, 0x83, 0xa1, + 0x83, 0x04, 0xd2, 0xb2, 0xc8, 0xbc, 0x11, 0xc5, + 0x1a, 0x87, + }, + { + 0x16, 0x03, 0x02, 0x00, 0x72, 0x04, 0x00, 0x00, + 0x6e, 0x00, 0x00, 0x00, 0x00, 0x00, 0x68, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x65, + 0xeb, 0x8b, 0xc7, 0xef, 0xba, 0xe8, 0x0f, 0x69, + 0xfe, 0xfb, 0xc3, 0x3d, 0x90, 0x5d, 0xd7, 0xb2, + 0x51, 0x64, 0xac, 0xc3, 0xae, 0x33, 0x03, 0x42, + 0x45, 0x2d, 0xa7, 0x57, 0xbd, 0xa3, 0x85, 0x64, + 0xa6, 0xfe, 0x5c, 0x33, 0x04, 0x93, 0xf2, 0x7c, + 0x06, 0x6d, 0xd7, 0xd7, 0xcf, 0x4a, 0xaf, 0xb2, + 0xdd, 0x06, 0xdc, 0x28, 0x14, 0x59, 0x23, 0x02, + 0xef, 0x97, 0x6a, 0xe8, 0xec, 0xca, 0x10, 0x44, + 0xcd, 0xb8, 0x50, 0x16, 0x46, 0x5a, 0x05, 0xda, + 0x04, 0xb3, 0x0e, 0xe9, 0xf0, 0x74, 0xc5, 0x23, + 0xc2, 0x0e, 0xa1, 0x54, 0x66, 0x7b, 0xe8, 0x14, + 0x03, 0x02, 0x00, 0x01, 0x01, 0x16, 0x03, 0x02, + 0x00, 0x40, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x6b, 0x43, 0x1c, 0x58, 0xbc, 0x85, + 0xf7, 0xc1, 0x76, 0xbc, 0x72, 0x33, 0x41, 0x6b, + 0xb8, 0xf8, 0xfd, 0x53, 0x21, 0xc2, 0x41, 0x1b, + 0x72, 0x4f, 0xce, 0x97, 0xca, 0x14, 0x23, 0x4d, + 0xbc, 0x44, 0xd6, 0xd7, 0xfc, 0xbc, 0xfd, 0xfd, + 0x5d, 0x33, 0x42, 0x1b, 0x52, 0x40, 0x0a, 0x2b, + 0x6c, 0x98, 0x17, 0x03, 0x02, 0x00, 0x40, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x1d, + 0x31, 0xef, 0x03, 0x7d, 0xa5, 0x74, 0x92, 0x24, + 0x34, 0xae, 0x4e, 0xc9, 0xfc, 0x59, 0xcb, 0x64, + 0xf4, 0x45, 0xb1, 0xac, 0x02, 0xf2, 0x87, 0xe7, + 0x2f, 0xfd, 0x01, 0xca, 0x78, 0x02, 0x2e, 0x3a, + 0x38, 0xcd, 0xb1, 0xe0, 0xf2, 0x2e, 0xf6, 0x27, + 0xa0, 0xac, 0x1f, 0x91, 0x43, 0xc2, 0x3d, 0x15, + 0x03, 0x02, 0x00, 0x30, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x9f, 0x30, 0x24, 0x56, + 0x2c, 0xde, 0xa0, 0xe6, 0x44, 0x35, 0x30, 0x51, + 0xec, 0xd4, 0x69, 0x2d, 0x46, 0x64, 0x04, 0x21, + 0xfe, 0x7c, 0x4d, 0xc5, 0xd0, 0x8c, 0xf9, 0xd2, + 0x3f, 0x88, 0x69, 0xd5, + }, +} + +// $ go test -run TestRunServer -serve -clientauth 1 \ +// -ciphersuites=0xc011 -minversion=0x0303 -maxversion=0x0303 +var tls12ServerScript = [][]byte{ + { + 0x16, 0x03, 0x01, 0x01, 0x1e, 0x01, 0x00, 0x01, + 0x1a, 0x03, 0x03, 0x51, 0xe5, 0x76, 0x84, 0x0e, + 0xb9, 0x17, 0xca, 0x08, 0x47, 0xd9, 0xbd, 0xd0, + 0x94, 0xd1, 0x97, 0xca, 0x5b, 0xe7, 0x20, 0xac, + 0x8e, 0xbb, 0xc7, 0x29, 0xe9, 0x26, 0xcf, 0x7d, + 0xb3, 0xdc, 0x99, 0x00, 0x00, 0x82, 0xc0, 0x30, + 0xc0, 0x2c, 0xc0, 0x28, 0xc0, 0x24, 0xc0, 0x14, + 0xc0, 0x0a, 0x00, 0xa3, 0x00, 0x9f, 0x00, 0x6b, + 0x00, 0x6a, 0x00, 0x39, 0x00, 0x38, 0xc0, 0x32, + 0xc0, 0x2e, 0xc0, 0x2a, 0xc0, 0x26, 0xc0, 0x0f, + 0xc0, 0x05, 0x00, 0x9d, 0x00, 
0x3d, 0x00, 0x35, + 0xc0, 0x12, 0xc0, 0x08, 0x00, 0x16, 0x00, 0x13, + 0xc0, 0x0d, 0xc0, 0x03, 0x00, 0x0a, 0xc0, 0x2f, + 0xc0, 0x2b, 0xc0, 0x27, 0xc0, 0x23, 0xc0, 0x13, + 0xc0, 0x09, 0x00, 0xa2, 0x00, 0x9e, 0x00, 0x67, + 0x00, 0x40, 0x00, 0x33, 0x00, 0x32, 0xc0, 0x31, + 0xc0, 0x2d, 0xc0, 0x29, 0xc0, 0x25, 0xc0, 0x0e, + 0xc0, 0x04, 0x00, 0x9c, 0x00, 0x3c, 0x00, 0x2f, + 0x00, 0x07, 0xc0, 0x11, 0xc0, 0x07, 0xc0, 0x0c, + 0xc0, 0x02, 0x00, 0x05, 0x00, 0x04, 0x00, 0x15, + 0x00, 0x12, 0x00, 0x09, 0x00, 0x14, 0x00, 0x11, + 0x00, 0x08, 0x00, 0x06, 0x00, 0x03, 0x00, 0xff, + 0x01, 0x00, 0x00, 0x6f, 0x00, 0x0b, 0x00, 0x04, + 0x03, 0x00, 0x01, 0x02, 0x00, 0x0a, 0x00, 0x34, + 0x00, 0x32, 0x00, 0x0e, 0x00, 0x0d, 0x00, 0x19, + 0x00, 0x0b, 0x00, 0x0c, 0x00, 0x18, 0x00, 0x09, + 0x00, 0x0a, 0x00, 0x16, 0x00, 0x17, 0x00, 0x08, + 0x00, 0x06, 0x00, 0x07, 0x00, 0x14, 0x00, 0x15, + 0x00, 0x04, 0x00, 0x05, 0x00, 0x12, 0x00, 0x13, + 0x00, 0x01, 0x00, 0x02, 0x00, 0x03, 0x00, 0x0f, + 0x00, 0x10, 0x00, 0x11, 0x00, 0x23, 0x00, 0x00, + 0x00, 0x0d, 0x00, 0x22, 0x00, 0x20, 0x06, 0x01, + 0x06, 0x02, 0x06, 0x03, 0x05, 0x01, 0x05, 0x02, + 0x05, 0x03, 0x04, 0x01, 0x04, 0x02, 0x04, 0x03, + 0x03, 0x01, 0x03, 0x02, 0x03, 0x03, 0x02, 0x01, + 0x02, 0x02, 0x02, 0x03, 0x01, 0x01, 0x00, 0x0f, + 0x00, 0x01, 0x01, + }, + { + 0x16, 0x03, 0x03, 0x00, 0x30, 0x02, 0x00, 0x00, + 0x2c, 0x03, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0xc0, 0x11, 0x00, 0x00, + 0x04, 0x00, 0x23, 0x00, 0x00, 0x16, 0x03, 0x03, + 0x02, 0xbe, 0x0b, 0x00, 0x02, 0xba, 0x00, 0x02, + 0xb7, 0x00, 0x02, 0xb4, 0x30, 0x82, 0x02, 0xb0, + 0x30, 0x82, 0x02, 0x19, 0xa0, 0x03, 0x02, 0x01, + 0x02, 0x02, 0x09, 0x00, 0x85, 0xb0, 0xbb, 0xa4, + 0x8a, 0x7f, 0xb8, 0xca, 0x30, 0x0d, 0x06, 0x09, + 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x01, + 0x05, 0x05, 0x00, 0x30, 0x45, 0x31, 0x0b, 0x30, + 0x09, 0x06, 0x03, 0x55, 0x04, 0x06, 0x13, 0x02, + 0x41, 0x55, 0x31, 0x13, 0x30, 0x11, 0x06, 0x03, + 0x55, 0x04, 0x08, 0x13, 0x0a, 0x53, 0x6f, 0x6d, + 0x65, 0x2d, 0x53, 0x74, 0x61, 0x74, 0x65, 0x31, + 0x21, 0x30, 0x1f, 0x06, 0x03, 0x55, 0x04, 0x0a, + 0x13, 0x18, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x6e, + 0x65, 0x74, 0x20, 0x57, 0x69, 0x64, 0x67, 0x69, + 0x74, 0x73, 0x20, 0x50, 0x74, 0x79, 0x20, 0x4c, + 0x74, 0x64, 0x30, 0x1e, 0x17, 0x0d, 0x31, 0x30, + 0x30, 0x34, 0x32, 0x34, 0x30, 0x39, 0x30, 0x39, + 0x33, 0x38, 0x5a, 0x17, 0x0d, 0x31, 0x31, 0x30, + 0x34, 0x32, 0x34, 0x30, 0x39, 0x30, 0x39, 0x33, + 0x38, 0x5a, 0x30, 0x45, 0x31, 0x0b, 0x30, 0x09, + 0x06, 0x03, 0x55, 0x04, 0x06, 0x13, 0x02, 0x41, + 0x55, 0x31, 0x13, 0x30, 0x11, 0x06, 0x03, 0x55, + 0x04, 0x08, 0x13, 0x0a, 0x53, 0x6f, 0x6d, 0x65, + 0x2d, 0x53, 0x74, 0x61, 0x74, 0x65, 0x31, 0x21, + 0x30, 0x1f, 0x06, 0x03, 0x55, 0x04, 0x0a, 0x13, + 0x18, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x65, + 0x74, 0x20, 0x57, 0x69, 0x64, 0x67, 0x69, 0x74, + 0x73, 0x20, 0x50, 0x74, 0x79, 0x20, 0x4c, 0x74, + 0x64, 0x30, 0x81, 0x9f, 0x30, 0x0d, 0x06, 0x09, + 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x01, + 0x01, 0x05, 0x00, 0x03, 0x81, 0x8d, 0x00, 0x30, + 0x81, 0x89, 0x02, 0x81, 0x81, 0x00, 0xbb, 0x79, + 0xd6, 0xf5, 0x17, 0xb5, 0xe5, 0xbf, 0x46, 0x10, + 0xd0, 0xdc, 0x69, 0xbe, 0xe6, 0x2b, 0x07, 0x43, + 0x5a, 0xd0, 0x03, 0x2d, 0x8a, 0x7a, 0x43, 0x85, + 0xb7, 0x14, 0x52, 0xe7, 0xa5, 0x65, 0x4c, 0x2c, + 0x78, 0xb8, 0x23, 0x8c, 0xb5, 0xb4, 0x82, 0xe5, + 0xde, 0x1f, 0x95, 0x3b, 0x7e, 0x62, 0xa5, 0x2c, + 0xa5, 
0x33, 0xd6, 0xfe, 0x12, 0x5c, 0x7a, 0x56, + 0xfc, 0xf5, 0x06, 0xbf, 0xfa, 0x58, 0x7b, 0x26, + 0x3f, 0xb5, 0xcd, 0x04, 0xd3, 0xd0, 0xc9, 0x21, + 0x96, 0x4a, 0xc7, 0xf4, 0x54, 0x9f, 0x5a, 0xbf, + 0xef, 0x42, 0x71, 0x00, 0xfe, 0x18, 0x99, 0x07, + 0x7f, 0x7e, 0x88, 0x7d, 0x7d, 0xf1, 0x04, 0x39, + 0xc4, 0xa2, 0x2e, 0xdb, 0x51, 0xc9, 0x7c, 0xe3, + 0xc0, 0x4c, 0x3b, 0x32, 0x66, 0x01, 0xcf, 0xaf, + 0xb1, 0x1d, 0xb8, 0x71, 0x9a, 0x1d, 0xdb, 0xdb, + 0x89, 0x6b, 0xae, 0xda, 0x2d, 0x79, 0x02, 0x03, + 0x01, 0x00, 0x01, 0xa3, 0x81, 0xa7, 0x30, 0x81, + 0xa4, 0x30, 0x1d, 0x06, 0x03, 0x55, 0x1d, 0x0e, + 0x04, 0x16, 0x04, 0x14, 0xb1, 0xad, 0xe2, 0x85, + 0x5a, 0xcf, 0xcb, 0x28, 0xdb, 0x69, 0xce, 0x23, + 0x69, 0xde, 0xd3, 0x26, 0x8e, 0x18, 0x88, 0x39, + 0x30, 0x75, 0x06, 0x03, 0x55, 0x1d, 0x23, 0x04, + 0x6e, 0x30, 0x6c, 0x80, 0x14, 0xb1, 0xad, 0xe2, + 0x85, 0x5a, 0xcf, 0xcb, 0x28, 0xdb, 0x69, 0xce, + 0x23, 0x69, 0xde, 0xd3, 0x26, 0x8e, 0x18, 0x88, + 0x39, 0xa1, 0x49, 0xa4, 0x47, 0x30, 0x45, 0x31, + 0x0b, 0x30, 0x09, 0x06, 0x03, 0x55, 0x04, 0x06, + 0x13, 0x02, 0x41, 0x55, 0x31, 0x13, 0x30, 0x11, + 0x06, 0x03, 0x55, 0x04, 0x08, 0x13, 0x0a, 0x53, + 0x6f, 0x6d, 0x65, 0x2d, 0x53, 0x74, 0x61, 0x74, + 0x65, 0x31, 0x21, 0x30, 0x1f, 0x06, 0x03, 0x55, + 0x04, 0x0a, 0x13, 0x18, 0x49, 0x6e, 0x74, 0x65, + 0x72, 0x6e, 0x65, 0x74, 0x20, 0x57, 0x69, 0x64, + 0x67, 0x69, 0x74, 0x73, 0x20, 0x50, 0x74, 0x79, + 0x20, 0x4c, 0x74, 0x64, 0x82, 0x09, 0x00, 0x85, + 0xb0, 0xbb, 0xa4, 0x8a, 0x7f, 0xb8, 0xca, 0x30, + 0x0c, 0x06, 0x03, 0x55, 0x1d, 0x13, 0x04, 0x05, + 0x30, 0x03, 0x01, 0x01, 0xff, 0x30, 0x0d, 0x06, + 0x09, 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, + 0x01, 0x05, 0x05, 0x00, 0x03, 0x81, 0x81, 0x00, + 0x08, 0x6c, 0x45, 0x24, 0xc7, 0x6b, 0xb1, 0x59, + 0xab, 0x0c, 0x52, 0xcc, 0xf2, 0xb0, 0x14, 0xd7, + 0x87, 0x9d, 0x7a, 0x64, 0x75, 0xb5, 0x5a, 0x95, + 0x66, 0xe4, 0xc5, 0x2b, 0x8e, 0xae, 0x12, 0x66, + 0x1f, 0xeb, 0x4f, 0x38, 0xb3, 0x6e, 0x60, 0xd3, + 0x92, 0xfd, 0xf7, 0x41, 0x08, 0xb5, 0x25, 0x13, + 0xb1, 0x18, 0x7a, 0x24, 0xfb, 0x30, 0x1d, 0xba, + 0xed, 0x98, 0xb9, 0x17, 0xec, 0xe7, 0xd7, 0x31, + 0x59, 0xdb, 0x95, 0xd3, 0x1d, 0x78, 0xea, 0x50, + 0x56, 0x5c, 0xd5, 0x82, 0x5a, 0x2d, 0x5a, 0x5f, + 0x33, 0xc4, 0xb6, 0xd8, 0xc9, 0x75, 0x90, 0x96, + 0x8c, 0x0f, 0x52, 0x98, 0xb5, 0xcd, 0x98, 0x1f, + 0x89, 0x20, 0x5f, 0xf2, 0xa0, 0x1c, 0xa3, 0x1b, + 0x96, 0x94, 0xdd, 0xa9, 0xfd, 0x57, 0xe9, 0x70, + 0xe8, 0x26, 0x6d, 0x71, 0x99, 0x9b, 0x26, 0x6e, + 0x38, 0x50, 0x29, 0x6c, 0x90, 0xa7, 0xbd, 0xd9, + 0x16, 0x03, 0x03, 0x01, 0x11, 0x0c, 0x00, 0x01, + 0x0d, 0x03, 0x00, 0x19, 0x85, 0x04, 0x01, 0x39, + 0xdc, 0xee, 0x44, 0x17, 0x5e, 0xdb, 0xd7, 0x27, + 0xaf, 0xb6, 0x56, 0xd9, 0xb4, 0x43, 0x5a, 0x99, + 0xcf, 0xaa, 0x31, 0x37, 0x0c, 0x6f, 0x3a, 0xa0, + 0xf8, 0x53, 0xc4, 0x74, 0xd1, 0x91, 0x0a, 0x46, + 0xf5, 0x38, 0x3b, 0x5c, 0x09, 0xd8, 0x97, 0xdc, + 0x4b, 0xaa, 0x70, 0x26, 0x48, 0xf2, 0xd6, 0x0b, + 0x31, 0xc9, 0xf8, 0xd4, 0x98, 0x43, 0xe1, 0x6c, + 0xd5, 0xc7, 0xb2, 0x8e, 0x0b, 0x01, 0xe6, 0xb6, + 0x00, 0x28, 0x80, 0x7b, 0xfc, 0x96, 0x8f, 0x0d, + 0xa2, 0x4f, 0xb0, 0x79, 0xaf, 0xdc, 0x61, 0x28, + 0x63, 0x33, 0x78, 0xf6, 0x31, 0x39, 0xfd, 0x8a, + 0xf4, 0x15, 0x18, 0x11, 0xfe, 0xdb, 0xd5, 0x07, + 0xda, 0x2c, 0xed, 0x49, 0xa0, 0x23, 0xbf, 0xd0, + 0x3a, 0x38, 0x1d, 0x54, 0xae, 0x1c, 0x7b, 0xea, + 0x29, 0xee, 0xd0, 0x38, 0xc1, 0x76, 0xa7, 0x7f, + 0x2a, 0xf4, 0xce, 0x1e, 0xac, 0xcc, 0x94, 0x79, + 0x90, 0x33, 0x04, 0x01, 0x00, 0x80, 0x4a, 0xf9, + 0xf5, 0x0a, 0x61, 0x37, 0x7e, 0x4e, 0x92, 0xb5, + 0x1c, 0x91, 0x21, 0xb2, 0xb5, 0x17, 0x00, 0xbf, + 0x01, 
0x5f, 0x30, 0xec, 0x62, 0x08, 0xd6, 0x9d, + 0x1a, 0x08, 0x05, 0x72, 0x8b, 0xf4, 0x49, 0x85, + 0xa7, 0xbf, 0x3f, 0x75, 0x58, 0x3e, 0x26, 0x82, + 0xc3, 0x28, 0x07, 0xf9, 0x41, 0x7d, 0x03, 0x14, + 0x3b, 0xc3, 0x05, 0x64, 0xff, 0x52, 0xf4, 0x75, + 0x6a, 0x87, 0xcd, 0xdf, 0x93, 0x31, 0x0a, 0x71, + 0x60, 0x17, 0xc6, 0x33, 0xf0, 0x79, 0xb6, 0x7b, + 0xd0, 0x9c, 0xa0, 0x5f, 0x74, 0x14, 0x2c, 0x5a, + 0xb4, 0x3f, 0x39, 0xf5, 0xe4, 0x9f, 0xbe, 0x6d, + 0x21, 0xd2, 0xa9, 0x42, 0xf7, 0xdc, 0xa6, 0x65, + 0xb7, 0x6a, 0x7e, 0x2e, 0x14, 0xd3, 0xf6, 0xf3, + 0x4b, 0x4c, 0x5b, 0x1a, 0x70, 0x7a, 0xbc, 0xb0, + 0x12, 0xf3, 0x6e, 0x0c, 0xcf, 0x43, 0x22, 0xae, + 0x5b, 0xba, 0x00, 0xf8, 0xfd, 0xaf, 0x16, 0x03, + 0x03, 0x00, 0x0f, 0x0d, 0x00, 0x00, 0x0b, 0x02, + 0x01, 0x40, 0x00, 0x04, 0x04, 0x01, 0x04, 0x03, + 0x00, 0x00, 0x16, 0x03, 0x03, 0x00, 0x04, 0x0e, + 0x00, 0x00, 0x00, + }, + { + 0x16, 0x03, 0x03, 0x01, 0xfb, 0x0b, 0x00, 0x01, + 0xf7, 0x00, 0x01, 0xf4, 0x00, 0x01, 0xf1, 0x30, + 0x82, 0x01, 0xed, 0x30, 0x82, 0x01, 0x58, 0xa0, + 0x03, 0x02, 0x01, 0x02, 0x02, 0x01, 0x00, 0x30, + 0x0b, 0x06, 0x09, 0x2a, 0x86, 0x48, 0x86, 0xf7, + 0x0d, 0x01, 0x01, 0x05, 0x30, 0x26, 0x31, 0x10, + 0x30, 0x0e, 0x06, 0x03, 0x55, 0x04, 0x0a, 0x13, + 0x07, 0x41, 0x63, 0x6d, 0x65, 0x20, 0x43, 0x6f, + 0x31, 0x12, 0x30, 0x10, 0x06, 0x03, 0x55, 0x04, + 0x03, 0x13, 0x09, 0x31, 0x32, 0x37, 0x2e, 0x30, + 0x2e, 0x30, 0x2e, 0x31, 0x30, 0x1e, 0x17, 0x0d, + 0x31, 0x31, 0x31, 0x32, 0x30, 0x38, 0x30, 0x37, + 0x35, 0x35, 0x31, 0x32, 0x5a, 0x17, 0x0d, 0x31, + 0x32, 0x31, 0x32, 0x30, 0x37, 0x30, 0x38, 0x30, + 0x30, 0x31, 0x32, 0x5a, 0x30, 0x26, 0x31, 0x10, + 0x30, 0x0e, 0x06, 0x03, 0x55, 0x04, 0x0a, 0x13, + 0x07, 0x41, 0x63, 0x6d, 0x65, 0x20, 0x43, 0x6f, + 0x31, 0x12, 0x30, 0x10, 0x06, 0x03, 0x55, 0x04, + 0x03, 0x13, 0x09, 0x31, 0x32, 0x37, 0x2e, 0x30, + 0x2e, 0x30, 0x2e, 0x31, 0x30, 0x81, 0x9c, 0x30, + 0x0b, 0x06, 0x09, 0x2a, 0x86, 0x48, 0x86, 0xf7, + 0x0d, 0x01, 0x01, 0x01, 0x03, 0x81, 0x8c, 0x00, + 0x30, 0x81, 0x88, 0x02, 0x81, 0x80, 0x4e, 0xd0, + 0x7b, 0x31, 0xe3, 0x82, 0x64, 0xd9, 0x59, 0xc0, + 0xc2, 0x87, 0xa4, 0x5e, 0x1e, 0x8b, 0x73, 0x33, + 0xc7, 0x63, 0x53, 0xdf, 0x66, 0x92, 0x06, 0x84, + 0xf6, 0x64, 0xd5, 0x8f, 0xe4, 0x36, 0xa7, 0x1d, + 0x2b, 0xe8, 0xb3, 0x20, 0x36, 0x45, 0x23, 0xb5, + 0xe3, 0x95, 0xae, 0xed, 0xe0, 0xf5, 0x20, 0x9c, + 0x8d, 0x95, 0xdf, 0x7f, 0x5a, 0x12, 0xef, 0x87, + 0xe4, 0x5b, 0x68, 0xe4, 0xe9, 0x0e, 0x74, 0xec, + 0x04, 0x8a, 0x7f, 0xde, 0x93, 0x27, 0xc4, 0x01, + 0x19, 0x7a, 0xbd, 0xf2, 0xdc, 0x3d, 0x14, 0xab, + 0xd0, 0x54, 0xca, 0x21, 0x0c, 0xd0, 0x4d, 0x6e, + 0x87, 0x2e, 0x5c, 0xc5, 0xd2, 0xbb, 0x4d, 0x4b, + 0x4f, 0xce, 0xb6, 0x2c, 0xf7, 0x7e, 0x88, 0xec, + 0x7c, 0xd7, 0x02, 0x91, 0x74, 0xa6, 0x1e, 0x0c, + 0x1a, 0xda, 0xe3, 0x4a, 0x5a, 0x2e, 0xde, 0x13, + 0x9c, 0x4c, 0x40, 0x88, 0x59, 0x93, 0x02, 0x03, + 0x01, 0x00, 0x01, 0xa3, 0x32, 0x30, 0x30, 0x30, + 0x0e, 0x06, 0x03, 0x55, 0x1d, 0x0f, 0x01, 0x01, + 0xff, 0x04, 0x04, 0x03, 0x02, 0x00, 0xa0, 0x30, + 0x0d, 0x06, 0x03, 0x55, 0x1d, 0x0e, 0x04, 0x06, + 0x04, 0x04, 0x01, 0x02, 0x03, 0x04, 0x30, 0x0f, + 0x06, 0x03, 0x55, 0x1d, 0x23, 0x04, 0x08, 0x30, + 0x06, 0x80, 0x04, 0x01, 0x02, 0x03, 0x04, 0x30, + 0x0b, 0x06, 0x09, 0x2a, 0x86, 0x48, 0x86, 0xf7, + 0x0d, 0x01, 0x01, 0x05, 0x03, 0x81, 0x81, 0x00, + 0x36, 0x1f, 0xb3, 0x7a, 0x0c, 0x75, 0xc9, 0x6e, + 0x37, 0x46, 0x61, 0x2b, 0xd5, 0xbd, 0xc0, 0xa7, + 0x4b, 0xcc, 0x46, 0x9a, 0x81, 0x58, 0x7c, 0x85, + 0x79, 0x29, 0xc8, 0xc8, 0xc6, 0x67, 0xdd, 0x32, + 0x56, 0x45, 0x2b, 0x75, 0xb6, 0xe9, 0x24, 0xa9, + 0x50, 0x9a, 0xbe, 0x1f, 0x5a, 
0xfa, 0x1a, 0x15, + 0xd9, 0xcc, 0x55, 0x95, 0x72, 0x16, 0x83, 0xb9, + 0xc2, 0xb6, 0x8f, 0xfd, 0x88, 0x8c, 0x38, 0x84, + 0x1d, 0xab, 0x5d, 0x92, 0x31, 0x13, 0x4f, 0xfd, + 0x83, 0x3b, 0xc6, 0x9d, 0xf1, 0x11, 0x62, 0xb6, + 0x8b, 0xec, 0xab, 0x67, 0xbe, 0xc8, 0x64, 0xb0, + 0x11, 0x50, 0x46, 0x58, 0x17, 0x6b, 0x99, 0x1c, + 0xd3, 0x1d, 0xfc, 0x06, 0xf1, 0x0e, 0xe5, 0x96, + 0xa8, 0x0c, 0xf9, 0x78, 0x20, 0xb7, 0x44, 0x18, + 0x51, 0x8d, 0x10, 0x7e, 0x4f, 0x94, 0x67, 0xdf, + 0xa3, 0x4e, 0x70, 0x73, 0x8e, 0x90, 0x91, 0x85, + 0x16, 0x03, 0x03, 0x00, 0x8a, 0x10, 0x00, 0x00, + 0x86, 0x85, 0x04, 0x01, 0x5d, 0x3a, 0x92, 0x59, + 0x7f, 0x9a, 0x22, 0x36, 0x0e, 0x1b, 0x1d, 0x2a, + 0x05, 0xb7, 0xa4, 0xb6, 0x5d, 0xfc, 0x51, 0x6e, + 0x15, 0xe5, 0x89, 0x7c, 0xe2, 0xfa, 0x87, 0x38, + 0x05, 0x79, 0x15, 0x92, 0xb4, 0x8f, 0x88, 0x8f, + 0x9d, 0x5d, 0xa0, 0xaf, 0xf8, 0xce, 0xf9, 0x6f, + 0x83, 0xf4, 0x08, 0x69, 0xe4, 0x91, 0xc5, 0xed, + 0xb9, 0xc5, 0xa8, 0x1f, 0x4b, 0xec, 0xef, 0x91, + 0xc1, 0xa3, 0x34, 0x24, 0x18, 0x00, 0x2d, 0xcd, + 0xe6, 0x44, 0xef, 0x5a, 0x3e, 0x52, 0x63, 0x5b, + 0x36, 0x1f, 0x7e, 0xce, 0x9e, 0xaa, 0xda, 0x8d, + 0xb5, 0xc9, 0xea, 0xd8, 0x1b, 0xd1, 0x1c, 0x7c, + 0x07, 0xfc, 0x3c, 0x2d, 0x70, 0x1f, 0xf9, 0x4d, + 0xcb, 0xaa, 0xad, 0x07, 0xd5, 0x6d, 0xbd, 0xa6, + 0x61, 0xf3, 0x2f, 0xa3, 0x9c, 0x45, 0x02, 0x4a, + 0xac, 0x6c, 0xb6, 0x37, 0x95, 0xb1, 0x4a, 0xb5, + 0x0a, 0x4e, 0x60, 0x67, 0xd7, 0xe0, 0x04, 0x16, + 0x03, 0x03, 0x00, 0x88, 0x0f, 0x00, 0x00, 0x84, + 0x04, 0x01, 0x00, 0x80, 0x08, 0x83, 0x53, 0xf0, + 0xf8, 0x14, 0xf5, 0xc2, 0xd1, 0x8b, 0xf0, 0xa5, + 0xc1, 0xd8, 0x1a, 0x36, 0x4b, 0x75, 0x77, 0x02, + 0x19, 0xd8, 0x11, 0x3f, 0x5a, 0x36, 0xfc, 0xe9, + 0x2b, 0x4b, 0xf9, 0xfe, 0xda, 0x8a, 0x0f, 0x6e, + 0x3d, 0xd3, 0x52, 0x87, 0xf7, 0x9c, 0x78, 0x39, + 0xa8, 0xf1, 0xd7, 0xf7, 0x4e, 0x35, 0x33, 0xf9, + 0xc5, 0x76, 0xa8, 0x12, 0xc4, 0x91, 0x33, 0x1d, + 0x93, 0x8c, 0xbf, 0xb1, 0x83, 0x00, 0x90, 0xc5, + 0x52, 0x3e, 0xe0, 0x0a, 0xe8, 0x92, 0x75, 0xdf, + 0x54, 0x5f, 0x9f, 0x95, 0x76, 0x62, 0xb5, 0x85, + 0x69, 0xa4, 0x86, 0x85, 0x6c, 0xf3, 0x6b, 0x2a, + 0x72, 0x7b, 0x4d, 0x42, 0x33, 0x67, 0x4a, 0xce, + 0xb5, 0xdb, 0x9b, 0xae, 0xc0, 0xb0, 0x10, 0xeb, + 0x3b, 0xf4, 0xc2, 0x9a, 0x64, 0x47, 0x4c, 0x1e, + 0xa5, 0x91, 0x7f, 0x6d, 0xd1, 0x03, 0xf5, 0x4a, + 0x90, 0x69, 0x18, 0xb1, 0x14, 0x03, 0x03, 0x00, + 0x01, 0x01, 0x16, 0x03, 0x03, 0x00, 0x24, 0x59, + 0xfc, 0x7e, 0xae, 0xb3, 0xbf, 0xab, 0x4d, 0xdb, + 0x4e, 0xab, 0xa9, 0x6d, 0x6b, 0x4c, 0x60, 0xb6, + 0x16, 0xe0, 0xab, 0x7f, 0x52, 0x2d, 0xa1, 0xfc, + 0xe1, 0x80, 0xd2, 0x8a, 0xa1, 0xe5, 0x8f, 0xa1, + 0x70, 0x93, 0x23, + }, + { + 0x16, 0x03, 0x03, 0x02, 0x67, 0x04, 0x00, 0x02, + 0x63, 0x00, 0x00, 0x00, 0x00, 0x02, 0x5d, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x65, + 0xea, 0x8b, 0xc5, 0xef, 0xba, 0x64, 0xb7, 0x23, + 0x08, 0x86, 0x4f, 0x37, 0xe0, 0x8f, 0xbd, 0x75, + 0x71, 0x2b, 0xcb, 0x20, 0x75, 0x11, 0x3b, 0xa2, + 0x9e, 0x39, 0x3c, 0x03, 0xef, 0x6e, 0x41, 0xd7, + 0xcf, 0x1a, 0x2c, 0xf2, 0xfe, 0xc2, 0xd3, 0x65, + 0x59, 0x00, 0x9d, 0x03, 0xb4, 0xf2, 0x20, 0xe4, + 0x33, 0x80, 0xcd, 0xf6, 0xe4, 0x59, 0x22, 0xf7, + 0xfd, 0x88, 0x0e, 0xa4, 0x09, 0xc0, 0x0d, 0x10, + 0x80, 0x10, 0x79, 0xee, 0x70, 0x96, 0xdb, 0x22, + 0x8b, 0xb7, 0xac, 0xe0, 0x98, 0xad, 0xe9, 0xe3, + 0xcb, 0xea, 0x9f, 0xe6, 0x83, 0x28, 0x7c, 0x7e, + 0x4e, 0x9a, 0x8d, 0xd9, 0xf3, 0x86, 0xf4, 0x89, + 0x8b, 0x79, 0x8f, 0xbb, 0xe9, 0x74, 0x02, 0x02, + 0x14, 0x04, 0xea, 0xba, 0x16, 0x10, 0xa1, 0x85, + 0xbe, 0x4e, 0x4e, 0x92, 0xc5, 0x83, 0xf6, 0x1e, + 0x1f, 
0xd4, 0x25, 0xc2, 0xc2, 0xb9, 0xce, 0x33, + 0x63, 0x66, 0x79, 0x1f, 0x54, 0x35, 0xc1, 0xe8, + 0x89, 0x34, 0x78, 0x94, 0x36, 0x14, 0xef, 0x01, + 0x1f, 0xf1, 0xbd, 0x77, 0x2c, 0x4d, 0xac, 0x5c, + 0x5c, 0x4a, 0xc6, 0xed, 0xd8, 0x0e, 0x72, 0x84, + 0x83, 0xdc, 0x56, 0x84, 0xc8, 0xf3, 0x89, 0x56, + 0xfd, 0x89, 0xc1, 0xc9, 0x9a, 0x29, 0x91, 0x7e, + 0x19, 0xe9, 0x8b, 0x5b, 0x11, 0x15, 0x4e, 0x6c, + 0xf4, 0x89, 0xe7, 0x6d, 0x68, 0x1e, 0xf9, 0x6c, + 0x23, 0x72, 0x05, 0x68, 0x82, 0x60, 0x84, 0x1f, + 0x83, 0x20, 0x09, 0x86, 0x10, 0x81, 0xec, 0xec, + 0xdc, 0x25, 0x53, 0x20, 0xfa, 0xa9, 0x41, 0x64, + 0xd6, 0x20, 0xf3, 0xf4, 0x52, 0xf2, 0x80, 0x62, + 0x83, 0xc9, 0x23, 0x66, 0x44, 0x95, 0x5a, 0x99, + 0x8a, 0xe1, 0x26, 0x63, 0xc1, 0x8b, 0x31, 0xf9, + 0x21, 0x06, 0x77, 0x04, 0x27, 0xf2, 0x0c, 0x63, + 0x83, 0x45, 0xa0, 0xa9, 0x7b, 0xcf, 0xdf, 0xd7, + 0x56, 0x75, 0xbc, 0xdd, 0x95, 0x36, 0xb1, 0x75, + 0x39, 0x05, 0x00, 0x3c, 0x8a, 0x79, 0xd6, 0xe9, + 0xf0, 0x4b, 0xdc, 0x51, 0x6b, 0x01, 0x94, 0x16, + 0x87, 0x12, 0x92, 0x6c, 0x07, 0xc1, 0xf5, 0x58, + 0xb7, 0x2a, 0x81, 0xf5, 0xa0, 0x37, 0x8b, 0xa6, + 0x22, 0xfe, 0x28, 0x0a, 0x7e, 0x68, 0xe2, 0xda, + 0x6c, 0x53, 0xee, 0x0e, 0x8d, 0x2d, 0x8b, 0x0b, + 0xda, 0xf8, 0x99, 0x3e, 0x0e, 0xed, 0x9f, 0xc1, + 0x2b, 0xf6, 0xfe, 0xe9, 0x52, 0x38, 0x7b, 0x83, + 0x9a, 0x50, 0xa6, 0xd7, 0x49, 0x83, 0x43, 0x7e, + 0x82, 0xec, 0xc7, 0x09, 0x3d, 0x3d, 0xb1, 0xee, + 0xe8, 0xc5, 0x6a, 0xc3, 0x3d, 0x4b, 0x4c, 0x6a, + 0xbb, 0x0b, 0x2c, 0x24, 0x2e, 0xdb, 0x7d, 0x57, + 0x87, 0xb4, 0x80, 0xa5, 0xae, 0xff, 0x54, 0xa8, + 0xa5, 0x27, 0x69, 0x95, 0xc8, 0xe7, 0x79, 0xc7, + 0x89, 0x2a, 0x73, 0x49, 0xcb, 0xf5, 0xc5, 0xbc, + 0x4a, 0xe0, 0x73, 0xa9, 0xbc, 0x88, 0x64, 0x96, + 0x98, 0xa5, 0x1e, 0xe3, 0x43, 0xc1, 0x7d, 0x78, + 0xc7, 0x94, 0x72, 0xd4, 0x2c, 0x6e, 0x85, 0x39, + 0x9a, 0xaf, 0xdb, 0xa1, 0xe9, 0xe2, 0xcb, 0x37, + 0x04, 0xc6, 0x8c, 0x81, 0xd3, 0x2a, 0xb7, 0xbe, + 0x6c, 0x07, 0x1f, 0x5e, 0xd9, 0x00, 0xd2, 0xf7, + 0xe1, 0xa7, 0xbc, 0x0c, 0xb6, 0x6d, 0xfb, 0x3f, + 0x3d, 0x24, 0xaa, 0xfb, 0x7e, 0xe1, 0xb5, 0x1b, + 0xff, 0x38, 0xaa, 0x69, 0x59, 0x38, 0x52, 0x9a, + 0x0e, 0x6d, 0xbc, 0xde, 0x4f, 0x13, 0x09, 0x17, + 0xc4, 0xa9, 0x05, 0x84, 0xbc, 0x50, 0xef, 0x40, + 0xb0, 0x4c, 0x24, 0x32, 0xed, 0x94, 0x2c, 0xdd, + 0xda, 0x20, 0x24, 0x67, 0xe2, 0xea, 0x71, 0x3d, + 0x4a, 0x04, 0x0d, 0x98, 0x29, 0x20, 0x4c, 0xeb, + 0x70, 0xce, 0x45, 0x9e, 0x5a, 0xaf, 0xb6, 0xa3, + 0x92, 0xc8, 0x28, 0xf2, 0xe3, 0xe8, 0x8a, 0x5d, + 0x0a, 0x33, 0x79, 0x9b, 0x6a, 0xf3, 0x30, 0x01, + 0x1d, 0x47, 0xbd, 0x01, 0xcc, 0x4d, 0x71, 0xc0, + 0x56, 0xfa, 0xfd, 0x37, 0xed, 0x0f, 0x27, 0xc0, + 0xbb, 0xa0, 0xee, 0xc3, 0x79, 0x8b, 0xe7, 0x41, + 0x8f, 0xfa, 0x3a, 0xcb, 0x45, 0x3b, 0x85, 0x9f, + 0x06, 0x90, 0xb2, 0x51, 0xc0, 0x48, 0x10, 0xac, + 0x2a, 0xec, 0xec, 0x48, 0x7a, 0x19, 0x47, 0xc4, + 0x2a, 0xeb, 0xb3, 0xa2, 0x07, 0x22, 0x32, 0x78, + 0xf4, 0x73, 0x5e, 0x92, 0x42, 0x15, 0xa1, 0x90, + 0x91, 0xd0, 0xeb, 0x12, 0x14, 0x03, 0x03, 0x00, + 0x01, 0x01, 0x16, 0x03, 0x03, 0x00, 0x24, 0x45, + 0x4b, 0x80, 0x42, 0x46, 0xde, 0xbb, 0xe7, 0x76, + 0xd1, 0x33, 0x92, 0xfc, 0x46, 0x17, 0x6d, 0x21, + 0xf6, 0x0e, 0x16, 0xca, 0x9b, 0x9b, 0x04, 0x65, + 0x16, 0x40, 0x44, 0x64, 0xbc, 0x58, 0xfa, 0x2a, + 0x49, 0xe9, 0xed, 0x17, 0x03, 0x03, 0x00, 0x21, + 0x89, 0x71, 0xcd, 0x56, 0x54, 0xbf, 0x73, 0xde, + 0xfb, 0x4b, 0x4e, 0xf1, 0x7f, 0xc6, 0x75, 0xa6, + 0xbd, 0x6b, 0x6c, 0xd9, 0xdc, 0x0c, 0x71, 0xb4, + 0xb9, 0xbb, 0x6e, 0xfa, 0x9e, 0xc7, 0xc7, 0x4c, + 0x24, 0x15, 0x03, 0x03, 0x00, 0x16, 0x62, 0xea, + 0x65, 0x69, 0x68, 0x4a, 0xce, 0xa7, 0x9e, 0xce, + 0xc0, 
0xf1, 0x5c, 0x96, 0xd9, 0x1f, 0x49, 0xac, + 0x2d, 0x05, 0x89, 0x94, + }, +} + +// cert.pem and key.pem were generated with generate_cert.go +// Thus, they have no ExtKeyUsage fields and trigger an error +// when verification is turned on. + +var clientCertificate = loadPEMCert(` +-----BEGIN CERTIFICATE----- +MIIB7TCCAVigAwIBAgIBADALBgkqhkiG9w0BAQUwJjEQMA4GA1UEChMHQWNtZSBD +bzESMBAGA1UEAxMJMTI3LjAuMC4xMB4XDTExMTIwODA3NTUxMloXDTEyMTIwNzA4 +MDAxMlowJjEQMA4GA1UEChMHQWNtZSBDbzESMBAGA1UEAxMJMTI3LjAuMC4xMIGc +MAsGCSqGSIb3DQEBAQOBjAAwgYgCgYBO0Hsx44Jk2VnAwoekXh6LczPHY1PfZpIG +hPZk1Y/kNqcdK+izIDZFI7Xjla7t4PUgnI2V339aEu+H5Fto5OkOdOwEin/ekyfE +ARl6vfLcPRSr0FTKIQzQTW6HLlzF0rtNS0/Otiz3fojsfNcCkXSmHgwa2uNKWi7e +E5xMQIhZkwIDAQABozIwMDAOBgNVHQ8BAf8EBAMCAKAwDQYDVR0OBAYEBAECAwQw +DwYDVR0jBAgwBoAEAQIDBDALBgkqhkiG9w0BAQUDgYEANh+zegx1yW43RmEr1b3A +p0vMRpqBWHyFeSnIyMZn3TJWRSt1tukkqVCavh9a+hoV2cxVlXIWg7nCto/9iIw4 +hB2rXZIxE0/9gzvGnfERYraL7KtnvshksBFQRlgXa5kc0x38BvEO5ZaoDPl4ILdE +GFGNEH5PlGffo05wc46QkYU= +-----END CERTIFICATE----- +`) + +/* corresponding key.pem for cert.pem is: +-----BEGIN RSA PRIVATE KEY----- +MIICWgIBAAKBgE7QezHjgmTZWcDCh6ReHotzM8djU99mkgaE9mTVj+Q2px0r6LMg +NkUjteOVru3g9SCcjZXff1oS74fkW2jk6Q507ASKf96TJ8QBGXq98tw9FKvQVMoh +DNBNbocuXMXSu01LT862LPd+iOx81wKRdKYeDBra40paLt4TnExAiFmTAgMBAAEC +gYBxvXd8yNteFTns8A/2yomEMC4yeosJJSpp1CsN3BJ7g8/qTnrVPxBy+RU+qr63 +t2WquaOu/cr5P8iEsa6lk20tf8pjKLNXeX0b1RTzK8rJLbS7nGzP3tvOhL096VtQ +dAo4ROEaro0TzYpHmpciSvxVIeEIAAdFDObDJPKqcJAxyQJBAJizfYgK8Gzx9fsx +hxp+VteCbVPg2euASH5Yv3K5LukRdKoSzHE2grUVQgN/LafC0eZibRanxHegYSr7 +7qaswKUCQQCEIWor/X4XTMdVj3Oj+vpiw75y/S9gh682+myZL+d/02IEkwnB098P +RkKVpenBHyrGg0oeN5La7URILWKj7CPXAkBKo6F+d+phNjwIFoN1Xb/RA32w/D1I +saG9sF+UEhRt9AxUfW/U/tIQ9V0ZHHcSg1XaCM5Nvp934brdKdvTOKnJAkBD5h/3 +Rybatlvg/fzBEaJFyq09zhngkxlZOUtBVTqzl17RVvY2orgH02U4HbCHy4phxOn7 +qTdQRYlHRftgnWK1AkANibn9PRYJ7mJyJ9Dyj2QeNcSkSTzrt0tPvUMf4+meJymN +1Ntu5+S1DLLzfxlaljWG6ylW6DNxujCyuXIV2rvA +-----END RSA PRIVATE KEY----- +*/ + +var clientECDSACertificate = loadPEMCert(` +-----BEGIN CERTIFICATE----- +MIIB/DCCAV4CCQCaMIRsJjXZFzAJBgcqhkjOPQQBMEUxCzAJBgNVBAYTAkFVMRMw +EQYDVQQIEwpTb21lLVN0YXRlMSEwHwYDVQQKExhJbnRlcm5ldCBXaWRnaXRzIFB0 +eSBMdGQwHhcNMTIxMTE0MTMyNTUzWhcNMjIxMTEyMTMyNTUzWjBBMQswCQYDVQQG +EwJBVTEMMAoGA1UECBMDTlNXMRAwDgYDVQQHEwdQeXJtb250MRIwEAYDVQQDEwlK +b2VsIFNpbmcwgZswEAYHKoZIzj0CAQYFK4EEACMDgYYABACVjJF1FMBexFe01MNv +ja5oHt1vzobhfm6ySD6B5U7ixohLZNz1MLvT/2XMW/TdtWo+PtAd3kfDdq0Z9kUs +jLzYHQFMH3CQRnZIi4+DzEpcj0B22uCJ7B0rxE4wdihBsmKo+1vx+U56jb0JuK7q +ixgnTy5w/hOWusPTQBbNZU6sER7m8TAJBgcqhkjOPQQBA4GMADCBiAJCAOAUxGBg +C3JosDJdYUoCdFzCgbkWqD8pyDbHgf9stlvZcPE4O1BIKJTLCRpS8V3ujfK58PDa +2RU6+b0DeoeiIzXsAkIBo9SKeDUcSpoj0gq+KxAxnZxfvuiRs9oa9V2jI/Umi0Vw +jWVim34BmT0Y9hCaOGGbLlfk+syxis7iI6CH8OFnUes= +-----END CERTIFICATE----- +`) + +/* corresponding key for cert is: +-----BEGIN EC PARAMETERS----- +BgUrgQQAIw== +-----END EC PARAMETERS----- +-----BEGIN EC PRIVATE KEY----- +MIHcAgEBBEIBkJN9X4IqZIguiEVKMqeBUP5xtRsEv4HJEtOpOGLELwO53SD78Ew8 +k+wLWoqizS3NpQyMtrU8JFdWfj+C57UNkOugBwYFK4EEACOhgYkDgYYABACVjJF1 +FMBexFe01MNvja5oHt1vzobhfm6ySD6B5U7ixohLZNz1MLvT/2XMW/TdtWo+PtAd +3kfDdq0Z9kUsjLzYHQFMH3CQRnZIi4+DzEpcj0B22uCJ7B0rxE4wdihBsmKo+1vx ++U56jb0JuK7qixgnTy5w/hOWusPTQBbNZU6sER7m8Q== +-----END EC PRIVATE KEY----- +*/ +var clientauthECDSATests = []clientauthTest{ + // Server asks for cert with empty CA list, client gives one + // go test -run "TestRunServer" -serve \ + // -clientauth 1 -ciphersuites=0xc00a + // openssl s_client -host 127.0.0.1 -port 10443 \ + // -cipher ECDHE-ECDSA-AES256-SHA 
-key client.key -cert client.crt + {"RequestClientCert, client gives it", RequestClientCert, []*x509.Certificate{clientECDSACertificate}, [][]byte{ + { + 0x16, 0x03, 0x01, 0x00, 0xa0, 0x01, 0x00, 0x00, + 0x9c, 0x03, 0x03, 0x51, 0xe5, 0x73, 0xc5, 0xae, + 0x51, 0x94, 0xb4, 0xf2, 0xe8, 0xf6, 0x03, 0x0e, + 0x3b, 0x34, 0xaf, 0xf0, 0xdc, 0x1b, 0xcc, 0xd8, + 0x0c, 0x45, 0x82, 0xd4, 0xd6, 0x76, 0x04, 0x6e, + 0x4f, 0x7a, 0x24, 0x00, 0x00, 0x04, 0xc0, 0x0a, + 0x00, 0xff, 0x01, 0x00, 0x00, 0x6f, 0x00, 0x0b, + 0x00, 0x04, 0x03, 0x00, 0x01, 0x02, 0x00, 0x0a, + 0x00, 0x34, 0x00, 0x32, 0x00, 0x0e, 0x00, 0x0d, + 0x00, 0x19, 0x00, 0x0b, 0x00, 0x0c, 0x00, 0x18, + 0x00, 0x09, 0x00, 0x0a, 0x00, 0x16, 0x00, 0x17, + 0x00, 0x08, 0x00, 0x06, 0x00, 0x07, 0x00, 0x14, + 0x00, 0x15, 0x00, 0x04, 0x00, 0x05, 0x00, 0x12, + 0x00, 0x13, 0x00, 0x01, 0x00, 0x02, 0x00, 0x03, + 0x00, 0x0f, 0x00, 0x10, 0x00, 0x11, 0x00, 0x23, + 0x00, 0x00, 0x00, 0x0d, 0x00, 0x22, 0x00, 0x20, + 0x06, 0x01, 0x06, 0x02, 0x06, 0x03, 0x05, 0x01, + 0x05, 0x02, 0x05, 0x03, 0x04, 0x01, 0x04, 0x02, + 0x04, 0x03, 0x03, 0x01, 0x03, 0x02, 0x03, 0x03, + 0x02, 0x01, 0x02, 0x02, 0x02, 0x03, 0x01, 0x01, + 0x00, 0x0f, 0x00, 0x01, 0x01, + }, + { + 0x16, 0x03, 0x01, 0x00, 0x30, 0x02, 0x00, 0x00, + 0x2c, 0x03, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0xc0, 0x0a, 0x00, 0x00, + 0x04, 0x00, 0x23, 0x00, 0x00, 0x16, 0x03, 0x01, + 0x02, 0x0e, 0x0b, 0x00, 0x02, 0x0a, 0x00, 0x02, + 0x07, 0x00, 0x02, 0x04, 0x30, 0x82, 0x02, 0x00, + 0x30, 0x82, 0x01, 0x62, 0x02, 0x09, 0x00, 0xb8, + 0xbf, 0x2d, 0x47, 0xa0, 0xd2, 0xeb, 0xf4, 0x30, + 0x09, 0x06, 0x07, 0x2a, 0x86, 0x48, 0xce, 0x3d, + 0x04, 0x01, 0x30, 0x45, 0x31, 0x0b, 0x30, 0x09, + 0x06, 0x03, 0x55, 0x04, 0x06, 0x13, 0x02, 0x41, + 0x55, 0x31, 0x13, 0x30, 0x11, 0x06, 0x03, 0x55, + 0x04, 0x08, 0x13, 0x0a, 0x53, 0x6f, 0x6d, 0x65, + 0x2d, 0x53, 0x74, 0x61, 0x74, 0x65, 0x31, 0x21, + 0x30, 0x1f, 0x06, 0x03, 0x55, 0x04, 0x0a, 0x13, + 0x18, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x65, + 0x74, 0x20, 0x57, 0x69, 0x64, 0x67, 0x69, 0x74, + 0x73, 0x20, 0x50, 0x74, 0x79, 0x20, 0x4c, 0x74, + 0x64, 0x30, 0x1e, 0x17, 0x0d, 0x31, 0x32, 0x31, + 0x31, 0x32, 0x32, 0x31, 0x35, 0x30, 0x36, 0x33, + 0x32, 0x5a, 0x17, 0x0d, 0x32, 0x32, 0x31, 0x31, + 0x32, 0x30, 0x31, 0x35, 0x30, 0x36, 0x33, 0x32, + 0x5a, 0x30, 0x45, 0x31, 0x0b, 0x30, 0x09, 0x06, + 0x03, 0x55, 0x04, 0x06, 0x13, 0x02, 0x41, 0x55, + 0x31, 0x13, 0x30, 0x11, 0x06, 0x03, 0x55, 0x04, + 0x08, 0x13, 0x0a, 0x53, 0x6f, 0x6d, 0x65, 0x2d, + 0x53, 0x74, 0x61, 0x74, 0x65, 0x31, 0x21, 0x30, + 0x1f, 0x06, 0x03, 0x55, 0x04, 0x0a, 0x13, 0x18, + 0x49, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x65, 0x74, + 0x20, 0x57, 0x69, 0x64, 0x67, 0x69, 0x74, 0x73, + 0x20, 0x50, 0x74, 0x79, 0x20, 0x4c, 0x74, 0x64, + 0x30, 0x81, 0x9b, 0x30, 0x10, 0x06, 0x07, 0x2a, + 0x86, 0x48, 0xce, 0x3d, 0x02, 0x01, 0x06, 0x05, + 0x2b, 0x81, 0x04, 0x00, 0x23, 0x03, 0x81, 0x86, + 0x00, 0x04, 0x00, 0xc4, 0xa1, 0xed, 0xbe, 0x98, + 0xf9, 0x0b, 0x48, 0x73, 0x36, 0x7e, 0xc3, 0x16, + 0x56, 0x11, 0x22, 0xf2, 0x3d, 0x53, 0xc3, 0x3b, + 0x4d, 0x21, 0x3d, 0xcd, 0x6b, 0x75, 0xe6, 0xf6, + 0xb0, 0xdc, 0x9a, 0xdf, 0x26, 0xc1, 0xbc, 0xb2, + 0x87, 0xf0, 0x72, 0x32, 0x7c, 0xb3, 0x64, 0x2f, + 0x1c, 0x90, 0xbc, 0xea, 0x68, 0x23, 0x10, 0x7e, + 0xfe, 0xe3, 0x25, 0xc0, 0x48, 0x3a, 0x69, 0xe0, + 0x28, 0x6d, 0xd3, 0x37, 0x00, 0xef, 0x04, 0x62, + 0xdd, 0x0d, 0xa0, 0x9c, 0x70, 0x62, 0x83, 0xd8, + 0x81, 
0xd3, 0x64, 0x31, 0xaa, 0x9e, 0x97, 0x31, + 0xbd, 0x96, 0xb0, 0x68, 0xc0, 0x9b, 0x23, 0xde, + 0x76, 0x64, 0x3f, 0x1a, 0x5c, 0x7f, 0xe9, 0x12, + 0x0e, 0x58, 0x58, 0xb6, 0x5f, 0x70, 0xdd, 0x9b, + 0xd8, 0xea, 0xd5, 0xd7, 0xf5, 0xd5, 0xcc, 0xb9, + 0xb6, 0x9f, 0x30, 0x66, 0x5b, 0x66, 0x9a, 0x20, + 0xe2, 0x27, 0xe5, 0xbf, 0xfe, 0x3b, 0x30, 0x09, + 0x06, 0x07, 0x2a, 0x86, 0x48, 0xce, 0x3d, 0x04, + 0x01, 0x03, 0x81, 0x8c, 0x00, 0x30, 0x81, 0x88, + 0x02, 0x42, 0x01, 0x88, 0xa2, 0x4f, 0xeb, 0xe2, + 0x45, 0xc5, 0x48, 0x7d, 0x1b, 0xac, 0xf5, 0xed, + 0x98, 0x9d, 0xae, 0x47, 0x70, 0xc0, 0x5e, 0x1b, + 0xb6, 0x2f, 0xbd, 0xf1, 0xb6, 0x4d, 0xb7, 0x61, + 0x40, 0xd3, 0x11, 0xa2, 0xce, 0xee, 0x0b, 0x7e, + 0x92, 0x7e, 0xff, 0x76, 0x9d, 0xc3, 0x3b, 0x7e, + 0xa5, 0x3f, 0xce, 0xfa, 0x10, 0xe2, 0x59, 0xec, + 0x47, 0x2d, 0x7c, 0xac, 0xda, 0x4e, 0x97, 0x0e, + 0x15, 0xa0, 0x6f, 0xd0, 0x02, 0x42, 0x01, 0x4d, + 0xfc, 0xbe, 0x67, 0x13, 0x9c, 0x2d, 0x05, 0x0e, + 0xbd, 0x3f, 0xa3, 0x8c, 0x25, 0xc1, 0x33, 0x13, + 0x83, 0x0d, 0x94, 0x06, 0xbb, 0xd4, 0x37, 0x7a, + 0xf6, 0xec, 0x7a, 0xc9, 0x86, 0x2e, 0xdd, 0xd7, + 0x11, 0x69, 0x7f, 0x85, 0x7c, 0x56, 0xde, 0xfb, + 0x31, 0x78, 0x2b, 0xe4, 0xc7, 0x78, 0x0d, 0xae, + 0xcb, 0xbe, 0x9e, 0x4e, 0x36, 0x24, 0x31, 0x7b, + 0x6a, 0x0f, 0x39, 0x95, 0x12, 0x07, 0x8f, 0x2a, + 0x16, 0x03, 0x01, 0x01, 0x1a, 0x0c, 0x00, 0x01, + 0x16, 0x03, 0x00, 0x19, 0x85, 0x04, 0x01, 0x39, + 0xdc, 0xee, 0x44, 0x17, 0x5e, 0xdb, 0xd7, 0x27, + 0xaf, 0xb6, 0x56, 0xd9, 0xb4, 0x43, 0x5a, 0x99, + 0xcf, 0xaa, 0x31, 0x37, 0x0c, 0x6f, 0x3a, 0xa0, + 0xf8, 0x53, 0xc4, 0x74, 0xd1, 0x91, 0x0a, 0x46, + 0xf5, 0x38, 0x3b, 0x5c, 0x09, 0xd8, 0x97, 0xdc, + 0x4b, 0xaa, 0x70, 0x26, 0x48, 0xf2, 0xd6, 0x0b, + 0x31, 0xc9, 0xf8, 0xd4, 0x98, 0x43, 0xe1, 0x6c, + 0xd5, 0xc7, 0xb2, 0x8e, 0x0b, 0x01, 0xe6, 0xb6, + 0x00, 0x28, 0x80, 0x7b, 0xfc, 0x96, 0x8f, 0x0d, + 0xa2, 0x4f, 0xb0, 0x79, 0xaf, 0xdc, 0x61, 0x28, + 0x63, 0x33, 0x78, 0xf6, 0x31, 0x39, 0xfd, 0x8a, + 0xf4, 0x15, 0x18, 0x11, 0xfe, 0xdb, 0xd5, 0x07, + 0xda, 0x2c, 0xed, 0x49, 0xa0, 0x23, 0xbf, 0xd0, + 0x3a, 0x38, 0x1d, 0x54, 0xae, 0x1c, 0x7b, 0xea, + 0x29, 0xee, 0xd0, 0x38, 0xc1, 0x76, 0xa7, 0x7f, + 0x2a, 0xf4, 0xce, 0x1e, 0xac, 0xcc, 0x94, 0x79, + 0x90, 0x33, 0x00, 0x8b, 0x30, 0x81, 0x88, 0x02, + 0x42, 0x00, 0xc6, 0x85, 0x8e, 0x06, 0xb7, 0x04, + 0x04, 0xe9, 0xcd, 0x9e, 0x3e, 0xcb, 0x66, 0x23, + 0x95, 0xb4, 0x42, 0x9c, 0x64, 0x81, 0x39, 0x05, + 0x3f, 0xb5, 0x21, 0xf8, 0x28, 0xaf, 0x60, 0x6b, + 0x4d, 0x3d, 0xba, 0xa1, 0x4b, 0x5e, 0x77, 0xef, + 0xe7, 0x59, 0x28, 0xfe, 0x1d, 0xc1, 0x27, 0xa2, + 0xff, 0xa8, 0xde, 0x33, 0x48, 0xb3, 0xc1, 0x85, + 0x6a, 0x42, 0x9b, 0xf9, 0x7e, 0x7e, 0x31, 0xc2, + 0xe5, 0xbd, 0x66, 0x02, 0x42, 0x00, 0xad, 0x7d, + 0x06, 0x35, 0xab, 0xec, 0x8d, 0xac, 0xd4, 0xba, + 0x1b, 0x49, 0x5e, 0x05, 0x5f, 0xf0, 0x97, 0x93, + 0x82, 0xb8, 0x2b, 0x8d, 0x91, 0x98, 0x63, 0x8e, + 0xb4, 0x14, 0x62, 0xdb, 0x1e, 0xc9, 0x2b, 0x30, + 0xf8, 0x41, 0x9b, 0xa6, 0xe6, 0xbc, 0xde, 0x0e, + 0x68, 0x30, 0x21, 0xf4, 0xa8, 0xa9, 0x1b, 0xec, + 0x44, 0x4f, 0x5d, 0x02, 0x2f, 0x60, 0x45, 0x60, + 0xba, 0xe0, 0x4e, 0xc0, 0xd4, 0x3b, 0x01, 0x16, + 0x03, 0x01, 0x00, 0x09, 0x0d, 0x00, 0x00, 0x05, + 0x02, 0x01, 0x40, 0x00, 0x00, 0x16, 0x03, 0x01, + 0x00, 0x04, 0x0e, 0x00, 0x00, 0x00, + }, + { + 0x16, 0x03, 0x01, 0x02, 0x0a, 0x0b, 0x00, 0x02, + 0x06, 0x00, 0x02, 0x03, 0x00, 0x02, 0x00, 0x30, + 0x82, 0x01, 0xfc, 0x30, 0x82, 0x01, 0x5e, 0x02, + 0x09, 0x00, 0x9a, 0x30, 0x84, 0x6c, 0x26, 0x35, + 0xd9, 0x17, 0x30, 0x09, 0x06, 0x07, 0x2a, 0x86, + 0x48, 0xce, 0x3d, 0x04, 0x01, 0x30, 0x45, 0x31, + 0x0b, 0x30, 
0x09, 0x06, 0x03, 0x55, 0x04, 0x06, + 0x13, 0x02, 0x41, 0x55, 0x31, 0x13, 0x30, 0x11, + 0x06, 0x03, 0x55, 0x04, 0x08, 0x13, 0x0a, 0x53, + 0x6f, 0x6d, 0x65, 0x2d, 0x53, 0x74, 0x61, 0x74, + 0x65, 0x31, 0x21, 0x30, 0x1f, 0x06, 0x03, 0x55, + 0x04, 0x0a, 0x13, 0x18, 0x49, 0x6e, 0x74, 0x65, + 0x72, 0x6e, 0x65, 0x74, 0x20, 0x57, 0x69, 0x64, + 0x67, 0x69, 0x74, 0x73, 0x20, 0x50, 0x74, 0x79, + 0x20, 0x4c, 0x74, 0x64, 0x30, 0x1e, 0x17, 0x0d, + 0x31, 0x32, 0x31, 0x31, 0x31, 0x34, 0x31, 0x33, + 0x32, 0x35, 0x35, 0x33, 0x5a, 0x17, 0x0d, 0x32, + 0x32, 0x31, 0x31, 0x31, 0x32, 0x31, 0x33, 0x32, + 0x35, 0x35, 0x33, 0x5a, 0x30, 0x41, 0x31, 0x0b, + 0x30, 0x09, 0x06, 0x03, 0x55, 0x04, 0x06, 0x13, + 0x02, 0x41, 0x55, 0x31, 0x0c, 0x30, 0x0a, 0x06, + 0x03, 0x55, 0x04, 0x08, 0x13, 0x03, 0x4e, 0x53, + 0x57, 0x31, 0x10, 0x30, 0x0e, 0x06, 0x03, 0x55, + 0x04, 0x07, 0x13, 0x07, 0x50, 0x79, 0x72, 0x6d, + 0x6f, 0x6e, 0x74, 0x31, 0x12, 0x30, 0x10, 0x06, + 0x03, 0x55, 0x04, 0x03, 0x13, 0x09, 0x4a, 0x6f, + 0x65, 0x6c, 0x20, 0x53, 0x69, 0x6e, 0x67, 0x30, + 0x81, 0x9b, 0x30, 0x10, 0x06, 0x07, 0x2a, 0x86, + 0x48, 0xce, 0x3d, 0x02, 0x01, 0x06, 0x05, 0x2b, + 0x81, 0x04, 0x00, 0x23, 0x03, 0x81, 0x86, 0x00, + 0x04, 0x00, 0x95, 0x8c, 0x91, 0x75, 0x14, 0xc0, + 0x5e, 0xc4, 0x57, 0xb4, 0xd4, 0xc3, 0x6f, 0x8d, + 0xae, 0x68, 0x1e, 0xdd, 0x6f, 0xce, 0x86, 0xe1, + 0x7e, 0x6e, 0xb2, 0x48, 0x3e, 0x81, 0xe5, 0x4e, + 0xe2, 0xc6, 0x88, 0x4b, 0x64, 0xdc, 0xf5, 0x30, + 0xbb, 0xd3, 0xff, 0x65, 0xcc, 0x5b, 0xf4, 0xdd, + 0xb5, 0x6a, 0x3e, 0x3e, 0xd0, 0x1d, 0xde, 0x47, + 0xc3, 0x76, 0xad, 0x19, 0xf6, 0x45, 0x2c, 0x8c, + 0xbc, 0xd8, 0x1d, 0x01, 0x4c, 0x1f, 0x70, 0x90, + 0x46, 0x76, 0x48, 0x8b, 0x8f, 0x83, 0xcc, 0x4a, + 0x5c, 0x8f, 0x40, 0x76, 0xda, 0xe0, 0x89, 0xec, + 0x1d, 0x2b, 0xc4, 0x4e, 0x30, 0x76, 0x28, 0x41, + 0xb2, 0x62, 0xa8, 0xfb, 0x5b, 0xf1, 0xf9, 0x4e, + 0x7a, 0x8d, 0xbd, 0x09, 0xb8, 0xae, 0xea, 0x8b, + 0x18, 0x27, 0x4f, 0x2e, 0x70, 0xfe, 0x13, 0x96, + 0xba, 0xc3, 0xd3, 0x40, 0x16, 0xcd, 0x65, 0x4e, + 0xac, 0x11, 0x1e, 0xe6, 0xf1, 0x30, 0x09, 0x06, + 0x07, 0x2a, 0x86, 0x48, 0xce, 0x3d, 0x04, 0x01, + 0x03, 0x81, 0x8c, 0x00, 0x30, 0x81, 0x88, 0x02, + 0x42, 0x00, 0xe0, 0x14, 0xc4, 0x60, 0x60, 0x0b, + 0x72, 0x68, 0xb0, 0x32, 0x5d, 0x61, 0x4a, 0x02, + 0x74, 0x5c, 0xc2, 0x81, 0xb9, 0x16, 0xa8, 0x3f, + 0x29, 0xc8, 0x36, 0xc7, 0x81, 0xff, 0x6c, 0xb6, + 0x5b, 0xd9, 0x70, 0xf1, 0x38, 0x3b, 0x50, 0x48, + 0x28, 0x94, 0xcb, 0x09, 0x1a, 0x52, 0xf1, 0x5d, + 0xee, 0x8d, 0xf2, 0xb9, 0xf0, 0xf0, 0xda, 0xd9, + 0x15, 0x3a, 0xf9, 0xbd, 0x03, 0x7a, 0x87, 0xa2, + 0x23, 0x35, 0xec, 0x02, 0x42, 0x01, 0xa3, 0xd4, + 0x8a, 0x78, 0x35, 0x1c, 0x4a, 0x9a, 0x23, 0xd2, + 0x0a, 0xbe, 0x2b, 0x10, 0x31, 0x9d, 0x9c, 0x5f, + 0xbe, 0xe8, 0x91, 0xb3, 0xda, 0x1a, 0xf5, 0x5d, + 0xa3, 0x23, 0xf5, 0x26, 0x8b, 0x45, 0x70, 0x8d, + 0x65, 0x62, 0x9b, 0x7e, 0x01, 0x99, 0x3d, 0x18, + 0xf6, 0x10, 0x9a, 0x38, 0x61, 0x9b, 0x2e, 0x57, + 0xe4, 0xfa, 0xcc, 0xb1, 0x8a, 0xce, 0xe2, 0x23, + 0xa0, 0x87, 0xf0, 0xe1, 0x67, 0x51, 0xeb, 0x16, + 0x03, 0x01, 0x00, 0x8a, 0x10, 0x00, 0x00, 0x86, + 0x85, 0x04, 0x00, 0xcd, 0x1c, 0xe8, 0x66, 0x5b, + 0xa8, 0x9d, 0x83, 0x2f, 0x7e, 0x1d, 0x0b, 0x59, + 0x23, 0xbc, 0x30, 0xcf, 0xa3, 0xaf, 0x21, 0xdc, + 0xf2, 0x57, 0x49, 0x56, 0x30, 0x25, 0x7c, 0x84, + 0x5d, 0xad, 0xaa, 0x9c, 0x7b, 0x2a, 0x95, 0x58, + 0x3d, 0x30, 0x87, 0x01, 0x3b, 0xb7, 0xea, 0xcb, + 0xc4, 0xa3, 0xeb, 0x22, 0xbf, 0x2d, 0x61, 0x17, + 0x8c, 0x9b, 0xe8, 0x1b, 0xb2, 0x87, 0x16, 0x78, + 0xd5, 0xfd, 0x8b, 0xdd, 0x00, 0x0f, 0xda, 0x8e, + 0xfd, 0x28, 0x36, 0xeb, 0xe4, 0xc5, 0x42, 0x14, + 0xc7, 0xbd, 
0x29, 0x5e, 0x9a, 0xed, 0x5e, 0xc1, + 0xf7, 0xf4, 0xbd, 0xbd, 0x15, 0x9c, 0xe8, 0x44, + 0x71, 0xa7, 0xb6, 0xe9, 0xfa, 0x7e, 0x97, 0xcb, + 0x96, 0x3e, 0x53, 0x76, 0xfb, 0x11, 0x1f, 0x36, + 0x8f, 0x30, 0xfb, 0x71, 0x3a, 0x75, 0x3a, 0x25, + 0x7b, 0xa2, 0xc1, 0xf9, 0x3e, 0x58, 0x5f, 0x07, + 0x16, 0xed, 0xe1, 0xf7, 0xc1, 0xb1, 0x16, 0x03, + 0x01, 0x00, 0x90, 0x0f, 0x00, 0x00, 0x8c, 0x00, + 0x8a, 0x30, 0x81, 0x87, 0x02, 0x42, 0x00, 0xb2, + 0xd3, 0x91, 0xe6, 0xd5, 0x9b, 0xb2, 0xb8, 0x03, + 0xf4, 0x85, 0x4d, 0x43, 0x79, 0x1f, 0xb6, 0x6f, + 0x0c, 0xcd, 0x67, 0x5f, 0x5e, 0xca, 0xee, 0xb3, + 0xe4, 0xab, 0x1e, 0x58, 0xc3, 0x04, 0xa9, 0x8a, + 0xa7, 0xcf, 0xaa, 0x33, 0x88, 0xd5, 0x35, 0xd2, + 0x80, 0x8f, 0xfa, 0x1b, 0x3c, 0x3d, 0xf7, 0x80, + 0x50, 0xde, 0x80, 0x30, 0x64, 0xee, 0xc0, 0xb3, + 0x91, 0x6e, 0x5d, 0x1e, 0xc0, 0xdc, 0x3a, 0x93, + 0x02, 0x41, 0x4e, 0xca, 0x98, 0x41, 0x8c, 0x36, + 0xf2, 0x12, 0xbf, 0x8e, 0x0f, 0x69, 0x8e, 0xf8, + 0x7b, 0x9d, 0xba, 0x9c, 0x5c, 0x48, 0x79, 0xf4, + 0xba, 0x3d, 0x06, 0xa5, 0xab, 0x47, 0xe0, 0x1a, + 0x45, 0x28, 0x3a, 0x8f, 0xbf, 0x14, 0x24, 0x36, + 0xd1, 0x1d, 0x29, 0xdc, 0xde, 0x72, 0x5b, 0x76, + 0x41, 0x67, 0xe8, 0xe5, 0x71, 0x4a, 0x77, 0xe9, + 0xed, 0x02, 0x19, 0xdd, 0xe4, 0xaa, 0xe9, 0x2d, + 0xe7, 0x47, 0x32, 0x14, 0x03, 0x01, 0x00, 0x01, + 0x01, 0x16, 0x03, 0x01, 0x00, 0x30, 0xfa, 0xc3, + 0xf2, 0x35, 0xd0, 0x6d, 0x32, 0x78, 0x6a, 0xd6, + 0xe6, 0x70, 0x5e, 0x00, 0x4c, 0x35, 0xf1, 0xe0, + 0x21, 0xcf, 0xc3, 0x78, 0xcd, 0xe0, 0x2b, 0x0b, + 0xf4, 0xeb, 0xf9, 0xc0, 0x38, 0xf2, 0x9a, 0x31, + 0x55, 0x07, 0x2b, 0x8d, 0x68, 0x40, 0x31, 0x08, + 0xaa, 0xe3, 0x16, 0xcf, 0x4b, 0xd4, + }, + { + 0x16, 0x03, 0x01, 0x02, 0x76, 0x04, 0x00, 0x02, + 0x72, 0x00, 0x00, 0x00, 0x00, 0x02, 0x6c, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x65, + 0xe8, 0x8b, 0xde, 0xef, 0xba, 0xf9, 0xdb, 0x95, + 0x24, 0xa5, 0x49, 0xb3, 0x23, 0xd8, 0x73, 0x88, + 0x50, 0x42, 0xed, 0xeb, 0xa3, 0xd8, 0xab, 0x31, + 0x9c, 0xd0, 0x00, 0x01, 0xef, 0xc0, 0xbf, 0xab, + 0x59, 0x55, 0xb5, 0xb9, 0xef, 0xa5, 0xa6, 0xec, + 0x69, 0xed, 0x00, 0x2f, 0x47, 0xdb, 0x75, 0x52, + 0x0c, 0xe5, 0x86, 0xb7, 0x02, 0x59, 0x22, 0xf7, + 0xfd, 0x8b, 0xff, 0xa4, 0x09, 0xc0, 0x1c, 0x10, + 0x80, 0x10, 0x7f, 0x4c, 0x7a, 0x94, 0x40, 0x10, + 0x0d, 0xda, 0x8a, 0xe5, 0x4a, 0xbc, 0xd0, 0xc0, + 0x4b, 0xa5, 0x33, 0x97, 0xc6, 0xe7, 0x40, 0x7f, + 0x7f, 0x8c, 0xf9, 0xf8, 0xc8, 0xb8, 0xfb, 0x8c, + 0xdd, 0x28, 0x81, 0xae, 0xfd, 0x37, 0x20, 0x3a, + 0x40, 0x37, 0x99, 0xc4, 0x21, 0x01, 0xc4, 0x91, + 0xb0, 0x5e, 0x11, 0xc5, 0xa9, 0xfd, 0x9a, 0x02, + 0x7e, 0x97, 0x6a, 0x86, 0x89, 0xb8, 0xc1, 0x32, + 0x4c, 0x7e, 0x6d, 0x47, 0x61, 0x0e, 0xe3, 0xc2, + 0xf0, 0x62, 0x3c, 0xc6, 0x71, 0x4f, 0xbb, 0x47, + 0x65, 0xb1, 0xd9, 0x22, 0x79, 0x15, 0xea, 0x1f, + 0x4b, 0x2a, 0x8a, 0xa4, 0xc8, 0x73, 0x34, 0xba, + 0x83, 0xe4, 0x70, 0x99, 0xc9, 0xcf, 0xbe, 0x64, + 0x99, 0xb9, 0xfa, 0xe9, 0xaf, 0x5d, 0xc7, 0x20, + 0x26, 0xde, 0xc5, 0x06, 0x12, 0x36, 0x4f, 0x4d, + 0xc0, 0xbb, 0x81, 0x5b, 0x5e, 0x38, 0xc3, 0x07, + 0x21, 0x04, 0x1a, 0x53, 0x9c, 0x59, 0xac, 0x2d, + 0xe6, 0xa5, 0x93, 0xa5, 0x19, 0xc6, 0xb0, 0xf7, + 0x56, 0x5d, 0xdf, 0xd1, 0xf4, 0xfd, 0x44, 0x6d, + 0xc6, 0xa2, 0x31, 0xa7, 0x35, 0x42, 0x18, 0x50, + 0x0c, 0x4f, 0x6e, 0xe3, 0x3b, 0xa3, 0xaa, 0x1c, + 0xbe, 0x41, 0x0d, 0xce, 0x6c, 0x62, 0xe1, 0x96, + 0x2d, 0xbd, 0x14, 0x31, 0xe3, 0xc4, 0x5b, 0xbf, + 0xf6, 0xde, 0xec, 0x42, 0xe8, 0xc7, 0x2a, 0x0b, + 0xdb, 0x2d, 0x7c, 0xf0, 0x3f, 0x45, 0x32, 0x45, + 0x09, 0x47, 0x09, 0x0f, 0x21, 0x22, 0x45, 0x06, + 0x11, 0xb8, 0xf9, 
0xe6, 0x67, 0x90, 0x4b, 0x4a, + 0xde, 0x81, 0xfb, 0xeb, 0xe7, 0x9a, 0x08, 0x30, + 0xcf, 0x51, 0xe1, 0xd9, 0xfa, 0x79, 0xa3, 0xcc, + 0x65, 0x1a, 0x83, 0x86, 0xc9, 0x7a, 0x41, 0xf5, + 0xdf, 0xa0, 0x7c, 0x44, 0x23, 0x17, 0xf3, 0x62, + 0xe8, 0xa9, 0x31, 0x1e, 0x6b, 0x05, 0x4b, 0x4f, + 0x9d, 0x91, 0x46, 0x92, 0xa6, 0x25, 0x32, 0xca, + 0xa1, 0x75, 0xda, 0xe6, 0x80, 0x3e, 0x7f, 0xd1, + 0x26, 0x57, 0x07, 0x42, 0xe4, 0x91, 0xff, 0xbd, + 0x44, 0xae, 0x98, 0x5c, 0x1d, 0xdf, 0x11, 0xe3, + 0xae, 0x87, 0x5e, 0xb7, 0x69, 0xad, 0x34, 0x7f, + 0x3a, 0x07, 0x7c, 0xdf, 0xfc, 0x76, 0x17, 0x8b, + 0x62, 0xc8, 0xe1, 0x78, 0x2a, 0xc8, 0xb9, 0x8a, + 0xbb, 0x5c, 0xfb, 0x38, 0x74, 0x91, 0x6e, 0x12, + 0x0c, 0x1f, 0x8e, 0xe1, 0xc2, 0x01, 0xd8, 0x9d, + 0x23, 0x0f, 0xc4, 0x67, 0x5d, 0xe5, 0x67, 0x4b, + 0x94, 0x6e, 0x69, 0x72, 0x90, 0x2d, 0x52, 0x78, + 0x8e, 0x61, 0xba, 0xdf, 0x4e, 0xf5, 0xdc, 0xfb, + 0x73, 0xbe, 0x03, 0x70, 0xd9, 0x01, 0x30, 0xf3, + 0xa1, 0xbb, 0x9a, 0x5f, 0xec, 0x9e, 0xed, 0x8d, + 0xdd, 0x53, 0xfd, 0x60, 0xc3, 0x2b, 0x7a, 0x00, + 0x2c, 0xf9, 0x0a, 0x57, 0x47, 0x45, 0x43, 0xb3, + 0x23, 0x01, 0x9c, 0xee, 0x54, 0x4d, 0x58, 0xd3, + 0x71, 0x1c, 0xc9, 0xd3, 0x30, 0x9e, 0x14, 0xa5, + 0xf3, 0xbf, 0x4d, 0x9b, 0xb7, 0x13, 0x21, 0xae, + 0xd2, 0x8d, 0x6e, 0x6f, 0x1c, 0xcc, 0xb2, 0x41, + 0xb2, 0x64, 0x56, 0x83, 0xce, 0xd1, 0x0c, 0x79, + 0x32, 0x78, 0xef, 0xc5, 0x21, 0xb1, 0xe8, 0xc4, + 0x42, 0xa7, 0x8d, 0xc1, 0xfa, 0xa1, 0x9c, 0x3c, + 0x21, 0xd8, 0xe9, 0x90, 0xe2, 0x7c, 0x14, 0x26, + 0xfe, 0x61, 0x3e, 0xf9, 0x71, 0x1d, 0x5d, 0x49, + 0x3b, 0xb1, 0xb8, 0x42, 0xa1, 0xb8, 0x1c, 0x75, + 0x7d, 0xee, 0xed, 0xfc, 0xe6, 0x20, 0x2b, 0x9e, + 0x10, 0x52, 0xda, 0x56, 0x4d, 0x64, 0x6c, 0x41, + 0xc1, 0xf7, 0x60, 0x0c, 0x10, 0x65, 0x6f, 0xd4, + 0xe9, 0x9b, 0x0d, 0x83, 0x13, 0xc8, 0x5a, 0xa3, + 0x56, 0x2a, 0x42, 0xc6, 0x1c, 0xfe, 0xdb, 0xba, + 0x3d, 0x04, 0x12, 0xfd, 0x28, 0xeb, 0x78, 0xdd, + 0xbc, 0xc8, 0x0d, 0xa1, 0xce, 0xd4, 0x54, 0xbf, + 0xaf, 0xe1, 0x60, 0x0c, 0xa3, 0xc3, 0xc3, 0x62, + 0x58, 0xc1, 0x79, 0xa7, 0x95, 0x41, 0x09, 0x24, + 0xc6, 0x9a, 0x50, 0x14, 0x03, 0x01, 0x00, 0x01, + 0x01, 0x16, 0x03, 0x01, 0x00, 0x30, 0x4d, 0x7b, + 0x5f, 0x28, 0x5e, 0x68, 0x6c, 0xa3, 0x65, 0xc7, + 0x7e, 0x49, 0x6c, 0xb3, 0x67, 0xbb, 0xd0, 0x75, + 0xa2, 0x9e, 0x8c, 0x92, 0x4f, 0x8c, 0x33, 0x14, + 0x7c, 0x6c, 0xf1, 0x74, 0x97, 0xc3, 0xe0, 0x10, + 0xe9, 0x0d, 0xc2, 0x30, 0x5c, 0x23, 0xee, 0x1d, + 0x16, 0x2e, 0xb9, 0x96, 0x2b, 0x2d, 0x17, 0x03, + 0x01, 0x00, 0x20, 0xf2, 0xc8, 0xa7, 0x1b, 0x60, + 0x46, 0xee, 0xe5, 0x7e, 0xc9, 0x35, 0xb3, 0xf1, + 0x7c, 0x32, 0x0c, 0x85, 0x94, 0x59, 0x57, 0x27, + 0xb0, 0xbd, 0x52, 0x86, 0x90, 0xf1, 0xb7, 0x4d, + 0x1e, 0xc1, 0x16, 0x17, 0x03, 0x01, 0x00, 0x30, + 0xff, 0x85, 0x50, 0xdf, 0x3f, 0xfc, 0xa2, 0x61, + 0x1a, 0x12, 0xc0, 0x1e, 0x10, 0x32, 0x88, 0x50, + 0xa0, 0x2c, 0x80, 0xda, 0x77, 0xea, 0x09, 0x47, + 0xe0, 0x85, 0x07, 0x29, 0x45, 0x65, 0x19, 0xa3, + 0x8d, 0x99, 0xb8, 0xbf, 0xb6, 0xbc, 0x76, 0xe2, + 0x50, 0x24, 0x82, 0x0a, 0xfd, 0xdd, 0x35, 0x09, + 0x15, 0x03, 0x01, 0x00, 0x20, 0xe7, 0x36, 0xf6, + 0x61, 0xd2, 0x95, 0x3c, 0xb6, 0x65, 0x7b, 0xb2, + 0xb8, 0xdf, 0x03, 0x53, 0xeb, 0xf7, 0x16, 0xe0, + 0xe0, 0x15, 0x22, 0x71, 0x70, 0x62, 0x73, 0xad, + 0xb5, 0x1a, 0x77, 0x44, 0x57, + }, + }}, +} + +var aesGCMServerScript = [][]byte{ + { + 0x16, 0x03, 0x01, 0x01, 0x1c, 0x01, 0x00, 0x01, + 0x18, 0x03, 0x03, 0x52, 0x1e, 0x74, 0xf0, 0xb0, + 0xc1, 0x8b, 0x16, 0xf9, 0x74, 0xfc, 0x16, 0xc4, + 0x11, 0x18, 0x96, 0x08, 0x25, 0x38, 0x4f, 0x98, + 0x98, 0xbe, 0xb5, 0x61, 0xdf, 0x94, 0x15, 0xcc, + 0x9b, 0x61, 0xef, 0x00, 0x00, 
0x80, 0xc0, 0x30, + 0xc0, 0x2c, 0xc0, 0x28, 0xc0, 0x24, 0xc0, 0x14, + 0xc0, 0x0a, 0x00, 0xa3, 0x00, 0x9f, 0x00, 0x6b, + 0x00, 0x6a, 0x00, 0x39, 0x00, 0x38, 0xc0, 0x32, + 0xc0, 0x2e, 0xc0, 0x2a, 0xc0, 0x26, 0xc0, 0x0f, + 0xc0, 0x05, 0x00, 0x9d, 0x00, 0x3d, 0x00, 0x35, + 0xc0, 0x12, 0xc0, 0x08, 0x00, 0x16, 0x00, 0x13, + 0xc0, 0x0d, 0xc0, 0x03, 0x00, 0x0a, 0xc0, 0x2f, + 0xc0, 0x2b, 0xc0, 0x27, 0xc0, 0x23, 0xc0, 0x13, + 0xc0, 0x09, 0x00, 0xa2, 0x00, 0x9e, 0x00, 0x67, + 0x00, 0x40, 0x00, 0x33, 0x00, 0x32, 0xc0, 0x31, + 0xc0, 0x2d, 0xc0, 0x29, 0xc0, 0x25, 0xc0, 0x0e, + 0xc0, 0x04, 0x00, 0x9c, 0x00, 0x3c, 0x00, 0x2f, + 0xc0, 0x11, 0xc0, 0x07, 0xc0, 0x0c, 0xc0, 0x02, + 0x00, 0x05, 0x00, 0x04, 0x00, 0x15, 0x00, 0x12, + 0x00, 0x09, 0x00, 0x14, 0x00, 0x11, 0x00, 0x08, + 0x00, 0x06, 0x00, 0x03, 0x00, 0xff, 0x01, 0x00, + 0x00, 0x6f, 0x00, 0x0b, 0x00, 0x04, 0x03, 0x00, + 0x01, 0x02, 0x00, 0x0a, 0x00, 0x34, 0x00, 0x32, + 0x00, 0x0e, 0x00, 0x0d, 0x00, 0x19, 0x00, 0x0b, + 0x00, 0x0c, 0x00, 0x18, 0x00, 0x09, 0x00, 0x0a, + 0x00, 0x16, 0x00, 0x17, 0x00, 0x08, 0x00, 0x06, + 0x00, 0x07, 0x00, 0x14, 0x00, 0x15, 0x00, 0x04, + 0x00, 0x05, 0x00, 0x12, 0x00, 0x13, 0x00, 0x01, + 0x00, 0x02, 0x00, 0x03, 0x00, 0x0f, 0x00, 0x10, + 0x00, 0x11, 0x00, 0x23, 0x00, 0x00, 0x00, 0x0d, + 0x00, 0x22, 0x00, 0x20, 0x06, 0x01, 0x06, 0x02, + 0x06, 0x03, 0x05, 0x01, 0x05, 0x02, 0x05, 0x03, + 0x04, 0x01, 0x04, 0x02, 0x04, 0x03, 0x03, 0x01, + 0x03, 0x02, 0x03, 0x03, 0x02, 0x01, 0x02, 0x02, + 0x02, 0x03, 0x01, 0x01, 0x00, 0x0f, 0x00, 0x01, + 0x01, + }, + { + 0x16, 0x03, 0x03, 0x00, 0x30, 0x02, 0x00, 0x00, + 0x2c, 0x03, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0xc0, 0x2f, 0x00, 0x00, + 0x04, 0x00, 0x23, 0x00, 0x00, 0x16, 0x03, 0x03, + 0x02, 0xbe, 0x0b, 0x00, 0x02, 0xba, 0x00, 0x02, + 0xb7, 0x00, 0x02, 0xb4, 0x30, 0x82, 0x02, 0xb0, + 0x30, 0x82, 0x02, 0x19, 0xa0, 0x03, 0x02, 0x01, + 0x02, 0x02, 0x09, 0x00, 0x85, 0xb0, 0xbb, 0xa4, + 0x8a, 0x7f, 0xb8, 0xca, 0x30, 0x0d, 0x06, 0x09, + 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x01, + 0x05, 0x05, 0x00, 0x30, 0x45, 0x31, 0x0b, 0x30, + 0x09, 0x06, 0x03, 0x55, 0x04, 0x06, 0x13, 0x02, + 0x41, 0x55, 0x31, 0x13, 0x30, 0x11, 0x06, 0x03, + 0x55, 0x04, 0x08, 0x13, 0x0a, 0x53, 0x6f, 0x6d, + 0x65, 0x2d, 0x53, 0x74, 0x61, 0x74, 0x65, 0x31, + 0x21, 0x30, 0x1f, 0x06, 0x03, 0x55, 0x04, 0x0a, + 0x13, 0x18, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x6e, + 0x65, 0x74, 0x20, 0x57, 0x69, 0x64, 0x67, 0x69, + 0x74, 0x73, 0x20, 0x50, 0x74, 0x79, 0x20, 0x4c, + 0x74, 0x64, 0x30, 0x1e, 0x17, 0x0d, 0x31, 0x30, + 0x30, 0x34, 0x32, 0x34, 0x30, 0x39, 0x30, 0x39, + 0x33, 0x38, 0x5a, 0x17, 0x0d, 0x31, 0x31, 0x30, + 0x34, 0x32, 0x34, 0x30, 0x39, 0x30, 0x39, 0x33, + 0x38, 0x5a, 0x30, 0x45, 0x31, 0x0b, 0x30, 0x09, + 0x06, 0x03, 0x55, 0x04, 0x06, 0x13, 0x02, 0x41, + 0x55, 0x31, 0x13, 0x30, 0x11, 0x06, 0x03, 0x55, + 0x04, 0x08, 0x13, 0x0a, 0x53, 0x6f, 0x6d, 0x65, + 0x2d, 0x53, 0x74, 0x61, 0x74, 0x65, 0x31, 0x21, + 0x30, 0x1f, 0x06, 0x03, 0x55, 0x04, 0x0a, 0x13, + 0x18, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x65, + 0x74, 0x20, 0x57, 0x69, 0x64, 0x67, 0x69, 0x74, + 0x73, 0x20, 0x50, 0x74, 0x79, 0x20, 0x4c, 0x74, + 0x64, 0x30, 0x81, 0x9f, 0x30, 0x0d, 0x06, 0x09, + 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x01, + 0x01, 0x05, 0x00, 0x03, 0x81, 0x8d, 0x00, 0x30, + 0x81, 0x89, 0x02, 0x81, 0x81, 0x00, 0xbb, 0x79, + 0xd6, 0xf5, 0x17, 0xb5, 0xe5, 0xbf, 0x46, 0x10, + 0xd0, 0xdc, 0x69, 
0xbe, 0xe6, 0x2b, 0x07, 0x43, + 0x5a, 0xd0, 0x03, 0x2d, 0x8a, 0x7a, 0x43, 0x85, + 0xb7, 0x14, 0x52, 0xe7, 0xa5, 0x65, 0x4c, 0x2c, + 0x78, 0xb8, 0x23, 0x8c, 0xb5, 0xb4, 0x82, 0xe5, + 0xde, 0x1f, 0x95, 0x3b, 0x7e, 0x62, 0xa5, 0x2c, + 0xa5, 0x33, 0xd6, 0xfe, 0x12, 0x5c, 0x7a, 0x56, + 0xfc, 0xf5, 0x06, 0xbf, 0xfa, 0x58, 0x7b, 0x26, + 0x3f, 0xb5, 0xcd, 0x04, 0xd3, 0xd0, 0xc9, 0x21, + 0x96, 0x4a, 0xc7, 0xf4, 0x54, 0x9f, 0x5a, 0xbf, + 0xef, 0x42, 0x71, 0x00, 0xfe, 0x18, 0x99, 0x07, + 0x7f, 0x7e, 0x88, 0x7d, 0x7d, 0xf1, 0x04, 0x39, + 0xc4, 0xa2, 0x2e, 0xdb, 0x51, 0xc9, 0x7c, 0xe3, + 0xc0, 0x4c, 0x3b, 0x32, 0x66, 0x01, 0xcf, 0xaf, + 0xb1, 0x1d, 0xb8, 0x71, 0x9a, 0x1d, 0xdb, 0xdb, + 0x89, 0x6b, 0xae, 0xda, 0x2d, 0x79, 0x02, 0x03, + 0x01, 0x00, 0x01, 0xa3, 0x81, 0xa7, 0x30, 0x81, + 0xa4, 0x30, 0x1d, 0x06, 0x03, 0x55, 0x1d, 0x0e, + 0x04, 0x16, 0x04, 0x14, 0xb1, 0xad, 0xe2, 0x85, + 0x5a, 0xcf, 0xcb, 0x28, 0xdb, 0x69, 0xce, 0x23, + 0x69, 0xde, 0xd3, 0x26, 0x8e, 0x18, 0x88, 0x39, + 0x30, 0x75, 0x06, 0x03, 0x55, 0x1d, 0x23, 0x04, + 0x6e, 0x30, 0x6c, 0x80, 0x14, 0xb1, 0xad, 0xe2, + 0x85, 0x5a, 0xcf, 0xcb, 0x28, 0xdb, 0x69, 0xce, + 0x23, 0x69, 0xde, 0xd3, 0x26, 0x8e, 0x18, 0x88, + 0x39, 0xa1, 0x49, 0xa4, 0x47, 0x30, 0x45, 0x31, + 0x0b, 0x30, 0x09, 0x06, 0x03, 0x55, 0x04, 0x06, + 0x13, 0x02, 0x41, 0x55, 0x31, 0x13, 0x30, 0x11, + 0x06, 0x03, 0x55, 0x04, 0x08, 0x13, 0x0a, 0x53, + 0x6f, 0x6d, 0x65, 0x2d, 0x53, 0x74, 0x61, 0x74, + 0x65, 0x31, 0x21, 0x30, 0x1f, 0x06, 0x03, 0x55, + 0x04, 0x0a, 0x13, 0x18, 0x49, 0x6e, 0x74, 0x65, + 0x72, 0x6e, 0x65, 0x74, 0x20, 0x57, 0x69, 0x64, + 0x67, 0x69, 0x74, 0x73, 0x20, 0x50, 0x74, 0x79, + 0x20, 0x4c, 0x74, 0x64, 0x82, 0x09, 0x00, 0x85, + 0xb0, 0xbb, 0xa4, 0x8a, 0x7f, 0xb8, 0xca, 0x30, + 0x0c, 0x06, 0x03, 0x55, 0x1d, 0x13, 0x04, 0x05, + 0x30, 0x03, 0x01, 0x01, 0xff, 0x30, 0x0d, 0x06, + 0x09, 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, + 0x01, 0x05, 0x05, 0x00, 0x03, 0x81, 0x81, 0x00, + 0x08, 0x6c, 0x45, 0x24, 0xc7, 0x6b, 0xb1, 0x59, + 0xab, 0x0c, 0x52, 0xcc, 0xf2, 0xb0, 0x14, 0xd7, + 0x87, 0x9d, 0x7a, 0x64, 0x75, 0xb5, 0x5a, 0x95, + 0x66, 0xe4, 0xc5, 0x2b, 0x8e, 0xae, 0x12, 0x66, + 0x1f, 0xeb, 0x4f, 0x38, 0xb3, 0x6e, 0x60, 0xd3, + 0x92, 0xfd, 0xf7, 0x41, 0x08, 0xb5, 0x25, 0x13, + 0xb1, 0x18, 0x7a, 0x24, 0xfb, 0x30, 0x1d, 0xba, + 0xed, 0x98, 0xb9, 0x17, 0xec, 0xe7, 0xd7, 0x31, + 0x59, 0xdb, 0x95, 0xd3, 0x1d, 0x78, 0xea, 0x50, + 0x56, 0x5c, 0xd5, 0x82, 0x5a, 0x2d, 0x5a, 0x5f, + 0x33, 0xc4, 0xb6, 0xd8, 0xc9, 0x75, 0x90, 0x96, + 0x8c, 0x0f, 0x52, 0x98, 0xb5, 0xcd, 0x98, 0x1f, + 0x89, 0x20, 0x5f, 0xf2, 0xa0, 0x1c, 0xa3, 0x1b, + 0x96, 0x94, 0xdd, 0xa9, 0xfd, 0x57, 0xe9, 0x70, + 0xe8, 0x26, 0x6d, 0x71, 0x99, 0x9b, 0x26, 0x6e, + 0x38, 0x50, 0x29, 0x6c, 0x90, 0xa7, 0xbd, 0xd9, + 0x16, 0x03, 0x03, 0x01, 0x11, 0x0c, 0x00, 0x01, + 0x0d, 0x03, 0x00, 0x19, 0x85, 0x04, 0x01, 0x39, + 0xdc, 0xee, 0x44, 0x17, 0x5e, 0xdb, 0xd7, 0x27, + 0xaf, 0xb6, 0x56, 0xd9, 0xb4, 0x43, 0x5a, 0x99, + 0xcf, 0xaa, 0x31, 0x37, 0x0c, 0x6f, 0x3a, 0xa0, + 0xf8, 0x53, 0xc4, 0x74, 0xd1, 0x91, 0x0a, 0x46, + 0xf5, 0x38, 0x3b, 0x5c, 0x09, 0xd8, 0x97, 0xdc, + 0x4b, 0xaa, 0x70, 0x26, 0x48, 0xf2, 0xd6, 0x0b, + 0x31, 0xc9, 0xf8, 0xd4, 0x98, 0x43, 0xe1, 0x6c, + 0xd5, 0xc7, 0xb2, 0x8e, 0x0b, 0x01, 0xe6, 0xb6, + 0x00, 0x28, 0x80, 0x7b, 0xfc, 0x96, 0x8f, 0x0d, + 0xa2, 0x4f, 0xb0, 0x79, 0xaf, 0xdc, 0x61, 0x28, + 0x63, 0x33, 0x78, 0xf6, 0x31, 0x39, 0xfd, 0x8a, + 0xf4, 0x15, 0x18, 0x11, 0xfe, 0xdb, 0xd5, 0x07, + 0xda, 0x2c, 0xed, 0x49, 0xa0, 0x23, 0xbf, 0xd0, + 0x3a, 0x38, 0x1d, 0x54, 0xae, 0x1c, 0x7b, 0xea, + 0x29, 0xee, 0xd0, 
0x38, 0xc1, 0x76, 0xa7, 0x7f, + 0x2a, 0xf4, 0xce, 0x1e, 0xac, 0xcc, 0x94, 0x79, + 0x90, 0x33, 0x04, 0x01, 0x00, 0x80, 0x0d, 0x8e, + 0x79, 0xe6, 0x86, 0xf6, 0xb6, 0xfb, 0x6b, 0x6a, + 0xcc, 0x55, 0xe4, 0x80, 0x4d, 0xc5, 0x0c, 0xc6, + 0xa3, 0x9f, 0x1d, 0x39, 0xd2, 0x98, 0x57, 0x31, + 0xa2, 0x90, 0x73, 0xe8, 0xd2, 0xcd, 0xb0, 0x93, + 0x1a, 0x60, 0x0f, 0x38, 0x02, 0x3b, 0x1b, 0x25, + 0x56, 0xec, 0x44, 0xab, 0xbe, 0x2e, 0x0c, 0xc0, + 0x6e, 0x54, 0x91, 0x50, 0xd6, 0xb1, 0xa2, 0x98, + 0x14, 0xa8, 0x35, 0x62, 0x9d, 0xca, 0xfb, 0x0f, + 0x64, 0x2b, 0x05, 0xa0, 0xa0, 0x57, 0xef, 0xcd, + 0x95, 0x45, 0x13, 0x5a, 0x9b, 0x3d, 0xdb, 0x42, + 0x54, 0x7f, 0xb9, 0x17, 0x08, 0x7f, 0xb2, 0xf0, + 0xb1, 0xc3, 0xdf, 0x67, 0x95, 0xe2, 0x73, 0xf2, + 0x76, 0xa3, 0x97, 0xfd, 0x9c, 0x92, 0x4a, 0xdb, + 0x95, 0x1e, 0x91, 0x95, 0xae, 0x3d, 0xae, 0x58, + 0xb5, 0x03, 0x6f, 0x5c, 0x3a, 0x19, 0xab, 0x92, + 0xa5, 0x09, 0x6b, 0x40, 0x61, 0xb0, 0x16, 0x03, + 0x03, 0x00, 0x04, 0x0e, 0x00, 0x00, 0x00, + }, + { + 0x16, 0x03, 0x03, 0x00, 0x8a, 0x10, 0x00, 0x00, + 0x86, 0x85, 0x04, 0x01, 0xba, 0xb8, 0xad, 0x69, + 0x20, 0x5e, 0xc1, 0x61, 0xc3, 0x0f, 0xb4, 0x30, + 0x64, 0x66, 0x70, 0x96, 0x33, 0x3c, 0x8e, 0x12, + 0x56, 0xbf, 0x6d, 0xb8, 0x6d, 0xc6, 0xba, 0xea, + 0xfc, 0x38, 0xc0, 0x8b, 0x87, 0xa8, 0xf3, 0x87, + 0xa1, 0xd5, 0xb6, 0xb0, 0x72, 0xc7, 0xd4, 0x19, + 0x56, 0xa0, 0x91, 0xe1, 0x45, 0xc7, 0xf1, 0x7d, + 0xb0, 0x1d, 0x78, 0x18, 0xf6, 0x3d, 0xbf, 0x1a, + 0x23, 0x93, 0x0b, 0x19, 0xb1, 0x00, 0x56, 0xc9, + 0x5e, 0x89, 0xd4, 0x9d, 0xd9, 0x5b, 0xe0, 0xb8, + 0xff, 0x2f, 0x7d, 0x93, 0xae, 0x5b, 0xa5, 0x1f, + 0x1f, 0x2b, 0x09, 0xe5, 0xf6, 0x07, 0x26, 0xa3, + 0xed, 0xcb, 0x6a, 0x1a, 0xd6, 0x14, 0x83, 0x9b, + 0xd3, 0x9d, 0x47, 0x1b, 0xf3, 0x72, 0x5f, 0x69, + 0x21, 0x8f, 0xfa, 0x09, 0x38, 0x1a, 0x6b, 0x91, + 0xcf, 0x19, 0x32, 0x54, 0x58, 0x8e, 0xee, 0xaf, + 0xeb, 0x06, 0x9b, 0x3a, 0x34, 0x16, 0x66, 0x14, + 0x03, 0x03, 0x00, 0x01, 0x01, 0x16, 0x03, 0x03, + 0x00, 0x28, 0xc6, 0x96, 0x67, 0x62, 0xcc, 0x47, + 0x01, 0xb5, 0xbd, 0xb7, 0x24, 0xd3, 0xb6, 0xfd, + 0xb8, 0x46, 0xce, 0x82, 0x6d, 0x31, 0x1f, 0x15, + 0x11, 0x8f, 0xed, 0x62, 0x71, 0x5f, 0xae, 0xb6, + 0xa9, 0x0c, 0x24, 0x1d, 0xe8, 0x26, 0x51, 0xca, + 0x7c, 0x42, + }, + { + 0x16, 0x03, 0x03, 0x00, 0x72, 0x04, 0x00, 0x00, + 0x6e, 0x00, 0x00, 0x00, 0x00, 0x00, 0x68, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x65, + 0xea, 0x8b, 0xfb, 0xef, 0xba, 0xc8, 0x88, 0x94, + 0x44, 0x99, 0x5f, 0x02, 0x68, 0x3a, 0x12, 0x67, + 0x7f, 0xb9, 0x39, 0x71, 0x84, 0xe0, 0x30, 0xe6, + 0x90, 0x6c, 0xcf, 0x32, 0x29, 0x29, 0x5c, 0x5a, + 0x8b, 0x7d, 0xaa, 0x11, 0x28, 0x26, 0xb5, 0xce, + 0xd2, 0x88, 0xd5, 0xb0, 0x5f, 0x94, 0x37, 0xa2, + 0x48, 0xd9, 0x53, 0xb2, 0xab, 0x59, 0x23, 0x3d, + 0x81, 0x6e, 0x64, 0x89, 0xca, 0x1a, 0x84, 0x16, + 0xdf, 0x31, 0x10, 0xde, 0x52, 0x7f, 0x50, 0xf3, + 0xd9, 0x27, 0xa0, 0xe8, 0x34, 0x15, 0x9e, 0x11, + 0xdd, 0xba, 0xce, 0x40, 0x17, 0xf3, 0x67, 0x14, + 0x03, 0x03, 0x00, 0x01, 0x01, 0x16, 0x03, 0x03, + 0x00, 0x28, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x35, 0xcb, 0x17, 0x66, 0xee, 0xfd, + 0x27, 0xdb, 0xb8, 0xa8, 0x8a, 0xf1, 0x56, 0x67, + 0x89, 0x0d, 0x13, 0xac, 0xe2, 0x31, 0xb9, 0xa2, + 0x26, 0xbb, 0x1c, 0xcf, 0xd1, 0xb2, 0x48, 0x1d, + 0x0d, 0xb1, 0x17, 0x03, 0x03, 0x00, 0x25, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0xc0, + 0x89, 0x7c, 0x58, 0x6a, 0x9b, 0x00, 0x05, 0x8c, + 0x7f, 0x28, 0x54, 0x61, 0x44, 0x10, 0xee, 0x85, + 0x26, 0xa8, 0x04, 0xcd, 0xca, 0x85, 0x60, 0xf2, + 0xeb, 0x22, 0xbd, 0x9e, 0x15, 0x03, 0x03, 
0x00, + 0x1a, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x02, 0x10, 0xe4, 0xe5, 0xf9, 0x85, 0xe3, 0xb0, + 0xec, 0x84, 0x29, 0x91, 0x05, 0x7d, 0x86, 0xe3, + 0x97, 0xeb, 0xb2, + }, +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/core/tls/key_agreement.go b/vendor/github.com/Azure/azure-sdk-for-go/core/tls/key_agreement.go new file mode 100644 index 000000000..7e820c1e7 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/core/tls/key_agreement.go @@ -0,0 +1,400 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package tls + +import ( + "crypto" + "crypto/ecdsa" + "crypto/elliptic" + "crypto/md5" + "crypto/rsa" + "crypto/sha1" + "crypto/sha256" + "crypto/x509" + "encoding/asn1" + "errors" + "io" + "math/big" +) + +// rsaKeyAgreement implements the standard TLS key agreement where the client +// encrypts the pre-master secret to the server's public key. +type rsaKeyAgreement struct{} + +func (ka rsaKeyAgreement) generateServerKeyExchange(config *Config, cert *Certificate, clientHello *clientHelloMsg, hello *serverHelloMsg) (*serverKeyExchangeMsg, error) { + return nil, nil +} + +func (ka rsaKeyAgreement) processClientKeyExchange(config *Config, cert *Certificate, ckx *clientKeyExchangeMsg, version uint16) ([]byte, error) { + preMasterSecret := make([]byte, 48) + _, err := io.ReadFull(config.rand(), preMasterSecret[2:]) + if err != nil { + return nil, err + } + + if len(ckx.ciphertext) < 2 { + return nil, errors.New("bad ClientKeyExchange") + } + + ciphertext := ckx.ciphertext + if version != VersionSSL30 { + ciphertextLen := int(ckx.ciphertext[0])<<8 | int(ckx.ciphertext[1]) + if ciphertextLen != len(ckx.ciphertext)-2 { + return nil, errors.New("bad ClientKeyExchange") + } + ciphertext = ckx.ciphertext[2:] + } + + err = rsa.DecryptPKCS1v15SessionKey(config.rand(), cert.PrivateKey.(*rsa.PrivateKey), ciphertext, preMasterSecret) + if err != nil { + return nil, err + } + // We don't check the version number in the premaster secret. For one, + // by checking it, we would leak information about the validity of the + // encrypted pre-master secret. Secondly, it provides only a small + // benefit against a downgrade attack and some implementations send the + // wrong version anyway. See the discussion at the end of section + // 7.4.7.1 of RFC 4346. 
+ return preMasterSecret, nil +} + +func (ka rsaKeyAgreement) processServerKeyExchange(config *Config, clientHello *clientHelloMsg, serverHello *serverHelloMsg, cert *x509.Certificate, skx *serverKeyExchangeMsg) error { + return errors.New("unexpected ServerKeyExchange") +} + +func (ka rsaKeyAgreement) generateClientKeyExchange(config *Config, clientHello *clientHelloMsg, cert *x509.Certificate) ([]byte, *clientKeyExchangeMsg, error) { + preMasterSecret := make([]byte, 48) + preMasterSecret[0] = byte(clientHello.vers >> 8) + preMasterSecret[1] = byte(clientHello.vers) + _, err := io.ReadFull(config.rand(), preMasterSecret[2:]) + if err != nil { + return nil, nil, err + } + + encrypted, err := rsa.EncryptPKCS1v15(config.rand(), cert.PublicKey.(*rsa.PublicKey), preMasterSecret) + if err != nil { + return nil, nil, err + } + ckx := new(clientKeyExchangeMsg) + ckx.ciphertext = make([]byte, len(encrypted)+2) + ckx.ciphertext[0] = byte(len(encrypted) >> 8) + ckx.ciphertext[1] = byte(len(encrypted)) + copy(ckx.ciphertext[2:], encrypted) + return preMasterSecret, ckx, nil +} + +// sha1Hash calculates a SHA1 hash over the given byte slices. +func sha1Hash(slices [][]byte) []byte { + hsha1 := sha1.New() + for _, slice := range slices { + hsha1.Write(slice) + } + return hsha1.Sum(nil) +} + +// md5SHA1Hash implements TLS 1.0's hybrid hash function which consists of the +// concatenation of an MD5 and SHA1 hash. +func md5SHA1Hash(slices [][]byte) []byte { + md5sha1 := make([]byte, md5.Size+sha1.Size) + hmd5 := md5.New() + for _, slice := range slices { + hmd5.Write(slice) + } + copy(md5sha1, hmd5.Sum(nil)) + copy(md5sha1[md5.Size:], sha1Hash(slices)) + return md5sha1 +} + +// sha256Hash implements TLS 1.2's hash function. +func sha256Hash(slices [][]byte) []byte { + h := sha256.New() + for _, slice := range slices { + h.Write(slice) + } + return h.Sum(nil) +} + +// hashForServerKeyExchange hashes the given slices and returns their digest +// and the identifier of the hash function used. The hashFunc argument is only +// used for >= TLS 1.2 and precisely identifies the hash function to use. +func hashForServerKeyExchange(sigType, hashFunc uint8, version uint16, slices ...[]byte) ([]byte, crypto.Hash, error) { + if version >= VersionTLS12 { + switch hashFunc { + case hashSHA256: + return sha256Hash(slices), crypto.SHA256, nil + case hashSHA1: + return sha1Hash(slices), crypto.SHA1, nil + default: + return nil, crypto.Hash(0), errors.New("tls: unknown hash function used by peer") + } + } + if sigType == signatureECDSA { + return sha1Hash(slices), crypto.SHA1, nil + } + return md5SHA1Hash(slices), crypto.MD5SHA1, nil +} + +// pickTLS12HashForSignature returns a TLS 1.2 hash identifier for signing a +// ServerKeyExchange given the signature type being used and the client's +// advertized list of supported signature and hash combinations. +func pickTLS12HashForSignature(sigType uint8, clientSignatureAndHashes []signatureAndHash) (uint8, error) { + if len(clientSignatureAndHashes) == 0 { + // If the client didn't specify any signature_algorithms + // extension then we can assume that it supports SHA1. 
See + // http://tools.ietf.org/html/rfc5246#section-7.4.1.4.1 + return hashSHA1, nil + } + + for _, sigAndHash := range clientSignatureAndHashes { + if sigAndHash.signature != sigType { + continue + } + switch sigAndHash.hash { + case hashSHA1, hashSHA256: + return sigAndHash.hash, nil + } + } + + return 0, errors.New("tls: client doesn't support any common hash functions") +} + +// ecdheRSAKeyAgreement implements a TLS key agreement where the server +// generates a ephemeral EC public/private key pair and signs it. The +// pre-master secret is then calculated using ECDH. The signature may +// either be ECDSA or RSA. +type ecdheKeyAgreement struct { + version uint16 + sigType uint8 + privateKey []byte + curve elliptic.Curve + x, y *big.Int +} + +func (ka *ecdheKeyAgreement) generateServerKeyExchange(config *Config, cert *Certificate, clientHello *clientHelloMsg, hello *serverHelloMsg) (*serverKeyExchangeMsg, error) { + var curveid uint16 + +Curve: + for _, c := range clientHello.supportedCurves { + switch c { + case curveP256: + ka.curve = elliptic.P256() + curveid = c + break Curve + case curveP384: + ka.curve = elliptic.P384() + curveid = c + break Curve + case curveP521: + ka.curve = elliptic.P521() + curveid = c + break Curve + } + } + + if curveid == 0 { + return nil, errors.New("tls: no supported elliptic curves offered") + } + + var x, y *big.Int + var err error + ka.privateKey, x, y, err = elliptic.GenerateKey(ka.curve, config.rand()) + if err != nil { + return nil, err + } + ecdhePublic := elliptic.Marshal(ka.curve, x, y) + + // http://tools.ietf.org/html/rfc4492#section-5.4 + serverECDHParams := make([]byte, 1+2+1+len(ecdhePublic)) + serverECDHParams[0] = 3 // named curve + serverECDHParams[1] = byte(curveid >> 8) + serverECDHParams[2] = byte(curveid) + serverECDHParams[3] = byte(len(ecdhePublic)) + copy(serverECDHParams[4:], ecdhePublic) + + var tls12HashId uint8 + if ka.version >= VersionTLS12 { + if tls12HashId, err = pickTLS12HashForSignature(ka.sigType, clientHello.signatureAndHashes); err != nil { + return nil, err + } + } + + digest, hashFunc, err := hashForServerKeyExchange(ka.sigType, tls12HashId, ka.version, clientHello.random, hello.random, serverECDHParams) + if err != nil { + return nil, err + } + var sig []byte + switch ka.sigType { + case signatureECDSA: + privKey, ok := cert.PrivateKey.(*ecdsa.PrivateKey) + if !ok { + return nil, errors.New("ECDHE ECDSA requires an ECDSA server private key") + } + r, s, err := ecdsa.Sign(config.rand(), privKey, digest) + if err != nil { + return nil, errors.New("failed to sign ECDHE parameters: " + err.Error()) + } + sig, err = asn1.Marshal(ecdsaSignature{r, s}) + case signatureRSA: + privKey, ok := cert.PrivateKey.(*rsa.PrivateKey) + if !ok { + return nil, errors.New("ECDHE RSA requires a RSA server private key") + } + sig, err = rsa.SignPKCS1v15(config.rand(), privKey, hashFunc, digest) + if err != nil { + return nil, errors.New("failed to sign ECDHE parameters: " + err.Error()) + } + default: + return nil, errors.New("unknown ECDHE signature algorithm") + } + + skx := new(serverKeyExchangeMsg) + sigAndHashLen := 0 + if ka.version >= VersionTLS12 { + sigAndHashLen = 2 + } + skx.key = make([]byte, len(serverECDHParams)+sigAndHashLen+2+len(sig)) + copy(skx.key, serverECDHParams) + k := skx.key[len(serverECDHParams):] + if ka.version >= VersionTLS12 { + k[0] = tls12HashId + k[1] = ka.sigType + k = k[2:] + } + k[0] = byte(len(sig) >> 8) + k[1] = byte(len(sig)) + copy(k[2:], sig) + + return skx, nil +} + +func (ka *ecdheKeyAgreement) 
processClientKeyExchange(config *Config, cert *Certificate, ckx *clientKeyExchangeMsg, version uint16) ([]byte, error) { + if len(ckx.ciphertext) == 0 || int(ckx.ciphertext[0]) != len(ckx.ciphertext)-1 { + return nil, errors.New("bad ClientKeyExchange") + } + x, y := elliptic.Unmarshal(ka.curve, ckx.ciphertext[1:]) + if x == nil { + return nil, errors.New("bad ClientKeyExchange") + } + x, _ = ka.curve.ScalarMult(x, y, ka.privateKey) + preMasterSecret := make([]byte, (ka.curve.Params().BitSize+7)>>3) + xBytes := x.Bytes() + copy(preMasterSecret[len(preMasterSecret)-len(xBytes):], xBytes) + + return preMasterSecret, nil +} + +var errServerKeyExchange = errors.New("invalid ServerKeyExchange") + +func (ka *ecdheKeyAgreement) processServerKeyExchange(config *Config, clientHello *clientHelloMsg, serverHello *serverHelloMsg, cert *x509.Certificate, skx *serverKeyExchangeMsg) error { + if len(skx.key) < 4 { + return errServerKeyExchange + } + if skx.key[0] != 3 { // named curve + return errors.New("server selected unsupported curve") + } + curveid := uint16(skx.key[1])<<8 | uint16(skx.key[2]) + + switch curveid { + case curveP256: + ka.curve = elliptic.P256() + case curveP384: + ka.curve = elliptic.P384() + case curveP521: + ka.curve = elliptic.P521() + default: + return errors.New("server selected unsupported curve") + } + + publicLen := int(skx.key[3]) + if publicLen+4 > len(skx.key) { + return errServerKeyExchange + } + ka.x, ka.y = elliptic.Unmarshal(ka.curve, skx.key[4:4+publicLen]) + if ka.x == nil { + return errServerKeyExchange + } + serverECDHParams := skx.key[:4+publicLen] + + sig := skx.key[4+publicLen:] + if len(sig) < 2 { + return errServerKeyExchange + } + + var tls12HashId uint8 + if ka.version >= VersionTLS12 { + // handle SignatureAndHashAlgorithm + var sigAndHash []uint8 + sigAndHash, sig = sig[:2], sig[2:] + if sigAndHash[1] != ka.sigType { + return errServerKeyExchange + } + tls12HashId = sigAndHash[0] + if len(sig) < 2 { + return errServerKeyExchange + } + } + sigLen := int(sig[0])<<8 | int(sig[1]) + if sigLen+2 != len(sig) { + return errServerKeyExchange + } + sig = sig[2:] + + digest, hashFunc, err := hashForServerKeyExchange(ka.sigType, tls12HashId, ka.version, clientHello.random, serverHello.random, serverECDHParams) + if err != nil { + return err + } + switch ka.sigType { + case signatureECDSA: + pubKey, ok := cert.PublicKey.(*ecdsa.PublicKey) + if !ok { + return errors.New("ECDHE ECDSA requires a ECDSA server public key") + } + ecdsaSig := new(ecdsaSignature) + if _, err := asn1.Unmarshal(sig, ecdsaSig); err != nil { + return err + } + if ecdsaSig.R.Sign() <= 0 || ecdsaSig.S.Sign() <= 0 { + return errors.New("ECDSA signature contained zero or negative values") + } + if !ecdsa.Verify(pubKey, digest, ecdsaSig.R, ecdsaSig.S) { + return errors.New("ECDSA verification failure") + } + case signatureRSA: + pubKey, ok := cert.PublicKey.(*rsa.PublicKey) + if !ok { + return errors.New("ECDHE RSA requires a RSA server public key") + } + if err := rsa.VerifyPKCS1v15(pubKey, hashFunc, digest, sig); err != nil { + return err + } + default: + return errors.New("unknown ECDHE signature algorithm") + } + + return nil +} + +func (ka *ecdheKeyAgreement) generateClientKeyExchange(config *Config, clientHello *clientHelloMsg, cert *x509.Certificate) ([]byte, *clientKeyExchangeMsg, error) { + if ka.curve == nil { + return nil, nil, errors.New("missing ServerKeyExchange message") + } + priv, mx, my, err := elliptic.GenerateKey(ka.curve, config.rand()) + if err != nil { + return nil, nil, err + 
} + x, _ := ka.curve.ScalarMult(ka.x, ka.y, priv) + preMasterSecret := make([]byte, (ka.curve.Params().BitSize+7)>>3) + xBytes := x.Bytes() + copy(preMasterSecret[len(preMasterSecret)-len(xBytes):], xBytes) + + serialized := elliptic.Marshal(ka.curve, mx, my) + + ckx := new(clientKeyExchangeMsg) + ckx.ciphertext = make([]byte, 1+len(serialized)) + ckx.ciphertext[0] = byte(len(serialized)) + copy(ckx.ciphertext[1:], serialized) + + return preMasterSecret, ckx, nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/core/tls/prf.go b/vendor/github.com/Azure/azure-sdk-for-go/core/tls/prf.go new file mode 100644 index 000000000..fb8b3ab4d --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/core/tls/prf.go @@ -0,0 +1,291 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package tls + +import ( + "crypto" + "crypto/hmac" + "crypto/md5" + "crypto/sha1" + "crypto/sha256" + "hash" +) + +// Split a premaster secret in two as specified in RFC 4346, section 5. +func splitPreMasterSecret(secret []byte) (s1, s2 []byte) { + s1 = secret[0 : (len(secret)+1)/2] + s2 = secret[len(secret)/2:] + return +} + +// pHash implements the P_hash function, as defined in RFC 4346, section 5. +func pHash(result, secret, seed []byte, hash func() hash.Hash) { + h := hmac.New(hash, secret) + h.Write(seed) + a := h.Sum(nil) + + j := 0 + for j < len(result) { + h.Reset() + h.Write(a) + h.Write(seed) + b := h.Sum(nil) + todo := len(b) + if j+todo > len(result) { + todo = len(result) - j + } + copy(result[j:j+todo], b) + j += todo + + h.Reset() + h.Write(a) + a = h.Sum(nil) + } +} + +// prf10 implements the TLS 1.0 pseudo-random function, as defined in RFC 2246, section 5. +func prf10(result, secret, label, seed []byte) { + hashSHA1 := sha1.New + hashMD5 := md5.New + + labelAndSeed := make([]byte, len(label)+len(seed)) + copy(labelAndSeed, label) + copy(labelAndSeed[len(label):], seed) + + s1, s2 := splitPreMasterSecret(secret) + pHash(result, s1, labelAndSeed, hashMD5) + result2 := make([]byte, len(result)) + pHash(result2, s2, labelAndSeed, hashSHA1) + + for i, b := range result2 { + result[i] ^= b + } +} + +// prf12 implements the TLS 1.2 pseudo-random function, as defined in RFC 5246, section 5. +func prf12(result, secret, label, seed []byte) { + labelAndSeed := make([]byte, len(label)+len(seed)) + copy(labelAndSeed, label) + copy(labelAndSeed[len(label):], seed) + + pHash(result, secret, labelAndSeed, sha256.New) +} + +// prf30 implements the SSL 3.0 pseudo-random function, as defined in +// www.mozilla.org/projects/security/pki/nss/ssl/draft302.txt section 6. +func prf30(result, secret, label, seed []byte) { + hashSHA1 := sha1.New() + hashMD5 := md5.New() + + done := 0 + i := 0 + // RFC5246 section 6.3 says that the largest PRF output needed is 128 + // bytes. Since no more ciphersuites will be added to SSLv3, this will + // remain true. Each iteration gives us 16 bytes so 10 iterations will + // be sufficient. + var b [11]byte + for done < len(result) { + for j := 0; j <= i; j++ { + b[j] = 'A' + byte(i) + } + + hashSHA1.Reset() + hashSHA1.Write(b[:i+1]) + hashSHA1.Write(secret) + hashSHA1.Write(seed) + digest := hashSHA1.Sum(nil) + + hashMD5.Reset() + hashMD5.Write(secret) + hashMD5.Write(digest) + + done += copy(result[done:], hashMD5.Sum(nil)) + i++ + } +} + +const ( + tlsRandomLength = 32 // Length of a random nonce in TLS 1.1. 
+ masterSecretLength = 48 // Length of a master secret in TLS 1.1. + finishedVerifyLength = 12 // Length of verify_data in a Finished message. +) + +var masterSecretLabel = []byte("master secret") +var keyExpansionLabel = []byte("key expansion") +var clientFinishedLabel = []byte("client finished") +var serverFinishedLabel = []byte("server finished") + +func prfForVersion(version uint16) func(result, secret, label, seed []byte) { + switch version { + case VersionSSL30: + return prf30 + case VersionTLS10, VersionTLS11: + return prf10 + case VersionTLS12: + return prf12 + default: + panic("unknown version") + } +} + +// masterFromPreMasterSecret generates the master secret from the pre-master +// secret. See http://tools.ietf.org/html/rfc5246#section-8.1 +func masterFromPreMasterSecret(version uint16, preMasterSecret, clientRandom, serverRandom []byte) []byte { + var seed [tlsRandomLength * 2]byte + copy(seed[0:len(clientRandom)], clientRandom) + copy(seed[len(clientRandom):], serverRandom) + masterSecret := make([]byte, masterSecretLength) + prfForVersion(version)(masterSecret, preMasterSecret, masterSecretLabel, seed[0:]) + return masterSecret +} + +// keysFromMasterSecret generates the connection keys from the master +// secret, given the lengths of the MAC key, cipher key and IV, as defined in +// RFC 2246, section 6.3. +func keysFromMasterSecret(version uint16, masterSecret, clientRandom, serverRandom []byte, macLen, keyLen, ivLen int) (clientMAC, serverMAC, clientKey, serverKey, clientIV, serverIV []byte) { + var seed [tlsRandomLength * 2]byte + copy(seed[0:len(clientRandom)], serverRandom) + copy(seed[len(serverRandom):], clientRandom) + + n := 2*macLen + 2*keyLen + 2*ivLen + keyMaterial := make([]byte, n) + prfForVersion(version)(keyMaterial, masterSecret, keyExpansionLabel, seed[0:]) + clientMAC = keyMaterial[:macLen] + keyMaterial = keyMaterial[macLen:] + serverMAC = keyMaterial[:macLen] + keyMaterial = keyMaterial[macLen:] + clientKey = keyMaterial[:keyLen] + keyMaterial = keyMaterial[keyLen:] + serverKey = keyMaterial[:keyLen] + keyMaterial = keyMaterial[keyLen:] + clientIV = keyMaterial[:ivLen] + keyMaterial = keyMaterial[ivLen:] + serverIV = keyMaterial[:ivLen] + return +} + +func newFinishedHash(version uint16) finishedHash { + if version >= VersionTLS12 { + return finishedHash{sha256.New(), sha256.New(), nil, nil, version} + } + return finishedHash{sha1.New(), sha1.New(), md5.New(), md5.New(), version} +} + +// A finishedHash calculates the hash of a set of handshake messages suitable +// for including in a Finished message. +type finishedHash struct { + client hash.Hash + server hash.Hash + + // Prior to TLS 1.2, an additional MD5 hash is required. + clientMD5 hash.Hash + serverMD5 hash.Hash + + version uint16 +} + +func (h finishedHash) Write(msg []byte) (n int, err error) { + h.client.Write(msg) + h.server.Write(msg) + + if h.version < VersionTLS12 { + h.clientMD5.Write(msg) + h.serverMD5.Write(msg) + } + return len(msg), nil +} + +// finishedSum30 calculates the contents of the verify_data member of a SSLv3 +// Finished message given the MD5 and SHA1 hashes of a set of handshake +// messages. 
+func finishedSum30(md5, sha1 hash.Hash, masterSecret []byte, magic [4]byte) []byte { + md5.Write(magic[:]) + md5.Write(masterSecret) + md5.Write(ssl30Pad1[:]) + md5Digest := md5.Sum(nil) + + md5.Reset() + md5.Write(masterSecret) + md5.Write(ssl30Pad2[:]) + md5.Write(md5Digest) + md5Digest = md5.Sum(nil) + + sha1.Write(magic[:]) + sha1.Write(masterSecret) + sha1.Write(ssl30Pad1[:40]) + sha1Digest := sha1.Sum(nil) + + sha1.Reset() + sha1.Write(masterSecret) + sha1.Write(ssl30Pad2[:40]) + sha1.Write(sha1Digest) + sha1Digest = sha1.Sum(nil) + + ret := make([]byte, len(md5Digest)+len(sha1Digest)) + copy(ret, md5Digest) + copy(ret[len(md5Digest):], sha1Digest) + return ret +} + +var ssl3ClientFinishedMagic = [4]byte{0x43, 0x4c, 0x4e, 0x54} +var ssl3ServerFinishedMagic = [4]byte{0x53, 0x52, 0x56, 0x52} + +// clientSum returns the contents of the verify_data member of a client's +// Finished message. +func (h finishedHash) clientSum(masterSecret []byte) []byte { + if h.version == VersionSSL30 { + return finishedSum30(h.clientMD5, h.client, masterSecret, ssl3ClientFinishedMagic) + } + + out := make([]byte, finishedVerifyLength) + if h.version >= VersionTLS12 { + seed := h.client.Sum(nil) + prf12(out, masterSecret, clientFinishedLabel, seed) + } else { + seed := make([]byte, 0, md5.Size+sha1.Size) + seed = h.clientMD5.Sum(seed) + seed = h.client.Sum(seed) + prf10(out, masterSecret, clientFinishedLabel, seed) + } + return out +} + +// serverSum returns the contents of the verify_data member of a server's +// Finished message. +func (h finishedHash) serverSum(masterSecret []byte) []byte { + if h.version == VersionSSL30 { + return finishedSum30(h.serverMD5, h.server, masterSecret, ssl3ServerFinishedMagic) + } + + out := make([]byte, finishedVerifyLength) + if h.version >= VersionTLS12 { + seed := h.server.Sum(nil) + prf12(out, masterSecret, serverFinishedLabel, seed) + } else { + seed := make([]byte, 0, md5.Size+sha1.Size) + seed = h.serverMD5.Sum(seed) + seed = h.server.Sum(seed) + prf10(out, masterSecret, serverFinishedLabel, seed) + } + return out +} + +// hashForClientCertificate returns a digest, hash function, and TLS 1.2 hash +// id suitable for signing by a TLS client certificate. +func (h finishedHash) hashForClientCertificate(sigType uint8) ([]byte, crypto.Hash, uint8) { + if h.version >= VersionTLS12 { + digest := h.server.Sum(nil) + return digest, crypto.SHA256, hashSHA256 + } + if sigType == signatureECDSA { + digest := h.server.Sum(nil) + return digest, crypto.SHA1, hashSHA1 + } + + digest := make([]byte, 0, 36) + digest = h.serverMD5.Sum(digest) + digest = h.server.Sum(digest) + return digest, crypto.MD5SHA1, 0 /* not specified in TLS 1.2. */ +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/core/tls/prf_test.go b/vendor/github.com/Azure/azure-sdk-for-go/core/tls/prf_test.go new file mode 100644 index 000000000..a9b6c9e4c --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/core/tls/prf_test.go @@ -0,0 +1,126 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package tls + +import ( + "encoding/hex" + "testing" +) + +type testSplitPreMasterSecretTest struct { + in, out1, out2 string +} + +var testSplitPreMasterSecretTests = []testSplitPreMasterSecretTest{ + {"", "", ""}, + {"00", "00", "00"}, + {"0011", "00", "11"}, + {"001122", "0011", "1122"}, + {"00112233", "0011", "2233"}, +} + +func TestSplitPreMasterSecret(t *testing.T) { + for i, test := range testSplitPreMasterSecretTests { + in, _ := hex.DecodeString(test.in) + out1, out2 := splitPreMasterSecret(in) + s1 := hex.EncodeToString(out1) + s2 := hex.EncodeToString(out2) + if s1 != test.out1 || s2 != test.out2 { + t.Errorf("#%d: got: (%s, %s) want: (%s, %s)", i, s1, s2, test.out1, test.out2) + } + } +} + +type testKeysFromTest struct { + version uint16 + preMasterSecret string + clientRandom, serverRandom string + masterSecret string + clientMAC, serverMAC string + clientKey, serverKey string + macLen, keyLen int +} + +func TestKeysFromPreMasterSecret(t *testing.T) { + for i, test := range testKeysFromTests { + in, _ := hex.DecodeString(test.preMasterSecret) + clientRandom, _ := hex.DecodeString(test.clientRandom) + serverRandom, _ := hex.DecodeString(test.serverRandom) + + masterSecret := masterFromPreMasterSecret(test.version, in, clientRandom, serverRandom) + if s := hex.EncodeToString(masterSecret); s != test.masterSecret { + t.Errorf("#%d: bad master secret %s, want %s", i, s, test.masterSecret) + continue + } + + clientMAC, serverMAC, clientKey, serverKey, _, _ := keysFromMasterSecret(test.version, masterSecret, clientRandom, serverRandom, test.macLen, test.keyLen, 0) + clientMACString := hex.EncodeToString(clientMAC) + serverMACString := hex.EncodeToString(serverMAC) + clientKeyString := hex.EncodeToString(clientKey) + serverKeyString := hex.EncodeToString(serverKey) + if clientMACString != test.clientMAC || + serverMACString != test.serverMAC || + clientKeyString != test.clientKey || + serverKeyString != test.serverKey { + t.Errorf("#%d: got: (%s, %s, %s, %s) want: (%s, %s, %s, %s)", i, clientMACString, serverMACString, clientKeyString, serverKeyString, test.clientMAC, test.serverMAC, test.clientKey, test.serverKey) + } + } +} + +// These test vectors were generated from GnuTLS using `gnutls-cli --insecure -d 9 ` +var testKeysFromTests = []testKeysFromTest{ + { + VersionTLS10, + "0302cac83ad4b1db3b9ab49ad05957de2a504a634a386fc600889321e1a971f57479466830ac3e6f468e87f5385fa0c5", + "4ae66303755184a3917fcb44880605fcc53baa01912b22ed94473fc69cebd558", + "4ae663020ec16e6bb5130be918cfcafd4d765979a3136a5d50c593446e4e44db", + "3d851bab6e5556e959a16bc36d66cfae32f672bfa9ecdef6096cbb1b23472df1da63dbbd9827606413221d149ed08ceb", + "805aaa19b3d2c0a0759a4b6c9959890e08480119", + "2d22f9fe519c075c16448305ceee209fc24ad109", + "d50b5771244f850cd8117a9ccafe2cf1", + "e076e33206b30507a85c32855acd0919", + 20, + 16, + }, + { + VersionTLS10, + "03023f7527316bc12cbcd69e4b9e8275d62c028f27e65c745cfcddc7ce01bd3570a111378b63848127f1c36e5f9e4890", + "4ae66364b5ea56b20ce4e25555aed2d7e67f42788dd03f3fee4adae0459ab106", + "4ae66363ab815cbf6a248b87d6b556184e945e9b97fbdf247858b0bdafacfa1c", + "7d64be7c80c59b740200b4b9c26d0baaa1c5ae56705acbcf2307fe62beb4728c19392c83f20483801cce022c77645460", + "97742ed60a0554ca13f04f97ee193177b971e3b0", + "37068751700400e03a8477a5c7eec0813ab9e0dc", + "207cddbc600d2a200abac6502053ee5c", + "df3f94f6e1eacc753b815fe16055cd43", + 20, + 16, + }, + { + VersionTLS10, + 
"832d515f1d61eebb2be56ba0ef79879efb9b527504abb386fb4310ed5d0e3b1f220d3bb6b455033a2773e6d8bdf951d278a187482b400d45deb88a5d5a6bb7d6a7a1decc04eb9ef0642876cd4a82d374d3b6ff35f0351dc5d411104de431375355addc39bfb1f6329fb163b0bc298d658338930d07d313cd980a7e3d9196cac1", + "4ae663b2ee389c0de147c509d8f18f5052afc4aaf9699efe8cb05ece883d3a5e", + "4ae664d503fd4cff50cfc1fb8fc606580f87b0fcdac9554ba0e01d785bdf278e", + "1aff2e7a2c4279d0126f57a65a77a8d9d0087cf2733366699bec27eb53d5740705a8574bb1acc2abbe90e44f0dd28d6c", + "3c7647c93c1379a31a609542aa44e7f117a70085", + "0d73102994be74a575a3ead8532590ca32a526d4", + "ac7581b0b6c10d85bbd905ffbf36c65e", + "ff07edde49682b45466bd2e39464b306", + 20, + 16, + }, + { + VersionSSL30, + "832d515f1d61eebb2be56ba0ef79879efb9b527504abb386fb4310ed5d0e3b1f220d3bb6b455033a2773e6d8bdf951d278a187482b400d45deb88a5d5a6bb7d6a7a1decc04eb9ef0642876cd4a82d374d3b6ff35f0351dc5d411104de431375355addc39bfb1f6329fb163b0bc298d658338930d07d313cd980a7e3d9196cac1", + "4ae663b2ee389c0de147c509d8f18f5052afc4aaf9699efe8cb05ece883d3a5e", + "4ae664d503fd4cff50cfc1fb8fc606580f87b0fcdac9554ba0e01d785bdf278e", + "a614863e56299dcffeea2938f22c2ba023768dbe4b3f6877bc9c346c6ae529b51d9cb87ff9695ea4d01f2205584405b2", + "2c450d5b6f6e2013ac6bea6a0b32200d4e1ffb94", + "7a7a7438769536f2fb1ae49a61f0703b79b2dc53", + "f8f6b26c10f12855c9aafb1e0e839ccf", + "2b9d4b4a60cb7f396780ebff50650419", + 20, + 16, + }, +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/core/tls/ticket.go b/vendor/github.com/Azure/azure-sdk-for-go/core/tls/ticket.go new file mode 100644 index 000000000..4cfc5a53f --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/core/tls/ticket.go @@ -0,0 +1,182 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package tls + +import ( + "bytes" + "crypto/aes" + "crypto/cipher" + "crypto/hmac" + "crypto/sha256" + "crypto/subtle" + "errors" + "io" +) + +// sessionState contains the information that is serialized into a session +// ticket in order to later resume a connection. 
+type sessionState struct { + vers uint16 + cipherSuite uint16 + masterSecret []byte + certificates [][]byte +} + +func (s *sessionState) equal(i interface{}) bool { + s1, ok := i.(*sessionState) + if !ok { + return false + } + + if s.vers != s1.vers || + s.cipherSuite != s1.cipherSuite || + !bytes.Equal(s.masterSecret, s1.masterSecret) { + return false + } + + if len(s.certificates) != len(s1.certificates) { + return false + } + + for i := range s.certificates { + if !bytes.Equal(s.certificates[i], s1.certificates[i]) { + return false + } + } + + return true +} + +func (s *sessionState) marshal() []byte { + length := 2 + 2 + 2 + len(s.masterSecret) + 2 + for _, cert := range s.certificates { + length += 4 + len(cert) + } + + ret := make([]byte, length) + x := ret + x[0] = byte(s.vers >> 8) + x[1] = byte(s.vers) + x[2] = byte(s.cipherSuite >> 8) + x[3] = byte(s.cipherSuite) + x[4] = byte(len(s.masterSecret) >> 8) + x[5] = byte(len(s.masterSecret)) + x = x[6:] + copy(x, s.masterSecret) + x = x[len(s.masterSecret):] + + x[0] = byte(len(s.certificates) >> 8) + x[1] = byte(len(s.certificates)) + x = x[2:] + + for _, cert := range s.certificates { + x[0] = byte(len(cert) >> 24) + x[1] = byte(len(cert) >> 16) + x[2] = byte(len(cert) >> 8) + x[3] = byte(len(cert)) + copy(x[4:], cert) + x = x[4+len(cert):] + } + + return ret +} + +func (s *sessionState) unmarshal(data []byte) bool { + if len(data) < 8 { + return false + } + + s.vers = uint16(data[0])<<8 | uint16(data[1]) + s.cipherSuite = uint16(data[2])<<8 | uint16(data[3]) + masterSecretLen := int(data[4])<<8 | int(data[5]) + data = data[6:] + if len(data) < masterSecretLen { + return false + } + + s.masterSecret = data[:masterSecretLen] + data = data[masterSecretLen:] + + if len(data) < 2 { + return false + } + + numCerts := int(data[0])<<8 | int(data[1]) + data = data[2:] + + s.certificates = make([][]byte, numCerts) + for i := range s.certificates { + if len(data) < 4 { + return false + } + certLen := int(data[0])<<24 | int(data[1])<<16 | int(data[2])<<8 | int(data[3]) + data = data[4:] + if certLen < 0 { + return false + } + if len(data) < certLen { + return false + } + s.certificates[i] = data[:certLen] + data = data[certLen:] + } + + if len(data) > 0 { + return false + } + + return true +} + +func (c *Conn) encryptTicket(state *sessionState) ([]byte, error) { + serialized := state.marshal() + encrypted := make([]byte, aes.BlockSize+len(serialized)+sha256.Size) + iv := encrypted[:aes.BlockSize] + macBytes := encrypted[len(encrypted)-sha256.Size:] + + if _, err := io.ReadFull(c.config.rand(), iv); err != nil { + return nil, err + } + block, err := aes.NewCipher(c.config.SessionTicketKey[:16]) + if err != nil { + return nil, errors.New("tls: failed to create cipher while encrypting ticket: " + err.Error()) + } + cipher.NewCTR(block, iv).XORKeyStream(encrypted[aes.BlockSize:], serialized) + + mac := hmac.New(sha256.New, c.config.SessionTicketKey[16:32]) + mac.Write(encrypted[:len(encrypted)-sha256.Size]) + mac.Sum(macBytes[:0]) + + return encrypted, nil +} + +func (c *Conn) decryptTicket(encrypted []byte) (*sessionState, bool) { + if len(encrypted) < aes.BlockSize+sha256.Size { + return nil, false + } + + iv := encrypted[:aes.BlockSize] + macBytes := encrypted[len(encrypted)-sha256.Size:] + + mac := hmac.New(sha256.New, c.config.SessionTicketKey[16:32]) + mac.Write(encrypted[:len(encrypted)-sha256.Size]) + expected := mac.Sum(nil) + + if subtle.ConstantTimeCompare(macBytes, expected) != 1 { + return nil, false + } + + block, err := 
aes.NewCipher(c.config.SessionTicketKey[:16]) + if err != nil { + return nil, false + } + ciphertext := encrypted[aes.BlockSize : len(encrypted)-sha256.Size] + plaintext := ciphertext + cipher.NewCTR(block, iv).XORKeyStream(plaintext, ciphertext) + + state := new(sessionState) + ok := state.unmarshal(plaintext) + return state, ok +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/core/tls/tls.go b/vendor/github.com/Azure/azure-sdk-for-go/core/tls/tls.go new file mode 100644 index 000000000..6c67506fc --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/core/tls/tls.go @@ -0,0 +1,225 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package tls partially implements TLS 1.2, as specified in RFC 5246. +package tls + +import ( + "crypto" + "crypto/ecdsa" + "crypto/rsa" + "crypto/x509" + "encoding/pem" + "errors" + "io/ioutil" + "net" + "strings" +) + +// Server returns a new TLS server side connection +// using conn as the underlying transport. +// The configuration config must be non-nil and must have +// at least one certificate. +func Server(conn net.Conn, config *Config) *Conn { + return &Conn{conn: conn, config: config} +} + +// Client returns a new TLS client side connection +// using conn as the underlying transport. +// Client interprets a nil configuration as equivalent to +// the zero configuration; see the documentation of Config +// for the defaults. +func Client(conn net.Conn, config *Config) *Conn { + return &Conn{conn: conn, config: config, isClient: true} +} + +// A listener implements a network listener (net.Listener) for TLS connections. +type listener struct { + net.Listener + config *Config +} + +// Accept waits for and returns the next incoming TLS connection. +// The returned connection c is a *tls.Conn. +func (l *listener) Accept() (c net.Conn, err error) { + c, err = l.Listener.Accept() + if err != nil { + return + } + c = Server(c, l.config) + return +} + +// NewListener creates a Listener which accepts connections from an inner +// Listener and wraps each connection with Server. +// The configuration config must be non-nil and must have +// at least one certificate. +func NewListener(inner net.Listener, config *Config) net.Listener { + l := new(listener) + l.Listener = inner + l.config = config + return l +} + +// Listen creates a TLS listener accepting connections on the +// given network address using net.Listen. +// The configuration config must be non-nil and must have +// at least one certificate. +func Listen(network, laddr string, config *Config) (net.Listener, error) { + if config == nil || len(config.Certificates) == 0 { + return nil, errors.New("tls.Listen: no certificates in configuration") + } + l, err := net.Listen(network, laddr) + if err != nil { + return nil, err + } + return NewListener(l, config), nil +} + +// Dial connects to the given network address using net.Dial +// and then initiates a TLS handshake, returning the resulting +// TLS connection. +// Dial interprets a nil configuration as equivalent to +// the zero configuration; see the documentation of Config +// for the defaults. 
+func Dial(network, addr string, config *Config) (*Conn, error) { + raddr := addr + c, err := net.Dial(network, raddr) + if err != nil { + return nil, err + } + + colonPos := strings.LastIndex(raddr, ":") + if colonPos == -1 { + colonPos = len(raddr) + } + hostname := raddr[:colonPos] + + if config == nil { + config = defaultConfig() + } + // If no ServerName is set, infer the ServerName + // from the hostname we're connecting to. + if config.ServerName == "" { + // Make a copy to avoid polluting argument or default. + c := *config + c.ServerName = hostname + config = &c + } + conn := Client(c, config) + if err = conn.Handshake(); err != nil { + c.Close() + return nil, err + } + return conn, nil +} + +// LoadX509KeyPair reads and parses a public/private key pair from a pair of +// files. The files must contain PEM encoded data. +func LoadX509KeyPair(certFile, keyFile string) (cert Certificate, err error) { + certPEMBlock, err := ioutil.ReadFile(certFile) + if err != nil { + return + } + keyPEMBlock, err := ioutil.ReadFile(keyFile) + if err != nil { + return + } + return X509KeyPair(certPEMBlock, keyPEMBlock) +} + +// X509KeyPair parses a public/private key pair from a pair of +// PEM encoded data. +func X509KeyPair(certPEMBlock, keyPEMBlock []byte) (cert Certificate, err error) { + var certDERBlock *pem.Block + for { + certDERBlock, certPEMBlock = pem.Decode(certPEMBlock) + if certDERBlock == nil { + break + } + if certDERBlock.Type == "CERTIFICATE" { + cert.Certificate = append(cert.Certificate, certDERBlock.Bytes) + } + } + + if len(cert.Certificate) == 0 { + err = errors.New("crypto/tls: failed to parse certificate PEM data") + return + } + + var keyDERBlock *pem.Block + for { + keyDERBlock, keyPEMBlock = pem.Decode(keyPEMBlock) + if keyDERBlock == nil { + err = errors.New("crypto/tls: failed to parse key PEM data") + return + } + if keyDERBlock.Type == "PRIVATE KEY" || strings.HasSuffix(keyDERBlock.Type, " PRIVATE KEY") { + break + } + } + + cert.PrivateKey, err = parsePrivateKey(keyDERBlock.Bytes) + if err != nil { + return + } + + // We don't need to parse the public key for TLS, but we so do anyway + // to check that it looks sane and matches the private key. + x509Cert, err := x509.ParseCertificate(cert.Certificate[0]) + if err != nil { + return + } + + switch pub := x509Cert.PublicKey.(type) { + case *rsa.PublicKey: + priv, ok := cert.PrivateKey.(*rsa.PrivateKey) + if !ok { + err = errors.New("crypto/tls: private key type does not match public key type") + return + } + if pub.N.Cmp(priv.N) != 0 { + err = errors.New("crypto/tls: private key does not match public key") + return + } + case *ecdsa.PublicKey: + priv, ok := cert.PrivateKey.(*ecdsa.PrivateKey) + if !ok { + err = errors.New("crypto/tls: private key type does not match public key type") + return + + } + if pub.X.Cmp(priv.X) != 0 || pub.Y.Cmp(priv.Y) != 0 { + err = errors.New("crypto/tls: private key does not match public key") + return + } + default: + err = errors.New("crypto/tls: unknown public key algorithm") + return + } + + return +} + +// Attempt to parse the given private key DER block. OpenSSL 0.9.8 generates +// PKCS#1 private keys by default, while OpenSSL 1.0.0 generates PKCS#8 keys. +// OpenSSL ecparam generates SEC1 EC private keys for ECDSA. We try all three. 
+func parsePrivateKey(der []byte) (crypto.PrivateKey, error) { + if key, err := x509.ParsePKCS1PrivateKey(der); err == nil { + return key, nil + } + if key, err := x509.ParsePKCS8PrivateKey(der); err == nil { + switch key := key.(type) { + case *rsa.PrivateKey, *ecdsa.PrivateKey: + return key, nil + default: + return nil, errors.New("crypto/tls: found unknown private key type in PKCS#8 wrapping") + } + } + if key, err := x509.ParseECPrivateKey(der); err == nil { + return key, nil + } + + return nil, errors.New("crypto/tls: failed to parse private key") +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/core/tls/tls_test.go b/vendor/github.com/Azure/azure-sdk-for-go/core/tls/tls_test.go new file mode 100644 index 000000000..38229014c --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/core/tls/tls_test.go @@ -0,0 +1,107 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package tls + +import ( + "testing" +) + +var rsaCertPEM = `-----BEGIN CERTIFICATE----- +MIIB0zCCAX2gAwIBAgIJAI/M7BYjwB+uMA0GCSqGSIb3DQEBBQUAMEUxCzAJBgNV +BAYTAkFVMRMwEQYDVQQIDApTb21lLVN0YXRlMSEwHwYDVQQKDBhJbnRlcm5ldCBX +aWRnaXRzIFB0eSBMdGQwHhcNMTIwOTEyMjE1MjAyWhcNMTUwOTEyMjE1MjAyWjBF +MQswCQYDVQQGEwJBVTETMBEGA1UECAwKU29tZS1TdGF0ZTEhMB8GA1UECgwYSW50 +ZXJuZXQgV2lkZ2l0cyBQdHkgTHRkMFwwDQYJKoZIhvcNAQEBBQADSwAwSAJBANLJ +hPHhITqQbPklG3ibCVxwGMRfp/v4XqhfdQHdcVfHap6NQ5Wok/4xIA+ui35/MmNa +rtNuC+BdZ1tMuVCPFZcCAwEAAaNQME4wHQYDVR0OBBYEFJvKs8RfJaXTH08W+SGv +zQyKn0H8MB8GA1UdIwQYMBaAFJvKs8RfJaXTH08W+SGvzQyKn0H8MAwGA1UdEwQF +MAMBAf8wDQYJKoZIhvcNAQEFBQADQQBJlffJHybjDGxRMqaRmDhX0+6v02TUKZsW +r5QuVbpQhH6u+0UgcW0jp9QwpxoPTLTWGXEWBBBurxFwiCBhkQ+V +-----END CERTIFICATE----- +` + +var rsaKeyPEM = `-----BEGIN RSA PRIVATE KEY----- +MIIBOwIBAAJBANLJhPHhITqQbPklG3ibCVxwGMRfp/v4XqhfdQHdcVfHap6NQ5Wo +k/4xIA+ui35/MmNartNuC+BdZ1tMuVCPFZcCAwEAAQJAEJ2N+zsR0Xn8/Q6twa4G +6OB1M1WO+k+ztnX/1SvNeWu8D6GImtupLTYgjZcHufykj09jiHmjHx8u8ZZB/o1N +MQIhAPW+eyZo7ay3lMz1V01WVjNKK9QSn1MJlb06h/LuYv9FAiEA25WPedKgVyCW +SmUwbPw8fnTcpqDWE3yTO3vKcebqMSsCIBF3UmVue8YU3jybC3NxuXq3wNm34R8T +xVLHwDXh/6NJAiEAl2oHGGLz64BuAfjKrqwz7qMYr9HCLIe/YsoWq/olzScCIQDi +D2lWusoe2/nEqfDVVWGWlyJ7yOmqaVm/iNUN9B2N2g== +-----END RSA PRIVATE KEY----- +` + +// keyPEM is the same as rsaKeyPEM, but declares itself as just +// "PRIVATE KEY", not "RSA PRIVATE KEY". 
http://golang.org/issue/4477 +var keyPEM = `-----BEGIN PRIVATE KEY----- +MIIBOwIBAAJBANLJhPHhITqQbPklG3ibCVxwGMRfp/v4XqhfdQHdcVfHap6NQ5Wo +k/4xIA+ui35/MmNartNuC+BdZ1tMuVCPFZcCAwEAAQJAEJ2N+zsR0Xn8/Q6twa4G +6OB1M1WO+k+ztnX/1SvNeWu8D6GImtupLTYgjZcHufykj09jiHmjHx8u8ZZB/o1N +MQIhAPW+eyZo7ay3lMz1V01WVjNKK9QSn1MJlb06h/LuYv9FAiEA25WPedKgVyCW +SmUwbPw8fnTcpqDWE3yTO3vKcebqMSsCIBF3UmVue8YU3jybC3NxuXq3wNm34R8T +xVLHwDXh/6NJAiEAl2oHGGLz64BuAfjKrqwz7qMYr9HCLIe/YsoWq/olzScCIQDi +D2lWusoe2/nEqfDVVWGWlyJ7yOmqaVm/iNUN9B2N2g== +-----END PRIVATE KEY----- +` + +var ecdsaCertPEM = `-----BEGIN CERTIFICATE----- +MIIB/jCCAWICCQDscdUxw16XFDAJBgcqhkjOPQQBMEUxCzAJBgNVBAYTAkFVMRMw +EQYDVQQIEwpTb21lLVN0YXRlMSEwHwYDVQQKExhJbnRlcm5ldCBXaWRnaXRzIFB0 +eSBMdGQwHhcNMTIxMTE0MTI0MDQ4WhcNMTUxMTE0MTI0MDQ4WjBFMQswCQYDVQQG +EwJBVTETMBEGA1UECBMKU29tZS1TdGF0ZTEhMB8GA1UEChMYSW50ZXJuZXQgV2lk +Z2l0cyBQdHkgTHRkMIGbMBAGByqGSM49AgEGBSuBBAAjA4GGAAQBY9+my9OoeSUR +lDQdV/x8LsOuLilthhiS1Tz4aGDHIPwC1mlvnf7fg5lecYpMCrLLhauAc1UJXcgl +01xoLuzgtAEAgv2P/jgytzRSpUYvgLBt1UA0leLYBy6mQQbrNEuqT3INapKIcUv8 +XxYP0xMEUksLPq6Ca+CRSqTtrd/23uTnapkwCQYHKoZIzj0EAQOBigAwgYYCQXJo +A7Sl2nLVf+4Iu/tAX/IF4MavARKC4PPHK3zfuGfPR3oCCcsAoz3kAzOeijvd0iXb +H5jBImIxPL4WxQNiBTexAkF8D1EtpYuWdlVQ80/h/f4pBcGiXPqX5h2PQSQY7hP1 ++jwM1FGS4fREIOvlBYr/SzzQRtwrvrzGYxDEDbsC0ZGRnA== +-----END CERTIFICATE----- +` + +var ecdsaKeyPEM = `-----BEGIN EC PARAMETERS----- +BgUrgQQAIw== +-----END EC PARAMETERS----- +-----BEGIN EC PRIVATE KEY----- +MIHcAgEBBEIBrsoKp0oqcv6/JovJJDoDVSGWdirrkgCWxrprGlzB9o0X8fV675X0 +NwuBenXFfeZvVcwluO7/Q9wkYoPd/t3jGImgBwYFK4EEACOhgYkDgYYABAFj36bL +06h5JRGUNB1X/Hwuw64uKW2GGJLVPPhoYMcg/ALWaW+d/t+DmV5xikwKssuFq4Bz +VQldyCXTXGgu7OC0AQCC/Y/+ODK3NFKlRi+AsG3VQDSV4tgHLqZBBus0S6pPcg1q +kohxS/xfFg/TEwRSSws+roJr4JFKpO2t3/be5OdqmQ== +-----END EC PRIVATE KEY----- +` + +var keyPairTests = []struct { + algo string + cert string + key string +}{ + {"ECDSA", ecdsaCertPEM, ecdsaKeyPEM}, + {"RSA", rsaCertPEM, rsaKeyPEM}, + {"RSA-untyped", rsaCertPEM, keyPEM}, // golang.org/issue/4477 +} + +func TestX509KeyPair(t *testing.T) { + var pem []byte + for _, test := range keyPairTests { + pem = []byte(test.cert + test.key) + if _, err := X509KeyPair(pem, pem); err != nil { + t.Errorf("Failed to load %s cert followed by %s key: %s", test.algo, test.algo, err) + } + pem = []byte(test.key + test.cert) + if _, err := X509KeyPair(pem, pem); err != nil { + t.Errorf("Failed to load %s key followed by %s cert: %s", test.algo, test.algo, err) + } + } +} + +func TestX509MixedKeyPair(t *testing.T) { + if _, err := X509KeyPair([]byte(rsaCertPEM), []byte(ecdsaKeyPEM)); err == nil { + t.Error("Load of RSA certificate succeeded with ECDSA private key") + } + if _, err := X509KeyPair([]byte(ecdsaCertPEM), []byte(rsaKeyPEM)); err == nil { + t.Error("Load of ECDSA certificate succeeded with RSA private key") + } +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/management/affinitygroup/client.go b/vendor/github.com/Azure/azure-sdk-for-go/management/affinitygroup/client.go new file mode 100644 index 000000000..7fcdc869e --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/management/affinitygroup/client.go @@ -0,0 +1,131 @@ +package affinitygroup + +import ( + "encoding/base64" + "encoding/xml" + "fmt" + + "github.com/Azure/azure-sdk-for-go/management" +) + +const ( + azureCreateAffinityGroupURL = "/affinitygroups" + azureGetAffinityGroupURL = "/affinitygroups/%s" + azureListAffinityGroupsURL = "/affinitygroups" + azureUpdateAffinityGroupURL = "/affinitygroups/%s" + azureDeleteAffinityGroupURL = 
"/affinitygroups/%s" + + errParameterNotSpecified = "Parameter %s not specified." +) + +// AffinityGroupClient simply contains a management.Client and has +// methods for doing all affinity group-related API calls to Azure. +type AffinityGroupClient struct { + mgmtClient management.Client +} + +// NewClient returns an AffinityGroupClient with the given management.Client. +func NewClient(mgmtClient management.Client) AffinityGroupClient { + return AffinityGroupClient{mgmtClient} +} + +// CreateAffinityGroup creates a new affinity group. +// +// https://msdn.microsoft.com/en-us/library/azure/gg715317.aspx +func (c AffinityGroupClient) CreateAffinityGroup(params CreateAffinityGroupParams) error { + params.Label = encodeLabel(params.Label) + + req, err := xml.Marshal(params) + if err != nil { + return err + } + + _, err = c.mgmtClient.SendAzurePostRequest(azureCreateAffinityGroupURL, req) + return err +} + +// GetAffinityGroup returns the system properties that are associated with the +// specified affinity group. +// +// https://msdn.microsoft.com/en-us/library/azure/ee460789.aspx +func (c AffinityGroupClient) GetAffinityGroup(name string) (AffinityGroup, error) { + var affgroup AffinityGroup + if name == "" { + return affgroup, fmt.Errorf(errParameterNotSpecified, "name") + } + + url := fmt.Sprintf(azureGetAffinityGroupURL, name) + resp, err := c.mgmtClient.SendAzureGetRequest(url) + if err != nil { + return affgroup, err + } + + err = xml.Unmarshal(resp, &affgroup) + affgroup.Label = decodeLabel(affgroup.Label) + return affgroup, err +} + +// ListAffinityGroups lists the affinity groups off Azure. +// +// https://msdn.microsoft.com/en-us/library/azure/ee460797.aspx +func (c AffinityGroupClient) ListAffinityGroups() (ListAffinityGroupsResponse, error) { + var affinitygroups ListAffinityGroupsResponse + + resp, err := c.mgmtClient.SendAzureGetRequest(azureListAffinityGroupsURL) + if err != nil { + return affinitygroups, err + } + + err = xml.Unmarshal(resp, &affinitygroups) + + for i, grp := range affinitygroups.AffinityGroups { + affinitygroups.AffinityGroups[i].Label = decodeLabel(grp.Label) + } + + return affinitygroups, err +} + +// UpdateAffinityGroup updates the label or description for an the group. +// +// https://msdn.microsoft.com/en-us/library/azure/gg715316.aspx +func (c AffinityGroupClient) UpdateAffinityGroup(name string, params UpdateAffinityGroupParams) error { + if name == "" { + return fmt.Errorf(errParameterNotSpecified, "name") + } + + params.Label = encodeLabel(params.Label) + req, err := xml.Marshal(params) + if err != nil { + return err + } + + url := fmt.Sprintf(azureUpdateAffinityGroupURL, name) + _, err = c.mgmtClient.SendAzurePutRequest(url, "text/xml", req) + return err +} + +// DeleteAffinityGroup deletes the given affinity group. +// +// https://msdn.microsoft.com/en-us/library/azure/gg715314.aspx +func (c AffinityGroupClient) DeleteAffinityGroup(name string) error { + if name == "" { + return fmt.Errorf(errParameterNotSpecified, name) + } + + url := fmt.Sprintf(azureDeleteAffinityGroupURL, name) + _, err := c.mgmtClient.SendAzureDeleteRequest(url) + return err +} + +// encodeLabel is a helper function which encodes the given string +// to the base64 string which will be sent to Azure as a Label. +func encodeLabel(label string) string { + return base64.StdEncoding.EncodeToString([]byte(label)) +} + +// decodeLabel is a helper function which decodes the base64 encoded +// label recieved from Azure into standard encoding. 
+func decodeLabel(label string) string { + res, _ := base64.StdEncoding.DecodeString(label) + return string(res) +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/management/affinitygroup/entities.go b/vendor/github.com/Azure/azure-sdk-for-go/management/affinitygroup/entities.go new file mode 100644 index 000000000..7ce588264 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/management/affinitygroup/entities.go @@ -0,0 +1,80 @@ +package affinitygroup + +import ( + "encoding/xml" +) + +// CreateAffinityGroupParams respresents the set of parameters required for +// creating an affinity group creation request to Azure. +// +// https://msdn.microsoft.com/en-us/library/azure/gg715317.aspx +type CreateAffinityGroupParams struct { + XMLName xml.Name `xml:"http://schemas.microsoft.com/windowsazure CreateAffinityGroup"` + Name string + Label string + Description string `xml:",omitempty"` + Location string +} + +// HostedService is a struct containing details about a hosted service that is +// part of an affinity group on Azure. +type HostedService struct { + URL string `xml:"Url"` + ServiceName string +} + +// StorageService is a struct containing details about a storage service that is +// part of an affinity group on Azure. +type StorageService struct { + URL string `xml:"Url"` + ServiceName string +} + +// AffinityGroup respresents the properties of an affinity group on Azure. +// +// https://msdn.microsoft.com/en-us/library/azure/ee460789.aspx +type AffinityGroup struct { + Name string + Label string + Description string + Location string + HostedServices []HostedService + StorageServices []StorageService + Capabilities []string +} + +// ComputeCapabilities represents the sets of capabilities of an affinity group +// obtained from an affinity group list call to Azure. +type ComputeCapabilities struct { + VirtualMachineRoleSizes []string + WebWorkerRoleSizes []string +} + +// AffinityGroupListResponse represents the properties obtained for each +// affinity group listed off Azure. +// +// https://msdn.microsoft.com/en-us/library/azure/ee460797.aspx +type AffinityGroupListResponse struct { + Name string + Label string + Description string + Location string + Capabilities []string + ComputeCapabilities ComputeCapabilities +} + +// ListAffinityGroupsResponse contains all the affinity groups obtained from a +// call to the Azure API to list all affinity groups. +type ListAffinityGroupsResponse struct { + AffinityGroups []AffinityGroupListResponse `xml:"AffinityGroup"` +} + +// UpdateAffinityGroupParams if the set of parameters required to update an +// affinity group on Azure. +// +// https://msdn.microsoft.com/en-us/library/azure/gg715316.aspx +type UpdateAffinityGroupParams struct { + XMLName xml.Name `xml:"http://schemas.microsoft.com/windowsazure UpdateAffinityGroup"` + Label string `xml:",omitempty"` + Description string `xml:",omitempty"` +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/management/client.go b/vendor/github.com/Azure/azure-sdk-for-go/management/client.go new file mode 100644 index 000000000..67da834ff --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/management/client.go @@ -0,0 +1,138 @@ +// Package management provides the main API client to construct other clients +// and make requests to the Microsoft Azure Service Management REST API. 
+package management + +import ( + "errors" + "time" +) + +const ( + DefaultAzureManagementURL = "https://management.core.windows.net" + DefaultOperationPollInterval = time.Second * 30 + DefaultAPIVersion = "2014-10-01" + DefaultUserAgent = "azure-sdk-for-go" + + errPublishSettingsConfiguration = "PublishSettingsFilePath is set. Consequently ManagementCertificatePath and SubscriptionId must not be set." + errManagementCertificateConfiguration = "Both ManagementCertificatePath and SubscriptionId should be set, and PublishSettingsFilePath must not be set." + errParamNotSpecified = "Parameter %s is not specified." +) + +type client struct { + publishSettings publishSettings + config ClientConfig +} + +// Client is the base Azure Service Management API client instance that +// can be used to construct client instances for various services. +type Client interface { + // SendAzureGetRequest sends a request to the management API using the HTTP GET method + // and returns the response body or an error. + SendAzureGetRequest(url string) ([]byte, error) + + // SendAzurePostRequest sends a request to the management API using the HTTP POST method + // and returns the request ID or an error. + SendAzurePostRequest(url string, data []byte) (OperationID, error) + + // SendAzurePostRequestWithReturnedResponse sends a request to the management API using + // the HTTP POST method and returns the response body or an error. + SendAzurePostRequestWithReturnedResponse(url string, data []byte) ([]byte, error) + + // SendAzurePutRequest sends a request to the management API using the HTTP PUT method + // and returns the request ID or an error. The content type can be specified, however + // if an empty string is passed, the default of "application/xml" will be used. + SendAzurePutRequest(url, contentType string, data []byte) (OperationID, error) + + // SendAzureDeleteRequest sends a request to the management API using the HTTP DELETE method + // and returns the request ID or an error. + SendAzureDeleteRequest(url string) (OperationID, error) + + // GetOperationStatus gets the status of operation with given Operation ID. + // WaitForOperation utility method can be used for polling for operation status. + GetOperationStatus(operationID OperationID) (GetOperationStatusResponse, error) + + // WaitForOperation polls the Azure API for given operation ID indefinitely + // until the operation is completed with either success or failure. + // It is meant to be used for waiting for the result of the methods that + // return an OperationID value (meaning a long running operation has started). + // + // Cancellation of the polling loop (for instance, timing out) is done through + // cancel channel. If the user does not want to cancel, a nil chan can be provided. + // To cancel the method, it is recommended to close the channel provided to this + // method. + // + // If the operation was not successful or cancelling is signaled, an error + // is returned. + WaitForOperation(operationID OperationID, cancel chan struct{}) error +} + +// ClientConfig provides a configuration for use by a Client. +type ClientConfig struct { + ManagementURL string + OperationPollInterval time.Duration + UserAgent string + APIVersion string +} + +// NewAnonymousClient creates a new azure.Client with no credentials set. +func NewAnonymousClient() Client { + return client{} +} + +// DefaultConfig returns the default client configuration used to construct +// a client. This value can be used to make modifications on the default API +// configuration. 
+func DefaultConfig() ClientConfig { + return ClientConfig{ + ManagementURL: DefaultAzureManagementURL, + OperationPollInterval: DefaultOperationPollInterval, + APIVersion: DefaultAPIVersion, + UserAgent: DefaultUserAgent, + } +} + +// NewClient creates a new Client using the given subscription ID and +// management certificate. +func NewClient(subscriptionID string, managementCert []byte) (Client, error) { + return NewClientFromConfig(subscriptionID, managementCert, DefaultConfig()) +} + +// NewClientFromConfig creates a new Client using a given ClientConfig. +func NewClientFromConfig(subscriptionID string, managementCert []byte, config ClientConfig) (Client, error) { + return makeClient(subscriptionID, managementCert, config) +} + +func makeClient(subscriptionID string, managementCert []byte, config ClientConfig) (Client, error) { + var c client + + if subscriptionID == "" { + return c, errors.New("azure: subscription ID required") + } + + if len(managementCert) == 0 { + return c, errors.New("azure: management certificate required") + } + + publishSettings := publishSettings{ + SubscriptionID: subscriptionID, + SubscriptionCert: managementCert, + SubscriptionKey: managementCert, + } + + // Validate client configuration + switch { + case config.ManagementURL == "": + return c, errors.New("azure: base URL required") + case config.OperationPollInterval <= 0: + return c, errors.New("azure: operation polling interval must be a positive duration") + case config.APIVersion == "": + return c, errors.New("azure: client configuration must specify an API version") + case config.UserAgent == "": + config.UserAgent = DefaultUserAgent + } + + return client{ + publishSettings: publishSettings, + config: config, + }, nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/management/errors.go b/vendor/github.com/Azure/azure-sdk-for-go/management/errors.go new file mode 100644 index 000000000..798594543 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/management/errors.go @@ -0,0 +1,36 @@ +package management + +import ( + "encoding/xml" + "fmt" +) + +// AzureError represents an error returned by the management API. It has an error +// code (for example, ResourceNotFound) and a descriptive message. +type AzureError struct { + Code string + Message string +} + +//Error implements the error interface for the AzureError type. +func (e AzureError) Error() string { + return fmt.Sprintf("Error response from Azure. Code: %s, Message: %s", e.Code, e.Message) +} + +// IsResourceNotFoundError returns true if the provided error is an AzureError +// reporting that a given resource has not been found. +func IsResourceNotFoundError(err error) bool { + azureErr, ok := err.(AzureError) + return ok && azureErr.Code == "ResourceNotFound" +} + +// getAzureError converts an error response body into an AzureError instance. 
+func getAzureError(responseBody []byte) error { + var azErr AzureError + err := xml.Unmarshal(responseBody, &azErr) + if err != nil { + return fmt.Errorf("Failed parsing contents to AzureError format: %v", err) + } + return azErr + +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/management/errors_test.go b/vendor/github.com/Azure/azure-sdk-for-go/management/errors_test.go new file mode 100644 index 000000000..9ee32be4e --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/management/errors_test.go @@ -0,0 +1,30 @@ +package management_test + +import ( + "fmt" + "testing" + + "github.com/Azure/azure-sdk-for-go/management" +) + +// TestIsResourceNotFoundError tests IsResourceNotFoundError with the +// set of given test cases. +func TestIsResourceNotFoundError(t *testing.T) { + // isResourceNotFoundTestCases is a set of structs comprising of the error + // IsResourceNotFoundError should test and the expected result. + var isResourceNotFoundTestCases = []struct { + err error + expected bool + }{ + {nil, false}, + {fmt.Errorf("Some other random error."), false}, + {management.AzureError{Code: "ResourceNotFound"}, true}, + {management.AzureError{Code: "NotAResourceNotFound"}, false}, + } + + for i, testCase := range isResourceNotFoundTestCases { + if res := management.IsResourceNotFoundError(testCase.err); res != testCase.expected { + t.Fatalf("Test %d: error %s - expected %t - got %t", i+1, testCase.err, testCase.expected, res) + } + } +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/management/hostedservice/client.go b/vendor/github.com/Azure/azure-sdk-for-go/management/hostedservice/client.go new file mode 100644 index 000000000..a9c7063a2 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/management/hostedservice/client.go @@ -0,0 +1,125 @@ +// Package hostedservice provides a client for Hosted Services. +package hostedservice + +import ( + "encoding/base64" + "encoding/xml" + "fmt" + + "github.com/Azure/azure-sdk-for-go/management" +) + +const ( + azureXmlns = "http://schemas.microsoft.com/windowsazure" + azureDeploymentListURL = "services/hostedservices/%s/deployments" + azureHostedServiceListURL = "services/hostedservices" + azureHostedServiceAvailabilityURL = "services/hostedservices/operations/isavailable/%s" + azureDeploymentURL = "services/hostedservices/%s/deployments/%s" + deleteAzureDeploymentURL = "services/hostedservices/%s/deployments/%s" + getHostedServicePropertiesURL = "services/hostedservices/%s" + azureServiceCertificateURL = "services/hostedservices/%s/certificates" + + errParamNotSpecified = "Parameter %s is not specified." 
+) + +//NewClient is used to return a handle to the HostedService API +func NewClient(client management.Client) HostedServiceClient { + return HostedServiceClient{client: client} +} + +func (h HostedServiceClient) CreateHostedService(params CreateHostedServiceParameters) error { + req, err := xml.Marshal(params) + if err != nil { + return err + } + + _, err = h.client.SendAzurePostRequest(azureHostedServiceListURL, req) // not a long running operation + return err +} + +func (h HostedServiceClient) CheckHostedServiceNameAvailability(dnsName string) (AvailabilityResponse, error) { + var r AvailabilityResponse + if dnsName == "" { + return r, fmt.Errorf(errParamNotSpecified, "dnsName") + } + + requestURL := fmt.Sprintf(azureHostedServiceAvailabilityURL, dnsName) + response, err := h.client.SendAzureGetRequest(requestURL) + if err != nil { + return r, err + } + + err = xml.Unmarshal(response, &r) + return r, err +} + +func (h HostedServiceClient) DeleteHostedService(dnsName string, deleteDisksAndBlobs bool) (management.OperationID, error) { + if dnsName == "" { + return "", fmt.Errorf(errParamNotSpecified, "dnsName") + } + + requestURL := fmt.Sprintf(getHostedServicePropertiesURL, dnsName) + if deleteDisksAndBlobs { + requestURL += "?comp=media" + } + return h.client.SendAzureDeleteRequest(requestURL) +} + +func (h HostedServiceClient) GetHostedService(name string) (HostedService, error) { + hostedService := HostedService{} + if name == "" { + return hostedService, fmt.Errorf(errParamNotSpecified, "name") + } + + requestURL := fmt.Sprintf(getHostedServicePropertiesURL, name) + response, err := h.client.SendAzureGetRequest(requestURL) + if err != nil { + return hostedService, err + } + + err = xml.Unmarshal(response, &hostedService) + if err != nil { + return hostedService, err + } + + decodedLabel, err := base64.StdEncoding.DecodeString(hostedService.LabelBase64) + if err != nil { + return hostedService, err + } + hostedService.Label = string(decodedLabel) + return hostedService, nil +} + +func (h HostedServiceClient) ListHostedServices() (ListHostedServicesResponse, error) { + var response ListHostedServicesResponse + + data, err := h.client.SendAzureGetRequest(azureHostedServiceListURL) + if err != nil { + return response, err + } + + err = xml.Unmarshal(data, &response) + return response, err +} + +func (h HostedServiceClient) AddCertificate(dnsName string, certData []byte, certificateFormat CertificateFormat, password string) (management.OperationID, error) { + if dnsName == "" { + return "", fmt.Errorf(errParamNotSpecified, "dnsName") + } + + certBase64 := base64.StdEncoding.EncodeToString(certData) + + addCertificate := CertificateFile{ + Data: certBase64, + CertificateFormat: certificateFormat, + Password: password, + Xmlns: azureXmlns, + } + buffer, err := xml.Marshal(addCertificate) + if err != nil { + return "", err + } + + requestURL := fmt.Sprintf(azureServiceCertificateURL, dnsName) + return h.client.SendAzurePostRequest(requestURL, buffer) +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/management/hostedservice/entities.go b/vendor/github.com/Azure/azure-sdk-for-go/management/hostedservice/entities.go new file mode 100644 index 000000000..f540fa9f4 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/management/hostedservice/entities.go @@ -0,0 +1,58 @@ +package hostedservice + +import ( + "encoding/xml" + + "github.com/Azure/azure-sdk-for-go/management" +) + +//HostedServiceClient is used to perform operations on Azure Hosted Services +type HostedServiceClient 
struct { + client management.Client +} + +type CreateHostedServiceParameters struct { + XMLName xml.Name `xml:"http://schemas.microsoft.com/windowsazure CreateHostedService"` + ServiceName string + Label string + Description string + Location string + ReverseDNSFqdn string `xml:"ReverseDnsFqdn,omitempty"` +} + +type AvailabilityResponse struct { + Xmlns string `xml:"xmlns,attr"` + Result bool + Reason string +} + +type HostedService struct { + URL string `xml:"Url"` + ServiceName string + Description string `xml:"HostedServiceProperties>Description"` + AffinityGroup string `xml:"HostedServiceProperties>AffinityGroup"` + Location string `xml:"HostedServiceProperties>Location"` + LabelBase64 string `xml:"HostedServiceProperties>Label"` + Label string + Status string `xml:"HostedServiceProperties>Status"` + ReverseDNSFqdn string `xml:"HostedServiceProperties>ReverseDnsFqdn"` + DefaultWinRmCertificateThumbprint string +} + +type CertificateFile struct { + Xmlns string `xml:"xmlns,attr"` + Data string + CertificateFormat CertificateFormat + Password string `xml:",omitempty"` +} + +type CertificateFormat string + +const ( + CertificateFormatPfx = CertificateFormat("pfx") + CertificateFormatCer = CertificateFormat("cer") +) + +type ListHostedServicesResponse struct { + HostedServices []HostedService `xml:"HostedService"` +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/management/http.go b/vendor/github.com/Azure/azure-sdk-for-go/management/http.go new file mode 100644 index 000000000..247a78254 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/management/http.go @@ -0,0 +1,187 @@ +package management + +import ( + "bytes" + "fmt" + + "github.com/Azure/azure-sdk-for-go/core/http" + "github.com/Azure/azure-sdk-for-go/core/tls" +) + +const ( + msVersionHeader = "x-ms-version" + requestIDHeader = "x-ms-request-id" + uaHeader = "User-Agent" + contentHeader = "Content-Type" + defaultContentHeaderValue = "application/xml" +) + +func (client client) SendAzureGetRequest(url string) ([]byte, error) { + resp, err := client.sendAzureRequest("GET", url, "", nil) + if err != nil { + return nil, err + } + return getResponseBody(resp) +} + +func (client client) SendAzurePostRequest(url string, data []byte) (OperationID, error) { + return client.doAzureOperation("POST", url, "", data) +} + +func (client client) SendAzurePostRequestWithReturnedResponse(url string, data []byte) ([]byte, error) { + resp, err := client.sendAzureRequest("POST", url, "", data) + if err != nil { + return nil, err + } + + return getResponseBody(resp) +} + +func (client client) SendAzurePutRequest(url, contentType string, data []byte) (OperationID, error) { + return client.doAzureOperation("PUT", url, contentType, data) +} + +func (client client) SendAzureDeleteRequest(url string) (OperationID, error) { + return client.doAzureOperation("DELETE", url, "", nil) +} + +func (client client) doAzureOperation(method, url, contentType string, data []byte) (OperationID, error) { + response, err := client.sendAzureRequest(method, url, contentType, data) + if err != nil { + return "", err + } + return getOperationID(response) +} + +func getOperationID(response *http.Response) (OperationID, error) { + requestID := response.Header.Get(requestIDHeader) + if requestID == "" { + return "", fmt.Errorf("Could not retrieve operation id from %q header", requestIDHeader) + } + return OperationID(requestID), nil +} + +// sendAzureRequest constructs an HTTP client for the request, sends it to the +// management API and returns the response or 
an error. +func (client client) sendAzureRequest(method, url, contentType string, data []byte) (*http.Response, error) { + if method == "" { + return nil, fmt.Errorf(errParamNotSpecified, "method") + } + if url == "" { + return nil, fmt.Errorf(errParamNotSpecified, "url") + } + + httpClient := client.createHTTPClient() + + response, err := client.sendRequest(httpClient, url, method, contentType, data, 5) + if err != nil { + return nil, err + } + + return response, nil +} + +// createHTTPClient creates an HTTP Client configured with the key pair for +// the subscription for this client. +func (client client) createHTTPClient() *http.Client { + cert, _ := tls.X509KeyPair(client.publishSettings.SubscriptionCert, client.publishSettings.SubscriptionKey) + + ssl := &tls.Config{} + ssl.Certificates = []tls.Certificate{cert} + + httpClient := &http.Client{ + Transport: &http.Transport{ + Proxy: http.ProxyFromEnvironment, + TLSClientConfig: ssl, + }, + } + + return httpClient +} + +// sendRequest sends a request to the Azure management API using the given +// HTTP client and parameters. It returns the response from the call or an +// error. +func (client client) sendRequest(httpClient *http.Client, url, requestType, contentType string, data []byte, numberOfRetries int) (*http.Response, error) { + + absURI := client.createAzureRequestURI(url) + + for { + request, reqErr := client.createAzureRequest(absURI, requestType, contentType, data) + if reqErr != nil { + return nil, reqErr + } + + response, err := httpClient.Do(request) + if err != nil { + if numberOfRetries == 0 { + return nil, err + } + + return client.sendRequest(httpClient, url, requestType, contentType, data, numberOfRetries-1) + } + if response.StatusCode == http.StatusTemporaryRedirect { + // ASM's way of moving traffic around, see https://msdn.microsoft.com/en-us/library/azure/ee460801.aspx + // Only handled automatically for GET/HEAD requests. This is for the rest of the http verbs. + u, err := response.Location() + if err != nil { + return response, fmt.Errorf("Redirect requested but location header could not be retrieved: %v", err) + } + absURI = u.String() + continue // re-issue request + } + + if response.StatusCode >= http.StatusBadRequest { + body, err := getResponseBody(response) + if err != nil { + // Failed to read the response body + return nil, err + } + azureErr := getAzureError(body) + if azureErr != nil { + if numberOfRetries == 0 { + return nil, azureErr + } + + return client.sendRequest(httpClient, url, requestType, contentType, data, numberOfRetries-1) + } + } + + return response, nil + } +} + +// createAzureRequestURI constructs the request uri using the management API endpoint and +// subscription ID associated with the client. +func (client client) createAzureRequestURI(url string) string { + return fmt.Sprintf("%s/%s/%s", client.config.ManagementURL, client.publishSettings.SubscriptionID, url) +} + +// createAzureRequest packages up the request with the correct set of headers and returns +// the request object or an error. 
+func (client client) createAzureRequest(url string, requestType string, contentType string, data []byte) (*http.Request, error) { + var request *http.Request + var err error + + if data != nil { + body := bytes.NewBuffer(data) + request, err = http.NewRequest(requestType, url, body) + } else { + request, err = http.NewRequest(requestType, url, nil) + } + + if err != nil { + return nil, err + } + + request.Header.Set(msVersionHeader, client.config.APIVersion) + request.Header.Set(uaHeader, client.config.UserAgent) + + if contentType != "" { + request.Header.Set(contentHeader, contentType) + } else { + request.Header.Set(contentHeader, defaultContentHeaderValue) + } + + return request, nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/management/location/client.go b/vendor/github.com/Azure/azure-sdk-for-go/management/location/client.go new file mode 100644 index 000000000..721f3f607 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/management/location/client.go @@ -0,0 +1,30 @@ +// Package location provides a client for Locations. +package location + +import ( + "encoding/xml" + + "github.com/Azure/azure-sdk-for-go/management" +) + +const ( + azureLocationListURL = "locations" + errParamNotSpecified = "Parameter %s is not specified." +) + +//NewClient is used to instantiate a new LocationClient from an Azure client +func NewClient(client management.Client) LocationClient { + return LocationClient{client: client} +} + +func (c LocationClient) ListLocations() (ListLocationsResponse, error) { + var l ListLocationsResponse + + response, err := c.client.SendAzureGetRequest(azureLocationListURL) + if err != nil { + return l, err + } + + err = xml.Unmarshal(response, &l) + return l, err +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/management/location/entities.go b/vendor/github.com/Azure/azure-sdk-for-go/management/location/entities.go new file mode 100644 index 000000000..8a2501583 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/management/location/entities.go @@ -0,0 +1,37 @@ +package location + +import ( + "bytes" + "encoding/xml" + "fmt" + "strings" + + "github.com/Azure/azure-sdk-for-go/management" +) + +//LocationClient is used to perform operations on Azure Locations +type LocationClient struct { + client management.Client +} + +type ListLocationsResponse struct { + XMLName xml.Name `xml:"Locations"` + Locations []Location `xml:"Location"` +} + +type Location struct { + Name string + DisplayName string + AvailableServices []string `xml:"AvailableServices>AvailableService"` + WebWorkerRoleSizes []string `xml:"ComputeCapabilities>WebWorkerRoleSizes>RoleSize"` + VirtualMachineRoleSizes []string `xml:"ComputeCapabilities>VirtualMachinesRoleSizes>RoleSize"` +} + +func (ll ListLocationsResponse) String() string { + var buf bytes.Buffer + for _, l := range ll.Locations { + fmt.Fprintf(&buf, "%s, ", l.Name) + } + + return strings.Trim(buf.String(), ", ") +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/management/networksecuritygroup/client.go b/vendor/github.com/Azure/azure-sdk-for-go/management/networksecuritygroup/client.go new file mode 100644 index 000000000..8e8eea4da --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/management/networksecuritygroup/client.go @@ -0,0 +1,245 @@ +// Package networksecuritygroup provides a client for Network Security Groups. 
+package networksecuritygroup + +import ( + "encoding/xml" + "fmt" + + "github.com/Azure/azure-sdk-for-go/management" +) + +const ( + createSecurityGroupURL = "services/networking/networksecuritygroups" + deleteSecurityGroupURL = "services/networking/networksecuritygroups/%s" + getSecurityGroupURL = "services/networking/networksecuritygroups/%s?detaillevel=full" + listSecurityGroupsURL = "services/networking/networksecuritygroups" + addSecurityGroupToSubnetURL = "services/networking/virtualnetwork/%s/subnets/%s/networksecuritygroups" + getSecurityGroupForSubnetURL = "services/networking/virtualnetwork/%s/subnets/%s/networksecuritygroups" + removeSecurityGroupFromSubnetURL = "services/networking/virtualnetwork/%s/subnets/%s/networksecuritygroups/%s" + setSecurityGroupRuleURL = "services/networking/networksecuritygroups/%s/rules/%s" + deleteSecurityGroupRuleURL = "services/networking/networksecuritygroups/%s/rules/%s" + + errParamNotSpecified = "Parameter %s is not specified." +) + +// NewClient is used to instantiate a new SecurityGroupClient from an Azure client +func NewClient(client management.Client) SecurityGroupClient { + return SecurityGroupClient{client: client} +} + +// CreateNetworkSecurityGroup creates a new network security group within +// the context of the specified subscription +// +// https://msdn.microsoft.com/en-us/library/azure/dn913818.aspx +func (sg SecurityGroupClient) CreateNetworkSecurityGroup( + name string, + label string, + location string) (management.OperationID, error) { + if name == "" { + return "", fmt.Errorf(errParamNotSpecified, "name") + } + if location == "" { + return "", fmt.Errorf(errParamNotSpecified, "location") + } + + data, err := xml.Marshal(SecurityGroupRequest{ + Name: name, + Label: label, + Location: location, + }) + if err != nil { + return "", err + } + + requestURL := fmt.Sprintf(createSecurityGroupURL) + return sg.client.SendAzurePostRequest(requestURL, data) +} + +// DeleteNetworkSecurityGroup deletes the specified network security group from the subscription +// +// https://msdn.microsoft.com/en-us/library/azure/dn913825.aspx +func (sg SecurityGroupClient) DeleteNetworkSecurityGroup( + name string) (management.OperationID, error) { + if name == "" { + return "", fmt.Errorf(errParamNotSpecified, "name") + } + + requestURL := fmt.Sprintf(deleteSecurityGroupURL, name) + return sg.client.SendAzureDeleteRequest(requestURL) +} + +// GetNetworkSecurityGroup returns information about the specified network security group +// +// https://msdn.microsoft.com/en-us/library/azure/dn913821.aspx +func (sg SecurityGroupClient) GetNetworkSecurityGroup(name string) (SecurityGroupResponse, error) { + if name == "" { + return SecurityGroupResponse{}, fmt.Errorf(errParamNotSpecified, "name") + } + + var securityGroup SecurityGroupResponse + + requestURL := fmt.Sprintf(getSecurityGroupURL, name) + response, err := sg.client.SendAzureGetRequest(requestURL) + if err != nil { + return securityGroup, err + } + + err = xml.Unmarshal(response, &securityGroup) + return securityGroup, err +} + +// ListNetworkSecurityGroups returns a list of the network security groups +// in the specified subscription +// +// https://msdn.microsoft.com/en-us/library/azure/dn913815.aspx +func (sg SecurityGroupClient) ListNetworkSecurityGroups() (SecurityGroupList, error) { + var securityGroups SecurityGroupList + + response, err := sg.client.SendAzureGetRequest(listSecurityGroupsURL) + if err != nil { + return securityGroups, err + } + + err = xml.Unmarshal(response, &securityGroups) 
+ return securityGroups, err +} + +// AddNetworkSecurityToSubnet associates the network security group with +// specified subnet in a virtual network +// +// https://msdn.microsoft.com/en-us/library/azure/dn913822.aspx +func (sg SecurityGroupClient) AddNetworkSecurityToSubnet( + name string, + subnet string, + virtualNetwork string) (management.OperationID, error) { + if name == "" { + return "", fmt.Errorf(errParamNotSpecified, "name") + } + if subnet == "" { + return "", fmt.Errorf(errParamNotSpecified, "subnet") + } + if virtualNetwork == "" { + return "", fmt.Errorf(errParamNotSpecified, "virtualNetwork") + } + + data, err := xml.Marshal(SecurityGroupRequest{Name: name}) + if err != nil { + return "", err + } + + requestURL := fmt.Sprintf(addSecurityGroupToSubnetURL, virtualNetwork, subnet) + return sg.client.SendAzurePostRequest(requestURL, data) +} + +// GetNetworkSecurityGroupForSubnet returns information about the network +// security group associated with a subnet +// +// https://msdn.microsoft.com/en-us/library/azure/dn913817.aspx +func (sg SecurityGroupClient) GetNetworkSecurityGroupForSubnet( + subnet string, + virtualNetwork string) (SecurityGroupResponse, error) { + if subnet == "" { + return SecurityGroupResponse{}, fmt.Errorf(errParamNotSpecified, "subnet") + } + if virtualNetwork == "" { + return SecurityGroupResponse{}, fmt.Errorf(errParamNotSpecified, "virtualNetwork") + } + + var securityGroup SecurityGroupResponse + + requestURL := fmt.Sprintf(getSecurityGroupForSubnetURL, virtualNetwork, subnet) + response, err := sg.client.SendAzureGetRequest(requestURL) + if err != nil { + return securityGroup, err + } + + err = xml.Unmarshal(response, &securityGroup) + return securityGroup, err +} + +// RemoveNetworkSecurityGroupFromSubnet removes the association of the +// specified network security group from the specified subnet +// +// https://msdn.microsoft.com/en-us/library/azure/dn913820.aspx +func (sg SecurityGroupClient) RemoveNetworkSecurityGroupFromSubnet( + name string, + subnet string, + virtualNetwork string) (management.OperationID, error) { + if name == "" { + return "", fmt.Errorf(errParamNotSpecified, "name") + } + if subnet == "" { + return "", fmt.Errorf(errParamNotSpecified, "subnet") + } + if virtualNetwork == "" { + return "", fmt.Errorf(errParamNotSpecified, "virtualNetwork") + } + + requestURL := fmt.Sprintf(removeSecurityGroupFromSubnetURL, virtualNetwork, subnet, name) + return sg.client.SendAzureDeleteRequest(requestURL) +} + +// SetNetworkSecurityGroupRule adds or updates a network security rule that +// is associated with the specified network security group +// +// https://msdn.microsoft.com/en-us/library/azure/dn913819.aspx +func (sg SecurityGroupClient) SetNetworkSecurityGroupRule( + securityGroup string, + rule RuleRequest) (management.OperationID, error) { + if securityGroup == "" { + return "", fmt.Errorf(errParamNotSpecified, "securityGroup") + } + if rule.Name == "" { + return "", fmt.Errorf(errParamNotSpecified, "Name") + } + if rule.Type == "" { + return "", fmt.Errorf(errParamNotSpecified, "Type") + } + if rule.Priority == 0 { + return "", fmt.Errorf(errParamNotSpecified, "Priority") + } + if rule.Action == "" { + return "", fmt.Errorf(errParamNotSpecified, "Action") + } + if rule.SourceAddressPrefix == "" { + return "", fmt.Errorf(errParamNotSpecified, "SourceAddressPrefix") + } + if rule.SourcePortRange == "" { + return "", fmt.Errorf(errParamNotSpecified, "SourcePortRange") + } + if rule.DestinationAddressPrefix == "" { + return "", 
fmt.Errorf(errParamNotSpecified, "DestinationAddressPrefix") + } + if rule.DestinationPortRange == "" { + return "", fmt.Errorf(errParamNotSpecified, "DestinationPortRange") + } + if rule.Protocol == "" { + return "", fmt.Errorf(errParamNotSpecified, "Protocol") + } + + data, err := xml.Marshal(rule) + if err != nil { + return "", err + } + + requestURL := fmt.Sprintf(setSecurityGroupRuleURL, securityGroup, rule.Name) + return sg.client.SendAzurePutRequest(requestURL, "", data) +} + +// DeleteNetworkSecurityGroupRule deletes a network security group rule from +// the specified network security group +// +// https://msdn.microsoft.com/en-us/library/azure/dn913816.aspx +func (sg SecurityGroupClient) DeleteNetworkSecurityGroupRule( + securityGroup string, + rule string) (management.OperationID, error) { + if securityGroup == "" { + return "", fmt.Errorf(errParamNotSpecified, "securityGroup") + } + if rule == "" { + return "", fmt.Errorf(errParamNotSpecified, "rule") + } + + requestURL := fmt.Sprintf(deleteSecurityGroupRuleURL, securityGroup, rule) + return sg.client.SendAzureDeleteRequest(requestURL) +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/management/networksecuritygroup/entities.go b/vendor/github.com/Azure/azure-sdk-for-go/management/networksecuritygroup/entities.go new file mode 100644 index 000000000..2c6f75757 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/management/networksecuritygroup/entities.go @@ -0,0 +1,115 @@ +// Package networksecuritygroup implements operations for managing network security groups +// using the Service Management REST API +// +// https://msdn.microsoft.com/en-us/library/azure/dn913824.aspx +package networksecuritygroup + +import ( + "encoding/xml" + + "github.com/Azure/azure-sdk-for-go/management" +) + +// SecurityGroupClient is used to perform operations on network security groups +type SecurityGroupClient struct { + client management.Client +} + +// SecurityGroupRequest represents a network security group +// +// https://msdn.microsoft.com/en-us/library/azure/dn913821.aspx +type SecurityGroupRequest struct { + XMLName xml.Name `xml:"http://schemas.microsoft.com/windowsazure NetworkSecurityGroup"` + Name string + Label string `xml:",omitempty"` + Location string `xml:",omitempty"` +} + +// SecurityGroupResponse represents a network security group +// +// https://msdn.microsoft.com/en-us/library/azure/dn913821.aspx +type SecurityGroupResponse struct { + XMLName xml.Name `xml:"http://schemas.microsoft.com/windowsazure NetworkSecurityGroup"` + Name string + Label string `xml:",omitempty"` + Location string `xml:",omitempty"` + State SecurityGroupState `xml:",omitempty"` + Rules []RuleResponse `xml:">Rule,omitempty"` +} + +// SecurityGroupList represents a list of security groups +type SecurityGroupList []SecurityGroupResponse + +// SecurityGroupState represents a security group state +type SecurityGroupState string + +// These constants represent the possible security group states +const ( + SecurityGroupStateCreated SecurityGroupState = "Created" + SecurityGroupStateCreating SecurityGroupState = "Creating" + SecurityGroupStateUpdating SecurityGroupState = "Updating" + SecurityGroupStateDeleting SecurityGroupState = "Deleting" + SecurityGroupStateUnavailable SecurityGroupState = "Unavailable" +) + +// RuleRequest represents a single rule of a network security group +// +// https://msdn.microsoft.com/en-us/library/azure/dn913821.aspx#bk_rules +type RuleRequest struct { + XMLName xml.Name 
`xml:"http://schemas.microsoft.com/windowsazure Rule"` + Name string + Type RuleType + Priority int + Action RuleAction + SourceAddressPrefix string + SourcePortRange string + DestinationAddressPrefix string + DestinationPortRange string + Protocol RuleProtocol +} + +// RuleResponse represents a single rule of a network security group +// +// https://msdn.microsoft.com/en-us/library/azure/dn913821.aspx#bk_rules +type RuleResponse struct { + XMLName xml.Name `xml:"http://schemas.microsoft.com/windowsazure Rule"` + Name string + Type RuleType + Priority int + Action RuleAction + SourceAddressPrefix string + SourcePortRange string + DestinationAddressPrefix string + DestinationPortRange string + Protocol RuleProtocol + State string `xml:",omitempty"` + IsDefault bool `xml:",omitempty"` +} + +// RuleType represents a rule type +type RuleType string + +// These constants represent the possible rule types +const ( + RuleTypeInbound RuleType = "Inbound" + RuleTypeOutbound RuleType = "Outbound" +) + +// RuleAction represents a rule action +type RuleAction string + +// These constants represent the possible rule actions +const ( + RuleActionAllow RuleAction = "Allow" + RuleActionDeny RuleAction = "Deny" +) + +// RuleProtocol represents a rule protocol +type RuleProtocol string + +// These constants represent the possible rule types +const ( + RuleProtocolTCP RuleProtocol = "TCP" + RuleProtocolUDP RuleProtocol = "UDP" + RuleProtocolAll RuleProtocol = "*" +) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/management/operations.go b/vendor/github.com/Azure/azure-sdk-for-go/management/operations.go new file mode 100644 index 000000000..4f6acb217 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/management/operations.go @@ -0,0 +1,92 @@ +package management + +import ( + "encoding/xml" + "errors" + "fmt" + "time" +) + +var ( + // ErrOperationCancelled from WaitForOperation when the polling loop is + // cancelled through signaling the channel. + ErrOperationCancelled = errors.New("Polling for operation status cancelled") +) + +// GetOperationStatusResponse represents an in-flight operation. Use +// client.GetOperationStatus() to get the operation given the operation ID, or +// use WaitForOperation() to poll and wait until the operation has completed. +// See https://msdn.microsoft.com/en-us/library/azure/ee460783.aspx +type GetOperationStatusResponse struct { + XMLName xml.Name `xml:"http://schemas.microsoft.com/windowsazure Operation"` + ID string + Status OperationStatus + HTTPStatusCode string + Error *AzureError +} + +// OperationStatus describes the states an Microsoft Azure Service Management +// operation an be in. 
+type OperationStatus string + +// List of states an operation can be reported as +const ( + OperationStatusInProgress OperationStatus = "InProgress" + OperationStatusSucceeded OperationStatus = "Succeeded" + OperationStatusFailed OperationStatus = "Failed" +) + +// OperationID is assigned by Azure API and can be used to look up the status of +// an operation +type OperationID string + +func (c client) GetOperationStatus(operationID OperationID) (GetOperationStatusResponse, error) { + operation := GetOperationStatusResponse{} + if operationID == "" { + return operation, fmt.Errorf(errParamNotSpecified, "operationID") + } + + url := fmt.Sprintf("operations/%s", operationID) + response, azureErr := c.SendAzureGetRequest(url) + if azureErr != nil { + return operation, azureErr + } + + err := xml.Unmarshal(response, &operation) + return operation, err +} + +func (c client) WaitForOperation(operationID OperationID, cancel chan struct{}) error { + for { + done, err := c.checkOperationStatus(operationID) + if err != nil || done { + return err + } + select { + case <-time.After(c.config.OperationPollInterval): + case <-cancel: + return ErrOperationCancelled + } + } +} + +func (c client) checkOperationStatus(id OperationID) (done bool, err error) { + op, err := c.GetOperationStatus(id) + if err != nil { + return false, fmt.Errorf("Failed to get operation status '%s': %v", id, err) + } + + switch op.Status { + case OperationStatusSucceeded: + return true, nil + case OperationStatusFailed: + if op.Error != nil { + return true, op.Error + } + return true, fmt.Errorf("Azure Operation (x-ms-request-id=%s) has failed", id) + case OperationStatusInProgress: + return false, nil + default: + return false, fmt.Errorf("Unknown operation status returned from API: %s (x-ms-request-id=%s)", op.Status, id) + } +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/management/osimage/client.go b/vendor/github.com/Azure/azure-sdk-for-go/management/osimage/client.go new file mode 100644 index 000000000..34d8b67f7 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/management/osimage/client.go @@ -0,0 +1,31 @@ +// Package osimage provides a client for Operating System Images. +package osimage + +import ( + "encoding/xml" + + "github.com/Azure/azure-sdk-for-go/management" +) + +const ( + azureImageListURL = "services/images" + errInvalidImage = "Can not find image %s in specified subscription, please specify another image name." + errParamNotSpecified = "Parameter %s is not specified." +) + +// NewClient is used to instantiate a new OSImageClient from an Azure client. 
+func NewClient(client management.Client) OSImageClient { + return OSImageClient{client: client} +} + +func (c OSImageClient) ListOSImages() (ListOSImagesResponse, error) { + var l ListOSImagesResponse + + response, err := c.client.SendAzureGetRequest(azureImageListURL) + if err != nil { + return l, err + } + + err = xml.Unmarshal(response, &l) + return l, err +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/management/osimage/entities.go b/vendor/github.com/Azure/azure-sdk-for-go/management/osimage/entities.go new file mode 100644 index 000000000..993f5d15f --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/management/osimage/entities.go @@ -0,0 +1,47 @@ +package osimage + +import ( + "encoding/xml" + + "github.com/Azure/azure-sdk-for-go/management" +) + +// OSImageClient is used to perform operations on Azure Locations +type OSImageClient struct { + client management.Client +} + +type ListOSImagesResponse struct { + XMLName xml.Name `xml:"Images"` + OSImages []OSImage `xml:"OSImage"` +} + +type OSImage struct { + Category string // Public || Private || MSDN + Label string // Specifies an identifier for the image. + LogicalSizeInGB float64 //Specifies the size, in GB, of the image. + Name string // Specifies the name of the operating system image. This is the name that is used when creating one or more virtual machines using the image. + OS string // Linux || Windows + Eula string // Specifies the End User License Agreement that is associated with the image. The value for this element is a string, but it is recommended that the value be a URL that points to a EULA. + Description string // Specifies the description of the image. + Location string // The geo-location in which this media is located. The Location value is derived from storage account that contains the blob in which the media is located. If the storage account belongs to an affinity group the value is NULL. + AffinityGroup string // Specifies the affinity in which the media is located. The AffinityGroup value is derived from storage account that contains the blob in which the media is located. If the storage account does not belong to an affinity group the value is NULL and the element is not displayed in the response. This value is NULL for platform images. + MediaLink string // Specifies the location of the vhd file for the image. The storage account where the vhd is located must be associated with the specified subscription. + ImageFamily string // Specifies a value that can be used to group images. + PublishedDate string // Specifies the date when the image was added to the image repository. + IsPremium string // Indicates whether the image contains software or associated services that will incur charges above the core price for the virtual machine. For additional details, see the PricingDetailLink element. + PrivacyURI string `xml:"PrivacyUri"` // Specifies the URI that points to a document that contains the privacy policy related to the image. + RecommendedVMSize string // Specifies the size to use for the virtual machine that is created from the image. + PublisherName string // The name of the publisher of the image. All user images have a publisher name of User. + PricingDetailLink string // Specifies a URL for an image with IsPremium set to true, which contains the pricing details for a virtual machine that is created from the image. + SmallIconURI string `xml:"SmallIconUri"` // Specifies the URI to the small icon that is displayed when the image is presented in the Microsoft Azure Management Portal. 
+ Language string // Specifies the language of the image. + IOType IOType // Provisioned || Standard +} + +type IOType string + +const ( + IOTypeProvisioned IOType = "Provisioned" + IOTypeStandard IOType = "Standard" +) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/management/publishSettings.go b/vendor/github.com/Azure/azure-sdk-for-go/management/publishSettings.go new file mode 100644 index 000000000..32bb9724b --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/management/publishSettings.go @@ -0,0 +1,105 @@ +package management + +import ( + "encoding/base64" + "encoding/pem" + "encoding/xml" + "fmt" + "io/ioutil" + + "github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/golang.org/x/crypto/pkcs12" +) + +// ClientFromPublishSettingsData unmarshalls the contents of a publish settings file +// from https://manage.windowsazure.com/publishsettings. +// If subscriptionID is left empty, the first subscription in the file is used. +func ClientFromPublishSettingsData(settingsData []byte, subscriptionID string) (client Client, err error) { + return ClientFromPublishSettingsDataWithConfig(settingsData, subscriptionID, DefaultConfig()) +} + +// ClientFromPublishSettingsFile reads a publish settings file downloaded from https://manage.windowsazure.com/publishsettings. +// If subscriptionID is left empty, the first subscription in the file is used. +func ClientFromPublishSettingsFile(filePath, subscriptionID string) (client Client, err error) { + return ClientFromPublishSettingsFileWithConfig(filePath, subscriptionID, DefaultConfig()) +} + +// ClientFromPublishSettingsFileWithConfig reads a publish settings file downloaded from https://manage.windowsazure.com/publishsettings. +// If subscriptionID is left empty, the first subscription in the file is used. +func ClientFromPublishSettingsFileWithConfig(filePath, subscriptionID string, config ClientConfig) (client Client, err error) { + if filePath == "" { + return client, fmt.Errorf(errParamNotSpecified, "filePath") + } + + publishSettingsContent, err := ioutil.ReadFile(filePath) + if err != nil { + return client, err + } + + return ClientFromPublishSettingsDataWithConfig(publishSettingsContent, subscriptionID, config) +} + +// ClientFromPublishSettingsDataWithConfig unmarshalls the contents of a publish settings file +// from https://manage.windowsazure.com/publishsettings. +// If subscriptionID is left empty, the first subscription in the string is used. +func ClientFromPublishSettingsDataWithConfig(data []byte, subscriptionID string, config ClientConfig) (client Client, err error) { + publishData := publishData{} + if err = xml.Unmarshal(data, &publishData); err != nil { + return client, err + } + + for _, profile := range publishData.PublishProfiles { + for _, sub := range profile.Subscriptions { + if sub.ID == subscriptionID || subscriptionID == "" { + base64Cert := sub.ManagementCertificate + if base64Cert == "" { + base64Cert = profile.ManagementCertificate + } + + pfxData, err := base64.StdEncoding.DecodeString(base64Cert) + if err != nil { + return client, err + } + + pems, err := pkcs12.ToPEM(pfxData, "") + + cert := []byte{} + for _, b := range pems { + cert = append(cert, pem.EncodeToMemory(b)...) 
+ } + + config.ManagementURL = sub.ServiceManagementURL + return makeClient(sub.ID, cert, config) + } + } + } + + return client, fmt.Errorf("could not find subscription '%s' in settings provided", subscriptionID) +} + +type publishSettings struct { + SubscriptionID string + SubscriptionCert []byte + SubscriptionKey []byte +} + +type publishData struct { + XMLName xml.Name `xml:"PublishData"` + PublishProfiles []publishProfile `xml:"PublishProfile"` +} + +type publishProfile struct { + XMLName xml.Name `xml:"PublishProfile"` + SchemaVersion string `xml:",attr"` + PublishMethod string `xml:",attr"` + URL string `xml:"Url,attr"` + ManagementCertificate string `xml:",attr"` + Subscriptions []subscription `xml:"Subscription"` +} + +type subscription struct { + XMLName xml.Name `xml:"Subscription"` + ServiceManagementURL string `xml:"ServiceManagementUrl,attr"` + ID string `xml:"Id,attr"` + Name string `xml:",attr"` + ManagementCertificate string `xml:",attr"` +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/management/sql/client.go b/vendor/github.com/Azure/azure-sdk-for-go/management/sql/client.go new file mode 100644 index 000000000..970c3f644 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/management/sql/client.go @@ -0,0 +1,316 @@ +package sql + +import ( + "encoding/xml" + "fmt" + "time" + + "github.com/Azure/azure-sdk-for-go/management" +) + +// Definitions of numerous constants representing API endpoints. +const ( + azureCreateDatabaseServerURL = "services/sqlservers/servers" + azureListDatabaseServersURL = "services/sqlservers/servers" + azureDeleteDatabaseServerURL = "services/sqlservers/servers/%s" + + azureCreateFirewallRuleURL = "services/sqlservers/servers/%s/firewallrules" + azureGetFirewallRuleURL = "services/sqlservers/servers/%s/firewallrules/%s" + azureListFirewallRulesURL = "services/sqlservers/servers/%s/firewallrules" + azureUpdateFirewallRuleURL = "services/sqlservers/servers/%s/firewallrules/%s" + azureDeleteFirewallRuleURL = "services/sqlservers/servers/%s/firewallrules/%s" + + azureCreateDatabaseURL = "services/sqlservers/servers/%s/databases" + azureGetDatabaseURL = "services/sqlservers/servers/%s/databases/%s" + azureListDatabasesURL = "services/sqlservers/servers/%s/databases?contentview=generic" + azureUpdateDatabaseURL = "services/sqlservers/servers/%s/databases/%s" + azureDeleteDatabaseURL = "services/sqlservers/servers/%s/databases/%s" + + errParamNotSpecified = "Parameter %s was not specified." + + DatabaseStateCreating = "Creating" +) + +// SQLDatabaseClient defines various database CRUD operations. +// It contains a management.Client for making the actual http calls. +type SQLDatabaseClient struct { + mgmtClient management.Client +} + +// NewClient returns a new SQLDatabaseClient struct with the provided +// management.Client as the underlying client. +func NewClient(mgmtClient management.Client) SQLDatabaseClient { + return SQLDatabaseClient{mgmtClient} +} + +// CreateServer creates a new Azure SQL Database server and return its name. 
+// +// https://msdn.microsoft.com/en-us/library/azure/dn505699.aspx +func (c SQLDatabaseClient) CreateServer(params DatabaseServerCreateParams) (string, error) { + req, err := xml.Marshal(params) + if err != nil { + return "", err + } + + resp, err := c.mgmtClient.SendAzurePostRequestWithReturnedResponse(azureCreateDatabaseServerURL, req) + if err != nil { + return "", err + } + + var name string + err = xml.Unmarshal(resp, &name) + + return name, err +} + +// ListServers retrieves the Azure SQL Database servers for this subscription. +// +// https://msdn.microsoft.com/en-us/library/azure/dn505702.aspx +func (c SQLDatabaseClient) ListServers() (ListServersResponse, error) { + var resp ListServersResponse + + data, err := c.mgmtClient.SendAzureGetRequest(azureListDatabaseServersURL) + if err != nil { + return resp, err + } + + err = xml.Unmarshal(data, &resp) + return resp, err +} + +// DeleteServer deletes an Azure SQL Database server (including all its databases). +// +// https://msdn.microsoft.com/en-us/library/azure/dn505695.aspx +func (c SQLDatabaseClient) DeleteServer(name string) error { + if name == "" { + return fmt.Errorf(errParamNotSpecified, "name") + } + + url := fmt.Sprintf(azureDeleteDatabaseServerURL, name) + _, err := c.mgmtClient.SendAzureDeleteRequest(url) + return err +} + +// CreateFirewallRule creates an Azure SQL Database server +// firewall rule. +// +// https://msdn.microsoft.com/en-us/library/azure/dn505712.aspx +func (c SQLDatabaseClient) CreateFirewallRule(server string, params FirewallRuleCreateParams) error { + if server == "" { + return fmt.Errorf(errParamNotSpecified, "server") + } + + req, err := xml.Marshal(params) + if err != nil { + return err + } + + url := fmt.Sprintf(azureCreateFirewallRuleURL, server) + + _, err = c.mgmtClient.SendAzurePostRequest(url, req) + return err +} + +// GetFirewallRule gets the details of an Azure SQL Database Server firewall rule. +// +// https://msdn.microsoft.com/en-us/library/azure/dn505698.aspx +func (c SQLDatabaseClient) GetFirewallRule(server, ruleName string) (FirewallRuleResponse, error) { + var rule FirewallRuleResponse + + if server == "" { + return rule, fmt.Errorf(errParamNotSpecified, "server") + } + if ruleName == "" { + return rule, fmt.Errorf(errParamNotSpecified, "ruleName") + } + + url := fmt.Sprintf(azureGetFirewallRuleURL, server, ruleName) + resp, err := c.mgmtClient.SendAzureGetRequest(url) + if err != nil { + return rule, err + } + + err = xml.Unmarshal(resp, &rule) + return rule, err +} + +// ListFirewallRules retrieves the set of firewall rules for an Azure SQL +// Database Server. +// +// https://msdn.microsoft.com/en-us/library/azure/dn505715.aspx +func (c SQLDatabaseClient) ListFirewallRules(server string) (ListFirewallRulesResponse, error) { + var rules ListFirewallRulesResponse + + if server == "" { + return rules, fmt.Errorf(errParamNotSpecified, "server") + } + + url := fmt.Sprintf(azureListFirewallRulesURL, server) + resp, err := c.mgmtClient.SendAzureGetRequest(url) + if err != nil { + return rules, err + } + + err = xml.Unmarshal(resp, &rules) + return rules, err +} + +// UpdateFirewallRule update a firewall rule for an Azure SQL Database server. 
+// +// https://msdn.microsoft.com/en-us/library/azure/dn505707.aspx +func (c SQLDatabaseClient) UpdateFirewallRule(server, ruleName string, params FirewallRuleUpdateParams) error { + if server == "" { + return fmt.Errorf(errParamNotSpecified, "server") + } + if ruleName == "" { + return fmt.Errorf(errParamNotSpecified, "ruleName") + } + + req, err := xml.Marshal(params) + if err != nil { + return err + } + + url := fmt.Sprintf(azureUpdateFirewallRuleURL, server, ruleName) + _, err = c.mgmtClient.SendAzurePutRequest(url, "text/xml", req) + return err +} + +// DeleteFirewallRule deletes an Azure SQL Database server firewall rule. +// +// https://msdn.microsoft.com/en-us/library/azure/dn505706.aspx +func (c SQLDatabaseClient) DeleteFirewallRule(server, ruleName string) error { + if server == "" { + return fmt.Errorf(errParamNotSpecified, "server") + } + if ruleName == "" { + return fmt.Errorf(errParamNotSpecified, "ruleName") + } + + url := fmt.Sprintf(azureDeleteFirewallRuleURL, server, ruleName) + + _, err := c.mgmtClient.SendAzureDeleteRequest(url) + return err +} + +// CreateDatabase creates a new Microsoft Azure SQL Database on the given database server. +// +// https://msdn.microsoft.com/en-us/library/azure/dn505701.aspx +func (c SQLDatabaseClient) CreateDatabase(server string, params DatabaseCreateParams) error { + if server == "" { + return fmt.Errorf(errParamNotSpecified, "server") + } + + req, err := xml.Marshal(params) + if err != nil { + return err + } + + target := fmt.Sprintf(azureCreateDatabaseURL, server) + _, err = c.mgmtClient.SendAzurePostRequest(target, req) + return err +} + +// WaitForDatabaseCreation is a helper method which waits +// for the creation of the database on the given server. +func (c SQLDatabaseClient) WaitForDatabaseCreation( + server, database string, + cancel chan struct{}) error { + for { + stat, err := c.GetDatabase(server, database) + if err != nil { + return err + } + if stat.State != DatabaseStateCreating { + return nil + } + + select { + case <-time.After(management.DefaultOperationPollInterval): + case <-cancel: + return management.ErrOperationCancelled + } + } +} + +// GetDatabase gets the details for an Azure SQL Database. +// +// https://msdn.microsoft.com/en-us/library/azure/dn505708.aspx +func (c SQLDatabaseClient) GetDatabase(server, database string) (ServiceResource, error) { + var db ServiceResource + + if database == "" { + return db, fmt.Errorf(errParamNotSpecified, "database") + } + if server == "" { + return db, fmt.Errorf(errParamNotSpecified, "server") + } + + url := fmt.Sprintf(azureGetDatabaseURL, server, database) + resp, err := c.mgmtClient.SendAzureGetRequest(url) + if err != nil { + return db, err + } + + err = xml.Unmarshal(resp, &db) + return db, err +} + +// ListDatabases returns a list of Azure SQL Databases on the given server. +// +// https://msdn.microsoft.com/en-us/library/azure/dn505711.aspx +func (c SQLDatabaseClient) ListDatabases(server string) (ListDatabasesResponse, error) { + var databases ListDatabasesResponse + if server == "" { + return databases, fmt.Errorf(errParamNotSpecified, "server name") + } + + url := fmt.Sprintf(azureListDatabasesURL, server) + resp, err := c.mgmtClient.SendAzureGetRequest(url) + if err != nil { + return databases, err + } + + err = xml.Unmarshal(resp, &databases) + return databases, err +} + +// UpdateDatabase updates the details of the given Database off the given server. 
+// +// https://msdn.microsoft.com/en-us/library/azure/dn505718.aspx +func (c SQLDatabaseClient) UpdateDatabase( + server, database string, + params ServiceResourceUpdateParams) (management.OperationID, error) { + if database == "" { + return "", fmt.Errorf(errParamNotSpecified, "database") + } + if server == "" { + return "", fmt.Errorf(errParamNotSpecified, "server") + } + + url := fmt.Sprintf(azureUpdateDatabaseURL, server, database) + req, err := xml.Marshal(params) + if err != nil { + return "", err + } + + return c.mgmtClient.SendAzurePutRequest(url, "text/xml", req) +} + +// DeleteDatabase deletes the Azure SQL Database off the given server. +// +// https://msdn.microsoft.com/en-us/library/azure/dn505705.aspx +func (c SQLDatabaseClient) DeleteDatabase(server, database string) error { + if database == "" { + return fmt.Errorf(errParamNotSpecified, "database") + } + if server == "" { + return fmt.Errorf(errParamNotSpecified, "server") + } + + url := fmt.Sprintf(azureDeleteDatabaseURL, server, database) + + _, err := c.mgmtClient.SendAzureDeleteRequest(url) + + return err +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/management/sql/entities.go b/vendor/github.com/Azure/azure-sdk-for-go/management/sql/entities.go new file mode 100644 index 000000000..e5135969a --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/management/sql/entities.go @@ -0,0 +1,124 @@ +package sql + +import ( + "encoding/xml" +) + +// DatabaseServerCreateParams represents the set of possible parameters +// when issuing a database server creation request to Azure. +// +// https://msdn.microsoft.com/en-us/library/azure/dn505699.aspx +type DatabaseServerCreateParams struct { + XMLName xml.Name `xml:"http://schemas.microsoft.com/sqlazure/2010/12/ Server"` + AdministratorLogin string + AdministratorLoginPassword string + Location string + Version string +} + +// DatabaseServerCreateResponse represents the response following the creation of +// a database server on Azure. +type DatabaseServerCreateResponse struct { + ServerName string +} + +const ( + DatabaseServerVersion11 = "2.0" + DatabaseServerVersion12 = "12.0" +) + +// DatabaseServer represents the set of data recieved from +// a database server list operation. +// +// https://msdn.microsoft.com/en-us/library/azure/dn505702.aspx +type DatabaseServer struct { + Name string + AdministratorLogin string + Location string + FullyQualifiedDomainName string + Version string + State string +} + +type ListServersResponse struct { + DatabaseServers []DatabaseServer `xml:"Server"` +} + +// FirewallRuleCreateParams represents the set of possible +// paramaters when creating a firewall rule on an Azure database server. +// +// https://msdn.microsoft.com/en-us/library/azure/dn505712.aspx +type FirewallRuleCreateParams struct { + XMLName xml.Name `xml:"http://schemas.microsoft.com/windowsazure ServiceResource"` + Name string + StartIPAddress string + EndIPAddress string +} + +// FirewallRuleResponse represents the set of data recieved from +// an Azure database server firewall rule get response. +// +// https://msdn.microsoft.com/en-us/library/azure/dn505698.aspx +type FirewallRuleResponse struct { + Name string + StartIPAddress string + EndIPAddress string +} + +type ListFirewallRulesResponse struct { + FirewallRules []FirewallRuleResponse `xml:"ServiceResource"` +} + +// FirewallRuleUpdateParams represents the set of possible +// parameters when issuing an update of a database server firewall rule. 
+// +// https://msdn.microsoft.com/en-us/library/azure/dn505707.aspx +type FirewallRuleUpdateParams struct { + XMLName xml.Name `xml:"http://schemas.microsoft.com/windowsazure ServiceResource"` + Name string + StartIPAddress string + EndIPAddress string +} + +// DatabaseCreateParams represents the set of possible parameters when issuing +// a database creation to Azure, and reading a list response from Azure. +// +// https://msdn.microsoft.com/en-us/library/azure/dn505701.aspx +type DatabaseCreateParams struct { + XMLName xml.Name `xml:"http://schemas.microsoft.com/windowsazure ServiceResource"` + Name string + Edition string `xml:",omitempty"` + CollationName string `xml:",omitempty"` + MaxSizeBytes int64 `xml:",omitempty"` + ServiceObjectiveID string `xml:"ServiceObjectiveId,omitempty"` +} + +// ServiceResource represents the set of parameters obtained from a database +// get or list call. +// +// https://msdn.microsoft.com/en-us/library/azure/dn505708.aspx +type ServiceResource struct { + Name string + State string + SelfLink string + Edition string + CollationName string + MaxSizeBytes int64 + ServiceObjectiveID string `xml:"ServiceObjectiveId,omitempty"` +} + +type ListDatabasesResponse struct { + ServiceResources []ServiceResource `xml:"ServiceResource"` +} + +// ServiceResourceUpdateParams represents the set of parameters available +// for a database service update operation. +// +// https://msdn.microsoft.com/en-us/library/azure/dn505718.aspx +type ServiceResourceUpdateParams struct { + XMLName xml.Name `xml:"http://schemas.microsoft.com/windowsazure ServiceResource"` + Name string + Edition string `xml:",omitempty"` + MaxSizeBytes int64 `xml:",omitempty"` + ServiceObjectiveID string `xml:"ServiceObjectiveId,omitempty"` +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/management/storageservice/client.go b/vendor/github.com/Azure/azure-sdk-for-go/management/storageservice/client.go new file mode 100644 index 000000000..dad87e6db --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/management/storageservice/client.go @@ -0,0 +1,108 @@ +// Package storageservice provides a client for Storage Services. +package storageservice + +import ( + "encoding/xml" + "fmt" + + "github.com/Azure/azure-sdk-for-go/management" +) + +const ( + azureStorageServiceListURL = "services/storageservices" + azureStorageServiceURL = "services/storageservices/%s" + azureStorageServiceKeysURL = "services/storageservices/%s/keys" + azureStorageAccountAvailabilityURL = "services/storageservices/operations/isavailable/%s" + + azureXmlns = "http://schemas.microsoft.com/windowsazure" + + errParamNotSpecified = "Parameter %s is not specified." +) + +// NewClient is used to instantiate a new StorageServiceClient from an Azure +// client. 
+func NewClient(s management.Client) StorageServiceClient { + return StorageServiceClient{client: s} +} + +func (s StorageServiceClient) ListStorageServices() (ListStorageServicesResponse, error) { + var l ListStorageServicesResponse + response, err := s.client.SendAzureGetRequest(azureStorageServiceListURL) + if err != nil { + return l, err + } + + err = xml.Unmarshal(response, &l) + return l, err +} + +func (s StorageServiceClient) GetStorageService(serviceName string) (StorageServiceResponse, error) { + var svc StorageServiceResponse + if serviceName == "" { + return svc, fmt.Errorf(errParamNotSpecified, "serviceName") + } + + requestURL := fmt.Sprintf(azureStorageServiceURL, serviceName) + response, err := s.client.SendAzureGetRequest(requestURL) + if err != nil { + return svc, err + } + + err = xml.Unmarshal(response, &svc) + return svc, err +} + +func (s StorageServiceClient) GetStorageServiceKeys(serviceName string) (GetStorageServiceKeysResponse, error) { + var r GetStorageServiceKeysResponse + if serviceName == "" { + return r, fmt.Errorf(errParamNotSpecified, "serviceName") + } + + requestURL := fmt.Sprintf(azureStorageServiceKeysURL, serviceName) + data, err := s.client.SendAzureGetRequest(requestURL) + if err != nil { + return r, err + } + + err = xml.Unmarshal(data, &r) + return r, err +} + +func (s StorageServiceClient) CreateStorageService(parameters StorageAccountCreateParameters) (management.OperationID, error) { + data, err := xml.Marshal(CreateStorageServiceInput{ + StorageAccountCreateParameters: parameters}) + if err != nil { + return "", err + } + + return s.client.SendAzurePostRequest(azureStorageServiceListURL, data) +} + +func (s StorageServiceClient) DeleteStorageService(serviceName string) (management.OperationID, error) { + if serviceName == "" { + return "", fmt.Errorf(errParamNotSpecified, "serviceName") + } + + requestURL := fmt.Sprintf(azureStorageServiceURL, serviceName) + return s.client.SendAzureDeleteRequest(requestURL) +} + +// CheckStorageAccountNameAvailability checks to if the specified storage account +// name is available. 
+// +// See https://msdn.microsoft.com/en-us/library/azure/jj154125.aspx +func (s StorageServiceClient) CheckStorageAccountNameAvailability(name string) (AvailabilityResponse, error) { + var r AvailabilityResponse + if name == "" { + return r, fmt.Errorf(errParamNotSpecified, "name") + } + + requestURL := fmt.Sprintf(azureStorageAccountAvailabilityURL, name) + response, err := s.client.SendAzureGetRequest(requestURL) + if err != nil { + return r, err + } + + err = xml.Unmarshal(response, &r) + return r, err +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/management/storageservice/entities.go b/vendor/github.com/Azure/azure-sdk-for-go/management/storageservice/entities.go new file mode 100644 index 000000000..2401298aa --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/management/storageservice/entities.go @@ -0,0 +1,79 @@ +package storageservice + +import ( + "encoding/xml" + + "github.com/Azure/azure-sdk-for-go/management" +) + +// StorageServiceClient is used to perform operations on Azure Storage +type StorageServiceClient struct { + client management.Client +} + +type ListStorageServicesResponse struct { + StorageServices []StorageServiceResponse `xml:"StorageService"` +} + +type StorageServiceResponse struct { + URL string `xml:"Url"` + ServiceName string + StorageServiceProperties StorageServiceProperties +} + +type StorageServiceProperties struct { + Description string + Location string + Label string + Status string + Endpoints []string `xml:"Endpoints>Endpoint"` + GeoReplicationEnabled string + GeoPrimaryRegion string +} + +type GetStorageServiceKeysResponse struct { + URL string `xml:"Url"` + PrimaryKey string `xml:"StorageServiceKeys>Primary"` + SecondaryKey string `xml:"StorageServiceKeys>Secondary"` +} + +type CreateStorageServiceInput struct { + XMLName xml.Name `xml:"http://schemas.microsoft.com/windowsazure CreateStorageServiceInput"` + StorageAccountCreateParameters +} + +type StorageAccountCreateParameters struct { + ServiceName string + Description string `xml:",omitempty"` + Label string + AffinityGroup string `xml:",omitempty"` + Location string `xml:",omitempty"` + ExtendedProperties ExtendedPropertyList + AccountType AccountType +} + +type AccountType string + +const ( + AccountTypeStandardLRS AccountType = "Standard_LRS" + AccountTypeStandardZRS AccountType = "Standard_ZRS" + AccountTypeStandardGRS AccountType = "Standard_GRS" + AccountTypeStandardRAGRS AccountType = "Standard_RAGRS" + AccountTypePremiumLRS AccountType = "Premium_LRS" +) + +type ExtendedPropertyList struct { + ExtendedProperty []ExtendedProperty +} + +type ExtendedProperty struct { + Name string + Value string +} + +type AvailabilityResponse struct { + XMLName xml.Name `xml:"AvailabilityResponse"` + Xmlns string `xml:"xmlns,attr"` + Result bool + Reason string +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/management/storageservice/entities_test.go b/vendor/github.com/Azure/azure-sdk-for-go/management/storageservice/entities_test.go new file mode 100644 index 000000000..3f02f5beb --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/management/storageservice/entities_test.go @@ -0,0 +1,31 @@ +package storageservice + +import ( + "encoding/xml" + "testing" +) + +func Test_StorageServiceKeysResponse_Unmarshal(t *testing.T) { + // from https://msdn.microsoft.com/en-us/library/azure/ee460785.aspx + response := []byte(` + + storage-service-url + + primary-key + secondary-key + + `) + + keysResponse := GetStorageServiceKeysResponse{} + err := xml.Unmarshal(response, 
&keysResponse) + if err != nil { + t.Fatal(err) + } + + if expected := "primary-key"; keysResponse.PrimaryKey != expected { + t.Fatalf("Expected %q but got %q", expected, keysResponse.PrimaryKey) + } + if expected := "secondary-key"; keysResponse.SecondaryKey != expected { + t.Fatalf("Expected %q but got %q", expected, keysResponse.SecondaryKey) + } +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/management/testutils/managementclient.go b/vendor/github.com/Azure/azure-sdk-for-go/management/testutils/managementclient.go new file mode 100644 index 000000000..426e20dcc --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/management/testutils/managementclient.go @@ -0,0 +1,87 @@ +// Package testutils contains some test utilities for the Azure SDK +package testutils + +import ( + "encoding/base64" + "os" + "testing" + + "github.com/Azure/azure-sdk-for-go/management" +) + +// GetTestClient returns a management Client for testing. Expects +// AZSUBSCRIPTIONID and AZCERTDATA to be present in the environment. AZCERTDATA +// is the base64encoded binary representation of the PEM certificate data. +func GetTestClient(t *testing.T) management.Client { + subid := os.Getenv("AZSUBSCRIPTIONID") + certdata := os.Getenv("AZCERTDATA") + if subid == "" || certdata == "" { + t.Skip("AZSUBSCRIPTIONID or AZCERTDATA not set, skipping test") + } + cert, err := base64.StdEncoding.DecodeString(certdata) + if err != nil { + t.Fatal(err) + } + + client, err := management.NewClient(subid, cert) + if err != nil { + t.Fatal(err) + } + return testClient{client, t} +} + +type testClient struct { + management.Client + t *testing.T +} + +func chop(d []byte) string { + const maxlen = 5000 + + s := string(d) + + if len(s) > maxlen { + return s[:maxlen] + "..." + } + return s +} + +func (l testClient) SendAzureGetRequest(url string) ([]byte, error) { + d, err := l.Client.SendAzureGetRequest(url) + logOperation(l.t, "GET", url, nil, d, "", err) + return d, err +} + +func (l testClient) SendAzurePostRequest(url string, data []byte) (management.OperationID, error) { + oid, err := l.Client.SendAzurePostRequest(url, data) + logOperation(l.t, "POST", url, data, nil, oid, err) + return oid, err +} + +func (l testClient) SendAzurePutRequest(url string, contentType string, data []byte) (management.OperationID, error) { + oid, err := l.Client.SendAzurePutRequest(url, contentType, data) + logOperation(l.t, "PUT", url, data, nil, oid, err) + return oid, err +} + +func (l testClient) SendAzureDeleteRequest(url string) (management.OperationID, error) { + oid, err := l.Client.SendAzureDeleteRequest(url) + logOperation(l.t, "DELETE", url, nil, nil, oid, err) + return oid, err +} + +func logOperation(t *testing.T, method, url string, requestData, responseData []byte, oid management.OperationID, err error) { + t.Logf("AZURE> %s %s\n", method, url) + if requestData != nil { + t.Logf(" >>> %s\n", chop(requestData)) + } + if err != nil { + t.Logf(" <<< ERROR: %+v\n", err) + } else { + if responseData != nil { + t.Logf(" <<< %s\n", chop(responseData)) + } else { + t.Logf(" <<< OperationID: %s\n", oid) + } + } +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/management/util.go b/vendor/github.com/Azure/azure-sdk-for-go/management/util.go new file mode 100644 index 000000000..d22a00531 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/management/util.go @@ -0,0 +1,11 @@ +package management + +import ( + "github.com/Azure/azure-sdk-for-go/core/http" + "io/ioutil" +) + +func getResponseBody(response *http.Response) 
([]byte, error) { + defer response.Body.Close() + return ioutil.ReadAll(response.Body) +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/management/virtualmachine/client.go b/vendor/github.com/Azure/azure-sdk-for-go/management/virtualmachine/client.go new file mode 100644 index 000000000..a712b44cc --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/management/virtualmachine/client.go @@ -0,0 +1,327 @@ +// Package virtualmachine provides a client for Virtual Machines. +package virtualmachine + +import ( + "encoding/xml" + "fmt" + + "github.com/Azure/azure-sdk-for-go/management" +) + +const ( + azureDeploymentListURL = "services/hostedservices/%s/deployments" + azureDeploymentURL = "services/hostedservices/%s/deployments/%s" + azureListDeploymentsInSlotURL = "services/hostedservices/%s/deploymentslots/Production" + deleteAzureDeploymentURL = "services/hostedservices/%s/deployments/%s?comp=media" + azureAddRoleURL = "services/hostedservices/%s/deployments/%s/roles" + azureRoleURL = "services/hostedservices/%s/deployments/%s/roles/%s" + azureOperationsURL = "services/hostedservices/%s/deployments/%s/roleinstances/%s/Operations" + azureRoleSizeListURL = "rolesizes" + + errParamNotSpecified = "Parameter %s is not specified." +) + +//NewClient is used to instantiate a new VirtualMachineClient from an Azure client +func NewClient(client management.Client) VirtualMachineClient { + return VirtualMachineClient{client: client} +} + +// CreateDeploymentOptions can be used to create a customized deployement request +type CreateDeploymentOptions struct { + DNSServers []DNSServer + LoadBalancers []LoadBalancer + ReservedIPName string + VirtualNetworkName string +} + +// CreateDeployment creates a deployment and then creates a virtual machine +// in the deployment based on the specified configuration. +// +// https://msdn.microsoft.com/en-us/library/azure/jj157194.aspx +func (vm VirtualMachineClient) CreateDeployment( + role Role, + cloudServiceName string, + options CreateDeploymentOptions) (management.OperationID, error) { + + req := DeploymentRequest{ + Name: role.RoleName, + DeploymentSlot: "Production", + Label: role.RoleName, + RoleList: []Role{role}, + DNSServers: options.DNSServers, + LoadBalancers: options.LoadBalancers, + ReservedIPName: options.ReservedIPName, + VirtualNetworkName: options.VirtualNetworkName, + } + + data, err := xml.Marshal(req) + if err != nil { + return "", err + } + + requestURL := fmt.Sprintf(azureDeploymentListURL, cloudServiceName) + return vm.client.SendAzurePostRequest(requestURL, data) +} + +// GetDeploymentName queries an existing Azure cloud service for the name of the Deployment, +// if any, in its 'Production' slot (the only slot possible). 
If none exists, it returns empty +// string but no error +// +//https://msdn.microsoft.com/en-us/library/azure/ee460804.aspx +func (vm VirtualMachineClient) GetDeploymentName(cloudServiceName string) (string, error) { + var deployment DeploymentResponse + if cloudServiceName == "" { + return "", fmt.Errorf(errParamNotSpecified, "cloudServiceName") + } + requestURL := fmt.Sprintf(azureListDeploymentsInSlotURL, cloudServiceName) + response, err := vm.client.SendAzureGetRequest(requestURL) + if err != nil { + if management.IsResourceNotFoundError(err) { + return "", nil + } + return "", err + } + err = xml.Unmarshal(response, &deployment) + if err != nil { + return "", err + } + + return deployment.Name, nil +} + +func (vm VirtualMachineClient) GetDeployment(cloudServiceName, deploymentName string) (DeploymentResponse, error) { + var deployment DeploymentResponse + if cloudServiceName == "" { + return deployment, fmt.Errorf(errParamNotSpecified, "cloudServiceName") + } + if deploymentName == "" { + return deployment, fmt.Errorf(errParamNotSpecified, "deploymentName") + } + requestURL := fmt.Sprintf(azureDeploymentURL, cloudServiceName, deploymentName) + response, azureErr := vm.client.SendAzureGetRequest(requestURL) + if azureErr != nil { + return deployment, azureErr + } + + err := xml.Unmarshal(response, &deployment) + return deployment, err +} + +func (vm VirtualMachineClient) DeleteDeployment(cloudServiceName, deploymentName string) (management.OperationID, error) { + if cloudServiceName == "" { + return "", fmt.Errorf(errParamNotSpecified, "cloudServiceName") + } + if deploymentName == "" { + return "", fmt.Errorf(errParamNotSpecified, "deploymentName") + } + + requestURL := fmt.Sprintf(deleteAzureDeploymentURL, cloudServiceName, deploymentName) + return vm.client.SendAzureDeleteRequest(requestURL) +} + +func (vm VirtualMachineClient) GetRole(cloudServiceName, deploymentName, roleName string) (*Role, error) { + if cloudServiceName == "" { + return nil, fmt.Errorf(errParamNotSpecified, "cloudServiceName") + } + if deploymentName == "" { + return nil, fmt.Errorf(errParamNotSpecified, "deploymentName") + } + if roleName == "" { + return nil, fmt.Errorf(errParamNotSpecified, "roleName") + } + + role := new(Role) + + requestURL := fmt.Sprintf(azureRoleURL, cloudServiceName, deploymentName, roleName) + response, azureErr := vm.client.SendAzureGetRequest(requestURL) + if azureErr != nil { + return nil, azureErr + } + + err := xml.Unmarshal(response, role) + if err != nil { + return nil, err + } + + return role, nil +} + +// AddRole adds a Virtual Machine to a deployment of Virtual Machines, where role name = VM name +// See https://msdn.microsoft.com/en-us/library/azure/jj157186.aspx +func (vm VirtualMachineClient) AddRole(cloudServiceName string, deploymentName string, role Role) (management.OperationID, error) { + if cloudServiceName == "" { + return "", fmt.Errorf(errParamNotSpecified, "cloudServiceName") + } + if deploymentName == "" { + return "", fmt.Errorf(errParamNotSpecified, "deploymentName") + } + + data, err := xml.Marshal(PersistentVMRole{Role: role}) + if err != nil { + return "", err + } + + requestURL := fmt.Sprintf(azureAddRoleURL, cloudServiceName, deploymentName) + return vm.client.SendAzurePostRequest(requestURL, data) +} + +// UpdateRole updates the configuration of the specified virtual machine +// See https://msdn.microsoft.com/en-us/library/azure/jj157187.aspx +func (vm VirtualMachineClient) UpdateRole(cloudServiceName, deploymentName, roleName string, role Role) 
(management.OperationID, error) { + if cloudServiceName == "" { + return "", fmt.Errorf(errParamNotSpecified, "cloudServiceName") + } + if deploymentName == "" { + return "", fmt.Errorf(errParamNotSpecified, "deploymentName") + } + if roleName == "" { + return "", fmt.Errorf(errParamNotSpecified, "roleName") + } + + data, err := xml.Marshal(PersistentVMRole{Role: role}) + if err != nil { + return "", err + } + + requestURL := fmt.Sprintf(azureRoleURL, cloudServiceName, deploymentName, roleName) + return vm.client.SendAzurePutRequest(requestURL, "text/xml", data) +} + +func (vm VirtualMachineClient) StartRole(cloudServiceName, deploymentName, roleName string) (management.OperationID, error) { + if cloudServiceName == "" { + return "", fmt.Errorf(errParamNotSpecified, "cloudServiceName") + } + if deploymentName == "" { + return "", fmt.Errorf(errParamNotSpecified, "deploymentName") + } + if roleName == "" { + return "", fmt.Errorf(errParamNotSpecified, "roleName") + } + + startRoleOperationBytes, err := xml.Marshal(StartRoleOperation{ + OperationType: "StartRoleOperation", + }) + if err != nil { + return "", err + } + + requestURL := fmt.Sprintf(azureOperationsURL, cloudServiceName, deploymentName, roleName) + return vm.client.SendAzurePostRequest(requestURL, startRoleOperationBytes) +} + +func (vm VirtualMachineClient) ShutdownRole(cloudServiceName, deploymentName, roleName string) (management.OperationID, error) { + if cloudServiceName == "" { + return "", fmt.Errorf(errParamNotSpecified, "cloudServiceName") + } + if deploymentName == "" { + return "", fmt.Errorf(errParamNotSpecified, "deploymentName") + } + if roleName == "" { + return "", fmt.Errorf(errParamNotSpecified, "roleName") + } + + shutdownRoleOperationBytes, err := xml.Marshal(ShutdownRoleOperation{ + OperationType: "ShutdownRoleOperation", + }) + if err != nil { + return "", err + } + + requestURL := fmt.Sprintf(azureOperationsURL, cloudServiceName, deploymentName, roleName) + return vm.client.SendAzurePostRequest(requestURL, shutdownRoleOperationBytes) +} + +func (vm VirtualMachineClient) RestartRole(cloudServiceName, deploymentName, roleName string) (management.OperationID, error) { + if cloudServiceName == "" { + return "", fmt.Errorf(errParamNotSpecified, "cloudServiceName") + } + if deploymentName == "" { + return "", fmt.Errorf(errParamNotSpecified, "deploymentName") + } + if roleName == "" { + return "", fmt.Errorf(errParamNotSpecified, "roleName") + } + + restartRoleOperationBytes, err := xml.Marshal(RestartRoleOperation{ + OperationType: "RestartRoleOperation", + }) + if err != nil { + return "", err + } + + requestURL := fmt.Sprintf(azureOperationsURL, cloudServiceName, deploymentName, roleName) + return vm.client.SendAzurePostRequest(requestURL, restartRoleOperationBytes) +} + +func (vm VirtualMachineClient) DeleteRole(cloudServiceName, deploymentName, roleName string, deleteVHD bool) (management.OperationID, error) { + if cloudServiceName == "" { + return "", fmt.Errorf(errParamNotSpecified, "cloudServiceName") + } + if deploymentName == "" { + return "", fmt.Errorf(errParamNotSpecified, "deploymentName") + } + if roleName == "" { + return "", fmt.Errorf(errParamNotSpecified, "roleName") + } + + requestURL := fmt.Sprintf(azureRoleURL, cloudServiceName, deploymentName, roleName) + if deleteVHD { + requestURL += "?comp=media" + } + return vm.client.SendAzureDeleteRequest(requestURL) +} + +func (vm VirtualMachineClient) GetRoleSizeList() (RoleSizeList, error) { + roleSizeList := RoleSizeList{} + + response, err := 
vm.client.SendAzureGetRequest(azureRoleSizeListURL) + if err != nil { + return roleSizeList, err + } + + err = xml.Unmarshal(response, &roleSizeList) + return roleSizeList, err +} + +// CaptureRole captures a VM role. If reprovisioningConfigurationSet is non-nil, +// the VM role is redeployed after capturing the image, otherwise, the original +// VM role is deleted. +// +// NOTE: an image resulting from this operation shows up in +// osimage.GetImageList() as images with Category "User". +func (vm VirtualMachineClient) CaptureRole(cloudServiceName, deploymentName, roleName, imageName, imageLabel string, + reprovisioningConfigurationSet *ConfigurationSet) (management.OperationID, error) { + if cloudServiceName == "" { + return "", fmt.Errorf(errParamNotSpecified, "cloudServiceName") + } + if deploymentName == "" { + return "", fmt.Errorf(errParamNotSpecified, "deploymentName") + } + if roleName == "" { + return "", fmt.Errorf(errParamNotSpecified, "roleName") + } + + if reprovisioningConfigurationSet != nil && + !(reprovisioningConfigurationSet.ConfigurationSetType == ConfigurationSetTypeLinuxProvisioning || + reprovisioningConfigurationSet.ConfigurationSetType == ConfigurationSetTypeWindowsProvisioning) { + return "", fmt.Errorf("ConfigurationSet type can only be WindowsProvisioningConfiguration or LinuxProvisioningConfiguration") + } + + operation := CaptureRoleOperation{ + OperationType: "CaptureRoleOperation", + PostCaptureAction: PostCaptureActionReprovision, + ProvisioningConfiguration: reprovisioningConfigurationSet, + TargetImageLabel: imageLabel, + TargetImageName: imageName, + } + if reprovisioningConfigurationSet == nil { + operation.PostCaptureAction = PostCaptureActionDelete + } + + data, err := xml.Marshal(operation) + if err != nil { + return "", err + } + + return vm.client.SendAzurePostRequest(fmt.Sprintf(azureOperationsURL, cloudServiceName, deploymentName, roleName), data) +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/management/virtualmachine/entities.go b/vendor/github.com/Azure/azure-sdk-for-go/management/virtualmachine/entities.go new file mode 100644 index 000000000..f105ee82f --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/management/virtualmachine/entities.go @@ -0,0 +1,569 @@ +package virtualmachine + +import ( + "encoding/xml" + + "github.com/Azure/azure-sdk-for-go/management" + vmdisk "github.com/Azure/azure-sdk-for-go/management/virtualmachinedisk" +) + +// VirtualMachineClient is used to perform operations on Azure Virtual Machines +type VirtualMachineClient struct { + client management.Client +} + +// DeploymentRequest is the type for creating a deployment and Virtual Machine +// in the deployment based on the specified configuration. See +// https://msdn.microsoft.com/en-us/library/azure/jj157194.aspx +type DeploymentRequest struct { + XMLName xml.Name `xml:"http://schemas.microsoft.com/windowsazure Deployment"` + // Required parameters: + Name string `` // Specifies a name for the deployment. The deployment name must be unique among other deployments for the cloud service. + DeploymentSlot string `` // Specifies the environment in which the Virtual Machine is to be deployed. The only allowable value is Production. + Label string `` // Specifies an identifier for the deployment. The label can be up to 100 characters long. The label can be used for tracking purposes. + RoleList []Role `xml:">Role"` // Contains information about the Virtual Machines that are to be deployed. 
+ // Optional parameters: + VirtualNetworkName string `xml:",omitempty"` // Specifies the name of an existing virtual network to which the deployment will belong. + DNSServers []DNSServer `xml:"Dns>DnsServers>DnsServer,omitempty"` // Contains a list of DNS servers to associate with the Virtual Machine. + LoadBalancers []LoadBalancer `xml:">LoadBalancer,omitempty"` // Contains a list of internal load balancers that can be assigned to input endpoints. + ReservedIPName string `xml:",omitempty"` // Specifies the name of a reserved IP address that is to be assigned to the deployment. +} + +// DeploymentResponse is the type for receiving deployment information +// See https://msdn.microsoft.com/en-us/library/azure/ee460804.aspx +type DeploymentResponse struct { + XMLName xml.Name `xml:"http://schemas.microsoft.com/windowsazure Deployment"` + + Name string + DeploymentSlot string + Status DeploymentStatus + Label string + URL string `xml:"Url"` + Configuration string + RoleInstanceList []RoleInstance `xml:">RoleInstance"` + UpgradeStatus UpgradeStatus + UpgradeDomainCount int + RoleList []Role `xml:">Role"` + SdkVersion string + Locked bool + RollbackAllowed bool + CreatedTime string + LastModifiedTime string + VirtualNetworkName string + DNSServers []DNSServer `xml:"Dns>DnsServers>DnsServer"` + LoadBalancers []LoadBalancer `xml:">LoadBalancer"` + ExtendedProperties []ExtendedProperty `xml:">ExtendedProperty"` + PersistentVMDowntime PersistentVMDowntime + VirtualIPs []VirtualIP `xml:">VirtualIP"` + ExtensionConfiguration string // cloud service extensions not fully implemented + ReservedIPName string + InternalDNSSuffix string `xml:"InternalDnsSuffix"` +} + +type DeploymentStatus string + +const ( + DeploymentStatusRunning DeploymentStatus = "Running" + DeploymentStatusSuspended DeploymentStatus = "Suspended" + DeploymentStatusRunningTransitioning DeploymentStatus = "RunningTransitioning" + DeploymentStatusSuspendedTransitioning DeploymentStatus = "SuspendedTransitioning" + DeploymentStatusStarting DeploymentStatus = "Starting" + DeploymentStatusSuspending DeploymentStatus = "Suspending" + DeploymentStatusDeploying DeploymentStatus = "Deploying" + DeploymentStatusDeleting DeploymentStatus = "Deleting" +) + +type RoleInstance struct { + RoleName string + InstanceName string + InstanceStatus InstanceStatus + ExtendedInstanceStatus string + InstanceUpgradeDomain int + InstanceFaultDomain int + InstanceSize string + InstanceStateDetails string + InstanceErrorCode string + IPAddress string `xml:"IpAddress"` + InstanceEndpoints []InstanceEndpoint `xml:">InstanceEndpoint"` + PowerState PowerState + HostName string + RemoteAccessCertificateThumbprint string + GuestAgentStatus string // todo: implement + ResourceExtensionStatusList []ResourceExtensionStatus `xml:">ResourceExtensionStatus"` + PublicIPs []PublicIP `xml:">PublicIP"` +} + +type InstanceStatus string + +const ( + InstanceStatusUnknown = "Unknown" + InstanceStatusCreatingVM = "CreatingVM" + InstanceStatusStartingVM = "StartingVM" + InstanceStatusCreatingRole = "CreatingRole" + InstanceStatusStartingRole = "StartingRole" + InstanceStatusReadyRole = "ReadyRole" + InstanceStatusBusyRole = "BusyRole" + InstanceStatusStoppingRole = "StoppingRole" + InstanceStatusStoppingVM = "StoppingVM" + InstanceStatusDeletingVM = "DeletingVM" + InstanceStatusStoppedVM = "StoppedVM" + InstanceStatusRestartingRole = "RestartingRole" + InstanceStatusCyclingRole = "CyclingRole" + InstanceStatusFailedStartingRole = "FailedStartingRole" + InstanceStatusFailedStartingVM 
= "FailedStartingVM" + InstanceStatusUnresponsiveRole = "UnresponsiveRole" + InstanceStatusStoppedDeallocated = "StoppedDeallocated" + InstanceStatusPreparing = "Preparing" +) + +type InstanceEndpoint struct { + Name string + Vip string + PublicPort int + LocalPort int + Protocol InputEndpointProtocol +} + +type PowerState string + +const ( + PowerStateStarting PowerState = "Starting" + PowerStateStarted PowerState = "Started" + PowerStateStopping PowerState = "Stopping" + PowerStateStopped PowerState = "Stopped" + PowerStateUnknown PowerState = "Unknown" +) + +type ResourceExtensionStatus struct { + HandlerName string + Version string + Status ResourceExtensionState + Code string + FormattedMessage FormattedMessage + ExtensionSettingStatus ExtensionSettingStatus +} + +type ResourceExtensionState string + +const ( + ResourceExtensionStateInstalling ResourceExtensionState = "Installing" + ResourceExtensionStateReady ResourceExtensionState = "Ready" + ResourceExtensionStateNotReady ResourceExtensionState = "NotReady" + ResourceExtensionStateUnresponsive ResourceExtensionState = "Unresponsive" +) + +type FormattedMessage struct { + Language string + Message string +} + +type ExtensionSettingStatus struct { + Timestamp string + Name string + Operation string + Status ExtensionSettingState + Code string + FormattedMessage FormattedMessage + SubStatusList []SubStatus `xml:">SubStatus"` +} + +type ExtensionSettingState string + +const ( + ExtensionSettingStateTransitioning ExtensionSettingState = "transitioning" + ExtensionSettingStateError ExtensionSettingState = "error" + ExtensionSettingStateSuccess ExtensionSettingState = "success" + ExtensionSettingStateWarning ExtensionSettingState = "warning" +) + +type SubStatus struct { + Name string + Status ExtensionSettingState + FormattedMessage FormattedMessage +} + +type UpgradeStatus struct { + UpgradeType UpgradeType + CurrentUpgradeDomainState CurrentUpgradeDomainState + CurrentUpgradeDomain int +} + +type UpgradeType string + +const ( + UpgradeTypeAuto UpgradeType = "Auto" + UpgradeTypeManual UpgradeType = "Manual" + UpgradeTypeSimultaneous UpgradeType = "Simultaneous" +) + +type CurrentUpgradeDomainState string + +const ( + CurrentUpgradeDomainStateBefore CurrentUpgradeDomainState = "Before" + CurrentUpgradeDomainStateDuring CurrentUpgradeDomainState = "During" +) + +type ExtendedProperty struct { + Name string + Value string +} + +type PersistentVMDowntime struct { + StartTime string + EndTime string + Status string +} + +type VirtualIP struct { + Address string + IsReserved bool + ReservedIPName string + Type IPAddressType +} + +// Role contains the configuration sets that are used to create virtual +// machines. +type Role struct { + RoleName string `xml:",omitempty"` // Specifies the name for the Virtual Machine. + RoleType string `xml:",omitempty"` // Specifies the type of role to use. For Virtual Machines, this must be PersistentVMRole. + ConfigurationSets []ConfigurationSet `xml:"ConfigurationSets>ConfigurationSet,omitempty"` + ResourceExtensionReferences *[]ResourceExtensionReference `xml:"ResourceExtensionReferences>ResourceExtensionReference,omitempty"` + VMImageName string `xml:",omitempty"` // Specifies the name of the VM Image that is to be used to create the Virtual Machine. If this element is used, the ConfigurationSets element is not used. + MediaLocation string `xml:",omitempty"` // Required if the Virtual Machine is being created from a published VM Image. 
Specifies the location of the VHD file that is created when VMImageName specifies a published VM Image. + AvailabilitySetName string `xml:",omitempty"` // Specifies the name of a collection of Virtual Machines. Virtual Machines specified in the same availability set are allocated to different nodes to maximize availability. + DataVirtualHardDisks []DataVirtualHardDisk `xml:"DataVirtualHardDisks>DataVirtualHardDisk,omitempty"` // Contains the parameters that are used to add a data disk to a Virtual Machine. If you are creating a Virtual Machine by using a VM Image, this element is not used. + OSVirtualHardDisk *OSVirtualHardDisk `xml:",omitempty"` // Contains the parameters that are used to create the operating system disk for a Virtual Machine. If you are creating a Virtual Machine by using a VM Image, this element is not used. + RoleSize string `xml:",omitempty"` // Specifies the size of the Virtual Machine. The default size is Small. + ProvisionGuestAgent bool `xml:",omitempty"` // Indicates whether the VM Agent is installed on the Virtual Machine. To run a resource extension in a Virtual Machine, this service must be installed. + VMImageInput *VMImageInput `xml:",omitempty"` // When a VM Image is used to create a new PersistentVMRole, the DiskConfigurations in the VM Image are used to create new Disks for the new VM. This parameter can be used to resize the newly created Disks to a larger size than the underlying DiskConfigurations in the VM Image. + + UseCertAuth bool `xml:"-"` + CertPath string `xml:"-"` +} + +// VMImageInput is for when a VM Image is used to create a new PersistantVMRole, +// the DiskConfigurations in the VM Image are used to create new Disks for the +// new VM. This parameter can be used to resize the newly created Disks to a +// larger size than the underlying DiskConfigurations in the VM Image. +type VMImageInput struct { + OSDiskConfiguration *OSDiskConfiguration `xml:",omitempty"` // This corresponds to the OSDiskConfiguration of the VM Image used to create a new role. The OSDiskConfiguration element is only available using version 2014-10-01 or higher. + DataDiskConfigurations []DataDiskConfiguration `xml:">DataDiskConfiguration,omitempty"` // This corresponds to the DataDiskConfigurations of the VM Image used to create a new role. The DataDiskConfigurations element is only available using version 2014-10-01 or higher. +} + +// OSDiskConfiguration is used to resize the OS disk of a new VM created from a +// previously saved VM image. +type OSDiskConfiguration struct { + ResizedSizeInGB int +} + +// DataDiskConfiguration is used to resize the data disks of a new VM created +// from a previously saved VM image. +type DataDiskConfiguration struct { + OSDiskConfiguration + Name string // The Name of the DataDiskConfiguration being referenced to. + +} + +// ResourceExtensionReference contains a collection of resource extensions that +// are to be installed on the Virtual Machine. The VM Agent must be installed on +// the Virtual Machine to install resource extensions. For more information, see +// Manage Extensions: +// +// https://msdn.microsoft.com/en-us/library/dn606311.aspx. 
+type ResourceExtensionReference struct { + ReferenceName string + Publisher string + Name string + Version string + ParameterValues []ResourceExtensionParameter `xml:"ResourceExtensionParameterValues>ResourceExtensionParameterValue,omitempty"` + State string +} + +// ResourceExtensionParameter specifies the key, value, and type of a parameter that is passed to the +// resource extension when it is installed. +type ResourceExtensionParameter struct { + Key string + Value string + Type ResourceExtensionParameterType // If this value is set to Private, the parameter will not be returned by Get Deployment (). +} + +type ResourceExtensionParameterType string + +// Enum values for ResourceExtensionParameterType +const ( + ResourceExtensionParameterTypePublic ResourceExtensionParameterType = "Public" + ResourceExtensionParameterTypePrivate ResourceExtensionParameterType = "Private" +) + +// DataVirtualHardDisk specifies the properties that are used to create a data +// disk. +type DataVirtualHardDisk struct { + HostCaching vmdisk.HostCachingType `xml:",omitempty"` // Specifies the caching mode of the data disk. The default value is None. + DiskLabel string `xml:",omitempty"` // If the disk that is being added is already registered in the subscription, this element is ignored. If a new disk is being created, this element is used to provide a description of the disk. The value of this element is only obtained programmatically and does not appear in the Management Portal. + DiskName string `xml:",omitempty"` // If the disk that is being added is already registered in the subscription, this element is used to identify the disk to add. If a new disk and the associated VHD are being created by Azure, this element is not used and Azure assigns a unique name that is a combination of the deployment name, role name, and identifying number. The name of the disk must contain only alphanumeric characters, underscores, periods, or dashes. The name must not be longer than 256 characters. The name must not end with period or dash. + Lun int `xml:",omitempty"` // Specifies the Logical Unit Number (LUN) for the data disk. If the disk is the first disk that is added, this element is optional and the default value of 0 is used. If more than one disk is being added, this element is required. Valid LUN values are 0 through 31. + LogicalDiskSizeInGB int `xml:",omitempty"` // Specifies the size, in GB, of an empty disk to be attached to the Virtual Machine. If the disk that is being added is already registered in the subscription, this element is ignored. If the disk and VHD is being created by Azure as it is added, this element defines the size of the new disk. + MediaLink string `xml:",omitempty"` // If the disk that is being added is already registered in the subscription or the VHD for the disk already exists in blob storage, this element is ignored. If a VHD file does not exist in blob storage, this element defines the location of the new VHD that is created when the new disk is added. + SourceMediaLink string `xml:",omitempty"` // If the disk that is being added is already registered in the subscription or the VHD for the disk does not exist in blob storage, this element is ignored. If the VHD file exists in blob storage, this element defines the path to the VHD and a disk is registered from it and attached to the virtual machine. +} + +// OSVirtualHardDisk specifies the properties that are used to create an OS +// disk. 
+type OSVirtualHardDisk struct { + HostCaching vmdisk.HostCachingType `xml:",omitempty"` // Specifies the caching mode of the data disk. The default value is None. + DiskLabel string `xml:",omitempty"` // If the disk that is being added is already registered in the subscription, this element is ignored. If a new disk is being created, this element is used to provide a description of the disk. The value of this element is only obtained programmatically and does not appear in the Management Portal. + DiskName string `xml:",omitempty"` // If the disk that is being added is already registered in the subscription, this element is used to identify the disk to add. If a new disk and the associated VHD are being created by Azure, this element is not used and Azure assigns a unique name that is a combination of the deployment name, role name, and identifying number. The name of the disk must contain only alphanumeric characters, underscores, periods, or dashes. The name must not be longer than 256 characters. The name must not end with period or dash. + MediaLink string `xml:",omitempty"` // If the disk that is being added is already registered in the subscription or the VHD for the disk already exists in blob storage, this element is ignored. If a VHD file does not exist in blob storage, this element defines the location of the new VHD that is created when the new disk is added. + SourceImageName string `xml:",omitempty"` + OS string `xml:",omitempty"` + RemoteSourceImageLink string `xml:",omitempty"` // Specifies a publicly accessible URI or a SAS URI to the location where an OS image is stored that is used to create the Virtual Machine. This location can be a different location than the user or platform image repositories in Azure. An image is always associated with a VHD, which is a .vhd file stored as a page blob in a storage account in Azure. If you specify the path to an image with this element, an associated VHD is created and you must use the MediaLink element to specify the location in storage where the VHD will be located. If this element is used, SourceImageName is not used. + ResizedSizeInGB int `xml:",omitempty"` +} + +// ConfigurationSet specifies the configuration elements of the Virtual Machine. +// The type attribute is required to prevent the administrator password from +// being written to the operation history file. +type ConfigurationSet struct { + ConfigurationSetType ConfigurationSetType + + // Windows provisioning: + ComputerName string `xml:",omitempty"` // Optional. Specifies the computer name for the Virtual Machine. If you do not specify a computer name, one is assigned that is a combination of the deployment name, role name, and identifying number. Computer names must be 1 to 15 characters long. + AdminPassword string `xml:",omitempty"` // Optional. Specifies the password to use for an administrator account on the Virtual Machine that is being created. If you are creating a Virtual Machine using an image, you must specify a name of an administrator account to be created on the machine using the AdminUsername element. You must use the AdminPassword element to specify the password of the administrator account that is being created. If you are creating a Virtual Machine using an existing specialized disk, this element is not used because the account should already exist on the disk. + EnableAutomaticUpdates bool `xml:",omitempty"` // Optional. Specifies whether automatic updates are enabled for the Virtual Machine. The default value is true. 
+ TimeZone string `xml:",omitempty"` // Optional. Specifies the time zone for the Virtual Machine. + DomainJoin *DomainJoin `xml:",omitempty"` // Optional. Contains properties that define a domain to which the Virtual Machine will be joined. + StoredCertificateSettings []CertificateSetting `xml:">StoredCertificateSetting,omitempty"` // Optional. Contains a list of service certificates with which to provision to the new Virtual Machine. + WinRMListeners *[]WinRMListener `xml:"WinRM>Listeners>Listener,omitempty"` // Optional. Contains configuration settings for the Windows Remote Management service on the Virtual Machine. This enables remote Windows PowerShell. + AdminUsername string `xml:",omitempty"` // Optional. Specifies the name of the administrator account that is created to access the Virtual Machine. If you are creating a Virtual Machine using an image, you must specify a name of an administrator account to be created by using this element. You must use the AdminPassword element to specify the password of the administrator account that is being created. If you are creating a Virtual Machine using an existing specialized disk, this element is not used because the account should already exist on the disk. + AdditionalUnattendContent string `xml:",omitempty"` // Specifies additional base-64 encoded XML formatted information that can be included in the Unattend.xml file, which is used by Windows Setup. + + // Linux provisioning: + HostName string `xml:",omitempty"` // Required. Specifies the host name for the Virtual Machine. Host names must be 1 to 64 characters long. + UserName string `xml:",omitempty"` // Required. Specifies the name of a user account to be created in the sudoer group of the Virtual Machine. User account names must be 1 to 32 characters long. + UserPassword string `xml:",omitempty"` // Required. Specifies the password for the user account. Passwords must be 6 to 72 characters long. + DisableSSHPasswordAuthentication string `xml:"DisableSshPasswordAuthentication,omitempty"` // Optional. Specifies whether SSH password authentication is disabled. By default this value is set to true. + SSH *SSH `xml:",omitempty"` // Optional. Specifies the SSH public keys and key pairs to use with the Virtual Machine. + + // In WindowsProvisioningConfiguration: The base-64 encoded string is decoded to a binary array that is saved as a file on the Virtual Machine. The maximum length of the binary array is 65535 bytes. The file is saved to %SYSTEMDRIVE%\AzureData\CustomData.bin. If the file exists, it is overwritten. The security on directory is set to System:Full Control and Administrators:Full Control. + // In LinuxProvisioningConfiguration: The base-64 encoded string is located in the ovf-env.xml file on the ISO of the Virtual Machine. The file is copied to /var/lib/waagent/ovf-env.xml by the Azure Linux Agent. The Azure Linux Agent will also place the base-64 encoded data in /var/lib/waagent/CustomData during provisioning. The maximum length of the binary array is 65535 bytes. + CustomData string `xml:",omitempty"` // Specifies a base-64 encoded string of custom data. + + // Network configuration: + InputEndpoints []InputEndpoint `xml:">InputEndpoint,omitempty"` // Optional in NetworkConfiguration. Contains a collection of external endpoints for the Virtual Machine. + SubnetNames []string `xml:">SubnetName,omitempty"` // Required if StaticVirtualNetworkIPAddress is specified; otherwise, optional in NetworkConfiguration. 
Contains a list of subnets to which the Virtual Machine will belong. + StaticVirtualNetworkIPAddress string `xml:",omitempty"` // Specifies the internal IP address for the Virtual Machine in a Virtual Network. If you specify this element, you must also specify the SubnetNames element with only one subnet defined. The IP address specified in this element must belong to the subnet that is defined in SubnetNames and it should not be the one of the first four IP addresses or the last IP address in the subnet. Deploying web roles or worker roles into a subnet that has Virtual Machines with StaticVirtualNetworkIPAddress defined is not supported. + NetworkSecurityGroup string `xml:",omitempty"` // Optional in NetworkConfiguration. Represents the name of the Network Security Group that will be associated with the Virtual Machine. Network Security Group must exist in the context of subscription and be created in same region to which the virtual machine will be deployed. + PublicIPs []PublicIP `xml:">PublicIP,omitempty"` // Contains a public IP address that can be used in addition to the default virtual IP address for the Virtual Machine. +} + +type ConfigurationSetType string + +// Enum values for ConfigurationSetType +const ( + ConfigurationSetTypeWindowsProvisioning ConfigurationSetType = "WindowsProvisioningConfiguration" + ConfigurationSetTypeLinuxProvisioning ConfigurationSetType = "LinuxProvisioningConfiguration" + ConfigurationSetTypeNetwork ConfigurationSetType = "NetworkConfiguration" +) + +// DomainJoin contains properties that define a domain to which the Virtual +// Machine will be joined. +type DomainJoin struct { + Credentials Credentials `xml:",omitempty"` // Specifies the credentials to use to join the Virtual Machine to the domain. + JoinDomain string `xml:",omitempty"` // Specifies the domain to join. + MachineObjectOU string `xml:",omitempty"` // Specifies the Lightweight Directory Access Protocol (LDAP) X 500-distinguished name of the organizational unit (OU) in which the computer account is created. This account is in Active Directory on a domain controller in the domain to which the computer is being joined. +} + +// Credentials specifies the credentials to use to join the Virtual Machine to +// the domain. If Domain is not specified, Username must specify the user +// principal name (UPN) format (user@fully-qualified-DNS-domain) or the fully- +// qualified-DNS-domain\username format. +type Credentials struct { + Domain string // Specifies the name of the domain used to authenticate an account. The value is a fully qualified DNS domain. + Username string // Specifies a user name in the domain that can be used to join the domain. + Password string // Specifies the password to use to join the domain. +} + +// CertificateSetting specifies the parameters for the certificate which to +// provision to the new Virtual Machine. +type CertificateSetting struct { + StoreLocation string // Required. Specifies the certificate store location on the Virtual Machine. The only supported value is "LocalMachine". + StoreName string // Required. Specifies the name of the certificate store from which the certificate is retrieved. For example, "My". + Thumbprint string // Required. Specifies the thumbprint of the certificate. The thumbprint must specify an existing service certificate. +} + +// WinRMListener specifies the protocol and certificate information for a WinRM +// listener. +type WinRMListener struct { + Protocol WinRMProtocol // Specifies the protocol of listener. 
+// WinRMListener specifies the protocol and certificate information for a WinRM
+// listener.
+type WinRMListener struct {
+	Protocol WinRMProtocol // Specifies the protocol of the listener.
+	CertificateThumbprint string `xml:",omitempty"` // Specifies the certificate thumbprint for the secure connection. If this value is not specified, a self-signed certificate is generated and used for the Virtual Machine.
+}
+
+type WinRMProtocol string
+
+// Enum values for WinRMProtocol
+const (
+	WinRMProtocolHTTP  WinRMProtocol = "Http"
+	WinRMProtocolHTTPS WinRMProtocol = "Https"
+)
+
+// SSH specifies the SSH public keys and key pairs to use with the Virtual Machine.
+type SSH struct {
+	PublicKeys []PublicKey `xml:">PublicKey"`
+	KeyPairs   []KeyPair   `xml:">KeyPair"`
+}
+
+// PublicKey specifies a public SSH key.
+type PublicKey struct {
+	Fingerprint string // Specifies the SHA1 fingerprint of an X509 certificate associated with the cloud service and includes the SSH public key.
+	// Specifies the full path of a file, on the Virtual Machine, where the SSH public key is stored. If
+	// the file already exists, the specified key is appended to the file.
+	Path string // Usually /home/username/.ssh/authorized_keys
+}
+
+// KeyPair specifies an SSH keypair.
+type KeyPair struct {
+	Fingerprint string // Specifies the SHA1 fingerprint of an X509 certificate that is associated with the cloud service and includes the SSH keypair.
+	// Specifies the full path of a file, on the virtual machine, which stores the SSH private key. The
+	// file is overwritten when multiple keys are written to it. The SSH public key is stored in the same
+	// directory and has the same name as the private key file with a .pub suffix.
+	Path string // Usually /home/username/.ssh/id_rsa
+}
+
+// InputEndpoint specifies the properties that define an external endpoint for
+// the Virtual Machine.
+type InputEndpoint struct {
+	LocalPort int                   // Specifies the internal port on which the Virtual Machine is listening.
+	Name      string                // Specifies the name of the external endpoint.
+	Port      int                   // Specifies the external port to use for the endpoint.
+	Protocol  InputEndpointProtocol // Specifies the transport protocol for the endpoint.
+	Vip       string                `xml:",omitempty"`
+}
+
+type InputEndpointProtocol string
+
+// Enum values for InputEndpointProtocol
+const (
+	InputEndpointProtocolTCP InputEndpointProtocol = "TCP"
+	InputEndpointProtocolUDP InputEndpointProtocol = "UDP"
+)
+
+// PublicIP contains a public IP address that can be used in addition to the
+// default virtual IP address for the Virtual Machine.
+type PublicIP struct {
+	Name                 string // Specifies the name of the public IP address.
+	IdleTimeoutInMinutes int    `xml:",omitempty"` // Specifies the timeout for the TCP idle connection. The value can be set between 4 and 30 minutes. The default value is 4 minutes. This element is only used when the protocol is set to TCP.
+}
+
+// ServiceCertificate contains a certificate to be added to a hosted service.
+type ServiceCertificate struct {
+	XMLName           xml.Name `xml:"CertificateFile"`
+	Data              string
+	CertificateFormat string
+	Password          string `xml:",omitempty"`
+}
+
+// StartRoleOperation contains the information for starting a Role.
+type StartRoleOperation struct {
+	XMLName       xml.Name `xml:"http://schemas.microsoft.com/windowsazure StartRoleOperation"`
+	OperationType string
+}
+
+// ShutdownRoleOperation contains the information for shutting down a Role.
+type ShutdownRoleOperation struct {
+	XMLName       xml.Name `xml:"http://schemas.microsoft.com/windowsazure ShutdownRoleOperation"`
+	OperationType string
+}
+
+// RestartRoleOperation contains the information for restarting a Role.
+type RestartRoleOperation struct { + XMLName xml.Name `xml:"http://schemas.microsoft.com/windowsazure RestartRoleOperation"` + OperationType string +} + +// CaptureRoleOperation contains the information for capturing a Role +type CaptureRoleOperation struct { + XMLName xml.Name `xml:"http://schemas.microsoft.com/windowsazure CaptureRoleOperation"` + OperationType string + PostCaptureAction PostCaptureAction + ProvisioningConfiguration *ConfigurationSet `xml:",omitempty"` + TargetImageLabel string + TargetImageName string +} + +type PostCaptureAction string + +// Enum values for PostCaptureAction +const ( + PostCaptureActionDelete PostCaptureAction = "Delete" + PostCaptureActionReprovision PostCaptureAction = "Reprovision" +) + +// RoleSizeList contains a list of the available role sizes +type RoleSizeList struct { + XMLName xml.Name `xml:"RoleSizes"` + RoleSizes []RoleSize `xml:"RoleSize"` +} + +// RoleSize contains a detailed explanation of a role size +type RoleSize struct { + Name string + Label string + Cores int + MemoryInMb int + SupportedByWebWorkerRoles bool + SupportedByVirtualMachines bool + MaxDataDiskCount int + WebWorkerResourceDiskSizeInMb int + VirtualMachineResourceDiskSizeInMb int +} + +// DNSServer contains the definition of a DNS server for virtual machine deployment +type DNSServer struct { + Name string + Address string +} + +// LoadBalancer contains the definition of a load balancer for virtual machine deployment +type LoadBalancer struct { + Name string // Specifies the name of the internal load balancer. + Type IPAddressType `xml:"FrontendIpConfiguration>Type"` // Specifies the type of virtual IP address that is provided by the load balancer. The only allowable value is Private. + SubnetName string `xml:"FrontendIpConfiguration>SubnetName,omitempty"` // Required if the deployment exists in a virtual network and a StaticVirtualNetworkIPAddress is assigned. Specifies the subnet of the virtual network that the load balancer uses. The virtual IP address that is managed by the load balancer is contained in this subnet. + StaticVirtualNetworkIPAddress string `xml:"FrontendIpConfiguration>StaticVirtualNetworkIPAddress,omitempty"` // Specifies a specific virtual IP address that the load balancer uses from the subnet in the virtual network. 
+} + +type IPAddressType string + +// Enum values for IPAddressType +const ( + IPAddressTypePrivate IPAddressType = "Private" // Only allowed value (currently) for IPAddressType +) + +type ResourceExtensions struct { + List []ResourceExtension `xml:"ResourceExtension"` +} + +type ResourceExtension struct { + Publisher string + Name string + Version string + Label string + Description string + PublicConfigurationSchema string + PrivateConfigurationSchema string + SampleConfig string + ReplicationCompleted string + Eula string + PrivacyURI string `xml:"PrivacyUri"` + HomepageURI string `xml:"HomepageUri"` + IsJSONExtension bool `xml:"IsJsonExtension"` + IsInternalExtension bool + DisallowMajorVersionUpgrade bool + CompanyName string + SupportedOS string + PublishedDate string +} + +type PersistentVMRole struct { + XMLName xml.Name `xml:"http://schemas.microsoft.com/windowsazure PersistentVMRole"` + Role +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/management/virtualmachine/entities_test.go b/vendor/github.com/Azure/azure-sdk-for-go/management/virtualmachine/entities_test.go new file mode 100644 index 000000000..3118e6c5a --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/management/virtualmachine/entities_test.go @@ -0,0 +1,299 @@ +package virtualmachine + +import ( + "encoding/xml" + "testing" +) + +func TestDocumentedDeploymentRequest(t *testing.T) { + // xml based on https://msdn.microsoft.com/en-us/library/azure/jj157194.aspx + // fixed typos, replaced strongly typed fields with values of correct type + xmlString := ` + name-of-deployment + deployment-environment + + + + name-of-the-virtual-machine + PersistentVMRole + + + WindowsProvisioningConfiguration + name-of-computer + administrator-password + true + time-zone + + + domain-to-join + user-name-in-the-domain + password-for-the-user-name + + domain-to-join + distinguished-name-of-the-ou + + + + LocalMachine + name-of-store-on-the-machine + certificate-thumbprint + + + + + + listener-protocol + + + certificate-thumbprint + listener-protocol + + + + name-of-administrator-account + base-64-encoded-data + + + + name-of-pass + + + name-of-component + + + name-of-setting + base-64-encoded-XML-content + + + + + + + + + + LinuxProvisioningConfiguration + host-name-for-the-virtual-machine + new-user-name + password-for-the-new-user + true + + + + certificate-fingerprint + SSH-public-key-storage-location + + + + + certificate-fingerprint + SSH-public-key-storage-location + + + + base-64-encoded-data + + + NetworkConfiguration + + + name-of-load-balanced-set + 22 + ZZH + 33 + + /probe/me + 80 + http + 30 + 5 + + endpoint-protocol + enable-direct-server-return + + + + priority-of-the-rule + permit-rule + subnet-of-the-rule + description-of-the-rule + + + + name-of-internal-loadbalancer + 9 + + + + name-of-subnet + + ip-address + + + name-of-public-ip + 11 + + + + + + + name-of-reference + name-of-publisher + name-of-extension + version-of-extension + + + name-of-parameter-key + parameter-value + type-of-parameter + + + state-of-resource + + + certificate-thumbprint + certificate-algorithm + + + + + name-of-vm-image + path-to-vhd + name-of-availability-set + + + caching-mode + label-of-data-disk + name-of-disk + 0 + 50 + path-to-vhd + + + + caching-mode + label-of-operating-system-disk + name-of-disk + path-to-vhd + name-of-source-image + operating-system-of-image + path-to-source-image + 125 + + size-of-virtual-machine + true + + + 126 + + + + disk-name + 127 + + + + + + name-of-virtual-network + + + + dns-name +
dns-ip-address
+
+
+
+ name-of-reserved-ip + + + name-of-internal-load-balancer + + Private + name-of-subnet + static-ip-address + + + +
` + + deployment := DeploymentRequest{} + if err := xml.Unmarshal([]byte(xmlString), &deployment); err != nil { + t.Fatal(err) + } + + if deployment.Name != "name-of-deployment" { + t.Fatalf("Expected deployment.Name=\"name-of-deployment\", but got \"%s\"", + deployment.Name) + } + + // ====== + + t.Logf("deployment.RoleList[0]: %+v", deployment.RoleList[0]) + if expected := "name-of-the-virtual-machine"; deployment.RoleList[0].RoleName != expected { + t.Fatalf("Expected deployment.RoleList[0].RoleName=%v, but got %v", expected, deployment.RoleList[0].RoleName) + } + + // ====== + + t.Logf("deployment.DNSServers[0]: %+v", deployment.DNSServers[0]) + if deployment.DNSServers[0].Name != "dns-name" { + t.Fatalf("Expected deployment.DNSServers[0].Name=\"dns-name\", but got \"%s\"", + deployment.DNSServers[0].Name) + } + + // ====== + + t.Logf("deployment.LoadBalancers[0]: %+v", deployment.LoadBalancers[0]) + if deployment.LoadBalancers[0].Name != "name-of-internal-load-balancer" { + t.Fatalf("Expected deployment.LoadBalancers[0].Name=\"name-of-internal-load-balancer\", but got \"%s\"", + deployment.LoadBalancers[0].Name) + } + + if deployment.LoadBalancers[0].Type != IPAddressTypePrivate { + t.Fatalf("Expected deployment.LoadBalancers[0].Type=IPAddressTypePrivate, but got \"%s\"", + deployment.LoadBalancers[0].Type) + } + + if deployment.LoadBalancers[0].StaticVirtualNetworkIPAddress != "static-ip-address" { + t.Fatalf("Expected deployment.LoadBalancers[0].StaticVirtualNetworkIPAddress=\"static-ip-address\", but got \"%s\"", + deployment.LoadBalancers[0].StaticVirtualNetworkIPAddress) + } + + // ====== + + extensionReferences := (*deployment.RoleList[0].ResourceExtensionReferences) + t.Logf("(*deployment.RoleList[0].ResourceExtensionReferences)[0]: %+v", extensionReferences[0]) + if extensionReferences[0].Name != "name-of-extension" { + t.Fatalf("Expected (*deployment.RoleList[0].ResourceExtensionReferences)[0].Name=\"name-of-extension\", but got \"%s\"", + extensionReferences[0].Name) + } + + if extensionReferences[0].ParameterValues[0].Key != "name-of-parameter-key" { + t.Fatalf("Expected (*deployment.RoleList[0].ResourceExtensionReferences)[0].ParameterValues[0].Key=\"name-of-parameter-key\", but got %v", + extensionReferences[0].ParameterValues[0].Key) + } + + // ====== + + if deployment.RoleList[0].VMImageInput.DataDiskConfigurations[0].ResizedSizeInGB != 127 { + t.Fatalf("Expected deployment.RoleList[0].VMImageInput.DataDiskConfigurations[0].ResizedSizeInGB=127, but got %v", + deployment.RoleList[0].VMImageInput.DataDiskConfigurations[0].ResizedSizeInGB) + } + + // ====== + + winRMlisteners := *deployment.RoleList[0].ConfigurationSets[0].WinRMListeners + if string(winRMlisteners[0].Protocol) != "listener-protocol" { + t.Fatalf("Expected winRMlisteners[0].Protocol to be listener-protocol, but got %s", + string(winRMlisteners[0].Protocol)) + } + + winRMlisteners2 := *deployment.RoleList[0].ConfigurationSets[0].WinRMListeners + if winRMlisteners2[1].CertificateThumbprint != "certificate-thumbprint" { + t.Fatalf("Expected winRMlisteners2[1].CertificateThumbprint to be certificate-thumbprint, but got %s", + winRMlisteners2[1].CertificateThumbprint) + } + +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/management/virtualmachine/resourceextensions.go b/vendor/github.com/Azure/azure-sdk-for-go/management/virtualmachine/resourceextensions.go new file mode 100644 index 000000000..25320447e --- /dev/null +++ 
b/vendor/github.com/Azure/azure-sdk-for-go/management/virtualmachine/resourceextensions.go @@ -0,0 +1,25 @@ +package virtualmachine + +import ( + "encoding/xml" +) + +const ( + azureResourceExtensionsURL = "services/resourceextensions" +) + +// GetResourceExtensions lists the resource extensions that are available to add +// to a virtual machine. +// +// See https://msdn.microsoft.com/en-us/library/azure/dn495441.aspx +func (c VirtualMachineClient) GetResourceExtensions() (extensions []ResourceExtension, err error) { + data, err := c.client.SendAzureGetRequest(azureResourceExtensionsURL) + if err != nil { + return extensions, err + } + + var response ResourceExtensions + err = xml.Unmarshal(data, &response) + extensions = response.List + return +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/management/virtualmachine/resourceextensions_test.go b/vendor/github.com/Azure/azure-sdk-for-go/management/virtualmachine/resourceextensions_test.go new file mode 100644 index 000000000..b4ec2a769 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/management/virtualmachine/resourceextensions_test.go @@ -0,0 +1,27 @@ +package virtualmachine + +import ( + "testing" + + "github.com/Azure/azure-sdk-for-go/management/testutils" +) + +func TestAzureGetResourceExtensions(t *testing.T) { + client := testutils.GetTestClient(t) + + list, err := NewClient(client).GetResourceExtensions() + if err != nil { + t.Fatal(err) + } + + t.Logf("Found %d extensions", len(list)) + if len(list) == 0 { + t.Fatal("Huh, no resource extensions at all? Something must be wrong.") + } + + for _, extension := range list { + if extension.Name == "" { + t.Fatalf("Resource with empty name? Something must have gone wrong with serialization: %+v", extension) + } + } +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/management/virtualmachinedisk/client.go b/vendor/github.com/Azure/azure-sdk-for-go/management/virtualmachinedisk/client.go new file mode 100644 index 000000000..90f8d6561 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/management/virtualmachinedisk/client.go @@ -0,0 +1,230 @@ +// Package virtualmachinedisk provides a client for Virtual Machine Disks. +package virtualmachinedisk + +import ( + "encoding/xml" + "fmt" + + "github.com/Azure/azure-sdk-for-go/management" +) + +const ( + addDataDiskURL = "services/hostedservices/%s/deployments/%s/roles/%s/DataDisks" + addDiskURL = "services/disks" + deleteDataDiskURL = "services/hostedservices/%s/deployments/%s/roles/%s/DataDisks/%d" + deleteDiskURL = "services/disks/%s" + getDataDiskURL = "services/hostedservices/%s/deployments/%s/roles/%s/DataDisks/%d" + getDiskURL = "services/disks/%s" + listDisksURL = "services/disks" + updateDataDiskURL = "services/hostedservices/%s/deployments/%s/roles/%s/DataDisks/%d" + updateDiskURL = "services/disks/%s" + + errParamNotSpecified = "Parameter %s is not specified." 
+) + +//NewClient is used to instantiate a new DiskClient from an Azure client +func NewClient(client management.Client) DiskClient { + return DiskClient{client: client} +} + +// AddDataDisk adds a data disk to a Virtual Machine +// +// https://msdn.microsoft.com/en-us/library/azure/jj157199.aspx +func (c DiskClient) AddDataDisk( + service string, + deployment string, + role string, + params CreateDataDiskParameters) (management.OperationID, error) { + if service == "" { + return "", fmt.Errorf(errParamNotSpecified, "service") + } + if deployment == "" { + return "", fmt.Errorf(errParamNotSpecified, "deployment") + } + if role == "" { + return "", fmt.Errorf(errParamNotSpecified, "role") + } + + requestURL := fmt.Sprintf(addDataDiskURL, service, deployment, role) + + req, err := xml.Marshal(params) + if err != nil { + return "", err + } + + return c.client.SendAzurePostRequest(requestURL, req) +} + +// AddDisk adds an operating system disk or data disk to the user image repository +// +// https://msdn.microsoft.com/en-us/library/azure/jj157178.aspx +func (c DiskClient) AddDisk(params CreateDiskParameters) (management.OperationID, error) { + req, err := xml.Marshal(params) + if err != nil { + return "", err + } + + return c.client.SendAzurePostRequest(addDiskURL, req) +} + +// DeleteDataDisk removes the specified data disk from a Virtual Machine +// +// https://msdn.microsoft.com/en-us/library/azure/jj157179.aspx +func (c DiskClient) DeleteDataDisk( + service string, + deployment string, + role string, + lun int, + deleteVHD bool) (management.OperationID, error) { + if service == "" { + return "", fmt.Errorf(errParamNotSpecified, "service") + } + if deployment == "" { + return "", fmt.Errorf(errParamNotSpecified, "deployment") + } + if role == "" { + return "", fmt.Errorf(errParamNotSpecified, "role") + } + + requestURL := fmt.Sprintf(deleteDataDiskURL, service, deployment, role, lun) + if deleteVHD { + requestURL += "?comp=media" + } + + return c.client.SendAzureDeleteRequest(requestURL) +} + +// DeleteDisk deletes the specified data or operating system disk from the image +// repository that is associated with the specified subscription +// +// https://msdn.microsoft.com/en-us/library/azure/jj157200.aspx +func (c DiskClient) DeleteDisk(name string, deleteVHD bool) error { + if name == "" { + return fmt.Errorf(errParamNotSpecified, "name") + } + + requestURL := fmt.Sprintf(deleteDiskURL, name) + if deleteVHD { + requestURL += "?comp=media" + } + + _, err := c.client.SendAzureDeleteRequest(requestURL) // request is handled synchronously + return err +} + +// GetDataDisk retrieves the specified data disk from a Virtual Machine +// +// https://msdn.microsoft.com/en-us/library/azure/jj157180.aspx +func (c DiskClient) GetDataDisk( + service string, + deployment string, + role string, + lun int) (DataDiskResponse, error) { + var response DataDiskResponse + if service == "" { + return response, fmt.Errorf(errParamNotSpecified, "service") + } + if deployment == "" { + return response, fmt.Errorf(errParamNotSpecified, "deployment") + } + if role == "" { + return response, fmt.Errorf(errParamNotSpecified, "role") + } + + requestURL := fmt.Sprintf(getDataDiskURL, service, deployment, role, lun) + + data, err := c.client.SendAzureGetRequest(requestURL) + if err != nil { + return response, err + } + + err = xml.Unmarshal(data, &response) + return response, err +} + +// GetDisk retrieves information about the specified disk +// +// https://msdn.microsoft.com/en-us/library/azure/dn775053.aspx +func (c 
DiskClient) GetDisk(name string) (DiskResponse, error) { + var response DiskResponse + if name == "" { + return response, fmt.Errorf(errParamNotSpecified, "name") + } + + requestURL := fmt.Sprintf(getDiskURL, name) + + data, err := c.client.SendAzureGetRequest(requestURL) + if err != nil { + return response, err + } + + err = xml.Unmarshal(data, &response) + return response, err +} + +// ListDisks retrieves a list of the disks in the image repository that is associated +// with the specified subscription +// +// https://msdn.microsoft.com/en-us/library/azure/jj157176.aspx +func (c DiskClient) ListDisks() (ListDiskResponse, error) { + var response ListDiskResponse + + data, err := c.client.SendAzureGetRequest(listDisksURL) + if err != nil { + return response, err + } + + err = xml.Unmarshal(data, &response) + return response, err +} + +// UpdateDataDisk updates the configuration of the specified data disk that is +// attached to the specified Virtual Machine +// +// https://msdn.microsoft.com/en-us/library/azure/jj157190.aspx +func (c DiskClient) UpdateDataDisk( + service string, + deployment string, + role string, + lun int, + params UpdateDataDiskParameters) (management.OperationID, error) { + if service == "" { + return "", fmt.Errorf(errParamNotSpecified, "service") + } + if deployment == "" { + return "", fmt.Errorf(errParamNotSpecified, "deployment") + } + if role == "" { + return "", fmt.Errorf(errParamNotSpecified, "role") + } + + requestURL := fmt.Sprintf(updateDataDiskURL, service, deployment, role, lun) + + req, err := xml.Marshal(params) + if err != nil { + return "", err + } + + return c.client.SendAzurePutRequest(requestURL, "", req) +} + +// UpdateDisk updates the label of an existing disk in the image repository that is +// associated with the specified subscription +// +// https://msdn.microsoft.com/en-us/library/azure/jj157205.aspx +func (c DiskClient) UpdateDisk( + name string, + params UpdateDiskParameters) (management.OperationID, error) { + if name == "" { + return "", fmt.Errorf(errParamNotSpecified, "name") + } + + requestURL := fmt.Sprintf(updateDiskURL, name) + + req, err := xml.Marshal(params) + if err != nil { + return "", err + } + + return c.client.SendAzurePutRequest(requestURL, "", req) +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/management/virtualmachinedisk/entities.go b/vendor/github.com/Azure/azure-sdk-for-go/management/virtualmachinedisk/entities.go new file mode 100644 index 000000000..1359381b8 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/management/virtualmachinedisk/entities.go @@ -0,0 +1,134 @@ +package virtualmachinedisk + +import ( + "encoding/xml" + + "github.com/Azure/azure-sdk-for-go/management" +) + +// DiskClient is used to perform operations on Azure Disks +type DiskClient struct { + client management.Client +} + +// CreateDiskParameters represents a disk +// +// https://msdn.microsoft.com/en-us/library/azure/jj157188.aspx +type CreateDiskParameters struct { + XMLName xml.Name `xml:"http://schemas.microsoft.com/windowsazure Disk"` + OS OperatingSystemType `xml:",omitempty"` + Label string + MediaLink string `xml:",omitempty"` + Name string +} + +// UpdateDiskParameters represents a disk +// +// https://msdn.microsoft.com/en-us/library/azure/jj157188.aspx +type UpdateDiskParameters struct { + XMLName xml.Name `xml:"http://schemas.microsoft.com/windowsazure Disk"` + Label string `xml:",omitempty"` + Name string + ResizedSizeInGB int `xml:",omitempty"` +} + +// ListDiskResponse represents a disk +// +// 
https://msdn.microsoft.com/en-us/library/azure/jj157188.aspx
+type ListDiskResponse struct {
+	XMLName xml.Name `xml:"http://schemas.microsoft.com/windowsazure Disks"`
+	Disk    []DiskResponse
+}
+
+// DiskResponse represents a disk
+//
+// https://msdn.microsoft.com/en-us/library/azure/jj157188.aspx
+type DiskResponse struct {
+	XMLName             xml.Name `xml:"http://schemas.microsoft.com/windowsazure Disk"`
+	AffinityGroup       string
+	AttachedTo          Resource
+	IsCorrupted         bool
+	OS                  OperatingSystemType
+	Location            string
+	LogicalDiskSizeInGB int
+	MediaLink           string
+	Name                string
+	SourceImageName     string
+	CreatedTime         string
+	IOType              IOType
+}
+
+// Resource describes the resource details a disk is currently attached to
+type Resource struct {
+	XMLName           xml.Name `xml:"http://schemas.microsoft.com/windowsazure AttachedTo"`
+	DeploymentName    string
+	HostedServiceName string
+	RoleName          string
+}
+
+// IOType represents an IO type
+type IOType string
+
+// These constants represent the possible IO types
+const (
+	IOTypeProvisioned IOType = "Provisioned"
+	IOTypeStandard    IOType = "Standard"
+)
+
+// OperatingSystemType represents an operating system type
+type OperatingSystemType string
+
+// These constants represent the valid operating system types
+const (
+	OperatingSystemTypeNull    OperatingSystemType = "NULL"
+	OperatingSystemTypeLinux   OperatingSystemType = "Linux"
+	OperatingSystemTypeWindows OperatingSystemType = "Windows"
+)
+
+// CreateDataDiskParameters represents a data disk
+//
+// https://msdn.microsoft.com/en-us/library/azure/jj157188.aspx
+type CreateDataDiskParameters struct {
+	XMLName             xml.Name        `xml:"http://schemas.microsoft.com/windowsazure DataVirtualHardDisk"`
+	HostCaching         HostCachingType `xml:",omitempty"`
+	DiskLabel           string          `xml:",omitempty"`
+	DiskName            string          `xml:",omitempty"`
+	Lun                 int             `xml:",omitempty"`
+	LogicalDiskSizeInGB int             `xml:",omitempty"`
+	MediaLink           string
+	SourceMediaLink     string `xml:",omitempty"`
+}
+
+// UpdateDataDiskParameters represents a data disk
+//
+// https://msdn.microsoft.com/en-us/library/azure/jj157188.aspx
+type UpdateDataDiskParameters struct {
+	XMLName     xml.Name        `xml:"http://schemas.microsoft.com/windowsazure DataVirtualHardDisk"`
+	HostCaching HostCachingType `xml:",omitempty"`
+	DiskName    string
+	Lun         int
+	MediaLink   string
+}
+
+// DataDiskResponse represents a data disk
+//
+// https://msdn.microsoft.com/en-us/library/azure/jj157188.aspx
+type DataDiskResponse struct {
+	XMLName             xml.Name `xml:"http://schemas.microsoft.com/windowsazure DataVirtualHardDisk"`
+	HostCaching         HostCachingType
+	DiskLabel           string
+	DiskName            string
+	Lun                 int
+	LogicalDiskSizeInGB int
+	MediaLink           string
+}
+
+// HostCachingType represents a host caching type
+type HostCachingType string
+
+// These constants represent the valid host caching types
+const (
+	HostCachingTypeNone      HostCachingType = "None"
+	HostCachingTypeReadOnly  HostCachingType = "ReadOnly"
+	HostCachingTypeReadWrite HostCachingType = "ReadWrite"
+)
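+
+// A brief usage sketch for the DiskClient defined in client.go. This is not
+// part of the upstream file; it assumes an initialized management.Client named
+// managementClient, and the service, deployment, role, and VHD URL values are
+// placeholders:
+//
+//	diskClient := NewClient(managementClient)
+//	opID, err := diskClient.AddDataDisk("my-service", "my-deployment", "my-role",
+//		CreateDataDiskParameters{
+//			DiskLabel:           "data",
+//			LogicalDiskSizeInGB: 100,
+//			MediaLink:           "http://account.blob.core.windows.net/vhds/data.vhd",
+//		})
+//	if err != nil {
+//		panic(err)
+//	}
+//	err = managementClient.WaitForOperation(opID, nil) // disk operations are asynchronous
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/management/virtualmachineimage/client.go b/vendor/github.com/Azure/azure-sdk-for-go/management/virtualmachineimage/client.go
new file mode 100644
index 000000000..11c8848a7
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/management/virtualmachineimage/client.go
@@ -0,0 +1,110 @@
+// Package virtualmachineimage provides a client for Virtual Machine Images.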
+package virtualmachineimage
+
+import (
+	"encoding/xml"
+	"fmt"
+	"net/url"
+
+	"github.com/Azure/azure-sdk-for-go/management"
+)
+
+const (
+	azureImageListURL         = "services/vmimages"
+	azureImageDeleteURLformat = "services/vmimages/%s"
+	azureRoleOperationsURL    = "services/hostedservices/%s/deployments/%s/roleinstances/%s/operations"
+	errParamNotSpecified      = "Parameter %s is not specified."
+)
+
+//NewClient is used to instantiate a new Client from an Azure client
+func NewClient(client management.Client) Client {
+	return Client{client}
+}
+
+//ListVirtualMachineImages lists the available VM images, filtered by the optional parameters.
+//See https://msdn.microsoft.com/en-us/library/azure/dn499770.aspx
+func (c Client) ListVirtualMachineImages(parameters ListParameters) (ListVirtualMachineImagesResponse, error) {
+	var imageList ListVirtualMachineImagesResponse
+
+	listURL := azureImageListURL
+
+	v := url.Values{}
+	if parameters.Location != "" {
+		v.Add("location", parameters.Location)
+	}
+
+	if parameters.Publisher != "" {
+		v.Add("publisher", parameters.Publisher)
+	}
+
+	if parameters.Category != "" {
+		v.Add("category", parameters.Category)
+	}
+
+	query := v.Encode()
+	if query != "" {
+		listURL = listURL + "?" + query
+	}
+
+	response, err := c.SendAzureGetRequest(listURL)
+	if err != nil {
+		return imageList, err
+	}
+	err = xml.Unmarshal(response, &imageList)
+	return imageList, err
+}
+
+//DeleteVirtualMachineImage deletes the named VM image. If deleteVHDs is specified,
+//the referenced OS and data disks are also deleted.
+//See https://msdn.microsoft.com/en-us/library/azure/dn499769.aspx
+func (c Client) DeleteVirtualMachineImage(name string, deleteVHDs bool) error {
+	if name == "" {
+		return fmt.Errorf(errParamNotSpecified, "name")
+	}
+
+	uri := fmt.Sprintf(azureImageDeleteURLformat, name)
+
+	if deleteVHDs {
+		uri = uri + "?comp=media"
+	}
+
+	_, err := c.SendAzureDeleteRequest(uri) // delete is synchronous for this operation
+	return err
+}
+
+type ListParameters struct {
+	Location  string
+	Publisher string
+	Category  string
+}
+
+const CategoryUser = "User"
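+
+// A short usage sketch for the client above. This is not part of the upstream
+// file; the publish-settings path and the filter value are placeholders:
+//
+//	client, err := management.ClientFromPublishSettingsFile("path/to/creds.publishsettings", "")
+//	if err != nil {
+//		panic(err)
+//	}
+//	images, err := NewClient(client).ListVirtualMachineImages(
+//		ListParameters{Category: CategoryUser})
+//	if err != nil {
+//		panic(err)
+//	}
+//	for _, im := range images.VMImages {
+//		fmt.Println(im.Name)
+//	}
+
+//Capture captures a VM into a VM image. The VM must be shut down first.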
+//See https://msdn.microsoft.com/en-us/library/azure/dn499768.aspx +func (c Client) Capture(cloudServiceName, deploymentName, roleName string, + name, label string, osState OSState, parameters CaptureParameters) (management.OperationID, error) { + if cloudServiceName == "" { + return "", fmt.Errorf(errParamNotSpecified, "cloudServiceName") + } + if deploymentName == "" { + return "", fmt.Errorf(errParamNotSpecified, "deploymentName") + } + if roleName == "" { + return "", fmt.Errorf(errParamNotSpecified, "roleName") + } + + request := CaptureRoleAsVMImageOperation{ + VMImageName: name, + VMImageLabel: label, + OSState: osState, + CaptureParameters: parameters, + } + data, err := xml.Marshal(request) + if err != nil { + return "", err + } + + return c.SendAzurePostRequest(fmt.Sprintf(azureRoleOperationsURL, + cloudServiceName, deploymentName, roleName), data) +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/management/virtualmachineimage/entities.go b/vendor/github.com/Azure/azure-sdk-for-go/management/virtualmachineimage/entities.go new file mode 100644 index 000000000..4a1df3119 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/management/virtualmachineimage/entities.go @@ -0,0 +1,95 @@ +package virtualmachineimage + +import ( + "encoding/xml" + + "github.com/Azure/azure-sdk-for-go/management" + vmdisk "github.com/Azure/azure-sdk-for-go/management/virtualmachinedisk" +) + +// Client is used to perform operations on Azure VM Images. +type Client struct { + management.Client +} + +type ListVirtualMachineImagesResponse struct { + VMImages []VMImage `xml:"VMImage"` +} + +type VMImage struct { + Name string // Specifies the name of the image. + Label string // Specifies an identifier for the image. + Category string // Specifies the repository classification of the image. All user images have the category User. + Description string // Specifies the description of the image. + OSDiskConfiguration OSDiskConfiguration // Specifies configuration information for the operating system disk that is associated with the image. + DataDiskConfigurations []DataDiskConfiguration `xml:">DataDiskConfiguration"` // Specifies configuration information for the data disks that are associated with the image. A VM Image might not have data disks associated with it. + ServiceName string // Specifies the name of the cloud service that contained the Virtual Machine from which the image was created. + DeploymentName string // Specifies the name of the deployment that contained the Virtual Machine from which the image was created. + RoleName string // Specifies the name of the Virtual Machine from which the image was created. + Location string // Specifies the geo-location in which the media is located. The Location value is derived from the storage account that contains the blob in which the media is located. If the storage account belongs to an affinity group the value is NULL and the element is not displayed in the response. + AffinityGroup string // Specifies the affinity group in which the media is located. The AffinityGroup value is derived from the storage account that contains the blob in which the media is located. If the storage account does not belong to an affinity group the value is NULL and the element is not displayed in the response. + CreatedTime string // Specifies the time that the image was created. + ModifiedTime string // Specifies the time that the image was last updated. + Language string // Specifies the language of the image. 
+	ImageFamily       string // Specifies a value that can be used to group VM Images.
+	RecommendedVMSize string // Optional. Specifies the size to use for the Virtual Machine that is created from the VM Image.
+	IsPremium         string // Indicates whether the image contains software or associated services that will incur charges above the core price for the virtual machine. For additional details, see the PricingDetailLink element.
+	Eula              string // Specifies the End User License Agreement that is associated with the image. The value for this element is a string, but it is recommended that the value be a URL that points to a EULA.
+	IconURI           string `xml:"IconUri"` // Specifies the URI to the icon that is displayed for the image in the Management Portal.
+	SmallIconURI      string `xml:"SmallIconUri"` // Specifies the URI to the small icon that is displayed for the image in the Management Portal.
+	PrivacyURI        string `xml:"PrivacyUri"` // Specifies the URI that points to a document that contains the privacy policy related to the image.
+	PublishedDate     string // Specifies the date when the image was added to the image repository.
+}
+
+type OSState string
+
+const (
+	OSStateGeneralized OSState = "Generalized"
+	OSStateSpecialized OSState = "Specialized"
+)
+
+type IOType string
+
+const (
+	IOTypeProvisioned IOType = "Provisioned"
+	IOTypeStandard    IOType = "Standard"
+)
+
+// OSDiskConfiguration specifies configuration information for the operating
+// system disk that is associated with the image.
+type OSDiskConfiguration struct {
+	Name            string                 // Specifies the name of the operating system disk.
+	HostCaching     vmdisk.HostCachingType // Specifies the caching behavior of the operating system disk.
+	OSState         OSState                // Specifies the state of the operating system in the image.
+	OS              string                 // Specifies the operating system type of the image.
+	MediaLink       string                 // Specifies the location of the blob in Azure storage. The blob location belongs to a storage account in the subscription specified by the value in the operation call.
+	LogicalSizeInGB float64                // Specifies the size, in GB, of the operating system disk.
+	IOType          IOType                 // Identifies the type of the storage account for the backing VHD. If the backing VHD is in a Provisioned Storage account, "Provisioned" is returned; otherwise "Standard" is returned.
+}
+
+// DataDiskConfiguration specifies configuration information for the data disks
+// that are associated with the image.
+type DataDiskConfiguration struct {
+	Name            string                 // Specifies the name of the data disk.
+	HostCaching     vmdisk.HostCachingType // Specifies the caching behavior of the data disk.
+	Lun             string                 // Specifies the Logical Unit Number (LUN) for the data disk.
+	MediaLink       string                 // Specifies the location of the blob in Azure storage. The blob location belongs to a storage account in the subscription specified by the value in the operation call.
+	LogicalSizeInGB float64                // Specifies the size, in GB, of the data disk.
+	IOType          IOType                 // Identifies the type of the storage account for the backing VHD. If the backing VHD is in a Provisioned Storage account, "Provisioned" is returned; otherwise "Standard" is returned.
+} + +type CaptureRoleAsVMImageOperation struct { + XMLName xml.Name `xml:"http://schemas.microsoft.com/windowsazure CaptureRoleAsVMImageOperation"` + OperationType string //CaptureRoleAsVMImageOperation + OSState OSState + VMImageName string + VMImageLabel string + CaptureParameters +} + +type CaptureParameters struct { + Description string `xml:",omitempty"` + Language string `xml:",omitempty"` + ImageFamily string `xml:",omitempty"` + RecommendedVMSize string `xml:",omitempty"` +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/management/virtualmachineimage/entities_test.go b/vendor/github.com/Azure/azure-sdk-for-go/management/virtualmachineimage/entities_test.go new file mode 100644 index 000000000..b0d5bbe6f --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/management/virtualmachineimage/entities_test.go @@ -0,0 +1,110 @@ +package virtualmachineimage + +import ( + "encoding/xml" + "testing" +) + +const xml1 = ` + + imgName + + User + packer made image + + OSDisk + ReadWrite + Generalized + Linux + https://sa.blob.core.windows.net/images/PackerMade_Ubuntu_Serv14_2015-12-12.vhd + 30 + Standard + + + PkrSrvf3mz03u4mi + PkrVMf3mz03u4mi + PkrVMf3mz03u4mi + Central US + 2015-12-12T08:59:29.1936858Z + 2015-12-12T08:59:29.1936858Z + PackerMade + Small + false + VMImageReadyForUse + StoppedVM + Small +` +const xml2 = ` + + imgName + + User + packer made image + + OSDisk + ReadWrite + Generalized + Linux + https://sa.blob.core.windows.net/images/PackerMade_Ubuntu_Serv14_2015-12-12.vhd + 30 + Standard + + + + DataDisk1 + ReadWrite + https://sa.blob.core.windows.net/images/PackerMade_Ubuntu_Serv14_2015-12-12-dd1.vhd + 31 + Standard + + + DataDisk2 + ReadWrite + https://sa.blob.core.windows.net/images/PackerMade_Ubuntu_Serv14_2015-12-12-dd2.vhd + 32 + Standard + + + PkrSrvf3mz03u4mi + PkrVMf3mz03u4mi + PkrVMf3mz03u4mi + Central US + 2015-12-12T08:59:29.1936858Z + 2015-12-12T08:59:29.1936858Z + PackerMade + Small + false + VMImageReadyForUse + StoppedVM + Small +` + +func Test_NoDataDisksUnmarshal(t *testing.T) { + var image VMImage + if err := xml.Unmarshal([]byte(xml1), &image); err != nil { + t.Fatal(err) + } + + check := checker{t} + check.Equal(0, len(image.DataDiskConfigurations)) +} + +func Test_DataDiskCountUnmarshal(t *testing.T) { + var image VMImage + if err := xml.Unmarshal([]byte(xml2), &image); err != nil { + t.Fatal(err) + } + + check := checker{t} + check.Equal(2, len(image.DataDiskConfigurations)) + check.Equal("DataDisk1", image.DataDiskConfigurations[0].Name) + check.Equal("DataDisk2", image.DataDiskConfigurations[1].Name) +} + +type checker struct{ *testing.T } + +func (a *checker) Equal(expected, actual interface{}) { + if expected != actual { + a.T.Fatalf("Expected %q, but got %q", expected, actual) + } +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/management/virtualnetwork/client.go b/vendor/github.com/Azure/azure-sdk-for-go/management/virtualnetwork/client.go new file mode 100644 index 000000000..9d27ab7fd --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/management/virtualnetwork/client.go @@ -0,0 +1,47 @@ +// Package virtualnetwork provides a client for Virtual Networks. 
+package virtualnetwork
+
+import (
+	"encoding/xml"
+
+	"github.com/Azure/azure-sdk-for-go/management"
+)
+
+const (
+	azureNetworkConfigurationURL = "services/networking/media"
+)
+
+// NewClient is used to return a new VirtualNetworkClient instance
+func NewClient(client management.Client) VirtualNetworkClient {
+	return VirtualNetworkClient{client: client}
+}
+
+// GetVirtualNetworkConfiguration retrieves the current virtual network
+// configuration for the currently active subscription. Note that the
+// underlying Azure API means that network related operations are not safe
+// for running concurrently.
+func (c VirtualNetworkClient) GetVirtualNetworkConfiguration() (NetworkConfiguration, error) {
+	networkConfiguration := c.NewNetworkConfiguration()
+	response, err := c.client.SendAzureGetRequest(azureNetworkConfigurationURL)
+	if err != nil {
+		return networkConfiguration, err
+	}
+
+	err = xml.Unmarshal(response, &networkConfiguration)
+	return networkConfiguration, err
+}
+
+// SetVirtualNetworkConfiguration configures the virtual networks for the
+// currently active subscription according to the NetworkConfiguration given.
+// Note that the underlying Azure API means that network related operations
+// are not safe for running concurrently.
+func (c VirtualNetworkClient) SetVirtualNetworkConfiguration(networkConfiguration NetworkConfiguration) (management.OperationID, error) {
+	networkConfiguration.setXMLNamespaces()
+	networkConfigurationBytes, err := xml.Marshal(networkConfiguration)
+	if err != nil {
+		return "", err
+	}
+
+	return c.client.SendAzurePutRequest(azureNetworkConfigurationURL, "text/plain", networkConfigurationBytes)
+}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/management/virtualnetwork/entities.go b/vendor/github.com/Azure/azure-sdk-for-go/management/virtualnetwork/entities.go
new file mode 100644
index 000000000..0cb8913e7
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/management/virtualnetwork/entities.go
@@ -0,0 +1,90 @@
+package virtualnetwork
+
+import (
+	"encoding/xml"
+
+	"github.com/Azure/azure-sdk-for-go/management"
+)
+
+const xmlNamespace = "http://schemas.microsoft.com/ServiceHosting/2011/07/NetworkConfiguration"
+const xmlNamespaceXsd = "http://www.w3.org/2001/XMLSchema"
+const xmlNamespaceXsi = "http://www.w3.org/2001/XMLSchema-instance"
+
+// VirtualNetworkClient is used to perform operations on Virtual Networks.
+type VirtualNetworkClient struct {
+	client management.Client
+}
+
+// NetworkConfiguration represents the network configuration for an entire Azure
+// subscription.
+type NetworkConfiguration struct {
+	XMLName         xml.Name                    `xml:"NetworkConfiguration"`
+	XMLNamespaceXsd string                      `xml:"xmlns:xsd,attr"`
+	XMLNamespaceXsi string                      `xml:"xmlns:xsi,attr"`
+	XMLNs           string                      `xml:"xmlns,attr"`
+	Configuration   VirtualNetworkConfiguration `xml:"VirtualNetworkConfiguration"`
+
+	// TODO: Nicer builder methods for these that abstract away the
+	// underlying structure.
+}
+
+// NewNetworkConfiguration creates a new empty NetworkConfiguration structure
+// for further configuration. The XML namespaces are already set correctly.
+func (client *VirtualNetworkClient) NewNetworkConfiguration() NetworkConfiguration {
+	networkConfiguration := NetworkConfiguration{}
+	networkConfiguration.setXMLNamespaces()
+	return networkConfiguration
+}
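+
+// A minimal round-trip sketch with the client above. This is not part of the
+// upstream file; it assumes an initialized management.Client named
+// managementClient, and the site and subnet values are placeholders:
+//
+//	vnc := NewClient(managementClient)
+//	cfg, err := vnc.GetVirtualNetworkConfiguration()
+//	if err != nil {
+//		panic(err)
+//	}
+//	cfg.Configuration.VirtualNetworkSites = append(cfg.Configuration.VirtualNetworkSites,
+//		VirtualNetworkSite{
+//			Name:         "sdk-test-vnet",
+//			Location:     "West US",
+//			AddressSpace: AddressSpace{AddressPrefix: []string{"10.0.0.0/16"}},
+//			Subnets:      []Subnet{{Name: "default", AddressPrefix: "10.0.1.0/24"}},
+//		})
+//	if _, err := vnc.SetVirtualNetworkConfiguration(cfg); err != nil {
+//		panic(err)
+//	}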
+// setXMLNamespaces ensures that all of the required namespaces are set. It
+// should be called prior to marshalling the structure to XML for use with the
+// Azure REST endpoint. It is used internally prior to submitting requests, but
+// since it is idempotent there is no harm in repeat calls.
+func (n *NetworkConfiguration) setXMLNamespaces() {
+	n.XMLNamespaceXsd = xmlNamespaceXsd
+	n.XMLNamespaceXsi = xmlNamespaceXsi
+	n.XMLNs = xmlNamespace
+}
+
+type VirtualNetworkConfiguration struct {
+	DNS                 DNS                  `xml:"Dns,omitempty"`
+	LocalNetworkSites   []LocalNetworkSite   `xml:"LocalNetworkSites>LocalNetworkSite"`
+	VirtualNetworkSites []VirtualNetworkSite `xml:"VirtualNetworkSites>VirtualNetworkSite"`
+}
+
+type DNS struct {
+	DNSServers []DNSServer `xml:"DnsServers>DnsServer,omitempty"`
+}
+
+type DNSServer struct {
+	XMLName   xml.Name `xml:"DnsServer"`
+	Name      string   `xml:"name,attr"`
+	IPAddress string   `xml:"IPAddress,attr"`
+}
+
+type DNSServerRef struct {
+	Name string `xml:"name,attr"`
+}
+
+type VirtualNetworkSite struct {
+	Name          string         `xml:"name,attr"`
+	Location      string         `xml:"Location,attr"`
+	AddressSpace  AddressSpace   `xml:"AddressSpace"`
+	Subnets       []Subnet       `xml:"Subnets>Subnet"`
+	DNSServersRef []DNSServerRef `xml:"DnsServersRef>DnsServerRef,omitempty"`
+}
+
+type LocalNetworkSite struct {
+	Name              string `xml:"name,attr"`
+	VPNGatewayAddress string
+	AddressSpace      AddressSpace
+}
+
+type AddressSpace struct {
+	AddressPrefix []string
+}
+
+type Subnet struct {
+	Name          string `xml:"name,attr"`
+	AddressPrefix string
+}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/management/vmutils/configurationset.go b/vendor/github.com/Azure/azure-sdk-for-go/management/vmutils/configurationset.go
new file mode 100644
index 000000000..0ed3bd018
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/management/vmutils/configurationset.go
@@ -0,0 +1,28 @@
+package vmutils
+
+import (
+	vm "github.com/Azure/azure-sdk-for-go/management/virtualmachine"
+)
+
+func updateOrAddConfig(configs []vm.ConfigurationSet, configType vm.ConfigurationSetType, update func(*vm.ConfigurationSet)) []vm.ConfigurationSet {
+	config := findConfig(configs, configType)
+	if config == nil {
+		configs = append(configs, vm.ConfigurationSet{ConfigurationSetType: configType})
+		config = findConfig(configs, configType)
+	}
+	update(config)
+
+	return configs
+}
+
+func findConfig(configs []vm.ConfigurationSet, configType vm.ConfigurationSetType) *vm.ConfigurationSet {
+	for i, config := range configs {
+		if config.ConfigurationSetType == configType {
+			// need to return a pointer to the original set in configs,
+			// not the copy made by the range iterator
+			return &configs[i]
+		}
+	}
+
+	return nil
+}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/management/vmutils/datadisks.go b/vendor/github.com/Azure/azure-sdk-for-go/management/vmutils/datadisks.go
new file mode 100644
index 000000000..06fcf852d
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/management/vmutils/datadisks.go
@@ -0,0 +1,58 @@
+package vmutils
+
+import (
+	"fmt"
+
+	vm "github.com/Azure/azure-sdk-for-go/management/virtualmachine"
+	vmdisk "github.com/Azure/azure-sdk-for-go/management/virtualmachinedisk"
+)
+
+// ConfigureWithNewDataDisk adds configuration for a new (empty) data disk
+func ConfigureWithNewDataDisk(role *vm.Role, label, destinationVhdStorageURL string, sizeInGB int, cachingType vmdisk.HostCachingType) error {
+	if role == nil {
+		return fmt.Errorf(errParamNotSpecified, "role")
+	}
+
+	appendDataDisk(role, vm.DataVirtualHardDisk{
+		DiskLabel:           label,
+		HostCaching:         cachingType,
+		LogicalDiskSizeInGB: sizeInGB,
+		MediaLink:           destinationVhdStorageURL,
+	})
+
+	return nil
+}
+
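+// A short sketch of the helper above. This is not part of the upstream file;
+// role comes from NewVMConfiguration, and the VHD URL is a placeholder:
+//
+//	role := NewVMConfiguration("myvm", "Small")
+//	err := ConfigureWithNewDataDisk(&role,
+//		"data-disk-0",
+//		"http://account.blob.core.windows.net/vhds/myvm-data-0.vhd",
+//		100,
+//		vmdisk.HostCachingTypeReadOnly)
+//	if err != nil {
+//		panic(err)
+//	}
+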
+// ConfigureWithExistingDataDisk adds configuration for an existing data disk
+func ConfigureWithExistingDataDisk(role *vm.Role, diskName string, cachingType vmdisk.HostCachingType) error {
+	if role == nil {
+		return fmt.Errorf(errParamNotSpecified, "role")
+	}
+
+	appendDataDisk(role, vm.DataVirtualHardDisk{
+		DiskName:    diskName,
+		HostCaching: cachingType,
+	})
+
+	return nil
+}
+
+// ConfigureWithVhdDataDisk adds configuration for adding a vhd in a storage
+// account as a data disk
+func ConfigureWithVhdDataDisk(role *vm.Role, sourceVhdStorageURL string, cachingType vmdisk.HostCachingType) error {
+	if role == nil {
+		return fmt.Errorf(errParamNotSpecified, "role")
+	}
+
+	appendDataDisk(role, vm.DataVirtualHardDisk{
+		SourceMediaLink: sourceVhdStorageURL,
+		HostCaching:     cachingType,
+	})
+
+	return nil
+}
+
+func appendDataDisk(role *vm.Role, disk vm.DataVirtualHardDisk) {
+	disk.Lun = len(role.DataVirtualHardDisks)
+	role.DataVirtualHardDisks = append(role.DataVirtualHardDisks, disk)
+}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/management/vmutils/deployment.go b/vendor/github.com/Azure/azure-sdk-for-go/management/vmutils/deployment.go
new file mode 100644
index 000000000..8ac5398de
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/management/vmutils/deployment.go
@@ -0,0 +1,91 @@
+package vmutils
+
+import (
+	"fmt"
+
+	vm "github.com/Azure/azure-sdk-for-go/management/virtualmachine"
+)
+
+// ConfigureDeploymentFromRemoteImage configures VM Role to deploy from a remote
+// image source. "remoteImageSourceURL" can be any publicly accessible URL to
+// a VHD file, including but not limited to a SAS Azure Storage blob url. "os"
+// needs to be either "Linux" or "Windows". "label" is optional.
+func ConfigureDeploymentFromRemoteImage(
+	role *vm.Role,
+	remoteImageSourceURL string,
+	os string,
+	newDiskName string,
+	destinationVhdStorageURL string,
+	label string) error {
+	if role == nil {
+		return fmt.Errorf(errParamNotSpecified, "role")
+	}
+
+	role.OSVirtualHardDisk = &vm.OSVirtualHardDisk{
+		RemoteSourceImageLink: remoteImageSourceURL,
+		MediaLink:             destinationVhdStorageURL,
+		DiskName:              newDiskName,
+		OS:                    os,
+		DiskLabel:             label,
+	}
+	return nil
+}
+
+// ConfigureDeploymentFromPlatformImage configures VM Role to deploy from a
+// platform image. See osimage package for methods to retrieve a list of the
+// available platform images. "label" is optional.
+func ConfigureDeploymentFromPlatformImage(
+	role *vm.Role,
+	imageName string,
+	mediaLink string,
+	label string) error {
+	if role == nil {
+		return fmt.Errorf(errParamNotSpecified, "role")
+	}
+
+	role.OSVirtualHardDisk = &vm.OSVirtualHardDisk{
+		SourceImageName: imageName,
+		MediaLink:       mediaLink,
+	}
+	return nil
+}
+
+// ConfigureDeploymentFromPublishedVMImage configures VM Role to deploy from
+// a published (public) VM image.
+func ConfigureDeploymentFromPublishedVMImage(
+	role *vm.Role,
+	vmImageName string,
+	mediaLocation string,
+	provisionGuestAgent bool) error {
+	if role == nil {
+		return fmt.Errorf(errParamNotSpecified, "role")
+	}
+
+	role.VMImageName = vmImageName
+	role.MediaLocation = mediaLocation
+	role.ProvisionGuestAgent = provisionGuestAgent
+	return nil
+}
+
+// ConfigureDeploymentFromUserVMImage configures VM Role to deploy from a previously
+// captured (user generated) VM image.
+func ConfigureDeploymentFromUserVMImage( + role *vm.Role, + vmImageName string) error { + if role == nil { + return fmt.Errorf(errParamNotSpecified, "role") + } + + role.VMImageName = vmImageName + return nil +} + +// ConfigureDeploymentFromExistingOSDisk configures VM Role to deploy from an +// existing disk. 'label' is optional. +func ConfigureDeploymentFromExistingOSDisk(role *vm.Role, osDiskName, label string) error { + role.OSVirtualHardDisk = &vm.OSVirtualHardDisk{ + DiskName: osDiskName, + DiskLabel: label, + } + return nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/management/vmutils/examples_test.go b/vendor/github.com/Azure/azure-sdk-for-go/management/vmutils/examples_test.go new file mode 100644 index 000000000..ea23bbd78 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/management/vmutils/examples_test.go @@ -0,0 +1,53 @@ +package vmutils + +import ( + "encoding/base64" + "fmt" + + "github.com/Azure/azure-sdk-for-go/management" + "github.com/Azure/azure-sdk-for-go/management/hostedservice" + "github.com/Azure/azure-sdk-for-go/management/virtualmachine" +) + +func Example() { + dnsName := "test-vm-from-go" + storageAccount := "mystorageaccount" + location := "West US" + vmSize := "Small" + vmImage := "b39f27a8b8c64d52b05eac6a62ebad85__Ubuntu-14_04-LTS-amd64-server-20140724-en-us-30GB" + userName := "testuser" + userPassword := "Test123" + + client, err := management.ClientFromPublishSettingsFile("path/to/downloaded.publishsettings", "") + if err != nil { + panic(err) + } + + // create hosted service + if err := hostedservice.NewClient(client).CreateHostedService(hostedservice.CreateHostedServiceParameters{ + ServiceName: dnsName, + Location: location, + Label: base64.StdEncoding.EncodeToString([]byte(dnsName))}); err != nil { + panic(err) + } + + // create virtual machine + role := NewVMConfiguration(dnsName, vmSize) + ConfigureDeploymentFromPlatformImage( + &role, + vmImage, + fmt.Sprintf("http://%s.blob.core.windows.net/sdktest/%s.vhd", storageAccount, dnsName), + "") + ConfigureForLinux(&role, dnsName, userName, userPassword) + ConfigureWithPublicSSH(&role) + + operationID, err := virtualmachine.NewClient(client). 
+		CreateDeployment(role, dnsName, virtualmachine.CreateDeploymentOptions{})
+	if err != nil {
+		panic(err)
+	}
+	err = client.WaitForOperation(operationID, nil)
+	if err != nil {
+		panic(err)
+	}
+}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/management/vmutils/extensions.go b/vendor/github.com/Azure/azure-sdk-for-go/management/vmutils/extensions.go
new file mode 100644
index 000000000..90a869275
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/management/vmutils/extensions.go
@@ -0,0 +1,88 @@
+package vmutils
+
+import (
+	"encoding/base64"
+	"encoding/json"
+	"fmt"
+
+	vm "github.com/Azure/azure-sdk-for-go/management/virtualmachine"
+)
+
+const (
+	dockerPublicConfigVersion = 2
+)
+
+// AddAzureVMExtensionConfiguration adds a resource extension reference, along
+// with its base-64 encoded public and private configuration, to the role.
+func AddAzureVMExtensionConfiguration(role *vm.Role, name, publisher, version, referenceName, state string,
+	publicConfigurationValue, privateConfigurationValue []byte) error {
+	if role == nil {
+		return fmt.Errorf(errParamNotSpecified, "role")
+	}
+
+	extension := vm.ResourceExtensionReference{
+		Name:          name,
+		Publisher:     publisher,
+		Version:       version,
+		ReferenceName: referenceName,
+		State:         state,
+	}
+
+	if len(privateConfigurationValue) != 0 {
+		extension.ParameterValues = append(extension.ParameterValues, vm.ResourceExtensionParameter{
+			Key:   "ignored",
+			Value: base64.StdEncoding.EncodeToString(privateConfigurationValue),
+			Type:  "Private",
+		})
+	}
+
+	if len(publicConfigurationValue) != 0 {
+		extension.ParameterValues = append(extension.ParameterValues, vm.ResourceExtensionParameter{
+			Key:   "ignored",
+			Value: base64.StdEncoding.EncodeToString(publicConfigurationValue),
+			Type:  "Public",
+		})
+	}
+
+	if role.ResourceExtensionReferences == nil {
+		role.ResourceExtensionReferences = &[]vm.ResourceExtensionReference{}
+	}
+	extensionList := append(*role.ResourceExtensionReferences, extension)
+	role.ResourceExtensionReferences = &extensionList
+	return nil
+}
+
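+// A rough usage sketch for the function above. This is not part of the
+// upstream file; the extension names and configuration are placeholders (see
+// AddAzureDockerVMExtensionConfiguration below for a real caller):
+//
+//	role := NewVMConfiguration("myvm", "Small")
+//	publicCfg := []byte(`{"setting":"value"}`)
+//	err := AddAzureVMExtensionConfiguration(&role,
+//		"SomeExtension", "Some.Publisher", "1.0", "SomeExtension", "enable",
+//		publicCfg, nil)
+//	if err != nil {
+//		panic(err)
+//	}
+
+// AddAzureDockerVMExtensionConfiguration adds the DockerExtension to the role
+// configuration and opens a port "dockerPort"
+// TODO(ahmetalpbalkan) Deprecate this and move to 'docker-machine' codebase.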
+func AddAzureDockerVMExtensionConfiguration(role *vm.Role, dockerPort int, version string) error { + if role == nil { + return fmt.Errorf(errParamNotSpecified, "role") + } + + ConfigureWithExternalPort(role, "docker", dockerPort, dockerPort, vm.InputEndpointProtocolTCP) + + publicConfiguration, err := createDockerPublicConfig(dockerPort) + if err != nil { + return err + } + + privateConfiguration, err := json.Marshal(dockerPrivateConfig{}) + if err != nil { + return err + } + + return AddAzureVMExtensionConfiguration(role, + "DockerExtension", "MSOpenTech.Extensions", + version, "DockerExtension", "enable", + publicConfiguration, privateConfiguration) +} + +func createDockerPublicConfig(dockerPort int) ([]byte, error) { + return json.Marshal(dockerPublicConfig{DockerPort: dockerPort, Version: dockerPublicConfigVersion}) +} + +type dockerPublicConfig struct { + DockerPort int `json:"dockerport"` + Version int `json:"version"` +} + +type dockerPrivateConfig struct{} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/management/vmutils/extensions_test.go b/vendor/github.com/Azure/azure-sdk-for-go/management/vmutils/extensions_test.go new file mode 100644 index 000000000..17a46563d --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/management/vmutils/extensions_test.go @@ -0,0 +1,42 @@ +package vmutils + +import ( + "encoding/xml" + "testing" + + vm "github.com/Azure/azure-sdk-for-go/management/virtualmachine" +) + +func Test_AddAzureVMExtensionConfiguration(t *testing.T) { + + role := vm.Role{} + AddAzureVMExtensionConfiguration(&role, + "nameOfExtension", "nameOfPublisher", "versionOfExtension", "nameOfReference", "state", []byte{1, 2, 3}, []byte{}) + + data, err := xml.MarshalIndent(role, "", " ") + if err != nil { + t.Fatal(err) + } + if expected := ` + + + + nameOfReference + nameOfPublisher + nameOfExtension + versionOfExtension + + + ignored + AQID + Public + + + state + + + +`; string(data) != expected { + t.Fatalf("Expected %q, but got %q", expected, string(data)) + } +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/management/vmutils/integration_test.go b/vendor/github.com/Azure/azure-sdk-for-go/management/vmutils/integration_test.go new file mode 100644 index 000000000..ac5853d70 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/management/vmutils/integration_test.go @@ -0,0 +1,455 @@ +package vmutils + +import ( + "encoding/base64" + "fmt" + "math/rand" + "testing" + "time" + + "github.com/Azure/azure-sdk-for-go/management" + "github.com/Azure/azure-sdk-for-go/management/hostedservice" + "github.com/Azure/azure-sdk-for-go/management/location" + "github.com/Azure/azure-sdk-for-go/management/osimage" + storage "github.com/Azure/azure-sdk-for-go/management/storageservice" + "github.com/Azure/azure-sdk-for-go/management/testutils" + vm "github.com/Azure/azure-sdk-for-go/management/virtualmachine" + vmimage "github.com/Azure/azure-sdk-for-go/management/virtualmachineimage" +) + +func TestDeployPlatformImage(t *testing.T) { + client := testutils.GetTestClient(t) + vmname := GenerateName() + sa := GetTestStorageAccount(t, client) + location := sa.StorageServiceProperties.Location + + role := NewVMConfiguration(vmname, "Standard_D3") + ConfigureDeploymentFromPlatformImage(&role, + GetLinuxTestImage(t, client).Name, + fmt.Sprintf("http://%s.blob.core.windows.net/sdktest/%s.vhd", sa.ServiceName, vmname), + GenerateName()) + ConfigureForLinux(&role, "myvm", "azureuser", GeneratePassword()) + ConfigureWithPublicSSH(&role) + + testRoleConfiguration(t, client, 
role, location) +} + +func TestDeployPlatformWindowsImage(t *testing.T) { + client := testutils.GetTestClient(t) + vmname := GenerateName() + sa := GetTestStorageAccount(t, client) + location := sa.StorageServiceProperties.Location + + role := NewVMConfiguration(vmname, "Standard_D3") + ConfigureDeploymentFromPlatformImage(&role, + GetWindowsTestImage(t, client).Name, + fmt.Sprintf("http://%s.blob.core.windows.net/sdktest/%s.vhd", sa.ServiceName, vmname), + GenerateName()) + ConfigureForWindows(&role, vmname, "azureuser", GeneratePassword(), true, "") + ConfigureWinRMOverHTTP(&role) + ConfigureWinRMOverHTTPS(&role, "") + + testRoleConfiguration(t, client, role, location) +} + +func TestVMImageList(t *testing.T) { + client := testutils.GetTestClient(t) + vmic := vmimage.NewClient(client) + il, _ := vmic.ListVirtualMachineImages(vmimage.ListParameters{}) + for _, im := range il.VMImages { + t.Logf("%s -%s", im.Name, im.Description) + } +} + +func TestDeployPlatformOSImageCaptureRedeploy(t *testing.T) { + client := testutils.GetTestClient(t) + vmname := GenerateName() + sa := GetTestStorageAccount(t, client) + location := sa.StorageServiceProperties.Location + + role := NewVMConfiguration(vmname, "Standard_D3") + ConfigureDeploymentFromPlatformImage(&role, + GetLinuxTestImage(t, client).Name, + fmt.Sprintf("http://%s.blob.core.windows.net/sdktest/%s.vhd", sa.ServiceName, vmname), + GenerateName()) + ConfigureForLinux(&role, "myvm", "azureuser", GeneratePassword()) + ConfigureWithPublicSSH(&role) + + t.Logf("Deploying VM: %s", vmname) + createRoleConfiguration(t, client, role, location) + + t.Logf("Wait for deployment to enter running state") + vmc := vm.NewClient(client) + status := vm.DeploymentStatusDeploying + for status != vm.DeploymentStatusRunning { + deployment, err := vmc.GetDeployment(vmname, vmname) + if err != nil { + t.Error(err) + break + } + status = deployment.Status + } + + t.Logf("Shutting down VM: %s", vmname) + if err := Await(client, func() (management.OperationID, error) { + return vmc.ShutdownRole(vmname, vmname, vmname) + }); err != nil { + t.Error(err) + } + + if err := WaitForDeploymentPowerState(client, vmname, vmname, vm.PowerStateStopped); err != nil { + t.Fatal(err) + } + + imagename := GenerateName() + t.Logf("Capturing OSImage: %s", imagename) + if err := Await(client, func() (management.OperationID, error) { + return vmc.CaptureRole(vmname, vmname, vmname, imagename, imagename, nil) + }); err != nil { + t.Error(err) + } + + im := GetUserOSImage(t, client, imagename) + t.Logf("Found image: %+v", im) + + newvmname := GenerateName() + role = NewVMConfiguration(newvmname, "Standard_D3") + ConfigureDeploymentFromPlatformImage(&role, + im.Name, + fmt.Sprintf("http://%s.blob.core.windows.net/sdktest/%s.vhd", sa.ServiceName, newvmname), + GenerateName()) + ConfigureForLinux(&role, newvmname, "azureuser", GeneratePassword()) + ConfigureWithPublicSSH(&role) + + t.Logf("Deploying new VM from freshly captured OS image: %s", newvmname) + if err := Await(client, func() (management.OperationID, error) { + return vmc.CreateDeployment(role, vmname, vm.CreateDeploymentOptions{}) + }); err != nil { + t.Error(err) + } + + deleteHostedService(t, client, vmname) +} + +func TestDeployPlatformVMImageCaptureRedeploy(t *testing.T) { + client := testutils.GetTestClient(t) + vmname := GenerateName() + sa := GetTestStorageAccount(t, client) + location := sa.StorageServiceProperties.Location + + role := NewVMConfiguration(vmname, "Standard_D3") + ConfigureDeploymentFromPlatformImage(&role, 
+ GetLinuxTestImage(t, client).Name, + fmt.Sprintf("http://%s.blob.core.windows.net/sdktest/%s.vhd", sa.ServiceName, vmname), + GenerateName()) + ConfigureForLinux(&role, "myvm", "azureuser", GeneratePassword()) + ConfigureWithPublicSSH(&role) + + t.Logf("Deploying VM: %s", vmname) + createRoleConfiguration(t, client, role, location) + + t.Logf("Wait for deployment to enter running state") + vmc := vm.NewClient(client) + status := vm.DeploymentStatusDeploying + for status != vm.DeploymentStatusRunning { + deployment, err := vmc.GetDeployment(vmname, vmname) + if err != nil { + t.Error(err) + break + } + status = deployment.Status + } + + t.Logf("Shutting down VM: %s", vmname) + if err := Await(client, func() (management.OperationID, error) { + return vmc.ShutdownRole(vmname, vmname, vmname) + }); err != nil { + t.Error(err) + } + + if err := WaitForDeploymentInstanceStatus(client, vmname, vmname, vm.InstanceStatusStoppedVM); err != nil { + t.Fatal(err) + } + + imagename := GenerateName() + t.Logf("Capturing VMImage: %s", imagename) + if err := Await(client, func() (management.OperationID, error) { + return vmimage.NewClient(client).Capture(vmname, vmname, vmname, imagename, imagename, vmimage.OSStateGeneralized, vmimage.CaptureParameters{}) + }); err != nil { + t.Error(err) + } + + im := GetUserVMImage(t, client, imagename) + t.Logf("Found image: %+v", im) + + newvmname := GenerateName() + role = NewVMConfiguration(newvmname, "Standard_D3") + ConfigureDeploymentFromUserVMImage(&role, im.Name) + ConfigureForLinux(&role, newvmname, "azureuser", GeneratePassword()) + ConfigureWithPublicSSH(&role) + + t.Logf("Deploying new VM from freshly captured VM image: %s", newvmname) + if err := Await(client, func() (management.OperationID, error) { + return vmc.CreateDeployment(role, vmname, vm.CreateDeploymentOptions{}) + }); err != nil { + t.Error(err) + } + + deleteHostedService(t, client, vmname) +} + +func TestDeployFromPublishedVmImage(t *testing.T) { + client := testutils.GetTestClient(t) + vmname := GenerateName() + sa := GetTestStorageAccount(t, client) + location := sa.StorageServiceProperties.Location + + im := GetVMImage(t, client, func(im vmimage.VMImage) bool { + return im.Name == + "fb83b3509582419d99629ce476bcb5c8__SQL-Server-2014-RTM-12.0.2430.0-OLTP-ENU-Win2012R2-cy14su11" + }) + + role := NewVMConfiguration(vmname, "Standard_D4") + ConfigureDeploymentFromPublishedVMImage(&role, im.Name, + fmt.Sprintf("http://%s.blob.core.windows.net/%s", sa.ServiceName, vmname), false) + ConfigureForWindows(&role, vmname, "azureuser", GeneratePassword(), true, "") + ConfigureWithPublicSSH(&role) + + testRoleConfiguration(t, client, role, location) +} + +func TestRoleStateOperations(t *testing.T) { + client := testutils.GetTestClient(t) + vmname := GenerateName() + sa := GetTestStorageAccount(t, client) + location := sa.StorageServiceProperties.Location + + role := NewVMConfiguration(vmname, "Standard_D3") + ConfigureDeploymentFromPlatformImage(&role, + GetLinuxTestImage(t, client).Name, + fmt.Sprintf("http://%s.blob.core.windows.net/sdktest/%s.vhd", sa.ServiceName, vmname), + GenerateName()) + ConfigureForLinux(&role, "myvm", "azureuser", GeneratePassword()) + + createRoleConfiguration(t, client, role, location) + + vmc := vm.NewClient(client) + if err := Await(client, func() (management.OperationID, error) { + return vmc.ShutdownRole(vmname, vmname, vmname) + }); err != nil { + t.Error(err) + } + if err := Await(client, func() (management.OperationID, error) { + return vmc.StartRole(vmname, vmname, 
vmname) + }); err != nil { + t.Error(err) + } + if err := Await(client, func() (management.OperationID, error) { + return vmc.RestartRole(vmname, vmname, vmname) + }); err != nil { + t.Error(err) + } + + deleteHostedService(t, client, vmname) +} + +func testRoleConfiguration(t *testing.T, client management.Client, role vm.Role, location string) { + createRoleConfiguration(t, client, role, location) + + deleteHostedService(t, client, role.RoleName) +} + +func createRoleConfiguration(t *testing.T, client management.Client, role vm.Role, location string) { + vmc := vm.NewClient(client) + hsc := hostedservice.NewClient(client) + vmname := role.RoleName + + if err := hsc.CreateHostedService(hostedservice.CreateHostedServiceParameters{ + ServiceName: vmname, Location: location, + Label: base64.StdEncoding.EncodeToString([]byte(vmname))}); err != nil { + t.Error(err) + } + + if err := Await(client, func() (management.OperationID, error) { + return vmc.CreateDeployment(role, vmname, vm.CreateDeploymentOptions{}) + }); err != nil { + t.Error(err) + } +} + +func deleteHostedService(t *testing.T, client management.Client, vmname string) { + t.Logf("Deleting hosted service: %s", vmname) + if err := Await(client, func() (management.OperationID, error) { + return hostedservice.NewClient(client).DeleteHostedService(vmname, true) + }); err != nil { + t.Error(err) + } +} + +// === utility funcs === + +func GetTestStorageAccount(t *testing.T, client management.Client) storage.StorageServiceResponse { + t.Log("Retrieving storage account") + sc := storage.NewClient(client) + var sa storage.StorageServiceResponse + ssl, err := sc.ListStorageServices() + if err != nil { + t.Fatal(err) + } + rnd := rand.New(rand.NewSource(time.Now().UnixNano())) + + if len(ssl.StorageServices) == 0 { + t.Log("No storage accounts found, creating a new one") + lc := location.NewClient(client) + ll, err := lc.ListLocations() + if err != nil { + t.Fatal(err) + } + loc := ll.Locations[rnd.Intn(len(ll.Locations))].Name + + t.Logf("Location for new storage account: %s", loc) + name := GenerateName() + op, err := sc.CreateStorageService(storage.StorageAccountCreateParameters{ + ServiceName: name, + Label: base64.StdEncoding.EncodeToString([]byte(name)), + Location: loc, + AccountType: storage.AccountTypeStandardLRS}) + if err != nil { + t.Fatal(err) + } + if err := client.WaitForOperation(op, nil); err != nil { + t.Fatal(err) + } + sa, err = sc.GetStorageService(name) + } else { + + sa = ssl.StorageServices[rnd.Intn(len(ssl.StorageServices))] + } + + t.Logf("Selected storage account '%s' in location '%s'", + sa.ServiceName, sa.StorageServiceProperties.Location) + + return sa +} + +func GetLinuxTestImage(t *testing.T, client management.Client) osimage.OSImage { + return GetOSImage(t, client, func(im osimage.OSImage) bool { + return im.Category == "Public" && im.ImageFamily == "Ubuntu Server 14.04 LTS" + }) +} + +func GetWindowsTestImage(t *testing.T, client management.Client) osimage.OSImage { + return GetOSImage(t, client, func(im osimage.OSImage) bool { + return im.Category == "Public" && im.ImageFamily == "Windows Server 2012 R2 Datacenter" + }) +} + +func GetUserOSImage(t *testing.T, client management.Client, name string) osimage.OSImage { + return GetOSImage(t, client, func(im osimage.OSImage) bool { + return im.Category == "User" && im.Name == name + }) +} + +func GetOSImage( + t *testing.T, + client management.Client, + filter func(osimage.OSImage) bool) osimage.OSImage { + t.Log("Selecting OS image") + osc := 
osimage.NewClient(client) + allimages, err := osc.ListOSImages() + if err != nil { + t.Fatal(err) + } + filtered := []osimage.OSImage{} + for _, im := range allimages.OSImages { + if filter(im) { + filtered = append(filtered, im) + } + } + if len(filtered) == 0 { + t.Fatal("Filter too restrictive, no images left?") + } + + image := filtered[0] + for _, im := range filtered { + if im.PublishedDate > image.PublishedDate { + image = im + } + } + + t.Logf("Selecting image '%s'", image.Name) + return image +} + +func GetUserVMImage(t *testing.T, client management.Client, name string) vmimage.VMImage { + return GetVMImage(t, client, func(im vmimage.VMImage) bool { + return im.Category == "User" && im.Name == name + }) +} + +func GetVMImage( + t *testing.T, + client management.Client, + filter func(vmimage.VMImage) bool) vmimage.VMImage { + t.Log("Selecting VM image") + allimages, err := vmimage.NewClient(client).ListVirtualMachineImages(vmimage.ListParameters{}) + if err != nil { + t.Fatal(err) + } + filtered := []vmimage.VMImage{} + for _, im := range allimages.VMImages { + if filter(im) { + filtered = append(filtered, im) + } + } + if len(filtered) == 0 { + t.Fatal("Filter too restrictive, no images left?") + } + + image := filtered[0] + for _, im := range filtered { + if im.PublishedDate > image.PublishedDate { + image = im + } + } + + t.Logf("Selecting image '%s'", image.Name) + return image +} + +func GenerateName() string { + from := "1234567890abcdefghijklmnopqrstuvwxyz" + return "sdk" + GenerateString(12, from) +} + +func GeneratePassword() string { + pw := GenerateString(20, "1234567890") + + GenerateString(20, "abcdefghijklmnopqrstuvwxyz") + + GenerateString(20, "ABCDEFGHIJKLMNOPQRSTUVWXYZ") + + rnd := rand.New(rand.NewSource(time.Now().UnixNano())) + i := rnd.Intn(len(pw)-2) + 1 + + pw = string(append([]uint8(pw[i:]), pw[:i-1]...)) + + return pw +} + +func GenerateString(length int, from string) string { + str := "" + rnd := rand.New(rand.NewSource(time.Now().UnixNano())) + for len(str) < length { + str += string(from[rnd.Intn(len(from))]) + } + return str +} + +type asyncFunc func() (operationId management.OperationID, err error) + +func Await(client management.Client, async asyncFunc) error { + requestID, err := async() + if err != nil { + return err + } + return client.WaitForOperation(requestID, nil) +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/management/vmutils/network.go b/vendor/github.com/Azure/azure-sdk-for-go/management/vmutils/network.go new file mode 100644 index 000000000..bf2331c7b --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/management/vmutils/network.go @@ -0,0 +1,83 @@ +package vmutils + +import ( + "fmt" + + vm "github.com/Azure/azure-sdk-for-go/management/virtualmachine" +) + +// ConfigureWithPublicSSH adds configuration exposing port 22 externally +func ConfigureWithPublicSSH(role *vm.Role) error { + if role == nil { + return fmt.Errorf(errParamNotSpecified, "role") + } + + return ConfigureWithExternalPort(role, "SSH", 22, 22, vm.InputEndpointProtocolTCP) +} + +// ConfigureWithPublicRDP adds configuration exposing port 3389 externally +func ConfigureWithPublicRDP(role *vm.Role) error { + if role == nil { + return fmt.Errorf(errParamNotSpecified, "role") + } + + return ConfigureWithExternalPort(role, "RDP", 3389, 3389, vm.InputEndpointProtocolTCP) +} + +// ConfigureWithPublicPowerShell adds configuration exposing port 5986 +// externally +func ConfigureWithPublicPowerShell(role *vm.Role) error { + if role == nil { + return 
fmt.Errorf(errParamNotSpecified, "role") + } + + return ConfigureWithExternalPort(role, "PowerShell", 5986, 5986, vm.InputEndpointProtocolTCP) +} + +// ConfigureWithExternalPort adds a new InputEndpoint to the Role, exposing a +// port externally +func ConfigureWithExternalPort(role *vm.Role, name string, localport, externalport int, protocol vm.InputEndpointProtocol) error { + if role == nil { + return fmt.Errorf(errParamNotSpecified, "role") + } + + role.ConfigurationSets = updateOrAddConfig(role.ConfigurationSets, vm.ConfigurationSetTypeNetwork, + func(config *vm.ConfigurationSet) { + config.InputEndpoints = append(config.InputEndpoints, vm.InputEndpoint{ + LocalPort: localport, + Name: name, + Port: externalport, + Protocol: protocol, + }) + }) + + return nil +} + +// ConfigureWithSecurityGroup associates the Role with a specific network security group +func ConfigureWithSecurityGroup(role *vm.Role, networkSecurityGroup string) error { + if role == nil { + return fmt.Errorf(errParamNotSpecified, "role") + } + + role.ConfigurationSets = updateOrAddConfig(role.ConfigurationSets, vm.ConfigurationSetTypeNetwork, + func(config *vm.ConfigurationSet) { + config.NetworkSecurityGroup = networkSecurityGroup + }) + + return nil +} + +// ConfigureWithSubnet associates the Role with a specific subnet +func ConfigureWithSubnet(role *vm.Role, subnet string) error { + if role == nil { + return fmt.Errorf(errParamNotSpecified, "role") + } + + role.ConfigurationSets = updateOrAddConfig(role.ConfigurationSets, vm.ConfigurationSetTypeNetwork, + func(config *vm.ConfigurationSet) { + config.SubnetNames = append(config.SubnetNames, subnet) + }) + + return nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/management/vmutils/rolesize.go b/vendor/github.com/Azure/azure-sdk-for-go/management/vmutils/rolesize.go new file mode 100644 index 000000000..bedd702fb --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/management/vmutils/rolesize.go @@ -0,0 +1,76 @@ +package vmutils + +import ( + "fmt" + + "github.com/Azure/azure-sdk-for-go/management" + lc "github.com/Azure/azure-sdk-for-go/management/location" + vm "github.com/Azure/azure-sdk-for-go/management/virtualmachine" +) + +// IsRoleSizeValid retrieves the available rolesizes using +// vmclient.GetRoleSizeList() and returns whether that the provided roleSizeName +// is part of that list +func IsRoleSizeValid(vmclient vm.VirtualMachineClient, roleSizeName string) (bool, error) { + if roleSizeName == "" { + return false, fmt.Errorf(errParamNotSpecified, "roleSizeName") + } + + roleSizeList, err := vmclient.GetRoleSizeList() + if err != nil { + return false, err + } + + for _, roleSize := range roleSizeList.RoleSizes { + if roleSize.Name == roleSizeName { + return true, nil + } + } + + return false, nil +} + +// IsRoleSizeAvailableInLocation retrieves all available sizes in the specified +// location and returns whether that the provided roleSizeName is part of that list. 
+func IsRoleSizeAvailableInLocation(managementclient management.Client, location, roleSizeName string) (bool, error) { + if location == "" { + return false, fmt.Errorf(errParamNotSpecified, "location") + } + if roleSizeName == "" { + return false, fmt.Errorf(errParamNotSpecified, "roleSizeName") + } + + locationClient := lc.NewClient(managementclient) + locationInfo, err := getLocation(locationClient, location) + if err != nil { + return false, err + } + + for _, availableRoleSize := range locationInfo.VirtualMachineRoleSizes { + if availableRoleSize == roleSizeName { + return true, nil + } + } + + return false, nil +} + +func getLocation(c lc.LocationClient, location string) (*lc.Location, error) { + if location == "" { + return nil, fmt.Errorf(errParamNotSpecified, "location") + } + + locations, err := c.ListLocations() + if err != nil { + return nil, err + } + + for _, existingLocation := range locations.Locations { + if existingLocation.Name != location { + continue + } + + return &existingLocation, nil + } + return nil, fmt.Errorf("Invalid location: %s. Available locations: %s", location, locations) +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/management/vmutils/rolestate.go b/vendor/github.com/Azure/azure-sdk-for-go/management/vmutils/rolestate.go new file mode 100644 index 000000000..e3f21285c --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/management/vmutils/rolestate.go @@ -0,0 +1,58 @@ +package vmutils + +import ( + "time" + + "github.com/Azure/azure-sdk-for-go/management" + vm "github.com/Azure/azure-sdk-for-go/management/virtualmachine" +) + +// WaitForDeploymentPowerState blocks until all role instances in deployment +// reach desired power state. +func WaitForDeploymentPowerState(client management.Client, cloudServiceName, deploymentName string, desiredPowerstate vm.PowerState) error { + for { + deployment, err := vm.NewClient(client).GetDeployment(cloudServiceName, deploymentName) + if err != nil { + return err + } + if allInstancesInPowerState(deployment.RoleInstanceList, desiredPowerstate) { + return nil + } + time.Sleep(2 * time.Second) + } +} + +func allInstancesInPowerState(instances []vm.RoleInstance, desiredPowerstate vm.PowerState) bool { + for _, r := range instances { + if r.PowerState != desiredPowerstate { + return false + } + } + + return true +} + +// WaitForDeploymentInstanceStatus blocks until all role instances in deployment +// reach desired InstanceStatus. +func WaitForDeploymentInstanceStatus(client management.Client, cloudServiceName, deploymentName string, desiredInstanceStatus vm.InstanceStatus) error { + for { + deployment, err := vm.NewClient(client).GetDeployment(cloudServiceName, deploymentName) + if err != nil { + return err + } + if allInstancesInInstanceStatus(deployment.RoleInstanceList, desiredInstanceStatus) { + return nil + } + time.Sleep(2 * time.Second) + } +} + +func allInstancesInInstanceStatus(instances []vm.RoleInstance, desiredInstancestatus vm.InstanceStatus) bool { + for _, r := range instances { + if r.InstanceStatus != desiredInstancestatus { + return false + } + } + + return true +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/management/vmutils/vmutils.go b/vendor/github.com/Azure/azure-sdk-for-go/management/vmutils/vmutils.go new file mode 100644 index 000000000..7643699df --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/management/vmutils/vmutils.go @@ -0,0 +1,148 @@ +// Package vmutils provides convenience methods for creating Virtual +// Machine Role configurations. 
+package vmutils + +import ( + "fmt" + + vm "github.com/Azure/azure-sdk-for-go/management/virtualmachine" +) + +const ( + errParamNotSpecified = "Parameter %s is not specified." +) + +// NewVMConfiguration creates configuration for a new virtual machine Role. +func NewVMConfiguration(name string, roleSize string) vm.Role { + return vm.Role{ + RoleName: name, + RoleType: "PersistentVMRole", + RoleSize: roleSize, + ProvisionGuestAgent: true, + } +} + +// ConfigureForLinux adds configuration for when deploying a generalized Linux +// image. If "password" is left empty, SSH password security will be disabled by +// default. Certificates with SSH public keys should already be uploaded to the +// cloud service where the VM will be deployed and referenced here only by their +// thumbprint. +func ConfigureForLinux(role *vm.Role, hostname, user, password string, sshPubkeyCertificateThumbprint ...string) error { + if role == nil { + return fmt.Errorf(errParamNotSpecified, "role") + } + + role.ConfigurationSets = updateOrAddConfig(role.ConfigurationSets, vm.ConfigurationSetTypeLinuxProvisioning, + func(config *vm.ConfigurationSet) { + config.HostName = hostname + config.UserName = user + config.UserPassword = password + if password != "" { + config.DisableSSHPasswordAuthentication = "false" + } + if len(sshPubkeyCertificateThumbprint) != 0 { + config.SSH = &vm.SSH{} + for _, k := range sshPubkeyCertificateThumbprint { + config.SSH.PublicKeys = append(config.SSH.PublicKeys, + vm.PublicKey{ + Fingerprint: k, + Path: "/home/" + user + "/.ssh/authorized_keys", + }, + ) + } + } + }, + ) + + return nil +} + +// ConfigureForWindows adds configuration for when deploying a generalized +// Windows image. timeZone can be left empty. For a complete list of supported +// time zone entries, you can either refer to the values listed in the registry +// entry "HKEY_LOCAL_MACHINE\SOFTWARE\Microsoft\Windows NT\CurrentVersion\Time +// Zones" or you can use the tzutil command-line tool to list the valid time. +func ConfigureForWindows(role *vm.Role, hostname, user, password string, enableAutomaticUpdates bool, timeZone string) error { + if role == nil { + return fmt.Errorf(errParamNotSpecified, "role") + } + + role.ConfigurationSets = updateOrAddConfig(role.ConfigurationSets, vm.ConfigurationSetTypeWindowsProvisioning, + func(config *vm.ConfigurationSet) { + config.ComputerName = hostname + config.AdminUsername = user + config.AdminPassword = password + config.EnableAutomaticUpdates = enableAutomaticUpdates + config.TimeZone = timeZone + }, + ) + + return nil +} + +// ConfigureWindowsToJoinDomain adds configuration to join a new Windows vm to a +// domain. 
"username" must be in UPN form (user@domain.com), "machineOU" can be +// left empty +func ConfigureWindowsToJoinDomain(role *vm.Role, username, password, domainToJoin, machineOU string) error { + if role == nil { + return fmt.Errorf(errParamNotSpecified, "role") + } + + winconfig := findConfig(role.ConfigurationSets, vm.ConfigurationSetTypeWindowsProvisioning) + if winconfig != nil { + winconfig.DomainJoin = &vm.DomainJoin{ + Credentials: vm.Credentials{Username: username, Password: password}, + JoinDomain: domainToJoin, + MachineObjectOU: machineOU, + } + } + + return nil +} + +func ConfigureWinRMListener(role *vm.Role, protocol vm.WinRMProtocol, certificateThumbprint string) error { + + if role == nil { + return fmt.Errorf(errParamNotSpecified, "role") + } + + winconfig := findConfig(role.ConfigurationSets, vm.ConfigurationSetTypeWindowsProvisioning) + + if winconfig != nil { + + listener := vm.WinRMListener{ + Protocol: protocol, + CertificateThumbprint: certificateThumbprint, + } + + if winconfig.WinRMListeners == nil { + winconfig.WinRMListeners = &[]vm.WinRMListener{} + } + + currentListeners := *winconfig.WinRMListeners + + // replace existing listener if it's already configured + for i, existingListener := range currentListeners { + if existingListener.Protocol == protocol { + currentListeners[i] = listener + return nil + } + } + + // otherwise append to list of listeners + newListeners := append(currentListeners, listener) + winconfig.WinRMListeners = &newListeners + + return nil + } + + return fmt.Errorf("WindowsProvisioningConfigurationSet not found in 'role'") +} + +func ConfigureWinRMOverHTTP(role *vm.Role) error { + return ConfigureWinRMListener(role, vm.WinRMProtocolHTTP, "") +} + +func ConfigureWinRMOverHTTPS(role *vm.Role, certificateThumbprint string) error { + return ConfigureWinRMListener(role, vm.WinRMProtocolHTTPS, certificateThumbprint) +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/management/vmutils/vmutils_test.go b/vendor/github.com/Azure/azure-sdk-for-go/management/vmutils/vmutils_test.go new file mode 100644 index 000000000..675594020 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/management/vmutils/vmutils_test.go @@ -0,0 +1,440 @@ +package vmutils + +import ( + "encoding/xml" + "testing" + + vmdisk "github.com/Azure/azure-sdk-for-go/management/virtualmachinedisk" +) + +func TestNewLinuxVmRemoteImage(t *testing.T) { + role := NewVMConfiguration("myvm", "Standard_D3") + ConfigureDeploymentFromRemoteImage(&role, + "http://remote.host/some.vhd?sv=12&sig=ukhfiuwef78687", "Linux", + "myvm-os-disk", "http://mystorageacct.blob.core.windows.net/vhds/mybrandnewvm.vhd", + "OSDisk") + ConfigureForLinux(&role, "myvm", "azureuser", "P@ssword", "2398yyKJGd78e2389ydfncuirowebhf89yh3IUOBY") + ConfigureWithPublicSSH(&role) + + bytes, err := xml.MarshalIndent(role, "", " ") + if err != nil { + t.Fatal(err) + } + + expected := ` + myvm + PersistentVMRole + + + LinuxProvisioningConfiguration + + myvm + azureuser + P@ssword + false + + + + 2398yyKJGd78e2389ydfncuirowebhf89yh3IUOBY + /home/azureuser/.ssh/authorized_keys + + + + + + + + + + NetworkConfiguration + + + + 22 + SSH + 22 + TCP + + + + + + + + + OSDisk + myvm-os-disk + http://mystorageacct.blob.core.windows.net/vhds/mybrandnewvm.vhd + Linux + http://remote.host/some.vhd?sv=12&sig=ukhfiuwef78687 + + Standard_D3 + true +` + + if string(bytes) != expected { + t.Fatalf("Expected marshalled xml to be %q, but got %q", expected, string(bytes)) + } +} + +func TestNewLinuxVmPlatformImage(t *testing.T) { + 
role := NewVMConfiguration("myplatformvm", "Standard_D3") + ConfigureDeploymentFromPlatformImage(&role, + "b39f27a8b8c64d52b05eac6a62ebad85__Ubuntu-14_04_2_LTS-amd64-server-20150309-en-us-30GB", + "http://mystorageacct.blob.core.windows.net/vhds/mybrandnewvm.vhd", "mydisklabel") + ConfigureForLinux(&role, "myvm", "azureuser", "", "2398yyKJGd78e2389ydfncuirdebhf89yh3IUOBY") + + bytes, err := xml.MarshalIndent(role, "", " ") + if err != nil { + t.Fatal(err) + } + + expected := ` + myplatformvm + PersistentVMRole + + + LinuxProvisioningConfiguration + + myvm + azureuser + + + + 2398yyKJGd78e2389ydfncuirdebhf89yh3IUOBY + /home/azureuser/.ssh/authorized_keys + + + + + + + + + + + + http://mystorageacct.blob.core.windows.net/vhds/mybrandnewvm.vhd + b39f27a8b8c64d52b05eac6a62ebad85__Ubuntu-14_04_2_LTS-amd64-server-20150309-en-us-30GB + + Standard_D3 + true +` + + if string(bytes) != expected { + t.Fatalf("Expected marshalled xml to be %q, but got %q", expected, string(bytes)) + } +} + +func TestNewVmFromVMImage(t *testing.T) { + role := NewVMConfiguration("restoredbackup", "Standard_D1") + ConfigureDeploymentFromPublishedVMImage(&role, "myvm-backup-20150209", + "http://mystorageacct.blob.core.windows.net/vhds/myoldnewvm.vhd", false) + + bytes, err := xml.MarshalIndent(role, "", " ") + if err != nil { + t.Fatal(err) + } + + expected := ` + restoredbackup + PersistentVMRole + + myvm-backup-20150209 + http://mystorageacct.blob.core.windows.net/vhds/myoldnewvm.vhd + + Standard_D1 +` + + if string(bytes) != expected { + t.Fatalf("Expected marshalled xml to be %q, but got %q", expected, string(bytes)) + } +} + +func TestNewVmFromExistingDisk(t *testing.T) { + role := NewVMConfiguration("blobvm", "Standard_D14") + ConfigureDeploymentFromExistingOSDisk(&role, "myvm-backup-20150209", "OSDisk") + ConfigureForWindows(&role, "WINVM", "azuser", "P2ssw@rd", true, "") + ConfigureWindowsToJoinDomain(&role, "user@domain.com", "youReN3verG0nnaGu3ss", "redmond.corp.contoso.com", "") + ConfigureWithNewDataDisk(&role, "my-brand-new-disk", "http://account.blob.core.windows.net/vhds/newdatadisk.vhd", + 30, vmdisk.HostCachingTypeReadWrite) + ConfigureWithExistingDataDisk(&role, "data-disk", vmdisk.HostCachingTypeReadOnly) + + bytes, err := xml.MarshalIndent(role, "", " ") + if err != nil { + t.Fatal(err) + } + + expected := ` + blobvm + PersistentVMRole + + + WindowsProvisioningConfiguration + WINVM + P2ssw@rd + true + + + + user@domain.com + youReN3verG0nnaGu3ss + + redmond.corp.contoso.com + + + azuser + + + + + + + + ReadWrite + my-brand-new-disk + 30 + http://account.blob.core.windows.net/vhds/newdatadisk.vhd + + + ReadOnly + data-disk + 1 + + + + OSDisk + myvm-backup-20150209 + + Standard_D14 + true +` + + if string(bytes) != expected { + t.Fatalf("Expected marshalled xml to be %q, but got %q", expected, string(bytes)) + } +} + +func TestWinRMOverHttps(t *testing.T) { + role := NewVMConfiguration("winrmoverhttp", "Standard_D1") + ConfigureForWindows(&role, "WINVM", "azuser", "P2ssw@rd", true, "") + ConfigureWinRMOverHTTPS(&role, "abcdef") + + bytes, err := xml.MarshalIndent(role, "", " ") + if err != nil { + t.Fatal(err) + } + + expected := ` + winrmoverhttp + PersistentVMRole + + + WindowsProvisioningConfiguration + WINVM + P2ssw@rd + true + + + + + Https + abcdef + + + + azuser + + + + + + + Standard_D1 + true +` + + if string(bytes) != expected { + t.Fatalf("Expected marshalled xml to be %q, but got %q", expected, string(bytes)) + } +} + +func TestWinRMOverHttpsWithNoThumbprint(t *testing.T) { + role := 
NewVMConfiguration("winrmoverhttp", "Standard_D1") + ConfigureForWindows(&role, "WINVM", "azuser", "P2ssw@rd", true, "") + ConfigureWinRMOverHTTPS(&role, "") + + bytes, err := xml.MarshalIndent(role, "", " ") + if err != nil { + t.Fatal(err) + } + + expected := ` + winrmoverhttp + PersistentVMRole + + + WindowsProvisioningConfiguration + WINVM + P2ssw@rd + true + + + + + Https + + + + azuser + + + + + + + Standard_D1 + true +` + + if string(bytes) != expected { + t.Fatalf("Expected marshalled xml to be %q, but got %q", expected, string(bytes)) + } +} + +func TestWinRMOverHttp(t *testing.T) { + role := NewVMConfiguration("winrmoverhttp", "Standard_D1") + ConfigureForWindows(&role, "WINVM", "azuser", "P2ssw@rd", true, "") + ConfigureWinRMOverHTTP(&role) + + bytes, err := xml.MarshalIndent(role, "", " ") + if err != nil { + t.Fatal(err) + } + + expected := ` + winrmoverhttp + PersistentVMRole + + + WindowsProvisioningConfiguration + WINVM + P2ssw@rd + true + + + + + Http + + + + azuser + + + + + + + Standard_D1 + true +` + + if string(bytes) != expected { + t.Fatalf("Expected marshalled xml to be %q, but got %q", expected, string(bytes)) + } +} + +func TestSettingWinRMOverHttpTwice(t *testing.T) { + role := NewVMConfiguration("winrmoverhttp", "Standard_D1") + ConfigureForWindows(&role, "WINVM", "azuser", "P2ssw@rd", true, "") + ConfigureWinRMOverHTTP(&role) + ConfigureWinRMOverHTTP(&role) + + bytes, err := xml.MarshalIndent(role, "", " ") + if err != nil { + t.Fatal(err) + } + + expected := ` + winrmoverhttp + PersistentVMRole + + + WindowsProvisioningConfiguration + WINVM + P2ssw@rd + true + + + + + Http + + + + azuser + + + + + + + Standard_D1 + true +` + + if string(bytes) != expected { + t.Fatalf("Expected marshalled xml to be %q, but got %q", expected, string(bytes)) + } +} + +func TestSettingWinRMOverHttpAndHttpsTwice(t *testing.T) { + role := NewVMConfiguration("winrmoverhttp", "Standard_D1") + ConfigureForWindows(&role, "WINVM", "azuser", "P2ssw@rd", true, "") + ConfigureWinRMOverHTTP(&role) + ConfigureWinRMOverHTTPS(&role, "") + ConfigureWinRMOverHTTP(&role) + ConfigureWinRMOverHTTPS(&role, "abcdef") + + bytes, err := xml.MarshalIndent(role, "", " ") + if err != nil { + t.Fatal(err) + } + + expected := ` + winrmoverhttp + PersistentVMRole + + + WindowsProvisioningConfiguration + WINVM + P2ssw@rd + true + + + + + Http + + + Https + abcdef + + + + azuser + + + + + + + Standard_D1 + true +` + + if string(bytes) != expected { + t.Fatalf("Expected marshalled xml to be %q, but got %q", expected, string(bytes)) + } +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/blob.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/blob.go new file mode 100644 index 000000000..da1d816fc --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/storage/blob.go @@ -0,0 +1,959 @@ +package storage + +import ( + "bytes" + "encoding/xml" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + "time" +) + +// BlobStorageClient contains operations for Microsoft Azure Blob Storage +// Service. +type BlobStorageClient struct { + client Client +} + +// A Container is an entry in ContainerListResponse. +type Container struct { + Name string `xml:"Name"` + Properties ContainerProperties `xml:"Properties"` + // TODO (ahmetalpbalkan) Metadata +} + +// ContainerProperties contains various properties of a container returned from +// various endpoints like ListContainers. 
+type ContainerProperties struct { + LastModified string `xml:"Last-Modified"` + Etag string `xml:"Etag"` + LeaseStatus string `xml:"LeaseStatus"` + LeaseState string `xml:"LeaseState"` + LeaseDuration string `xml:"LeaseDuration"` + // TODO (ahmetalpbalkan) remaining fields +} + +// ContainerListResponse contains the response fields from +// ListContainers call. +// +// See https://msdn.microsoft.com/en-us/library/azure/dd179352.aspx +type ContainerListResponse struct { + XMLName xml.Name `xml:"EnumerationResults"` + Xmlns string `xml:"xmlns,attr"` + Prefix string `xml:"Prefix"` + Marker string `xml:"Marker"` + NextMarker string `xml:"NextMarker"` + MaxResults int64 `xml:"MaxResults"` + Containers []Container `xml:"Containers>Container"` +} + +// A Blob is an entry in BlobListResponse. +type Blob struct { + Name string `xml:"Name"` + Properties BlobProperties `xml:"Properties"` + // TODO (ahmetalpbalkan) Metadata +} + +// BlobProperties contains various properties of a blob +// returned in various endpoints like ListBlobs or GetBlobProperties. +type BlobProperties struct { + LastModified string `xml:"Last-Modified"` + Etag string `xml:"Etag"` + ContentMD5 string `xml:"Content-MD5"` + ContentLength int64 `xml:"Content-Length"` + ContentType string `xml:"Content-Type"` + ContentEncoding string `xml:"Content-Encoding"` + BlobType BlobType `xml:"x-ms-blob-blob-type"` + SequenceNumber int64 `xml:"x-ms-blob-sequence-number"` + CopyID string `xml:"CopyId"` + CopyStatus string `xml:"CopyStatus"` + CopySource string `xml:"CopySource"` + CopyProgress string `xml:"CopyProgress"` + CopyCompletionTime string `xml:"CopyCompletionTime"` + CopyStatusDescription string `xml:"CopyStatusDescription"` +} + +// BlobListResponse contains the response fields from ListBlobs call. +// +// See https://msdn.microsoft.com/en-us/library/azure/dd135734.aspx +type BlobListResponse struct { + XMLName xml.Name `xml:"EnumerationResults"` + Xmlns string `xml:"xmlns,attr"` + Prefix string `xml:"Prefix"` + Marker string `xml:"Marker"` + NextMarker string `xml:"NextMarker"` + MaxResults int64 `xml:"MaxResults"` + Blobs []Blob `xml:"Blobs>Blob"` +} + +// ListContainersParameters defines the set of customizable parameters to make a +// List Containers call. +// +// See https://msdn.microsoft.com/en-us/library/azure/dd179352.aspx +type ListContainersParameters struct { + Prefix string + Marker string + Include string + MaxResults uint + Timeout uint +} + +func (p ListContainersParameters) getParameters() url.Values { + out := url.Values{} + + if p.Prefix != "" { + out.Set("prefix", p.Prefix) + } + if p.Marker != "" { + out.Set("marker", p.Marker) + } + if p.Include != "" { + out.Set("include", p.Include) + } + if p.MaxResults != 0 { + out.Set("maxresults", fmt.Sprintf("%v", p.MaxResults)) + } + if p.Timeout != 0 { + out.Set("timeout", fmt.Sprintf("%v", p.Timeout)) + } + + return out +} + +// ListBlobsParameters defines the set of customizable +// parameters to make a List Blobs call. 
+// +// See https://msdn.microsoft.com/en-us/library/azure/dd135734.aspx +type ListBlobsParameters struct { + Prefix string + Delimiter string + Marker string + Include string + MaxResults uint + Timeout uint +} + +func (p ListBlobsParameters) getParameters() url.Values { + out := url.Values{} + + if p.Prefix != "" { + out.Set("prefix", p.Prefix) + } + if p.Delimiter != "" { + out.Set("delimiter", p.Delimiter) + } + if p.Marker != "" { + out.Set("marker", p.Marker) + } + if p.Include != "" { + out.Set("include", p.Include) + } + if p.MaxResults != 0 { + out.Set("maxresults", fmt.Sprintf("%v", p.MaxResults)) + } + if p.Timeout != 0 { + out.Set("timeout", fmt.Sprintf("%v", p.Timeout)) + } + + return out +} + +// BlobType defines the type of the Azure Blob. +type BlobType string + +// Types of page blobs +const ( + BlobTypeBlock BlobType = "BlockBlob" + BlobTypePage BlobType = "PageBlob" +) + +// PageWriteType defines the type updates that are going to be +// done on the page blob. +type PageWriteType string + +// Types of operations on page blobs +const ( + PageWriteTypeUpdate PageWriteType = "update" + PageWriteTypeClear PageWriteType = "clear" +) + +const ( + blobCopyStatusPending = "pending" + blobCopyStatusSuccess = "success" + blobCopyStatusAborted = "aborted" + blobCopyStatusFailed = "failed" +) + +// BlockListType is used to filter out types of blocks in a Get Blocks List call +// for a block blob. +// +// See https://msdn.microsoft.com/en-us/library/azure/dd179400.aspx for all +// block types. +type BlockListType string + +// Filters for listing blocks in block blobs +const ( + BlockListTypeAll BlockListType = "all" + BlockListTypeCommitted BlockListType = "committed" + BlockListTypeUncommitted BlockListType = "uncommitted" +) + +// ContainerAccessType defines the access level to the container from a public +// request. +// +// See https://msdn.microsoft.com/en-us/library/azure/dd179468.aspx and "x-ms- +// blob-public-access" header. +type ContainerAccessType string + +// Access options for containers +const ( + ContainerAccessTypePrivate ContainerAccessType = "" + ContainerAccessTypeBlob ContainerAccessType = "blob" + ContainerAccessTypeContainer ContainerAccessType = "container" +) + +// Maximum sizes (per REST API) for various concepts +const ( + MaxBlobBlockSize = 4 * 1024 * 1024 + MaxBlobPageSize = 4 * 1024 * 1024 +) + +// BlockStatus defines states a block for a block blob can +// be in. +type BlockStatus string + +// List of statuses that can be used to refer to a block in a block list +const ( + BlockStatusUncommitted BlockStatus = "Uncommitted" + BlockStatusCommitted BlockStatus = "Committed" + BlockStatusLatest BlockStatus = "Latest" +) + +// Block is used to create Block entities for Put Block List +// call. +type Block struct { + ID string + Status BlockStatus +} + +// BlockListResponse contains the response fields from Get Block List call. +// +// See https://msdn.microsoft.com/en-us/library/azure/dd179400.aspx +type BlockListResponse struct { + XMLName xml.Name `xml:"BlockList"` + CommittedBlocks []BlockResponse `xml:"CommittedBlocks>Block"` + UncommittedBlocks []BlockResponse `xml:"UncommittedBlocks>Block"` +} + +// BlockResponse contains the block information returned +// in the GetBlockListCall. +type BlockResponse struct { + Name string `xml:"Name"` + Size int64 `xml:"Size"` +} + +// GetPageRangesResponse contains the reponse fields from +// Get Page Ranges call. 
+// +// See https://msdn.microsoft.com/en-us/library/azure/ee691973.aspx +type GetPageRangesResponse struct { + XMLName xml.Name `xml:"PageList"` + PageList []PageRange `xml:"PageRange"` +} + +// PageRange contains information about a page of a page blob from +// Get Pages Range call. +// +// See https://msdn.microsoft.com/en-us/library/azure/ee691973.aspx +type PageRange struct { + Start int64 `xml:"Start"` + End int64 `xml:"End"` +} + +var ( + errBlobCopyAborted = errors.New("storage: blob copy is aborted") + errBlobCopyIDMismatch = errors.New("storage: blob copy id is a mismatch") +) + +// ListContainers returns the list of containers in a storage account along with +// pagination token and other response details. +// +// See https://msdn.microsoft.com/en-us/library/azure/dd179352.aspx +func (b BlobStorageClient) ListContainers(params ListContainersParameters) (ContainerListResponse, error) { + q := mergeParams(params.getParameters(), url.Values{"comp": {"list"}}) + uri := b.client.getEndpoint(blobServiceName, "", q) + headers := b.client.getStandardHeaders() + + var out ContainerListResponse + resp, err := b.client.exec("GET", uri, headers, nil) + if err != nil { + return out, err + } + defer resp.body.Close() + + err = xmlUnmarshal(resp.body, &out) + return out, err +} + +// CreateContainer creates a blob container within the storage account +// with given name and access level. Returns error if container already exists. +// +// See https://msdn.microsoft.com/en-us/library/azure/dd179468.aspx +func (b BlobStorageClient) CreateContainer(name string, access ContainerAccessType) error { + resp, err := b.createContainer(name, access) + if err != nil { + return err + } + defer resp.body.Close() + return checkRespCode(resp.statusCode, []int{http.StatusCreated}) +} + +// CreateContainerIfNotExists creates a blob container if it does not exist. Returns +// true if container is newly created or false if container already exists. +func (b BlobStorageClient) CreateContainerIfNotExists(name string, access ContainerAccessType) (bool, error) { + resp, err := b.createContainer(name, access) + if resp != nil { + defer resp.body.Close() + if resp.statusCode == http.StatusCreated || resp.statusCode == http.StatusConflict { + return resp.statusCode == http.StatusCreated, nil + } + } + return false, err +} + +func (b BlobStorageClient) createContainer(name string, access ContainerAccessType) (*storageResponse, error) { + verb := "PUT" + uri := b.client.getEndpoint(blobServiceName, pathForContainer(name), url.Values{"restype": {"container"}}) + + headers := b.client.getStandardHeaders() + headers["Content-Length"] = "0" + if access != "" { + headers["x-ms-blob-public-access"] = string(access) + } + return b.client.exec(verb, uri, headers, nil) +} + +// ContainerExists returns true if a container with given name exists +// on the storage account, otherwise returns false. +func (b BlobStorageClient) ContainerExists(name string) (bool, error) { + verb := "HEAD" + uri := b.client.getEndpoint(blobServiceName, pathForContainer(name), url.Values{"restype": {"container"}}) + headers := b.client.getStandardHeaders() + + resp, err := b.client.exec(verb, uri, headers, nil) + if resp != nil { + defer resp.body.Close() + if resp.statusCode == http.StatusOK || resp.statusCode == http.StatusNotFound { + return resp.statusCode == http.StatusOK, nil + } + } + return false, err +} + +// DeleteContainer deletes the container with given name on the storage +// account. If the container does not exist returns error. 
+// +// See https://msdn.microsoft.com/en-us/library/azure/dd179408.aspx +func (b BlobStorageClient) DeleteContainer(name string) error { + resp, err := b.deleteContainer(name) + if err != nil { + return err + } + defer resp.body.Close() + return checkRespCode(resp.statusCode, []int{http.StatusAccepted}) +} + +// DeleteContainerIfExists deletes the container with given name on the storage +// account if it exists. Returns true if container is deleted with this call, or +// false if the container did not exist at the time of the Delete Container +// operation. +// +// See https://msdn.microsoft.com/en-us/library/azure/dd179408.aspx +func (b BlobStorageClient) DeleteContainerIfExists(name string) (bool, error) { + resp, err := b.deleteContainer(name) + if resp != nil { + defer resp.body.Close() + if resp.statusCode == http.StatusAccepted || resp.statusCode == http.StatusNotFound { + return resp.statusCode == http.StatusAccepted, nil + } + } + return false, err +} + +func (b BlobStorageClient) deleteContainer(name string) (*storageResponse, error) { + verb := "DELETE" + uri := b.client.getEndpoint(blobServiceName, pathForContainer(name), url.Values{"restype": {"container"}}) + + headers := b.client.getStandardHeaders() + return b.client.exec(verb, uri, headers, nil) +} + +// ListBlobs returns an object that contains list of blobs in the container, +// pagination token and other information in the response of List Blobs call. +// +// See https://msdn.microsoft.com/en-us/library/azure/dd135734.aspx +func (b BlobStorageClient) ListBlobs(container string, params ListBlobsParameters) (BlobListResponse, error) { + q := mergeParams(params.getParameters(), url.Values{ + "restype": {"container"}, + "comp": {"list"}}) + uri := b.client.getEndpoint(blobServiceName, pathForContainer(container), q) + headers := b.client.getStandardHeaders() + + var out BlobListResponse + resp, err := b.client.exec("GET", uri, headers, nil) + if err != nil { + return out, err + } + defer resp.body.Close() + + err = xmlUnmarshal(resp.body, &out) + return out, err +} + +// BlobExists returns true if a blob with given name exists on the specified +// container of the storage account. +func (b BlobStorageClient) BlobExists(container, name string) (bool, error) { + verb := "HEAD" + uri := b.client.getEndpoint(blobServiceName, pathForBlob(container, name), url.Values{}) + + headers := b.client.getStandardHeaders() + resp, err := b.client.exec(verb, uri, headers, nil) + if resp != nil { + defer resp.body.Close() + if resp.statusCode == http.StatusOK || resp.statusCode == http.StatusNotFound { + return resp.statusCode == http.StatusOK, nil + } + } + return false, err +} + +// GetBlobURL gets the canonical URL to the blob with the specified name in the +// specified container. This method does not create a publicly accessible URL if +// the blob or container is private and this method does not check if the blob +// exists. +func (b BlobStorageClient) GetBlobURL(container, name string) string { + if container == "" { + container = "$root" + } + return b.client.getEndpoint(blobServiceName, pathForBlob(container, name), url.Values{}) +} + +// GetBlob returns a stream to read the blob. Caller must call Close() the +// reader to close on the underlying connection. 
+// +// See https://msdn.microsoft.com/en-us/library/azure/dd179440.aspx +func (b BlobStorageClient) GetBlob(container, name string) (io.ReadCloser, error) { + resp, err := b.getBlobRange(container, name, "") + if err != nil { + return nil, err + } + + if err := checkRespCode(resp.statusCode, []int{http.StatusOK}); err != nil { + return nil, err + } + return resp.body, nil +} + +// GetBlobRange reads the specified range of a blob to a stream. The bytesRange +// string must be in a format like "0-", "10-100" as defined in HTTP 1.1 spec. +// +// See https://msdn.microsoft.com/en-us/library/azure/dd179440.aspx +func (b BlobStorageClient) GetBlobRange(container, name, bytesRange string) (io.ReadCloser, error) { + resp, err := b.getBlobRange(container, name, bytesRange) + if err != nil { + return nil, err + } + + if err := checkRespCode(resp.statusCode, []int{http.StatusPartialContent}); err != nil { + return nil, err + } + return resp.body, nil +} + +func (b BlobStorageClient) getBlobRange(container, name, bytesRange string) (*storageResponse, error) { + verb := "GET" + uri := b.client.getEndpoint(blobServiceName, pathForBlob(container, name), url.Values{}) + + headers := b.client.getStandardHeaders() + if bytesRange != "" { + headers["Range"] = fmt.Sprintf("bytes=%s", bytesRange) + } + resp, err := b.client.exec(verb, uri, headers, nil) + if err != nil { + return nil, err + } + return resp, err +} + +// GetBlobProperties provides various information about the specified +// blob. See https://msdn.microsoft.com/en-us/library/azure/dd179394.aspx +func (b BlobStorageClient) GetBlobProperties(container, name string) (*BlobProperties, error) { + verb := "HEAD" + uri := b.client.getEndpoint(blobServiceName, pathForBlob(container, name), url.Values{}) + + headers := b.client.getStandardHeaders() + resp, err := b.client.exec(verb, uri, headers, nil) + if err != nil { + return nil, err + } + defer resp.body.Close() + + if err := checkRespCode(resp.statusCode, []int{http.StatusOK}); err != nil { + return nil, err + } + + var contentLength int64 + contentLengthStr := resp.headers.Get("Content-Length") + if contentLengthStr != "" { + contentLength, err = strconv.ParseInt(contentLengthStr, 0, 64) + if err != nil { + return nil, err + } + } + + var sequenceNum int64 + sequenceNumStr := resp.headers.Get("x-ms-blob-sequence-number") + if sequenceNumStr != "" { + sequenceNum, err = strconv.ParseInt(sequenceNumStr, 0, 64) + if err != nil { + return nil, err + } + } + + return &BlobProperties{ + LastModified: resp.headers.Get("Last-Modified"), + Etag: resp.headers.Get("Etag"), + ContentMD5: resp.headers.Get("Content-MD5"), + ContentLength: contentLength, + ContentEncoding: resp.headers.Get("Content-Encoding"), + SequenceNumber: sequenceNum, + CopyCompletionTime: resp.headers.Get("x-ms-copy-completion-time"), + CopyStatusDescription: resp.headers.Get("x-ms-copy-status-description"), + CopyID: resp.headers.Get("x-ms-copy-id"), + CopyProgress: resp.headers.Get("x-ms-copy-progress"), + CopySource: resp.headers.Get("x-ms-copy-source"), + CopyStatus: resp.headers.Get("x-ms-copy-status"), + BlobType: BlobType(resp.headers.Get("x-ms-blob-type")), + }, nil +} + +// SetBlobMetadata replaces the metadata for the specified blob. +// +// Some keys may be converted to Camel-Case before sending. All keys +// are returned in lower case by GetBlobMetadata. HTTP header names +// are case-insensitive so case munging should not matter to other +// applications either. 
+// +// See https://msdn.microsoft.com/en-us/library/azure/dd179414.aspx +func (b BlobStorageClient) SetBlobMetadata(container, name string, metadata map[string]string) error { + params := url.Values{"comp": {"metadata"}} + uri := b.client.getEndpoint(blobServiceName, pathForBlob(container, name), params) + headers := b.client.getStandardHeaders() + for k, v := range metadata { + headers[userDefinedMetadataHeaderPrefix+k] = v + } + headers["Content-Length"] = "0" + + resp, err := b.client.exec("PUT", uri, headers, nil) + if err != nil { + return err + } + defer resp.body.Close() + + return checkRespCode(resp.statusCode, []int{http.StatusOK}) +} + +// GetBlobMetadata returns all user-defined metadata for the specified blob. +// +// All metadata keys will be returned in lower case. (HTTP header +// names are case-insensitive.) +// +// See https://msdn.microsoft.com/en-us/library/azure/dd179414.aspx +func (b BlobStorageClient) GetBlobMetadata(container, name string) (map[string]string, error) { + params := url.Values{"comp": {"metadata"}} + uri := b.client.getEndpoint(blobServiceName, pathForBlob(container, name), params) + headers := b.client.getStandardHeaders() + + resp, err := b.client.exec("GET", uri, headers, nil) + if err != nil { + return nil, err + } + defer resp.body.Close() + + if err := checkRespCode(resp.statusCode, []int{http.StatusOK}); err != nil { + return nil, err + } + + metadata := make(map[string]string) + for k, v := range resp.headers { + // Can't trust CanonicalHeaderKey() to munge case + // reliably. "_" is allowed in identifiers: + // https://msdn.microsoft.com/en-us/library/azure/dd179414.aspx + // https://msdn.microsoft.com/library/aa664670(VS.71).aspx + // http://tools.ietf.org/html/rfc7230#section-3.2 + // ...but "_" is considered invalid by + // CanonicalMIMEHeaderKey in + // https://golang.org/src/net/textproto/reader.go?s=14615:14659#L542 + // so k can be "X-Ms-Meta-Foo" or "x-ms-meta-foo_bar". + k = strings.ToLower(k) + if len(v) == 0 || !strings.HasPrefix(k, strings.ToLower(userDefinedMetadataHeaderPrefix)) { + continue + } + // metadata["foo"] = content of the last X-Ms-Meta-Foo header + k = k[len(userDefinedMetadataHeaderPrefix):] + metadata[k] = v[len(v)-1] + } + return metadata, nil +} + +// CreateBlockBlob initializes an empty block blob with no blocks. +// +// See https://msdn.microsoft.com/en-us/library/azure/dd179451.aspx +func (b BlobStorageClient) CreateBlockBlob(container, name string) error { + return b.CreateBlockBlobFromReader(container, name, 0, nil, nil) +} + +// CreateBlockBlobFromReader initializes a block blob using data from +// reader. Size must be the number of bytes read from reader. To +// create an empty blob, use size==0 and reader==nil. +// +// The API rejects requests with size > 64 MiB (but this limit is not +// checked by the SDK). To write a larger blob, use CreateBlockBlob, +// PutBlock, and PutBlockList. 
+// +// See https://msdn.microsoft.com/en-us/library/azure/dd179451.aspx +func (b BlobStorageClient) CreateBlockBlobFromReader(container, name string, size uint64, blob io.Reader, extraHeaders map[string]string) error { + path := fmt.Sprintf("%s/%s", container, name) + uri := b.client.getEndpoint(blobServiceName, path, url.Values{}) + headers := b.client.getStandardHeaders() + headers["x-ms-blob-type"] = string(BlobTypeBlock) + headers["Content-Length"] = fmt.Sprintf("%d", size) + + for k, v := range extraHeaders { + headers[k] = v + } + + resp, err := b.client.exec("PUT", uri, headers, blob) + if err != nil { + return err + } + defer resp.body.Close() + return checkRespCode(resp.statusCode, []int{http.StatusCreated}) +} + +// PutBlock saves the given data chunk to the specified block blob with +// given ID. +// +// The API rejects chunks larger than 4 MiB (but this limit is not +// checked by the SDK). +// +// See https://msdn.microsoft.com/en-us/library/azure/dd135726.aspx +func (b BlobStorageClient) PutBlock(container, name, blockID string, chunk []byte) error { + return b.PutBlockWithLength(container, name, blockID, uint64(len(chunk)), bytes.NewReader(chunk), nil) +} + +// PutBlockWithLength saves the given data stream of exactly specified size to +// the block blob with given ID. It is an alternative to PutBlocks where data +// comes as stream but the length is known in advance. +// +// The API rejects requests with size > 4 MiB (but this limit is not +// checked by the SDK). +// +// See https://msdn.microsoft.com/en-us/library/azure/dd135726.aspx +func (b BlobStorageClient) PutBlockWithLength(container, name, blockID string, size uint64, blob io.Reader, extraHeaders map[string]string) error { + uri := b.client.getEndpoint(blobServiceName, pathForBlob(container, name), url.Values{"comp": {"block"}, "blockid": {blockID}}) + headers := b.client.getStandardHeaders() + headers["x-ms-blob-type"] = string(BlobTypeBlock) + headers["Content-Length"] = fmt.Sprintf("%v", size) + + for k, v := range extraHeaders { + headers[k] = v + } + + resp, err := b.client.exec("PUT", uri, headers, blob) + if err != nil { + return err + } + + defer resp.body.Close() + return checkRespCode(resp.statusCode, []int{http.StatusCreated}) +} + +// PutBlockList saves list of blocks to the specified block blob. +// +// See https://msdn.microsoft.com/en-us/library/azure/dd179467.aspx +func (b BlobStorageClient) PutBlockList(container, name string, blocks []Block) error { + blockListXML := prepareBlockListRequest(blocks) + + uri := b.client.getEndpoint(blobServiceName, pathForBlob(container, name), url.Values{"comp": {"blocklist"}}) + headers := b.client.getStandardHeaders() + headers["Content-Length"] = fmt.Sprintf("%v", len(blockListXML)) + + resp, err := b.client.exec("PUT", uri, headers, strings.NewReader(blockListXML)) + if err != nil { + return err + } + defer resp.body.Close() + return checkRespCode(resp.statusCode, []int{http.StatusCreated}) +} + +// GetBlockList retrieves list of blocks in the specified block blob. 
+// +// See https://msdn.microsoft.com/en-us/library/azure/dd179400.aspx +func (b BlobStorageClient) GetBlockList(container, name string, blockType BlockListType) (BlockListResponse, error) { + params := url.Values{"comp": {"blocklist"}, "blocklisttype": {string(blockType)}} + uri := b.client.getEndpoint(blobServiceName, pathForBlob(container, name), params) + headers := b.client.getStandardHeaders() + + var out BlockListResponse + resp, err := b.client.exec("GET", uri, headers, nil) + if err != nil { + return out, err + } + defer resp.body.Close() + + err = xmlUnmarshal(resp.body, &out) + return out, err +} + +// PutPageBlob initializes an empty page blob with specified name and maximum +// size in bytes (size must be aligned to a 512-byte boundary). A page blob must +// be created using this method before writing pages. +// +// See https://msdn.microsoft.com/en-us/library/azure/dd179451.aspx +func (b BlobStorageClient) PutPageBlob(container, name string, size int64, extraHeaders map[string]string) error { + path := fmt.Sprintf("%s/%s", container, name) + uri := b.client.getEndpoint(blobServiceName, path, url.Values{}) + headers := b.client.getStandardHeaders() + headers["x-ms-blob-type"] = string(BlobTypePage) + headers["x-ms-blob-content-length"] = fmt.Sprintf("%v", size) + headers["Content-Length"] = fmt.Sprintf("%v", 0) + + for k, v := range extraHeaders { + headers[k] = v + } + + resp, err := b.client.exec("PUT", uri, headers, nil) + if err != nil { + return err + } + defer resp.body.Close() + + return checkRespCode(resp.statusCode, []int{http.StatusCreated}) +} + +// PutPage writes a range of pages to a page blob or clears the given range. +// In case of 'clear' writes, given chunk is discarded. Ranges must be aligned +// with 512-byte boundaries and chunk must be of size multiplies by 512. +// +// See https://msdn.microsoft.com/en-us/library/ee691975.aspx +func (b BlobStorageClient) PutPage(container, name string, startByte, endByte int64, writeType PageWriteType, chunk []byte) error { + path := fmt.Sprintf("%s/%s", container, name) + uri := b.client.getEndpoint(blobServiceName, path, url.Values{"comp": {"page"}}) + headers := b.client.getStandardHeaders() + headers["x-ms-blob-type"] = string(BlobTypePage) + headers["x-ms-page-write"] = string(writeType) + headers["x-ms-range"] = fmt.Sprintf("bytes=%v-%v", startByte, endByte) + + var contentLength int64 + var data io.Reader + if writeType == PageWriteTypeClear { + contentLength = 0 + data = bytes.NewReader([]byte{}) + } else { + contentLength = int64(len(chunk)) + data = bytes.NewReader(chunk) + } + headers["Content-Length"] = fmt.Sprintf("%v", contentLength) + + resp, err := b.client.exec("PUT", uri, headers, data) + if err != nil { + return err + } + defer resp.body.Close() + + return checkRespCode(resp.statusCode, []int{http.StatusCreated}) +} + +// GetPageRanges returns the list of valid page ranges for a page blob. 
+// +// See https://msdn.microsoft.com/en-us/library/azure/ee691973.aspx +func (b BlobStorageClient) GetPageRanges(container, name string) (GetPageRangesResponse, error) { + path := fmt.Sprintf("%s/%s", container, name) + uri := b.client.getEndpoint(blobServiceName, path, url.Values{"comp": {"pagelist"}}) + headers := b.client.getStandardHeaders() + + var out GetPageRangesResponse + resp, err := b.client.exec("GET", uri, headers, nil) + if err != nil { + return out, err + } + defer resp.body.Close() + + if err := checkRespCode(resp.statusCode, []int{http.StatusOK}); err != nil { + return out, err + } + err = xmlUnmarshal(resp.body, &out) + return out, err +} + +// CopyBlob starts a blob copy operation and waits for the operation to +// complete. sourceBlob parameter must be a canonical URL to the blob (can be +// obtained using GetBlobURL method.) There is no SLA on blob copy and therefore +// this helper method works faster on smaller files. +// +// See https://msdn.microsoft.com/en-us/library/azure/dd894037.aspx +func (b BlobStorageClient) CopyBlob(container, name, sourceBlob string) error { + copyID, err := b.startBlobCopy(container, name, sourceBlob) + if err != nil { + return err + } + + return b.waitForBlobCopy(container, name, copyID) +} + +func (b BlobStorageClient) startBlobCopy(container, name, sourceBlob string) (string, error) { + uri := b.client.getEndpoint(blobServiceName, pathForBlob(container, name), url.Values{}) + + headers := b.client.getStandardHeaders() + headers["Content-Length"] = "0" + headers["x-ms-copy-source"] = sourceBlob + + resp, err := b.client.exec("PUT", uri, headers, nil) + if err != nil { + return "", err + } + defer resp.body.Close() + + if err := checkRespCode(resp.statusCode, []int{http.StatusAccepted, http.StatusCreated}); err != nil { + return "", err + } + + copyID := resp.headers.Get("x-ms-copy-id") + if copyID == "" { + return "", errors.New("Got empty copy id header") + } + return copyID, nil +} + +func (b BlobStorageClient) waitForBlobCopy(container, name, copyID string) error { + for { + props, err := b.GetBlobProperties(container, name) + if err != nil { + return err + } + + if props.CopyID != copyID { + return errBlobCopyIDMismatch + } + + switch props.CopyStatus { + case blobCopyStatusSuccess: + return nil + case blobCopyStatusPending: + continue + case blobCopyStatusAborted: + return errBlobCopyAborted + case blobCopyStatusFailed: + return fmt.Errorf("storage: blob copy failed. Id=%s Description=%s", props.CopyID, props.CopyStatusDescription) + default: + return fmt.Errorf("storage: unhandled blob copy status: '%s'", props.CopyStatus) + } + } +} + +// DeleteBlob deletes the given blob from the specified container. +// If the blob does not exists at the time of the Delete Blob operation, it +// returns error. See https://msdn.microsoft.com/en-us/library/azure/dd179413.aspx +func (b BlobStorageClient) DeleteBlob(container, name string) error { + resp, err := b.deleteBlob(container, name) + if err != nil { + return err + } + defer resp.body.Close() + return checkRespCode(resp.statusCode, []int{http.StatusAccepted}) +} + +// DeleteBlobIfExists deletes the given blob from the specified container If the +// blob is deleted with this call, returns true. Otherwise returns false. 
+// +// See https://msdn.microsoft.com/en-us/library/azure/dd179413.aspx +func (b BlobStorageClient) DeleteBlobIfExists(container, name string) (bool, error) { + resp, err := b.deleteBlob(container, name) + if resp != nil && (resp.statusCode == http.StatusAccepted || resp.statusCode == http.StatusNotFound) { + return resp.statusCode == http.StatusAccepted, nil + } + defer resp.body.Close() + return false, err +} + +func (b BlobStorageClient) deleteBlob(container, name string) (*storageResponse, error) { + verb := "DELETE" + uri := b.client.getEndpoint(blobServiceName, pathForBlob(container, name), url.Values{}) + headers := b.client.getStandardHeaders() + + return b.client.exec(verb, uri, headers, nil) +} + +// helper method to construct the path to a container given its name +func pathForContainer(name string) string { + return fmt.Sprintf("/%s", name) +} + +// helper method to construct the path to a blob given its container and blob +// name +func pathForBlob(container, name string) string { + return fmt.Sprintf("/%s/%s", container, name) +} + +// GetBlobSASURI creates an URL to the specified blob which contains the Shared +// Access Signature with specified permissions and expiration time. +// +// See https://msdn.microsoft.com/en-us/library/azure/ee395415.aspx +func (b BlobStorageClient) GetBlobSASURI(container, name string, expiry time.Time, permissions string) (string, error) { + var ( + signedPermissions = permissions + blobURL = b.GetBlobURL(container, name) + ) + canonicalizedResource, err := b.client.buildCanonicalizedResource(blobURL) + if err != nil { + return "", err + } + signedExpiry := expiry.Format(time.RFC3339) + signedResource := "b" + + stringToSign, err := blobSASStringToSign(b.client.apiVersion, canonicalizedResource, signedExpiry, signedPermissions) + if err != nil { + return "", err + } + + sig := b.client.computeHmac256(stringToSign) + sasParams := url.Values{ + "sv": {b.client.apiVersion}, + "se": {signedExpiry}, + "sr": {signedResource}, + "sp": {signedPermissions}, + "sig": {sig}, + } + + sasURL, err := url.Parse(blobURL) + if err != nil { + return "", err + } + sasURL.RawQuery = sasParams.Encode() + return sasURL.String(), nil +} + +func blobSASStringToSign(signedVersion, canonicalizedResource, signedExpiry, signedPermissions string) (string, error) { + var signedStart, signedIdentifier, rscc, rscd, rsce, rscl, rsct string + + // reference: http://msdn.microsoft.com/en-us/library/azure/dn140255.aspx + if signedVersion >= "2013-08-15" { + return fmt.Sprintf("%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s", signedPermissions, signedStart, signedExpiry, canonicalizedResource, signedIdentifier, signedVersion, rscc, rscd, rsce, rscl, rsct), nil + } + return "", errors.New("storage: not implemented SAS for versions earlier than 2013-08-15") +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/blob_test.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/blob_test.go new file mode 100644 index 000000000..df3156417 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/storage/blob_test.go @@ -0,0 +1,715 @@ +package storage + +import ( + "bytes" + "crypto/rand" + "encoding/base64" + "fmt" + "io" + "io/ioutil" + "net/http" + "net/url" + "sort" + "sync" + "testing" + "time" + + chk "github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/gopkg.in/check.v1" +) + +type StorageBlobSuite struct{} + +var _ = chk.Suite(&StorageBlobSuite{}) + +const testContainerPrefix = "zzzztest-" + +func getBlobClient(c *chk.C) BlobStorageClient { + return 
getBasicClient(c).GetBlobService() +} + +func (s *StorageBlobSuite) Test_pathForContainer(c *chk.C) { + c.Assert(pathForContainer("foo"), chk.Equals, "/foo") +} + +func (s *StorageBlobSuite) Test_pathForBlob(c *chk.C) { + c.Assert(pathForBlob("foo", "blob"), chk.Equals, "/foo/blob") +} + +func (s *StorageBlobSuite) Test_blobSASStringToSign(c *chk.C) { + _, err := blobSASStringToSign("2012-02-12", "CS", "SE", "SP") + c.Assert(err, chk.NotNil) // not implemented SAS for versions earlier than 2013-08-15 + + out, err := blobSASStringToSign("2013-08-15", "CS", "SE", "SP") + c.Assert(err, chk.IsNil) + c.Assert(out, chk.Equals, "SP\n\nSE\nCS\n\n2013-08-15\n\n\n\n\n") +} + +func (s *StorageBlobSuite) TestGetBlobSASURI(c *chk.C) { + api, err := NewClient("foo", "YmFy", DefaultBaseURL, "2013-08-15", true) + c.Assert(err, chk.IsNil) + cli := api.GetBlobService() + expiry := time.Time{} + + expectedParts := url.URL{ + Scheme: "https", + Host: "foo.blob.core.windows.net", + Path: "container/name", + RawQuery: url.Values{ + "sv": {"2013-08-15"}, + "sig": {"/OXG7rWh08jYwtU03GzJM0DHZtidRGpC6g69rSGm3I0="}, + "sr": {"b"}, + "sp": {"r"}, + "se": {"0001-01-01T00:00:00Z"}, + }.Encode()} + + u, err := cli.GetBlobSASURI("container", "name", expiry, "r") + c.Assert(err, chk.IsNil) + sasParts, err := url.Parse(u) + c.Assert(err, chk.IsNil) + c.Assert(expectedParts.String(), chk.Equals, sasParts.String()) + c.Assert(expectedParts.Query(), chk.DeepEquals, sasParts.Query()) +} + +func (s *StorageBlobSuite) TestBlobSASURICorrectness(c *chk.C) { + cli := getBlobClient(c) + cnt := randContainer() + blob := randString(20) + body := []byte(randString(100)) + expiry := time.Now().UTC().Add(time.Hour) + permissions := "r" + + c.Assert(cli.CreateContainer(cnt, ContainerAccessTypePrivate), chk.IsNil) + defer cli.DeleteContainer(cnt) + + c.Assert(cli.putSingleBlockBlob(cnt, blob, body), chk.IsNil) + + sasURI, err := cli.GetBlobSASURI(cnt, blob, expiry, permissions) + c.Assert(err, chk.IsNil) + + resp, err := http.Get(sasURI) + c.Assert(err, chk.IsNil) + + blobResp, err := ioutil.ReadAll(resp.Body) + defer resp.Body.Close() + c.Assert(err, chk.IsNil) + + c.Assert(resp.StatusCode, chk.Equals, http.StatusOK) + c.Assert(len(blobResp), chk.Equals, len(body)) +} + +func (s *StorageBlobSuite) TestListContainersPagination(c *chk.C) { + cli := getBlobClient(c) + c.Assert(deleteTestContainers(cli), chk.IsNil) + + const n = 5 + const pageSize = 2 + + // Create test containers + created := []string{} + for i := 0; i < n; i++ { + name := randContainer() + c.Assert(cli.CreateContainer(name, ContainerAccessTypePrivate), chk.IsNil) + created = append(created, name) + } + sort.Strings(created) + + // Defer test container deletions + defer func() { + var wg sync.WaitGroup + for _, cnt := range created { + wg.Add(1) + go func(name string) { + c.Assert(cli.DeleteContainer(name), chk.IsNil) + wg.Done() + }(cnt) + } + wg.Wait() + }() + + // Paginate results + seen := []string{} + marker := "" + for { + resp, err := cli.ListContainers(ListContainersParameters{ + Prefix: testContainerPrefix, + MaxResults: pageSize, + Marker: marker}) + c.Assert(err, chk.IsNil) + + containers := resp.Containers + if len(containers) > pageSize { + c.Fatalf("Got a bigger page. 
Expected: %d, got: %d", pageSize, len(containers)) + } + + for _, c := range containers { + seen = append(seen, c.Name) + } + + marker = resp.NextMarker + if marker == "" || len(containers) == 0 { + break + } + } + + c.Assert(seen, chk.DeepEquals, created) +} + +func (s *StorageBlobSuite) TestContainerExists(c *chk.C) { + cnt := randContainer() + cli := getBlobClient(c) + ok, err := cli.ContainerExists(cnt) + c.Assert(err, chk.IsNil) + c.Assert(ok, chk.Equals, false) + + c.Assert(cli.CreateContainer(cnt, ContainerAccessTypeBlob), chk.IsNil) + defer cli.DeleteContainer(cnt) + + ok, err = cli.ContainerExists(cnt) + c.Assert(err, chk.IsNil) + c.Assert(ok, chk.Equals, true) +} + +func (s *StorageBlobSuite) TestCreateContainerDeleteContainer(c *chk.C) { + cnt := randContainer() + cli := getBlobClient(c) + c.Assert(cli.CreateContainer(cnt, ContainerAccessTypePrivate), chk.IsNil) + c.Assert(cli.DeleteContainer(cnt), chk.IsNil) +} + +func (s *StorageBlobSuite) TestCreateContainerIfNotExists(c *chk.C) { + cnt := randContainer() + cli := getBlobClient(c) + defer cli.DeleteContainer(cnt) + + // First create + ok, err := cli.CreateContainerIfNotExists(cnt, ContainerAccessTypePrivate) + c.Assert(err, chk.IsNil) + c.Assert(ok, chk.Equals, true) + + // Second create, should not give errors + ok, err = cli.CreateContainerIfNotExists(cnt, ContainerAccessTypePrivate) + c.Assert(err, chk.IsNil) + c.Assert(ok, chk.Equals, false) +} + +func (s *StorageBlobSuite) TestDeleteContainerIfExists(c *chk.C) { + cnt := randContainer() + cli := getBlobClient(c) + + // Nonexisting container + c.Assert(cli.DeleteContainer(cnt), chk.NotNil) + + ok, err := cli.DeleteContainerIfExists(cnt) + c.Assert(err, chk.IsNil) + c.Assert(ok, chk.Equals, false) + + // Existing container + c.Assert(cli.CreateContainer(cnt, ContainerAccessTypePrivate), chk.IsNil) + ok, err = cli.DeleteContainerIfExists(cnt) + c.Assert(err, chk.IsNil) + c.Assert(ok, chk.Equals, true) +} + +func (s *StorageBlobSuite) TestBlobExists(c *chk.C) { + cnt := randContainer() + blob := randString(20) + cli := getBlobClient(c) + + c.Assert(cli.CreateContainer(cnt, ContainerAccessTypeBlob), chk.IsNil) + defer cli.DeleteContainer(cnt) + c.Assert(cli.putSingleBlockBlob(cnt, blob, []byte("Hello!")), chk.IsNil) + defer cli.DeleteBlob(cnt, blob) + + ok, err := cli.BlobExists(cnt, blob+".foo") + c.Assert(err, chk.IsNil) + c.Assert(ok, chk.Equals, false) + + ok, err = cli.BlobExists(cnt, blob) + c.Assert(err, chk.IsNil) + c.Assert(ok, chk.Equals, true) +} + +func (s *StorageBlobSuite) TestGetBlobURL(c *chk.C) { + api, err := NewBasicClient("foo", "YmFy") + c.Assert(err, chk.IsNil) + cli := api.GetBlobService() + + c.Assert(cli.GetBlobURL("c", "nested/blob"), chk.Equals, "https://foo.blob.core.windows.net/c/nested/blob") + c.Assert(cli.GetBlobURL("", "blob"), chk.Equals, "https://foo.blob.core.windows.net/$root/blob") + c.Assert(cli.GetBlobURL("", "nested/blob"), chk.Equals, "https://foo.blob.core.windows.net/$root/nested/blob") +} + +func (s *StorageBlobSuite) TestBlobCopy(c *chk.C) { + if testing.Short() { + c.Skip("skipping blob copy in short mode, no SLA on async operation") + } + + cli := getBlobClient(c) + cnt := randContainer() + src := randString(20) + dst := randString(20) + body := []byte(randString(1024)) + + c.Assert(cli.CreateContainer(cnt, ContainerAccessTypePrivate), chk.IsNil) + defer cli.deleteContainer(cnt) + + c.Assert(cli.putSingleBlockBlob(cnt, src, body), chk.IsNil) + defer cli.DeleteBlob(cnt, src) + + c.Assert(cli.CopyBlob(cnt, dst, cli.GetBlobURL(cnt, 
src)), chk.IsNil) + defer cli.DeleteBlob(cnt, dst) + + blobBody, err := cli.GetBlob(cnt, dst) + c.Assert(err, chk.IsNil) + + b, err := ioutil.ReadAll(blobBody) + defer blobBody.Close() + c.Assert(err, chk.IsNil) + c.Assert(b, chk.DeepEquals, body) +} + +func (s *StorageBlobSuite) TestDeleteBlobIfExists(c *chk.C) { + cnt := randContainer() + blob := randString(20) + + cli := getBlobClient(c) + c.Assert(cli.DeleteBlob(cnt, blob), chk.NotNil) + + ok, err := cli.DeleteBlobIfExists(cnt, blob) + c.Assert(err, chk.IsNil) + c.Assert(ok, chk.Equals, false) +} + +func (s *StorageBlobSuite) TestGetBlobProperties(c *chk.C) { + cnt := randContainer() + blob := randString(20) + contents := randString(64) + + cli := getBlobClient(c) + c.Assert(cli.CreateContainer(cnt, ContainerAccessTypePrivate), chk.IsNil) + defer cli.DeleteContainer(cnt) + + // Nonexisting blob + _, err := cli.GetBlobProperties(cnt, blob) + c.Assert(err, chk.NotNil) + + // Put the blob + c.Assert(cli.putSingleBlockBlob(cnt, blob, []byte(contents)), chk.IsNil) + + // Get blob properties + props, err := cli.GetBlobProperties(cnt, blob) + c.Assert(err, chk.IsNil) + + c.Assert(props.ContentLength, chk.Equals, int64(len(contents))) + c.Assert(props.BlobType, chk.Equals, BlobTypeBlock) +} + +func (s *StorageBlobSuite) TestListBlobsPagination(c *chk.C) { + cli := getBlobClient(c) + cnt := randContainer() + + c.Assert(cli.CreateContainer(cnt, ContainerAccessTypePrivate), chk.IsNil) + defer cli.DeleteContainer(cnt) + + blobs := []string{} + const n = 5 + const pageSize = 2 + for i := 0; i < n; i++ { + name := randString(20) + c.Assert(cli.putSingleBlockBlob(cnt, name, []byte("Hello, world!")), chk.IsNil) + blobs = append(blobs, name) + } + sort.Strings(blobs) + + // Paginate + seen := []string{} + marker := "" + for { + resp, err := cli.ListBlobs(cnt, ListBlobsParameters{ + MaxResults: pageSize, + Marker: marker}) + c.Assert(err, chk.IsNil) + + for _, v := range resp.Blobs { + seen = append(seen, v.Name) + } + + marker = resp.NextMarker + if marker == "" || len(resp.Blobs) == 0 { + break + } + } + + // Compare + c.Assert(seen, chk.DeepEquals, blobs) +} + +func (s *StorageBlobSuite) TestGetAndSetMetadata(c *chk.C) { + cli := getBlobClient(c) + cnt := randContainer() + + c.Assert(cli.CreateContainer(cnt, ContainerAccessTypePrivate), chk.IsNil) + defer cli.deleteContainer(cnt) + + blob := randString(20) + c.Assert(cli.putSingleBlockBlob(cnt, blob, []byte{}), chk.IsNil) + + m, err := cli.GetBlobMetadata(cnt, blob) + c.Assert(err, chk.IsNil) + c.Assert(m, chk.Not(chk.Equals), nil) + c.Assert(len(m), chk.Equals, 0) + + mPut := map[string]string{ + "foo": "bar", + "bar_baz": "waz qux", + } + + err = cli.SetBlobMetadata(cnt, blob, mPut) + c.Assert(err, chk.IsNil) + + m, err = cli.GetBlobMetadata(cnt, blob) + c.Assert(err, chk.IsNil) + c.Check(m, chk.DeepEquals, mPut) + + // Case munging + + mPutUpper := map[string]string{ + "Foo": "different bar", + "bar_BAZ": "different waz qux", + } + mExpectLower := map[string]string{ + "foo": "different bar", + "bar_baz": "different waz qux", + } + + err = cli.SetBlobMetadata(cnt, blob, mPutUpper) + c.Assert(err, chk.IsNil) + + m, err = cli.GetBlobMetadata(cnt, blob) + c.Assert(err, chk.IsNil) + c.Check(m, chk.DeepEquals, mExpectLower) +} + +func (s *StorageBlobSuite) TestPutEmptyBlockBlob(c *chk.C) { + cli := getBlobClient(c) + cnt := randContainer() + + c.Assert(cli.CreateContainer(cnt, ContainerAccessTypePrivate), chk.IsNil) + defer cli.deleteContainer(cnt) + + blob := randString(20) + 
c.Assert(cli.putSingleBlockBlob(cnt, blob, []byte{}), chk.IsNil) + + props, err := cli.GetBlobProperties(cnt, blob) + c.Assert(err, chk.IsNil) + c.Assert(props.ContentLength, chk.Not(chk.Equals), 0) +} + +func (s *StorageBlobSuite) TestGetBlobRange(c *chk.C) { + cnt := randContainer() + blob := randString(20) + body := "0123456789" + + cli := getBlobClient(c) + c.Assert(cli.CreateContainer(cnt, ContainerAccessTypeBlob), chk.IsNil) + defer cli.DeleteContainer(cnt) + + c.Assert(cli.putSingleBlockBlob(cnt, blob, []byte(body)), chk.IsNil) + defer cli.DeleteBlob(cnt, blob) + + // Read 1-3 + for _, r := range []struct { + rangeStr string + expected string + }{ + {"0-", body}, + {"1-3", body[1 : 3+1]}, + {"3-", body[3:]}, + } { + resp, err := cli.GetBlobRange(cnt, blob, r.rangeStr) + c.Assert(err, chk.IsNil) + blobBody, err := ioutil.ReadAll(resp) + c.Assert(err, chk.IsNil) + + str := string(blobBody) + c.Assert(str, chk.Equals, r.expected) + } +} + +func (s *StorageBlobSuite) TestCreateBlockBlobFromReader(c *chk.C) { + cli := getBlobClient(c) + cnt := randContainer() + c.Assert(cli.CreateContainer(cnt, ContainerAccessTypePrivate), chk.IsNil) + defer cli.deleteContainer(cnt) + + name := randString(20) + data := randBytes(8888) + c.Assert(cli.CreateBlockBlobFromReader(cnt, name, uint64(len(data)), bytes.NewReader(data), nil), chk.IsNil) + + body, err := cli.GetBlob(cnt, name) + c.Assert(err, chk.IsNil) + gotData, err := ioutil.ReadAll(body) + body.Close() + + c.Assert(err, chk.IsNil) + c.Assert(gotData, chk.DeepEquals, data) +} + +func (s *StorageBlobSuite) TestCreateBlockBlobFromReaderWithShortData(c *chk.C) { + cli := getBlobClient(c) + cnt := randContainer() + c.Assert(cli.CreateContainer(cnt, ContainerAccessTypePrivate), chk.IsNil) + defer cli.deleteContainer(cnt) + + name := randString(20) + data := randBytes(8888) + err := cli.CreateBlockBlobFromReader(cnt, name, 9999, bytes.NewReader(data), nil) + c.Assert(err, chk.Not(chk.IsNil)) + + _, err = cli.GetBlob(cnt, name) + // Upload was incomplete: blob should not have been created. 
+ c.Assert(err, chk.Not(chk.IsNil)) +} + +func (s *StorageBlobSuite) TestPutBlock(c *chk.C) { + cli := getBlobClient(c) + cnt := randContainer() + c.Assert(cli.CreateContainer(cnt, ContainerAccessTypePrivate), chk.IsNil) + defer cli.deleteContainer(cnt) + + blob := randString(20) + chunk := []byte(randString(1024)) + blockID := base64.StdEncoding.EncodeToString([]byte("foo")) + c.Assert(cli.PutBlock(cnt, blob, blockID, chunk), chk.IsNil) +} + +func (s *StorageBlobSuite) TestGetBlockList_PutBlockList(c *chk.C) { + cli := getBlobClient(c) + cnt := randContainer() + c.Assert(cli.CreateContainer(cnt, ContainerAccessTypePrivate), chk.IsNil) + defer cli.deleteContainer(cnt) + + blob := randString(20) + chunk := []byte(randString(1024)) + blockID := base64.StdEncoding.EncodeToString([]byte("foo")) + + // Put one block + c.Assert(cli.PutBlock(cnt, blob, blockID, chunk), chk.IsNil) + defer cli.deleteBlob(cnt, blob) + + // Get committed blocks + committed, err := cli.GetBlockList(cnt, blob, BlockListTypeCommitted) + c.Assert(err, chk.IsNil) + + if len(committed.CommittedBlocks) > 0 { + c.Fatal("There are committed blocks") + } + + // Get uncommitted blocks + uncommitted, err := cli.GetBlockList(cnt, blob, BlockListTypeUncommitted) + c.Assert(err, chk.IsNil) + + c.Assert(len(uncommitted.UncommittedBlocks), chk.Equals, 1) + // Commit block list + c.Assert(cli.PutBlockList(cnt, blob, []Block{{blockID, BlockStatusUncommitted}}), chk.IsNil) + + // Get all blocks + all, err := cli.GetBlockList(cnt, blob, BlockListTypeAll) + c.Assert(err, chk.IsNil) + c.Assert(len(all.CommittedBlocks), chk.Equals, 1) + c.Assert(len(all.UncommittedBlocks), chk.Equals, 0) + + // Verify the block + thatBlock := all.CommittedBlocks[0] + c.Assert(thatBlock.Name, chk.Equals, blockID) + c.Assert(thatBlock.Size, chk.Equals, int64(len(chunk))) +} + +func (s *StorageBlobSuite) TestCreateBlockBlob(c *chk.C) { + cli := getBlobClient(c) + cnt := randContainer() + c.Assert(cli.CreateContainer(cnt, ContainerAccessTypePrivate), chk.IsNil) + defer cli.deleteContainer(cnt) + + blob := randString(20) + c.Assert(cli.CreateBlockBlob(cnt, blob), chk.IsNil) + + // Verify + blocks, err := cli.GetBlockList(cnt, blob, BlockListTypeAll) + c.Assert(err, chk.IsNil) + c.Assert(len(blocks.CommittedBlocks), chk.Equals, 0) + c.Assert(len(blocks.UncommittedBlocks), chk.Equals, 0) +} + +func (s *StorageBlobSuite) TestPutPageBlob(c *chk.C) { + cli := getBlobClient(c) + cnt := randContainer() + c.Assert(cli.CreateContainer(cnt, ContainerAccessTypePrivate), chk.IsNil) + defer cli.deleteContainer(cnt) + + blob := randString(20) + size := int64(10 * 1024 * 1024) + c.Assert(cli.PutPageBlob(cnt, blob, size, nil), chk.IsNil) + + // Verify + props, err := cli.GetBlobProperties(cnt, blob) + c.Assert(err, chk.IsNil) + c.Assert(props.ContentLength, chk.Equals, size) + c.Assert(props.BlobType, chk.Equals, BlobTypePage) +} + +func (s *StorageBlobSuite) TestPutPagesUpdate(c *chk.C) { + cli := getBlobClient(c) + cnt := randContainer() + c.Assert(cli.CreateContainer(cnt, ContainerAccessTypePrivate), chk.IsNil) + defer cli.deleteContainer(cnt) + + blob := randString(20) + size := int64(10 * 1024 * 1024) // larger than we'll use + c.Assert(cli.PutPageBlob(cnt, blob, size, nil), chk.IsNil) + + chunk1 := []byte(randString(1024)) + chunk2 := []byte(randString(512)) + + // Append chunks + c.Assert(cli.PutPage(cnt, blob, 0, int64(len(chunk1)-1), PageWriteTypeUpdate, chunk1), chk.IsNil) + c.Assert(cli.PutPage(cnt, blob, int64(len(chunk1)), int64(len(chunk1)+len(chunk2)-1), 
PageWriteTypeUpdate, chunk2), chk.IsNil) + + // Verify contents + out, err := cli.GetBlobRange(cnt, blob, fmt.Sprintf("%v-%v", 0, len(chunk1)+len(chunk2)-1)) + c.Assert(err, chk.IsNil) + defer out.Close() + blobContents, err := ioutil.ReadAll(out) + c.Assert(err, chk.IsNil) + c.Assert(blobContents, chk.DeepEquals, append(chunk1, chunk2...)) + out.Close() + + // Overwrite first half of chunk1 + chunk0 := []byte(randString(512)) + c.Assert(cli.PutPage(cnt, blob, 0, int64(len(chunk0)-1), PageWriteTypeUpdate, chunk0), chk.IsNil) + + // Verify contents + out, err = cli.GetBlobRange(cnt, blob, fmt.Sprintf("%v-%v", 0, len(chunk1)+len(chunk2)-1)) + c.Assert(err, chk.IsNil) + defer out.Close() + blobContents, err = ioutil.ReadAll(out) + c.Assert(err, chk.IsNil) + c.Assert(blobContents, chk.DeepEquals, append(append(chunk0, chunk1[512:]...), chunk2...)) +} + +func (s *StorageBlobSuite) TestPutPagesClear(c *chk.C) { + cli := getBlobClient(c) + cnt := randContainer() + c.Assert(cli.CreateContainer(cnt, ContainerAccessTypePrivate), chk.IsNil) + defer cli.deleteContainer(cnt) + + blob := randString(20) + size := int64(10 * 1024 * 1024) // larger than we'll use + c.Assert(cli.PutPageBlob(cnt, blob, size, nil), chk.IsNil) + + // Put 0-2047 + chunk := []byte(randString(2048)) + c.Assert(cli.PutPage(cnt, blob, 0, 2047, PageWriteTypeUpdate, chunk), chk.IsNil) + + // Clear 512-1023 + c.Assert(cli.PutPage(cnt, blob, 512, 1023, PageWriteTypeClear, nil), chk.IsNil) + + // Verify contents + out, err := cli.GetBlobRange(cnt, blob, "0-2047") + c.Assert(err, chk.IsNil) + contents, err := ioutil.ReadAll(out) + c.Assert(err, chk.IsNil) + defer out.Close() + c.Assert(contents, chk.DeepEquals, append(append(chunk[:512], make([]byte, 512)...), chunk[1024:]...)) +} + +func (s *StorageBlobSuite) TestGetPageRanges(c *chk.C) { + cli := getBlobClient(c) + cnt := randContainer() + c.Assert(cli.CreateContainer(cnt, ContainerAccessTypePrivate), chk.IsNil) + defer cli.deleteContainer(cnt) + + blob := randString(20) + size := int64(10 * 1024 * 1024) // larger than we'll use + c.Assert(cli.PutPageBlob(cnt, blob, size, nil), chk.IsNil) + + // Get page ranges on empty blob + out, err := cli.GetPageRanges(cnt, blob) + c.Assert(err, chk.IsNil) + c.Assert(len(out.PageList), chk.Equals, 0) + + // Add 0-512 page + c.Assert(cli.PutPage(cnt, blob, 0, 511, PageWriteTypeUpdate, []byte(randString(512))), chk.IsNil) + + out, err = cli.GetPageRanges(cnt, blob) + c.Assert(err, chk.IsNil) + c.Assert(len(out.PageList), chk.Equals, 1) + + // Add 1024-2048 + c.Assert(cli.PutPage(cnt, blob, 1024, 2047, PageWriteTypeUpdate, []byte(randString(1024))), chk.IsNil) + + out, err = cli.GetPageRanges(cnt, blob) + c.Assert(err, chk.IsNil) + c.Assert(len(out.PageList), chk.Equals, 2) +} + +func deleteTestContainers(cli BlobStorageClient) error { + for { + resp, err := cli.ListContainers(ListContainersParameters{Prefix: testContainerPrefix}) + if err != nil { + return err + } + if len(resp.Containers) == 0 { + break + } + for _, c := range resp.Containers { + err = cli.DeleteContainer(c.Name) + if err != nil { + return err + } + } + } + return nil +} + +func (b BlobStorageClient) putSingleBlockBlob(container, name string, chunk []byte) error { + if len(chunk) > MaxBlobBlockSize { + return fmt.Errorf("storage: provided chunk (%d bytes) cannot fit into single-block blob (max %d bytes)", len(chunk), MaxBlobBlockSize) + } + + uri := b.client.getEndpoint(blobServiceName, pathForBlob(container, name), url.Values{}) + headers := b.client.getStandardHeaders() + 
headers["x-ms-blob-type"] = string(BlobTypeBlock) + headers["Content-Length"] = fmt.Sprintf("%v", len(chunk)) + + resp, err := b.client.exec("PUT", uri, headers, bytes.NewReader(chunk)) + if err != nil { + return err + } + return checkRespCode(resp.statusCode, []int{http.StatusCreated}) +} + +func randContainer() string { + return testContainerPrefix + randString(32-len(testContainerPrefix)) +} + +func randString(n int) string { + if n <= 0 { + panic("negative number") + } + const alphanum = "0123456789abcdefghijklmnopqrstuvwxyz" + var bytes = make([]byte, n) + rand.Read(bytes) + for i, b := range bytes { + bytes[i] = alphanum[b%byte(len(alphanum))] + } + return string(bytes) +} + +func randBytes(n int) []byte { + data := make([]byte, n) + if _, err := io.ReadFull(rand.Reader, data); err != nil { + panic(err) + } + return data +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/client.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/client.go new file mode 100644 index 000000000..0305e90d8 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/storage/client.go @@ -0,0 +1,388 @@ +// Package storage provides clients for Microsoft Azure Storage Services. +package storage + +import ( + "bytes" + "encoding/base64" + "encoding/xml" + "errors" + "fmt" + "io" + "io/ioutil" + "net/http" + "net/url" + "regexp" + "sort" + "strconv" + "strings" +) + +const ( + // DefaultBaseURL is the domain name used for storage requests when a + // default client is created. + DefaultBaseURL = "core.windows.net" + + // DefaultAPIVersion is the Azure Storage API version string used when a + // basic client is created. + DefaultAPIVersion = "2014-02-14" + + defaultUseHTTPS = true + + blobServiceName = "blob" + tableServiceName = "table" + queueServiceName = "queue" + fileServiceName = "file" +) + +// Client is the object that needs to be constructed to perform +// operations on the storage account. +type Client struct { + accountName string + accountKey []byte + useHTTPS bool + baseURL string + apiVersion string +} + +type storageResponse struct { + statusCode int + headers http.Header + body io.ReadCloser +} + +// AzureStorageServiceError contains fields of the error response from +// Azure Storage Service REST API. See https://msdn.microsoft.com/en-us/library/azure/dd179382.aspx +// Some fields might be specific to certain calls. +type AzureStorageServiceError struct { + Code string `xml:"Code"` + Message string `xml:"Message"` + AuthenticationErrorDetail string `xml:"AuthenticationErrorDetail"` + QueryParameterName string `xml:"QueryParameterName"` + QueryParameterValue string `xml:"QueryParameterValue"` + Reason string `xml:"Reason"` + StatusCode int + RequestID string +} + +// UnexpectedStatusCodeError is returned when a storage service responds with neither an error +// nor with an HTTP status code indicating success. +type UnexpectedStatusCodeError struct { + allowed []int + got int +} + +func (e UnexpectedStatusCodeError) Error() string { + s := func(i int) string { return fmt.Sprintf("%d %s", i, http.StatusText(i)) } + + got := s(e.got) + expected := []string{} + for _, v := range e.allowed { + expected = append(expected, s(v)) + } + return fmt.Sprintf("storage: status code from service response is %s; was expecting %s", got, strings.Join(expected, " or ")) +} + +// Got is the actual status code returned by Azure. +func (e UnexpectedStatusCodeError) Got() int { + return e.got +} + +// NewBasicClient constructs a Client with given storage service name and +// key. 
+func NewBasicClient(accountName, accountKey string) (Client, error) { + return NewClient(accountName, accountKey, DefaultBaseURL, DefaultAPIVersion, defaultUseHTTPS) +} + +// NewClient constructs a Client. This should be used if the caller wants +// to specify whether to use HTTPS, a specific REST API version or a custom +// storage endpoint than Azure Public Cloud. +func NewClient(accountName, accountKey, blobServiceBaseURL, apiVersion string, useHTTPS bool) (Client, error) { + var c Client + if accountName == "" { + return c, fmt.Errorf("azure: account name required") + } else if accountKey == "" { + return c, fmt.Errorf("azure: account key required") + } else if blobServiceBaseURL == "" { + return c, fmt.Errorf("azure: base storage service url required") + } + + key, err := base64.StdEncoding.DecodeString(accountKey) + if err != nil { + return c, err + } + + return Client{ + accountName: accountName, + accountKey: key, + useHTTPS: useHTTPS, + baseURL: blobServiceBaseURL, + apiVersion: apiVersion, + }, nil +} + +func (c Client) getBaseURL(service string) string { + scheme := "http" + if c.useHTTPS { + scheme = "https" + } + + host := fmt.Sprintf("%s.%s.%s", c.accountName, service, c.baseURL) + + u := &url.URL{ + Scheme: scheme, + Host: host} + return u.String() +} + +func (c Client) getEndpoint(service, path string, params url.Values) string { + u, err := url.Parse(c.getBaseURL(service)) + if err != nil { + // really should not be happening + panic(err) + } + + if path == "" { + path = "/" // API doesn't accept path segments not starting with '/' + } + + u.Path = path + u.RawQuery = params.Encode() + return u.String() +} + +// GetBlobService returns a BlobStorageClient which can operate on the blob +// service of the storage account. +func (c Client) GetBlobService() BlobStorageClient { + return BlobStorageClient{c} +} + +// GetQueueService returns a QueueServiceClient which can operate on the queue +// service of the storage account. +func (c Client) GetQueueService() QueueServiceClient { + return QueueServiceClient{c} +} + +// GetFileService returns a FileServiceClient which can operate on the file +// service of the storage account. 
+func (c Client) GetFileService() FileServiceClient { + return FileServiceClient{c} +} + +func (c Client) createAuthorizationHeader(canonicalizedString string) string { + signature := c.computeHmac256(canonicalizedString) + return fmt.Sprintf("%s %s:%s", "SharedKey", c.accountName, signature) +} + +func (c Client) getAuthorizationHeader(verb, url string, headers map[string]string) (string, error) { + canonicalizedResource, err := c.buildCanonicalizedResource(url) + if err != nil { + return "", err + } + + canonicalizedString := c.buildCanonicalizedString(verb, headers, canonicalizedResource) + return c.createAuthorizationHeader(canonicalizedString), nil +} + +func (c Client) getStandardHeaders() map[string]string { + return map[string]string{ + "x-ms-version": c.apiVersion, + "x-ms-date": currentTimeRfc1123Formatted(), + } +} + +func (c Client) buildCanonicalizedHeader(headers map[string]string) string { + cm := make(map[string]string) + + for k, v := range headers { + headerName := strings.TrimSpace(strings.ToLower(k)) + match, _ := regexp.MatchString("x-ms-", headerName) + if match { + cm[headerName] = v + } + } + + if len(cm) == 0 { + return "" + } + + keys := make([]string, 0, len(cm)) + for key := range cm { + keys = append(keys, key) + } + + sort.Strings(keys) + + ch := "" + + for i, key := range keys { + if i == len(keys)-1 { + ch += fmt.Sprintf("%s:%s", key, cm[key]) + } else { + ch += fmt.Sprintf("%s:%s\n", key, cm[key]) + } + } + return ch +} + +func (c Client) buildCanonicalizedResource(uri string) (string, error) { + errMsg := "buildCanonicalizedResource error: %s" + u, err := url.Parse(uri) + if err != nil { + return "", fmt.Errorf(errMsg, err.Error()) + } + + cr := "/" + c.accountName + if len(u.Path) > 0 { + cr += u.Path + } + + params, err := url.ParseQuery(u.RawQuery) + if err != nil { + return "", fmt.Errorf(errMsg, err.Error()) + } + + if len(params) > 0 { + cr += "\n" + keys := make([]string, 0, len(params)) + for key := range params { + keys = append(keys, key) + } + + sort.Strings(keys) + + for i, key := range keys { + if len(params[key]) > 1 { + sort.Strings(params[key]) + } + + if i == len(keys)-1 { + cr += fmt.Sprintf("%s:%s", key, strings.Join(params[key], ",")) + } else { + cr += fmt.Sprintf("%s:%s\n", key, strings.Join(params[key], ",")) + } + } + } + return cr, nil +} + +func (c Client) buildCanonicalizedString(verb string, headers map[string]string, canonicalizedResource string) string { + canonicalizedString := fmt.Sprintf("%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s", + verb, + headers["Content-Encoding"], + headers["Content-Language"], + headers["Content-Length"], + headers["Content-MD5"], + headers["Content-Type"], + headers["Date"], + headers["If-Modified-Since"], + headers["If-Match"], + headers["If-None-Match"], + headers["If-Unmodified-Since"], + headers["Range"], + c.buildCanonicalizedHeader(headers), + canonicalizedResource) + + return canonicalizedString +} + +func (c Client) exec(verb, url string, headers map[string]string, body io.Reader) (*storageResponse, error) { + authHeader, err := c.getAuthorizationHeader(verb, url, headers) + if err != nil { + return nil, err + } + headers["Authorization"] = authHeader + + if err != nil { + return nil, err + } + + req, err := http.NewRequest(verb, url, body) + if err != nil { + return nil, errors.New("azure/storage: error creating request: " + err.Error()) + } + + if clstr, ok := headers["Content-Length"]; ok { + // content length header is being signed, but completely ignored by golang. 
+ // instead we have to use the ContentLength property on the request struct + // (see https://golang.org/src/net/http/request.go?s=18140:18370#L536 and + // https://golang.org/src/net/http/transfer.go?s=1739:2467#L49) + req.ContentLength, err = strconv.ParseInt(clstr, 10, 64) + if err != nil { + return nil, err + } + } + for k, v := range headers { + req.Header.Add(k, v) + } + httpClient := http.Client{} + resp, err := httpClient.Do(req) + if err != nil { + return nil, err + } + + statusCode := resp.StatusCode + if statusCode >= 400 && statusCode <= 505 { + var respBody []byte + respBody, err = readResponseBody(resp) + if err != nil { + return nil, err + } + + if len(respBody) == 0 { + // no error in response body + err = fmt.Errorf("storage: service returned without a response body (%s)", resp.Status) + } else { + // response contains storage service error object, unmarshal + storageErr, errIn := serviceErrFromXML(respBody, resp.StatusCode, resp.Header.Get("x-ms-request-id")) + if err != nil { // error unmarshaling the error response + err = errIn + } + err = storageErr + } + return &storageResponse{ + statusCode: resp.StatusCode, + headers: resp.Header, + body: ioutil.NopCloser(bytes.NewReader(respBody)), /* restore the body */ + }, err + } + + return &storageResponse{ + statusCode: resp.StatusCode, + headers: resp.Header, + body: resp.Body}, nil +} + +func readResponseBody(resp *http.Response) ([]byte, error) { + defer resp.Body.Close() + out, err := ioutil.ReadAll(resp.Body) + if err == io.EOF { + err = nil + } + return out, err +} + +func serviceErrFromXML(body []byte, statusCode int, requestID string) (AzureStorageServiceError, error) { + var storageErr AzureStorageServiceError + if err := xml.Unmarshal(body, &storageErr); err != nil { + return storageErr, err + } + storageErr.StatusCode = statusCode + storageErr.RequestID = requestID + return storageErr, nil +} + +func (e AzureStorageServiceError) Error() string { + return fmt.Sprintf("storage: service returned error: StatusCode=%d, ErrorCode=%s, ErrorMessage=%s, RequestId=%s, QueryParameterName=%s, QueryParameterValue=%s", + e.StatusCode, e.Code, e.Message, e.RequestID, e.QueryParameterName, e.QueryParameterValue) +} + +// checkRespCode returns UnexpectedStatusError if the given response code is not +// one of the allowed status codes; otherwise nil. 
+func checkRespCode(respCode int, allowed []int) error { + for _, v := range allowed { + if respCode == v { + return nil + } + } + return UnexpectedStatusCodeError{allowed, respCode} +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/client_test.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/client_test.go new file mode 100644 index 000000000..0c75a13b1 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/storage/client_test.go @@ -0,0 +1,156 @@ +package storage + +import ( + "encoding/base64" + "net/url" + "os" + "testing" + + chk "github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/gopkg.in/check.v1" +) + +// Hook up gocheck to testing +func Test(t *testing.T) { chk.TestingT(t) } + +type StorageClientSuite struct{} + +var _ = chk.Suite(&StorageClientSuite{}) + +// getBasicClient returns a test client from storage credentials in the env +func getBasicClient(c *chk.C) Client { + name := os.Getenv("ACCOUNT_NAME") + if name == "" { + c.Fatal("ACCOUNT_NAME not set, need an empty storage account to test") + } + key := os.Getenv("ACCOUNT_KEY") + if key == "" { + c.Fatal("ACCOUNT_KEY not set") + } + cli, err := NewBasicClient(name, key) + c.Assert(err, chk.IsNil) + return cli +} + +func (s *StorageClientSuite) TestGetBaseURL_Basic_Https(c *chk.C) { + cli, err := NewBasicClient("foo", "YmFy") + c.Assert(err, chk.IsNil) + c.Assert(cli.apiVersion, chk.Equals, DefaultAPIVersion) + c.Assert(err, chk.IsNil) + c.Assert(cli.getBaseURL("table"), chk.Equals, "https://foo.table.core.windows.net") +} + +func (s *StorageClientSuite) TestGetBaseURL_Custom_NoHttps(c *chk.C) { + apiVersion := "2015-01-01" // a non existing one + cli, err := NewClient("foo", "YmFy", "core.chinacloudapi.cn", apiVersion, false) + c.Assert(err, chk.IsNil) + c.Assert(cli.apiVersion, chk.Equals, apiVersion) + c.Assert(cli.getBaseURL("table"), chk.Equals, "http://foo.table.core.chinacloudapi.cn") +} + +func (s *StorageClientSuite) TestGetEndpoint_None(c *chk.C) { + cli, err := NewBasicClient("foo", "YmFy") + c.Assert(err, chk.IsNil) + output := cli.getEndpoint(blobServiceName, "", url.Values{}) + c.Assert(output, chk.Equals, "https://foo.blob.core.windows.net/") +} + +func (s *StorageClientSuite) TestGetEndpoint_PathOnly(c *chk.C) { + cli, err := NewBasicClient("foo", "YmFy") + c.Assert(err, chk.IsNil) + output := cli.getEndpoint(blobServiceName, "path", url.Values{}) + c.Assert(output, chk.Equals, "https://foo.blob.core.windows.net/path") +} + +func (s *StorageClientSuite) TestGetEndpoint_ParamsOnly(c *chk.C) { + cli, err := NewBasicClient("foo", "YmFy") + c.Assert(err, chk.IsNil) + params := url.Values{} + params.Set("a", "b") + params.Set("c", "d") + output := cli.getEndpoint(blobServiceName, "", params) + c.Assert(output, chk.Equals, "https://foo.blob.core.windows.net/?a=b&c=d") +} + +func (s *StorageClientSuite) TestGetEndpoint_Mixed(c *chk.C) { + cli, err := NewBasicClient("foo", "YmFy") + c.Assert(err, chk.IsNil) + params := url.Values{} + params.Set("a", "b") + params.Set("c", "d") + output := cli.getEndpoint(blobServiceName, "path", params) + c.Assert(output, chk.Equals, "https://foo.blob.core.windows.net/path?a=b&c=d") +} + +func (s *StorageClientSuite) Test_getStandardHeaders(c *chk.C) { + cli, err := NewBasicClient("foo", "YmFy") + c.Assert(err, chk.IsNil) + + headers := cli.getStandardHeaders() + c.Assert(len(headers), chk.Equals, 2) + c.Assert(headers["x-ms-version"], chk.Equals, cli.apiVersion) + if _, ok := headers["x-ms-date"]; !ok { + c.Fatal("Missing date header") + } +} + +func (s 
*StorageClientSuite) Test_buildCanonicalizedResource(c *chk.C) { + cli, err := NewBasicClient("foo", "YmFy") + c.Assert(err, chk.IsNil) + + type test struct{ url, expected string } + tests := []test{ + {"https://foo.blob.core.windows.net/path?a=b&c=d", "/foo/path\na:b\nc:d"}, + {"https://foo.blob.core.windows.net/?comp=list", "/foo/\ncomp:list"}, + {"https://foo.blob.core.windows.net/cnt/blob", "/foo/cnt/blob"}, + } + + for _, i := range tests { + out, err := cli.buildCanonicalizedResource(i.url) + c.Assert(err, chk.IsNil) + c.Assert(out, chk.Equals, i.expected) + } +} + +func (s *StorageClientSuite) Test_buildCanonicalizedHeader(c *chk.C) { + cli, err := NewBasicClient("foo", "YmFy") + c.Assert(err, chk.IsNil) + + type test struct { + headers map[string]string + expected string + } + tests := []test{ + {map[string]string{}, ""}, + {map[string]string{"x-ms-foo": "bar"}, "x-ms-foo:bar"}, + {map[string]string{"foo:": "bar"}, ""}, + {map[string]string{"foo:": "bar", "x-ms-foo": "bar"}, "x-ms-foo:bar"}, + {map[string]string{ + "x-ms-version": "9999-99-99", + "x-ms-blob-type": "BlockBlob"}, "x-ms-blob-type:BlockBlob\nx-ms-version:9999-99-99"}} + + for _, i := range tests { + c.Assert(cli.buildCanonicalizedHeader(i.headers), chk.Equals, i.expected) + } +} + +func (s *StorageClientSuite) TestReturnsStorageServiceError(c *chk.C) { + // attempt to delete a nonexisting container + _, err := getBlobClient(c).deleteContainer(randContainer()) + c.Assert(err, chk.NotNil) + + v, ok := err.(AzureStorageServiceError) + c.Check(ok, chk.Equals, true) + c.Assert(v.StatusCode, chk.Equals, 404) + c.Assert(v.Code, chk.Equals, "ContainerNotFound") + c.Assert(v.Code, chk.Not(chk.Equals), "") +} + +func (s *StorageClientSuite) Test_createAuthorizationHeader(c *chk.C) { + key := base64.StdEncoding.EncodeToString([]byte("bar")) + cli, err := NewBasicClient("foo", key) + c.Assert(err, chk.IsNil) + + canonicalizedString := `foobarzoo` + expected := `SharedKey foo:h5U0ATVX6SpbFX1H6GNuxIMeXXCILLoIvhflPtuQZ30=` + c.Assert(cli.createAuthorizationHeader(canonicalizedString), chk.Equals, expected) +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/file.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/file.go new file mode 100644 index 000000000..089d14493 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/storage/file.go @@ -0,0 +1,92 @@ +package storage + +import ( + "fmt" + "net/http" + "net/url" +) + +// FileServiceClient contains operations for Microsoft Azure File Service. +type FileServiceClient struct { + client Client +} + +// pathForFileShare returns the URL path segment for a File Share resource +func pathForFileShare(name string) string { + return fmt.Sprintf("/%s", name) +} + +// CreateShare operation creates a new share under the specified account. If the +// share with the same name already exists, the operation fails. +// +// See https://msdn.microsoft.com/en-us/library/azure/dn167008.aspx +func (f FileServiceClient) CreateShare(name string) error { + resp, err := f.createShare(name) + if err != nil { + return err + } + defer resp.body.Close() + return checkRespCode(resp.statusCode, []int{http.StatusCreated}) +} + +// CreateShareIfNotExists creates a new share under the specified account if +// it does not exist. Returns true if container is newly created or false if +// container already exists. 
+// +// See https://msdn.microsoft.com/en-us/library/azure/dn167008.aspx +func (f FileServiceClient) CreateShareIfNotExists(name string) (bool, error) { + resp, err := f.createShare(name) + if resp != nil { + defer resp.body.Close() + if resp.statusCode == http.StatusCreated || resp.statusCode == http.StatusConflict { + return resp.statusCode == http.StatusCreated, nil + } + } + return false, err +} + +// CreateShare creates a Azure File Share and returns its response +func (f FileServiceClient) createShare(name string) (*storageResponse, error) { + uri := f.client.getEndpoint(fileServiceName, pathForFileShare(name), url.Values{"restype": {"share"}}) + headers := f.client.getStandardHeaders() + headers["Content-Length"] = "0" + return f.client.exec("PUT", uri, headers, nil) +} + +// DeleteShare operation marks the specified share for deletion. The share +// and any files contained within it are later deleted during garbage +// collection. +// +// See https://msdn.microsoft.com/en-us/library/azure/dn689090.aspx +func (f FileServiceClient) DeleteShare(name string) error { + resp, err := f.deleteShare(name) + if err != nil { + return err + } + defer resp.body.Close() + return checkRespCode(resp.statusCode, []int{http.StatusAccepted}) +} + +// DeleteShareIfExists operation marks the specified share for deletion if it +// exists. The share and any files contained within it are later deleted during +// garbage collection. Returns true if share existed and deleted with this call, +// false otherwise. +// +// See https://msdn.microsoft.com/en-us/library/azure/dn689090.aspx +func (f FileServiceClient) DeleteShareIfExists(name string) (bool, error) { + resp, err := f.deleteShare(name) + if resp != nil { + defer resp.body.Close() + if resp.statusCode == http.StatusAccepted || resp.statusCode == http.StatusNotFound { + return resp.statusCode == http.StatusAccepted, nil + } + } + return false, err +} + +// deleteShare makes the call to Delete Share operation endpoint and returns +// the response +func (f FileServiceClient) deleteShare(name string) (*storageResponse, error) { + uri := f.client.getEndpoint(fileServiceName, pathForFileShare(name), url.Values{"restype": {"share"}}) + return f.client.exec("DELETE", uri, f.client.getStandardHeaders(), nil) +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/file_test.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/file_test.go new file mode 100644 index 000000000..399329030 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/storage/file_test.go @@ -0,0 +1,63 @@ +package storage + +import ( + chk "github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/gopkg.in/check.v1" +) + +type StorageFileSuite struct{} + +var _ = chk.Suite(&StorageFileSuite{}) + +func getFileClient(c *chk.C) FileServiceClient { + return getBasicClient(c).GetFileService() +} + +func (s *StorageFileSuite) Test_pathForFileShare(c *chk.C) { + c.Assert(pathForFileShare("foo"), chk.Equals, "/foo") +} + +func (s *StorageFileSuite) TestCreateShareDeleteShare(c *chk.C) { + cli := getFileClient(c) + name := randShare() + c.Assert(cli.CreateShare(name), chk.IsNil) + c.Assert(cli.DeleteShare(name), chk.IsNil) +} + +func (s *StorageFileSuite) TestCreateShareIfNotExists(c *chk.C) { + cli := getFileClient(c) + name := randShare() + defer cli.DeleteShare(name) + + // First create + ok, err := cli.CreateShareIfNotExists(name) + c.Assert(err, chk.IsNil) + c.Assert(ok, chk.Equals, true) + + // Second create, should not give errors + ok, err = cli.CreateShareIfNotExists(name) + 
c.Assert(err, chk.IsNil) + c.Assert(ok, chk.Equals, false) +} + +func (s *StorageFileSuite) TestDeleteShareIfNotExists(c *chk.C) { + cli := getFileClient(c) + name := randShare() + + // delete non-existing share + ok, err := cli.DeleteShareIfExists(name) + c.Assert(err, chk.IsNil) + c.Assert(ok, chk.Equals, false) + + c.Assert(cli.CreateShare(name), chk.IsNil) + + // delete existing share + ok, err = cli.DeleteShareIfExists(name) + c.Assert(err, chk.IsNil) + c.Assert(ok, chk.Equals, true) +} + +const testSharePrefix = "zzzzztest" + +func randShare() string { + return testSharePrefix + randString(32-len(testSharePrefix)) +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/queue.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/queue.go new file mode 100644 index 000000000..015f1d568 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/storage/queue.go @@ -0,0 +1,308 @@ +package storage + +import ( + "encoding/xml" + "fmt" + "net/http" + "net/url" + "strconv" + "strings" +) + +const ( + // casing is per Golang's http.Header canonicalizing the header names. + approximateMessagesCountHeader = "X-Ms-Approximate-Messages-Count" + userDefinedMetadataHeaderPrefix = "X-Ms-Meta-" +) + +// QueueServiceClient contains operations for Microsoft Azure Queue Storage +// Service. +type QueueServiceClient struct { + client Client +} + +func pathForQueue(queue string) string { return fmt.Sprintf("/%s", queue) } +func pathForQueueMessages(queue string) string { return fmt.Sprintf("/%s/messages", queue) } +func pathForMessage(queue, name string) string { return fmt.Sprintf("/%s/messages/%s", queue, name) } + +type putMessageRequest struct { + XMLName xml.Name `xml:"QueueMessage"` + MessageText string `xml:"MessageText"` +} + +// PutMessageParameters is the set of options can be specified for Put Messsage +// operation. A zero struct does not use any preferences for the request. +type PutMessageParameters struct { + VisibilityTimeout int + MessageTTL int +} + +func (p PutMessageParameters) getParameters() url.Values { + out := url.Values{} + if p.VisibilityTimeout != 0 { + out.Set("visibilitytimeout", strconv.Itoa(p.VisibilityTimeout)) + } + if p.MessageTTL != 0 { + out.Set("messagettl", strconv.Itoa(p.MessageTTL)) + } + return out +} + +// GetMessagesParameters is the set of options can be specified for Get +// Messsages operation. A zero struct does not use any preferences for the +// request. +type GetMessagesParameters struct { + NumOfMessages int + VisibilityTimeout int +} + +func (p GetMessagesParameters) getParameters() url.Values { + out := url.Values{} + if p.NumOfMessages != 0 { + out.Set("numofmessages", strconv.Itoa(p.NumOfMessages)) + } + if p.VisibilityTimeout != 0 { + out.Set("visibilitytimeout", strconv.Itoa(p.VisibilityTimeout)) + } + return out +} + +// PeekMessagesParameters is the set of options can be specified for Peek +// Messsage operation. A zero struct does not use any preferences for the +// request. +type PeekMessagesParameters struct { + NumOfMessages int +} + +func (p PeekMessagesParameters) getParameters() url.Values { + out := url.Values{"peekonly": {"true"}} // Required for peek operation + if p.NumOfMessages != 0 { + out.Set("numofmessages", strconv.Itoa(p.NumOfMessages)) + } + return out +} + +// GetMessagesResponse represents a response returned from Get Messages +// operation. 
+type GetMessagesResponse struct { + XMLName xml.Name `xml:"QueueMessagesList"` + QueueMessagesList []GetMessageResponse `xml:"QueueMessage"` +} + +// GetMessageResponse represents a QueueMessage object returned from Get +// Messages operation response. +type GetMessageResponse struct { + MessageID string `xml:"MessageId"` + InsertionTime string `xml:"InsertionTime"` + ExpirationTime string `xml:"ExpirationTime"` + PopReceipt string `xml:"PopReceipt"` + TimeNextVisible string `xml:"TimeNextVisible"` + DequeueCount int `xml:"DequeueCount"` + MessageText string `xml:"MessageText"` +} + +// PeekMessagesResponse represents a response returned from Get Messages +// operation. +type PeekMessagesResponse struct { + XMLName xml.Name `xml:"QueueMessagesList"` + QueueMessagesList []PeekMessageResponse `xml:"QueueMessage"` +} + +// PeekMessageResponse represents a QueueMessage object returned from Peek +// Messages operation response. +type PeekMessageResponse struct { + MessageID string `xml:"MessageId"` + InsertionTime string `xml:"InsertionTime"` + ExpirationTime string `xml:"ExpirationTime"` + DequeueCount int `xml:"DequeueCount"` + MessageText string `xml:"MessageText"` +} + +// QueueMetadataResponse represents user defined metadata and queue +// properties on a specific queue. +// +// See https://msdn.microsoft.com/en-us/library/azure/dd179384.aspx +type QueueMetadataResponse struct { + ApproximateMessageCount int + UserDefinedMetadata map[string]string +} + +// SetMetadata operation sets user-defined metadata on the specified queue. +// Metadata is associated with the queue as name-value pairs. +// +// See https://msdn.microsoft.com/en-us/library/azure/dd179348.aspx +func (c QueueServiceClient) SetMetadata(name string, metadata map[string]string) error { + uri := c.client.getEndpoint(queueServiceName, pathForQueue(name), url.Values{"comp": []string{"metadata"}}) + headers := c.client.getStandardHeaders() + headers["Content-Length"] = "0" + for k, v := range metadata { + headers[userDefinedMetadataHeaderPrefix+k] = v + } + + resp, err := c.client.exec("PUT", uri, headers, nil) + if err != nil { + return err + } + defer resp.body.Close() + + return checkRespCode(resp.statusCode, []int{http.StatusNoContent}) +} + +// GetMetadata operation retrieves user-defined metadata and queue +// properties on the specified queue. Metadata is associated with +// the queue as name-values pairs. +// +// See https://msdn.microsoft.com/en-us/library/azure/dd179384.aspx +// +// Because the way Golang's http client (and http.Header in particular) +// canonicalize header names, the returned metadata names would always +// be all lower case. 
+func (c QueueServiceClient) GetMetadata(name string) (QueueMetadataResponse, error) { + qm := QueueMetadataResponse{} + qm.UserDefinedMetadata = make(map[string]string) + uri := c.client.getEndpoint(queueServiceName, pathForQueue(name), url.Values{"comp": []string{"metadata"}}) + headers := c.client.getStandardHeaders() + resp, err := c.client.exec("GET", uri, headers, nil) + if err != nil { + return qm, err + } + defer resp.body.Close() + + for k, v := range resp.headers { + if len(v) != 1 { + return qm, fmt.Errorf("Unexpected number of values (%d) in response header '%s'", len(v), k) + } + + value := v[0] + + if k == approximateMessagesCountHeader { + qm.ApproximateMessageCount, err = strconv.Atoi(value) + if err != nil { + return qm, fmt.Errorf("Unexpected value in response header '%s': '%s' ", k, value) + } + } else if strings.HasPrefix(k, userDefinedMetadataHeaderPrefix) { + name := strings.TrimPrefix(k, userDefinedMetadataHeaderPrefix) + qm.UserDefinedMetadata[strings.ToLower(name)] = value + } + } + + return qm, checkRespCode(resp.statusCode, []int{http.StatusOK}) +} + +// CreateQueue operation creates a queue under the given account. +// +// See https://msdn.microsoft.com/en-us/library/azure/dd179342.aspx +func (c QueueServiceClient) CreateQueue(name string) error { + uri := c.client.getEndpoint(queueServiceName, pathForQueue(name), url.Values{}) + headers := c.client.getStandardHeaders() + headers["Content-Length"] = "0" + resp, err := c.client.exec("PUT", uri, headers, nil) + if err != nil { + return err + } + defer resp.body.Close() + return checkRespCode(resp.statusCode, []int{http.StatusCreated}) +} + +// DeleteQueue operation permanently deletes the specified queue. +// +// See https://msdn.microsoft.com/en-us/library/azure/dd179436.aspx +func (c QueueServiceClient) DeleteQueue(name string) error { + uri := c.client.getEndpoint(queueServiceName, pathForQueue(name), url.Values{}) + resp, err := c.client.exec("DELETE", uri, c.client.getStandardHeaders(), nil) + if err != nil { + return err + } + defer resp.body.Close() + return checkRespCode(resp.statusCode, []int{http.StatusNoContent}) +} + +// QueueExists returns true if a queue with given name exists. +func (c QueueServiceClient) QueueExists(name string) (bool, error) { + uri := c.client.getEndpoint(queueServiceName, pathForQueue(name), url.Values{"comp": {"metadata"}}) + resp, err := c.client.exec("GET", uri, c.client.getStandardHeaders(), nil) + if resp != nil && (resp.statusCode == http.StatusOK || resp.statusCode == http.StatusNotFound) { + return resp.statusCode == http.StatusOK, nil + } + + return false, err +} + +// PutMessage operation adds a new message to the back of the message queue. +// +// See https://msdn.microsoft.com/en-us/library/azure/dd179346.aspx +func (c QueueServiceClient) PutMessage(queue string, message string, params PutMessageParameters) error { + uri := c.client.getEndpoint(queueServiceName, pathForQueueMessages(queue), params.getParameters()) + req := putMessageRequest{MessageText: message} + body, nn, err := xmlMarshal(req) + if err != nil { + return err + } + headers := c.client.getStandardHeaders() + headers["Content-Length"] = strconv.Itoa(nn) + resp, err := c.client.exec("POST", uri, headers, body) + if err != nil { + return err + } + defer resp.body.Close() + return checkRespCode(resp.statusCode, []int{http.StatusCreated}) +} + +// ClearMessages operation deletes all messages from the specified queue. 
+// +// See https://msdn.microsoft.com/en-us/library/azure/dd179454.aspx +func (c QueueServiceClient) ClearMessages(queue string) error { + uri := c.client.getEndpoint(queueServiceName, pathForQueueMessages(queue), url.Values{}) + resp, err := c.client.exec("DELETE", uri, c.client.getStandardHeaders(), nil) + if err != nil { + return err + } + defer resp.body.Close() + return checkRespCode(resp.statusCode, []int{http.StatusNoContent}) +} + +// GetMessages operation retrieves one or more messages from the front of the +// queue. +// +// See https://msdn.microsoft.com/en-us/library/azure/dd179474.aspx +func (c QueueServiceClient) GetMessages(queue string, params GetMessagesParameters) (GetMessagesResponse, error) { + var r GetMessagesResponse + uri := c.client.getEndpoint(queueServiceName, pathForQueueMessages(queue), params.getParameters()) + resp, err := c.client.exec("GET", uri, c.client.getStandardHeaders(), nil) + if err != nil { + return r, err + } + defer resp.body.Close() + err = xmlUnmarshal(resp.body, &r) + return r, err +} + +// PeekMessages retrieves one or more messages from the front of the queue, but +// does not alter the visibility of the message. +// +// See https://msdn.microsoft.com/en-us/library/azure/dd179472.aspx +func (c QueueServiceClient) PeekMessages(queue string, params PeekMessagesParameters) (PeekMessagesResponse, error) { + var r PeekMessagesResponse + uri := c.client.getEndpoint(queueServiceName, pathForQueueMessages(queue), params.getParameters()) + resp, err := c.client.exec("GET", uri, c.client.getStandardHeaders(), nil) + if err != nil { + return r, err + } + defer resp.body.Close() + err = xmlUnmarshal(resp.body, &r) + return r, err +} + +// DeleteMessage operation deletes the specified message. +// +// See https://msdn.microsoft.com/en-us/library/azure/dd179347.aspx +func (c QueueServiceClient) DeleteMessage(queue, messageID, popReceipt string) error { + uri := c.client.getEndpoint(queueServiceName, pathForMessage(queue, messageID), url.Values{ + "popreceipt": {popReceipt}}) + resp, err := c.client.exec("DELETE", uri, c.client.getStandardHeaders(), nil) + if err != nil { + return err + } + defer resp.body.Close() + return checkRespCode(resp.statusCode, []int{http.StatusNoContent}) +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/queue_test.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/queue_test.go new file mode 100644 index 000000000..19f809670 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/storage/queue_test.go @@ -0,0 +1,132 @@ +package storage + +import ( + "time" + + chk "github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/gopkg.in/check.v1" +) + +type StorageQueueSuite struct{} + +var _ = chk.Suite(&StorageQueueSuite{}) + +func getQueueClient(c *chk.C) QueueServiceClient { + return getBasicClient(c).GetQueueService() +} + +func (s *StorageQueueSuite) Test_pathForQueue(c *chk.C) { + c.Assert(pathForQueue("q"), chk.Equals, "/q") +} + +func (s *StorageQueueSuite) Test_pathForQueueMessages(c *chk.C) { + c.Assert(pathForQueueMessages("q"), chk.Equals, "/q/messages") +} + +func (s *StorageQueueSuite) Test_pathForMessage(c *chk.C) { + c.Assert(pathForMessage("q", "m"), chk.Equals, "/q/messages/m") +} + +func (s *StorageQueueSuite) TestCreateQueue_DeleteQueue(c *chk.C) { + cli := getQueueClient(c) + name := randString(20) + c.Assert(cli.CreateQueue(name), chk.IsNil) + c.Assert(cli.DeleteQueue(name), chk.IsNil) +} + +func (s *StorageQueueSuite) Test_GetMetadata_GetApproximateCount(c *chk.C) { + cli := 
getQueueClient(c) + name := randString(20) + c.Assert(cli.CreateQueue(name), chk.IsNil) + defer cli.DeleteQueue(name) + + qm, err := cli.GetMetadata(name) + c.Assert(err, chk.IsNil) + c.Assert(qm.ApproximateMessageCount, chk.Equals, 0) + + for ix := 0; ix < 3; ix++ { + err = cli.PutMessage(name, "foobar", PutMessageParameters{}) + c.Assert(err, chk.IsNil) + } + time.Sleep(1 * time.Second) + + qm, err = cli.GetMetadata(name) + c.Assert(err, chk.IsNil) + c.Assert(qm.ApproximateMessageCount, chk.Equals, 3) +} + +func (s *StorageQueueSuite) Test_SetMetadataGetMetadata_Roundtrips(c *chk.C) { + cli := getQueueClient(c) + name := randString(20) + c.Assert(cli.CreateQueue(name), chk.IsNil) + defer cli.DeleteQueue(name) + + metadata := make(map[string]string) + metadata["Foo1"] = "bar1" + metadata["fooBaz"] = "bar" + err := cli.SetMetadata(name, metadata) + c.Assert(err, chk.IsNil) + + qm, err := cli.GetMetadata(name) + c.Assert(err, chk.IsNil) + c.Assert(qm.UserDefinedMetadata["foo1"], chk.Equals, "bar1") + c.Assert(qm.UserDefinedMetadata["foobaz"], chk.Equals, "bar") +} + +func (s *StorageQueueSuite) TestQueueExists(c *chk.C) { + cli := getQueueClient(c) + ok, err := cli.QueueExists("nonexistent-queue") + c.Assert(err, chk.IsNil) + c.Assert(ok, chk.Equals, false) + + name := randString(20) + c.Assert(cli.CreateQueue(name), chk.IsNil) + defer cli.DeleteQueue(name) + + ok, err = cli.QueueExists(name) + c.Assert(err, chk.IsNil) + c.Assert(ok, chk.Equals, true) +} + +func (s *StorageQueueSuite) TestPostMessage_PeekMessage_DeleteMessage(c *chk.C) { + q := randString(20) + cli := getQueueClient(c) + c.Assert(cli.CreateQueue(q), chk.IsNil) + defer cli.DeleteQueue(q) + + msg := randString(64 * 1024) // exercise max length + c.Assert(cli.PutMessage(q, msg, PutMessageParameters{}), chk.IsNil) + r, err := cli.PeekMessages(q, PeekMessagesParameters{}) + c.Assert(err, chk.IsNil) + c.Assert(len(r.QueueMessagesList), chk.Equals, 1) + c.Assert(r.QueueMessagesList[0].MessageText, chk.Equals, msg) +} + +func (s *StorageQueueSuite) TestGetMessages(c *chk.C) { + q := randString(20) + cli := getQueueClient(c) + c.Assert(cli.CreateQueue(q), chk.IsNil) + defer cli.DeleteQueue(q) + + n := 4 + for i := 0; i < n; i++ { + c.Assert(cli.PutMessage(q, randString(10), PutMessageParameters{}), chk.IsNil) + } + + r, err := cli.GetMessages(q, GetMessagesParameters{NumOfMessages: n}) + c.Assert(err, chk.IsNil) + c.Assert(len(r.QueueMessagesList), chk.Equals, n) +} + +func (s *StorageQueueSuite) TestDeleteMessages(c *chk.C) { + q := randString(20) + cli := getQueueClient(c) + c.Assert(cli.CreateQueue(q), chk.IsNil) + defer cli.DeleteQueue(q) + + c.Assert(cli.PutMessage(q, "message", PutMessageParameters{}), chk.IsNil) + r, err := cli.GetMessages(q, GetMessagesParameters{VisibilityTimeout: 1}) + c.Assert(err, chk.IsNil) + c.Assert(len(r.QueueMessagesList), chk.Equals, 1) + m := r.QueueMessagesList[0] + c.Assert(cli.DeleteMessage(q, m.MessageID, m.PopReceipt), chk.IsNil) +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/util.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/util.go new file mode 100644 index 000000000..33155af7f --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/storage/util.go @@ -0,0 +1,71 @@ +package storage + +import ( + "bytes" + "crypto/hmac" + "crypto/sha256" + "encoding/base64" + "encoding/xml" + "fmt" + "io" + "io/ioutil" + "net/http" + "net/url" + "time" +) + +func (c Client) computeHmac256(message string) string { + h := hmac.New(sha256.New, c.accountKey) + 
h.Write([]byte(message))
+	return base64.StdEncoding.EncodeToString(h.Sum(nil))
+}
+
+func currentTimeRfc1123Formatted() string {
+	return timeRfc1123Formatted(time.Now().UTC())
+}
+
+func timeRfc1123Formatted(t time.Time) string {
+	return t.Format(http.TimeFormat)
+}
+
+func mergeParams(v1, v2 url.Values) url.Values {
+	out := url.Values{}
+	for k, v := range v1 {
+		out[k] = v
+	}
+	for k, v := range v2 {
+		vals, ok := out[k]
+		if ok {
+			vals = append(vals, v...)
+			out[k] = vals
+		} else {
+			out[k] = v
+		}
+	}
+	return out
+}
+
+func prepareBlockListRequest(blocks []Block) string {
+	s := `<?xml version="1.0" encoding="utf-8"?><BlockList>`
+	for _, v := range blocks {
+		s += fmt.Sprintf("<%s>%s</%s>", v.Status, v.ID, v.Status)
+	}
+	s += `</BlockList>`
+	return s
+}
+
+func xmlUnmarshal(body io.Reader, v interface{}) error {
+	data, err := ioutil.ReadAll(body)
+	if err != nil {
+		return err
+	}
+	return xml.Unmarshal(data, v)
+}
+
+func xmlMarshal(v interface{}) (io.Reader, int, error) {
+	b, err := xml.Marshal(v)
+	if err != nil {
+		return nil, 0, err
+	}
+	return bytes.NewReader(b), len(b), nil
+}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/util_test.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/util_test.go
new file mode 100644
index 000000000..9acbe5537
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/storage/util_test.go
@@ -0,0 +1,69 @@
+package storage
+
+import (
+	"encoding/xml"
+	"io/ioutil"
+	"net/url"
+	"strings"
+	"time"
+
+	chk "github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/gopkg.in/check.v1"
+)
+
+func (s *StorageClientSuite) Test_timeRfc1123Formatted(c *chk.C) {
+	now := time.Now().UTC()
+	expectedLayout := "Mon, 02 Jan 2006 15:04:05 GMT"
+	c.Assert(timeRfc1123Formatted(now), chk.Equals, now.Format(expectedLayout))
+}
+
+func (s *StorageClientSuite) Test_mergeParams(c *chk.C) {
+	v1 := url.Values{
+		"k1": {"v1"},
+		"k2": {"v2"}}
+	v2 := url.Values{
+		"k1": {"v11"},
+		"k3": {"v3"}}
+	out := mergeParams(v1, v2)
+	c.Assert(out.Get("k1"), chk.Equals, "v1")
+	c.Assert(out.Get("k2"), chk.Equals, "v2")
+	c.Assert(out.Get("k3"), chk.Equals, "v3")
+	c.Assert(out["k1"], chk.DeepEquals, []string{"v1", "v11"})
+}
+
+func (s *StorageClientSuite) Test_prepareBlockListRequest(c *chk.C) {
+	empty := []Block{}
+	expected := `<?xml version="1.0" encoding="utf-8"?><BlockList></BlockList>`
+	c.Assert(prepareBlockListRequest(empty), chk.DeepEquals, expected)
+
+	blocks := []Block{{"foo", BlockStatusLatest}, {"bar", BlockStatusUncommitted}}
+	expected = `<?xml version="1.0" encoding="utf-8"?><BlockList><Latest>foo</Latest><Uncommitted>bar</Uncommitted></BlockList>`
+	c.Assert(prepareBlockListRequest(blocks), chk.DeepEquals, expected)
+}
+
+func (s *StorageClientSuite) Test_xmlUnmarshal(c *chk.C) {
+	xml := `<?xml version="1.0" encoding="utf-8"?>
+	<Blob>
+		<Name>myblob</Name>
+	</Blob>`
+	var blob Blob
+	body := ioutil.NopCloser(strings.NewReader(xml))
+	c.Assert(xmlUnmarshal(body, &blob), chk.IsNil)
+	c.Assert(blob.Name, chk.Equals, "myblob")
+}
+
+func (s *StorageClientSuite) Test_xmlMarshal(c *chk.C) {
+	type t struct {
+		XMLName xml.Name `xml:"S"`
+		Name    string   `xml:"Name"`
+	}
+
+	b := t{Name: "myblob"}
+	expected := `<S><Name>myblob</Name></S>`
+	r, i, err := xmlMarshal(b)
+	c.Assert(err, chk.IsNil)
+	o, err := ioutil.ReadAll(r)
+	c.Assert(err, chk.IsNil)
+	out := string(o)
+	c.Assert(out, chk.Equals, expected)
+	c.Assert(i, chk.Equals, len(expected))
+}
diff --git a/vendor/github.com/DreamItGetIT/statuscake/LICENSE b/vendor/github.com/DreamItGetIT/statuscake/LICENSE
new file mode 100644
index 000000000..5442aadb0
--- /dev/null
+++ b/vendor/github.com/DreamItGetIT/statuscake/LICENSE
@@ -0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright (c) 2015 DreamItGetIT
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/vendor/github.com/DreamItGetIT/statuscake/README.md b/vendor/github.com/DreamItGetIT/statuscake/README.md
new file mode 100644
index 000000000..f1e4eafc7
--- /dev/null
+++ b/vendor/github.com/DreamItGetIT/statuscake/README.md
@@ -0,0 +1,5 @@
+# statuscake
+
+`statuscake` is a Go package that implements a client for the [statuscake](https://statuscake.com) API.
+
+More documentation and examples at [http://godoc.org/github.com/DreamItGetIT/statuscake](http://godoc.org/github.com/DreamItGetIT/statuscake).
diff --git a/vendor/github.com/DreamItGetIT/statuscake/client.go b/vendor/github.com/DreamItGetIT/statuscake/client.go
new file mode 100644
index 000000000..db9745a86
--- /dev/null
+++ b/vendor/github.com/DreamItGetIT/statuscake/client.go
@@ -0,0 +1,170 @@
+package statuscake
+
+import (
+	"bytes"
+	"encoding/json"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"net/http"
+	"net/url"
+	"strings"
+)
+
+const apiBaseURL = "https://www.statuscake.com/API"
+
+type responseBody struct {
+	io.Reader
+}
+
+func (r *responseBody) Close() error {
+	return nil
+}
+
+// Auth wraps the authorisation headers required for each request
+type Auth struct {
+	Username string
+	Apikey   string
+}
+
+func (a *Auth) validate() error {
+	e := make(ValidationError)
+
+	if a.Username == "" {
+		e["Username"] = "is required"
+	}
+
+	if a.Apikey == "" {
+		e["Apikey"] = "is required"
+	}
+
+	if len(e) > 0 {
+		return e
+	}
+
+	return nil
+}
+
+type httpClient interface {
+	Do(*http.Request) (*http.Response, error)
+}
+
+type apiClient interface {
+	get(string, url.Values) (*http.Response, error)
+	delete(string, url.Values) (*http.Response, error)
+	put(string, url.Values) (*http.Response, error)
+}
+
+// Client is the http client that wraps the remote API.
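+//
+// A minimal construction and usage sketch (illustrative only; the
+// credentials are placeholders, not working values):
+//
+//   c, err := statuscake.New(statuscake.Auth{Username: "user", Apikey: "key"})
+//   if err != nil {
+//     log.Fatal(err)
+//   }
+//   tests, err := c.Tests().All()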
+type Client struct {
+	c           httpClient
+	username    string
+	apiKey      string
+	testsClient Tests
+}
+
+// New returns a new Client
+func New(auth Auth) (*Client, error) {
+	if err := auth.validate(); err != nil {
+		return nil, err
+	}
+
+	return &Client{
+		c:        &http.Client{},
+		username: auth.Username,
+		apiKey:   auth.Apikey,
+	}, nil
+}
+
+func (c *Client) newRequest(method string, path string, v url.Values, body io.Reader) (*http.Request, error) {
+	url := fmt.Sprintf("%s%s", apiBaseURL, path)
+	if v != nil {
+		url = fmt.Sprintf("%s?%s", url, v.Encode())
+	}
+
+	r, err := http.NewRequest(method, url, body)
+	if err != nil {
+		return nil, err
+	}
+
+	r.Header.Set("Username", c.username)
+	r.Header.Set("API", c.apiKey)
+
+	return r, nil
+}
+
+func (c *Client) doRequest(r *http.Request) (*http.Response, error) {
+	resp, err := c.c.Do(r)
+	if err != nil {
+		return nil, err
+	}
+	defer resp.Body.Close()
+
+	if resp.StatusCode < 200 || resp.StatusCode > 299 {
+		return nil, &httpError{
+			status:     resp.Status,
+			statusCode: resp.StatusCode,
+		}
+	}
+
+	var aer authenticationErrorResponse
+
+	// Read and save the response body so that, if it carries no error
+	// message, it can be restored for later use.
+	b, err := ioutil.ReadAll(resp.Body)
+	if err != nil {
+		return nil, err
+	}
+
+	err = json.Unmarshal(b, &aer)
+	if err == nil && aer.ErrNo == 0 && aer.Error != "" {
+		return nil, &AuthenticationError{
+			errNo:   aer.ErrNo,
+			message: aer.Error,
+		}
+	}
+
+	resp.Body = &responseBody{
+		Reader: bytes.NewReader(b),
+	}
+
+	return resp, nil
+}
+
+func (c *Client) get(path string, v url.Values) (*http.Response, error) {
+	r, err := c.newRequest("GET", path, v, nil)
+	if err != nil {
+		return nil, err
+	}
+
+	return c.doRequest(r)
+}
+
+func (c *Client) put(path string, v url.Values) (*http.Response, error) {
+	r, err := c.newRequest("PUT", path, nil, strings.NewReader(v.Encode()))
+	if err != nil {
+		return nil, err
+	}
+	r.Header.Set("Content-Type", "application/x-www-form-urlencoded")
+
+	return c.doRequest(r)
+}
+
+func (c *Client) delete(path string, v url.Values) (*http.Response, error) {
+	r, err := c.newRequest("DELETE", path, v, nil)
+	if err != nil {
+		return nil, err
+	}
+
+	return c.doRequest(r)
+}
+
+// Tests returns a client that implements the `Tests` API.
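+//
+// The tests client is created lazily and cached on the Client, so repeated
+// calls return the same value. For example (sketch; the ID is a placeholder):
+//
+//   t, err := c.Tests().Detail(1234)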
+func (c *Client) Tests() Tests { + if c.testsClient == nil { + c.testsClient = newTests(c) + } + + return c.testsClient +} diff --git a/vendor/github.com/DreamItGetIT/statuscake/client_test.go b/vendor/github.com/DreamItGetIT/statuscake/client_test.go new file mode 100644 index 000000000..1a65f4946 --- /dev/null +++ b/vendor/github.com/DreamItGetIT/statuscake/client_test.go @@ -0,0 +1,236 @@ +package statuscake + +import ( + "bytes" + "io" + "io/ioutil" + "log" + "net/http" + "net/url" + "os" + "path/filepath" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestAuth_validate(t *testing.T) { + assert := assert.New(t) + require := require.New(t) + + auth := &Auth{} + err := auth.validate() + + require.NotNil(err) + assert.Contains(err.Error(), "Username is required") + assert.Contains(err.Error(), "Apikey is required") + + auth.Username = "foo" + err = auth.validate() + + require.NotNil(err) + assert.Equal("Apikey is required", err.Error()) + + auth.Apikey = "bar" + err = auth.validate() + assert.Nil(err) +} + +func TestClient(t *testing.T) { + require := require.New(t) + assert := assert.New(t) + + c, err := New(Auth{Username: "random-user", Apikey: "my-pass"}) + require.Nil(err) + + assert.Equal("random-user", c.username) + assert.Equal("my-pass", c.apiKey) +} + +func TestClient_newRequest(t *testing.T) { + assert := assert.New(t) + require := require.New(t) + + c, err := New(Auth{Username: "random-user", Apikey: "my-pass"}) + require.Nil(err) + + r, err := c.newRequest("GET", "/hello", nil, nil) + + require.Nil(err) + assert.Equal("GET", r.Method) + assert.Equal("https://www.statuscake.com/API/hello", r.URL.String()) + assert.Equal("random-user", r.Header.Get("Username")) + assert.Equal("my-pass", r.Header.Get("API")) +} + +func TestClient_doRequest(t *testing.T) { + assert := assert.New(t) + require := require.New(t) + + c, err := New(Auth{Username: "random-user", Apikey: "my-pass"}) + require.Nil(err) + + hc := &fakeHTTPClient{StatusCode: 200} + c.c = hc + + req, err := http.NewRequest("GET", "http://example.com/test", nil) + require.Nil(err) + + _, err = c.doRequest(req) + require.Nil(err) + + assert.Len(hc.requests, 1) + assert.Equal("http://example.com/test", hc.requests[0].URL.String()) +} + +func TestClient_doRequest_WithHTTPErrors(t *testing.T) { + assert := assert.New(t) + require := require.New(t) + + c, err := New(Auth{Username: "random-user", Apikey: "my-pass"}) + require.Nil(err) + + hc := &fakeHTTPClient{ + StatusCode: 500, + } + c.c = hc + + req, err := http.NewRequest("GET", "http://example.com/test", nil) + require.Nil(err) + + _, err = c.doRequest(req) + require.NotNil(err) + assert.IsType(&httpError{}, err) +} + +func TestClient_doRequest_HttpAuthenticationErrors(t *testing.T) { + assert := assert.New(t) + require := require.New(t) + + c, err := New(Auth{Username: "random-user", Apikey: "my-pass"}) + require.Nil(err) + + hc := &fakeHTTPClient{ + StatusCode: 200, + Fixture: "auth_error.json", + } + c.c = hc + + req, err := http.NewRequest("GET", "http://example.com/test", nil) + require.Nil(err) + + _, err = c.doRequest(req) + require.NotNil(err) + assert.IsType(&AuthenticationError{}, err) +} + +func TestClient_get(t *testing.T) { + require := require.New(t) + assert := assert.New(t) + + c, err := New(Auth{Username: "random-user", Apikey: "my-pass"}) + require.Nil(err) + + hc := &fakeHTTPClient{} + c.c = hc + + c.get("/hello", nil) + assert.Len(hc.requests, 1) + assert.Equal("GET", hc.requests[0].Method) + 
assert.Equal("https://www.statuscake.com/API/hello", hc.requests[0].URL.String()) +} + +func TestClient_put(t *testing.T) { + require := require.New(t) + assert := assert.New(t) + + c, err := New(Auth{Username: "random-user", Apikey: "my-pass"}) + require.Nil(err) + + hc := &fakeHTTPClient{} + c.c = hc + + v := url.Values{"foo": {"bar"}} + c.put("/hello", v) + assert.Len(hc.requests, 1) + assert.Equal("PUT", hc.requests[0].Method) + assert.Equal("https://www.statuscake.com/API/hello", hc.requests[0].URL.String()) + + b, err := ioutil.ReadAll(hc.requests[0].Body) + require.Nil(err) + assert.Equal("foo=bar", string(b)) +} + +func TestClient_delete(t *testing.T) { + require := require.New(t) + assert := assert.New(t) + + c, err := New(Auth{Username: "random-user", Apikey: "my-pass"}) + require.Nil(err) + + hc := &fakeHTTPClient{} + c.c = hc + + v := url.Values{"foo": {"bar"}} + c.delete("/hello", v) + assert.Len(hc.requests, 1) + assert.Equal("DELETE", hc.requests[0].Method) + assert.Equal("https://www.statuscake.com/API/hello?foo=bar", hc.requests[0].URL.String()) +} + +func TestClient_Tests(t *testing.T) { + require := require.New(t) + assert := assert.New(t) + + c, err := New(Auth{Username: "random-user", Apikey: "my-pass"}) + require.Nil(err) + + expected := &tests{ + client: c, + } + + assert.Equal(expected, c.Tests()) +} + +type fakeBody struct { + io.Reader +} + +func (f *fakeBody) Close() error { + return nil +} + +type fakeHTTPClient struct { + StatusCode int + Fixture string + requests []*http.Request +} + +func (c *fakeHTTPClient) Do(r *http.Request) (*http.Response, error) { + c.requests = append(c.requests, r) + var body []byte + + if c.Fixture != "" { + p := filepath.Join("fixtures", c.Fixture) + f, err := os.Open(p) + if err != nil { + log.Fatal(err) + } + defer f.Close() + + b, err := ioutil.ReadAll(f) + if err != nil { + log.Fatal(err) + } + + body = b + } + + resp := &http.Response{ + StatusCode: c.StatusCode, + Body: &fakeBody{Reader: bytes.NewReader(body)}, + } + + return resp, nil +} diff --git a/vendor/github.com/DreamItGetIT/statuscake/cmd/statuscake/main.go b/vendor/github.com/DreamItGetIT/statuscake/cmd/statuscake/main.go new file mode 100644 index 000000000..dc86b47e0 --- /dev/null +++ b/vendor/github.com/DreamItGetIT/statuscake/cmd/statuscake/main.go @@ -0,0 +1,229 @@ +package main + +import ( + "fmt" + logpkg "log" + "os" + "strconv" + + "github.com/DreamItGetIT/statuscake" +) + +var log *logpkg.Logger + +type command func(*statuscake.Client, ...string) error + +var commands map[string]command + +func init() { + log = logpkg.New(os.Stderr, "", 0) + commands = map[string]command{ + "list": cmdList, + "detail": cmdDetail, + "delete": cmdDelete, + "create": cmdCreate, + "update": cmdUpdate, + } +} + +func colouredStatus(s string) string { + switch s { + case "Up": + return fmt.Sprintf("\033[0;32m%s\033[0m", s) + case "Down": + return fmt.Sprintf("\033[0;31m%s\033[0m", s) + default: + return s + } +} + +func getEnv(name string) string { + v := os.Getenv(name) + if v == "" { + log.Fatalf("`%s` env variable is required", name) + } + + return v +} + +func cmdList(c *statuscake.Client, args ...string) error { + tt := c.Tests() + tests, err := tt.All() + if err != nil { + return err + } + + for _, t := range tests { + var paused string + if t.Paused { + paused = "yes" + } else { + paused = "no" + } + + fmt.Printf("* %d: %s\n", t.TestID, colouredStatus(t.Status)) + fmt.Printf(" WebsiteName: %s\n", t.WebsiteName) + fmt.Printf(" TestType: %s\n", t.TestType) + fmt.Printf(" 
Paused: %s\n", paused) + fmt.Printf(" ContactID: %d\n", t.ContactID) + fmt.Printf(" Uptime: %f\n", t.Uptime) + } + + return nil +} + +func cmdDetail(c *statuscake.Client, args ...string) error { + if len(args) != 1 { + return fmt.Errorf("command `detail` requires a single argument `TestID`") + } + + id, err := strconv.Atoi(args[0]) + if err != nil { + return err + } + + tt := c.Tests() + t, err := tt.Detail(id) + if err != nil { + return err + } + + var paused string + if t.Paused { + paused = "yes" + } else { + paused = "no" + } + + fmt.Printf("* %d: %s\n", t.TestID, colouredStatus(t.Status)) + fmt.Printf(" WebsiteName: %s\n", t.WebsiteName) + fmt.Printf(" WebsiteURL: %s\n", t.WebsiteURL) + fmt.Printf(" PingURL: %s\n", t.PingURL) + fmt.Printf(" TestType: %s\n", t.TestType) + fmt.Printf(" Paused: %s\n", paused) + fmt.Printf(" ContactID: %d\n", t.ContactID) + fmt.Printf(" Uptime: %f\n", t.Uptime) + + return nil +} + +func cmdDelete(c *statuscake.Client, args ...string) error { + if len(args) != 1 { + return fmt.Errorf("command `delete` requires a single argument `TestID`") + } + + id, err := strconv.Atoi(args[0]) + if err != nil { + return err + } + + return c.Tests().Delete(id) +} + +func askString(name string) string { + var v string + + fmt.Printf("%s: ", name) + _, err := fmt.Scanln(&v) + if err != nil { + log.Fatal(err) + } + + return v +} + +func askInt(name string) int { + v := askString(name) + i, err := strconv.Atoi(v) + if err != nil { + log.Fatalf("Invalid number `%s`", v) + } + + return i +} + +func cmdCreate(c *statuscake.Client, args ...string) error { + t := &statuscake.Test{ + WebsiteName: askString("WebsiteName"), + WebsiteURL: askString("WebsiteURL"), + TestType: askString("TestType"), + CheckRate: askInt("CheckRate"), + } + + t2, err := c.Tests().Update(t) + if err != nil { + return err + } + + fmt.Printf("CREATED: \n%+v\n", t2) + + return nil +} + +func cmdUpdate(c *statuscake.Client, args ...string) error { + if len(args) != 1 { + return fmt.Errorf("command `update` requires a single argument `TestID`") + } + + id, err := strconv.Atoi(args[0]) + if err != nil { + return err + } + + tt := c.Tests() + t, err := tt.Detail(id) + if err != nil { + return err + } + + t.TestID = id + t.WebsiteName = askString(fmt.Sprintf("WebsiteName [%s]", t.WebsiteName)) + t.WebsiteURL = askString(fmt.Sprintf("WebsiteURL [%s]", t.WebsiteURL)) + t.TestType = askString(fmt.Sprintf("TestType [%s]", t.TestType)) + t.CheckRate = askInt(fmt.Sprintf("CheckRate [%d]", t.CheckRate)) + + t2, err := c.Tests().Update(t) + if err != nil { + return err + } + + fmt.Printf("UPDATED: \n%+v\n", t2) + + return nil +} + +func usage() { + fmt.Printf("Usage:\n") + fmt.Printf(" %s COMMAND\n", os.Args[0]) + fmt.Printf("Available commands:\n") + for k := range commands { + fmt.Printf(" %+v\n", k) + } +} + +func main() { + username := getEnv("STATUSCAKE_USERNAME") + apikey := getEnv("STATUSCAKE_APIKEY") + + if len(os.Args) < 2 { + usage() + os.Exit(1) + } + + var err error + + c, err := statuscake.New(statuscake.Auth{Username: username, Apikey: apikey}) + if err != nil { + log.Fatal(err) + } + + if cmd, ok := commands[os.Args[1]]; ok { + err = cmd(c, os.Args[2:]...) 
+	} else {
+		err = fmt.Errorf("Unknown command `%s`", os.Args[1])
+	}
+
+	if err != nil {
+		log.Fatalf("Error running command `%s`: %s", os.Args[1], err.Error())
+	}
+}
diff --git a/vendor/github.com/DreamItGetIT/statuscake/doc.go b/vendor/github.com/DreamItGetIT/statuscake/doc.go
new file mode 100644
index 000000000..fa68d94b8
--- /dev/null
+++ b/vendor/github.com/DreamItGetIT/statuscake/doc.go
@@ -0,0 +1,34 @@
+// Package statuscake implements a client for the statuscake.com API.
+//
+// // list all `Tests`
+// c, err := statuscake.New(statuscake.Auth{Username: username, Apikey: apikey})
+// if err != nil {
+//   log.Fatal(err)
+// }
+//
+// tests, err := c.Tests().All()
+// if err != nil {
+//   log.Fatal(err)
+// }
+//
+// // delete a `Test`
+// err = c.Tests().Delete(TestID)
+//
+// // create a test
+// t := &statuscake.Test{
+//   WebsiteName: "Foo",
+//   WebsiteURL: "http://example.com",
+//   ... other required args...
+// }
+//
+// if err = t.Validate(); err != nil {
+//   log.Fatal(err)
+// }
+//
+// t2, err := c.Tests().Update(t)
+// fmt.Printf("New Test created with id: %d\n", t2.TestID)
+//
+// // get Tests details
+// t, err := c.Tests().Detail(id)
+// ...
+package statuscake
diff --git a/vendor/github.com/DreamItGetIT/statuscake/errors.go b/vendor/github.com/DreamItGetIT/statuscake/errors.go
new file mode 100644
index 000000000..4c5199104
--- /dev/null
+++ b/vendor/github.com/DreamItGetIT/statuscake/errors.go
@@ -0,0 +1,80 @@
+package statuscake
+
+import (
+	"fmt"
+	"strings"
+)
+
+// APIError implements the error interface and it's used when the API response has errors.
+type APIError interface {
+	APIError() string
+}
+
+type httpError struct {
+	status     string
+	statusCode int
+}
+
+func (e *httpError) Error() string {
+	return fmt.Sprintf("HTTP error: %d - %s", e.statusCode, e.status)
+}
+
+// ValidationError is a map where the key is the invalid field and the value is a message describing why the field is invalid.
+type ValidationError map[string]string
+
+func (e ValidationError) Error() string {
+	var messages []string
+
+	for k, v := range e {
+		m := fmt.Sprintf("%s %s", k, v)
+		messages = append(messages, m)
+	}
+
+	return strings.Join(messages, ", ")
+}
+
+type updateError struct {
+	Issues interface{}
+}
+
+func (e *updateError) Error() string {
+	var messages []string
+
+	if issues, ok := e.Issues.(map[string]interface{}); ok {
+		for k, v := range issues {
+			m := fmt.Sprintf("%s %s", k, v)
+			messages = append(messages, m)
+		}
+	} else if issues, ok := e.Issues.([]interface{}); ok {
+		for _, v := range issues {
+			m := fmt.Sprint(v)
+			messages = append(messages, m)
+		}
+	}
+
+	return strings.Join(messages, ", ")
+}
+
+// APIError returns the error specified in the API response
+func (e *updateError) APIError() string {
+	return e.Error()
+}
+
+type deleteError struct {
+	Message string
+}
+
+func (e *deleteError) Error() string {
+	return e.Message
+}
+
+// AuthenticationError implements the error interface and it's returned
+// when API responses have authentication errors
+type AuthenticationError struct {
+	errNo   int
+	message string
+}
+
+func (e *AuthenticationError) Error() string {
+	return fmt.Sprintf("%d, %s", e.errNo, e.message)
+}
diff --git a/vendor/github.com/DreamItGetIT/statuscake/fixtures/auth_error.json b/vendor/github.com/DreamItGetIT/statuscake/fixtures/auth_error.json
new file mode 100644
index 000000000..4f14be50d
--- /dev/null
+++ b/vendor/github.com/DreamItGetIT/statuscake/fixtures/auth_error.json
@@ -0,0 +1,4 @@
+{
+  "ErrNo": 0,
+  "Error": "Can not access account.
Was both Username and API Key provided?" +} diff --git a/vendor/github.com/DreamItGetIT/statuscake/fixtures/tests_all_ok.json b/vendor/github.com/DreamItGetIT/statuscake/fixtures/tests_all_ok.json new file mode 100644 index 000000000..449c34e61 --- /dev/null +++ b/vendor/github.com/DreamItGetIT/statuscake/fixtures/tests_all_ok.json @@ -0,0 +1,22 @@ +[ + { + "TestID": 100, + "Paused": false, + "TestType": "HTTP", + "WebsiteName": "www 1", + "ContactGroup": null, + "ContactID": 1, + "Status": "Up", + "Uptime": 100 + }, + { + "TestID": 101, + "Paused": true, + "TestType": "HTTP", + "WebsiteName": "www 2", + "ContactGroup": null, + "ContactID": 2, + "Status": "Down", + "Uptime": 0 + } +] diff --git a/vendor/github.com/DreamItGetIT/statuscake/fixtures/tests_delete_error.json b/vendor/github.com/DreamItGetIT/statuscake/fixtures/tests_delete_error.json new file mode 100644 index 000000000..6fad58e14 --- /dev/null +++ b/vendor/github.com/DreamItGetIT/statuscake/fixtures/tests_delete_error.json @@ -0,0 +1,5 @@ +{ + "Success": false, + "Error": "this is an error" +} + diff --git a/vendor/github.com/DreamItGetIT/statuscake/fixtures/tests_delete_ok.json b/vendor/github.com/DreamItGetIT/statuscake/fixtures/tests_delete_ok.json new file mode 100644 index 000000000..0dd279bf3 --- /dev/null +++ b/vendor/github.com/DreamItGetIT/statuscake/fixtures/tests_delete_ok.json @@ -0,0 +1,6 @@ +{ + "TestID": 6735, + "Affected": 1, + "Success": true, + "Message": "This Check Has Been Deleted. It can not be recovered." +} diff --git a/vendor/github.com/DreamItGetIT/statuscake/fixtures/tests_detail_ok.json b/vendor/github.com/DreamItGetIT/statuscake/fixtures/tests_detail_ok.json new file mode 100644 index 000000000..d5f357924 --- /dev/null +++ b/vendor/github.com/DreamItGetIT/statuscake/fixtures/tests_detail_ok.json @@ -0,0 +1,29 @@ +{ + "TestID": 6735, + "TestType": "HTTP", + "Paused": false, + "WebsiteName": "NL", + "ContactGroup": "StatusCake Alerts", + "ContactID": 536, + "Status": "Up", + "Uptime": 0, + "CheckRate": 60, + "Timeout": 40, + "LogoImage": "", + "WebsiteHost": "Various", + "NodeLocations": [ + "UK", + "JP", + "SG1", + "SLC" + ], + "FindString": "", + "DoNotFind": false, + "LastTested": "2013-01-20 14:38:18", + "NextLocation": "USNY", + "Processing": false, + "ProcessingState": "Pretest", + "ProcessingOn": "dalas.localdomain", + "DownTimes": "0" +} + diff --git a/vendor/github.com/DreamItGetIT/statuscake/fixtures/tests_update_error.json b/vendor/github.com/DreamItGetIT/statuscake/fixtures/tests_update_error.json new file mode 100644 index 000000000..a76c5ebbb --- /dev/null +++ b/vendor/github.com/DreamItGetIT/statuscake/fixtures/tests_update_error.json @@ -0,0 +1,9 @@ +{ + "Issues": { + "WebsiteName": "issue a", + "WebsiteURL": "issue b", + "CheckRate": "issue c" + }, + "Success": false, + "Message": "Required Data is Missing." +} diff --git a/vendor/github.com/DreamItGetIT/statuscake/fixtures/tests_update_error_slice_of_issues.json b/vendor/github.com/DreamItGetIT/statuscake/fixtures/tests_update_error_slice_of_issues.json new file mode 100644 index 000000000..02e98ebe4 --- /dev/null +++ b/vendor/github.com/DreamItGetIT/statuscake/fixtures/tests_update_error_slice_of_issues.json @@ -0,0 +1,5 @@ +{ + "Issues": ["hello", "world"], + "Success": false, + "Message": "Required Data is Missing." 
+}
diff --git a/vendor/github.com/DreamItGetIT/statuscake/fixtures/tests_update_ok.json b/vendor/github.com/DreamItGetIT/statuscake/fixtures/tests_update_ok.json
new file mode 100644
index 000000000..7c2dee20b
--- /dev/null
+++ b/vendor/github.com/DreamItGetIT/statuscake/fixtures/tests_update_ok.json
@@ -0,0 +1,6 @@
+{
+  "Issues": {},
+  "Success": true,
+  "Message": "",
+  "InsertID": 1234
+}
diff --git a/vendor/github.com/DreamItGetIT/statuscake/makefile b/vendor/github.com/DreamItGetIT/statuscake/makefile
new file mode 100644
index 000000000..946f6d993
--- /dev/null
+++ b/vendor/github.com/DreamItGetIT/statuscake/makefile
@@ -0,0 +1,11 @@
+.PHONY: default lint test
+
+default: lint test
+
+lint:
+	@golint ./...
+	@go vet ./...
+
+test:
+	go test ${GOTEST_ARGS} ./...
+
diff --git a/vendor/github.com/DreamItGetIT/statuscake/responses.go b/vendor/github.com/DreamItGetIT/statuscake/responses.go
new file mode 100644
index 000000000..dd3925ef6
--- /dev/null
+++ b/vendor/github.com/DreamItGetIT/statuscake/responses.go
@@ -0,0 +1,66 @@
+package statuscake
+
+type authenticationErrorResponse struct {
+	ErrNo int
+	Error string
+}
+
+type updateResponse struct {
+	Issues   interface{} `json:"Issues"`
+	Success  bool        `json:"Success"`
+	Message  string      `json:"Message"`
+	InsertID int         `json:"InsertID"`
+}
+
+type deleteResponse struct {
+	Success bool   `json:"Success"`
+	Error   string `json:"Error"`
+}
+
+type detailResponse struct {
+	Method          string   `json:"Method"`
+	TestID          int      `json:"TestID"`
+	TestType        string   `json:"TestType"`
+	Paused          bool     `json:"Paused"`
+	WebsiteName     string   `json:"WebsiteName"`
+	URI             string   `json:"URI"`
+	ContactID       int      `json:"ContactID"`
+	Status          string   `json:"Status"`
+	Uptime          float64  `json:"Uptime"`
+	CheckRate       int      `json:"CheckRate"`
+	Timeout         int      `json:"Timeout"`
+	LogoImage       string   `json:"LogoImage"`
+	Confirmation    int      `json:"Confirmation,string"`
+	WebsiteHost     string   `json:"WebsiteHost"`
+	NodeLocations   []string `json:"NodeLocations"`
+	FindString      string   `json:"FindString"`
+	DoNotFind       bool     `json:"DoNotFind"`
+	LastTested      string   `json:"LastTested"`
+	NextLocation    string   `json:"NextLocation"`
+	Processing      bool     `json:"Processing"`
+	ProcessingState string   `json:"ProcessingState"`
+	ProcessingOn    string   `json:"ProcessingOn"`
+	DownTimes       int      `json:"DownTimes,string"`
+	Sensitive       bool     `json:"Sensitive"`
+}
+
+func (d *detailResponse) test() *Test {
+	return &Test{
+		TestID:        d.TestID,
+		TestType:      d.TestType,
+		Paused:        d.Paused,
+		WebsiteName:   d.WebsiteName,
+		WebsiteURL:    d.URI,
+		ContactID:     d.ContactID,
+		Status:        d.Status,
+		Uptime:        d.Uptime,
+		CheckRate:     d.CheckRate,
+		Timeout:       d.Timeout,
+		LogoImage:     d.LogoImage,
+		Confirmation:  d.Confirmation,
+		WebsiteHost:   d.WebsiteHost,
+		NodeLocations: d.NodeLocations,
+		FindString:    d.FindString,
+		DoNotFind:     d.DoNotFind,
+	}
+}
diff --git a/vendor/github.com/DreamItGetIT/statuscake/tests.go b/vendor/github.com/DreamItGetIT/statuscake/tests.go
new file mode 100644
index 000000000..1870a8e6d
--- /dev/null
+++ b/vendor/github.com/DreamItGetIT/statuscake/tests.go
@@ -0,0 +1,298 @@
+package statuscake
+
+import (
+	"encoding/json"
+	"fmt"
+	"net/url"
+	"reflect"
+	"strings"
+)
+
+const queryStringTag = "querystring"
+
+// Test represents a statuscake Test
+type Test struct {
+	// TestID is an int; use it to get more details about this test. If not
+	// provided, a new check is inserted, otherwise the existing one is updated
+	TestID int `json:"TestID" querystring:"TestID" querystringoptions:"omitempty"`
+
+	// Send false to unpause and true to pause.
+	Paused bool `json:"Paused" querystring:"Paused"`
+
+	// Website name. Tags are stripped out
+	WebsiteName string `json:"WebsiteName" querystring:"WebsiteName"`
+
+	// Test location, either an IP (for TCP and Ping) or a fully qualified URL for other TestTypes
+	WebsiteURL string `json:"WebsiteURL" querystring:"WebsiteURL"`
+
+	// A Port to use on TCP Tests
+	Port int `json:"Port" querystring:"Port"`
+
+	// Contact group ID - will return int of contact group used else 0
+	ContactID int `json:"ContactID"`
+
+	// Current status at last test
+	Status string `json:"Status"`
+
+	// 7 Day Uptime
+	Uptime float64 `json:"Uptime"`
+
+	// Any test locations separated by a comma (using the Node Location IDs)
+	NodeLocations []string `json:"NodeLocations" querystring:"NodeLocations"`
+
+	// Timeout in an int form representing seconds.
+	Timeout int `json:"Timeout" querystring:"Timeout"`
+
+	// A URL to ping if a site goes down.
+	PingURL string `json:"PingURL" querystring:"PingURL"`
+
+	Confirmation int `json:"Confirmationi,string" querystring:"Confirmation"`
+
+	// The number of seconds between checks.
+	CheckRate int `json:"CheckRate" querystring:"CheckRate"`
+
+	// A Basic Auth user account to use to log in
+	BasicUser string `json:"BasicUser" querystring:"BasicUser"`
+
+	// If BasicUser is set then this should be the password for the BasicUser
+	BasicPass string `json:"BasicPass" querystring:"BasicPass"`
+
+	// Set 1 to enable public reporting, 0 to disable
+	Public int `json:"Public" querystring:"Public"`
+
+	// A URL to an image to use for public reporting
+	LogoImage string `json:"LogoImage" querystring:"LogoImage"`
+
+	// Set to 0 to use branding (default) or 1 to disable public reporting branding
+	Branding int `json:"Branding" querystring:"Branding"`
+
+	// Used internally by the statuscake API
+	WebsiteHost string `json:"WebsiteHost"`
+
+	// Enable virus checking or not. 1 to enable
+	Virus int `json:"Virus" querystring:"Virus"`
+
+	// A string that should either be found or not found.
+	FindString string `json:"FindString" querystring:"FindString"`
+
+	// Whether finding the above string should trigger an alert. true triggers when FindString is found
+	DoNotFind bool `json:"DoNotFind" querystring:"DoNotFind"`
+
+	// What type of test type to use. Accepted values are HTTP, TCP, PING
+	TestType string `json:"TestType" querystring:"TestType"`
+
+	// Use 1 to TURN OFF real browser testing
+	RealBrowser int `json:"RealBrowser" querystring:"RealBrowser"`
+
+	// How many minutes to wait before sending an alert
+	TriggerRate int `json:"TriggerRate" querystring:"TriggerRate"`
+
+	// Tags should be separated by a comma - no spacing between tags (this,is,a set,of,tags)
+	TestTags string `json:"TestTags" querystring:"TestTags"`
+
+	// Comma separated list of StatusCodes to trigger error on (on Update will replace, so send full list each time)
+	StatusCodes string `json:"StatusCodes" querystring:"StatusCodes"`
+}
+
+// Validate checks if the Test is valid. If it's invalid, it returns a ValidationError with all invalid fields. It returns nil otherwise.
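+//
+// For example (sketch; the field values are made up):
+//
+//   t := &Test{WebsiteName: "Foo", WebsiteURL: "http://example.com", TestType: "HTTP"}
+//   if err := t.Validate(); err != nil {
+//     // err describes every missing or out-of-range field at once
+//   }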
+func (t *Test) Validate() error { + e := make(ValidationError) + + if t.WebsiteName == "" { + e["WebsiteName"] = "is required" + } + + if t.WebsiteURL == "" { + e["WebsiteURL"] = "is required" + } + + if t.Timeout != 0 && (t.Timeout < 6 || t.Timeout > 99) { + e["Timeout"] = "must be 0 or between 6 and 99" + } + + if t.Confirmation < 0 || t.Confirmation > 9 { + e["Confirmation"] = "must be between 0 and 9" + } + + if t.CheckRate < 0 || t.CheckRate > 23999 { + e["CheckRate"] = "must be between 0 and 23999" + } + + if t.Public < 0 || t.Public > 1 { + e["Public"] = "must be 0 or 1" + } + + if t.Virus < 0 || t.Virus > 1 { + e["Virus"] = "must be 0 or 1" + } + + if t.TestType != "HTTP" && t.TestType != "TCP" && t.TestType != "PING" { + e["TestType"] = "must be HTTP, TCP, or PING" + } + + if t.RealBrowser < 0 || t.RealBrowser > 1 { + e["RealBrowser"] = "must be 0 or 1" + } + + if t.TriggerRate < 0 || t.TriggerRate > 59 { + e["TriggerRate"] = "must be between 0 and 59" + } + + if len(e) > 0 { + return e + } + + return nil +} + +// ToURLValues returns url.Values of all fields required to create/update a Test. +func (t Test) ToURLValues() url.Values { + values := make(url.Values) + st := reflect.TypeOf(t) + sv := reflect.ValueOf(t) + for i := 0; i < st.NumField(); i++ { + sf := st.Field(i) + tag := sf.Tag.Get(queryStringTag) + ft := sf.Type + if ft.Name() == "" && ft.Kind() == reflect.Ptr { + // Follow pointer. + ft = ft.Elem() + } + + v := sv.Field(i) + options := sf.Tag.Get("querystringoptions") + omit := options == "omitempty" && isEmptyValue(v) + + if tag != "" && !omit { + values.Set(tag, valueToQueryStringValue(v)) + } + } + + return values +} + +func isEmptyValue(v reflect.Value) bool { + switch v.Kind() { + case reflect.Array, reflect.Map, reflect.Slice, reflect.String: + return v.Len() == 0 + case reflect.Bool: + return !v.Bool() + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return v.Int() == 0 + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return v.Uint() == 0 + case reflect.Float32, reflect.Float64: + return v.Float() == 0 + case reflect.Interface, reflect.Ptr: + return v.IsNil() + } + + return false +} + +func valueToQueryStringValue(v reflect.Value) string { + if v.Type().Name() == "bool" { + if v.Bool() { + return "1" + } + + return "0" + } + + if v.Type().Kind() == reflect.Slice { + if ss, ok := v.Interface().([]string); ok { + return strings.Join(ss, ",") + } + } + + return fmt.Sprint(v) +} + +// Tests is a client that implements the `Tests` API. 
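+//
+// A typical round trip against this interface (illustrative sketch; the
+// test ID is a placeholder):
+//
+//   tt := c.Tests()
+//   t, err := tt.Detail(6735)
+//   t.Paused = true
+//   t, err = tt.Update(t)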
+type Tests interface { + All() ([]*Test, error) + Detail(int) (*Test, error) + Update(*Test) (*Test, error) + Delete(TestID int) error +} + +type tests struct { + client apiClient +} + +func newTests(c apiClient) Tests { + return &tests{ + client: c, + } +} + +func (tt *tests) All() ([]*Test, error) { + resp, err := tt.client.get("/Tests", nil) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + var tests []*Test + err = json.NewDecoder(resp.Body).Decode(&tests) + + return tests, err +} + +func (tt *tests) Update(t *Test) (*Test, error) { + resp, err := tt.client.put("/Tests/Update", t.ToURLValues()) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + var ur updateResponse + err = json.NewDecoder(resp.Body).Decode(&ur) + if err != nil { + return nil, err + } + + if !ur.Success { + return nil, &updateError{Issues: ur.Issues} + } + + t2 := *t + t2.TestID = ur.InsertID + + return &t2, err +} + +func (tt *tests) Delete(testID int) error { + resp, err := tt.client.delete("/Tests/Details", url.Values{"TestID": {fmt.Sprint(testID)}}) + if err != nil { + return err + } + defer resp.Body.Close() + + var dr deleteResponse + err = json.NewDecoder(resp.Body).Decode(&dr) + if err != nil { + return err + } + + if !dr.Success { + return &deleteError{Message: dr.Error} + } + + return nil +} + +func (tt *tests) Detail(testID int) (*Test, error) { + resp, err := tt.client.get("/Tests/Details", url.Values{"TestID": {fmt.Sprint(testID)}}) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + var dr *detailResponse + err = json.NewDecoder(resp.Body).Decode(&dr) + if err != nil { + return nil, err + } + + return dr.test(), nil +} diff --git a/vendor/github.com/DreamItGetIT/statuscake/tests_test.go b/vendor/github.com/DreamItGetIT/statuscake/tests_test.go new file mode 100644 index 000000000..b7147e2c2 --- /dev/null +++ b/vendor/github.com/DreamItGetIT/statuscake/tests_test.go @@ -0,0 +1,327 @@ +package statuscake + +import ( + "log" + "net/http" + "net/url" + "os" + "path/filepath" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestTest_Validate(t *testing.T) { + assert := assert.New(t) + require := require.New(t) + + test := &Test{ + Timeout: 200, + Confirmation: 100, + Public: 200, + Virus: 200, + TestType: "FTP", + RealBrowser: 100, + TriggerRate: 100, + CheckRate: 100000, + WebsiteName: "", + WebsiteURL: "", + } + + err := test.Validate() + require.NotNil(err) + + message := err.Error() + assert.Contains(message, "WebsiteName is required") + assert.Contains(message, "WebsiteURL is required") + assert.Contains(message, "Timeout must be 0 or between 6 and 99") + assert.Contains(message, "Confirmation must be between 0 and 9") + assert.Contains(message, "CheckRate must be between 0 and 23999") + assert.Contains(message, "Public must be 0 or 1") + assert.Contains(message, "Virus must be 0 or 1") + assert.Contains(message, "TestType must be HTTP, TCP, or PING") + assert.Contains(message, "RealBrowser must be 0 or 1") + assert.Contains(message, "TriggerRate must be between 0 and 59") + + test.Timeout = 10 + test.Confirmation = 2 + test.Public = 1 + test.Virus = 1 + test.TestType = "HTTP" + test.RealBrowser = 1 + test.TriggerRate = 50 + test.CheckRate = 10 + test.WebsiteName = "Foo" + test.WebsiteURL = "http://example.com" + + err = test.Validate() + assert.Nil(err) +} + +func TestTest_ToURLValues(t *testing.T) { + assert := assert.New(t) + + test := &Test{ + TestID: 123, + Paused: true, + WebsiteName: 
"Foo Bar", + WebsiteURL: "http://example.com", + Port: 3000, + NodeLocations: []string{"foo", "bar"}, + Timeout: 11, + PingURL: "http://example.com/ping", + Confirmation: 1, + CheckRate: 500, + BasicUser: "myuser", + BasicPass: "mypass", + Public: 1, + LogoImage: "http://example.com/logo.jpg", + Branding: 1, + Virus: 1, + FindString: "hello", + DoNotFind: true, + TestType: "HTTP", + RealBrowser: 1, + TriggerRate: 50, + TestTags: "tag1,tag2", + StatusCodes: "500", + } + + expected := url.Values{ + "TestID": {"123"}, + "Paused": {"1"}, + "WebsiteName": {"Foo Bar"}, + "WebsiteURL": {"http://example.com"}, + "Port": {"3000"}, + "NodeLocations": {"foo,bar"}, + "Timeout": {"11"}, + "PingURL": {"http://example.com/ping"}, + "Confirmation": {"1"}, + "CheckRate": {"500"}, + "BasicUser": {"myuser"}, + "BasicPass": {"mypass"}, + "Public": {"1"}, + "LogoImage": {"http://example.com/logo.jpg"}, + "Branding": {"1"}, + "Virus": {"1"}, + "FindString": {"hello"}, + "DoNotFind": {"1"}, + "TestType": {"HTTP"}, + "RealBrowser": {"1"}, + "TriggerRate": {"50"}, + "TestTags": {"tag1,tag2"}, + "StatusCodes": {"500"}, + } + + assert.Equal(expected, test.ToURLValues()) + + test.TestID = 0 + delete(expected, "TestID") + + assert.Equal(expected.Encode(), test.ToURLValues().Encode()) +} + +func TestTests_All(t *testing.T) { + assert := assert.New(t) + require := require.New(t) + + c := &fakeAPIClient{ + fixture: "tests_all_ok.json", + } + tt := newTests(c) + tests, err := tt.All() + require.Nil(err) + + assert.Equal("/Tests", c.sentRequestPath) + assert.Equal("GET", c.sentRequestMethod) + assert.Nil(c.sentRequestValues) + assert.Len(tests, 2) + + expectedTest := &Test{ + TestID: 100, + Paused: false, + TestType: "HTTP", + WebsiteName: "www 1", + ContactID: 1, + Status: "Up", + Uptime: 100, + } + assert.Equal(expectedTest, tests[0]) + + expectedTest = &Test{ + TestID: 101, + Paused: true, + TestType: "HTTP", + WebsiteName: "www 2", + ContactID: 2, + Status: "Down", + Uptime: 0, + } + assert.Equal(expectedTest, tests[1]) +} + +func TestTests_Update_OK(t *testing.T) { + assert := assert.New(t) + require := require.New(t) + + c := &fakeAPIClient{ + fixture: "tests_update_ok.json", + } + tt := newTests(c) + + test1 := &Test{ + WebsiteName: "foo", + } + + test2, err := tt.Update(test1) + require.Nil(err) + + assert.Equal("/Tests/Update", c.sentRequestPath) + assert.Equal("PUT", c.sentRequestMethod) + assert.NotNil(c.sentRequestValues) + assert.NotNil(test2) + + assert.Equal(1234, test2.TestID) +} + +func TestTests_Update_Error(t *testing.T) { + assert := assert.New(t) + require := require.New(t) + + c := &fakeAPIClient{ + fixture: "tests_update_error.json", + } + tt := newTests(c) + + test1 := &Test{ + WebsiteName: "foo", + } + + test2, err := tt.Update(test1) + assert.Nil(test2) + + require.NotNil(err) + assert.IsType(&updateError{}, err) + assert.Contains(err.Error(), "issue a") +} + +func TestTests_Update_ErrorWithSliceOfIssues(t *testing.T) { + assert := assert.New(t) + require := require.New(t) + + c := &fakeAPIClient{ + fixture: "tests_update_error_slice_of_issues.json", + } + tt := newTests(c) + + test1 := &Test{ + WebsiteName: "foo", + } + + test2, err := tt.Update(test1) + assert.Nil(test2) + + require.NotNil(err) + assert.IsType(&updateError{}, err) + assert.Equal("hello, world", err.Error()) +} + +func TestTests_Delete_OK(t *testing.T) { + assert := assert.New(t) + require := require.New(t) + + c := &fakeAPIClient{ + fixture: "tests_delete_ok.json", + } + tt := newTests(c) + + err := tt.Delete(1234) + 
require.Nil(err) + + assert.Equal("/Tests/Details", c.sentRequestPath) + assert.Equal("DELETE", c.sentRequestMethod) + assert.Equal(url.Values{"TestID": {"1234"}}, c.sentRequestValues) +} + +func TestTests_Delete_Error(t *testing.T) { + assert := assert.New(t) + require := require.New(t) + + c := &fakeAPIClient{ + fixture: "tests_delete_error.json", + } + tt := newTests(c) + + err := tt.Delete(1234) + require.NotNil(err) + assert.Equal("this is an error", err.Error()) +} + +func TestTests_Detail_OK(t *testing.T) { + assert := assert.New(t) + require := require.New(t) + + c := &fakeAPIClient{ + fixture: "tests_detail_ok.json", + } + tt := newTests(c) + + test, err := tt.Detail(1234) + require.Nil(err) + + assert.Equal("/Tests/Details", c.sentRequestPath) + assert.Equal("GET", c.sentRequestMethod) + assert.Equal(url.Values{"TestID": {"1234"}}, c.sentRequestValues) + + assert.Equal(test.TestID, 6735) + assert.Equal(test.TestType, "HTTP") + assert.Equal(test.Paused, false) + assert.Equal(test.WebsiteName, "NL") + assert.Equal(test.ContactID, 536) + assert.Equal(test.Status, "Up") + assert.Equal(test.Uptime, 0.0) + assert.Equal(test.CheckRate, 60) + assert.Equal(test.Timeout, 40) + assert.Equal(test.LogoImage, "") + assert.Equal(test.WebsiteHost, "Various") + assert.Equal(test.FindString, "") + assert.Equal(test.DoNotFind, false) +} + +type fakeAPIClient struct { + sentRequestPath string + sentRequestMethod string + sentRequestValues url.Values + fixture string +} + +func (c *fakeAPIClient) put(path string, v url.Values) (*http.Response, error) { + return c.all("PUT", path, v) +} + +func (c *fakeAPIClient) delete(path string, v url.Values) (*http.Response, error) { + return c.all("DELETE", path, v) +} + +func (c *fakeAPIClient) get(path string, v url.Values) (*http.Response, error) { + return c.all("GET", path, v) +} + +func (c *fakeAPIClient) all(method string, path string, v url.Values) (*http.Response, error) { + c.sentRequestMethod = method + c.sentRequestPath = path + c.sentRequestValues = v + + p := filepath.Join("fixtures", c.fixture) + f, err := os.Open(p) + if err != nil { + log.Fatal(err) + } + + resp := &http.Response{ + Body: f, + } + + return resp, nil +} diff --git a/vendor/github.com/apparentlymart/go-cidr/cidr/cidr.go b/vendor/github.com/apparentlymart/go-cidr/cidr/cidr.go new file mode 100644 index 000000000..1583d6382 --- /dev/null +++ b/vendor/github.com/apparentlymart/go-cidr/cidr/cidr.go @@ -0,0 +1,105 @@ +// Package cidr is a collection of assorted utilities for computing +// network and host addresses within network ranges. +// +// It expects a CIDR-type address structure where addresses are divided into +// some number of prefix bits representing the network and then the remaining +// suffix bits represent the host. +// +// For example, it can help to calculate addresses for sub-networks of a +// parent network, or to calculate host addresses within a particular prefix. +// +// At present this package is prioritizing simplicity of implementation and +// de-prioritizing speed and memory usage. Thus caution is advised before +// using this package in performance-critical applications or hot codepaths. +// Patches to improve the speed and memory usage may be accepted as long as +// they do not result in a significant increase in code complexity. +package cidr + +import ( + "fmt" + "math/big" + "net" +) + +// Subnet takes a parent CIDR range and creates a subnet within it +// with the given number of additional prefix bits and the given +// network number. 
+//
+// For example, 10.3.0.0/16, extended by 8 bits, with a network number
+// of 5, becomes 10.3.5.0/24.
+func Subnet(base *net.IPNet, newBits int, num int) (*net.IPNet, error) {
+	ip := base.IP
+	mask := base.Mask
+
+	parentLen, addrLen := mask.Size()
+	newPrefixLen := parentLen + newBits
+
+	if newPrefixLen > addrLen {
+		return nil, fmt.Errorf("insufficient address space to extend prefix of %d by %d", parentLen, newBits)
+	}
+
+	maxNetNum := uint64(1<<uint64(newBits)) - 1
+	if uint64(num) > maxNetNum {
+		return nil, fmt.Errorf("prefix extension of %d does not accommodate a subnet numbered %d", newBits, num)
+	}
+
+	return &net.IPNet{
+		IP:   insertNumIntoIP(ip, num, newPrefixLen),
+		Mask: net.CIDRMask(newPrefixLen, addrLen),
+	}, nil
+}
+
+// Host takes a parent CIDR range and turns it into a host IP address with
+// the given host number.
+//
+// For example, 10.3.0.0/16 with a host number of 2 gives 10.3.0.2.
+func Host(base *net.IPNet, num int) (net.IP, error) {
+	ip := base.IP
+	mask := base.Mask
+
+	parentLen, addrLen := mask.Size()
+	hostLen := addrLen - parentLen
+
+	maxHostNum := uint64(1<<uint64(hostLen)) - 1
+	if uint64(num) > maxHostNum {
+		return nil, fmt.Errorf("prefix of %d does not accommodate a host numbered %d", parentLen, num)
+	}
+
+	return insertNumIntoIP(ip, num, 32), nil
+}
+
+// AddressRange returns the first and last addresses in the given CIDR range.
+func AddressRange(network *net.IPNet) (net.IP, net.IP) {
+	// the first IP is easy
+	firstIP := network.IP
+
+	// the last IP is the network address OR NOT the mask address
+	prefixLen, bits := network.Mask.Size()
+	if prefixLen == bits {
+		// Easy!
+		// But make sure that our two slices are distinct, since they
+		// would be in all other cases.
+		lastIP := make([]byte, len(firstIP))
+		copy(lastIP, firstIP)
+		return firstIP, lastIP
+	}
+
+	firstIPInt, bits := ipToInt(firstIP)
+	hostLen := uint(bits) - uint(prefixLen)
+	lastIPInt := big.NewInt(1)
+	lastIPInt.Lsh(lastIPInt, hostLen)
+	lastIPInt.Sub(lastIPInt, big.NewInt(1))
+	lastIPInt.Or(lastIPInt, firstIPInt)
+
+	return firstIP, intToIP(lastIPInt, bits)
+}
+
+// AddressCount returns the number of distinct host addresses within the given
+// CIDR range.
+//
+// Since the result is a uint64, this function returns meaningful information
+// only for IPv4 ranges and IPv6 ranges with a prefix size of at least 65.
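+//
+// For example, a /24 IPv4 network yields 1 << (32-24) = 256 addresses,
+// and a /16 yields 65536, matching the cases in the tests below.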
+func AddressCount(network *net.IPNet) uint64 { + prefixLen, bits := network.Mask.Size() + return 1 << (uint64(bits) - uint64(prefixLen)) +} diff --git a/vendor/github.com/apparentlymart/go-cidr/cidr/cidr_test.go b/vendor/github.com/apparentlymart/go-cidr/cidr/cidr_test.go new file mode 100644 index 000000000..f4095b4a0 --- /dev/null +++ b/vendor/github.com/apparentlymart/go-cidr/cidr/cidr_test.go @@ -0,0 +1,238 @@ +package cidr + +import ( + "fmt" + "net" + "testing" +) + +func TestSubnet(t *testing.T) { + type Case struct { + Base string + Bits int + Num int + Output string + Error bool + } + + cases := []Case{ + Case{ + Base: "192.168.2.0/20", + Bits: 4, + Num: 6, + Output: "192.168.6.0/24", + }, + Case{ + Base: "192.168.2.0/20", + Bits: 4, + Num: 0, + Output: "192.168.0.0/24", + }, + Case{ + Base: "192.168.0.0/31", + Bits: 1, + Num: 1, + Output: "192.168.0.1/32", + }, + Case{ + Base: "192.168.0.0/21", + Bits: 4, + Num: 7, + Output: "192.168.3.128/25", + }, + Case{ + Base: "fe80::/48", + Bits: 16, + Num: 6, + Output: "fe80:0:0:6::/64", + }, + Case{ + Base: "fe80::/49", + Bits: 16, + Num: 7, + Output: "fe80:0:0:3:8000::/65", + }, + Case{ + Base: "192.168.2.0/31", + Bits: 2, + Num: 0, + Error: true, // not enough bits to expand into + }, + Case{ + Base: "fe80::/126", + Bits: 4, + Num: 0, + Error: true, // not enough bits to expand into + }, + Case{ + Base: "192.168.2.0/24", + Bits: 4, + Num: 16, + Error: true, // can't fit 16 into 4 bits + }, + } + + for _, testCase := range cases { + _, base, _ := net.ParseCIDR(testCase.Base) + gotNet, err := Subnet(base, testCase.Bits, testCase.Num) + desc := fmt.Sprintf("Subnet(%#v,%#v,%#v)", testCase.Base, testCase.Bits, testCase.Num) + if err != nil { + if !testCase.Error { + t.Errorf("%s failed: %s", desc, err.Error()) + } + } else { + got := gotNet.String() + if testCase.Error { + t.Errorf("%s = %s; want error", desc, got) + } else { + if got != testCase.Output { + t.Errorf("%s = %s; want %s", desc, got, testCase.Output) + } + } + } + } +} + +func TestHost(t *testing.T) { + type Case struct { + Range string + Num int + Output string + Error bool + } + + cases := []Case{ + Case{ + Range: "192.168.2.0/20", + Num: 6, + Output: "192.168.0.6", + }, + Case{ + Range: "192.168.0.0/20", + Num: 257, + Output: "192.168.1.1", + }, + Case{ + Range: "192.168.1.0/24", + Num: 256, + Error: true, // only 0-255 will fit in 8 bits + }, + } + + for _, testCase := range cases { + _, network, _ := net.ParseCIDR(testCase.Range) + gotIP, err := Host(network, testCase.Num) + desc := fmt.Sprintf("Host(%#v,%#v)", testCase.Range, testCase.Num) + if err != nil { + if !testCase.Error { + t.Errorf("%s failed: %s", desc, err.Error()) + } + } else { + got := gotIP.String() + if testCase.Error { + t.Errorf("%s = %s; want error", desc, got) + } else { + if got != testCase.Output { + t.Errorf("%s = %s; want %s", desc, got, testCase.Output) + } + } + } + } +} + +func TestAddressRange(t *testing.T) { + type Case struct { + Range string + First string + Last string + } + + cases := []Case{ + Case{ + Range: "192.168.0.0/16", + First: "192.168.0.0", + Last: "192.168.255.255", + }, + Case{ + Range: "192.168.0.0/17", + First: "192.168.0.0", + Last: "192.168.127.255", + }, + Case{ + Range: "fe80::/64", + First: "fe80::", + Last: "fe80::ffff:ffff:ffff:ffff", + }, + } + + for _, testCase := range cases { + _, network, _ := net.ParseCIDR(testCase.Range) + firstIP, lastIP := AddressRange(network) + desc := fmt.Sprintf("AddressRange(%#v)", testCase.Range) + gotFirstIP := firstIP.String() + 
gotLastIP := lastIP.String() + if gotFirstIP != testCase.First { + t.Errorf("%s first is %s; want %s", desc, gotFirstIP, testCase.First) + } + if gotLastIP != testCase.Last { + t.Errorf("%s last is %s; want %s", desc, gotLastIP, testCase.Last) + } + } + +} + +func TestAddressCount(t *testing.T) { + type Case struct { + Range string + Count uint64 + } + + cases := []Case{ + Case{ + Range: "192.168.0.0/16", + Count: 65536, + }, + Case{ + Range: "192.168.0.0/17", + Count: 32768, + }, + Case{ + Range: "192.168.0.0/32", + Count: 1, + }, + Case{ + Range: "192.168.0.0/31", + Count: 2, + }, + Case{ + Range: "0.0.0.0/0", + Count: 4294967296, + }, + Case{ + Range: "0.0.0.0/1", + Count: 2147483648, + }, + Case{ + Range: "::/65", + Count: 9223372036854775808, + }, + Case{ + Range: "::/128", + Count: 1, + }, + Case{ + Range: "::/127", + Count: 2, + }, + } + + for _, testCase := range cases { + _, network, _ := net.ParseCIDR(testCase.Range) + gotCount := AddressCount(network) + desc := fmt.Sprintf("AddressCount(%#v)", testCase.Range) + if gotCount != testCase.Count { + t.Errorf("%s = %d; want %d", desc, gotCount, testCase.Count) + } + } + +} diff --git a/vendor/github.com/apparentlymart/go-cidr/cidr/wrangling.go b/vendor/github.com/apparentlymart/go-cidr/cidr/wrangling.go new file mode 100644 index 000000000..861a5f623 --- /dev/null +++ b/vendor/github.com/apparentlymart/go-cidr/cidr/wrangling.go @@ -0,0 +1,38 @@ +package cidr + +import ( + "fmt" + "math/big" + "net" +) + +func ipToInt(ip net.IP) (*big.Int, int) { + val := &big.Int{} + val.SetBytes([]byte(ip)) + if len(ip) == net.IPv4len { + return val, 32 + } else if len(ip) == net.IPv6len { + return val, 128 + } else { + panic(fmt.Errorf("Unsupported address length %d", len(ip))) + } +} + +func intToIP(ipInt *big.Int, bits int) net.IP { + ipBytes := ipInt.Bytes() + ret := make([]byte, bits/8) + // Pack our IP bytes into the end of the return array, + // since big.Int.Bytes() removes front zero padding. + for i := 1; i <= len(ipBytes); i++ { + ret[len(ret)-i] = ipBytes[len(ipBytes)-i] + } + return net.IP(ret) +} + +func insertNumIntoIP(ip net.IP, num int, prefixLen int) net.IP { + ipInt, totalBits := ipToInt(ip) + bigNum := big.NewInt(int64(num)) + bigNum.Lsh(bigNum, uint(totalBits-prefixLen)) + ipInt.Or(ipInt, bigNum) + return intToIP(ipInt, totalBits) +} diff --git a/vendor/github.com/apparentlymart/go-rundeck-api/rundeck/client.go b/vendor/github.com/apparentlymart/go-rundeck-api/rundeck/client.go new file mode 100644 index 000000000..b9525ea22 --- /dev/null +++ b/vendor/github.com/apparentlymart/go-rundeck-api/rundeck/client.go @@ -0,0 +1,302 @@ +// Package rundeck provides a client for interacting with a Rundeck instance +// via its HTTP API. +// +// Instantiate a Client with the NewClient function to get started. +// +// At present this package uses Rundeck API version 13. +package rundeck + +import ( + "bytes" + "crypto/tls" + "encoding/xml" + "fmt" + "io/ioutil" + "net/http" + "net/url" + "mime/multipart" + "strings" +) + +// ClientConfig is used with NewClient to specify initialization settings. +type ClientConfig struct { + // The base URL of the Rundeck instance. + BaseURL string + + // The API auth token generated from user settings in the Rundeck UI. + AuthToken string + + // Don't fail if the server uses SSL with an un-verifiable certificate. + // This is not recommended except during development/debugging. + AllowUnverifiedSSL bool +} + +// Client is a Rundeck API client interface. 
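+//
+// A minimal construction sketch (the URL and token are placeholders):
+//
+//   client, err := rundeck.NewClient(&rundeck.ClientConfig{
+//     BaseURL:   "https://rundeck.example.com/",
+//     AuthToken: "...",
+//   })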
+type Client struct {
+	httpClient *http.Client
+	apiURL     *url.URL
+	authToken  string
+}
+
+type request struct {
+	Method    string
+	PathParts []string
+	QueryArgs map[string]string
+	Headers   map[string]string
+	BodyBytes []byte
+}
+
+// NewClient returns a configured Rundeck client.
+func NewClient(config *ClientConfig) (*Client, error) {
+	t := &http.Transport{
+		TLSClientConfig: &tls.Config{
+			InsecureSkipVerify: config.AllowUnverifiedSSL,
+		},
+	}
+	httpClient := &http.Client{
+		Transport: t,
+	}
+
+	apiPath, _ := url.Parse("api/13/")
+	baseURL, err := url.Parse(config.BaseURL)
+	if err != nil {
+		return nil, fmt.Errorf("invalid base URL: %s", err.Error())
+	}
+	apiURL := baseURL.ResolveReference(apiPath)
+
+	return &Client{
+		httpClient: httpClient,
+		apiURL:     apiURL,
+		authToken:  config.AuthToken,
+	}, nil
+}
+
+func (c *Client) rawRequest(req *request) ([]byte, error) {
+	res, err := c.httpClient.Do(req.MakeHTTPRequest(c))
+	if err != nil {
+		return nil, err
+	}
+
+	resBodyBytes, err := ioutil.ReadAll(res.Body)
+	if err != nil {
+		return nil, err
+	}
+
+	if res.StatusCode == 404 {
+		return nil, &NotFoundError{}
+	}
+
+	if res.StatusCode < 200 || res.StatusCode >= 300 {
+		if strings.HasPrefix(res.Header.Get("Content-Type"), "text/xml") {
+			var richErr Error
+			err = xml.Unmarshal(resBodyBytes, &richErr)
+			if err != nil {
+				return nil, fmt.Errorf("HTTP Error %d with error decoding XML body: %s", res.StatusCode, err.Error())
+			}
+			return nil, richErr
+		}
+
+		return nil, fmt.Errorf("HTTP Error %d", res.StatusCode)
+	}
+
+	// Successful responses other than 200 and 201 (e.g. 204 No Content)
+	// carry no body for the caller to decode.
+	if res.StatusCode != 200 && res.StatusCode != 201 {
+		return nil, nil
+	}
+
+	return resBodyBytes, nil
+}
+
+func (c *Client) xmlRequest(method string, pathParts []string, query map[string]string, reqBody interface{}, result interface{}) error {
+	var err error
+	var reqBodyBytes []byte
+	if reqBody != nil {
+		reqBodyBytes, err = xml.Marshal(reqBody)
+		if err != nil {
+			return err
+		}
+	}
+
+	req := &request{
+		Method:    method,
+		PathParts: pathParts,
+		QueryArgs: query,
+		BodyBytes: reqBodyBytes,
+		Headers: map[string]string{
+			"Accept": "application/xml",
+		},
+	}
+
+	if reqBody != nil {
+		req.Headers["Content-Type"] = "application/xml"
+	}
+
+	resBodyBytes, err := c.rawRequest(req)
+	if err != nil {
+		return err
+	}
+
+	if result != nil {
+		if resBodyBytes == nil {
+			return fmt.Errorf("server did not return an XML payload")
+		}
+		err = xml.Unmarshal(resBodyBytes, result)
+		if err != nil {
+			return fmt.Errorf("error decoding response XML payload: %s", err.Error())
+		}
+	}
+
+	return nil
+}
+
+func (c *Client) get(pathParts []string, query map[string]string, result interface{}) error {
+	return c.xmlRequest("GET", pathParts, query, nil, result)
+}
+
+func (c *Client) rawGet(pathParts []string, query map[string]string, accept string) (string, error) {
+	req := &request{
+		Method:    "GET",
+		PathParts: pathParts,
+		QueryArgs: query,
+		Headers: map[string]string{
+			"Accept": accept,
+		},
+	}
+
+	resBodyBytes, err := c.rawRequest(req)
+	if err != nil {
+		return "", err
+	}
+
+	return string(resBodyBytes), nil
+}
+
+func (c *Client) post(pathParts []string, query map[string]string, reqBody interface{}, result interface{}) error {
+	return c.xmlRequest("POST", pathParts, query, reqBody, result)
+}
+
+func (c *Client) put(pathParts []string, reqBody interface{}, result interface{}) error {
+	return c.xmlRequest("PUT", pathParts, nil, reqBody, result)
+}
+
+func (c *Client) delete(pathParts []string) error {
+	return c.xmlRequest("DELETE", pathParts, nil, nil, nil)
+}
+
+func (c *Client) postXMLBatch(pathParts []string, args map[string]string, xmlBatch interface{}, result interface{}) error {
+	req := &http.Request{
+		Method: "POST",
+		Header: http.Header{},
+	}
+	req.Header.Add("User-Agent", "Go-Rundeck-API")
+	req.Header.Add("X-Rundeck-Auth-Token", c.authToken)
+
+	urlPath := &url.URL{
+		Path: strings.Join(pathParts, "/"),
+	}
+	reqURL := c.apiURL.ResolveReference(urlPath)
+	req.URL = reqURL
+
+	buf := bytes.Buffer{}
+	writer := multipart.NewWriter(&buf)
+	for k, v := range args {
+		err := writer.WriteField(k, v)
+		if err != nil {
+			return err
+		}
+	}
+	partWriter, err := writer.CreateFormFile("xmlBatch", "batch.xml")
+	if err != nil {
+		return err
+	}
+
+	reqBodyBytes, err := xml.Marshal(xmlBatch)
+	if err != nil {
+		return err
+	}
+
+	_, err = partWriter.Write(reqBodyBytes)
+	if err != nil {
+		return err
+	}
+
+	writer.Close()
+
+	reqBodyReader := bytes.NewReader(buf.Bytes())
+	req.Body = ioutil.NopCloser(reqBodyReader)
+	req.ContentLength = int64(buf.Len())
+	req.Header.Add("Content-Type", writer.FormDataContentType())
+
+	res, err := c.httpClient.Do(req)
+	if err != nil {
+		return err
+	}
+
+	resBodyBytes, err := ioutil.ReadAll(res.Body)
+	if err != nil {
+		return err
+	}
+
+	if res.StatusCode < 200 || res.StatusCode >= 300 {
+		if strings.HasPrefix(res.Header.Get("Content-Type"), "text/xml") {
+			var richErr Error
+			err = xml.Unmarshal(resBodyBytes, &richErr)
+			if err != nil {
+				return fmt.Errorf("HTTP Error %d with error decoding XML body: %s", res.StatusCode, err.Error())
+			}
+			return richErr
+		}
+
+		return fmt.Errorf("HTTP Error %d", res.StatusCode)
+	}
+
+	if result != nil {
+		if res.StatusCode != 200 && res.StatusCode != 201 {
+			return fmt.Errorf("server did not return an XML payload")
+		}
+		err = xml.Unmarshal(resBodyBytes, result)
+		if err != nil {
+			return fmt.Errorf("error decoding response XML payload: %s", err.Error())
+		}
+	}
+
+	return nil
+}
+
+func (r *request) MakeHTTPRequest(client *Client) *http.Request {
+	req := &http.Request{
+		Method: r.Method,
+		Header: http.Header{},
+	}
+
+	// Automatic/mandatory HTTP headers first
+	req.Header.Add("User-Agent", "Go-Rundeck-API")
+	req.Header.Add("X-Rundeck-Auth-Token", client.authToken)
+
+	for k, v := range r.Headers {
+		req.Header.Add(k, v)
+	}
+
+	urlPath := &url.URL{
+		Path: strings.Join(r.PathParts, "/"),
+	}
+	reqURL := client.apiURL.ResolveReference(urlPath)
+	req.URL = reqURL
+
+	if len(r.QueryArgs) > 0 {
+		urlQuery := url.Values{}
+		for k, v := range r.QueryArgs {
+			urlQuery.Add(k, v)
+		}
+		reqURL.RawQuery = urlQuery.Encode()
+	}
+
+	if r.BodyBytes != nil {
+		req.Body = ioutil.NopCloser(bytes.NewReader(r.BodyBytes))
+		req.ContentLength = int64(len(r.BodyBytes))
+	}
+
+	return req
+}
diff --git a/vendor/github.com/apparentlymart/go-rundeck-api/rundeck/error.go b/vendor/github.com/apparentlymart/go-rundeck-api/rundeck/error.go
new file mode 100644
index 000000000..223d3889b
--- /dev/null
+++ b/vendor/github.com/apparentlymart/go-rundeck-api/rundeck/error.go
@@ -0,0 +1,21 @@
+package rundeck
+
+import "encoding/xml"
+
+// Error implements the error interface for a Rundeck API error that was
+// returned from the server as XML.
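+//
+// Callers can type-assert to tell a structured server error apart from
+// other failures; a brief sketch:
+//
+//    if err != nil {
+//        if apiErr, ok := err.(Error); ok {
+//            // the server returned a structured error document
+//            fmt.Println(apiErr.Message)
+//        }
+//    }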
+type Error struct { + XMLName xml.Name `xml:"result"` + IsError bool `xml:"error,attr"` + Message string `xml:"error>message"` +} + +func (err Error) Error() string { + return err.Message +} + +type NotFoundError struct {} + +func (err NotFoundError) Error() string { + return "not found" +} diff --git a/vendor/github.com/apparentlymart/go-rundeck-api/rundeck/job.go b/vendor/github.com/apparentlymart/go-rundeck-api/rundeck/job.go new file mode 100644 index 000000000..ca04ac35a --- /dev/null +++ b/vendor/github.com/apparentlymart/go-rundeck-api/rundeck/job.go @@ -0,0 +1,328 @@ +package rundeck + +import ( + "encoding/xml" + "fmt" + "strings" +) + +// JobSummary is an abbreviated description of a job that includes only its basic +// descriptive information and identifiers. +type JobSummary struct { + XMLName xml.Name `xml:"job"` + ID string `xml:"id,attr"` + Name string `xml:"name"` + GroupName string `xml:"group"` + ProjectName string `xml:"project"` + Description string `xml:"description,omitempty"` +} + +type jobSummaryList struct { + XMLName xml.Name `xml:"jobs"` + Jobs []JobSummary `xml:"job"` +} + +// JobDetail is a comprehensive description of a job, including its entire definition. +type JobDetail struct { + XMLName xml.Name `xml:"job"` + ID string `xml:"uuid,omitempty"` + Name string `xml:"name"` + GroupName string `xml:"group,omitempty"` + ProjectName string `xml:"context>project,omitempty"` + OptionsConfig *JobOptions `xml:"context>options,omitempty"` + Description string `xml:"description,omitempty"` + LogLevel string `xml:"loglevel,omitempty"` + AllowConcurrentExecutions bool `xml:"multipleExecutions"` + Dispatch *JobDispatch `xml:"dispatch"` + CommandSequence *JobCommandSequence `xml:"sequence,omitempty"` + NodeFilter *JobNodeFilter `xml:"nodefilters,omitempty"` +} + +type jobDetailList struct { + XMLName xml.Name `xml:"joblist"` + Jobs []JobDetail `xml:"job"` +} + +// JobOptions represents the set of options on a job, if any. +type JobOptions struct { + PreserveOrder bool `xml:"preserveOrder,attr,omitempty"` + Options []JobOption `xml:"option"` +} + +// JobOption represents a single option on a job. +type JobOption struct { + XMLName xml.Name `xml:"option"` + + // The name of the option, which can be used to interpolate its value + // into job commands. + Name string `xml:"name,attr,omitempty"` + + // The default value of the option. + DefaultValue string `xml:"value,attr,omitempty"` + + // A sequence of predefined choices for this option. Mutually exclusive with ValueChoicesURL. + ValueChoices JobValueChoices `xml:"values,attr"` + + // A URL from which the predefined choices for this option will be retrieved. + // Mutually exclusive with ValueChoices + ValueChoicesURL string `xml:"valuesUrl,attr,omitempty"` + + // If set, Rundeck will reject values that are not in the set of predefined choices. + RequirePredefinedChoice bool `xml:"enforcedvalues,attr,omitempty"` + + // Regular expression to be used to validate the option value. + ValidationRegex string `xml:"regex,attr,omitempty"` + + // Description of the value to be shown in the Rundeck UI. + Description string `xml:"description,omitempty"` + + // If set, Rundeck requires a value to be set for this option. + IsRequired bool `xml:"required,attr,omitempty"` + + // When either ValueChoices or ValueChoicesURL is set, controls whether more than one + // choice may be selected as the value. 
+ AllowsMultipleValues bool `xml:"multivalued,attr,omitempty"` + + // If AllowsMultipleChoices is set, the string that will be used to delimit the multiple + // chosen options. + MultiValueDelimiter string `xml:"delimeter,attr,omitempty"` + + // If set, the input for this field will be obscured in the UI. Useful for passwords + // and other secrets. + ObscureInput bool `xml:"secure,attr,omitempty"` + + // If set, the value can be accessed from scripts. + ValueIsExposedToScripts bool `xml:"valueExposed,attr,omitempty"` +} + +// JobValueChoices is a specialization of []string representing a sequence of predefined values +// for a job option. +type JobValueChoices []string + +// JobCommandSequence describes the sequence of operations that a job will perform. +type JobCommandSequence struct { + XMLName xml.Name `xml:"sequence"` + + // If set, Rundeck will continue with subsequent commands after a command fails. + ContinueOnError bool `xml:"keepgoing,attr"` + + // Chooses the strategy by which Rundeck will execute commands. Can either be "node-first" or + // "step-first". + OrderingStrategy string `xml:"strategy,attr,omitempty"` + + // Sequence of commands to run in the sequence. + Commands []JobCommand `xml:"command"` +} + +// JobCommand describes a particular command to run within the sequence of commands on a job. +// The members of this struct are mutually-exclusive except for the pair of ScriptFile and +// ScriptFileArgs. +type JobCommand struct { + XMLName xml.Name + + // A literal shell command to run. + ShellCommand string `xml:"exec,omitempty"` + + // An inline program to run. This will be written to disk and executed, so if it is + // a shell script it should have an appropriate #! line. + Script string `xml:"script,omitempty"` + + // A pre-existing file (on the target nodes) that will be executed. + ScriptFile string `xml:"scriptfile,omitempty"` + + // When ScriptFile is set, the arguments to provide to the script when executing it. + ScriptFileArgs string `xml:"scriptargs,omitempty"` + + // A reference to another job to run as this command. + Job *JobCommandJobRef `xml:"jobref"` + + // Configuration for a step plugin to run as this command. + StepPlugin *JobPlugin `xml:"step-plugin"` + + // Configuration for a node step plugin to run as this command. + NodeStepPlugin *JobPlugin `xml:"node-step-plugin"` +} + +// JobCommandJobRef is a reference to another job that will run as one of the commands of a job. +type JobCommandJobRef struct { + XMLName xml.Name `xml:"jobref"` + Name string `xml:"name,attr"` + GroupName string `xml:"group,attr"` + RunForEachNode bool `xml:"nodeStep,attr"` + Arguments JobCommandJobRefArguments `xml:"arg"` +} + +// JobCommandJobRefArguments is a string representing the arguments in a JobCommandJobRef. +type JobCommandJobRefArguments string + +// JobPlugin is a configuration for a plugin to run within a job. +type JobPlugin struct { + XMLName xml.Name + Type string `xml:"type,attr"` + Config JobPluginConfig `xml:"configuration"` +} + +// JobPluginConfig is a specialization of map[string]string for job plugin configuration. +type JobPluginConfig map[string]string + +// JobNodeFilter describes which nodes from the project's resource list will run the configured +// commands. 
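+//
+// A hedged sketch of how a filter might be attached to a job definition
+// (the tag expression below is illustrative only):
+//
+//    detail.NodeFilter = &JobNodeFilter{
+//        Query: "tags: web",
+//    }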
+type JobNodeFilter struct { + ExcludePrecedence bool `xml:"excludeprecedence"` + Query string `xml:"filter,omitempty"` +} + +type jobImportResults struct { + Succeeded jobImportResultsCategory `xml:"succeeded"` + Failed jobImportResultsCategory `xml:"failed"` + Skipped jobImportResultsCategory `xml:"skipped"` +} + +type jobImportResultsCategory struct { + Count int `xml:"count,attr"` + Results []jobImportResult `xml:"job"` +} + +type jobImportResult struct { + ID string `xml:"id,omitempty"` + Name string `xml:"name"` + GroupName string `xml:"group,omitempty"` + ProjectName string `xml:"context>project,omitempty"` + Error string `xml:"error"` +} + +type JobDispatch struct { + MaxThreadCount int `xml:"threadcount,omitempty"` + ContinueOnError bool `xml:"keepgoing"` + RankAttribute string `xml:"rankAttribute,omitempty"` + RankOrder string `xml:"rankOrder,omitempty"` +} + +// GetJobSummariesForProject returns summaries of the jobs belonging to the named project. +func (c *Client) GetJobSummariesForProject(projectName string) ([]JobSummary, error) { + jobList := &jobSummaryList{} + err := c.get([]string{"project", projectName, "jobs"}, nil, jobList) + return jobList.Jobs, err +} + +// GetJobsForProject returns the full job details of the jobs belonging to the named project. +func (c *Client) GetJobsForProject(projectName string) ([]JobDetail, error) { + jobList := &jobDetailList{} + err := c.get([]string{"jobs", "export"}, map[string]string{"project": projectName}, jobList) + if err != nil { + return nil, err + } + return jobList.Jobs, nil +} + +// GetJob returns the full job details of the job with the given id. +func (c *Client) GetJob(id string) (*JobDetail, error) { + jobList := &jobDetailList{} + err := c.get([]string{"job", id}, nil, jobList) + if err != nil { + return nil, err + } + return &jobList.Jobs[0], nil +} + +// CreateJob creates a new job based on the provided structure. +func (c *Client) CreateJob(job *JobDetail) (*JobSummary, error) { + return c.importJob(job, "create") +} + +// CreateOrUpdateJob takes a job detail structure which has its ID set and either updates +// an existing job with the same id or creates a new job with that id. +func (c *Client) CreateOrUpdateJob(job *JobDetail) (*JobSummary, error) { + return c.importJob(job, "update") +} + +func (c *Client) importJob(job *JobDetail, dupeOption string) (*JobSummary, error) { + jobList := &jobDetailList{ + Jobs: []JobDetail{*job}, + } + args := map[string]string{ + "format": "xml", + "dupeOption": dupeOption, + "uuidOption": "preserve", + } + result := &jobImportResults{} + err := c.postXMLBatch([]string{"jobs", "import"}, args, jobList, result) + if err != nil { + return nil, err + } + + if result.Failed.Count > 0 { + errMsg := result.Failed.Results[0].Error + return nil, fmt.Errorf(errMsg) + } + + if result.Succeeded.Count != 1 { + // Should never happen, since we send nothing in the request + // that should cause a job to be skipped. + return nil, fmt.Errorf("job was skipped") + } + + return result.Succeeded.Results[0].JobSummary(), nil +} + +// DeleteJob deletes the job with the given id. 
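+//
+// A brief sketch (the UUID is a hypothetical placeholder):
+//
+//    err := client.DeleteJob("6ce46dd4-3fb8-4697-ba27-3e936a7b9d5a")
+//    if _, gone := err.(*NotFoundError); gone {
+//        // the job did not exist in the first place
+//    }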
+func (c *Client) DeleteJob(id string) error {
+	return c.delete([]string{"job", id})
+}
+
+func (c JobValueChoices) MarshalXMLAttr(name xml.Name) (xml.Attr, error) {
+	if len(c) > 0 {
+		return xml.Attr{Name: name, Value: strings.Join(c, ",")}, nil
+	}
+	return xml.Attr{}, nil
+}
+
+func (c *JobValueChoices) UnmarshalXMLAttr(attr xml.Attr) error {
+	values := strings.Split(attr.Value, ",")
+	*c = values
+	return nil
+}
+
+func (a JobCommandJobRefArguments) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
+	start.Attr = []xml.Attr{
+		xml.Attr{Name: xml.Name{Local: "line"}, Value: string(a)},
+	}
+	e.EncodeToken(start)
+	e.EncodeToken(xml.EndElement{Name: start.Name})
+	return nil
+}
+
+func (a *JobCommandJobRefArguments) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error {
+	type jobRefArgs struct {
+		Line string `xml:"line,attr"`
+	}
+	args := jobRefArgs{}
+	d.DecodeElement(&args, &start)
+
+	*a = JobCommandJobRefArguments(args.Line)
+
+	return nil
+}
+
+func (c JobPluginConfig) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
+	rc := map[string]string(c)
+	return marshalMapToXML(&rc, e, start, "entry", "key", "value")
+}
+
+func (c *JobPluginConfig) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error {
+	rc := (*map[string]string)(c)
+	return unmarshalMapFromXML(rc, d, start, "entry", "key", "value")
+}
+
+// JobSummary produces a JobSummary instance with values populated from the import result.
+// The summary object won't have its Description populated, since import results do not
+// include descriptions.
+func (r *jobImportResult) JobSummary() *JobSummary {
+	return &JobSummary{
+		ID:          r.ID,
+		Name:        r.Name,
+		GroupName:   r.GroupName,
+		ProjectName: r.ProjectName,
+	}
+}
diff --git a/vendor/github.com/apparentlymart/go-rundeck-api/rundeck/job_test.go b/vendor/github.com/apparentlymart/go-rundeck-api/rundeck/job_test.go
new file mode 100644
index 000000000..3229a689d
--- /dev/null
+++ b/vendor/github.com/apparentlymart/go-rundeck-api/rundeck/job_test.go
@@ -0,0 +1,113 @@
+package rundeck
+
+import (
+	"fmt"
+	"testing"
+)
+
+func TestUnmarshalJobDetail(t *testing.T) {
+	testUnmarshalXML(t, []unmarshalTest{
+		unmarshalTest{
+			"with-config",
+			`<job><uuid>baz</uuid><dispatch><rankOrder>ascending</rankOrder></dispatch></job>`,
+			&JobDetail{},
+			func(rv interface{}) error {
+				v := rv.(*JobDetail)
+				if v.ID != "baz" {
+					return fmt.Errorf("got ID %s, but expecting baz", v.ID)
+				}
+				if v.Dispatch.RankOrder != "ascending" {
+					return fmt.Errorf("Dispatch.RankOrder = \"%v\", but expecting \"ascending\"", v.Dispatch.RankOrder)
+				}
+				return nil
+			},
+		},
+		unmarshalTest{
+			"with-empty-config",
+			`<node-step-plugin type="foo-plugin"><configuration/></node-step-plugin>`,
+			&JobPlugin{},
+			func(rv interface{}) error {
+				v := rv.(*JobPlugin)
+				if v.Type != "foo-plugin" {
+					return fmt.Errorf("got Type %s, but expecting foo-plugin", v.Type)
+				}
+				if len(v.Config) != 0 {
+					return fmt.Errorf("got %d Config values, but expecting 0", len(v.Config))
+				}
+				return nil
+			},
+		},
+	})
+}
+
+func TestMarshalJobPlugin(t *testing.T) {
+	testMarshalXML(t, []marshalTest{
+		marshalTest{
+			"with-config",
+			JobPlugin{
+				Type: "foo-plugin",
+				Config: map[string]string{
+					"woo": "foo",
+					"bar": "baz",
+				},
+			},
+			`<JobPlugin type="foo-plugin"><configuration><entry key="bar" value="baz"></entry><entry key="woo" value="foo"></entry></configuration></JobPlugin>`,
+		},
+		marshalTest{
+			"with-empty-config",
+			JobPlugin{
+				Type:   "foo-plugin",
+				Config: map[string]string{},
+			},
+			`<JobPlugin type="foo-plugin"></JobPlugin>`,
+		},
+		marshalTest{
+			"with-zero-value-config",
+			JobPlugin{
+				Type: "foo-plugin",
+			},
+			`<JobPlugin type="foo-plugin"></JobPlugin>`,
+		},
+	})
+}
+
+func TestUnmarshalJobPlugin(t *testing.T) {
+	testUnmarshalXML(t, []unmarshalTest{
+		unmarshalTest{
+			"with-config",
+			`<node-step-plugin type="foo-plugin"><configuration><entry key="woo" value="foo"/><entry key="bar" value="baz"/></configuration></node-step-plugin>`,
+			&JobPlugin{},
+			func(rv interface{}) error {
+				v := rv.(*JobPlugin)
+				if v.Type != "foo-plugin" {
+					return fmt.Errorf("got Type %s, but expecting foo-plugin", v.Type)
+				}
+				if len(v.Config) != 2 {
+					return fmt.Errorf("got %v Config values, but expecting 2", len(v.Config))
+				}
+				if v.Config["woo"] != "foo" {
+					return fmt.Errorf("Config[\"woo\"] = \"%s\", but expecting \"foo\"", v.Config["woo"])
+				}
+				if v.Config["bar"] != "baz" {
+					return fmt.Errorf("Config[\"bar\"] = \"%s\", but expecting \"baz\"", v.Config["bar"])
+				}
+				return nil
+			},
+		},
+		unmarshalTest{
+			"with-empty-config",
+			`<node-step-plugin type="foo-plugin"><configuration/></node-step-plugin>`,
+			&JobPlugin{},
+			func(rv interface{}) error {
+				v := rv.(*JobPlugin)
+				if v.Type != "foo-plugin" {
+					return fmt.Errorf("got Type %s, but expecting foo-plugin", v.Type)
+				}
+				if len(v.Config) != 0 {
+					return fmt.Errorf("got %d Config values, but expecting 0", len(v.Config))
+				}
+				return nil
+			},
+		},
+	})
+}
diff --git a/vendor/github.com/apparentlymart/go-rundeck-api/rundeck/key.go b/vendor/github.com/apparentlymart/go-rundeck-api/rundeck/key.go
new file mode 100644
index 000000000..c14725671
--- /dev/null
+++ b/vendor/github.com/apparentlymart/go-rundeck-api/rundeck/key.go
@@ -0,0 +1,81 @@
+package rundeck
+
+// KeyMeta is the metadata associated with a resource in the Rundeck key store.
+type KeyMeta struct {
+	XMLName                string `xml:"resource"`
+	Name                   string `xml:"name,attr,omitempty"`
+	Path                   string `xml:"path,attr,omitempty"`
+	ResourceType           string `xml:"type,attr,omitempty"`
+	URL                    string `xml:"url,attr,omitempty"`
+	ContentType            string `xml:"resource-meta>Rundeck-content-type"`
+	ContentSize            string `xml:"resource-meta>Rundeck-content-size"`
+	ContentMask            string `xml:"resource-meta>Rundeck-content-mask"`
+	KeyType                string `xml:"resource-meta>Rundeck-key-type"`
+	LastModifiedByUserName string `xml:"resource-meta>Rundeck-auth-modified-username"`
+	CreatedByUserName      string `xml:"resource-meta>Rundeck-auth-created-username"`
+	CreatedTimestamp       string `xml:"resource-meta>Rundeck-content-creation-time"`
+	LastModifiedTimestamp  string `xml:"resource-meta>Rundeck-content-modify-time"`
+}
+
+type keyMetaListContents struct {
+	Keys []KeyMeta `xml:"contents>resource"`
+}
+
+// GetKeyMeta returns the metadata for the key at the given keystore path.
+func (c *Client) GetKeyMeta(path string) (*KeyMeta, error) {
+	k := &KeyMeta{}
+	err := c.get([]string{"storage", "keys", path}, nil, k)
+	return k, err
+}
+
+// GetKeysInDirMeta returns the metadata for the keys and subdirectories within
+// the directory at the given keystore path.
+func (c *Client) GetKeysInDirMeta(path string) ([]KeyMeta, error) {
+	r := &keyMetaListContents{}
+	err := c.get([]string{"storage", "keys", path}, nil, r)
+	if err != nil {
+		return nil, err
+	}
+	return r.Keys, nil
+}
+
+// GetKeyContent retrieves and returns the content of the key at the given keystore path.
+// Private keys are write-only, so they cannot be retrieved via this interface.
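+//
+// A minimal sketch (the key path is a hypothetical example):
+//
+//    pubKey, err := client.GetKeyContent("deploy/app.pub")
+//    if err == nil {
+//        fmt.Print(pubKey)
+//    }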
+func (c *Client) GetKeyContent(path string) (string, error) { + return c.rawGet([]string{"storage", "keys", path}, nil, "application/pgp-keys") +} + +func (c *Client) CreatePublicKey(path string, content string) error { + return c.createOrReplacePublicKey("POST", path, "application/pgp-keys", content) +} + +func (c *Client) ReplacePublicKey(path string, content string) error { + return c.createOrReplacePublicKey("PUT", path, "application/pgp-keys", content) +} + +func (c *Client) CreatePrivateKey(path string, content string) error { + return c.createOrReplacePublicKey("POST", path, "application/octet-stream", content) +} + +func (c *Client) ReplacePrivateKey(path string, content string) error { + return c.createOrReplacePublicKey("PUT", path, "application/octet-stream", content) +} + +func (c *Client) createOrReplacePublicKey(method string, path string, contentType string, content string) error { + req := &request{ + Method: method, + PathParts: []string{"storage", "keys", path}, + Headers: map[string]string{ + "Content-Type": contentType, + }, + BodyBytes: []byte(content), + } + + _, err := c.rawRequest(req) + + return err +} + +func (c *Client) DeleteKey(path string) error { + return c.delete([]string{"storage", "keys", path}) +} diff --git a/vendor/github.com/apparentlymart/go-rundeck-api/rundeck/project.go b/vendor/github.com/apparentlymart/go-rundeck-api/rundeck/project.go new file mode 100644 index 000000000..88c4843e1 --- /dev/null +++ b/vendor/github.com/apparentlymart/go-rundeck-api/rundeck/project.go @@ -0,0 +1,87 @@ +package rundeck + +import ( + "encoding/xml" +) + +// ProjectSummary provides the basic identifying information for a project within Rundeck. +type ProjectSummary struct { + Name string `xml:"name"` + Description string `xml:"description,omitempty"` + URL string `xml:"url,attr"` +} + +// Project represents a project within Rundeck. +type Project struct { + Name string `xml:"name"` + Description string `xml:"description,omitempty"` + + // Config is the project configuration. + // + // When making requests, Config and RawConfigItems are combined to produce + // a single set of configuration settings. Thus it isn't necessary and + // doesn't make sense to duplicate the same properties in both properties. + Config ProjectConfig `xml:"config"` + + // URL is used only to represent server responses. It is ignored when + // making requests. + URL string `xml:"url,attr"` + + // XMLName is used only in XML unmarshalling and doesn't need to + // be set when creating a Project to send to the server. + XMLName xml.Name `xml:"project"` +} + +// ProjectConfig is a specialized map[string]string representing Rundeck project configuration +type ProjectConfig map[string]string + +type projects struct { + XMLName xml.Name `xml:"projects"` + Count int64 `xml:"count,attr"` + Projects []ProjectSummary `xml:"project"` +} + +// GetAllProjects retrieves and returns all of the projects defined in the Rundeck server. +func (c *Client) GetAllProjects() ([]ProjectSummary, error) { + p := &projects{} + err := c.get([]string{"projects"}, nil, p) + return p.Projects, err +} + +// GetProject retrieves and returns the named project. +func (c *Client) GetProject(name string) (*Project, error) { + p := &Project{} + err := c.get([]string{"project", name}, nil, p) + return p, err +} + +// CreateProject creates a new, empty project. 
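+//
+// A hedged usage sketch; the project name and config key are illustrative:
+//
+//    created, err := client.CreateProject(&Project{
+//        Name: "example",
+//        Config: ProjectConfig{
+//            "project.description": "An example project",
+//        },
+//    })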
+func (c *Client) CreateProject(project *Project) (*Project, error) { + p := &Project{} + err := c.post([]string{"projects"}, nil, project, p) + return p, err +} + +// DeleteProject deletes a project and all of its jobs. +func (c *Client) DeleteProject(name string) error { + return c.delete([]string{"project", name}) +} + +// SetProjectConfig replaces the configuration of the named project. +func (c *Client) SetProjectConfig(projectName string, config ProjectConfig) error { + return c.put( + []string{"project", projectName, "config"}, + config, + nil, + ) +} + +func (c ProjectConfig) MarshalXML(e *xml.Encoder, start xml.StartElement) error { + rc := map[string]string(c) + return marshalMapToXML(&rc, e, start, "property", "key", "value") +} + +func (c *ProjectConfig) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error { + rc := (*map[string]string)(c) + return unmarshalMapFromXML(rc, d, start, "property", "key", "value") +} diff --git a/vendor/github.com/apparentlymart/go-rundeck-api/rundeck/system_info.go b/vendor/github.com/apparentlymart/go-rundeck-api/rundeck/system_info.go new file mode 100644 index 000000000..78753e93b --- /dev/null +++ b/vendor/github.com/apparentlymart/go-rundeck-api/rundeck/system_info.go @@ -0,0 +1,116 @@ +package rundeck + +import ( + "encoding/xml" + "time" +) + +// SystemInfo represents a set of miscellaneous system information properties about the +// Rundeck server. +type SystemInfo struct { + XMLName xml.Name `xml:"system"` + ServerTime SystemTimestamp `xml:"timestamp"` + Rundeck About `xml:"rundeck"` + OS SystemOS `xml:"os"` + JVM SystemJVM `xml:"jvm"` + Stats SystemStats `xml:"stats"` +} + +// About describes the Rundeck server itself. +type About struct { + XMLName xml.Name `xml:"rundeck"` + Version string `xml:"version"` + APIVersion int64 `xml:"apiversion"` + Build string `xml:"build"` + Node string `xml:"node"` + BaseDir string `xml:"base"` + ServerUUID string `xml:"serverUUID,omitempty"` +} + +// SystemTimestamp gives a timestamp from the Rundeck server. +type SystemTimestamp struct { + Epoch string `xml:"epoch,attr"` + EpochUnit string `xml:"unit,attr"` + DateTimeStr string `xml:"datetime"` +} + +// SystemOS describes the operating system of the Rundeck server. +type SystemOS struct { + Architecture string `xml:"arch"` + Name string `xml:"name"` + Version string `xml:"version"` +} + +// SystemJVM describes the Java Virtual Machine that the Rundeck server is running in. +type SystemJVM struct { + Name string `xml:"name"` + Vendor string `xml:"vendor"` + Version string `xml:"version"` + ImplementationVersion string `xml:"implementationVersion"` +} + +// SystemStats provides some basic system statistics about the server that Rundeck is running on. +type SystemStats struct { + XMLName xml.Name `xml:"stats"` + Uptime SystemUptime `xml:"uptime"` + CPU SystemCPUStats `xml:"cpu"` + Memory SystemMemoryUsage `xml:"memory"` + Scheduler SystemSchedulerStats `xml:"scheduler"` + Threads SystemThreadStats `xml:"threads"` +} + +// SystemUptime describes how long Rundeck's host machine has been running. +type SystemUptime struct { + XMLName xml.Name `xml:"uptime"` + Duration string `xml:"duration,attr"` + DurationUnit string `xml:"unit,attr"` + BootTimestamp SystemTimestamp `xml:"since"` +} + +// SystemCPUStats describes the available processors and the system load average of the machine on +// which the Rundeck server is running. 
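+//
+// These statistics can be read from a GetSystemInfo result; for example
+// (a sketch, with error handling elided):
+//
+//    info, _ := client.GetSystemInfo()
+//    fmt.Println(info.Stats.CPU.LoadAverage.Value, info.Stats.CPU.ProcessorCount)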
+type SystemCPUStats struct { + XMLName xml.Name `xml:"cpu"` + LoadAverage struct { + Unit string `xml:"unit,attr"` + Value float64 `xml:",chardata"` + } `xml:"loadAverage"` + ProcessorCount int64 `xml:"processors"` +} + +// SystemMemoryUsage describes how much memory is available and used on the machine on which +// the Rundeck server is running. +type SystemMemoryUsage struct { + XMLName xml.Name `xml:"memory"` + Unit string `xml:"unit,attr"` + Max int64 `xml:"max"` + Free int64 `xml:"free"` + Total int64 `xml:"total"` +} + +// SystemSchedulerStats provides statistics about the Rundeck scheduler. +type SystemSchedulerStats struct { + RunningJobCount int64 `xml:"running"` +} + +// SystemThreadStats provides statistics about the thread usage of the Rundeck server. +type SystemThreadStats struct { + ActiveThreadCount int64 `xml:"active"` +} + +// GetSystemInfo retrieves and returns miscellaneous system information about the Rundeck server +// and the machine it's running on. +func (c *Client) GetSystemInfo() (*SystemInfo, error) { + sysInfo := &SystemInfo{} + err := c.get([]string{"system", "info"}, nil, sysInfo) + return sysInfo, err +} + +// DateTime produces a time.Time object from a SystemTimestamp object. +func (ts *SystemTimestamp) DateTime() time.Time { + // Assume the server will always give us a valid timestamp, + // so we don't need to handle the error case. + // (Famous last words?) + t, _ := time.Parse(time.RFC3339, ts.DateTimeStr) + return t +} diff --git a/vendor/github.com/apparentlymart/go-rundeck-api/rundeck/testing.go b/vendor/github.com/apparentlymart/go-rundeck-api/rundeck/testing.go new file mode 100644 index 000000000..7ccc68eb8 --- /dev/null +++ b/vendor/github.com/apparentlymart/go-rundeck-api/rundeck/testing.go @@ -0,0 +1,46 @@ +package rundeck + +import ( + "encoding/xml" + "testing" +) + +type marshalTest struct { + Name string + Input interface{} + ExpectedXML string +} + +type unmarshalTestFunc func (result interface {}) error + +type unmarshalTest struct { + Name string + Input string + Output interface {} + TestFunc unmarshalTestFunc +} + +func testMarshalXML(t *testing.T, tests []marshalTest) { + for _, test := range tests { + xmlBytes, err := xml.Marshal(test.Input) + if err != nil { + t.Errorf("Error in Marshall for test %s: %s", test.Name, err.Error()) + continue + } + xmlStr := string(xmlBytes) + if xmlStr != test.ExpectedXML { + t.Errorf("Test %s got %s, but wanted %s", test.Name, xmlStr, test.ExpectedXML) + continue + } + } +} + +func testUnmarshalXML(t *testing.T, tests []unmarshalTest) { + for _, test := range tests { + xml.Unmarshal([]byte(test.Input), test.Output) + err := test.TestFunc(test.Output) + if err != nil { + t.Errorf("Test %s %s", test.Name, err.Error()) + } + } +} diff --git a/vendor/github.com/apparentlymart/go-rundeck-api/rundeck/util.go b/vendor/github.com/apparentlymart/go-rundeck-api/rundeck/util.go new file mode 100644 index 000000000..0bf133579 --- /dev/null +++ b/vendor/github.com/apparentlymart/go-rundeck-api/rundeck/util.go @@ -0,0 +1,81 @@ +package rundeck + +import ( + "encoding/xml" + "fmt" + "sort" +) + +func marshalMapToXML(c *map[string]string, e *xml.Encoder, start xml.StartElement, entryName string, keyName string, valueName string) error { + if len(*c) == 0 { + return nil + } + e.EncodeToken(start) + + // Sort the keys so we'll have a deterministic result. 
+ keys := []string{} + for k, _ := range *c { + keys = append(keys, k) + } + sort.Strings(keys) + + for _, k := range keys { + v := (*c)[k] + e.EncodeToken(xml.StartElement{ + Name: xml.Name{Local: entryName}, + Attr: []xml.Attr{ + xml.Attr{ + Name: xml.Name{Local: keyName}, + Value: k, + }, + xml.Attr{ + Name: xml.Name{Local: valueName}, + Value: v, + }, + }, + }) + e.EncodeToken(xml.EndElement{xml.Name{Local: entryName}}) + } + e.EncodeToken(xml.EndElement{start.Name}) + return nil +} + +func unmarshalMapFromXML(c *map[string]string, d *xml.Decoder, start xml.StartElement, entryName string, keyName string, valueName string) error { + result := map[string]string{} + for { + token, err := d.Token() + if token == nil { + err = fmt.Errorf("EOF while decoding job command plugin config") + } + if err != nil { + return err + } + + switch t := token.(type) { + default: + continue + case xml.StartElement: + if t.Name.Local != entryName { + return fmt.Errorf("unexpected element %s while looking for config entries", t.Name.Local) + } + var k string + var v string + for _, attr := range t.Attr { + if attr.Name.Local == keyName { + k = attr.Value + } else if attr.Name.Local == valueName { + v = attr.Value + } + } + if k == "" { + return fmt.Errorf("found config entry with empty key") + } + result[k] = v + case xml.EndElement: + if t.Name.Local == start.Name.Local { + *c = result + return nil + } + } + } +} diff --git a/vendor/github.com/armon/circbuf/.gitignore b/vendor/github.com/armon/circbuf/.gitignore new file mode 100644 index 000000000..00268614f --- /dev/null +++ b/vendor/github.com/armon/circbuf/.gitignore @@ -0,0 +1,22 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe diff --git a/vendor/github.com/armon/circbuf/LICENSE b/vendor/github.com/armon/circbuf/LICENSE new file mode 100644 index 000000000..106569e54 --- /dev/null +++ b/vendor/github.com/armon/circbuf/LICENSE @@ -0,0 +1,20 @@ +The MIT License (MIT) + +Copyright (c) 2013 Armon Dadgar + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software is furnished to do so, +subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS +FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/armon/circbuf/README.md b/vendor/github.com/armon/circbuf/README.md new file mode 100644 index 000000000..f2e356b8d --- /dev/null +++ b/vendor/github.com/armon/circbuf/README.md @@ -0,0 +1,28 @@ +circbuf +======= + +This repository provides the `circbuf` package. 
This provides a `Buffer` object +which is a circular (or ring) buffer. It has a fixed size, but can be written +to infinitely. Only the last `size` bytes are ever retained. The buffer implements +the `io.Writer` interface. + +Documentation +============= + +Full documentation can be found on [Godoc](http://godoc.org/github.com/armon/circbuf) + +Usage +===== + +The `circbuf` package is very easy to use: + +```go +buf, _ := NewBuffer(6) +buf.Write([]byte("hello world")) + +if string(buf.Bytes()) != " world" { + panic("should only have last 6 bytes!") +} + +``` + diff --git a/vendor/github.com/armon/circbuf/circbuf.go b/vendor/github.com/armon/circbuf/circbuf.go new file mode 100644 index 000000000..de3cb94a3 --- /dev/null +++ b/vendor/github.com/armon/circbuf/circbuf.go @@ -0,0 +1,92 @@ +package circbuf + +import ( + "fmt" +) + +// Buffer implements a circular buffer. It is a fixed size, +// and new writes overwrite older data, such that for a buffer +// of size N, for any amount of writes, only the last N bytes +// are retained. +type Buffer struct { + data []byte + size int64 + writeCursor int64 + written int64 +} + +// NewBuffer creates a new buffer of a given size. The size +// must be greater than 0. +func NewBuffer(size int64) (*Buffer, error) { + if size <= 0 { + return nil, fmt.Errorf("Size must be positive") + } + + b := &Buffer{ + size: size, + data: make([]byte, size), + } + return b, nil +} + +// Write writes up to len(buf) bytes to the internal ring, +// overriding older data if necessary. +func (b *Buffer) Write(buf []byte) (int, error) { + // Account for total bytes written + n := len(buf) + b.written += int64(n) + + // If the buffer is larger than ours, then we only care + // about the last size bytes anyways + if int64(n) > b.size { + buf = buf[int64(n)-b.size:] + } + + // Copy in place + remain := b.size - b.writeCursor + copy(b.data[b.writeCursor:], buf) + if int64(len(buf)) > remain { + copy(b.data, buf[remain:]) + } + + // Update location of the cursor + b.writeCursor = ((b.writeCursor + int64(len(buf))) % b.size) + return n, nil +} + +// Size returns the size of the buffer +func (b *Buffer) Size() int64 { + return b.size +} + +// TotalWritten provides the total number of bytes written +func (b *Buffer) TotalWritten() int64 { + return b.written +} + +// Bytes provides a slice of the bytes written. This +// slice should not be written to. +func (b *Buffer) Bytes() []byte { + switch { + case b.written >= b.size && b.writeCursor == 0: + return b.data + case b.written > b.size: + out := make([]byte, b.size) + copy(out, b.data[b.writeCursor:]) + copy(out[b.size-b.writeCursor:], b.data[:b.writeCursor]) + return out + default: + return b.data[:b.writeCursor] + } +} + +// Reset resets the buffer so it has no content. 
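+// The underlying storage is retained, so a reset buffer can be reused
+// without reallocating; a brief sketch:
+//
+//    buf, _ := NewBuffer(6)
+//    buf.Write([]byte("hello world"))
+//    buf.Reset()
+//    buf.Write([]byte("again"))
+//    // buf.String() == "again"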
+func (b *Buffer) Reset() { + b.writeCursor = 0 + b.written = 0 +} + +// String returns the contents of the buffer as a string +func (b *Buffer) String() string { + return string(b.Bytes()) +} diff --git a/vendor/github.com/armon/circbuf/circbuf_test.go b/vendor/github.com/armon/circbuf/circbuf_test.go new file mode 100644 index 000000000..a3ed64269 --- /dev/null +++ b/vendor/github.com/armon/circbuf/circbuf_test.go @@ -0,0 +1,198 @@ +package circbuf + +import ( + "bytes" + "io" + "testing" +) + +func TestBuffer_Impl(t *testing.T) { + var _ io.Writer = &Buffer{} +} + +func TestBuffer_ShortWrite(t *testing.T) { + buf, err := NewBuffer(1024) + if err != nil { + t.Fatalf("err: %v", err) + } + + inp := []byte("hello world") + + n, err := buf.Write(inp) + if err != nil { + t.Fatalf("err: %v", err) + } + if n != len(inp) { + t.Fatalf("bad: %v", n) + } + + if !bytes.Equal(buf.Bytes(), inp) { + t.Fatalf("bad: %v", buf.Bytes()) + } +} + +func TestBuffer_FullWrite(t *testing.T) { + inp := []byte("hello world") + + buf, err := NewBuffer(int64(len(inp))) + if err != nil { + t.Fatalf("err: %v", err) + } + + n, err := buf.Write(inp) + if err != nil { + t.Fatalf("err: %v", err) + } + if n != len(inp) { + t.Fatalf("bad: %v", n) + } + + if !bytes.Equal(buf.Bytes(), inp) { + t.Fatalf("bad: %v", buf.Bytes()) + } +} + +func TestBuffer_LongWrite(t *testing.T) { + inp := []byte("hello world") + + buf, err := NewBuffer(6) + if err != nil { + t.Fatalf("err: %v", err) + } + + n, err := buf.Write(inp) + if err != nil { + t.Fatalf("err: %v", err) + } + if n != len(inp) { + t.Fatalf("bad: %v", n) + } + + expect := []byte(" world") + if !bytes.Equal(buf.Bytes(), expect) { + t.Fatalf("bad: %s", buf.Bytes()) + } +} + +func TestBuffer_HugeWrite(t *testing.T) { + inp := []byte("hello world") + + buf, err := NewBuffer(3) + if err != nil { + t.Fatalf("err: %v", err) + } + + n, err := buf.Write(inp) + if err != nil { + t.Fatalf("err: %v", err) + } + if n != len(inp) { + t.Fatalf("bad: %v", n) + } + + expect := []byte("rld") + if !bytes.Equal(buf.Bytes(), expect) { + t.Fatalf("bad: %s", buf.Bytes()) + } +} + +func TestBuffer_ManySmall(t *testing.T) { + inp := []byte("hello world") + + buf, err := NewBuffer(3) + if err != nil { + t.Fatalf("err: %v", err) + } + + for _, b := range inp { + n, err := buf.Write([]byte{b}) + if err != nil { + t.Fatalf("err: %v", err) + } + if n != 1 { + t.Fatalf("bad: %v", n) + } + } + + expect := []byte("rld") + if !bytes.Equal(buf.Bytes(), expect) { + t.Fatalf("bad: %v", buf.Bytes()) + } +} + +func TestBuffer_MultiPart(t *testing.T) { + inputs := [][]byte{ + []byte("hello world\n"), + []byte("this is a test\n"), + []byte("my cool input\n"), + } + total := 0 + + buf, err := NewBuffer(16) + if err != nil { + t.Fatalf("err: %v", err) + } + + for _, b := range inputs { + total += len(b) + n, err := buf.Write(b) + if err != nil { + t.Fatalf("err: %v", err) + } + if n != len(b) { + t.Fatalf("bad: %v", n) + } + } + + if int64(total) != buf.TotalWritten() { + t.Fatalf("bad total") + } + + expect := []byte("t\nmy cool input\n") + if !bytes.Equal(buf.Bytes(), expect) { + t.Fatalf("bad: %v", buf.Bytes()) + } +} + +func TestBuffer_Reset(t *testing.T) { + // Write a bunch of data + inputs := [][]byte{ + []byte("hello world\n"), + []byte("this is a test\n"), + []byte("my cool input\n"), + } + + buf, err := NewBuffer(4) + if err != nil { + t.Fatalf("err: %v", err) + } + + for _, b := range inputs { + n, err := buf.Write(b) + if err != nil { + t.Fatalf("err: %v", err) + } + if n != len(b) { + t.Fatalf("bad: %v", 
n) + } + } + + // Reset it + buf.Reset() + + // Write more data + input := []byte("hello") + n, err := buf.Write(input) + if err != nil { + t.Fatalf("err: %s", err) + } + if n != len(input) { + t.Fatalf("bad: %v", n) + } + + // Test the output + expect := []byte("ello") + if !bytes.Equal(buf.Bytes(), expect) { + t.Fatalf("bad: %v", string(buf.Bytes())) + } +} diff --git a/vendor/github.com/armon/go-radix/.gitignore b/vendor/github.com/armon/go-radix/.gitignore new file mode 100644 index 000000000..00268614f --- /dev/null +++ b/vendor/github.com/armon/go-radix/.gitignore @@ -0,0 +1,22 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe diff --git a/vendor/github.com/armon/go-radix/.travis.yml b/vendor/github.com/armon/go-radix/.travis.yml new file mode 100644 index 000000000..1a0bbea6c --- /dev/null +++ b/vendor/github.com/armon/go-radix/.travis.yml @@ -0,0 +1,3 @@ +language: go +go: + - tip diff --git a/vendor/github.com/armon/go-radix/LICENSE b/vendor/github.com/armon/go-radix/LICENSE new file mode 100644 index 000000000..a5df10e67 --- /dev/null +++ b/vendor/github.com/armon/go-radix/LICENSE @@ -0,0 +1,20 @@ +The MIT License (MIT) + +Copyright (c) 2014 Armon Dadgar + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software is furnished to do so, +subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS +FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/armon/go-radix/README.md b/vendor/github.com/armon/go-radix/README.md new file mode 100644 index 000000000..26f42a283 --- /dev/null +++ b/vendor/github.com/armon/go-radix/README.md @@ -0,0 +1,38 @@ +go-radix [![Build Status](https://travis-ci.org/armon/go-radix.png)](https://travis-ci.org/armon/go-radix) +========= + +Provides the `radix` package that implements a [radix tree](http://en.wikipedia.org/wiki/Radix_tree). +The package only provides a single `Tree` implementation, optimized for sparse nodes. + +As a radix tree, it provides the following: + * O(k) operations. In many cases, this can be faster than a hash table since + the hash function is an O(k) operation, and hash tables have very poor cache locality. + * Minimum / Maximum value lookups + * Ordered iteration + +For an immutable variant, see [go-immutable-radix](https://github.com/hashicorp/go-immutable-radix). + +Documentation +============= + +The full documentation is available on [Godoc](http://godoc.org/github.com/armon/go-radix). 
+ +Example +======= + +Below is a simple example of usage + +```go +// Create a tree +r := radix.New() +r.Insert("foo", 1) +r.Insert("bar", 2) +r.Insert("foobar", 2) + +// Find the longest prefix match +m, _, _ := r.LongestPrefix("foozip") +if m != "foo" { + panic("should be foo") +} +``` + diff --git a/vendor/github.com/armon/go-radix/radix.go b/vendor/github.com/armon/go-radix/radix.go new file mode 100644 index 000000000..d2914c13b --- /dev/null +++ b/vendor/github.com/armon/go-radix/radix.go @@ -0,0 +1,496 @@ +package radix + +import ( + "sort" + "strings" +) + +// WalkFn is used when walking the tree. Takes a +// key and value, returning if iteration should +// be terminated. +type WalkFn func(s string, v interface{}) bool + +// leafNode is used to represent a value +type leafNode struct { + key string + val interface{} +} + +// edge is used to represent an edge node +type edge struct { + label byte + node *node +} + +type node struct { + // leaf is used to store possible leaf + leaf *leafNode + + // prefix is the common prefix we ignore + prefix string + + // Edges should be stored in-order for iteration. + // We avoid a fully materialized slice to save memory, + // since in most cases we expect to be sparse + edges edges +} + +func (n *node) isLeaf() bool { + return n.leaf != nil +} + +func (n *node) addEdge(e edge) { + n.edges = append(n.edges, e) + n.edges.Sort() +} + +func (n *node) replaceEdge(e edge) { + num := len(n.edges) + idx := sort.Search(num, func(i int) bool { + return n.edges[i].label >= e.label + }) + if idx < num && n.edges[idx].label == e.label { + n.edges[idx].node = e.node + return + } + panic("replacing missing edge") +} + +func (n *node) getEdge(label byte) *node { + num := len(n.edges) + idx := sort.Search(num, func(i int) bool { + return n.edges[i].label >= label + }) + if idx < num && n.edges[idx].label == label { + return n.edges[idx].node + } + return nil +} + +func (n *node) delEdge(label byte) { + num := len(n.edges) + idx := sort.Search(num, func(i int) bool { + return n.edges[i].label >= label + }) + if idx < num && n.edges[idx].label == label { + copy(n.edges[idx:], n.edges[idx+1:]) + n.edges[len(n.edges)-1] = edge{} + n.edges = n.edges[:len(n.edges)-1] + } +} + +type edges []edge + +func (e edges) Len() int { + return len(e) +} + +func (e edges) Less(i, j int) bool { + return e[i].label < e[j].label +} + +func (e edges) Swap(i, j int) { + e[i], e[j] = e[j], e[i] +} + +func (e edges) Sort() { + sort.Sort(e) +} + +// Tree implements a radix tree. This can be treated as a +// Dictionary abstract data type. The main advantage over +// a standard hash map is prefix-based lookups and +// ordered iteration, +type Tree struct { + root *node + size int +} + +// New returns an empty Tree +func New() *Tree { + return NewFromMap(nil) +} + +// NewFromMap returns a new tree containing the keys +// from an existing map +func NewFromMap(m map[string]interface{}) *Tree { + t := &Tree{root: &node{}} + for k, v := range m { + t.Insert(k, v) + } + return t +} + +// Len is used to return the number of elements in the tree +func (t *Tree) Len() int { + return t.size +} + +// longestPrefix finds the length of the shared prefix +// of two strings +func longestPrefix(k1, k2 string) int { + max := len(k1) + if l := len(k2); l < max { + max = l + } + var i int + for i = 0; i < max; i++ { + if k1[i] != k2[i] { + break + } + } + return i +} + +// Insert is used to add a newentry or update +// an existing entry. Returns if updated. 
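+//
+// A brief sketch of the return values:
+//
+//    r := New()
+//    old, updated := r.Insert("foo", 1) // old == nil, updated == false
+//    old, updated = r.Insert("foo", 2)  // old == 1, updated == true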
+func (t *Tree) Insert(s string, v interface{}) (interface{}, bool) { + var parent *node + n := t.root + search := s + for { + // Handle key exhaution + if len(search) == 0 { + if n.isLeaf() { + old := n.leaf.val + n.leaf.val = v + return old, true + } + + n.leaf = &leafNode{ + key: s, + val: v, + } + t.size++ + return nil, false + } + + // Look for the edge + parent = n + n = n.getEdge(search[0]) + + // No edge, create one + if n == nil { + e := edge{ + label: search[0], + node: &node{ + leaf: &leafNode{ + key: s, + val: v, + }, + prefix: search, + }, + } + parent.addEdge(e) + t.size++ + return nil, false + } + + // Determine longest prefix of the search key on match + commonPrefix := longestPrefix(search, n.prefix) + if commonPrefix == len(n.prefix) { + search = search[commonPrefix:] + continue + } + + // Split the node + t.size++ + child := &node{ + prefix: search[:commonPrefix], + } + parent.replaceEdge(edge{ + label: search[0], + node: child, + }) + + // Restore the existing node + child.addEdge(edge{ + label: n.prefix[commonPrefix], + node: n, + }) + n.prefix = n.prefix[commonPrefix:] + + // Create a new leaf node + leaf := &leafNode{ + key: s, + val: v, + } + + // If the new key is a subset, add to to this node + search = search[commonPrefix:] + if len(search) == 0 { + child.leaf = leaf + return nil, false + } + + // Create a new edge for the node + child.addEdge(edge{ + label: search[0], + node: &node{ + leaf: leaf, + prefix: search, + }, + }) + return nil, false + } +} + +// Delete is used to delete a key, returning the previous +// value and if it was deleted +func (t *Tree) Delete(s string) (interface{}, bool) { + var parent *node + var label byte + n := t.root + search := s + for { + // Check for key exhaution + if len(search) == 0 { + if !n.isLeaf() { + break + } + goto DELETE + } + + // Look for an edge + parent = n + label = search[0] + n = n.getEdge(label) + if n == nil { + break + } + + // Consume the search prefix + if strings.HasPrefix(search, n.prefix) { + search = search[len(n.prefix):] + } else { + break + } + } + return nil, false + +DELETE: + // Delete the leaf + leaf := n.leaf + n.leaf = nil + t.size-- + + // Check if we should delete this node from the parent + if parent != nil && len(n.edges) == 0 { + parent.delEdge(label) + } + + // Check if we should merge this node + if n != t.root && len(n.edges) == 1 { + n.mergeChild() + } + + // Check if we should merge the parent's other child + if parent != nil && parent != t.root && len(parent.edges) == 1 && !parent.isLeaf() { + parent.mergeChild() + } + + return leaf.val, true +} + +func (n *node) mergeChild() { + e := n.edges[0] + child := e.node + n.prefix = n.prefix + child.prefix + n.leaf = child.leaf + n.edges = child.edges +} + +// Get is used to lookup a specific key, returning +// the value and if it was found +func (t *Tree) Get(s string) (interface{}, bool) { + n := t.root + search := s + for { + // Check for key exhaution + if len(search) == 0 { + if n.isLeaf() { + return n.leaf.val, true + } + break + } + + // Look for an edge + n = n.getEdge(search[0]) + if n == nil { + break + } + + // Consume the search prefix + if strings.HasPrefix(search, n.prefix) { + search = search[len(n.prefix):] + } else { + break + } + } + return nil, false +} + +// LongestPrefix is like Get, but instead of an +// exact match, it will return the longest prefix match. 
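+//
+// For example (sketch):
+//
+//    r := NewFromMap(map[string]interface{}{"foo": 1, "foobar": 2})
+//    key, val, ok := r.LongestPrefix("foozip")
+//    // key == "foo", val == 1, ok == true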
+func (t *Tree) LongestPrefix(s string) (string, interface{}, bool) { + var last *leafNode + n := t.root + search := s + for { + // Look for a leaf node + if n.isLeaf() { + last = n.leaf + } + + // Check for key exhaution + if len(search) == 0 { + break + } + + // Look for an edge + n = n.getEdge(search[0]) + if n == nil { + break + } + + // Consume the search prefix + if strings.HasPrefix(search, n.prefix) { + search = search[len(n.prefix):] + } else { + break + } + } + if last != nil { + return last.key, last.val, true + } + return "", nil, false +} + +// Minimum is used to return the minimum value in the tree +func (t *Tree) Minimum() (string, interface{}, bool) { + n := t.root + for { + if n.isLeaf() { + return n.leaf.key, n.leaf.val, true + } + if len(n.edges) > 0 { + n = n.edges[0].node + } else { + break + } + } + return "", nil, false +} + +// Maximum is used to return the maximum value in the tree +func (t *Tree) Maximum() (string, interface{}, bool) { + n := t.root + for { + if num := len(n.edges); num > 0 { + n = n.edges[num-1].node + continue + } + if n.isLeaf() { + return n.leaf.key, n.leaf.val, true + } + break + } + return "", nil, false +} + +// Walk is used to walk the tree +func (t *Tree) Walk(fn WalkFn) { + recursiveWalk(t.root, fn) +} + +// WalkPrefix is used to walk the tree under a prefix +func (t *Tree) WalkPrefix(prefix string, fn WalkFn) { + n := t.root + search := prefix + for { + // Check for key exhaution + if len(search) == 0 { + recursiveWalk(n, fn) + return + } + + // Look for an edge + n = n.getEdge(search[0]) + if n == nil { + break + } + + // Consume the search prefix + if strings.HasPrefix(search, n.prefix) { + search = search[len(n.prefix):] + + } else if strings.HasPrefix(n.prefix, search) { + // Child may be under our search prefix + recursiveWalk(n, fn) + return + } else { + break + } + } + +} + +// WalkPath is used to walk the tree, but only visiting nodes +// from the root down to a given leaf. Where WalkPrefix walks +// all the entries *under* the given prefix, this walks the +// entries *above* the given prefix. +func (t *Tree) WalkPath(path string, fn WalkFn) { + n := t.root + search := path + for { + // Visit the leaf values if any + if n.leaf != nil && fn(n.leaf.key, n.leaf.val) { + return + } + + // Check for key exhaution + if len(search) == 0 { + return + } + + // Look for an edge + n = n.getEdge(search[0]) + if n == nil { + return + } + + // Consume the search prefix + if strings.HasPrefix(search, n.prefix) { + search = search[len(n.prefix):] + } else { + break + } + } +} + +// recursiveWalk is used to do a pre-order walk of a node +// recursively. 
Returns true if the walk should be aborted +func recursiveWalk(n *node, fn WalkFn) bool { + // Visit the leaf values if any + if n.leaf != nil && fn(n.leaf.key, n.leaf.val) { + return true + } + + // Recurse on the children + for _, e := range n.edges { + if recursiveWalk(e.node, fn) { + return true + } + } + return false +} + +// ToMap is used to walk the tree and convert it into a map +func (t *Tree) ToMap() map[string]interface{} { + out := make(map[string]interface{}, t.size) + t.Walk(func(k string, v interface{}) bool { + out[k] = v + return false + }) + return out +} diff --git a/vendor/github.com/armon/go-radix/radix_test.go b/vendor/github.com/armon/go-radix/radix_test.go new file mode 100644 index 000000000..300f0d475 --- /dev/null +++ b/vendor/github.com/armon/go-radix/radix_test.go @@ -0,0 +1,319 @@ +package radix + +import ( + crand "crypto/rand" + "fmt" + "reflect" + "sort" + "testing" +) + +func TestRadix(t *testing.T) { + var min, max string + inp := make(map[string]interface{}) + for i := 0; i < 1000; i++ { + gen := generateUUID() + inp[gen] = i + if gen < min || i == 0 { + min = gen + } + if gen > max || i == 0 { + max = gen + } + } + + r := NewFromMap(inp) + if r.Len() != len(inp) { + t.Fatalf("bad length: %v %v", r.Len(), len(inp)) + } + + r.Walk(func(k string, v interface{}) bool { + println(k) + return false + }) + + for k, v := range inp { + out, ok := r.Get(k) + if !ok { + t.Fatalf("missing key: %v", k) + } + if out != v { + t.Fatalf("value mis-match: %v %v", out, v) + } + } + + // Check min and max + outMin, _, _ := r.Minimum() + if outMin != min { + t.Fatalf("bad minimum: %v %v", outMin, min) + } + outMax, _, _ := r.Maximum() + if outMax != max { + t.Fatalf("bad maximum: %v %v", outMax, max) + } + + for k, v := range inp { + out, ok := r.Delete(k) + if !ok { + t.Fatalf("missing key: %v", k) + } + if out != v { + t.Fatalf("value mis-match: %v %v", out, v) + } + } + if r.Len() != 0 { + t.Fatalf("bad length: %v", r.Len()) + } +} + +func TestRoot(t *testing.T) { + r := New() + _, ok := r.Delete("") + if ok { + t.Fatalf("bad") + } + _, ok = r.Insert("", true) + if ok { + t.Fatalf("bad") + } + val, ok := r.Get("") + if !ok || val != true { + t.Fatalf("bad: %v", val) + } + val, ok = r.Delete("") + if !ok || val != true { + t.Fatalf("bad: %v", val) + } +} + +func TestDelete(t *testing.T) { + + r := New() + + s := []string{"", "A", "AB"} + + for _, ss := range s { + r.Insert(ss, true) + } + + for _, ss := range s { + _, ok := r.Delete(ss) + if !ok { + t.Fatalf("bad %q", ss) + } + } +} + +func TestLongestPrefix(t *testing.T) { + r := New() + + keys := []string{ + "", + "foo", + "foobar", + "foobarbaz", + "foobarbazzip", + "foozip", + } + for _, k := range keys { + r.Insert(k, nil) + } + if r.Len() != len(keys) { + t.Fatalf("bad len: %v %v", r.Len(), len(keys)) + } + + type exp struct { + inp string + out string + } + cases := []exp{ + {"a", ""}, + {"abc", ""}, + {"fo", ""}, + {"foo", "foo"}, + {"foob", "foo"}, + {"foobar", "foobar"}, + {"foobarba", "foobar"}, + {"foobarbaz", "foobarbaz"}, + {"foobarbazzi", "foobarbaz"}, + {"foobarbazzip", "foobarbazzip"}, + {"foozi", "foo"}, + {"foozip", "foozip"}, + {"foozipzap", "foozip"}, + } + for _, test := range cases { + m, _, ok := r.LongestPrefix(test.inp) + if !ok { + t.Fatalf("no match: %v", test) + } + if m != test.out { + t.Fatalf("mis-match: %v %v", m, test) + } + } +} + +func TestWalkPrefix(t *testing.T) { + r := New() + + keys := []string{ + "foobar", + "foo/bar/baz", + "foo/baz/bar", + "foo/zip/zap", + "zipzap", + } + for _, 
k := range keys { + r.Insert(k, nil) + } + if r.Len() != len(keys) { + t.Fatalf("bad len: %v %v", r.Len(), len(keys)) + } + + type exp struct { + inp string + out []string + } + cases := []exp{ + { + "f", + []string{"foobar", "foo/bar/baz", "foo/baz/bar", "foo/zip/zap"}, + }, + { + "foo", + []string{"foobar", "foo/bar/baz", "foo/baz/bar", "foo/zip/zap"}, + }, + { + "foob", + []string{"foobar"}, + }, + { + "foo/", + []string{"foo/bar/baz", "foo/baz/bar", "foo/zip/zap"}, + }, + { + "foo/b", + []string{"foo/bar/baz", "foo/baz/bar"}, + }, + { + "foo/ba", + []string{"foo/bar/baz", "foo/baz/bar"}, + }, + { + "foo/bar", + []string{"foo/bar/baz"}, + }, + { + "foo/bar/baz", + []string{"foo/bar/baz"}, + }, + { + "foo/bar/bazoo", + []string{}, + }, + { + "z", + []string{"zipzap"}, + }, + } + + for _, test := range cases { + out := []string{} + fn := func(s string, v interface{}) bool { + out = append(out, s) + return false + } + r.WalkPrefix(test.inp, fn) + sort.Strings(out) + sort.Strings(test.out) + if !reflect.DeepEqual(out, test.out) { + t.Fatalf("mis-match: %v %v", out, test.out) + } + } +} + +func TestWalkPath(t *testing.T) { + r := New() + + keys := []string{ + "foo", + "foo/bar", + "foo/bar/baz", + "foo/baz/bar", + "foo/zip/zap", + "zipzap", + } + for _, k := range keys { + r.Insert(k, nil) + } + if r.Len() != len(keys) { + t.Fatalf("bad len: %v %v", r.Len(), len(keys)) + } + + type exp struct { + inp string + out []string + } + cases := []exp{ + { + "f", + []string{}, + }, + { + "foo", + []string{"foo"}, + }, + { + "foo/", + []string{"foo"}, + }, + { + "foo/ba", + []string{"foo"}, + }, + { + "foo/bar", + []string{"foo", "foo/bar"}, + }, + { + "foo/bar/baz", + []string{"foo", "foo/bar", "foo/bar/baz"}, + }, + { + "foo/bar/bazoo", + []string{"foo", "foo/bar", "foo/bar/baz"}, + }, + { + "z", + []string{}, + }, + } + + for _, test := range cases { + out := []string{} + fn := func(s string, v interface{}) bool { + out = append(out, s) + return false + } + r.WalkPath(test.inp, fn) + sort.Strings(out) + sort.Strings(test.out) + if !reflect.DeepEqual(out, test.out) { + t.Fatalf("mis-match: %v %v", out, test.out) + } + } +} + +// generateUUID is used to generate a random UUID +func generateUUID() string { + buf := make([]byte, 16) + if _, err := crand.Read(buf); err != nil { + panic(fmt.Errorf("failed to read random bytes: %v", err)) + } + + return fmt.Sprintf("%08x-%04x-%04x-%04x-%12x", + buf[0:4], + buf[4:6], + buf[6:8], + buf[8:10], + buf[10:16]) +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/awserr/error.go b/vendor/github.com/aws/aws-sdk-go/aws/awserr/error.go new file mode 100644 index 000000000..be8b517b3 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/awserr/error.go @@ -0,0 +1,127 @@ +// Package awserr represents API error interface accessors for the SDK. +package awserr + +// An Error wraps lower level errors with code, message and an original error. +// The underlying concrete error type may also satisfy other interfaces which +// can be used to obtain more specific information about the error. +// +// Calling Error() or String() will always include the full information about +// an error based on its underlying type. +// +// Example: +// +// output, err := s3manager.Upload(svc, input, opts) +// if err != nil { +// if awsErr, ok := err.(awserr.Error); ok { +// // Get error details +// log.Println("Error:", awsErr.Code(), awsErr.Message()) +// +// // Prints out full error message, including original error if there was one.
+// log.Println("Error:", err.Error()) +// +// // Get original error +// if origErr := awsErr.OrigErr(); origErr != nil { +// // operate on original error. +// } +// } else { +// fmt.Println(err.Error()) +// } +// } +// +type Error interface { + // Satisfy the generic error interface. + error + + // Returns the short phrase depicting the classification of the error. + Code() string + + // Returns the error details message. + Message() string + + // Returns the original error if one was set. Nil is returned if not set. + OrigErr() error +} + +// BatchError is a batch of errors which also wraps lower level errors with code, message, +// and original errors. Calling Error() will only return the error that is at the end +// of the list. +type BatchError interface { + // Satisfy the generic error interface. + error + + // Returns the short phrase depicting the classification of the error. + Code() string + + // Returns the error details message. + Message() string + + // Returns the original error if one was set. Nil is returned if not set. + OrigErrs() []error +} + +// New returns an Error object described by the code, message, and origErr. +// +// If origErr satisfies the Error interface it will not be wrapped within a new +// Error object and will instead be returned. +func New(code, message string, origErr error) Error { + if e, ok := origErr.(Error); ok && e != nil { + return e + } + return newBaseError(code, message, origErr) +} + +// NewBatchError returns a BatchError wrapping the given array of errors +func NewBatchError(code, message string, errs []error) BatchError { + return newBaseErrors(code, message, errs) +} + +// A RequestFailure is an interface to extract request failure information from +// an Error such as the request ID of the failed request returned by a service. +// RequestFailures may not always have a requestID value if the request failed +// prior to reaching the service, such as on a connection error. +// +// Example: +// +// output, err := s3manager.Upload(svc, input, opts) +// if err != nil { +// if reqerr, ok := err.(RequestFailure); ok { +// log.Println("Request failed:", reqerr.Code(), reqerr.Message(), reqerr.RequestID()) +// } else { +// log.Println("Error:", err.Error()) +// } +// } +// +// Combined with awserr.Error: +// +// output, err := s3manager.Upload(svc, input, opts) +// if err != nil { +// if awsErr, ok := err.(awserr.Error); ok { +// // Generic AWS Error with Code, Message, and original error (if any) +// fmt.Println(awsErr.Code(), awsErr.Message(), awsErr.OrigErr()) +// +// if reqErr, ok := err.(awserr.RequestFailure); ok { +// // A service error occurred +// fmt.Println(reqErr.StatusCode(), reqErr.RequestID()) +// } +// } else { +// fmt.Println(err.Error()) +// } +// } +// +type RequestFailure interface { + Error + + // The status code of the HTTP response. + StatusCode() int + + // The request ID returned by the service for a request failure. This will + // be empty if no request ID is available, such as when the request failed due + // to a connection error. + RequestID() string +}
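To make the accessors above concrete, a minimal consumer-side sketch; the error code, messages, and main wrapper are invented for illustration, and only New, Code, Message, and OrigErr from this package are assumed:

package main

import (
	"errors"
	"fmt"

	"github.com/aws/aws-sdk-go/aws/awserr"
)

func main() {
	// Wrap a plain error with a classification code and a detail message.
	var err error = awserr.New("ThrottlingException", "rate exceeded", errors.New("tcp: connection reset"))

	// Callers recover the details by type-asserting on the awserr.Error interface.
	if awsErr, ok := err.(awserr.Error); ok {
		fmt.Println(awsErr.Code())    // ThrottlingException
		fmt.Println(awsErr.Message()) // rate exceeded
		fmt.Println(awsErr.OrigErr()) // tcp: connection reset
	}
}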
+ +// NewRequestFailure returns a new request error wrapper for the given Error +// provided. +func NewRequestFailure(err Error, statusCode int, reqID string) RequestFailure { + return newRequestError(err, statusCode, reqID) +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/awserr/types.go b/vendor/github.com/aws/aws-sdk-go/aws/awserr/types.go new file mode 100644 index 000000000..605f73c5d --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/awserr/types.go @@ -0,0 +1,197 @@ +package awserr + +import "fmt" + +// SprintError formats an error string from the given code, message, extra +// detail, and original error. +// +// Both extra and origErr are optional; each contributes a line to the output +// only when it is set. +func SprintError(code, message, extra string, origErr error) string { + msg := fmt.Sprintf("%s: %s", code, message) + if extra != "" { + msg = fmt.Sprintf("%s\n\t%s", msg, extra) + } + if origErr != nil { + msg = fmt.Sprintf("%s\ncaused by: %s", msg, origErr.Error()) + } + return msg +} + +// A baseError wraps the code and message which define an error. It also +// can be used to wrap an original error object. +// +// Should be used as the root for errors satisfying the awserr.Error interface. +// Also for any error which does not fit into a specific error wrapper type. +type baseError struct { + // Classification of error + code string + + // Detailed information about error + message string + + // Optional original error this error is based off of. Allows building + // chained errors. + errs []error +} + +// newBaseError returns an error object for the code, message, and err. +// +// code is a short, whitespace-free phrase depicting the classification of +// the error that is being created. +// +// message is a free-form string containing detailed information about the error. +// +// origErr is the error object which will be nested under the new error to be returned. +func newBaseError(code, message string, origErr error) *baseError { + b := &baseError{ + code: code, + message: message, + } + + if origErr != nil { + b.errs = append(b.errs, origErr) + } + + return b +} + +// newBaseErrors returns an error object for the code, message, and errors. +// +// code is a short, whitespace-free phrase depicting the classification of +// the error that is being created. +// +// message is a free-form string containing detailed information about the error. +// +// origErrs are the error objects which will be nested under the new error to be returned. +func newBaseErrors(code, message string, origErrs []error) *baseError { + b := &baseError{ + code: code, + message: message, + errs: origErrs, + } + + return b +} + +// Error returns the string representation of the error. +// +// See ErrorWithExtra for formatting. +// +// Satisfies the error interface. +func (b baseError) Error() string { + size := len(b.errs) + if size > 0 { + return SprintError(b.code, b.message, "", errorList(b.errs)) + } + + return SprintError(b.code, b.message, "", nil) +} + +// String returns the string representation of the error. +// Alias for Error to satisfy the stringer interface. +func (b baseError) String() string { + return b.Error() +} + +// Code returns the short phrase depicting the classification of the error. +func (b baseError) Code() string { + return b.code +} + +// Message returns the error details message. +func (b baseError) Message() string { + return b.message +} + +// OrigErr returns the original error if one was set. Nil is returned if no error +// was set. This only returns the first element in the list.
If the full list is +// needed, use BatchError. +func (b baseError) OrigErr() error { + if size := len(b.errs); size > 0 { + return b.errs[0] + } + + return nil +} + +// OrigErrs returns the original errors if any were set. An empty slice is +// returned if no errors were set. +func (b baseError) OrigErrs() []error { + return b.errs +} + +// So that the Error interface type can be included as an anonymous field +// in the requestError struct and not conflict with the error.Error() method. +type awsError Error + +// A requestError wraps a request or service error. +// +// Composed of baseError for code, message, and original error. +type requestError struct { + awsError + statusCode int + requestID string +} + +// newRequestError returns a wrapped error with additional information for request +// status code, and service requestID. +// +// Should be used to wrap all errors which involve service requests, even if +// the request failed without a service response but has an HTTP status code +// that may be meaningful. +// +// Also wraps original errors via the baseError. +func newRequestError(err Error, statusCode int, requestID string) *requestError { + return &requestError{ + awsError: err, + statusCode: statusCode, + requestID: requestID, + } +} + +// Error returns the string representation of the error. +// Satisfies the error interface. +func (r requestError) Error() string { + extra := fmt.Sprintf("status code: %d, request id: %s", + r.statusCode, r.requestID) + return SprintError(r.Code(), r.Message(), extra, r.OrigErr()) +} + +// String returns the string representation of the error. +// Alias for Error to satisfy the stringer interface. +func (r requestError) String() string { + return r.Error() +} + +// StatusCode returns the wrapped status code for the error +func (r requestError) StatusCode() int { + return r.statusCode +} + +// RequestID returns the wrapped requestID +func (r requestError) RequestID() string { + return r.requestID +} + +// An error list that satisfies the Go error interface +type errorList []error + +// Error returns the string representation of the error. +// +// Satisfies the error interface. +func (e errorList) Error() string { + msg := "" + // How do we want to handle the array size being zero + if size := len(e); size > 0 { + for i := 0; i < size; i++ { + msg += fmt.Sprintf("%s", e[i].Error()) + // We check the next index to see if it is within the slice. + // If it is, then we append a newline. We do this because unit tests + // could be broken with the additional '\n' + if i+1 < size { + msg += "\n" + } + } + } + return msg +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/awsutil/copy.go b/vendor/github.com/aws/aws-sdk-go/aws/awsutil/copy.go new file mode 100644 index 000000000..8429470b9 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/awsutil/copy.go @@ -0,0 +1,100 @@ +package awsutil + +import ( + "io" + "reflect" +) + +// Copy deeply copies a src structure to dst. Useful for copying request and +// response structures. +// +// Can copy between structs of different type, but will only copy fields which +// are assignable, and exist in both structs. Fields which are not assignable, +// or do not exist in both structs are ignored. +func Copy(dst, src interface{}) { + dstval := reflect.ValueOf(dst) + if !dstval.IsValid() { + panic("Copy dst cannot be nil") + } + + rcopy(dstval, reflect.ValueOf(src), true) +}
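A small sketch of the field-matching rules Copy documents above, using two invented struct types; only awsutil.Copy from this file is assumed:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws/awsutil"
)

type Src struct {
	Name *string
	Tags []string // no counterpart in Dst: ignored
	Port int
}

type Dst struct {
	Name *string // present and assignable in both: deeply copied
	Port int     // copied
	Host string  // only in Dst: left at its zero value
}

func main() {
	n := "api"
	s := Src{Name: &n, Tags: []string{"a"}, Port: 8080}

	var d Dst
	awsutil.Copy(&d, &s)

	fmt.Println(*d.Name, d.Port)  // api 8080
	fmt.Println(d.Host == "")     // true
	fmt.Println(d.Name != s.Name) // true: the pointer itself was reallocated, not shared
}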
+ +// CopyOf returns a copy of src while also allocating the memory for dst. +// src must be a pointer type or this operation will fail. +func CopyOf(src interface{}) (dst interface{}) { + dsti := reflect.New(reflect.TypeOf(src).Elem()) + dst = dsti.Interface() + rcopy(dsti, reflect.ValueOf(src), true) + return +} + +// rcopy performs a recursive copy of values from the source to destination. +// +// root is used to skip certain aspects of the copy which are not valid +// for the root node of an object. +func rcopy(dst, src reflect.Value, root bool) { + if !src.IsValid() { + return + } + + switch src.Kind() { + case reflect.Ptr: + if _, ok := src.Interface().(io.Reader); ok { + if dst.Kind() == reflect.Ptr && dst.Elem().CanSet() { + dst.Elem().Set(src) + } else if dst.CanSet() { + dst.Set(src) + } + } else { + e := src.Type().Elem() + if dst.CanSet() && !src.IsNil() { + dst.Set(reflect.New(e)) + } + if src.Elem().IsValid() { + // Keep the current root state since the depth hasn't changed + rcopy(dst.Elem(), src.Elem(), root) + } + } + case reflect.Struct: + t := dst.Type() + for i := 0; i < t.NumField(); i++ { + name := t.Field(i).Name + srcVal := src.FieldByName(name) + dstVal := dst.FieldByName(name) + if srcVal.IsValid() && dstVal.CanSet() { + rcopy(dstVal, srcVal, false) + } + } + case reflect.Slice: + if src.IsNil() { + break + } + + s := reflect.MakeSlice(src.Type(), src.Len(), src.Cap()) + dst.Set(s) + for i := 0; i < src.Len(); i++ { + rcopy(dst.Index(i), src.Index(i), false) + } + case reflect.Map: + if src.IsNil() { + break + } + + s := reflect.MakeMap(src.Type()) + dst.Set(s) + for _, k := range src.MapKeys() { + v := src.MapIndex(k) + v2 := reflect.New(v.Type()).Elem() + rcopy(v2, v, false) + dst.SetMapIndex(k, v2) + } + default: + // Assign the value if possible. If it's not assignable, the value would + // need to be converted and the impact of that may be unexpected, or is + // not compatible with the dst type. + if src.Type().AssignableTo(dst.Type()) { + dst.Set(src) + } + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/awsutil/copy_test.go b/vendor/github.com/aws/aws-sdk-go/aws/awsutil/copy_test.go new file mode 100644 index 000000000..84b7e3f34 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/awsutil/copy_test.go @@ -0,0 +1,233 @@ +package awsutil_test + +import ( + "bytes" + "fmt" + "io" + "io/ioutil" + "testing" + + "github.com/aws/aws-sdk-go/aws/awsutil" + "github.com/stretchr/testify/assert" +) + +func ExampleCopy() { + type Foo struct { + A int + B []*string + } + + // Create the initial value + str1 := "hello" + str2 := "bye bye" + f1 := &Foo{A: 1, B: []*string{&str1, &str2}} + + // Do the copy + var f2 Foo + awsutil.Copy(&f2, f1) + + // Print the result + fmt.Println(awsutil.Prettify(f2)) + + // Output: + // { + // A: 1, + // B: ["hello","bye bye"] + // } +} + +func TestCopy(t *testing.T) { + type Foo struct { + A int + B []*string + C map[string]*int + } + + // Create the initial value + str1 := "hello" + str2 := "bye bye" + int1 := 1 + int2 := 2 + f1 := &Foo{ + A: 1, + B: []*string{&str1, &str2}, + C: map[string]*int{ + "A": &int1, + "B": &int2, + }, + } + + // Do the copy + var f2 Foo + awsutil.Copy(&f2, f1) + + // Values are equal + assert.Equal(t, f2.A, f1.A) + assert.Equal(t, f2.B, f1.B) + assert.Equal(t, f2.C, f1.C) + + // But pointers are not!
+ str3 := "nothello" + int3 := 57 + f2.A = 100 + f2.B[0] = &str3 + f2.C["B"] = &int3 + assert.NotEqual(t, f2.A, f1.A) + assert.NotEqual(t, f2.B, f1.B) + assert.NotEqual(t, f2.C, f1.C) +} + +func TestCopyNestedWithUnexported(t *testing.T) { + type Bar struct { + a int + B int + } + type Foo struct { + A string + B Bar + } + + f1 := &Foo{A: "string", B: Bar{a: 1, B: 2}} + + var f2 Foo + awsutil.Copy(&f2, f1) + + // Exported values match; the unexported field does not + assert.Equal(t, f2.A, f1.A) + assert.NotEqual(t, f2.B, f1.B) + assert.NotEqual(t, f2.B.a, f1.B.a) + assert.Equal(t, f2.B.B, f1.B.B) +} + +func TestCopyIgnoreNilMembers(t *testing.T) { + type Foo struct { + A *string + B []string + C map[string]string + } + + f := &Foo{} + assert.Nil(t, f.A) + assert.Nil(t, f.B) + assert.Nil(t, f.C) + + var f2 Foo + awsutil.Copy(&f2, f) + assert.Nil(t, f2.A) + assert.Nil(t, f2.B) + assert.Nil(t, f2.C) + + fcopy := awsutil.CopyOf(f) + f3 := fcopy.(*Foo) + assert.Nil(t, f3.A) + assert.Nil(t, f3.B) + assert.Nil(t, f3.C) +} + +func TestCopyPrimitive(t *testing.T) { + str := "hello" + var s string + awsutil.Copy(&s, &str) + assert.Equal(t, "hello", s) +} + +func TestCopyNil(t *testing.T) { + var s string + awsutil.Copy(&s, nil) + assert.Equal(t, "", s) +} + +func TestCopyReader(t *testing.T) { + var buf io.Reader = bytes.NewReader([]byte("hello world")) + var r io.Reader + awsutil.Copy(&r, buf) + b, err := ioutil.ReadAll(r) + assert.NoError(t, err) + assert.Equal(t, []byte("hello world"), b) + + // empty bytes because this is not a deep copy + b, err = ioutil.ReadAll(buf) + assert.NoError(t, err) + assert.Equal(t, []byte(""), b) +} + +func TestCopyDifferentStructs(t *testing.T) { + type SrcFoo struct { + A int + B []*string + C map[string]*int + SrcUnique string + SameNameDiffType int + unexportedPtr *int + ExportedPtr *int + } + type DstFoo struct { + A int + B []*string + C map[string]*int + DstUnique int + SameNameDiffType string + unexportedPtr *int + ExportedPtr *int + } + + // Create the initial value + str1 := "hello" + str2 := "bye bye" + int1 := 1 + int2 := 2 + f1 := &SrcFoo{ + A: 1, + B: []*string{&str1, &str2}, + C: map[string]*int{ + "A": &int1, + "B": &int2, + }, + SrcUnique: "unique", + SameNameDiffType: 1, + unexportedPtr: &int1, + ExportedPtr: &int2, + } + + // Do the copy + var f2 DstFoo + awsutil.Copy(&f2, f1) + + // Values are equal + assert.Equal(t, f2.A, f1.A) + assert.Equal(t, f2.B, f1.B) + assert.Equal(t, f2.C, f1.C) + assert.Equal(t, "unique", f1.SrcUnique) + assert.Equal(t, 1, f1.SameNameDiffType) + assert.Equal(t, 0, f2.DstUnique) + assert.Equal(t, "", f2.SameNameDiffType) + assert.Equal(t, int1, *f1.unexportedPtr) + assert.Nil(t, f2.unexportedPtr) + assert.Equal(t, int2, *f1.ExportedPtr) + assert.Equal(t, int2, *f2.ExportedPtr) +} + +func ExampleCopyOf() { + type Foo struct { + A int + B []*string + } + + // Create the initial value + str1 := "hello" + str2 := "bye bye" + f1 := &Foo{A: 1, B: []*string{&str1, &str2}} + + // Do the copy + v := awsutil.CopyOf(f1) + var f2 *Foo = v.(*Foo) + + // Print the result + fmt.Println(awsutil.Prettify(f2)) + + // Output: + // { + // A: 1, + // B: ["hello","bye bye"] + // } +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/awsutil/equal.go b/vendor/github.com/aws/aws-sdk-go/aws/awsutil/equal.go new file mode 100644 index 000000000..59fa4a558 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/awsutil/equal.go @@ -0,0 +1,27 @@ +package awsutil + +import ( + "reflect" +) + +// DeepEqual reports whether the two values are deeply equal, like reflect.DeepEqual.
+// In addition, this function will also dereference the input values if +// possible, so the comparison will not fail if one parameter is a +// pointer and the other is not. +// +// DeepEqual will not perform indirection of nested values of the input parameters. +func DeepEqual(a, b interface{}) bool { + ra := reflect.Indirect(reflect.ValueOf(a)) + rb := reflect.Indirect(reflect.ValueOf(b)) + + if raValid, rbValid := ra.IsValid(), rb.IsValid(); !raValid && !rbValid { + // If the elements are both nil, and of the same type, they are equal. + // If they are of different types they are not equal. + return reflect.TypeOf(a) == reflect.TypeOf(b) + } else if raValid != rbValid { + // Both values must be valid to be equal + return false + } + + return reflect.DeepEqual(ra.Interface(), rb.Interface()) +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/awsutil/equal_test.go b/vendor/github.com/aws/aws-sdk-go/aws/awsutil/equal_test.go new file mode 100644 index 000000000..7a5db6e49 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/awsutil/equal_test.go @@ -0,0 +1,29 @@ +package awsutil_test + +import ( + "testing" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awsutil" + "github.com/stretchr/testify/assert" +) + +func TestDeepEqual(t *testing.T) { + cases := []struct { + a, b interface{} + equal bool + }{ + {"a", "a", true}, + {"a", "b", false}, + {"a", aws.String(""), false}, + {"a", nil, false}, + {"a", aws.String("a"), true}, + {(*bool)(nil), (*bool)(nil), true}, + {(*bool)(nil), (*string)(nil), false}, + {nil, nil, true}, + } + + for i, c := range cases { + assert.Equal(t, c.equal, awsutil.DeepEqual(c.a, c.b), "%d, a:%v b:%v, %t", i, c.a, c.b, c.equal) + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/awsutil/path_value.go b/vendor/github.com/aws/aws-sdk-go/aws/awsutil/path_value.go new file mode 100644 index 000000000..4d2a01e8c --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/awsutil/path_value.go @@ -0,0 +1,222 @@ +package awsutil + +import ( + "reflect" + "regexp" + "strconv" + "strings" + + "github.com/jmespath/go-jmespath" +) + +var indexRe = regexp.MustCompile(`(.+)\[(-?\d+)?\]$`) + +// rValuesAtPath returns a slice of values found in value v. The values +// in v are explored recursively so all nested values are collected.
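Before the implementation, a sketch of the dotted-path behavior it provides, exercised through the exported SetValueAtPath wrapper defined later in this file; the Node type and labels are invented:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws/awsutil"
)

type Node struct {
	Left  *Node
	Right *Node
	Label string
}

func main() {
	var root Node

	// Intermediate nil pointers along the path are allocated (createPath=true).
	awsutil.SetValueAtPath(&root, "Left.Left.Label", "deep")
	fmt.Println(root.Left.Left.Label) // deep

	// Components are matched case-insensitively against field names.
	awsutil.SetValueAtPath(&root, "right.label", "shallow")
	fmt.Println(root.Right.Label) // shallow
}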
+func rValuesAtPath(v interface{}, path string, createPath, caseSensitive, nilTerm bool) []reflect.Value { + pathparts := strings.Split(path, "||") + if len(pathparts) > 1 { + for _, pathpart := range pathparts { + vals := rValuesAtPath(v, pathpart, createPath, caseSensitive, nilTerm) + if len(vals) > 0 { + return vals + } + } + return nil + } + + values := []reflect.Value{reflect.Indirect(reflect.ValueOf(v))} + components := strings.Split(path, ".") + for len(values) > 0 && len(components) > 0 { + var index *int64 + var indexStar bool + c := strings.TrimSpace(components[0]) + if c == "" { // no actual component, illegal syntax + return nil + } else if caseSensitive && c != "*" && strings.ToLower(c[0:1]) == c[0:1] { + // TODO normalize case for user + return nil // don't support unexported fields + } + + // parse this component + if m := indexRe.FindStringSubmatch(c); m != nil { + c = m[1] + if m[2] == "" { + index = nil + indexStar = true + } else { + i, _ := strconv.ParseInt(m[2], 10, 32) + index = &i + indexStar = false + } + } + + nextvals := []reflect.Value{} + for _, value := range values { + // pull component name out of struct member + if value.Kind() != reflect.Struct { + continue + } + + if c == "*" { // pull all members + for i := 0; i < value.NumField(); i++ { + if f := reflect.Indirect(value.Field(i)); f.IsValid() { + nextvals = append(nextvals, f) + } + } + continue + } + + value = value.FieldByNameFunc(func(name string) bool { + if c == name { + return true + } else if !caseSensitive && strings.ToLower(name) == strings.ToLower(c) { + return true + } + return false + }) + + if nilTerm && value.Kind() == reflect.Ptr && len(components[1:]) == 0 { + if !value.IsNil() { + value.Set(reflect.Zero(value.Type())) + } + return []reflect.Value{value} + } + + if createPath && value.Kind() == reflect.Ptr && value.IsNil() { + // TODO if the value is the terminus it should not be created + // if the value to be set to its position is nil. + value.Set(reflect.New(value.Type().Elem())) + value = value.Elem() + } else { + value = reflect.Indirect(value) + } + + if value.Kind() == reflect.Slice || value.Kind() == reflect.Map { + if !createPath && value.IsNil() { + value = reflect.ValueOf(nil) + } + } + + if value.IsValid() { + nextvals = append(nextvals, value) + } + } + values = nextvals + + if indexStar || index != nil { + nextvals = []reflect.Value{} + for _, value := range values { + value := reflect.Indirect(value) + if value.Kind() != reflect.Slice { + continue + } + + if indexStar { // grab all indices + for i := 0; i < value.Len(); i++ { + idx := reflect.Indirect(value.Index(i)) + if idx.IsValid() { + nextvals = append(nextvals, idx) + } + } + continue + } + + // pull out index + i := int(*index) + if i >= value.Len() { // check out of bounds + if createPath { + // TODO resize slice + } else { + continue + } + } else if i < 0 { // support negative indexing + i = value.Len() + i + } + value = reflect.Indirect(value.Index(i)) + + if value.Kind() == reflect.Slice || value.Kind() == reflect.Map { + if !createPath && value.IsNil() { + value = reflect.ValueOf(nil) + } + } + + if value.IsValid() { + nextvals = append(nextvals, value) + } + } + values = nextvals + } + + components = components[1:] + } + return values +} + +// ValuesAtPath returns a list of values at the case insensitive lexical +// path inside of a structure. 
+func ValuesAtPath(i interface{}, path string) ([]interface{}, error) { + result, err := jmespath.Search(path, i) + if err != nil { + return nil, err + } + + v := reflect.ValueOf(result) + if !v.IsValid() || (v.Kind() == reflect.Ptr && v.IsNil()) { + return nil, nil + } + if s, ok := result.([]interface{}); ok { + return s, err + } + if v.Kind() == reflect.Map && v.Len() == 0 { + return nil, nil + } + if v.Kind() == reflect.Slice { + out := make([]interface{}, v.Len()) + for i := 0; i < v.Len(); i++ { + out[i] = v.Index(i).Interface() + } + return out, nil + } + + return []interface{}{result}, nil +} + +// SetValueAtPath sets a value at the case insensitive lexical path inside +// of a structure. +func SetValueAtPath(i interface{}, path string, v interface{}) { + if rvals := rValuesAtPath(i, path, true, false, v == nil); rvals != nil { + for _, rval := range rvals { + if rval.Kind() == reflect.Ptr && rval.IsNil() { + continue + } + setValue(rval, v) + } + } +} + +func setValue(dstVal reflect.Value, src interface{}) { + if dstVal.Kind() == reflect.Ptr { + dstVal = reflect.Indirect(dstVal) + } + srcVal := reflect.ValueOf(src) + + if !srcVal.IsValid() { // src is literal nil + if dstVal.CanAddr() { + // Convert to pointer so that pointer's value can be nil'ed + // dstVal = dstVal.Addr() + } + dstVal.Set(reflect.Zero(dstVal.Type())) + + } else if srcVal.Kind() == reflect.Ptr { + if srcVal.IsNil() { + srcVal = reflect.Zero(dstVal.Type()) + } else { + srcVal = reflect.ValueOf(src).Elem() + } + dstVal.Set(srcVal) + } else { + dstVal.Set(srcVal) + } + +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/awsutil/path_value_test.go b/vendor/github.com/aws/aws-sdk-go/aws/awsutil/path_value_test.go new file mode 100644 index 000000000..b2225566f --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/awsutil/path_value_test.go @@ -0,0 +1,142 @@ +package awsutil_test + +import ( + "testing" + + "github.com/aws/aws-sdk-go/aws/awsutil" + "github.com/stretchr/testify/assert" +) + +type Struct struct { + A []Struct + z []Struct + B *Struct + D *Struct + C string + E map[string]string +} + +var data = Struct{ + A: []Struct{{C: "value1"}, {C: "value2"}, {C: "value3"}}, + z: []Struct{{C: "value1"}, {C: "value2"}, {C: "value3"}}, + B: &Struct{B: &Struct{C: "terminal"}, D: &Struct{C: "terminal2"}}, + C: "initial", +} +var data2 = Struct{A: []Struct{ + {A: []Struct{{C: "1"}, {C: "1"}, {C: "1"}, {C: "1"}, {C: "1"}}}, + {A: []Struct{{C: "2"}, {C: "2"}, {C: "2"}, {C: "2"}, {C: "2"}}}, +}} + +func TestValueAtPathSuccess(t *testing.T) { + var testCases = []struct { + expect []interface{} + data interface{} + path string + }{ + {[]interface{}{"initial"}, data, "C"}, + {[]interface{}{"value1"}, data, "A[0].C"}, + {[]interface{}{"value2"}, data, "A[1].C"}, + {[]interface{}{"value3"}, data, "A[2].C"}, + {[]interface{}{"value3"}, data, "a[2].c"}, + {[]interface{}{"value3"}, data, "A[-1].C"}, + {[]interface{}{"value1", "value2", "value3"}, data, "A[].C"}, + {[]interface{}{"terminal"}, data, "B . B . 
C"}, + {[]interface{}{"initial"}, data, "A.D.X || C"}, + {[]interface{}{"initial"}, data, "A[0].B || C"}, + {[]interface{}{ + Struct{A: []Struct{{C: "1"}, {C: "1"}, {C: "1"}, {C: "1"}, {C: "1"}}}, + Struct{A: []Struct{{C: "2"}, {C: "2"}, {C: "2"}, {C: "2"}, {C: "2"}}}, + }, data2, "A"}, + } + for i, c := range testCases { + v, err := awsutil.ValuesAtPath(c.data, c.path) + assert.NoError(t, err, "case %d, expected no error, %s", i, c.path) + assert.Equal(t, c.expect, v, "case %d, %s", i, c.path) + } +} + +func TestValueAtPathFailure(t *testing.T) { + var testCases = []struct { + expect []interface{} + errContains string + data interface{} + path string + }{ + {nil, "", data, "C.x"}, + {nil, "SyntaxError: Invalid token: tDot", data, ".x"}, + {nil, "", data, "X.Y.Z"}, + {nil, "", data, "A[100].C"}, + {nil, "", data, "A[3].C"}, + {nil, "", data, "B.B.C.Z"}, + {nil, "", data, "z[-1].C"}, + {nil, "", nil, "A.B.C"}, + {[]interface{}{}, "", Struct{}, "A"}, + {nil, "", data, "A[0].B.C"}, + {nil, "", data, "D"}, + } + + for i, c := range testCases { + v, err := awsutil.ValuesAtPath(c.data, c.path) + if c.errContains != "" { + assert.Contains(t, err.Error(), c.errContains, "case %d, expected error, %s", i, c.path) + continue + } else { + assert.NoError(t, err, "case %d, expected no error, %s", i, c.path) + } + assert.Equal(t, c.expect, v, "case %d, %s", i, c.path) + } +} + +func TestSetValueAtPathSuccess(t *testing.T) { + var s Struct + awsutil.SetValueAtPath(&s, "C", "test1") + awsutil.SetValueAtPath(&s, "B.B.C", "test2") + awsutil.SetValueAtPath(&s, "B.D.C", "test3") + assert.Equal(t, "test1", s.C) + assert.Equal(t, "test2", s.B.B.C) + assert.Equal(t, "test3", s.B.D.C) + + awsutil.SetValueAtPath(&s, "B.*.C", "test0") + assert.Equal(t, "test0", s.B.B.C) + assert.Equal(t, "test0", s.B.D.C) + + var s2 Struct + awsutil.SetValueAtPath(&s2, "b.b.c", "test0") + assert.Equal(t, "test0", s2.B.B.C) + awsutil.SetValueAtPath(&s2, "A", []Struct{{}}) + assert.Equal(t, []Struct{{}}, s2.A) + + str := "foo" + + s3 := Struct{} + awsutil.SetValueAtPath(&s3, "b.b.c", str) + assert.Equal(t, "foo", s3.B.B.C) + + s3 = Struct{B: &Struct{B: &Struct{C: str}}} + awsutil.SetValueAtPath(&s3, "b.b.c", nil) + assert.Equal(t, "", s3.B.B.C) + + s3 = Struct{} + awsutil.SetValueAtPath(&s3, "b.b.c", nil) + assert.Equal(t, "", s3.B.B.C) + + s3 = Struct{} + awsutil.SetValueAtPath(&s3, "b.b.c", &str) + assert.Equal(t, "foo", s3.B.B.C) + + var s4 struct{ Name *string } + awsutil.SetValueAtPath(&s4, "Name", str) + assert.Equal(t, str, *s4.Name) + + s4 = struct{ Name *string }{} + awsutil.SetValueAtPath(&s4, "Name", nil) + assert.Equal(t, (*string)(nil), s4.Name) + + s4 = struct{ Name *string }{Name: &str} + awsutil.SetValueAtPath(&s4, "Name", nil) + assert.Equal(t, (*string)(nil), s4.Name) + + s4 = struct{ Name *string }{} + awsutil.SetValueAtPath(&s4, "Name", &str) + assert.Equal(t, str, *s4.Name) +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/awsutil/prettify.go b/vendor/github.com/aws/aws-sdk-go/aws/awsutil/prettify.go new file mode 100644 index 000000000..0de3eaa0f --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/awsutil/prettify.go @@ -0,0 +1,103 @@ +package awsutil + +import ( + "bytes" + "fmt" + "io" + "reflect" + "strings" +) + +// Prettify returns the string representation of a value. +func Prettify(i interface{}) string { + var buf bytes.Buffer + prettify(reflect.ValueOf(i), 0, &buf) + return buf.String() +} + +// prettify will recursively walk value v to build a textual +// representation of the value. 
+func prettify(v reflect.Value, indent int, buf *bytes.Buffer) { + for v.Kind() == reflect.Ptr { + v = v.Elem() + } + + switch v.Kind() { + case reflect.Struct: + strtype := v.Type().String() + if strtype == "time.Time" { + fmt.Fprintf(buf, "%s", v.Interface()) + break + } else if strings.HasPrefix(strtype, "io.") { + buf.WriteString("<buffer>") + break + } + + buf.WriteString("{\n") + + names := []string{} + for i := 0; i < v.Type().NumField(); i++ { + name := v.Type().Field(i).Name + f := v.Field(i) + if name[0:1] == strings.ToLower(name[0:1]) { + continue // ignore unexported fields + } + if (f.Kind() == reflect.Ptr || f.Kind() == reflect.Slice || f.Kind() == reflect.Map) && f.IsNil() { + continue // ignore unset fields + } + names = append(names, name) + } + + for i, n := range names { + val := v.FieldByName(n) + buf.WriteString(strings.Repeat(" ", indent+2)) + buf.WriteString(n + ": ") + prettify(val, indent+2, buf) + + if i < len(names)-1 { + buf.WriteString(",\n") + } + } + + buf.WriteString("\n" + strings.Repeat(" ", indent) + "}") + case reflect.Slice: + nl, id, id2 := "", "", "" + if v.Len() > 3 { + nl, id, id2 = "\n", strings.Repeat(" ", indent), strings.Repeat(" ", indent+2) + } + buf.WriteString("[" + nl) + for i := 0; i < v.Len(); i++ { + buf.WriteString(id2) + prettify(v.Index(i), indent+2, buf) + + if i < v.Len()-1 { + buf.WriteString("," + nl) + } + } + + buf.WriteString(nl + id + "]") + case reflect.Map: + buf.WriteString("{\n") + + for i, k := range v.MapKeys() { + buf.WriteString(strings.Repeat(" ", indent+2)) + buf.WriteString(k.String() + ": ") + prettify(v.MapIndex(k), indent+2, buf) + + if i < v.Len()-1 { + buf.WriteString(",\n") + } + } + + buf.WriteString("\n" + strings.Repeat(" ", indent) + "}") + default: + format := "%v" + switch v.Interface().(type) { + case string: + format = "%q" + case io.ReadSeeker, io.Reader: + format = "buffer(%p)" + } + fmt.Fprintf(buf, format, v.Interface()) + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/awsutil/string_value.go b/vendor/github.com/aws/aws-sdk-go/aws/awsutil/string_value.go new file mode 100644 index 000000000..b6432f1a1 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/awsutil/string_value.go @@ -0,0 +1,89 @@ +package awsutil + +import ( + "bytes" + "fmt" + "reflect" + "strings" +) + +// StringValue returns the string representation of a value.
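Note the name collision with aws.StringValue(*string), which dereferences a string pointer; awsutil.StringValue renders an arbitrary value, much like Prettify but without the time.Time and io special cases. A short sketch with an invented type:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws/awsutil"
)

type Params struct {
	A string
	B []int
}

func main() {
	fmt.Println(awsutil.StringValue(Params{A: "x", B: []int{1, 2}}))
	// {
	//   A: "x",
	//   B: [1,2]
	// }
}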
+func StringValue(i interface{}) string { + var buf bytes.Buffer + stringValue(reflect.ValueOf(i), 0, &buf) + return buf.String() +} + +func stringValue(v reflect.Value, indent int, buf *bytes.Buffer) { + for v.Kind() == reflect.Ptr { + v = v.Elem() + } + + switch v.Kind() { + case reflect.Struct: + buf.WriteString("{\n") + + names := []string{} + for i := 0; i < v.Type().NumField(); i++ { + name := v.Type().Field(i).Name + f := v.Field(i) + if name[0:1] == strings.ToLower(name[0:1]) { + continue // ignore unexported fields + } + if (f.Kind() == reflect.Ptr || f.Kind() == reflect.Slice) && f.IsNil() { + continue // ignore unset fields + } + names = append(names, name) + } + + for i, n := range names { + val := v.FieldByName(n) + buf.WriteString(strings.Repeat(" ", indent+2)) + buf.WriteString(n + ": ") + stringValue(val, indent+2, buf) + + if i < len(names)-1 { + buf.WriteString(",\n") + } + } + + buf.WriteString("\n" + strings.Repeat(" ", indent) + "}") + case reflect.Slice: + nl, id, id2 := "", "", "" + if v.Len() > 3 { + nl, id, id2 = "\n", strings.Repeat(" ", indent), strings.Repeat(" ", indent+2) + } + buf.WriteString("[" + nl) + for i := 0; i < v.Len(); i++ { + buf.WriteString(id2) + stringValue(v.Index(i), indent+2, buf) + + if i < v.Len()-1 { + buf.WriteString("," + nl) + } + } + + buf.WriteString(nl + id + "]") + case reflect.Map: + buf.WriteString("{\n") + + for i, k := range v.MapKeys() { + buf.WriteString(strings.Repeat(" ", indent+2)) + buf.WriteString(k.String() + ": ") + stringValue(v.MapIndex(k), indent+2, buf) + + if i < v.Len()-1 { + buf.WriteString(",\n") + } + } + + buf.WriteString("\n" + strings.Repeat(" ", indent) + "}") + default: + format := "%v" + switch v.Interface().(type) { + case string: + format = "%q" + } + fmt.Fprintf(buf, format, v.Interface()) + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/client/client.go b/vendor/github.com/aws/aws-sdk-go/aws/client/client.go new file mode 100644 index 000000000..c8d0564d8 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/client/client.go @@ -0,0 +1,120 @@ +package client + +import ( + "fmt" + "io/ioutil" + "net/http/httputil" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/client/metadata" + "github.com/aws/aws-sdk-go/aws/request" +) + +// A Config provides configuration to a service client instance. +type Config struct { + Config *aws.Config + Handlers request.Handlers + Endpoint, SigningRegion string +} + +// ConfigProvider provides a generic way for a service client to receive +// the ClientConfig without circular dependencies. +type ConfigProvider interface { + ClientConfig(serviceName string, cfgs ...*aws.Config) Config +} + +// A Client implements the base client request and response handling +// used by all service clients. +type Client struct { + request.Retryer + metadata.ClientInfo + + Config aws.Config + Handlers request.Handlers +} + +// New will return a pointer to a new initialized service client. 
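A minimal sketch of constructing a client directly with New; the service name and endpoint are invented, and the nil-safe LogLevel handling is assumed from the aws logging helpers vendored elsewhere in this patch:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/client"
	"github.com/aws/aws-sdk-go/aws/client/metadata"
	"github.com/aws/aws-sdk-go/aws/request"
)

func main() {
	cfg := *aws.NewConfig().WithMaxRetries(5)
	info := metadata.ClientInfo{ServiceName: "example", Endpoint: "https://example.invalid"}

	// cfg.Retryer is nil, so New falls back to DefaultRetryer with MaxRetries=5.
	c := client.New(cfg, info, request.Handlers{})
	fmt.Println(c.MaxRetries()) // 5
}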
+func New(cfg aws.Config, info metadata.ClientInfo, handlers request.Handlers, options ...func(*Client)) *Client { + svc := &Client{ + Config: cfg, + ClientInfo: info, + Handlers: handlers, + } + + switch retryer, ok := cfg.Retryer.(request.Retryer); { + case ok: + svc.Retryer = retryer + case cfg.Retryer != nil && cfg.Logger != nil: + s := fmt.Sprintf("WARNING: %T does not implement request.Retryer; using DefaultRetryer instead", cfg.Retryer) + cfg.Logger.Log(s) + fallthrough + default: + maxRetries := aws.IntValue(cfg.MaxRetries) + if cfg.MaxRetries == nil || maxRetries == aws.UseServiceDefaultRetries { + maxRetries = 3 + } + svc.Retryer = DefaultRetryer{NumMaxRetries: maxRetries} + } + + svc.AddDebugHandlers() + + for _, option := range options { + option(svc) + } + + return svc +} + +// NewRequest returns a new Request pointer for the service API +// operation and parameters. +func (c *Client) NewRequest(operation *request.Operation, params interface{}, data interface{}) *request.Request { + return request.New(c.Config, c.ClientInfo, c.Handlers, c.Retryer, operation, params, data) +} + +// AddDebugHandlers injects debug logging handlers into the service to log request +// debug information. +func (c *Client) AddDebugHandlers() { + if !c.Config.LogLevel.AtLeast(aws.LogDebug) { + return + } + + c.Handlers.Send.PushFront(logRequest) + c.Handlers.Send.PushBack(logResponse) +} + +const logReqMsg = `DEBUG: Request %s/%s Details: +---[ REQUEST POST-SIGN ]----------------------------- +%s +-----------------------------------------------------` + +func logRequest(r *request.Request) { + logBody := r.Config.LogLevel.Matches(aws.LogDebugWithHTTPBody) + dumpedBody, _ := httputil.DumpRequestOut(r.HTTPRequest, logBody) + + if logBody { + // Reset the request body because dumpRequest will re-wrap the r.HTTPRequest's + // Body as a NoOpCloser and will not be reset after read by the HTTP + // client reader. + r.Body.Seek(r.BodyStart, 0) + r.HTTPRequest.Body = ioutil.NopCloser(r.Body) + } + + r.Config.Logger.Log(fmt.Sprintf(logReqMsg, r.ClientInfo.ServiceName, r.Operation.Name, string(dumpedBody))) +} + +const logRespMsg = `DEBUG: Response %s/%s Details: +---[ RESPONSE ]-------------------------------------- +%s +-----------------------------------------------------` + +func logResponse(r *request.Request) { + var msg = "no response data" + if r.HTTPResponse != nil { + logBody := r.Config.LogLevel.Matches(aws.LogDebugWithHTTPBody) + dumpedBody, _ := httputil.DumpResponse(r.HTTPResponse, logBody) + msg = string(dumpedBody) + } else if r.Error != nil { + msg = r.Error.Error() + } + r.Config.Logger.Log(fmt.Sprintf(logRespMsg, r.ClientInfo.ServiceName, r.Operation.Name, msg)) +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/client/default_retryer.go b/vendor/github.com/aws/aws-sdk-go/aws/client/default_retryer.go new file mode 100644 index 000000000..24d39ce56 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/client/default_retryer.go @@ -0,0 +1,45 @@ +package client + +import ( + "math" + "math/rand" + "time" + + "github.com/aws/aws-sdk-go/aws/request" +) + +// DefaultRetryer implements basic retry logic using exponential backoff for +// most services. If you want to implement custom retry logic, implement the +// request.Retryer interface or create a structure type that composes this +// struct and override the specific methods. 
For example, to override only +// the MaxRetries method: +// +// type retryer struct { +// client.DefaultRetryer +// } +// +// // This implementation always has 100 max retries +// func (d retryer) MaxRetries() int { return 100 } +type DefaultRetryer struct { + NumMaxRetries int +} + +// MaxRetries returns the maximum number of retries that the service will make +// for an individual API request. +func (d DefaultRetryer) MaxRetries() int { + return d.NumMaxRetries +} + +// RetryRules returns the delay duration before retrying this request again +func (d DefaultRetryer) RetryRules(r *request.Request) time.Duration { + delay := int(math.Pow(2, float64(r.RetryCount))) * (rand.Intn(30) + 30) + return time.Duration(delay) * time.Millisecond +} + +// ShouldRetry reports whether the request should be retried. +func (d DefaultRetryer) ShouldRetry(r *request.Request) bool { + if r.HTTPResponse.StatusCode >= 500 { + return true + } + return r.IsErrorRetryable() +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/client/metadata/client_info.go b/vendor/github.com/aws/aws-sdk-go/aws/client/metadata/client_info.go new file mode 100644 index 000000000..4778056dd --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/client/metadata/client_info.go @@ -0,0 +1,12 @@ +package metadata + +// ClientInfo wraps immutable data from the client.Client structure. +type ClientInfo struct { + ServiceName string + APIVersion string + Endpoint string + SigningName string + SigningRegion string + JSONVersion string + TargetPrefix string +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/config.go b/vendor/github.com/aws/aws-sdk-go/aws/config.go new file mode 100644 index 000000000..9e83e9260 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/config.go @@ -0,0 +1,311 @@ +package aws + +import ( + "net/http" + "time" + + "github.com/aws/aws-sdk-go/aws/credentials" +) + +// UseServiceDefaultRetries instructs the config to use the service's own default +// number of retries. This will be the default action if Config.MaxRetries +// is also nil. +const UseServiceDefaultRetries = -1 + +// RequestRetryer is an alias for a type that implements the request.Retryer interface. +type RequestRetryer interface{} + +// A Config provides service configuration for service clients. By default, +// all clients will use the {defaults.DefaultConfig} structure. +type Config struct { + // Enables verbose error printing of all credential chain errors. + // Should be used when wanting to see all errors while attempting to retrieve + // credentials. + CredentialsChainVerboseErrors *bool + + // The credentials object to use when signing requests. Defaults to + // a chain of credential providers to search for credentials in environment + // variables, shared credential file, and EC2 Instance Roles. + Credentials *credentials.Credentials + + // An optional endpoint URL (hostname only or fully qualified URI) + // that overrides the default generated endpoint for a client. Set this + // to `""` to use the default generated endpoint. + // + // @note You must still provide a `Region` value when specifying an + // endpoint for a client. + Endpoint *string + + // The region to send requests to. This parameter is required and must + // be configured globally or on a per-client basis unless otherwise + // noted. A full list of regions is found in the "Regions and Endpoints" + // document.
+ // + // @see http://docs.aws.amazon.com/general/latest/gr/rande.html + // AWS Regions and Endpoints + Region *string + + // Set this to `true` to disable SSL when sending requests. Defaults + // to `false`. + DisableSSL *bool + + // The HTTP client to use when sending requests. Defaults to + // `http.DefaultClient`. + HTTPClient *http.Client + + // An integer value representing the logging level. The default log level + // is zero (LogOff), which represents no logging. To enable logging set + // to a LogLevel Value. + LogLevel *LogLevelType + + // The logger writer interface to write logging messages to. Defaults to + // standard out. + Logger Logger + + // The maximum number of times that a request will be retried for failures. + // Defaults to -1, which defers the max retry setting to the service specific + // configuration. + MaxRetries *int + + // Retryer guides how HTTP requests should be retried in case of recoverable failures. + // + // When nil or the value does not implement the request.Retryer interface, + // the client.DefaultRetryer will be used. + // + // When both Retryer and MaxRetries are non-nil, the former is used and + // the latter ignored. + // + // To set the Retryer field in a type-safe manner and with chaining, use + // the request.WithRetryer helper function: + // + // cfg := request.WithRetryer(aws.NewConfig(), myRetryer) + // + Retryer RequestRetryer + + // Disables semantic parameter validation, which validates input for missing + // required fields and/or other semantic request input errors. + DisableParamValidation *bool + + // Disables the computation of request and response checksums, e.g., + // CRC32 checksums in Amazon DynamoDB. + DisableComputeChecksums *bool + + // Set this to `true` to force the request to use path-style addressing, + // i.e., `http://s3.amazonaws.com/BUCKET/KEY`. By default, the S3 client will + // use virtual hosted bucket addressing when possible + // (`http://BUCKET.s3.amazonaws.com/KEY`). + // + // @note This configuration option is specific to the Amazon S3 service. + // @see http://docs.aws.amazon.com/AmazonS3/latest/dev/VirtualHosting.html + // Amazon S3: Virtual Hosting of Buckets + S3ForcePathStyle *bool + + // Set this to `true` to prevent the EC2Metadata client from overriding the + // default http.Client's Timeout. This is helpful if you do not want the EC2Metadata + // client to create a new http.Client. This option is only meaningful if you're not + // already using a custom HTTP client with the SDK. Enabled by default. + // + // Must be set and provided to session.New() in order to prevent the EC2Metadata + // client from overriding the timeout for the default credentials chain. + // + // Example: + // sess := session.New(aws.NewConfig().WithEC2MetadataDisableTimeoutOverride(true)) + // svc := s3.New(sess) + // + EC2MetadataDisableTimeoutOverride *bool + + SleepDelay func(time.Duration) +} + +// NewConfig returns a new Config pointer that can be chained with builder methods to +// set multiple configuration values inline without using pointers. +// +// svc := s3.New(aws.NewConfig().WithRegion("us-west-2").WithMaxRetries(10)) +// +func NewConfig() *Config { + return &Config{} } + +// WithCredentialsChainVerboseErrors sets a config verbose errors boolean, returning +// a Config pointer. +func (c *Config) WithCredentialsChainVerboseErrors(verboseErrs bool) *Config { + c.CredentialsChainVerboseErrors = &verboseErrs + return c +} + +// WithCredentials sets a config Credentials value returning a Config pointer +// for chaining.
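The builder methods below chain because each one returns the receiver; a short sketch (region and retry values invented), using the pointer accessors from convert_types.go later in this patch:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
)

func main() {
	cfg := aws.NewConfig().
		WithRegion("us-west-2").
		WithMaxRetries(10).
		WithS3ForcePathStyle(true)

	// Fields are pointers so MergeIn can distinguish "unset" from "zero value".
	fmt.Println(aws.StringValue(cfg.Region))                                       // us-west-2
	fmt.Println(aws.IntValue(cfg.MaxRetries), aws.BoolValue(cfg.S3ForcePathStyle)) // 10 true
}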
+func (c *Config) WithCredentials(creds *credentials.Credentials) *Config { + c.Credentials = creds + return c +} + +// WithEndpoint sets a config Endpoint value returning a Config pointer for +// chaining. +func (c *Config) WithEndpoint(endpoint string) *Config { + c.Endpoint = &endpoint + return c +} + +// WithRegion sets a config Region value returning a Config pointer for +// chaining. +func (c *Config) WithRegion(region string) *Config { + c.Region = ®ion + return c +} + +// WithDisableSSL sets a config DisableSSL value returning a Config pointer +// for chaining. +func (c *Config) WithDisableSSL(disable bool) *Config { + c.DisableSSL = &disable + return c +} + +// WithHTTPClient sets a config HTTPClient value returning a Config pointer +// for chaining. +func (c *Config) WithHTTPClient(client *http.Client) *Config { + c.HTTPClient = client + return c +} + +// WithMaxRetries sets a config MaxRetries value returning a Config pointer +// for chaining. +func (c *Config) WithMaxRetries(max int) *Config { + c.MaxRetries = &max + return c +} + +// WithDisableParamValidation sets a config DisableParamValidation value +// returning a Config pointer for chaining. +func (c *Config) WithDisableParamValidation(disable bool) *Config { + c.DisableParamValidation = &disable + return c +} + +// WithDisableComputeChecksums sets a config DisableComputeChecksums value +// returning a Config pointer for chaining. +func (c *Config) WithDisableComputeChecksums(disable bool) *Config { + c.DisableComputeChecksums = &disable + return c +} + +// WithLogLevel sets a config LogLevel value returning a Config pointer for +// chaining. +func (c *Config) WithLogLevel(level LogLevelType) *Config { + c.LogLevel = &level + return c +} + +// WithLogger sets a config Logger value returning a Config pointer for +// chaining. +func (c *Config) WithLogger(logger Logger) *Config { + c.Logger = logger + return c +} + +// WithS3ForcePathStyle sets a config S3ForcePathStyle value returning a Config +// pointer for chaining. +func (c *Config) WithS3ForcePathStyle(force bool) *Config { + c.S3ForcePathStyle = &force + return c +} + +// WithEC2MetadataDisableTimeoutOverride sets a config EC2MetadataDisableTimeoutOverride value +// returning a Config pointer for chaining. +func (c *Config) WithEC2MetadataDisableTimeoutOverride(enable bool) *Config { + c.EC2MetadataDisableTimeoutOverride = &enable + return c +} + +// WithSleepDelay overrides the function used to sleep while waiting for the +// next retry. Defaults to time.Sleep. +func (c *Config) WithSleepDelay(fn func(time.Duration)) *Config { + c.SleepDelay = fn + return c +} + +// MergeIn merges the passed in configs into the existing config object. 
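A sketch of the merge semantics implemented below: only fields that are non-nil in the argument overwrite the receiver (regions and retry counts invented):

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
)

func main() {
	base := aws.NewConfig().WithRegion("us-east-1").WithMaxRetries(3)
	override := aws.NewConfig().WithRegion("eu-west-1") // MaxRetries left unset (nil)

	base.MergeIn(override)

	// Region is replaced; MaxRetries survives because override.MaxRetries is nil.
	fmt.Println(aws.StringValue(base.Region), aws.IntValue(base.MaxRetries)) // eu-west-1 3
}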
+func (c *Config) MergeIn(cfgs ...*Config) { + for _, other := range cfgs { + mergeInConfig(c, other) + } +} + +func mergeInConfig(dst *Config, other *Config) { + if other == nil { + return + } + + if other.CredentialsChainVerboseErrors != nil { + dst.CredentialsChainVerboseErrors = other.CredentialsChainVerboseErrors + } + + if other.Credentials != nil { + dst.Credentials = other.Credentials + } + + if other.Endpoint != nil { + dst.Endpoint = other.Endpoint + } + + if other.Region != nil { + dst.Region = other.Region + } + + if other.DisableSSL != nil { + dst.DisableSSL = other.DisableSSL + } + + if other.HTTPClient != nil { + dst.HTTPClient = other.HTTPClient + } + + if other.LogLevel != nil { + dst.LogLevel = other.LogLevel + } + + if other.Logger != nil { + dst.Logger = other.Logger + } + + if other.MaxRetries != nil { + dst.MaxRetries = other.MaxRetries + } + + if other.Retryer != nil { + dst.Retryer = other.Retryer + } + + if other.DisableParamValidation != nil { + dst.DisableParamValidation = other.DisableParamValidation + } + + if other.DisableComputeChecksums != nil { + dst.DisableComputeChecksums = other.DisableComputeChecksums + } + + if other.S3ForcePathStyle != nil { + dst.S3ForcePathStyle = other.S3ForcePathStyle + } + + if other.EC2MetadataDisableTimeoutOverride != nil { + dst.EC2MetadataDisableTimeoutOverride = other.EC2MetadataDisableTimeoutOverride + } + + if other.SleepDelay != nil { + dst.SleepDelay = other.SleepDelay + } +} + +// Copy will return a shallow copy of the Config object. If any additional +// configurations are provided they will be merged into the new config returned. +func (c *Config) Copy(cfgs ...*Config) *Config { + dst := &Config{} + dst.MergeIn(c) + + for _, cfg := range cfgs { + dst.MergeIn(cfg) + } + + return dst +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/config_test.go b/vendor/github.com/aws/aws-sdk-go/aws/config_test.go new file mode 100644 index 000000000..fe97a31fc --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/config_test.go @@ -0,0 +1,86 @@ +package aws + +import ( + "net/http" + "reflect" + "testing" + + "github.com/aws/aws-sdk-go/aws/credentials" +) + +var testCredentials = credentials.NewStaticCredentials("AKID", "SECRET", "SESSION") + +var copyTestConfig = Config{ + Credentials: testCredentials, + Endpoint: String("CopyTestEndpoint"), + Region: String("COPY_TEST_AWS_REGION"), + DisableSSL: Bool(true), + HTTPClient: http.DefaultClient, + LogLevel: LogLevel(LogDebug), + Logger: NewDefaultLogger(), + MaxRetries: Int(3), + DisableParamValidation: Bool(true), + DisableComputeChecksums: Bool(true), + S3ForcePathStyle: Bool(true), +} + +func TestCopy(t *testing.T) { + want := copyTestConfig + got := copyTestConfig.Copy() + if !reflect.DeepEqual(*got, want) { + t.Errorf("Copy() = %+v", got) + t.Errorf(" want %+v", want) + } + + got.Region = String("other") + if got.Region == want.Region { + t.Errorf("Expect setting copy values to not reflect in source") + } +} + +func TestCopyReturnsNewInstance(t *testing.T) { + want := copyTestConfig + got := copyTestConfig.Copy() + if got == &want { + t.Errorf("Copy() = %p; want different instance as source %p", got, &want) + } +} + +var mergeTestZeroValueConfig = Config{} + +var mergeTestConfig = Config{ + Credentials: testCredentials, + Endpoint: String("MergeTestEndpoint"), + Region: String("MERGE_TEST_AWS_REGION"), + DisableSSL: Bool(true), + HTTPClient: http.DefaultClient, + LogLevel: LogLevel(LogDebug), + Logger: NewDefaultLogger(), + MaxRetries: Int(10), + DisableParamValidation: Bool(true),
+ DisableComputeChecksums: Bool(true), + S3ForcePathStyle: Bool(true), +} + +var mergeTests = []struct { + cfg *Config + in *Config + want *Config +}{ + {&Config{}, nil, &Config{}}, + {&Config{}, &mergeTestZeroValueConfig, &Config{}}, + {&Config{}, &mergeTestConfig, &mergeTestConfig}, +} + +func TestMerge(t *testing.T) { + for i, tt := range mergeTests { + got := tt.cfg.Copy() + got.MergeIn(tt.in) + if !reflect.DeepEqual(got, tt.want) { + t.Errorf("Config %d %+v", i, tt.cfg) + t.Errorf(" Merge(%+v)", tt.in) + t.Errorf(" got %+v", got) + t.Errorf(" want %+v", tt.want) + } + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/convert_types.go b/vendor/github.com/aws/aws-sdk-go/aws/convert_types.go new file mode 100644 index 000000000..d6a7b08df --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/convert_types.go @@ -0,0 +1,357 @@ +package aws + +import "time" + +// String returns a pointer to the string value passed in. +func String(v string) *string { + return &v +} + +// StringValue returns the value of the string pointer passed in or +// "" if the pointer is nil. +func StringValue(v *string) string { + if v != nil { + return *v + } + return "" +} + +// StringSlice converts a slice of string values into a slice of +// string pointers +func StringSlice(src []string) []*string { + dst := make([]*string, len(src)) + for i := 0; i < len(src); i++ { + dst[i] = &(src[i]) + } + return dst +} + +// StringValueSlice converts a slice of string pointers into a slice of +// string values +func StringValueSlice(src []*string) []string { + dst := make([]string, len(src)) + for i := 0; i < len(src); i++ { + if src[i] != nil { + dst[i] = *(src[i]) + } + } + return dst +} + +// StringMap converts a string map of string values into a string +// map of string pointers +func StringMap(src map[string]string) map[string]*string { + dst := make(map[string]*string) + for k, val := range src { + v := val + dst[k] = &v + } + return dst +} + +// StringValueMap converts a string map of string pointers into a string +// map of string values +func StringValueMap(src map[string]*string) map[string]string { + dst := make(map[string]string) + for k, val := range src { + if val != nil { + dst[k] = *val + } + } + return dst +} + +// Bool returns a pointer to the bool value passed in. +func Bool(v bool) *bool { + return &v +}
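A short sketch of the pointer helpers in use (the bucket name is invented); the *Value accessors are nil-safe, returning the zero value rather than panicking:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
)

func main() {
	// SDK input structs take *string fields; String builds them inline.
	name := aws.String("bucket-name")
	fmt.Println(aws.StringValue(name)) // bucket-name

	// A nil pointer yields "", not a panic.
	var missing *string
	fmt.Println(aws.StringValue(missing) == "") // true
}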
+func BoolValue(v *bool) bool { + if v != nil { + return *v + } + return false +} + +// BoolSlice converts a slice of bool values into a slice of +// bool pointers +func BoolSlice(src []bool) []*bool { + dst := make([]*bool, len(src)) + for i := 0; i < len(src); i++ { + dst[i] = &(src[i]) + } + return dst +} + +// BoolValueSlice converts a slice of bool pointers into a slice of +// bool values +func BoolValueSlice(src []*bool) []bool { + dst := make([]bool, len(src)) + for i := 0; i < len(src); i++ { + if src[i] != nil { + dst[i] = *(src[i]) + } + } + return dst +} + +// BoolMap converts a string map of bool values into a string +// map of bool pointers +func BoolMap(src map[string]bool) map[string]*bool { + dst := make(map[string]*bool) + for k, val := range src { + v := val + dst[k] = &v + } + return dst +} + +// BoolValueMap converts a string map of bool pointers into a string +// map of bool values +func BoolValueMap(src map[string]*bool) map[string]bool { + dst := make(map[string]bool) + for k, val := range src { + if val != nil { + dst[k] = *val + } + } + return dst +} + +// Int returns a pointer to of the int value passed in. +func Int(v int) *int { + return &v +} + +// IntValue returns the value of the int pointer passed in or +// 0 if the pointer is nil. +func IntValue(v *int) int { + if v != nil { + return *v + } + return 0 +} + +// IntSlice converts a slice of int values into a slice of +// int pointers +func IntSlice(src []int) []*int { + dst := make([]*int, len(src)) + for i := 0; i < len(src); i++ { + dst[i] = &(src[i]) + } + return dst +} + +// IntValueSlice converts a slice of int pointers into a slice of +// int values +func IntValueSlice(src []*int) []int { + dst := make([]int, len(src)) + for i := 0; i < len(src); i++ { + if src[i] != nil { + dst[i] = *(src[i]) + } + } + return dst +} + +// IntMap converts a string map of int values into a string +// map of int pointers +func IntMap(src map[string]int) map[string]*int { + dst := make(map[string]*int) + for k, val := range src { + v := val + dst[k] = &v + } + return dst +} + +// IntValueMap converts a string map of int pointers into a string +// map of int values +func IntValueMap(src map[string]*int) map[string]int { + dst := make(map[string]int) + for k, val := range src { + if val != nil { + dst[k] = *val + } + } + return dst +} + +// Int64 returns a pointer to of the int64 value passed in. +func Int64(v int64) *int64 { + return &v +} + +// Int64Value returns the value of the int64 pointer passed in or +// 0 if the pointer is nil. 
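The *Value accessors exist so optional (pointer-typed) fields can be read without a nil check at every call site; a nil pointer simply yields the type's zero value. A tiny sketch with invented variables:

    package main

    import (
    	"fmt"

    	"github.com/aws/aws-sdk-go/aws"
    )

    func main() {
    	var region *string // optional field that was never set
    	var retries *int

    	// Nil pointers are safe to read through the Value helpers.
    	fmt.Printf("%q\n", aws.StringValue(region)) // ""
    	fmt.Println(aws.IntValue(retries))          // 0
    	fmt.Println(aws.BoolValue(nil))             // false
    }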
+func Int64Value(v *int64) int64 { + if v != nil { + return *v + } + return 0 +} + +// Int64Slice converts a slice of int64 values into a slice of +// int64 pointers +func Int64Slice(src []int64) []*int64 { + dst := make([]*int64, len(src)) + for i := 0; i < len(src); i++ { + dst[i] = &(src[i]) + } + return dst +} + +// Int64ValueSlice converts a slice of int64 pointers into a slice of +// int64 values +func Int64ValueSlice(src []*int64) []int64 { + dst := make([]int64, len(src)) + for i := 0; i < len(src); i++ { + if src[i] != nil { + dst[i] = *(src[i]) + } + } + return dst +} + +// Int64Map converts a string map of int64 values into a string +// map of int64 pointers +func Int64Map(src map[string]int64) map[string]*int64 { + dst := make(map[string]*int64) + for k, val := range src { + v := val + dst[k] = &v + } + return dst +} + +// Int64ValueMap converts a string map of int64 pointers into a string +// map of int64 values +func Int64ValueMap(src map[string]*int64) map[string]int64 { + dst := make(map[string]int64) + for k, val := range src { + if val != nil { + dst[k] = *val + } + } + return dst +} + +// Float64 returns a pointer to of the float64 value passed in. +func Float64(v float64) *float64 { + return &v +} + +// Float64Value returns the value of the float64 pointer passed in or +// 0 if the pointer is nil. +func Float64Value(v *float64) float64 { + if v != nil { + return *v + } + return 0 +} + +// Float64Slice converts a slice of float64 values into a slice of +// float64 pointers +func Float64Slice(src []float64) []*float64 { + dst := make([]*float64, len(src)) + for i := 0; i < len(src); i++ { + dst[i] = &(src[i]) + } + return dst +} + +// Float64ValueSlice converts a slice of float64 pointers into a slice of +// float64 values +func Float64ValueSlice(src []*float64) []float64 { + dst := make([]float64, len(src)) + for i := 0; i < len(src); i++ { + if src[i] != nil { + dst[i] = *(src[i]) + } + } + return dst +} + +// Float64Map converts a string map of float64 values into a string +// map of float64 pointers +func Float64Map(src map[string]float64) map[string]*float64 { + dst := make(map[string]*float64) + for k, val := range src { + v := val + dst[k] = &v + } + return dst +} + +// Float64ValueMap converts a string map of float64 pointers into a string +// map of float64 values +func Float64ValueMap(src map[string]*float64) map[string]float64 { + dst := make(map[string]float64) + for k, val := range src { + if val != nil { + dst[k] = *val + } + } + return dst +} + +// Time returns a pointer to of the time.Time value passed in. +func Time(v time.Time) *time.Time { + return &v +} + +// TimeValue returns the value of the time.Time pointer passed in or +// time.Time{} if the pointer is nil. 
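The slice and map converters repeat this pattern for each type. One subtlety worth noting, and visible in the tests that follow: converting pointer slices to value slices drops nil entries to zero values, so the round trip is lossy. An illustrative sketch:

    package main

    import (
    	"fmt"

    	"github.com/aws/aws-sdk-go/aws"
    )

    func main() {
    	ptrs := []*string{aws.String("a"), nil, aws.String("c")}

    	// The nil entry becomes "" in the value slice...
    	vals := aws.StringValueSlice(ptrs)
    	fmt.Printf("%q\n", vals) // ["a" "" "c"]

    	// ...so converting back yields a non-nil pointer to "", not nil.
    	back := aws.StringSlice(vals)
    	fmt.Println(back[1] == nil, *back[1]) // false ""
    }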
+func TimeValue(v *time.Time) time.Time { + if v != nil { + return *v + } + return time.Time{} +} + +// TimeSlice converts a slice of time.Time values into a slice of +// time.Time pointers +func TimeSlice(src []time.Time) []*time.Time { + dst := make([]*time.Time, len(src)) + for i := 0; i < len(src); i++ { + dst[i] = &(src[i]) + } + return dst +} + +// TimeValueSlice converts a slice of time.Time pointers into a slice of +// time.Time values +func TimeValueSlice(src []*time.Time) []time.Time { + dst := make([]time.Time, len(src)) + for i := 0; i < len(src); i++ { + if src[i] != nil { + dst[i] = *(src[i]) + } + } + return dst +} + +// TimeMap converts a string map of time.Time values into a string +// map of time.Time pointers +func TimeMap(src map[string]time.Time) map[string]*time.Time { + dst := make(map[string]*time.Time) + for k, val := range src { + v := val + dst[k] = &v + } + return dst +} + +// TimeValueMap converts a string map of time.Time pointers into a string +// map of time.Time values +func TimeValueMap(src map[string]*time.Time) map[string]time.Time { + dst := make(map[string]time.Time) + for k, val := range src { + if val != nil { + dst[k] = *val + } + } + return dst +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/convert_types_test.go b/vendor/github.com/aws/aws-sdk-go/aws/convert_types_test.go new file mode 100644 index 000000000..df7a3e5d2 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/convert_types_test.go @@ -0,0 +1,437 @@ +package aws + +import ( + "testing" + "time" + + "github.com/stretchr/testify/assert" +) + +var testCasesStringSlice = [][]string{ + {"a", "b", "c", "d", "e"}, + {"a", "b", "", "", "e"}, +} + +func TestStringSlice(t *testing.T) { + for idx, in := range testCasesStringSlice { + if in == nil { + continue + } + out := StringSlice(in) + assert.Len(t, out, len(in), "Unexpected len at idx %d", idx) + for i := range out { + assert.Equal(t, in[i], *(out[i]), "Unexpected value at idx %d", idx) + } + + out2 := StringValueSlice(out) + assert.Len(t, out2, len(in), "Unexpected len at idx %d", idx) + assert.Equal(t, in, out2, "Unexpected value at idx %d", idx) + } +} + +var testCasesStringValueSlice = [][]*string{ + {String("a"), String("b"), nil, String("c")}, +} + +func TestStringValueSlice(t *testing.T) { + for idx, in := range testCasesStringValueSlice { + if in == nil { + continue + } + out := StringValueSlice(in) + assert.Len(t, out, len(in), "Unexpected len at idx %d", idx) + for i := range out { + if in[i] == nil { + assert.Empty(t, out[i], "Unexpected value at idx %d", idx) + } else { + assert.Equal(t, *(in[i]), out[i], "Unexpected value at idx %d", idx) + } + } + + out2 := StringSlice(out) + assert.Len(t, out2, len(in), "Unexpected len at idx %d", idx) + for i := range out2 { + if in[i] == nil { + assert.Empty(t, *(out2[i]), "Unexpected value at idx %d", idx) + } else { + assert.Equal(t, in[i], out2[i], "Unexpected value at idx %d", idx) + } + } + } +} + +var testCasesStringMap = []map[string]string{ + {"a": "1", "b": "2", "c": "3"}, +} + +func TestStringMap(t *testing.T) { + for idx, in := range testCasesStringMap { + if in == nil { + continue + } + out := StringMap(in) + assert.Len(t, out, len(in), "Unexpected len at idx %d", idx) + for i := range out { + assert.Equal(t, in[i], *(out[i]), "Unexpected value at idx %d", idx) + } + + out2 := StringValueMap(out) + assert.Len(t, out2, len(in), "Unexpected len at idx %d", idx) + assert.Equal(t, in, out2, "Unexpected value at idx %d", idx) + } +} + +var testCasesBoolSlice = [][]bool{ + 
{true, true, false, false}, +} + +func TestBoolSlice(t *testing.T) { + for idx, in := range testCasesBoolSlice { + if in == nil { + continue + } + out := BoolSlice(in) + assert.Len(t, out, len(in), "Unexpected len at idx %d", idx) + for i := range out { + assert.Equal(t, in[i], *(out[i]), "Unexpected value at idx %d", idx) + } + + out2 := BoolValueSlice(out) + assert.Len(t, out2, len(in), "Unexpected len at idx %d", idx) + assert.Equal(t, in, out2, "Unexpected value at idx %d", idx) + } +} + +var testCasesBoolValueSlice = [][]*bool{} + +func TestBoolValueSlice(t *testing.T) { + for idx, in := range testCasesBoolValueSlice { + if in == nil { + continue + } + out := BoolValueSlice(in) + assert.Len(t, out, len(in), "Unexpected len at idx %d", idx) + for i := range out { + if in[i] == nil { + assert.Empty(t, out[i], "Unexpected value at idx %d", idx) + } else { + assert.Equal(t, *(in[i]), out[i], "Unexpected value at idx %d", idx) + } + } + + out2 := BoolSlice(out) + assert.Len(t, out2, len(in), "Unexpected len at idx %d", idx) + for i := range out2 { + if in[i] == nil { + assert.Empty(t, *(out2[i]), "Unexpected value at idx %d", idx) + } else { + assert.Equal(t, in[i], out2[i], "Unexpected value at idx %d", idx) + } + } + } +} + +var testCasesBoolMap = []map[string]bool{ + {"a": true, "b": false, "c": true}, +} + +func TestBoolMap(t *testing.T) { + for idx, in := range testCasesBoolMap { + if in == nil { + continue + } + out := BoolMap(in) + assert.Len(t, out, len(in), "Unexpected len at idx %d", idx) + for i := range out { + assert.Equal(t, in[i], *(out[i]), "Unexpected value at idx %d", idx) + } + + out2 := BoolValueMap(out) + assert.Len(t, out2, len(in), "Unexpected len at idx %d", idx) + assert.Equal(t, in, out2, "Unexpected value at idx %d", idx) + } +} + +var testCasesIntSlice = [][]int{ + {1, 2, 3, 4}, +} + +func TestIntSlice(t *testing.T) { + for idx, in := range testCasesIntSlice { + if in == nil { + continue + } + out := IntSlice(in) + assert.Len(t, out, len(in), "Unexpected len at idx %d", idx) + for i := range out { + assert.Equal(t, in[i], *(out[i]), "Unexpected value at idx %d", idx) + } + + out2 := IntValueSlice(out) + assert.Len(t, out2, len(in), "Unexpected len at idx %d", idx) + assert.Equal(t, in, out2, "Unexpected value at idx %d", idx) + } +} + +var testCasesIntValueSlice = [][]*int{} + +func TestIntValueSlice(t *testing.T) { + for idx, in := range testCasesIntValueSlice { + if in == nil { + continue + } + out := IntValueSlice(in) + assert.Len(t, out, len(in), "Unexpected len at idx %d", idx) + for i := range out { + if in[i] == nil { + assert.Empty(t, out[i], "Unexpected value at idx %d", idx) + } else { + assert.Equal(t, *(in[i]), out[i], "Unexpected value at idx %d", idx) + } + } + + out2 := IntSlice(out) + assert.Len(t, out2, len(in), "Unexpected len at idx %d", idx) + for i := range out2 { + if in[i] == nil { + assert.Empty(t, *(out2[i]), "Unexpected value at idx %d", idx) + } else { + assert.Equal(t, in[i], out2[i], "Unexpected value at idx %d", idx) + } + } + } +} + +var testCasesIntMap = []map[string]int{ + {"a": 3, "b": 2, "c": 1}, +} + +func TestIntMap(t *testing.T) { + for idx, in := range testCasesIntMap { + if in == nil { + continue + } + out := IntMap(in) + assert.Len(t, out, len(in), "Unexpected len at idx %d", idx) + for i := range out { + assert.Equal(t, in[i], *(out[i]), "Unexpected value at idx %d", idx) + } + + out2 := IntValueMap(out) + assert.Len(t, out2, len(in), "Unexpected len at idx %d", idx) + assert.Equal(t, in, out2, "Unexpected value at idx 
%d", idx) + } +} + +var testCasesInt64Slice = [][]int64{ + {1, 2, 3, 4}, +} + +func TestInt64Slice(t *testing.T) { + for idx, in := range testCasesInt64Slice { + if in == nil { + continue + } + out := Int64Slice(in) + assert.Len(t, out, len(in), "Unexpected len at idx %d", idx) + for i := range out { + assert.Equal(t, in[i], *(out[i]), "Unexpected value at idx %d", idx) + } + + out2 := Int64ValueSlice(out) + assert.Len(t, out2, len(in), "Unexpected len at idx %d", idx) + assert.Equal(t, in, out2, "Unexpected value at idx %d", idx) + } +} + +var testCasesInt64ValueSlice = [][]*int64{} + +func TestInt64ValueSlice(t *testing.T) { + for idx, in := range testCasesInt64ValueSlice { + if in == nil { + continue + } + out := Int64ValueSlice(in) + assert.Len(t, out, len(in), "Unexpected len at idx %d", idx) + for i := range out { + if in[i] == nil { + assert.Empty(t, out[i], "Unexpected value at idx %d", idx) + } else { + assert.Equal(t, *(in[i]), out[i], "Unexpected value at idx %d", idx) + } + } + + out2 := Int64Slice(out) + assert.Len(t, out2, len(in), "Unexpected len at idx %d", idx) + for i := range out2 { + if in[i] == nil { + assert.Empty(t, *(out2[i]), "Unexpected value at idx %d", idx) + } else { + assert.Equal(t, in[i], out2[i], "Unexpected value at idx %d", idx) + } + } + } +} + +var testCasesInt64Map = []map[string]int64{ + {"a": 3, "b": 2, "c": 1}, +} + +func TestInt64Map(t *testing.T) { + for idx, in := range testCasesInt64Map { + if in == nil { + continue + } + out := Int64Map(in) + assert.Len(t, out, len(in), "Unexpected len at idx %d", idx) + for i := range out { + assert.Equal(t, in[i], *(out[i]), "Unexpected value at idx %d", idx) + } + + out2 := Int64ValueMap(out) + assert.Len(t, out2, len(in), "Unexpected len at idx %d", idx) + assert.Equal(t, in, out2, "Unexpected value at idx %d", idx) + } +} + +var testCasesFloat64Slice = [][]float64{ + {1, 2, 3, 4}, +} + +func TestFloat64Slice(t *testing.T) { + for idx, in := range testCasesFloat64Slice { + if in == nil { + continue + } + out := Float64Slice(in) + assert.Len(t, out, len(in), "Unexpected len at idx %d", idx) + for i := range out { + assert.Equal(t, in[i], *(out[i]), "Unexpected value at idx %d", idx) + } + + out2 := Float64ValueSlice(out) + assert.Len(t, out2, len(in), "Unexpected len at idx %d", idx) + assert.Equal(t, in, out2, "Unexpected value at idx %d", idx) + } +} + +var testCasesFloat64ValueSlice = [][]*float64{} + +func TestFloat64ValueSlice(t *testing.T) { + for idx, in := range testCasesFloat64ValueSlice { + if in == nil { + continue + } + out := Float64ValueSlice(in) + assert.Len(t, out, len(in), "Unexpected len at idx %d", idx) + for i := range out { + if in[i] == nil { + assert.Empty(t, out[i], "Unexpected value at idx %d", idx) + } else { + assert.Equal(t, *(in[i]), out[i], "Unexpected value at idx %d", idx) + } + } + + out2 := Float64Slice(out) + assert.Len(t, out2, len(in), "Unexpected len at idx %d", idx) + for i := range out2 { + if in[i] == nil { + assert.Empty(t, *(out2[i]), "Unexpected value at idx %d", idx) + } else { + assert.Equal(t, in[i], out2[i], "Unexpected value at idx %d", idx) + } + } + } +} + +var testCasesFloat64Map = []map[string]float64{ + {"a": 3, "b": 2, "c": 1}, +} + +func TestFloat64Map(t *testing.T) { + for idx, in := range testCasesFloat64Map { + if in == nil { + continue + } + out := Float64Map(in) + assert.Len(t, out, len(in), "Unexpected len at idx %d", idx) + for i := range out { + assert.Equal(t, in[i], *(out[i]), "Unexpected value at idx %d", idx) + } + + out2 := 
Float64ValueMap(out) + assert.Len(t, out2, len(in), "Unexpected len at idx %d", idx) + assert.Equal(t, in, out2, "Unexpected value at idx %d", idx) + } +} + +var testCasesTimeSlice = [][]time.Time{ + {time.Now(), time.Now().AddDate(100, 0, 0)}, +} + +func TestTimeSlice(t *testing.T) { + for idx, in := range testCasesTimeSlice { + if in == nil { + continue + } + out := TimeSlice(in) + assert.Len(t, out, len(in), "Unexpected len at idx %d", idx) + for i := range out { + assert.Equal(t, in[i], *(out[i]), "Unexpected value at idx %d", idx) + } + + out2 := TimeValueSlice(out) + assert.Len(t, out2, len(in), "Unexpected len at idx %d", idx) + assert.Equal(t, in, out2, "Unexpected value at idx %d", idx) + } +} + +var testCasesTimeValueSlice = [][]*time.Time{} + +func TestTimeValueSlice(t *testing.T) { + for idx, in := range testCasesTimeValueSlice { + if in == nil { + continue + } + out := TimeValueSlice(in) + assert.Len(t, out, len(in), "Unexpected len at idx %d", idx) + for i := range out { + if in[i] == nil { + assert.Empty(t, out[i], "Unexpected value at idx %d", idx) + } else { + assert.Equal(t, *(in[i]), out[i], "Unexpected value at idx %d", idx) + } + } + + out2 := TimeSlice(out) + assert.Len(t, out2, len(in), "Unexpected len at idx %d", idx) + for i := range out2 { + if in[i] == nil { + assert.Empty(t, *(out2[i]), "Unexpected value at idx %d", idx) + } else { + assert.Equal(t, in[i], out2[i], "Unexpected value at idx %d", idx) + } + } + } +} + +var testCasesTimeMap = []map[string]time.Time{ + {"a": time.Now().AddDate(-100, 0, 0), "b": time.Now()}, +} + +func TestTimeMap(t *testing.T) { + for idx, in := range testCasesTimeMap { + if in == nil { + continue + } + out := TimeMap(in) + assert.Len(t, out, len(in), "Unexpected len at idx %d", idx) + for i := range out { + assert.Equal(t, in[i], *(out[i]), "Unexpected value at idx %d", idx) + } + + out2 := TimeValueMap(out) + assert.Len(t, out2, len(in), "Unexpected len at idx %d", idx) + assert.Equal(t, in, out2, "Unexpected value at idx %d", idx) + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/handlers.go b/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/handlers.go new file mode 100644 index 000000000..1d3e656fd --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/handlers.go @@ -0,0 +1,139 @@ +package corehandlers + +import ( + "bytes" + "fmt" + "io" + "io/ioutil" + "net/http" + "net/url" + "regexp" + "runtime" + "strconv" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/request" +) + +// Interface for matching types which also have a Len method. +type lener interface { + Len() int +} + +// BuildContentLengthHandler builds the content length of a request based on the body, +// or will use the HTTPRequest.Header's "Content-Length" if defined. If unable +// to determine request body length and no "Content-Length" was specified it will panic. 
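The io.Seeker branch of this handler measures a body without consuming it: seek to the end to learn the total size, then seek back to the original offset. A standalone sketch of that technique using only the standard library (the bodyLength helper is invented for illustration):

    package main

    import (
    	"bytes"
    	"fmt"
    	"io"
    )

    // bodyLength returns the number of bytes between the current offset and
    // the end of body, restoring the original offset before returning.
    func bodyLength(body io.Seeker) (int64, error) {
    	start, err := body.Seek(0, 1) // whence 1: relative to current offset
    	if err != nil {
    		return 0, err
    	}
    	end, err := body.Seek(0, 2) // whence 2: relative to end
    	if err != nil {
    		return 0, err
    	}
    	// Seek back so the body can still be read from where it started.
    	_, err = body.Seek(start, 0) // whence 0: relative to start
    	return end - start, err
    }

    func main() {
    	r := bytes.NewReader([]byte("hello world"))
    	n, _ := bodyLength(r)
    	fmt.Println(n) // 11
    }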
+var BuildContentLengthHandler = request.NamedHandler{Name: "core.BuildContentLengthHandler", Fn: func(r *request.Request) { + if slength := r.HTTPRequest.Header.Get("Content-Length"); slength != "" { + length, _ := strconv.ParseInt(slength, 10, 64) + r.HTTPRequest.ContentLength = length + return + } + + var length int64 + switch body := r.Body.(type) { + case nil: + length = 0 + case lener: + length = int64(body.Len()) + case io.Seeker: + r.BodyStart, _ = body.Seek(0, 1) + end, _ := body.Seek(0, 2) + body.Seek(r.BodyStart, 0) // make sure to seek back to original location + length = end - r.BodyStart + default: + panic("Cannot get length of body, must provide `ContentLength`") + } + + r.HTTPRequest.ContentLength = length + r.HTTPRequest.Header.Set("Content-Length", fmt.Sprintf("%d", length)) +}} + +// SDKVersionUserAgentHandler is a request handler for adding the SDK Version to the user agent. +var SDKVersionUserAgentHandler = request.NamedHandler{ + Name: "core.SDKVersionUserAgentHandler", + Fn: request.MakeAddToUserAgentHandler(aws.SDKName, aws.SDKVersion, + runtime.Version(), runtime.GOOS, runtime.GOARCH), +} + +var reStatusCode = regexp.MustCompile(`^(\d{3})`) + +// SendHandler is a request handler to send service request using HTTP client. +var SendHandler = request.NamedHandler{Name: "core.SendHandler", Fn: func(r *request.Request) { + var err error + r.HTTPResponse, err = r.Config.HTTPClient.Do(r.HTTPRequest) + if err != nil { + // Capture the case where url.Error is returned for error processing + // response. e.g. 301 without location header comes back as string + // error and r.HTTPResponse is nil. Other url redirect errors will + // comeback in a similar method. + if e, ok := err.(*url.Error); ok && e.Err != nil { + if s := reStatusCode.FindStringSubmatch(e.Err.Error()); s != nil { + code, _ := strconv.ParseInt(s[1], 10, 64) + r.HTTPResponse = &http.Response{ + StatusCode: int(code), + Status: http.StatusText(int(code)), + Body: ioutil.NopCloser(bytes.NewReader([]byte{})), + } + return + } + } + if r.HTTPResponse == nil { + // Add a dummy request response object to ensure the HTTPResponse + // value is consistent. + r.HTTPResponse = &http.Response{ + StatusCode: int(0), + Status: http.StatusText(int(0)), + Body: ioutil.NopCloser(bytes.NewReader([]byte{})), + } + } + // Catch all other request errors. + r.Error = awserr.New("RequestError", "send request failed", err) + r.Retryable = aws.Bool(true) // network errors are retryable + } +}} + +// ValidateResponseHandler is a request handler to validate service response. +var ValidateResponseHandler = request.NamedHandler{Name: "core.ValidateResponseHandler", Fn: func(r *request.Request) { + if r.HTTPResponse.StatusCode == 0 || r.HTTPResponse.StatusCode >= 300 { + // this may be replaced by an UnmarshalError handler + r.Error = awserr.New("UnknownError", "unknown error", nil) + } +}} + +// AfterRetryHandler performs final checks to determine if the request should +// be retried and how long to delay. 
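All of these handlers share one shape: a request.NamedHandler pushed onto one of a client's handler lists, just as the tests later in this patch do with PushBackNamed. A sketch of wiring a custom handler onto the Send list; the handler name is invented, and it assumes the awstesting helper client the tests below also use:

    package main

    import (
    	"fmt"

    	"github.com/aws/aws-sdk-go/aws/request"
    	"github.com/aws/aws-sdk-go/awstesting"
    )

    func main() {
    	svc := awstesting.NewClient()

    	// A NamedHandler pairs an identifying name with the function to run,
    	// so the handler can later be swapped or removed by name.
    	svc.Handlers.Send.PushBackNamed(request.NamedHandler{
    		Name: "example.LogOperation",
    		Fn: func(r *request.Request) {
    			fmt.Println("sending", r.Operation.Name)
    		},
    	})

    	req := svc.NewRequest(&request.Operation{Name: "Operation"}, nil, nil)
    	req.Send() // the custom handler runs during the Send phase
    }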
+var AfterRetryHandler = request.NamedHandler{Name: "core.AfterRetryHandler", Fn: func(r *request.Request) { + // If one of the other handlers already set the retry state + // we don't want to override it based on the service's state + if r.Retryable == nil { + r.Retryable = aws.Bool(r.ShouldRetry(r)) + } + + if r.WillRetry() { + r.RetryDelay = r.RetryRules(r) + r.Config.SleepDelay(r.RetryDelay) + + // when the expired token exception occurs the credentials + // need to be expired locally so that the next request to + // get credentials will trigger a credentials refresh. + if r.IsErrorExpired() { + r.Config.Credentials.Expire() + } + + r.RetryCount++ + r.Error = nil + } +}} + +// ValidateEndpointHandler is a request handler to validate a request had the +// appropriate Region and Endpoint set. Will set r.Error if the endpoint or +// region is not valid. +var ValidateEndpointHandler = request.NamedHandler{Name: "core.ValidateEndpointHandler", Fn: func(r *request.Request) { + if r.ClientInfo.SigningRegion == "" && aws.StringValue(r.Config.Region) == "" { + r.Error = aws.ErrMissingRegion + } else if r.ClientInfo.Endpoint == "" { + r.Error = aws.ErrMissingEndpoint + } +}} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/handlers_test.go b/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/handlers_test.go new file mode 100644 index 000000000..f08950694 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/handlers_test.go @@ -0,0 +1,113 @@ +package corehandlers_test + +import ( + "fmt" + "net/http" + "os" + "testing" + + "github.com/stretchr/testify/assert" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/corehandlers" + "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/awstesting" +) + +func TestValidateEndpointHandler(t *testing.T) { + os.Clearenv() + + svc := awstesting.NewClient(aws.NewConfig().WithRegion("us-west-2")) + svc.Handlers.Clear() + svc.Handlers.Validate.PushBackNamed(corehandlers.ValidateEndpointHandler) + + req := svc.NewRequest(&request.Operation{Name: "Operation"}, nil, nil) + err := req.Build() + + assert.NoError(t, err) +} + +func TestValidateEndpointHandlerErrorRegion(t *testing.T) { + os.Clearenv() + + svc := awstesting.NewClient() + svc.Handlers.Clear() + svc.Handlers.Validate.PushBackNamed(corehandlers.ValidateEndpointHandler) + + req := svc.NewRequest(&request.Operation{Name: "Operation"}, nil, nil) + err := req.Build() + + assert.Error(t, err) + assert.Equal(t, aws.ErrMissingRegion, err) +} + +type mockCredsProvider struct { + expired bool + retrieveCalled bool +} + +func (m *mockCredsProvider) Retrieve() (credentials.Value, error) { + m.retrieveCalled = true + return credentials.Value{ProviderName: "mockCredsProvider"}, nil +} + +func (m *mockCredsProvider) IsExpired() bool { + return m.expired +} + +func TestAfterRetryRefreshCreds(t *testing.T) { + os.Clearenv() + credProvider := &mockCredsProvider{} + + svc := awstesting.NewClient(&aws.Config{ + Credentials: credentials.NewCredentials(credProvider), + MaxRetries: aws.Int(1), + }) + + svc.Handlers.Clear() + svc.Handlers.ValidateResponse.PushBack(func(r *request.Request) { + r.Error = awserr.New("UnknownError", "", nil) + r.HTTPResponse = &http.Response{StatusCode: 400} + }) + svc.Handlers.UnmarshalError.PushBack(func(r *request.Request) { + r.Error = awserr.New("ExpiredTokenException", "", nil) + }) + 
svc.Handlers.AfterRetry.PushBackNamed(corehandlers.AfterRetryHandler) + + assert.True(t, svc.Config.Credentials.IsExpired(), "Expect to start out expired") + assert.False(t, credProvider.retrieveCalled) + + req := svc.NewRequest(&request.Operation{Name: "Operation"}, nil, nil) + req.Send() + + assert.True(t, svc.Config.Credentials.IsExpired()) + assert.False(t, credProvider.retrieveCalled) + + _, err := svc.Config.Credentials.Get() + assert.NoError(t, err) + assert.True(t, credProvider.retrieveCalled) +} + +type testSendHandlerTransport struct{} + +func (t *testSendHandlerTransport) RoundTrip(r *http.Request) (*http.Response, error) { + return nil, fmt.Errorf("mock error") +} + +func TestSendHandlerError(t *testing.T) { + svc := awstesting.NewClient(&aws.Config{ + HTTPClient: &http.Client{ + Transport: &testSendHandlerTransport{}, + }, + }) + svc.Handlers.Clear() + svc.Handlers.Send.PushBackNamed(corehandlers.SendHandler) + r := svc.NewRequest(&request.Operation{Name: "Operation"}, nil, nil) + + r.Send() + + assert.Error(t, r.Error) + assert.NotNil(t, r.HTTPResponse) +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/param_validator.go b/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/param_validator.go new file mode 100644 index 000000000..3b53f5e02 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/param_validator.go @@ -0,0 +1,144 @@ +package corehandlers + +import ( + "fmt" + "reflect" + "strconv" + "strings" + + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/request" +) + +// ValidateParametersHandler is a request handler to validate the input parameters. +// Validating parameters only has meaning if done prior to the request being sent. +var ValidateParametersHandler = request.NamedHandler{Name: "core.ValidateParametersHandler", Fn: func(r *request.Request) { + if r.ParamsFilled() { + v := validator{errors: []string{}} + v.validateAny(reflect.ValueOf(r.Params), "") + + if count := len(v.errors); count > 0 { + format := "%d validation errors:\n- %s" + msg := fmt.Sprintf(format, count, strings.Join(v.errors, "\n- ")) + r.Error = awserr.New("InvalidParameter", msg, nil) + } + } +}} + +// A validator validates values. Collects validations errors which occurs. +type validator struct { + errors []string +} + +// validateAny will validate any struct, slice or map type. All validations +// are also performed recursively for nested types. +func (v *validator) validateAny(value reflect.Value, path string) { + value = reflect.Indirect(value) + if !value.IsValid() { + return + } + + switch value.Kind() { + case reflect.Struct: + v.validateStruct(value, path) + case reflect.Slice: + for i := 0; i < value.Len(); i++ { + v.validateAny(value.Index(i), path+fmt.Sprintf("[%d]", i)) + } + case reflect.Map: + for _, n := range value.MapKeys() { + v.validateAny(value.MapIndex(n), path+fmt.Sprintf("[%q]", n.String())) + } + } +} + +// validateStruct will validate the struct value's fields. If the structure has +// nested types those types will be validated also. +func (v *validator) validateStruct(value reflect.Value, path string) { + prefix := "." 
+ if path == "" { + prefix = "" + } + + for i := 0; i < value.Type().NumField(); i++ { + f := value.Type().Field(i) + if strings.ToLower(f.Name[0:1]) == f.Name[0:1] { + continue + } + fvalue := value.FieldByName(f.Name) + + err := validateField(f, fvalue, validateFieldRequired, validateFieldMin) + if err != nil { + v.errors = append(v.errors, fmt.Sprintf("%s: %s", err.Error(), path+prefix+f.Name)) + continue + } + + v.validateAny(fvalue, path+prefix+f.Name) + } +} + +type validatorFunc func(f reflect.StructField, fvalue reflect.Value) error + +func validateField(f reflect.StructField, fvalue reflect.Value, funcs ...validatorFunc) error { + for _, fn := range funcs { + if err := fn(f, fvalue); err != nil { + return err + } + } + return nil +} + +// Validates that a field has a valid value provided for required fields. +func validateFieldRequired(f reflect.StructField, fvalue reflect.Value) error { + if f.Tag.Get("required") == "" { + return nil + } + + switch fvalue.Kind() { + case reflect.Ptr, reflect.Slice, reflect.Map: + if fvalue.IsNil() { + return fmt.Errorf("missing required parameter") + } + default: + if !fvalue.IsValid() { + return fmt.Errorf("missing required parameter") + } + } + return nil +} + +// Validates that if a value is provided for a field, that value must be at +// least a minimum length. +func validateFieldMin(f reflect.StructField, fvalue reflect.Value) error { + minStr := f.Tag.Get("min") + if minStr == "" { + return nil + } + min, _ := strconv.ParseInt(minStr, 10, 64) + + kind := fvalue.Kind() + if kind == reflect.Ptr { + if fvalue.IsNil() { + return nil + } + fvalue = fvalue.Elem() + } + + switch fvalue.Kind() { + case reflect.String: + if int64(fvalue.Len()) < min { + return fmt.Errorf("field too short, minimum length %d", min) + } + case reflect.Slice, reflect.Map: + if fvalue.IsNil() { + return nil + } + if int64(fvalue.Len()) < min { + return fmt.Errorf("field too short, minimum length %d", min) + } + + // TODO min can also apply to number minimum value. 
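+		// A numeric minimum would need one more case here, along the lines of
+		//   case reflect.Int64:
+		//       if fvalue.Int() < min { return fmt.Errorf(...) }
+		// (an illustrative sketch only; not implemented in this vendored copy)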
+ + } + return nil +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/param_validator_test.go b/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/param_validator_test.go new file mode 100644 index 000000000..e10debca5 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/param_validator_test.go @@ -0,0 +1,129 @@ +package corehandlers_test + +import ( + "testing" + + "github.com/stretchr/testify/assert" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/client" + "github.com/aws/aws-sdk-go/aws/client/metadata" + "github.com/aws/aws-sdk-go/aws/corehandlers" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/stretchr/testify/require" +) + +var testSvc = func() *client.Client { + s := &client.Client{ + Config: aws.Config{}, + ClientInfo: metadata.ClientInfo{ + ServiceName: "mock-service", + APIVersion: "2015-01-01", + }, + } + return s +}() + +type StructShape struct { + RequiredList []*ConditionalStructShape `required:"true"` + RequiredMap map[string]*ConditionalStructShape `required:"true"` + RequiredBool *bool `required:"true"` + OptionalStruct *ConditionalStructShape + + hiddenParameter *string + _ struct{} +} + +type ConditionalStructShape struct { + Name *string `required:"true"` + _ struct{} +} + +func TestNoErrors(t *testing.T) { + input := &StructShape{ + RequiredList: []*ConditionalStructShape{}, + RequiredMap: map[string]*ConditionalStructShape{ + "key1": {Name: aws.String("Name")}, + "key2": {Name: aws.String("Name")}, + }, + RequiredBool: aws.Bool(true), + OptionalStruct: &ConditionalStructShape{Name: aws.String("Name")}, + } + + req := testSvc.NewRequest(&request.Operation{}, input, nil) + corehandlers.ValidateParametersHandler.Fn(req) + require.NoError(t, req.Error) +} + +func TestMissingRequiredParameters(t *testing.T) { + input := &StructShape{} + req := testSvc.NewRequest(&request.Operation{}, input, nil) + corehandlers.ValidateParametersHandler.Fn(req) + + require.Error(t, req.Error) + assert.Equal(t, "InvalidParameter", req.Error.(awserr.Error).Code()) + assert.Equal(t, "3 validation errors:\n- missing required parameter: RequiredList\n- missing required parameter: RequiredMap\n- missing required parameter: RequiredBool", req.Error.(awserr.Error).Message()) +} + +func TestNestedMissingRequiredParameters(t *testing.T) { + input := &StructShape{ + RequiredList: []*ConditionalStructShape{{}}, + RequiredMap: map[string]*ConditionalStructShape{ + "key1": {Name: aws.String("Name")}, + "key2": {}, + }, + RequiredBool: aws.Bool(true), + OptionalStruct: &ConditionalStructShape{}, + } + + req := testSvc.NewRequest(&request.Operation{}, input, nil) + corehandlers.ValidateParametersHandler.Fn(req) + + require.Error(t, req.Error) + assert.Equal(t, "InvalidParameter", req.Error.(awserr.Error).Code()) + assert.Equal(t, "3 validation errors:\n- missing required parameter: RequiredList[0].Name\n- missing required parameter: RequiredMap[\"key2\"].Name\n- missing required parameter: OptionalStruct.Name", req.Error.(awserr.Error).Message()) +} + +type testInput struct { + StringField string `min:"5"` + PtrStrField *string `min:"2"` + ListField []string `min:"3"` + MapField map[string]string `min:"4"` +} + +var testsFieldMin = []struct { + err awserr.Error + in testInput +}{ + { + err: awserr.New("InvalidParameter", "1 validation errors:\n- field too short, minimum length 5: StringField", nil), + in: testInput{StringField: "abcd"}, + }, + { + err: awserr.New("InvalidParameter", "2 validation 
errors:\n- field too short, minimum length 5: StringField\n- field too short, minimum length 3: ListField", nil), + in: testInput{StringField: "abcd", ListField: []string{"a", "b"}}, + }, + { + err: awserr.New("InvalidParameter", "3 validation errors:\n- field too short, minimum length 5: StringField\n- field too short, minimum length 3: ListField\n- field too short, minimum length 4: MapField", nil), + in: testInput{StringField: "abcd", ListField: []string{"a", "b"}, MapField: map[string]string{"a": "a", "b": "b"}}, + }, + { + err: awserr.New("InvalidParameter", "1 validation errors:\n- field too short, minimum length 2: PtrStrField", nil), + in: testInput{StringField: "abcde", PtrStrField: aws.String("v")}, + }, + { + err: nil, + in: testInput{StringField: "abcde", PtrStrField: aws.String("value"), + ListField: []string{"a", "b", "c"}, MapField: map[string]string{"a": "a", "b": "b", "c": "c", "d": "d"}}, + }, +} + +func TestValidateFieldMinParameter(t *testing.T) { + for i, c := range testsFieldMin { + req := testSvc.NewRequest(&request.Operation{}, &c.in, nil) + corehandlers.ValidateParametersHandler.Fn(req) + + require.Equal(t, c.err, req.Error, "%d case failed", i) + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/chain_provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/chain_provider.go new file mode 100644 index 000000000..857311f64 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/chain_provider.go @@ -0,0 +1,100 @@ +package credentials + +import ( + "github.com/aws/aws-sdk-go/aws/awserr" +) + +var ( + // ErrNoValidProvidersFoundInChain Is returned when there are no valid + // providers in the ChainProvider. + // + // This has been deprecated. For verbose error messaging set + // aws.Config.CredentialsChainVerboseErrors to true + // + // @readonly + ErrNoValidProvidersFoundInChain = awserr.New("NoCredentialProviders", + `no valid providers in chain. Deprecated. + For verbose messaging see aws.Config.CredentialsChainVerboseErrors`, + nil) +) + +// A ChainProvider will search for a provider which returns credentials +// and cache that provider until Retrieve is called again. +// +// The ChainProvider provides a way of chaining multiple providers together +// which will pick the first available using priority order of the Providers +// in the list. +// +// If none of the Providers retrieve valid credentials Value, ChainProvider's +// Retrieve() will return the error ErrNoValidProvidersFoundInChain. +// +// If a Provider is found which returns valid credentials Value ChainProvider +// will cache that Provider for all calls to IsExpired(), until Retrieve is +// called again. +// +// Example of ChainProvider to be used with an EnvProvider and EC2RoleProvider. +// In this example EnvProvider will first check if any credentials are available +// vai the environment variables. If there are none ChainProvider will check +// the next Provider in the list, EC2RoleProvider in this case. 
If EC2RoleProvider
+// does not return any credentials, ChainProvider will return the error
+// ErrNoValidProvidersFoundInChain.
+//
+//     creds := NewChainCredentials(
+//         []Provider{
+//             &EnvProvider{},
+//             &EC2RoleProvider{
+//                 Client: ec2metadata.New(sess),
+//             },
+//         })
+//
+//     // Usage of ChainCredentials with aws.Config
+//     svc := ec2.New(&aws.Config{Credentials: creds})
+//
+type ChainProvider struct {
+	Providers     []Provider
+	curr          Provider
+	VerboseErrors bool
+}
+
+// NewChainCredentials returns a pointer to a new Credentials object
+// wrapping a chain of providers.
+func NewChainCredentials(providers []Provider) *Credentials {
+	return NewCredentials(&ChainProvider{
+		Providers: append([]Provider{}, providers...),
+	})
+}
+
+// Retrieve returns the credentials value, or an error if no provider in the
+// chain returned a value without error.
+//
+// If a provider is found it will be cached and any calls to IsExpired()
+// will return the expired state of the cached provider.
+func (c *ChainProvider) Retrieve() (Value, error) {
+	var errs []error
+	for _, p := range c.Providers {
+		creds, err := p.Retrieve()
+		if err == nil {
+			c.curr = p
+			return creds, nil
+		}
+		errs = append(errs, err)
+	}
+	c.curr = nil
+
+	var err error
+	err = ErrNoValidProvidersFoundInChain
+	if c.VerboseErrors {
+		err = awserr.NewBatchError("NoCredentialProviders", "no valid providers in chain", errs)
+	}
+	return Value{}, err
+}
+
+// IsExpired returns the expired state of the currently cached provider
+// if there is one. If there is no current provider, true will be returned.
+func (c *ChainProvider) IsExpired() bool {
+	if c.curr != nil {
+		return c.curr.IsExpired()
+	}
+
+	return true
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/chain_provider_test.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/chain_provider_test.go
new file mode 100644
index 000000000..3b393a2ed
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/chain_provider_test.go
@@ -0,0 +1,154 @@
+package credentials
+
+import (
+	"testing"
+
+	"github.com/aws/aws-sdk-go/aws/awserr"
+	"github.com/stretchr/testify/assert"
+)
+
+type secondStubProvider struct {
+	creds   Value
+	expired bool
+	err     error
+}
+
+func (s *secondStubProvider) Retrieve() (Value, error) {
+	s.expired = false
+	s.creds.ProviderName = "secondStubProvider"
+	return s.creds, s.err
+}
+func (s *secondStubProvider) IsExpired() bool {
+	return s.expired
+}
+
+func TestChainProviderWithNames(t *testing.T) {
+	p := &ChainProvider{
+		Providers: []Provider{
+			&stubProvider{err: awserr.New("FirstError", "first provider error", nil)},
+			&stubProvider{err: awserr.New("SecondError", "second provider error", nil)},
+			&secondStubProvider{
+				creds: Value{
+					AccessKeyID:     "AKIF",
+					SecretAccessKey: "NOSECRET",
+					SessionToken:    "",
+				},
+			},
+			&stubProvider{
+				creds: Value{
+					AccessKeyID:     "AKID",
+					SecretAccessKey: "SECRET",
+					SessionToken:    "",
+				},
+			},
+		},
+	}
+
+	creds, err := p.Retrieve()
+	assert.Nil(t, err, "Expect no error")
+	assert.Equal(t, "secondStubProvider", creds.ProviderName, "Expect provider name to match")
+
+	// Also check credentials
+	assert.Equal(t, "AKIF", creds.AccessKeyID, "Expect access key ID to match")
+	assert.Equal(t, "NOSECRET", creds.SecretAccessKey, "Expect secret access key to match")
+	assert.Empty(t, creds.SessionToken, "Expect session token to be empty")
+}
+
+func TestChainProviderGet(t *testing.T) {
+	p := &ChainProvider{
+		Providers: []Provider{
+			&stubProvider{err: awserr.New("FirstError", "first provider error", nil)},
+			
&stubProvider{err: awserr.New("SecondError", "second provider error", nil)}, + &stubProvider{ + creds: Value{ + AccessKeyID: "AKID", + SecretAccessKey: "SECRET", + SessionToken: "", + }, + }, + }, + } + + creds, err := p.Retrieve() + assert.Nil(t, err, "Expect no error") + assert.Equal(t, "AKID", creds.AccessKeyID, "Expect access key ID to match") + assert.Equal(t, "SECRET", creds.SecretAccessKey, "Expect secret access key to match") + assert.Empty(t, creds.SessionToken, "Expect session token to be empty") +} + +func TestChainProviderIsExpired(t *testing.T) { + stubProvider := &stubProvider{expired: true} + p := &ChainProvider{ + Providers: []Provider{ + stubProvider, + }, + } + + assert.True(t, p.IsExpired(), "Expect expired to be true before any Retrieve") + _, err := p.Retrieve() + assert.Nil(t, err, "Expect no error") + assert.False(t, p.IsExpired(), "Expect not expired after retrieve") + + stubProvider.expired = true + assert.True(t, p.IsExpired(), "Expect return of expired provider") + + _, err = p.Retrieve() + assert.False(t, p.IsExpired(), "Expect not expired after retrieve") +} + +func TestChainProviderWithNoProvider(t *testing.T) { + p := &ChainProvider{ + Providers: []Provider{}, + } + + assert.True(t, p.IsExpired(), "Expect expired with no providers") + _, err := p.Retrieve() + assert.Equal(t, + ErrNoValidProvidersFoundInChain, + err, + "Expect no providers error returned") +} + +func TestChainProviderWithNoValidProvider(t *testing.T) { + errs := []error{ + awserr.New("FirstError", "first provider error", nil), + awserr.New("SecondError", "second provider error", nil), + } + p := &ChainProvider{ + Providers: []Provider{ + &stubProvider{err: errs[0]}, + &stubProvider{err: errs[1]}, + }, + } + + assert.True(t, p.IsExpired(), "Expect expired with no providers") + _, err := p.Retrieve() + + assert.Equal(t, + ErrNoValidProvidersFoundInChain, + err, + "Expect no providers error returned") +} + +func TestChainProviderWithNoValidProviderWithVerboseEnabled(t *testing.T) { + errs := []error{ + awserr.New("FirstError", "first provider error", nil), + awserr.New("SecondError", "second provider error", nil), + } + p := &ChainProvider{ + VerboseErrors: true, + Providers: []Provider{ + &stubProvider{err: errs[0]}, + &stubProvider{err: errs[1]}, + }, + } + + assert.True(t, p.IsExpired(), "Expect expired with no providers") + _, err := p.Retrieve() + + assert.Equal(t, + awserr.NewBatchError("NoCredentialProviders", "no valid providers in chain", errs), + err, + "Expect no providers error returned") +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/credentials.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/credentials.go new file mode 100644 index 000000000..7b8ebf5f9 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/credentials.go @@ -0,0 +1,223 @@ +// Package credentials provides credential retrieval and management +// +// The Credentials is the primary method of getting access to and managing +// credentials Values. Using dependency injection retrieval of the credential +// values is handled by a object which satisfies the Provider interface. +// +// By default the Credentials.Get() will cache the successful result of a +// Provider's Retrieve() until Provider.IsExpired() returns true. At which +// point Credentials will call Provider's Retrieve() to get new credential Value. +// +// The Provider is responsible for determining when credentials Value have expired. 
+// It is also important to note that Credentials will always call Retrieve the
+// first time Credentials.Get() is called.
+//
+// Example of using the environment variable credentials.
+//
+//     creds := NewEnvCredentials()
+//
+//     // Retrieve the credentials value
+//     credValue, err := creds.Get()
+//     if err != nil {
+//         // handle error
+//     }
+//
+// Example of forcing credentials to expire and be refreshed on the next Get().
+// This may be helpful to proactively expire credentials and refresh them sooner
+// than they would naturally expire on their own.
+//
+//     creds := NewCredentials(&EC2RoleProvider{})
+//     creds.Expire()
+//     credsValue, err := creds.Get()
+//     // New credentials will be retrieved instead of from cache.
+//
+//
+// Custom Provider
+//
+// Each Provider built into this package also provides a helper method to generate
+// a Credentials pointer setup with the provider. To use a custom Provider just
+// create a type which satisfies the Provider interface and pass it to the
+// NewCredentials method.
+//
+//     type MyProvider struct{}
+//     func (m *MyProvider) Retrieve() (Value, error) {...}
+//     func (m *MyProvider) IsExpired() bool {...}
+//
+//     creds := NewCredentials(&MyProvider{})
+//     credValue, err := creds.Get()
+//
+package credentials
+
+import (
+	"sync"
+	"time"
+)
+
+// AnonymousCredentials is an empty Credential object that can be used as
+// dummy placeholder credentials for requests that do not need to be signed.
+//
+// This Credentials can be used to configure a service to not sign requests
+// when making service API calls. For example, when accessing public
+// s3 buckets.
+//
+//     svc := s3.New(&aws.Config{Credentials: AnonymousCredentials})
+//     // Access public S3 buckets.
+//
+// @readonly
+var AnonymousCredentials = NewStaticCredentials("", "", "")
+
+// A Value is the AWS credentials value for individual credential fields.
+type Value struct {
+	// AWS Access key ID
+	AccessKeyID string
+
+	// AWS Secret Access Key
+	SecretAccessKey string
+
+	// AWS Session Token
+	SessionToken string
+
+	// Provider used to get credentials
+	ProviderName string
+}
+
+// A Provider is the interface for any component which will provide credentials
+// Value. A provider is required to manage its own expired state, and what being
+// expired means.
+//
+// The Provider should not need to implement its own mutexes, because
+// that will be managed by Credentials.
+type Provider interface {
+	// Retrieve returns the credential value if it was successfully retrieved.
+	// An error is returned if the value was not obtainable or was empty.
+	Retrieve() (Value, error)
+
+	// IsExpired returns if the credentials are no longer valid, and need
+	// to be retrieved.
+	IsExpired() bool
+}
+
+// An Expiry provides shared expiration logic to be used by credentials
+// providers to implement expiry functionality.
+//
+// The best method to use this struct is as an anonymous field within the
+// provider's struct.
+//
+// Example:
+//     type EC2RoleProvider struct {
+//         Expiry
+//         ...
+//     }
+type Expiry struct {
+	// The date/time when to expire on
+	expiration time.Time
+
+	// If set will be used by IsExpired to determine the current time.
+	// Defaults to time.Now if CurrentTime is not set. Available for testing
+	// to be able to mock out the current time.
+	CurrentTime func() time.Time
+}
+
+// SetExpiration sets the expiration IsExpired will check when called.
+//
+// If window is greater than 0 the expiration time will be reduced by the
+// window value.
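+// For example, SetExpiration(expiresAt, 10*time.Second) makes IsExpired
+// report true starting 10 seconds before expiresAt (illustrative values).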
+// +// Using a window is helpful to trigger credentials to expire sooner than +// the expiration time given to ensure no requests are made with expired +// tokens. +func (e *Expiry) SetExpiration(expiration time.Time, window time.Duration) { + e.expiration = expiration + if window > 0 { + e.expiration = e.expiration.Add(-window) + } +} + +// IsExpired returns if the credentials are expired. +func (e *Expiry) IsExpired() bool { + if e.CurrentTime == nil { + e.CurrentTime = time.Now + } + return e.expiration.Before(e.CurrentTime()) +} + +// A Credentials provides synchronous safe retrieval of AWS credentials Value. +// Credentials will cache the credentials value until they expire. Once the value +// expires the next Get will attempt to retrieve valid credentials. +// +// Credentials is safe to use across multiple goroutines and will manage the +// synchronous state so the Providers do not need to implement their own +// synchronization. +// +// The first Credentials.Get() will always call Provider.Retrieve() to get the +// first instance of the credentials Value. All calls to Get() after that +// will return the cached credentials Value until IsExpired() returns true. +type Credentials struct { + creds Value + forceRefresh bool + m sync.Mutex + + provider Provider +} + +// NewCredentials returns a pointer to a new Credentials with the provider set. +func NewCredentials(provider Provider) *Credentials { + return &Credentials{ + provider: provider, + forceRefresh: true, + } +} + +// Get returns the credentials value, or error if the credentials Value failed +// to be retrieved. +// +// Will return the cached credentials Value if it has not expired. If the +// credentials Value has expired the Provider's Retrieve() will be called +// to refresh the credentials. +// +// If Credentials.Expire() was called the credentials Value will be force +// expired, and the next call to Get() will cause them to be refreshed. +func (c *Credentials) Get() (Value, error) { + c.m.Lock() + defer c.m.Unlock() + + if c.isExpired() { + creds, err := c.provider.Retrieve() + if err != nil { + return Value{}, err + } + c.creds = creds + c.forceRefresh = false + } + + return c.creds, nil +} + +// Expire expires the credentials and forces them to be retrieved on the +// next call to Get(). +// +// This will override the Provider's expired state, and force Credentials +// to call the Provider's Retrieve(). +func (c *Credentials) Expire() { + c.m.Lock() + defer c.m.Unlock() + + c.forceRefresh = true +} + +// IsExpired returns if the credentials are no longer valid, and need +// to be retrieved. +// +// If the Credentials were forced to be expired with Expire() this will +// reflect that override. +func (c *Credentials) IsExpired() bool { + c.m.Lock() + defer c.m.Unlock() + + return c.isExpired() +} + +// isExpired helper method wrapping the definition of expired credentials. 
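Pulling the pieces together, here is a self-contained sketch of the Get/Expire/IsExpired cycle. The constProvider type is invented for illustration; the vendored tests below exercise the same flow with a stubProvider:

    package main

    import (
    	"fmt"

    	"github.com/aws/aws-sdk-go/aws/credentials"
    )

    // constProvider always returns the same value and never expires on its own.
    type constProvider struct{ v credentials.Value }

    func (p constProvider) Retrieve() (credentials.Value, error) { return p.v, nil }
    func (p constProvider) IsExpired() bool                      { return false }

    func main() {
    	creds := credentials.NewCredentials(constProvider{
    		v: credentials.Value{AccessKeyID: "AKID", SecretAccessKey: "SECRET"},
    	})

    	fmt.Println(creds.IsExpired()) // true: the first Get always calls Retrieve

    	v, _ := creds.Get()
    	fmt.Println(v.AccessKeyID, creds.IsExpired()) // AKID false

    	creds.Expire() // force Retrieve on the next Get
    	fmt.Println(creds.IsExpired()) // true
    }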
+func (c *Credentials) isExpired() bool { + return c.forceRefresh || c.provider.IsExpired() +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/credentials_test.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/credentials_test.go new file mode 100644 index 000000000..7b79ba985 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/credentials_test.go @@ -0,0 +1,73 @@ +package credentials + +import ( + "testing" + + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/stretchr/testify/assert" +) + +type stubProvider struct { + creds Value + expired bool + err error +} + +func (s *stubProvider) Retrieve() (Value, error) { + s.expired = false + s.creds.ProviderName = "stubProvider" + return s.creds, s.err +} +func (s *stubProvider) IsExpired() bool { + return s.expired +} + +func TestCredentialsGet(t *testing.T) { + c := NewCredentials(&stubProvider{ + creds: Value{ + AccessKeyID: "AKID", + SecretAccessKey: "SECRET", + SessionToken: "", + }, + expired: true, + }) + + creds, err := c.Get() + assert.Nil(t, err, "Expected no error") + assert.Equal(t, "AKID", creds.AccessKeyID, "Expect access key ID to match") + assert.Equal(t, "SECRET", creds.SecretAccessKey, "Expect secret access key to match") + assert.Empty(t, creds.SessionToken, "Expect session token to be empty") +} + +func TestCredentialsGetWithError(t *testing.T) { + c := NewCredentials(&stubProvider{err: awserr.New("provider error", "", nil), expired: true}) + + _, err := c.Get() + assert.Equal(t, "provider error", err.(awserr.Error).Code(), "Expected provider error") +} + +func TestCredentialsExpire(t *testing.T) { + stub := &stubProvider{} + c := NewCredentials(stub) + + stub.expired = false + assert.True(t, c.IsExpired(), "Expected to start out expired") + c.Expire() + assert.True(t, c.IsExpired(), "Expected to be expired") + + c.forceRefresh = false + assert.False(t, c.IsExpired(), "Expected not to be expired") + + stub.expired = true + assert.True(t, c.IsExpired(), "Expected to be expired") +} + +func TestCredentialsGetWithProviderName(t *testing.T) { + stub := &stubProvider{} + + c := NewCredentials(stub) + + creds, err := c.Get() + assert.Nil(t, err, "Expected no error") + assert.Equal(t, creds.ProviderName, "stubProvider", "Expected provider name to match") +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds/ec2_role_provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds/ec2_role_provider.go new file mode 100644 index 000000000..36f8add35 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds/ec2_role_provider.go @@ -0,0 +1,178 @@ +package ec2rolecreds + +import ( + "bufio" + "encoding/json" + "fmt" + "path" + "strings" + "time" + + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/client" + "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/aws/aws-sdk-go/aws/ec2metadata" +) + +// ProviderName provides a name of EC2Role provider +const ProviderName = "EC2RoleProvider" + +// A EC2RoleProvider retrieves credentials from the EC2 service, and keeps track if +// those credentials are expired. +// +// Example how to configure the EC2RoleProvider with custom http Client, Endpoint +// or ExpiryWindow +// +// p := &ec2rolecreds.EC2RoleProvider{ +// // Pass in a custom timeout to be used when requesting +// // IAM EC2 Role credentials. +// Client: ec2metadata.New(sess, aws.Config{ +// HTTPClient: &http.Client{Timeout: 10 * time.Second}, +// }), +// +// // Do not use early expiry of credentials. 
If a non zero value is +// // specified the credentials will be expired early +// ExpiryWindow: 0, +// } +type EC2RoleProvider struct { + credentials.Expiry + + // Required EC2Metadata client to use when connecting to EC2 metadata service. + Client *ec2metadata.EC2Metadata + + // ExpiryWindow will allow the credentials to trigger refreshing prior to + // the credentials actually expiring. This is beneficial so race conditions + // with expiring credentials do not cause request to fail unexpectedly + // due to ExpiredTokenException exceptions. + // + // So a ExpiryWindow of 10s would cause calls to IsExpired() to return true + // 10 seconds before the credentials are actually expired. + // + // If ExpiryWindow is 0 or less it will be ignored. + ExpiryWindow time.Duration +} + +// NewCredentials returns a pointer to a new Credentials object wrapping +// the EC2RoleProvider. Takes a ConfigProvider to create a EC2Metadata client. +// The ConfigProvider is satisfied by the session.Session type. +func NewCredentials(c client.ConfigProvider, options ...func(*EC2RoleProvider)) *credentials.Credentials { + p := &EC2RoleProvider{ + Client: ec2metadata.New(c), + } + + for _, option := range options { + option(p) + } + + return credentials.NewCredentials(p) +} + +// NewCredentialsWithClient returns a pointer to a new Credentials object wrapping +// the EC2RoleProvider. Takes a EC2Metadata client to use when connecting to EC2 +// metadata service. +func NewCredentialsWithClient(client *ec2metadata.EC2Metadata, options ...func(*EC2RoleProvider)) *credentials.Credentials { + p := &EC2RoleProvider{ + Client: client, + } + + for _, option := range options { + option(p) + } + + return credentials.NewCredentials(p) +} + +// Retrieve retrieves credentials from the EC2 service. +// Error will be returned if the request fails, or unable to extract +// the desired credentials. +func (m *EC2RoleProvider) Retrieve() (credentials.Value, error) { + credsList, err := requestCredList(m.Client) + if err != nil { + return credentials.Value{ProviderName: ProviderName}, err + } + + if len(credsList) == 0 { + return credentials.Value{ProviderName: ProviderName}, awserr.New("EmptyEC2RoleList", "empty EC2 Role list", nil) + } + credsName := credsList[0] + + roleCreds, err := requestCred(m.Client, credsName) + if err != nil { + return credentials.Value{ProviderName: ProviderName}, err + } + + m.SetExpiration(roleCreds.Expiration, m.ExpiryWindow) + + return credentials.Value{ + AccessKeyID: roleCreds.AccessKeyID, + SecretAccessKey: roleCreds.SecretAccessKey, + SessionToken: roleCreds.Token, + ProviderName: ProviderName, + }, nil +} + +// A ec2RoleCredRespBody provides the shape for unmarshalling credential +// request responses. +type ec2RoleCredRespBody struct { + // Success State + Expiration time.Time + AccessKeyID string + SecretAccessKey string + Token string + + // Error state + Code string + Message string +} + +const iamSecurityCredsPath = "/iam/security-credentials" + +// requestCredList requests a list of credentials from the EC2 service. 
+// If there are no credentials, or there is an error making or receiving the request,
+// an error will be returned.
+func requestCredList(client *ec2metadata.EC2Metadata) ([]string, error) {
+	resp, err := client.GetMetadata(iamSecurityCredsPath)
+	if err != nil {
+		return nil, awserr.New("EC2RoleRequestError", "failed to list EC2 Roles", err)
+	}
+
+	credsList := []string{}
+	s := bufio.NewScanner(strings.NewReader(resp))
+	for s.Scan() {
+		credsList = append(credsList, s.Text())
+	}
+
+	if err := s.Err(); err != nil {
+		return nil, awserr.New("SerializationError", "failed to read list of EC2 Roles", err)
+	}
+
+	return credsList, nil
+}
+
+// requestCred requests the credentials for a specific role from the EC2 service.
+//
+// If the credentials cannot be found, or there is an error reading the response,
+// an error will be returned.
+func requestCred(client *ec2metadata.EC2Metadata, credsName string) (ec2RoleCredRespBody, error) {
+	resp, err := client.GetMetadata(path.Join(iamSecurityCredsPath, credsName))
+	if err != nil {
+		return ec2RoleCredRespBody{},
+			awserr.New("EC2RoleRequestError",
+				fmt.Sprintf("failed to get %s EC2 Role credentials", credsName),
+				err)
+	}
+
+	respCreds := ec2RoleCredRespBody{}
+	if err := json.NewDecoder(strings.NewReader(resp)).Decode(&respCreds); err != nil {
+		return ec2RoleCredRespBody{},
+			awserr.New("SerializationError",
+				fmt.Sprintf("failed to decode %s EC2 Role credentials", credsName),
+				err)
+	}
+
+	if respCreds.Code != "Success" {
+		// If an error code was returned something failed requesting the role.
+		return ec2RoleCredRespBody{}, awserr.New(respCreds.Code, respCreds.Message, nil)
+	}
+
+	return respCreds, nil
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds/ec2_role_provider_test.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds/ec2_role_provider_test.go
new file mode 100644
index 000000000..da3d8ed3e
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds/ec2_role_provider_test.go
@@ -0,0 +1,159 @@
+package ec2rolecreds_test
+
+import (
+	"fmt"
+	"net/http"
+	"net/http/httptest"
+	"testing"
+	"time"
+
+	"github.com/stretchr/testify/assert"
+
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/aws/awserr"
+	"github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds"
+	"github.com/aws/aws-sdk-go/aws/ec2metadata"
+	"github.com/aws/aws-sdk-go/aws/session"
+)
+
+const credsRespTmpl = `{
+  "Code": "Success",
+  "Type": "AWS-HMAC",
+  "AccessKeyId" : "accessKey",
+  "SecretAccessKey" : "secret",
+  "Token" : "token",
+  "Expiration" : "%s",
+  "LastUpdated" : "2009-11-23T0:00:00Z"
+}`
+
+const credsFailRespTmpl = `{
+  "Code": "ErrorCode",
+  "Message": "ErrorMsg",
+  "LastUpdated": "2009-11-23T0:00:00Z"
+}`
+
+func initTestServer(expireOn string, failAssume bool) *httptest.Server {
+	server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+		if r.URL.Path == "/latest/meta-data/iam/security-credentials" {
+			fmt.Fprintln(w, "RoleName")
+		} else if r.URL.Path == "/latest/meta-data/iam/security-credentials/RoleName" {
+			if failAssume {
+				fmt.Fprintf(w, credsFailRespTmpl)
+			} else {
+				fmt.Fprintf(w, credsRespTmpl, expireOn)
+			}
+		} else {
+			http.Error(w, "bad request", http.StatusBadRequest)
+		}
+	}))
+
+	return server
+}
+
+func TestEC2RoleProvider(t *testing.T) {
+	server := initTestServer("2014-12-16T01:51:37Z", false)
+	defer server.Close()
+
+	p := &ec2rolecreds.EC2RoleProvider{
+		Client: ec2metadata.New(session.New(), &aws.Config{Endpoint: aws.String(server.URL
+ "/latest")}), + } + + creds, err := p.Retrieve() + assert.Nil(t, err, "Expect no error, %v", err) + + assert.Equal(t, "accessKey", creds.AccessKeyID, "Expect access key ID to match") + assert.Equal(t, "secret", creds.SecretAccessKey, "Expect secret access key to match") + assert.Equal(t, "token", creds.SessionToken, "Expect session token to match") +} + +func TestEC2RoleProviderFailAssume(t *testing.T) { + server := initTestServer("2014-12-16T01:51:37Z", true) + defer server.Close() + + p := &ec2rolecreds.EC2RoleProvider{ + Client: ec2metadata.New(session.New(), &aws.Config{Endpoint: aws.String(server.URL + "/latest")}), + } + + creds, err := p.Retrieve() + assert.Error(t, err, "Expect error") + + e := err.(awserr.Error) + assert.Equal(t, "ErrorCode", e.Code()) + assert.Equal(t, "ErrorMsg", e.Message()) + assert.Nil(t, e.OrigErr()) + + assert.Equal(t, "", creds.AccessKeyID, "Expect access key ID to match") + assert.Equal(t, "", creds.SecretAccessKey, "Expect secret access key to match") + assert.Equal(t, "", creds.SessionToken, "Expect session token to match") +} + +func TestEC2RoleProviderIsExpired(t *testing.T) { + server := initTestServer("2014-12-16T01:51:37Z", false) + defer server.Close() + + p := &ec2rolecreds.EC2RoleProvider{ + Client: ec2metadata.New(session.New(), &aws.Config{Endpoint: aws.String(server.URL + "/latest")}), + } + p.CurrentTime = func() time.Time { + return time.Date(2014, 12, 15, 21, 26, 0, 0, time.UTC) + } + + assert.True(t, p.IsExpired(), "Expect creds to be expired before retrieve.") + + _, err := p.Retrieve() + assert.Nil(t, err, "Expect no error, %v", err) + + assert.False(t, p.IsExpired(), "Expect creds to not be expired after retrieve.") + + p.CurrentTime = func() time.Time { + return time.Date(3014, 12, 15, 21, 26, 0, 0, time.UTC) + } + + assert.True(t, p.IsExpired(), "Expect creds to be expired.") +} + +func TestEC2RoleProviderExpiryWindowIsExpired(t *testing.T) { + server := initTestServer("2014-12-16T01:51:37Z", false) + defer server.Close() + + p := &ec2rolecreds.EC2RoleProvider{ + Client: ec2metadata.New(session.New(), &aws.Config{Endpoint: aws.String(server.URL + "/latest")}), + ExpiryWindow: time.Hour * 1, + } + p.CurrentTime = func() time.Time { + return time.Date(2014, 12, 15, 0, 51, 37, 0, time.UTC) + } + + assert.True(t, p.IsExpired(), "Expect creds to be expired before retrieve.") + + _, err := p.Retrieve() + assert.Nil(t, err, "Expect no error, %v", err) + + assert.False(t, p.IsExpired(), "Expect creds to not be expired after retrieve.") + + p.CurrentTime = func() time.Time { + return time.Date(2014, 12, 16, 0, 55, 37, 0, time.UTC) + } + + assert.True(t, p.IsExpired(), "Expect creds to be expired.") +} + +func BenchmarkEC3RoleProvider(b *testing.B) { + server := initTestServer("2014-12-16T01:51:37Z", false) + defer server.Close() + + p := &ec2rolecreds.EC2RoleProvider{ + Client: ec2metadata.New(session.New(), &aws.Config{Endpoint: aws.String(server.URL + "/latest")}), + } + _, err := p.Retrieve() + if err != nil { + b.Fatal(err) + } + + b.ResetTimer() + for i := 0; i < b.N; i++ { + if _, err := p.Retrieve(); err != nil { + b.Fatal(err) + } + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/env_provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/env_provider.go new file mode 100644 index 000000000..96655bc46 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/env_provider.go @@ -0,0 +1,77 @@ +package credentials + +import ( + "os" + + "github.com/aws/aws-sdk-go/aws/awserr" +) + +// EnvProviderName 
provides the name of the Env provider.
+const EnvProviderName = "EnvProvider"
+
+var (
+	// ErrAccessKeyIDNotFound is returned when the AWS Access Key ID can't be
+	// found in the process's environment.
+	//
+	// @readonly
+	ErrAccessKeyIDNotFound = awserr.New("EnvAccessKeyNotFound", "AWS_ACCESS_KEY_ID or AWS_ACCESS_KEY not found in environment", nil)
+
+	// ErrSecretAccessKeyNotFound is returned when the AWS Secret Access Key
+	// can't be found in the process's environment.
+	//
+	// @readonly
+	ErrSecretAccessKeyNotFound = awserr.New("EnvSecretNotFound", "AWS_SECRET_ACCESS_KEY or AWS_SECRET_KEY not found in environment", nil)
+)
+
+// An EnvProvider retrieves credentials from the environment variables of the
+// running process. Environment credentials never expire.
+//
+// Environment variables used:
+//
+// * Access Key ID: AWS_ACCESS_KEY_ID or AWS_ACCESS_KEY
+// * Secret Access Key: AWS_SECRET_ACCESS_KEY or AWS_SECRET_KEY
+type EnvProvider struct {
+	retrieved bool
+}
+
+// NewEnvCredentials returns a pointer to a new Credentials object
+// wrapping the environment variable provider.
+func NewEnvCredentials() *Credentials {
+	return NewCredentials(&EnvProvider{})
+}
+
+// Retrieve retrieves the keys from the environment.
+func (e *EnvProvider) Retrieve() (Value, error) {
+	e.retrieved = false
+
+	id := os.Getenv("AWS_ACCESS_KEY_ID")
+	if id == "" {
+		id = os.Getenv("AWS_ACCESS_KEY")
+	}
+
+	secret := os.Getenv("AWS_SECRET_ACCESS_KEY")
+	if secret == "" {
+		secret = os.Getenv("AWS_SECRET_KEY")
+	}
+
+	if id == "" {
+		return Value{ProviderName: EnvProviderName}, ErrAccessKeyIDNotFound
+	}
+
+	if secret == "" {
+		return Value{ProviderName: EnvProviderName}, ErrSecretAccessKeyNotFound
+	}
+
+	e.retrieved = true
+	return Value{
+		AccessKeyID:     id,
+		SecretAccessKey: secret,
+		SessionToken:    os.Getenv("AWS_SESSION_TOKEN"),
+		ProviderName:    EnvProviderName,
+	}, nil
+}
+
+// IsExpired returns true until the credentials have been successfully
+// retrieved; environment credentials never expire after that.
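+//
+// A rough usage sketch, assuming AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY
+// are exported in the environment:
+//
+//	creds := credentials.NewEnvCredentials()
+//	v, err := creds.Get() // Retrieve runs on the first Get
+//	// v.ProviderName == "EnvProvider" when err is nil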
+func (e *EnvProvider) IsExpired() bool { + return !e.retrieved +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/env_provider_test.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/env_provider_test.go new file mode 100644 index 000000000..53f6ce256 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/env_provider_test.go @@ -0,0 +1,70 @@ +package credentials + +import ( + "github.com/stretchr/testify/assert" + "os" + "testing" +) + +func TestEnvProviderRetrieve(t *testing.T) { + os.Clearenv() + os.Setenv("AWS_ACCESS_KEY_ID", "access") + os.Setenv("AWS_SECRET_ACCESS_KEY", "secret") + os.Setenv("AWS_SESSION_TOKEN", "token") + + e := EnvProvider{} + creds, err := e.Retrieve() + assert.Nil(t, err, "Expect no error") + + assert.Equal(t, "access", creds.AccessKeyID, "Expect access key ID to match") + assert.Equal(t, "secret", creds.SecretAccessKey, "Expect secret access key to match") + assert.Equal(t, "token", creds.SessionToken, "Expect session token to match") +} + +func TestEnvProviderIsExpired(t *testing.T) { + os.Clearenv() + os.Setenv("AWS_ACCESS_KEY_ID", "access") + os.Setenv("AWS_SECRET_ACCESS_KEY", "secret") + os.Setenv("AWS_SESSION_TOKEN", "token") + + e := EnvProvider{} + + assert.True(t, e.IsExpired(), "Expect creds to be expired before retrieve.") + + _, err := e.Retrieve() + assert.Nil(t, err, "Expect no error") + + assert.False(t, e.IsExpired(), "Expect creds to not be expired after retrieve.") +} + +func TestEnvProviderNoAccessKeyID(t *testing.T) { + os.Clearenv() + os.Setenv("AWS_SECRET_ACCESS_KEY", "secret") + + e := EnvProvider{} + creds, err := e.Retrieve() + assert.Equal(t, ErrAccessKeyIDNotFound, err, "ErrAccessKeyIDNotFound expected, but was %#v error: %#v", creds, err) +} + +func TestEnvProviderNoSecretAccessKey(t *testing.T) { + os.Clearenv() + os.Setenv("AWS_ACCESS_KEY_ID", "access") + + e := EnvProvider{} + creds, err := e.Retrieve() + assert.Equal(t, ErrSecretAccessKeyNotFound, err, "ErrSecretAccessKeyNotFound expected, but was %#v error: %#v", creds, err) +} + +func TestEnvProviderAlternateNames(t *testing.T) { + os.Clearenv() + os.Setenv("AWS_ACCESS_KEY", "access") + os.Setenv("AWS_SECRET_KEY", "secret") + + e := EnvProvider{} + creds, err := e.Retrieve() + assert.Nil(t, err, "Expect no error") + + assert.Equal(t, "access", creds.AccessKeyID, "Expected access key ID") + assert.Equal(t, "secret", creds.SecretAccessKey, "Expected secret access key") + assert.Empty(t, creds.SessionToken, "Expected no token") +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/example.ini b/vendor/github.com/aws/aws-sdk-go/aws/credentials/example.ini new file mode 100644 index 000000000..7fc91d9d2 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/example.ini @@ -0,0 +1,12 @@ +[default] +aws_access_key_id = accessKey +aws_secret_access_key = secret +aws_session_token = token + +[no_token] +aws_access_key_id = accessKey +aws_secret_access_key = secret + +[with_colon] +aws_access_key_id: accessKey +aws_secret_access_key: secret diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/shared_credentials_provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/shared_credentials_provider.go new file mode 100644 index 000000000..7fb7cbf0d --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/shared_credentials_provider.go @@ -0,0 +1,151 @@ +package credentials + +import ( + "fmt" + "os" + "path/filepath" + + "github.com/go-ini/ini" + + "github.com/aws/aws-sdk-go/aws/awserr" +) + +// 
SharedCredsProviderName provides the name of the SharedCreds provider.
+const SharedCredsProviderName = "SharedCredentialsProvider"
+
+var (
+	// ErrSharedCredentialsHomeNotFound is emitted when the user directory cannot be found.
+	//
+	// @readonly
+	ErrSharedCredentialsHomeNotFound = awserr.New("UserHomeNotFound", "user home directory not found.", nil)
+)
+
+// A SharedCredentialsProvider retrieves credentials from the current user's home
+// directory, and keeps track of whether those credentials are expired.
+//
+// Profile ini file example: $HOME/.aws/credentials
+type SharedCredentialsProvider struct {
+	// Path to the shared credentials file.
+	//
+	// If empty will look for "AWS_SHARED_CREDENTIALS_FILE" env variable. If the
+	// env value is empty will default to current user's home directory.
+	// Linux/OSX: "$HOME/.aws/credentials"
+	// Windows:   "%USERPROFILE%\.aws\credentials"
+	Filename string
+
+	// AWS Profile to extract credentials from the shared credentials file. If empty
+	// will default to environment variable "AWS_PROFILE" or "default" if
+	// environment variable is also not set.
+	Profile string
+
+	// retrieved states if the credentials have been successfully retrieved.
+	retrieved bool
+}
+
+// NewSharedCredentials returns a pointer to a new Credentials object
+// wrapping the Profile file provider.
+func NewSharedCredentials(filename, profile string) *Credentials {
+	return NewCredentials(&SharedCredentialsProvider{
+		Filename: filename,
+		Profile:  profile,
+	})
+}
+
+// Retrieve reads and extracts the shared credentials from the current
+// user's home directory.
+func (p *SharedCredentialsProvider) Retrieve() (Value, error) {
+	p.retrieved = false
+
+	filename, err := p.filename()
+	if err != nil {
+		return Value{ProviderName: SharedCredsProviderName}, err
+	}
+
+	creds, err := loadProfile(filename, p.profile())
+	if err != nil {
+		return Value{ProviderName: SharedCredsProviderName}, err
+	}
+
+	p.retrieved = true
+	return creds, nil
+}
+
+// IsExpired returns if the shared credentials have expired.
+func (p *SharedCredentialsProvider) IsExpired() bool {
+	return !p.retrieved
+}
+
+// loadProfile loads the named profile from the file pointed to by the shared
+// credentials filename. The credentials found in the profile are returned; an
+// error is returned if the file cannot be read or its contents are invalid.
+func loadProfile(filename, profile string) (Value, error) { + config, err := ini.Load(filename) + if err != nil { + return Value{ProviderName: SharedCredsProviderName}, awserr.New("SharedCredsLoad", "failed to load shared credentials file", err) + } + iniProfile, err := config.GetSection(profile) + if err != nil { + return Value{ProviderName: SharedCredsProviderName}, awserr.New("SharedCredsLoad", "failed to get profile", err) + } + + id, err := iniProfile.GetKey("aws_access_key_id") + if err != nil { + return Value{ProviderName: SharedCredsProviderName}, awserr.New("SharedCredsAccessKey", + fmt.Sprintf("shared credentials %s in %s did not contain aws_access_key_id", profile, filename), + err) + } + + secret, err := iniProfile.GetKey("aws_secret_access_key") + if err != nil { + return Value{ProviderName: SharedCredsProviderName}, awserr.New("SharedCredsSecret", + fmt.Sprintf("shared credentials %s in %s did not contain aws_secret_access_key", profile, filename), + nil) + } + + // Default to empty string if not found + token := iniProfile.Key("aws_session_token") + + return Value{ + AccessKeyID: id.String(), + SecretAccessKey: secret.String(), + SessionToken: token.String(), + ProviderName: SharedCredsProviderName, + }, nil +} + +// filename returns the filename to use to read AWS shared credentials. +// +// Will return an error if the user's home directory path cannot be found. +func (p *SharedCredentialsProvider) filename() (string, error) { + if p.Filename == "" { + if p.Filename = os.Getenv("AWS_SHARED_CREDENTIALS_FILE"); p.Filename != "" { + return p.Filename, nil + } + + homeDir := os.Getenv("HOME") // *nix + if homeDir == "" { // Windows + homeDir = os.Getenv("USERPROFILE") + } + if homeDir == "" { + return "", ErrSharedCredentialsHomeNotFound + } + + p.Filename = filepath.Join(homeDir, ".aws", "credentials") + } + + return p.Filename, nil +} + +// profile returns the AWS shared credentials profile. If empty will read +// environment variable "AWS_PROFILE". If that is not set profile will +// return "default". 
+func (p *SharedCredentialsProvider) profile() string { + if p.Profile == "" { + p.Profile = os.Getenv("AWS_PROFILE") + } + if p.Profile == "" { + p.Profile = "default" + } + + return p.Profile +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/shared_credentials_provider_test.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/shared_credentials_provider_test.go new file mode 100644 index 000000000..6b4093a15 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/shared_credentials_provider_test.go @@ -0,0 +1,116 @@ +package credentials + +import ( + "os" + "path/filepath" + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestSharedCredentialsProvider(t *testing.T) { + os.Clearenv() + + p := SharedCredentialsProvider{Filename: "example.ini", Profile: ""} + creds, err := p.Retrieve() + assert.Nil(t, err, "Expect no error") + + assert.Equal(t, "accessKey", creds.AccessKeyID, "Expect access key ID to match") + assert.Equal(t, "secret", creds.SecretAccessKey, "Expect secret access key to match") + assert.Equal(t, "token", creds.SessionToken, "Expect session token to match") +} + +func TestSharedCredentialsProviderIsExpired(t *testing.T) { + os.Clearenv() + + p := SharedCredentialsProvider{Filename: "example.ini", Profile: ""} + + assert.True(t, p.IsExpired(), "Expect creds to be expired before retrieve") + + _, err := p.Retrieve() + assert.Nil(t, err, "Expect no error") + + assert.False(t, p.IsExpired(), "Expect creds to not be expired after retrieve") +} + +func TestSharedCredentialsProviderWithAWS_SHARED_CREDENTIALS_FILE(t *testing.T) { + os.Clearenv() + os.Setenv("AWS_SHARED_CREDENTIALS_FILE", "example.ini") + p := SharedCredentialsProvider{} + creds, err := p.Retrieve() + + assert.Nil(t, err, "Expect no error") + + assert.Equal(t, "accessKey", creds.AccessKeyID, "Expect access key ID to match") + assert.Equal(t, "secret", creds.SecretAccessKey, "Expect secret access key to match") + assert.Equal(t, "token", creds.SessionToken, "Expect session token to match") +} + +func TestSharedCredentialsProviderWithAWS_SHARED_CREDENTIALS_FILEAbsPath(t *testing.T) { + os.Clearenv() + wd, err := os.Getwd() + assert.NoError(t, err) + os.Setenv("AWS_SHARED_CREDENTIALS_FILE", filepath.Join(wd, "example.ini")) + p := SharedCredentialsProvider{} + creds, err := p.Retrieve() + assert.Nil(t, err, "Expect no error") + + assert.Equal(t, "accessKey", creds.AccessKeyID, "Expect access key ID to match") + assert.Equal(t, "secret", creds.SecretAccessKey, "Expect secret access key to match") + assert.Equal(t, "token", creds.SessionToken, "Expect session token to match") +} + +func TestSharedCredentialsProviderWithAWS_PROFILE(t *testing.T) { + os.Clearenv() + os.Setenv("AWS_PROFILE", "no_token") + + p := SharedCredentialsProvider{Filename: "example.ini", Profile: ""} + creds, err := p.Retrieve() + assert.Nil(t, err, "Expect no error") + + assert.Equal(t, "accessKey", creds.AccessKeyID, "Expect access key ID to match") + assert.Equal(t, "secret", creds.SecretAccessKey, "Expect secret access key to match") + assert.Empty(t, creds.SessionToken, "Expect no token") +} + +func TestSharedCredentialsProviderWithoutTokenFromProfile(t *testing.T) { + os.Clearenv() + + p := SharedCredentialsProvider{Filename: "example.ini", Profile: "no_token"} + creds, err := p.Retrieve() + assert.Nil(t, err, "Expect no error") + + assert.Equal(t, "accessKey", creds.AccessKeyID, "Expect access key ID to match") + assert.Equal(t, "secret", creds.SecretAccessKey, "Expect secret access key to match") + 
assert.Empty(t, creds.SessionToken, "Expect no token")
+}
+
+func TestSharedCredentialsProviderColonInCredFile(t *testing.T) {
+	os.Clearenv()
+
+	p := SharedCredentialsProvider{Filename: "example.ini", Profile: "with_colon"}
+	creds, err := p.Retrieve()
+	assert.Nil(t, err, "Expect no error")
+
+	assert.Equal(t, "accessKey", creds.AccessKeyID, "Expect access key ID to match")
+	assert.Equal(t, "secret", creds.SecretAccessKey, "Expect secret access key to match")
+	assert.Empty(t, creds.SessionToken, "Expect no token")
+}
+
+func BenchmarkSharedCredentialsProvider(b *testing.B) {
+	os.Clearenv()
+
+	p := SharedCredentialsProvider{Filename: "example.ini", Profile: ""}
+	_, err := p.Retrieve()
+	if err != nil {
+		b.Fatal(err)
+	}
+
+	b.ResetTimer()
+	for i := 0; i < b.N; i++ {
+		_, err := p.Retrieve()
+		if err != nil {
+			b.Fatal(err)
+		}
+	}
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/static_provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/static_provider.go
new file mode 100644
index 000000000..71189e733
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/static_provider.go
@@ -0,0 +1,48 @@
+package credentials
+
+import (
+	"github.com/aws/aws-sdk-go/aws/awserr"
+)
+
+// StaticProviderName provides the name of the Static provider.
+const StaticProviderName = "StaticProvider"
+
+var (
+	// ErrStaticCredentialsEmpty is emitted when static credentials are empty.
+	//
+	// @readonly
+	ErrStaticCredentialsEmpty = awserr.New("EmptyStaticCreds", "static credentials are empty", nil)
+)
+
+// A StaticProvider is a set of credentials which are set programmatically,
+// and will never expire.
+type StaticProvider struct {
+	Value
+}
+
+// NewStaticCredentials returns a pointer to a new Credentials object
+// wrapping a static credentials value provider.
+func NewStaticCredentials(id, secret, token string) *Credentials {
+	return NewCredentials(&StaticProvider{Value: Value{
+		AccessKeyID:     id,
+		SecretAccessKey: secret,
+		SessionToken:    token,
+	}})
+}
+
+// Retrieve returns the credentials, or an error if the credentials are invalid.
+func (s *StaticProvider) Retrieve() (Value, error) {
+	if s.AccessKeyID == "" || s.SecretAccessKey == "" {
+		return Value{ProviderName: StaticProviderName}, ErrStaticCredentialsEmpty
+	}
+
+	s.Value.ProviderName = StaticProviderName
+	return s.Value, nil
+}
+
+// IsExpired returns if the credentials are expired.
+//
+// For StaticProvider, the credentials never expire.
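+//
+// A typical construction sketch:
+//
+//	creds := credentials.NewStaticCredentials("AKID", "SECRET", "")
+//	v, err := creds.Get() // never triggers a refresh; IsExpired is always false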
+func (s *StaticProvider) IsExpired() bool { + return false +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/static_provider_test.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/static_provider_test.go new file mode 100644 index 000000000..ea0123696 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/static_provider_test.go @@ -0,0 +1,34 @@ +package credentials + +import ( + "github.com/stretchr/testify/assert" + "testing" +) + +func TestStaticProviderGet(t *testing.T) { + s := StaticProvider{ + Value: Value{ + AccessKeyID: "AKID", + SecretAccessKey: "SECRET", + SessionToken: "", + }, + } + + creds, err := s.Retrieve() + assert.Nil(t, err, "Expect no error") + assert.Equal(t, "AKID", creds.AccessKeyID, "Expect access key ID to match") + assert.Equal(t, "SECRET", creds.SecretAccessKey, "Expect secret access key to match") + assert.Empty(t, creds.SessionToken, "Expect no session token") +} + +func TestStaticProviderIsExpired(t *testing.T) { + s := StaticProvider{ + Value: Value{ + AccessKeyID: "AKID", + SecretAccessKey: "SECRET", + SessionToken: "", + }, + } + + assert.False(t, s.IsExpired(), "Expect static credentials to never expire") +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/assume_role_provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/assume_role_provider.go new file mode 100644 index 000000000..4d6408b9d --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/assume_role_provider.go @@ -0,0 +1,134 @@ +// Package stscreds are credential Providers to retrieve STS AWS credentials. +// +// STS provides multiple ways to retrieve credentials which can be used when making +// future AWS service API operation calls. +package stscreds + +import ( + "fmt" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/client" + "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/aws/aws-sdk-go/service/sts" +) + +// ProviderName provides a name of AssumeRole provider +const ProviderName = "AssumeRoleProvider" + +// AssumeRoler represents the minimal subset of the STS client API used by this provider. +type AssumeRoler interface { + AssumeRole(input *sts.AssumeRoleInput) (*sts.AssumeRoleOutput, error) +} + +// DefaultDuration is the default amount of time in minutes that the credentials +// will be valid for. +var DefaultDuration = time.Duration(15) * time.Minute + +// AssumeRoleProvider retrieves temporary credentials from the STS service, and +// keeps track of their expiration time. This provider must be used explicitly, +// as it is not included in the credentials chain. +type AssumeRoleProvider struct { + credentials.Expiry + + // STS client to make assume role request with. + Client AssumeRoler + + // Role to be assumed. + RoleARN string + + // Session name, if you wish to reuse the credentials elsewhere. + RoleSessionName string + + // Expiry duration of the STS credentials. Defaults to 15 minutes if not set. + Duration time.Duration + + // Optional ExternalID to pass along, defaults to nil if not set. + ExternalID *string + + // ExpiryWindow will allow the credentials to trigger refreshing prior to + // the credentials actually expiring. This is beneficial so race conditions + // with expiring credentials do not cause request to fail unexpectedly + // due to ExpiredTokenException exceptions. + // + // So a ExpiryWindow of 10s would cause calls to IsExpired() to return true + // 10 seconds before the credentials are actually expired. 
+	//
+	// If ExpiryWindow is 0 or less it will be ignored.
+	ExpiryWindow time.Duration
+}
+
+// NewCredentials returns a pointer to a new Credentials object wrapping the
+// AssumeRoleProvider. The credentials will expire every 15 minutes and the
+// role will be named after a nanosecond timestamp of this operation.
+//
+// Takes a ConfigProvider to create the STS client. The ConfigProvider is
+// satisfied by the session.Session type.
+func NewCredentials(c client.ConfigProvider, roleARN string, options ...func(*AssumeRoleProvider)) *credentials.Credentials {
+	p := &AssumeRoleProvider{
+		Client:   sts.New(c),
+		RoleARN:  roleARN,
+		Duration: DefaultDuration,
+	}
+
+	for _, option := range options {
+		option(p)
+	}
+
+	return credentials.NewCredentials(p)
+}
+
+// NewCredentialsWithClient returns a pointer to a new Credentials object wrapping the
+// AssumeRoleProvider. The credentials will expire every 15 minutes and the
+// role will be named after a nanosecond timestamp of this operation.
+//
+// Takes an AssumeRoler which can be satisfied by the STS client.
+func NewCredentialsWithClient(svc AssumeRoler, roleARN string, options ...func(*AssumeRoleProvider)) *credentials.Credentials {
+	p := &AssumeRoleProvider{
+		Client:   svc,
+		RoleARN:  roleARN,
+		Duration: DefaultDuration,
+	}
+
+	for _, option := range options {
+		option(p)
+	}
+
+	return credentials.NewCredentials(p)
+}
+
+// Retrieve generates a new set of temporary credentials using STS.
+func (p *AssumeRoleProvider) Retrieve() (credentials.Value, error) {
+
+	// Apply defaults where parameters are not set.
+	if p.RoleSessionName == "" {
+		// Try to work out a role name that will hopefully end up unique.
+		p.RoleSessionName = fmt.Sprintf("%d", time.Now().UTC().UnixNano())
+	}
+	if p.Duration == 0 {
+		// Expire as often as AWS permits.
+		p.Duration = DefaultDuration
+	}
+
+	roleOutput, err := p.Client.AssumeRole(&sts.AssumeRoleInput{
+		DurationSeconds: aws.Int64(int64(p.Duration / time.Second)),
+		RoleArn:         aws.String(p.RoleARN),
+		RoleSessionName: aws.String(p.RoleSessionName),
+		ExternalId:      p.ExternalID,
+	})
+
+	if err != nil {
+		return credentials.Value{ProviderName: ProviderName}, err
+	}
+
+	// We will proactively generate new credentials before they expire.
+	p.SetExpiration(*roleOutput.Credentials.Expiration, p.ExpiryWindow)
+
+	return credentials.Value{
+		AccessKeyID:     *roleOutput.Credentials.AccessKeyId,
+		SecretAccessKey: *roleOutput.Credentials.SecretAccessKey,
+		SessionToken:    *roleOutput.Credentials.SessionToken,
+		ProviderName:    ProviderName,
+	}, nil
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/assume_role_provider_test.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/assume_role_provider_test.go
new file mode 100644
index 000000000..6bd6e9197
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/assume_role_provider_test.go
@@ -0,0 +1,56 @@
+package stscreds
+
+import (
+	"testing"
+	"time"
+
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/service/sts"
+	"github.com/stretchr/testify/assert"
+)
+
+type stubSTS struct {
+}
+
+func (s *stubSTS) AssumeRole(input *sts.AssumeRoleInput) (*sts.AssumeRoleOutput, error) {
+	expiry := time.Now().Add(60 * time.Minute)
+	return &sts.AssumeRoleOutput{
+		Credentials: &sts.Credentials{
+			// Just reflect the role arn to the provider.
+			AccessKeyId:     input.RoleArn,
+			SecretAccessKey: aws.String("assumedSecretAccessKey"),
+			SessionToken:    aws.String("assumedSessionToken"),
+			Expiration:      &expiry,
+		},
+	}, nil
+}
+
+func TestAssumeRoleProvider(t *testing.T) {
+	stub := &stubSTS{}
+	p := &AssumeRoleProvider{
+		Client:  stub,
+		RoleARN: "roleARN",
+	}
+
+	creds, err := p.Retrieve()
+	assert.Nil(t, err, "Expect no error")
+
+	assert.Equal(t, "roleARN", creds.AccessKeyID, "Expect access key ID to be the reflected role ARN")
+	assert.Equal(t, "assumedSecretAccessKey", creds.SecretAccessKey, "Expect secret access key to match")
+	assert.Equal(t, "assumedSessionToken", creds.SessionToken, "Expect session token to match")
+}
+
+func BenchmarkAssumeRoleProvider(b *testing.B) {
+	stub := &stubSTS{}
+	p := &AssumeRoleProvider{
+		Client:  stub,
+		RoleARN: "roleARN",
+	}
+
+	b.ResetTimer()
+	for i := 0; i < b.N; i++ {
+		if _, err := p.Retrieve(); err != nil {
+			b.Fatal(err)
+		}
+	}
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/defaults/defaults.go b/vendor/github.com/aws/aws-sdk-go/aws/defaults/defaults.go
new file mode 100644
index 000000000..42c883aa9
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/defaults/defaults.go
@@ -0,0 +1,96 @@
+// Package defaults is a collection of helpers to retrieve the SDK's default
+// configuration and handlers.
+//
+// Generally this package shouldn't be used directly; use session.Session
+// instead. This package is useful when you need to reset the defaults
+// of a session or service client to the SDK defaults before setting
+// additional parameters.
+package defaults
+
+import (
+	"net/http"
+	"os"
+	"time"
+
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/aws/corehandlers"
+	"github.com/aws/aws-sdk-go/aws/credentials"
+	"github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds"
+	"github.com/aws/aws-sdk-go/aws/ec2metadata"
+	"github.com/aws/aws-sdk-go/aws/request"
+	"github.com/aws/aws-sdk-go/private/endpoints"
+)
+
+// A Defaults provides a collection of default values for SDK clients.
+type Defaults struct {
+	Config   *aws.Config
+	Handlers request.Handlers
+}
+
+// Get returns the SDK's default values with Config and handlers pre-configured.
+func Get() Defaults {
+	cfg := Config()
+	handlers := Handlers()
+	cfg.Credentials = CredChain(cfg, handlers)
+
+	return Defaults{
+		Config:   cfg,
+		Handlers: handlers,
+	}
+}
+
+// Config returns the default configuration without credentials.
+// To retrieve a config with credentials also included use
+// `defaults.Get().Config` instead.
+//
+// Generally you shouldn't need to use this method directly, but
+// it is available if you need to reset the configuration of an
+// existing service client or session.
+func Config() *aws.Config {
+	return aws.NewConfig().
+		WithCredentials(credentials.AnonymousCredentials).
+		WithRegion(os.Getenv("AWS_REGION")).
+		WithHTTPClient(http.DefaultClient).
+		WithMaxRetries(aws.UseServiceDefaultRetries).
+		WithLogger(aws.NewDefaultLogger()).
+		WithLogLevel(aws.LogOff).
+		WithSleepDelay(time.Sleep)
+}
+
+// Handlers returns the default request handlers.
+//
+// Generally you shouldn't need to use this method directly, but
+// it is available if you need to reset the request handlers of an
+// existing service client or session.
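+//
+// A sketch of rebuilding the pieces that Get wires together:
+//
+//	cfg := defaults.Config()
+//	handlers := defaults.Handlers()
+//	cfg.Credentials = defaults.CredChain(cfg, handlers)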
+func Handlers() request.Handlers {
+	var handlers request.Handlers
+
+	handlers.Validate.PushBackNamed(corehandlers.ValidateEndpointHandler)
+	handlers.Build.PushBackNamed(corehandlers.SDKVersionUserAgentHandler)
+	handlers.Sign.PushBackNamed(corehandlers.BuildContentLengthHandler)
+	handlers.Send.PushBackNamed(corehandlers.SendHandler)
+	handlers.AfterRetry.PushBackNamed(corehandlers.AfterRetryHandler)
+	handlers.ValidateResponse.PushBackNamed(corehandlers.ValidateResponseHandler)
+
+	return handlers
+}
+
+// CredChain returns the default credential chain.
+//
+// Generally you shouldn't need to use this method directly, but
+// it is available if you need to reset the credentials of an
+// existing service client or session's Config.
+func CredChain(cfg *aws.Config, handlers request.Handlers) *credentials.Credentials {
+	endpoint, signingRegion := endpoints.EndpointForRegion(ec2metadata.ServiceName, *cfg.Region, true)
+
+	return credentials.NewCredentials(&credentials.ChainProvider{
+		VerboseErrors: aws.BoolValue(cfg.CredentialsChainVerboseErrors),
+		Providers: []credentials.Provider{
+			&credentials.EnvProvider{},
+			&credentials.SharedCredentialsProvider{Filename: "", Profile: ""},
+			&ec2rolecreds.EC2RoleProvider{
+				Client:       ec2metadata.NewClient(*cfg, handlers, endpoint, signingRegion),
+				ExpiryWindow: 5 * time.Minute,
+			},
+		}})
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/api.go b/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/api.go
new file mode 100644
index 000000000..e5137ca17
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/api.go
@@ -0,0 +1,43 @@
+package ec2metadata
+
+import (
+	"path"
+
+	"github.com/aws/aws-sdk-go/aws/request"
+)
+
+// GetMetadata uses the path provided to request information from the EC2
+// instance metadata service. The content is returned as a string, or an
+// error if the request fails.
+func (c *EC2Metadata) GetMetadata(p string) (string, error) {
+	op := &request.Operation{
+		Name:       "GetMetadata",
+		HTTPMethod: "GET",
+		HTTPPath:   path.Join("/", "meta-data", p),
+	}
+
+	output := &metadataOutput{}
+	req := c.NewRequest(op, nil, output)
+
+	return output.Content, req.Send()
+}
+
+// Region returns the region the instance is running in.
+func (c *EC2Metadata) Region() (string, error) {
+	resp, err := c.GetMetadata("placement/availability-zone")
+	if err != nil {
+		return "", err
+	}
+
+	// returns region without the suffix. Eg: us-west-2a becomes us-west-2
+	return resp[:len(resp)-1], nil
+}
+
+// Available returns if the application has access to the EC2 Metadata service.
+// Can be used to determine if the application is running within an EC2
+// instance and the metadata service is available.
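+//
+// For example, callers might gate metadata lookups on availability:
+//
+//	svc := ec2metadata.New(session.New())
+//	if svc.Available() {
+//		region, _ := svc.Region()
+//		// use region
+//	}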
+func (c *EC2Metadata) Available() bool { + if _, err := c.GetMetadata("instance-id"); err != nil { + return false + } + + return true +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/api_test.go b/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/api_test.go new file mode 100644 index 000000000..c3c92972b --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/api_test.go @@ -0,0 +1,101 @@ +package ec2metadata_test + +import ( + "bytes" + "io/ioutil" + "net/http" + "net/http/httptest" + "path" + "testing" + + "github.com/stretchr/testify/assert" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/ec2metadata" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/aws/session" +) + +func initTestServer(path string, resp string) *httptest.Server { + return httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if r.RequestURI != path { + http.Error(w, "not found", http.StatusNotFound) + return + } + + w.Write([]byte(resp)) + })) +} + +func TestEndpoint(t *testing.T) { + c := ec2metadata.New(session.New()) + op := &request.Operation{ + Name: "GetMetadata", + HTTPMethod: "GET", + HTTPPath: path.Join("/", "meta-data", "testpath"), + } + + req := c.NewRequest(op, nil, nil) + assert.Equal(t, "http://169.254.169.254/latest", req.ClientInfo.Endpoint) + assert.Equal(t, "http://169.254.169.254/latest/meta-data/testpath", req.HTTPRequest.URL.String()) +} + +func TestGetMetadata(t *testing.T) { + server := initTestServer( + "/latest/meta-data/some/path", + "success", // real response includes suffix + ) + defer server.Close() + c := ec2metadata.New(session.New(), &aws.Config{Endpoint: aws.String(server.URL + "/latest")}) + + resp, err := c.GetMetadata("some/path") + + assert.NoError(t, err) + assert.Equal(t, "success", resp) +} + +func TestGetRegion(t *testing.T) { + server := initTestServer( + "/latest/meta-data/placement/availability-zone", + "us-west-2a", // real response includes suffix + ) + defer server.Close() + c := ec2metadata.New(session.New(), &aws.Config{Endpoint: aws.String(server.URL + "/latest")}) + + region, err := c.Region() + + assert.NoError(t, err) + assert.Equal(t, "us-west-2", region) +} + +func TestMetadataAvailable(t *testing.T) { + server := initTestServer( + "/latest/meta-data/instance-id", + "instance-id", + ) + defer server.Close() + c := ec2metadata.New(session.New(), &aws.Config{Endpoint: aws.String(server.URL + "/latest")}) + + available := c.Available() + + assert.True(t, available) +} + +func TestMetadataNotAvailable(t *testing.T) { + c := ec2metadata.New(session.New()) + c.Handlers.Send.Clear() + c.Handlers.Send.PushBack(func(r *request.Request) { + r.HTTPResponse = &http.Response{ + StatusCode: int(0), + Status: http.StatusText(int(0)), + Body: ioutil.NopCloser(bytes.NewReader([]byte{})), + } + r.Error = awserr.New("RequestError", "send request failed", nil) + r.Retryable = aws.Bool(true) // network errors are retryable + }) + + available := c.Available() + + assert.False(t, available) +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/service.go b/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/service.go new file mode 100644 index 000000000..9e16c1cf7 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/service.go @@ -0,0 +1,117 @@ +// Package ec2metadata provides the client for making API calls to the +// EC2 Metadata service. 
+package ec2metadata
+
+import (
+	"io/ioutil"
+	"net/http"
+	"time"
+
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/aws/awserr"
+	"github.com/aws/aws-sdk-go/aws/client"
+	"github.com/aws/aws-sdk-go/aws/client/metadata"
+	"github.com/aws/aws-sdk-go/aws/request"
+)
+
+// ServiceName is the name of the service.
+const ServiceName = "ec2metadata"
+
+// An EC2Metadata is an EC2 Metadata service Client.
+type EC2Metadata struct {
+	*client.Client
+}
+
+// New creates a new instance of the EC2Metadata client with a session.
+// This client is safe to use across multiple goroutines.
+//
+// Example:
+//	// Create an EC2Metadata client from just a session.
+//	svc := ec2metadata.New(mySession)
+//
+//	// Create an EC2Metadata client with additional configuration.
+//	svc := ec2metadata.New(mySession, aws.NewConfig().WithLogLevel(aws.LogDebugHTTPBody))
+func New(p client.ConfigProvider, cfgs ...*aws.Config) *EC2Metadata {
+	c := p.ClientConfig(ServiceName, cfgs...)
+	return NewClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion)
+}
+
+// NewClient returns a new EC2Metadata client. Should be used to create
+// a client when not using a session. Generally using just New with a session
+// is preferred.
+//
+// If no HTTP client is provided, or an unmodified stdlib default client is,
+// the EC2Metadata client's HTTP timeout will be shortened. To disable this
+// override set Config.EC2MetadataDisableTimeoutOverride to true. The override
+// is enabled by default.
+func NewClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string, opts ...func(*client.Client)) *EC2Metadata {
+	if !aws.BoolValue(cfg.EC2MetadataDisableTimeoutOverride) && httpClientZero(cfg.HTTPClient) {
+		// If the http client is unmodified and this feature is not disabled
+		// set custom timeouts for EC2Metadata requests.
+		cfg.HTTPClient = &http.Client{
+			// use a shorter timeout than default because the metadata
+			// service is local if it is running, and to fail faster
+			// if not running on an ec2 instance.
+			Timeout: 5 * time.Second,
+		}
+	}
+
+	svc := &EC2Metadata{
+		Client: client.New(
+			cfg,
+			metadata.ClientInfo{
+				ServiceName: ServiceName,
+				Endpoint:    endpoint,
+				APIVersion:  "latest",
+			},
+			handlers,
+		),
+	}
+
+	svc.Handlers.Unmarshal.PushBack(unmarshalHandler)
+	svc.Handlers.UnmarshalError.PushBack(unmarshalError)
+	svc.Handlers.Validate.Clear()
+	svc.Handlers.Validate.PushBack(validateEndpointHandler)
+
+	// Add additional options to the service config
+	for _, option := range opts {
+		option(svc.Client)
+	}
+
+	return svc
+}
+
+func httpClientZero(c *http.Client) bool {
+	return c == nil || (c.Transport == nil && c.CheckRedirect == nil && c.Jar == nil && c.Timeout == 0)
+}
+
+type metadataOutput struct {
+	Content string
+}
+
+func unmarshalHandler(r *request.Request) {
+	defer r.HTTPResponse.Body.Close()
+	b, err := ioutil.ReadAll(r.HTTPResponse.Body)
+	if err != nil {
+		r.Error = awserr.New("SerializationError", "unable to unmarshal EC2 metadata response", err)
+	}
+
+	data := r.Data.(*metadataOutput)
+	data.Content = string(b)
+}
+
+func unmarshalError(r *request.Request) {
+	defer r.HTTPResponse.Body.Close()
+	_, err := ioutil.ReadAll(r.HTTPResponse.Body)
+	if err != nil {
+		r.Error = awserr.New("SerializationError", "unable to unmarshal EC2 metadata error response", err)
+	}
+
+	// TODO extract the error...
+} + +func validateEndpointHandler(r *request.Request) { + if r.ClientInfo.Endpoint == "" { + r.Error = aws.ErrMissingEndpoint + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/service_test.go b/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/service_test.go new file mode 100644 index 000000000..d10ecb303 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/service_test.go @@ -0,0 +1,79 @@ +package ec2metadata_test + +import ( + "net/http" + "net/http/httptest" + "sync" + "testing" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/ec2metadata" + "github.com/aws/aws-sdk-go/aws/session" + "github.com/stretchr/testify/assert" +) + +func TestClientOverrideDefaultHTTPClientTimeout(t *testing.T) { + svc := ec2metadata.New(session.New()) + + assert.NotEqual(t, http.DefaultClient, svc.Config.HTTPClient) + assert.Equal(t, 5*time.Second, svc.Config.HTTPClient.Timeout) +} + +func TestClientNotOverrideDefaultHTTPClientTimeout(t *testing.T) { + origClient := *http.DefaultClient + http.DefaultClient.Transport = &http.Transport{} + defer func() { + http.DefaultClient = &origClient + }() + + svc := ec2metadata.New(session.New()) + + assert.Equal(t, http.DefaultClient, svc.Config.HTTPClient) + + tr, ok := svc.Config.HTTPClient.Transport.(*http.Transport) + assert.True(t, ok) + assert.NotNil(t, tr) + assert.Nil(t, tr.Dial) +} + +func TestClientDisableOverrideDefaultHTTPClientTimeout(t *testing.T) { + svc := ec2metadata.New(session.New(aws.NewConfig().WithEC2MetadataDisableTimeoutOverride(true))) + + assert.Equal(t, http.DefaultClient, svc.Config.HTTPClient) +} + +func TestClientOverrideDefaultHTTPClientTimeoutRace(t *testing.T) { + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Write([]byte("us-east-1a")) + })) + + cfg := aws.NewConfig().WithEndpoint(server.URL) + runEC2MetadataClients(t, cfg, 100) +} + +func TestClientOverrideDefaultHTTPClientTimeoutRaceWithTransport(t *testing.T) { + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Write([]byte("us-east-1a")) + })) + + cfg := aws.NewConfig().WithEndpoint(server.URL).WithHTTPClient(&http.Client{ + Transport: http.DefaultTransport, + }) + + runEC2MetadataClients(t, cfg, 100) +} + +func runEC2MetadataClients(t *testing.T, cfg *aws.Config, atOnce int) { + var wg sync.WaitGroup + wg.Add(atOnce) + for i := 0; i < atOnce; i++ { + go func() { + svc := ec2metadata.New(session.New(), cfg) + _, err := svc.Region() + assert.NoError(t, err) + wg.Done() + }() + } + wg.Wait() +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/errors.go b/vendor/github.com/aws/aws-sdk-go/aws/errors.go new file mode 100644 index 000000000..576636168 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/errors.go @@ -0,0 +1,17 @@ +package aws + +import "github.com/aws/aws-sdk-go/aws/awserr" + +var ( + // ErrMissingRegion is an error that is returned if region configuration is + // not found. + // + // @readonly + ErrMissingRegion = awserr.New("MissingRegion", "could not find region configuration", nil) + + // ErrMissingEndpoint is an error that is returned if an endpoint cannot be + // resolved for a service. 
+	//
+	// @readonly
+	ErrMissingEndpoint = awserr.New("MissingEndpoint", "'Endpoint' configuration is required for this service", nil)
+)
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/logger.go b/vendor/github.com/aws/aws-sdk-go/aws/logger.go
new file mode 100644
index 000000000..f53694873
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/logger.go
@@ -0,0 +1,98 @@
+package aws
+
+import (
+	"log"
+	"os"
+)
+
+// A LogLevelType defines the level logging should be performed at. Used to instruct
+// the SDK which statements should be logged.
+type LogLevelType uint
+
+// LogLevel returns the pointer to a LogLevel. Should be used to workaround
+// not being able to take the address of a non-composite literal.
+func LogLevel(l LogLevelType) *LogLevelType {
+	return &l
+}
+
+// Value returns the LogLevel value or the default value LogOff if the LogLevel
+// is nil. Safe to use on nil value LogLevelTypes.
+func (l *LogLevelType) Value() LogLevelType {
+	if l != nil {
+		return *l
+	}
+	return LogOff
+}
+
+// Matches returns true if the v LogLevel is enabled by this LogLevel. Should be
+// used with logging sub levels. Is safe to use on nil value LogLevelTypes. If
+// LogLevel is nil, will default to LogOff comparison.
+func (l *LogLevelType) Matches(v LogLevelType) bool {
+	c := l.Value()
+	return c&v == v
+}
+
+// AtLeast returns true if this LogLevel is at least high enough to satisfy v.
+// Is safe to use on nil value LogLevelTypes. If LogLevel is nil, will default
+// to LogOff comparison.
+func (l *LogLevelType) AtLeast(v LogLevelType) bool {
+	c := l.Value()
+	return c >= v
+}
+
+const (
+	// LogOff states that no logging should be performed by the SDK. This is the
+	// default state of the SDK, and should be used to disable all logging.
+	LogOff LogLevelType = iota * 0x1000
+
+	// LogDebug states that debug output should be logged by the SDK. This should
+	// be used to inspect requests made and responses received.
+	LogDebug
+)
+
+// Debug Logging Sub Levels
+const (
+	// LogDebugWithSigning states that the SDK should log request signing and
+	// presigning events. This should be used to log the signing details of
+	// requests for debugging. Will also enable LogDebug.
+	LogDebugWithSigning LogLevelType = LogDebug | (1 << iota)
+
+	// LogDebugWithHTTPBody states the SDK should log HTTP request and response
+	// HTTP bodies in addition to the headers and path. This should be used to
+	// see the body content of requests and responses made while using the SDK.
+	// Will also enable LogDebug.
+	LogDebugWithHTTPBody
+
+	// LogDebugWithRequestRetries states the SDK should log when service requests
+	// are being retried. Will also enable LogDebug.
+	LogDebugWithRequestRetries
+
+	// LogDebugWithRequestErrors states the SDK should log when service requests fail
+	// to build, send, validate, or unmarshal.
+	LogDebugWithRequestErrors
+)
+
+// A Logger is a minimalistic interface for the SDK to log messages to. Should
+// be used to provide custom logging writers for the SDK to use.
+type Logger interface {
+	Log(...interface{})
+}
+
+// NewDefaultLogger returns a Logger which will write log messages to stdout, and
+// use the same formatting runes as the stdlib log.Logger.
+func NewDefaultLogger() Logger {
+	return &defaultLogger{
+		logger: log.New(os.Stdout, "", log.LstdFlags),
+	}
+}
+
+// A defaultLogger provides a minimalistic logger satisfying the Logger interface.
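+//
+// Any value with a matching Log method satisfies Logger; a hypothetical
+// custom implementation (stderrLogger is illustrative only) could be as
+// small as:
+//
+//	type stderrLogger struct{ l *log.Logger }
+//
+//	func (s stderrLogger) Log(args ...interface{}) { s.l.Println(args...) }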
+type defaultLogger struct {
+	logger *log.Logger
+}
+
+// Log logs the parameters to the stdlib logger. See log.Println.
+func (l defaultLogger) Log(args ...interface{}) {
+	l.logger.Println(args...)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/handlers.go b/vendor/github.com/aws/aws-sdk-go/aws/request/handlers.go
new file mode 100644
index 000000000..3e90a7976
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/request/handlers.go
@@ -0,0 +1,140 @@
+package request
+
+import (
+	"fmt"
+	"strings"
+)
+
+// A Handlers provides a collection of request handlers for various
+// stages of handling requests.
+type Handlers struct {
+	Validate         HandlerList
+	Build            HandlerList
+	Sign             HandlerList
+	Send             HandlerList
+	ValidateResponse HandlerList
+	Unmarshal        HandlerList
+	UnmarshalMeta    HandlerList
+	UnmarshalError   HandlerList
+	Retry            HandlerList
+	AfterRetry       HandlerList
+}
+
+// Copy returns a copy of this handler's lists.
+func (h *Handlers) Copy() Handlers {
+	return Handlers{
+		Validate:         h.Validate.copy(),
+		Build:            h.Build.copy(),
+		Sign:             h.Sign.copy(),
+		Send:             h.Send.copy(),
+		ValidateResponse: h.ValidateResponse.copy(),
+		Unmarshal:        h.Unmarshal.copy(),
+		UnmarshalError:   h.UnmarshalError.copy(),
+		UnmarshalMeta:    h.UnmarshalMeta.copy(),
+		Retry:            h.Retry.copy(),
+		AfterRetry:       h.AfterRetry.copy(),
+	}
+}
+
+// Clear removes callback functions for all handlers.
+func (h *Handlers) Clear() {
+	h.Validate.Clear()
+	h.Build.Clear()
+	h.Send.Clear()
+	h.Sign.Clear()
+	h.Unmarshal.Clear()
+	h.UnmarshalMeta.Clear()
+	h.UnmarshalError.Clear()
+	h.ValidateResponse.Clear()
+	h.Retry.Clear()
+	h.AfterRetry.Clear()
+}
+
+// A HandlerList manages zero or more handlers in a list.
+type HandlerList struct {
+	list []NamedHandler
+}
+
+// A NamedHandler is a struct that contains a name and function callback.
+type NamedHandler struct {
+	Name string
+	Fn   func(*Request)
+}
+
+// copy creates a copy of the handler list.
+func (l *HandlerList) copy() HandlerList {
+	var n HandlerList
+	n.list = append([]NamedHandler{}, l.list...)
+	return n
+}
+
+// Clear clears the handler list.
+func (l *HandlerList) Clear() {
+	l.list = []NamedHandler{}
+}
+
+// Len returns the number of handlers in the list.
+func (l *HandlerList) Len() int {
+	return len(l.list)
+}
+
+// PushBack pushes handler f to the back of the handler list.
+func (l *HandlerList) PushBack(f func(*Request)) {
+	l.list = append(l.list, NamedHandler{"__anonymous", f})
+}
+
+// PushFront pushes handler f to the front of the handler list.
+func (l *HandlerList) PushFront(f func(*Request)) {
+	l.list = append([]NamedHandler{{"__anonymous", f}}, l.list...)
+}
+
+// PushBackNamed pushes the named handler n to the back of the handler list.
+func (l *HandlerList) PushBackNamed(n NamedHandler) {
+	l.list = append(l.list, n)
+}
+
+// PushFrontNamed pushes the named handler n to the front of the handler list.
+func (l *HandlerList) PushFrontNamed(n NamedHandler) {
+	l.list = append([]NamedHandler{n}, l.list...)
+}
+
+// Remove removes the NamedHandler n from the list by name.
+func (l *HandlerList) Remove(n NamedHandler) {
+	newlist := []NamedHandler{}
+	for _, m := range l.list {
+		if m.Name != n.Name {
+			newlist = append(newlist, m)
+		}
+	}
+	l.list = newlist
+}
+
+// Run executes all handlers in the list with a given request object.
+func (l *HandlerList) Run(r *Request) {
+	for _, f := range l.list {
+		f.Fn(r)
+	}
+}
+
+// MakeAddToUserAgentHandler will add the name/version pair to the User-Agent request
+// header.
If the extra parameters are provided they will be added as metadata to the +// name/version pair resulting in the following format. +// "name/version (extra0; extra1; ...)" +// The user agent part will be concatenated with this current request's user agent string. +func MakeAddToUserAgentHandler(name, version string, extra ...string) func(*Request) { + ua := fmt.Sprintf("%s/%s", name, version) + if len(extra) > 0 { + ua += fmt.Sprintf(" (%s)", strings.Join(extra, "; ")) + } + return func(r *Request) { + AddToUserAgent(r, ua) + } +} + +// MakeAddToUserAgentFreeFormHandler adds the input to the User-Agent request header. +// The input string will be concatenated with the current request's user agent string. +func MakeAddToUserAgentFreeFormHandler(s string) func(*Request) { + return func(r *Request) { + AddToUserAgent(r, s) + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/handlers_test.go b/vendor/github.com/aws/aws-sdk-go/aws/request/handlers_test.go new file mode 100644 index 000000000..16a141828 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/request/handlers_test.go @@ -0,0 +1,47 @@ +package request_test + +import ( + "testing" + + "github.com/stretchr/testify/assert" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/request" +) + +func TestHandlerList(t *testing.T) { + s := "" + r := &request.Request{} + l := request.HandlerList{} + l.PushBack(func(r *request.Request) { + s += "a" + r.Data = s + }) + l.Run(r) + assert.Equal(t, "a", s) + assert.Equal(t, "a", r.Data) +} + +func TestMultipleHandlers(t *testing.T) { + r := &request.Request{} + l := request.HandlerList{} + l.PushBack(func(r *request.Request) { r.Data = nil }) + l.PushFront(func(r *request.Request) { r.Data = aws.Bool(true) }) + l.Run(r) + if r.Data != nil { + t.Error("Expected handler to execute") + } +} + +func TestNamedHandlers(t *testing.T) { + l := request.HandlerList{} + named := request.NamedHandler{Name: "Name", Fn: func(r *request.Request) {}} + named2 := request.NamedHandler{Name: "NotName", Fn: func(r *request.Request) {}} + l.PushBackNamed(named) + l.PushBackNamed(named) + l.PushBackNamed(named2) + l.PushBack(func(r *request.Request) {}) + assert.Equal(t, 4, l.Len()) + l.Remove(named) + assert.Equal(t, 2, l.Len()) +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/request.go b/vendor/github.com/aws/aws-sdk-go/aws/request/request.go new file mode 100644 index 000000000..61bb4a8ca --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/request/request.go @@ -0,0 +1,294 @@ +package request + +import ( + "bytes" + "fmt" + "io" + "io/ioutil" + "net/http" + "net/url" + "reflect" + "strings" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/client/metadata" +) + +// A Request is the service request to be made. +type Request struct { + Config aws.Config + ClientInfo metadata.ClientInfo + Handlers Handlers + + Retryer + Time time.Time + ExpireTime time.Duration + Operation *Operation + HTTPRequest *http.Request + HTTPResponse *http.Response + Body io.ReadSeeker + BodyStart int64 // offset from beginning of Body that the request body starts + Params interface{} + Error error + Data interface{} + RequestID string + RetryCount int + Retryable *bool + RetryDelay time.Duration + NotHoist bool + SignedHeaderVals http.Header + + built bool +} + +// An Operation is the service API operation to be made. 
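+//
+// For instance, the ec2metadata client shown earlier builds operations such as:
+//
+//	op := &request.Operation{
+//		Name:       "GetMetadata",
+//		HTTPMethod: "GET",
+//		HTTPPath:   "/meta-data/instance-id",
+//	}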
+type Operation struct {
+	Name       string
+	HTTPMethod string
+	HTTPPath   string
+	*Paginator
+}
+
+// Paginator keeps track of pagination configuration for an API operation.
+type Paginator struct {
+	InputTokens     []string
+	OutputTokens    []string
+	LimitToken      string
+	TruncationToken string
+}
+
+// New returns a new Request pointer for the service API
+// operation and parameters.
+//
+// Params is any value of input parameters to be the request payload.
+// Data is a pointer to an object which the request's response
+// payload will be deserialized to.
+func New(cfg aws.Config, clientInfo metadata.ClientInfo, handlers Handlers,
+	retryer Retryer, operation *Operation, params interface{}, data interface{}) *Request {
+
+	method := operation.HTTPMethod
+	if method == "" {
+		method = "POST"
+	}
+	p := operation.HTTPPath
+	if p == "" {
+		p = "/"
+	}
+
+	httpReq, _ := http.NewRequest(method, "", nil)
+	httpReq.URL, _ = url.Parse(clientInfo.Endpoint + p)
+
+	r := &Request{
+		Config:     cfg,
+		ClientInfo: clientInfo,
+		Handlers:   handlers.Copy(),
+
+		Retryer:     retryer,
+		Time:        time.Now(),
+		ExpireTime:  0,
+		Operation:   operation,
+		HTTPRequest: httpReq,
+		Body:        nil,
+		Params:      params,
+		Error:       nil,
+		Data:        data,
+	}
+	r.SetBufferBody([]byte{})
+
+	return r
+}
+
+// WillRetry returns if the request can be retried.
+func (r *Request) WillRetry() bool {
+	return r.Error != nil && aws.BoolValue(r.Retryable) && r.RetryCount < r.MaxRetries()
+}
+
+// ParamsFilled returns if the request's parameters have been populated
+// and the parameters are valid. False is returned if no parameters are
+// provided or invalid.
+func (r *Request) ParamsFilled() bool {
+	return r.Params != nil && reflect.ValueOf(r.Params).Elem().IsValid()
+}
+
+// DataFilled returns true if the request's data for response deserialization
+// target has been set and is valid. False is returned if data is not
+// set, or is invalid.
+func (r *Request) DataFilled() bool {
+	return r.Data != nil && reflect.ValueOf(r.Data).Elem().IsValid()
+}
+
+// SetBufferBody will set the request's body bytes that will be sent to
+// the service API.
+func (r *Request) SetBufferBody(buf []byte) {
+	r.SetReaderBody(bytes.NewReader(buf))
+}
+
+// SetStringBody sets the body of the request to be backed by a string.
+func (r *Request) SetStringBody(s string) {
+	r.SetReaderBody(strings.NewReader(s))
+}
+
+// SetReaderBody will set the request's body reader.
+func (r *Request) SetReaderBody(reader io.ReadSeeker) {
+	r.HTTPRequest.Body = ioutil.NopCloser(reader)
+	r.Body = reader
+}
+
+// Presign returns the request's signed URL. An error will be returned
+// if the signing fails.
+func (r *Request) Presign(expireTime time.Duration) (string, error) {
+	r.ExpireTime = expireTime
+	r.NotHoist = false
+	r.Sign()
+	if r.Error != nil {
+		return "", r.Error
+	}
+	return r.HTTPRequest.URL.String(), nil
+}
+
+// PresignRequest behaves just like Presign, but hoists all headers and signs them.
+// Also returns the signed headers back to the user.
+func (r *Request) PresignRequest(expireTime time.Duration) (string, http.Header, error) {
+	r.ExpireTime = expireTime
+	r.NotHoist = true
+	r.Sign()
+	if r.Error != nil {
+		return "", nil, r.Error
+	}
+	return r.HTTPRequest.URL.String(), r.SignedHeaderVals, nil
+}
+
+func debugLogReqError(r *Request, stage string, retrying bool, err error) {
+	if !r.Config.LogLevel.Matches(aws.LogDebugWithRequestErrors) {
+		return
+	}
+
+	retryStr := "not retrying"
+	if retrying {
+		retryStr = "will retry"
+	}
+
+	r.Config.Logger.Log(fmt.Sprintf("DEBUG: %s %s/%s failed, %s, error %v",
+		stage, r.ClientInfo.ServiceName, r.Operation.Name, retryStr, err))
+}
+
+// Build will build the request's object so it can be signed and sent
+// to the service. Build will also validate all the request's parameters.
+// Any additional build Handlers set on this request will be run
+// in the order they were set.
+//
+// The request will only be built once. Multiple calls to build will have
+// no effect.
+//
+// If any Validate or Build errors occur the build will stop and the error
+// which occurred will be returned.
+func (r *Request) Build() error {
+	if !r.built {
+		r.Error = nil
+		r.Handlers.Validate.Run(r)
+		if r.Error != nil {
+			debugLogReqError(r, "Validate Request", false, r.Error)
+			return r.Error
+		}
+		r.Handlers.Build.Run(r)
+		r.built = true
+	}
+
+	return r.Error
+}
+
+// Sign will sign the request, returning an error if errors are encountered.
+//
+// Sign will build the request prior to signing. All Sign Handlers will
+// be executed in the order they were set.
+func (r *Request) Sign() error {
+	r.Build()
+	if r.Error != nil {
+		debugLogReqError(r, "Build Request", false, r.Error)
+		return r.Error
+	}
+
+	r.Handlers.Sign.Run(r)
+	return r.Error
+}
+
+// Send will send the request, returning an error if errors are encountered.
+//
+// Send will sign the request prior to sending. All Send Handlers will
+// be executed in the order they were set.
+func (r *Request) Send() error {
+	for {
+		r.Sign()
+		if r.Error != nil {
+			return r.Error
+		}
+
+		if aws.BoolValue(r.Retryable) {
+			if r.Config.LogLevel.Matches(aws.LogDebugWithRequestRetries) {
+				r.Config.Logger.Log(fmt.Sprintf("DEBUG: Retrying Request %s/%s, attempt %d",
+					r.ClientInfo.ServiceName, r.Operation.Name, r.RetryCount))
+			}
+
+			// Re-seek the body back to its original position for the retry so that
+			// Send will send the body's contents again in the upcoming request.
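+			// (A whence of 0 seeks relative to the start of the reader, so the
+			// retried attempt re-reads the payload from offset BodyStart.)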
+ r.Body.Seek(r.BodyStart, 0) + r.HTTPRequest.Body = ioutil.NopCloser(r.Body) + } + r.Retryable = nil + + r.Handlers.Send.Run(r) + if r.Error != nil { + err := r.Error + r.Handlers.Retry.Run(r) + r.Handlers.AfterRetry.Run(r) + if r.Error != nil { + debugLogReqError(r, "Send Request", false, r.Error) + return r.Error + } + debugLogReqError(r, "Send Request", true, err) + continue + } + + r.Handlers.UnmarshalMeta.Run(r) + r.Handlers.ValidateResponse.Run(r) + if r.Error != nil { + err := r.Error + r.Handlers.UnmarshalError.Run(r) + r.Handlers.Retry.Run(r) + r.Handlers.AfterRetry.Run(r) + if r.Error != nil { + debugLogReqError(r, "Validate Response", false, r.Error) + return r.Error + } + debugLogReqError(r, "Validate Response", true, err) + continue + } + + r.Handlers.Unmarshal.Run(r) + if r.Error != nil { + err := r.Error + r.Handlers.Retry.Run(r) + r.Handlers.AfterRetry.Run(r) + if r.Error != nil { + debugLogReqError(r, "Unmarshal Response", false, r.Error) + return r.Error + } + debugLogReqError(r, "Unmarshal Response", true, err) + continue + } + + break + } + + return nil +} + +// AddToUserAgent adds the string to the end of the request's current user agent. +func AddToUserAgent(r *Request, s string) { + curUA := r.HTTPRequest.Header.Get("User-Agent") + if len(curUA) > 0 { + s = curUA + " " + s + } + r.HTTPRequest.Header.Set("User-Agent", s) +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/request_pagination.go b/vendor/github.com/aws/aws-sdk-go/aws/request/request_pagination.go new file mode 100644 index 000000000..2939ec473 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/request/request_pagination.go @@ -0,0 +1,104 @@ +package request + +import ( + "reflect" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awsutil" +) + +//type Paginater interface { +// HasNextPage() bool +// NextPage() *Request +// EachPage(fn func(data interface{}, isLastPage bool) (shouldContinue bool)) error +//} + +// HasNextPage returns true if this request has more pages of data available. +func (r *Request) HasNextPage() bool { + return len(r.nextPageTokens()) > 0 +} + +// nextPageTokens returns the tokens to use when asking for the next page of +// data. +func (r *Request) nextPageTokens() []interface{} { + if r.Operation.Paginator == nil { + return nil + } + + if r.Operation.TruncationToken != "" { + tr, _ := awsutil.ValuesAtPath(r.Data, r.Operation.TruncationToken) + if len(tr) == 0 { + return nil + } + + switch v := tr[0].(type) { + case *bool: + if !aws.BoolValue(v) { + return nil + } + case bool: + if v == false { + return nil + } + } + } + + tokens := []interface{}{} + tokenAdded := false + for _, outToken := range r.Operation.OutputTokens { + v, _ := awsutil.ValuesAtPath(r.Data, outToken) + if len(v) > 0 { + tokens = append(tokens, v[0]) + tokenAdded = true + } else { + tokens = append(tokens, nil) + } + } + if !tokenAdded { + return nil + } + + return tokens +} + +// NextPage returns a new Request that can be executed to return the next +// page of result data. Call .Send() on this request to execute it. 
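+//
+// A minimal manual paging loop (a sketch mirroring EachPage below; req is a
+// *Request built by a service operation's Request method, and handle is a
+// hypothetical callback):
+//
+//	for page := req; page != nil; page = page.NextPage() {
+//		if err := page.Send(); err != nil {
+//			return err
+//		}
+//		handle(page.Data)
+//	}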
+func (r *Request) NextPage() *Request { + tokens := r.nextPageTokens() + if len(tokens) == 0 { + return nil + } + + data := reflect.New(reflect.TypeOf(r.Data).Elem()).Interface() + nr := New(r.Config, r.ClientInfo, r.Handlers, r.Retryer, r.Operation, awsutil.CopyOf(r.Params), data) + for i, intok := range nr.Operation.InputTokens { + awsutil.SetValueAtPath(nr.Params, intok, tokens[i]) + } + return nr +} + +// EachPage iterates over each page of a paginated request object. The fn +// parameter should be a function with the following sample signature: +// +// func(page *T, lastPage bool) bool { +// return true // return false to stop iterating +// } +// +// Where "T" is the structure type matching the output structure of the given +// operation. For example, a request object generated by +// DynamoDB.ListTablesRequest() would expect to see dynamodb.ListTablesOutput +// as the structure "T". The lastPage value represents whether the page is +// the last page of data or not. The return value of this function should +// return true to keep iterating or false to stop. +func (r *Request) EachPage(fn func(data interface{}, isLastPage bool) (shouldContinue bool)) error { + for page := r; page != nil; page = page.NextPage() { + if err := page.Send(); err != nil { + return err + } + if getNextPage := fn(page.Data, !page.HasNextPage()); !getNextPage { + return page.Error + } + } + + return nil +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/request_pagination_test.go b/vendor/github.com/aws/aws-sdk-go/aws/request/request_pagination_test.go new file mode 100644 index 000000000..725ea25cb --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/request/request_pagination_test.go @@ -0,0 +1,455 @@ +package request_test + +import ( + "testing" + + "github.com/stretchr/testify/assert" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/awstesting/unit" + "github.com/aws/aws-sdk-go/service/dynamodb" + "github.com/aws/aws-sdk-go/service/route53" + "github.com/aws/aws-sdk-go/service/s3" +) + +// Use DynamoDB methods for simplicity +func TestPaginationQueryPage(t *testing.T) { + db := dynamodb.New(unit.Session) + tokens, pages, numPages, gotToEnd := []map[string]*dynamodb.AttributeValue{}, []map[string]*dynamodb.AttributeValue{}, 0, false + + reqNum := 0 + resps := []*dynamodb.QueryOutput{ + { + LastEvaluatedKey: map[string]*dynamodb.AttributeValue{"key": {S: aws.String("key1")}}, + Count: aws.Int64(1), + Items: []map[string]*dynamodb.AttributeValue{ + { + "key": {S: aws.String("key1")}, + }, + }, + }, + { + LastEvaluatedKey: map[string]*dynamodb.AttributeValue{"key": {S: aws.String("key2")}}, + Count: aws.Int64(1), + Items: []map[string]*dynamodb.AttributeValue{ + { + "key": {S: aws.String("key2")}, + }, + }, + }, + { + LastEvaluatedKey: map[string]*dynamodb.AttributeValue{}, + Count: aws.Int64(1), + Items: []map[string]*dynamodb.AttributeValue{ + { + "key": {S: aws.String("key3")}, + }, + }, + }, + } + + db.Handlers.Send.Clear() // mock sending + db.Handlers.Unmarshal.Clear() + db.Handlers.UnmarshalMeta.Clear() + db.Handlers.ValidateResponse.Clear() + db.Handlers.Build.PushBack(func(r *request.Request) { + in := r.Params.(*dynamodb.QueryInput) + if in == nil { + tokens = append(tokens, nil) + } else if len(in.ExclusiveStartKey) != 0 { + tokens = append(tokens, in.ExclusiveStartKey) + } + }) + db.Handlers.Unmarshal.PushBack(func(r *request.Request) { + r.Data = resps[reqNum] + reqNum++ + }) + + params := &dynamodb.QueryInput{ + Limit: 
aws.Int64(2), + TableName: aws.String("tablename"), + } + err := db.QueryPages(params, func(p *dynamodb.QueryOutput, last bool) bool { + numPages++ + for _, item := range p.Items { + pages = append(pages, item) + } + if last { + if gotToEnd { + assert.Fail(t, "last=true happened twice") + } + gotToEnd = true + } + return true + }) + assert.Nil(t, err) + + assert.Equal(t, + []map[string]*dynamodb.AttributeValue{ + {"key": {S: aws.String("key1")}}, + {"key": {S: aws.String("key2")}}, + }, tokens) + assert.Equal(t, + []map[string]*dynamodb.AttributeValue{ + {"key": {S: aws.String("key1")}}, + {"key": {S: aws.String("key2")}}, + {"key": {S: aws.String("key3")}}, + }, pages) + assert.Equal(t, 3, numPages) + assert.True(t, gotToEnd) + assert.Nil(t, params.ExclusiveStartKey) +} + +// Use DynamoDB methods for simplicity +func TestPagination(t *testing.T) { + db := dynamodb.New(unit.Session) + tokens, pages, numPages, gotToEnd := []string{}, []string{}, 0, false + + reqNum := 0 + resps := []*dynamodb.ListTablesOutput{ + {TableNames: []*string{aws.String("Table1"), aws.String("Table2")}, LastEvaluatedTableName: aws.String("Table2")}, + {TableNames: []*string{aws.String("Table3"), aws.String("Table4")}, LastEvaluatedTableName: aws.String("Table4")}, + {TableNames: []*string{aws.String("Table5")}}, + } + + db.Handlers.Send.Clear() // mock sending + db.Handlers.Unmarshal.Clear() + db.Handlers.UnmarshalMeta.Clear() + db.Handlers.ValidateResponse.Clear() + db.Handlers.Build.PushBack(func(r *request.Request) { + in := r.Params.(*dynamodb.ListTablesInput) + if in == nil { + tokens = append(tokens, "") + } else if in.ExclusiveStartTableName != nil { + tokens = append(tokens, *in.ExclusiveStartTableName) + } + }) + db.Handlers.Unmarshal.PushBack(func(r *request.Request) { + r.Data = resps[reqNum] + reqNum++ + }) + + params := &dynamodb.ListTablesInput{Limit: aws.Int64(2)} + err := db.ListTablesPages(params, func(p *dynamodb.ListTablesOutput, last bool) bool { + numPages++ + for _, t := range p.TableNames { + pages = append(pages, *t) + } + if last { + if gotToEnd { + assert.Fail(t, "last=true happened twice") + } + gotToEnd = true + } + return true + }) + + assert.Equal(t, []string{"Table2", "Table4"}, tokens) + assert.Equal(t, []string{"Table1", "Table2", "Table3", "Table4", "Table5"}, pages) + assert.Equal(t, 3, numPages) + assert.True(t, gotToEnd) + assert.Nil(t, err) + assert.Nil(t, params.ExclusiveStartTableName) +} + +// Use DynamoDB methods for simplicity +func TestPaginationEachPage(t *testing.T) { + db := dynamodb.New(unit.Session) + tokens, pages, numPages, gotToEnd := []string{}, []string{}, 0, false + + reqNum := 0 + resps := []*dynamodb.ListTablesOutput{ + {TableNames: []*string{aws.String("Table1"), aws.String("Table2")}, LastEvaluatedTableName: aws.String("Table2")}, + {TableNames: []*string{aws.String("Table3"), aws.String("Table4")}, LastEvaluatedTableName: aws.String("Table4")}, + {TableNames: []*string{aws.String("Table5")}}, + } + + db.Handlers.Send.Clear() // mock sending + db.Handlers.Unmarshal.Clear() + db.Handlers.UnmarshalMeta.Clear() + db.Handlers.ValidateResponse.Clear() + db.Handlers.Build.PushBack(func(r *request.Request) { + in := r.Params.(*dynamodb.ListTablesInput) + if in == nil { + tokens = append(tokens, "") + } else if in.ExclusiveStartTableName != nil { + tokens = append(tokens, *in.ExclusiveStartTableName) + } + }) + db.Handlers.Unmarshal.PushBack(func(r *request.Request) { + r.Data = resps[reqNum] + reqNum++ + }) + + params := &dynamodb.ListTablesInput{Limit: 
aws.Int64(2)} + req, _ := db.ListTablesRequest(params) + err := req.EachPage(func(p interface{}, last bool) bool { + numPages++ + for _, t := range p.(*dynamodb.ListTablesOutput).TableNames { + pages = append(pages, *t) + } + if last { + if gotToEnd { + assert.Fail(t, "last=true happened twice") + } + gotToEnd = true + } + + return true + }) + + assert.Equal(t, []string{"Table2", "Table4"}, tokens) + assert.Equal(t, []string{"Table1", "Table2", "Table3", "Table4", "Table5"}, pages) + assert.Equal(t, 3, numPages) + assert.True(t, gotToEnd) + assert.Nil(t, err) +} + +// Use DynamoDB methods for simplicity +func TestPaginationEarlyExit(t *testing.T) { + db := dynamodb.New(unit.Session) + numPages, gotToEnd := 0, false + + reqNum := 0 + resps := []*dynamodb.ListTablesOutput{ + {TableNames: []*string{aws.String("Table1"), aws.String("Table2")}, LastEvaluatedTableName: aws.String("Table2")}, + {TableNames: []*string{aws.String("Table3"), aws.String("Table4")}, LastEvaluatedTableName: aws.String("Table4")}, + {TableNames: []*string{aws.String("Table5")}}, + } + + db.Handlers.Send.Clear() // mock sending + db.Handlers.Unmarshal.Clear() + db.Handlers.UnmarshalMeta.Clear() + db.Handlers.ValidateResponse.Clear() + db.Handlers.Unmarshal.PushBack(func(r *request.Request) { + r.Data = resps[reqNum] + reqNum++ + }) + + params := &dynamodb.ListTablesInput{Limit: aws.Int64(2)} + err := db.ListTablesPages(params, func(p *dynamodb.ListTablesOutput, last bool) bool { + numPages++ + if numPages == 2 { + return false + } + if last { + if gotToEnd { + assert.Fail(t, "last=true happened twice") + } + gotToEnd = true + } + return true + }) + + assert.Equal(t, 2, numPages) + assert.False(t, gotToEnd) + assert.Nil(t, err) +} + +func TestSkipPagination(t *testing.T) { + client := s3.New(unit.Session) + client.Handlers.Send.Clear() // mock sending + client.Handlers.Unmarshal.Clear() + client.Handlers.UnmarshalMeta.Clear() + client.Handlers.ValidateResponse.Clear() + client.Handlers.Unmarshal.PushBack(func(r *request.Request) { + r.Data = &s3.HeadBucketOutput{} + }) + + req, _ := client.HeadBucketRequest(&s3.HeadBucketInput{Bucket: aws.String("bucket")}) + + numPages, gotToEnd := 0, false + req.EachPage(func(p interface{}, last bool) bool { + numPages++ + if last { + gotToEnd = true + } + return true + }) + assert.Equal(t, 1, numPages) + assert.True(t, gotToEnd) +} + +// Use S3 for simplicity +func TestPaginationTruncation(t *testing.T) { + client := s3.New(unit.Session) + + reqNum := 0 + resps := []*s3.ListObjectsOutput{ + {IsTruncated: aws.Bool(true), Contents: []*s3.Object{{Key: aws.String("Key1")}}}, + {IsTruncated: aws.Bool(true), Contents: []*s3.Object{{Key: aws.String("Key2")}}}, + {IsTruncated: aws.Bool(false), Contents: []*s3.Object{{Key: aws.String("Key3")}}}, + {IsTruncated: aws.Bool(true), Contents: []*s3.Object{{Key: aws.String("Key4")}}}, + } + + client.Handlers.Send.Clear() // mock sending + client.Handlers.Unmarshal.Clear() + client.Handlers.UnmarshalMeta.Clear() + client.Handlers.ValidateResponse.Clear() + client.Handlers.Unmarshal.PushBack(func(r *request.Request) { + r.Data = resps[reqNum] + reqNum++ + }) + + params := &s3.ListObjectsInput{Bucket: aws.String("bucket")} + + results := []string{} + err := client.ListObjectsPages(params, func(p *s3.ListObjectsOutput, last bool) bool { + results = append(results, *p.Contents[0].Key) + return true + }) + + assert.Equal(t, []string{"Key1", "Key2", "Key3"}, results) + assert.Nil(t, err) + + // Try again without truncation token at all + reqNum = 0 + 
resps[1].IsTruncated = nil + resps[2].IsTruncated = aws.Bool(true) + results = []string{} + err = client.ListObjectsPages(params, func(p *s3.ListObjectsOutput, last bool) bool { + results = append(results, *p.Contents[0].Key) + return true + }) + + assert.Equal(t, []string{"Key1", "Key2"}, results) + assert.Nil(t, err) +} + +func TestPaginationNilToken(t *testing.T) { + client := route53.New(unit.Session) + + reqNum := 0 + resps := []*route53.ListResourceRecordSetsOutput{ + { + ResourceRecordSets: []*route53.ResourceRecordSet{ + {Name: aws.String("first.example.com.")}, + }, + IsTruncated: aws.Bool(true), + NextRecordName: aws.String("second.example.com."), + NextRecordType: aws.String("MX"), + NextRecordIdentifier: aws.String("second"), + MaxItems: aws.String("1"), + }, + { + ResourceRecordSets: []*route53.ResourceRecordSet{ + {Name: aws.String("second.example.com.")}, + }, + IsTruncated: aws.Bool(true), + NextRecordName: aws.String("third.example.com."), + NextRecordType: aws.String("MX"), + MaxItems: aws.String("1"), + }, + { + ResourceRecordSets: []*route53.ResourceRecordSet{ + {Name: aws.String("third.example.com.")}, + }, + IsTruncated: aws.Bool(false), + MaxItems: aws.String("1"), + }, + } + client.Handlers.Send.Clear() // mock sending + client.Handlers.Unmarshal.Clear() + client.Handlers.UnmarshalMeta.Clear() + client.Handlers.ValidateResponse.Clear() + + idents := []string{} + client.Handlers.Build.PushBack(func(r *request.Request) { + p := r.Params.(*route53.ListResourceRecordSetsInput) + idents = append(idents, aws.StringValue(p.StartRecordIdentifier)) + + }) + client.Handlers.Unmarshal.PushBack(func(r *request.Request) { + r.Data = resps[reqNum] + reqNum++ + }) + + params := &route53.ListResourceRecordSetsInput{ + HostedZoneId: aws.String("id-zone"), + } + + results := []string{} + err := client.ListResourceRecordSetsPages(params, func(p *route53.ListResourceRecordSetsOutput, last bool) bool { + results = append(results, *p.ResourceRecordSets[0].Name) + return true + }) + + assert.NoError(t, err) + assert.Equal(t, []string{"", "second", ""}, idents) + assert.Equal(t, []string{"first.example.com.", "second.example.com.", "third.example.com."}, results) +} + +// Benchmarks +var benchResps = []*dynamodb.ListTablesOutput{ + {TableNames: []*string{aws.String("TABLE"), aws.String("NXT")}, LastEvaluatedTableName: aws.String("NXT")}, + {TableNames: []*string{aws.String("TABLE"), aws.String("NXT")}, LastEvaluatedTableName: aws.String("NXT")}, + {TableNames: []*string{aws.String("TABLE"), aws.String("NXT")}, LastEvaluatedTableName: aws.String("NXT")}, + {TableNames: []*string{aws.String("TABLE"), aws.String("NXT")}, LastEvaluatedTableName: aws.String("NXT")}, + {TableNames: []*string{aws.String("TABLE"), aws.String("NXT")}, LastEvaluatedTableName: aws.String("NXT")}, + {TableNames: []*string{aws.String("TABLE"), aws.String("NXT")}, LastEvaluatedTableName: aws.String("NXT")}, + {TableNames: []*string{aws.String("TABLE"), aws.String("NXT")}, LastEvaluatedTableName: aws.String("NXT")}, + {TableNames: []*string{aws.String("TABLE"), aws.String("NXT")}, LastEvaluatedTableName: aws.String("NXT")}, + {TableNames: []*string{aws.String("TABLE"), aws.String("NXT")}, LastEvaluatedTableName: aws.String("NXT")}, + {TableNames: []*string{aws.String("TABLE"), aws.String("NXT")}, LastEvaluatedTableName: aws.String("NXT")}, + {TableNames: []*string{aws.String("TABLE"), aws.String("NXT")}, LastEvaluatedTableName: aws.String("NXT")}, + {TableNames: []*string{aws.String("TABLE"), aws.String("NXT")}, 
LastEvaluatedTableName: aws.String("NXT")}, + {TableNames: []*string{aws.String("TABLE"), aws.String("NXT")}, LastEvaluatedTableName: aws.String("NXT")}, + {TableNames: []*string{aws.String("TABLE")}}, +} + +var benchDb = func() *dynamodb.DynamoDB { + db := dynamodb.New(unit.Session) + db.Handlers.Send.Clear() // mock sending + db.Handlers.Unmarshal.Clear() + db.Handlers.UnmarshalMeta.Clear() + db.Handlers.ValidateResponse.Clear() + return db +} + +func BenchmarkCodegenIterator(b *testing.B) { + reqNum := 0 + db := benchDb() + db.Handlers.Unmarshal.PushBack(func(r *request.Request) { + r.Data = benchResps[reqNum] + reqNum++ + }) + + input := &dynamodb.ListTablesInput{Limit: aws.Int64(2)} + iter := func(fn func(*dynamodb.ListTablesOutput, bool) bool) error { + page, _ := db.ListTablesRequest(input) + for ; page != nil; page = page.NextPage() { + page.Send() + out := page.Data.(*dynamodb.ListTablesOutput) + if result := fn(out, !page.HasNextPage()); page.Error != nil || !result { + return page.Error + } + } + return nil + } + + for i := 0; i < b.N; i++ { + reqNum = 0 + iter(func(p *dynamodb.ListTablesOutput, last bool) bool { + return true + }) + } +} + +func BenchmarkEachPageIterator(b *testing.B) { + reqNum := 0 + db := benchDb() + db.Handlers.Unmarshal.PushBack(func(r *request.Request) { + r.Data = benchResps[reqNum] + reqNum++ + }) + + input := &dynamodb.ListTablesInput{Limit: aws.Int64(2)} + for i := 0; i < b.N; i++ { + reqNum = 0 + req, _ := db.ListTablesRequest(input) + req.EachPage(func(p interface{}, last bool) bool { + return true + }) + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/request_test.go b/vendor/github.com/aws/aws-sdk-go/aws/request/request_test.go new file mode 100644 index 000000000..4828dff7f --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/request/request_test.go @@ -0,0 +1,261 @@ +package request_test + +import ( + "bytes" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "net/http" + "runtime" + "testing" + "time" + + "github.com/stretchr/testify/assert" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/awstesting" +) + +type testData struct { + Data string +} + +func body(str string) io.ReadCloser { + return ioutil.NopCloser(bytes.NewReader([]byte(str))) +} + +func unmarshal(req *request.Request) { + defer req.HTTPResponse.Body.Close() + if req.Data != nil { + json.NewDecoder(req.HTTPResponse.Body).Decode(req.Data) + } + return +} + +func unmarshalError(req *request.Request) { + bodyBytes, err := ioutil.ReadAll(req.HTTPResponse.Body) + if err != nil { + req.Error = awserr.New("UnmarshaleError", req.HTTPResponse.Status, err) + return + } + if len(bodyBytes) == 0 { + req.Error = awserr.NewRequestFailure( + awserr.New("UnmarshaleError", req.HTTPResponse.Status, fmt.Errorf("empty body")), + req.HTTPResponse.StatusCode, + "", + ) + return + } + var jsonErr jsonErrorResponse + if err := json.Unmarshal(bodyBytes, &jsonErr); err != nil { + req.Error = awserr.New("UnmarshaleError", "JSON unmarshal", err) + return + } + req.Error = awserr.NewRequestFailure( + awserr.New(jsonErr.Code, jsonErr.Message, nil), + req.HTTPResponse.StatusCode, + "", + ) +} + +type jsonErrorResponse struct { + Code string `json:"__type"` + Message string `json:"message"` +} + +// test that retries occur for 5xx status codes +func TestRequestRecoverRetry5xx(t *testing.T) { + reqNum := 0 + reqs := []http.Response{ + {StatusCode: 500, 
Body: body(`{"__type":"UnknownError","message":"An error occurred."}`)}, + {StatusCode: 501, Body: body(`{"__type":"UnknownError","message":"An error occurred."}`)}, + {StatusCode: 200, Body: body(`{"data":"valid"}`)}, + } + + s := awstesting.NewClient(aws.NewConfig().WithMaxRetries(10)) + s.Handlers.Validate.Clear() + s.Handlers.Unmarshal.PushBack(unmarshal) + s.Handlers.UnmarshalError.PushBack(unmarshalError) + s.Handlers.Send.Clear() // mock sending + s.Handlers.Send.PushBack(func(r *request.Request) { + r.HTTPResponse = &reqs[reqNum] + reqNum++ + }) + out := &testData{} + r := s.NewRequest(&request.Operation{Name: "Operation"}, nil, out) + err := r.Send() + assert.Nil(t, err) + assert.Equal(t, 2, int(r.RetryCount)) + assert.Equal(t, "valid", out.Data) +} + +// test that retries occur for 4xx status codes with a response type that can be retried - see `shouldRetry` +func TestRequestRecoverRetry4xxRetryable(t *testing.T) { + reqNum := 0 + reqs := []http.Response{ + {StatusCode: 400, Body: body(`{"__type":"Throttling","message":"Rate exceeded."}`)}, + {StatusCode: 429, Body: body(`{"__type":"ProvisionedThroughputExceededException","message":"Rate exceeded."}`)}, + {StatusCode: 200, Body: body(`{"data":"valid"}`)}, + } + + s := awstesting.NewClient(aws.NewConfig().WithMaxRetries(10)) + s.Handlers.Validate.Clear() + s.Handlers.Unmarshal.PushBack(unmarshal) + s.Handlers.UnmarshalError.PushBack(unmarshalError) + s.Handlers.Send.Clear() // mock sending + s.Handlers.Send.PushBack(func(r *request.Request) { + r.HTTPResponse = &reqs[reqNum] + reqNum++ + }) + out := &testData{} + r := s.NewRequest(&request.Operation{Name: "Operation"}, nil, out) + err := r.Send() + assert.Nil(t, err) + assert.Equal(t, 2, int(r.RetryCount)) + assert.Equal(t, "valid", out.Data) +} + +// test that retries don't occur for 4xx status codes with a response type that can't be retried +func TestRequest4xxUnretryable(t *testing.T) { + s := awstesting.NewClient(aws.NewConfig().WithMaxRetries(10)) + s.Handlers.Validate.Clear() + s.Handlers.Unmarshal.PushBack(unmarshal) + s.Handlers.UnmarshalError.PushBack(unmarshalError) + s.Handlers.Send.Clear() // mock sending + s.Handlers.Send.PushBack(func(r *request.Request) { + r.HTTPResponse = &http.Response{StatusCode: 401, Body: body(`{"__type":"SignatureDoesNotMatch","message":"Signature does not match."}`)} + }) + out := &testData{} + r := s.NewRequest(&request.Operation{Name: "Operation"}, nil, out) + err := r.Send() + assert.NotNil(t, err) + if e, ok := err.(awserr.RequestFailure); ok { + assert.Equal(t, 401, e.StatusCode()) + } else { + assert.Fail(t, "Expected error to be a service failure") + } + assert.Equal(t, "SignatureDoesNotMatch", err.(awserr.Error).Code()) + assert.Equal(t, "Signature does not match.", err.(awserr.Error).Message()) + assert.Equal(t, 0, int(r.RetryCount)) +} + +func TestRequestExhaustRetries(t *testing.T) { + delays := []time.Duration{} + sleepDelay := func(delay time.Duration) { + delays = append(delays, delay) + } + + reqNum := 0 + reqs := []http.Response{ + {StatusCode: 500, Body: body(`{"__type":"UnknownError","message":"An error occurred."}`)}, + {StatusCode: 500, Body: body(`{"__type":"UnknownError","message":"An error occurred."}`)}, + {StatusCode: 500, Body: body(`{"__type":"UnknownError","message":"An error occurred."}`)}, + {StatusCode: 500, Body: body(`{"__type":"UnknownError","message":"An error occurred."}`)}, + } + + s := awstesting.NewClient(aws.NewConfig().WithSleepDelay(sleepDelay)) + s.Handlers.Validate.Clear() + 
s.Handlers.Unmarshal.PushBack(unmarshal) + s.Handlers.UnmarshalError.PushBack(unmarshalError) + s.Handlers.Send.Clear() // mock sending + s.Handlers.Send.PushBack(func(r *request.Request) { + r.HTTPResponse = &reqs[reqNum] + reqNum++ + }) + r := s.NewRequest(&request.Operation{Name: "Operation"}, nil, nil) + err := r.Send() + assert.NotNil(t, err) + if e, ok := err.(awserr.RequestFailure); ok { + assert.Equal(t, 500, e.StatusCode()) + } else { + assert.Fail(t, "Expected error to be a service failure") + } + assert.Equal(t, "UnknownError", err.(awserr.Error).Code()) + assert.Equal(t, "An error occurred.", err.(awserr.Error).Message()) + assert.Equal(t, 3, int(r.RetryCount)) + + expectDelays := []struct{ min, max time.Duration }{{30, 59}, {60, 118}, {120, 236}} + for i, v := range delays { + min := expectDelays[i].min * time.Millisecond + max := expectDelays[i].max * time.Millisecond + assert.True(t, min <= v && v <= max, + "Expect delay to be within range, i:%d, v:%s, min:%s, max:%s", i, v, min, max) + } +} + +// test that the request is retried after the credentials are expired. +func TestRequestRecoverExpiredCreds(t *testing.T) { + reqNum := 0 + reqs := []http.Response{ + {StatusCode: 400, Body: body(`{"__type":"ExpiredTokenException","message":"expired token"}`)}, + {StatusCode: 200, Body: body(`{"data":"valid"}`)}, + } + + s := awstesting.NewClient(&aws.Config{MaxRetries: aws.Int(10), Credentials: credentials.NewStaticCredentials("AKID", "SECRET", "")}) + s.Handlers.Validate.Clear() + s.Handlers.Unmarshal.PushBack(unmarshal) + s.Handlers.UnmarshalError.PushBack(unmarshalError) + + credExpiredBeforeRetry := false + credExpiredAfterRetry := false + + s.Handlers.AfterRetry.PushBack(func(r *request.Request) { + credExpiredAfterRetry = r.Config.Credentials.IsExpired() + }) + + s.Handlers.Sign.Clear() + s.Handlers.Sign.PushBack(func(r *request.Request) { + r.Config.Credentials.Get() + }) + s.Handlers.Send.Clear() // mock sending + s.Handlers.Send.PushBack(func(r *request.Request) { + r.HTTPResponse = &reqs[reqNum] + reqNum++ + }) + out := &testData{} + r := s.NewRequest(&request.Operation{Name: "Operation"}, nil, out) + err := r.Send() + assert.Nil(t, err) + + assert.False(t, credExpiredBeforeRetry, "Expect valid creds before retry check") + assert.True(t, credExpiredAfterRetry, "Expect expired creds after retry check") + assert.False(t, s.Config.Credentials.IsExpired(), "Expect valid creds after cred expired recovery") + + assert.Equal(t, 1, int(r.RetryCount)) + assert.Equal(t, "valid", out.Data) +} + +func TestMakeAddtoUserAgentHandler(t *testing.T) { + fn := request.MakeAddToUserAgentHandler("name", "version", "extra1", "extra2") + r := &request.Request{HTTPRequest: &http.Request{Header: http.Header{}}} + r.HTTPRequest.Header.Set("User-Agent", "foo/bar") + fn(r) + + assert.Equal(t, "foo/bar name/version (extra1; extra2)", r.HTTPRequest.Header.Get("User-Agent")) +} + +func TestMakeAddtoUserAgentFreeFormHandler(t *testing.T) { + fn := request.MakeAddToUserAgentFreeFormHandler("name/version (extra1; extra2)") + r := &request.Request{HTTPRequest: &http.Request{Header: http.Header{}}} + r.HTTPRequest.Header.Set("User-Agent", "foo/bar") + fn(r) + + assert.Equal(t, "foo/bar name/version (extra1; extra2)", r.HTTPRequest.Header.Get("User-Agent")) +} + +func TestRequestUserAgent(t *testing.T) { + s := awstesting.NewClient(&aws.Config{Region: aws.String("us-east-1")}) + // s.Handlers.Validate.Clear() + + req := s.NewRequest(&request.Operation{Name: "Operation"}, nil, &testData{}) + 
req.HTTPRequest.Header.Set("User-Agent", "foo/bar") + assert.NoError(t, req.Build()) + + expectUA := fmt.Sprintf("foo/bar %s/%s (%s; %s; %s)", + aws.SDKName, aws.SDKVersion, runtime.Version(), runtime.GOOS, runtime.GOARCH) + assert.Equal(t, expectUA, req.HTTPRequest.Header.Get("User-Agent")) +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/retryer.go b/vendor/github.com/aws/aws-sdk-go/aws/request/retryer.go new file mode 100644 index 000000000..ab6fff5ac --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/request/retryer.go @@ -0,0 +1,82 @@ +package request + +import ( + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" +) + +// Retryer is an interface to control retry logic for a given service. +// The default implementation used by most services is the service.DefaultRetryer +// structure, which contains basic retry logic using exponential backoff. +type Retryer interface { + RetryRules(*Request) time.Duration + ShouldRetry(*Request) bool + MaxRetries() int +} + +// WithRetryer sets a config Retryer value to the given Config returning it +// for chaining. +func WithRetryer(cfg *aws.Config, retryer Retryer) *aws.Config { + cfg.Retryer = retryer + return cfg +} + +// retryableCodes is a collection of service response codes which are retry-able +// without any further action. +var retryableCodes = map[string]struct{}{ + "RequestError": {}, + "RequestTimeout": {}, + "ProvisionedThroughputExceededException": {}, + "Throttling": {}, + "ThrottlingException": {}, + "RequestLimitExceeded": {}, + "RequestThrottled": {}, + "LimitExceededException": {}, // Deleting 10+ DynamoDb tables at once + "TooManyRequestsException": {}, // Lambda functions +} + +// credsExpiredCodes is a collection of error codes which signify the credentials +// need to be refreshed. Expired tokens require refreshing of credentials, and +// resigning before the request can be retried. +var credsExpiredCodes = map[string]struct{}{ + "ExpiredToken": {}, + "ExpiredTokenException": {}, + "RequestExpired": {}, // EC2 Only +} + +func isCodeRetryable(code string) bool { + if _, ok := retryableCodes[code]; ok { + return true + } + + return isCodeExpiredCreds(code) +} + +func isCodeExpiredCreds(code string) bool { + _, ok := credsExpiredCodes[code] + return ok +} + +// IsErrorRetryable returns whether the error is retryable, based on its Code. +// Returns false if the request has no Error set. +func (r *Request) IsErrorRetryable() bool { + if r.Error != nil { + if err, ok := r.Error.(awserr.Error); ok { + return isCodeRetryable(err.Code()) + } + } + return false +} + +// IsErrorExpired returns whether the error code is a credential expiry error. +// Returns false if the request has no Error set. +func (r *Request) IsErrorExpired() bool { + if r.Error != nil { + if err, ok := r.Error.(awserr.Error); ok { + return isCodeExpiredCreds(err.Code()) + } + } + return false +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/session.go b/vendor/github.com/aws/aws-sdk-go/aws/session/session.go new file mode 100644 index 000000000..47e4536ff --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/session/session.go @@ -0,0 +1,120 @@ +// Package session provides a way to create service clients with shared configuration +// and handlers. +// +// Generally this package should be used instead of the `defaults` package. +// +// A session should be used to share configurations and request handlers between multiple +// service clients. 
When service clients need specific configuration, aws.Config can be
+// used to provide additional configuration directly to the service client.
+package session
+
+import (
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/aws/client"
+	"github.com/aws/aws-sdk-go/aws/corehandlers"
+	"github.com/aws/aws-sdk-go/aws/defaults"
+	"github.com/aws/aws-sdk-go/aws/request"
+	"github.com/aws/aws-sdk-go/private/endpoints"
+)
+
+// A Session provides a central location to create service clients from and
+// store configurations and request handlers for those services.
+//
+// Sessions are safe to create service clients concurrently, but it is not safe
+// to mutate the session concurrently.
+type Session struct {
+	Config   *aws.Config
+	Handlers request.Handlers
+}
+
+// New creates a new Session, merging the provided Configs
+// on top of the SDK's default configuration. Once the session is created it
+// can be mutated to modify Configs or Handlers. The session is safe to read
+// concurrently, but it should not be written to concurrently.
+//
+// Example:
+//     // Create a session with the default config and request handlers.
+//     sess := session.New()
+//
+//     // Create a session with a custom region
+//     sess := session.New(&aws.Config{Region: aws.String("us-east-1")})
+//
+//     // Create a session, and add additional handlers for all service
+//     // clients created with the session to inherit. Adds logging handler.
+//     sess := session.New()
+//     sess.Handlers.Send.PushFront(func(r *request.Request) {
+//          // Log every request made and its payload
+//          logger.Printf("Request: %s/%s, Payload: %s", r.ClientInfo.ServiceName, r.Operation, r.Params)
+//     })
+//
+//     // Create an S3 client instance from a session
+//     sess := session.New()
+//     svc := s3.New(sess)
+func New(cfgs ...*aws.Config) *Session {
+	cfg := defaults.Config()
+	handlers := defaults.Handlers()
+
+	// Apply the passed in configs so the configuration can be applied to the
+	// default credential chain
+	cfg.MergeIn(cfgs...)
+	cfg.Credentials = defaults.CredChain(cfg, handlers)
+
+	// Reapply any passed in configs to override credentials if set
+	cfg.MergeIn(cfgs...)
+
+	s := &Session{
+		Config:   cfg,
+		Handlers: handlers,
+	}
+
+	initHandlers(s)
+
+	return s
+}
+
+func initHandlers(s *Session) {
+	// Add the Validate parameter handler if it is not disabled.
+	s.Handlers.Validate.Remove(corehandlers.ValidateParametersHandler)
+	if !aws.BoolValue(s.Config.DisableParamValidation) {
+		s.Handlers.Validate.PushBackNamed(corehandlers.ValidateParametersHandler)
+	}
+}
+
+// Copy creates and returns a copy of the current session, copying the config
+// and handlers. If any additional configs are provided they will be merged
+// on top of the session's copied config.
+//
+// Example:
+//     // Create a copy of the current session, configured for the us-west-2 region.
+//     sess.Copy(&aws.Config{Region: aws.String("us-west-2")})
+func (s *Session) Copy(cfgs ...*aws.Config) *Session {
+	newSession := &Session{
+		Config:   s.Config.Copy(cfgs...),
+		Handlers: s.Handlers.Copy(),
+	}
+
+	initHandlers(newSession)
+
+	return newSession
+}
+
+// ClientConfig satisfies the client.ConfigProvider interface and is used to
+// configure the service client instances. Passing the Session to the service
+// client's constructor (New) will use this method to configure the client.
+//
+// Example:
+//     sess := session.New()
+//     s3.New(sess)
+func (s *Session) ClientConfig(serviceName string, cfgs ...*aws.Config) client.Config {
+	s = s.Copy(cfgs...)
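+	// Copy applies any per-client config overrides to a fresh Session so the
+	// shared Session's Config and Handlers are not mutated.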
+	endpoint, signingRegion := endpoints.NormalizeEndpoint(
+		aws.StringValue(s.Config.Endpoint), serviceName,
+		aws.StringValue(s.Config.Region), aws.BoolValue(s.Config.DisableSSL))
+
+	return client.Config{
+		Config:        s.Config,
+		Handlers:      s.Handlers,
+		Endpoint:      endpoint,
+		SigningRegion: signingRegion,
+	}
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/session_test.go b/vendor/github.com/aws/aws-sdk-go/aws/session/session_test.go
new file mode 100644
index 000000000..e56c02fc6
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/session/session_test.go
@@ -0,0 +1,20 @@
+package session_test
+
+import (
+	"net/http"
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/aws/session"
+)
+
+func TestNewDefaultSession(t *testing.T) {
+	s := session.New(&aws.Config{Region: aws.String("region")})
+
+	assert.Equal(t, "region", *s.Config.Region)
+	assert.Equal(t, http.DefaultClient, s.Config.HTTPClient)
+	assert.NotNil(t, s.Config.Logger)
+	assert.Equal(t, aws.LogOff, *s.Config.LogLevel)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/types.go b/vendor/github.com/aws/aws-sdk-go/aws/types.go
new file mode 100644
index 000000000..0f067c57f
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/types.go
@@ -0,0 +1,88 @@
+package aws
+
+import (
+	"io"
+	"sync"
+)
+
+// ReadSeekCloser wraps an io.Reader, returning a ReaderSeekerCloser.
+func ReadSeekCloser(r io.Reader) ReaderSeekerCloser {
+	return ReaderSeekerCloser{r}
+}
+
+// ReaderSeekerCloser represents a reader that can also delegate io.Seeker and
+// io.Closer interfaces to the underlying object if they are available.
+type ReaderSeekerCloser struct {
+	r io.Reader
+}
+
+// Read reads from the reader up to len(p) bytes. The number of bytes read
+// and any error encountered are returned.
+//
+// If the wrapped value does not satisfy io.Reader, zero bytes read and a nil
+// error will be returned.
+//
+// Performs the same functionality as io.Reader's Read.
+func (r ReaderSeekerCloser) Read(p []byte) (int, error) {
+	switch t := r.r.(type) {
+	case io.Reader:
+		return t.Read(p)
+	}
+	return 0, nil
+}
+
+// Seek sets the offset for the next Read to offset, interpreted according to
+// whence: 0 means relative to the origin of the file, 1 means relative to the
+// current offset, and 2 means relative to the end. Seek returns the new offset
+// and an error, if any.
+//
+// If the wrapped value is not an io.Seeker nothing will be done.
+func (r ReaderSeekerCloser) Seek(offset int64, whence int) (int64, error) {
+	switch t := r.r.(type) {
+	case io.Seeker:
+		return t.Seek(offset, whence)
+	}
+	return int64(0), nil
+}
+
+// Close closes the ReaderSeekerCloser.
+//
+// If the wrapped value is not an io.Closer nothing will be done.
+func (r ReaderSeekerCloser) Close() error {
+	switch t := r.r.(type) {
+	case io.Closer:
+		return t.Close()
+	}
+	return nil
+}
+
+// A WriteAtBuffer provides an in-memory buffer supporting the io.WriterAt
+// interface. It can be used with the s3manager.Downloader to download content
+// to a buffer in memory. Safe to use concurrently.
+type WriteAtBuffer struct {
+	buf []byte
+	m   sync.Mutex
+}
+
+// WriteAt writes a slice of bytes to the buffer starting at the position
+// provided. The number of bytes written, or an error, will be returned.
+// Previously written regions are overwritten when writes overlap.
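+//
+// A small illustrative sequence (values are examples only):
+//
+//	b := &aws.WriteAtBuffer{}
+//	b.WriteAt([]byte{1}, 0)    // buf: [1]
+//	b.WriteAt([]byte{2, 3}, 2) // buf grows to: [1 0 2 3]
+//	_ = b.Bytes()              // [1 0 2 3]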
+func (b *WriteAtBuffer) WriteAt(p []byte, pos int64) (n int, err error) {
+	b.m.Lock()
+	defer b.m.Unlock()
+
+	expLen := pos + int64(len(p))
+	if int64(len(b.buf)) < expLen {
+		newBuf := make([]byte, expLen)
+		copy(newBuf, b.buf)
+		b.buf = newBuf
+	}
+	copy(b.buf[pos:], p)
+	return len(p), nil
+}
+
+// Bytes returns a slice of bytes written to the buffer.
+func (b *WriteAtBuffer) Bytes() []byte {
+	b.m.Lock()
+	defer b.m.Unlock()
+	return b.buf[:len(b.buf):len(b.buf)]
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/types_test.go b/vendor/github.com/aws/aws-sdk-go/aws/types_test.go
new file mode 100644
index 000000000..a4ed20e7d
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/types_test.go
@@ -0,0 +1,56 @@
+package aws
+
+import (
+	"math/rand"
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+)
+
+func TestWriteAtBuffer(t *testing.T) {
+	b := &WriteAtBuffer{}
+
+	n, err := b.WriteAt([]byte{1}, 0)
+	assert.NoError(t, err)
+	assert.Equal(t, 1, n)
+
+	n, err = b.WriteAt([]byte{1, 1, 1}, 5)
+	assert.NoError(t, err)
+	assert.Equal(t, 3, n)
+
+	n, err = b.WriteAt([]byte{2}, 1)
+	assert.NoError(t, err)
+	assert.Equal(t, 1, n)
+
+	n, err = b.WriteAt([]byte{3}, 2)
+	assert.NoError(t, err)
+	assert.Equal(t, 1, n)
+
+	assert.Equal(t, []byte{1, 2, 3, 0, 0, 1, 1, 1}, b.Bytes())
+}
+
+func BenchmarkWriteAtBuffer(b *testing.B) {
+	buf := &WriteAtBuffer{}
+	r := rand.New(rand.NewSource(1))
+
+	b.ResetTimer()
+	for i := 0; i < b.N; i++ {
+		to := r.Intn(10) * 4096
+		bs := make([]byte, to)
+		buf.WriteAt(bs, r.Int63n(10)*4096)
+	}
+}
+
+func BenchmarkWriteAtBufferParallel(b *testing.B) {
+	buf := &WriteAtBuffer{}
+	r := rand.New(rand.NewSource(1))
+
+	b.ResetTimer()
+	b.RunParallel(func(pb *testing.PB) {
+		for pb.Next() {
+			to := r.Intn(10) * 4096
+			bs := make([]byte, to)
+			buf.WriteAt(bs, r.Int63n(10)*4096)
+		}
+	})
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/version.go b/vendor/github.com/aws/aws-sdk-go/aws/version.go
new file mode 100644
index 000000000..ad215ea8e
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/version.go
@@ -0,0 +1,8 @@
+// Package aws provides core functionality for making requests to AWS services.
+package aws
+
+// SDKName is the name of this AWS SDK
+const SDKName = "aws-sdk-go"
+
+// SDKVersion is the version of this SDK
+const SDKVersion = "1.1.0"
diff --git a/vendor/github.com/aws/aws-sdk-go/private/endpoints/endpoints.go b/vendor/github.com/aws/aws-sdk-go/private/endpoints/endpoints.go
new file mode 100644
index 000000000..2b279e659
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/private/endpoints/endpoints.go
@@ -0,0 +1,65 @@
+// Package endpoints validates regional endpoints for services.
+package endpoints
+
+//go:generate go run ../model/cli/gen-endpoints/main.go endpoints.json endpoints_map.go
+//go:generate gofmt -s -w endpoints_map.go
+
+import (
+	"fmt"
+	"regexp"
+	"strings"
+)
+
+// NormalizeEndpoint takes an endpoint and service API information and returns a
+// normalized endpoint and signing region. If the endpoint is an empty string,
+// the service name and region will be used to look up the service's API endpoint.
+// If an endpoint is provided, a scheme will be added if it is not present.
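+//
+// For example, with the endpoint map in this package (illustrative values):
+//
+//	ep, sr := NormalizeEndpoint("", "s3", "us-east-1", false)
+//	// ep == "https://s3.amazonaws.com", sr == ""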
+func NormalizeEndpoint(endpoint, serviceName, region string, disableSSL bool) (normEndpoint, signingRegion string) {
+	if endpoint == "" {
+		return EndpointForRegion(serviceName, region, disableSSL)
+	}
+
+	return AddScheme(endpoint, disableSSL), ""
+}
+
+// EndpointForRegion returns an endpoint and its signing region for a service and region.
+// If the service and region pair is not found, endpoint and signingRegion will be empty.
+func EndpointForRegion(svcName, region string, disableSSL bool) (endpoint, signingRegion string) {
+	derivedKeys := []string{
+		region + "/" + svcName,
+		region + "/*",
+		"*/" + svcName,
+		"*/*",
+	}
+
+	for _, key := range derivedKeys {
+		if val, ok := endpointsMap.Endpoints[key]; ok {
+			ep := val.Endpoint
+			ep = strings.Replace(ep, "{region}", region, -1)
+			ep = strings.Replace(ep, "{service}", svcName, -1)
+
+			endpoint = ep
+			signingRegion = val.SigningRegion
+			break
+		}
+	}
+
+	return AddScheme(endpoint, disableSSL), signingRegion
+}
+
+// Regular expression to determine if the endpoint string is prefixed with a scheme.
+var schemeRE = regexp.MustCompile("^([^:]+)://")
+
+// AddScheme adds the HTTP or HTTPS scheme to an endpoint URL if there is no
+// scheme. If disableSSL is true HTTP will be added instead of the default HTTPS.
+func AddScheme(endpoint string, disableSSL bool) string {
+	if endpoint != "" && !schemeRE.MatchString(endpoint) {
+		scheme := "https"
+		if disableSSL {
+			scheme = "http"
+		}
+		endpoint = fmt.Sprintf("%s://%s", scheme, endpoint)
+	}
+
+	return endpoint
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/private/endpoints/endpoints.json b/vendor/github.com/aws/aws-sdk-go/private/endpoints/endpoints.json
new file mode 100644
index 000000000..7819eedc3
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/private/endpoints/endpoints.json
@@ -0,0 +1,92 @@
+{
+  "version": 2,
+  "endpoints": {
+    "*/*": {
+      "endpoint": "{service}.{region}.amazonaws.com"
+    },
+    "cn-north-1/*": {
+      "endpoint": "{service}.{region}.amazonaws.com.cn",
+      "signatureVersion": "v4"
+    },
+    "us-gov-west-1/iam": {
+      "endpoint": "iam.us-gov.amazonaws.com"
+    },
+    "us-gov-west-1/sts": {
+      "endpoint": "sts.us-gov-west-1.amazonaws.com"
+    },
+    "us-gov-west-1/s3": {
+      "endpoint": "s3-{region}.amazonaws.com"
+    },
+    "*/cloudfront": {
+      "endpoint": "cloudfront.amazonaws.com",
+      "signingRegion": "us-east-1"
+    },
+    "*/cloudsearchdomain": {
+      "endpoint": "",
+      "signingRegion": "us-east-1"
+    },
+    "*/data.iot": {
+      "endpoint": "",
+      "signingRegion": "us-east-1"
+    },
+    "*/ec2metadata": {
+      "endpoint": "http://169.254.169.254/latest",
+      "signingRegion": "us-east-1"
+    },
+    "*/iam": {
+      "endpoint": "iam.amazonaws.com",
+      "signingRegion": "us-east-1"
+    },
+    "*/importexport": {
+      "endpoint": "importexport.amazonaws.com",
+      "signingRegion": "us-east-1"
+    },
+    "*/route53": {
+      "endpoint": "route53.amazonaws.com",
+      "signingRegion": "us-east-1"
+    },
+    "*/sts": {
+      "endpoint": "sts.amazonaws.com",
+      "signingRegion": "us-east-1"
+    },
+    "*/waf": {
+      "endpoint": "waf.amazonaws.com",
+      "signingRegion": "us-east-1"
+    },
+    "us-east-1/sdb": {
+      "endpoint": "sdb.amazonaws.com",
+      "signingRegion": "us-east-1"
+    },
+    "us-east-1/s3": {
+      "endpoint": "s3.amazonaws.com"
+    },
+    "us-west-1/s3": {
+      "endpoint": "s3-{region}.amazonaws.com"
+    },
+    "us-west-2/s3": {
+      "endpoint": "s3-{region}.amazonaws.com"
+    },
+    "eu-west-1/s3": {
+      "endpoint": "s3-{region}.amazonaws.com"
+    },
+    "ap-southeast-1/s3": {
+      "endpoint": "s3-{region}.amazonaws.com"
+    },
+    "ap-southeast-2/s3": {
+      "endpoint":
"s3-{region}.amazonaws.com" + }, + "ap-northeast-1/s3": { + "endpoint": "s3-{region}.amazonaws.com" + }, + "ap-northeast-2/s3": { + "endpoint": "s3-{region}.amazonaws.com" + }, + "sa-east-1/s3": { + "endpoint": "s3-{region}.amazonaws.com" + }, + "eu-central-1/s3": { + "endpoint": "{service}.{region}.amazonaws.com", + "signatureVersion": "v4" + } + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/endpoints/endpoints_map.go b/vendor/github.com/aws/aws-sdk-go/private/endpoints/endpoints_map.go new file mode 100644 index 000000000..9b2e1b699 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/endpoints/endpoints_map.go @@ -0,0 +1,104 @@ +package endpoints + +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +type endpointStruct struct { + Version int + Endpoints map[string]endpointEntry +} + +type endpointEntry struct { + Endpoint string + SigningRegion string +} + +var endpointsMap = endpointStruct{ + Version: 2, + Endpoints: map[string]endpointEntry{ + "*/*": { + Endpoint: "{service}.{region}.amazonaws.com", + }, + "*/cloudfront": { + Endpoint: "cloudfront.amazonaws.com", + SigningRegion: "us-east-1", + }, + "*/cloudsearchdomain": { + Endpoint: "", + SigningRegion: "us-east-1", + }, + "*/data.iot": { + Endpoint: "", + SigningRegion: "us-east-1", + }, + "*/ec2metadata": { + Endpoint: "http://169.254.169.254/latest", + SigningRegion: "us-east-1", + }, + "*/iam": { + Endpoint: "iam.amazonaws.com", + SigningRegion: "us-east-1", + }, + "*/importexport": { + Endpoint: "importexport.amazonaws.com", + SigningRegion: "us-east-1", + }, + "*/route53": { + Endpoint: "route53.amazonaws.com", + SigningRegion: "us-east-1", + }, + "*/sts": { + Endpoint: "sts.amazonaws.com", + SigningRegion: "us-east-1", + }, + "*/waf": { + Endpoint: "waf.amazonaws.com", + SigningRegion: "us-east-1", + }, + "ap-northeast-1/s3": { + Endpoint: "s3-{region}.amazonaws.com", + }, + "ap-northeast-2/s3": { + Endpoint: "s3-{region}.amazonaws.com", + }, + "ap-southeast-1/s3": { + Endpoint: "s3-{region}.amazonaws.com", + }, + "ap-southeast-2/s3": { + Endpoint: "s3-{region}.amazonaws.com", + }, + "cn-north-1/*": { + Endpoint: "{service}.{region}.amazonaws.com.cn", + }, + "eu-central-1/s3": { + Endpoint: "{service}.{region}.amazonaws.com", + }, + "eu-west-1/s3": { + Endpoint: "s3-{region}.amazonaws.com", + }, + "sa-east-1/s3": { + Endpoint: "s3-{region}.amazonaws.com", + }, + "us-east-1/s3": { + Endpoint: "s3.amazonaws.com", + }, + "us-east-1/sdb": { + Endpoint: "sdb.amazonaws.com", + SigningRegion: "us-east-1", + }, + "us-gov-west-1/iam": { + Endpoint: "iam.us-gov.amazonaws.com", + }, + "us-gov-west-1/s3": { + Endpoint: "s3-{region}.amazonaws.com", + }, + "us-gov-west-1/sts": { + Endpoint: "sts.us-gov-west-1.amazonaws.com", + }, + "us-west-1/s3": { + Endpoint: "s3-{region}.amazonaws.com", + }, + "us-west-2/s3": { + Endpoint: "s3-{region}.amazonaws.com", + }, + }, +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/endpoints/endpoints_test.go b/vendor/github.com/aws/aws-sdk-go/private/endpoints/endpoints_test.go new file mode 100644 index 000000000..2add48890 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/endpoints/endpoints_test.go @@ -0,0 +1,41 @@ +package endpoints_test + +import ( + "fmt" + "testing" + + "github.com/stretchr/testify/assert" + + "github.com/aws/aws-sdk-go/private/endpoints" +) + +func TestGenericEndpoint(t *testing.T) { + name := "service" + region := "mock-region-1" + + ep, sr := endpoints.EndpointForRegion(name, region, false) + assert.Equal(t, 
fmt.Sprintf("https://%s.%s.amazonaws.com", name, region), ep) + assert.Empty(t, sr) +} + +func TestGlobalEndpoints(t *testing.T) { + region := "mock-region-1" + svcs := []string{"cloudfront", "iam", "importexport", "route53", "sts", "waf"} + + for _, name := range svcs { + ep, sr := endpoints.EndpointForRegion(name, region, false) + assert.Equal(t, fmt.Sprintf("https://%s.amazonaws.com", name), ep) + assert.Equal(t, "us-east-1", sr) + } +} + +func TestServicesInCN(t *testing.T) { + region := "cn-north-1" + svcs := []string{"cloudfront", "iam", "importexport", "route53", "sts", "s3", "waf"} + + for _, name := range svcs { + ep, sr := endpoints.EndpointForRegion(name, region, false) + assert.Equal(t, fmt.Sprintf("https://%s.%s.amazonaws.com.cn", name, region), ep) + assert.Empty(t, sr) + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/ec2query/build.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/ec2query/build.go new file mode 100644 index 000000000..0ead0126e --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/ec2query/build.go @@ -0,0 +1,32 @@ +// Package ec2query provides serialisation of AWS EC2 requests and responses. +package ec2query + +//go:generate go run ../../../models/protocol_tests/generate.go ../../../models/protocol_tests/input/ec2.json build_test.go + +import ( + "net/url" + + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol/query/queryutil" +) + +// Build builds a request for the EC2 protocol. +func Build(r *request.Request) { + body := url.Values{ + "Action": {r.Operation.Name}, + "Version": {r.ClientInfo.APIVersion}, + } + if err := queryutil.Parse(body, r.Params, true); err != nil { + r.Error = awserr.New("SerializationError", "failed encoding EC2 Query request", err) + } + + if r.ExpireTime == 0 { + r.HTTPRequest.Method = "POST" + r.HTTPRequest.Header.Set("Content-Type", "application/x-www-form-urlencoded; charset=utf-8") + r.SetBufferBody([]byte(body.Encode())) + } else { // This is a pre-signed request + r.HTTPRequest.Method = "GET" + r.HTTPRequest.URL.RawQuery = body.Encode() + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/ec2query/build_bench_test.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/ec2query/build_bench_test.go new file mode 100644 index 000000000..e135b9360 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/ec2query/build_bench_test.go @@ -0,0 +1,85 @@ +// +build bench + +package ec2query_test + +import ( + "testing" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/awstesting" + "github.com/aws/aws-sdk-go/private/protocol/ec2query" + "github.com/aws/aws-sdk-go/service/ec2" +) + +func BenchmarkEC2QueryBuild_Complex_ec2AuthorizeSecurityGroupEgress(b *testing.B) { + params := &ec2.AuthorizeSecurityGroupEgressInput{ + GroupId: aws.String("String"), // Required + CidrIp: aws.String("String"), + DryRun: aws.Bool(true), + FromPort: aws.Int64(1), + IpPermissions: []*ec2.IpPermission{ + { // Required + FromPort: aws.Int64(1), + IpProtocol: aws.String("String"), + IpRanges: []*ec2.IpRange{ + { // Required + CidrIp: aws.String("String"), + }, + // More values... + }, + PrefixListIds: []*ec2.PrefixListId{ + { // Required + PrefixListId: aws.String("String"), + }, + // More values... 
+ }, + ToPort: aws.Int64(1), + UserIdGroupPairs: []*ec2.UserIdGroupPair{ + { // Required + GroupId: aws.String("String"), + GroupName: aws.String("String"), + UserId: aws.String("String"), + }, + // More values... + }, + }, + // More values... + }, + IpProtocol: aws.String("String"), + SourceSecurityGroupName: aws.String("String"), + SourceSecurityGroupOwnerId: aws.String("String"), + ToPort: aws.Int64(1), + } + + benchEC2QueryBuild(b, "AuthorizeSecurityGroupEgress", params) +} + +func BenchmarkEC2QueryBuild_Simple_ec2AttachNetworkInterface(b *testing.B) { + params := &ec2.AttachNetworkInterfaceInput{ + DeviceIndex: aws.Int64(1), // Required + InstanceId: aws.String("String"), // Required + NetworkInterfaceId: aws.String("String"), // Required + DryRun: aws.Bool(true), + } + + benchEC2QueryBuild(b, "AttachNetworkInterface", params) +} + +func benchEC2QueryBuild(b *testing.B, opName string, params interface{}) { + svc := awstesting.NewClient() + svc.ServiceName = "ec2" + svc.APIVersion = "2015-04-15" + + for i := 0; i < b.N; i++ { + r := svc.NewRequest(&request.Operation{ + Name: opName, + HTTPMethod: "POST", + HTTPPath: "/", + }, params, nil) + ec2query.Build(r) + if r.Error != nil { + b.Fatal("Unexpected error", r.Error) + } + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/ec2query/build_test.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/ec2query/build_test.go new file mode 100644 index 000000000..5ada8421c --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/ec2query/build_test.go @@ -0,0 +1,983 @@ +package ec2query_test + +import ( + "bytes" + "encoding/json" + "encoding/xml" + "io" + "io/ioutil" + "net/http" + "net/url" + "testing" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/client" + "github.com/aws/aws-sdk-go/aws/client/metadata" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/aws/session" + "github.com/aws/aws-sdk-go/awstesting" + "github.com/aws/aws-sdk-go/private/protocol/ec2query" + "github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil" + "github.com/aws/aws-sdk-go/private/signer/v4" + "github.com/aws/aws-sdk-go/private/util" + "github.com/stretchr/testify/assert" +) + +var _ bytes.Buffer // always import bytes +var _ http.Request +var _ json.Marshaler +var _ time.Time +var _ xmlutil.XMLNode +var _ xml.Attr +var _ = awstesting.GenerateAssertions +var _ = ioutil.Discard +var _ = util.Trim("") +var _ = url.Values{} +var _ = io.EOF +var _ = aws.String + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type InputService1ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the InputService1ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a InputService1ProtocolTest client from just a session. +// svc := inputservice1protocoltest.New(mySession) +// +// // Create a InputService1ProtocolTest client with additional configuration +// svc := inputservice1protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewInputService1ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *InputService1ProtocolTest { + c := p.ClientConfig("inputservice1protocoltest", cfgs...) 
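+	// ClientConfig resolves the merged config, handlers, endpoint, and signing
+	// region for this service from the session (see Session.ClientConfig).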
+ return newInputService1ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newInputService1ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *InputService1ProtocolTest { + svc := &InputService1ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "inputservice1protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "2014-01-01", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Build.PushBack(ec2query.Build) + svc.Handlers.Unmarshal.PushBack(ec2query.Unmarshal) + svc.Handlers.UnmarshalMeta.PushBack(ec2query.UnmarshalMeta) + svc.Handlers.UnmarshalError.PushBack(ec2query.UnmarshalError) + + return svc +} + +// newRequest creates a new request for a InputService1ProtocolTest operation and runs any +// custom request initialization. +func (c *InputService1ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opInputService1TestCaseOperation1 = "OperationName" + +// InputService1TestCaseOperation1Request generates a request for the InputService1TestCaseOperation1 operation. +func (c *InputService1ProtocolTest) InputService1TestCaseOperation1Request(input *InputService1TestShapeInputService1TestCaseOperation1Input) (req *request.Request, output *InputService1TestShapeInputService1TestCaseOperation1Output) { + op := &request.Operation{ + Name: opInputService1TestCaseOperation1, + } + + if input == nil { + input = &InputService1TestShapeInputService1TestCaseOperation1Input{} + } + + req = c.newRequest(op, input, output) + output = &InputService1TestShapeInputService1TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *InputService1ProtocolTest) InputService1TestCaseOperation1(input *InputService1TestShapeInputService1TestCaseOperation1Input) (*InputService1TestShapeInputService1TestCaseOperation1Output, error) { + req, out := c.InputService1TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +type InputService1TestShapeInputService1TestCaseOperation1Input struct { + _ struct{} `type:"structure"` + + Bar *string `type:"string"` + + Foo *string `type:"string"` +} + +type InputService1TestShapeInputService1TestCaseOperation1Output struct { + _ struct{} `type:"structure"` +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type InputService2ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the InputService2ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a InputService2ProtocolTest client from just a session. +// svc := inputservice2protocoltest.New(mySession) +// +// // Create a InputService2ProtocolTest client with additional configuration +// svc := inputservice2protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewInputService2ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *InputService2ProtocolTest { + c := p.ClientConfig("inputservice2protocoltest", cfgs...) 
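Every generated client in this file queues the same five entries: v4.Sign onto the Sign list, and the four ec2query functions onto Build, Unmarshal, UnmarshalMeta and UnmarshalError. The only ordering guarantee this depends on is that a request.HandlerList runs its entries in push order, so a handler pushed after ec2query.Build always observes the fully built request. A self-contained sketch of that ordering; the handler bodies are placeholders:

    package main

    import (
        "fmt"

        "github.com/aws/aws-sdk-go/aws/request"
    )

    func main() {
        var build request.HandlerList

        // First entry: where a protocol builder such as ec2query.Build sits.
        build.PushBack(func(r *request.Request) {
            fmt.Println("1: serialize params into the HTTP request")
        })
        // Second entry: runs afterwards, seeing the already-built request.
        build.PushBack(func(r *request.Request) {
            fmt.Println("2: inspect or mutate the built request")
        })

        // Fire the list in push order, as the SDK does when sending.
        build.Run(&request.Request{})
    }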
+ return newInputService2ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newInputService2ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *InputService2ProtocolTest { + svc := &InputService2ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "inputservice2protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "2014-01-01", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Build.PushBack(ec2query.Build) + svc.Handlers.Unmarshal.PushBack(ec2query.Unmarshal) + svc.Handlers.UnmarshalMeta.PushBack(ec2query.UnmarshalMeta) + svc.Handlers.UnmarshalError.PushBack(ec2query.UnmarshalError) + + return svc +} + +// newRequest creates a new request for a InputService2ProtocolTest operation and runs any +// custom request initialization. +func (c *InputService2ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opInputService2TestCaseOperation1 = "OperationName" + +// InputService2TestCaseOperation1Request generates a request for the InputService2TestCaseOperation1 operation. +func (c *InputService2ProtocolTest) InputService2TestCaseOperation1Request(input *InputService2TestShapeInputService2TestCaseOperation1Input) (req *request.Request, output *InputService2TestShapeInputService2TestCaseOperation1Output) { + op := &request.Operation{ + Name: opInputService2TestCaseOperation1, + } + + if input == nil { + input = &InputService2TestShapeInputService2TestCaseOperation1Input{} + } + + req = c.newRequest(op, input, output) + output = &InputService2TestShapeInputService2TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *InputService2ProtocolTest) InputService2TestCaseOperation1(input *InputService2TestShapeInputService2TestCaseOperation1Input) (*InputService2TestShapeInputService2TestCaseOperation1Output, error) { + req, out := c.InputService2TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +type InputService2TestShapeInputService2TestCaseOperation1Input struct { + _ struct{} `type:"structure"` + + Bar *string `locationName:"barLocationName" type:"string"` + + Foo *string `type:"string"` + + Yuck *string `locationName:"yuckLocationName" queryName:"yuckQueryName" type:"string"` +} + +type InputService2TestShapeInputService2TestCaseOperation1Output struct { + _ struct{} `type:"structure"` +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type InputService3ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the InputService3ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a InputService3ProtocolTest client from just a session. +// svc := inputservice3protocoltest.New(mySession) +// +// // Create a InputService3ProtocolTest client with additional configuration +// svc := inputservice3protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewInputService3ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *InputService3ProtocolTest { + c := p.ClientConfig("inputservice3protocoltest", cfgs...) 
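InputService2's input shape above is a naming matrix for the ec2 flavor of the query protocol: an untagged member is serialized under its Go field name, a locationName is used with its first letter upper-cased, and a queryName overrides both and is emitted verbatim (locationName "yuckLocationName" is ignored for Yuck). The second test case further down asserts exactly this. Restating the tags next to the asserted encoding:

    // Same three members as InputService2TestShapeInputService2TestCaseOperation1Input.
    type renamedInput struct {
        _    struct{} `type:"structure"`
        Bar  *string  `locationName:"barLocationName" type:"string"`
        Foo  *string  `type:"string"`
        Yuck *string  `locationName:"yuckLocationName" queryName:"yuckQueryName" type:"string"`
    }

    // With Bar=val2, Foo=val1, Yuck=val3 the body asserted by the test is:
    //
    //   Action=OperationName&BarLocationName=val2&Foo=val1&Version=2014-01-01&yuckQueryName=val3
    //
    // i.e. barLocationName -> BarLocationName, yuckQueryName kept verbatim.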
+ return newInputService3ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newInputService3ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *InputService3ProtocolTest { + svc := &InputService3ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "inputservice3protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "2014-01-01", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Build.PushBack(ec2query.Build) + svc.Handlers.Unmarshal.PushBack(ec2query.Unmarshal) + svc.Handlers.UnmarshalMeta.PushBack(ec2query.UnmarshalMeta) + svc.Handlers.UnmarshalError.PushBack(ec2query.UnmarshalError) + + return svc +} + +// newRequest creates a new request for a InputService3ProtocolTest operation and runs any +// custom request initialization. +func (c *InputService3ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opInputService3TestCaseOperation1 = "OperationName" + +// InputService3TestCaseOperation1Request generates a request for the InputService3TestCaseOperation1 operation. +func (c *InputService3ProtocolTest) InputService3TestCaseOperation1Request(input *InputService3TestShapeInputService3TestCaseOperation1Input) (req *request.Request, output *InputService3TestShapeInputService3TestCaseOperation1Output) { + op := &request.Operation{ + Name: opInputService3TestCaseOperation1, + } + + if input == nil { + input = &InputService3TestShapeInputService3TestCaseOperation1Input{} + } + + req = c.newRequest(op, input, output) + output = &InputService3TestShapeInputService3TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *InputService3ProtocolTest) InputService3TestCaseOperation1(input *InputService3TestShapeInputService3TestCaseOperation1Input) (*InputService3TestShapeInputService3TestCaseOperation1Output, error) { + req, out := c.InputService3TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +type InputService3TestShapeInputService3TestCaseOperation1Input struct { + _ struct{} `type:"structure"` + + StructArg *InputService3TestShapeStructType `locationName:"Struct" type:"structure"` +} + +type InputService3TestShapeInputService3TestCaseOperation1Output struct { + _ struct{} `type:"structure"` +} + +type InputService3TestShapeStructType struct { + _ struct{} `type:"structure"` + + ScalarArg *string `locationName:"Scalar" type:"string"` +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type InputService4ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the InputService4ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a InputService4ProtocolTest client from just a session. 
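Each operation is generated as a pair: an OperationRequest method returning a request.Request whose Data field points at a preallocated output shape, and a blocking wrapper that calls req.Send() and returns that same output. Note the generated quirk visible in every Request method here: output is still nil when passed to newRequest, and only req.Data receives the allocated pointer afterwards. A fragment mirroring how the tests below drive the request form, assuming this test package's context and the same fixed test endpoint:

    sess := session.New()
    svc := NewInputService3ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")})

    // Request form: nothing is sent yet; the handler lists run only on Send().
    req, out := svc.InputService3TestCaseOperation1Request(&InputService3TestShapeInputService3TestCaseOperation1Input{
        StructArg: &InputService3TestShapeStructType{ScalarArg: aws.String("foo")},
    })
    if err := req.Send(); err == nil {
        _ = out // req.Data == out, populated by the Unmarshal handlers
    }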
+// svc := inputservice4protocoltest.New(mySession) +// +// // Create a InputService4ProtocolTest client with additional configuration +// svc := inputservice4protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewInputService4ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *InputService4ProtocolTest { + c := p.ClientConfig("inputservice4protocoltest", cfgs...) + return newInputService4ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newInputService4ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *InputService4ProtocolTest { + svc := &InputService4ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "inputservice4protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "2014-01-01", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Build.PushBack(ec2query.Build) + svc.Handlers.Unmarshal.PushBack(ec2query.Unmarshal) + svc.Handlers.UnmarshalMeta.PushBack(ec2query.UnmarshalMeta) + svc.Handlers.UnmarshalError.PushBack(ec2query.UnmarshalError) + + return svc +} + +// newRequest creates a new request for a InputService4ProtocolTest operation and runs any +// custom request initialization. +func (c *InputService4ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opInputService4TestCaseOperation1 = "OperationName" + +// InputService4TestCaseOperation1Request generates a request for the InputService4TestCaseOperation1 operation. +func (c *InputService4ProtocolTest) InputService4TestCaseOperation1Request(input *InputService4TestShapeInputService4TestCaseOperation1Input) (req *request.Request, output *InputService4TestShapeInputService4TestCaseOperation1Output) { + op := &request.Operation{ + Name: opInputService4TestCaseOperation1, + } + + if input == nil { + input = &InputService4TestShapeInputService4TestCaseOperation1Input{} + } + + req = c.newRequest(op, input, output) + output = &InputService4TestShapeInputService4TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *InputService4ProtocolTest) InputService4TestCaseOperation1(input *InputService4TestShapeInputService4TestCaseOperation1Input) (*InputService4TestShapeInputService4TestCaseOperation1Output, error) { + req, out := c.InputService4TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +type InputService4TestShapeInputService4TestCaseOperation1Input struct { + _ struct{} `type:"structure"` + + ListArg []*string `type:"list"` +} + +type InputService4TestShapeInputService4TestCaseOperation1Output struct { + _ struct{} `type:"structure"` +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type InputService5ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the InputService5ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a InputService5ProtocolTest client from just a session. 
+// svc := inputservice5protocoltest.New(mySession) +// +// // Create a InputService5ProtocolTest client with additional configuration +// svc := inputservice5protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewInputService5ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *InputService5ProtocolTest { + c := p.ClientConfig("inputservice5protocoltest", cfgs...) + return newInputService5ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newInputService5ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *InputService5ProtocolTest { + svc := &InputService5ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "inputservice5protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "2014-01-01", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Build.PushBack(ec2query.Build) + svc.Handlers.Unmarshal.PushBack(ec2query.Unmarshal) + svc.Handlers.UnmarshalMeta.PushBack(ec2query.UnmarshalMeta) + svc.Handlers.UnmarshalError.PushBack(ec2query.UnmarshalError) + + return svc +} + +// newRequest creates a new request for a InputService5ProtocolTest operation and runs any +// custom request initialization. +func (c *InputService5ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opInputService5TestCaseOperation1 = "OperationName" + +// InputService5TestCaseOperation1Request generates a request for the InputService5TestCaseOperation1 operation. +func (c *InputService5ProtocolTest) InputService5TestCaseOperation1Request(input *InputService5TestShapeInputService5TestCaseOperation1Input) (req *request.Request, output *InputService5TestShapeInputService5TestCaseOperation1Output) { + op := &request.Operation{ + Name: opInputService5TestCaseOperation1, + } + + if input == nil { + input = &InputService5TestShapeInputService5TestCaseOperation1Input{} + } + + req = c.newRequest(op, input, output) + output = &InputService5TestShapeInputService5TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *InputService5ProtocolTest) InputService5TestCaseOperation1(input *InputService5TestShapeInputService5TestCaseOperation1Input) (*InputService5TestShapeInputService5TestCaseOperation1Output, error) { + req, out := c.InputService5TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +type InputService5TestShapeInputService5TestCaseOperation1Input struct { + _ struct{} `type:"structure"` + + ListArg []*string `locationName:"ListMemberName" locationNameList:"item" type:"list"` +} + +type InputService5TestShapeInputService5TestCaseOperation1Output struct { + _ struct{} `type:"structure"` +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type InputService6ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the InputService6ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a InputService6ProtocolTest client from just a session. 
+// svc := inputservice6protocoltest.New(mySession) +// +// // Create a InputService6ProtocolTest client with additional configuration +// svc := inputservice6protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewInputService6ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *InputService6ProtocolTest { + c := p.ClientConfig("inputservice6protocoltest", cfgs...) + return newInputService6ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newInputService6ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *InputService6ProtocolTest { + svc := &InputService6ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "inputservice6protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "2014-01-01", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Build.PushBack(ec2query.Build) + svc.Handlers.Unmarshal.PushBack(ec2query.Unmarshal) + svc.Handlers.UnmarshalMeta.PushBack(ec2query.UnmarshalMeta) + svc.Handlers.UnmarshalError.PushBack(ec2query.UnmarshalError) + + return svc +} + +// newRequest creates a new request for a InputService6ProtocolTest operation and runs any +// custom request initialization. +func (c *InputService6ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opInputService6TestCaseOperation1 = "OperationName" + +// InputService6TestCaseOperation1Request generates a request for the InputService6TestCaseOperation1 operation. +func (c *InputService6ProtocolTest) InputService6TestCaseOperation1Request(input *InputService6TestShapeInputService6TestCaseOperation1Input) (req *request.Request, output *InputService6TestShapeInputService6TestCaseOperation1Output) { + op := &request.Operation{ + Name: opInputService6TestCaseOperation1, + } + + if input == nil { + input = &InputService6TestShapeInputService6TestCaseOperation1Input{} + } + + req = c.newRequest(op, input, output) + output = &InputService6TestShapeInputService6TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *InputService6ProtocolTest) InputService6TestCaseOperation1(input *InputService6TestShapeInputService6TestCaseOperation1Input) (*InputService6TestShapeInputService6TestCaseOperation1Output, error) { + req, out := c.InputService6TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +type InputService6TestShapeInputService6TestCaseOperation1Input struct { + _ struct{} `type:"structure"` + + ListArg []*string `locationName:"ListMemberName" queryName:"ListQueryName" locationNameList:"item" type:"list"` +} + +type InputService6TestShapeInputService6TestCaseOperation1Output struct { + _ struct{} `type:"structure"` +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type InputService7ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the InputService7ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a InputService7ProtocolTest client from just a session. 
+// svc := inputservice7protocoltest.New(mySession) +// +// // Create a InputService7ProtocolTest client with additional configuration +// svc := inputservice7protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewInputService7ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *InputService7ProtocolTest { + c := p.ClientConfig("inputservice7protocoltest", cfgs...) + return newInputService7ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newInputService7ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *InputService7ProtocolTest { + svc := &InputService7ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "inputservice7protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "2014-01-01", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Build.PushBack(ec2query.Build) + svc.Handlers.Unmarshal.PushBack(ec2query.Unmarshal) + svc.Handlers.UnmarshalMeta.PushBack(ec2query.UnmarshalMeta) + svc.Handlers.UnmarshalError.PushBack(ec2query.UnmarshalError) + + return svc +} + +// newRequest creates a new request for a InputService7ProtocolTest operation and runs any +// custom request initialization. +func (c *InputService7ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opInputService7TestCaseOperation1 = "OperationName" + +// InputService7TestCaseOperation1Request generates a request for the InputService7TestCaseOperation1 operation. +func (c *InputService7ProtocolTest) InputService7TestCaseOperation1Request(input *InputService7TestShapeInputService7TestCaseOperation1Input) (req *request.Request, output *InputService7TestShapeInputService7TestCaseOperation1Output) { + op := &request.Operation{ + Name: opInputService7TestCaseOperation1, + } + + if input == nil { + input = &InputService7TestShapeInputService7TestCaseOperation1Input{} + } + + req = c.newRequest(op, input, output) + output = &InputService7TestShapeInputService7TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *InputService7ProtocolTest) InputService7TestCaseOperation1(input *InputService7TestShapeInputService7TestCaseOperation1Input) (*InputService7TestShapeInputService7TestCaseOperation1Output, error) { + req, out := c.InputService7TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +type InputService7TestShapeInputService7TestCaseOperation1Input struct { + _ struct{} `type:"structure"` + + BlobArg []byte `type:"blob"` +} + +type InputService7TestShapeInputService7TestCaseOperation1Output struct { + _ struct{} `type:"structure"` +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type InputService8ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the InputService8ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a InputService8ProtocolTest client from just a session. 
+// svc := inputservice8protocoltest.New(mySession) +// +// // Create a InputService8ProtocolTest client with additional configuration +// svc := inputservice8protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewInputService8ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *InputService8ProtocolTest { + c := p.ClientConfig("inputservice8protocoltest", cfgs...) + return newInputService8ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newInputService8ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *InputService8ProtocolTest { + svc := &InputService8ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "inputservice8protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "2014-01-01", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Build.PushBack(ec2query.Build) + svc.Handlers.Unmarshal.PushBack(ec2query.Unmarshal) + svc.Handlers.UnmarshalMeta.PushBack(ec2query.UnmarshalMeta) + svc.Handlers.UnmarshalError.PushBack(ec2query.UnmarshalError) + + return svc +} + +// newRequest creates a new request for a InputService8ProtocolTest operation and runs any +// custom request initialization. +func (c *InputService8ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opInputService8TestCaseOperation1 = "OperationName" + +// InputService8TestCaseOperation1Request generates a request for the InputService8TestCaseOperation1 operation. +func (c *InputService8ProtocolTest) InputService8TestCaseOperation1Request(input *InputService8TestShapeInputService8TestCaseOperation1Input) (req *request.Request, output *InputService8TestShapeInputService8TestCaseOperation1Output) { + op := &request.Operation{ + Name: opInputService8TestCaseOperation1, + } + + if input == nil { + input = &InputService8TestShapeInputService8TestCaseOperation1Input{} + } + + req = c.newRequest(op, input, output) + output = &InputService8TestShapeInputService8TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *InputService8ProtocolTest) InputService8TestCaseOperation1(input *InputService8TestShapeInputService8TestCaseOperation1Input) (*InputService8TestShapeInputService8TestCaseOperation1Output, error) { + req, out := c.InputService8TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +type InputService8TestShapeInputService8TestCaseOperation1Input struct { + _ struct{} `type:"structure"` + + TimeArg *time.Time `type:"timestamp" timestampFormat:"iso8601"` +} + +type InputService8TestShapeInputService8TestCaseOperation1Output struct { + _ struct{} `type:"structure"` +} + +// +// Tests begin here +// + +func TestInputService1ProtocolTestScalarMembersCase1(t *testing.T) { + sess := session.New() + svc := NewInputService1ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + + input := &InputService1TestShapeInputService1TestCaseOperation1Input{ + Bar: aws.String("val2"), + Foo: aws.String("val1"), + } + req, _ := svc.InputService1TestCaseOperation1Request(input) + r := req.HTTPRequest + + // build request + ec2query.Build(req) + assert.NoError(t, req.Error) + + // assert body + assert.NotNil(t, r.Body) + body, _ := ioutil.ReadAll(r.Body) + awstesting.AssertQuery(t, 
`Action=OperationName&Bar=val2&Foo=val1&Version=2014-01-01`, util.Trim(string(body))) + + // assert URL + awstesting.AssertURL(t, "https://test/", r.URL.String()) + + // assert headers + +} + +func TestInputService2ProtocolTestStructureWithLocationNameAndQueryNameAppliedToMembersCase1(t *testing.T) { + sess := session.New() + svc := NewInputService2ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + + input := &InputService2TestShapeInputService2TestCaseOperation1Input{ + Bar: aws.String("val2"), + Foo: aws.String("val1"), + Yuck: aws.String("val3"), + } + req, _ := svc.InputService2TestCaseOperation1Request(input) + r := req.HTTPRequest + + // build request + ec2query.Build(req) + assert.NoError(t, req.Error) + + // assert body + assert.NotNil(t, r.Body) + body, _ := ioutil.ReadAll(r.Body) + awstesting.AssertQuery(t, `Action=OperationName&BarLocationName=val2&Foo=val1&Version=2014-01-01&yuckQueryName=val3`, util.Trim(string(body))) + + // assert URL + awstesting.AssertURL(t, "https://test/", r.URL.String()) + + // assert headers + +} + +func TestInputService3ProtocolTestNestedStructureMembersCase1(t *testing.T) { + sess := session.New() + svc := NewInputService3ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + + input := &InputService3TestShapeInputService3TestCaseOperation1Input{ + StructArg: &InputService3TestShapeStructType{ + ScalarArg: aws.String("foo"), + }, + } + req, _ := svc.InputService3TestCaseOperation1Request(input) + r := req.HTTPRequest + + // build request + ec2query.Build(req) + assert.NoError(t, req.Error) + + // assert body + assert.NotNil(t, r.Body) + body, _ := ioutil.ReadAll(r.Body) + awstesting.AssertQuery(t, `Action=OperationName&Struct.Scalar=foo&Version=2014-01-01`, util.Trim(string(body))) + + // assert URL + awstesting.AssertURL(t, "https://test/", r.URL.String()) + + // assert headers + +} + +func TestInputService4ProtocolTestListTypesCase1(t *testing.T) { + sess := session.New() + svc := NewInputService4ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + + input := &InputService4TestShapeInputService4TestCaseOperation1Input{ + ListArg: []*string{ + aws.String("foo"), + aws.String("bar"), + aws.String("baz"), + }, + } + req, _ := svc.InputService4TestCaseOperation1Request(input) + r := req.HTTPRequest + + // build request + ec2query.Build(req) + assert.NoError(t, req.Error) + + // assert body + assert.NotNil(t, r.Body) + body, _ := ioutil.ReadAll(r.Body) + awstesting.AssertQuery(t, `Action=OperationName&ListArg.1=foo&ListArg.2=bar&ListArg.3=baz&Version=2014-01-01`, util.Trim(string(body))) + + // assert URL + awstesting.AssertURL(t, "https://test/", r.URL.String()) + + // assert headers + +} + +func TestInputService5ProtocolTestListWithLocationNameAppliedToMemberCase1(t *testing.T) { + sess := session.New() + svc := NewInputService5ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + + input := &InputService5TestShapeInputService5TestCaseOperation1Input{ + ListArg: []*string{ + aws.String("a"), + aws.String("b"), + aws.String("c"), + }, + } + req, _ := svc.InputService5TestCaseOperation1Request(input) + r := req.HTTPRequest + + // build request + ec2query.Build(req) + assert.NoError(t, req.Error) + + // assert body + assert.NotNil(t, r.Body) + body, _ := ioutil.ReadAll(r.Body) + awstesting.AssertQuery(t, `Action=OperationName&ListMemberName.1=a&ListMemberName.2=b&ListMemberName.3=c&Version=2014-01-01`, util.Trim(string(body))) + + // assert URL + awstesting.AssertURL(t, 
"https://test/", r.URL.String()) + + // assert headers + +} + +func TestInputService6ProtocolTestListWithLocationNameAndQueryNameCase1(t *testing.T) { + sess := session.New() + svc := NewInputService6ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + + input := &InputService6TestShapeInputService6TestCaseOperation1Input{ + ListArg: []*string{ + aws.String("a"), + aws.String("b"), + aws.String("c"), + }, + } + req, _ := svc.InputService6TestCaseOperation1Request(input) + r := req.HTTPRequest + + // build request + ec2query.Build(req) + assert.NoError(t, req.Error) + + // assert body + assert.NotNil(t, r.Body) + body, _ := ioutil.ReadAll(r.Body) + awstesting.AssertQuery(t, `Action=OperationName&ListQueryName.1=a&ListQueryName.2=b&ListQueryName.3=c&Version=2014-01-01`, util.Trim(string(body))) + + // assert URL + awstesting.AssertURL(t, "https://test/", r.URL.String()) + + // assert headers + +} + +func TestInputService7ProtocolTestBase64EncodedBlobsCase1(t *testing.T) { + sess := session.New() + svc := NewInputService7ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + + input := &InputService7TestShapeInputService7TestCaseOperation1Input{ + BlobArg: []byte("foo"), + } + req, _ := svc.InputService7TestCaseOperation1Request(input) + r := req.HTTPRequest + + // build request + ec2query.Build(req) + assert.NoError(t, req.Error) + + // assert body + assert.NotNil(t, r.Body) + body, _ := ioutil.ReadAll(r.Body) + awstesting.AssertQuery(t, `Action=OperationName&BlobArg=Zm9v&Version=2014-01-01`, util.Trim(string(body))) + + // assert URL + awstesting.AssertURL(t, "https://test/", r.URL.String()) + + // assert headers + +} + +func TestInputService8ProtocolTestTimestampValuesCase1(t *testing.T) { + sess := session.New() + svc := NewInputService8ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + + input := &InputService8TestShapeInputService8TestCaseOperation1Input{ + TimeArg: aws.Time(time.Unix(1422172800, 0)), + } + req, _ := svc.InputService8TestCaseOperation1Request(input) + r := req.HTTPRequest + + // build request + ec2query.Build(req) + assert.NoError(t, req.Error) + + // assert body + assert.NotNil(t, r.Body) + body, _ := ioutil.ReadAll(r.Body) + awstesting.AssertQuery(t, `Action=OperationName&TimeArg=2015-01-25T08%3A00%3A00Z&Version=2014-01-01`, util.Trim(string(body))) + + // assert URL + awstesting.AssertURL(t, "https://test/", r.URL.String()) + + // assert headers + +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/ec2query/unmarshal.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/ec2query/unmarshal.go new file mode 100644 index 000000000..658190f70 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/ec2query/unmarshal.go @@ -0,0 +1,54 @@ +package ec2query + +//go:generate go run ../../../models/protocol_tests/generate.go ../../../models/protocol_tests/output/ec2.json unmarshal_test.go + +import ( + "encoding/xml" + "io" + + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil" +) + +// Unmarshal unmarshals a response body for the EC2 protocol. 
+func Unmarshal(r *request.Request) { + defer r.HTTPResponse.Body.Close() + if r.DataFilled() { + decoder := xml.NewDecoder(r.HTTPResponse.Body) + err := xmlutil.UnmarshalXML(r.Data, decoder, "") + if err != nil { + r.Error = awserr.New("SerializationError", "failed decoding EC2 Query response", err) + return + } + } +} + +// UnmarshalMeta unmarshals response headers for the EC2 protocol. +func UnmarshalMeta(r *request.Request) { + // TODO implement unmarshaling of request IDs +} + +type xmlErrorResponse struct { + XMLName xml.Name `xml:"Response"` + Code string `xml:"Errors>Error>Code"` + Message string `xml:"Errors>Error>Message"` + RequestID string `xml:"RequestId"` +} + +// UnmarshalError unmarshals a response error for the EC2 protocol. +func UnmarshalError(r *request.Request) { + defer r.HTTPResponse.Body.Close() + + resp := &xmlErrorResponse{} + err := xml.NewDecoder(r.HTTPResponse.Body).Decode(resp) + if err != nil && err != io.EOF { + r.Error = awserr.New("SerializationError", "failed decoding EC2 Query error response", err) + } else { + r.Error = awserr.NewRequestFailure( + awserr.New(resp.Code, resp.Message, nil), + r.HTTPResponse.StatusCode, + resp.RequestID, + ) + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/ec2query/unmarshal_test.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/ec2query/unmarshal_test.go new file mode 100644 index 000000000..938c184ca --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/ec2query/unmarshal_test.go @@ -0,0 +1,1056 @@ +package ec2query_test + +import ( + "bytes" + "encoding/json" + "encoding/xml" + "io" + "io/ioutil" + "net/http" + "net/url" + "testing" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/client" + "github.com/aws/aws-sdk-go/aws/client/metadata" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/aws/session" + "github.com/aws/aws-sdk-go/awstesting" + "github.com/aws/aws-sdk-go/private/protocol/ec2query" + "github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil" + "github.com/aws/aws-sdk-go/private/signer/v4" + "github.com/aws/aws-sdk-go/private/util" + "github.com/stretchr/testify/assert" +) + +var _ bytes.Buffer // always import bytes +var _ http.Request +var _ json.Marshaler +var _ time.Time +var _ xmlutil.XMLNode +var _ xml.Attr +var _ = awstesting.GenerateAssertions +var _ = ioutil.Discard +var _ = util.Trim("") +var _ = url.Values{} +var _ = io.EOF +var _ = aws.String + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type OutputService1ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the OutputService1ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a OutputService1ProtocolTest client from just a session. +// svc := outputservice1protocoltest.New(mySession) +// +// // Create a OutputService1ProtocolTest client with additional configuration +// svc := outputservice1protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewOutputService1ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *OutputService1ProtocolTest { + c := p.ClientConfig("outputservice1protocoltest", cfgs...) 
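UnmarshalError above leans on encoding/xml's nested-path tags: Errors>Error>Code descends through the wrapper elements without modeling them as separate structs, and io.EOF is tolerated so that an empty body still produces a RequestFailure carrying the HTTP status code. A stdlib-only sketch of the decode step; the payload is hypothetical, shaped the way the struct tags imply:

    package main

    import (
        "encoding/xml"
        "fmt"
        "strings"
    )

    // Mirror of the unexported xmlErrorResponse above.
    type xmlErrorResponse struct {
        XMLName   xml.Name `xml:"Response"`
        Code      string   `xml:"Errors>Error>Code"`
        Message   string   `xml:"Errors>Error>Message"`
        RequestID string   `xml:"RequestId"`
    }

    func main() {
        const payload = `<Response>
            <Errors><Error>
                <Code>InvalidInstanceID.NotFound</Code>
                <Message>The instance ID does not exist</Message>
            </Error></Errors>
            <RequestId>ea966190-example</RequestId>
        </Response>`

        resp := &xmlErrorResponse{}
        if err := xml.NewDecoder(strings.NewReader(payload)).Decode(resp); err != nil {
            panic(err)
        }
        fmt.Println(resp.Code, "/", resp.Message, "/", resp.RequestID)
        // UnmarshalError then wraps these via awserr.NewRequestFailure.
    }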
+ return newOutputService1ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newOutputService1ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *OutputService1ProtocolTest { + svc := &OutputService1ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "outputservice1protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Build.PushBack(ec2query.Build) + svc.Handlers.Unmarshal.PushBack(ec2query.Unmarshal) + svc.Handlers.UnmarshalMeta.PushBack(ec2query.UnmarshalMeta) + svc.Handlers.UnmarshalError.PushBack(ec2query.UnmarshalError) + + return svc +} + +// newRequest creates a new request for a OutputService1ProtocolTest operation and runs any +// custom request initialization. +func (c *OutputService1ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opOutputService1TestCaseOperation1 = "OperationName" + +// OutputService1TestCaseOperation1Request generates a request for the OutputService1TestCaseOperation1 operation. +func (c *OutputService1ProtocolTest) OutputService1TestCaseOperation1Request(input *OutputService1TestShapeOutputService1TestCaseOperation1Input) (req *request.Request, output *OutputService1TestShapeOutputService1TestCaseOperation1Output) { + op := &request.Operation{ + Name: opOutputService1TestCaseOperation1, + } + + if input == nil { + input = &OutputService1TestShapeOutputService1TestCaseOperation1Input{} + } + + req = c.newRequest(op, input, output) + output = &OutputService1TestShapeOutputService1TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *OutputService1ProtocolTest) OutputService1TestCaseOperation1(input *OutputService1TestShapeOutputService1TestCaseOperation1Input) (*OutputService1TestShapeOutputService1TestCaseOperation1Output, error) { + req, out := c.OutputService1TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +type OutputService1TestShapeOutputService1TestCaseOperation1Input struct { + _ struct{} `type:"structure"` +} + +type OutputService1TestShapeOutputService1TestCaseOperation1Output struct { + _ struct{} `type:"structure"` + + Char *string `type:"character"` + + Double *float64 `type:"double"` + + FalseBool *bool `type:"boolean"` + + Float *float64 `type:"float"` + + Long *int64 `type:"long"` + + Num *int64 `locationName:"FooNum" type:"integer"` + + Str *string `type:"string"` + + TrueBool *bool `type:"boolean"` +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type OutputService2ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the OutputService2ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a OutputService2ProtocolTest client from just a session. 
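OutputService1's output shape covers every scalar the unmarshaler handles, including a member renamed on the wire: Num carries locationName "FooNum", so it is populated from a FooNum element. Unmarshal feeds the body through xmlutil.UnmarshalXML with an empty wrapper, so the success path can be sketched with just a decoder over a canned body. The XML here is an assumption about the wire shape those tags imply, not a payload quoted from this file:

    package main

    import (
        "encoding/xml"
        "fmt"
        "strings"

        "github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil"
    )

    // demoOutput is a hypothetical cut-down of the generated output shape.
    type demoOutput struct {
        _   struct{} `type:"structure"`
        Num *int64   `locationName:"FooNum" type:"integer"`
        Str *string  `type:"string"`
    }

    func main() {
        // Assumed response body; element names follow the tags above.
        const payload = `<OperationNameResponse>
            <Str>myname</Str>
            <FooNum>123</FooNum>
        </OperationNameResponse>`

        out := &demoOutput{}
        decoder := xml.NewDecoder(strings.NewReader(payload))
        if err := xmlutil.UnmarshalXML(out, decoder, ""); err != nil {
            panic(err)
        }
        fmt.Println(*out.Str, *out.Num) // myname 123
    }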
+// svc := outputservice2protocoltest.New(mySession) +// +// // Create a OutputService2ProtocolTest client with additional configuration +// svc := outputservice2protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewOutputService2ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *OutputService2ProtocolTest { + c := p.ClientConfig("outputservice2protocoltest", cfgs...) + return newOutputService2ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newOutputService2ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *OutputService2ProtocolTest { + svc := &OutputService2ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "outputservice2protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Build.PushBack(ec2query.Build) + svc.Handlers.Unmarshal.PushBack(ec2query.Unmarshal) + svc.Handlers.UnmarshalMeta.PushBack(ec2query.UnmarshalMeta) + svc.Handlers.UnmarshalError.PushBack(ec2query.UnmarshalError) + + return svc +} + +// newRequest creates a new request for a OutputService2ProtocolTest operation and runs any +// custom request initialization. +func (c *OutputService2ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opOutputService2TestCaseOperation1 = "OperationName" + +// OutputService2TestCaseOperation1Request generates a request for the OutputService2TestCaseOperation1 operation. +func (c *OutputService2ProtocolTest) OutputService2TestCaseOperation1Request(input *OutputService2TestShapeOutputService2TestCaseOperation1Input) (req *request.Request, output *OutputService2TestShapeOutputService2TestCaseOperation1Output) { + op := &request.Operation{ + Name: opOutputService2TestCaseOperation1, + } + + if input == nil { + input = &OutputService2TestShapeOutputService2TestCaseOperation1Input{} + } + + req = c.newRequest(op, input, output) + output = &OutputService2TestShapeOutputService2TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *OutputService2ProtocolTest) OutputService2TestCaseOperation1(input *OutputService2TestShapeOutputService2TestCaseOperation1Input) (*OutputService2TestShapeOutputService2TestCaseOperation1Output, error) { + req, out := c.OutputService2TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +type OutputService2TestShapeOutputService2TestCaseOperation1Input struct { + _ struct{} `type:"structure"` +} + +type OutputService2TestShapeOutputService2TestCaseOperation1Output struct { + _ struct{} `type:"structure"` + + Blob []byte `type:"blob"` +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type OutputService3ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the OutputService3ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a OutputService3ProtocolTest client from just a session. 
+// svc := outputservice3protocoltest.New(mySession) +// +// // Create a OutputService3ProtocolTest client with additional configuration +// svc := outputservice3protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewOutputService3ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *OutputService3ProtocolTest { + c := p.ClientConfig("outputservice3protocoltest", cfgs...) + return newOutputService3ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newOutputService3ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *OutputService3ProtocolTest { + svc := &OutputService3ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "outputservice3protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Build.PushBack(ec2query.Build) + svc.Handlers.Unmarshal.PushBack(ec2query.Unmarshal) + svc.Handlers.UnmarshalMeta.PushBack(ec2query.UnmarshalMeta) + svc.Handlers.UnmarshalError.PushBack(ec2query.UnmarshalError) + + return svc +} + +// newRequest creates a new request for a OutputService3ProtocolTest operation and runs any +// custom request initialization. +func (c *OutputService3ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opOutputService3TestCaseOperation1 = "OperationName" + +// OutputService3TestCaseOperation1Request generates a request for the OutputService3TestCaseOperation1 operation. +func (c *OutputService3ProtocolTest) OutputService3TestCaseOperation1Request(input *OutputService3TestShapeOutputService3TestCaseOperation1Input) (req *request.Request, output *OutputService3TestShapeOutputService3TestCaseOperation1Output) { + op := &request.Operation{ + Name: opOutputService3TestCaseOperation1, + } + + if input == nil { + input = &OutputService3TestShapeOutputService3TestCaseOperation1Input{} + } + + req = c.newRequest(op, input, output) + output = &OutputService3TestShapeOutputService3TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *OutputService3ProtocolTest) OutputService3TestCaseOperation1(input *OutputService3TestShapeOutputService3TestCaseOperation1Input) (*OutputService3TestShapeOutputService3TestCaseOperation1Output, error) { + req, out := c.OutputService3TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +type OutputService3TestShapeOutputService3TestCaseOperation1Input struct { + _ struct{} `type:"structure"` +} + +type OutputService3TestShapeOutputService3TestCaseOperation1Output struct { + _ struct{} `type:"structure"` + + ListMember []*string `type:"list"` +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type OutputService4ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the OutputService4ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a OutputService4ProtocolTest client from just a session. 
+// svc := outputservice4protocoltest.New(mySession) +// +// // Create a OutputService4ProtocolTest client with additional configuration +// svc := outputservice4protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewOutputService4ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *OutputService4ProtocolTest { + c := p.ClientConfig("outputservice4protocoltest", cfgs...) + return newOutputService4ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newOutputService4ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *OutputService4ProtocolTest { + svc := &OutputService4ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "outputservice4protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Build.PushBack(ec2query.Build) + svc.Handlers.Unmarshal.PushBack(ec2query.Unmarshal) + svc.Handlers.UnmarshalMeta.PushBack(ec2query.UnmarshalMeta) + svc.Handlers.UnmarshalError.PushBack(ec2query.UnmarshalError) + + return svc +} + +// newRequest creates a new request for a OutputService4ProtocolTest operation and runs any +// custom request initialization. +func (c *OutputService4ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opOutputService4TestCaseOperation1 = "OperationName" + +// OutputService4TestCaseOperation1Request generates a request for the OutputService4TestCaseOperation1 operation. +func (c *OutputService4ProtocolTest) OutputService4TestCaseOperation1Request(input *OutputService4TestShapeOutputService4TestCaseOperation1Input) (req *request.Request, output *OutputService4TestShapeOutputService4TestCaseOperation1Output) { + op := &request.Operation{ + Name: opOutputService4TestCaseOperation1, + } + + if input == nil { + input = &OutputService4TestShapeOutputService4TestCaseOperation1Input{} + } + + req = c.newRequest(op, input, output) + output = &OutputService4TestShapeOutputService4TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *OutputService4ProtocolTest) OutputService4TestCaseOperation1(input *OutputService4TestShapeOutputService4TestCaseOperation1Input) (*OutputService4TestShapeOutputService4TestCaseOperation1Output, error) { + req, out := c.OutputService4TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +type OutputService4TestShapeOutputService4TestCaseOperation1Input struct { + _ struct{} `type:"structure"` +} + +type OutputService4TestShapeOutputService4TestCaseOperation1Output struct { + _ struct{} `type:"structure"` + + ListMember []*string `locationNameList:"item" type:"list"` +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type OutputService5ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the OutputService5ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a OutputService5ProtocolTest client from just a session. 
+// svc := outputservice5protocoltest.New(mySession) +// +// // Create a OutputService5ProtocolTest client with additional configuration +// svc := outputservice5protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewOutputService5ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *OutputService5ProtocolTest { + c := p.ClientConfig("outputservice5protocoltest", cfgs...) + return newOutputService5ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newOutputService5ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *OutputService5ProtocolTest { + svc := &OutputService5ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "outputservice5protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Build.PushBack(ec2query.Build) + svc.Handlers.Unmarshal.PushBack(ec2query.Unmarshal) + svc.Handlers.UnmarshalMeta.PushBack(ec2query.UnmarshalMeta) + svc.Handlers.UnmarshalError.PushBack(ec2query.UnmarshalError) + + return svc +} + +// newRequest creates a new request for a OutputService5ProtocolTest operation and runs any +// custom request initialization. +func (c *OutputService5ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opOutputService5TestCaseOperation1 = "OperationName" + +// OutputService5TestCaseOperation1Request generates a request for the OutputService5TestCaseOperation1 operation. +func (c *OutputService5ProtocolTest) OutputService5TestCaseOperation1Request(input *OutputService5TestShapeOutputService5TestCaseOperation1Input) (req *request.Request, output *OutputService5TestShapeOutputService5TestCaseOperation1Output) { + op := &request.Operation{ + Name: opOutputService5TestCaseOperation1, + } + + if input == nil { + input = &OutputService5TestShapeOutputService5TestCaseOperation1Input{} + } + + req = c.newRequest(op, input, output) + output = &OutputService5TestShapeOutputService5TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *OutputService5ProtocolTest) OutputService5TestCaseOperation1(input *OutputService5TestShapeOutputService5TestCaseOperation1Input) (*OutputService5TestShapeOutputService5TestCaseOperation1Output, error) { + req, out := c.OutputService5TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +type OutputService5TestShapeOutputService5TestCaseOperation1Input struct { + _ struct{} `type:"structure"` +} + +type OutputService5TestShapeOutputService5TestCaseOperation1Output struct { + _ struct{} `type:"structure"` + + ListMember []*string `type:"list" flattened:"true"` +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type OutputService6ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the OutputService6ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a OutputService6ProtocolTest client from just a session. 
+// svc := outputservice6protocoltest.New(mySession) +// +// // Create a OutputService6ProtocolTest client with additional configuration +// svc := outputservice6protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewOutputService6ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *OutputService6ProtocolTest { + c := p.ClientConfig("outputservice6protocoltest", cfgs...) + return newOutputService6ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newOutputService6ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *OutputService6ProtocolTest { + svc := &OutputService6ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "outputservice6protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Build.PushBack(ec2query.Build) + svc.Handlers.Unmarshal.PushBack(ec2query.Unmarshal) + svc.Handlers.UnmarshalMeta.PushBack(ec2query.UnmarshalMeta) + svc.Handlers.UnmarshalError.PushBack(ec2query.UnmarshalError) + + return svc +} + +// newRequest creates a new request for a OutputService6ProtocolTest operation and runs any +// custom request initialization. +func (c *OutputService6ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opOutputService6TestCaseOperation1 = "OperationName" + +// OutputService6TestCaseOperation1Request generates a request for the OutputService6TestCaseOperation1 operation. +func (c *OutputService6ProtocolTest) OutputService6TestCaseOperation1Request(input *OutputService6TestShapeOutputService6TestCaseOperation1Input) (req *request.Request, output *OutputService6TestShapeOutputService6TestCaseOperation1Output) { + op := &request.Operation{ + Name: opOutputService6TestCaseOperation1, + } + + if input == nil { + input = &OutputService6TestShapeOutputService6TestCaseOperation1Input{} + } + + req = c.newRequest(op, input, output) + output = &OutputService6TestShapeOutputService6TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *OutputService6ProtocolTest) OutputService6TestCaseOperation1(input *OutputService6TestShapeOutputService6TestCaseOperation1Input) (*OutputService6TestShapeOutputService6TestCaseOperation1Output, error) { + req, out := c.OutputService6TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +type OutputService6TestShapeOutputService6TestCaseOperation1Input struct { + _ struct{} `type:"structure"` +} + +type OutputService6TestShapeOutputService6TestCaseOperation1Output struct { + _ struct{} `type:"structure"` + + Map map[string]*OutputService6TestShapeStructureType `type:"map"` +} + +type OutputService6TestShapeStructureType struct { + _ struct{} `type:"structure"` + + Foo *string `locationName:"foo" type:"string"` +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type OutputService7ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the OutputService7ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. 
+// +// Example: +// // Create a OutputService7ProtocolTest client from just a session. +// svc := outputservice7protocoltest.New(mySession) +// +// // Create a OutputService7ProtocolTest client with additional configuration +// svc := outputservice7protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewOutputService7ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *OutputService7ProtocolTest { + c := p.ClientConfig("outputservice7protocoltest", cfgs...) + return newOutputService7ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newOutputService7ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *OutputService7ProtocolTest { + svc := &OutputService7ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "outputservice7protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Build.PushBack(ec2query.Build) + svc.Handlers.Unmarshal.PushBack(ec2query.Unmarshal) + svc.Handlers.UnmarshalMeta.PushBack(ec2query.UnmarshalMeta) + svc.Handlers.UnmarshalError.PushBack(ec2query.UnmarshalError) + + return svc +} + +// newRequest creates a new request for a OutputService7ProtocolTest operation and runs any +// custom request initialization. +func (c *OutputService7ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opOutputService7TestCaseOperation1 = "OperationName" + +// OutputService7TestCaseOperation1Request generates a request for the OutputService7TestCaseOperation1 operation. +func (c *OutputService7ProtocolTest) OutputService7TestCaseOperation1Request(input *OutputService7TestShapeOutputService7TestCaseOperation1Input) (req *request.Request, output *OutputService7TestShapeOutputService7TestCaseOperation1Output) { + op := &request.Operation{ + Name: opOutputService7TestCaseOperation1, + } + + if input == nil { + input = &OutputService7TestShapeOutputService7TestCaseOperation1Input{} + } + + req = c.newRequest(op, input, output) + output = &OutputService7TestShapeOutputService7TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *OutputService7ProtocolTest) OutputService7TestCaseOperation1(input *OutputService7TestShapeOutputService7TestCaseOperation1Input) (*OutputService7TestShapeOutputService7TestCaseOperation1Output, error) { + req, out := c.OutputService7TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +type OutputService7TestShapeOutputService7TestCaseOperation1Input struct { + _ struct{} `type:"structure"` +} + +type OutputService7TestShapeOutputService7TestCaseOperation1Output struct { + _ struct{} `type:"structure"` + + Map map[string]*string `type:"map" flattened:"true"` +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type OutputService8ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the OutputService8ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a OutputService8ProtocolTest client from just a session. 
+// svc := outputservice8protocoltest.New(mySession) +// +// // Create a OutputService8ProtocolTest client with additional configuration +// svc := outputservice8protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewOutputService8ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *OutputService8ProtocolTest { + c := p.ClientConfig("outputservice8protocoltest", cfgs...) + return newOutputService8ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newOutputService8ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *OutputService8ProtocolTest { + svc := &OutputService8ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "outputservice8protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Build.PushBack(ec2query.Build) + svc.Handlers.Unmarshal.PushBack(ec2query.Unmarshal) + svc.Handlers.UnmarshalMeta.PushBack(ec2query.UnmarshalMeta) + svc.Handlers.UnmarshalError.PushBack(ec2query.UnmarshalError) + + return svc +} + +// newRequest creates a new request for a OutputService8ProtocolTest operation and runs any +// custom request initialization. +func (c *OutputService8ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opOutputService8TestCaseOperation1 = "OperationName" + +// OutputService8TestCaseOperation1Request generates a request for the OutputService8TestCaseOperation1 operation. +func (c *OutputService8ProtocolTest) OutputService8TestCaseOperation1Request(input *OutputService8TestShapeOutputService8TestCaseOperation1Input) (req *request.Request, output *OutputService8TestShapeOutputService8TestCaseOperation1Output) { + op := &request.Operation{ + Name: opOutputService8TestCaseOperation1, + } + + if input == nil { + input = &OutputService8TestShapeOutputService8TestCaseOperation1Input{} + } + + req = c.newRequest(op, input, output) + output = &OutputService8TestShapeOutputService8TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *OutputService8ProtocolTest) OutputService8TestCaseOperation1(input *OutputService8TestShapeOutputService8TestCaseOperation1Input) (*OutputService8TestShapeOutputService8TestCaseOperation1Output, error) { + req, out := c.OutputService8TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +type OutputService8TestShapeOutputService8TestCaseOperation1Input struct { + _ struct{} `type:"structure"` +} + +type OutputService8TestShapeOutputService8TestCaseOperation1Output struct { + _ struct{} `type:"structure"` + + Map map[string]*string `locationNameKey:"foo" locationNameValue:"bar" type:"map" flattened:"true"` +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type OutputService9ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the OutputService9ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a OutputService9ProtocolTest client from just a session. 
+// svc := outputservice9protocoltest.New(mySession)
+//
+// // Create a OutputService9ProtocolTest client with additional configuration
+// svc := outputservice9protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2"))
+func NewOutputService9ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *OutputService9ProtocolTest {
+	c := p.ClientConfig("outputservice9protocoltest", cfgs...)
+	return newOutputService9ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion)
+}
+
+// newClient creates, initializes and returns a new service client instance.
+func newOutputService9ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *OutputService9ProtocolTest {
+	svc := &OutputService9ProtocolTest{
+		Client: client.New(
+			cfg,
+			metadata.ClientInfo{
+				ServiceName:   "outputservice9protocoltest",
+				SigningRegion: signingRegion,
+				Endpoint:      endpoint,
+				APIVersion:    "",
+			},
+			handlers,
+		),
+	}
+
+	// Handlers
+	svc.Handlers.Sign.PushBack(v4.Sign)
+	svc.Handlers.Build.PushBack(ec2query.Build)
+	svc.Handlers.Unmarshal.PushBack(ec2query.Unmarshal)
+	svc.Handlers.UnmarshalMeta.PushBack(ec2query.UnmarshalMeta)
+	svc.Handlers.UnmarshalError.PushBack(ec2query.UnmarshalError)
+
+	return svc
+}
+
+// newRequest creates a new request for a OutputService9ProtocolTest operation and runs any
+// custom request initialization.
+func (c *OutputService9ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request {
+	req := c.NewRequest(op, params, data)
+
+	return req
+}
+
+const opOutputService9TestCaseOperation1 = "OperationName"
+
+// OutputService9TestCaseOperation1Request generates a request for the OutputService9TestCaseOperation1 operation.
+func (c *OutputService9ProtocolTest) OutputService9TestCaseOperation1Request(input *OutputService9TestShapeOutputService9TestCaseOperation1Input) (req *request.Request, output *OutputService9TestShapeOutputService9TestCaseOperation1Output) {
+	op := &request.Operation{
+		Name: opOutputService9TestCaseOperation1,
+	}
+
+	if input == nil {
+		input = &OutputService9TestShapeOutputService9TestCaseOperation1Input{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &OutputService9TestShapeOutputService9TestCaseOperation1Output{}
+	req.Data = output
+	return
+}
+
+func (c *OutputService9ProtocolTest) OutputService9TestCaseOperation1(input *OutputService9TestShapeOutputService9TestCaseOperation1Input) (*OutputService9TestShapeOutputService9TestCaseOperation1Output, error) {
+	req, out := c.OutputService9TestCaseOperation1Request(input)
+	err := req.Send()
+	return out, err
+}
+
+type OutputService9TestShapeOutputService9TestCaseOperation1Input struct {
+	_ struct{} `type:"structure"`
+}
+
+type OutputService9TestShapeOutputService9TestCaseOperation1Output struct {
+	_ struct{} `type:"structure"`
+
+	Foo *string `type:"string"`
+}
+
+//
+// Tests begin here
+//
+
+func TestOutputService1ProtocolTestScalarMembersCase1(t *testing.T) {
+	sess := session.New()
+	svc := NewOutputService1ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")})
+
+	buf := bytes.NewReader([]byte("<OperationNameResponse><Str>myname</Str><FooNum>123</FooNum><FalseBool>false</FalseBool><TrueBool>true</TrueBool><Float>1.2</Float><Double>1.3</Double><Long>200</Long><Char>a</Char><RequestId>request-id</RequestId></OperationNameResponse>"))
+	req, out := svc.OutputService1TestCaseOperation1Request(nil)
+	req.HTTPResponse = &http.Response{StatusCode: 200, Body: ioutil.NopCloser(buf), Header: http.Header{}}
+
+	// set headers
+
+	// unmarshal response
+	ec2query.UnmarshalMeta(req)
+	ec2query.Unmarshal(req)
+	assert.NoError(t, req.Error)
+
+	// assert response
+	assert.NotNil(t, out) // ensure out variable is used
+	assert.Equal(t, "a", *out.Char)
+	assert.Equal(t, 1.3, *out.Double)
+	assert.Equal(t, false, *out.FalseBool)
+	assert.Equal(t, 1.2, *out.Float)
+	assert.Equal(t, int64(200), *out.Long)
+	assert.Equal(t, int64(123), *out.Num)
+	assert.Equal(t, "myname", *out.Str)
+	assert.Equal(t, true, *out.TrueBool)
+
+}
+
+func TestOutputService2ProtocolTestBlobCase1(t *testing.T) {
+	sess := session.New()
+	svc := NewOutputService2ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")})
+
+	buf := bytes.NewReader([]byte("<OperationNameResponse><Blob>dmFsdWU=</Blob><RequestId>requestid</RequestId></OperationNameResponse>"))
+	req, out := svc.OutputService2TestCaseOperation1Request(nil)
+	req.HTTPResponse = &http.Response{StatusCode: 200, Body: ioutil.NopCloser(buf), Header: http.Header{}}
+
+	// set headers
+
+	// unmarshal response
+	ec2query.UnmarshalMeta(req)
+	ec2query.Unmarshal(req)
+	assert.NoError(t, req.Error)
+
+	// assert response
+	assert.NotNil(t, out) // ensure out variable is used
+	assert.Equal(t, "value", string(out.Blob))
+
+}
+
+func TestOutputService3ProtocolTestListsCase1(t *testing.T) {
+	sess := session.New()
+	svc := NewOutputService3ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")})
+
+	buf := bytes.NewReader([]byte("<OperationNameResponse><ListMember><member>abc</member><member>123</member></ListMember><RequestId>requestid</RequestId></OperationNameResponse>"))
+	req, out := svc.OutputService3TestCaseOperation1Request(nil)
+	req.HTTPResponse = &http.Response{StatusCode: 200, Body: ioutil.NopCloser(buf), Header: http.Header{}}
+
+	// set headers
+
+	// unmarshal response
+	ec2query.UnmarshalMeta(req)
+	ec2query.Unmarshal(req)
+	assert.NoError(t, req.Error)
+
+	// assert response
+	assert.NotNil(t, out) // ensure out variable is used
+	assert.Equal(t, "abc", *out.ListMember[0])
+	assert.Equal(t, "123", *out.ListMember[1])
+
+}
+
+func TestOutputService4ProtocolTestListWithCustomMemberNameCase1(t *testing.T) {
+	sess := session.New()
+	svc := NewOutputService4ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")})
+
+	buf := bytes.NewReader([]byte("<OperationNameResponse><ListMember><item>abc</item><item>123</item></ListMember><RequestId>requestid</RequestId></OperationNameResponse>"))
+	req, out := svc.OutputService4TestCaseOperation1Request(nil)
+	req.HTTPResponse = &http.Response{StatusCode: 200, Body: ioutil.NopCloser(buf), Header: http.Header{}}
+
+	// set headers
+
+	// unmarshal response
+	ec2query.UnmarshalMeta(req)
+	ec2query.Unmarshal(req)
+	assert.NoError(t, req.Error)
+
+	// assert response
+	assert.NotNil(t, out) // ensure out variable is used
+	assert.Equal(t, "abc", *out.ListMember[0])
+	assert.Equal(t, "123", *out.ListMember[1])
+
+}
+
+func TestOutputService5ProtocolTestFlattenedListCase1(t *testing.T) {
+	sess := session.New()
+	svc := NewOutputService5ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")})
+
+	buf := bytes.NewReader([]byte("<OperationNameResponse><ListMember>abc</ListMember><ListMember>123</ListMember><RequestId>requestid</RequestId></OperationNameResponse>"))
+	req, out := svc.OutputService5TestCaseOperation1Request(nil)
+	req.HTTPResponse = &http.Response{StatusCode: 200, Body: ioutil.NopCloser(buf), Header: http.Header{}}
+
+	// set headers
+
+	// unmarshal response
+	ec2query.UnmarshalMeta(req)
+	ec2query.Unmarshal(req)
+	assert.NoError(t, req.Error)
+
+	// assert response
+	assert.NotNil(t, out) // ensure out variable is used
+	assert.Equal(t, "abc", *out.ListMember[0])
+	assert.Equal(t, "123", *out.ListMember[1])
+
+}
+
+func TestOutputService6ProtocolTestNormalMapCase1(t *testing.T) {
+	sess := session.New()
+	svc := NewOutputService6ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")})
+
+	buf := bytes.NewReader([]byte("<OperationNameResponse><Map><entry><key>qux</key><value><foo>bar</foo></value></entry><entry><key>baz</key><value><foo>bam</foo></value></entry></Map><RequestId>requestid</RequestId></OperationNameResponse>"))
+	req, out := svc.OutputService6TestCaseOperation1Request(nil)
+	req.HTTPResponse = &http.Response{StatusCode: 200, Body: ioutil.NopCloser(buf), Header: http.Header{}}
+
+	// set headers
+
+	// unmarshal response
+	ec2query.UnmarshalMeta(req)
+	ec2query.Unmarshal(req)
+	assert.NoError(t, req.Error)
+
+	// assert response
+	assert.NotNil(t, out) // ensure out variable is used
+	assert.Equal(t, "bam", *out.Map["baz"].Foo)
+	assert.Equal(t, "bar", *out.Map["qux"].Foo)
+
+}
+
+func TestOutputService7ProtocolTestFlattenedMapCase1(t *testing.T) {
+	sess := session.New()
+	svc := NewOutputService7ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")})
+
+	buf := bytes.NewReader([]byte("<OperationNameResponse><Map><key>qux</key><value>bar</value></Map><Map><key>baz</key><value>bam</value></Map><RequestId>requestid</RequestId></OperationNameResponse>"))
+	req, out := svc.OutputService7TestCaseOperation1Request(nil)
+	req.HTTPResponse = &http.Response{StatusCode: 200, Body: ioutil.NopCloser(buf), Header: http.Header{}}
+
+	// set headers
+
+	// unmarshal response
+	ec2query.UnmarshalMeta(req)
+	ec2query.Unmarshal(req)
+	assert.NoError(t, req.Error)
+
+	// assert response
+	assert.NotNil(t, out) // ensure out variable is used
+	assert.Equal(t, "bam", *out.Map["baz"])
+	assert.Equal(t, "bar", *out.Map["qux"])
+
+}
+
+func TestOutputService8ProtocolTestNamedMapCase1(t *testing.T) {
+	sess := session.New()
+	svc := NewOutputService8ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")})
+
+	buf := bytes.NewReader([]byte("<OperationNameResponse><Map><foo>qux</foo><bar>bar</bar></Map><Map><foo>baz</foo><bar>bam</bar></Map><RequestId>requestid</RequestId></OperationNameResponse>"))
+	req, out := svc.OutputService8TestCaseOperation1Request(nil)
+	req.HTTPResponse = &http.Response{StatusCode: 200, Body: ioutil.NopCloser(buf), Header: http.Header{}}
+
+	// set headers
+
+	// unmarshal response
+	ec2query.UnmarshalMeta(req)
+	ec2query.Unmarshal(req)
+	assert.NoError(t, req.Error)
+
+	// assert response
+	assert.NotNil(t, out) // ensure out variable is used
+	assert.Equal(t, "bam", *out.Map["baz"])
+	assert.Equal(t, "bar", *out.Map["qux"])
+
+}
+
+func TestOutputService9ProtocolTestEmptyStringCase1(t *testing.T) {
+	sess := session.New()
+	svc := NewOutputService9ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")})
+
+	buf := bytes.NewReader([]byte("<OperationNameResponse><Foo/><RequestId>requestid</RequestId></OperationNameResponse>"))
+	req, out := svc.OutputService9TestCaseOperation1Request(nil)
+	req.HTTPResponse = &http.Response{StatusCode: 200, Body: ioutil.NopCloser(buf), Header: http.Header{}}
+
+	// set headers
+
+	// unmarshal response
+	ec2query.UnmarshalMeta(req)
+	ec2query.Unmarshal(req)
+	assert.NoError(t, req.Error)
+
+	// assert response
+	assert.NotNil(t, out) // ensure out variable is used
+	assert.Equal(t, "", *out.Foo)
+
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/json/jsonutil/build.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/json/jsonutil/build.go
new file mode 100644
index 000000000..870707c6a
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/json/jsonutil/build.go
@@ -0,0 +1,243 @@
+// Package jsonutil provides JSON serialisation of AWS requests and responses.
+package jsonutil
+
+import (
+	"bytes"
+	"encoding/base64"
+	"fmt"
+	"reflect"
+	"sort"
+	"strconv"
+	"time"
+)
+
+var timeType = reflect.ValueOf(time.Time{}).Type()
+var byteSliceType = reflect.ValueOf([]byte{}).Type()
+
+// BuildJSON builds a JSON string for a given object v.
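+//
+// For illustration, a minimal sketch of the tag-driven encoding (the Item
+// type and its values below are hypothetical, not part of this package):
+//
+//	type Item struct {
+//		Name *string `type:"string"`
+//	}
+//	name := "foo"
+//	b, err := BuildJSON(&Item{Name: &name})
+//	// on success, b is []byte(`{"Name":"foo"}`)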
+func BuildJSON(v interface{}) ([]byte, error) { + var buf bytes.Buffer + + err := buildAny(reflect.ValueOf(v), &buf, "") + return buf.Bytes(), err +} + +func buildAny(value reflect.Value, buf *bytes.Buffer, tag reflect.StructTag) error { + value = reflect.Indirect(value) + if !value.IsValid() { + return nil + } + + vtype := value.Type() + + t := tag.Get("type") + if t == "" { + switch vtype.Kind() { + case reflect.Struct: + // also it can't be a time object + if value.Type() != timeType { + t = "structure" + } + case reflect.Slice: + // also it can't be a byte slice + if _, ok := value.Interface().([]byte); !ok { + t = "list" + } + case reflect.Map: + t = "map" + } + } + + switch t { + case "structure": + if field, ok := vtype.FieldByName("_"); ok { + tag = field.Tag + } + return buildStruct(value, buf, tag) + case "list": + return buildList(value, buf, tag) + case "map": + return buildMap(value, buf, tag) + default: + return buildScalar(value, buf, tag) + } +} + +func buildStruct(value reflect.Value, buf *bytes.Buffer, tag reflect.StructTag) error { + if !value.IsValid() { + return nil + } + + // unwrap payloads + if payload := tag.Get("payload"); payload != "" { + field, _ := value.Type().FieldByName(payload) + tag = field.Tag + value = elemOf(value.FieldByName(payload)) + + if !value.IsValid() { + return nil + } + } + + buf.WriteByte('{') + + t := value.Type() + first := true + for i := 0; i < t.NumField(); i++ { + member := value.Field(i) + if (member.Kind() == reflect.Ptr || member.Kind() == reflect.Slice || member.Kind() == reflect.Map) && member.IsNil() { + continue // ignore unset fields + } + + field := t.Field(i) + if field.PkgPath != "" { + continue // ignore unexported fields + } + if field.Tag.Get("json") == "-" { + continue + } + if field.Tag.Get("location") != "" { + continue // ignore non-body elements + } + + if first { + first = false + } else { + buf.WriteByte(',') + } + + // figure out what this field is called + name := field.Name + if locName := field.Tag.Get("locationName"); locName != "" { + name = locName + } + + fmt.Fprintf(buf, "%q:", name) + + err := buildAny(member, buf, field.Tag) + if err != nil { + return err + } + + } + + buf.WriteString("}") + + return nil +} + +func buildList(value reflect.Value, buf *bytes.Buffer, tag reflect.StructTag) error { + buf.WriteString("[") + + for i := 0; i < value.Len(); i++ { + buildAny(value.Index(i), buf, "") + + if i < value.Len()-1 { + buf.WriteString(",") + } + } + + buf.WriteString("]") + + return nil +} + +type sortedValues []reflect.Value + +func (sv sortedValues) Len() int { return len(sv) } +func (sv sortedValues) Swap(i, j int) { sv[i], sv[j] = sv[j], sv[i] } +func (sv sortedValues) Less(i, j int) bool { return sv[i].String() < sv[j].String() } + +func buildMap(value reflect.Value, buf *bytes.Buffer, tag reflect.StructTag) error { + buf.WriteString("{") + + var sv sortedValues = value.MapKeys() + sort.Sort(sv) + + for i, k := range sv { + if i > 0 { + buf.WriteByte(',') + } + + fmt.Fprintf(buf, "%q:", k) + buildAny(value.MapIndex(k), buf, "") + } + + buf.WriteString("}") + + return nil +} + +func buildScalar(value reflect.Value, buf *bytes.Buffer, tag reflect.StructTag) error { + switch value.Kind() { + case reflect.String: + writeString(value.String(), buf) + case reflect.Bool: + buf.WriteString(strconv.FormatBool(value.Bool())) + case reflect.Int64: + buf.WriteString(strconv.FormatInt(value.Int(), 10)) + case reflect.Float64: + buf.WriteString(strconv.FormatFloat(value.Float(), 'f', -1, 64)) + default: + switch 
value.Type() { + case timeType: + converted := value.Interface().(time.Time) + buf.WriteString(strconv.FormatInt(converted.UTC().Unix(), 10)) + case byteSliceType: + if !value.IsNil() { + converted := value.Interface().([]byte) + buf.WriteByte('"') + if len(converted) < 1024 { + // for small buffers, using Encode directly is much faster. + dst := make([]byte, base64.StdEncoding.EncodedLen(len(converted))) + base64.StdEncoding.Encode(dst, converted) + buf.Write(dst) + } else { + // for large buffers, avoid unnecessary extra temporary + // buffer space. + enc := base64.NewEncoder(base64.StdEncoding, buf) + enc.Write(converted) + enc.Close() + } + buf.WriteByte('"') + } + default: + return fmt.Errorf("unsupported JSON value %v (%s)", value.Interface(), value.Type()) + } + } + return nil +} + +func writeString(s string, buf *bytes.Buffer) { + buf.WriteByte('"') + for _, r := range s { + if r == '"' { + buf.WriteString(`\"`) + } else if r == '\\' { + buf.WriteString(`\\`) + } else if r == '\b' { + buf.WriteString(`\b`) + } else if r == '\f' { + buf.WriteString(`\f`) + } else if r == '\r' { + buf.WriteString(`\r`) + } else if r == '\t' { + buf.WriteString(`\t`) + } else if r == '\n' { + buf.WriteString(`\n`) + } else if r < 32 { + fmt.Fprintf(buf, "\\u%0.4x", r) + } else { + buf.WriteRune(r) + } + } + buf.WriteByte('"') +} + +// Returns the reflection element of a value, if it is a pointer. +func elemOf(value reflect.Value) reflect.Value { + for value.Kind() == reflect.Ptr { + value = value.Elem() + } + return value +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/json/jsonutil/build_test.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/json/jsonutil/build_test.go new file mode 100644 index 000000000..cb9cc458f --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/json/jsonutil/build_test.go @@ -0,0 +1,100 @@ +package jsonutil_test + +import ( + "encoding/json" + "testing" + "time" + + "github.com/aws/aws-sdk-go/private/protocol/json/jsonutil" + "github.com/stretchr/testify/assert" +) + +func S(s string) *string { + return &s +} + +func D(s int64) *int64 { + return &s +} + +func F(s float64) *float64 { + return &s +} + +func T(s time.Time) *time.Time { + return &s +} + +type J struct { + S *string + SS []string + D *int64 + F *float64 + T *time.Time +} + +var jsonTests = []struct { + in interface{} + out string + err string +}{ + { + J{}, + `{}`, + ``, + }, + { + J{ + S: S("str"), + SS: []string{"A", "B", "C"}, + D: D(123), + F: F(4.56), + T: T(time.Unix(987, 0)), + }, + `{"S":"str","SS":["A","B","C"],"D":123,"F":4.56,"T":987}`, + ``, + }, + { + J{ + S: S(`"''"`), + }, + `{"S":"\"''\""}`, + ``, + }, + { + J{ + S: S("\x00føø\u00FF\n\\\"\r\t\b\f"), + }, + `{"S":"\u0000føøÿ\n\\\"\r\t\b\f"}`, + ``, + }, +} + +func TestBuildJSON(t *testing.T) { + for _, test := range jsonTests { + out, err := jsonutil.BuildJSON(test.in) + if test.err != "" { + assert.Error(t, err) + assert.Contains(t, err.Error(), test.err) + } else { + assert.NoError(t, err) + assert.Equal(t, string(out), test.out) + } + } +} + +func BenchmarkBuildJSON(b *testing.B) { + for i := 0; i < b.N; i++ { + for _, test := range jsonTests { + jsonutil.BuildJSON(test.in) + } + } +} + +func BenchmarkStdlibJSON(b *testing.B) { + for i := 0; i < b.N; i++ { + for _, test := range jsonTests { + json.Marshal(test.in) + } + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/json/jsonutil/unmarshal.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/json/jsonutil/unmarshal.go new file mode 
100644 index 000000000..fea535613 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/json/jsonutil/unmarshal.go @@ -0,0 +1,213 @@ +package jsonutil + +import ( + "encoding/base64" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "reflect" + "time" +) + +// UnmarshalJSON reads a stream and unmarshals the results in object v. +func UnmarshalJSON(v interface{}, stream io.Reader) error { + var out interface{} + + b, err := ioutil.ReadAll(stream) + if err != nil { + return err + } + + if len(b) == 0 { + return nil + } + + if err := json.Unmarshal(b, &out); err != nil { + return err + } + + return unmarshalAny(reflect.ValueOf(v), out, "") +} + +func unmarshalAny(value reflect.Value, data interface{}, tag reflect.StructTag) error { + vtype := value.Type() + if vtype.Kind() == reflect.Ptr { + vtype = vtype.Elem() // check kind of actual element type + } + + t := tag.Get("type") + if t == "" { + switch vtype.Kind() { + case reflect.Struct: + // also it can't be a time object + if _, ok := value.Interface().(*time.Time); !ok { + t = "structure" + } + case reflect.Slice: + // also it can't be a byte slice + if _, ok := value.Interface().([]byte); !ok { + t = "list" + } + case reflect.Map: + t = "map" + } + } + + switch t { + case "structure": + if field, ok := vtype.FieldByName("_"); ok { + tag = field.Tag + } + return unmarshalStruct(value, data, tag) + case "list": + return unmarshalList(value, data, tag) + case "map": + return unmarshalMap(value, data, tag) + default: + return unmarshalScalar(value, data, tag) + } +} + +func unmarshalStruct(value reflect.Value, data interface{}, tag reflect.StructTag) error { + if data == nil { + return nil + } + mapData, ok := data.(map[string]interface{}) + if !ok { + return fmt.Errorf("JSON value is not a structure (%#v)", data) + } + + t := value.Type() + if value.Kind() == reflect.Ptr { + if value.IsNil() { // create the structure if it's nil + s := reflect.New(value.Type().Elem()) + value.Set(s) + value = s + } + + value = value.Elem() + t = t.Elem() + } + + // unwrap any payloads + if payload := tag.Get("payload"); payload != "" { + field, _ := t.FieldByName(payload) + return unmarshalAny(value.FieldByName(payload), data, field.Tag) + } + + for i := 0; i < t.NumField(); i++ { + field := t.Field(i) + if field.PkgPath != "" { + continue // ignore unexported fields + } + + // figure out what this field is called + name := field.Name + if locName := field.Tag.Get("locationName"); locName != "" { + name = locName + } + + member := value.FieldByIndex(field.Index) + err := unmarshalAny(member, mapData[name], field.Tag) + if err != nil { + return err + } + } + return nil +} + +func unmarshalList(value reflect.Value, data interface{}, tag reflect.StructTag) error { + if data == nil { + return nil + } + listData, ok := data.([]interface{}) + if !ok { + return fmt.Errorf("JSON value is not a list (%#v)", data) + } + + if value.IsNil() { + l := len(listData) + value.Set(reflect.MakeSlice(value.Type(), l, l)) + } + + for i, c := range listData { + err := unmarshalAny(value.Index(i), c, "") + if err != nil { + return err + } + } + + return nil +} + +func unmarshalMap(value reflect.Value, data interface{}, tag reflect.StructTag) error { + if data == nil { + return nil + } + mapData, ok := data.(map[string]interface{}) + if !ok { + return fmt.Errorf("JSON value is not a map (%#v)", data) + } + + if value.IsNil() { + value.Set(reflect.MakeMap(value.Type())) + } + + for k, v := range mapData { + kvalue := reflect.ValueOf(k) + vvalue := 
reflect.New(value.Type().Elem()).Elem() + + unmarshalAny(vvalue, v, "") + value.SetMapIndex(kvalue, vvalue) + } + + return nil +} + +func unmarshalScalar(value reflect.Value, data interface{}, tag reflect.StructTag) error { + errf := func() error { + return fmt.Errorf("unsupported value: %v (%s)", value.Interface(), value.Type()) + } + + switch d := data.(type) { + case nil: + return nil // nothing to do here + case string: + switch value.Interface().(type) { + case *string: + value.Set(reflect.ValueOf(&d)) + case []byte: + b, err := base64.StdEncoding.DecodeString(d) + if err != nil { + return err + } + value.Set(reflect.ValueOf(b)) + default: + return errf() + } + case float64: + switch value.Interface().(type) { + case *int64: + di := int64(d) + value.Set(reflect.ValueOf(&di)) + case *float64: + value.Set(reflect.ValueOf(&d)) + case *time.Time: + t := time.Unix(int64(d), 0).UTC() + value.Set(reflect.ValueOf(&t)) + default: + return errf() + } + case bool: + switch value.Interface().(type) { + case *bool: + value.Set(reflect.ValueOf(&d)) + default: + return errf() + } + default: + return fmt.Errorf("unsupported JSON value (%v)", data) + } + return nil +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/jsonrpc/build_bench_test.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/jsonrpc/build_bench_test.go new file mode 100644 index 000000000..563caa05c --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/jsonrpc/build_bench_test.go @@ -0,0 +1,71 @@ +// +build bench + +package jsonrpc_test + +import ( + "bytes" + "encoding/json" + "testing" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/awstesting" + "github.com/aws/aws-sdk-go/private/protocol/json/jsonutil" + "github.com/aws/aws-sdk-go/private/protocol/jsonrpc" + "github.com/aws/aws-sdk-go/service/dynamodb" + "github.com/aws/aws-sdk-go/service/dynamodb/dynamodbattribute" +) + +func BenchmarkJSONRPCBuild_Simple_dynamodbPutItem(b *testing.B) { + svc := awstesting.NewClient() + + params := getDynamodbPutItemParams(b) + + for i := 0; i < b.N; i++ { + r := svc.NewRequest(&request.Operation{Name: "Operation"}, params, nil) + jsonrpc.Build(r) + if r.Error != nil { + b.Fatal("Unexpected error", r.Error) + } + } +} + +func BenchmarkJSONUtilBuild_Simple_dynamodbPutItem(b *testing.B) { + svc := awstesting.NewClient() + + params := getDynamodbPutItemParams(b) + + for i := 0; i < b.N; i++ { + r := svc.NewRequest(&request.Operation{Name: "Operation"}, params, nil) + _, err := jsonutil.BuildJSON(r.Params) + if err != nil { + b.Fatal("Unexpected error", err) + } + } +} + +func BenchmarkEncodingJSONMarshal_Simple_dynamodbPutItem(b *testing.B) { + params := getDynamodbPutItemParams(b) + + for i := 0; i < b.N; i++ { + buf := &bytes.Buffer{} + encoder := json.NewEncoder(buf) + if err := encoder.Encode(params); err != nil { + b.Fatal("Unexpected error", err) + } + } +} + +func getDynamodbPutItemParams(b *testing.B) *dynamodb.PutItemInput { + av, err := dynamodbattribute.ConvertToMap(struct { + Key string + Data string + }{Key: "MyKey", Data: "MyData"}) + if err != nil { + b.Fatal("benchPutItem, expect no ConvertToMap errors", err) + } + return &dynamodb.PutItemInput{ + Item: av, + TableName: aws.String("tablename"), + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/jsonrpc/build_test.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/jsonrpc/build_test.go new file mode 100644 index 000000000..24bf4b046 --- /dev/null +++ 
b/vendor/github.com/aws/aws-sdk-go/private/protocol/jsonrpc/build_test.go @@ -0,0 +1,1152 @@ +package jsonrpc_test + +import ( + "bytes" + "encoding/json" + "encoding/xml" + "io" + "io/ioutil" + "net/http" + "net/url" + "testing" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/client" + "github.com/aws/aws-sdk-go/aws/client/metadata" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/aws/session" + "github.com/aws/aws-sdk-go/awstesting" + "github.com/aws/aws-sdk-go/private/protocol/jsonrpc" + "github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil" + "github.com/aws/aws-sdk-go/private/signer/v4" + "github.com/aws/aws-sdk-go/private/util" + "github.com/stretchr/testify/assert" +) + +var _ bytes.Buffer // always import bytes +var _ http.Request +var _ json.Marshaler +var _ time.Time +var _ xmlutil.XMLNode +var _ xml.Attr +var _ = awstesting.GenerateAssertions +var _ = ioutil.Discard +var _ = util.Trim("") +var _ = url.Values{} +var _ = io.EOF +var _ = aws.String + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type InputService1ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the InputService1ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a InputService1ProtocolTest client from just a session. +// svc := inputservice1protocoltest.New(mySession) +// +// // Create a InputService1ProtocolTest client with additional configuration +// svc := inputservice1protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewInputService1ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *InputService1ProtocolTest { + c := p.ClientConfig("inputservice1protocoltest", cfgs...) + return newInputService1ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newInputService1ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *InputService1ProtocolTest { + svc := &InputService1ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "inputservice1protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "", + JSONVersion: "1.1", + TargetPrefix: "com.amazonaws.foo", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Build.PushBack(jsonrpc.Build) + svc.Handlers.Unmarshal.PushBack(jsonrpc.Unmarshal) + svc.Handlers.UnmarshalMeta.PushBack(jsonrpc.UnmarshalMeta) + svc.Handlers.UnmarshalError.PushBack(jsonrpc.UnmarshalError) + + return svc +} + +// newRequest creates a new request for a InputService1ProtocolTest operation and runs any +// custom request initialization. +func (c *InputService1ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opInputService1TestCaseOperation1 = "OperationName" + +// InputService1TestCaseOperation1Request generates a request for the InputService1TestCaseOperation1 operation. 
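+//
+// The Request method only builds the request; it is sent by calling Send on
+// the returned request, as the InputService1TestCaseOperation1 wrapper below
+// does. A hedged sketch of the two-step pattern (mySession and the error
+// handling are placeholders):
+//
+//	svc := NewInputService1ProtocolTest(mySession)
+//	req, out := svc.InputService1TestCaseOperation1Request(
+//		&InputService1TestShapeInputService1TestCaseOperation1Input{Name: aws.String("myname")})
+//	err := req.Send() // out is populated once Send returns without error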
+func (c *InputService1ProtocolTest) InputService1TestCaseOperation1Request(input *InputService1TestShapeInputService1TestCaseOperation1Input) (req *request.Request, output *InputService1TestShapeInputService1TestCaseOperation1Output) { + op := &request.Operation{ + Name: opInputService1TestCaseOperation1, + HTTPMethod: "POST", + } + + if input == nil { + input = &InputService1TestShapeInputService1TestCaseOperation1Input{} + } + + req = c.newRequest(op, input, output) + output = &InputService1TestShapeInputService1TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *InputService1ProtocolTest) InputService1TestCaseOperation1(input *InputService1TestShapeInputService1TestCaseOperation1Input) (*InputService1TestShapeInputService1TestCaseOperation1Output, error) { + req, out := c.InputService1TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +type InputService1TestShapeInputService1TestCaseOperation1Input struct { + _ struct{} `type:"structure"` + + Name *string `type:"string"` +} + +type InputService1TestShapeInputService1TestCaseOperation1Output struct { + _ struct{} `type:"structure"` +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type InputService2ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the InputService2ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a InputService2ProtocolTest client from just a session. +// svc := inputservice2protocoltest.New(mySession) +// +// // Create a InputService2ProtocolTest client with additional configuration +// svc := inputservice2protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewInputService2ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *InputService2ProtocolTest { + c := p.ClientConfig("inputservice2protocoltest", cfgs...) + return newInputService2ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newInputService2ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *InputService2ProtocolTest { + svc := &InputService2ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "inputservice2protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "", + JSONVersion: "1.1", + TargetPrefix: "com.amazonaws.foo", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Build.PushBack(jsonrpc.Build) + svc.Handlers.Unmarshal.PushBack(jsonrpc.Unmarshal) + svc.Handlers.UnmarshalMeta.PushBack(jsonrpc.UnmarshalMeta) + svc.Handlers.UnmarshalError.PushBack(jsonrpc.UnmarshalError) + + return svc +} + +// newRequest creates a new request for a InputService2ProtocolTest operation and runs any +// custom request initialization. +func (c *InputService2ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opInputService2TestCaseOperation1 = "OperationName" + +// InputService2TestCaseOperation1Request generates a request for the InputService2TestCaseOperation1 operation. 
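+//
+// The input's TimeArg member is tagged timestampFormat:"unix", so the body
+// carries it as epoch seconds; for example (values taken from the test at
+// the end of this file):
+//
+//	input := &InputService2TestShapeInputService2TestCaseOperation1Input{
+//		TimeArg: aws.Time(time.Unix(1422172800, 0)),
+//	}
+//	// serialises as {"TimeArg":1422172800}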
+func (c *InputService2ProtocolTest) InputService2TestCaseOperation1Request(input *InputService2TestShapeInputService2TestCaseOperation1Input) (req *request.Request, output *InputService2TestShapeInputService2TestCaseOperation1Output) { + op := &request.Operation{ + Name: opInputService2TestCaseOperation1, + } + + if input == nil { + input = &InputService2TestShapeInputService2TestCaseOperation1Input{} + } + + req = c.newRequest(op, input, output) + output = &InputService2TestShapeInputService2TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *InputService2ProtocolTest) InputService2TestCaseOperation1(input *InputService2TestShapeInputService2TestCaseOperation1Input) (*InputService2TestShapeInputService2TestCaseOperation1Output, error) { + req, out := c.InputService2TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +type InputService2TestShapeInputService2TestCaseOperation1Input struct { + _ struct{} `type:"structure"` + + TimeArg *time.Time `type:"timestamp" timestampFormat:"unix"` +} + +type InputService2TestShapeInputService2TestCaseOperation1Output struct { + _ struct{} `type:"structure"` +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type InputService3ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the InputService3ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a InputService3ProtocolTest client from just a session. +// svc := inputservice3protocoltest.New(mySession) +// +// // Create a InputService3ProtocolTest client with additional configuration +// svc := inputservice3protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewInputService3ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *InputService3ProtocolTest { + c := p.ClientConfig("inputservice3protocoltest", cfgs...) + return newInputService3ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newInputService3ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *InputService3ProtocolTest { + svc := &InputService3ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "inputservice3protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "", + JSONVersion: "1.1", + TargetPrefix: "com.amazonaws.foo", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Build.PushBack(jsonrpc.Build) + svc.Handlers.Unmarshal.PushBack(jsonrpc.Unmarshal) + svc.Handlers.UnmarshalMeta.PushBack(jsonrpc.UnmarshalMeta) + svc.Handlers.UnmarshalError.PushBack(jsonrpc.UnmarshalError) + + return svc +} + +// newRequest creates a new request for a InputService3ProtocolTest operation and runs any +// custom request initialization. +func (c *InputService3ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opInputService3TestCaseOperation1 = "OperationName" + +// InputService3TestCaseOperation1Request generates a request for the InputService3TestCaseOperation1 operation. 
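+//
+// BlobArg and BlobMap values are []byte and travel base64-encoded in the
+// JSON body; for example (values taken from the tests at the end of this
+// file):
+//
+//	input := &InputService3TestShapeInputShape{BlobArg: []byte("foo")}
+//	// serialises as {"BlobArg":"Zm9v"}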
+func (c *InputService3ProtocolTest) InputService3TestCaseOperation1Request(input *InputService3TestShapeInputShape) (req *request.Request, output *InputService3TestShapeInputService3TestCaseOperation1Output) { + op := &request.Operation{ + Name: opInputService3TestCaseOperation1, + } + + if input == nil { + input = &InputService3TestShapeInputShape{} + } + + req = c.newRequest(op, input, output) + output = &InputService3TestShapeInputService3TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *InputService3ProtocolTest) InputService3TestCaseOperation1(input *InputService3TestShapeInputShape) (*InputService3TestShapeInputService3TestCaseOperation1Output, error) { + req, out := c.InputService3TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +const opInputService3TestCaseOperation2 = "OperationName" + +// InputService3TestCaseOperation2Request generates a request for the InputService3TestCaseOperation2 operation. +func (c *InputService3ProtocolTest) InputService3TestCaseOperation2Request(input *InputService3TestShapeInputShape) (req *request.Request, output *InputService3TestShapeInputService3TestCaseOperation2Output) { + op := &request.Operation{ + Name: opInputService3TestCaseOperation2, + } + + if input == nil { + input = &InputService3TestShapeInputShape{} + } + + req = c.newRequest(op, input, output) + output = &InputService3TestShapeInputService3TestCaseOperation2Output{} + req.Data = output + return +} + +func (c *InputService3ProtocolTest) InputService3TestCaseOperation2(input *InputService3TestShapeInputShape) (*InputService3TestShapeInputService3TestCaseOperation2Output, error) { + req, out := c.InputService3TestCaseOperation2Request(input) + err := req.Send() + return out, err +} + +type InputService3TestShapeInputService3TestCaseOperation1Output struct { + _ struct{} `type:"structure"` +} + +type InputService3TestShapeInputService3TestCaseOperation2Output struct { + _ struct{} `type:"structure"` +} + +type InputService3TestShapeInputShape struct { + _ struct{} `type:"structure"` + + BlobArg []byte `type:"blob"` + + BlobMap map[string][]byte `type:"map"` +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type InputService4ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the InputService4ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a InputService4ProtocolTest client from just a session. +// svc := inputservice4protocoltest.New(mySession) +// +// // Create a InputService4ProtocolTest client with additional configuration +// svc := inputservice4protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewInputService4ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *InputService4ProtocolTest { + c := p.ClientConfig("inputservice4protocoltest", cfgs...) + return newInputService4ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. 
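+//
+// Like the other constructors in this file, newClient wires the jsonrpc
+// protocol handlers onto the client. With JSONVersion "1.1" and TargetPrefix
+// "com.amazonaws.foo", every request goes out with the headers the tests at
+// the end of this file assert:
+//
+//	Content-Type: application/x-amz-json-1.1
+//	X-Amz-Target: com.amazonaws.foo.OperationName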
+func newInputService4ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *InputService4ProtocolTest { + svc := &InputService4ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "inputservice4protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "", + JSONVersion: "1.1", + TargetPrefix: "com.amazonaws.foo", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Build.PushBack(jsonrpc.Build) + svc.Handlers.Unmarshal.PushBack(jsonrpc.Unmarshal) + svc.Handlers.UnmarshalMeta.PushBack(jsonrpc.UnmarshalMeta) + svc.Handlers.UnmarshalError.PushBack(jsonrpc.UnmarshalError) + + return svc +} + +// newRequest creates a new request for a InputService4ProtocolTest operation and runs any +// custom request initialization. +func (c *InputService4ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opInputService4TestCaseOperation1 = "OperationName" + +// InputService4TestCaseOperation1Request generates a request for the InputService4TestCaseOperation1 operation. +func (c *InputService4ProtocolTest) InputService4TestCaseOperation1Request(input *InputService4TestShapeInputService4TestCaseOperation1Input) (req *request.Request, output *InputService4TestShapeInputService4TestCaseOperation1Output) { + op := &request.Operation{ + Name: opInputService4TestCaseOperation1, + HTTPMethod: "POST", + } + + if input == nil { + input = &InputService4TestShapeInputService4TestCaseOperation1Input{} + } + + req = c.newRequest(op, input, output) + output = &InputService4TestShapeInputService4TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *InputService4ProtocolTest) InputService4TestCaseOperation1(input *InputService4TestShapeInputService4TestCaseOperation1Input) (*InputService4TestShapeInputService4TestCaseOperation1Output, error) { + req, out := c.InputService4TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +type InputService4TestShapeInputService4TestCaseOperation1Input struct { + _ struct{} `type:"structure"` + + ListParam [][]byte `type:"list"` +} + +type InputService4TestShapeInputService4TestCaseOperation1Output struct { + _ struct{} `type:"structure"` +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type InputService5ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the InputService5ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a InputService5ProtocolTest client from just a session. +// svc := inputservice5protocoltest.New(mySession) +// +// // Create a InputService5ProtocolTest client with additional configuration +// svc := inputservice5protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewInputService5ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *InputService5ProtocolTest { + c := p.ClientConfig("inputservice5protocoltest", cfgs...) + return newInputService5ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. 
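+//
+// InputService5's operations all accept the shared InputService5TestShapeInputShape,
+// whose RecursiveStruct member may nest itself through struct, list, and map
+// members. A hedged sketch of one such nesting (values illustrative):
+//
+//	input := &InputService5TestShapeInputShape{
+//		RecursiveStruct: &InputService5TestShapeRecursiveStructType{
+//			RecursiveStruct: &InputService5TestShapeRecursiveStructType{
+//				NoRecurse: aws.String("foo"),
+//			},
+//		},
+//	}
+//	// serialises as {"RecursiveStruct":{"RecursiveStruct":{"NoRecurse":"foo"}}}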
+func newInputService5ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *InputService5ProtocolTest { + svc := &InputService5ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "inputservice5protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "", + JSONVersion: "1.1", + TargetPrefix: "com.amazonaws.foo", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Build.PushBack(jsonrpc.Build) + svc.Handlers.Unmarshal.PushBack(jsonrpc.Unmarshal) + svc.Handlers.UnmarshalMeta.PushBack(jsonrpc.UnmarshalMeta) + svc.Handlers.UnmarshalError.PushBack(jsonrpc.UnmarshalError) + + return svc +} + +// newRequest creates a new request for a InputService5ProtocolTest operation and runs any +// custom request initialization. +func (c *InputService5ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opInputService5TestCaseOperation1 = "OperationName" + +// InputService5TestCaseOperation1Request generates a request for the InputService5TestCaseOperation1 operation. +func (c *InputService5ProtocolTest) InputService5TestCaseOperation1Request(input *InputService5TestShapeInputShape) (req *request.Request, output *InputService5TestShapeInputService5TestCaseOperation1Output) { + op := &request.Operation{ + Name: opInputService5TestCaseOperation1, + } + + if input == nil { + input = &InputService5TestShapeInputShape{} + } + + req = c.newRequest(op, input, output) + output = &InputService5TestShapeInputService5TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *InputService5ProtocolTest) InputService5TestCaseOperation1(input *InputService5TestShapeInputShape) (*InputService5TestShapeInputService5TestCaseOperation1Output, error) { + req, out := c.InputService5TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +const opInputService5TestCaseOperation2 = "OperationName" + +// InputService5TestCaseOperation2Request generates a request for the InputService5TestCaseOperation2 operation. +func (c *InputService5ProtocolTest) InputService5TestCaseOperation2Request(input *InputService5TestShapeInputShape) (req *request.Request, output *InputService5TestShapeInputService5TestCaseOperation2Output) { + op := &request.Operation{ + Name: opInputService5TestCaseOperation2, + } + + if input == nil { + input = &InputService5TestShapeInputShape{} + } + + req = c.newRequest(op, input, output) + output = &InputService5TestShapeInputService5TestCaseOperation2Output{} + req.Data = output + return +} + +func (c *InputService5ProtocolTest) InputService5TestCaseOperation2(input *InputService5TestShapeInputShape) (*InputService5TestShapeInputService5TestCaseOperation2Output, error) { + req, out := c.InputService5TestCaseOperation2Request(input) + err := req.Send() + return out, err +} + +const opInputService5TestCaseOperation3 = "OperationName" + +// InputService5TestCaseOperation3Request generates a request for the InputService5TestCaseOperation3 operation. 
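+//
+// Operations 1 through 6 share one request-builder shape and appear to differ
+// only in which members of it their test cases populate; e.g. a recursive
+// list (values illustrative):
+//
+//	input := &InputService5TestShapeInputShape{
+//		RecursiveStruct: &InputService5TestShapeRecursiveStructType{
+//			RecursiveList: []*InputService5TestShapeRecursiveStructType{
+//				{NoRecurse: aws.String("foo")},
+//				{NoRecurse: aws.String("bar")},
+//			},
+//		},
+//	}
+//	// serialises as {"RecursiveStruct":{"RecursiveList":[{"NoRecurse":"foo"},{"NoRecurse":"bar"}]}}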
+func (c *InputService5ProtocolTest) InputService5TestCaseOperation3Request(input *InputService5TestShapeInputShape) (req *request.Request, output *InputService5TestShapeInputService5TestCaseOperation3Output) { + op := &request.Operation{ + Name: opInputService5TestCaseOperation3, + } + + if input == nil { + input = &InputService5TestShapeInputShape{} + } + + req = c.newRequest(op, input, output) + output = &InputService5TestShapeInputService5TestCaseOperation3Output{} + req.Data = output + return +} + +func (c *InputService5ProtocolTest) InputService5TestCaseOperation3(input *InputService5TestShapeInputShape) (*InputService5TestShapeInputService5TestCaseOperation3Output, error) { + req, out := c.InputService5TestCaseOperation3Request(input) + err := req.Send() + return out, err +} + +const opInputService5TestCaseOperation4 = "OperationName" + +// InputService5TestCaseOperation4Request generates a request for the InputService5TestCaseOperation4 operation. +func (c *InputService5ProtocolTest) InputService5TestCaseOperation4Request(input *InputService5TestShapeInputShape) (req *request.Request, output *InputService5TestShapeInputService5TestCaseOperation4Output) { + op := &request.Operation{ + Name: opInputService5TestCaseOperation4, + } + + if input == nil { + input = &InputService5TestShapeInputShape{} + } + + req = c.newRequest(op, input, output) + output = &InputService5TestShapeInputService5TestCaseOperation4Output{} + req.Data = output + return +} + +func (c *InputService5ProtocolTest) InputService5TestCaseOperation4(input *InputService5TestShapeInputShape) (*InputService5TestShapeInputService5TestCaseOperation4Output, error) { + req, out := c.InputService5TestCaseOperation4Request(input) + err := req.Send() + return out, err +} + +const opInputService5TestCaseOperation5 = "OperationName" + +// InputService5TestCaseOperation5Request generates a request for the InputService5TestCaseOperation5 operation. +func (c *InputService5ProtocolTest) InputService5TestCaseOperation5Request(input *InputService5TestShapeInputShape) (req *request.Request, output *InputService5TestShapeInputService5TestCaseOperation5Output) { + op := &request.Operation{ + Name: opInputService5TestCaseOperation5, + } + + if input == nil { + input = &InputService5TestShapeInputShape{} + } + + req = c.newRequest(op, input, output) + output = &InputService5TestShapeInputService5TestCaseOperation5Output{} + req.Data = output + return +} + +func (c *InputService5ProtocolTest) InputService5TestCaseOperation5(input *InputService5TestShapeInputShape) (*InputService5TestShapeInputService5TestCaseOperation5Output, error) { + req, out := c.InputService5TestCaseOperation5Request(input) + err := req.Send() + return out, err +} + +const opInputService5TestCaseOperation6 = "OperationName" + +// InputService5TestCaseOperation6Request generates a request for the InputService5TestCaseOperation6 operation. 
+func (c *InputService5ProtocolTest) InputService5TestCaseOperation6Request(input *InputService5TestShapeInputShape) (req *request.Request, output *InputService5TestShapeInputService5TestCaseOperation6Output) { + op := &request.Operation{ + Name: opInputService5TestCaseOperation6, + } + + if input == nil { + input = &InputService5TestShapeInputShape{} + } + + req = c.newRequest(op, input, output) + output = &InputService5TestShapeInputService5TestCaseOperation6Output{} + req.Data = output + return +} + +func (c *InputService5ProtocolTest) InputService5TestCaseOperation6(input *InputService5TestShapeInputShape) (*InputService5TestShapeInputService5TestCaseOperation6Output, error) { + req, out := c.InputService5TestCaseOperation6Request(input) + err := req.Send() + return out, err +} + +type InputService5TestShapeInputService5TestCaseOperation1Output struct { + _ struct{} `type:"structure"` +} + +type InputService5TestShapeInputService5TestCaseOperation2Output struct { + _ struct{} `type:"structure"` +} + +type InputService5TestShapeInputService5TestCaseOperation3Output struct { + _ struct{} `type:"structure"` +} + +type InputService5TestShapeInputService5TestCaseOperation4Output struct { + _ struct{} `type:"structure"` +} + +type InputService5TestShapeInputService5TestCaseOperation5Output struct { + _ struct{} `type:"structure"` +} + +type InputService5TestShapeInputService5TestCaseOperation6Output struct { + _ struct{} `type:"structure"` +} + +type InputService5TestShapeInputShape struct { + _ struct{} `type:"structure"` + + RecursiveStruct *InputService5TestShapeRecursiveStructType `type:"structure"` +} + +type InputService5TestShapeRecursiveStructType struct { + _ struct{} `type:"structure"` + + NoRecurse *string `type:"string"` + + RecursiveList []*InputService5TestShapeRecursiveStructType `type:"list"` + + RecursiveMap map[string]*InputService5TestShapeRecursiveStructType `type:"map"` + + RecursiveStruct *InputService5TestShapeRecursiveStructType `type:"structure"` +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type InputService6ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the InputService6ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a InputService6ProtocolTest client from just a session. +// svc := inputservice6protocoltest.New(mySession) +// +// // Create a InputService6ProtocolTest client with additional configuration +// svc := inputservice6protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewInputService6ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *InputService6ProtocolTest { + c := p.ClientConfig("inputservice6protocoltest", cfgs...) + return newInputService6ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. 
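+//
+// InputService6's input carries a map member, which serialises as a plain
+// JSON object; a hedged sketch (key and value illustrative):
+//
+//	input := &InputService6TestShapeInputService6TestCaseOperation1Input{
+//		Map: map[string]*string{"key1": aws.String("value1")},
+//	}
+//	// serialises as {"Map":{"key1":"value1"}}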
+func newInputService6ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *InputService6ProtocolTest { + svc := &InputService6ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "inputservice6protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "", + JSONVersion: "1.1", + TargetPrefix: "com.amazonaws.foo", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Build.PushBack(jsonrpc.Build) + svc.Handlers.Unmarshal.PushBack(jsonrpc.Unmarshal) + svc.Handlers.UnmarshalMeta.PushBack(jsonrpc.UnmarshalMeta) + svc.Handlers.UnmarshalError.PushBack(jsonrpc.UnmarshalError) + + return svc +} + +// newRequest creates a new request for a InputService6ProtocolTest operation and runs any +// custom request initialization. +func (c *InputService6ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opInputService6TestCaseOperation1 = "OperationName" + +// InputService6TestCaseOperation1Request generates a request for the InputService6TestCaseOperation1 operation. +func (c *InputService6ProtocolTest) InputService6TestCaseOperation1Request(input *InputService6TestShapeInputService6TestCaseOperation1Input) (req *request.Request, output *InputService6TestShapeInputService6TestCaseOperation1Output) { + op := &request.Operation{ + Name: opInputService6TestCaseOperation1, + HTTPMethod: "POST", + } + + if input == nil { + input = &InputService6TestShapeInputService6TestCaseOperation1Input{} + } + + req = c.newRequest(op, input, output) + output = &InputService6TestShapeInputService6TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *InputService6ProtocolTest) InputService6TestCaseOperation1(input *InputService6TestShapeInputService6TestCaseOperation1Input) (*InputService6TestShapeInputService6TestCaseOperation1Output, error) { + req, out := c.InputService6TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +type InputService6TestShapeInputService6TestCaseOperation1Input struct { + _ struct{} `type:"structure"` + + Map map[string]*string `type:"map"` +} + +type InputService6TestShapeInputService6TestCaseOperation1Output struct { + _ struct{} `type:"structure"` +} + +// +// Tests begin here +// + +func TestInputService1ProtocolTestScalarMembersCase1(t *testing.T) { + sess := session.New() + svc := NewInputService1ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + + input := &InputService1TestShapeInputService1TestCaseOperation1Input{ + Name: aws.String("myname"), + } + req, _ := svc.InputService1TestCaseOperation1Request(input) + r := req.HTTPRequest + + // build request + jsonrpc.Build(req) + assert.NoError(t, req.Error) + + // assert body + assert.NotNil(t, r.Body) + body, _ := ioutil.ReadAll(r.Body) + awstesting.AssertJSON(t, `{"Name":"myname"}`, util.Trim(string(body))) + + // assert URL + awstesting.AssertURL(t, "https://test/", r.URL.String()) + + // assert headers + assert.Equal(t, "application/x-amz-json-1.1", r.Header.Get("Content-Type")) + assert.Equal(t, "com.amazonaws.foo.OperationName", r.Header.Get("X-Amz-Target")) + +} + +func TestInputService2ProtocolTestTimestampValuesCase1(t *testing.T) { + sess := session.New() + svc := NewInputService2ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + + input := &InputService2TestShapeInputService2TestCaseOperation1Input{ + TimeArg: 
aws.Time(time.Unix(1422172800, 0)), + } + req, _ := svc.InputService2TestCaseOperation1Request(input) + r := req.HTTPRequest + + // build request + jsonrpc.Build(req) + assert.NoError(t, req.Error) + + // assert body + assert.NotNil(t, r.Body) + body, _ := ioutil.ReadAll(r.Body) + awstesting.AssertJSON(t, `{"TimeArg":1422172800}`, util.Trim(string(body))) + + // assert URL + awstesting.AssertURL(t, "https://test/", r.URL.String()) + + // assert headers + assert.Equal(t, "application/x-amz-json-1.1", r.Header.Get("Content-Type")) + assert.Equal(t, "com.amazonaws.foo.OperationName", r.Header.Get("X-Amz-Target")) + +} + +func TestInputService3ProtocolTestBase64EncodedBlobsCase1(t *testing.T) { + sess := session.New() + svc := NewInputService3ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + + input := &InputService3TestShapeInputShape{ + BlobArg: []byte("foo"), + } + req, _ := svc.InputService3TestCaseOperation1Request(input) + r := req.HTTPRequest + + // build request + jsonrpc.Build(req) + assert.NoError(t, req.Error) + + // assert body + assert.NotNil(t, r.Body) + body, _ := ioutil.ReadAll(r.Body) + awstesting.AssertJSON(t, `{"BlobArg":"Zm9v"}`, util.Trim(string(body))) + + // assert URL + awstesting.AssertURL(t, "https://test/", r.URL.String()) + + // assert headers + assert.Equal(t, "application/x-amz-json-1.1", r.Header.Get("Content-Type")) + assert.Equal(t, "com.amazonaws.foo.OperationName", r.Header.Get("X-Amz-Target")) + +} + +func TestInputService3ProtocolTestBase64EncodedBlobsCase2(t *testing.T) { + sess := session.New() + svc := NewInputService3ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + + input := &InputService3TestShapeInputShape{ + BlobMap: map[string][]byte{ + "key1": []byte("foo"), + "key2": []byte("bar"), + }, + } + req, _ := svc.InputService3TestCaseOperation2Request(input) + r := req.HTTPRequest + + // build request + jsonrpc.Build(req) + assert.NoError(t, req.Error) + + // assert body + assert.NotNil(t, r.Body) + body, _ := ioutil.ReadAll(r.Body) + awstesting.AssertJSON(t, `{"BlobMap":{"key1":"Zm9v","key2":"YmFy"}}`, util.Trim(string(body))) + + // assert URL + awstesting.AssertURL(t, "https://test/", r.URL.String()) + + // assert headers + assert.Equal(t, "application/x-amz-json-1.1", r.Header.Get("Content-Type")) + assert.Equal(t, "com.amazonaws.foo.OperationName", r.Header.Get("X-Amz-Target")) + +} + +func TestInputService4ProtocolTestNestedBlobsCase1(t *testing.T) { + sess := session.New() + svc := NewInputService4ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + + input := &InputService4TestShapeInputService4TestCaseOperation1Input{ + ListParam: [][]byte{ + []byte("foo"), + []byte("bar"), + }, + } + req, _ := svc.InputService4TestCaseOperation1Request(input) + r := req.HTTPRequest + + // build request + jsonrpc.Build(req) + assert.NoError(t, req.Error) + + // assert body + assert.NotNil(t, r.Body) + body, _ := ioutil.ReadAll(r.Body) + awstesting.AssertJSON(t, `{"ListParam":["Zm9v","YmFy"]}`, util.Trim(string(body))) + + // assert URL + awstesting.AssertURL(t, "https://test/", r.URL.String()) + + // assert headers + assert.Equal(t, "application/x-amz-json-1.1", r.Header.Get("Content-Type")) + assert.Equal(t, "com.amazonaws.foo.OperationName", r.Header.Get("X-Amz-Target")) + +} + +func TestInputService5ProtocolTestRecursiveShapesCase1(t *testing.T) { + sess := session.New() + svc := NewInputService5ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + + input := 
&InputService5TestShapeInputShape{ + RecursiveStruct: &InputService5TestShapeRecursiveStructType{ + NoRecurse: aws.String("foo"), + }, + } + req, _ := svc.InputService5TestCaseOperation1Request(input) + r := req.HTTPRequest + + // build request + jsonrpc.Build(req) + assert.NoError(t, req.Error) + + // assert body + assert.NotNil(t, r.Body) + body, _ := ioutil.ReadAll(r.Body) + awstesting.AssertJSON(t, `{"RecursiveStruct":{"NoRecurse":"foo"}}`, util.Trim(string(body))) + + // assert URL + awstesting.AssertURL(t, "https://test/", r.URL.String()) + + // assert headers + assert.Equal(t, "application/x-amz-json-1.1", r.Header.Get("Content-Type")) + assert.Equal(t, "com.amazonaws.foo.OperationName", r.Header.Get("X-Amz-Target")) + +} + +func TestInputService5ProtocolTestRecursiveShapesCase2(t *testing.T) { + sess := session.New() + svc := NewInputService5ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + + input := &InputService5TestShapeInputShape{ + RecursiveStruct: &InputService5TestShapeRecursiveStructType{ + RecursiveStruct: &InputService5TestShapeRecursiveStructType{ + NoRecurse: aws.String("foo"), + }, + }, + } + req, _ := svc.InputService5TestCaseOperation2Request(input) + r := req.HTTPRequest + + // build request + jsonrpc.Build(req) + assert.NoError(t, req.Error) + + // assert body + assert.NotNil(t, r.Body) + body, _ := ioutil.ReadAll(r.Body) + awstesting.AssertJSON(t, `{"RecursiveStruct":{"RecursiveStruct":{"NoRecurse":"foo"}}}`, util.Trim(string(body))) + + // assert URL + awstesting.AssertURL(t, "https://test/", r.URL.String()) + + // assert headers + assert.Equal(t, "application/x-amz-json-1.1", r.Header.Get("Content-Type")) + assert.Equal(t, "com.amazonaws.foo.OperationName", r.Header.Get("X-Amz-Target")) + +} + +func TestInputService5ProtocolTestRecursiveShapesCase3(t *testing.T) { + sess := session.New() + svc := NewInputService5ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + + input := &InputService5TestShapeInputShape{ + RecursiveStruct: &InputService5TestShapeRecursiveStructType{ + RecursiveStruct: &InputService5TestShapeRecursiveStructType{ + RecursiveStruct: &InputService5TestShapeRecursiveStructType{ + RecursiveStruct: &InputService5TestShapeRecursiveStructType{ + NoRecurse: aws.String("foo"), + }, + }, + }, + }, + } + req, _ := svc.InputService5TestCaseOperation3Request(input) + r := req.HTTPRequest + + // build request + jsonrpc.Build(req) + assert.NoError(t, req.Error) + + // assert body + assert.NotNil(t, r.Body) + body, _ := ioutil.ReadAll(r.Body) + awstesting.AssertJSON(t, `{"RecursiveStruct":{"RecursiveStruct":{"RecursiveStruct":{"RecursiveStruct":{"NoRecurse":"foo"}}}}}`, util.Trim(string(body))) + + // assert URL + awstesting.AssertURL(t, "https://test/", r.URL.String()) + + // assert headers + assert.Equal(t, "application/x-amz-json-1.1", r.Header.Get("Content-Type")) + assert.Equal(t, "com.amazonaws.foo.OperationName", r.Header.Get("X-Amz-Target")) + +} + +func TestInputService5ProtocolTestRecursiveShapesCase4(t *testing.T) { + sess := session.New() + svc := NewInputService5ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + + input := &InputService5TestShapeInputShape{ + RecursiveStruct: &InputService5TestShapeRecursiveStructType{ + RecursiveList: []*InputService5TestShapeRecursiveStructType{ + { + NoRecurse: aws.String("foo"), + }, + { + NoRecurse: aws.String("bar"), + }, + }, + }, + } + req, _ := svc.InputService5TestCaseOperation4Request(input) + r := req.HTTPRequest + + // build request + 
jsonrpc.Build(req) + assert.NoError(t, req.Error) + + // assert body + assert.NotNil(t, r.Body) + body, _ := ioutil.ReadAll(r.Body) + awstesting.AssertJSON(t, `{"RecursiveStruct":{"RecursiveList":[{"NoRecurse":"foo"},{"NoRecurse":"bar"}]}}`, util.Trim(string(body))) + + // assert URL + awstesting.AssertURL(t, "https://test/", r.URL.String()) + + // assert headers + assert.Equal(t, "application/x-amz-json-1.1", r.Header.Get("Content-Type")) + assert.Equal(t, "com.amazonaws.foo.OperationName", r.Header.Get("X-Amz-Target")) + +} + +func TestInputService5ProtocolTestRecursiveShapesCase5(t *testing.T) { + sess := session.New() + svc := NewInputService5ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + + input := &InputService5TestShapeInputShape{ + RecursiveStruct: &InputService5TestShapeRecursiveStructType{ + RecursiveList: []*InputService5TestShapeRecursiveStructType{ + { + NoRecurse: aws.String("foo"), + }, + { + RecursiveStruct: &InputService5TestShapeRecursiveStructType{ + NoRecurse: aws.String("bar"), + }, + }, + }, + }, + } + req, _ := svc.InputService5TestCaseOperation5Request(input) + r := req.HTTPRequest + + // build request + jsonrpc.Build(req) + assert.NoError(t, req.Error) + + // assert body + assert.NotNil(t, r.Body) + body, _ := ioutil.ReadAll(r.Body) + awstesting.AssertJSON(t, `{"RecursiveStruct":{"RecursiveList":[{"NoRecurse":"foo"},{"RecursiveStruct":{"NoRecurse":"bar"}}]}}`, util.Trim(string(body))) + + // assert URL + awstesting.AssertURL(t, "https://test/", r.URL.String()) + + // assert headers + assert.Equal(t, "application/x-amz-json-1.1", r.Header.Get("Content-Type")) + assert.Equal(t, "com.amazonaws.foo.OperationName", r.Header.Get("X-Amz-Target")) + +} + +func TestInputService5ProtocolTestRecursiveShapesCase6(t *testing.T) { + sess := session.New() + svc := NewInputService5ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + + input := &InputService5TestShapeInputShape{ + RecursiveStruct: &InputService5TestShapeRecursiveStructType{ + RecursiveMap: map[string]*InputService5TestShapeRecursiveStructType{ + "bar": { + NoRecurse: aws.String("bar"), + }, + "foo": { + NoRecurse: aws.String("foo"), + }, + }, + }, + } + req, _ := svc.InputService5TestCaseOperation6Request(input) + r := req.HTTPRequest + + // build request + jsonrpc.Build(req) + assert.NoError(t, req.Error) + + // assert body + assert.NotNil(t, r.Body) + body, _ := ioutil.ReadAll(r.Body) + awstesting.AssertJSON(t, `{"RecursiveStruct":{"RecursiveMap":{"foo":{"NoRecurse":"foo"},"bar":{"NoRecurse":"bar"}}}}`, util.Trim(string(body))) + + // assert URL + awstesting.AssertURL(t, "https://test/", r.URL.String()) + + // assert headers + assert.Equal(t, "application/x-amz-json-1.1", r.Header.Get("Content-Type")) + assert.Equal(t, "com.amazonaws.foo.OperationName", r.Header.Get("X-Amz-Target")) + +} + +func TestInputService6ProtocolTestEmptyMapsCase1(t *testing.T) { + sess := session.New() + svc := NewInputService6ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + + input := &InputService6TestShapeInputService6TestCaseOperation1Input{ + Map: map[string]*string{}, + } + req, _ := svc.InputService6TestCaseOperation1Request(input) + r := req.HTTPRequest + + // build request + jsonrpc.Build(req) + assert.NoError(t, req.Error) + + // assert body + assert.NotNil(t, r.Body) + body, _ := ioutil.ReadAll(r.Body) + awstesting.AssertJSON(t, `{"Map":{}}`, util.Trim(string(body))) + + // assert URL + awstesting.AssertURL(t, "https://test/", r.URL.String()) + + // assert 
headers + assert.Equal(t, "application/x-amz-json-1.1", r.Header.Get("Content-Type")) + assert.Equal(t, "com.amazonaws.foo.OperationName", r.Header.Get("X-Amz-Target")) + +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/jsonrpc/jsonrpc.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/jsonrpc/jsonrpc.go new file mode 100644 index 000000000..c4e71cf48 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/jsonrpc/jsonrpc.go @@ -0,0 +1,99 @@ +// Package jsonrpc provides JSON RPC utilities for serialisation of AWS +// requests and responses. +package jsonrpc + +//go:generate go run ../../../models/protocol_tests/generate.go ../../../models/protocol_tests/input/json.json build_test.go +//go:generate go run ../../../models/protocol_tests/generate.go ../../../models/protocol_tests/output/json.json unmarshal_test.go + +import ( + "encoding/json" + "io/ioutil" + "strings" + + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol/json/jsonutil" + "github.com/aws/aws-sdk-go/private/protocol/rest" +) + +var emptyJSON = []byte("{}") + +// Build builds a JSON payload for a JSON RPC request. +func Build(req *request.Request) { + var buf []byte + var err error + if req.ParamsFilled() { + buf, err = jsonutil.BuildJSON(req.Params) + if err != nil { + req.Error = awserr.New("SerializationError", "failed encoding JSON RPC request", err) + return + } + } else { + buf = emptyJSON + } + + if req.ClientInfo.TargetPrefix != "" || string(buf) != "{}" { + req.SetBufferBody(buf) + } + + if req.ClientInfo.TargetPrefix != "" { + target := req.ClientInfo.TargetPrefix + "." + req.Operation.Name + req.HTTPRequest.Header.Add("X-Amz-Target", target) + } + if req.ClientInfo.JSONVersion != "" { + jsonVersion := req.ClientInfo.JSONVersion + req.HTTPRequest.Header.Add("Content-Type", "application/x-amz-json-"+jsonVersion) + } +} + +// Unmarshal unmarshals a response for a JSON RPC service. +func Unmarshal(req *request.Request) { + defer req.HTTPResponse.Body.Close() + if req.DataFilled() { + err := jsonutil.UnmarshalJSON(req.Data, req.HTTPResponse.Body) + if err != nil { + req.Error = awserr.New("SerializationError", "failed decoding JSON RPC response", err) + } + } + return +} + +// UnmarshalMeta unmarshals headers from a response for a JSON RPC service. +func UnmarshalMeta(req *request.Request) { + rest.UnmarshalMeta(req) +} + +// UnmarshalError unmarshals an error response for a JSON RPC service. 
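+// The error code comes from the JSON "__type" field; a namespaced value is
+// split on "#" and only the trailing segment is kept. For example
+// (illustrative payload):
+//
+//	{"__type":"com.amazonaws.foo#SomeException","message":"oops"}
+//	// -> awserr code "SomeException", message "oops"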
+func UnmarshalError(req *request.Request) { + defer req.HTTPResponse.Body.Close() + bodyBytes, err := ioutil.ReadAll(req.HTTPResponse.Body) + if err != nil { + req.Error = awserr.New("SerializationError", "failed reading JSON RPC error response", err) + return + } + if len(bodyBytes) == 0 { + req.Error = awserr.NewRequestFailure( + awserr.New("SerializationError", req.HTTPResponse.Status, nil), + req.HTTPResponse.StatusCode, + "", + ) + return + } + var jsonErr jsonErrorResponse + if err := json.Unmarshal(bodyBytes, &jsonErr); err != nil { + req.Error = awserr.New("SerializationError", "failed decoding JSON RPC error response", err) + return + } + + codes := strings.SplitN(jsonErr.Code, "#", 2) + req.Error = awserr.NewRequestFailure( + awserr.New(codes[len(codes)-1], jsonErr.Message, nil), + req.HTTPResponse.StatusCode, + req.RequestID, + ) +} + +type jsonErrorResponse struct { + Code string `json:"__type"` + Message string `json:"message"` +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/jsonrpc/unmarshal_test.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/jsonrpc/unmarshal_test.go new file mode 100644 index 000000000..4e804ffe1 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/jsonrpc/unmarshal_test.go @@ -0,0 +1,812 @@ +package jsonrpc_test + +import ( + "bytes" + "encoding/json" + "encoding/xml" + "io" + "io/ioutil" + "net/http" + "net/url" + "testing" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/client" + "github.com/aws/aws-sdk-go/aws/client/metadata" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/aws/session" + "github.com/aws/aws-sdk-go/awstesting" + "github.com/aws/aws-sdk-go/private/protocol/jsonrpc" + "github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil" + "github.com/aws/aws-sdk-go/private/signer/v4" + "github.com/aws/aws-sdk-go/private/util" + "github.com/stretchr/testify/assert" +) + +var _ bytes.Buffer // always import bytes +var _ http.Request +var _ json.Marshaler +var _ time.Time +var _ xmlutil.XMLNode +var _ xml.Attr +var _ = awstesting.GenerateAssertions +var _ = ioutil.Discard +var _ = util.Trim("") +var _ = url.Values{} +var _ = io.EOF +var _ = aws.String + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type OutputService1ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the OutputService1ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a OutputService1ProtocolTest client from just a session. +// svc := outputservice1protocoltest.New(mySession) +// +// // Create a OutputService1ProtocolTest client with additional configuration +// svc := outputservice1protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewOutputService1ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *OutputService1ProtocolTest { + c := p.ClientConfig("outputservice1protocoltest", cfgs...) + return newOutputService1ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. 
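+// Besides filling in ClientInfo, the constructor pushes the v4 signer and
+// the jsonrpc Build/Unmarshal handlers onto the request pipeline, so every
+// request and response for this client passes through them.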
+func newOutputService1ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *OutputService1ProtocolTest { + svc := &OutputService1ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "outputservice1protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "", + JSONVersion: "", + TargetPrefix: "", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Build.PushBack(jsonrpc.Build) + svc.Handlers.Unmarshal.PushBack(jsonrpc.Unmarshal) + svc.Handlers.UnmarshalMeta.PushBack(jsonrpc.UnmarshalMeta) + svc.Handlers.UnmarshalError.PushBack(jsonrpc.UnmarshalError) + + return svc +} + +// newRequest creates a new request for a OutputService1ProtocolTest operation and runs any +// custom request initialization. +func (c *OutputService1ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opOutputService1TestCaseOperation1 = "OperationName" + +// OutputService1TestCaseOperation1Request generates a request for the OutputService1TestCaseOperation1 operation. +func (c *OutputService1ProtocolTest) OutputService1TestCaseOperation1Request(input *OutputService1TestShapeOutputService1TestCaseOperation1Input) (req *request.Request, output *OutputService1TestShapeOutputService1TestCaseOperation1Output) { + op := &request.Operation{ + Name: opOutputService1TestCaseOperation1, + } + + if input == nil { + input = &OutputService1TestShapeOutputService1TestCaseOperation1Input{} + } + + req = c.newRequest(op, input, output) + output = &OutputService1TestShapeOutputService1TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *OutputService1ProtocolTest) OutputService1TestCaseOperation1(input *OutputService1TestShapeOutputService1TestCaseOperation1Input) (*OutputService1TestShapeOutputService1TestCaseOperation1Output, error) { + req, out := c.OutputService1TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +type OutputService1TestShapeOutputService1TestCaseOperation1Input struct { + _ struct{} `type:"structure"` +} + +type OutputService1TestShapeOutputService1TestCaseOperation1Output struct { + _ struct{} `type:"structure"` + + Char *string `type:"character"` + + Double *float64 `type:"double"` + + FalseBool *bool `type:"boolean"` + + Float *float64 `type:"float"` + + Long *int64 `type:"long"` + + Num *int64 `type:"integer"` + + Str *string `type:"string"` + + TrueBool *bool `type:"boolean"` +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type OutputService2ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the OutputService2ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a OutputService2ProtocolTest client from just a session. +// svc := outputservice2protocoltest.New(mySession) +// +// // Create a OutputService2ProtocolTest client with additional configuration +// svc := outputservice2protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewOutputService2ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *OutputService2ProtocolTest { + c := p.ClientConfig("outputservice2protocoltest", cfgs...) 
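+	// ClientConfig resolves the merged aws.Config, handler list, endpoint,
+	// and signing region for this service from the provided session.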
+ return newOutputService2ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newOutputService2ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *OutputService2ProtocolTest { + svc := &OutputService2ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "outputservice2protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "", + JSONVersion: "", + TargetPrefix: "", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Build.PushBack(jsonrpc.Build) + svc.Handlers.Unmarshal.PushBack(jsonrpc.Unmarshal) + svc.Handlers.UnmarshalMeta.PushBack(jsonrpc.UnmarshalMeta) + svc.Handlers.UnmarshalError.PushBack(jsonrpc.UnmarshalError) + + return svc +} + +// newRequest creates a new request for a OutputService2ProtocolTest operation and runs any +// custom request initialization. +func (c *OutputService2ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opOutputService2TestCaseOperation1 = "OperationName" + +// OutputService2TestCaseOperation1Request generates a request for the OutputService2TestCaseOperation1 operation. +func (c *OutputService2ProtocolTest) OutputService2TestCaseOperation1Request(input *OutputService2TestShapeOutputService2TestCaseOperation1Input) (req *request.Request, output *OutputService2TestShapeOutputService2TestCaseOperation1Output) { + op := &request.Operation{ + Name: opOutputService2TestCaseOperation1, + } + + if input == nil { + input = &OutputService2TestShapeOutputService2TestCaseOperation1Input{} + } + + req = c.newRequest(op, input, output) + output = &OutputService2TestShapeOutputService2TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *OutputService2ProtocolTest) OutputService2TestCaseOperation1(input *OutputService2TestShapeOutputService2TestCaseOperation1Input) (*OutputService2TestShapeOutputService2TestCaseOperation1Output, error) { + req, out := c.OutputService2TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +type OutputService2TestShapeBlobContainer struct { + _ struct{} `type:"structure"` + + Foo []byte `locationName:"foo" type:"blob"` +} + +type OutputService2TestShapeOutputService2TestCaseOperation1Input struct { + _ struct{} `type:"structure"` +} + +type OutputService2TestShapeOutputService2TestCaseOperation1Output struct { + _ struct{} `type:"structure"` + + BlobMember []byte `type:"blob"` + + StructMember *OutputService2TestShapeBlobContainer `type:"structure"` +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type OutputService3ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the OutputService3ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a OutputService3ProtocolTest client from just a session. 
+// svc := outputservice3protocoltest.New(mySession) +// +// // Create a OutputService3ProtocolTest client with additional configuration +// svc := outputservice3protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewOutputService3ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *OutputService3ProtocolTest { + c := p.ClientConfig("outputservice3protocoltest", cfgs...) + return newOutputService3ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newOutputService3ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *OutputService3ProtocolTest { + svc := &OutputService3ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "outputservice3protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "", + JSONVersion: "", + TargetPrefix: "", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Build.PushBack(jsonrpc.Build) + svc.Handlers.Unmarshal.PushBack(jsonrpc.Unmarshal) + svc.Handlers.UnmarshalMeta.PushBack(jsonrpc.UnmarshalMeta) + svc.Handlers.UnmarshalError.PushBack(jsonrpc.UnmarshalError) + + return svc +} + +// newRequest creates a new request for a OutputService3ProtocolTest operation and runs any +// custom request initialization. +func (c *OutputService3ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opOutputService3TestCaseOperation1 = "OperationName" + +// OutputService3TestCaseOperation1Request generates a request for the OutputService3TestCaseOperation1 operation. +func (c *OutputService3ProtocolTest) OutputService3TestCaseOperation1Request(input *OutputService3TestShapeOutputService3TestCaseOperation1Input) (req *request.Request, output *OutputService3TestShapeOutputService3TestCaseOperation1Output) { + op := &request.Operation{ + Name: opOutputService3TestCaseOperation1, + } + + if input == nil { + input = &OutputService3TestShapeOutputService3TestCaseOperation1Input{} + } + + req = c.newRequest(op, input, output) + output = &OutputService3TestShapeOutputService3TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *OutputService3ProtocolTest) OutputService3TestCaseOperation1(input *OutputService3TestShapeOutputService3TestCaseOperation1Input) (*OutputService3TestShapeOutputService3TestCaseOperation1Output, error) { + req, out := c.OutputService3TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +type OutputService3TestShapeOutputService3TestCaseOperation1Input struct { + _ struct{} `type:"structure"` +} + +type OutputService3TestShapeOutputService3TestCaseOperation1Output struct { + _ struct{} `type:"structure"` + + StructMember *OutputService3TestShapeTimeContainer `type:"structure"` + + TimeMember *time.Time `type:"timestamp" timestampFormat:"unix"` +} + +type OutputService3TestShapeTimeContainer struct { + _ struct{} `type:"structure"` + + Foo *time.Time `locationName:"foo" type:"timestamp" timestampFormat:"unix"` +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type OutputService4ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the OutputService4ProtocolTest client with a session. 
+// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a OutputService4ProtocolTest client from just a session. +// svc := outputservice4protocoltest.New(mySession) +// +// // Create a OutputService4ProtocolTest client with additional configuration +// svc := outputservice4protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewOutputService4ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *OutputService4ProtocolTest { + c := p.ClientConfig("outputservice4protocoltest", cfgs...) + return newOutputService4ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newOutputService4ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *OutputService4ProtocolTest { + svc := &OutputService4ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "outputservice4protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "", + JSONVersion: "", + TargetPrefix: "", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Build.PushBack(jsonrpc.Build) + svc.Handlers.Unmarshal.PushBack(jsonrpc.Unmarshal) + svc.Handlers.UnmarshalMeta.PushBack(jsonrpc.UnmarshalMeta) + svc.Handlers.UnmarshalError.PushBack(jsonrpc.UnmarshalError) + + return svc +} + +// newRequest creates a new request for a OutputService4ProtocolTest operation and runs any +// custom request initialization. +func (c *OutputService4ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opOutputService4TestCaseOperation1 = "OperationName" + +// OutputService4TestCaseOperation1Request generates a request for the OutputService4TestCaseOperation1 operation. +func (c *OutputService4ProtocolTest) OutputService4TestCaseOperation1Request(input *OutputService4TestShapeOutputService4TestCaseOperation1Input) (req *request.Request, output *OutputService4TestShapeOutputShape) { + op := &request.Operation{ + Name: opOutputService4TestCaseOperation1, + } + + if input == nil { + input = &OutputService4TestShapeOutputService4TestCaseOperation1Input{} + } + + req = c.newRequest(op, input, output) + output = &OutputService4TestShapeOutputShape{} + req.Data = output + return +} + +func (c *OutputService4ProtocolTest) OutputService4TestCaseOperation1(input *OutputService4TestShapeOutputService4TestCaseOperation1Input) (*OutputService4TestShapeOutputShape, error) { + req, out := c.OutputService4TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +const opOutputService4TestCaseOperation2 = "OperationName" + +// OutputService4TestCaseOperation2Request generates a request for the OutputService4TestCaseOperation2 operation. 
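+//
+// In the protocol tests below the request is never sent: req.HTTPResponse is
+// stubbed with a canned body and jsonrpc.Unmarshal decodes it into the
+// returned output shape, roughly (body illustrative):
+//
+//	req, out := svc.OutputService4TestCaseOperation2Request(nil)
+//	req.HTTPResponse = &http.Response{StatusCode: 200, Body: ioutil.NopCloser(bytes.NewReader([]byte(`{"ListMember":["a"]}`)))}
+//	jsonrpc.Unmarshal(req) // out.ListMember now holds ["a"]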
+func (c *OutputService4ProtocolTest) OutputService4TestCaseOperation2Request(input *OutputService4TestShapeOutputService4TestCaseOperation2Input) (req *request.Request, output *OutputService4TestShapeOutputShape) { + op := &request.Operation{ + Name: opOutputService4TestCaseOperation2, + } + + if input == nil { + input = &OutputService4TestShapeOutputService4TestCaseOperation2Input{} + } + + req = c.newRequest(op, input, output) + output = &OutputService4TestShapeOutputShape{} + req.Data = output + return +} + +func (c *OutputService4ProtocolTest) OutputService4TestCaseOperation2(input *OutputService4TestShapeOutputService4TestCaseOperation2Input) (*OutputService4TestShapeOutputShape, error) { + req, out := c.OutputService4TestCaseOperation2Request(input) + err := req.Send() + return out, err +} + +type OutputService4TestShapeOutputService4TestCaseOperation1Input struct { + _ struct{} `type:"structure"` +} + +type OutputService4TestShapeOutputService4TestCaseOperation2Input struct { + _ struct{} `type:"structure"` +} + +type OutputService4TestShapeOutputShape struct { + _ struct{} `type:"structure"` + + ListMember []*string `type:"list"` + + ListMemberMap []map[string]*string `type:"list"` + + ListMemberStruct []*OutputService4TestShapeStructType `type:"list"` +} + +type OutputService4TestShapeStructType struct { + _ struct{} `type:"structure"` +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type OutputService5ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the OutputService5ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a OutputService5ProtocolTest client from just a session. +// svc := outputservice5protocoltest.New(mySession) +// +// // Create a OutputService5ProtocolTest client with additional configuration +// svc := outputservice5protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewOutputService5ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *OutputService5ProtocolTest { + c := p.ClientConfig("outputservice5protocoltest", cfgs...) + return newOutputService5ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newOutputService5ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *OutputService5ProtocolTest { + svc := &OutputService5ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "outputservice5protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "", + JSONVersion: "", + TargetPrefix: "", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Build.PushBack(jsonrpc.Build) + svc.Handlers.Unmarshal.PushBack(jsonrpc.Unmarshal) + svc.Handlers.UnmarshalMeta.PushBack(jsonrpc.UnmarshalMeta) + svc.Handlers.UnmarshalError.PushBack(jsonrpc.UnmarshalError) + + return svc +} + +// newRequest creates a new request for a OutputService5ProtocolTest operation and runs any +// custom request initialization. 
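+// (No custom initialization is registered for these generated test clients,
+// so newRequest is a thin wrapper over client.NewRequest.)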
+func (c *OutputService5ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opOutputService5TestCaseOperation1 = "OperationName" + +// OutputService5TestCaseOperation1Request generates a request for the OutputService5TestCaseOperation1 operation. +func (c *OutputService5ProtocolTest) OutputService5TestCaseOperation1Request(input *OutputService5TestShapeOutputService5TestCaseOperation1Input) (req *request.Request, output *OutputService5TestShapeOutputService5TestCaseOperation1Output) { + op := &request.Operation{ + Name: opOutputService5TestCaseOperation1, + } + + if input == nil { + input = &OutputService5TestShapeOutputService5TestCaseOperation1Input{} + } + + req = c.newRequest(op, input, output) + output = &OutputService5TestShapeOutputService5TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *OutputService5ProtocolTest) OutputService5TestCaseOperation1(input *OutputService5TestShapeOutputService5TestCaseOperation1Input) (*OutputService5TestShapeOutputService5TestCaseOperation1Output, error) { + req, out := c.OutputService5TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +type OutputService5TestShapeOutputService5TestCaseOperation1Input struct { + _ struct{} `type:"structure"` +} + +type OutputService5TestShapeOutputService5TestCaseOperation1Output struct { + _ struct{} `type:"structure"` + + MapMember map[string][]*int64 `type:"map"` +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type OutputService6ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the OutputService6ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a OutputService6ProtocolTest client from just a session. +// svc := outputservice6protocoltest.New(mySession) +// +// // Create a OutputService6ProtocolTest client with additional configuration +// svc := outputservice6protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewOutputService6ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *OutputService6ProtocolTest { + c := p.ClientConfig("outputservice6protocoltest", cfgs...) + return newOutputService6ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newOutputService6ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *OutputService6ProtocolTest { + svc := &OutputService6ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "outputservice6protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "", + JSONVersion: "", + TargetPrefix: "", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Build.PushBack(jsonrpc.Build) + svc.Handlers.Unmarshal.PushBack(jsonrpc.Unmarshal) + svc.Handlers.UnmarshalMeta.PushBack(jsonrpc.UnmarshalMeta) + svc.Handlers.UnmarshalError.PushBack(jsonrpc.UnmarshalError) + + return svc +} + +// newRequest creates a new request for a OutputService6ProtocolTest operation and runs any +// custom request initialization. 
+func (c *OutputService6ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opOutputService6TestCaseOperation1 = "OperationName" + +// OutputService6TestCaseOperation1Request generates a request for the OutputService6TestCaseOperation1 operation. +func (c *OutputService6ProtocolTest) OutputService6TestCaseOperation1Request(input *OutputService6TestShapeOutputService6TestCaseOperation1Input) (req *request.Request, output *OutputService6TestShapeOutputService6TestCaseOperation1Output) { + op := &request.Operation{ + Name: opOutputService6TestCaseOperation1, + } + + if input == nil { + input = &OutputService6TestShapeOutputService6TestCaseOperation1Input{} + } + + req = c.newRequest(op, input, output) + output = &OutputService6TestShapeOutputService6TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *OutputService6ProtocolTest) OutputService6TestCaseOperation1(input *OutputService6TestShapeOutputService6TestCaseOperation1Input) (*OutputService6TestShapeOutputService6TestCaseOperation1Output, error) { + req, out := c.OutputService6TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +type OutputService6TestShapeOutputService6TestCaseOperation1Input struct { + _ struct{} `type:"structure"` +} + +type OutputService6TestShapeOutputService6TestCaseOperation1Output struct { + _ struct{} `type:"structure"` + + StrType *string `type:"string"` +} + +// +// Tests begin here +// + +func TestOutputService1ProtocolTestScalarMembersCase1(t *testing.T) { + sess := session.New() + svc := NewOutputService1ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + + buf := bytes.NewReader([]byte("{\"Str\": \"myname\", \"Num\": 123, \"FalseBool\": false, \"TrueBool\": true, \"Float\": 1.2, \"Double\": 1.3, \"Long\": 200, \"Char\": \"a\"}")) + req, out := svc.OutputService1TestCaseOperation1Request(nil) + req.HTTPResponse = &http.Response{StatusCode: 200, Body: ioutil.NopCloser(buf), Header: http.Header{}} + + // set headers + + // unmarshal response + jsonrpc.UnmarshalMeta(req) + jsonrpc.Unmarshal(req) + assert.NoError(t, req.Error) + + // assert response + assert.NotNil(t, out) // ensure out variable is used + assert.Equal(t, "a", *out.Char) + assert.Equal(t, 1.3, *out.Double) + assert.Equal(t, false, *out.FalseBool) + assert.Equal(t, 1.2, *out.Float) + assert.Equal(t, int64(200), *out.Long) + assert.Equal(t, int64(123), *out.Num) + assert.Equal(t, "myname", *out.Str) + assert.Equal(t, true, *out.TrueBool) + +} + +func TestOutputService2ProtocolTestBlobMembersCase1(t *testing.T) { + sess := session.New() + svc := NewOutputService2ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + + buf := bytes.NewReader([]byte("{\"BlobMember\": \"aGkh\", \"StructMember\": {\"foo\": \"dGhlcmUh\"}}")) + req, out := svc.OutputService2TestCaseOperation1Request(nil) + req.HTTPResponse = &http.Response{StatusCode: 200, Body: ioutil.NopCloser(buf), Header: http.Header{}} + + // set headers + + // unmarshal response + jsonrpc.UnmarshalMeta(req) + jsonrpc.Unmarshal(req) + assert.NoError(t, req.Error) + + // assert response + assert.NotNil(t, out) // ensure out variable is used + assert.Equal(t, "hi!", string(out.BlobMember)) + assert.Equal(t, "there!", string(out.StructMember.Foo)) + +} + +func TestOutputService3ProtocolTestTimestampMembersCase1(t *testing.T) { + sess := session.New() + svc := NewOutputService3ProtocolTest(sess, &aws.Config{Endpoint: 
aws.String("https://test")}) + + buf := bytes.NewReader([]byte("{\"TimeMember\": 1398796238, \"StructMember\": {\"foo\": 1398796238}}")) + req, out := svc.OutputService3TestCaseOperation1Request(nil) + req.HTTPResponse = &http.Response{StatusCode: 200, Body: ioutil.NopCloser(buf), Header: http.Header{}} + + // set headers + + // unmarshal response + jsonrpc.UnmarshalMeta(req) + jsonrpc.Unmarshal(req) + assert.NoError(t, req.Error) + + // assert response + assert.NotNil(t, out) // ensure out variable is used + assert.Equal(t, time.Unix(1.398796238e+09, 0).UTC().String(), out.StructMember.Foo.String()) + assert.Equal(t, time.Unix(1.398796238e+09, 0).UTC().String(), out.TimeMember.String()) + +} + +func TestOutputService4ProtocolTestListsCase1(t *testing.T) { + sess := session.New() + svc := NewOutputService4ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + + buf := bytes.NewReader([]byte("{\"ListMember\": [\"a\", \"b\"]}")) + req, out := svc.OutputService4TestCaseOperation1Request(nil) + req.HTTPResponse = &http.Response{StatusCode: 200, Body: ioutil.NopCloser(buf), Header: http.Header{}} + + // set headers + + // unmarshal response + jsonrpc.UnmarshalMeta(req) + jsonrpc.Unmarshal(req) + assert.NoError(t, req.Error) + + // assert response + assert.NotNil(t, out) // ensure out variable is used + assert.Equal(t, "a", *out.ListMember[0]) + assert.Equal(t, "b", *out.ListMember[1]) + +} + +func TestOutputService4ProtocolTestListsCase2(t *testing.T) { + sess := session.New() + svc := NewOutputService4ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + + buf := bytes.NewReader([]byte("{\"ListMember\": [\"a\", null], \"ListMemberMap\": [{}, null, null, {}], \"ListMemberStruct\": [{}, null, null, {}]}")) + req, out := svc.OutputService4TestCaseOperation2Request(nil) + req.HTTPResponse = &http.Response{StatusCode: 200, Body: ioutil.NopCloser(buf), Header: http.Header{}} + + // set headers + + // unmarshal response + jsonrpc.UnmarshalMeta(req) + jsonrpc.Unmarshal(req) + assert.NoError(t, req.Error) + + // assert response + assert.NotNil(t, out) // ensure out variable is used + assert.Equal(t, "a", *out.ListMember[0]) + assert.Nil(t, out.ListMember[1]) + assert.Nil(t, out.ListMemberMap[1]) + assert.Nil(t, out.ListMemberMap[2]) + assert.Nil(t, out.ListMemberStruct[1]) + assert.Nil(t, out.ListMemberStruct[2]) + +} + +func TestOutputService5ProtocolTestMapsCase1(t *testing.T) { + sess := session.New() + svc := NewOutputService5ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + + buf := bytes.NewReader([]byte("{\"MapMember\": {\"a\": [1, 2], \"b\": [3, 4]}}")) + req, out := svc.OutputService5TestCaseOperation1Request(nil) + req.HTTPResponse = &http.Response{StatusCode: 200, Body: ioutil.NopCloser(buf), Header: http.Header{}} + + // set headers + + // unmarshal response + jsonrpc.UnmarshalMeta(req) + jsonrpc.Unmarshal(req) + assert.NoError(t, req.Error) + + // assert response + assert.NotNil(t, out) // ensure out variable is used + assert.Equal(t, int64(1), *out.MapMember["a"][0]) + assert.Equal(t, int64(2), *out.MapMember["a"][1]) + assert.Equal(t, int64(3), *out.MapMember["b"][0]) + assert.Equal(t, int64(4), *out.MapMember["b"][1]) + +} + +func TestOutputService6ProtocolTestIgnoresExtraDataCase1(t *testing.T) { + sess := session.New() + svc := NewOutputService6ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + + buf := bytes.NewReader([]byte("{\"foo\": \"bar\"}")) + req, out := 
svc.OutputService6TestCaseOperation1Request(nil) + req.HTTPResponse = &http.Response{StatusCode: 200, Body: ioutil.NopCloser(buf), Header: http.Header{}} + + // set headers + + // unmarshal response + jsonrpc.UnmarshalMeta(req) + jsonrpc.Unmarshal(req) + assert.NoError(t, req.Error) + + // assert response + assert.NotNil(t, out) // ensure out variable is used + +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/query/build.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/build.go new file mode 100644 index 000000000..2d78c35c2 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/build.go @@ -0,0 +1,33 @@ +// Package query provides serialisation of AWS query requests, and responses. +package query + +//go:generate go run ../../../models/protocol_tests/generate.go ../../../models/protocol_tests/input/query.json build_test.go + +import ( + "net/url" + + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol/query/queryutil" +) + +// Build builds a request for an AWS Query service. +func Build(r *request.Request) { + body := url.Values{ + "Action": {r.Operation.Name}, + "Version": {r.ClientInfo.APIVersion}, + } + if err := queryutil.Parse(body, r.Params, false); err != nil { + r.Error = awserr.New("SerializationError", "failed encoding Query request", err) + return + } + + if r.ExpireTime == 0 { + r.HTTPRequest.Method = "POST" + r.HTTPRequest.Header.Set("Content-Type", "application/x-www-form-urlencoded; charset=utf-8") + r.SetBufferBody([]byte(body.Encode())) + } else { // This is a pre-signed request + r.HTTPRequest.Method = "GET" + r.HTTPRequest.URL.RawQuery = body.Encode() + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/query/build_test.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/build_test.go new file mode 100644 index 000000000..6588d8f9c --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/build_test.go @@ -0,0 +1,1999 @@ +package query_test + +import ( + "bytes" + "encoding/json" + "encoding/xml" + "io" + "io/ioutil" + "net/http" + "net/url" + "testing" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/client" + "github.com/aws/aws-sdk-go/aws/client/metadata" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/aws/session" + "github.com/aws/aws-sdk-go/awstesting" + "github.com/aws/aws-sdk-go/private/protocol/query" + "github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil" + "github.com/aws/aws-sdk-go/private/signer/v4" + "github.com/aws/aws-sdk-go/private/util" + "github.com/stretchr/testify/assert" +) + +var _ bytes.Buffer // always import bytes +var _ http.Request +var _ json.Marshaler +var _ time.Time +var _ xmlutil.XMLNode +var _ xml.Attr +var _ = awstesting.GenerateAssertions +var _ = ioutil.Discard +var _ = util.Trim("") +var _ = url.Values{} +var _ = io.EOF +var _ = aws.String + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type InputService1ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the InputService1ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a InputService1ProtocolTest client from just a session. 
+// svc := inputservice1protocoltest.New(mySession) +// +// // Create a InputService1ProtocolTest client with additional configuration +// svc := inputservice1protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewInputService1ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *InputService1ProtocolTest { + c := p.ClientConfig("inputservice1protocoltest", cfgs...) + return newInputService1ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newInputService1ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *InputService1ProtocolTest { + svc := &InputService1ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "inputservice1protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "2014-01-01", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Build.PushBack(query.Build) + svc.Handlers.Unmarshal.PushBack(query.Unmarshal) + svc.Handlers.UnmarshalMeta.PushBack(query.UnmarshalMeta) + svc.Handlers.UnmarshalError.PushBack(query.UnmarshalError) + + return svc +} + +// newRequest creates a new request for a InputService1ProtocolTest operation and runs any +// custom request initialization. +func (c *InputService1ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opInputService1TestCaseOperation1 = "OperationName" + +// InputService1TestCaseOperation1Request generates a request for the InputService1TestCaseOperation1 operation. +func (c *InputService1ProtocolTest) InputService1TestCaseOperation1Request(input *InputService1TestShapeInputShape) (req *request.Request, output *InputService1TestShapeInputService1TestCaseOperation1Output) { + op := &request.Operation{ + Name: opInputService1TestCaseOperation1, + } + + if input == nil { + input = &InputService1TestShapeInputShape{} + } + + req = c.newRequest(op, input, output) + output = &InputService1TestShapeInputService1TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *InputService1ProtocolTest) InputService1TestCaseOperation1(input *InputService1TestShapeInputShape) (*InputService1TestShapeInputService1TestCaseOperation1Output, error) { + req, out := c.InputService1TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +const opInputService1TestCaseOperation2 = "OperationName" + +// InputService1TestCaseOperation2Request generates a request for the InputService1TestCaseOperation2 operation. 
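+//
+// For this query-protocol client, query.Build serialises the input into a
+// form-encoded POST body; a sketch of the wire format (the Bar value is
+// illustrative):
+//
+//	Action=OperationName&Bar=val&Version=2014-01-01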
+func (c *InputService1ProtocolTest) InputService1TestCaseOperation2Request(input *InputService1TestShapeInputShape) (req *request.Request, output *InputService1TestShapeInputService1TestCaseOperation2Output) { + op := &request.Operation{ + Name: opInputService1TestCaseOperation2, + } + + if input == nil { + input = &InputService1TestShapeInputShape{} + } + + req = c.newRequest(op, input, output) + output = &InputService1TestShapeInputService1TestCaseOperation2Output{} + req.Data = output + return +} + +func (c *InputService1ProtocolTest) InputService1TestCaseOperation2(input *InputService1TestShapeInputShape) (*InputService1TestShapeInputService1TestCaseOperation2Output, error) { + req, out := c.InputService1TestCaseOperation2Request(input) + err := req.Send() + return out, err +} + +const opInputService1TestCaseOperation3 = "OperationName" + +// InputService1TestCaseOperation3Request generates a request for the InputService1TestCaseOperation3 operation. +func (c *InputService1ProtocolTest) InputService1TestCaseOperation3Request(input *InputService1TestShapeInputShape) (req *request.Request, output *InputService1TestShapeInputService1TestCaseOperation3Output) { + op := &request.Operation{ + Name: opInputService1TestCaseOperation3, + } + + if input == nil { + input = &InputService1TestShapeInputShape{} + } + + req = c.newRequest(op, input, output) + output = &InputService1TestShapeInputService1TestCaseOperation3Output{} + req.Data = output + return +} + +func (c *InputService1ProtocolTest) InputService1TestCaseOperation3(input *InputService1TestShapeInputShape) (*InputService1TestShapeInputService1TestCaseOperation3Output, error) { + req, out := c.InputService1TestCaseOperation3Request(input) + err := req.Send() + return out, err +} + +type InputService1TestShapeInputService1TestCaseOperation1Output struct { + _ struct{} `type:"structure"` +} + +type InputService1TestShapeInputService1TestCaseOperation2Output struct { + _ struct{} `type:"structure"` +} + +type InputService1TestShapeInputService1TestCaseOperation3Output struct { + _ struct{} `type:"structure"` +} + +type InputService1TestShapeInputShape struct { + _ struct{} `type:"structure"` + + Bar *string `type:"string"` + + Baz *bool `type:"boolean"` + + Foo *string `type:"string"` +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type InputService2ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the InputService2ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a InputService2ProtocolTest client from just a session. +// svc := inputservice2protocoltest.New(mySession) +// +// // Create a InputService2ProtocolTest client with additional configuration +// svc := inputservice2protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewInputService2ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *InputService2ProtocolTest { + c := p.ClientConfig("inputservice2protocoltest", cfgs...) + return newInputService2ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. 
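+// Clients built here use query.Build, which POSTs a form-encoded body for
+// normal requests and, when r.ExpireTime is non-zero (a pre-signed request),
+// switches to GET with the parameters placed in the query string.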
+func newInputService2ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *InputService2ProtocolTest { + svc := &InputService2ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "inputservice2protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "2014-01-01", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Build.PushBack(query.Build) + svc.Handlers.Unmarshal.PushBack(query.Unmarshal) + svc.Handlers.UnmarshalMeta.PushBack(query.UnmarshalMeta) + svc.Handlers.UnmarshalError.PushBack(query.UnmarshalError) + + return svc +} + +// newRequest creates a new request for a InputService2ProtocolTest operation and runs any +// custom request initialization. +func (c *InputService2ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opInputService2TestCaseOperation1 = "OperationName" + +// InputService2TestCaseOperation1Request generates a request for the InputService2TestCaseOperation1 operation. +func (c *InputService2ProtocolTest) InputService2TestCaseOperation1Request(input *InputService2TestShapeInputService2TestCaseOperation1Input) (req *request.Request, output *InputService2TestShapeInputService2TestCaseOperation1Output) { + op := &request.Operation{ + Name: opInputService2TestCaseOperation1, + } + + if input == nil { + input = &InputService2TestShapeInputService2TestCaseOperation1Input{} + } + + req = c.newRequest(op, input, output) + output = &InputService2TestShapeInputService2TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *InputService2ProtocolTest) InputService2TestCaseOperation1(input *InputService2TestShapeInputService2TestCaseOperation1Input) (*InputService2TestShapeInputService2TestCaseOperation1Output, error) { + req, out := c.InputService2TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +type InputService2TestShapeInputService2TestCaseOperation1Input struct { + _ struct{} `type:"structure"` + + StructArg *InputService2TestShapeStructType `type:"structure"` +} + +type InputService2TestShapeInputService2TestCaseOperation1Output struct { + _ struct{} `type:"structure"` +} + +type InputService2TestShapeStructType struct { + _ struct{} `type:"structure"` + + ScalarArg *string `type:"string"` +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type InputService3ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the InputService3ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a InputService3ProtocolTest client from just a session. +// svc := inputservice3protocoltest.New(mySession) +// +// // Create a InputService3ProtocolTest client with additional configuration +// svc := inputservice3protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewInputService3ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *InputService3ProtocolTest { + c := p.ClientConfig("inputservice3protocoltest", cfgs...) + return newInputService3ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. 
+func newInputService3ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *InputService3ProtocolTest {
+	svc := &InputService3ProtocolTest{
+		Client: client.New(
+			cfg,
+			metadata.ClientInfo{
+				ServiceName:   "inputservice3protocoltest",
+				SigningRegion: signingRegion,
+				Endpoint:      endpoint,
+				APIVersion:    "2014-01-01",
+			},
+			handlers,
+		),
+	}
+
+	// Handlers
+	svc.Handlers.Sign.PushBack(v4.Sign)
+	svc.Handlers.Build.PushBack(query.Build)
+	svc.Handlers.Unmarshal.PushBack(query.Unmarshal)
+	svc.Handlers.UnmarshalMeta.PushBack(query.UnmarshalMeta)
+	svc.Handlers.UnmarshalError.PushBack(query.UnmarshalError)
+
+	return svc
+}
+
+// newRequest creates a new request for an InputService3ProtocolTest operation and runs any
+// custom request initialization.
+func (c *InputService3ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request {
+	req := c.NewRequest(op, params, data)
+
+	return req
+}
+
+const opInputService3TestCaseOperation1 = "OperationName"
+
+// InputService3TestCaseOperation1Request generates a request for the InputService3TestCaseOperation1 operation.
+func (c *InputService3ProtocolTest) InputService3TestCaseOperation1Request(input *InputService3TestShapeInputShape) (req *request.Request, output *InputService3TestShapeInputService3TestCaseOperation1Output) {
+	op := &request.Operation{
+		Name: opInputService3TestCaseOperation1,
+	}
+
+	if input == nil {
+		input = &InputService3TestShapeInputShape{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &InputService3TestShapeInputService3TestCaseOperation1Output{}
+	req.Data = output
+	return
+}
+
+func (c *InputService3ProtocolTest) InputService3TestCaseOperation1(input *InputService3TestShapeInputShape) (*InputService3TestShapeInputService3TestCaseOperation1Output, error) {
+	req, out := c.InputService3TestCaseOperation1Request(input)
+	err := req.Send()
+	return out, err
+}
+
+const opInputService3TestCaseOperation2 = "OperationName"
+
+// InputService3TestCaseOperation2Request generates a request for the InputService3TestCaseOperation2 operation.
+func (c *InputService3ProtocolTest) InputService3TestCaseOperation2Request(input *InputService3TestShapeInputShape) (req *request.Request, output *InputService3TestShapeInputService3TestCaseOperation2Output) {
+	op := &request.Operation{
+		Name: opInputService3TestCaseOperation2,
+	}
+
+	if input == nil {
+		input = &InputService3TestShapeInputShape{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &InputService3TestShapeInputService3TestCaseOperation2Output{}
+	req.Data = output
+	return
+}
+
+func (c *InputService3ProtocolTest) InputService3TestCaseOperation2(input *InputService3TestShapeInputShape) (*InputService3TestShapeInputService3TestCaseOperation2Output, error) {
+	req, out := c.InputService3TestCaseOperation2Request(input)
+	err := req.Send()
+	return out, err
+}
+
+type InputService3TestShapeInputService3TestCaseOperation1Output struct {
+	_ struct{} `type:"structure"`
+}
+
+type InputService3TestShapeInputService3TestCaseOperation2Output struct {
+	_ struct{} `type:"structure"`
+}
+
+type InputService3TestShapeInputShape struct {
+	_ struct{} `type:"structure"`
+
+	ListArg []*string `type:"list"`
+}
+
+// The service client's operations are safe to be used concurrently.
+// However, it is not safe to mutate any of the client's properties.
+type InputService4ProtocolTest struct {
+	*client.Client
+}
+
+// New creates a new instance of the InputService4ProtocolTest client with a session.
+// If additional configuration is needed for the client instance, use the optional
+// aws.Config parameter to add your extra config.
+//
+// Example:
+//     // Create an InputService4ProtocolTest client from just a session.
+//     svc := inputservice4protocoltest.New(mySession)
+//
+//     // Create an InputService4ProtocolTest client with additional configuration
+//     svc := inputservice4protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2"))
+func NewInputService4ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *InputService4ProtocolTest {
+	c := p.ClientConfig("inputservice4protocoltest", cfgs...)
+	return newInputService4ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion)
+}
+
+// newClient creates, initializes and returns a new service client instance.
+func newInputService4ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *InputService4ProtocolTest {
+	svc := &InputService4ProtocolTest{
+		Client: client.New(
+			cfg,
+			metadata.ClientInfo{
+				ServiceName:   "inputservice4protocoltest",
+				SigningRegion: signingRegion,
+				Endpoint:      endpoint,
+				APIVersion:    "2014-01-01",
+			},
+			handlers,
+		),
+	}
+
+	// Handlers
+	svc.Handlers.Sign.PushBack(v4.Sign)
+	svc.Handlers.Build.PushBack(query.Build)
+	svc.Handlers.Unmarshal.PushBack(query.Unmarshal)
+	svc.Handlers.UnmarshalMeta.PushBack(query.UnmarshalMeta)
+	svc.Handlers.UnmarshalError.PushBack(query.UnmarshalError)
+
+	return svc
+}
+
+// newRequest creates a new request for an InputService4ProtocolTest operation and runs any
+// custom request initialization.
+func (c *InputService4ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request {
+	req := c.NewRequest(op, params, data)
+
+	return req
+}
+
+const opInputService4TestCaseOperation1 = "OperationName"
+
+// InputService4TestCaseOperation1Request generates a request for the InputService4TestCaseOperation1 operation.
+func (c *InputService4ProtocolTest) InputService4TestCaseOperation1Request(input *InputService4TestShapeInputShape) (req *request.Request, output *InputService4TestShapeInputService4TestCaseOperation1Output) {
+	op := &request.Operation{
+		Name: opInputService4TestCaseOperation1,
+	}
+
+	if input == nil {
+		input = &InputService4TestShapeInputShape{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &InputService4TestShapeInputService4TestCaseOperation1Output{}
+	req.Data = output
+	return
+}
+
+func (c *InputService4ProtocolTest) InputService4TestCaseOperation1(input *InputService4TestShapeInputShape) (*InputService4TestShapeInputService4TestCaseOperation1Output, error) {
+	req, out := c.InputService4TestCaseOperation1Request(input)
+	err := req.Send()
+	return out, err
+}
+
+const opInputService4TestCaseOperation2 = "OperationName"
+
+// InputService4TestCaseOperation2Request generates a request for the InputService4TestCaseOperation2 operation.
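+//
+// A minimal usage sketch (illustrative only; every generated operation in
+// this file follows the same Request/Send pattern):
+//
+//     req, out := svc.InputService4TestCaseOperation2Request(nil) // nil input becomes an empty shape
+//     err := req.Send()
+//     _, _ = out, err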
+func (c *InputService4ProtocolTest) InputService4TestCaseOperation2Request(input *InputService4TestShapeInputShape) (req *request.Request, output *InputService4TestShapeInputService4TestCaseOperation2Output) {
+	op := &request.Operation{
+		Name: opInputService4TestCaseOperation2,
+	}
+
+	if input == nil {
+		input = &InputService4TestShapeInputShape{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &InputService4TestShapeInputService4TestCaseOperation2Output{}
+	req.Data = output
+	return
+}
+
+func (c *InputService4ProtocolTest) InputService4TestCaseOperation2(input *InputService4TestShapeInputShape) (*InputService4TestShapeInputService4TestCaseOperation2Output, error) {
+	req, out := c.InputService4TestCaseOperation2Request(input)
+	err := req.Send()
+	return out, err
+}
+
+type InputService4TestShapeInputService4TestCaseOperation1Output struct {
+	_ struct{} `type:"structure"`
+}
+
+type InputService4TestShapeInputService4TestCaseOperation2Output struct {
+	_ struct{} `type:"structure"`
+}
+
+type InputService4TestShapeInputShape struct {
+	_ struct{} `type:"structure"`
+
+	ListArg []*string `type:"list" flattened:"true"`
+
+	NamedListArg []*string `locationNameList:"Foo" type:"list" flattened:"true"`
+
+	ScalarArg *string `type:"string"`
+}
+
+// The service client's operations are safe to be used concurrently.
+// However, it is not safe to mutate any of the client's properties.
+type InputService5ProtocolTest struct {
+	*client.Client
+}
+
+// New creates a new instance of the InputService5ProtocolTest client with a session.
+// If additional configuration is needed for the client instance, use the optional
+// aws.Config parameter to add your extra config.
+//
+// Example:
+//     // Create an InputService5ProtocolTest client from just a session.
+//     svc := inputservice5protocoltest.New(mySession)
+//
+//     // Create an InputService5ProtocolTest client with additional configuration
+//     svc := inputservice5protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2"))
+func NewInputService5ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *InputService5ProtocolTest {
+	c := p.ClientConfig("inputservice5protocoltest", cfgs...)
+	return newInputService5ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion)
+}
+
+// newClient creates, initializes and returns a new service client instance.
+func newInputService5ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *InputService5ProtocolTest {
+	svc := &InputService5ProtocolTest{
+		Client: client.New(
+			cfg,
+			metadata.ClientInfo{
+				ServiceName:   "inputservice5protocoltest",
+				SigningRegion: signingRegion,
+				Endpoint:      endpoint,
+				APIVersion:    "2014-01-01",
+			},
+			handlers,
+		),
+	}
+
+	// Handlers
+	svc.Handlers.Sign.PushBack(v4.Sign)
+	svc.Handlers.Build.PushBack(query.Build)
+	svc.Handlers.Unmarshal.PushBack(query.Unmarshal)
+	svc.Handlers.UnmarshalMeta.PushBack(query.UnmarshalMeta)
+	svc.Handlers.UnmarshalError.PushBack(query.UnmarshalError)
+
+	return svc
+}
+
+// newRequest creates a new request for an InputService5ProtocolTest operation and runs any
+// custom request initialization.
+func (c *InputService5ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request {
+	req := c.NewRequest(op, params, data)
+
+	return req
+}
+
+const opInputService5TestCaseOperation1 = "OperationName"
+
+// InputService5TestCaseOperation1Request generates a request for the InputService5TestCaseOperation1 operation.
+func (c *InputService5ProtocolTest) InputService5TestCaseOperation1Request(input *InputService5TestShapeInputService5TestCaseOperation1Input) (req *request.Request, output *InputService5TestShapeInputService5TestCaseOperation1Output) {
+	op := &request.Operation{
+		Name: opInputService5TestCaseOperation1,
+	}
+
+	if input == nil {
+		input = &InputService5TestShapeInputService5TestCaseOperation1Input{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &InputService5TestShapeInputService5TestCaseOperation1Output{}
+	req.Data = output
+	return
+}
+
+func (c *InputService5ProtocolTest) InputService5TestCaseOperation1(input *InputService5TestShapeInputService5TestCaseOperation1Input) (*InputService5TestShapeInputService5TestCaseOperation1Output, error) {
+	req, out := c.InputService5TestCaseOperation1Request(input)
+	err := req.Send()
+	return out, err
+}
+
+type InputService5TestShapeInputService5TestCaseOperation1Input struct {
+	_ struct{} `type:"structure"`
+
+	MapArg map[string]*string `type:"map" flattened:"true"`
+}
+
+type InputService5TestShapeInputService5TestCaseOperation1Output struct {
+	_ struct{} `type:"structure"`
+}
+
+// The service client's operations are safe to be used concurrently.
+// However, it is not safe to mutate any of the client's properties.
+type InputService6ProtocolTest struct {
+	*client.Client
+}
+
+// New creates a new instance of the InputService6ProtocolTest client with a session.
+// If additional configuration is needed for the client instance, use the optional
+// aws.Config parameter to add your extra config.
+//
+// Example:
+//     // Create an InputService6ProtocolTest client from just a session.
+//     svc := inputservice6protocoltest.New(mySession)
+//
+//     // Create an InputService6ProtocolTest client with additional configuration
+//     svc := inputservice6protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2"))
+func NewInputService6ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *InputService6ProtocolTest {
+	c := p.ClientConfig("inputservice6protocoltest", cfgs...)
+	return newInputService6ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion)
+}
+
+// newClient creates, initializes and returns a new service client instance.
+func newInputService6ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *InputService6ProtocolTest {
+	svc := &InputService6ProtocolTest{
+		Client: client.New(
+			cfg,
+			metadata.ClientInfo{
+				ServiceName:   "inputservice6protocoltest",
+				SigningRegion: signingRegion,
+				Endpoint:      endpoint,
+				APIVersion:    "2014-01-01",
+			},
+			handlers,
+		),
+	}
+
+	// Handlers
+	svc.Handlers.Sign.PushBack(v4.Sign)
+	svc.Handlers.Build.PushBack(query.Build)
+	svc.Handlers.Unmarshal.PushBack(query.Unmarshal)
+	svc.Handlers.UnmarshalMeta.PushBack(query.UnmarshalMeta)
+	svc.Handlers.UnmarshalError.PushBack(query.UnmarshalError)
+
+	return svc
+}
+
+// newRequest creates a new request for an InputService6ProtocolTest operation and runs any
+// custom request initialization.
+func (c *InputService6ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request {
+	req := c.NewRequest(op, params, data)
+
+	return req
+}
+
+const opInputService6TestCaseOperation1 = "OperationName"
+
+// InputService6TestCaseOperation1Request generates a request for the InputService6TestCaseOperation1 operation.
+func (c *InputService6ProtocolTest) InputService6TestCaseOperation1Request(input *InputService6TestShapeInputService6TestCaseOperation1Input) (req *request.Request, output *InputService6TestShapeInputService6TestCaseOperation1Output) {
+	op := &request.Operation{
+		Name: opInputService6TestCaseOperation1,
+	}
+
+	if input == nil {
+		input = &InputService6TestShapeInputService6TestCaseOperation1Input{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &InputService6TestShapeInputService6TestCaseOperation1Output{}
+	req.Data = output
+	return
+}
+
+func (c *InputService6ProtocolTest) InputService6TestCaseOperation1(input *InputService6TestShapeInputService6TestCaseOperation1Input) (*InputService6TestShapeInputService6TestCaseOperation1Output, error) {
+	req, out := c.InputService6TestCaseOperation1Request(input)
+	err := req.Send()
+	return out, err
+}
+
+type InputService6TestShapeInputService6TestCaseOperation1Input struct {
+	_ struct{} `type:"structure"`
+
+	ListArg []*string `locationNameList:"item" type:"list"`
+}
+
+type InputService6TestShapeInputService6TestCaseOperation1Output struct {
+	_ struct{} `type:"structure"`
+}
+
+// The service client's operations are safe to be used concurrently.
+// However, it is not safe to mutate any of the client's properties.
+type InputService7ProtocolTest struct {
+	*client.Client
+}
+
+// New creates a new instance of the InputService7ProtocolTest client with a session.
+// If additional configuration is needed for the client instance, use the optional
+// aws.Config parameter to add your extra config.
+//
+// Example:
+//     // Create an InputService7ProtocolTest client from just a session.
+//     svc := inputservice7protocoltest.New(mySession)
+//
+//     // Create an InputService7ProtocolTest client with additional configuration
+//     svc := inputservice7protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2"))
+func NewInputService7ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *InputService7ProtocolTest {
+	c := p.ClientConfig("inputservice7protocoltest", cfgs...)
+	return newInputService7ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion)
+}
+
+// newClient creates, initializes and returns a new service client instance.
+func newInputService7ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *InputService7ProtocolTest {
+	svc := &InputService7ProtocolTest{
+		Client: client.New(
+			cfg,
+			metadata.ClientInfo{
+				ServiceName:   "inputservice7protocoltest",
+				SigningRegion: signingRegion,
+				Endpoint:      endpoint,
+				APIVersion:    "2014-01-01",
+			},
+			handlers,
+		),
+	}
+
+	// Handlers
+	svc.Handlers.Sign.PushBack(v4.Sign)
+	svc.Handlers.Build.PushBack(query.Build)
+	svc.Handlers.Unmarshal.PushBack(query.Unmarshal)
+	svc.Handlers.UnmarshalMeta.PushBack(query.UnmarshalMeta)
+	svc.Handlers.UnmarshalError.PushBack(query.UnmarshalError)
+
+	return svc
+}
+
+// newRequest creates a new request for an InputService7ProtocolTest operation and runs any
+// custom request initialization.
+func (c *InputService7ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request {
+	req := c.NewRequest(op, params, data)
+
+	return req
+}
+
+const opInputService7TestCaseOperation1 = "OperationName"
+
+// InputService7TestCaseOperation1Request generates a request for the InputService7TestCaseOperation1 operation.
+func (c *InputService7ProtocolTest) InputService7TestCaseOperation1Request(input *InputService7TestShapeInputService7TestCaseOperation1Input) (req *request.Request, output *InputService7TestShapeInputService7TestCaseOperation1Output) {
+	op := &request.Operation{
+		Name: opInputService7TestCaseOperation1,
+	}
+
+	if input == nil {
+		input = &InputService7TestShapeInputService7TestCaseOperation1Input{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &InputService7TestShapeInputService7TestCaseOperation1Output{}
+	req.Data = output
+	return
+}
+
+func (c *InputService7ProtocolTest) InputService7TestCaseOperation1(input *InputService7TestShapeInputService7TestCaseOperation1Input) (*InputService7TestShapeInputService7TestCaseOperation1Output, error) {
+	req, out := c.InputService7TestCaseOperation1Request(input)
+	err := req.Send()
+	return out, err
+}
+
+type InputService7TestShapeInputService7TestCaseOperation1Input struct {
+	_ struct{} `type:"structure"`
+
+	ListArg []*string `locationNameList:"ListArgLocation" type:"list" flattened:"true"`
+
+	ScalarArg *string `type:"string"`
+}
+
+type InputService7TestShapeInputService7TestCaseOperation1Output struct {
+	_ struct{} `type:"structure"`
+}
+
+// The service client's operations are safe to be used concurrently.
+// However, it is not safe to mutate any of the client's properties.
+type InputService8ProtocolTest struct {
+	*client.Client
+}
+
+// New creates a new instance of the InputService8ProtocolTest client with a session.
+// If additional configuration is needed for the client instance, use the optional
+// aws.Config parameter to add your extra config.
+//
+// Example:
+//     // Create an InputService8ProtocolTest client from just a session.
+//     svc := inputservice8protocoltest.New(mySession)
+//
+//     // Create an InputService8ProtocolTest client with additional configuration
+//     svc := inputservice8protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2"))
+func NewInputService8ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *InputService8ProtocolTest {
+	c := p.ClientConfig("inputservice8protocoltest", cfgs...)
+	return newInputService8ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion)
+}
+
+// newClient creates, initializes and returns a new service client instance.
+func newInputService8ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *InputService8ProtocolTest {
+	svc := &InputService8ProtocolTest{
+		Client: client.New(
+			cfg,
+			metadata.ClientInfo{
+				ServiceName:   "inputservice8protocoltest",
+				SigningRegion: signingRegion,
+				Endpoint:      endpoint,
+				APIVersion:    "2014-01-01",
+			},
+			handlers,
+		),
+	}
+
+	// Handlers
+	svc.Handlers.Sign.PushBack(v4.Sign)
+	svc.Handlers.Build.PushBack(query.Build)
+	svc.Handlers.Unmarshal.PushBack(query.Unmarshal)
+	svc.Handlers.UnmarshalMeta.PushBack(query.UnmarshalMeta)
+	svc.Handlers.UnmarshalError.PushBack(query.UnmarshalError)
+
+	return svc
+}
+
+// newRequest creates a new request for an InputService8ProtocolTest operation and runs any
+// custom request initialization.
+func (c *InputService8ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request {
+	req := c.NewRequest(op, params, data)
+
+	return req
+}
+
+const opInputService8TestCaseOperation1 = "OperationName"
+
+// InputService8TestCaseOperation1Request generates a request for the InputService8TestCaseOperation1 operation.
+func (c *InputService8ProtocolTest) InputService8TestCaseOperation1Request(input *InputService8TestShapeInputService8TestCaseOperation1Input) (req *request.Request, output *InputService8TestShapeInputService8TestCaseOperation1Output) {
+	op := &request.Operation{
+		Name: opInputService8TestCaseOperation1,
+	}
+
+	if input == nil {
+		input = &InputService8TestShapeInputService8TestCaseOperation1Input{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &InputService8TestShapeInputService8TestCaseOperation1Output{}
+	req.Data = output
+	return
+}
+
+func (c *InputService8ProtocolTest) InputService8TestCaseOperation1(input *InputService8TestShapeInputService8TestCaseOperation1Input) (*InputService8TestShapeInputService8TestCaseOperation1Output, error) {
+	req, out := c.InputService8TestCaseOperation1Request(input)
+	err := req.Send()
+	return out, err
+}
+
+type InputService8TestShapeInputService8TestCaseOperation1Input struct {
+	_ struct{} `type:"structure"`
+
+	MapArg map[string]*string `type:"map"`
+}
+
+type InputService8TestShapeInputService8TestCaseOperation1Output struct {
+	_ struct{} `type:"structure"`
+}
+
+// The service client's operations are safe to be used concurrently.
+// However, it is not safe to mutate any of the client's properties.
+type InputService9ProtocolTest struct {
+	*client.Client
+}
+
+// New creates a new instance of the InputService9ProtocolTest client with a session.
+// If additional configuration is needed for the client instance, use the optional
+// aws.Config parameter to add your extra config.
+//
+// Example:
+//     // Create an InputService9ProtocolTest client from just a session.
+//     svc := inputservice9protocoltest.New(mySession)
+//
+//     // Create an InputService9ProtocolTest client with additional configuration
+//     svc := inputservice9protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2"))
+func NewInputService9ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *InputService9ProtocolTest {
+	c := p.ClientConfig("inputservice9protocoltest", cfgs...)
+	return newInputService9ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion)
+}
+
+// newClient creates, initializes and returns a new service client instance.
+func newInputService9ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *InputService9ProtocolTest {
+	svc := &InputService9ProtocolTest{
+		Client: client.New(
+			cfg,
+			metadata.ClientInfo{
+				ServiceName:   "inputservice9protocoltest",
+				SigningRegion: signingRegion,
+				Endpoint:      endpoint,
+				APIVersion:    "2014-01-01",
+			},
+			handlers,
+		),
+	}
+
+	// Handlers
+	svc.Handlers.Sign.PushBack(v4.Sign)
+	svc.Handlers.Build.PushBack(query.Build)
+	svc.Handlers.Unmarshal.PushBack(query.Unmarshal)
+	svc.Handlers.UnmarshalMeta.PushBack(query.UnmarshalMeta)
+	svc.Handlers.UnmarshalError.PushBack(query.UnmarshalError)
+
+	return svc
+}
+
+// newRequest creates a new request for an InputService9ProtocolTest operation and runs any
+// custom request initialization.
+func (c *InputService9ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request {
+	req := c.NewRequest(op, params, data)
+
+	return req
+}
+
+const opInputService9TestCaseOperation1 = "OperationName"
+
+// InputService9TestCaseOperation1Request generates a request for the InputService9TestCaseOperation1 operation.
+func (c *InputService9ProtocolTest) InputService9TestCaseOperation1Request(input *InputService9TestShapeInputService9TestCaseOperation1Input) (req *request.Request, output *InputService9TestShapeInputService9TestCaseOperation1Output) {
+	op := &request.Operation{
+		Name: opInputService9TestCaseOperation1,
+	}
+
+	if input == nil {
+		input = &InputService9TestShapeInputService9TestCaseOperation1Input{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &InputService9TestShapeInputService9TestCaseOperation1Output{}
+	req.Data = output
+	return
+}
+
+func (c *InputService9ProtocolTest) InputService9TestCaseOperation1(input *InputService9TestShapeInputService9TestCaseOperation1Input) (*InputService9TestShapeInputService9TestCaseOperation1Output, error) {
+	req, out := c.InputService9TestCaseOperation1Request(input)
+	err := req.Send()
+	return out, err
+}
+
+type InputService9TestShapeInputService9TestCaseOperation1Input struct {
+	_ struct{} `type:"structure"`
+
+	MapArg map[string]*string `locationNameKey:"TheKey" locationNameValue:"TheValue" type:"map"`
+}
+
+type InputService9TestShapeInputService9TestCaseOperation1Output struct {
+	_ struct{} `type:"structure"`
+}
+
+// The service client's operations are safe to be used concurrently.
+// However, it is not safe to mutate any of the client's properties.
+type InputService10ProtocolTest struct {
+	*client.Client
+}
+
+// New creates a new instance of the InputService10ProtocolTest client with a session.
+// If additional configuration is needed for the client instance, use the optional
+// aws.Config parameter to add your extra config.
+//
+// Example:
+//     // Create an InputService10ProtocolTest client from just a session.
+//     svc := inputservice10protocoltest.New(mySession)
+//
+//     // Create an InputService10ProtocolTest client with additional configuration
+//     svc := inputservice10protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2"))
+func NewInputService10ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *InputService10ProtocolTest {
+	c := p.ClientConfig("inputservice10protocoltest", cfgs...)
+	return newInputService10ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion)
+}
+
+// newClient creates, initializes and returns a new service client instance.
+func newInputService10ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *InputService10ProtocolTest {
+	svc := &InputService10ProtocolTest{
+		Client: client.New(
+			cfg,
+			metadata.ClientInfo{
+				ServiceName:   "inputservice10protocoltest",
+				SigningRegion: signingRegion,
+				Endpoint:      endpoint,
+				APIVersion:    "2014-01-01",
+			},
+			handlers,
+		),
+	}
+
+	// Handlers
+	svc.Handlers.Sign.PushBack(v4.Sign)
+	svc.Handlers.Build.PushBack(query.Build)
+	svc.Handlers.Unmarshal.PushBack(query.Unmarshal)
+	svc.Handlers.UnmarshalMeta.PushBack(query.UnmarshalMeta)
+	svc.Handlers.UnmarshalError.PushBack(query.UnmarshalError)
+
+	return svc
+}
+
+// newRequest creates a new request for an InputService10ProtocolTest operation and runs any
+// custom request initialization.
+func (c *InputService10ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request {
+	req := c.NewRequest(op, params, data)
+
+	return req
+}
+
+const opInputService10TestCaseOperation1 = "OperationName"
+
+// InputService10TestCaseOperation1Request generates a request for the InputService10TestCaseOperation1 operation.
+func (c *InputService10ProtocolTest) InputService10TestCaseOperation1Request(input *InputService10TestShapeInputService10TestCaseOperation1Input) (req *request.Request, output *InputService10TestShapeInputService10TestCaseOperation1Output) {
+	op := &request.Operation{
+		Name: opInputService10TestCaseOperation1,
+	}
+
+	if input == nil {
+		input = &InputService10TestShapeInputService10TestCaseOperation1Input{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &InputService10TestShapeInputService10TestCaseOperation1Output{}
+	req.Data = output
+	return
+}
+
+func (c *InputService10ProtocolTest) InputService10TestCaseOperation1(input *InputService10TestShapeInputService10TestCaseOperation1Input) (*InputService10TestShapeInputService10TestCaseOperation1Output, error) {
+	req, out := c.InputService10TestCaseOperation1Request(input)
+	err := req.Send()
+	return out, err
+}
+
+type InputService10TestShapeInputService10TestCaseOperation1Input struct {
+	_ struct{} `type:"structure"`
+
+	BlobArg []byte `type:"blob"`
+}
+
+type InputService10TestShapeInputService10TestCaseOperation1Output struct {
+	_ struct{} `type:"structure"`
+}
+
+// The service client's operations are safe to be used concurrently.
+// However, it is not safe to mutate any of the client's properties.
+type InputService11ProtocolTest struct {
+	*client.Client
+}
+
+// New creates a new instance of the InputService11ProtocolTest client with a session.
+// If additional configuration is needed for the client instance, use the optional
+// aws.Config parameter to add your extra config.
+//
+// Example:
+//     // Create an InputService11ProtocolTest client from just a session.
+//     svc := inputservice11protocoltest.New(mySession)
+//
+//     // Create an InputService11ProtocolTest client with additional configuration
+//     svc := inputservice11protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2"))
+func NewInputService11ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *InputService11ProtocolTest {
+	c := p.ClientConfig("inputservice11protocoltest", cfgs...)
+	return newInputService11ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion)
+}
+
+// newClient creates, initializes and returns a new service client instance.
+func newInputService11ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *InputService11ProtocolTest {
+	svc := &InputService11ProtocolTest{
+		Client: client.New(
+			cfg,
+			metadata.ClientInfo{
+				ServiceName:   "inputservice11protocoltest",
+				SigningRegion: signingRegion,
+				Endpoint:      endpoint,
+				APIVersion:    "2014-01-01",
+			},
+			handlers,
+		),
+	}
+
+	// Handlers
+	svc.Handlers.Sign.PushBack(v4.Sign)
+	svc.Handlers.Build.PushBack(query.Build)
+	svc.Handlers.Unmarshal.PushBack(query.Unmarshal)
+	svc.Handlers.UnmarshalMeta.PushBack(query.UnmarshalMeta)
+	svc.Handlers.UnmarshalError.PushBack(query.UnmarshalError)
+
+	return svc
+}
+
+// newRequest creates a new request for an InputService11ProtocolTest operation and runs any
+// custom request initialization.
+func (c *InputService11ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request {
+	req := c.NewRequest(op, params, data)
+
+	return req
+}
+
+const opInputService11TestCaseOperation1 = "OperationName"
+
+// InputService11TestCaseOperation1Request generates a request for the InputService11TestCaseOperation1 operation.
+func (c *InputService11ProtocolTest) InputService11TestCaseOperation1Request(input *InputService11TestShapeInputService11TestCaseOperation1Input) (req *request.Request, output *InputService11TestShapeInputService11TestCaseOperation1Output) {
+	op := &request.Operation{
+		Name: opInputService11TestCaseOperation1,
+	}
+
+	if input == nil {
+		input = &InputService11TestShapeInputService11TestCaseOperation1Input{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &InputService11TestShapeInputService11TestCaseOperation1Output{}
+	req.Data = output
+	return
+}
+
+func (c *InputService11ProtocolTest) InputService11TestCaseOperation1(input *InputService11TestShapeInputService11TestCaseOperation1Input) (*InputService11TestShapeInputService11TestCaseOperation1Output, error) {
+	req, out := c.InputService11TestCaseOperation1Request(input)
+	err := req.Send()
+	return out, err
+}
+
+type InputService11TestShapeInputService11TestCaseOperation1Input struct {
+	_ struct{} `type:"structure"`
+
+	TimeArg *time.Time `type:"timestamp" timestampFormat:"iso8601"`
+}
+
+type InputService11TestShapeInputService11TestCaseOperation1Output struct {
+	_ struct{} `type:"structure"`
+}
+
+// The service client's operations are safe to be used concurrently.
+// However, it is not safe to mutate any of the client's properties.
+type InputService12ProtocolTest struct {
+	*client.Client
+}
+
+// New creates a new instance of the InputService12ProtocolTest client with a session.
+// If additional configuration is needed for the client instance, use the optional
+// aws.Config parameter to add your extra config.
+//
+// Example:
+//     // Create an InputService12ProtocolTest client from just a session.
+//     svc := inputservice12protocoltest.New(mySession)
+//
+//     // Create an InputService12ProtocolTest client with additional configuration
+//     svc := inputservice12protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2"))
+func NewInputService12ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *InputService12ProtocolTest {
+	c := p.ClientConfig("inputservice12protocoltest", cfgs...)
+	return newInputService12ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion)
+}
+
+// newClient creates, initializes and returns a new service client instance.
+func newInputService12ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *InputService12ProtocolTest {
+	svc := &InputService12ProtocolTest{
+		Client: client.New(
+			cfg,
+			metadata.ClientInfo{
+				ServiceName:   "inputservice12protocoltest",
+				SigningRegion: signingRegion,
+				Endpoint:      endpoint,
+				APIVersion:    "2014-01-01",
+			},
+			handlers,
+		),
+	}
+
+	// Handlers
+	svc.Handlers.Sign.PushBack(v4.Sign)
+	svc.Handlers.Build.PushBack(query.Build)
+	svc.Handlers.Unmarshal.PushBack(query.Unmarshal)
+	svc.Handlers.UnmarshalMeta.PushBack(query.UnmarshalMeta)
+	svc.Handlers.UnmarshalError.PushBack(query.UnmarshalError)
+
+	return svc
+}
+
+// newRequest creates a new request for an InputService12ProtocolTest operation and runs any
+// custom request initialization.
+func (c *InputService12ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request {
+	req := c.NewRequest(op, params, data)
+
+	return req
+}
+
+const opInputService12TestCaseOperation1 = "OperationName"
+
+// InputService12TestCaseOperation1Request generates a request for the InputService12TestCaseOperation1 operation.
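+//
+// The InputService12 shapes used below are recursive; each additional level
+// of nesting simply extends the serialized key prefix, e.g.
+// RecursiveStruct.RecursiveStruct.NoRecurse=foo (see the RecursiveShapes
+// test cases at the bottom of this file).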
+func (c *InputService12ProtocolTest) InputService12TestCaseOperation1Request(input *InputService12TestShapeInputShape) (req *request.Request, output *InputService12TestShapeInputService12TestCaseOperation1Output) { + op := &request.Operation{ + Name: opInputService12TestCaseOperation1, + } + + if input == nil { + input = &InputService12TestShapeInputShape{} + } + + req = c.newRequest(op, input, output) + output = &InputService12TestShapeInputService12TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *InputService12ProtocolTest) InputService12TestCaseOperation1(input *InputService12TestShapeInputShape) (*InputService12TestShapeInputService12TestCaseOperation1Output, error) { + req, out := c.InputService12TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +const opInputService12TestCaseOperation2 = "OperationName" + +// InputService12TestCaseOperation2Request generates a request for the InputService12TestCaseOperation2 operation. +func (c *InputService12ProtocolTest) InputService12TestCaseOperation2Request(input *InputService12TestShapeInputShape) (req *request.Request, output *InputService12TestShapeInputService12TestCaseOperation2Output) { + op := &request.Operation{ + Name: opInputService12TestCaseOperation2, + } + + if input == nil { + input = &InputService12TestShapeInputShape{} + } + + req = c.newRequest(op, input, output) + output = &InputService12TestShapeInputService12TestCaseOperation2Output{} + req.Data = output + return +} + +func (c *InputService12ProtocolTest) InputService12TestCaseOperation2(input *InputService12TestShapeInputShape) (*InputService12TestShapeInputService12TestCaseOperation2Output, error) { + req, out := c.InputService12TestCaseOperation2Request(input) + err := req.Send() + return out, err +} + +const opInputService12TestCaseOperation3 = "OperationName" + +// InputService12TestCaseOperation3Request generates a request for the InputService12TestCaseOperation3 operation. +func (c *InputService12ProtocolTest) InputService12TestCaseOperation3Request(input *InputService12TestShapeInputShape) (req *request.Request, output *InputService12TestShapeInputService12TestCaseOperation3Output) { + op := &request.Operation{ + Name: opInputService12TestCaseOperation3, + } + + if input == nil { + input = &InputService12TestShapeInputShape{} + } + + req = c.newRequest(op, input, output) + output = &InputService12TestShapeInputService12TestCaseOperation3Output{} + req.Data = output + return +} + +func (c *InputService12ProtocolTest) InputService12TestCaseOperation3(input *InputService12TestShapeInputShape) (*InputService12TestShapeInputService12TestCaseOperation3Output, error) { + req, out := c.InputService12TestCaseOperation3Request(input) + err := req.Send() + return out, err +} + +const opInputService12TestCaseOperation4 = "OperationName" + +// InputService12TestCaseOperation4Request generates a request for the InputService12TestCaseOperation4 operation. 
+func (c *InputService12ProtocolTest) InputService12TestCaseOperation4Request(input *InputService12TestShapeInputShape) (req *request.Request, output *InputService12TestShapeInputService12TestCaseOperation4Output) { + op := &request.Operation{ + Name: opInputService12TestCaseOperation4, + } + + if input == nil { + input = &InputService12TestShapeInputShape{} + } + + req = c.newRequest(op, input, output) + output = &InputService12TestShapeInputService12TestCaseOperation4Output{} + req.Data = output + return +} + +func (c *InputService12ProtocolTest) InputService12TestCaseOperation4(input *InputService12TestShapeInputShape) (*InputService12TestShapeInputService12TestCaseOperation4Output, error) { + req, out := c.InputService12TestCaseOperation4Request(input) + err := req.Send() + return out, err +} + +const opInputService12TestCaseOperation5 = "OperationName" + +// InputService12TestCaseOperation5Request generates a request for the InputService12TestCaseOperation5 operation. +func (c *InputService12ProtocolTest) InputService12TestCaseOperation5Request(input *InputService12TestShapeInputShape) (req *request.Request, output *InputService12TestShapeInputService12TestCaseOperation5Output) { + op := &request.Operation{ + Name: opInputService12TestCaseOperation5, + } + + if input == nil { + input = &InputService12TestShapeInputShape{} + } + + req = c.newRequest(op, input, output) + output = &InputService12TestShapeInputService12TestCaseOperation5Output{} + req.Data = output + return +} + +func (c *InputService12ProtocolTest) InputService12TestCaseOperation5(input *InputService12TestShapeInputShape) (*InputService12TestShapeInputService12TestCaseOperation5Output, error) { + req, out := c.InputService12TestCaseOperation5Request(input) + err := req.Send() + return out, err +} + +const opInputService12TestCaseOperation6 = "OperationName" + +// InputService12TestCaseOperation6Request generates a request for the InputService12TestCaseOperation6 operation. 
+func (c *InputService12ProtocolTest) InputService12TestCaseOperation6Request(input *InputService12TestShapeInputShape) (req *request.Request, output *InputService12TestShapeInputService12TestCaseOperation6Output) { + op := &request.Operation{ + Name: opInputService12TestCaseOperation6, + } + + if input == nil { + input = &InputService12TestShapeInputShape{} + } + + req = c.newRequest(op, input, output) + output = &InputService12TestShapeInputService12TestCaseOperation6Output{} + req.Data = output + return +} + +func (c *InputService12ProtocolTest) InputService12TestCaseOperation6(input *InputService12TestShapeInputShape) (*InputService12TestShapeInputService12TestCaseOperation6Output, error) { + req, out := c.InputService12TestCaseOperation6Request(input) + err := req.Send() + return out, err +} + +type InputService12TestShapeInputService12TestCaseOperation1Output struct { + _ struct{} `type:"structure"` +} + +type InputService12TestShapeInputService12TestCaseOperation2Output struct { + _ struct{} `type:"structure"` +} + +type InputService12TestShapeInputService12TestCaseOperation3Output struct { + _ struct{} `type:"structure"` +} + +type InputService12TestShapeInputService12TestCaseOperation4Output struct { + _ struct{} `type:"structure"` +} + +type InputService12TestShapeInputService12TestCaseOperation5Output struct { + _ struct{} `type:"structure"` +} + +type InputService12TestShapeInputService12TestCaseOperation6Output struct { + _ struct{} `type:"structure"` +} + +type InputService12TestShapeInputShape struct { + _ struct{} `type:"structure"` + + RecursiveStruct *InputService12TestShapeRecursiveStructType `type:"structure"` +} + +type InputService12TestShapeRecursiveStructType struct { + _ struct{} `type:"structure"` + + NoRecurse *string `type:"string"` + + RecursiveList []*InputService12TestShapeRecursiveStructType `type:"list"` + + RecursiveMap map[string]*InputService12TestShapeRecursiveStructType `type:"map"` + + RecursiveStruct *InputService12TestShapeRecursiveStructType `type:"structure"` +} + +// +// Tests begin here +// + +func TestInputService1ProtocolTestScalarMembersCase1(t *testing.T) { + sess := session.New() + svc := NewInputService1ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + + input := &InputService1TestShapeInputShape{ + Bar: aws.String("val2"), + Foo: aws.String("val1"), + } + req, _ := svc.InputService1TestCaseOperation1Request(input) + r := req.HTTPRequest + + // build request + query.Build(req) + assert.NoError(t, req.Error) + + // assert body + assert.NotNil(t, r.Body) + body, _ := ioutil.ReadAll(r.Body) + awstesting.AssertQuery(t, `Action=OperationName&Bar=val2&Foo=val1&Version=2014-01-01`, util.Trim(string(body))) + + // assert URL + awstesting.AssertURL(t, "https://test/", r.URL.String()) + + // assert headers + +} + +func TestInputService1ProtocolTestScalarMembersCase2(t *testing.T) { + sess := session.New() + svc := NewInputService1ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + + input := &InputService1TestShapeInputShape{ + Baz: aws.Bool(true), + } + req, _ := svc.InputService1TestCaseOperation2Request(input) + r := req.HTTPRequest + + // build request + query.Build(req) + assert.NoError(t, req.Error) + + // assert body + assert.NotNil(t, r.Body) + body, _ := ioutil.ReadAll(r.Body) + awstesting.AssertQuery(t, `Action=OperationName&Baz=true&Version=2014-01-01`, util.Trim(string(body))) + + // assert URL + awstesting.AssertURL(t, "https://test/", r.URL.String()) + + // assert headers + +} + +func 
TestInputService1ProtocolTestScalarMembersCase3(t *testing.T) { + sess := session.New() + svc := NewInputService1ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + + input := &InputService1TestShapeInputShape{ + Baz: aws.Bool(false), + } + req, _ := svc.InputService1TestCaseOperation3Request(input) + r := req.HTTPRequest + + // build request + query.Build(req) + assert.NoError(t, req.Error) + + // assert body + assert.NotNil(t, r.Body) + body, _ := ioutil.ReadAll(r.Body) + awstesting.AssertQuery(t, `Action=OperationName&Baz=false&Version=2014-01-01`, util.Trim(string(body))) + + // assert URL + awstesting.AssertURL(t, "https://test/", r.URL.String()) + + // assert headers + +} + +func TestInputService2ProtocolTestNestedStructureMembersCase1(t *testing.T) { + sess := session.New() + svc := NewInputService2ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + + input := &InputService2TestShapeInputService2TestCaseOperation1Input{ + StructArg: &InputService2TestShapeStructType{ + ScalarArg: aws.String("foo"), + }, + } + req, _ := svc.InputService2TestCaseOperation1Request(input) + r := req.HTTPRequest + + // build request + query.Build(req) + assert.NoError(t, req.Error) + + // assert body + assert.NotNil(t, r.Body) + body, _ := ioutil.ReadAll(r.Body) + awstesting.AssertQuery(t, `Action=OperationName&StructArg.ScalarArg=foo&Version=2014-01-01`, util.Trim(string(body))) + + // assert URL + awstesting.AssertURL(t, "https://test/", r.URL.String()) + + // assert headers + +} + +func TestInputService3ProtocolTestListTypesCase1(t *testing.T) { + sess := session.New() + svc := NewInputService3ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + + input := &InputService3TestShapeInputShape{ + ListArg: []*string{ + aws.String("foo"), + aws.String("bar"), + aws.String("baz"), + }, + } + req, _ := svc.InputService3TestCaseOperation1Request(input) + r := req.HTTPRequest + + // build request + query.Build(req) + assert.NoError(t, req.Error) + + // assert body + assert.NotNil(t, r.Body) + body, _ := ioutil.ReadAll(r.Body) + awstesting.AssertQuery(t, `Action=OperationName&ListArg.member.1=foo&ListArg.member.2=bar&ListArg.member.3=baz&Version=2014-01-01`, util.Trim(string(body))) + + // assert URL + awstesting.AssertURL(t, "https://test/", r.URL.String()) + + // assert headers + +} + +func TestInputService3ProtocolTestListTypesCase2(t *testing.T) { + sess := session.New() + svc := NewInputService3ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + + input := &InputService3TestShapeInputShape{ + ListArg: []*string{}, + } + req, _ := svc.InputService3TestCaseOperation2Request(input) + r := req.HTTPRequest + + // build request + query.Build(req) + assert.NoError(t, req.Error) + + // assert body + assert.NotNil(t, r.Body) + body, _ := ioutil.ReadAll(r.Body) + awstesting.AssertQuery(t, `Action=OperationName&ListArg=&Version=2014-01-01`, util.Trim(string(body))) + + // assert URL + awstesting.AssertURL(t, "https://test/", r.URL.String()) + + // assert headers + +} + +func TestInputService4ProtocolTestFlattenedListCase1(t *testing.T) { + sess := session.New() + svc := NewInputService4ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + + input := &InputService4TestShapeInputShape{ + ListArg: []*string{ + aws.String("a"), + aws.String("b"), + aws.String("c"), + }, + ScalarArg: aws.String("foo"), + } + req, _ := svc.InputService4TestCaseOperation1Request(input) + r := req.HTTPRequest + + // build request + 
query.Build(req) + assert.NoError(t, req.Error) + + // assert body + assert.NotNil(t, r.Body) + body, _ := ioutil.ReadAll(r.Body) + awstesting.AssertQuery(t, `Action=OperationName&ListArg.1=a&ListArg.2=b&ListArg.3=c&ScalarArg=foo&Version=2014-01-01`, util.Trim(string(body))) + + // assert URL + awstesting.AssertURL(t, "https://test/", r.URL.String()) + + // assert headers + +} + +func TestInputService4ProtocolTestFlattenedListCase2(t *testing.T) { + sess := session.New() + svc := NewInputService4ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + + input := &InputService4TestShapeInputShape{ + NamedListArg: []*string{ + aws.String("a"), + }, + } + req, _ := svc.InputService4TestCaseOperation2Request(input) + r := req.HTTPRequest + + // build request + query.Build(req) + assert.NoError(t, req.Error) + + // assert body + assert.NotNil(t, r.Body) + body, _ := ioutil.ReadAll(r.Body) + awstesting.AssertQuery(t, `Action=OperationName&Foo.1=a&Version=2014-01-01`, util.Trim(string(body))) + + // assert URL + awstesting.AssertURL(t, "https://test/", r.URL.String()) + + // assert headers + +} + +func TestInputService5ProtocolTestSerializeFlattenedMapTypeCase1(t *testing.T) { + sess := session.New() + svc := NewInputService5ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + + input := &InputService5TestShapeInputService5TestCaseOperation1Input{ + MapArg: map[string]*string{ + "key1": aws.String("val1"), + "key2": aws.String("val2"), + }, + } + req, _ := svc.InputService5TestCaseOperation1Request(input) + r := req.HTTPRequest + + // build request + query.Build(req) + assert.NoError(t, req.Error) + + // assert body + assert.NotNil(t, r.Body) + body, _ := ioutil.ReadAll(r.Body) + awstesting.AssertQuery(t, `Action=OperationName&MapArg.1.key=key1&MapArg.1.value=val1&MapArg.2.key=key2&MapArg.2.value=val2&Version=2014-01-01`, util.Trim(string(body))) + + // assert URL + awstesting.AssertURL(t, "https://test/", r.URL.String()) + + // assert headers + +} + +func TestInputService6ProtocolTestNonFlattenedListWithLocationNameCase1(t *testing.T) { + sess := session.New() + svc := NewInputService6ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + + input := &InputService6TestShapeInputService6TestCaseOperation1Input{ + ListArg: []*string{ + aws.String("a"), + aws.String("b"), + aws.String("c"), + }, + } + req, _ := svc.InputService6TestCaseOperation1Request(input) + r := req.HTTPRequest + + // build request + query.Build(req) + assert.NoError(t, req.Error) + + // assert body + assert.NotNil(t, r.Body) + body, _ := ioutil.ReadAll(r.Body) + awstesting.AssertQuery(t, `Action=OperationName&ListArg.item.1=a&ListArg.item.2=b&ListArg.item.3=c&Version=2014-01-01`, util.Trim(string(body))) + + // assert URL + awstesting.AssertURL(t, "https://test/", r.URL.String()) + + // assert headers + +} + +func TestInputService7ProtocolTestFlattenedListWithLocationNameCase1(t *testing.T) { + sess := session.New() + svc := NewInputService7ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + + input := &InputService7TestShapeInputService7TestCaseOperation1Input{ + ListArg: []*string{ + aws.String("a"), + aws.String("b"), + aws.String("c"), + }, + ScalarArg: aws.String("foo"), + } + req, _ := svc.InputService7TestCaseOperation1Request(input) + r := req.HTTPRequest + + // build request + query.Build(req) + assert.NoError(t, req.Error) + + // assert body + assert.NotNil(t, r.Body) + body, _ := ioutil.ReadAll(r.Body) + awstesting.AssertQuery(t, 
`Action=OperationName&ListArgLocation.1=a&ListArgLocation.2=b&ListArgLocation.3=c&ScalarArg=foo&Version=2014-01-01`, util.Trim(string(body))) + + // assert URL + awstesting.AssertURL(t, "https://test/", r.URL.String()) + + // assert headers + +} + +func TestInputService8ProtocolTestSerializeMapTypeCase1(t *testing.T) { + sess := session.New() + svc := NewInputService8ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + + input := &InputService8TestShapeInputService8TestCaseOperation1Input{ + MapArg: map[string]*string{ + "key1": aws.String("val1"), + "key2": aws.String("val2"), + }, + } + req, _ := svc.InputService8TestCaseOperation1Request(input) + r := req.HTTPRequest + + // build request + query.Build(req) + assert.NoError(t, req.Error) + + // assert body + assert.NotNil(t, r.Body) + body, _ := ioutil.ReadAll(r.Body) + awstesting.AssertQuery(t, `Action=OperationName&MapArg.entry.1.key=key1&MapArg.entry.1.value=val1&MapArg.entry.2.key=key2&MapArg.entry.2.value=val2&Version=2014-01-01`, util.Trim(string(body))) + + // assert URL + awstesting.AssertURL(t, "https://test/", r.URL.String()) + + // assert headers + +} + +func TestInputService9ProtocolTestSerializeMapTypeWithLocationNameCase1(t *testing.T) { + sess := session.New() + svc := NewInputService9ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + + input := &InputService9TestShapeInputService9TestCaseOperation1Input{ + MapArg: map[string]*string{ + "key1": aws.String("val1"), + "key2": aws.String("val2"), + }, + } + req, _ := svc.InputService9TestCaseOperation1Request(input) + r := req.HTTPRequest + + // build request + query.Build(req) + assert.NoError(t, req.Error) + + // assert body + assert.NotNil(t, r.Body) + body, _ := ioutil.ReadAll(r.Body) + awstesting.AssertQuery(t, `Action=OperationName&MapArg.entry.1.TheKey=key1&MapArg.entry.1.TheValue=val1&MapArg.entry.2.TheKey=key2&MapArg.entry.2.TheValue=val2&Version=2014-01-01`, util.Trim(string(body))) + + // assert URL + awstesting.AssertURL(t, "https://test/", r.URL.String()) + + // assert headers + +} + +func TestInputService10ProtocolTestBase64EncodedBlobsCase1(t *testing.T) { + sess := session.New() + svc := NewInputService10ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + + input := &InputService10TestShapeInputService10TestCaseOperation1Input{ + BlobArg: []byte("foo"), + } + req, _ := svc.InputService10TestCaseOperation1Request(input) + r := req.HTTPRequest + + // build request + query.Build(req) + assert.NoError(t, req.Error) + + // assert body + assert.NotNil(t, r.Body) + body, _ := ioutil.ReadAll(r.Body) + awstesting.AssertQuery(t, `Action=OperationName&BlobArg=Zm9v&Version=2014-01-01`, util.Trim(string(body))) + + // assert URL + awstesting.AssertURL(t, "https://test/", r.URL.String()) + + // assert headers + +} + +func TestInputService11ProtocolTestTimestampValuesCase1(t *testing.T) { + sess := session.New() + svc := NewInputService11ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + + input := &InputService11TestShapeInputService11TestCaseOperation1Input{ + TimeArg: aws.Time(time.Unix(1422172800, 0)), + } + req, _ := svc.InputService11TestCaseOperation1Request(input) + r := req.HTTPRequest + + // build request + query.Build(req) + assert.NoError(t, req.Error) + + // assert body + assert.NotNil(t, r.Body) + body, _ := ioutil.ReadAll(r.Body) + awstesting.AssertQuery(t, `Action=OperationName&TimeArg=2015-01-25T08%3A00%3A00Z&Version=2014-01-01`, util.Trim(string(body))) + + // assert URL + 
awstesting.AssertURL(t, "https://test/", r.URL.String()) + + // assert headers + +} + +func TestInputService12ProtocolTestRecursiveShapesCase1(t *testing.T) { + sess := session.New() + svc := NewInputService12ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + + input := &InputService12TestShapeInputShape{ + RecursiveStruct: &InputService12TestShapeRecursiveStructType{ + NoRecurse: aws.String("foo"), + }, + } + req, _ := svc.InputService12TestCaseOperation1Request(input) + r := req.HTTPRequest + + // build request + query.Build(req) + assert.NoError(t, req.Error) + + // assert body + assert.NotNil(t, r.Body) + body, _ := ioutil.ReadAll(r.Body) + awstesting.AssertQuery(t, `Action=OperationName&RecursiveStruct.NoRecurse=foo&Version=2014-01-01`, util.Trim(string(body))) + + // assert URL + awstesting.AssertURL(t, "https://test/", r.URL.String()) + + // assert headers + +} + +func TestInputService12ProtocolTestRecursiveShapesCase2(t *testing.T) { + sess := session.New() + svc := NewInputService12ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + + input := &InputService12TestShapeInputShape{ + RecursiveStruct: &InputService12TestShapeRecursiveStructType{ + RecursiveStruct: &InputService12TestShapeRecursiveStructType{ + NoRecurse: aws.String("foo"), + }, + }, + } + req, _ := svc.InputService12TestCaseOperation2Request(input) + r := req.HTTPRequest + + // build request + query.Build(req) + assert.NoError(t, req.Error) + + // assert body + assert.NotNil(t, r.Body) + body, _ := ioutil.ReadAll(r.Body) + awstesting.AssertQuery(t, `Action=OperationName&RecursiveStruct.RecursiveStruct.NoRecurse=foo&Version=2014-01-01`, util.Trim(string(body))) + + // assert URL + awstesting.AssertURL(t, "https://test/", r.URL.String()) + + // assert headers + +} + +func TestInputService12ProtocolTestRecursiveShapesCase3(t *testing.T) { + sess := session.New() + svc := NewInputService12ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + + input := &InputService12TestShapeInputShape{ + RecursiveStruct: &InputService12TestShapeRecursiveStructType{ + RecursiveStruct: &InputService12TestShapeRecursiveStructType{ + RecursiveStruct: &InputService12TestShapeRecursiveStructType{ + RecursiveStruct: &InputService12TestShapeRecursiveStructType{ + NoRecurse: aws.String("foo"), + }, + }, + }, + }, + } + req, _ := svc.InputService12TestCaseOperation3Request(input) + r := req.HTTPRequest + + // build request + query.Build(req) + assert.NoError(t, req.Error) + + // assert body + assert.NotNil(t, r.Body) + body, _ := ioutil.ReadAll(r.Body) + awstesting.AssertQuery(t, `Action=OperationName&RecursiveStruct.RecursiveStruct.RecursiveStruct.RecursiveStruct.NoRecurse=foo&Version=2014-01-01`, util.Trim(string(body))) + + // assert URL + awstesting.AssertURL(t, "https://test/", r.URL.String()) + + // assert headers + +} + +func TestInputService12ProtocolTestRecursiveShapesCase4(t *testing.T) { + sess := session.New() + svc := NewInputService12ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + + input := &InputService12TestShapeInputShape{ + RecursiveStruct: &InputService12TestShapeRecursiveStructType{ + RecursiveList: []*InputService12TestShapeRecursiveStructType{ + { + NoRecurse: aws.String("foo"), + }, + { + NoRecurse: aws.String("bar"), + }, + }, + }, + } + req, _ := svc.InputService12TestCaseOperation4Request(input) + r := req.HTTPRequest + + // build request + query.Build(req) + assert.NoError(t, req.Error) + + // assert body + assert.NotNil(t, r.Body) + 
body, _ := ioutil.ReadAll(r.Body) + awstesting.AssertQuery(t, `Action=OperationName&RecursiveStruct.RecursiveList.member.1.NoRecurse=foo&RecursiveStruct.RecursiveList.member.2.NoRecurse=bar&Version=2014-01-01`, util.Trim(string(body))) + + // assert URL + awstesting.AssertURL(t, "https://test/", r.URL.String()) + + // assert headers + +} + +func TestInputService12ProtocolTestRecursiveShapesCase5(t *testing.T) { + sess := session.New() + svc := NewInputService12ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + + input := &InputService12TestShapeInputShape{ + RecursiveStruct: &InputService12TestShapeRecursiveStructType{ + RecursiveList: []*InputService12TestShapeRecursiveStructType{ + { + NoRecurse: aws.String("foo"), + }, + { + RecursiveStruct: &InputService12TestShapeRecursiveStructType{ + NoRecurse: aws.String("bar"), + }, + }, + }, + }, + } + req, _ := svc.InputService12TestCaseOperation5Request(input) + r := req.HTTPRequest + + // build request + query.Build(req) + assert.NoError(t, req.Error) + + // assert body + assert.NotNil(t, r.Body) + body, _ := ioutil.ReadAll(r.Body) + awstesting.AssertQuery(t, `Action=OperationName&RecursiveStruct.RecursiveList.member.1.NoRecurse=foo&RecursiveStruct.RecursiveList.member.2.RecursiveStruct.NoRecurse=bar&Version=2014-01-01`, util.Trim(string(body))) + + // assert URL + awstesting.AssertURL(t, "https://test/", r.URL.String()) + + // assert headers + +} + +func TestInputService12ProtocolTestRecursiveShapesCase6(t *testing.T) { + sess := session.New() + svc := NewInputService12ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + + input := &InputService12TestShapeInputShape{ + RecursiveStruct: &InputService12TestShapeRecursiveStructType{ + RecursiveMap: map[string]*InputService12TestShapeRecursiveStructType{ + "bar": { + NoRecurse: aws.String("bar"), + }, + "foo": { + NoRecurse: aws.String("foo"), + }, + }, + }, + } + req, _ := svc.InputService12TestCaseOperation6Request(input) + r := req.HTTPRequest + + // build request + query.Build(req) + assert.NoError(t, req.Error) + + // assert body + assert.NotNil(t, r.Body) + body, _ := ioutil.ReadAll(r.Body) + awstesting.AssertQuery(t, `Action=OperationName&RecursiveStruct.RecursiveMap.entry.1.key=foo&RecursiveStruct.RecursiveMap.entry.1.value.NoRecurse=foo&RecursiveStruct.RecursiveMap.entry.2.key=bar&RecursiveStruct.RecursiveMap.entry.2.value.NoRecurse=bar&Version=2014-01-01`, util.Trim(string(body))) + + // assert URL + awstesting.AssertURL(t, "https://test/", r.URL.String()) + + // assert headers + +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/query/queryutil/queryutil.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/queryutil/queryutil.go new file mode 100644 index 000000000..4afa4cf0e --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/queryutil/queryutil.go @@ -0,0 +1,223 @@ +package queryutil + +import ( + "encoding/base64" + "fmt" + "net/url" + "reflect" + "sort" + "strconv" + "strings" + "time" +) + +// Parse parses an object i and fills a url.Values object. The isEC2 flag +// indicates if this is the EC2 Query sub-protocol. 
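+//
+// A minimal sketch of the mapping (hypothetical shape, for illustration
+// only; the generated protocol tests exercise the canonical cases):
+//
+//     foo := "bar"
+//     v := url.Values{}
+//     _ = Parse(v, struct {
+//         Foo *string `type:"string"`
+//     }{Foo: &foo}, false)
+//     // v.Encode() == "Foo=bar"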
+func Parse(body url.Values, i interface{}, isEC2 bool) error { + q := queryParser{isEC2: isEC2} + return q.parseValue(body, reflect.ValueOf(i), "", "") +} + +func elemOf(value reflect.Value) reflect.Value { + for value.Kind() == reflect.Ptr { + value = value.Elem() + } + return value +} + +type queryParser struct { + isEC2 bool +} + +func (q *queryParser) parseValue(v url.Values, value reflect.Value, prefix string, tag reflect.StructTag) error { + value = elemOf(value) + + // no need to handle zero values + if !value.IsValid() { + return nil + } + + t := tag.Get("type") + if t == "" { + switch value.Kind() { + case reflect.Struct: + t = "structure" + case reflect.Slice: + t = "list" + case reflect.Map: + t = "map" + } + } + + switch t { + case "structure": + return q.parseStruct(v, value, prefix) + case "list": + return q.parseList(v, value, prefix, tag) + case "map": + return q.parseMap(v, value, prefix, tag) + default: + return q.parseScalar(v, value, prefix, tag) + } +} + +func (q *queryParser) parseStruct(v url.Values, value reflect.Value, prefix string) error { + if !value.IsValid() { + return nil + } + + t := value.Type() + for i := 0; i < value.NumField(); i++ { + if c := t.Field(i).Name[0:1]; strings.ToLower(c) == c { + continue // ignore unexported fields + } + + elemValue := elemOf(value.Field(i)) + field := t.Field(i) + var name string + + if q.isEC2 { + name = field.Tag.Get("queryName") + } + if name == "" { + if field.Tag.Get("flattened") != "" && field.Tag.Get("locationNameList") != "" { + name = field.Tag.Get("locationNameList") + } else if locName := field.Tag.Get("locationName"); locName != "" { + name = locName + } + if name != "" && q.isEC2 { + name = strings.ToUpper(name[0:1]) + name[1:] + } + } + if name == "" { + name = field.Name + } + + if prefix != "" { + name = prefix + "." + name + } + + if err := q.parseValue(v, elemValue, name, field.Tag); err != nil { + return err + } + } + return nil +} + +func (q *queryParser) parseList(v url.Values, value reflect.Value, prefix string, tag reflect.StructTag) error { + // If it's empty, generate an empty value + if !value.IsNil() && value.Len() == 0 { + v.Set(prefix, "") + return nil + } + + // check for unflattened list member + if !q.isEC2 && tag.Get("flattened") == "" { + prefix += ".member" + } + + for i := 0; i < value.Len(); i++ { + slicePrefix := prefix + if slicePrefix == "" { + slicePrefix = strconv.Itoa(i + 1) + } else { + slicePrefix = slicePrefix + "." + strconv.Itoa(i+1) + } + if err := q.parseValue(v, value.Index(i), slicePrefix, ""); err != nil { + return err + } + } + return nil +} + +func (q *queryParser) parseMap(v url.Values, value reflect.Value, prefix string, tag reflect.StructTag) error { + // If it's empty, generate an empty value + if !value.IsNil() && value.Len() == 0 { + v.Set(prefix, "") + return nil + } + + // check for unflattened list member + if !q.isEC2 && tag.Get("flattened") == "" { + prefix += ".entry" + } + + // sort keys for improved serialization consistency. + // this is not strictly necessary for protocol support. 
+ mapKeyValues := value.MapKeys() + mapKeys := map[string]reflect.Value{} + mapKeyNames := make([]string, len(mapKeyValues)) + for i, mapKey := range mapKeyValues { + name := mapKey.String() + mapKeys[name] = mapKey + mapKeyNames[i] = name + } + sort.Strings(mapKeyNames) + + for i, mapKeyName := range mapKeyNames { + mapKey := mapKeys[mapKeyName] + mapValue := value.MapIndex(mapKey) + + kname := tag.Get("locationNameKey") + if kname == "" { + kname = "key" + } + vname := tag.Get("locationNameValue") + if vname == "" { + vname = "value" + } + + // serialize key + var keyName string + if prefix == "" { + keyName = strconv.Itoa(i+1) + "." + kname + } else { + keyName = prefix + "." + strconv.Itoa(i+1) + "." + kname + } + + if err := q.parseValue(v, mapKey, keyName, ""); err != nil { + return err + } + + // serialize value + var valueName string + if prefix == "" { + valueName = strconv.Itoa(i+1) + "." + vname + } else { + valueName = prefix + "." + strconv.Itoa(i+1) + "." + vname + } + + if err := q.parseValue(v, mapValue, valueName, ""); err != nil { + return err + } + } + + return nil +} + +func (q *queryParser) parseScalar(v url.Values, r reflect.Value, name string, tag reflect.StructTag) error { + switch value := r.Interface().(type) { + case string: + v.Set(name, value) + case []byte: + if !r.IsNil() { + v.Set(name, base64.StdEncoding.EncodeToString(value)) + } + case bool: + v.Set(name, strconv.FormatBool(value)) + case int64: + v.Set(name, strconv.FormatInt(value, 10)) + case int: + v.Set(name, strconv.Itoa(value)) + case float64: + v.Set(name, strconv.FormatFloat(value, 'f', -1, 64)) + case float32: + v.Set(name, strconv.FormatFloat(float64(value), 'f', -1, 32)) + case time.Time: + const ISO8601UTC = "2006-01-02T15:04:05Z" + v.Set(name, value.UTC().Format(ISO8601UTC)) + default: + return fmt.Errorf("unsupported value for param %s: %v (%s)", name, r.Interface(), r.Type().Name()) + } + return nil +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal.go new file mode 100644 index 000000000..f961029ef --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal.go @@ -0,0 +1,29 @@ +package query + +//go:generate go run ../../../models/protocol_tests/generate.go ../../../models/protocol_tests/output/query.json unmarshal_test.go + +import ( + "encoding/xml" + + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil" +) + +// Unmarshal unmarshals a response for an AWS Query service. +func Unmarshal(r *request.Request) { + defer r.HTTPResponse.Body.Close() + if r.DataFilled() { + decoder := xml.NewDecoder(r.HTTPResponse.Body) + err := xmlutil.UnmarshalXML(r.Data, decoder, r.Operation.Name+"Result") + if err != nil { + r.Error = awserr.New("SerializationError", "failed decoding Query response", err) + return + } + } +} + +// UnmarshalMeta unmarshals header response values for an AWS Query service. 
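A standalone sketch (standard library only) of the two non-obvious scalar encodings parseScalar above applies: `[]byte` blobs are base64-encoded and `time.Time` values are rendered as ISO 8601 UTC. The sample values mirror the `dmFsdWU=` and `2015-01-25T08:00:00Z` fixtures used by the unmarshal tests later in this file.

package main

import (
	"encoding/base64"
	"fmt"
	"net/url"
	"time"
)

func main() {
	v := url.Values{}
	// Blob: base64 of the raw bytes, as parseScalar does for []byte.
	v.Set("Blob", base64.StdEncoding.EncodeToString([]byte("value")))
	// Timestamp: the same ISO8601UTC layout constant parseScalar uses.
	v.Set("Timestamp", time.Date(2015, 1, 25, 8, 0, 0, 0, time.UTC).Format("2006-01-02T15:04:05Z"))
	fmt.Println(v.Get("Blob"))      // dmFsdWU=
	fmt.Println(v.Get("Timestamp")) // 2015-01-25T08:00:00Z
}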
+func UnmarshalMeta(r *request.Request) { + r.RequestID = r.HTTPResponse.Header.Get("X-Amzn-Requestid") +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal_error.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal_error.go new file mode 100644 index 000000000..cb87b79f0 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal_error.go @@ -0,0 +1,37 @@ +package query + +import ( + "encoding/xml" + "io" + + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/request" +) + +type xmlErrorResponse struct { + XMLName xml.Name `xml:"ErrorResponse"` + Code string `xml:"Error>Code"` + Message string `xml:"Error>Message"` + RequestID string `xml:"RequestId"` +} + +// UnmarshalError unmarshals an error response for an AWS Query service. +func UnmarshalError(r *request.Request) { + defer r.HTTPResponse.Body.Close() + + resp := &xmlErrorResponse{} + err := xml.NewDecoder(r.HTTPResponse.Body).Decode(resp) + if err != nil && err != io.EOF { + r.Error = awserr.New("SerializationError", "failed to decode query XML error response", err) + } else { + reqID := resp.RequestID + if reqID == "" { + reqID = r.RequestID + } + r.Error = awserr.NewRequestFailure( + awserr.New(resp.Code, resp.Message, nil), + r.HTTPResponse.StatusCode, + reqID, + ) + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal_test.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal_test.go new file mode 100644 index 000000000..75022e21c --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal_test.go @@ -0,0 +1,1746 @@ +package query_test + +import ( + "bytes" + "encoding/json" + "encoding/xml" + "io" + "io/ioutil" + "net/http" + "net/url" + "testing" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/client" + "github.com/aws/aws-sdk-go/aws/client/metadata" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/aws/session" + "github.com/aws/aws-sdk-go/awstesting" + "github.com/aws/aws-sdk-go/private/protocol/query" + "github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil" + "github.com/aws/aws-sdk-go/private/signer/v4" + "github.com/aws/aws-sdk-go/private/util" + "github.com/stretchr/testify/assert" +) + +var _ bytes.Buffer // always import bytes +var _ http.Request +var _ json.Marshaler +var _ time.Time +var _ xmlutil.XMLNode +var _ xml.Attr +var _ = awstesting.GenerateAssertions +var _ = ioutil.Discard +var _ = util.Trim("") +var _ = url.Values{} +var _ = io.EOF +var _ = aws.String + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type OutputService1ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the OutputService1ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a OutputService1ProtocolTest client from just a session. +// svc := outputservice1protocoltest.New(mySession) +// +// // Create a OutputService1ProtocolTest client with additional configuration +// svc := outputservice1protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewOutputService1ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *OutputService1ProtocolTest { + c := p.ClientConfig("outputservice1protocoltest", cfgs...) 
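A self-contained sketch (standard library only) of the decode that UnmarshalError in unmarshal_error.go above performs against the xmlErrorResponse shape. The error code, message, and request ID here are fabricated for illustration.

package main

import (
	"encoding/xml"
	"fmt"
	"strings"
)

// Mirrors the xmlErrorResponse shape from unmarshal_error.go; the
// Error>Code syntax tells encoding/xml to follow nested elements.
type xmlErrorResponse struct {
	XMLName   xml.Name `xml:"ErrorResponse"`
	Code      string   `xml:"Error>Code"`
	Message   string   `xml:"Error>Message"`
	RequestID string   `xml:"RequestId"`
}

func main() {
	body := `<ErrorResponse>
  <Error><Code>Throttling</Code><Message>Rate exceeded</Message></Error>
  <RequestId>abc-123</RequestId>
</ErrorResponse>`
	resp := &xmlErrorResponse{}
	if err := xml.NewDecoder(strings.NewReader(body)).Decode(resp); err != nil {
		panic(err)
	}
	fmt.Println(resp.Code, resp.Message, resp.RequestID)
	// Throttling Rate exceeded abc-123
}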
+ return newOutputService1ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newOutputService1ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *OutputService1ProtocolTest { + svc := &OutputService1ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "outputservice1protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Build.PushBack(query.Build) + svc.Handlers.Unmarshal.PushBack(query.Unmarshal) + svc.Handlers.UnmarshalMeta.PushBack(query.UnmarshalMeta) + svc.Handlers.UnmarshalError.PushBack(query.UnmarshalError) + + return svc +} + +// newRequest creates a new request for a OutputService1ProtocolTest operation and runs any +// custom request initialization. +func (c *OutputService1ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opOutputService1TestCaseOperation1 = "OperationName" + +// OutputService1TestCaseOperation1Request generates a request for the OutputService1TestCaseOperation1 operation. +func (c *OutputService1ProtocolTest) OutputService1TestCaseOperation1Request(input *OutputService1TestShapeOutputService1TestCaseOperation1Input) (req *request.Request, output *OutputService1TestShapeOutputService1TestCaseOperation1Output) { + op := &request.Operation{ + Name: opOutputService1TestCaseOperation1, + } + + if input == nil { + input = &OutputService1TestShapeOutputService1TestCaseOperation1Input{} + } + + req = c.newRequest(op, input, output) + output = &OutputService1TestShapeOutputService1TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *OutputService1ProtocolTest) OutputService1TestCaseOperation1(input *OutputService1TestShapeOutputService1TestCaseOperation1Input) (*OutputService1TestShapeOutputService1TestCaseOperation1Output, error) { + req, out := c.OutputService1TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +type OutputService1TestShapeOutputService1TestCaseOperation1Input struct { + _ struct{} `type:"structure"` +} + +type OutputService1TestShapeOutputService1TestCaseOperation1Output struct { + _ struct{} `type:"structure"` + + Char *string `type:"character"` + + Double *float64 `type:"double"` + + FalseBool *bool `type:"boolean"` + + Float *float64 `type:"float"` + + Long *int64 `type:"long"` + + Num *int64 `locationName:"FooNum" type:"integer"` + + Str *string `type:"string"` + + Timestamp *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + TrueBool *bool `type:"boolean"` +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type OutputService2ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the OutputService2ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a OutputService2ProtocolTest client from just a session. 
+// svc := outputservice2protocoltest.New(mySession) +// +// // Create a OutputService2ProtocolTest client with additional configuration +// svc := outputservice2protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewOutputService2ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *OutputService2ProtocolTest { + c := p.ClientConfig("outputservice2protocoltest", cfgs...) + return newOutputService2ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newOutputService2ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *OutputService2ProtocolTest { + svc := &OutputService2ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "outputservice2protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Build.PushBack(query.Build) + svc.Handlers.Unmarshal.PushBack(query.Unmarshal) + svc.Handlers.UnmarshalMeta.PushBack(query.UnmarshalMeta) + svc.Handlers.UnmarshalError.PushBack(query.UnmarshalError) + + return svc +} + +// newRequest creates a new request for a OutputService2ProtocolTest operation and runs any +// custom request initialization. +func (c *OutputService2ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opOutputService2TestCaseOperation1 = "OperationName" + +// OutputService2TestCaseOperation1Request generates a request for the OutputService2TestCaseOperation1 operation. +func (c *OutputService2ProtocolTest) OutputService2TestCaseOperation1Request(input *OutputService2TestShapeOutputService2TestCaseOperation1Input) (req *request.Request, output *OutputService2TestShapeOutputService2TestCaseOperation1Output) { + op := &request.Operation{ + Name: opOutputService2TestCaseOperation1, + } + + if input == nil { + input = &OutputService2TestShapeOutputService2TestCaseOperation1Input{} + } + + req = c.newRequest(op, input, output) + output = &OutputService2TestShapeOutputService2TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *OutputService2ProtocolTest) OutputService2TestCaseOperation1(input *OutputService2TestShapeOutputService2TestCaseOperation1Input) (*OutputService2TestShapeOutputService2TestCaseOperation1Output, error) { + req, out := c.OutputService2TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +type OutputService2TestShapeOutputService2TestCaseOperation1Input struct { + _ struct{} `type:"structure"` +} + +type OutputService2TestShapeOutputService2TestCaseOperation1Output struct { + _ struct{} `type:"structure"` + + Num *int64 `type:"integer"` + + Str *string `type:"string"` +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type OutputService3ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the OutputService3ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a OutputService3ProtocolTest client from just a session. 
+// svc := outputservice3protocoltest.New(mySession) +// +// // Create a OutputService3ProtocolTest client with additional configuration +// svc := outputservice3protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewOutputService3ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *OutputService3ProtocolTest { + c := p.ClientConfig("outputservice3protocoltest", cfgs...) + return newOutputService3ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newOutputService3ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *OutputService3ProtocolTest { + svc := &OutputService3ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "outputservice3protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Build.PushBack(query.Build) + svc.Handlers.Unmarshal.PushBack(query.Unmarshal) + svc.Handlers.UnmarshalMeta.PushBack(query.UnmarshalMeta) + svc.Handlers.UnmarshalError.PushBack(query.UnmarshalError) + + return svc +} + +// newRequest creates a new request for a OutputService3ProtocolTest operation and runs any +// custom request initialization. +func (c *OutputService3ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opOutputService3TestCaseOperation1 = "OperationName" + +// OutputService3TestCaseOperation1Request generates a request for the OutputService3TestCaseOperation1 operation. +func (c *OutputService3ProtocolTest) OutputService3TestCaseOperation1Request(input *OutputService3TestShapeOutputService3TestCaseOperation1Input) (req *request.Request, output *OutputService3TestShapeOutputService3TestCaseOperation1Output) { + op := &request.Operation{ + Name: opOutputService3TestCaseOperation1, + } + + if input == nil { + input = &OutputService3TestShapeOutputService3TestCaseOperation1Input{} + } + + req = c.newRequest(op, input, output) + output = &OutputService3TestShapeOutputService3TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *OutputService3ProtocolTest) OutputService3TestCaseOperation1(input *OutputService3TestShapeOutputService3TestCaseOperation1Input) (*OutputService3TestShapeOutputService3TestCaseOperation1Output, error) { + req, out := c.OutputService3TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +type OutputService3TestShapeOutputService3TestCaseOperation1Input struct { + _ struct{} `type:"structure"` +} + +type OutputService3TestShapeOutputService3TestCaseOperation1Output struct { + _ struct{} `type:"structure"` + + Blob []byte `type:"blob"` +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type OutputService4ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the OutputService4ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a OutputService4ProtocolTest client from just a session. 
+// svc := outputservice4protocoltest.New(mySession) +// +// // Create a OutputService4ProtocolTest client with additional configuration +// svc := outputservice4protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewOutputService4ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *OutputService4ProtocolTest { + c := p.ClientConfig("outputservice4protocoltest", cfgs...) + return newOutputService4ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newOutputService4ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *OutputService4ProtocolTest { + svc := &OutputService4ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "outputservice4protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Build.PushBack(query.Build) + svc.Handlers.Unmarshal.PushBack(query.Unmarshal) + svc.Handlers.UnmarshalMeta.PushBack(query.UnmarshalMeta) + svc.Handlers.UnmarshalError.PushBack(query.UnmarshalError) + + return svc +} + +// newRequest creates a new request for a OutputService4ProtocolTest operation and runs any +// custom request initialization. +func (c *OutputService4ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opOutputService4TestCaseOperation1 = "OperationName" + +// OutputService4TestCaseOperation1Request generates a request for the OutputService4TestCaseOperation1 operation. +func (c *OutputService4ProtocolTest) OutputService4TestCaseOperation1Request(input *OutputService4TestShapeOutputService4TestCaseOperation1Input) (req *request.Request, output *OutputService4TestShapeOutputService4TestCaseOperation1Output) { + op := &request.Operation{ + Name: opOutputService4TestCaseOperation1, + } + + if input == nil { + input = &OutputService4TestShapeOutputService4TestCaseOperation1Input{} + } + + req = c.newRequest(op, input, output) + output = &OutputService4TestShapeOutputService4TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *OutputService4ProtocolTest) OutputService4TestCaseOperation1(input *OutputService4TestShapeOutputService4TestCaseOperation1Input) (*OutputService4TestShapeOutputService4TestCaseOperation1Output, error) { + req, out := c.OutputService4TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +type OutputService4TestShapeOutputService4TestCaseOperation1Input struct { + _ struct{} `type:"structure"` +} + +type OutputService4TestShapeOutputService4TestCaseOperation1Output struct { + _ struct{} `type:"structure"` + + ListMember []*string `type:"list"` +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type OutputService5ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the OutputService5ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a OutputService5ProtocolTest client from just a session. 
+// svc := outputservice5protocoltest.New(mySession) +// +// // Create a OutputService5ProtocolTest client with additional configuration +// svc := outputservice5protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewOutputService5ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *OutputService5ProtocolTest { + c := p.ClientConfig("outputservice5protocoltest", cfgs...) + return newOutputService5ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newOutputService5ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *OutputService5ProtocolTest { + svc := &OutputService5ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "outputservice5protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Build.PushBack(query.Build) + svc.Handlers.Unmarshal.PushBack(query.Unmarshal) + svc.Handlers.UnmarshalMeta.PushBack(query.UnmarshalMeta) + svc.Handlers.UnmarshalError.PushBack(query.UnmarshalError) + + return svc +} + +// newRequest creates a new request for a OutputService5ProtocolTest operation and runs any +// custom request initialization. +func (c *OutputService5ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opOutputService5TestCaseOperation1 = "OperationName" + +// OutputService5TestCaseOperation1Request generates a request for the OutputService5TestCaseOperation1 operation. +func (c *OutputService5ProtocolTest) OutputService5TestCaseOperation1Request(input *OutputService5TestShapeOutputService5TestCaseOperation1Input) (req *request.Request, output *OutputService5TestShapeOutputService5TestCaseOperation1Output) { + op := &request.Operation{ + Name: opOutputService5TestCaseOperation1, + } + + if input == nil { + input = &OutputService5TestShapeOutputService5TestCaseOperation1Input{} + } + + req = c.newRequest(op, input, output) + output = &OutputService5TestShapeOutputService5TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *OutputService5ProtocolTest) OutputService5TestCaseOperation1(input *OutputService5TestShapeOutputService5TestCaseOperation1Input) (*OutputService5TestShapeOutputService5TestCaseOperation1Output, error) { + req, out := c.OutputService5TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +type OutputService5TestShapeOutputService5TestCaseOperation1Input struct { + _ struct{} `type:"structure"` +} + +type OutputService5TestShapeOutputService5TestCaseOperation1Output struct { + _ struct{} `type:"structure"` + + ListMember []*string `locationNameList:"item" type:"list"` +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type OutputService6ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the OutputService6ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a OutputService6ProtocolTest client from just a session. 
+// svc := outputservice6protocoltest.New(mySession) +// +// // Create a OutputService6ProtocolTest client with additional configuration +// svc := outputservice6protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewOutputService6ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *OutputService6ProtocolTest { + c := p.ClientConfig("outputservice6protocoltest", cfgs...) + return newOutputService6ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newOutputService6ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *OutputService6ProtocolTest { + svc := &OutputService6ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "outputservice6protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Build.PushBack(query.Build) + svc.Handlers.Unmarshal.PushBack(query.Unmarshal) + svc.Handlers.UnmarshalMeta.PushBack(query.UnmarshalMeta) + svc.Handlers.UnmarshalError.PushBack(query.UnmarshalError) + + return svc +} + +// newRequest creates a new request for a OutputService6ProtocolTest operation and runs any +// custom request initialization. +func (c *OutputService6ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opOutputService6TestCaseOperation1 = "OperationName" + +// OutputService6TestCaseOperation1Request generates a request for the OutputService6TestCaseOperation1 operation. +func (c *OutputService6ProtocolTest) OutputService6TestCaseOperation1Request(input *OutputService6TestShapeOutputService6TestCaseOperation1Input) (req *request.Request, output *OutputService6TestShapeOutputService6TestCaseOperation1Output) { + op := &request.Operation{ + Name: opOutputService6TestCaseOperation1, + } + + if input == nil { + input = &OutputService6TestShapeOutputService6TestCaseOperation1Input{} + } + + req = c.newRequest(op, input, output) + output = &OutputService6TestShapeOutputService6TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *OutputService6ProtocolTest) OutputService6TestCaseOperation1(input *OutputService6TestShapeOutputService6TestCaseOperation1Input) (*OutputService6TestShapeOutputService6TestCaseOperation1Output, error) { + req, out := c.OutputService6TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +type OutputService6TestShapeOutputService6TestCaseOperation1Input struct { + _ struct{} `type:"structure"` +} + +type OutputService6TestShapeOutputService6TestCaseOperation1Output struct { + _ struct{} `type:"structure"` + + ListMember []*string `type:"list" flattened:"true"` +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type OutputService7ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the OutputService7ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a OutputService7ProtocolTest client from just a session. 
+// svc := outputservice7protocoltest.New(mySession) +// +// // Create a OutputService7ProtocolTest client with additional configuration +// svc := outputservice7protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewOutputService7ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *OutputService7ProtocolTest { + c := p.ClientConfig("outputservice7protocoltest", cfgs...) + return newOutputService7ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newOutputService7ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *OutputService7ProtocolTest { + svc := &OutputService7ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "outputservice7protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Build.PushBack(query.Build) + svc.Handlers.Unmarshal.PushBack(query.Unmarshal) + svc.Handlers.UnmarshalMeta.PushBack(query.UnmarshalMeta) + svc.Handlers.UnmarshalError.PushBack(query.UnmarshalError) + + return svc +} + +// newRequest creates a new request for a OutputService7ProtocolTest operation and runs any +// custom request initialization. +func (c *OutputService7ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opOutputService7TestCaseOperation1 = "OperationName" + +// OutputService7TestCaseOperation1Request generates a request for the OutputService7TestCaseOperation1 operation. +func (c *OutputService7ProtocolTest) OutputService7TestCaseOperation1Request(input *OutputService7TestShapeOutputService7TestCaseOperation1Input) (req *request.Request, output *OutputService7TestShapeOutputService7TestCaseOperation1Output) { + op := &request.Operation{ + Name: opOutputService7TestCaseOperation1, + } + + if input == nil { + input = &OutputService7TestShapeOutputService7TestCaseOperation1Input{} + } + + req = c.newRequest(op, input, output) + output = &OutputService7TestShapeOutputService7TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *OutputService7ProtocolTest) OutputService7TestCaseOperation1(input *OutputService7TestShapeOutputService7TestCaseOperation1Input) (*OutputService7TestShapeOutputService7TestCaseOperation1Output, error) { + req, out := c.OutputService7TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +type OutputService7TestShapeOutputService7TestCaseOperation1Input struct { + _ struct{} `type:"structure"` +} + +type OutputService7TestShapeOutputService7TestCaseOperation1Output struct { + _ struct{} `type:"structure"` + + ListMember []*string `type:"list" flattened:"true"` +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type OutputService8ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the OutputService8ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a OutputService8ProtocolTest client from just a session. 
+// svc := outputservice8protocoltest.New(mySession) +// +// // Create a OutputService8ProtocolTest client with additional configuration +// svc := outputservice8protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewOutputService8ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *OutputService8ProtocolTest { + c := p.ClientConfig("outputservice8protocoltest", cfgs...) + return newOutputService8ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newOutputService8ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *OutputService8ProtocolTest { + svc := &OutputService8ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "outputservice8protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Build.PushBack(query.Build) + svc.Handlers.Unmarshal.PushBack(query.Unmarshal) + svc.Handlers.UnmarshalMeta.PushBack(query.UnmarshalMeta) + svc.Handlers.UnmarshalError.PushBack(query.UnmarshalError) + + return svc +} + +// newRequest creates a new request for a OutputService8ProtocolTest operation and runs any +// custom request initialization. +func (c *OutputService8ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opOutputService8TestCaseOperation1 = "OperationName" + +// OutputService8TestCaseOperation1Request generates a request for the OutputService8TestCaseOperation1 operation. +func (c *OutputService8ProtocolTest) OutputService8TestCaseOperation1Request(input *OutputService8TestShapeOutputService8TestCaseOperation1Input) (req *request.Request, output *OutputService8TestShapeOutputService8TestCaseOperation1Output) { + op := &request.Operation{ + Name: opOutputService8TestCaseOperation1, + } + + if input == nil { + input = &OutputService8TestShapeOutputService8TestCaseOperation1Input{} + } + + req = c.newRequest(op, input, output) + output = &OutputService8TestShapeOutputService8TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *OutputService8ProtocolTest) OutputService8TestCaseOperation1(input *OutputService8TestShapeOutputService8TestCaseOperation1Input) (*OutputService8TestShapeOutputService8TestCaseOperation1Output, error) { + req, out := c.OutputService8TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +type OutputService8TestShapeOutputService8TestCaseOperation1Input struct { + _ struct{} `type:"structure"` +} + +type OutputService8TestShapeOutputService8TestCaseOperation1Output struct { + _ struct{} `type:"structure"` + + List []*OutputService8TestShapeStructureShape `type:"list"` +} + +type OutputService8TestShapeStructureShape struct { + _ struct{} `type:"structure"` + + Bar *string `type:"string"` + + Baz *string `type:"string"` + + Foo *string `type:"string"` +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type OutputService9ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the OutputService9ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. 
+// +// Example: +// // Create a OutputService9ProtocolTest client from just a session. +// svc := outputservice9protocoltest.New(mySession) +// +// // Create a OutputService9ProtocolTest client with additional configuration +// svc := outputservice9protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewOutputService9ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *OutputService9ProtocolTest { + c := p.ClientConfig("outputservice9protocoltest", cfgs...) + return newOutputService9ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newOutputService9ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *OutputService9ProtocolTest { + svc := &OutputService9ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "outputservice9protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Build.PushBack(query.Build) + svc.Handlers.Unmarshal.PushBack(query.Unmarshal) + svc.Handlers.UnmarshalMeta.PushBack(query.UnmarshalMeta) + svc.Handlers.UnmarshalError.PushBack(query.UnmarshalError) + + return svc +} + +// newRequest creates a new request for a OutputService9ProtocolTest operation and runs any +// custom request initialization. +func (c *OutputService9ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opOutputService9TestCaseOperation1 = "OperationName" + +// OutputService9TestCaseOperation1Request generates a request for the OutputService9TestCaseOperation1 operation. +func (c *OutputService9ProtocolTest) OutputService9TestCaseOperation1Request(input *OutputService9TestShapeOutputService9TestCaseOperation1Input) (req *request.Request, output *OutputService9TestShapeOutputService9TestCaseOperation1Output) { + op := &request.Operation{ + Name: opOutputService9TestCaseOperation1, + } + + if input == nil { + input = &OutputService9TestShapeOutputService9TestCaseOperation1Input{} + } + + req = c.newRequest(op, input, output) + output = &OutputService9TestShapeOutputService9TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *OutputService9ProtocolTest) OutputService9TestCaseOperation1(input *OutputService9TestShapeOutputService9TestCaseOperation1Input) (*OutputService9TestShapeOutputService9TestCaseOperation1Output, error) { + req, out := c.OutputService9TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +type OutputService9TestShapeOutputService9TestCaseOperation1Input struct { + _ struct{} `type:"structure"` +} + +type OutputService9TestShapeOutputService9TestCaseOperation1Output struct { + _ struct{} `type:"structure"` + + List []*OutputService9TestShapeStructureShape `type:"list" flattened:"true"` +} + +type OutputService9TestShapeStructureShape struct { + _ struct{} `type:"structure"` + + Bar *string `type:"string"` + + Baz *string `type:"string"` + + Foo *string `type:"string"` +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type OutputService10ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the OutputService10ProtocolTest client with a session. 
+// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a OutputService10ProtocolTest client from just a session. +// svc := outputservice10protocoltest.New(mySession) +// +// // Create a OutputService10ProtocolTest client with additional configuration +// svc := outputservice10protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewOutputService10ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *OutputService10ProtocolTest { + c := p.ClientConfig("outputservice10protocoltest", cfgs...) + return newOutputService10ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newOutputService10ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *OutputService10ProtocolTest { + svc := &OutputService10ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "outputservice10protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Build.PushBack(query.Build) + svc.Handlers.Unmarshal.PushBack(query.Unmarshal) + svc.Handlers.UnmarshalMeta.PushBack(query.UnmarshalMeta) + svc.Handlers.UnmarshalError.PushBack(query.UnmarshalError) + + return svc +} + +// newRequest creates a new request for a OutputService10ProtocolTest operation and runs any +// custom request initialization. +func (c *OutputService10ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opOutputService10TestCaseOperation1 = "OperationName" + +// OutputService10TestCaseOperation1Request generates a request for the OutputService10TestCaseOperation1 operation. +func (c *OutputService10ProtocolTest) OutputService10TestCaseOperation1Request(input *OutputService10TestShapeOutputService10TestCaseOperation1Input) (req *request.Request, output *OutputService10TestShapeOutputService10TestCaseOperation1Output) { + op := &request.Operation{ + Name: opOutputService10TestCaseOperation1, + } + + if input == nil { + input = &OutputService10TestShapeOutputService10TestCaseOperation1Input{} + } + + req = c.newRequest(op, input, output) + output = &OutputService10TestShapeOutputService10TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *OutputService10ProtocolTest) OutputService10TestCaseOperation1(input *OutputService10TestShapeOutputService10TestCaseOperation1Input) (*OutputService10TestShapeOutputService10TestCaseOperation1Output, error) { + req, out := c.OutputService10TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +type OutputService10TestShapeOutputService10TestCaseOperation1Input struct { + _ struct{} `type:"structure"` +} + +type OutputService10TestShapeOutputService10TestCaseOperation1Output struct { + _ struct{} `type:"structure"` + + List []*string `locationNameList:"NamedList" type:"list" flattened:"true"` +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type OutputService11ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the OutputService11ProtocolTest client with a session. 
+// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a OutputService11ProtocolTest client from just a session. +// svc := outputservice11protocoltest.New(mySession) +// +// // Create a OutputService11ProtocolTest client with additional configuration +// svc := outputservice11protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewOutputService11ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *OutputService11ProtocolTest { + c := p.ClientConfig("outputservice11protocoltest", cfgs...) + return newOutputService11ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newOutputService11ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *OutputService11ProtocolTest { + svc := &OutputService11ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "outputservice11protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Build.PushBack(query.Build) + svc.Handlers.Unmarshal.PushBack(query.Unmarshal) + svc.Handlers.UnmarshalMeta.PushBack(query.UnmarshalMeta) + svc.Handlers.UnmarshalError.PushBack(query.UnmarshalError) + + return svc +} + +// newRequest creates a new request for a OutputService11ProtocolTest operation and runs any +// custom request initialization. +func (c *OutputService11ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opOutputService11TestCaseOperation1 = "OperationName" + +// OutputService11TestCaseOperation1Request generates a request for the OutputService11TestCaseOperation1 operation. +func (c *OutputService11ProtocolTest) OutputService11TestCaseOperation1Request(input *OutputService11TestShapeOutputService11TestCaseOperation1Input) (req *request.Request, output *OutputService11TestShapeOutputService11TestCaseOperation1Output) { + op := &request.Operation{ + Name: opOutputService11TestCaseOperation1, + } + + if input == nil { + input = &OutputService11TestShapeOutputService11TestCaseOperation1Input{} + } + + req = c.newRequest(op, input, output) + output = &OutputService11TestShapeOutputService11TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *OutputService11ProtocolTest) OutputService11TestCaseOperation1(input *OutputService11TestShapeOutputService11TestCaseOperation1Input) (*OutputService11TestShapeOutputService11TestCaseOperation1Output, error) { + req, out := c.OutputService11TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +type OutputService11TestShapeOutputService11TestCaseOperation1Input struct { + _ struct{} `type:"structure"` +} + +type OutputService11TestShapeOutputService11TestCaseOperation1Output struct { + _ struct{} `type:"structure"` + + Map map[string]*OutputService11TestShapeStructType `type:"map"` +} + +type OutputService11TestShapeStructType struct { + _ struct{} `type:"structure"` + + Foo *string `locationName:"foo" type:"string"` +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. 
+type OutputService12ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the OutputService12ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a OutputService12ProtocolTest client from just a session. +// svc := outputservice12protocoltest.New(mySession) +// +// // Create a OutputService12ProtocolTest client with additional configuration +// svc := outputservice12protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewOutputService12ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *OutputService12ProtocolTest { + c := p.ClientConfig("outputservice12protocoltest", cfgs...) + return newOutputService12ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newOutputService12ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *OutputService12ProtocolTest { + svc := &OutputService12ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "outputservice12protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Build.PushBack(query.Build) + svc.Handlers.Unmarshal.PushBack(query.Unmarshal) + svc.Handlers.UnmarshalMeta.PushBack(query.UnmarshalMeta) + svc.Handlers.UnmarshalError.PushBack(query.UnmarshalError) + + return svc +} + +// newRequest creates a new request for a OutputService12ProtocolTest operation and runs any +// custom request initialization. +func (c *OutputService12ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opOutputService12TestCaseOperation1 = "OperationName" + +// OutputService12TestCaseOperation1Request generates a request for the OutputService12TestCaseOperation1 operation. +func (c *OutputService12ProtocolTest) OutputService12TestCaseOperation1Request(input *OutputService12TestShapeOutputService12TestCaseOperation1Input) (req *request.Request, output *OutputService12TestShapeOutputService12TestCaseOperation1Output) { + op := &request.Operation{ + Name: opOutputService12TestCaseOperation1, + } + + if input == nil { + input = &OutputService12TestShapeOutputService12TestCaseOperation1Input{} + } + + req = c.newRequest(op, input, output) + output = &OutputService12TestShapeOutputService12TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *OutputService12ProtocolTest) OutputService12TestCaseOperation1(input *OutputService12TestShapeOutputService12TestCaseOperation1Input) (*OutputService12TestShapeOutputService12TestCaseOperation1Output, error) { + req, out := c.OutputService12TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +type OutputService12TestShapeOutputService12TestCaseOperation1Input struct { + _ struct{} `type:"structure"` +} + +type OutputService12TestShapeOutputService12TestCaseOperation1Output struct { + _ struct{} `type:"structure"` + + Map map[string]*string `type:"map" flattened:"true"` +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. 
+type OutputService13ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the OutputService13ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a OutputService13ProtocolTest client from just a session. +// svc := outputservice13protocoltest.New(mySession) +// +// // Create a OutputService13ProtocolTest client with additional configuration +// svc := outputservice13protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewOutputService13ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *OutputService13ProtocolTest { + c := p.ClientConfig("outputservice13protocoltest", cfgs...) + return newOutputService13ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newOutputService13ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *OutputService13ProtocolTest { + svc := &OutputService13ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "outputservice13protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Build.PushBack(query.Build) + svc.Handlers.Unmarshal.PushBack(query.Unmarshal) + svc.Handlers.UnmarshalMeta.PushBack(query.UnmarshalMeta) + svc.Handlers.UnmarshalError.PushBack(query.UnmarshalError) + + return svc +} + +// newRequest creates a new request for a OutputService13ProtocolTest operation and runs any +// custom request initialization. +func (c *OutputService13ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opOutputService13TestCaseOperation1 = "OperationName" + +// OutputService13TestCaseOperation1Request generates a request for the OutputService13TestCaseOperation1 operation. +func (c *OutputService13ProtocolTest) OutputService13TestCaseOperation1Request(input *OutputService13TestShapeOutputService13TestCaseOperation1Input) (req *request.Request, output *OutputService13TestShapeOutputService13TestCaseOperation1Output) { + op := &request.Operation{ + Name: opOutputService13TestCaseOperation1, + } + + if input == nil { + input = &OutputService13TestShapeOutputService13TestCaseOperation1Input{} + } + + req = c.newRequest(op, input, output) + output = &OutputService13TestShapeOutputService13TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *OutputService13ProtocolTest) OutputService13TestCaseOperation1(input *OutputService13TestShapeOutputService13TestCaseOperation1Input) (*OutputService13TestShapeOutputService13TestCaseOperation1Output, error) { + req, out := c.OutputService13TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +type OutputService13TestShapeOutputService13TestCaseOperation1Input struct { + _ struct{} `type:"structure"` +} + +type OutputService13TestShapeOutputService13TestCaseOperation1Output struct { + _ struct{} `type:"structure"` + + Map map[string]*string `locationName:"Attribute" locationNameKey:"Name" locationNameValue:"Value" type:"map" flattened:"true"` +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. 
+type OutputService14ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the OutputService14ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a OutputService14ProtocolTest client from just a session. +// svc := outputservice14protocoltest.New(mySession) +// +// // Create a OutputService14ProtocolTest client with additional configuration +// svc := outputservice14protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewOutputService14ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *OutputService14ProtocolTest { + c := p.ClientConfig("outputservice14protocoltest", cfgs...) + return newOutputService14ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newOutputService14ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *OutputService14ProtocolTest { + svc := &OutputService14ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "outputservice14protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Build.PushBack(query.Build) + svc.Handlers.Unmarshal.PushBack(query.Unmarshal) + svc.Handlers.UnmarshalMeta.PushBack(query.UnmarshalMeta) + svc.Handlers.UnmarshalError.PushBack(query.UnmarshalError) + + return svc +} + +// newRequest creates a new request for a OutputService14ProtocolTest operation and runs any +// custom request initialization. +func (c *OutputService14ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opOutputService14TestCaseOperation1 = "OperationName" + +// OutputService14TestCaseOperation1Request generates a request for the OutputService14TestCaseOperation1 operation. +func (c *OutputService14ProtocolTest) OutputService14TestCaseOperation1Request(input *OutputService14TestShapeOutputService14TestCaseOperation1Input) (req *request.Request, output *OutputService14TestShapeOutputService14TestCaseOperation1Output) { + op := &request.Operation{ + Name: opOutputService14TestCaseOperation1, + } + + if input == nil { + input = &OutputService14TestShapeOutputService14TestCaseOperation1Input{} + } + + req = c.newRequest(op, input, output) + output = &OutputService14TestShapeOutputService14TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *OutputService14ProtocolTest) OutputService14TestCaseOperation1(input *OutputService14TestShapeOutputService14TestCaseOperation1Input) (*OutputService14TestShapeOutputService14TestCaseOperation1Output, error) { + req, out := c.OutputService14TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +type OutputService14TestShapeOutputService14TestCaseOperation1Input struct { + _ struct{} `type:"structure"` +} + +type OutputService14TestShapeOutputService14TestCaseOperation1Output struct { + _ struct{} `type:"structure"` + + Map map[string]*string `locationNameKey:"foo" locationNameValue:"bar" type:"map" flattened:"true"` +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. 
+type OutputService15ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the OutputService15ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a OutputService15ProtocolTest client from just a session. +// svc := outputservice15protocoltest.New(mySession) +// +// // Create a OutputService15ProtocolTest client with additional configuration +// svc := outputservice15protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewOutputService15ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *OutputService15ProtocolTest { + c := p.ClientConfig("outputservice15protocoltest", cfgs...) + return newOutputService15ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newOutputService15ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *OutputService15ProtocolTest { + svc := &OutputService15ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "outputservice15protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Build.PushBack(query.Build) + svc.Handlers.Unmarshal.PushBack(query.Unmarshal) + svc.Handlers.UnmarshalMeta.PushBack(query.UnmarshalMeta) + svc.Handlers.UnmarshalError.PushBack(query.UnmarshalError) + + return svc +} + +// newRequest creates a new request for a OutputService15ProtocolTest operation and runs any +// custom request initialization. +func (c *OutputService15ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opOutputService15TestCaseOperation1 = "OperationName" + +// OutputService15TestCaseOperation1Request generates a request for the OutputService15TestCaseOperation1 operation. 
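+// The returned request may be customized before it is sent; a sketch
+// (header name illustrative):
+//
+// req, out := svc.OutputService15TestCaseOperation1Request(nil)
+// req.HTTPRequest.Header.Set("X-Example", "value")
+// if err := req.Send(); err == nil {
+// 	fmt.Println(aws.StringValue(out.Foo))
+// }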
+func (c *OutputService15ProtocolTest) OutputService15TestCaseOperation1Request(input *OutputService15TestShapeOutputService15TestCaseOperation1Input) (req *request.Request, output *OutputService15TestShapeOutputService15TestCaseOperation1Output) { + op := &request.Operation{ + Name: opOutputService15TestCaseOperation1, + } + + if input == nil { + input = &OutputService15TestShapeOutputService15TestCaseOperation1Input{} + } + + req = c.newRequest(op, input, output) + output = &OutputService15TestShapeOutputService15TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *OutputService15ProtocolTest) OutputService15TestCaseOperation1(input *OutputService15TestShapeOutputService15TestCaseOperation1Input) (*OutputService15TestShapeOutputService15TestCaseOperation1Output, error) { + req, out := c.OutputService15TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +type OutputService15TestShapeOutputService15TestCaseOperation1Input struct { + _ struct{} `type:"structure"` +} + +type OutputService15TestShapeOutputService15TestCaseOperation1Output struct { + _ struct{} `type:"structure"` + + Foo *string `type:"string"` +} + +// +// Tests begin here +// + +func TestOutputService1ProtocolTestScalarMembersCase1(t *testing.T) { + sess := session.New() + svc := NewOutputService1ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + + buf := bytes.NewReader([]byte("myname123falsetrue1.21.3200a2015-01-25T08:00:00Zrequest-id")) + req, out := svc.OutputService1TestCaseOperation1Request(nil) + req.HTTPResponse = &http.Response{StatusCode: 200, Body: ioutil.NopCloser(buf), Header: http.Header{}} + + // set headers + + // unmarshal response + query.UnmarshalMeta(req) + query.Unmarshal(req) + assert.NoError(t, req.Error) + + // assert response + assert.NotNil(t, out) // ensure out variable is used + assert.Equal(t, "a", *out.Char) + assert.Equal(t, 1.3, *out.Double) + assert.Equal(t, false, *out.FalseBool) + assert.Equal(t, 1.2, *out.Float) + assert.Equal(t, int64(200), *out.Long) + assert.Equal(t, int64(123), *out.Num) + assert.Equal(t, "myname", *out.Str) + assert.Equal(t, time.Unix(1.4221728e+09, 0).UTC().String(), out.Timestamp.String()) + assert.Equal(t, true, *out.TrueBool) + +} + +func TestOutputService2ProtocolTestNotAllMembersInResponseCase1(t *testing.T) { + sess := session.New() + svc := NewOutputService2ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + + buf := bytes.NewReader([]byte("mynamerequest-id")) + req, out := svc.OutputService2TestCaseOperation1Request(nil) + req.HTTPResponse = &http.Response{StatusCode: 200, Body: ioutil.NopCloser(buf), Header: http.Header{}} + + // set headers + + // unmarshal response + query.UnmarshalMeta(req) + query.Unmarshal(req) + assert.NoError(t, req.Error) + + // assert response + assert.NotNil(t, out) // ensure out variable is used + assert.Equal(t, "myname", *out.Str) + +} + +func TestOutputService3ProtocolTestBlobCase1(t *testing.T) { + sess := session.New() + svc := NewOutputService3ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + + buf := bytes.NewReader([]byte("dmFsdWU=requestid")) + req, out := svc.OutputService3TestCaseOperation1Request(nil) + req.HTTPResponse = &http.Response{StatusCode: 200, Body: ioutil.NopCloser(buf), Header: http.Header{}} + + // set headers + + // unmarshal response + query.UnmarshalMeta(req) + query.Unmarshal(req) + assert.NoError(t, req.Error) + + // assert response + assert.NotNil(t, out) // ensure out variable is used + 
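+ // "dmFsdWU=" in the stubbed body is the base64 encoding of "value";
+ // query.Unmarshal base64-decodes blob members into []byte, which the
+ // assertion below verifies.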
assert.Equal(t, "value", string(out.Blob)) + +} + +func TestOutputService4ProtocolTestListsCase1(t *testing.T) { + sess := session.New() + svc := NewOutputService4ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + + buf := bytes.NewReader([]byte("abc123requestid")) + req, out := svc.OutputService4TestCaseOperation1Request(nil) + req.HTTPResponse = &http.Response{StatusCode: 200, Body: ioutil.NopCloser(buf), Header: http.Header{}} + + // set headers + + // unmarshal response + query.UnmarshalMeta(req) + query.Unmarshal(req) + assert.NoError(t, req.Error) + + // assert response + assert.NotNil(t, out) // ensure out variable is used + assert.Equal(t, "abc", *out.ListMember[0]) + assert.Equal(t, "123", *out.ListMember[1]) + +} + +func TestOutputService5ProtocolTestListWithCustomMemberNameCase1(t *testing.T) { + sess := session.New() + svc := NewOutputService5ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + + buf := bytes.NewReader([]byte("abc123requestid")) + req, out := svc.OutputService5TestCaseOperation1Request(nil) + req.HTTPResponse = &http.Response{StatusCode: 200, Body: ioutil.NopCloser(buf), Header: http.Header{}} + + // set headers + + // unmarshal response + query.UnmarshalMeta(req) + query.Unmarshal(req) + assert.NoError(t, req.Error) + + // assert response + assert.NotNil(t, out) // ensure out variable is used + assert.Equal(t, "abc", *out.ListMember[0]) + assert.Equal(t, "123", *out.ListMember[1]) + +} + +func TestOutputService6ProtocolTestFlattenedListCase1(t *testing.T) { + sess := session.New() + svc := NewOutputService6ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + + buf := bytes.NewReader([]byte("abc123requestid")) + req, out := svc.OutputService6TestCaseOperation1Request(nil) + req.HTTPResponse = &http.Response{StatusCode: 200, Body: ioutil.NopCloser(buf), Header: http.Header{}} + + // set headers + + // unmarshal response + query.UnmarshalMeta(req) + query.Unmarshal(req) + assert.NoError(t, req.Error) + + // assert response + assert.NotNil(t, out) // ensure out variable is used + assert.Equal(t, "abc", *out.ListMember[0]) + assert.Equal(t, "123", *out.ListMember[1]) + +} + +func TestOutputService7ProtocolTestFlattenedSingleElementListCase1(t *testing.T) { + sess := session.New() + svc := NewOutputService7ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + + buf := bytes.NewReader([]byte("abcrequestid")) + req, out := svc.OutputService7TestCaseOperation1Request(nil) + req.HTTPResponse = &http.Response{StatusCode: 200, Body: ioutil.NopCloser(buf), Header: http.Header{}} + + // set headers + + // unmarshal response + query.UnmarshalMeta(req) + query.Unmarshal(req) + assert.NoError(t, req.Error) + + // assert response + assert.NotNil(t, out) // ensure out variable is used + assert.Equal(t, "abc", *out.ListMember[0]) + +} + +func TestOutputService8ProtocolTestListOfStructuresCase1(t *testing.T) { + sess := session.New() + svc := NewOutputService8ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + + buf := bytes.NewReader([]byte("firstfoofirstbarfirstbazsecondfoosecondbarsecondbazrequestid")) + req, out := svc.OutputService8TestCaseOperation1Request(nil) + req.HTTPResponse = &http.Response{StatusCode: 200, Body: ioutil.NopCloser(buf), Header: http.Header{}} + + // set headers + + // unmarshal response + query.UnmarshalMeta(req) + query.Unmarshal(req) + assert.NoError(t, req.Error) + + // assert response + assert.NotNil(t, out) // ensure out variable is used + 
assert.Equal(t, "firstbar", *out.List[0].Bar) + assert.Equal(t, "firstbaz", *out.List[0].Baz) + assert.Equal(t, "firstfoo", *out.List[0].Foo) + assert.Equal(t, "secondbar", *out.List[1].Bar) + assert.Equal(t, "secondbaz", *out.List[1].Baz) + assert.Equal(t, "secondfoo", *out.List[1].Foo) + +} + +func TestOutputService9ProtocolTestFlattenedListOfStructuresCase1(t *testing.T) { + sess := session.New() + svc := NewOutputService9ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + + buf := bytes.NewReader([]byte("firstfoofirstbarfirstbazsecondfoosecondbarsecondbazrequestid")) + req, out := svc.OutputService9TestCaseOperation1Request(nil) + req.HTTPResponse = &http.Response{StatusCode: 200, Body: ioutil.NopCloser(buf), Header: http.Header{}} + + // set headers + + // unmarshal response + query.UnmarshalMeta(req) + query.Unmarshal(req) + assert.NoError(t, req.Error) + + // assert response + assert.NotNil(t, out) // ensure out variable is used + assert.Equal(t, "firstbar", *out.List[0].Bar) + assert.Equal(t, "firstbaz", *out.List[0].Baz) + assert.Equal(t, "firstfoo", *out.List[0].Foo) + assert.Equal(t, "secondbar", *out.List[1].Bar) + assert.Equal(t, "secondbaz", *out.List[1].Baz) + assert.Equal(t, "secondfoo", *out.List[1].Foo) + +} + +func TestOutputService10ProtocolTestFlattenedListWithLocationNameCase1(t *testing.T) { + sess := session.New() + svc := NewOutputService10ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + + buf := bytes.NewReader([]byte("abrequestid")) + req, out := svc.OutputService10TestCaseOperation1Request(nil) + req.HTTPResponse = &http.Response{StatusCode: 200, Body: ioutil.NopCloser(buf), Header: http.Header{}} + + // set headers + + // unmarshal response + query.UnmarshalMeta(req) + query.Unmarshal(req) + assert.NoError(t, req.Error) + + // assert response + assert.NotNil(t, out) // ensure out variable is used + assert.Equal(t, "a", *out.List[0]) + assert.Equal(t, "b", *out.List[1]) + +} + +func TestOutputService11ProtocolTestNormalMapCase1(t *testing.T) { + sess := session.New() + svc := NewOutputService11ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + + buf := bytes.NewReader([]byte("quxbarbazbamrequestid")) + req, out := svc.OutputService11TestCaseOperation1Request(nil) + req.HTTPResponse = &http.Response{StatusCode: 200, Body: ioutil.NopCloser(buf), Header: http.Header{}} + + // set headers + + // unmarshal response + query.UnmarshalMeta(req) + query.Unmarshal(req) + assert.NoError(t, req.Error) + + // assert response + assert.NotNil(t, out) // ensure out variable is used + assert.Equal(t, "bam", *out.Map["baz"].Foo) + assert.Equal(t, "bar", *out.Map["qux"].Foo) + +} + +func TestOutputService12ProtocolTestFlattenedMapCase1(t *testing.T) { + sess := session.New() + svc := NewOutputService12ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + + buf := bytes.NewReader([]byte("quxbarbazbamrequestid")) + req, out := svc.OutputService12TestCaseOperation1Request(nil) + req.HTTPResponse = &http.Response{StatusCode: 200, Body: ioutil.NopCloser(buf), Header: http.Header{}} + + // set headers + + // unmarshal response + query.UnmarshalMeta(req) + query.Unmarshal(req) + assert.NoError(t, req.Error) + + // assert response + assert.NotNil(t, out) // ensure out variable is used + assert.Equal(t, "bam", *out.Map["baz"]) + assert.Equal(t, "bar", *out.Map["qux"]) + +} + +func TestOutputService13ProtocolTestFlattenedMapInShapeDefinitionCase1(t *testing.T) { + sess := session.New() + svc := 
NewOutputService13ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + + buf := bytes.NewReader([]byte("quxbarrequestid")) + req, out := svc.OutputService13TestCaseOperation1Request(nil) + req.HTTPResponse = &http.Response{StatusCode: 200, Body: ioutil.NopCloser(buf), Header: http.Header{}} + + // set headers + + // unmarshal response + query.UnmarshalMeta(req) + query.Unmarshal(req) + assert.NoError(t, req.Error) + + // assert response + assert.NotNil(t, out) // ensure out variable is used + assert.Equal(t, "bar", *out.Map["qux"]) + +} + +func TestOutputService14ProtocolTestNamedMapCase1(t *testing.T) { + sess := session.New() + svc := NewOutputService14ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + + buf := bytes.NewReader([]byte("quxbarbazbamrequestid")) + req, out := svc.OutputService14TestCaseOperation1Request(nil) + req.HTTPResponse = &http.Response{StatusCode: 200, Body: ioutil.NopCloser(buf), Header: http.Header{}} + + // set headers + + // unmarshal response + query.UnmarshalMeta(req) + query.Unmarshal(req) + assert.NoError(t, req.Error) + + // assert response + assert.NotNil(t, out) // ensure out variable is used + assert.Equal(t, "bam", *out.Map["baz"]) + assert.Equal(t, "bar", *out.Map["qux"]) + +} + +func TestOutputService15ProtocolTestEmptyStringCase1(t *testing.T) { + sess := session.New() + svc := NewOutputService15ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + + buf := bytes.NewReader([]byte("requestid")) + req, out := svc.OutputService15TestCaseOperation1Request(nil) + req.HTTPResponse = &http.Response{StatusCode: 200, Body: ioutil.NopCloser(buf), Header: http.Header{}} + + // set headers + + // unmarshal response + query.UnmarshalMeta(req) + query.Unmarshal(req) + assert.NoError(t, req.Error) + + // assert response + assert.NotNil(t, out) // ensure out variable is used + assert.Equal(t, "", *out.Foo) + +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/build.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/build.go new file mode 100644 index 000000000..ed3c2e039 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/build.go @@ -0,0 +1,254 @@ +// Package rest provides RESTful serialization of AWS requests and responses. +package rest + +import ( + "bytes" + "encoding/base64" + "fmt" + "io" + "net/http" + "net/url" + "path" + "reflect" + "strconv" + "strings" + "time" + + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/request" +) + +// RFC822 returns an RFC822 formatted timestamp for AWS protocols +const RFC822 = "Mon, 2 Jan 2006 15:04:05 GMT" + +// Whether the byte value can be sent without escaping in AWS URLs +var noEscape [256]bool + +var errValueNotSet = fmt.Errorf("value not set") + +func init() { + for i := 0; i < len(noEscape); i++ { + // AWS expects every character except these to be escaped + noEscape[i] = (i >= 'A' && i <= 'Z') || + (i >= 'a' && i <= 'z') || + (i >= '0' && i <= '9') || + i == '-' || + i == '.' || + i == '_' || + i == '~' + } +} + +// Build builds the REST component of a service request. 
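+//
+// Members are routed by their "location" struct tag ("uri", "querystring",
+// "header", or "headers"), mirroring the switch in buildLocationElements
+// below; for example (shape and tags illustrative):
+//
+// type HypotheticalInput struct {
+// 	Bucket *string `location:"uri" locationName:"Bucket" type:"string"`
+// 	Token  *string `location:"querystring" locationName:"token" type:"string"`
+// 	ETag   *string `location:"header" locationName:"x-amz-etag" type:"string"`
+// }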
+func Build(r *request.Request) { + if r.ParamsFilled() { + v := reflect.ValueOf(r.Params).Elem() + buildLocationElements(r, v) + buildBody(r, v) + } +} + +func buildLocationElements(r *request.Request, v reflect.Value) { + query := r.HTTPRequest.URL.Query() + + for i := 0; i < v.NumField(); i++ { + m := v.Field(i) + if n := v.Type().Field(i).Name; n[0:1] == strings.ToLower(n[0:1]) { + continue + } + + if m.IsValid() { + field := v.Type().Field(i) + name := field.Tag.Get("locationName") + if name == "" { + name = field.Name + } + if m.Kind() == reflect.Ptr { + m = m.Elem() + } + if !m.IsValid() { + continue + } + + var err error + switch field.Tag.Get("location") { + case "headers": // header maps + err = buildHeaderMap(&r.HTTPRequest.Header, m, field.Tag.Get("locationName")) + case "header": + err = buildHeader(&r.HTTPRequest.Header, m, name) + case "uri": + err = buildURI(r.HTTPRequest.URL, m, name) + case "querystring": + err = buildQueryString(query, m, name) + } + r.Error = err + } + if r.Error != nil { + return + } + } + + r.HTTPRequest.URL.RawQuery = query.Encode() + updatePath(r.HTTPRequest.URL, r.HTTPRequest.URL.Path) +} + +func buildBody(r *request.Request, v reflect.Value) { + if field, ok := v.Type().FieldByName("_"); ok { + if payloadName := field.Tag.Get("payload"); payloadName != "" { + pfield, _ := v.Type().FieldByName(payloadName) + if ptag := pfield.Tag.Get("type"); ptag != "" && ptag != "structure" { + payload := reflect.Indirect(v.FieldByName(payloadName)) + if payload.IsValid() && payload.Interface() != nil { + switch reader := payload.Interface().(type) { + case io.ReadSeeker: + r.SetReaderBody(reader) + case []byte: + r.SetBufferBody(reader) + case string: + r.SetStringBody(reader) + default: + r.Error = awserr.New("SerializationError", + "failed to encode REST request", + fmt.Errorf("unknown payload type %s", payload.Type())) + } + } + } + } + } +} + +func buildHeader(header *http.Header, v reflect.Value, name string) error { + str, err := convertType(v) + if err == errValueNotSet { + return nil + } else if err != nil { + return awserr.New("SerializationError", "failed to encode REST request", err) + } + + header.Add(name, str) + + return nil +} + +func buildHeaderMap(header *http.Header, v reflect.Value, prefix string) error { + for _, key := range v.MapKeys() { + str, err := convertType(v.MapIndex(key)) + if err == errValueNotSet { + continue + } else if err != nil { + return awserr.New("SerializationError", "failed to encode REST request", err) + + } + + header.Add(prefix+key.String(), str) + } + return nil +} + +func buildURI(u *url.URL, v reflect.Value, name string) error { + value, err := convertType(v) + if err == errValueNotSet { + return nil + } else if err != nil { + return awserr.New("SerializationError", "failed to encode REST request", err) + } + + uri := u.Path + uri = strings.Replace(uri, "{"+name+"}", EscapePath(value, true), -1) + uri = strings.Replace(uri, "{"+name+"+}", EscapePath(value, false), -1) + u.Path = uri + + return nil +} + +func buildQueryString(query url.Values, v reflect.Value, name string) error { + switch value := v.Interface().(type) { + case []*string: + for _, item := range value { + query.Add(name, *item) + } + case map[string]*string: + for key, item := range value { + query.Add(key, *item) + } + case map[string][]*string: + for key, items := range value { + for _, item := range items { + query.Add(key, *item) + } + } + default: + str, err := convertType(v) + if err == errValueNotSet { + return nil + } else if err != nil { + 
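+ // Conversion failures other than the errValueNotSet sentinel are
+ // wrapped as a SerializationError, matching the other build helpers.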
return awserr.New("SerializationError", "failed to encode REST request", err) + } + query.Set(name, str) + } + + return nil +} + +func updatePath(url *url.URL, urlPath string) { + scheme, query := url.Scheme, url.RawQuery + + hasSlash := strings.HasSuffix(urlPath, "/") + + // clean up path + urlPath = path.Clean(urlPath) + if hasSlash && !strings.HasSuffix(urlPath, "/") { + urlPath += "/" + } + + // get formatted URL minus scheme so we can build this into Opaque + url.Scheme, url.Path, url.RawQuery = "", "", "" + s := url.String() + url.Scheme = scheme + url.RawQuery = query + + // build opaque URI + url.Opaque = s + urlPath +} + +// EscapePath escapes part of a URL path in Amazon style +func EscapePath(path string, encodeSep bool) string { + var buf bytes.Buffer + for i := 0; i < len(path); i++ { + c := path[i] + if noEscape[c] || (c == '/' && !encodeSep) { + buf.WriteByte(c) + } else { + buf.WriteByte('%') + buf.WriteString(strings.ToUpper(strconv.FormatUint(uint64(c), 16))) + } + } + return buf.String() +} + +func convertType(v reflect.Value) (string, error) { + v = reflect.Indirect(v) + if !v.IsValid() { + return "", errValueNotSet + } + + var str string + switch value := v.Interface().(type) { + case string: + str = value + case []byte: + str = base64.StdEncoding.EncodeToString(value) + case bool: + str = strconv.FormatBool(value) + case int64: + str = strconv.FormatInt(value, 10) + case float64: + str = strconv.FormatFloat(value, 'f', -1, 64) + case time.Time: + str = value.UTC().Format(RFC822) + default: + err := fmt.Errorf("Unsupported value for param %v (%s)", v.Interface(), v.Type()) + return "", err + } + return str, nil +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/payload.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/payload.go new file mode 100644 index 000000000..4366de2e1 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/payload.go @@ -0,0 +1,45 @@ +package rest + +import "reflect" + +// PayloadMember returns the payload field member of i if there is one, or nil. +func PayloadMember(i interface{}) interface{} { + if i == nil { + return nil + } + + v := reflect.ValueOf(i).Elem() + if !v.IsValid() { + return nil + } + if field, ok := v.Type().FieldByName("_"); ok { + if payloadName := field.Tag.Get("payload"); payloadName != "" { + field, _ := v.Type().FieldByName(payloadName) + if field.Tag.Get("type") != "structure" { + return nil + } + + payload := v.FieldByName(payloadName) + if payload.IsValid() || (payload.Kind() == reflect.Ptr && !payload.IsNil()) { + return payload.Interface() + } + } + } + return nil +} + +// PayloadType returns the type of a payload field member of i if there is one, or "". 
+func PayloadType(i interface{}) string { + v := reflect.Indirect(reflect.ValueOf(i)) + if !v.IsValid() { + return "" + } + if field, ok := v.Type().FieldByName("_"); ok { + if payloadName := field.Tag.Get("payload"); payloadName != "" { + if member, ok := v.Type().FieldByName(payloadName); ok { + return member.Tag.Get("type") + } + } + } + return "" +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/unmarshal.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/unmarshal.go new file mode 100644 index 000000000..46837f66c --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/unmarshal.go @@ -0,0 +1,187 @@ +package rest + +import ( + "encoding/base64" + "fmt" + "io/ioutil" + "net/http" + "reflect" + "strconv" + "strings" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/request" +) + +// Unmarshal unmarshals the REST component of a response in a REST service. +func Unmarshal(r *request.Request) { + if r.DataFilled() { + v := reflect.Indirect(reflect.ValueOf(r.Data)) + unmarshalBody(r, v) + } +} + +// UnmarshalMeta unmarshals the REST metadata of a response in a REST service +func UnmarshalMeta(r *request.Request) { + r.RequestID = r.HTTPResponse.Header.Get("X-Amzn-Requestid") + if r.RequestID == "" { + // Alternative version of request id in the header + r.RequestID = r.HTTPResponse.Header.Get("X-Amz-Request-Id") + } + if r.DataFilled() { + v := reflect.Indirect(reflect.ValueOf(r.Data)) + unmarshalLocationElements(r, v) + } +} + +func unmarshalBody(r *request.Request, v reflect.Value) { + if field, ok := v.Type().FieldByName("_"); ok { + if payloadName := field.Tag.Get("payload"); payloadName != "" { + pfield, _ := v.Type().FieldByName(payloadName) + if ptag := pfield.Tag.Get("type"); ptag != "" && ptag != "structure" { + payload := v.FieldByName(payloadName) + if payload.IsValid() { + switch payload.Interface().(type) { + case []byte: + b, err := ioutil.ReadAll(r.HTTPResponse.Body) + if err != nil { + r.Error = awserr.New("SerializationError", "failed to decode REST response", err) + } else { + payload.Set(reflect.ValueOf(b)) + } + case *string: + b, err := ioutil.ReadAll(r.HTTPResponse.Body) + if err != nil { + r.Error = awserr.New("SerializationError", "failed to decode REST response", err) + } else { + str := string(b) + payload.Set(reflect.ValueOf(&str)) + } + default: + switch payload.Type().String() { + case "io.ReadSeeker": + payload.Set(reflect.ValueOf(aws.ReadSeekCloser(r.HTTPResponse.Body))) + case "aws.ReadSeekCloser", "io.ReadCloser": + payload.Set(reflect.ValueOf(r.HTTPResponse.Body)) + default: + r.Error = awserr.New("SerializationError", + "failed to decode REST response", + fmt.Errorf("unknown payload type %s", payload.Type())) + } + } + } + } + } + } +} + +func unmarshalLocationElements(r *request.Request, v reflect.Value) { + for i := 0; i < v.NumField(); i++ { + m, field := v.Field(i), v.Type().Field(i) + if n := field.Name; n[0:1] == strings.ToLower(n[0:1]) { + continue + } + + if m.IsValid() { + name := field.Tag.Get("locationName") + if name == "" { + name = field.Name + } + + switch field.Tag.Get("location") { + case "statusCode": + unmarshalStatusCode(m, r.HTTPResponse.StatusCode) + case "header": + err := unmarshalHeader(m, r.HTTPResponse.Header.Get(name)) + if err != nil { + r.Error = awserr.New("SerializationError", "failed to decode REST response", err) + break + } + case "headers": + prefix := field.Tag.Get("locationName") + err := 
unmarshalHeaderMap(m, r.HTTPResponse.Header, prefix) + if err != nil { + r.Error = awserr.New("SerializationError", "failed to decode REST response", err) + break + } + } + } + if r.Error != nil { + return + } + } +} + +func unmarshalStatusCode(v reflect.Value, statusCode int) { + if !v.IsValid() { + return + } + + switch v.Interface().(type) { + case *int64: + s := int64(statusCode) + v.Set(reflect.ValueOf(&s)) + } +} + +func unmarshalHeaderMap(r reflect.Value, headers http.Header, prefix string) error { + switch r.Interface().(type) { + case map[string]*string: // we only support string map value types + out := map[string]*string{} + for k, v := range headers { + k = http.CanonicalHeaderKey(k) + if strings.HasPrefix(strings.ToLower(k), strings.ToLower(prefix)) { + out[k[len(prefix):]] = &v[0] + } + } + r.Set(reflect.ValueOf(out)) + } + return nil +} + +func unmarshalHeader(v reflect.Value, header string) error { + if !v.IsValid() || (header == "" && v.Elem().Kind() != reflect.String) { + return nil + } + + switch v.Interface().(type) { + case *string: + v.Set(reflect.ValueOf(&header)) + case []byte: + b, err := base64.StdEncoding.DecodeString(header) + if err != nil { + return err + } + v.Set(reflect.ValueOf(&b)) + case *bool: + b, err := strconv.ParseBool(header) + if err != nil { + return err + } + v.Set(reflect.ValueOf(&b)) + case *int64: + i, err := strconv.ParseInt(header, 10, 64) + if err != nil { + return err + } + v.Set(reflect.ValueOf(&i)) + case *float64: + f, err := strconv.ParseFloat(header, 64) + if err != nil { + return err + } + v.Set(reflect.ValueOf(&f)) + case *time.Time: + t, err := time.Parse(RFC822, header) + if err != nil { + return err + } + v.Set(reflect.ValueOf(&t)) + default: + err := fmt.Errorf("Unsupported value for param %v (%s)", v.Interface(), v.Type()) + return err + } + return nil +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/restjson/build_bench_test.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/restjson/build_bench_test.go new file mode 100644 index 000000000..31e1d6c04 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/restjson/build_bench_test.go @@ -0,0 +1,356 @@ +// +build bench + +package restjson_test + +import ( + "bytes" + "encoding/json" + "testing" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/awstesting" + "github.com/aws/aws-sdk-go/private/protocol/rest" + "github.com/aws/aws-sdk-go/private/protocol/restjson" + "github.com/aws/aws-sdk-go/service/elastictranscoder" +) + +func BenchmarkRESTJSONBuild_Complex_elastictranscoderCreateJobInput(b *testing.B) { + svc := awstesting.NewClient() + svc.ServiceName = "elastictranscoder" + svc.APIVersion = "2012-09-25" + + for i := 0; i < b.N; i++ { + r := svc.NewRequest(&request.Operation{Name: "CreateJobInput"}, restjsonBuildParms, nil) + restjson.Build(r) + if r.Error != nil { + b.Fatal("Unexpected error", r.Error) + } + } +} + +func BenchmarkRESTBuild_Complex_elastictranscoderCreateJobInput(b *testing.B) { + svc := awstesting.NewClient() + svc.ServiceName = "elastictranscoder" + svc.APIVersion = "2012-09-25" + + for i := 0; i < b.N; i++ { + r := svc.NewRequest(&request.Operation{Name: "CreateJobInput"}, restjsonBuildParms, nil) + rest.Build(r) + if r.Error != nil { + b.Fatal("Unexpected error", r.Error) + } + } +} + +func BenchmarkEncodingJSONMarshal_Complex_elastictranscoderCreateJobInput(b *testing.B) { + params := restjsonBuildParms + + for i := 0; i < b.N; i++ { + buf := &bytes.Buffer{} + 
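+ // A fresh buffer each iteration keeps the measurement to encoding cost
+ // alone, comparable with the rest/restjson builder benchmarks above.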
encoder := json.NewEncoder(buf) + if err := encoder.Encode(params); err != nil { + b.Fatal("Unexpected error", err) + } + } +} + +func BenchmarkRESTJSONBuild_Simple_elastictranscoderListJobsByPipeline(b *testing.B) { + svc := awstesting.NewClient() + svc.ServiceName = "elastictranscoder" + svc.APIVersion = "2012-09-25" + + params := &elastictranscoder.ListJobsByPipelineInput{ + PipelineId: aws.String("Id"), // Required + Ascending: aws.String("Ascending"), + PageToken: aws.String("Id"), + } + + for i := 0; i < b.N; i++ { + r := svc.NewRequest(&request.Operation{Name: "ListJobsByPipeline"}, params, nil) + restjson.Build(r) + if r.Error != nil { + b.Fatal("Unexpected error", r.Error) + } + } +} + +func BenchmarkRESTBuild_Simple_elastictranscoderListJobsByPipeline(b *testing.B) { + svc := awstesting.NewClient() + svc.ServiceName = "elastictranscoder" + svc.APIVersion = "2012-09-25" + + params := &elastictranscoder.ListJobsByPipelineInput{ + PipelineId: aws.String("Id"), // Required + Ascending: aws.String("Ascending"), + PageToken: aws.String("Id"), + } + + for i := 0; i < b.N; i++ { + r := svc.NewRequest(&request.Operation{Name: "ListJobsByPipeline"}, params, nil) + rest.Build(r) + if r.Error != nil { + b.Fatal("Unexpected error", r.Error) + } + } +} + +func BenchmarkEncodingJSONMarshal_Simple_elastictranscoderListJobsByPipeline(b *testing.B) { + params := &elastictranscoder.ListJobsByPipelineInput{ + PipelineId: aws.String("Id"), // Required + Ascending: aws.String("Ascending"), + PageToken: aws.String("Id"), + } + + for i := 0; i < b.N; i++ { + buf := &bytes.Buffer{} + encoder := json.NewEncoder(buf) + if err := encoder.Encode(params); err != nil { + b.Fatal("Unexpected error", err) + } + } +} + +var restjsonBuildParms = &elastictranscoder.CreateJobInput{ + Input: &elastictranscoder.JobInput{ // Required + AspectRatio: aws.String("AspectRatio"), + Container: aws.String("JobContainer"), + DetectedProperties: &elastictranscoder.DetectedProperties{ + DurationMillis: aws.Int64(1), + FileSize: aws.Int64(1), + FrameRate: aws.String("FloatString"), + Height: aws.Int64(1), + Width: aws.Int64(1), + }, + Encryption: &elastictranscoder.Encryption{ + InitializationVector: aws.String("ZeroTo255String"), + Key: aws.String("Base64EncodedString"), + KeyMd5: aws.String("Base64EncodedString"), + Mode: aws.String("EncryptionMode"), + }, + FrameRate: aws.String("FrameRate"), + Interlaced: aws.String("Interlaced"), + Key: aws.String("Key"), + Resolution: aws.String("Resolution"), + }, + PipelineId: aws.String("Id"), // Required + Output: &elastictranscoder.CreateJobOutput{ + AlbumArt: &elastictranscoder.JobAlbumArt{ + Artwork: []*elastictranscoder.Artwork{ + { // Required + AlbumArtFormat: aws.String("JpgOrPng"), + Encryption: &elastictranscoder.Encryption{ + InitializationVector: aws.String("ZeroTo255String"), + Key: aws.String("Base64EncodedString"), + KeyMd5: aws.String("Base64EncodedString"), + Mode: aws.String("EncryptionMode"), + }, + InputKey: aws.String("WatermarkKey"), + MaxHeight: aws.String("DigitsOrAuto"), + MaxWidth: aws.String("DigitsOrAuto"), + PaddingPolicy: aws.String("PaddingPolicy"), + SizingPolicy: aws.String("SizingPolicy"), + }, + // More values... 
+ }, + MergePolicy: aws.String("MergePolicy"), + }, + Captions: &elastictranscoder.Captions{ + CaptionFormats: []*elastictranscoder.CaptionFormat{ + { // Required + Encryption: &elastictranscoder.Encryption{ + InitializationVector: aws.String("ZeroTo255String"), + Key: aws.String("Base64EncodedString"), + KeyMd5: aws.String("Base64EncodedString"), + Mode: aws.String("EncryptionMode"), + }, + Format: aws.String("CaptionFormatFormat"), + Pattern: aws.String("CaptionFormatPattern"), + }, + // More values... + }, + CaptionSources: []*elastictranscoder.CaptionSource{ + { // Required + Encryption: &elastictranscoder.Encryption{ + InitializationVector: aws.String("ZeroTo255String"), + Key: aws.String("Base64EncodedString"), + KeyMd5: aws.String("Base64EncodedString"), + Mode: aws.String("EncryptionMode"), + }, + Key: aws.String("Key"), + Label: aws.String("Name"), + Language: aws.String("Key"), + TimeOffset: aws.String("TimeOffset"), + }, + // More values... + }, + MergePolicy: aws.String("CaptionMergePolicy"), + }, + Composition: []*elastictranscoder.Clip{ + { // Required + TimeSpan: &elastictranscoder.TimeSpan{ + Duration: aws.String("Time"), + StartTime: aws.String("Time"), + }, + }, + // More values... + }, + Encryption: &elastictranscoder.Encryption{ + InitializationVector: aws.String("ZeroTo255String"), + Key: aws.String("Base64EncodedString"), + KeyMd5: aws.String("Base64EncodedString"), + Mode: aws.String("EncryptionMode"), + }, + Key: aws.String("Key"), + PresetId: aws.String("Id"), + Rotate: aws.String("Rotate"), + SegmentDuration: aws.String("FloatString"), + ThumbnailEncryption: &elastictranscoder.Encryption{ + InitializationVector: aws.String("ZeroTo255String"), + Key: aws.String("Base64EncodedString"), + KeyMd5: aws.String("Base64EncodedString"), + Mode: aws.String("EncryptionMode"), + }, + ThumbnailPattern: aws.String("ThumbnailPattern"), + Watermarks: []*elastictranscoder.JobWatermark{ + { // Required + Encryption: &elastictranscoder.Encryption{ + InitializationVector: aws.String("ZeroTo255String"), + Key: aws.String("Base64EncodedString"), + KeyMd5: aws.String("Base64EncodedString"), + Mode: aws.String("EncryptionMode"), + }, + InputKey: aws.String("WatermarkKey"), + PresetWatermarkId: aws.String("PresetWatermarkId"), + }, + // More values... + }, + }, + OutputKeyPrefix: aws.String("Key"), + Outputs: []*elastictranscoder.CreateJobOutput{ + { // Required + AlbumArt: &elastictranscoder.JobAlbumArt{ + Artwork: []*elastictranscoder.Artwork{ + { // Required + AlbumArtFormat: aws.String("JpgOrPng"), + Encryption: &elastictranscoder.Encryption{ + InitializationVector: aws.String("ZeroTo255String"), + Key: aws.String("Base64EncodedString"), + KeyMd5: aws.String("Base64EncodedString"), + Mode: aws.String("EncryptionMode"), + }, + InputKey: aws.String("WatermarkKey"), + MaxHeight: aws.String("DigitsOrAuto"), + MaxWidth: aws.String("DigitsOrAuto"), + PaddingPolicy: aws.String("PaddingPolicy"), + SizingPolicy: aws.String("SizingPolicy"), + }, + // More values... + }, + MergePolicy: aws.String("MergePolicy"), + }, + Captions: &elastictranscoder.Captions{ + CaptionFormats: []*elastictranscoder.CaptionFormat{ + { // Required + Encryption: &elastictranscoder.Encryption{ + InitializationVector: aws.String("ZeroTo255String"), + Key: aws.String("Base64EncodedString"), + KeyMd5: aws.String("Base64EncodedString"), + Mode: aws.String("EncryptionMode"), + }, + Format: aws.String("CaptionFormatFormat"), + Pattern: aws.String("CaptionFormatPattern"), + }, + // More values... 
+ }, + CaptionSources: []*elastictranscoder.CaptionSource{ + { // Required + Encryption: &elastictranscoder.Encryption{ + InitializationVector: aws.String("ZeroTo255String"), + Key: aws.String("Base64EncodedString"), + KeyMd5: aws.String("Base64EncodedString"), + Mode: aws.String("EncryptionMode"), + }, + Key: aws.String("Key"), + Label: aws.String("Name"), + Language: aws.String("Key"), + TimeOffset: aws.String("TimeOffset"), + }, + // More values... + }, + MergePolicy: aws.String("CaptionMergePolicy"), + }, + Composition: []*elastictranscoder.Clip{ + { // Required + TimeSpan: &elastictranscoder.TimeSpan{ + Duration: aws.String("Time"), + StartTime: aws.String("Time"), + }, + }, + // More values... + }, + Encryption: &elastictranscoder.Encryption{ + InitializationVector: aws.String("ZeroTo255String"), + Key: aws.String("Base64EncodedString"), + KeyMd5: aws.String("Base64EncodedString"), + Mode: aws.String("EncryptionMode"), + }, + Key: aws.String("Key"), + PresetId: aws.String("Id"), + Rotate: aws.String("Rotate"), + SegmentDuration: aws.String("FloatString"), + ThumbnailEncryption: &elastictranscoder.Encryption{ + InitializationVector: aws.String("ZeroTo255String"), + Key: aws.String("Base64EncodedString"), + KeyMd5: aws.String("Base64EncodedString"), + Mode: aws.String("EncryptionMode"), + }, + ThumbnailPattern: aws.String("ThumbnailPattern"), + Watermarks: []*elastictranscoder.JobWatermark{ + { // Required + Encryption: &elastictranscoder.Encryption{ + InitializationVector: aws.String("ZeroTo255String"), + Key: aws.String("Base64EncodedString"), + KeyMd5: aws.String("Base64EncodedString"), + Mode: aws.String("EncryptionMode"), + }, + InputKey: aws.String("WatermarkKey"), + PresetWatermarkId: aws.String("PresetWatermarkId"), + }, + // More values... + }, + }, + // More values... + }, + Playlists: []*elastictranscoder.CreateJobPlaylist{ + { // Required + Format: aws.String("PlaylistFormat"), + HlsContentProtection: &elastictranscoder.HlsContentProtection{ + InitializationVector: aws.String("ZeroTo255String"), + Key: aws.String("Base64EncodedString"), + KeyMd5: aws.String("Base64EncodedString"), + KeyStoragePolicy: aws.String("KeyStoragePolicy"), + LicenseAcquisitionUrl: aws.String("ZeroTo512String"), + Method: aws.String("HlsContentProtectionMethod"), + }, + Name: aws.String("Filename"), + OutputKeys: []*string{ + aws.String("Key"), // Required + // More values... + }, + PlayReadyDrm: &elastictranscoder.PlayReadyDrm{ + Format: aws.String("PlayReadyDrmFormatString"), + InitializationVector: aws.String("ZeroTo255String"), + Key: aws.String("NonEmptyBase64EncodedString"), + KeyId: aws.String("KeyIdGuid"), + KeyMd5: aws.String("NonEmptyBase64EncodedString"), + LicenseAcquisitionUrl: aws.String("OneTo512String"), + }, + }, + // More values... + }, + UserMetadata: map[string]*string{ + "Key": aws.String("String"), // Required + // More values... 
+ }, +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/restjson/build_test.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/restjson/build_test.go new file mode 100644 index 000000000..54f6df0f0 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/restjson/build_test.go @@ -0,0 +1,2371 @@ +package restjson_test + +import ( + "bytes" + "encoding/json" + "encoding/xml" + "io" + "io/ioutil" + "net/http" + "net/url" + "testing" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/client" + "github.com/aws/aws-sdk-go/aws/client/metadata" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/aws/session" + "github.com/aws/aws-sdk-go/awstesting" + "github.com/aws/aws-sdk-go/private/protocol/restjson" + "github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil" + "github.com/aws/aws-sdk-go/private/signer/v4" + "github.com/aws/aws-sdk-go/private/util" + "github.com/stretchr/testify/assert" +) + +var _ bytes.Buffer // always import bytes +var _ http.Request +var _ json.Marshaler +var _ time.Time +var _ xmlutil.XMLNode +var _ xml.Attr +var _ = awstesting.GenerateAssertions +var _ = ioutil.Discard +var _ = util.Trim("") +var _ = url.Values{} +var _ = io.EOF +var _ = aws.String + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type InputService1ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the InputService1ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a InputService1ProtocolTest client from just a session. +// svc := inputservice1protocoltest.New(mySession) +// +// // Create a InputService1ProtocolTest client with additional configuration +// svc := inputservice1protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewInputService1ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *InputService1ProtocolTest { + c := p.ClientConfig("inputservice1protocoltest", cfgs...) + return newInputService1ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newInputService1ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *InputService1ProtocolTest { + svc := &InputService1ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "inputservice1protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "2014-01-01", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Build.PushBack(restjson.Build) + svc.Handlers.Unmarshal.PushBack(restjson.Unmarshal) + svc.Handlers.UnmarshalMeta.PushBack(restjson.UnmarshalMeta) + svc.Handlers.UnmarshalError.PushBack(restjson.UnmarshalError) + + return svc +} + +// newRequest creates a new request for a InputService1ProtocolTest operation and runs any +// custom request initialization. +func (c *InputService1ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opInputService1TestCaseOperation1 = "OperationName" + +// InputService1TestCaseOperation1Request generates a request for the InputService1TestCaseOperation1 operation. 
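+// The REST builder (rest/build.go above) fills the {PipelineId} placeholder
+// in the operation's HTTPPath from the uri-located input member; for
+// example (value illustrative):
+//
+// svc.InputService1TestCaseOperation1(&InputService1TestShapeInputService1TestCaseOperation1Input{
+// 	PipelineId: aws.String("foo"), // requests /2014-01-01/jobsByPipeline/foo
+// })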
+func (c *InputService1ProtocolTest) InputService1TestCaseOperation1Request(input *InputService1TestShapeInputService1TestCaseOperation1Input) (req *request.Request, output *InputService1TestShapeInputService1TestCaseOperation1Output) { + op := &request.Operation{ + Name: opInputService1TestCaseOperation1, + HTTPMethod: "GET", + HTTPPath: "/2014-01-01/jobsByPipeline/{PipelineId}", + } + + if input == nil { + input = &InputService1TestShapeInputService1TestCaseOperation1Input{} + } + + req = c.newRequest(op, input, output) + output = &InputService1TestShapeInputService1TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *InputService1ProtocolTest) InputService1TestCaseOperation1(input *InputService1TestShapeInputService1TestCaseOperation1Input) (*InputService1TestShapeInputService1TestCaseOperation1Output, error) { + req, out := c.InputService1TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +type InputService1TestShapeInputService1TestCaseOperation1Input struct { + _ struct{} `type:"structure"` + + PipelineId *string `location:"uri" type:"string"` +} + +type InputService1TestShapeInputService1TestCaseOperation1Output struct { + _ struct{} `type:"structure"` +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type InputService2ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the InputService2ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a InputService2ProtocolTest client from just a session. +// svc := inputservice2protocoltest.New(mySession) +// +// // Create a InputService2ProtocolTest client with additional configuration +// svc := inputservice2protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewInputService2ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *InputService2ProtocolTest { + c := p.ClientConfig("inputservice2protocoltest", cfgs...) + return newInputService2ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newInputService2ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *InputService2ProtocolTest { + svc := &InputService2ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "inputservice2protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "2014-01-01", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Build.PushBack(restjson.Build) + svc.Handlers.Unmarshal.PushBack(restjson.Unmarshal) + svc.Handlers.UnmarshalMeta.PushBack(restjson.UnmarshalMeta) + svc.Handlers.UnmarshalError.PushBack(restjson.UnmarshalError) + + return svc +} + +// newRequest creates a new request for a InputService2ProtocolTest operation and runs any +// custom request initialization. +func (c *InputService2ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opInputService2TestCaseOperation1 = "OperationName" + +// InputService2TestCaseOperation1Request generates a request for the InputService2TestCaseOperation1 operation. 
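+// Here the uri member is renamed: the input's Foo field below carries
+// locationName:"PipelineId", so its value fills {PipelineId} in HTTPPath.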
+func (c *InputService2ProtocolTest) InputService2TestCaseOperation1Request(input *InputService2TestShapeInputService2TestCaseOperation1Input) (req *request.Request, output *InputService2TestShapeInputService2TestCaseOperation1Output) { + op := &request.Operation{ + Name: opInputService2TestCaseOperation1, + HTTPMethod: "GET", + HTTPPath: "/2014-01-01/jobsByPipeline/{PipelineId}", + } + + if input == nil { + input = &InputService2TestShapeInputService2TestCaseOperation1Input{} + } + + req = c.newRequest(op, input, output) + output = &InputService2TestShapeInputService2TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *InputService2ProtocolTest) InputService2TestCaseOperation1(input *InputService2TestShapeInputService2TestCaseOperation1Input) (*InputService2TestShapeInputService2TestCaseOperation1Output, error) { + req, out := c.InputService2TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +type InputService2TestShapeInputService2TestCaseOperation1Input struct { + _ struct{} `type:"structure"` + + Foo *string `location:"uri" locationName:"PipelineId" type:"string"` +} + +type InputService2TestShapeInputService2TestCaseOperation1Output struct { + _ struct{} `type:"structure"` +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type InputService3ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the InputService3ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a InputService3ProtocolTest client from just a session. +// svc := inputservice3protocoltest.New(mySession) +// +// // Create a InputService3ProtocolTest client with additional configuration +// svc := inputservice3protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewInputService3ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *InputService3ProtocolTest { + c := p.ClientConfig("inputservice3protocoltest", cfgs...) + return newInputService3ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newInputService3ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *InputService3ProtocolTest { + svc := &InputService3ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "inputservice3protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "2014-01-01", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Build.PushBack(restjson.Build) + svc.Handlers.Unmarshal.PushBack(restjson.Unmarshal) + svc.Handlers.UnmarshalMeta.PushBack(restjson.UnmarshalMeta) + svc.Handlers.UnmarshalError.PushBack(restjson.UnmarshalError) + + return svc +} + +// newRequest creates a new request for a InputService3ProtocolTest operation and runs any +// custom request initialization. +func (c *InputService3ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opInputService3TestCaseOperation1 = "OperationName" + +// InputService3TestCaseOperation1Request generates a request for the InputService3TestCaseOperation1 operation. 
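+// The QueryDoc member below is a querystring-located map; each key becomes
+// its own query parameter, e.g. (values illustrative):
+//
+// QueryDoc: map[string]*string{"bar": aws.String("baz")} // ...?bar=baz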
+func (c *InputService3ProtocolTest) InputService3TestCaseOperation1Request(input *InputService3TestShapeInputService3TestCaseOperation1Input) (req *request.Request, output *InputService3TestShapeInputService3TestCaseOperation1Output) { + op := &request.Operation{ + Name: opInputService3TestCaseOperation1, + HTTPMethod: "GET", + HTTPPath: "/2014-01-01/jobsByPipeline/{PipelineId}", + } + + if input == nil { + input = &InputService3TestShapeInputService3TestCaseOperation1Input{} + } + + req = c.newRequest(op, input, output) + output = &InputService3TestShapeInputService3TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *InputService3ProtocolTest) InputService3TestCaseOperation1(input *InputService3TestShapeInputService3TestCaseOperation1Input) (*InputService3TestShapeInputService3TestCaseOperation1Output, error) { + req, out := c.InputService3TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +type InputService3TestShapeInputService3TestCaseOperation1Input struct { + _ struct{} `type:"structure"` + + PipelineId *string `location:"uri" type:"string"` + + QueryDoc map[string]*string `location:"querystring" type:"map"` +} + +type InputService3TestShapeInputService3TestCaseOperation1Output struct { + _ struct{} `type:"structure"` +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type InputService4ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the InputService4ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a InputService4ProtocolTest client from just a session. +// svc := inputservice4protocoltest.New(mySession) +// +// // Create a InputService4ProtocolTest client with additional configuration +// svc := inputservice4protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewInputService4ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *InputService4ProtocolTest { + c := p.ClientConfig("inputservice4protocoltest", cfgs...) + return newInputService4ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newInputService4ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *InputService4ProtocolTest { + svc := &InputService4ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "inputservice4protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "2014-01-01", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Build.PushBack(restjson.Build) + svc.Handlers.Unmarshal.PushBack(restjson.Unmarshal) + svc.Handlers.UnmarshalMeta.PushBack(restjson.UnmarshalMeta) + svc.Handlers.UnmarshalError.PushBack(restjson.UnmarshalError) + + return svc +} + +// newRequest creates a new request for a InputService4ProtocolTest operation and runs any +// custom request initialization. +func (c *InputService4ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opInputService4TestCaseOperation1 = "OperationName" + +// InputService4TestCaseOperation1Request generates a request for the InputService4TestCaseOperation1 operation. 
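+// InputService4's QueryDoc (below) maps keys to lists of strings; every
+// element is added as a repeated query parameter, per the
+// map[string][]*string case of buildQueryString in rest/build.go above.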
+func (c *InputService4ProtocolTest) InputService4TestCaseOperation1Request(input *InputService4TestShapeInputService4TestCaseOperation1Input) (req *request.Request, output *InputService4TestShapeInputService4TestCaseOperation1Output) { + op := &request.Operation{ + Name: opInputService4TestCaseOperation1, + HTTPMethod: "GET", + HTTPPath: "/2014-01-01/jobsByPipeline/{PipelineId}", + } + + if input == nil { + input = &InputService4TestShapeInputService4TestCaseOperation1Input{} + } + + req = c.newRequest(op, input, output) + output = &InputService4TestShapeInputService4TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *InputService4ProtocolTest) InputService4TestCaseOperation1(input *InputService4TestShapeInputService4TestCaseOperation1Input) (*InputService4TestShapeInputService4TestCaseOperation1Output, error) { + req, out := c.InputService4TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +type InputService4TestShapeInputService4TestCaseOperation1Input struct { + _ struct{} `type:"structure"` + + PipelineId *string `location:"uri" type:"string"` + + QueryDoc map[string][]*string `location:"querystring" type:"map"` +} + +type InputService4TestShapeInputService4TestCaseOperation1Output struct { + _ struct{} `type:"structure"` +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type InputService5ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the InputService5ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a InputService5ProtocolTest client from just a session. +// svc := inputservice5protocoltest.New(mySession) +// +// // Create a InputService5ProtocolTest client with additional configuration +// svc := inputservice5protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewInputService5ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *InputService5ProtocolTest { + c := p.ClientConfig("inputservice5protocoltest", cfgs...) + return newInputService5ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newInputService5ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *InputService5ProtocolTest { + svc := &InputService5ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "inputservice5protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "2014-01-01", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Build.PushBack(restjson.Build) + svc.Handlers.Unmarshal.PushBack(restjson.Unmarshal) + svc.Handlers.UnmarshalMeta.PushBack(restjson.UnmarshalMeta) + svc.Handlers.UnmarshalError.PushBack(restjson.UnmarshalError) + + return svc +} + +// newRequest creates a new request for a InputService5ProtocolTest operation and runs any +// custom request initialization. +func (c *InputService5ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opInputService5TestCaseOperation1 = "OperationName" + +// InputService5TestCaseOperation1Request generates a request for the InputService5TestCaseOperation1 operation. 
+func (c *InputService5ProtocolTest) InputService5TestCaseOperation1Request(input *InputService5TestShapeInputService5TestCaseOperation1Input) (req *request.Request, output *InputService5TestShapeInputService5TestCaseOperation1Output) { + op := &request.Operation{ + Name: opInputService5TestCaseOperation1, + HTTPMethod: "GET", + HTTPPath: "/2014-01-01/jobsByPipeline/{PipelineId}", + } + + if input == nil { + input = &InputService5TestShapeInputService5TestCaseOperation1Input{} + } + + req = c.newRequest(op, input, output) + output = &InputService5TestShapeInputService5TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *InputService5ProtocolTest) InputService5TestCaseOperation1(input *InputService5TestShapeInputService5TestCaseOperation1Input) (*InputService5TestShapeInputService5TestCaseOperation1Output, error) { + req, out := c.InputService5TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +type InputService5TestShapeInputService5TestCaseOperation1Input struct { + _ struct{} `type:"structure"` + + Ascending *string `location:"querystring" locationName:"Ascending" type:"string"` + + PageToken *string `location:"querystring" locationName:"PageToken" type:"string"` + + PipelineId *string `location:"uri" locationName:"PipelineId" type:"string"` +} + +type InputService5TestShapeInputService5TestCaseOperation1Output struct { + _ struct{} `type:"structure"` +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type InputService6ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the InputService6ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a InputService6ProtocolTest client from just a session. +// svc := inputservice6protocoltest.New(mySession) +// +// // Create a InputService6ProtocolTest client with additional configuration +// svc := inputservice6protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewInputService6ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *InputService6ProtocolTest { + c := p.ClientConfig("inputservice6protocoltest", cfgs...) + return newInputService6ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newInputService6ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *InputService6ProtocolTest { + svc := &InputService6ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "inputservice6protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "2014-01-01", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Build.PushBack(restjson.Build) + svc.Handlers.Unmarshal.PushBack(restjson.Unmarshal) + svc.Handlers.UnmarshalMeta.PushBack(restjson.UnmarshalMeta) + svc.Handlers.UnmarshalError.PushBack(restjson.UnmarshalError) + + return svc +} + +// newRequest creates a new request for a InputService6ProtocolTest operation and runs any +// custom request initialization. 
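+//
+// The generated test clients add no customization, so newRequest simply
+// delegates to NewRequest; a real service would push handlers here, e.g.
+// (handler name hypothetical):
+//
+// req.Handlers.Build.PushBack(hypotheticalCustomization)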
+func (c *InputService6ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opInputService6TestCaseOperation1 = "OperationName" + +// InputService6TestCaseOperation1Request generates a request for the InputService6TestCaseOperation1 operation. +func (c *InputService6ProtocolTest) InputService6TestCaseOperation1Request(input *InputService6TestShapeInputService6TestCaseOperation1Input) (req *request.Request, output *InputService6TestShapeInputService6TestCaseOperation1Output) { + op := &request.Operation{ + Name: opInputService6TestCaseOperation1, + HTTPMethod: "POST", + HTTPPath: "/2014-01-01/jobsByPipeline/{PipelineId}", + } + + if input == nil { + input = &InputService6TestShapeInputService6TestCaseOperation1Input{} + } + + req = c.newRequest(op, input, output) + output = &InputService6TestShapeInputService6TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *InputService6ProtocolTest) InputService6TestCaseOperation1(input *InputService6TestShapeInputService6TestCaseOperation1Input) (*InputService6TestShapeInputService6TestCaseOperation1Output, error) { + req, out := c.InputService6TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +type InputService6TestShapeInputService6TestCaseOperation1Input struct { + _ struct{} `type:"structure"` + + Ascending *string `location:"querystring" locationName:"Ascending" type:"string"` + + Config *InputService6TestShapeStructType `type:"structure"` + + PageToken *string `location:"querystring" locationName:"PageToken" type:"string"` + + PipelineId *string `location:"uri" locationName:"PipelineId" type:"string"` +} + +type InputService6TestShapeInputService6TestCaseOperation1Output struct { + _ struct{} `type:"structure"` +} + +type InputService6TestShapeStructType struct { + _ struct{} `type:"structure"` + + A *string `type:"string"` + + B *string `type:"string"` +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type InputService7ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the InputService7ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a InputService7ProtocolTest client from just a session. +// svc := inputservice7protocoltest.New(mySession) +// +// // Create a InputService7ProtocolTest client with additional configuration +// svc := inputservice7protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewInputService7ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *InputService7ProtocolTest { + c := p.ClientConfig("inputservice7protocoltest", cfgs...) + return newInputService7ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. 
+func newInputService7ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *InputService7ProtocolTest { + svc := &InputService7ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "inputservice7protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "2014-01-01", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Build.PushBack(restjson.Build) + svc.Handlers.Unmarshal.PushBack(restjson.Unmarshal) + svc.Handlers.UnmarshalMeta.PushBack(restjson.UnmarshalMeta) + svc.Handlers.UnmarshalError.PushBack(restjson.UnmarshalError) + + return svc +} + +// newRequest creates a new request for a InputService7ProtocolTest operation and runs any +// custom request initialization. +func (c *InputService7ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opInputService7TestCaseOperation1 = "OperationName" + +// InputService7TestCaseOperation1Request generates a request for the InputService7TestCaseOperation1 operation. +func (c *InputService7ProtocolTest) InputService7TestCaseOperation1Request(input *InputService7TestShapeInputService7TestCaseOperation1Input) (req *request.Request, output *InputService7TestShapeInputService7TestCaseOperation1Output) { + op := &request.Operation{ + Name: opInputService7TestCaseOperation1, + HTTPMethod: "POST", + HTTPPath: "/2014-01-01/jobsByPipeline/{PipelineId}", + } + + if input == nil { + input = &InputService7TestShapeInputService7TestCaseOperation1Input{} + } + + req = c.newRequest(op, input, output) + output = &InputService7TestShapeInputService7TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *InputService7ProtocolTest) InputService7TestCaseOperation1(input *InputService7TestShapeInputService7TestCaseOperation1Input) (*InputService7TestShapeInputService7TestCaseOperation1Output, error) { + req, out := c.InputService7TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +type InputService7TestShapeInputService7TestCaseOperation1Input struct { + _ struct{} `type:"structure"` + + Ascending *string `location:"querystring" locationName:"Ascending" type:"string"` + + Checksum *string `location:"header" locationName:"x-amz-checksum" type:"string"` + + Config *InputService7TestShapeStructType `type:"structure"` + + PageToken *string `location:"querystring" locationName:"PageToken" type:"string"` + + PipelineId *string `location:"uri" locationName:"PipelineId" type:"string"` +} + +type InputService7TestShapeInputService7TestCaseOperation1Output struct { + _ struct{} `type:"structure"` +} + +type InputService7TestShapeStructType struct { + _ struct{} `type:"structure"` + + A *string `type:"string"` + + B *string `type:"string"` +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type InputService8ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the InputService8ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a InputService8ProtocolTest client from just a session. 
+// svc := inputservice8protocoltest.New(mySession) +// +// // Create a InputService8ProtocolTest client with additional configuration +// svc := inputservice8protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewInputService8ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *InputService8ProtocolTest { + c := p.ClientConfig("inputservice8protocoltest", cfgs...) + return newInputService8ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newInputService8ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *InputService8ProtocolTest { + svc := &InputService8ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "inputservice8protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "2014-01-01", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Build.PushBack(restjson.Build) + svc.Handlers.Unmarshal.PushBack(restjson.Unmarshal) + svc.Handlers.UnmarshalMeta.PushBack(restjson.UnmarshalMeta) + svc.Handlers.UnmarshalError.PushBack(restjson.UnmarshalError) + + return svc +} + +// newRequest creates a new request for a InputService8ProtocolTest operation and runs any +// custom request initialization. +func (c *InputService8ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opInputService8TestCaseOperation1 = "OperationName" + +// InputService8TestCaseOperation1Request generates a request for the InputService8TestCaseOperation1 operation. +func (c *InputService8ProtocolTest) InputService8TestCaseOperation1Request(input *InputService8TestShapeInputService8TestCaseOperation1Input) (req *request.Request, output *InputService8TestShapeInputService8TestCaseOperation1Output) { + op := &request.Operation{ + Name: opInputService8TestCaseOperation1, + HTTPMethod: "POST", + HTTPPath: "/2014-01-01/vaults/{vaultName}/archives", + } + + if input == nil { + input = &InputService8TestShapeInputService8TestCaseOperation1Input{} + } + + req = c.newRequest(op, input, output) + output = &InputService8TestShapeInputService8TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *InputService8ProtocolTest) InputService8TestCaseOperation1(input *InputService8TestShapeInputService8TestCaseOperation1Input) (*InputService8TestShapeInputService8TestCaseOperation1Output, error) { + req, out := c.InputService8TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +type InputService8TestShapeInputService8TestCaseOperation1Input struct { + _ struct{} `type:"structure" payload:"Body"` + + Body io.ReadSeeker `locationName:"body" type:"blob"` + + Checksum *string `location:"header" locationName:"x-amz-sha256-tree-hash" type:"string"` + + VaultName *string `location:"uri" locationName:"vaultName" type:"string" required:"true"` +} + +type InputService8TestShapeInputService8TestCaseOperation1Output struct { + _ struct{} `type:"structure"` +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type InputService9ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the InputService9ProtocolTest client with a session. 
+// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a InputService9ProtocolTest client from just a session. +// svc := inputservice9protocoltest.New(mySession) +// +// // Create a InputService9ProtocolTest client with additional configuration +// svc := inputservice9protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewInputService9ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *InputService9ProtocolTest { + c := p.ClientConfig("inputservice9protocoltest", cfgs...) + return newInputService9ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newInputService9ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *InputService9ProtocolTest { + svc := &InputService9ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "inputservice9protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "2014-01-01", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Build.PushBack(restjson.Build) + svc.Handlers.Unmarshal.PushBack(restjson.Unmarshal) + svc.Handlers.UnmarshalMeta.PushBack(restjson.UnmarshalMeta) + svc.Handlers.UnmarshalError.PushBack(restjson.UnmarshalError) + + return svc +} + +// newRequest creates a new request for a InputService9ProtocolTest operation and runs any +// custom request initialization. +func (c *InputService9ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opInputService9TestCaseOperation1 = "OperationName" + +// InputService9TestCaseOperation1Request generates a request for the InputService9TestCaseOperation1 operation. +func (c *InputService9ProtocolTest) InputService9TestCaseOperation1Request(input *InputService9TestShapeInputService9TestCaseOperation1Input) (req *request.Request, output *InputService9TestShapeInputService9TestCaseOperation1Output) { + op := &request.Operation{ + Name: opInputService9TestCaseOperation1, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &InputService9TestShapeInputService9TestCaseOperation1Input{} + } + + req = c.newRequest(op, input, output) + output = &InputService9TestShapeInputService9TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *InputService9ProtocolTest) InputService9TestCaseOperation1(input *InputService9TestShapeInputService9TestCaseOperation1Input) (*InputService9TestShapeInputService9TestCaseOperation1Output, error) { + req, out := c.InputService9TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +type InputService9TestShapeInputService9TestCaseOperation1Input struct { + _ struct{} `type:"structure" payload:"Foo"` + + Foo *string `locationName:"foo" type:"string"` +} + +type InputService9TestShapeInputService9TestCaseOperation1Output struct { + _ struct{} `type:"structure"` +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type InputService10ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the InputService10ProtocolTest client with a session. 
+// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a InputService10ProtocolTest client from just a session. +// svc := inputservice10protocoltest.New(mySession) +// +// // Create a InputService10ProtocolTest client with additional configuration +// svc := inputservice10protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewInputService10ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *InputService10ProtocolTest { + c := p.ClientConfig("inputservice10protocoltest", cfgs...) + return newInputService10ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newInputService10ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *InputService10ProtocolTest { + svc := &InputService10ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "inputservice10protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "2014-01-01", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Build.PushBack(restjson.Build) + svc.Handlers.Unmarshal.PushBack(restjson.Unmarshal) + svc.Handlers.UnmarshalMeta.PushBack(restjson.UnmarshalMeta) + svc.Handlers.UnmarshalError.PushBack(restjson.UnmarshalError) + + return svc +} + +// newRequest creates a new request for a InputService10ProtocolTest operation and runs any +// custom request initialization. +func (c *InputService10ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opInputService10TestCaseOperation1 = "OperationName" + +// InputService10TestCaseOperation1Request generates a request for the InputService10TestCaseOperation1 operation. +func (c *InputService10ProtocolTest) InputService10TestCaseOperation1Request(input *InputService10TestShapeInputShape) (req *request.Request, output *InputService10TestShapeInputService10TestCaseOperation1Output) { + op := &request.Operation{ + Name: opInputService10TestCaseOperation1, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &InputService10TestShapeInputShape{} + } + + req = c.newRequest(op, input, output) + output = &InputService10TestShapeInputService10TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *InputService10ProtocolTest) InputService10TestCaseOperation1(input *InputService10TestShapeInputShape) (*InputService10TestShapeInputService10TestCaseOperation1Output, error) { + req, out := c.InputService10TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +const opInputService10TestCaseOperation2 = "OperationName" + +// InputService10TestCaseOperation2Request generates a request for the InputService10TestCaseOperation2 operation. 
+func (c *InputService10ProtocolTest) InputService10TestCaseOperation2Request(input *InputService10TestShapeInputShape) (req *request.Request, output *InputService10TestShapeInputService10TestCaseOperation2Output) { + op := &request.Operation{ + Name: opInputService10TestCaseOperation2, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &InputService10TestShapeInputShape{} + } + + req = c.newRequest(op, input, output) + output = &InputService10TestShapeInputService10TestCaseOperation2Output{} + req.Data = output + return +} + +func (c *InputService10ProtocolTest) InputService10TestCaseOperation2(input *InputService10TestShapeInputShape) (*InputService10TestShapeInputService10TestCaseOperation2Output, error) { + req, out := c.InputService10TestCaseOperation2Request(input) + err := req.Send() + return out, err +} + +type InputService10TestShapeInputService10TestCaseOperation1Output struct { + _ struct{} `type:"structure"` +} + +type InputService10TestShapeInputService10TestCaseOperation2Output struct { + _ struct{} `type:"structure"` +} + +type InputService10TestShapeInputShape struct { + _ struct{} `type:"structure" payload:"Foo"` + + Foo []byte `locationName:"foo" type:"blob"` +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type InputService11ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the InputService11ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a InputService11ProtocolTest client from just a session. +// svc := inputservice11protocoltest.New(mySession) +// +// // Create a InputService11ProtocolTest client with additional configuration +// svc := inputservice11protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewInputService11ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *InputService11ProtocolTest { + c := p.ClientConfig("inputservice11protocoltest", cfgs...) + return newInputService11ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newInputService11ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *InputService11ProtocolTest { + svc := &InputService11ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "inputservice11protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "2014-01-01", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Build.PushBack(restjson.Build) + svc.Handlers.Unmarshal.PushBack(restjson.Unmarshal) + svc.Handlers.UnmarshalMeta.PushBack(restjson.UnmarshalMeta) + svc.Handlers.UnmarshalError.PushBack(restjson.UnmarshalError) + + return svc +} + +// newRequest creates a new request for a InputService11ProtocolTest operation and runs any +// custom request initialization. +func (c *InputService11ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opInputService11TestCaseOperation1 = "OperationName" + +// InputService11TestCaseOperation1Request generates a request for the InputService11TestCaseOperation1 operation. 
+func (c *InputService11ProtocolTest) InputService11TestCaseOperation1Request(input *InputService11TestShapeInputShape) (req *request.Request, output *InputService11TestShapeInputService11TestCaseOperation1Output) { + op := &request.Operation{ + Name: opInputService11TestCaseOperation1, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &InputService11TestShapeInputShape{} + } + + req = c.newRequest(op, input, output) + output = &InputService11TestShapeInputService11TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *InputService11ProtocolTest) InputService11TestCaseOperation1(input *InputService11TestShapeInputShape) (*InputService11TestShapeInputService11TestCaseOperation1Output, error) { + req, out := c.InputService11TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +const opInputService11TestCaseOperation2 = "OperationName" + +// InputService11TestCaseOperation2Request generates a request for the InputService11TestCaseOperation2 operation. +func (c *InputService11ProtocolTest) InputService11TestCaseOperation2Request(input *InputService11TestShapeInputShape) (req *request.Request, output *InputService11TestShapeInputService11TestCaseOperation2Output) { + op := &request.Operation{ + Name: opInputService11TestCaseOperation2, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &InputService11TestShapeInputShape{} + } + + req = c.newRequest(op, input, output) + output = &InputService11TestShapeInputService11TestCaseOperation2Output{} + req.Data = output + return +} + +func (c *InputService11ProtocolTest) InputService11TestCaseOperation2(input *InputService11TestShapeInputShape) (*InputService11TestShapeInputService11TestCaseOperation2Output, error) { + req, out := c.InputService11TestCaseOperation2Request(input) + err := req.Send() + return out, err +} + +type InputService11TestShapeFooShape struct { + _ struct{} `locationName:"foo" type:"structure"` + + Baz *string `locationName:"baz" type:"string"` +} + +type InputService11TestShapeInputService11TestCaseOperation1Output struct { + _ struct{} `type:"structure"` +} + +type InputService11TestShapeInputService11TestCaseOperation2Output struct { + _ struct{} `type:"structure"` +} + +type InputService11TestShapeInputShape struct { + _ struct{} `type:"structure" payload:"Foo"` + + Foo *InputService11TestShapeFooShape `locationName:"foo" type:"structure"` +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type InputService12ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the InputService12ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a InputService12ProtocolTest client from just a session. +// svc := inputservice12protocoltest.New(mySession) +// +// // Create a InputService12ProtocolTest client with additional configuration +// svc := inputservice12protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewInputService12ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *InputService12ProtocolTest { + c := p.ClientConfig("inputservice12protocoltest", cfgs...) + return newInputService12ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. 
+func newInputService12ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *InputService12ProtocolTest { + svc := &InputService12ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "inputservice12protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "2014-01-01", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Build.PushBack(restjson.Build) + svc.Handlers.Unmarshal.PushBack(restjson.Unmarshal) + svc.Handlers.UnmarshalMeta.PushBack(restjson.UnmarshalMeta) + svc.Handlers.UnmarshalError.PushBack(restjson.UnmarshalError) + + return svc +} + +// newRequest creates a new request for a InputService12ProtocolTest operation and runs any +// custom request initialization. +func (c *InputService12ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opInputService12TestCaseOperation1 = "OperationName" + +// InputService12TestCaseOperation1Request generates a request for the InputService12TestCaseOperation1 operation. +func (c *InputService12ProtocolTest) InputService12TestCaseOperation1Request(input *InputService12TestShapeInputShape) (req *request.Request, output *InputService12TestShapeInputService12TestCaseOperation1Output) { + op := &request.Operation{ + Name: opInputService12TestCaseOperation1, + HTTPMethod: "POST", + HTTPPath: "/path", + } + + if input == nil { + input = &InputService12TestShapeInputShape{} + } + + req = c.newRequest(op, input, output) + output = &InputService12TestShapeInputService12TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *InputService12ProtocolTest) InputService12TestCaseOperation1(input *InputService12TestShapeInputShape) (*InputService12TestShapeInputService12TestCaseOperation1Output, error) { + req, out := c.InputService12TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +const opInputService12TestCaseOperation2 = "OperationName" + +// InputService12TestCaseOperation2Request generates a request for the InputService12TestCaseOperation2 operation. +func (c *InputService12ProtocolTest) InputService12TestCaseOperation2Request(input *InputService12TestShapeInputShape) (req *request.Request, output *InputService12TestShapeInputService12TestCaseOperation2Output) { + op := &request.Operation{ + Name: opInputService12TestCaseOperation2, + HTTPMethod: "POST", + HTTPPath: "/path?abc=mno", + } + + if input == nil { + input = &InputService12TestShapeInputShape{} + } + + req = c.newRequest(op, input, output) + output = &InputService12TestShapeInputService12TestCaseOperation2Output{} + req.Data = output + return +} + +func (c *InputService12ProtocolTest) InputService12TestCaseOperation2(input *InputService12TestShapeInputShape) (*InputService12TestShapeInputService12TestCaseOperation2Output, error) { + req, out := c.InputService12TestCaseOperation2Request(input) + err := req.Send() + return out, err +} + +type InputService12TestShapeInputService12TestCaseOperation1Output struct { + _ struct{} `type:"structure"` +} + +type InputService12TestShapeInputService12TestCaseOperation2Output struct { + _ struct{} `type:"structure"` +} + +type InputService12TestShapeInputShape struct { + _ struct{} `type:"structure"` + + Foo *string `location:"querystring" locationName:"param-name" type:"string"` +} + +//The service client's operations are safe to be used concurrently. 
+// It is not safe to mutate any of the client's properties though. +type InputService13ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the InputService13ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a InputService13ProtocolTest client from just a session. +// svc := inputservice13protocoltest.New(mySession) +// +// // Create a InputService13ProtocolTest client with additional configuration +// svc := inputservice13protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewInputService13ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *InputService13ProtocolTest { + c := p.ClientConfig("inputservice13protocoltest", cfgs...) + return newInputService13ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newInputService13ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *InputService13ProtocolTest { + svc := &InputService13ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "inputservice13protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "2014-01-01", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Build.PushBack(restjson.Build) + svc.Handlers.Unmarshal.PushBack(restjson.Unmarshal) + svc.Handlers.UnmarshalMeta.PushBack(restjson.UnmarshalMeta) + svc.Handlers.UnmarshalError.PushBack(restjson.UnmarshalError) + + return svc +} + +// newRequest creates a new request for a InputService13ProtocolTest operation and runs any +// custom request initialization. +func (c *InputService13ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opInputService13TestCaseOperation1 = "OperationName" + +// InputService13TestCaseOperation1Request generates a request for the InputService13TestCaseOperation1 operation. +func (c *InputService13ProtocolTest) InputService13TestCaseOperation1Request(input *InputService13TestShapeInputShape) (req *request.Request, output *InputService13TestShapeInputService13TestCaseOperation1Output) { + op := &request.Operation{ + Name: opInputService13TestCaseOperation1, + HTTPMethod: "POST", + HTTPPath: "/path", + } + + if input == nil { + input = &InputService13TestShapeInputShape{} + } + + req = c.newRequest(op, input, output) + output = &InputService13TestShapeInputService13TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *InputService13ProtocolTest) InputService13TestCaseOperation1(input *InputService13TestShapeInputShape) (*InputService13TestShapeInputService13TestCaseOperation1Output, error) { + req, out := c.InputService13TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +const opInputService13TestCaseOperation2 = "OperationName" + +// InputService13TestCaseOperation2Request generates a request for the InputService13TestCaseOperation2 operation. 
+func (c *InputService13ProtocolTest) InputService13TestCaseOperation2Request(input *InputService13TestShapeInputShape) (req *request.Request, output *InputService13TestShapeInputService13TestCaseOperation2Output) { + op := &request.Operation{ + Name: opInputService13TestCaseOperation2, + HTTPMethod: "POST", + HTTPPath: "/path", + } + + if input == nil { + input = &InputService13TestShapeInputShape{} + } + + req = c.newRequest(op, input, output) + output = &InputService13TestShapeInputService13TestCaseOperation2Output{} + req.Data = output + return +} + +func (c *InputService13ProtocolTest) InputService13TestCaseOperation2(input *InputService13TestShapeInputShape) (*InputService13TestShapeInputService13TestCaseOperation2Output, error) { + req, out := c.InputService13TestCaseOperation2Request(input) + err := req.Send() + return out, err +} + +const opInputService13TestCaseOperation3 = "OperationName" + +// InputService13TestCaseOperation3Request generates a request for the InputService13TestCaseOperation3 operation. +func (c *InputService13ProtocolTest) InputService13TestCaseOperation3Request(input *InputService13TestShapeInputShape) (req *request.Request, output *InputService13TestShapeInputService13TestCaseOperation3Output) { + op := &request.Operation{ + Name: opInputService13TestCaseOperation3, + HTTPMethod: "POST", + HTTPPath: "/path", + } + + if input == nil { + input = &InputService13TestShapeInputShape{} + } + + req = c.newRequest(op, input, output) + output = &InputService13TestShapeInputService13TestCaseOperation3Output{} + req.Data = output + return +} + +func (c *InputService13ProtocolTest) InputService13TestCaseOperation3(input *InputService13TestShapeInputShape) (*InputService13TestShapeInputService13TestCaseOperation3Output, error) { + req, out := c.InputService13TestCaseOperation3Request(input) + err := req.Send() + return out, err +} + +const opInputService13TestCaseOperation4 = "OperationName" + +// InputService13TestCaseOperation4Request generates a request for the InputService13TestCaseOperation4 operation. +func (c *InputService13ProtocolTest) InputService13TestCaseOperation4Request(input *InputService13TestShapeInputShape) (req *request.Request, output *InputService13TestShapeInputService13TestCaseOperation4Output) { + op := &request.Operation{ + Name: opInputService13TestCaseOperation4, + HTTPMethod: "POST", + HTTPPath: "/path", + } + + if input == nil { + input = &InputService13TestShapeInputShape{} + } + + req = c.newRequest(op, input, output) + output = &InputService13TestShapeInputService13TestCaseOperation4Output{} + req.Data = output + return +} + +func (c *InputService13ProtocolTest) InputService13TestCaseOperation4(input *InputService13TestShapeInputShape) (*InputService13TestShapeInputService13TestCaseOperation4Output, error) { + req, out := c.InputService13TestCaseOperation4Request(input) + err := req.Send() + return out, err +} + +const opInputService13TestCaseOperation5 = "OperationName" + +// InputService13TestCaseOperation5Request generates a request for the InputService13TestCaseOperation5 operation. 
+func (c *InputService13ProtocolTest) InputService13TestCaseOperation5Request(input *InputService13TestShapeInputShape) (req *request.Request, output *InputService13TestShapeInputService13TestCaseOperation5Output) { + op := &request.Operation{ + Name: opInputService13TestCaseOperation5, + HTTPMethod: "POST", + HTTPPath: "/path", + } + + if input == nil { + input = &InputService13TestShapeInputShape{} + } + + req = c.newRequest(op, input, output) + output = &InputService13TestShapeInputService13TestCaseOperation5Output{} + req.Data = output + return +} + +func (c *InputService13ProtocolTest) InputService13TestCaseOperation5(input *InputService13TestShapeInputShape) (*InputService13TestShapeInputService13TestCaseOperation5Output, error) { + req, out := c.InputService13TestCaseOperation5Request(input) + err := req.Send() + return out, err +} + +const opInputService13TestCaseOperation6 = "OperationName" + +// InputService13TestCaseOperation6Request generates a request for the InputService13TestCaseOperation6 operation. +func (c *InputService13ProtocolTest) InputService13TestCaseOperation6Request(input *InputService13TestShapeInputShape) (req *request.Request, output *InputService13TestShapeInputService13TestCaseOperation6Output) { + op := &request.Operation{ + Name: opInputService13TestCaseOperation6, + HTTPMethod: "POST", + HTTPPath: "/path", + } + + if input == nil { + input = &InputService13TestShapeInputShape{} + } + + req = c.newRequest(op, input, output) + output = &InputService13TestShapeInputService13TestCaseOperation6Output{} + req.Data = output + return +} + +func (c *InputService13ProtocolTest) InputService13TestCaseOperation6(input *InputService13TestShapeInputShape) (*InputService13TestShapeInputService13TestCaseOperation6Output, error) { + req, out := c.InputService13TestCaseOperation6Request(input) + err := req.Send() + return out, err +} + +type InputService13TestShapeInputService13TestCaseOperation1Output struct { + _ struct{} `type:"structure"` +} + +type InputService13TestShapeInputService13TestCaseOperation2Output struct { + _ struct{} `type:"structure"` +} + +type InputService13TestShapeInputService13TestCaseOperation3Output struct { + _ struct{} `type:"structure"` +} + +type InputService13TestShapeInputService13TestCaseOperation4Output struct { + _ struct{} `type:"structure"` +} + +type InputService13TestShapeInputService13TestCaseOperation5Output struct { + _ struct{} `type:"structure"` +} + +type InputService13TestShapeInputService13TestCaseOperation6Output struct { + _ struct{} `type:"structure"` +} + +type InputService13TestShapeInputShape struct { + _ struct{} `type:"structure"` + + RecursiveStruct *InputService13TestShapeRecursiveStructType `type:"structure"` +} + +type InputService13TestShapeRecursiveStructType struct { + _ struct{} `type:"structure"` + + NoRecurse *string `type:"string"` + + RecursiveList []*InputService13TestShapeRecursiveStructType `type:"list"` + + RecursiveMap map[string]*InputService13TestShapeRecursiveStructType `type:"map"` + + RecursiveStruct *InputService13TestShapeRecursiveStructType `type:"structure"` +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type InputService14ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the InputService14ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. 
+// +// Example: +// // Create a InputService14ProtocolTest client from just a session. +// svc := inputservice14protocoltest.New(mySession) +// +// // Create a InputService14ProtocolTest client with additional configuration +// svc := inputservice14protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewInputService14ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *InputService14ProtocolTest { + c := p.ClientConfig("inputservice14protocoltest", cfgs...) + return newInputService14ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newInputService14ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *InputService14ProtocolTest { + svc := &InputService14ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "inputservice14protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "2014-01-01", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Build.PushBack(restjson.Build) + svc.Handlers.Unmarshal.PushBack(restjson.Unmarshal) + svc.Handlers.UnmarshalMeta.PushBack(restjson.UnmarshalMeta) + svc.Handlers.UnmarshalError.PushBack(restjson.UnmarshalError) + + return svc +} + +// newRequest creates a new request for a InputService14ProtocolTest operation and runs any +// custom request initialization. +func (c *InputService14ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opInputService14TestCaseOperation1 = "OperationName" + +// InputService14TestCaseOperation1Request generates a request for the InputService14TestCaseOperation1 operation. +func (c *InputService14ProtocolTest) InputService14TestCaseOperation1Request(input *InputService14TestShapeInputShape) (req *request.Request, output *InputService14TestShapeInputService14TestCaseOperation1Output) { + op := &request.Operation{ + Name: opInputService14TestCaseOperation1, + HTTPMethod: "POST", + HTTPPath: "/path", + } + + if input == nil { + input = &InputService14TestShapeInputShape{} + } + + req = c.newRequest(op, input, output) + output = &InputService14TestShapeInputService14TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *InputService14ProtocolTest) InputService14TestCaseOperation1(input *InputService14TestShapeInputShape) (*InputService14TestShapeInputService14TestCaseOperation1Output, error) { + req, out := c.InputService14TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +const opInputService14TestCaseOperation2 = "OperationName" + +// InputService14TestCaseOperation2Request generates a request for the InputService14TestCaseOperation2 operation. 
+func (c *InputService14ProtocolTest) InputService14TestCaseOperation2Request(input *InputService14TestShapeInputShape) (req *request.Request, output *InputService14TestShapeInputService14TestCaseOperation2Output) { + op := &request.Operation{ + Name: opInputService14TestCaseOperation2, + HTTPMethod: "POST", + HTTPPath: "/path", + } + + if input == nil { + input = &InputService14TestShapeInputShape{} + } + + req = c.newRequest(op, input, output) + output = &InputService14TestShapeInputService14TestCaseOperation2Output{} + req.Data = output + return +} + +func (c *InputService14ProtocolTest) InputService14TestCaseOperation2(input *InputService14TestShapeInputShape) (*InputService14TestShapeInputService14TestCaseOperation2Output, error) { + req, out := c.InputService14TestCaseOperation2Request(input) + err := req.Send() + return out, err +} + +type InputService14TestShapeInputService14TestCaseOperation1Output struct { + _ struct{} `type:"structure"` +} + +type InputService14TestShapeInputService14TestCaseOperation2Output struct { + _ struct{} `type:"structure"` +} + +type InputService14TestShapeInputShape struct { + _ struct{} `type:"structure"` + + TimeArg *time.Time `type:"timestamp" timestampFormat:"unix"` + + TimeArgInHeader *time.Time `location:"header" locationName:"x-amz-timearg" type:"timestamp" timestampFormat:"rfc822"` +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type InputService15ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the InputService15ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a InputService15ProtocolTest client from just a session. +// svc := inputservice15protocoltest.New(mySession) +// +// // Create a InputService15ProtocolTest client with additional configuration +// svc := inputservice15protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewInputService15ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *InputService15ProtocolTest { + c := p.ClientConfig("inputservice15protocoltest", cfgs...) + return newInputService15ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newInputService15ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *InputService15ProtocolTest { + svc := &InputService15ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "inputservice15protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "2014-01-01", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Build.PushBack(restjson.Build) + svc.Handlers.Unmarshal.PushBack(restjson.Unmarshal) + svc.Handlers.UnmarshalMeta.PushBack(restjson.UnmarshalMeta) + svc.Handlers.UnmarshalError.PushBack(restjson.UnmarshalError) + + return svc +} + +// newRequest creates a new request for a InputService15ProtocolTest operation and runs any +// custom request initialization. 
+func (c *InputService15ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opInputService15TestCaseOperation1 = "OperationName" + +// InputService15TestCaseOperation1Request generates a request for the InputService15TestCaseOperation1 operation. +func (c *InputService15ProtocolTest) InputService15TestCaseOperation1Request(input *InputService15TestShapeInputService15TestCaseOperation1Input) (req *request.Request, output *InputService15TestShapeInputService15TestCaseOperation1Output) { + op := &request.Operation{ + Name: opInputService15TestCaseOperation1, + HTTPMethod: "POST", + HTTPPath: "/path", + } + + if input == nil { + input = &InputService15TestShapeInputService15TestCaseOperation1Input{} + } + + req = c.newRequest(op, input, output) + output = &InputService15TestShapeInputService15TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *InputService15ProtocolTest) InputService15TestCaseOperation1(input *InputService15TestShapeInputService15TestCaseOperation1Input) (*InputService15TestShapeInputService15TestCaseOperation1Output, error) { + req, out := c.InputService15TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +type InputService15TestShapeInputService15TestCaseOperation1Input struct { + _ struct{} `type:"structure"` + + TimeArg *time.Time `locationName:"timestamp_location" type:"timestamp" timestampFormat:"unix"` +} + +type InputService15TestShapeInputService15TestCaseOperation1Output struct { + _ struct{} `type:"structure"` +} + +// +// Tests begin here +// + +func TestInputService1ProtocolTestURIParameterOnlyWithNoLocationNameCase1(t *testing.T) { + sess := session.New() + svc := NewInputService1ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + + input := &InputService1TestShapeInputService1TestCaseOperation1Input{ + PipelineId: aws.String("foo"), + } + req, _ := svc.InputService1TestCaseOperation1Request(input) + r := req.HTTPRequest + + // build request + restjson.Build(req) + assert.NoError(t, req.Error) + + // assert URL + awstesting.AssertURL(t, "https://test/2014-01-01/jobsByPipeline/foo", r.URL.String()) + + // assert headers + +} + +func TestInputService2ProtocolTestURIParameterOnlyWithLocationNameCase1(t *testing.T) { + sess := session.New() + svc := NewInputService2ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + + input := &InputService2TestShapeInputService2TestCaseOperation1Input{ + Foo: aws.String("bar"), + } + req, _ := svc.InputService2TestCaseOperation1Request(input) + r := req.HTTPRequest + + // build request + restjson.Build(req) + assert.NoError(t, req.Error) + + // assert URL + awstesting.AssertURL(t, "https://test/2014-01-01/jobsByPipeline/bar", r.URL.String()) + + // assert headers + +} + +func TestInputService3ProtocolTestStringToStringMapsInQuerystringCase1(t *testing.T) { + sess := session.New() + svc := NewInputService3ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + + input := &InputService3TestShapeInputService3TestCaseOperation1Input{ + PipelineId: aws.String("foo"), + QueryDoc: map[string]*string{ + "bar": aws.String("baz"), + "fizz": aws.String("buzz"), + }, + } + req, _ := svc.InputService3TestCaseOperation1Request(input) + r := req.HTTPRequest + + // build request + restjson.Build(req) + assert.NoError(t, req.Error) + + // assert URL + awstesting.AssertURL(t, "https://test/2014-01-01/jobsByPipeline/foo?bar=baz&fizz=buzz", 
r.URL.String()) + + // assert headers + +} + +func TestInputService4ProtocolTestStringToStringListMapsInQuerystringCase1(t *testing.T) { + sess := session.New() + svc := NewInputService4ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + + input := &InputService4TestShapeInputService4TestCaseOperation1Input{ + PipelineId: aws.String("id"), + QueryDoc: map[string][]*string{ + "fizz": { + aws.String("buzz"), + aws.String("pop"), + }, + "foo": { + aws.String("bar"), + aws.String("baz"), + }, + }, + } + req, _ := svc.InputService4TestCaseOperation1Request(input) + r := req.HTTPRequest + + // build request + restjson.Build(req) + assert.NoError(t, req.Error) + + // assert URL + awstesting.AssertURL(t, "https://test/2014-01-01/jobsByPipeline/id?foo=bar&foo=baz&fizz=buzz&fizz=pop", r.URL.String()) + + // assert headers + +} + +func TestInputService5ProtocolTestURIParameterAndQuerystringParamsCase1(t *testing.T) { + sess := session.New() + svc := NewInputService5ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + + input := &InputService5TestShapeInputService5TestCaseOperation1Input{ + Ascending: aws.String("true"), + PageToken: aws.String("bar"), + PipelineId: aws.String("foo"), + } + req, _ := svc.InputService5TestCaseOperation1Request(input) + r := req.HTTPRequest + + // build request + restjson.Build(req) + assert.NoError(t, req.Error) + + // assert URL + awstesting.AssertURL(t, "https://test/2014-01-01/jobsByPipeline/foo?Ascending=true&PageToken=bar", r.URL.String()) + + // assert headers + +} + +func TestInputService6ProtocolTestURIParameterQuerystringParamsAndJSONBodyCase1(t *testing.T) { + sess := session.New() + svc := NewInputService6ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + + input := &InputService6TestShapeInputService6TestCaseOperation1Input{ + Ascending: aws.String("true"), + Config: &InputService6TestShapeStructType{ + A: aws.String("one"), + B: aws.String("two"), + }, + PageToken: aws.String("bar"), + PipelineId: aws.String("foo"), + } + req, _ := svc.InputService6TestCaseOperation1Request(input) + r := req.HTTPRequest + + // build request + restjson.Build(req) + assert.NoError(t, req.Error) + + // assert body + assert.NotNil(t, r.Body) + body, _ := ioutil.ReadAll(r.Body) + awstesting.AssertJSON(t, `{"Config":{"A":"one","B":"two"}}`, util.Trim(string(body))) + + // assert URL + awstesting.AssertURL(t, "https://test/2014-01-01/jobsByPipeline/foo?Ascending=true&PageToken=bar", r.URL.String()) + + // assert headers + +} + +func TestInputService7ProtocolTestURIParameterQuerystringParamsHeadersAndJSONBodyCase1(t *testing.T) { + sess := session.New() + svc := NewInputService7ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + + input := &InputService7TestShapeInputService7TestCaseOperation1Input{ + Ascending: aws.String("true"), + Checksum: aws.String("12345"), + Config: &InputService7TestShapeStructType{ + A: aws.String("one"), + B: aws.String("two"), + }, + PageToken: aws.String("bar"), + PipelineId: aws.String("foo"), + } + req, _ := svc.InputService7TestCaseOperation1Request(input) + r := req.HTTPRequest + + // build request + restjson.Build(req) + assert.NoError(t, req.Error) + + // assert body + assert.NotNil(t, r.Body) + body, _ := ioutil.ReadAll(r.Body) + awstesting.AssertJSON(t, `{"Config":{"A":"one","B":"two"}}`, util.Trim(string(body))) + + // assert URL + awstesting.AssertURL(t, "https://test/2014-01-01/jobsByPipeline/foo?Ascending=true&PageToken=bar", r.URL.String()) + + // assert 
headers + assert.Equal(t, "12345", r.Header.Get("x-amz-checksum")) + +} + +func TestInputService8ProtocolTestStreamingPayloadCase1(t *testing.T) { + sess := session.New() + svc := NewInputService8ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + + input := &InputService8TestShapeInputService8TestCaseOperation1Input{ + Body: aws.ReadSeekCloser(bytes.NewBufferString("contents")), + Checksum: aws.String("foo"), + VaultName: aws.String("name"), + } + req, _ := svc.InputService8TestCaseOperation1Request(input) + r := req.HTTPRequest + + // build request + restjson.Build(req) + assert.NoError(t, req.Error) + + // assert body + assert.NotNil(t, r.Body) + body, _ := ioutil.ReadAll(r.Body) + assert.Equal(t, `contents`, util.Trim(string(body))) + + // assert URL + awstesting.AssertURL(t, "https://test/2014-01-01/vaults/name/archives", r.URL.String()) + + // assert headers + assert.Equal(t, "foo", r.Header.Get("x-amz-sha256-tree-hash")) + +} + +func TestInputService9ProtocolTestStringPayloadCase1(t *testing.T) { + sess := session.New() + svc := NewInputService9ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + + input := &InputService9TestShapeInputService9TestCaseOperation1Input{ + Foo: aws.String("bar"), + } + req, _ := svc.InputService9TestCaseOperation1Request(input) + r := req.HTTPRequest + + // build request + restjson.Build(req) + assert.NoError(t, req.Error) + + // assert body + assert.NotNil(t, r.Body) + body, _ := ioutil.ReadAll(r.Body) + assert.Equal(t, `bar`, util.Trim(string(body))) + + // assert URL + awstesting.AssertURL(t, "https://test/", r.URL.String()) + + // assert headers + +} + +func TestInputService10ProtocolTestBlobPayloadCase1(t *testing.T) { + sess := session.New() + svc := NewInputService10ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + + input := &InputService10TestShapeInputShape{ + Foo: []byte("bar"), + } + req, _ := svc.InputService10TestCaseOperation1Request(input) + r := req.HTTPRequest + + // build request + restjson.Build(req) + assert.NoError(t, req.Error) + + // assert body + assert.NotNil(t, r.Body) + body, _ := ioutil.ReadAll(r.Body) + assert.Equal(t, `bar`, util.Trim(string(body))) + + // assert URL + awstesting.AssertURL(t, "https://test/", r.URL.String()) + + // assert headers + +} + +func TestInputService10ProtocolTestBlobPayloadCase2(t *testing.T) { + sess := session.New() + svc := NewInputService10ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + + input := &InputService10TestShapeInputShape{} + req, _ := svc.InputService10TestCaseOperation2Request(input) + r := req.HTTPRequest + + // build request + restjson.Build(req) + assert.NoError(t, req.Error) + + // assert URL + awstesting.AssertURL(t, "https://test/", r.URL.String()) + + // assert headers + +} + +func TestInputService11ProtocolTestStructurePayloadCase1(t *testing.T) { + sess := session.New() + svc := NewInputService11ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + + input := &InputService11TestShapeInputShape{ + Foo: &InputService11TestShapeFooShape{ + Baz: aws.String("bar"), + }, + } + req, _ := svc.InputService11TestCaseOperation1Request(input) + r := req.HTTPRequest + + // build request + restjson.Build(req) + assert.NoError(t, req.Error) + + // assert body + assert.NotNil(t, r.Body) + body, _ := ioutil.ReadAll(r.Body) + awstesting.AssertJSON(t, `{"baz":"bar"}`, util.Trim(string(body))) + + // assert URL + awstesting.AssertURL(t, "https://test/", r.URL.String()) + + // assert headers 
+ +} + +func TestInputService11ProtocolTestStructurePayloadCase2(t *testing.T) { + sess := session.New() + svc := NewInputService11ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + + input := &InputService11TestShapeInputShape{} + req, _ := svc.InputService11TestCaseOperation2Request(input) + r := req.HTTPRequest + + // build request + restjson.Build(req) + assert.NoError(t, req.Error) + + // assert URL + awstesting.AssertURL(t, "https://test/", r.URL.String()) + + // assert headers + +} + +func TestInputService12ProtocolTestOmitsNullQueryParamsButSerializesEmptyStringsCase1(t *testing.T) { + sess := session.New() + svc := NewInputService12ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + + input := &InputService12TestShapeInputShape{} + req, _ := svc.InputService12TestCaseOperation1Request(input) + r := req.HTTPRequest + + // build request + restjson.Build(req) + assert.NoError(t, req.Error) + + // assert URL + awstesting.AssertURL(t, "https://test/path", r.URL.String()) + + // assert headers + +} + +func TestInputService12ProtocolTestOmitsNullQueryParamsButSerializesEmptyStringsCase2(t *testing.T) { + sess := session.New() + svc := NewInputService12ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + + input := &InputService12TestShapeInputShape{ + Foo: aws.String(""), + } + req, _ := svc.InputService12TestCaseOperation2Request(input) + r := req.HTTPRequest + + // build request + restjson.Build(req) + assert.NoError(t, req.Error) + + // assert URL + awstesting.AssertURL(t, "https://test/path?abc=mno&param-name=", r.URL.String()) + + // assert headers + +} + +func TestInputService13ProtocolTestRecursiveShapesCase1(t *testing.T) { + sess := session.New() + svc := NewInputService13ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + + input := &InputService13TestShapeInputShape{ + RecursiveStruct: &InputService13TestShapeRecursiveStructType{ + NoRecurse: aws.String("foo"), + }, + } + req, _ := svc.InputService13TestCaseOperation1Request(input) + r := req.HTTPRequest + + // build request + restjson.Build(req) + assert.NoError(t, req.Error) + + // assert body + assert.NotNil(t, r.Body) + body, _ := ioutil.ReadAll(r.Body) + awstesting.AssertJSON(t, `{"RecursiveStruct":{"NoRecurse":"foo"}}`, util.Trim(string(body))) + + // assert URL + awstesting.AssertURL(t, "https://test/path", r.URL.String()) + + // assert headers + +} + +func TestInputService13ProtocolTestRecursiveShapesCase2(t *testing.T) { + sess := session.New() + svc := NewInputService13ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + + input := &InputService13TestShapeInputShape{ + RecursiveStruct: &InputService13TestShapeRecursiveStructType{ + RecursiveStruct: &InputService13TestShapeRecursiveStructType{ + NoRecurse: aws.String("foo"), + }, + }, + } + req, _ := svc.InputService13TestCaseOperation2Request(input) + r := req.HTTPRequest + + // build request + restjson.Build(req) + assert.NoError(t, req.Error) + + // assert body + assert.NotNil(t, r.Body) + body, _ := ioutil.ReadAll(r.Body) + awstesting.AssertJSON(t, `{"RecursiveStruct":{"RecursiveStruct":{"NoRecurse":"foo"}}}`, util.Trim(string(body))) + + // assert URL + awstesting.AssertURL(t, "https://test/path", r.URL.String()) + + // assert headers + +} + +func TestInputService13ProtocolTestRecursiveShapesCase3(t *testing.T) { + sess := session.New() + svc := NewInputService13ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + + input := 
&InputService13TestShapeInputShape{ + RecursiveStruct: &InputService13TestShapeRecursiveStructType{ + RecursiveStruct: &InputService13TestShapeRecursiveStructType{ + RecursiveStruct: &InputService13TestShapeRecursiveStructType{ + RecursiveStruct: &InputService13TestShapeRecursiveStructType{ + NoRecurse: aws.String("foo"), + }, + }, + }, + }, + } + req, _ := svc.InputService13TestCaseOperation3Request(input) + r := req.HTTPRequest + + // build request + restjson.Build(req) + assert.NoError(t, req.Error) + + // assert body + assert.NotNil(t, r.Body) + body, _ := ioutil.ReadAll(r.Body) + awstesting.AssertJSON(t, `{"RecursiveStruct":{"RecursiveStruct":{"RecursiveStruct":{"RecursiveStruct":{"NoRecurse":"foo"}}}}}`, util.Trim(string(body))) + + // assert URL + awstesting.AssertURL(t, "https://test/path", r.URL.String()) + + // assert headers + +} + +func TestInputService13ProtocolTestRecursiveShapesCase4(t *testing.T) { + sess := session.New() + svc := NewInputService13ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + + input := &InputService13TestShapeInputShape{ + RecursiveStruct: &InputService13TestShapeRecursiveStructType{ + RecursiveList: []*InputService13TestShapeRecursiveStructType{ + { + NoRecurse: aws.String("foo"), + }, + { + NoRecurse: aws.String("bar"), + }, + }, + }, + } + req, _ := svc.InputService13TestCaseOperation4Request(input) + r := req.HTTPRequest + + // build request + restjson.Build(req) + assert.NoError(t, req.Error) + + // assert body + assert.NotNil(t, r.Body) + body, _ := ioutil.ReadAll(r.Body) + awstesting.AssertJSON(t, `{"RecursiveStruct":{"RecursiveList":[{"NoRecurse":"foo"},{"NoRecurse":"bar"}]}}`, util.Trim(string(body))) + + // assert URL + awstesting.AssertURL(t, "https://test/path", r.URL.String()) + + // assert headers + +} + +func TestInputService13ProtocolTestRecursiveShapesCase5(t *testing.T) { + sess := session.New() + svc := NewInputService13ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + + input := &InputService13TestShapeInputShape{ + RecursiveStruct: &InputService13TestShapeRecursiveStructType{ + RecursiveList: []*InputService13TestShapeRecursiveStructType{ + { + NoRecurse: aws.String("foo"), + }, + { + RecursiveStruct: &InputService13TestShapeRecursiveStructType{ + NoRecurse: aws.String("bar"), + }, + }, + }, + }, + } + req, _ := svc.InputService13TestCaseOperation5Request(input) + r := req.HTTPRequest + + // build request + restjson.Build(req) + assert.NoError(t, req.Error) + + // assert body + assert.NotNil(t, r.Body) + body, _ := ioutil.ReadAll(r.Body) + awstesting.AssertJSON(t, `{"RecursiveStruct":{"RecursiveList":[{"NoRecurse":"foo"},{"RecursiveStruct":{"NoRecurse":"bar"}}]}}`, util.Trim(string(body))) + + // assert URL + awstesting.AssertURL(t, "https://test/path", r.URL.String()) + + // assert headers + +} + +func TestInputService13ProtocolTestRecursiveShapesCase6(t *testing.T) { + sess := session.New() + svc := NewInputService13ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + + input := &InputService13TestShapeInputShape{ + RecursiveStruct: &InputService13TestShapeRecursiveStructType{ + RecursiveMap: map[string]*InputService13TestShapeRecursiveStructType{ + "bar": { + NoRecurse: aws.String("bar"), + }, + "foo": { + NoRecurse: aws.String("foo"), + }, + }, + }, + } + req, _ := svc.InputService13TestCaseOperation6Request(input) + r := req.HTTPRequest + + // build request + restjson.Build(req) + assert.NoError(t, req.Error) + + // assert body + assert.NotNil(t, r.Body) + body, _ 
:= ioutil.ReadAll(r.Body) + awstesting.AssertJSON(t, `{"RecursiveStruct":{"RecursiveMap":{"foo":{"NoRecurse":"foo"},"bar":{"NoRecurse":"bar"}}}}`, util.Trim(string(body))) + + // assert URL + awstesting.AssertURL(t, "https://test/path", r.URL.String()) + + // assert headers + +} + +func TestInputService14ProtocolTestTimestampValuesCase1(t *testing.T) { + sess := session.New() + svc := NewInputService14ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + + input := &InputService14TestShapeInputShape{ + TimeArg: aws.Time(time.Unix(1422172800, 0)), + } + req, _ := svc.InputService14TestCaseOperation1Request(input) + r := req.HTTPRequest + + // build request + restjson.Build(req) + assert.NoError(t, req.Error) + + // assert body + assert.NotNil(t, r.Body) + body, _ := ioutil.ReadAll(r.Body) + awstesting.AssertJSON(t, `{"TimeArg":1422172800}`, util.Trim(string(body))) + + // assert URL + awstesting.AssertURL(t, "https://test/path", r.URL.String()) + + // assert headers + +} + +func TestInputService14ProtocolTestTimestampValuesCase2(t *testing.T) { + sess := session.New() + svc := NewInputService14ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + + input := &InputService14TestShapeInputShape{ + TimeArgInHeader: aws.Time(time.Unix(1422172800, 0)), + } + req, _ := svc.InputService14TestCaseOperation2Request(input) + r := req.HTTPRequest + + // build request + restjson.Build(req) + assert.NoError(t, req.Error) + + // assert URL + awstesting.AssertURL(t, "https://test/path", r.URL.String()) + + // assert headers + assert.Equal(t, "Sun, 25 Jan 2015 08:00:00 GMT", r.Header.Get("x-amz-timearg")) + +} + +func TestInputService15ProtocolTestNamedLocationsInJSONBodyCase1(t *testing.T) { + sess := session.New() + svc := NewInputService15ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + + input := &InputService15TestShapeInputService15TestCaseOperation1Input{ + TimeArg: aws.Time(time.Unix(1422172800, 0)), + } + req, _ := svc.InputService15TestCaseOperation1Request(input) + r := req.HTTPRequest + + // build request + restjson.Build(req) + assert.NoError(t, req.Error) + + // assert body + assert.NotNil(t, r.Body) + body, _ := ioutil.ReadAll(r.Body) + awstesting.AssertJSON(t, `{"timestamp_location":1422172800}`, util.Trim(string(body))) + + // assert URL + awstesting.AssertURL(t, "https://test/path", r.URL.String()) + + // assert headers + +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/restjson/restjson.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/restjson/restjson.go new file mode 100644 index 000000000..f0c85c0a3 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/restjson/restjson.go @@ -0,0 +1,79 @@ +// Package restjson provides RESTful JSON serialisation of AWS +// requests and responses. +package restjson + +//go:generate go run ../../../models/protocol_tests/generate.go ../../../models/protocol_tests/input/rest-json.json build_test.go +//go:generate go run ../../../models/protocol_tests/generate.go ../../../models/protocol_tests/output/rest-json.json unmarshal_test.go + +import ( + "encoding/json" + "io/ioutil" + "strings" + + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol/jsonrpc" + "github.com/aws/aws-sdk-go/private/protocol/rest" +) + +// Build builds a request for the REST JSON protocol. 
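+//
+// REST JSON composes two existing marshalers: rest.Build serializes the
+// members tagged with URI, query-string, and header locations, and
+// jsonrpc.Build is layered on top to marshal the JSON body whenever the
+// payload type is a structure (or the operation declares no payload).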
+func Build(r *request.Request) { + rest.Build(r) + + if t := rest.PayloadType(r.Params); t == "structure" || t == "" { + jsonrpc.Build(r) + } +} + +// Unmarshal unmarshals a response body for the REST JSON protocol. +func Unmarshal(r *request.Request) { + if t := rest.PayloadType(r.Data); t == "structure" || t == "" { + jsonrpc.Unmarshal(r) + } else { + rest.Unmarshal(r) + } +} + +// UnmarshalMeta unmarshals response headers for the REST JSON protocol. +func UnmarshalMeta(r *request.Request) { + rest.UnmarshalMeta(r) +} + +// UnmarshalError unmarshals a response error for the REST JSON protocol. +func UnmarshalError(r *request.Request) { + code := r.HTTPResponse.Header.Get("X-Amzn-Errortype") + bodyBytes, err := ioutil.ReadAll(r.HTTPResponse.Body) + if err != nil { + r.Error = awserr.New("SerializationError", "failed reading REST JSON error response", err) + return + } + if len(bodyBytes) == 0 { + r.Error = awserr.NewRequestFailure( + awserr.New("SerializationError", r.HTTPResponse.Status, nil), + r.HTTPResponse.StatusCode, + "", + ) + return + } + var jsonErr jsonErrorResponse + if err := json.Unmarshal(bodyBytes, &jsonErr); err != nil { + r.Error = awserr.New("SerializationError", "failed decoding REST JSON error response", err) + return + } + + if code == "" { + code = jsonErr.Code + } + + code = strings.SplitN(code, ":", 2)[0] + r.Error = awserr.NewRequestFailure( + awserr.New(code, jsonErr.Message, nil), + r.HTTPResponse.StatusCode, + r.RequestID, + ) +} + +type jsonErrorResponse struct { + Code string `json:"code"` + Message string `json:"message"` +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/restjson/unmarshal_test.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/restjson/unmarshal_test.go new file mode 100644 index 000000000..7069e0164 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/restjson/unmarshal_test.go @@ -0,0 +1,1321 @@ +package restjson_test + +import ( + "bytes" + "encoding/json" + "encoding/xml" + "io" + "io/ioutil" + "net/http" + "net/url" + "testing" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/client" + "github.com/aws/aws-sdk-go/aws/client/metadata" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/aws/session" + "github.com/aws/aws-sdk-go/awstesting" + "github.com/aws/aws-sdk-go/private/protocol/restjson" + "github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil" + "github.com/aws/aws-sdk-go/private/signer/v4" + "github.com/aws/aws-sdk-go/private/util" + "github.com/stretchr/testify/assert" +) + +var _ bytes.Buffer // always import bytes +var _ http.Request +var _ json.Marshaler +var _ time.Time +var _ xmlutil.XMLNode +var _ xml.Attr +var _ = awstesting.GenerateAssertions +var _ = ioutil.Discard +var _ = util.Trim("") +var _ = url.Values{} +var _ = io.EOF +var _ = aws.String + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type OutputService1ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the OutputService1ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a OutputService1ProtocolTest client from just a session. 
+// svc := outputservice1protocoltest.New(mySession) +// +// // Create a OutputService1ProtocolTest client with additional configuration +// svc := outputservice1protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewOutputService1ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *OutputService1ProtocolTest { + c := p.ClientConfig("outputservice1protocoltest", cfgs...) + return newOutputService1ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newOutputService1ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *OutputService1ProtocolTest { + svc := &OutputService1ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "outputservice1protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Build.PushBack(restjson.Build) + svc.Handlers.Unmarshal.PushBack(restjson.Unmarshal) + svc.Handlers.UnmarshalMeta.PushBack(restjson.UnmarshalMeta) + svc.Handlers.UnmarshalError.PushBack(restjson.UnmarshalError) + + return svc +} + +// newRequest creates a new request for a OutputService1ProtocolTest operation and runs any +// custom request initialization. +func (c *OutputService1ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opOutputService1TestCaseOperation1 = "OperationName" + +// OutputService1TestCaseOperation1Request generates a request for the OutputService1TestCaseOperation1 operation. +func (c *OutputService1ProtocolTest) OutputService1TestCaseOperation1Request(input *OutputService1TestShapeOutputService1TestCaseOperation1Input) (req *request.Request, output *OutputService1TestShapeOutputService1TestCaseOperation1Output) { + op := &request.Operation{ + Name: opOutputService1TestCaseOperation1, + } + + if input == nil { + input = &OutputService1TestShapeOutputService1TestCaseOperation1Input{} + } + + req = c.newRequest(op, input, output) + output = &OutputService1TestShapeOutputService1TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *OutputService1ProtocolTest) OutputService1TestCaseOperation1(input *OutputService1TestShapeOutputService1TestCaseOperation1Input) (*OutputService1TestShapeOutputService1TestCaseOperation1Output, error) { + req, out := c.OutputService1TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +type OutputService1TestShapeOutputService1TestCaseOperation1Input struct { + _ struct{} `type:"structure"` +} + +type OutputService1TestShapeOutputService1TestCaseOperation1Output struct { + _ struct{} `type:"structure"` + + Char *string `type:"character"` + + Double *float64 `type:"double"` + + FalseBool *bool `type:"boolean"` + + Float *float64 `type:"float"` + + ImaHeader *string `location:"header" type:"string"` + + ImaHeaderLocation *string `location:"header" locationName:"X-Foo" type:"string"` + + Long *int64 `type:"long"` + + Num *int64 `type:"integer"` + + Status *int64 `location:"statusCode" type:"integer"` + + Str *string `type:"string"` + + TrueBool *bool `type:"boolean"` +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. 
+type OutputService2ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the OutputService2ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a OutputService2ProtocolTest client from just a session. +// svc := outputservice2protocoltest.New(mySession) +// +// // Create a OutputService2ProtocolTest client with additional configuration +// svc := outputservice2protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewOutputService2ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *OutputService2ProtocolTest { + c := p.ClientConfig("outputservice2protocoltest", cfgs...) + return newOutputService2ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newOutputService2ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *OutputService2ProtocolTest { + svc := &OutputService2ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "outputservice2protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Build.PushBack(restjson.Build) + svc.Handlers.Unmarshal.PushBack(restjson.Unmarshal) + svc.Handlers.UnmarshalMeta.PushBack(restjson.UnmarshalMeta) + svc.Handlers.UnmarshalError.PushBack(restjson.UnmarshalError) + + return svc +} + +// newRequest creates a new request for a OutputService2ProtocolTest operation and runs any +// custom request initialization. +func (c *OutputService2ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opOutputService2TestCaseOperation1 = "OperationName" + +// OutputService2TestCaseOperation1Request generates a request for the OutputService2TestCaseOperation1 operation. +func (c *OutputService2ProtocolTest) OutputService2TestCaseOperation1Request(input *OutputService2TestShapeOutputService2TestCaseOperation1Input) (req *request.Request, output *OutputService2TestShapeOutputService2TestCaseOperation1Output) { + op := &request.Operation{ + Name: opOutputService2TestCaseOperation1, + } + + if input == nil { + input = &OutputService2TestShapeOutputService2TestCaseOperation1Input{} + } + + req = c.newRequest(op, input, output) + output = &OutputService2TestShapeOutputService2TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *OutputService2ProtocolTest) OutputService2TestCaseOperation1(input *OutputService2TestShapeOutputService2TestCaseOperation1Input) (*OutputService2TestShapeOutputService2TestCaseOperation1Output, error) { + req, out := c.OutputService2TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +type OutputService2TestShapeBlobContainer struct { + _ struct{} `type:"structure"` + + Foo []byte `locationName:"foo" type:"blob"` +} + +type OutputService2TestShapeOutputService2TestCaseOperation1Input struct { + _ struct{} `type:"structure"` +} + +type OutputService2TestShapeOutputService2TestCaseOperation1Output struct { + _ struct{} `type:"structure"` + + BlobMember []byte `type:"blob"` + + StructMember *OutputService2TestShapeBlobContainer `type:"structure"` +} + +//The service client's operations are safe to be used concurrently. 
+// It is not safe to mutate any of the client's properties though. +type OutputService3ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the OutputService3ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a OutputService3ProtocolTest client from just a session. +// svc := outputservice3protocoltest.New(mySession) +// +// // Create a OutputService3ProtocolTest client with additional configuration +// svc := outputservice3protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewOutputService3ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *OutputService3ProtocolTest { + c := p.ClientConfig("outputservice3protocoltest", cfgs...) + return newOutputService3ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newOutputService3ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *OutputService3ProtocolTest { + svc := &OutputService3ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "outputservice3protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Build.PushBack(restjson.Build) + svc.Handlers.Unmarshal.PushBack(restjson.Unmarshal) + svc.Handlers.UnmarshalMeta.PushBack(restjson.UnmarshalMeta) + svc.Handlers.UnmarshalError.PushBack(restjson.UnmarshalError) + + return svc +} + +// newRequest creates a new request for a OutputService3ProtocolTest operation and runs any +// custom request initialization. +func (c *OutputService3ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opOutputService3TestCaseOperation1 = "OperationName" + +// OutputService3TestCaseOperation1Request generates a request for the OutputService3TestCaseOperation1 operation. 
+func (c *OutputService3ProtocolTest) OutputService3TestCaseOperation1Request(input *OutputService3TestShapeOutputService3TestCaseOperation1Input) (req *request.Request, output *OutputService3TestShapeOutputService3TestCaseOperation1Output) { + op := &request.Operation{ + Name: opOutputService3TestCaseOperation1, + } + + if input == nil { + input = &OutputService3TestShapeOutputService3TestCaseOperation1Input{} + } + + req = c.newRequest(op, input, output) + output = &OutputService3TestShapeOutputService3TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *OutputService3ProtocolTest) OutputService3TestCaseOperation1(input *OutputService3TestShapeOutputService3TestCaseOperation1Input) (*OutputService3TestShapeOutputService3TestCaseOperation1Output, error) { + req, out := c.OutputService3TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +type OutputService3TestShapeOutputService3TestCaseOperation1Input struct { + _ struct{} `type:"structure"` +} + +type OutputService3TestShapeOutputService3TestCaseOperation1Output struct { + _ struct{} `type:"structure"` + + StructMember *OutputService3TestShapeTimeContainer `type:"structure"` + + TimeMember *time.Time `type:"timestamp" timestampFormat:"unix"` +} + +type OutputService3TestShapeTimeContainer struct { + _ struct{} `type:"structure"` + + Foo *time.Time `locationName:"foo" type:"timestamp" timestampFormat:"unix"` +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type OutputService4ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the OutputService4ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a OutputService4ProtocolTest client from just a session. +// svc := outputservice4protocoltest.New(mySession) +// +// // Create a OutputService4ProtocolTest client with additional configuration +// svc := outputservice4protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewOutputService4ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *OutputService4ProtocolTest { + c := p.ClientConfig("outputservice4protocoltest", cfgs...) + return newOutputService4ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newOutputService4ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *OutputService4ProtocolTest { + svc := &OutputService4ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "outputservice4protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Build.PushBack(restjson.Build) + svc.Handlers.Unmarshal.PushBack(restjson.Unmarshal) + svc.Handlers.UnmarshalMeta.PushBack(restjson.UnmarshalMeta) + svc.Handlers.UnmarshalError.PushBack(restjson.UnmarshalError) + + return svc +} + +// newRequest creates a new request for a OutputService4ProtocolTest operation and runs any +// custom request initialization. 
+func (c *OutputService4ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opOutputService4TestCaseOperation1 = "OperationName" + +// OutputService4TestCaseOperation1Request generates a request for the OutputService4TestCaseOperation1 operation. +func (c *OutputService4ProtocolTest) OutputService4TestCaseOperation1Request(input *OutputService4TestShapeOutputService4TestCaseOperation1Input) (req *request.Request, output *OutputService4TestShapeOutputService4TestCaseOperation1Output) { + op := &request.Operation{ + Name: opOutputService4TestCaseOperation1, + } + + if input == nil { + input = &OutputService4TestShapeOutputService4TestCaseOperation1Input{} + } + + req = c.newRequest(op, input, output) + output = &OutputService4TestShapeOutputService4TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *OutputService4ProtocolTest) OutputService4TestCaseOperation1(input *OutputService4TestShapeOutputService4TestCaseOperation1Input) (*OutputService4TestShapeOutputService4TestCaseOperation1Output, error) { + req, out := c.OutputService4TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +type OutputService4TestShapeOutputService4TestCaseOperation1Input struct { + _ struct{} `type:"structure"` +} + +type OutputService4TestShapeOutputService4TestCaseOperation1Output struct { + _ struct{} `type:"structure"` + + ListMember []*string `type:"list"` +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type OutputService5ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the OutputService5ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a OutputService5ProtocolTest client from just a session. +// svc := outputservice5protocoltest.New(mySession) +// +// // Create a OutputService5ProtocolTest client with additional configuration +// svc := outputservice5protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewOutputService5ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *OutputService5ProtocolTest { + c := p.ClientConfig("outputservice5protocoltest", cfgs...) + return newOutputService5ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newOutputService5ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *OutputService5ProtocolTest { + svc := &OutputService5ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "outputservice5protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Build.PushBack(restjson.Build) + svc.Handlers.Unmarshal.PushBack(restjson.Unmarshal) + svc.Handlers.UnmarshalMeta.PushBack(restjson.UnmarshalMeta) + svc.Handlers.UnmarshalError.PushBack(restjson.UnmarshalError) + + return svc +} + +// newRequest creates a new request for a OutputService5ProtocolTest operation and runs any +// custom request initialization. 
+func (c *OutputService5ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opOutputService5TestCaseOperation1 = "OperationName" + +// OutputService5TestCaseOperation1Request generates a request for the OutputService5TestCaseOperation1 operation. +func (c *OutputService5ProtocolTest) OutputService5TestCaseOperation1Request(input *OutputService5TestShapeOutputService5TestCaseOperation1Input) (req *request.Request, output *OutputService5TestShapeOutputService5TestCaseOperation1Output) { + op := &request.Operation{ + Name: opOutputService5TestCaseOperation1, + } + + if input == nil { + input = &OutputService5TestShapeOutputService5TestCaseOperation1Input{} + } + + req = c.newRequest(op, input, output) + output = &OutputService5TestShapeOutputService5TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *OutputService5ProtocolTest) OutputService5TestCaseOperation1(input *OutputService5TestShapeOutputService5TestCaseOperation1Input) (*OutputService5TestShapeOutputService5TestCaseOperation1Output, error) { + req, out := c.OutputService5TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +type OutputService5TestShapeOutputService5TestCaseOperation1Input struct { + _ struct{} `type:"structure"` +} + +type OutputService5TestShapeOutputService5TestCaseOperation1Output struct { + _ struct{} `type:"structure"` + + ListMember []*OutputService5TestShapeSingleStruct `type:"list"` +} + +type OutputService5TestShapeSingleStruct struct { + _ struct{} `type:"structure"` + + Foo *string `type:"string"` +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type OutputService6ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the OutputService6ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a OutputService6ProtocolTest client from just a session. +// svc := outputservice6protocoltest.New(mySession) +// +// // Create a OutputService6ProtocolTest client with additional configuration +// svc := outputservice6protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewOutputService6ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *OutputService6ProtocolTest { + c := p.ClientConfig("outputservice6protocoltest", cfgs...) + return newOutputService6ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. 
+func newOutputService6ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *OutputService6ProtocolTest { + svc := &OutputService6ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "outputservice6protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Build.PushBack(restjson.Build) + svc.Handlers.Unmarshal.PushBack(restjson.Unmarshal) + svc.Handlers.UnmarshalMeta.PushBack(restjson.UnmarshalMeta) + svc.Handlers.UnmarshalError.PushBack(restjson.UnmarshalError) + + return svc +} + +// newRequest creates a new request for a OutputService6ProtocolTest operation and runs any +// custom request initialization. +func (c *OutputService6ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opOutputService6TestCaseOperation1 = "OperationName" + +// OutputService6TestCaseOperation1Request generates a request for the OutputService6TestCaseOperation1 operation. +func (c *OutputService6ProtocolTest) OutputService6TestCaseOperation1Request(input *OutputService6TestShapeOutputService6TestCaseOperation1Input) (req *request.Request, output *OutputService6TestShapeOutputService6TestCaseOperation1Output) { + op := &request.Operation{ + Name: opOutputService6TestCaseOperation1, + } + + if input == nil { + input = &OutputService6TestShapeOutputService6TestCaseOperation1Input{} + } + + req = c.newRequest(op, input, output) + output = &OutputService6TestShapeOutputService6TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *OutputService6ProtocolTest) OutputService6TestCaseOperation1(input *OutputService6TestShapeOutputService6TestCaseOperation1Input) (*OutputService6TestShapeOutputService6TestCaseOperation1Output, error) { + req, out := c.OutputService6TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +type OutputService6TestShapeOutputService6TestCaseOperation1Input struct { + _ struct{} `type:"structure"` +} + +type OutputService6TestShapeOutputService6TestCaseOperation1Output struct { + _ struct{} `type:"structure"` + + MapMember map[string][]*int64 `type:"map"` +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type OutputService7ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the OutputService7ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a OutputService7ProtocolTest client from just a session. +// svc := outputservice7protocoltest.New(mySession) +// +// // Create a OutputService7ProtocolTest client with additional configuration +// svc := outputservice7protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewOutputService7ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *OutputService7ProtocolTest { + c := p.ClientConfig("outputservice7protocoltest", cfgs...) + return newOutputService7ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. 
+func newOutputService7ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *OutputService7ProtocolTest { + svc := &OutputService7ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "outputservice7protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Build.PushBack(restjson.Build) + svc.Handlers.Unmarshal.PushBack(restjson.Unmarshal) + svc.Handlers.UnmarshalMeta.PushBack(restjson.UnmarshalMeta) + svc.Handlers.UnmarshalError.PushBack(restjson.UnmarshalError) + + return svc +} + +// newRequest creates a new request for a OutputService7ProtocolTest operation and runs any +// custom request initialization. +func (c *OutputService7ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opOutputService7TestCaseOperation1 = "OperationName" + +// OutputService7TestCaseOperation1Request generates a request for the OutputService7TestCaseOperation1 operation. +func (c *OutputService7ProtocolTest) OutputService7TestCaseOperation1Request(input *OutputService7TestShapeOutputService7TestCaseOperation1Input) (req *request.Request, output *OutputService7TestShapeOutputService7TestCaseOperation1Output) { + op := &request.Operation{ + Name: opOutputService7TestCaseOperation1, + } + + if input == nil { + input = &OutputService7TestShapeOutputService7TestCaseOperation1Input{} + } + + req = c.newRequest(op, input, output) + output = &OutputService7TestShapeOutputService7TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *OutputService7ProtocolTest) OutputService7TestCaseOperation1(input *OutputService7TestShapeOutputService7TestCaseOperation1Input) (*OutputService7TestShapeOutputService7TestCaseOperation1Output, error) { + req, out := c.OutputService7TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +type OutputService7TestShapeOutputService7TestCaseOperation1Input struct { + _ struct{} `type:"structure"` +} + +type OutputService7TestShapeOutputService7TestCaseOperation1Output struct { + _ struct{} `type:"structure"` + + MapMember map[string]*time.Time `type:"map"` +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type OutputService8ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the OutputService8ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a OutputService8ProtocolTest client from just a session. +// svc := outputservice8protocoltest.New(mySession) +// +// // Create a OutputService8ProtocolTest client with additional configuration +// svc := outputservice8protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewOutputService8ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *OutputService8ProtocolTest { + c := p.ClientConfig("outputservice8protocoltest", cfgs...) + return newOutputService8ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. 
+func newOutputService8ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *OutputService8ProtocolTest { + svc := &OutputService8ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "outputservice8protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Build.PushBack(restjson.Build) + svc.Handlers.Unmarshal.PushBack(restjson.Unmarshal) + svc.Handlers.UnmarshalMeta.PushBack(restjson.UnmarshalMeta) + svc.Handlers.UnmarshalError.PushBack(restjson.UnmarshalError) + + return svc +} + +// newRequest creates a new request for a OutputService8ProtocolTest operation and runs any +// custom request initialization. +func (c *OutputService8ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opOutputService8TestCaseOperation1 = "OperationName" + +// OutputService8TestCaseOperation1Request generates a request for the OutputService8TestCaseOperation1 operation. +func (c *OutputService8ProtocolTest) OutputService8TestCaseOperation1Request(input *OutputService8TestShapeOutputService8TestCaseOperation1Input) (req *request.Request, output *OutputService8TestShapeOutputService8TestCaseOperation1Output) { + op := &request.Operation{ + Name: opOutputService8TestCaseOperation1, + } + + if input == nil { + input = &OutputService8TestShapeOutputService8TestCaseOperation1Input{} + } + + req = c.newRequest(op, input, output) + output = &OutputService8TestShapeOutputService8TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *OutputService8ProtocolTest) OutputService8TestCaseOperation1(input *OutputService8TestShapeOutputService8TestCaseOperation1Input) (*OutputService8TestShapeOutputService8TestCaseOperation1Output, error) { + req, out := c.OutputService8TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +type OutputService8TestShapeOutputService8TestCaseOperation1Input struct { + _ struct{} `type:"structure"` +} + +type OutputService8TestShapeOutputService8TestCaseOperation1Output struct { + _ struct{} `type:"structure"` + + StrType *string `type:"string"` +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type OutputService9ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the OutputService9ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a OutputService9ProtocolTest client from just a session. +// svc := outputservice9protocoltest.New(mySession) +// +// // Create a OutputService9ProtocolTest client with additional configuration +// svc := outputservice9protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewOutputService9ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *OutputService9ProtocolTest { + c := p.ClientConfig("outputservice9protocoltest", cfgs...) + return newOutputService9ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. 
+func newOutputService9ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *OutputService9ProtocolTest { + svc := &OutputService9ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "outputservice9protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Build.PushBack(restjson.Build) + svc.Handlers.Unmarshal.PushBack(restjson.Unmarshal) + svc.Handlers.UnmarshalMeta.PushBack(restjson.UnmarshalMeta) + svc.Handlers.UnmarshalError.PushBack(restjson.UnmarshalError) + + return svc +} + +// newRequest creates a new request for a OutputService9ProtocolTest operation and runs any +// custom request initialization. +func (c *OutputService9ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opOutputService9TestCaseOperation1 = "OperationName" + +// OutputService9TestCaseOperation1Request generates a request for the OutputService9TestCaseOperation1 operation. +func (c *OutputService9ProtocolTest) OutputService9TestCaseOperation1Request(input *OutputService9TestShapeOutputService9TestCaseOperation1Input) (req *request.Request, output *OutputService9TestShapeOutputService9TestCaseOperation1Output) { + op := &request.Operation{ + Name: opOutputService9TestCaseOperation1, + } + + if input == nil { + input = &OutputService9TestShapeOutputService9TestCaseOperation1Input{} + } + + req = c.newRequest(op, input, output) + output = &OutputService9TestShapeOutputService9TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *OutputService9ProtocolTest) OutputService9TestCaseOperation1(input *OutputService9TestShapeOutputService9TestCaseOperation1Input) (*OutputService9TestShapeOutputService9TestCaseOperation1Output, error) { + req, out := c.OutputService9TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +type OutputService9TestShapeOutputService9TestCaseOperation1Input struct { + _ struct{} `type:"structure"` +} + +type OutputService9TestShapeOutputService9TestCaseOperation1Output struct { + _ struct{} `type:"structure"` + + AllHeaders map[string]*string `location:"headers" type:"map"` + + PrefixedHeaders map[string]*string `location:"headers" locationName:"X-" type:"map"` +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type OutputService10ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the OutputService10ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a OutputService10ProtocolTest client from just a session. +// svc := outputservice10protocoltest.New(mySession) +// +// // Create a OutputService10ProtocolTest client with additional configuration +// svc := outputservice10protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewOutputService10ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *OutputService10ProtocolTest { + c := p.ClientConfig("outputservice10protocoltest", cfgs...) + return newOutputService10ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. 
+func newOutputService10ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *OutputService10ProtocolTest { + svc := &OutputService10ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "outputservice10protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Build.PushBack(restjson.Build) + svc.Handlers.Unmarshal.PushBack(restjson.Unmarshal) + svc.Handlers.UnmarshalMeta.PushBack(restjson.UnmarshalMeta) + svc.Handlers.UnmarshalError.PushBack(restjson.UnmarshalError) + + return svc +} + +// newRequest creates a new request for a OutputService10ProtocolTest operation and runs any +// custom request initialization. +func (c *OutputService10ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opOutputService10TestCaseOperation1 = "OperationName" + +// OutputService10TestCaseOperation1Request generates a request for the OutputService10TestCaseOperation1 operation. +func (c *OutputService10ProtocolTest) OutputService10TestCaseOperation1Request(input *OutputService10TestShapeOutputService10TestCaseOperation1Input) (req *request.Request, output *OutputService10TestShapeOutputService10TestCaseOperation1Output) { + op := &request.Operation{ + Name: opOutputService10TestCaseOperation1, + } + + if input == nil { + input = &OutputService10TestShapeOutputService10TestCaseOperation1Input{} + } + + req = c.newRequest(op, input, output) + output = &OutputService10TestShapeOutputService10TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *OutputService10ProtocolTest) OutputService10TestCaseOperation1(input *OutputService10TestShapeOutputService10TestCaseOperation1Input) (*OutputService10TestShapeOutputService10TestCaseOperation1Output, error) { + req, out := c.OutputService10TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +type OutputService10TestShapeBodyStructure struct { + _ struct{} `type:"structure"` + + Foo *string `type:"string"` +} + +type OutputService10TestShapeOutputService10TestCaseOperation1Input struct { + _ struct{} `type:"structure"` +} + +type OutputService10TestShapeOutputService10TestCaseOperation1Output struct { + _ struct{} `type:"structure" payload:"Data"` + + Data *OutputService10TestShapeBodyStructure `type:"structure"` + + Header *string `location:"header" locationName:"X-Foo" type:"string"` +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type OutputService11ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the OutputService11ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a OutputService11ProtocolTest client from just a session. +// svc := outputservice11protocoltest.New(mySession) +// +// // Create a OutputService11ProtocolTest client with additional configuration +// svc := outputservice11protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewOutputService11ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *OutputService11ProtocolTest { + c := p.ClientConfig("outputservice11protocoltest", cfgs...) 
+ return newOutputService11ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newOutputService11ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *OutputService11ProtocolTest { + svc := &OutputService11ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "outputservice11protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Build.PushBack(restjson.Build) + svc.Handlers.Unmarshal.PushBack(restjson.Unmarshal) + svc.Handlers.UnmarshalMeta.PushBack(restjson.UnmarshalMeta) + svc.Handlers.UnmarshalError.PushBack(restjson.UnmarshalError) + + return svc +} + +// newRequest creates a new request for a OutputService11ProtocolTest operation and runs any +// custom request initialization. +func (c *OutputService11ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opOutputService11TestCaseOperation1 = "OperationName" + +// OutputService11TestCaseOperation1Request generates a request for the OutputService11TestCaseOperation1 operation. +func (c *OutputService11ProtocolTest) OutputService11TestCaseOperation1Request(input *OutputService11TestShapeOutputService11TestCaseOperation1Input) (req *request.Request, output *OutputService11TestShapeOutputService11TestCaseOperation1Output) { + op := &request.Operation{ + Name: opOutputService11TestCaseOperation1, + } + + if input == nil { + input = &OutputService11TestShapeOutputService11TestCaseOperation1Input{} + } + + req = c.newRequest(op, input, output) + output = &OutputService11TestShapeOutputService11TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *OutputService11ProtocolTest) OutputService11TestCaseOperation1(input *OutputService11TestShapeOutputService11TestCaseOperation1Input) (*OutputService11TestShapeOutputService11TestCaseOperation1Output, error) { + req, out := c.OutputService11TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +type OutputService11TestShapeOutputService11TestCaseOperation1Input struct { + _ struct{} `type:"structure"` +} + +type OutputService11TestShapeOutputService11TestCaseOperation1Output struct { + _ struct{} `type:"structure" payload:"Stream"` + + Stream []byte `type:"blob"` +} + +// +// Tests begin here +// + +func TestOutputService1ProtocolTestScalarMembersCase1(t *testing.T) { + sess := session.New() + svc := NewOutputService1ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + + buf := bytes.NewReader([]byte("{\"Str\": \"myname\", \"Num\": 123, \"FalseBool\": false, \"TrueBool\": true, \"Float\": 1.2, \"Double\": 1.3, \"Long\": 200, \"Char\": \"a\"}")) + req, out := svc.OutputService1TestCaseOperation1Request(nil) + req.HTTPResponse = &http.Response{StatusCode: 200, Body: ioutil.NopCloser(buf), Header: http.Header{}} + + // set headers + req.HTTPResponse.Header.Set("ImaHeader", "test") + req.HTTPResponse.Header.Set("X-Foo", "abc") + + // unmarshal response + restjson.UnmarshalMeta(req) + restjson.Unmarshal(req) + assert.NoError(t, req.Error) + + // assert response + assert.NotNil(t, out) // ensure out variable is used + assert.Equal(t, "a", *out.Char) + assert.Equal(t, 1.3, *out.Double) + assert.Equal(t, false, *out.FalseBool) + assert.Equal(t, 1.2, 
*out.Float) + assert.Equal(t, "test", *out.ImaHeader) + assert.Equal(t, "abc", *out.ImaHeaderLocation) + assert.Equal(t, int64(200), *out.Long) + assert.Equal(t, int64(123), *out.Num) + assert.Equal(t, int64(200), *out.Status) + assert.Equal(t, "myname", *out.Str) + assert.Equal(t, true, *out.TrueBool) + +} + +func TestOutputService2ProtocolTestBlobMembersCase1(t *testing.T) { + sess := session.New() + svc := NewOutputService2ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + + buf := bytes.NewReader([]byte("{\"BlobMember\": \"aGkh\", \"StructMember\": {\"foo\": \"dGhlcmUh\"}}")) + req, out := svc.OutputService2TestCaseOperation1Request(nil) + req.HTTPResponse = &http.Response{StatusCode: 200, Body: ioutil.NopCloser(buf), Header: http.Header{}} + + // set headers + + // unmarshal response + restjson.UnmarshalMeta(req) + restjson.Unmarshal(req) + assert.NoError(t, req.Error) + + // assert response + assert.NotNil(t, out) // ensure out variable is used + assert.Equal(t, "hi!", string(out.BlobMember)) + assert.Equal(t, "there!", string(out.StructMember.Foo)) + +} + +func TestOutputService3ProtocolTestTimestampMembersCase1(t *testing.T) { + sess := session.New() + svc := NewOutputService3ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + + buf := bytes.NewReader([]byte("{\"TimeMember\": 1398796238, \"StructMember\": {\"foo\": 1398796238}}")) + req, out := svc.OutputService3TestCaseOperation1Request(nil) + req.HTTPResponse = &http.Response{StatusCode: 200, Body: ioutil.NopCloser(buf), Header: http.Header{}} + + // set headers + + // unmarshal response + restjson.UnmarshalMeta(req) + restjson.Unmarshal(req) + assert.NoError(t, req.Error) + + // assert response + assert.NotNil(t, out) // ensure out variable is used + assert.Equal(t, time.Unix(1.398796238e+09, 0).UTC().String(), out.StructMember.Foo.String()) + assert.Equal(t, time.Unix(1.398796238e+09, 0).UTC().String(), out.TimeMember.String()) + +} + +func TestOutputService4ProtocolTestListsCase1(t *testing.T) { + sess := session.New() + svc := NewOutputService4ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + + buf := bytes.NewReader([]byte("{\"ListMember\": [\"a\", \"b\"]}")) + req, out := svc.OutputService4TestCaseOperation1Request(nil) + req.HTTPResponse = &http.Response{StatusCode: 200, Body: ioutil.NopCloser(buf), Header: http.Header{}} + + // set headers + + // unmarshal response + restjson.UnmarshalMeta(req) + restjson.Unmarshal(req) + assert.NoError(t, req.Error) + + // assert response + assert.NotNil(t, out) // ensure out variable is used + assert.Equal(t, "a", *out.ListMember[0]) + assert.Equal(t, "b", *out.ListMember[1]) + +} + +func TestOutputService5ProtocolTestListsWithStructureMemberCase1(t *testing.T) { + sess := session.New() + svc := NewOutputService5ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + + buf := bytes.NewReader([]byte("{\"ListMember\": [{\"Foo\": \"a\"}, {\"Foo\": \"b\"}]}")) + req, out := svc.OutputService5TestCaseOperation1Request(nil) + req.HTTPResponse = &http.Response{StatusCode: 200, Body: ioutil.NopCloser(buf), Header: http.Header{}} + + // set headers + + // unmarshal response + restjson.UnmarshalMeta(req) + restjson.Unmarshal(req) + assert.NoError(t, req.Error) + + // assert response + assert.NotNil(t, out) // ensure out variable is used + assert.Equal(t, "a", *out.ListMember[0].Foo) + assert.Equal(t, "b", *out.ListMember[1].Foo) + +} + +func TestOutputService6ProtocolTestMapsCase1(t *testing.T) { + sess := 
session.New() + svc := NewOutputService6ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + + buf := bytes.NewReader([]byte("{\"MapMember\": {\"a\": [1, 2], \"b\": [3, 4]}}")) + req, out := svc.OutputService6TestCaseOperation1Request(nil) + req.HTTPResponse = &http.Response{StatusCode: 200, Body: ioutil.NopCloser(buf), Header: http.Header{}} + + // set headers + + // unmarshal response + restjson.UnmarshalMeta(req) + restjson.Unmarshal(req) + assert.NoError(t, req.Error) + + // assert response + assert.NotNil(t, out) // ensure out variable is used + assert.Equal(t, int64(1), *out.MapMember["a"][0]) + assert.Equal(t, int64(2), *out.MapMember["a"][1]) + assert.Equal(t, int64(3), *out.MapMember["b"][0]) + assert.Equal(t, int64(4), *out.MapMember["b"][1]) + +} + +func TestOutputService7ProtocolTestComplexMapValuesCase1(t *testing.T) { + sess := session.New() + svc := NewOutputService7ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + + buf := bytes.NewReader([]byte("{\"MapMember\": {\"a\": 1398796238, \"b\": 1398796238}}")) + req, out := svc.OutputService7TestCaseOperation1Request(nil) + req.HTTPResponse = &http.Response{StatusCode: 200, Body: ioutil.NopCloser(buf), Header: http.Header{}} + + // set headers + + // unmarshal response + restjson.UnmarshalMeta(req) + restjson.Unmarshal(req) + assert.NoError(t, req.Error) + + // assert response + assert.NotNil(t, out) // ensure out variable is used + assert.Equal(t, time.Unix(1.398796238e+09, 0).UTC().String(), out.MapMember["a"].String()) + assert.Equal(t, time.Unix(1.398796238e+09, 0).UTC().String(), out.MapMember["b"].String()) + +} + +func TestOutputService8ProtocolTestIgnoresExtraDataCase1(t *testing.T) { + sess := session.New() + svc := NewOutputService8ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + + buf := bytes.NewReader([]byte("{\"foo\": \"bar\"}")) + req, out := svc.OutputService8TestCaseOperation1Request(nil) + req.HTTPResponse = &http.Response{StatusCode: 200, Body: ioutil.NopCloser(buf), Header: http.Header{}} + + // set headers + + // unmarshal response + restjson.UnmarshalMeta(req) + restjson.Unmarshal(req) + assert.NoError(t, req.Error) + + // assert response + assert.NotNil(t, out) // ensure out variable is used + +} + +func TestOutputService9ProtocolTestSupportsHeaderMapsCase1(t *testing.T) { + sess := session.New() + svc := NewOutputService9ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + + buf := bytes.NewReader([]byte("{}")) + req, out := svc.OutputService9TestCaseOperation1Request(nil) + req.HTTPResponse = &http.Response{StatusCode: 200, Body: ioutil.NopCloser(buf), Header: http.Header{}} + + // set headers + req.HTTPResponse.Header.Set("Content-Length", "10") + req.HTTPResponse.Header.Set("X-Bam", "boo") + req.HTTPResponse.Header.Set("X-Foo", "bar") + + // unmarshal response + restjson.UnmarshalMeta(req) + restjson.Unmarshal(req) + assert.NoError(t, req.Error) + + // assert response + assert.NotNil(t, out) // ensure out variable is used + assert.Equal(t, "10", *out.AllHeaders["Content-Length"]) + assert.Equal(t, "boo", *out.AllHeaders["X-Bam"]) + assert.Equal(t, "bar", *out.AllHeaders["X-Foo"]) + assert.Equal(t, "boo", *out.PrefixedHeaders["Bam"]) + assert.Equal(t, "bar", *out.PrefixedHeaders["Foo"]) + +} + +func TestOutputService10ProtocolTestJSONPayloadCase1(t *testing.T) { + sess := session.New() + svc := NewOutputService10ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + + buf := 
bytes.NewReader([]byte("{\"Foo\": \"abc\"}")) + req, out := svc.OutputService10TestCaseOperation1Request(nil) + req.HTTPResponse = &http.Response{StatusCode: 200, Body: ioutil.NopCloser(buf), Header: http.Header{}} + + // set headers + req.HTTPResponse.Header.Set("X-Foo", "baz") + + // unmarshal response + restjson.UnmarshalMeta(req) + restjson.Unmarshal(req) + assert.NoError(t, req.Error) + + // assert response + assert.NotNil(t, out) // ensure out variable is used + assert.Equal(t, "abc", *out.Data.Foo) + assert.Equal(t, "baz", *out.Header) + +} + +func TestOutputService11ProtocolTestStreamingPayloadCase1(t *testing.T) { + sess := session.New() + svc := NewOutputService11ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + + buf := bytes.NewReader([]byte("abc")) + req, out := svc.OutputService11TestCaseOperation1Request(nil) + req.HTTPResponse = &http.Response{StatusCode: 200, Body: ioutil.NopCloser(buf), Header: http.Header{}} + + // set headers + + // unmarshal response + restjson.UnmarshalMeta(req) + restjson.Unmarshal(req) + assert.NoError(t, req.Error) + + // assert response + assert.NotNil(t, out) // ensure out variable is used + assert.Equal(t, "abc", string(out.Stream)) + +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/restxml/build_bench_test.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/restxml/build_bench_test.go new file mode 100644 index 000000000..081716739 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/restxml/build_bench_test.go @@ -0,0 +1,246 @@ +// +build bench + +package restxml_test + +import ( + "testing" + + "bytes" + "encoding/xml" + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/awstesting" + "github.com/aws/aws-sdk-go/private/protocol/restxml" + "github.com/aws/aws-sdk-go/service/cloudfront" +) + +func BenchmarkRESTXMLBuild_Complex_cloudfrontCreateDistribution(b *testing.B) { + params := restxmlBuildCreateDistroParms + + op := &request.Operation{ + Name: "CreateDistribution", + HTTPMethod: "POST", + HTTPPath: "/2015-04-17/distribution/{DistributionId}/invalidation", + } + + benchRESTXMLBuild(b, op, params) +} + +func BenchmarkRESTXMLBuild_Simple_cloudfrontDeleteStreamingDistribution(b *testing.B) { + params := &cloudfront.DeleteDistributionInput{ + Id: aws.String("string"), // Required + IfMatch: aws.String("string"), + } + op := &request.Operation{ + Name: "DeleteStreamingDistribution", + HTTPMethod: "DELETE", + HTTPPath: "/2015-04-17/streaming-distribution/{Id}", + } + benchRESTXMLBuild(b, op, params) +} + +func BenchmarkEncodingXMLMarshal_Simple_cloudfrontDeleteStreamingDistribution(b *testing.B) { + params := &cloudfront.DeleteDistributionInput{ + Id: aws.String("string"), // Required + IfMatch: aws.String("string"), + } + + for i := 0; i < b.N; i++ { + buf := &bytes.Buffer{} + encoder := xml.NewEncoder(buf) + if err := encoder.Encode(params); err != nil { + b.Fatal("Unexpected error", err) + } + } +} + +func benchRESTXMLBuild(b *testing.B, op *request.Operation, params interface{}) { + svc := awstesting.NewClient() + svc.ServiceName = "cloudfront" + svc.APIVersion = "2015-04-17" + + for i := 0; i < b.N; i++ { + r := svc.NewRequest(op, params, nil) + restxml.Build(r) + if r.Error != nil { + b.Fatal("Unexpected error", r.Error) + } + } +} + +var restxmlBuildCreateDistroParms = &cloudfront.CreateDistributionInput{ + DistributionConfig: &cloudfront.DistributionConfig{ // Required + CallerReference: aws.String("string"), // Required + 
Comment: aws.String("string"), // Required + DefaultCacheBehavior: &cloudfront.DefaultCacheBehavior{ // Required + ForwardedValues: &cloudfront.ForwardedValues{ // Required + Cookies: &cloudfront.CookiePreference{ // Required + Forward: aws.String("ItemSelection"), // Required + WhitelistedNames: &cloudfront.CookieNames{ + Quantity: aws.Int64(1), // Required + Items: []*string{ + aws.String("string"), // Required + // More values... + }, + }, + }, + QueryString: aws.Bool(true), // Required + Headers: &cloudfront.Headers{ + Quantity: aws.Int64(1), // Required + Items: []*string{ + aws.String("string"), // Required + // More values... + }, + }, + }, + MinTTL: aws.Int64(1), // Required + TargetOriginId: aws.String("string"), // Required + TrustedSigners: &cloudfront.TrustedSigners{ // Required + Enabled: aws.Bool(true), // Required + Quantity: aws.Int64(1), // Required + Items: []*string{ + aws.String("string"), // Required + // More values... + }, + }, + ViewerProtocolPolicy: aws.String("ViewerProtocolPolicy"), // Required + AllowedMethods: &cloudfront.AllowedMethods{ + Items: []*string{ // Required + aws.String("Method"), // Required + // More values... + }, + Quantity: aws.Int64(1), // Required + CachedMethods: &cloudfront.CachedMethods{ + Items: []*string{ // Required + aws.String("Method"), // Required + // More values... + }, + Quantity: aws.Int64(1), // Required + }, + }, + DefaultTTL: aws.Int64(1), + MaxTTL: aws.Int64(1), + SmoothStreaming: aws.Bool(true), + }, + Enabled: aws.Bool(true), // Required + Origins: &cloudfront.Origins{ // Required + Quantity: aws.Int64(1), // Required + Items: []*cloudfront.Origin{ + { // Required + DomainName: aws.String("string"), // Required + Id: aws.String("string"), // Required + CustomOriginConfig: &cloudfront.CustomOriginConfig{ + HTTPPort: aws.Int64(1), // Required + HTTPSPort: aws.Int64(1), // Required + OriginProtocolPolicy: aws.String("OriginProtocolPolicy"), // Required + }, + OriginPath: aws.String("string"), + S3OriginConfig: &cloudfront.S3OriginConfig{ + OriginAccessIdentity: aws.String("string"), // Required + }, + }, + // More values... + }, + }, + Aliases: &cloudfront.Aliases{ + Quantity: aws.Int64(1), // Required + Items: []*string{ + aws.String("string"), // Required + // More values... + }, + }, + CacheBehaviors: &cloudfront.CacheBehaviors{ + Quantity: aws.Int64(1), // Required + Items: []*cloudfront.CacheBehavior{ + { // Required + ForwardedValues: &cloudfront.ForwardedValues{ // Required + Cookies: &cloudfront.CookiePreference{ // Required + Forward: aws.String("ItemSelection"), // Required + WhitelistedNames: &cloudfront.CookieNames{ + Quantity: aws.Int64(1), // Required + Items: []*string{ + aws.String("string"), // Required + // More values... + }, + }, + }, + QueryString: aws.Bool(true), // Required + Headers: &cloudfront.Headers{ + Quantity: aws.Int64(1), // Required + Items: []*string{ + aws.String("string"), // Required + // More values... + }, + }, + }, + MinTTL: aws.Int64(1), // Required + PathPattern: aws.String("string"), // Required + TargetOriginId: aws.String("string"), // Required + TrustedSigners: &cloudfront.TrustedSigners{ // Required + Enabled: aws.Bool(true), // Required + Quantity: aws.Int64(1), // Required + Items: []*string{ + aws.String("string"), // Required + // More values... + }, + }, + ViewerProtocolPolicy: aws.String("ViewerProtocolPolicy"), // Required + AllowedMethods: &cloudfront.AllowedMethods{ + Items: []*string{ // Required + aws.String("Method"), // Required + // More values... 
+ }, + Quantity: aws.Int64(1), // Required + CachedMethods: &cloudfront.CachedMethods{ + Items: []*string{ // Required + aws.String("Method"), // Required + // More values... + }, + Quantity: aws.Int64(1), // Required + }, + }, + DefaultTTL: aws.Int64(1), + MaxTTL: aws.Int64(1), + SmoothStreaming: aws.Bool(true), + }, + // More values... + }, + }, + CustomErrorResponses: &cloudfront.CustomErrorResponses{ + Quantity: aws.Int64(1), // Required + Items: []*cloudfront.CustomErrorResponse{ + { // Required + ErrorCode: aws.Int64(1), // Required + ErrorCachingMinTTL: aws.Int64(1), + ResponseCode: aws.String("string"), + ResponsePagePath: aws.String("string"), + }, + // More values... + }, + }, + DefaultRootObject: aws.String("string"), + Logging: &cloudfront.LoggingConfig{ + Bucket: aws.String("string"), // Required + Enabled: aws.Bool(true), // Required + IncludeCookies: aws.Bool(true), // Required + Prefix: aws.String("string"), // Required + }, + PriceClass: aws.String("PriceClass"), + Restrictions: &cloudfront.Restrictions{ + GeoRestriction: &cloudfront.GeoRestriction{ // Required + Quantity: aws.Int64(1), // Required + RestrictionType: aws.String("GeoRestrictionType"), // Required + Items: []*string{ + aws.String("string"), // Required + // More values... + }, + }, + }, + ViewerCertificate: &cloudfront.ViewerCertificate{ + CloudFrontDefaultCertificate: aws.Bool(true), + IAMCertificateId: aws.String("string"), + MinimumProtocolVersion: aws.String("MinimumProtocolVersion"), + SSLSupportMethod: aws.String("SSLSupportMethod"), + }, + }, +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/restxml/build_test.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/restxml/build_test.go new file mode 100644 index 000000000..8e08bcc23 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/restxml/build_test.go @@ -0,0 +1,3338 @@ +package restxml_test + +import ( + "bytes" + "encoding/json" + "encoding/xml" + "io" + "io/ioutil" + "net/http" + "net/url" + "testing" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/client" + "github.com/aws/aws-sdk-go/aws/client/metadata" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/aws/session" + "github.com/aws/aws-sdk-go/awstesting" + "github.com/aws/aws-sdk-go/private/protocol/restxml" + "github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil" + "github.com/aws/aws-sdk-go/private/signer/v4" + "github.com/aws/aws-sdk-go/private/util" + "github.com/stretchr/testify/assert" +) + +var _ bytes.Buffer // always import bytes +var _ http.Request +var _ json.Marshaler +var _ time.Time +var _ xmlutil.XMLNode +var _ xml.Attr +var _ = awstesting.GenerateAssertions +var _ = ioutil.Discard +var _ = util.Trim("") +var _ = url.Values{} +var _ = io.EOF +var _ = aws.String + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type InputService1ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the InputService1ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a InputService1ProtocolTest client from just a session. 
+// svc := inputservice1protocoltest.New(mySession) +// +// // Create a InputService1ProtocolTest client with additional configuration +// svc := inputservice1protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewInputService1ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *InputService1ProtocolTest { + c := p.ClientConfig("inputservice1protocoltest", cfgs...) + return newInputService1ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newInputService1ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *InputService1ProtocolTest { + svc := &InputService1ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "inputservice1protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "2014-01-01", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Build.PushBack(restxml.Build) + svc.Handlers.Unmarshal.PushBack(restxml.Unmarshal) + svc.Handlers.UnmarshalMeta.PushBack(restxml.UnmarshalMeta) + svc.Handlers.UnmarshalError.PushBack(restxml.UnmarshalError) + + return svc +} + +// newRequest creates a new request for a InputService1ProtocolTest operation and runs any +// custom request initialization. +func (c *InputService1ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opInputService1TestCaseOperation1 = "OperationName" + +// InputService1TestCaseOperation1Request generates a request for the InputService1TestCaseOperation1 operation. +func (c *InputService1ProtocolTest) InputService1TestCaseOperation1Request(input *InputService1TestShapeInputShape) (req *request.Request, output *InputService1TestShapeInputService1TestCaseOperation1Output) { + op := &request.Operation{ + Name: opInputService1TestCaseOperation1, + HTTPMethod: "POST", + HTTPPath: "/2014-01-01/hostedzone", + } + + if input == nil { + input = &InputService1TestShapeInputShape{} + } + + req = c.newRequest(op, input, output) + output = &InputService1TestShapeInputService1TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *InputService1ProtocolTest) InputService1TestCaseOperation1(input *InputService1TestShapeInputShape) (*InputService1TestShapeInputService1TestCaseOperation1Output, error) { + req, out := c.InputService1TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +const opInputService1TestCaseOperation2 = "OperationName" + +// InputService1TestCaseOperation2Request generates a request for the InputService1TestCaseOperation2 operation. 
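+// A sketch of the generated two-step call pattern, assuming an svc built as in
+// the constructor example above: the *Request method only builds the request,
+// and the caller sends it explicitly.
+//
+//   req, out := svc.InputService1TestCaseOperation2Request(&InputService1TestShapeInputShape{})
+//   if err := req.Send(); err == nil {
+//       _ = out // out is populated once Send succeeds
+//   }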
+func (c *InputService1ProtocolTest) InputService1TestCaseOperation2Request(input *InputService1TestShapeInputShape) (req *request.Request, output *InputService1TestShapeInputService1TestCaseOperation2Output) { + op := &request.Operation{ + Name: opInputService1TestCaseOperation2, + HTTPMethod: "PUT", + HTTPPath: "/2014-01-01/hostedzone", + } + + if input == nil { + input = &InputService1TestShapeInputShape{} + } + + req = c.newRequest(op, input, output) + output = &InputService1TestShapeInputService1TestCaseOperation2Output{} + req.Data = output + return +} + +func (c *InputService1ProtocolTest) InputService1TestCaseOperation2(input *InputService1TestShapeInputShape) (*InputService1TestShapeInputService1TestCaseOperation2Output, error) { + req, out := c.InputService1TestCaseOperation2Request(input) + err := req.Send() + return out, err +} + +const opInputService1TestCaseOperation3 = "OperationName" + +// InputService1TestCaseOperation3Request generates a request for the InputService1TestCaseOperation3 operation. +func (c *InputService1ProtocolTest) InputService1TestCaseOperation3Request(input *InputService1TestShapeInputService1TestCaseOperation3Input) (req *request.Request, output *InputService1TestShapeInputService1TestCaseOperation3Output) { + op := &request.Operation{ + Name: opInputService1TestCaseOperation3, + HTTPMethod: "GET", + HTTPPath: "/2014-01-01/hostedzone", + } + + if input == nil { + input = &InputService1TestShapeInputService1TestCaseOperation3Input{} + } + + req = c.newRequest(op, input, output) + output = &InputService1TestShapeInputService1TestCaseOperation3Output{} + req.Data = output + return +} + +func (c *InputService1ProtocolTest) InputService1TestCaseOperation3(input *InputService1TestShapeInputService1TestCaseOperation3Input) (*InputService1TestShapeInputService1TestCaseOperation3Output, error) { + req, out := c.InputService1TestCaseOperation3Request(input) + err := req.Send() + return out, err +} + +type InputService1TestShapeInputService1TestCaseOperation1Output struct { + _ struct{} `type:"structure"` +} + +type InputService1TestShapeInputService1TestCaseOperation2Output struct { + _ struct{} `type:"structure"` +} + +type InputService1TestShapeInputService1TestCaseOperation3Input struct { + _ struct{} `type:"structure"` +} + +type InputService1TestShapeInputService1TestCaseOperation3Output struct { + _ struct{} `type:"structure"` +} + +type InputService1TestShapeInputShape struct { + _ struct{} `locationName:"OperationRequest" type:"structure" xmlURI:"https://foo/"` + + Description *string `type:"string"` + + Name *string `type:"string"` +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type InputService2ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the InputService2ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a InputService2ProtocolTest client from just a session. +// svc := inputservice2protocoltest.New(mySession) +// +// // Create a InputService2ProtocolTest client with additional configuration +// svc := inputservice2protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewInputService2ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *InputService2ProtocolTest { + c := p.ClientConfig("inputservice2protocoltest", cfgs...) 
+ return newInputService2ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newInputService2ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *InputService2ProtocolTest { + svc := &InputService2ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "inputservice2protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "2014-01-01", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Build.PushBack(restxml.Build) + svc.Handlers.Unmarshal.PushBack(restxml.Unmarshal) + svc.Handlers.UnmarshalMeta.PushBack(restxml.UnmarshalMeta) + svc.Handlers.UnmarshalError.PushBack(restxml.UnmarshalError) + + return svc +} + +// newRequest creates a new request for a InputService2ProtocolTest operation and runs any +// custom request initialization. +func (c *InputService2ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opInputService2TestCaseOperation1 = "OperationName" + +// InputService2TestCaseOperation1Request generates a request for the InputService2TestCaseOperation1 operation. +func (c *InputService2ProtocolTest) InputService2TestCaseOperation1Request(input *InputService2TestShapeInputService2TestCaseOperation1Input) (req *request.Request, output *InputService2TestShapeInputService2TestCaseOperation1Output) { + op := &request.Operation{ + Name: opInputService2TestCaseOperation1, + HTTPMethod: "POST", + HTTPPath: "/2014-01-01/hostedzone", + } + + if input == nil { + input = &InputService2TestShapeInputService2TestCaseOperation1Input{} + } + + req = c.newRequest(op, input, output) + output = &InputService2TestShapeInputService2TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *InputService2ProtocolTest) InputService2TestCaseOperation1(input *InputService2TestShapeInputService2TestCaseOperation1Input) (*InputService2TestShapeInputService2TestCaseOperation1Output, error) { + req, out := c.InputService2TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +type InputService2TestShapeInputService2TestCaseOperation1Input struct { + _ struct{} `locationName:"OperationRequest" type:"structure" xmlURI:"https://foo/"` + + First *bool `type:"boolean"` + + Fourth *int64 `type:"integer"` + + Second *bool `type:"boolean"` + + Third *float64 `type:"float"` +} + +type InputService2TestShapeInputService2TestCaseOperation1Output struct { + _ struct{} `type:"structure"` +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type InputService3ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the InputService3ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a InputService3ProtocolTest client from just a session. 
+// svc := inputservice3protocoltest.New(mySession) +// +// // Create a InputService3ProtocolTest client with additional configuration +// svc := inputservice3protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewInputService3ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *InputService3ProtocolTest { + c := p.ClientConfig("inputservice3protocoltest", cfgs...) + return newInputService3ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newInputService3ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *InputService3ProtocolTest { + svc := &InputService3ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "inputservice3protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "2014-01-01", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Build.PushBack(restxml.Build) + svc.Handlers.Unmarshal.PushBack(restxml.Unmarshal) + svc.Handlers.UnmarshalMeta.PushBack(restxml.UnmarshalMeta) + svc.Handlers.UnmarshalError.PushBack(restxml.UnmarshalError) + + return svc +} + +// newRequest creates a new request for a InputService3ProtocolTest operation and runs any +// custom request initialization. +func (c *InputService3ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opInputService3TestCaseOperation1 = "OperationName" + +// InputService3TestCaseOperation1Request generates a request for the InputService3TestCaseOperation1 operation. +func (c *InputService3ProtocolTest) InputService3TestCaseOperation1Request(input *InputService3TestShapeInputShape) (req *request.Request, output *InputService3TestShapeInputService3TestCaseOperation1Output) { + op := &request.Operation{ + Name: opInputService3TestCaseOperation1, + HTTPMethod: "POST", + HTTPPath: "/2014-01-01/hostedzone", + } + + if input == nil { + input = &InputService3TestShapeInputShape{} + } + + req = c.newRequest(op, input, output) + output = &InputService3TestShapeInputService3TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *InputService3ProtocolTest) InputService3TestCaseOperation1(input *InputService3TestShapeInputShape) (*InputService3TestShapeInputService3TestCaseOperation1Output, error) { + req, out := c.InputService3TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +const opInputService3TestCaseOperation2 = "OperationName" + +// InputService3TestCaseOperation2Request generates a request for the InputService3TestCaseOperation2 operation. 
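+// Note that operations 1 and 2 of this test service share a single input
+// shape, InputService3TestShapeInputShape, which is why the shape carries the
+// generic "InputShape" name rather than a per-operation one.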
+func (c *InputService3ProtocolTest) InputService3TestCaseOperation2Request(input *InputService3TestShapeInputShape) (req *request.Request, output *InputService3TestShapeInputService3TestCaseOperation2Output) { + op := &request.Operation{ + Name: opInputService3TestCaseOperation2, + HTTPMethod: "POST", + HTTPPath: "/2014-01-01/hostedzone", + } + + if input == nil { + input = &InputService3TestShapeInputShape{} + } + + req = c.newRequest(op, input, output) + output = &InputService3TestShapeInputService3TestCaseOperation2Output{} + req.Data = output + return +} + +func (c *InputService3ProtocolTest) InputService3TestCaseOperation2(input *InputService3TestShapeInputShape) (*InputService3TestShapeInputService3TestCaseOperation2Output, error) { + req, out := c.InputService3TestCaseOperation2Request(input) + err := req.Send() + return out, err +} + +type InputService3TestShapeInputService3TestCaseOperation1Output struct { + _ struct{} `type:"structure"` +} + +type InputService3TestShapeInputService3TestCaseOperation2Output struct { + _ struct{} `type:"structure"` +} + +type InputService3TestShapeInputShape struct { + _ struct{} `locationName:"OperationRequest" type:"structure" xmlURI:"https://foo/"` + + Description *string `type:"string"` + + SubStructure *InputService3TestShapeSubStructure `type:"structure"` +} + +type InputService3TestShapeSubStructure struct { + _ struct{} `type:"structure"` + + Bar *string `type:"string"` + + Foo *string `type:"string"` +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type InputService4ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the InputService4ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a InputService4ProtocolTest client from just a session. +// svc := inputservice4protocoltest.New(mySession) +// +// // Create a InputService4ProtocolTest client with additional configuration +// svc := inputservice4protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewInputService4ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *InputService4ProtocolTest { + c := p.ClientConfig("inputservice4protocoltest", cfgs...) + return newInputService4ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newInputService4ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *InputService4ProtocolTest { + svc := &InputService4ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "inputservice4protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "2014-01-01", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Build.PushBack(restxml.Build) + svc.Handlers.Unmarshal.PushBack(restxml.Unmarshal) + svc.Handlers.UnmarshalMeta.PushBack(restxml.UnmarshalMeta) + svc.Handlers.UnmarshalError.PushBack(restxml.UnmarshalError) + + return svc +} + +// newRequest creates a new request for a InputService4ProtocolTest operation and runs any +// custom request initialization. 
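+// These generated test clients register no custom initialization, so
+// newRequest simply delegates to the embedded Client's NewRequest.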
+func (c *InputService4ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opInputService4TestCaseOperation1 = "OperationName" + +// InputService4TestCaseOperation1Request generates a request for the InputService4TestCaseOperation1 operation. +func (c *InputService4ProtocolTest) InputService4TestCaseOperation1Request(input *InputService4TestShapeInputService4TestCaseOperation1Input) (req *request.Request, output *InputService4TestShapeInputService4TestCaseOperation1Output) { + op := &request.Operation{ + Name: opInputService4TestCaseOperation1, + HTTPMethod: "POST", + HTTPPath: "/2014-01-01/hostedzone", + } + + if input == nil { + input = &InputService4TestShapeInputService4TestCaseOperation1Input{} + } + + req = c.newRequest(op, input, output) + output = &InputService4TestShapeInputService4TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *InputService4ProtocolTest) InputService4TestCaseOperation1(input *InputService4TestShapeInputService4TestCaseOperation1Input) (*InputService4TestShapeInputService4TestCaseOperation1Output, error) { + req, out := c.InputService4TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +type InputService4TestShapeInputService4TestCaseOperation1Input struct { + _ struct{} `locationName:"OperationRequest" type:"structure" xmlURI:"https://foo/"` + + Description *string `type:"string"` + + SubStructure *InputService4TestShapeSubStructure `type:"structure"` +} + +type InputService4TestShapeInputService4TestCaseOperation1Output struct { + _ struct{} `type:"structure"` +} + +type InputService4TestShapeSubStructure struct { + _ struct{} `type:"structure"` + + Bar *string `type:"string"` + + Foo *string `type:"string"` +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type InputService5ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the InputService5ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a InputService5ProtocolTest client from just a session. +// svc := inputservice5protocoltest.New(mySession) +// +// // Create a InputService5ProtocolTest client with additional configuration +// svc := inputservice5protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewInputService5ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *InputService5ProtocolTest { + c := p.ClientConfig("inputservice5protocoltest", cfgs...) + return newInputService5ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. 
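+// Every protocol-test client in this file is wired identically: v4.Sign signs
+// the request, while the restxml Build, Unmarshal, UnmarshalMeta and
+// UnmarshalError handlers implement the REST-XML protocol under test.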
+func newInputService5ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *InputService5ProtocolTest { + svc := &InputService5ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "inputservice5protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "2014-01-01", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Build.PushBack(restxml.Build) + svc.Handlers.Unmarshal.PushBack(restxml.Unmarshal) + svc.Handlers.UnmarshalMeta.PushBack(restxml.UnmarshalMeta) + svc.Handlers.UnmarshalError.PushBack(restxml.UnmarshalError) + + return svc +} + +// newRequest creates a new request for a InputService5ProtocolTest operation and runs any +// custom request initialization. +func (c *InputService5ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opInputService5TestCaseOperation1 = "OperationName" + +// InputService5TestCaseOperation1Request generates a request for the InputService5TestCaseOperation1 operation. +func (c *InputService5ProtocolTest) InputService5TestCaseOperation1Request(input *InputService5TestShapeInputService5TestCaseOperation1Input) (req *request.Request, output *InputService5TestShapeInputService5TestCaseOperation1Output) { + op := &request.Operation{ + Name: opInputService5TestCaseOperation1, + HTTPMethod: "POST", + HTTPPath: "/2014-01-01/hostedzone", + } + + if input == nil { + input = &InputService5TestShapeInputService5TestCaseOperation1Input{} + } + + req = c.newRequest(op, input, output) + output = &InputService5TestShapeInputService5TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *InputService5ProtocolTest) InputService5TestCaseOperation1(input *InputService5TestShapeInputService5TestCaseOperation1Input) (*InputService5TestShapeInputService5TestCaseOperation1Output, error) { + req, out := c.InputService5TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +type InputService5TestShapeInputService5TestCaseOperation1Input struct { + _ struct{} `locationName:"OperationRequest" type:"structure" xmlURI:"https://foo/"` + + ListParam []*string `type:"list"` +} + +type InputService5TestShapeInputService5TestCaseOperation1Output struct { + _ struct{} `type:"structure"` +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type InputService6ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the InputService6ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a InputService6ProtocolTest client from just a session. +// svc := inputservice6protocoltest.New(mySession) +// +// // Create a InputService6ProtocolTest client with additional configuration +// svc := inputservice6protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewInputService6ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *InputService6ProtocolTest { + c := p.ClientConfig("inputservice6protocoltest", cfgs...) + return newInputService6ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. 
+func newInputService6ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *InputService6ProtocolTest { + svc := &InputService6ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "inputservice6protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "2014-01-01", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Build.PushBack(restxml.Build) + svc.Handlers.Unmarshal.PushBack(restxml.Unmarshal) + svc.Handlers.UnmarshalMeta.PushBack(restxml.UnmarshalMeta) + svc.Handlers.UnmarshalError.PushBack(restxml.UnmarshalError) + + return svc +} + +// newRequest creates a new request for a InputService6ProtocolTest operation and runs any +// custom request initialization. +func (c *InputService6ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opInputService6TestCaseOperation1 = "OperationName" + +// InputService6TestCaseOperation1Request generates a request for the InputService6TestCaseOperation1 operation. +func (c *InputService6ProtocolTest) InputService6TestCaseOperation1Request(input *InputService6TestShapeInputService6TestCaseOperation1Input) (req *request.Request, output *InputService6TestShapeInputService6TestCaseOperation1Output) { + op := &request.Operation{ + Name: opInputService6TestCaseOperation1, + HTTPMethod: "POST", + HTTPPath: "/2014-01-01/hostedzone", + } + + if input == nil { + input = &InputService6TestShapeInputService6TestCaseOperation1Input{} + } + + req = c.newRequest(op, input, output) + output = &InputService6TestShapeInputService6TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *InputService6ProtocolTest) InputService6TestCaseOperation1(input *InputService6TestShapeInputService6TestCaseOperation1Input) (*InputService6TestShapeInputService6TestCaseOperation1Output, error) { + req, out := c.InputService6TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +type InputService6TestShapeInputService6TestCaseOperation1Input struct { + _ struct{} `locationName:"OperationRequest" type:"structure" xmlURI:"https://foo/"` + + ListParam []*string `locationName:"AlternateName" locationNameList:"NotMember" type:"list"` +} + +type InputService6TestShapeInputService6TestCaseOperation1Output struct { + _ struct{} `type:"structure"` +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type InputService7ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the InputService7ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a InputService7ProtocolTest client from just a session. +// svc := inputservice7protocoltest.New(mySession) +// +// // Create a InputService7ProtocolTest client with additional configuration +// svc := inputservice7protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewInputService7ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *InputService7ProtocolTest { + c := p.ClientConfig("inputservice7protocoltest", cfgs...) + return newInputService7ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. 
+func newInputService7ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *InputService7ProtocolTest { + svc := &InputService7ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "inputservice7protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "2014-01-01", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Build.PushBack(restxml.Build) + svc.Handlers.Unmarshal.PushBack(restxml.Unmarshal) + svc.Handlers.UnmarshalMeta.PushBack(restxml.UnmarshalMeta) + svc.Handlers.UnmarshalError.PushBack(restxml.UnmarshalError) + + return svc +} + +// newRequest creates a new request for a InputService7ProtocolTest operation and runs any +// custom request initialization. +func (c *InputService7ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opInputService7TestCaseOperation1 = "OperationName" + +// InputService7TestCaseOperation1Request generates a request for the InputService7TestCaseOperation1 operation. +func (c *InputService7ProtocolTest) InputService7TestCaseOperation1Request(input *InputService7TestShapeInputService7TestCaseOperation1Input) (req *request.Request, output *InputService7TestShapeInputService7TestCaseOperation1Output) { + op := &request.Operation{ + Name: opInputService7TestCaseOperation1, + HTTPMethod: "POST", + HTTPPath: "/2014-01-01/hostedzone", + } + + if input == nil { + input = &InputService7TestShapeInputService7TestCaseOperation1Input{} + } + + req = c.newRequest(op, input, output) + output = &InputService7TestShapeInputService7TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *InputService7ProtocolTest) InputService7TestCaseOperation1(input *InputService7TestShapeInputService7TestCaseOperation1Input) (*InputService7TestShapeInputService7TestCaseOperation1Output, error) { + req, out := c.InputService7TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +type InputService7TestShapeInputService7TestCaseOperation1Input struct { + _ struct{} `locationName:"OperationRequest" type:"structure" xmlURI:"https://foo/"` + + ListParam []*string `type:"list" flattened:"true"` +} + +type InputService7TestShapeInputService7TestCaseOperation1Output struct { + _ struct{} `type:"structure"` +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type InputService8ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the InputService8ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a InputService8ProtocolTest client from just a session. +// svc := inputservice8protocoltest.New(mySession) +// +// // Create a InputService8ProtocolTest client with additional configuration +// svc := inputservice8protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewInputService8ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *InputService8ProtocolTest { + c := p.ClientConfig("inputservice8protocoltest", cfgs...) + return newInputService8ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. 
+func newInputService8ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *InputService8ProtocolTest { + svc := &InputService8ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "inputservice8protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "2014-01-01", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Build.PushBack(restxml.Build) + svc.Handlers.Unmarshal.PushBack(restxml.Unmarshal) + svc.Handlers.UnmarshalMeta.PushBack(restxml.UnmarshalMeta) + svc.Handlers.UnmarshalError.PushBack(restxml.UnmarshalError) + + return svc +} + +// newRequest creates a new request for a InputService8ProtocolTest operation and runs any +// custom request initialization. +func (c *InputService8ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opInputService8TestCaseOperation1 = "OperationName" + +// InputService8TestCaseOperation1Request generates a request for the InputService8TestCaseOperation1 operation. +func (c *InputService8ProtocolTest) InputService8TestCaseOperation1Request(input *InputService8TestShapeInputService8TestCaseOperation1Input) (req *request.Request, output *InputService8TestShapeInputService8TestCaseOperation1Output) { + op := &request.Operation{ + Name: opInputService8TestCaseOperation1, + HTTPMethod: "POST", + HTTPPath: "/2014-01-01/hostedzone", + } + + if input == nil { + input = &InputService8TestShapeInputService8TestCaseOperation1Input{} + } + + req = c.newRequest(op, input, output) + output = &InputService8TestShapeInputService8TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *InputService8ProtocolTest) InputService8TestCaseOperation1(input *InputService8TestShapeInputService8TestCaseOperation1Input) (*InputService8TestShapeInputService8TestCaseOperation1Output, error) { + req, out := c.InputService8TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +type InputService8TestShapeInputService8TestCaseOperation1Input struct { + _ struct{} `locationName:"OperationRequest" type:"structure" xmlURI:"https://foo/"` + + ListParam []*string `locationName:"item" type:"list" flattened:"true"` +} + +type InputService8TestShapeInputService8TestCaseOperation1Output struct { + _ struct{} `type:"structure"` +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type InputService9ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the InputService9ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a InputService9ProtocolTest client from just a session. +// svc := inputservice9protocoltest.New(mySession) +// +// // Create a InputService9ProtocolTest client with additional configuration +// svc := inputservice9protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewInputService9ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *InputService9ProtocolTest { + c := p.ClientConfig("inputservice9protocoltest", cfgs...) + return newInputService9ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. 
+func newInputService9ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *InputService9ProtocolTest { + svc := &InputService9ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "inputservice9protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "2014-01-01", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Build.PushBack(restxml.Build) + svc.Handlers.Unmarshal.PushBack(restxml.Unmarshal) + svc.Handlers.UnmarshalMeta.PushBack(restxml.UnmarshalMeta) + svc.Handlers.UnmarshalError.PushBack(restxml.UnmarshalError) + + return svc +} + +// newRequest creates a new request for a InputService9ProtocolTest operation and runs any +// custom request initialization. +func (c *InputService9ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opInputService9TestCaseOperation1 = "OperationName" + +// InputService9TestCaseOperation1Request generates a request for the InputService9TestCaseOperation1 operation. +func (c *InputService9ProtocolTest) InputService9TestCaseOperation1Request(input *InputService9TestShapeInputService9TestCaseOperation1Input) (req *request.Request, output *InputService9TestShapeInputService9TestCaseOperation1Output) { + op := &request.Operation{ + Name: opInputService9TestCaseOperation1, + HTTPMethod: "POST", + HTTPPath: "/2014-01-01/hostedzone", + } + + if input == nil { + input = &InputService9TestShapeInputService9TestCaseOperation1Input{} + } + + req = c.newRequest(op, input, output) + output = &InputService9TestShapeInputService9TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *InputService9ProtocolTest) InputService9TestCaseOperation1(input *InputService9TestShapeInputService9TestCaseOperation1Input) (*InputService9TestShapeInputService9TestCaseOperation1Output, error) { + req, out := c.InputService9TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +type InputService9TestShapeInputService9TestCaseOperation1Input struct { + _ struct{} `locationName:"OperationRequest" type:"structure" xmlURI:"https://foo/"` + + ListParam []*InputService9TestShapeSingleFieldStruct `locationName:"item" type:"list" flattened:"true"` +} + +type InputService9TestShapeInputService9TestCaseOperation1Output struct { + _ struct{} `type:"structure"` +} + +type InputService9TestShapeSingleFieldStruct struct { + _ struct{} `type:"structure"` + + Element *string `locationName:"value" type:"string"` +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type InputService10ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the InputService10ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a InputService10ProtocolTest client from just a session. +// svc := inputservice10protocoltest.New(mySession) +// +// // Create a InputService10ProtocolTest client with additional configuration +// svc := inputservice10protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewInputService10ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *InputService10ProtocolTest { + c := p.ClientConfig("inputservice10protocoltest", cfgs...) 
+ return newInputService10ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newInputService10ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *InputService10ProtocolTest { + svc := &InputService10ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "inputservice10protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "2014-01-01", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Build.PushBack(restxml.Build) + svc.Handlers.Unmarshal.PushBack(restxml.Unmarshal) + svc.Handlers.UnmarshalMeta.PushBack(restxml.UnmarshalMeta) + svc.Handlers.UnmarshalError.PushBack(restxml.UnmarshalError) + + return svc +} + +// newRequest creates a new request for a InputService10ProtocolTest operation and runs any +// custom request initialization. +func (c *InputService10ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opInputService10TestCaseOperation1 = "OperationName" + +// InputService10TestCaseOperation1Request generates a request for the InputService10TestCaseOperation1 operation. +func (c *InputService10ProtocolTest) InputService10TestCaseOperation1Request(input *InputService10TestShapeInputService10TestCaseOperation1Input) (req *request.Request, output *InputService10TestShapeInputService10TestCaseOperation1Output) { + op := &request.Operation{ + Name: opInputService10TestCaseOperation1, + HTTPMethod: "POST", + HTTPPath: "/2014-01-01/hostedzone", + } + + if input == nil { + input = &InputService10TestShapeInputService10TestCaseOperation1Input{} + } + + req = c.newRequest(op, input, output) + output = &InputService10TestShapeInputService10TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *InputService10ProtocolTest) InputService10TestCaseOperation1(input *InputService10TestShapeInputService10TestCaseOperation1Input) (*InputService10TestShapeInputService10TestCaseOperation1Output, error) { + req, out := c.InputService10TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +type InputService10TestShapeInputService10TestCaseOperation1Input struct { + _ struct{} `locationName:"OperationRequest" type:"structure" xmlURI:"https://foo/"` + + StructureParam *InputService10TestShapeStructureShape `type:"structure"` +} + +type InputService10TestShapeInputService10TestCaseOperation1Output struct { + _ struct{} `type:"structure"` +} + +type InputService10TestShapeStructureShape struct { + _ struct{} `type:"structure"` + + B []byte `locationName:"b" type:"blob"` + + T *time.Time `locationName:"t" type:"timestamp" timestampFormat:"iso8601"` +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type InputService11ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the InputService11ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a InputService11ProtocolTest client from just a session. 
+// svc := inputservice11protocoltest.New(mySession) +// +// // Create a InputService11ProtocolTest client with additional configuration +// svc := inputservice11protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewInputService11ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *InputService11ProtocolTest { + c := p.ClientConfig("inputservice11protocoltest", cfgs...) + return newInputService11ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newInputService11ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *InputService11ProtocolTest { + svc := &InputService11ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "inputservice11protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "2014-01-01", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Build.PushBack(restxml.Build) + svc.Handlers.Unmarshal.PushBack(restxml.Unmarshal) + svc.Handlers.UnmarshalMeta.PushBack(restxml.UnmarshalMeta) + svc.Handlers.UnmarshalError.PushBack(restxml.UnmarshalError) + + return svc +} + +// newRequest creates a new request for a InputService11ProtocolTest operation and runs any +// custom request initialization. +func (c *InputService11ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opInputService11TestCaseOperation1 = "OperationName" + +// InputService11TestCaseOperation1Request generates a request for the InputService11TestCaseOperation1 operation. +func (c *InputService11ProtocolTest) InputService11TestCaseOperation1Request(input *InputService11TestShapeInputService11TestCaseOperation1Input) (req *request.Request, output *InputService11TestShapeInputService11TestCaseOperation1Output) { + op := &request.Operation{ + Name: opInputService11TestCaseOperation1, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &InputService11TestShapeInputService11TestCaseOperation1Input{} + } + + req = c.newRequest(op, input, output) + output = &InputService11TestShapeInputService11TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *InputService11ProtocolTest) InputService11TestCaseOperation1(input *InputService11TestShapeInputService11TestCaseOperation1Input) (*InputService11TestShapeInputService11TestCaseOperation1Output, error) { + req, out := c.InputService11TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +type InputService11TestShapeInputService11TestCaseOperation1Input struct { + _ struct{} `locationName:"OperationRequest" type:"structure" xmlURI:"https://foo/"` + + Foo map[string]*string `location:"headers" locationName:"x-foo-" type:"map"` +} + +type InputService11TestShapeInputService11TestCaseOperation1Output struct { + _ struct{} `type:"structure"` +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type InputService12ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the InputService12ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. 
+// +// Example: +// // Create a InputService12ProtocolTest client from just a session. +// svc := inputservice12protocoltest.New(mySession) +// +// // Create a InputService12ProtocolTest client with additional configuration +// svc := inputservice12protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewInputService12ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *InputService12ProtocolTest { + c := p.ClientConfig("inputservice12protocoltest", cfgs...) + return newInputService12ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newInputService12ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *InputService12ProtocolTest { + svc := &InputService12ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "inputservice12protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "2014-01-01", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Build.PushBack(restxml.Build) + svc.Handlers.Unmarshal.PushBack(restxml.Unmarshal) + svc.Handlers.UnmarshalMeta.PushBack(restxml.UnmarshalMeta) + svc.Handlers.UnmarshalError.PushBack(restxml.UnmarshalError) + + return svc +} + +// newRequest creates a new request for a InputService12ProtocolTest operation and runs any +// custom request initialization. +func (c *InputService12ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opInputService12TestCaseOperation1 = "OperationName" + +// InputService12TestCaseOperation1Request generates a request for the InputService12TestCaseOperation1 operation. +func (c *InputService12ProtocolTest) InputService12TestCaseOperation1Request(input *InputService12TestShapeInputService12TestCaseOperation1Input) (req *request.Request, output *InputService12TestShapeInputService12TestCaseOperation1Output) { + op := &request.Operation{ + Name: opInputService12TestCaseOperation1, + HTTPMethod: "GET", + HTTPPath: "/2014-01-01/jobsByPipeline/{PipelineId}", + } + + if input == nil { + input = &InputService12TestShapeInputService12TestCaseOperation1Input{} + } + + req = c.newRequest(op, input, output) + output = &InputService12TestShapeInputService12TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *InputService12ProtocolTest) InputService12TestCaseOperation1(input *InputService12TestShapeInputService12TestCaseOperation1Input) (*InputService12TestShapeInputService12TestCaseOperation1Output, error) { + req, out := c.InputService12TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +type InputService12TestShapeInputService12TestCaseOperation1Input struct { + _ struct{} `type:"structure"` + + PipelineId *string `location:"uri" type:"string"` + + QueryDoc map[string]*string `location:"querystring" type:"map"` +} + +type InputService12TestShapeInputService12TestCaseOperation1Output struct { + _ struct{} `type:"structure"` +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type InputService13ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the InputService13ProtocolTest client with a session. 
+// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a InputService13ProtocolTest client from just a session. +// svc := inputservice13protocoltest.New(mySession) +// +// // Create a InputService13ProtocolTest client with additional configuration +// svc := inputservice13protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewInputService13ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *InputService13ProtocolTest { + c := p.ClientConfig("inputservice13protocoltest", cfgs...) + return newInputService13ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newInputService13ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *InputService13ProtocolTest { + svc := &InputService13ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "inputservice13protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "2014-01-01", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Build.PushBack(restxml.Build) + svc.Handlers.Unmarshal.PushBack(restxml.Unmarshal) + svc.Handlers.UnmarshalMeta.PushBack(restxml.UnmarshalMeta) + svc.Handlers.UnmarshalError.PushBack(restxml.UnmarshalError) + + return svc +} + +// newRequest creates a new request for a InputService13ProtocolTest operation and runs any +// custom request initialization. +func (c *InputService13ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opInputService13TestCaseOperation1 = "OperationName" + +// InputService13TestCaseOperation1Request generates a request for the InputService13TestCaseOperation1 operation. +func (c *InputService13ProtocolTest) InputService13TestCaseOperation1Request(input *InputService13TestShapeInputService13TestCaseOperation1Input) (req *request.Request, output *InputService13TestShapeInputService13TestCaseOperation1Output) { + op := &request.Operation{ + Name: opInputService13TestCaseOperation1, + HTTPMethod: "GET", + HTTPPath: "/2014-01-01/jobsByPipeline/{PipelineId}", + } + + if input == nil { + input = &InputService13TestShapeInputService13TestCaseOperation1Input{} + } + + req = c.newRequest(op, input, output) + output = &InputService13TestShapeInputService13TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *InputService13ProtocolTest) InputService13TestCaseOperation1(input *InputService13TestShapeInputService13TestCaseOperation1Input) (*InputService13TestShapeInputService13TestCaseOperation1Output, error) { + req, out := c.InputService13TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +type InputService13TestShapeInputService13TestCaseOperation1Input struct { + _ struct{} `type:"structure"` + + PipelineId *string `location:"uri" type:"string"` + + QueryDoc map[string][]*string `location:"querystring" type:"map"` +} + +type InputService13TestShapeInputService13TestCaseOperation1Output struct { + _ struct{} `type:"structure"` +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. 
+type InputService14ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the InputService14ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a InputService14ProtocolTest client from just a session. +// svc := inputservice14protocoltest.New(mySession) +// +// // Create a InputService14ProtocolTest client with additional configuration +// svc := inputservice14protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewInputService14ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *InputService14ProtocolTest { + c := p.ClientConfig("inputservice14protocoltest", cfgs...) + return newInputService14ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newInputService14ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *InputService14ProtocolTest { + svc := &InputService14ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "inputservice14protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "2014-01-01", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Build.PushBack(restxml.Build) + svc.Handlers.Unmarshal.PushBack(restxml.Unmarshal) + svc.Handlers.UnmarshalMeta.PushBack(restxml.UnmarshalMeta) + svc.Handlers.UnmarshalError.PushBack(restxml.UnmarshalError) + + return svc +} + +// newRequest creates a new request for a InputService14ProtocolTest operation and runs any +// custom request initialization. +func (c *InputService14ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opInputService14TestCaseOperation1 = "OperationName" + +// InputService14TestCaseOperation1Request generates a request for the InputService14TestCaseOperation1 operation. +func (c *InputService14ProtocolTest) InputService14TestCaseOperation1Request(input *InputService14TestShapeInputService14TestCaseOperation1Input) (req *request.Request, output *InputService14TestShapeInputService14TestCaseOperation1Output) { + op := &request.Operation{ + Name: opInputService14TestCaseOperation1, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &InputService14TestShapeInputService14TestCaseOperation1Input{} + } + + req = c.newRequest(op, input, output) + output = &InputService14TestShapeInputService14TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *InputService14ProtocolTest) InputService14TestCaseOperation1(input *InputService14TestShapeInputService14TestCaseOperation1Input) (*InputService14TestShapeInputService14TestCaseOperation1Output, error) { + req, out := c.InputService14TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +type InputService14TestShapeInputService14TestCaseOperation1Input struct { + _ struct{} `type:"structure" payload:"Foo"` + + Foo *string `locationName:"foo" type:"string"` +} + +type InputService14TestShapeInputService14TestCaseOperation1Output struct { + _ struct{} `type:"structure"` +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. 
+type InputService15ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the InputService15ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a InputService15ProtocolTest client from just a session. +// svc := inputservice15protocoltest.New(mySession) +// +// // Create a InputService15ProtocolTest client with additional configuration +// svc := inputservice15protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewInputService15ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *InputService15ProtocolTest { + c := p.ClientConfig("inputservice15protocoltest", cfgs...) + return newInputService15ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newInputService15ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *InputService15ProtocolTest { + svc := &InputService15ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "inputservice15protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "2014-01-01", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Build.PushBack(restxml.Build) + svc.Handlers.Unmarshal.PushBack(restxml.Unmarshal) + svc.Handlers.UnmarshalMeta.PushBack(restxml.UnmarshalMeta) + svc.Handlers.UnmarshalError.PushBack(restxml.UnmarshalError) + + return svc +} + +// newRequest creates a new request for a InputService15ProtocolTest operation and runs any +// custom request initialization. +func (c *InputService15ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opInputService15TestCaseOperation1 = "OperationName" + +// InputService15TestCaseOperation1Request generates a request for the InputService15TestCaseOperation1 operation. +func (c *InputService15ProtocolTest) InputService15TestCaseOperation1Request(input *InputService15TestShapeInputShape) (req *request.Request, output *InputService15TestShapeInputService15TestCaseOperation1Output) { + op := &request.Operation{ + Name: opInputService15TestCaseOperation1, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &InputService15TestShapeInputShape{} + } + + req = c.newRequest(op, input, output) + output = &InputService15TestShapeInputService15TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *InputService15ProtocolTest) InputService15TestCaseOperation1(input *InputService15TestShapeInputShape) (*InputService15TestShapeInputService15TestCaseOperation1Output, error) { + req, out := c.InputService15TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +const opInputService15TestCaseOperation2 = "OperationName" + +// InputService15TestCaseOperation2Request generates a request for the InputService15TestCaseOperation2 operation. 
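+// The request is returned unsent; callers can inspect or modify it before
+// calling its Send method, after which req.Data holds the output shape.
+// A sketch of the assumed usage:
+//
+//	req, out := svc.InputService15TestCaseOperation2Request(nil)
+//	err := req.Send() // out now holds the (empty) output shape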
+func (c *InputService15ProtocolTest) InputService15TestCaseOperation2Request(input *InputService15TestShapeInputShape) (req *request.Request, output *InputService15TestShapeInputService15TestCaseOperation2Output) { + op := &request.Operation{ + Name: opInputService15TestCaseOperation2, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &InputService15TestShapeInputShape{} + } + + req = c.newRequest(op, input, output) + output = &InputService15TestShapeInputService15TestCaseOperation2Output{} + req.Data = output + return +} + +func (c *InputService15ProtocolTest) InputService15TestCaseOperation2(input *InputService15TestShapeInputShape) (*InputService15TestShapeInputService15TestCaseOperation2Output, error) { + req, out := c.InputService15TestCaseOperation2Request(input) + err := req.Send() + return out, err +} + +type InputService15TestShapeInputService15TestCaseOperation1Output struct { + _ struct{} `type:"structure"` +} + +type InputService15TestShapeInputService15TestCaseOperation2Output struct { + _ struct{} `type:"structure"` +} + +type InputService15TestShapeInputShape struct { + _ struct{} `type:"structure" payload:"Foo"` + + Foo []byte `locationName:"foo" type:"blob"` +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type InputService16ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the InputService16ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a InputService16ProtocolTest client from just a session. +// svc := inputservice16protocoltest.New(mySession) +// +// // Create a InputService16ProtocolTest client with additional configuration +// svc := inputservice16protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewInputService16ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *InputService16ProtocolTest { + c := p.ClientConfig("inputservice16protocoltest", cfgs...) + return newInputService16ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newInputService16ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *InputService16ProtocolTest { + svc := &InputService16ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "inputservice16protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "2014-01-01", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Build.PushBack(restxml.Build) + svc.Handlers.Unmarshal.PushBack(restxml.Unmarshal) + svc.Handlers.UnmarshalMeta.PushBack(restxml.UnmarshalMeta) + svc.Handlers.UnmarshalError.PushBack(restxml.UnmarshalError) + + return svc +} + +// newRequest creates a new request for a InputService16ProtocolTest operation and runs any +// custom request initialization. +func (c *InputService16ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opInputService16TestCaseOperation1 = "OperationName" + +// InputService16TestCaseOperation1Request generates a request for the InputService16TestCaseOperation1 operation. 
+func (c *InputService16ProtocolTest) InputService16TestCaseOperation1Request(input *InputService16TestShapeInputShape) (req *request.Request, output *InputService16TestShapeInputService16TestCaseOperation1Output) { + op := &request.Operation{ + Name: opInputService16TestCaseOperation1, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &InputService16TestShapeInputShape{} + } + + req = c.newRequest(op, input, output) + output = &InputService16TestShapeInputService16TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *InputService16ProtocolTest) InputService16TestCaseOperation1(input *InputService16TestShapeInputShape) (*InputService16TestShapeInputService16TestCaseOperation1Output, error) { + req, out := c.InputService16TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +const opInputService16TestCaseOperation2 = "OperationName" + +// InputService16TestCaseOperation2Request generates a request for the InputService16TestCaseOperation2 operation. +func (c *InputService16ProtocolTest) InputService16TestCaseOperation2Request(input *InputService16TestShapeInputShape) (req *request.Request, output *InputService16TestShapeInputService16TestCaseOperation2Output) { + op := &request.Operation{ + Name: opInputService16TestCaseOperation2, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &InputService16TestShapeInputShape{} + } + + req = c.newRequest(op, input, output) + output = &InputService16TestShapeInputService16TestCaseOperation2Output{} + req.Data = output + return +} + +func (c *InputService16ProtocolTest) InputService16TestCaseOperation2(input *InputService16TestShapeInputShape) (*InputService16TestShapeInputService16TestCaseOperation2Output, error) { + req, out := c.InputService16TestCaseOperation2Request(input) + err := req.Send() + return out, err +} + +const opInputService16TestCaseOperation3 = "OperationName" + +// InputService16TestCaseOperation3Request generates a request for the InputService16TestCaseOperation3 operation. +func (c *InputService16ProtocolTest) InputService16TestCaseOperation3Request(input *InputService16TestShapeInputShape) (req *request.Request, output *InputService16TestShapeInputService16TestCaseOperation3Output) { + op := &request.Operation{ + Name: opInputService16TestCaseOperation3, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &InputService16TestShapeInputShape{} + } + + req = c.newRequest(op, input, output) + output = &InputService16TestShapeInputService16TestCaseOperation3Output{} + req.Data = output + return +} + +func (c *InputService16ProtocolTest) InputService16TestCaseOperation3(input *InputService16TestShapeInputShape) (*InputService16TestShapeInputService16TestCaseOperation3Output, error) { + req, out := c.InputService16TestCaseOperation3Request(input) + err := req.Send() + return out, err +} + +const opInputService16TestCaseOperation4 = "OperationName" + +// InputService16TestCaseOperation4Request generates a request for the InputService16TestCaseOperation4 operation. 
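+// All four operations share the wire name "OperationName" and the same input
+// shape; their tests differ only in which payload fields are populated.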
+func (c *InputService16ProtocolTest) InputService16TestCaseOperation4Request(input *InputService16TestShapeInputShape) (req *request.Request, output *InputService16TestShapeInputService16TestCaseOperation4Output) { + op := &request.Operation{ + Name: opInputService16TestCaseOperation4, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &InputService16TestShapeInputShape{} + } + + req = c.newRequest(op, input, output) + output = &InputService16TestShapeInputService16TestCaseOperation4Output{} + req.Data = output + return +} + +func (c *InputService16ProtocolTest) InputService16TestCaseOperation4(input *InputService16TestShapeInputShape) (*InputService16TestShapeInputService16TestCaseOperation4Output, error) { + req, out := c.InputService16TestCaseOperation4Request(input) + err := req.Send() + return out, err +} + +type InputService16TestShapeFooShape struct { + _ struct{} `locationName:"foo" type:"structure"` + + Baz *string `locationName:"baz" type:"string"` +} + +type InputService16TestShapeInputService16TestCaseOperation1Output struct { + _ struct{} `type:"structure"` +} + +type InputService16TestShapeInputService16TestCaseOperation2Output struct { + _ struct{} `type:"structure"` +} + +type InputService16TestShapeInputService16TestCaseOperation3Output struct { + _ struct{} `type:"structure"` +} + +type InputService16TestShapeInputService16TestCaseOperation4Output struct { + _ struct{} `type:"structure"` +} + +type InputService16TestShapeInputShape struct { + _ struct{} `type:"structure" payload:"Foo"` + + Foo *InputService16TestShapeFooShape `locationName:"foo" type:"structure"` +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type InputService17ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the InputService17ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a InputService17ProtocolTest client from just a session. +// svc := inputservice17protocoltest.New(mySession) +// +// // Create a InputService17ProtocolTest client with additional configuration +// svc := inputservice17protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewInputService17ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *InputService17ProtocolTest { + c := p.ClientConfig("inputservice17protocoltest", cfgs...) + return newInputService17ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newInputService17ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *InputService17ProtocolTest { + svc := &InputService17ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "inputservice17protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "2014-01-01", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Build.PushBack(restxml.Build) + svc.Handlers.Unmarshal.PushBack(restxml.Unmarshal) + svc.Handlers.UnmarshalMeta.PushBack(restxml.UnmarshalMeta) + svc.Handlers.UnmarshalError.PushBack(restxml.UnmarshalError) + + return svc +} + +// newRequest creates a new request for a InputService17ProtocolTest operation and runs any +// custom request initialization. 
+func (c *InputService17ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opInputService17TestCaseOperation1 = "OperationName" + +// InputService17TestCaseOperation1Request generates a request for the InputService17TestCaseOperation1 operation. +func (c *InputService17ProtocolTest) InputService17TestCaseOperation1Request(input *InputService17TestShapeInputService17TestCaseOperation1Input) (req *request.Request, output *InputService17TestShapeInputService17TestCaseOperation1Output) { + op := &request.Operation{ + Name: opInputService17TestCaseOperation1, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &InputService17TestShapeInputService17TestCaseOperation1Input{} + } + + req = c.newRequest(op, input, output) + output = &InputService17TestShapeInputService17TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *InputService17ProtocolTest) InputService17TestCaseOperation1(input *InputService17TestShapeInputService17TestCaseOperation1Input) (*InputService17TestShapeInputService17TestCaseOperation1Output, error) { + req, out := c.InputService17TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +type InputService17TestShapeGrant struct { + _ struct{} `locationName:"Grant" type:"structure"` + + Grantee *InputService17TestShapeGrantee `type:"structure"` +} + +type InputService17TestShapeGrantee struct { + _ struct{} `type:"structure" xmlPrefix:"xsi" xmlURI:"http://www.w3.org/2001/XMLSchema-instance"` + + EmailAddress *string `type:"string"` + + Type *string `locationName:"xsi:type" type:"string" xmlAttribute:"true"` +} + +type InputService17TestShapeInputService17TestCaseOperation1Input struct { + _ struct{} `type:"structure" payload:"Grant"` + + Grant *InputService17TestShapeGrant `locationName:"Grant" type:"structure"` +} + +type InputService17TestShapeInputService17TestCaseOperation1Output struct { + _ struct{} `type:"structure"` +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type InputService18ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the InputService18ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a InputService18ProtocolTest client from just a session. +// svc := inputservice18protocoltest.New(mySession) +// +// // Create a InputService18ProtocolTest client with additional configuration +// svc := inputservice18protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewInputService18ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *InputService18ProtocolTest { + c := p.ClientConfig("inputservice18protocoltest", cfgs...) + return newInputService18ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. 
+func newInputService18ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *InputService18ProtocolTest { + svc := &InputService18ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "inputservice18protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "2014-01-01", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Build.PushBack(restxml.Build) + svc.Handlers.Unmarshal.PushBack(restxml.Unmarshal) + svc.Handlers.UnmarshalMeta.PushBack(restxml.UnmarshalMeta) + svc.Handlers.UnmarshalError.PushBack(restxml.UnmarshalError) + + return svc +} + +// newRequest creates a new request for a InputService18ProtocolTest operation and runs any +// custom request initialization. +func (c *InputService18ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opInputService18TestCaseOperation1 = "OperationName" + +// InputService18TestCaseOperation1Request generates a request for the InputService18TestCaseOperation1 operation. +func (c *InputService18ProtocolTest) InputService18TestCaseOperation1Request(input *InputService18TestShapeInputService18TestCaseOperation1Input) (req *request.Request, output *InputService18TestShapeInputService18TestCaseOperation1Output) { + op := &request.Operation{ + Name: opInputService18TestCaseOperation1, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}/{Key+}", + } + + if input == nil { + input = &InputService18TestShapeInputService18TestCaseOperation1Input{} + } + + req = c.newRequest(op, input, output) + output = &InputService18TestShapeInputService18TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *InputService18ProtocolTest) InputService18TestCaseOperation1(input *InputService18TestShapeInputService18TestCaseOperation1Input) (*InputService18TestShapeInputService18TestCaseOperation1Output, error) { + req, out := c.InputService18TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +type InputService18TestShapeInputService18TestCaseOperation1Input struct { + _ struct{} `type:"structure"` + + Bucket *string `location:"uri" type:"string"` + + Key *string `location:"uri" type:"string"` +} + +type InputService18TestShapeInputService18TestCaseOperation1Output struct { + _ struct{} `type:"structure"` +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type InputService19ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the InputService19ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a InputService19ProtocolTest client from just a session. +// svc := inputservice19protocoltest.New(mySession) +// +// // Create a InputService19ProtocolTest client with additional configuration +// svc := inputservice19protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewInputService19ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *InputService19ProtocolTest { + c := p.ClientConfig("inputservice19protocoltest", cfgs...) + return newInputService19ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. 
+func newInputService19ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *InputService19ProtocolTest { + svc := &InputService19ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "inputservice19protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "2014-01-01", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Build.PushBack(restxml.Build) + svc.Handlers.Unmarshal.PushBack(restxml.Unmarshal) + svc.Handlers.UnmarshalMeta.PushBack(restxml.UnmarshalMeta) + svc.Handlers.UnmarshalError.PushBack(restxml.UnmarshalError) + + return svc +} + +// newRequest creates a new request for a InputService19ProtocolTest operation and runs any +// custom request initialization. +func (c *InputService19ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opInputService19TestCaseOperation1 = "OperationName" + +// InputService19TestCaseOperation1Request generates a request for the InputService19TestCaseOperation1 operation. +func (c *InputService19ProtocolTest) InputService19TestCaseOperation1Request(input *InputService19TestShapeInputShape) (req *request.Request, output *InputService19TestShapeInputService19TestCaseOperation1Output) { + op := &request.Operation{ + Name: opInputService19TestCaseOperation1, + HTTPMethod: "POST", + HTTPPath: "/path", + } + + if input == nil { + input = &InputService19TestShapeInputShape{} + } + + req = c.newRequest(op, input, output) + output = &InputService19TestShapeInputService19TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *InputService19ProtocolTest) InputService19TestCaseOperation1(input *InputService19TestShapeInputShape) (*InputService19TestShapeInputService19TestCaseOperation1Output, error) { + req, out := c.InputService19TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +const opInputService19TestCaseOperation2 = "OperationName" + +// InputService19TestCaseOperation2Request generates a request for the InputService19TestCaseOperation2 operation. +func (c *InputService19ProtocolTest) InputService19TestCaseOperation2Request(input *InputService19TestShapeInputShape) (req *request.Request, output *InputService19TestShapeInputService19TestCaseOperation2Output) { + op := &request.Operation{ + Name: opInputService19TestCaseOperation2, + HTTPMethod: "POST", + HTTPPath: "/path?abc=mno", + } + + if input == nil { + input = &InputService19TestShapeInputShape{} + } + + req = c.newRequest(op, input, output) + output = &InputService19TestShapeInputService19TestCaseOperation2Output{} + req.Data = output + return +} + +func (c *InputService19ProtocolTest) InputService19TestCaseOperation2(input *InputService19TestShapeInputShape) (*InputService19TestShapeInputService19TestCaseOperation2Output, error) { + req, out := c.InputService19TestCaseOperation2Request(input) + err := req.Send() + return out, err +} + +type InputService19TestShapeInputService19TestCaseOperation1Output struct { + _ struct{} `type:"structure"` +} + +type InputService19TestShapeInputService19TestCaseOperation2Output struct { + _ struct{} `type:"structure"` +} + +type InputService19TestShapeInputShape struct { + _ struct{} `type:"structure"` + + Foo *string `location:"querystring" locationName:"param-name" type:"string"` +} + +//The service client's operations are safe to be used concurrently. 
+// It is not safe to mutate any of the client's properties though. +type InputService20ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the InputService20ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a InputService20ProtocolTest client from just a session. +// svc := inputservice20protocoltest.New(mySession) +// +// // Create a InputService20ProtocolTest client with additional configuration +// svc := inputservice20protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewInputService20ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *InputService20ProtocolTest { + c := p.ClientConfig("inputservice20protocoltest", cfgs...) + return newInputService20ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newInputService20ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *InputService20ProtocolTest { + svc := &InputService20ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "inputservice20protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "2014-01-01", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Build.PushBack(restxml.Build) + svc.Handlers.Unmarshal.PushBack(restxml.Unmarshal) + svc.Handlers.UnmarshalMeta.PushBack(restxml.UnmarshalMeta) + svc.Handlers.UnmarshalError.PushBack(restxml.UnmarshalError) + + return svc +} + +// newRequest creates a new request for a InputService20ProtocolTest operation and runs any +// custom request initialization. +func (c *InputService20ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opInputService20TestCaseOperation1 = "OperationName" + +// InputService20TestCaseOperation1Request generates a request for the InputService20TestCaseOperation1 operation. +func (c *InputService20ProtocolTest) InputService20TestCaseOperation1Request(input *InputService20TestShapeInputShape) (req *request.Request, output *InputService20TestShapeInputService20TestCaseOperation1Output) { + op := &request.Operation{ + Name: opInputService20TestCaseOperation1, + HTTPMethod: "POST", + HTTPPath: "/path", + } + + if input == nil { + input = &InputService20TestShapeInputShape{} + } + + req = c.newRequest(op, input, output) + output = &InputService20TestShapeInputService20TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *InputService20ProtocolTest) InputService20TestCaseOperation1(input *InputService20TestShapeInputShape) (*InputService20TestShapeInputService20TestCaseOperation1Output, error) { + req, out := c.InputService20TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +const opInputService20TestCaseOperation2 = "OperationName" + +// InputService20TestCaseOperation2Request generates a request for the InputService20TestCaseOperation2 operation. 
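+// Like the other InputService20 operations it POSTs the recursive input
+// shape to /path; only the test inputs differ.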
+func (c *InputService20ProtocolTest) InputService20TestCaseOperation2Request(input *InputService20TestShapeInputShape) (req *request.Request, output *InputService20TestShapeInputService20TestCaseOperation2Output) { + op := &request.Operation{ + Name: opInputService20TestCaseOperation2, + HTTPMethod: "POST", + HTTPPath: "/path", + } + + if input == nil { + input = &InputService20TestShapeInputShape{} + } + + req = c.newRequest(op, input, output) + output = &InputService20TestShapeInputService20TestCaseOperation2Output{} + req.Data = output + return +} + +func (c *InputService20ProtocolTest) InputService20TestCaseOperation2(input *InputService20TestShapeInputShape) (*InputService20TestShapeInputService20TestCaseOperation2Output, error) { + req, out := c.InputService20TestCaseOperation2Request(input) + err := req.Send() + return out, err +} + +const opInputService20TestCaseOperation3 = "OperationName" + +// InputService20TestCaseOperation3Request generates a request for the InputService20TestCaseOperation3 operation. +func (c *InputService20ProtocolTest) InputService20TestCaseOperation3Request(input *InputService20TestShapeInputShape) (req *request.Request, output *InputService20TestShapeInputService20TestCaseOperation3Output) { + op := &request.Operation{ + Name: opInputService20TestCaseOperation3, + HTTPMethod: "POST", + HTTPPath: "/path", + } + + if input == nil { + input = &InputService20TestShapeInputShape{} + } + + req = c.newRequest(op, input, output) + output = &InputService20TestShapeInputService20TestCaseOperation3Output{} + req.Data = output + return +} + +func (c *InputService20ProtocolTest) InputService20TestCaseOperation3(input *InputService20TestShapeInputShape) (*InputService20TestShapeInputService20TestCaseOperation3Output, error) { + req, out := c.InputService20TestCaseOperation3Request(input) + err := req.Send() + return out, err +} + +const opInputService20TestCaseOperation4 = "OperationName" + +// InputService20TestCaseOperation4Request generates a request for the InputService20TestCaseOperation4 operation. +func (c *InputService20ProtocolTest) InputService20TestCaseOperation4Request(input *InputService20TestShapeInputShape) (req *request.Request, output *InputService20TestShapeInputService20TestCaseOperation4Output) { + op := &request.Operation{ + Name: opInputService20TestCaseOperation4, + HTTPMethod: "POST", + HTTPPath: "/path", + } + + if input == nil { + input = &InputService20TestShapeInputShape{} + } + + req = c.newRequest(op, input, output) + output = &InputService20TestShapeInputService20TestCaseOperation4Output{} + req.Data = output + return +} + +func (c *InputService20ProtocolTest) InputService20TestCaseOperation4(input *InputService20TestShapeInputShape) (*InputService20TestShapeInputService20TestCaseOperation4Output, error) { + req, out := c.InputService20TestCaseOperation4Request(input) + err := req.Send() + return out, err +} + +const opInputService20TestCaseOperation5 = "OperationName" + +// InputService20TestCaseOperation5Request generates a request for the InputService20TestCaseOperation5 operation. 
+func (c *InputService20ProtocolTest) InputService20TestCaseOperation5Request(input *InputService20TestShapeInputShape) (req *request.Request, output *InputService20TestShapeInputService20TestCaseOperation5Output) { + op := &request.Operation{ + Name: opInputService20TestCaseOperation5, + HTTPMethod: "POST", + HTTPPath: "/path", + } + + if input == nil { + input = &InputService20TestShapeInputShape{} + } + + req = c.newRequest(op, input, output) + output = &InputService20TestShapeInputService20TestCaseOperation5Output{} + req.Data = output + return +} + +func (c *InputService20ProtocolTest) InputService20TestCaseOperation5(input *InputService20TestShapeInputShape) (*InputService20TestShapeInputService20TestCaseOperation5Output, error) { + req, out := c.InputService20TestCaseOperation5Request(input) + err := req.Send() + return out, err +} + +const opInputService20TestCaseOperation6 = "OperationName" + +// InputService20TestCaseOperation6Request generates a request for the InputService20TestCaseOperation6 operation. +func (c *InputService20ProtocolTest) InputService20TestCaseOperation6Request(input *InputService20TestShapeInputShape) (req *request.Request, output *InputService20TestShapeInputService20TestCaseOperation6Output) { + op := &request.Operation{ + Name: opInputService20TestCaseOperation6, + HTTPMethod: "POST", + HTTPPath: "/path", + } + + if input == nil { + input = &InputService20TestShapeInputShape{} + } + + req = c.newRequest(op, input, output) + output = &InputService20TestShapeInputService20TestCaseOperation6Output{} + req.Data = output + return +} + +func (c *InputService20ProtocolTest) InputService20TestCaseOperation6(input *InputService20TestShapeInputShape) (*InputService20TestShapeInputService20TestCaseOperation6Output, error) { + req, out := c.InputService20TestCaseOperation6Request(input) + err := req.Send() + return out, err +} + +type InputService20TestShapeInputService20TestCaseOperation1Output struct { + _ struct{} `type:"structure"` +} + +type InputService20TestShapeInputService20TestCaseOperation2Output struct { + _ struct{} `type:"structure"` +} + +type InputService20TestShapeInputService20TestCaseOperation3Output struct { + _ struct{} `type:"structure"` +} + +type InputService20TestShapeInputService20TestCaseOperation4Output struct { + _ struct{} `type:"structure"` +} + +type InputService20TestShapeInputService20TestCaseOperation5Output struct { + _ struct{} `type:"structure"` +} + +type InputService20TestShapeInputService20TestCaseOperation6Output struct { + _ struct{} `type:"structure"` +} + +type InputService20TestShapeInputShape struct { + _ struct{} `locationName:"OperationRequest" type:"structure" xmlURI:"https://foo/"` + + RecursiveStruct *InputService20TestShapeRecursiveStructType `type:"structure"` +} + +type InputService20TestShapeRecursiveStructType struct { + _ struct{} `type:"structure"` + + NoRecurse *string `type:"string"` + + RecursiveList []*InputService20TestShapeRecursiveStructType `type:"list"` + + RecursiveMap map[string]*InputService20TestShapeRecursiveStructType `type:"map"` + + RecursiveStruct *InputService20TestShapeRecursiveStructType `type:"structure"` +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type InputService21ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the InputService21ProtocolTest client with a session. 
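+// In the tests below the session comes from session.New().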
+// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a InputService21ProtocolTest client from just a session. +// svc := inputservice21protocoltest.New(mySession) +// +// // Create a InputService21ProtocolTest client with additional configuration +// svc := inputservice21protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewInputService21ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *InputService21ProtocolTest { + c := p.ClientConfig("inputservice21protocoltest", cfgs...) + return newInputService21ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newInputService21ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *InputService21ProtocolTest { + svc := &InputService21ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "inputservice21protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "2014-01-01", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Build.PushBack(restxml.Build) + svc.Handlers.Unmarshal.PushBack(restxml.Unmarshal) + svc.Handlers.UnmarshalMeta.PushBack(restxml.UnmarshalMeta) + svc.Handlers.UnmarshalError.PushBack(restxml.UnmarshalError) + + return svc +} + +// newRequest creates a new request for a InputService21ProtocolTest operation and runs any +// custom request initialization. +func (c *InputService21ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opInputService21TestCaseOperation1 = "OperationName" + +// InputService21TestCaseOperation1Request generates a request for the InputService21TestCaseOperation1 operation. 
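+// Its only input member is a timestamp serialized into the x-amz-timearg
+// header using the RFC 822 format (see the shape definition below).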
+func (c *InputService21ProtocolTest) InputService21TestCaseOperation1Request(input *InputService21TestShapeInputService21TestCaseOperation1Input) (req *request.Request, output *InputService21TestShapeInputService21TestCaseOperation1Output) { + op := &request.Operation{ + Name: opInputService21TestCaseOperation1, + HTTPMethod: "POST", + HTTPPath: "/path", + } + + if input == nil { + input = &InputService21TestShapeInputService21TestCaseOperation1Input{} + } + + req = c.newRequest(op, input, output) + output = &InputService21TestShapeInputService21TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *InputService21ProtocolTest) InputService21TestCaseOperation1(input *InputService21TestShapeInputService21TestCaseOperation1Input) (*InputService21TestShapeInputService21TestCaseOperation1Output, error) { + req, out := c.InputService21TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +type InputService21TestShapeInputService21TestCaseOperation1Input struct { + _ struct{} `type:"structure"` + + TimeArgInHeader *time.Time `location:"header" locationName:"x-amz-timearg" type:"timestamp" timestampFormat:"rfc822"` +} + +type InputService21TestShapeInputService21TestCaseOperation1Output struct { + _ struct{} `type:"structure"` +} + +// +// Tests begin here +// + +func TestInputService1ProtocolTestBasicXMLSerializationCase1(t *testing.T) { + sess := session.New() + svc := NewInputService1ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + + input := &InputService1TestShapeInputShape{ + Description: aws.String("bar"), + Name: aws.String("foo"), + } + req, _ := svc.InputService1TestCaseOperation1Request(input) + r := req.HTTPRequest + + // build request + restxml.Build(req) + assert.NoError(t, req.Error) + + // assert body + assert.NotNil(t, r.Body) + body := util.SortXML(r.Body) + awstesting.AssertXML(t, `barfoo`, util.Trim(string(body)), InputService1TestShapeInputShape{}) + + // assert URL + awstesting.AssertURL(t, "https://test/2014-01-01/hostedzone", r.URL.String()) + + // assert headers + +} + +func TestInputService1ProtocolTestBasicXMLSerializationCase2(t *testing.T) { + sess := session.New() + svc := NewInputService1ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + + input := &InputService1TestShapeInputShape{ + Description: aws.String("bar"), + Name: aws.String("foo"), + } + req, _ := svc.InputService1TestCaseOperation2Request(input) + r := req.HTTPRequest + + // build request + restxml.Build(req) + assert.NoError(t, req.Error) + + // assert body + assert.NotNil(t, r.Body) + body := util.SortXML(r.Body) + awstesting.AssertXML(t, `barfoo`, util.Trim(string(body)), InputService1TestShapeInputShape{}) + + // assert URL + awstesting.AssertURL(t, "https://test/2014-01-01/hostedzone", r.URL.String()) + + // assert headers + +} + +func TestInputService1ProtocolTestBasicXMLSerializationCase3(t *testing.T) { + sess := session.New() + svc := NewInputService1ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + + input := &InputService1TestShapeInputService1TestCaseOperation3Input{} + req, _ := svc.InputService1TestCaseOperation3Request(input) + r := req.HTTPRequest + + // build request + restxml.Build(req) + assert.NoError(t, req.Error) + + // assert URL + awstesting.AssertURL(t, "https://test/2014-01-01/hostedzone", r.URL.String()) + + // assert headers + +} + +func TestInputService2ProtocolTestSerializeOtherScalarTypesCase1(t *testing.T) { + sess := session.New() + svc := 
NewInputService2ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + + input := &InputService2TestShapeInputService2TestCaseOperation1Input{ + First: aws.Bool(true), + Fourth: aws.Int64(3), + Second: aws.Bool(false), + Third: aws.Float64(1.2), + } + req, _ := svc.InputService2TestCaseOperation1Request(input) + r := req.HTTPRequest + + // build request + restxml.Build(req) + assert.NoError(t, req.Error) + + // assert body + assert.NotNil(t, r.Body) + body := util.SortXML(r.Body) + awstesting.AssertXML(t, `true3false1.2`, util.Trim(string(body)), InputService2TestShapeInputService2TestCaseOperation1Input{}) + + // assert URL + awstesting.AssertURL(t, "https://test/2014-01-01/hostedzone", r.URL.String()) + + // assert headers + +} + +func TestInputService3ProtocolTestNestedStructuresCase1(t *testing.T) { + sess := session.New() + svc := NewInputService3ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + + input := &InputService3TestShapeInputShape{ + Description: aws.String("baz"), + SubStructure: &InputService3TestShapeSubStructure{ + Bar: aws.String("b"), + Foo: aws.String("a"), + }, + } + req, _ := svc.InputService3TestCaseOperation1Request(input) + r := req.HTTPRequest + + // build request + restxml.Build(req) + assert.NoError(t, req.Error) + + // assert body + assert.NotNil(t, r.Body) + body := util.SortXML(r.Body) + awstesting.AssertXML(t, `bazba`, util.Trim(string(body)), InputService3TestShapeInputShape{}) + + // assert URL + awstesting.AssertURL(t, "https://test/2014-01-01/hostedzone", r.URL.String()) + + // assert headers + +} + +func TestInputService3ProtocolTestNestedStructuresCase2(t *testing.T) { + sess := session.New() + svc := NewInputService3ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + + input := &InputService3TestShapeInputShape{ + Description: aws.String("baz"), + SubStructure: &InputService3TestShapeSubStructure{ + Foo: aws.String("a"), + }, + } + req, _ := svc.InputService3TestCaseOperation2Request(input) + r := req.HTTPRequest + + // build request + restxml.Build(req) + assert.NoError(t, req.Error) + + // assert body + assert.NotNil(t, r.Body) + body := util.SortXML(r.Body) + awstesting.AssertXML(t, `baza`, util.Trim(string(body)), InputService3TestShapeInputShape{}) + + // assert URL + awstesting.AssertURL(t, "https://test/2014-01-01/hostedzone", r.URL.String()) + + // assert headers + +} + +func TestInputService4ProtocolTestNestedStructuresCase1(t *testing.T) { + sess := session.New() + svc := NewInputService4ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + + input := &InputService4TestShapeInputService4TestCaseOperation1Input{ + Description: aws.String("baz"), + SubStructure: &InputService4TestShapeSubStructure{}, + } + req, _ := svc.InputService4TestCaseOperation1Request(input) + r := req.HTTPRequest + + // build request + restxml.Build(req) + assert.NoError(t, req.Error) + + // assert body + assert.NotNil(t, r.Body) + body := util.SortXML(r.Body) + awstesting.AssertXML(t, `baz`, util.Trim(string(body)), InputService4TestShapeInputService4TestCaseOperation1Input{}) + + // assert URL + awstesting.AssertURL(t, "https://test/2014-01-01/hostedzone", r.URL.String()) + + // assert headers + +} + +func TestInputService5ProtocolTestNonFlattenedListsCase1(t *testing.T) { + sess := session.New() + svc := NewInputService5ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + + input := &InputService5TestShapeInputService5TestCaseOperation1Input{ + ListParam: []*string{ + 
aws.String("one"), + aws.String("two"), + aws.String("three"), + }, + } + req, _ := svc.InputService5TestCaseOperation1Request(input) + r := req.HTTPRequest + + // build request + restxml.Build(req) + assert.NoError(t, req.Error) + + // assert body + assert.NotNil(t, r.Body) + body := util.SortXML(r.Body) + awstesting.AssertXML(t, `onetwothree`, util.Trim(string(body)), InputService5TestShapeInputService5TestCaseOperation1Input{}) + + // assert URL + awstesting.AssertURL(t, "https://test/2014-01-01/hostedzone", r.URL.String()) + + // assert headers + +} + +func TestInputService6ProtocolTestNonFlattenedListsWithLocationNameCase1(t *testing.T) { + sess := session.New() + svc := NewInputService6ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + + input := &InputService6TestShapeInputService6TestCaseOperation1Input{ + ListParam: []*string{ + aws.String("one"), + aws.String("two"), + aws.String("three"), + }, + } + req, _ := svc.InputService6TestCaseOperation1Request(input) + r := req.HTTPRequest + + // build request + restxml.Build(req) + assert.NoError(t, req.Error) + + // assert body + assert.NotNil(t, r.Body) + body := util.SortXML(r.Body) + awstesting.AssertXML(t, `onetwothree`, util.Trim(string(body)), InputService6TestShapeInputService6TestCaseOperation1Input{}) + + // assert URL + awstesting.AssertURL(t, "https://test/2014-01-01/hostedzone", r.URL.String()) + + // assert headers + +} + +func TestInputService7ProtocolTestFlattenedListsCase1(t *testing.T) { + sess := session.New() + svc := NewInputService7ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + + input := &InputService7TestShapeInputService7TestCaseOperation1Input{ + ListParam: []*string{ + aws.String("one"), + aws.String("two"), + aws.String("three"), + }, + } + req, _ := svc.InputService7TestCaseOperation1Request(input) + r := req.HTTPRequest + + // build request + restxml.Build(req) + assert.NoError(t, req.Error) + + // assert body + assert.NotNil(t, r.Body) + body := util.SortXML(r.Body) + awstesting.AssertXML(t, `onetwothree`, util.Trim(string(body)), InputService7TestShapeInputService7TestCaseOperation1Input{}) + + // assert URL + awstesting.AssertURL(t, "https://test/2014-01-01/hostedzone", r.URL.String()) + + // assert headers + +} + +func TestInputService8ProtocolTestFlattenedListsWithLocationNameCase1(t *testing.T) { + sess := session.New() + svc := NewInputService8ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + + input := &InputService8TestShapeInputService8TestCaseOperation1Input{ + ListParam: []*string{ + aws.String("one"), + aws.String("two"), + aws.String("three"), + }, + } + req, _ := svc.InputService8TestCaseOperation1Request(input) + r := req.HTTPRequest + + // build request + restxml.Build(req) + assert.NoError(t, req.Error) + + // assert body + assert.NotNil(t, r.Body) + body := util.SortXML(r.Body) + awstesting.AssertXML(t, `onetwothree`, util.Trim(string(body)), InputService8TestShapeInputService8TestCaseOperation1Input{}) + + // assert URL + awstesting.AssertURL(t, "https://test/2014-01-01/hostedzone", r.URL.String()) + + // assert headers + +} + +func TestInputService9ProtocolTestListOfStructuresCase1(t *testing.T) { + sess := session.New() + svc := NewInputService9ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + + input := &InputService9TestShapeInputService9TestCaseOperation1Input{ + ListParam: []*InputService9TestShapeSingleFieldStruct{ + { + Element: aws.String("one"), + }, + { + Element: aws.String("two"), + }, 
+ { + Element: aws.String("three"), + }, + }, + } + req, _ := svc.InputService9TestCaseOperation1Request(input) + r := req.HTTPRequest + + // build request + restxml.Build(req) + assert.NoError(t, req.Error) + + // assert body + assert.NotNil(t, r.Body) + body := util.SortXML(r.Body) + awstesting.AssertXML(t, `onetwothree`, util.Trim(string(body)), InputService9TestShapeInputService9TestCaseOperation1Input{}) + + // assert URL + awstesting.AssertURL(t, "https://test/2014-01-01/hostedzone", r.URL.String()) + + // assert headers + +} + +func TestInputService10ProtocolTestBlobAndTimestampShapesCase1(t *testing.T) { + sess := session.New() + svc := NewInputService10ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + + input := &InputService10TestShapeInputService10TestCaseOperation1Input{ + StructureParam: &InputService10TestShapeStructureShape{ + B: []byte("foo"), + T: aws.Time(time.Unix(1422172800, 0)), + }, + } + req, _ := svc.InputService10TestCaseOperation1Request(input) + r := req.HTTPRequest + + // build request + restxml.Build(req) + assert.NoError(t, req.Error) + + // assert body + assert.NotNil(t, r.Body) + body := util.SortXML(r.Body) + awstesting.AssertXML(t, `Zm9v2015-01-25T08:00:00Z`, util.Trim(string(body)), InputService10TestShapeInputService10TestCaseOperation1Input{}) + + // assert URL + awstesting.AssertURL(t, "https://test/2014-01-01/hostedzone", r.URL.String()) + + // assert headers + +} + +func TestInputService11ProtocolTestHeaderMapsCase1(t *testing.T) { + sess := session.New() + svc := NewInputService11ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + + input := &InputService11TestShapeInputService11TestCaseOperation1Input{ + Foo: map[string]*string{ + "a": aws.String("b"), + "c": aws.String("d"), + }, + } + req, _ := svc.InputService11TestCaseOperation1Request(input) + r := req.HTTPRequest + + // build request + restxml.Build(req) + assert.NoError(t, req.Error) + + // assert URL + awstesting.AssertURL(t, "https://test/", r.URL.String()) + + // assert headers + assert.Equal(t, "b", r.Header.Get("x-foo-a")) + assert.Equal(t, "d", r.Header.Get("x-foo-c")) + +} + +func TestInputService12ProtocolTestStringToStringMapsInQuerystringCase1(t *testing.T) { + sess := session.New() + svc := NewInputService12ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + + input := &InputService12TestShapeInputService12TestCaseOperation1Input{ + PipelineId: aws.String("foo"), + QueryDoc: map[string]*string{ + "bar": aws.String("baz"), + "fizz": aws.String("buzz"), + }, + } + req, _ := svc.InputService12TestCaseOperation1Request(input) + r := req.HTTPRequest + + // build request + restxml.Build(req) + assert.NoError(t, req.Error) + + // assert URL + awstesting.AssertURL(t, "https://test/2014-01-01/jobsByPipeline/foo?bar=baz&fizz=buzz", r.URL.String()) + + // assert headers + +} + +func TestInputService13ProtocolTestStringToStringListMapsInQuerystringCase1(t *testing.T) { + sess := session.New() + svc := NewInputService13ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + + input := &InputService13TestShapeInputService13TestCaseOperation1Input{ + PipelineId: aws.String("id"), + QueryDoc: map[string][]*string{ + "fizz": { + aws.String("buzz"), + aws.String("pop"), + }, + "foo": { + aws.String("bar"), + aws.String("baz"), + }, + }, + } + req, _ := svc.InputService13TestCaseOperation1Request(input) + r := req.HTTPRequest + + // build request + restxml.Build(req) + assert.NoError(t, req.Error) + + // assert URL + 
awstesting.AssertURL(t, "https://test/2014-01-01/jobsByPipeline/id?foo=bar&foo=baz&fizz=buzz&fizz=pop", r.URL.String()) + + // assert headers + +} + +func TestInputService14ProtocolTestStringPayloadCase1(t *testing.T) { + sess := session.New() + svc := NewInputService14ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + + input := &InputService14TestShapeInputService14TestCaseOperation1Input{ + Foo: aws.String("bar"), + } + req, _ := svc.InputService14TestCaseOperation1Request(input) + r := req.HTTPRequest + + // build request + restxml.Build(req) + assert.NoError(t, req.Error) + + // assert body + assert.NotNil(t, r.Body) + body := util.SortXML(r.Body) + assert.Equal(t, `bar`, util.Trim(string(body))) + + // assert URL + awstesting.AssertURL(t, "https://test/", r.URL.String()) + + // assert headers + +} + +func TestInputService15ProtocolTestBlobPayloadCase1(t *testing.T) { + sess := session.New() + svc := NewInputService15ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + + input := &InputService15TestShapeInputShape{ + Foo: []byte("bar"), + } + req, _ := svc.InputService15TestCaseOperation1Request(input) + r := req.HTTPRequest + + // build request + restxml.Build(req) + assert.NoError(t, req.Error) + + // assert body + assert.NotNil(t, r.Body) + body := util.SortXML(r.Body) + assert.Equal(t, `bar`, util.Trim(string(body))) + + // assert URL + awstesting.AssertURL(t, "https://test/", r.URL.String()) + + // assert headers + +} + +func TestInputService15ProtocolTestBlobPayloadCase2(t *testing.T) { + sess := session.New() + svc := NewInputService15ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + + input := &InputService15TestShapeInputShape{} + req, _ := svc.InputService15TestCaseOperation2Request(input) + r := req.HTTPRequest + + // build request + restxml.Build(req) + assert.NoError(t, req.Error) + + // assert URL + awstesting.AssertURL(t, "https://test/", r.URL.String()) + + // assert headers + +} + +func TestInputService16ProtocolTestStructurePayloadCase1(t *testing.T) { + sess := session.New() + svc := NewInputService16ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + + input := &InputService16TestShapeInputShape{ + Foo: &InputService16TestShapeFooShape{ + Baz: aws.String("bar"), + }, + } + req, _ := svc.InputService16TestCaseOperation1Request(input) + r := req.HTTPRequest + + // build request + restxml.Build(req) + assert.NoError(t, req.Error) + + // assert body + assert.NotNil(t, r.Body) + body := util.SortXML(r.Body) + awstesting.AssertXML(t, `bar`, util.Trim(string(body)), InputService16TestShapeInputShape{}) + + // assert URL + awstesting.AssertURL(t, "https://test/", r.URL.String()) + + // assert headers + +} + +func TestInputService16ProtocolTestStructurePayloadCase2(t *testing.T) { + sess := session.New() + svc := NewInputService16ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + + input := &InputService16TestShapeInputShape{} + req, _ := svc.InputService16TestCaseOperation2Request(input) + r := req.HTTPRequest + + // build request + restxml.Build(req) + assert.NoError(t, req.Error) + + // assert URL + awstesting.AssertURL(t, "https://test/", r.URL.String()) + + // assert headers + +} + +func TestInputService16ProtocolTestStructurePayloadCase3(t *testing.T) { + sess := session.New() + svc := NewInputService16ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + + input := &InputService16TestShapeInputShape{ + Foo: &InputService16TestShapeFooShape{}, + } + 
req, _ := svc.InputService16TestCaseOperation3Request(input) + r := req.HTTPRequest + + // build request + restxml.Build(req) + assert.NoError(t, req.Error) + + // assert body + assert.NotNil(t, r.Body) + body := util.SortXML(r.Body) + awstesting.AssertXML(t, ``, util.Trim(string(body)), InputService16TestShapeInputShape{}) + + // assert URL + awstesting.AssertURL(t, "https://test/", r.URL.String()) + + // assert headers + +} + +func TestInputService16ProtocolTestStructurePayloadCase4(t *testing.T) { + sess := session.New() + svc := NewInputService16ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + + input := &InputService16TestShapeInputShape{} + req, _ := svc.InputService16TestCaseOperation4Request(input) + r := req.HTTPRequest + + // build request + restxml.Build(req) + assert.NoError(t, req.Error) + + // assert URL + awstesting.AssertURL(t, "https://test/", r.URL.String()) + + // assert headers + +} + +func TestInputService17ProtocolTestXMLAttributeCase1(t *testing.T) { + sess := session.New() + svc := NewInputService17ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + + input := &InputService17TestShapeInputService17TestCaseOperation1Input{ + Grant: &InputService17TestShapeGrant{ + Grantee: &InputService17TestShapeGrantee{ + EmailAddress: aws.String("foo@example.com"), + Type: aws.String("CanonicalUser"), + }, + }, + } + req, _ := svc.InputService17TestCaseOperation1Request(input) + r := req.HTTPRequest + + // build request + restxml.Build(req) + assert.NoError(t, req.Error) + + // assert body + assert.NotNil(t, r.Body) + body := util.SortXML(r.Body) + awstesting.AssertXML(t, `foo@example.com`, util.Trim(string(body)), InputService17TestShapeInputService17TestCaseOperation1Input{}) + + // assert URL + awstesting.AssertURL(t, "https://test/", r.URL.String()) + + // assert headers + +} + +func TestInputService18ProtocolTestGreedyKeysCase1(t *testing.T) { + sess := session.New() + svc := NewInputService18ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + + input := &InputService18TestShapeInputService18TestCaseOperation1Input{ + Bucket: aws.String("my/bucket"), + Key: aws.String("testing /123"), + } + req, _ := svc.InputService18TestCaseOperation1Request(input) + r := req.HTTPRequest + + // build request + restxml.Build(req) + assert.NoError(t, req.Error) + + // assert URL + awstesting.AssertURL(t, "https://test/my%2Fbucket/testing%20/123", r.URL.String()) + + // assert headers + +} + +func TestInputService19ProtocolTestOmitsNullQueryParamsButSerializesEmptyStringsCase1(t *testing.T) { + sess := session.New() + svc := NewInputService19ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + + input := &InputService19TestShapeInputShape{} + req, _ := svc.InputService19TestCaseOperation1Request(input) + r := req.HTTPRequest + + // build request + restxml.Build(req) + assert.NoError(t, req.Error) + + // assert URL + awstesting.AssertURL(t, "https://test/path", r.URL.String()) + + // assert headers + +} + +func TestInputService19ProtocolTestOmitsNullQueryParamsButSerializesEmptyStringsCase2(t *testing.T) { + sess := session.New() + svc := NewInputService19ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + + input := &InputService19TestShapeInputShape{ + Foo: aws.String(""), + } + req, _ := svc.InputService19TestCaseOperation2Request(input) + r := req.HTTPRequest + + // build request + restxml.Build(req) + assert.NoError(t, req.Error) + + // assert URL + awstesting.AssertURL(t, 
"https://test/path?abc=mno¶m-name=", r.URL.String()) + + // assert headers + +} + +func TestInputService20ProtocolTestRecursiveShapesCase1(t *testing.T) { + sess := session.New() + svc := NewInputService20ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + + input := &InputService20TestShapeInputShape{ + RecursiveStruct: &InputService20TestShapeRecursiveStructType{ + NoRecurse: aws.String("foo"), + }, + } + req, _ := svc.InputService20TestCaseOperation1Request(input) + r := req.HTTPRequest + + // build request + restxml.Build(req) + assert.NoError(t, req.Error) + + // assert body + assert.NotNil(t, r.Body) + body := util.SortXML(r.Body) + awstesting.AssertXML(t, `foo`, util.Trim(string(body)), InputService20TestShapeInputShape{}) + + // assert URL + awstesting.AssertURL(t, "https://test/path", r.URL.String()) + + // assert headers + +} + +func TestInputService20ProtocolTestRecursiveShapesCase2(t *testing.T) { + sess := session.New() + svc := NewInputService20ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + + input := &InputService20TestShapeInputShape{ + RecursiveStruct: &InputService20TestShapeRecursiveStructType{ + RecursiveStruct: &InputService20TestShapeRecursiveStructType{ + NoRecurse: aws.String("foo"), + }, + }, + } + req, _ := svc.InputService20TestCaseOperation2Request(input) + r := req.HTTPRequest + + // build request + restxml.Build(req) + assert.NoError(t, req.Error) + + // assert body + assert.NotNil(t, r.Body) + body := util.SortXML(r.Body) + awstesting.AssertXML(t, `foo`, util.Trim(string(body)), InputService20TestShapeInputShape{}) + + // assert URL + awstesting.AssertURL(t, "https://test/path", r.URL.String()) + + // assert headers + +} + +func TestInputService20ProtocolTestRecursiveShapesCase3(t *testing.T) { + sess := session.New() + svc := NewInputService20ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + + input := &InputService20TestShapeInputShape{ + RecursiveStruct: &InputService20TestShapeRecursiveStructType{ + RecursiveStruct: &InputService20TestShapeRecursiveStructType{ + RecursiveStruct: &InputService20TestShapeRecursiveStructType{ + RecursiveStruct: &InputService20TestShapeRecursiveStructType{ + NoRecurse: aws.String("foo"), + }, + }, + }, + }, + } + req, _ := svc.InputService20TestCaseOperation3Request(input) + r := req.HTTPRequest + + // build request + restxml.Build(req) + assert.NoError(t, req.Error) + + // assert body + assert.NotNil(t, r.Body) + body := util.SortXML(r.Body) + awstesting.AssertXML(t, `foo`, util.Trim(string(body)), InputService20TestShapeInputShape{}) + + // assert URL + awstesting.AssertURL(t, "https://test/path", r.URL.String()) + + // assert headers + +} + +func TestInputService20ProtocolTestRecursiveShapesCase4(t *testing.T) { + sess := session.New() + svc := NewInputService20ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + + input := &InputService20TestShapeInputShape{ + RecursiveStruct: &InputService20TestShapeRecursiveStructType{ + RecursiveList: []*InputService20TestShapeRecursiveStructType{ + { + NoRecurse: aws.String("foo"), + }, + { + NoRecurse: aws.String("bar"), + }, + }, + }, + } + req, _ := svc.InputService20TestCaseOperation4Request(input) + r := req.HTTPRequest + + // build request + restxml.Build(req) + assert.NoError(t, req.Error) + + // assert body + assert.NotNil(t, r.Body) + body := util.SortXML(r.Body) + awstesting.AssertXML(t, `foobar`, util.Trim(string(body)), InputService20TestShapeInputShape{}) + + // assert URL + 
awstesting.AssertURL(t, "https://test/path", r.URL.String()) + + // assert headers + +} + +func TestInputService20ProtocolTestRecursiveShapesCase5(t *testing.T) { + sess := session.New() + svc := NewInputService20ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + + input := &InputService20TestShapeInputShape{ + RecursiveStruct: &InputService20TestShapeRecursiveStructType{ + RecursiveList: []*InputService20TestShapeRecursiveStructType{ + { + NoRecurse: aws.String("foo"), + }, + { + RecursiveStruct: &InputService20TestShapeRecursiveStructType{ + NoRecurse: aws.String("bar"), + }, + }, + }, + }, + } + req, _ := svc.InputService20TestCaseOperation5Request(input) + r := req.HTTPRequest + + // build request + restxml.Build(req) + assert.NoError(t, req.Error) + + // assert body + assert.NotNil(t, r.Body) + body := util.SortXML(r.Body) + awstesting.AssertXML(t, `foobar`, util.Trim(string(body)), InputService20TestShapeInputShape{}) + + // assert URL + awstesting.AssertURL(t, "https://test/path", r.URL.String()) + + // assert headers + +} + +func TestInputService20ProtocolTestRecursiveShapesCase6(t *testing.T) { + sess := session.New() + svc := NewInputService20ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + + input := &InputService20TestShapeInputShape{ + RecursiveStruct: &InputService20TestShapeRecursiveStructType{ + RecursiveMap: map[string]*InputService20TestShapeRecursiveStructType{ + "bar": { + NoRecurse: aws.String("bar"), + }, + "foo": { + NoRecurse: aws.String("foo"), + }, + }, + }, + } + req, _ := svc.InputService20TestCaseOperation6Request(input) + r := req.HTTPRequest + + // build request + restxml.Build(req) + assert.NoError(t, req.Error) + + // assert body + assert.NotNil(t, r.Body) + body := util.SortXML(r.Body) + awstesting.AssertXML(t, `foofoobarbar`, util.Trim(string(body)), InputService20TestShapeInputShape{}) + + // assert URL + awstesting.AssertURL(t, "https://test/path", r.URL.String()) + + // assert headers + +} + +func TestInputService21ProtocolTestTimestampInHeaderCase1(t *testing.T) { + sess := session.New() + svc := NewInputService21ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + + input := &InputService21TestShapeInputService21TestCaseOperation1Input{ + TimeArgInHeader: aws.Time(time.Unix(1422172800, 0)), + } + req, _ := svc.InputService21TestCaseOperation1Request(input) + r := req.HTTPRequest + + // build request + restxml.Build(req) + assert.NoError(t, req.Error) + + // assert URL + awstesting.AssertURL(t, "https://test/path", r.URL.String()) + + // assert headers + assert.Equal(t, "Sun, 25 Jan 2015 08:00:00 GMT", r.Header.Get("x-amz-timearg")) + +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/restxml/restxml.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/restxml/restxml.go new file mode 100644 index 000000000..a6bc0c74c --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/restxml/restxml.go @@ -0,0 +1,57 @@ +// Package restxml provides RESTful XML serialisation of AWS +// requests and responses. 
+package restxml + +//go:generate go run ../../../models/protocol_tests/generate.go ../../../models/protocol_tests/input/rest-xml.json build_test.go +//go:generate go run ../../../models/protocol_tests/generate.go ../../../models/protocol_tests/output/rest-xml.json unmarshal_test.go + +import ( + "bytes" + "encoding/xml" + + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol/query" + "github.com/aws/aws-sdk-go/private/protocol/rest" + "github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil" +) + +// Build builds a request payload for the REST XML protocol. +func Build(r *request.Request) { + rest.Build(r) + + if t := rest.PayloadType(r.Params); t == "structure" || t == "" { + var buf bytes.Buffer + err := xmlutil.BuildXML(r.Params, xml.NewEncoder(&buf)) + if err != nil { + r.Error = awserr.New("SerializationError", "failed to encode rest XML request", err) + return + } + r.SetBufferBody(buf.Bytes()) + } +} + +// Unmarshal unmarshals a payload response for the REST XML protocol. +func Unmarshal(r *request.Request) { + if t := rest.PayloadType(r.Data); t == "structure" || t == "" { + defer r.HTTPResponse.Body.Close() + decoder := xml.NewDecoder(r.HTTPResponse.Body) + err := xmlutil.UnmarshalXML(r.Data, decoder, "") + if err != nil { + r.Error = awserr.New("SerializationError", "failed to decode REST XML response", err) + return + } + } else { + rest.Unmarshal(r) + } +} + +// UnmarshalMeta unmarshals response headers for the REST XML protocol. +func UnmarshalMeta(r *request.Request) { + rest.UnmarshalMeta(r) +} + +// UnmarshalError unmarshals a response error for the REST XML protocol. +func UnmarshalError(r *request.Request) { + query.UnmarshalError(r) +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/restxml/unmarshal_test.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/restxml/unmarshal_test.go new file mode 100644 index 000000000..0cf44db87 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/restxml/unmarshal_test.go @@ -0,0 +1,1498 @@ +package restxml_test + +import ( + "bytes" + "encoding/json" + "encoding/xml" + "io" + "io/ioutil" + "net/http" + "net/url" + "testing" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/client" + "github.com/aws/aws-sdk-go/aws/client/metadata" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/aws/session" + "github.com/aws/aws-sdk-go/awstesting" + "github.com/aws/aws-sdk-go/private/protocol/restxml" + "github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil" + "github.com/aws/aws-sdk-go/private/signer/v4" + "github.com/aws/aws-sdk-go/private/util" + "github.com/stretchr/testify/assert" +) + +var _ bytes.Buffer // always import bytes +var _ http.Request +var _ json.Marshaler +var _ time.Time +var _ xmlutil.XMLNode +var _ xml.Attr +var _ = awstesting.GenerateAssertions +var _ = ioutil.Discard +var _ = util.Trim("") +var _ = url.Values{} +var _ = io.EOF +var _ = aws.String + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type OutputService1ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the OutputService1ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a OutputService1ProtocolTest client from just a session. 
+// svc := outputservice1protocoltest.New(mySession) +// +// // Create a OutputService1ProtocolTest client with additional configuration +// svc := outputservice1protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewOutputService1ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *OutputService1ProtocolTest { + c := p.ClientConfig("outputservice1protocoltest", cfgs...) + return newOutputService1ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newOutputService1ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *OutputService1ProtocolTest { + svc := &OutputService1ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "outputservice1protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Build.PushBack(restxml.Build) + svc.Handlers.Unmarshal.PushBack(restxml.Unmarshal) + svc.Handlers.UnmarshalMeta.PushBack(restxml.UnmarshalMeta) + svc.Handlers.UnmarshalError.PushBack(restxml.UnmarshalError) + + return svc +} + +// newRequest creates a new request for a OutputService1ProtocolTest operation and runs any +// custom request initialization. +func (c *OutputService1ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opOutputService1TestCaseOperation1 = "OperationName" + +// OutputService1TestCaseOperation1Request generates a request for the OutputService1TestCaseOperation1 operation. +func (c *OutputService1ProtocolTest) OutputService1TestCaseOperation1Request(input *OutputService1TestShapeOutputService1TestCaseOperation1Input) (req *request.Request, output *OutputService1TestShapeOutputShape) { + op := &request.Operation{ + Name: opOutputService1TestCaseOperation1, + } + + if input == nil { + input = &OutputService1TestShapeOutputService1TestCaseOperation1Input{} + } + + req = c.newRequest(op, input, output) + output = &OutputService1TestShapeOutputShape{} + req.Data = output + return +} + +func (c *OutputService1ProtocolTest) OutputService1TestCaseOperation1(input *OutputService1TestShapeOutputService1TestCaseOperation1Input) (*OutputService1TestShapeOutputShape, error) { + req, out := c.OutputService1TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +const opOutputService1TestCaseOperation2 = "OperationName" + +// OutputService1TestCaseOperation2Request generates a request for the OutputService1TestCaseOperation2 operation. 
+func (c *OutputService1ProtocolTest) OutputService1TestCaseOperation2Request(input *OutputService1TestShapeOutputService1TestCaseOperation2Input) (req *request.Request, output *OutputService1TestShapeOutputShape) { + op := &request.Operation{ + Name: opOutputService1TestCaseOperation2, + } + + if input == nil { + input = &OutputService1TestShapeOutputService1TestCaseOperation2Input{} + } + + req = c.newRequest(op, input, output) + output = &OutputService1TestShapeOutputShape{} + req.Data = output + return +} + +func (c *OutputService1ProtocolTest) OutputService1TestCaseOperation2(input *OutputService1TestShapeOutputService1TestCaseOperation2Input) (*OutputService1TestShapeOutputShape, error) { + req, out := c.OutputService1TestCaseOperation2Request(input) + err := req.Send() + return out, err +} + +type OutputService1TestShapeOutputService1TestCaseOperation1Input struct { + _ struct{} `type:"structure"` +} + +type OutputService1TestShapeOutputService1TestCaseOperation2Input struct { + _ struct{} `type:"structure"` +} + +type OutputService1TestShapeOutputShape struct { + _ struct{} `type:"structure"` + + Char *string `type:"character"` + + Double *float64 `type:"double"` + + FalseBool *bool `type:"boolean"` + + Float *float64 `type:"float"` + + ImaHeader *string `location:"header" type:"string"` + + ImaHeaderLocation *string `location:"header" locationName:"X-Foo" type:"string"` + + Long *int64 `type:"long"` + + Num *int64 `locationName:"FooNum" type:"integer"` + + Str *string `type:"string"` + + Timestamp *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + TrueBool *bool `type:"boolean"` +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type OutputService2ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the OutputService2ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a OutputService2ProtocolTest client from just a session. +// svc := outputservice2protocoltest.New(mySession) +// +// // Create a OutputService2ProtocolTest client with additional configuration +// svc := outputservice2protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewOutputService2ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *OutputService2ProtocolTest { + c := p.ClientConfig("outputservice2protocoltest", cfgs...) + return newOutputService2ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newOutputService2ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *OutputService2ProtocolTest { + svc := &OutputService2ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "outputservice2protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Build.PushBack(restxml.Build) + svc.Handlers.Unmarshal.PushBack(restxml.Unmarshal) + svc.Handlers.UnmarshalMeta.PushBack(restxml.UnmarshalMeta) + svc.Handlers.UnmarshalError.PushBack(restxml.UnmarshalError) + + return svc +} + +// newRequest creates a new request for a OutputService2ProtocolTest operation and runs any +// custom request initialization. 
+func (c *OutputService2ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opOutputService2TestCaseOperation1 = "OperationName" + +// OutputService2TestCaseOperation1Request generates a request for the OutputService2TestCaseOperation1 operation. +func (c *OutputService2ProtocolTest) OutputService2TestCaseOperation1Request(input *OutputService2TestShapeOutputService2TestCaseOperation1Input) (req *request.Request, output *OutputService2TestShapeOutputService2TestCaseOperation1Output) { + op := &request.Operation{ + Name: opOutputService2TestCaseOperation1, + } + + if input == nil { + input = &OutputService2TestShapeOutputService2TestCaseOperation1Input{} + } + + req = c.newRequest(op, input, output) + output = &OutputService2TestShapeOutputService2TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *OutputService2ProtocolTest) OutputService2TestCaseOperation1(input *OutputService2TestShapeOutputService2TestCaseOperation1Input) (*OutputService2TestShapeOutputService2TestCaseOperation1Output, error) { + req, out := c.OutputService2TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +type OutputService2TestShapeOutputService2TestCaseOperation1Input struct { + _ struct{} `type:"structure"` +} + +type OutputService2TestShapeOutputService2TestCaseOperation1Output struct { + _ struct{} `type:"structure"` + + Blob []byte `type:"blob"` +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type OutputService3ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the OutputService3ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a OutputService3ProtocolTest client from just a session. +// svc := outputservice3protocoltest.New(mySession) +// +// // Create a OutputService3ProtocolTest client with additional configuration +// svc := outputservice3protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewOutputService3ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *OutputService3ProtocolTest { + c := p.ClientConfig("outputservice3protocoltest", cfgs...) + return newOutputService3ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newOutputService3ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *OutputService3ProtocolTest { + svc := &OutputService3ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "outputservice3protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Build.PushBack(restxml.Build) + svc.Handlers.Unmarshal.PushBack(restxml.Unmarshal) + svc.Handlers.UnmarshalMeta.PushBack(restxml.UnmarshalMeta) + svc.Handlers.UnmarshalError.PushBack(restxml.UnmarshalError) + + return svc +} + +// newRequest creates a new request for a OutputService3ProtocolTest operation and runs any +// custom request initialization. 
+func (c *OutputService3ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opOutputService3TestCaseOperation1 = "OperationName" + +// OutputService3TestCaseOperation1Request generates a request for the OutputService3TestCaseOperation1 operation. +func (c *OutputService3ProtocolTest) OutputService3TestCaseOperation1Request(input *OutputService3TestShapeOutputService3TestCaseOperation1Input) (req *request.Request, output *OutputService3TestShapeOutputService3TestCaseOperation1Output) { + op := &request.Operation{ + Name: opOutputService3TestCaseOperation1, + } + + if input == nil { + input = &OutputService3TestShapeOutputService3TestCaseOperation1Input{} + } + + req = c.newRequest(op, input, output) + output = &OutputService3TestShapeOutputService3TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *OutputService3ProtocolTest) OutputService3TestCaseOperation1(input *OutputService3TestShapeOutputService3TestCaseOperation1Input) (*OutputService3TestShapeOutputService3TestCaseOperation1Output, error) { + req, out := c.OutputService3TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +type OutputService3TestShapeOutputService3TestCaseOperation1Input struct { + _ struct{} `type:"structure"` +} + +type OutputService3TestShapeOutputService3TestCaseOperation1Output struct { + _ struct{} `type:"structure"` + + ListMember []*string `type:"list"` +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type OutputService4ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the OutputService4ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a OutputService4ProtocolTest client from just a session. +// svc := outputservice4protocoltest.New(mySession) +// +// // Create a OutputService4ProtocolTest client with additional configuration +// svc := outputservice4protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewOutputService4ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *OutputService4ProtocolTest { + c := p.ClientConfig("outputservice4protocoltest", cfgs...) + return newOutputService4ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newOutputService4ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *OutputService4ProtocolTest { + svc := &OutputService4ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "outputservice4protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Build.PushBack(restxml.Build) + svc.Handlers.Unmarshal.PushBack(restxml.Unmarshal) + svc.Handlers.UnmarshalMeta.PushBack(restxml.UnmarshalMeta) + svc.Handlers.UnmarshalError.PushBack(restxml.UnmarshalError) + + return svc +} + +// newRequest creates a new request for a OutputService4ProtocolTest operation and runs any +// custom request initialization. 
+func (c *OutputService4ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opOutputService4TestCaseOperation1 = "OperationName" + +// OutputService4TestCaseOperation1Request generates a request for the OutputService4TestCaseOperation1 operation. +func (c *OutputService4ProtocolTest) OutputService4TestCaseOperation1Request(input *OutputService4TestShapeOutputService4TestCaseOperation1Input) (req *request.Request, output *OutputService4TestShapeOutputService4TestCaseOperation1Output) { + op := &request.Operation{ + Name: opOutputService4TestCaseOperation1, + } + + if input == nil { + input = &OutputService4TestShapeOutputService4TestCaseOperation1Input{} + } + + req = c.newRequest(op, input, output) + output = &OutputService4TestShapeOutputService4TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *OutputService4ProtocolTest) OutputService4TestCaseOperation1(input *OutputService4TestShapeOutputService4TestCaseOperation1Input) (*OutputService4TestShapeOutputService4TestCaseOperation1Output, error) { + req, out := c.OutputService4TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +type OutputService4TestShapeOutputService4TestCaseOperation1Input struct { + _ struct{} `type:"structure"` +} + +type OutputService4TestShapeOutputService4TestCaseOperation1Output struct { + _ struct{} `type:"structure"` + + ListMember []*string `locationNameList:"item" type:"list"` +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type OutputService5ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the OutputService5ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a OutputService5ProtocolTest client from just a session. +// svc := outputservice5protocoltest.New(mySession) +// +// // Create a OutputService5ProtocolTest client with additional configuration +// svc := outputservice5protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewOutputService5ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *OutputService5ProtocolTest { + c := p.ClientConfig("outputservice5protocoltest", cfgs...) + return newOutputService5ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newOutputService5ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *OutputService5ProtocolTest { + svc := &OutputService5ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "outputservice5protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Build.PushBack(restxml.Build) + svc.Handlers.Unmarshal.PushBack(restxml.Unmarshal) + svc.Handlers.UnmarshalMeta.PushBack(restxml.UnmarshalMeta) + svc.Handlers.UnmarshalError.PushBack(restxml.UnmarshalError) + + return svc +} + +// newRequest creates a new request for a OutputService5ProtocolTest operation and runs any +// custom request initialization. 
+func (c *OutputService5ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opOutputService5TestCaseOperation1 = "OperationName" + +// OutputService5TestCaseOperation1Request generates a request for the OutputService5TestCaseOperation1 operation. +func (c *OutputService5ProtocolTest) OutputService5TestCaseOperation1Request(input *OutputService5TestShapeOutputService5TestCaseOperation1Input) (req *request.Request, output *OutputService5TestShapeOutputService5TestCaseOperation1Output) { + op := &request.Operation{ + Name: opOutputService5TestCaseOperation1, + } + + if input == nil { + input = &OutputService5TestShapeOutputService5TestCaseOperation1Input{} + } + + req = c.newRequest(op, input, output) + output = &OutputService5TestShapeOutputService5TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *OutputService5ProtocolTest) OutputService5TestCaseOperation1(input *OutputService5TestShapeOutputService5TestCaseOperation1Input) (*OutputService5TestShapeOutputService5TestCaseOperation1Output, error) { + req, out := c.OutputService5TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +type OutputService5TestShapeOutputService5TestCaseOperation1Input struct { + _ struct{} `type:"structure"` +} + +type OutputService5TestShapeOutputService5TestCaseOperation1Output struct { + _ struct{} `type:"structure"` + + ListMember []*string `type:"list" flattened:"true"` +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type OutputService6ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the OutputService6ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a OutputService6ProtocolTest client from just a session. +// svc := outputservice6protocoltest.New(mySession) +// +// // Create a OutputService6ProtocolTest client with additional configuration +// svc := outputservice6protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewOutputService6ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *OutputService6ProtocolTest { + c := p.ClientConfig("outputservice6protocoltest", cfgs...) + return newOutputService6ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newOutputService6ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *OutputService6ProtocolTest { + svc := &OutputService6ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "outputservice6protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Build.PushBack(restxml.Build) + svc.Handlers.Unmarshal.PushBack(restxml.Unmarshal) + svc.Handlers.UnmarshalMeta.PushBack(restxml.UnmarshalMeta) + svc.Handlers.UnmarshalError.PushBack(restxml.UnmarshalError) + + return svc +} + +// newRequest creates a new request for a OutputService6ProtocolTest operation and runs any +// custom request initialization. 
+func (c *OutputService6ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opOutputService6TestCaseOperation1 = "OperationName" + +// OutputService6TestCaseOperation1Request generates a request for the OutputService6TestCaseOperation1 operation. +func (c *OutputService6ProtocolTest) OutputService6TestCaseOperation1Request(input *OutputService6TestShapeOutputService6TestCaseOperation1Input) (req *request.Request, output *OutputService6TestShapeOutputService6TestCaseOperation1Output) { + op := &request.Operation{ + Name: opOutputService6TestCaseOperation1, + } + + if input == nil { + input = &OutputService6TestShapeOutputService6TestCaseOperation1Input{} + } + + req = c.newRequest(op, input, output) + output = &OutputService6TestShapeOutputService6TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *OutputService6ProtocolTest) OutputService6TestCaseOperation1(input *OutputService6TestShapeOutputService6TestCaseOperation1Input) (*OutputService6TestShapeOutputService6TestCaseOperation1Output, error) { + req, out := c.OutputService6TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +type OutputService6TestShapeOutputService6TestCaseOperation1Input struct { + _ struct{} `type:"structure"` +} + +type OutputService6TestShapeOutputService6TestCaseOperation1Output struct { + _ struct{} `type:"structure"` + + Map map[string]*OutputService6TestShapeSingleStructure `type:"map"` +} + +type OutputService6TestShapeSingleStructure struct { + _ struct{} `type:"structure"` + + Foo *string `locationName:"foo" type:"string"` +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type OutputService7ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the OutputService7ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a OutputService7ProtocolTest client from just a session. +// svc := outputservice7protocoltest.New(mySession) +// +// // Create a OutputService7ProtocolTest client with additional configuration +// svc := outputservice7protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewOutputService7ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *OutputService7ProtocolTest { + c := p.ClientConfig("outputservice7protocoltest", cfgs...) + return newOutputService7ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. 
+func newOutputService7ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *OutputService7ProtocolTest { + svc := &OutputService7ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "outputservice7protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Build.PushBack(restxml.Build) + svc.Handlers.Unmarshal.PushBack(restxml.Unmarshal) + svc.Handlers.UnmarshalMeta.PushBack(restxml.UnmarshalMeta) + svc.Handlers.UnmarshalError.PushBack(restxml.UnmarshalError) + + return svc +} + +// newRequest creates a new request for a OutputService7ProtocolTest operation and runs any +// custom request initialization. +func (c *OutputService7ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opOutputService7TestCaseOperation1 = "OperationName" + +// OutputService7TestCaseOperation1Request generates a request for the OutputService7TestCaseOperation1 operation. +func (c *OutputService7ProtocolTest) OutputService7TestCaseOperation1Request(input *OutputService7TestShapeOutputService7TestCaseOperation1Input) (req *request.Request, output *OutputService7TestShapeOutputService7TestCaseOperation1Output) { + op := &request.Operation{ + Name: opOutputService7TestCaseOperation1, + } + + if input == nil { + input = &OutputService7TestShapeOutputService7TestCaseOperation1Input{} + } + + req = c.newRequest(op, input, output) + output = &OutputService7TestShapeOutputService7TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *OutputService7ProtocolTest) OutputService7TestCaseOperation1(input *OutputService7TestShapeOutputService7TestCaseOperation1Input) (*OutputService7TestShapeOutputService7TestCaseOperation1Output, error) { + req, out := c.OutputService7TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +type OutputService7TestShapeOutputService7TestCaseOperation1Input struct { + _ struct{} `type:"structure"` +} + +type OutputService7TestShapeOutputService7TestCaseOperation1Output struct { + _ struct{} `type:"structure"` + + Map map[string]*string `type:"map" flattened:"true"` +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type OutputService8ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the OutputService8ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a OutputService8ProtocolTest client from just a session. +// svc := outputservice8protocoltest.New(mySession) +// +// // Create a OutputService8ProtocolTest client with additional configuration +// svc := outputservice8protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewOutputService8ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *OutputService8ProtocolTest { + c := p.ClientConfig("outputservice8protocoltest", cfgs...) + return newOutputService8ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. 
+func newOutputService8ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *OutputService8ProtocolTest { + svc := &OutputService8ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "outputservice8protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Build.PushBack(restxml.Build) + svc.Handlers.Unmarshal.PushBack(restxml.Unmarshal) + svc.Handlers.UnmarshalMeta.PushBack(restxml.UnmarshalMeta) + svc.Handlers.UnmarshalError.PushBack(restxml.UnmarshalError) + + return svc +} + +// newRequest creates a new request for a OutputService8ProtocolTest operation and runs any +// custom request initialization. +func (c *OutputService8ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opOutputService8TestCaseOperation1 = "OperationName" + +// OutputService8TestCaseOperation1Request generates a request for the OutputService8TestCaseOperation1 operation. +func (c *OutputService8ProtocolTest) OutputService8TestCaseOperation1Request(input *OutputService8TestShapeOutputService8TestCaseOperation1Input) (req *request.Request, output *OutputService8TestShapeOutputService8TestCaseOperation1Output) { + op := &request.Operation{ + Name: opOutputService8TestCaseOperation1, + } + + if input == nil { + input = &OutputService8TestShapeOutputService8TestCaseOperation1Input{} + } + + req = c.newRequest(op, input, output) + output = &OutputService8TestShapeOutputService8TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *OutputService8ProtocolTest) OutputService8TestCaseOperation1(input *OutputService8TestShapeOutputService8TestCaseOperation1Input) (*OutputService8TestShapeOutputService8TestCaseOperation1Output, error) { + req, out := c.OutputService8TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +type OutputService8TestShapeOutputService8TestCaseOperation1Input struct { + _ struct{} `type:"structure"` +} + +type OutputService8TestShapeOutputService8TestCaseOperation1Output struct { + _ struct{} `type:"structure"` + + Map map[string]*string `locationNameKey:"foo" locationNameValue:"bar" type:"map"` +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type OutputService9ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the OutputService9ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a OutputService9ProtocolTest client from just a session. +// svc := outputservice9protocoltest.New(mySession) +// +// // Create a OutputService9ProtocolTest client with additional configuration +// svc := outputservice9protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewOutputService9ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *OutputService9ProtocolTest { + c := p.ClientConfig("outputservice9protocoltest", cfgs...) + return newOutputService9ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. 
+func newOutputService9ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *OutputService9ProtocolTest { + svc := &OutputService9ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "outputservice9protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Build.PushBack(restxml.Build) + svc.Handlers.Unmarshal.PushBack(restxml.Unmarshal) + svc.Handlers.UnmarshalMeta.PushBack(restxml.UnmarshalMeta) + svc.Handlers.UnmarshalError.PushBack(restxml.UnmarshalError) + + return svc +} + +// newRequest creates a new request for a OutputService9ProtocolTest operation and runs any +// custom request initialization. +func (c *OutputService9ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opOutputService9TestCaseOperation1 = "OperationName" + +// OutputService9TestCaseOperation1Request generates a request for the OutputService9TestCaseOperation1 operation. +func (c *OutputService9ProtocolTest) OutputService9TestCaseOperation1Request(input *OutputService9TestShapeOutputService9TestCaseOperation1Input) (req *request.Request, output *OutputService9TestShapeOutputService9TestCaseOperation1Output) { + op := &request.Operation{ + Name: opOutputService9TestCaseOperation1, + } + + if input == nil { + input = &OutputService9TestShapeOutputService9TestCaseOperation1Input{} + } + + req = c.newRequest(op, input, output) + output = &OutputService9TestShapeOutputService9TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *OutputService9ProtocolTest) OutputService9TestCaseOperation1(input *OutputService9TestShapeOutputService9TestCaseOperation1Input) (*OutputService9TestShapeOutputService9TestCaseOperation1Output, error) { + req, out := c.OutputService9TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +type OutputService9TestShapeOutputService9TestCaseOperation1Input struct { + _ struct{} `type:"structure"` +} + +type OutputService9TestShapeOutputService9TestCaseOperation1Output struct { + _ struct{} `type:"structure" payload:"Data"` + + Data *OutputService9TestShapeSingleStructure `type:"structure"` + + Header *string `location:"header" locationName:"X-Foo" type:"string"` +} + +type OutputService9TestShapeSingleStructure struct { + _ struct{} `type:"structure"` + + Foo *string `type:"string"` +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type OutputService10ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the OutputService10ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a OutputService10ProtocolTest client from just a session. +// svc := outputservice10protocoltest.New(mySession) +// +// // Create a OutputService10ProtocolTest client with additional configuration +// svc := outputservice10protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewOutputService10ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *OutputService10ProtocolTest { + c := p.ClientConfig("outputservice10protocoltest", cfgs...) 
+ return newOutputService10ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newOutputService10ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *OutputService10ProtocolTest { + svc := &OutputService10ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "outputservice10protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Build.PushBack(restxml.Build) + svc.Handlers.Unmarshal.PushBack(restxml.Unmarshal) + svc.Handlers.UnmarshalMeta.PushBack(restxml.UnmarshalMeta) + svc.Handlers.UnmarshalError.PushBack(restxml.UnmarshalError) + + return svc +} + +// newRequest creates a new request for a OutputService10ProtocolTest operation and runs any +// custom request initialization. +func (c *OutputService10ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opOutputService10TestCaseOperation1 = "OperationName" + +// OutputService10TestCaseOperation1Request generates a request for the OutputService10TestCaseOperation1 operation. +func (c *OutputService10ProtocolTest) OutputService10TestCaseOperation1Request(input *OutputService10TestShapeOutputService10TestCaseOperation1Input) (req *request.Request, output *OutputService10TestShapeOutputService10TestCaseOperation1Output) { + op := &request.Operation{ + Name: opOutputService10TestCaseOperation1, + } + + if input == nil { + input = &OutputService10TestShapeOutputService10TestCaseOperation1Input{} + } + + req = c.newRequest(op, input, output) + output = &OutputService10TestShapeOutputService10TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *OutputService10ProtocolTest) OutputService10TestCaseOperation1(input *OutputService10TestShapeOutputService10TestCaseOperation1Input) (*OutputService10TestShapeOutputService10TestCaseOperation1Output, error) { + req, out := c.OutputService10TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +type OutputService10TestShapeOutputService10TestCaseOperation1Input struct { + _ struct{} `type:"structure"` +} + +type OutputService10TestShapeOutputService10TestCaseOperation1Output struct { + _ struct{} `type:"structure" payload:"Stream"` + + Stream []byte `type:"blob"` +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type OutputService11ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the OutputService11ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a OutputService11ProtocolTest client from just a session. +// svc := outputservice11protocoltest.New(mySession) +// +// // Create a OutputService11ProtocolTest client with additional configuration +// svc := outputservice11protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewOutputService11ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *OutputService11ProtocolTest { + c := p.ClientConfig("outputservice11protocoltest", cfgs...) 
+ return newOutputService11ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newOutputService11ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *OutputService11ProtocolTest { + svc := &OutputService11ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "outputservice11protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Build.PushBack(restxml.Build) + svc.Handlers.Unmarshal.PushBack(restxml.Unmarshal) + svc.Handlers.UnmarshalMeta.PushBack(restxml.UnmarshalMeta) + svc.Handlers.UnmarshalError.PushBack(restxml.UnmarshalError) + + return svc +} + +// newRequest creates a new request for a OutputService11ProtocolTest operation and runs any +// custom request initialization. +func (c *OutputService11ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opOutputService11TestCaseOperation1 = "OperationName" + +// OutputService11TestCaseOperation1Request generates a request for the OutputService11TestCaseOperation1 operation. +func (c *OutputService11ProtocolTest) OutputService11TestCaseOperation1Request(input *OutputService11TestShapeOutputService11TestCaseOperation1Input) (req *request.Request, output *OutputService11TestShapeOutputService11TestCaseOperation1Output) { + op := &request.Operation{ + Name: opOutputService11TestCaseOperation1, + } + + if input == nil { + input = &OutputService11TestShapeOutputService11TestCaseOperation1Input{} + } + + req = c.newRequest(op, input, output) + output = &OutputService11TestShapeOutputService11TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *OutputService11ProtocolTest) OutputService11TestCaseOperation1(input *OutputService11TestShapeOutputService11TestCaseOperation1Input) (*OutputService11TestShapeOutputService11TestCaseOperation1Output, error) { + req, out := c.OutputService11TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +type OutputService11TestShapeOutputService11TestCaseOperation1Input struct { + _ struct{} `type:"structure"` +} + +type OutputService11TestShapeOutputService11TestCaseOperation1Output struct { + _ struct{} `type:"structure"` + + Char *string `location:"header" locationName:"x-char" type:"character"` + + Double *float64 `location:"header" locationName:"x-double" type:"double"` + + FalseBool *bool `location:"header" locationName:"x-false-bool" type:"boolean"` + + Float *float64 `location:"header" locationName:"x-float" type:"float"` + + Integer *int64 `location:"header" locationName:"x-int" type:"integer"` + + Long *int64 `location:"header" locationName:"x-long" type:"long"` + + Str *string `location:"header" locationName:"x-str" type:"string"` + + Timestamp *time.Time `location:"header" locationName:"x-timestamp" type:"timestamp" timestampFormat:"iso8601"` + + TrueBool *bool `location:"header" locationName:"x-true-bool" type:"boolean"` +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type OutputService12ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the OutputService12ProtocolTest client with a session. 
+// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a OutputService12ProtocolTest client from just a session. +// svc := outputservice12protocoltest.New(mySession) +// +// // Create a OutputService12ProtocolTest client with additional configuration +// svc := outputservice12protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewOutputService12ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *OutputService12ProtocolTest { + c := p.ClientConfig("outputservice12protocoltest", cfgs...) + return newOutputService12ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newOutputService12ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *OutputService12ProtocolTest { + svc := &OutputService12ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "outputservice12protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Build.PushBack(restxml.Build) + svc.Handlers.Unmarshal.PushBack(restxml.Unmarshal) + svc.Handlers.UnmarshalMeta.PushBack(restxml.UnmarshalMeta) + svc.Handlers.UnmarshalError.PushBack(restxml.UnmarshalError) + + return svc +} + +// newRequest creates a new request for a OutputService12ProtocolTest operation and runs any +// custom request initialization. +func (c *OutputService12ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opOutputService12TestCaseOperation1 = "OperationName" + +// OutputService12TestCaseOperation1Request generates a request for the OutputService12TestCaseOperation1 operation. 
+func (c *OutputService12ProtocolTest) OutputService12TestCaseOperation1Request(input *OutputService12TestShapeOutputService12TestCaseOperation1Input) (req *request.Request, output *OutputService12TestShapeOutputService12TestCaseOperation1Output) { + op := &request.Operation{ + Name: opOutputService12TestCaseOperation1, + } + + if input == nil { + input = &OutputService12TestShapeOutputService12TestCaseOperation1Input{} + } + + req = c.newRequest(op, input, output) + output = &OutputService12TestShapeOutputService12TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *OutputService12ProtocolTest) OutputService12TestCaseOperation1(input *OutputService12TestShapeOutputService12TestCaseOperation1Input) (*OutputService12TestShapeOutputService12TestCaseOperation1Output, error) { + req, out := c.OutputService12TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +type OutputService12TestShapeOutputService12TestCaseOperation1Input struct { + _ struct{} `type:"structure"` +} + +type OutputService12TestShapeOutputService12TestCaseOperation1Output struct { + _ struct{} `type:"structure"` + + Foo *string `type:"string"` +} + +// +// Tests begin here +// + +func TestOutputService1ProtocolTestScalarMembersCase1(t *testing.T) { + sess := session.New() + svc := NewOutputService1ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + + buf := bytes.NewReader([]byte("myname123falsetrue1.21.3200a2015-01-25T08:00:00Z")) + req, out := svc.OutputService1TestCaseOperation1Request(nil) + req.HTTPResponse = &http.Response{StatusCode: 200, Body: ioutil.NopCloser(buf), Header: http.Header{}} + + // set headers + req.HTTPResponse.Header.Set("ImaHeader", "test") + req.HTTPResponse.Header.Set("X-Foo", "abc") + + // unmarshal response + restxml.UnmarshalMeta(req) + restxml.Unmarshal(req) + assert.NoError(t, req.Error) + + // assert response + assert.NotNil(t, out) // ensure out variable is used + assert.Equal(t, "a", *out.Char) + assert.Equal(t, 1.3, *out.Double) + assert.Equal(t, false, *out.FalseBool) + assert.Equal(t, 1.2, *out.Float) + assert.Equal(t, "test", *out.ImaHeader) + assert.Equal(t, "abc", *out.ImaHeaderLocation) + assert.Equal(t, int64(200), *out.Long) + assert.Equal(t, int64(123), *out.Num) + assert.Equal(t, "myname", *out.Str) + assert.Equal(t, time.Unix(1.4221728e+09, 0).UTC().String(), out.Timestamp.String()) + assert.Equal(t, true, *out.TrueBool) + +} + +func TestOutputService1ProtocolTestScalarMembersCase2(t *testing.T) { + sess := session.New() + svc := NewOutputService1ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + + buf := bytes.NewReader([]byte("123falsetrue1.21.3200a2015-01-25T08:00:00Z")) + req, out := svc.OutputService1TestCaseOperation2Request(nil) + req.HTTPResponse = &http.Response{StatusCode: 200, Body: ioutil.NopCloser(buf), Header: http.Header{}} + + // set headers + req.HTTPResponse.Header.Set("ImaHeader", "test") + req.HTTPResponse.Header.Set("X-Foo", "abc") + + // unmarshal response + restxml.UnmarshalMeta(req) + restxml.Unmarshal(req) + assert.NoError(t, req.Error) + + // assert response + assert.NotNil(t, out) // ensure out variable is used + assert.Equal(t, "a", *out.Char) + assert.Equal(t, 1.3, *out.Double) + assert.Equal(t, false, *out.FalseBool) + assert.Equal(t, 1.2, *out.Float) + assert.Equal(t, "test", *out.ImaHeader) + assert.Equal(t, "abc", *out.ImaHeaderLocation) + assert.Equal(t, int64(200), *out.Long) + assert.Equal(t, int64(123), *out.Num) + assert.Equal(t, "", *out.Str) + assert.Equal(t, 
time.Unix(1.4221728e+09, 0).UTC().String(), out.Timestamp.String()) + assert.Equal(t, true, *out.TrueBool) + +} + +func TestOutputService2ProtocolTestBlobCase1(t *testing.T) { + sess := session.New() + svc := NewOutputService2ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + + buf := bytes.NewReader([]byte("dmFsdWU=")) + req, out := svc.OutputService2TestCaseOperation1Request(nil) + req.HTTPResponse = &http.Response{StatusCode: 200, Body: ioutil.NopCloser(buf), Header: http.Header{}} + + // set headers + + // unmarshal response + restxml.UnmarshalMeta(req) + restxml.Unmarshal(req) + assert.NoError(t, req.Error) + + // assert response + assert.NotNil(t, out) // ensure out variable is used + assert.Equal(t, "value", string(out.Blob)) + +} + +func TestOutputService3ProtocolTestListsCase1(t *testing.T) { + sess := session.New() + svc := NewOutputService3ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + + buf := bytes.NewReader([]byte("abc123")) + req, out := svc.OutputService3TestCaseOperation1Request(nil) + req.HTTPResponse = &http.Response{StatusCode: 200, Body: ioutil.NopCloser(buf), Header: http.Header{}} + + // set headers + + // unmarshal response + restxml.UnmarshalMeta(req) + restxml.Unmarshal(req) + assert.NoError(t, req.Error) + + // assert response + assert.NotNil(t, out) // ensure out variable is used + assert.Equal(t, "abc", *out.ListMember[0]) + assert.Equal(t, "123", *out.ListMember[1]) + +} + +func TestOutputService4ProtocolTestListWithCustomMemberNameCase1(t *testing.T) { + sess := session.New() + svc := NewOutputService4ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + + buf := bytes.NewReader([]byte("abc123")) + req, out := svc.OutputService4TestCaseOperation1Request(nil) + req.HTTPResponse = &http.Response{StatusCode: 200, Body: ioutil.NopCloser(buf), Header: http.Header{}} + + // set headers + + // unmarshal response + restxml.UnmarshalMeta(req) + restxml.Unmarshal(req) + assert.NoError(t, req.Error) + + // assert response + assert.NotNil(t, out) // ensure out variable is used + assert.Equal(t, "abc", *out.ListMember[0]) + assert.Equal(t, "123", *out.ListMember[1]) + +} + +func TestOutputService5ProtocolTestFlattenedListCase1(t *testing.T) { + sess := session.New() + svc := NewOutputService5ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + + buf := bytes.NewReader([]byte("abc123")) + req, out := svc.OutputService5TestCaseOperation1Request(nil) + req.HTTPResponse = &http.Response{StatusCode: 200, Body: ioutil.NopCloser(buf), Header: http.Header{}} + + // set headers + + // unmarshal response + restxml.UnmarshalMeta(req) + restxml.Unmarshal(req) + assert.NoError(t, req.Error) + + // assert response + assert.NotNil(t, out) // ensure out variable is used + assert.Equal(t, "abc", *out.ListMember[0]) + assert.Equal(t, "123", *out.ListMember[1]) + +} + +func TestOutputService6ProtocolTestNormalMapCase1(t *testing.T) { + sess := session.New() + svc := NewOutputService6ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + + buf := bytes.NewReader([]byte("quxbarbazbam")) + req, out := svc.OutputService6TestCaseOperation1Request(nil) + req.HTTPResponse = &http.Response{StatusCode: 200, Body: ioutil.NopCloser(buf), Header: http.Header{}} + + // set headers + + // unmarshal response + restxml.UnmarshalMeta(req) + restxml.Unmarshal(req) + assert.NoError(t, req.Error) + + // assert response + assert.NotNil(t, out) // ensure out variable is used + assert.Equal(t, "bam", 
*out.Map["baz"].Foo) + assert.Equal(t, "bar", *out.Map["qux"].Foo) + +} + +func TestOutputService7ProtocolTestFlattenedMapCase1(t *testing.T) { + sess := session.New() + svc := NewOutputService7ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + + buf := bytes.NewReader([]byte("quxbarbazbam")) + req, out := svc.OutputService7TestCaseOperation1Request(nil) + req.HTTPResponse = &http.Response{StatusCode: 200, Body: ioutil.NopCloser(buf), Header: http.Header{}} + + // set headers + + // unmarshal response + restxml.UnmarshalMeta(req) + restxml.Unmarshal(req) + assert.NoError(t, req.Error) + + // assert response + assert.NotNil(t, out) // ensure out variable is used + assert.Equal(t, "bam", *out.Map["baz"]) + assert.Equal(t, "bar", *out.Map["qux"]) + +} + +func TestOutputService8ProtocolTestNamedMapCase1(t *testing.T) { + sess := session.New() + svc := NewOutputService8ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + + buf := bytes.NewReader([]byte("quxbarbazbam")) + req, out := svc.OutputService8TestCaseOperation1Request(nil) + req.HTTPResponse = &http.Response{StatusCode: 200, Body: ioutil.NopCloser(buf), Header: http.Header{}} + + // set headers + + // unmarshal response + restxml.UnmarshalMeta(req) + restxml.Unmarshal(req) + assert.NoError(t, req.Error) + + // assert response + assert.NotNil(t, out) // ensure out variable is used + assert.Equal(t, "bam", *out.Map["baz"]) + assert.Equal(t, "bar", *out.Map["qux"]) + +} + +func TestOutputService9ProtocolTestXMLPayloadCase1(t *testing.T) { + sess := session.New() + svc := NewOutputService9ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + + buf := bytes.NewReader([]byte("abc")) + req, out := svc.OutputService9TestCaseOperation1Request(nil) + req.HTTPResponse = &http.Response{StatusCode: 200, Body: ioutil.NopCloser(buf), Header: http.Header{}} + + // set headers + req.HTTPResponse.Header.Set("X-Foo", "baz") + + // unmarshal response + restxml.UnmarshalMeta(req) + restxml.Unmarshal(req) + assert.NoError(t, req.Error) + + // assert response + assert.NotNil(t, out) // ensure out variable is used + assert.Equal(t, "abc", *out.Data.Foo) + assert.Equal(t, "baz", *out.Header) + +} + +func TestOutputService10ProtocolTestStreamingPayloadCase1(t *testing.T) { + sess := session.New() + svc := NewOutputService10ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + + buf := bytes.NewReader([]byte("abc")) + req, out := svc.OutputService10TestCaseOperation1Request(nil) + req.HTTPResponse = &http.Response{StatusCode: 200, Body: ioutil.NopCloser(buf), Header: http.Header{}} + + // set headers + + // unmarshal response + restxml.UnmarshalMeta(req) + restxml.Unmarshal(req) + assert.NoError(t, req.Error) + + // assert response + assert.NotNil(t, out) // ensure out variable is used + assert.Equal(t, "abc", string(out.Stream)) + +} + +func TestOutputService11ProtocolTestScalarMembersInHeadersCase1(t *testing.T) { + sess := session.New() + svc := NewOutputService11ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + + buf := bytes.NewReader([]byte("")) + req, out := svc.OutputService11TestCaseOperation1Request(nil) + req.HTTPResponse = &http.Response{StatusCode: 200, Body: ioutil.NopCloser(buf), Header: http.Header{}} + + // set headers + req.HTTPResponse.Header.Set("x-char", "a") + req.HTTPResponse.Header.Set("x-double", "1.5") + req.HTTPResponse.Header.Set("x-false-bool", "false") + req.HTTPResponse.Header.Set("x-float", "1.5") + req.HTTPResponse.Header.Set("x-int", 
"1") + req.HTTPResponse.Header.Set("x-long", "100") + req.HTTPResponse.Header.Set("x-str", "string") + req.HTTPResponse.Header.Set("x-timestamp", "Sun, 25 Jan 2015 08:00:00 GMT") + req.HTTPResponse.Header.Set("x-true-bool", "true") + + // unmarshal response + restxml.UnmarshalMeta(req) + restxml.Unmarshal(req) + assert.NoError(t, req.Error) + + // assert response + assert.NotNil(t, out) // ensure out variable is used + assert.Equal(t, "a", *out.Char) + assert.Equal(t, 1.5, *out.Double) + assert.Equal(t, false, *out.FalseBool) + assert.Equal(t, 1.5, *out.Float) + assert.Equal(t, int64(1), *out.Integer) + assert.Equal(t, int64(100), *out.Long) + assert.Equal(t, "string", *out.Str) + assert.Equal(t, time.Unix(1.4221728e+09, 0).UTC().String(), out.Timestamp.String()) + assert.Equal(t, true, *out.TrueBool) + +} + +func TestOutputService12ProtocolTestEmptyStringCase1(t *testing.T) { + sess := session.New() + svc := NewOutputService12ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + + buf := bytes.NewReader([]byte("requestid")) + req, out := svc.OutputService12TestCaseOperation1Request(nil) + req.HTTPResponse = &http.Response{StatusCode: 200, Body: ioutil.NopCloser(buf), Header: http.Header{}} + + // set headers + + // unmarshal response + restxml.UnmarshalMeta(req) + restxml.Unmarshal(req) + assert.NoError(t, req.Error) + + // assert response + assert.NotNil(t, out) // ensure out variable is used + assert.Equal(t, "", *out.Foo) + +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/build.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/build.go new file mode 100644 index 000000000..0d76dffbd --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/build.go @@ -0,0 +1,287 @@ +// Package xmlutil provides XML serialisation of AWS requests and responses. +package xmlutil + +import ( + "encoding/base64" + "encoding/xml" + "fmt" + "reflect" + "sort" + "strconv" + "strings" + "time" +) + +// BuildXML will serialize params into an xml.Encoder. +// Error will be returned if the serialization of any of the params or nested values fails. +func BuildXML(params interface{}, e *xml.Encoder) error { + b := xmlBuilder{encoder: e, namespaces: map[string]string{}} + root := NewXMLElement(xml.Name{}) + if err := b.buildValue(reflect.ValueOf(params), root, ""); err != nil { + return err + } + for _, c := range root.Children { + for _, v := range c { + return StructToXML(e, v, false) + } + } + return nil +} + +// Returns the reflection element of a value, if it is a pointer. +func elemOf(value reflect.Value) reflect.Value { + for value.Kind() == reflect.Ptr { + value = value.Elem() + } + return value +} + +// A xmlBuilder serializes values from Go code to XML +type xmlBuilder struct { + encoder *xml.Encoder + namespaces map[string]string +} + +// buildValue generic XMLNode builder for any type. Will build value for their specific type +// struct, list, map, scalar. +// +// Also takes a "type" tag value to set what type a value should be converted to XMLNode as. If +// type is not provided reflect will be used to determine the value's type. 
+func (b *xmlBuilder) buildValue(value reflect.Value, current *XMLNode, tag reflect.StructTag) error { + value = elemOf(value) + if !value.IsValid() { // no need to handle zero values + return nil + } else if tag.Get("location") != "" { // don't handle non-body location values + return nil + } + + t := tag.Get("type") + if t == "" { + switch value.Kind() { + case reflect.Struct: + t = "structure" + case reflect.Slice: + t = "list" + case reflect.Map: + t = "map" + } + } + + switch t { + case "structure": + if field, ok := value.Type().FieldByName("_"); ok { + tag = tag + reflect.StructTag(" ") + field.Tag + } + return b.buildStruct(value, current, tag) + case "list": + return b.buildList(value, current, tag) + case "map": + return b.buildMap(value, current, tag) + default: + return b.buildScalar(value, current, tag) + } +} + +// buildStruct adds a struct and its fields to the current XMLNode. All fields and any nested +// types are converted to XMLNodes also. +func (b *xmlBuilder) buildStruct(value reflect.Value, current *XMLNode, tag reflect.StructTag) error { + if !value.IsValid() { + return nil + } + + fieldAdded := false + + // unwrap payloads + if payload := tag.Get("payload"); payload != "" { + field, _ := value.Type().FieldByName(payload) + tag = field.Tag + value = elemOf(value.FieldByName(payload)) + + if !value.IsValid() { + return nil + } + } + + child := NewXMLElement(xml.Name{Local: tag.Get("locationName")}) + + // there is an xmlNamespace associated with this struct + if prefix, uri := tag.Get("xmlPrefix"), tag.Get("xmlURI"); uri != "" { + ns := xml.Attr{ + Name: xml.Name{Local: "xmlns"}, + Value: uri, + } + if prefix != "" { + b.namespaces[prefix] = uri // register the namespace + ns.Name.Local = "xmlns:" + prefix + } + + child.Attr = append(child.Attr, ns) + } + + t := value.Type() + for i := 0; i < value.NumField(); i++ { + if c := t.Field(i).Name[0:1]; strings.ToLower(c) == c { + continue // ignore unexported fields + } + + member := elemOf(value.Field(i)) + field := t.Field(i) + mTag := field.Tag + + if mTag.Get("location") != "" { // skip non-body members + continue + } + + memberName := mTag.Get("locationName") + if memberName == "" { + memberName = field.Name + mTag = reflect.StructTag(string(mTag) + ` locationName:"` + memberName + `"`) + } + if err := b.buildValue(member, child, mTag); err != nil { + return err + } + + fieldAdded = true + } + + if fieldAdded { // only append this child if we have one or more valid members + current.AddChild(child) + } + + return nil +} + +// buildList adds the value's list items to the current XMLNode as children nodes. All +// nested values in the list are converted to XMLNodes also.
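// A sketch of the namespace handling in buildStruct above, assuming the tag
// names it reads (locationName, xmlURI) are used as shown; the shape below is
// hypothetical. The xmlURI tag on the anonymous "_" field surfaces as an
// xmlns attribute on the emitted root element.
package main

import (
	"bytes"
	"encoding/xml"
	"fmt"

	"github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil"
)

type putRuleInput struct {
	_    struct{} `type:"structure" locationName:"PutRule" xmlURI:"http://example.com/doc/2016-01-01/"`
	Name *string  `type:"string" locationName:"Name"`
}

func main() {
	name := "rule-1"
	var buf bytes.Buffer
	if err := xmlutil.BuildXML(&putRuleInput{Name: &name}, xml.NewEncoder(&buf)); err != nil {
		fmt.Println("build error:", err)
		return
	}
	// <PutRule xmlns="http://example.com/doc/2016-01-01/"><Name>rule-1</Name></PutRule>
	fmt.Println(buf.String())
}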
+func (b *xmlBuilder) buildList(value reflect.Value, current *XMLNode, tag reflect.StructTag) error { + if value.IsNil() { // don't build omitted lists + return nil + } + + // check for unflattened list member + flattened := tag.Get("flattened") != "" + + xname := xml.Name{Local: tag.Get("locationName")} + if flattened { + for i := 0; i < value.Len(); i++ { + child := NewXMLElement(xname) + current.AddChild(child) + if err := b.buildValue(value.Index(i), child, ""); err != nil { + return err + } + } + } else { + list := NewXMLElement(xname) + current.AddChild(list) + + for i := 0; i < value.Len(); i++ { + iname := tag.Get("locationNameList") + if iname == "" { + iname = "member" + } + + child := NewXMLElement(xml.Name{Local: iname}) + list.AddChild(child) + if err := b.buildValue(value.Index(i), child, ""); err != nil { + return err + } + } + } + + return nil +} + +// buildMap adds the value's key/value pairs to the current XMLNode as children nodes. All +// nested values in the map are converted to XMLNodes also. +// +// Error will be returned if it is unable to build the map's values into XMLNodes +func (b *xmlBuilder) buildMap(value reflect.Value, current *XMLNode, tag reflect.StructTag) error { + if value.IsNil() { // don't build omitted maps + return nil + } + + maproot := NewXMLElement(xml.Name{Local: tag.Get("locationName")}) + current.AddChild(maproot) + current = maproot + + kname, vname := "key", "value" + if n := tag.Get("locationNameKey"); n != "" { + kname = n + } + if n := tag.Get("locationNameValue"); n != "" { + vname = n + } + + // sorting is not required for compliance, but it makes testing easier + keys := make([]string, value.Len()) + for i, k := range value.MapKeys() { + keys[i] = k.String() + } + sort.Strings(keys) + + for _, k := range keys { + v := value.MapIndex(reflect.ValueOf(k)) + + mapcur := current + if tag.Get("flattened") == "" { // add "entry" tag to non-flat maps + child := NewXMLElement(xml.Name{Local: "entry"}) + mapcur.AddChild(child) + mapcur = child + } + + kchild := NewXMLElement(xml.Name{Local: kname}) + kchild.Text = k + vchild := NewXMLElement(xml.Name{Local: vname}) + mapcur.AddChild(kchild) + mapcur.AddChild(vchild) + + if err := b.buildValue(v, vchild, ""); err != nil { + return err + } + } + + return nil +} + +// buildScalar will convert the value into a string and append it as an attribute or child +// of the current XMLNode. +// +// The value will be added as an attribute if tag contains a "xmlAttribute" attribute value. +// +// Error will be returned if the value type is unsupported.
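// A sketch contrasting the two buildList branches above: without the flattened
// tag, entries are wrapped in a container element and named by
// locationNameList; with it, each entry repeats directly under locationName.
// The shapes, tags, and expected outputs below are assumptions for
// illustration, mirroring the shapes the generated protocol tests exercise.
package main

import (
	"bytes"
	"encoding/xml"
	"fmt"

	"github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil"
)

type wrappedList struct {
	_     struct{}  `type:"structure" locationName:"Doc"`
	Items []*string `type:"list" locationName:"Items" locationNameList:"item"`
}

type flatList struct {
	_     struct{}  `type:"structure" locationName:"Doc"`
	Items []*string `type:"list" locationName:"Item" flattened:"true"`
}

func render(v interface{}) string {
	var buf bytes.Buffer
	if err := xmlutil.BuildXML(v, xml.NewEncoder(&buf)); err != nil {
		return "build error: " + err.Error()
	}
	return buf.String()
}

func main() {
	a, b := "a", "b"
	items := []*string{&a, &b}
	fmt.Println(render(&wrappedList{Items: items})) // <Doc><Items><item>a</item><item>b</item></Items></Doc>
	fmt.Println(render(&flatList{Items: items}))    // <Doc><Item>a</Item><Item>b</Item></Doc>
}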
+func (b *xmlBuilder) buildScalar(value reflect.Value, current *XMLNode, tag reflect.StructTag) error { + var str string + switch converted := value.Interface().(type) { + case string: + str = converted + case []byte: + if !value.IsNil() { + str = base64.StdEncoding.EncodeToString(converted) + } + case bool: + str = strconv.FormatBool(converted) + case int64: + str = strconv.FormatInt(converted, 10) + case int: + str = strconv.Itoa(converted) + case float64: + str = strconv.FormatFloat(converted, 'f', -1, 64) + case float32: + str = strconv.FormatFloat(float64(converted), 'f', -1, 32) + case time.Time: + const ISO8601UTC = "2006-01-02T15:04:05Z" + str = converted.UTC().Format(ISO8601UTC) + default: + return fmt.Errorf("unsupported value for param %s: %v (%s)", + tag.Get("locationName"), value.Interface(), value.Type().Name()) + } + + xname := xml.Name{Local: tag.Get("locationName")} + if tag.Get("xmlAttribute") != "" { // put into current node's attribute list + attr := xml.Attr{Name: xname, Value: str} + current.Attr = append(current.Attr, attr) + } else { // regular text node + current.AddChild(&XMLNode{Name: xname, Text: str}) + } + return nil +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/unmarshal.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/unmarshal.go new file mode 100644 index 000000000..49f291a85 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/unmarshal.go @@ -0,0 +1,260 @@ +package xmlutil + +import ( + "encoding/base64" + "encoding/xml" + "fmt" + "io" + "reflect" + "strconv" + "strings" + "time" +) + +// UnmarshalXML deserializes an xml.Decoder into the container v. V +// needs to match the shape of the XML expected to be decoded. +// If the shape doesn't match unmarshaling will fail. +func UnmarshalXML(v interface{}, d *xml.Decoder, wrapper string) error { + n, _ := XMLToStruct(d, nil) + if n.Children != nil { + for _, root := range n.Children { + for _, c := range root { + if wrappedChild, ok := c.Children[wrapper]; ok { + c = wrappedChild[0] // pull out wrapped element + } + + err := parse(reflect.ValueOf(v), c, "") + if err != nil { + if err == io.EOF { + return nil + } + return err + } + } + } + return nil + } + return nil +} + +// parse deserializes any value from the XMLNode. The type tag is used to infer the type, or reflect +// will be used to determine the type from r. +func parse(r reflect.Value, node *XMLNode, tag reflect.StructTag) error { + rtype := r.Type() + if rtype.Kind() == reflect.Ptr { + rtype = rtype.Elem() // check kind of actual element type + } + + t := tag.Get("type") + if t == "" { + switch rtype.Kind() { + case reflect.Struct: + t = "structure" + case reflect.Slice: + t = "list" + case reflect.Map: + t = "map" + } + } + + switch t { + case "structure": + if field, ok := rtype.FieldByName("_"); ok { + tag = field.Tag + } + return parseStruct(r, node, tag) + case "list": + return parseList(r, node, tag) + case "map": + return parseMap(r, node, tag) + default: + return parseScalar(r, node, tag) + } +} + +// parseStruct deserializes a structure and its fields from an XMLNode. Any nested +// types in the structure will also be deserialized. 
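// A minimal decoding sketch using UnmarshalXML, assuming a hypothetical output
// shape; the XML body and field tags are illustrative. The wrapper argument
// names an element to unwrap before parsing ("" skips unwrapping here).
package main

import (
	"encoding/xml"
	"fmt"
	"strings"

	"github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil"
)

type describeThingOutput struct {
	_    struct{} `type:"structure"`
	Name *string  `type:"string" locationName:"Name"`
}

func main() {
	body := "<DescribeThingResponse><Name>example</Name></DescribeThingResponse>"
	out := &describeThingOutput{}
	d := xml.NewDecoder(strings.NewReader(body))
	if err := xmlutil.UnmarshalXML(out, d, ""); err != nil {
		fmt.Println("unmarshal error:", err)
		return
	}
	fmt.Println(*out.Name) // example
}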
+func parseStruct(r reflect.Value, node *XMLNode, tag reflect.StructTag) error { + t := r.Type() + if r.Kind() == reflect.Ptr { + if r.IsNil() { // create the structure if it's nil + s := reflect.New(r.Type().Elem()) + r.Set(s) + r = s + } + + r = r.Elem() + t = t.Elem() + } + + // unwrap any payloads + if payload := tag.Get("payload"); payload != "" { + field, _ := t.FieldByName(payload) + return parseStruct(r.FieldByName(payload), node, field.Tag) + } + + for i := 0; i < t.NumField(); i++ { + field := t.Field(i) + if c := field.Name[0:1]; strings.ToLower(c) == c { + continue // ignore unexported fields + } + + // figure out what this field is called + name := field.Name + if field.Tag.Get("flattened") != "" && field.Tag.Get("locationNameList") != "" { + name = field.Tag.Get("locationNameList") + } else if locName := field.Tag.Get("locationName"); locName != "" { + name = locName + } + + // try to find the field by name in elements + elems := node.Children[name] + + if elems == nil { // try to find the field in attributes + for _, a := range node.Attr { + if name == a.Name.Local { + // turn this into a text node for de-serializing + elems = []*XMLNode{{Text: a.Value}} + } + } + } + + member := r.FieldByName(field.Name) + for _, elem := range elems { + err := parse(member, elem, field.Tag) + if err != nil { + return err + } + } + } + return nil +} + +// parseList deserializes a list of values from an XML node. Each list entry +// will also be deserialized. +func parseList(r reflect.Value, node *XMLNode, tag reflect.StructTag) error { + t := r.Type() + + if tag.Get("flattened") == "" { // look at all item entries + mname := "member" + if name := tag.Get("locationNameList"); name != "" { + mname = name + } + + if Children, ok := node.Children[mname]; ok { + if r.IsNil() { + r.Set(reflect.MakeSlice(t, len(Children), len(Children))) + } + + for i, c := range Children { + err := parse(r.Index(i), c, "") + if err != nil { + return err + } + } + } + } else { // flattened list means this is a single element + if r.IsNil() { + r.Set(reflect.MakeSlice(t, 0, 0)) + } + + childR := reflect.Zero(t.Elem()) + r.Set(reflect.Append(r, childR)) + err := parse(r.Index(r.Len()-1), node, "") + if err != nil { + return err + } + } + + return nil +} + +// parseMap deserializes a map from an XMLNode. The direct children of the XMLNode +// will also be deserialized as map entries. +func parseMap(r reflect.Value, node *XMLNode, tag reflect.StructTag) error { + if r.IsNil() { + r.Set(reflect.MakeMap(r.Type())) + } + + if tag.Get("flattened") == "" { // look at all child entries + for _, entry := range node.Children["entry"] { + parseMapEntry(r, entry, tag) + } + } else { // this element is itself an entry + parseMapEntry(r, node, tag) + } + + return nil +} + +// parseMapEntry deserializes a map entry from an XML node. +func parseMapEntry(r reflect.Value, node *XMLNode, tag reflect.StructTag) error { + kname, vname := "key", "value" + if n := tag.Get("locationNameKey"); n != "" { + kname = n + } + if n := tag.Get("locationNameValue"); n != "" { + vname = n + } + + keys, ok := node.Children[kname] + values := node.Children[vname] + if ok { + for i, key := range keys { + keyR := reflect.ValueOf(key.Text) + value := values[i] + valueR := reflect.New(r.Type().Elem()).Elem() + + parse(valueR, value, "") + r.SetMapIndex(keyR, valueR) + } + } + return nil +} + +// parseScalar deserializes an XMLNode value into a concrete type based on the +// interface type of r.
+// +// Error is returned if the deserialization fails due to invalid type conversion, +// or unsupported interface type. +func parseScalar(r reflect.Value, node *XMLNode, tag reflect.StructTag) error { + switch r.Interface().(type) { + case *string: + r.Set(reflect.ValueOf(&node.Text)) + return nil + case []byte: + b, err := base64.StdEncoding.DecodeString(node.Text) + if err != nil { + return err + } + r.Set(reflect.ValueOf(b)) + case *bool: + v, err := strconv.ParseBool(node.Text) + if err != nil { + return err + } + r.Set(reflect.ValueOf(&v)) + case *int64: + v, err := strconv.ParseInt(node.Text, 10, 64) + if err != nil { + return err + } + r.Set(reflect.ValueOf(&v)) + case *float64: + v, err := strconv.ParseFloat(node.Text, 64) + if err != nil { + return err + } + r.Set(reflect.ValueOf(&v)) + case *time.Time: + const ISO8601UTC = "2006-01-02T15:04:05Z" + t, err := time.Parse(ISO8601UTC, node.Text) + if err != nil { + return err + } + r.Set(reflect.ValueOf(&t)) + default: + return fmt.Errorf("unsupported value: %v (%s)", r.Interface(), r.Type()) + } + return nil +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/xml_to_struct.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/xml_to_struct.go new file mode 100644 index 000000000..72c198a9d --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/xml_to_struct.go @@ -0,0 +1,105 @@ +package xmlutil + +import ( + "encoding/xml" + "io" + "sort" +) + +// An XMLNode contains the values to be encoded or decoded. +type XMLNode struct { + Name xml.Name `json:",omitempty"` + Children map[string][]*XMLNode `json:",omitempty"` + Text string `json:",omitempty"` + Attr []xml.Attr `json:",omitempty"` +} + +// NewXMLElement returns a pointer to a new XMLNode initialized to default values. +func NewXMLElement(name xml.Name) *XMLNode { + return &XMLNode{ + Name: name, + Children: map[string][]*XMLNode{}, + Attr: []xml.Attr{}, + } +} + +// AddChild adds child to the XMLNode. +func (n *XMLNode) AddChild(child *XMLNode) { + if _, ok := n.Children[child.Name.Local]; !ok { + n.Children[child.Name.Local] = []*XMLNode{} + } + n.Children[child.Name.Local] = append(n.Children[child.Name.Local], child) +} + +// XMLToStruct converts an xml.Decoder stream to XMLNode with nested values. +func XMLToStruct(d *xml.Decoder, s *xml.StartElement) (*XMLNode, error) { + out := &XMLNode{} + for { + tok, err := d.Token() + if tok == nil || err == io.EOF { + break + } + if err != nil { + return out, err + } + + switch typed := tok.(type) { + case xml.CharData: + out.Text = string(typed.Copy()) + case xml.StartElement: + el := typed.Copy() + out.Attr = el.Attr + if out.Children == nil { + out.Children = map[string][]*XMLNode{} + } + + name := typed.Name.Local + slice := out.Children[name] + if slice == nil { + slice = []*XMLNode{} + } + node, e := XMLToStruct(d, &el) + if e != nil { + return out, e + } + node.Name = typed.Name + slice = append(slice, node) + out.Children[name] = slice + case xml.EndElement: + if s != nil && s.Name.Local == typed.Name.Local { // matching end token + return out, nil + } + } + } + return out, nil +} + +// StructToXML writes an XMLNode to an xml.Encoder as tokens.
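// A round-trip sketch: XMLToStruct parses arbitrary XML into the XMLNode tree,
// and StructToXML re-emits it, optionally sorting children (the sorted form is
// what the generated protocol tests compare against). The document below is
// arbitrary example input, not taken from the SDK.
package main

import (
	"bytes"
	"encoding/xml"
	"fmt"
	"strings"

	"github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil"
)

func main() {
	in := "<Doc><B>2</B><A>1</A></Doc>"
	root, err := xmlutil.XMLToStruct(xml.NewDecoder(strings.NewReader(in)), nil)
	if err != nil {
		fmt.Println("decode error:", err)
		return
	}

	var buf bytes.Buffer
	enc := xml.NewEncoder(&buf)
	// root is a synthetic node whose only child here is <Doc>.
	for _, nodes := range root.Children {
		for _, doc := range nodes {
			if err := xmlutil.StructToXML(enc, doc, true); err != nil {
				fmt.Println("encode error:", err)
				return
			}
		}
	}
	fmt.Println(buf.String()) // <Doc><A>1</A><B>2</B></Doc>
}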
+func StructToXML(e *xml.Encoder, node *XMLNode, sorted bool) error { + e.EncodeToken(xml.StartElement{Name: node.Name, Attr: node.Attr}) + + if node.Text != "" { + e.EncodeToken(xml.CharData([]byte(node.Text))) + } else if sorted { + sortedNames := []string{} + for k := range node.Children { + sortedNames = append(sortedNames, k) + } + sort.Strings(sortedNames) + + for _, k := range sortedNames { + for _, v := range node.Children[k] { + StructToXML(e, v, sorted) + } + } + } else { + for _, c := range node.Children { + for _, v := range c { + StructToXML(e, v, sorted) + } + } + } + + e.EncodeToken(xml.EndElement{Name: node.Name}) + return e.Flush() +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/signer/v4/functional_test.go b/vendor/github.com/aws/aws-sdk-go/private/signer/v4/functional_test.go new file mode 100644 index 000000000..c2cee6bbc --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/signer/v4/functional_test.go @@ -0,0 +1,77 @@ +package v4_test + +import ( + "net/http" + "net/url" + "testing" + "time" + + "github.com/stretchr/testify/assert" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/awstesting/unit" + "github.com/aws/aws-sdk-go/service/s3" +) + +func TestPresignHandler(t *testing.T) { + svc := s3.New(unit.Session) + req, _ := svc.PutObjectRequest(&s3.PutObjectInput{ + Bucket: aws.String("bucket"), + Key: aws.String("key"), + ContentDisposition: aws.String("a+b c$d"), + ACL: aws.String("public-read"), + }) + req.Time = time.Unix(0, 0) + urlstr, err := req.Presign(5 * time.Minute) + + assert.NoError(t, err) + + expectedDate := "19700101T000000Z" + expectedHeaders := "content-disposition;host;x-amz-acl" + expectedSig := "2d76a414208c0eac2a23ef9c834db9635ecd5a0fbb447a00ad191f82d854f55b" + expectedCred := "AKID/19700101/mock-region/s3/aws4_request" + + u, _ := url.Parse(urlstr) + urlQ := u.Query() + assert.Equal(t, expectedSig, urlQ.Get("X-Amz-Signature")) + assert.Equal(t, expectedCred, urlQ.Get("X-Amz-Credential")) + assert.Equal(t, expectedHeaders, urlQ.Get("X-Amz-SignedHeaders")) + assert.Equal(t, expectedDate, urlQ.Get("X-Amz-Date")) + assert.Equal(t, "300", urlQ.Get("X-Amz-Expires")) + + assert.NotContains(t, urlstr, "+") // + encoded as %20 +} + +func TestPresignRequest(t *testing.T) { + svc := s3.New(unit.Session) + req, _ := svc.PutObjectRequest(&s3.PutObjectInput{ + Bucket: aws.String("bucket"), + Key: aws.String("key"), + ContentDisposition: aws.String("a+b c$d"), + ACL: aws.String("public-read"), + }) + req.Time = time.Unix(0, 0) + urlstr, headers, err := req.PresignRequest(5 * time.Minute) + + assert.NoError(t, err) + + expectedDate := "19700101T000000Z" + expectedHeaders := "content-disposition;host;x-amz-acl" + expectedSig := "2d76a414208c0eac2a23ef9c834db9635ecd5a0fbb447a00ad191f82d854f55b" + expectedCred := "AKID/19700101/mock-region/s3/aws4_request" + expectedHeaderMap := http.Header{ + "x-amz-acl": []string{"public-read"}, + "content-disposition": []string{"a+b c$d"}, + } + + u, _ := url.Parse(urlstr) + urlQ := u.Query() + assert.Equal(t, expectedSig, urlQ.Get("X-Amz-Signature")) + assert.Equal(t, expectedCred, urlQ.Get("X-Amz-Credential")) + assert.Equal(t, expectedHeaders, urlQ.Get("X-Amz-SignedHeaders")) + assert.Equal(t, expectedDate, urlQ.Get("X-Amz-Date")) + assert.Equal(t, expectedHeaderMap, headers) + assert.Equal(t, "300", urlQ.Get("X-Amz-Expires")) + + assert.NotContains(t, urlstr, "+") // + encoded as %20 +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/signer/v4/header_rules.go 
b/vendor/github.com/aws/aws-sdk-go/private/signer/v4/header_rules.go new file mode 100644 index 000000000..244c86da0 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/signer/v4/header_rules.go @@ -0,0 +1,82 @@ +package v4 + +import ( + "net/http" + "strings" +) + +// rules houses a set of rules needed for validation of a +// string value +type rules []rule + +// rule interface allows for more flexible rules and simply +// checks whether or not a value adheres to that rule +type rule interface { + IsValid(value string) bool } + +// IsValid will iterate through all rules and see if any rules +// apply to the value and supports nested rules +func (r rules) IsValid(value string) bool { + for _, rule := range r { + if rule.IsValid(value) { + return true + } + } + return false +} + +// mapRule generic rule for maps +type mapRule map[string]struct{} + +// IsValid for the map rule satisfies whether it exists in the map +func (m mapRule) IsValid(value string) bool { + _, ok := m[value] + return ok +} + +// whitelist is a generic rule for whitelisting +type whitelist struct { + rule +} + +// IsValid for whitelist checks if the value is within the whitelist +func (w whitelist) IsValid(value string) bool { + return w.rule.IsValid(value) +} + +// blacklist is a generic rule for blacklisting +type blacklist struct { + rule +} + +// IsValid for blacklist checks if the value is not within the blacklist +func (b blacklist) IsValid(value string) bool { + return !b.rule.IsValid(value) +} + +type patterns []string + +// IsValid for patterns checks each pattern and returns if a match has +// been found +func (p patterns) IsValid(value string) bool { + for _, pattern := range p { + if strings.HasPrefix(http.CanonicalHeaderKey(value), pattern) { + return true + } + } + return false +} + +// inclusiveRules allows rules to depend on one another +type inclusiveRules []rule + +// IsValid will return true if all rules are true +func (r inclusiveRules) IsValid(value string) bool { + for _, rule := range r { + if !rule.IsValid(value) { + return false + } + } + return true +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/signer/v4/header_rules_test.go b/vendor/github.com/aws/aws-sdk-go/private/signer/v4/header_rules_test.go new file mode 100644 index 000000000..7dfddc87e --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/signer/v4/header_rules_test.go @@ -0,0 +1,57 @@ +package v4 + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestRuleCheckWhitelist(t *testing.T) { + w := whitelist{ + mapRule{ + "Cache-Control": struct{}{}, + }, + } + + assert.True(t, w.IsValid("Cache-Control")) + assert.False(t, w.IsValid("Cache-")) +} + +func TestRuleCheckBlacklist(t *testing.T) { + b := blacklist{ + mapRule{ + "Cache-Control": struct{}{}, + }, + } + + assert.False(t, b.IsValid("Cache-Control")) + assert.True(t, b.IsValid("Cache-")) +} + +func TestRuleCheckPattern(t *testing.T) { + p := patterns{"X-Amz-Meta-"} + + assert.True(t, p.IsValid("X-Amz-Meta-")) + assert.True(t, p.IsValid("X-Amz-Meta-Star")) + assert.False(t, p.IsValid("Cache-")) +} + +func TestRuleComplexWhitelist(t *testing.T) { + w := rules{ + whitelist{ + mapRule{ + "Cache-Control": struct{}{}, + }, + }, + patterns{"X-Amz-Meta-"}, + } + + r := rules{ + inclusiveRules{patterns{"X-Amz-"}, blacklist{w}}, + } + + assert.True(t, r.IsValid("X-Amz-Blah")) + assert.False(t, r.IsValid("X-Amz-Meta-")) + assert.False(t, r.IsValid("X-Amz-Meta-Star")) + assert.False(t, r.IsValid("Cache-Control")) +} diff --git
a/vendor/github.com/aws/aws-sdk-go/private/signer/v4/v4.go b/vendor/github.com/aws/aws-sdk-go/private/signer/v4/v4.go new file mode 100644 index 000000000..14b0c6616 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/signer/v4/v4.go @@ -0,0 +1,438 @@ +// Package v4 implements request signing using AWS Signature Version 4. +package v4 + +import ( + "crypto/hmac" + "crypto/sha256" + "encoding/hex" + "fmt" + "io" + "net/http" + "net/url" + "sort" + "strconv" + "strings" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol/rest" +) + +const ( + authHeaderPrefix = "AWS4-HMAC-SHA256" + timeFormat = "20060102T150405Z" + shortTimeFormat = "20060102" +) + +var ignoredHeaders = rules{ + blacklist{ + mapRule{ + "Content-Length": struct{}{}, + "User-Agent": struct{}{}, + }, + }, +} + +// requiredSignedHeaders is a whitelist for building canonical headers. +var requiredSignedHeaders = rules{ + whitelist{ + mapRule{ + "Cache-Control": struct{}{}, + "Content-Disposition": struct{}{}, + "Content-Encoding": struct{}{}, + "Content-Language": struct{}{}, + "Content-Md5": struct{}{}, + "Content-Type": struct{}{}, + "Expires": struct{}{}, + "If-Match": struct{}{}, + "If-Modified-Since": struct{}{}, + "If-None-Match": struct{}{}, + "If-Unmodified-Since": struct{}{}, + "Range": struct{}{}, + "X-Amz-Acl": struct{}{}, + "X-Amz-Copy-Source": struct{}{}, + "X-Amz-Copy-Source-If-Match": struct{}{}, + "X-Amz-Copy-Source-If-Modified-Since": struct{}{}, + "X-Amz-Copy-Source-If-None-Match": struct{}{}, + "X-Amz-Copy-Source-If-Unmodified-Since": struct{}{}, + "X-Amz-Copy-Source-Range": struct{}{}, + "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Algorithm": struct{}{}, + "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key": struct{}{}, + "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key-Md5": struct{}{}, + "X-Amz-Grant-Full-control": struct{}{}, + "X-Amz-Grant-Read": struct{}{}, + "X-Amz-Grant-Read-Acp": struct{}{}, + "X-Amz-Grant-Write": struct{}{}, + "X-Amz-Grant-Write-Acp": struct{}{}, + "X-Amz-Metadata-Directive": struct{}{}, + "X-Amz-Mfa": struct{}{}, + "X-Amz-Request-Payer": struct{}{}, + "X-Amz-Server-Side-Encryption": struct{}{}, + "X-Amz-Server-Side-Encryption-Aws-Kms-Key-Id": struct{}{}, + "X-Amz-Server-Side-Encryption-Customer-Algorithm": struct{}{}, + "X-Amz-Server-Side-Encryption-Customer-Key": struct{}{}, + "X-Amz-Server-Side-Encryption-Customer-Key-Md5": struct{}{}, + "X-Amz-Storage-Class": struct{}{}, + "X-Amz-Website-Redirect-Location": struct{}{}, + }, + }, + patterns{"X-Amz-Meta-"}, +} + +// allowedQueryHoisting is a whitelist for building query headers. +var allowedQueryHoisting = inclusiveRules{ + blacklist{requiredSignedHeaders}, + patterns{"X-Amz-"}, +} + +type signer struct { + Request *http.Request + Time time.Time + ExpireTime time.Duration + ServiceName string + Region string + CredValues credentials.Value + Credentials *credentials.Credentials + Query url.Values + Body io.ReadSeeker + Debug aws.LogLevelType + Logger aws.Logger + + isPresign bool + formattedTime string + formattedShortTime string + + signedHeaders string + canonicalHeaders string + canonicalString string + credentialString string + stringToSign string + signature string + authorization string + notHoist bool + signedHeaderVals http.Header +} + +// Sign requests with signature version 4.
+// +// Will sign the requests with the service config's Credentials object. +// Signing is skipped if the credentials are the credentials.AnonymousCredentials +// object. +func Sign(req *request.Request) { + // If the request does not need to be signed ignore the signing of the + // request if the AnonymousCredentials object is used. + if req.Config.Credentials == credentials.AnonymousCredentials { + return + } + + region := req.ClientInfo.SigningRegion + if region == "" { + region = aws.StringValue(req.Config.Region) + } + + name := req.ClientInfo.SigningName + if name == "" { + name = req.ClientInfo.ServiceName + } + + s := signer{ + Request: req.HTTPRequest, + Time: req.Time, + ExpireTime: req.ExpireTime, + Query: req.HTTPRequest.URL.Query(), + Body: req.Body, + ServiceName: name, + Region: region, + Credentials: req.Config.Credentials, + Debug: req.Config.LogLevel.Value(), + Logger: req.Config.Logger, + notHoist: req.NotHoist, + } + + req.Error = s.sign() + req.SignedHeaderVals = s.signedHeaderVals +} + +func (v4 *signer) sign() error { + if v4.ExpireTime != 0 { + v4.isPresign = true + } + + if v4.isRequestSigned() { + if !v4.Credentials.IsExpired() { + // If the request is already signed, and the credentials have not + // expired yet ignore the signing request. + return nil + } + + // The credentials have expired for this request. The current signing + // is invalid, and needs to be redone because the request will fail. + if v4.isPresign { + v4.removePresign() + // Update the request's query string to ensure the values stay in + // sync in the case retrieving the new credentials fails. + v4.Request.URL.RawQuery = v4.Query.Encode() + } + } + + var err error + v4.CredValues, err = v4.Credentials.Get() + if err != nil { + return err + } + + if v4.isPresign { + v4.Query.Set("X-Amz-Algorithm", authHeaderPrefix) + if v4.CredValues.SessionToken != "" { + v4.Query.Set("X-Amz-Security-Token", v4.CredValues.SessionToken) + } else { + v4.Query.Del("X-Amz-Security-Token") + } + } else if v4.CredValues.SessionToken != "" { + v4.Request.Header.Set("X-Amz-Security-Token", v4.CredValues.SessionToken) + } + + v4.build() + + if v4.Debug.Matches(aws.LogDebugWithSigning) { + v4.logSigningInfo() + } + + return nil +} + +const logSignInfoMsg = `DEBUG: Request Signature: +---[ CANONICAL STRING ]----------------------------- +%s +---[ STRING TO SIGN ]-------------------------------- +%s%s +-----------------------------------------------------` +const logSignedURLMsg = ` +---[ SIGNED URL ]------------------------------------ +%s` + +func (v4 *signer) logSigningInfo() { + signedURLMsg := "" + if v4.isPresign { + signedURLMsg = fmt.Sprintf(logSignedURLMsg, v4.Request.URL.String()) + } + msg := fmt.Sprintf(logSignInfoMsg, v4.canonicalString, v4.stringToSign, signedURLMsg) + v4.Logger.Log(msg) +} + +func (v4 *signer) build() { + + v4.buildTime() // no depends + v4.buildCredentialString() // no depends + + unsignedHeaders := v4.Request.Header + if v4.isPresign { + if !v4.notHoist { + urlValues := url.Values{} + urlValues, unsignedHeaders = buildQuery(allowedQueryHoisting, unsignedHeaders) // no depends + for k := range urlValues { + v4.Query[k] = urlValues[k] + } + } + } + + v4.buildCanonicalHeaders(ignoredHeaders, unsignedHeaders) + v4.buildCanonicalString() // depends on canon headers / signed headers + v4.buildStringToSign() // depends on canon string + v4.buildSignature() // depends on string to sign + + if v4.isPresign { + v4.Request.URL.RawQuery += "&X-Amz-Signature=" + v4.signature + } else { + parts :=
[]string{ + authHeaderPrefix + " Credential=" + v4.CredValues.AccessKeyID + "/" + v4.credentialString, + "SignedHeaders=" + v4.signedHeaders, + "Signature=" + v4.signature, + } + v4.Request.Header.Set("Authorization", strings.Join(parts, ", ")) + } +} + +func (v4 *signer) buildTime() { + v4.formattedTime = v4.Time.UTC().Format(timeFormat) + v4.formattedShortTime = v4.Time.UTC().Format(shortTimeFormat) + + if v4.isPresign { + duration := int64(v4.ExpireTime / time.Second) + v4.Query.Set("X-Amz-Date", v4.formattedTime) + v4.Query.Set("X-Amz-Expires", strconv.FormatInt(duration, 10)) + } else { + v4.Request.Header.Set("X-Amz-Date", v4.formattedTime) + } +} + +func (v4 *signer) buildCredentialString() { + v4.credentialString = strings.Join([]string{ + v4.formattedShortTime, + v4.Region, + v4.ServiceName, + "aws4_request", + }, "/") + + if v4.isPresign { + v4.Query.Set("X-Amz-Credential", v4.CredValues.AccessKeyID+"/"+v4.credentialString) + } +} + +func buildQuery(r rule, header http.Header) (url.Values, http.Header) { + query := url.Values{} + unsignedHeaders := http.Header{} + for k, h := range header { + if r.IsValid(k) { + query[k] = h + } else { + unsignedHeaders[k] = h + } + } + + return query, unsignedHeaders +} +func (v4 *signer) buildCanonicalHeaders(r rule, header http.Header) { + var headers []string + headers = append(headers, "host") + for k, v := range header { + canonicalKey := http.CanonicalHeaderKey(k) + if !r.IsValid(canonicalKey) { + continue // ignored header + } + + lowerCaseKey := strings.ToLower(k) + headers = append(headers, lowerCaseKey) + + if v4.signedHeaderVals == nil { + v4.signedHeaderVals = make(http.Header) + } + v4.signedHeaderVals[lowerCaseKey] = v + } + sort.Strings(headers) + + v4.signedHeaders = strings.Join(headers, ";") + + if v4.isPresign { + v4.Query.Set("X-Amz-SignedHeaders", v4.signedHeaders) + } + + headerValues := make([]string, len(headers)) + for i, k := range headers { + if k == "host" { + headerValues[i] = "host:" + v4.Request.URL.Host + } else { + headerValues[i] = k + ":" + + strings.Join(v4.Request.Header[http.CanonicalHeaderKey(k)], ",") + } + } + + v4.canonicalHeaders = strings.Join(headerValues, "\n") +} + +func (v4 *signer) buildCanonicalString() { + v4.Request.URL.RawQuery = strings.Replace(v4.Query.Encode(), "+", "%20", -1) + uri := v4.Request.URL.Opaque + if uri != "" { + uri = "/" + strings.Join(strings.Split(uri, "/")[3:], "/") + } else { + uri = v4.Request.URL.Path + } + if uri == "" { + uri = "/" + } + + if v4.ServiceName != "s3" { + uri = rest.EscapePath(uri, false) + } + + v4.canonicalString = strings.Join([]string{ + v4.Request.Method, + uri, + v4.Request.URL.RawQuery, + v4.canonicalHeaders + "\n", + v4.signedHeaders, + v4.bodyDigest(), + }, "\n") +} + +func (v4 *signer) buildStringToSign() { + v4.stringToSign = strings.Join([]string{ + authHeaderPrefix, + v4.formattedTime, + v4.credentialString, + hex.EncodeToString(makeSha256([]byte(v4.canonicalString))), + }, "\n") +} + +func (v4 *signer) buildSignature() { + secret := v4.CredValues.SecretAccessKey + date := makeHmac([]byte("AWS4"+secret), []byte(v4.formattedShortTime)) + region := makeHmac(date, []byte(v4.Region)) + service := makeHmac(region, []byte(v4.ServiceName)) + credentials := makeHmac(service, []byte("aws4_request")) + signature := makeHmac(credentials, []byte(v4.stringToSign)) + v4.signature = hex.EncodeToString(signature) +} + +func (v4 *signer) bodyDigest() string { + hash := v4.Request.Header.Get("X-Amz-Content-Sha256") + if hash == "" { + if v4.isPresign && 
v4.ServiceName == "s3" { + hash = "UNSIGNED-PAYLOAD" + } else if v4.Body == nil { + hash = hex.EncodeToString(makeSha256([]byte{})) + } else { + hash = hex.EncodeToString(makeSha256Reader(v4.Body)) + } + v4.Request.Header.Add("X-Amz-Content-Sha256", hash) + } + return hash +} + +// isRequestSigned returns if the request is currently signed or presigned +func (v4 *signer) isRequestSigned() bool { + if v4.isPresign && v4.Query.Get("X-Amz-Signature") != "" { + return true + } + if v4.Request.Header.Get("Authorization") != "" { + return true + } + + return false +} + +// unsign removes signing flags for both signed and presigned requests. +func (v4 *signer) removePresign() { + v4.Query.Del("X-Amz-Algorithm") + v4.Query.Del("X-Amz-Signature") + v4.Query.Del("X-Amz-Security-Token") + v4.Query.Del("X-Amz-Date") + v4.Query.Del("X-Amz-Expires") + v4.Query.Del("X-Amz-Credential") + v4.Query.Del("X-Amz-SignedHeaders") +} + +func makeHmac(key []byte, data []byte) []byte { + hash := hmac.New(sha256.New, key) + hash.Write(data) + return hash.Sum(nil) +} + +func makeSha256(data []byte) []byte { + hash := sha256.New() + hash.Write(data) + return hash.Sum(nil) +} + +func makeSha256Reader(reader io.ReadSeeker) []byte { + hash := sha256.New() + start, _ := reader.Seek(0, 1) + defer reader.Seek(start, 0) + + io.Copy(hash, reader) + return hash.Sum(nil) +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/signer/v4/v4_test.go b/vendor/github.com/aws/aws-sdk-go/private/signer/v4/v4_test.go new file mode 100644 index 000000000..a417a64b4 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/signer/v4/v4_test.go @@ -0,0 +1,255 @@ +package v4 + +import ( + "net/http" + "strings" + "testing" + "time" + + "github.com/stretchr/testify/assert" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/awstesting" +) + +func buildSigner(serviceName string, region string, signTime time.Time, expireTime time.Duration, body string) signer { + endpoint := "https://" + serviceName + "." 
+ region + ".amazonaws.com" + reader := strings.NewReader(body) + req, _ := http.NewRequest("POST", endpoint, reader) + req.URL.Opaque = "//example.org/bucket/key-._~,!@#$%^&*()" + req.Header.Add("X-Amz-Target", "prefix.Operation") + req.Header.Add("Content-Type", "application/x-amz-json-1.0") + req.Header.Add("Content-Length", string(len(body))) + req.Header.Add("X-Amz-Meta-Other-Header", "some-value=!@#$%^&* (+)") + + return signer{ + Request: req, + Time: signTime, + ExpireTime: expireTime, + Query: req.URL.Query(), + Body: reader, + ServiceName: serviceName, + Region: region, + Credentials: credentials.NewStaticCredentials("AKID", "SECRET", "SESSION"), + } +} + +func removeWS(text string) string { + text = strings.Replace(text, " ", "", -1) + text = strings.Replace(text, "\n", "", -1) + text = strings.Replace(text, "\t", "", -1) + return text +} + +func assertEqual(t *testing.T, expected, given string) { + if removeWS(expected) != removeWS(given) { + t.Errorf("\nExpected: %s\nGiven: %s", expected, given) + } +} + +func TestPresignRequest(t *testing.T) { + signer := buildSigner("dynamodb", "us-east-1", time.Unix(0, 0), 300*time.Second, "{}") + signer.sign() + + expectedDate := "19700101T000000Z" + expectedHeaders := "content-type;host;x-amz-meta-other-header" + expectedSig := "4fe8944ddd3e83a32bc874955e734e5a349116bfce2d4f43171e0f7572b842f6" + expectedCred := "AKID/19700101/us-east-1/dynamodb/aws4_request" + expectedTarget := "prefix.Operation" + + q := signer.Request.URL.Query() + assert.Equal(t, expectedSig, q.Get("X-Amz-Signature")) + assert.Equal(t, expectedCred, q.Get("X-Amz-Credential")) + assert.Equal(t, expectedHeaders, q.Get("X-Amz-SignedHeaders")) + assert.Equal(t, expectedDate, q.Get("X-Amz-Date")) + assert.Empty(t, q.Get("X-Amz-Meta-Other-Header")) + assert.Equal(t, expectedTarget, q.Get("X-Amz-Target")) +} + +func TestSignRequest(t *testing.T) { + signer := buildSigner("dynamodb", "us-east-1", time.Unix(0, 0), 0, "{}") + signer.sign() + + expectedDate := "19700101T000000Z" + expectedSig := "AWS4-HMAC-SHA256 Credential=AKID/19700101/us-east-1/dynamodb/aws4_request, SignedHeaders=content-type;host;x-amz-date;x-amz-meta-other-header;x-amz-security-token;x-amz-target, Signature=5d3983fb3de907bdc2f3a6951d968e510f0252a8358c038f7680aa02374eeb67" + + q := signer.Request.Header + assert.Equal(t, expectedSig, q.Get("Authorization")) + assert.Equal(t, expectedDate, q.Get("X-Amz-Date")) +} + +func TestSignEmptyBody(t *testing.T) { + signer := buildSigner("dynamodb", "us-east-1", time.Now(), 0, "") + signer.Body = nil + signer.sign() + hash := signer.Request.Header.Get("X-Amz-Content-Sha256") + assert.Equal(t, "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", hash) +} + +func TestSignBody(t *testing.T) { + signer := buildSigner("dynamodb", "us-east-1", time.Now(), 0, "hello") + signer.sign() + hash := signer.Request.Header.Get("X-Amz-Content-Sha256") + assert.Equal(t, "2cf24dba5fb0a30e26e83b2ac5b9e29e1b161e5c1fa7425e73043362938b9824", hash) +} + +func TestSignSeekedBody(t *testing.T) { + signer := buildSigner("dynamodb", "us-east-1", time.Now(), 0, " hello") + signer.Body.Read(make([]byte, 3)) // consume first 3 bytes so body is now "hello" + signer.sign() + hash := signer.Request.Header.Get("X-Amz-Content-Sha256") + assert.Equal(t, "2cf24dba5fb0a30e26e83b2ac5b9e29e1b161e5c1fa7425e73043362938b9824", hash) + + start, _ := signer.Body.Seek(0, 1) + assert.Equal(t, int64(3), start) +} + +func TestPresignEmptyBodyS3(t *testing.T) { + signer := buildSigner("s3", "us-east-1", 
time.Now(), 5*time.Minute, "hello") + signer.sign() + hash := signer.Request.Header.Get("X-Amz-Content-Sha256") + assert.Equal(t, "UNSIGNED-PAYLOAD", hash) +} + +func TestSignPrecomputedBodyChecksum(t *testing.T) { + signer := buildSigner("dynamodb", "us-east-1", time.Now(), 0, "hello") + signer.Request.Header.Set("X-Amz-Content-Sha256", "PRECOMPUTED") + signer.sign() + hash := signer.Request.Header.Get("X-Amz-Content-Sha256") + assert.Equal(t, "PRECOMPUTED", hash) +} + +func TestAnonymousCredentials(t *testing.T) { + svc := awstesting.NewClient(&aws.Config{Credentials: credentials.AnonymousCredentials}) + r := svc.NewRequest( + &request.Operation{ + Name: "BatchGetItem", + HTTPMethod: "POST", + HTTPPath: "/", + }, + nil, + nil, + ) + Sign(r) + + urlQ := r.HTTPRequest.URL.Query() + assert.Empty(t, urlQ.Get("X-Amz-Signature")) + assert.Empty(t, urlQ.Get("X-Amz-Credential")) + assert.Empty(t, urlQ.Get("X-Amz-SignedHeaders")) + assert.Empty(t, urlQ.Get("X-Amz-Date")) + + hQ := r.HTTPRequest.Header + assert.Empty(t, hQ.Get("Authorization")) + assert.Empty(t, hQ.Get("X-Amz-Date")) +} + +func TestIgnoreResignRequestWithValidCreds(t *testing.T) { + svc := awstesting.NewClient(&aws.Config{ + Credentials: credentials.NewStaticCredentials("AKID", "SECRET", "SESSION"), + Region: aws.String("us-west-2"), + }) + r := svc.NewRequest( + &request.Operation{ + Name: "BatchGetItem", + HTTPMethod: "POST", + HTTPPath: "/", + }, + nil, + nil, + ) + + Sign(r) + sig := r.HTTPRequest.Header.Get("Authorization") + + Sign(r) + assert.Equal(t, sig, r.HTTPRequest.Header.Get("Authorization")) +} + +func TestIgnorePreResignRequestWithValidCreds(t *testing.T) { + svc := awstesting.NewClient(&aws.Config{ + Credentials: credentials.NewStaticCredentials("AKID", "SECRET", "SESSION"), + Region: aws.String("us-west-2"), + }) + r := svc.NewRequest( + &request.Operation{ + Name: "BatchGetItem", + HTTPMethod: "POST", + HTTPPath: "/", + }, + nil, + nil, + ) + r.ExpireTime = time.Minute * 10 + + Sign(r) + sig := r.HTTPRequest.Header.Get("X-Amz-Signature") + + Sign(r) + assert.Equal(t, sig, r.HTTPRequest.Header.Get("X-Amz-Signature")) +} + +func TestResignRequestExpiredCreds(t *testing.T) { + creds := credentials.NewStaticCredentials("AKID", "SECRET", "SESSION") + svc := awstesting.NewClient(&aws.Config{Credentials: creds}) + r := svc.NewRequest( + &request.Operation{ + Name: "BatchGetItem", + HTTPMethod: "POST", + HTTPPath: "/", + }, + nil, + nil, + ) + Sign(r) + querySig := r.HTTPRequest.Header.Get("Authorization") + + creds.Expire() + + Sign(r) + assert.NotEqual(t, querySig, r.HTTPRequest.Header.Get("Authorization")) +} + +func TestPreResignRequestExpiredCreds(t *testing.T) { + provider := &credentials.StaticProvider{Value: credentials.Value{ + AccessKeyID: "AKID", + SecretAccessKey: "SECRET", + SessionToken: "SESSION", + }} + creds := credentials.NewCredentials(provider) + svc := awstesting.NewClient(&aws.Config{Credentials: creds}) + r := svc.NewRequest( + &request.Operation{ + Name: "BatchGetItem", + HTTPMethod: "POST", + HTTPPath: "/", + }, + nil, + nil, + ) + r.ExpireTime = time.Minute * 10 + + Sign(r) + querySig := r.HTTPRequest.URL.Query().Get("X-Amz-Signature") + + creds.Expire() + r.Time = time.Now().Add(time.Hour * 48) + + Sign(r) + assert.NotEqual(t, querySig, r.HTTPRequest.URL.Query().Get("X-Amz-Signature")) +} + +func BenchmarkPresignRequest(b *testing.B) { + signer := buildSigner("dynamodb", "us-east-1", time.Now(), 300*time.Second, "{}") + for i := 0; i < b.N; i++ { + signer.sign() + } +} + +func 
BenchmarkSignRequest(b *testing.B) { + signer := buildSigner("dynamodb", "us-east-1", time.Now(), 0, "{}") + for i := 0; i < b.N; i++ { + signer.sign() + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/waiter/waiter.go b/vendor/github.com/aws/aws-sdk-go/private/waiter/waiter.go new file mode 100644 index 000000000..b51e9449c --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/waiter/waiter.go @@ -0,0 +1,134 @@ +package waiter + +import ( + "fmt" + "reflect" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/awsutil" + "github.com/aws/aws-sdk-go/aws/request" +) + +// A Config provides a collection of configuration values to setup a generated +// waiter code with. +type Config struct { + Name string + Delay int + MaxAttempts int + Operation string + Acceptors []WaitAcceptor +} + +// A WaitAcceptor provides the information needed to wait for an API operation +// to complete. +type WaitAcceptor struct { + Expected interface{} + Matcher string + State string + Argument string +} + +// A Waiter provides waiting for an operation to complete. +type Waiter struct { + Config + Client interface{} + Input interface{} +} + +// Wait waits for an operation to complete, expire max attempts, or fail. Error +// is returned if the operation fails. +func (w *Waiter) Wait() error { + client := reflect.ValueOf(w.Client) + in := reflect.ValueOf(w.Input) + method := client.MethodByName(w.Config.Operation + "Request") + + for i := 0; i < w.MaxAttempts; i++ { + res := method.Call([]reflect.Value{in}) + req := res[0].Interface().(*request.Request) + req.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Waiter")) + + err := req.Send() + for _, a := range w.Acceptors { + result := false + var vals []interface{} + switch a.Matcher { + case "pathAll", "path": + // Require all matches to be equal for result to match + vals, _ = awsutil.ValuesAtPath(req.Data, a.Argument) + if len(vals) == 0 { + break + } + result = true + for _, val := range vals { + if !awsutil.DeepEqual(val, a.Expected) { + result = false + break + } + } + case "pathAny": + // Only a single match needs to equal for the result to match + vals, _ = awsutil.ValuesAtPath(req.Data, a.Argument) + for _, val := range vals { + if awsutil.DeepEqual(val, a.Expected) { + result = true + break + } + } + case "status": + s := a.Expected.(int) + result = s == req.HTTPResponse.StatusCode + case "error": + if aerr, ok := err.(awserr.Error); ok { + result = aerr.Code() == a.Expected.(string) + } + case "pathList": + // ignored matcher + default: + logf(client, "WARNING: Waiter for %s encountered unexpected matcher: %s", + w.Config.Operation, a.Matcher) + } + + if !result { + // If there was no matching result found there is nothing more to do + // for this response, retry the request. 
+ continue + } + + switch a.State { + case "success": + // waiter completed + return nil + case "failure": + // Waiter failure state triggered + return awserr.New("ResourceNotReady", + fmt.Sprintf("failed waiting for successful resource state"), err) + case "retry": + // clear the error and retry the operation + err = nil + default: + logf(client, "WARNING: Waiter for %s encountered unexpected state: %s", + w.Config.Operation, a.State) + } + } + if err != nil { + return err + } + + time.Sleep(time.Second * time.Duration(w.Delay)) + } + + return awserr.New("ResourceNotReady", + fmt.Sprintf("exceeded %d wait attempts", w.MaxAttempts), nil) +} + +func logf(client reflect.Value, msg string, args ...interface{}) { + cfgVal := client.FieldByName("Config") + if !cfgVal.IsValid() { + return + } + if cfg, ok := cfgVal.Interface().(*aws.Config); ok && cfg.Logger != nil { + cfg.Logger.Log(fmt.Sprintf(msg, args...)) + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/waiter/waiter_test.go b/vendor/github.com/aws/aws-sdk-go/private/waiter/waiter_test.go new file mode 100644 index 000000000..28fac2595 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/waiter/waiter_test.go @@ -0,0 +1,401 @@ +package waiter_test + +import ( + "bytes" + "io/ioutil" + "net/http" + "testing" + + "github.com/stretchr/testify/assert" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/client" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/awstesting" + "github.com/aws/aws-sdk-go/private/waiter" +) + +type mockClient struct { + *client.Client +} +type MockInput struct{} +type MockOutput struct { + States []*MockState +} +type MockState struct { + State *string +} + +func (c *mockClient) MockRequest(input *MockInput) (*request.Request, *MockOutput) { + op := &request.Operation{ + Name: "Mock", + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &MockInput{} + } + + output := &MockOutput{} + req := c.NewRequest(op, input, output) + req.Data = output + return req, output +} + +func TestWaiterPathAll(t *testing.T) { + svc := &mockClient{Client: awstesting.NewClient(&aws.Config{ + Region: aws.String("mock-region"), + })} + svc.Handlers.Send.Clear() // mock sending + svc.Handlers.Unmarshal.Clear() + svc.Handlers.UnmarshalMeta.Clear() + svc.Handlers.ValidateResponse.Clear() + + reqNum := 0 + resps := []*MockOutput{ + { // Request 1 + States: []*MockState{ + {State: aws.String("pending")}, + {State: aws.String("pending")}, + }, + }, + { // Request 2 + States: []*MockState{ + {State: aws.String("running")}, + {State: aws.String("pending")}, + }, + }, + { // Request 3 + States: []*MockState{ + {State: aws.String("running")}, + {State: aws.String("running")}, + }, + }, + } + + numBuiltReq := 0 + svc.Handlers.Build.PushBack(func(r *request.Request) { + numBuiltReq++ + }) + svc.Handlers.Unmarshal.PushBack(func(r *request.Request) { + if reqNum >= len(resps) { + assert.Fail(t, "too many polling requests made") + return + } + r.Data = resps[reqNum] + reqNum++ + }) + + waiterCfg := waiter.Config{ + Operation: "Mock", + Delay: 0, + MaxAttempts: 10, + Acceptors: []waiter.WaitAcceptor{ + { + State: "success", + Matcher: "pathAll", + Argument: "States[].State", + Expected: "running", + }, + }, + } + w := waiter.Waiter{ + Client: svc, + Input: &MockInput{}, + Config: waiterCfg, + } + + err := w.Wait() + assert.NoError(t, err) + assert.Equal(t, 3, numBuiltReq) + assert.Equal(t, 3, reqNum) +} + +func TestWaiterPath(t 
*testing.T) { + svc := &mockClient{Client: awstesting.NewClient(&aws.Config{ + Region: aws.String("mock-region"), + })} + svc.Handlers.Send.Clear() // mock sending + svc.Handlers.Unmarshal.Clear() + svc.Handlers.UnmarshalMeta.Clear() + svc.Handlers.ValidateResponse.Clear() + + reqNum := 0 + resps := []*MockOutput{ + { // Request 1 + States: []*MockState{ + {State: aws.String("pending")}, + {State: aws.String("pending")}, + }, + }, + { // Request 2 + States: []*MockState{ + {State: aws.String("running")}, + {State: aws.String("pending")}, + }, + }, + { // Request 3 + States: []*MockState{ + {State: aws.String("running")}, + {State: aws.String("running")}, + }, + }, + } + + numBuiltReq := 0 + svc.Handlers.Build.PushBack(func(r *request.Request) { + numBuiltReq++ + }) + svc.Handlers.Unmarshal.PushBack(func(r *request.Request) { + if reqNum >= len(resps) { + assert.Fail(t, "too many polling requests made") + return + } + r.Data = resps[reqNum] + reqNum++ + }) + + waiterCfg := waiter.Config{ + Operation: "Mock", + Delay: 0, + MaxAttempts: 10, + Acceptors: []waiter.WaitAcceptor{ + { + State: "success", + Matcher: "path", + Argument: "States[].State", + Expected: "running", + }, + }, + } + w := waiter.Waiter{ + Client: svc, + Input: &MockInput{}, + Config: waiterCfg, + } + + err := w.Wait() + assert.NoError(t, err) + assert.Equal(t, 3, numBuiltReq) + assert.Equal(t, 3, reqNum) +} + +func TestWaiterFailure(t *testing.T) { + svc := &mockClient{Client: awstesting.NewClient(&aws.Config{ + Region: aws.String("mock-region"), + })} + svc.Handlers.Send.Clear() // mock sending + svc.Handlers.Unmarshal.Clear() + svc.Handlers.UnmarshalMeta.Clear() + svc.Handlers.ValidateResponse.Clear() + + reqNum := 0 + resps := []*MockOutput{ + { // Request 1 + States: []*MockState{ + {State: aws.String("pending")}, + {State: aws.String("pending")}, + }, + }, + { // Request 2 + States: []*MockState{ + {State: aws.String("running")}, + {State: aws.String("pending")}, + }, + }, + { // Request 3 + States: []*MockState{ + {State: aws.String("running")}, + {State: aws.String("stopping")}, + }, + }, + } + + numBuiltReq := 0 + svc.Handlers.Build.PushBack(func(r *request.Request) { + numBuiltReq++ + }) + svc.Handlers.Unmarshal.PushBack(func(r *request.Request) { + if reqNum >= len(resps) { + assert.Fail(t, "too many polling requests made") + return + } + r.Data = resps[reqNum] + reqNum++ + }) + + waiterCfg := waiter.Config{ + Operation: "Mock", + Delay: 0, + MaxAttempts: 10, + Acceptors: []waiter.WaitAcceptor{ + { + State: "success", + Matcher: "pathAll", + Argument: "States[].State", + Expected: "running", + }, + { + State: "failure", + Matcher: "pathAny", + Argument: "States[].State", + Expected: "stopping", + }, + }, + } + w := waiter.Waiter{ + Client: svc, + Input: &MockInput{}, + Config: waiterCfg, + } + + err := w.Wait().(awserr.Error) + assert.Error(t, err) + assert.Equal(t, "ResourceNotReady", err.Code()) + assert.Equal(t, "failed waiting for successful resource state", err.Message()) + assert.Equal(t, 3, numBuiltReq) + assert.Equal(t, 3, reqNum) +} + +func TestWaiterError(t *testing.T) { + svc := &mockClient{Client: awstesting.NewClient(&aws.Config{ + Region: aws.String("mock-region"), + })} + svc.Handlers.Send.Clear() // mock sending + svc.Handlers.Unmarshal.Clear() + svc.Handlers.UnmarshalMeta.Clear() + svc.Handlers.UnmarshalError.Clear() + svc.Handlers.ValidateResponse.Clear() + + reqNum := 0 + resps := []*MockOutput{ + { // Request 1 + States: []*MockState{ + {State: aws.String("pending")}, + {State: 
aws.String("pending")}, + }, + }, + { // Request 2, error case + }, + { // Request 3 + States: []*MockState{ + {State: aws.String("running")}, + {State: aws.String("running")}, + }, + }, + } + + numBuiltReq := 0 + svc.Handlers.Build.PushBack(func(r *request.Request) { + numBuiltReq++ + }) + svc.Handlers.Send.PushBack(func(r *request.Request) { + code := 200 + if reqNum == 1 { + code = 400 + } + r.HTTPResponse = &http.Response{ + StatusCode: code, + Status: http.StatusText(code), + Body: ioutil.NopCloser(bytes.NewReader([]byte{})), + } + }) + svc.Handlers.Unmarshal.PushBack(func(r *request.Request) { + if reqNum >= len(resps) { + assert.Fail(t, "too many polling requests made") + return + } + r.Data = resps[reqNum] + reqNum++ + }) + svc.Handlers.UnmarshalMeta.PushBack(func(r *request.Request) { + if reqNum == 1 { + r.Error = awserr.New("MockException", "mock exception message", nil) + // If there was an error unmarshal error will be called instead of unmarshal + // need to increment count here also + reqNum++ + } + }) + + waiterCfg := waiter.Config{ + Operation: "Mock", + Delay: 0, + MaxAttempts: 10, + Acceptors: []waiter.WaitAcceptor{ + { + State: "success", + Matcher: "pathAll", + Argument: "States[].State", + Expected: "running", + }, + { + State: "retry", + Matcher: "error", + Argument: "", + Expected: "MockException", + }, + }, + } + w := waiter.Waiter{ + Client: svc, + Input: &MockInput{}, + Config: waiterCfg, + } + + err := w.Wait() + assert.NoError(t, err) + assert.Equal(t, 3, numBuiltReq) + assert.Equal(t, 3, reqNum) +} + +func TestWaiterStatus(t *testing.T) { + svc := &mockClient{Client: awstesting.NewClient(&aws.Config{ + Region: aws.String("mock-region"), + })} + svc.Handlers.Send.Clear() // mock sending + svc.Handlers.Unmarshal.Clear() + svc.Handlers.UnmarshalMeta.Clear() + svc.Handlers.ValidateResponse.Clear() + + reqNum := 0 + svc.Handlers.Build.PushBack(func(r *request.Request) { + reqNum++ + }) + svc.Handlers.Send.PushBack(func(r *request.Request) { + code := 200 + if reqNum == 3 { + code = 404 + r.Error = awserr.New("NotFound", "Not Found", nil) + } + r.HTTPResponse = &http.Response{ + StatusCode: code, + Status: http.StatusText(code), + Body: ioutil.NopCloser(bytes.NewReader([]byte{})), + } + }) + + waiterCfg := waiter.Config{ + Operation: "Mock", + Delay: 0, + MaxAttempts: 10, + Acceptors: []waiter.WaitAcceptor{ + { + State: "success", + Matcher: "status", + Argument: "", + Expected: 404, + }, + }, + } + w := waiter.Waiter{ + Client: svc, + Input: &MockInput{}, + Config: waiterCfg, + } + + err := w.Wait() + assert.NoError(t, err) + assert.Equal(t, 3, reqNum) +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/autoscaling/api.go b/vendor/github.com/aws/aws-sdk-go/service/autoscaling/api.go new file mode 100644 index 000000000..da7fda939 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/autoscaling/api.go @@ -0,0 +1,5108 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +// Package autoscaling provides a client for Auto Scaling. +package autoscaling + +import ( + "time" + + "github.com/aws/aws-sdk-go/aws/awsutil" + "github.com/aws/aws-sdk-go/aws/request" +) + +const opAttachInstances = "AttachInstances" + +// AttachInstancesRequest generates a request for the AttachInstances operation. 
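+//
+// Every operation on this client comes as a Request/Send pair like the one
+// below: the *Request method only builds a request.Request, and nothing goes
+// on the wire until Send is called. A minimal usage sketch (illustrative
+// only; the region, group name, and instance ID are placeholders, and the
+// aws/session imports are assumed):
+//
+//	svc := autoscaling.New(session.New(&aws.Config{Region: aws.String("us-west-2")}))
+//	req, out := svc.AttachInstancesRequest(&autoscaling.AttachInstancesInput{
+//		AutoScalingGroupName: aws.String("my-asg"),
+//		InstanceIds:          []*string{aws.String("i-abc123")},
+//	})
+//	if err := req.Send(); err == nil {
+//		_ = out // out is populated once Send returns successfully
+//	}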
+func (c *AutoScaling) AttachInstancesRequest(input *AttachInstancesInput) (req *request.Request, output *AttachInstancesOutput) { + op := &request.Operation{ + Name: opAttachInstances, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &AttachInstancesInput{} + } + + req = c.newRequest(op, input, output) + output = &AttachInstancesOutput{} + req.Data = output + return +} + +// Attaches one or more EC2 instances to the specified Auto Scaling group. +// +// When you attach instances, Auto Scaling increases the desired capacity of +// the group by the number of instances being attached. If the number of instances +// being attached plus the desired capacity of the group exceeds the maximum +// size of the group, the operation fails. +// +// For more information, see Attach EC2 Instances to Your Auto Scaling Group +// (http://docs.aws.amazon.com/AutoScaling/latest/DeveloperGuide/attach-instance-asg.html) +// in the Auto Scaling Developer Guide. +func (c *AutoScaling) AttachInstances(input *AttachInstancesInput) (*AttachInstancesOutput, error) { + req, out := c.AttachInstancesRequest(input) + err := req.Send() + return out, err +} + +const opAttachLoadBalancers = "AttachLoadBalancers" + +// AttachLoadBalancersRequest generates a request for the AttachLoadBalancers operation. +func (c *AutoScaling) AttachLoadBalancersRequest(input *AttachLoadBalancersInput) (req *request.Request, output *AttachLoadBalancersOutput) { + op := &request.Operation{ + Name: opAttachLoadBalancers, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &AttachLoadBalancersInput{} + } + + req = c.newRequest(op, input, output) + output = &AttachLoadBalancersOutput{} + req.Data = output + return +} + +// Attaches one or more load balancers to the specified Auto Scaling group. +// +// To describe the load balancers for an Auto Scaling group, use DescribeLoadBalancers. +// To detach the load balancer from the Auto Scaling group, use DetachLoadBalancers. +// +// For more information, see Attach a Load Balancer to Your Auto Scaling Group +// (http://docs.aws.amazon.com/AutoScaling/latest/DeveloperGuide/attach-load-balancer-asg.html) +// in the Auto Scaling Developer Guide. +func (c *AutoScaling) AttachLoadBalancers(input *AttachLoadBalancersInput) (*AttachLoadBalancersOutput, error) { + req, out := c.AttachLoadBalancersRequest(input) + err := req.Send() + return out, err +} + +const opCompleteLifecycleAction = "CompleteLifecycleAction" + +// CompleteLifecycleActionRequest generates a request for the CompleteLifecycleAction operation. +func (c *AutoScaling) CompleteLifecycleActionRequest(input *CompleteLifecycleActionInput) (req *request.Request, output *CompleteLifecycleActionOutput) { + op := &request.Operation{ + Name: opCompleteLifecycleAction, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CompleteLifecycleActionInput{} + } + + req = c.newRequest(op, input, output) + output = &CompleteLifecycleActionOutput{} + req.Data = output + return +} + +// Completes the lifecycle action for the associated token initiated under the +// given lifecycle hook with the specified result. +// +// This operation is a part of the basic sequence for adding a lifecycle hook +// to an Auto Scaling group: +// +// Create a notification target. A target can be either an Amazon SQS queue +// or an Amazon SNS topic. Create an IAM role. This role allows Auto Scaling +// to publish lifecycle notifications to the designated SQS queue or SNS topic. +// Create the lifecycle hook. 
You can create a hook that acts when instances +// launch or when instances terminate. If necessary, record the lifecycle action +// heartbeat to keep the instance in a pending state. Complete the lifecycle +// action. For more information, see Auto Scaling Pending State (http://docs.aws.amazon.com/AutoScaling/latest/DeveloperGuide/AutoScalingPendingState.html) +// and Auto Scaling Terminating State (http://docs.aws.amazon.com/AutoScaling/latest/DeveloperGuide/AutoScalingTerminatingState.html) +// in the Auto Scaling Developer Guide. +func (c *AutoScaling) CompleteLifecycleAction(input *CompleteLifecycleActionInput) (*CompleteLifecycleActionOutput, error) { + req, out := c.CompleteLifecycleActionRequest(input) + err := req.Send() + return out, err +} + +const opCreateAutoScalingGroup = "CreateAutoScalingGroup" + +// CreateAutoScalingGroupRequest generates a request for the CreateAutoScalingGroup operation. +func (c *AutoScaling) CreateAutoScalingGroupRequest(input *CreateAutoScalingGroupInput) (req *request.Request, output *CreateAutoScalingGroupOutput) { + op := &request.Operation{ + Name: opCreateAutoScalingGroup, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateAutoScalingGroupInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateAutoScalingGroupOutput{} + req.Data = output + return +} + +// Creates an Auto Scaling group with the specified name and attributes. +// +// If you exceed your maximum limit of Auto Scaling groups, which by default +// is 20 per region, the call fails. For information about viewing and updating +// this limit, see DescribeAccountLimits. +// +// For more information, see Auto Scaling Groups (http://docs.aws.amazon.com/AutoScaling/latest/DeveloperGuide/AutoScalingGroup.html) +// in the Auto Scaling Developer Guide. +func (c *AutoScaling) CreateAutoScalingGroup(input *CreateAutoScalingGroupInput) (*CreateAutoScalingGroupOutput, error) { + req, out := c.CreateAutoScalingGroupRequest(input) + err := req.Send() + return out, err +} + +const opCreateLaunchConfiguration = "CreateLaunchConfiguration" + +// CreateLaunchConfigurationRequest generates a request for the CreateLaunchConfiguration operation. +func (c *AutoScaling) CreateLaunchConfigurationRequest(input *CreateLaunchConfigurationInput) (req *request.Request, output *CreateLaunchConfigurationOutput) { + op := &request.Operation{ + Name: opCreateLaunchConfiguration, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateLaunchConfigurationInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateLaunchConfigurationOutput{} + req.Data = output + return +} + +// Creates a launch configuration. +// +// If you exceed your maximum limit of launch configurations, which by default +// is 100 per region, the call fails. For information about viewing and updating +// this limit, see DescribeAccountLimits. +// +// For more information, see Launch Configurations (http://docs.aws.amazon.com/AutoScaling/latest/DeveloperGuide/LaunchConfiguration.html) +// in the Auto Scaling Developer Guide. +func (c *AutoScaling) CreateLaunchConfiguration(input *CreateLaunchConfigurationInput) (*CreateLaunchConfigurationOutput, error) { + req, out := c.CreateLaunchConfigurationRequest(input) + err := req.Send() + return out, err +} + +const opCreateOrUpdateTags = "CreateOrUpdateTags" + +// CreateOrUpdateTagsRequest generates a request for the CreateOrUpdateTags operation. 
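+//
+// A sketch of the input shape (values are illustrative; svc is a configured
+// *AutoScaling client as in the AttachInstances example above):
+//
+//	_, err := svc.CreateOrUpdateTags(&autoscaling.CreateOrUpdateTagsInput{
+//		Tags: []*autoscaling.Tag{{
+//			ResourceId:        aws.String("my-asg"),
+//			ResourceType:      aws.String("auto-scaling-group"),
+//			Key:               aws.String("environment"),
+//			Value:             aws.String("production"),
+//			PropagateAtLaunch: aws.Bool(true),
+//		}},
+//	})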
+func (c *AutoScaling) CreateOrUpdateTagsRequest(input *CreateOrUpdateTagsInput) (req *request.Request, output *CreateOrUpdateTagsOutput) { + op := &request.Operation{ + Name: opCreateOrUpdateTags, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateOrUpdateTagsInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateOrUpdateTagsOutput{} + req.Data = output + return +} + +// Creates or updates tags for the specified Auto Scaling group. +// +// A tag is defined by its resource ID, resource type, key, value, and propagate +// flag. The value and the propagate flag are optional parameters. The only +// supported resource type is auto-scaling-group, and the resource ID must be +// the name of the group. The PropagateAtLaunch flag determines whether the +// tag is added to instances launched in the group. Valid values are true or +// false. +// +// When you specify a tag with a key that already exists, the operation overwrites +// the previous tag definition, and you do not get an error message. +// +// For more information, see Tagging Auto Scaling Groups and Instances (http://docs.aws.amazon.com/AutoScaling/latest/DeveloperGuide/ASTagging.html) +// in the Auto Scaling Developer Guide. +func (c *AutoScaling) CreateOrUpdateTags(input *CreateOrUpdateTagsInput) (*CreateOrUpdateTagsOutput, error) { + req, out := c.CreateOrUpdateTagsRequest(input) + err := req.Send() + return out, err +} + +const opDeleteAutoScalingGroup = "DeleteAutoScalingGroup" + +// DeleteAutoScalingGroupRequest generates a request for the DeleteAutoScalingGroup operation. +func (c *AutoScaling) DeleteAutoScalingGroupRequest(input *DeleteAutoScalingGroupInput) (req *request.Request, output *DeleteAutoScalingGroupOutput) { + op := &request.Operation{ + Name: opDeleteAutoScalingGroup, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteAutoScalingGroupInput{} + } + + req = c.newRequest(op, input, output) + output = &DeleteAutoScalingGroupOutput{} + req.Data = output + return +} + +// Deletes the specified Auto Scaling group. +// +// If the group has instances or scaling activities in progress, you must specify +// the option to force the deletion in order for it to succeed. +// +// If the group has policies, deleting the group deletes the policies, the +// underlying alarm actions, and any alarm that no longer has an associated +// action. +// +// To remove instances from the Auto Scaling group before deleting it, call +// DetachInstances with the list of instances and the option to decrement the +// desired capacity so that Auto Scaling does not launch replacement instances. +// +// To terminate all instances before deleting the Auto Scaling group, call +// UpdateAutoScalingGroup and set the minimum size and desired capacity of the +// Auto Scaling group to zero. +func (c *AutoScaling) DeleteAutoScalingGroup(input *DeleteAutoScalingGroupInput) (*DeleteAutoScalingGroupOutput, error) { + req, out := c.DeleteAutoScalingGroupRequest(input) + err := req.Send() + return out, err +} + +const opDeleteLaunchConfiguration = "DeleteLaunchConfiguration" + +// DeleteLaunchConfigurationRequest generates a request for the DeleteLaunchConfiguration operation. 
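+//
+// Sketch (the name is a placeholder): once no Auto Scaling group references
+// the launch configuration, it can be removed with a single call,
+//
+//	_, err := svc.DeleteLaunchConfiguration(&autoscaling.DeleteLaunchConfigurationInput{
+//		LaunchConfigurationName: aws.String("my-old-launch-config"),
+//	})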
+func (c *AutoScaling) DeleteLaunchConfigurationRequest(input *DeleteLaunchConfigurationInput) (req *request.Request, output *DeleteLaunchConfigurationOutput) { + op := &request.Operation{ + Name: opDeleteLaunchConfiguration, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteLaunchConfigurationInput{} + } + + req = c.newRequest(op, input, output) + output = &DeleteLaunchConfigurationOutput{} + req.Data = output + return +} + +// Deletes the specified launch configuration. +// +// The launch configuration must not be attached to an Auto Scaling group. +// When this call completes, the launch configuration is no longer available +// for use. +func (c *AutoScaling) DeleteLaunchConfiguration(input *DeleteLaunchConfigurationInput) (*DeleteLaunchConfigurationOutput, error) { + req, out := c.DeleteLaunchConfigurationRequest(input) + err := req.Send() + return out, err +} + +const opDeleteLifecycleHook = "DeleteLifecycleHook" + +// DeleteLifecycleHookRequest generates a request for the DeleteLifecycleHook operation. +func (c *AutoScaling) DeleteLifecycleHookRequest(input *DeleteLifecycleHookInput) (req *request.Request, output *DeleteLifecycleHookOutput) { + op := &request.Operation{ + Name: opDeleteLifecycleHook, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteLifecycleHookInput{} + } + + req = c.newRequest(op, input, output) + output = &DeleteLifecycleHookOutput{} + req.Data = output + return +} + +// Deletes the specified lifecycle hook. +// +// If there are any outstanding lifecycle actions, they are completed first +// (ABANDON for launching instances, CONTINUE for terminating instances). +func (c *AutoScaling) DeleteLifecycleHook(input *DeleteLifecycleHookInput) (*DeleteLifecycleHookOutput, error) { + req, out := c.DeleteLifecycleHookRequest(input) + err := req.Send() + return out, err +} + +const opDeleteNotificationConfiguration = "DeleteNotificationConfiguration" + +// DeleteNotificationConfigurationRequest generates a request for the DeleteNotificationConfiguration operation. +func (c *AutoScaling) DeleteNotificationConfigurationRequest(input *DeleteNotificationConfigurationInput) (req *request.Request, output *DeleteNotificationConfigurationOutput) { + op := &request.Operation{ + Name: opDeleteNotificationConfiguration, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteNotificationConfigurationInput{} + } + + req = c.newRequest(op, input, output) + output = &DeleteNotificationConfigurationOutput{} + req.Data = output + return +} + +// Deletes the specified notification. +func (c *AutoScaling) DeleteNotificationConfiguration(input *DeleteNotificationConfigurationInput) (*DeleteNotificationConfigurationOutput, error) { + req, out := c.DeleteNotificationConfigurationRequest(input) + err := req.Send() + return out, err +} + +const opDeletePolicy = "DeletePolicy" + +// DeletePolicyRequest generates a request for the DeletePolicy operation. +func (c *AutoScaling) DeletePolicyRequest(input *DeletePolicyInput) (req *request.Request, output *DeletePolicyOutput) { + op := &request.Operation{ + Name: opDeletePolicy, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeletePolicyInput{} + } + + req = c.newRequest(op, input, output) + output = &DeletePolicyOutput{} + req.Data = output + return +} + +// Deletes the specified Auto Scaling policy. 
+// +// Deleting a policy deletes the underlying alarm action, but does not delete +// the alarm, even if it no longer has an associated action. +func (c *AutoScaling) DeletePolicy(input *DeletePolicyInput) (*DeletePolicyOutput, error) { + req, out := c.DeletePolicyRequest(input) + err := req.Send() + return out, err +} + +const opDeleteScheduledAction = "DeleteScheduledAction" + +// DeleteScheduledActionRequest generates a request for the DeleteScheduledAction operation. +func (c *AutoScaling) DeleteScheduledActionRequest(input *DeleteScheduledActionInput) (req *request.Request, output *DeleteScheduledActionOutput) { + op := &request.Operation{ + Name: opDeleteScheduledAction, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteScheduledActionInput{} + } + + req = c.newRequest(op, input, output) + output = &DeleteScheduledActionOutput{} + req.Data = output + return +} + +// Deletes the specified scheduled action. +func (c *AutoScaling) DeleteScheduledAction(input *DeleteScheduledActionInput) (*DeleteScheduledActionOutput, error) { + req, out := c.DeleteScheduledActionRequest(input) + err := req.Send() + return out, err +} + +const opDeleteTags = "DeleteTags" + +// DeleteTagsRequest generates a request for the DeleteTags operation. +func (c *AutoScaling) DeleteTagsRequest(input *DeleteTagsInput) (req *request.Request, output *DeleteTagsOutput) { + op := &request.Operation{ + Name: opDeleteTags, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteTagsInput{} + } + + req = c.newRequest(op, input, output) + output = &DeleteTagsOutput{} + req.Data = output + return +} + +// Deletes the specified tags. +func (c *AutoScaling) DeleteTags(input *DeleteTagsInput) (*DeleteTagsOutput, error) { + req, out := c.DeleteTagsRequest(input) + err := req.Send() + return out, err +} + +const opDescribeAccountLimits = "DescribeAccountLimits" + +// DescribeAccountLimitsRequest generates a request for the DescribeAccountLimits operation. +func (c *AutoScaling) DescribeAccountLimitsRequest(input *DescribeAccountLimitsInput) (req *request.Request, output *DescribeAccountLimitsOutput) { + op := &request.Operation{ + Name: opDescribeAccountLimits, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeAccountLimitsInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeAccountLimitsOutput{} + req.Data = output + return +} + +// Describes the current Auto Scaling resource limits for your AWS account. +// +// For information about requesting an increase in these limits, see AWS Service +// Limits (http://docs.aws.amazon.com/general/latest/gr/aws_service_limits.html) +// in the Amazon Web Services General Reference. +func (c *AutoScaling) DescribeAccountLimits(input *DescribeAccountLimitsInput) (*DescribeAccountLimitsOutput, error) { + req, out := c.DescribeAccountLimitsRequest(input) + err := req.Send() + return out, err +} + +const opDescribeAdjustmentTypes = "DescribeAdjustmentTypes" + +// DescribeAdjustmentTypesRequest generates a request for the DescribeAdjustmentTypes operation. 
+func (c *AutoScaling) DescribeAdjustmentTypesRequest(input *DescribeAdjustmentTypesInput) (req *request.Request, output *DescribeAdjustmentTypesOutput) { + op := &request.Operation{ + Name: opDescribeAdjustmentTypes, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeAdjustmentTypesInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeAdjustmentTypesOutput{} + req.Data = output + return +} + +// Describes the policy adjustment types for use with PutScalingPolicy. +func (c *AutoScaling) DescribeAdjustmentTypes(input *DescribeAdjustmentTypesInput) (*DescribeAdjustmentTypesOutput, error) { + req, out := c.DescribeAdjustmentTypesRequest(input) + err := req.Send() + return out, err +} + +const opDescribeAutoScalingGroups = "DescribeAutoScalingGroups" + +// DescribeAutoScalingGroupsRequest generates a request for the DescribeAutoScalingGroups operation. +func (c *AutoScaling) DescribeAutoScalingGroupsRequest(input *DescribeAutoScalingGroupsInput) (req *request.Request, output *DescribeAutoScalingGroupsOutput) { + op := &request.Operation{ + Name: opDescribeAutoScalingGroups, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxRecords", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeAutoScalingGroupsInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeAutoScalingGroupsOutput{} + req.Data = output + return +} + +// Describes one or more Auto Scaling groups. If a list of names is not provided, +// the call describes all Auto Scaling groups. +func (c *AutoScaling) DescribeAutoScalingGroups(input *DescribeAutoScalingGroupsInput) (*DescribeAutoScalingGroupsOutput, error) { + req, out := c.DescribeAutoScalingGroupsRequest(input) + err := req.Send() + return out, err +} + +func (c *AutoScaling) DescribeAutoScalingGroupsPages(input *DescribeAutoScalingGroupsInput, fn func(p *DescribeAutoScalingGroupsOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.DescribeAutoScalingGroupsRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*DescribeAutoScalingGroupsOutput), lastPage) + }) +} + +const opDescribeAutoScalingInstances = "DescribeAutoScalingInstances" + +// DescribeAutoScalingInstancesRequest generates a request for the DescribeAutoScalingInstances operation. +func (c *AutoScaling) DescribeAutoScalingInstancesRequest(input *DescribeAutoScalingInstancesInput) (req *request.Request, output *DescribeAutoScalingInstancesOutput) { + op := &request.Operation{ + Name: opDescribeAutoScalingInstances, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxRecords", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeAutoScalingInstancesInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeAutoScalingInstancesOutput{} + req.Data = output + return +} + +// Describes one or more Auto Scaling instances. If a list is not provided, +// the call describes all instances. 
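+//
+// The Pages variant below drives the NextToken/MaxRecords paginator for this
+// operation: the callback receives one page at a time and returns false to
+// stop early. A hedged sketch (the loop body is illustrative):
+//
+//	err := svc.DescribeAutoScalingInstancesPages(&autoscaling.DescribeAutoScalingInstancesInput{},
+//		func(page *autoscaling.DescribeAutoScalingInstancesOutput, lastPage bool) bool {
+//			for _, inst := range page.AutoScalingInstances {
+//				fmt.Println(*inst.InstanceId)
+//			}
+//			return !lastPage // keep going until the final page
+//		})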
+func (c *AutoScaling) DescribeAutoScalingInstances(input *DescribeAutoScalingInstancesInput) (*DescribeAutoScalingInstancesOutput, error) { + req, out := c.DescribeAutoScalingInstancesRequest(input) + err := req.Send() + return out, err +} + +func (c *AutoScaling) DescribeAutoScalingInstancesPages(input *DescribeAutoScalingInstancesInput, fn func(p *DescribeAutoScalingInstancesOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.DescribeAutoScalingInstancesRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*DescribeAutoScalingInstancesOutput), lastPage) + }) +} + +const opDescribeAutoScalingNotificationTypes = "DescribeAutoScalingNotificationTypes" + +// DescribeAutoScalingNotificationTypesRequest generates a request for the DescribeAutoScalingNotificationTypes operation. +func (c *AutoScaling) DescribeAutoScalingNotificationTypesRequest(input *DescribeAutoScalingNotificationTypesInput) (req *request.Request, output *DescribeAutoScalingNotificationTypesOutput) { + op := &request.Operation{ + Name: opDescribeAutoScalingNotificationTypes, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeAutoScalingNotificationTypesInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeAutoScalingNotificationTypesOutput{} + req.Data = output + return +} + +// Describes the notification types that are supported by Auto Scaling. +func (c *AutoScaling) DescribeAutoScalingNotificationTypes(input *DescribeAutoScalingNotificationTypesInput) (*DescribeAutoScalingNotificationTypesOutput, error) { + req, out := c.DescribeAutoScalingNotificationTypesRequest(input) + err := req.Send() + return out, err +} + +const opDescribeLaunchConfigurations = "DescribeLaunchConfigurations" + +// DescribeLaunchConfigurationsRequest generates a request for the DescribeLaunchConfigurations operation. +func (c *AutoScaling) DescribeLaunchConfigurationsRequest(input *DescribeLaunchConfigurationsInput) (req *request.Request, output *DescribeLaunchConfigurationsOutput) { + op := &request.Operation{ + Name: opDescribeLaunchConfigurations, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxRecords", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeLaunchConfigurationsInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeLaunchConfigurationsOutput{} + req.Data = output + return +} + +// Describes one or more launch configurations. If you omit the list of names, +// then the call describes all launch configurations. 
+func (c *AutoScaling) DescribeLaunchConfigurations(input *DescribeLaunchConfigurationsInput) (*DescribeLaunchConfigurationsOutput, error) { + req, out := c.DescribeLaunchConfigurationsRequest(input) + err := req.Send() + return out, err +} + +func (c *AutoScaling) DescribeLaunchConfigurationsPages(input *DescribeLaunchConfigurationsInput, fn func(p *DescribeLaunchConfigurationsOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.DescribeLaunchConfigurationsRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*DescribeLaunchConfigurationsOutput), lastPage) + }) +} + +const opDescribeLifecycleHookTypes = "DescribeLifecycleHookTypes" + +// DescribeLifecycleHookTypesRequest generates a request for the DescribeLifecycleHookTypes operation. +func (c *AutoScaling) DescribeLifecycleHookTypesRequest(input *DescribeLifecycleHookTypesInput) (req *request.Request, output *DescribeLifecycleHookTypesOutput) { + op := &request.Operation{ + Name: opDescribeLifecycleHookTypes, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeLifecycleHookTypesInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeLifecycleHookTypesOutput{} + req.Data = output + return +} + +// Describes the available types of lifecycle hooks. +func (c *AutoScaling) DescribeLifecycleHookTypes(input *DescribeLifecycleHookTypesInput) (*DescribeLifecycleHookTypesOutput, error) { + req, out := c.DescribeLifecycleHookTypesRequest(input) + err := req.Send() + return out, err +} + +const opDescribeLifecycleHooks = "DescribeLifecycleHooks" + +// DescribeLifecycleHooksRequest generates a request for the DescribeLifecycleHooks operation. +func (c *AutoScaling) DescribeLifecycleHooksRequest(input *DescribeLifecycleHooksInput) (req *request.Request, output *DescribeLifecycleHooksOutput) { + op := &request.Operation{ + Name: opDescribeLifecycleHooks, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeLifecycleHooksInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeLifecycleHooksOutput{} + req.Data = output + return +} + +// Describes the lifecycle hooks for the specified Auto Scaling group. +func (c *AutoScaling) DescribeLifecycleHooks(input *DescribeLifecycleHooksInput) (*DescribeLifecycleHooksOutput, error) { + req, out := c.DescribeLifecycleHooksRequest(input) + err := req.Send() + return out, err +} + +const opDescribeLoadBalancers = "DescribeLoadBalancers" + +// DescribeLoadBalancersRequest generates a request for the DescribeLoadBalancers operation. +func (c *AutoScaling) DescribeLoadBalancersRequest(input *DescribeLoadBalancersInput) (req *request.Request, output *DescribeLoadBalancersOutput) { + op := &request.Operation{ + Name: opDescribeLoadBalancers, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeLoadBalancersInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeLoadBalancersOutput{} + req.Data = output + return +} + +// Describes the load balancers for the specified Auto Scaling group. 
+func (c *AutoScaling) DescribeLoadBalancers(input *DescribeLoadBalancersInput) (*DescribeLoadBalancersOutput, error) { + req, out := c.DescribeLoadBalancersRequest(input) + err := req.Send() + return out, err +} + +const opDescribeMetricCollectionTypes = "DescribeMetricCollectionTypes" + +// DescribeMetricCollectionTypesRequest generates a request for the DescribeMetricCollectionTypes operation. +func (c *AutoScaling) DescribeMetricCollectionTypesRequest(input *DescribeMetricCollectionTypesInput) (req *request.Request, output *DescribeMetricCollectionTypesOutput) { + op := &request.Operation{ + Name: opDescribeMetricCollectionTypes, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeMetricCollectionTypesInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeMetricCollectionTypesOutput{} + req.Data = output + return +} + +// Describes the available CloudWatch metrics for Auto Scaling. +// +// Note that the GroupStandbyInstances metric is not returned by default. You +// must explicitly request this metric when calling EnableMetricsCollection. +func (c *AutoScaling) DescribeMetricCollectionTypes(input *DescribeMetricCollectionTypesInput) (*DescribeMetricCollectionTypesOutput, error) { + req, out := c.DescribeMetricCollectionTypesRequest(input) + err := req.Send() + return out, err +} + +const opDescribeNotificationConfigurations = "DescribeNotificationConfigurations" + +// DescribeNotificationConfigurationsRequest generates a request for the DescribeNotificationConfigurations operation. +func (c *AutoScaling) DescribeNotificationConfigurationsRequest(input *DescribeNotificationConfigurationsInput) (req *request.Request, output *DescribeNotificationConfigurationsOutput) { + op := &request.Operation{ + Name: opDescribeNotificationConfigurations, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxRecords", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeNotificationConfigurationsInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeNotificationConfigurationsOutput{} + req.Data = output + return +} + +// Describes the notification actions associated with the specified Auto Scaling +// group. +func (c *AutoScaling) DescribeNotificationConfigurations(input *DescribeNotificationConfigurationsInput) (*DescribeNotificationConfigurationsOutput, error) { + req, out := c.DescribeNotificationConfigurationsRequest(input) + err := req.Send() + return out, err +} + +func (c *AutoScaling) DescribeNotificationConfigurationsPages(input *DescribeNotificationConfigurationsInput, fn func(p *DescribeNotificationConfigurationsOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.DescribeNotificationConfigurationsRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*DescribeNotificationConfigurationsOutput), lastPage) + }) +} + +const opDescribePolicies = "DescribePolicies" + +// DescribePoliciesRequest generates a request for the DescribePolicies operation. 
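+//
+// The Paginator metadata on this operation maps NextToken and MaxRecords, so
+// paging can also be driven by hand. A sketch (the MaxRecords value is an
+// arbitrary choice):
+//
+//	input := &autoscaling.DescribePoliciesInput{MaxRecords: aws.Int64(50)}
+//	for {
+//		out, err := svc.DescribePolicies(input)
+//		if err != nil {
+//			break // surface err to the caller in real code
+//		}
+//		// ...consume out.ScalingPolicies here...
+//		if out.NextToken == nil {
+//			break // final page
+//		}
+//		input.NextToken = out.NextToken
+//	}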
+func (c *AutoScaling) DescribePoliciesRequest(input *DescribePoliciesInput) (req *request.Request, output *DescribePoliciesOutput) { + op := &request.Operation{ + Name: opDescribePolicies, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxRecords", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribePoliciesInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribePoliciesOutput{} + req.Data = output + return +} + +// Describes the policies for the specified Auto Scaling group. +func (c *AutoScaling) DescribePolicies(input *DescribePoliciesInput) (*DescribePoliciesOutput, error) { + req, out := c.DescribePoliciesRequest(input) + err := req.Send() + return out, err +} + +func (c *AutoScaling) DescribePoliciesPages(input *DescribePoliciesInput, fn func(p *DescribePoliciesOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.DescribePoliciesRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*DescribePoliciesOutput), lastPage) + }) +} + +const opDescribeScalingActivities = "DescribeScalingActivities" + +// DescribeScalingActivitiesRequest generates a request for the DescribeScalingActivities operation. +func (c *AutoScaling) DescribeScalingActivitiesRequest(input *DescribeScalingActivitiesInput) (req *request.Request, output *DescribeScalingActivitiesOutput) { + op := &request.Operation{ + Name: opDescribeScalingActivities, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxRecords", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeScalingActivitiesInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeScalingActivitiesOutput{} + req.Data = output + return +} + +// Describes one or more scaling activities for the specified Auto Scaling group. +// If you omit the ActivityIds, the call returns all activities from the past +// six weeks. Activities are sorted by the start time. Activities still in progress +// appear first on the list. +func (c *AutoScaling) DescribeScalingActivities(input *DescribeScalingActivitiesInput) (*DescribeScalingActivitiesOutput, error) { + req, out := c.DescribeScalingActivitiesRequest(input) + err := req.Send() + return out, err +} + +func (c *AutoScaling) DescribeScalingActivitiesPages(input *DescribeScalingActivitiesInput, fn func(p *DescribeScalingActivitiesOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.DescribeScalingActivitiesRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*DescribeScalingActivitiesOutput), lastPage) + }) +} + +const opDescribeScalingProcessTypes = "DescribeScalingProcessTypes" + +// DescribeScalingProcessTypesRequest generates a request for the DescribeScalingProcessTypes operation. 
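+//
+// The process type names returned by this call are the values accepted by
+// SuspendProcesses and ResumeProcesses through ScalingProcessQuery. Sketch
+// (group and process names are placeholders):
+//
+//	_, err := svc.SuspendProcesses(&autoscaling.ScalingProcessQuery{
+//		AutoScalingGroupName: aws.String("my-asg"),
+//		ScalingProcesses:     []*string{aws.String("AlarmNotification")},
+//	})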
+func (c *AutoScaling) DescribeScalingProcessTypesRequest(input *DescribeScalingProcessTypesInput) (req *request.Request, output *DescribeScalingProcessTypesOutput) { + op := &request.Operation{ + Name: opDescribeScalingProcessTypes, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeScalingProcessTypesInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeScalingProcessTypesOutput{} + req.Data = output + return +} + +// Describes the scaling process types for use with ResumeProcesses and SuspendProcesses. +func (c *AutoScaling) DescribeScalingProcessTypes(input *DescribeScalingProcessTypesInput) (*DescribeScalingProcessTypesOutput, error) { + req, out := c.DescribeScalingProcessTypesRequest(input) + err := req.Send() + return out, err +} + +const opDescribeScheduledActions = "DescribeScheduledActions" + +// DescribeScheduledActionsRequest generates a request for the DescribeScheduledActions operation. +func (c *AutoScaling) DescribeScheduledActionsRequest(input *DescribeScheduledActionsInput) (req *request.Request, output *DescribeScheduledActionsOutput) { + op := &request.Operation{ + Name: opDescribeScheduledActions, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxRecords", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeScheduledActionsInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeScheduledActionsOutput{} + req.Data = output + return +} + +// Describes the actions scheduled for your Auto Scaling group that haven't +// run. To describe the actions that have already run, use DescribeScalingActivities. +func (c *AutoScaling) DescribeScheduledActions(input *DescribeScheduledActionsInput) (*DescribeScheduledActionsOutput, error) { + req, out := c.DescribeScheduledActionsRequest(input) + err := req.Send() + return out, err +} + +func (c *AutoScaling) DescribeScheduledActionsPages(input *DescribeScheduledActionsInput, fn func(p *DescribeScheduledActionsOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.DescribeScheduledActionsRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*DescribeScheduledActionsOutput), lastPage) + }) +} + +const opDescribeTags = "DescribeTags" + +// DescribeTagsRequest generates a request for the DescribeTags operation. +func (c *AutoScaling) DescribeTagsRequest(input *DescribeTagsInput) (req *request.Request, output *DescribeTagsOutput) { + op := &request.Operation{ + Name: opDescribeTags, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxRecords", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeTagsInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeTagsOutput{} + req.Data = output + return +} + +// Describes the specified tags. +// +// You can use filters to limit the results. For example, you can query for +// the tags for a specific Auto Scaling group. You can specify multiple values +// for a filter. A tag must match at least one of the specified values for it +// to be included in the results. +// +// You can also specify multiple filters. The result includes information for +// a particular tag only if it matches all the filters. 
If there's no match, +// no special message is returned. +func (c *AutoScaling) DescribeTags(input *DescribeTagsInput) (*DescribeTagsOutput, error) { + req, out := c.DescribeTagsRequest(input) + err := req.Send() + return out, err +} + +func (c *AutoScaling) DescribeTagsPages(input *DescribeTagsInput, fn func(p *DescribeTagsOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.DescribeTagsRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*DescribeTagsOutput), lastPage) + }) +} + +const opDescribeTerminationPolicyTypes = "DescribeTerminationPolicyTypes" + +// DescribeTerminationPolicyTypesRequest generates a request for the DescribeTerminationPolicyTypes operation. +func (c *AutoScaling) DescribeTerminationPolicyTypesRequest(input *DescribeTerminationPolicyTypesInput) (req *request.Request, output *DescribeTerminationPolicyTypesOutput) { + op := &request.Operation{ + Name: opDescribeTerminationPolicyTypes, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeTerminationPolicyTypesInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeTerminationPolicyTypesOutput{} + req.Data = output + return +} + +// Describes the termination policies supported by Auto Scaling. +func (c *AutoScaling) DescribeTerminationPolicyTypes(input *DescribeTerminationPolicyTypesInput) (*DescribeTerminationPolicyTypesOutput, error) { + req, out := c.DescribeTerminationPolicyTypesRequest(input) + err := req.Send() + return out, err +} + +const opDetachInstances = "DetachInstances" + +// DetachInstancesRequest generates a request for the DetachInstances operation. +func (c *AutoScaling) DetachInstancesRequest(input *DetachInstancesInput) (req *request.Request, output *DetachInstancesOutput) { + op := &request.Operation{ + Name: opDetachInstances, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DetachInstancesInput{} + } + + req = c.newRequest(op, input, output) + output = &DetachInstancesOutput{} + req.Data = output + return +} + +// Removes one or more instances from the specified Auto Scaling group. +// +// After the instances are detached, you can manage them independently from +// the rest of the Auto Scaling group. +// +// If you do not specify the option to decrement the desired capacity, Auto +// Scaling launches instances to replace the ones that are detached. +// +// For more information, see Detach EC2 Instances from Your Auto Scaling Group +// (http://docs.aws.amazon.com/AutoScaling/latest/DeveloperGuide/detach-instance-asg.html) +// in the Auto Scaling Developer Guide. +func (c *AutoScaling) DetachInstances(input *DetachInstancesInput) (*DetachInstancesOutput, error) { + req, out := c.DetachInstancesRequest(input) + err := req.Send() + return out, err +} + +const opDetachLoadBalancers = "DetachLoadBalancers" + +// DetachLoadBalancersRequest generates a request for the DetachLoadBalancers operation. 
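+//
+// Sketch (names assumed); the detached instances keep running, as noted in
+// the method documentation below:
+//
+//	_, err := svc.DetachLoadBalancers(&autoscaling.DetachLoadBalancersInput{
+//		AutoScalingGroupName: aws.String("my-asg"),
+//		LoadBalancerNames:    []*string{aws.String("my-elb")},
+//	})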
+func (c *AutoScaling) DetachLoadBalancersRequest(input *DetachLoadBalancersInput) (req *request.Request, output *DetachLoadBalancersOutput) { + op := &request.Operation{ + Name: opDetachLoadBalancers, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DetachLoadBalancersInput{} + } + + req = c.newRequest(op, input, output) + output = &DetachLoadBalancersOutput{} + req.Data = output + return +} + +// Removes one or more load balancers from the specified Auto Scaling group. +// +// When you detach a load balancer, it enters the Removing state while deregistering +// the instances in the group. When all instances are deregistered, then you +// can no longer describe the load balancer using DescribeLoadBalancers. Note +// that the instances remain running. +func (c *AutoScaling) DetachLoadBalancers(input *DetachLoadBalancersInput) (*DetachLoadBalancersOutput, error) { + req, out := c.DetachLoadBalancersRequest(input) + err := req.Send() + return out, err +} + +const opDisableMetricsCollection = "DisableMetricsCollection" + +// DisableMetricsCollectionRequest generates a request for the DisableMetricsCollection operation. +func (c *AutoScaling) DisableMetricsCollectionRequest(input *DisableMetricsCollectionInput) (req *request.Request, output *DisableMetricsCollectionOutput) { + op := &request.Operation{ + Name: opDisableMetricsCollection, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DisableMetricsCollectionInput{} + } + + req = c.newRequest(op, input, output) + output = &DisableMetricsCollectionOutput{} + req.Data = output + return +} + +// Disables monitoring of the specified metrics for the specified Auto Scaling +// group. +func (c *AutoScaling) DisableMetricsCollection(input *DisableMetricsCollectionInput) (*DisableMetricsCollectionOutput, error) { + req, out := c.DisableMetricsCollectionRequest(input) + err := req.Send() + return out, err +} + +const opEnableMetricsCollection = "EnableMetricsCollection" + +// EnableMetricsCollectionRequest generates a request for the EnableMetricsCollection operation. +func (c *AutoScaling) EnableMetricsCollectionRequest(input *EnableMetricsCollectionInput) (req *request.Request, output *EnableMetricsCollectionOutput) { + op := &request.Operation{ + Name: opEnableMetricsCollection, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &EnableMetricsCollectionInput{} + } + + req = c.newRequest(op, input, output) + output = &EnableMetricsCollectionOutput{} + req.Data = output + return +} + +// Enables monitoring of the specified metrics for the specified Auto Scaling +// group. +// +// You can only enable metrics collection if InstanceMonitoring in the launch +// configuration for the group is set to True. +func (c *AutoScaling) EnableMetricsCollection(input *EnableMetricsCollectionInput) (*EnableMetricsCollectionOutput, error) { + req, out := c.EnableMetricsCollectionRequest(input) + err := req.Send() + return out, err +} + +const opEnterStandby = "EnterStandby" + +// EnterStandbyRequest generates a request for the EnterStandby operation. +func (c *AutoScaling) EnterStandbyRequest(input *EnterStandbyInput) (req *request.Request, output *EnterStandbyOutput) { + op := &request.Operation{ + Name: opEnterStandby, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &EnterStandbyInput{} + } + + req = c.newRequest(op, input, output) + output = &EnterStandbyOutput{} + req.Data = output + return +} + +// Moves the specified instances into Standby mode. 
+// +// For more information, see Auto Scaling InService State (http://docs.aws.amazon.com/AutoScaling/latest/DeveloperGuide/AutoScalingInServiceState.html) +// in the Auto Scaling Developer Guide. +func (c *AutoScaling) EnterStandby(input *EnterStandbyInput) (*EnterStandbyOutput, error) { + req, out := c.EnterStandbyRequest(input) + err := req.Send() + return out, err +} + +const opExecutePolicy = "ExecutePolicy" + +// ExecutePolicyRequest generates a request for the ExecutePolicy operation. +func (c *AutoScaling) ExecutePolicyRequest(input *ExecutePolicyInput) (req *request.Request, output *ExecutePolicyOutput) { + op := &request.Operation{ + Name: opExecutePolicy, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ExecutePolicyInput{} + } + + req = c.newRequest(op, input, output) + output = &ExecutePolicyOutput{} + req.Data = output + return +} + +// Executes the specified policy. +func (c *AutoScaling) ExecutePolicy(input *ExecutePolicyInput) (*ExecutePolicyOutput, error) { + req, out := c.ExecutePolicyRequest(input) + err := req.Send() + return out, err +} + +const opExitStandby = "ExitStandby" + +// ExitStandbyRequest generates a request for the ExitStandby operation. +func (c *AutoScaling) ExitStandbyRequest(input *ExitStandbyInput) (req *request.Request, output *ExitStandbyOutput) { + op := &request.Operation{ + Name: opExitStandby, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ExitStandbyInput{} + } + + req = c.newRequest(op, input, output) + output = &ExitStandbyOutput{} + req.Data = output + return +} + +// Moves the specified instances out of Standby mode. +// +// For more information, see Auto Scaling InService State (http://docs.aws.amazon.com/AutoScaling/latest/DeveloperGuide/AutoScalingInServiceState.html) +// in the Auto Scaling Developer Guide. +func (c *AutoScaling) ExitStandby(input *ExitStandbyInput) (*ExitStandbyOutput, error) { + req, out := c.ExitStandbyRequest(input) + err := req.Send() + return out, err +} + +const opPutLifecycleHook = "PutLifecycleHook" + +// PutLifecycleHookRequest generates a request for the PutLifecycleHook operation. +func (c *AutoScaling) PutLifecycleHookRequest(input *PutLifecycleHookInput) (req *request.Request, output *PutLifecycleHookOutput) { + op := &request.Operation{ + Name: opPutLifecycleHook, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &PutLifecycleHookInput{} + } + + req = c.newRequest(op, input, output) + output = &PutLifecycleHookOutput{} + req.Data = output + return +} + +// Creates or updates a lifecycle hook for the specified Auto Scaling Group. +// +// A lifecycle hook tells Auto Scaling that you want to perform an action on +// an instance that is not actively in service; for example, either when the +// instance launches or before the instance terminates. +// +// This operation is a part of the basic sequence for adding a lifecycle hook +// to an Auto Scaling group: +// +// Create a notification target. A target can be either an Amazon SQS queue +// or an Amazon SNS topic. Create an IAM role. This role allows Auto Scaling +// to publish lifecycle notifications to the designated SQS queue or SNS topic. +// Create the lifecycle hook. You can create a hook that acts when instances +// launch or when instances terminate. If necessary, record the lifecycle action +// heartbeat to keep the instance in a pending state. Complete the lifecycle +// action. 
For more information, see Auto Scaling Pending State (http://docs.aws.amazon.com/AutoScaling/latest/DeveloperGuide/AutoScalingPendingState.html) +// and Auto Scaling Terminating State (http://docs.aws.amazon.com/AutoScaling/latest/DeveloperGuide/AutoScalingTerminatingState.html) +// in the Auto Scaling Developer Guide. +// +// If you exceed your maximum limit of lifecycle hooks, which by default is +// 50 per region, the call fails. For information about updating this limit, +// see AWS Service Limits (http://docs.aws.amazon.com/general/latest/gr/aws_service_limits.html) +// in the Amazon Web Services General Reference. +func (c *AutoScaling) PutLifecycleHook(input *PutLifecycleHookInput) (*PutLifecycleHookOutput, error) { + req, out := c.PutLifecycleHookRequest(input) + err := req.Send() + return out, err +} + +const opPutNotificationConfiguration = "PutNotificationConfiguration" + +// PutNotificationConfigurationRequest generates a request for the PutNotificationConfiguration operation. +func (c *AutoScaling) PutNotificationConfigurationRequest(input *PutNotificationConfigurationInput) (req *request.Request, output *PutNotificationConfigurationOutput) { + op := &request.Operation{ + Name: opPutNotificationConfiguration, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &PutNotificationConfigurationInput{} + } + + req = c.newRequest(op, input, output) + output = &PutNotificationConfigurationOutput{} + req.Data = output + return +} + +// Configures an Auto Scaling group to send notifications when specified events +// take place. Subscribers to this topic can have messages for events delivered +// to an endpoint such as a web server or email address. +// +// For more information see Getting Notifications When Your Auto Scaling Group +// Changes (http://docs.aws.amazon.com/AutoScaling/latest/DeveloperGuide/ASGettingNotifications.html) +// in the Auto Scaling Developer Guide. +// +// This configuration overwrites an existing configuration. +func (c *AutoScaling) PutNotificationConfiguration(input *PutNotificationConfigurationInput) (*PutNotificationConfigurationOutput, error) { + req, out := c.PutNotificationConfigurationRequest(input) + err := req.Send() + return out, err +} + +const opPutScalingPolicy = "PutScalingPolicy" + +// PutScalingPolicyRequest generates a request for the PutScalingPolicy operation. +func (c *AutoScaling) PutScalingPolicyRequest(input *PutScalingPolicyInput) (req *request.Request, output *PutScalingPolicyOutput) { + op := &request.Operation{ + Name: opPutScalingPolicy, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &PutScalingPolicyInput{} + } + + req = c.newRequest(op, input, output) + output = &PutScalingPolicyOutput{} + req.Data = output + return +} + +// Creates or updates a policy for an Auto Scaling group. To update an existing +// policy, use the existing policy name and set the parameters you want to change. +// Any existing parameter not changed in an update to an existing policy is +// not changed in this update request. +// +// If you exceed your maximum limit of step adjustments, which by default is +// 20 per region, the call fails. For information about updating this limit, +// see AWS Service Limits (http://docs.aws.amazon.com/general/latest/gr/aws_service_limits.html) +// in the Amazon Web Services General Reference. 
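+//
+// A sketch of a simple policy (values are illustrative; on success the
+// returned PolicyARN is what a CloudWatch alarm action would reference):
+//
+//	out, err := svc.PutScalingPolicy(&autoscaling.PutScalingPolicyInput{
+//		AutoScalingGroupName: aws.String("my-asg"),
+//		PolicyName:           aws.String("scale-out-by-one"),
+//		AdjustmentType:       aws.String("ChangeInCapacity"),
+//		ScalingAdjustment:    aws.Int64(1),
+//	})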
+func (c *AutoScaling) PutScalingPolicy(input *PutScalingPolicyInput) (*PutScalingPolicyOutput, error) { + req, out := c.PutScalingPolicyRequest(input) + err := req.Send() + return out, err +} + +const opPutScheduledUpdateGroupAction = "PutScheduledUpdateGroupAction" + +// PutScheduledUpdateGroupActionRequest generates a request for the PutScheduledUpdateGroupAction operation. +func (c *AutoScaling) PutScheduledUpdateGroupActionRequest(input *PutScheduledUpdateGroupActionInput) (req *request.Request, output *PutScheduledUpdateGroupActionOutput) { + op := &request.Operation{ + Name: opPutScheduledUpdateGroupAction, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &PutScheduledUpdateGroupActionInput{} + } + + req = c.newRequest(op, input, output) + output = &PutScheduledUpdateGroupActionOutput{} + req.Data = output + return +} + +// Creates or updates a scheduled scaling action for an Auto Scaling group. +// When updating a scheduled scaling action, if you leave a parameter unspecified, +// the corresponding value remains unchanged in the affected Auto Scaling group. +// +// For more information, see Scheduled Scaling (http://docs.aws.amazon.com/AutoScaling/latest/DeveloperGuide/schedule_time.html) +// in the Auto Scaling Developer Guide. +func (c *AutoScaling) PutScheduledUpdateGroupAction(input *PutScheduledUpdateGroupActionInput) (*PutScheduledUpdateGroupActionOutput, error) { + req, out := c.PutScheduledUpdateGroupActionRequest(input) + err := req.Send() + return out, err +} + +const opRecordLifecycleActionHeartbeat = "RecordLifecycleActionHeartbeat" + +// RecordLifecycleActionHeartbeatRequest generates a request for the RecordLifecycleActionHeartbeat operation. +func (c *AutoScaling) RecordLifecycleActionHeartbeatRequest(input *RecordLifecycleActionHeartbeatInput) (req *request.Request, output *RecordLifecycleActionHeartbeatOutput) { + op := &request.Operation{ + Name: opRecordLifecycleActionHeartbeat, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &RecordLifecycleActionHeartbeatInput{} + } + + req = c.newRequest(op, input, output) + output = &RecordLifecycleActionHeartbeatOutput{} + req.Data = output + return +} + +// Records a heartbeat for the lifecycle action associated with a specific token. +// This extends the timeout by the length of time defined by the HeartbeatTimeout +// parameter of PutLifecycleHook. +// +// This operation is a part of the basic sequence for adding a lifecycle hook +// to an Auto Scaling group: +// +// Create a notification target. A target can be either an Amazon SQS queue +// or an Amazon SNS topic. Create an IAM role. This role allows Auto Scaling +// to publish lifecycle notifications to the designated SQS queue or SNS topic. +// Create the lifecycle hook. You can create a hook that acts when instances +// launch or when instances terminate. If necessary, record the lifecycle action +// heartbeat to keep the instance in a pending state. Complete the lifecycle +// action. For more information, see Auto Scaling Pending State (http://docs.aws.amazon.com/AutoScaling/latest/DeveloperGuide/AutoScalingPendingState.html) +// and Auto Scaling Terminating State (http://docs.aws.amazon.com/AutoScaling/latest/DeveloperGuide/AutoScalingTerminatingState.html) +// in the Auto Scaling Developer Guide. 
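+//
+// The full sequence above, compressed into a sketch (every name, the token
+// plumbing, and the timeout are assumptions, not prescribed values):
+//
+//	svc.PutLifecycleHook(&autoscaling.PutLifecycleHookInput{
+//		AutoScalingGroupName: aws.String("my-asg"),
+//		LifecycleHookName:    aws.String("drain-on-terminate"),
+//		LifecycleTransition:  aws.String("autoscaling:EC2_INSTANCE_TERMINATING"),
+//		HeartbeatTimeout:     aws.Int64(1800),
+//	})
+//	// while work is still in flight, extend the timeout:
+//	svc.RecordLifecycleActionHeartbeat(&autoscaling.RecordLifecycleActionHeartbeatInput{
+//		AutoScalingGroupName: aws.String("my-asg"),
+//		LifecycleHookName:    aws.String("drain-on-terminate"),
+//		LifecycleActionToken: token, // token arrives with the hook notification
+//	})
+//	// then allow the instance to continue terminating:
+//	svc.CompleteLifecycleAction(&autoscaling.CompleteLifecycleActionInput{
+//		AutoScalingGroupName:  aws.String("my-asg"),
+//		LifecycleHookName:     aws.String("drain-on-terminate"),
+//		LifecycleActionToken:  token,
+//		LifecycleActionResult: aws.String("CONTINUE"),
+//	})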
+func (c *AutoScaling) RecordLifecycleActionHeartbeat(input *RecordLifecycleActionHeartbeatInput) (*RecordLifecycleActionHeartbeatOutput, error) { + req, out := c.RecordLifecycleActionHeartbeatRequest(input) + err := req.Send() + return out, err +} + +const opResumeProcesses = "ResumeProcesses" + +// ResumeProcessesRequest generates a request for the ResumeProcesses operation. +func (c *AutoScaling) ResumeProcessesRequest(input *ScalingProcessQuery) (req *request.Request, output *ResumeProcessesOutput) { + op := &request.Operation{ + Name: opResumeProcesses, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ScalingProcessQuery{} + } + + req = c.newRequest(op, input, output) + output = &ResumeProcessesOutput{} + req.Data = output + return +} + +// Resumes the specified suspended Auto Scaling processes for the specified +// Auto Scaling group. To resume specific processes, use the ScalingProcesses +// parameter. To resume all processes, omit the ScalingProcesses parameter. +// For more information, see Suspend and Resume Auto Scaling Processes (http://docs.aws.amazon.com/AutoScaling/latest/DeveloperGuide/US_SuspendResume.html) +// in the Auto Scaling Developer Guide. +func (c *AutoScaling) ResumeProcesses(input *ScalingProcessQuery) (*ResumeProcessesOutput, error) { + req, out := c.ResumeProcessesRequest(input) + err := req.Send() + return out, err +} + +const opSetDesiredCapacity = "SetDesiredCapacity" + +// SetDesiredCapacityRequest generates a request for the SetDesiredCapacity operation. +func (c *AutoScaling) SetDesiredCapacityRequest(input *SetDesiredCapacityInput) (req *request.Request, output *SetDesiredCapacityOutput) { + op := &request.Operation{ + Name: opSetDesiredCapacity, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &SetDesiredCapacityInput{} + } + + req = c.newRequest(op, input, output) + output = &SetDesiredCapacityOutput{} + req.Data = output + return +} + +// Sets the size of the specified Auto Scaling group. +// +// For more information about desired capacity, see What Is Auto Scaling? (http://docs.aws.amazon.com/AutoScaling/latest/DeveloperGuide/WhatIsAutoScaling.html) +// in the Auto Scaling Developer Guide. +func (c *AutoScaling) SetDesiredCapacity(input *SetDesiredCapacityInput) (*SetDesiredCapacityOutput, error) { + req, out := c.SetDesiredCapacityRequest(input) + err := req.Send() + return out, err +} + +const opSetInstanceHealth = "SetInstanceHealth" + +// SetInstanceHealthRequest generates a request for the SetInstanceHealth operation. +func (c *AutoScaling) SetInstanceHealthRequest(input *SetInstanceHealthInput) (req *request.Request, output *SetInstanceHealthOutput) { + op := &request.Operation{ + Name: opSetInstanceHealth, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &SetInstanceHealthInput{} + } + + req = c.newRequest(op, input, output) + output = &SetInstanceHealthOutput{} + req.Data = output + return +} + +// Sets the health status of the specified instance. +// +// For more information, see Health Checks (http://docs.aws.amazon.com/AutoScaling/latest/DeveloperGuide/healthcheck.html) +// in the Auto Scaling Developer Guide. +func (c *AutoScaling) SetInstanceHealth(input *SetInstanceHealthInput) (*SetInstanceHealthOutput, error) { + req, out := c.SetInstanceHealthRequest(input) + err := req.Send() + return out, err +} + +const opSetInstanceProtection = "SetInstanceProtection" + +// SetInstanceProtectionRequest generates a request for the SetInstanceProtection operation. 
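+//
+// Sketch (identifiers are placeholders): mark instances so scale-in activity
+// skips them,
+//
+//	_, err := svc.SetInstanceProtection(&autoscaling.SetInstanceProtectionInput{
+//		AutoScalingGroupName: aws.String("my-asg"),
+//		InstanceIds:          []*string{aws.String("i-abc123")},
+//		ProtectedFromScaleIn: aws.Bool(true),
+//	})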
+func (c *AutoScaling) SetInstanceProtectionRequest(input *SetInstanceProtectionInput) (req *request.Request, output *SetInstanceProtectionOutput) { + op := &request.Operation{ + Name: opSetInstanceProtection, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &SetInstanceProtectionInput{} + } + + req = c.newRequest(op, input, output) + output = &SetInstanceProtectionOutput{} + req.Data = output + return +} + +// Updates the instance protection settings of the specified instances. +// +// For more information, see Instance Protection (http://docs.aws.amazon.com/AutoScaling/latest/DeveloperGuide/AutoScalingBehavior.InstanceTermination.html#instance-protection) +// in the Auto Scaling Developer Guide. +func (c *AutoScaling) SetInstanceProtection(input *SetInstanceProtectionInput) (*SetInstanceProtectionOutput, error) { + req, out := c.SetInstanceProtectionRequest(input) + err := req.Send() + return out, err +} + +const opSuspendProcesses = "SuspendProcesses" + +// SuspendProcessesRequest generates a request for the SuspendProcesses operation. +func (c *AutoScaling) SuspendProcessesRequest(input *ScalingProcessQuery) (req *request.Request, output *SuspendProcessesOutput) { + op := &request.Operation{ + Name: opSuspendProcesses, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ScalingProcessQuery{} + } + + req = c.newRequest(op, input, output) + output = &SuspendProcessesOutput{} + req.Data = output + return +} + +// Suspends the specified Auto Scaling processes for the specified Auto Scaling +// group. To suspend specific processes, use the ScalingProcesses parameter. +// To suspend all processes, omit the ScalingProcesses parameter. +// +// Note that if you suspend either the Launch or Terminate process types, it +// can prevent other process types from functioning properly. +// +// To resume processes that have been suspended, use ResumeProcesses. +// +// For more information, see Suspend and Resume Auto Scaling Processes (http://docs.aws.amazon.com/AutoScaling/latest/DeveloperGuide/US_SuspendResume.html) +// in the Auto Scaling Developer Guide. +func (c *AutoScaling) SuspendProcesses(input *ScalingProcessQuery) (*SuspendProcessesOutput, error) { + req, out := c.SuspendProcessesRequest(input) + err := req.Send() + return out, err +} + +const opTerminateInstanceInAutoScalingGroup = "TerminateInstanceInAutoScalingGroup" + +// TerminateInstanceInAutoScalingGroupRequest generates a request for the TerminateInstanceInAutoScalingGroup operation. +func (c *AutoScaling) TerminateInstanceInAutoScalingGroupRequest(input *TerminateInstanceInAutoScalingGroupInput) (req *request.Request, output *TerminateInstanceInAutoScalingGroupOutput) { + op := &request.Operation{ + Name: opTerminateInstanceInAutoScalingGroup, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &TerminateInstanceInAutoScalingGroupInput{} + } + + req = c.newRequest(op, input, output) + output = &TerminateInstanceInAutoScalingGroupOutput{} + req.Data = output + return +} + +// Terminates the specified instance and optionally adjusts the desired group +// size. +// +// This call simply makes a termination request. The instance is not terminated +// immediately. 
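+//
+// A minimal sketch (illustrative; svc is an assumed *AutoScaling client and
+// the instance ID is a placeholder):
+//
+//	out, err := svc.TerminateInstanceInAutoScalingGroup(&autoscaling.TerminateInstanceInAutoScalingGroupInput{
+//		InstanceId:                     aws.String("i-12345678"),
+//		ShouldDecrementDesiredCapacity: aws.Bool(true),
+//	})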
+func (c *AutoScaling) TerminateInstanceInAutoScalingGroup(input *TerminateInstanceInAutoScalingGroupInput) (*TerminateInstanceInAutoScalingGroupOutput, error) {
+	req, out := c.TerminateInstanceInAutoScalingGroupRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+const opUpdateAutoScalingGroup = "UpdateAutoScalingGroup"
+
+// UpdateAutoScalingGroupRequest generates a request for the UpdateAutoScalingGroup operation.
+func (c *AutoScaling) UpdateAutoScalingGroupRequest(input *UpdateAutoScalingGroupInput) (req *request.Request, output *UpdateAutoScalingGroupOutput) {
+	op := &request.Operation{
+		Name:       opUpdateAutoScalingGroup,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &UpdateAutoScalingGroupInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &UpdateAutoScalingGroupOutput{}
+	req.Data = output
+	return
+}
+
+// Updates the configuration for the specified Auto Scaling group.
+//
+// To update an Auto Scaling group with a launch configuration whose InstanceMonitoring
+// is set to False, you must first disable the collection of group metrics;
+// otherwise, the call returns an error. If you previously enabled the collection
+// of group metrics, you can disable it using DisableMetricsCollection.
+//
+// The new settings take effect once this call completes. Launch configuration
+// changes apply only to scaling activities triggered after the call returns;
+// scaling activities that are already in progress aren't affected.
+//
+// Note the following:
+//
+//   - If you specify a new value for MinSize without specifying a value for
+//     DesiredCapacity, and the new MinSize is larger than the current size of
+//     the group, Auto Scaling implicitly calls SetDesiredCapacity to set the
+//     size of the group to the new value of MinSize.
+//
+//   - If you specify a new value for MaxSize without specifying a value for
+//     DesiredCapacity, and the new MaxSize is smaller than the current size of
+//     the group, Auto Scaling implicitly calls SetDesiredCapacity to set the
+//     size of the group to the new value of MaxSize.
+//
+// All other optional parameters are left unchanged if not specified.
+func (c *AutoScaling) UpdateAutoScalingGroup(input *UpdateAutoScalingGroupInput) (*UpdateAutoScalingGroupOutput, error) {
+	req, out := c.UpdateAutoScalingGroupRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+// Describes scaling activity, which is a long-running process that represents
+// a change to your Auto Scaling group, such as changing its size or replacing
+// an instance.
+type Activity struct {
+	_ struct{} `type:"structure"`
+
+	// The ID of the activity.
+	ActivityId *string `type:"string" required:"true"`
+
+	// The name of the Auto Scaling group.
+	AutoScalingGroupName *string `min:"1" type:"string" required:"true"`
+
+	// The reason the activity began.
+	Cause *string `min:"1" type:"string" required:"true"`
+
+	// A friendly, more verbose description of the activity.
+	Description *string `type:"string"`
+
+	// The details about the activity.
+	Details *string `type:"string"`
+
+	// The end time of the activity.
+	EndTime *time.Time `type:"timestamp" timestampFormat:"iso8601"`
+
+	// A value between 0 and 100 that indicates the progress of the activity.
+	Progress *int64 `type:"integer"`
+
+	// The start time of the activity.
+	StartTime *time.Time `type:"timestamp" timestampFormat:"iso8601" required:"true"`
+
+	// The current status of the activity.
+ StatusCode *string `type:"string" required:"true" enum:"ScalingActivityStatusCode"` + + // A friendly, more verbose description of the activity status. + StatusMessage *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s Activity) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Activity) GoString() string { + return s.String() +} + +// Describes a policy adjustment type. +// +// For more information, see Dynamic Scaling (http://docs.aws.amazon.com/AutoScaling/latest/DeveloperGuide/as-scale-based-on-demand.html) +// in the Auto Scaling Developer Guide. +type AdjustmentType struct { + _ struct{} `type:"structure"` + + // The policy adjustment type. The valid values are ChangeInCapacity, ExactCapacity, + // and PercentChangeInCapacity. + AdjustmentType *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s AdjustmentType) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AdjustmentType) GoString() string { + return s.String() +} + +// Describes an alarm. +type Alarm struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the alarm. + AlarmARN *string `min:"1" type:"string"` + + // The name of the alarm. + AlarmName *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s Alarm) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Alarm) GoString() string { + return s.String() +} + +type AttachInstancesInput struct { + _ struct{} `type:"structure"` + + // The name of the group. + AutoScalingGroupName *string `min:"1" type:"string" required:"true"` + + // One or more EC2 instance IDs. + InstanceIds []*string `type:"list"` +} + +// String returns the string representation +func (s AttachInstancesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AttachInstancesInput) GoString() string { + return s.String() +} + +type AttachInstancesOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s AttachInstancesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AttachInstancesOutput) GoString() string { + return s.String() +} + +type AttachLoadBalancersInput struct { + _ struct{} `type:"structure"` + + // The name of the group. + AutoScalingGroupName *string `min:"1" type:"string"` + + // One or more load balancer names. + LoadBalancerNames []*string `type:"list"` +} + +// String returns the string representation +func (s AttachLoadBalancersInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AttachLoadBalancersInput) GoString() string { + return s.String() +} + +type AttachLoadBalancersOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s AttachLoadBalancersOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AttachLoadBalancersOutput) GoString() string { + return s.String() +} + +// Describes a block device mapping. +type BlockDeviceMapping struct { + _ struct{} `type:"structure"` + + // The device name exposed to the EC2 instance (for example, /dev/sdh or xvdh). 
+ DeviceName *string `min:"1" type:"string" required:"true"` + + // The information about the Amazon EBS volume. + Ebs *Ebs `type:"structure"` + + // Suppresses a device mapping. + // + // If this parameter is true for the root device, the instance might fail the + // EC2 health check. Auto Scaling launches a replacement instance if the instance + // fails the health check. + NoDevice *bool `type:"boolean"` + + // The name of the virtual device (for example, ephemeral0). + VirtualName *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s BlockDeviceMapping) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s BlockDeviceMapping) GoString() string { + return s.String() +} + +type CompleteLifecycleActionInput struct { + _ struct{} `type:"structure"` + + // The name of the group for the lifecycle hook. + AutoScalingGroupName *string `min:"1" type:"string" required:"true"` + + // The action for the group to take. This parameter can be either CONTINUE or + // ABANDON. + LifecycleActionResult *string `type:"string" required:"true"` + + // A universally unique identifier (UUID) that identifies a specific lifecycle + // action associated with an instance. Auto Scaling sends this token to the + // notification target you specified when you created the lifecycle hook. + LifecycleActionToken *string `min:"36" type:"string" required:"true"` + + // The name of the lifecycle hook. + LifecycleHookName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s CompleteLifecycleActionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CompleteLifecycleActionInput) GoString() string { + return s.String() +} + +type CompleteLifecycleActionOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s CompleteLifecycleActionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CompleteLifecycleActionOutput) GoString() string { + return s.String() +} + +type CreateAutoScalingGroupInput struct { + _ struct{} `type:"structure"` + + // The name of the group. This name must be unique within the scope of your + // AWS account. + AutoScalingGroupName *string `min:"1" type:"string" required:"true"` + + // One or more Availability Zones for the group. This parameter is optional + // if you specify subnets using the VPCZoneIdentifier parameter. + AvailabilityZones []*string `min:"1" type:"list"` + + // The amount of time, in seconds, after a scaling activity completes before + // another scaling activity can start. The default is 300. + // + // For more information, see Understanding Auto Scaling Cooldowns (http://docs.aws.amazon.com/AutoScaling/latest/DeveloperGuide/Cooldown.html) + // in the Auto Scaling Developer Guide. + DefaultCooldown *int64 `type:"integer"` + + // The number of EC2 instances that should be running in the group. This number + // must be greater than or equal to the minimum size of the group and less than + // or equal to the maximum size of the group. + DesiredCapacity *int64 `type:"integer"` + + // The amount of time, in seconds, that Auto Scaling waits before checking the + // health status of an EC2 instance that has come into service. During this + // time, any health check failures for the instance are ignored. The default + // is 300. 
+ // + // This parameter is required if you are adding an ELB health check. + // + // For more information, see Health Checks for Auto Scaling Instances (http://docs.aws.amazon.com/AutoScaling/latest/DeveloperGuide/healthcheck.html) + // in the Auto Scaling Developer Guide. + HealthCheckGracePeriod *int64 `type:"integer"` + + // The service to use for the health checks. The valid values are EC2 and ELB. + // + // By default, health checks use Amazon EC2 instance status checks to determine + // the health of an instance. For more information, see Health Checks (http://docs.aws.amazon.com/AutoScaling/latest/DeveloperGuide/healthcheck.html) + // in the Auto Scaling Developer Guide. + HealthCheckType *string `min:"1" type:"string"` + + // The ID of the EC2 instance used to create a launch configuration for the + // group. Alternatively, use the LaunchConfigurationName parameter to specify + // a launch configuration instead of an EC2 instance. + // + // When you specify an ID of an instance, Auto Scaling creates a new launch + // configuration and associates it with the group. This launch configuration + // derives its attributes from the specified instance, with the exception of + // the block device mapping. + // + // For more information, see Create an Auto Scaling Group from an EC2 Instance + // (http://docs.aws.amazon.com/AutoScaling/latest/DeveloperGuide/create-asg-from-instance.html) + // in the Auto Scaling Developer Guide. + InstanceId *string `min:"1" type:"string"` + + // The name of the launch configuration. Alternatively, use the InstanceId parameter + // to specify an EC2 instance instead of a launch configuration. + LaunchConfigurationName *string `min:"1" type:"string"` + + // One or more load balancers. + // + // For more information, see Load Balance Your Auto Scaling Group (http://docs.aws.amazon.com/AutoScaling/latest/DeveloperGuide/US_SetUpASLBApp.html) + // in the Auto Scaling Developer Guide. + LoadBalancerNames []*string `type:"list"` + + // The maximum size of the group. + MaxSize *int64 `type:"integer" required:"true"` + + // The minimum size of the group. + MinSize *int64 `type:"integer" required:"true"` + + // Indicates whether newly launched instances are protected from termination + // by Auto Scaling when scaling in. + NewInstancesProtectedFromScaleIn *bool `type:"boolean"` + + // The name of the placement group into which you'll launch your instances, + // if any. For more information, see Placement Groups (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/placement-groups.html) + // in the Amazon Elastic Compute Cloud User Guide. + PlacementGroup *string `min:"1" type:"string"` + + // The tag to be created or updated. Each tag should be defined by its resource + // type, resource ID, key, value, and a propagate flag. Valid values: key=value, + // value=value, propagate=true or false. Value and propagate are optional parameters. + // + // For more information, see Tagging Auto Scaling Groups and Instances (http://docs.aws.amazon.com/AutoScaling/latest/DeveloperGuide/ASTagging.html) + // in the Auto Scaling Developer Guide. + Tags []*Tag `type:"list"` + + // One or more termination policies used to select the instance to terminate. + // These policies are executed in the order that they are listed. + // + // For more information, see Choosing a Termination Policy for Your Auto Scaling + // Group (http://docs.aws.amazon.com/AutoScaling/latest/DeveloperGuide/us-termination-policy.html) + // in the Auto Scaling Developer Guide. 
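+	//
+	// For example (an illustrative value; DescribeTerminationPolicyTypes lists
+	// the supported policy names):
+	//
+	//	TerminationPolicies: []*string{aws.String("OldestInstance")}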
+ TerminationPolicies []*string `type:"list"` + + // A comma-separated list of subnet identifiers for your virtual private cloud + // (VPC). + // + // If you specify subnets and Availability Zones with this call, ensure that + // the subnets' Availability Zones match the Availability Zones specified. + // + // For more information, see Auto Scaling and Amazon Virtual Private Cloud + // (http://docs.aws.amazon.com/AutoScaling/latest/DeveloperGuide/autoscalingsubnets.html) + // in the Auto Scaling Developer Guide. + VPCZoneIdentifier *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s CreateAutoScalingGroupInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateAutoScalingGroupInput) GoString() string { + return s.String() +} + +type CreateAutoScalingGroupOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s CreateAutoScalingGroupOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateAutoScalingGroupOutput) GoString() string { + return s.String() +} + +type CreateLaunchConfigurationInput struct { + _ struct{} `type:"structure"` + + // Used for groups that launch instances into a virtual private cloud (VPC). + // Specifies whether to assign a public IP address to each instance. For more + // information, see Auto Scaling and Amazon Virtual Private Cloud (http://docs.aws.amazon.com/AutoScaling/latest/DeveloperGuide/autoscalingsubnets.html) + // in the Auto Scaling Developer Guide. + // + // If you specify a value for this parameter, be sure to specify at least one + // subnet using the VPCZoneIdentifier parameter when you create your group. + // + // Default: If the instance is launched into a default subnet, the default + // is true. If the instance is launched into a nondefault subnet, the default + // is false. For more information, see Supported Platforms (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-supported-platforms.html) + // in the Amazon Elastic Compute Cloud User Guide. + AssociatePublicIpAddress *bool `type:"boolean"` + + // One or more mappings that specify how block devices are exposed to the instance. + // For more information, see Block Device Mapping (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/block-device-mapping-concepts.html) + // in the Amazon Elastic Compute Cloud User Guide. + BlockDeviceMappings []*BlockDeviceMapping `type:"list"` + + // The ID of a ClassicLink-enabled VPC to link your EC2-Classic instances to. + // This parameter is supported only if you are launching EC2-Classic instances. + // For more information, see ClassicLink (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/vpc-classiclink.html) + // in the Amazon Elastic Compute Cloud User Guide. + ClassicLinkVPCId *string `min:"1" type:"string"` + + // The IDs of one or more security groups for the VPC specified in ClassicLinkVPCId. + // This parameter is required if ClassicLinkVPCId is specified, and is not supported + // otherwise. For more information, see ClassicLink (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/vpc-classiclink.html) + // in the Amazon Elastic Compute Cloud User Guide. + ClassicLinkVPCSecurityGroups []*string `type:"list"` + + // Indicates whether the instance is optimized for Amazon EBS I/O. By default, + // the instance is not optimized for EBS I/O. 
The optimization provides dedicated + // throughput to Amazon EBS and an optimized configuration stack to provide + // optimal I/O performance. This optimization is not available with all instance + // types. Additional usage charges apply. For more information, see Amazon EBS-Optimized + // Instances (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSOptimized.html) + // in the Amazon Elastic Compute Cloud User Guide. + EbsOptimized *bool `type:"boolean"` + + // The name or the Amazon Resource Name (ARN) of the instance profile associated + // with the IAM role for the instance. + // + // EC2 instances launched with an IAM role will automatically have AWS security + // credentials available. You can use IAM roles with Auto Scaling to automatically + // enable applications running on your EC2 instances to securely access other + // AWS resources. For more information, see Launch Auto Scaling Instances with + // an IAM Role (http://docs.aws.amazon.com/AutoScaling/latest/DeveloperGuide/us-iam-role.html) + // in the Auto Scaling Developer Guide. + IamInstanceProfile *string `min:"1" type:"string"` + + // The ID of the Amazon Machine Image (AMI) to use to launch your EC2 instances. + // For more information, see Finding an AMI (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/finding-an-ami.html) + // in the Amazon Elastic Compute Cloud User Guide. + ImageId *string `min:"1" type:"string"` + + // The ID of the EC2 instance to use to create the launch configuration. + // + // The new launch configuration derives attributes from the instance, with + // the exception of the block device mapping. + // + // To create a launch configuration with a block device mapping or override + // any other instance attributes, specify them as part of the same request. + // + // For more information, see Create a Launch Configuration Using an EC2 Instance + // (http://docs.aws.amazon.com/AutoScaling/latest/DeveloperGuide/create-lc-with-instanceID.html) + // in the Auto Scaling Developer Guide. + InstanceId *string `min:"1" type:"string"` + + // Enables detailed monitoring if it is disabled. Detailed monitoring is enabled + // by default. + // + // When detailed monitoring is enabled, Amazon CloudWatch generates metrics + // every minute and your account is charged a fee. When you disable detailed + // monitoring, by specifying False, CloudWatch generates metrics every 5 minutes. + // For more information, see Monitor Your Auto Scaling Instances (http://docs.aws.amazon.com/AutoScaling/latest/DeveloperGuide/as-instance-monitoring.html) + // in the Auto Scaling Developer Guide. + InstanceMonitoring *InstanceMonitoring `type:"structure"` + + // The instance type of the EC2 instance. For information about available instance + // types, see Available Instance Types (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-types.html#AvailableInstanceTypes) + // in the Amazon Elastic Compute Cloud User Guide. + InstanceType *string `min:"1" type:"string"` + + // The ID of the kernel associated with the AMI. + KernelId *string `min:"1" type:"string"` + + // The name of the key pair. For more information, see Amazon EC2 Key Pairs + // (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-key-pairs.html) in + // the Amazon Elastic Compute Cloud User Guide. + KeyName *string `min:"1" type:"string"` + + // The name of the launch configuration. This name must be unique within the + // scope of your AWS account. 
+	LaunchConfigurationName *string `min:"1" type:"string" required:"true"`
+
+	// The tenancy of the instance. An instance with a tenancy of dedicated runs
+	// on single-tenant hardware and can only be launched into a VPC.
+	//
+	// You must set the value of this parameter to dedicated if you want to launch
+	// Dedicated Instances into a shared tenancy VPC (a VPC with the instance placement
+	// tenancy attribute set to default).
+	//
+	// If you specify a value for this parameter, be sure to specify at least one
+	// subnet using the VPCZoneIdentifier parameter when you create your group.
+	//
+	// For more information, see Auto Scaling and Amazon Virtual Private Cloud
+	// (http://docs.aws.amazon.com/AutoScaling/latest/DeveloperGuide/autoscalingsubnets.html)
+	// in the Auto Scaling Developer Guide.
+	//
+	// Valid values: default | dedicated
+	PlacementTenancy *string `min:"1" type:"string"`
+
+	// The ID of the RAM disk associated with the AMI.
+	RamdiskId *string `min:"1" type:"string"`
+
+	// One or more security groups with which to associate the instances.
+	//
+	// If your instances are launched in EC2-Classic, you can specify either security
+	// group names or security group IDs. For more information about security
+	// groups for EC2-Classic, see Amazon EC2 Security Groups (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-network-security.html)
+	// in the Amazon Elastic Compute Cloud User Guide.
+	//
+	// If your instances are launched into a VPC, specify security group IDs. For
+	// more information, see Security Groups for Your VPC (http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_SecurityGroups.html)
+	// in the Amazon Virtual Private Cloud User Guide.
+	SecurityGroups []*string `type:"list"`
+
+	// The maximum hourly price to be paid for any Spot Instance launched to fulfill
+	// the request. Spot Instances are launched when the price you specify exceeds
+	// the current Spot market price. For more information, see Launch Spot Instances
+	// in Your Auto Scaling Group (http://docs.aws.amazon.com/AutoScaling/latest/DeveloperGuide/US-SpotInstances.html)
+	// in the Auto Scaling Developer Guide.
+	SpotPrice *string `min:"1" type:"string"`
+
+	// The user data to make available to the launched EC2 instances. For more information,
+	// see Instance Metadata and User Data (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-instance-metadata.html)
+	// in the Amazon Elastic Compute Cloud User Guide.
+	//
+	// At this time, launch configurations don't support compressed (zipped) user
+	// data files.
+	UserData *string `type:"string"`
+}
+
+// String returns the string representation
+func (s CreateLaunchConfigurationInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreateLaunchConfigurationInput) GoString() string {
+	return s.String()
+}
+
+type CreateLaunchConfigurationOutput struct {
+	_ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s CreateLaunchConfigurationOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreateLaunchConfigurationOutput) GoString() string {
+	return s.String()
+}
+
+type CreateOrUpdateTagsInput struct {
+	_ struct{} `type:"structure"`
+
+	// One or more tags.
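+	//
+	// For example, a single tag might look like the following (illustrative
+	// values; the field names are those of this package's Tag type):
+	//
+	//	&autoscaling.Tag{
+	//		ResourceId:        aws.String("my-asg"),
+	//		ResourceType:      aws.String("auto-scaling-group"),
+	//		Key:               aws.String("environment"),
+	//		Value:             aws.String("test"),
+	//		PropagateAtLaunch: aws.Bool(true),
+	//	}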
+ Tags []*Tag `type:"list" required:"true"` +} + +// String returns the string representation +func (s CreateOrUpdateTagsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateOrUpdateTagsInput) GoString() string { + return s.String() +} + +type CreateOrUpdateTagsOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s CreateOrUpdateTagsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateOrUpdateTagsOutput) GoString() string { + return s.String() +} + +type DeleteAutoScalingGroupInput struct { + _ struct{} `type:"structure"` + + // The name of the group to delete. + AutoScalingGroupName *string `min:"1" type:"string" required:"true"` + + // Specifies that the group will be deleted along with all instances associated + // with the group, without waiting for all instances to be terminated. This + // parameter also deletes any lifecycle actions associated with the group. + ForceDelete *bool `type:"boolean"` +} + +// String returns the string representation +func (s DeleteAutoScalingGroupInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteAutoScalingGroupInput) GoString() string { + return s.String() +} + +type DeleteAutoScalingGroupOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteAutoScalingGroupOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteAutoScalingGroupOutput) GoString() string { + return s.String() +} + +type DeleteLaunchConfigurationInput struct { + _ struct{} `type:"structure"` + + // The name of the launch configuration. + LaunchConfigurationName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteLaunchConfigurationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteLaunchConfigurationInput) GoString() string { + return s.String() +} + +type DeleteLaunchConfigurationOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteLaunchConfigurationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteLaunchConfigurationOutput) GoString() string { + return s.String() +} + +type DeleteLifecycleHookInput struct { + _ struct{} `type:"structure"` + + // The name of the Auto Scaling group for the lifecycle hook. + AutoScalingGroupName *string `min:"1" type:"string" required:"true"` + + // The name of the lifecycle hook. 
+ LifecycleHookName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteLifecycleHookInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteLifecycleHookInput) GoString() string { + return s.String() +} + +type DeleteLifecycleHookOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteLifecycleHookOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteLifecycleHookOutput) GoString() string { + return s.String() +} + +type DeleteNotificationConfigurationInput struct { + _ struct{} `type:"structure"` + + // The name of the Auto Scaling group. + AutoScalingGroupName *string `min:"1" type:"string" required:"true"` + + // The Amazon Resource Name (ARN) of the Amazon Simple Notification Service + // (SNS) topic. + TopicARN *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteNotificationConfigurationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteNotificationConfigurationInput) GoString() string { + return s.String() +} + +type DeleteNotificationConfigurationOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteNotificationConfigurationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteNotificationConfigurationOutput) GoString() string { + return s.String() +} + +type DeletePolicyInput struct { + _ struct{} `type:"structure"` + + // The name of the Auto Scaling group. + AutoScalingGroupName *string `min:"1" type:"string"` + + // The name or Amazon Resource Name (ARN) of the policy. + PolicyName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeletePolicyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeletePolicyInput) GoString() string { + return s.String() +} + +type DeletePolicyOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeletePolicyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeletePolicyOutput) GoString() string { + return s.String() +} + +type DeleteScheduledActionInput struct { + _ struct{} `type:"structure"` + + // The name of the Auto Scaling group. + AutoScalingGroupName *string `min:"1" type:"string"` + + // The name of the action to delete. 
+ ScheduledActionName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteScheduledActionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteScheduledActionInput) GoString() string { + return s.String() +} + +type DeleteScheduledActionOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteScheduledActionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteScheduledActionOutput) GoString() string { + return s.String() +} + +type DeleteTagsInput struct { + _ struct{} `type:"structure"` + + // Each tag should be defined by its resource type, resource ID, key, value, + // and a propagate flag. Valid values are: Resource type = auto-scaling-group, + // Resource ID = AutoScalingGroupName, key=value, value=value, propagate=true + // or false. + Tags []*Tag `type:"list" required:"true"` +} + +// String returns the string representation +func (s DeleteTagsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteTagsInput) GoString() string { + return s.String() +} + +type DeleteTagsOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteTagsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteTagsOutput) GoString() string { + return s.String() +} + +type DescribeAccountLimitsInput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DescribeAccountLimitsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeAccountLimitsInput) GoString() string { + return s.String() +} + +type DescribeAccountLimitsOutput struct { + _ struct{} `type:"structure"` + + // The maximum number of groups allowed for your AWS account. The default limit + // is 20 per region. + MaxNumberOfAutoScalingGroups *int64 `type:"integer"` + + // The maximum number of launch configurations allowed for your AWS account. + // The default limit is 100 per region. + MaxNumberOfLaunchConfigurations *int64 `type:"integer"` + + // The current number of groups for your AWS account. + NumberOfAutoScalingGroups *int64 `type:"integer"` + + // The current number of launch configurations for your AWS account. + NumberOfLaunchConfigurations *int64 `type:"integer"` +} + +// String returns the string representation +func (s DescribeAccountLimitsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeAccountLimitsOutput) GoString() string { + return s.String() +} + +type DescribeAdjustmentTypesInput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DescribeAdjustmentTypesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeAdjustmentTypesInput) GoString() string { + return s.String() +} + +type DescribeAdjustmentTypesOutput struct { + _ struct{} `type:"structure"` + + // The policy adjustment types. 
+ AdjustmentTypes []*AdjustmentType `type:"list"` +} + +// String returns the string representation +func (s DescribeAdjustmentTypesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeAdjustmentTypesOutput) GoString() string { + return s.String() +} + +type DescribeAutoScalingGroupsInput struct { + _ struct{} `type:"structure"` + + // The group names. + AutoScalingGroupNames []*string `type:"list"` + + // The maximum number of items to return with this call. + MaxRecords *int64 `type:"integer"` + + // The token for the next set of items to return. (You received this token from + // a previous call.) + NextToken *string `type:"string"` +} + +// String returns the string representation +func (s DescribeAutoScalingGroupsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeAutoScalingGroupsInput) GoString() string { + return s.String() +} + +type DescribeAutoScalingGroupsOutput struct { + _ struct{} `type:"structure"` + + // The groups. + AutoScalingGroups []*Group `type:"list" required:"true"` + + // The token to use when requesting the next set of items. If there are no additional + // items to return, the string is empty. + NextToken *string `type:"string"` +} + +// String returns the string representation +func (s DescribeAutoScalingGroupsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeAutoScalingGroupsOutput) GoString() string { + return s.String() +} + +type DescribeAutoScalingInstancesInput struct { + _ struct{} `type:"structure"` + + // One or more Auto Scaling instances to describe, up to 50 instances. If you + // omit this parameter, all Auto Scaling instances are described. If you specify + // an ID that does not exist, it is ignored with no error. + InstanceIds []*string `type:"list"` + + // The maximum number of items to return with this call. + MaxRecords *int64 `type:"integer"` + + // The token for the next set of items to return. (You received this token from + // a previous call.) + NextToken *string `type:"string"` +} + +// String returns the string representation +func (s DescribeAutoScalingInstancesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeAutoScalingInstancesInput) GoString() string { + return s.String() +} + +type DescribeAutoScalingInstancesOutput struct { + _ struct{} `type:"structure"` + + // The instances. + AutoScalingInstances []*InstanceDetails `type:"list"` + + // The token to use when requesting the next set of items. If there are no additional + // items to return, the string is empty. 
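+	//
+	// A typical pagination loop (an illustrative sketch; svc and input are
+	// assumed to be prepared by the caller):
+	//
+	//	for {
+	//		out, err := svc.DescribeAutoScalingInstances(input)
+	//		if err != nil {
+	//			break // handle the error as appropriate
+	//		}
+	//		// ... process out.AutoScalingInstances ...
+	//		if out.NextToken == nil || *out.NextToken == "" {
+	//			break
+	//		}
+	//		input.NextToken = out.NextToken
+	//	}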
+ NextToken *string `type:"string"` +} + +// String returns the string representation +func (s DescribeAutoScalingInstancesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeAutoScalingInstancesOutput) GoString() string { + return s.String() +} + +type DescribeAutoScalingNotificationTypesInput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DescribeAutoScalingNotificationTypesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeAutoScalingNotificationTypesInput) GoString() string { + return s.String() +} + +type DescribeAutoScalingNotificationTypesOutput struct { + _ struct{} `type:"structure"` + + // One or more of the following notification types: + // + // autoscaling:EC2_INSTANCE_LAUNCH + // + // autoscaling:EC2_INSTANCE_LAUNCH_ERROR + // + // autoscaling:EC2_INSTANCE_TERMINATE + // + // autoscaling:EC2_INSTANCE_TERMINATE_ERROR + // + // autoscaling:TEST_NOTIFICATION + AutoScalingNotificationTypes []*string `type:"list"` +} + +// String returns the string representation +func (s DescribeAutoScalingNotificationTypesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeAutoScalingNotificationTypesOutput) GoString() string { + return s.String() +} + +type DescribeLaunchConfigurationsInput struct { + _ struct{} `type:"structure"` + + // The launch configuration names. + LaunchConfigurationNames []*string `type:"list"` + + // The maximum number of items to return with this call. The default is 100. + MaxRecords *int64 `type:"integer"` + + // The token for the next set of items to return. (You received this token from + // a previous call.) + NextToken *string `type:"string"` +} + +// String returns the string representation +func (s DescribeLaunchConfigurationsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeLaunchConfigurationsInput) GoString() string { + return s.String() +} + +type DescribeLaunchConfigurationsOutput struct { + _ struct{} `type:"structure"` + + // The launch configurations. + LaunchConfigurations []*LaunchConfiguration `type:"list" required:"true"` + + // The token to use when requesting the next set of items. If there are no additional + // items to return, the string is empty. 
+ NextToken *string `type:"string"` +} + +// String returns the string representation +func (s DescribeLaunchConfigurationsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeLaunchConfigurationsOutput) GoString() string { + return s.String() +} + +type DescribeLifecycleHookTypesInput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DescribeLifecycleHookTypesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeLifecycleHookTypesInput) GoString() string { + return s.String() +} + +type DescribeLifecycleHookTypesOutput struct { + _ struct{} `type:"structure"` + + // One or more of the following notification types: + // + // autoscaling:EC2_INSTANCE_LAUNCHING + // + // autoscaling:EC2_INSTANCE_TERMINATING + LifecycleHookTypes []*string `type:"list"` +} + +// String returns the string representation +func (s DescribeLifecycleHookTypesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeLifecycleHookTypesOutput) GoString() string { + return s.String() +} + +type DescribeLifecycleHooksInput struct { + _ struct{} `type:"structure"` + + // The name of the group. + AutoScalingGroupName *string `min:"1" type:"string" required:"true"` + + // The names of one or more lifecycle hooks. + LifecycleHookNames []*string `type:"list"` +} + +// String returns the string representation +func (s DescribeLifecycleHooksInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeLifecycleHooksInput) GoString() string { + return s.String() +} + +type DescribeLifecycleHooksOutput struct { + _ struct{} `type:"structure"` + + // The lifecycle hooks for the specified group. + LifecycleHooks []*LifecycleHook `type:"list"` +} + +// String returns the string representation +func (s DescribeLifecycleHooksOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeLifecycleHooksOutput) GoString() string { + return s.String() +} + +type DescribeLoadBalancersInput struct { + _ struct{} `type:"structure"` + + // The name of the group. + AutoScalingGroupName *string `min:"1" type:"string" required:"true"` + + // The maximum number of items to return with this call. + MaxRecords *int64 `type:"integer"` + + // The token for the next set of items to return. (You received this token from + // a previous call.) + NextToken *string `type:"string"` +} + +// String returns the string representation +func (s DescribeLoadBalancersInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeLoadBalancersInput) GoString() string { + return s.String() +} + +type DescribeLoadBalancersOutput struct { + _ struct{} `type:"structure"` + + // The load balancers. + LoadBalancers []*LoadBalancerState `type:"list"` + + // The token to use when requesting the next set of items. If there are no additional + // items to return, the string is empty. 
+	NextToken *string `type:"string"`
+}
+
+// String returns the string representation
+func (s DescribeLoadBalancersOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeLoadBalancersOutput) GoString() string {
+	return s.String()
+}
+
+type DescribeMetricCollectionTypesInput struct {
+	_ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s DescribeMetricCollectionTypesInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeMetricCollectionTypesInput) GoString() string {
+	return s.String()
+}
+
+type DescribeMetricCollectionTypesOutput struct {
+	_ struct{} `type:"structure"`
+
+	// The granularities for the metrics.
+	Granularities []*MetricGranularityType `type:"list"`
+
+	// One or more metrics.
+	Metrics []*MetricCollectionType `type:"list"`
+}
+
+// String returns the string representation
+func (s DescribeMetricCollectionTypesOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeMetricCollectionTypesOutput) GoString() string {
+	return s.String()
+}
+
+type DescribeNotificationConfigurationsInput struct {
+	_ struct{} `type:"structure"`
+
+	// The names of one or more groups.
+	AutoScalingGroupNames []*string `type:"list"`
+
+	// The maximum number of items to return with this call.
+	MaxRecords *int64 `type:"integer"`
+
+	// The token for the next set of items to return. (You received this token from
+	// a previous call.)
+	NextToken *string `type:"string"`
+}
+
+// String returns the string representation
+func (s DescribeNotificationConfigurationsInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeNotificationConfigurationsInput) GoString() string {
+	return s.String()
+}
+
+type DescribeNotificationConfigurationsOutput struct {
+	_ struct{} `type:"structure"`
+
+	// The token to use when requesting the next set of items. If there are no additional
+	// items to return, the string is empty.
+	NextToken *string `type:"string"`
+
+	// The notification configurations.
+	NotificationConfigurations []*NotificationConfiguration `type:"list" required:"true"`
+}
+
+// String returns the string representation
+func (s DescribeNotificationConfigurationsOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeNotificationConfigurationsOutput) GoString() string {
+	return s.String()
+}
+
+type DescribePoliciesInput struct {
+	_ struct{} `type:"structure"`
+
+	// The name of the group.
+	AutoScalingGroupName *string `min:"1" type:"string"`
+
+	// The maximum number of items to be returned with each call.
+	MaxRecords *int64 `type:"integer"`
+
+	// The token for the next set of items to return. (You received this token from
+	// a previous call.)
+	NextToken *string `type:"string"`
+
+	// One or more policy names or policy ARNs to be described. If you omit this
+	// list, all policy names are described. If a group name is provided, the results
+	// are limited to that group. This list is limited to 50 items. If you specify
+	// an unknown policy name, it is ignored with no error.
+	PolicyNames []*string `type:"list"`
+
+	// One or more policy types. Valid values are SimpleScaling and StepScaling.
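+	//
+	// For example, to describe only step scaling policies (illustrative):
+	//
+	//	PolicyTypes: []*string{aws.String("StepScaling")}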
+ PolicyTypes []*string `type:"list"` +} + +// String returns the string representation +func (s DescribePoliciesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribePoliciesInput) GoString() string { + return s.String() +} + +type DescribePoliciesOutput struct { + _ struct{} `type:"structure"` + + // The token to use when requesting the next set of items. If there are no additional + // items to return, the string is empty. + NextToken *string `type:"string"` + + // The scaling policies. + ScalingPolicies []*ScalingPolicy `type:"list"` +} + +// String returns the string representation +func (s DescribePoliciesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribePoliciesOutput) GoString() string { + return s.String() +} + +type DescribeScalingActivitiesInput struct { + _ struct{} `type:"structure"` + + // The activity IDs of the desired scaling activities. If this list is omitted, + // all activities are described. If the AutoScalingGroupName parameter is provided, + // the results are limited to that group. The list of requested activities cannot + // contain more than 50 items. If unknown activities are requested, they are + // ignored with no error. + ActivityIds []*string `type:"list"` + + // The name of the group. + AutoScalingGroupName *string `min:"1" type:"string"` + + // The maximum number of items to return with this call. + MaxRecords *int64 `type:"integer"` + + // The token for the next set of items to return. (You received this token from + // a previous call.) + NextToken *string `type:"string"` +} + +// String returns the string representation +func (s DescribeScalingActivitiesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeScalingActivitiesInput) GoString() string { + return s.String() +} + +type DescribeScalingActivitiesOutput struct { + _ struct{} `type:"structure"` + + // The scaling activities. + Activities []*Activity `type:"list" required:"true"` + + // The token to use when requesting the next set of items. If there are no additional + // items to return, the string is empty. + NextToken *string `type:"string"` +} + +// String returns the string representation +func (s DescribeScalingActivitiesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeScalingActivitiesOutput) GoString() string { + return s.String() +} + +type DescribeScalingProcessTypesInput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DescribeScalingProcessTypesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeScalingProcessTypesInput) GoString() string { + return s.String() +} + +type DescribeScalingProcessTypesOutput struct { + _ struct{} `type:"structure"` + + // The names of the process types. + Processes []*ProcessType `type:"list"` +} + +// String returns the string representation +func (s DescribeScalingProcessTypesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeScalingProcessTypesOutput) GoString() string { + return s.String() +} + +type DescribeScheduledActionsInput struct { + _ struct{} `type:"structure"` + + // The name of the group. 
+	AutoScalingGroupName *string `min:"1" type:"string"`
+
+	// The latest scheduled start time to return. If scheduled action names are
+	// provided, this parameter is ignored.
+	EndTime *time.Time `type:"timestamp" timestampFormat:"iso8601"`
+
+	// The maximum number of items to return with this call.
+	MaxRecords *int64 `type:"integer"`
+
+	// The token for the next set of items to return. (You received this token from
+	// a previous call.)
+	NextToken *string `type:"string"`
+
+	// The names of one or more scheduled actions to describe. If you omit this
+	// list, the call describes all scheduled actions. If you specify an unknown
+	// scheduled action, it is ignored with no error.
+	//
+	// You can describe up to a maximum of 50 scheduled actions with a single call.
+	// If there are more items to return, the call returns a token. To get the next
+	// set of items, repeat the call with the returned token in the NextToken parameter.
+	ScheduledActionNames []*string `type:"list"`
+
+	// The earliest scheduled start time to return. If scheduled action names are
+	// provided, this parameter is ignored.
+	StartTime *time.Time `type:"timestamp" timestampFormat:"iso8601"`
+}
+
+// String returns the string representation
+func (s DescribeScheduledActionsInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeScheduledActionsInput) GoString() string {
+	return s.String()
+}
+
+type DescribeScheduledActionsOutput struct {
+	_ struct{} `type:"structure"`
+
+	// The token to use when requesting the next set of items. If there are no additional
+	// items to return, the string is empty.
+	NextToken *string `type:"string"`
+
+	// The scheduled actions.
+	ScheduledUpdateGroupActions []*ScheduledUpdateGroupAction `type:"list"`
+}
+
+// String returns the string representation
+func (s DescribeScheduledActionsOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeScheduledActionsOutput) GoString() string {
+	return s.String()
+}
+
+type DescribeTagsInput struct {
+	_ struct{} `type:"structure"`
+
+	// A filter used to scope the tags to return.
+	Filters []*Filter `type:"list"`
+
+	// The maximum number of items to return with this call.
+	MaxRecords *int64 `type:"integer"`
+
+	// The token for the next set of items to return. (You received this token from
+	// a previous call.)
+	NextToken *string `type:"string"`
+}
+
+// String returns the string representation
+func (s DescribeTagsInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeTagsInput) GoString() string {
+	return s.String()
+}
+
+type DescribeTagsOutput struct {
+	_ struct{} `type:"structure"`
+
+	// The token to use when requesting the next set of items. If there are no additional
+	// items to return, the string is empty.
+	NextToken *string `type:"string"`
+
+	// The tags.
+ Tags []*TagDescription `type:"list"` +} + +// String returns the string representation +func (s DescribeTagsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeTagsOutput) GoString() string { + return s.String() +} + +type DescribeTerminationPolicyTypesInput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DescribeTerminationPolicyTypesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeTerminationPolicyTypesInput) GoString() string { + return s.String() +} + +type DescribeTerminationPolicyTypesOutput struct { + _ struct{} `type:"structure"` + + // The termination policies supported by Auto Scaling (OldestInstance, OldestLaunchConfiguration, + // NewestInstance, ClosestToNextInstanceHour, and Default). + TerminationPolicyTypes []*string `type:"list"` +} + +// String returns the string representation +func (s DescribeTerminationPolicyTypesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeTerminationPolicyTypesOutput) GoString() string { + return s.String() +} + +type DetachInstancesInput struct { + _ struct{} `type:"structure"` + + // The name of the group. + AutoScalingGroupName *string `min:"1" type:"string" required:"true"` + + // One or more instance IDs. + InstanceIds []*string `type:"list"` + + // If True, the Auto Scaling group decrements the desired capacity value by + // the number of instances detached. + ShouldDecrementDesiredCapacity *bool `type:"boolean" required:"true"` +} + +// String returns the string representation +func (s DetachInstancesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DetachInstancesInput) GoString() string { + return s.String() +} + +type DetachInstancesOutput struct { + _ struct{} `type:"structure"` + + // The activities related to detaching the instances from the Auto Scaling group. + Activities []*Activity `type:"list"` +} + +// String returns the string representation +func (s DetachInstancesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DetachInstancesOutput) GoString() string { + return s.String() +} + +type DetachLoadBalancersInput struct { + _ struct{} `type:"structure"` + + // The name of the group. + AutoScalingGroupName *string `min:"1" type:"string"` + + // One or more load balancer names. + LoadBalancerNames []*string `type:"list"` +} + +// String returns the string representation +func (s DetachLoadBalancersInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DetachLoadBalancersInput) GoString() string { + return s.String() +} + +type DetachLoadBalancersOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DetachLoadBalancersOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DetachLoadBalancersOutput) GoString() string { + return s.String() +} + +type DisableMetricsCollectionInput struct { + _ struct{} `type:"structure"` + + // The name or Amazon Resource Name (ARN) of the group. + AutoScalingGroupName *string `min:"1" type:"string" required:"true"` + + // One or more of the following metrics. If you omit this parameter, all metrics + // are disabled. 
+ // + // GroupMinSize + // + // GroupMaxSize + // + // GroupDesiredCapacity + // + // GroupInServiceInstances + // + // GroupPendingInstances + // + // GroupStandbyInstances + // + // GroupTerminatingInstances + // + // GroupTotalInstances + Metrics []*string `type:"list"` +} + +// String returns the string representation +func (s DisableMetricsCollectionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DisableMetricsCollectionInput) GoString() string { + return s.String() +} + +type DisableMetricsCollectionOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DisableMetricsCollectionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DisableMetricsCollectionOutput) GoString() string { + return s.String() +} + +// Describes an Amazon EBS volume. +type Ebs struct { + _ struct{} `type:"structure"` + + // Indicates whether to delete the volume on instance termination. + // + // Default: true + DeleteOnTermination *bool `type:"boolean"` + + // Indicates whether the volume should be encrypted. Encrypted EBS volumes must + // be attached to instances that support Amazon EBS encryption. Volumes that + // are created from encrypted snapshots are automatically encrypted. There is + // no way to create an encrypted volume from an unencrypted snapshot or an unencrypted + // volume from an encrypted snapshot. For more information, see Amazon EBS Encryption + // (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSEncryption.html) in + // the Amazon Elastic Compute Cloud User Guide. + Encrypted *bool `type:"boolean"` + + // For Provisioned IOPS (SSD) volumes only. The number of I/O operations per + // second (IOPS) to provision for the volume. + // + // Default: None + Iops *int64 `min:"100" type:"integer"` + + // The ID of the snapshot. + SnapshotId *string `min:"1" type:"string"` + + // The volume size, in gigabytes. + // + // Valid values: If the volume type is io1, the minimum size of the volume + // is 10 GiB. If you specify SnapshotId and VolumeSize, VolumeSize must be equal + // to or larger than the size of the snapshot. + // + // Default: If you create a volume from a snapshot and you don't specify a + // volume size, the default is the size of the snapshot. + // + // Required: Required when the volume type is io1. + VolumeSize *int64 `min:"1" type:"integer"` + + // The volume type. + // + // Valid values: standard | io1 | gp2 + // + // Default: standard + VolumeType *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s Ebs) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Ebs) GoString() string { + return s.String() +} + +type EnableMetricsCollectionInput struct { + _ struct{} `type:"structure"` + + // The name or ARN of the Auto Scaling group. + AutoScalingGroupName *string `min:"1" type:"string" required:"true"` + + // The granularity to associate with the metrics to collect. The only valid + // value is 1Minute. + Granularity *string `min:"1" type:"string" required:"true"` + + // One or more of the following metrics. If you omit this parameter, all metrics + // are enabled. 
+ // + // GroupMinSize + // + // GroupMaxSize + // + // GroupDesiredCapacity + // + // GroupInServiceInstances + // + // GroupPendingInstances + // + // GroupStandbyInstances + // + // GroupTerminatingInstances + // + // GroupTotalInstances + // + // Note that the GroupStandbyInstances metric is not enabled by default. You + // must explicitly request this metric. + Metrics []*string `type:"list"` +} + +// String returns the string representation +func (s EnableMetricsCollectionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s EnableMetricsCollectionInput) GoString() string { + return s.String() +} + +type EnableMetricsCollectionOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s EnableMetricsCollectionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s EnableMetricsCollectionOutput) GoString() string { + return s.String() +} + +// Describes an enabled metric. +type EnabledMetric struct { + _ struct{} `type:"structure"` + + // The granularity of the metric. The only valid value is 1Minute. + Granularity *string `min:"1" type:"string"` + + // One of the following metrics: + // + // GroupMinSize + // + // GroupMaxSize + // + // GroupDesiredCapacity + // + // GroupInServiceInstances + // + // GroupPendingInstances + // + // GroupStandbyInstances + // + // GroupTerminatingInstances + // + // GroupTotalInstances + Metric *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s EnabledMetric) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s EnabledMetric) GoString() string { + return s.String() +} + +type EnterStandbyInput struct { + _ struct{} `type:"structure"` + + // The name of the Auto Scaling group. + AutoScalingGroupName *string `min:"1" type:"string" required:"true"` + + // One or more instances to move into Standby mode. You must specify at least + // one instance ID. + InstanceIds []*string `type:"list"` + + // Specifies whether the instances moved to Standby mode count as part of the + // Auto Scaling group's desired capacity. If set, the desired capacity for the + // Auto Scaling group decrements by the number of instances moved to Standby + // mode. + ShouldDecrementDesiredCapacity *bool `type:"boolean" required:"true"` +} + +// String returns the string representation +func (s EnterStandbyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s EnterStandbyInput) GoString() string { + return s.String() +} + +type EnterStandbyOutput struct { + _ struct{} `type:"structure"` + + // The activities related to moving instances into Standby mode. + Activities []*Activity `type:"list"` +} + +// String returns the string representation +func (s EnterStandbyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s EnterStandbyOutput) GoString() string { + return s.String() +} + +type ExecutePolicyInput struct { + _ struct{} `type:"structure"` + + // The name or Amazon Resource Name (ARN) of the Auto Scaling group. + AutoScalingGroupName *string `min:"1" type:"string"` + + // The breach threshold for the alarm. + // + // This parameter is required if the policy type is StepScaling and not supported + // otherwise. 
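+	//
+	// As an illustrative sketch only (the policy and group names are hypothetical),
+	// executing a StepScaling policy with an explicit breach threshold and metric
+	// value might look like:
+	//
+	//	params := &ExecutePolicyInput{
+	//		AutoScalingGroupName: aws.String("my-asg"),
+	//		PolicyName:           aws.String("scale-out"),
+	//		BreachThreshold:      aws.Float64(50),
+	//		MetricValue:          aws.Float64(59),
+	//	}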
+ BreachThreshold *float64 `type:"double"` + + // If this parameter is true, Auto Scaling waits for the cooldown period to + // complete before executing the policy. Otherwise, Auto Scaling executes the + // policy without waiting for the cooldown period to complete. + // + // This parameter is not supported if the policy type is StepScaling. + // + // For more information, see Understanding Auto Scaling Cooldowns (http://docs.aws.amazon.com/AutoScaling/latest/DeveloperGuide/Cooldown.html) + // in the Auto Scaling Developer Guide. + HonorCooldown *bool `type:"boolean"` + + // The metric value to compare to BreachThreshold. This enables you to execute + // a policy of type StepScaling and determine which step adjustment to use. + // For example, if the breach threshold is 50 and you want to use a step adjustment + // with a lower bound of 0 and an upper bound of 10, you can set the metric + // value to 59. + // + // If you specify a metric value that doesn't correspond to a step adjustment + // for the policy, the call returns an error. + // + // This parameter is required if the policy type is StepScaling and not supported + // otherwise. + MetricValue *float64 `type:"double"` + + // The name or ARN of the policy. + PolicyName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s ExecutePolicyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ExecutePolicyInput) GoString() string { + return s.String() +} + +type ExecutePolicyOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s ExecutePolicyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ExecutePolicyOutput) GoString() string { + return s.String() +} + +type ExitStandbyInput struct { + _ struct{} `type:"structure"` + + // The name of the Auto Scaling group. + AutoScalingGroupName *string `min:"1" type:"string" required:"true"` + + // One or more instance IDs. You must specify at least one instance ID. + InstanceIds []*string `type:"list"` +} + +// String returns the string representation +func (s ExitStandbyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ExitStandbyInput) GoString() string { + return s.String() +} + +type ExitStandbyOutput struct { + _ struct{} `type:"structure"` + + // The activities related to moving instances out of Standby mode. + Activities []*Activity `type:"list"` +} + +// String returns the string representation +func (s ExitStandbyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ExitStandbyOutput) GoString() string { + return s.String() +} + +// Describes a filter. +type Filter struct { + _ struct{} `type:"structure"` + + // The name of the filter. The valid values are: "auto-scaling-group", "key", + // "value", and "propagate-at-launch". + Name *string `type:"string"` + + // The value of the filter. + Values []*string `type:"list"` +} + +// String returns the string representation +func (s Filter) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Filter) GoString() string { + return s.String() +} + +// Describes an Auto Scaling group. +type Group struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the group. 
+ AutoScalingGroupARN *string `min:"1" type:"string"` + + // The name of the group. + AutoScalingGroupName *string `min:"1" type:"string" required:"true"` + + // One or more Availability Zones for the group. + AvailabilityZones []*string `min:"1" type:"list" required:"true"` + + // The date and time the group was created. + CreatedTime *time.Time `type:"timestamp" timestampFormat:"iso8601" required:"true"` + + // The amount of time, in seconds, after a scaling activity completes before + // another scaling activity can start. + DefaultCooldown *int64 `type:"integer" required:"true"` + + // The desired size of the group. + DesiredCapacity *int64 `type:"integer" required:"true"` + + // The metrics enabled for the group. + EnabledMetrics []*EnabledMetric `type:"list"` + + // The amount of time, in seconds, that Auto Scaling waits before checking the + // health status of an EC2 instance that has come into service. + HealthCheckGracePeriod *int64 `type:"integer"` + + // The service to use for the health checks. The valid values are EC2 and ELB. + HealthCheckType *string `min:"1" type:"string" required:"true"` + + // The EC2 instances associated with the group. + Instances []*Instance `type:"list"` + + // The name of the associated launch configuration. + LaunchConfigurationName *string `min:"1" type:"string"` + + // One or more load balancers associated with the group. + LoadBalancerNames []*string `type:"list"` + + // The maximum size of the group. + MaxSize *int64 `type:"integer" required:"true"` + + // The minimum size of the group. + MinSize *int64 `type:"integer" required:"true"` + + // Indicates whether newly launched instances are protected from termination + // by Auto Scaling when scaling in. + NewInstancesProtectedFromScaleIn *bool `type:"boolean"` + + // The name of the placement group into which you'll launch your instances, + // if any. For more information, see Placement Groups (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/placement-groups.html) + // in the Amazon Elastic Compute Cloud User Guide. + PlacementGroup *string `min:"1" type:"string"` + + // The current state of the group when DeleteAutoScalingGroup is in progress. + Status *string `min:"1" type:"string"` + + // The suspended processes associated with the group. + SuspendedProcesses []*SuspendedProcess `type:"list"` + + // The tags for the group. + Tags []*TagDescription `type:"list"` + + // The termination policies for the group. + TerminationPolicies []*string `type:"list"` + + // One or more subnet IDs, if applicable, separated by commas. + // + // If you specify VPCZoneIdentifier and AvailabilityZones, ensure that the + // Availability Zones of the subnets match the values for AvailabilityZones. + VPCZoneIdentifier *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s Group) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Group) GoString() string { + return s.String() +} + +// Describes an EC2 instance. +type Instance struct { + _ struct{} `type:"structure"` + + // The Availability Zone in which the instance is running. + AvailabilityZone *string `min:"1" type:"string" required:"true"` + + // The health status of the instance. + HealthStatus *string `min:"1" type:"string" required:"true"` + + // The ID of the instance. + InstanceId *string `min:"1" type:"string" required:"true"` + + // The launch configuration associated with the instance. 
+ LaunchConfigurationName *string `min:"1" type:"string" required:"true"` + + // A description of the current lifecycle state. Note that the Quarantined state + // is not used. + LifecycleState *string `type:"string" required:"true" enum:"LifecycleState"` + + // Indicates whether the instance is protected from termination by Auto Scaling + // when scaling in. + ProtectedFromScaleIn *bool `type:"boolean" required:"true"` +} + +// String returns the string representation +func (s Instance) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Instance) GoString() string { + return s.String() +} + +// Describes an EC2 instance associated with an Auto Scaling group. +type InstanceDetails struct { + _ struct{} `type:"structure"` + + // The name of the Auto Scaling group associated with the instance. + AutoScalingGroupName *string `min:"1" type:"string" required:"true"` + + // The Availability Zone for the instance. + AvailabilityZone *string `min:"1" type:"string" required:"true"` + + // The health status of this instance. "Healthy" means that the instance is + // healthy and should remain in service. "Unhealthy" means that the instance + // is unhealthy and Auto Scaling should terminate and replace it. + HealthStatus *string `min:"1" type:"string" required:"true"` + + // The ID of the instance. + InstanceId *string `min:"1" type:"string" required:"true"` + + // The launch configuration associated with the instance. + LaunchConfigurationName *string `min:"1" type:"string" required:"true"` + + // The lifecycle state for the instance. For more information, see Auto Scaling + // Instance States (http://docs.aws.amazon.com/AutoScaling/latest/DeveloperGuide/AutoScalingGroupLifecycle.html#AutoScalingStates) + // in the Auto Scaling Developer Guide. + LifecycleState *string `min:"1" type:"string" required:"true"` + + // Indicates whether the instance is protected from termination by Auto Scaling + // when scaling in. + ProtectedFromScaleIn *bool `type:"boolean" required:"true"` +} + +// String returns the string representation +func (s InstanceDetails) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s InstanceDetails) GoString() string { + return s.String() +} + +// Describes whether instance monitoring is enabled. +type InstanceMonitoring struct { + _ struct{} `type:"structure"` + + // If True, instance monitoring is enabled. + Enabled *bool `type:"boolean"` +} + +// String returns the string representation +func (s InstanceMonitoring) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s InstanceMonitoring) GoString() string { + return s.String() +} + +// Describes a launch configuration. +type LaunchConfiguration struct { + _ struct{} `type:"structure"` + + // [EC2-VPC] Indicates whether to assign a public IP address to each instance. + AssociatePublicIpAddress *bool `type:"boolean"` + + // A block device mapping, which specifies the block devices for the instance. + BlockDeviceMappings []*BlockDeviceMapping `type:"list"` + + // The ID of a ClassicLink-enabled VPC to link your EC2-Classic instances to. + // This parameter can only be used if you are launching EC2-Classic instances. + // For more information, see ClassicLink (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/vpc-classiclink.html) + // in the Amazon Elastic Compute Cloud User Guide. 
+	ClassicLinkVPCId *string `min:"1" type:"string"`
+
+	// The IDs of one or more security groups for the VPC specified in ClassicLinkVPCId.
+	// This parameter is required if ClassicLinkVPCId is specified, and cannot be
+	// used otherwise. For more information, see ClassicLink (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/vpc-classiclink.html)
+	// in the Amazon Elastic Compute Cloud User Guide.
+	ClassicLinkVPCSecurityGroups []*string `type:"list"`
+
+	// The creation date and time for the launch configuration.
+	CreatedTime *time.Time `type:"timestamp" timestampFormat:"iso8601" required:"true"`
+
+	// Controls whether the instance is optimized for EBS I/O (true) or not (false).
+	EbsOptimized *bool `type:"boolean"`
+
+	// The name or Amazon Resource Name (ARN) of the instance profile associated
+	// with the IAM role for the instance.
+	IamInstanceProfile *string `min:"1" type:"string"`
+
+	// The ID of the Amazon Machine Image (AMI).
+	ImageId *string `min:"1" type:"string" required:"true"`
+
+	// Controls whether instances in this group are launched with detailed monitoring.
+	InstanceMonitoring *InstanceMonitoring `type:"structure"`
+
+	// The instance type for the instances.
+	InstanceType *string `min:"1" type:"string" required:"true"`
+
+	// The ID of the kernel associated with the AMI.
+	KernelId *string `min:"1" type:"string"`
+
+	// The name of the key pair.
+	KeyName *string `min:"1" type:"string"`
+
+	// The Amazon Resource Name (ARN) of the launch configuration.
+	LaunchConfigurationARN *string `min:"1" type:"string"`
+
+	// The name of the launch configuration.
+	LaunchConfigurationName *string `min:"1" type:"string" required:"true"`
+
+	// The tenancy of the instance, either default or dedicated. An instance with
+	// dedicated tenancy runs on isolated, single-tenant hardware and can only
+	// be launched into a VPC.
+	PlacementTenancy *string `min:"1" type:"string"`
+
+	// The ID of the RAM disk associated with the AMI.
+	RamdiskId *string `min:"1" type:"string"`
+
+	// The security groups to associate with the instances.
+	SecurityGroups []*string `type:"list"`
+
+	// The price to bid when launching Spot Instances.
+	SpotPrice *string `min:"1" type:"string"`
+
+	// The user data available to the instances.
+	UserData *string `type:"string"`
+}
+
+// String returns the string representation
+func (s LaunchConfiguration) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s LaunchConfiguration) GoString() string {
+	return s.String()
+}
+
+// Describes a lifecycle hook, which tells Auto Scaling that you want to perform
+// an action when an instance launches or terminates. When you have a lifecycle
+// hook in place, the Auto Scaling group will either:
+//
+// Pause the instance after it launches, but before it is put into service
+//
+// Pause the instance as it terminates, but before it is fully terminated
+//
+// For more information, see Auto Scaling Pending State (http://docs.aws.amazon.com/AutoScaling/latest/DeveloperGuide/AutoScalingPendingState.html)
+// and Auto Scaling Terminating State (http://docs.aws.amazon.com/AutoScaling/latest/DeveloperGuide/AutoScalingTerminatingState.html)
+// in the Auto Scaling Developer Guide.
+type LifecycleHook struct {
+	_ struct{} `type:"structure"`
+
+	// The name of the Auto Scaling group for the lifecycle hook.
+	AutoScalingGroupName *string `min:"1" type:"string"`
+
+	// Defines the action the Auto Scaling group should take when the lifecycle
+	// hook timeout elapses or if an unexpected failure occurs. The valid values
+	// are CONTINUE and ABANDON. The default value is CONTINUE.
+	DefaultResult *string `type:"string"`
+
+	// The maximum time, in seconds, that an instance can remain in a Pending:Wait
+	// or Terminating:Wait state. The default is 172800 seconds (48 hours).
+	GlobalTimeout *int64 `type:"integer"`
+
+	// The maximum time, in seconds, that can elapse before the lifecycle hook times
+	// out. The default is 3600 seconds (1 hour). When the lifecycle hook times
+	// out, Auto Scaling performs the action defined in the DefaultResult parameter.
+	// You can prevent the lifecycle hook from timing out by calling RecordLifecycleActionHeartbeat.
+	HeartbeatTimeout *int64 `type:"integer"`
+
+	// The name of the lifecycle hook.
+	LifecycleHookName *string `min:"1" type:"string"`
+
+	// The state of the EC2 instance to which you want to attach the lifecycle hook.
+	// For a list of lifecycle hook types, see DescribeLifecycleHookTypes.
+	LifecycleTransition *string `type:"string"`
+
+	// Additional information that you want to include any time Auto Scaling sends
+	// a message to the notification target.
+	NotificationMetadata *string `min:"1" type:"string"`
+
+	// The ARN of the notification target that Auto Scaling uses to notify you when
+	// an instance is in the transition state for the lifecycle hook. This ARN target
+	// can be either an SQS queue or an SNS topic. The notification message sent
+	// to the target includes the following:
+	//
+	// Lifecycle action token
+	//
+	// User account ID
+	//
+	// Name of the Auto Scaling group
+	//
+	// Lifecycle hook name
+	//
+	// EC2 instance ID
+	//
+	// Lifecycle transition
+	//
+	// Notification metadata
+	NotificationTargetARN *string `min:"1" type:"string"`
+
+	// The ARN of the IAM role that allows the Auto Scaling group to publish to
+	// the specified notification target.
+	RoleARN *string `min:"1" type:"string"`
+}
+
+// String returns the string representation
+func (s LifecycleHook) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s LifecycleHook) GoString() string {
+	return s.String()
+}
+
+// Describes the state of a load balancer.
+type LoadBalancerState struct {
+	_ struct{} `type:"structure"`
+
+	// The name of the load balancer.
+	LoadBalancerName *string `min:"1" type:"string"`
+
+	// One of the following load balancer states:
+	//
+	// Adding - The instances in the group are being registered with the load
+	// balancer.
+	//
+	// Added - All instances in the group are registered with the load balancer.
+	//
+	// InService - At least one instance in the group passed an ELB health check.
+	//
+	// Removing - The instances are being deregistered from the load balancer.
+	// If connection draining is enabled, Elastic Load Balancing waits for in-flight
+	// requests to complete before deregistering the instances.
+	State *string `min:"1" type:"string"`
+}
+
+// String returns the string representation
+func (s LoadBalancerState) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s LoadBalancerState) GoString() string {
+	return s.String()
+}
+
+// Describes a metric.
+type MetricCollectionType struct { + _ struct{} `type:"structure"` + + // One of the following metrics: + // + // GroupMinSize + // + // GroupMaxSize + // + // GroupDesiredCapacity + // + // GroupInServiceInstances + // + // GroupPendingInstances + // + // GroupStandbyInstances + // + // GroupTerminatingInstances + // + // GroupTotalInstances + Metric *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s MetricCollectionType) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s MetricCollectionType) GoString() string { + return s.String() +} + +// Describes a granularity of a metric. +type MetricGranularityType struct { + _ struct{} `type:"structure"` + + // The granularity. The only valid value is 1Minute. + Granularity *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s MetricGranularityType) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s MetricGranularityType) GoString() string { + return s.String() +} + +// Describes a notification. +type NotificationConfiguration struct { + _ struct{} `type:"structure"` + + // The name of the group. + AutoScalingGroupName *string `min:"1" type:"string"` + + // One of the following event notification types: + // + // autoscaling:EC2_INSTANCE_LAUNCH + // + // autoscaling:EC2_INSTANCE_LAUNCH_ERROR + // + // autoscaling:EC2_INSTANCE_TERMINATE + // + // autoscaling:EC2_INSTANCE_TERMINATE_ERROR + // + // autoscaling:TEST_NOTIFICATION + NotificationType *string `min:"1" type:"string"` + + // The Amazon Resource Name (ARN) of the Amazon Simple Notification Service + // (SNS) topic. + TopicARN *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s NotificationConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s NotificationConfiguration) GoString() string { + return s.String() +} + +// Describes a process type. +// +// For more information, see Auto Scaling Processes (http://docs.aws.amazon.com/AutoScaling/latest/DeveloperGuide/US_SuspendResume.html#process-types) +// in the Auto Scaling Developer Guide. +type ProcessType struct { + _ struct{} `type:"structure"` + + // One of the following processes: + // + // Launch + // + // Terminate + // + // AddToLoadBalancer + // + // AlarmNotification + // + // AZRebalance + // + // HealthCheck + // + // ReplaceUnhealthy + // + // ScheduledActions + ProcessName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s ProcessType) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ProcessType) GoString() string { + return s.String() +} + +type PutLifecycleHookInput struct { + _ struct{} `type:"structure"` + + // The name of the Auto Scaling group to which you want to assign the lifecycle + // hook. + AutoScalingGroupName *string `min:"1" type:"string" required:"true"` + + // Defines the action the Auto Scaling group should take when the lifecycle + // hook timeout elapses or if an unexpected failure occurs. The value for this + // parameter can be either CONTINUE or ABANDON. The default value for this parameter + // is ABANDON. + DefaultResult *string `type:"string"` + + // The amount of time, in seconds, that can elapse before the lifecycle hook + // times out. 
When the lifecycle hook times out, Auto Scaling performs the action
+	// defined in the DefaultResult parameter. You can prevent the lifecycle hook
+	// from timing out by calling RecordLifecycleActionHeartbeat. The default is
+	// 3600 seconds (1 hour).
+	HeartbeatTimeout *int64 `type:"integer"`
+
+	// The name of the lifecycle hook.
+	LifecycleHookName *string `min:"1" type:"string" required:"true"`
+
+	// The instance state to which you want to attach the lifecycle hook. For a
+	// list of lifecycle hook types, see DescribeLifecycleHookTypes.
+	//
+	// This parameter is required for new lifecycle hooks, but optional when updating
+	// existing hooks.
+	LifecycleTransition *string `type:"string"`
+
+	// Contains additional information that you want to include any time Auto Scaling
+	// sends a message to the notification target.
+	NotificationMetadata *string `min:"1" type:"string"`
+
+	// The ARN of the notification target that Auto Scaling will use to notify you
+	// when an instance is in the transition state for the lifecycle hook. This
+	// ARN target can be either an SQS queue or an SNS topic.
+	//
+	// This parameter is required for new lifecycle hooks, but optional when updating
+	// existing hooks.
+	//
+	// The notification message sent to the target will include:
+	//
+	// LifecycleActionToken. The lifecycle action token.
+	//
+	// AccountId. The user account ID.
+	//
+	// AutoScalingGroupName. The name of the Auto Scaling group.
+	//
+	// LifecycleHookName. The lifecycle hook name.
+	//
+	// EC2InstanceId. The EC2 instance ID.
+	//
+	// LifecycleTransition. The lifecycle transition.
+	//
+	// NotificationMetadata. The notification metadata.
+	//
+	// This operation uses the JSON format when sending notifications to an Amazon
+	// SQS queue, and an email key/value pair format when sending notifications
+	// to an Amazon SNS topic.
+	//
+	// When you call this operation, a test message is sent to the notification
+	// target. This test message contains an additional key/value pair: Event:autoscaling:TEST_NOTIFICATION.
+	NotificationTargetARN *string `min:"1" type:"string"`
+
+	// The ARN of the IAM role that allows the Auto Scaling group to publish to
+	// the specified notification target.
+	//
+	// This parameter is required for new lifecycle hooks, but optional when updating
+	// existing hooks.
+	RoleARN *string `min:"1" type:"string"`
+}
+
+// String returns the string representation
+func (s PutLifecycleHookInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s PutLifecycleHookInput) GoString() string {
+	return s.String()
+}
+
+type PutLifecycleHookOutput struct {
+	_ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s PutLifecycleHookOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s PutLifecycleHookOutput) GoString() string {
+	return s.String()
+}
+
+type PutNotificationConfigurationInput struct {
+	_ struct{} `type:"structure"`
+
+	// The name of the Auto Scaling group.
+	AutoScalingGroupName *string `min:"1" type:"string" required:"true"`
+
+	// The type of event that will cause the notification to be sent. For details
+	// about notification types supported by Auto Scaling, see DescribeAutoScalingNotificationTypes.
+	NotificationTypes []*string `type:"list" required:"true"`
+
+	// The Amazon Resource Name (ARN) of the Amazon Simple Notification Service
+	// (SNS) topic.
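+	//
+	// As an illustrative sketch only (the group name and topic ARN below are
+	// hypothetical placeholders):
+	//
+	//	params := &PutNotificationConfigurationInput{
+	//		AutoScalingGroupName: aws.String("my-asg"),
+	//		NotificationTypes: []*string{
+	//			aws.String("autoscaling:EC2_INSTANCE_LAUNCH"),
+	//			aws.String("autoscaling:EC2_INSTANCE_TERMINATE"),
+	//		},
+	//		TopicARN: aws.String("arn:aws:sns:us-east-1:123456789012:my-topic"),
+	//	}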
+ TopicARN *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s PutNotificationConfigurationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutNotificationConfigurationInput) GoString() string { + return s.String() +} + +type PutNotificationConfigurationOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s PutNotificationConfigurationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutNotificationConfigurationOutput) GoString() string { + return s.String() +} + +type PutScalingPolicyInput struct { + _ struct{} `type:"structure"` + + // The adjustment type. Valid values are ChangeInCapacity, ExactCapacity, and + // PercentChangeInCapacity. + // + // For more information, see Dynamic Scaling (http://docs.aws.amazon.com/AutoScaling/latest/DeveloperGuide/as-scale-based-on-demand.html) + // in the Auto Scaling Developer Guide. + AdjustmentType *string `min:"1" type:"string" required:"true"` + + // The name or ARN of the group. + AutoScalingGroupName *string `min:"1" type:"string" required:"true"` + + // The amount of time, in seconds, after a scaling activity completes and before + // the next scaling activity can start. If this parameter is not specified, + // the default cooldown period for the group applies. + // + // This parameter is not supported unless the policy type is SimpleScaling. + // + // For more information, see Understanding Auto Scaling Cooldowns (http://docs.aws.amazon.com/AutoScaling/latest/DeveloperGuide/Cooldown.html) + // in the Auto Scaling Developer Guide. + Cooldown *int64 `type:"integer"` + + // The estimated time, in seconds, until a newly launched instance can contribute + // to the CloudWatch metrics. The default is to use the value specified for + // the default cooldown period for the group. + // + // This parameter is not supported if the policy type is SimpleScaling. + EstimatedInstanceWarmup *int64 `type:"integer"` + + // The aggregation type for the CloudWatch metrics. Valid values are Minimum, + // Maximum, and Average. If the aggregation type is null, the value is treated + // as Average. + // + // This parameter is not supported if the policy type is SimpleScaling. + MetricAggregationType *string `min:"1" type:"string"` + + // The minimum number of instances to scale. If the value of AdjustmentType + // is PercentChangeInCapacity, the scaling policy changes the DesiredCapacity + // of the Auto Scaling group by at least this many instances. Otherwise, the + // error is ValidationError. + MinAdjustmentMagnitude *int64 `type:"integer"` + + // Available for backward compatibility. Use MinAdjustmentMagnitude instead. + MinAdjustmentStep *int64 `type:"integer"` + + // The name of the policy. + PolicyName *string `min:"1" type:"string" required:"true"` + + // The policy type. Valid values are SimpleScaling and StepScaling. If the policy + // type is null, the value is treated as SimpleScaling. + PolicyType *string `min:"1" type:"string"` + + // The amount by which to scale, based on the specified adjustment type. A positive + // value adds to the current capacity while a negative number removes from the + // current capacity. + // + // This parameter is required if the policy type is SimpleScaling and not supported + // otherwise. 
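+	//
+	// A minimal SimpleScaling sketch (illustrative only; the policy and group
+	// names are hypothetical):
+	//
+	//	params := &PutScalingPolicyInput{
+	//		AutoScalingGroupName: aws.String("my-asg"),
+	//		PolicyName:           aws.String("scale-out"),
+	//		AdjustmentType:       aws.String("ChangeInCapacity"),
+	//		ScalingAdjustment:    aws.Int64(1),
+	//		Cooldown:             aws.Int64(300),
+	//	}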
+ ScalingAdjustment *int64 `type:"integer"` + + // A set of adjustments that enable you to scale based on the size of the alarm + // breach. + // + // This parameter is required if the policy type is StepScaling and not supported + // otherwise. + StepAdjustments []*StepAdjustment `type:"list"` +} + +// String returns the string representation +func (s PutScalingPolicyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutScalingPolicyInput) GoString() string { + return s.String() +} + +type PutScalingPolicyOutput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the policy. + PolicyARN *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s PutScalingPolicyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutScalingPolicyOutput) GoString() string { + return s.String() +} + +type PutScheduledUpdateGroupActionInput struct { + _ struct{} `type:"structure"` + + // The name or Amazon Resource Name (ARN) of the Auto Scaling group. + AutoScalingGroupName *string `min:"1" type:"string" required:"true"` + + // The number of EC2 instances that should be running in the group. + DesiredCapacity *int64 `type:"integer"` + + // The time for this action to end. + EndTime *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + // The maximum size for the Auto Scaling group. + MaxSize *int64 `type:"integer"` + + // The minimum size for the Auto Scaling group. + MinSize *int64 `type:"integer"` + + // The time when recurring future actions will start. Start time is specified + // by the user following the Unix cron syntax format. For more information, + // see Cron (http://en.wikipedia.org/wiki/Cron) in Wikipedia. + // + // When StartTime and EndTime are specified with Recurrence, they form the + // boundaries of when the recurring action will start and stop. + Recurrence *string `min:"1" type:"string"` + + // The name of this scaling action. + ScheduledActionName *string `min:"1" type:"string" required:"true"` + + // The time for this action to start, in "YYYY-MM-DDThh:mm:ssZ" format in UTC/GMT + // only (for example, 2014-06-01T00:00:00Z). + // + // If you try to schedule your action in the past, Auto Scaling returns an + // error message. + // + // When StartTime and EndTime are specified with Recurrence, they form the + // boundaries of when the recurring action starts and stops. + StartTime *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + // This parameter is deprecated; use StartTime instead. + // + // The time for this action to start. If both Time and StartTime are specified, + // their values must be identical. 
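+	//
+	// As an illustrative sketch only (hypothetical names), prefer StartTime and,
+	// for repeating actions, Recurrence over this deprecated field:
+	//
+	//	params := &PutScheduledUpdateGroupActionInput{
+	//		AutoScalingGroupName: aws.String("my-asg"),
+	//		ScheduledActionName:  aws.String("scale-up-weekday-mornings"),
+	//		Recurrence:           aws.String("0 9 * * MON-FRI"),
+	//		MinSize:              aws.Int64(2),
+	//		MaxSize:              aws.Int64(10),
+	//		DesiredCapacity:      aws.Int64(4),
+	//	}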
+ Time *time.Time `type:"timestamp" timestampFormat:"iso8601"` +} + +// String returns the string representation +func (s PutScheduledUpdateGroupActionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutScheduledUpdateGroupActionInput) GoString() string { + return s.String() +} + +type PutScheduledUpdateGroupActionOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s PutScheduledUpdateGroupActionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutScheduledUpdateGroupActionOutput) GoString() string { + return s.String() +} + +type RecordLifecycleActionHeartbeatInput struct { + _ struct{} `type:"structure"` + + // The name of the Auto Scaling group for the hook. + AutoScalingGroupName *string `min:"1" type:"string" required:"true"` + + // A token that uniquely identifies a specific lifecycle action associated with + // an instance. Auto Scaling sends this token to the notification target you + // specified when you created the lifecycle hook. + LifecycleActionToken *string `min:"36" type:"string" required:"true"` + + // The name of the lifecycle hook. + LifecycleHookName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s RecordLifecycleActionHeartbeatInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RecordLifecycleActionHeartbeatInput) GoString() string { + return s.String() +} + +type RecordLifecycleActionHeartbeatOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s RecordLifecycleActionHeartbeatOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RecordLifecycleActionHeartbeatOutput) GoString() string { + return s.String() +} + +type ResumeProcessesOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s ResumeProcessesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ResumeProcessesOutput) GoString() string { + return s.String() +} + +// Describes a scaling policy. +type ScalingPolicy struct { + _ struct{} `type:"structure"` + + // The adjustment type, which specifies how ScalingAdjustment is interpreted. + // Valid values are ChangeInCapacity, ExactCapacity, and PercentChangeInCapacity. + AdjustmentType *string `min:"1" type:"string"` + + // The CloudWatch alarms related to the policy. + Alarms []*Alarm `type:"list"` + + // The name of the Auto Scaling group associated with this scaling policy. + AutoScalingGroupName *string `min:"1" type:"string"` + + // The amount of time, in seconds, after a scaling activity completes before + // any further trigger-related scaling activities can start. + Cooldown *int64 `type:"integer"` + + // The estimated time, in seconds, until a newly launched instance can contribute + // to the CloudWatch metrics. + EstimatedInstanceWarmup *int64 `type:"integer"` + + // The aggregation type for the CloudWatch metrics. Valid values are Minimum, + // Maximum, and Average. + MetricAggregationType *string `min:"1" type:"string"` + + // The minimum number of instances to scale. 
If the value of AdjustmentType + // is PercentChangeInCapacity, the scaling policy changes the DesiredCapacity + // of the Auto Scaling group by at least this many instances. Otherwise, the + // error is ValidationError. + MinAdjustmentMagnitude *int64 `type:"integer"` + + // Available for backward compatibility. Use MinAdjustmentMagnitude instead. + MinAdjustmentStep *int64 `type:"integer"` + + // The Amazon Resource Name (ARN) of the policy. + PolicyARN *string `min:"1" type:"string"` + + // The name of the scaling policy. + PolicyName *string `min:"1" type:"string"` + + // The policy type. Valid values are SimpleScaling and StepScaling. + PolicyType *string `min:"1" type:"string"` + + // The amount by which to scale, based on the specified adjustment type. A positive + // value adds to the current capacity while a negative number removes from the + // current capacity. + ScalingAdjustment *int64 `type:"integer"` + + // A set of adjustments that enable you to scale based on the size of the alarm + // breach. + StepAdjustments []*StepAdjustment `type:"list"` +} + +// String returns the string representation +func (s ScalingPolicy) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ScalingPolicy) GoString() string { + return s.String() +} + +type ScalingProcessQuery struct { + _ struct{} `type:"structure"` + + // The name or Amazon Resource Name (ARN) of the Auto Scaling group. + AutoScalingGroupName *string `min:"1" type:"string" required:"true"` + + // One or more of the following processes: + // + // Launch + // + // Terminate + // + // HealthCheck + // + // ReplaceUnhealthy + // + // AZRebalance + // + // AlarmNotification + // + // ScheduledActions + // + // AddToLoadBalancer + ScalingProcesses []*string `type:"list"` +} + +// String returns the string representation +func (s ScalingProcessQuery) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ScalingProcessQuery) GoString() string { + return s.String() +} + +// Describes a scheduled update to an Auto Scaling group. +type ScheduledUpdateGroupAction struct { + _ struct{} `type:"structure"` + + // The name of the group. + AutoScalingGroupName *string `min:"1" type:"string"` + + // The number of instances you prefer to maintain in the group. + DesiredCapacity *int64 `type:"integer"` + + // The date and time that the action is scheduled to end. This date and time + // can be up to one month in the future. + EndTime *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + // The maximum size of the group. + MaxSize *int64 `type:"integer"` + + // The minimum size of the group. + MinSize *int64 `type:"integer"` + + // The recurring schedule for the action. + Recurrence *string `min:"1" type:"string"` + + // The Amazon Resource Name (ARN) of the scheduled action. + ScheduledActionARN *string `min:"1" type:"string"` + + // The name of the scheduled action. + ScheduledActionName *string `min:"1" type:"string"` + + // The date and time that the action is scheduled to begin. This date and time + // can be up to one month in the future. + // + // When StartTime and EndTime are specified with Recurrence, they form the + // boundaries of when the recurring action will start and stop. + StartTime *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + // This parameter is deprecated; use StartTime instead. 
+	Time *time.Time `type:"timestamp" timestampFormat:"iso8601"`
+}
+
+// String returns the string representation
+func (s ScheduledUpdateGroupAction) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ScheduledUpdateGroupAction) GoString() string {
+	return s.String()
+}
+
+type SetDesiredCapacityInput struct {
+	_ struct{} `type:"structure"`
+
+	// The name of the Auto Scaling group.
+	AutoScalingGroupName *string `min:"1" type:"string" required:"true"`
+
+	// The number of EC2 instances that should be running in the Auto Scaling group.
+	DesiredCapacity *int64 `type:"integer" required:"true"`
+
+	// By default, SetDesiredCapacity overrides any cooldown period associated with
+	// the Auto Scaling group. Specify True to make Auto Scaling wait for the
+	// cooldown period associated with the Auto Scaling group to complete before
+	// initiating a scaling activity to set your Auto Scaling group to its new capacity.
+	HonorCooldown *bool `type:"boolean"`
+}
+
+// String returns the string representation
+func (s SetDesiredCapacityInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s SetDesiredCapacityInput) GoString() string {
+	return s.String()
+}
+
+type SetDesiredCapacityOutput struct {
+	_ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s SetDesiredCapacityOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s SetDesiredCapacityOutput) GoString() string {
+	return s.String()
+}
+
+type SetInstanceHealthInput struct {
+	_ struct{} `type:"structure"`
+
+	// The health status of the instance. Set to Healthy if you want the instance
+	// to remain in service. Set to Unhealthy if you want the instance to be out
+	// of service. Auto Scaling will terminate and replace the unhealthy instance.
+	HealthStatus *string `min:"1" type:"string" required:"true"`
+
+	// The ID of the EC2 instance.
+	InstanceId *string `min:"1" type:"string" required:"true"`
+
+	// If the Auto Scaling group of the specified instance has a HealthCheckGracePeriod
+	// specified for the group, by default, this call will respect the grace period.
+	// Set this to False if you do not want the call to respect the grace period
+	// associated with the group.
+	//
+	// For more information, see the HealthCheckGracePeriod parameter description
+	// for CreateAutoScalingGroup.
+	ShouldRespectGracePeriod *bool `type:"boolean"`
+}
+
+// String returns the string representation
+func (s SetInstanceHealthInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s SetInstanceHealthInput) GoString() string {
+	return s.String()
+}
+
+type SetInstanceHealthOutput struct {
+	_ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s SetInstanceHealthOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s SetInstanceHealthOutput) GoString() string {
+	return s.String()
+}
+
+type SetInstanceProtectionInput struct {
+	_ struct{} `type:"structure"`
+
+	// The name of the group.
+	AutoScalingGroupName *string `min:"1" type:"string" required:"true"`
+
+	// One or more instance IDs.
+	InstanceIds []*string `type:"list" required:"true"`
+
+	// Indicates whether the instance is protected from termination by Auto Scaling
+	// when scaling in.
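+	//
+	// As an illustrative sketch only (hypothetical group and instance IDs):
+	//
+	//	params := &SetInstanceProtectionInput{
+	//		AutoScalingGroupName: aws.String("my-asg"),
+	//		InstanceIds:          []*string{aws.String("i-12345678")},
+	//		ProtectedFromScaleIn: aws.Bool(true),
+	//	}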
+ ProtectedFromScaleIn *bool `type:"boolean" required:"true"` +} + +// String returns the string representation +func (s SetInstanceProtectionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SetInstanceProtectionInput) GoString() string { + return s.String() +} + +type SetInstanceProtectionOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s SetInstanceProtectionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SetInstanceProtectionOutput) GoString() string { + return s.String() +} + +// Describes an adjustment based on the difference between the value of the +// aggregated CloudWatch metric and the breach threshold that you've defined +// for the alarm. +// +// For the following examples, suppose that you have an alarm with a breach +// threshold of 50: +// +// If you want the adjustment to be triggered when the metric is greater +// than or equal to 50 and less than 60, specify a lower bound of 0 and an upper +// bound of 10. +// +// If you want the adjustment to be triggered when the metric is greater +// than 40 and less than or equal to 50, specify a lower bound of -10 and an +// upper bound of 0. +// +// There are a few rules for the step adjustments for your step policy: +// +// The ranges of your step adjustments can't overlap or have a gap. +// +// At most one step adjustment can have a null lower bound. If one step adjustment +// has a negative lower bound, then there must be a step adjustment with a null +// lower bound. +// +// At most one step adjustment can have a null upper bound. If one step adjustment +// has a positive upper bound, then there must be a step adjustment with a null +// upper bound. +// +// The upper and lower bound can't be null in the same step adjustment. +type StepAdjustment struct { + _ struct{} `type:"structure"` + + // The lower bound for the difference between the alarm threshold and the CloudWatch + // metric. If the metric value is above the breach threshold, the lower bound + // is inclusive (the metric must be greater than or equal to the threshold plus + // the lower bound). Otherwise, it is exclusive (the metric must be greater + // than the threshold plus the lower bound). A null value indicates negative + // infinity. + MetricIntervalLowerBound *float64 `type:"double"` + + // The upper bound for the difference between the alarm threshold and the CloudWatch + // metric. If the metric value is above the breach threshold, the upper bound + // is exclusive (the metric must be less than the threshold plus the upper bound). + // Otherwise, it is inclusive (the metric must be less than or equal to the + // threshold plus the upper bound). A null value indicates positive infinity. + // + // The upper bound must be greater than the lower bound. + MetricIntervalUpperBound *float64 `type:"double"` + + // The amount by which to scale, based on the specified adjustment type. A positive + // value adds to the current capacity while a negative number removes from the + // current capacity. 
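+	//
+	// As an illustrative sketch tied to the 50-threshold example above (not part
+	// of the generated documentation), a pair of non-overlapping steps might be:
+	//
+	//	steps := []*StepAdjustment{
+	//		// breach of 0 up to 10 over the threshold: add one instance
+	//		{
+	//			MetricIntervalLowerBound: aws.Float64(0),
+	//			MetricIntervalUpperBound: aws.Float64(10),
+	//			ScalingAdjustment:        aws.Int64(1),
+	//		},
+	//		// breach of more than 10 over the threshold: add two instances
+	//		{
+	//			MetricIntervalLowerBound: aws.Float64(10),
+	//			ScalingAdjustment:        aws.Int64(2),
+	//		},
+	//	}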
+ ScalingAdjustment *int64 `type:"integer" required:"true"` +} + +// String returns the string representation +func (s StepAdjustment) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s StepAdjustment) GoString() string { + return s.String() +} + +type SuspendProcessesOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s SuspendProcessesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SuspendProcessesOutput) GoString() string { + return s.String() +} + +// Describes an Auto Scaling process that has been suspended. For more information, +// see ProcessType. +type SuspendedProcess struct { + _ struct{} `type:"structure"` + + // The name of the suspended process. + ProcessName *string `min:"1" type:"string"` + + // The reason that the process was suspended. + SuspensionReason *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s SuspendedProcess) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SuspendedProcess) GoString() string { + return s.String() +} + +// Describes a tag for an Auto Scaling group. +type Tag struct { + _ struct{} `type:"structure"` + + // The tag key. + Key *string `min:"1" type:"string" required:"true"` + + // Determines whether the tag is added to new instances as they are launched + // in the group. + PropagateAtLaunch *bool `type:"boolean"` + + // The name of the group. + ResourceId *string `type:"string"` + + // The type of resource. The only supported value is auto-scaling-group. + ResourceType *string `type:"string"` + + // The tag value. + Value *string `type:"string"` +} + +// String returns the string representation +func (s Tag) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Tag) GoString() string { + return s.String() +} + +// Describes a tag for an Auto Scaling group. +type TagDescription struct { + _ struct{} `type:"structure"` + + // The tag key. + Key *string `min:"1" type:"string"` + + // Determines whether the tag is added to new instances as they are launched + // in the group. + PropagateAtLaunch *bool `type:"boolean"` + + // The name of the group. + ResourceId *string `type:"string"` + + // The type of resource. The only supported value is auto-scaling-group. + ResourceType *string `type:"string"` + + // The tag value. + Value *string `type:"string"` +} + +// String returns the string representation +func (s TagDescription) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TagDescription) GoString() string { + return s.String() +} + +type TerminateInstanceInAutoScalingGroupInput struct { + _ struct{} `type:"structure"` + + // The ID of the EC2 instance. + InstanceId *string `min:"1" type:"string" required:"true"` + + // If true, terminating the instance also decrements the size of the Auto Scaling + // group. 
+ ShouldDecrementDesiredCapacity *bool `type:"boolean" required:"true"` +} + +// String returns the string representation +func (s TerminateInstanceInAutoScalingGroupInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TerminateInstanceInAutoScalingGroupInput) GoString() string { + return s.String() +} + +type TerminateInstanceInAutoScalingGroupOutput struct { + _ struct{} `type:"structure"` + + // A scaling activity. + Activity *Activity `type:"structure"` +} + +// String returns the string representation +func (s TerminateInstanceInAutoScalingGroupOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TerminateInstanceInAutoScalingGroupOutput) GoString() string { + return s.String() +} + +type UpdateAutoScalingGroupInput struct { + _ struct{} `type:"structure"` + + // The name of the Auto Scaling group. + AutoScalingGroupName *string `min:"1" type:"string" required:"true"` + + // One or more Availability Zones for the group. + AvailabilityZones []*string `min:"1" type:"list"` + + // The amount of time, in seconds, after a scaling activity completes before + // another scaling activity can start. The default is 300. + // + // For more information, see Understanding Auto Scaling Cooldowns (http://docs.aws.amazon.com/AutoScaling/latest/DeveloperGuide/Cooldown.html) + // in the Auto Scaling Developer Guide. + DefaultCooldown *int64 `type:"integer"` + + // The number of EC2 instances that should be running in the Auto Scaling group. + // This number must be greater than or equal to the minimum size of the group + // and less than or equal to the maximum size of the group. + DesiredCapacity *int64 `type:"integer"` + + // The amount of time, in seconds, that Auto Scaling waits before checking the + // health status of an EC2 instance that has come into service. The default + // is 300. + // + // For more information, see Health Checks For Auto Scaling Instances (http://docs.aws.amazon.com/AutoScaling/latest/DeveloperGuide/healthcheck.html) + // in the Auto Scaling Developer Guide. + HealthCheckGracePeriod *int64 `type:"integer"` + + // The service to use for the health checks. The valid values are EC2 and ELB. + HealthCheckType *string `min:"1" type:"string"` + + // The name of the launch configuration. + LaunchConfigurationName *string `min:"1" type:"string"` + + // The maximum size of the Auto Scaling group. + MaxSize *int64 `type:"integer"` + + // The minimum size of the Auto Scaling group. + MinSize *int64 `type:"integer"` + + // Indicates whether newly launched instances are protected from termination + // by Auto Scaling when scaling in. + NewInstancesProtectedFromScaleIn *bool `type:"boolean"` + + // The name of the placement group into which you'll launch your instances, + // if any. For more information, see Placement Groups (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/placement-groups.html) + // in the Amazon Elastic Compute Cloud User Guide. + PlacementGroup *string `min:"1" type:"string"` + + // A standalone termination policy or a list of termination policies used to + // select the instance to terminate. The policies are executed in the order + // that they are listed. + // + // For more information, see Choosing a Termination Policy for Your Auto Scaling + // Group (http://docs.aws.amazon.com/AutoScaling/latest/DeveloperGuide/us-termination-policy.html) + // in the Auto Scaling Developer Guide. 
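+	//
+	// As an illustrative sketch only (hypothetical name and sizes), resizing a
+	// group while setting a termination policy might look like:
+	//
+	//	params := &UpdateAutoScalingGroupInput{
+	//		AutoScalingGroupName: aws.String("my-asg"),
+	//		MinSize:              aws.Int64(1),
+	//		MaxSize:              aws.Int64(5),
+	//		DesiredCapacity:      aws.Int64(3),
+	//		TerminationPolicies:  []*string{aws.String("OldestInstance")},
+	//	}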
+ TerminationPolicies []*string `type:"list"` + + // The ID of the subnet, if you are launching into a VPC. You can specify several + // subnets in a comma-separated list. + // + // When you specify VPCZoneIdentifier with AvailabilityZones, ensure that the + // subnets' Availability Zones match the values you specify for AvailabilityZones. + // + // For more information, see Auto Scaling and Amazon Virtual Private Cloud + // (http://docs.aws.amazon.com/AutoScaling/latest/DeveloperGuide/autoscalingsubnets.html) + // in the Auto Scaling Developer Guide. + VPCZoneIdentifier *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s UpdateAutoScalingGroupInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateAutoScalingGroupInput) GoString() string { + return s.String() +} + +type UpdateAutoScalingGroupOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s UpdateAutoScalingGroupOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateAutoScalingGroupOutput) GoString() string { + return s.String() +} + +const ( + // @enum LifecycleState + LifecycleStatePending = "Pending" + // @enum LifecycleState + LifecycleStatePendingWait = "Pending:Wait" + // @enum LifecycleState + LifecycleStatePendingProceed = "Pending:Proceed" + // @enum LifecycleState + LifecycleStateQuarantined = "Quarantined" + // @enum LifecycleState + LifecycleStateInService = "InService" + // @enum LifecycleState + LifecycleStateTerminating = "Terminating" + // @enum LifecycleState + LifecycleStateTerminatingWait = "Terminating:Wait" + // @enum LifecycleState + LifecycleStateTerminatingProceed = "Terminating:Proceed" + // @enum LifecycleState + LifecycleStateTerminated = "Terminated" + // @enum LifecycleState + LifecycleStateDetaching = "Detaching" + // @enum LifecycleState + LifecycleStateDetached = "Detached" + // @enum LifecycleState + LifecycleStateEnteringStandby = "EnteringStandby" + // @enum LifecycleState + LifecycleStateStandby = "Standby" +) + +const ( + // @enum ScalingActivityStatusCode + ScalingActivityStatusCodePendingSpotBidPlacement = "PendingSpotBidPlacement" + // @enum ScalingActivityStatusCode + ScalingActivityStatusCodeWaitingForSpotInstanceRequestId = "WaitingForSpotInstanceRequestId" + // @enum ScalingActivityStatusCode + ScalingActivityStatusCodeWaitingForSpotInstanceId = "WaitingForSpotInstanceId" + // @enum ScalingActivityStatusCode + ScalingActivityStatusCodeWaitingForInstanceId = "WaitingForInstanceId" + // @enum ScalingActivityStatusCode + ScalingActivityStatusCodePreInService = "PreInService" + // @enum ScalingActivityStatusCode + ScalingActivityStatusCodeInProgress = "InProgress" + // @enum ScalingActivityStatusCode + ScalingActivityStatusCodeWaitingForElbconnectionDraining = "WaitingForELBConnectionDraining" + // @enum ScalingActivityStatusCode + ScalingActivityStatusCodeMidLifecycleAction = "MidLifecycleAction" + // @enum ScalingActivityStatusCode + ScalingActivityStatusCodeWaitingForInstanceWarmup = "WaitingForInstanceWarmup" + // @enum ScalingActivityStatusCode + ScalingActivityStatusCodeSuccessful = "Successful" + // @enum ScalingActivityStatusCode + ScalingActivityStatusCodeFailed = "Failed" + // @enum ScalingActivityStatusCode + ScalingActivityStatusCodeCancelled = "Cancelled" +) diff --git a/vendor/github.com/aws/aws-sdk-go/service/autoscaling/autoscalingiface/interface.go 
b/vendor/github.com/aws/aws-sdk-go/service/autoscaling/autoscalingiface/interface.go
new file mode 100644
index 000000000..5f9ed8e60
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/autoscaling/autoscalingiface/interface.go
@@ -0,0 +1,226 @@
+// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT.
+
+// Package autoscalingiface provides an interface for the Auto Scaling service.
+package autoscalingiface
+
+import (
+	"github.com/aws/aws-sdk-go/aws/request"
+	"github.com/aws/aws-sdk-go/service/autoscaling"
+)
+
+// AutoScalingAPI is the interface type for autoscaling.AutoScaling.
+type AutoScalingAPI interface {
+	AttachInstancesRequest(*autoscaling.AttachInstancesInput) (*request.Request, *autoscaling.AttachInstancesOutput)
+
+	AttachInstances(*autoscaling.AttachInstancesInput) (*autoscaling.AttachInstancesOutput, error)
+
+	AttachLoadBalancersRequest(*autoscaling.AttachLoadBalancersInput) (*request.Request, *autoscaling.AttachLoadBalancersOutput)
+
+	AttachLoadBalancers(*autoscaling.AttachLoadBalancersInput) (*autoscaling.AttachLoadBalancersOutput, error)
+
+	CompleteLifecycleActionRequest(*autoscaling.CompleteLifecycleActionInput) (*request.Request, *autoscaling.CompleteLifecycleActionOutput)
+
+	CompleteLifecycleAction(*autoscaling.CompleteLifecycleActionInput) (*autoscaling.CompleteLifecycleActionOutput, error)
+
+	CreateAutoScalingGroupRequest(*autoscaling.CreateAutoScalingGroupInput) (*request.Request, *autoscaling.CreateAutoScalingGroupOutput)
+
+	CreateAutoScalingGroup(*autoscaling.CreateAutoScalingGroupInput) (*autoscaling.CreateAutoScalingGroupOutput, error)
+
+	CreateLaunchConfigurationRequest(*autoscaling.CreateLaunchConfigurationInput) (*request.Request, *autoscaling.CreateLaunchConfigurationOutput)
+
+	CreateLaunchConfiguration(*autoscaling.CreateLaunchConfigurationInput) (*autoscaling.CreateLaunchConfigurationOutput, error)
+
+	CreateOrUpdateTagsRequest(*autoscaling.CreateOrUpdateTagsInput) (*request.Request, *autoscaling.CreateOrUpdateTagsOutput)
+
+	CreateOrUpdateTags(*autoscaling.CreateOrUpdateTagsInput) (*autoscaling.CreateOrUpdateTagsOutput, error)
+
+	DeleteAutoScalingGroupRequest(*autoscaling.DeleteAutoScalingGroupInput) (*request.Request, *autoscaling.DeleteAutoScalingGroupOutput)
+
+	DeleteAutoScalingGroup(*autoscaling.DeleteAutoScalingGroupInput) (*autoscaling.DeleteAutoScalingGroupOutput, error)
+
+	DeleteLaunchConfigurationRequest(*autoscaling.DeleteLaunchConfigurationInput) (*request.Request, *autoscaling.DeleteLaunchConfigurationOutput)
+
+	DeleteLaunchConfiguration(*autoscaling.DeleteLaunchConfigurationInput) (*autoscaling.DeleteLaunchConfigurationOutput, error)
+
+	DeleteLifecycleHookRequest(*autoscaling.DeleteLifecycleHookInput) (*request.Request, *autoscaling.DeleteLifecycleHookOutput)
+
+	DeleteLifecycleHook(*autoscaling.DeleteLifecycleHookInput) (*autoscaling.DeleteLifecycleHookOutput, error)
+
+	DeleteNotificationConfigurationRequest(*autoscaling.DeleteNotificationConfigurationInput) (*request.Request, *autoscaling.DeleteNotificationConfigurationOutput)
+
+	DeleteNotificationConfiguration(*autoscaling.DeleteNotificationConfigurationInput) (*autoscaling.DeleteNotificationConfigurationOutput, error)
+
+	DeletePolicyRequest(*autoscaling.DeletePolicyInput) (*request.Request, *autoscaling.DeletePolicyOutput)
+
+	DeletePolicy(*autoscaling.DeletePolicyInput) (*autoscaling.DeletePolicyOutput, error)
+
+	DeleteScheduledActionRequest(*autoscaling.DeleteScheduledActionInput) (*request.Request, *autoscaling.DeleteScheduledActionOutput)
+
+	
DeleteScheduledAction(*autoscaling.DeleteScheduledActionInput) (*autoscaling.DeleteScheduledActionOutput, error) + + DeleteTagsRequest(*autoscaling.DeleteTagsInput) (*request.Request, *autoscaling.DeleteTagsOutput) + + DeleteTags(*autoscaling.DeleteTagsInput) (*autoscaling.DeleteTagsOutput, error) + + DescribeAccountLimitsRequest(*autoscaling.DescribeAccountLimitsInput) (*request.Request, *autoscaling.DescribeAccountLimitsOutput) + + DescribeAccountLimits(*autoscaling.DescribeAccountLimitsInput) (*autoscaling.DescribeAccountLimitsOutput, error) + + DescribeAdjustmentTypesRequest(*autoscaling.DescribeAdjustmentTypesInput) (*request.Request, *autoscaling.DescribeAdjustmentTypesOutput) + + DescribeAdjustmentTypes(*autoscaling.DescribeAdjustmentTypesInput) (*autoscaling.DescribeAdjustmentTypesOutput, error) + + DescribeAutoScalingGroupsRequest(*autoscaling.DescribeAutoScalingGroupsInput) (*request.Request, *autoscaling.DescribeAutoScalingGroupsOutput) + + DescribeAutoScalingGroups(*autoscaling.DescribeAutoScalingGroupsInput) (*autoscaling.DescribeAutoScalingGroupsOutput, error) + + DescribeAutoScalingGroupsPages(*autoscaling.DescribeAutoScalingGroupsInput, func(*autoscaling.DescribeAutoScalingGroupsOutput, bool) bool) error + + DescribeAutoScalingInstancesRequest(*autoscaling.DescribeAutoScalingInstancesInput) (*request.Request, *autoscaling.DescribeAutoScalingInstancesOutput) + + DescribeAutoScalingInstances(*autoscaling.DescribeAutoScalingInstancesInput) (*autoscaling.DescribeAutoScalingInstancesOutput, error) + + DescribeAutoScalingInstancesPages(*autoscaling.DescribeAutoScalingInstancesInput, func(*autoscaling.DescribeAutoScalingInstancesOutput, bool) bool) error + + DescribeAutoScalingNotificationTypesRequest(*autoscaling.DescribeAutoScalingNotificationTypesInput) (*request.Request, *autoscaling.DescribeAutoScalingNotificationTypesOutput) + + DescribeAutoScalingNotificationTypes(*autoscaling.DescribeAutoScalingNotificationTypesInput) (*autoscaling.DescribeAutoScalingNotificationTypesOutput, error) + + DescribeLaunchConfigurationsRequest(*autoscaling.DescribeLaunchConfigurationsInput) (*request.Request, *autoscaling.DescribeLaunchConfigurationsOutput) + + DescribeLaunchConfigurations(*autoscaling.DescribeLaunchConfigurationsInput) (*autoscaling.DescribeLaunchConfigurationsOutput, error) + + DescribeLaunchConfigurationsPages(*autoscaling.DescribeLaunchConfigurationsInput, func(*autoscaling.DescribeLaunchConfigurationsOutput, bool) bool) error + + DescribeLifecycleHookTypesRequest(*autoscaling.DescribeLifecycleHookTypesInput) (*request.Request, *autoscaling.DescribeLifecycleHookTypesOutput) + + DescribeLifecycleHookTypes(*autoscaling.DescribeLifecycleHookTypesInput) (*autoscaling.DescribeLifecycleHookTypesOutput, error) + + DescribeLifecycleHooksRequest(*autoscaling.DescribeLifecycleHooksInput) (*request.Request, *autoscaling.DescribeLifecycleHooksOutput) + + DescribeLifecycleHooks(*autoscaling.DescribeLifecycleHooksInput) (*autoscaling.DescribeLifecycleHooksOutput, error) + + DescribeLoadBalancersRequest(*autoscaling.DescribeLoadBalancersInput) (*request.Request, *autoscaling.DescribeLoadBalancersOutput) + + DescribeLoadBalancers(*autoscaling.DescribeLoadBalancersInput) (*autoscaling.DescribeLoadBalancersOutput, error) + + DescribeMetricCollectionTypesRequest(*autoscaling.DescribeMetricCollectionTypesInput) (*request.Request, *autoscaling.DescribeMetricCollectionTypesOutput) + + DescribeMetricCollectionTypes(*autoscaling.DescribeMetricCollectionTypesInput) 
(*autoscaling.DescribeMetricCollectionTypesOutput, error) + + DescribeNotificationConfigurationsRequest(*autoscaling.DescribeNotificationConfigurationsInput) (*request.Request, *autoscaling.DescribeNotificationConfigurationsOutput) + + DescribeNotificationConfigurations(*autoscaling.DescribeNotificationConfigurationsInput) (*autoscaling.DescribeNotificationConfigurationsOutput, error) + + DescribeNotificationConfigurationsPages(*autoscaling.DescribeNotificationConfigurationsInput, func(*autoscaling.DescribeNotificationConfigurationsOutput, bool) bool) error + + DescribePoliciesRequest(*autoscaling.DescribePoliciesInput) (*request.Request, *autoscaling.DescribePoliciesOutput) + + DescribePolicies(*autoscaling.DescribePoliciesInput) (*autoscaling.DescribePoliciesOutput, error) + + DescribePoliciesPages(*autoscaling.DescribePoliciesInput, func(*autoscaling.DescribePoliciesOutput, bool) bool) error + + DescribeScalingActivitiesRequest(*autoscaling.DescribeScalingActivitiesInput) (*request.Request, *autoscaling.DescribeScalingActivitiesOutput) + + DescribeScalingActivities(*autoscaling.DescribeScalingActivitiesInput) (*autoscaling.DescribeScalingActivitiesOutput, error) + + DescribeScalingActivitiesPages(*autoscaling.DescribeScalingActivitiesInput, func(*autoscaling.DescribeScalingActivitiesOutput, bool) bool) error + + DescribeScalingProcessTypesRequest(*autoscaling.DescribeScalingProcessTypesInput) (*request.Request, *autoscaling.DescribeScalingProcessTypesOutput) + + DescribeScalingProcessTypes(*autoscaling.DescribeScalingProcessTypesInput) (*autoscaling.DescribeScalingProcessTypesOutput, error) + + DescribeScheduledActionsRequest(*autoscaling.DescribeScheduledActionsInput) (*request.Request, *autoscaling.DescribeScheduledActionsOutput) + + DescribeScheduledActions(*autoscaling.DescribeScheduledActionsInput) (*autoscaling.DescribeScheduledActionsOutput, error) + + DescribeScheduledActionsPages(*autoscaling.DescribeScheduledActionsInput, func(*autoscaling.DescribeScheduledActionsOutput, bool) bool) error + + DescribeTagsRequest(*autoscaling.DescribeTagsInput) (*request.Request, *autoscaling.DescribeTagsOutput) + + DescribeTags(*autoscaling.DescribeTagsInput) (*autoscaling.DescribeTagsOutput, error) + + DescribeTagsPages(*autoscaling.DescribeTagsInput, func(*autoscaling.DescribeTagsOutput, bool) bool) error + + DescribeTerminationPolicyTypesRequest(*autoscaling.DescribeTerminationPolicyTypesInput) (*request.Request, *autoscaling.DescribeTerminationPolicyTypesOutput) + + DescribeTerminationPolicyTypes(*autoscaling.DescribeTerminationPolicyTypesInput) (*autoscaling.DescribeTerminationPolicyTypesOutput, error) + + DetachInstancesRequest(*autoscaling.DetachInstancesInput) (*request.Request, *autoscaling.DetachInstancesOutput) + + DetachInstances(*autoscaling.DetachInstancesInput) (*autoscaling.DetachInstancesOutput, error) + + DetachLoadBalancersRequest(*autoscaling.DetachLoadBalancersInput) (*request.Request, *autoscaling.DetachLoadBalancersOutput) + + DetachLoadBalancers(*autoscaling.DetachLoadBalancersInput) (*autoscaling.DetachLoadBalancersOutput, error) + + DisableMetricsCollectionRequest(*autoscaling.DisableMetricsCollectionInput) (*request.Request, *autoscaling.DisableMetricsCollectionOutput) + + DisableMetricsCollection(*autoscaling.DisableMetricsCollectionInput) (*autoscaling.DisableMetricsCollectionOutput, error) + + EnableMetricsCollectionRequest(*autoscaling.EnableMetricsCollectionInput) (*request.Request, *autoscaling.EnableMetricsCollectionOutput) + + 
EnableMetricsCollection(*autoscaling.EnableMetricsCollectionInput) (*autoscaling.EnableMetricsCollectionOutput, error) + + EnterStandbyRequest(*autoscaling.EnterStandbyInput) (*request.Request, *autoscaling.EnterStandbyOutput) + + EnterStandby(*autoscaling.EnterStandbyInput) (*autoscaling.EnterStandbyOutput, error) + + ExecutePolicyRequest(*autoscaling.ExecutePolicyInput) (*request.Request, *autoscaling.ExecutePolicyOutput) + + ExecutePolicy(*autoscaling.ExecutePolicyInput) (*autoscaling.ExecutePolicyOutput, error) + + ExitStandbyRequest(*autoscaling.ExitStandbyInput) (*request.Request, *autoscaling.ExitStandbyOutput) + + ExitStandby(*autoscaling.ExitStandbyInput) (*autoscaling.ExitStandbyOutput, error) + + PutLifecycleHookRequest(*autoscaling.PutLifecycleHookInput) (*request.Request, *autoscaling.PutLifecycleHookOutput) + + PutLifecycleHook(*autoscaling.PutLifecycleHookInput) (*autoscaling.PutLifecycleHookOutput, error) + + PutNotificationConfigurationRequest(*autoscaling.PutNotificationConfigurationInput) (*request.Request, *autoscaling.PutNotificationConfigurationOutput) + + PutNotificationConfiguration(*autoscaling.PutNotificationConfigurationInput) (*autoscaling.PutNotificationConfigurationOutput, error) + + PutScalingPolicyRequest(*autoscaling.PutScalingPolicyInput) (*request.Request, *autoscaling.PutScalingPolicyOutput) + + PutScalingPolicy(*autoscaling.PutScalingPolicyInput) (*autoscaling.PutScalingPolicyOutput, error) + + PutScheduledUpdateGroupActionRequest(*autoscaling.PutScheduledUpdateGroupActionInput) (*request.Request, *autoscaling.PutScheduledUpdateGroupActionOutput) + + PutScheduledUpdateGroupAction(*autoscaling.PutScheduledUpdateGroupActionInput) (*autoscaling.PutScheduledUpdateGroupActionOutput, error) + + RecordLifecycleActionHeartbeatRequest(*autoscaling.RecordLifecycleActionHeartbeatInput) (*request.Request, *autoscaling.RecordLifecycleActionHeartbeatOutput) + + RecordLifecycleActionHeartbeat(*autoscaling.RecordLifecycleActionHeartbeatInput) (*autoscaling.RecordLifecycleActionHeartbeatOutput, error) + + ResumeProcessesRequest(*autoscaling.ScalingProcessQuery) (*request.Request, *autoscaling.ResumeProcessesOutput) + + ResumeProcesses(*autoscaling.ScalingProcessQuery) (*autoscaling.ResumeProcessesOutput, error) + + SetDesiredCapacityRequest(*autoscaling.SetDesiredCapacityInput) (*request.Request, *autoscaling.SetDesiredCapacityOutput) + + SetDesiredCapacity(*autoscaling.SetDesiredCapacityInput) (*autoscaling.SetDesiredCapacityOutput, error) + + SetInstanceHealthRequest(*autoscaling.SetInstanceHealthInput) (*request.Request, *autoscaling.SetInstanceHealthOutput) + + SetInstanceHealth(*autoscaling.SetInstanceHealthInput) (*autoscaling.SetInstanceHealthOutput, error) + + SetInstanceProtectionRequest(*autoscaling.SetInstanceProtectionInput) (*request.Request, *autoscaling.SetInstanceProtectionOutput) + + SetInstanceProtection(*autoscaling.SetInstanceProtectionInput) (*autoscaling.SetInstanceProtectionOutput, error) + + SuspendProcessesRequest(*autoscaling.ScalingProcessQuery) (*request.Request, *autoscaling.SuspendProcessesOutput) + + SuspendProcesses(*autoscaling.ScalingProcessQuery) (*autoscaling.SuspendProcessesOutput, error) + + TerminateInstanceInAutoScalingGroupRequest(*autoscaling.TerminateInstanceInAutoScalingGroupInput) (*request.Request, *autoscaling.TerminateInstanceInAutoScalingGroupOutput) + + TerminateInstanceInAutoScalingGroup(*autoscaling.TerminateInstanceInAutoScalingGroupInput) (*autoscaling.TerminateInstanceInAutoScalingGroupOutput, error) + + 
UpdateAutoScalingGroupRequest(*autoscaling.UpdateAutoScalingGroupInput) (*request.Request, *autoscaling.UpdateAutoScalingGroupOutput) + + UpdateAutoScalingGroup(*autoscaling.UpdateAutoScalingGroupInput) (*autoscaling.UpdateAutoScalingGroupOutput, error) +} + +var _ AutoScalingAPI = (*autoscaling.AutoScaling)(nil) diff --git a/vendor/github.com/aws/aws-sdk-go/service/autoscaling/examples_test.go b/vendor/github.com/aws/aws-sdk-go/service/autoscaling/examples_test.go new file mode 100644 index 000000000..8a9cb6e42 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/autoscaling/examples_test.go @@ -0,0 +1,1207 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +package autoscaling_test + +import ( + "bytes" + "fmt" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/session" + "github.com/aws/aws-sdk-go/service/autoscaling" +) + +var _ time.Duration +var _ bytes.Buffer + +func ExampleAutoScaling_AttachInstances() { + svc := autoscaling.New(session.New()) + + params := &autoscaling.AttachInstancesInput{ + AutoScalingGroupName: aws.String("ResourceName"), // Required + InstanceIds: []*string{ + aws.String("XmlStringMaxLen19"), // Required + // More values... + }, + } + resp, err := svc.AttachInstances(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleAutoScaling_AttachLoadBalancers() { + svc := autoscaling.New(session.New()) + + params := &autoscaling.AttachLoadBalancersInput{ + AutoScalingGroupName: aws.String("ResourceName"), + LoadBalancerNames: []*string{ + aws.String("XmlStringMaxLen255"), // Required + // More values... + }, + } + resp, err := svc.AttachLoadBalancers(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleAutoScaling_CompleteLifecycleAction() { + svc := autoscaling.New(session.New()) + + params := &autoscaling.CompleteLifecycleActionInput{ + AutoScalingGroupName: aws.String("ResourceName"), // Required + LifecycleActionResult: aws.String("LifecycleActionResult"), // Required + LifecycleActionToken: aws.String("LifecycleActionToken"), // Required + LifecycleHookName: aws.String("AsciiStringMaxLen255"), // Required + } + resp, err := svc.CompleteLifecycleAction(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleAutoScaling_CreateAutoScalingGroup() { + svc := autoscaling.New(session.New()) + + params := &autoscaling.CreateAutoScalingGroupInput{ + AutoScalingGroupName: aws.String("XmlStringMaxLen255"), // Required + MaxSize: aws.Int64(1), // Required + MinSize: aws.Int64(1), // Required + AvailabilityZones: []*string{ + aws.String("XmlStringMaxLen255"), // Required + // More values... + }, + DefaultCooldown: aws.Int64(1), + DesiredCapacity: aws.Int64(1), + HealthCheckGracePeriod: aws.Int64(1), + HealthCheckType: aws.String("XmlStringMaxLen32"), + InstanceId: aws.String("XmlStringMaxLen19"), + LaunchConfigurationName: aws.String("ResourceName"), + LoadBalancerNames: []*string{ + aws.String("XmlStringMaxLen255"), // Required + // More values... 
+ }, + NewInstancesProtectedFromScaleIn: aws.Bool(true), + PlacementGroup: aws.String("XmlStringMaxLen255"), + Tags: []*autoscaling.Tag{ + { // Required + Key: aws.String("TagKey"), // Required + PropagateAtLaunch: aws.Bool(true), + ResourceId: aws.String("XmlString"), + ResourceType: aws.String("XmlString"), + Value: aws.String("TagValue"), + }, + // More values... + }, + TerminationPolicies: []*string{ + aws.String("XmlStringMaxLen1600"), // Required + // More values... + }, + VPCZoneIdentifier: aws.String("XmlStringMaxLen255"), + } + resp, err := svc.CreateAutoScalingGroup(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleAutoScaling_CreateLaunchConfiguration() { + svc := autoscaling.New(session.New()) + + params := &autoscaling.CreateLaunchConfigurationInput{ + LaunchConfigurationName: aws.String("XmlStringMaxLen255"), // Required + AssociatePublicIpAddress: aws.Bool(true), + BlockDeviceMappings: []*autoscaling.BlockDeviceMapping{ + { // Required + DeviceName: aws.String("XmlStringMaxLen255"), // Required + Ebs: &autoscaling.Ebs{ + DeleteOnTermination: aws.Bool(true), + Encrypted: aws.Bool(true), + Iops: aws.Int64(1), + SnapshotId: aws.String("XmlStringMaxLen255"), + VolumeSize: aws.Int64(1), + VolumeType: aws.String("BlockDeviceEbsVolumeType"), + }, + NoDevice: aws.Bool(true), + VirtualName: aws.String("XmlStringMaxLen255"), + }, + // More values... + }, + ClassicLinkVPCId: aws.String("XmlStringMaxLen255"), + ClassicLinkVPCSecurityGroups: []*string{ + aws.String("XmlStringMaxLen255"), // Required + // More values... + }, + EbsOptimized: aws.Bool(true), + IamInstanceProfile: aws.String("XmlStringMaxLen1600"), + ImageId: aws.String("XmlStringMaxLen255"), + InstanceId: aws.String("XmlStringMaxLen19"), + InstanceMonitoring: &autoscaling.InstanceMonitoring{ + Enabled: aws.Bool(true), + }, + InstanceType: aws.String("XmlStringMaxLen255"), + KernelId: aws.String("XmlStringMaxLen255"), + KeyName: aws.String("XmlStringMaxLen255"), + PlacementTenancy: aws.String("XmlStringMaxLen64"), + RamdiskId: aws.String("XmlStringMaxLen255"), + SecurityGroups: []*string{ + aws.String("XmlString"), // Required + // More values... + }, + SpotPrice: aws.String("SpotPrice"), + UserData: aws.String("XmlStringUserData"), + } + resp, err := svc.CreateLaunchConfiguration(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleAutoScaling_CreateOrUpdateTags() { + svc := autoscaling.New(session.New()) + + params := &autoscaling.CreateOrUpdateTagsInput{ + Tags: []*autoscaling.Tag{ // Required + { // Required + Key: aws.String("TagKey"), // Required + PropagateAtLaunch: aws.Bool(true), + ResourceId: aws.String("XmlString"), + ResourceType: aws.String("XmlString"), + Value: aws.String("TagValue"), + }, + // More values... + }, + } + resp, err := svc.CreateOrUpdateTags(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleAutoScaling_DeleteAutoScalingGroup() { + svc := autoscaling.New(session.New()) + + params := &autoscaling.DeleteAutoScalingGroupInput{ + AutoScalingGroupName: aws.String("ResourceName"), // Required + ForceDelete: aws.Bool(true), + } + resp, err := svc.DeleteAutoScalingGroup(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleAutoScaling_DeleteLaunchConfiguration() { + svc := autoscaling.New(session.New()) + + params := &autoscaling.DeleteLaunchConfigurationInput{ + LaunchConfigurationName: aws.String("ResourceName"), // Required + } + resp, err := svc.DeleteLaunchConfiguration(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleAutoScaling_DeleteLifecycleHook() { + svc := autoscaling.New(session.New()) + + params := &autoscaling.DeleteLifecycleHookInput{ + AutoScalingGroupName: aws.String("ResourceName"), // Required + LifecycleHookName: aws.String("AsciiStringMaxLen255"), // Required + } + resp, err := svc.DeleteLifecycleHook(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleAutoScaling_DeleteNotificationConfiguration() { + svc := autoscaling.New(session.New()) + + params := &autoscaling.DeleteNotificationConfigurationInput{ + AutoScalingGroupName: aws.String("ResourceName"), // Required + TopicARN: aws.String("ResourceName"), // Required + } + resp, err := svc.DeleteNotificationConfiguration(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleAutoScaling_DeletePolicy() { + svc := autoscaling.New(session.New()) + + params := &autoscaling.DeletePolicyInput{ + PolicyName: aws.String("ResourceName"), // Required + AutoScalingGroupName: aws.String("ResourceName"), + } + resp, err := svc.DeletePolicy(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleAutoScaling_DeleteScheduledAction() { + svc := autoscaling.New(session.New()) + + params := &autoscaling.DeleteScheduledActionInput{ + ScheduledActionName: aws.String("ResourceName"), // Required + AutoScalingGroupName: aws.String("ResourceName"), + } + resp, err := svc.DeleteScheduledAction(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleAutoScaling_DeleteTags() { + svc := autoscaling.New(session.New()) + + params := &autoscaling.DeleteTagsInput{ + Tags: []*autoscaling.Tag{ // Required + { // Required + Key: aws.String("TagKey"), // Required + PropagateAtLaunch: aws.Bool(true), + ResourceId: aws.String("XmlString"), + ResourceType: aws.String("XmlString"), + Value: aws.String("TagValue"), + }, + // More values... + }, + } + resp, err := svc.DeleteTags(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleAutoScaling_DescribeAccountLimits() { + svc := autoscaling.New(session.New()) + + var params *autoscaling.DescribeAccountLimitsInput + resp, err := svc.DescribeAccountLimits(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleAutoScaling_DescribeAdjustmentTypes() { + svc := autoscaling.New(session.New()) + + var params *autoscaling.DescribeAdjustmentTypesInput + resp, err := svc.DescribeAdjustmentTypes(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleAutoScaling_DescribeAutoScalingGroups() { + svc := autoscaling.New(session.New()) + + params := &autoscaling.DescribeAutoScalingGroupsInput{ + AutoScalingGroupNames: []*string{ + aws.String("ResourceName"), // Required + // More values... + }, + MaxRecords: aws.Int64(1), + NextToken: aws.String("XmlString"), + } + resp, err := svc.DescribeAutoScalingGroups(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleAutoScaling_DescribeAutoScalingInstances() { + svc := autoscaling.New(session.New()) + + params := &autoscaling.DescribeAutoScalingInstancesInput{ + InstanceIds: []*string{ + aws.String("XmlStringMaxLen19"), // Required + // More values... + }, + MaxRecords: aws.Int64(1), + NextToken: aws.String("XmlString"), + } + resp, err := svc.DescribeAutoScalingInstances(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleAutoScaling_DescribeAutoScalingNotificationTypes() { + svc := autoscaling.New(session.New()) + + var params *autoscaling.DescribeAutoScalingNotificationTypesInput + resp, err := svc.DescribeAutoScalingNotificationTypes(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleAutoScaling_DescribeLaunchConfigurations() { + svc := autoscaling.New(session.New()) + + params := &autoscaling.DescribeLaunchConfigurationsInput{ + LaunchConfigurationNames: []*string{ + aws.String("ResourceName"), // Required + // More values... 
+ }, + MaxRecords: aws.Int64(1), + NextToken: aws.String("XmlString"), + } + resp, err := svc.DescribeLaunchConfigurations(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleAutoScaling_DescribeLifecycleHookTypes() { + svc := autoscaling.New(session.New()) + + var params *autoscaling.DescribeLifecycleHookTypesInput + resp, err := svc.DescribeLifecycleHookTypes(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleAutoScaling_DescribeLifecycleHooks() { + svc := autoscaling.New(session.New()) + + params := &autoscaling.DescribeLifecycleHooksInput{ + AutoScalingGroupName: aws.String("ResourceName"), // Required + LifecycleHookNames: []*string{ + aws.String("AsciiStringMaxLen255"), // Required + // More values... + }, + } + resp, err := svc.DescribeLifecycleHooks(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleAutoScaling_DescribeLoadBalancers() { + svc := autoscaling.New(session.New()) + + params := &autoscaling.DescribeLoadBalancersInput{ + AutoScalingGroupName: aws.String("ResourceName"), // Required + MaxRecords: aws.Int64(1), + NextToken: aws.String("XmlString"), + } + resp, err := svc.DescribeLoadBalancers(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleAutoScaling_DescribeMetricCollectionTypes() { + svc := autoscaling.New(session.New()) + + var params *autoscaling.DescribeMetricCollectionTypesInput + resp, err := svc.DescribeMetricCollectionTypes(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleAutoScaling_DescribeNotificationConfigurations() { + svc := autoscaling.New(session.New()) + + params := &autoscaling.DescribeNotificationConfigurationsInput{ + AutoScalingGroupNames: []*string{ + aws.String("ResourceName"), // Required + // More values... + }, + MaxRecords: aws.Int64(1), + NextToken: aws.String("XmlString"), + } + resp, err := svc.DescribeNotificationConfigurations(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleAutoScaling_DescribePolicies() { + svc := autoscaling.New(session.New()) + + params := &autoscaling.DescribePoliciesInput{ + AutoScalingGroupName: aws.String("ResourceName"), + MaxRecords: aws.Int64(1), + NextToken: aws.String("XmlString"), + PolicyNames: []*string{ + aws.String("ResourceName"), // Required + // More values... + }, + PolicyTypes: []*string{ + aws.String("XmlStringMaxLen64"), // Required + // More values... 
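+			// Valid policy types are SimpleScaling and StepScaling.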
+ }, + } + resp, err := svc.DescribePolicies(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleAutoScaling_DescribeScalingActivities() { + svc := autoscaling.New(session.New()) + + params := &autoscaling.DescribeScalingActivitiesInput{ + ActivityIds: []*string{ + aws.String("XmlString"), // Required + // More values... + }, + AutoScalingGroupName: aws.String("ResourceName"), + MaxRecords: aws.Int64(1), + NextToken: aws.String("XmlString"), + } + resp, err := svc.DescribeScalingActivities(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleAutoScaling_DescribeScalingProcessTypes() { + svc := autoscaling.New(session.New()) + + var params *autoscaling.DescribeScalingProcessTypesInput + resp, err := svc.DescribeScalingProcessTypes(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleAutoScaling_DescribeScheduledActions() { + svc := autoscaling.New(session.New()) + + params := &autoscaling.DescribeScheduledActionsInput{ + AutoScalingGroupName: aws.String("ResourceName"), + EndTime: aws.Time(time.Now()), + MaxRecords: aws.Int64(1), + NextToken: aws.String("XmlString"), + ScheduledActionNames: []*string{ + aws.String("ResourceName"), // Required + // More values... + }, + StartTime: aws.Time(time.Now()), + } + resp, err := svc.DescribeScheduledActions(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleAutoScaling_DescribeTags() { + svc := autoscaling.New(session.New()) + + params := &autoscaling.DescribeTagsInput{ + Filters: []*autoscaling.Filter{ + { // Required + Name: aws.String("XmlString"), + Values: []*string{ + aws.String("XmlString"), // Required + // More values... + }, + }, + // More values... + }, + MaxRecords: aws.Int64(1), + NextToken: aws.String("XmlString"), + } + resp, err := svc.DescribeTags(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleAutoScaling_DescribeTerminationPolicyTypes() { + svc := autoscaling.New(session.New()) + + var params *autoscaling.DescribeTerminationPolicyTypesInput + resp, err := svc.DescribeTerminationPolicyTypes(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleAutoScaling_DetachInstances() { + svc := autoscaling.New(session.New()) + + params := &autoscaling.DetachInstancesInput{ + AutoScalingGroupName: aws.String("ResourceName"), // Required + ShouldDecrementDesiredCapacity: aws.Bool(true), // Required + InstanceIds: []*string{ + aws.String("XmlStringMaxLen19"), // Required + // More values... 
+ }, + } + resp, err := svc.DetachInstances(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleAutoScaling_DetachLoadBalancers() { + svc := autoscaling.New(session.New()) + + params := &autoscaling.DetachLoadBalancersInput{ + AutoScalingGroupName: aws.String("ResourceName"), + LoadBalancerNames: []*string{ + aws.String("XmlStringMaxLen255"), // Required + // More values... + }, + } + resp, err := svc.DetachLoadBalancers(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleAutoScaling_DisableMetricsCollection() { + svc := autoscaling.New(session.New()) + + params := &autoscaling.DisableMetricsCollectionInput{ + AutoScalingGroupName: aws.String("ResourceName"), // Required + Metrics: []*string{ + aws.String("XmlStringMaxLen255"), // Required + // More values... + }, + } + resp, err := svc.DisableMetricsCollection(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleAutoScaling_EnableMetricsCollection() { + svc := autoscaling.New(session.New()) + + params := &autoscaling.EnableMetricsCollectionInput{ + AutoScalingGroupName: aws.String("ResourceName"), // Required + Granularity: aws.String("XmlStringMaxLen255"), // Required + Metrics: []*string{ + aws.String("XmlStringMaxLen255"), // Required + // More values... + }, + } + resp, err := svc.EnableMetricsCollection(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleAutoScaling_EnterStandby() { + svc := autoscaling.New(session.New()) + + params := &autoscaling.EnterStandbyInput{ + AutoScalingGroupName: aws.String("ResourceName"), // Required + ShouldDecrementDesiredCapacity: aws.Bool(true), // Required + InstanceIds: []*string{ + aws.String("XmlStringMaxLen19"), // Required + // More values... + }, + } + resp, err := svc.EnterStandby(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleAutoScaling_ExecutePolicy() { + svc := autoscaling.New(session.New()) + + params := &autoscaling.ExecutePolicyInput{ + PolicyName: aws.String("ResourceName"), // Required + AutoScalingGroupName: aws.String("ResourceName"), + BreachThreshold: aws.Float64(1.0), + HonorCooldown: aws.Bool(true), + MetricValue: aws.Float64(1.0), + } + resp, err := svc.ExecutePolicy(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleAutoScaling_ExitStandby() { + svc := autoscaling.New(session.New()) + + params := &autoscaling.ExitStandbyInput{ + AutoScalingGroupName: aws.String("ResourceName"), // Required + InstanceIds: []*string{ + aws.String("XmlStringMaxLen19"), // Required + // More values... + }, + } + resp, err := svc.ExitStandby(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleAutoScaling_PutLifecycleHook() { + svc := autoscaling.New(session.New()) + + params := &autoscaling.PutLifecycleHookInput{ + AutoScalingGroupName: aws.String("ResourceName"), // Required + LifecycleHookName: aws.String("AsciiStringMaxLen255"), // Required + DefaultResult: aws.String("LifecycleActionResult"), + HeartbeatTimeout: aws.Int64(1), + LifecycleTransition: aws.String("LifecycleTransition"), + NotificationMetadata: aws.String("XmlStringMaxLen1023"), + NotificationTargetARN: aws.String("ResourceName"), + RoleARN: aws.String("ResourceName"), + } + resp, err := svc.PutLifecycleHook(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleAutoScaling_PutNotificationConfiguration() { + svc := autoscaling.New(session.New()) + + params := &autoscaling.PutNotificationConfigurationInput{ + AutoScalingGroupName: aws.String("ResourceName"), // Required + NotificationTypes: []*string{ // Required + aws.String("XmlStringMaxLen255"), // Required + // More values... + }, + TopicARN: aws.String("ResourceName"), // Required + } + resp, err := svc.PutNotificationConfiguration(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleAutoScaling_PutScalingPolicy() { + svc := autoscaling.New(session.New()) + + params := &autoscaling.PutScalingPolicyInput{ + AdjustmentType: aws.String("XmlStringMaxLen255"), // Required + AutoScalingGroupName: aws.String("ResourceName"), // Required + PolicyName: aws.String("XmlStringMaxLen255"), // Required + Cooldown: aws.Int64(1), + EstimatedInstanceWarmup: aws.Int64(1), + MetricAggregationType: aws.String("XmlStringMaxLen32"), + MinAdjustmentMagnitude: aws.Int64(1), + MinAdjustmentStep: aws.Int64(1), + PolicyType: aws.String("XmlStringMaxLen64"), + ScalingAdjustment: aws.Int64(1), + StepAdjustments: []*autoscaling.StepAdjustment{ + { // Required + ScalingAdjustment: aws.Int64(1), // Required + MetricIntervalLowerBound: aws.Float64(1.0), + MetricIntervalUpperBound: aws.Float64(1.0), + }, + // More values... + }, + } + resp, err := svc.PutScalingPolicy(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleAutoScaling_PutScheduledUpdateGroupAction() { + svc := autoscaling.New(session.New()) + + params := &autoscaling.PutScheduledUpdateGroupActionInput{ + AutoScalingGroupName: aws.String("ResourceName"), // Required + ScheduledActionName: aws.String("XmlStringMaxLen255"), // Required + DesiredCapacity: aws.Int64(1), + EndTime: aws.Time(time.Now()), + MaxSize: aws.Int64(1), + MinSize: aws.Int64(1), + Recurrence: aws.String("XmlStringMaxLen255"), + StartTime: aws.Time(time.Now()), + Time: aws.Time(time.Now()), + } + resp, err := svc.PutScheduledUpdateGroupAction(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleAutoScaling_RecordLifecycleActionHeartbeat() { + svc := autoscaling.New(session.New()) + + params := &autoscaling.RecordLifecycleActionHeartbeatInput{ + AutoScalingGroupName: aws.String("ResourceName"), // Required + LifecycleActionToken: aws.String("LifecycleActionToken"), // Required + LifecycleHookName: aws.String("AsciiStringMaxLen255"), // Required + } + resp, err := svc.RecordLifecycleActionHeartbeat(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleAutoScaling_ResumeProcesses() { + svc := autoscaling.New(session.New()) + + params := &autoscaling.ScalingProcessQuery{ + AutoScalingGroupName: aws.String("ResourceName"), // Required + ScalingProcesses: []*string{ + aws.String("XmlStringMaxLen255"), // Required + // More values... + }, + } + resp, err := svc.ResumeProcesses(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleAutoScaling_SetDesiredCapacity() { + svc := autoscaling.New(session.New()) + + params := &autoscaling.SetDesiredCapacityInput{ + AutoScalingGroupName: aws.String("ResourceName"), // Required + DesiredCapacity: aws.Int64(1), // Required + HonorCooldown: aws.Bool(true), + } + resp, err := svc.SetDesiredCapacity(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleAutoScaling_SetInstanceHealth() { + svc := autoscaling.New(session.New()) + + params := &autoscaling.SetInstanceHealthInput{ + HealthStatus: aws.String("XmlStringMaxLen32"), // Required + InstanceId: aws.String("XmlStringMaxLen19"), // Required + ShouldRespectGracePeriod: aws.Bool(true), + } + resp, err := svc.SetInstanceHealth(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleAutoScaling_SetInstanceProtection() { + svc := autoscaling.New(session.New()) + + params := &autoscaling.SetInstanceProtectionInput{ + AutoScalingGroupName: aws.String("ResourceName"), // Required + InstanceIds: []*string{ // Required + aws.String("XmlStringMaxLen19"), // Required + // More values... 
+ }, + ProtectedFromScaleIn: aws.Bool(true), // Required + } + resp, err := svc.SetInstanceProtection(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleAutoScaling_SuspendProcesses() { + svc := autoscaling.New(session.New()) + + params := &autoscaling.ScalingProcessQuery{ + AutoScalingGroupName: aws.String("ResourceName"), // Required + ScalingProcesses: []*string{ + aws.String("XmlStringMaxLen255"), // Required + // More values... + }, + } + resp, err := svc.SuspendProcesses(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleAutoScaling_TerminateInstanceInAutoScalingGroup() { + svc := autoscaling.New(session.New()) + + params := &autoscaling.TerminateInstanceInAutoScalingGroupInput{ + InstanceId: aws.String("XmlStringMaxLen19"), // Required + ShouldDecrementDesiredCapacity: aws.Bool(true), // Required + } + resp, err := svc.TerminateInstanceInAutoScalingGroup(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleAutoScaling_UpdateAutoScalingGroup() { + svc := autoscaling.New(session.New()) + + params := &autoscaling.UpdateAutoScalingGroupInput{ + AutoScalingGroupName: aws.String("ResourceName"), // Required + AvailabilityZones: []*string{ + aws.String("XmlStringMaxLen255"), // Required + // More values... + }, + DefaultCooldown: aws.Int64(1), + DesiredCapacity: aws.Int64(1), + HealthCheckGracePeriod: aws.Int64(1), + HealthCheckType: aws.String("XmlStringMaxLen32"), + LaunchConfigurationName: aws.String("ResourceName"), + MaxSize: aws.Int64(1), + MinSize: aws.Int64(1), + NewInstancesProtectedFromScaleIn: aws.Bool(true), + PlacementGroup: aws.String("XmlStringMaxLen255"), + TerminationPolicies: []*string{ + aws.String("XmlStringMaxLen1600"), // Required + // More values... + }, + VPCZoneIdentifier: aws.String("XmlStringMaxLen255"), + } + resp, err := svc.UpdateAutoScalingGroup(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/autoscaling/service.go b/vendor/github.com/aws/aws-sdk-go/service/autoscaling/service.go new file mode 100644 index 000000000..8107b3e65 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/autoscaling/service.go @@ -0,0 +1,88 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +package autoscaling + +import ( + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/client" + "github.com/aws/aws-sdk-go/aws/client/metadata" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol/query" + "github.com/aws/aws-sdk-go/private/signer/v4" +) + +// Auto Scaling is designed to automatically launch or terminate EC2 instances +// based on user-defined policies, schedules, and health checks. Use this service +// in conjunction with the Amazon CloudWatch and Elastic Load Balancing services. 
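+//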
+// The service client's operations are safe to be used concurrently.
+// It is not safe to mutate any of the client's properties though.
+type AutoScaling struct {
+	*client.Client
+}
+
+// Used for custom client initialization logic
+var initClient func(*client.Client)
+
+// Used for custom request initialization logic
+var initRequest func(*request.Request)
+
+// A ServiceName is the name of the service the client will make API calls to.
+const ServiceName = "autoscaling"
+
+// New creates a new instance of the AutoScaling client with a session.
+// If additional configuration is needed for the client instance use the optional
+// aws.Config parameter to add your extra config.
+//
+// Example:
+//	// Create an AutoScaling client from just a session.
+//	svc := autoscaling.New(mySession)
+//
+//	// Create an AutoScaling client with additional configuration
+//	svc := autoscaling.New(mySession, aws.NewConfig().WithRegion("us-west-2"))
+func New(p client.ConfigProvider, cfgs ...*aws.Config) *AutoScaling {
+	c := p.ClientConfig(ServiceName, cfgs...)
+	return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion)
+}
+
+// newClient creates, initializes and returns a new service client instance.
+func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *AutoScaling {
+	svc := &AutoScaling{
+		Client: client.New(
+			cfg,
+			metadata.ClientInfo{
+				ServiceName:   ServiceName,
+				SigningRegion: signingRegion,
+				Endpoint:      endpoint,
+				APIVersion:    "2011-01-01",
+			},
+			handlers,
+		),
+	}
+
+	// Handlers
+	svc.Handlers.Sign.PushBack(v4.Sign)
+	svc.Handlers.Build.PushBack(query.Build)
+	svc.Handlers.Unmarshal.PushBack(query.Unmarshal)
+	svc.Handlers.UnmarshalMeta.PushBack(query.UnmarshalMeta)
+	svc.Handlers.UnmarshalError.PushBack(query.UnmarshalError)
+
+	// Run custom client initialization if present
+	if initClient != nil {
+		initClient(svc.Client)
+	}
+
+	return svc
+}
+
+// newRequest creates a new request for an AutoScaling operation and runs any
+// custom request initialization.
+func (c *AutoScaling) newRequest(op *request.Operation, params, data interface{}) *request.Request {
+	req := c.NewRequest(op, params, data)
+
+	// Run custom request initialization if present
+	if initRequest != nil {
+		initRequest(req)
+	}
+
+	return req
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/service/cloudformation/api.go b/vendor/github.com/aws/aws-sdk-go/service/cloudformation/api.go
new file mode 100644
index 000000000..d4bd8475e
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/cloudformation/api.go
@@ -0,0 +1,2294 @@
+// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT.
+
+// Package cloudformation provides a client for AWS CloudFormation.
+package cloudformation
+
+import (
+	"time"
+
+	"github.com/aws/aws-sdk-go/aws/awsutil"
+	"github.com/aws/aws-sdk-go/aws/request"
+)
+
+const opCancelUpdateStack = "CancelUpdateStack"
+
+// CancelUpdateStackRequest generates a request for the CancelUpdateStack operation.
+func (c *CloudFormation) CancelUpdateStackRequest(input *CancelUpdateStackInput) (req *request.Request, output *CancelUpdateStackOutput) {
+	op := &request.Operation{
+		Name:       opCancelUpdateStack,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &CancelUpdateStackInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &CancelUpdateStackOutput{}
+	req.Data = output
+	return
+}
+
+// Cancels an update on the specified stack.
If the call completes successfully, +// the stack rolls back the update and reverts to the previous stack configuration. +// +// You can cancel only stacks that are in the UPDATE_IN_PROGRESS state. +func (c *CloudFormation) CancelUpdateStack(input *CancelUpdateStackInput) (*CancelUpdateStackOutput, error) { + req, out := c.CancelUpdateStackRequest(input) + err := req.Send() + return out, err +} + +const opContinueUpdateRollback = "ContinueUpdateRollback" + +// ContinueUpdateRollbackRequest generates a request for the ContinueUpdateRollback operation. +func (c *CloudFormation) ContinueUpdateRollbackRequest(input *ContinueUpdateRollbackInput) (req *request.Request, output *ContinueUpdateRollbackOutput) { + op := &request.Operation{ + Name: opContinueUpdateRollback, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ContinueUpdateRollbackInput{} + } + + req = c.newRequest(op, input, output) + output = &ContinueUpdateRollbackOutput{} + req.Data = output + return +} + +// For a specified stack that is in the UPDATE_ROLLBACK_FAILED state, continues +// rolling it back to the UPDATE_ROLLBACK_COMPLETE state. Depending on the cause +// of the failure, you can manually fix the error (http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/troubleshooting.html#troubleshooting-errors-update-rollback-failed) +// and continue the rollback. By continuing the rollback, you can return your +// stack to a working state (the UPDATE_ROLLBACK_COMPLETE state), return the +// stack to its original settings, and then try to update the stack again. +// +// A stack goes into the UPDATE_ROLLBACK_FAILED state when AWS CloudFormation +// cannot roll back all changes after a failed stack update. For example, you +// might have a stack that is rolling back to an old database instance that +// was deleted outside of AWS CloudFormation. Because AWS CloudFormation doesn't +// know the database was deleted, it assumes that the database instance still +// exists and attempts to roll back to it, causing the update rollback to fail. +func (c *CloudFormation) ContinueUpdateRollback(input *ContinueUpdateRollbackInput) (*ContinueUpdateRollbackOutput, error) { + req, out := c.ContinueUpdateRollbackRequest(input) + err := req.Send() + return out, err +} + +const opCreateStack = "CreateStack" + +// CreateStackRequest generates a request for the CreateStack operation. +func (c *CloudFormation) CreateStackRequest(input *CreateStackInput) (req *request.Request, output *CreateStackOutput) { + op := &request.Operation{ + Name: opCreateStack, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateStackInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateStackOutput{} + req.Data = output + return +} + +// Creates a stack as specified in the template. After the call completes successfully, +// the stack creation starts. You can check the status of the stack via the +// DescribeStacks API. +func (c *CloudFormation) CreateStack(input *CreateStackInput) (*CreateStackOutput, error) { + req, out := c.CreateStackRequest(input) + err := req.Send() + return out, err +} + +const opDeleteStack = "DeleteStack" + +// DeleteStackRequest generates a request for the DeleteStack operation. 
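+//
+// The request/output pair returned here lets a caller customize the underlying
+// request before sending it, mirroring what the generated DeleteStack method
+// does internally, for example:
+//
+//	req, out := svc.DeleteStackRequest(params)
+//	err := req.Send() // on success, out is populated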
+func (c *CloudFormation) DeleteStackRequest(input *DeleteStackInput) (req *request.Request, output *DeleteStackOutput) { + op := &request.Operation{ + Name: opDeleteStack, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteStackInput{} + } + + req = c.newRequest(op, input, output) + output = &DeleteStackOutput{} + req.Data = output + return +} + +// Deletes a specified stack. Once the call completes successfully, stack deletion +// starts. Deleted stacks do not show up in the DescribeStacks API if the deletion +// has been completed successfully. +func (c *CloudFormation) DeleteStack(input *DeleteStackInput) (*DeleteStackOutput, error) { + req, out := c.DeleteStackRequest(input) + err := req.Send() + return out, err +} + +const opDescribeAccountLimits = "DescribeAccountLimits" + +// DescribeAccountLimitsRequest generates a request for the DescribeAccountLimits operation. +func (c *CloudFormation) DescribeAccountLimitsRequest(input *DescribeAccountLimitsInput) (req *request.Request, output *DescribeAccountLimitsOutput) { + op := &request.Operation{ + Name: opDescribeAccountLimits, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeAccountLimitsInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeAccountLimitsOutput{} + req.Data = output + return +} + +// Retrieves your account's AWS CloudFormation limits, such as the maximum number +// of stacks that you can create in your account. +func (c *CloudFormation) DescribeAccountLimits(input *DescribeAccountLimitsInput) (*DescribeAccountLimitsOutput, error) { + req, out := c.DescribeAccountLimitsRequest(input) + err := req.Send() + return out, err +} + +const opDescribeStackEvents = "DescribeStackEvents" + +// DescribeStackEventsRequest generates a request for the DescribeStackEvents operation. +func (c *CloudFormation) DescribeStackEventsRequest(input *DescribeStackEventsInput) (req *request.Request, output *DescribeStackEventsOutput) { + op := &request.Operation{ + Name: opDescribeStackEvents, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeStackEventsInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeStackEventsOutput{} + req.Data = output + return +} + +// Returns all stack related events for a specified stack. For more information +// about a stack's event history, go to Stacks (http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/concept-stack.html) +// in the AWS CloudFormation User Guide. +// +// You can list events for stacks that have failed to create or have been deleted +// by specifying the unique stack identifier (stack ID). 
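+//
+// An illustrative sketch only; myDeletedStackID stands in for a real stack ID:
+//
+//    out, err := svc.DescribeStackEvents(&cloudformation.DescribeStackEventsInput{
+//        StackName: aws.String(myDeletedStackID),
+//    })
+//    // on success, out.StackEvents holds the stack's event history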
+func (c *CloudFormation) DescribeStackEvents(input *DescribeStackEventsInput) (*DescribeStackEventsOutput, error) { + req, out := c.DescribeStackEventsRequest(input) + err := req.Send() + return out, err +} + +func (c *CloudFormation) DescribeStackEventsPages(input *DescribeStackEventsInput, fn func(p *DescribeStackEventsOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.DescribeStackEventsRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*DescribeStackEventsOutput), lastPage) + }) +} + +const opDescribeStackResource = "DescribeStackResource" + +// DescribeStackResourceRequest generates a request for the DescribeStackResource operation. +func (c *CloudFormation) DescribeStackResourceRequest(input *DescribeStackResourceInput) (req *request.Request, output *DescribeStackResourceOutput) { + op := &request.Operation{ + Name: opDescribeStackResource, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeStackResourceInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeStackResourceOutput{} + req.Data = output + return +} + +// Returns a description of the specified resource in the specified stack. +// +// For deleted stacks, DescribeStackResource returns resource information for +// up to 90 days after the stack has been deleted. +func (c *CloudFormation) DescribeStackResource(input *DescribeStackResourceInput) (*DescribeStackResourceOutput, error) { + req, out := c.DescribeStackResourceRequest(input) + err := req.Send() + return out, err +} + +const opDescribeStackResources = "DescribeStackResources" + +// DescribeStackResourcesRequest generates a request for the DescribeStackResources operation. +func (c *CloudFormation) DescribeStackResourcesRequest(input *DescribeStackResourcesInput) (req *request.Request, output *DescribeStackResourcesOutput) { + op := &request.Operation{ + Name: opDescribeStackResources, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeStackResourcesInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeStackResourcesOutput{} + req.Data = output + return +} + +// Returns AWS resource descriptions for running and deleted stacks. If StackName +// is specified, all the associated resources that are part of the stack are +// returned. If PhysicalResourceId is specified, the associated resources of +// the stack that the resource belongs to are returned. +// +// Only the first 100 resources will be returned. If your stack has more resources +// than this, you should use ListStackResources instead. For deleted stacks, +// DescribeStackResources returns resource information for up to 90 days after +// the stack has been deleted. +// +// You must specify either StackName or PhysicalResourceId, but not both. In +// addition, you can specify LogicalResourceId to filter the returned result. +// For more information about resources, the LogicalResourceId and PhysicalResourceId, +// go to the AWS CloudFormation User Guide (http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/). +// +// A ValidationError is returned if you specify both StackName and PhysicalResourceId +// in the same request. 
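+//
+// An illustrative sketch only; passing PhysicalResourceId alone looks up the
+// stack that owns a hypothetical EC2 instance (adding StackName as well would
+// yield the ValidationError noted above):
+//
+//    out, err := svc.DescribeStackResources(&cloudformation.DescribeStackResourcesInput{
+//        PhysicalResourceId: aws.String("i-0123456789abcdef0"),
+//    })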
+func (c *CloudFormation) DescribeStackResources(input *DescribeStackResourcesInput) (*DescribeStackResourcesOutput, error) { + req, out := c.DescribeStackResourcesRequest(input) + err := req.Send() + return out, err +} + +const opDescribeStacks = "DescribeStacks" + +// DescribeStacksRequest generates a request for the DescribeStacks operation. +func (c *CloudFormation) DescribeStacksRequest(input *DescribeStacksInput) (req *request.Request, output *DescribeStacksOutput) { + op := &request.Operation{ + Name: opDescribeStacks, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeStacksInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeStacksOutput{} + req.Data = output + return +} + +// Returns the description for the specified stack; if no stack name was specified, +// then it returns the description for all the stacks created. +func (c *CloudFormation) DescribeStacks(input *DescribeStacksInput) (*DescribeStacksOutput, error) { + req, out := c.DescribeStacksRequest(input) + err := req.Send() + return out, err +} + +func (c *CloudFormation) DescribeStacksPages(input *DescribeStacksInput, fn func(p *DescribeStacksOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.DescribeStacksRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*DescribeStacksOutput), lastPage) + }) +} + +const opEstimateTemplateCost = "EstimateTemplateCost" + +// EstimateTemplateCostRequest generates a request for the EstimateTemplateCost operation. +func (c *CloudFormation) EstimateTemplateCostRequest(input *EstimateTemplateCostInput) (req *request.Request, output *EstimateTemplateCostOutput) { + op := &request.Operation{ + Name: opEstimateTemplateCost, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &EstimateTemplateCostInput{} + } + + req = c.newRequest(op, input, output) + output = &EstimateTemplateCostOutput{} + req.Data = output + return +} + +// Returns the estimated monthly cost of a template. The return value is an +// AWS Simple Monthly Calculator URL with a query string that describes the +// resources required to run the template. +func (c *CloudFormation) EstimateTemplateCost(input *EstimateTemplateCostInput) (*EstimateTemplateCostOutput, error) { + req, out := c.EstimateTemplateCostRequest(input) + err := req.Send() + return out, err +} + +const opGetStackPolicy = "GetStackPolicy" + +// GetStackPolicyRequest generates a request for the GetStackPolicy operation. +func (c *CloudFormation) GetStackPolicyRequest(input *GetStackPolicyInput) (req *request.Request, output *GetStackPolicyOutput) { + op := &request.Operation{ + Name: opGetStackPolicy, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetStackPolicyInput{} + } + + req = c.newRequest(op, input, output) + output = &GetStackPolicyOutput{} + req.Data = output + return +} + +// Returns the stack policy for a specified stack. If a stack doesn't have a +// policy, a null value is returned. 
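+//
+// An illustrative sketch only, assuming an existing client svc:
+//
+//    out, err := svc.GetStackPolicy(&cloudformation.GetStackPolicyInput{
+//        StackName: aws.String("my-stack"),
+//    })
+//    // out.StackPolicyBody is nil when the stack has no policy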
+func (c *CloudFormation) GetStackPolicy(input *GetStackPolicyInput) (*GetStackPolicyOutput, error) { + req, out := c.GetStackPolicyRequest(input) + err := req.Send() + return out, err +} + +const opGetTemplate = "GetTemplate" + +// GetTemplateRequest generates a request for the GetTemplate operation. +func (c *CloudFormation) GetTemplateRequest(input *GetTemplateInput) (req *request.Request, output *GetTemplateOutput) { + op := &request.Operation{ + Name: opGetTemplate, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetTemplateInput{} + } + + req = c.newRequest(op, input, output) + output = &GetTemplateOutput{} + req.Data = output + return +} + +// Returns the template body for a specified stack. You can get the template +// for running or deleted stacks. +// +// For deleted stacks, GetTemplate returns the template for up to 90 days after +// the stack has been deleted. +// +// If the template does not exist, a ValidationError is returned. +func (c *CloudFormation) GetTemplate(input *GetTemplateInput) (*GetTemplateOutput, error) { + req, out := c.GetTemplateRequest(input) + err := req.Send() + return out, err +} + +const opGetTemplateSummary = "GetTemplateSummary" + +// GetTemplateSummaryRequest generates a request for the GetTemplateSummary operation. +func (c *CloudFormation) GetTemplateSummaryRequest(input *GetTemplateSummaryInput) (req *request.Request, output *GetTemplateSummaryOutput) { + op := &request.Operation{ + Name: opGetTemplateSummary, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetTemplateSummaryInput{} + } + + req = c.newRequest(op, input, output) + output = &GetTemplateSummaryOutput{} + req.Data = output + return +} + +// Returns information about a new or existing template. The GetTemplateSummary +// action is useful for viewing parameter information, such as default parameter +// values and parameter types, before you create or update a stack. +// +// You can use the GetTemplateSummary action when you submit a template, or +// you can get template information for a running or deleted stack. +// +// For deleted stacks, GetTemplateSummary returns the template information +// for up to 90 days after the stack has been deleted. If the template does +// not exist, a ValidationError is returned. +func (c *CloudFormation) GetTemplateSummary(input *GetTemplateSummaryInput) (*GetTemplateSummaryOutput, error) { + req, out := c.GetTemplateSummaryRequest(input) + err := req.Send() + return out, err +} + +const opListStackResources = "ListStackResources" + +// ListStackResourcesRequest generates a request for the ListStackResources operation. +func (c *CloudFormation) ListStackResourcesRequest(input *ListStackResourcesInput) (req *request.Request, output *ListStackResourcesOutput) { + op := &request.Operation{ + Name: opListStackResources, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListStackResourcesInput{} + } + + req = c.newRequest(op, input, output) + output = &ListStackResourcesOutput{} + req.Data = output + return +} + +// Returns descriptions of all resources of the specified stack. +// +// For deleted stacks, ListStackResources returns resource information for +// up to 90 days after the stack has been deleted. 
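+//
+// An illustrative sketch only; the Pages variant defined below follows
+// NextToken automatically:
+//
+//    err := svc.ListStackResourcesPages(&cloudformation.ListStackResourcesInput{
+//        StackName: aws.String("my-stack"),
+//    }, func(page *cloudformation.ListStackResourcesOutput, lastPage bool) bool {
+//        // inspect page.StackResourceSummaries; return false to stop paging
+//        return true
+//    })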
+func (c *CloudFormation) ListStackResources(input *ListStackResourcesInput) (*ListStackResourcesOutput, error) { + req, out := c.ListStackResourcesRequest(input) + err := req.Send() + return out, err +} + +func (c *CloudFormation) ListStackResourcesPages(input *ListStackResourcesInput, fn func(p *ListStackResourcesOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.ListStackResourcesRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*ListStackResourcesOutput), lastPage) + }) +} + +const opListStacks = "ListStacks" + +// ListStacksRequest generates a request for the ListStacks operation. +func (c *CloudFormation) ListStacksRequest(input *ListStacksInput) (req *request.Request, output *ListStacksOutput) { + op := &request.Operation{ + Name: opListStacks, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListStacksInput{} + } + + req = c.newRequest(op, input, output) + output = &ListStacksOutput{} + req.Data = output + return +} + +// Returns the summary information for stacks whose status matches the specified +// StackStatusFilter. Summary information for stacks that have been deleted +// is kept for 90 days after the stack is deleted. If no StackStatusFilter is +// specified, summary information for all stacks is returned (including existing +// stacks and stacks that have been deleted). +func (c *CloudFormation) ListStacks(input *ListStacksInput) (*ListStacksOutput, error) { + req, out := c.ListStacksRequest(input) + err := req.Send() + return out, err +} + +func (c *CloudFormation) ListStacksPages(input *ListStacksInput, fn func(p *ListStacksOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.ListStacksRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*ListStacksOutput), lastPage) + }) +} + +const opSetStackPolicy = "SetStackPolicy" + +// SetStackPolicyRequest generates a request for the SetStackPolicy operation. +func (c *CloudFormation) SetStackPolicyRequest(input *SetStackPolicyInput) (req *request.Request, output *SetStackPolicyOutput) { + op := &request.Operation{ + Name: opSetStackPolicy, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &SetStackPolicyInput{} + } + + req = c.newRequest(op, input, output) + output = &SetStackPolicyOutput{} + req.Data = output + return +} + +// Sets a stack policy for a specified stack. +func (c *CloudFormation) SetStackPolicy(input *SetStackPolicyInput) (*SetStackPolicyOutput, error) { + req, out := c.SetStackPolicyRequest(input) + err := req.Send() + return out, err +} + +const opSignalResource = "SignalResource" + +// SignalResourceRequest generates a request for the SignalResource operation. 
+func (c *CloudFormation) SignalResourceRequest(input *SignalResourceInput) (req *request.Request, output *SignalResourceOutput) { + op := &request.Operation{ + Name: opSignalResource, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &SignalResourceInput{} + } + + req = c.newRequest(op, input, output) + output = &SignalResourceOutput{} + req.Data = output + return +} + +// Sends a signal to the specified resource with a success or failure status. +// You can use the SignalResource API in conjunction with a creation policy +// or update policy. AWS CloudFormation doesn't proceed with a stack creation +// or update until resources receive the required number of signals or the timeout +// period is exceeded. The SignalResource API is useful in cases where you want +// to send signals from anywhere other than an Amazon EC2 instance. +func (c *CloudFormation) SignalResource(input *SignalResourceInput) (*SignalResourceOutput, error) { + req, out := c.SignalResourceRequest(input) + err := req.Send() + return out, err +} + +const opUpdateStack = "UpdateStack" + +// UpdateStackRequest generates a request for the UpdateStack operation. +func (c *CloudFormation) UpdateStackRequest(input *UpdateStackInput) (req *request.Request, output *UpdateStackOutput) { + op := &request.Operation{ + Name: opUpdateStack, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UpdateStackInput{} + } + + req = c.newRequest(op, input, output) + output = &UpdateStackOutput{} + req.Data = output + return +} + +// Updates a stack as specified in the template. After the call completes successfully, +// the stack update starts. You can check the status of the stack via the DescribeStacks +// action. +// +// To get a copy of the template for an existing stack, you can use the GetTemplate +// action. +// +// Tags that were associated with this stack during creation time will still +// be associated with the stack after an UpdateStack operation. +// +// For more information about creating an update template, updating a stack, +// and monitoring the progress of the update, see Updating a Stack (http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/using-cfn-updating-stacks.html). +func (c *CloudFormation) UpdateStack(input *UpdateStackInput) (*UpdateStackOutput, error) { + req, out := c.UpdateStackRequest(input) + err := req.Send() + return out, err +} + +const opValidateTemplate = "ValidateTemplate" + +// ValidateTemplateRequest generates a request for the ValidateTemplate operation. +func (c *CloudFormation) ValidateTemplateRequest(input *ValidateTemplateInput) (req *request.Request, output *ValidateTemplateOutput) { + op := &request.Operation{ + Name: opValidateTemplate, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ValidateTemplateInput{} + } + + req = c.newRequest(op, input, output) + output = &ValidateTemplateOutput{} + req.Data = output + return +} + +// Validates a specified template. +func (c *CloudFormation) ValidateTemplate(input *ValidateTemplateInput) (*ValidateTemplateOutput, error) { + req, out := c.ValidateTemplateRequest(input) + err := req.Send() + return out, err +} + +// The AccountLimit data type. +type AccountLimit struct { + _ struct{} `type:"structure"` + + // The name of the account limit. Currently, the only account limit is StackLimit. + Name *string `type:"string"` + + // The value that is associated with the account limit name. 
+ Value *int64 `type:"integer"` +} + +// String returns the string representation +func (s AccountLimit) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AccountLimit) GoString() string { + return s.String() +} + +// The input for the CancelUpdateStack action. +type CancelUpdateStackInput struct { + _ struct{} `type:"structure"` + + // The name or the unique stack ID that is associated with the stack. + StackName *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s CancelUpdateStackInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CancelUpdateStackInput) GoString() string { + return s.String() +} + +type CancelUpdateStackOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s CancelUpdateStackOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CancelUpdateStackOutput) GoString() string { + return s.String() +} + +// The input for the ContinueUpdateRollback action. +type ContinueUpdateRollbackInput struct { + _ struct{} `type:"structure"` + + // The name or the unique ID of the stack that you want to continue rolling + // back. + StackName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s ContinueUpdateRollbackInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ContinueUpdateRollbackInput) GoString() string { + return s.String() +} + +// The output for a ContinueUpdateRollback action. +type ContinueUpdateRollbackOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s ContinueUpdateRollbackOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ContinueUpdateRollbackOutput) GoString() string { + return s.String() +} + +// The input for CreateStack action. +type CreateStackInput struct { + _ struct{} `type:"structure"` + + // A list of capabilities that you must specify before AWS CloudFormation can + // create or update certain stacks. Some stack templates might include resources + // that can affect permissions in your AWS account. For those stacks, you must + // explicitly acknowledge their capabilities by specifying this parameter. + // + // Currently, the only valid value is CAPABILITY_IAM, which is required for + // the following resources: AWS::IAM::AccessKey (http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-iam-accesskey.html), + // AWS::IAM::Group (http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-iam-group.html), + // AWS::IAM::InstanceProfile (http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-iam-instanceprofile.html), + // AWS::IAM::Policy (http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-iam-policy.html), + // AWS::IAM::Role (http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-iam-role.html), + // AWS::IAM::User (http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-iam-user.html), + // and AWS::IAM::UserToGroupAddition (http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-iam-addusertogroup.html). 
+ // If your stack template contains these resources, we recommend that you review + // any permissions associated with them. If you don't specify this parameter, + // this action returns an InsufficientCapabilities error. + Capabilities []*string `type:"list"` + + // Set to true to disable rollback of the stack if stack creation failed. You + // can specify either DisableRollback or OnFailure, but not both. + // + // Default: false + DisableRollback *bool `type:"boolean"` + + // The Simple Notification Service (SNS) topic ARNs to publish stack related + // events. You can find your SNS topic ARNs using the SNS console (http://console.aws.amazon.com/sns) + // or your Command Line Interface (CLI). + NotificationARNs []*string `type:"list"` + + // Determines what action will be taken if stack creation fails. This must be + // one of: DO_NOTHING, ROLLBACK, or DELETE. You can specify either OnFailure + // or DisableRollback, but not both. + // + // Default: ROLLBACK + OnFailure *string `type:"string" enum:"OnFailure"` + + // A list of Parameter structures that specify input parameters for the stack. + // For more information, see the Parameter (http://docs.aws.amazon.com/AWSCloudFormation/latest/APIReference/API_Parameter.html) + // data type. + Parameters []*Parameter `type:"list"` + + // The template resource types that you have permissions to work with for this + // create stack action, such as AWS::EC2::Instance, AWS::EC2::*, or Custom::MyCustomInstance. + // Use the following syntax to describe template resource types: AWS::* (for + // all AWS resource), Custom::* (for all custom resources), Custom::logical_ID + // (for a specific custom resource), AWS::service_name::* (for all resources + // of a particular AWS service), and AWS::service_name::resource_logical_ID + // (for a specific AWS resource). + // + // If the list of resource types doesn't include a resource that you're creating, + // the stack creation fails. By default, AWS CloudFormation grants permissions + // to all resource types. AWS Identity and Access Management (IAM) uses this + // parameter for AWS CloudFormation-specific condition keys in IAM policies. + // For more information, see Controlling Access with AWS Identity and Access + // Management (http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/using-iam-template.html). + ResourceTypes []*string `type:"list"` + + // The name that is associated with the stack. The name must be unique in the + // region in which you are creating the stack. + // + // A stack name can contain only alphanumeric characters (case sensitive) and + // hyphens. It must start with an alphabetic character and cannot be longer + // than 128 characters. + StackName *string `type:"string" required:"true"` + + // Structure containing the stack policy body. For more information, go to + // Prevent Updates to Stack Resources (http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/protect-stack-resources.html) + // in the AWS CloudFormation User Guide. You can specify either the StackPolicyBody + // or the StackPolicyURL parameter, but not both. + StackPolicyBody *string `min:"1" type:"string"` + + // Location of a file containing the stack policy. The URL must point to a policy + // (max size: 16KB) located in an S3 bucket in the same region as the stack. + // You can specify either the StackPolicyBody or the StackPolicyURL parameter, + // but not both. + StackPolicyURL *string `min:"1" type:"string"` + + // Key-value pairs to associate with this stack. 
AWS CloudFormation also propagates + // these tags to the resources created in the stack. A maximum number of 10 + // tags can be specified. + Tags []*Tag `type:"list"` + + // Structure containing the template body with a minimum length of 1 byte and + // a maximum length of 51,200 bytes. For more information, go to Template Anatomy + // (http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/template-anatomy.html) + // in the AWS CloudFormation User Guide. + // + // Conditional: You must specify either the TemplateBody or the TemplateURL + // parameter, but not both. + TemplateBody *string `min:"1" type:"string"` + + // Location of file containing the template body. The URL must point to a template + // (max size: 460,800 bytes) that is located in an Amazon S3 bucket. For more + // information, go to the Template Anatomy (http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/template-anatomy.html) + // in the AWS CloudFormation User Guide. + // + // Conditional: You must specify either the TemplateBody or the TemplateURL + // parameter, but not both. + TemplateURL *string `min:"1" type:"string"` + + // The amount of time that can pass before the stack status becomes CREATE_FAILED; + // if DisableRollback is not set or is set to false, the stack will be rolled + // back. + TimeoutInMinutes *int64 `min:"1" type:"integer"` +} + +// String returns the string representation +func (s CreateStackInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateStackInput) GoString() string { + return s.String() +} + +// The output for a CreateStack action. +type CreateStackOutput struct { + _ struct{} `type:"structure"` + + // Unique identifier of the stack. + StackId *string `type:"string"` +} + +// String returns the string representation +func (s CreateStackOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateStackOutput) GoString() string { + return s.String() +} + +// The input for DeleteStack action. +type DeleteStackInput struct { + _ struct{} `type:"structure"` + + // The name or the unique stack ID that is associated with the stack. + StackName *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteStackInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteStackInput) GoString() string { + return s.String() +} + +type DeleteStackOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteStackOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteStackOutput) GoString() string { + return s.String() +} + +// The input for the DescribeAccountLimits action. +type DescribeAccountLimitsInput struct { + _ struct{} `type:"structure"` + + // A string that identifies the next page of limits that you want to retrieve. + NextToken *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s DescribeAccountLimitsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeAccountLimitsInput) GoString() string { + return s.String() +} + +// The output for the DescribeAccountLimits action. 
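+//
+// An illustrative sketch of reading these values (assumes an existing client
+// svc):
+//
+//    out, err := svc.DescribeAccountLimits(&cloudformation.DescribeAccountLimitsInput{})
+//    if err == nil {
+//        for _, l := range out.AccountLimits {
+//            fmt.Printf("%s = %d\n", *l.Name, *l.Value)
+//        }
+//    }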
+type DescribeAccountLimitsOutput struct {
+	_ struct{} `type:"structure"`
+
+	// An account limit structure that contains a list of AWS CloudFormation account
+	// limits and their values.
+	AccountLimits []*AccountLimit `type:"list"`
+
+	// If the output exceeds 1 MB in size, a string that identifies the next page
+	// of limits. If no additional page exists, this value is null.
+	NextToken *string `min:"1" type:"string"`
+}
+
+// String returns the string representation
+func (s DescribeAccountLimitsOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeAccountLimitsOutput) GoString() string {
+	return s.String()
+}
+
+// The input for DescribeStackEvents action.
+type DescribeStackEventsInput struct {
+	_ struct{} `type:"structure"`
+
+	// A string that identifies the next page of events that you want to retrieve.
+	NextToken *string `min:"1" type:"string"`
+
+	// The name or the unique stack ID that is associated with the stack, which
+	// are not always interchangeable:
+	//
+	// Running stacks: You can specify either the stack's name or its unique stack
+	// ID. Deleted stacks: You must specify the unique stack ID. Default: There
+	// is no default value.
+	StackName *string `type:"string"`
+}
+
+// String returns the string representation
+func (s DescribeStackEventsInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeStackEventsInput) GoString() string {
+	return s.String()
+}
+
+// The output for a DescribeStackEvents action.
+type DescribeStackEventsOutput struct {
+	_ struct{} `type:"structure"`
+
+	// If the output exceeds 1 MB in size, a string that identifies the next page
+	// of events. If no additional page exists, this value is null.
+	NextToken *string `min:"1" type:"string"`
+
+	// A list of StackEvents structures.
+	StackEvents []*StackEvent `type:"list"`
+}
+
+// String returns the string representation
+func (s DescribeStackEventsOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeStackEventsOutput) GoString() string {
+	return s.String()
+}
+
+// The input for DescribeStackResource action.
+type DescribeStackResourceInput struct {
+	_ struct{} `type:"structure"`
+
+	// The logical name of the resource as specified in the template.
+	//
+	// Default: There is no default value.
+	LogicalResourceId *string `type:"string" required:"true"`
+
+	// The name or the unique stack ID that is associated with the stack, which
+	// are not always interchangeable:
+	//
+	// Running stacks: You can specify either the stack's name or its unique stack
+	// ID. Deleted stacks: You must specify the unique stack ID. Default: There
+	// is no default value.
+	StackName *string `type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s DescribeStackResourceInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeStackResourceInput) GoString() string {
+	return s.String()
+}
+
+// The output for a DescribeStackResource action.
+type DescribeStackResourceOutput struct {
+	_ struct{} `type:"structure"`
+
+	// A StackResourceDetail structure containing the description of the specified
+	// resource in the specified stack. 
+ StackResourceDetail *StackResourceDetail `type:"structure"` +} + +// String returns the string representation +func (s DescribeStackResourceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeStackResourceOutput) GoString() string { + return s.String() +} + +// The input for DescribeStackResources action. +type DescribeStackResourcesInput struct { + _ struct{} `type:"structure"` + + // The logical name of the resource as specified in the template. + // + // Default: There is no default value. + LogicalResourceId *string `type:"string"` + + // The name or unique identifier that corresponds to a physical instance ID + // of a resource supported by AWS CloudFormation. + // + // For example, for an Amazon Elastic Compute Cloud (EC2) instance, PhysicalResourceId + // corresponds to the InstanceId. You can pass the EC2 InstanceId to DescribeStackResources + // to find which stack the instance belongs to and what other resources are + // part of the stack. + // + // Required: Conditional. If you do not specify PhysicalResourceId, you must + // specify StackName. + // + // Default: There is no default value. + PhysicalResourceId *string `type:"string"` + + // The name or the unique stack ID that is associated with the stack, which + // are not always interchangeable: + // + // Running stacks: You can specify either the stack's name or its unique stack + // ID. Deleted stacks: You must specify the unique stack ID. Default: There + // is no default value. + // + // Required: Conditional. If you do not specify StackName, you must specify + // PhysicalResourceId. + StackName *string `type:"string"` +} + +// String returns the string representation +func (s DescribeStackResourcesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeStackResourcesInput) GoString() string { + return s.String() +} + +// The output for a DescribeStackResources action. +type DescribeStackResourcesOutput struct { + _ struct{} `type:"structure"` + + // A list of StackResource structures. + StackResources []*StackResource `type:"list"` +} + +// String returns the string representation +func (s DescribeStackResourcesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeStackResourcesOutput) GoString() string { + return s.String() +} + +// The input for DescribeStacks action. +type DescribeStacksInput struct { + _ struct{} `type:"structure"` + + // A string that identifies the next page of stacks that you want to retrieve. + NextToken *string `min:"1" type:"string"` + + // The name or the unique stack ID that is associated with the stack, which + // are not always interchangeable: + // + // Running stacks: You can specify either the stack's name or its unique stack + // ID. Deleted stacks: You must specify the unique stack ID. Default: There + // is no default value. + StackName *string `type:"string"` +} + +// String returns the string representation +func (s DescribeStacksInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeStacksInput) GoString() string { + return s.String() +} + +// The output for a DescribeStacks action. +type DescribeStacksOutput struct { + _ struct{} `type:"structure"` + + // If the output exceeds 1 MB in size, a string that identifies the next page + // of stacks. If no additional page exists, this value is null. 
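+	//
+	// When paging manually, pass this value back in DescribeStacksInput.NextToken
+	// on the next call; DescribeStacksPages does this automatically. Sketch only:
+	//
+	//    next := &cloudformation.DescribeStacksInput{NextToken: out.NextToken}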
+ NextToken *string `min:"1" type:"string"` + + // A list of stack structures. + Stacks []*Stack `type:"list"` +} + +// String returns the string representation +func (s DescribeStacksOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeStacksOutput) GoString() string { + return s.String() +} + +type EstimateTemplateCostInput struct { + _ struct{} `type:"structure"` + + // A list of Parameter structures that specify input parameters. + Parameters []*Parameter `type:"list"` + + // Structure containing the template body with a minimum length of 1 byte and + // a maximum length of 51,200 bytes. (For more information, go to Template Anatomy + // (http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/template-anatomy.html) + // in the AWS CloudFormation User Guide.) + // + // Conditional: You must pass TemplateBody or TemplateURL. If both are passed, + // only TemplateBody is used. + TemplateBody *string `min:"1" type:"string"` + + // Location of file containing the template body. The URL must point to a template + // that is located in an Amazon S3 bucket. For more information, go to Template + // Anatomy (http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/template-anatomy.html) + // in the AWS CloudFormation User Guide. + // + // Conditional: You must pass TemplateURL or TemplateBody. If both are passed, + // only TemplateBody is used. + TemplateURL *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s EstimateTemplateCostInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s EstimateTemplateCostInput) GoString() string { + return s.String() +} + +// The output for a EstimateTemplateCost action. +type EstimateTemplateCostOutput struct { + _ struct{} `type:"structure"` + + // An AWS Simple Monthly Calculator URL with a query string that describes the + // resources required to run the template. + Url *string `type:"string"` +} + +// String returns the string representation +func (s EstimateTemplateCostOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s EstimateTemplateCostOutput) GoString() string { + return s.String() +} + +// The input for the GetStackPolicy action. +type GetStackPolicyInput struct { + _ struct{} `type:"structure"` + + // The name or unique stack ID that is associated with the stack whose policy + // you want to get. + StackName *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s GetStackPolicyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetStackPolicyInput) GoString() string { + return s.String() +} + +// The output for the GetStackPolicy action. +type GetStackPolicyOutput struct { + _ struct{} `type:"structure"` + + // Structure containing the stack policy body. (For more information, go to + // Prevent Updates to Stack Resources (http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/protect-stack-resources.html) + // in the AWS CloudFormation User Guide.) + StackPolicyBody *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s GetStackPolicyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetStackPolicyOutput) GoString() string { + return s.String() +} + +// The input for a GetTemplate action. 
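+//
+// An illustrative sketch only, assuming an existing client svc:
+//
+//    out, err := svc.GetTemplate(&cloudformation.GetTemplateInput{
+//        StackName: aws.String("my-stack"),
+//    })
+//    // on success, *out.TemplateBody holds the template text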
+type GetTemplateInput struct { + _ struct{} `type:"structure"` + + // The name or the unique stack ID that is associated with the stack, which + // are not always interchangeable: + // + // Running stacks: You can specify either the stack's name or its unique stack + // ID. Deleted stacks: You must specify the unique stack ID. Default: There + // is no default value. + StackName *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s GetTemplateInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetTemplateInput) GoString() string { + return s.String() +} + +// The output for GetTemplate action. +type GetTemplateOutput struct { + _ struct{} `type:"structure"` + + // Structure containing the template body. (For more information, go to Template + // Anatomy (http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/template-anatomy.html) + // in the AWS CloudFormation User Guide.) + TemplateBody *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s GetTemplateOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetTemplateOutput) GoString() string { + return s.String() +} + +// The input for the GetTemplateSummary action. +type GetTemplateSummaryInput struct { + _ struct{} `type:"structure"` + + // The name or the stack ID that is associated with the stack, which are not + // always interchangeable. For running stacks, you can specify either the stack's + // name or its unique stack ID. For deleted stack, you must specify the unique + // stack ID. + // + // Conditional: You must specify only one of the following parameters: StackName, + // TemplateBody, or TemplateURL. + StackName *string `min:"1" type:"string"` + + // Structure containing the template body with a minimum length of 1 byte and + // a maximum length of 51,200 bytes. For more information about templates, see + // Template Anatomy (http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/template-anatomy.html) + // in the AWS CloudFormation User Guide. + // + // Conditional: You must specify only one of the following parameters: StackName, + // TemplateBody, or TemplateURL. + TemplateBody *string `min:"1" type:"string"` + + // Location of file containing the template body. The URL must point to a template + // (max size: 460,800 bytes) that is located in an Amazon S3 bucket. For more + // information about templates, see Template Anatomy (http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/template-anatomy.html) + // in the AWS CloudFormation User Guide. + // + // Conditional: You must specify only one of the following parameters: StackName, + // TemplateBody, or TemplateURL. + TemplateURL *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s GetTemplateSummaryInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetTemplateSummaryInput) GoString() string { + return s.String() +} + +// The output for the GetTemplateSummary action. +type GetTemplateSummaryOutput struct { + _ struct{} `type:"structure"` + + // The capabilities found within the template. Currently, AWS CloudFormation + // supports only the CAPABILITY_IAM capability. 
If your template contains IAM + // resources, you must specify the CAPABILITY_IAM value for this parameter when + // you use the CreateStack or UpdateStack actions with your template; otherwise, + // those actions return an InsufficientCapabilities error. + Capabilities []*string `type:"list"` + + // The list of resources that generated the values in the Capabilities response + // element. + CapabilitiesReason *string `type:"string"` + + // The value that is defined in the Description property of the template. + Description *string `type:"string"` + + // The value that is defined for the Metadata property of the template. + Metadata *string `type:"string"` + + // A list of parameter declarations that describe various properties for each + // parameter. + Parameters []*ParameterDeclaration `type:"list"` + + // A list of all the template resource types that are defined in the template, + // such as AWS::EC2::Instance, AWS::Dynamo::Table, and Custom::MyCustomInstance. + ResourceTypes []*string `type:"list"` + + // The AWS template format version, which identifies the capabilities of the + // template. + Version *string `type:"string"` +} + +// String returns the string representation +func (s GetTemplateSummaryOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetTemplateSummaryOutput) GoString() string { + return s.String() +} + +// The input for the ListStackResource action. +type ListStackResourcesInput struct { + _ struct{} `type:"structure"` + + // A string that identifies the next page of stack resources that you want to + // retrieve. + NextToken *string `min:"1" type:"string"` + + // The name or the unique stack ID that is associated with the stack, which + // are not always interchangeable: + // + // Running stacks: You can specify either the stack's name or its unique stack + // ID. Deleted stacks: You must specify the unique stack ID. Default: There + // is no default value. + StackName *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s ListStackResourcesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListStackResourcesInput) GoString() string { + return s.String() +} + +// The output for a ListStackResources action. +type ListStackResourcesOutput struct { + _ struct{} `type:"structure"` + + // If the output exceeds 1 MB in size, a string that identifies the next page + // of stack resources. If no additional page exists, this value is null. + NextToken *string `min:"1" type:"string"` + + // A list of StackResourceSummary structures. + StackResourceSummaries []*StackResourceSummary `type:"list"` +} + +// String returns the string representation +func (s ListStackResourcesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListStackResourcesOutput) GoString() string { + return s.String() +} + +// The input for ListStacks action. +type ListStacksInput struct { + _ struct{} `type:"structure"` + + // A string that identifies the next page of stacks that you want to retrieve. + NextToken *string `min:"1" type:"string"` + + // Stack status to use as a filter. Specify one or more stack status codes to + // list only stacks with the specified status codes. For a complete list of + // stack status codes, see the StackStatus parameter of the Stack data type. 
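+	//
+	// For example (illustrative values only):
+	//
+	//    StackStatusFilter: []*string{
+	//        aws.String("CREATE_COMPLETE"),
+	//        aws.String("DELETE_COMPLETE"),
+	//    },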
+	StackStatusFilter []*string `type:"list"`
+}
+
+// String returns the string representation
+func (s ListStacksInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListStacksInput) GoString() string {
+	return s.String()
+}
+
+// The output for ListStacks action.
+type ListStacksOutput struct {
+	_ struct{} `type:"structure"`
+
+	// If the output exceeds 1 MB in size, a string that identifies the next page
+	// of stacks. If no additional page exists, this value is null.
+	NextToken *string `min:"1" type:"string"`
+
+	// A list of StackSummary structures containing information about the specified
+	// stacks.
+	StackSummaries []*StackSummary `type:"list"`
+}
+
+// String returns the string representation
+func (s ListStacksOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListStacksOutput) GoString() string {
+	return s.String()
+}
+
+// The Output data type.
+type Output struct {
+	_ struct{} `type:"structure"`
+
+	// User defined description associated with the output.
+	Description *string `type:"string"`
+
+	// The key associated with the output.
+	OutputKey *string `type:"string"`
+
+	// The value associated with the output.
+	OutputValue *string `type:"string"`
+}
+
+// String returns the string representation
+func (s Output) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s Output) GoString() string {
+	return s.String()
+}
+
+// The Parameter data type.
+type Parameter struct {
+	_ struct{} `type:"structure"`
+
+	// The key associated with the parameter. If you don't specify a key and value
+	// for a particular parameter, AWS CloudFormation uses the default value that
+	// is specified in your template.
+	ParameterKey *string `type:"string"`
+
+	// The value associated with the parameter.
+	ParameterValue *string `type:"string"`
+
+	// During a stack update, use the existing parameter value that the stack is
+	// using for a given parameter key. If you specify true, do not specify a parameter
+	// value.
+	UsePreviousValue *bool `type:"boolean"`
+}
+
+// String returns the string representation
+func (s Parameter) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s Parameter) GoString() string {
+	return s.String()
+}
+
+// A set of criteria that AWS CloudFormation uses to validate parameter values.
+// Although other constraints might be defined in the stack template, AWS CloudFormation
+// returns only the AllowedValues property.
+type ParameterConstraints struct {
+	_ struct{} `type:"structure"`
+
+	// A list of values that are permitted for a parameter.
+	AllowedValues []*string `type:"list"`
+}
+
+// String returns the string representation
+func (s ParameterConstraints) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ParameterConstraints) GoString() string {
+	return s.String()
+}
+
+// The ParameterDeclaration data type.
+type ParameterDeclaration struct {
+	_ struct{} `type:"structure"`
+
+	// The default value of the parameter.
+	DefaultValue *string `type:"string"`
+
+	// The description that is associated with the parameter.
+	Description *string `type:"string"`
+
+	// Flag that indicates whether the parameter value is shown as plain text in
+	// logs and in the AWS Management Console.
+	NoEcho *bool `type:"boolean"`
+
+	// The criteria that AWS CloudFormation uses to validate parameter values.
+	ParameterConstraints *ParameterConstraints `type:"structure"`
+
+	// The name that is associated with the parameter.
+	ParameterKey *string `type:"string"`
+
+	// The type of parameter.
+	ParameterType *string `type:"string"`
+}
+
+// String returns the string representation
+func (s ParameterDeclaration) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ParameterDeclaration) GoString() string {
+	return s.String()
+}
+
+// The input for the SetStackPolicy action.
+type SetStackPolicyInput struct {
+	_ struct{} `type:"structure"`
+
+	// The name or unique stack ID that you want to associate a policy with.
+	StackName *string `type:"string" required:"true"`
+
+	// Structure containing the stack policy body. For more information, go to
+	// Prevent Updates to Stack Resources (http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/protect-stack-resources.html)
+	// in the AWS CloudFormation User Guide. You can specify either the StackPolicyBody
+	// or the StackPolicyURL parameter, but not both.
+	StackPolicyBody *string `min:"1" type:"string"`
+
+	// Location of a file containing the stack policy. The URL must point to a policy
+	// (max size: 16KB) located in an S3 bucket in the same region as the stack.
+	// You can specify either the StackPolicyBody or the StackPolicyURL parameter,
+	// but not both.
+	StackPolicyURL *string `min:"1" type:"string"`
+}
+
+// String returns the string representation
+func (s SetStackPolicyInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s SetStackPolicyInput) GoString() string {
+	return s.String()
+}
+
+type SetStackPolicyOutput struct {
+	_ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s SetStackPolicyOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s SetStackPolicyOutput) GoString() string {
+	return s.String()
+}
+
+// The input for the SignalResource action.
+type SignalResourceInput struct {
+	_ struct{} `type:"structure"`
+
+	// The logical ID of the resource that you want to signal. The logical ID is
+	// the name of the resource as given in the template.
+	LogicalResourceId *string `type:"string" required:"true"`
+
+	// The stack name or unique stack ID that includes the resource that you want
+	// to signal.
+	StackName *string `min:"1" type:"string" required:"true"`
+
+	// The status of the signal, which is either success or failure. A failure signal
+	// causes AWS CloudFormation to immediately fail the stack creation or update.
+	Status *string `type:"string" required:"true" enum:"ResourceSignalStatus"`
+
+	// A unique ID of the signal. When you signal Amazon EC2 instances or Auto Scaling
+	// groups, specify the instance ID that you are signaling as the unique ID.
+	// If you send multiple signals to a single resource (such as signaling a wait
+	// condition), each signal requires a different unique ID. 
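+	//
+	// For example, an EC2 instance signaling its own creation policy might use
+	// its instance ID (illustrative only):
+	//
+	//    UniqueId: aws.String("i-0123456789abcdef0"),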
+ UniqueId *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s SignalResourceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SignalResourceInput) GoString() string { + return s.String() +} + +type SignalResourceOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s SignalResourceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SignalResourceOutput) GoString() string { + return s.String() +} + +// The Stack data type. +type Stack struct { + _ struct{} `type:"structure"` + + // The capabilities allowed in the stack. + Capabilities []*string `type:"list"` + + // The time at which the stack was created. + CreationTime *time.Time `type:"timestamp" timestampFormat:"iso8601" required:"true"` + + // A user-defined description associated with the stack. + Description *string `type:"string"` + + // Boolean to enable or disable rollback on stack creation failures: + // + // true: disable rollback false: enable rollback + DisableRollback *bool `type:"boolean"` + + // The time the stack was last updated. This field will only be returned if + // the stack has been updated at least once. + LastUpdatedTime *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + // SNS topic ARNs to which stack related events are published. + NotificationARNs []*string `type:"list"` + + // A list of output structures. + Outputs []*Output `type:"list"` + + // A list of Parameter structures. + Parameters []*Parameter `type:"list"` + + // Unique identifier of the stack. + StackId *string `type:"string"` + + // The name associated with the stack. + StackName *string `type:"string" required:"true"` + + // Current status of the stack. + StackStatus *string `type:"string" required:"true" enum:"StackStatus"` + + // Success/failure message associated with the stack status. + StackStatusReason *string `type:"string"` + + // A list of Tags that specify cost allocation information for the stack. + Tags []*Tag `type:"list"` + + // The amount of time within which stack creation should complete. + TimeoutInMinutes *int64 `min:"1" type:"integer"` +} + +// String returns the string representation +func (s Stack) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Stack) GoString() string { + return s.String() +} + +// The StackEvent data type. +type StackEvent struct { + _ struct{} `type:"structure"` + + // The unique ID of this event. + EventId *string `type:"string" required:"true"` + + // The logical name of the resource specified in the template. + LogicalResourceId *string `type:"string"` + + // The name or unique identifier associated with the physical instance of the + // resource. + PhysicalResourceId *string `type:"string"` + + // BLOB of the properties used to create the resource. + ResourceProperties *string `type:"string"` + + // Current status of the resource. + ResourceStatus *string `type:"string" enum:"ResourceStatus"` + + // Success/failure message associated with the resource. + ResourceStatusReason *string `type:"string"` + + // Type of resource. (For more information, go to AWS Resource Types Reference + // (http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-template-resource-type-ref.html) + // in the AWS CloudFormation User Guide.) 
+	ResourceType *string `type:"string"`
+
+	// The unique ID of the instance of the stack.
+	StackId *string `type:"string" required:"true"`
+
+	// The name associated with a stack.
+	StackName *string `type:"string" required:"true"`
+
+	// Time the status was updated.
+	Timestamp *time.Time `type:"timestamp" timestampFormat:"iso8601" required:"true"`
+}
+
+// String returns the string representation
+func (s StackEvent) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s StackEvent) GoString() string {
+	return s.String()
+}
+
+// The StackResource data type.
+type StackResource struct {
+	_ struct{} `type:"structure"`
+
+	// User-defined description associated with the resource.
+	Description *string `type:"string"`
+
+	// The logical name of the resource specified in the template.
+	LogicalResourceId *string `type:"string" required:"true"`
+
+	// The name or unique identifier that corresponds to a physical instance ID
+	// of a resource supported by AWS CloudFormation.
+	PhysicalResourceId *string `type:"string"`
+
+	// Current status of the resource.
+	ResourceStatus *string `type:"string" required:"true" enum:"ResourceStatus"`
+
+	// Success/failure message associated with the resource.
+	ResourceStatusReason *string `type:"string"`
+
+	// Type of resource. (For more information, go to AWS Resource Types Reference
+	// (http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-template-resource-type-ref.html)
+	// in the AWS CloudFormation User Guide.)
+	ResourceType *string `type:"string" required:"true"`
+
+	// Unique identifier of the stack.
+	StackId *string `type:"string"`
+
+	// The name associated with the stack.
+	StackName *string `type:"string"`
+
+	// Time the status was updated.
+	Timestamp *time.Time `type:"timestamp" timestampFormat:"iso8601" required:"true"`
+}
+
+// String returns the string representation
+func (s StackResource) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s StackResource) GoString() string {
+	return s.String()
+}
+
+// Contains detailed information about the specified stack resource.
+type StackResourceDetail struct {
+	_ struct{} `type:"structure"`
+
+	// User-defined description associated with the resource.
+	Description *string `type:"string"`
+
+	// Time the status was updated.
+	LastUpdatedTimestamp *time.Time `type:"timestamp" timestampFormat:"iso8601" required:"true"`
+
+	// The logical name of the resource specified in the template.
+	LogicalResourceId *string `type:"string" required:"true"`
+
+	// The JSON format content of the Metadata attribute declared for the resource.
+	// For more information, see Metadata Attribute (http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-attribute-metadata.html)
+	// in the AWS CloudFormation User Guide.
+	Metadata *string `type:"string"`
+
+	// The name or unique identifier that corresponds to a physical instance ID
+	// of a resource supported by AWS CloudFormation.
+	PhysicalResourceId *string `type:"string"`
+
+	// Current status of the resource.
+	ResourceStatus *string `type:"string" required:"true" enum:"ResourceStatus"`
+
+	// Success/failure message associated with the resource.
+	ResourceStatusReason *string `type:"string"`
+
+	// Type of resource. (For more information, go to AWS Resource Types Reference
+	// (http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-template-resource-type-ref.html)
+	// in the AWS CloudFormation User Guide.)
+	ResourceType *string `type:"string" required:"true"`
+
+	// Unique identifier of the stack.
+	StackId *string `type:"string"`
+
+	// The name associated with the stack.
+	StackName *string `type:"string"`
+}
+
+// String returns the string representation
+func (s StackResourceDetail) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s StackResourceDetail) GoString() string {
+	return s.String()
+}
+
+// Contains high-level information about the specified stack resource.
+type StackResourceSummary struct {
+	_ struct{} `type:"structure"`
+
+	// Time the status was updated.
+	LastUpdatedTimestamp *time.Time `type:"timestamp" timestampFormat:"iso8601" required:"true"`
+
+	// The logical name of the resource specified in the template.
+	LogicalResourceId *string `type:"string" required:"true"`
+
+	// The name or unique identifier that corresponds to a physical instance ID
+	// of the resource.
+	PhysicalResourceId *string `type:"string"`
+
+	// Current status of the resource.
+	ResourceStatus *string `type:"string" required:"true" enum:"ResourceStatus"`
+
+	// Success/failure message associated with the resource.
+	ResourceStatusReason *string `type:"string"`
+
+	// Type of resource. (For more information, go to AWS Resource Types Reference
+	// (http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-template-resource-type-ref.html)
+	// in the AWS CloudFormation User Guide.)
+	ResourceType *string `type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s StackResourceSummary) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s StackResourceSummary) GoString() string {
+	return s.String()
+}
+
+// The StackSummary data type.
+type StackSummary struct {
+	_ struct{} `type:"structure"`
+
+	// The time the stack was created.
+	CreationTime *time.Time `type:"timestamp" timestampFormat:"iso8601" required:"true"`
+
+	// The time the stack was deleted.
+	DeletionTime *time.Time `type:"timestamp" timestampFormat:"iso8601"`
+
+	// The time the stack was last updated. This field will only be returned if
+	// the stack has been updated at least once.
+	LastUpdatedTime *time.Time `type:"timestamp" timestampFormat:"iso8601"`
+
+	// Unique stack identifier.
+	StackId *string `type:"string"`
+
+	// The name associated with the stack.
+	StackName *string `type:"string" required:"true"`
+
+	// The current status of the stack.
+	StackStatus *string `type:"string" required:"true" enum:"StackStatus"`
+
+	// Success/failure message associated with the stack status.
+	StackStatusReason *string `type:"string"`
+
+	// The description of the template used to create the stack.
+	TemplateDescription *string `type:"string"`
+}
+
+// String returns the string representation
+func (s StackSummary) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s StackSummary) GoString() string {
+	return s.String()
+}
+
+// The Tag type is used by CreateStack in the Tags parameter. It allows you
+// to specify a key-value pair that can be used to store information related
+// to cost allocation for an AWS CloudFormation stack.
+type Tag struct {
+	_ struct{} `type:"structure"`
+
+	// Required. A string used to identify this tag. You can specify a maximum of
+	// 128 characters for a tag key. Tags owned by Amazon Web Services (AWS) have
+	// the reserved prefix: aws:.
+	Key *string `type:"string"`
+
+	// Required. A string containing the value for this tag. You can specify a maximum
+	// of 256 characters for a tag value.
+	Value *string `type:"string"`
+}
+
+// String returns the string representation
+func (s Tag) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s Tag) GoString() string {
+	return s.String()
+}
+
+// The TemplateParameter data type.
+type TemplateParameter struct {
+	_ struct{} `type:"structure"`
+
+	// The default value associated with the parameter.
+	DefaultValue *string `type:"string"`
+
+	// User-defined description associated with the parameter.
+	Description *string `type:"string"`
+
+	// Flag indicating whether the parameter should be displayed as plain text in
+	// logs and UIs.
+	NoEcho *bool `type:"boolean"`
+
+	// The name associated with the parameter.
+	ParameterKey *string `type:"string"`
+}
+
+// String returns the string representation
+func (s TemplateParameter) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s TemplateParameter) GoString() string {
+	return s.String()
+}
+
+// The input for the UpdateStack action.
+type UpdateStackInput struct {
+	_ struct{} `type:"structure"`
+
+	// A list of capabilities that you must specify before AWS CloudFormation can
+	// create or update certain stacks. Some stack templates might include resources
+	// that can affect permissions in your AWS account. For those stacks, you must
+	// explicitly acknowledge their capabilities by specifying this parameter. Currently,
+	// the only valid value is CAPABILITY_IAM, which is required for the following
+	// resources: AWS::IAM::AccessKey (http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-iam-accesskey.html),
+	// AWS::IAM::Group (http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-iam-group.html),
+	// AWS::IAM::InstanceProfile (http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-iam-instanceprofile.html),
+	// AWS::IAM::Policy (http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-iam-policy.html),
+	// AWS::IAM::Role (http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-iam-role.html),
+	// AWS::IAM::User (http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-iam-user.html),
+	// and AWS::IAM::UserToGroupAddition (http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-iam-addusertogroup.html).
+	// If your stack template contains these resources, we recommend that you review
+	// any permissions associated with them. If you don't specify this parameter,
+	// this action returns an InsufficientCapabilities error.
+	Capabilities []*string `type:"list"`
+
+	// Amazon Simple Notification Service topic Amazon Resource Names (ARNs) that
+	// AWS CloudFormation associates with the stack. Specify an empty list to remove
+	// all notification topics.
+	NotificationARNs []*string `type:"list"`
+
+	// A list of Parameter structures that specify input parameters for the stack.
+	// For more information, see the Parameter (http://docs.aws.amazon.com/AWSCloudFormation/latest/APIReference/API_Parameter.html)
+	// data type.
+	Parameters []*Parameter `type:"list"`
+
+	// The template resource types that you have permissions to work with for this
+	// update stack action, such as AWS::EC2::Instance, AWS::EC2::*, or Custom::MyCustomInstance.
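+	// A wildcard entry such as AWS::EC2::* covers every resource type under that
+	// service prefix.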
+	//
+	// If the list of resource types doesn't include a resource that you're updating,
+	// the stack update fails. By default, AWS CloudFormation grants permissions
+	// to all resource types. AWS Identity and Access Management (IAM) uses this
+	// parameter for AWS CloudFormation-specific condition keys in IAM policies.
+	// For more information, see Controlling Access with AWS Identity and Access
+	// Management (http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/using-iam-template.html).
+	ResourceTypes []*string `type:"list"`
+
+	// The name or unique stack ID of the stack to update.
+	StackName *string `type:"string" required:"true"`
+
+	// Structure containing a new stack policy body. You can specify either the
+	// StackPolicyBody or the StackPolicyURL parameter, but not both.
+	//
+	// You might update the stack policy, for example, to protect a new resource
+	// that you created during a stack update. If you do not specify a stack policy,
+	// the current policy that is associated with the stack is unchanged.
+	StackPolicyBody *string `min:"1" type:"string"`
+
+	// Structure containing the temporary overriding stack policy body. You can
+	// specify either the StackPolicyDuringUpdateBody or the StackPolicyDuringUpdateURL
+	// parameter, but not both.
+	//
+	// If you want to update protected resources, specify a temporary overriding
+	// stack policy during this update. If you do not specify a stack policy, the
+	// current policy that is associated with the stack will be used.
+	StackPolicyDuringUpdateBody *string `min:"1" type:"string"`
+
+	// Location of a file containing the temporary overriding stack policy. The
+	// URL must point to a policy (max size: 16KB) located in an S3 bucket in the
+	// same region as the stack. You can specify either the StackPolicyDuringUpdateBody
+	// or the StackPolicyDuringUpdateURL parameter, but not both.
+	//
+	// If you want to update protected resources, specify a temporary overriding
+	// stack policy during this update. If you do not specify a stack policy, the
+	// current policy that is associated with the stack will be used.
+	StackPolicyDuringUpdateURL *string `min:"1" type:"string"`
+
+	// Location of a file containing the updated stack policy. The URL must point
+	// to a policy (max size: 16KB) located in an S3 bucket in the same region as
+	// the stack. You can specify either the StackPolicyBody or the StackPolicyURL
+	// parameter, but not both.
+	//
+	// You might update the stack policy, for example, to protect a new resource
+	// that you created during a stack update. If you do not specify a stack policy,
+	// the current policy that is associated with the stack is unchanged.
+	StackPolicyURL *string `min:"1" type:"string"`
+
+	// Structure containing the template body with a minimum length of 1 byte and
+	// a maximum length of 51,200 bytes. (For more information, go to Template Anatomy
+	// (http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/template-anatomy.html)
+	// in the AWS CloudFormation User Guide.)
+	//
+	// Conditional: You must specify either the TemplateBody or the TemplateURL
+	// parameter, but not both.
+	TemplateBody *string `min:"1" type:"string"`
+
+	// Location of a file containing the template body. The URL must point to a template
+	// that is located in an Amazon S3 bucket. For more information, go to Template
+	// Anatomy (http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/template-anatomy.html)
+	// in the AWS CloudFormation User Guide.
+	//
+	// Conditional: You must specify either the TemplateBody or the TemplateURL
+	// parameter, but not both.
+	TemplateURL *string `min:"1" type:"string"`
+
+	// Reuse the existing template that is associated with the stack that you are
+	// updating.
+	UsePreviousTemplate *bool `type:"boolean"`
+}
+
+// String returns the string representation
+func (s UpdateStackInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s UpdateStackInput) GoString() string {
+	return s.String()
+}
+
+// The output for an UpdateStack action.
+type UpdateStackOutput struct {
+	_ struct{} `type:"structure"`
+
+	// Unique identifier of the stack.
+	StackId *string `type:"string"`
+}
+
+// String returns the string representation
+func (s UpdateStackOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s UpdateStackOutput) GoString() string {
+	return s.String()
+}
+
+// The input for the ValidateTemplate action.
+type ValidateTemplateInput struct {
+	_ struct{} `type:"structure"`
+
+	// Structure containing the template body with a minimum length of 1 byte and
+	// a maximum length of 51,200 bytes. For more information, go to Template Anatomy
+	// (http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/template-anatomy.html)
+	// in the AWS CloudFormation User Guide.
+	//
+	// Conditional: You must pass TemplateURL or TemplateBody. If both are passed,
+	// only TemplateBody is used.
+	TemplateBody *string `min:"1" type:"string"`
+
+	// Location of a file containing the template body. The URL must point to a template
+	// (max size: 460,800 bytes) that is located in an Amazon S3 bucket. For more
+	// information, go to Template Anatomy (http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/template-anatomy.html)
+	// in the AWS CloudFormation User Guide.
+	//
+	// Conditional: You must pass TemplateURL or TemplateBody. If both are passed,
+	// only TemplateBody is used.
+	TemplateURL *string `min:"1" type:"string"`
+}
+
+// String returns the string representation
+func (s ValidateTemplateInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ValidateTemplateInput) GoString() string {
+	return s.String()
+}
+
+// The output for the ValidateTemplate action.
+type ValidateTemplateOutput struct {
+	_ struct{} `type:"structure"`
+
+	// The capabilities found within the template. Currently, AWS CloudFormation
+	// supports only the CAPABILITY_IAM capability. If your template contains IAM
+	// resources, you must specify the CAPABILITY_IAM value for this parameter when
+	// you use the CreateStack or UpdateStack actions with your template; otherwise,
+	// those actions return an InsufficientCapabilities error.
+	Capabilities []*string `type:"list"`
+
+	// The list of resources that generated the values in the Capabilities response
+	// element.
+	CapabilitiesReason *string `type:"string"`
+
+	// The description found within the template.
+	Description *string `type:"string"`
+
+	// A list of TemplateParameter structures.
+	Parameters []*TemplateParameter `type:"list"`
+}
+
+// String returns the string representation
+func (s ValidateTemplateOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ValidateTemplateOutput) GoString() string {
+	return s.String()
+}
+
+const (
+	// @enum Capability
+	CapabilityCapabilityIam = "CAPABILITY_IAM"
+)
+
+const (
+	// @enum OnFailure
+	OnFailureDoNothing = "DO_NOTHING"
+	// @enum OnFailure
+	OnFailureRollback = "ROLLBACK"
+	// @enum OnFailure
+	OnFailureDelete = "DELETE"
+)
+
+const (
+	// @enum ResourceSignalStatus
+	ResourceSignalStatusSuccess = "SUCCESS"
+	// @enum ResourceSignalStatus
+	ResourceSignalStatusFailure = "FAILURE"
+)
+
+const (
+	// @enum ResourceStatus
+	ResourceStatusCreateInProgress = "CREATE_IN_PROGRESS"
+	// @enum ResourceStatus
+	ResourceStatusCreateFailed = "CREATE_FAILED"
+	// @enum ResourceStatus
+	ResourceStatusCreateComplete = "CREATE_COMPLETE"
+	// @enum ResourceStatus
+	ResourceStatusDeleteInProgress = "DELETE_IN_PROGRESS"
+	// @enum ResourceStatus
+	ResourceStatusDeleteFailed = "DELETE_FAILED"
+	// @enum ResourceStatus
+	ResourceStatusDeleteComplete = "DELETE_COMPLETE"
+	// @enum ResourceStatus
+	ResourceStatusDeleteSkipped = "DELETE_SKIPPED"
+	// @enum ResourceStatus
+	ResourceStatusUpdateInProgress = "UPDATE_IN_PROGRESS"
+	// @enum ResourceStatus
+	ResourceStatusUpdateFailed = "UPDATE_FAILED"
+	// @enum ResourceStatus
+	ResourceStatusUpdateComplete = "UPDATE_COMPLETE"
+)
+
+const (
+	// @enum StackStatus
+	StackStatusCreateInProgress = "CREATE_IN_PROGRESS"
+	// @enum StackStatus
+	StackStatusCreateFailed = "CREATE_FAILED"
+	// @enum StackStatus
+	StackStatusCreateComplete = "CREATE_COMPLETE"
+	// @enum StackStatus
+	StackStatusRollbackInProgress = "ROLLBACK_IN_PROGRESS"
+	// @enum StackStatus
+	StackStatusRollbackFailed = "ROLLBACK_FAILED"
+	// @enum StackStatus
+	StackStatusRollbackComplete = "ROLLBACK_COMPLETE"
+	// @enum StackStatus
+	StackStatusDeleteInProgress = "DELETE_IN_PROGRESS"
+	// @enum StackStatus
+	StackStatusDeleteFailed = "DELETE_FAILED"
+	// @enum StackStatus
+	StackStatusDeleteComplete = "DELETE_COMPLETE"
+	// @enum StackStatus
+	StackStatusUpdateInProgress = "UPDATE_IN_PROGRESS"
+	// @enum StackStatus
+	StackStatusUpdateCompleteCleanupInProgress = "UPDATE_COMPLETE_CLEANUP_IN_PROGRESS"
+	// @enum StackStatus
+	StackStatusUpdateComplete = "UPDATE_COMPLETE"
+	// @enum StackStatus
+	StackStatusUpdateRollbackInProgress = "UPDATE_ROLLBACK_IN_PROGRESS"
+	// @enum StackStatus
+	StackStatusUpdateRollbackFailed = "UPDATE_ROLLBACK_FAILED"
+	// @enum StackStatus
+	StackStatusUpdateRollbackCompleteCleanupInProgress = "UPDATE_ROLLBACK_COMPLETE_CLEANUP_IN_PROGRESS"
+	// @enum StackStatus
+	StackStatusUpdateRollbackComplete = "UPDATE_ROLLBACK_COMPLETE"
+)
diff --git a/vendor/github.com/aws/aws-sdk-go/service/cloudformation/cloudformationiface/interface.go b/vendor/github.com/aws/aws-sdk-go/service/cloudformation/cloudformationiface/interface.go
new file mode 100644
index 000000000..fc4e19afa
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/cloudformation/cloudformationiface/interface.go
@@ -0,0 +1,98 @@
+// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT.
+
+// Package cloudformationiface provides an interface for AWS CloudFormation.
+package cloudformationiface
+
+import (
+	"github.com/aws/aws-sdk-go/aws/request"
+	"github.com/aws/aws-sdk-go/service/cloudformation"
+)
+
+// CloudFormationAPI is the interface type for cloudformation.CloudFormation.
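+//
+// Because it mirrors every operation on the concrete client, this interface
+// is convenient for stubbing the service in tests. A minimal sketch from a
+// consuming package (stubCFN is an illustrative name, not part of the SDK):
+// embed the interface to satisfy it, then override only the methods the test
+// exercises:
+//
+//	type stubCFN struct {
+//		cloudformationiface.CloudFormationAPI
+//	}
+//
+//	func (s stubCFN) DescribeStacks(in *cloudformation.DescribeStacksInput) (*cloudformation.DescribeStacksOutput, error) {
+//		return &cloudformation.DescribeStacksOutput{}, nil
+//	}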
+type CloudFormationAPI interface { + CancelUpdateStackRequest(*cloudformation.CancelUpdateStackInput) (*request.Request, *cloudformation.CancelUpdateStackOutput) + + CancelUpdateStack(*cloudformation.CancelUpdateStackInput) (*cloudformation.CancelUpdateStackOutput, error) + + ContinueUpdateRollbackRequest(*cloudformation.ContinueUpdateRollbackInput) (*request.Request, *cloudformation.ContinueUpdateRollbackOutput) + + ContinueUpdateRollback(*cloudformation.ContinueUpdateRollbackInput) (*cloudformation.ContinueUpdateRollbackOutput, error) + + CreateStackRequest(*cloudformation.CreateStackInput) (*request.Request, *cloudformation.CreateStackOutput) + + CreateStack(*cloudformation.CreateStackInput) (*cloudformation.CreateStackOutput, error) + + DeleteStackRequest(*cloudformation.DeleteStackInput) (*request.Request, *cloudformation.DeleteStackOutput) + + DeleteStack(*cloudformation.DeleteStackInput) (*cloudformation.DeleteStackOutput, error) + + DescribeAccountLimitsRequest(*cloudformation.DescribeAccountLimitsInput) (*request.Request, *cloudformation.DescribeAccountLimitsOutput) + + DescribeAccountLimits(*cloudformation.DescribeAccountLimitsInput) (*cloudformation.DescribeAccountLimitsOutput, error) + + DescribeStackEventsRequest(*cloudformation.DescribeStackEventsInput) (*request.Request, *cloudformation.DescribeStackEventsOutput) + + DescribeStackEvents(*cloudformation.DescribeStackEventsInput) (*cloudformation.DescribeStackEventsOutput, error) + + DescribeStackEventsPages(*cloudformation.DescribeStackEventsInput, func(*cloudformation.DescribeStackEventsOutput, bool) bool) error + + DescribeStackResourceRequest(*cloudformation.DescribeStackResourceInput) (*request.Request, *cloudformation.DescribeStackResourceOutput) + + DescribeStackResource(*cloudformation.DescribeStackResourceInput) (*cloudformation.DescribeStackResourceOutput, error) + + DescribeStackResourcesRequest(*cloudformation.DescribeStackResourcesInput) (*request.Request, *cloudformation.DescribeStackResourcesOutput) + + DescribeStackResources(*cloudformation.DescribeStackResourcesInput) (*cloudformation.DescribeStackResourcesOutput, error) + + DescribeStacksRequest(*cloudformation.DescribeStacksInput) (*request.Request, *cloudformation.DescribeStacksOutput) + + DescribeStacks(*cloudformation.DescribeStacksInput) (*cloudformation.DescribeStacksOutput, error) + + DescribeStacksPages(*cloudformation.DescribeStacksInput, func(*cloudformation.DescribeStacksOutput, bool) bool) error + + EstimateTemplateCostRequest(*cloudformation.EstimateTemplateCostInput) (*request.Request, *cloudformation.EstimateTemplateCostOutput) + + EstimateTemplateCost(*cloudformation.EstimateTemplateCostInput) (*cloudformation.EstimateTemplateCostOutput, error) + + GetStackPolicyRequest(*cloudformation.GetStackPolicyInput) (*request.Request, *cloudformation.GetStackPolicyOutput) + + GetStackPolicy(*cloudformation.GetStackPolicyInput) (*cloudformation.GetStackPolicyOutput, error) + + GetTemplateRequest(*cloudformation.GetTemplateInput) (*request.Request, *cloudformation.GetTemplateOutput) + + GetTemplate(*cloudformation.GetTemplateInput) (*cloudformation.GetTemplateOutput, error) + + GetTemplateSummaryRequest(*cloudformation.GetTemplateSummaryInput) (*request.Request, *cloudformation.GetTemplateSummaryOutput) + + GetTemplateSummary(*cloudformation.GetTemplateSummaryInput) (*cloudformation.GetTemplateSummaryOutput, error) + + ListStackResourcesRequest(*cloudformation.ListStackResourcesInput) (*request.Request, *cloudformation.ListStackResourcesOutput) + + 
ListStackResources(*cloudformation.ListStackResourcesInput) (*cloudformation.ListStackResourcesOutput, error) + + ListStackResourcesPages(*cloudformation.ListStackResourcesInput, func(*cloudformation.ListStackResourcesOutput, bool) bool) error + + ListStacksRequest(*cloudformation.ListStacksInput) (*request.Request, *cloudformation.ListStacksOutput) + + ListStacks(*cloudformation.ListStacksInput) (*cloudformation.ListStacksOutput, error) + + ListStacksPages(*cloudformation.ListStacksInput, func(*cloudformation.ListStacksOutput, bool) bool) error + + SetStackPolicyRequest(*cloudformation.SetStackPolicyInput) (*request.Request, *cloudformation.SetStackPolicyOutput) + + SetStackPolicy(*cloudformation.SetStackPolicyInput) (*cloudformation.SetStackPolicyOutput, error) + + SignalResourceRequest(*cloudformation.SignalResourceInput) (*request.Request, *cloudformation.SignalResourceOutput) + + SignalResource(*cloudformation.SignalResourceInput) (*cloudformation.SignalResourceOutput, error) + + UpdateStackRequest(*cloudformation.UpdateStackInput) (*request.Request, *cloudformation.UpdateStackOutput) + + UpdateStack(*cloudformation.UpdateStackInput) (*cloudformation.UpdateStackOutput, error) + + ValidateTemplateRequest(*cloudformation.ValidateTemplateInput) (*request.Request, *cloudformation.ValidateTemplateOutput) + + ValidateTemplate(*cloudformation.ValidateTemplateInput) (*cloudformation.ValidateTemplateOutput, error) +} + +var _ CloudFormationAPI = (*cloudformation.CloudFormation)(nil) diff --git a/vendor/github.com/aws/aws-sdk-go/service/cloudformation/examples_test.go b/vendor/github.com/aws/aws-sdk-go/service/cloudformation/examples_test.go new file mode 100644 index 000000000..d2c86d93a --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/cloudformation/examples_test.go @@ -0,0 +1,465 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +package cloudformation_test + +import ( + "bytes" + "fmt" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/session" + "github.com/aws/aws-sdk-go/service/cloudformation" +) + +var _ time.Duration +var _ bytes.Buffer + +func ExampleCloudFormation_CancelUpdateStack() { + svc := cloudformation.New(session.New()) + + params := &cloudformation.CancelUpdateStackInput{ + StackName: aws.String("StackName"), // Required + } + resp, err := svc.CancelUpdateStack(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCloudFormation_ContinueUpdateRollback() { + svc := cloudformation.New(session.New()) + + params := &cloudformation.ContinueUpdateRollbackInput{ + StackName: aws.String("StackNameOrId"), // Required + } + resp, err := svc.ContinueUpdateRollback(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCloudFormation_CreateStack() { + svc := cloudformation.New(session.New()) + + params := &cloudformation.CreateStackInput{ + StackName: aws.String("StackName"), // Required + Capabilities: []*string{ + aws.String("Capability"), // Required + // More values... + }, + DisableRollback: aws.Bool(true), + NotificationARNs: []*string{ + aws.String("NotificationARN"), // Required + // More values... 
+ }, + OnFailure: aws.String("OnFailure"), + Parameters: []*cloudformation.Parameter{ + { // Required + ParameterKey: aws.String("ParameterKey"), + ParameterValue: aws.String("ParameterValue"), + UsePreviousValue: aws.Bool(true), + }, + // More values... + }, + ResourceTypes: []*string{ + aws.String("ResourceType"), // Required + // More values... + }, + StackPolicyBody: aws.String("StackPolicyBody"), + StackPolicyURL: aws.String("StackPolicyURL"), + Tags: []*cloudformation.Tag{ + { // Required + Key: aws.String("TagKey"), + Value: aws.String("TagValue"), + }, + // More values... + }, + TemplateBody: aws.String("TemplateBody"), + TemplateURL: aws.String("TemplateURL"), + TimeoutInMinutes: aws.Int64(1), + } + resp, err := svc.CreateStack(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCloudFormation_DeleteStack() { + svc := cloudformation.New(session.New()) + + params := &cloudformation.DeleteStackInput{ + StackName: aws.String("StackName"), // Required + } + resp, err := svc.DeleteStack(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCloudFormation_DescribeAccountLimits() { + svc := cloudformation.New(session.New()) + + params := &cloudformation.DescribeAccountLimitsInput{ + NextToken: aws.String("NextToken"), + } + resp, err := svc.DescribeAccountLimits(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCloudFormation_DescribeStackEvents() { + svc := cloudformation.New(session.New()) + + params := &cloudformation.DescribeStackEventsInput{ + NextToken: aws.String("NextToken"), + StackName: aws.String("StackName"), + } + resp, err := svc.DescribeStackEvents(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCloudFormation_DescribeStackResource() { + svc := cloudformation.New(session.New()) + + params := &cloudformation.DescribeStackResourceInput{ + LogicalResourceId: aws.String("LogicalResourceId"), // Required + StackName: aws.String("StackName"), // Required + } + resp, err := svc.DescribeStackResource(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCloudFormation_DescribeStackResources() { + svc := cloudformation.New(session.New()) + + params := &cloudformation.DescribeStackResourcesInput{ + LogicalResourceId: aws.String("LogicalResourceId"), + PhysicalResourceId: aws.String("PhysicalResourceId"), + StackName: aws.String("StackName"), + } + resp, err := svc.DescribeStackResources(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleCloudFormation_DescribeStacks() { + svc := cloudformation.New(session.New()) + + params := &cloudformation.DescribeStacksInput{ + NextToken: aws.String("NextToken"), + StackName: aws.String("StackName"), + } + resp, err := svc.DescribeStacks(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCloudFormation_EstimateTemplateCost() { + svc := cloudformation.New(session.New()) + + params := &cloudformation.EstimateTemplateCostInput{ + Parameters: []*cloudformation.Parameter{ + { // Required + ParameterKey: aws.String("ParameterKey"), + ParameterValue: aws.String("ParameterValue"), + UsePreviousValue: aws.Bool(true), + }, + // More values... + }, + TemplateBody: aws.String("TemplateBody"), + TemplateURL: aws.String("TemplateURL"), + } + resp, err := svc.EstimateTemplateCost(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCloudFormation_GetStackPolicy() { + svc := cloudformation.New(session.New()) + + params := &cloudformation.GetStackPolicyInput{ + StackName: aws.String("StackName"), // Required + } + resp, err := svc.GetStackPolicy(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCloudFormation_GetTemplate() { + svc := cloudformation.New(session.New()) + + params := &cloudformation.GetTemplateInput{ + StackName: aws.String("StackName"), // Required + } + resp, err := svc.GetTemplate(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCloudFormation_GetTemplateSummary() { + svc := cloudformation.New(session.New()) + + params := &cloudformation.GetTemplateSummaryInput{ + StackName: aws.String("StackNameOrId"), + TemplateBody: aws.String("TemplateBody"), + TemplateURL: aws.String("TemplateURL"), + } + resp, err := svc.GetTemplateSummary(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCloudFormation_ListStackResources() { + svc := cloudformation.New(session.New()) + + params := &cloudformation.ListStackResourcesInput{ + StackName: aws.String("StackName"), // Required + NextToken: aws.String("NextToken"), + } + resp, err := svc.ListStackResources(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCloudFormation_ListStacks() { + svc := cloudformation.New(session.New()) + + params := &cloudformation.ListStacksInput{ + NextToken: aws.String("NextToken"), + StackStatusFilter: []*string{ + aws.String("StackStatus"), // Required + // More values... 
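+			// For example, aws.String(cloudformation.StackStatusCreateComplete);
+			// the StackStatus constants in the cloudformation package list the
+			// valid filter values.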
+ }, + } + resp, err := svc.ListStacks(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCloudFormation_SetStackPolicy() { + svc := cloudformation.New(session.New()) + + params := &cloudformation.SetStackPolicyInput{ + StackName: aws.String("StackName"), // Required + StackPolicyBody: aws.String("StackPolicyBody"), + StackPolicyURL: aws.String("StackPolicyURL"), + } + resp, err := svc.SetStackPolicy(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCloudFormation_SignalResource() { + svc := cloudformation.New(session.New()) + + params := &cloudformation.SignalResourceInput{ + LogicalResourceId: aws.String("LogicalResourceId"), // Required + StackName: aws.String("StackNameOrId"), // Required + Status: aws.String("ResourceSignalStatus"), // Required + UniqueId: aws.String("ResourceSignalUniqueId"), // Required + } + resp, err := svc.SignalResource(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCloudFormation_UpdateStack() { + svc := cloudformation.New(session.New()) + + params := &cloudformation.UpdateStackInput{ + StackName: aws.String("StackName"), // Required + Capabilities: []*string{ + aws.String("Capability"), // Required + // More values... + }, + NotificationARNs: []*string{ + aws.String("NotificationARN"), // Required + // More values... + }, + Parameters: []*cloudformation.Parameter{ + { // Required + ParameterKey: aws.String("ParameterKey"), + ParameterValue: aws.String("ParameterValue"), + UsePreviousValue: aws.Bool(true), + }, + // More values... + }, + ResourceTypes: []*string{ + aws.String("ResourceType"), // Required + // More values... + }, + StackPolicyBody: aws.String("StackPolicyBody"), + StackPolicyDuringUpdateBody: aws.String("StackPolicyDuringUpdateBody"), + StackPolicyDuringUpdateURL: aws.String("StackPolicyDuringUpdateURL"), + StackPolicyURL: aws.String("StackPolicyURL"), + TemplateBody: aws.String("TemplateBody"), + TemplateURL: aws.String("TemplateURL"), + UsePreviousTemplate: aws.Bool(true), + } + resp, err := svc.UpdateStack(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCloudFormation_ValidateTemplate() { + svc := cloudformation.New(session.New()) + + params := &cloudformation.ValidateTemplateInput{ + TemplateBody: aws.String("TemplateBody"), + TemplateURL: aws.String("TemplateURL"), + } + resp, err := svc.ValidateTemplate(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+	fmt.Println(resp)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/service/cloudformation/service.go b/vendor/github.com/aws/aws-sdk-go/service/cloudformation/service.go
new file mode 100644
index 000000000..2bad95af1
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/cloudformation/service.go
@@ -0,0 +1,103 @@
+// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT.
+
+package cloudformation
+
+import (
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/aws/client"
+	"github.com/aws/aws-sdk-go/aws/client/metadata"
+	"github.com/aws/aws-sdk-go/aws/request"
+	"github.com/aws/aws-sdk-go/private/protocol/query"
+	"github.com/aws/aws-sdk-go/private/signer/v4"
+)
+
+// AWS CloudFormation enables you to create and manage AWS infrastructure deployments
+// predictably and repeatedly. AWS CloudFormation helps you leverage AWS products
+// such as Amazon EC2, EBS, Amazon SNS, ELB, and Auto Scaling to build highly reliable,
+// highly scalable, cost-effective applications without worrying about creating
+// and configuring the underlying AWS infrastructure.
+//
+// With AWS CloudFormation, you declare all of your resources and dependencies
+// in a template file. The template defines a collection of resources as a single
+// unit called a stack. AWS CloudFormation creates and deletes all member resources
+// of the stack together and manages all dependencies between the resources
+// for you.
+//
+// For more information about this product, go to the CloudFormation Product
+// Page (http://aws.amazon.com/cloudformation/).
+//
+// Amazon CloudFormation makes use of other AWS products. If you need additional
+// technical information about a specific AWS product, you can find the product's
+// technical documentation at http://docs.aws.amazon.com/documentation/ (http://docs.aws.amazon.com/documentation/).
+//
+// The service client's operations are safe to be used concurrently.
+// It is not safe to mutate any of the client's properties though.
+type CloudFormation struct {
+	*client.Client
+}
+
+// Used for custom client initialization logic
+var initClient func(*client.Client)
+
+// Used for custom request initialization logic
+var initRequest func(*request.Request)
+
+// A ServiceName is the name of the service the client will make API calls to.
+const ServiceName = "cloudformation"
+
+// New creates a new instance of the CloudFormation client with a session.
+// If additional configuration is needed for the client instance use the optional
+// aws.Config parameter to add your extra config.
+//
+// Example:
+//     // Create a CloudFormation client from just a session.
+//     svc := cloudformation.New(mySession)
+//
+//     // Create a CloudFormation client with additional configuration
+//     svc := cloudformation.New(mySession, aws.NewConfig().WithRegion("us-west-2"))
+func New(p client.ConfigProvider, cfgs ...*aws.Config) *CloudFormation {
+	c := p.ClientConfig(ServiceName, cfgs...)
+	return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion)
+}
+
+// newClient creates, initializes and returns a new service client instance.
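+// The handler wiring below attaches the AWS query-protocol marshalers and the
+// v4 request signer to the client that New returns; initClient, when set, runs
+// last so callers can layer custom initialization on top.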
+func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *CloudFormation { + svc := &CloudFormation{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: ServiceName, + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "2010-05-15", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Build.PushBack(query.Build) + svc.Handlers.Unmarshal.PushBack(query.Unmarshal) + svc.Handlers.UnmarshalMeta.PushBack(query.UnmarshalMeta) + svc.Handlers.UnmarshalError.PushBack(query.UnmarshalError) + + // Run custom client initialization if present + if initClient != nil { + initClient(svc.Client) + } + + return svc +} + +// newRequest creates a new request for a CloudFormation operation and runs any +// custom request initialization. +func (c *CloudFormation) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + // Run custom request initialization if present + if initRequest != nil { + initRequest(req) + } + + return req +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/cloudformation/waiters.go b/vendor/github.com/aws/aws-sdk-go/service/cloudformation/waiters.go new file mode 100644 index 000000000..cd90e41d7 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/cloudformation/waiters.go @@ -0,0 +1,100 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +package cloudformation + +import ( + "github.com/aws/aws-sdk-go/private/waiter" +) + +func (c *CloudFormation) WaitUntilStackCreateComplete(input *DescribeStacksInput) error { + waiterCfg := waiter.Config{ + Operation: "DescribeStacks", + Delay: 30, + MaxAttempts: 50, + Acceptors: []waiter.WaitAcceptor{ + { + State: "success", + Matcher: "pathAll", + Argument: "Stacks[].StackStatus", + Expected: "CREATE_COMPLETE", + }, + { + State: "failure", + Matcher: "pathAny", + Argument: "Stacks[].StackStatus", + Expected: "CREATE_FAILED", + }, + }, + } + + w := waiter.Waiter{ + Client: c, + Input: input, + Config: waiterCfg, + } + return w.Wait() +} + +func (c *CloudFormation) WaitUntilStackDeleteComplete(input *DescribeStacksInput) error { + waiterCfg := waiter.Config{ + Operation: "DescribeStacks", + Delay: 30, + MaxAttempts: 25, + Acceptors: []waiter.WaitAcceptor{ + { + State: "success", + Matcher: "pathAll", + Argument: "Stacks[].StackStatus", + Expected: "DELETE_COMPLETE", + }, + { + State: "success", + Matcher: "error", + Argument: "", + Expected: "ValidationError", + }, + { + State: "failure", + Matcher: "pathAny", + Argument: "Stacks[].StackStatus", + Expected: "DELETE_FAILED", + }, + }, + } + + w := waiter.Waiter{ + Client: c, + Input: input, + Config: waiterCfg, + } + return w.Wait() +} + +func (c *CloudFormation) WaitUntilStackUpdateComplete(input *DescribeStacksInput) error { + waiterCfg := waiter.Config{ + Operation: "DescribeStacks", + Delay: 30, + MaxAttempts: 5, + Acceptors: []waiter.WaitAcceptor{ + { + State: "success", + Matcher: "pathAll", + Argument: "Stacks[].StackStatus", + Expected: "UPDATE_COMPLETE", + }, + { + State: "failure", + Matcher: "pathAny", + Argument: "Stacks[].StackStatus", + Expected: "UPDATE_FAILED", + }, + }, + } + + w := waiter.Waiter{ + Client: c, + Input: input, + Config: waiterCfg, + } + return w.Wait() +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/cloudtrail/api.go b/vendor/github.com/aws/aws-sdk-go/service/cloudtrail/api.go new file mode 100644 index 000000000..999cac844 --- /dev/null +++ 
b/vendor/github.com/aws/aws-sdk-go/service/cloudtrail/api.go @@ -0,0 +1,1409 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +// Package cloudtrail provides a client for AWS CloudTrail. +package cloudtrail + +import ( + "time" + + "github.com/aws/aws-sdk-go/aws/awsutil" + "github.com/aws/aws-sdk-go/aws/request" +) + +const opAddTags = "AddTags" + +// AddTagsRequest generates a request for the AddTags operation. +func (c *CloudTrail) AddTagsRequest(input *AddTagsInput) (req *request.Request, output *AddTagsOutput) { + op := &request.Operation{ + Name: opAddTags, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &AddTagsInput{} + } + + req = c.newRequest(op, input, output) + output = &AddTagsOutput{} + req.Data = output + return +} + +// Adds one or more tags to a trail, up to a limit of 10. Tags must be unique +// per trail. Overwrites an existing tag's value when a new value is specified +// for an existing tag key. If you specify a key without a value, the tag will +// be created with the specified key and a value of null. You can tag a trail +// that applies to all regions only from the region in which the trail was created +// (that is, from its home region). +func (c *CloudTrail) AddTags(input *AddTagsInput) (*AddTagsOutput, error) { + req, out := c.AddTagsRequest(input) + err := req.Send() + return out, err +} + +const opCreateTrail = "CreateTrail" + +// CreateTrailRequest generates a request for the CreateTrail operation. +func (c *CloudTrail) CreateTrailRequest(input *CreateTrailInput) (req *request.Request, output *CreateTrailOutput) { + op := &request.Operation{ + Name: opCreateTrail, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateTrailInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateTrailOutput{} + req.Data = output + return +} + +// Creates a trail that specifies the settings for delivery of log data to an +// Amazon S3 bucket. A maximum of five trails can exist in a region, irrespective +// of the region in which they were created. +func (c *CloudTrail) CreateTrail(input *CreateTrailInput) (*CreateTrailOutput, error) { + req, out := c.CreateTrailRequest(input) + err := req.Send() + return out, err +} + +const opDeleteTrail = "DeleteTrail" + +// DeleteTrailRequest generates a request for the DeleteTrail operation. +func (c *CloudTrail) DeleteTrailRequest(input *DeleteTrailInput) (req *request.Request, output *DeleteTrailOutput) { + op := &request.Operation{ + Name: opDeleteTrail, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteTrailInput{} + } + + req = c.newRequest(op, input, output) + output = &DeleteTrailOutput{} + req.Data = output + return +} + +// Deletes a trail. This operation must be called from the region in which the +// trail was created. DeleteTrail cannot be called on the shadow trails (replicated +// trails in other regions) of a trail that is enabled in all regions. +func (c *CloudTrail) DeleteTrail(input *DeleteTrailInput) (*DeleteTrailOutput, error) { + req, out := c.DeleteTrailRequest(input) + err := req.Send() + return out, err +} + +const opDescribeTrails = "DescribeTrails" + +// DescribeTrailsRequest generates a request for the DescribeTrails operation. 
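+// As with the other generated operations, the returned request.Request can be
+// customized before sending; DescribeTrails below simply wraps this call and
+// req.Send().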
+func (c *CloudTrail) DescribeTrailsRequest(input *DescribeTrailsInput) (req *request.Request, output *DescribeTrailsOutput) { + op := &request.Operation{ + Name: opDescribeTrails, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeTrailsInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeTrailsOutput{} + req.Data = output + return +} + +// Retrieves settings for the trail associated with the current region for your +// account. +func (c *CloudTrail) DescribeTrails(input *DescribeTrailsInput) (*DescribeTrailsOutput, error) { + req, out := c.DescribeTrailsRequest(input) + err := req.Send() + return out, err +} + +const opGetTrailStatus = "GetTrailStatus" + +// GetTrailStatusRequest generates a request for the GetTrailStatus operation. +func (c *CloudTrail) GetTrailStatusRequest(input *GetTrailStatusInput) (req *request.Request, output *GetTrailStatusOutput) { + op := &request.Operation{ + Name: opGetTrailStatus, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetTrailStatusInput{} + } + + req = c.newRequest(op, input, output) + output = &GetTrailStatusOutput{} + req.Data = output + return +} + +// Returns a JSON-formatted list of information about the specified trail. Fields +// include information on delivery errors, Amazon SNS and Amazon S3 errors, +// and start and stop logging times for each trail. This operation returns trail +// status from a single region. To return trail status from all regions, you +// must call the operation on each region. +func (c *CloudTrail) GetTrailStatus(input *GetTrailStatusInput) (*GetTrailStatusOutput, error) { + req, out := c.GetTrailStatusRequest(input) + err := req.Send() + return out, err +} + +const opListPublicKeys = "ListPublicKeys" + +// ListPublicKeysRequest generates a request for the ListPublicKeys operation. +func (c *CloudTrail) ListPublicKeysRequest(input *ListPublicKeysInput) (req *request.Request, output *ListPublicKeysOutput) { + op := &request.Operation{ + Name: opListPublicKeys, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ListPublicKeysInput{} + } + + req = c.newRequest(op, input, output) + output = &ListPublicKeysOutput{} + req.Data = output + return +} + +// Returns all public keys whose private keys were used to sign the digest files +// within the specified time range. The public key is needed to validate digest +// files that were signed with its corresponding private key. +// +// CloudTrail uses different private/public key pairs per region. Each digest +// file is signed with a private key unique to its region. Therefore, when you +// validate a digest file from a particular region, you must look in the same +// region for its corresponding public key. +func (c *CloudTrail) ListPublicKeys(input *ListPublicKeysInput) (*ListPublicKeysOutput, error) { + req, out := c.ListPublicKeysRequest(input) + err := req.Send() + return out, err +} + +const opListTags = "ListTags" + +// ListTagsRequest generates a request for the ListTags operation. +func (c *CloudTrail) ListTagsRequest(input *ListTagsInput) (req *request.Request, output *ListTagsOutput) { + op := &request.Operation{ + Name: opListTags, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ListTagsInput{} + } + + req = c.newRequest(op, input, output) + output = &ListTagsOutput{} + req.Data = output + return +} + +// Lists the tags for the specified trail or trails in the current region. 
+func (c *CloudTrail) ListTags(input *ListTagsInput) (*ListTagsOutput, error) {
+	req, out := c.ListTagsRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+const opLookupEvents = "LookupEvents"
+
+// LookupEventsRequest generates a request for the LookupEvents operation.
+func (c *CloudTrail) LookupEventsRequest(input *LookupEventsInput) (req *request.Request, output *LookupEventsOutput) {
+	op := &request.Operation{
+		Name:       opLookupEvents,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &LookupEventsInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &LookupEventsOutput{}
+	req.Data = output
+	return
+}
+
+// Looks up API activity events captured by CloudTrail that create, update,
+// or delete resources in your account. Events for a region can be looked up
+// for the times in which you had CloudTrail turned on in that region during
+// the last seven days. Lookup supports five different attributes: time range
+// (defined by a start time and end time), user name, event name, resource type,
+// and resource name. All attributes are optional. The maximum number of attributes
+// that can be specified in any one lookup request is time range plus one other
+// attribute. The default number of results returned is 10, with a maximum of
+// 50 possible. The response includes a token that you can use to get the next
+// page of results.
+//
+// The rate of lookup requests is limited to one per second per account. If
+// this limit is exceeded, a throttling error occurs. Events that occurred
+// during the selected time range will not be available for lookup if CloudTrail
+// logging was not enabled when the events occurred.
+func (c *CloudTrail) LookupEvents(input *LookupEventsInput) (*LookupEventsOutput, error) {
+	req, out := c.LookupEventsRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+const opRemoveTags = "RemoveTags"
+
+// RemoveTagsRequest generates a request for the RemoveTags operation.
+func (c *CloudTrail) RemoveTagsRequest(input *RemoveTagsInput) (req *request.Request, output *RemoveTagsOutput) {
+	op := &request.Operation{
+		Name:       opRemoveTags,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &RemoveTagsInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &RemoveTagsOutput{}
+	req.Data = output
+	return
+}
+
+// Removes the specified tags from a trail.
+func (c *CloudTrail) RemoveTags(input *RemoveTagsInput) (*RemoveTagsOutput, error) {
+	req, out := c.RemoveTagsRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+const opStartLogging = "StartLogging"
+
+// StartLoggingRequest generates a request for the StartLogging operation.
+func (c *CloudTrail) StartLoggingRequest(input *StartLoggingInput) (req *request.Request, output *StartLoggingOutput) {
+	op := &request.Operation{
+		Name:       opStartLogging,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &StartLoggingInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &StartLoggingOutput{}
+	req.Data = output
+	return
+}
+
+// Starts the recording of AWS API calls and log file delivery for a trail.
+// For a trail that is enabled in all regions, this operation must be called
+// from the region in which the trail was created. This operation cannot be
+// called on the shadow trails (replicated trails in other regions) of a trail
+// that is enabled in all regions.
+func (c *CloudTrail) StartLogging(input *StartLoggingInput) (*StartLoggingOutput, error) { + req, out := c.StartLoggingRequest(input) + err := req.Send() + return out, err +} + +const opStopLogging = "StopLogging" + +// StopLoggingRequest generates a request for the StopLogging operation. +func (c *CloudTrail) StopLoggingRequest(input *StopLoggingInput) (req *request.Request, output *StopLoggingOutput) { + op := &request.Operation{ + Name: opStopLogging, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &StopLoggingInput{} + } + + req = c.newRequest(op, input, output) + output = &StopLoggingOutput{} + req.Data = output + return +} + +// Suspends the recording of AWS API calls and log file delivery for the specified +// trail. Under most circumstances, there is no need to use this action. You +// can update a trail without stopping it first. This action is the only way +// to stop recording. For a trail enabled in all regions, this operation must +// be called from the region in which the trail was created, or an InvalidHomeRegionException +// will occur. This operation cannot be called on the shadow trails (replicated +// trails in other regions) of a trail enabled in all regions. +func (c *CloudTrail) StopLogging(input *StopLoggingInput) (*StopLoggingOutput, error) { + req, out := c.StopLoggingRequest(input) + err := req.Send() + return out, err +} + +const opUpdateTrail = "UpdateTrail" + +// UpdateTrailRequest generates a request for the UpdateTrail operation. +func (c *CloudTrail) UpdateTrailRequest(input *UpdateTrailInput) (req *request.Request, output *UpdateTrailOutput) { + op := &request.Operation{ + Name: opUpdateTrail, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UpdateTrailInput{} + } + + req = c.newRequest(op, input, output) + output = &UpdateTrailOutput{} + req.Data = output + return +} + +// Updates the settings that specify delivery of log files. Changes to a trail +// do not require stopping the CloudTrail service. Use this action to designate +// an existing bucket for log delivery. If the existing bucket has previously +// been a target for CloudTrail log files, an IAM policy exists for the bucket. +// UpdateTrail must be called from the region in which the trail was created; +// otherwise, an InvalidHomeRegionException is thrown. +func (c *CloudTrail) UpdateTrail(input *UpdateTrailInput) (*UpdateTrailOutput, error) { + req, out := c.UpdateTrailRequest(input) + err := req.Send() + return out, err +} + +// Specifies the tags to add to a trail. +type AddTagsInput struct { + _ struct{} `type:"structure"` + + // Specifies the ARN of the trail to which one or more tags will be added. The + // format of a trail ARN is arn:aws:cloudtrail:us-east-1:123456789012:trail/MyTrail. + ResourceId *string `type:"string" required:"true"` + + // Contains a list of CloudTrail tags, up to a limit of 10. + TagsList []*Tag `type:"list"` +} + +// String returns the string representation +func (s AddTagsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AddTagsInput) GoString() string { + return s.String() +} + +// Returns the objects or data listed below if successful. Otherwise, returns +// an error. 
+type AddTagsOutput struct {
+ _ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s AddTagsOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s AddTagsOutput) GoString() string {
+ return s.String()
+}
+
+// Specifies the settings for each trail.
+type CreateTrailInput struct {
+ _ struct{} `type:"structure"`
+
+ // Specifies a log group name using an Amazon Resource Name (ARN), a unique
+ // identifier that represents the log group to which CloudTrail logs will be
+ // delivered. Not required unless you specify CloudWatchLogsRoleArn.
+ CloudWatchLogsLogGroupArn *string `type:"string"`
+
+ // Specifies the role for the CloudWatch Logs endpoint to assume to write to
+ // a user's log group.
+ CloudWatchLogsRoleArn *string `type:"string"`
+
+ // Specifies whether log file integrity validation is enabled. The default is
+ // false.
+ //
+ // When you disable log file integrity validation, the chain of digest files
+ // is broken after one hour. CloudTrail will not create digest files for log
+ // files that were delivered during a period in which log file integrity validation
+ // was disabled. For example, if you enable log file integrity validation at
+ // noon on January 1, disable it at noon on January 2, and re-enable it at noon
+ // on January 10, digest files will not be created for the log files delivered
+ // from noon on January 2 to noon on January 10. The same applies whenever you
+ // stop CloudTrail logging or delete a trail.
+ EnableLogFileValidation *bool `type:"boolean"`
+
+ // Specifies whether the trail is publishing events from global services such
+ // as IAM to the log files.
+ IncludeGlobalServiceEvents *bool `type:"boolean"`
+
+ // Specifies whether the trail is created in the current region or in all regions.
+ // The default is false.
+ IsMultiRegionTrail *bool `type:"boolean"`
+
+ // Specifies the KMS key ID to use to encrypt the logs delivered by CloudTrail.
+ // The value can be an alias name prefixed by "alias/", a fully specified
+ // ARN to an alias, a fully specified ARN to a key, or a globally unique identifier.
+ //
+ // Examples:
+ //
+ //  alias/MyAliasName
+ //  arn:aws:kms:us-east-1:123456789012:alias/MyAliasName
+ //  arn:aws:kms:us-east-1:123456789012:key/12345678-1234-1234-1234-123456789012
+ //  12345678-1234-1234-1234-123456789012
+ KmsKeyId *string `type:"string"`
+
+ // Specifies the name of the trail. The name must meet the following requirements:
+ //
+ //  - Contain only ASCII letters (a-z, A-Z), numbers (0-9), periods (.),
+ //    underscores (_), or dashes (-)
+ //  - Start with a letter or number, and end with a letter or number
+ //  - Be between 3 and 128 characters
+ //  - Have no adjacent periods, underscores or dashes. Names like my-_namespace
+ //    and my--namespace are invalid.
+ //  - Not be in IP address format (for example, 192.168.5.4)
+ Name *string `type:"string" required:"true"`
+
+ // Specifies the name of the Amazon S3 bucket designated for publishing log
+ // files. See Amazon S3 Bucket Naming Requirements (http://docs.aws.amazon.com/awscloudtrail/latest/userguide/create_trail_naming_policy.html).
+ S3BucketName *string `type:"string" required:"true"`
+
+ // Specifies the Amazon S3 key prefix that comes after the name of the bucket
+ // you have designated for log file delivery. For more information, see Finding
+ // Your CloudTrail Log Files (http://docs.aws.amazon.com/awscloudtrail/latest/userguide/cloudtrail-find-log-files.html).
+ // The maximum length is 200 characters. + S3KeyPrefix *string `type:"string"` + + // Specifies the name of the Amazon SNS topic defined for notification of log + // file delivery. The maximum length is 256 characters. + SnsTopicName *string `type:"string"` +} + +// String returns the string representation +func (s CreateTrailInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateTrailInput) GoString() string { + return s.String() +} + +// Returns the objects or data listed below if successful. Otherwise, returns +// an error. +type CreateTrailOutput struct { + _ struct{} `type:"structure"` + + // Specifies the Amazon Resource Name (ARN) of the log group to which CloudTrail + // logs will be delivered. + CloudWatchLogsLogGroupArn *string `type:"string"` + + // Specifies the role for the CloudWatch Logs endpoint to assume to write to + // a user's log group. + CloudWatchLogsRoleArn *string `type:"string"` + + // Specifies whether the trail is publishing events from global services such + // as IAM to the log files. + IncludeGlobalServiceEvents *bool `type:"boolean"` + + // Specifies whether the trail exists in one region or in all regions. + IsMultiRegionTrail *bool `type:"boolean"` + + // Specifies the KMS key ID that encrypts the logs delivered by CloudTrail. + // The value is a fully specified ARN to a KMS key in the format: + // + // arn:aws:kms:us-east-1:123456789012:key/12345678-1234-1234-1234-123456789012 + KmsKeyId *string `type:"string"` + + // Specifies whether log file integrity validation is enabled. + LogFileValidationEnabled *bool `type:"boolean"` + + // Specifies the name of the trail. + Name *string `type:"string"` + + // Specifies the name of the Amazon S3 bucket designated for publishing log + // files. + S3BucketName *string `type:"string"` + + // Specifies the Amazon S3 key prefix that comes after the name of the bucket + // you have designated for log file delivery. For more information, see Finding + // Your CloudTrail Log Files (http://docs.aws.amazon.com/awscloudtrail/latest/userguide/cloudtrail-find-log-files.html). + S3KeyPrefix *string `type:"string"` + + // Specifies the name of the Amazon SNS topic defined for notification of log + // file delivery. + SnsTopicName *string `type:"string"` + + // Specifies the ARN of the trail that was created. + TrailARN *string `type:"string"` +} + +// String returns the string representation +func (s CreateTrailOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateTrailOutput) GoString() string { + return s.String() +} + +// The request that specifies the name of a trail to delete. +type DeleteTrailInput struct { + _ struct{} `type:"structure"` + + // Specifies the name or the CloudTrail ARN of the trail to be deleted. The + // format of a trail ARN is arn:aws:cloudtrail:us-east-1:123456789012:trail/MyTrail. + Name *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteTrailInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteTrailInput) GoString() string { + return s.String() +} + +// Returns the objects or data listed below if successful. Otherwise, returns +// an error. 
+type DeleteTrailOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteTrailOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteTrailOutput) GoString() string { + return s.String() +} + +// Returns information about the trail. +type DescribeTrailsInput struct { + _ struct{} `type:"structure"` + + // Specifies whether to include shadow trails in the response. A shadow trail + // is the replication in a region of a trail that was created in a different + // region. The default is true. + IncludeShadowTrails *bool `locationName:"includeShadowTrails" type:"boolean"` + + // Specifies a list of trail names, trail ARNs, or both, of the trails to describe. + // The format of a trail ARN is arn:aws:cloudtrail:us-east-1:123456789012:trail/MyTrail. + // If an empty list is specified, information for the trail in the current region + // is returned. + // + // If an empty list is specified and IncludeShadowTrails is false, then information + // for all trails in the current region is returned. If an empty list is specified + // and IncludeShadowTrails is null or true, then information for all trails + // in the current region and any associated shadow trails in other regions is + // returned. If one or more trail names are specified, information is returned + // only if the names match the names of trails belonging only to the current + // region. To return information about a trail in another region, you must specify + // its trail ARN. + TrailNameList []*string `locationName:"trailNameList" type:"list"` +} + +// String returns the string representation +func (s DescribeTrailsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeTrailsInput) GoString() string { + return s.String() +} + +// Returns the objects or data listed below if successful. Otherwise, returns +// an error. +type DescribeTrailsOutput struct { + _ struct{} `type:"structure"` + + // The list of trail objects. + TrailList []*Trail `locationName:"trailList" type:"list"` +} + +// String returns the string representation +func (s DescribeTrailsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeTrailsOutput) GoString() string { + return s.String() +} + +// Contains information about an event that was returned by a lookup request. +// The result includes a representation of a CloudTrail event. +type Event struct { + _ struct{} `type:"structure"` + + // A JSON string that contains a representation of the event returned. + CloudTrailEvent *string `type:"string"` + + // The CloudTrail ID of the event returned. + EventId *string `type:"string"` + + // The name of the event returned. + EventName *string `type:"string"` + + // The date and time of the event returned. + EventTime *time.Time `type:"timestamp" timestampFormat:"unix"` + + // A list of resources referenced by the event returned. + Resources []*Resource `type:"list"` + + // A user name or role name of the requester that called the API in the event + // returned. + Username *string `type:"string"` +} + +// String returns the string representation +func (s Event) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Event) GoString() string { + return s.String() +} + +// The name of a trail about which you want the current status. 
+type GetTrailStatusInput struct {
+ _ struct{} `type:"structure"`
+
+ // Specifies the name or the CloudTrail ARN of the trail for which you are requesting
+ // status. To get the status of a shadow trail (a replication of the trail in
+ // another region), you must specify its ARN. The format of a trail ARN is arn:aws:cloudtrail:us-east-1:123456789012:trail/MyTrail.
+ Name *string `type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s GetTrailStatusInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetTrailStatusInput) GoString() string {
+ return s.String()
+}
+
+// Returns the objects or data listed below if successful. Otherwise, returns
+// an error.
+type GetTrailStatusOutput struct {
+ _ struct{} `type:"structure"`
+
+ // Whether the CloudTrail is currently logging AWS API calls.
+ IsLogging *bool `type:"boolean"`
+
+ // Displays any CloudWatch Logs error that CloudTrail encountered when attempting
+ // to deliver logs to CloudWatch Logs.
+ LatestCloudWatchLogsDeliveryError *string `type:"string"`
+
+ // Displays the most recent date and time when CloudTrail delivered logs to
+ // CloudWatch Logs.
+ LatestCloudWatchLogsDeliveryTime *time.Time `type:"timestamp" timestampFormat:"unix"`
+
+ // This field is deprecated.
+ LatestDeliveryAttemptSucceeded *string `type:"string"`
+
+ // This field is deprecated.
+ LatestDeliveryAttemptTime *string `type:"string"`
+
+ // Displays any Amazon S3 error that CloudTrail encountered when attempting
+ // to deliver log files to the designated bucket. For more information, see the
+ // topic Error Responses (http://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html)
+ // in the Amazon S3 API Reference.
+ //
+ // This error occurs only when there is a problem with the destination S3 bucket
+ // and will not occur for timeouts. To resolve the issue, create a new bucket
+ // and call UpdateTrail to specify the new bucket, or fix the existing objects
+ // so that CloudTrail can again write to the bucket.
+ LatestDeliveryError *string `type:"string"`
+
+ // Specifies the date and time that CloudTrail last delivered log files to an
+ // account's Amazon S3 bucket.
+ LatestDeliveryTime *time.Time `type:"timestamp" timestampFormat:"unix"`
+
+ // Displays any Amazon S3 error that CloudTrail encountered when attempting
+ // to deliver a digest file to the designated bucket. For more information, see
+ // the topic Error Responses (http://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html)
+ // in the Amazon S3 API Reference.
+ //
+ // This error occurs only when there is a problem with the destination S3 bucket
+ // and will not occur for timeouts. To resolve the issue, create a new bucket
+ // and call UpdateTrail to specify the new bucket, or fix the existing objects
+ // so that CloudTrail can again write to the bucket.
+ LatestDigestDeliveryError *string `type:"string"`
+
+ // Specifies the date and time that CloudTrail last delivered a digest file
+ // to an account's Amazon S3 bucket.
+ LatestDigestDeliveryTime *time.Time `type:"timestamp" timestampFormat:"unix"`
+
+ // This field is deprecated.
+ LatestNotificationAttemptSucceeded *string `type:"string"`
+
+ // This field is deprecated.
+ LatestNotificationAttemptTime *string `type:"string"`
+
+ // Displays any Amazon SNS error that CloudTrail encountered when attempting
+ // to send a notification.
For more information about Amazon SNS errors, see + // the Amazon SNS Developer Guide (http://docs.aws.amazon.com/sns/latest/dg/welcome.html). + LatestNotificationError *string `type:"string"` + + // Specifies the date and time of the most recent Amazon SNS notification that + // CloudTrail has written a new log file to an account's Amazon S3 bucket. + LatestNotificationTime *time.Time `type:"timestamp" timestampFormat:"unix"` + + // Specifies the most recent date and time when CloudTrail started recording + // API calls for an AWS account. + StartLoggingTime *time.Time `type:"timestamp" timestampFormat:"unix"` + + // Specifies the most recent date and time when CloudTrail stopped recording + // API calls for an AWS account. + StopLoggingTime *time.Time `type:"timestamp" timestampFormat:"unix"` + + // This field is deprecated. + TimeLoggingStarted *string `type:"string"` + + // This field is deprecated. + TimeLoggingStopped *string `type:"string"` +} + +// String returns the string representation +func (s GetTrailStatusOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetTrailStatusOutput) GoString() string { + return s.String() +} + +// Requests the public keys for a specified time range. +type ListPublicKeysInput struct { + _ struct{} `type:"structure"` + + // Optionally specifies, in UTC, the end of the time range to look up public + // keys for CloudTrail digest files. If not specified, the current time is used. + EndTime *time.Time `type:"timestamp" timestampFormat:"unix"` + + // Reserved for future use. + NextToken *string `type:"string"` + + // Optionally specifies, in UTC, the start of the time range to look up public + // keys for CloudTrail digest files. If not specified, the current time is used, + // and the current public key is returned. + StartTime *time.Time `type:"timestamp" timestampFormat:"unix"` +} + +// String returns the string representation +func (s ListPublicKeysInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListPublicKeysInput) GoString() string { + return s.String() +} + +// Returns the objects or data listed below if successful. Otherwise, returns +// an error. +type ListPublicKeysOutput struct { + _ struct{} `type:"structure"` + + // Reserved for future use. + NextToken *string `type:"string"` + + // Contains an array of PublicKey objects. + // + // The returned public keys may have validity time ranges that overlap. + PublicKeyList []*PublicKey `type:"list"` +} + +// String returns the string representation +func (s ListPublicKeysOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListPublicKeysOutput) GoString() string { + return s.String() +} + +// Specifies a list of trail tags to return. +type ListTagsInput struct { + _ struct{} `type:"structure"` + + // Reserved for future use. + NextToken *string `type:"string"` + + // Specifies a list of trail ARNs whose tags will be listed. The list has a + // limit of 20 ARNs. The format of a trail ARN is arn:aws:cloudtrail:us-east-1:123456789012:trail/MyTrail. + ResourceIdList []*string `type:"list" required:"true"` +} + +// String returns the string representation +func (s ListTagsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListTagsInput) GoString() string { + return s.String() +} + +// Returns the objects or data listed below if successful. 
Otherwise, returns
+// an error.
+type ListTagsOutput struct {
+ _ struct{} `type:"structure"`
+
+ // Reserved for future use.
+ NextToken *string `type:"string"`
+
+ // A list of resource tags.
+ ResourceTagList []*ResourceTag `type:"list"`
+}
+
+// String returns the string representation
+func (s ListTagsOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListTagsOutput) GoString() string {
+ return s.String()
+}
+
+// Specifies an attribute and value that filter the events returned.
+type LookupAttribute struct {
+ _ struct{} `type:"structure"`
+
+ // Specifies an attribute on which to filter the events returned.
+ AttributeKey *string `type:"string" required:"true" enum:"LookupAttributeKey"`
+
+ // Specifies a value for the specified AttributeKey.
+ AttributeValue *string `type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s LookupAttribute) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s LookupAttribute) GoString() string {
+ return s.String()
+}
+
+// Contains a request for LookupEvents.
+type LookupEventsInput struct {
+ _ struct{} `type:"structure"`
+
+ // Specifies that only events that occur before or at the specified time are
+ // returned. If the specified end time is before the specified start time, an
+ // error is returned.
+ EndTime *time.Time `type:"timestamp" timestampFormat:"unix"`
+
+ // Contains a list of lookup attributes. Currently the list can contain only
+ // one item.
+ LookupAttributes []*LookupAttribute `type:"list"`
+
+ // The number of events to return. Possible values are 1 through 50. The default
+ // is 10.
+ MaxResults *int64 `min:"1" type:"integer"`
+
+ // The token to use to get the next page of results after a previous API call.
+ // This token must be passed in with the same parameters that were specified
+ // in the original call. For example, if the original call specified an
+ // AttributeKey of 'Username' with a value of 'root', the call with NextToken
+ // should include those same parameters.
+ NextToken *string `type:"string"`
+
+ // Specifies that only events that occur after or at the specified time are
+ // returned. If the specified start time is after the specified end time, an
+ // error is returned.
+ StartTime *time.Time `type:"timestamp" timestampFormat:"unix"`
+}
+
+// String returns the string representation
+func (s LookupEventsInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s LookupEventsInput) GoString() string {
+ return s.String()
+}
+
+// Contains a response to a LookupEvents action.
+type LookupEventsOutput struct {
+ _ struct{} `type:"structure"`
+
+ // A list of events returned based on the lookup attributes specified and the
+ // CloudTrail event. The events list is sorted by time. The most recent event
+ // is listed first.
+ Events []*Event `type:"list"`
+
+ // The token to use to get the next page of results after a previous API call.
+ // If the token does not appear, there are no more results to return. The token
+ // must be passed in with the same parameters as the previous call. For example,
+ // if the original call specified an AttributeKey of 'Username' with a value
+ // of 'root', the call with NextToken should include those same parameters.
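+ //
+ // The paging contract above lends itself to a simple loop. A minimal sketch
+ // (not part of the generated API; svc is assumed to be an initialized
+ // *cloudtrail.CloudTrail and handleEvent a hypothetical callback):
+ //
+ //  params := &cloudtrail.LookupEventsInput{}
+ //  for {
+ //   resp, err := svc.LookupEvents(params)
+ //   if err != nil {
+ //    return err // handle the error in real code
+ //   }
+ //   for _, e := range resp.Events {
+ //    handleEvent(e) // process one cloudtrail.Event
+ //   }
+ //   if resp.NextToken == nil {
+ //    break // no token means no more pages
+ //   }
+ //   params.NextToken = resp.NextToken // same parameters, plus the token
+ //  }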
+ NextToken *string `type:"string"` +} + +// String returns the string representation +func (s LookupEventsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s LookupEventsOutput) GoString() string { + return s.String() +} + +// Contains information about a returned public key. +type PublicKey struct { + _ struct{} `type:"structure"` + + // The fingerprint of the public key. + Fingerprint *string `type:"string"` + + // The ending time of validity of the public key. + ValidityEndTime *time.Time `type:"timestamp" timestampFormat:"unix"` + + // The starting time of validity of the public key. + ValidityStartTime *time.Time `type:"timestamp" timestampFormat:"unix"` + + // The DER encoded public key value in PKCS#1 format. + Value []byte `type:"blob"` +} + +// String returns the string representation +func (s PublicKey) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PublicKey) GoString() string { + return s.String() +} + +// Specifies the tags to remove from a trail. +type RemoveTagsInput struct { + _ struct{} `type:"structure"` + + // Specifies the ARN of the trail from which tags should be removed. The format + // of a trail ARN is arn:aws:cloudtrail:us-east-1:123456789012:trail/MyTrail. + ResourceId *string `type:"string" required:"true"` + + // Specifies a list of tags to be removed. + TagsList []*Tag `type:"list"` +} + +// String returns the string representation +func (s RemoveTagsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RemoveTagsInput) GoString() string { + return s.String() +} + +// Returns the objects or data listed below if successful. Otherwise, returns +// an error. +type RemoveTagsOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s RemoveTagsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RemoveTagsOutput) GoString() string { + return s.String() +} + +// Specifies the type and name of a resource referenced by an event. +type Resource struct { + _ struct{} `type:"structure"` + + // The name of the resource referenced by the event returned. These are user-created + // names whose values will depend on the environment. For example, the resource + // name might be "auto-scaling-test-group" for an Auto Scaling Group or "i-1234567" + // for an EC2 Instance. + ResourceName *string `type:"string"` + + // The type of a resource referenced by the event returned. When the resource + // type cannot be determined, null is returned. Some examples of resource types + // are: Instance for EC2, Trail for CloudTrail, DBInstance for RDS, and AccessKey + // for IAM. For a list of resource types supported for event lookup, see Resource + // Types Supported for Event Lookup (http://docs.aws.amazon.com/awscloudtrail/latest/userguide/lookup_supported_resourcetypes.html). + ResourceType *string `type:"string"` +} + +// String returns the string representation +func (s Resource) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Resource) GoString() string { + return s.String() +} + +// A resource tag. +type ResourceTag struct { + _ struct{} `type:"structure"` + + // Specifies the ARN of the resource. + ResourceId *string `type:"string"` + + // A list of tags. 
+ TagsList []*Tag `type:"list"`
+}
+
+// String returns the string representation
+func (s ResourceTag) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ResourceTag) GoString() string {
+ return s.String()
+}
+
+// The request to CloudTrail to start logging AWS API calls for an account.
+type StartLoggingInput struct {
+ _ struct{} `type:"structure"`
+
+ // Specifies the name or the CloudTrail ARN of the trail for which CloudTrail
+ // logs AWS API calls. The format of a trail ARN is arn:aws:cloudtrail:us-east-1:123456789012:trail/MyTrail.
+ Name *string `type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s StartLoggingInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s StartLoggingInput) GoString() string {
+ return s.String()
+}
+
+// Returns the objects or data listed below if successful. Otherwise, returns
+// an error.
+type StartLoggingOutput struct {
+ _ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s StartLoggingOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s StartLoggingOutput) GoString() string {
+ return s.String()
+}
+
+// Passes the request to CloudTrail to stop logging AWS API calls for the specified
+// account.
+type StopLoggingInput struct {
+ _ struct{} `type:"structure"`
+
+ // Specifies the name or the CloudTrail ARN of the trail for which CloudTrail
+ // will stop logging AWS API calls. The format of a trail ARN is arn:aws:cloudtrail:us-east-1:123456789012:trail/MyTrail.
+ Name *string `type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s StopLoggingInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s StopLoggingInput) GoString() string {
+ return s.String()
+}
+
+// Returns the objects or data listed below if successful. Otherwise, returns
+// an error.
+type StopLoggingOutput struct {
+ _ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s StopLoggingOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s StopLoggingOutput) GoString() string {
+ return s.String()
+}
+
+// A custom key-value pair associated with a resource such as a CloudTrail trail.
+type Tag struct {
+ _ struct{} `type:"structure"`
+
+ // The key in a key-value pair. The key must be no longer than 128 Unicode
+ // characters. The key must be unique for the resource to which it applies.
+ Key *string `type:"string" required:"true"`
+
+ // The value in a key-value pair of a tag. The value must be no longer than
+ // 256 Unicode characters.
+ Value *string `type:"string"`
+}
+
+// String returns the string representation
+func (s Tag) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s Tag) GoString() string {
+ return s.String()
+}
+
+// The settings for a trail.
+type Trail struct {
+ _ struct{} `type:"structure"`
+
+ // Specifies an Amazon Resource Name (ARN), a unique identifier that represents
+ // the log group to which CloudTrail logs will be delivered.
+ CloudWatchLogsLogGroupArn *string `type:"string"`
+
+ // Specifies the role for the CloudWatch Logs endpoint to assume to write to
+ // a user's log group.
+ CloudWatchLogsRoleArn *string `type:"string"`
+
+ // The region in which the trail was created.
+ HomeRegion *string `type:"string"`
+
+ // Set to True to include AWS API calls from AWS global services such as IAM.
+ // Otherwise, False.
+ IncludeGlobalServiceEvents *bool `type:"boolean"`
+
+ // Specifies whether the trail belongs only to one region or exists in all regions.
+ IsMultiRegionTrail *bool `type:"boolean"`
+
+ // Specifies the KMS key ID that encrypts the logs delivered by CloudTrail.
+ // The value is a fully specified ARN to a KMS key in the format:
+ //
+ //  arn:aws:kms:us-east-1:123456789012:key/12345678-1234-1234-1234-123456789012
+ KmsKeyId *string `type:"string"`
+
+ // Specifies whether log file validation is enabled.
+ LogFileValidationEnabled *bool `type:"boolean"`
+
+ // Name of the trail set by calling CreateTrail. The maximum length is 128 characters.
+ Name *string `type:"string"`
+
+ // Name of the Amazon S3 bucket into which CloudTrail delivers your trail files.
+ // See Amazon S3 Bucket Naming Requirements (http://docs.aws.amazon.com/awscloudtrail/latest/userguide/create_trail_naming_policy.html).
+ S3BucketName *string `type:"string"`
+
+ // Specifies the Amazon S3 key prefix that comes after the name of the bucket
+ // you have designated for log file delivery. For more information, see Finding
+ // Your CloudTrail Log Files (http://docs.aws.amazon.com/awscloudtrail/latest/userguide/cloudtrail-find-log-files.html).
+ // The maximum length is 200 characters.
+ S3KeyPrefix *string `type:"string"`
+
+ // Name of the existing Amazon SNS topic that CloudTrail uses to notify the
+ // account owner when new CloudTrail log files have been delivered. The maximum
+ // length is 256 characters.
+ SnsTopicName *string `type:"string"`
+
+ // The Amazon Resource Name of the trail. The TrailARN format is arn:aws:cloudtrail:us-east-1:123456789012:trail/MyTrail.
+ TrailARN *string `type:"string"`
+}
+
+// String returns the string representation
+func (s Trail) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s Trail) GoString() string {
+ return s.String()
+}
+
+// Specifies settings to update for the trail.
+type UpdateTrailInput struct {
+ _ struct{} `type:"structure"`
+
+ // Specifies a log group name using an Amazon Resource Name (ARN), a unique
+ // identifier that represents the log group to which CloudTrail logs will be
+ // delivered. Not required unless you specify CloudWatchLogsRoleArn.
+ CloudWatchLogsLogGroupArn *string `type:"string"`
+
+ // Specifies the role for the CloudWatch Logs endpoint to assume to write to
+ // a user's log group.
+ CloudWatchLogsRoleArn *string `type:"string"`
+
+ // Specifies whether log file validation is enabled. The default is false.
+ //
+ // When you disable log file integrity validation, the chain of digest files
+ // is broken after one hour. CloudTrail will not create digest files for log
+ // files that were delivered during a period in which log file integrity validation
+ // was disabled. For example, if you enable log file integrity validation at
+ // noon on January 1, disable it at noon on January 2, and re-enable it at noon
+ // on January 10, digest files will not be created for the log files delivered
+ // from noon on January 2 to noon on January 10. The same applies whenever you
+ // stop CloudTrail logging or delete a trail.
+ EnableLogFileValidation *bool `type:"boolean"`
+
+ // Specifies whether the trail is publishing events from global services such
+ // as IAM to the log files.
+ IncludeGlobalServiceEvents *bool `type:"boolean"`
+
+ // Specifies whether the trail applies only to the current region or to all
+ // regions. The default is false. If the trail exists only in the current region
+ // and this value is set to true, shadow trails (replications of the trail)
+ // will be created in the other regions. If the trail exists in all regions
+ // and this value is set to false, the trail will remain in the region where
+ // it was created, and its shadow trails in other regions will be deleted.
+ IsMultiRegionTrail *bool `type:"boolean"`
+
+ // Specifies the KMS key ID to use to encrypt the logs delivered by CloudTrail.
+ // The value can be an alias name prefixed by "alias/", a fully specified
+ // ARN to an alias, a fully specified ARN to a key, or a globally unique identifier.
+ //
+ // Examples:
+ //
+ //  alias/MyAliasName
+ //  arn:aws:kms:us-east-1:123456789012:alias/MyAliasName
+ //  arn:aws:kms:us-east-1:123456789012:key/12345678-1234-1234-1234-123456789012
+ //  12345678-1234-1234-1234-123456789012
+ KmsKeyId *string `type:"string"`
+
+ // Specifies the name of the trail or trail ARN. If Name is a trail name, the
+ // string must meet the following requirements:
+ //
+ //  - Contain only ASCII letters (a-z, A-Z), numbers (0-9), periods (.),
+ //    underscores (_), or dashes (-)
+ //  - Start with a letter or number, and end with a letter or number
+ //  - Be between 3 and 128 characters
+ //  - Have no adjacent periods, underscores or dashes. Names like my-_namespace
+ //    and my--namespace are invalid.
+ //  - Not be in IP address format (for example, 192.168.5.4)
+ //
+ // If Name is a trail ARN, it must be in the format arn:aws:cloudtrail:us-east-1:123456789012:trail/MyTrail.
+ Name *string `type:"string" required:"true"`
+
+ // Specifies the name of the Amazon S3 bucket designated for publishing log
+ // files. See Amazon S3 Bucket Naming Requirements (http://docs.aws.amazon.com/awscloudtrail/latest/userguide/create_trail_naming_policy.html).
+ S3BucketName *string `type:"string"`
+
+ // Specifies the Amazon S3 key prefix that comes after the name of the bucket
+ // you have designated for log file delivery. For more information, see Finding
+ // Your CloudTrail Log Files (http://docs.aws.amazon.com/awscloudtrail/latest/userguide/cloudtrail-find-log-files.html).
+ // The maximum length is 200 characters.
+ S3KeyPrefix *string `type:"string"`
+
+ // Specifies the name of the Amazon SNS topic defined for notification of log
+ // file delivery. The maximum length is 256 characters.
+ SnsTopicName *string `type:"string"`
+}
+
+// String returns the string representation
+func (s UpdateTrailInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s UpdateTrailInput) GoString() string {
+ return s.String()
+}
+
+// Returns the objects or data listed below if successful. Otherwise, returns
+// an error.
+type UpdateTrailOutput struct {
+ _ struct{} `type:"structure"`
+
+ // Specifies the Amazon Resource Name (ARN) of the log group to which CloudTrail
+ // logs will be delivered.
+ CloudWatchLogsLogGroupArn *string `type:"string"`
+
+ // Specifies the role for the CloudWatch Logs endpoint to assume to write to
+ // a user's log group.
+ CloudWatchLogsRoleArn *string `type:"string"` + + // Specifies whether the trail is publishing events from global services such + // as IAM to the log files. + IncludeGlobalServiceEvents *bool `type:"boolean"` + + // Specifies whether the trail exists in one region or in all regions. + IsMultiRegionTrail *bool `type:"boolean"` + + // Specifies the KMS key ID that encrypts the logs delivered by CloudTrail. + // The value is a fully specified ARN to a KMS key in the format: + // + // arn:aws:kms:us-east-1:123456789012:key/12345678-1234-1234-1234-123456789012 + KmsKeyId *string `type:"string"` + + // Specifies whether log file integrity validation is enabled. + LogFileValidationEnabled *bool `type:"boolean"` + + // Specifies the name of the trail. + Name *string `type:"string"` + + // Specifies the name of the Amazon S3 bucket designated for publishing log + // files. + S3BucketName *string `type:"string"` + + // Specifies the Amazon S3 key prefix that comes after the name of the bucket + // you have designated for log file delivery. For more information, see Finding + // Your CloudTrail Log Files (http://docs.aws.amazon.com/awscloudtrail/latest/userguide/cloudtrail-find-log-files.html). + S3KeyPrefix *string `type:"string"` + + // Specifies the name of the Amazon SNS topic defined for notification of log + // file delivery. + SnsTopicName *string `type:"string"` + + // Specifies the ARN of the trail that was updated. + TrailARN *string `type:"string"` +} + +// String returns the string representation +func (s UpdateTrailOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateTrailOutput) GoString() string { + return s.String() +} + +const ( + // @enum LookupAttributeKey + LookupAttributeKeyEventId = "EventId" + // @enum LookupAttributeKey + LookupAttributeKeyEventName = "EventName" + // @enum LookupAttributeKey + LookupAttributeKeyUsername = "Username" + // @enum LookupAttributeKey + LookupAttributeKeyResourceType = "ResourceType" + // @enum LookupAttributeKey + LookupAttributeKeyResourceName = "ResourceName" +) diff --git a/vendor/github.com/aws/aws-sdk-go/service/cloudtrail/cloudtrailiface/interface.go b/vendor/github.com/aws/aws-sdk-go/service/cloudtrail/cloudtrailiface/interface.go new file mode 100644 index 000000000..3100f701a --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/cloudtrail/cloudtrailiface/interface.go @@ -0,0 +1,62 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +// Package cloudtrailiface provides an interface for the AWS CloudTrail. +package cloudtrailiface + +import ( + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/service/cloudtrail" +) + +// CloudTrailAPI is the interface type for cloudtrail.CloudTrail. 
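+//
+// One common use of this interface is stubbing the CloudTrail client in unit
+// tests. A minimal sketch (not part of the generated package; mockCloudTrail
+// is a hypothetical test type that embeds the interface and overrides only
+// the methods the test exercises):
+//
+//  type mockCloudTrail struct {
+//   cloudtrailiface.CloudTrailAPI // embedded; unoverridden methods are left nil
+//  }
+//
+//  func (m *mockCloudTrail) DescribeTrails(in *cloudtrail.DescribeTrailsInput) (*cloudtrail.DescribeTrailsOutput, error) {
+//   return &cloudtrail.DescribeTrailsOutput{}, nil // canned response for tests
+//  }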
+type CloudTrailAPI interface { + AddTagsRequest(*cloudtrail.AddTagsInput) (*request.Request, *cloudtrail.AddTagsOutput) + + AddTags(*cloudtrail.AddTagsInput) (*cloudtrail.AddTagsOutput, error) + + CreateTrailRequest(*cloudtrail.CreateTrailInput) (*request.Request, *cloudtrail.CreateTrailOutput) + + CreateTrail(*cloudtrail.CreateTrailInput) (*cloudtrail.CreateTrailOutput, error) + + DeleteTrailRequest(*cloudtrail.DeleteTrailInput) (*request.Request, *cloudtrail.DeleteTrailOutput) + + DeleteTrail(*cloudtrail.DeleteTrailInput) (*cloudtrail.DeleteTrailOutput, error) + + DescribeTrailsRequest(*cloudtrail.DescribeTrailsInput) (*request.Request, *cloudtrail.DescribeTrailsOutput) + + DescribeTrails(*cloudtrail.DescribeTrailsInput) (*cloudtrail.DescribeTrailsOutput, error) + + GetTrailStatusRequest(*cloudtrail.GetTrailStatusInput) (*request.Request, *cloudtrail.GetTrailStatusOutput) + + GetTrailStatus(*cloudtrail.GetTrailStatusInput) (*cloudtrail.GetTrailStatusOutput, error) + + ListPublicKeysRequest(*cloudtrail.ListPublicKeysInput) (*request.Request, *cloudtrail.ListPublicKeysOutput) + + ListPublicKeys(*cloudtrail.ListPublicKeysInput) (*cloudtrail.ListPublicKeysOutput, error) + + ListTagsRequest(*cloudtrail.ListTagsInput) (*request.Request, *cloudtrail.ListTagsOutput) + + ListTags(*cloudtrail.ListTagsInput) (*cloudtrail.ListTagsOutput, error) + + LookupEventsRequest(*cloudtrail.LookupEventsInput) (*request.Request, *cloudtrail.LookupEventsOutput) + + LookupEvents(*cloudtrail.LookupEventsInput) (*cloudtrail.LookupEventsOutput, error) + + RemoveTagsRequest(*cloudtrail.RemoveTagsInput) (*request.Request, *cloudtrail.RemoveTagsOutput) + + RemoveTags(*cloudtrail.RemoveTagsInput) (*cloudtrail.RemoveTagsOutput, error) + + StartLoggingRequest(*cloudtrail.StartLoggingInput) (*request.Request, *cloudtrail.StartLoggingOutput) + + StartLogging(*cloudtrail.StartLoggingInput) (*cloudtrail.StartLoggingOutput, error) + + StopLoggingRequest(*cloudtrail.StopLoggingInput) (*request.Request, *cloudtrail.StopLoggingOutput) + + StopLogging(*cloudtrail.StopLoggingInput) (*cloudtrail.StopLoggingOutput, error) + + UpdateTrailRequest(*cloudtrail.UpdateTrailInput) (*request.Request, *cloudtrail.UpdateTrailOutput) + + UpdateTrail(*cloudtrail.UpdateTrailInput) (*cloudtrail.UpdateTrailOutput, error) +} + +var _ CloudTrailAPI = (*cloudtrail.CloudTrail)(nil) diff --git a/vendor/github.com/aws/aws-sdk-go/service/cloudtrail/examples_test.go b/vendor/github.com/aws/aws-sdk-go/service/cloudtrail/examples_test.go new file mode 100644 index 000000000..2a78c44e7 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/cloudtrail/examples_test.go @@ -0,0 +1,296 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +package cloudtrail_test + +import ( + "bytes" + "fmt" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/session" + "github.com/aws/aws-sdk-go/service/cloudtrail" +) + +var _ time.Duration +var _ bytes.Buffer + +func ExampleCloudTrail_AddTags() { + svc := cloudtrail.New(session.New()) + + params := &cloudtrail.AddTagsInput{ + ResourceId: aws.String("String"), // Required + TagsList: []*cloudtrail.Tag{ + { // Required + Key: aws.String("String"), // Required + Value: aws.String("String"), + }, + // More values... + }, + } + resp, err := svc.AddTags(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleCloudTrail_CreateTrail() { + svc := cloudtrail.New(session.New()) + + params := &cloudtrail.CreateTrailInput{ + Name: aws.String("String"), // Required + S3BucketName: aws.String("String"), // Required + CloudWatchLogsLogGroupArn: aws.String("String"), + CloudWatchLogsRoleArn: aws.String("String"), + EnableLogFileValidation: aws.Bool(true), + IncludeGlobalServiceEvents: aws.Bool(true), + IsMultiRegionTrail: aws.Bool(true), + KmsKeyId: aws.String("String"), + S3KeyPrefix: aws.String("String"), + SnsTopicName: aws.String("String"), + } + resp, err := svc.CreateTrail(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCloudTrail_DeleteTrail() { + svc := cloudtrail.New(session.New()) + + params := &cloudtrail.DeleteTrailInput{ + Name: aws.String("String"), // Required + } + resp, err := svc.DeleteTrail(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCloudTrail_DescribeTrails() { + svc := cloudtrail.New(session.New()) + + params := &cloudtrail.DescribeTrailsInput{ + IncludeShadowTrails: aws.Bool(true), + TrailNameList: []*string{ + aws.String("String"), // Required + // More values... + }, + } + resp, err := svc.DescribeTrails(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCloudTrail_GetTrailStatus() { + svc := cloudtrail.New(session.New()) + + params := &cloudtrail.GetTrailStatusInput{ + Name: aws.String("String"), // Required + } + resp, err := svc.GetTrailStatus(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCloudTrail_ListPublicKeys() { + svc := cloudtrail.New(session.New()) + + params := &cloudtrail.ListPublicKeysInput{ + EndTime: aws.Time(time.Now()), + NextToken: aws.String("String"), + StartTime: aws.Time(time.Now()), + } + resp, err := svc.ListPublicKeys(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCloudTrail_ListTags() { + svc := cloudtrail.New(session.New()) + + params := &cloudtrail.ListTagsInput{ + ResourceIdList: []*string{ // Required + aws.String("String"), // Required + // More values... + }, + NextToken: aws.String("String"), + } + resp, err := svc.ListTags(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleCloudTrail_LookupEvents() { + svc := cloudtrail.New(session.New()) + + params := &cloudtrail.LookupEventsInput{ + EndTime: aws.Time(time.Now()), + LookupAttributes: []*cloudtrail.LookupAttribute{ + { // Required + AttributeKey: aws.String("LookupAttributeKey"), // Required + AttributeValue: aws.String("String"), // Required + }, + // More values... + }, + MaxResults: aws.Int64(1), + NextToken: aws.String("NextToken"), + StartTime: aws.Time(time.Now()), + } + resp, err := svc.LookupEvents(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCloudTrail_RemoveTags() { + svc := cloudtrail.New(session.New()) + + params := &cloudtrail.RemoveTagsInput{ + ResourceId: aws.String("String"), // Required + TagsList: []*cloudtrail.Tag{ + { // Required + Key: aws.String("String"), // Required + Value: aws.String("String"), + }, + // More values... + }, + } + resp, err := svc.RemoveTags(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCloudTrail_StartLogging() { + svc := cloudtrail.New(session.New()) + + params := &cloudtrail.StartLoggingInput{ + Name: aws.String("String"), // Required + } + resp, err := svc.StartLogging(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCloudTrail_StopLogging() { + svc := cloudtrail.New(session.New()) + + params := &cloudtrail.StopLoggingInput{ + Name: aws.String("String"), // Required + } + resp, err := svc.StopLogging(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCloudTrail_UpdateTrail() { + svc := cloudtrail.New(session.New()) + + params := &cloudtrail.UpdateTrailInput{ + Name: aws.String("String"), // Required + CloudWatchLogsLogGroupArn: aws.String("String"), + CloudWatchLogsRoleArn: aws.String("String"), + EnableLogFileValidation: aws.Bool(true), + IncludeGlobalServiceEvents: aws.Bool(true), + IsMultiRegionTrail: aws.Bool(true), + KmsKeyId: aws.String("String"), + S3BucketName: aws.String("String"), + S3KeyPrefix: aws.String("String"), + SnsTopicName: aws.String("String"), + } + resp, err := svc.UpdateTrail(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/cloudtrail/service.go b/vendor/github.com/aws/aws-sdk-go/service/cloudtrail/service.go new file mode 100644 index 000000000..17a6af85e --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/cloudtrail/service.go @@ -0,0 +1,106 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. 
+
+package cloudtrail
+
+import (
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/client"
+ "github.com/aws/aws-sdk-go/aws/client/metadata"
+ "github.com/aws/aws-sdk-go/aws/request"
+ "github.com/aws/aws-sdk-go/private/protocol/jsonrpc"
+ "github.com/aws/aws-sdk-go/private/signer/v4"
+)
+
+// This is the CloudTrail API Reference. It provides descriptions of actions,
+// data types, common parameters, and common errors for CloudTrail.
+//
+// CloudTrail is a web service that records AWS API calls for your AWS account
+// and delivers log files to an Amazon S3 bucket. The recorded information includes
+// the identity of the user, the start time of the AWS API call, the source
+// IP address, the request parameters, and the response elements returned by
+// the service.
+//
+// As an alternative to using the API, you can use one of the AWS SDKs, which
+// consist of libraries and sample code for various programming languages and
+// platforms (Java, Ruby, .NET, iOS, Android, etc.). The SDKs provide a convenient
+// way to create programmatic access to AWS CloudTrail. For example, the SDKs
+// take care of cryptographically signing requests, managing errors, and retrying
+// requests automatically. For information about the AWS SDKs, including how
+// to download and install them, see the Tools for Amazon Web Services page
+// (http://aws.amazon.com/tools/). See the CloudTrail User Guide for information
+// about the data that is included with each AWS API call listed in the log
+// files.
+//
+// The service client's operations are safe to be used concurrently.
+// It is not safe to mutate any of the client's properties though.
+type CloudTrail struct {
+ *client.Client
+}
+
+// Used for custom client initialization logic
+var initClient func(*client.Client)
+
+// Used for custom request initialization logic
+var initRequest func(*request.Request)
+
+// A ServiceName is the name of the service the client will make API calls to.
+const ServiceName = "cloudtrail"
+
+// New creates a new instance of the CloudTrail client with a session.
+// If additional configuration is needed for the client instance use the optional
+// aws.Config parameter to add your extra config.
+//
+// Example:
+// // Create a CloudTrail client from just a session.
+// svc := cloudtrail.New(mySession)
+//
+// // Create a CloudTrail client with additional configuration
+// svc := cloudtrail.New(mySession, aws.NewConfig().WithRegion("us-west-2"))
+func New(p client.ConfigProvider, cfgs ...*aws.Config) *CloudTrail {
+ c := p.ClientConfig(ServiceName, cfgs...)
+ return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion)
+}
+
+// newClient creates, initializes and returns a new service client instance.
+func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *CloudTrail { + svc := &CloudTrail{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: ServiceName, + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "2013-11-01", + JSONVersion: "1.1", + TargetPrefix: "com.amazonaws.cloudtrail.v20131101.CloudTrail_20131101", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Build.PushBack(jsonrpc.Build) + svc.Handlers.Unmarshal.PushBack(jsonrpc.Unmarshal) + svc.Handlers.UnmarshalMeta.PushBack(jsonrpc.UnmarshalMeta) + svc.Handlers.UnmarshalError.PushBack(jsonrpc.UnmarshalError) + + // Run custom client initialization if present + if initClient != nil { + initClient(svc.Client) + } + + return svc +} + +// newRequest creates a new request for a CloudTrail operation and runs any +// custom request initialization. +func (c *CloudTrail) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + // Run custom request initialization if present + if initRequest != nil { + initRequest(req) + } + + return req +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/cloudwatch/api.go b/vendor/github.com/aws/aws-sdk-go/service/cloudwatch/api.go new file mode 100644 index 000000000..64164a0a5 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/cloudwatch/api.go @@ -0,0 +1,1361 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +// Package cloudwatch provides a client for Amazon CloudWatch. +package cloudwatch + +import ( + "time" + + "github.com/aws/aws-sdk-go/aws/awsutil" + "github.com/aws/aws-sdk-go/aws/request" +) + +const opDeleteAlarms = "DeleteAlarms" + +// DeleteAlarmsRequest generates a request for the DeleteAlarms operation. +func (c *CloudWatch) DeleteAlarmsRequest(input *DeleteAlarmsInput) (req *request.Request, output *DeleteAlarmsOutput) { + op := &request.Operation{ + Name: opDeleteAlarms, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteAlarmsInput{} + } + + req = c.newRequest(op, input, output) + output = &DeleteAlarmsOutput{} + req.Data = output + return +} + +// Deletes all specified alarms. In the event of an error, no alarms are deleted. +func (c *CloudWatch) DeleteAlarms(input *DeleteAlarmsInput) (*DeleteAlarmsOutput, error) { + req, out := c.DeleteAlarmsRequest(input) + err := req.Send() + return out, err +} + +const opDescribeAlarmHistory = "DescribeAlarmHistory" + +// DescribeAlarmHistoryRequest generates a request for the DescribeAlarmHistory operation. +func (c *CloudWatch) DescribeAlarmHistoryRequest(input *DescribeAlarmHistoryInput) (req *request.Request, output *DescribeAlarmHistoryOutput) { + op := &request.Operation{ + Name: opDescribeAlarmHistory, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxRecords", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeAlarmHistoryInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeAlarmHistoryOutput{} + req.Data = output + return +} + +// Retrieves history for the specified alarm. Filter alarms by date range or +// item type. If an alarm name is not specified, Amazon CloudWatch returns histories +// for all of the owner's alarms. 
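+//
+// Results are paginated via NextToken; the DescribeAlarmHistoryPages helper
+// defined below walks every page for you. A minimal sketch (illustrative
+// only; svc is assumed to be an initialized *cloudwatch.CloudWatch):
+//
+//  input := &cloudwatch.DescribeAlarmHistoryInput{}
+//  err := svc.DescribeAlarmHistoryPages(input,
+//   func(page *cloudwatch.DescribeAlarmHistoryOutput, lastPage bool) bool {
+//    fmt.Println(len(page.AlarmHistoryItems), "history items on this page")
+//    return true // returning false would stop the iteration early
+//   })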
+func (c *CloudWatch) DescribeAlarmHistory(input *DescribeAlarmHistoryInput) (*DescribeAlarmHistoryOutput, error) { + req, out := c.DescribeAlarmHistoryRequest(input) + err := req.Send() + return out, err +} + +func (c *CloudWatch) DescribeAlarmHistoryPages(input *DescribeAlarmHistoryInput, fn func(p *DescribeAlarmHistoryOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.DescribeAlarmHistoryRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*DescribeAlarmHistoryOutput), lastPage) + }) +} + +const opDescribeAlarms = "DescribeAlarms" + +// DescribeAlarmsRequest generates a request for the DescribeAlarms operation. +func (c *CloudWatch) DescribeAlarmsRequest(input *DescribeAlarmsInput) (req *request.Request, output *DescribeAlarmsOutput) { + op := &request.Operation{ + Name: opDescribeAlarms, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxRecords", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeAlarmsInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeAlarmsOutput{} + req.Data = output + return +} + +// Retrieves alarms with the specified names. If no name is specified, all alarms +// for the user are returned. Alarms can be retrieved by using only a prefix +// for the alarm name, the alarm state, or a prefix for any action. +func (c *CloudWatch) DescribeAlarms(input *DescribeAlarmsInput) (*DescribeAlarmsOutput, error) { + req, out := c.DescribeAlarmsRequest(input) + err := req.Send() + return out, err +} + +func (c *CloudWatch) DescribeAlarmsPages(input *DescribeAlarmsInput, fn func(p *DescribeAlarmsOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.DescribeAlarmsRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*DescribeAlarmsOutput), lastPage) + }) +} + +const opDescribeAlarmsForMetric = "DescribeAlarmsForMetric" + +// DescribeAlarmsForMetricRequest generates a request for the DescribeAlarmsForMetric operation. +func (c *CloudWatch) DescribeAlarmsForMetricRequest(input *DescribeAlarmsForMetricInput) (req *request.Request, output *DescribeAlarmsForMetricOutput) { + op := &request.Operation{ + Name: opDescribeAlarmsForMetric, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeAlarmsForMetricInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeAlarmsForMetricOutput{} + req.Data = output + return +} + +// Retrieves all alarms for a single metric. Specify a statistic, period, or +// unit to filter the set of alarms further. +func (c *CloudWatch) DescribeAlarmsForMetric(input *DescribeAlarmsForMetricInput) (*DescribeAlarmsForMetricOutput, error) { + req, out := c.DescribeAlarmsForMetricRequest(input) + err := req.Send() + return out, err +} + +const opDisableAlarmActions = "DisableAlarmActions" + +// DisableAlarmActionsRequest generates a request for the DisableAlarmActions operation. 
+func (c *CloudWatch) DisableAlarmActionsRequest(input *DisableAlarmActionsInput) (req *request.Request, output *DisableAlarmActionsOutput) {
+ op := &request.Operation{
+ Name: opDisableAlarmActions,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &DisableAlarmActionsInput{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &DisableAlarmActionsOutput{}
+ req.Data = output
+ return
+}
+
+// Disables actions for the specified alarms. When an alarm's actions are disabled,
+// the alarm's state may change, but none of the alarm's actions will execute.
+func (c *CloudWatch) DisableAlarmActions(input *DisableAlarmActionsInput) (*DisableAlarmActionsOutput, error) {
+ req, out := c.DisableAlarmActionsRequest(input)
+ err := req.Send()
+ return out, err
+}
+
+const opEnableAlarmActions = "EnableAlarmActions"
+
+// EnableAlarmActionsRequest generates a request for the EnableAlarmActions operation.
+func (c *CloudWatch) EnableAlarmActionsRequest(input *EnableAlarmActionsInput) (req *request.Request, output *EnableAlarmActionsOutput) {
+ op := &request.Operation{
+ Name: opEnableAlarmActions,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &EnableAlarmActionsInput{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &EnableAlarmActionsOutput{}
+ req.Data = output
+ return
+}
+
+// Enables actions for the specified alarms.
+func (c *CloudWatch) EnableAlarmActions(input *EnableAlarmActionsInput) (*EnableAlarmActionsOutput, error) {
+ req, out := c.EnableAlarmActionsRequest(input)
+ err := req.Send()
+ return out, err
+}
+
+const opGetMetricStatistics = "GetMetricStatistics"
+
+// GetMetricStatisticsRequest generates a request for the GetMetricStatistics operation.
+func (c *CloudWatch) GetMetricStatisticsRequest(input *GetMetricStatisticsInput) (req *request.Request, output *GetMetricStatisticsOutput) {
+ op := &request.Operation{
+ Name: opGetMetricStatistics,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &GetMetricStatisticsInput{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &GetMetricStatisticsOutput{}
+ req.Data = output
+ return
+}
+
+// Gets statistics for the specified metric.
+//
+// The maximum number of data points returned from a single GetMetricStatistics
+// request is 1,440, whereas the maximum number of data points that can be queried
+// is 50,850. If you make a request that generates more than 1,440 data points,
+// Amazon CloudWatch returns an error. In such a case, you can alter the request
+// by narrowing the specified time range or increasing the specified period.
+// Alternatively, you can make multiple requests across adjacent time ranges.
+//
+// Amazon CloudWatch aggregates data points based on the length of the period
+// that you specify. For example, if you request statistics with a one-minute
+// granularity, Amazon CloudWatch aggregates data points with time stamps that
+// fall within the same one-minute period. In such a case, the data points queried
+// can greatly outnumber the data points returned.
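+//
+// As a rough worked example of the 1,440-point limit: a request spanning 24
+// hours at a 60-second period asks for 24 * 60 = 1,440 data points, exactly
+// the per-request maximum; the same range at a 300-second period needs only
+// 86,400 / 300 = 288.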
+// +// The following examples show various statistics allowed by the data point +// query maximum of 50,850 when you call GetMetricStatistics on Amazon EC2 instances +// with detailed (one-minute) monitoring enabled: +// +// Statistics for up to 400 instances for a span of one hour Statistics for +// up to 35 instances over a span of 24 hours Statistics for up to 2 instances +// over a span of 2 weeks For information about the namespace, metric names, +// and dimensions that other Amazon Web Services products use to send metrics +// to Cloudwatch, go to Amazon CloudWatch Metrics, Namespaces, and Dimensions +// Reference (http://docs.aws.amazon.com/AmazonCloudWatch/latest/DeveloperGuide/CW_Support_For_AWS.html) +// in the Amazon CloudWatch Developer Guide. +func (c *CloudWatch) GetMetricStatistics(input *GetMetricStatisticsInput) (*GetMetricStatisticsOutput, error) { + req, out := c.GetMetricStatisticsRequest(input) + err := req.Send() + return out, err +} + +const opListMetrics = "ListMetrics" + +// ListMetricsRequest generates a request for the ListMetrics operation. +func (c *CloudWatch) ListMetricsRequest(input *ListMetricsInput) (req *request.Request, output *ListMetricsOutput) { + op := &request.Operation{ + Name: opListMetrics, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListMetricsInput{} + } + + req = c.newRequest(op, input, output) + output = &ListMetricsOutput{} + req.Data = output + return +} + +// Returns a list of valid metrics stored for the AWS account owner. Returned +// metrics can be used with GetMetricStatistics to obtain statistical data for +// a given metric. +func (c *CloudWatch) ListMetrics(input *ListMetricsInput) (*ListMetricsOutput, error) { + req, out := c.ListMetricsRequest(input) + err := req.Send() + return out, err +} + +func (c *CloudWatch) ListMetricsPages(input *ListMetricsInput, fn func(p *ListMetricsOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.ListMetricsRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*ListMetricsOutput), lastPage) + }) +} + +const opPutMetricAlarm = "PutMetricAlarm" + +// PutMetricAlarmRequest generates a request for the PutMetricAlarm operation. +func (c *CloudWatch) PutMetricAlarmRequest(input *PutMetricAlarmInput) (req *request.Request, output *PutMetricAlarmOutput) { + op := &request.Operation{ + Name: opPutMetricAlarm, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &PutMetricAlarmInput{} + } + + req = c.newRequest(op, input, output) + output = &PutMetricAlarmOutput{} + req.Data = output + return +} + +// Creates or updates an alarm and associates it with the specified Amazon CloudWatch +// metric. Optionally, this operation can associate one or more Amazon Simple +// Notification Service resources with the alarm. +// +// When this operation creates an alarm, the alarm state is immediately set +// to INSUFFICIENT_DATA. The alarm is evaluated and its StateValue is set appropriately. +// Any actions associated with the StateValue is then executed. 
+func (c *CloudWatch) PutMetricAlarm(input *PutMetricAlarmInput) (*PutMetricAlarmOutput, error) { + req, out := c.PutMetricAlarmRequest(input) + err := req.Send() + return out, err +} + +const opPutMetricData = "PutMetricData" + +// PutMetricDataRequest generates a request for the PutMetricData operation. +func (c *CloudWatch) PutMetricDataRequest(input *PutMetricDataInput) (req *request.Request, output *PutMetricDataOutput) { + op := &request.Operation{ + Name: opPutMetricData, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &PutMetricDataInput{} + } + + req = c.newRequest(op, input, output) + output = &PutMetricDataOutput{} + req.Data = output + return +} + +// Publishes metric data points to Amazon CloudWatch. Amazon Cloudwatch associates +// the data points with the specified metric. If the specified metric does not +// exist, Amazon CloudWatch creates the metric. It can take up to fifteen minutes +// for a new metric to appear in calls to the ListMetrics action. +// +// The size of a PutMetricData request is limited to 8 KB for HTTP GET requests +// and 40 KB for HTTP POST requests. +// +// Although the Value parameter accepts numbers of type Double, Amazon CloudWatch +// truncates values with very large exponents. Values with base-10 exponents +// greater than 126 (1 x 10^126) are truncated. Likewise, values with base-10 +// exponents less than -130 (1 x 10^-130) are also truncated. Data that is +// timestamped 24 hours or more in the past may take in excess of 48 hours to +// become available from submission time using GetMetricStatistics. +func (c *CloudWatch) PutMetricData(input *PutMetricDataInput) (*PutMetricDataOutput, error) { + req, out := c.PutMetricDataRequest(input) + err := req.Send() + return out, err +} + +const opSetAlarmState = "SetAlarmState" + +// SetAlarmStateRequest generates a request for the SetAlarmState operation. +func (c *CloudWatch) SetAlarmStateRequest(input *SetAlarmStateInput) (req *request.Request, output *SetAlarmStateOutput) { + op := &request.Operation{ + Name: opSetAlarmState, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &SetAlarmStateInput{} + } + + req = c.newRequest(op, input, output) + output = &SetAlarmStateOutput{} + req.Data = output + return +} + +// Temporarily sets the state of an alarm. When the updated StateValue differs +// from the previous value, the action configured for the appropriate state +// is invoked. This is not a permanent change. The next periodic alarm check +// (in about a minute) will set the alarm to its actual state. +func (c *CloudWatch) SetAlarmState(input *SetAlarmStateInput) (*SetAlarmStateOutput, error) { + req, out := c.SetAlarmStateRequest(input) + err := req.Send() + return out, err +} + +// The AlarmHistoryItem data type contains descriptive information about the +// history of a specific alarm. If you call DescribeAlarmHistory, Amazon CloudWatch +// returns this data type as part of the DescribeAlarmHistoryResult data type. +type AlarmHistoryItem struct { + _ struct{} `type:"structure"` + + // The descriptive name for the alarm. + AlarmName *string `min:"1" type:"string"` + + // Machine-readable data about the alarm in JSON format. + HistoryData *string `min:"1" type:"string"` + + // The type of alarm history item. + HistoryItemType *string `type:"string" enum:"HistoryItemType"` + + // A human-readable summary of the alarm history. + HistorySummary *string `min:"1" type:"string"` + + // The time stamp for the alarm history item. 
Amazon CloudWatch uses Coordinated + // Universal Time (UTC) when returning time stamps, which do not accommodate + // seasonal adjustments such as daylight savings time. For more information, + // see Time stamps (http://docs.aws.amazon.com/AmazonCloudWatch/latest/DeveloperGuide/cloudwatch_concepts.html#about_timestamp) + // in the Amazon CloudWatch Developer Guide. + Timestamp *time.Time `type:"timestamp" timestampFormat:"iso8601"` +} + +// String returns the string representation +func (s AlarmHistoryItem) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AlarmHistoryItem) GoString() string { + return s.String() +} + +// The Datapoint data type encapsulates the statistical data that Amazon CloudWatch +// computes from metric data. +type Datapoint struct { + _ struct{} `type:"structure"` + + // The average of metric values that correspond to the datapoint. + Average *float64 `type:"double"` + + // The maximum of the metric value used for the datapoint. + Maximum *float64 `type:"double"` + + // The minimum metric value used for the datapoint. + Minimum *float64 `type:"double"` + + // The number of metric values that contributed to the aggregate value of this + // datapoint. + SampleCount *float64 `type:"double"` + + // The sum of metric values used for the datapoint. + Sum *float64 `type:"double"` + + // The time stamp used for the datapoint. Amazon CloudWatch uses Coordinated + // Universal Time (UTC) when returning time stamps, which do not accommodate + // seasonal adjustments such as daylight savings time. For more information, + // see Time stamps (http://docs.aws.amazon.com/AmazonCloudWatch/latest/DeveloperGuide/cloudwatch_concepts.html#about_timestamp) + // in the Amazon CloudWatch Developer Guide. + Timestamp *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + // The standard unit used for the datapoint. + Unit *string `type:"string" enum:"StandardUnit"` +} + +// String returns the string representation +func (s Datapoint) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Datapoint) GoString() string { + return s.String() +} + +type DeleteAlarmsInput struct { + _ struct{} `type:"structure"` + + // A list of alarms to be deleted. + AlarmNames []*string `type:"list" required:"true"` +} + +// String returns the string representation +func (s DeleteAlarmsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteAlarmsInput) GoString() string { + return s.String() +} + +type DeleteAlarmsOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteAlarmsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteAlarmsOutput) GoString() string { + return s.String() +} + +type DescribeAlarmHistoryInput struct { + _ struct{} `type:"structure"` + + // The name of the alarm. + AlarmName *string `min:"1" type:"string"` + + // The ending date to retrieve alarm history. + EndDate *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + // The type of alarm histories to retrieve. + HistoryItemType *string `type:"string" enum:"HistoryItemType"` + + // The maximum number of alarm history records to retrieve. + MaxRecords *int64 `min:"1" type:"integer"` + + // The token returned by a previous call to indicate that there is more data + // available. 
+ NextToken *string `type:"string"` + + // The starting date to retrieve alarm history. + StartDate *time.Time `type:"timestamp" timestampFormat:"iso8601"` +} + +// String returns the string representation +func (s DescribeAlarmHistoryInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeAlarmHistoryInput) GoString() string { + return s.String() +} + +// The output for the DescribeAlarmHistory action. +type DescribeAlarmHistoryOutput struct { + _ struct{} `type:"structure"` + + // A list of alarm histories in JSON format. + AlarmHistoryItems []*AlarmHistoryItem `type:"list"` + + // A string that marks the start of the next batch of returned results. + NextToken *string `type:"string"` +} + +// String returns the string representation +func (s DescribeAlarmHistoryOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeAlarmHistoryOutput) GoString() string { + return s.String() +} + +type DescribeAlarmsForMetricInput struct { + _ struct{} `type:"structure"` + + // The list of dimensions associated with the metric. + Dimensions []*Dimension `type:"list"` + + // The name of the metric. + MetricName *string `min:"1" type:"string" required:"true"` + + // The namespace of the metric. + Namespace *string `min:"1" type:"string" required:"true"` + + // The period in seconds over which the statistic is applied. + Period *int64 `min:"60" type:"integer"` + + // The statistic for the metric. + Statistic *string `type:"string" enum:"Statistic"` + + // The unit for the metric. + Unit *string `type:"string" enum:"StandardUnit"` +} + +// String returns the string representation +func (s DescribeAlarmsForMetricInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeAlarmsForMetricInput) GoString() string { + return s.String() +} + +// The output for the DescribeAlarmsForMetric action. +type DescribeAlarmsForMetricOutput struct { + _ struct{} `type:"structure"` + + // A list of information for each alarm with the specified metric. + MetricAlarms []*MetricAlarm `type:"list"` +} + +// String returns the string representation +func (s DescribeAlarmsForMetricOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeAlarmsForMetricOutput) GoString() string { + return s.String() +} + +type DescribeAlarmsInput struct { + _ struct{} `type:"structure"` + + // The action name prefix. + ActionPrefix *string `min:"1" type:"string"` + + // The alarm name prefix. AlarmNames cannot be specified if this parameter is + // specified. + AlarmNamePrefix *string `min:"1" type:"string"` + + // A list of alarm names to retrieve information for. + AlarmNames []*string `type:"list"` + + // The maximum number of alarm descriptions to retrieve. + MaxRecords *int64 `min:"1" type:"integer"` + + // The token returned by a previous call to indicate that there is more data + // available. + NextToken *string `type:"string"` + + // The state value to be used in matching alarms. + StateValue *string `type:"string" enum:"StateValue"` +} + +// String returns the string representation +func (s DescribeAlarmsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeAlarmsInput) GoString() string { + return s.String() +} + +// The output for the DescribeAlarms action. 
+type DescribeAlarmsOutput struct { + _ struct{} `type:"structure"` + + // A list of information for the specified alarms. + MetricAlarms []*MetricAlarm `type:"list"` + + // A string that marks the start of the next batch of returned results. + NextToken *string `type:"string"` +} + +// String returns the string representation +func (s DescribeAlarmsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeAlarmsOutput) GoString() string { + return s.String() +} + +// The Dimension data type further expands on the identity of a metric using +// a Name, Value pair. +// +// For examples that use one or more dimensions, see PutMetricData. +type Dimension struct { + _ struct{} `type:"structure"` + + // The name of the dimension. + Name *string `min:"1" type:"string" required:"true"` + + // The value representing the dimension measurement + Value *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s Dimension) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Dimension) GoString() string { + return s.String() +} + +// The DimensionFilter data type is used to filter ListMetrics results. +type DimensionFilter struct { + _ struct{} `type:"structure"` + + // The dimension name to be matched. + Name *string `min:"1" type:"string" required:"true"` + + // The value of the dimension to be matched. + Value *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s DimensionFilter) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DimensionFilter) GoString() string { + return s.String() +} + +type DisableAlarmActionsInput struct { + _ struct{} `type:"structure"` + + // The names of the alarms to disable actions for. + AlarmNames []*string `type:"list" required:"true"` +} + +// String returns the string representation +func (s DisableAlarmActionsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DisableAlarmActionsInput) GoString() string { + return s.String() +} + +type DisableAlarmActionsOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DisableAlarmActionsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DisableAlarmActionsOutput) GoString() string { + return s.String() +} + +type EnableAlarmActionsInput struct { + _ struct{} `type:"structure"` + + // The names of the alarms to enable actions for. + AlarmNames []*string `type:"list" required:"true"` +} + +// String returns the string representation +func (s EnableAlarmActionsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s EnableAlarmActionsInput) GoString() string { + return s.String() +} + +type EnableAlarmActionsOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s EnableAlarmActionsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s EnableAlarmActionsOutput) GoString() string { + return s.String() +} + +type GetMetricStatisticsInput struct { + _ struct{} `type:"structure"` + + // A list of dimensions describing qualities of the metric. 
+ Dimensions []*Dimension `type:"list"` + + // The time stamp to use for determining the last datapoint to return. The value + // specified is exclusive; results will include datapoints up to the time stamp + // specified. + EndTime *time.Time `type:"timestamp" timestampFormat:"iso8601" required:"true"` + + // The name of the metric, with or without spaces. + MetricName *string `min:"1" type:"string" required:"true"` + + // The namespace of the metric, with or without spaces. + Namespace *string `min:"1" type:"string" required:"true"` + + // The granularity, in seconds, of the returned datapoints. Period must be at + // least 60 seconds and must be a multiple of 60. The default value is 60. + Period *int64 `min:"60" type:"integer" required:"true"` + + // The time stamp to use for determining the first datapoint to return. The + // value specified is inclusive; results include datapoints with the time stamp + // specified. + StartTime *time.Time `type:"timestamp" timestampFormat:"iso8601" required:"true"` + + // The metric statistics to return. For information about specific statistics + // returned by GetMetricStatistics, go to Statistics (http://docs.aws.amazon.com/AmazonCloudWatch/latest/DeveloperGuide/index.html?CHAP_TerminologyandKeyConcepts.html#Statistic) + // in the Amazon CloudWatch Developer Guide. + // + // Valid Values: Average | Sum | SampleCount | Maximum | Minimum + Statistics []*string `min:"1" type:"list" required:"true"` + + // The unit for the metric. + Unit *string `type:"string" enum:"StandardUnit"` +} + +// String returns the string representation +func (s GetMetricStatisticsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetMetricStatisticsInput) GoString() string { + return s.String() +} + +// The output for the GetMetricStatistics action. +type GetMetricStatisticsOutput struct { + _ struct{} `type:"structure"` + + // The datapoints for the specified metric. + Datapoints []*Datapoint `type:"list"` + + // A label describing the specified metric. + Label *string `type:"string"` +} + +// String returns the string representation +func (s GetMetricStatisticsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetMetricStatisticsOutput) GoString() string { + return s.String() +} + +type ListMetricsInput struct { + _ struct{} `type:"structure"` + + // A list of dimensions to filter against. + Dimensions []*DimensionFilter `type:"list"` + + // The name of the metric to filter against. + MetricName *string `min:"1" type:"string"` + + // The namespace to filter against. + Namespace *string `min:"1" type:"string"` + + // The token returned by a previous call to indicate that there is more data + // available. + NextToken *string `type:"string"` +} + +// String returns the string representation +func (s ListMetricsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListMetricsInput) GoString() string { + return s.String() +} + +// The output for the ListMetrics action. +type ListMetricsOutput struct { + _ struct{} `type:"structure"` + + // A list of metrics used to generate statistics for an AWS account. + Metrics []*Metric `type:"list"` + + // A string that marks the start of the next batch of returned results. 
+ NextToken *string `type:"string"` +} + +// String returns the string representation +func (s ListMetricsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListMetricsOutput) GoString() string { + return s.String() +} + +// The Metric data type contains information about a specific metric. If you +// call ListMetrics, Amazon CloudWatch returns information contained by this +// data type. +// +// The example in the Examples section publishes two metrics named buffers +// and latency. Both metrics are in the examples namespace. Both metrics have +// two dimensions, InstanceID and InstanceType. +type Metric struct { + _ struct{} `type:"structure"` + + // A list of dimensions associated with the metric. + Dimensions []*Dimension `type:"list"` + + // The name of the metric. + MetricName *string `min:"1" type:"string"` + + // The namespace of the metric. + Namespace *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s Metric) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Metric) GoString() string { + return s.String() +} + +// The MetricAlarm data type represents an alarm. You can use PutMetricAlarm +// to create or update an alarm. +type MetricAlarm struct { + _ struct{} `type:"structure"` + + // Indicates whether actions should be executed during any changes to the alarm's + // state. + ActionsEnabled *bool `type:"boolean"` + + // The list of actions to execute when this alarm transitions into an ALARM + // state from any other state. Each action is specified as an Amazon Resource + // Number (ARN). Currently the only actions supported are publishing to an Amazon + // SNS topic and triggering an Auto Scaling policy. + AlarmActions []*string `type:"list"` + + // The Amazon Resource Name (ARN) of the alarm. + AlarmArn *string `min:"1" type:"string"` + + // The time stamp of the last update to the alarm configuration. Amazon CloudWatch + // uses Coordinated Universal Time (UTC) when returning time stamps, which do + // not accommodate seasonal adjustments such as daylight savings time. For more + // information, see Time stamps (http://docs.aws.amazon.com/AmazonCloudWatch/latest/DeveloperGuide/cloudwatch_concepts.html#about_timestamp) + // in the Amazon CloudWatch Developer Guide. + AlarmConfigurationUpdatedTimestamp *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + // The description for the alarm. + AlarmDescription *string `type:"string"` + + // The name of the alarm. + AlarmName *string `min:"1" type:"string"` + + // The arithmetic operation to use when comparing the specified Statistic and + // Threshold. The specified Statistic value is used as the first operand. + ComparisonOperator *string `type:"string" enum:"ComparisonOperator"` + + // The list of dimensions associated with the alarm's associated metric. + Dimensions []*Dimension `type:"list"` + + // The number of periods over which data is compared to the specified threshold. + EvaluationPeriods *int64 `min:"1" type:"integer"` + + // The list of actions to execute when this alarm transitions into an INSUFFICIENT_DATA + // state from any other state. Each action is specified as an Amazon Resource + // Number (ARN). Currently the only actions supported are publishing to an Amazon + // SNS topic or triggering an Auto Scaling policy. + // + // The current WSDL lists this attribute as UnknownActions. 
+ InsufficientDataActions []*string `type:"list"` + + // The name of the alarm's metric. + MetricName *string `min:"1" type:"string"` + + // The namespace of alarm's associated metric. + Namespace *string `min:"1" type:"string"` + + // The list of actions to execute when this alarm transitions into an OK state + // from any other state. Each action is specified as an Amazon Resource Number + // (ARN). Currently the only actions supported are publishing to an Amazon SNS + // topic and triggering an Auto Scaling policy. + OKActions []*string `type:"list"` + + // The period in seconds over which the statistic is applied. + Period *int64 `min:"60" type:"integer"` + + // A human-readable explanation for the alarm's state. + StateReason *string `type:"string"` + + // An explanation for the alarm's state in machine-readable JSON format + StateReasonData *string `type:"string"` + + // The time stamp of the last update to the alarm's state. Amazon CloudWatch + // uses Coordinated Universal Time (UTC) when returning time stamps, which do + // not accommodate seasonal adjustments such as daylight savings time. For more + // information, see Time stamps (http://docs.aws.amazon.com/AmazonCloudWatch/latest/DeveloperGuide/cloudwatch_concepts.html#about_timestamp) + // in the Amazon CloudWatch Developer Guide. + StateUpdatedTimestamp *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + // The state value for the alarm. + StateValue *string `type:"string" enum:"StateValue"` + + // The statistic to apply to the alarm's associated metric. + Statistic *string `type:"string" enum:"Statistic"` + + // The value against which the specified statistic is compared. + Threshold *float64 `type:"double"` + + // The unit of the alarm's associated metric. + Unit *string `type:"string" enum:"StandardUnit"` +} + +// String returns the string representation +func (s MetricAlarm) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s MetricAlarm) GoString() string { + return s.String() +} + +// The MetricDatum data type encapsulates the information sent with PutMetricData +// to either create a new metric or add new values to be aggregated into an +// existing metric. +type MetricDatum struct { + _ struct{} `type:"structure"` + + // A list of dimensions associated with the metric. Note, when using the Dimensions + // value in a query, you need to append .member.N to it (e.g., Dimensions.member.N). + Dimensions []*Dimension `type:"list"` + + // The name of the metric. + MetricName *string `min:"1" type:"string" required:"true"` + + // A set of statistical values describing the metric. + StatisticValues *StatisticSet `type:"structure"` + + // The time stamp used for the metric. If not specified, the default value is + // set to the time the metric data was received. Amazon CloudWatch uses Coordinated + // Universal Time (UTC) when returning time stamps, which do not accommodate + // seasonal adjustments such as daylight savings time. For more information, + // see Time stamps (http://docs.aws.amazon.com/AmazonCloudWatch/latest/DeveloperGuide/cloudwatch_concepts.html#about_timestamp) + // in the Amazon CloudWatch Developer Guide. + Timestamp *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + // The unit of the metric. + Unit *string `type:"string" enum:"StandardUnit"` + + // The value for the metric. + // + // Although the Value parameter accepts numbers of type Double, Amazon CloudWatch + // truncates values with very large exponents. 
Values with base-10 exponents + // greater than 126 (1 x 10^126) are truncated. Likewise, values with base-10 + // exponents less than -130 (1 x 10^-130) are also truncated. + Value *float64 `type:"double"` +} + +// String returns the string representation +func (s MetricDatum) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s MetricDatum) GoString() string { + return s.String() +} + +type PutMetricAlarmInput struct { + _ struct{} `type:"structure"` + + // Indicates whether or not actions should be executed during any changes to + // the alarm's state. + ActionsEnabled *bool `type:"boolean"` + + // The list of actions to execute when this alarm transitions into an ALARM + // state from any other state. Each action is specified as an Amazon Resource + // Number (ARN). Currently the only action supported is publishing to an Amazon + // SNS topic or an Amazon Auto Scaling policy. + AlarmActions []*string `type:"list"` + + // The description for the alarm. + AlarmDescription *string `type:"string"` + + // The descriptive name for the alarm. This name must be unique within the user's + // AWS account + AlarmName *string `min:"1" type:"string" required:"true"` + + // The arithmetic operation to use when comparing the specified Statistic and + // Threshold. The specified Statistic value is used as the first operand. + ComparisonOperator *string `type:"string" required:"true" enum:"ComparisonOperator"` + + // The dimensions for the alarm's associated metric. + Dimensions []*Dimension `type:"list"` + + // The number of periods over which data is compared to the specified threshold. + EvaluationPeriods *int64 `min:"1" type:"integer" required:"true"` + + // The list of actions to execute when this alarm transitions into an INSUFFICIENT_DATA + // state from any other state. Each action is specified as an Amazon Resource + // Number (ARN). Currently the only action supported is publishing to an Amazon + // SNS topic or an Amazon Auto Scaling policy. + InsufficientDataActions []*string `type:"list"` + + // The name for the alarm's associated metric. + MetricName *string `min:"1" type:"string" required:"true"` + + // The namespace for the alarm's associated metric. + Namespace *string `min:"1" type:"string" required:"true"` + + // The list of actions to execute when this alarm transitions into an OK state + // from any other state. Each action is specified as an Amazon Resource Number + // (ARN). Currently the only action supported is publishing to an Amazon SNS + // topic or an Amazon Auto Scaling policy. + OKActions []*string `type:"list"` + + // The period in seconds over which the specified statistic is applied. + Period *int64 `min:"60" type:"integer" required:"true"` + + // The statistic to apply to the alarm's associated metric. + Statistic *string `type:"string" required:"true" enum:"Statistic"` + + // The value against which the specified statistic is compared. + Threshold *float64 `type:"double" required:"true"` + + // The unit for the alarm's associated metric. 
+ Unit *string `type:"string" enum:"StandardUnit"` +} + +// String returns the string representation +func (s PutMetricAlarmInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutMetricAlarmInput) GoString() string { + return s.String() +} + +type PutMetricAlarmOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s PutMetricAlarmOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutMetricAlarmOutput) GoString() string { + return s.String() +} + +type PutMetricDataInput struct { + _ struct{} `type:"structure"` + + // A list of data describing the metric. + MetricData []*MetricDatum `type:"list" required:"true"` + + // The namespace for the metric data. + Namespace *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s PutMetricDataInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutMetricDataInput) GoString() string { + return s.String() +} + +type PutMetricDataOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s PutMetricDataOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutMetricDataOutput) GoString() string { + return s.String() +} + +type SetAlarmStateInput struct { + _ struct{} `type:"structure"` + + // The descriptive name for the alarm. This name must be unique within the user's + // AWS account. The maximum length is 255 characters. + AlarmName *string `min:"1" type:"string" required:"true"` + + // The reason that this alarm is set to this specific state (in human-readable + // text format) + StateReason *string `type:"string" required:"true"` + + // The reason that this alarm is set to this specific state (in machine-readable + // JSON format) + StateReasonData *string `type:"string"` + + // The value of the state. + StateValue *string `type:"string" required:"true" enum:"StateValue"` +} + +// String returns the string representation +func (s SetAlarmStateInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SetAlarmStateInput) GoString() string { + return s.String() +} + +type SetAlarmStateOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s SetAlarmStateOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SetAlarmStateOutput) GoString() string { + return s.String() +} + +// The StatisticSet data type describes the StatisticValues component of MetricDatum, +// and represents a set of statistics that describes a specific metric. +type StatisticSet struct { + _ struct{} `type:"structure"` + + // The maximum value of the sample set. + Maximum *float64 `type:"double" required:"true"` + + // The minimum value of the sample set. + Minimum *float64 `type:"double" required:"true"` + + // The number of samples used for the statistic set. + SampleCount *float64 `type:"double" required:"true"` + + // The sum of values for the sample set. 
+ Sum *float64 `type:"double" required:"true"` +} + +// String returns the string representation +func (s StatisticSet) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s StatisticSet) GoString() string { + return s.String() +} + +const ( + // @enum ComparisonOperator + ComparisonOperatorGreaterThanOrEqualToThreshold = "GreaterThanOrEqualToThreshold" + // @enum ComparisonOperator + ComparisonOperatorGreaterThanThreshold = "GreaterThanThreshold" + // @enum ComparisonOperator + ComparisonOperatorLessThanThreshold = "LessThanThreshold" + // @enum ComparisonOperator + ComparisonOperatorLessThanOrEqualToThreshold = "LessThanOrEqualToThreshold" +) + +const ( + // @enum HistoryItemType + HistoryItemTypeConfigurationUpdate = "ConfigurationUpdate" + // @enum HistoryItemType + HistoryItemTypeStateUpdate = "StateUpdate" + // @enum HistoryItemType + HistoryItemTypeAction = "Action" +) + +const ( + // @enum StandardUnit + StandardUnitSeconds = "Seconds" + // @enum StandardUnit + StandardUnitMicroseconds = "Microseconds" + // @enum StandardUnit + StandardUnitMilliseconds = "Milliseconds" + // @enum StandardUnit + StandardUnitBytes = "Bytes" + // @enum StandardUnit + StandardUnitKilobytes = "Kilobytes" + // @enum StandardUnit + StandardUnitMegabytes = "Megabytes" + // @enum StandardUnit + StandardUnitGigabytes = "Gigabytes" + // @enum StandardUnit + StandardUnitTerabytes = "Terabytes" + // @enum StandardUnit + StandardUnitBits = "Bits" + // @enum StandardUnit + StandardUnitKilobits = "Kilobits" + // @enum StandardUnit + StandardUnitMegabits = "Megabits" + // @enum StandardUnit + StandardUnitGigabits = "Gigabits" + // @enum StandardUnit + StandardUnitTerabits = "Terabits" + // @enum StandardUnit + StandardUnitPercent = "Percent" + // @enum StandardUnit + StandardUnitCount = "Count" + // @enum StandardUnit + StandardUnitBytesSecond = "Bytes/Second" + // @enum StandardUnit + StandardUnitKilobytesSecond = "Kilobytes/Second" + // @enum StandardUnit + StandardUnitMegabytesSecond = "Megabytes/Second" + // @enum StandardUnit + StandardUnitGigabytesSecond = "Gigabytes/Second" + // @enum StandardUnit + StandardUnitTerabytesSecond = "Terabytes/Second" + // @enum StandardUnit + StandardUnitBitsSecond = "Bits/Second" + // @enum StandardUnit + StandardUnitKilobitsSecond = "Kilobits/Second" + // @enum StandardUnit + StandardUnitMegabitsSecond = "Megabits/Second" + // @enum StandardUnit + StandardUnitGigabitsSecond = "Gigabits/Second" + // @enum StandardUnit + StandardUnitTerabitsSecond = "Terabits/Second" + // @enum StandardUnit + StandardUnitCountSecond = "Count/Second" + // @enum StandardUnit + StandardUnitNone = "None" +) + +const ( + // @enum StateValue + StateValueOk = "OK" + // @enum StateValue + StateValueAlarm = "ALARM" + // @enum StateValue + StateValueInsufficientData = "INSUFFICIENT_DATA" +) + +const ( + // @enum Statistic + StatisticSampleCount = "SampleCount" + // @enum Statistic + StatisticAverage = "Average" + // @enum Statistic + StatisticSum = "Sum" + // @enum Statistic + StatisticMinimum = "Minimum" + // @enum Statistic + StatisticMaximum = "Maximum" +) diff --git a/vendor/github.com/aws/aws-sdk-go/service/cloudwatch/cloudwatchiface/interface.go b/vendor/github.com/aws/aws-sdk-go/service/cloudwatch/cloudwatchiface/interface.go new file mode 100644 index 000000000..140dc9e98 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/cloudwatch/cloudwatchiface/interface.go @@ -0,0 +1,64 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. 
+ +// Package cloudwatchiface provides an interface for the Amazon CloudWatch. +package cloudwatchiface + +import ( + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/service/cloudwatch" +) + +// CloudWatchAPI is the interface type for cloudwatch.CloudWatch. +type CloudWatchAPI interface { + DeleteAlarmsRequest(*cloudwatch.DeleteAlarmsInput) (*request.Request, *cloudwatch.DeleteAlarmsOutput) + + DeleteAlarms(*cloudwatch.DeleteAlarmsInput) (*cloudwatch.DeleteAlarmsOutput, error) + + DescribeAlarmHistoryRequest(*cloudwatch.DescribeAlarmHistoryInput) (*request.Request, *cloudwatch.DescribeAlarmHistoryOutput) + + DescribeAlarmHistory(*cloudwatch.DescribeAlarmHistoryInput) (*cloudwatch.DescribeAlarmHistoryOutput, error) + + DescribeAlarmHistoryPages(*cloudwatch.DescribeAlarmHistoryInput, func(*cloudwatch.DescribeAlarmHistoryOutput, bool) bool) error + + DescribeAlarmsRequest(*cloudwatch.DescribeAlarmsInput) (*request.Request, *cloudwatch.DescribeAlarmsOutput) + + DescribeAlarms(*cloudwatch.DescribeAlarmsInput) (*cloudwatch.DescribeAlarmsOutput, error) + + DescribeAlarmsPages(*cloudwatch.DescribeAlarmsInput, func(*cloudwatch.DescribeAlarmsOutput, bool) bool) error + + DescribeAlarmsForMetricRequest(*cloudwatch.DescribeAlarmsForMetricInput) (*request.Request, *cloudwatch.DescribeAlarmsForMetricOutput) + + DescribeAlarmsForMetric(*cloudwatch.DescribeAlarmsForMetricInput) (*cloudwatch.DescribeAlarmsForMetricOutput, error) + + DisableAlarmActionsRequest(*cloudwatch.DisableAlarmActionsInput) (*request.Request, *cloudwatch.DisableAlarmActionsOutput) + + DisableAlarmActions(*cloudwatch.DisableAlarmActionsInput) (*cloudwatch.DisableAlarmActionsOutput, error) + + EnableAlarmActionsRequest(*cloudwatch.EnableAlarmActionsInput) (*request.Request, *cloudwatch.EnableAlarmActionsOutput) + + EnableAlarmActions(*cloudwatch.EnableAlarmActionsInput) (*cloudwatch.EnableAlarmActionsOutput, error) + + GetMetricStatisticsRequest(*cloudwatch.GetMetricStatisticsInput) (*request.Request, *cloudwatch.GetMetricStatisticsOutput) + + GetMetricStatistics(*cloudwatch.GetMetricStatisticsInput) (*cloudwatch.GetMetricStatisticsOutput, error) + + ListMetricsRequest(*cloudwatch.ListMetricsInput) (*request.Request, *cloudwatch.ListMetricsOutput) + + ListMetrics(*cloudwatch.ListMetricsInput) (*cloudwatch.ListMetricsOutput, error) + + ListMetricsPages(*cloudwatch.ListMetricsInput, func(*cloudwatch.ListMetricsOutput, bool) bool) error + + PutMetricAlarmRequest(*cloudwatch.PutMetricAlarmInput) (*request.Request, *cloudwatch.PutMetricAlarmOutput) + + PutMetricAlarm(*cloudwatch.PutMetricAlarmInput) (*cloudwatch.PutMetricAlarmOutput, error) + + PutMetricDataRequest(*cloudwatch.PutMetricDataInput) (*request.Request, *cloudwatch.PutMetricDataOutput) + + PutMetricData(*cloudwatch.PutMetricDataInput) (*cloudwatch.PutMetricDataOutput, error) + + SetAlarmStateRequest(*cloudwatch.SetAlarmStateInput) (*request.Request, *cloudwatch.SetAlarmStateOutput) + + SetAlarmState(*cloudwatch.SetAlarmStateInput) (*cloudwatch.SetAlarmStateOutput, error) +} + +var _ CloudWatchAPI = (*cloudwatch.CloudWatch)(nil) diff --git a/vendor/github.com/aws/aws-sdk-go/service/cloudwatch/examples_test.go b/vendor/github.com/aws/aws-sdk-go/service/cloudwatch/examples_test.go new file mode 100644 index 000000000..07b010852 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/cloudwatch/examples_test.go @@ -0,0 +1,337 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. 
+ +package cloudwatch_test + +import ( + "bytes" + "fmt" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/session" + "github.com/aws/aws-sdk-go/service/cloudwatch" +) + +var _ time.Duration +var _ bytes.Buffer + +func ExampleCloudWatch_DeleteAlarms() { + svc := cloudwatch.New(session.New()) + + params := &cloudwatch.DeleteAlarmsInput{ + AlarmNames: []*string{ // Required + aws.String("AlarmName"), // Required + // More values... + }, + } + resp, err := svc.DeleteAlarms(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCloudWatch_DescribeAlarmHistory() { + svc := cloudwatch.New(session.New()) + + params := &cloudwatch.DescribeAlarmHistoryInput{ + AlarmName: aws.String("AlarmName"), + EndDate: aws.Time(time.Now()), + HistoryItemType: aws.String("HistoryItemType"), + MaxRecords: aws.Int64(1), + NextToken: aws.String("NextToken"), + StartDate: aws.Time(time.Now()), + } + resp, err := svc.DescribeAlarmHistory(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCloudWatch_DescribeAlarms() { + svc := cloudwatch.New(session.New()) + + params := &cloudwatch.DescribeAlarmsInput{ + ActionPrefix: aws.String("ActionPrefix"), + AlarmNamePrefix: aws.String("AlarmNamePrefix"), + AlarmNames: []*string{ + aws.String("AlarmName"), // Required + // More values... + }, + MaxRecords: aws.Int64(1), + NextToken: aws.String("NextToken"), + StateValue: aws.String("StateValue"), + } + resp, err := svc.DescribeAlarms(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCloudWatch_DescribeAlarmsForMetric() { + svc := cloudwatch.New(session.New()) + + params := &cloudwatch.DescribeAlarmsForMetricInput{ + MetricName: aws.String("MetricName"), // Required + Namespace: aws.String("Namespace"), // Required + Dimensions: []*cloudwatch.Dimension{ + { // Required + Name: aws.String("DimensionName"), // Required + Value: aws.String("DimensionValue"), // Required + }, + // More values... + }, + Period: aws.Int64(1), + Statistic: aws.String("Statistic"), + Unit: aws.String("StandardUnit"), + } + resp, err := svc.DescribeAlarmsForMetric(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCloudWatch_DisableAlarmActions() { + svc := cloudwatch.New(session.New()) + + params := &cloudwatch.DisableAlarmActionsInput{ + AlarmNames: []*string{ // Required + aws.String("AlarmName"), // Required + // More values... + }, + } + resp, err := svc.DisableAlarmActions(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleCloudWatch_EnableAlarmActions() { + svc := cloudwatch.New(session.New()) + + params := &cloudwatch.EnableAlarmActionsInput{ + AlarmNames: []*string{ // Required + aws.String("AlarmName"), // Required + // More values... + }, + } + resp, err := svc.EnableAlarmActions(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCloudWatch_GetMetricStatistics() { + svc := cloudwatch.New(session.New()) + + params := &cloudwatch.GetMetricStatisticsInput{ + EndTime: aws.Time(time.Now()), // Required + MetricName: aws.String("MetricName"), // Required + Namespace: aws.String("Namespace"), // Required + Period: aws.Int64(1), // Required + StartTime: aws.Time(time.Now()), // Required + Statistics: []*string{ // Required + aws.String("Statistic"), // Required + // More values... + }, + Dimensions: []*cloudwatch.Dimension{ + { // Required + Name: aws.String("DimensionName"), // Required + Value: aws.String("DimensionValue"), // Required + }, + // More values... + }, + Unit: aws.String("StandardUnit"), + } + resp, err := svc.GetMetricStatistics(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCloudWatch_ListMetrics() { + svc := cloudwatch.New(session.New()) + + params := &cloudwatch.ListMetricsInput{ + Dimensions: []*cloudwatch.DimensionFilter{ + { // Required + Name: aws.String("DimensionName"), // Required + Value: aws.String("DimensionValue"), + }, + // More values... + }, + MetricName: aws.String("MetricName"), + Namespace: aws.String("Namespace"), + NextToken: aws.String("NextToken"), + } + resp, err := svc.ListMetrics(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCloudWatch_PutMetricAlarm() { + svc := cloudwatch.New(session.New()) + + params := &cloudwatch.PutMetricAlarmInput{ + AlarmName: aws.String("AlarmName"), // Required + ComparisonOperator: aws.String("ComparisonOperator"), // Required + EvaluationPeriods: aws.Int64(1), // Required + MetricName: aws.String("MetricName"), // Required + Namespace: aws.String("Namespace"), // Required + Period: aws.Int64(1), // Required + Statistic: aws.String("Statistic"), // Required + Threshold: aws.Float64(1.0), // Required + ActionsEnabled: aws.Bool(true), + AlarmActions: []*string{ + aws.String("ResourceName"), // Required + // More values... + }, + AlarmDescription: aws.String("AlarmDescription"), + Dimensions: []*cloudwatch.Dimension{ + { // Required + Name: aws.String("DimensionName"), // Required + Value: aws.String("DimensionValue"), // Required + }, + // More values... + }, + InsufficientDataActions: []*string{ + aws.String("ResourceName"), // Required + // More values... + }, + OKActions: []*string{ + aws.String("ResourceName"), // Required + // More values... + }, + Unit: aws.String("StandardUnit"), + } + resp, err := svc.PutMetricAlarm(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleCloudWatch_PutMetricData() { + svc := cloudwatch.New(session.New()) + + params := &cloudwatch.PutMetricDataInput{ + MetricData: []*cloudwatch.MetricDatum{ // Required + { // Required + MetricName: aws.String("MetricName"), // Required + Dimensions: []*cloudwatch.Dimension{ + { // Required + Name: aws.String("DimensionName"), // Required + Value: aws.String("DimensionValue"), // Required + }, + // More values... + }, + StatisticValues: &cloudwatch.StatisticSet{ + Maximum: aws.Float64(1.0), // Required + Minimum: aws.Float64(1.0), // Required + SampleCount: aws.Float64(1.0), // Required + Sum: aws.Float64(1.0), // Required + }, + Timestamp: aws.Time(time.Now()), + Unit: aws.String("StandardUnit"), + Value: aws.Float64(1.0), + }, + // More values... + }, + Namespace: aws.String("Namespace"), // Required + } + resp, err := svc.PutMetricData(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCloudWatch_SetAlarmState() { + svc := cloudwatch.New(session.New()) + + params := &cloudwatch.SetAlarmStateInput{ + AlarmName: aws.String("AlarmName"), // Required + StateReason: aws.String("StateReason"), // Required + StateValue: aws.String("StateValue"), // Required + StateReasonData: aws.String("StateReasonData"), + } + resp, err := svc.SetAlarmState(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/cloudwatch/service.go b/vendor/github.com/aws/aws-sdk-go/service/cloudwatch/service.go new file mode 100644 index 000000000..e6f850684 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/cloudwatch/service.go @@ -0,0 +1,125 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +package cloudwatch + +import ( + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/client" + "github.com/aws/aws-sdk-go/aws/client/metadata" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol/query" + "github.com/aws/aws-sdk-go/private/signer/v4" +) + +// This is the Amazon CloudWatch API Reference. This guide provides detailed +// information about Amazon CloudWatch actions, data types, parameters, and +// errors. For detailed information about Amazon CloudWatch features and their +// associated API calls, go to the Amazon CloudWatch Developer Guide (http://docs.aws.amazon.com/AmazonCloudWatch/latest/DeveloperGuide). +// +// Amazon CloudWatch is a web service that enables you to publish, monitor, +// and manage various metrics, as well as configure alarm actions based on data +// from metrics. For more information about this product go to http://aws.amazon.com/cloudwatch +// (http://aws.amazon.com/cloudwatch). +// +// For information about the namespace, metric names, and dimensions that +// other Amazon Web Services products use to send metrics to Cloudwatch, go +// to Amazon CloudWatch Metrics, Namespaces, and Dimensions Reference (http://docs.aws.amazon.com/AmazonCloudWatch/latest/DeveloperGuide/CW_Support_For_AWS.html) +// in the Amazon CloudWatch Developer Guide. 
+// +// Use the following links to get started using the Amazon CloudWatch API Reference: +// +// Actions (http://docs.aws.amazon.com/AmazonCloudWatch/latest/APIReference/API_Operations.html): +// An alphabetical list of all Amazon CloudWatch actions. Data Types (http://docs.aws.amazon.com/AmazonCloudWatch/latest/APIReference/API_Types.html): +// An alphabetical list of all Amazon CloudWatch data types. Common Parameters +// (http://docs.aws.amazon.com/AmazonCloudWatch/latest/APIReference/CommonParameters.html): +// Parameters that all Query actions can use. Common Errors (http://docs.aws.amazon.com/AmazonCloudWatch/latest/APIReference/CommonErrors.html): +// Client and server errors that all actions can return. Regions and Endpoints +// (http://docs.aws.amazon.com/general/latest/gr/index.html?rande.html): Itemized +// regions and endpoints for all AWS products. WSDL Location (http://monitoring.amazonaws.com/doc/2010-08-01/CloudWatch.wsdl): +// http://monitoring.amazonaws.com/doc/2010-08-01/CloudWatch.wsdl In addition +// to using the Amazon CloudWatch API, you can also use the following SDKs and +// third-party libraries to access Amazon CloudWatch programmatically. +// +// AWS SDK for Java Documentation (http://aws.amazon.com/documentation/sdkforjava/) +// AWS SDK for .NET Documentation (http://aws.amazon.com/documentation/sdkfornet/) +// AWS SDK for PHP Documentation (http://aws.amazon.com/documentation/sdkforphp/) +// AWS SDK for Ruby Documentation (http://aws.amazon.com/documentation/sdkforruby/) +// Developers in the AWS developer community also provide their own libraries, +// which you can find at the following AWS developer centers: +// +// AWS Java Developer Center (http://aws.amazon.com/java/) AWS PHP Developer +// Center (http://aws.amazon.com/php/) AWS Python Developer Center (http://aws.amazon.com/python/) +// AWS Ruby Developer Center (http://aws.amazon.com/ruby/) AWS Windows and .NET +// Developer Center (http://aws.amazon.com/net/) +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type CloudWatch struct { + *client.Client +} + +// Used for custom client initialization logic +var initClient func(*client.Client) + +// Used for custom request initialization logic +var initRequest func(*request.Request) + +// A ServiceName is the name of the service the client will make API calls to. +const ServiceName = "monitoring" + +// New creates a new instance of the CloudWatch client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a CloudWatch client from just a session. +// svc := cloudwatch.New(mySession) +// +// // Create a CloudWatch client with additional configuration +// svc := cloudwatch.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func New(p client.ConfigProvider, cfgs ...*aws.Config) *CloudWatch { + c := p.ClientConfig(ServiceName, cfgs...) + return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. 
+func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *CloudWatch { + svc := &CloudWatch{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: ServiceName, + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "2010-08-01", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Build.PushBack(query.Build) + svc.Handlers.Unmarshal.PushBack(query.Unmarshal) + svc.Handlers.UnmarshalMeta.PushBack(query.UnmarshalMeta) + svc.Handlers.UnmarshalError.PushBack(query.UnmarshalError) + + // Run custom client initialization if present + if initClient != nil { + initClient(svc.Client) + } + + return svc +} + +// newRequest creates a new request for a CloudWatch operation and runs any +// custom request initialization. +func (c *CloudWatch) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + // Run custom request initialization if present + if initRequest != nil { + initRequest(req) + } + + return req +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/cloudwatchlogs/api.go b/vendor/github.com/aws/aws-sdk-go/service/cloudwatchlogs/api.go new file mode 100644 index 000000000..8d3b64937 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/cloudwatchlogs/api.go @@ -0,0 +1,2471 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +// Package cloudwatchlogs provides a client for Amazon CloudWatch Logs. +package cloudwatchlogs + +import ( + "github.com/aws/aws-sdk-go/aws/awsutil" + "github.com/aws/aws-sdk-go/aws/request" +) + +const opCancelExportTask = "CancelExportTask" + +// CancelExportTaskRequest generates a request for the CancelExportTask operation. +func (c *CloudWatchLogs) CancelExportTaskRequest(input *CancelExportTaskInput) (req *request.Request, output *CancelExportTaskOutput) { + op := &request.Operation{ + Name: opCancelExportTask, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CancelExportTaskInput{} + } + + req = c.newRequest(op, input, output) + output = &CancelExportTaskOutput{} + req.Data = output + return +} + +// Cancels an export task if it is in PENDING or RUNNING state. +func (c *CloudWatchLogs) CancelExportTask(input *CancelExportTaskInput) (*CancelExportTaskOutput, error) { + req, out := c.CancelExportTaskRequest(input) + err := req.Send() + return out, err +} + +const opCreateExportTask = "CreateExportTask" + +// CreateExportTaskRequest generates a request for the CreateExportTask operation. +func (c *CloudWatchLogs) CreateExportTaskRequest(input *CreateExportTaskInput) (req *request.Request, output *CreateExportTaskOutput) { + op := &request.Operation{ + Name: opCreateExportTask, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateExportTaskInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateExportTaskOutput{} + req.Data = output + return +} + +// Creates an ExportTask which allows you to efficiently export data from a +// Log Group to your Amazon S3 bucket. +// +// This is an asynchronous call. If all the required information is provided, +// this API will initiate an export task and respond with the task Id. Once +// started, DescribeExportTasks can be used to get the status of an export task. +// +// You can export logs from multiple log groups or multiple time ranges to +// the same Amazon S3 bucket. 
To separate out log data for each export task, +// you can specify a prefix that will be used as the Amazon S3 key prefix for +// all exported objects. +func (c *CloudWatchLogs) CreateExportTask(input *CreateExportTaskInput) (*CreateExportTaskOutput, error) { + req, out := c.CreateExportTaskRequest(input) + err := req.Send() + return out, err +} + +const opCreateLogGroup = "CreateLogGroup" + +// CreateLogGroupRequest generates a request for the CreateLogGroup operation. +func (c *CloudWatchLogs) CreateLogGroupRequest(input *CreateLogGroupInput) (req *request.Request, output *CreateLogGroupOutput) { + op := &request.Operation{ + Name: opCreateLogGroup, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateLogGroupInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateLogGroupOutput{} + req.Data = output + return +} + +// Creates a new log group with the specified name. The name of the log group +// must be unique within a region for an AWS account. You can create up to 500 +// log groups per account. +// +// You must use the following guidelines when naming a log group: Log group +// names can be between 1 and 512 characters long. Allowed characters are a-z, +// A-Z, 0-9, '_' (underscore), '-' (hyphen), '/' (forward slash), and '.' (period). +func (c *CloudWatchLogs) CreateLogGroup(input *CreateLogGroupInput) (*CreateLogGroupOutput, error) { + req, out := c.CreateLogGroupRequest(input) + err := req.Send() + return out, err +} + +const opCreateLogStream = "CreateLogStream" + +// CreateLogStreamRequest generates a request for the CreateLogStream operation. +func (c *CloudWatchLogs) CreateLogStreamRequest(input *CreateLogStreamInput) (req *request.Request, output *CreateLogStreamOutput) { + op := &request.Operation{ + Name: opCreateLogStream, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateLogStreamInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateLogStreamOutput{} + req.Data = output + return +} + +// Creates a new log stream in the specified log group. The name of the log +// stream must be unique within the log group. There is no limit on the number +// of log streams that can exist in a log group. +// +// You must use the following guidelines when naming a log stream: Log stream +// names can be between 1 and 512 characters long. The ':' colon character is +// not allowed. +func (c *CloudWatchLogs) CreateLogStream(input *CreateLogStreamInput) (*CreateLogStreamOutput, error) { + req, out := c.CreateLogStreamRequest(input) + err := req.Send() + return out, err +} + +const opDeleteDestination = "DeleteDestination" + +// DeleteDestinationRequest generates a request for the DeleteDestination operation. +func (c *CloudWatchLogs) DeleteDestinationRequest(input *DeleteDestinationInput) (req *request.Request, output *DeleteDestinationOutput) { + op := &request.Operation{ + Name: opDeleteDestination, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteDestinationInput{} + } + + req = c.newRequest(op, input, output) + output = &DeleteDestinationOutput{} + req.Data = output + return +} + +// Deletes the destination with the specified name and eventually disables all +// the subscription filters that publish to it. This will not delete the physical +// resource encapsulated by the destination. 
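+//
+// A minimal usage sketch (hedged: assumes a *CloudWatchLogs client built from
+// this SDK's session and aws helper packages; the destination name is a
+// placeholder):
+//
+//     svc := cloudwatchlogs.New(session.New())
+//     _, err := svc.DeleteDestination(&cloudwatchlogs.DeleteDestinationInput{
+//         DestinationName: aws.String("my-destination"),
+//     })
+//     if err != nil {
+//         // handle the request or service error
+//     }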
+func (c *CloudWatchLogs) DeleteDestination(input *DeleteDestinationInput) (*DeleteDestinationOutput, error) {
+	req, out := c.DeleteDestinationRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+const opDeleteLogGroup = "DeleteLogGroup"
+
+// DeleteLogGroupRequest generates a request for the DeleteLogGroup operation.
+func (c *CloudWatchLogs) DeleteLogGroupRequest(input *DeleteLogGroupInput) (req *request.Request, output *DeleteLogGroupOutput) {
+	op := &request.Operation{
+		Name:       opDeleteLogGroup,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &DeleteLogGroupInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &DeleteLogGroupOutput{}
+	req.Data = output
+	return
+}
+
+// Deletes the log group with the specified name and permanently deletes all
+// the archived log events associated with it.
+func (c *CloudWatchLogs) DeleteLogGroup(input *DeleteLogGroupInput) (*DeleteLogGroupOutput, error) {
+	req, out := c.DeleteLogGroupRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+const opDeleteLogStream = "DeleteLogStream"
+
+// DeleteLogStreamRequest generates a request for the DeleteLogStream operation.
+func (c *CloudWatchLogs) DeleteLogStreamRequest(input *DeleteLogStreamInput) (req *request.Request, output *DeleteLogStreamOutput) {
+	op := &request.Operation{
+		Name:       opDeleteLogStream,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &DeleteLogStreamInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &DeleteLogStreamOutput{}
+	req.Data = output
+	return
+}
+
+// Deletes a log stream and permanently deletes all the archived log events
+// associated with it.
+func (c *CloudWatchLogs) DeleteLogStream(input *DeleteLogStreamInput) (*DeleteLogStreamOutput, error) {
+	req, out := c.DeleteLogStreamRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+const opDeleteMetricFilter = "DeleteMetricFilter"
+
+// DeleteMetricFilterRequest generates a request for the DeleteMetricFilter operation.
+func (c *CloudWatchLogs) DeleteMetricFilterRequest(input *DeleteMetricFilterInput) (req *request.Request, output *DeleteMetricFilterOutput) {
+	op := &request.Operation{
+		Name:       opDeleteMetricFilter,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &DeleteMetricFilterInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &DeleteMetricFilterOutput{}
+	req.Data = output
+	return
+}
+
+// Deletes a metric filter associated with the specified log group.
+func (c *CloudWatchLogs) DeleteMetricFilter(input *DeleteMetricFilterInput) (*DeleteMetricFilterOutput, error) {
+	req, out := c.DeleteMetricFilterRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+const opDeleteRetentionPolicy = "DeleteRetentionPolicy"
+
+// DeleteRetentionPolicyRequest generates a request for the DeleteRetentionPolicy operation.
+func (c *CloudWatchLogs) DeleteRetentionPolicyRequest(input *DeleteRetentionPolicyInput) (req *request.Request, output *DeleteRetentionPolicyOutput) {
+	op := &request.Operation{
+		Name:       opDeleteRetentionPolicy,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &DeleteRetentionPolicyInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &DeleteRetentionPolicyOutput{}
+	req.Data = output
+	return
+}
+
+// Deletes the retention policy of the specified log group. Log events do
+// not expire if they belong to log groups without a retention policy.
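+//
+// A minimal sketch (svc is an assumed, already-constructed *CloudWatchLogs
+// client; the log group name is a placeholder):
+//
+//     _, err := svc.DeleteRetentionPolicy(&cloudwatchlogs.DeleteRetentionPolicyInput{
+//         LogGroupName: aws.String("my-log-group"),
+//     })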
+func (c *CloudWatchLogs) DeleteRetentionPolicy(input *DeleteRetentionPolicyInput) (*DeleteRetentionPolicyOutput, error) { + req, out := c.DeleteRetentionPolicyRequest(input) + err := req.Send() + return out, err +} + +const opDeleteSubscriptionFilter = "DeleteSubscriptionFilter" + +// DeleteSubscriptionFilterRequest generates a request for the DeleteSubscriptionFilter operation. +func (c *CloudWatchLogs) DeleteSubscriptionFilterRequest(input *DeleteSubscriptionFilterInput) (req *request.Request, output *DeleteSubscriptionFilterOutput) { + op := &request.Operation{ + Name: opDeleteSubscriptionFilter, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteSubscriptionFilterInput{} + } + + req = c.newRequest(op, input, output) + output = &DeleteSubscriptionFilterOutput{} + req.Data = output + return +} + +// Deletes a subscription filter associated with the specified log group. +func (c *CloudWatchLogs) DeleteSubscriptionFilter(input *DeleteSubscriptionFilterInput) (*DeleteSubscriptionFilterOutput, error) { + req, out := c.DeleteSubscriptionFilterRequest(input) + err := req.Send() + return out, err +} + +const opDescribeDestinations = "DescribeDestinations" + +// DescribeDestinationsRequest generates a request for the DescribeDestinations operation. +func (c *CloudWatchLogs) DescribeDestinationsRequest(input *DescribeDestinationsInput) (req *request.Request, output *DescribeDestinationsOutput) { + op := &request.Operation{ + Name: opDescribeDestinations, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"nextToken"}, + OutputTokens: []string{"nextToken"}, + LimitToken: "limit", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeDestinationsInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeDestinationsOutput{} + req.Data = output + return +} + +// Returns all the destinations that are associated with the AWS account making +// the request. The list returned in the response is ASCII-sorted by destination +// name. +// +// By default, this operation returns up to 50 destinations. If there are +// more destinations to list, the response would contain a nextToken value in +// the response body. You can also limit the number of destinations returned +// in the response by specifying the limit parameter in the request. +func (c *CloudWatchLogs) DescribeDestinations(input *DescribeDestinationsInput) (*DescribeDestinationsOutput, error) { + req, out := c.DescribeDestinationsRequest(input) + err := req.Send() + return out, err +} + +func (c *CloudWatchLogs) DescribeDestinationsPages(input *DescribeDestinationsInput, fn func(p *DescribeDestinationsOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.DescribeDestinationsRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*DescribeDestinationsOutput), lastPage) + }) +} + +const opDescribeExportTasks = "DescribeExportTasks" + +// DescribeExportTasksRequest generates a request for the DescribeExportTasks operation. 
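+//
+// A sketch of the polling flow that the CreateExportTask documentation above
+// describes (svc and the task ID are assumed placeholders):
+//
+//     out, err := svc.DescribeExportTasks(&cloudwatchlogs.DescribeExportTasksInput{
+//         TaskId: aws.String("my-task-id"),
+//     })
+//     if err == nil && len(out.ExportTasks) == 1 {
+//         // Status.Code is e.g. PENDING or RUNNING while the task is in flight.
+//         fmt.Println(aws.StringValue(out.ExportTasks[0].Status.Code))
+//     }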
+func (c *CloudWatchLogs) DescribeExportTasksRequest(input *DescribeExportTasksInput) (req *request.Request, output *DescribeExportTasksOutput) { + op := &request.Operation{ + Name: opDescribeExportTasks, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeExportTasksInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeExportTasksOutput{} + req.Data = output + return +} + +// Returns all the export tasks that are associated with the AWS account making +// the request. The export tasks can be filtered based on TaskId or TaskStatus. +// +// By default, this operation returns up to 50 export tasks that satisfy the +// specified filters. If there are more export tasks to list, the response would +// contain a nextToken value in the response body. You can also limit the number +// of export tasks returned in the response by specifying the limit parameter +// in the request. +func (c *CloudWatchLogs) DescribeExportTasks(input *DescribeExportTasksInput) (*DescribeExportTasksOutput, error) { + req, out := c.DescribeExportTasksRequest(input) + err := req.Send() + return out, err +} + +const opDescribeLogGroups = "DescribeLogGroups" + +// DescribeLogGroupsRequest generates a request for the DescribeLogGroups operation. +func (c *CloudWatchLogs) DescribeLogGroupsRequest(input *DescribeLogGroupsInput) (req *request.Request, output *DescribeLogGroupsOutput) { + op := &request.Operation{ + Name: opDescribeLogGroups, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"nextToken"}, + OutputTokens: []string{"nextToken"}, + LimitToken: "limit", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeLogGroupsInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeLogGroupsOutput{} + req.Data = output + return +} + +// Returns all the log groups that are associated with the AWS account making +// the request. The list returned in the response is ASCII-sorted by log group +// name. +// +// By default, this operation returns up to 50 log groups. If there are more +// log groups to list, the response would contain a nextToken value in the response +// body. You can also limit the number of log groups returned in the response +// by specifying the limit parameter in the request. +func (c *CloudWatchLogs) DescribeLogGroups(input *DescribeLogGroupsInput) (*DescribeLogGroupsOutput, error) { + req, out := c.DescribeLogGroupsRequest(input) + err := req.Send() + return out, err +} + +func (c *CloudWatchLogs) DescribeLogGroupsPages(input *DescribeLogGroupsInput, fn func(p *DescribeLogGroupsOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.DescribeLogGroupsRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*DescribeLogGroupsOutput), lastPage) + }) +} + +const opDescribeLogStreams = "DescribeLogStreams" + +// DescribeLogStreamsRequest generates a request for the DescribeLogStreams operation. 
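+//
+// A hedged pagination sketch (svc and the group name are placeholders; per
+// the DescribeLogStreamsInput documentation further down, ordering by
+// LastEventTime cannot be combined with a logStreamNamePrefix):
+//
+//     err := svc.DescribeLogStreamsPages(&cloudwatchlogs.DescribeLogStreamsInput{
+//         LogGroupName: aws.String("my-log-group"),
+//         OrderBy:      aws.String("LastEventTime"),
+//         Descending:   aws.Bool(true),
+//     }, func(p *cloudwatchlogs.DescribeLogStreamsOutput, lastPage bool) bool {
+//         for _, s := range p.LogStreams {
+//             fmt.Println(aws.StringValue(s.LogStreamName))
+//         }
+//         return true // keep paginating until the last page
+//     })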
+func (c *CloudWatchLogs) DescribeLogStreamsRequest(input *DescribeLogStreamsInput) (req *request.Request, output *DescribeLogStreamsOutput) {
+	op := &request.Operation{
+		Name:       opDescribeLogStreams,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+		Paginator: &request.Paginator{
+			InputTokens:     []string{"nextToken"},
+			OutputTokens:    []string{"nextToken"},
+			LimitToken:      "limit",
+			TruncationToken: "",
+		},
+	}
+
+	if input == nil {
+		input = &DescribeLogStreamsInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &DescribeLogStreamsOutput{}
+	req.Data = output
+	return
+}
+
+// Returns all the log streams that are associated with the specified log group.
+// The list returned in the response is ASCII-sorted by log stream name.
+//
+// By default, this operation returns up to 50 log streams. If there are more
+// log streams to list, the response would contain a nextToken value in the
+// response body. You can also limit the number of log streams returned in the
+// response by specifying the limit parameter in the request. This operation
+// has a limit of five transactions per second, after which transactions are
+// throttled.
+func (c *CloudWatchLogs) DescribeLogStreams(input *DescribeLogStreamsInput) (*DescribeLogStreamsOutput, error) {
+	req, out := c.DescribeLogStreamsRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+func (c *CloudWatchLogs) DescribeLogStreamsPages(input *DescribeLogStreamsInput, fn func(p *DescribeLogStreamsOutput, lastPage bool) (shouldContinue bool)) error {
+	page, _ := c.DescribeLogStreamsRequest(input)
+	page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator"))
+	return page.EachPage(func(p interface{}, lastPage bool) bool {
+		return fn(p.(*DescribeLogStreamsOutput), lastPage)
+	})
+}
+
+const opDescribeMetricFilters = "DescribeMetricFilters"
+
+// DescribeMetricFiltersRequest generates a request for the DescribeMetricFilters operation.
+func (c *CloudWatchLogs) DescribeMetricFiltersRequest(input *DescribeMetricFiltersInput) (req *request.Request, output *DescribeMetricFiltersOutput) {
+	op := &request.Operation{
+		Name:       opDescribeMetricFilters,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+		Paginator: &request.Paginator{
+			InputTokens:     []string{"nextToken"},
+			OutputTokens:    []string{"nextToken"},
+			LimitToken:      "limit",
+			TruncationToken: "",
+		},
+	}
+
+	if input == nil {
+		input = &DescribeMetricFiltersInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &DescribeMetricFiltersOutput{}
+	req.Data = output
+	return
+}
+
+// Returns all the metric filters associated with the specified log group.
+// The list returned in the response is ASCII-sorted by filter name.
+//
+// By default, this operation returns up to 50 metric filters. If there are
+// more metric filters to list, the response would contain a nextToken value
+// in the response body. You can also limit the number of metric filters returned
+// in the response by specifying the limit parameter in the request.
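+//
+// A minimal pagination sketch using the Pages helper defined below (svc and
+// the group name are placeholders):
+//
+//     err := svc.DescribeMetricFiltersPages(&cloudwatchlogs.DescribeMetricFiltersInput{
+//         LogGroupName: aws.String("my-log-group"),
+//     }, func(p *cloudwatchlogs.DescribeMetricFiltersOutput, lastPage bool) bool {
+//         fmt.Println(len(p.MetricFilters), "metric filters on this page")
+//         return true // continue fetching pages until lastPage is true
+//     })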
+func (c *CloudWatchLogs) DescribeMetricFilters(input *DescribeMetricFiltersInput) (*DescribeMetricFiltersOutput, error) {
+	req, out := c.DescribeMetricFiltersRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+func (c *CloudWatchLogs) DescribeMetricFiltersPages(input *DescribeMetricFiltersInput, fn func(p *DescribeMetricFiltersOutput, lastPage bool) (shouldContinue bool)) error {
+	page, _ := c.DescribeMetricFiltersRequest(input)
+	page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator"))
+	return page.EachPage(func(p interface{}, lastPage bool) bool {
+		return fn(p.(*DescribeMetricFiltersOutput), lastPage)
+	})
+}
+
+const opDescribeSubscriptionFilters = "DescribeSubscriptionFilters"
+
+// DescribeSubscriptionFiltersRequest generates a request for the DescribeSubscriptionFilters operation.
+func (c *CloudWatchLogs) DescribeSubscriptionFiltersRequest(input *DescribeSubscriptionFiltersInput) (req *request.Request, output *DescribeSubscriptionFiltersOutput) {
+	op := &request.Operation{
+		Name:       opDescribeSubscriptionFilters,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+		Paginator: &request.Paginator{
+			InputTokens:     []string{"nextToken"},
+			OutputTokens:    []string{"nextToken"},
+			LimitToken:      "limit",
+			TruncationToken: "",
+		},
+	}
+
+	if input == nil {
+		input = &DescribeSubscriptionFiltersInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &DescribeSubscriptionFiltersOutput{}
+	req.Data = output
+	return
+}
+
+// Returns all the subscription filters associated with the specified log group.
+// The list returned in the response is ASCII-sorted by filter name.
+//
+// By default, this operation returns up to 50 subscription filters. If there
+// are more subscription filters to list, the response would contain a nextToken
+// value in the response body. You can also limit the number of subscription
+// filters returned in the response by specifying the limit parameter in the
+// request.
+func (c *CloudWatchLogs) DescribeSubscriptionFilters(input *DescribeSubscriptionFiltersInput) (*DescribeSubscriptionFiltersOutput, error) {
+	req, out := c.DescribeSubscriptionFiltersRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+func (c *CloudWatchLogs) DescribeSubscriptionFiltersPages(input *DescribeSubscriptionFiltersInput, fn func(p *DescribeSubscriptionFiltersOutput, lastPage bool) (shouldContinue bool)) error {
+	page, _ := c.DescribeSubscriptionFiltersRequest(input)
+	page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator"))
+	return page.EachPage(func(p interface{}, lastPage bool) bool {
+		return fn(p.(*DescribeSubscriptionFiltersOutput), lastPage)
+	})
+}
+
+const opFilterLogEvents = "FilterLogEvents"
+
+// FilterLogEventsRequest generates a request for the FilterLogEvents operation.
+func (c *CloudWatchLogs) FilterLogEventsRequest(input *FilterLogEventsInput) (req *request.Request, output *FilterLogEventsOutput) {
+	op := &request.Operation{
+		Name:       opFilterLogEvents,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+		Paginator: &request.Paginator{
+			InputTokens:     []string{"nextToken"},
+			OutputTokens:    []string{"nextToken"},
+			LimitToken:      "limit",
+			TruncationToken: "",
+		},
+	}
+
+	if input == nil {
+		input = &FilterLogEventsInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &FilterLogEventsOutput{}
+	req.Data = output
+	return
+}
+
+// Retrieves log events from the specified log group, optionally filtered by
+// a filter pattern.
+// You can provide an optional time range to filter the results on the event
+// timestamp. You can limit the streams searched to an explicit list of
+// logStreamNames.
+//
+// By default, this operation returns as many matching log events as can fit
+// in a response size of 1MB, up to 10,000 log events, or all the events found
+// within a time-bounded scan window. If the response includes a nextToken,
+// then there is more data to search, and the search can be resumed with a new
+// request providing the nextToken. The response will contain a list of searchedLogStreams
+// that contains information about which streams were searched in the request
+// and whether they have been searched completely or require further pagination.
+// The limit parameter in the request can be used to specify the maximum number
+// of events to return in a page.
+func (c *CloudWatchLogs) FilterLogEvents(input *FilterLogEventsInput) (*FilterLogEventsOutput, error) {
+	req, out := c.FilterLogEventsRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+func (c *CloudWatchLogs) FilterLogEventsPages(input *FilterLogEventsInput, fn func(p *FilterLogEventsOutput, lastPage bool) (shouldContinue bool)) error {
+	page, _ := c.FilterLogEventsRequest(input)
+	page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator"))
+	return page.EachPage(func(p interface{}, lastPage bool) bool {
+		return fn(p.(*FilterLogEventsOutput), lastPage)
+	})
+}
+
+const opGetLogEvents = "GetLogEvents"
+
+// GetLogEventsRequest generates a request for the GetLogEvents operation.
+func (c *CloudWatchLogs) GetLogEventsRequest(input *GetLogEventsInput) (req *request.Request, output *GetLogEventsOutput) {
+	op := &request.Operation{
+		Name:       opGetLogEvents,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+		Paginator: &request.Paginator{
+			InputTokens:     []string{"nextToken"},
+			OutputTokens:    []string{"nextForwardToken"},
+			LimitToken:      "limit",
+			TruncationToken: "",
+		},
+	}
+
+	if input == nil {
+		input = &GetLogEventsInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &GetLogEventsOutput{}
+	req.Data = output
+	return
+}
+
+// Retrieves log events from the specified log stream. You can provide an optional
+// time range to filter the results on the event timestamp.
+//
+// By default, this operation returns as many log events as can fit in a response
+// size of 1MB, up to 10,000 log events. The response will always include a
+// nextForwardToken and a nextBackwardToken in the response body. You can use
+// either of these tokens in subsequent GetLogEvents requests to paginate through
+// events in either forward or backward direction. You can also limit the number
+// of log events returned in the response by specifying the limit parameter
+// in the request.
+func (c *CloudWatchLogs) GetLogEvents(input *GetLogEventsInput) (*GetLogEventsOutput, error) {
+	req, out := c.GetLogEventsRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+func (c *CloudWatchLogs) GetLogEventsPages(input *GetLogEventsInput, fn func(p *GetLogEventsOutput, lastPage bool) (shouldContinue bool)) error {
+	page, _ := c.GetLogEventsRequest(input)
+	page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator"))
+	return page.EachPage(func(p interface{}, lastPage bool) bool {
+		return fn(p.(*GetLogEventsOutput), lastPage)
+	})
+}
+
+const opPutDestination = "PutDestination"
+
+// PutDestinationRequest generates a request for the PutDestination operation.
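+//
+// A sketch of the two-step flow described below (all names and ARNs are
+// placeholders, and the PutDestinationInput fields shown here mirror the
+// Destination type defined later in this file, since PutDestinationInput
+// itself is declared further down):
+//
+//     _, err := svc.PutDestination(&cloudwatchlogs.PutDestinationInput{
+//         DestinationName: aws.String("my-destination"),
+//         RoleArn:         aws.String("arn:aws:iam::123456789012:role/CWLtoKinesisRole"),
+//         TargetArn:       aws.String("arn:aws:kinesis:us-east-1:123456789012:stream/my-stream"),
+//     })
+//     if err == nil {
+//         _, err = svc.PutDestinationPolicy(&cloudwatchlogs.PutDestinationPolicyInput{
+//             DestinationName: aws.String("my-destination"),
+//             AccessPolicy:    aws.String(accessPolicyJSON), // an IAM policy document (placeholder)
+//         })
+//     }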
+func (c *CloudWatchLogs) PutDestinationRequest(input *PutDestinationInput) (req *request.Request, output *PutDestinationOutput) {
+	op := &request.Operation{
+		Name:       opPutDestination,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &PutDestinationInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &PutDestinationOutput{}
+	req.Data = output
+	return
+}
+
+// Creates or updates a Destination. A destination encapsulates a physical resource
+// (such as a Kinesis stream) and allows you to subscribe to a real-time stream
+// of log events from a different account, ingested through PutLogEvents requests.
+// Currently, the only supported physical resource is an Amazon Kinesis stream
+// belonging to the same account as the destination.
+//
+// A destination controls what is written to its Amazon Kinesis stream through
+// an access policy. By default, PutDestination does not set any access policy
+// with the destination, which means a cross-account user will not be able to
+// call PutSubscriptionFilter against this destination. To enable that, the
+// destination owner must call PutDestinationPolicy after PutDestination.
+func (c *CloudWatchLogs) PutDestination(input *PutDestinationInput) (*PutDestinationOutput, error) {
+	req, out := c.PutDestinationRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+const opPutDestinationPolicy = "PutDestinationPolicy"
+
+// PutDestinationPolicyRequest generates a request for the PutDestinationPolicy operation.
+func (c *CloudWatchLogs) PutDestinationPolicyRequest(input *PutDestinationPolicyInput) (req *request.Request, output *PutDestinationPolicyOutput) {
+	op := &request.Operation{
+		Name:       opPutDestinationPolicy,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &PutDestinationPolicyInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &PutDestinationPolicyOutput{}
+	req.Data = output
+	return
+}
+
+// Creates or updates an access policy associated with an existing Destination.
+// An access policy is an IAM policy document (http://docs.aws.amazon.com/IAM/latest/UserGuide/policies_overview.html)
+// that is used to authorize claims to register a subscription filter against
+// a given destination.
+func (c *CloudWatchLogs) PutDestinationPolicy(input *PutDestinationPolicyInput) (*PutDestinationPolicyOutput, error) {
+	req, out := c.PutDestinationPolicyRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+const opPutLogEvents = "PutLogEvents"
+
+// PutLogEventsRequest generates a request for the PutLogEvents operation.
+func (c *CloudWatchLogs) PutLogEventsRequest(input *PutLogEventsInput) (req *request.Request, output *PutLogEventsOutput) {
+	op := &request.Operation{
+		Name:       opPutLogEvents,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &PutLogEventsInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &PutLogEventsOutput{}
+	req.Data = output
+	return
+}
+
+// Uploads a batch of log events to the specified log stream.
+//
+// Every PutLogEvents request must include the sequenceToken obtained from
+// the response of the previous request. An upload in a newly created log stream
+// does not require a sequenceToken.
+//
+// The batch of events must satisfy the following constraints: The maximum
+// batch size is 1,048,576 bytes, and this size is calculated as the sum of
+// all event messages in UTF-8, plus 26 bytes for each log event. None of the
+// log events in the batch can be more than 2 hours in the future.
+// None of the log events in the batch can be older than 14 days or the retention
+// period of the log group. The log events in the batch must be in chronological
+// order by their timestamp. The maximum number of log events in a batch is 10,000.
+func (c *CloudWatchLogs) PutLogEvents(input *PutLogEventsInput) (*PutLogEventsOutput, error) {
+	req, out := c.PutLogEventsRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+const opPutMetricFilter = "PutMetricFilter"
+
+// PutMetricFilterRequest generates a request for the PutMetricFilter operation.
+func (c *CloudWatchLogs) PutMetricFilterRequest(input *PutMetricFilterInput) (req *request.Request, output *PutMetricFilterOutput) {
+	op := &request.Operation{
+		Name:       opPutMetricFilter,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &PutMetricFilterInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &PutMetricFilterOutput{}
+	req.Data = output
+	return
+}
+
+// Creates or updates a metric filter and associates it with the specified log
+// group. Metric filters allow you to configure rules to extract metric data
+// from log events ingested through PutLogEvents requests.
+//
+// The maximum number of metric filters that can be associated with a log
+// group is 100.
+func (c *CloudWatchLogs) PutMetricFilter(input *PutMetricFilterInput) (*PutMetricFilterOutput, error) {
+	req, out := c.PutMetricFilterRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+const opPutRetentionPolicy = "PutRetentionPolicy"
+
+// PutRetentionPolicyRequest generates a request for the PutRetentionPolicy operation.
+func (c *CloudWatchLogs) PutRetentionPolicyRequest(input *PutRetentionPolicyInput) (req *request.Request, output *PutRetentionPolicyOutput) {
+	op := &request.Operation{
+		Name:       opPutRetentionPolicy,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &PutRetentionPolicyInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &PutRetentionPolicyOutput{}
+	req.Data = output
+	return
+}
+
+// Sets the retention of the specified log group. A retention policy allows
+// you to configure the number of days you want to retain log events in the
+// specified log group.
+func (c *CloudWatchLogs) PutRetentionPolicy(input *PutRetentionPolicyInput) (*PutRetentionPolicyOutput, error) {
+	req, out := c.PutRetentionPolicyRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+const opPutSubscriptionFilter = "PutSubscriptionFilter"
+
+// PutSubscriptionFilterRequest generates a request for the PutSubscriptionFilter operation.
+func (c *CloudWatchLogs) PutSubscriptionFilterRequest(input *PutSubscriptionFilterInput) (req *request.Request, output *PutSubscriptionFilterOutput) {
+	op := &request.Operation{
+		Name:       opPutSubscriptionFilter,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &PutSubscriptionFilterInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &PutSubscriptionFilterOutput{}
+	req.Data = output
+	return
+}
+
+// Creates or updates a subscription filter and associates it with the specified
+// log group. Subscription filters allow you to subscribe to a real-time stream
+// of log events ingested through PutLogEvents requests and have them delivered
+// to a specific destination. Currently, the supported destinations are: An
+// Amazon Kinesis stream belonging to the same account as the subscription filter,
+// for same-account delivery.
A logical destination (used via an ARN of Destination) +// belonging to a different account, for cross-account delivery. +// +// Currently there can only be one subscription filter associated with a log +// group. +func (c *CloudWatchLogs) PutSubscriptionFilter(input *PutSubscriptionFilterInput) (*PutSubscriptionFilterOutput, error) { + req, out := c.PutSubscriptionFilterRequest(input) + err := req.Send() + return out, err +} + +const opTestMetricFilter = "TestMetricFilter" + +// TestMetricFilterRequest generates a request for the TestMetricFilter operation. +func (c *CloudWatchLogs) TestMetricFilterRequest(input *TestMetricFilterInput) (req *request.Request, output *TestMetricFilterOutput) { + op := &request.Operation{ + Name: opTestMetricFilter, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &TestMetricFilterInput{} + } + + req = c.newRequest(op, input, output) + output = &TestMetricFilterOutput{} + req.Data = output + return +} + +// Tests the filter pattern of a metric filter against a sample of log event +// messages. You can use this operation to validate the correctness of a metric +// filter pattern. +func (c *CloudWatchLogs) TestMetricFilter(input *TestMetricFilterInput) (*TestMetricFilterOutput, error) { + req, out := c.TestMetricFilterRequest(input) + err := req.Send() + return out, err +} + +type CancelExportTaskInput struct { + _ struct{} `type:"structure"` + + // Id of the export task to cancel. + TaskId *string `locationName:"taskId" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s CancelExportTaskInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CancelExportTaskInput) GoString() string { + return s.String() +} + +type CancelExportTaskOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s CancelExportTaskOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CancelExportTaskOutput) GoString() string { + return s.String() +} + +type CreateExportTaskInput struct { + _ struct{} `type:"structure"` + + // Name of Amazon S3 bucket to which the log data will be exported. + // + // NOTE: Only buckets in the same AWS region are supported + Destination *string `locationName:"destination" min:"1" type:"string" required:"true"` + + // Prefix that will be used as the start of Amazon S3 key for every object exported. + // If not specified, this defaults to 'exportedlogs'. + DestinationPrefix *string `locationName:"destinationPrefix" type:"string"` + + // A point in time expressed as the number of milliseconds since Jan 1, 1970 + // 00:00:00 UTC. It indicates the start time of the range for the request. Events + // with a timestamp prior to this time will not be exported. + From *int64 `locationName:"from" type:"long" required:"true"` + + // The name of the log group to export. + LogGroupName *string `locationName:"logGroupName" min:"1" type:"string" required:"true"` + + // Will only export log streams that match the provided logStreamNamePrefix. + // If you don't specify a value, no prefix filter is applied. + LogStreamNamePrefix *string `locationName:"logStreamNamePrefix" min:"1" type:"string"` + + // The name of the export task. + TaskName *string `locationName:"taskName" min:"1" type:"string"` + + // A point in time expressed as the number of milliseconds since Jan 1, 1970 + // 00:00:00 UTC. 
It indicates the end time of the range for the request. Events + // with a timestamp later than this time will not be exported. + To *int64 `locationName:"to" type:"long" required:"true"` +} + +// String returns the string representation +func (s CreateExportTaskInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateExportTaskInput) GoString() string { + return s.String() +} + +type CreateExportTaskOutput struct { + _ struct{} `type:"structure"` + + // Id of the export task that got created. + TaskId *string `locationName:"taskId" min:"1" type:"string"` +} + +// String returns the string representation +func (s CreateExportTaskOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateExportTaskOutput) GoString() string { + return s.String() +} + +type CreateLogGroupInput struct { + _ struct{} `type:"structure"` + + // The name of the log group to create. + LogGroupName *string `locationName:"logGroupName" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s CreateLogGroupInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateLogGroupInput) GoString() string { + return s.String() +} + +type CreateLogGroupOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s CreateLogGroupOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateLogGroupOutput) GoString() string { + return s.String() +} + +type CreateLogStreamInput struct { + _ struct{} `type:"structure"` + + // The name of the log group under which the log stream is to be created. + LogGroupName *string `locationName:"logGroupName" min:"1" type:"string" required:"true"` + + // The name of the log stream to create. + LogStreamName *string `locationName:"logStreamName" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s CreateLogStreamInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateLogStreamInput) GoString() string { + return s.String() +} + +type CreateLogStreamOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s CreateLogStreamOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateLogStreamOutput) GoString() string { + return s.String() +} + +type DeleteDestinationInput struct { + _ struct{} `type:"structure"` + + // The name of destination to delete. + DestinationName *string `locationName:"destinationName" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteDestinationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteDestinationInput) GoString() string { + return s.String() +} + +type DeleteDestinationOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteDestinationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteDestinationOutput) GoString() string { + return s.String() +} + +type DeleteLogGroupInput struct { + _ struct{} `type:"structure"` + + // The name of the log group to delete. 
+ LogGroupName *string `locationName:"logGroupName" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteLogGroupInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteLogGroupInput) GoString() string { + return s.String() +} + +type DeleteLogGroupOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteLogGroupOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteLogGroupOutput) GoString() string { + return s.String() +} + +type DeleteLogStreamInput struct { + _ struct{} `type:"structure"` + + // The name of the log group under which the log stream to delete belongs. + LogGroupName *string `locationName:"logGroupName" min:"1" type:"string" required:"true"` + + // The name of the log stream to delete. + LogStreamName *string `locationName:"logStreamName" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteLogStreamInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteLogStreamInput) GoString() string { + return s.String() +} + +type DeleteLogStreamOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteLogStreamOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteLogStreamOutput) GoString() string { + return s.String() +} + +type DeleteMetricFilterInput struct { + _ struct{} `type:"structure"` + + // The name of the metric filter to delete. + FilterName *string `locationName:"filterName" min:"1" type:"string" required:"true"` + + // The name of the log group that is associated with the metric filter to delete. + LogGroupName *string `locationName:"logGroupName" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteMetricFilterInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteMetricFilterInput) GoString() string { + return s.String() +} + +type DeleteMetricFilterOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteMetricFilterOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteMetricFilterOutput) GoString() string { + return s.String() +} + +type DeleteRetentionPolicyInput struct { + _ struct{} `type:"structure"` + + // The name of the log group that is associated with the retention policy to + // delete. 
+ LogGroupName *string `locationName:"logGroupName" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteRetentionPolicyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteRetentionPolicyInput) GoString() string { + return s.String() +} + +type DeleteRetentionPolicyOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteRetentionPolicyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteRetentionPolicyOutput) GoString() string { + return s.String() +} + +type DeleteSubscriptionFilterInput struct { + _ struct{} `type:"structure"` + + // The name of the subscription filter to delete. + FilterName *string `locationName:"filterName" min:"1" type:"string" required:"true"` + + // The name of the log group that is associated with the subscription filter + // to delete. + LogGroupName *string `locationName:"logGroupName" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteSubscriptionFilterInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteSubscriptionFilterInput) GoString() string { + return s.String() +} + +type DeleteSubscriptionFilterOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteSubscriptionFilterOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteSubscriptionFilterOutput) GoString() string { + return s.String() +} + +type DescribeDestinationsInput struct { + _ struct{} `type:"structure"` + + // Will only return destinations that match the provided destinationNamePrefix. + // If you don't specify a value, no prefix is applied. + DestinationNamePrefix *string `min:"1" type:"string"` + + // The maximum number of results to return. + Limit *int64 `locationName:"limit" min:"1" type:"integer"` + + // A string token used for pagination that points to the next page of results. + // It must be a value obtained from the response of the previous request. The + // token expires after 24 hours. + NextToken *string `locationName:"nextToken" min:"1" type:"string"` +} + +// String returns the string representation +func (s DescribeDestinationsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeDestinationsInput) GoString() string { + return s.String() +} + +type DescribeDestinationsOutput struct { + _ struct{} `type:"structure"` + + Destinations []*Destination `locationName:"destinations" type:"list"` + + // A string token used for pagination that points to the next page of results. + // It must be a value obtained from the response of the previous request. The + // token expires after 24 hours. + NextToken *string `locationName:"nextToken" min:"1" type:"string"` +} + +// String returns the string representation +func (s DescribeDestinationsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeDestinationsOutput) GoString() string { + return s.String() +} + +type DescribeExportTasksInput struct { + _ struct{} `type:"structure"` + + // The maximum number of items returned in the response. If you don't specify + // a value, the request would return up to 50 items. 
+	Limit *int64 `locationName:"limit" min:"1" type:"integer"`
+
+	// A string token used for pagination that points to the next page of results.
+	// It must be a value obtained from the response of the previous DescribeExportTasks
+	// request.
+	NextToken *string `locationName:"nextToken" min:"1" type:"string"`
+
+	// All export tasks that match the specified status code will be returned.
+	// This can return zero or more export tasks.
+	StatusCode *string `locationName:"statusCode" type:"string" enum:"ExportTaskStatusCode"`
+
+	// The export task that matches the specified task Id will be returned. This
+	// can result in zero or one export task.
+	TaskId *string `locationName:"taskId" min:"1" type:"string"`
+}
+
+// String returns the string representation
+func (s DescribeExportTasksInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeExportTasksInput) GoString() string {
+	return s.String()
+}
+
+type DescribeExportTasksOutput struct {
+	_ struct{} `type:"structure"`
+
+	// A list of export tasks.
+	ExportTasks []*ExportTask `locationName:"exportTasks" type:"list"`
+
+	// A string token used for pagination that points to the next page of results.
+	// It must be a value obtained from the response of the previous request. The
+	// token expires after 24 hours.
+	NextToken *string `locationName:"nextToken" min:"1" type:"string"`
+}
+
+// String returns the string representation
+func (s DescribeExportTasksOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeExportTasksOutput) GoString() string {
+	return s.String()
+}
+
+type DescribeLogGroupsInput struct {
+	_ struct{} `type:"structure"`
+
+	// The maximum number of items returned in the response. If you don't specify
+	// a value, the request would return up to 50 items.
+	Limit *int64 `locationName:"limit" min:"1" type:"integer"`
+
+	// Will only return log groups that match the provided logGroupNamePrefix. If
+	// you don't specify a value, no prefix filter is applied.
+	LogGroupNamePrefix *string `locationName:"logGroupNamePrefix" min:"1" type:"string"`
+
+	// A string token used for pagination that points to the next page of results.
+	// It must be a value obtained from the response of the previous DescribeLogGroups
+	// request.
+	NextToken *string `locationName:"nextToken" min:"1" type:"string"`
+}
+
+// String returns the string representation
+func (s DescribeLogGroupsInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeLogGroupsInput) GoString() string {
+	return s.String()
+}
+
+type DescribeLogGroupsOutput struct {
+	_ struct{} `type:"structure"`
+
+	// A list of log groups.
+	LogGroups []*LogGroup `locationName:"logGroups" type:"list"`
+
+	// A string token used for pagination that points to the next page of results.
+	// It must be a value obtained from the response of the previous request. The
+	// token expires after 24 hours.
+	NextToken *string `locationName:"nextToken" min:"1" type:"string"`
+}
+
+// String returns the string representation
+func (s DescribeLogGroupsOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeLogGroupsOutput) GoString() string {
+	return s.String()
+}
+
+type DescribeLogStreamsInput struct {
+	_ struct{} `type:"structure"`
+
+	// If set to true, results are returned in descending order.
If you don't specify + // a value or set it to false, results are returned in ascending order. + Descending *bool `locationName:"descending" type:"boolean"` + + // The maximum number of items returned in the response. If you don't specify + // a value, the request would return up to 50 items. + Limit *int64 `locationName:"limit" min:"1" type:"integer"` + + // The log group name for which log streams are to be listed. + LogGroupName *string `locationName:"logGroupName" min:"1" type:"string" required:"true"` + + // Will only return log streams that match the provided logStreamNamePrefix. + // If you don't specify a value, no prefix filter is applied. + LogStreamNamePrefix *string `locationName:"logStreamNamePrefix" min:"1" type:"string"` + + // A string token used for pagination that points to the next page of results. + // It must be a value obtained from the response of the previous DescribeLogStreams + // request. + NextToken *string `locationName:"nextToken" min:"1" type:"string"` + + // Specifies what to order the returned log streams by. Valid arguments are + // 'LogStreamName' or 'LastEventTime'. If you don't specify a value, results + // are ordered by LogStreamName. If 'LastEventTime' is chosen, the request cannot + // also contain a logStreamNamePrefix. + OrderBy *string `locationName:"orderBy" type:"string" enum:"OrderBy"` +} + +// String returns the string representation +func (s DescribeLogStreamsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeLogStreamsInput) GoString() string { + return s.String() +} + +type DescribeLogStreamsOutput struct { + _ struct{} `type:"structure"` + + // A list of log streams. + LogStreams []*LogStream `locationName:"logStreams" type:"list"` + + // A string token used for pagination that points to the next page of results. + // It must be a value obtained from the response of the previous request. The + // token expires after 24 hours. + NextToken *string `locationName:"nextToken" min:"1" type:"string"` +} + +// String returns the string representation +func (s DescribeLogStreamsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeLogStreamsOutput) GoString() string { + return s.String() +} + +type DescribeMetricFiltersInput struct { + _ struct{} `type:"structure"` + + // Will only return metric filters that match the provided filterNamePrefix. + // If you don't specify a value, no prefix filter is applied. + FilterNamePrefix *string `locationName:"filterNamePrefix" min:"1" type:"string"` + + // The maximum number of items returned in the response. If you don't specify + // a value, the request would return up to 50 items. + Limit *int64 `locationName:"limit" min:"1" type:"integer"` + + // The log group name for which metric filters are to be listed. + LogGroupName *string `locationName:"logGroupName" min:"1" type:"string" required:"true"` + + // A string token used for pagination that points to the next page of results. + // It must be a value obtained from the response of the previous DescribeMetricFilters + // request. 
+	NextToken *string `locationName:"nextToken" min:"1" type:"string"`
+}
+
+// String returns the string representation
+func (s DescribeMetricFiltersInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeMetricFiltersInput) GoString() string {
+	return s.String()
+}
+
+type DescribeMetricFiltersOutput struct {
+	_ struct{} `type:"structure"`
+
+	MetricFilters []*MetricFilter `locationName:"metricFilters" type:"list"`
+
+	// A string token used for pagination that points to the next page of results.
+	// It must be a value obtained from the response of the previous request. The
+	// token expires after 24 hours.
+	NextToken *string `locationName:"nextToken" min:"1" type:"string"`
+}
+
+// String returns the string representation
+func (s DescribeMetricFiltersOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeMetricFiltersOutput) GoString() string {
+	return s.String()
+}
+
+type DescribeSubscriptionFiltersInput struct {
+	_ struct{} `type:"structure"`
+
+	// Will only return subscription filters that match the provided filterNamePrefix.
+	// If you don't specify a value, no prefix filter is applied.
+	FilterNamePrefix *string `locationName:"filterNamePrefix" min:"1" type:"string"`
+
+	// The maximum number of results to return.
+	Limit *int64 `locationName:"limit" min:"1" type:"integer"`
+
+	// The log group name for which subscription filters are to be listed.
+	LogGroupName *string `locationName:"logGroupName" min:"1" type:"string" required:"true"`
+
+	// A string token used for pagination that points to the next page of results.
+	// It must be a value obtained from the response of the previous request. The
+	// token expires after 24 hours.
+	NextToken *string `locationName:"nextToken" min:"1" type:"string"`
+}
+
+// String returns the string representation
+func (s DescribeSubscriptionFiltersInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeSubscriptionFiltersInput) GoString() string {
+	return s.String()
+}
+
+type DescribeSubscriptionFiltersOutput struct {
+	_ struct{} `type:"structure"`
+
+	// A string token used for pagination that points to the next page of results.
+	// It must be a value obtained from the response of the previous request. The
+	// token expires after 24 hours.
+	NextToken *string `locationName:"nextToken" min:"1" type:"string"`
+
+	SubscriptionFilters []*SubscriptionFilter `locationName:"subscriptionFilters" type:"list"`
+}
+
+// String returns the string representation
+func (s DescribeSubscriptionFiltersOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeSubscriptionFiltersOutput) GoString() string {
+	return s.String()
+}
+
+// A cross-account destination that is the recipient of subscription log events.
+type Destination struct {
+	_ struct{} `type:"structure"`
+
+	// An IAM policy document that governs which AWS accounts can create subscription
+	// filters against this destination.
+	AccessPolicy *string `locationName:"accessPolicy" min:"1" type:"string"`
+
+	// ARN of this destination.
+	Arn *string `locationName:"arn" type:"string"`
+
+	// A point in time expressed as the number of milliseconds since Jan 1, 1970
+	// 00:00:00 UTC specifying when this destination was created.
+	CreationTime *int64 `locationName:"creationTime" type:"long"`
+
+	// Name of the destination.
+	DestinationName *string `locationName:"destinationName" min:"1" type:"string"`
+
+	// A role for impersonation when delivering log events to the target.
+	RoleArn *string `locationName:"roleArn" min:"1" type:"string"`
+
+	// ARN of the physical target where the log events will be delivered (e.g.
+	// the ARN of a Kinesis stream).
+	TargetArn *string `locationName:"targetArn" min:"1" type:"string"`
+}
+
+// String returns the string representation
+func (s Destination) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s Destination) GoString() string {
+	return s.String()
+}
+
+// Represents an export task.
+type ExportTask struct {
+	_ struct{} `type:"structure"`
+
+	// Name of Amazon S3 bucket to which the log data was exported.
+	Destination *string `locationName:"destination" min:"1" type:"string"`
+
+	// Prefix that was used as the start of Amazon S3 key for every object exported.
+	DestinationPrefix *string `locationName:"destinationPrefix" type:"string"`
+
+	// Execution info about the export task.
+	ExecutionInfo *ExportTaskExecutionInfo `locationName:"executionInfo" type:"structure"`
+
+	// A point in time expressed as the number of milliseconds since Jan 1, 1970
+	// 00:00:00 UTC. Events with a timestamp prior to this time are not exported.
+	From *int64 `locationName:"from" type:"long"`
+
+	// The name of the log group from which log data was exported.
+	LogGroupName *string `locationName:"logGroupName" min:"1" type:"string"`
+
+	// Status of the export task.
+	Status *ExportTaskStatus `locationName:"status" type:"structure"`
+
+	// Id of the export task.
+	TaskId *string `locationName:"taskId" min:"1" type:"string"`
+
+	// The name of the export task.
+	TaskName *string `locationName:"taskName" min:"1" type:"string"`
+
+	// A point in time expressed as the number of milliseconds since Jan 1, 1970
+	// 00:00:00 UTC. Events with a timestamp later than this time are not exported.
+	To *int64 `locationName:"to" type:"long"`
+}
+
+// String returns the string representation
+func (s ExportTask) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ExportTask) GoString() string {
+	return s.String()
+}
+
+// Represents the execution info of an export task.
+type ExportTaskExecutionInfo struct {
+	_ struct{} `type:"structure"`
+
+	// A point in time when the export task was completed.
+	CompletionTime *int64 `locationName:"completionTime" type:"long"`
+
+	// A point in time when the export task was created.
+	CreationTime *int64 `locationName:"creationTime" type:"long"`
+}
+
+// String returns the string representation
+func (s ExportTaskExecutionInfo) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ExportTaskExecutionInfo) GoString() string {
+	return s.String()
+}
+
+// Represents the status of an export task.
+type ExportTaskStatus struct {
+	_ struct{} `type:"structure"`
+
+	// Status code of the export task.
+	Code *string `locationName:"code" type:"string" enum:"ExportTaskStatusCode"`
+
+	// Status message related to the code.
+ Message *string `locationName:"message" type:"string"` +} + +// String returns the string representation +func (s ExportTaskStatus) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ExportTaskStatus) GoString() string { + return s.String() +} + +type FilterLogEventsInput struct { + _ struct{} `type:"structure"` + + // A point in time expressed as the number of milliseconds since Jan 1, 1970 + // 00:00:00 UTC. If provided, events with a timestamp later than this time are + // not returned. + EndTime *int64 `locationName:"endTime" type:"long"` + + // A valid CloudWatch Logs filter pattern to use for filtering the response. + // If not provided, all the events are matched. + FilterPattern *string `locationName:"filterPattern" type:"string"` + + // If provided, the API will make a best effort to provide responses that contain + // events from multiple log streams within the log group interleaved in a single + // response. If not provided, all the matched log events in the first log stream + // will be searched first, then those in the next log stream, etc. + Interleaved *bool `locationName:"interleaved" type:"boolean"` + + // The maximum number of events to return in a page of results. Default is 10,000 + // events. + Limit *int64 `locationName:"limit" min:"1" type:"integer"` + + // The name of the log group to query. + LogGroupName *string `locationName:"logGroupName" min:"1" type:"string" required:"true"` + + // Optional list of log stream names within the specified log group to search. + // Defaults to all the log streams in the log group. + LogStreamNames []*string `locationName:"logStreamNames" min:"1" type:"list"` + + // A pagination token obtained from a FilterLogEvents response to continue paginating + // the FilterLogEvents results. This token is omitted from the response when + // there are no other events to display. + NextToken *string `locationName:"nextToken" min:"1" type:"string"` + + // A point in time expressed as the number of milliseconds since Jan 1, 1970 + // 00:00:00 UTC. If provided, events with a timestamp prior to this time are + // not returned. + StartTime *int64 `locationName:"startTime" type:"long"` +} + +// String returns the string representation +func (s FilterLogEventsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s FilterLogEventsInput) GoString() string { + return s.String() +} + +type FilterLogEventsOutput struct { + _ struct{} `type:"structure"` + + // A list of FilteredLogEvent objects representing the matched events from the + // request. + Events []*FilteredLogEvent `locationName:"events" type:"list"` + + // A pagination token obtained from a FilterLogEvents response to continue paginating + // the FilterLogEvents results. This token is omitted from the response when + // there are no other events to display. + NextToken *string `locationName:"nextToken" min:"1" type:"string"` + + // A list of SearchedLogStream objects indicating which log streams have been + // searched in this request and whether each has been searched completely or + // still has more to be paginated. 
+ SearchedLogStreams []*SearchedLogStream `locationName:"searchedLogStreams" type:"list"` +} + +// String returns the string representation +func (s FilterLogEventsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s FilterLogEventsOutput) GoString() string { + return s.String() +} + +// Represents a matched event from a FilterLogEvents request. +type FilteredLogEvent struct { + _ struct{} `type:"structure"` + + // A unique identifier for this event. + EventId *string `locationName:"eventId" type:"string"` + + // A point in time expressed as the number of milliseconds since Jan 1, 1970 + // 00:00:00 UTC. + IngestionTime *int64 `locationName:"ingestionTime" type:"long"` + + // The name of the log stream this event belongs to. + LogStreamName *string `locationName:"logStreamName" min:"1" type:"string"` + + // The data contained in the log event. + Message *string `locationName:"message" min:"1" type:"string"` + + // A point in time expressed as the number of milliseconds since Jan 1, 1970 + // 00:00:00 UTC. + Timestamp *int64 `locationName:"timestamp" type:"long"` +} + +// String returns the string representation +func (s FilteredLogEvent) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s FilteredLogEvent) GoString() string { + return s.String() +} + +type GetLogEventsInput struct { + _ struct{} `type:"structure"` + + // A point in time expressed as the number of milliseconds since Jan 1, 1970 + // 00:00:00 UTC. + EndTime *int64 `locationName:"endTime" type:"long"` + + // The maximum number of log events returned in the response. If you don't specify + // a value, the request would return as many log events as can fit in a response + // size of 1MB, up to 10,000 log events. + Limit *int64 `locationName:"limit" min:"1" type:"integer"` + + // The name of the log group to query. + LogGroupName *string `locationName:"logGroupName" min:"1" type:"string" required:"true"` + + // The name of the log stream to query. + LogStreamName *string `locationName:"logStreamName" min:"1" type:"string" required:"true"` + + // A string token used for pagination that points to the next page of results. + // It must be a value obtained from the nextForwardToken or nextBackwardToken + // fields in the response of the previous GetLogEvents request. + NextToken *string `locationName:"nextToken" min:"1" type:"string"` + + // If set to true, the earliest log events would be returned first. The default + // is false (the latest log events are returned first). + StartFromHead *bool `locationName:"startFromHead" type:"boolean"` + + // A point in time expressed as the number of milliseconds since Jan 1, 1970 + // 00:00:00 UTC. + StartTime *int64 `locationName:"startTime" type:"long"` +} + +// String returns the string representation +func (s GetLogEventsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetLogEventsInput) GoString() string { + return s.String() +} + +type GetLogEventsOutput struct { + _ struct{} `type:"structure"` + + Events []*OutputLogEvent `locationName:"events" type:"list"` + + // A string token used for pagination that points to the next page of results. + // It must be a value obtained from the response of the previous request. The + // token expires after 24 hours. 
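+ // A pagination sketch, with svc, params, and out as placeholder names:
+ //
+ // out, _ := svc.GetLogEvents(params)
+ // params.NextToken = out.NextForwardToken // or NextBackwardToken to page back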
+ NextBackwardToken *string `locationName:"nextBackwardToken" min:"1" type:"string"`
+
+ // A string token used for pagination that points to the next page of results.
+ // It must be a value obtained from the response of the previous request. The
+ // token expires after 24 hours.
+ NextForwardToken *string `locationName:"nextForwardToken" min:"1" type:"string"`
+}
+
+// String returns the string representation
+func (s GetLogEventsOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetLogEventsOutput) GoString() string {
+ return s.String()
+}
+
+// A log event is a record of some activity that was recorded by the application
+// or resource being monitored. The log event record that Amazon CloudWatch
+// Logs understands contains two properties: the timestamp of when the event
+// occurred, and the raw event message.
+type InputLogEvent struct {
+ _ struct{} `type:"structure"`
+
+ Message *string `locationName:"message" min:"1" type:"string" required:"true"`
+
+ // A point in time expressed as the number of milliseconds since Jan 1, 1970
+ // 00:00:00 UTC.
+ Timestamp *int64 `locationName:"timestamp" type:"long" required:"true"`
+}
+
+// String returns the string representation
+func (s InputLogEvent) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s InputLogEvent) GoString() string {
+ return s.String()
+}
+
+type LogGroup struct {
+ _ struct{} `type:"structure"`
+
+ Arn *string `locationName:"arn" type:"string"`
+
+ // A point in time expressed as the number of milliseconds since Jan 1, 1970
+ // 00:00:00 UTC.
+ CreationTime *int64 `locationName:"creationTime" type:"long"`
+
+ LogGroupName *string `locationName:"logGroupName" min:"1" type:"string"`
+
+ // The number of metric filters associated with the log group.
+ MetricFilterCount *int64 `locationName:"metricFilterCount" type:"integer"`
+
+ // Specifies the number of days you want to retain log events in the specified
+ // log group. Possible values are: 1, 3, 5, 7, 14, 30, 60, 90, 120, 150, 180,
+ // 365, 400, 545, 731, 1827, 3653.
+ RetentionInDays *int64 `locationName:"retentionInDays" type:"integer"`
+
+ StoredBytes *int64 `locationName:"storedBytes" type:"long"`
+}
+
+// String returns the string representation
+func (s LogGroup) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s LogGroup) GoString() string {
+ return s.String()
+}
+
+// A log stream is a sequence of log events from a single emitter of logs.
+type LogStream struct {
+ _ struct{} `type:"structure"`
+
+ Arn *string `locationName:"arn" type:"string"`
+
+ // A point in time expressed as the number of milliseconds since Jan 1, 1970
+ // 00:00:00 UTC.
+ CreationTime *int64 `locationName:"creationTime" type:"long"`
+
+ // A point in time expressed as the number of milliseconds since Jan 1, 1970
+ // 00:00:00 UTC.
+ FirstEventTimestamp *int64 `locationName:"firstEventTimestamp" type:"long"`
+
+ // A point in time expressed as the number of milliseconds since Jan 1, 1970
+ // 00:00:00 UTC.
+ LastEventTimestamp *int64 `locationName:"lastEventTimestamp" type:"long"`
+
+ // A point in time expressed as the number of milliseconds since Jan 1, 1970
+ // 00:00:00 UTC.
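+ // (In Go, such millisecond timestamps can be converted with
+ // time.Unix(0, ms*int64(time.Millisecond)), where ms is the field value.)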
+ LastIngestionTime *int64 `locationName:"lastIngestionTime" type:"long"` + + LogStreamName *string `locationName:"logStreamName" min:"1" type:"string"` + + StoredBytes *int64 `locationName:"storedBytes" type:"long"` + + // A string token used for making PutLogEvents requests. A sequenceToken can + // only be used once, and PutLogEvents requests must include the sequenceToken + // obtained from the response of the previous request. + UploadSequenceToken *string `locationName:"uploadSequenceToken" min:"1" type:"string"` +} + +// String returns the string representation +func (s LogStream) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s LogStream) GoString() string { + return s.String() +} + +// Metric filters can be used to express how Amazon CloudWatch Logs would extract +// metric observations from ingested log events and transform them to metric +// data in a CloudWatch metric. +type MetricFilter struct { + _ struct{} `type:"structure"` + + // A point in time expressed as the number of milliseconds since Jan 1, 1970 + // 00:00:00 UTC. + CreationTime *int64 `locationName:"creationTime" type:"long"` + + // A name for a metric or subscription filter. + FilterName *string `locationName:"filterName" min:"1" type:"string"` + + // A symbolic description of how Amazon CloudWatch Logs should interpret the + // data in each log event. For example, a log event may contain timestamps, + // IP addresses, strings, and so on. You use the filter pattern to specify what + // to look for in the log event message. + FilterPattern *string `locationName:"filterPattern" type:"string"` + + MetricTransformations []*MetricTransformation `locationName:"metricTransformations" min:"1" type:"list"` +} + +// String returns the string representation +func (s MetricFilter) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s MetricFilter) GoString() string { + return s.String() +} + +type MetricFilterMatchRecord struct { + _ struct{} `type:"structure"` + + EventMessage *string `locationName:"eventMessage" min:"1" type:"string"` + + EventNumber *int64 `locationName:"eventNumber" type:"long"` + + ExtractedValues map[string]*string `locationName:"extractedValues" type:"map"` +} + +// String returns the string representation +func (s MetricFilterMatchRecord) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s MetricFilterMatchRecord) GoString() string { + return s.String() +} + +type MetricTransformation struct { + _ struct{} `type:"structure"` + + // The name of the CloudWatch metric to which the monitored log information + // should be published. For example, you may publish to a metric called ErrorCount. + MetricName *string `locationName:"metricName" type:"string" required:"true"` + + // The destination namespace of the new CloudWatch metric. + MetricNamespace *string `locationName:"metricNamespace" type:"string" required:"true"` + + // What to publish to the metric. For example, if you're counting the occurrences + // of a particular term like "Error", the value will be "1" for each occurrence. + // If you're counting the bytes transferred the published value will be the + // value in the log event. 
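+ // With a space-delimited pattern such as [ip, user, ts, req, status, bytes],
+ // a value of "$bytes" publishes the extracted bytes field (an illustrative
+ // pattern, not one taken from this file).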
+ MetricValue *string `locationName:"metricValue" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s MetricTransformation) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s MetricTransformation) GoString() string {
+ return s.String()
+}
+
+type OutputLogEvent struct {
+ _ struct{} `type:"structure"`
+
+ // A point in time expressed as the number of milliseconds since Jan 1, 1970
+ // 00:00:00 UTC.
+ IngestionTime *int64 `locationName:"ingestionTime" type:"long"`
+
+ Message *string `locationName:"message" min:"1" type:"string"`
+
+ // A point in time expressed as the number of milliseconds since Jan 1, 1970
+ // 00:00:00 UTC.
+ Timestamp *int64 `locationName:"timestamp" type:"long"`
+}
+
+// String returns the string representation
+func (s OutputLogEvent) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s OutputLogEvent) GoString() string {
+ return s.String()
+}
+
+type PutDestinationInput struct {
+ _ struct{} `type:"structure"`
+
+ // A name for the destination.
+ DestinationName *string `locationName:"destinationName" min:"1" type:"string" required:"true"`
+
+ // The ARN of an IAM role that grants Amazon CloudWatch Logs permissions to
+ // make Amazon Kinesis PutRecord requests on the destination stream.
+ RoleArn *string `locationName:"roleArn" min:"1" type:"string" required:"true"`
+
+ // The ARN of an Amazon Kinesis stream to deliver matching log events to.
+ TargetArn *string `locationName:"targetArn" min:"1" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s PutDestinationInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s PutDestinationInput) GoString() string {
+ return s.String()
+}
+
+type PutDestinationOutput struct {
+ _ struct{} `type:"structure"`
+
+ // A cross-account destination that is the recipient of subscription log events.
+ Destination *Destination `locationName:"destination" type:"structure"`
+}
+
+// String returns the string representation
+func (s PutDestinationOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s PutDestinationOutput) GoString() string {
+ return s.String()
+}
+
+type PutDestinationPolicyInput struct {
+ _ struct{} `type:"structure"`
+
+ // An IAM policy document that authorizes cross-account users to deliver their
+ // log events to the associated destination.
+ AccessPolicy *string `locationName:"accessPolicy" min:"1" type:"string" required:"true"`
+
+ // A name for an existing destination.
+ DestinationName *string `locationName:"destinationName" min:"1" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s PutDestinationPolicyInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s PutDestinationPolicyInput) GoString() string {
+ return s.String()
+}
+
+type PutDestinationPolicyOutput struct {
+ _ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s PutDestinationPolicyOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s PutDestinationPolicyOutput) GoString() string {
+ return s.String()
+}
+
+type PutLogEventsInput struct {
+ _ struct{} `type:"structure"`
+
+ // A list of log events belonging to a log stream.
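+ // The events must be in chronological order by timestamp, and the batch is
+ // subject to service limits (on the order of 10,000 events and 1 MB of
+ // payload per call at the time of this SDK snapshot).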
+ LogEvents []*InputLogEvent `locationName:"logEvents" min:"1" type:"list" required:"true"` + + // The name of the log group to put log events to. + LogGroupName *string `locationName:"logGroupName" min:"1" type:"string" required:"true"` + + // The name of the log stream to put log events to. + LogStreamName *string `locationName:"logStreamName" min:"1" type:"string" required:"true"` + + // A string token that must be obtained from the response of the previous PutLogEvents + // request. + SequenceToken *string `locationName:"sequenceToken" min:"1" type:"string"` +} + +// String returns the string representation +func (s PutLogEventsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutLogEventsInput) GoString() string { + return s.String() +} + +type PutLogEventsOutput struct { + _ struct{} `type:"structure"` + + // A string token used for making PutLogEvents requests. A sequenceToken can + // only be used once, and PutLogEvents requests must include the sequenceToken + // obtained from the response of the previous request. + NextSequenceToken *string `locationName:"nextSequenceToken" min:"1" type:"string"` + + RejectedLogEventsInfo *RejectedLogEventsInfo `locationName:"rejectedLogEventsInfo" type:"structure"` +} + +// String returns the string representation +func (s PutLogEventsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutLogEventsOutput) GoString() string { + return s.String() +} + +type PutMetricFilterInput struct { + _ struct{} `type:"structure"` + + // A name for the metric filter. + FilterName *string `locationName:"filterName" min:"1" type:"string" required:"true"` + + // A valid CloudWatch Logs filter pattern for extracting metric data out of + // ingested log events. + FilterPattern *string `locationName:"filterPattern" type:"string" required:"true"` + + // The name of the log group to associate the metric filter with. + LogGroupName *string `locationName:"logGroupName" min:"1" type:"string" required:"true"` + + // A collection of information needed to define how metric data gets emitted. + MetricTransformations []*MetricTransformation `locationName:"metricTransformations" min:"1" type:"list" required:"true"` +} + +// String returns the string representation +func (s PutMetricFilterInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutMetricFilterInput) GoString() string { + return s.String() +} + +type PutMetricFilterOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s PutMetricFilterOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutMetricFilterOutput) GoString() string { + return s.String() +} + +type PutRetentionPolicyInput struct { + _ struct{} `type:"structure"` + + // The name of the log group to associate the retention policy with. + LogGroupName *string `locationName:"logGroupName" min:"1" type:"string" required:"true"` + + // Specifies the number of days you want to retain log events in the specified + // log group. Possible values are: 1, 3, 5, 7, 14, 30, 60, 90, 120, 150, 180, + // 365, 400, 545, 731, 1827, 3653. 
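+ // For example, aws.Int64(30) retains events for thirty days; values outside
+ // the list above are rejected by the service.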
+ RetentionInDays *int64 `locationName:"retentionInDays" type:"integer" required:"true"`
+}
+
+// String returns the string representation
+func (s PutRetentionPolicyInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s PutRetentionPolicyInput) GoString() string {
+ return s.String()
+}
+
+type PutRetentionPolicyOutput struct {
+ _ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s PutRetentionPolicyOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s PutRetentionPolicyOutput) GoString() string {
+ return s.String()
+}
+
+type PutSubscriptionFilterInput struct {
+ _ struct{} `type:"structure"`
+
+ // The ARN of the destination to deliver matching log events to. Currently,
+ // the supported destinations are: An Amazon Kinesis stream belonging to the
+ // same account as the subscription filter, for same-account delivery. A logical
+ // destination (used via an ARN of Destination) belonging to a different account,
+ // for cross-account delivery.
+ DestinationArn *string `locationName:"destinationArn" min:"1" type:"string" required:"true"`
+
+ // A name for the subscription filter.
+ FilterName *string `locationName:"filterName" min:"1" type:"string" required:"true"`
+
+ // A valid CloudWatch Logs filter pattern for subscribing to a filtered stream
+ // of log events.
+ FilterPattern *string `locationName:"filterPattern" type:"string" required:"true"`
+
+ // The name of the log group to associate the subscription filter with.
+ LogGroupName *string `locationName:"logGroupName" min:"1" type:"string" required:"true"`
+
+ // The ARN of an IAM role that grants Amazon CloudWatch Logs permissions to
+ // deliver ingested log events to the destination stream. You don't need to
+ // provide the ARN when you are working with a logical destination (used via
+ // an ARN of Destination) for cross-account delivery.
+ RoleArn *string `locationName:"roleArn" min:"1" type:"string"`
+}
+
+// String returns the string representation
+func (s PutSubscriptionFilterInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s PutSubscriptionFilterInput) GoString() string {
+ return s.String()
+}
+
+type PutSubscriptionFilterOutput struct {
+ _ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s PutSubscriptionFilterOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s PutSubscriptionFilterOutput) GoString() string {
+ return s.String()
+}
+
+type RejectedLogEventsInfo struct {
+ _ struct{} `type:"structure"`
+
+ ExpiredLogEventEndIndex *int64 `locationName:"expiredLogEventEndIndex" type:"integer"`
+
+ TooNewLogEventStartIndex *int64 `locationName:"tooNewLogEventStartIndex" type:"integer"`
+
+ TooOldLogEventEndIndex *int64 `locationName:"tooOldLogEventEndIndex" type:"integer"`
+}
+
+// String returns the string representation
+func (s RejectedLogEventsInfo) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s RejectedLogEventsInfo) GoString() string {
+ return s.String()
+}
+
+// An object indicating the search status of a log stream in a FilterLogEvents
+// request.
+type SearchedLogStream struct {
+ _ struct{} `type:"structure"`
+
+ // The name of the log stream.
+ LogStreamName *string `locationName:"logStreamName" min:"1" type:"string"` + + // Indicates whether all the events in this log stream were searched or more + // data exists to search by paginating further. + SearchedCompletely *bool `locationName:"searchedCompletely" type:"boolean"` +} + +// String returns the string representation +func (s SearchedLogStream) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SearchedLogStream) GoString() string { + return s.String() +} + +type SubscriptionFilter struct { + _ struct{} `type:"structure"` + + // A point in time expressed as the number of milliseconds since Jan 1, 1970 + // 00:00:00 UTC. + CreationTime *int64 `locationName:"creationTime" type:"long"` + + DestinationArn *string `locationName:"destinationArn" min:"1" type:"string"` + + // A name for a metric or subscription filter. + FilterName *string `locationName:"filterName" min:"1" type:"string"` + + // A symbolic description of how Amazon CloudWatch Logs should interpret the + // data in each log event. For example, a log event may contain timestamps, + // IP addresses, strings, and so on. You use the filter pattern to specify what + // to look for in the log event message. + FilterPattern *string `locationName:"filterPattern" type:"string"` + + LogGroupName *string `locationName:"logGroupName" min:"1" type:"string"` + + RoleArn *string `locationName:"roleArn" min:"1" type:"string"` +} + +// String returns the string representation +func (s SubscriptionFilter) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SubscriptionFilter) GoString() string { + return s.String() +} + +type TestMetricFilterInput struct { + _ struct{} `type:"structure"` + + // A symbolic description of how Amazon CloudWatch Logs should interpret the + // data in each log event. For example, a log event may contain timestamps, + // IP addresses, strings, and so on. You use the filter pattern to specify what + // to look for in the log event message. + FilterPattern *string `locationName:"filterPattern" type:"string" required:"true"` + + // A list of log event messages to test. 
+ LogEventMessages []*string `locationName:"logEventMessages" min:"1" type:"list" required:"true"` +} + +// String returns the string representation +func (s TestMetricFilterInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TestMetricFilterInput) GoString() string { + return s.String() +} + +type TestMetricFilterOutput struct { + _ struct{} `type:"structure"` + + Matches []*MetricFilterMatchRecord `locationName:"matches" type:"list"` +} + +// String returns the string representation +func (s TestMetricFilterOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TestMetricFilterOutput) GoString() string { + return s.String() +} + +const ( + // @enum ExportTaskStatusCode + ExportTaskStatusCodeCancelled = "CANCELLED" + // @enum ExportTaskStatusCode + ExportTaskStatusCodeCompleted = "COMPLETED" + // @enum ExportTaskStatusCode + ExportTaskStatusCodeFailed = "FAILED" + // @enum ExportTaskStatusCode + ExportTaskStatusCodePending = "PENDING" + // @enum ExportTaskStatusCode + ExportTaskStatusCodePendingCancel = "PENDING_CANCEL" + // @enum ExportTaskStatusCode + ExportTaskStatusCodeRunning = "RUNNING" +) + +const ( + // @enum OrderBy + OrderByLogStreamName = "LogStreamName" + // @enum OrderBy + OrderByLastEventTime = "LastEventTime" +) diff --git a/vendor/github.com/aws/aws-sdk-go/service/cloudwatchlogs/cloudwatchlogsiface/interface.go b/vendor/github.com/aws/aws-sdk-go/service/cloudwatchlogs/cloudwatchlogsiface/interface.go new file mode 100644 index 000000000..c4d87b795 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/cloudwatchlogs/cloudwatchlogsiface/interface.go @@ -0,0 +1,128 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +// Package cloudwatchlogsiface provides an interface for the Amazon CloudWatch Logs. +package cloudwatchlogsiface + +import ( + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/service/cloudwatchlogs" +) + +// CloudWatchLogsAPI is the interface type for cloudwatchlogs.CloudWatchLogs. 
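+//
+// Code written against this interface instead of the concrete client can swap
+// in a stub for testing. A sketch, where publisher is a hypothetical consumer
+// type rather than part of this package:
+//
+// type publisher struct {
+// logs CloudWatchLogsAPI
+// }
+//
+// func newPublisher(api CloudWatchLogsAPI) *publisher {
+// return &publisher{logs: api}
+// }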
+type CloudWatchLogsAPI interface { + CancelExportTaskRequest(*cloudwatchlogs.CancelExportTaskInput) (*request.Request, *cloudwatchlogs.CancelExportTaskOutput) + + CancelExportTask(*cloudwatchlogs.CancelExportTaskInput) (*cloudwatchlogs.CancelExportTaskOutput, error) + + CreateExportTaskRequest(*cloudwatchlogs.CreateExportTaskInput) (*request.Request, *cloudwatchlogs.CreateExportTaskOutput) + + CreateExportTask(*cloudwatchlogs.CreateExportTaskInput) (*cloudwatchlogs.CreateExportTaskOutput, error) + + CreateLogGroupRequest(*cloudwatchlogs.CreateLogGroupInput) (*request.Request, *cloudwatchlogs.CreateLogGroupOutput) + + CreateLogGroup(*cloudwatchlogs.CreateLogGroupInput) (*cloudwatchlogs.CreateLogGroupOutput, error) + + CreateLogStreamRequest(*cloudwatchlogs.CreateLogStreamInput) (*request.Request, *cloudwatchlogs.CreateLogStreamOutput) + + CreateLogStream(*cloudwatchlogs.CreateLogStreamInput) (*cloudwatchlogs.CreateLogStreamOutput, error) + + DeleteDestinationRequest(*cloudwatchlogs.DeleteDestinationInput) (*request.Request, *cloudwatchlogs.DeleteDestinationOutput) + + DeleteDestination(*cloudwatchlogs.DeleteDestinationInput) (*cloudwatchlogs.DeleteDestinationOutput, error) + + DeleteLogGroupRequest(*cloudwatchlogs.DeleteLogGroupInput) (*request.Request, *cloudwatchlogs.DeleteLogGroupOutput) + + DeleteLogGroup(*cloudwatchlogs.DeleteLogGroupInput) (*cloudwatchlogs.DeleteLogGroupOutput, error) + + DeleteLogStreamRequest(*cloudwatchlogs.DeleteLogStreamInput) (*request.Request, *cloudwatchlogs.DeleteLogStreamOutput) + + DeleteLogStream(*cloudwatchlogs.DeleteLogStreamInput) (*cloudwatchlogs.DeleteLogStreamOutput, error) + + DeleteMetricFilterRequest(*cloudwatchlogs.DeleteMetricFilterInput) (*request.Request, *cloudwatchlogs.DeleteMetricFilterOutput) + + DeleteMetricFilter(*cloudwatchlogs.DeleteMetricFilterInput) (*cloudwatchlogs.DeleteMetricFilterOutput, error) + + DeleteRetentionPolicyRequest(*cloudwatchlogs.DeleteRetentionPolicyInput) (*request.Request, *cloudwatchlogs.DeleteRetentionPolicyOutput) + + DeleteRetentionPolicy(*cloudwatchlogs.DeleteRetentionPolicyInput) (*cloudwatchlogs.DeleteRetentionPolicyOutput, error) + + DeleteSubscriptionFilterRequest(*cloudwatchlogs.DeleteSubscriptionFilterInput) (*request.Request, *cloudwatchlogs.DeleteSubscriptionFilterOutput) + + DeleteSubscriptionFilter(*cloudwatchlogs.DeleteSubscriptionFilterInput) (*cloudwatchlogs.DeleteSubscriptionFilterOutput, error) + + DescribeDestinationsRequest(*cloudwatchlogs.DescribeDestinationsInput) (*request.Request, *cloudwatchlogs.DescribeDestinationsOutput) + + DescribeDestinations(*cloudwatchlogs.DescribeDestinationsInput) (*cloudwatchlogs.DescribeDestinationsOutput, error) + + DescribeDestinationsPages(*cloudwatchlogs.DescribeDestinationsInput, func(*cloudwatchlogs.DescribeDestinationsOutput, bool) bool) error + + DescribeExportTasksRequest(*cloudwatchlogs.DescribeExportTasksInput) (*request.Request, *cloudwatchlogs.DescribeExportTasksOutput) + + DescribeExportTasks(*cloudwatchlogs.DescribeExportTasksInput) (*cloudwatchlogs.DescribeExportTasksOutput, error) + + DescribeLogGroupsRequest(*cloudwatchlogs.DescribeLogGroupsInput) (*request.Request, *cloudwatchlogs.DescribeLogGroupsOutput) + + DescribeLogGroups(*cloudwatchlogs.DescribeLogGroupsInput) (*cloudwatchlogs.DescribeLogGroupsOutput, error) + + DescribeLogGroupsPages(*cloudwatchlogs.DescribeLogGroupsInput, func(*cloudwatchlogs.DescribeLogGroupsOutput, bool) bool) error + + DescribeLogStreamsRequest(*cloudwatchlogs.DescribeLogStreamsInput) (*request.Request, 
*cloudwatchlogs.DescribeLogStreamsOutput) + + DescribeLogStreams(*cloudwatchlogs.DescribeLogStreamsInput) (*cloudwatchlogs.DescribeLogStreamsOutput, error) + + DescribeLogStreamsPages(*cloudwatchlogs.DescribeLogStreamsInput, func(*cloudwatchlogs.DescribeLogStreamsOutput, bool) bool) error + + DescribeMetricFiltersRequest(*cloudwatchlogs.DescribeMetricFiltersInput) (*request.Request, *cloudwatchlogs.DescribeMetricFiltersOutput) + + DescribeMetricFilters(*cloudwatchlogs.DescribeMetricFiltersInput) (*cloudwatchlogs.DescribeMetricFiltersOutput, error) + + DescribeMetricFiltersPages(*cloudwatchlogs.DescribeMetricFiltersInput, func(*cloudwatchlogs.DescribeMetricFiltersOutput, bool) bool) error + + DescribeSubscriptionFiltersRequest(*cloudwatchlogs.DescribeSubscriptionFiltersInput) (*request.Request, *cloudwatchlogs.DescribeSubscriptionFiltersOutput) + + DescribeSubscriptionFilters(*cloudwatchlogs.DescribeSubscriptionFiltersInput) (*cloudwatchlogs.DescribeSubscriptionFiltersOutput, error) + + DescribeSubscriptionFiltersPages(*cloudwatchlogs.DescribeSubscriptionFiltersInput, func(*cloudwatchlogs.DescribeSubscriptionFiltersOutput, bool) bool) error + + FilterLogEventsRequest(*cloudwatchlogs.FilterLogEventsInput) (*request.Request, *cloudwatchlogs.FilterLogEventsOutput) + + FilterLogEvents(*cloudwatchlogs.FilterLogEventsInput) (*cloudwatchlogs.FilterLogEventsOutput, error) + + FilterLogEventsPages(*cloudwatchlogs.FilterLogEventsInput, func(*cloudwatchlogs.FilterLogEventsOutput, bool) bool) error + + GetLogEventsRequest(*cloudwatchlogs.GetLogEventsInput) (*request.Request, *cloudwatchlogs.GetLogEventsOutput) + + GetLogEvents(*cloudwatchlogs.GetLogEventsInput) (*cloudwatchlogs.GetLogEventsOutput, error) + + GetLogEventsPages(*cloudwatchlogs.GetLogEventsInput, func(*cloudwatchlogs.GetLogEventsOutput, bool) bool) error + + PutDestinationRequest(*cloudwatchlogs.PutDestinationInput) (*request.Request, *cloudwatchlogs.PutDestinationOutput) + + PutDestination(*cloudwatchlogs.PutDestinationInput) (*cloudwatchlogs.PutDestinationOutput, error) + + PutDestinationPolicyRequest(*cloudwatchlogs.PutDestinationPolicyInput) (*request.Request, *cloudwatchlogs.PutDestinationPolicyOutput) + + PutDestinationPolicy(*cloudwatchlogs.PutDestinationPolicyInput) (*cloudwatchlogs.PutDestinationPolicyOutput, error) + + PutLogEventsRequest(*cloudwatchlogs.PutLogEventsInput) (*request.Request, *cloudwatchlogs.PutLogEventsOutput) + + PutLogEvents(*cloudwatchlogs.PutLogEventsInput) (*cloudwatchlogs.PutLogEventsOutput, error) + + PutMetricFilterRequest(*cloudwatchlogs.PutMetricFilterInput) (*request.Request, *cloudwatchlogs.PutMetricFilterOutput) + + PutMetricFilter(*cloudwatchlogs.PutMetricFilterInput) (*cloudwatchlogs.PutMetricFilterOutput, error) + + PutRetentionPolicyRequest(*cloudwatchlogs.PutRetentionPolicyInput) (*request.Request, *cloudwatchlogs.PutRetentionPolicyOutput) + + PutRetentionPolicy(*cloudwatchlogs.PutRetentionPolicyInput) (*cloudwatchlogs.PutRetentionPolicyOutput, error) + + PutSubscriptionFilterRequest(*cloudwatchlogs.PutSubscriptionFilterInput) (*request.Request, *cloudwatchlogs.PutSubscriptionFilterOutput) + + PutSubscriptionFilter(*cloudwatchlogs.PutSubscriptionFilterInput) (*cloudwatchlogs.PutSubscriptionFilterOutput, error) + + TestMetricFilterRequest(*cloudwatchlogs.TestMetricFilterInput) (*request.Request, *cloudwatchlogs.TestMetricFilterOutput) + + TestMetricFilter(*cloudwatchlogs.TestMetricFilterInput) (*cloudwatchlogs.TestMetricFilterOutput, error) +} + +var _ CloudWatchLogsAPI = 
(*cloudwatchlogs.CloudWatchLogs)(nil) diff --git a/vendor/github.com/aws/aws-sdk-go/service/cloudwatchlogs/examples_test.go b/vendor/github.com/aws/aws-sdk-go/service/cloudwatchlogs/examples_test.go new file mode 100644 index 000000000..13c56fc88 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/cloudwatchlogs/examples_test.go @@ -0,0 +1,566 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +package cloudwatchlogs_test + +import ( + "bytes" + "fmt" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/session" + "github.com/aws/aws-sdk-go/service/cloudwatchlogs" +) + +var _ time.Duration +var _ bytes.Buffer + +func ExampleCloudWatchLogs_CancelExportTask() { + svc := cloudwatchlogs.New(session.New()) + + params := &cloudwatchlogs.CancelExportTaskInput{ + TaskId: aws.String("ExportTaskId"), // Required + } + resp, err := svc.CancelExportTask(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCloudWatchLogs_CreateExportTask() { + svc := cloudwatchlogs.New(session.New()) + + params := &cloudwatchlogs.CreateExportTaskInput{ + Destination: aws.String("ExportDestinationBucket"), // Required + From: aws.Int64(1), // Required + LogGroupName: aws.String("LogGroupName"), // Required + To: aws.Int64(1), // Required + DestinationPrefix: aws.String("ExportDestinationPrefix"), + LogStreamNamePrefix: aws.String("LogStreamName"), + TaskName: aws.String("ExportTaskName"), + } + resp, err := svc.CreateExportTask(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCloudWatchLogs_CreateLogGroup() { + svc := cloudwatchlogs.New(session.New()) + + params := &cloudwatchlogs.CreateLogGroupInput{ + LogGroupName: aws.String("LogGroupName"), // Required + } + resp, err := svc.CreateLogGroup(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCloudWatchLogs_CreateLogStream() { + svc := cloudwatchlogs.New(session.New()) + + params := &cloudwatchlogs.CreateLogStreamInput{ + LogGroupName: aws.String("LogGroupName"), // Required + LogStreamName: aws.String("LogStreamName"), // Required + } + resp, err := svc.CreateLogStream(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCloudWatchLogs_DeleteDestination() { + svc := cloudwatchlogs.New(session.New()) + + params := &cloudwatchlogs.DeleteDestinationInput{ + DestinationName: aws.String("DestinationName"), // Required + } + resp, err := svc.DeleteDestination(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
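+ // DeleteDestinationOutput carries no fields, so an empty structure prints.
+ // The cast suggested in the error branch above looks roughly like this
+ // sketch:
+ //
+ // if aerr, ok := err.(awserr.Error); ok {
+ // fmt.Println(aerr.Code(), aerr.Message())
+ // }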
+ fmt.Println(resp) +} + +func ExampleCloudWatchLogs_DeleteLogGroup() { + svc := cloudwatchlogs.New(session.New()) + + params := &cloudwatchlogs.DeleteLogGroupInput{ + LogGroupName: aws.String("LogGroupName"), // Required + } + resp, err := svc.DeleteLogGroup(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCloudWatchLogs_DeleteLogStream() { + svc := cloudwatchlogs.New(session.New()) + + params := &cloudwatchlogs.DeleteLogStreamInput{ + LogGroupName: aws.String("LogGroupName"), // Required + LogStreamName: aws.String("LogStreamName"), // Required + } + resp, err := svc.DeleteLogStream(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCloudWatchLogs_DeleteMetricFilter() { + svc := cloudwatchlogs.New(session.New()) + + params := &cloudwatchlogs.DeleteMetricFilterInput{ + FilterName: aws.String("FilterName"), // Required + LogGroupName: aws.String("LogGroupName"), // Required + } + resp, err := svc.DeleteMetricFilter(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCloudWatchLogs_DeleteRetentionPolicy() { + svc := cloudwatchlogs.New(session.New()) + + params := &cloudwatchlogs.DeleteRetentionPolicyInput{ + LogGroupName: aws.String("LogGroupName"), // Required + } + resp, err := svc.DeleteRetentionPolicy(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCloudWatchLogs_DeleteSubscriptionFilter() { + svc := cloudwatchlogs.New(session.New()) + + params := &cloudwatchlogs.DeleteSubscriptionFilterInput{ + FilterName: aws.String("FilterName"), // Required + LogGroupName: aws.String("LogGroupName"), // Required + } + resp, err := svc.DeleteSubscriptionFilter(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCloudWatchLogs_DescribeDestinations() { + svc := cloudwatchlogs.New(session.New()) + + params := &cloudwatchlogs.DescribeDestinationsInput{ + DestinationNamePrefix: aws.String("DestinationName"), + Limit: aws.Int64(1), + NextToken: aws.String("NextToken"), + } + resp, err := svc.DescribeDestinations(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
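+ // A sketch of reading individual fields rather than printing the whole
+ // response (Destinations is the list this call returns):
+ //
+ // for _, d := range resp.Destinations {
+ // fmt.Println(aws.StringValue(d.DestinationName))
+ // }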
+ fmt.Println(resp) +} + +func ExampleCloudWatchLogs_DescribeExportTasks() { + svc := cloudwatchlogs.New(session.New()) + + params := &cloudwatchlogs.DescribeExportTasksInput{ + Limit: aws.Int64(1), + NextToken: aws.String("NextToken"), + StatusCode: aws.String("ExportTaskStatusCode"), + TaskId: aws.String("ExportTaskId"), + } + resp, err := svc.DescribeExportTasks(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCloudWatchLogs_DescribeLogGroups() { + svc := cloudwatchlogs.New(session.New()) + + params := &cloudwatchlogs.DescribeLogGroupsInput{ + Limit: aws.Int64(1), + LogGroupNamePrefix: aws.String("LogGroupName"), + NextToken: aws.String("NextToken"), + } + resp, err := svc.DescribeLogGroups(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCloudWatchLogs_DescribeLogStreams() { + svc := cloudwatchlogs.New(session.New()) + + params := &cloudwatchlogs.DescribeLogStreamsInput{ + LogGroupName: aws.String("LogGroupName"), // Required + Descending: aws.Bool(true), + Limit: aws.Int64(1), + LogStreamNamePrefix: aws.String("LogStreamName"), + NextToken: aws.String("NextToken"), + OrderBy: aws.String("OrderBy"), + } + resp, err := svc.DescribeLogStreams(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCloudWatchLogs_DescribeMetricFilters() { + svc := cloudwatchlogs.New(session.New()) + + params := &cloudwatchlogs.DescribeMetricFiltersInput{ + LogGroupName: aws.String("LogGroupName"), // Required + FilterNamePrefix: aws.String("FilterName"), + Limit: aws.Int64(1), + NextToken: aws.String("NextToken"), + } + resp, err := svc.DescribeMetricFilters(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCloudWatchLogs_DescribeSubscriptionFilters() { + svc := cloudwatchlogs.New(session.New()) + + params := &cloudwatchlogs.DescribeSubscriptionFiltersInput{ + LogGroupName: aws.String("LogGroupName"), // Required + FilterNamePrefix: aws.String("FilterName"), + Limit: aws.Int64(1), + NextToken: aws.String("NextToken"), + } + resp, err := svc.DescribeSubscriptionFilters(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCloudWatchLogs_FilterLogEvents() { + svc := cloudwatchlogs.New(session.New()) + + params := &cloudwatchlogs.FilterLogEventsInput{ + LogGroupName: aws.String("LogGroupName"), // Required + EndTime: aws.Int64(1), + FilterPattern: aws.String("FilterPattern"), + Interleaved: aws.Bool(true), + Limit: aws.Int64(1), + LogStreamNames: []*string{ + aws.String("LogStreamName"), // Required + // More values... 
+ }, + NextToken: aws.String("NextToken"), + StartTime: aws.Int64(1), + } + resp, err := svc.FilterLogEvents(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCloudWatchLogs_GetLogEvents() { + svc := cloudwatchlogs.New(session.New()) + + params := &cloudwatchlogs.GetLogEventsInput{ + LogGroupName: aws.String("LogGroupName"), // Required + LogStreamName: aws.String("LogStreamName"), // Required + EndTime: aws.Int64(1), + Limit: aws.Int64(1), + NextToken: aws.String("NextToken"), + StartFromHead: aws.Bool(true), + StartTime: aws.Int64(1), + } + resp, err := svc.GetLogEvents(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCloudWatchLogs_PutDestination() { + svc := cloudwatchlogs.New(session.New()) + + params := &cloudwatchlogs.PutDestinationInput{ + DestinationName: aws.String("DestinationName"), // Required + RoleArn: aws.String("RoleArn"), // Required + TargetArn: aws.String("TargetArn"), // Required + } + resp, err := svc.PutDestination(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCloudWatchLogs_PutDestinationPolicy() { + svc := cloudwatchlogs.New(session.New()) + + params := &cloudwatchlogs.PutDestinationPolicyInput{ + AccessPolicy: aws.String("AccessPolicy"), // Required + DestinationName: aws.String("DestinationName"), // Required + } + resp, err := svc.PutDestinationPolicy(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCloudWatchLogs_PutLogEvents() { + svc := cloudwatchlogs.New(session.New()) + + params := &cloudwatchlogs.PutLogEventsInput{ + LogEvents: []*cloudwatchlogs.InputLogEvent{ // Required + { // Required + Message: aws.String("EventMessage"), // Required + Timestamp: aws.Int64(1), // Required + }, + // More values... + }, + LogGroupName: aws.String("LogGroupName"), // Required + LogStreamName: aws.String("LogStreamName"), // Required + SequenceToken: aws.String("SequenceToken"), + } + resp, err := svc.PutLogEvents(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCloudWatchLogs_PutMetricFilter() { + svc := cloudwatchlogs.New(session.New()) + + params := &cloudwatchlogs.PutMetricFilterInput{ + FilterName: aws.String("FilterName"), // Required + FilterPattern: aws.String("FilterPattern"), // Required + LogGroupName: aws.String("LogGroupName"), // Required + MetricTransformations: []*cloudwatchlogs.MetricTransformation{ // Required + { // Required + MetricName: aws.String("MetricName"), // Required + MetricNamespace: aws.String("MetricNamespace"), // Required + MetricValue: aws.String("MetricValue"), // Required + }, + // More values... 
+ }, + } + resp, err := svc.PutMetricFilter(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCloudWatchLogs_PutRetentionPolicy() { + svc := cloudwatchlogs.New(session.New()) + + params := &cloudwatchlogs.PutRetentionPolicyInput{ + LogGroupName: aws.String("LogGroupName"), // Required + RetentionInDays: aws.Int64(1), // Required + } + resp, err := svc.PutRetentionPolicy(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCloudWatchLogs_PutSubscriptionFilter() { + svc := cloudwatchlogs.New(session.New()) + + params := &cloudwatchlogs.PutSubscriptionFilterInput{ + DestinationArn: aws.String("DestinationArn"), // Required + FilterName: aws.String("FilterName"), // Required + FilterPattern: aws.String("FilterPattern"), // Required + LogGroupName: aws.String("LogGroupName"), // Required + RoleArn: aws.String("RoleArn"), + } + resp, err := svc.PutSubscriptionFilter(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCloudWatchLogs_TestMetricFilter() { + svc := cloudwatchlogs.New(session.New()) + + params := &cloudwatchlogs.TestMetricFilterInput{ + FilterPattern: aws.String("FilterPattern"), // Required + LogEventMessages: []*string{ // Required + aws.String("EventMessage"), // Required + // More values... + }, + } + resp, err := svc.TestMetricFilter(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/cloudwatchlogs/service.go b/vendor/github.com/aws/aws-sdk-go/service/cloudwatchlogs/service.go new file mode 100644 index 000000000..7a5d472cd --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/cloudwatchlogs/service.go @@ -0,0 +1,119 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +package cloudwatchlogs + +import ( + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/client" + "github.com/aws/aws-sdk-go/aws/client/metadata" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol/jsonrpc" + "github.com/aws/aws-sdk-go/private/signer/v4" +) + +// This is the Amazon CloudWatch Logs API Reference. Amazon CloudWatch Logs +// enables you to monitor, store, and access your system, application, and custom +// log files. This guide provides detailed information about Amazon CloudWatch +// Logs actions, data types, parameters, and errors. For detailed information +// about Amazon CloudWatch Logs features and their associated API calls, go +// to the Amazon CloudWatch Developer Guide (http://docs.aws.amazon.com/AmazonCloudWatch/latest/DeveloperGuide). +// +// Use the following links to get started using the Amazon CloudWatch Logs +// API Reference: +// +// Actions (http://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_Operations.html): +// An alphabetical list of all Amazon CloudWatch Logs actions. 
+// Data Types (http://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_Types.html):
+// An alphabetical list of all Amazon CloudWatch Logs data types. Common Parameters
+// (http://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/CommonParameters.html):
+// Parameters that all Query actions can use. Common Errors (http://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/CommonErrors.html):
+// Client and server errors that all actions can return. Regions and Endpoints
+// (http://docs.aws.amazon.com/general/latest/gr/index.html?rande.html): Itemized
+// regions and endpoints for all AWS products. In addition to using the Amazon
+// CloudWatch Logs API, you can also use the following SDKs and third-party
+// libraries to access Amazon CloudWatch Logs programmatically.
+//
+// AWS SDK for Java Documentation (http://aws.amazon.com/documentation/sdkforjava/)
+// AWS SDK for .NET Documentation (http://aws.amazon.com/documentation/sdkfornet/)
+// AWS SDK for PHP Documentation (http://aws.amazon.com/documentation/sdkforphp/)
+// AWS SDK for Ruby Documentation (http://aws.amazon.com/documentation/sdkforruby/)
+// Developers in the AWS developer community also provide their own libraries,
+// which you can find at the following AWS developer centers:
+//
+// AWS Java Developer Center (http://aws.amazon.com/java/) AWS PHP Developer
+// Center (http://aws.amazon.com/php/) AWS Python Developer Center (http://aws.amazon.com/python/)
+// AWS Ruby Developer Center (http://aws.amazon.com/ruby/) AWS Windows and .NET
+// Developer Center (http://aws.amazon.com/net/)
+// The service client's operations are safe to be used concurrently.
+// It is not safe to mutate any of the client's properties though.
+type CloudWatchLogs struct {
+ *client.Client
+}
+
+// Used for custom client initialization logic
+var initClient func(*client.Client)
+
+// Used for custom request initialization logic
+var initRequest func(*request.Request)
+
+// A ServiceName is the name of the service the client will make API calls to.
+const ServiceName = "logs"
+
+// New creates a new instance of the CloudWatchLogs client with a session.
+// If additional configuration is needed for the client instance use the optional
+// aws.Config parameter to add your extra config.
+//
+// Example:
+// // Create a CloudWatchLogs client from just a session.
+// svc := cloudwatchlogs.New(mySession)
+//
+// // Create a CloudWatchLogs client with additional configuration
+// svc := cloudwatchlogs.New(mySession, aws.NewConfig().WithRegion("us-west-2"))
+func New(p client.ConfigProvider, cfgs ...*aws.Config) *CloudWatchLogs {
+ c := p.ClientConfig(ServiceName, cfgs...)
+ return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion)
+}
+
+// newClient creates, initializes and returns a new service client instance.
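+// The handlers pushed below attach SigV4 signing and JSON-RPC (un)marshaling;
+// callers can hook the same lists on a constructed client. A sketch, with sess
+// as a placeholder *session.Session:
+//
+// svc := cloudwatchlogs.New(sess)
+// svc.Handlers.Send.PushFront(func(r *request.Request) {
+// // inspect r.HTTPRequest before it is sent
+// })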
+func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *CloudWatchLogs { + svc := &CloudWatchLogs{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: ServiceName, + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "2014-03-28", + JSONVersion: "1.1", + TargetPrefix: "Logs_20140328", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Build.PushBack(jsonrpc.Build) + svc.Handlers.Unmarshal.PushBack(jsonrpc.Unmarshal) + svc.Handlers.UnmarshalMeta.PushBack(jsonrpc.UnmarshalMeta) + svc.Handlers.UnmarshalError.PushBack(jsonrpc.UnmarshalError) + + // Run custom client initialization if present + if initClient != nil { + initClient(svc.Client) + } + + return svc +} + +// newRequest creates a new request for a CloudWatchLogs operation and runs any +// custom request initialization. +func (c *CloudWatchLogs) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + // Run custom request initialization if present + if initRequest != nil { + initRequest(req) + } + + return req +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/codecommit/api.go b/vendor/github.com/aws/aws-sdk-go/service/codecommit/api.go new file mode 100644 index 000000000..831da1e57 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/codecommit/api.go @@ -0,0 +1,880 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +// Package codecommit provides a client for AWS CodeCommit. +package codecommit + +import ( + "time" + + "github.com/aws/aws-sdk-go/aws/awsutil" + "github.com/aws/aws-sdk-go/aws/request" +) + +const opBatchGetRepositories = "BatchGetRepositories" + +// BatchGetRepositoriesRequest generates a request for the BatchGetRepositories operation. +func (c *CodeCommit) BatchGetRepositoriesRequest(input *BatchGetRepositoriesInput) (req *request.Request, output *BatchGetRepositoriesOutput) { + op := &request.Operation{ + Name: opBatchGetRepositories, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &BatchGetRepositoriesInput{} + } + + req = c.newRequest(op, input, output) + output = &BatchGetRepositoriesOutput{} + req.Data = output + return +} + +// Gets information about one or more repositories. +// +// The description field for a repository accepts all HTML characters and all +// valid Unicode characters. Applications that do not HTML-encode the description +// and display it in a web page could expose users to potentially malicious +// code. Make sure that you HTML-encode the description field in any application +// that uses this API to display the repository description on a web page. +func (c *CodeCommit) BatchGetRepositories(input *BatchGetRepositoriesInput) (*BatchGetRepositoriesOutput, error) { + req, out := c.BatchGetRepositoriesRequest(input) + err := req.Send() + return out, err +} + +const opCreateBranch = "CreateBranch" + +// CreateBranchRequest generates a request for the CreateBranch operation. +func (c *CodeCommit) CreateBranchRequest(input *CreateBranchInput) (req *request.Request, output *CreateBranchOutput) { + op := &request.Operation{ + Name: opCreateBranch, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateBranchInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateBranchOutput{} + req.Data = output + return +} + +// Creates a new branch in a repository and points the branch to a commit. 
+// +// Calling the create branch operation does not set a repository's default +// branch. To do this, call the update default branch operation. +func (c *CodeCommit) CreateBranch(input *CreateBranchInput) (*CreateBranchOutput, error) { + req, out := c.CreateBranchRequest(input) + err := req.Send() + return out, err +} + +const opCreateRepository = "CreateRepository" + +// CreateRepositoryRequest generates a request for the CreateRepository operation. +func (c *CodeCommit) CreateRepositoryRequest(input *CreateRepositoryInput) (req *request.Request, output *CreateRepositoryOutput) { + op := &request.Operation{ + Name: opCreateRepository, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateRepositoryInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateRepositoryOutput{} + req.Data = output + return +} + +// Creates a new, empty repository. +func (c *CodeCommit) CreateRepository(input *CreateRepositoryInput) (*CreateRepositoryOutput, error) { + req, out := c.CreateRepositoryRequest(input) + err := req.Send() + return out, err +} + +const opDeleteRepository = "DeleteRepository" + +// DeleteRepositoryRequest generates a request for the DeleteRepository operation. +func (c *CodeCommit) DeleteRepositoryRequest(input *DeleteRepositoryInput) (req *request.Request, output *DeleteRepositoryOutput) { + op := &request.Operation{ + Name: opDeleteRepository, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteRepositoryInput{} + } + + req = c.newRequest(op, input, output) + output = &DeleteRepositoryOutput{} + req.Data = output + return +} + +// Deletes a repository. If a specified repository was already deleted, a null +// repository ID will be returned. +// +// Deleting a repository also deletes all associated objects and metadata. +// After a repository is deleted, all future push calls to the deleted repository +// will fail. +func (c *CodeCommit) DeleteRepository(input *DeleteRepositoryInput) (*DeleteRepositoryOutput, error) { + req, out := c.DeleteRepositoryRequest(input) + err := req.Send() + return out, err +} + +const opGetBranch = "GetBranch" + +// GetBranchRequest generates a request for the GetBranch operation. +func (c *CodeCommit) GetBranchRequest(input *GetBranchInput) (req *request.Request, output *GetBranchOutput) { + op := &request.Operation{ + Name: opGetBranch, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetBranchInput{} + } + + req = c.newRequest(op, input, output) + output = &GetBranchOutput{} + req.Data = output + return +} + +// Retrieves information about a repository branch, including its name and the +// last commit ID. +func (c *CodeCommit) GetBranch(input *GetBranchInput) (*GetBranchOutput, error) { + req, out := c.GetBranchRequest(input) + err := req.Send() + return out, err +} + +const opGetRepository = "GetRepository" + +// GetRepositoryRequest generates a request for the GetRepository operation. +func (c *CodeCommit) GetRepositoryRequest(input *GetRepositoryInput) (req *request.Request, output *GetRepositoryOutput) { + op := &request.Operation{ + Name: opGetRepository, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetRepositoryInput{} + } + + req = c.newRequest(op, input, output) + output = &GetRepositoryOutput{} + req.Data = output + return +} + +// Gets information about a repository. +// +// The description field for a repository accepts all HTML characters and all +// valid Unicode characters. 
Applications that do not HTML-encode the description +// and display it in a web page could expose users to potentially malicious +// code. Make sure that you HTML-encode the description field in any application +// that uses this API to display the repository description on a web page. +func (c *CodeCommit) GetRepository(input *GetRepositoryInput) (*GetRepositoryOutput, error) { + req, out := c.GetRepositoryRequest(input) + err := req.Send() + return out, err +} + +const opListBranches = "ListBranches" + +// ListBranchesRequest generates a request for the ListBranches operation. +func (c *CodeCommit) ListBranchesRequest(input *ListBranchesInput) (req *request.Request, output *ListBranchesOutput) { + op := &request.Operation{ + Name: opListBranches, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ListBranchesInput{} + } + + req = c.newRequest(op, input, output) + output = &ListBranchesOutput{} + req.Data = output + return +} + +// Gets information about one or more branches in a repository. +func (c *CodeCommit) ListBranches(input *ListBranchesInput) (*ListBranchesOutput, error) { + req, out := c.ListBranchesRequest(input) + err := req.Send() + return out, err +} + +const opListRepositories = "ListRepositories" + +// ListRepositoriesRequest generates a request for the ListRepositories operation. +func (c *CodeCommit) ListRepositoriesRequest(input *ListRepositoriesInput) (req *request.Request, output *ListRepositoriesOutput) { + op := &request.Operation{ + Name: opListRepositories, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ListRepositoriesInput{} + } + + req = c.newRequest(op, input, output) + output = &ListRepositoriesOutput{} + req.Data = output + return +} + +// Gets information about one or more repositories. +func (c *CodeCommit) ListRepositories(input *ListRepositoriesInput) (*ListRepositoriesOutput, error) { + req, out := c.ListRepositoriesRequest(input) + err := req.Send() + return out, err +} + +const opUpdateDefaultBranch = "UpdateDefaultBranch" + +// UpdateDefaultBranchRequest generates a request for the UpdateDefaultBranch operation. +func (c *CodeCommit) UpdateDefaultBranchRequest(input *UpdateDefaultBranchInput) (req *request.Request, output *UpdateDefaultBranchOutput) { + op := &request.Operation{ + Name: opUpdateDefaultBranch, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UpdateDefaultBranchInput{} + } + + req = c.newRequest(op, input, output) + output = &UpdateDefaultBranchOutput{} + req.Data = output + return +} + +// Sets or changes the default branch name for the specified repository. +// +// If you use this operation to change the default branch name to the current +// default branch name, a success message is returned even though the default +// branch did not change. +func (c *CodeCommit) UpdateDefaultBranch(input *UpdateDefaultBranchInput) (*UpdateDefaultBranchOutput, error) { + req, out := c.UpdateDefaultBranchRequest(input) + err := req.Send() + return out, err +} + +const opUpdateRepositoryDescription = "UpdateRepositoryDescription" + +// UpdateRepositoryDescriptionRequest generates a request for the UpdateRepositoryDescription operation. 
+func (c *CodeCommit) UpdateRepositoryDescriptionRequest(input *UpdateRepositoryDescriptionInput) (req *request.Request, output *UpdateRepositoryDescriptionOutput) { + op := &request.Operation{ + Name: opUpdateRepositoryDescription, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UpdateRepositoryDescriptionInput{} + } + + req = c.newRequest(op, input, output) + output = &UpdateRepositoryDescriptionOutput{} + req.Data = output + return +} + +// Sets or changes the comment or description for a repository. +// +// The description field for a repository accepts all HTML characters and all +// valid Unicode characters. Applications that do not HTML-encode the description +// and display it in a web page could expose users to potentially malicious +// code. Make sure that you HTML-encode the description field in any application +// that uses this API to display the repository description on a web page. +func (c *CodeCommit) UpdateRepositoryDescription(input *UpdateRepositoryDescriptionInput) (*UpdateRepositoryDescriptionOutput, error) { + req, out := c.UpdateRepositoryDescriptionRequest(input) + err := req.Send() + return out, err +} + +const opUpdateRepositoryName = "UpdateRepositoryName" + +// UpdateRepositoryNameRequest generates a request for the UpdateRepositoryName operation. +func (c *CodeCommit) UpdateRepositoryNameRequest(input *UpdateRepositoryNameInput) (req *request.Request, output *UpdateRepositoryNameOutput) { + op := &request.Operation{ + Name: opUpdateRepositoryName, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UpdateRepositoryNameInput{} + } + + req = c.newRequest(op, input, output) + output = &UpdateRepositoryNameOutput{} + req.Data = output + return +} + +// Renames a repository. +func (c *CodeCommit) UpdateRepositoryName(input *UpdateRepositoryNameInput) (*UpdateRepositoryNameOutput, error) { + req, out := c.UpdateRepositoryNameRequest(input) + err := req.Send() + return out, err +} + +// Represents the input of a batch get repositories operation. +type BatchGetRepositoriesInput struct { + _ struct{} `type:"structure"` + + // The names of the repositories to get information about. + RepositoryNames []*string `locationName:"repositoryNames" type:"list" required:"true"` +} + +// String returns the string representation +func (s BatchGetRepositoriesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s BatchGetRepositoriesInput) GoString() string { + return s.String() +} + +// Represents the output of a batch get repositories operation. +type BatchGetRepositoriesOutput struct { + _ struct{} `type:"structure"` + + // A list of repositories returned by the batch get repositories operation. + Repositories []*RepositoryMetadata `locationName:"repositories" type:"list"` + + // Returns a list of repository names for which information could not be found. + RepositoriesNotFound []*string `locationName:"repositoriesNotFound" type:"list"` +} + +// String returns the string representation +func (s BatchGetRepositoriesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s BatchGetRepositoriesOutput) GoString() string { + return s.String() +} + +// Returns information about a branch. +type BranchInfo struct { + _ struct{} `type:"structure"` + + // The name of the branch. + BranchName *string `locationName:"branchName" min:"1" type:"string"` + + // The ID of the last commit made to the branch. 
+ CommitId *string `locationName:"commitId" type:"string"` +} + +// String returns the string representation +func (s BranchInfo) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s BranchInfo) GoString() string { + return s.String() +} + +// Represents the input of a create branch operation. +type CreateBranchInput struct { + _ struct{} `type:"structure"` + + // The name of the new branch to create. + BranchName *string `locationName:"branchName" min:"1" type:"string" required:"true"` + + // The ID of the commit to point the new branch to. + // + // If this commit ID is not specified, the new branch will point to the commit + // that is pointed to by the repository's default branch. + CommitId *string `locationName:"commitId" type:"string" required:"true"` + + // The name of the repository in which you want to create the new branch. + RepositoryName *string `locationName:"repositoryName" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s CreateBranchInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateBranchInput) GoString() string { + return s.String() +} + +type CreateBranchOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s CreateBranchOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateBranchOutput) GoString() string { + return s.String() +} + +// Represents the input of a create repository operation. +type CreateRepositoryInput struct { + _ struct{} `type:"structure"` + + // A comment or description about the new repository. + RepositoryDescription *string `locationName:"repositoryDescription" type:"string"` + + // The name of the new repository to be created. + // + // The repository name must be unique across the calling AWS account. In addition, + // repository names are restricted to alphanumeric characters. The suffix ".git" + // is prohibited. + RepositoryName *string `locationName:"repositoryName" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s CreateRepositoryInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateRepositoryInput) GoString() string { + return s.String() +} + +// Represents the output of a create repository operation. +type CreateRepositoryOutput struct { + _ struct{} `type:"structure"` + + // Information about the newly created repository. + RepositoryMetadata *RepositoryMetadata `locationName:"repositoryMetadata" type:"structure"` +} + +// String returns the string representation +func (s CreateRepositoryOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateRepositoryOutput) GoString() string { + return s.String() +} + +// Represents the input of a delete repository operation. +type DeleteRepositoryInput struct { + _ struct{} `type:"structure"` + + // The name of the repository to delete. 
+ RepositoryName *string `locationName:"repositoryName" min:"1" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s DeleteRepositoryInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteRepositoryInput) GoString() string {
+ return s.String()
+}
+
+// Represents the output of a delete repository operation.
+type DeleteRepositoryOutput struct {
+ _ struct{} `type:"structure"`
+
+ // The ID of the repository that was deleted.
+ RepositoryId *string `locationName:"repositoryId" type:"string"`
+}
+
+// String returns the string representation
+func (s DeleteRepositoryOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteRepositoryOutput) GoString() string {
+ return s.String()
+}
+
+// Represents the input of a get branch operation.
+type GetBranchInput struct {
+ _ struct{} `type:"structure"`
+
+ // The name of the branch for which you want to retrieve information.
+ BranchName *string `locationName:"branchName" min:"1" type:"string"`
+
+ // Repository name is restricted to alphanumeric characters (a-z, A-Z, 0-9),
+ // ".", "_", and "-". Additionally, the suffix ".git" is prohibited in a repository
+ // name.
+ RepositoryName *string `locationName:"repositoryName" min:"1" type:"string"`
+}
+
+// String returns the string representation
+func (s GetBranchInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetBranchInput) GoString() string {
+ return s.String()
+}
+
+// Represents the output of a get branch operation.
+type GetBranchOutput struct {
+ _ struct{} `type:"structure"`
+
+ // Information about the branch, including its name and the last commit ID.
+ Branch *BranchInfo `locationName:"branch" type:"structure"`
+}
+
+// String returns the string representation
+func (s GetBranchOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetBranchOutput) GoString() string {
+ return s.String()
+}
+
+// Represents the input of a get repository operation.
+type GetRepositoryInput struct {
+ _ struct{} `type:"structure"`
+
+ // The name of the repository to get information about.
+ RepositoryName *string `locationName:"repositoryName" min:"1" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s GetRepositoryInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetRepositoryInput) GoString() string {
+ return s.String()
+}
+
+// Represents the output of a get repository operation.
+type GetRepositoryOutput struct {
+ _ struct{} `type:"structure"`
+
+ // Information about the repository.
+ RepositoryMetadata *RepositoryMetadata `locationName:"repositoryMetadata" type:"structure"`
+}
+
+// String returns the string representation
+func (s GetRepositoryOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetRepositoryOutput) GoString() string {
+ return s.String()
+}
+
+// Represents the input of a list branches operation.
+type ListBranchesInput struct {
+ _ struct{} `type:"structure"`
+
+ // An enumeration token that allows the operation to batch the results.
+ NextToken *string `locationName:"nextToken" type:"string"`
+
+ // The name of the repository that contains the branches.
+ RepositoryName *string `locationName:"repositoryName" min:"1" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s ListBranchesInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListBranchesInput) GoString() string {
+ return s.String()
+}
+
+// Represents the output of a list branches operation.
+type ListBranchesOutput struct {
+ _ struct{} `type:"structure"`
+
+ // The list of branch names.
+ Branches []*string `locationName:"branches" type:"list"`
+
+ // An enumeration token that can be sent back to return the next batch of results.
+ NextToken *string `locationName:"nextToken" type:"string"`
+}
+
+// String returns the string representation
+func (s ListBranchesOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListBranchesOutput) GoString() string {
+ return s.String()
+}
+
+// Represents the input of a list repositories operation.
+type ListRepositoriesInput struct {
+ _ struct{} `type:"structure"`
+
+ // An enumeration token that allows the operation to batch the results of the
+ // operation. Batch sizes are 1,000 for list repository operations. When the
+ // client sends the token back to AWS CodeCommit, another page of 1,000 records
+ // is retrieved.
+ NextToken *string `locationName:"nextToken" type:"string"`
+
+ // The order in which to sort the results of a list repositories operation.
+ Order *string `locationName:"order" type:"string" enum:"OrderEnum"`
+
+ // The criteria used to sort the results of a list repositories operation.
+ SortBy *string `locationName:"sortBy" type:"string" enum:"SortByEnum"`
+}
+
+// String returns the string representation
+func (s ListRepositoriesInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListRepositoriesInput) GoString() string {
+ return s.String()
+}
+
+// Represents the output of a list repositories operation.
+type ListRepositoriesOutput struct {
+ _ struct{} `type:"structure"`
+
+ // An enumeration token that allows the operation to batch the results of the
+ // operation. Batch sizes are 1,000 for list repository operations. When the
+ // client sends the token back to AWS CodeCommit, another page of 1,000 records
+ // is retrieved.
+ NextToken *string `locationName:"nextToken" type:"string"`
+
+ // A list of repositories returned by the list repositories operation.
+ Repositories []*RepositoryNameIdPair `locationName:"repositories" type:"list"`
+}
+
+// String returns the string representation
+func (s ListRepositoriesOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListRepositoriesOutput) GoString() string {
+ return s.String()
+}
+
+// Information about a repository.
+type RepositoryMetadata struct {
+ _ struct{} `type:"structure"`
+
+ // The ID of the AWS account associated with the repository.
+ AccountId *string `locationName:"accountId" type:"string"`
+
+ // The Amazon Resource Name (ARN) of the repository.
+ Arn *string `type:"string"`
+
+ // The URL to use for cloning the repository over HTTPS.
+ CloneUrlHttp *string `locationName:"cloneUrlHttp" type:"string"`
+
+ // The URL to use for cloning the repository over SSH.
+ CloneUrlSsh *string `locationName:"cloneUrlSsh" type:"string"`
+
+ // The date and time the repository was created, in timestamp format.
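+ // The value is carried on the wire as a Unix epoch timestamp
+ // (timestampFormat:"unix") and is decoded into a time.Time by the SDK.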
+ CreationDate *time.Time `locationName:"creationDate" type:"timestamp" timestampFormat:"unix"` + + // The repository's default branch name. + DefaultBranch *string `locationName:"defaultBranch" min:"1" type:"string"` + + // The date and time the repository was last modified, in timestamp format. + LastModifiedDate *time.Time `locationName:"lastModifiedDate" type:"timestamp" timestampFormat:"unix"` + + // A comment or description about the repository. + RepositoryDescription *string `locationName:"repositoryDescription" type:"string"` + + // The ID of the repository. + RepositoryId *string `locationName:"repositoryId" type:"string"` + + // The repository's name. + RepositoryName *string `locationName:"repositoryName" min:"1" type:"string"` +} + +// String returns the string representation +func (s RepositoryMetadata) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RepositoryMetadata) GoString() string { + return s.String() +} + +// Information about a repository name and ID. +type RepositoryNameIdPair struct { + _ struct{} `type:"structure"` + + // The ID associated with the repository name. + RepositoryId *string `locationName:"repositoryId" type:"string"` + + // Repository name is restricted to alphanumeric characters (a-z, A-Z, 0-9), + // ".", "_", and "-". Additionally, the suffix ".git" is prohibited in a repository + // name. + RepositoryName *string `locationName:"repositoryName" min:"1" type:"string"` +} + +// String returns the string representation +func (s RepositoryNameIdPair) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RepositoryNameIdPair) GoString() string { + return s.String() +} + +// Represents the input of an update default branch operation. +type UpdateDefaultBranchInput struct { + _ struct{} `type:"structure"` + + // The name of the branch to set as the default. + DefaultBranchName *string `locationName:"defaultBranchName" min:"1" type:"string" required:"true"` + + // The name of the repository to set or change the default branch for. + RepositoryName *string `locationName:"repositoryName" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s UpdateDefaultBranchInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateDefaultBranchInput) GoString() string { + return s.String() +} + +type UpdateDefaultBranchOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s UpdateDefaultBranchOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateDefaultBranchOutput) GoString() string { + return s.String() +} + +// Represents the input of an update repository description operation. +type UpdateRepositoryDescriptionInput struct { + _ struct{} `type:"structure"` + + // The new comment or description for the specified repository. + RepositoryDescription *string `locationName:"repositoryDescription" type:"string"` + + // The name of the repository to set or change the comment or description for. 
+ RepositoryName *string `locationName:"repositoryName" min:"1" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s UpdateRepositoryDescriptionInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s UpdateRepositoryDescriptionInput) GoString() string {
+ return s.String()
+}
+
+type UpdateRepositoryDescriptionOutput struct {
+ _ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s UpdateRepositoryDescriptionOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s UpdateRepositoryDescriptionOutput) GoString() string {
+ return s.String()
+}
+
+// Represents the input of an update repository name operation.
+type UpdateRepositoryNameInput struct {
+ _ struct{} `type:"structure"`
+
+ // Repository name is restricted to alphanumeric characters (a-z, A-Z, 0-9),
+ // ".", "_", and "-". Additionally, the suffix ".git" is prohibited in a repository
+ // name.
+ NewName *string `locationName:"newName" min:"1" type:"string" required:"true"`
+
+ // Repository name is restricted to alphanumeric characters (a-z, A-Z, 0-9),
+ // ".", "_", and "-". Additionally, the suffix ".git" is prohibited in a repository
+ // name.
+ OldName *string `locationName:"oldName" min:"1" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s UpdateRepositoryNameInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s UpdateRepositoryNameInput) GoString() string {
+ return s.String()
+}
+
+type UpdateRepositoryNameOutput struct {
+ _ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s UpdateRepositoryNameOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s UpdateRepositoryNameOutput) GoString() string {
+ return s.String()
+}
+
+const (
+ // @enum OrderEnum
+ OrderEnumAscending = "ascending"
+ // @enum OrderEnum
+ OrderEnumDescending = "descending"
+)
+
+const (
+ // @enum SortByEnum
+ SortByEnumRepositoryName = "repositoryName"
+ // @enum SortByEnum
+ SortByEnumLastModifiedDate = "lastModifiedDate"
+)
diff --git a/vendor/github.com/aws/aws-sdk-go/service/codecommit/codecommitiface/interface.go b/vendor/github.com/aws/aws-sdk-go/service/codecommit/codecommitiface/interface.go
new file mode 100644
index 000000000..fb949d6c6
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/codecommit/codecommitiface/interface.go
@@ -0,0 +1,58 @@
+// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT.
+
+// Package codecommitiface provides an interface for the AWS CodeCommit service.
+package codecommitiface
+
+import (
+ "github.com/aws/aws-sdk-go/aws/request"
+ "github.com/aws/aws-sdk-go/service/codecommit"
+)
+
+// CodeCommitAPI is the interface type for codecommit.CodeCommit.
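+//
+// The interface can be used to mock the concrete client in tests; a minimal
+// sketch, where mockCodeCommit is a hypothetical test double that embeds the
+// interface and overrides only the methods a test needs:
+//
+//    type mockCodeCommit struct {
+//        codecommitiface.CodeCommitAPI
+//    }
+//
+//    var _ codecommitiface.CodeCommitAPI = (*mockCodeCommit)(nil)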
+type CodeCommitAPI interface { + BatchGetRepositoriesRequest(*codecommit.BatchGetRepositoriesInput) (*request.Request, *codecommit.BatchGetRepositoriesOutput) + + BatchGetRepositories(*codecommit.BatchGetRepositoriesInput) (*codecommit.BatchGetRepositoriesOutput, error) + + CreateBranchRequest(*codecommit.CreateBranchInput) (*request.Request, *codecommit.CreateBranchOutput) + + CreateBranch(*codecommit.CreateBranchInput) (*codecommit.CreateBranchOutput, error) + + CreateRepositoryRequest(*codecommit.CreateRepositoryInput) (*request.Request, *codecommit.CreateRepositoryOutput) + + CreateRepository(*codecommit.CreateRepositoryInput) (*codecommit.CreateRepositoryOutput, error) + + DeleteRepositoryRequest(*codecommit.DeleteRepositoryInput) (*request.Request, *codecommit.DeleteRepositoryOutput) + + DeleteRepository(*codecommit.DeleteRepositoryInput) (*codecommit.DeleteRepositoryOutput, error) + + GetBranchRequest(*codecommit.GetBranchInput) (*request.Request, *codecommit.GetBranchOutput) + + GetBranch(*codecommit.GetBranchInput) (*codecommit.GetBranchOutput, error) + + GetRepositoryRequest(*codecommit.GetRepositoryInput) (*request.Request, *codecommit.GetRepositoryOutput) + + GetRepository(*codecommit.GetRepositoryInput) (*codecommit.GetRepositoryOutput, error) + + ListBranchesRequest(*codecommit.ListBranchesInput) (*request.Request, *codecommit.ListBranchesOutput) + + ListBranches(*codecommit.ListBranchesInput) (*codecommit.ListBranchesOutput, error) + + ListRepositoriesRequest(*codecommit.ListRepositoriesInput) (*request.Request, *codecommit.ListRepositoriesOutput) + + ListRepositories(*codecommit.ListRepositoriesInput) (*codecommit.ListRepositoriesOutput, error) + + UpdateDefaultBranchRequest(*codecommit.UpdateDefaultBranchInput) (*request.Request, *codecommit.UpdateDefaultBranchOutput) + + UpdateDefaultBranch(*codecommit.UpdateDefaultBranchInput) (*codecommit.UpdateDefaultBranchOutput, error) + + UpdateRepositoryDescriptionRequest(*codecommit.UpdateRepositoryDescriptionInput) (*request.Request, *codecommit.UpdateRepositoryDescriptionOutput) + + UpdateRepositoryDescription(*codecommit.UpdateRepositoryDescriptionInput) (*codecommit.UpdateRepositoryDescriptionOutput, error) + + UpdateRepositoryNameRequest(*codecommit.UpdateRepositoryNameInput) (*request.Request, *codecommit.UpdateRepositoryNameOutput) + + UpdateRepositoryName(*codecommit.UpdateRepositoryNameInput) (*codecommit.UpdateRepositoryNameOutput, error) +} + +var _ CodeCommitAPI = (*codecommit.CodeCommit)(nil) diff --git a/vendor/github.com/aws/aws-sdk-go/service/codecommit/examples_test.go b/vendor/github.com/aws/aws-sdk-go/service/codecommit/examples_test.go new file mode 100644 index 000000000..b320b9737 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/codecommit/examples_test.go @@ -0,0 +1,238 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +package codecommit_test + +import ( + "bytes" + "fmt" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/session" + "github.com/aws/aws-sdk-go/service/codecommit" +) + +var _ time.Duration +var _ bytes.Buffer + +func ExampleCodeCommit_BatchGetRepositories() { + svc := codecommit.New(session.New()) + + params := &codecommit.BatchGetRepositoriesInput{ + RepositoryNames: []*string{ // Required + aws.String("RepositoryName"), // Required + // More values... + }, + } + resp, err := svc.BatchGetRepositories(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. 
+ fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCodeCommit_CreateBranch() { + svc := codecommit.New(session.New()) + + params := &codecommit.CreateBranchInput{ + BranchName: aws.String("BranchName"), // Required + CommitId: aws.String("CommitId"), // Required + RepositoryName: aws.String("RepositoryName"), // Required + } + resp, err := svc.CreateBranch(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCodeCommit_CreateRepository() { + svc := codecommit.New(session.New()) + + params := &codecommit.CreateRepositoryInput{ + RepositoryName: aws.String("RepositoryName"), // Required + RepositoryDescription: aws.String("RepositoryDescription"), + } + resp, err := svc.CreateRepository(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCodeCommit_DeleteRepository() { + svc := codecommit.New(session.New()) + + params := &codecommit.DeleteRepositoryInput{ + RepositoryName: aws.String("RepositoryName"), // Required + } + resp, err := svc.DeleteRepository(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCodeCommit_GetBranch() { + svc := codecommit.New(session.New()) + + params := &codecommit.GetBranchInput{ + BranchName: aws.String("BranchName"), + RepositoryName: aws.String("RepositoryName"), + } + resp, err := svc.GetBranch(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCodeCommit_GetRepository() { + svc := codecommit.New(session.New()) + + params := &codecommit.GetRepositoryInput{ + RepositoryName: aws.String("RepositoryName"), // Required + } + resp, err := svc.GetRepository(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCodeCommit_ListBranches() { + svc := codecommit.New(session.New()) + + params := &codecommit.ListBranchesInput{ + RepositoryName: aws.String("RepositoryName"), // Required + NextToken: aws.String("NextToken"), + } + resp, err := svc.ListBranches(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCodeCommit_ListRepositories() { + svc := codecommit.New(session.New()) + + params := &codecommit.ListRepositoriesInput{ + NextToken: aws.String("NextToken"), + Order: aws.String("OrderEnum"), + SortBy: aws.String("SortByEnum"), + } + resp, err := svc.ListRepositories(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp)
+}
+
+func ExampleCodeCommit_UpdateDefaultBranch() {
+ svc := codecommit.New(session.New())
+
+ params := &codecommit.UpdateDefaultBranchInput{
+ DefaultBranchName: aws.String("BranchName"), // Required
+ RepositoryName: aws.String("RepositoryName"), // Required
+ }
+ resp, err := svc.UpdateDefaultBranch(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleCodeCommit_UpdateRepositoryDescription() {
+ svc := codecommit.New(session.New())
+
+ params := &codecommit.UpdateRepositoryDescriptionInput{
+ RepositoryName: aws.String("RepositoryName"), // Required
+ RepositoryDescription: aws.String("RepositoryDescription"),
+ }
+ resp, err := svc.UpdateRepositoryDescription(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleCodeCommit_UpdateRepositoryName() {
+ svc := codecommit.New(session.New())
+
+ params := &codecommit.UpdateRepositoryNameInput{
+ NewName: aws.String("RepositoryName"), // Required
+ OldName: aws.String("RepositoryName"), // Required
+ }
+ resp, err := svc.UpdateRepositoryName(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/service/codecommit/service.go b/vendor/github.com/aws/aws-sdk-go/service/codecommit/service.go
new file mode 100644
index 000000000..760d89d93
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/codecommit/service.go
@@ -0,0 +1,94 @@
+// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT.
+
+package codecommit
+
+import (
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/client"
+ "github.com/aws/aws-sdk-go/aws/client/metadata"
+ "github.com/aws/aws-sdk-go/aws/request"
+ "github.com/aws/aws-sdk-go/private/protocol/jsonrpc"
+ "github.com/aws/aws-sdk-go/private/signer/v4"
+)
+
+// This is the AWS CodeCommit API Reference. This reference provides descriptions
+// of the AWS CodeCommit API.
+//
+// You can use the AWS CodeCommit API to work with the following objects:
+// Repositories, Branches, and Commits. For information about how to use
+// AWS CodeCommit, see the AWS CodeCommit User Guide.
+//
+// The service client's operations are safe to be used concurrently.
+// It is not safe to mutate any of the client's properties though.
+type CodeCommit struct {
+ *client.Client
+}
+
+// Used for custom client initialization logic
+var initClient func(*client.Client)
+
+// Used for custom request initialization logic
+var initRequest func(*request.Request)
+
+// A ServiceName is the name of the service the client will make API calls to.
+const ServiceName = "codecommit"
+
+// New creates a new instance of the CodeCommit client with a session.
+// If additional configuration is needed for the client instance use the optional
+// aws.Config parameter to add your extra config.
+//
+// Example:
+// // Create a CodeCommit client from just a session.
+// svc := codecommit.New(mySession) +// +// // Create a CodeCommit client with additional configuration +// svc := codecommit.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func New(p client.ConfigProvider, cfgs ...*aws.Config) *CodeCommit { + c := p.ClientConfig(ServiceName, cfgs...) + return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *CodeCommit { + svc := &CodeCommit{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: ServiceName, + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "2015-04-13", + JSONVersion: "1.1", + TargetPrefix: "CodeCommit_20150413", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Build.PushBack(jsonrpc.Build) + svc.Handlers.Unmarshal.PushBack(jsonrpc.Unmarshal) + svc.Handlers.UnmarshalMeta.PushBack(jsonrpc.UnmarshalMeta) + svc.Handlers.UnmarshalError.PushBack(jsonrpc.UnmarshalError) + + // Run custom client initialization if present + if initClient != nil { + initClient(svc.Client) + } + + return svc +} + +// newRequest creates a new request for a CodeCommit operation and runs any +// custom request initialization. +func (c *CodeCommit) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + // Run custom request initialization if present + if initRequest != nil { + initRequest(req) + } + + return req +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/codedeploy/api.go b/vendor/github.com/aws/aws-sdk-go/service/codedeploy/api.go new file mode 100644 index 000000000..ea7ddc571 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/codedeploy/api.go @@ -0,0 +1,3263 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +// Package codedeploy provides a client for AWS CodeDeploy. +package codedeploy + +import ( + "time" + + "github.com/aws/aws-sdk-go/aws/awsutil" + "github.com/aws/aws-sdk-go/aws/request" +) + +const opAddTagsToOnPremisesInstances = "AddTagsToOnPremisesInstances" + +// AddTagsToOnPremisesInstancesRequest generates a request for the AddTagsToOnPremisesInstances operation. +func (c *CodeDeploy) AddTagsToOnPremisesInstancesRequest(input *AddTagsToOnPremisesInstancesInput) (req *request.Request, output *AddTagsToOnPremisesInstancesOutput) { + op := &request.Operation{ + Name: opAddTagsToOnPremisesInstances, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &AddTagsToOnPremisesInstancesInput{} + } + + req = c.newRequest(op, input, output) + output = &AddTagsToOnPremisesInstancesOutput{} + req.Data = output + return +} + +// Adds tags to on-premises instances. +func (c *CodeDeploy) AddTagsToOnPremisesInstances(input *AddTagsToOnPremisesInstancesInput) (*AddTagsToOnPremisesInstancesOutput, error) { + req, out := c.AddTagsToOnPremisesInstancesRequest(input) + err := req.Send() + return out, err +} + +const opBatchGetApplications = "BatchGetApplications" + +// BatchGetApplicationsRequest generates a request for the BatchGetApplications operation. 
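+//
+// The Request form lets a caller adjust the request before it is sent; a
+// minimal sketch, assuming an existing *CodeDeploy client svc and an empty
+// placeholder input:
+//
+//    req, out := svc.BatchGetApplicationsRequest(&codedeploy.BatchGetApplicationsInput{})
+//    if err := req.Send(); err == nil {
+//        fmt.Println(out)
+//    }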
+func (c *CodeDeploy) BatchGetApplicationsRequest(input *BatchGetApplicationsInput) (req *request.Request, output *BatchGetApplicationsOutput) { + op := &request.Operation{ + Name: opBatchGetApplications, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &BatchGetApplicationsInput{} + } + + req = c.newRequest(op, input, output) + output = &BatchGetApplicationsOutput{} + req.Data = output + return +} + +// Gets information about one or more applications. +func (c *CodeDeploy) BatchGetApplications(input *BatchGetApplicationsInput) (*BatchGetApplicationsOutput, error) { + req, out := c.BatchGetApplicationsRequest(input) + err := req.Send() + return out, err +} + +const opBatchGetDeployments = "BatchGetDeployments" + +// BatchGetDeploymentsRequest generates a request for the BatchGetDeployments operation. +func (c *CodeDeploy) BatchGetDeploymentsRequest(input *BatchGetDeploymentsInput) (req *request.Request, output *BatchGetDeploymentsOutput) { + op := &request.Operation{ + Name: opBatchGetDeployments, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &BatchGetDeploymentsInput{} + } + + req = c.newRequest(op, input, output) + output = &BatchGetDeploymentsOutput{} + req.Data = output + return +} + +// Gets information about one or more deployments. +func (c *CodeDeploy) BatchGetDeployments(input *BatchGetDeploymentsInput) (*BatchGetDeploymentsOutput, error) { + req, out := c.BatchGetDeploymentsRequest(input) + err := req.Send() + return out, err +} + +const opBatchGetOnPremisesInstances = "BatchGetOnPremisesInstances" + +// BatchGetOnPremisesInstancesRequest generates a request for the BatchGetOnPremisesInstances operation. +func (c *CodeDeploy) BatchGetOnPremisesInstancesRequest(input *BatchGetOnPremisesInstancesInput) (req *request.Request, output *BatchGetOnPremisesInstancesOutput) { + op := &request.Operation{ + Name: opBatchGetOnPremisesInstances, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &BatchGetOnPremisesInstancesInput{} + } + + req = c.newRequest(op, input, output) + output = &BatchGetOnPremisesInstancesOutput{} + req.Data = output + return +} + +// Gets information about one or more on-premises instances. +func (c *CodeDeploy) BatchGetOnPremisesInstances(input *BatchGetOnPremisesInstancesInput) (*BatchGetOnPremisesInstancesOutput, error) { + req, out := c.BatchGetOnPremisesInstancesRequest(input) + err := req.Send() + return out, err +} + +const opCreateApplication = "CreateApplication" + +// CreateApplicationRequest generates a request for the CreateApplication operation. +func (c *CodeDeploy) CreateApplicationRequest(input *CreateApplicationInput) (req *request.Request, output *CreateApplicationOutput) { + op := &request.Operation{ + Name: opCreateApplication, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateApplicationInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateApplicationOutput{} + req.Data = output + return +} + +// Creates a new application. +func (c *CodeDeploy) CreateApplication(input *CreateApplicationInput) (*CreateApplicationOutput, error) { + req, out := c.CreateApplicationRequest(input) + err := req.Send() + return out, err +} + +const opCreateDeployment = "CreateDeployment" + +// CreateDeploymentRequest generates a request for the CreateDeployment operation. 
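+//
+// Most callers use the CreateDeployment convenience wrapper instead, which
+// builds and sends the request in one step; a minimal sketch with a
+// hypothetical application name:
+//
+//    out, err := svc.CreateDeployment(&codedeploy.CreateDeploymentInput{
+//        ApplicationName: aws.String("ApplicationName"),
+//    })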
+func (c *CodeDeploy) CreateDeploymentRequest(input *CreateDeploymentInput) (req *request.Request, output *CreateDeploymentOutput) { + op := &request.Operation{ + Name: opCreateDeployment, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateDeploymentInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateDeploymentOutput{} + req.Data = output + return +} + +// Deploys an application revision through the specified deployment group. +func (c *CodeDeploy) CreateDeployment(input *CreateDeploymentInput) (*CreateDeploymentOutput, error) { + req, out := c.CreateDeploymentRequest(input) + err := req.Send() + return out, err +} + +const opCreateDeploymentConfig = "CreateDeploymentConfig" + +// CreateDeploymentConfigRequest generates a request for the CreateDeploymentConfig operation. +func (c *CodeDeploy) CreateDeploymentConfigRequest(input *CreateDeploymentConfigInput) (req *request.Request, output *CreateDeploymentConfigOutput) { + op := &request.Operation{ + Name: opCreateDeploymentConfig, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateDeploymentConfigInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateDeploymentConfigOutput{} + req.Data = output + return +} + +// Creates a new deployment configuration. +func (c *CodeDeploy) CreateDeploymentConfig(input *CreateDeploymentConfigInput) (*CreateDeploymentConfigOutput, error) { + req, out := c.CreateDeploymentConfigRequest(input) + err := req.Send() + return out, err +} + +const opCreateDeploymentGroup = "CreateDeploymentGroup" + +// CreateDeploymentGroupRequest generates a request for the CreateDeploymentGroup operation. +func (c *CodeDeploy) CreateDeploymentGroupRequest(input *CreateDeploymentGroupInput) (req *request.Request, output *CreateDeploymentGroupOutput) { + op := &request.Operation{ + Name: opCreateDeploymentGroup, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateDeploymentGroupInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateDeploymentGroupOutput{} + req.Data = output + return +} + +// Creates a new deployment group for application revisions to be deployed to. +func (c *CodeDeploy) CreateDeploymentGroup(input *CreateDeploymentGroupInput) (*CreateDeploymentGroupOutput, error) { + req, out := c.CreateDeploymentGroupRequest(input) + err := req.Send() + return out, err +} + +const opDeleteApplication = "DeleteApplication" + +// DeleteApplicationRequest generates a request for the DeleteApplication operation. +func (c *CodeDeploy) DeleteApplicationRequest(input *DeleteApplicationInput) (req *request.Request, output *DeleteApplicationOutput) { + op := &request.Operation{ + Name: opDeleteApplication, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteApplicationInput{} + } + + req = c.newRequest(op, input, output) + output = &DeleteApplicationOutput{} + req.Data = output + return +} + +// Deletes an application. +func (c *CodeDeploy) DeleteApplication(input *DeleteApplicationInput) (*DeleteApplicationOutput, error) { + req, out := c.DeleteApplicationRequest(input) + err := req.Send() + return out, err +} + +const opDeleteDeploymentConfig = "DeleteDeploymentConfig" + +// DeleteDeploymentConfigRequest generates a request for the DeleteDeploymentConfig operation. 
+func (c *CodeDeploy) DeleteDeploymentConfigRequest(input *DeleteDeploymentConfigInput) (req *request.Request, output *DeleteDeploymentConfigOutput) { + op := &request.Operation{ + Name: opDeleteDeploymentConfig, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteDeploymentConfigInput{} + } + + req = c.newRequest(op, input, output) + output = &DeleteDeploymentConfigOutput{} + req.Data = output + return +} + +// Deletes a deployment configuration. +// +// A deployment configuration cannot be deleted if it is currently in use. +// Also, predefined configurations cannot be deleted. +func (c *CodeDeploy) DeleteDeploymentConfig(input *DeleteDeploymentConfigInput) (*DeleteDeploymentConfigOutput, error) { + req, out := c.DeleteDeploymentConfigRequest(input) + err := req.Send() + return out, err +} + +const opDeleteDeploymentGroup = "DeleteDeploymentGroup" + +// DeleteDeploymentGroupRequest generates a request for the DeleteDeploymentGroup operation. +func (c *CodeDeploy) DeleteDeploymentGroupRequest(input *DeleteDeploymentGroupInput) (req *request.Request, output *DeleteDeploymentGroupOutput) { + op := &request.Operation{ + Name: opDeleteDeploymentGroup, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteDeploymentGroupInput{} + } + + req = c.newRequest(op, input, output) + output = &DeleteDeploymentGroupOutput{} + req.Data = output + return +} + +// Deletes a deployment group. +func (c *CodeDeploy) DeleteDeploymentGroup(input *DeleteDeploymentGroupInput) (*DeleteDeploymentGroupOutput, error) { + req, out := c.DeleteDeploymentGroupRequest(input) + err := req.Send() + return out, err +} + +const opDeregisterOnPremisesInstance = "DeregisterOnPremisesInstance" + +// DeregisterOnPremisesInstanceRequest generates a request for the DeregisterOnPremisesInstance operation. +func (c *CodeDeploy) DeregisterOnPremisesInstanceRequest(input *DeregisterOnPremisesInstanceInput) (req *request.Request, output *DeregisterOnPremisesInstanceOutput) { + op := &request.Operation{ + Name: opDeregisterOnPremisesInstance, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeregisterOnPremisesInstanceInput{} + } + + req = c.newRequest(op, input, output) + output = &DeregisterOnPremisesInstanceOutput{} + req.Data = output + return +} + +// Deregisters an on-premises instance. +func (c *CodeDeploy) DeregisterOnPremisesInstance(input *DeregisterOnPremisesInstanceInput) (*DeregisterOnPremisesInstanceOutput, error) { + req, out := c.DeregisterOnPremisesInstanceRequest(input) + err := req.Send() + return out, err +} + +const opGetApplication = "GetApplication" + +// GetApplicationRequest generates a request for the GetApplication operation. +func (c *CodeDeploy) GetApplicationRequest(input *GetApplicationInput) (req *request.Request, output *GetApplicationOutput) { + op := &request.Operation{ + Name: opGetApplication, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetApplicationInput{} + } + + req = c.newRequest(op, input, output) + output = &GetApplicationOutput{} + req.Data = output + return +} + +// Gets information about an application. +func (c *CodeDeploy) GetApplication(input *GetApplicationInput) (*GetApplicationOutput, error) { + req, out := c.GetApplicationRequest(input) + err := req.Send() + return out, err +} + +const opGetApplicationRevision = "GetApplicationRevision" + +// GetApplicationRevisionRequest generates a request for the GetApplicationRevision operation. 
+func (c *CodeDeploy) GetApplicationRevisionRequest(input *GetApplicationRevisionInput) (req *request.Request, output *GetApplicationRevisionOutput) { + op := &request.Operation{ + Name: opGetApplicationRevision, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetApplicationRevisionInput{} + } + + req = c.newRequest(op, input, output) + output = &GetApplicationRevisionOutput{} + req.Data = output + return +} + +// Gets information about an application revision. +func (c *CodeDeploy) GetApplicationRevision(input *GetApplicationRevisionInput) (*GetApplicationRevisionOutput, error) { + req, out := c.GetApplicationRevisionRequest(input) + err := req.Send() + return out, err +} + +const opGetDeployment = "GetDeployment" + +// GetDeploymentRequest generates a request for the GetDeployment operation. +func (c *CodeDeploy) GetDeploymentRequest(input *GetDeploymentInput) (req *request.Request, output *GetDeploymentOutput) { + op := &request.Operation{ + Name: opGetDeployment, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetDeploymentInput{} + } + + req = c.newRequest(op, input, output) + output = &GetDeploymentOutput{} + req.Data = output + return +} + +// Gets information about a deployment. +func (c *CodeDeploy) GetDeployment(input *GetDeploymentInput) (*GetDeploymentOutput, error) { + req, out := c.GetDeploymentRequest(input) + err := req.Send() + return out, err +} + +const opGetDeploymentConfig = "GetDeploymentConfig" + +// GetDeploymentConfigRequest generates a request for the GetDeploymentConfig operation. +func (c *CodeDeploy) GetDeploymentConfigRequest(input *GetDeploymentConfigInput) (req *request.Request, output *GetDeploymentConfigOutput) { + op := &request.Operation{ + Name: opGetDeploymentConfig, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetDeploymentConfigInput{} + } + + req = c.newRequest(op, input, output) + output = &GetDeploymentConfigOutput{} + req.Data = output + return +} + +// Gets information about a deployment configuration. +func (c *CodeDeploy) GetDeploymentConfig(input *GetDeploymentConfigInput) (*GetDeploymentConfigOutput, error) { + req, out := c.GetDeploymentConfigRequest(input) + err := req.Send() + return out, err +} + +const opGetDeploymentGroup = "GetDeploymentGroup" + +// GetDeploymentGroupRequest generates a request for the GetDeploymentGroup operation. +func (c *CodeDeploy) GetDeploymentGroupRequest(input *GetDeploymentGroupInput) (req *request.Request, output *GetDeploymentGroupOutput) { + op := &request.Operation{ + Name: opGetDeploymentGroup, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetDeploymentGroupInput{} + } + + req = c.newRequest(op, input, output) + output = &GetDeploymentGroupOutput{} + req.Data = output + return +} + +// Gets information about a deployment group. +func (c *CodeDeploy) GetDeploymentGroup(input *GetDeploymentGroupInput) (*GetDeploymentGroupOutput, error) { + req, out := c.GetDeploymentGroupRequest(input) + err := req.Send() + return out, err +} + +const opGetDeploymentInstance = "GetDeploymentInstance" + +// GetDeploymentInstanceRequest generates a request for the GetDeploymentInstance operation. 
+func (c *CodeDeploy) GetDeploymentInstanceRequest(input *GetDeploymentInstanceInput) (req *request.Request, output *GetDeploymentInstanceOutput) { + op := &request.Operation{ + Name: opGetDeploymentInstance, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetDeploymentInstanceInput{} + } + + req = c.newRequest(op, input, output) + output = &GetDeploymentInstanceOutput{} + req.Data = output + return +} + +// Gets information about an instance as part of a deployment. +func (c *CodeDeploy) GetDeploymentInstance(input *GetDeploymentInstanceInput) (*GetDeploymentInstanceOutput, error) { + req, out := c.GetDeploymentInstanceRequest(input) + err := req.Send() + return out, err +} + +const opGetOnPremisesInstance = "GetOnPremisesInstance" + +// GetOnPremisesInstanceRequest generates a request for the GetOnPremisesInstance operation. +func (c *CodeDeploy) GetOnPremisesInstanceRequest(input *GetOnPremisesInstanceInput) (req *request.Request, output *GetOnPremisesInstanceOutput) { + op := &request.Operation{ + Name: opGetOnPremisesInstance, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetOnPremisesInstanceInput{} + } + + req = c.newRequest(op, input, output) + output = &GetOnPremisesInstanceOutput{} + req.Data = output + return +} + +// Gets information about an on-premises instance. +func (c *CodeDeploy) GetOnPremisesInstance(input *GetOnPremisesInstanceInput) (*GetOnPremisesInstanceOutput, error) { + req, out := c.GetOnPremisesInstanceRequest(input) + err := req.Send() + return out, err +} + +const opListApplicationRevisions = "ListApplicationRevisions" + +// ListApplicationRevisionsRequest generates a request for the ListApplicationRevisions operation. +func (c *CodeDeploy) ListApplicationRevisionsRequest(input *ListApplicationRevisionsInput) (req *request.Request, output *ListApplicationRevisionsOutput) { + op := &request.Operation{ + Name: opListApplicationRevisions, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"nextToken"}, + OutputTokens: []string{"nextToken"}, + LimitToken: "", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListApplicationRevisionsInput{} + } + + req = c.newRequest(op, input, output) + output = &ListApplicationRevisionsOutput{} + req.Data = output + return +} + +// Lists information about revisions for an application. +func (c *CodeDeploy) ListApplicationRevisions(input *ListApplicationRevisionsInput) (*ListApplicationRevisionsOutput, error) { + req, out := c.ListApplicationRevisionsRequest(input) + err := req.Send() + return out, err +} + +func (c *CodeDeploy) ListApplicationRevisionsPages(input *ListApplicationRevisionsInput, fn func(p *ListApplicationRevisionsOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.ListApplicationRevisionsRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*ListApplicationRevisionsOutput), lastPage) + }) +} + +const opListApplications = "ListApplications" + +// ListApplicationsRequest generates a request for the ListApplications operation. 
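+//
+// Operations declared with a Paginator also generate a Pages helper that
+// walks each page of results; a minimal sketch for ListApplications:
+//
+//    err := svc.ListApplicationsPages(&codedeploy.ListApplicationsInput{},
+//        func(p *codedeploy.ListApplicationsOutput, lastPage bool) bool {
+//            fmt.Println(p)
+//            return true // keep requesting pages until none remain
+//        })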
+func (c *CodeDeploy) ListApplicationsRequest(input *ListApplicationsInput) (req *request.Request, output *ListApplicationsOutput) { + op := &request.Operation{ + Name: opListApplications, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"nextToken"}, + OutputTokens: []string{"nextToken"}, + LimitToken: "", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListApplicationsInput{} + } + + req = c.newRequest(op, input, output) + output = &ListApplicationsOutput{} + req.Data = output + return +} + +// Lists the applications registered with the applicable IAM user or AWS account. +func (c *CodeDeploy) ListApplications(input *ListApplicationsInput) (*ListApplicationsOutput, error) { + req, out := c.ListApplicationsRequest(input) + err := req.Send() + return out, err +} + +func (c *CodeDeploy) ListApplicationsPages(input *ListApplicationsInput, fn func(p *ListApplicationsOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.ListApplicationsRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*ListApplicationsOutput), lastPage) + }) +} + +const opListDeploymentConfigs = "ListDeploymentConfigs" + +// ListDeploymentConfigsRequest generates a request for the ListDeploymentConfigs operation. +func (c *CodeDeploy) ListDeploymentConfigsRequest(input *ListDeploymentConfigsInput) (req *request.Request, output *ListDeploymentConfigsOutput) { + op := &request.Operation{ + Name: opListDeploymentConfigs, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"nextToken"}, + OutputTokens: []string{"nextToken"}, + LimitToken: "", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListDeploymentConfigsInput{} + } + + req = c.newRequest(op, input, output) + output = &ListDeploymentConfigsOutput{} + req.Data = output + return +} + +// Lists the deployment configurations with the applicable IAM user or AWS account. +func (c *CodeDeploy) ListDeploymentConfigs(input *ListDeploymentConfigsInput) (*ListDeploymentConfigsOutput, error) { + req, out := c.ListDeploymentConfigsRequest(input) + err := req.Send() + return out, err +} + +func (c *CodeDeploy) ListDeploymentConfigsPages(input *ListDeploymentConfigsInput, fn func(p *ListDeploymentConfigsOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.ListDeploymentConfigsRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*ListDeploymentConfigsOutput), lastPage) + }) +} + +const opListDeploymentGroups = "ListDeploymentGroups" + +// ListDeploymentGroupsRequest generates a request for the ListDeploymentGroups operation. 
+func (c *CodeDeploy) ListDeploymentGroupsRequest(input *ListDeploymentGroupsInput) (req *request.Request, output *ListDeploymentGroupsOutput) { + op := &request.Operation{ + Name: opListDeploymentGroups, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"nextToken"}, + OutputTokens: []string{"nextToken"}, + LimitToken: "", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListDeploymentGroupsInput{} + } + + req = c.newRequest(op, input, output) + output = &ListDeploymentGroupsOutput{} + req.Data = output + return +} + +// Lists the deployment groups for an application registered with the applicable +// IAM user or AWS account. +func (c *CodeDeploy) ListDeploymentGroups(input *ListDeploymentGroupsInput) (*ListDeploymentGroupsOutput, error) { + req, out := c.ListDeploymentGroupsRequest(input) + err := req.Send() + return out, err +} + +func (c *CodeDeploy) ListDeploymentGroupsPages(input *ListDeploymentGroupsInput, fn func(p *ListDeploymentGroupsOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.ListDeploymentGroupsRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*ListDeploymentGroupsOutput), lastPage) + }) +} + +const opListDeploymentInstances = "ListDeploymentInstances" + +// ListDeploymentInstancesRequest generates a request for the ListDeploymentInstances operation. +func (c *CodeDeploy) ListDeploymentInstancesRequest(input *ListDeploymentInstancesInput) (req *request.Request, output *ListDeploymentInstancesOutput) { + op := &request.Operation{ + Name: opListDeploymentInstances, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"nextToken"}, + OutputTokens: []string{"nextToken"}, + LimitToken: "", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListDeploymentInstancesInput{} + } + + req = c.newRequest(op, input, output) + output = &ListDeploymentInstancesOutput{} + req.Data = output + return +} + +// Lists the instances for a deployment associated with the applicable IAM user +// or AWS account. +func (c *CodeDeploy) ListDeploymentInstances(input *ListDeploymentInstancesInput) (*ListDeploymentInstancesOutput, error) { + req, out := c.ListDeploymentInstancesRequest(input) + err := req.Send() + return out, err +} + +func (c *CodeDeploy) ListDeploymentInstancesPages(input *ListDeploymentInstancesInput, fn func(p *ListDeploymentInstancesOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.ListDeploymentInstancesRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*ListDeploymentInstancesOutput), lastPage) + }) +} + +const opListDeployments = "ListDeployments" + +// ListDeploymentsRequest generates a request for the ListDeployments operation. 
+func (c *CodeDeploy) ListDeploymentsRequest(input *ListDeploymentsInput) (req *request.Request, output *ListDeploymentsOutput) { + op := &request.Operation{ + Name: opListDeployments, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"nextToken"}, + OutputTokens: []string{"nextToken"}, + LimitToken: "", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListDeploymentsInput{} + } + + req = c.newRequest(op, input, output) + output = &ListDeploymentsOutput{} + req.Data = output + return +} + +// Lists the deployments within a deployment group for an application registered +// with the applicable IAM user or AWS account. +func (c *CodeDeploy) ListDeployments(input *ListDeploymentsInput) (*ListDeploymentsOutput, error) { + req, out := c.ListDeploymentsRequest(input) + err := req.Send() + return out, err +} + +func (c *CodeDeploy) ListDeploymentsPages(input *ListDeploymentsInput, fn func(p *ListDeploymentsOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.ListDeploymentsRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*ListDeploymentsOutput), lastPage) + }) +} + +const opListOnPremisesInstances = "ListOnPremisesInstances" + +// ListOnPremisesInstancesRequest generates a request for the ListOnPremisesInstances operation. +func (c *CodeDeploy) ListOnPremisesInstancesRequest(input *ListOnPremisesInstancesInput) (req *request.Request, output *ListOnPremisesInstancesOutput) { + op := &request.Operation{ + Name: opListOnPremisesInstances, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ListOnPremisesInstancesInput{} + } + + req = c.newRequest(op, input, output) + output = &ListOnPremisesInstancesOutput{} + req.Data = output + return +} + +// Gets a list of one or more on-premises instance names. +// +// Unless otherwise specified, both registered and deregistered on-premises +// instance names will be listed. To list only registered or deregistered on-premises +// instance names, use the registration status parameter. +func (c *CodeDeploy) ListOnPremisesInstances(input *ListOnPremisesInstancesInput) (*ListOnPremisesInstancesOutput, error) { + req, out := c.ListOnPremisesInstancesRequest(input) + err := req.Send() + return out, err +} + +const opRegisterApplicationRevision = "RegisterApplicationRevision" + +// RegisterApplicationRevisionRequest generates a request for the RegisterApplicationRevision operation. +func (c *CodeDeploy) RegisterApplicationRevisionRequest(input *RegisterApplicationRevisionInput) (req *request.Request, output *RegisterApplicationRevisionOutput) { + op := &request.Operation{ + Name: opRegisterApplicationRevision, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &RegisterApplicationRevisionInput{} + } + + req = c.newRequest(op, input, output) + output = &RegisterApplicationRevisionOutput{} + req.Data = output + return +} + +// Registers with AWS CodeDeploy a revision for the specified application. +func (c *CodeDeploy) RegisterApplicationRevision(input *RegisterApplicationRevisionInput) (*RegisterApplicationRevisionOutput, error) { + req, out := c.RegisterApplicationRevisionRequest(input) + err := req.Send() + return out, err +} + +const opRegisterOnPremisesInstance = "RegisterOnPremisesInstance" + +// RegisterOnPremisesInstanceRequest generates a request for the RegisterOnPremisesInstance operation. 
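+
+// Editor's note: an editorial, hypothetical sketch, not part of the vendored
+// SDK. Every operation here comes as a Request/Send pair: the *Request method
+// only builds a request.Request, so a caller can adjust it before sending.
+// Assuming svc is a configured *CodeDeploy client:
+//
+//	req, out := svc.ListDeploymentsRequest(&ListDeploymentsInput{})
+//	req.Handlers.Send.PushBack(func(r *request.Request) {
+//		// e.g. inspect r.HTTPRequest just before it goes out
+//	})
+//	if err := req.Send(); err == nil {
+//		_ = out // out is populated once Send returns successfully
+//	}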
+func (c *CodeDeploy) RegisterOnPremisesInstanceRequest(input *RegisterOnPremisesInstanceInput) (req *request.Request, output *RegisterOnPremisesInstanceOutput) { + op := &request.Operation{ + Name: opRegisterOnPremisesInstance, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &RegisterOnPremisesInstanceInput{} + } + + req = c.newRequest(op, input, output) + output = &RegisterOnPremisesInstanceOutput{} + req.Data = output + return +} + +// Registers an on-premises instance. +func (c *CodeDeploy) RegisterOnPremisesInstance(input *RegisterOnPremisesInstanceInput) (*RegisterOnPremisesInstanceOutput, error) { + req, out := c.RegisterOnPremisesInstanceRequest(input) + err := req.Send() + return out, err +} + +const opRemoveTagsFromOnPremisesInstances = "RemoveTagsFromOnPremisesInstances" + +// RemoveTagsFromOnPremisesInstancesRequest generates a request for the RemoveTagsFromOnPremisesInstances operation. +func (c *CodeDeploy) RemoveTagsFromOnPremisesInstancesRequest(input *RemoveTagsFromOnPremisesInstancesInput) (req *request.Request, output *RemoveTagsFromOnPremisesInstancesOutput) { + op := &request.Operation{ + Name: opRemoveTagsFromOnPremisesInstances, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &RemoveTagsFromOnPremisesInstancesInput{} + } + + req = c.newRequest(op, input, output) + output = &RemoveTagsFromOnPremisesInstancesOutput{} + req.Data = output + return +} + +// Removes one or more tags from one or more on-premises instances. +func (c *CodeDeploy) RemoveTagsFromOnPremisesInstances(input *RemoveTagsFromOnPremisesInstancesInput) (*RemoveTagsFromOnPremisesInstancesOutput, error) { + req, out := c.RemoveTagsFromOnPremisesInstancesRequest(input) + err := req.Send() + return out, err +} + +const opStopDeployment = "StopDeployment" + +// StopDeploymentRequest generates a request for the StopDeployment operation. +func (c *CodeDeploy) StopDeploymentRequest(input *StopDeploymentInput) (req *request.Request, output *StopDeploymentOutput) { + op := &request.Operation{ + Name: opStopDeployment, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &StopDeploymentInput{} + } + + req = c.newRequest(op, input, output) + output = &StopDeploymentOutput{} + req.Data = output + return +} + +// Attempts to stop an ongoing deployment. +func (c *CodeDeploy) StopDeployment(input *StopDeploymentInput) (*StopDeploymentOutput, error) { + req, out := c.StopDeploymentRequest(input) + err := req.Send() + return out, err +} + +const opUpdateApplication = "UpdateApplication" + +// UpdateApplicationRequest generates a request for the UpdateApplication operation. +func (c *CodeDeploy) UpdateApplicationRequest(input *UpdateApplicationInput) (req *request.Request, output *UpdateApplicationOutput) { + op := &request.Operation{ + Name: opUpdateApplication, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UpdateApplicationInput{} + } + + req = c.newRequest(op, input, output) + output = &UpdateApplicationOutput{} + req.Data = output + return +} + +// Changes an existing application's name. +func (c *CodeDeploy) UpdateApplication(input *UpdateApplicationInput) (*UpdateApplicationOutput, error) { + req, out := c.UpdateApplicationRequest(input) + err := req.Send() + return out, err +} + +const opUpdateDeploymentGroup = "UpdateDeploymentGroup" + +// UpdateDeploymentGroupRequest generates a request for the UpdateDeploymentGroup operation. 
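+
+// Editor's note: an editorial, hypothetical sketch, not part of the vendored
+// SDK. A plain one-shot call wraps the Request/Send pair; the deployment ID
+// below is illustrative and aws.String is the pointer helper from the aws
+// package:
+//
+//	out, err := svc.StopDeployment(&StopDeploymentInput{
+//		DeploymentId: aws.String("d-EXAMPLE"),
+//	})
+//	if err != nil {
+//		// handle the request error
+//	}
+//	_ = out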
+func (c *CodeDeploy) UpdateDeploymentGroupRequest(input *UpdateDeploymentGroupInput) (req *request.Request, output *UpdateDeploymentGroupOutput) {
+	op := &request.Operation{
+		Name:       opUpdateDeploymentGroup,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &UpdateDeploymentGroupInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &UpdateDeploymentGroupOutput{}
+	req.Data = output
+	return
+}
+
+// Changes information about an existing deployment group.
+func (c *CodeDeploy) UpdateDeploymentGroup(input *UpdateDeploymentGroupInput) (*UpdateDeploymentGroupOutput, error) {
+	req, out := c.UpdateDeploymentGroupRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+// Represents the input of an add tags to on-premises instances operation.
+type AddTagsToOnPremisesInstancesInput struct {
+	_ struct{} `type:"structure"`
+
+	// The names of the on-premises instances to add tags to.
+	InstanceNames []*string `locationName:"instanceNames" type:"list" required:"true"`
+
+	// The tag key-value pairs to add to the on-premises instances.
+	//
+	// Keys and values are both required. Keys cannot be null or empty strings.
+	// Value-only tags are not allowed.
+	Tags []*Tag `locationName:"tags" type:"list" required:"true"`
+}
+
+// String returns the string representation
+func (s AddTagsToOnPremisesInstancesInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s AddTagsToOnPremisesInstancesInput) GoString() string {
+	return s.String()
+}
+
+type AddTagsToOnPremisesInstancesOutput struct {
+	_ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s AddTagsToOnPremisesInstancesOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s AddTagsToOnPremisesInstancesOutput) GoString() string {
+	return s.String()
+}
+
+// Information about an application.
+type ApplicationInfo struct {
+	_ struct{} `type:"structure"`
+
+	// The application ID.
+	ApplicationId *string `locationName:"applicationId" type:"string"`
+
+	// The application name.
+	ApplicationName *string `locationName:"applicationName" min:"1" type:"string"`
+
+	// The time that the application was created.
+	CreateTime *time.Time `locationName:"createTime" type:"timestamp" timestampFormat:"unix"`
+
+	// True if the user has authenticated with GitHub for the specified application;
+	// otherwise, false.
+	LinkedToGitHub *bool `locationName:"linkedToGitHub" type:"boolean"`
+}
+
+// String returns the string representation
+func (s ApplicationInfo) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ApplicationInfo) GoString() string {
+	return s.String()
+}
+
+// Information about an Auto Scaling group.
+type AutoScalingGroup struct {
+	_ struct{} `type:"structure"`
+
+	// An Auto Scaling lifecycle event hook name.
+	Hook *string `locationName:"hook" type:"string"`
+
+	// The Auto Scaling group name.
+	Name *string `locationName:"name" type:"string"`
+}
+
+// String returns the string representation
+func (s AutoScalingGroup) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s AutoScalingGroup) GoString() string {
+	return s.String()
+}
+
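+// Editor's note: an editorial, hypothetical sketch, not part of the vendored
+// SDK, of building the tagging input above. Tag (a key/value pair) is declared
+// elsewhere in this file; aws.String is the pointer helper from the aws
+// package:
+//
+//	_, err := svc.AddTagsToOnPremisesInstances(&AddTagsToOnPremisesInstancesInput{
+//		InstanceNames: []*string{aws.String("staging-box-1")},
+//		Tags: []*Tag{
+//			{Key: aws.String("Environment"), Value: aws.String("staging")},
+//		},
+//	})
+
+// Represents the input of a batch get applications operation.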
+type BatchGetApplicationsInput struct {
+	_ struct{} `type:"structure"`
+
+	// A list of application names, with multiple application names separated by
+	// spaces.
+	ApplicationNames []*string `locationName:"applicationNames" type:"list"`
+}
+
+// String returns the string representation
+func (s BatchGetApplicationsInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s BatchGetApplicationsInput) GoString() string {
+	return s.String()
+}
+
+// Represents the output of a batch get applications operation.
+type BatchGetApplicationsOutput struct {
+	_ struct{} `type:"structure"`
+
+	// Information about the applications.
+	ApplicationsInfo []*ApplicationInfo `locationName:"applicationsInfo" type:"list"`
+}
+
+// String returns the string representation
+func (s BatchGetApplicationsOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s BatchGetApplicationsOutput) GoString() string {
+	return s.String()
+}
+
+// Represents the input of a batch get deployments operation.
+type BatchGetDeploymentsInput struct {
+	_ struct{} `type:"structure"`
+
+	// A list of deployment IDs, with multiple deployment IDs separated by spaces.
+	DeploymentIds []*string `locationName:"deploymentIds" type:"list"`
+}
+
+// String returns the string representation
+func (s BatchGetDeploymentsInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s BatchGetDeploymentsInput) GoString() string {
+	return s.String()
+}
+
+// Represents the output of a batch get deployments operation.
+type BatchGetDeploymentsOutput struct {
+	_ struct{} `type:"structure"`
+
+	// Information about the deployments.
+	DeploymentsInfo []*DeploymentInfo `locationName:"deploymentsInfo" type:"list"`
+}
+
+// String returns the string representation
+func (s BatchGetDeploymentsOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s BatchGetDeploymentsOutput) GoString() string {
+	return s.String()
+}
+
+// Represents the input of a batch get on-premises instances operation.
+type BatchGetOnPremisesInstancesInput struct {
+	_ struct{} `type:"structure"`
+
+	// The names of the on-premises instances to get information about.
+	InstanceNames []*string `locationName:"instanceNames" type:"list"`
+}
+
+// String returns the string representation
+func (s BatchGetOnPremisesInstancesInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s BatchGetOnPremisesInstancesInput) GoString() string {
+	return s.String()
+}
+
+// Represents the output of a batch get on-premises instances operation.
+type BatchGetOnPremisesInstancesOutput struct {
+	_ struct{} `type:"structure"`
+
+	// Information about the on-premises instances.
+	InstanceInfos []*InstanceInfo `locationName:"instanceInfos" type:"list"`
+}
+
+// String returns the string representation
+func (s BatchGetOnPremisesInstancesOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s BatchGetOnPremisesInstancesOutput) GoString() string {
+	return s.String()
+}
+
+// Represents the input of a create application operation.
+type CreateApplicationInput struct {
+	_ struct{} `type:"structure"`
+
+	// The name of the application. This name must be unique within the applicable
+	// IAM user or AWS account.
+ ApplicationName *string `locationName:"applicationName" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s CreateApplicationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateApplicationInput) GoString() string { + return s.String() +} + +// Represents the output of a create application operation. +type CreateApplicationOutput struct { + _ struct{} `type:"structure"` + + // A unique application ID. + ApplicationId *string `locationName:"applicationId" type:"string"` +} + +// String returns the string representation +func (s CreateApplicationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateApplicationOutput) GoString() string { + return s.String() +} + +// Represents the input of a create deployment configuration operation. +type CreateDeploymentConfigInput struct { + _ struct{} `type:"structure"` + + // The name of the deployment configuration to create. + DeploymentConfigName *string `locationName:"deploymentConfigName" min:"1" type:"string" required:"true"` + + // The minimum number of healthy instances that should be available at any time + // during the deployment. There are two parameters expected in the input: type + // and value. + // + // The type parameter takes either of the following values: + // + // HOST_COUNT: The value parameter represents the minimum number of healthy + // instances, as an absolute value. FLEET_PERCENT: The value parameter represents + // the minimum number of healthy instances, as a percentage of the total number + // of instances in the deployment. If you specify FLEET_PERCENT, then at the + // start of the deployment AWS CodeDeploy converts the percentage to the equivalent + // number of instances and rounds fractional instances up. The value parameter + // takes an integer. + // + // For example, to set a minimum of 95% healthy instances, specify a type of + // FLEET_PERCENT and a value of 95. + MinimumHealthyHosts *MinimumHealthyHosts `locationName:"minimumHealthyHosts" type:"structure"` +} + +// String returns the string representation +func (s CreateDeploymentConfigInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateDeploymentConfigInput) GoString() string { + return s.String() +} + +// Represents the output of a create deployment configuration operation. +type CreateDeploymentConfigOutput struct { + _ struct{} `type:"structure"` + + // A unique deployment configuration ID. + DeploymentConfigId *string `locationName:"deploymentConfigId" type:"string"` +} + +// String returns the string representation +func (s CreateDeploymentConfigOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateDeploymentConfigOutput) GoString() string { + return s.String() +} + +// Represents the input of a create deployment group operation. +type CreateDeploymentGroupInput struct { + _ struct{} `type:"structure"` + + // The name of an existing AWS CodeDeploy application associated with the applicable + // IAM user or AWS account. + ApplicationName *string `locationName:"applicationName" min:"1" type:"string" required:"true"` + + // A list of associated Auto Scaling groups. 
+	AutoScalingGroups []*string `locationName:"autoScalingGroups" type:"list"`
+
+	// If specified, the deployment configuration name must be one of the predefined
+	// values, or it can be a custom deployment configuration:
+	//
+	// CodeDeployDefault.AllAtOnce deploys an application revision to up to all
+	// of the instances at once. The overall deployment succeeds if the application
+	// revision deploys to at least one of the instances. The overall deployment
+	// fails after the application revision fails to deploy to all of the instances.
+	// For example, for 9 instances, deploy to up to all 9 instances at once. The
+	// overall deployment succeeds if any of the 9 instances is successfully deployed
+	// to, and it fails if all 9 instances fail to be deployed to. CodeDeployDefault.HalfAtATime
+	// deploys to up to half of the instances at a time (with fractions rounded
+	// down). The overall deployment succeeds if the application revision deploys
+	// to at least half of the instances (with fractions rounded up); otherwise,
+	// the deployment fails. For example, for 9 instances, deploy to up to 4 instances
+	// at a time. The overall deployment succeeds if 5 or more instances are successfully
+	// deployed to; otherwise, the deployment fails. Note that the deployment may
+	// successfully deploy to some instances, even if the overall deployment fails.
+	// CodeDeployDefault.OneAtATime deploys the application revision to only one
+	// of the instances at a time. The overall deployment succeeds if the application
+	// revision deploys to all of the instances. The overall deployment fails after
+	// the application revision first fails to deploy to any one instance. For
+	// example, for 9 instances, deploy to one instance at a time. The overall deployment
+	// succeeds if all 9 instances are successfully deployed to, and it fails if
+	// any one of the 9 instances fails to be deployed to. Note that the deployment
+	// may successfully deploy to some instances, even if the overall deployment
+	// fails. This is the default deployment configuration if a configuration isn't
+	// specified for either the deployment or the deployment group. To create a
+	// custom deployment configuration, call the create deployment configuration
+	// operation.
+	DeploymentConfigName *string `locationName:"deploymentConfigName" min:"1" type:"string"`
+
+	// The name of an existing deployment group for the specified application.
+	DeploymentGroupName *string `locationName:"deploymentGroupName" min:"1" type:"string" required:"true"`
+
+	// The Amazon EC2 tags to filter on.
+	Ec2TagFilters []*EC2TagFilter `locationName:"ec2TagFilters" type:"list"`
+
+	// The on-premises instance tags to filter on.
+	OnPremisesInstanceTagFilters []*TagFilter `locationName:"onPremisesInstanceTagFilters" type:"list"`
+
+	// A service role ARN that allows AWS CodeDeploy to act on the user's behalf
+	// when interacting with AWS services.
+	ServiceRoleArn *string `locationName:"serviceRoleArn" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s CreateDeploymentGroupInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreateDeploymentGroupInput) GoString() string {
+	return s.String()
+}
+
+// Represents the output of a create deployment group operation.
+type CreateDeploymentGroupOutput struct {
+	_ struct{} `type:"structure"`
+
+	// A unique deployment group ID.
+	DeploymentGroupId *string `locationName:"deploymentGroupId" type:"string"`
+}
+
+// String returns the string representation
+func (s CreateDeploymentGroupOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreateDeploymentGroupOutput) GoString() string {
+	return s.String()
+}
+
+// Represents the input of a create deployment operation.
+type CreateDeploymentInput struct {
+	_ struct{} `type:"structure"`
+
+	// The name of an existing AWS CodeDeploy application associated with the applicable
+	// IAM user or AWS account.
+	ApplicationName *string `locationName:"applicationName" min:"1" type:"string" required:"true"`
+
+	// The name of an existing deployment configuration associated with the applicable
+	// IAM user or AWS account.
+	//
+	// If not specified, the value configured in the deployment group will be used
+	// as the default. If the deployment group does not have a deployment configuration
+	// associated with it, then CodeDeployDefault.OneAtATime will be used by default.
+	DeploymentConfigName *string `locationName:"deploymentConfigName" min:"1" type:"string"`
+
+	// The deployment group's name.
+	DeploymentGroupName *string `locationName:"deploymentGroupName" min:"1" type:"string"`
+
+	// A comment about the deployment.
+	Description *string `locationName:"description" type:"string"`
+
+	// If set to true, then if the deployment causes the ApplicationStop deployment
+	// lifecycle event to fail for a specific instance, the deployment will not
+	// be considered to have failed for that instance at that point and will continue
+	// on to the BeforeInstall deployment lifecycle event.
+	//
+	// If set to false or not specified, then if the deployment causes the ApplicationStop
+	// deployment lifecycle event to fail for a specific instance, the deployment
+	// will stop for that instance, and the deployment to that instance will be
+	// considered to have failed.
+	IgnoreApplicationStopFailures *bool `locationName:"ignoreApplicationStopFailures" type:"boolean"`
+
+	// The type of revision to deploy, along with information about the revision's
+	// location.
+	Revision *RevisionLocation `locationName:"revision" type:"structure"`
+}
+
+// String returns the string representation
+func (s CreateDeploymentInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreateDeploymentInput) GoString() string {
+	return s.String()
+}
+
+// Represents the output of a create deployment operation.
+type CreateDeploymentOutput struct {
+	_ struct{} `type:"structure"`
+
+	// A unique deployment ID.
+	DeploymentId *string `locationName:"deploymentId" type:"string"`
+}
+
+// String returns the string representation
+func (s CreateDeploymentOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreateDeploymentOutput) GoString() string {
+	return s.String()
+}
+
+// Represents the input of a delete application operation.
+type DeleteApplicationInput struct {
+	_ struct{} `type:"structure"`
+
+	// The name of an existing AWS CodeDeploy application associated with the applicable
+	// IAM user or AWS account.
+	ApplicationName *string `locationName:"applicationName" min:"1" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s DeleteApplicationInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteApplicationInput) GoString() string {
+	return s.String()
+}
+
+type DeleteApplicationOutput struct {
+	_ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s DeleteApplicationOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteApplicationOutput) GoString() string {
+	return s.String()
+}
+
+// Represents the input of a delete deployment configuration operation.
+type DeleteDeploymentConfigInput struct {
+	_ struct{} `type:"structure"`
+
+	// The name of an existing deployment configuration associated with the applicable
+	// IAM user or AWS account.
+	DeploymentConfigName *string `locationName:"deploymentConfigName" min:"1" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s DeleteDeploymentConfigInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteDeploymentConfigInput) GoString() string {
+	return s.String()
+}
+
+type DeleteDeploymentConfigOutput struct {
+	_ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s DeleteDeploymentConfigOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteDeploymentConfigOutput) GoString() string {
+	return s.String()
+}
+
+// Represents the input of a delete deployment group operation.
+type DeleteDeploymentGroupInput struct {
+	_ struct{} `type:"structure"`
+
+	// The name of an existing AWS CodeDeploy application associated with the applicable
+	// IAM user or AWS account.
+	ApplicationName *string `locationName:"applicationName" min:"1" type:"string" required:"true"`
+
+	// The name of an existing deployment group for the specified application.
+	DeploymentGroupName *string `locationName:"deploymentGroupName" min:"1" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s DeleteDeploymentGroupInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteDeploymentGroupInput) GoString() string {
+	return s.String()
+}
+
+// Represents the output of a delete deployment group operation.
+type DeleteDeploymentGroupOutput struct {
+	_ struct{} `type:"structure"`
+
+	// If the output contains no data, and the corresponding deployment group contained
+	// at least one Auto Scaling group, AWS CodeDeploy successfully removed all
+	// corresponding Auto Scaling lifecycle event hooks from the Amazon EC2 instances
+	// in the Auto Scaling group. If the output does contain data, AWS CodeDeploy
+	// could not remove some Auto Scaling lifecycle event hooks from the Amazon
+	// EC2 instances in the Auto Scaling group.
+	HooksNotCleanedUp []*AutoScalingGroup `locationName:"hooksNotCleanedUp" type:"list"`
+}
+
+// String returns the string representation
+func (s DeleteDeploymentGroupOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteDeploymentGroupOutput) GoString() string {
+	return s.String()
+}
+
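+// Editor's note: an editorial, hypothetical sketch, not part of the vendored
+// SDK, showing why HooksNotCleanedUp above is worth checking: a non-empty list
+// means some Auto Scaling lifecycle hooks could not be removed. Assuming svc
+// is a configured *CodeDeploy client:
+//
+//	out, err := svc.DeleteDeploymentGroup(&DeleteDeploymentGroupInput{
+//		ApplicationName:     aws.String("MyApp"),
+//		DeploymentGroupName: aws.String("MyGroup"),
+//	})
+//	if err == nil && len(out.HooksNotCleanedUp) > 0 {
+//		// clean up the remaining lifecycle hooks manually
+//	}
+
+// Information about a deployment configuration.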
+type DeploymentConfigInfo struct { + _ struct{} `type:"structure"` + + // The time that the deployment configuration was created. + CreateTime *time.Time `locationName:"createTime" type:"timestamp" timestampFormat:"unix"` + + // The deployment configuration ID. + DeploymentConfigId *string `locationName:"deploymentConfigId" type:"string"` + + // The deployment configuration name. + DeploymentConfigName *string `locationName:"deploymentConfigName" min:"1" type:"string"` + + // Information about the number or percentage of minimum healthy instances. + MinimumHealthyHosts *MinimumHealthyHosts `locationName:"minimumHealthyHosts" type:"structure"` +} + +// String returns the string representation +func (s DeploymentConfigInfo) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeploymentConfigInfo) GoString() string { + return s.String() +} + +// Information about a deployment group. +type DeploymentGroupInfo struct { + _ struct{} `type:"structure"` + + // The application name. + ApplicationName *string `locationName:"applicationName" min:"1" type:"string"` + + // A list of associated Auto Scaling groups. + AutoScalingGroups []*AutoScalingGroup `locationName:"autoScalingGroups" type:"list"` + + // The deployment configuration name. + DeploymentConfigName *string `locationName:"deploymentConfigName" min:"1" type:"string"` + + // The deployment group ID. + DeploymentGroupId *string `locationName:"deploymentGroupId" type:"string"` + + // The deployment group name. + DeploymentGroupName *string `locationName:"deploymentGroupName" min:"1" type:"string"` + + // The Amazon EC2 tags to filter on. + Ec2TagFilters []*EC2TagFilter `locationName:"ec2TagFilters" type:"list"` + + // The on-premises instance tags to filter on. + OnPremisesInstanceTagFilters []*TagFilter `locationName:"onPremisesInstanceTagFilters" type:"list"` + + // A service role ARN. + ServiceRoleArn *string `locationName:"serviceRoleArn" type:"string"` + + // Information about the deployment group's target revision, including the revision's + // type and its location. + TargetRevision *RevisionLocation `locationName:"targetRevision" type:"structure"` +} + +// String returns the string representation +func (s DeploymentGroupInfo) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeploymentGroupInfo) GoString() string { + return s.String() +} + +// Information about a deployment. +type DeploymentInfo struct { + _ struct{} `type:"structure"` + + // The application name. + ApplicationName *string `locationName:"applicationName" min:"1" type:"string"` + + // A timestamp indicating when the deployment was completed. + CompleteTime *time.Time `locationName:"completeTime" type:"timestamp" timestampFormat:"unix"` + + // A timestamp indicating when the deployment was created. + CreateTime *time.Time `locationName:"createTime" type:"timestamp" timestampFormat:"unix"` + + // How the deployment was created: + // + // user: A user created the deployment. autoscaling: Auto Scaling created + // the deployment. + Creator *string `locationName:"creator" type:"string" enum:"DeploymentCreator"` + + // The deployment configuration name. + DeploymentConfigName *string `locationName:"deploymentConfigName" min:"1" type:"string"` + + // The deployment group name. + DeploymentGroupName *string `locationName:"deploymentGroupName" min:"1" type:"string"` + + // The deployment ID. 
+	DeploymentId *string `locationName:"deploymentId" type:"string"`
+
+	// A summary of the deployment status of the instances in the deployment.
+	DeploymentOverview *DeploymentOverview `locationName:"deploymentOverview" type:"structure"`
+
+	// A comment about the deployment.
+	Description *string `locationName:"description" type:"string"`
+
+	// Information about any error associated with this deployment.
+	ErrorInformation *ErrorInformation `locationName:"errorInformation" type:"structure"`
+
+	// If true, then if the deployment causes the ApplicationStop deployment lifecycle
+	// event to fail for a specific instance, the deployment will not be considered
+	// to have failed for that instance at that point and will continue on to the
+	// BeforeInstall deployment lifecycle event.
+	//
+	// If false or not specified, then if the deployment causes the ApplicationStop
+	// deployment lifecycle event to fail for a specific instance, the deployment
+	// will stop for that instance, and the deployment to that instance will be
+	// considered to have failed.
+	IgnoreApplicationStopFailures *bool `locationName:"ignoreApplicationStopFailures" type:"boolean"`
+
+	// Information about the location of application artifacts that are stored and
+	// the service to retrieve them from.
+	Revision *RevisionLocation `locationName:"revision" type:"structure"`
+
+	// A timestamp indicating when the deployment began deploying to the deployment
+	// group.
+	//
+	// Note that in some cases, the reported value of the start time may be later
+	// than the complete time. This is due to differences in the clock settings
+	// of various back-end servers that participate in the overall deployment process.
+	StartTime *time.Time `locationName:"startTime" type:"timestamp" timestampFormat:"unix"`
+
+	// The current state of the deployment as a whole.
+	Status *string `locationName:"status" type:"string" enum:"DeploymentStatus"`
+}
+
+// String returns the string representation
+func (s DeploymentInfo) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeploymentInfo) GoString() string {
+	return s.String()
+}
+
+// Information about the deployment status of the instances in the deployment.
+type DeploymentOverview struct {
+	_ struct{} `type:"structure"`
+
+	// The number of instances that have failed in the deployment.
+	Failed *int64 `type:"long"`
+
+	// The number of instances that are in progress in the deployment.
+	InProgress *int64 `type:"long"`
+
+	// The number of instances that are pending in the deployment.
+	Pending *int64 `type:"long"`
+
+	// The number of instances that have been skipped in the deployment.
+	Skipped *int64 `type:"long"`
+
+	// The number of instances that have succeeded in the deployment.
+	Succeeded *int64 `type:"long"`
+}
+
+// String returns the string representation
+func (s DeploymentOverview) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeploymentOverview) GoString() string {
+	return s.String()
+}
+
+// Represents the input of a deregister on-premises instance operation.
+type DeregisterOnPremisesInstanceInput struct {
+	_ struct{} `type:"structure"`
+
+	// The name of the on-premises instance to deregister.
+	InstanceName *string `locationName:"instanceName" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s DeregisterOnPremisesInstanceInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeregisterOnPremisesInstanceInput) GoString() string {
+	return s.String()
+}
+
+type DeregisterOnPremisesInstanceOutput struct {
+	_ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s DeregisterOnPremisesInstanceOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeregisterOnPremisesInstanceOutput) GoString() string {
+	return s.String()
+}
+
+// Diagnostic information about executable scripts that are part of a deployment.
+type Diagnostics struct {
+	_ struct{} `type:"structure"`
+
+	// The associated error code:
+	//
+	// Success: The specified script ran. ScriptMissing: The specified script
+	// was not found in the specified location. ScriptNotExecutable: The specified
+	// script is not a recognized executable file type. ScriptTimedOut: The specified
+	// script did not finish running in the specified time period. ScriptFailed:
+	// The specified script failed to run as expected. UnknownError: The specified
+	// script did not run for an unknown reason.
+	ErrorCode *string `locationName:"errorCode" type:"string" enum:"LifecycleErrorCode"`
+
+	// The last portion of the associated diagnostic log.
+	LogTail *string `locationName:"logTail" type:"string"`
+
+	// The message associated with the error.
+	Message *string `locationName:"message" type:"string"`
+
+	// The name of the script.
+	ScriptName *string `locationName:"scriptName" type:"string"`
+}
+
+// String returns the string representation
+func (s Diagnostics) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s Diagnostics) GoString() string {
+	return s.String()
+}
+
+// Information about a tag filter.
+type EC2TagFilter struct {
+	_ struct{} `type:"structure"`
+
+	// The tag filter key.
+	Key *string `type:"string"`
+
+	// The tag filter type:
+	//
+	// KEY_ONLY: Key only. VALUE_ONLY: Value only. KEY_AND_VALUE: Key and value.
+	Type *string `type:"string" enum:"EC2TagFilterType"`
+
+	// The tag filter value.
+	Value *string `type:"string"`
+}
+
+// String returns the string representation
+func (s EC2TagFilter) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s EC2TagFilter) GoString() string {
+	return s.String()
+}
+
+// Information about a deployment error.
+type ErrorInformation struct {
+	_ struct{} `type:"structure"`
+
+	// The error code:
+	//
+	// APPLICATION_MISSING: The application was missing. Note that this error
+	// code will most likely be raised if the application is deleted after the deployment
+	// is created but before it starts. DEPLOYMENT_GROUP_MISSING: The deployment
+	// group was missing. Note that this error code will most likely be raised if
+	// the deployment group is deleted after the deployment is created but before
+	// it starts. HEALTH_CONSTRAINTS: The deployment failed on too many instances
+	// to be able to successfully deploy within the specified instance health constraints.
+	// HEALTH_CONSTRAINTS_INVALID: The revision can never successfully deploy within
+	// the instance health constraints as specified. IAM_ROLE_MISSING: The service
+	// role cannot be accessed. IAM_ROLE_PERMISSIONS: The service role does not
+	// have the correct permissions. INTERNAL_ERROR: There was an internal error.
+	// NO_EC2_SUBSCRIPTION: The calling account is not subscribed to the Amazon
+	// EC2 service. NO_INSTANCES: No instances were specified, or no instances can
+	// be found. OVER_MAX_INSTANCES: The maximum number of instances was exceeded.
+	// THROTTLED: The operation was throttled because the calling account exceeded
+	// the throttling limits of one or more AWS services. TIMEOUT: The deployment
+	// has timed out. REVISION_MISSING: The revision ID was missing. Note that this
+	// error code will most likely be raised if the revision is deleted after the
+	// deployment is created but before it starts.
+	Code *string `locationName:"code" type:"string" enum:"ErrorCode"`
+
+	// An accompanying error message.
+	Message *string `locationName:"message" type:"string"`
+}
+
+// String returns the string representation
+func (s ErrorInformation) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ErrorInformation) GoString() string {
+	return s.String()
+}
+
+// Information about an application revision.
+type GenericRevisionInfo struct {
+	_ struct{} `type:"structure"`
+
+	// A list of deployment groups that use this revision.
+	DeploymentGroups []*string `locationName:"deploymentGroups" type:"list"`
+
+	// A comment about the revision.
+	Description *string `locationName:"description" type:"string"`
+
+	// When the revision was first used by AWS CodeDeploy.
+	FirstUsedTime *time.Time `locationName:"firstUsedTime" type:"timestamp" timestampFormat:"unix"`
+
+	// When the revision was last used by AWS CodeDeploy.
+	LastUsedTime *time.Time `locationName:"lastUsedTime" type:"timestamp" timestampFormat:"unix"`
+
+	// When the revision was registered with AWS CodeDeploy.
+	RegisterTime *time.Time `locationName:"registerTime" type:"timestamp" timestampFormat:"unix"`
+}
+
+// String returns the string representation
+func (s GenericRevisionInfo) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GenericRevisionInfo) GoString() string {
+	return s.String()
+}
+
+// Represents the input of a get application operation.
+type GetApplicationInput struct {
+	_ struct{} `type:"structure"`
+
+	// The name of an existing AWS CodeDeploy application associated with the applicable
+	// IAM user or AWS account.
+	ApplicationName *string `locationName:"applicationName" min:"1" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s GetApplicationInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetApplicationInput) GoString() string {
+	return s.String()
+}
+
+// Represents the output of a get application operation.
+type GetApplicationOutput struct {
+	_ struct{} `type:"structure"`
+
+	// Information about the application.
+	Application *ApplicationInfo `locationName:"application" type:"structure"`
+}
+
+// String returns the string representation
+func (s GetApplicationOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetApplicationOutput) GoString() string {
+	return s.String()
+}
+
+// Represents the input of a get application revision operation.
+type GetApplicationRevisionInput struct {
+	_ struct{} `type:"structure"`
+
+	// The name of the application that corresponds to the revision.
+ ApplicationName *string `locationName:"applicationName" min:"1" type:"string" required:"true"` + + // Information about the application revision to get, including the revision's + // type and its location. + Revision *RevisionLocation `locationName:"revision" type:"structure" required:"true"` +} + +// String returns the string representation +func (s GetApplicationRevisionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetApplicationRevisionInput) GoString() string { + return s.String() +} + +// Represents the output of a get application revision operation. +type GetApplicationRevisionOutput struct { + _ struct{} `type:"structure"` + + // The name of the application that corresponds to the revision. + ApplicationName *string `locationName:"applicationName" min:"1" type:"string"` + + // Additional information about the revision, including the revision's type + // and its location. + Revision *RevisionLocation `locationName:"revision" type:"structure"` + + // General information about the revision. + RevisionInfo *GenericRevisionInfo `locationName:"revisionInfo" type:"structure"` +} + +// String returns the string representation +func (s GetApplicationRevisionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetApplicationRevisionOutput) GoString() string { + return s.String() +} + +// Represents the input of a get deployment configuration operation. +type GetDeploymentConfigInput struct { + _ struct{} `type:"structure"` + + // The name of an existing deployment configuration associated with the applicable + // IAM user or AWS account. + DeploymentConfigName *string `locationName:"deploymentConfigName" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetDeploymentConfigInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetDeploymentConfigInput) GoString() string { + return s.String() +} + +// Represents the output of a get deployment configuration operation. +type GetDeploymentConfigOutput struct { + _ struct{} `type:"structure"` + + // Information about the deployment configuration. + DeploymentConfigInfo *DeploymentConfigInfo `locationName:"deploymentConfigInfo" type:"structure"` +} + +// String returns the string representation +func (s GetDeploymentConfigOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetDeploymentConfigOutput) GoString() string { + return s.String() +} + +// Represents the input of a get deployment group operation. +type GetDeploymentGroupInput struct { + _ struct{} `type:"structure"` + + // The name of an existing AWS CodeDeploy application associated with the applicable + // IAM user or AWS account. + ApplicationName *string `locationName:"applicationName" min:"1" type:"string" required:"true"` + + // The name of an existing deployment group for the specified application. + DeploymentGroupName *string `locationName:"deploymentGroupName" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetDeploymentGroupInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetDeploymentGroupInput) GoString() string { + return s.String() +} + +// Represents the output of a get deployment group operation. 
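+
+// Editor's note: an editorial, hypothetical sketch, not part of the vendored
+// SDK, of fetching a deployment group and reading its target revision through
+// the output type that follows. Assuming svc is a configured *CodeDeploy
+// client:
+//
+//	out, err := svc.GetDeploymentGroup(&GetDeploymentGroupInput{
+//		ApplicationName:     aws.String("MyApp"),
+//		DeploymentGroupName: aws.String("MyGroup"),
+//	})
+//	if err == nil && out.DeploymentGroupInfo != nil {
+//		_ = out.DeploymentGroupInfo.TargetRevision
+//	}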
+type GetDeploymentGroupOutput struct {
+	_ struct{} `type:"structure"`
+
+	// Information about the deployment group.
+	DeploymentGroupInfo *DeploymentGroupInfo `locationName:"deploymentGroupInfo" type:"structure"`
+}
+
+// String returns the string representation
+func (s GetDeploymentGroupOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetDeploymentGroupOutput) GoString() string {
+	return s.String()
+}
+
+// Represents the input of a get deployment operation.
+type GetDeploymentInput struct {
+	_ struct{} `type:"structure"`
+
+	// An existing deployment ID associated with the applicable IAM user or AWS
+	// account.
+	DeploymentId *string `locationName:"deploymentId" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s GetDeploymentInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetDeploymentInput) GoString() string {
+	return s.String()
+}
+
+// Represents the input of a get deployment instance operation.
+type GetDeploymentInstanceInput struct {
+	_ struct{} `type:"structure"`
+
+	// The unique ID of a deployment.
+	DeploymentId *string `locationName:"deploymentId" type:"string" required:"true"`
+
+	// The unique ID of an instance in the deployment's deployment group.
+	InstanceId *string `locationName:"instanceId" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s GetDeploymentInstanceInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetDeploymentInstanceInput) GoString() string {
+	return s.String()
+}
+
+// Represents the output of a get deployment instance operation.
+type GetDeploymentInstanceOutput struct {
+	_ struct{} `type:"structure"`
+
+	// Information about the instance.
+	InstanceSummary *InstanceSummary `locationName:"instanceSummary" type:"structure"`
+}
+
+// String returns the string representation
+func (s GetDeploymentInstanceOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetDeploymentInstanceOutput) GoString() string {
+	return s.String()
+}
+
+// Represents the output of a get deployment operation.
+type GetDeploymentOutput struct {
+	_ struct{} `type:"structure"`
+
+	// Information about the deployment.
+	DeploymentInfo *DeploymentInfo `locationName:"deploymentInfo" type:"structure"`
+}
+
+// String returns the string representation
+func (s GetDeploymentOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetDeploymentOutput) GoString() string {
+	return s.String()
+}
+
+// Represents the input of a get on-premises instance operation.
+type GetOnPremisesInstanceInput struct {
+	_ struct{} `type:"structure"`
+
+	// The name of the on-premises instance to get information about.
+	InstanceName *string `locationName:"instanceName" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s GetOnPremisesInstanceInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetOnPremisesInstanceInput) GoString() string {
+	return s.String()
+}
+
+// Represents the output of a get on-premises instance operation.
+type GetOnPremisesInstanceOutput struct {
+	_ struct{} `type:"structure"`
+
+	// Information about the on-premises instance.
+	InstanceInfo *InstanceInfo `locationName:"instanceInfo" type:"structure"`
+}
+
+// String returns the string representation
+func (s GetOnPremisesInstanceOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetOnPremisesInstanceOutput) GoString() string {
+	return s.String()
+}
+
+// Information about the location of application artifacts that are stored in
+// GitHub.
+type GitHubLocation struct {
+	_ struct{} `type:"structure"`
+
+	// The SHA1 commit ID of the GitHub commit that represents the bundled artifacts
+	// for the application revision.
+	CommitId *string `locationName:"commitId" type:"string"`
+
+	// The GitHub account and repository pair that stores a reference to the commit
+	// that represents the bundled artifacts for the application revision.
+	//
+	// Specified as account/repository.
+	Repository *string `locationName:"repository" type:"string"`
+}
+
+// String returns the string representation
+func (s GitHubLocation) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GitHubLocation) GoString() string {
+	return s.String()
+}
+
+// Information about an on-premises instance.
+type InstanceInfo struct {
+	_ struct{} `type:"structure"`
+
+	// If the on-premises instance was deregistered, the time that the on-premises
+	// instance was deregistered.
+	DeregisterTime *time.Time `locationName:"deregisterTime" type:"timestamp" timestampFormat:"unix"`
+
+	// The IAM user ARN associated with the on-premises instance.
+	IamUserArn *string `locationName:"iamUserArn" type:"string"`
+
+	// The ARN of the on-premises instance.
+	InstanceArn *string `locationName:"instanceArn" type:"string"`
+
+	// The name of the on-premises instance.
+	InstanceName *string `locationName:"instanceName" type:"string"`
+
+	// The time that the on-premises instance was registered.
+	RegisterTime *time.Time `locationName:"registerTime" type:"timestamp" timestampFormat:"unix"`
+
+	// The tags that are currently associated with the on-premises instance.
+	Tags []*Tag `locationName:"tags" type:"list"`
+}
+
+// String returns the string representation
+func (s InstanceInfo) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s InstanceInfo) GoString() string {
+	return s.String()
+}
+
+// Information about an instance in a deployment.
+type InstanceSummary struct {
+	_ struct{} `type:"structure"`
+
+	// The deployment ID.
+	DeploymentId *string `locationName:"deploymentId" type:"string"`
+
+	// The instance ID.
+	InstanceId *string `locationName:"instanceId" type:"string"`
+
+	// A timestamp indicating when the instance information was last updated.
+	LastUpdatedAt *time.Time `locationName:"lastUpdatedAt" type:"timestamp" timestampFormat:"unix"`
+
+	// A list of lifecycle events for this instance.
+	LifecycleEvents []*LifecycleEvent `locationName:"lifecycleEvents" type:"list"`
+
+	// The deployment status for this instance:
+	//
+	// Pending: The deployment is pending for this instance. In Progress: The
+	// deployment is in progress for this instance. Succeeded: The deployment has
+	// succeeded for this instance. Failed: The deployment has failed for this instance.
+	// Skipped: The deployment has been skipped for this instance. Unknown: The
+	// deployment status is unknown for this instance.
+	Status *string `locationName:"status" type:"string" enum:"InstanceStatus"`
+}
+
+// String returns the string representation
+func (s InstanceSummary) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s InstanceSummary) GoString() string {
+	return s.String()
+}
+
+// Information about a deployment lifecycle event.
+type LifecycleEvent struct {
+	_ struct{} `type:"structure"`
+
+	// Diagnostic information about the deployment lifecycle event.
+	Diagnostics *Diagnostics `locationName:"diagnostics" type:"structure"`
+
+	// A timestamp indicating when the deployment lifecycle event ended.
+	EndTime *time.Time `locationName:"endTime" type:"timestamp" timestampFormat:"unix"`
+
+	// The deployment lifecycle event name, such as ApplicationStop, BeforeInstall,
+	// AfterInstall, ApplicationStart, or ValidateService.
+	LifecycleEventName *string `locationName:"lifecycleEventName" type:"string"`
+
+	// A timestamp indicating when the deployment lifecycle event started.
+	StartTime *time.Time `locationName:"startTime" type:"timestamp" timestampFormat:"unix"`
+
+	// The deployment lifecycle event status:
+	//
+	// Pending: The deployment lifecycle event is pending. InProgress: The deployment
+	// lifecycle event is in progress. Succeeded: The deployment lifecycle event
+	// has succeeded. Failed: The deployment lifecycle event has failed. Skipped:
+	// The deployment lifecycle event has been skipped. Unknown: The deployment
+	// lifecycle event is unknown.
+	Status *string `locationName:"status" type:"string" enum:"LifecycleEventStatus"`
+}
+
+// String returns the string representation
+func (s LifecycleEvent) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s LifecycleEvent) GoString() string {
+	return s.String()
+}
+
+// Represents the input of a list application revisions operation.
+type ListApplicationRevisionsInput struct {
+	_ struct{} `type:"structure"`
+
+	// The name of an existing AWS CodeDeploy application associated with the applicable
+	// IAM user or AWS account.
+	ApplicationName *string `locationName:"applicationName" min:"1" type:"string" required:"true"`
+
+	// Whether to list revisions based on whether the revision is the target revision
+	// of a deployment group:
+	//
+	// include: List revisions that are target revisions of a deployment group.
+	// exclude: Do not list revisions that are target revisions of a deployment
+	// group. ignore: List all revisions, regardless of whether they are target
+	// revisions of a deployment group.
+	Deployed *string `locationName:"deployed" type:"string" enum:"ListStateFilterAction"`
+
+	// An identifier that was returned from the previous list application revisions
+	// call, which can be used to return the next set of application revisions in
+	// the list.
+	NextToken *string `locationName:"nextToken" type:"string"`
+
+	// A specific Amazon S3 bucket name to limit the search for revisions.
+	//
+	// If set to null, then all of the user's buckets will be searched.
+	S3Bucket *string `locationName:"s3Bucket" type:"string"`
+
+	// A specific key prefix for the set of Amazon S3 objects to limit the search
+	// for revisions.
+	S3KeyPrefix *string `locationName:"s3KeyPrefix" type:"string"`
+
+	// The column name to sort the list results by:
+	//
+	// registerTime: Sort the list results by when the revisions were registered
+	// with AWS CodeDeploy. firstUsedTime: Sort the list results by when the revisions
+	// were first used in a deployment. lastUsedTime: Sort the list results by
+	// when the revisions were last used in a deployment. If not specified or set
+	// to null, the results will be returned in an arbitrary order.
+	SortBy *string `locationName:"sortBy" type:"string" enum:"ApplicationRevisionSortBy"`
+
+	// The order to sort the list results by:
+	//
+	// ascending: Sort the list of results in ascending order. descending: Sort
+	// the list of results in descending order. If not specified, the results will
+	// be sorted in ascending order.
+	//
+	// If set to null, the results will be sorted in an arbitrary order.
+	SortOrder *string `locationName:"sortOrder" type:"string" enum:"SortOrder"`
+}
+
+// String returns the string representation
+func (s ListApplicationRevisionsInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListApplicationRevisionsInput) GoString() string {
+	return s.String()
+}
+
+// Represents the output of a list application revisions operation.
+type ListApplicationRevisionsOutput struct {
+	_ struct{} `type:"structure"`
+
+	// If the amount of information that is returned is significantly large, an
+	// identifier will also be returned, which can be used in a subsequent list
+	// application revisions call to return the next set of application revisions
+	// in the list.
+	NextToken *string `locationName:"nextToken" type:"string"`
+
+	// A list of revision locations that contain the matching revisions.
+	Revisions []*RevisionLocation `locationName:"revisions" type:"list"`
+}
+
+// String returns the string representation
+func (s ListApplicationRevisionsOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListApplicationRevisionsOutput) GoString() string {
+	return s.String()
+}
+
+// Represents the input of a list applications operation.
+type ListApplicationsInput struct {
+	_ struct{} `type:"structure"`
+
+	// An identifier that was returned from the previous list applications call,
+	// which can be used to return the next set of applications in the list.
+	NextToken *string `locationName:"nextToken" type:"string"`
+}
+
+// String returns the string representation
+func (s ListApplicationsInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListApplicationsInput) GoString() string {
+	return s.String()
+}
+
+// Represents the output of a list applications operation.
+type ListApplicationsOutput struct {
+	_ struct{} `type:"structure"`
+
+	// A list of application names.
+	Applications []*string `locationName:"applications" type:"list"`
+
+	// If the amount of information that is returned is significantly large, an
+	// identifier will also be returned, which can be used in a subsequent list
+	// applications call to return the next set of applications in the list.
+	NextToken *string `locationName:"nextToken" type:"string"`
+}
+
+// String returns the string representation
+func (s ListApplicationsOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListApplicationsOutput) GoString() string {
+	return s.String()
+}
+
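+// Editor's note: an editorial, hypothetical sketch, not part of the vendored
+// SDK, of the sort and filter fields documented above; the enum values are
+// passed as plain strings via aws.String:
+//
+//	out, err := svc.ListApplicationRevisions(&ListApplicationRevisionsInput{
+//		ApplicationName: aws.String("MyApp"),
+//		Deployed:        aws.String("ignore"),
+//		SortBy:          aws.String("lastUsedTime"),
+//		SortOrder:       aws.String("descending"),
+//	})
+//	if err == nil {
+//		_ = out.Revisions
+//	}
+
+// Represents the input of a list deployment configurations operation.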
+type ListDeploymentConfigsInput struct { + _ struct{} `type:"structure"` + + // An identifier that was returned from the previous list deployment configurations + // call, which can be used to return the next set of deployment configurations + // in the list. + NextToken *string `locationName:"nextToken" type:"string"` +} + +// String returns the string representation +func (s ListDeploymentConfigsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListDeploymentConfigsInput) GoString() string { + return s.String() +} + +// Represents the output of a list deployment configurations operation. +type ListDeploymentConfigsOutput struct { + _ struct{} `type:"structure"` + + // A list of deployment configurations, including the built-in configurations + // such as CodeDeployDefault.OneAtATime. + DeploymentConfigsList []*string `locationName:"deploymentConfigsList" type:"list"` + + // If the amount of information that is returned is significantly large, an + // identifier will also be returned, which can be used in a subsequent list + // deployment configurations call to return the next set of deployment configurations + // in the list. + NextToken *string `locationName:"nextToken" type:"string"` +} + +// String returns the string representation +func (s ListDeploymentConfigsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListDeploymentConfigsOutput) GoString() string { + return s.String() +} + +// Represents the input of a list deployment groups operation. +type ListDeploymentGroupsInput struct { + _ struct{} `type:"structure"` + + // The name of an existing AWS CodeDeploy application associated with the applicable + // IAM user or AWS account. + ApplicationName *string `locationName:"applicationName" min:"1" type:"string" required:"true"` + + // An identifier that was returned from the previous list deployment groups + // call, which can be used to return the next set of deployment groups in the + // list. + NextToken *string `locationName:"nextToken" type:"string"` +} + +// String returns the string representation +func (s ListDeploymentGroupsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListDeploymentGroupsInput) GoString() string { + return s.String() +} + +// Represents the output of a list deployment groups operation. +type ListDeploymentGroupsOutput struct { + _ struct{} `type:"structure"` + + // The application name. + ApplicationName *string `locationName:"applicationName" min:"1" type:"string"` + + // A list of corresponding deployment group names. + DeploymentGroups []*string `locationName:"deploymentGroups" type:"list"` + + // If the amount of information that is returned is significantly large, an + // identifier will also be returned, which can be used in a subsequent list + // deployment groups call to return the next set of deployment groups in the + // list. + NextToken *string `locationName:"nextToken" type:"string"` +} + +// String returns the string representation +func (s ListDeploymentGroupsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListDeploymentGroupsOutput) GoString() string { + return s.String() +} + +// Represents the input of a list deployment instances operation. +type ListDeploymentInstancesInput struct { + _ struct{} `type:"structure"` + + // The unique ID of a deployment. 
+ DeploymentId *string `locationName:"deploymentId" type:"string" required:"true"`
+
+ // A subset of instances to list, by status:
+ //
+ // Pending: Include in the resulting list those instances with pending deployments.
+ // InProgress: Include in the resulting list those instances with in-progress
+ // deployments. Succeeded: Include in the resulting list those instances with
+ // succeeded deployments. Failed: Include in the resulting list those instances
+ // with failed deployments. Skipped: Include in the resulting list those instances
+ // with skipped deployments. Unknown: Include in the resulting list those instances
+ // with deployments in an unknown state.
+ InstanceStatusFilter []*string `locationName:"instanceStatusFilter" type:"list"`
+
+ // An identifier that was returned from the previous list deployment instances
+ // call, which can be used to return the next set of deployment instances in
+ // the list.
+ NextToken *string `locationName:"nextToken" type:"string"`
+}
+
+// String returns the string representation
+func (s ListDeploymentInstancesInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListDeploymentInstancesInput) GoString() string {
+ return s.String()
+}
+
+// Represents the output of a list deployment instances operation.
+type ListDeploymentInstancesOutput struct {
+ _ struct{} `type:"structure"`
+
+ // A list of instance IDs.
+ InstancesList []*string `locationName:"instancesList" type:"list"`
+
+ // If the amount of information that is returned is significantly large, an
+ // identifier will also be returned, which can be used in a subsequent list
+ // deployment instances call to return the next set of deployment instances
+ // in the list.
+ NextToken *string `locationName:"nextToken" type:"string"`
+}
+
+// String returns the string representation
+func (s ListDeploymentInstancesOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListDeploymentInstancesOutput) GoString() string {
+ return s.String()
+}
+
+// Represents the input of a list deployments operation.
+type ListDeploymentsInput struct {
+ _ struct{} `type:"structure"`
+
+ // The name of an existing AWS CodeDeploy application associated with the applicable
+ // IAM user or AWS account.
+ ApplicationName *string `locationName:"applicationName" min:"1" type:"string"`
+
+ // A deployment creation start- and end-time range for returning a subset of
+ // the list of deployments.
+ CreateTimeRange *TimeRange `locationName:"createTimeRange" type:"structure"`
+
+ // The name of an existing deployment group for the specified application.
+ DeploymentGroupName *string `locationName:"deploymentGroupName" min:"1" type:"string"`
+
+ // A subset of deployments to list, by status:
+ //
+ // Created: Include in the resulting list created deployments. Queued: Include
+ // in the resulting list queued deployments. InProgress: Include in the resulting
+ // list in-progress deployments. Succeeded: Include in the resulting list succeeded
+ // deployments. Failed: Include in the resulting list failed deployments. Stopped:
+ // Include in the resulting list stopped deployments.
+ IncludeOnlyStatuses []*string `locationName:"includeOnlyStatuses" type:"list"`
+
+ // An identifier that was returned from the previous list deployments call,
+ // which can be used to return the next set of deployments in the list.
+ NextToken *string `locationName:"nextToken" type:"string"`
+}
+
+// String returns the string representation
+func (s ListDeploymentsInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListDeploymentsInput) GoString() string {
+ return s.String()
+}
+
+// Represents the output of a list deployments operation.
+type ListDeploymentsOutput struct {
+ _ struct{} `type:"structure"`
+
+ // A list of deployment IDs.
+ Deployments []*string `locationName:"deployments" type:"list"`
+
+ // If the amount of information that is returned is significantly large, an
+ // identifier will also be returned, which can be used in a subsequent list
+ // deployments call to return the next set of deployments in the list.
+ NextToken *string `locationName:"nextToken" type:"string"`
+}
+
+// String returns the string representation
+func (s ListDeploymentsOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListDeploymentsOutput) GoString() string {
+ return s.String()
+}
+
+// Represents the input of a list on-premises instances operation.
+type ListOnPremisesInstancesInput struct {
+ _ struct{} `type:"structure"`
+
+ // An identifier that was returned from the previous list on-premises instances
+ // call, which can be used to return the next set of on-premises instances in
+ // the list.
+ NextToken *string `locationName:"nextToken" type:"string"`
+
+ // The on-premises instances registration status:
+ //
+ // Deregistered: Include in the resulting list deregistered on-premises instances.
+ // Registered: Include in the resulting list registered on-premises instances.
+ RegistrationStatus *string `locationName:"registrationStatus" type:"string" enum:"RegistrationStatus"`
+
+ // The on-premises instance tags that will be used to restrict the corresponding
+ // on-premises instance names that are returned.
+ TagFilters []*TagFilter `locationName:"tagFilters" type:"list"`
+}
+
+// String returns the string representation
+func (s ListOnPremisesInstancesInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListOnPremisesInstancesInput) GoString() string {
+ return s.String()
+}
+
+// Represents the output of a list on-premises instances operation.
+type ListOnPremisesInstancesOutput struct {
+ _ struct{} `type:"structure"`
+
+ // The list of matching on-premises instance names.
+ InstanceNames []*string `locationName:"instanceNames" type:"list"`
+
+ // If the amount of information that is returned is significantly large, an
+ // identifier will also be returned, which can be used in a subsequent list
+ // on-premises instances call to return the next set of on-premises instances
+ // in the list.
+ NextToken *string `locationName:"nextToken" type:"string"`
+}
+
+// String returns the string representation
+func (s ListOnPremisesInstancesOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListOnPremisesInstancesOutput) GoString() string {
+ return s.String()
+}
+
+// Information about minimum healthy instances.
+type MinimumHealthyHosts struct {
+ _ struct{} `type:"structure"`
+
+ // The minimum healthy instances type:
+ //
+ // HOST_COUNT: The minimum number of healthy instances, as an absolute value.
+ // FLEET_PERCENT: The minimum number of healthy instances, as a percentage of
+ // the total number of instances in the deployment. For example, for 9 instances,
+ // if a HOST_COUNT of 6 is specified, deploy to up to 3 instances at a time.
+ // The deployment succeeds if 6 or more instances are successfully deployed
+ // to; otherwise, the deployment fails. If a FLEET_PERCENT of 40 is specified,
+ // deploy to up to 5 instances at a time. The deployment succeeds if 4 or more
+ // instances are successfully deployed to; otherwise, the deployment fails.
+ //
+ // In a call to the get deployment configuration operation, CodeDeployDefault.OneAtATime
+ // will return a minimum healthy instances type of MOST_CONCURRENCY and a value
+ // of 1. This means a deployment to only one instance at a time. (You cannot
+ // set the type to MOST_CONCURRENCY, only to HOST_COUNT or FLEET_PERCENT.)
+ Type *string `locationName:"type" type:"string" enum:"MinimumHealthyHostsType"`
+
+ // The minimum healthy instances value.
+ Value *int64 `locationName:"value" type:"integer"`
+}
+
+// String returns the string representation
+func (s MinimumHealthyHosts) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s MinimumHealthyHosts) GoString() string {
+ return s.String()
+}
+
+// Represents the input of a register application revision operation.
+type RegisterApplicationRevisionInput struct {
+ _ struct{} `type:"structure"`
+
+ // The name of an existing AWS CodeDeploy application associated with the applicable
+ // IAM user or AWS account.
+ ApplicationName *string `locationName:"applicationName" min:"1" type:"string" required:"true"`
+
+ // A comment about the revision.
+ Description *string `locationName:"description" type:"string"`
+
+ // Information about the application revision to register, including the revision's
+ // type and its location.
+ Revision *RevisionLocation `locationName:"revision" type:"structure" required:"true"`
+}
+
+// String returns the string representation
+func (s RegisterApplicationRevisionInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s RegisterApplicationRevisionInput) GoString() string {
+ return s.String()
+}
+
+type RegisterApplicationRevisionOutput struct {
+ _ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s RegisterApplicationRevisionOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s RegisterApplicationRevisionOutput) GoString() string {
+ return s.String()
+}
+
+// Represents the input of a register on-premises instance operation.
+type RegisterOnPremisesInstanceInput struct {
+ _ struct{} `type:"structure"`
+
+ // The ARN of the IAM user to associate with the on-premises instance.
+ IamUserArn *string `locationName:"iamUserArn" type:"string" required:"true"`
+
+ // The name of the on-premises instance to register.
+ InstanceName *string `locationName:"instanceName" type:"string" required:"true"` +} + +// String returns the string representation +func (s RegisterOnPremisesInstanceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RegisterOnPremisesInstanceInput) GoString() string { + return s.String() +} + +type RegisterOnPremisesInstanceOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s RegisterOnPremisesInstanceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RegisterOnPremisesInstanceOutput) GoString() string { + return s.String() +} + +// Represents the input of a remove tags from on-premises instances operation. +type RemoveTagsFromOnPremisesInstancesInput struct { + _ struct{} `type:"structure"` + + // The names of the on-premises instances to remove tags from. + InstanceNames []*string `locationName:"instanceNames" type:"list" required:"true"` + + // The tag key-value pairs to remove from the on-premises instances. + Tags []*Tag `locationName:"tags" type:"list" required:"true"` +} + +// String returns the string representation +func (s RemoveTagsFromOnPremisesInstancesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RemoveTagsFromOnPremisesInstancesInput) GoString() string { + return s.String() +} + +type RemoveTagsFromOnPremisesInstancesOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s RemoveTagsFromOnPremisesInstancesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RemoveTagsFromOnPremisesInstancesOutput) GoString() string { + return s.String() +} + +// Information about an application revision's location. +type RevisionLocation struct { + _ struct{} `type:"structure"` + + // Information about the location of application artifacts that are stored in + // GitHub. + GitHubLocation *GitHubLocation `locationName:"gitHubLocation" type:"structure"` + + // The application revision's type: + // + // S3: An application revision stored in Amazon S3. GitHub: An application + // revision stored in GitHub. + RevisionType *string `locationName:"revisionType" type:"string" enum:"RevisionLocationType"` + + // Information about the location of application artifacts that are stored in + // Amazon S3. + S3Location *S3Location `locationName:"s3Location" type:"structure"` +} + +// String returns the string representation +func (s RevisionLocation) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RevisionLocation) GoString() string { + return s.String() +} + +// Information about the location of application artifacts that are stored in +// Amazon S3. +type S3Location struct { + _ struct{} `type:"structure"` + + // The name of the Amazon S3 bucket where the application revision is stored. + Bucket *string `locationName:"bucket" type:"string"` + + // The file type of the application revision. Must be one of the following: + // + // tar: A tar archive file. tgz: A compressed tar archive file. zip: A zip + // archive file. + BundleType *string `locationName:"bundleType" type:"string" enum:"BundleType"` + + // The ETag of the Amazon S3 object that represents the bundled artifacts for + // the application revision. 
+ // + // If the ETag is not specified as an input parameter, ETag validation of the + // object will be skipped. + ETag *string `locationName:"eTag" type:"string"` + + // The name of the Amazon S3 object that represents the bundled artifacts for + // the application revision. + Key *string `locationName:"key" type:"string"` + + // A specific version of the Amazon S3 object that represents the bundled artifacts + // for the application revision. + // + // If the version is not specified, the system will use the most recent version + // by default. + Version *string `locationName:"version" type:"string"` +} + +// String returns the string representation +func (s S3Location) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s S3Location) GoString() string { + return s.String() +} + +// Represents the input of a stop deployment operation. +type StopDeploymentInput struct { + _ struct{} `type:"structure"` + + // The unique ID of a deployment. + DeploymentId *string `locationName:"deploymentId" type:"string" required:"true"` +} + +// String returns the string representation +func (s StopDeploymentInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s StopDeploymentInput) GoString() string { + return s.String() +} + +// Represents the output of a stop deployment operation. +type StopDeploymentOutput struct { + _ struct{} `type:"structure"` + + // The status of the stop deployment operation: + // + // Pending: The stop operation is pending. Succeeded: The stop operation succeeded. + Status *string `locationName:"status" type:"string" enum:"StopStatus"` + + // An accompanying status message. + StatusMessage *string `locationName:"statusMessage" type:"string"` +} + +// String returns the string representation +func (s StopDeploymentOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s StopDeploymentOutput) GoString() string { + return s.String() +} + +// Information about a tag. +type Tag struct { + _ struct{} `type:"structure"` + + // The tag's key. + Key *string `type:"string"` + + // The tag's value. + Value *string `type:"string"` +} + +// String returns the string representation +func (s Tag) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Tag) GoString() string { + return s.String() +} + +// Information about an on-premises instance tag filter. +type TagFilter struct { + _ struct{} `type:"structure"` + + // The on-premises instance tag filter key. + Key *string `type:"string"` + + // The on-premises instance tag filter type: + // + // KEY_ONLY: Key only. VALUE_ONLY: Value only. KEY_AND_VALUE: Key and value. + Type *string `type:"string" enum:"TagFilterType"` + + // The on-premises instance tag filter value. + Value *string `type:"string"` +} + +// String returns the string representation +func (s TagFilter) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TagFilter) GoString() string { + return s.String() +} + +// Information about a time range. +type TimeRange struct { + _ struct{} `type:"structure"` + + // The time range's end time. + // + // Specify null to leave the time range's end time open-ended. + End *time.Time `locationName:"end" type:"timestamp" timestampFormat:"unix"` + + // The time range's start time. + // + // Specify null to leave the time range's start time open-ended. 
+ Start *time.Time `locationName:"start" type:"timestamp" timestampFormat:"unix"`
+}
+
+// String returns the string representation
+func (s TimeRange) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s TimeRange) GoString() string {
+ return s.String()
+}
+
+// Represents the input of an update application operation.
+type UpdateApplicationInput struct {
+ _ struct{} `type:"structure"`
+
+ // The current name of the application that you want to change.
+ ApplicationName *string `locationName:"applicationName" min:"1" type:"string"`
+
+ // The new name that you want to change the application to.
+ NewApplicationName *string `locationName:"newApplicationName" min:"1" type:"string"`
+}
+
+// String returns the string representation
+func (s UpdateApplicationInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s UpdateApplicationInput) GoString() string {
+ return s.String()
+}
+
+type UpdateApplicationOutput struct {
+ _ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s UpdateApplicationOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s UpdateApplicationOutput) GoString() string {
+ return s.String()
+}
+
+// Represents the input of an update deployment group operation.
+type UpdateDeploymentGroupInput struct {
+ _ struct{} `type:"structure"`
+
+ // The application name corresponding to the deployment group to update.
+ ApplicationName *string `locationName:"applicationName" min:"1" type:"string" required:"true"`
+
+ // The replacement list of Auto Scaling groups to be included in the deployment
+ // group, if you want to change them.
+ AutoScalingGroups []*string `locationName:"autoScalingGroups" type:"list"`
+
+ // The current name of the existing deployment group.
+ CurrentDeploymentGroupName *string `locationName:"currentDeploymentGroupName" min:"1" type:"string" required:"true"`
+
+ // The replacement deployment configuration name to use, if you want to change
+ // it.
+ DeploymentConfigName *string `locationName:"deploymentConfigName" min:"1" type:"string"`
+
+ // The replacement set of Amazon EC2 tags to filter on, if you want to change
+ // them.
+ Ec2TagFilters []*EC2TagFilter `locationName:"ec2TagFilters" type:"list"`
+
+ // The new name of the deployment group, if you want to change it.
+ NewDeploymentGroupName *string `locationName:"newDeploymentGroupName" min:"1" type:"string"`
+
+ // The replacement set of on-premises instance tags to filter on, if you want
+ // to change them.
+ OnPremisesInstanceTagFilters []*TagFilter `locationName:"onPremisesInstanceTagFilters" type:"list"`
+
+ // A replacement service role's ARN, if you want to change it.
+ ServiceRoleArn *string `locationName:"serviceRoleArn" type:"string"`
+}
+
+// String returns the string representation
+func (s UpdateDeploymentGroupInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s UpdateDeploymentGroupInput) GoString() string {
+ return s.String()
+}
+
+// Represents the output of an update deployment group operation.
+type UpdateDeploymentGroupOutput struct {
+ _ struct{} `type:"structure"`
+
+ // If the output contains no data, and the corresponding deployment group contained
+ // at least one Auto Scaling group, AWS CodeDeploy successfully removed all
+ // corresponding Auto Scaling lifecycle event hooks from the AWS account.
If + // the output does contain data, AWS CodeDeploy could not remove some Auto Scaling + // lifecycle event hooks from the AWS account. + HooksNotCleanedUp []*AutoScalingGroup `locationName:"hooksNotCleanedUp" type:"list"` +} + +// String returns the string representation +func (s UpdateDeploymentGroupOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateDeploymentGroupOutput) GoString() string { + return s.String() +} + +const ( + // @enum ApplicationRevisionSortBy + ApplicationRevisionSortByRegisterTime = "registerTime" + // @enum ApplicationRevisionSortBy + ApplicationRevisionSortByFirstUsedTime = "firstUsedTime" + // @enum ApplicationRevisionSortBy + ApplicationRevisionSortByLastUsedTime = "lastUsedTime" +) + +const ( + // @enum BundleType + BundleTypeTar = "tar" + // @enum BundleType + BundleTypeTgz = "tgz" + // @enum BundleType + BundleTypeZip = "zip" +) + +const ( + // @enum DeploymentCreator + DeploymentCreatorUser = "user" + // @enum DeploymentCreator + DeploymentCreatorAutoscaling = "autoscaling" +) + +const ( + // @enum DeploymentStatus + DeploymentStatusCreated = "Created" + // @enum DeploymentStatus + DeploymentStatusQueued = "Queued" + // @enum DeploymentStatus + DeploymentStatusInProgress = "InProgress" + // @enum DeploymentStatus + DeploymentStatusSucceeded = "Succeeded" + // @enum DeploymentStatus + DeploymentStatusFailed = "Failed" + // @enum DeploymentStatus + DeploymentStatusStopped = "Stopped" +) + +const ( + // @enum EC2TagFilterType + EC2TagFilterTypeKeyOnly = "KEY_ONLY" + // @enum EC2TagFilterType + EC2TagFilterTypeValueOnly = "VALUE_ONLY" + // @enum EC2TagFilterType + EC2TagFilterTypeKeyAndValue = "KEY_AND_VALUE" +) + +const ( + // @enum ErrorCode + ErrorCodeDeploymentGroupMissing = "DEPLOYMENT_GROUP_MISSING" + // @enum ErrorCode + ErrorCodeApplicationMissing = "APPLICATION_MISSING" + // @enum ErrorCode + ErrorCodeRevisionMissing = "REVISION_MISSING" + // @enum ErrorCode + ErrorCodeIamRoleMissing = "IAM_ROLE_MISSING" + // @enum ErrorCode + ErrorCodeIamRolePermissions = "IAM_ROLE_PERMISSIONS" + // @enum ErrorCode + ErrorCodeNoEc2Subscription = "NO_EC2_SUBSCRIPTION" + // @enum ErrorCode + ErrorCodeOverMaxInstances = "OVER_MAX_INSTANCES" + // @enum ErrorCode + ErrorCodeNoInstances = "NO_INSTANCES" + // @enum ErrorCode + ErrorCodeTimeout = "TIMEOUT" + // @enum ErrorCode + ErrorCodeHealthConstraintsInvalid = "HEALTH_CONSTRAINTS_INVALID" + // @enum ErrorCode + ErrorCodeHealthConstraints = "HEALTH_CONSTRAINTS" + // @enum ErrorCode + ErrorCodeInternalError = "INTERNAL_ERROR" + // @enum ErrorCode + ErrorCodeThrottled = "THROTTLED" +) + +const ( + // @enum InstanceStatus + InstanceStatusPending = "Pending" + // @enum InstanceStatus + InstanceStatusInProgress = "InProgress" + // @enum InstanceStatus + InstanceStatusSucceeded = "Succeeded" + // @enum InstanceStatus + InstanceStatusFailed = "Failed" + // @enum InstanceStatus + InstanceStatusSkipped = "Skipped" + // @enum InstanceStatus + InstanceStatusUnknown = "Unknown" +) + +const ( + // @enum LifecycleErrorCode + LifecycleErrorCodeSuccess = "Success" + // @enum LifecycleErrorCode + LifecycleErrorCodeScriptMissing = "ScriptMissing" + // @enum LifecycleErrorCode + LifecycleErrorCodeScriptNotExecutable = "ScriptNotExecutable" + // @enum LifecycleErrorCode + LifecycleErrorCodeScriptTimedOut = "ScriptTimedOut" + // @enum LifecycleErrorCode + LifecycleErrorCodeScriptFailed = "ScriptFailed" + // @enum LifecycleErrorCode + LifecycleErrorCodeUnknownError = 
"UnknownError" +) + +const ( + // @enum LifecycleEventStatus + LifecycleEventStatusPending = "Pending" + // @enum LifecycleEventStatus + LifecycleEventStatusInProgress = "InProgress" + // @enum LifecycleEventStatus + LifecycleEventStatusSucceeded = "Succeeded" + // @enum LifecycleEventStatus + LifecycleEventStatusFailed = "Failed" + // @enum LifecycleEventStatus + LifecycleEventStatusSkipped = "Skipped" + // @enum LifecycleEventStatus + LifecycleEventStatusUnknown = "Unknown" +) + +const ( + // @enum ListStateFilterAction + ListStateFilterActionInclude = "include" + // @enum ListStateFilterAction + ListStateFilterActionExclude = "exclude" + // @enum ListStateFilterAction + ListStateFilterActionIgnore = "ignore" +) + +const ( + // @enum MinimumHealthyHostsType + MinimumHealthyHostsTypeHostCount = "HOST_COUNT" + // @enum MinimumHealthyHostsType + MinimumHealthyHostsTypeFleetPercent = "FLEET_PERCENT" +) + +const ( + // @enum RegistrationStatus + RegistrationStatusRegistered = "Registered" + // @enum RegistrationStatus + RegistrationStatusDeregistered = "Deregistered" +) + +const ( + // @enum RevisionLocationType + RevisionLocationTypeS3 = "S3" + // @enum RevisionLocationType + RevisionLocationTypeGitHub = "GitHub" +) + +const ( + // @enum SortOrder + SortOrderAscending = "ascending" + // @enum SortOrder + SortOrderDescending = "descending" +) + +const ( + // @enum StopStatus + StopStatusPending = "Pending" + // @enum StopStatus + StopStatusSucceeded = "Succeeded" +) + +const ( + // @enum TagFilterType + TagFilterTypeKeyOnly = "KEY_ONLY" + // @enum TagFilterType + TagFilterTypeValueOnly = "VALUE_ONLY" + // @enum TagFilterType + TagFilterTypeKeyAndValue = "KEY_AND_VALUE" +) diff --git a/vendor/github.com/aws/aws-sdk-go/service/codedeploy/codedeployiface/interface.go b/vendor/github.com/aws/aws-sdk-go/service/codedeploy/codedeployiface/interface.go new file mode 100644 index 000000000..83066e488 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/codedeploy/codedeployiface/interface.go @@ -0,0 +1,154 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +// Package codedeployiface provides an interface for the AWS CodeDeploy. +package codedeployiface + +import ( + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/service/codedeploy" +) + +// CodeDeployAPI is the interface type for codedeploy.CodeDeploy. 
+type CodeDeployAPI interface { + AddTagsToOnPremisesInstancesRequest(*codedeploy.AddTagsToOnPremisesInstancesInput) (*request.Request, *codedeploy.AddTagsToOnPremisesInstancesOutput) + + AddTagsToOnPremisesInstances(*codedeploy.AddTagsToOnPremisesInstancesInput) (*codedeploy.AddTagsToOnPremisesInstancesOutput, error) + + BatchGetApplicationsRequest(*codedeploy.BatchGetApplicationsInput) (*request.Request, *codedeploy.BatchGetApplicationsOutput) + + BatchGetApplications(*codedeploy.BatchGetApplicationsInput) (*codedeploy.BatchGetApplicationsOutput, error) + + BatchGetDeploymentsRequest(*codedeploy.BatchGetDeploymentsInput) (*request.Request, *codedeploy.BatchGetDeploymentsOutput) + + BatchGetDeployments(*codedeploy.BatchGetDeploymentsInput) (*codedeploy.BatchGetDeploymentsOutput, error) + + BatchGetOnPremisesInstancesRequest(*codedeploy.BatchGetOnPremisesInstancesInput) (*request.Request, *codedeploy.BatchGetOnPremisesInstancesOutput) + + BatchGetOnPremisesInstances(*codedeploy.BatchGetOnPremisesInstancesInput) (*codedeploy.BatchGetOnPremisesInstancesOutput, error) + + CreateApplicationRequest(*codedeploy.CreateApplicationInput) (*request.Request, *codedeploy.CreateApplicationOutput) + + CreateApplication(*codedeploy.CreateApplicationInput) (*codedeploy.CreateApplicationOutput, error) + + CreateDeploymentRequest(*codedeploy.CreateDeploymentInput) (*request.Request, *codedeploy.CreateDeploymentOutput) + + CreateDeployment(*codedeploy.CreateDeploymentInput) (*codedeploy.CreateDeploymentOutput, error) + + CreateDeploymentConfigRequest(*codedeploy.CreateDeploymentConfigInput) (*request.Request, *codedeploy.CreateDeploymentConfigOutput) + + CreateDeploymentConfig(*codedeploy.CreateDeploymentConfigInput) (*codedeploy.CreateDeploymentConfigOutput, error) + + CreateDeploymentGroupRequest(*codedeploy.CreateDeploymentGroupInput) (*request.Request, *codedeploy.CreateDeploymentGroupOutput) + + CreateDeploymentGroup(*codedeploy.CreateDeploymentGroupInput) (*codedeploy.CreateDeploymentGroupOutput, error) + + DeleteApplicationRequest(*codedeploy.DeleteApplicationInput) (*request.Request, *codedeploy.DeleteApplicationOutput) + + DeleteApplication(*codedeploy.DeleteApplicationInput) (*codedeploy.DeleteApplicationOutput, error) + + DeleteDeploymentConfigRequest(*codedeploy.DeleteDeploymentConfigInput) (*request.Request, *codedeploy.DeleteDeploymentConfigOutput) + + DeleteDeploymentConfig(*codedeploy.DeleteDeploymentConfigInput) (*codedeploy.DeleteDeploymentConfigOutput, error) + + DeleteDeploymentGroupRequest(*codedeploy.DeleteDeploymentGroupInput) (*request.Request, *codedeploy.DeleteDeploymentGroupOutput) + + DeleteDeploymentGroup(*codedeploy.DeleteDeploymentGroupInput) (*codedeploy.DeleteDeploymentGroupOutput, error) + + DeregisterOnPremisesInstanceRequest(*codedeploy.DeregisterOnPremisesInstanceInput) (*request.Request, *codedeploy.DeregisterOnPremisesInstanceOutput) + + DeregisterOnPremisesInstance(*codedeploy.DeregisterOnPremisesInstanceInput) (*codedeploy.DeregisterOnPremisesInstanceOutput, error) + + GetApplicationRequest(*codedeploy.GetApplicationInput) (*request.Request, *codedeploy.GetApplicationOutput) + + GetApplication(*codedeploy.GetApplicationInput) (*codedeploy.GetApplicationOutput, error) + + GetApplicationRevisionRequest(*codedeploy.GetApplicationRevisionInput) (*request.Request, *codedeploy.GetApplicationRevisionOutput) + + GetApplicationRevision(*codedeploy.GetApplicationRevisionInput) (*codedeploy.GetApplicationRevisionOutput, error) + + 
GetDeploymentRequest(*codedeploy.GetDeploymentInput) (*request.Request, *codedeploy.GetDeploymentOutput) + + GetDeployment(*codedeploy.GetDeploymentInput) (*codedeploy.GetDeploymentOutput, error) + + GetDeploymentConfigRequest(*codedeploy.GetDeploymentConfigInput) (*request.Request, *codedeploy.GetDeploymentConfigOutput) + + GetDeploymentConfig(*codedeploy.GetDeploymentConfigInput) (*codedeploy.GetDeploymentConfigOutput, error) + + GetDeploymentGroupRequest(*codedeploy.GetDeploymentGroupInput) (*request.Request, *codedeploy.GetDeploymentGroupOutput) + + GetDeploymentGroup(*codedeploy.GetDeploymentGroupInput) (*codedeploy.GetDeploymentGroupOutput, error) + + GetDeploymentInstanceRequest(*codedeploy.GetDeploymentInstanceInput) (*request.Request, *codedeploy.GetDeploymentInstanceOutput) + + GetDeploymentInstance(*codedeploy.GetDeploymentInstanceInput) (*codedeploy.GetDeploymentInstanceOutput, error) + + GetOnPremisesInstanceRequest(*codedeploy.GetOnPremisesInstanceInput) (*request.Request, *codedeploy.GetOnPremisesInstanceOutput) + + GetOnPremisesInstance(*codedeploy.GetOnPremisesInstanceInput) (*codedeploy.GetOnPremisesInstanceOutput, error) + + ListApplicationRevisionsRequest(*codedeploy.ListApplicationRevisionsInput) (*request.Request, *codedeploy.ListApplicationRevisionsOutput) + + ListApplicationRevisions(*codedeploy.ListApplicationRevisionsInput) (*codedeploy.ListApplicationRevisionsOutput, error) + + ListApplicationRevisionsPages(*codedeploy.ListApplicationRevisionsInput, func(*codedeploy.ListApplicationRevisionsOutput, bool) bool) error + + ListApplicationsRequest(*codedeploy.ListApplicationsInput) (*request.Request, *codedeploy.ListApplicationsOutput) + + ListApplications(*codedeploy.ListApplicationsInput) (*codedeploy.ListApplicationsOutput, error) + + ListApplicationsPages(*codedeploy.ListApplicationsInput, func(*codedeploy.ListApplicationsOutput, bool) bool) error + + ListDeploymentConfigsRequest(*codedeploy.ListDeploymentConfigsInput) (*request.Request, *codedeploy.ListDeploymentConfigsOutput) + + ListDeploymentConfigs(*codedeploy.ListDeploymentConfigsInput) (*codedeploy.ListDeploymentConfigsOutput, error) + + ListDeploymentConfigsPages(*codedeploy.ListDeploymentConfigsInput, func(*codedeploy.ListDeploymentConfigsOutput, bool) bool) error + + ListDeploymentGroupsRequest(*codedeploy.ListDeploymentGroupsInput) (*request.Request, *codedeploy.ListDeploymentGroupsOutput) + + ListDeploymentGroups(*codedeploy.ListDeploymentGroupsInput) (*codedeploy.ListDeploymentGroupsOutput, error) + + ListDeploymentGroupsPages(*codedeploy.ListDeploymentGroupsInput, func(*codedeploy.ListDeploymentGroupsOutput, bool) bool) error + + ListDeploymentInstancesRequest(*codedeploy.ListDeploymentInstancesInput) (*request.Request, *codedeploy.ListDeploymentInstancesOutput) + + ListDeploymentInstances(*codedeploy.ListDeploymentInstancesInput) (*codedeploy.ListDeploymentInstancesOutput, error) + + ListDeploymentInstancesPages(*codedeploy.ListDeploymentInstancesInput, func(*codedeploy.ListDeploymentInstancesOutput, bool) bool) error + + ListDeploymentsRequest(*codedeploy.ListDeploymentsInput) (*request.Request, *codedeploy.ListDeploymentsOutput) + + ListDeployments(*codedeploy.ListDeploymentsInput) (*codedeploy.ListDeploymentsOutput, error) + + ListDeploymentsPages(*codedeploy.ListDeploymentsInput, func(*codedeploy.ListDeploymentsOutput, bool) bool) error + + ListOnPremisesInstancesRequest(*codedeploy.ListOnPremisesInstancesInput) (*request.Request, *codedeploy.ListOnPremisesInstancesOutput) + + 
ListOnPremisesInstances(*codedeploy.ListOnPremisesInstancesInput) (*codedeploy.ListOnPremisesInstancesOutput, error) + + RegisterApplicationRevisionRequest(*codedeploy.RegisterApplicationRevisionInput) (*request.Request, *codedeploy.RegisterApplicationRevisionOutput) + + RegisterApplicationRevision(*codedeploy.RegisterApplicationRevisionInput) (*codedeploy.RegisterApplicationRevisionOutput, error) + + RegisterOnPremisesInstanceRequest(*codedeploy.RegisterOnPremisesInstanceInput) (*request.Request, *codedeploy.RegisterOnPremisesInstanceOutput) + + RegisterOnPremisesInstance(*codedeploy.RegisterOnPremisesInstanceInput) (*codedeploy.RegisterOnPremisesInstanceOutput, error) + + RemoveTagsFromOnPremisesInstancesRequest(*codedeploy.RemoveTagsFromOnPremisesInstancesInput) (*request.Request, *codedeploy.RemoveTagsFromOnPremisesInstancesOutput) + + RemoveTagsFromOnPremisesInstances(*codedeploy.RemoveTagsFromOnPremisesInstancesInput) (*codedeploy.RemoveTagsFromOnPremisesInstancesOutput, error) + + StopDeploymentRequest(*codedeploy.StopDeploymentInput) (*request.Request, *codedeploy.StopDeploymentOutput) + + StopDeployment(*codedeploy.StopDeploymentInput) (*codedeploy.StopDeploymentOutput, error) + + UpdateApplicationRequest(*codedeploy.UpdateApplicationInput) (*request.Request, *codedeploy.UpdateApplicationOutput) + + UpdateApplication(*codedeploy.UpdateApplicationInput) (*codedeploy.UpdateApplicationOutput, error) + + UpdateDeploymentGroupRequest(*codedeploy.UpdateDeploymentGroupInput) (*request.Request, *codedeploy.UpdateDeploymentGroupOutput) + + UpdateDeploymentGroup(*codedeploy.UpdateDeploymentGroupInput) (*codedeploy.UpdateDeploymentGroupOutput, error) +} + +var _ CodeDeployAPI = (*codedeploy.CodeDeploy)(nil) diff --git a/vendor/github.com/aws/aws-sdk-go/service/codedeploy/examples_test.go b/vendor/github.com/aws/aws-sdk-go/service/codedeploy/examples_test.go new file mode 100644 index 000000000..cb1658f81 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/codedeploy/examples_test.go @@ -0,0 +1,787 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +package codedeploy_test + +import ( + "bytes" + "fmt" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/session" + "github.com/aws/aws-sdk-go/service/codedeploy" +) + +var _ time.Duration +var _ bytes.Buffer + +func ExampleCodeDeploy_AddTagsToOnPremisesInstances() { + svc := codedeploy.New(session.New()) + + params := &codedeploy.AddTagsToOnPremisesInstancesInput{ + InstanceNames: []*string{ // Required + aws.String("InstanceName"), // Required + // More values... + }, + Tags: []*codedeploy.Tag{ // Required + { // Required + Key: aws.String("Key"), + Value: aws.String("Value"), + }, + // More values... + }, + } + resp, err := svc.AddTagsToOnPremisesInstances(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCodeDeploy_BatchGetApplications() { + svc := codedeploy.New(session.New()) + + params := &codedeploy.BatchGetApplicationsInput{ + ApplicationNames: []*string{ + aws.String("ApplicationName"), // Required + // More values... + }, + } + resp, err := svc.BatchGetApplications(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleCodeDeploy_BatchGetDeployments() { + svc := codedeploy.New(session.New()) + + params := &codedeploy.BatchGetDeploymentsInput{ + DeploymentIds: []*string{ + aws.String("DeploymentId"), // Required + // More values... + }, + } + resp, err := svc.BatchGetDeployments(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCodeDeploy_BatchGetOnPremisesInstances() { + svc := codedeploy.New(session.New()) + + params := &codedeploy.BatchGetOnPremisesInstancesInput{ + InstanceNames: []*string{ + aws.String("InstanceName"), // Required + // More values... + }, + } + resp, err := svc.BatchGetOnPremisesInstances(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCodeDeploy_CreateApplication() { + svc := codedeploy.New(session.New()) + + params := &codedeploy.CreateApplicationInput{ + ApplicationName: aws.String("ApplicationName"), // Required + } + resp, err := svc.CreateApplication(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCodeDeploy_CreateDeployment() { + svc := codedeploy.New(session.New()) + + params := &codedeploy.CreateDeploymentInput{ + ApplicationName: aws.String("ApplicationName"), // Required + DeploymentConfigName: aws.String("DeploymentConfigName"), + DeploymentGroupName: aws.String("DeploymentGroupName"), + Description: aws.String("Description"), + IgnoreApplicationStopFailures: aws.Bool(true), + Revision: &codedeploy.RevisionLocation{ + GitHubLocation: &codedeploy.GitHubLocation{ + CommitId: aws.String("CommitId"), + Repository: aws.String("Repository"), + }, + RevisionType: aws.String("RevisionLocationType"), + S3Location: &codedeploy.S3Location{ + Bucket: aws.String("S3Bucket"), + BundleType: aws.String("BundleType"), + ETag: aws.String("ETag"), + Key: aws.String("S3Key"), + Version: aws.String("VersionId"), + }, + }, + } + resp, err := svc.CreateDeployment(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCodeDeploy_CreateDeploymentConfig() { + svc := codedeploy.New(session.New()) + + params := &codedeploy.CreateDeploymentConfigInput{ + DeploymentConfigName: aws.String("DeploymentConfigName"), // Required + MinimumHealthyHosts: &codedeploy.MinimumHealthyHosts{ + Type: aws.String("MinimumHealthyHostsType"), + Value: aws.Int64(1), + }, + } + resp, err := svc.CreateDeploymentConfig(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleCodeDeploy_CreateDeploymentGroup() { + svc := codedeploy.New(session.New()) + + params := &codedeploy.CreateDeploymentGroupInput{ + ApplicationName: aws.String("ApplicationName"), // Required + DeploymentGroupName: aws.String("DeploymentGroupName"), // Required + ServiceRoleArn: aws.String("Role"), // Required + AutoScalingGroups: []*string{ + aws.String("AutoScalingGroupName"), // Required + // More values... + }, + DeploymentConfigName: aws.String("DeploymentConfigName"), + Ec2TagFilters: []*codedeploy.EC2TagFilter{ + { // Required + Key: aws.String("Key"), + Type: aws.String("EC2TagFilterType"), + Value: aws.String("Value"), + }, + // More values... + }, + OnPremisesInstanceTagFilters: []*codedeploy.TagFilter{ + { // Required + Key: aws.String("Key"), + Type: aws.String("TagFilterType"), + Value: aws.String("Value"), + }, + // More values... + }, + } + resp, err := svc.CreateDeploymentGroup(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCodeDeploy_DeleteApplication() { + svc := codedeploy.New(session.New()) + + params := &codedeploy.DeleteApplicationInput{ + ApplicationName: aws.String("ApplicationName"), // Required + } + resp, err := svc.DeleteApplication(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCodeDeploy_DeleteDeploymentConfig() { + svc := codedeploy.New(session.New()) + + params := &codedeploy.DeleteDeploymentConfigInput{ + DeploymentConfigName: aws.String("DeploymentConfigName"), // Required + } + resp, err := svc.DeleteDeploymentConfig(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCodeDeploy_DeleteDeploymentGroup() { + svc := codedeploy.New(session.New()) + + params := &codedeploy.DeleteDeploymentGroupInput{ + ApplicationName: aws.String("ApplicationName"), // Required + DeploymentGroupName: aws.String("DeploymentGroupName"), // Required + } + resp, err := svc.DeleteDeploymentGroup(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCodeDeploy_DeregisterOnPremisesInstance() { + svc := codedeploy.New(session.New()) + + params := &codedeploy.DeregisterOnPremisesInstanceInput{ + InstanceName: aws.String("InstanceName"), // Required + } + resp, err := svc.DeregisterOnPremisesInstance(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCodeDeploy_GetApplication() { + svc := codedeploy.New(session.New()) + + params := &codedeploy.GetApplicationInput{ + ApplicationName: aws.String("ApplicationName"), // Required + } + resp, err := svc.GetApplication(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. 
+ fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCodeDeploy_GetApplicationRevision() { + svc := codedeploy.New(session.New()) + + params := &codedeploy.GetApplicationRevisionInput{ + ApplicationName: aws.String("ApplicationName"), // Required + Revision: &codedeploy.RevisionLocation{ // Required + GitHubLocation: &codedeploy.GitHubLocation{ + CommitId: aws.String("CommitId"), + Repository: aws.String("Repository"), + }, + RevisionType: aws.String("RevisionLocationType"), + S3Location: &codedeploy.S3Location{ + Bucket: aws.String("S3Bucket"), + BundleType: aws.String("BundleType"), + ETag: aws.String("ETag"), + Key: aws.String("S3Key"), + Version: aws.String("VersionId"), + }, + }, + } + resp, err := svc.GetApplicationRevision(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCodeDeploy_GetDeployment() { + svc := codedeploy.New(session.New()) + + params := &codedeploy.GetDeploymentInput{ + DeploymentId: aws.String("DeploymentId"), // Required + } + resp, err := svc.GetDeployment(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCodeDeploy_GetDeploymentConfig() { + svc := codedeploy.New(session.New()) + + params := &codedeploy.GetDeploymentConfigInput{ + DeploymentConfigName: aws.String("DeploymentConfigName"), // Required + } + resp, err := svc.GetDeploymentConfig(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCodeDeploy_GetDeploymentGroup() { + svc := codedeploy.New(session.New()) + + params := &codedeploy.GetDeploymentGroupInput{ + ApplicationName: aws.String("ApplicationName"), // Required + DeploymentGroupName: aws.String("DeploymentGroupName"), // Required + } + resp, err := svc.GetDeploymentGroup(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCodeDeploy_GetDeploymentInstance() { + svc := codedeploy.New(session.New()) + + params := &codedeploy.GetDeploymentInstanceInput{ + DeploymentId: aws.String("DeploymentId"), // Required + InstanceId: aws.String("InstanceId"), // Required + } + resp, err := svc.GetDeploymentInstance(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCodeDeploy_GetOnPremisesInstance() { + svc := codedeploy.New(session.New()) + + params := &codedeploy.GetOnPremisesInstanceInput{ + InstanceName: aws.String("InstanceName"), // Required + } + resp, err := svc.GetOnPremisesInstance(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleCodeDeploy_ListApplicationRevisions() { + svc := codedeploy.New(session.New()) + + params := &codedeploy.ListApplicationRevisionsInput{ + ApplicationName: aws.String("ApplicationName"), // Required + Deployed: aws.String("ListStateFilterAction"), + NextToken: aws.String("NextToken"), + S3Bucket: aws.String("S3Bucket"), + S3KeyPrefix: aws.String("S3Key"), + SortBy: aws.String("ApplicationRevisionSortBy"), + SortOrder: aws.String("SortOrder"), + } + resp, err := svc.ListApplicationRevisions(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCodeDeploy_ListApplications() { + svc := codedeploy.New(session.New()) + + params := &codedeploy.ListApplicationsInput{ + NextToken: aws.String("NextToken"), + } + resp, err := svc.ListApplications(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCodeDeploy_ListDeploymentConfigs() { + svc := codedeploy.New(session.New()) + + params := &codedeploy.ListDeploymentConfigsInput{ + NextToken: aws.String("NextToken"), + } + resp, err := svc.ListDeploymentConfigs(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCodeDeploy_ListDeploymentGroups() { + svc := codedeploy.New(session.New()) + + params := &codedeploy.ListDeploymentGroupsInput{ + ApplicationName: aws.String("ApplicationName"), // Required + NextToken: aws.String("NextToken"), + } + resp, err := svc.ListDeploymentGroups(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCodeDeploy_ListDeploymentInstances() { + svc := codedeploy.New(session.New()) + + params := &codedeploy.ListDeploymentInstancesInput{ + DeploymentId: aws.String("DeploymentId"), // Required + InstanceStatusFilter: []*string{ + aws.String("InstanceStatus"), // Required + // More values... + }, + NextToken: aws.String("NextToken"), + } + resp, err := svc.ListDeploymentInstances(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCodeDeploy_ListDeployments() { + svc := codedeploy.New(session.New()) + + params := &codedeploy.ListDeploymentsInput{ + ApplicationName: aws.String("ApplicationName"), + CreateTimeRange: &codedeploy.TimeRange{ + End: aws.Time(time.Now()), + Start: aws.Time(time.Now()), + }, + DeploymentGroupName: aws.String("DeploymentGroupName"), + IncludeOnlyStatuses: []*string{ + aws.String("DeploymentStatus"), // Required + // More values... + }, + NextToken: aws.String("NextToken"), + } + resp, err := svc.ListDeployments(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleCodeDeploy_ListOnPremisesInstances() { + svc := codedeploy.New(session.New()) + + params := &codedeploy.ListOnPremisesInstancesInput{ + NextToken: aws.String("NextToken"), + RegistrationStatus: aws.String("RegistrationStatus"), + TagFilters: []*codedeploy.TagFilter{ + { // Required + Key: aws.String("Key"), + Type: aws.String("TagFilterType"), + Value: aws.String("Value"), + }, + // More values... + }, + } + resp, err := svc.ListOnPremisesInstances(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCodeDeploy_RegisterApplicationRevision() { + svc := codedeploy.New(session.New()) + + params := &codedeploy.RegisterApplicationRevisionInput{ + ApplicationName: aws.String("ApplicationName"), // Required + Revision: &codedeploy.RevisionLocation{ // Required + GitHubLocation: &codedeploy.GitHubLocation{ + CommitId: aws.String("CommitId"), + Repository: aws.String("Repository"), + }, + RevisionType: aws.String("RevisionLocationType"), + S3Location: &codedeploy.S3Location{ + Bucket: aws.String("S3Bucket"), + BundleType: aws.String("BundleType"), + ETag: aws.String("ETag"), + Key: aws.String("S3Key"), + Version: aws.String("VersionId"), + }, + }, + Description: aws.String("Description"), + } + resp, err := svc.RegisterApplicationRevision(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCodeDeploy_RegisterOnPremisesInstance() { + svc := codedeploy.New(session.New()) + + params := &codedeploy.RegisterOnPremisesInstanceInput{ + IamUserArn: aws.String("IamUserArn"), // Required + InstanceName: aws.String("InstanceName"), // Required + } + resp, err := svc.RegisterOnPremisesInstance(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCodeDeploy_RemoveTagsFromOnPremisesInstances() { + svc := codedeploy.New(session.New()) + + params := &codedeploy.RemoveTagsFromOnPremisesInstancesInput{ + InstanceNames: []*string{ // Required + aws.String("InstanceName"), // Required + // More values... + }, + Tags: []*codedeploy.Tag{ // Required + { // Required + Key: aws.String("Key"), + Value: aws.String("Value"), + }, + // More values... + }, + } + resp, err := svc.RemoveTagsFromOnPremisesInstances(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCodeDeploy_StopDeployment() { + svc := codedeploy.New(session.New()) + + params := &codedeploy.StopDeploymentInput{ + DeploymentId: aws.String("DeploymentId"), // Required + } + resp, err := svc.StopDeployment(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleCodeDeploy_UpdateApplication() { + svc := codedeploy.New(session.New()) + + params := &codedeploy.UpdateApplicationInput{ + ApplicationName: aws.String("ApplicationName"), + NewApplicationName: aws.String("ApplicationName"), + } + resp, err := svc.UpdateApplication(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCodeDeploy_UpdateDeploymentGroup() { + svc := codedeploy.New(session.New()) + + params := &codedeploy.UpdateDeploymentGroupInput{ + ApplicationName: aws.String("ApplicationName"), // Required + CurrentDeploymentGroupName: aws.String("DeploymentGroupName"), // Required + AutoScalingGroups: []*string{ + aws.String("AutoScalingGroupName"), // Required + // More values... + }, + DeploymentConfigName: aws.String("DeploymentConfigName"), + Ec2TagFilters: []*codedeploy.EC2TagFilter{ + { // Required + Key: aws.String("Key"), + Type: aws.String("EC2TagFilterType"), + Value: aws.String("Value"), + }, + // More values... + }, + NewDeploymentGroupName: aws.String("DeploymentGroupName"), + OnPremisesInstanceTagFilters: []*codedeploy.TagFilter{ + { // Required + Key: aws.String("Key"), + Type: aws.String("TagFilterType"), + Value: aws.String("Value"), + }, + // More values... + }, + ServiceRoleArn: aws.String("Role"), + } + resp, err := svc.UpdateDeploymentGroup(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/codedeploy/service.go b/vendor/github.com/aws/aws-sdk-go/service/codedeploy/service.go new file mode 100644 index 000000000..a799244fa --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/codedeploy/service.go @@ -0,0 +1,136 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +package codedeploy + +import ( + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/client" + "github.com/aws/aws-sdk-go/aws/client/metadata" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol/jsonrpc" + "github.com/aws/aws-sdk-go/private/signer/v4" +) + +// Overview This is the AWS CodeDeploy API Reference. This guide provides descriptions +// of the AWS CodeDeploy APIs. For additional information, see the AWS CodeDeploy +// User Guide (http://docs.aws.amazon.com/codedeploy/latest/userguide). +// +// Using the APIs You can use the AWS CodeDeploy APIs to work with the following +// items: +// +// Applications are unique identifiers that AWS CodeDeploy uses to ensure +// that the correct combinations of revisions, deployment configurations, and +// deployment groups are being referenced during deployments. +// +// You can use the AWS CodeDeploy APIs to create, delete, get, list, and update +// applications. +// +// Deployment configurations are sets of deployment rules and deployment +// success and failure conditions that AWS CodeDeploy uses during deployments. +// +// You can use the AWS CodeDeploy APIs to create, delete, get, and list deployment +// configurations. +// +// Deployment groups are groups of instances to which application revisions +// can be deployed. +// +// You can use the AWS CodeDeploy APIs to create, delete, get, list, and update +// deployment groups. 
+//
+// Instances represent Amazon EC2 instances to which application revisions
+// are deployed. Instances are identified by their Amazon EC2 tags or Auto Scaling
+// group names. Instances belong to deployment groups.
+//
+// You can use the AWS CodeDeploy APIs to get and list instances.
+//
+// Deployments represent the process of deploying revisions to instances.
+//
+// You can use the AWS CodeDeploy APIs to create, get, list, and stop deployments.
+//
+// Application revisions are archive files that are stored in Amazon S3 buckets
+// or GitHub repositories. These revisions contain source content (such as source
+// code, web pages, executable files, any deployment scripts, and similar) along
+// with an Application Specification file (AppSpec file). (The AppSpec file
+// is unique to AWS CodeDeploy; it defines a series of deployment actions that
+// you want AWS CodeDeploy to execute.) An application revision is uniquely
+// identified by its Amazon S3 object key and its ETag, version, or both (for
+// application revisions that are stored in Amazon S3 buckets) or by its repository
+// name and commit ID (for application revisions that are stored in GitHub
+// repositories). Application revisions are deployed through deployment groups.
+//
+// You can use the AWS CodeDeploy APIs to get, list, and register application
+// revisions.
+//
+// The service client's operations are safe to be used concurrently.
+// It is not safe to mutate any of the client's properties though.
+type CodeDeploy struct {
+	*client.Client
+}
+
+// Used for custom client initialization logic
+var initClient func(*client.Client)
+
+// Used for custom request initialization logic
+var initRequest func(*request.Request)
+
+// A ServiceName is the name of the service the client will make API calls to.
+const ServiceName = "codedeploy"
+
+// New creates a new instance of the CodeDeploy client with a session.
+// If additional configuration is needed for the client instance use the optional
+// aws.Config parameter to add your extra config.
+//
+// Example:
+//     // Create a CodeDeploy client from just a session.
+//     svc := codedeploy.New(mySession)
+//
+//     // Create a CodeDeploy client with additional configuration
+//     svc := codedeploy.New(mySession, aws.NewConfig().WithRegion("us-west-2"))
+func New(p client.ConfigProvider, cfgs ...*aws.Config) *CodeDeploy {
+	c := p.ClientConfig(ServiceName, cfgs...)
+	return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion)
+}
+
+// newClient creates, initializes and returns a new service client instance.
+func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *CodeDeploy {
+	svc := &CodeDeploy{
+		Client: client.New(
+			cfg,
+			metadata.ClientInfo{
+				ServiceName:   ServiceName,
+				SigningRegion: signingRegion,
+				Endpoint:      endpoint,
+				APIVersion:    "2014-10-06",
+				JSONVersion:   "1.1",
+				TargetPrefix:  "CodeDeploy_20141006",
+			},
+			handlers,
+		),
+	}
+
+	// Handlers
+	svc.Handlers.Sign.PushBack(v4.Sign)
+	svc.Handlers.Build.PushBack(jsonrpc.Build)
+	svc.Handlers.Unmarshal.PushBack(jsonrpc.Unmarshal)
+	svc.Handlers.UnmarshalMeta.PushBack(jsonrpc.UnmarshalMeta)
+	svc.Handlers.UnmarshalError.PushBack(jsonrpc.UnmarshalError)
+
+	// Run custom client initialization if present
+	if initClient != nil {
+		initClient(svc.Client)
+	}
+
+	return svc
+}
+
+// newRequest creates a new request for a CodeDeploy operation and runs any
+// custom request initialization.
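+//
+// The exported operations in this package follow a common two-step pattern:
+// a *Request method builds a request.Request plus a typed output, and Send
+// executes it. A minimal sketch, assuming the generated ListApplicationsRequest
+// constructor that pairs with the ListApplications method used in the examples
+// earlier in this patch:
+//
+//	svc := codedeploy.New(session.New())
+//	req, out := svc.ListApplicationsRequest(&codedeploy.ListApplicationsInput{})
+//	if err := req.Send(); err != nil { // Send executes the built request
+//		fmt.Println(err.Error())
+//		return
+//	}
+//	fmt.Println(out)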
+func (c *CodeDeploy) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + // Run custom request initialization if present + if initRequest != nil { + initRequest(req) + } + + return req +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/directoryservice/api.go b/vendor/github.com/aws/aws-sdk-go/service/directoryservice/api.go new file mode 100644 index 000000000..c374080e2 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/directoryservice/api.go @@ -0,0 +1,2179 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +// Package directoryservice provides a client for AWS Directory Service. +package directoryservice + +import ( + "time" + + "github.com/aws/aws-sdk-go/aws/awsutil" + "github.com/aws/aws-sdk-go/aws/request" +) + +const opConnectDirectory = "ConnectDirectory" + +// ConnectDirectoryRequest generates a request for the ConnectDirectory operation. +func (c *DirectoryService) ConnectDirectoryRequest(input *ConnectDirectoryInput) (req *request.Request, output *ConnectDirectoryOutput) { + op := &request.Operation{ + Name: opConnectDirectory, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ConnectDirectoryInput{} + } + + req = c.newRequest(op, input, output) + output = &ConnectDirectoryOutput{} + req.Data = output + return +} + +// Creates an AD Connector to connect to an on-premises directory. +func (c *DirectoryService) ConnectDirectory(input *ConnectDirectoryInput) (*ConnectDirectoryOutput, error) { + req, out := c.ConnectDirectoryRequest(input) + err := req.Send() + return out, err +} + +const opCreateAlias = "CreateAlias" + +// CreateAliasRequest generates a request for the CreateAlias operation. +func (c *DirectoryService) CreateAliasRequest(input *CreateAliasInput) (req *request.Request, output *CreateAliasOutput) { + op := &request.Operation{ + Name: opCreateAlias, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateAliasInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateAliasOutput{} + req.Data = output + return +} + +// Creates an alias for a directory and assigns the alias to the directory. +// The alias is used to construct the access URL for the directory, such as +// http://alias.awsapps.com. +// +// After an alias has been created, it cannot be deleted or reused, so this +// operation should only be used when absolutely necessary. +func (c *DirectoryService) CreateAlias(input *CreateAliasInput) (*CreateAliasOutput, error) { + req, out := c.CreateAliasRequest(input) + err := req.Send() + return out, err +} + +const opCreateComputer = "CreateComputer" + +// CreateComputerRequest generates a request for the CreateComputer operation. +func (c *DirectoryService) CreateComputerRequest(input *CreateComputerInput) (req *request.Request, output *CreateComputerOutput) { + op := &request.Operation{ + Name: opCreateComputer, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateComputerInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateComputerOutput{} + req.Data = output + return +} + +// Creates a computer account in the specified directory, and joins the computer +// to the directory. 
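+//
+// A minimal calling sketch, assuming the package's New constructor (it follows
+// the same pattern as the other services vendored in this patch); the directory
+// ID, computer name, and password are illustrative placeholders:
+//
+//	svc := directoryservice.New(session.New())
+//	resp, err := svc.CreateComputer(&directoryservice.CreateComputerInput{
+//		DirectoryId:  aws.String("d-1234567890"),    // placeholder directory ID
+//		ComputerName: aws.String("WEB-01"),          // placeholder computer name
+//		Password:     aws.String("OneTimePassw0rd"), // placeholder one-time join password
+//	})
+//	if err != nil {
+//		fmt.Println(err.Error())
+//		return
+//	}
+//	fmt.Println(resp)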
+func (c *DirectoryService) CreateComputer(input *CreateComputerInput) (*CreateComputerOutput, error) { + req, out := c.CreateComputerRequest(input) + err := req.Send() + return out, err +} + +const opCreateDirectory = "CreateDirectory" + +// CreateDirectoryRequest generates a request for the CreateDirectory operation. +func (c *DirectoryService) CreateDirectoryRequest(input *CreateDirectoryInput) (req *request.Request, output *CreateDirectoryOutput) { + op := &request.Operation{ + Name: opCreateDirectory, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateDirectoryInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateDirectoryOutput{} + req.Data = output + return +} + +// Creates a Simple AD directory. +func (c *DirectoryService) CreateDirectory(input *CreateDirectoryInput) (*CreateDirectoryOutput, error) { + req, out := c.CreateDirectoryRequest(input) + err := req.Send() + return out, err +} + +const opCreateMicrosoftAD = "CreateMicrosoftAD" + +// CreateMicrosoftADRequest generates a request for the CreateMicrosoftAD operation. +func (c *DirectoryService) CreateMicrosoftADRequest(input *CreateMicrosoftADInput) (req *request.Request, output *CreateMicrosoftADOutput) { + op := &request.Operation{ + Name: opCreateMicrosoftAD, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateMicrosoftADInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateMicrosoftADOutput{} + req.Data = output + return +} + +// Creates a Microsoft AD in the AWS cloud. +func (c *DirectoryService) CreateMicrosoftAD(input *CreateMicrosoftADInput) (*CreateMicrosoftADOutput, error) { + req, out := c.CreateMicrosoftADRequest(input) + err := req.Send() + return out, err +} + +const opCreateSnapshot = "CreateSnapshot" + +// CreateSnapshotRequest generates a request for the CreateSnapshot operation. +func (c *DirectoryService) CreateSnapshotRequest(input *CreateSnapshotInput) (req *request.Request, output *CreateSnapshotOutput) { + op := &request.Operation{ + Name: opCreateSnapshot, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateSnapshotInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateSnapshotOutput{} + req.Data = output + return +} + +// Creates a snapshot of a Simple AD directory. +// +// You cannot take snapshots of AD Connector directories. +func (c *DirectoryService) CreateSnapshot(input *CreateSnapshotInput) (*CreateSnapshotOutput, error) { + req, out := c.CreateSnapshotRequest(input) + err := req.Send() + return out, err +} + +const opCreateTrust = "CreateTrust" + +// CreateTrustRequest generates a request for the CreateTrust operation. +func (c *DirectoryService) CreateTrustRequest(input *CreateTrustInput) (req *request.Request, output *CreateTrustOutput) { + op := &request.Operation{ + Name: opCreateTrust, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateTrustInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateTrustOutput{} + req.Data = output + return +} + +// AWS Directory Service for Microsoft Active Directory allows you to configure +// trust relationships. For example, you can establish a trust between your +// Microsoft AD in the AWS cloud, and your existing on-premises Microsoft Active +// Directory. This would allow you to provide users and groups access to resources +// in either domain, with a single set of credentials. 
+// +// This action initiates the creation of the AWS side of a trust relationship +// between a Microsoft AD in the AWS cloud and an external domain. +func (c *DirectoryService) CreateTrust(input *CreateTrustInput) (*CreateTrustOutput, error) { + req, out := c.CreateTrustRequest(input) + err := req.Send() + return out, err +} + +const opDeleteDirectory = "DeleteDirectory" + +// DeleteDirectoryRequest generates a request for the DeleteDirectory operation. +func (c *DirectoryService) DeleteDirectoryRequest(input *DeleteDirectoryInput) (req *request.Request, output *DeleteDirectoryOutput) { + op := &request.Operation{ + Name: opDeleteDirectory, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteDirectoryInput{} + } + + req = c.newRequest(op, input, output) + output = &DeleteDirectoryOutput{} + req.Data = output + return +} + +// Deletes an AWS Directory Service directory. +func (c *DirectoryService) DeleteDirectory(input *DeleteDirectoryInput) (*DeleteDirectoryOutput, error) { + req, out := c.DeleteDirectoryRequest(input) + err := req.Send() + return out, err +} + +const opDeleteSnapshot = "DeleteSnapshot" + +// DeleteSnapshotRequest generates a request for the DeleteSnapshot operation. +func (c *DirectoryService) DeleteSnapshotRequest(input *DeleteSnapshotInput) (req *request.Request, output *DeleteSnapshotOutput) { + op := &request.Operation{ + Name: opDeleteSnapshot, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteSnapshotInput{} + } + + req = c.newRequest(op, input, output) + output = &DeleteSnapshotOutput{} + req.Data = output + return +} + +// Deletes a directory snapshot. +func (c *DirectoryService) DeleteSnapshot(input *DeleteSnapshotInput) (*DeleteSnapshotOutput, error) { + req, out := c.DeleteSnapshotRequest(input) + err := req.Send() + return out, err +} + +const opDeleteTrust = "DeleteTrust" + +// DeleteTrustRequest generates a request for the DeleteTrust operation. +func (c *DirectoryService) DeleteTrustRequest(input *DeleteTrustInput) (req *request.Request, output *DeleteTrustOutput) { + op := &request.Operation{ + Name: opDeleteTrust, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteTrustInput{} + } + + req = c.newRequest(op, input, output) + output = &DeleteTrustOutput{} + req.Data = output + return +} + +// Deletes an existing trust relationship between your Microsoft AD in the AWS +// cloud and an external domain. +func (c *DirectoryService) DeleteTrust(input *DeleteTrustInput) (*DeleteTrustOutput, error) { + req, out := c.DeleteTrustRequest(input) + err := req.Send() + return out, err +} + +const opDescribeDirectories = "DescribeDirectories" + +// DescribeDirectoriesRequest generates a request for the DescribeDirectories operation. +func (c *DirectoryService) DescribeDirectoriesRequest(input *DescribeDirectoriesInput) (req *request.Request, output *DescribeDirectoriesOutput) { + op := &request.Operation{ + Name: opDescribeDirectories, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeDirectoriesInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeDirectoriesOutput{} + req.Data = output + return +} + +// Obtains information about the directories that belong to this account. +// +// You can retrieve information about specific directories by passing the directory +// identifiers in the DirectoryIds parameter. Otherwise, all directories that +// belong to the current account are returned. 
+// +// This operation supports pagination with the use of the NextToken request +// and response parameters. If more results are available, the DescribeDirectoriesResult.NextToken +// member contains a token that you pass in the next call to DescribeDirectories +// to retrieve the next set of items. +// +// You can also specify a maximum number of return results with the Limit parameter. +func (c *DirectoryService) DescribeDirectories(input *DescribeDirectoriesInput) (*DescribeDirectoriesOutput, error) { + req, out := c.DescribeDirectoriesRequest(input) + err := req.Send() + return out, err +} + +const opDescribeSnapshots = "DescribeSnapshots" + +// DescribeSnapshotsRequest generates a request for the DescribeSnapshots operation. +func (c *DirectoryService) DescribeSnapshotsRequest(input *DescribeSnapshotsInput) (req *request.Request, output *DescribeSnapshotsOutput) { + op := &request.Operation{ + Name: opDescribeSnapshots, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeSnapshotsInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeSnapshotsOutput{} + req.Data = output + return +} + +// Obtains information about the directory snapshots that belong to this account. +// +// This operation supports pagination with the use of the NextToken request +// and response parameters. If more results are available, the DescribeSnapshots.NextToken +// member contains a token that you pass in the next call to DescribeSnapshots +// to retrieve the next set of items. +// +// You can also specify a maximum number of return results with the Limit parameter. +func (c *DirectoryService) DescribeSnapshots(input *DescribeSnapshotsInput) (*DescribeSnapshotsOutput, error) { + req, out := c.DescribeSnapshotsRequest(input) + err := req.Send() + return out, err +} + +const opDescribeTrusts = "DescribeTrusts" + +// DescribeTrustsRequest generates a request for the DescribeTrusts operation. +func (c *DirectoryService) DescribeTrustsRequest(input *DescribeTrustsInput) (req *request.Request, output *DescribeTrustsOutput) { + op := &request.Operation{ + Name: opDescribeTrusts, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeTrustsInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeTrustsOutput{} + req.Data = output + return +} + +// Obtains information about the trust relationships for this account. +// +// If no input parameters are provided, such as DirectoryId or TrustIds, this +// request describes all the trust relationships belonging to the account. +func (c *DirectoryService) DescribeTrusts(input *DescribeTrustsInput) (*DescribeTrustsOutput, error) { + req, out := c.DescribeTrustsRequest(input) + err := req.Send() + return out, err +} + +const opDisableRadius = "DisableRadius" + +// DisableRadiusRequest generates a request for the DisableRadius operation. +func (c *DirectoryService) DisableRadiusRequest(input *DisableRadiusInput) (req *request.Request, output *DisableRadiusOutput) { + op := &request.Operation{ + Name: opDisableRadius, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DisableRadiusInput{} + } + + req = c.newRequest(op, input, output) + output = &DisableRadiusOutput{} + req.Data = output + return +} + +// Disables multi-factor authentication (MFA) with the Remote Authentication +// Dial In User Service (RADIUS) server for an AD Connector directory. 
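+//
+// A minimal calling sketch; DisableRadiusInput, defined later in this file,
+// takes only the directory identifier, and the ID shown is a placeholder:
+//
+//	resp, err := svc.DisableRadius(&directoryservice.DisableRadiusInput{
+//		DirectoryId: aws.String("d-1234567890"), // placeholder directory ID
+//	})
+//	if err != nil {
+//		fmt.Println(err.Error())
+//	}
+//	_ = resp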
+func (c *DirectoryService) DisableRadius(input *DisableRadiusInput) (*DisableRadiusOutput, error) { + req, out := c.DisableRadiusRequest(input) + err := req.Send() + return out, err +} + +const opDisableSso = "DisableSso" + +// DisableSsoRequest generates a request for the DisableSso operation. +func (c *DirectoryService) DisableSsoRequest(input *DisableSsoInput) (req *request.Request, output *DisableSsoOutput) { + op := &request.Operation{ + Name: opDisableSso, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DisableSsoInput{} + } + + req = c.newRequest(op, input, output) + output = &DisableSsoOutput{} + req.Data = output + return +} + +// Disables single-sign on for a directory. +func (c *DirectoryService) DisableSso(input *DisableSsoInput) (*DisableSsoOutput, error) { + req, out := c.DisableSsoRequest(input) + err := req.Send() + return out, err +} + +const opEnableRadius = "EnableRadius" + +// EnableRadiusRequest generates a request for the EnableRadius operation. +func (c *DirectoryService) EnableRadiusRequest(input *EnableRadiusInput) (req *request.Request, output *EnableRadiusOutput) { + op := &request.Operation{ + Name: opEnableRadius, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &EnableRadiusInput{} + } + + req = c.newRequest(op, input, output) + output = &EnableRadiusOutput{} + req.Data = output + return +} + +// Enables multi-factor authentication (MFA) with the Remote Authentication +// Dial In User Service (RADIUS) server for an AD Connector directory. +func (c *DirectoryService) EnableRadius(input *EnableRadiusInput) (*EnableRadiusOutput, error) { + req, out := c.EnableRadiusRequest(input) + err := req.Send() + return out, err +} + +const opEnableSso = "EnableSso" + +// EnableSsoRequest generates a request for the EnableSso operation. +func (c *DirectoryService) EnableSsoRequest(input *EnableSsoInput) (req *request.Request, output *EnableSsoOutput) { + op := &request.Operation{ + Name: opEnableSso, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &EnableSsoInput{} + } + + req = c.newRequest(op, input, output) + output = &EnableSsoOutput{} + req.Data = output + return +} + +// Enables single-sign on for a directory. +func (c *DirectoryService) EnableSso(input *EnableSsoInput) (*EnableSsoOutput, error) { + req, out := c.EnableSsoRequest(input) + err := req.Send() + return out, err +} + +const opGetDirectoryLimits = "GetDirectoryLimits" + +// GetDirectoryLimitsRequest generates a request for the GetDirectoryLimits operation. +func (c *DirectoryService) GetDirectoryLimitsRequest(input *GetDirectoryLimitsInput) (req *request.Request, output *GetDirectoryLimitsOutput) { + op := &request.Operation{ + Name: opGetDirectoryLimits, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetDirectoryLimitsInput{} + } + + req = c.newRequest(op, input, output) + output = &GetDirectoryLimitsOutput{} + req.Data = output + return +} + +// Obtains directory limit information for the current region. +func (c *DirectoryService) GetDirectoryLimits(input *GetDirectoryLimitsInput) (*GetDirectoryLimitsOutput, error) { + req, out := c.GetDirectoryLimitsRequest(input) + err := req.Send() + return out, err +} + +const opGetSnapshotLimits = "GetSnapshotLimits" + +// GetSnapshotLimitsRequest generates a request for the GetSnapshotLimits operation. 
+func (c *DirectoryService) GetSnapshotLimitsRequest(input *GetSnapshotLimitsInput) (req *request.Request, output *GetSnapshotLimitsOutput) { + op := &request.Operation{ + Name: opGetSnapshotLimits, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetSnapshotLimitsInput{} + } + + req = c.newRequest(op, input, output) + output = &GetSnapshotLimitsOutput{} + req.Data = output + return +} + +// Obtains the manual snapshot limits for a directory. +func (c *DirectoryService) GetSnapshotLimits(input *GetSnapshotLimitsInput) (*GetSnapshotLimitsOutput, error) { + req, out := c.GetSnapshotLimitsRequest(input) + err := req.Send() + return out, err +} + +const opRestoreFromSnapshot = "RestoreFromSnapshot" + +// RestoreFromSnapshotRequest generates a request for the RestoreFromSnapshot operation. +func (c *DirectoryService) RestoreFromSnapshotRequest(input *RestoreFromSnapshotInput) (req *request.Request, output *RestoreFromSnapshotOutput) { + op := &request.Operation{ + Name: opRestoreFromSnapshot, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &RestoreFromSnapshotInput{} + } + + req = c.newRequest(op, input, output) + output = &RestoreFromSnapshotOutput{} + req.Data = output + return +} + +// Restores a directory using an existing directory snapshot. +// +// When you restore a directory from a snapshot, any changes made to the directory +// after the snapshot date are overwritten. +// +// This action returns as soon as the restore operation is initiated. You can +// monitor the progress of the restore operation by calling the DescribeDirectories +// operation with the directory identifier. When the DirectoryDescription.Stage +// value changes to Active, the restore operation is complete. +func (c *DirectoryService) RestoreFromSnapshot(input *RestoreFromSnapshotInput) (*RestoreFromSnapshotOutput, error) { + req, out := c.RestoreFromSnapshotRequest(input) + err := req.Send() + return out, err +} + +const opUpdateRadius = "UpdateRadius" + +// UpdateRadiusRequest generates a request for the UpdateRadius operation. +func (c *DirectoryService) UpdateRadiusRequest(input *UpdateRadiusInput) (req *request.Request, output *UpdateRadiusOutput) { + op := &request.Operation{ + Name: opUpdateRadius, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UpdateRadiusInput{} + } + + req = c.newRequest(op, input, output) + output = &UpdateRadiusOutput{} + req.Data = output + return +} + +// Updates the Remote Authentication Dial In User Service (RADIUS) server information +// for an AD Connector directory. +func (c *DirectoryService) UpdateRadius(input *UpdateRadiusInput) (*UpdateRadiusOutput, error) { + req, out := c.UpdateRadiusRequest(input) + err := req.Send() + return out, err +} + +const opVerifyTrust = "VerifyTrust" + +// VerifyTrustRequest generates a request for the VerifyTrust operation. +func (c *DirectoryService) VerifyTrustRequest(input *VerifyTrustInput) (req *request.Request, output *VerifyTrustOutput) { + op := &request.Operation{ + Name: opVerifyTrust, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &VerifyTrustInput{} + } + + req = c.newRequest(op, input, output) + output = &VerifyTrustOutput{} + req.Data = output + return +} + +// AWS Directory Service for Microsoft Active Directory allows you to configure +// and verify trust relationships. +// +// This action verifies a trust relationship between your Microsoft AD in the +// AWS cloud and an external domain. 
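+//
+// A minimal calling sketch; the TrustId field is assumed here by analogy with
+// DeleteTrustInput above, and the identifier is a placeholder:
+//
+//	resp, err := svc.VerifyTrust(&directoryservice.VerifyTrustInput{
+//		TrustId: aws.String("t-1234567890"), // assumed field name; placeholder ID
+//	})
+//	if err != nil {
+//		fmt.Println(err.Error())
+//	}
+//	_ = resp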
+func (c *DirectoryService) VerifyTrust(input *VerifyTrustInput) (*VerifyTrustOutput, error) { + req, out := c.VerifyTrustRequest(input) + err := req.Send() + return out, err +} + +// Represents a named directory attribute. +type Attribute struct { + _ struct{} `type:"structure"` + + // The name of the attribute. + Name *string `min:"1" type:"string"` + + // The value of the attribute. + Value *string `type:"string"` +} + +// String returns the string representation +func (s Attribute) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Attribute) GoString() string { + return s.String() +} + +// Contains information about a computer account in a directory. +type Computer struct { + _ struct{} `type:"structure"` + + // An array of Attribute objects containing the LDAP attributes that belong + // to the computer account. + ComputerAttributes []*Attribute `type:"list"` + + // The identifier of the computer. + ComputerId *string `min:"1" type:"string"` + + // The computer name. + ComputerName *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s Computer) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Computer) GoString() string { + return s.String() +} + +// Contains the inputs for the ConnectDirectory operation. +type ConnectDirectoryInput struct { + _ struct{} `type:"structure"` + + // A DirectoryConnectSettings object that contains additional information for + // the operation. + ConnectSettings *DirectoryConnectSettings `type:"structure" required:"true"` + + // A textual description for the directory. + Description *string `type:"string"` + + // The fully-qualified name of the on-premises directory, such as corp.example.com. + Name *string `type:"string" required:"true"` + + // The password for the on-premises user account. + Password *string `min:"1" type:"string" required:"true"` + + // The NetBIOS name of the on-premises directory, such as CORP. + ShortName *string `type:"string"` + + // The size of the directory. + Size *string `type:"string" required:"true" enum:"DirectorySize"` +} + +// String returns the string representation +func (s ConnectDirectoryInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ConnectDirectoryInput) GoString() string { + return s.String() +} + +// Contains the results of the ConnectDirectory operation. +type ConnectDirectoryOutput struct { + _ struct{} `type:"structure"` + + // The identifier of the new directory. + DirectoryId *string `type:"string"` +} + +// String returns the string representation +func (s ConnectDirectoryOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ConnectDirectoryOutput) GoString() string { + return s.String() +} + +// Contains the inputs for the CreateAlias operation. +type CreateAliasInput struct { + _ struct{} `type:"structure"` + + // The requested alias. + // + // The alias must be unique amongst all aliases in AWS. This operation throws + // an EntityAlreadyExistsException error if the alias already exists. + Alias *string `min:"1" type:"string" required:"true"` + + // The identifier of the directory for which to create the alias. 
+ DirectoryId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s CreateAliasInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateAliasInput) GoString() string { + return s.String() +} + +// Contains the results of the CreateAlias operation. +type CreateAliasOutput struct { + _ struct{} `type:"structure"` + + // The alias for the directory. + Alias *string `min:"1" type:"string"` + + // The identifier of the directory. + DirectoryId *string `type:"string"` +} + +// String returns the string representation +func (s CreateAliasOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateAliasOutput) GoString() string { + return s.String() +} + +// Contains the inputs for the CreateComputer operation. +type CreateComputerInput struct { + _ struct{} `type:"structure"` + + // An array of Attribute objects that contain any LDAP attributes to apply to + // the computer account. + ComputerAttributes []*Attribute `type:"list"` + + // The name of the computer account. + ComputerName *string `min:"1" type:"string" required:"true"` + + // The identifier of the directory in which to create the computer account. + DirectoryId *string `type:"string" required:"true"` + + // The fully-qualified distinguished name of the organizational unit to place + // the computer account in. + OrganizationalUnitDistinguishedName *string `min:"1" type:"string"` + + // A one-time password that is used to join the computer to the directory. You + // should generate a random, strong password to use for this parameter. + Password *string `min:"8" type:"string" required:"true"` +} + +// String returns the string representation +func (s CreateComputerInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateComputerInput) GoString() string { + return s.String() +} + +// Contains the results for the CreateComputer operation. +type CreateComputerOutput struct { + _ struct{} `type:"structure"` + + // A Computer object that represents the computer account. + Computer *Computer `type:"structure"` +} + +// String returns the string representation +func (s CreateComputerOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateComputerOutput) GoString() string { + return s.String() +} + +// Contains the inputs for the CreateDirectory operation. +type CreateDirectoryInput struct { + _ struct{} `type:"structure"` + + // A textual description for the directory. + Description *string `type:"string"` + + // The fully qualified name for the directory, such as corp.example.com. + Name *string `type:"string" required:"true"` + + // The password for the directory administrator. The directory creation process + // creates a directory administrator account with the username Administrator + // and this password. + Password *string `type:"string" required:"true"` + + // The short name of the directory, such as CORP. + ShortName *string `type:"string"` + + // The size of the directory. + Size *string `type:"string" required:"true" enum:"DirectorySize"` + + // A DirectoryVpcSettings object that contains additional information for the + // operation. 
+ VpcSettings *DirectoryVpcSettings `type:"structure"` +} + +// String returns the string representation +func (s CreateDirectoryInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateDirectoryInput) GoString() string { + return s.String() +} + +// Contains the results of the CreateDirectory operation. +type CreateDirectoryOutput struct { + _ struct{} `type:"structure"` + + // The identifier of the directory that was created. + DirectoryId *string `type:"string"` +} + +// String returns the string representation +func (s CreateDirectoryOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateDirectoryOutput) GoString() string { + return s.String() +} + +// Creates a Microsoft AD in the AWS cloud. +type CreateMicrosoftADInput struct { + _ struct{} `type:"structure"` + + // A textual description for the directory. This label will appear on the AWS + // console Directory Details page after the directory is created. + Description *string `type:"string"` + + // The fully qualified domain name for the directory, such as corp.example.com. + // This name will resolve inside your VPC only. It does not need to be publicly + // resolvable. + Name *string `type:"string" required:"true"` + + // The password for the default administrative user named Admin. + Password *string `type:"string" required:"true"` + + // The NetBIOS name for your domain. A short identifier for your domain, such + // as CORP. If you don't specify a NetBIOS name, it will default to the first + // part of your directory DNS. For example, CORP for the directory DNS corp.example.com. + ShortName *string `type:"string"` + + // Contains VPC information for the CreateDirectory or CreateMicrosoftAD operation. + VpcSettings *DirectoryVpcSettings `type:"structure" required:"true"` +} + +// String returns the string representation +func (s CreateMicrosoftADInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateMicrosoftADInput) GoString() string { + return s.String() +} + +type CreateMicrosoftADOutput struct { + _ struct{} `type:"structure"` + + // The identifier of the directory that was created. + DirectoryId *string `type:"string"` +} + +// String returns the string representation +func (s CreateMicrosoftADOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateMicrosoftADOutput) GoString() string { + return s.String() +} + +// Contains the inputs for the CreateSnapshot operation. +type CreateSnapshotInput struct { + _ struct{} `type:"structure"` + + // The identifier of the directory to take a snapshot of. + DirectoryId *string `type:"string" required:"true"` + + // The descriptive name to apply to the snapshot. + Name *string `type:"string"` +} + +// String returns the string representation +func (s CreateSnapshotInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateSnapshotInput) GoString() string { + return s.String() +} + +// Contains the results of the CreateSnapshot operation. +type CreateSnapshotOutput struct { + _ struct{} `type:"structure"` + + // The identifier of the snapshot that was created. 
+	SnapshotId *string `type:"string"`
+}
+
+// String returns the string representation
+func (s CreateSnapshotOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreateSnapshotOutput) GoString() string {
+	return s.String()
+}
+
+// AWS Directory Service for Microsoft Active Directory allows you to configure
+// trust relationships. For example, you can establish a trust between your
+// Microsoft AD in the AWS cloud, and your existing on-premises Microsoft Active
+// Directory. This would allow you to provide users and groups access to resources
+// in either domain, with a single set of credentials.
+//
+// This action initiates the creation of the AWS side of a trust relationship
+// between a Microsoft AD in the AWS cloud and an external domain.
+type CreateTrustInput struct {
+	_ struct{} `type:"structure"`
+
+	// The Directory ID of the Microsoft AD in the AWS cloud for which to establish
+	// the trust relationship.
+	DirectoryId *string `type:"string" required:"true"`
+
+	// The Fully Qualified Domain Name (FQDN) of the external domain for which to
+	// create the trust relationship.
+	RemoteDomainName *string `type:"string" required:"true"`
+
+	// The direction of the trust relationship.
+	TrustDirection *string `type:"string" required:"true" enum:"TrustDirection"`
+
+	// The trust password. This must be the same password that was used when creating
+	// the trust relationship on the external domain.
+	TrustPassword *string `min:"1" type:"string" required:"true"`
+
+	// The trust relationship type.
+	TrustType *string `type:"string" enum:"TrustType"`
+}
+
+// String returns the string representation
+func (s CreateTrustInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreateTrustInput) GoString() string {
+	return s.String()
+}
+
+type CreateTrustOutput struct {
+	_ struct{} `type:"structure"`
+
+	// A unique identifier for the trust relationship that was created.
+	TrustId *string `type:"string"`
+}
+
+// String returns the string representation
+func (s CreateTrustOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreateTrustOutput) GoString() string {
+	return s.String()
+}
+
+// Contains the inputs for the DeleteDirectory operation.
+type DeleteDirectoryInput struct {
+	_ struct{} `type:"structure"`
+
+	// The identifier of the directory to delete.
+	DirectoryId *string `type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s DeleteDirectoryInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteDirectoryInput) GoString() string {
+	return s.String()
+}
+
+// Contains the results of the DeleteDirectory operation.
+type DeleteDirectoryOutput struct {
+	_ struct{} `type:"structure"`
+
+	// The directory identifier.
+	DirectoryId *string `type:"string"`
+}
+
+// String returns the string representation
+func (s DeleteDirectoryOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteDirectoryOutput) GoString() string {
+	return s.String()
+}
+
+// Contains the inputs for the DeleteSnapshot operation.
+type DeleteSnapshotInput struct {
+	_ struct{} `type:"structure"`
+
+	// The identifier of the directory snapshot to be deleted.
+ SnapshotId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteSnapshotInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteSnapshotInput) GoString() string { + return s.String() +} + +// Contains the results of the DeleteSnapshot operation. +type DeleteSnapshotOutput struct { + _ struct{} `type:"structure"` + + // The identifier of the directory snapshot that was deleted. + SnapshotId *string `type:"string"` +} + +// String returns the string representation +func (s DeleteSnapshotOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteSnapshotOutput) GoString() string { + return s.String() +} + +// Deletes the local side of an existing trust relationship between the Microsoft +// AD in the AWS cloud and the external domain. +type DeleteTrustInput struct { + _ struct{} `type:"structure"` + + // The Trust ID of the trust relationship to be deleted. + TrustId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteTrustInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteTrustInput) GoString() string { + return s.String() +} + +type DeleteTrustOutput struct { + _ struct{} `type:"structure"` + + // The Trust ID of the trust relationship that was deleted. + TrustId *string `type:"string"` +} + +// String returns the string representation +func (s DeleteTrustOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteTrustOutput) GoString() string { + return s.String() +} + +// Contains the inputs for the DescribeDirectories operation. +type DescribeDirectoriesInput struct { + _ struct{} `type:"structure"` + + // A list of identifiers of the directories for which to obtain the information. + // If this member is null, all directories that belong to the current account + // are returned. + // + // An empty list results in an InvalidParameterException being thrown. + DirectoryIds []*string `type:"list"` + + // The maximum number of items to return. If this value is zero, the maximum + // number of items is specified by the limitations of the operation. + Limit *int64 `type:"integer"` + + // The DescribeDirectoriesResult.NextToken value from a previous call to DescribeDirectories. + // Pass null if this is the first call. + NextToken *string `type:"string"` +} + +// String returns the string representation +func (s DescribeDirectoriesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeDirectoriesInput) GoString() string { + return s.String() +} + +// Contains the results of the DescribeDirectories operation. +type DescribeDirectoriesOutput struct { + _ struct{} `type:"structure"` + + // The list of DirectoryDescription objects that were retrieved. + // + // It is possible that this list contains less than the number of items specified + // in the Limit member of the request. This occurs if there are less than the + // requested number of items left to retrieve, or if the limitations of the + // operation have been exceeded. + DirectoryDescriptions []*DirectoryDescription `type:"list"` + + // If not null, more results are available. Pass this value for the NextToken + // parameter in a subsequent call to DescribeDirectories to retrieve the next + // set of items. 
+	NextToken *string `type:"string"`
+}
+
+// String returns the string representation
+func (s DescribeDirectoriesOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeDirectoriesOutput) GoString() string {
+	return s.String()
+}
+
+// Contains the inputs for the DescribeSnapshots operation.
+type DescribeSnapshotsInput struct {
+	_ struct{} `type:"structure"`
+
+	// The identifier of the directory for which to retrieve snapshot information.
+	DirectoryId *string `type:"string"`
+
+	// The maximum number of objects to return.
+	Limit *int64 `type:"integer"`
+
+	// The DescribeSnapshotsResult.NextToken value from a previous call to DescribeSnapshots.
+	// Pass null if this is the first call.
+	NextToken *string `type:"string"`
+
+	// A list of identifiers of the snapshots to obtain the information for. If
+	// this member is null or empty, all snapshots are returned using the Limit
+	// and NextToken members.
+	SnapshotIds []*string `type:"list"`
+}
+
+// String returns the string representation
+func (s DescribeSnapshotsInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeSnapshotsInput) GoString() string {
+	return s.String()
+}
+
+// Contains the results of the DescribeSnapshots operation.
+type DescribeSnapshotsOutput struct {
+	_ struct{} `type:"structure"`
+
+	// If not null, more results are available. Pass this value in the NextToken
+	// member of a subsequent call to DescribeSnapshots.
+	NextToken *string `type:"string"`
+
+	// The list of Snapshot objects that were retrieved.
+	//
+	// It is possible that this list contains less than the number of items specified
+	// in the Limit member of the request. This occurs if there are less than the
+	// requested number of items left to retrieve, or if the limitations of the
+	// operation have been exceeded.
+	Snapshots []*Snapshot `type:"list"`
+}
+
+// String returns the string representation
+func (s DescribeSnapshotsOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeSnapshotsOutput) GoString() string {
+	return s.String()
+}
+
+// Describes the trust relationships for a particular Microsoft AD in the AWS
+// cloud. If no input parameters are provided, such as directory ID or trust
+// ID, this request describes all the trust relationships.
+type DescribeTrustsInput struct {
+	_ struct{} `type:"structure"`
+
+	// The Directory ID of the AWS directory that is a part of the requested trust
+	// relationship.
+	DirectoryId *string `type:"string"`
+
+	// The maximum number of objects to return.
+	Limit *int64 `type:"integer"`
+
+	// The DescribeTrustsResult.NextToken value from a previous call to DescribeTrusts.
+	// Pass null if this is the first call.
+	NextToken *string `type:"string"`
+
+	// A list of identifiers of the trust relationships for which to obtain the
+	// information. If this member is null, all trust relationships that belong
+	// to the current account are returned.
+	//
+	// An empty list results in an InvalidParameterException being thrown.
+ TrustIds []*string `type:"list"` +} + +// String returns the string representation +func (s DescribeTrustsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeTrustsInput) GoString() string { + return s.String() +} + +type DescribeTrustsOutput struct { + _ struct{} `type:"structure"` + + // If not null, more results are available. Pass this value for the NextToken + // parameter in a subsequent call to DescribeTrusts to retrieve the next set + // of items. + NextToken *string `type:"string"` + + // The list of Trust objects that were retrieved. + // + // It is possible that this list contains less than the number of items specified + // in the Limit member of the request. This occurs if there are less than the + // requested number of items left to retrieve, or if the limitations of the + // operation have been exceeded. + Trusts []*Trust `type:"list"` +} + +// String returns the string representation +func (s DescribeTrustsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeTrustsOutput) GoString() string { + return s.String() +} + +// Contains information for the ConnectDirectory operation when an AD Connector +// directory is being created. +type DirectoryConnectSettings struct { + _ struct{} `type:"structure"` + + // A list of one or more IP addresses of DNS servers or domain controllers in + // the on-premises directory. + CustomerDnsIps []*string `type:"list" required:"true"` + + // The username of an account in the on-premises directory that is used to connect + // to the directory. This account must have the following privileges: + // + // Read users and groups Create computer objects Join computers to the domain + CustomerUserName *string `min:"1" type:"string" required:"true"` + + // A list of subnet identifiers in the VPC in which the AD Connector is created. + SubnetIds []*string `type:"list" required:"true"` + + // The identifier of the VPC in which the AD Connector is created. + VpcId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DirectoryConnectSettings) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DirectoryConnectSettings) GoString() string { + return s.String() +} + +// Contains information about an AD Connector directory. +type DirectoryConnectSettingsDescription struct { + _ struct{} `type:"structure"` + + // A list of the Availability Zones that the directory is in. + AvailabilityZones []*string `type:"list"` + + // The IP addresses of the AD Connector servers. + ConnectIps []*string `type:"list"` + + // The username of the service account in the on-premises directory. + CustomerUserName *string `min:"1" type:"string"` + + // The security group identifier for the AD Connector directory. + SecurityGroupId *string `type:"string"` + + // A list of subnet identifiers in the VPC that the AD connector is in. + SubnetIds []*string `type:"list"` + + // The identifier of the VPC that the AD Connector is in. + VpcId *string `type:"string"` +} + +// String returns the string representation +func (s DirectoryConnectSettingsDescription) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DirectoryConnectSettingsDescription) GoString() string { + return s.String() +} + +// Contains information about an AWS Directory Service directory. 
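+//
+// These descriptions are returned by DescribeDirectories; a minimal sketch
+// that pages through every directory with NextToken, per the pagination notes
+// on that operation (pointer fields are dereferenced without nil checks for
+// brevity):
+//
+//	input := &directoryservice.DescribeDirectoriesInput{}
+//	for {
+//		out, err := svc.DescribeDirectories(input)
+//		if err != nil {
+//			fmt.Println(err.Error())
+//			break
+//		}
+//		for _, d := range out.DirectoryDescriptions {
+//			fmt.Println(*d.DirectoryId, *d.Stage) // e.g. watch for Stage "Active"
+//		}
+//		if out.NextToken == nil {
+//			break // no more pages
+//		}
+//		input.NextToken = out.NextToken
+//	}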
+type DirectoryDescription struct {
+	_ struct{} `type:"structure"`
+
+	// The access URL for the directory, such as http://alias.awsapps.com. If no
+	// alias has been created for the directory, alias is the directory identifier,
+	// such as d-XXXXXXXXXX.
+	AccessUrl *string `min:"1" type:"string"`
+
+	// The alias for the directory. If no alias has been created for the directory,
+	// the alias is the directory identifier, such as d-XXXXXXXXXX.
+	Alias *string `min:"1" type:"string"`
+
+	// A DirectoryConnectSettingsDescription object that contains additional information
+	// about an AD Connector directory. This member is only present if the directory
+	// is an AD Connector directory.
+	ConnectSettings *DirectoryConnectSettingsDescription `type:"structure"`
+
+	// The textual description for the directory.
+	Description *string `type:"string"`
+
+	// The directory identifier.
+	DirectoryId *string `type:"string"`
+
+	// The IP addresses of the DNS servers for the directory. For a Simple AD or
+	// Microsoft AD directory, these are the IP addresses of the Simple AD or Microsoft
+	// AD directory servers. For an AD Connector directory, these are the IP addresses
+	// of the DNS servers or domain controllers in the on-premises directory to
+	// which the AD Connector is connected.
+	DnsIpAddrs []*string `type:"list"`
+
+	// Specifies when the directory was created.
+	LaunchTime *time.Time `type:"timestamp" timestampFormat:"unix"`
+
+	// The fully-qualified name of the directory.
+	Name *string `type:"string"`
+
+	// A RadiusSettings object that contains information about the RADIUS server
+	// configured for this directory.
+	RadiusSettings *RadiusSettings `type:"structure"`
+
+	// The status of the RADIUS MFA server connection.
+	RadiusStatus *string `type:"string" enum:"RadiusStatus"`
+
+	// The short name of the directory.
+	ShortName *string `type:"string"`
+
+	// The directory size.
+	Size *string `type:"string" enum:"DirectorySize"`
+
+	// Indicates if single-sign on is enabled for the directory. For more information,
+	// see EnableSso and DisableSso.
+	SsoEnabled *bool `type:"boolean"`
+
+	// The current stage of the directory.
+	Stage *string `type:"string" enum:"DirectoryStage"`
+
+	// The date and time that the stage was last updated.
+	StageLastUpdatedDateTime *time.Time `type:"timestamp" timestampFormat:"unix"`
+
+	// Additional information about the directory stage.
+	StageReason *string `type:"string"`
+
+	// The directory type.
+	Type *string `type:"string" enum:"DirectoryType"`
+
+	// A DirectoryVpcSettingsDescription object that contains additional information
+	// about a directory. This member is only present if the directory is a Simple
+	// AD or Managed AD directory.
+	VpcSettings *DirectoryVpcSettingsDescription `type:"structure"`
+}
+
+// String returns the string representation
+func (s DirectoryDescription) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DirectoryDescription) GoString() string {
+	return s.String()
+}
+
+// Contains directory limit information for a region.
+type DirectoryLimits struct {
+	_ struct{} `type:"structure"`
+
+	// The current number of cloud directories in the region.
+	CloudOnlyDirectoriesCurrentCount *int64 `type:"integer"`
+
+	// The maximum number of cloud directories allowed in the region.
+	CloudOnlyDirectoriesLimit *int64 `type:"integer"`
+
+	// Indicates if the cloud directory limit has been reached.
+ CloudOnlyDirectoriesLimitReached *bool `type:"boolean"` + + // The current number of Microsoft AD directories in the region. + CloudOnlyMicrosoftADCurrentCount *int64 `type:"integer"` + + // The maximum number of Microsoft AD directories allowed in the region. + CloudOnlyMicrosoftADLimit *int64 `type:"integer"` + + // Indicates if the Microsoft AD directory limit has been reached. + CloudOnlyMicrosoftADLimitReached *bool `type:"boolean"` + + // The current number of connected directories in the region. + ConnectedDirectoriesCurrentCount *int64 `type:"integer"` + + // The maximum number of connected directories allowed in the region. + ConnectedDirectoriesLimit *int64 `type:"integer"` + + // Indicates if the connected directory limit has been reached. + ConnectedDirectoriesLimitReached *bool `type:"boolean"` +} + +// String returns the string representation +func (s DirectoryLimits) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DirectoryLimits) GoString() string { + return s.String() +} + +// Contains VPC information for the CreateDirectory or CreateMicrosoftAD operation. +type DirectoryVpcSettings struct { + _ struct{} `type:"structure"` + + // The identifiers of the subnets for the directory servers. The two subnets + // must be in different Availability Zones. AWS Directory Service creates a + // directory server and a DNS server in each of these subnets. + SubnetIds []*string `type:"list" required:"true"` + + // The identifier of the VPC in which to create the directory. + VpcId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DirectoryVpcSettings) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DirectoryVpcSettings) GoString() string { + return s.String() +} + +// Contains information about the directory. +type DirectoryVpcSettingsDescription struct { + _ struct{} `type:"structure"` + + // The list of Availability Zones that the directory is in. + AvailabilityZones []*string `type:"list"` + + // The security group identifier for the directory. If the directory was created + // before 8/1/2014, this is the identifier of the directory members security + // group that was created when the directory was created. If the directory was + // created after this date, this value is null. + SecurityGroupId *string `type:"string"` + + // The identifiers of the subnets for the directory servers. + SubnetIds []*string `type:"list"` + + // The identifier of the VPC that the directory is in. + VpcId *string `type:"string"` +} + +// String returns the string representation +func (s DirectoryVpcSettingsDescription) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DirectoryVpcSettingsDescription) GoString() string { + return s.String() +} + +// Contains the inputs for the DisableRadius operation. +type DisableRadiusInput struct { + _ struct{} `type:"structure"` + + // The identifier of the directory for which to disable MFA. + DirectoryId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DisableRadiusInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DisableRadiusInput) GoString() string { + return s.String() +} + +// Contains the results of the DisableRadius operation. 
+type DisableRadiusOutput struct {
+ _ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s DisableRadiusOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DisableRadiusOutput) GoString() string {
+ return s.String()
+}
+
+// Contains the inputs for the DisableSso operation.
+type DisableSsoInput struct {
+ _ struct{} `type:"structure"`
+
+ // The identifier of the directory for which to disable single sign-on.
+ DirectoryId *string `type:"string" required:"true"`
+
+ // The password of an alternate account to use to disable single sign-on. This
+ // is only used for AD Connector directories. For more information, see the
+ // UserName parameter.
+ Password *string `min:"1" type:"string"`
+
+ // The username of an alternate account to use to disable single sign-on. This
+ // is only used for AD Connector directories. This account must have privileges
+ // to remove a service principal name.
+ //
+ // If the AD Connector service account does not have privileges to remove a
+ // service principal name, you can specify an alternate account with the UserName
+ // and Password parameters. These credentials are only used to disable single
+ // sign-on and are not stored by the service. The AD Connector service account
+ // is not changed.
+ UserName *string `min:"1" type:"string"`
+}
+
+// String returns the string representation
+func (s DisableSsoInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DisableSsoInput) GoString() string {
+ return s.String()
+}
+
+// Contains the results of the DisableSso operation.
+type DisableSsoOutput struct {
+ _ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s DisableSsoOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DisableSsoOutput) GoString() string {
+ return s.String()
+}
+
+// Contains the inputs for the EnableRadius operation.
+type EnableRadiusInput struct {
+ _ struct{} `type:"structure"`
+
+ // The identifier of the directory for which to enable MFA.
+ DirectoryId *string `type:"string" required:"true"`
+
+ // A RadiusSettings object that contains information about the RADIUS server.
+ RadiusSettings *RadiusSettings `type:"structure" required:"true"`
+}
+
+// String returns the string representation
+func (s EnableRadiusInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s EnableRadiusInput) GoString() string {
+ return s.String()
+}
+
+// Contains the results of the EnableRadius operation.
+type EnableRadiusOutput struct {
+ _ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s EnableRadiusOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s EnableRadiusOutput) GoString() string {
+ return s.String()
+}
+
+// Contains the inputs for the EnableSso operation.
+type EnableSsoInput struct {
+ _ struct{} `type:"structure"`
+
+ // The identifier of the directory for which to enable single sign-on.
+ DirectoryId *string `type:"string" required:"true"`
+
+ // The password of an alternate account to use to enable single sign-on. This
+ // is only used for AD Connector directories. For more information, see the
+ // UserName parameter.
+ Password *string `min:"1" type:"string"`
+
+ // The username of an alternate account to use to enable single sign-on. This
+ // is only used for AD Connector directories. This account must have privileges
+ // to add a service principal name.
+ //
+ // If the AD Connector service account does not have privileges to add a service
+ // principal name, you can specify an alternate account with the UserName and
+ // Password parameters. These credentials are only used to enable single sign-on
+ // and are not stored by the service. The AD Connector service account is not
+ // changed.
+ UserName *string `min:"1" type:"string"`
+}
+
+// String returns the string representation
+func (s EnableSsoInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s EnableSsoInput) GoString() string {
+ return s.String()
+}
+
+// Contains the results of the EnableSso operation.
+type EnableSsoOutput struct {
+ _ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s EnableSsoOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s EnableSsoOutput) GoString() string {
+ return s.String()
+}
+
+// Contains the inputs for the GetDirectoryLimits operation.
+type GetDirectoryLimitsInput struct {
+ _ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s GetDirectoryLimitsInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetDirectoryLimitsInput) GoString() string {
+ return s.String()
+}
+
+// Contains the results of the GetDirectoryLimits operation.
+type GetDirectoryLimitsOutput struct {
+ _ struct{} `type:"structure"`
+
+ // A DirectoryLimits object that contains the directory limits for the current
+ // region.
+ DirectoryLimits *DirectoryLimits `type:"structure"`
+}
+
+// String returns the string representation
+func (s GetDirectoryLimitsOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetDirectoryLimitsOutput) GoString() string {
+ return s.String()
+}
+
+// Contains the inputs for the GetSnapshotLimits operation.
+type GetSnapshotLimitsInput struct {
+ _ struct{} `type:"structure"`
+
+ // Contains the identifier of the directory to obtain the limits for.
+ DirectoryId *string `type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s GetSnapshotLimitsInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetSnapshotLimitsInput) GoString() string {
+ return s.String()
+}
+
+// Contains the results of the GetSnapshotLimits operation.
+type GetSnapshotLimitsOutput struct {
+ _ struct{} `type:"structure"`
+
+ // A SnapshotLimits object that contains the manual snapshot limits for the
+ // specified directory.
+ SnapshotLimits *SnapshotLimits `type:"structure"`
+}
+
+// String returns the string representation
+func (s GetSnapshotLimitsOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetSnapshotLimitsOutput) GoString() string {
+ return s.String()
+}
+
+// Contains information about a Remote Authentication Dial In User Service (RADIUS)
+// server.
+type RadiusSettings struct {
+ _ struct{} `type:"structure"`
+
+ // The protocol specified for your RADIUS endpoints.
+ AuthenticationProtocol *string `type:"string" enum:"RadiusAuthenticationProtocol"` + + // Not currently used. + DisplayLabel *string `min:"1" type:"string"` + + // The port that your RADIUS server is using for communications. Your on-premises + // network must allow inbound traffic over this port from the AWS Directory + // Service servers. + RadiusPort *int64 `min:"1025" type:"integer"` + + // The maximum number of times that communication with the RADIUS server is + // attempted. + RadiusRetries *int64 `type:"integer"` + + // An array of strings that contains the IP addresses of the RADIUS server endpoints, + // or the IP addresses of your RADIUS server load balancer. + RadiusServers []*string `type:"list"` + + // The amount of time, in seconds, to wait for the RADIUS server to respond. + RadiusTimeout *int64 `min:"1" type:"integer"` + + // The shared secret code that was specified when your RADIUS endpoints were + // created. + SharedSecret *string `min:"8" type:"string"` + + // Not currently used. + UseSameUsername *bool `type:"boolean"` +} + +// String returns the string representation +func (s RadiusSettings) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RadiusSettings) GoString() string { + return s.String() +} + +// An object representing the inputs for the RestoreFromSnapshot operation. +type RestoreFromSnapshotInput struct { + _ struct{} `type:"structure"` + + // The identifier of the snapshot to restore from. + SnapshotId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s RestoreFromSnapshotInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RestoreFromSnapshotInput) GoString() string { + return s.String() +} + +// Contains the results of the RestoreFromSnapshot operation. +type RestoreFromSnapshotOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s RestoreFromSnapshotOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RestoreFromSnapshotOutput) GoString() string { + return s.String() +} + +// Describes a directory snapshot. +type Snapshot struct { + _ struct{} `type:"structure"` + + // The directory identifier. + DirectoryId *string `type:"string"` + + // The descriptive name of the snapshot. + Name *string `type:"string"` + + // The snapshot identifier. + SnapshotId *string `type:"string"` + + // The date and time that the snapshot was taken. + StartTime *time.Time `type:"timestamp" timestampFormat:"unix"` + + // The snapshot status. + Status *string `type:"string" enum:"SnapshotStatus"` + + // The snapshot type. + Type *string `type:"string" enum:"SnapshotType"` +} + +// String returns the string representation +func (s Snapshot) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Snapshot) GoString() string { + return s.String() +} + +// Contains manual snapshot limit information for a directory. +type SnapshotLimits struct { + _ struct{} `type:"structure"` + + // The current number of manual snapshots of the directory. + ManualSnapshotsCurrentCount *int64 `type:"integer"` + + // The maximum number of manual snapshots allowed. + ManualSnapshotsLimit *int64 `type:"integer"` + + // Indicates if the manual snapshot limit has been reached. 
+ ManualSnapshotsLimitReached *bool `type:"boolean"`
+}
+
+// String returns the string representation
+func (s SnapshotLimits) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s SnapshotLimits) GoString() string {
+ return s.String()
+}
+
+// Describes a trust relationship between a Microsoft AD in the AWS cloud and
+// an external domain.
+type Trust struct {
+ _ struct{} `type:"structure"`
+
+ // The date and time that the trust relationship was created.
+ CreatedDateTime *time.Time `type:"timestamp" timestampFormat:"unix"`
+
+ // The Directory ID of the AWS directory involved in the trust relationship.
+ DirectoryId *string `type:"string"`
+
+ // The date and time that the trust relationship was last updated.
+ LastUpdatedDateTime *time.Time `type:"timestamp" timestampFormat:"unix"`
+
+ // The Fully Qualified Domain Name (FQDN) of the external domain involved in
+ // the trust relationship.
+ RemoteDomainName *string `type:"string"`
+
+ // The date and time that the TrustState was last updated.
+ StateLastUpdatedDateTime *time.Time `type:"timestamp" timestampFormat:"unix"`
+
+ // The trust relationship direction.
+ TrustDirection *string `type:"string" enum:"TrustDirection"`
+
+ // The unique ID of the trust relationship.
+ TrustId *string `type:"string"`
+
+ // The trust relationship state.
+ TrustState *string `type:"string" enum:"TrustState"`
+
+ // The trust relationship type.
+ TrustType *string `type:"string" enum:"TrustType"`
+}
+
+// String returns the string representation
+func (s Trust) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s Trust) GoString() string {
+ return s.String()
+}
+
+// Contains the inputs for the UpdateRadius operation.
+type UpdateRadiusInput struct {
+ _ struct{} `type:"structure"`
+
+ // The identifier of the directory for which to update the RADIUS server information.
+ DirectoryId *string `type:"string" required:"true"`
+
+ // A RadiusSettings object that contains information about the RADIUS server.
+ RadiusSettings *RadiusSettings `type:"structure" required:"true"`
+}
+
+// String returns the string representation
+func (s UpdateRadiusInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s UpdateRadiusInput) GoString() string {
+ return s.String()
+}
+
+// Contains the results of the UpdateRadius operation.
+type UpdateRadiusOutput struct {
+ _ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s UpdateRadiusOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s UpdateRadiusOutput) GoString() string {
+ return s.String()
+}
+
+// Initiates the verification of an existing trust relationship between a Microsoft
+// AD in the AWS cloud and an external domain.
+type VerifyTrustInput struct {
+ _ struct{} `type:"structure"`
+
+ // The unique Trust ID of the trust relationship to verify.
+ TrustId *string `type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s VerifyTrustInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s VerifyTrustInput) GoString() string {
+ return s.String()
+}
+
+type VerifyTrustOutput struct {
+ _ struct{} `type:"structure"`
+
+ // The unique Trust ID of the trust relationship that was verified.
+ TrustId *string `type:"string"` +} + +// String returns the string representation +func (s VerifyTrustOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s VerifyTrustOutput) GoString() string { + return s.String() +} + +const ( + // @enum DirectorySize + DirectorySizeSmall = "Small" + // @enum DirectorySize + DirectorySizeLarge = "Large" +) + +const ( + // @enum DirectoryStage + DirectoryStageRequested = "Requested" + // @enum DirectoryStage + DirectoryStageCreating = "Creating" + // @enum DirectoryStage + DirectoryStageCreated = "Created" + // @enum DirectoryStage + DirectoryStageActive = "Active" + // @enum DirectoryStage + DirectoryStageInoperable = "Inoperable" + // @enum DirectoryStage + DirectoryStageImpaired = "Impaired" + // @enum DirectoryStage + DirectoryStageRestoring = "Restoring" + // @enum DirectoryStage + DirectoryStageRestoreFailed = "RestoreFailed" + // @enum DirectoryStage + DirectoryStageDeleting = "Deleting" + // @enum DirectoryStage + DirectoryStageDeleted = "Deleted" + // @enum DirectoryStage + DirectoryStageFailed = "Failed" +) + +const ( + // @enum DirectoryType + DirectoryTypeSimpleAd = "SimpleAD" + // @enum DirectoryType + DirectoryTypeAdconnector = "ADConnector" + // @enum DirectoryType + DirectoryTypeMicrosoftAd = "MicrosoftAD" +) + +const ( + // @enum RadiusAuthenticationProtocol + RadiusAuthenticationProtocolPap = "PAP" + // @enum RadiusAuthenticationProtocol + RadiusAuthenticationProtocolChap = "CHAP" + // @enum RadiusAuthenticationProtocol + RadiusAuthenticationProtocolMsChapv1 = "MS-CHAPv1" + // @enum RadiusAuthenticationProtocol + RadiusAuthenticationProtocolMsChapv2 = "MS-CHAPv2" +) + +const ( + // @enum RadiusStatus + RadiusStatusCreating = "Creating" + // @enum RadiusStatus + RadiusStatusCompleted = "Completed" + // @enum RadiusStatus + RadiusStatusFailed = "Failed" +) + +const ( + // @enum SnapshotStatus + SnapshotStatusCreating = "Creating" + // @enum SnapshotStatus + SnapshotStatusCompleted = "Completed" + // @enum SnapshotStatus + SnapshotStatusFailed = "Failed" +) + +const ( + // @enum SnapshotType + SnapshotTypeAuto = "Auto" + // @enum SnapshotType + SnapshotTypeManual = "Manual" +) + +const ( + // @enum TrustDirection + TrustDirectionOneWayOutgoing = "One-Way: Outgoing" + // @enum TrustDirection + TrustDirectionOneWayIncoming = "One-Way: Incoming" + // @enum TrustDirection + TrustDirectionTwoWay = "Two-Way" +) + +const ( + // @enum TrustState + TrustStateCreating = "Creating" + // @enum TrustState + TrustStateCreated = "Created" + // @enum TrustState + TrustStateVerifying = "Verifying" + // @enum TrustState + TrustStateVerifyFailed = "VerifyFailed" + // @enum TrustState + TrustStateVerified = "Verified" + // @enum TrustState + TrustStateDeleting = "Deleting" + // @enum TrustState + TrustStateDeleted = "Deleted" + // @enum TrustState + TrustStateFailed = "Failed" +) + +const ( + // @enum TrustType + TrustTypeForest = "Forest" +) diff --git a/vendor/github.com/aws/aws-sdk-go/service/directoryservice/directoryserviceiface/interface.go b/vendor/github.com/aws/aws-sdk-go/service/directoryservice/directoryserviceiface/interface.go new file mode 100644 index 000000000..fec194c91 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/directoryservice/directoryserviceiface/interface.go @@ -0,0 +1,102 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +// Package directoryserviceiface provides an interface for the AWS Directory Service. 
+package directoryserviceiface + +import ( + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/service/directoryservice" +) + +// DirectoryServiceAPI is the interface type for directoryservice.DirectoryService. +type DirectoryServiceAPI interface { + ConnectDirectoryRequest(*directoryservice.ConnectDirectoryInput) (*request.Request, *directoryservice.ConnectDirectoryOutput) + + ConnectDirectory(*directoryservice.ConnectDirectoryInput) (*directoryservice.ConnectDirectoryOutput, error) + + CreateAliasRequest(*directoryservice.CreateAliasInput) (*request.Request, *directoryservice.CreateAliasOutput) + + CreateAlias(*directoryservice.CreateAliasInput) (*directoryservice.CreateAliasOutput, error) + + CreateComputerRequest(*directoryservice.CreateComputerInput) (*request.Request, *directoryservice.CreateComputerOutput) + + CreateComputer(*directoryservice.CreateComputerInput) (*directoryservice.CreateComputerOutput, error) + + CreateDirectoryRequest(*directoryservice.CreateDirectoryInput) (*request.Request, *directoryservice.CreateDirectoryOutput) + + CreateDirectory(*directoryservice.CreateDirectoryInput) (*directoryservice.CreateDirectoryOutput, error) + + CreateMicrosoftADRequest(*directoryservice.CreateMicrosoftADInput) (*request.Request, *directoryservice.CreateMicrosoftADOutput) + + CreateMicrosoftAD(*directoryservice.CreateMicrosoftADInput) (*directoryservice.CreateMicrosoftADOutput, error) + + CreateSnapshotRequest(*directoryservice.CreateSnapshotInput) (*request.Request, *directoryservice.CreateSnapshotOutput) + + CreateSnapshot(*directoryservice.CreateSnapshotInput) (*directoryservice.CreateSnapshotOutput, error) + + CreateTrustRequest(*directoryservice.CreateTrustInput) (*request.Request, *directoryservice.CreateTrustOutput) + + CreateTrust(*directoryservice.CreateTrustInput) (*directoryservice.CreateTrustOutput, error) + + DeleteDirectoryRequest(*directoryservice.DeleteDirectoryInput) (*request.Request, *directoryservice.DeleteDirectoryOutput) + + DeleteDirectory(*directoryservice.DeleteDirectoryInput) (*directoryservice.DeleteDirectoryOutput, error) + + DeleteSnapshotRequest(*directoryservice.DeleteSnapshotInput) (*request.Request, *directoryservice.DeleteSnapshotOutput) + + DeleteSnapshot(*directoryservice.DeleteSnapshotInput) (*directoryservice.DeleteSnapshotOutput, error) + + DeleteTrustRequest(*directoryservice.DeleteTrustInput) (*request.Request, *directoryservice.DeleteTrustOutput) + + DeleteTrust(*directoryservice.DeleteTrustInput) (*directoryservice.DeleteTrustOutput, error) + + DescribeDirectoriesRequest(*directoryservice.DescribeDirectoriesInput) (*request.Request, *directoryservice.DescribeDirectoriesOutput) + + DescribeDirectories(*directoryservice.DescribeDirectoriesInput) (*directoryservice.DescribeDirectoriesOutput, error) + + DescribeSnapshotsRequest(*directoryservice.DescribeSnapshotsInput) (*request.Request, *directoryservice.DescribeSnapshotsOutput) + + DescribeSnapshots(*directoryservice.DescribeSnapshotsInput) (*directoryservice.DescribeSnapshotsOutput, error) + + DescribeTrustsRequest(*directoryservice.DescribeTrustsInput) (*request.Request, *directoryservice.DescribeTrustsOutput) + + DescribeTrusts(*directoryservice.DescribeTrustsInput) (*directoryservice.DescribeTrustsOutput, error) + + DisableRadiusRequest(*directoryservice.DisableRadiusInput) (*request.Request, *directoryservice.DisableRadiusOutput) + + DisableRadius(*directoryservice.DisableRadiusInput) (*directoryservice.DisableRadiusOutput, error) + + 
DisableSsoRequest(*directoryservice.DisableSsoInput) (*request.Request, *directoryservice.DisableSsoOutput) + + DisableSso(*directoryservice.DisableSsoInput) (*directoryservice.DisableSsoOutput, error) + + EnableRadiusRequest(*directoryservice.EnableRadiusInput) (*request.Request, *directoryservice.EnableRadiusOutput) + + EnableRadius(*directoryservice.EnableRadiusInput) (*directoryservice.EnableRadiusOutput, error) + + EnableSsoRequest(*directoryservice.EnableSsoInput) (*request.Request, *directoryservice.EnableSsoOutput) + + EnableSso(*directoryservice.EnableSsoInput) (*directoryservice.EnableSsoOutput, error) + + GetDirectoryLimitsRequest(*directoryservice.GetDirectoryLimitsInput) (*request.Request, *directoryservice.GetDirectoryLimitsOutput) + + GetDirectoryLimits(*directoryservice.GetDirectoryLimitsInput) (*directoryservice.GetDirectoryLimitsOutput, error) + + GetSnapshotLimitsRequest(*directoryservice.GetSnapshotLimitsInput) (*request.Request, *directoryservice.GetSnapshotLimitsOutput) + + GetSnapshotLimits(*directoryservice.GetSnapshotLimitsInput) (*directoryservice.GetSnapshotLimitsOutput, error) + + RestoreFromSnapshotRequest(*directoryservice.RestoreFromSnapshotInput) (*request.Request, *directoryservice.RestoreFromSnapshotOutput) + + RestoreFromSnapshot(*directoryservice.RestoreFromSnapshotInput) (*directoryservice.RestoreFromSnapshotOutput, error) + + UpdateRadiusRequest(*directoryservice.UpdateRadiusInput) (*request.Request, *directoryservice.UpdateRadiusOutput) + + UpdateRadius(*directoryservice.UpdateRadiusInput) (*directoryservice.UpdateRadiusOutput, error) + + VerifyTrustRequest(*directoryservice.VerifyTrustInput) (*request.Request, *directoryservice.VerifyTrustOutput) + + VerifyTrust(*directoryservice.VerifyTrustInput) (*directoryservice.VerifyTrustOutput, error) +} + +var _ DirectoryServiceAPI = (*directoryservice.DirectoryService)(nil) diff --git a/vendor/github.com/aws/aws-sdk-go/service/directoryservice/examples_test.go b/vendor/github.com/aws/aws-sdk-go/service/directoryservice/examples_test.go new file mode 100644 index 000000000..06071ae47 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/directoryservice/examples_test.go @@ -0,0 +1,532 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +package directoryservice_test + +import ( + "bytes" + "fmt" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/session" + "github.com/aws/aws-sdk-go/service/directoryservice" +) + +var _ time.Duration +var _ bytes.Buffer + +func ExampleDirectoryService_ConnectDirectory() { + svc := directoryservice.New(session.New()) + + params := &directoryservice.ConnectDirectoryInput{ + ConnectSettings: &directoryservice.DirectoryConnectSettings{ // Required + CustomerDnsIps: []*string{ // Required + aws.String("IpAddr"), // Required + // More values... + }, + CustomerUserName: aws.String("UserName"), // Required + SubnetIds: []*string{ // Required + aws.String("SubnetId"), // Required + // More values... + }, + VpcId: aws.String("VpcId"), // Required + }, + Name: aws.String("DirectoryName"), // Required + Password: aws.String("ConnectPassword"), // Required + Size: aws.String("DirectorySize"), // Required + Description: aws.String("Description"), + ShortName: aws.String("DirectoryShortName"), + } + resp, err := svc.ConnectDirectory(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleDirectoryService_CreateAlias() { + svc := directoryservice.New(session.New()) + + params := &directoryservice.CreateAliasInput{ + Alias: aws.String("AliasName"), // Required + DirectoryId: aws.String("DirectoryId"), // Required + } + resp, err := svc.CreateAlias(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleDirectoryService_CreateComputer() { + svc := directoryservice.New(session.New()) + + params := &directoryservice.CreateComputerInput{ + ComputerName: aws.String("ComputerName"), // Required + DirectoryId: aws.String("DirectoryId"), // Required + Password: aws.String("ComputerPassword"), // Required + ComputerAttributes: []*directoryservice.Attribute{ + { // Required + Name: aws.String("AttributeName"), + Value: aws.String("AttributeValue"), + }, + // More values... + }, + OrganizationalUnitDistinguishedName: aws.String("OrganizationalUnitDN"), + } + resp, err := svc.CreateComputer(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleDirectoryService_CreateDirectory() { + svc := directoryservice.New(session.New()) + + params := &directoryservice.CreateDirectoryInput{ + Name: aws.String("DirectoryName"), // Required + Password: aws.String("Password"), // Required + Size: aws.String("DirectorySize"), // Required + Description: aws.String("Description"), + ShortName: aws.String("DirectoryShortName"), + VpcSettings: &directoryservice.DirectoryVpcSettings{ + SubnetIds: []*string{ // Required + aws.String("SubnetId"), // Required + // More values... + }, + VpcId: aws.String("VpcId"), // Required + }, + } + resp, err := svc.CreateDirectory(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleDirectoryService_CreateMicrosoftAD() { + svc := directoryservice.New(session.New()) + + params := &directoryservice.CreateMicrosoftADInput{ + Name: aws.String("DirectoryName"), // Required + Password: aws.String("Password"), // Required + VpcSettings: &directoryservice.DirectoryVpcSettings{ // Required + SubnetIds: []*string{ // Required + aws.String("SubnetId"), // Required + // More values... + }, + VpcId: aws.String("VpcId"), // Required + }, + Description: aws.String("Description"), + ShortName: aws.String("DirectoryShortName"), + } + resp, err := svc.CreateMicrosoftAD(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleDirectoryService_CreateSnapshot() { + svc := directoryservice.New(session.New()) + + params := &directoryservice.CreateSnapshotInput{ + DirectoryId: aws.String("DirectoryId"), // Required + Name: aws.String("SnapshotName"), + } + resp, err := svc.CreateSnapshot(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleDirectoryService_CreateTrust() { + svc := directoryservice.New(session.New()) + + params := &directoryservice.CreateTrustInput{ + DirectoryId: aws.String("DirectoryId"), // Required + RemoteDomainName: aws.String("RemoteDomainName"), // Required + TrustDirection: aws.String("TrustDirection"), // Required + TrustPassword: aws.String("TrustPassword"), // Required + TrustType: aws.String("TrustType"), + } + resp, err := svc.CreateTrust(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleDirectoryService_DeleteDirectory() { + svc := directoryservice.New(session.New()) + + params := &directoryservice.DeleteDirectoryInput{ + DirectoryId: aws.String("DirectoryId"), // Required + } + resp, err := svc.DeleteDirectory(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleDirectoryService_DeleteSnapshot() { + svc := directoryservice.New(session.New()) + + params := &directoryservice.DeleteSnapshotInput{ + SnapshotId: aws.String("SnapshotId"), // Required + } + resp, err := svc.DeleteSnapshot(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleDirectoryService_DeleteTrust() { + svc := directoryservice.New(session.New()) + + params := &directoryservice.DeleteTrustInput{ + TrustId: aws.String("TrustId"), // Required + } + resp, err := svc.DeleteTrust(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleDirectoryService_DescribeDirectories() { + svc := directoryservice.New(session.New()) + + params := &directoryservice.DescribeDirectoriesInput{ + DirectoryIds: []*string{ + aws.String("DirectoryId"), // Required + // More values... + }, + Limit: aws.Int64(1), + NextToken: aws.String("NextToken"), + } + resp, err := svc.DescribeDirectories(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleDirectoryService_DescribeSnapshots() { + svc := directoryservice.New(session.New()) + + params := &directoryservice.DescribeSnapshotsInput{ + DirectoryId: aws.String("DirectoryId"), + Limit: aws.Int64(1), + NextToken: aws.String("NextToken"), + SnapshotIds: []*string{ + aws.String("SnapshotId"), // Required + // More values... + }, + } + resp, err := svc.DescribeSnapshots(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleDirectoryService_DescribeTrusts() { + svc := directoryservice.New(session.New()) + + params := &directoryservice.DescribeTrustsInput{ + DirectoryId: aws.String("DirectoryId"), + Limit: aws.Int64(1), + NextToken: aws.String("NextToken"), + TrustIds: []*string{ + aws.String("TrustId"), // Required + // More values... + }, + } + resp, err := svc.DescribeTrusts(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleDirectoryService_DisableRadius() { + svc := directoryservice.New(session.New()) + + params := &directoryservice.DisableRadiusInput{ + DirectoryId: aws.String("DirectoryId"), // Required + } + resp, err := svc.DisableRadius(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleDirectoryService_DisableSso() { + svc := directoryservice.New(session.New()) + + params := &directoryservice.DisableSsoInput{ + DirectoryId: aws.String("DirectoryId"), // Required + Password: aws.String("ConnectPassword"), + UserName: aws.String("UserName"), + } + resp, err := svc.DisableSso(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleDirectoryService_EnableRadius() { + svc := directoryservice.New(session.New()) + + params := &directoryservice.EnableRadiusInput{ + DirectoryId: aws.String("DirectoryId"), // Required + RadiusSettings: &directoryservice.RadiusSettings{ // Required + AuthenticationProtocol: aws.String("RadiusAuthenticationProtocol"), + DisplayLabel: aws.String("RadiusDisplayLabel"), + RadiusPort: aws.Int64(1), + RadiusRetries: aws.Int64(1), + RadiusServers: []*string{ + aws.String("Server"), // Required + // More values... + }, + RadiusTimeout: aws.Int64(1), + SharedSecret: aws.String("RadiusSharedSecret"), + UseSameUsername: aws.Bool(true), + }, + } + resp, err := svc.EnableRadius(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleDirectoryService_EnableSso() { + svc := directoryservice.New(session.New()) + + params := &directoryservice.EnableSsoInput{ + DirectoryId: aws.String("DirectoryId"), // Required + Password: aws.String("ConnectPassword"), + UserName: aws.String("UserName"), + } + resp, err := svc.EnableSso(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleDirectoryService_GetDirectoryLimits() { + svc := directoryservice.New(session.New()) + + var params *directoryservice.GetDirectoryLimitsInput + resp, err := svc.GetDirectoryLimits(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp)
+}
+
+func ExampleDirectoryService_GetSnapshotLimits() {
+ svc := directoryservice.New(session.New())
+
+ params := &directoryservice.GetSnapshotLimitsInput{
+  DirectoryId: aws.String("DirectoryId"), // Required
+ }
+ resp, err := svc.GetSnapshotLimits(params)
+
+ if err != nil {
+  // Print the error, cast err to awserr.Error to get the Code and
+  // Message from an error.
+  fmt.Println(err.Error())
+  return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleDirectoryService_RestoreFromSnapshot() {
+ svc := directoryservice.New(session.New())
+
+ params := &directoryservice.RestoreFromSnapshotInput{
+  SnapshotId: aws.String("SnapshotId"), // Required
+ }
+ resp, err := svc.RestoreFromSnapshot(params)
+
+ if err != nil {
+  // Print the error, cast err to awserr.Error to get the Code and
+  // Message from an error.
+  fmt.Println(err.Error())
+  return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleDirectoryService_UpdateRadius() {
+ svc := directoryservice.New(session.New())
+
+ params := &directoryservice.UpdateRadiusInput{
+  DirectoryId: aws.String("DirectoryId"), // Required
+  RadiusSettings: &directoryservice.RadiusSettings{ // Required
+   AuthenticationProtocol: aws.String("RadiusAuthenticationProtocol"),
+   DisplayLabel:           aws.String("RadiusDisplayLabel"),
+   RadiusPort:             aws.Int64(1),
+   RadiusRetries:          aws.Int64(1),
+   RadiusServers: []*string{
+    aws.String("Server"), // Required
+    // More values...
+   },
+   RadiusTimeout:   aws.Int64(1),
+   SharedSecret:    aws.String("RadiusSharedSecret"),
+   UseSameUsername: aws.Bool(true),
+  },
+ }
+ resp, err := svc.UpdateRadius(params)
+
+ if err != nil {
+  // Print the error, cast err to awserr.Error to get the Code and
+  // Message from an error.
+  fmt.Println(err.Error())
+  return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleDirectoryService_VerifyTrust() {
+ svc := directoryservice.New(session.New())
+
+ params := &directoryservice.VerifyTrustInput{
+  TrustId: aws.String("TrustId"), // Required
+ }
+ resp, err := svc.VerifyTrust(params)
+
+ if err != nil {
+  // Print the error, cast err to awserr.Error to get the Code and
+  // Message from an error.
+  fmt.Println(err.Error())
+  return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/service/directoryservice/service.go b/vendor/github.com/aws/aws-sdk-go/service/directoryservice/service.go
new file mode 100644
index 000000000..fbf3ee8a8
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/directoryservice/service.go
@@ -0,0 +1,90 @@
+// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT.
+
+package directoryservice
+
+import (
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/client"
+ "github.com/aws/aws-sdk-go/aws/client/metadata"
+ "github.com/aws/aws-sdk-go/aws/request"
+ "github.com/aws/aws-sdk-go/private/protocol/jsonrpc"
+ "github.com/aws/aws-sdk-go/private/signer/v4"
+)
+
+// This is the AWS Directory Service API Reference. This guide provides detailed
+// information about AWS Directory Service operations, data types, parameters,
+// and errors.
+// The service client's operations are safe to be used concurrently.
+// It is not safe to mutate any of the client's properties though.
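+//
+// As a hand-written illustration of that guarantee (this sketch is not part
+// of the generated file; the worker count is arbitrary, GetDirectoryLimits is
+// just a convenient read-only call, and the sync and fmt packages are assumed
+// to be imported by the caller), a single client value can be shared across
+// goroutines:
+//
+//    svc := directoryservice.New(session.New())
+//
+//    var wg sync.WaitGroup
+//    for i := 0; i < 4; i++ {
+//        wg.Add(1)
+//        go func() {
+//            defer wg.Done()
+//            // Concurrent calls on the shared client are safe; each call
+//            // builds and sends its own request.
+//            if resp, err := svc.GetDirectoryLimits(nil); err == nil {
+//                fmt.Println(resp)
+//            }
+//        }()
+//    }
+//    wg.Wait()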
+type DirectoryService struct { + *client.Client +} + +// Used for custom client initialization logic +var initClient func(*client.Client) + +// Used for custom request initialization logic +var initRequest func(*request.Request) + +// A ServiceName is the name of the service the client will make API calls to. +const ServiceName = "ds" + +// New creates a new instance of the DirectoryService client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a DirectoryService client from just a session. +// svc := directoryservice.New(mySession) +// +// // Create a DirectoryService client with additional configuration +// svc := directoryservice.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func New(p client.ConfigProvider, cfgs ...*aws.Config) *DirectoryService { + c := p.ClientConfig(ServiceName, cfgs...) + return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *DirectoryService { + svc := &DirectoryService{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: ServiceName, + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "2015-04-16", + JSONVersion: "1.1", + TargetPrefix: "DirectoryService_20150416", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Build.PushBack(jsonrpc.Build) + svc.Handlers.Unmarshal.PushBack(jsonrpc.Unmarshal) + svc.Handlers.UnmarshalMeta.PushBack(jsonrpc.UnmarshalMeta) + svc.Handlers.UnmarshalError.PushBack(jsonrpc.UnmarshalError) + + // Run custom client initialization if present + if initClient != nil { + initClient(svc.Client) + } + + return svc +} + +// newRequest creates a new request for a DirectoryService operation and runs any +// custom request initialization. +func (c *DirectoryService) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + // Run custom request initialization if present + if initRequest != nil { + initRequest(req) + } + + return req +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/dynamodb/api.go b/vendor/github.com/aws/aws-sdk-go/service/dynamodb/api.go new file mode 100644 index 000000000..25922a5d5 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/dynamodb/api.go @@ -0,0 +1,5387 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +// Package dynamodb provides a client for Amazon DynamoDB. +package dynamodb + +import ( + "time" + + "github.com/aws/aws-sdk-go/aws/awsutil" + "github.com/aws/aws-sdk-go/aws/request" +) + +const opBatchGetItem = "BatchGetItem" + +// BatchGetItemRequest generates a request for the BatchGetItem operation. 
+func (c *DynamoDB) BatchGetItemRequest(input *BatchGetItemInput) (req *request.Request, output *BatchGetItemOutput) {
+ op := &request.Operation{
+  Name:       opBatchGetItem,
+  HTTPMethod: "POST",
+  HTTPPath:   "/",
+  Paginator: &request.Paginator{
+   InputTokens:     []string{"RequestItems"},
+   OutputTokens:    []string{"UnprocessedKeys"},
+   LimitToken:      "",
+   TruncationToken: "",
+  },
+ }
+
+ if input == nil {
+  input = &BatchGetItemInput{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &BatchGetItemOutput{}
+ req.Data = output
+ return
+}
+
+// The BatchGetItem operation returns the attributes of one or more items from
+// one or more tables. You identify requested items by primary key.
+//
+// A single operation can retrieve up to 16 MB of data, which can contain as
+// many as 100 items. BatchGetItem will return a partial result if the response
+// size limit is exceeded, the table's provisioned throughput is exceeded, or
+// an internal processing failure occurs. If a partial result is returned, the
+// operation returns a value for UnprocessedKeys. You can use this value to
+// retry the operation starting with the next item to get.
+//
+// If you request more than 100 items, BatchGetItem will return a ValidationException
+// with the message "Too many items requested for the BatchGetItem call".
+//
+// For example, if you ask to retrieve 100 items, but each individual item
+// is 300 KB in size, the system returns 52 items (so as not to exceed the 16
+// MB limit). It also returns an appropriate UnprocessedKeys value so you can
+// get the next page of results. If desired, your application can include its
+// own logic to assemble the pages of results into one data set.
+//
+// If none of the items can be processed due to insufficient provisioned throughput
+// on all of the tables in the request, then BatchGetItem will return a ProvisionedThroughputExceededException.
+// If at least one of the items is successfully processed, then BatchGetItem
+// completes successfully, while returning the keys of the unread items in UnprocessedKeys.
+//
+// If DynamoDB returns any unprocessed items, you should retry the batch operation
+// on those items. However, we strongly recommend that you use an exponential
+// backoff algorithm. If you retry the batch operation immediately, the underlying
+// read or write requests can still fail due to throttling on the individual
+// tables. If you delay the batch operation using exponential backoff, the individual
+// requests in the batch are much more likely to succeed.
+//
+// For more information, see Batch Operations and Error Handling (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ErrorHandling.html#BatchOperations)
+// in the Amazon DynamoDB Developer Guide.
+//
+// By default, BatchGetItem performs eventually consistent reads on every
+// table in the request. If you want strongly consistent reads instead, you
+// can set ConsistentRead to true for any or all tables.
+//
+// In order to minimize response latency, BatchGetItem retrieves items in parallel.
+//
+// When designing your application, keep in mind that DynamoDB does not return
+// attributes in any particular order. To help parse the response by item, include
+// the primary key values for the items in your request in the AttributesToGet
+// parameter.
+//
+// If a requested item does not exist, it is not returned in the result. Requests
+// for nonexistent items consume the minimum read capacity units according to
+// the type of read.
For more information, see Capacity Units Calculations (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/WorkingWithTables.html#CapacityUnitCalculations)
+// in the Amazon DynamoDB Developer Guide.
+func (c *DynamoDB) BatchGetItem(input *BatchGetItemInput) (*BatchGetItemOutput, error) {
+ req, out := c.BatchGetItemRequest(input)
+ err := req.Send()
+ return out, err
+}
+
+func (c *DynamoDB) BatchGetItemPages(input *BatchGetItemInput, fn func(p *BatchGetItemOutput, lastPage bool) (shouldContinue bool)) error {
+ page, _ := c.BatchGetItemRequest(input)
+ page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator"))
+ return page.EachPage(func(p interface{}, lastPage bool) bool {
+  return fn(p.(*BatchGetItemOutput), lastPage)
+ })
+}
+
+const opBatchWriteItem = "BatchWriteItem"
+
+// BatchWriteItemRequest generates a request for the BatchWriteItem operation.
+func (c *DynamoDB) BatchWriteItemRequest(input *BatchWriteItemInput) (req *request.Request, output *BatchWriteItemOutput) {
+ op := &request.Operation{
+  Name:       opBatchWriteItem,
+  HTTPMethod: "POST",
+  HTTPPath:   "/",
+ }
+
+ if input == nil {
+  input = &BatchWriteItemInput{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &BatchWriteItemOutput{}
+ req.Data = output
+ return
+}
+
+// The BatchWriteItem operation puts or deletes multiple items in one or more
+// tables. A single call to BatchWriteItem can write up to 16 MB of data, which
+// can comprise as many as 25 put or delete requests. Individual items to be
+// written can be as large as 400 KB.
+//
+// BatchWriteItem cannot update items. To update items, use the UpdateItem
+// API.
+//
+// The individual PutItem and DeleteItem operations specified in BatchWriteItem
+// are atomic; however, BatchWriteItem as a whole is not. If any requested operations
+// fail because the table's provisioned throughput is exceeded or an internal
+// processing failure occurs, the failed operations are returned in the UnprocessedItems
+// response parameter. You can investigate and optionally resend the requests.
+// Typically, you would call BatchWriteItem in a loop. Each iteration would
+// check for unprocessed items and submit a new BatchWriteItem request with
+// those unprocessed items until all items have been processed.
+//
+// Note that if none of the items can be processed due to insufficient provisioned
+// throughput on all of the tables in the request, then BatchWriteItem will
+// return a ProvisionedThroughputExceededException.
+//
+// If DynamoDB returns any unprocessed items, you should retry the batch operation
+// on those items. However, we strongly recommend that you use an exponential
+// backoff algorithm. If you retry the batch operation immediately, the underlying
+// read or write requests can still fail due to throttling on the individual
+// tables. If you delay the batch operation using exponential backoff, the individual
+// requests in the batch are much more likely to succeed.
+//
+// For more information, see Batch Operations and Error Handling (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ErrorHandling.html#BatchOperations)
+// in the Amazon DynamoDB Developer Guide.
+//
+// With BatchWriteItem, you can efficiently write or delete large amounts
+// of data, such as from Amazon Elastic MapReduce (EMR), or copy data from another
+// database into DynamoDB.
In order to improve performance with these large-scale
+// operations, BatchWriteItem does not behave in the same way as individual
+// PutItem and DeleteItem calls would. For example, you cannot specify conditions
+// on individual put and delete requests, and BatchWriteItem does not return
+// deleted items in the response.
+//
+// If you use a programming language that supports concurrency, you can use
+// threads to write items in parallel. Your application must include the necessary
+// logic to manage the threads. With languages that don't support threading,
+// you must update or delete the specified items one at a time. In both situations,
+// BatchWriteItem provides an alternative where the API performs the specified
+// put and delete operations in parallel, giving you the power of the thread
+// pool approach without having to introduce complexity into your application.
+//
+// Parallel processing reduces latency, but each specified put and delete request
+// consumes the same number of write capacity units whether it is processed
+// in parallel or not. Delete operations on nonexistent items consume one write
+// capacity unit.
+//
+// If one or more of the following is true, DynamoDB rejects the entire batch
+// write operation:
+//
+// One or more tables specified in the BatchWriteItem request do not exist.
+//
+// Primary key attributes specified on an item in the request do not match
+// those in the corresponding table's primary key schema.
+//
+// You try to perform multiple operations on the same item in the same BatchWriteItem
+// request. For example, you cannot put and delete the same item in the same
+// BatchWriteItem request.
+//
+// There are more than 25 requests in the batch.
+//
+// Any individual item in a batch exceeds 400 KB.
+//
+// The total request size exceeds 16 MB.
+func (c *DynamoDB) BatchWriteItem(input *BatchWriteItemInput) (*BatchWriteItemOutput, error) {
+ req, out := c.BatchWriteItemRequest(input)
+ err := req.Send()
+ return out, err
+}
+
+const opCreateTable = "CreateTable"
+
+// CreateTableRequest generates a request for the CreateTable operation.
+func (c *DynamoDB) CreateTableRequest(input *CreateTableInput) (req *request.Request, output *CreateTableOutput) {
+ op := &request.Operation{
+  Name:       opCreateTable,
+  HTTPMethod: "POST",
+  HTTPPath:   "/",
+ }
+
+ if input == nil {
+  input = &CreateTableInput{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &CreateTableOutput{}
+ req.Data = output
+ return
+}
+
+// The CreateTable operation adds a new table to your account. In an AWS account,
+// table names must be unique within each region. That is, you can have two
+// tables with the same name if you create the tables in different regions.
+//
+// CreateTable is an asynchronous operation. Upon receiving a CreateTable request,
+// DynamoDB immediately returns a response with a TableStatus of CREATING. After
+// the table is created, DynamoDB sets the TableStatus to ACTIVE. You can perform
+// read and write operations only on an ACTIVE table.
+//
+// You can optionally define secondary indexes on the new table, as part of
+// the CreateTable operation. If you want to create multiple tables with secondary
+// indexes on them, you must create the tables sequentially. Only one table
+// with secondary indexes can be in the CREATING state at any given time.
+//
+// You can use the DescribeTable API to check the table status.
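+//
+// As a hand-written illustration (not generated code; svc and params stand in
+// for an existing client and CreateTableInput, and the table name and polling
+// interval are arbitrary), a caller might poll DescribeTable after CreateTable
+// until the new table reaches ACTIVE:
+//
+//    if _, err := svc.CreateTable(params); err != nil {
+//        return err
+//    }
+//    for {
+//        resp, err := svc.DescribeTable(&dynamodb.DescribeTableInput{
+//            TableName: aws.String("MyTable"),
+//        })
+//        if err != nil {
+//            return err
+//        }
+//        if *resp.Table.TableStatus == "ACTIVE" {
+//            break // the table is now ready for reads and writes
+//        }
+//        time.Sleep(5 * time.Second)
+//    }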
+func (c *DynamoDB) CreateTable(input *CreateTableInput) (*CreateTableOutput, error) {
+ req, out := c.CreateTableRequest(input)
+ err := req.Send()
+ return out, err
+}
+
+const opDeleteItem = "DeleteItem"
+
+// DeleteItemRequest generates a request for the DeleteItem operation.
+func (c *DynamoDB) DeleteItemRequest(input *DeleteItemInput) (req *request.Request, output *DeleteItemOutput) {
+ op := &request.Operation{
+  Name:       opDeleteItem,
+  HTTPMethod: "POST",
+  HTTPPath:   "/",
+ }
+
+ if input == nil {
+  input = &DeleteItemInput{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &DeleteItemOutput{}
+ req.Data = output
+ return
+}
+
+// Deletes a single item in a table by primary key. You can perform a conditional
+// delete operation that deletes the item if it exists, or if it has an expected
+// attribute value.
+//
+// In addition to deleting an item, you can also return the item's attribute
+// values in the same operation, using the ReturnValues parameter.
+//
+// Unless you specify conditions, DeleteItem is an idempotent operation;
+// running it multiple times on the same item or attribute does not result in
+// an error response.
+//
+// Conditional deletes are useful for deleting items only if specific conditions
+// are met. If those conditions are met, DynamoDB performs the delete. Otherwise,
+// the item is not deleted.
+func (c *DynamoDB) DeleteItem(input *DeleteItemInput) (*DeleteItemOutput, error) {
+ req, out := c.DeleteItemRequest(input)
+ err := req.Send()
+ return out, err
+}
+
+const opDeleteTable = "DeleteTable"
+
+// DeleteTableRequest generates a request for the DeleteTable operation.
+func (c *DynamoDB) DeleteTableRequest(input *DeleteTableInput) (req *request.Request, output *DeleteTableOutput) {
+ op := &request.Operation{
+  Name:       opDeleteTable,
+  HTTPMethod: "POST",
+  HTTPPath:   "/",
+ }
+
+ if input == nil {
+  input = &DeleteTableInput{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &DeleteTableOutput{}
+ req.Data = output
+ return
+}
+
+// The DeleteTable operation deletes a table and all of its items. After a DeleteTable
+// request, the specified table is in the DELETING state until DynamoDB completes
+// the deletion. If the table is in the ACTIVE state, you can delete it. If
+// a table is in the CREATING or UPDATING state, then DynamoDB returns a ResourceInUseException.
+// If the specified table does not exist, DynamoDB returns a ResourceNotFoundException.
+// If the table is already in the DELETING state, no error is returned.
+//
+// DynamoDB might continue to accept data read and write operations, such
+// as GetItem and PutItem, on a table in the DELETING state until the table
+// deletion is complete.
+//
+// When you delete a table, any indexes on that table are also deleted.
+//
+// If you have DynamoDB Streams enabled on the table, then the corresponding
+// stream on that table goes into the DISABLED state, and the stream is automatically
+// deleted after 24 hours.
+//
+// Use the DescribeTable API to check the status of the table.
+func (c *DynamoDB) DeleteTable(input *DeleteTableInput) (*DeleteTableOutput, error) {
+ req, out := c.DeleteTableRequest(input)
+ err := req.Send()
+ return out, err
+}
+
+const opDescribeTable = "DescribeTable"
+
+// DescribeTableRequest generates a request for the DescribeTable operation.
+func (c *DynamoDB) DescribeTableRequest(input *DescribeTableInput) (req *request.Request, output *DescribeTableOutput) { + op := &request.Operation{ + Name: opDescribeTable, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeTableInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeTableOutput{} + req.Data = output + return +} + +// Returns information about the table, including the current status of the +// table, when it was created, the primary key schema, and any indexes on the +// table. +// +// If you issue a DescribeTable request immediately after a CreateTable request, +// DynamoDB might return a ResourceNotFoundException. This is because DescribeTable +// uses an eventually consistent query, and the metadata for your table might +// not be available at that moment. Wait for a few seconds, and then try the +// DescribeTable request again. +func (c *DynamoDB) DescribeTable(input *DescribeTableInput) (*DescribeTableOutput, error) { + req, out := c.DescribeTableRequest(input) + err := req.Send() + return out, err +} + +const opGetItem = "GetItem" + +// GetItemRequest generates a request for the GetItem operation. +func (c *DynamoDB) GetItemRequest(input *GetItemInput) (req *request.Request, output *GetItemOutput) { + op := &request.Operation{ + Name: opGetItem, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetItemInput{} + } + + req = c.newRequest(op, input, output) + output = &GetItemOutput{} + req.Data = output + return +} + +// The GetItem operation returns a set of attributes for the item with the given +// primary key. If there is no matching item, GetItem does not return any data. +// +// GetItem provides an eventually consistent read by default. If your application +// requires a strongly consistent read, set ConsistentRead to true. Although +// a strongly consistent read might take more time than an eventually consistent +// read, it always returns the last updated value. +func (c *DynamoDB) GetItem(input *GetItemInput) (*GetItemOutput, error) { + req, out := c.GetItemRequest(input) + err := req.Send() + return out, err +} + +const opListTables = "ListTables" + +// ListTablesRequest generates a request for the ListTables operation. +func (c *DynamoDB) ListTablesRequest(input *ListTablesInput) (req *request.Request, output *ListTablesOutput) { + op := &request.Operation{ + Name: opListTables, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"ExclusiveStartTableName"}, + OutputTokens: []string{"LastEvaluatedTableName"}, + LimitToken: "Limit", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListTablesInput{} + } + + req = c.newRequest(op, input, output) + output = &ListTablesOutput{} + req.Data = output + return +} + +// Returns an array of table names associated with the current account and endpoint. +// The output from ListTables is paginated, with each page returning a maximum +// of 100 table names. 
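+//
+// As a hand-written sketch (not generated code; svc stands in for an existing
+// client and the variable names are arbitrary), ListTablesPages walks every
+// page and can be used to collect all table names:
+//
+//    var names []string
+//    err := svc.ListTablesPages(&dynamodb.ListTablesInput{},
+//        func(page *dynamodb.ListTablesOutput, lastPage bool) bool {
+//            for _, n := range page.TableNames {
+//                names = append(names, *n)
+//            }
+//            return true // keep paging; iteration ends after the last page
+//        })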
+func (c *DynamoDB) ListTables(input *ListTablesInput) (*ListTablesOutput, error) { + req, out := c.ListTablesRequest(input) + err := req.Send() + return out, err +} + +func (c *DynamoDB) ListTablesPages(input *ListTablesInput, fn func(p *ListTablesOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.ListTablesRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*ListTablesOutput), lastPage) + }) +} + +const opPutItem = "PutItem" + +// PutItemRequest generates a request for the PutItem operation. +func (c *DynamoDB) PutItemRequest(input *PutItemInput) (req *request.Request, output *PutItemOutput) { + op := &request.Operation{ + Name: opPutItem, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &PutItemInput{} + } + + req = c.newRequest(op, input, output) + output = &PutItemOutput{} + req.Data = output + return +} + +// Creates a new item, or replaces an old item with a new item. If an item that +// has the same primary key as the new item already exists in the specified +// table, the new item completely replaces the existing item. You can perform +// a conditional put operation (add a new item if one with the specified primary +// key doesn't exist), or replace an existing item if it has certain attribute +// values. +// +// In addition to putting an item, you can also return the item's attribute +// values in the same operation, using the ReturnValues parameter. +// +// When you add an item, the primary key attribute(s) are the only required +// attributes. Attribute values cannot be null. String and Binary type attributes +// must have lengths greater than zero. Set type attributes cannot be empty. +// Requests with empty values will be rejected with a ValidationException exception. +// +// You can request that PutItem return either a copy of the original item (before +// the update) or a copy of the updated item (after the update). For more information, +// see the ReturnValues description below. +// +// To prevent a new item from replacing an existing item, use a conditional +// put operation with ComparisonOperator set to NULL for the primary key attribute, +// or attributes. +// +// For more information about using this API, see Working with Items (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/WorkingWithItems.html) +// in the Amazon DynamoDB Developer Guide. +func (c *DynamoDB) PutItem(input *PutItemInput) (*PutItemOutput, error) { + req, out := c.PutItemRequest(input) + err := req.Send() + return out, err +} + +const opQuery = "Query" + +// QueryRequest generates a request for the Query operation. +func (c *DynamoDB) QueryRequest(input *QueryInput) (req *request.Request, output *QueryOutput) { + op := &request.Operation{ + Name: opQuery, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"ExclusiveStartKey"}, + OutputTokens: []string{"LastEvaluatedKey"}, + LimitToken: "Limit", + TruncationToken: "", + }, + } + + if input == nil { + input = &QueryInput{} + } + + req = c.newRequest(op, input, output) + output = &QueryOutput{} + req.Data = output + return +} + +// A Query operation uses the primary key of a table or a secondary index to +// directly access items from that table or index. +// +// Use the KeyConditionExpression parameter to provide a specific hash key +// value. 
The Query operation will return all of the items from the table or
+// index with that hash key value. You can optionally narrow the scope of the
+// Query operation by specifying a range key value and a comparison operator
+// in KeyConditionExpression. You can use the ScanIndexForward parameter to
+// get results in forward or reverse order, by range key or by index key.
+//
+// Queries that do not return results consume the minimum number of read capacity
+// units for that type of read operation.
+//
+// If the total number of items meeting the query criteria exceeds the result
+// set size limit of 1 MB, the query stops and results are returned to the user
+// with the LastEvaluatedKey element to continue the query in a subsequent operation.
+// Unlike a Scan operation, a Query operation never returns both an empty result
+// set and a LastEvaluatedKey value. LastEvaluatedKey is only provided if the
+// results exceed 1 MB, or if you have used the Limit parameter.
+//
+// You can query a table, a local secondary index, or a global secondary index.
+// For a query on a table or on a local secondary index, you can set the ConsistentRead
+// parameter to true and obtain a strongly consistent result. Global secondary
+// indexes support eventually consistent reads only, so do not specify ConsistentRead
+// when querying a global secondary index.
+func (c *DynamoDB) Query(input *QueryInput) (*QueryOutput, error) {
+ req, out := c.QueryRequest(input)
+ err := req.Send()
+ return out, err
+}
+
+func (c *DynamoDB) QueryPages(input *QueryInput, fn func(p *QueryOutput, lastPage bool) (shouldContinue bool)) error {
+ page, _ := c.QueryRequest(input)
+ page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator"))
+ return page.EachPage(func(p interface{}, lastPage bool) bool {
+ return fn(p.(*QueryOutput), lastPage)
+ })
+}
+
+const opScan = "Scan"
+
+// ScanRequest generates a request for the Scan operation.
+func (c *DynamoDB) ScanRequest(input *ScanInput) (req *request.Request, output *ScanOutput) {
+ op := &request.Operation{
+ Name: opScan,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ Paginator: &request.Paginator{
+ InputTokens: []string{"ExclusiveStartKey"},
+ OutputTokens: []string{"LastEvaluatedKey"},
+ LimitToken: "Limit",
+ TruncationToken: "",
+ },
+ }
+
+ if input == nil {
+ input = &ScanInput{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &ScanOutput{}
+ req.Data = output
+ return
+}
+
+// The Scan operation returns one or more items and item attributes by accessing
+// every item in a table or a secondary index. To have DynamoDB return fewer
+// items, you can provide a ScanFilter parameter.
+//
+// If the total number of scanned items exceeds the maximum data set size limit
+// of 1 MB, the scan stops and results are returned to the user with a LastEvaluatedKey
+// value to continue the scan in a subsequent operation. The results also include
+// the number of items exceeding the limit. A scan can result in no table data
+// meeting the filter criteria.
+//
+// By default, Scan operations proceed sequentially; however, for faster performance
+// on a large table or secondary index, applications can request a parallel
+// Scan operation by providing the Segment and TotalSegments parameters. For
+// more information, see Parallel Scan (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/QueryAndScan.html#QueryAndScanParallelScan)
+// in the Amazon DynamoDB Developer Guide.
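+//
+// An illustrative parallel scan sketch (Segment and TotalSegments are the
+// real parameters; the two-worker layout and table name "Music" are
+// assumptions, not part of the SDK):
+//
+//    total := int64(2)
+//    var wg sync.WaitGroup
+//    for seg := int64(0); seg < total; seg++ {
+//        wg.Add(1)
+//        go func(seg int64) {
+//            defer wg.Done()
+//            out, err := svc.Scan(&dynamodb.ScanInput{
+//                TableName:     aws.String("Music"),
+//                Segment:       aws.Int64(seg),
+//                TotalSegments: aws.Int64(total),
+//            })
+//            _, _ = out, err // each worker handles its own segment's results
+//        }(seg)
+//    }
+//    wg.Wait()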
+//
+// By default, Scan uses eventually consistent reads when accessing the data
+// in the table or local secondary index. However, you can use strongly consistent
+// reads instead by setting the ConsistentRead parameter to true.
+func (c *DynamoDB) Scan(input *ScanInput) (*ScanOutput, error) {
+ req, out := c.ScanRequest(input)
+ err := req.Send()
+ return out, err
+}
+
+func (c *DynamoDB) ScanPages(input *ScanInput, fn func(p *ScanOutput, lastPage bool) (shouldContinue bool)) error {
+ page, _ := c.ScanRequest(input)
+ page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator"))
+ return page.EachPage(func(p interface{}, lastPage bool) bool {
+ return fn(p.(*ScanOutput), lastPage)
+ })
+}
+
+const opUpdateItem = "UpdateItem"
+
+// UpdateItemRequest generates a request for the UpdateItem operation.
+func (c *DynamoDB) UpdateItemRequest(input *UpdateItemInput) (req *request.Request, output *UpdateItemOutput) {
+ op := &request.Operation{
+ Name: opUpdateItem,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &UpdateItemInput{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &UpdateItemOutput{}
+ req.Data = output
+ return
+}
+
+// Edits an existing item's attributes, or adds a new item to the table if it
+// does not already exist. You can put, delete, or add attribute values. You
+// can also perform a conditional update on an existing item (insert a new attribute
+// name-value pair if it doesn't exist, or replace an existing name-value pair
+// if it has certain expected attribute values). If conditions are specified
+// and the item does not exist, then the operation fails and a new item is not
+// created.
+//
+// You can also return the item's attribute values in the same UpdateItem operation
+// using the ReturnValues parameter.
+func (c *DynamoDB) UpdateItem(input *UpdateItemInput) (*UpdateItemOutput, error) {
+ req, out := c.UpdateItemRequest(input)
+ err := req.Send()
+ return out, err
+}
+
+const opUpdateTable = "UpdateTable"
+
+// UpdateTableRequest generates a request for the UpdateTable operation.
+func (c *DynamoDB) UpdateTableRequest(input *UpdateTableInput) (req *request.Request, output *UpdateTableOutput) {
+ op := &request.Operation{
+ Name: opUpdateTable,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &UpdateTableInput{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &UpdateTableOutput{}
+ req.Data = output
+ return
+}
+
+// Modifies the provisioned throughput settings, global secondary indexes, or
+// DynamoDB Streams settings for a given table.
+//
+// You can only perform one of the following operations at once:
+//
+// Modify the provisioned throughput settings of the table.
+//
+// Enable or disable Streams on the table.
+//
+// Remove a global secondary index from the table.
+//
+// Create a new global secondary index on the table. Once the index begins
+// backfilling, you can use UpdateTable to perform other operations.
+//
+// UpdateTable is an asynchronous operation; while it is executing, the table
+// status changes from ACTIVE to UPDATING. While it is UPDATING, you cannot
+// issue another UpdateTable request. When the table returns to the ACTIVE state,
+// the UpdateTable operation is complete.
+func (c *DynamoDB) UpdateTable(input *UpdateTableInput) (*UpdateTableOutput, error) {
+ req, out := c.UpdateTableRequest(input)
+ err := req.Send()
+ return out, err
+}
+
+// Represents an attribute for describing the key schema for the table and indexes.
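+// An illustrative definition of a string-typed hash key attribute (a sketch;
+// the attribute name "Artist" is a hypothetical):
+//
+//    &dynamodb.AttributeDefinition{
+//        AttributeName: aws.String("Artist"),
+//        AttributeType: aws.String("S"),
+//    }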
+type AttributeDefinition struct {
+ _ struct{} `type:"structure"`
+
+ // A name for the attribute.
+ AttributeName *string `min:"1" type:"string" required:"true"`
+
+ // The data type for the attribute.
+ AttributeType *string `type:"string" required:"true" enum:"ScalarAttributeType"`
+}
+
+// String returns the string representation
+func (s AttributeDefinition) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s AttributeDefinition) GoString() string {
+ return s.String()
+}
+
+// Represents the data for an attribute. You can set one, and only one, of the
+// elements.
+//
+// Each attribute in an item is a name-value pair. An attribute can be single-valued
+// or a multi-valued set. For example, a book item can have title and authors
+// attributes. Each book has one title but can have many authors. The multi-valued
+// attribute is a set; duplicate values are not allowed.
+type AttributeValue struct {
+ _ struct{} `type:"structure"`
+
+ // A Binary data type.
+ B []byte `type:"blob"`
+
+ // A Boolean data type.
+ BOOL *bool `type:"boolean"`
+
+ // A Binary Set data type.
+ BS [][]byte `type:"list"`
+
+ // A List of attribute values.
+ L []*AttributeValue `type:"list"`
+
+ // A Map of attribute values.
+ M map[string]*AttributeValue `type:"map"`
+
+ // A Number data type.
+ N *string `type:"string"`
+
+ // A Number Set data type.
+ NS []*string `type:"list"`
+
+ // A Null data type.
+ NULL *bool `type:"boolean"`
+
+ // A String data type.
+ S *string `type:"string"`
+
+ // A String Set data type.
+ SS []*string `type:"list"`
+}
+
+// String returns the string representation
+func (s AttributeValue) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s AttributeValue) GoString() string {
+ return s.String()
+}
+
+// For the UpdateItem operation, represents the attributes to be modified, the
+// action to perform on each, and the new value for each.
+//
+// You cannot use UpdateItem to update any primary key attributes. Instead,
+// you will need to delete the item, and then use PutItem to create a new item
+// with new attributes.
+//
+// Attribute values cannot be null; string and binary type attributes must
+// have lengths greater than zero; and set type attributes must not be empty.
+// Requests with empty values will be rejected with a ValidationException exception.
+type AttributeValueUpdate struct {
+ _ struct{} `type:"structure"`
+
+ // Specifies how to perform the update. Valid values are PUT (default), DELETE,
+ // and ADD. The behavior depends on whether the specified primary key already
+ // exists in the table.
+ //
+ // If an item with the specified Key is found in the table:
+ //
+ // PUT - Adds the specified attribute to the item. If the attribute already
+ // exists, it is replaced by the new value.
+ //
+ // DELETE - If no value is specified, the attribute and its value are removed
+ // from the item. The data type of the specified value must match the existing
+ // value's data type.
+ //
+ // If a set of values is specified, then those values are subtracted from the
+ // old set. For example, if the attribute value was the set [a,b,c] and the
+ // DELETE action specified [a,c], then the final attribute value would be [b].
+ // Specifying an empty set is an error.
+ //
+ // ADD - If the attribute does not already exist, then the attribute and
+ // its values are added to the item.
If the attribute does exist, then the behavior
+ // of ADD depends on the data type of the attribute:
+ //
+ // If the existing attribute is a number, and if Value is also a number,
+ // then the Value is mathematically added to the existing attribute. If Value
+ // is a negative number, then it is subtracted from the existing attribute.
+ //
+ // If you use ADD to increment or decrement a number value for an item that
+ // doesn't exist before the update, DynamoDB uses 0 as the initial value.
+ //
+ // In addition, if you use ADD to update an existing item, and intend to increment
+ // or decrement an attribute value which does not yet exist, DynamoDB uses 0
+ // as the initial value. For example, suppose that the item you want to update
+ // does not yet have an attribute named itemcount, but you decide to ADD the
+ // number 3 to this attribute anyway, even though it currently does not exist.
+ // DynamoDB will create the itemcount attribute, set its initial value to 0,
+ // and finally add 3 to it. The result will be a new itemcount attribute in
+ // the item, with a value of 3.
+ //
+ // If the existing data type is a set, and if the Value is also a set, then
+ // the Value is added to the existing set. (This is a set operation, not mathematical
+ // addition.) For example, if the attribute value was the set [1,2], and the
+ // ADD action specified [3], then the final attribute value would be [1,2,3].
+ // An error occurs if an ADD action is specified for a set attribute and the
+ // attribute type specified does not match the existing set type.
+ //
+ // Both sets must have the same primitive data type. For example, if the existing
+ // data type is a set of strings, the Value must also be a set of strings. The
+ // same holds true for number sets and binary sets.
+ //
+ // This action is only valid for an existing attribute whose data type is
+ // number or is a set. Do not use ADD for any other data types.
+ //
+ // If no item with the specified Key is found:
+ //
+ // PUT - DynamoDB creates a new item with the specified primary key, and
+ // then adds the attribute.
+ //
+ // DELETE - Nothing happens; there is no attribute to delete.
+ //
+ // ADD - DynamoDB creates an item with the supplied primary key and number
+ // (or set of numbers) for the attribute value. The only data types allowed
+ // are number and number set; no other data types can be specified.
+ Action *string `type:"string" enum:"AttributeAction"`
+
+ // Represents the data for an attribute. You can set one, and only one, of the
+ // elements.
+ //
+ // Each attribute in an item is a name-value pair. An attribute can be single-valued
+ // or a multi-valued set. For example, a book item can have title and authors
+ // attributes. Each book has one title but can have many authors. The multi-valued
+ // attribute is a set; duplicate values are not allowed.
+ Value *AttributeValue `type:"structure"`
+}
+
+// String returns the string representation
+func (s AttributeValueUpdate) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s AttributeValueUpdate) GoString() string {
+ return s.String()
+}
+
+// Represents the input of a BatchGetItem operation.
+type BatchGetItemInput struct {
+ _ struct{} `type:"structure"`
+
+ // A map of one or more table names and, for each table, a map that describes
+ // one or more items to retrieve from that table. Each table name can be used
+ // only once per BatchGetItem request.
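+ //
+ // An illustrative RequestItems value that retrieves two items by hash key
+ // (a sketch; the table name "Music" and key attribute "Artist" are
+ // hypotheticals):
+ //
+ //    map[string]*dynamodb.KeysAndAttributes{
+ //        "Music": {
+ //            Keys: []map[string]*dynamodb.AttributeValue{
+ //                {"Artist": {S: aws.String("No One You Know")}},
+ //                {"Artist": {S: aws.String("Acme Band")}},
+ //            },
+ //        },
+ //    }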
+ // + // Each element in the map of items to retrieve consists of the following: + // + // ConsistentRead - If true, a strongly consistent read is used; if false + // (the default), an eventually consistent read is used. + // + // ExpressionAttributeNames - One or more substitution tokens for attribute + // names in the ProjectionExpression parameter. The following are some use cases + // for using ExpressionAttributeNames: + // + // To access an attribute whose name conflicts with a DynamoDB reserved word. + // + // To create a placeholder for repeating occurrences of an attribute name + // in an expression. + // + // To prevent special characters in an attribute name from being misinterpreted + // in an expression. + // + // Use the # character in an expression to dereference an attribute name. + // For example, consider the following attribute name: + // + // Percentile + // + // The name of this attribute conflicts with a reserved word, so it cannot + // be used directly in an expression. (For the complete list of reserved words, + // see Reserved Words (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ReservedWords.html) + // in the Amazon DynamoDB Developer Guide). To work around this, you could specify + // the following for ExpressionAttributeNames: + // + // {"#P":"Percentile"} + // + // You could then use this substitution in an expression, as in this example: + // + // #P = :val + // + // Tokens that begin with the : character are expression attribute values, + // which are placeholders for the actual value at runtime. + // + // For more information on expression attribute names, see Accessing Item Attributes + // (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.AccessingItemAttributes.html) + // in the Amazon DynamoDB Developer Guide. + // + // Keys - An array of primary key attribute values that define specific items + // in the table. For each primary key, you must provide all of the key attributes. + // For example, with a hash type primary key, you only need to provide the hash + // attribute. For a hash-and-range type primary key, you must provide both the + // hash attribute and the range attribute. + // + // ProjectionExpression - A string that identifies one or more attributes + // to retrieve from the table. These attributes can include scalars, sets, or + // elements of a JSON document. The attributes in the expression must be separated + // by commas. + // + // If no attribute names are specified, then all attributes will be returned. + // If any of the requested attributes are not found, they will not appear in + // the result. + // + // For more information, see Accessing Item Attributes (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.AccessingItemAttributes.html) + // in the Amazon DynamoDB Developer Guide. + // + // AttributesToGet - + // + // This is a legacy parameter, for backward compatibility. New applications + // should use ProjectionExpression instead. Do not combine legacy parameters + // and expression parameters in a single API call; otherwise, DynamoDB will + // return a ValidationException exception. + // + // This parameter allows you to retrieve attributes of type List or Map; however, + // it cannot retrieve individual elements within a List or a Map. + // + // The names of one or more attributes to retrieve. If no attribute names are + // provided, then all attributes will be returned. If any of the requested attributes + // are not found, they will not appear in the result. 
+ // + // Note that AttributesToGet has no effect on provisioned throughput consumption. + // DynamoDB determines capacity units consumed based on item size, not on the + // amount of data that is returned to an application. + RequestItems map[string]*KeysAndAttributes `min:"1" type:"map" required:"true"` + + // Determines the level of detail about provisioned throughput consumption that + // is returned in the response: + // + // INDEXES - The response includes the aggregate ConsumedCapacity for the + // operation, together with ConsumedCapacity for each table and secondary index + // that was accessed. + // + // Note that some operations, such as GetItem and BatchGetItem, do not access + // any indexes at all. In these cases, specifying INDEXES will only return ConsumedCapacity + // information for table(s). + // + // TOTAL - The response includes only the aggregate ConsumedCapacity for the + // operation. + // + // NONE - No ConsumedCapacity details are included in the response. + ReturnConsumedCapacity *string `type:"string" enum:"ReturnConsumedCapacity"` +} + +// String returns the string representation +func (s BatchGetItemInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s BatchGetItemInput) GoString() string { + return s.String() +} + +// Represents the output of a BatchGetItem operation. +type BatchGetItemOutput struct { + _ struct{} `type:"structure"` + + // The read capacity units consumed by the operation. + // + // Each element consists of: + // + // TableName - The table that consumed the provisioned throughput. + // + // CapacityUnits - The total number of capacity units consumed. + ConsumedCapacity []*ConsumedCapacity `type:"list"` + + // A map of table name to a list of items. Each object in Responses consists + // of a table name, along with a map of attribute data consisting of the data + // type and attribute value. + Responses map[string][]map[string]*AttributeValue `type:"map"` + + // A map of tables and their respective keys that were not processed with the + // current response. The UnprocessedKeys value is in the same form as RequestItems, + // so the value can be provided directly to a subsequent BatchGetItem operation. + // For more information, see RequestItems in the Request Parameters section. + // + // Each element consists of: + // + // Keys - An array of primary key attribute values that define specific items + // in the table. + // + // AttributesToGet - One or more attributes to be retrieved from the table + // or index. By default, all attributes are returned. If a requested attribute + // is not found, it does not appear in the result. + // + // ConsistentRead - The consistency of a read operation. If set to true, + // then a strongly consistent read is used; otherwise, an eventually consistent + // read is used. + // + // If there are no unprocessed keys remaining, the response contains an empty + // UnprocessedKeys map. + UnprocessedKeys map[string]*KeysAndAttributes `min:"1" type:"map"` +} + +// String returns the string representation +func (s BatchGetItemOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s BatchGetItemOutput) GoString() string { + return s.String() +} + +// Represents the input of a BatchWriteItem operation. +type BatchWriteItemInput struct { + _ struct{} `type:"structure"` + + // A map of one or more table names and, for each table, a list of operations + // to be performed (DeleteRequest or PutRequest). 
Each element in the map consists
+ // of the following:
+ //
+ // DeleteRequest - Perform a DeleteItem operation on the specified item.
+ // The item to be deleted is identified by a Key subelement:
+ //
+ // Key - A map of primary key attribute values that uniquely identify the
+ // item. Each entry in this map consists of an attribute name and an attribute
+ // value. For each primary key, you must provide all of the key attributes.
+ // For example, with a hash type primary key, you only need to provide the hash
+ // attribute. For a hash-and-range type primary key, you must provide both the
+ // hash attribute and the range attribute.
+ //
+ // PutRequest - Perform a PutItem operation on the specified item. The
+ // item to be put is identified by an Item subelement:
+ //
+ // Item - A map of attributes and their values. Each entry in this map consists
+ // of an attribute name and an attribute value. Attribute values must not be
+ // null; string and binary type attributes must have lengths greater than zero;
+ // and set type attributes must not be empty. Requests that contain empty values
+ // will be rejected with a ValidationException exception.
+ //
+ // If you specify any attributes that are part of an index key, then the data
+ // types for those attributes must match those of the schema in the table's
+ // attribute definition.
+ RequestItems map[string][]*WriteRequest `min:"1" type:"map" required:"true"`
+
+ // Determines the level of detail about provisioned throughput consumption that
+ // is returned in the response:
+ //
+ // INDEXES - The response includes the aggregate ConsumedCapacity for the
+ // operation, together with ConsumedCapacity for each table and secondary index
+ // that was accessed.
+ //
+ // Note that some operations, such as GetItem and BatchGetItem, do not access
+ // any indexes at all. In these cases, specifying INDEXES will only return ConsumedCapacity
+ // information for table(s).
+ //
+ // TOTAL - The response includes only the aggregate ConsumedCapacity for the
+ // operation.
+ //
+ // NONE - No ConsumedCapacity details are included in the response.
+ ReturnConsumedCapacity *string `type:"string" enum:"ReturnConsumedCapacity"`
+
+ // Determines whether item collection metrics are returned. If set to SIZE,
+ // the response includes statistics about item collections, if any, that were
+ // modified during the operation. If set to NONE (the default), no statistics
+ // are returned.
+ ReturnItemCollectionMetrics *string `type:"string" enum:"ReturnItemCollectionMetrics"`
+}
+
+// String returns the string representation
+func (s BatchWriteItemInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s BatchWriteItemInput) GoString() string {
+ return s.String()
+}
+
+// Represents the output of a BatchWriteItem operation.
+type BatchWriteItemOutput struct {
+ _ struct{} `type:"structure"`
+
+ // The capacity units consumed by the operation.
+ //
+ // Each element consists of:
+ //
+ // TableName - The table that consumed the provisioned throughput.
+ //
+ // CapacityUnits - The total number of capacity units consumed.
+ ConsumedCapacity []*ConsumedCapacity `type:"list"`
+
+ // A list of tables that were processed by BatchWriteItem and, for each table,
+ // information about any item collections that were affected by individual DeleteItem
+ // or PutItem operations.
+ //
+ // Each entry consists of the following subelements:
+ //
+ // ItemCollectionKey - The hash key value of the item collection. This is
+ // the same as the hash key of the item.
+ //
+ // SizeEstimateRange - An estimate of item collection size, expressed in
+ // GB. This is a two-element array containing a lower bound and an upper bound
+ // for the estimate. The estimate includes the size of all the items in the
+ // table, plus the size of all attributes projected into all of the local secondary
+ // indexes on the table. Use this estimate to measure whether a local secondary
+ // index is approaching its size limit.
+ //
+ // The estimate is subject to change over time; therefore, do not rely on the
+ // precision or accuracy of the estimate.
+ ItemCollectionMetrics map[string][]*ItemCollectionMetrics `type:"map"`
+
+ // A map of tables and requests against those tables that were not processed.
+ // The UnprocessedItems value is in the same form as RequestItems, so you can
+ // provide this value directly to a subsequent BatchWriteItem operation. For
+ // more information, see RequestItems in the Request Parameters section.
+ //
+ // Each UnprocessedItems entry consists of a table name and, for that table,
+ // a list of operations to perform (DeleteRequest or PutRequest).
+ //
+ // DeleteRequest - Perform a DeleteItem operation on the specified item.
+ // The item to be deleted is identified by a Key subelement:
+ //
+ // Key - A map of primary key attribute values that uniquely identify the
+ // item. Each entry in this map consists of an attribute name and an attribute
+ // value.
+ //
+ // PutRequest - Perform a PutItem operation on the specified item. The
+ // item to be put is identified by an Item subelement:
+ //
+ // Item - A map of attributes and their values. Each entry in this map consists
+ // of an attribute name and an attribute value. Attribute values must not be
+ // null; string and binary type attributes must have lengths greater than zero;
+ // and set type attributes must not be empty. Requests that contain empty values
+ // will be rejected with a ValidationException exception.
+ //
+ // If you specify any attributes that are part of an index key, then the data
+ // types for those attributes must match those of the schema in the table's
+ // attribute definition.
+ //
+ // If there are no unprocessed items remaining, the response contains an
+ // empty UnprocessedItems map.
+ UnprocessedItems map[string][]*WriteRequest `min:"1" type:"map"`
+}
+
+// String returns the string representation
+func (s BatchWriteItemOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s BatchWriteItemOutput) GoString() string {
+ return s.String()
+}
+
+// Represents the amount of provisioned throughput capacity consumed on a table
+// or an index.
+type Capacity struct {
+ _ struct{} `type:"structure"`
+
+ // The total number of capacity units consumed on a table or an index.
+ CapacityUnits *float64 `type:"double"`
+}
+
+// String returns the string representation
+func (s Capacity) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s Capacity) GoString() string {
+ return s.String()
+}
+
+// Represents the selection criteria for a Query or Scan operation:
+//
+// For a Query operation, Condition is used for specifying the KeyConditions
+// to use when querying a table or an index.
For KeyConditions, only the following +// comparison operators are supported: +// +// EQ | LE | LT | GE | GT | BEGINS_WITH | BETWEEN +// +// Condition is also used in a QueryFilter, which evaluates the query results +// and returns only the desired values. +// +// For a Scan operation, Condition is used in a ScanFilter, which evaluates +// the scan results and returns only the desired values. +type Condition struct { + _ struct{} `type:"structure"` + + // One or more values to evaluate against the supplied attribute. The number + // of values in the list depends on the ComparisonOperator being used. + // + // For type Number, value comparisons are numeric. + // + // String value comparisons for greater than, equals, or less than are based + // on ASCII character code values. For example, a is greater than A, and a is + // greater than B. For a list of code values, see http://en.wikipedia.org/wiki/ASCII#ASCII_printable_characters + // (http://en.wikipedia.org/wiki/ASCII#ASCII_printable_characters). + // + // For Binary, DynamoDB treats each byte of the binary data as unsigned when + // it compares binary values. + AttributeValueList []*AttributeValue `type:"list"` + + // A comparator for evaluating attributes. For example, equals, greater than, + // less than, etc. + // + // The following comparison operators are available: + // + // EQ | NE | LE | LT | GE | GT | NOT_NULL | NULL | CONTAINS | NOT_CONTAINS + // | BEGINS_WITH | IN | BETWEEN + // + // The following are descriptions of each comparison operator. + // + // EQ : Equal. EQ is supported for all datatypes, including lists and maps. + // + // AttributeValueList can contain only one AttributeValue element of type String, + // Number, Binary, String Set, Number Set, or Binary Set. If an item contains + // an AttributeValue element of a different type than the one provided in the + // request, the value does not match. For example, {"S":"6"} does not equal + // {"N":"6"}. Also, {"N":"6"} does not equal {"NS":["6", "2", "1"]}. + // + // NE : Not equal. NE is supported for all datatypes, including lists and + // maps. + // + // AttributeValueList can contain only one AttributeValue of type String, Number, + // Binary, String Set, Number Set, or Binary Set. If an item contains an AttributeValue + // of a different type than the one provided in the request, the value does + // not match. For example, {"S":"6"} does not equal {"N":"6"}. Also, {"N":"6"} + // does not equal {"NS":["6", "2", "1"]}. + // + // LE : Less than or equal. + // + // AttributeValueList can contain only one AttributeValue element of type String, + // Number, or Binary (not a set type). If an item contains an AttributeValue + // element of a different type than the one provided in the request, the value + // does not match. For example, {"S":"6"} does not equal {"N":"6"}. Also, {"N":"6"} + // does not compare to {"NS":["6", "2", "1"]}. + // + // LT : Less than. + // + // AttributeValueList can contain only one AttributeValue of type String, Number, + // or Binary (not a set type). If an item contains an AttributeValue element + // of a different type than the one provided in the request, the value does + // not match. For example, {"S":"6"} does not equal {"N":"6"}. Also, {"N":"6"} + // does not compare to {"NS":["6", "2", "1"]}. + // + // GE : Greater than or equal. + // + // AttributeValueList can contain only one AttributeValue element of type String, + // Number, or Binary (not a set type). 
If an item contains an AttributeValue + // element of a different type than the one provided in the request, the value + // does not match. For example, {"S":"6"} does not equal {"N":"6"}. Also, {"N":"6"} + // does not compare to {"NS":["6", "2", "1"]}. + // + // GT : Greater than. + // + // AttributeValueList can contain only one AttributeValue element of type String, + // Number, or Binary (not a set type). If an item contains an AttributeValue + // element of a different type than the one provided in the request, the value + // does not match. For example, {"S":"6"} does not equal {"N":"6"}. Also, {"N":"6"} + // does not compare to {"NS":["6", "2", "1"]}. + // + // NOT_NULL : The attribute exists. NOT_NULL is supported for all datatypes, + // including lists and maps. + // + // This operator tests for the existence of an attribute, not its data type. + // If the data type of attribute "a" is null, and you evaluate it using NOT_NULL, + // the result is a Boolean true. This result is because the attribute "a" exists; + // its data type is not relevant to the NOT_NULL comparison operator. + // + // NULL : The attribute does not exist. NULL is supported for all datatypes, + // including lists and maps. + // + // This operator tests for the nonexistence of an attribute, not its data type. + // If the data type of attribute "a" is null, and you evaluate it using NULL, + // the result is a Boolean false. This is because the attribute "a" exists; + // its data type is not relevant to the NULL comparison operator. + // + // CONTAINS : Checks for a subsequence, or value in a set. + // + // AttributeValueList can contain only one AttributeValue element of type String, + // Number, or Binary (not a set type). If the target attribute of the comparison + // is of type String, then the operator checks for a substring match. If the + // target attribute of the comparison is of type Binary, then the operator looks + // for a subsequence of the target that matches the input. If the target attribute + // of the comparison is a set ("SS", "NS", or "BS"), then the operator evaluates + // to true if it finds an exact match with any member of the set. + // + // CONTAINS is supported for lists: When evaluating "a CONTAINS b", "a" can + // be a list; however, "b" cannot be a set, a map, or a list. + // + // NOT_CONTAINS : Checks for absence of a subsequence, or absence of a value + // in a set. + // + // AttributeValueList can contain only one AttributeValue element of type String, + // Number, or Binary (not a set type). If the target attribute of the comparison + // is a String, then the operator checks for the absence of a substring match. + // If the target attribute of the comparison is Binary, then the operator checks + // for the absence of a subsequence of the target that matches the input. If + // the target attribute of the comparison is a set ("SS", "NS", or "BS"), then + // the operator evaluates to true if it does not find an exact match with any + // member of the set. + // + // NOT_CONTAINS is supported for lists: When evaluating "a NOT CONTAINS b", + // "a" can be a list; however, "b" cannot be a set, a map, or a list. + // + // BEGINS_WITH : Checks for a prefix. + // + // AttributeValueList can contain only one AttributeValue of type String or + // Binary (not a Number or a set type). The target attribute of the comparison + // must be of type String or Binary (not a Number or a set type). + // + // IN : Checks for matching elements within two sets. 
+ // + // AttributeValueList can contain one or more AttributeValue elements of type + // String, Number, or Binary (not a set type). These attributes are compared + // against an existing set type attribute of an item. If any elements of the + // input set are present in the item attribute, the expression evaluates to + // true. + // + // BETWEEN : Greater than or equal to the first value, and less than or equal + // to the second value. + // + // AttributeValueList must contain two AttributeValue elements of the same + // type, either String, Number, or Binary (not a set type). A target attribute + // matches if the target value is greater than, or equal to, the first element + // and less than, or equal to, the second element. If an item contains an AttributeValue + // element of a different type than the one provided in the request, the value + // does not match. For example, {"S":"6"} does not compare to {"N":"6"}. Also, + // {"N":"6"} does not compare to {"NS":["6", "2", "1"]} + // + // For usage examples of AttributeValueList and ComparisonOperator, see Legacy + // Conditional Parameters (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/LegacyConditionalParameters.html) + // in the Amazon DynamoDB Developer Guide. + ComparisonOperator *string `type:"string" required:"true" enum:"ComparisonOperator"` +} + +// String returns the string representation +func (s Condition) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Condition) GoString() string { + return s.String() +} + +// The capacity units consumed by an operation. The data returned includes the +// total provisioned throughput consumed, along with statistics for the table +// and any indexes involved in the operation. ConsumedCapacity is only returned +// if the request asked for it. For more information, see Provisioned Throughput +// (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ProvisionedThroughputIntro.html) +// in the Amazon DynamoDB Developer Guide. +type ConsumedCapacity struct { + _ struct{} `type:"structure"` + + // The total number of capacity units consumed by the operation. + CapacityUnits *float64 `type:"double"` + + // The amount of throughput consumed on each global index affected by the operation. + GlobalSecondaryIndexes map[string]*Capacity `type:"map"` + + // The amount of throughput consumed on each local index affected by the operation. + LocalSecondaryIndexes map[string]*Capacity `type:"map"` + + // The amount of throughput consumed on the table affected by the operation. + Table *Capacity `type:"structure"` + + // The name of the table that was affected by the operation. + TableName *string `min:"3" type:"string"` +} + +// String returns the string representation +func (s ConsumedCapacity) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ConsumedCapacity) GoString() string { + return s.String() +} + +// Represents a new global secondary index to be added to an existing table. +type CreateGlobalSecondaryIndexAction struct { + _ struct{} `type:"structure"` + + // The name of the global secondary index to be created. + IndexName *string `min:"3" type:"string" required:"true"` + + // The key schema for the global secondary index. + KeySchema []*KeySchemaElement `min:"1" type:"list" required:"true"` + + // Represents attributes that are copied (projected) from the table into an + // index. 
These are in addition to the primary key attributes and index key + // attributes, which are automatically projected. + Projection *Projection `type:"structure" required:"true"` + + // Represents the provisioned throughput settings for a specified table or index. + // The settings can be modified using the UpdateTable operation. + // + // For current minimum and maximum provisioned throughput values, see Limits + // (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Limits.html) + // in the Amazon DynamoDB Developer Guide. + ProvisionedThroughput *ProvisionedThroughput `type:"structure" required:"true"` +} + +// String returns the string representation +func (s CreateGlobalSecondaryIndexAction) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateGlobalSecondaryIndexAction) GoString() string { + return s.String() +} + +// Represents the input of a CreateTable operation. +type CreateTableInput struct { + _ struct{} `type:"structure"` + + // An array of attributes that describe the key schema for the table and indexes. + AttributeDefinitions []*AttributeDefinition `type:"list" required:"true"` + + // One or more global secondary indexes (the maximum is five) to be created + // on the table. Each global secondary index in the array includes the following: + // + // IndexName - The name of the global secondary index. Must be unique only + // for this table. + // + // KeySchema - Specifies the key schema for the global secondary index. + // + // Projection - Specifies attributes that are copied (projected) from the + // table into the index. These are in addition to the primary key attributes + // and index key attributes, which are automatically projected. Each attribute + // specification is composed of: + // + // ProjectionType - One of the following: + // + // KEYS_ONLY - Only the index and primary keys are projected into the index. + // + // INCLUDE - Only the specified table attributes are projected into the index. + // The list of projected attributes are in NonKeyAttributes. + // + // ALL - All of the table attributes are projected into the index. + // + // NonKeyAttributes - A list of one or more non-key attribute names that + // are projected into the secondary index. The total count of attributes provided + // in NonKeyAttributes, summed across all of the secondary indexes, must not + // exceed 20. If you project the same attribute into two different indexes, + // this counts as two distinct attributes when determining the total. + // + // ProvisionedThroughput - The provisioned throughput settings for the + // global secondary index, consisting of read and write capacity units. + GlobalSecondaryIndexes []*GlobalSecondaryIndex `type:"list"` + + // Specifies the attributes that make up the primary key for a table or an index. + // The attributes in KeySchema must also be defined in the AttributeDefinitions + // array. For more information, see Data Model (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/DataModel.html) + // in the Amazon DynamoDB Developer Guide. + // + // Each KeySchemaElement in the array is composed of: + // + // AttributeName - The name of this key attribute. + // + // KeyType - Determines whether the key attribute is HASH or RANGE. + // + // For a primary key that consists of a hash attribute, you must provide + // exactly one element with a KeyType of HASH. 
+ // + // For a primary key that consists of hash and range attributes, you must provide + // exactly two elements, in this order: The first element must have a KeyType + // of HASH, and the second element must have a KeyType of RANGE. + // + // For more information, see Specifying the Primary Key (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/WorkingWithTables.html#WorkingWithTables.primary.key) + // in the Amazon DynamoDB Developer Guide. + KeySchema []*KeySchemaElement `min:"1" type:"list" required:"true"` + + // One or more local secondary indexes (the maximum is five) to be created on + // the table. Each index is scoped to a given hash key value. There is a 10 + // GB size limit per hash key; otherwise, the size of a local secondary index + // is unconstrained. + // + // Each local secondary index in the array includes the following: + // + // IndexName - The name of the local secondary index. Must be unique only + // for this table. + // + // KeySchema - Specifies the key schema for the local secondary index. The + // key schema must begin with the same hash key attribute as the table. + // + // Projection - Specifies attributes that are copied (projected) from the + // table into the index. These are in addition to the primary key attributes + // and index key attributes, which are automatically projected. Each attribute + // specification is composed of: + // + // ProjectionType - One of the following: + // + // KEYS_ONLY - Only the index and primary keys are projected into the index. + // + // INCLUDE - Only the specified table attributes are projected into the index. + // The list of projected attributes are in NonKeyAttributes. + // + // ALL - All of the table attributes are projected into the index. + // + // NonKeyAttributes - A list of one or more non-key attribute names that + // are projected into the secondary index. The total count of attributes provided + // in NonKeyAttributes, summed across all of the secondary indexes, must not + // exceed 20. If you project the same attribute into two different indexes, + // this counts as two distinct attributes when determining the total. + LocalSecondaryIndexes []*LocalSecondaryIndex `type:"list"` + + // Represents the provisioned throughput settings for a specified table or index. + // The settings can be modified using the UpdateTable operation. + // + // For current minimum and maximum provisioned throughput values, see Limits + // (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Limits.html) + // in the Amazon DynamoDB Developer Guide. + ProvisionedThroughput *ProvisionedThroughput `type:"structure" required:"true"` + + // The settings for DynamoDB Streams on the table. These settings consist of: + // + // StreamEnabled - Indicates whether Streams is to be enabled (true) or disabled + // (false). + // + // StreamViewType - When an item in the table is modified, StreamViewType + // determines what information is written to the table's stream. Valid values + // for StreamViewType are: + // + // KEYS_ONLY - Only the key attributes of the modified item are written to + // the stream. + // + // NEW_IMAGE - The entire item, as it appears after it was modified, is written + // to the stream. + // + // OLD_IMAGE - The entire item, as it appeared before it was modified, is written + // to the stream. + // + // NEW_AND_OLD_IMAGES - Both the new and the old item images of the item are + // written to the stream. 
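+ //
+ // An illustrative setting that enables a stream carrying both old and new
+ // item images (a sketch, not a requirement):
+ //
+ //    &dynamodb.StreamSpecification{
+ //        StreamEnabled:  aws.Bool(true),
+ //        StreamViewType: aws.String("NEW_AND_OLD_IMAGES"),
+ //    }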
+ StreamSpecification *StreamSpecification `type:"structure"`
+
+ // The name of the table to create.
+ TableName *string `min:"3" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s CreateTableInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreateTableInput) GoString() string {
+ return s.String()
+}
+
+// Represents the output of a CreateTable operation.
+type CreateTableOutput struct {
+ _ struct{} `type:"structure"`
+
+ // Represents the properties of a table.
+ TableDescription *TableDescription `type:"structure"`
+}
+
+// String returns the string representation
+func (s CreateTableOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreateTableOutput) GoString() string {
+ return s.String()
+}
+
+// Represents a global secondary index to be deleted from an existing table.
+type DeleteGlobalSecondaryIndexAction struct {
+ _ struct{} `type:"structure"`
+
+ // The name of the global secondary index to be deleted.
+ IndexName *string `min:"3" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s DeleteGlobalSecondaryIndexAction) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteGlobalSecondaryIndexAction) GoString() string {
+ return s.String()
+}
+
+// Represents the input of a DeleteItem operation.
+type DeleteItemInput struct {
+ _ struct{} `type:"structure"`
+
+ // A condition that must be satisfied in order for a conditional DeleteItem
+ // to succeed.
+ //
+ // An expression can contain any of the following:
+ //
+ // Functions: attribute_exists | attribute_not_exists | attribute_type |
+ // contains | begins_with | size
+ //
+ // These function names are case-sensitive.
+ //
+ // Comparison operators: = | <> | < | > | <= | >= | BETWEEN | IN
+ //
+ // Logical operators: AND | OR | NOT
+ //
+ // For more information on condition expressions, see Specifying Conditions
+ // (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.SpecifyingConditions.html)
+ // in the Amazon DynamoDB Developer Guide.
+ //
+ // ConditionExpression replaces the legacy ConditionalOperator and Expected
+ // parameters.
+ ConditionExpression *string `type:"string"`
+
+ // This is a legacy parameter, for backward compatibility. New applications
+ // should use ConditionExpression instead. Do not combine legacy parameters
+ // and expression parameters in a single API call; otherwise, DynamoDB will
+ // return a ValidationException exception.
+ //
+ // A logical operator to apply to the conditions in the Expected map:
+ //
+ // AND - If all of the conditions evaluate to true, then the entire map evaluates
+ // to true.
+ //
+ // OR - If at least one of the conditions evaluates to true, then the entire
+ // map evaluates to true.
+ //
+ // If you omit ConditionalOperator, then AND is the default.
+ //
+ // The operation will succeed only if the entire map evaluates to true.
+ //
+ // This parameter does not support attributes of type List or Map.
+ ConditionalOperator *string `type:"string" enum:"ConditionalOperator"`
+
+ // This is a legacy parameter, for backward compatibility. New applications
+ // should use ConditionExpression instead. Do not combine legacy parameters
+ // and expression parameters in a single API call; otherwise, DynamoDB will
+ // return a ValidationException exception.
+ // + // A map of attribute/condition pairs. Expected provides a conditional block + // for the DeleteItem operation. + // + // Each element of Expected consists of an attribute name, a comparison operator, + // and one or more values. DynamoDB compares the attribute with the value(s) + // you supplied, using the comparison operator. For each Expected element, the + // result of the evaluation is either true or false. + // + // If you specify more than one element in the Expected map, then by default + // all of the conditions must evaluate to true. In other words, the conditions + // are ANDed together. (You can use the ConditionalOperator parameter to OR + // the conditions instead. If you do this, then at least one of the conditions + // must evaluate to true, rather than all of them.) + // + // If the Expected map evaluates to true, then the conditional operation succeeds; + // otherwise, it fails. + // + // Expected contains the following: + // + // AttributeValueList - One or more values to evaluate against the supplied + // attribute. The number of values in the list depends on the ComparisonOperator + // being used. + // + // For type Number, value comparisons are numeric. + // + // String value comparisons for greater than, equals, or less than are based + // on ASCII character code values. For example, a is greater than A, and a is + // greater than B. For a list of code values, see http://en.wikipedia.org/wiki/ASCII#ASCII_printable_characters. + // + // For type Binary, DynamoDB treats each byte of the binary data as unsigned + // when it compares binary values. + // + // ComparisonOperator - A comparator for evaluating attributes in the AttributeValueList. + // When performing the comparison, DynamoDB uses strongly consistent reads. + // + // The following comparison operators are available: + // + // EQ | NE | LE | LT | GE | GT | NOT_NULL | NULL | CONTAINS | NOT_CONTAINS + // | BEGINS_WITH | IN | BETWEEN + // + // The following are descriptions of each comparison operator. + // + // EQ : Equal. EQ is supported for all datatypes, including lists and maps. + // + // AttributeValueList can contain only one AttributeValue element of type String, + // Number, Binary, String Set, Number Set, or Binary Set. If an item contains + // an AttributeValue element of a different type than the one provided in the + // request, the value does not match. For example, {"S":"6"} does not equal + // {"N":"6"}. Also, {"N":"6"} does not equal {"NS":["6", "2", "1"]}. + // + // NE : Not equal. NE is supported for all datatypes, including lists and + // maps. + // + // AttributeValueList can contain only one AttributeValue of type String, Number, + // Binary, String Set, Number Set, or Binary Set. If an item contains an AttributeValue + // of a different type than the one provided in the request, the value does + // not match. For example, {"S":"6"} does not equal {"N":"6"}. Also, {"N":"6"} + // does not equal {"NS":["6", "2", "1"]}. + // + // LE : Less than or equal. + // + // AttributeValueList can contain only one AttributeValue element of type String, + // Number, or Binary (not a set type). If an item contains an AttributeValue + // element of a different type than the one provided in the request, the value + // does not match. For example, {"S":"6"} does not equal {"N":"6"}. Also, {"N":"6"} + // does not compare to {"NS":["6", "2", "1"]}. + // + // LT : Less than. + // + // AttributeValueList can contain only one AttributeValue of type String, Number, + // or Binary (not a set type). 
If an item contains an AttributeValue element + // of a different type than the one provided in the request, the value does + // not match. For example, {"S":"6"} does not equal {"N":"6"}. Also, {"N":"6"} + // does not compare to {"NS":["6", "2", "1"]}. + // + // GE : Greater than or equal. + // + // AttributeValueList can contain only one AttributeValue element of type String, + // Number, or Binary (not a set type). If an item contains an AttributeValue + // element of a different type than the one provided in the request, the value + // does not match. For example, {"S":"6"} does not equal {"N":"6"}. Also, {"N":"6"} + // does not compare to {"NS":["6", "2", "1"]}. + // + // GT : Greater than. + // + // AttributeValueList can contain only one AttributeValue element of type String, + // Number, or Binary (not a set type). If an item contains an AttributeValue + // element of a different type than the one provided in the request, the value + // does not match. For example, {"S":"6"} does not equal {"N":"6"}. Also, {"N":"6"} + // does not compare to {"NS":["6", "2", "1"]}. + // + // NOT_NULL : The attribute exists. NOT_NULL is supported for all datatypes, + // including lists and maps. + // + // This operator tests for the existence of an attribute, not its data type. + // If the data type of attribute "a" is null, and you evaluate it using NOT_NULL, + // the result is a Boolean true. This result is because the attribute "a" exists; + // its data type is not relevant to the NOT_NULL comparison operator. + // + // NULL : The attribute does not exist. NULL is supported for all datatypes, + // including lists and maps. + // + // This operator tests for the nonexistence of an attribute, not its data type. + // If the data type of attribute "a" is null, and you evaluate it using NULL, + // the result is a Boolean false. This is because the attribute "a" exists; + // its data type is not relevant to the NULL comparison operator. + // + // CONTAINS : Checks for a subsequence, or value in a set. + // + // AttributeValueList can contain only one AttributeValue element of type String, + // Number, or Binary (not a set type). If the target attribute of the comparison + // is of type String, then the operator checks for a substring match. If the + // target attribute of the comparison is of type Binary, then the operator looks + // for a subsequence of the target that matches the input. If the target attribute + // of the comparison is a set ("SS", "NS", or "BS"), then the operator evaluates + // to true if it finds an exact match with any member of the set. + // + // CONTAINS is supported for lists: When evaluating "a CONTAINS b", "a" can + // be a list; however, "b" cannot be a set, a map, or a list. + // + // NOT_CONTAINS : Checks for absence of a subsequence, or absence of a value + // in a set. + // + // AttributeValueList can contain only one AttributeValue element of type String, + // Number, or Binary (not a set type). If the target attribute of the comparison + // is a String, then the operator checks for the absence of a substring match. + // If the target attribute of the comparison is Binary, then the operator checks + // for the absence of a subsequence of the target that matches the input. If + // the target attribute of the comparison is a set ("SS", "NS", or "BS"), then + // the operator evaluates to true if it does not find an exact match with any + // member of the set. 
+ // + // NOT_CONTAINS is supported for lists: When evaluating "a NOT CONTAINS b", + // "a" can be a list; however, "b" cannot be a set, a map, or a list. + // + // BEGINS_WITH : Checks for a prefix. + // + // AttributeValueList can contain only one AttributeValue of type String or + // Binary (not a Number or a set type). The target attribute of the comparison + // must be of type String or Binary (not a Number or a set type). + // + // IN : Checks for matching elements within two sets. + // + // AttributeValueList can contain one or more AttributeValue elements of type + // String, Number, or Binary (not a set type). These attributes are compared + // against an existing set type attribute of an item. If any elements of the + // input set are present in the item attribute, the expression evaluates to + // true. + // + // BETWEEN : Greater than or equal to the first value, and less than or equal + // to the second value. + // + // AttributeValueList must contain two AttributeValue elements of the same + // type, either String, Number, or Binary (not a set type). A target attribute + // matches if the target value is greater than, or equal to, the first element + // and less than, or equal to, the second element. If an item contains an AttributeValue + // element of a different type than the one provided in the request, the value + // does not match. For example, {"S":"6"} does not compare to {"N":"6"}. Also, + // {"N":"6"} does not compare to {"NS":["6", "2", "1"]}. + // + // For usage examples of AttributeValueList and ComparisonOperator, see + // Legacy Conditional Parameters (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/LegacyConditionalParameters.html) + // in the Amazon DynamoDB Developer Guide. + // + // For backward compatibility with previous DynamoDB releases, the following + // parameters can be used instead of AttributeValueList and ComparisonOperator: + // + // Value - A value for DynamoDB to compare with an attribute. + // + // Exists - A Boolean value that causes DynamoDB to evaluate the value before + // attempting the conditional operation: + // + // If Exists is true, DynamoDB will check to see if that attribute value + // already exists in the table. If it is found, then the condition evaluates + // to true; otherwise the condition evaluates to false. + // + // If Exists is false, DynamoDB assumes that the attribute value does not + // exist in the table. If in fact the value does not exist, then the assumption + // is valid and the condition evaluates to true. If the value is found, despite + // the assumption that it does not exist, the condition evaluates to false. + // + // Note that the default value for Exists is true. + // + // The Value and Exists parameters are incompatible with AttributeValueList + // and ComparisonOperator. Note that if you use both sets of parameters at once, + // DynamoDB will return a ValidationException exception. + // + // This parameter does not support attributes of type List or Map. + Expected map[string]*ExpectedAttributeValue `type:"map"` + + // One or more substitution tokens for attribute names in an expression. The + // following are some use cases for using ExpressionAttributeNames: + // + // To access an attribute whose name conflicts with a DynamoDB reserved word. + // + // To create a placeholder for repeating occurrences of an attribute name + // in an expression. + // + // To prevent special characters in an attribute name from being misinterpreted + // in an expression.
+ // + // Use the # character in an expression to dereference an attribute name. + // For example, consider the following attribute name: + // + // Percentile + // + // The name of this attribute conflicts with a reserved word, so it cannot + // be used directly in an expression. (For the complete list of reserved words, + // see Reserved Words (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ReservedWords.html) + // in the Amazon DynamoDB Developer Guide). To work around this, you could specify + // the following for ExpressionAttributeNames: + // + // {"#P":"Percentile"} + // + // You could then use this substitution in an expression, as in this example: + // + // #P = :val + // + // Tokens that begin with the : character are expression attribute values, + // which are placeholders for the actual value at runtime. + // + // For more information on expression attribute names, see Accessing Item Attributes + // (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.AccessingItemAttributes.html) + // in the Amazon DynamoDB Developer Guide. + ExpressionAttributeNames map[string]*string `type:"map"` + + // One or more values that can be substituted in an expression. + // + // Use the : (colon) character in an expression to dereference an attribute + // value. For example, suppose that you wanted to check whether the value of + // the ProductStatus attribute was one of the following: + // + // Available | Backordered | Discontinued + // + // You would first need to specify ExpressionAttributeValues as follows: + // + // { ":avail":{"S":"Available"}, ":back":{"S":"Backordered"}, ":disc":{"S":"Discontinued"} + // } + // + // You could then use these values in an expression, such as this: + // + // ProductStatus IN (:avail, :back, :disc) + // + // For more information on expression attribute values, see Specifying Conditions + // (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.SpecifyingConditions.html) + // in the Amazon DynamoDB Developer Guide. + ExpressionAttributeValues map[string]*AttributeValue `type:"map"` + + // A map of attribute names to AttributeValue objects, representing the primary + // key of the item to delete. + // + // For the primary key, you must provide all of the attributes. For example, + // with a hash type primary key, you only need to provide the hash attribute. + // For a hash-and-range type primary key, you must provide both the hash attribute + // and the range attribute. + Key map[string]*AttributeValue `type:"map" required:"true"` + + // Determines the level of detail about provisioned throughput consumption that + // is returned in the response: + // + // INDEXES - The response includes the aggregate ConsumedCapacity for the + // operation, together with ConsumedCapacity for each table and secondary index + // that was accessed. + // + // Note that some operations, such as GetItem and BatchGetItem, do not access + // any indexes at all. In these cases, specifying INDEXES will only return ConsumedCapacity + // information for table(s). + // + // TOTAL - The response includes only the aggregate ConsumedCapacity for the + // operation. + // + // NONE - No ConsumedCapacity details are included in the response. + ReturnConsumedCapacity *string `type:"string" enum:"ReturnConsumedCapacity"` + + // Determines whether item collection metrics are returned. 
If set to SIZE, + // the response includes statistics about item collections, if any, that were + // modified during the operation. If set to NONE + // (the default), no statistics are returned. + ReturnItemCollectionMetrics *string `type:"string" enum:"ReturnItemCollectionMetrics"` + + // Use ReturnValues if you want to get the item attributes as they appeared + // before they were deleted. For DeleteItem, the valid values are: + // + // NONE - If ReturnValues is not specified, or if its value is NONE, then + // nothing is returned. (This setting is the default for ReturnValues.) + // + // ALL_OLD - The content of the old item is returned. + ReturnValues *string `type:"string" enum:"ReturnValue"` + + // The name of the table from which to delete the item. + TableName *string `min:"3" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteItemInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteItemInput) GoString() string { + return s.String() +} + +// Represents the output of a DeleteItem operation. +type DeleteItemOutput struct { + _ struct{} `type:"structure"` + + // A map of attribute names to AttributeValue objects, representing the item + // as it appeared before the DeleteItem operation. This map appears in the response + // only if ReturnValues was specified as ALL_OLD in the request. + Attributes map[string]*AttributeValue `type:"map"` + + // The capacity units consumed by an operation. The data returned includes the + // total provisioned throughput consumed, along with statistics for the table + // and any indexes involved in the operation. ConsumedCapacity is only returned + // if the request asked for it. For more information, see Provisioned Throughput + // (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ProvisionedThroughputIntro.html) + // in the Amazon DynamoDB Developer Guide. + ConsumedCapacity *ConsumedCapacity `type:"structure"` + + // Information about item collections, if any, that were affected by the operation. + // ItemCollectionMetrics is only returned if the request asked for it. If the + // table does not have any local secondary indexes, this information is not + // returned in the response. + // + // Each ItemCollectionMetrics element consists of: + // + // ItemCollectionKey - The hash key value of the item collection. This is + // the same as the hash key of the item. + // + // SizeEstimateRangeGB - An estimate of item collection size, in gigabytes. This + // value is a two-element array containing a lower bound and an upper bound + // for the estimate. The estimate includes the size of all the items in the + // table, plus the size of all attributes projected into all of the local secondary + // indexes on that table. Use this estimate to measure whether a local secondary + // index is approaching its size limit. + // + // The estimate is subject to change over time; therefore, do not rely on the + // precision or accuracy of the estimate. + ItemCollectionMetrics *ItemCollectionMetrics `type:"structure"` +} + +// String returns the string representation +func (s DeleteItemOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteItemOutput) GoString() string { + return s.String() +} + +// Represents a request to perform a DeleteItem operation on an item.
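+//
+// A minimal sketch (illustrative only, not part of the generated AWS
+// documentation): a DeleteRequest as it would appear inside a BatchWriteItemInput,
+// assuming a hypothetical table "Music" whose hash key is "Artist". Note that
+// BatchWriteItem deletes are unconditional; for conditional deletes, use
+// DeleteItemInput as described above.
+//
+//	input := &dynamodb.BatchWriteItemInput{
+//	    RequestItems: map[string][]*dynamodb.WriteRequest{
+//	        "Music": {
+//	            {DeleteRequest: &dynamodb.DeleteRequest{
+//	                Key: map[string]*dynamodb.AttributeValue{
+//	                    "Artist": {S: aws.String("No One You Know")},
+//	                },
+//	            }},
+//	        },
+//	    },
+//	}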
+type DeleteRequest struct { + _ struct{} `type:"structure"` + + // A map of attribute name to attribute values, representing the primary key + // of the item to delete. All of the table's primary key attributes must be + // specified, and their data types must match those of the table's key schema. + Key map[string]*AttributeValue `type:"map" required:"true"` +} + +// String returns the string representation +func (s DeleteRequest) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteRequest) GoString() string { + return s.String() +} + +// Represents the input of a DeleteTable operation. +type DeleteTableInput struct { + _ struct{} `type:"structure"` + + // The name of the table to delete. + TableName *string `min:"3" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteTableInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteTableInput) GoString() string { + return s.String() +} + +// Represents the output of a DeleteTable operation. +type DeleteTableOutput struct { + _ struct{} `type:"structure"` + + // Represents the properties of a table. + TableDescription *TableDescription `type:"structure"` +} + +// String returns the string representation +func (s DeleteTableOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteTableOutput) GoString() string { + return s.String() +} + +// Represents the input of a DescribeTable operation. +type DescribeTableInput struct { + _ struct{} `type:"structure"` + + // The name of the table to describe. + TableName *string `min:"3" type:"string" required:"true"` +} + +// String returns the string representation +func (s DescribeTableInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeTableInput) GoString() string { + return s.String() +} + +// Represents the output of a DescribeTable operation. +type DescribeTableOutput struct { + _ struct{} `type:"structure"` + + // Represents the properties of a table. + Table *TableDescription `type:"structure"` +} + +// String returns the string representation +func (s DescribeTableOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeTableOutput) GoString() string { + return s.String() +} + +// Represents a condition to be compared with an attribute value. This condition +// can be used with DeleteItem, PutItem or UpdateItem operations; if the comparison +// evaluates to true, the operation succeeds; if not, the operation fails. You +// can use ExpectedAttributeValue in one of two different ways: +// +// Use AttributeValueList to specify one or more values to compare against +// an attribute. Use ComparisonOperator to specify how you want to perform the +// comparison. If the comparison evaluates to true, then the conditional operation +// succeeds. +// +// Use Value to specify a value that DynamoDB will compare against an attribute. +// If the values match, then ExpectedAttributeValue evaluates to true and the +// conditional operation succeeds. Optionally, you can also set Exists to false, +// indicating that you do not expect to find the attribute value in the table. +// In this case, the conditional operation succeeds only if the comparison evaluates +// to false. 
+// +// Value and Exists are incompatible with AttributeValueList and ComparisonOperator. +// Note that if you use both sets of parameters at once, DynamoDB will return +// a ValidationException exception. +type ExpectedAttributeValue struct { + _ struct{} `type:"structure"` + + // One or more values to evaluate against the supplied attribute. The number + // of values in the list depends on the ComparisonOperator being used. + // + // For type Number, value comparisons are numeric. + // + // String value comparisons for greater than, equals, or less than are based + // on ASCII character code values. For example, a is greater than A, and a is + // greater than B. For a list of code values, see http://en.wikipedia.org/wiki/ASCII#ASCII_printable_characters + // (http://en.wikipedia.org/wiki/ASCII#ASCII_printable_characters). + // + // For Binary, DynamoDB treats each byte of the binary data as unsigned when + // it compares binary values. + // + // For information on specifying data types in JSON, see JSON Data Format (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/DataFormat.html) + // in the Amazon DynamoDB Developer Guide. + AttributeValueList []*AttributeValue `type:"list"` + + // A comparator for evaluating attributes in the AttributeValueList. For example, + // equals, greater than, less than, etc. + // + // The following comparison operators are available: + // + // EQ | NE | LE | LT | GE | GT | NOT_NULL | NULL | CONTAINS | NOT_CONTAINS + // | BEGINS_WITH | IN | BETWEEN + // + // The following are descriptions of each comparison operator. + // + // EQ : Equal. EQ is supported for all datatypes, including lists and maps. + // + // AttributeValueList can contain only one AttributeValue element of type String, + // Number, Binary, String Set, Number Set, or Binary Set. If an item contains + // an AttributeValue element of a different type than the one provided in the + // request, the value does not match. For example, {"S":"6"} does not equal + // {"N":"6"}. Also, {"N":"6"} does not equal {"NS":["6", "2", "1"]}. + // + // NE : Not equal. NE is supported for all datatypes, including lists and + // maps. + // + // AttributeValueList can contain only one AttributeValue of type String, Number, + // Binary, String Set, Number Set, or Binary Set. If an item contains an AttributeValue + // of a different type than the one provided in the request, the value does + // not match. For example, {"S":"6"} does not equal {"N":"6"}. Also, {"N":"6"} + // does not equal {"NS":["6", "2", "1"]}. + // + // LE : Less than or equal. + // + // AttributeValueList can contain only one AttributeValue element of type String, + // Number, or Binary (not a set type). If an item contains an AttributeValue + // element of a different type than the one provided in the request, the value + // does not match. For example, {"S":"6"} does not equal {"N":"6"}. Also, {"N":"6"} + // does not compare to {"NS":["6", "2", "1"]}. + // + // LT : Less than. + // + // AttributeValueList can contain only one AttributeValue of type String, Number, + // or Binary (not a set type). If an item contains an AttributeValue element + // of a different type than the one provided in the request, the value does + // not match. For example, {"S":"6"} does not equal {"N":"6"}. Also, {"N":"6"} + // does not compare to {"NS":["6", "2", "1"]}. + // + // GE : Greater than or equal. + // + // AttributeValueList can contain only one AttributeValue element of type String, + // Number, or Binary (not a set type). 
If an item contains an AttributeValue + // element of a different type than the one provided in the request, the value + // does not match. For example, {"S":"6"} does not equal {"N":"6"}. Also, {"N":"6"} + // does not compare to {"NS":["6", "2", "1"]}. + // + // GT : Greater than. + // + // AttributeValueList can contain only one AttributeValue element of type String, + // Number, or Binary (not a set type). If an item contains an AttributeValue + // element of a different type than the one provided in the request, the value + // does not match. For example, {"S":"6"} does not equal {"N":"6"}. Also, {"N":"6"} + // does not compare to {"NS":["6", "2", "1"]}. + // + // NOT_NULL : The attribute exists. NOT_NULL is supported for all datatypes, + // including lists and maps. + // + // This operator tests for the existence of an attribute, not its data type. + // If the data type of attribute "a" is null, and you evaluate it using NOT_NULL, + // the result is a Boolean true. This result is because the attribute "a" exists; + // its data type is not relevant to the NOT_NULL comparison operator. + // + // NULL : The attribute does not exist. NULL is supported for all datatypes, + // including lists and maps. + // + // This operator tests for the nonexistence of an attribute, not its data type. + // If the data type of attribute "a" is null, and you evaluate it using NULL, + // the result is a Boolean false. This is because the attribute "a" exists; + // its data type is not relevant to the NULL comparison operator. + // + // CONTAINS : Checks for a subsequence, or value in a set. + // + // AttributeValueList can contain only one AttributeValue element of type String, + // Number, or Binary (not a set type). If the target attribute of the comparison + // is of type String, then the operator checks for a substring match. If the + // target attribute of the comparison is of type Binary, then the operator looks + // for a subsequence of the target that matches the input. If the target attribute + // of the comparison is a set ("SS", "NS", or "BS"), then the operator evaluates + // to true if it finds an exact match with any member of the set. + // + // CONTAINS is supported for lists: When evaluating "a CONTAINS b", "a" can + // be a list; however, "b" cannot be a set, a map, or a list. + // + // NOT_CONTAINS : Checks for absence of a subsequence, or absence of a value + // in a set. + // + // AttributeValueList can contain only one AttributeValue element of type String, + // Number, or Binary (not a set type). If the target attribute of the comparison + // is a String, then the operator checks for the absence of a substring match. + // If the target attribute of the comparison is Binary, then the operator checks + // for the absence of a subsequence of the target that matches the input. If + // the target attribute of the comparison is a set ("SS", "NS", or "BS"), then + // the operator evaluates to true if it does not find an exact match with any + // member of the set. + // + // NOT_CONTAINS is supported for lists: When evaluating "a NOT CONTAINS b", + // "a" can be a list; however, "b" cannot be a set, a map, or a list. + // + // BEGINS_WITH : Checks for a prefix. + // + // AttributeValueList can contain only one AttributeValue of type String or + // Binary (not a Number or a set type). The target attribute of the comparison + // must be of type String or Binary (not a Number or a set type). + // + // IN : Checks for matching elements within two sets. 
+ // + // AttributeValueList can contain one or more AttributeValue elements of type + // String, Number, or Binary (not a set type). These attributes are compared + // against an existing set type attribute of an item. If any elements of the + // input set are present in the item attribute, the expression evaluates to + // true. + // + // BETWEEN : Greater than or equal to the first value, and less than or equal + // to the second value. + // + // AttributeValueList must contain two AttributeValue elements of the same + // type, either String, Number, or Binary (not a set type). A target attribute + // matches if the target value is greater than, or equal to, the first element + // and less than, or equal to, the second element. If an item contains an AttributeValue + // element of a different type than the one provided in the request, the value + // does not match. For example, {"S":"6"} does not compare to {"N":"6"}. Also, + // {"N":"6"} does not compare to {"NS":["6", "2", "1"]}. + ComparisonOperator *string `type:"string" enum:"ComparisonOperator"` + + // Causes DynamoDB to evaluate the value before attempting a conditional operation: + // + // If Exists is true, DynamoDB will check to see if that attribute value + // already exists in the table. If it is found, then the operation succeeds. + // If it is not found, the operation fails with a ConditionalCheckFailedException. + // + // If Exists is false, DynamoDB assumes that the attribute value does not + // exist in the table. If in fact the value does not exist, then the assumption + // is valid and the operation succeeds. If the value is found, despite the assumption + // that it does not exist, the operation fails with a ConditionalCheckFailedException. + // + // The default setting for Exists is true. If you supply a Value all by itself, + // DynamoDB assumes the attribute exists: You don't have to set Exists to true, + // because it is implied. + // + // DynamoDB returns a ValidationException if: + // + // Exists is true but there is no Value to check. (You expect a value to + // exist, but don't specify what that value is.) + // + // Exists is false but you also provide a Value. (You cannot expect an attribute + // to have a value, while also expecting it not to exist.) + Exists *bool `type:"boolean"` + + // Represents the data for an attribute. You can set one, and only one, of the + // elements. + // + // Each attribute in an item is a name-value pair. An attribute can be single-valued + // or a multi-valued set. For example, a book item can have title and authors + // attributes. Each book has one title but can have many authors. The multi-valued + // attribute is a set; duplicate values are not allowed. + Value *AttributeValue `type:"structure"` +} + +// String returns the string representation +func (s ExpectedAttributeValue) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ExpectedAttributeValue) GoString() string { + return s.String() +} + +// Represents the input of a GetItem operation. +type GetItemInput struct { + _ struct{} `type:"structure"` + + // This is a legacy parameter, for backward compatibility. New applications + // should use ProjectionExpression instead. Do not combine legacy parameters + // and expression parameters in a single API call; otherwise, DynamoDB will + // return a ValidationException exception.
+ // + // This parameter allows you to retrieve attributes of type List or Map; however, + // it cannot retrieve individual elements within a List or a Map. + // + // The names of one or more attributes to retrieve. If no attribute names are + // provided, then all attributes will be returned. If any of the requested attributes + // are not found, they will not appear in the result. + // + // Note that AttributesToGet has no effect on provisioned throughput consumption. + // DynamoDB determines capacity units consumed based on item size, not on the + // amount of data that is returned to an application. + AttributesToGet []*string `min:"1" type:"list"` + + // Determines the read consistency model: If set to true, then the operation + // uses strongly consistent reads; otherwise, the operation uses eventually + // consistent reads. + ConsistentRead *bool `type:"boolean"` + + // One or more substitution tokens for attribute names in an expression. The + // following are some use cases for using ExpressionAttributeNames: + // + // To access an attribute whose name conflicts with a DynamoDB reserved word. + // + // To create a placeholder for repeating occurrences of an attribute name + // in an expression. + // + // To prevent special characters in an attribute name from being misinterpreted + // in an expression. + // + // Use the # character in an expression to dereference an attribute name. + // For example, consider the following attribute name: + // + // Percentile + // + // The name of this attribute conflicts with a reserved word, so it cannot + // be used directly in an expression. (For the complete list of reserved words, + // see Reserved Words (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ReservedWords.html) + // in the Amazon DynamoDB Developer Guide). To work around this, you could specify + // the following for ExpressionAttributeNames: + // + // {"#P":"Percentile"} + // + // You could then use this substitution in an expression, as in this example: + // + // #P = :val + // + // Tokens that begin with the : character are expression attribute values, + // which are placeholders for the actual value at runtime. + // + // For more information on expression attribute names, see Accessing Item Attributes + // (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.AccessingItemAttributes.html) + // in the Amazon DynamoDB Developer Guide. + ExpressionAttributeNames map[string]*string `type:"map"` + + // A map of attribute names to AttributeValue objects, representing the primary + // key of the item to retrieve. + // + // For the primary key, you must provide all of the attributes. For example, + // with a hash type primary key, you only need to provide the hash attribute. + // For a hash-and-range type primary key, you must provide both the hash attribute + // and the range attribute. + Key map[string]*AttributeValue `type:"map" required:"true"` + + // A string that identifies one or more attributes to retrieve from the table. + // These attributes can include scalars, sets, or elements of a JSON document. + // The attributes in the expression must be separated by commas. + // + // If no attribute names are specified, then all attributes will be returned. + // If any of the requested attributes are not found, they will not appear in + // the result. 
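+ //
+ // A minimal sketch (illustrative only, not part of the generated AWS
+ // documentation): a GetItem call retrieving two attributes, assuming a
+ // configured *DynamoDB client "svc" and a hypothetical table "Music" with
+ // hash key "Artist":
+ //
+ //	out, err := svc.GetItem(&dynamodb.GetItemInput{
+ //	    TableName: aws.String("Music"),
+ //	    Key: map[string]*dynamodb.AttributeValue{
+ //	        "Artist": {S: aws.String("No One You Know")},
+ //	    },
+ //	    ProjectionExpression: aws.String("Artist, AlbumTitle"),
+ //	})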
+ // + // For more information, see Accessing Item Attributes (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.AccessingItemAttributes.html) + // in the Amazon DynamoDB Developer Guide. + // + // ProjectionExpression replaces the legacy AttributesToGet parameter. + ProjectionExpression *string `type:"string"` + + // Determines the level of detail about provisioned throughput consumption that + // is returned in the response: + // + // INDEXES - The response includes the aggregate ConsumedCapacity for the + // operation, together with ConsumedCapacity for each table and secondary index + // that was accessed. + // + // Note that some operations, such as GetItem and BatchGetItem, do not access + // any indexes at all. In these cases, specifying INDEXES will only return ConsumedCapacity + // information for table(s). + // + // TOTAL - The response includes only the aggregate ConsumedCapacity for the + // operation. + // + // NONE - No ConsumedCapacity details are included in the response. + ReturnConsumedCapacity *string `type:"string" enum:"ReturnConsumedCapacity"` + + // The name of the table containing the requested item. + TableName *string `min:"3" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetItemInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetItemInput) GoString() string { + return s.String() +} + +// Represents the output of a GetItem operation. +type GetItemOutput struct { + _ struct{} `type:"structure"` + + // The capacity units consumed by an operation. The data returned includes the + // total provisioned throughput consumed, along with statistics for the table + // and any indexes involved in the operation. ConsumedCapacity is only returned + // if the request asked for it. For more information, see Provisioned Throughput + // (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ProvisionedThroughputIntro.html) + // in the Amazon DynamoDB Developer Guide. + ConsumedCapacity *ConsumedCapacity `type:"structure"` + + // A map of attribute names to AttributeValue objects, as specified by AttributesToGet. + Item map[string]*AttributeValue `type:"map"` +} + +// String returns the string representation +func (s GetItemOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetItemOutput) GoString() string { + return s.String() +} + +// Represents the properties of a global secondary index. +type GlobalSecondaryIndex struct { + _ struct{} `type:"structure"` + + // The name of the global secondary index. The name must be unique among all + // other indexes on this table. + IndexName *string `min:"3" type:"string" required:"true"` + + // The complete key schema for a global secondary index, which consists of one + // or more pairs of attribute names and key types (HASH or RANGE). + KeySchema []*KeySchemaElement `min:"1" type:"list" required:"true"` + + // Represents attributes that are copied (projected) from the table into an + // index. These are in addition to the primary key attributes and index key + // attributes, which are automatically projected. + Projection *Projection `type:"structure" required:"true"` + + // Represents the provisioned throughput settings for a specified table or index. + // The settings can be modified using the UpdateTable operation. 
+ // + // For current minimum and maximum provisioned throughput values, see Limits + // (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Limits.html) + // in the Amazon DynamoDB Developer Guide. + ProvisionedThroughput *ProvisionedThroughput `type:"structure" required:"true"` +} + +// String returns the string representation +func (s GlobalSecondaryIndex) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GlobalSecondaryIndex) GoString() string { + return s.String() +} + +// Represents the properties of a global secondary index. +type GlobalSecondaryIndexDescription struct { + _ struct{} `type:"structure"` + + // Indicates whether the index is currently backfilling. Backfilling is the + // process of reading items from the table and determining whether they can + // be added to the index. (Not all items will qualify: For example, a hash key + // attribute cannot have any duplicates.) If an item can be added to the index, + // DynamoDB will do so. After all items have been processed, the backfilling + // operation is complete and Backfilling is false. + // + // For indexes that were created during a CreateTable operation, the Backfilling + // attribute does not appear in the DescribeTable output. + Backfilling *bool `type:"boolean"` + + // The Amazon Resource Name (ARN) that uniquely identifies the index. + IndexArn *string `type:"string"` + + // The name of the global secondary index. + IndexName *string `min:"3" type:"string"` + + // The total size of the specified index, in bytes. DynamoDB updates this value + // approximately every six hours. Recent changes might not be reflected in this + // value. + IndexSizeBytes *int64 `type:"long"` + + // The current state of the global secondary index: + // + // CREATING - The index is being created. + // + // UPDATING - The index is being updated. + // + // DELETING - The index is being deleted. + // + // ACTIVE - The index is ready for use. + IndexStatus *string `type:"string" enum:"IndexStatus"` + + // The number of items in the specified index. DynamoDB updates this value approximately + // every six hours. Recent changes might not be reflected in this value. + ItemCount *int64 `type:"long"` + + // The complete key schema for the global secondary index, consisting of one + // or more pairs of attribute names and key types (HASH or RANGE). + KeySchema []*KeySchemaElement `min:"1" type:"list"` + + // Represents attributes that are copied (projected) from the table into an + // index. These are in addition to the primary key attributes and index key + // attributes, which are automatically projected. + Projection *Projection `type:"structure"` + + // Represents the provisioned throughput settings for the table, consisting + // of read and write capacity units, along with data about increases and decreases. + ProvisionedThroughput *ProvisionedThroughputDescription `type:"structure"` +} + +// String returns the string representation +func (s GlobalSecondaryIndexDescription) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GlobalSecondaryIndexDescription) GoString() string { + return s.String() +} + +// Represents one of the following: +// +// A new global secondary index to be added to an existing table. +// +// New provisioned throughput parameters for an existing global secondary index. +// +// An existing global secondary index to be removed from an existing table. 
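+//
+// A minimal sketch (illustrative only, not part of the generated AWS
+// documentation): raising the provisioned throughput of a hypothetical index
+// "TitleIndex" as one element of UpdateTableInput.GlobalSecondaryIndexUpdates:
+//
+//	update := &dynamodb.GlobalSecondaryIndexUpdate{
+//	    Update: &dynamodb.UpdateGlobalSecondaryIndexAction{
+//	        IndexName: aws.String("TitleIndex"),
+//	        ProvisionedThroughput: &dynamodb.ProvisionedThroughput{
+//	            ReadCapacityUnits:  aws.Int64(10),
+//	            WriteCapacityUnits: aws.Int64(5),
+//	        },
+//	    },
+//	}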
+type GlobalSecondaryIndexUpdate struct { + _ struct{} `type:"structure"` + + // The parameters required for creating a global secondary index on an existing + // table: + // + // IndexName + // + // KeySchema + // + // AttributeDefinitions + // + // Projection + // + // ProvisionedThroughput + Create *CreateGlobalSecondaryIndexAction `type:"structure"` + + // The name of an existing global secondary index to be removed. + Delete *DeleteGlobalSecondaryIndexAction `type:"structure"` + + // The name of an existing global secondary index, along with new provisioned + // throughput settings to be applied to that index. + Update *UpdateGlobalSecondaryIndexAction `type:"structure"` +} + +// String returns the string representation +func (s GlobalSecondaryIndexUpdate) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GlobalSecondaryIndexUpdate) GoString() string { + return s.String() +} + +// Information about item collections, if any, that were affected by the operation. +// ItemCollectionMetrics is only returned if the request asked for it. If the +// table does not have any local secondary indexes, this information is not +// returned in the response. +type ItemCollectionMetrics struct { + _ struct{} `type:"structure"` + + // The hash key value of the item collection. This value is the same as the + // hash key of the item. + ItemCollectionKey map[string]*AttributeValue `type:"map"` + + // An estimate of item collection size, in gigabytes. This value is a two-element + // array containing a lower bound and an upper bound for the estimate. The estimate + // includes the size of all the items in the table, plus the size of all attributes + // projected into all of the local secondary indexes on that table. Use this + // estimate to measure whether a local secondary index is approaching its size + // limit. + // + // The estimate is subject to change over time; therefore, do not rely on the + // precision or accuracy of the estimate. + SizeEstimateRangeGB []*float64 `type:"list"` +} + +// String returns the string representation +func (s ItemCollectionMetrics) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ItemCollectionMetrics) GoString() string { + return s.String() +} + +// Represents a single element of a key schema. A key schema specifies the attributes +// that make up the primary key of a table, or the key attributes of an index. +// +// A KeySchemaElement represents exactly one attribute of the primary key. +// For example, a hash type primary key would be represented by one KeySchemaElement. +// A hash-and-range type primary key would require one KeySchemaElement for +// the hash attribute, and another KeySchemaElement for the range attribute. +type KeySchemaElement struct { + _ struct{} `type:"structure"` + + // The name of a key attribute. + AttributeName *string `min:"1" type:"string" required:"true"` + + // The role that this key attribute will assume: HASH or RANGE. + KeyType *string `type:"string" required:"true" enum:"KeyType"` +} + +// String returns the string representation +func (s KeySchemaElement) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s KeySchemaElement) GoString() string { + return s.String() +} + +// Represents a set of primary keys and, for each key, the attributes to retrieve +// from the table. +// +// For each primary key, you must provide all of the key attributes.
For example, +// with a hash type primary key, you only need to provide the hash attribute. +// For a hash-and-range type primary key, you must provide both the hash attribute +// and the range attribute. +type KeysAndAttributes struct { + _ struct{} `type:"structure"` + + // One or more attributes to retrieve from the table or index. If no attribute + // names are specified then all attributes will be returned. If any of the specified + // attributes are not found, they will not appear in the result. + AttributesToGet []*string `min:"1" type:"list"` + + // The consistency of a read operation. If set to true, then a strongly consistent + // read is used; otherwise, an eventually consistent read is used. + ConsistentRead *bool `type:"boolean"` + + // One or more substitution tokens for attribute names in an expression. The + // following are some use cases for using ExpressionAttributeNames: + // + // To access an attribute whose name conflicts with a DynamoDB reserved word. + // + // To create a placeholder for repeating occurrences of an attribute name + // in an expression. + // + // To prevent special characters in an attribute name from being misinterpreted + // in an expression. + // + // Use the # character in an expression to dereference an attribute name. + // For example, consider the following attribute name: + // + // Percentile + // + // The name of this attribute conflicts with a reserved word, so it cannot + // be used directly in an expression. (For the complete list of reserved words, + // see Reserved Words (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ReservedWords.html) + // in the Amazon DynamoDB Developer Guide). To work around this, you could specify + // the following for ExpressionAttributeNames: + // + // {"#P":"Percentile"} + // + // You could then use this substitution in an expression, as in this example: + // + // #P = :val + // + // Tokens that begin with the : character are expression attribute values, + // which are placeholders for the actual value at runtime. + // + // For more information on expression attribute names, see Accessing Item Attributes + // (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.AccessingItemAttributes.html) + // in the Amazon DynamoDB Developer Guide. + ExpressionAttributeNames map[string]*string `type:"map"` + + // The primary key attribute values that define the items and the attributes + // associated with the items. + Keys []map[string]*AttributeValue `min:"1" type:"list" required:"true"` + + // A string that identifies one or more attributes to retrieve from the table. + // These attributes can include scalars, sets, or elements of a JSON document. + // The attributes in the ProjectionExpression must be separated by commas. + // + // If no attribute names are specified, then all attributes will be returned. + // If any of the requested attributes are not found, they will not appear in + // the result. + // + // For more information, see Accessing Item Attributes (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.AccessingItemAttributes.html) + // in the Amazon DynamoDB Developer Guide. + // + // ProjectionExpression replaces the legacy AttributesToGet parameter. 
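+ //
+ // A minimal sketch (illustrative only, not part of the generated AWS
+ // documentation): a KeysAndAttributes block as used in a BatchGetItemInput,
+ // assuming a hypothetical table "Music" with hash key "Artist":
+ //
+ //	input := &dynamodb.BatchGetItemInput{
+ //	    RequestItems: map[string]*dynamodb.KeysAndAttributes{
+ //	        "Music": {
+ //	            Keys: []map[string]*dynamodb.AttributeValue{
+ //	                {"Artist": {S: aws.String("No One You Know")}},
+ //	            },
+ //	            ProjectionExpression: aws.String("Artist, AlbumTitle"),
+ //	        },
+ //	    },
+ //	}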
+ ProjectionExpression *string `type:"string"` +} + +// String returns the string representation +func (s KeysAndAttributes) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s KeysAndAttributes) GoString() string { + return s.String() +} + +// Represents the input of a ListTables operation. +type ListTablesInput struct { + _ struct{} `type:"structure"` + + // The first table name that this operation will evaluate. Use the value that + // was returned for LastEvaluatedTableName in a previous operation, so that + // you can obtain the next page of results. + ExclusiveStartTableName *string `min:"3" type:"string"` + + // A maximum number of table names to return. If this parameter is not specified, + // the limit is 100. + Limit *int64 `min:"1" type:"integer"` +} + +// String returns the string representation +func (s ListTablesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListTablesInput) GoString() string { + return s.String() +} + +// Represents the output of a ListTables operation. +type ListTablesOutput struct { + _ struct{} `type:"structure"` + + // The name of the last table in the current page of results. Use this value + // as the ExclusiveStartTableName in a new request to obtain the next page of + // results, until all the table names are returned. + // + // If you do not receive a LastEvaluatedTableName value in the response, this + // means that there are no more table names to be retrieved. + LastEvaluatedTableName *string `min:"3" type:"string"` + + // The names of the tables associated with the current account at the current + // endpoint. The maximum size of this array is 100. + // + // If LastEvaluatedTableName also appears in the output, you can use this value + // as the ExclusiveStartTableName parameter in a subsequent ListTables request + // and obtain the next page of results. + TableNames []*string `type:"list"` +} + +// String returns the string representation +func (s ListTablesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListTablesOutput) GoString() string { + return s.String() +} + +// Represents the properties of a local secondary index. +type LocalSecondaryIndex struct { + _ struct{} `type:"structure"` + + // The name of the local secondary index. The name must be unique among all + // other indexes on this table. + IndexName *string `min:"3" type:"string" required:"true"` + + // The complete key schema for the local secondary index, consisting of one + // or more pairs of attribute names and key types (HASH or RANGE). + KeySchema []*KeySchemaElement `min:"1" type:"list" required:"true"` + + // Represents attributes that are copied (projected) from the table into an + // index. These are in addition to the primary key attributes and index key + // attributes, which are automatically projected. + Projection *Projection `type:"structure" required:"true"` +} + +// String returns the string representation +func (s LocalSecondaryIndex) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s LocalSecondaryIndex) GoString() string { + return s.String() +} + +// Represents the properties of a local secondary index. +type LocalSecondaryIndexDescription struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) that uniquely identifies the index. 
+ IndexArn *string `type:"string"` + + // Represents the name of the local secondary index. + IndexName *string `min:"3" type:"string"` + + // The total size of the specified index, in bytes. DynamoDB updates this value + // approximately every six hours. Recent changes might not be reflected in this + // value. + IndexSizeBytes *int64 `type:"long"` + + // The number of items in the specified index. DynamoDB updates this value approximately + // every six hours. Recent changes might not be reflected in this value. + ItemCount *int64 `type:"long"` + + // The complete index key schema, which consists of one or more pairs of attribute + // names and key types (HASH or RANGE). + KeySchema []*KeySchemaElement `min:"1" type:"list"` + + // Represents attributes that are copied (projected) from the table into an + // index. These are in addition to the primary key attributes and index key + // attributes, which are automatically projected. + Projection *Projection `type:"structure"` +} + +// String returns the string representation +func (s LocalSecondaryIndexDescription) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s LocalSecondaryIndexDescription) GoString() string { + return s.String() +} + +// Represents attributes that are copied (projected) from the table into an +// index. These are in addition to the primary key attributes and index key +// attributes, which are automatically projected. +type Projection struct { + _ struct{} `type:"structure"` + + // Represents the non-key attribute names that will be projected into the index. + // + // For local secondary indexes, the total count of NonKeyAttributes summed + // across all of the local secondary indexes must not exceed 20. If you project + // the same attribute into two different indexes, this counts as two distinct + // attributes when determining the total. + NonKeyAttributes []*string `min:"1" type:"list"` + + // The set of attributes that are projected into the index: + // + // KEYS_ONLY - Only the index and primary keys are projected into the index. + // + // INCLUDE - Only the specified table attributes are projected into the index. + // The list of projected attributes is in NonKeyAttributes. + // + // ALL - All of the table attributes are projected into the index. + ProjectionType *string `type:"string" enum:"ProjectionType"` +} + +// String returns the string representation +func (s Projection) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Projection) GoString() string { + return s.String() +} + +// Represents the provisioned throughput settings for a specified table or index. +// The settings can be modified using the UpdateTable operation. +// +// For current minimum and maximum provisioned throughput values, see Limits +// (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Limits.html) +// in the Amazon DynamoDB Developer Guide. +type ProvisionedThroughput struct { + _ struct{} `type:"structure"` + + // The maximum number of strongly consistent reads consumed per second before + // DynamoDB returns a ThrottlingException. For more information, see Specifying + // Read and Write Requirements (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/WorkingWithTables.html#ProvisionedThroughput) + // in the Amazon DynamoDB Developer Guide.
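+ //
+ // A minimal sketch (illustrative only, not part of the generated AWS
+ // documentation): a table provisioned for 5 strongly consistent reads and
+ // 5 writes per second:
+ //
+ //	pt := &dynamodb.ProvisionedThroughput{
+ //	    ReadCapacityUnits:  aws.Int64(5),
+ //	    WriteCapacityUnits: aws.Int64(5),
+ //	}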
+ ReadCapacityUnits *int64 `min:"1" type:"long" required:"true"` + + // The maximum number of writes consumed per second before DynamoDB returns + // a ThrottlingException. For more information, see Specifying Read and Write + // Requirements (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/WorkingWithTables.html#ProvisionedThroughput) + // in the Amazon DynamoDB Developer Guide. + WriteCapacityUnits *int64 `min:"1" type:"long" required:"true"` +} + +// String returns the string representation +func (s ProvisionedThroughput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ProvisionedThroughput) GoString() string { + return s.String() +} + +// Represents the provisioned throughput settings for the table, consisting +// of read and write capacity units, along with data about increases and decreases. +type ProvisionedThroughputDescription struct { + _ struct{} `type:"structure"` + + // The date and time of the last provisioned throughput decrease for this table. + LastDecreaseDateTime *time.Time `type:"timestamp" timestampFormat:"unix"` + + // The date and time of the last provisioned throughput increase for this table. + LastIncreaseDateTime *time.Time `type:"timestamp" timestampFormat:"unix"` + + // The number of provisioned throughput decreases for this table during this + // UTC calendar day. For current maximums on provisioned throughput decreases, + // see Limits (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Limits.html) + // in the Amazon DynamoDB Developer Guide. + NumberOfDecreasesToday *int64 `min:"1" type:"long"` + + // The maximum number of strongly consistent reads consumed per second before + // DynamoDB returns a ThrottlingException. Eventually consistent reads require + // less effort than strongly consistent reads, so a setting of 50 ReadCapacityUnits + // per second provides 100 eventually consistent ReadCapacityUnits per second. + ReadCapacityUnits *int64 `min:"1" type:"long"` + + // The maximum number of writes consumed per second before DynamoDB returns + // a ThrottlingException. + WriteCapacityUnits *int64 `min:"1" type:"long"` +} + +// String returns the string representation +func (s ProvisionedThroughputDescription) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ProvisionedThroughputDescription) GoString() string { + return s.String() +} + +// Represents the input of a PutItem operation. +type PutItemInput struct { + _ struct{} `type:"structure"` + + // A condition that must be satisfied in order for a conditional PutItem operation + // to succeed. + // + // An expression can contain any of the following: + // + // Functions: attribute_exists | attribute_not_exists | attribute_type | + // contains | begins_with | size + // + // These function names are case-sensitive. + // + // Comparison operators: = | <> | < | > | <= | >= | BETWEEN | IN + // + // Logical operators: AND | OR | NOT + // + // For more information on condition expressions, see Specifying Conditions + // (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.SpecifyingConditions.html) + // in the Amazon DynamoDB Developer Guide. + // + // ConditionExpression replaces the legacy ConditionalOperator and Expected + // parameters. + ConditionExpression *string `type:"string"` + + // This is a legacy parameter, for backward compatibility. New applications + // should use ConditionExpression instead. 
Do not combine legacy parameters + // and expression parameters in a single API call; otherwise, DynamoDB will + // return a ValidationException exception. + // + // A logical operator to apply to the conditions in the Expected map: + // + // AND - If all of the conditions evaluate to true, then the entire map evaluates + // to true. + // + // OR - If at least one of the conditions evaluates to true, then the entire + // map evaluates to true. + // + // If you omit ConditionalOperator, then AND is the default. + // + // The operation will succeed only if the entire map evaluates to true. + // + // This parameter does not support attributes of type List or Map. + ConditionalOperator *string `type:"string" enum:"ConditionalOperator"` + + // This is a legacy parameter, for backward compatibility. New applications + // should use ConditionExpression instead. Do not combine legacy parameters + // and expression parameters in a single API call; otherwise, DynamoDB will + // return a ValidationException exception. + // + // A map of attribute/condition pairs. Expected provides a conditional block + // for the PutItem operation. + // + // This parameter does not support attributes of type List or Map. + // + // Each element of Expected consists of an attribute name, a comparison operator, + // and one or more values. DynamoDB compares the attribute with the value(s) + // you supplied, using the comparison operator. For each Expected element, the + // result of the evaluation is either true or false. + // + // If you specify more than one element in the Expected map, then by default + // all of the conditions must evaluate to true. In other words, the conditions + // are ANDed together. (You can use the ConditionalOperator parameter to OR + // the conditions instead. If you do this, then at least one of the conditions + // must evaluate to true, rather than all of them.) + // + // If the Expected map evaluates to true, then the conditional operation succeeds; + // otherwise, it fails. + // + // Expected contains the following: + // + // AttributeValueList - One or more values to evaluate against the supplied + // attribute. The number of values in the list depends on the ComparisonOperator + // being used. + // + // For type Number, value comparisons are numeric. + // + // String value comparisons for greater than, equals, or less than are based + // on ASCII character code values. For example, a is greater than A, and a is + // greater than B. For a list of code values, see http://en.wikipedia.org/wiki/ASCII#ASCII_printable_characters. + // + // For type Binary, DynamoDB treats each byte of the binary data as unsigned + // when it compares binary values. + // + // ComparisonOperator - A comparator for evaluating attributes in the AttributeValueList. + // When performing the comparison, DynamoDB uses strongly consistent reads. + // + // The following comparison operators are available: + // + // EQ | NE | LE | LT | GE | GT | NOT_NULL | NULL | CONTAINS | NOT_CONTAINS + // | BEGINS_WITH | IN | BETWEEN + // + // The following are descriptions of each comparison operator. + // + // EQ : Equal. EQ is supported for all datatypes, including lists and maps. + // + // AttributeValueList can contain only one AttributeValue element of type String, + // Number, Binary, String Set, Number Set, or Binary Set. If an item contains + // an AttributeValue element of a different type than the one provided in the + // request, the value does not match. For example, {"S":"6"} does not equal + // {"N":"6"}.
Also, {"N":"6"} does not equal {"NS":["6", "2", "1"]}. + // + // NE : Not equal. NE is supported for all datatypes, including lists and + // maps. + // + // AttributeValueList can contain only one AttributeValue of type String, Number, + // Binary, String Set, Number Set, or Binary Set. If an item contains an AttributeValue + // of a different type than the one provided in the request, the value does + // not match. For example, {"S":"6"} does not equal {"N":"6"}. Also, {"N":"6"} + // does not equal {"NS":["6", "2", "1"]}. + // + // LE : Less than or equal. + // + // AttributeValueList can contain only one AttributeValue element of type String, + // Number, or Binary (not a set type). If an item contains an AttributeValue + // element of a different type than the one provided in the request, the value + // does not match. For example, {"S":"6"} does not equal {"N":"6"}. Also, {"N":"6"} + // does not compare to {"NS":["6", "2", "1"]}. + // + // LT : Less than. + // + // AttributeValueList can contain only one AttributeValue of type String, Number, + // or Binary (not a set type). If an item contains an AttributeValue element + // of a different type than the one provided in the request, the value does + // not match. For example, {"S":"6"} does not equal {"N":"6"}. Also, {"N":"6"} + // does not compare to {"NS":["6", "2", "1"]}. + // + // GE : Greater than or equal. + // + // AttributeValueList can contain only one AttributeValue element of type String, + // Number, or Binary (not a set type). If an item contains an AttributeValue + // element of a different type than the one provided in the request, the value + // does not match. For example, {"S":"6"} does not equal {"N":"6"}. Also, {"N":"6"} + // does not compare to {"NS":["6", "2", "1"]}. + // + // GT : Greater than. + // + // AttributeValueList can contain only one AttributeValue element of type String, + // Number, or Binary (not a set type). If an item contains an AttributeValue + // element of a different type than the one provided in the request, the value + // does not match. For example, {"S":"6"} does not equal {"N":"6"}. Also, {"N":"6"} + // does not compare to {"NS":["6", "2", "1"]}. + // + // NOT_NULL : The attribute exists. NOT_NULL is supported for all datatypes, + // including lists and maps. + // + // This operator tests for the existence of an attribute, not its data type. + // If the data type of attribute "a" is null, and you evaluate it using NOT_NULL, + // the result is a Boolean true. This result is because the attribute "a" exists; + // its data type is not relevant to the NOT_NULL comparison operator. + // + // NULL : The attribute does not exist. NULL is supported for all datatypes, + // including lists and maps. + // + // This operator tests for the nonexistence of an attribute, not its data type. + // If the data type of attribute "a" is null, and you evaluate it using NULL, + // the result is a Boolean false. This is because the attribute "a" exists; + // its data type is not relevant to the NULL comparison operator. + // + // CONTAINS : Checks for a subsequence, or value in a set. + // + // AttributeValueList can contain only one AttributeValue element of type String, + // Number, or Binary (not a set type). If the target attribute of the comparison + // is of type String, then the operator checks for a substring match. If the + // target attribute of the comparison is of type Binary, then the operator looks + // for a subsequence of the target that matches the input. 
If the target attribute + // of the comparison is a set ("SS", "NS", or "BS"), then the operator evaluates + // to true if it finds an exact match with any member of the set. + // + // CONTAINS is supported for lists: When evaluating "a CONTAINS b", "a" can + // be a list; however, "b" cannot be a set, a map, or a list. + // + // NOT_CONTAINS : Checks for absence of a subsequence, or absence of a value + // in a set. + // + // AttributeValueList can contain only one AttributeValue element of type String, + // Number, or Binary (not a set type). If the target attribute of the comparison + // is a String, then the operator checks for the absence of a substring match. + // If the target attribute of the comparison is Binary, then the operator checks + // for the absence of a subsequence of the target that matches the input. If + // the target attribute of the comparison is a set ("SS", "NS", or "BS"), then + // the operator evaluates to true if it does not find an exact match with any + // member of the set. + // + // NOT_CONTAINS is supported for lists: When evaluating "a NOT CONTAINS b", + // "a" can be a list; however, "b" cannot be a set, a map, or a list. + // + // BEGINS_WITH : Checks for a prefix. + // + // AttributeValueList can contain only one AttributeValue of type String or + // Binary (not a Number or a set type). The target attribute of the comparison + // must be of type String or Binary (not a Number or a set type). + // + // IN : Checks for matching elements within two sets. + // + // AttributeValueList can contain one or more AttributeValue elements of type + // String, Number, or Binary (not a set type). These attributes are compared + // against an existing set type attribute of an item. If any elements of the + // input set are present in the item attribute, the expression evaluates to + // true. + // + // BETWEEN : Greater than or equal to the first value, and less than or equal + // to the second value. + // + // AttributeValueList must contain two AttributeValue elements of the same + // type, either String, Number, or Binary (not a set type). A target attribute + // matches if the target value is greater than, or equal to, the first element + // and less than, or equal to, the second element. If an item contains an AttributeValue + // element of a different type than the one provided in the request, the value + // does not match. For example, {"S":"6"} does not compare to {"N":"6"}. Also, + // {"N":"6"} does not compare to {"NS":["6", "2", "1"]} + // + // For usage examples of AttributeValueList and ComparisonOperator, see + // Legacy Conditional Parameters (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/LegacyConditionalParameters.html) + // in the Amazon DynamoDB Developer Guide. + // + // For backward compatibility with previous DynamoDB releases, the following + // parameters can be used instead of AttributeValueList and ComparisonOperator: + // + // Value - A value for DynamoDB to compare with an attribute. + // + // Exists - A Boolean value that causes DynamoDB to evaluate the value before + // attempting the conditional operation: + // + // If Exists is true, DynamoDB will check to see if that attribute value + // already exists in the table. If it is found, then the condition evaluates + // to true; otherwise the condition evaluates to false. + // + // If Exists is false, DynamoDB assumes that the attribute value does not + // exist in the table. 
If in fact the value does not exist, then the assumption + // is valid and the condition evaluates to true. If the value is found, despite + // the assumption that it does not exist, the condition evaluates to false. + // + // Note that the default value for Exists is true. + // + // The Value and Exists parameters are incompatible with AttributeValueList + // and ComparisonOperator. Note that if you use both sets of parameters at once, + // DynamoDB will return a ValidationException exception. + Expected map[string]*ExpectedAttributeValue `type:"map"` + + // One or more substitution tokens for attribute names in an expression. The + // following are some use cases for using ExpressionAttributeNames: + // + // To access an attribute whose name conflicts with a DynamoDB reserved word. + // + // To create a placeholder for repeating occurrences of an attribute name + // in an expression. + // + // To prevent special characters in an attribute name from being misinterpreted + // in an expression. + // + // Use the # character in an expression to dereference an attribute name. + // For example, consider the following attribute name: + // + // Percentile + // + // The name of this attribute conflicts with a reserved word, so it cannot + // be used directly in an expression. (For the complete list of reserved words, + // see Reserved Words (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ReservedWords.html) + // in the Amazon DynamoDB Developer Guide). To work around this, you could specify + // the following for ExpressionAttributeNames: + // + // {"#P":"Percentile"} + // + // You could then use this substitution in an expression, as in this example: + // + // #P = :val + // + // Tokens that begin with the : character are expression attribute values, + // which are placeholders for the actual value at runtime. + // + // For more information on expression attribute names, see Accessing Item Attributes + // (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.AccessingItemAttributes.html) + // in the Amazon DynamoDB Developer Guide. + ExpressionAttributeNames map[string]*string `type:"map"` + + // One or more values that can be substituted in an expression. + // + // Use the : (colon) character in an expression to dereference an attribute + // value. For example, suppose that you wanted to check whether the value of + // the ProductStatus attribute was one of the following: + // + // Available | Backordered | Discontinued + // + // You would first need to specify ExpressionAttributeValues as follows: + // + // { ":avail":{"S":"Available"}, ":back":{"S":"Backordered"}, ":disc":{"S":"Discontinued"} + // } + // + // You could then use these values in an expression, such as this: + // + // ProductStatus IN (:avail, :back, :disc) + // + // For more information on expression attribute values, see Specifying Conditions + // (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.SpecifyingConditions.html) + // in the Amazon DynamoDB Developer Guide. + ExpressionAttributeValues map[string]*AttributeValue `type:"map"` + + // A map of attribute name/value pairs, one for each attribute. Only the primary + // key attributes are required; you can optionally provide other attribute name-value + // pairs for the item. + // + // You must provide all of the attributes for the primary key. For example, + // with a hash type primary key, you only need to provide the hash attribute. 
+ // For a hash-and-range type primary key, you must provide both the hash attribute + // and the range attribute. + // + // If you specify any attributes that are part of an index key, then the data + // types for those attributes must match those of the schema in the table's + // attribute definition. + // + // For more information about primary keys, see Primary Key (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/DataModel.html#DataModelPrimaryKey) + // in the Amazon DynamoDB Developer Guide. + // + // Each element in the Item map is an AttributeValue object. + Item map[string]*AttributeValue `type:"map" required:"true"` + + // Determines the level of detail about provisioned throughput consumption that + // is returned in the response: + // + // INDEXES - The response includes the aggregate ConsumedCapacity for the + // operation, together with ConsumedCapacity for each table and secondary index + // that was accessed. + // + // Note that some operations, such as GetItem and BatchGetItem, do not access + // any indexes at all. In these cases, specifying INDEXES will only return ConsumedCapacity + // information for table(s). + // + // TOTAL - The response includes only the aggregate ConsumedCapacity for the + // operation. + // + // NONE - No ConsumedCapacity details are included in the response. + ReturnConsumedCapacity *string `type:"string" enum:"ReturnConsumedCapacity"` + + // Determines whether item collection metrics are returned. If set to SIZE, + // statistics about item collections, if any, that were modified during the + // operation are returned in the response. If set to NONE (the default), no + // statistics are returned. + ReturnItemCollectionMetrics *string `type:"string" enum:"ReturnItemCollectionMetrics"` + + // Use ReturnValues if you want to get the item attributes as they appeared + // before they were updated with the PutItem request. For PutItem, the valid + // values are: + // + // NONE - If ReturnValues is not specified, or if its value is NONE, then + // nothing is returned. (This setting is the default for ReturnValues.) + // + // ALL_OLD - If PutItem overwrote an attribute name-value pair, then the + // content of the old item is returned. + // + // Other "Valid Values" are not relevant to PutItem. + ReturnValues *string `type:"string" enum:"ReturnValue"` + + // The name of the table to contain the item. + TableName *string `min:"3" type:"string" required:"true"` +} + +// String returns the string representation +func (s PutItemInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutItemInput) GoString() string { + return s.String() +} + +// Represents the output of a PutItem operation. +type PutItemOutput struct { + _ struct{} `type:"structure"` + + // The attribute values as they appeared before the PutItem operation, but only + // if ReturnValues is specified as ALL_OLD in the request. Each element consists + // of an attribute name and an attribute value. + Attributes map[string]*AttributeValue `type:"map"` + + // The capacity units consumed by an operation. The data returned includes the + // total provisioned throughput consumed, along with statistics for the table + // and any indexes involved in the operation. ConsumedCapacity is only returned + // if the request asked for it. 
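+ //
+ // As a minimal, illustrative sketch only (not part of the generated API;
+ // "svc" is assumed to be an initialized *dynamodb.DynamoDB client, the aws
+ // and fmt packages are assumed to be imported, and the table and attribute
+ // names are hypothetical), a PutItem request that asks for this capacity
+ // data might look like:
+ //
+ //    out, err := svc.PutItem(&dynamodb.PutItemInput{
+ //        TableName: aws.String("Music"),
+ //        Item: map[string]*dynamodb.AttributeValue{
+ //            "Artist":    {S: aws.String("No One You Know")},
+ //            "SongTitle": {S: aws.String("Call Me Today")},
+ //        },
+ //        // Fail the write if an item with this hash key already exists.
+ //        ConditionExpression:    aws.String("attribute_not_exists(Artist)"),
+ //        ReturnConsumedCapacity: aws.String("TOTAL"),
+ //    })
+ //    if err == nil && out.ConsumedCapacity != nil {
+ //        fmt.Println(*out.ConsumedCapacity.CapacityUnits)
+ //    }
+ //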
For more information, see Provisioned Throughput + // (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ProvisionedThroughputIntro.html) + // in the Amazon DynamoDB Developer Guide. + ConsumedCapacity *ConsumedCapacity `type:"structure"` + + // Information about item collections, if any, that were affected by the operation. + // ItemCollectionMetrics is only returned if the request asked for it. If the + // table does not have any local secondary indexes, this information is not + // returned in the response. + // + // Each ItemCollectionMetrics element consists of: + // + // ItemCollectionKey - The hash key value of the item collection. This is + // the same as the hash key of the item. + // + // SizeEstimateRange - An estimate of item collection size, in gigabytes. This + // value is a two-element array containing a lower bound and an upper bound + // for the estimate. The estimate includes the size of all the items in the + // table, plus the size of all attributes projected into all of the local secondary + // indexes on that table. Use this estimate to measure whether a local secondary + // index is approaching its size limit. + // + // The estimate is subject to change over time; therefore, do not rely on the + // precision or accuracy of the estimate. + ItemCollectionMetrics *ItemCollectionMetrics `type:"structure"` +} + +// String returns the string representation +func (s PutItemOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutItemOutput) GoString() string { + return s.String() +} + +// Represents a request to perform a PutItem operation on an item. +type PutRequest struct { + _ struct{} `type:"structure"` + + // A map of attribute name to attribute values, representing the primary key + // of an item to be processed by PutItem. All of the table's primary key attributes + // must be specified, and their data types must match those of the table's key + // schema. If any attributes are present in the item which are part of an index + // key schema for the table, their types must match the index key schema. + Item map[string]*AttributeValue `type:"map" required:"true"` +} + +// String returns the string representation +func (s PutRequest) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutRequest) GoString() string { + return s.String() +} + +// Represents the input of a Query operation. +type QueryInput struct { + _ struct{} `type:"structure"` + + // This is a legacy parameter, for backward compatibility. New applications + // should use ProjectionExpression instead. Do not combine legacy parameters + // and expression parameters in a single API call; otherwise, DynamoDB will + // return a ValidationException exception. + // + // This parameter allows you to retrieve attributes of type List or Map; however, + // it cannot retrieve individual elements within a List or a Map. + // + // The names of one or more attributes to retrieve. If no attribute names are + // provided, then all attributes will be returned. If any of the requested attributes + // are not found, they will not appear in the result. + // + // Note that AttributesToGet has no effect on provisioned throughput consumption. + // DynamoDB determines capacity units consumed based on item size, not on the + // amount of data that is returned to an application. 
+ // + // You cannot use both AttributesToGet and Select together in a Query request, + // unless the value for Select is SPECIFIC_ATTRIBUTES. (This usage is equivalent + // to specifying AttributesToGet without any value for Select.) + // + // If you query a local secondary index and request only attributes that are + // projected into that index, the operation will read only the index and not + // the table. If any of the requested attributes are not projected into the + // local secondary index, DynamoDB will fetch each of these attributes from + // the parent table. This extra fetching incurs additional throughput cost and + // latency. + // + // If you query a global secondary index, you can only request attributes that + // are projected into the index. Global secondary index queries cannot fetch + // attributes from the parent table. + AttributesToGet []*string `min:"1" type:"list"` + + // This is a legacy parameter, for backward compatibility. New applications + // should use FilterExpression instead. Do not combine legacy parameters and + // expression parameters in a single API call; otherwise, DynamoDB will return + // a ValidationException exception. + // + // A logical operator to apply to the conditions in a QueryFilter map: + // + // AND - If all of the conditions evaluate to true, then the entire map evaluates + // to true. + // + // OR - If at least one of the conditions evaluates to true, then the entire + // map evaluates to true. + // + // If you omit ConditionalOperator, then AND is the default. + // + // The operation will succeed only if the entire map evaluates to true. + // + // This parameter does not support attributes of type List or Map. + ConditionalOperator *string `type:"string" enum:"ConditionalOperator"` + + // Determines the read consistency model: If set to true, then the operation + // uses strongly consistent reads; otherwise, the operation uses eventually + // consistent reads. + // + // Strongly consistent reads are not supported on global secondary indexes. + // If you query a global secondary index with ConsistentRead set to true, you + // will receive a ValidationException. + ConsistentRead *bool `type:"boolean"` + + // The primary key of the first item that this operation will evaluate. Use + // the value that was returned for LastEvaluatedKey in the previous operation. + // + // The data type for ExclusiveStartKey must be String, Number or Binary. No + // set data types are allowed. + ExclusiveStartKey map[string]*AttributeValue `type:"map"` + + // One or more substitution tokens for attribute names in an expression. The + // following are some use cases for using ExpressionAttributeNames: + // + // To access an attribute whose name conflicts with a DynamoDB reserved word. + // + // To create a placeholder for repeating occurrences of an attribute name + // in an expression. + // + // To prevent special characters in an attribute name from being misinterpreted + // in an expression. + // + // Use the # character in an expression to dereference an attribute name. + // For example, consider the following attribute name: + // + // Percentile + // + // The name of this attribute conflicts with a reserved word, so it cannot + // be used directly in an expression. (For the complete list of reserved words, + // see Reserved Words (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ReservedWords.html) + // in the Amazon DynamoDB Developer Guide). 
To work around this, you could specify + // the following for ExpressionAttributeNames: + // + // {"#P":"Percentile"} + // + // You could then use this substitution in an expression, as in this example: + // + // #P = :val + // + // Tokens that begin with the : character are expression attribute values, + // which are placeholders for the actual value at runtime. + // + // For more information on expression attribute names, see Accessing Item Attributes + // (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.AccessingItemAttributes.html) + // in the Amazon DynamoDB Developer Guide. + ExpressionAttributeNames map[string]*string `type:"map"` + + // One or more values that can be substituted in an expression. + // + // Use the : (colon) character in an expression to dereference an attribute + // value. For example, suppose that you wanted to check whether the value of + // the ProductStatus attribute was one of the following: + // + // Available | Backordered | Discontinued + // + // You would first need to specify ExpressionAttributeValues as follows: + // + // { ":avail":{"S":"Available"}, ":back":{"S":"Backordered"}, ":disc":{"S":"Discontinued"} + // } + // + // You could then use these values in an expression, such as this: + // + // ProductStatus IN (:avail, :back, :disc) + // + // For more information on expression attribute values, see Specifying Conditions + // (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.SpecifyingConditions.html) + // in the Amazon DynamoDB Developer Guide. + ExpressionAttributeValues map[string]*AttributeValue `type:"map"` + + // A string that contains conditions that DynamoDB applies after the Query operation, + // but before the data is returned to you. Items that do not satisfy the FilterExpression + // criteria are not returned. + // + // A FilterExpression is applied after the items have already been read; the + // process of filtering does not consume any additional read capacity units. + // + // For more information, see Filter Expressions (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/QueryAndScan.html#FilteringResults) + // in the Amazon DynamoDB Developer Guide. + // + // FilterExpression replaces the legacy QueryFilter and ConditionalOperator + // parameters. + FilterExpression *string `type:"string"` + + // The name of an index to query. This index can be any local secondary index + // or global secondary index on the table. Note that if you use the IndexName + // parameter, you must also provide TableName. + IndexName *string `min:"3" type:"string"` + + // The condition that specifies the key value(s) for items to be retrieved by + // the Query action. + // + // The condition must perform an equality test on a single hash key value. + // The condition can also perform one of several comparison tests on a single + // range key value. Query can use KeyConditionExpression to retrieve one item + // with a given hash and range key value, or several items that have the same + // hash key value but different range key values. + // + // The hash key equality test is required, and must be specified in the following + // format: + // + // hashAttributeName = :hashval + // + // If you also want to provide a range key condition, it must be combined using + // AND with the hash key condition. 
Following is an example, using the = comparison + // operator for the range key: + // + // hashAttributeName = :hashval AND rangeAttributeName = :rangeval + // + // Valid comparisons for the range key condition are as follows: + // + // rangeAttributeName = :rangeval - true if the range key is equal to :rangeval. + // + // rangeAttributeName < :rangeval - true if the range key is less than :rangeval. + // + // rangeAttributeName <= :rangeval - true if the range key is less than or + // equal to :rangeval. + // + // rangeAttributeName > :rangeval - true if the range key is greater than + // :rangeval. + // + // rangeAttributeName >= :rangeval - true if the range key is greater than + // or equal to :rangeval. + // + // rangeAttributeName BETWEEN :rangeval1 AND :rangeval2 - true if the range + // key is greater than or equal to :rangeval1, and less than or equal to :rangeval2. + // + // begins_with (rangeAttributeName, :rangeval) - true if the range key begins + // with a particular operand. (You cannot use this function with a range key + // that is of type Number.) Note that the function name begins_with is case-sensitive. + // + // Use the ExpressionAttributeValues parameter to replace tokens such as + // :hashval and :rangeval with actual values at runtime. + // + // You can optionally use the ExpressionAttributeNames parameter to replace + // the names of the hash and range attributes with placeholder tokens. This + // option might be necessary if an attribute name conflicts with a DynamoDB + // reserved word. For example, the following KeyConditionExpression parameter + // causes an error because Size is a reserved word: + // + // Size = :myval + // + // To work around this, define a placeholder (such as #S) to represent the + // attribute name Size. KeyConditionExpression then is as follows: + // + // #S = :myval + // + // For a list of reserved words, see Reserved Words (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ReservedWords.html) + // in the Amazon DynamoDB Developer Guide. + // + // For more information on ExpressionAttributeNames and ExpressionAttributeValues, + // see Using Placeholders for Attribute Names and Values (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ExpressionPlaceholders.html) + // in the Amazon DynamoDB Developer Guide. + // + // KeyConditionExpression replaces the legacy KeyConditions parameter. + KeyConditionExpression *string `type:"string"` + + // This is a legacy parameter, for backward compatibility. New applications + // should use KeyConditionExpression instead. Do not combine legacy parameters + // and expression parameters in a single API call; otherwise, DynamoDB will + // return a ValidationException exception. + // + // The selection criteria for the query. For a query on a table, you can have + // conditions only on the table primary key attributes. You must provide the + // hash key attribute name and value as an EQ condition. You can optionally + // provide a second condition, referring to the range key attribute. + // + // If you don't provide a range key condition, all of the items that match + // the hash key will be retrieved. If a FilterExpression or QueryFilter is present, + // it will be applied after the items are retrieved. + // + // For a query on an index, you can have conditions only on the index key attributes. + // You must provide the index hash attribute name and value as an EQ condition. + // You can optionally provide a second condition, referring to the index key + // range attribute. 
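+ //
+ // For comparison, a minimal sketch of a modern request using KeyConditionExpression
+ // ("svc" is an assumed, already-initialized *dynamodb.DynamoDB client; the
+ // table and attribute names are hypothetical):
+ //
+ //    out, err := svc.Query(&dynamodb.QueryInput{
+ //        TableName:              aws.String("Music"),
+ //        KeyConditionExpression: aws.String("Artist = :a AND begins_with(SongTitle, :t)"),
+ //        ExpressionAttributeValues: map[string]*dynamodb.AttributeValue{
+ //            ":a": {S: aws.String("No One You Know")},
+ //            ":t": {S: aws.String("Call")},
+ //        },
+ //    })
+ //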
+ // + // Each KeyConditions element consists of an attribute name to compare, along + // with the following: + // + // AttributeValueList - One or more values to evaluate against the supplied + // attribute. The number of values in the list depends on the ComparisonOperator + // being used. + // + // For type Number, value comparisons are numeric. + // + // String value comparisons for greater than, equals, or less than are based + // on ASCII character code values. For example, a is greater than A, and a is + // greater than B. For a list of code values, see http://en.wikipedia.org/wiki/ASCII#ASCII_printable_characters + // (http://en.wikipedia.org/wiki/ASCII#ASCII_printable_characters). + // + // For Binary, DynamoDB treats each byte of the binary data as unsigned when + // it compares binary values. + // + // ComparisonOperator - A comparator for evaluating attributes, for example, + // equals, greater than, less than, and so on. + // + // For KeyConditions, only the following comparison operators are supported: + // + // EQ | LE | LT | GE | GT | BEGINS_WITH | BETWEEN + // + // The following are descriptions of these comparison operators. + // + // EQ : Equal. + // + // AttributeValueList can contain only one AttributeValue of type String, Number, + // or Binary (not a set type). If an item contains an AttributeValue element + // of a different type than the one specified in the request, the value does + // not match. For example, {"S":"6"} does not equal {"N":"6"}. Also, {"N":"6"} + // does not equal {"NS":["6", "2", "1"]}. + // + // LE : Less than or equal. + // + // AttributeValueList can contain only one AttributeValue element of type String, + // Number, or Binary (not a set type). If an item contains an AttributeValue + // element of a different type than the one provided in the request, the value + // does not match. For example, {"S":"6"} does not equal {"N":"6"}. Also, {"N":"6"} + // does not compare to {"NS":["6", "2", "1"]}. + // + // LT : Less than. + // + // AttributeValueList can contain only one AttributeValue of type String, Number, + // or Binary (not a set type). If an item contains an AttributeValue element + // of a different type than the one provided in the request, the value does + // not match. For example, {"S":"6"} does not equal {"N":"6"}. Also, {"N":"6"} + // does not compare to {"NS":["6", "2", "1"]}. + // + // GE : Greater than or equal. + // + // AttributeValueList can contain only one AttributeValue element of type String, + // Number, or Binary (not a set type). If an item contains an AttributeValue + // element of a different type than the one provided in the request, the value + // does not match. For example, {"S":"6"} does not equal {"N":"6"}. Also, {"N":"6"} + // does not compare to {"NS":["6", "2", "1"]}. + // + // GT : Greater than. + // + // AttributeValueList can contain only one AttributeValue element of type String, + // Number, or Binary (not a set type). If an item contains an AttributeValue + // element of a different type than the one provided in the request, the value + // does not match. For example, {"S":"6"} does not equal {"N":"6"}. Also, {"N":"6"} + // does not compare to {"NS":["6", "2", "1"]}. + // + // BEGINS_WITH : Checks for a prefix. + // + // AttributeValueList can contain only one AttributeValue of type String or + // Binary (not a Number or a set type). The target attribute of the comparison + // must be of type String or Binary (not a Number or a set type). 
+ // + // BETWEEN : Greater than or equal to the first value, and less than or + // equal to the second value. + // + // AttributeValueList must contain two AttributeValue elements of the same + // type, either String, Number, or Binary (not a set type). A target attribute + // matches if the target value is greater than, or equal to, the first element + // and less than, or equal to, the second element. If an item contains an AttributeValue + // element of a different type than the one provided in the request, the value + // does not match. For example, {"S":"6"} does not compare to {"N":"6"}. Also, + // {"N":"6"} does not compare to {"NS":["6", "2", "1"]} + // + // For usage examples of AttributeValueList and ComparisonOperator, see + // Legacy Conditional Parameters (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/LegacyConditionalParameters.html) + // in the Amazon DynamoDB Developer Guide. + KeyConditions map[string]*Condition `type:"map"` + + // The maximum number of items to evaluate (not necessarily the number of matching + // items). If DynamoDB processes the number of items up to the limit while processing + // the results, it stops the operation and returns the matching values up to + // that point, and a key in LastEvaluatedKey to apply in a subsequent operation, + // so that you can pick up where you left off. Also, if the processed data set + // size exceeds 1 MB before DynamoDB reaches this limit, it stops the operation + // and returns the matching values up to the limit, and a key in LastEvaluatedKey + // to apply in a subsequent operation to continue the operation. For more information, + // see Query and Scan in the Amazon DynamoDB Developer Guide. + Limit *int64 `min:"1" type:"integer"` + + // A string that identifies one or more attributes to retrieve from the table. + // These attributes can include scalars, sets, or elements of a JSON document. + // The attributes in the expression must be separated by commas. + // + // If no attribute names are specified, then all attributes will be returned. + // If any of the requested attributes are not found, they will not appear in + // the result. + // + // For more information, see Accessing Item Attributes (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.AccessingItemAttributes.html) + // in the Amazon DynamoDB Developer Guide. + // + // ProjectionExpression replaces the legacy AttributesToGet parameter. + ProjectionExpression *string `type:"string"` + + // This is a legacy parameter, for backward compatibility. New applications + // should use FilterExpression instead. Do not combine legacy parameters and + // expression parameters in a single API call; otherwise, DynamoDB will return + // a ValidationException exception. + // + // A condition that evaluates the query results after the items are read and + // returns only the desired values. + // + // This parameter does not support attributes of type List or Map. + // + // A QueryFilter is applied after the items have already been read; the process + // of filtering does not consume any additional read capacity units. + // + // If you provide more than one condition in the QueryFilter map, then by default + // all of the conditions must evaluate to true. In other words, the conditions + // are ANDed together. (You can use the ConditionalOperator parameter to OR + // the conditions instead. If you do this, then at least one of the conditions + // must evaluate to true, rather than all of them.) 
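+ //
+ // As a hedged sketch (attribute names hypothetical, "input" an assumed
+ // *dynamodb.QueryInput), the same OR semantics can be written with the newer
+ // FilterExpression parameter instead:
+ //
+ //    input.FilterExpression = aws.String("Price < :p OR Rating >= :r")
+ //    input.ExpressionAttributeValues = map[string]*dynamodb.AttributeValue{
+ //        ":p": {N: aws.String("100")},
+ //        ":r": {N: aws.String("4")},
+ //    }
+ //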
+ // + // Note that QueryFilter does not allow key attributes. You cannot define a + // filter condition on a hash key or range key. + // + // Each QueryFilter element consists of an attribute name to compare, along + // with the following: + // + // AttributeValueList - One or more values to evaluate against the supplied + // attribute. The number of values in the list depends on the operator specified + // in ComparisonOperator. + // + // For type Number, value comparisons are numeric. + // + // String value comparisons for greater than, equals, or less than are based + // on ASCII character code values. For example, a is greater than A, and a is + // greater than B. For a list of code values, see http://en.wikipedia.org/wiki/ASCII#ASCII_printable_characters + // (http://en.wikipedia.org/wiki/ASCII#ASCII_printable_characters). + // + // For type Binary, DynamoDB treats each byte of the binary data as unsigned + // when it compares binary values. + // + // For information on specifying data types in JSON, see JSON Data Format (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/DataFormat.html) + // in the Amazon DynamoDB Developer Guide. + // + // ComparisonOperator - A comparator for evaluating attributes. For example, + // equals, greater than, less than, etc. + // + // The following comparison operators are available: + // + // EQ | NE | LE | LT | GE | GT | NOT_NULL | NULL | CONTAINS | NOT_CONTAINS + // | BEGINS_WITH | IN | BETWEEN + // + // For complete descriptions of all comparison operators, see the Condition + // (http://docs.aws.amazon.com/amazondynamodb/latest/APIReference/API_Condition.html) + // data type. + QueryFilter map[string]*Condition `type:"map"` + + // Determines the level of detail about provisioned throughput consumption that + // is returned in the response: + // + // INDEXES - The response includes the aggregate ConsumedCapacity for the + // operation, together with ConsumedCapacity for each table and secondary index + // that was accessed. + // + // Note that some operations, such as GetItem and BatchGetItem, do not access + // any indexes at all. In these cases, specifying INDEXES will only return ConsumedCapacity + // information for table(s). + // + // TOTAL - The response includes only the aggregate ConsumedCapacity for the + // operation. + // + // NONE - No ConsumedCapacity details are included in the response. + ReturnConsumedCapacity *string `type:"string" enum:"ReturnConsumedCapacity"` + + // Specifies the order in which to return the query results - either ascending + // (true) or descending (false). + // + // Items with the same hash key are stored in sorted order by range key. If + // the range key data type is Number, the results are stored in numeric order. + // For type String, the results are returned in order of ASCII character code + // values. For type Binary, DynamoDB treats each byte of the binary data as + // unsigned. + // + // If ScanIndexForward is true, DynamoDB returns the results in order, by range + // key. This is the default behavior. + // + // If ScanIndexForward is false, DynamoDB sorts the results in descending order + // by range key, and then returns the results to the client. + ScanIndexForward *bool `type:"boolean"` + + // The attributes to be returned in the result. You can retrieve all item attributes, + // specific item attributes, the count of matching items, or in the case of + // an index, some or all of the attributes projected into the index. 
+ // + // ALL_ATTRIBUTES - Returns all of the item attributes from the specified + // table or index. If you query a local secondary index, then for each matching + // item in the index DynamoDB will fetch the entire item from the parent table. + // If the index is configured to project all item attributes, then all of the + // data can be obtained from the local secondary index, and no fetching is required. + // + // ALL_PROJECTED_ATTRIBUTES - Allowed only when querying an index. Retrieves + // all attributes that have been projected into the index. If the index is configured + // to project all attributes, this return value is equivalent to specifying + // ALL_ATTRIBUTES. + // + // COUNT - Returns the number of matching items, rather than the matching + // items themselves. + // + // SPECIFIC_ATTRIBUTES - Returns only the attributes listed in AttributesToGet. + // This return value is equivalent to specifying AttributesToGet without specifying + // any value for Select. + // + // If you query a local secondary index and request only attributes that are + // projected into that index, the operation will read only the index and not + // the table. If any of the requested attributes are not projected into the + // local secondary index, DynamoDB will fetch each of these attributes from + // the parent table. This extra fetching incurs additional throughput cost and + // latency. + // + // If you query a global secondary index, you can only request attributes that + // are projected into the index. Global secondary index queries cannot fetch + // attributes from the parent table. + // + // If neither Select nor AttributesToGet are specified, DynamoDB defaults + // to ALL_ATTRIBUTES when accessing a table, and ALL_PROJECTED_ATTRIBUTES when + // accessing an index. You cannot use both Select and AttributesToGet together + // in a single request, unless the value for Select is SPECIFIC_ATTRIBUTES. + // (This usage is equivalent to specifying AttributesToGet without any value + // for Select.) + // + // If you use the ProjectionExpression parameter, then the value for Select + // can only be SPECIFIC_ATTRIBUTES. Any other value for Select will return an + // error. + Select *string `type:"string" enum:"Select"` + + // The name of the table containing the requested items. + TableName *string `min:"3" type:"string" required:"true"` +} + +// String returns the string representation +func (s QueryInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s QueryInput) GoString() string { + return s.String() +} + +// Represents the output of a Query operation. +type QueryOutput struct { + _ struct{} `type:"structure"` + + // The capacity units consumed by an operation. The data returned includes the + // total provisioned throughput consumed, along with statistics for the table + // and any indexes involved in the operation. ConsumedCapacity is only returned + // if the request asked for it. For more information, see Provisioned Throughput + // (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ProvisionedThroughputIntro.html) + // in the Amazon DynamoDB Developer Guide. + ConsumedCapacity *ConsumedCapacity `type:"structure"` + + // The number of items in the response. + // + // If you used a QueryFilter in the request, then Count is the number of items + // returned after the filter was applied, and ScannedCount is the number of + // matching items before the filter was applied. 
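+ //
+ // A minimal pagination sketch (shape only, not a definitive pattern; "svc"
+ // and "input" are assumed to be an initialized client and a prepared
+ // *dynamodb.QueryInput):
+ //
+ //    total := int64(0)
+ //    for {
+ //        out, err := svc.Query(input)
+ //        if err != nil {
+ //            break // handle the error properly in real code
+ //        }
+ //        total += *out.Count
+ //        if len(out.LastEvaluatedKey) == 0 {
+ //            break // an empty LastEvaluatedKey means the last page
+ //        }
+ //        input.ExclusiveStartKey = out.LastEvaluatedKey
+ //    }
+ //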
+ // + // If you did not use a filter in the request, then Count and ScannedCount + // are the same. + Count *int64 `type:"integer"` + + // An array of item attributes that match the query criteria. Each element in + // this array consists of an attribute name and the value for that attribute. + Items []map[string]*AttributeValue `type:"list"` + + // The primary key of the item where the operation stopped, inclusive of the + // previous result set. Use this value to start a new operation, excluding this + // value in the new request. + // + // If LastEvaluatedKey is empty, then the "last page" of results has been processed + // and there is no more data to be retrieved. + // + // If LastEvaluatedKey is not empty, it does not necessarily mean that there + // is more data in the result set. The only way to know when you have reached + // the end of the result set is when LastEvaluatedKey is empty. + LastEvaluatedKey map[string]*AttributeValue `type:"map"` + + // The number of items evaluated, before any QueryFilter is applied. A high + // ScannedCount value with few, or no, Count results indicates an inefficient + // Query operation. For more information, see Count and ScannedCount (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/QueryAndScan.html#Count) + // in the Amazon DynamoDB Developer Guide. + // + // If you did not use a filter in the request, then ScannedCount is the same + // as Count. + ScannedCount *int64 `type:"integer"` +} + +// String returns the string representation +func (s QueryOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s QueryOutput) GoString() string { + return s.String() +} + +// Represents the input of a Scan operation. +type ScanInput struct { + _ struct{} `type:"structure"` + + // This is a legacy parameter, for backward compatibility. New applications + // should use ProjectionExpression instead. Do not combine legacy parameters + // and expression parameters in a single API call; otherwise, DynamoDB will + // return a ValidationException exception. + // + // This parameter allows you to retrieve attributes of type List or Map; however, + // it cannot retrieve individual elements within a List or a Map. + // + // The names of one or more attributes to retrieve. If no attribute names are + // provided, then all attributes will be returned. If any of the requested attributes + // are not found, they will not appear in the result. + // + // Note that AttributesToGet has no effect on provisioned throughput consumption. + // DynamoDB determines capacity units consumed based on item size, not on the + // amount of data that is returned to an application. + AttributesToGet []*string `min:"1" type:"list"` + + // This is a legacy parameter, for backward compatibility. New applications + // should use FilterExpression instead. Do not combine legacy parameters and + // expression parameters in a single API call; otherwise, DynamoDB will return + // a ValidationException exception. + // + // A logical operator to apply to the conditions in a ScanFilter map: + // + // AND - If all of the conditions evaluate to true, then the entire map evaluates + // to true. + // + // OR - If at least one of the conditions evaluates to true, then the entire + // map evaluates to true. + // + // If you omit ConditionalOperator, then AND is the default. + // + // The operation will succeed only if the entire map evaluates to true. + // + // This parameter does not support attributes of type List or Map. 
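+ //
+ // A hedged sketch of ORing two legacy ScanFilter conditions (table and
+ // attribute names hypothetical):
+ //
+ //    input := &dynamodb.ScanInput{
+ //        TableName:           aws.String("Music"),
+ //        ConditionalOperator: aws.String("OR"),
+ //        ScanFilter: map[string]*dynamodb.Condition{
+ //            "Genre": {
+ //                ComparisonOperator: aws.String("EQ"),
+ //                AttributeValueList: []*dynamodb.AttributeValue{{S: aws.String("Rock")}},
+ //            },
+ //            "Rating": {
+ //                ComparisonOperator: aws.String("GE"),
+ //                AttributeValueList: []*dynamodb.AttributeValue{{N: aws.String("4")}},
+ //            },
+ //        },
+ //    }
+ //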
+ ConditionalOperator *string `type:"string" enum:"ConditionalOperator"` + + // A Boolean value that determines the read consistency model during the scan: + // + // If ConsistentRead is false, then Scan will use eventually consistent reads. + // The data returned from Scan might not contain the results of other recently + // completed write operations (PutItem, UpdateItem or DeleteItem). The Scan + // response might include some stale data. + // + // If ConsistentRead is true, then Scan will use strongly consistent reads. + // All of the write operations that completed before the Scan began are guaranteed + // to be contained in the Scan response. + // + // The default setting for ConsistentRead is false, meaning that eventually + // consistent reads will be used. + // + // Strongly consistent reads are not supported on global secondary indexes. + // If you scan a global secondary index with ConsistentRead set to true, you + // will receive a ValidationException. + ConsistentRead *bool `type:"boolean"` + + // The primary key of the first item that this operation will evaluate. Use + // the value that was returned for LastEvaluatedKey in the previous operation. + // + // The data type for ExclusiveStartKey must be String, Number or Binary. No + // set data types are allowed. + // + // In a parallel scan, a Scan request that includes ExclusiveStartKey must + // specify the same segment whose previous Scan returned the corresponding value + // of LastEvaluatedKey. + ExclusiveStartKey map[string]*AttributeValue `type:"map"` + + // One or more substitution tokens for attribute names in an expression. The + // following are some use cases for using ExpressionAttributeNames: + // + // To access an attribute whose name conflicts with a DynamoDB reserved word. + // + // To create a placeholder for repeating occurrences of an attribute name + // in an expression. + // + // To prevent special characters in an attribute name from being misinterpreted + // in an expression. + // + // Use the # character in an expression to dereference an attribute name. + // For example, consider the following attribute name: + // + // Percentile + // + // The name of this attribute conflicts with a reserved word, so it cannot + // be used directly in an expression. (For the complete list of reserved words, + // see Reserved Words (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ReservedWords.html) + // in the Amazon DynamoDB Developer Guide). To work around this, you could specify + // the following for ExpressionAttributeNames: + // + // {"#P":"Percentile"} + // + // You could then use this substitution in an expression, as in this example: + // + // #P = :val + // + // Tokens that begin with the : character are expression attribute values, + // which are placeholders for the actual value at runtime. + // + // For more information on expression attribute names, see Accessing Item Attributes + // (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.AccessingItemAttributes.html) + // in the Amazon DynamoDB Developer Guide. + ExpressionAttributeNames map[string]*string `type:"map"` + + // One or more values that can be substituted in an expression. + // + // Use the : (colon) character in an expression to dereference an attribute + // value. 
For example, suppose that you wanted to check whether the value of + // the ProductStatus attribute was one of the following: + // + // Available | Backordered | Discontinued + // + // You would first need to specify ExpressionAttributeValues as follows: + // + // { ":avail":{"S":"Available"}, ":back":{"S":"Backordered"}, ":disc":{"S":"Discontinued"} + // } + // + // You could then use these values in an expression, such as this: + // + // ProductStatus IN (:avail, :back, :disc) + // + // For more information on expression attribute values, see Specifying Conditions + // (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.SpecifyingConditions.html) + // in the Amazon DynamoDB Developer Guide. + ExpressionAttributeValues map[string]*AttributeValue `type:"map"` + + // A string that contains conditions that DynamoDB applies after the Scan operation, + // but before the data is returned to you. Items that do not satisfy the FilterExpression + // criteria are not returned. + // + // A FilterExpression is applied after the items have already been read; the + // process of filtering does not consume any additional read capacity units. + // + // For more information, see Filter Expressions (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/QueryAndScan.html#FilteringResults) + // in the Amazon DynamoDB Developer Guide. + // + // FilterExpression replaces the legacy ScanFilter and ConditionalOperator + // parameters. + FilterExpression *string `type:"string"` + + // The name of a secondary index to scan. This index can be any local secondary + // index or global secondary index. Note that if you use the IndexName parameter, + // you must also provide TableName. + IndexName *string `min:"3" type:"string"` + + // The maximum number of items to evaluate (not necessarily the number of matching + // items). If DynamoDB processes the number of items up to the limit while processing + // the results, it stops the operation and returns the matching values up to + // that point, and a key in LastEvaluatedKey to apply in a subsequent operation, + // so that you can pick up where you left off. Also, if the processed data set + // size exceeds 1 MB before DynamoDB reaches this limit, it stops the operation + // and returns the matching values up to the limit, and a key in LastEvaluatedKey + // to apply in a subsequent operation to continue the operation. For more information, + // see Query and Scan in the Amazon DynamoDB Developer Guide. + Limit *int64 `min:"1" type:"integer"` + + // A string that identifies one or more attributes to retrieve from the specified + // table or index. These attributes can include scalars, sets, or elements of + // a JSON document. The attributes in the expression must be separated by commas. + // + // If no attribute names are specified, then all attributes will be returned. + // If any of the requested attributes are not found, they will not appear in + // the result. + // + // For more information, see Accessing Item Attributes (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.AccessingItemAttributes.html) + // in the Amazon DynamoDB Developer Guide. + // + // ProjectionExpression replaces the legacy AttributesToGet parameter. 
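+ //
+ // A brief, illustrative sketch ("svc" assumed to be an initialized client,
+ // table name hypothetical); note the # placeholder for the reserved word Name:
+ //
+ //    out, err := svc.Scan(&dynamodb.ScanInput{
+ //        TableName:            aws.String("Music"),
+ //        ProjectionExpression: aws.String("#n, Genre"),
+ //        ExpressionAttributeNames: map[string]*string{
+ //            "#n": aws.String("Name"),
+ //        },
+ //    })
+ //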
+ ProjectionExpression *string `type:"string"` + + // Determines the level of detail about provisioned throughput consumption that + // is returned in the response: + // + // INDEXES - The response includes the aggregate ConsumedCapacity for the + // operation, together with ConsumedCapacity for each table and secondary index + // that was accessed. + // + // Note that some operations, such as GetItem and BatchGetItem, do not access + // any indexes at all. In these cases, specifying INDEXES will only return ConsumedCapacity + // information for table(s). + // + // TOTAL - The response includes only the aggregate ConsumedCapacity for the + // operation. + // + // NONE - No ConsumedCapacity details are included in the response. + ReturnConsumedCapacity *string `type:"string" enum:"ReturnConsumedCapacity"` + + // This is a legacy parameter, for backward compatibility. New applications + // should use FilterExpression instead. Do not combine legacy parameters and + // expression parameters in a single API call; otherwise, DynamoDB will return + // a ValidationException exception. + // + // A condition that evaluates the scan results and returns only the desired + // values. + // + // This parameter does not support attributes of type List or Map. + // + // If you specify more than one condition in the ScanFilter map, then by default + // all of the conditions must evaluate to true. In other words, the conditions + // are ANDed together. (You can use the ConditionalOperator parameter to OR + // the conditions instead. If you do this, then at least one of the conditions + // must evaluate to true, rather than all of them.) + // + // Each ScanFilter element consists of an attribute name to compare, along + // with the following: + // + // AttributeValueList - One or more values to evaluate against the supplied + // attribute. The number of values in the list depends on the operator specified + // in ComparisonOperator. + // + // For type Number, value comparisons are numeric. + // + // String value comparisons for greater than, equals, or less than are based + // on ASCII character code values. For example, a is greater than A, and a is + // greater than B. For a list of code values, see http://en.wikipedia.org/wiki/ASCII#ASCII_printable_characters + // (http://en.wikipedia.org/wiki/ASCII#ASCII_printable_characters). + // + // For Binary, DynamoDB treats each byte of the binary data as unsigned when + // it compares binary values. + // + // For information on specifying data types in JSON, see JSON Data Format (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/DataFormat.html) + // in the Amazon DynamoDB Developer Guide. + // + // ComparisonOperator - A comparator for evaluating attributes. For example, + // equals, greater than, less than, etc. + // + // The following comparison operators are available: + // + // EQ | NE | LE | LT | GE | GT | NOT_NULL | NULL | CONTAINS | NOT_CONTAINS + // | BEGINS_WITH | IN | BETWEEN + // + // For complete descriptions of all comparison operators, see Condition (http://docs.aws.amazon.com/amazondynamodb/latest/APIReference/API_Condition.html). + ScanFilter map[string]*Condition `type:"map"` + + // For a parallel Scan request, Segment identifies an individual segment to + // be scanned by an application worker. + // + // Segment IDs are zero-based, so the first segment is always 0. 
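+ //
+ // A hedged sketch of a parallel scan fanned out across goroutines ("svc"
+ // assumed, table name hypothetical, sync assumed to be imported; real code
+ // would also follow LastEvaluatedKey within each segment and handle errors):
+ //
+ //    const totalSegments = int64(4)
+ //    var wg sync.WaitGroup
+ //    for i := int64(0); i < totalSegments; i++ {
+ //        wg.Add(1)
+ //        go func(segment int64) {
+ //            defer wg.Done()
+ //            svc.Scan(&dynamodb.ScanInput{
+ //                TableName:     aws.String("Music"),
+ //                Segment:       aws.Int64(segment),
+ //                TotalSegments: aws.Int64(totalSegments),
+ //            })
+ //        }(i)
+ //    }
+ //    wg.Wait()
+ //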
For example, + // if you want to use four application threads to scan a table or an index, + // then the first thread specifies a Segment value of 0, the second thread specifies + // 1, and so on. + // + // The value of LastEvaluatedKey returned from a parallel Scan request must + // be used as ExclusiveStartKey with the same segment ID in a subsequent Scan + // operation. + // + // The value for Segment must be greater than or equal to 0, and less than + // the value provided for TotalSegments. + // + // If you provide Segment, you must also provide TotalSegments. + Segment *int64 `type:"integer"` + + // The attributes to be returned in the result. You can retrieve all item attributes, + // specific item attributes, or the count of matching items. + // + // ALL_ATTRIBUTES - Returns all of the item attributes. + // + // COUNT - Returns the number of matching items, rather than the matching + // items themselves. + // + // SPECIFIC_ATTRIBUTES - Returns only the attributes listed in AttributesToGet. + // This return value is equivalent to specifying AttributesToGet without specifying + // any value for Select. + // + // If neither Select nor AttributesToGet are specified, DynamoDB defaults + // to ALL_ATTRIBUTES. You cannot use both AttributesToGet and Select together + // in a single request, unless the value for Select is SPECIFIC_ATTRIBUTES. + // (This usage is equivalent to specifying AttributesToGet without any value + // for Select.) + Select *string `type:"string" enum:"Select"` + + // The name of the table containing the requested items; or, if you provide + // IndexName, the name of the table to which that index belongs. + TableName *string `min:"3" type:"string" required:"true"` + + // For a parallel Scan request, TotalSegments represents the total number of + // segments into which the Scan operation will be divided. The value of TotalSegments + // corresponds to the number of application workers that will perform the parallel + // scan. For example, if you want to use four application threads to scan a + // table or an index, specify a TotalSegments value of 4. + // + // The value for TotalSegments must be greater than or equal to 1, and less + // than or equal to 1000000. If you specify a TotalSegments value of 1, the + // Scan operation will be sequential rather than parallel. + // + // If you specify TotalSegments, you must also specify Segment. + TotalSegments *int64 `min:"1" type:"integer"` +} + +// String returns the string representation +func (s ScanInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ScanInput) GoString() string { + return s.String() +} + +// Represents the output of a Scan operation. +type ScanOutput struct { + _ struct{} `type:"structure"` + + // The capacity units consumed by an operation. The data returned includes the + // total provisioned throughput consumed, along with statistics for the table + // and any indexes involved in the operation. ConsumedCapacity is only returned + // if the request asked for it. For more information, see Provisioned Throughput + // (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ProvisionedThroughputIntro.html) + // in the Amazon DynamoDB Developer Guide. + ConsumedCapacity *ConsumedCapacity `type:"structure"` + + // The number of items in the response. 
+ // + // If you set ScanFilter in the request, then Count is the number of items + // returned after the filter was applied, and ScannedCount is the number of + // matching items before the filter was applied. + // + // If you did not use a filter in the request, then Count is the same as ScannedCount. + Count *int64 `type:"integer"` + + // An array of item attributes that match the scan criteria. Each element in + // this array consists of an attribute name and the value for that attribute. + Items []map[string]*AttributeValue `type:"list"` + + // The primary key of the item where the operation stopped, inclusive of the + // previous result set. Use this value to start a new operation, excluding this + // value in the new request. + // + // If LastEvaluatedKey is empty, then the "last page" of results has been processed + // and there is no more data to be retrieved. + // + // If LastEvaluatedKey is not empty, it does not necessarily mean that there + // is more data in the result set. The only way to know when you have reached + // the end of the result set is when LastEvaluatedKey is empty. + LastEvaluatedKey map[string]*AttributeValue `type:"map"` + + // The number of items evaluated, before any ScanFilter is applied. A high ScannedCount + // value with few, or no, Count results indicates an inefficient Scan operation. + // For more information, see Count and ScannedCount (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/QueryAndScan.html#Count) + // in the Amazon DynamoDB Developer Guide. + // + // If you did not use a filter in the request, then ScannedCount is the same + // as Count. + ScannedCount *int64 `type:"integer"` +} + +// String returns the string representation +func (s ScanOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ScanOutput) GoString() string { + return s.String() +} + +// Represents the DynamoDB Streams configuration for a table in DynamoDB. +type StreamSpecification struct { + _ struct{} `type:"structure"` + + // Indicates whether DynamoDB Streams is enabled (true) or disabled (false) + // on the table. + StreamEnabled *bool `type:"boolean"` + + // The DynamoDB Streams settings for the table. These settings consist of: + // + // StreamEnabled - Indicates whether DynamoDB Streams is enabled (true) or + // disabled (false) on the table. + // + // StreamViewType - When an item in the table is modified, StreamViewType + // determines what information is written to the stream for this table. Valid + // values for StreamViewType are: + // + // KEYS_ONLY - Only the key attributes of the modified item are written to + // the stream. + // + // NEW_IMAGE - The entire item, as it appears after it was modified, is written + // to the stream. + // + // OLD_IMAGE - The entire item, as it appeared before it was modified, is written + // to the stream. + // + // NEW_AND_OLD_IMAGES - Both the new and the old item images of the item are + // written to the stream. + StreamViewType *string `type:"string" enum:"StreamViewType"` +} + +// String returns the string representation +func (s StreamSpecification) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s StreamSpecification) GoString() string { + return s.String() +} + +// Represents the properties of a table. +type TableDescription struct { + _ struct{} `type:"structure"` + + // An array of AttributeDefinition objects. 
Each of these objects describes + // one attribute in the table and index key schema. + // + // Each AttributeDefinition object in this array is composed of: + // + // AttributeName - The name of the attribute. + // + // AttributeType - The data type for the attribute. + AttributeDefinitions []*AttributeDefinition `type:"list"` + + // The date and time when the table was created, in UNIX epoch time (http://www.epochconverter.com/) + // format. + CreationDateTime *time.Time `type:"timestamp" timestampFormat:"unix"` + + // The global secondary indexes, if any, on the table. Each index is scoped + // to a given hash key value. Each element is composed of: + // + // Backfilling - If true, then the index is currently in the backfilling + // phase. Backfilling occurs only when a new global secondary index is added + // to the table; it is the process by which DynamoDB populates the new index + // with data from the table. (This attribute does not appear for indexes that + // were created during a CreateTable operation.) + // + // IndexName - The name of the global secondary index. + // + // IndexSizeBytes - The total size of the global secondary index, in bytes. + // DynamoDB updates this value approximately every six hours. Recent changes + // might not be reflected in this value. + // + // IndexStatus - The current status of the global secondary index: + // + // CREATING - The index is being created. + // + // UPDATING - The index is being updated. + // + // DELETING - The index is being deleted. + // + // ACTIVE - The index is ready for use. + // + // ItemCount - The number of items in the global secondary index. DynamoDB + // updates this value approximately every six hours. Recent changes might not + // be reflected in this value. + // + // KeySchema - Specifies the complete index key schema. The attribute names + // in the key schema must be between 1 and 255 characters (inclusive). The key + // schema must begin with the same hash key attribute as the table. + // + // Projection - Specifies attributes that are copied (projected) from the + // table into the index. These are in addition to the primary key attributes + // and index key attributes, which are automatically projected. Each attribute + // specification is composed of: + // + // ProjectionType - One of the following: + // + // KEYS_ONLY - Only the index and primary keys are projected into the index. + // + // INCLUDE - Only the specified table attributes are projected into the index. + // The list of projected attributes are in NonKeyAttributes. + // + // ALL - All of the table attributes are projected into the index. + // + // NonKeyAttributes - A list of one or more non-key attribute names that + // are projected into the secondary index. The total count of attributes provided + // in NonKeyAttributes, summed across all of the secondary indexes, must not + // exceed 20. If you project the same attribute into two different indexes, + // this counts as two distinct attributes when determining the total. + // + // ProvisionedThroughput - The provisioned throughput settings for the + // global secondary index, consisting of read and write capacity units, along + // with data about increases and decreases. + // + // If the table is in the DELETING state, no information about indexes will + // be returned. + GlobalSecondaryIndexes []*GlobalSecondaryIndexDescription `type:"list"` + + // The number of items in the specified table. DynamoDB updates this value approximately + // every six hours. 
Recent changes might not be reflected in this value. + ItemCount *int64 `type:"long"` + + // The primary key structure for the table. Each KeySchemaElement consists of: + // + // AttributeName - The name of the attribute. + // + // KeyType - The key type for the attribute. Can be either HASH or RANGE. + // + // For more information about primary keys, see Primary Key (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/DataModel.html#DataModelPrimaryKey) + // in the Amazon DynamoDB Developer Guide. + KeySchema []*KeySchemaElement `min:"1" type:"list"` + + // The Amazon Resource Name (ARN) that uniquely identifies the latest stream + // for this table. + LatestStreamArn *string `min:"37" type:"string"` + + // A timestamp, in ISO 8601 format, for this stream. + // + // Note that LatestStreamLabel is not a unique identifier for the stream, because + // it is possible that a stream from another table might have the same timestamp. + // However, the combination of the following three elements is guaranteed to + // be unique: + // + // the AWS customer ID. + // + // the table name. + // + // the StreamLabel. + LatestStreamLabel *string `type:"string"` + + // Represents one or more local secondary indexes on the table. Each index is + // scoped to a given hash key value. Tables with one or more local secondary + // indexes are subject to an item collection size limit, where the amount of + // data within a given item collection cannot exceed 10 GB. Each element is + // composed of: + // + // IndexName - The name of the local secondary index. + // + // KeySchema - Specifies the complete index key schema. The attribute names + // in the key schema must be between 1 and 255 characters (inclusive). The key + // schema must begin with the same hash key attribute as the table. + // + // Projection - Specifies attributes that are copied (projected) from the + // table into the index. These are in addition to the primary key attributes + // and index key attributes, which are automatically projected. Each attribute + // specification is composed of: + // + // ProjectionType - One of the following: + // + // KEYS_ONLY - Only the index and primary keys are projected into the index. + // + // INCLUDE - Only the specified table attributes are projected into the index. + // The list of projected attributes are in NonKeyAttributes. + // + // ALL - All of the table attributes are projected into the index. + // + // NonKeyAttributes - A list of one or more non-key attribute names that + // are projected into the secondary index. The total count of attributes provided + // in NonKeyAttributes, summed across all of the secondary indexes, must not + // exceed 20. If you project the same attribute into two different indexes, + // this counts as two distinct attributes when determining the total. + // + // IndexSizeBytes - Represents the total size of the index, in bytes. DynamoDB + // updates this value approximately every six hours. Recent changes might not + // be reflected in this value. + // + // ItemCount - Represents the number of items in the index. DynamoDB updates + // this value approximately every six hours. Recent changes might not be reflected + // in this value. + // + // If the table is in the DELETING state, no information about indexes will + // be returned. + LocalSecondaryIndexes []*LocalSecondaryIndexDescription `type:"list"` + + // The provisioned throughput settings for the table, consisting of read and + // write capacity units, along with data about increases and decreases. 
+ ProvisionedThroughput *ProvisionedThroughputDescription `type:"structure"` + + // The current DynamoDB Streams configuration for the table. + StreamSpecification *StreamSpecification `type:"structure"` + + // The Amazon Resource Name (ARN) that uniquely identifies the table. + TableArn *string `type:"string"` + + // The name of the table. + TableName *string `min:"3" type:"string"` + + // The total size of the specified table, in bytes. DynamoDB updates this value + // approximately every six hours. Recent changes might not be reflected in this + // value. + TableSizeBytes *int64 `type:"long"` + + // The current state of the table: + // + // CREATING - The table is being created. + // + // UPDATING - The table is being updated. + // + // DELETING - The table is being deleted. + // + // ACTIVE - The table is ready for use. + TableStatus *string `type:"string" enum:"TableStatus"` +} + +// String returns the string representation +func (s TableDescription) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TableDescription) GoString() string { + return s.String() +} + +// Represents the new provisioned throughput settings to be applied to a global +// secondary index. +type UpdateGlobalSecondaryIndexAction struct { + _ struct{} `type:"structure"` + + // The name of the global secondary index to be updated. + IndexName *string `min:"3" type:"string" required:"true"` + + // Represents the provisioned throughput settings for a specified table or index. + // The settings can be modified using the UpdateTable operation. + // + // For current minimum and maximum provisioned throughput values, see Limits + // (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Limits.html) + // in the Amazon DynamoDB Developer Guide. + ProvisionedThroughput *ProvisionedThroughput `type:"structure" required:"true"` +} + +// String returns the string representation +func (s UpdateGlobalSecondaryIndexAction) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateGlobalSecondaryIndexAction) GoString() string { + return s.String() +} + +// Represents the input of an UpdateItem operation. +type UpdateItemInput struct { + _ struct{} `type:"structure"` + + // This is a legacy parameter, for backward compatibility. New applications + // should use UpdateExpression instead. Do not combine legacy parameters and + // expression parameters in a single API call; otherwise, DynamoDB will return + // a ValidationException exception. + // + // This parameter can be used for modifying top-level attributes; however, + // it does not support individual list or map elements. + // + // The names of attributes to be modified, the action to perform on each, + // and the new value for each. If you are updating an attribute that is an index + // key attribute for any indexes on that table, the attribute type must match + // the index key type defined in the AttributesDefinition of the table description. + // You can use UpdateItem to update any nonkey attributes. + // + // Attribute values cannot be null. String and Binary type attributes must + // have lengths greater than zero. Set type attributes must not be empty. Requests + // with empty values will be rejected with a ValidationException exception. + // + // Each AttributeUpdates element consists of an attribute name to modify, along + // with the following: + // + // Value - The new value, if applicable, for this attribute. 
+ // + // Action - A value that specifies how to perform the update. This action + // is only valid for an existing attribute whose data type is Number or is a + // set; do not use ADD for other data types. + // + // If an item with the specified primary key is found in the table, the following + // values perform the following actions: + // + // PUT - Adds the specified attribute to the item. If the attribute already + // exists, it is replaced by the new value. + // + // DELETE - Removes the attribute and its value, if no value is specified + // for DELETE. The data type of the specified value must match the existing + // value's data type. + // + // If a set of values is specified, then those values are subtracted from the + // old set. For example, if the attribute value was the set [a,b,c] and the + // DELETE action specifies [a,c], then the final attribute value is [b]. Specifying + // an empty set is an error. + // + // ADD - Adds the specified value to the item, if the attribute does not + // already exist. If the attribute does exist, then the behavior of ADD depends + // on the data type of the attribute: + // + // If the existing attribute is a number, and if Value is also a number, + // then Value is mathematically added to the existing attribute. If Value is + // a negative number, then it is subtracted from the existing attribute. + // + // If you use ADD to increment or decrement a number value for an item that + // doesn't exist before the update, DynamoDB uses 0 as the initial value. + // + // Similarly, if you use ADD for an existing item to increment or decrement + // an attribute value that doesn't exist before the update, DynamoDB uses 0 + // as the initial value. For example, suppose that the item you want to update + // doesn't have an attribute named itemcount, but you decide to ADD the number + // 3 to this attribute anyway. DynamoDB will create the itemcount attribute, + // set its initial value to 0, and finally add 3 to it. The result will be a + // new itemcount attribute, with a value of 3. + // + // If the existing data type is a set, and if Value is also a set, then + // Value is appended to the existing set. For example, if the attribute value + // is the set [1,2], and the ADD action specified [3], then the final attribute + // value is [1,2,3]. An error occurs if an ADD action is specified for a set + // attribute and the attribute type specified does not match the existing set + // type. + // + // Both sets must have the same primitive data type. For example, if the existing + // data type is a set of strings, Value must also be a set of strings. + // + // If no item with the specified key is found in the table, the following + // values perform the following actions: + // + // PUT - Causes DynamoDB to create a new item with the specified primary + // key, and then adds the attribute. + // + // DELETE - Nothing happens, because attributes cannot be deleted from a + // nonexistent item. The operation succeeds, but DynamoDB does not create a + // new item. + // + // ADD - Causes DynamoDB to create an item with the supplied primary key + // and number (or set of numbers) for the attribute value. The only data types + // allowed are Number and Number Set. + // + // If you provide any attributes that are part of an index key, then the + // data types for those attributes must match those of the schema in the table's + // attribute definition. 
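+ //
+ // As an illustrative sketch only (this fragment is not part of the service
+ // documentation; the attribute name is hypothetical), a legacy ADD update
+ // that increments a numeric attribute could be written as:
+ //
+ //   AttributeUpdates: map[string]*dynamodb.AttributeValueUpdate{
+ //       "itemcount": {
+ //           Action: aws.String("ADD"),
+ //           Value:  &dynamodb.AttributeValue{N: aws.String("3")},
+ //       },
+ //   },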
+ AttributeUpdates map[string]*AttributeValueUpdate `type:"map"`
+
+ // A condition that must be satisfied in order for a conditional update to succeed.
+ //
+ // An expression can contain any of the following:
+ //
+ // Functions: attribute_exists | attribute_not_exists | attribute_type |
+ // contains | begins_with | size
+ //
+ // These function names are case-sensitive.
+ //
+ // Comparison operators: = | <> | < | > | <= | >= | BETWEEN | IN
+ //
+ // Logical operators: AND | OR | NOT
+ //
+ // For more information on condition expressions, see Specifying Conditions
+ // (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.SpecifyingConditions.html)
+ // in the Amazon DynamoDB Developer Guide.
+ //
+ // ConditionExpression replaces the legacy ConditionalOperator and Expected
+ // parameters.
+ ConditionExpression *string `type:"string"`
+
+ // This is a legacy parameter, for backward compatibility. New applications
+ // should use ConditionExpression instead. Do not combine legacy parameters
+ // and expression parameters in a single API call; otherwise, DynamoDB will
+ // return a ValidationException exception.
+ //
+ // A logical operator to apply to the conditions in the Expected map:
+ //
+ // AND - If all of the conditions evaluate to true, then the entire map evaluates
+ // to true.
+ //
+ // OR - If at least one of the conditions evaluates to true, then the entire
+ // map evaluates to true.
+ //
+ // If you omit ConditionalOperator, then AND is the default.
+ //
+ // The operation will succeed only if the entire map evaluates to true.
+ //
+ // This parameter does not support attributes of type List or Map.
+ ConditionalOperator *string `type:"string" enum:"ConditionalOperator"`
+
+ // This is a legacy parameter, for backward compatibility. New applications
+ // should use ConditionExpression instead. Do not combine legacy parameters
+ // and expression parameters in a single API call; otherwise, DynamoDB will
+ // return a ValidationException exception.
+ //
+ // A map of attribute/condition pairs. Expected provides a conditional block
+ // for the UpdateItem operation.
+ //
+ // Each element of Expected consists of an attribute name, a comparison operator,
+ // and one or more values. DynamoDB compares the attribute with the value(s)
+ // you supplied, using the comparison operator. For each Expected element, the
+ // result of the evaluation is either true or false.
+ //
+ // If you specify more than one element in the Expected map, then by default
+ // all of the conditions must evaluate to true. In other words, the conditions
+ // are ANDed together. (You can use the ConditionalOperator parameter to OR
+ // the conditions instead. If you do this, then at least one of the conditions
+ // must evaluate to true, rather than all of them.)
+ //
+ // If the Expected map evaluates to true, then the conditional operation succeeds;
+ // otherwise, it fails.
+ //
+ // Expected contains the following:
+ //
+ // AttributeValueList - One or more values to evaluate against the supplied
+ // attribute. The number of values in the list depends on the ComparisonOperator
+ // being used.
+ //
+ // For type Number, value comparisons are numeric.
+ //
+ // String value comparisons for greater than, equals, or less than are based
+ // on ASCII character code values. For example, a is greater than A, and a is
+ // greater than B. For a list of code values, see http://en.wikipedia.org/wiki/ASCII#ASCII_printable_characters.
+ // + // For type Binary, DynamoDB treats each byte of the binary data as unsigned + // when it compares binary values. + // + // ComparisonOperator - A comparator for evaluating attributes in the AttributeValueList. + // When performing the comparison, DynamoDB uses strongly consistent reads. + // + // The following comparison operators are available: + // + // EQ | NE | LE | LT | GE | GT | NOT_NULL | NULL | CONTAINS | NOT_CONTAINS + // | BEGINS_WITH | IN | BETWEEN + // + // The following are descriptions of each comparison operator. + // + // EQ : Equal. EQ is supported for all datatypes, including lists and maps. + // + // AttributeValueList can contain only one AttributeValue element of type String, + // Number, Binary, String Set, Number Set, or Binary Set. If an item contains + // an AttributeValue element of a different type than the one provided in the + // request, the value does not match. For example, {"S":"6"} does not equal + // {"N":"6"}. Also, {"N":"6"} does not equal {"NS":["6", "2", "1"]}. + // + // NE : Not equal. NE is supported for all datatypes, including lists and + // maps. + // + // AttributeValueList can contain only one AttributeValue of type String, Number, + // Binary, String Set, Number Set, or Binary Set. If an item contains an AttributeValue + // of a different type than the one provided in the request, the value does + // not match. For example, {"S":"6"} does not equal {"N":"6"}. Also, {"N":"6"} + // does not equal {"NS":["6", "2", "1"]}. + // + // LE : Less than or equal. + // + // AttributeValueList can contain only one AttributeValue element of type String, + // Number, or Binary (not a set type). If an item contains an AttributeValue + // element of a different type than the one provided in the request, the value + // does not match. For example, {"S":"6"} does not equal {"N":"6"}. Also, {"N":"6"} + // does not compare to {"NS":["6", "2", "1"]}. + // + // LT : Less than. + // + // AttributeValueList can contain only one AttributeValue of type String, Number, + // or Binary (not a set type). If an item contains an AttributeValue element + // of a different type than the one provided in the request, the value does + // not match. For example, {"S":"6"} does not equal {"N":"6"}. Also, {"N":"6"} + // does not compare to {"NS":["6", "2", "1"]}. + // + // GE : Greater than or equal. + // + // AttributeValueList can contain only one AttributeValue element of type String, + // Number, or Binary (not a set type). If an item contains an AttributeValue + // element of a different type than the one provided in the request, the value + // does not match. For example, {"S":"6"} does not equal {"N":"6"}. Also, {"N":"6"} + // does not compare to {"NS":["6", "2", "1"]}. + // + // GT : Greater than. + // + // AttributeValueList can contain only one AttributeValue element of type String, + // Number, or Binary (not a set type). If an item contains an AttributeValue + // element of a different type than the one provided in the request, the value + // does not match. For example, {"S":"6"} does not equal {"N":"6"}. Also, {"N":"6"} + // does not compare to {"NS":["6", "2", "1"]}. + // + // NOT_NULL : The attribute exists. NOT_NULL is supported for all datatypes, + // including lists and maps. + // + // This operator tests for the existence of an attribute, not its data type. + // If the data type of attribute "a" is null, and you evaluate it using NOT_NULL, + // the result is a Boolean true. 
This result is because the attribute "a" exists; + // its data type is not relevant to the NOT_NULL comparison operator. + // + // NULL : The attribute does not exist. NULL is supported for all datatypes, + // including lists and maps. + // + // This operator tests for the nonexistence of an attribute, not its data type. + // If the data type of attribute "a" is null, and you evaluate it using NULL, + // the result is a Boolean false. This is because the attribute "a" exists; + // its data type is not relevant to the NULL comparison operator. + // + // CONTAINS : Checks for a subsequence, or value in a set. + // + // AttributeValueList can contain only one AttributeValue element of type String, + // Number, or Binary (not a set type). If the target attribute of the comparison + // is of type String, then the operator checks for a substring match. If the + // target attribute of the comparison is of type Binary, then the operator looks + // for a subsequence of the target that matches the input. If the target attribute + // of the comparison is a set ("SS", "NS", or "BS"), then the operator evaluates + // to true if it finds an exact match with any member of the set. + // + // CONTAINS is supported for lists: When evaluating "a CONTAINS b", "a" can + // be a list; however, "b" cannot be a set, a map, or a list. + // + // NOT_CONTAINS : Checks for absence of a subsequence, or absence of a value + // in a set. + // + // AttributeValueList can contain only one AttributeValue element of type String, + // Number, or Binary (not a set type). If the target attribute of the comparison + // is a String, then the operator checks for the absence of a substring match. + // If the target attribute of the comparison is Binary, then the operator checks + // for the absence of a subsequence of the target that matches the input. If + // the target attribute of the comparison is a set ("SS", "NS", or "BS"), then + // the operator evaluates to true if it does not find an exact match with any + // member of the set. + // + // NOT_CONTAINS is supported for lists: When evaluating "a NOT CONTAINS b", + // "a" can be a list; however, "b" cannot be a set, a map, or a list. + // + // BEGINS_WITH : Checks for a prefix. + // + // AttributeValueList can contain only one AttributeValue of type String or + // Binary (not a Number or a set type). The target attribute of the comparison + // must be of type String or Binary (not a Number or a set type). + // + // IN : Checks for matching elements within two sets. + // + // AttributeValueList can contain one or more AttributeValue elements of type + // String, Number, or Binary (not a set type). These attributes are compared + // against an existing set type attribute of an item. If any elements of the + // input set are present in the item attribute, the expression evaluates to + // true. + // + // BETWEEN : Greater than or equal to the first value, and less than or equal + // to the second value. + // + // AttributeValueList must contain two AttributeValue elements of the same + // type, either String, Number, or Binary (not a set type). A target attribute + // matches if the target value is greater than, or equal to, the first element + // and less than, or equal to, the second element. If an item contains an AttributeValue + // element of a different type than the one provided in the request, the value + // does not match. For example, {"S":"6"} does not compare to {"N":"6"}. 
+ // Also, {"N":"6"} does not compare to {"NS":["6", "2", "1"]}
+ //
+ // For usage examples of AttributeValueList and ComparisonOperator, see
+ // Legacy Conditional Parameters (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/LegacyConditionalParameters.html)
+ // in the Amazon DynamoDB Developer Guide.
+ //
+ // For backward compatibility with previous DynamoDB releases, the following
+ // parameters can be used instead of AttributeValueList and ComparisonOperator:
+ //
+ // Value - A value for DynamoDB to compare with an attribute.
+ //
+ // Exists - A Boolean value that causes DynamoDB to evaluate the value before
+ // attempting the conditional operation:
+ //
+ // If Exists is true, DynamoDB will check to see if that attribute value
+ // already exists in the table. If it is found, then the condition evaluates
+ // to true; otherwise the condition evaluates to false.
+ //
+ // If Exists is false, DynamoDB assumes that the attribute value does not
+ // exist in the table. If in fact the value does not exist, then the assumption
+ // is valid and the condition evaluates to true. If the value is found, despite
+ // the assumption that it does not exist, the condition evaluates to false.
+ //
+ // Note that the default value for Exists is true.
+ //
+ // The Value and Exists parameters are incompatible with AttributeValueList
+ // and ComparisonOperator. Note that if you use both sets of parameters at once,
+ // DynamoDB will return a ValidationException exception.
+ //
+ // This parameter does not support attributes of type List or Map.
+ Expected map[string]*ExpectedAttributeValue `type:"map"`
+
+ // One or more substitution tokens for attribute names in an expression. The
+ // following are some use cases for using ExpressionAttributeNames:
+ //
+ // To access an attribute whose name conflicts with a DynamoDB reserved word.
+ //
+ // To create a placeholder for repeating occurrences of an attribute name
+ // in an expression.
+ //
+ // To prevent special characters in an attribute name from being misinterpreted
+ // in an expression.
+ //
+ // Use the # character in an expression to dereference an attribute name.
+ // For example, consider the following attribute name:
+ //
+ // Percentile
+ //
+ // The name of this attribute conflicts with a reserved word, so it cannot
+ // be used directly in an expression. (For the complete list of reserved words,
+ // see Reserved Words (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ReservedWords.html)
+ // in the Amazon DynamoDB Developer Guide). To work around this, you could specify
+ // the following for ExpressionAttributeNames:
+ //
+ // {"#P":"Percentile"}
+ //
+ // You could then use this substitution in an expression, as in this example:
+ //
+ // #P = :val
+ //
+ // Tokens that begin with the : character are expression attribute values,
+ // which are placeholders for the actual value at runtime.
+ //
+ // For more information on expression attribute names, see Accessing Item Attributes
+ // (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.AccessingItemAttributes.html)
+ // in the Amazon DynamoDB Developer Guide.
+ ExpressionAttributeNames map[string]*string `type:"map"`
+
+ // One or more values that can be substituted in an expression.
+ //
+ // Use the : (colon) character in an expression to dereference an attribute
+ // value.
+ // For example, suppose that you wanted to check whether the value of
+ // the ProductStatus attribute was one of the following:
+ //
+ // Available | Backordered | Discontinued
+ //
+ // You would first need to specify ExpressionAttributeValues as follows:
+ //
+ // { ":avail":{"S":"Available"}, ":back":{"S":"Backordered"}, ":disc":{"S":"Discontinued"}
+ // }
+ //
+ // You could then use these values in an expression, such as this:
+ //
+ // ProductStatus IN (:avail, :back, :disc)
+ //
+ // For more information on expression attribute values, see Specifying Conditions
+ // (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.SpecifyingConditions.html)
+ // in the Amazon DynamoDB Developer Guide.
+ ExpressionAttributeValues map[string]*AttributeValue `type:"map"`
+
+ // The primary key of the item to be updated. Each element consists of an attribute
+ // name and a value for that attribute.
+ //
+ // For the primary key, you must provide all of the attributes. For example,
+ // with a hash type primary key, you only need to provide the hash attribute.
+ // For a hash-and-range type primary key, you must provide both the hash attribute
+ // and the range attribute.
+ Key map[string]*AttributeValue `type:"map" required:"true"`
+
+ // Determines the level of detail about provisioned throughput consumption that
+ // is returned in the response:
+ //
+ // INDEXES - The response includes the aggregate ConsumedCapacity for the
+ // operation, together with ConsumedCapacity for each table and secondary index
+ // that was accessed.
+ //
+ // Note that some operations, such as GetItem and BatchGetItem, do not access
+ // any indexes at all. In these cases, specifying INDEXES will only return ConsumedCapacity
+ // information for table(s).
+ //
+ // TOTAL - The response includes only the aggregate ConsumedCapacity for the
+ // operation.
+ //
+ // NONE - No ConsumedCapacity details are included in the response.
+ ReturnConsumedCapacity *string `type:"string" enum:"ReturnConsumedCapacity"`
+
+ // Determines whether item collection metrics are returned. If set to SIZE,
+ // statistics about item collections, if any, that were modified during the
+ // operation are returned in the response. If set to NONE (the default), no
+ // statistics are returned.
+ ReturnItemCollectionMetrics *string `type:"string" enum:"ReturnItemCollectionMetrics"`
+
+ // Use ReturnValues if you want to get the item attributes as they appeared
+ // either before or after they were updated. For UpdateItem, the valid values
+ // are:
+ //
+ // NONE - If ReturnValues is not specified, or if its value is NONE, then
+ // nothing is returned. (This setting is the default for ReturnValues.)
+ //
+ // ALL_OLD - If UpdateItem overwrote an attribute name-value pair, then the
+ // content of the old item is returned.
+ //
+ // UPDATED_OLD - The old versions of only the updated attributes are returned.
+ //
+ // ALL_NEW - All of the attributes of the new version of the item are returned.
+ //
+ // UPDATED_NEW - The new versions of only the updated attributes are returned.
+ ReturnValues *string `type:"string" enum:"ReturnValue"`
+
+ // The name of the table containing the item to update.
+ TableName *string `min:"3" type:"string" required:"true"`
+
+ // An expression that defines one or more attributes to be updated, the action
+ // to be performed on them, and new value(s) for them.
+ //
+ // The following action values are available for UpdateExpression.
+ //
+ // SET - Adds one or more attributes and values to an item. If any of these
+ // attributes already exist, they are replaced by the new values. You can also
+ // use SET to add or subtract from an attribute that is of type Number. For
+ // example: SET myNum = myNum + :val
+ //
+ // SET supports the following functions:
+ //
+ // if_not_exists (path, operand) - if the item does not contain an attribute
+ // at the specified path, then if_not_exists evaluates to operand; otherwise,
+ // it evaluates to path. You can use this function to avoid overwriting an attribute
+ // that may already be present in the item.
+ //
+ // list_append (operand, operand) - evaluates to a list with a new element
+ // added to it. You can append the new element to the start or the end of the
+ // list by reversing the order of the operands.
+ //
+ // These function names are case-sensitive.
+ //
+ // REMOVE - Removes one or more attributes from an item.
+ //
+ // ADD - Adds the specified value to the item, if the attribute does not
+ // already exist. If the attribute does exist, then the behavior of ADD depends
+ // on the data type of the attribute:
+ //
+ // If the existing attribute is a number, and if Value is also a number,
+ // then Value is mathematically added to the existing attribute. If Value is
+ // a negative number, then it is subtracted from the existing attribute.
+ //
+ // If you use ADD to increment or decrement a number value for an item that
+ // doesn't exist before the update, DynamoDB uses 0 as the initial value.
+ //
+ // Similarly, if you use ADD for an existing item to increment or decrement
+ // an attribute value that doesn't exist before the update, DynamoDB uses 0
+ // as the initial value. For example, suppose that the item you want to update
+ // doesn't have an attribute named itemcount, but you decide to ADD the number
+ // 3 to this attribute anyway. DynamoDB will create the itemcount attribute,
+ // set its initial value to 0, and finally add 3 to it. The result will be a
+ // new itemcount attribute in the item, with a value of 3.
+ //
+ // If the existing data type is a set and if Value is also a set, then Value
+ // is added to the existing set. For example, if the attribute value is the
+ // set [1,2], and the ADD action specified [3], then the final attribute value
+ // is [1,2,3]. An error occurs if an ADD action is specified for a set attribute
+ // and the attribute type specified does not match the existing set type.
+ //
+ // Both sets must have the same primitive data type. For example, if the existing
+ // data type is a set of strings, the Value must also be a set of strings.
+ //
+ // The ADD action only supports Number and set data types. In addition, ADD
+ // can only be used on top-level attributes, not nested attributes.
+ //
+ // DELETE - Deletes an element from a set.
+ //
+ // If a set of values is specified, then those values are subtracted from the
+ // old set. For example, if the attribute value was the set [a,b,c] and the
+ // DELETE action specifies [a,c], then the final attribute value is [b]. Specifying
+ // an empty set is an error.
+ //
+ // The DELETE action only supports set data types. In addition, DELETE can
+ // only be used on top-level attributes, not nested attributes.
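+ //
+ // As a hedged illustration (the attribute and placeholder names here are
+ // invented for the example), a request combining SET and REMOVE might
+ // carry:
+ //
+ //   UpdateExpression: aws.String("SET #st = :s REMOVE TempAttr"),
+ //   ExpressionAttributeNames: map[string]*string{"#st": aws.String("Status")},
+ //   ExpressionAttributeValues: map[string]*dynamodb.AttributeValue{
+ //       ":s": {S: aws.String("Active")},
+ //   },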
+ // + // You can have many actions in a single expression, such as the following: + // SET a=:value1, b=:value2 DELETE :value3, :value4, :value5 + // + // For more information on update expressions, see Modifying Items and Attributes + // (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.Modifying.html) + // in the Amazon DynamoDB Developer Guide. + // + // UpdateExpression replaces the legacy AttributeUpdates parameter. + UpdateExpression *string `type:"string"` +} + +// String returns the string representation +func (s UpdateItemInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateItemInput) GoString() string { + return s.String() +} + +// Represents the output of an UpdateItem operation. +type UpdateItemOutput struct { + _ struct{} `type:"structure"` + + // A map of attribute values as they appeared before the UpdateItem operation. + // This map only appears if ReturnValues was specified as something other than + // NONE in the request. Each element represents one attribute. + Attributes map[string]*AttributeValue `type:"map"` + + // The capacity units consumed by an operation. The data returned includes the + // total provisioned throughput consumed, along with statistics for the table + // and any indexes involved in the operation. ConsumedCapacity is only returned + // if the request asked for it. For more information, see Provisioned Throughput + // (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ProvisionedThroughputIntro.html) + // in the Amazon DynamoDB Developer Guide. + ConsumedCapacity *ConsumedCapacity `type:"structure"` + + // Information about item collections, if any, that were affected by the operation. + // ItemCollectionMetrics is only returned if the request asked for it. If the + // table does not have any local secondary indexes, this information is not + // returned in the response. + ItemCollectionMetrics *ItemCollectionMetrics `type:"structure"` +} + +// String returns the string representation +func (s UpdateItemOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateItemOutput) GoString() string { + return s.String() +} + +// Represents the input of an UpdateTable operation. +type UpdateTableInput struct { + _ struct{} `type:"structure"` + + // An array of attributes that describe the key schema for the table and indexes. + // If you are adding a new global secondary index to the table, AttributeDefinitions + // must include the key element(s) of the new index. + AttributeDefinitions []*AttributeDefinition `type:"list"` + + // An array of one or more global secondary indexes for the table. For each + // index in the array, you can request one action: + // + // Create - add a new global secondary index to the table. + // + // Update - modify the provisioned throughput settings of an existing global + // secondary index. + // + // Delete - remove a global secondary index from the table. + // + // For more information, see Managing Global Secondary Indexes (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/GSI.OnlineOps.html) + // in the Amazon DynamoDB Developer Guide. + GlobalSecondaryIndexUpdates []*GlobalSecondaryIndexUpdate `type:"list"` + + // Represents the provisioned throughput settings for a specified table or index. + // The settings can be modified using the UpdateTable operation. 
+ // + // For current minimum and maximum provisioned throughput values, see Limits + // (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Limits.html) + // in the Amazon DynamoDB Developer Guide. + ProvisionedThroughput *ProvisionedThroughput `type:"structure"` + + // Represents the DynamoDB Streams configuration for the table. + // + // You will receive a ResourceInUseException if you attempt to enable a stream + // on a table that already has a stream, or if you attempt to disable a stream + // on a table which does not have a stream. + StreamSpecification *StreamSpecification `type:"structure"` + + // The name of the table to be updated. + TableName *string `min:"3" type:"string" required:"true"` +} + +// String returns the string representation +func (s UpdateTableInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateTableInput) GoString() string { + return s.String() +} + +// Represents the output of an UpdateTable operation. +type UpdateTableOutput struct { + _ struct{} `type:"structure"` + + // Represents the properties of a table. + TableDescription *TableDescription `type:"structure"` +} + +// String returns the string representation +func (s UpdateTableOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateTableOutput) GoString() string { + return s.String() +} + +// Represents an operation to perform - either DeleteItem or PutItem. You can +// only request one of these operations, not both, in a single WriteRequest. +// If you do need to perform both of these operations, you will need to provide +// two separate WriteRequest objects. +type WriteRequest struct { + _ struct{} `type:"structure"` + + // A request to perform a DeleteItem operation. + DeleteRequest *DeleteRequest `type:"structure"` + + // A request to perform a PutItem operation. 
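+ //
+ // A hedged sketch (the item contents are illustrative) of a single put
+ // request as it might appear inside a BatchWriteItem call:
+ //
+ //   dynamodb.WriteRequest{
+ //       PutRequest: &dynamodb.PutRequest{
+ //           Item: map[string]*dynamodb.AttributeValue{
+ //               "Id": {S: aws.String("123")},
+ //           },
+ //       },
+ //   }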
+ PutRequest *PutRequest `type:"structure"` +} + +// String returns the string representation +func (s WriteRequest) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s WriteRequest) GoString() string { + return s.String() +} + +const ( + // @enum AttributeAction + AttributeActionAdd = "ADD" + // @enum AttributeAction + AttributeActionPut = "PUT" + // @enum AttributeAction + AttributeActionDelete = "DELETE" +) + +const ( + // @enum ComparisonOperator + ComparisonOperatorEq = "EQ" + // @enum ComparisonOperator + ComparisonOperatorNe = "NE" + // @enum ComparisonOperator + ComparisonOperatorIn = "IN" + // @enum ComparisonOperator + ComparisonOperatorLe = "LE" + // @enum ComparisonOperator + ComparisonOperatorLt = "LT" + // @enum ComparisonOperator + ComparisonOperatorGe = "GE" + // @enum ComparisonOperator + ComparisonOperatorGt = "GT" + // @enum ComparisonOperator + ComparisonOperatorBetween = "BETWEEN" + // @enum ComparisonOperator + ComparisonOperatorNotNull = "NOT_NULL" + // @enum ComparisonOperator + ComparisonOperatorNull = "NULL" + // @enum ComparisonOperator + ComparisonOperatorContains = "CONTAINS" + // @enum ComparisonOperator + ComparisonOperatorNotContains = "NOT_CONTAINS" + // @enum ComparisonOperator + ComparisonOperatorBeginsWith = "BEGINS_WITH" +) + +const ( + // @enum ConditionalOperator + ConditionalOperatorAnd = "AND" + // @enum ConditionalOperator + ConditionalOperatorOr = "OR" +) + +const ( + // @enum IndexStatus + IndexStatusCreating = "CREATING" + // @enum IndexStatus + IndexStatusUpdating = "UPDATING" + // @enum IndexStatus + IndexStatusDeleting = "DELETING" + // @enum IndexStatus + IndexStatusActive = "ACTIVE" +) + +const ( + // @enum KeyType + KeyTypeHash = "HASH" + // @enum KeyType + KeyTypeRange = "RANGE" +) + +const ( + // @enum ProjectionType + ProjectionTypeAll = "ALL" + // @enum ProjectionType + ProjectionTypeKeysOnly = "KEYS_ONLY" + // @enum ProjectionType + ProjectionTypeInclude = "INCLUDE" +) + +// Determines the level of detail about provisioned throughput consumption that +// is returned in the response: +// +// INDEXES - The response includes the aggregate ConsumedCapacity for the +// operation, together with ConsumedCapacity for each table and secondary index +// that was accessed. +// +// Note that some operations, such as GetItem and BatchGetItem, do not access +// any indexes at all. In these cases, specifying INDEXES will only return ConsumedCapacity +// information for table(s). +// +// TOTAL - The response includes only the aggregate ConsumedCapacity for the +// operation. +// +// NONE - No ConsumedCapacity details are included in the response. 
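+//
+// For example (an illustrative fragment, not from the service documentation),
+// per-index consumption can be requested with:
+//
+//   ReturnConsumedCapacity: aws.String(dynamodb.ReturnConsumedCapacityIndexes),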
+const ( + // @enum ReturnConsumedCapacity + ReturnConsumedCapacityIndexes = "INDEXES" + // @enum ReturnConsumedCapacity + ReturnConsumedCapacityTotal = "TOTAL" + // @enum ReturnConsumedCapacity + ReturnConsumedCapacityNone = "NONE" +) + +const ( + // @enum ReturnItemCollectionMetrics + ReturnItemCollectionMetricsSize = "SIZE" + // @enum ReturnItemCollectionMetrics + ReturnItemCollectionMetricsNone = "NONE" +) + +const ( + // @enum ReturnValue + ReturnValueNone = "NONE" + // @enum ReturnValue + ReturnValueAllOld = "ALL_OLD" + // @enum ReturnValue + ReturnValueUpdatedOld = "UPDATED_OLD" + // @enum ReturnValue + ReturnValueAllNew = "ALL_NEW" + // @enum ReturnValue + ReturnValueUpdatedNew = "UPDATED_NEW" +) + +const ( + // @enum ScalarAttributeType + ScalarAttributeTypeS = "S" + // @enum ScalarAttributeType + ScalarAttributeTypeN = "N" + // @enum ScalarAttributeType + ScalarAttributeTypeB = "B" +) + +const ( + // @enum Select + SelectAllAttributes = "ALL_ATTRIBUTES" + // @enum Select + SelectAllProjectedAttributes = "ALL_PROJECTED_ATTRIBUTES" + // @enum Select + SelectSpecificAttributes = "SPECIFIC_ATTRIBUTES" + // @enum Select + SelectCount = "COUNT" +) + +const ( + // @enum StreamViewType + StreamViewTypeNewImage = "NEW_IMAGE" + // @enum StreamViewType + StreamViewTypeOldImage = "OLD_IMAGE" + // @enum StreamViewType + StreamViewTypeNewAndOldImages = "NEW_AND_OLD_IMAGES" + // @enum StreamViewType + StreamViewTypeKeysOnly = "KEYS_ONLY" +) + +const ( + // @enum TableStatus + TableStatusCreating = "CREATING" + // @enum TableStatus + TableStatusUpdating = "UPDATING" + // @enum TableStatus + TableStatusDeleting = "DELETING" + // @enum TableStatus + TableStatusActive = "ACTIVE" +) diff --git a/vendor/github.com/aws/aws-sdk-go/service/dynamodb/customizations.go b/vendor/github.com/aws/aws-sdk-go/service/dynamodb/customizations.go new file mode 100644 index 000000000..51843cd7a --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/dynamodb/customizations.go @@ -0,0 +1,98 @@ +package dynamodb + +import ( + "bytes" + "hash/crc32" + "io" + "io/ioutil" + "math" + "strconv" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/client" + "github.com/aws/aws-sdk-go/aws/request" +) + +type retryer struct { + client.DefaultRetryer +} + +func (d retryer) RetryRules(r *request.Request) time.Duration { + delay := time.Duration(math.Pow(2, float64(r.RetryCount))) * 50 + return delay * time.Millisecond +} + +func init() { + initClient = func(c *client.Client) { + r := retryer{} + if c.Config.MaxRetries == nil || aws.IntValue(c.Config.MaxRetries) == aws.UseServiceDefaultRetries { + r.NumMaxRetries = 10 + } else { + r.NumMaxRetries = *c.Config.MaxRetries + } + c.Retryer = r + + c.Handlers.Build.PushBack(disableCompression) + c.Handlers.Unmarshal.PushFront(validateCRC32) + } +} + +func drainBody(b io.ReadCloser, length int64) (out *bytes.Buffer, err error) { + if length < 0 { + length = 0 + } + buf := bytes.NewBuffer(make([]byte, 0, length)) + + if _, err = buf.ReadFrom(b); err != nil { + return nil, err + } + if err = b.Close(); err != nil { + return nil, err + } + return buf, nil +} + +func disableCompression(r *request.Request) { + r.HTTPRequest.Header.Set("Accept-Encoding", "identity") +} + +func validateCRC32(r *request.Request) { + if r.Error != nil { + return // already have an error, no need to verify CRC + } + + // Checksum validation is off, skip + if aws.BoolValue(r.Config.DisableComputeChecksums) { + return + } + + // Try to get CRC 
from response + header := r.HTTPResponse.Header.Get("X-Amz-Crc32") + if header == "" { + return // No header, skip + } + + expected, err := strconv.ParseUint(header, 10, 32) + if err != nil { + return // Could not determine CRC value, skip + } + + buf, err := drainBody(r.HTTPResponse.Body, r.HTTPResponse.ContentLength) + if err != nil { // failed to read the response body, skip + return + } + + // Reset body for subsequent reads + r.HTTPResponse.Body = ioutil.NopCloser(bytes.NewReader(buf.Bytes())) + + // Compute the CRC checksum + crc := crc32.ChecksumIEEE(buf.Bytes()) + + if crc != uint32(expected) { + // CRC does not match, set a retryable error + r.Retryable = aws.Bool(true) + r.Error = awserr.New("CRC32CheckFailed", "CRC32 integrity check failed", nil) + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/dynamodb/customizations_test.go b/vendor/github.com/aws/aws-sdk-go/service/dynamodb/customizations_test.go new file mode 100644 index 000000000..194b51794 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/dynamodb/customizations_test.go @@ -0,0 +1,106 @@ +package dynamodb_test + +import ( + "bytes" + "io/ioutil" + "net/http" + "os" + "testing" + + "github.com/stretchr/testify/assert" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/awstesting/unit" + "github.com/aws/aws-sdk-go/service/dynamodb" +) + +var db *dynamodb.DynamoDB + +func TestMain(m *testing.M) { + db = dynamodb.New(unit.Session, &aws.Config{ + MaxRetries: aws.Int(2), + }) + db.Handlers.Send.Clear() // mock sending + + os.Exit(m.Run()) +} + +func mockCRCResponse(svc *dynamodb.DynamoDB, status int, body, crc string) (req *request.Request) { + header := http.Header{} + header.Set("x-amz-crc32", crc) + + req, _ = svc.ListTablesRequest(nil) + req.Handlers.Send.PushBack(func(*request.Request) { + req.HTTPResponse = &http.Response{ + ContentLength: int64(len(body)), + StatusCode: status, + Body: ioutil.NopCloser(bytes.NewReader([]byte(body))), + Header: header, + } + }) + req.Send() + return +} + +func TestDefaultRetryRules(t *testing.T) { + d := dynamodb.New(unit.Session, &aws.Config{MaxRetries: aws.Int(-1)}) + assert.Equal(t, d.MaxRetries(), 10) +} + +func TestCustomRetryRules(t *testing.T) { + d := dynamodb.New(unit.Session, &aws.Config{MaxRetries: aws.Int(2)}) + assert.Equal(t, d.MaxRetries(), 2) +} + +func TestValidateCRC32NoHeaderSkip(t *testing.T) { + req := mockCRCResponse(db, 200, "{}", "") + assert.NoError(t, req.Error) +} + +func TestValidateCRC32InvalidHeaderSkip(t *testing.T) { + req := mockCRCResponse(db, 200, "{}", "ABC") + assert.NoError(t, req.Error) +} + +func TestValidateCRC32AlreadyErrorSkip(t *testing.T) { + req := mockCRCResponse(db, 400, "{}", "1234") + assert.Error(t, req.Error) + + assert.NotEqual(t, "CRC32CheckFailed", req.Error.(awserr.Error).Code()) +} + +func TestValidateCRC32IsValid(t *testing.T) { + req := mockCRCResponse(db, 200, `{"TableNames":["A"]}`, "3090163698") + assert.NoError(t, req.Error) + + // CRC check does not affect output parsing + out := req.Data.(*dynamodb.ListTablesOutput) + assert.Equal(t, "A", *out.TableNames[0]) +} + +func TestValidateCRC32DoesNotMatch(t *testing.T) { + req := mockCRCResponse(db, 200, "{}", "1234") + assert.Error(t, req.Error) + + assert.Equal(t, "CRC32CheckFailed", req.Error.(awserr.Error).Code()) + assert.Equal(t, 2, req.RetryCount) +} + +func TestValidateCRC32DoesNotMatchNoComputeChecksum(t *testing.T) { + svc := dynamodb.New(unit.Session, 
&aws.Config{ + MaxRetries: aws.Int(2), + DisableComputeChecksums: aws.Bool(true), + }) + svc.Handlers.Send.Clear() // mock sending + + req := mockCRCResponse(svc, 200, `{"TableNames":["A"]}`, "1234") + assert.NoError(t, req.Error) + + assert.Equal(t, 0, int(req.RetryCount)) + + // CRC check disabled. Does not affect output parsing + out := req.Data.(*dynamodb.ListTablesOutput) + assert.Equal(t, "A", *out.TableNames[0]) +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/dynamodb/dynamodbattribute/converter.go b/vendor/github.com/aws/aws-sdk-go/service/dynamodb/dynamodbattribute/converter.go new file mode 100644 index 000000000..7b2daf464 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/dynamodb/dynamodbattribute/converter.go @@ -0,0 +1,463 @@ +// Package dynamodbattribute provides conversion utilities from dynamodb.AttributeValue +// to concrete Go types and structures. These conversion utilities allow you to +// convert a Struct, Slice, Map, or Scalar value to or from dynamodb.AttributeValue. +// These are most useful to serialize concrete types to dynamodb.AttributeValue for +// requests or unmarshalling the dynamodb.AttributeValue into a well known typed form. +// +// Convert concrete type to dynamodb.AttributeValue: See (ExampleConvertTo) +// +// type Record struct { +// MyField string +// Letters []string +// A2Num map[string]int +// } +// +// ... +// +// r := Record{ +// MyField: "dynamodbattribute.ConvertToX example", +// Letters: []string{"a", "b", "c", "d"}, +// A2Num: map[string]int{"a": 1, "b": 2, "c": 3}, +// } +// av, err := dynamodbattribute.ConvertTo(r) +// fmt.Println(av, err) +// +// Convert dynamodb.AttributeValue to Concrete type: See (ExampleConvertFrom) +// +// r2 := Record{} +// err = dynamodbattribute.ConvertFrom(av, &r2) +// fmt.Println(err, reflect.DeepEqual(r, r2)) +// +// Use Conversion utilities with DynamoDB.PutItem: See () +// +// svc := dynamodb.New(nil) +// item, err := dynamodbattribute.ConvertToMap(r) +// if err != nil { +// fmt.Println("Failed to convert", err) +// return +// } +// result, err := svc.PutItem(&dynamodb.PutItemInput{ +// Item: item, +// TableName: aws.String("exampleTable"), +// }) +package dynamodbattribute + +import ( + "bytes" + "encoding/json" + "fmt" + "reflect" + "runtime" + "strconv" + + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/service/dynamodb" +) + +// ConvertToMap accepts a map[string]interface{} or struct and converts it to a +// map[string]*dynamodb.AttributeValue. +// +// If in contains any structs, it is first JSON encoded/decoded it to convert it +// to a map[string]interface{}, so `json` struct tags are respected. 
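+//
+// A minimal usage sketch (Record is the illustrative type from the package
+// example above):
+//
+//   item, err := dynamodbattribute.ConvertToMap(Record{MyField: "example"})
+//   if err != nil {
+//       return err
+//   }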
+func ConvertToMap(in interface{}) (item map[string]*dynamodb.AttributeValue, err error) { + defer func() { + if r := recover(); r != nil { + if e, ok := r.(runtime.Error); ok { + err = e + } else if s, ok := r.(string); ok { + err = fmt.Errorf(s) + } else { + err = r.(error) + } + item = nil + } + }() + + if in == nil { + return nil, awserr.New("SerializationError", + "in must be a map[string]interface{} or struct, got ", nil) + } + + v := reflect.ValueOf(in) + if v.Kind() != reflect.Struct && !(v.Kind() == reflect.Map && v.Type().Key().Kind() == reflect.String) { + return nil, awserr.New("SerializationError", + fmt.Sprintf("in must be a map[string]interface{} or struct, got %s", + v.Type().String()), + nil) + } + + if isTyped(reflect.TypeOf(in)) { + var out map[string]interface{} + in = convertToUntyped(in, out) + } + + item = make(map[string]*dynamodb.AttributeValue) + for k, v := range in.(map[string]interface{}) { + item[k] = convertTo(v) + } + + return item, nil +} + +// ConvertFromMap accepts a map[string]*dynamodb.AttributeValue and converts it to a +// map[string]interface{} or struct. +// +// If v points to a struct, the result is first converted it to a +// map[string]interface{}, then JSON encoded/decoded it to convert to a struct, +// so `json` struct tags are respected. +func ConvertFromMap(item map[string]*dynamodb.AttributeValue, v interface{}) (err error) { + defer func() { + if r := recover(); r != nil { + if e, ok := r.(runtime.Error); ok { + err = e + } else if s, ok := r.(string); ok { + err = fmt.Errorf(s) + } else { + err = r.(error) + } + item = nil + } + }() + + rv := reflect.ValueOf(v) + if rv.Kind() != reflect.Ptr || rv.IsNil() { + return awserr.New("SerializationError", + fmt.Sprintf("v must be a non-nil pointer to a map[string]interface{} or struct, got %s", + rv.Type()), + nil) + } + if rv.Elem().Kind() != reflect.Struct && !(rv.Elem().Kind() == reflect.Map && rv.Elem().Type().Key().Kind() == reflect.String) { + return awserr.New("SerializationError", + fmt.Sprintf("v must be a non-nil pointer to a map[string]interface{} or struct, got %s", + rv.Type()), + nil) + } + + m := make(map[string]interface{}) + for k, v := range item { + m[k] = convertFrom(v) + } + + if isTyped(reflect.TypeOf(v)) { + err = convertToTyped(m, v) + } else { + rv.Elem().Set(reflect.ValueOf(m)) + } + + return err +} + +// ConvertToList accepts an array or slice and converts it to a +// []*dynamodb.AttributeValue. +// +// If in contains any structs, it is first JSON encoded/decoded it to convert it +// to a []interface{}, so `json` struct tags are respected. 
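+//
+// A minimal sketch; as the implementation below suggests, plain values must
+// be passed as []interface{} (a typed primitive slice such as []string is not
+// handled by the conversion and results in an error), while slices containing
+// structs are converted via their `json` form:
+//
+//   avs, err := dynamodbattribute.ConvertToList([]interface{}{"a", "b", 3})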
+func ConvertToList(in interface{}) (item []*dynamodb.AttributeValue, err error) { + defer func() { + if r := recover(); r != nil { + if e, ok := r.(runtime.Error); ok { + err = e + } else if s, ok := r.(string); ok { + err = fmt.Errorf(s) + } else { + err = r.(error) + } + item = nil + } + }() + + if in == nil { + return nil, awserr.New("SerializationError", + "in must be an array or slice, got ", + nil) + } + + v := reflect.ValueOf(in) + if v.Kind() != reflect.Array && v.Kind() != reflect.Slice { + return nil, awserr.New("SerializationError", + fmt.Sprintf("in must be an array or slice, got %s", + v.Type().String()), + nil) + } + + if isTyped(reflect.TypeOf(in)) { + var out []interface{} + in = convertToUntyped(in, out) + } + + item = make([]*dynamodb.AttributeValue, 0, len(in.([]interface{}))) + for _, v := range in.([]interface{}) { + item = append(item, convertTo(v)) + } + + return item, nil +} + +// ConvertFromList accepts a []*dynamodb.AttributeValue and converts it to an array or +// slice. +// +// If v contains any structs, the result is first converted it to a +// []interface{}, then JSON encoded/decoded it to convert to a typed array or +// slice, so `json` struct tags are respected. +func ConvertFromList(item []*dynamodb.AttributeValue, v interface{}) (err error) { + defer func() { + if r := recover(); r != nil { + if e, ok := r.(runtime.Error); ok { + err = e + } else if s, ok := r.(string); ok { + err = fmt.Errorf(s) + } else { + err = r.(error) + } + item = nil + } + }() + + rv := reflect.ValueOf(v) + if rv.Kind() != reflect.Ptr || rv.IsNil() { + return awserr.New("SerializationError", + fmt.Sprintf("v must be a non-nil pointer to an array or slice, got %s", + rv.Type()), + nil) + } + if rv.Elem().Kind() != reflect.Array && rv.Elem().Kind() != reflect.Slice { + return awserr.New("SerializationError", + fmt.Sprintf("v must be a non-nil pointer to an array or slice, got %s", + rv.Type()), + nil) + } + + l := make([]interface{}, 0, len(item)) + for _, v := range item { + l = append(l, convertFrom(v)) + } + + if isTyped(reflect.TypeOf(v)) { + err = convertToTyped(l, v) + } else { + rv.Elem().Set(reflect.ValueOf(l)) + } + + return err +} + +// ConvertTo accepts any interface{} and converts it to a *dynamodb.AttributeValue. +// +// If in contains any structs, it is first JSON encoded/decoded it to convert it +// to a interface{}, so `json` struct tags are respected. +func ConvertTo(in interface{}) (item *dynamodb.AttributeValue, err error) { + defer func() { + if r := recover(); r != nil { + if e, ok := r.(runtime.Error); ok { + err = e + } else if s, ok := r.(string); ok { + err = fmt.Errorf(s) + } else { + err = r.(error) + } + item = nil + } + }() + + if in != nil && isTyped(reflect.TypeOf(in)) { + var out interface{} + in = convertToUntyped(in, out) + } + + item = convertTo(in) + return item, nil +} + +// ConvertFrom accepts a *dynamodb.AttributeValue and converts it to any interface{}. +// +// If v contains any structs, the result is first converted it to a interface{}, +// then JSON encoded/decoded it to convert to a struct, so `json` struct tags +// are respected. 
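+//
+// A minimal sketch, decoding a numeric attribute into an untyped variable:
+//
+//   var out interface{}
+//   err := dynamodbattribute.ConvertFrom(
+//       &dynamodb.AttributeValue{N: aws.String("12")}, &out)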
+func ConvertFrom(item *dynamodb.AttributeValue, v interface{}) (err error) { + defer func() { + if r := recover(); r != nil { + if e, ok := r.(runtime.Error); ok { + err = e + } else if s, ok := r.(string); ok { + err = fmt.Errorf(s) + } else { + err = r.(error) + } + item = nil + } + }() + + rv := reflect.ValueOf(v) + if rv.Kind() != reflect.Ptr || rv.IsNil() { + return awserr.New("SerializationError", + fmt.Sprintf("v must be a non-nil pointer to an interface{} or struct, got %s", + rv.Type()), + nil) + } + if rv.Elem().Kind() != reflect.Interface && rv.Elem().Kind() != reflect.Struct { + return awserr.New("SerializationError", + fmt.Sprintf("v must be a non-nil pointer to an interface{} or struct, got %s", + rv.Type()), + nil) + } + + res := convertFrom(item) + + if isTyped(reflect.TypeOf(v)) { + err = convertToTyped(res, v) + } else if res != nil { + rv.Elem().Set(reflect.ValueOf(res)) + } + + return err +} + +func isTyped(v reflect.Type) bool { + switch v.Kind() { + case reflect.Struct: + return true + case reflect.Array, reflect.Slice: + if isTyped(v.Elem()) { + return true + } + case reflect.Map: + if isTyped(v.Key()) { + return true + } + if isTyped(v.Elem()) { + return true + } + case reflect.Ptr: + return isTyped(v.Elem()) + } + return false +} + +func convertToUntyped(in, out interface{}) interface{} { + b, err := json.Marshal(in) + if err != nil { + panic(err) + } + + decoder := json.NewDecoder(bytes.NewReader(b)) + decoder.UseNumber() + err = decoder.Decode(&out) + if err != nil { + panic(err) + } + + return out +} + +func convertToTyped(in, out interface{}) error { + b, err := json.Marshal(in) + if err != nil { + return err + } + + decoder := json.NewDecoder(bytes.NewReader(b)) + return decoder.Decode(&out) +} + +func convertTo(in interface{}) *dynamodb.AttributeValue { + a := &dynamodb.AttributeValue{} + + if in == nil { + a.NULL = new(bool) + *a.NULL = true + return a + } + + if m, ok := in.(map[string]interface{}); ok { + a.M = make(map[string]*dynamodb.AttributeValue) + for k, v := range m { + a.M[k] = convertTo(v) + } + return a + } + + if l, ok := in.([]interface{}); ok { + a.L = make([]*dynamodb.AttributeValue, len(l)) + for index, v := range l { + a.L[index] = convertTo(v) + } + return a + } + + // Only primitive types should remain. + v := reflect.ValueOf(in) + switch v.Kind() { + case reflect.Bool: + a.BOOL = new(bool) + *a.BOOL = v.Bool() + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + a.N = new(string) + *a.N = strconv.FormatInt(v.Int(), 10) + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + a.N = new(string) + *a.N = strconv.FormatUint(v.Uint(), 10) + case reflect.Float32, reflect.Float64: + a.N = new(string) + *a.N = strconv.FormatFloat(v.Float(), 'f', -1, 64) + case reflect.String: + if n, ok := in.(json.Number); ok { + a.N = new(string) + *a.N = n.String() + } else { + a.S = new(string) + *a.S = v.String() + } + default: + panic(fmt.Sprintf("the type %s is not supported", v.Type().String())) + } + + return a +} + +func convertFrom(a *dynamodb.AttributeValue) interface{} { + if a.S != nil { + return *a.S + } + + if a.N != nil { + // Number is tricky b/c we don't know which numeric type to use. Here we + // simply try the different types from most to least restrictive. 
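+        // For example, "12" parses as an int, "18446744073709551615" fits
+        // only a uint, and "3.14" falls through to float64.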
+ if n, err := strconv.ParseInt(*a.N, 10, 64); err == nil { + return int(n) + } + if n, err := strconv.ParseUint(*a.N, 10, 64); err == nil { + return uint(n) + } + n, err := strconv.ParseFloat(*a.N, 64) + if err != nil { + panic(err) + } + return n + } + + if a.BOOL != nil { + return *a.BOOL + } + + if a.NULL != nil { + return nil + } + + if a.M != nil { + m := make(map[string]interface{}) + for k, v := range a.M { + m[k] = convertFrom(v) + } + return m + } + + if a.L != nil { + l := make([]interface{}, len(a.L)) + for index, v := range a.L { + l[index] = convertFrom(v) + } + return l + } + + panic(fmt.Sprintf("%#v is not a supported dynamodb.AttributeValue", a)) +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/dynamodb/dynamodbattribute/converter_test.go b/vendor/github.com/aws/aws-sdk-go/service/dynamodb/dynamodbattribute/converter_test.go new file mode 100644 index 000000000..2b536f039 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/dynamodb/dynamodbattribute/converter_test.go @@ -0,0 +1,488 @@ +package dynamodbattribute + +import ( + "math" + "reflect" + "testing" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/awsutil" + "github.com/aws/aws-sdk-go/service/dynamodb" +) + +type mySimpleStruct struct { + String string + Int int + Uint uint + Float32 float32 + Float64 float64 + Bool bool + Null *interface{} +} + +type myComplexStruct struct { + Simple []mySimpleStruct +} + +type converterTestInput struct { + input interface{} + expected interface{} + err awserr.Error + inputType string // "enum" of types +} + +var trueValue = true +var falseValue = false + +var converterScalarInputs = []converterTestInput{ + { + input: nil, + expected: &dynamodb.AttributeValue{NULL: &trueValue}, + }, + { + input: "some string", + expected: &dynamodb.AttributeValue{S: aws.String("some string")}, + }, + { + input: true, + expected: &dynamodb.AttributeValue{BOOL: &trueValue}, + }, + { + input: false, + expected: &dynamodb.AttributeValue{BOOL: &falseValue}, + }, + { + input: 3.14, + expected: &dynamodb.AttributeValue{N: aws.String("3.14")}, + }, + { + input: math.MaxFloat32, + expected: &dynamodb.AttributeValue{N: aws.String("340282346638528860000000000000000000000")}, + }, + { + input: math.MaxFloat64, + expected: &dynamodb.AttributeValue{N: aws.String("179769313486231570000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000")}, + }, + { + input: 12, + expected: &dynamodb.AttributeValue{N: aws.String("12")}, + }, + { + input: mySimpleStruct{}, + expected: &dynamodb.AttributeValue{ + M: map[string]*dynamodb.AttributeValue{ + "Bool": {BOOL: &falseValue}, + "Float32": {N: aws.String("0")}, + "Float64": {N: aws.String("0")}, + "Int": {N: aws.String("0")}, + "Null": {NULL: &trueValue}, + "String": {S: aws.String("")}, + "Uint": {N: aws.String("0")}, + }, + }, + inputType: "mySimpleStruct", + }, +} + +var converterMapTestInputs = []converterTestInput{ + // Scalar tests + { + input: nil, + err: awserr.New("SerializationError", "in must be a map[string]interface{} or struct, got ", nil), + }, + { + input: map[string]interface{}{"string": "some string"}, + expected: map[string]*dynamodb.AttributeValue{"string": {S: aws.String("some string")}}, + }, + { + input: map[string]interface{}{"bool": true}, + 
expected: map[string]*dynamodb.AttributeValue{"bool": {BOOL: &trueValue}}, + }, + { + input: map[string]interface{}{"bool": false}, + expected: map[string]*dynamodb.AttributeValue{"bool": {BOOL: &falseValue}}, + }, + { + input: map[string]interface{}{"null": nil}, + expected: map[string]*dynamodb.AttributeValue{"null": {NULL: &trueValue}}, + }, + { + input: map[string]interface{}{"float": 3.14}, + expected: map[string]*dynamodb.AttributeValue{"float": {N: aws.String("3.14")}}, + }, + { + input: map[string]interface{}{"float": math.MaxFloat32}, + expected: map[string]*dynamodb.AttributeValue{"float": {N: aws.String("340282346638528860000000000000000000000")}}, + }, + { + input: map[string]interface{}{"float": math.MaxFloat64}, + expected: map[string]*dynamodb.AttributeValue{"float": {N: aws.String("179769313486231570000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000")}}, + }, + { + input: map[string]interface{}{"int": int(12)}, + expected: map[string]*dynamodb.AttributeValue{"int": {N: aws.String("12")}}, + }, + // List + { + input: map[string]interface{}{"list": []interface{}{"a string", 12, 3.14, true, nil, false}}, + expected: map[string]*dynamodb.AttributeValue{ + "list": { + L: []*dynamodb.AttributeValue{ + {S: aws.String("a string")}, + {N: aws.String("12")}, + {N: aws.String("3.14")}, + {BOOL: &trueValue}, + {NULL: &trueValue}, + {BOOL: &falseValue}, + }, + }, + }, + }, + // Map + { + input: map[string]interface{}{"map": map[string]interface{}{"nestedint": 12}}, + expected: map[string]*dynamodb.AttributeValue{ + "map": { + M: map[string]*dynamodb.AttributeValue{ + "nestedint": { + N: aws.String("12"), + }, + }, + }, + }, + }, + // Structs + { + input: mySimpleStruct{}, + expected: map[string]*dynamodb.AttributeValue{ + "Bool": {BOOL: &falseValue}, + "Float32": {N: aws.String("0")}, + "Float64": {N: aws.String("0")}, + "Int": {N: aws.String("0")}, + "Null": {NULL: &trueValue}, + "String": {S: aws.String("")}, + "Uint": {N: aws.String("0")}, + }, + inputType: "mySimpleStruct", + }, + { + input: myComplexStruct{}, + expected: map[string]*dynamodb.AttributeValue{ + "Simple": {NULL: &trueValue}, + }, + inputType: "myComplexStruct", + }, + { + input: myComplexStruct{Simple: []mySimpleStruct{{Int: -2}, {Uint: 5}}}, + expected: map[string]*dynamodb.AttributeValue{ + "Simple": { + L: []*dynamodb.AttributeValue{ + { + M: map[string]*dynamodb.AttributeValue{ + "Bool": {BOOL: &falseValue}, + "Float32": {N: aws.String("0")}, + "Float64": {N: aws.String("0")}, + "Int": {N: aws.String("-2")}, + "Null": {NULL: &trueValue}, + "String": {S: aws.String("")}, + "Uint": {N: aws.String("0")}, + }, + }, + { + M: map[string]*dynamodb.AttributeValue{ + "Bool": {BOOL: &falseValue}, + "Float32": {N: aws.String("0")}, + "Float64": {N: aws.String("0")}, + "Int": {N: aws.String("0")}, + "Null": {NULL: &trueValue}, + "String": {S: aws.String("")}, + "Uint": {N: aws.String("5")}, + }, + }, + }, + }, + }, + inputType: "myComplexStruct", + }, +} + +var converterListTestInputs = []converterTestInput{ + { + input: nil, + err: awserr.New("SerializationError", "in must be an array or slice, got ", nil), + }, + { + input: []interface{}{}, + expected: []*dynamodb.AttributeValue{}, + }, + { + input: []interface{}{"a string", 12, 3.14, true, nil, false}, + expected: 
[]*dynamodb.AttributeValue{
+            {S: aws.String("a string")},
+            {N: aws.String("12")},
+            {N: aws.String("3.14")},
+            {BOOL: &trueValue},
+            {NULL: &trueValue},
+            {BOOL: &falseValue},
+        },
+    },
+    {
+        input: []mySimpleStruct{{}},
+        expected: []*dynamodb.AttributeValue{
+            {
+                M: map[string]*dynamodb.AttributeValue{
+                    "Bool":    {BOOL: &falseValue},
+                    "Float32": {N: aws.String("0")},
+                    "Float64": {N: aws.String("0")},
+                    "Int":     {N: aws.String("0")},
+                    "Null":    {NULL: &trueValue},
+                    "String":  {S: aws.String("")},
+                    "Uint":    {N: aws.String("0")},
+                },
+            },
+        },
+        inputType: "mySimpleStruct",
+    },
+}
+
+func TestConvertTo(t *testing.T) {
+    for _, test := range converterScalarInputs {
+        testConvertTo(t, test)
+    }
+}
+
+func testConvertTo(t *testing.T, test converterTestInput) {
+    actual, err := ConvertTo(test.input)
+    if test.err != nil {
+        if err == nil {
+            t.Errorf("ConvertTo with input %#v returned %#v, expected error `%s`", test.input, actual, test.err)
+        } else if err.Error() != test.err.Error() {
+            t.Errorf("ConvertTo with input %#v returned error `%s`, expected error `%s`", test.input, err, test.err)
+        }
+    } else {
+        if err != nil {
+            t.Errorf("ConvertTo with input %#v returned error `%s`", test.input, err)
+        }
+        compareObjects(t, test.expected, actual)
+    }
+}
+
+func TestConvertFrom(t *testing.T) {
+    // Using the same inputs from TestConvertTo, test the reverse mapping.
+    for _, test := range converterScalarInputs {
+        if test.expected != nil {
+            testConvertFrom(t, test)
+        }
+    }
+}
+
+func testConvertFrom(t *testing.T, test converterTestInput) {
+    switch test.inputType {
+    case "mySimpleStruct":
+        var actual mySimpleStruct
+        if err := ConvertFrom(test.expected.(*dynamodb.AttributeValue), &actual); err != nil {
+            t.Errorf("ConvertFrom with input %#v returned error `%s`", test.expected, err)
+        }
+        compareObjects(t, test.input, actual)
+    case "myComplexStruct":
+        var actual myComplexStruct
+        if err := ConvertFrom(test.expected.(*dynamodb.AttributeValue), &actual); err != nil {
+            t.Errorf("ConvertFrom with input %#v returned error `%s`", test.expected, err)
+        }
+        compareObjects(t, test.input, actual)
+    default:
+        var actual interface{}
+        if err := ConvertFrom(test.expected.(*dynamodb.AttributeValue), &actual); err != nil {
+            t.Errorf("ConvertFrom with input %#v returned error `%s`", test.expected, err)
+        }
+        compareObjects(t, test.input, actual)
+    }
+}
+
+func TestConvertFromError(t *testing.T) {
+    // Test that we get an error using ConvertFrom to convert to a map.
+    var actual map[string]interface{}
+    expected := awserr.New("SerializationError", `v must be a non-nil pointer to an interface{} or struct, got *map[string]interface {}`, nil).Error()
+    if err := ConvertFrom(nil, &actual); err == nil {
+        t.Errorf("ConvertFrom with input %#v returned no error, expected error `%s`", nil, expected)
+    } else if err.Error() != expected {
+        t.Errorf("ConvertFrom with input %#v returned error `%s`, expected error `%s`", nil, err, expected)
+    }
+
+    // Test that we get an error using ConvertFrom to convert to a list.
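+    // (ConvertFrom deserializes only into interface{} or struct values; for
+    // slices, ConvertFromList is the right call.)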
+    var actual2 []interface{}
+    expected = awserr.New("SerializationError", `v must be a non-nil pointer to an interface{} or struct, got *[]interface {}`, nil).Error()
+    if err := ConvertFrom(nil, &actual2); err == nil {
+        t.Errorf("ConvertFrom with input %#v returned no error, expected error `%s`", nil, expected)
+    } else if err.Error() != expected {
+        t.Errorf("ConvertFrom with input %#v returned error `%s`, expected error `%s`", nil, err, expected)
+    }
+}
+
+func TestConvertToMap(t *testing.T) {
+    for _, test := range converterMapTestInputs {
+        testConvertToMap(t, test)
+    }
+}
+
+func testConvertToMap(t *testing.T, test converterTestInput) {
+    actual, err := ConvertToMap(test.input)
+    if test.err != nil {
+        if err == nil {
+            t.Errorf("ConvertToMap with input %#v returned %#v, expected error `%s`", test.input, actual, test.err)
+        } else if err.Error() != test.err.Error() {
+            t.Errorf("ConvertToMap with input %#v returned error `%s`, expected error `%s`", test.input, err, test.err)
+        }
+    } else {
+        if err != nil {
+            t.Errorf("ConvertToMap with input %#v returned error `%s`", test.input, err)
+        }
+        compareObjects(t, test.expected, actual)
+    }
+}
+
+func TestConvertFromMap(t *testing.T) {
+    // Using the same inputs from TestConvertToMap, test the reverse mapping.
+    for _, test := range converterMapTestInputs {
+        if test.expected != nil {
+            testConvertFromMap(t, test)
+        }
+    }
+}
+
+func testConvertFromMap(t *testing.T, test converterTestInput) {
+    switch test.inputType {
+    case "mySimpleStruct":
+        var actual mySimpleStruct
+        if err := ConvertFromMap(test.expected.(map[string]*dynamodb.AttributeValue), &actual); err != nil {
+            t.Errorf("ConvertFromMap with input %#v returned error `%s`", test.expected, err)
+        }
+        compareObjects(t, test.input, actual)
+    case "myComplexStruct":
+        var actual myComplexStruct
+        if err := ConvertFromMap(test.expected.(map[string]*dynamodb.AttributeValue), &actual); err != nil {
+            t.Errorf("ConvertFromMap with input %#v returned error `%s`", test.expected, err)
+        }
+        compareObjects(t, test.input, actual)
+    default:
+        var actual map[string]interface{}
+        if err := ConvertFromMap(test.expected.(map[string]*dynamodb.AttributeValue), &actual); err != nil {
+            t.Errorf("ConvertFromMap with input %#v returned error `%s`", test.expected, err)
+        }
+        compareObjects(t, test.input, actual)
+    }
+}
+
+func TestConvertFromMapError(t *testing.T) {
+    // Test that we get an error using ConvertFromMap to convert to an interface{}.
+    var actual interface{}
+    expected := awserr.New("SerializationError", `v must be a non-nil pointer to a map[string]interface{} or struct, got *interface {}`, nil).Error()
+    if err := ConvertFromMap(nil, &actual); err == nil {
+        t.Errorf("ConvertFromMap with input %#v returned no error, expected error `%s`", nil, expected)
+    } else if err.Error() != expected {
+        t.Errorf("ConvertFromMap with input %#v returned error `%s`, expected error `%s`", nil, err, expected)
+    }
+
+    // Test that we get an error using ConvertFromMap to convert to a slice.
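+    // (ConvertFromMap deserializes only into map[string]interface{} or
+    // struct values; slices go through ConvertFromList.)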
+    var actual2 []interface{}
+    expected = awserr.New("SerializationError", `v must be a non-nil pointer to a map[string]interface{} or struct, got *[]interface {}`, nil).Error()
+    if err := ConvertFromMap(nil, &actual2); err == nil {
+        t.Errorf("ConvertFromMap with input %#v returned no error, expected error `%s`", nil, expected)
+    } else if err.Error() != expected {
+        t.Errorf("ConvertFromMap with input %#v returned error `%s`, expected error `%s`", nil, err, expected)
+    }
+}
+
+func TestConvertToList(t *testing.T) {
+    for _, test := range converterListTestInputs {
+        testConvertToList(t, test)
+    }
+}
+
+func testConvertToList(t *testing.T, test converterTestInput) {
+    actual, err := ConvertToList(test.input)
+    if test.err != nil {
+        if err == nil {
+            t.Errorf("ConvertToList with input %#v returned %#v, expected error `%s`", test.input, actual, test.err)
+        } else if err.Error() != test.err.Error() {
+            t.Errorf("ConvertToList with input %#v returned error `%s`, expected error `%s`", test.input, err, test.err)
+        }
+    } else {
+        if err != nil {
+            t.Errorf("ConvertToList with input %#v returned error `%s`", test.input, err)
+        }
+        compareObjects(t, test.expected, actual)
+    }
+}
+
+func TestConvertFromList(t *testing.T) {
+    // Using the same inputs from TestConvertToList, test the reverse mapping.
+    for _, test := range converterListTestInputs {
+        if test.expected != nil {
+            testConvertFromList(t, test)
+        }
+    }
+}
+
+func testConvertFromList(t *testing.T, test converterTestInput) {
+    switch test.inputType {
+    case "mySimpleStruct":
+        var actual []mySimpleStruct
+        if err := ConvertFromList(test.expected.([]*dynamodb.AttributeValue), &actual); err != nil {
+            t.Errorf("ConvertFromList with input %#v returned error `%s`", test.expected, err)
+        }
+        compareObjects(t, test.input, actual)
+    case "myComplexStruct":
+        var actual []myComplexStruct
+        if err := ConvertFromList(test.expected.([]*dynamodb.AttributeValue), &actual); err != nil {
+            t.Errorf("ConvertFromList with input %#v returned error `%s`", test.expected, err)
+        }
+        compareObjects(t, test.input, actual)
+    default:
+        var actual []interface{}
+        if err := ConvertFromList(test.expected.([]*dynamodb.AttributeValue), &actual); err != nil {
+            t.Errorf("ConvertFromList with input %#v returned error `%s`", test.expected, err)
+        }
+        compareObjects(t, test.input, actual)
+    }
+}
+
+func TestConvertFromListError(t *testing.T) {
+    // Test that we get an error using ConvertFromList to convert to a map.
+    var actual map[string]interface{}
+    expected := awserr.New("SerializationError", `v must be a non-nil pointer to an array or slice, got *map[string]interface {}`, nil).Error()
+    if err := ConvertFromList(nil, &actual); err == nil {
+        t.Errorf("ConvertFromList with input %#v returned no error, expected error `%s`", nil, expected)
+    } else if err.Error() != expected {
+        t.Errorf("ConvertFromList with input %#v returned error `%s`, expected error `%s`", nil, err, expected)
+    }
+
+    // Test that we get an error using ConvertFromList to convert to a struct.
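+    // (ConvertFromList targets arrays and slices; a bare struct round-trips
+    // through ConvertFromMap instead.)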
+    var actual2 myComplexStruct
+    expected = awserr.New("SerializationError", `v must be a non-nil pointer to an array or slice, got *dynamodbattribute.myComplexStruct`, nil).Error()
+    if err := ConvertFromList(nil, &actual2); err == nil {
+        t.Errorf("ConvertFromList with input %#v returned no error, expected error `%s`", nil, expected)
+    } else if err.Error() != expected {
+        t.Errorf("ConvertFromList with input %#v returned error `%s`, expected error `%s`", nil, err, expected)
+    }
+
+    // Test that we get an error using ConvertFromList to convert to an interface{}.
+    var actual3 interface{}
+    expected = awserr.New("SerializationError", `v must be a non-nil pointer to an array or slice, got *interface {}`, nil).Error()
+    if err := ConvertFromList(nil, &actual3); err == nil {
+        t.Errorf("ConvertFromList with input %#v returned no error, expected error `%s`", nil, expected)
+    } else if err.Error() != expected {
+        t.Errorf("ConvertFromList with input %#v returned error `%s`, expected error `%s`", nil, err, expected)
+    }
+}
+
+func compareObjects(t *testing.T, expected interface{}, actual interface{}) {
+    if !reflect.DeepEqual(expected, actual) {
+        t.Errorf("\nExpected %s:\n%s\nActual %s:\n%s\n",
+            reflect.ValueOf(expected).Kind(),
+            awsutil.Prettify(expected),
+            reflect.ValueOf(actual).Kind(),
+            awsutil.Prettify(actual))
+    }
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/service/dynamodb/dynamodbattribute/examples_test.go b/vendor/github.com/aws/aws-sdk-go/service/dynamodb/dynamodbattribute/examples_test.go
new file mode 100644
index 000000000..e4fcedfcb
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/dynamodb/dynamodbattribute/examples_test.go
@@ -0,0 +1,79 @@
+package dynamodbattribute_test
+
+import (
+    "fmt"
+    "github.com/aws/aws-sdk-go/service/dynamodb/dynamodbattribute"
+    "reflect"
+)
+
+func ExampleConvertTo() {
+    type Record struct {
+        MyField string
+        Letters []string
+        Numbers []int
+    }
+
+    r := Record{
+        MyField: "MyFieldValue",
+        Letters: []string{"a", "b", "c", "d"},
+        Numbers: []int{1, 2, 3},
+    }
+    av, err := dynamodbattribute.ConvertTo(r)
+    fmt.Println("err", err)
+    fmt.Println("MyField", av.M["MyField"])
+    fmt.Println("Letters", av.M["Letters"])
+    fmt.Println("Numbers", av.M["Numbers"])
+
+    // Output:
+    // err <nil>
+    // MyField {
+    //   S: "MyFieldValue"
+    // }
+    // Letters {
+    //   L: [
+    //     {
+    //       S: "a"
+    //     },
+    //     {
+    //       S: "b"
+    //     },
+    //     {
+    //       S: "c"
+    //     },
+    //     {
+    //       S: "d"
+    //     }
+    //   ]
+    // }
+    // Numbers {
+    //   L: [{
+    //       N: "1"
+    //     },{
+    //       N: "2"
+    //     },{
+    //       N: "3"
+    //     }]
+    // }
+}
+
+func ExampleConvertFrom() {
+    type Record struct {
+        MyField string
+        Letters []string
+        A2Num   map[string]int
+    }
+
+    r := Record{
+        MyField: "MyFieldValue",
+        Letters: []string{"a", "b", "c", "d"},
+        A2Num:   map[string]int{"a": 1, "b": 2, "c": 3},
+    }
+    av, err := dynamodbattribute.ConvertTo(r)
+
+    r2 := Record{}
+    err = dynamodbattribute.ConvertFrom(av, &r2)
+    fmt.Println(err, reflect.DeepEqual(r, r2))
+
+    // Output:
+    // <nil> true
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/service/dynamodb/dynamodbiface/interface.go b/vendor/github.com/aws/aws-sdk-go/service/dynamodb/dynamodbiface/interface.go
new file mode 100644
index 000000000..ad7a09ef0
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/dynamodb/dynamodbiface/interface.go
@@ -0,0 +1,74 @@
+// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT.
+
+// Package dynamodbiface provides an interface for Amazon DynamoDB.
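+//
+// A sketch of the intended use, assuming your own type holds the interface
+// so tests can substitute a mock (the names here are illustrative only):
+//
+//	type store struct {
+//		db dynamodbiface.DynamoDBAPI // satisfied by *dynamodb.DynamoDB or a test mock
+//	}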
+package dynamodbiface + +import ( + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/service/dynamodb" +) + +// DynamoDBAPI is the interface type for dynamodb.DynamoDB. +type DynamoDBAPI interface { + BatchGetItemRequest(*dynamodb.BatchGetItemInput) (*request.Request, *dynamodb.BatchGetItemOutput) + + BatchGetItem(*dynamodb.BatchGetItemInput) (*dynamodb.BatchGetItemOutput, error) + + BatchGetItemPages(*dynamodb.BatchGetItemInput, func(*dynamodb.BatchGetItemOutput, bool) bool) error + + BatchWriteItemRequest(*dynamodb.BatchWriteItemInput) (*request.Request, *dynamodb.BatchWriteItemOutput) + + BatchWriteItem(*dynamodb.BatchWriteItemInput) (*dynamodb.BatchWriteItemOutput, error) + + CreateTableRequest(*dynamodb.CreateTableInput) (*request.Request, *dynamodb.CreateTableOutput) + + CreateTable(*dynamodb.CreateTableInput) (*dynamodb.CreateTableOutput, error) + + DeleteItemRequest(*dynamodb.DeleteItemInput) (*request.Request, *dynamodb.DeleteItemOutput) + + DeleteItem(*dynamodb.DeleteItemInput) (*dynamodb.DeleteItemOutput, error) + + DeleteTableRequest(*dynamodb.DeleteTableInput) (*request.Request, *dynamodb.DeleteTableOutput) + + DeleteTable(*dynamodb.DeleteTableInput) (*dynamodb.DeleteTableOutput, error) + + DescribeTableRequest(*dynamodb.DescribeTableInput) (*request.Request, *dynamodb.DescribeTableOutput) + + DescribeTable(*dynamodb.DescribeTableInput) (*dynamodb.DescribeTableOutput, error) + + GetItemRequest(*dynamodb.GetItemInput) (*request.Request, *dynamodb.GetItemOutput) + + GetItem(*dynamodb.GetItemInput) (*dynamodb.GetItemOutput, error) + + ListTablesRequest(*dynamodb.ListTablesInput) (*request.Request, *dynamodb.ListTablesOutput) + + ListTables(*dynamodb.ListTablesInput) (*dynamodb.ListTablesOutput, error) + + ListTablesPages(*dynamodb.ListTablesInput, func(*dynamodb.ListTablesOutput, bool) bool) error + + PutItemRequest(*dynamodb.PutItemInput) (*request.Request, *dynamodb.PutItemOutput) + + PutItem(*dynamodb.PutItemInput) (*dynamodb.PutItemOutput, error) + + QueryRequest(*dynamodb.QueryInput) (*request.Request, *dynamodb.QueryOutput) + + Query(*dynamodb.QueryInput) (*dynamodb.QueryOutput, error) + + QueryPages(*dynamodb.QueryInput, func(*dynamodb.QueryOutput, bool) bool) error + + ScanRequest(*dynamodb.ScanInput) (*request.Request, *dynamodb.ScanOutput) + + Scan(*dynamodb.ScanInput) (*dynamodb.ScanOutput, error) + + ScanPages(*dynamodb.ScanInput, func(*dynamodb.ScanOutput, bool) bool) error + + UpdateItemRequest(*dynamodb.UpdateItemInput) (*request.Request, *dynamodb.UpdateItemOutput) + + UpdateItem(*dynamodb.UpdateItemInput) (*dynamodb.UpdateItemOutput, error) + + UpdateTableRequest(*dynamodb.UpdateTableInput) (*request.Request, *dynamodb.UpdateTableOutput) + + UpdateTable(*dynamodb.UpdateTableInput) (*dynamodb.UpdateTableOutput, error) +} + +var _ DynamoDBAPI = (*dynamodb.DynamoDB)(nil) diff --git a/vendor/github.com/aws/aws-sdk-go/service/dynamodb/examples_test.go b/vendor/github.com/aws/aws-sdk-go/service/dynamodb/examples_test.go new file mode 100644 index 000000000..79db6a5ee --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/dynamodb/examples_test.go @@ -0,0 +1,1336 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. 
+ +package dynamodb_test + +import ( + "bytes" + "fmt" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/session" + "github.com/aws/aws-sdk-go/service/dynamodb" +) + +var _ time.Duration +var _ bytes.Buffer + +func ExampleDynamoDB_BatchGetItem() { + svc := dynamodb.New(session.New()) + + params := &dynamodb.BatchGetItemInput{ + RequestItems: map[string]*dynamodb.KeysAndAttributes{ // Required + "Key": { // Required + Keys: []map[string]*dynamodb.AttributeValue{ // Required + { // Required + "Key": { // Required + B: []byte("PAYLOAD"), + BOOL: aws.Bool(true), + BS: [][]byte{ + []byte("PAYLOAD"), // Required + // More values... + }, + L: []*dynamodb.AttributeValue{ + { // Required + // Recursive values... + }, + // More values... + }, + M: map[string]*dynamodb.AttributeValue{ + "Key": { // Required + // Recursive values... + }, + // More values... + }, + N: aws.String("NumberAttributeValue"), + NS: []*string{ + aws.String("NumberAttributeValue"), // Required + // More values... + }, + NULL: aws.Bool(true), + S: aws.String("StringAttributeValue"), + SS: []*string{ + aws.String("StringAttributeValue"), // Required + // More values... + }, + }, + // More values... + }, + // More values... + }, + AttributesToGet: []*string{ + aws.String("AttributeName"), // Required + // More values... + }, + ConsistentRead: aws.Bool(true), + ExpressionAttributeNames: map[string]*string{ + "Key": aws.String("AttributeName"), // Required + // More values... + }, + ProjectionExpression: aws.String("ProjectionExpression"), + }, + // More values... + }, + ReturnConsumedCapacity: aws.String("ReturnConsumedCapacity"), + } + resp, err := svc.BatchGetItem(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleDynamoDB_BatchWriteItem() { + svc := dynamodb.New(session.New()) + + params := &dynamodb.BatchWriteItemInput{ + RequestItems: map[string][]*dynamodb.WriteRequest{ // Required + "Key": { // Required + { // Required + DeleteRequest: &dynamodb.DeleteRequest{ + Key: map[string]*dynamodb.AttributeValue{ // Required + "Key": { // Required + B: []byte("PAYLOAD"), + BOOL: aws.Bool(true), + BS: [][]byte{ + []byte("PAYLOAD"), // Required + // More values... + }, + L: []*dynamodb.AttributeValue{ + { // Required + // Recursive values... + }, + // More values... + }, + M: map[string]*dynamodb.AttributeValue{ + "Key": { // Required + // Recursive values... + }, + // More values... + }, + N: aws.String("NumberAttributeValue"), + NS: []*string{ + aws.String("NumberAttributeValue"), // Required + // More values... + }, + NULL: aws.Bool(true), + S: aws.String("StringAttributeValue"), + SS: []*string{ + aws.String("StringAttributeValue"), // Required + // More values... + }, + }, + // More values... + }, + }, + PutRequest: &dynamodb.PutRequest{ + Item: map[string]*dynamodb.AttributeValue{ // Required + "Key": { // Required + B: []byte("PAYLOAD"), + BOOL: aws.Bool(true), + BS: [][]byte{ + []byte("PAYLOAD"), // Required + // More values... + }, + L: []*dynamodb.AttributeValue{ + { // Required + // Recursive values... + }, + // More values... + }, + M: map[string]*dynamodb.AttributeValue{ + "Key": { // Required + // Recursive values... + }, + // More values... + }, + N: aws.String("NumberAttributeValue"), + NS: []*string{ + aws.String("NumberAttributeValue"), // Required + // More values... 
+ }, + NULL: aws.Bool(true), + S: aws.String("StringAttributeValue"), + SS: []*string{ + aws.String("StringAttributeValue"), // Required + // More values... + }, + }, + // More values... + }, + }, + }, + // More values... + }, + // More values... + }, + ReturnConsumedCapacity: aws.String("ReturnConsumedCapacity"), + ReturnItemCollectionMetrics: aws.String("ReturnItemCollectionMetrics"), + } + resp, err := svc.BatchWriteItem(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleDynamoDB_CreateTable() { + svc := dynamodb.New(session.New()) + + params := &dynamodb.CreateTableInput{ + AttributeDefinitions: []*dynamodb.AttributeDefinition{ // Required + { // Required + AttributeName: aws.String("KeySchemaAttributeName"), // Required + AttributeType: aws.String("ScalarAttributeType"), // Required + }, + // More values... + }, + KeySchema: []*dynamodb.KeySchemaElement{ // Required + { // Required + AttributeName: aws.String("KeySchemaAttributeName"), // Required + KeyType: aws.String("KeyType"), // Required + }, + // More values... + }, + ProvisionedThroughput: &dynamodb.ProvisionedThroughput{ // Required + ReadCapacityUnits: aws.Int64(1), // Required + WriteCapacityUnits: aws.Int64(1), // Required + }, + TableName: aws.String("TableName"), // Required + GlobalSecondaryIndexes: []*dynamodb.GlobalSecondaryIndex{ + { // Required + IndexName: aws.String("IndexName"), // Required + KeySchema: []*dynamodb.KeySchemaElement{ // Required + { // Required + AttributeName: aws.String("KeySchemaAttributeName"), // Required + KeyType: aws.String("KeyType"), // Required + }, + // More values... + }, + Projection: &dynamodb.Projection{ // Required + NonKeyAttributes: []*string{ + aws.String("NonKeyAttributeName"), // Required + // More values... + }, + ProjectionType: aws.String("ProjectionType"), + }, + ProvisionedThroughput: &dynamodb.ProvisionedThroughput{ // Required + ReadCapacityUnits: aws.Int64(1), // Required + WriteCapacityUnits: aws.Int64(1), // Required + }, + }, + // More values... + }, + LocalSecondaryIndexes: []*dynamodb.LocalSecondaryIndex{ + { // Required + IndexName: aws.String("IndexName"), // Required + KeySchema: []*dynamodb.KeySchemaElement{ // Required + { // Required + AttributeName: aws.String("KeySchemaAttributeName"), // Required + KeyType: aws.String("KeyType"), // Required + }, + // More values... + }, + Projection: &dynamodb.Projection{ // Required + NonKeyAttributes: []*string{ + aws.String("NonKeyAttributeName"), // Required + // More values... + }, + ProjectionType: aws.String("ProjectionType"), + }, + }, + // More values... + }, + StreamSpecification: &dynamodb.StreamSpecification{ + StreamEnabled: aws.Bool(true), + StreamViewType: aws.String("StreamViewType"), + }, + } + resp, err := svc.CreateTable(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleDynamoDB_DeleteItem() { + svc := dynamodb.New(session.New()) + + params := &dynamodb.DeleteItemInput{ + Key: map[string]*dynamodb.AttributeValue{ // Required + "Key": { // Required + B: []byte("PAYLOAD"), + BOOL: aws.Bool(true), + BS: [][]byte{ + []byte("PAYLOAD"), // Required + // More values... 
+ }, + L: []*dynamodb.AttributeValue{ + { // Required + // Recursive values... + }, + // More values... + }, + M: map[string]*dynamodb.AttributeValue{ + "Key": { // Required + // Recursive values... + }, + // More values... + }, + N: aws.String("NumberAttributeValue"), + NS: []*string{ + aws.String("NumberAttributeValue"), // Required + // More values... + }, + NULL: aws.Bool(true), + S: aws.String("StringAttributeValue"), + SS: []*string{ + aws.String("StringAttributeValue"), // Required + // More values... + }, + }, + // More values... + }, + TableName: aws.String("TableName"), // Required + ConditionExpression: aws.String("ConditionExpression"), + ConditionalOperator: aws.String("ConditionalOperator"), + Expected: map[string]*dynamodb.ExpectedAttributeValue{ + "Key": { // Required + AttributeValueList: []*dynamodb.AttributeValue{ + { // Required + B: []byte("PAYLOAD"), + BOOL: aws.Bool(true), + BS: [][]byte{ + []byte("PAYLOAD"), // Required + // More values... + }, + L: []*dynamodb.AttributeValue{ + { // Required + // Recursive values... + }, + // More values... + }, + M: map[string]*dynamodb.AttributeValue{ + "Key": { // Required + // Recursive values... + }, + // More values... + }, + N: aws.String("NumberAttributeValue"), + NS: []*string{ + aws.String("NumberAttributeValue"), // Required + // More values... + }, + NULL: aws.Bool(true), + S: aws.String("StringAttributeValue"), + SS: []*string{ + aws.String("StringAttributeValue"), // Required + // More values... + }, + }, + // More values... + }, + ComparisonOperator: aws.String("ComparisonOperator"), + Exists: aws.Bool(true), + Value: &dynamodb.AttributeValue{ + B: []byte("PAYLOAD"), + BOOL: aws.Bool(true), + BS: [][]byte{ + []byte("PAYLOAD"), // Required + // More values... + }, + L: []*dynamodb.AttributeValue{ + { // Required + // Recursive values... + }, + // More values... + }, + M: map[string]*dynamodb.AttributeValue{ + "Key": { // Required + // Recursive values... + }, + // More values... + }, + N: aws.String("NumberAttributeValue"), + NS: []*string{ + aws.String("NumberAttributeValue"), // Required + // More values... + }, + NULL: aws.Bool(true), + S: aws.String("StringAttributeValue"), + SS: []*string{ + aws.String("StringAttributeValue"), // Required + // More values... + }, + }, + }, + // More values... + }, + ExpressionAttributeNames: map[string]*string{ + "Key": aws.String("AttributeName"), // Required + // More values... + }, + ExpressionAttributeValues: map[string]*dynamodb.AttributeValue{ + "Key": { // Required + B: []byte("PAYLOAD"), + BOOL: aws.Bool(true), + BS: [][]byte{ + []byte("PAYLOAD"), // Required + // More values... + }, + L: []*dynamodb.AttributeValue{ + { // Required + // Recursive values... + }, + // More values... + }, + M: map[string]*dynamodb.AttributeValue{ + "Key": { // Required + // Recursive values... + }, + // More values... + }, + N: aws.String("NumberAttributeValue"), + NS: []*string{ + aws.String("NumberAttributeValue"), // Required + // More values... + }, + NULL: aws.Bool(true), + S: aws.String("StringAttributeValue"), + SS: []*string{ + aws.String("StringAttributeValue"), // Required + // More values... + }, + }, + // More values... + }, + ReturnConsumedCapacity: aws.String("ReturnConsumedCapacity"), + ReturnItemCollectionMetrics: aws.String("ReturnItemCollectionMetrics"), + ReturnValues: aws.String("ReturnValue"), + } + resp, err := svc.DeleteItem(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. 
+ fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleDynamoDB_DeleteTable() { + svc := dynamodb.New(session.New()) + + params := &dynamodb.DeleteTableInput{ + TableName: aws.String("TableName"), // Required + } + resp, err := svc.DeleteTable(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleDynamoDB_DescribeTable() { + svc := dynamodb.New(session.New()) + + params := &dynamodb.DescribeTableInput{ + TableName: aws.String("TableName"), // Required + } + resp, err := svc.DescribeTable(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleDynamoDB_GetItem() { + svc := dynamodb.New(session.New()) + + params := &dynamodb.GetItemInput{ + Key: map[string]*dynamodb.AttributeValue{ // Required + "Key": { // Required + B: []byte("PAYLOAD"), + BOOL: aws.Bool(true), + BS: [][]byte{ + []byte("PAYLOAD"), // Required + // More values... + }, + L: []*dynamodb.AttributeValue{ + { // Required + // Recursive values... + }, + // More values... + }, + M: map[string]*dynamodb.AttributeValue{ + "Key": { // Required + // Recursive values... + }, + // More values... + }, + N: aws.String("NumberAttributeValue"), + NS: []*string{ + aws.String("NumberAttributeValue"), // Required + // More values... + }, + NULL: aws.Bool(true), + S: aws.String("StringAttributeValue"), + SS: []*string{ + aws.String("StringAttributeValue"), // Required + // More values... + }, + }, + // More values... + }, + TableName: aws.String("TableName"), // Required + AttributesToGet: []*string{ + aws.String("AttributeName"), // Required + // More values... + }, + ConsistentRead: aws.Bool(true), + ExpressionAttributeNames: map[string]*string{ + "Key": aws.String("AttributeName"), // Required + // More values... + }, + ProjectionExpression: aws.String("ProjectionExpression"), + ReturnConsumedCapacity: aws.String("ReturnConsumedCapacity"), + } + resp, err := svc.GetItem(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleDynamoDB_ListTables() { + svc := dynamodb.New(session.New()) + + params := &dynamodb.ListTablesInput{ + ExclusiveStartTableName: aws.String("TableName"), + Limit: aws.Int64(1), + } + resp, err := svc.ListTables(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleDynamoDB_PutItem() { + svc := dynamodb.New(session.New()) + + params := &dynamodb.PutItemInput{ + Item: map[string]*dynamodb.AttributeValue{ // Required + "Key": { // Required + B: []byte("PAYLOAD"), + BOOL: aws.Bool(true), + BS: [][]byte{ + []byte("PAYLOAD"), // Required + // More values... + }, + L: []*dynamodb.AttributeValue{ + { // Required + // Recursive values... + }, + // More values... + }, + M: map[string]*dynamodb.AttributeValue{ + "Key": { // Required + // Recursive values... + }, + // More values... 
+ }, + N: aws.String("NumberAttributeValue"), + NS: []*string{ + aws.String("NumberAttributeValue"), // Required + // More values... + }, + NULL: aws.Bool(true), + S: aws.String("StringAttributeValue"), + SS: []*string{ + aws.String("StringAttributeValue"), // Required + // More values... + }, + }, + // More values... + }, + TableName: aws.String("TableName"), // Required + ConditionExpression: aws.String("ConditionExpression"), + ConditionalOperator: aws.String("ConditionalOperator"), + Expected: map[string]*dynamodb.ExpectedAttributeValue{ + "Key": { // Required + AttributeValueList: []*dynamodb.AttributeValue{ + { // Required + B: []byte("PAYLOAD"), + BOOL: aws.Bool(true), + BS: [][]byte{ + []byte("PAYLOAD"), // Required + // More values... + }, + L: []*dynamodb.AttributeValue{ + { // Required + // Recursive values... + }, + // More values... + }, + M: map[string]*dynamodb.AttributeValue{ + "Key": { // Required + // Recursive values... + }, + // More values... + }, + N: aws.String("NumberAttributeValue"), + NS: []*string{ + aws.String("NumberAttributeValue"), // Required + // More values... + }, + NULL: aws.Bool(true), + S: aws.String("StringAttributeValue"), + SS: []*string{ + aws.String("StringAttributeValue"), // Required + // More values... + }, + }, + // More values... + }, + ComparisonOperator: aws.String("ComparisonOperator"), + Exists: aws.Bool(true), + Value: &dynamodb.AttributeValue{ + B: []byte("PAYLOAD"), + BOOL: aws.Bool(true), + BS: [][]byte{ + []byte("PAYLOAD"), // Required + // More values... + }, + L: []*dynamodb.AttributeValue{ + { // Required + // Recursive values... + }, + // More values... + }, + M: map[string]*dynamodb.AttributeValue{ + "Key": { // Required + // Recursive values... + }, + // More values... + }, + N: aws.String("NumberAttributeValue"), + NS: []*string{ + aws.String("NumberAttributeValue"), // Required + // More values... + }, + NULL: aws.Bool(true), + S: aws.String("StringAttributeValue"), + SS: []*string{ + aws.String("StringAttributeValue"), // Required + // More values... + }, + }, + }, + // More values... + }, + ExpressionAttributeNames: map[string]*string{ + "Key": aws.String("AttributeName"), // Required + // More values... + }, + ExpressionAttributeValues: map[string]*dynamodb.AttributeValue{ + "Key": { // Required + B: []byte("PAYLOAD"), + BOOL: aws.Bool(true), + BS: [][]byte{ + []byte("PAYLOAD"), // Required + // More values... + }, + L: []*dynamodb.AttributeValue{ + { // Required + // Recursive values... + }, + // More values... + }, + M: map[string]*dynamodb.AttributeValue{ + "Key": { // Required + // Recursive values... + }, + // More values... + }, + N: aws.String("NumberAttributeValue"), + NS: []*string{ + aws.String("NumberAttributeValue"), // Required + // More values... + }, + NULL: aws.Bool(true), + S: aws.String("StringAttributeValue"), + SS: []*string{ + aws.String("StringAttributeValue"), // Required + // More values... + }, + }, + // More values... + }, + ReturnConsumedCapacity: aws.String("ReturnConsumedCapacity"), + ReturnItemCollectionMetrics: aws.String("ReturnItemCollectionMetrics"), + ReturnValues: aws.String("ReturnValue"), + } + resp, err := svc.PutItem(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleDynamoDB_Query() { + svc := dynamodb.New(session.New()) + + params := &dynamodb.QueryInput{ + TableName: aws.String("TableName"), // Required + AttributesToGet: []*string{ + aws.String("AttributeName"), // Required + // More values... + }, + ConditionalOperator: aws.String("ConditionalOperator"), + ConsistentRead: aws.Bool(true), + ExclusiveStartKey: map[string]*dynamodb.AttributeValue{ + "Key": { // Required + B: []byte("PAYLOAD"), + BOOL: aws.Bool(true), + BS: [][]byte{ + []byte("PAYLOAD"), // Required + // More values... + }, + L: []*dynamodb.AttributeValue{ + { // Required + // Recursive values... + }, + // More values... + }, + M: map[string]*dynamodb.AttributeValue{ + "Key": { // Required + // Recursive values... + }, + // More values... + }, + N: aws.String("NumberAttributeValue"), + NS: []*string{ + aws.String("NumberAttributeValue"), // Required + // More values... + }, + NULL: aws.Bool(true), + S: aws.String("StringAttributeValue"), + SS: []*string{ + aws.String("StringAttributeValue"), // Required + // More values... + }, + }, + // More values... + }, + ExpressionAttributeNames: map[string]*string{ + "Key": aws.String("AttributeName"), // Required + // More values... + }, + ExpressionAttributeValues: map[string]*dynamodb.AttributeValue{ + "Key": { // Required + B: []byte("PAYLOAD"), + BOOL: aws.Bool(true), + BS: [][]byte{ + []byte("PAYLOAD"), // Required + // More values... + }, + L: []*dynamodb.AttributeValue{ + { // Required + // Recursive values... + }, + // More values... + }, + M: map[string]*dynamodb.AttributeValue{ + "Key": { // Required + // Recursive values... + }, + // More values... + }, + N: aws.String("NumberAttributeValue"), + NS: []*string{ + aws.String("NumberAttributeValue"), // Required + // More values... + }, + NULL: aws.Bool(true), + S: aws.String("StringAttributeValue"), + SS: []*string{ + aws.String("StringAttributeValue"), // Required + // More values... + }, + }, + // More values... + }, + FilterExpression: aws.String("ConditionExpression"), + IndexName: aws.String("IndexName"), + KeyConditionExpression: aws.String("KeyExpression"), + KeyConditions: map[string]*dynamodb.Condition{ + "Key": { // Required + ComparisonOperator: aws.String("ComparisonOperator"), // Required + AttributeValueList: []*dynamodb.AttributeValue{ + { // Required + B: []byte("PAYLOAD"), + BOOL: aws.Bool(true), + BS: [][]byte{ + []byte("PAYLOAD"), // Required + // More values... + }, + L: []*dynamodb.AttributeValue{ + { // Required + // Recursive values... + }, + // More values... + }, + M: map[string]*dynamodb.AttributeValue{ + "Key": { // Required + // Recursive values... + }, + // More values... + }, + N: aws.String("NumberAttributeValue"), + NS: []*string{ + aws.String("NumberAttributeValue"), // Required + // More values... + }, + NULL: aws.Bool(true), + S: aws.String("StringAttributeValue"), + SS: []*string{ + aws.String("StringAttributeValue"), // Required + // More values... + }, + }, + // More values... + }, + }, + // More values... + }, + Limit: aws.Int64(1), + ProjectionExpression: aws.String("ProjectionExpression"), + QueryFilter: map[string]*dynamodb.Condition{ + "Key": { // Required + ComparisonOperator: aws.String("ComparisonOperator"), // Required + AttributeValueList: []*dynamodb.AttributeValue{ + { // Required + B: []byte("PAYLOAD"), + BOOL: aws.Bool(true), + BS: [][]byte{ + []byte("PAYLOAD"), // Required + // More values... + }, + L: []*dynamodb.AttributeValue{ + { // Required + // Recursive values... + }, + // More values... 
+ }, + M: map[string]*dynamodb.AttributeValue{ + "Key": { // Required + // Recursive values... + }, + // More values... + }, + N: aws.String("NumberAttributeValue"), + NS: []*string{ + aws.String("NumberAttributeValue"), // Required + // More values... + }, + NULL: aws.Bool(true), + S: aws.String("StringAttributeValue"), + SS: []*string{ + aws.String("StringAttributeValue"), // Required + // More values... + }, + }, + // More values... + }, + }, + // More values... + }, + ReturnConsumedCapacity: aws.String("ReturnConsumedCapacity"), + ScanIndexForward: aws.Bool(true), + Select: aws.String("Select"), + } + resp, err := svc.Query(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleDynamoDB_Scan() { + svc := dynamodb.New(session.New()) + + params := &dynamodb.ScanInput{ + TableName: aws.String("TableName"), // Required + AttributesToGet: []*string{ + aws.String("AttributeName"), // Required + // More values... + }, + ConditionalOperator: aws.String("ConditionalOperator"), + ConsistentRead: aws.Bool(true), + ExclusiveStartKey: map[string]*dynamodb.AttributeValue{ + "Key": { // Required + B: []byte("PAYLOAD"), + BOOL: aws.Bool(true), + BS: [][]byte{ + []byte("PAYLOAD"), // Required + // More values... + }, + L: []*dynamodb.AttributeValue{ + { // Required + // Recursive values... + }, + // More values... + }, + M: map[string]*dynamodb.AttributeValue{ + "Key": { // Required + // Recursive values... + }, + // More values... + }, + N: aws.String("NumberAttributeValue"), + NS: []*string{ + aws.String("NumberAttributeValue"), // Required + // More values... + }, + NULL: aws.Bool(true), + S: aws.String("StringAttributeValue"), + SS: []*string{ + aws.String("StringAttributeValue"), // Required + // More values... + }, + }, + // More values... + }, + ExpressionAttributeNames: map[string]*string{ + "Key": aws.String("AttributeName"), // Required + // More values... + }, + ExpressionAttributeValues: map[string]*dynamodb.AttributeValue{ + "Key": { // Required + B: []byte("PAYLOAD"), + BOOL: aws.Bool(true), + BS: [][]byte{ + []byte("PAYLOAD"), // Required + // More values... + }, + L: []*dynamodb.AttributeValue{ + { // Required + // Recursive values... + }, + // More values... + }, + M: map[string]*dynamodb.AttributeValue{ + "Key": { // Required + // Recursive values... + }, + // More values... + }, + N: aws.String("NumberAttributeValue"), + NS: []*string{ + aws.String("NumberAttributeValue"), // Required + // More values... + }, + NULL: aws.Bool(true), + S: aws.String("StringAttributeValue"), + SS: []*string{ + aws.String("StringAttributeValue"), // Required + // More values... + }, + }, + // More values... + }, + FilterExpression: aws.String("ConditionExpression"), + IndexName: aws.String("IndexName"), + Limit: aws.Int64(1), + ProjectionExpression: aws.String("ProjectionExpression"), + ReturnConsumedCapacity: aws.String("ReturnConsumedCapacity"), + ScanFilter: map[string]*dynamodb.Condition{ + "Key": { // Required + ComparisonOperator: aws.String("ComparisonOperator"), // Required + AttributeValueList: []*dynamodb.AttributeValue{ + { // Required + B: []byte("PAYLOAD"), + BOOL: aws.Bool(true), + BS: [][]byte{ + []byte("PAYLOAD"), // Required + // More values... + }, + L: []*dynamodb.AttributeValue{ + { // Required + // Recursive values... + }, + // More values... 
+ }, + M: map[string]*dynamodb.AttributeValue{ + "Key": { // Required + // Recursive values... + }, + // More values... + }, + N: aws.String("NumberAttributeValue"), + NS: []*string{ + aws.String("NumberAttributeValue"), // Required + // More values... + }, + NULL: aws.Bool(true), + S: aws.String("StringAttributeValue"), + SS: []*string{ + aws.String("StringAttributeValue"), // Required + // More values... + }, + }, + // More values... + }, + }, + // More values... + }, + Segment: aws.Int64(1), + Select: aws.String("Select"), + TotalSegments: aws.Int64(1), + } + resp, err := svc.Scan(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleDynamoDB_UpdateItem() { + svc := dynamodb.New(session.New()) + + params := &dynamodb.UpdateItemInput{ + Key: map[string]*dynamodb.AttributeValue{ // Required + "Key": { // Required + B: []byte("PAYLOAD"), + BOOL: aws.Bool(true), + BS: [][]byte{ + []byte("PAYLOAD"), // Required + // More values... + }, + L: []*dynamodb.AttributeValue{ + { // Required + // Recursive values... + }, + // More values... + }, + M: map[string]*dynamodb.AttributeValue{ + "Key": { // Required + // Recursive values... + }, + // More values... + }, + N: aws.String("NumberAttributeValue"), + NS: []*string{ + aws.String("NumberAttributeValue"), // Required + // More values... + }, + NULL: aws.Bool(true), + S: aws.String("StringAttributeValue"), + SS: []*string{ + aws.String("StringAttributeValue"), // Required + // More values... + }, + }, + // More values... + }, + TableName: aws.String("TableName"), // Required + AttributeUpdates: map[string]*dynamodb.AttributeValueUpdate{ + "Key": { // Required + Action: aws.String("AttributeAction"), + Value: &dynamodb.AttributeValue{ + B: []byte("PAYLOAD"), + BOOL: aws.Bool(true), + BS: [][]byte{ + []byte("PAYLOAD"), // Required + // More values... + }, + L: []*dynamodb.AttributeValue{ + { // Required + // Recursive values... + }, + // More values... + }, + M: map[string]*dynamodb.AttributeValue{ + "Key": { // Required + // Recursive values... + }, + // More values... + }, + N: aws.String("NumberAttributeValue"), + NS: []*string{ + aws.String("NumberAttributeValue"), // Required + // More values... + }, + NULL: aws.Bool(true), + S: aws.String("StringAttributeValue"), + SS: []*string{ + aws.String("StringAttributeValue"), // Required + // More values... + }, + }, + }, + // More values... + }, + ConditionExpression: aws.String("ConditionExpression"), + ConditionalOperator: aws.String("ConditionalOperator"), + Expected: map[string]*dynamodb.ExpectedAttributeValue{ + "Key": { // Required + AttributeValueList: []*dynamodb.AttributeValue{ + { // Required + B: []byte("PAYLOAD"), + BOOL: aws.Bool(true), + BS: [][]byte{ + []byte("PAYLOAD"), // Required + // More values... + }, + L: []*dynamodb.AttributeValue{ + { // Required + // Recursive values... + }, + // More values... + }, + M: map[string]*dynamodb.AttributeValue{ + "Key": { // Required + // Recursive values... + }, + // More values... + }, + N: aws.String("NumberAttributeValue"), + NS: []*string{ + aws.String("NumberAttributeValue"), // Required + // More values... + }, + NULL: aws.Bool(true), + S: aws.String("StringAttributeValue"), + SS: []*string{ + aws.String("StringAttributeValue"), // Required + // More values... + }, + }, + // More values... 
+ }, + ComparisonOperator: aws.String("ComparisonOperator"), + Exists: aws.Bool(true), + Value: &dynamodb.AttributeValue{ + B: []byte("PAYLOAD"), + BOOL: aws.Bool(true), + BS: [][]byte{ + []byte("PAYLOAD"), // Required + // More values... + }, + L: []*dynamodb.AttributeValue{ + { // Required + // Recursive values... + }, + // More values... + }, + M: map[string]*dynamodb.AttributeValue{ + "Key": { // Required + // Recursive values... + }, + // More values... + }, + N: aws.String("NumberAttributeValue"), + NS: []*string{ + aws.String("NumberAttributeValue"), // Required + // More values... + }, + NULL: aws.Bool(true), + S: aws.String("StringAttributeValue"), + SS: []*string{ + aws.String("StringAttributeValue"), // Required + // More values... + }, + }, + }, + // More values... + }, + ExpressionAttributeNames: map[string]*string{ + "Key": aws.String("AttributeName"), // Required + // More values... + }, + ExpressionAttributeValues: map[string]*dynamodb.AttributeValue{ + "Key": { // Required + B: []byte("PAYLOAD"), + BOOL: aws.Bool(true), + BS: [][]byte{ + []byte("PAYLOAD"), // Required + // More values... + }, + L: []*dynamodb.AttributeValue{ + { // Required + // Recursive values... + }, + // More values... + }, + M: map[string]*dynamodb.AttributeValue{ + "Key": { // Required + // Recursive values... + }, + // More values... + }, + N: aws.String("NumberAttributeValue"), + NS: []*string{ + aws.String("NumberAttributeValue"), // Required + // More values... + }, + NULL: aws.Bool(true), + S: aws.String("StringAttributeValue"), + SS: []*string{ + aws.String("StringAttributeValue"), // Required + // More values... + }, + }, + // More values... + }, + ReturnConsumedCapacity: aws.String("ReturnConsumedCapacity"), + ReturnItemCollectionMetrics: aws.String("ReturnItemCollectionMetrics"), + ReturnValues: aws.String("ReturnValue"), + UpdateExpression: aws.String("UpdateExpression"), + } + resp, err := svc.UpdateItem(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleDynamoDB_UpdateTable() { + svc := dynamodb.New(session.New()) + + params := &dynamodb.UpdateTableInput{ + TableName: aws.String("TableName"), // Required + AttributeDefinitions: []*dynamodb.AttributeDefinition{ + { // Required + AttributeName: aws.String("KeySchemaAttributeName"), // Required + AttributeType: aws.String("ScalarAttributeType"), // Required + }, + // More values... + }, + GlobalSecondaryIndexUpdates: []*dynamodb.GlobalSecondaryIndexUpdate{ + { // Required + Create: &dynamodb.CreateGlobalSecondaryIndexAction{ + IndexName: aws.String("IndexName"), // Required + KeySchema: []*dynamodb.KeySchemaElement{ // Required + { // Required + AttributeName: aws.String("KeySchemaAttributeName"), // Required + KeyType: aws.String("KeyType"), // Required + }, + // More values... + }, + Projection: &dynamodb.Projection{ // Required + NonKeyAttributes: []*string{ + aws.String("NonKeyAttributeName"), // Required + // More values... 
+ }, + ProjectionType: aws.String("ProjectionType"), + }, + ProvisionedThroughput: &dynamodb.ProvisionedThroughput{ // Required + ReadCapacityUnits: aws.Int64(1), // Required + WriteCapacityUnits: aws.Int64(1), // Required + }, + }, + Delete: &dynamodb.DeleteGlobalSecondaryIndexAction{ + IndexName: aws.String("IndexName"), // Required + }, + Update: &dynamodb.UpdateGlobalSecondaryIndexAction{ + IndexName: aws.String("IndexName"), // Required + ProvisionedThroughput: &dynamodb.ProvisionedThroughput{ // Required + ReadCapacityUnits: aws.Int64(1), // Required + WriteCapacityUnits: aws.Int64(1), // Required + }, + }, + }, + // More values... + }, + ProvisionedThroughput: &dynamodb.ProvisionedThroughput{ + ReadCapacityUnits: aws.Int64(1), // Required + WriteCapacityUnits: aws.Int64(1), // Required + }, + StreamSpecification: &dynamodb.StreamSpecification{ + StreamEnabled: aws.Bool(true), + StreamViewType: aws.String("StreamViewType"), + }, + } + resp, err := svc.UpdateTable(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/dynamodb/service.go b/vendor/github.com/aws/aws-sdk-go/service/dynamodb/service.go new file mode 100644 index 000000000..12fc1d81f --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/dynamodb/service.go @@ -0,0 +1,190 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +package dynamodb + +import ( + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/client" + "github.com/aws/aws-sdk-go/aws/client/metadata" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol/jsonrpc" + "github.com/aws/aws-sdk-go/private/signer/v4" +) + +// Overview +// +// This is the Amazon DynamoDB API Reference. This guide provides descriptions +// and samples of the low-level DynamoDB API. For information about DynamoDB +// application development, see the Amazon DynamoDB Developer Guide (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/). +// +// Instead of making the requests to the low-level DynamoDB API directly from +// your application, we recommend that you use the AWS Software Development +// Kits (SDKs). The easy-to-use libraries in the AWS SDKs make it unnecessary +// to call the low-level DynamoDB API directly from your application. The libraries +// take care of request authentication, serialization, and connection management. +// For more information, see Using the AWS SDKs with DynamoDB (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/UsingAWSSDK.html) +// in the Amazon DynamoDB Developer Guide. +// +// If you decide to code against the low-level DynamoDB API directly, you will +// need to write the necessary code to authenticate your requests. For more +// information on signing your requests, see Using the DynamoDB API (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/API.html) +// in the Amazon DynamoDB Developer Guide. +// +// The following are short descriptions of each low-level API action, organized +// by function. +// +// Managing Tables +// +// CreateTable - Creates a table with user-specified provisioned throughput +// settings. You must designate one attribute as the hash primary key for the +// table; you can optionally designate a second attribute as the range primary +// key. 
DynamoDB creates indexes on these key attributes for fast data access.
+// Optionally, you can create one or more secondary indexes, which provide fast
+// data access using non-key attributes.
+//
+// DescribeTable - Returns metadata for a table, such as table size, status,
+// and index information.
+//
+// UpdateTable - Modifies the provisioned throughput settings for a table.
+// Optionally, you can modify the provisioned throughput settings for global
+// secondary indexes on the table.
+//
+// ListTables - Returns a list of all tables associated with the current
+// AWS account and endpoint.
+//
+// DeleteTable - Deletes a table and all of its indexes.
+//
+// For conceptual information about managing tables, see Working with Tables
+// (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/WorkingWithTables.html)
+// in the Amazon DynamoDB Developer Guide.
+//
+// Reading Data
+//
+// GetItem - Returns a set of attributes for the item that has a given primary
+// key. By default, GetItem performs an eventually consistent read; however,
+// applications can request a strongly consistent read instead.
+//
+// BatchGetItem - Performs multiple GetItem requests for data items using
+// their primary keys, from one table or multiple tables. The response from
+// BatchGetItem has a size limit of 16 MB and returns a maximum of 100 items.
+// Both eventually consistent and strongly consistent reads can be used.
+//
+// Query - Returns one or more items from a table or a secondary index. You
+// must provide a specific hash key value. You can narrow the scope of the query
+// using comparison operators against a range key value, or on the index key.
+// Query supports either eventual or strong consistency. A single response has
+// a size limit of 1 MB.
+//
+// Scan - Reads every item in a table; the result set is eventually consistent.
+// You can limit the number of items returned by filtering the data attributes,
+// using conditional expressions. Scan can be used to enable ad-hoc querying
+// of a table against non-key attributes; however, since this is a full table
+// scan without using an index, Scan should not be used for any application
+// query use case that requires predictable performance.
+//
+// For conceptual information about reading data, see Working with Items
+// (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/WorkingWithItems.html)
+// and Query and Scan Operations (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/QueryAndScan.html)
+// in the Amazon DynamoDB Developer Guide.
+//
+// Modifying Data
+//
+// PutItem - Creates a new item, or replaces an existing item with a new
+// item (including all the attributes). By default, if an item in the table
+// already exists with the same primary key, the new item completely replaces
+// the existing item. You can use conditional operators to replace an item only
+// if its attribute values match certain conditions, or to insert a new item
+// only if that item doesn't already exist.
+//
+// UpdateItem - Modifies the attributes of an existing item. You can also
+// use conditional operators to perform an update only if the item's attribute
+// values match certain conditions.
+//
+// DeleteItem - Deletes an item in a table by primary key. You can use conditional
+// operators to delete an item only if the item's attribute values
+// match certain conditions.
+//
+// BatchWriteItem - Performs multiple PutItem and DeleteItem requests across
+// multiple tables in a single request. A failure of any request(s) in the batch
+// will not cause the entire BatchWriteItem operation to fail. Supports batches
+// of up to 25 items to put or delete, with a maximum total request size of
+// 16 MB.
+//
+// For conceptual information about modifying data, see Working with Items
+// (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/WorkingWithItems.html)
+// and Query and Scan Operations (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/QueryAndScan.html)
+// in the Amazon DynamoDB Developer Guide.
+//
+// The service client's operations are safe to be used concurrently.
+// It is not safe to mutate any of the client's properties though.
+type DynamoDB struct {
+	*client.Client
+}
+
+// Used for custom client initialization logic
+var initClient func(*client.Client)
+
+// Used for custom request initialization logic
+var initRequest func(*request.Request)
+
+// A ServiceName is the name of the service the client will make API calls to.
+const ServiceName = "dynamodb"
+
+// New creates a new instance of the DynamoDB client with a session.
+// If additional configuration is needed for the client instance use the optional
+// aws.Config parameter to add your extra config.
+//
+// Example:
+//     // Create a DynamoDB client from just a session.
+//     svc := dynamodb.New(mySession)
+//
+//     // Create a DynamoDB client with additional configuration
+//     svc := dynamodb.New(mySession, aws.NewConfig().WithRegion("us-west-2"))
+func New(p client.ConfigProvider, cfgs ...*aws.Config) *DynamoDB {
+	c := p.ClientConfig(ServiceName, cfgs...)
+	return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion)
+}
+
+// newClient creates, initializes and returns a new service client instance.
+func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *DynamoDB {
+	svc := &DynamoDB{
+		Client: client.New(
+			cfg,
+			metadata.ClientInfo{
+				ServiceName:   ServiceName,
+				SigningRegion: signingRegion,
+				Endpoint:      endpoint,
+				APIVersion:    "2012-08-10",
+				JSONVersion:   "1.0",
+				TargetPrefix:  "DynamoDB_20120810",
+			},
+			handlers,
+		),
+	}
+
+	// Handlers
+	svc.Handlers.Sign.PushBack(v4.Sign)
+	svc.Handlers.Build.PushBack(jsonrpc.Build)
+	svc.Handlers.Unmarshal.PushBack(jsonrpc.Unmarshal)
+	svc.Handlers.UnmarshalMeta.PushBack(jsonrpc.UnmarshalMeta)
+	svc.Handlers.UnmarshalError.PushBack(jsonrpc.UnmarshalError)
+
+	// Run custom client initialization if present
+	if initClient != nil {
+		initClient(svc.Client)
+	}
+
+	return svc
+}
+
+// newRequest creates a new request for a DynamoDB operation and runs any
+// custom request initialization.
+func (c *DynamoDB) newRequest(op *request.Operation, params, data interface{}) *request.Request {
+	req := c.NewRequest(op, params, data)
+
+	// Run custom request initialization if present
+	if initRequest != nil {
+		initRequest(req)
+	}
+
+	return req
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/service/dynamodb/waiters.go b/vendor/github.com/aws/aws-sdk-go/service/dynamodb/waiters.go
new file mode 100644
index 000000000..4deeed7a5
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/dynamodb/waiters.go
@@ -0,0 +1,59 @@
+// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT.
+ +package dynamodb + +import ( + "github.com/aws/aws-sdk-go/private/waiter" +) + +func (c *DynamoDB) WaitUntilTableExists(input *DescribeTableInput) error { + waiterCfg := waiter.Config{ + Operation: "DescribeTable", + Delay: 20, + MaxAttempts: 25, + Acceptors: []waiter.WaitAcceptor{ + { + State: "success", + Matcher: "path", + Argument: "Table.TableStatus", + Expected: "ACTIVE", + }, + { + State: "retry", + Matcher: "error", + Argument: "", + Expected: "ResourceNotFoundException", + }, + }, + } + + w := waiter.Waiter{ + Client: c, + Input: input, + Config: waiterCfg, + } + return w.Wait() +} + +func (c *DynamoDB) WaitUntilTableNotExists(input *DescribeTableInput) error { + waiterCfg := waiter.Config{ + Operation: "DescribeTable", + Delay: 20, + MaxAttempts: 25, + Acceptors: []waiter.WaitAcceptor{ + { + State: "success", + Matcher: "error", + Argument: "", + Expected: "ResourceNotFoundException", + }, + }, + } + + w := waiter.Waiter{ + Client: c, + Input: input, + Config: waiterCfg, + } + return w.Wait() +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/ec2/api.go b/vendor/github.com/aws/aws-sdk-go/service/ec2/api.go new file mode 100644 index 000000000..cd018821b --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/ec2/api.go @@ -0,0 +1,24975 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +// Package ec2 provides a client for Amazon Elastic Compute Cloud. +package ec2 + +import ( + "time" + + "github.com/aws/aws-sdk-go/aws/awsutil" + "github.com/aws/aws-sdk-go/aws/request" +) + +const opAcceptVpcPeeringConnection = "AcceptVpcPeeringConnection" + +// AcceptVpcPeeringConnectionRequest generates a request for the AcceptVpcPeeringConnection operation. +func (c *EC2) AcceptVpcPeeringConnectionRequest(input *AcceptVpcPeeringConnectionInput) (req *request.Request, output *AcceptVpcPeeringConnectionOutput) { + op := &request.Operation{ + Name: opAcceptVpcPeeringConnection, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &AcceptVpcPeeringConnectionInput{} + } + + req = c.newRequest(op, input, output) + output = &AcceptVpcPeeringConnectionOutput{} + req.Data = output + return +} + +// Accept a VPC peering connection request. To accept a request, the VPC peering +// connection must be in the pending-acceptance state, and you must be the owner +// of the peer VPC. Use the DescribeVpcPeeringConnections request to view your +// outstanding VPC peering connection requests. +func (c *EC2) AcceptVpcPeeringConnection(input *AcceptVpcPeeringConnectionInput) (*AcceptVpcPeeringConnectionOutput, error) { + req, out := c.AcceptVpcPeeringConnectionRequest(input) + err := req.Send() + return out, err +} + +const opAllocateAddress = "AllocateAddress" + +// AllocateAddressRequest generates a request for the AllocateAddress operation. +func (c *EC2) AllocateAddressRequest(input *AllocateAddressInput) (req *request.Request, output *AllocateAddressOutput) { + op := &request.Operation{ + Name: opAllocateAddress, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &AllocateAddressInput{} + } + + req = c.newRequest(op, input, output) + output = &AllocateAddressOutput{} + req.Data = output + return +} + +// Acquires an Elastic IP address. +// +// An Elastic IP address is for use either in the EC2-Classic platform or in +// a VPC. For more information, see Elastic IP Addresses (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/elastic-ip-addresses-eip.html) +// in the Amazon Elastic Compute Cloud User Guide. 
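+//
+// A minimal usage sketch (an editorial illustration, not generated SDK text;
+// the Domain value and the error handling are assumptions):
+//
+//     svc := ec2.New(session.New())
+//     out, err := svc.AllocateAddress(&ec2.AllocateAddressInput{
+//         Domain: aws.String("vpc"), // "vpc" or "standard" (EC2-Classic)
+//     })
+//     if err != nil {
+//         fmt.Println(err.Error())
+//         return
+//     }
+//     fmt.Println(*out.PublicIp) // for "vpc", out.AllocationId is also set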
+func (c *EC2) AllocateAddress(input *AllocateAddressInput) (*AllocateAddressOutput, error) { + req, out := c.AllocateAddressRequest(input) + err := req.Send() + return out, err +} + +const opAllocateHosts = "AllocateHosts" + +// AllocateHostsRequest generates a request for the AllocateHosts operation. +func (c *EC2) AllocateHostsRequest(input *AllocateHostsInput) (req *request.Request, output *AllocateHostsOutput) { + op := &request.Operation{ + Name: opAllocateHosts, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &AllocateHostsInput{} + } + + req = c.newRequest(op, input, output) + output = &AllocateHostsOutput{} + req.Data = output + return +} + +// Allocates a Dedicated host to your account. At minimum you need to specify +// the instance size type, Availability Zone, and quantity of hosts you want +// to allocate. +func (c *EC2) AllocateHosts(input *AllocateHostsInput) (*AllocateHostsOutput, error) { + req, out := c.AllocateHostsRequest(input) + err := req.Send() + return out, err +} + +const opAssignPrivateIpAddresses = "AssignPrivateIpAddresses" + +// AssignPrivateIpAddressesRequest generates a request for the AssignPrivateIpAddresses operation. +func (c *EC2) AssignPrivateIpAddressesRequest(input *AssignPrivateIpAddressesInput) (req *request.Request, output *AssignPrivateIpAddressesOutput) { + op := &request.Operation{ + Name: opAssignPrivateIpAddresses, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &AssignPrivateIpAddressesInput{} + } + + req = c.newRequest(op, input, output) + output = &AssignPrivateIpAddressesOutput{} + req.Data = output + return +} + +// Assigns one or more secondary private IP addresses to the specified network +// interface. You can specify one or more specific secondary IP addresses, or +// you can specify the number of secondary IP addresses to be automatically +// assigned within the subnet's CIDR block range. The number of secondary IP +// addresses that you can assign to an instance varies by instance type. For +// information about instance types, see Instance Types (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-types.html) +// in the Amazon Elastic Compute Cloud User Guide. For more information about +// Elastic IP addresses, see Elastic IP Addresses (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/elastic-ip-addresses-eip.html) +// in the Amazon Elastic Compute Cloud User Guide. +// +// AssignPrivateIpAddresses is available only in EC2-VPC. +func (c *EC2) AssignPrivateIpAddresses(input *AssignPrivateIpAddressesInput) (*AssignPrivateIpAddressesOutput, error) { + req, out := c.AssignPrivateIpAddressesRequest(input) + err := req.Send() + return out, err +} + +const opAssociateAddress = "AssociateAddress" + +// AssociateAddressRequest generates a request for the AssociateAddress operation. +func (c *EC2) AssociateAddressRequest(input *AssociateAddressInput) (req *request.Request, output *AssociateAddressOutput) { + op := &request.Operation{ + Name: opAssociateAddress, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &AssociateAddressInput{} + } + + req = c.newRequest(op, input, output) + output = &AssociateAddressOutput{} + req.Data = output + return +} + +// Associates an Elastic IP address with an instance or a network interface. +// +// An Elastic IP address is for use in either the EC2-Classic platform or in +// a VPC. 
For more information, see Elastic IP Addresses (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/elastic-ip-addresses-eip.html) +// in the Amazon Elastic Compute Cloud User Guide. +// +// [EC2-Classic, VPC in an EC2-VPC-only account] If the Elastic IP address +// is already associated with a different instance, it is disassociated from +// that instance and associated with the specified instance. +// +// [VPC in an EC2-Classic account] If you don't specify a private IP address, +// the Elastic IP address is associated with the primary IP address. If the +// Elastic IP address is already associated with a different instance or a network +// interface, you get an error unless you allow reassociation. +// +// This is an idempotent operation. If you perform the operation more than +// once, Amazon EC2 doesn't return an error. +func (c *EC2) AssociateAddress(input *AssociateAddressInput) (*AssociateAddressOutput, error) { + req, out := c.AssociateAddressRequest(input) + err := req.Send() + return out, err +} + +const opAssociateDhcpOptions = "AssociateDhcpOptions" + +// AssociateDhcpOptionsRequest generates a request for the AssociateDhcpOptions operation. +func (c *EC2) AssociateDhcpOptionsRequest(input *AssociateDhcpOptionsInput) (req *request.Request, output *AssociateDhcpOptionsOutput) { + op := &request.Operation{ + Name: opAssociateDhcpOptions, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &AssociateDhcpOptionsInput{} + } + + req = c.newRequest(op, input, output) + output = &AssociateDhcpOptionsOutput{} + req.Data = output + return +} + +// Associates a set of DHCP options (that you've previously created) with the +// specified VPC, or associates no DHCP options with the VPC. +// +// After you associate the options with the VPC, any existing instances and +// all new instances that you launch in that VPC use the options. You don't +// need to restart or relaunch the instances. They automatically pick up the +// changes within a few hours, depending on how frequently the instance renews +// its DHCP lease. You can explicitly renew the lease using the operating system +// on the instance. +// +// For more information, see DHCP Options Sets (http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_DHCP_Options.html) +// in the Amazon Virtual Private Cloud User Guide. +func (c *EC2) AssociateDhcpOptions(input *AssociateDhcpOptionsInput) (*AssociateDhcpOptionsOutput, error) { + req, out := c.AssociateDhcpOptionsRequest(input) + err := req.Send() + return out, err +} + +const opAssociateRouteTable = "AssociateRouteTable" + +// AssociateRouteTableRequest generates a request for the AssociateRouteTable operation. +func (c *EC2) AssociateRouteTableRequest(input *AssociateRouteTableInput) (req *request.Request, output *AssociateRouteTableOutput) { + op := &request.Operation{ + Name: opAssociateRouteTable, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &AssociateRouteTableInput{} + } + + req = c.newRequest(op, input, output) + output = &AssociateRouteTableOutput{} + req.Data = output + return +} + +// Associates a subnet with a route table. The subnet and route table must be +// in the same VPC. This association causes traffic originating from the subnet +// to be routed according to the routes in the route table. The action returns +// an association ID, which you need in order to disassociate the route table +// from the subnet later. A route table can be associated with multiple subnets. 
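+//
+// A sketch of the call (illustrative only; the IDs are hypothetical
+// placeholders):
+//
+//     out, err := svc.AssociateRouteTable(&ec2.AssociateRouteTableInput{
+//         RouteTableId: aws.String("rtb-0a1b2c3d"),
+//         SubnetId:     aws.String("subnet-0a1b2c3d"),
+//     })
+//     // On success, *out.AssociationId is the value you later pass to
+//     // DisassociateRouteTable.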
+// +// For more information about route tables, see Route Tables (http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_Route_Tables.html) +// in the Amazon Virtual Private Cloud User Guide. +func (c *EC2) AssociateRouteTable(input *AssociateRouteTableInput) (*AssociateRouteTableOutput, error) { + req, out := c.AssociateRouteTableRequest(input) + err := req.Send() + return out, err +} + +const opAttachClassicLinkVpc = "AttachClassicLinkVpc" + +// AttachClassicLinkVpcRequest generates a request for the AttachClassicLinkVpc operation. +func (c *EC2) AttachClassicLinkVpcRequest(input *AttachClassicLinkVpcInput) (req *request.Request, output *AttachClassicLinkVpcOutput) { + op := &request.Operation{ + Name: opAttachClassicLinkVpc, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &AttachClassicLinkVpcInput{} + } + + req = c.newRequest(op, input, output) + output = &AttachClassicLinkVpcOutput{} + req.Data = output + return +} + +// Links an EC2-Classic instance to a ClassicLink-enabled VPC through one or +// more of the VPC's security groups. You cannot link an EC2-Classic instance +// to more than one VPC at a time. You can only link an instance that's in the +// running state. An instance is automatically unlinked from a VPC when it's +// stopped - you can link it to the VPC again when you restart it. +// +// After you've linked an instance, you cannot change the VPC security groups +// that are associated with it. To change the security groups, you must first +// unlink the instance, and then link it again. +// +// Linking your instance to a VPC is sometimes referred to as attaching your +// instance. +func (c *EC2) AttachClassicLinkVpc(input *AttachClassicLinkVpcInput) (*AttachClassicLinkVpcOutput, error) { + req, out := c.AttachClassicLinkVpcRequest(input) + err := req.Send() + return out, err +} + +const opAttachInternetGateway = "AttachInternetGateway" + +// AttachInternetGatewayRequest generates a request for the AttachInternetGateway operation. +func (c *EC2) AttachInternetGatewayRequest(input *AttachInternetGatewayInput) (req *request.Request, output *AttachInternetGatewayOutput) { + op := &request.Operation{ + Name: opAttachInternetGateway, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &AttachInternetGatewayInput{} + } + + req = c.newRequest(op, input, output) + output = &AttachInternetGatewayOutput{} + req.Data = output + return +} + +// Attaches an Internet gateway to a VPC, enabling connectivity between the +// Internet and the VPC. For more information about your VPC and Internet gateway, +// see the Amazon Virtual Private Cloud User Guide (http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/). +func (c *EC2) AttachInternetGateway(input *AttachInternetGatewayInput) (*AttachInternetGatewayOutput, error) { + req, out := c.AttachInternetGatewayRequest(input) + err := req.Send() + return out, err +} + +const opAttachNetworkInterface = "AttachNetworkInterface" + +// AttachNetworkInterfaceRequest generates a request for the AttachNetworkInterface operation. 
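+//
+// As with every XxxRequest method in this package, the returned request is
+// not sent until you call Send, which populates the paired output value (a
+// hedged illustration of the pattern used throughout this file):
+//
+//     req, out := svc.AttachNetworkInterfaceRequest(input)
+//     if err := req.Send(); err == nil {
+//         fmt.Println(out)
+//     }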
+func (c *EC2) AttachNetworkInterfaceRequest(input *AttachNetworkInterfaceInput) (req *request.Request, output *AttachNetworkInterfaceOutput) { + op := &request.Operation{ + Name: opAttachNetworkInterface, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &AttachNetworkInterfaceInput{} + } + + req = c.newRequest(op, input, output) + output = &AttachNetworkInterfaceOutput{} + req.Data = output + return +} + +// Attaches a network interface to an instance. +func (c *EC2) AttachNetworkInterface(input *AttachNetworkInterfaceInput) (*AttachNetworkInterfaceOutput, error) { + req, out := c.AttachNetworkInterfaceRequest(input) + err := req.Send() + return out, err +} + +const opAttachVolume = "AttachVolume" + +// AttachVolumeRequest generates a request for the AttachVolume operation. +func (c *EC2) AttachVolumeRequest(input *AttachVolumeInput) (req *request.Request, output *VolumeAttachment) { + op := &request.Operation{ + Name: opAttachVolume, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &AttachVolumeInput{} + } + + req = c.newRequest(op, input, output) + output = &VolumeAttachment{} + req.Data = output + return +} + +// Attaches an EBS volume to a running or stopped instance and exposes it to +// the instance with the specified device name. +// +// Encrypted EBS volumes may only be attached to instances that support Amazon +// EBS encryption. For more information, see Amazon EBS Encryption (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSEncryption.html) +// in the Amazon Elastic Compute Cloud User Guide. +// +// For a list of supported device names, see Attaching an EBS Volume to an +// Instance (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-attaching-volume.html). +// Any device names that aren't reserved for instance store volumes can be used +// for EBS volumes. For more information, see Amazon EC2 Instance Store (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/InstanceStorage.html) +// in the Amazon Elastic Compute Cloud User Guide. +// +// If a volume has an AWS Marketplace product code: +// +// The volume can be attached only to a stopped instance. AWS Marketplace +// product codes are copied from the volume to the instance. You must be subscribed +// to the product. The instance type and operating system of the instance must +// support the product. For example, you can't detach a volume from a Windows +// instance and attach it to a Linux instance. For an overview of the AWS Marketplace, +// see Introducing AWS Marketplace (https://aws.amazon.com/marketplace/help/200900000). +// +// For more information about EBS volumes, see Attaching Amazon EBS Volumes +// (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-attaching-volume.html) +// in the Amazon Elastic Compute Cloud User Guide. +func (c *EC2) AttachVolume(input *AttachVolumeInput) (*VolumeAttachment, error) { + req, out := c.AttachVolumeRequest(input) + err := req.Send() + return out, err +} + +const opAttachVpnGateway = "AttachVpnGateway" + +// AttachVpnGatewayRequest generates a request for the AttachVpnGateway operation. 
+func (c *EC2) AttachVpnGatewayRequest(input *AttachVpnGatewayInput) (req *request.Request, output *AttachVpnGatewayOutput) { + op := &request.Operation{ + Name: opAttachVpnGateway, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &AttachVpnGatewayInput{} + } + + req = c.newRequest(op, input, output) + output = &AttachVpnGatewayOutput{} + req.Data = output + return +} + +// Attaches a virtual private gateway to a VPC. For more information, see Adding +// a Hardware Virtual Private Gateway to Your VPC (http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_VPN.html) +// in the Amazon Virtual Private Cloud User Guide. +func (c *EC2) AttachVpnGateway(input *AttachVpnGatewayInput) (*AttachVpnGatewayOutput, error) { + req, out := c.AttachVpnGatewayRequest(input) + err := req.Send() + return out, err +} + +const opAuthorizeSecurityGroupEgress = "AuthorizeSecurityGroupEgress" + +// AuthorizeSecurityGroupEgressRequest generates a request for the AuthorizeSecurityGroupEgress operation. +func (c *EC2) AuthorizeSecurityGroupEgressRequest(input *AuthorizeSecurityGroupEgressInput) (req *request.Request, output *AuthorizeSecurityGroupEgressOutput) { + op := &request.Operation{ + Name: opAuthorizeSecurityGroupEgress, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &AuthorizeSecurityGroupEgressInput{} + } + + req = c.newRequest(op, input, output) + output = &AuthorizeSecurityGroupEgressOutput{} + req.Data = output + return +} + +// [EC2-VPC only] Adds one or more egress rules to a security group for use +// with a VPC. Specifically, this action permits instances to send traffic to +// one or more destination CIDR IP address ranges, or to one or more destination +// security groups for the same VPC. This action doesn't apply to security groups +// for use in EC2-Classic. For more information, see Security Groups for Your +// VPC (http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_SecurityGroups.html) +// in the Amazon Virtual Private Cloud User Guide. +// +// You can have up to 50 rules per security group (covering both ingress and +// egress rules). +// +// Each rule consists of the protocol (for example, TCP), plus either a CIDR +// range or a source group. For the TCP and UDP protocols, you must also specify +// the destination port or port range. For the ICMP protocol, you must also +// specify the ICMP type and code. You can use -1 for the type or code to mean +// all types or all codes. +// +// Rule changes are propagated to affected instances as quickly as possible. +// However, a small delay might occur. +func (c *EC2) AuthorizeSecurityGroupEgress(input *AuthorizeSecurityGroupEgressInput) (*AuthorizeSecurityGroupEgressOutput, error) { + req, out := c.AuthorizeSecurityGroupEgressRequest(input) + err := req.Send() + return out, err +} + +const opAuthorizeSecurityGroupIngress = "AuthorizeSecurityGroupIngress" + +// AuthorizeSecurityGroupIngressRequest generates a request for the AuthorizeSecurityGroupIngress operation. 
+func (c *EC2) AuthorizeSecurityGroupIngressRequest(input *AuthorizeSecurityGroupIngressInput) (req *request.Request, output *AuthorizeSecurityGroupIngressOutput) { + op := &request.Operation{ + Name: opAuthorizeSecurityGroupIngress, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &AuthorizeSecurityGroupIngressInput{} + } + + req = c.newRequest(op, input, output) + output = &AuthorizeSecurityGroupIngressOutput{} + req.Data = output + return +} + +// Adds one or more ingress rules to a security group. +// +// EC2-Classic: You can have up to 100 rules per group. +// +// EC2-VPC: You can have up to 50 rules per group (covering both ingress and +// egress rules). +// +// Rule changes are propagated to instances within the security group as quickly +// as possible. However, a small delay might occur. +// +// [EC2-Classic] This action gives one or more CIDR IP address ranges permission +// to access a security group in your account, or gives one or more security +// groups (called the source groups) permission to access a security group for +// your account. A source group can be for your own AWS account, or another. +// +// [EC2-VPC] This action gives one or more CIDR IP address ranges permission +// to access a security group in your VPC, or gives one or more other security +// groups (called the source groups) permission to access a security group for +// your VPC. The security groups must all be for the same VPC. +func (c *EC2) AuthorizeSecurityGroupIngress(input *AuthorizeSecurityGroupIngressInput) (*AuthorizeSecurityGroupIngressOutput, error) { + req, out := c.AuthorizeSecurityGroupIngressRequest(input) + err := req.Send() + return out, err +} + +const opBundleInstance = "BundleInstance" + +// BundleInstanceRequest generates a request for the BundleInstance operation. +func (c *EC2) BundleInstanceRequest(input *BundleInstanceInput) (req *request.Request, output *BundleInstanceOutput) { + op := &request.Operation{ + Name: opBundleInstance, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &BundleInstanceInput{} + } + + req = c.newRequest(op, input, output) + output = &BundleInstanceOutput{} + req.Data = output + return +} + +// Bundles an Amazon instance store-backed Windows instance. +// +// During bundling, only the root device volume (C:\) is bundled. Data on other +// instance store volumes is not preserved. +// +// This action is not applicable for Linux/Unix instances or Windows instances +// that are backed by Amazon EBS. +// +// For more information, see Creating an Instance Store-Backed Windows AMI +// (http://docs.aws.amazon.com/AWSEC2/latest/WindowsGuide/Creating_InstanceStoreBacked_WinAMI.html). +func (c *EC2) BundleInstance(input *BundleInstanceInput) (*BundleInstanceOutput, error) { + req, out := c.BundleInstanceRequest(input) + err := req.Send() + return out, err +} + +const opCancelBundleTask = "CancelBundleTask" + +// CancelBundleTaskRequest generates a request for the CancelBundleTask operation. +func (c *EC2) CancelBundleTaskRequest(input *CancelBundleTaskInput) (req *request.Request, output *CancelBundleTaskOutput) { + op := &request.Operation{ + Name: opCancelBundleTask, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CancelBundleTaskInput{} + } + + req = c.newRequest(op, input, output) + output = &CancelBundleTaskOutput{} + req.Data = output + return +} + +// Cancels a bundling operation for an instance store-backed Windows instance. 
+func (c *EC2) CancelBundleTask(input *CancelBundleTaskInput) (*CancelBundleTaskOutput, error) { + req, out := c.CancelBundleTaskRequest(input) + err := req.Send() + return out, err +} + +const opCancelConversionTask = "CancelConversionTask" + +// CancelConversionTaskRequest generates a request for the CancelConversionTask operation. +func (c *EC2) CancelConversionTaskRequest(input *CancelConversionTaskInput) (req *request.Request, output *CancelConversionTaskOutput) { + op := &request.Operation{ + Name: opCancelConversionTask, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CancelConversionTaskInput{} + } + + req = c.newRequest(op, input, output) + output = &CancelConversionTaskOutput{} + req.Data = output + return +} + +// Cancels an active conversion task. The task can be the import of an instance +// or volume. The action removes all artifacts of the conversion, including +// a partially uploaded volume or instance. If the conversion is complete or +// is in the process of transferring the final disk image, the command fails +// and returns an exception. +// +// For more information, see Using the Command Line Tools to Import Your Virtual +// Machine to Amazon EC2 (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/UploadingYourInstancesandVolumes.html) +// in the Amazon Elastic Compute Cloud User Guide. +func (c *EC2) CancelConversionTask(input *CancelConversionTaskInput) (*CancelConversionTaskOutput, error) { + req, out := c.CancelConversionTaskRequest(input) + err := req.Send() + return out, err +} + +const opCancelExportTask = "CancelExportTask" + +// CancelExportTaskRequest generates a request for the CancelExportTask operation. +func (c *EC2) CancelExportTaskRequest(input *CancelExportTaskInput) (req *request.Request, output *CancelExportTaskOutput) { + op := &request.Operation{ + Name: opCancelExportTask, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CancelExportTaskInput{} + } + + req = c.newRequest(op, input, output) + output = &CancelExportTaskOutput{} + req.Data = output + return +} + +// Cancels an active export task. The request removes all artifacts of the export, +// including any partially-created Amazon S3 objects. If the export task is +// complete or is in the process of transferring the final disk image, the command +// fails and returns an error. +func (c *EC2) CancelExportTask(input *CancelExportTaskInput) (*CancelExportTaskOutput, error) { + req, out := c.CancelExportTaskRequest(input) + err := req.Send() + return out, err +} + +const opCancelImportTask = "CancelImportTask" + +// CancelImportTaskRequest generates a request for the CancelImportTask operation. +func (c *EC2) CancelImportTaskRequest(input *CancelImportTaskInput) (req *request.Request, output *CancelImportTaskOutput) { + op := &request.Operation{ + Name: opCancelImportTask, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CancelImportTaskInput{} + } + + req = c.newRequest(op, input, output) + output = &CancelImportTaskOutput{} + req.Data = output + return +} + +// Cancels an in-process import virtual machine or import snapshot task. +func (c *EC2) CancelImportTask(input *CancelImportTaskInput) (*CancelImportTaskOutput, error) { + req, out := c.CancelImportTaskRequest(input) + err := req.Send() + return out, err +} + +const opCancelReservedInstancesListing = "CancelReservedInstancesListing" + +// CancelReservedInstancesListingRequest generates a request for the CancelReservedInstancesListing operation. 
+func (c *EC2) CancelReservedInstancesListingRequest(input *CancelReservedInstancesListingInput) (req *request.Request, output *CancelReservedInstancesListingOutput) { + op := &request.Operation{ + Name: opCancelReservedInstancesListing, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CancelReservedInstancesListingInput{} + } + + req = c.newRequest(op, input, output) + output = &CancelReservedInstancesListingOutput{} + req.Data = output + return +} + +// Cancels the specified Reserved Instance listing in the Reserved Instance +// Marketplace. +// +// For more information, see Reserved Instance Marketplace (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ri-market-general.html) +// in the Amazon Elastic Compute Cloud User Guide. +func (c *EC2) CancelReservedInstancesListing(input *CancelReservedInstancesListingInput) (*CancelReservedInstancesListingOutput, error) { + req, out := c.CancelReservedInstancesListingRequest(input) + err := req.Send() + return out, err +} + +const opCancelSpotFleetRequests = "CancelSpotFleetRequests" + +// CancelSpotFleetRequestsRequest generates a request for the CancelSpotFleetRequests operation. +func (c *EC2) CancelSpotFleetRequestsRequest(input *CancelSpotFleetRequestsInput) (req *request.Request, output *CancelSpotFleetRequestsOutput) { + op := &request.Operation{ + Name: opCancelSpotFleetRequests, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CancelSpotFleetRequestsInput{} + } + + req = c.newRequest(op, input, output) + output = &CancelSpotFleetRequestsOutput{} + req.Data = output + return +} + +// Cancels the specified Spot fleet requests. +// +// After you cancel a Spot fleet request, the Spot fleet launches no new Spot +// instances. You must specify whether the Spot fleet should also terminate +// its Spot instances. If you terminate the instances, the Spot fleet request +// enters the cancelled_terminating state. Otherwise, the Spot fleet request +// enters the cancelled_running state and the instances continue to run until +// they are interrupted or you terminate them manually. +func (c *EC2) CancelSpotFleetRequests(input *CancelSpotFleetRequestsInput) (*CancelSpotFleetRequestsOutput, error) { + req, out := c.CancelSpotFleetRequestsRequest(input) + err := req.Send() + return out, err +} + +const opCancelSpotInstanceRequests = "CancelSpotInstanceRequests" + +// CancelSpotInstanceRequestsRequest generates a request for the CancelSpotInstanceRequests operation. +func (c *EC2) CancelSpotInstanceRequestsRequest(input *CancelSpotInstanceRequestsInput) (req *request.Request, output *CancelSpotInstanceRequestsOutput) { + op := &request.Operation{ + Name: opCancelSpotInstanceRequests, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CancelSpotInstanceRequestsInput{} + } + + req = c.newRequest(op, input, output) + output = &CancelSpotInstanceRequestsOutput{} + req.Data = output + return +} + +// Cancels one or more Spot instance requests. Spot instances are instances +// that Amazon EC2 starts on your behalf when the bid price that you specify +// exceeds the current Spot price. Amazon EC2 periodically sets the Spot price +// based on available Spot instance capacity and current Spot instance requests. +// For more information, see Spot Instance Requests (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/spot-requests.html) +// in the Amazon Elastic Compute Cloud User Guide. 
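+//
+// For example (an illustrative sketch; the request ID is a placeholder):
+//
+//     out, err := svc.CancelSpotInstanceRequests(&ec2.CancelSpotInstanceRequestsInput{
+//         SpotInstanceRequestIds: []*string{aws.String("sir-example1")},
+//     })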
+// +// Canceling a Spot instance request does not terminate running Spot instances +// associated with the request. +func (c *EC2) CancelSpotInstanceRequests(input *CancelSpotInstanceRequestsInput) (*CancelSpotInstanceRequestsOutput, error) { + req, out := c.CancelSpotInstanceRequestsRequest(input) + err := req.Send() + return out, err +} + +const opConfirmProductInstance = "ConfirmProductInstance" + +// ConfirmProductInstanceRequest generates a request for the ConfirmProductInstance operation. +func (c *EC2) ConfirmProductInstanceRequest(input *ConfirmProductInstanceInput) (req *request.Request, output *ConfirmProductInstanceOutput) { + op := &request.Operation{ + Name: opConfirmProductInstance, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ConfirmProductInstanceInput{} + } + + req = c.newRequest(op, input, output) + output = &ConfirmProductInstanceOutput{} + req.Data = output + return +} + +// Determines whether a product code is associated with an instance. This action +// can only be used by the owner of the product code. It is useful when a product +// code owner needs to verify whether another user's instance is eligible for +// support. +func (c *EC2) ConfirmProductInstance(input *ConfirmProductInstanceInput) (*ConfirmProductInstanceOutput, error) { + req, out := c.ConfirmProductInstanceRequest(input) + err := req.Send() + return out, err +} + +const opCopyImage = "CopyImage" + +// CopyImageRequest generates a request for the CopyImage operation. +func (c *EC2) CopyImageRequest(input *CopyImageInput) (req *request.Request, output *CopyImageOutput) { + op := &request.Operation{ + Name: opCopyImage, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CopyImageInput{} + } + + req = c.newRequest(op, input, output) + output = &CopyImageOutput{} + req.Data = output + return +} + +// Initiates the copy of an AMI from the specified source region to the current +// region. You specify the destination region by using its endpoint when making +// the request. +// +// For more information, see Copying AMIs (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/CopyingAMIs.html) +// in the Amazon Elastic Compute Cloud User Guide. +func (c *EC2) CopyImage(input *CopyImageInput) (*CopyImageOutput, error) { + req, out := c.CopyImageRequest(input) + err := req.Send() + return out, err +} + +const opCopySnapshot = "CopySnapshot" + +// CopySnapshotRequest generates a request for the CopySnapshot operation. +func (c *EC2) CopySnapshotRequest(input *CopySnapshotInput) (req *request.Request, output *CopySnapshotOutput) { + op := &request.Operation{ + Name: opCopySnapshot, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CopySnapshotInput{} + } + + req = c.newRequest(op, input, output) + output = &CopySnapshotOutput{} + req.Data = output + return +} + +// Copies a point-in-time snapshot of an EBS volume and stores it in Amazon +// S3. You can copy the snapshot within the same region or from one region to +// another. You can use the snapshot to create EBS volumes or Amazon Machine +// Images (AMIs). The snapshot is copied to the regional endpoint that you send +// the HTTP request to. +// +// Copies of encrypted EBS snapshots remain encrypted. Copies of unencrypted +// snapshots remain unencrypted, unless the Encrypted flag is specified during +// the snapshot copy operation. 
By default, encrypted snapshot copies use the +// default AWS Key Management Service (AWS KMS) customer master key (CMK); however, +// you can specify a non-default CMK with the KmsKeyId parameter. +// +// For more information, see Copying an Amazon EBS Snapshot (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-copy-snapshot.html) +// in the Amazon Elastic Compute Cloud User Guide. +func (c *EC2) CopySnapshot(input *CopySnapshotInput) (*CopySnapshotOutput, error) { + req, out := c.CopySnapshotRequest(input) + err := req.Send() + return out, err +} + +const opCreateCustomerGateway = "CreateCustomerGateway" + +// CreateCustomerGatewayRequest generates a request for the CreateCustomerGateway operation. +func (c *EC2) CreateCustomerGatewayRequest(input *CreateCustomerGatewayInput) (req *request.Request, output *CreateCustomerGatewayOutput) { + op := &request.Operation{ + Name: opCreateCustomerGateway, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateCustomerGatewayInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateCustomerGatewayOutput{} + req.Data = output + return +} + +// Provides information to AWS about your VPN customer gateway device. The customer +// gateway is the appliance at your end of the VPN connection. (The device on +// the AWS side of the VPN connection is the virtual private gateway.) You must +// provide the Internet-routable IP address of the customer gateway's external +// interface. The IP address must be static and may be behind a device performing +// network address translation (NAT). +// +// For devices that use Border Gateway Protocol (BGP), you can also provide +// the device's BGP Autonomous System Number (ASN). You can use an existing +// ASN assigned to your network. If you don't have an ASN already, you can use +// a private ASN (in the 64512 - 65534 range). +// +// Amazon EC2 supports all 2-byte ASN numbers in the range of 1 - 65534, with +// the exception of 7224, which is reserved in the us-east-1 region, and 9059, +// which is reserved in the eu-west-1 region. +// +// For more information about VPN customer gateways, see Adding a Hardware +// Virtual Private Gateway to Your VPC (http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_VPN.html) +// in the Amazon Virtual Private Cloud User Guide. +// +// You cannot create more than one customer gateway with the same VPN type, +// IP address, and BGP ASN parameter values. If you run an identical request +// more than one time, the first request creates the customer gateway, and subsequent +// requests return information about the existing customer gateway. The subsequent +// requests do not create new customer gateway resources. +func (c *EC2) CreateCustomerGateway(input *CreateCustomerGatewayInput) (*CreateCustomerGatewayOutput, error) { + req, out := c.CreateCustomerGatewayRequest(input) + err := req.Send() + return out, err +} + +const opCreateDhcpOptions = "CreateDhcpOptions" + +// CreateDhcpOptionsRequest generates a request for the CreateDhcpOptions operation. +func (c *EC2) CreateDhcpOptionsRequest(input *CreateDhcpOptionsInput) (req *request.Request, output *CreateDhcpOptionsOutput) { + op := &request.Operation{ + Name: opCreateDhcpOptions, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateDhcpOptionsInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateDhcpOptionsOutput{} + req.Data = output + return +} + +// Creates a set of DHCP options for your VPC. 
After creating the set, you must
+// associate it with the VPC, causing all existing and new instances that you
+// launch in the VPC to use this set of DHCP options. The following are the
+// individual DHCP options you can specify. For more information about the options,
+// see RFC 2132 (http://www.ietf.org/rfc/rfc2132.txt).
+//
+// domain-name-servers - The IP addresses of up to four domain name servers,
+// or AmazonProvidedDNS. The default DHCP option set specifies AmazonProvidedDNS.
+// If specifying more than one domain name server, specify the IP addresses
+// in a single parameter, separated by commas.
+//
+// domain-name - If you're using AmazonProvidedDNS in us-east-1, specify
+// ec2.internal. If you're using AmazonProvidedDNS in another region, specify
+// region.compute.internal (for example, ap-northeast-1.compute.internal).
+// Otherwise, specify a domain name (for example, MyCompany.com). Important:
+// Some Linux operating systems accept multiple domain names separated by spaces.
+// However, Windows and other Linux operating systems treat the value as a single
+// domain, which results in unexpected behavior. If your DHCP options set is
+// associated with a VPC that has instances with multiple operating systems,
+// specify only one domain name.
+//
+// ntp-servers - The IP addresses of up to four Network Time Protocol (NTP)
+// servers.
+//
+// netbios-name-servers - The IP addresses of up to four NetBIOS name servers.
+//
+// netbios-node-type - The NetBIOS node type (1, 2, 4, or 8). We recommend
+// that you specify 2 (broadcast and multicast are not currently supported).
+// For more information about these node types, see RFC 2132
+// (http://www.ietf.org/rfc/rfc2132.txt).
+//
+// Your VPC automatically starts out with a set of DHCP options that includes
+// only a DNS server that we provide (AmazonProvidedDNS). If you create a set
+// of options, and if your VPC has an Internet gateway, make sure to set the
+// domain-name-servers option either to AmazonProvidedDNS or to a domain name
+// server of your choice. For more information about DHCP options, see DHCP
+// Options Sets (http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_DHCP_Options.html)
+// in the Amazon Virtual Private Cloud User Guide.
+func (c *EC2) CreateDhcpOptions(input *CreateDhcpOptionsInput) (*CreateDhcpOptionsOutput, error) {
+	req, out := c.CreateDhcpOptionsRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+const opCreateFlowLogs = "CreateFlowLogs"
+
+// CreateFlowLogsRequest generates a request for the CreateFlowLogs operation.
+func (c *EC2) CreateFlowLogsRequest(input *CreateFlowLogsInput) (req *request.Request, output *CreateFlowLogsOutput) {
+	op := &request.Operation{
+		Name:       opCreateFlowLogs,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &CreateFlowLogsInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &CreateFlowLogsOutput{}
+	req.Data = output
+	return
+}
+
+// Creates one or more flow logs to capture IP traffic for a specific network
+// interface, subnet, or VPC. Flow logs are delivered to a specified log group
+// in Amazon CloudWatch Logs. If you specify a VPC or subnet in the request,
+// a log stream is created in CloudWatch Logs for each network interface in
+// the subnet or VPC. Log streams can include information about accepted and
+// rejected traffic to a network interface. You can view the data in your log
+// streams using Amazon CloudWatch Logs.
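+//
+// A rough sketch of a request (all values are illustrative assumptions; note
+// the IAM role requirement described next):
+//
+//     out, err := svc.CreateFlowLogs(&ec2.CreateFlowLogsInput{
+//         ResourceIds:              []*string{aws.String("vpc-0a1b2c3d")},
+//         ResourceType:             aws.String("VPC"),
+//         TrafficType:              aws.String("ALL"),
+//         LogGroupName:             aws.String("my-flow-logs"),
+//         DeliverLogsPermissionArn: aws.String("arn:aws:iam::123456789012:role/flow-logs"),
+//     })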
+// +// In your request, you must also specify an IAM role that has permission to +// publish logs to CloudWatch Logs. +func (c *EC2) CreateFlowLogs(input *CreateFlowLogsInput) (*CreateFlowLogsOutput, error) { + req, out := c.CreateFlowLogsRequest(input) + err := req.Send() + return out, err +} + +const opCreateImage = "CreateImage" + +// CreateImageRequest generates a request for the CreateImage operation. +func (c *EC2) CreateImageRequest(input *CreateImageInput) (req *request.Request, output *CreateImageOutput) { + op := &request.Operation{ + Name: opCreateImage, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateImageInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateImageOutput{} + req.Data = output + return +} + +// Creates an Amazon EBS-backed AMI from an Amazon EBS-backed instance that +// is either running or stopped. +// +// If you customized your instance with instance store volumes or EBS volumes +// in addition to the root device volume, the new AMI contains block device +// mapping information for those volumes. When you launch an instance from this +// new AMI, the instance automatically launches with those additional volumes. +// +// For more information, see Creating Amazon EBS-Backed Linux AMIs (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/creating-an-ami-ebs.html) +// in the Amazon Elastic Compute Cloud User Guide. +func (c *EC2) CreateImage(input *CreateImageInput) (*CreateImageOutput, error) { + req, out := c.CreateImageRequest(input) + err := req.Send() + return out, err +} + +const opCreateInstanceExportTask = "CreateInstanceExportTask" + +// CreateInstanceExportTaskRequest generates a request for the CreateInstanceExportTask operation. +func (c *EC2) CreateInstanceExportTaskRequest(input *CreateInstanceExportTaskInput) (req *request.Request, output *CreateInstanceExportTaskOutput) { + op := &request.Operation{ + Name: opCreateInstanceExportTask, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateInstanceExportTaskInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateInstanceExportTaskOutput{} + req.Data = output + return +} + +// Exports a running or stopped instance to an S3 bucket. +// +// For information about the supported operating systems, image formats, and +// known limitations for the types of instances you can export, see Exporting +// EC2 Instances (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ExportingEC2Instances.html) +// in the Amazon Elastic Compute Cloud User Guide. +func (c *EC2) CreateInstanceExportTask(input *CreateInstanceExportTaskInput) (*CreateInstanceExportTaskOutput, error) { + req, out := c.CreateInstanceExportTaskRequest(input) + err := req.Send() + return out, err +} + +const opCreateInternetGateway = "CreateInternetGateway" + +// CreateInternetGatewayRequest generates a request for the CreateInternetGateway operation. +func (c *EC2) CreateInternetGatewayRequest(input *CreateInternetGatewayInput) (req *request.Request, output *CreateInternetGatewayOutput) { + op := &request.Operation{ + Name: opCreateInternetGateway, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateInternetGatewayInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateInternetGatewayOutput{} + req.Data = output + return +} + +// Creates an Internet gateway for use with a VPC. After creating the Internet +// gateway, you attach it to a VPC using AttachInternetGateway. 
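+//
+// The two-step flow, sketched with a placeholder VPC ID (an editorial
+// illustration, not generated documentation):
+//
+//     igw, err := svc.CreateInternetGateway(&ec2.CreateInternetGatewayInput{})
+//     if err == nil {
+//         _, err = svc.AttachInternetGateway(&ec2.AttachInternetGatewayInput{
+//             InternetGatewayId: igw.InternetGateway.InternetGatewayId,
+//             VpcId:             aws.String("vpc-0a1b2c3d"),
+//         })
+//     }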
+// +// For more information about your VPC and Internet gateway, see the Amazon +// Virtual Private Cloud User Guide (http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/). +func (c *EC2) CreateInternetGateway(input *CreateInternetGatewayInput) (*CreateInternetGatewayOutput, error) { + req, out := c.CreateInternetGatewayRequest(input) + err := req.Send() + return out, err +} + +const opCreateKeyPair = "CreateKeyPair" + +// CreateKeyPairRequest generates a request for the CreateKeyPair operation. +func (c *EC2) CreateKeyPairRequest(input *CreateKeyPairInput) (req *request.Request, output *CreateKeyPairOutput) { + op := &request.Operation{ + Name: opCreateKeyPair, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateKeyPairInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateKeyPairOutput{} + req.Data = output + return +} + +// Creates a 2048-bit RSA key pair with the specified name. Amazon EC2 stores +// the public key and displays the private key for you to save to a file. The +// private key is returned as an unencrypted PEM encoded PKCS#8 private key. +// If a key with the specified name already exists, Amazon EC2 returns an error. +// +// You can have up to five thousand key pairs per region. +// +// The key pair returned to you is available only in the region in which you +// create it. To create a key pair that is available in all regions, use ImportKeyPair. +// +// For more information about key pairs, see Key Pairs (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-key-pairs.html) +// in the Amazon Elastic Compute Cloud User Guide. +func (c *EC2) CreateKeyPair(input *CreateKeyPairInput) (*CreateKeyPairOutput, error) { + req, out := c.CreateKeyPairRequest(input) + err := req.Send() + return out, err +} + +const opCreateNatGateway = "CreateNatGateway" + +// CreateNatGatewayRequest generates a request for the CreateNatGateway operation. +func (c *EC2) CreateNatGatewayRequest(input *CreateNatGatewayInput) (req *request.Request, output *CreateNatGatewayOutput) { + op := &request.Operation{ + Name: opCreateNatGateway, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateNatGatewayInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateNatGatewayOutput{} + req.Data = output + return +} + +// Creates a NAT gateway in the specified subnet. A NAT gateway can be used +// to enable instances in a private subnet to connect to the Internet. This +// action creates a network interface in the specified subnet with a private +// IP address from the IP address range of the subnet. For more information, +// see NAT Gateways (http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/vpc-nat-gateway.html) +// in the Amazon Virtual Private Cloud User Guide. +func (c *EC2) CreateNatGateway(input *CreateNatGatewayInput) (*CreateNatGatewayOutput, error) { + req, out := c.CreateNatGatewayRequest(input) + err := req.Send() + return out, err +} + +const opCreateNetworkAcl = "CreateNetworkAcl" + +// CreateNetworkAclRequest generates a request for the CreateNetworkAcl operation. +func (c *EC2) CreateNetworkAclRequest(input *CreateNetworkAclInput) (req *request.Request, output *CreateNetworkAclOutput) { + op := &request.Operation{ + Name: opCreateNetworkAcl, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateNetworkAclInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateNetworkAclOutput{} + req.Data = output + return +} + +// Creates a network ACL in a VPC. 
Network ACLs provide an optional layer of
+// security (in addition to security groups) for the instances in your VPC.
+//
+// For more information about network ACLs, see Network ACLs (http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_ACLs.html)
+// in the Amazon Virtual Private Cloud User Guide.
+func (c *EC2) CreateNetworkAcl(input *CreateNetworkAclInput) (*CreateNetworkAclOutput, error) {
+	req, out := c.CreateNetworkAclRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+const opCreateNetworkAclEntry = "CreateNetworkAclEntry"
+
+// CreateNetworkAclEntryRequest generates a request for the CreateNetworkAclEntry operation.
+func (c *EC2) CreateNetworkAclEntryRequest(input *CreateNetworkAclEntryInput) (req *request.Request, output *CreateNetworkAclEntryOutput) {
+	op := &request.Operation{
+		Name:       opCreateNetworkAclEntry,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &CreateNetworkAclEntryInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &CreateNetworkAclEntryOutput{}
+	req.Data = output
+	return
+}
+
+// Creates an entry (a rule) in a network ACL with the specified rule number.
+// Each network ACL has a set of numbered ingress rules and a separate set of
+// numbered egress rules. When determining whether a packet should be allowed
+// in or out of a subnet associated with the ACL, we process the entries in
+// the ACL according to the rule numbers, in ascending order.
+//
+// We recommend that you leave room between the rule numbers (for example,
+// 100, 110, 120, ...), and not number them one right after the other (for example,
+// 101, 102, 103, ...). This makes it easier to add a rule between existing
+// ones without having to renumber the rules.
+//
+// After you add an entry, you can't modify it; you must either replace it,
+// or create a new entry and delete the old one.
+//
+// For more information about network ACLs, see Network ACLs (http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_ACLs.html)
+// in the Amazon Virtual Private Cloud User Guide.
+func (c *EC2) CreateNetworkAclEntry(input *CreateNetworkAclEntryInput) (*CreateNetworkAclEntryOutput, error) {
+	req, out := c.CreateNetworkAclEntryRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+const opCreateNetworkInterface = "CreateNetworkInterface"
+
+// CreateNetworkInterfaceRequest generates a request for the CreateNetworkInterface operation.
+func (c *EC2) CreateNetworkInterfaceRequest(input *CreateNetworkInterfaceInput) (req *request.Request, output *CreateNetworkInterfaceOutput) {
+	op := &request.Operation{
+		Name:       opCreateNetworkInterface,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &CreateNetworkInterfaceInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &CreateNetworkInterfaceOutput{}
+	req.Data = output
+	return
+}
+
+// Creates a network interface in the specified subnet.
+//
+// For more information about network interfaces, see Elastic Network Interfaces
+// (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-eni.html) in the
+// Amazon Elastic Compute Cloud User Guide.
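+//
+// A minimal usage sketch (illustrative only): the subnet ID and description
+// are placeholder values, and svc is assumed to be an already configured
+// *ec2.EC2 client.
+//
+//	resp, err := svc.CreateNetworkInterface(&ec2.CreateNetworkInterfaceInput{
+//		SubnetId:    aws.String("subnet-1a2b3c4d"), // placeholder subnet ID
+//		Description: aws.String("example interface"),
+//	})
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	fmt.Println(resp.NetworkInterface)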
+func (c *EC2) CreateNetworkInterface(input *CreateNetworkInterfaceInput) (*CreateNetworkInterfaceOutput, error) { + req, out := c.CreateNetworkInterfaceRequest(input) + err := req.Send() + return out, err +} + +const opCreatePlacementGroup = "CreatePlacementGroup" + +// CreatePlacementGroupRequest generates a request for the CreatePlacementGroup operation. +func (c *EC2) CreatePlacementGroupRequest(input *CreatePlacementGroupInput) (req *request.Request, output *CreatePlacementGroupOutput) { + op := &request.Operation{ + Name: opCreatePlacementGroup, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreatePlacementGroupInput{} + } + + req = c.newRequest(op, input, output) + output = &CreatePlacementGroupOutput{} + req.Data = output + return +} + +// Creates a placement group that you launch cluster instances into. You must +// give the group a name that's unique within the scope of your account. +// +// For more information about placement groups and cluster instances, see Cluster +// Instances (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using_cluster_computing.html) +// in the Amazon Elastic Compute Cloud User Guide. +func (c *EC2) CreatePlacementGroup(input *CreatePlacementGroupInput) (*CreatePlacementGroupOutput, error) { + req, out := c.CreatePlacementGroupRequest(input) + err := req.Send() + return out, err +} + +const opCreateReservedInstancesListing = "CreateReservedInstancesListing" + +// CreateReservedInstancesListingRequest generates a request for the CreateReservedInstancesListing operation. +func (c *EC2) CreateReservedInstancesListingRequest(input *CreateReservedInstancesListingInput) (req *request.Request, output *CreateReservedInstancesListingOutput) { + op := &request.Operation{ + Name: opCreateReservedInstancesListing, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateReservedInstancesListingInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateReservedInstancesListingOutput{} + req.Data = output + return +} + +// Creates a listing for Amazon EC2 Reserved Instances to be sold in the Reserved +// Instance Marketplace. You can submit one Reserved Instance listing at a time. +// To get a list of your Reserved Instances, you can use the DescribeReservedInstances +// operation. +// +// The Reserved Instance Marketplace matches sellers who want to resell Reserved +// Instance capacity that they no longer need with buyers who want to purchase +// additional capacity. Reserved Instances bought and sold through the Reserved +// Instance Marketplace work like any other Reserved Instances. +// +// To sell your Reserved Instances, you must first register as a seller in +// the Reserved Instance Marketplace. After completing the registration process, +// you can create a Reserved Instance Marketplace listing of some or all of +// your Reserved Instances, and specify the upfront price to receive for them. +// Your Reserved Instance listings then become available for purchase. To view +// the details of your Reserved Instance listing, you can use the DescribeReservedInstancesListings +// operation. +// +// For more information, see Reserved Instance Marketplace (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ri-market-general.html) +// in the Amazon Elastic Compute Cloud User Guide. 
+func (c *EC2) CreateReservedInstancesListing(input *CreateReservedInstancesListingInput) (*CreateReservedInstancesListingOutput, error) { + req, out := c.CreateReservedInstancesListingRequest(input) + err := req.Send() + return out, err +} + +const opCreateRoute = "CreateRoute" + +// CreateRouteRequest generates a request for the CreateRoute operation. +func (c *EC2) CreateRouteRequest(input *CreateRouteInput) (req *request.Request, output *CreateRouteOutput) { + op := &request.Operation{ + Name: opCreateRoute, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateRouteInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateRouteOutput{} + req.Data = output + return +} + +// Creates a route in a route table within a VPC. +// +// You must specify one of the following targets: Internet gateway or virtual +// private gateway, NAT instance, NAT gateway, VPC peering connection, or network +// interface. +// +// When determining how to route traffic, we use the route with the most specific +// match. For example, let's say the traffic is destined for 192.0.2.3, and +// the route table includes the following two routes: +// +// 192.0.2.0/24 (goes to some target A) +// +// 192.0.2.0/28 (goes to some target B) +// +// Both routes apply to the traffic destined for 192.0.2.3. However, the +// second route in the list covers a smaller number of IP addresses and is therefore +// more specific, so we use that route to determine where to target the traffic. +// +// For more information about route tables, see Route Tables (http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_Route_Tables.html) +// in the Amazon Virtual Private Cloud User Guide. +func (c *EC2) CreateRoute(input *CreateRouteInput) (*CreateRouteOutput, error) { + req, out := c.CreateRouteRequest(input) + err := req.Send() + return out, err +} + +const opCreateRouteTable = "CreateRouteTable" + +// CreateRouteTableRequest generates a request for the CreateRouteTable operation. +func (c *EC2) CreateRouteTableRequest(input *CreateRouteTableInput) (req *request.Request, output *CreateRouteTableOutput) { + op := &request.Operation{ + Name: opCreateRouteTable, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateRouteTableInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateRouteTableOutput{} + req.Data = output + return +} + +// Creates a route table for the specified VPC. After you create a route table, +// you can add routes and associate the table with a subnet. +// +// For more information about route tables, see Route Tables (http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_Route_Tables.html) +// in the Amazon Virtual Private Cloud User Guide. +func (c *EC2) CreateRouteTable(input *CreateRouteTableInput) (*CreateRouteTableOutput, error) { + req, out := c.CreateRouteTableRequest(input) + err := req.Send() + return out, err +} + +const opCreateSecurityGroup = "CreateSecurityGroup" + +// CreateSecurityGroupRequest generates a request for the CreateSecurityGroup operation. +func (c *EC2) CreateSecurityGroupRequest(input *CreateSecurityGroupInput) (req *request.Request, output *CreateSecurityGroupOutput) { + op := &request.Operation{ + Name: opCreateSecurityGroup, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateSecurityGroupInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateSecurityGroupOutput{} + req.Data = output + return +} + +// Creates a security group. 
+// +// A security group is for use with instances either in the EC2-Classic platform +// or in a specific VPC. For more information, see Amazon EC2 Security Groups +// (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-network-security.html) +// in the Amazon Elastic Compute Cloud User Guide and Security Groups for Your +// VPC (http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_SecurityGroups.html) +// in the Amazon Virtual Private Cloud User Guide. +// +// EC2-Classic: You can have up to 500 security groups. +// +// EC2-VPC: You can create up to 500 security groups per VPC. +// +// When you create a security group, you specify a friendly name of your choice. +// You can have a security group for use in EC2-Classic with the same name as +// a security group for use in a VPC. However, you can't have two security groups +// for use in EC2-Classic with the same name or two security groups for use +// in a VPC with the same name. +// +// You have a default security group for use in EC2-Classic and a default security +// group for use in your VPC. If you don't specify a security group when you +// launch an instance, the instance is launched into the appropriate default +// security group. A default security group includes a default rule that grants +// instances unrestricted network access to each other. +// +// You can add or remove rules from your security groups using AuthorizeSecurityGroupIngress, +// AuthorizeSecurityGroupEgress, RevokeSecurityGroupIngress, and RevokeSecurityGroupEgress. +func (c *EC2) CreateSecurityGroup(input *CreateSecurityGroupInput) (*CreateSecurityGroupOutput, error) { + req, out := c.CreateSecurityGroupRequest(input) + err := req.Send() + return out, err +} + +const opCreateSnapshot = "CreateSnapshot" + +// CreateSnapshotRequest generates a request for the CreateSnapshot operation. +func (c *EC2) CreateSnapshotRequest(input *CreateSnapshotInput) (req *request.Request, output *Snapshot) { + op := &request.Operation{ + Name: opCreateSnapshot, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateSnapshotInput{} + } + + req = c.newRequest(op, input, output) + output = &Snapshot{} + req.Data = output + return +} + +// Creates a snapshot of an EBS volume and stores it in Amazon S3. You can use +// snapshots for backups, to make copies of EBS volumes, and to save data before +// shutting down an instance. +// +// When a snapshot is created, any AWS Marketplace product codes that are associated +// with the source volume are propagated to the snapshot. +// +// You can take a snapshot of an attached volume that is in use. However, snapshots +// only capture data that has been written to your EBS volume at the time the +// snapshot command is issued; this may exclude any data that has been cached +// by any applications or the operating system. If you can pause any file systems +// on the volume long enough to take a snapshot, your snapshot should be complete. +// However, if you cannot pause all file writes to the volume, you should unmount +// the volume from within the instance, issue the snapshot command, and then +// remount the volume to ensure a consistent and complete snapshot. You may +// remount and use your volume while the snapshot status is pending. +// +// To create a snapshot for EBS volumes that serve as root devices, you should +// stop the instance before taking the snapshot. +// +// Snapshots that are taken from encrypted volumes are automatically encrypted. 
+// Volumes that are created from encrypted snapshots are also automatically +// encrypted. Your encrypted volumes and any associated snapshots always remain +// protected. +// +// For more information, see Amazon Elastic Block Store (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/AmazonEBS.html) +// and Amazon EBS Encryption (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSEncryption.html) +// in the Amazon Elastic Compute Cloud User Guide. +func (c *EC2) CreateSnapshot(input *CreateSnapshotInput) (*Snapshot, error) { + req, out := c.CreateSnapshotRequest(input) + err := req.Send() + return out, err +} + +const opCreateSpotDatafeedSubscription = "CreateSpotDatafeedSubscription" + +// CreateSpotDatafeedSubscriptionRequest generates a request for the CreateSpotDatafeedSubscription operation. +func (c *EC2) CreateSpotDatafeedSubscriptionRequest(input *CreateSpotDatafeedSubscriptionInput) (req *request.Request, output *CreateSpotDatafeedSubscriptionOutput) { + op := &request.Operation{ + Name: opCreateSpotDatafeedSubscription, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateSpotDatafeedSubscriptionInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateSpotDatafeedSubscriptionOutput{} + req.Data = output + return +} + +// Creates a data feed for Spot instances, enabling you to view Spot instance +// usage logs. You can create one data feed per AWS account. For more information, +// see Spot Instance Data Feed (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/spot-data-feeds.html) +// in the Amazon Elastic Compute Cloud User Guide. +func (c *EC2) CreateSpotDatafeedSubscription(input *CreateSpotDatafeedSubscriptionInput) (*CreateSpotDatafeedSubscriptionOutput, error) { + req, out := c.CreateSpotDatafeedSubscriptionRequest(input) + err := req.Send() + return out, err +} + +const opCreateSubnet = "CreateSubnet" + +// CreateSubnetRequest generates a request for the CreateSubnet operation. +func (c *EC2) CreateSubnetRequest(input *CreateSubnetInput) (req *request.Request, output *CreateSubnetOutput) { + op := &request.Operation{ + Name: opCreateSubnet, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateSubnetInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateSubnetOutput{} + req.Data = output + return +} + +// Creates a subnet in an existing VPC. +// +// When you create each subnet, you provide the VPC ID and the CIDR block you +// want for the subnet. After you create a subnet, you can't change its CIDR +// block. The subnet's CIDR block can be the same as the VPC's CIDR block (assuming +// you want only a single subnet in the VPC), or a subset of the VPC's CIDR +// block. If you create more than one subnet in a VPC, the subnets' CIDR blocks +// must not overlap. The smallest subnet (and VPC) you can create uses a /28 +// netmask (16 IP addresses), and the largest uses a /16 netmask (65,536 IP +// addresses). +// +// AWS reserves both the first four and the last IP address in each subnet's +// CIDR block. They're not available for use. +// +// If you add more than one subnet to a VPC, they're set up in a star topology +// with a logical router in the middle. +// +// If you launch an instance in a VPC using an Amazon EBS-backed AMI, the IP +// address doesn't change if you stop and restart the instance (unlike a similar +// instance launched outside a VPC, which gets a new IP address when restarted). 
+// It's therefore possible to have a subnet with no running instances (they're
+// all stopped), but no remaining IP addresses available.
+//
+// For more information about subnets, see Your VPC and Subnets (http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_Subnets.html)
+// in the Amazon Virtual Private Cloud User Guide.
+func (c *EC2) CreateSubnet(input *CreateSubnetInput) (*CreateSubnetOutput, error) {
+	req, out := c.CreateSubnetRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+const opCreateTags = "CreateTags"
+
+// CreateTagsRequest generates a request for the CreateTags operation.
+func (c *EC2) CreateTagsRequest(input *CreateTagsInput) (req *request.Request, output *CreateTagsOutput) {
+	op := &request.Operation{
+		Name:       opCreateTags,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &CreateTagsInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &CreateTagsOutput{}
+	req.Data = output
+	return
+}
+
+// Adds or overwrites one or more tags for the specified Amazon EC2 resource
+// or resources. Each resource can have a maximum of 10 tags. Each tag consists
+// of a key and an optional value. Tag keys must be unique per resource.
+//
+// For more information about tags, see Tagging Your Resources (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_Tags.html)
+// in the Amazon Elastic Compute Cloud User Guide. For more information about
+// creating IAM policies that control users' access to resources based on tags,
+// see Supported Resource-Level Permissions for Amazon EC2 API Actions (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-supported-iam-actions-resources.html)
+// in the Amazon Elastic Compute Cloud User Guide.
+func (c *EC2) CreateTags(input *CreateTagsInput) (*CreateTagsOutput, error) {
+	req, out := c.CreateTagsRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+const opCreateVolume = "CreateVolume"
+
+// CreateVolumeRequest generates a request for the CreateVolume operation.
+func (c *EC2) CreateVolumeRequest(input *CreateVolumeInput) (req *request.Request, output *Volume) {
+	op := &request.Operation{
+		Name:       opCreateVolume,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &CreateVolumeInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &Volume{}
+	req.Data = output
+	return
+}
+
+// Creates an EBS volume that can be attached to an instance in the same Availability
+// Zone. The volume is created in the regional endpoint that you send the HTTP
+// request to. For more information, see Regions and Endpoints (http://docs.aws.amazon.com/general/latest/gr/rande.html).
+//
+// You can create a new empty volume or restore a volume from an EBS snapshot.
+// Any AWS Marketplace product codes from the snapshot are propagated to the
+// volume.
+//
+// You can create encrypted volumes with the Encrypted parameter. Encrypted
+// volumes may only be attached to instances that support Amazon EBS encryption.
+// Volumes that are created from encrypted snapshots are also automatically
+// encrypted. For more information, see Amazon EBS Encryption (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSEncryption.html)
+// in the Amazon Elastic Compute Cloud User Guide.
+//
+// For more information, see Creating or Restoring an Amazon EBS Volume (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-creating-volume.html)
+// in the Amazon Elastic Compute Cloud User Guide.
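+//
+// A minimal sketch (the Availability Zone, size, and volume type below are
+// placeholder choices; svc is an assumed, already configured *ec2.EC2 client):
+//
+//	vol, err := svc.CreateVolume(&ec2.CreateVolumeInput{
+//		AvailabilityZone: aws.String("us-east-1a"), // placeholder zone
+//		Size:             aws.Int64(8),             // size in GiB
+//		VolumeType:       aws.String("gp2"),
+//	})
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	fmt.Println(*vol.VolumeId)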
+func (c *EC2) CreateVolume(input *CreateVolumeInput) (*Volume, error) { + req, out := c.CreateVolumeRequest(input) + err := req.Send() + return out, err +} + +const opCreateVpc = "CreateVpc" + +// CreateVpcRequest generates a request for the CreateVpc operation. +func (c *EC2) CreateVpcRequest(input *CreateVpcInput) (req *request.Request, output *CreateVpcOutput) { + op := &request.Operation{ + Name: opCreateVpc, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateVpcInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateVpcOutput{} + req.Data = output + return +} + +// Creates a VPC with the specified CIDR block. +// +// The smallest VPC you can create uses a /28 netmask (16 IP addresses), and +// the largest uses a /16 netmask (65,536 IP addresses). To help you decide +// how big to make your VPC, see Your VPC and Subnets (http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_Subnets.html) +// in the Amazon Virtual Private Cloud User Guide. +// +// By default, each instance you launch in the VPC has the default DHCP options, +// which includes only a default DNS server that we provide (AmazonProvidedDNS). +// For more information about DHCP options, see DHCP Options Sets (http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_DHCP_Options.html) +// in the Amazon Virtual Private Cloud User Guide. +func (c *EC2) CreateVpc(input *CreateVpcInput) (*CreateVpcOutput, error) { + req, out := c.CreateVpcRequest(input) + err := req.Send() + return out, err +} + +const opCreateVpcEndpoint = "CreateVpcEndpoint" + +// CreateVpcEndpointRequest generates a request for the CreateVpcEndpoint operation. +func (c *EC2) CreateVpcEndpointRequest(input *CreateVpcEndpointInput) (req *request.Request, output *CreateVpcEndpointOutput) { + op := &request.Operation{ + Name: opCreateVpcEndpoint, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateVpcEndpointInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateVpcEndpointOutput{} + req.Data = output + return +} + +// Creates a VPC endpoint for a specified AWS service. An endpoint enables you +// to create a private connection between your VPC and another AWS service in +// your account. You can specify an endpoint policy to attach to the endpoint +// that will control access to the service from your VPC. You can also specify +// the VPC route tables that use the endpoint. +// +// Currently, only endpoints to Amazon S3 are supported. +func (c *EC2) CreateVpcEndpoint(input *CreateVpcEndpointInput) (*CreateVpcEndpointOutput, error) { + req, out := c.CreateVpcEndpointRequest(input) + err := req.Send() + return out, err +} + +const opCreateVpcPeeringConnection = "CreateVpcPeeringConnection" + +// CreateVpcPeeringConnectionRequest generates a request for the CreateVpcPeeringConnection operation. +func (c *EC2) CreateVpcPeeringConnectionRequest(input *CreateVpcPeeringConnectionInput) (req *request.Request, output *CreateVpcPeeringConnectionOutput) { + op := &request.Operation{ + Name: opCreateVpcPeeringConnection, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateVpcPeeringConnectionInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateVpcPeeringConnectionOutput{} + req.Data = output + return +} + +// Requests a VPC peering connection between two VPCs: a requester VPC that +// you own and a peer VPC with which to create the connection. The peer VPC +// can belong to another AWS account. 
The requester VPC and peer VPC cannot +// have overlapping CIDR blocks. +// +// The owner of the peer VPC must accept the peering request to activate the +// peering connection. The VPC peering connection request expires after 7 days, +// after which it cannot be accepted or rejected. +// +// A CreateVpcPeeringConnection request between VPCs with overlapping CIDR +// blocks results in the VPC peering connection having a status of failed. +func (c *EC2) CreateVpcPeeringConnection(input *CreateVpcPeeringConnectionInput) (*CreateVpcPeeringConnectionOutput, error) { + req, out := c.CreateVpcPeeringConnectionRequest(input) + err := req.Send() + return out, err +} + +const opCreateVpnConnection = "CreateVpnConnection" + +// CreateVpnConnectionRequest generates a request for the CreateVpnConnection operation. +func (c *EC2) CreateVpnConnectionRequest(input *CreateVpnConnectionInput) (req *request.Request, output *CreateVpnConnectionOutput) { + op := &request.Operation{ + Name: opCreateVpnConnection, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateVpnConnectionInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateVpnConnectionOutput{} + req.Data = output + return +} + +// Creates a VPN connection between an existing virtual private gateway and +// a VPN customer gateway. The only supported connection type is ipsec.1. +// +// The response includes information that you need to give to your network +// administrator to configure your customer gateway. +// +// We strongly recommend that you use HTTPS when calling this operation because +// the response contains sensitive cryptographic information for configuring +// your customer gateway. +// +// If you decide to shut down your VPN connection for any reason and later +// create a new VPN connection, you must reconfigure your customer gateway with +// the new information returned from this call. +// +// For more information about VPN connections, see Adding a Hardware Virtual +// Private Gateway to Your VPC (http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_VPN.html) +// in the Amazon Virtual Private Cloud User Guide. +func (c *EC2) CreateVpnConnection(input *CreateVpnConnectionInput) (*CreateVpnConnectionOutput, error) { + req, out := c.CreateVpnConnectionRequest(input) + err := req.Send() + return out, err +} + +const opCreateVpnConnectionRoute = "CreateVpnConnectionRoute" + +// CreateVpnConnectionRouteRequest generates a request for the CreateVpnConnectionRoute operation. +func (c *EC2) CreateVpnConnectionRouteRequest(input *CreateVpnConnectionRouteInput) (req *request.Request, output *CreateVpnConnectionRouteOutput) { + op := &request.Operation{ + Name: opCreateVpnConnectionRoute, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateVpnConnectionRouteInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateVpnConnectionRouteOutput{} + req.Data = output + return +} + +// Creates a static route associated with a VPN connection between an existing +// virtual private gateway and a VPN customer gateway. The static route allows +// traffic to be routed from the virtual private gateway to the VPN customer +// gateway. +// +// For more information about VPN connections, see Adding a Hardware Virtual +// Private Gateway to Your VPC (http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_VPN.html) +// in the Amazon Virtual Private Cloud User Guide. 
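+//
+// A minimal sketch (both values are placeholders):
+//
+//	_, err := svc.CreateVpnConnectionRoute(&ec2.CreateVpnConnectionRouteInput{
+//		VpnConnectionId:      aws.String("vpn-1a2b3c4d"),   // placeholder
+//		DestinationCidrBlock: aws.String("192.168.0.0/16"), // placeholder
+//	})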
+func (c *EC2) CreateVpnConnectionRoute(input *CreateVpnConnectionRouteInput) (*CreateVpnConnectionRouteOutput, error) { + req, out := c.CreateVpnConnectionRouteRequest(input) + err := req.Send() + return out, err +} + +const opCreateVpnGateway = "CreateVpnGateway" + +// CreateVpnGatewayRequest generates a request for the CreateVpnGateway operation. +func (c *EC2) CreateVpnGatewayRequest(input *CreateVpnGatewayInput) (req *request.Request, output *CreateVpnGatewayOutput) { + op := &request.Operation{ + Name: opCreateVpnGateway, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateVpnGatewayInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateVpnGatewayOutput{} + req.Data = output + return +} + +// Creates a virtual private gateway. A virtual private gateway is the endpoint +// on the VPC side of your VPN connection. You can create a virtual private +// gateway before creating the VPC itself. +// +// For more information about virtual private gateways, see Adding a Hardware +// Virtual Private Gateway to Your VPC (http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_VPN.html) +// in the Amazon Virtual Private Cloud User Guide. +func (c *EC2) CreateVpnGateway(input *CreateVpnGatewayInput) (*CreateVpnGatewayOutput, error) { + req, out := c.CreateVpnGatewayRequest(input) + err := req.Send() + return out, err +} + +const opDeleteCustomerGateway = "DeleteCustomerGateway" + +// DeleteCustomerGatewayRequest generates a request for the DeleteCustomerGateway operation. +func (c *EC2) DeleteCustomerGatewayRequest(input *DeleteCustomerGatewayInput) (req *request.Request, output *DeleteCustomerGatewayOutput) { + op := &request.Operation{ + Name: opDeleteCustomerGateway, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteCustomerGatewayInput{} + } + + req = c.newRequest(op, input, output) + output = &DeleteCustomerGatewayOutput{} + req.Data = output + return +} + +// Deletes the specified customer gateway. You must delete the VPN connection +// before you can delete the customer gateway. +func (c *EC2) DeleteCustomerGateway(input *DeleteCustomerGatewayInput) (*DeleteCustomerGatewayOutput, error) { + req, out := c.DeleteCustomerGatewayRequest(input) + err := req.Send() + return out, err +} + +const opDeleteDhcpOptions = "DeleteDhcpOptions" + +// DeleteDhcpOptionsRequest generates a request for the DeleteDhcpOptions operation. +func (c *EC2) DeleteDhcpOptionsRequest(input *DeleteDhcpOptionsInput) (req *request.Request, output *DeleteDhcpOptionsOutput) { + op := &request.Operation{ + Name: opDeleteDhcpOptions, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteDhcpOptionsInput{} + } + + req = c.newRequest(op, input, output) + output = &DeleteDhcpOptionsOutput{} + req.Data = output + return +} + +// Deletes the specified set of DHCP options. You must disassociate the set +// of DHCP options before you can delete it. You can disassociate the set of +// DHCP options by associating either a new set of options or the default set +// of options with the VPC. +func (c *EC2) DeleteDhcpOptions(input *DeleteDhcpOptionsInput) (*DeleteDhcpOptionsOutput, error) { + req, out := c.DeleteDhcpOptionsRequest(input) + err := req.Send() + return out, err +} + +const opDeleteFlowLogs = "DeleteFlowLogs" + +// DeleteFlowLogsRequest generates a request for the DeleteFlowLogs operation. 
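+//
+// Unlike the DeleteFlowLogs wrapper below, the returned request is not sent
+// until Send is called, so callers can inspect or adjust it first. A sketch
+// (the flow log ID is a placeholder):
+//
+//	req, out := svc.DeleteFlowLogsRequest(&ec2.DeleteFlowLogsInput{
+//		FlowLogIds: []*string{aws.String("fl-1a2b3c4d")},
+//	})
+//	if err := req.Send(); err == nil {
+//		fmt.Println(out) // out is populated once Send succeeds
+//	}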
+func (c *EC2) DeleteFlowLogsRequest(input *DeleteFlowLogsInput) (req *request.Request, output *DeleteFlowLogsOutput) { + op := &request.Operation{ + Name: opDeleteFlowLogs, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteFlowLogsInput{} + } + + req = c.newRequest(op, input, output) + output = &DeleteFlowLogsOutput{} + req.Data = output + return +} + +// Deletes one or more flow logs. +func (c *EC2) DeleteFlowLogs(input *DeleteFlowLogsInput) (*DeleteFlowLogsOutput, error) { + req, out := c.DeleteFlowLogsRequest(input) + err := req.Send() + return out, err +} + +const opDeleteInternetGateway = "DeleteInternetGateway" + +// DeleteInternetGatewayRequest generates a request for the DeleteInternetGateway operation. +func (c *EC2) DeleteInternetGatewayRequest(input *DeleteInternetGatewayInput) (req *request.Request, output *DeleteInternetGatewayOutput) { + op := &request.Operation{ + Name: opDeleteInternetGateway, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteInternetGatewayInput{} + } + + req = c.newRequest(op, input, output) + output = &DeleteInternetGatewayOutput{} + req.Data = output + return +} + +// Deletes the specified Internet gateway. You must detach the Internet gateway +// from the VPC before you can delete it. +func (c *EC2) DeleteInternetGateway(input *DeleteInternetGatewayInput) (*DeleteInternetGatewayOutput, error) { + req, out := c.DeleteInternetGatewayRequest(input) + err := req.Send() + return out, err +} + +const opDeleteKeyPair = "DeleteKeyPair" + +// DeleteKeyPairRequest generates a request for the DeleteKeyPair operation. +func (c *EC2) DeleteKeyPairRequest(input *DeleteKeyPairInput) (req *request.Request, output *DeleteKeyPairOutput) { + op := &request.Operation{ + Name: opDeleteKeyPair, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteKeyPairInput{} + } + + req = c.newRequest(op, input, output) + output = &DeleteKeyPairOutput{} + req.Data = output + return +} + +// Deletes the specified key pair, by removing the public key from Amazon EC2. +func (c *EC2) DeleteKeyPair(input *DeleteKeyPairInput) (*DeleteKeyPairOutput, error) { + req, out := c.DeleteKeyPairRequest(input) + err := req.Send() + return out, err +} + +const opDeleteNatGateway = "DeleteNatGateway" + +// DeleteNatGatewayRequest generates a request for the DeleteNatGateway operation. +func (c *EC2) DeleteNatGatewayRequest(input *DeleteNatGatewayInput) (req *request.Request, output *DeleteNatGatewayOutput) { + op := &request.Operation{ + Name: opDeleteNatGateway, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteNatGatewayInput{} + } + + req = c.newRequest(op, input, output) + output = &DeleteNatGatewayOutput{} + req.Data = output + return +} + +// Deletes the specified NAT gateway. Deleting a NAT gateway disassociates its +// Elastic IP address, but does not release the address from your account. Deleting +// a NAT gateway does not delete any NAT gateway routes in your route tables. +func (c *EC2) DeleteNatGateway(input *DeleteNatGatewayInput) (*DeleteNatGatewayOutput, error) { + req, out := c.DeleteNatGatewayRequest(input) + err := req.Send() + return out, err +} + +const opDeleteNetworkAcl = "DeleteNetworkAcl" + +// DeleteNetworkAclRequest generates a request for the DeleteNetworkAcl operation. 
+func (c *EC2) DeleteNetworkAclRequest(input *DeleteNetworkAclInput) (req *request.Request, output *DeleteNetworkAclOutput) { + op := &request.Operation{ + Name: opDeleteNetworkAcl, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteNetworkAclInput{} + } + + req = c.newRequest(op, input, output) + output = &DeleteNetworkAclOutput{} + req.Data = output + return +} + +// Deletes the specified network ACL. You can't delete the ACL if it's associated +// with any subnets. You can't delete the default network ACL. +func (c *EC2) DeleteNetworkAcl(input *DeleteNetworkAclInput) (*DeleteNetworkAclOutput, error) { + req, out := c.DeleteNetworkAclRequest(input) + err := req.Send() + return out, err +} + +const opDeleteNetworkAclEntry = "DeleteNetworkAclEntry" + +// DeleteNetworkAclEntryRequest generates a request for the DeleteNetworkAclEntry operation. +func (c *EC2) DeleteNetworkAclEntryRequest(input *DeleteNetworkAclEntryInput) (req *request.Request, output *DeleteNetworkAclEntryOutput) { + op := &request.Operation{ + Name: opDeleteNetworkAclEntry, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteNetworkAclEntryInput{} + } + + req = c.newRequest(op, input, output) + output = &DeleteNetworkAclEntryOutput{} + req.Data = output + return +} + +// Deletes the specified ingress or egress entry (rule) from the specified network +// ACL. +func (c *EC2) DeleteNetworkAclEntry(input *DeleteNetworkAclEntryInput) (*DeleteNetworkAclEntryOutput, error) { + req, out := c.DeleteNetworkAclEntryRequest(input) + err := req.Send() + return out, err +} + +const opDeleteNetworkInterface = "DeleteNetworkInterface" + +// DeleteNetworkInterfaceRequest generates a request for the DeleteNetworkInterface operation. +func (c *EC2) DeleteNetworkInterfaceRequest(input *DeleteNetworkInterfaceInput) (req *request.Request, output *DeleteNetworkInterfaceOutput) { + op := &request.Operation{ + Name: opDeleteNetworkInterface, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteNetworkInterfaceInput{} + } + + req = c.newRequest(op, input, output) + output = &DeleteNetworkInterfaceOutput{} + req.Data = output + return +} + +// Deletes the specified network interface. You must detach the network interface +// before you can delete it. +func (c *EC2) DeleteNetworkInterface(input *DeleteNetworkInterfaceInput) (*DeleteNetworkInterfaceOutput, error) { + req, out := c.DeleteNetworkInterfaceRequest(input) + err := req.Send() + return out, err +} + +const opDeletePlacementGroup = "DeletePlacementGroup" + +// DeletePlacementGroupRequest generates a request for the DeletePlacementGroup operation. +func (c *EC2) DeletePlacementGroupRequest(input *DeletePlacementGroupInput) (req *request.Request, output *DeletePlacementGroupOutput) { + op := &request.Operation{ + Name: opDeletePlacementGroup, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeletePlacementGroupInput{} + } + + req = c.newRequest(op, input, output) + output = &DeletePlacementGroupOutput{} + req.Data = output + return +} + +// Deletes the specified placement group. You must terminate all instances in +// the placement group before you can delete the placement group. For more information +// about placement groups and cluster instances, see Cluster Instances (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using_cluster_computing.html) +// in the Amazon Elastic Compute Cloud User Guide. 
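+//
+// A minimal sketch (the group name is a placeholder):
+//
+//	_, err := svc.DeletePlacementGroup(&ec2.DeletePlacementGroupInput{
+//		GroupName: aws.String("example-group"),
+//	})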
+func (c *EC2) DeletePlacementGroup(input *DeletePlacementGroupInput) (*DeletePlacementGroupOutput, error) { + req, out := c.DeletePlacementGroupRequest(input) + err := req.Send() + return out, err +} + +const opDeleteRoute = "DeleteRoute" + +// DeleteRouteRequest generates a request for the DeleteRoute operation. +func (c *EC2) DeleteRouteRequest(input *DeleteRouteInput) (req *request.Request, output *DeleteRouteOutput) { + op := &request.Operation{ + Name: opDeleteRoute, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteRouteInput{} + } + + req = c.newRequest(op, input, output) + output = &DeleteRouteOutput{} + req.Data = output + return +} + +// Deletes the specified route from the specified route table. +func (c *EC2) DeleteRoute(input *DeleteRouteInput) (*DeleteRouteOutput, error) { + req, out := c.DeleteRouteRequest(input) + err := req.Send() + return out, err +} + +const opDeleteRouteTable = "DeleteRouteTable" + +// DeleteRouteTableRequest generates a request for the DeleteRouteTable operation. +func (c *EC2) DeleteRouteTableRequest(input *DeleteRouteTableInput) (req *request.Request, output *DeleteRouteTableOutput) { + op := &request.Operation{ + Name: opDeleteRouteTable, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteRouteTableInput{} + } + + req = c.newRequest(op, input, output) + output = &DeleteRouteTableOutput{} + req.Data = output + return +} + +// Deletes the specified route table. You must disassociate the route table +// from any subnets before you can delete it. You can't delete the main route +// table. +func (c *EC2) DeleteRouteTable(input *DeleteRouteTableInput) (*DeleteRouteTableOutput, error) { + req, out := c.DeleteRouteTableRequest(input) + err := req.Send() + return out, err +} + +const opDeleteSecurityGroup = "DeleteSecurityGroup" + +// DeleteSecurityGroupRequest generates a request for the DeleteSecurityGroup operation. +func (c *EC2) DeleteSecurityGroupRequest(input *DeleteSecurityGroupInput) (req *request.Request, output *DeleteSecurityGroupOutput) { + op := &request.Operation{ + Name: opDeleteSecurityGroup, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteSecurityGroupInput{} + } + + req = c.newRequest(op, input, output) + output = &DeleteSecurityGroupOutput{} + req.Data = output + return +} + +// Deletes a security group. +// +// If you attempt to delete a security group that is associated with an instance, +// or is referenced by another security group, the operation fails with InvalidGroup.InUse +// in EC2-Classic or DependencyViolation in EC2-VPC. +func (c *EC2) DeleteSecurityGroup(input *DeleteSecurityGroupInput) (*DeleteSecurityGroupOutput, error) { + req, out := c.DeleteSecurityGroupRequest(input) + err := req.Send() + return out, err +} + +const opDeleteSnapshot = "DeleteSnapshot" + +// DeleteSnapshotRequest generates a request for the DeleteSnapshot operation. +func (c *EC2) DeleteSnapshotRequest(input *DeleteSnapshotInput) (req *request.Request, output *DeleteSnapshotOutput) { + op := &request.Operation{ + Name: opDeleteSnapshot, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteSnapshotInput{} + } + + req = c.newRequest(op, input, output) + output = &DeleteSnapshotOutput{} + req.Data = output + return +} + +// Deletes the specified snapshot. 
+// +// When you make periodic snapshots of a volume, the snapshots are incremental, +// and only the blocks on the device that have changed since your last snapshot +// are saved in the new snapshot. When you delete a snapshot, only the data +// not needed for any other snapshot is removed. So regardless of which prior +// snapshots have been deleted, all active snapshots will have access to all +// the information needed to restore the volume. +// +// You cannot delete a snapshot of the root device of an EBS volume used by +// a registered AMI. You must first de-register the AMI before you can delete +// the snapshot. +// +// For more information, see Deleting an Amazon EBS Snapshot (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-deleting-snapshot.html) +// in the Amazon Elastic Compute Cloud User Guide. +func (c *EC2) DeleteSnapshot(input *DeleteSnapshotInput) (*DeleteSnapshotOutput, error) { + req, out := c.DeleteSnapshotRequest(input) + err := req.Send() + return out, err +} + +const opDeleteSpotDatafeedSubscription = "DeleteSpotDatafeedSubscription" + +// DeleteSpotDatafeedSubscriptionRequest generates a request for the DeleteSpotDatafeedSubscription operation. +func (c *EC2) DeleteSpotDatafeedSubscriptionRequest(input *DeleteSpotDatafeedSubscriptionInput) (req *request.Request, output *DeleteSpotDatafeedSubscriptionOutput) { + op := &request.Operation{ + Name: opDeleteSpotDatafeedSubscription, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteSpotDatafeedSubscriptionInput{} + } + + req = c.newRequest(op, input, output) + output = &DeleteSpotDatafeedSubscriptionOutput{} + req.Data = output + return +} + +// Deletes the data feed for Spot instances. +func (c *EC2) DeleteSpotDatafeedSubscription(input *DeleteSpotDatafeedSubscriptionInput) (*DeleteSpotDatafeedSubscriptionOutput, error) { + req, out := c.DeleteSpotDatafeedSubscriptionRequest(input) + err := req.Send() + return out, err +} + +const opDeleteSubnet = "DeleteSubnet" + +// DeleteSubnetRequest generates a request for the DeleteSubnet operation. +func (c *EC2) DeleteSubnetRequest(input *DeleteSubnetInput) (req *request.Request, output *DeleteSubnetOutput) { + op := &request.Operation{ + Name: opDeleteSubnet, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteSubnetInput{} + } + + req = c.newRequest(op, input, output) + output = &DeleteSubnetOutput{} + req.Data = output + return +} + +// Deletes the specified subnet. You must terminate all running instances in +// the subnet before you can delete the subnet. +func (c *EC2) DeleteSubnet(input *DeleteSubnetInput) (*DeleteSubnetOutput, error) { + req, out := c.DeleteSubnetRequest(input) + err := req.Send() + return out, err +} + +const opDeleteTags = "DeleteTags" + +// DeleteTagsRequest generates a request for the DeleteTags operation. +func (c *EC2) DeleteTagsRequest(input *DeleteTagsInput) (req *request.Request, output *DeleteTagsOutput) { + op := &request.Operation{ + Name: opDeleteTags, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteTagsInput{} + } + + req = c.newRequest(op, input, output) + output = &DeleteTagsOutput{} + req.Data = output + return +} + +// Deletes the specified set of tags from the specified set of resources. This +// call is designed to follow a DescribeTags request. +// +// For more information about tags, see Tagging Your Resources (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_Tags.html) +// in the Amazon Elastic Compute Cloud User Guide. 
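+//
+// A minimal sketch that deletes one tag key from one instance (both IDs are
+// placeholders). Omitting a tag's Value deletes the key regardless of its
+// current value:
+//
+//	_, err := svc.DeleteTags(&ec2.DeleteTagsInput{
+//		Resources: []*string{aws.String("i-1a2b3c4d")},
+//		Tags:      []*ec2.Tag{{Key: aws.String("Stack")}},
+//	})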
+func (c *EC2) DeleteTags(input *DeleteTagsInput) (*DeleteTagsOutput, error) { + req, out := c.DeleteTagsRequest(input) + err := req.Send() + return out, err +} + +const opDeleteVolume = "DeleteVolume" + +// DeleteVolumeRequest generates a request for the DeleteVolume operation. +func (c *EC2) DeleteVolumeRequest(input *DeleteVolumeInput) (req *request.Request, output *DeleteVolumeOutput) { + op := &request.Operation{ + Name: opDeleteVolume, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteVolumeInput{} + } + + req = c.newRequest(op, input, output) + output = &DeleteVolumeOutput{} + req.Data = output + return +} + +// Deletes the specified EBS volume. The volume must be in the available state +// (not attached to an instance). +// +// The volume may remain in the deleting state for several minutes. +// +// For more information, see Deleting an Amazon EBS Volume (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-deleting-volume.html) +// in the Amazon Elastic Compute Cloud User Guide. +func (c *EC2) DeleteVolume(input *DeleteVolumeInput) (*DeleteVolumeOutput, error) { + req, out := c.DeleteVolumeRequest(input) + err := req.Send() + return out, err +} + +const opDeleteVpc = "DeleteVpc" + +// DeleteVpcRequest generates a request for the DeleteVpc operation. +func (c *EC2) DeleteVpcRequest(input *DeleteVpcInput) (req *request.Request, output *DeleteVpcOutput) { + op := &request.Operation{ + Name: opDeleteVpc, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteVpcInput{} + } + + req = c.newRequest(op, input, output) + output = &DeleteVpcOutput{} + req.Data = output + return +} + +// Deletes the specified VPC. You must detach or delete all gateways and resources +// that are associated with the VPC before you can delete it. For example, you +// must terminate all instances running in the VPC, delete all security groups +// associated with the VPC (except the default one), delete all route tables +// associated with the VPC (except the default one), and so on. +func (c *EC2) DeleteVpc(input *DeleteVpcInput) (*DeleteVpcOutput, error) { + req, out := c.DeleteVpcRequest(input) + err := req.Send() + return out, err +} + +const opDeleteVpcEndpoints = "DeleteVpcEndpoints" + +// DeleteVpcEndpointsRequest generates a request for the DeleteVpcEndpoints operation. +func (c *EC2) DeleteVpcEndpointsRequest(input *DeleteVpcEndpointsInput) (req *request.Request, output *DeleteVpcEndpointsOutput) { + op := &request.Operation{ + Name: opDeleteVpcEndpoints, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteVpcEndpointsInput{} + } + + req = c.newRequest(op, input, output) + output = &DeleteVpcEndpointsOutput{} + req.Data = output + return +} + +// Deletes one or more specified VPC endpoints. Deleting the endpoint also deletes +// the endpoint routes in the route tables that were associated with the endpoint. +func (c *EC2) DeleteVpcEndpoints(input *DeleteVpcEndpointsInput) (*DeleteVpcEndpointsOutput, error) { + req, out := c.DeleteVpcEndpointsRequest(input) + err := req.Send() + return out, err +} + +const opDeleteVpcPeeringConnection = "DeleteVpcPeeringConnection" + +// DeleteVpcPeeringConnectionRequest generates a request for the DeleteVpcPeeringConnection operation. 
+func (c *EC2) DeleteVpcPeeringConnectionRequest(input *DeleteVpcPeeringConnectionInput) (req *request.Request, output *DeleteVpcPeeringConnectionOutput) { + op := &request.Operation{ + Name: opDeleteVpcPeeringConnection, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteVpcPeeringConnectionInput{} + } + + req = c.newRequest(op, input, output) + output = &DeleteVpcPeeringConnectionOutput{} + req.Data = output + return +} + +// Deletes a VPC peering connection. Either the owner of the requester VPC or +// the owner of the peer VPC can delete the VPC peering connection if it's in +// the active state. The owner of the requester VPC can delete a VPC peering +// connection in the pending-acceptance state. +func (c *EC2) DeleteVpcPeeringConnection(input *DeleteVpcPeeringConnectionInput) (*DeleteVpcPeeringConnectionOutput, error) { + req, out := c.DeleteVpcPeeringConnectionRequest(input) + err := req.Send() + return out, err +} + +const opDeleteVpnConnection = "DeleteVpnConnection" + +// DeleteVpnConnectionRequest generates a request for the DeleteVpnConnection operation. +func (c *EC2) DeleteVpnConnectionRequest(input *DeleteVpnConnectionInput) (req *request.Request, output *DeleteVpnConnectionOutput) { + op := &request.Operation{ + Name: opDeleteVpnConnection, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteVpnConnectionInput{} + } + + req = c.newRequest(op, input, output) + output = &DeleteVpnConnectionOutput{} + req.Data = output + return +} + +// Deletes the specified VPN connection. +// +// If you're deleting the VPC and its associated components, we recommend that +// you detach the virtual private gateway from the VPC and delete the VPC before +// deleting the VPN connection. If you believe that the tunnel credentials for +// your VPN connection have been compromised, you can delete the VPN connection +// and create a new one that has new keys, without needing to delete the VPC +// or virtual private gateway. If you create a new VPN connection, you must +// reconfigure the customer gateway using the new configuration information +// returned with the new VPN connection ID. +func (c *EC2) DeleteVpnConnection(input *DeleteVpnConnectionInput) (*DeleteVpnConnectionOutput, error) { + req, out := c.DeleteVpnConnectionRequest(input) + err := req.Send() + return out, err +} + +const opDeleteVpnConnectionRoute = "DeleteVpnConnectionRoute" + +// DeleteVpnConnectionRouteRequest generates a request for the DeleteVpnConnectionRoute operation. +func (c *EC2) DeleteVpnConnectionRouteRequest(input *DeleteVpnConnectionRouteInput) (req *request.Request, output *DeleteVpnConnectionRouteOutput) { + op := &request.Operation{ + Name: opDeleteVpnConnectionRoute, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteVpnConnectionRouteInput{} + } + + req = c.newRequest(op, input, output) + output = &DeleteVpnConnectionRouteOutput{} + req.Data = output + return +} + +// Deletes the specified static route associated with a VPN connection between +// an existing virtual private gateway and a VPN customer gateway. The static +// route allows traffic to be routed from the virtual private gateway to the +// VPN customer gateway. 
+func (c *EC2) DeleteVpnConnectionRoute(input *DeleteVpnConnectionRouteInput) (*DeleteVpnConnectionRouteOutput, error) { + req, out := c.DeleteVpnConnectionRouteRequest(input) + err := req.Send() + return out, err +} + +const opDeleteVpnGateway = "DeleteVpnGateway" + +// DeleteVpnGatewayRequest generates a request for the DeleteVpnGateway operation. +func (c *EC2) DeleteVpnGatewayRequest(input *DeleteVpnGatewayInput) (req *request.Request, output *DeleteVpnGatewayOutput) { + op := &request.Operation{ + Name: opDeleteVpnGateway, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteVpnGatewayInput{} + } + + req = c.newRequest(op, input, output) + output = &DeleteVpnGatewayOutput{} + req.Data = output + return +} + +// Deletes the specified virtual private gateway. We recommend that before you +// delete a virtual private gateway, you detach it from the VPC and delete the +// VPN connection. Note that you don't need to delete the virtual private gateway +// if you plan to delete and recreate the VPN connection between your VPC and +// your network. +func (c *EC2) DeleteVpnGateway(input *DeleteVpnGatewayInput) (*DeleteVpnGatewayOutput, error) { + req, out := c.DeleteVpnGatewayRequest(input) + err := req.Send() + return out, err +} + +const opDeregisterImage = "DeregisterImage" + +// DeregisterImageRequest generates a request for the DeregisterImage operation. +func (c *EC2) DeregisterImageRequest(input *DeregisterImageInput) (req *request.Request, output *DeregisterImageOutput) { + op := &request.Operation{ + Name: opDeregisterImage, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeregisterImageInput{} + } + + req = c.newRequest(op, input, output) + output = &DeregisterImageOutput{} + req.Data = output + return +} + +// Deregisters the specified AMI. After you deregister an AMI, it can't be used +// to launch new instances. +// +// This command does not delete the AMI. +func (c *EC2) DeregisterImage(input *DeregisterImageInput) (*DeregisterImageOutput, error) { + req, out := c.DeregisterImageRequest(input) + err := req.Send() + return out, err +} + +const opDescribeAccountAttributes = "DescribeAccountAttributes" + +// DescribeAccountAttributesRequest generates a request for the DescribeAccountAttributes operation. +func (c *EC2) DescribeAccountAttributesRequest(input *DescribeAccountAttributesInput) (req *request.Request, output *DescribeAccountAttributesOutput) { + op := &request.Operation{ + Name: opDescribeAccountAttributes, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeAccountAttributesInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeAccountAttributesOutput{} + req.Data = output + return +} + +// Describes attributes of your AWS account. The following are the supported +// account attributes: +// +// supported-platforms: Indicates whether your account can launch instances +// into EC2-Classic and EC2-VPC, or only into EC2-VPC. +// +// default-vpc: The ID of the default VPC for your account, or none. +// +// max-instances: The maximum number of On-Demand instances that you can +// run. +// +// vpc-max-security-groups-per-interface: The maximum number of security +// groups that you can assign to a network interface. +// +// max-elastic-ips: The maximum number of Elastic IP addresses that you can +// allocate for use with EC2-Classic. +// +// vpc-max-elastic-ips: The maximum number of Elastic IP addresses that you +// can allocate for use with EC2-VPC. 
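+//
+// For example, to query a single attribute from the list above (svc is an
+// assumed, already configured *ec2.EC2 client):
+//
+//	resp, err := svc.DescribeAccountAttributes(&ec2.DescribeAccountAttributesInput{
+//		AttributeNames: []*string{aws.String("supported-platforms")},
+//	})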
+func (c *EC2) DescribeAccountAttributes(input *DescribeAccountAttributesInput) (*DescribeAccountAttributesOutput, error) { + req, out := c.DescribeAccountAttributesRequest(input) + err := req.Send() + return out, err +} + +const opDescribeAddresses = "DescribeAddresses" + +// DescribeAddressesRequest generates a request for the DescribeAddresses operation. +func (c *EC2) DescribeAddressesRequest(input *DescribeAddressesInput) (req *request.Request, output *DescribeAddressesOutput) { + op := &request.Operation{ + Name: opDescribeAddresses, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeAddressesInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeAddressesOutput{} + req.Data = output + return +} + +// Describes one or more of your Elastic IP addresses. +// +// An Elastic IP address is for use in either the EC2-Classic platform or in +// a VPC. For more information, see Elastic IP Addresses (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/elastic-ip-addresses-eip.html) +// in the Amazon Elastic Compute Cloud User Guide. +func (c *EC2) DescribeAddresses(input *DescribeAddressesInput) (*DescribeAddressesOutput, error) { + req, out := c.DescribeAddressesRequest(input) + err := req.Send() + return out, err +} + +const opDescribeAvailabilityZones = "DescribeAvailabilityZones" + +// DescribeAvailabilityZonesRequest generates a request for the DescribeAvailabilityZones operation. +func (c *EC2) DescribeAvailabilityZonesRequest(input *DescribeAvailabilityZonesInput) (req *request.Request, output *DescribeAvailabilityZonesOutput) { + op := &request.Operation{ + Name: opDescribeAvailabilityZones, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeAvailabilityZonesInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeAvailabilityZonesOutput{} + req.Data = output + return +} + +// Describes one or more of the Availability Zones that are available to you. +// The results include zones only for the region you're currently using. If +// there is an event impacting an Availability Zone, you can use this request +// to view the state and any provided message for that Availability Zone. +// +// For more information, see Regions and Availability Zones (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-regions-availability-zones.html) +// in the Amazon Elastic Compute Cloud User Guide. +func (c *EC2) DescribeAvailabilityZones(input *DescribeAvailabilityZonesInput) (*DescribeAvailabilityZonesOutput, error) { + req, out := c.DescribeAvailabilityZonesRequest(input) + err := req.Send() + return out, err +} + +const opDescribeBundleTasks = "DescribeBundleTasks" + +// DescribeBundleTasksRequest generates a request for the DescribeBundleTasks operation. +func (c *EC2) DescribeBundleTasksRequest(input *DescribeBundleTasksInput) (req *request.Request, output *DescribeBundleTasksOutput) { + op := &request.Operation{ + Name: opDescribeBundleTasks, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeBundleTasksInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeBundleTasksOutput{} + req.Data = output + return +} + +// Describes one or more of your bundling tasks. +// +// Completed bundle tasks are listed for only a limited time. If your bundle +// task is no longer in the list, you can still register an AMI from it. Just +// use RegisterImage with the Amazon S3 bucket name and image manifest name +// you provided to the bundle task. 
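+//
+// A sketch of that RegisterImage fallback (the bucket and manifest names are
+// placeholders):
+//
+//	_, err := svc.RegisterImage(&ec2.RegisterImageInput{
+//		Name:          aws.String("example-ami"),
+//		ImageLocation: aws.String("example-bucket/image.manifest.xml"),
+//	})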
+func (c *EC2) DescribeBundleTasks(input *DescribeBundleTasksInput) (*DescribeBundleTasksOutput, error) { + req, out := c.DescribeBundleTasksRequest(input) + err := req.Send() + return out, err +} + +const opDescribeClassicLinkInstances = "DescribeClassicLinkInstances" + +// DescribeClassicLinkInstancesRequest generates a request for the DescribeClassicLinkInstances operation. +func (c *EC2) DescribeClassicLinkInstancesRequest(input *DescribeClassicLinkInstancesInput) (req *request.Request, output *DescribeClassicLinkInstancesOutput) { + op := &request.Operation{ + Name: opDescribeClassicLinkInstances, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeClassicLinkInstancesInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeClassicLinkInstancesOutput{} + req.Data = output + return +} + +// Describes one or more of your linked EC2-Classic instances. This request +// only returns information about EC2-Classic instances linked to a VPC through +// ClassicLink; you cannot use this request to return information about other +// instances. +func (c *EC2) DescribeClassicLinkInstances(input *DescribeClassicLinkInstancesInput) (*DescribeClassicLinkInstancesOutput, error) { + req, out := c.DescribeClassicLinkInstancesRequest(input) + err := req.Send() + return out, err +} + +const opDescribeConversionTasks = "DescribeConversionTasks" + +// DescribeConversionTasksRequest generates a request for the DescribeConversionTasks operation. +func (c *EC2) DescribeConversionTasksRequest(input *DescribeConversionTasksInput) (req *request.Request, output *DescribeConversionTasksOutput) { + op := &request.Operation{ + Name: opDescribeConversionTasks, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeConversionTasksInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeConversionTasksOutput{} + req.Data = output + return +} + +// Describes one or more of your conversion tasks. For more information, see +// Using the Command Line Tools to Import Your Virtual Machine to Amazon EC2 +// (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/UploadingYourInstancesandVolumes.html) +// in the Amazon Elastic Compute Cloud User Guide. +func (c *EC2) DescribeConversionTasks(input *DescribeConversionTasksInput) (*DescribeConversionTasksOutput, error) { + req, out := c.DescribeConversionTasksRequest(input) + err := req.Send() + return out, err +} + +const opDescribeCustomerGateways = "DescribeCustomerGateways" + +// DescribeCustomerGatewaysRequest generates a request for the DescribeCustomerGateways operation. +func (c *EC2) DescribeCustomerGatewaysRequest(input *DescribeCustomerGatewaysInput) (req *request.Request, output *DescribeCustomerGatewaysOutput) { + op := &request.Operation{ + Name: opDescribeCustomerGateways, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeCustomerGatewaysInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeCustomerGatewaysOutput{} + req.Data = output + return +} + +// Describes one or more of your VPN customer gateways. +// +// For more information about VPN customer gateways, see Adding a Hardware +// Virtual Private Gateway to Your VPC (http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_VPN.html) +// in the Amazon Virtual Private Cloud User Guide. 
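Most of these Describe operations accept EC2 filters. A sketch of filtering the DescribeCustomerGateways call documented above; the "state" filter name follows the standard EC2 filter conventions, and the region is a placeholder:

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/ec2"
)

func main() {
	svc := ec2.New(session.New(&aws.Config{Region: aws.String("us-east-1")}))

	// Only return customer gateways currently in the "available" state.
	resp, err := svc.DescribeCustomerGateways(&ec2.DescribeCustomerGatewaysInput{
		Filters: []*ec2.Filter{{
			Name:   aws.String("state"),
			Values: []*string{aws.String("available")},
		}},
	})
	if err != nil {
		log.Fatal(err)
	}
	for _, cgw := range resp.CustomerGateways {
		fmt.Println(aws.StringValue(cgw.CustomerGatewayId), aws.StringValue(cgw.IpAddress))
	}
}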
+func (c *EC2) DescribeCustomerGateways(input *DescribeCustomerGatewaysInput) (*DescribeCustomerGatewaysOutput, error) { + req, out := c.DescribeCustomerGatewaysRequest(input) + err := req.Send() + return out, err +} + +const opDescribeDhcpOptions = "DescribeDhcpOptions" + +// DescribeDhcpOptionsRequest generates a request for the DescribeDhcpOptions operation. +func (c *EC2) DescribeDhcpOptionsRequest(input *DescribeDhcpOptionsInput) (req *request.Request, output *DescribeDhcpOptionsOutput) { + op := &request.Operation{ + Name: opDescribeDhcpOptions, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeDhcpOptionsInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeDhcpOptionsOutput{} + req.Data = output + return +} + +// Describes one or more of your DHCP options sets. +// +// For more information about DHCP options sets, see DHCP Options Sets (http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_DHCP_Options.html) +// in the Amazon Virtual Private Cloud User Guide. +func (c *EC2) DescribeDhcpOptions(input *DescribeDhcpOptionsInput) (*DescribeDhcpOptionsOutput, error) { + req, out := c.DescribeDhcpOptionsRequest(input) + err := req.Send() + return out, err +} + +const opDescribeExportTasks = "DescribeExportTasks" + +// DescribeExportTasksRequest generates a request for the DescribeExportTasks operation. +func (c *EC2) DescribeExportTasksRequest(input *DescribeExportTasksInput) (req *request.Request, output *DescribeExportTasksOutput) { + op := &request.Operation{ + Name: opDescribeExportTasks, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeExportTasksInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeExportTasksOutput{} + req.Data = output + return +} + +// Describes one or more of your export tasks. +func (c *EC2) DescribeExportTasks(input *DescribeExportTasksInput) (*DescribeExportTasksOutput, error) { + req, out := c.DescribeExportTasksRequest(input) + err := req.Send() + return out, err +} + +const opDescribeFlowLogs = "DescribeFlowLogs" + +// DescribeFlowLogsRequest generates a request for the DescribeFlowLogs operation. +func (c *EC2) DescribeFlowLogsRequest(input *DescribeFlowLogsInput) (req *request.Request, output *DescribeFlowLogsOutput) { + op := &request.Operation{ + Name: opDescribeFlowLogs, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeFlowLogsInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeFlowLogsOutput{} + req.Data = output + return +} + +// Describes one or more flow logs. To view the information in your flow logs +// (the log streams for the network interfaces), you must use the CloudWatch +// Logs console or the CloudWatch Logs API. +func (c *EC2) DescribeFlowLogs(input *DescribeFlowLogsInput) (*DescribeFlowLogsOutput, error) { + req, out := c.DescribeFlowLogsRequest(input) + err := req.Send() + return out, err +} + +const opDescribeHosts = "DescribeHosts" + +// DescribeHostsRequest generates a request for the DescribeHosts operation. +func (c *EC2) DescribeHostsRequest(input *DescribeHostsInput) (req *request.Request, output *DescribeHostsOutput) { + op := &request.Operation{ + Name: opDescribeHosts, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeHostsInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeHostsOutput{} + req.Data = output + return +} + +// Describes one or more of your Dedicated hosts. 
+//
+// The results describe only the Dedicated hosts in the region you're currently
+// using. All listed instances consume capacity on your Dedicated host. Dedicated
+// hosts that have recently been released will be listed with the state released.
+func (c *EC2) DescribeHosts(input *DescribeHostsInput) (*DescribeHostsOutput, error) {
+	req, out := c.DescribeHostsRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+const opDescribeIdFormat = "DescribeIdFormat"
+
+// DescribeIdFormatRequest generates a request for the DescribeIdFormat operation.
+func (c *EC2) DescribeIdFormatRequest(input *DescribeIdFormatInput) (req *request.Request, output *DescribeIdFormatOutput) {
+	op := &request.Operation{
+		Name:       opDescribeIdFormat,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &DescribeIdFormatInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &DescribeIdFormatOutput{}
+	req.Data = output
+	return
+}
+
+// Describes the ID format settings for your resources on a per-region basis,
+// for example, to view which resource types are enabled for longer IDs. This
+// request only returns information about resource types whose ID formats can
+// be modified; it does not return information about other resource types.
+//
+// The following resource types support longer IDs: instance | reservation.
+//
+// These settings apply to the IAM user who makes the request; they do not
+// apply to the entire AWS account. An IAM user has the same settings as the
+// root user unless the user explicitly overrides them by running the ModifyIdFormat
+// command. Resources created with longer IDs are visible to all IAM users,
+// regardless of these settings and provided that they have permission to use
+// the relevant Describe command for the resource type.
+func (c *EC2) DescribeIdFormat(input *DescribeIdFormatInput) (*DescribeIdFormatOutput, error) {
+	req, out := c.DescribeIdFormatRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+const opDescribeImageAttribute = "DescribeImageAttribute"
+
+// DescribeImageAttributeRequest generates a request for the DescribeImageAttribute operation.
+func (c *EC2) DescribeImageAttributeRequest(input *DescribeImageAttributeInput) (req *request.Request, output *DescribeImageAttributeOutput) {
+	op := &request.Operation{
+		Name:       opDescribeImageAttribute,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &DescribeImageAttributeInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &DescribeImageAttributeOutput{}
+	req.Data = output
+	return
+}
+
+// Describes the specified attribute of the specified AMI. You can specify only
+// one attribute at a time.
+func (c *EC2) DescribeImageAttribute(input *DescribeImageAttributeInput) (*DescribeImageAttributeOutput, error) {
+	req, out := c.DescribeImageAttributeRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+const opDescribeImages = "DescribeImages"
+
+// DescribeImagesRequest generates a request for the DescribeImages operation.
+func (c *EC2) DescribeImagesRequest(input *DescribeImagesInput) (req *request.Request, output *DescribeImagesOutput) {
+	op := &request.Operation{
+		Name:       opDescribeImages,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &DescribeImagesInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &DescribeImagesOutput{}
+	req.Data = output
+	return
+}
+
+// Describes one or more of the images (AMIs, AKIs, and ARIs) available to you.
+// Images available to you include public images, private images that you own,
+// and private images owned by other AWS accounts but for which you have explicit
+// launch permissions.
+//
+// Deregistered images are included in the returned results for an unspecified
+// interval after deregistration.
+func (c *EC2) DescribeImages(input *DescribeImagesInput) (*DescribeImagesOutput, error) {
+	req, out := c.DescribeImagesRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+const opDescribeImportImageTasks = "DescribeImportImageTasks"
+
+// DescribeImportImageTasksRequest generates a request for the DescribeImportImageTasks operation.
+func (c *EC2) DescribeImportImageTasksRequest(input *DescribeImportImageTasksInput) (req *request.Request, output *DescribeImportImageTasksOutput) {
+	op := &request.Operation{
+		Name:       opDescribeImportImageTasks,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &DescribeImportImageTasksInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &DescribeImportImageTasksOutput{}
+	req.Data = output
+	return
+}
+
+// Displays details about import virtual machine or import snapshot tasks that
+// have already been created.
+func (c *EC2) DescribeImportImageTasks(input *DescribeImportImageTasksInput) (*DescribeImportImageTasksOutput, error) {
+	req, out := c.DescribeImportImageTasksRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+const opDescribeImportSnapshotTasks = "DescribeImportSnapshotTasks"
+
+// DescribeImportSnapshotTasksRequest generates a request for the DescribeImportSnapshotTasks operation.
+func (c *EC2) DescribeImportSnapshotTasksRequest(input *DescribeImportSnapshotTasksInput) (req *request.Request, output *DescribeImportSnapshotTasksOutput) {
+	op := &request.Operation{
+		Name:       opDescribeImportSnapshotTasks,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &DescribeImportSnapshotTasksInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &DescribeImportSnapshotTasksOutput{}
+	req.Data = output
+	return
+}
+
+// Describes your import snapshot tasks.
+func (c *EC2) DescribeImportSnapshotTasks(input *DescribeImportSnapshotTasksInput) (*DescribeImportSnapshotTasksOutput, error) {
+	req, out := c.DescribeImportSnapshotTasksRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+const opDescribeInstanceAttribute = "DescribeInstanceAttribute"
+
+// DescribeInstanceAttributeRequest generates a request for the DescribeInstanceAttribute operation.
+func (c *EC2) DescribeInstanceAttributeRequest(input *DescribeInstanceAttributeInput) (req *request.Request, output *DescribeInstanceAttributeOutput) {
+	op := &request.Operation{
+		Name:       opDescribeInstanceAttribute,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &DescribeInstanceAttributeInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &DescribeInstanceAttributeOutput{}
+	req.Data = output
+	return
+}
+
+// Describes the specified attribute of the specified instance. You can specify
+// only one attribute at a time.
Valid attribute values are: instanceType | +// kernel | ramdisk | userData | disableApiTermination | instanceInitiatedShutdownBehavior +// | rootDeviceName | blockDeviceMapping | productCodes | sourceDestCheck | +// groupSet | ebsOptimized | sriovNetSupport +func (c *EC2) DescribeInstanceAttribute(input *DescribeInstanceAttributeInput) (*DescribeInstanceAttributeOutput, error) { + req, out := c.DescribeInstanceAttributeRequest(input) + err := req.Send() + return out, err +} + +const opDescribeInstanceStatus = "DescribeInstanceStatus" + +// DescribeInstanceStatusRequest generates a request for the DescribeInstanceStatus operation. +func (c *EC2) DescribeInstanceStatusRequest(input *DescribeInstanceStatusInput) (req *request.Request, output *DescribeInstanceStatusOutput) { + op := &request.Operation{ + Name: opDescribeInstanceStatus, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeInstanceStatusInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeInstanceStatusOutput{} + req.Data = output + return +} + +// Describes the status of one or more instances. +// +// Instance status includes the following components: +// +// Status checks - Amazon EC2 performs status checks on running EC2 instances +// to identify hardware and software issues. For more information, see Status +// Checks for Your Instances (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/monitoring-system-instance-status-check.html) +// and Troubleshooting Instances with Failed Status Checks (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/TroubleshootingInstances.html) +// in the Amazon Elastic Compute Cloud User Guide. +// +// Scheduled events - Amazon EC2 can schedule events (such as reboot, stop, +// or terminate) for your instances related to hardware issues, software updates, +// or system maintenance. For more information, see Scheduled Events for Your +// Instances (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/monitoring-instances-status-check_sched.html) +// in the Amazon Elastic Compute Cloud User Guide. +// +// Instance state - You can manage your instances from the moment you launch +// them through their termination. For more information, see Instance Lifecycle +// (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-instance-lifecycle.html) +// in the Amazon Elastic Compute Cloud User Guide. +func (c *EC2) DescribeInstanceStatus(input *DescribeInstanceStatusInput) (*DescribeInstanceStatusOutput, error) { + req, out := c.DescribeInstanceStatusRequest(input) + err := req.Send() + return out, err +} + +func (c *EC2) DescribeInstanceStatusPages(input *DescribeInstanceStatusInput, fn func(p *DescribeInstanceStatusOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.DescribeInstanceStatusRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*DescribeInstanceStatusOutput), lastPage) + }) +} + +const opDescribeInstances = "DescribeInstances" + +// DescribeInstancesRequest generates a request for the DescribeInstances operation. 
+func (c *EC2) DescribeInstancesRequest(input *DescribeInstancesInput) (req *request.Request, output *DescribeInstancesOutput) { + op := &request.Operation{ + Name: opDescribeInstances, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeInstancesInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeInstancesOutput{} + req.Data = output + return +} + +// Describes one or more of your instances. +// +// If you specify one or more instance IDs, Amazon EC2 returns information +// for those instances. If you do not specify instance IDs, Amazon EC2 returns +// information for all relevant instances. If you specify an instance ID that +// is not valid, an error is returned. If you specify an instance that you do +// not own, it is not included in the returned results. +// +// Recently terminated instances might appear in the returned results. This +// interval is usually less than one hour. +func (c *EC2) DescribeInstances(input *DescribeInstancesInput) (*DescribeInstancesOutput, error) { + req, out := c.DescribeInstancesRequest(input) + err := req.Send() + return out, err +} + +func (c *EC2) DescribeInstancesPages(input *DescribeInstancesInput, fn func(p *DescribeInstancesOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.DescribeInstancesRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*DescribeInstancesOutput), lastPage) + }) +} + +const opDescribeInternetGateways = "DescribeInternetGateways" + +// DescribeInternetGatewaysRequest generates a request for the DescribeInternetGateways operation. +func (c *EC2) DescribeInternetGatewaysRequest(input *DescribeInternetGatewaysInput) (req *request.Request, output *DescribeInternetGatewaysOutput) { + op := &request.Operation{ + Name: opDescribeInternetGateways, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeInternetGatewaysInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeInternetGatewaysOutput{} + req.Data = output + return +} + +// Describes one or more of your Internet gateways. +func (c *EC2) DescribeInternetGateways(input *DescribeInternetGatewaysInput) (*DescribeInternetGatewaysOutput, error) { + req, out := c.DescribeInternetGatewaysRequest(input) + err := req.Send() + return out, err +} + +const opDescribeKeyPairs = "DescribeKeyPairs" + +// DescribeKeyPairsRequest generates a request for the DescribeKeyPairs operation. +func (c *EC2) DescribeKeyPairsRequest(input *DescribeKeyPairsInput) (req *request.Request, output *DescribeKeyPairsOutput) { + op := &request.Operation{ + Name: opDescribeKeyPairs, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeKeyPairsInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeKeyPairsOutput{} + req.Data = output + return +} + +// Describes one or more of your key pairs. +// +// For more information about key pairs, see Key Pairs (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-key-pairs.html) +// in the Amazon Elastic Compute Cloud User Guide. 
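Because DescribeInstances declares a Paginator, the generated DescribeInstancesPages wrapper above walks every result page for you. A minimal sketch, assuming a placeholder region and a standard instance-state filter:

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/ec2"
)

func main() {
	svc := ec2.New(session.New(&aws.Config{Region: aws.String("us-east-1")}))

	input := &ec2.DescribeInstancesInput{
		Filters: []*ec2.Filter{{
			Name:   aws.String("instance-state-name"),
			Values: []*string{aws.String("running")},
		}},
	}
	count := 0
	err := svc.DescribeInstancesPages(input, func(p *ec2.DescribeInstancesOutput, lastPage bool) bool {
		for _, r := range p.Reservations {
			count += len(r.Instances)
		}
		return true // true = keep fetching pages
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%d running instances\n", count)
}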
+func (c *EC2) DescribeKeyPairs(input *DescribeKeyPairsInput) (*DescribeKeyPairsOutput, error) {
+	req, out := c.DescribeKeyPairsRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+const opDescribeMovingAddresses = "DescribeMovingAddresses"
+
+// DescribeMovingAddressesRequest generates a request for the DescribeMovingAddresses operation.
+func (c *EC2) DescribeMovingAddressesRequest(input *DescribeMovingAddressesInput) (req *request.Request, output *DescribeMovingAddressesOutput) {
+	op := &request.Operation{
+		Name:       opDescribeMovingAddresses,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &DescribeMovingAddressesInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &DescribeMovingAddressesOutput{}
+	req.Data = output
+	return
+}
+
+// Describes your Elastic IP addresses that are being moved to the EC2-VPC platform,
+// or that are being restored to the EC2-Classic platform. This request does
+// not return information about any other Elastic IP addresses in your account.
+func (c *EC2) DescribeMovingAddresses(input *DescribeMovingAddressesInput) (*DescribeMovingAddressesOutput, error) {
+	req, out := c.DescribeMovingAddressesRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+const opDescribeNatGateways = "DescribeNatGateways"
+
+// DescribeNatGatewaysRequest generates a request for the DescribeNatGateways operation.
+func (c *EC2) DescribeNatGatewaysRequest(input *DescribeNatGatewaysInput) (req *request.Request, output *DescribeNatGatewaysOutput) {
+	op := &request.Operation{
+		Name:       opDescribeNatGateways,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &DescribeNatGatewaysInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &DescribeNatGatewaysOutput{}
+	req.Data = output
+	return
+}
+
+// Describes one or more of your NAT gateways.
+func (c *EC2) DescribeNatGateways(input *DescribeNatGatewaysInput) (*DescribeNatGatewaysOutput, error) {
+	req, out := c.DescribeNatGatewaysRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+const opDescribeNetworkAcls = "DescribeNetworkAcls"
+
+// DescribeNetworkAclsRequest generates a request for the DescribeNetworkAcls operation.
+func (c *EC2) DescribeNetworkAclsRequest(input *DescribeNetworkAclsInput) (req *request.Request, output *DescribeNetworkAclsOutput) {
+	op := &request.Operation{
+		Name:       opDescribeNetworkAcls,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &DescribeNetworkAclsInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &DescribeNetworkAclsOutput{}
+	req.Data = output
+	return
+}
+
+// Describes one or more of your network ACLs.
+//
+// For more information about network ACLs, see Network ACLs (http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_ACLs.html)
+// in the Amazon Virtual Private Cloud User Guide.
+func (c *EC2) DescribeNetworkAcls(input *DescribeNetworkAclsInput) (*DescribeNetworkAclsOutput, error) {
+	req, out := c.DescribeNetworkAclsRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+const opDescribeNetworkInterfaceAttribute = "DescribeNetworkInterfaceAttribute"
+
+// DescribeNetworkInterfaceAttributeRequest generates a request for the DescribeNetworkInterfaceAttribute operation.
+func (c *EC2) DescribeNetworkInterfaceAttributeRequest(input *DescribeNetworkInterfaceAttributeInput) (req *request.Request, output *DescribeNetworkInterfaceAttributeOutput) { + op := &request.Operation{ + Name: opDescribeNetworkInterfaceAttribute, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeNetworkInterfaceAttributeInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeNetworkInterfaceAttributeOutput{} + req.Data = output + return +} + +// Describes a network interface attribute. You can specify only one attribute +// at a time. +func (c *EC2) DescribeNetworkInterfaceAttribute(input *DescribeNetworkInterfaceAttributeInput) (*DescribeNetworkInterfaceAttributeOutput, error) { + req, out := c.DescribeNetworkInterfaceAttributeRequest(input) + err := req.Send() + return out, err +} + +const opDescribeNetworkInterfaces = "DescribeNetworkInterfaces" + +// DescribeNetworkInterfacesRequest generates a request for the DescribeNetworkInterfaces operation. +func (c *EC2) DescribeNetworkInterfacesRequest(input *DescribeNetworkInterfacesInput) (req *request.Request, output *DescribeNetworkInterfacesOutput) { + op := &request.Operation{ + Name: opDescribeNetworkInterfaces, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeNetworkInterfacesInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeNetworkInterfacesOutput{} + req.Data = output + return +} + +// Describes one or more of your network interfaces. +func (c *EC2) DescribeNetworkInterfaces(input *DescribeNetworkInterfacesInput) (*DescribeNetworkInterfacesOutput, error) { + req, out := c.DescribeNetworkInterfacesRequest(input) + err := req.Send() + return out, err +} + +const opDescribePlacementGroups = "DescribePlacementGroups" + +// DescribePlacementGroupsRequest generates a request for the DescribePlacementGroups operation. +func (c *EC2) DescribePlacementGroupsRequest(input *DescribePlacementGroupsInput) (req *request.Request, output *DescribePlacementGroupsOutput) { + op := &request.Operation{ + Name: opDescribePlacementGroups, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribePlacementGroupsInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribePlacementGroupsOutput{} + req.Data = output + return +} + +// Describes one or more of your placement groups. For more information about +// placement groups and cluster instances, see Cluster Instances (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using_cluster_computing.html) +// in the Amazon Elastic Compute Cloud User Guide. +func (c *EC2) DescribePlacementGroups(input *DescribePlacementGroupsInput) (*DescribePlacementGroupsOutput, error) { + req, out := c.DescribePlacementGroupsRequest(input) + err := req.Send() + return out, err +} + +const opDescribePrefixLists = "DescribePrefixLists" + +// DescribePrefixListsRequest generates a request for the DescribePrefixLists operation. 
+func (c *EC2) DescribePrefixListsRequest(input *DescribePrefixListsInput) (req *request.Request, output *DescribePrefixListsOutput) { + op := &request.Operation{ + Name: opDescribePrefixLists, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribePrefixListsInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribePrefixListsOutput{} + req.Data = output + return +} + +// Describes available AWS services in a prefix list format, which includes +// the prefix list name and prefix list ID of the service and the IP address +// range for the service. A prefix list ID is required for creating an outbound +// security group rule that allows traffic from a VPC to access an AWS service +// through a VPC endpoint. +func (c *EC2) DescribePrefixLists(input *DescribePrefixListsInput) (*DescribePrefixListsOutput, error) { + req, out := c.DescribePrefixListsRequest(input) + err := req.Send() + return out, err +} + +const opDescribeRegions = "DescribeRegions" + +// DescribeRegionsRequest generates a request for the DescribeRegions operation. +func (c *EC2) DescribeRegionsRequest(input *DescribeRegionsInput) (req *request.Request, output *DescribeRegionsOutput) { + op := &request.Operation{ + Name: opDescribeRegions, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeRegionsInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeRegionsOutput{} + req.Data = output + return +} + +// Describes one or more regions that are currently available to you. +// +// For a list of the regions supported by Amazon EC2, see Regions and Endpoints +// (http://docs.aws.amazon.com/general/latest/gr/rande.html#ec2_region). +func (c *EC2) DescribeRegions(input *DescribeRegionsInput) (*DescribeRegionsOutput, error) { + req, out := c.DescribeRegionsRequest(input) + err := req.Send() + return out, err +} + +const opDescribeReservedInstances = "DescribeReservedInstances" + +// DescribeReservedInstancesRequest generates a request for the DescribeReservedInstances operation. +func (c *EC2) DescribeReservedInstancesRequest(input *DescribeReservedInstancesInput) (req *request.Request, output *DescribeReservedInstancesOutput) { + op := &request.Operation{ + Name: opDescribeReservedInstances, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeReservedInstancesInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeReservedInstancesOutput{} + req.Data = output + return +} + +// Describes one or more of the Reserved Instances that you purchased. +// +// For more information about Reserved Instances, see Reserved Instances (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/concepts-on-demand-reserved-instances.html) +// in the Amazon Elastic Compute Cloud User Guide. +func (c *EC2) DescribeReservedInstances(input *DescribeReservedInstancesInput) (*DescribeReservedInstancesOutput, error) { + req, out := c.DescribeReservedInstancesRequest(input) + err := req.Send() + return out, err +} + +const opDescribeReservedInstancesListings = "DescribeReservedInstancesListings" + +// DescribeReservedInstancesListingsRequest generates a request for the DescribeReservedInstancesListings operation. 
+func (c *EC2) DescribeReservedInstancesListingsRequest(input *DescribeReservedInstancesListingsInput) (req *request.Request, output *DescribeReservedInstancesListingsOutput) { + op := &request.Operation{ + Name: opDescribeReservedInstancesListings, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeReservedInstancesListingsInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeReservedInstancesListingsOutput{} + req.Data = output + return +} + +// Describes your account's Reserved Instance listings in the Reserved Instance +// Marketplace. +// +// The Reserved Instance Marketplace matches sellers who want to resell Reserved +// Instance capacity that they no longer need with buyers who want to purchase +// additional capacity. Reserved Instances bought and sold through the Reserved +// Instance Marketplace work like any other Reserved Instances. +// +// As a seller, you choose to list some or all of your Reserved Instances, +// and you specify the upfront price to receive for them. Your Reserved Instances +// are then listed in the Reserved Instance Marketplace and are available for +// purchase. +// +// As a buyer, you specify the configuration of the Reserved Instance to purchase, +// and the Marketplace matches what you're searching for with what's available. +// The Marketplace first sells the lowest priced Reserved Instances to you, +// and continues to sell available Reserved Instance listings to you until your +// demand is met. You are charged based on the total price of all of the listings +// that you purchase. +// +// For more information, see Reserved Instance Marketplace (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ri-market-general.html) +// in the Amazon Elastic Compute Cloud User Guide. +func (c *EC2) DescribeReservedInstancesListings(input *DescribeReservedInstancesListingsInput) (*DescribeReservedInstancesListingsOutput, error) { + req, out := c.DescribeReservedInstancesListingsRequest(input) + err := req.Send() + return out, err +} + +const opDescribeReservedInstancesModifications = "DescribeReservedInstancesModifications" + +// DescribeReservedInstancesModificationsRequest generates a request for the DescribeReservedInstancesModifications operation. +func (c *EC2) DescribeReservedInstancesModificationsRequest(input *DescribeReservedInstancesModificationsInput) (req *request.Request, output *DescribeReservedInstancesModificationsOutput) { + op := &request.Operation{ + Name: opDescribeReservedInstancesModifications, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeReservedInstancesModificationsInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeReservedInstancesModificationsOutput{} + req.Data = output + return +} + +// Describes the modifications made to your Reserved Instances. If no parameter +// is specified, information about all your Reserved Instances modification +// requests is returned. If a modification ID is specified, only information +// about the specific modification is returned. +// +// For more information, see Modifying Reserved Instances (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ri-modifying.html) +// in the Amazon Elastic Compute Cloud User Guide. 
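Every operation also exposes its XxxRequest form, which returns the unsent *request.Request alongside the output it will fill, so handlers can be adjusted before Send. A sketch against DescribeReservedInstancesListingsRequest above; the region and the logging handler are illustrative assumptions:

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/request"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/ec2"
)

func main() {
	svc := ec2.New(session.New(&aws.Config{Region: aws.String("us-east-1")}))

	// Build the request without sending it; out is filled in by Send.
	req, out := svc.DescribeReservedInstancesListingsRequest(nil)

	// Illustrative pre-Send customization: log the operation name.
	req.Handlers.Send.PushFront(func(r *request.Request) {
		log.Printf("sending %s", r.Operation.Name)
	})

	if err := req.Send(); err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%d listings\n", len(out.ReservedInstancesListings))
}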
+func (c *EC2) DescribeReservedInstancesModifications(input *DescribeReservedInstancesModificationsInput) (*DescribeReservedInstancesModificationsOutput, error) { + req, out := c.DescribeReservedInstancesModificationsRequest(input) + err := req.Send() + return out, err +} + +func (c *EC2) DescribeReservedInstancesModificationsPages(input *DescribeReservedInstancesModificationsInput, fn func(p *DescribeReservedInstancesModificationsOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.DescribeReservedInstancesModificationsRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*DescribeReservedInstancesModificationsOutput), lastPage) + }) +} + +const opDescribeReservedInstancesOfferings = "DescribeReservedInstancesOfferings" + +// DescribeReservedInstancesOfferingsRequest generates a request for the DescribeReservedInstancesOfferings operation. +func (c *EC2) DescribeReservedInstancesOfferingsRequest(input *DescribeReservedInstancesOfferingsInput) (req *request.Request, output *DescribeReservedInstancesOfferingsOutput) { + op := &request.Operation{ + Name: opDescribeReservedInstancesOfferings, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeReservedInstancesOfferingsInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeReservedInstancesOfferingsOutput{} + req.Data = output + return +} + +// Describes Reserved Instance offerings that are available for purchase. With +// Reserved Instances, you purchase the right to launch instances for a period +// of time. During that time period, you do not receive insufficient capacity +// errors, and you pay a lower usage rate than the rate charged for On-Demand +// instances for the actual time used. +// +// If you have listed your own Reserved Instances for sale in the Reserved +// Instance Marketplace, they will be excluded from these results. This is to +// ensure that you do not purchase your own Reserved Instances. +// +// For more information, see Reserved Instance Marketplace (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ri-market-general.html) +// in the Amazon Elastic Compute Cloud User Guide. +func (c *EC2) DescribeReservedInstancesOfferings(input *DescribeReservedInstancesOfferingsInput) (*DescribeReservedInstancesOfferingsOutput, error) { + req, out := c.DescribeReservedInstancesOfferingsRequest(input) + err := req.Send() + return out, err +} + +func (c *EC2) DescribeReservedInstancesOfferingsPages(input *DescribeReservedInstancesOfferingsInput, fn func(p *DescribeReservedInstancesOfferingsOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.DescribeReservedInstancesOfferingsRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*DescribeReservedInstancesOfferingsOutput), lastPage) + }) +} + +const opDescribeRouteTables = "DescribeRouteTables" + +// DescribeRouteTablesRequest generates a request for the DescribeRouteTables operation. 
+func (c *EC2) DescribeRouteTablesRequest(input *DescribeRouteTablesInput) (req *request.Request, output *DescribeRouteTablesOutput) { + op := &request.Operation{ + Name: opDescribeRouteTables, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeRouteTablesInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeRouteTablesOutput{} + req.Data = output + return +} + +// Describes one or more of your route tables. +// +// Each subnet in your VPC must be associated with a route table. If a subnet +// is not explicitly associated with any route table, it is implicitly associated +// with the main route table. This command does not return the subnet ID for +// implicit associations. +// +// For more information about route tables, see Route Tables (http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_Route_Tables.html) +// in the Amazon Virtual Private Cloud User Guide. +func (c *EC2) DescribeRouteTables(input *DescribeRouteTablesInput) (*DescribeRouteTablesOutput, error) { + req, out := c.DescribeRouteTablesRequest(input) + err := req.Send() + return out, err +} + +const opDescribeScheduledInstanceAvailability = "DescribeScheduledInstanceAvailability" + +// DescribeScheduledInstanceAvailabilityRequest generates a request for the DescribeScheduledInstanceAvailability operation. +func (c *EC2) DescribeScheduledInstanceAvailabilityRequest(input *DescribeScheduledInstanceAvailabilityInput) (req *request.Request, output *DescribeScheduledInstanceAvailabilityOutput) { + op := &request.Operation{ + Name: opDescribeScheduledInstanceAvailability, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeScheduledInstanceAvailabilityInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeScheduledInstanceAvailabilityOutput{} + req.Data = output + return +} + +// Finds available schedules that meet the specified criteria. +// +// You can search for an available schedule no more than 3 months in advance. +// You must meet the minimum required duration of 1,200 hours per year. For +// example, the minimum daily schedule is 4 hours, the minimum weekly schedule +// is 24 hours, and the minimum monthly schedule is 100 hours. +// +// After you find a schedule that meets your needs, call PurchaseScheduledInstances +// to purchase Scheduled Instances with that schedule. +func (c *EC2) DescribeScheduledInstanceAvailability(input *DescribeScheduledInstanceAvailabilityInput) (*DescribeScheduledInstanceAvailabilityOutput, error) { + req, out := c.DescribeScheduledInstanceAvailabilityRequest(input) + err := req.Send() + return out, err +} + +const opDescribeScheduledInstances = "DescribeScheduledInstances" + +// DescribeScheduledInstancesRequest generates a request for the DescribeScheduledInstances operation. +func (c *EC2) DescribeScheduledInstancesRequest(input *DescribeScheduledInstancesInput) (req *request.Request, output *DescribeScheduledInstancesOutput) { + op := &request.Operation{ + Name: opDescribeScheduledInstances, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeScheduledInstancesInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeScheduledInstancesOutput{} + req.Data = output + return +} + +// Describes one or more of your Scheduled Instances. 
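A sketch of the availability search that DescribeScheduledInstanceAvailability above performs; the weekly Monday recurrence and the one-month search window are illustrative assumptions (a 24-hour weekly slot satisfies the 1,200 hour/year minimum):

package main

import (
	"fmt"
	"log"
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/ec2"
)

func main() {
	svc := ec2.New(session.New(&aws.Config{Region: aws.String("us-east-1")}))

	now := time.Now()
	resp, err := svc.DescribeScheduledInstanceAvailability(&ec2.DescribeScheduledInstanceAvailabilityInput{
		// Search slots starting between tomorrow and thirty days out.
		FirstSlotStartTimeRange: &ec2.SlotDateTimeRangeRequest{
			EarliestTime: aws.Time(now.Add(24 * time.Hour)),
			LatestTime:   aws.Time(now.Add(30 * 24 * time.Hour)),
		},
		// Every Monday (day 1 for weekly schedules).
		Recurrence: &ec2.ScheduledInstanceRecurrenceRequest{
			Frequency:      aws.String("Weekly"),
			Interval:       aws.Int64(1),
			OccurrenceDays: []*int64{aws.Int64(1)},
		},
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%d candidate schedules\n", len(resp.ScheduledInstanceAvailabilitySet))
}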
+func (c *EC2) DescribeScheduledInstances(input *DescribeScheduledInstancesInput) (*DescribeScheduledInstancesOutput, error) { + req, out := c.DescribeScheduledInstancesRequest(input) + err := req.Send() + return out, err +} + +const opDescribeSecurityGroups = "DescribeSecurityGroups" + +// DescribeSecurityGroupsRequest generates a request for the DescribeSecurityGroups operation. +func (c *EC2) DescribeSecurityGroupsRequest(input *DescribeSecurityGroupsInput) (req *request.Request, output *DescribeSecurityGroupsOutput) { + op := &request.Operation{ + Name: opDescribeSecurityGroups, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeSecurityGroupsInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeSecurityGroupsOutput{} + req.Data = output + return +} + +// Describes one or more of your security groups. +// +// A security group is for use with instances either in the EC2-Classic platform +// or in a specific VPC. For more information, see Amazon EC2 Security Groups +// (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-network-security.html) +// in the Amazon Elastic Compute Cloud User Guide and Security Groups for Your +// VPC (http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_SecurityGroups.html) +// in the Amazon Virtual Private Cloud User Guide. +func (c *EC2) DescribeSecurityGroups(input *DescribeSecurityGroupsInput) (*DescribeSecurityGroupsOutput, error) { + req, out := c.DescribeSecurityGroupsRequest(input) + err := req.Send() + return out, err +} + +const opDescribeSnapshotAttribute = "DescribeSnapshotAttribute" + +// DescribeSnapshotAttributeRequest generates a request for the DescribeSnapshotAttribute operation. +func (c *EC2) DescribeSnapshotAttributeRequest(input *DescribeSnapshotAttributeInput) (req *request.Request, output *DescribeSnapshotAttributeOutput) { + op := &request.Operation{ + Name: opDescribeSnapshotAttribute, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeSnapshotAttributeInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeSnapshotAttributeOutput{} + req.Data = output + return +} + +// Describes the specified attribute of the specified snapshot. You can specify +// only one attribute at a time. +// +// For more information about EBS snapshots, see Amazon EBS Snapshots in the +// Amazon Elastic Compute Cloud User Guide. +func (c *EC2) DescribeSnapshotAttribute(input *DescribeSnapshotAttributeInput) (*DescribeSnapshotAttributeOutput, error) { + req, out := c.DescribeSnapshotAttributeRequest(input) + err := req.Send() + return out, err +} + +const opDescribeSnapshots = "DescribeSnapshots" + +// DescribeSnapshotsRequest generates a request for the DescribeSnapshots operation. +func (c *EC2) DescribeSnapshotsRequest(input *DescribeSnapshotsInput) (req *request.Request, output *DescribeSnapshotsOutput) { + op := &request.Operation{ + Name: opDescribeSnapshots, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeSnapshotsInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeSnapshotsOutput{} + req.Data = output + return +} + +// Describes one or more of the EBS snapshots available to you. 
Available snapshots
+// include public snapshots available for any AWS account to launch, private
+// snapshots that you own, and private snapshots owned by another AWS account
+// but for which you've been given explicit create volume permissions.
+//
+// The create volume permissions fall into the following categories:
+//
+//   public: The owner of the snapshot granted create volume permissions for
+//   the snapshot to the all group. All AWS accounts have create volume permissions
+//   for these snapshots.
+//
+//   explicit: The owner of the snapshot granted create volume permissions
+//   to a specific AWS account.
+//
+//   implicit: An AWS account has implicit create volume permissions for all
+//   snapshots it owns.
+//
+// The list of snapshots returned can be modified by specifying snapshot IDs,
+// snapshot owners, or AWS accounts with create volume permissions. If no options
+// are specified, Amazon EC2 returns all snapshots for which you have create
+// volume permissions.
+//
+// If you specify one or more snapshot IDs, only snapshots that have the specified
+// IDs are returned. If you specify an invalid snapshot ID, an error is returned.
+// If you specify a snapshot ID for which you do not have access, it is not
+// included in the returned results.
+//
+// If you specify one or more snapshot owners, only snapshots from the specified
+// owners and for which you have access are returned. The results can include
+// the AWS account IDs of the specified owners, amazon for snapshots owned by
+// Amazon, or self for snapshots that you own.
+//
+// If you specify a list of restorable users, only snapshots with create snapshot
+// permissions for those users are returned. You can specify AWS account IDs
+// (if you own the snapshots), self for snapshots for which you own or have
+// explicit permissions, or all for public snapshots.
+//
+// If you are describing a long list of snapshots, you can paginate the output
+// to make the list more manageable. The MaxResults parameter sets the maximum
+// number of results returned in a single page. If the list of results exceeds
+// your MaxResults value, then that number of results is returned along with
+// a NextToken value that can be passed to a subsequent DescribeSnapshots request
+// to retrieve the remaining results.
+//
+// For more information about EBS snapshots, see Amazon EBS Snapshots in the
+// Amazon Elastic Compute Cloud User Guide.
+func (c *EC2) DescribeSnapshots(input *DescribeSnapshotsInput) (*DescribeSnapshotsOutput, error) {
+	req, out := c.DescribeSnapshotsRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+func (c *EC2) DescribeSnapshotsPages(input *DescribeSnapshotsInput, fn func(p *DescribeSnapshotsOutput, lastPage bool) (shouldContinue bool)) error {
+	page, _ := c.DescribeSnapshotsRequest(input)
+	page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator"))
+	return page.EachPage(func(p interface{}, lastPage bool) bool {
+		return fn(p.(*DescribeSnapshotsOutput), lastPage)
+	})
+}
+
+const opDescribeSpotDatafeedSubscription = "DescribeSpotDatafeedSubscription"
+
+// DescribeSpotDatafeedSubscriptionRequest generates a request for the DescribeSpotDatafeedSubscription operation.
+func (c *EC2) DescribeSpotDatafeedSubscriptionRequest(input *DescribeSpotDatafeedSubscriptionInput) (req *request.Request, output *DescribeSpotDatafeedSubscriptionOutput) { + op := &request.Operation{ + Name: opDescribeSpotDatafeedSubscription, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeSpotDatafeedSubscriptionInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeSpotDatafeedSubscriptionOutput{} + req.Data = output + return +} + +// Describes the data feed for Spot instances. For more information, see Spot +// Instance Data Feed (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/spot-data-feeds.html) +// in the Amazon Elastic Compute Cloud User Guide. +func (c *EC2) DescribeSpotDatafeedSubscription(input *DescribeSpotDatafeedSubscriptionInput) (*DescribeSpotDatafeedSubscriptionOutput, error) { + req, out := c.DescribeSpotDatafeedSubscriptionRequest(input) + err := req.Send() + return out, err +} + +const opDescribeSpotFleetInstances = "DescribeSpotFleetInstances" + +// DescribeSpotFleetInstancesRequest generates a request for the DescribeSpotFleetInstances operation. +func (c *EC2) DescribeSpotFleetInstancesRequest(input *DescribeSpotFleetInstancesInput) (req *request.Request, output *DescribeSpotFleetInstancesOutput) { + op := &request.Operation{ + Name: opDescribeSpotFleetInstances, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeSpotFleetInstancesInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeSpotFleetInstancesOutput{} + req.Data = output + return +} + +// Describes the running instances for the specified Spot fleet. +func (c *EC2) DescribeSpotFleetInstances(input *DescribeSpotFleetInstancesInput) (*DescribeSpotFleetInstancesOutput, error) { + req, out := c.DescribeSpotFleetInstancesRequest(input) + err := req.Send() + return out, err +} + +const opDescribeSpotFleetRequestHistory = "DescribeSpotFleetRequestHistory" + +// DescribeSpotFleetRequestHistoryRequest generates a request for the DescribeSpotFleetRequestHistory operation. +func (c *EC2) DescribeSpotFleetRequestHistoryRequest(input *DescribeSpotFleetRequestHistoryInput) (req *request.Request, output *DescribeSpotFleetRequestHistoryOutput) { + op := &request.Operation{ + Name: opDescribeSpotFleetRequestHistory, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeSpotFleetRequestHistoryInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeSpotFleetRequestHistoryOutput{} + req.Data = output + return +} + +// Describes the events for the specified Spot fleet request during the specified +// time. +// +// Spot fleet events are delayed by up to 30 seconds before they can be described. +// This ensures that you can query by the last evaluated time and not miss a +// recorded event. +func (c *EC2) DescribeSpotFleetRequestHistory(input *DescribeSpotFleetRequestHistoryInput) (*DescribeSpotFleetRequestHistoryOutput, error) { + req, out := c.DescribeSpotFleetRequestHistoryRequest(input) + err := req.Send() + return out, err +} + +const opDescribeSpotFleetRequests = "DescribeSpotFleetRequests" + +// DescribeSpotFleetRequestsRequest generates a request for the DescribeSpotFleetRequests operation. 
+func (c *EC2) DescribeSpotFleetRequestsRequest(input *DescribeSpotFleetRequestsInput) (req *request.Request, output *DescribeSpotFleetRequestsOutput) { + op := &request.Operation{ + Name: opDescribeSpotFleetRequests, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeSpotFleetRequestsInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeSpotFleetRequestsOutput{} + req.Data = output + return +} + +// Describes your Spot fleet requests. +func (c *EC2) DescribeSpotFleetRequests(input *DescribeSpotFleetRequestsInput) (*DescribeSpotFleetRequestsOutput, error) { + req, out := c.DescribeSpotFleetRequestsRequest(input) + err := req.Send() + return out, err +} + +const opDescribeSpotInstanceRequests = "DescribeSpotInstanceRequests" + +// DescribeSpotInstanceRequestsRequest generates a request for the DescribeSpotInstanceRequests operation. +func (c *EC2) DescribeSpotInstanceRequestsRequest(input *DescribeSpotInstanceRequestsInput) (req *request.Request, output *DescribeSpotInstanceRequestsOutput) { + op := &request.Operation{ + Name: opDescribeSpotInstanceRequests, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeSpotInstanceRequestsInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeSpotInstanceRequestsOutput{} + req.Data = output + return +} + +// Describes the Spot instance requests that belong to your account. Spot instances +// are instances that Amazon EC2 launches when the bid price that you specify +// exceeds the current Spot price. Amazon EC2 periodically sets the Spot price +// based on available Spot instance capacity and current Spot instance requests. +// For more information, see Spot Instance Requests (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/spot-requests.html) +// in the Amazon Elastic Compute Cloud User Guide. +// +// You can use DescribeSpotInstanceRequests to find a running Spot instance +// by examining the response. If the status of the Spot instance is fulfilled, +// the instance ID appears in the response and contains the identifier of the +// instance. Alternatively, you can use DescribeInstances with a filter to look +// for instances where the instance lifecycle is spot. +func (c *EC2) DescribeSpotInstanceRequests(input *DescribeSpotInstanceRequestsInput) (*DescribeSpotInstanceRequestsOutput, error) { + req, out := c.DescribeSpotInstanceRequestsRequest(input) + err := req.Send() + return out, err +} + +const opDescribeSpotPriceHistory = "DescribeSpotPriceHistory" + +// DescribeSpotPriceHistoryRequest generates a request for the DescribeSpotPriceHistory operation. +func (c *EC2) DescribeSpotPriceHistoryRequest(input *DescribeSpotPriceHistoryInput) (req *request.Request, output *DescribeSpotPriceHistoryOutput) { + op := &request.Operation{ + Name: opDescribeSpotPriceHistory, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeSpotPriceHistoryInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeSpotPriceHistoryOutput{} + req.Data = output + return +} + +// Describes the Spot price history. The prices returned are listed in chronological +// order, from the oldest to the most recent, for up to the past 90 days. 
For +// more information, see Spot Instance Pricing History (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-spot-instances-history.html) +// in the Amazon Elastic Compute Cloud User Guide. +// +// When you specify a start and end time, this operation returns the prices +// of the instance types within the time range that you specified and the time +// when the price changed. The price is valid within the time period that you +// specified; the response merely indicates the last time that the price changed. +func (c *EC2) DescribeSpotPriceHistory(input *DescribeSpotPriceHistoryInput) (*DescribeSpotPriceHistoryOutput, error) { + req, out := c.DescribeSpotPriceHistoryRequest(input) + err := req.Send() + return out, err +} + +func (c *EC2) DescribeSpotPriceHistoryPages(input *DescribeSpotPriceHistoryInput, fn func(p *DescribeSpotPriceHistoryOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.DescribeSpotPriceHistoryRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*DescribeSpotPriceHistoryOutput), lastPage) + }) +} + +const opDescribeSubnets = "DescribeSubnets" + +// DescribeSubnetsRequest generates a request for the DescribeSubnets operation. +func (c *EC2) DescribeSubnetsRequest(input *DescribeSubnetsInput) (req *request.Request, output *DescribeSubnetsOutput) { + op := &request.Operation{ + Name: opDescribeSubnets, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeSubnetsInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeSubnetsOutput{} + req.Data = output + return +} + +// Describes one or more of your subnets. +// +// For more information about subnets, see Your VPC and Subnets (http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_Subnets.html) +// in the Amazon Virtual Private Cloud User Guide. +func (c *EC2) DescribeSubnets(input *DescribeSubnetsInput) (*DescribeSubnetsOutput, error) { + req, out := c.DescribeSubnetsRequest(input) + err := req.Send() + return out, err +} + +const opDescribeTags = "DescribeTags" + +// DescribeTagsRequest generates a request for the DescribeTags operation. +func (c *EC2) DescribeTagsRequest(input *DescribeTagsInput) (req *request.Request, output *DescribeTagsOutput) { + op := &request.Operation{ + Name: opDescribeTags, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeTagsInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeTagsOutput{} + req.Data = output + return +} + +// Describes one or more of the tags for your EC2 resources. +// +// For more information about tags, see Tagging Your Resources (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_Tags.html) +// in the Amazon Elastic Compute Cloud User Guide. 
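DescribeTags is another paginated operation; the generated DescribeTagsPages wrapper defined just below drives request.EachPage for you. A sketch that prints instance tags, with the region and the resource-type filter as assumptions:

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/ec2"
)

func main() {
	svc := ec2.New(session.New(&aws.Config{Region: aws.String("us-east-1")}))

	input := &ec2.DescribeTagsInput{
		Filters: []*ec2.Filter{{
			Name:   aws.String("resource-type"),
			Values: []*string{aws.String("instance")},
		}},
	}
	err := svc.DescribeTagsPages(input, func(p *ec2.DescribeTagsOutput, lastPage bool) bool {
		for _, t := range p.Tags {
			fmt.Printf("%s\t%s=%s\n", aws.StringValue(t.ResourceId),
				aws.StringValue(t.Key), aws.StringValue(t.Value))
		}
		return true // continue until the final page
	})
	if err != nil {
		log.Fatal(err)
	}
}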
+func (c *EC2) DescribeTags(input *DescribeTagsInput) (*DescribeTagsOutput, error) {
+	req, out := c.DescribeTagsRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+func (c *EC2) DescribeTagsPages(input *DescribeTagsInput, fn func(p *DescribeTagsOutput, lastPage bool) (shouldContinue bool)) error {
+	page, _ := c.DescribeTagsRequest(input)
+	page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator"))
+	return page.EachPage(func(p interface{}, lastPage bool) bool {
+		return fn(p.(*DescribeTagsOutput), lastPage)
+	})
+}
+
+const opDescribeVolumeAttribute = "DescribeVolumeAttribute"
+
+// DescribeVolumeAttributeRequest generates a request for the DescribeVolumeAttribute operation.
+func (c *EC2) DescribeVolumeAttributeRequest(input *DescribeVolumeAttributeInput) (req *request.Request, output *DescribeVolumeAttributeOutput) {
+	op := &request.Operation{
+		Name:       opDescribeVolumeAttribute,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &DescribeVolumeAttributeInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &DescribeVolumeAttributeOutput{}
+	req.Data = output
+	return
+}
+
+// Describes the specified attribute of the specified volume. You can specify
+// only one attribute at a time.
+//
+// For more information about EBS volumes, see Amazon EBS Volumes in the Amazon
+// Elastic Compute Cloud User Guide.
+func (c *EC2) DescribeVolumeAttribute(input *DescribeVolumeAttributeInput) (*DescribeVolumeAttributeOutput, error) {
+	req, out := c.DescribeVolumeAttributeRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+const opDescribeVolumeStatus = "DescribeVolumeStatus"
+
+// DescribeVolumeStatusRequest generates a request for the DescribeVolumeStatus operation.
+func (c *EC2) DescribeVolumeStatusRequest(input *DescribeVolumeStatusInput) (req *request.Request, output *DescribeVolumeStatusOutput) {
+	op := &request.Operation{
+		Name:       opDescribeVolumeStatus,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+		Paginator: &request.Paginator{
+			InputTokens:     []string{"NextToken"},
+			OutputTokens:    []string{"NextToken"},
+			LimitToken:      "MaxResults",
+			TruncationToken: "",
+		},
+	}
+
+	if input == nil {
+		input = &DescribeVolumeStatusInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &DescribeVolumeStatusOutput{}
+	req.Data = output
+	return
+}
+
+// Describes the status of the specified volumes. Volume status provides the
+// result of the checks performed on your volumes to determine events that can
+// impair the performance of your volumes. The performance of a volume can be
+// affected if an issue occurs on the volume's underlying host. If the volume's
+// underlying host experiences a power outage or system issue, after the system
+// is restored, there could be data inconsistencies on the volume. Volume events
+// notify you if this occurs. Volume actions notify you if any action needs
+// to be taken in response to the event.
+//
+// The DescribeVolumeStatus operation provides the following information about
+// the specified volumes:
+//
+// Status: Reflects the current status of the volume. The possible values are
+// ok, impaired, warning, or insufficient-data. If all checks pass, the overall
+// status of the volume is ok. If the check fails, the overall status is impaired.
+// If the status is insufficient-data, then the checks may still be taking place
+// on your volume at the time. We recommend that you retry the request. For
For +// more information on volume status, see Monitoring the Status of Your Volumes +// (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/monitoring-volume-status.html). +// +// Events: Reflect the cause of a volume status and may require you to take +// action. For example, if your volume returns an impaired status, then the +// volume event might be potential-data-inconsistency. This means that your +// volume has been affected by an issue with the underlying host, has all I/O +// operations disabled, and may have inconsistent data. +// +// Actions: Reflect the actions you may have to take in response to an event. +// For example, if the status of the volume is impaired and the volume event +// shows potential-data-inconsistency, then the action shows enable-volume-io. +// This means that you may want to enable the I/O operations for the volume +// by calling the EnableVolumeIO action and then check the volume for data consistency. +// +// Volume status is based on the volume status checks, and does not reflect +// the volume state. Therefore, volume status does not indicate volumes in the +// error state (for example, when a volume is incapable of accepting I/O.) +func (c *EC2) DescribeVolumeStatus(input *DescribeVolumeStatusInput) (*DescribeVolumeStatusOutput, error) { + req, out := c.DescribeVolumeStatusRequest(input) + err := req.Send() + return out, err +} + +func (c *EC2) DescribeVolumeStatusPages(input *DescribeVolumeStatusInput, fn func(p *DescribeVolumeStatusOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.DescribeVolumeStatusRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*DescribeVolumeStatusOutput), lastPage) + }) +} + +const opDescribeVolumes = "DescribeVolumes" + +// DescribeVolumesRequest generates a request for the DescribeVolumes operation. +func (c *EC2) DescribeVolumesRequest(input *DescribeVolumesInput) (req *request.Request, output *DescribeVolumesOutput) { + op := &request.Operation{ + Name: opDescribeVolumes, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeVolumesInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeVolumesOutput{} + req.Data = output + return +} + +// Describes the specified EBS volumes. +// +// If you are describing a long list of volumes, you can paginate the output +// to make the list more manageable. The MaxResults parameter sets the maximum +// number of results returned in a single page. If the list of results exceeds +// your MaxResults value, then that number of results is returned along with +// a NextToken value that can be passed to a subsequent DescribeVolumes request +// to retrieve the remaining results. +// +// For more information about EBS volumes, see Amazon EBS Volumes in the Amazon +// Elastic Compute Cloud User Guide. 
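+// +// Illustrative sketch (editor's addition, not generated SDK code): the manual +// MaxResults/NextToken loop described above, assuming a configured *EC2 +// client named svc; DescribeVolumesPages below wraps the same loop. +// +//    input := &DescribeVolumesInput{MaxResults: aws.Int64(100)} +//    for { +//        out, err := svc.DescribeVolumes(input) +//        if err != nil { +//            log.Fatal(err) +//        } +//        for _, v := range out.Volumes { +//            fmt.Println(*v.VolumeId, *v.State) +//        } +//        if out.NextToken == nil { +//            break // no further pages +//        } +//        input.NextToken = out.NextToken +//    }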
+func (c *EC2) DescribeVolumes(input *DescribeVolumesInput) (*DescribeVolumesOutput, error) { + req, out := c.DescribeVolumesRequest(input) + err := req.Send() + return out, err +} + +func (c *EC2) DescribeVolumesPages(input *DescribeVolumesInput, fn func(p *DescribeVolumesOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.DescribeVolumesRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*DescribeVolumesOutput), lastPage) + }) +} + +const opDescribeVpcAttribute = "DescribeVpcAttribute" + +// DescribeVpcAttributeRequest generates a request for the DescribeVpcAttribute operation. +func (c *EC2) DescribeVpcAttributeRequest(input *DescribeVpcAttributeInput) (req *request.Request, output *DescribeVpcAttributeOutput) { + op := &request.Operation{ + Name: opDescribeVpcAttribute, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeVpcAttributeInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeVpcAttributeOutput{} + req.Data = output + return +} + +// Describes the specified attribute of the specified VPC. You can specify only +// one attribute at a time. +func (c *EC2) DescribeVpcAttribute(input *DescribeVpcAttributeInput) (*DescribeVpcAttributeOutput, error) { + req, out := c.DescribeVpcAttributeRequest(input) + err := req.Send() + return out, err +} + +const opDescribeVpcClassicLink = "DescribeVpcClassicLink" + +// DescribeVpcClassicLinkRequest generates a request for the DescribeVpcClassicLink operation. +func (c *EC2) DescribeVpcClassicLinkRequest(input *DescribeVpcClassicLinkInput) (req *request.Request, output *DescribeVpcClassicLinkOutput) { + op := &request.Operation{ + Name: opDescribeVpcClassicLink, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeVpcClassicLinkInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeVpcClassicLinkOutput{} + req.Data = output + return +} + +// Describes the ClassicLink status of one or more VPCs. +func (c *EC2) DescribeVpcClassicLink(input *DescribeVpcClassicLinkInput) (*DescribeVpcClassicLinkOutput, error) { + req, out := c.DescribeVpcClassicLinkRequest(input) + err := req.Send() + return out, err +} + +const opDescribeVpcClassicLinkDnsSupport = "DescribeVpcClassicLinkDnsSupport" + +// DescribeVpcClassicLinkDnsSupportRequest generates a request for the DescribeVpcClassicLinkDnsSupport operation. +func (c *EC2) DescribeVpcClassicLinkDnsSupportRequest(input *DescribeVpcClassicLinkDnsSupportInput) (req *request.Request, output *DescribeVpcClassicLinkDnsSupportOutput) { + op := &request.Operation{ + Name: opDescribeVpcClassicLinkDnsSupport, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeVpcClassicLinkDnsSupportInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeVpcClassicLinkDnsSupportOutput{} + req.Data = output + return +} + +// Describes the ClassicLink DNS support status of one or more VPCs. If enabled, +// the DNS hostname of a linked EC2-Classic instance resolves to its private +// IP address when addressed from an instance in the VPC to which it's linked. +// Similarly, the DNS hostname of an instance in a VPC resolves to its private +// IP address when addressed from a linked EC2-Classic instance. 
For more information +// about ClassicLink, see ClassicLink (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/vpc-classiclink.html) +// in the Amazon Elastic Compute Cloud User Guide. +func (c *EC2) DescribeVpcClassicLinkDnsSupport(input *DescribeVpcClassicLinkDnsSupportInput) (*DescribeVpcClassicLinkDnsSupportOutput, error) { + req, out := c.DescribeVpcClassicLinkDnsSupportRequest(input) + err := req.Send() + return out, err +} + +const opDescribeVpcEndpointServices = "DescribeVpcEndpointServices" + +// DescribeVpcEndpointServicesRequest generates a request for the DescribeVpcEndpointServices operation. +func (c *EC2) DescribeVpcEndpointServicesRequest(input *DescribeVpcEndpointServicesInput) (req *request.Request, output *DescribeVpcEndpointServicesOutput) { + op := &request.Operation{ + Name: opDescribeVpcEndpointServices, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeVpcEndpointServicesInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeVpcEndpointServicesOutput{} + req.Data = output + return +} + +// Describes all supported AWS services that can be specified when creating +// a VPC endpoint. +func (c *EC2) DescribeVpcEndpointServices(input *DescribeVpcEndpointServicesInput) (*DescribeVpcEndpointServicesOutput, error) { + req, out := c.DescribeVpcEndpointServicesRequest(input) + err := req.Send() + return out, err +} + +const opDescribeVpcEndpoints = "DescribeVpcEndpoints" + +// DescribeVpcEndpointsRequest generates a request for the DescribeVpcEndpoints operation. +func (c *EC2) DescribeVpcEndpointsRequest(input *DescribeVpcEndpointsInput) (req *request.Request, output *DescribeVpcEndpointsOutput) { + op := &request.Operation{ + Name: opDescribeVpcEndpoints, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeVpcEndpointsInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeVpcEndpointsOutput{} + req.Data = output + return +} + +// Describes one or more of your VPC endpoints. +func (c *EC2) DescribeVpcEndpoints(input *DescribeVpcEndpointsInput) (*DescribeVpcEndpointsOutput, error) { + req, out := c.DescribeVpcEndpointsRequest(input) + err := req.Send() + return out, err +} + +const opDescribeVpcPeeringConnections = "DescribeVpcPeeringConnections" + +// DescribeVpcPeeringConnectionsRequest generates a request for the DescribeVpcPeeringConnections operation. +func (c *EC2) DescribeVpcPeeringConnectionsRequest(input *DescribeVpcPeeringConnectionsInput) (req *request.Request, output *DescribeVpcPeeringConnectionsOutput) { + op := &request.Operation{ + Name: opDescribeVpcPeeringConnections, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeVpcPeeringConnectionsInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeVpcPeeringConnectionsOutput{} + req.Data = output + return +} + +// Describes one or more of your VPC peering connections. +func (c *EC2) DescribeVpcPeeringConnections(input *DescribeVpcPeeringConnectionsInput) (*DescribeVpcPeeringConnectionsOutput, error) { + req, out := c.DescribeVpcPeeringConnectionsRequest(input) + err := req.Send() + return out, err +} + +const opDescribeVpcs = "DescribeVpcs" + +// DescribeVpcsRequest generates a request for the DescribeVpcs operation. 
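+// +// Illustrative sketch (editor's addition, not generated SDK code): every +// *Request method supports this two-step form, which leaves room to inspect +// or customize the request before sending. Assumes a configured *EC2 client +// named svc. +// +//    req, out := svc.DescribeVpcsRequest(&DescribeVpcsInput{}) +//    // req.Handlers can be adjusted here before the request is sent. +//    if err := req.Send(); err != nil { +//        log.Fatal(err) +//    } +//    fmt.Println(len(out.Vpcs), "VPCs found")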
+func (c *EC2) DescribeVpcsRequest(input *DescribeVpcsInput) (req *request.Request, output *DescribeVpcsOutput) { + op := &request.Operation{ + Name: opDescribeVpcs, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeVpcsInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeVpcsOutput{} + req.Data = output + return +} + +// Describes one or more of your VPCs. +func (c *EC2) DescribeVpcs(input *DescribeVpcsInput) (*DescribeVpcsOutput, error) { + req, out := c.DescribeVpcsRequest(input) + err := req.Send() + return out, err +} + +const opDescribeVpnConnections = "DescribeVpnConnections" + +// DescribeVpnConnectionsRequest generates a request for the DescribeVpnConnections operation. +func (c *EC2) DescribeVpnConnectionsRequest(input *DescribeVpnConnectionsInput) (req *request.Request, output *DescribeVpnConnectionsOutput) { + op := &request.Operation{ + Name: opDescribeVpnConnections, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeVpnConnectionsInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeVpnConnectionsOutput{} + req.Data = output + return +} + +// Describes one or more of your VPN connections. +// +// For more information about VPN connections, see Adding a Hardware Virtual +// Private Gateway to Your VPC (http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_VPN.html) +// in the Amazon Virtual Private Cloud User Guide. +func (c *EC2) DescribeVpnConnections(input *DescribeVpnConnectionsInput) (*DescribeVpnConnectionsOutput, error) { + req, out := c.DescribeVpnConnectionsRequest(input) + err := req.Send() + return out, err +} + +const opDescribeVpnGateways = "DescribeVpnGateways" + +// DescribeVpnGatewaysRequest generates a request for the DescribeVpnGateways operation. +func (c *EC2) DescribeVpnGatewaysRequest(input *DescribeVpnGatewaysInput) (req *request.Request, output *DescribeVpnGatewaysOutput) { + op := &request.Operation{ + Name: opDescribeVpnGateways, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeVpnGatewaysInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeVpnGatewaysOutput{} + req.Data = output + return +} + +// Describes one or more of your virtual private gateways. +// +// For more information about virtual private gateways, see Adding an IPsec +// Hardware VPN to Your VPC (http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_VPN.html) +// in the Amazon Virtual Private Cloud User Guide. +func (c *EC2) DescribeVpnGateways(input *DescribeVpnGatewaysInput) (*DescribeVpnGatewaysOutput, error) { + req, out := c.DescribeVpnGatewaysRequest(input) + err := req.Send() + return out, err +} + +const opDetachClassicLinkVpc = "DetachClassicLinkVpc" + +// DetachClassicLinkVpcRequest generates a request for the DetachClassicLinkVpc operation. +func (c *EC2) DetachClassicLinkVpcRequest(input *DetachClassicLinkVpcInput) (req *request.Request, output *DetachClassicLinkVpcOutput) { + op := &request.Operation{ + Name: opDetachClassicLinkVpc, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DetachClassicLinkVpcInput{} + } + + req = c.newRequest(op, input, output) + output = &DetachClassicLinkVpcOutput{} + req.Data = output + return +} + +// Unlinks (detaches) a linked EC2-Classic instance from a VPC. After the instance +// has been unlinked, the VPC security groups are no longer associated with +// it. An instance is automatically unlinked from a VPC when it's stopped. 
+func (c *EC2) DetachClassicLinkVpc(input *DetachClassicLinkVpcInput) (*DetachClassicLinkVpcOutput, error) { + req, out := c.DetachClassicLinkVpcRequest(input) + err := req.Send() + return out, err +} + +const opDetachInternetGateway = "DetachInternetGateway" + +// DetachInternetGatewayRequest generates a request for the DetachInternetGateway operation. +func (c *EC2) DetachInternetGatewayRequest(input *DetachInternetGatewayInput) (req *request.Request, output *DetachInternetGatewayOutput) { + op := &request.Operation{ + Name: opDetachInternetGateway, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DetachInternetGatewayInput{} + } + + req = c.newRequest(op, input, output) + output = &DetachInternetGatewayOutput{} + req.Data = output + return +} + +// Detaches an Internet gateway from a VPC, disabling connectivity between the +// Internet and the VPC. The VPC must not contain any running instances with +// Elastic IP addresses. +func (c *EC2) DetachInternetGateway(input *DetachInternetGatewayInput) (*DetachInternetGatewayOutput, error) { + req, out := c.DetachInternetGatewayRequest(input) + err := req.Send() + return out, err +} + +const opDetachNetworkInterface = "DetachNetworkInterface" + +// DetachNetworkInterfaceRequest generates a request for the DetachNetworkInterface operation. +func (c *EC2) DetachNetworkInterfaceRequest(input *DetachNetworkInterfaceInput) (req *request.Request, output *DetachNetworkInterfaceOutput) { + op := &request.Operation{ + Name: opDetachNetworkInterface, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DetachNetworkInterfaceInput{} + } + + req = c.newRequest(op, input, output) + output = &DetachNetworkInterfaceOutput{} + req.Data = output + return +} + +// Detaches a network interface from an instance. +func (c *EC2) DetachNetworkInterface(input *DetachNetworkInterfaceInput) (*DetachNetworkInterfaceOutput, error) { + req, out := c.DetachNetworkInterfaceRequest(input) + err := req.Send() + return out, err +} + +const opDetachVolume = "DetachVolume" + +// DetachVolumeRequest generates a request for the DetachVolume operation. +func (c *EC2) DetachVolumeRequest(input *DetachVolumeInput) (req *request.Request, output *VolumeAttachment) { + op := &request.Operation{ + Name: opDetachVolume, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DetachVolumeInput{} + } + + req = c.newRequest(op, input, output) + output = &VolumeAttachment{} + req.Data = output + return +} + +// Detaches an EBS volume from an instance. Make sure to unmount any file systems +// on the device within your operating system before detaching the volume. Failure +// to do so results in the volume being stuck in a busy state while detaching. +// +// If an Amazon EBS volume is the root device of an instance, it can't be detached +// while the instance is running. To detach the root volume, stop the instance +// first. +// +// When a volume with an AWS Marketplace product code is detached from an instance, +// the product code is no longer associated with the instance. +// +// For more information, see Detaching an Amazon EBS Volume (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-detaching-volume.html) +// in the Amazon Elastic Compute Cloud User Guide. 
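+// +// Illustrative sketch (editor's addition, not generated SDK code): DetachVolume +// returns a *VolumeAttachment rather than a dedicated output struct, so the +// attachment state is available directly. Assumes a configured *EC2 client +// named svc; the volume ID is a placeholder. +// +//    att, err := svc.DetachVolume(&DetachVolumeInput{ +//        VolumeId: aws.String("vol-1a2b3c4d"), // placeholder ID +//    }) +//    if err != nil { +//        log.Fatal(err) +//    } +//    fmt.Println("attachment state:", *att.State) // e.g. "detaching"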
+func (c *EC2) DetachVolume(input *DetachVolumeInput) (*VolumeAttachment, error) { + req, out := c.DetachVolumeRequest(input) + err := req.Send() + return out, err +} + +const opDetachVpnGateway = "DetachVpnGateway" + +// DetachVpnGatewayRequest generates a request for the DetachVpnGateway operation. +func (c *EC2) DetachVpnGatewayRequest(input *DetachVpnGatewayInput) (req *request.Request, output *DetachVpnGatewayOutput) { + op := &request.Operation{ + Name: opDetachVpnGateway, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DetachVpnGatewayInput{} + } + + req = c.newRequest(op, input, output) + output = &DetachVpnGatewayOutput{} + req.Data = output + return +} + +// Detaches a virtual private gateway from a VPC. You do this if you're planning +// to turn off the VPC and not use it anymore. You can confirm a virtual private +// gateway has been completely detached from a VPC by describing the virtual +// private gateway (any attachments to the virtual private gateway are also +// described). +// +// You must wait for the attachment's state to switch to detached before you +// can delete the VPC or attach a different VPC to the virtual private gateway. +func (c *EC2) DetachVpnGateway(input *DetachVpnGatewayInput) (*DetachVpnGatewayOutput, error) { + req, out := c.DetachVpnGatewayRequest(input) + err := req.Send() + return out, err +} + +const opDisableVgwRoutePropagation = "DisableVgwRoutePropagation" + +// DisableVgwRoutePropagationRequest generates a request for the DisableVgwRoutePropagation operation. +func (c *EC2) DisableVgwRoutePropagationRequest(input *DisableVgwRoutePropagationInput) (req *request.Request, output *DisableVgwRoutePropagationOutput) { + op := &request.Operation{ + Name: opDisableVgwRoutePropagation, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DisableVgwRoutePropagationInput{} + } + + req = c.newRequest(op, input, output) + output = &DisableVgwRoutePropagationOutput{} + req.Data = output + return +} + +// Disables a virtual private gateway (VGW) from propagating routes to a specified +// route table of a VPC. +func (c *EC2) DisableVgwRoutePropagation(input *DisableVgwRoutePropagationInput) (*DisableVgwRoutePropagationOutput, error) { + req, out := c.DisableVgwRoutePropagationRequest(input) + err := req.Send() + return out, err +} + +const opDisableVpcClassicLink = "DisableVpcClassicLink" + +// DisableVpcClassicLinkRequest generates a request for the DisableVpcClassicLink operation. +func (c *EC2) DisableVpcClassicLinkRequest(input *DisableVpcClassicLinkInput) (req *request.Request, output *DisableVpcClassicLinkOutput) { + op := &request.Operation{ + Name: opDisableVpcClassicLink, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DisableVpcClassicLinkInput{} + } + + req = c.newRequest(op, input, output) + output = &DisableVpcClassicLinkOutput{} + req.Data = output + return +} + +// Disables ClassicLink for a VPC. You cannot disable ClassicLink for a VPC +// that has EC2-Classic instances linked to it. +func (c *EC2) DisableVpcClassicLink(input *DisableVpcClassicLinkInput) (*DisableVpcClassicLinkOutput, error) { + req, out := c.DisableVpcClassicLinkRequest(input) + err := req.Send() + return out, err +} + +const opDisableVpcClassicLinkDnsSupport = "DisableVpcClassicLinkDnsSupport" + +// DisableVpcClassicLinkDnsSupportRequest generates a request for the DisableVpcClassicLinkDnsSupport operation. 
+func (c *EC2) DisableVpcClassicLinkDnsSupportRequest(input *DisableVpcClassicLinkDnsSupportInput) (req *request.Request, output *DisableVpcClassicLinkDnsSupportOutput) { + op := &request.Operation{ + Name: opDisableVpcClassicLinkDnsSupport, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DisableVpcClassicLinkDnsSupportInput{} + } + + req = c.newRequest(op, input, output) + output = &DisableVpcClassicLinkDnsSupportOutput{} + req.Data = output + return +} + +// Disables ClassicLink DNS support for a VPC. If disabled, DNS hostnames resolve +// to public IP addresses when addressed between a linked EC2-Classic instance +// and instances in the VPC to which it's linked. For more information about +// ClassicLink, see ClassicLink (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/vpc-classiclink.html) +// in the Amazon Elastic Compute Cloud User Guide. +func (c *EC2) DisableVpcClassicLinkDnsSupport(input *DisableVpcClassicLinkDnsSupportInput) (*DisableVpcClassicLinkDnsSupportOutput, error) { + req, out := c.DisableVpcClassicLinkDnsSupportRequest(input) + err := req.Send() + return out, err +} + +const opDisassociateAddress = "DisassociateAddress" + +// DisassociateAddressRequest generates a request for the DisassociateAddress operation. +func (c *EC2) DisassociateAddressRequest(input *DisassociateAddressInput) (req *request.Request, output *DisassociateAddressOutput) { + op := &request.Operation{ + Name: opDisassociateAddress, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DisassociateAddressInput{} + } + + req = c.newRequest(op, input, output) + output = &DisassociateAddressOutput{} + req.Data = output + return +} + +// Disassociates an Elastic IP address from the instance or network interface +// it's associated with. +// +// An Elastic IP address is for use in either the EC2-Classic platform or in +// a VPC. For more information, see Elastic IP Addresses (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/elastic-ip-addresses-eip.html) +// in the Amazon Elastic Compute Cloud User Guide. +// +// This is an idempotent operation. If you perform the operation more than +// once, Amazon EC2 doesn't return an error. +func (c *EC2) DisassociateAddress(input *DisassociateAddressInput) (*DisassociateAddressOutput, error) { + req, out := c.DisassociateAddressRequest(input) + err := req.Send() + return out, err +} + +const opDisassociateRouteTable = "DisassociateRouteTable" + +// DisassociateRouteTableRequest generates a request for the DisassociateRouteTable operation. +func (c *EC2) DisassociateRouteTableRequest(input *DisassociateRouteTableInput) (req *request.Request, output *DisassociateRouteTableOutput) { + op := &request.Operation{ + Name: opDisassociateRouteTable, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DisassociateRouteTableInput{} + } + + req = c.newRequest(op, input, output) + output = &DisassociateRouteTableOutput{} + req.Data = output + return +} + +// Disassociates a subnet from a route table. +// +// After you perform this action, the subnet no longer uses the routes in the +// route table. Instead, it uses the routes in the VPC's main route table. For +// more information about route tables, see Route Tables (http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_Route_Tables.html) +// in the Amazon Virtual Private Cloud User Guide. 
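+// +// Illustrative sketch (editor's addition, not generated SDK code): the call +// takes the association ID returned by AssociateRouteTable, not a route table +// or subnet ID. Assumes a configured *EC2 client named svc; the ID is a +// placeholder. +// +//    _, err := svc.DisassociateRouteTable(&DisassociateRouteTableInput{ +//        AssociationId: aws.String("rtbassoc-12345678"), // placeholder ID +//    })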
+func (c *EC2) DisassociateRouteTable(input *DisassociateRouteTableInput) (*DisassociateRouteTableOutput, error) { + req, out := c.DisassociateRouteTableRequest(input) + err := req.Send() + return out, err +} + +const opEnableVgwRoutePropagation = "EnableVgwRoutePropagation" + +// EnableVgwRoutePropagationRequest generates a request for the EnableVgwRoutePropagation operation. +func (c *EC2) EnableVgwRoutePropagationRequest(input *EnableVgwRoutePropagationInput) (req *request.Request, output *EnableVgwRoutePropagationOutput) { + op := &request.Operation{ + Name: opEnableVgwRoutePropagation, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &EnableVgwRoutePropagationInput{} + } + + req = c.newRequest(op, input, output) + output = &EnableVgwRoutePropagationOutput{} + req.Data = output + return +} + +// Enables a virtual private gateway (VGW) to propagate routes to the specified +// route table of a VPC. +func (c *EC2) EnableVgwRoutePropagation(input *EnableVgwRoutePropagationInput) (*EnableVgwRoutePropagationOutput, error) { + req, out := c.EnableVgwRoutePropagationRequest(input) + err := req.Send() + return out, err +} + +const opEnableVolumeIO = "EnableVolumeIO" + +// EnableVolumeIORequest generates a request for the EnableVolumeIO operation. +func (c *EC2) EnableVolumeIORequest(input *EnableVolumeIOInput) (req *request.Request, output *EnableVolumeIOOutput) { + op := &request.Operation{ + Name: opEnableVolumeIO, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &EnableVolumeIOInput{} + } + + req = c.newRequest(op, input, output) + output = &EnableVolumeIOOutput{} + req.Data = output + return +} + +// Enables I/O operations for a volume that had I/O operations disabled because +// the data on the volume was potentially inconsistent. +func (c *EC2) EnableVolumeIO(input *EnableVolumeIOInput) (*EnableVolumeIOOutput, error) { + req, out := c.EnableVolumeIORequest(input) + err := req.Send() + return out, err +} + +const opEnableVpcClassicLink = "EnableVpcClassicLink" + +// EnableVpcClassicLinkRequest generates a request for the EnableVpcClassicLink operation. +func (c *EC2) EnableVpcClassicLinkRequest(input *EnableVpcClassicLinkInput) (req *request.Request, output *EnableVpcClassicLinkOutput) { + op := &request.Operation{ + Name: opEnableVpcClassicLink, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &EnableVpcClassicLinkInput{} + } + + req = c.newRequest(op, input, output) + output = &EnableVpcClassicLinkOutput{} + req.Data = output + return +} + +// Enables a VPC for ClassicLink. You can then link EC2-Classic instances to +// your ClassicLink-enabled VPC to allow communication over private IP addresses. +// You cannot enable your VPC for ClassicLink if any of your VPC's route tables +// have existing routes for address ranges within the 10.0.0.0/8 IP address +// range, excluding local routes for VPCs in the 10.0.0.0/16 and 10.1.0.0/16 +// IP address ranges. For more information, see ClassicLink (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/vpc-classiclink.html) +// in the Amazon Elastic Compute Cloud User Guide. +func (c *EC2) EnableVpcClassicLink(input *EnableVpcClassicLinkInput) (*EnableVpcClassicLinkOutput, error) { + req, out := c.EnableVpcClassicLinkRequest(input) + err := req.Send() + return out, err +} + +const opEnableVpcClassicLinkDnsSupport = "EnableVpcClassicLinkDnsSupport" + +// EnableVpcClassicLinkDnsSupportRequest generates a request for the EnableVpcClassicLinkDnsSupport operation. 
+func (c *EC2) EnableVpcClassicLinkDnsSupportRequest(input *EnableVpcClassicLinkDnsSupportInput) (req *request.Request, output *EnableVpcClassicLinkDnsSupportOutput) { + op := &request.Operation{ + Name: opEnableVpcClassicLinkDnsSupport, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &EnableVpcClassicLinkDnsSupportInput{} + } + + req = c.newRequest(op, input, output) + output = &EnableVpcClassicLinkDnsSupportOutput{} + req.Data = output + return +} + +// Enables a VPC to support DNS hostname resolution for ClassicLink. If enabled, +// the DNS hostname of a linked EC2-Classic instance resolves to its private +// IP address when addressed from an instance in the VPC to which it's linked. +// Similarly, the DNS hostname of an instance in a VPC resolves to its private +// IP address when addressed from a linked EC2-Classic instance. For more information +// about ClassicLink, see ClassicLink (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/vpc-classiclink.html) +// in the Amazon Elastic Compute Cloud User Guide. +func (c *EC2) EnableVpcClassicLinkDnsSupport(input *EnableVpcClassicLinkDnsSupportInput) (*EnableVpcClassicLinkDnsSupportOutput, error) { + req, out := c.EnableVpcClassicLinkDnsSupportRequest(input) + err := req.Send() + return out, err +} + +const opGetConsoleOutput = "GetConsoleOutput" + +// GetConsoleOutputRequest generates a request for the GetConsoleOutput operation. +func (c *EC2) GetConsoleOutputRequest(input *GetConsoleOutputInput) (req *request.Request, output *GetConsoleOutputOutput) { + op := &request.Operation{ + Name: opGetConsoleOutput, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetConsoleOutputInput{} + } + + req = c.newRequest(op, input, output) + output = &GetConsoleOutputOutput{} + req.Data = output + return +} + +// Gets the console output for the specified instance. +// +// Instances do not have a physical monitor through which you can view their +// console output. They also lack physical controls that allow you to power +// up, reboot, or shut them down. To allow these actions, we provide them through +// the Amazon EC2 API and command line interface. +// +// Instance console output is buffered and posted shortly after instance boot, +// reboot, and termination. Amazon EC2 preserves the most recent 64 KB output +// which is available for at least one hour after the most recent post. +// +// For Linux instances, the instance console output displays the exact console +// output that would normally be displayed on a physical monitor attached to +// a computer. This output is buffered because the instance produces it and +// then posts it to a store where the instance's owner can retrieve it. +// +// For Windows instances, the instance console output includes output from +// the EC2Config service. +func (c *EC2) GetConsoleOutput(input *GetConsoleOutputInput) (*GetConsoleOutputOutput, error) { + req, out := c.GetConsoleOutputRequest(input) + err := req.Send() + return out, err +} + +const opGetPasswordData = "GetPasswordData" + +// GetPasswordDataRequest generates a request for the GetPasswordData operation. 
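+// +// Illustrative sketch (editor's addition, not generated SDK code): polling +// GetPasswordData until the password is posted, since PasswordData stays empty +// until generation completes. Assumes a configured *EC2 client named svc and +// a placeholder instance ID; decrypting the returned blob still requires the +// key pair's private key. +// +//    for { +//        out, err := svc.GetPasswordData(&GetPasswordDataInput{ +//            InstanceId: aws.String("i-12345678"), // placeholder ID +//        }) +//        if err != nil { +//            log.Fatal(err) +//        } +//        if aws.StringValue(out.PasswordData) != "" { +//            break // base64-encoded, key-pair-encrypted password is ready +//        } +//        time.Sleep(15 * time.Second) +//    }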
+func (c *EC2) GetPasswordDataRequest(input *GetPasswordDataInput) (req *request.Request, output *GetPasswordDataOutput) { + op := &request.Operation{ + Name: opGetPasswordData, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetPasswordDataInput{} + } + + req = c.newRequest(op, input, output) + output = &GetPasswordDataOutput{} + req.Data = output + return +} + +// Retrieves the encrypted administrator password for an instance running Windows. +// +// The Windows password is generated at boot if the EC2Config service plugin, +// Ec2SetPassword, is enabled. This usually only happens the first time an AMI +// is launched, and then Ec2SetPassword is automatically disabled. The password +// is not generated for rebundled AMIs unless Ec2SetPassword is enabled before +// bundling. +// +// The password is encrypted using the key pair that you specified when you +// launched the instance. You must provide the corresponding key pair file. +// +// Password generation and encryption take a few moments. We recommend that +// you wait up to 15 minutes after launching an instance before trying to retrieve +// the generated password. +func (c *EC2) GetPasswordData(input *GetPasswordDataInput) (*GetPasswordDataOutput, error) { + req, out := c.GetPasswordDataRequest(input) + err := req.Send() + return out, err +} + +const opImportImage = "ImportImage" + +// ImportImageRequest generates a request for the ImportImage operation. +func (c *EC2) ImportImageRequest(input *ImportImageInput) (req *request.Request, output *ImportImageOutput) { + op := &request.Operation{ + Name: opImportImage, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ImportImageInput{} + } + + req = c.newRequest(op, input, output) + output = &ImportImageOutput{} + req.Data = output + return +} + +// Imports single or multi-volume disk images or EBS snapshots into an Amazon +// Machine Image (AMI). +func (c *EC2) ImportImage(input *ImportImageInput) (*ImportImageOutput, error) { + req, out := c.ImportImageRequest(input) + err := req.Send() + return out, err +} + +const opImportInstance = "ImportInstance" + +// ImportInstanceRequest generates a request for the ImportInstance operation. +func (c *EC2) ImportInstanceRequest(input *ImportInstanceInput) (req *request.Request, output *ImportInstanceOutput) { + op := &request.Operation{ + Name: opImportInstance, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ImportInstanceInput{} + } + + req = c.newRequest(op, input, output) + output = &ImportInstanceOutput{} + req.Data = output + return +} + +// Creates an import instance task using metadata from the specified disk image. +// ImportInstance only supports single-volume VMs. To import multi-volume VMs, +// use ImportImage. After importing the image, you then upload it using the +// ec2-import-volume command in the EC2 command line tools. For more information, +// see Using the Command Line Tools to Import Your Virtual Machine to Amazon +// EC2 (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/UploadingYourInstancesandVolumes.html) +// in the Amazon Elastic Compute Cloud User Guide. +func (c *EC2) ImportInstance(input *ImportInstanceInput) (*ImportInstanceOutput, error) { + req, out := c.ImportInstanceRequest(input) + err := req.Send() + return out, err +} + +const opImportKeyPair = "ImportKeyPair" + +// ImportKeyPairRequest generates a request for the ImportKeyPair operation.
+func (c *EC2) ImportKeyPairRequest(input *ImportKeyPairInput) (req *request.Request, output *ImportKeyPairOutput) { + op := &request.Operation{ + Name: opImportKeyPair, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ImportKeyPairInput{} + } + + req = c.newRequest(op, input, output) + output = &ImportKeyPairOutput{} + req.Data = output + return +} + +// Imports the public key from an RSA key pair that you created with a third-party +// tool. Compare this with CreateKeyPair, in which AWS creates the key pair +// and gives the keys to you (AWS keeps a copy of the public key). With ImportKeyPair, +// you create the key pair and give AWS just the public key. The private key +// is never transferred between you and AWS. +// +// For more information about key pairs, see Key Pairs (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-key-pairs.html) +// in the Amazon Elastic Compute Cloud User Guide. +func (c *EC2) ImportKeyPair(input *ImportKeyPairInput) (*ImportKeyPairOutput, error) { + req, out := c.ImportKeyPairRequest(input) + err := req.Send() + return out, err +} + +const opImportSnapshot = "ImportSnapshot" + +// ImportSnapshotRequest generates a request for the ImportSnapshot operation. +func (c *EC2) ImportSnapshotRequest(input *ImportSnapshotInput) (req *request.Request, output *ImportSnapshotOutput) { + op := &request.Operation{ + Name: opImportSnapshot, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ImportSnapshotInput{} + } + + req = c.newRequest(op, input, output) + output = &ImportSnapshotOutput{} + req.Data = output + return +} + +// Imports a disk into an EBS snapshot. +func (c *EC2) ImportSnapshot(input *ImportSnapshotInput) (*ImportSnapshotOutput, error) { + req, out := c.ImportSnapshotRequest(input) + err := req.Send() + return out, err +} + +const opImportVolume = "ImportVolume" + +// ImportVolumeRequest generates a request for the ImportVolume operation. +func (c *EC2) ImportVolumeRequest(input *ImportVolumeInput) (req *request.Request, output *ImportVolumeOutput) { + op := &request.Operation{ + Name: opImportVolume, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ImportVolumeInput{} + } + + req = c.newRequest(op, input, output) + output = &ImportVolumeOutput{} + req.Data = output + return +} + +// Creates an import volume task using metadata from the specified disk image. +// After importing the image, you then upload it using the ec2-import-volume +// command in the Amazon EC2 command-line interface (CLI) tools. For more information, +// see Using the Command Line Tools to Import Your Virtual Machine to Amazon +// EC2 (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/UploadingYourInstancesandVolumes.html) +// in the Amazon Elastic Compute Cloud User Guide. +func (c *EC2) ImportVolume(input *ImportVolumeInput) (*ImportVolumeOutput, error) { + req, out := c.ImportVolumeRequest(input) + err := req.Send() + return out, err +} + +const opModifyHosts = "ModifyHosts" + +// ModifyHostsRequest generates a request for the ModifyHosts operation. +func (c *EC2) ModifyHostsRequest(input *ModifyHostsInput) (req *request.Request, output *ModifyHostsOutput) { + op := &request.Operation{ + Name: opModifyHosts, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ModifyHostsInput{} + } + + req = c.newRequest(op, input, output) + output = &ModifyHostsOutput{} + req.Data = output + return +} + +// Modifies the auto-placement setting of a Dedicated host.
When auto-placement +// is enabled, AWS will place instances that you launch with a tenancy of host, +// but without targeting a specific host ID, onto any available Dedicated host +// in your account which has auto-placement enabled. When auto-placement is +// disabled, you need to provide a host ID if you want the instance to launch +// onto a specific host. If no host ID is provided, the instance will be launched +// onto a suitable host which has auto-placement enabled. +func (c *EC2) ModifyHosts(input *ModifyHostsInput) (*ModifyHostsOutput, error) { + req, out := c.ModifyHostsRequest(input) + err := req.Send() + return out, err +} + +const opModifyIdFormat = "ModifyIdFormat" + +// ModifyIdFormatRequest generates a request for the ModifyIdFormat operation. +func (c *EC2) ModifyIdFormatRequest(input *ModifyIdFormatInput) (req *request.Request, output *ModifyIdFormatOutput) { + op := &request.Operation{ + Name: opModifyIdFormat, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ModifyIdFormatInput{} + } + + req = c.newRequest(op, input, output) + output = &ModifyIdFormatOutput{} + req.Data = output + return +} + +// Modifies the ID format for the specified resource on a per-region basis. +// You can specify that resources should receive longer IDs (17-character IDs) +// when they are created. The following resource types support longer IDs: instance +// | reservation. +// +// This setting applies to the IAM user who makes the request; it does not +// apply to the entire AWS account. By default, an IAM user defaults to the +// same settings as the root user, unless they explicitly override the settings +// by running this request. Resources created with longer IDs are visible to +// all IAM users, regardless of these settings and provided that they have permission +// to use the relevant Describe command for the resource type. +func (c *EC2) ModifyIdFormat(input *ModifyIdFormatInput) (*ModifyIdFormatOutput, error) { + req, out := c.ModifyIdFormatRequest(input) + err := req.Send() + return out, err +} + +const opModifyImageAttribute = "ModifyImageAttribute" + +// ModifyImageAttributeRequest generates a request for the ModifyImageAttribute operation. +func (c *EC2) ModifyImageAttributeRequest(input *ModifyImageAttributeInput) (req *request.Request, output *ModifyImageAttributeOutput) { + op := &request.Operation{ + Name: opModifyImageAttribute, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ModifyImageAttributeInput{} + } + + req = c.newRequest(op, input, output) + output = &ModifyImageAttributeOutput{} + req.Data = output + return +} + +// Modifies the specified attribute of the specified AMI. You can specify only +// one attribute at a time. +// +// AWS Marketplace product codes cannot be modified. Images with an AWS Marketplace +// product code cannot be made public. +func (c *EC2) ModifyImageAttribute(input *ModifyImageAttributeInput) (*ModifyImageAttributeOutput, error) { + req, out := c.ModifyImageAttributeRequest(input) + err := req.Send() + return out, err +} + +const opModifyInstanceAttribute = "ModifyInstanceAttribute" + +// ModifyInstanceAttributeRequest generates a request for the ModifyInstanceAttribute operation. 
+func (c *EC2) ModifyInstanceAttributeRequest(input *ModifyInstanceAttributeInput) (req *request.Request, output *ModifyInstanceAttributeOutput) { + op := &request.Operation{ + Name: opModifyInstanceAttribute, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ModifyInstanceAttributeInput{} + } + + req = c.newRequest(op, input, output) + output = &ModifyInstanceAttributeOutput{} + req.Data = output + return +} + +// Modifies the specified attribute of the specified instance. You can specify +// only one attribute at a time. +// +// To modify some attributes, the instance must be stopped. For more information, +// see Modifying Attributes of a Stopped Instance (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_ChangingAttributesWhileInstanceStopped.html) +// in the Amazon Elastic Compute Cloud User Guide. +func (c *EC2) ModifyInstanceAttribute(input *ModifyInstanceAttributeInput) (*ModifyInstanceAttributeOutput, error) { + req, out := c.ModifyInstanceAttributeRequest(input) + err := req.Send() + return out, err +} + +const opModifyInstancePlacement = "ModifyInstancePlacement" + +// ModifyInstancePlacementRequest generates a request for the ModifyInstancePlacement operation. +func (c *EC2) ModifyInstancePlacementRequest(input *ModifyInstancePlacementInput) (req *request.Request, output *ModifyInstancePlacementOutput) { + op := &request.Operation{ + Name: opModifyInstancePlacement, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ModifyInstancePlacementInput{} + } + + req = c.newRequest(op, input, output) + output = &ModifyInstancePlacementOutput{} + req.Data = output + return +} + +// Sets the instance affinity value for a specific stopped instance and modifies +// the instance tenancy setting. +// +// Instance affinity is disabled by default. When instance affinity is host +// and it is not associated with a specific Dedicated host, the next time it +// is launched it will automatically be associated with the host it lands on. +// This relationship will persist if the instance is stopped/started, or rebooted. +// +// You can modify the host ID associated with a stopped instance. If a stopped +// instance has a new host ID association, the instance will target that host +// when restarted. +// +// You can modify the tenancy of a stopped instance with a tenancy of host +// or dedicated. +// +// Affinity, hostID, and tenancy are not required parameters, but at least +// one of them must be specified in the request. Affinity and tenancy can be +// modified in the same request, but tenancy can only be modified on instances +// that are stopped. +func (c *EC2) ModifyInstancePlacement(input *ModifyInstancePlacementInput) (*ModifyInstancePlacementOutput, error) { + req, out := c.ModifyInstancePlacementRequest(input) + err := req.Send() + return out, err +} + +const opModifyNetworkInterfaceAttribute = "ModifyNetworkInterfaceAttribute" + +// ModifyNetworkInterfaceAttributeRequest generates a request for the ModifyNetworkInterfaceAttribute operation.
+func (c *EC2) ModifyNetworkInterfaceAttributeRequest(input *ModifyNetworkInterfaceAttributeInput) (req *request.Request, output *ModifyNetworkInterfaceAttributeOutput) { + op := &request.Operation{ + Name: opModifyNetworkInterfaceAttribute, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ModifyNetworkInterfaceAttributeInput{} + } + + req = c.newRequest(op, input, output) + output = &ModifyNetworkInterfaceAttributeOutput{} + req.Data = output + return +} + +// Modifies the specified network interface attribute. You can specify only +// one attribute at a time. +func (c *EC2) ModifyNetworkInterfaceAttribute(input *ModifyNetworkInterfaceAttributeInput) (*ModifyNetworkInterfaceAttributeOutput, error) { + req, out := c.ModifyNetworkInterfaceAttributeRequest(input) + err := req.Send() + return out, err +} + +const opModifyReservedInstances = "ModifyReservedInstances" + +// ModifyReservedInstancesRequest generates a request for the ModifyReservedInstances operation. +func (c *EC2) ModifyReservedInstancesRequest(input *ModifyReservedInstancesInput) (req *request.Request, output *ModifyReservedInstancesOutput) { + op := &request.Operation{ + Name: opModifyReservedInstances, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ModifyReservedInstancesInput{} + } + + req = c.newRequest(op, input, output) + output = &ModifyReservedInstancesOutput{} + req.Data = output + return +} + +// Modifies the Availability Zone, instance count, instance type, or network +// platform (EC2-Classic or EC2-VPC) of your Reserved Instances. The Reserved +// Instances to be modified must be identical, except for Availability Zone, +// network platform, and instance type. +// +// For more information, see Modifying Reserved Instances (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ri-modifying.html) +// in the Amazon Elastic Compute Cloud User Guide. +func (c *EC2) ModifyReservedInstances(input *ModifyReservedInstancesInput) (*ModifyReservedInstancesOutput, error) { + req, out := c.ModifyReservedInstancesRequest(input) + err := req.Send() + return out, err +} + +const opModifySnapshotAttribute = "ModifySnapshotAttribute" + +// ModifySnapshotAttributeRequest generates a request for the ModifySnapshotAttribute operation. +func (c *EC2) ModifySnapshotAttributeRequest(input *ModifySnapshotAttributeInput) (req *request.Request, output *ModifySnapshotAttributeOutput) { + op := &request.Operation{ + Name: opModifySnapshotAttribute, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ModifySnapshotAttributeInput{} + } + + req = c.newRequest(op, input, output) + output = &ModifySnapshotAttributeOutput{} + req.Data = output + return +} + +// Adds or removes permission settings for the specified snapshot. You may add +// or remove specified AWS account IDs from a snapshot's list of create volume +// permissions, but you cannot do both in a single API call. If you need to +// both add and remove account IDs for a snapshot, you must use multiple API +// calls. +// +// For more information on modifying snapshot permissions, see Sharing Snapshots +// (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-modifying-snapshot-permissions.html) +// in the Amazon Elastic Compute Cloud User Guide. +// +// Snapshots with AWS Marketplace product codes cannot be made public. 
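+// +// Illustrative sketch (editor's addition, not generated SDK code): because a +// single call may only add or remove, granting and revoking access takes two +// requests. Assumes a configured *EC2 client named svc; IDs are placeholders. +// +//    _, err := svc.ModifySnapshotAttribute(&ModifySnapshotAttributeInput{ +//        SnapshotId:    aws.String("snap-1a2b3c4d"), // placeholder ID +//        Attribute:     aws.String("createVolumePermission"), +//        OperationType: aws.String("add"), +//        UserIds:       []*string{aws.String("111122223333")}, +//    }) +//    // A second call with OperationType "remove" revokes another account.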
+func (c *EC2) ModifySnapshotAttribute(input *ModifySnapshotAttributeInput) (*ModifySnapshotAttributeOutput, error) { + req, out := c.ModifySnapshotAttributeRequest(input) + err := req.Send() + return out, err +} + +const opModifySpotFleetRequest = "ModifySpotFleetRequest" + +// ModifySpotFleetRequestRequest generates a request for the ModifySpotFleetRequest operation. +func (c *EC2) ModifySpotFleetRequestRequest(input *ModifySpotFleetRequestInput) (req *request.Request, output *ModifySpotFleetRequestOutput) { + op := &request.Operation{ + Name: opModifySpotFleetRequest, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ModifySpotFleetRequestInput{} + } + + req = c.newRequest(op, input, output) + output = &ModifySpotFleetRequestOutput{} + req.Data = output + return +} + +// Modifies the specified Spot fleet request. +// +// While the Spot fleet request is being modified, it is in the modifying state. +// +// To scale up your Spot fleet, increase its target capacity. The Spot fleet +// launches the additional Spot instances according to the allocation strategy +// for the Spot fleet request. If the allocation strategy is lowestPrice, the +// Spot fleet launches instances using the Spot pool with the lowest price. +// If the allocation strategy is diversified, the Spot fleet distributes the +// instances across the Spot pools. +// +// To scale down your Spot fleet, decrease its target capacity. First, the +// Spot fleet cancels any open bids that exceed the new target capacity. You +// can request that the Spot fleet terminate Spot instances until the size of +// the fleet no longer exceeds the new target capacity. If the allocation strategy +// is lowestPrice, the Spot fleet terminates the instances with the highest +// price per unit. If the allocation strategy is diversified, the Spot fleet +// terminates instances across the Spot pools. Alternatively, you can request +// that the Spot fleet keep the fleet at its current size, but not replace any +// Spot instances that are interrupted or that you terminate manually. +func (c *EC2) ModifySpotFleetRequest(input *ModifySpotFleetRequestInput) (*ModifySpotFleetRequestOutput, error) { + req, out := c.ModifySpotFleetRequestRequest(input) + err := req.Send() + return out, err +} + +const opModifySubnetAttribute = "ModifySubnetAttribute" + +// ModifySubnetAttributeRequest generates a request for the ModifySubnetAttribute operation. +func (c *EC2) ModifySubnetAttributeRequest(input *ModifySubnetAttributeInput) (req *request.Request, output *ModifySubnetAttributeOutput) { + op := &request.Operation{ + Name: opModifySubnetAttribute, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ModifySubnetAttributeInput{} + } + + req = c.newRequest(op, input, output) + output = &ModifySubnetAttributeOutput{} + req.Data = output + return +} + +// Modifies a subnet attribute. +func (c *EC2) ModifySubnetAttribute(input *ModifySubnetAttributeInput) (*ModifySubnetAttributeOutput, error) { + req, out := c.ModifySubnetAttributeRequest(input) + err := req.Send() + return out, err +} + +const opModifyVolumeAttribute = "ModifyVolumeAttribute" + +// ModifyVolumeAttributeRequest generates a request for the ModifyVolumeAttribute operation. 
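+// +// Illustrative sketch (editor's addition, not generated SDK code): opting a +// volume into automatic I/O resumption, per the behavior described below. +// Assumes a configured *EC2 client named svc; the volume ID is a placeholder. +// +//    _, err := svc.ModifyVolumeAttribute(&ModifyVolumeAttributeInput{ +//        VolumeId:     aws.String("vol-1a2b3c4d"), // placeholder ID +//        AutoEnableIO: &AttributeBooleanValue{Value: aws.Bool(true)}, +//    })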
+func (c *EC2) ModifyVolumeAttributeRequest(input *ModifyVolumeAttributeInput) (req *request.Request, output *ModifyVolumeAttributeOutput) { + op := &request.Operation{ + Name: opModifyVolumeAttribute, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ModifyVolumeAttributeInput{} + } + + req = c.newRequest(op, input, output) + output = &ModifyVolumeAttributeOutput{} + req.Data = output + return +} + +// Modifies a volume attribute. +// +// By default, all I/O operations for the volume are suspended when the data +// on the volume is determined to be potentially inconsistent, to prevent undetectable, +// latent data corruption. The I/O access to the volume can be resumed by first +// enabling I/O access and then checking the data consistency on your volume. +// +// You can change the default behavior to resume I/O operations. We recommend +// that you change this only for boot volumes or for volumes that are stateless +// or disposable. +func (c *EC2) ModifyVolumeAttribute(input *ModifyVolumeAttributeInput) (*ModifyVolumeAttributeOutput, error) { + req, out := c.ModifyVolumeAttributeRequest(input) + err := req.Send() + return out, err +} + +const opModifyVpcAttribute = "ModifyVpcAttribute" + +// ModifyVpcAttributeRequest generates a request for the ModifyVpcAttribute operation. +func (c *EC2) ModifyVpcAttributeRequest(input *ModifyVpcAttributeInput) (req *request.Request, output *ModifyVpcAttributeOutput) { + op := &request.Operation{ + Name: opModifyVpcAttribute, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ModifyVpcAttributeInput{} + } + + req = c.newRequest(op, input, output) + output = &ModifyVpcAttributeOutput{} + req.Data = output + return +} + +// Modifies the specified attribute of the specified VPC. +func (c *EC2) ModifyVpcAttribute(input *ModifyVpcAttributeInput) (*ModifyVpcAttributeOutput, error) { + req, out := c.ModifyVpcAttributeRequest(input) + err := req.Send() + return out, err +} + +const opModifyVpcEndpoint = "ModifyVpcEndpoint" + +// ModifyVpcEndpointRequest generates a request for the ModifyVpcEndpoint operation. +func (c *EC2) ModifyVpcEndpointRequest(input *ModifyVpcEndpointInput) (req *request.Request, output *ModifyVpcEndpointOutput) { + op := &request.Operation{ + Name: opModifyVpcEndpoint, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ModifyVpcEndpointInput{} + } + + req = c.newRequest(op, input, output) + output = &ModifyVpcEndpointOutput{} + req.Data = output + return +} + +// Modifies attributes of a specified VPC endpoint. You can modify the policy +// associated with the endpoint, and you can add and remove route tables associated +// with the endpoint. +func (c *EC2) ModifyVpcEndpoint(input *ModifyVpcEndpointInput) (*ModifyVpcEndpointOutput, error) { + req, out := c.ModifyVpcEndpointRequest(input) + err := req.Send() + return out, err +} + +const opMonitorInstances = "MonitorInstances" + +// MonitorInstancesRequest generates a request for the MonitorInstances operation. +func (c *EC2) MonitorInstancesRequest(input *MonitorInstancesInput) (req *request.Request, output *MonitorInstancesOutput) { + op := &request.Operation{ + Name: opMonitorInstances, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &MonitorInstancesInput{} + } + + req = c.newRequest(op, input, output) + output = &MonitorInstancesOutput{} + req.Data = output + return +} + +// Enables monitoring for a running instance. 
For more information about monitoring +// instances, see Monitoring Your Instances and Volumes (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-cloudwatch.html) +// in the Amazon Elastic Compute Cloud User Guide. +func (c *EC2) MonitorInstances(input *MonitorInstancesInput) (*MonitorInstancesOutput, error) { + req, out := c.MonitorInstancesRequest(input) + err := req.Send() + return out, err +} + +const opMoveAddressToVpc = "MoveAddressToVpc" + +// MoveAddressToVpcRequest generates a request for the MoveAddressToVpc operation. +func (c *EC2) MoveAddressToVpcRequest(input *MoveAddressToVpcInput) (req *request.Request, output *MoveAddressToVpcOutput) { + op := &request.Operation{ + Name: opMoveAddressToVpc, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &MoveAddressToVpcInput{} + } + + req = c.newRequest(op, input, output) + output = &MoveAddressToVpcOutput{} + req.Data = output + return +} + +// Moves an Elastic IP address from the EC2-Classic platform to the EC2-VPC +// platform. The Elastic IP address must be allocated to your account for more +// than 24 hours, and it must not be associated with an instance. After the +// Elastic IP address is moved, it is no longer available for use in the EC2-Classic +// platform, unless you move it back using the RestoreAddressToClassic request. +// You cannot move an Elastic IP address that's allocated for use in the EC2-VPC +// platform to the EC2-Classic platform. You cannot migrate an Elastic IP address +// that's associated with a reverse DNS record. Contact AWS account and billing +// support to remove the reverse DNS record. +func (c *EC2) MoveAddressToVpc(input *MoveAddressToVpcInput) (*MoveAddressToVpcOutput, error) { + req, out := c.MoveAddressToVpcRequest(input) + err := req.Send() + return out, err +} + +const opPurchaseReservedInstancesOffering = "PurchaseReservedInstancesOffering" + +// PurchaseReservedInstancesOfferingRequest generates a request for the PurchaseReservedInstancesOffering operation. +func (c *EC2) PurchaseReservedInstancesOfferingRequest(input *PurchaseReservedInstancesOfferingInput) (req *request.Request, output *PurchaseReservedInstancesOfferingOutput) { + op := &request.Operation{ + Name: opPurchaseReservedInstancesOffering, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &PurchaseReservedInstancesOfferingInput{} + } + + req = c.newRequest(op, input, output) + output = &PurchaseReservedInstancesOfferingOutput{} + req.Data = output + return +} + +// Purchases a Reserved Instance for use with your account. With Reserved Instances, +// you obtain a capacity reservation for a certain instance configuration over +// a specified period of time and pay a lower hourly rate compared to On-Demand +// instance pricing. +// +// Use DescribeReservedInstancesOfferings to get a list of Reserved Instance +// offerings that match your specifications. After you've purchased a Reserved +// Instance, you can check for your new Reserved Instance with DescribeReservedInstances. +// +// For more information, see Reserved Instances (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/concepts-on-demand-reserved-instances.html) +// and Reserved Instance Marketplace (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ri-market-general.html) +// in the Amazon Elastic Compute Cloud User Guide. 
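+// +// Illustrative sketch (editor's addition, not generated SDK code): the flow +// described above, from finding an offering to purchasing it. Assumes a +// configured *EC2 client named svc; note that a successful call incurs real +// charges. +// +//    offers, err := svc.DescribeReservedInstancesOfferings( +//        &DescribeReservedInstancesOfferingsInput{InstanceType: aws.String("m3.medium")}) +//    if err != nil || len(offers.ReservedInstancesOfferings) == 0 { +//        log.Fatal("no matching offering: ", err) +//    } +//    out, err := svc.PurchaseReservedInstancesOffering(&PurchaseReservedInstancesOfferingInput{ +//        ReservedInstancesOfferingId: offers.ReservedInstancesOfferings[0].ReservedInstancesOfferingId, +//        InstanceCount:               aws.Int64(1), +//    }) +//    if err == nil { +//        fmt.Println("purchased:", *out.ReservedInstancesId) +//    }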
+func (c *EC2) PurchaseReservedInstancesOffering(input *PurchaseReservedInstancesOfferingInput) (*PurchaseReservedInstancesOfferingOutput, error) { + req, out := c.PurchaseReservedInstancesOfferingRequest(input) + err := req.Send() + return out, err +} + +const opPurchaseScheduledInstances = "PurchaseScheduledInstances" + +// PurchaseScheduledInstancesRequest generates a request for the PurchaseScheduledInstances operation. +func (c *EC2) PurchaseScheduledInstancesRequest(input *PurchaseScheduledInstancesInput) (req *request.Request, output *PurchaseScheduledInstancesOutput) { + op := &request.Operation{ + Name: opPurchaseScheduledInstances, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &PurchaseScheduledInstancesInput{} + } + + req = c.newRequest(op, input, output) + output = &PurchaseScheduledInstancesOutput{} + req.Data = output + return +} + +// Purchases one or more Scheduled Instances with the specified schedule. +// +// Scheduled Instances enable you to purchase Amazon EC2 compute capacity by +// the hour for a one-year term. Before you can purchase a Scheduled Instance, +// you must call DescribeScheduledInstanceAvailability to check for available +// schedules and obtain a purchase token. +func (c *EC2) PurchaseScheduledInstances(input *PurchaseScheduledInstancesInput) (*PurchaseScheduledInstancesOutput, error) { + req, out := c.PurchaseScheduledInstancesRequest(input) + err := req.Send() + return out, err +} + +const opRebootInstances = "RebootInstances" + +// RebootInstancesRequest generates a request for the RebootInstances operation. +func (c *EC2) RebootInstancesRequest(input *RebootInstancesInput) (req *request.Request, output *RebootInstancesOutput) { + op := &request.Operation{ + Name: opRebootInstances, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &RebootInstancesInput{} + } + + req = c.newRequest(op, input, output) + output = &RebootInstancesOutput{} + req.Data = output + return +} + +// Requests a reboot of one or more instances. This operation is asynchronous; +// it only queues a request to reboot the specified instances. The operation +// succeeds if the instances are valid and belong to you. Requests to reboot +// terminated instances are ignored. +// +// If a Linux/Unix instance does not cleanly shut down within four minutes, +// Amazon EC2 performs a hard reboot. +// +// For more information about troubleshooting, see Getting Console Output and +// Rebooting Instances (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-console.html) +// in the Amazon Elastic Compute Cloud User Guide. +func (c *EC2) RebootInstances(input *RebootInstancesInput) (*RebootInstancesOutput, error) { + req, out := c.RebootInstancesRequest(input) + err := req.Send() + return out, err +} + +const opRegisterImage = "RegisterImage" + +// RegisterImageRequest generates a request for the RegisterImage operation. +func (c *EC2) RegisterImageRequest(input *RegisterImageInput) (req *request.Request, output *RegisterImageOutput) { + op := &request.Operation{ + Name: opRegisterImage, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &RegisterImageInput{} + } + + req = c.newRequest(op, input, output) + output = &RegisterImageOutput{} + req.Data = output + return +} + +// Registers an AMI. When you're creating an AMI, this is the final step you +// must complete before you can launch an instance from the AMI. 
For more information +// about creating AMIs, see Creating Your Own AMIs (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/creating-an-ami.html) +// in the Amazon Elastic Compute Cloud User Guide. +// +// For Amazon EBS-backed instances, CreateImage creates and registers the AMI +// in a single request, so you don't have to register the AMI yourself. +// +// You can also use RegisterImage to create an Amazon EBS-backed Linux AMI +// from a snapshot of a root device volume. For more information, see Launching +// an Instance from a Snapshot (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_LaunchingInstanceFromSnapshot.html) +// in the Amazon Elastic Compute Cloud User Guide. +// +// Some Linux distributions, such as Red Hat Enterprise Linux (RHEL) and SUSE +// Linux Enterprise Server (SLES), use the EC2 billingProduct code associated +// with an AMI to verify subscription status for package updates. Creating an +// AMI from an EBS snapshot does not maintain this billing code, and subsequent +// instances launched from such an AMI will not be able to connect to package +// update infrastructure. +// +// Similarly, although you can create a Windows AMI from a snapshot, you can't +// successfully launch an instance from the AMI. +// +// To create Windows AMIs or to create AMIs for Linux operating systems that +// must retain AMI billing codes to work properly, see CreateImage. +// +// If needed, you can deregister an AMI at any time. Any modifications you +// make to an AMI backed by an instance store volume invalidates its registration. +// If you make changes to an image, deregister the previous image and register +// the new image. +// +// You can't register an image where a secondary (non-root) snapshot has AWS +// Marketplace product codes. +func (c *EC2) RegisterImage(input *RegisterImageInput) (*RegisterImageOutput, error) { + req, out := c.RegisterImageRequest(input) + err := req.Send() + return out, err +} + +const opRejectVpcPeeringConnection = "RejectVpcPeeringConnection" + +// RejectVpcPeeringConnectionRequest generates a request for the RejectVpcPeeringConnection operation. +func (c *EC2) RejectVpcPeeringConnectionRequest(input *RejectVpcPeeringConnectionInput) (req *request.Request, output *RejectVpcPeeringConnectionOutput) { + op := &request.Operation{ + Name: opRejectVpcPeeringConnection, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &RejectVpcPeeringConnectionInput{} + } + + req = c.newRequest(op, input, output) + output = &RejectVpcPeeringConnectionOutput{} + req.Data = output + return +} + +// Rejects a VPC peering connection request. The VPC peering connection must +// be in the pending-acceptance state. Use the DescribeVpcPeeringConnections +// request to view your outstanding VPC peering connection requests. To delete +// an active VPC peering connection, or to delete a VPC peering connection request +// that you initiated, use DeleteVpcPeeringConnection. +func (c *EC2) RejectVpcPeeringConnection(input *RejectVpcPeeringConnectionInput) (*RejectVpcPeeringConnectionOutput, error) { + req, out := c.RejectVpcPeeringConnectionRequest(input) + err := req.Send() + return out, err +} + +const opReleaseAddress = "ReleaseAddress" + +// ReleaseAddressRequest generates a request for the ReleaseAddress operation. 
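+//
+// A minimal illustrative sketch (not part of the generated SDK) of the
+// two-step request pattern, assuming a configured *EC2 client svc and a
+// hypothetical allocation ID:
+//
+//	req, _ := svc.ReleaseAddressRequest(&ec2.ReleaseAddressInput{
+//		AllocationId: aws.String("eipalloc-12345678"), // hypothetical
+//	})
+//	err := req.Send() // nothing is sent over the wire until Send is called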
+func (c *EC2) ReleaseAddressRequest(input *ReleaseAddressInput) (req *request.Request, output *ReleaseAddressOutput) {
+	op := &request.Operation{
+		Name:       opReleaseAddress,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &ReleaseAddressInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &ReleaseAddressOutput{}
+	req.Data = output
+	return
+}
+
+// Releases the specified Elastic IP address.
+//
+// After releasing an Elastic IP address, it is released to the IP address
+// pool and might be unavailable to you. Be sure to update your DNS records
+// and any servers or devices that communicate with the address. If you attempt
+// to release an Elastic IP address that you already released, you'll get an
+// AuthFailure error if the address is already allocated to another AWS account.
+//
+// [EC2-Classic, default VPC] Releasing an Elastic IP address automatically
+// disassociates it from any instance that it's associated with. To disassociate
+// an Elastic IP address without releasing it, use DisassociateAddress.
+//
+// [Nondefault VPC] You must use DisassociateAddress to disassociate the Elastic
+// IP address before you try to release it. Otherwise, Amazon EC2 returns an
+// error (InvalidIPAddress.InUse).
+func (c *EC2) ReleaseAddress(input *ReleaseAddressInput) (*ReleaseAddressOutput, error) {
+	req, out := c.ReleaseAddressRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+const opReleaseHosts = "ReleaseHosts"
+
+// ReleaseHostsRequest generates a request for the ReleaseHosts operation.
+func (c *EC2) ReleaseHostsRequest(input *ReleaseHostsInput) (req *request.Request, output *ReleaseHostsOutput) {
+	op := &request.Operation{
+		Name:       opReleaseHosts,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &ReleaseHostsInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &ReleaseHostsOutput{}
+	req.Data = output
+	return
+}
+
+// When you no longer want to use a Dedicated host, it can be released. On-Demand
+// billing is stopped and the host goes into the released state. The host ID of
+// Dedicated hosts that have been released can no longer be specified in another
+// request, e.g., ModifyHosts. You must stop or terminate all instances on a
+// host before it can be released.
+//
+// When Dedicated hosts are released, it may take some time for them to stop
+// counting toward your limit and you may receive capacity errors when trying
+// to allocate new Dedicated hosts. Try waiting a few minutes, and then try
+// again.
+//
+// Released hosts will still appear in a DescribeHosts response.
+func (c *EC2) ReleaseHosts(input *ReleaseHostsInput) (*ReleaseHostsOutput, error) {
+	req, out := c.ReleaseHostsRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+const opReplaceNetworkAclAssociation = "ReplaceNetworkAclAssociation"
+
+// ReplaceNetworkAclAssociationRequest generates a request for the ReplaceNetworkAclAssociation operation.
+func (c *EC2) ReplaceNetworkAclAssociationRequest(input *ReplaceNetworkAclAssociationInput) (req *request.Request, output *ReplaceNetworkAclAssociationOutput) {
+	op := &request.Operation{
+		Name:       opReplaceNetworkAclAssociation,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &ReplaceNetworkAclAssociationInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &ReplaceNetworkAclAssociationOutput{}
+	req.Data = output
+	return
+}
+
+// Changes which network ACL a subnet is associated with.
By default when you +// create a subnet, it's automatically associated with the default network ACL. +// For more information about network ACLs, see Network ACLs (http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_ACLs.html) +// in the Amazon Virtual Private Cloud User Guide. +func (c *EC2) ReplaceNetworkAclAssociation(input *ReplaceNetworkAclAssociationInput) (*ReplaceNetworkAclAssociationOutput, error) { + req, out := c.ReplaceNetworkAclAssociationRequest(input) + err := req.Send() + return out, err +} + +const opReplaceNetworkAclEntry = "ReplaceNetworkAclEntry" + +// ReplaceNetworkAclEntryRequest generates a request for the ReplaceNetworkAclEntry operation. +func (c *EC2) ReplaceNetworkAclEntryRequest(input *ReplaceNetworkAclEntryInput) (req *request.Request, output *ReplaceNetworkAclEntryOutput) { + op := &request.Operation{ + Name: opReplaceNetworkAclEntry, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ReplaceNetworkAclEntryInput{} + } + + req = c.newRequest(op, input, output) + output = &ReplaceNetworkAclEntryOutput{} + req.Data = output + return +} + +// Replaces an entry (rule) in a network ACL. For more information about network +// ACLs, see Network ACLs (http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_ACLs.html) +// in the Amazon Virtual Private Cloud User Guide. +func (c *EC2) ReplaceNetworkAclEntry(input *ReplaceNetworkAclEntryInput) (*ReplaceNetworkAclEntryOutput, error) { + req, out := c.ReplaceNetworkAclEntryRequest(input) + err := req.Send() + return out, err +} + +const opReplaceRoute = "ReplaceRoute" + +// ReplaceRouteRequest generates a request for the ReplaceRoute operation. +func (c *EC2) ReplaceRouteRequest(input *ReplaceRouteInput) (req *request.Request, output *ReplaceRouteOutput) { + op := &request.Operation{ + Name: opReplaceRoute, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ReplaceRouteInput{} + } + + req = c.newRequest(op, input, output) + output = &ReplaceRouteOutput{} + req.Data = output + return +} + +// Replaces an existing route within a route table in a VPC. You must provide +// only one of the following: Internet gateway or virtual private gateway, NAT +// instance, NAT gateway, VPC peering connection, or network interface. +// +// For more information about route tables, see Route Tables (http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_Route_Tables.html) +// in the Amazon Virtual Private Cloud User Guide. +func (c *EC2) ReplaceRoute(input *ReplaceRouteInput) (*ReplaceRouteOutput, error) { + req, out := c.ReplaceRouteRequest(input) + err := req.Send() + return out, err +} + +const opReplaceRouteTableAssociation = "ReplaceRouteTableAssociation" + +// ReplaceRouteTableAssociationRequest generates a request for the ReplaceRouteTableAssociation operation. +func (c *EC2) ReplaceRouteTableAssociationRequest(input *ReplaceRouteTableAssociationInput) (req *request.Request, output *ReplaceRouteTableAssociationOutput) { + op := &request.Operation{ + Name: opReplaceRouteTableAssociation, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ReplaceRouteTableAssociationInput{} + } + + req = c.newRequest(op, input, output) + output = &ReplaceRouteTableAssociationOutput{} + req.Data = output + return +} + +// Changes the route table associated with a given subnet in a VPC. After the +// operation completes, the subnet uses the routes in the new route table it's +// associated with. 
For more information about route tables, see Route Tables +// (http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_Route_Tables.html) +// in the Amazon Virtual Private Cloud User Guide. +// +// You can also use ReplaceRouteTableAssociation to change which table is the +// main route table in the VPC. You just specify the main route table's association +// ID and the route table to be the new main route table. +func (c *EC2) ReplaceRouteTableAssociation(input *ReplaceRouteTableAssociationInput) (*ReplaceRouteTableAssociationOutput, error) { + req, out := c.ReplaceRouteTableAssociationRequest(input) + err := req.Send() + return out, err +} + +const opReportInstanceStatus = "ReportInstanceStatus" + +// ReportInstanceStatusRequest generates a request for the ReportInstanceStatus operation. +func (c *EC2) ReportInstanceStatusRequest(input *ReportInstanceStatusInput) (req *request.Request, output *ReportInstanceStatusOutput) { + op := &request.Operation{ + Name: opReportInstanceStatus, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ReportInstanceStatusInput{} + } + + req = c.newRequest(op, input, output) + output = &ReportInstanceStatusOutput{} + req.Data = output + return +} + +// Submits feedback about the status of an instance. The instance must be in +// the running state. If your experience with the instance differs from the +// instance status returned by DescribeInstanceStatus, use ReportInstanceStatus +// to report your experience with the instance. Amazon EC2 collects this information +// to improve the accuracy of status checks. +// +// Use of this action does not change the value returned by DescribeInstanceStatus. +func (c *EC2) ReportInstanceStatus(input *ReportInstanceStatusInput) (*ReportInstanceStatusOutput, error) { + req, out := c.ReportInstanceStatusRequest(input) + err := req.Send() + return out, err +} + +const opRequestSpotFleet = "RequestSpotFleet" + +// RequestSpotFleetRequest generates a request for the RequestSpotFleet operation. +func (c *EC2) RequestSpotFleetRequest(input *RequestSpotFleetInput) (req *request.Request, output *RequestSpotFleetOutput) { + op := &request.Operation{ + Name: opRequestSpotFleet, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &RequestSpotFleetInput{} + } + + req = c.newRequest(op, input, output) + output = &RequestSpotFleetOutput{} + req.Data = output + return +} + +// Creates a Spot fleet request. +// +// You can submit a single request that includes multiple launch specifications +// that vary by instance type, AMI, Availability Zone, or subnet. +// +// By default, the Spot fleet requests Spot instances in the Spot pool where +// the price per unit is the lowest. Each launch specification can include its +// own instance weighting that reflects the value of the instance type to your +// application workload. +// +// Alternatively, you can specify that the Spot fleet distribute the target +// capacity across the Spot pools included in its launch specifications. By +// ensuring that the Spot instances in your Spot fleet are in different Spot +// pools, you can improve the availability of your fleet. +// +// For more information, see Spot Fleet Requests (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/spot-fleet-requests.html) +// in the Amazon Elastic Compute Cloud User Guide. 
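+//
+// A minimal illustrative sketch (not part of the generated SDK), assuming a
+// configured *EC2 client svc; the role ARN, AMI ID, and bid price are
+// hypothetical placeholders:
+//
+//	resp, err := svc.RequestSpotFleet(&ec2.RequestSpotFleetInput{
+//		SpotFleetRequestConfig: &ec2.SpotFleetRequestConfigData{
+//			IamFleetRole:   aws.String("arn:aws:iam::123456789012:role/my-fleet-role"),
+//			SpotPrice:      aws.String("0.05"),
+//			TargetCapacity: aws.Int64(2),
+//			LaunchSpecifications: []*ec2.SpotFleetLaunchSpecification{
+//				{ImageId: aws.String("ami-12345678"), InstanceType: aws.String("m3.medium")},
+//			},
+//		},
+//	})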
+func (c *EC2) RequestSpotFleet(input *RequestSpotFleetInput) (*RequestSpotFleetOutput, error) { + req, out := c.RequestSpotFleetRequest(input) + err := req.Send() + return out, err +} + +const opRequestSpotInstances = "RequestSpotInstances" + +// RequestSpotInstancesRequest generates a request for the RequestSpotInstances operation. +func (c *EC2) RequestSpotInstancesRequest(input *RequestSpotInstancesInput) (req *request.Request, output *RequestSpotInstancesOutput) { + op := &request.Operation{ + Name: opRequestSpotInstances, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &RequestSpotInstancesInput{} + } + + req = c.newRequest(op, input, output) + output = &RequestSpotInstancesOutput{} + req.Data = output + return +} + +// Creates a Spot instance request. Spot instances are instances that Amazon +// EC2 launches when the bid price that you specify exceeds the current Spot +// price. Amazon EC2 periodically sets the Spot price based on available Spot +// Instance capacity and current Spot instance requests. For more information, +// see Spot Instance Requests (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/spot-requests.html) +// in the Amazon Elastic Compute Cloud User Guide. +func (c *EC2) RequestSpotInstances(input *RequestSpotInstancesInput) (*RequestSpotInstancesOutput, error) { + req, out := c.RequestSpotInstancesRequest(input) + err := req.Send() + return out, err +} + +const opResetImageAttribute = "ResetImageAttribute" + +// ResetImageAttributeRequest generates a request for the ResetImageAttribute operation. +func (c *EC2) ResetImageAttributeRequest(input *ResetImageAttributeInput) (req *request.Request, output *ResetImageAttributeOutput) { + op := &request.Operation{ + Name: opResetImageAttribute, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ResetImageAttributeInput{} + } + + req = c.newRequest(op, input, output) + output = &ResetImageAttributeOutput{} + req.Data = output + return +} + +// Resets an attribute of an AMI to its default value. +// +// The productCodes attribute can't be reset. +func (c *EC2) ResetImageAttribute(input *ResetImageAttributeInput) (*ResetImageAttributeOutput, error) { + req, out := c.ResetImageAttributeRequest(input) + err := req.Send() + return out, err +} + +const opResetInstanceAttribute = "ResetInstanceAttribute" + +// ResetInstanceAttributeRequest generates a request for the ResetInstanceAttribute operation. +func (c *EC2) ResetInstanceAttributeRequest(input *ResetInstanceAttributeInput) (req *request.Request, output *ResetInstanceAttributeOutput) { + op := &request.Operation{ + Name: opResetInstanceAttribute, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ResetInstanceAttributeInput{} + } + + req = c.newRequest(op, input, output) + output = &ResetInstanceAttributeOutput{} + req.Data = output + return +} + +// Resets an attribute of an instance to its default value. To reset the kernel +// or ramdisk, the instance must be in a stopped state. To reset the SourceDestCheck, +// the instance can be either running or stopped. +// +// The SourceDestCheck attribute controls whether source/destination checking +// is enabled. The default value is true, which means checking is enabled. This +// value must be false for a NAT instance to perform NAT. For more information, +// see NAT Instances (http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_NAT_Instance.html) +// in the Amazon Virtual Private Cloud User Guide. 
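+//
+// A minimal illustrative sketch (not part of the generated SDK): resetting
+// sourceDestCheck, assuming a configured *EC2 client svc and a hypothetical
+// instance ID:
+//
+//	_, err := svc.ResetInstanceAttribute(&ec2.ResetInstanceAttributeInput{
+//		InstanceId: aws.String("i-12345678"), // hypothetical
+//		Attribute:  aws.String("sourceDestCheck"),
+//	})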
+func (c *EC2) ResetInstanceAttribute(input *ResetInstanceAttributeInput) (*ResetInstanceAttributeOutput, error) { + req, out := c.ResetInstanceAttributeRequest(input) + err := req.Send() + return out, err +} + +const opResetNetworkInterfaceAttribute = "ResetNetworkInterfaceAttribute" + +// ResetNetworkInterfaceAttributeRequest generates a request for the ResetNetworkInterfaceAttribute operation. +func (c *EC2) ResetNetworkInterfaceAttributeRequest(input *ResetNetworkInterfaceAttributeInput) (req *request.Request, output *ResetNetworkInterfaceAttributeOutput) { + op := &request.Operation{ + Name: opResetNetworkInterfaceAttribute, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ResetNetworkInterfaceAttributeInput{} + } + + req = c.newRequest(op, input, output) + output = &ResetNetworkInterfaceAttributeOutput{} + req.Data = output + return +} + +// Resets a network interface attribute. You can specify only one attribute +// at a time. +func (c *EC2) ResetNetworkInterfaceAttribute(input *ResetNetworkInterfaceAttributeInput) (*ResetNetworkInterfaceAttributeOutput, error) { + req, out := c.ResetNetworkInterfaceAttributeRequest(input) + err := req.Send() + return out, err +} + +const opResetSnapshotAttribute = "ResetSnapshotAttribute" + +// ResetSnapshotAttributeRequest generates a request for the ResetSnapshotAttribute operation. +func (c *EC2) ResetSnapshotAttributeRequest(input *ResetSnapshotAttributeInput) (req *request.Request, output *ResetSnapshotAttributeOutput) { + op := &request.Operation{ + Name: opResetSnapshotAttribute, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ResetSnapshotAttributeInput{} + } + + req = c.newRequest(op, input, output) + output = &ResetSnapshotAttributeOutput{} + req.Data = output + return +} + +// Resets permission settings for the specified snapshot. +// +// For more information on modifying snapshot permissions, see Sharing Snapshots +// (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-modifying-snapshot-permissions.html) +// in the Amazon Elastic Compute Cloud User Guide. +func (c *EC2) ResetSnapshotAttribute(input *ResetSnapshotAttributeInput) (*ResetSnapshotAttributeOutput, error) { + req, out := c.ResetSnapshotAttributeRequest(input) + err := req.Send() + return out, err +} + +const opRestoreAddressToClassic = "RestoreAddressToClassic" + +// RestoreAddressToClassicRequest generates a request for the RestoreAddressToClassic operation. +func (c *EC2) RestoreAddressToClassicRequest(input *RestoreAddressToClassicInput) (req *request.Request, output *RestoreAddressToClassicOutput) { + op := &request.Operation{ + Name: opRestoreAddressToClassic, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &RestoreAddressToClassicInput{} + } + + req = c.newRequest(op, input, output) + output = &RestoreAddressToClassicOutput{} + req.Data = output + return +} + +// Restores an Elastic IP address that was previously moved to the EC2-VPC platform +// back to the EC2-Classic platform. You cannot move an Elastic IP address that +// was originally allocated for use in EC2-VPC. The Elastic IP address must +// not be associated with an instance or network interface. You cannot restore +// an Elastic IP address that's associated with a reverse DNS record. Contact +// AWS account and billing support to remove the reverse DNS record. 
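+//
+// A minimal illustrative sketch (not part of the generated SDK), assuming a
+// configured *EC2 client svc; the address is a hypothetical placeholder:
+//
+//	resp, err := svc.RestoreAddressToClassic(&ec2.RestoreAddressToClassicInput{
+//		PublicIp: aws.String("203.0.113.12"), // hypothetical
+//	})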
+func (c *EC2) RestoreAddressToClassic(input *RestoreAddressToClassicInput) (*RestoreAddressToClassicOutput, error) { + req, out := c.RestoreAddressToClassicRequest(input) + err := req.Send() + return out, err +} + +const opRevokeSecurityGroupEgress = "RevokeSecurityGroupEgress" + +// RevokeSecurityGroupEgressRequest generates a request for the RevokeSecurityGroupEgress operation. +func (c *EC2) RevokeSecurityGroupEgressRequest(input *RevokeSecurityGroupEgressInput) (req *request.Request, output *RevokeSecurityGroupEgressOutput) { + op := &request.Operation{ + Name: opRevokeSecurityGroupEgress, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &RevokeSecurityGroupEgressInput{} + } + + req = c.newRequest(op, input, output) + output = &RevokeSecurityGroupEgressOutput{} + req.Data = output + return +} + +// [EC2-VPC only] Removes one or more egress rules from a security group for +// EC2-VPC. This action doesn't apply to security groups for use in EC2-Classic. +// The values that you specify in the revoke request (for example, ports) must +// match the existing rule's values for the rule to be revoked. +// +// Each rule consists of the protocol and the CIDR range or source security +// group. For the TCP and UDP protocols, you must also specify the destination +// port or range of ports. For the ICMP protocol, you must also specify the +// ICMP type and code. +// +// Rule changes are propagated to instances within the security group as quickly +// as possible. However, a small delay might occur. +func (c *EC2) RevokeSecurityGroupEgress(input *RevokeSecurityGroupEgressInput) (*RevokeSecurityGroupEgressOutput, error) { + req, out := c.RevokeSecurityGroupEgressRequest(input) + err := req.Send() + return out, err +} + +const opRevokeSecurityGroupIngress = "RevokeSecurityGroupIngress" + +// RevokeSecurityGroupIngressRequest generates a request for the RevokeSecurityGroupIngress operation. +func (c *EC2) RevokeSecurityGroupIngressRequest(input *RevokeSecurityGroupIngressInput) (req *request.Request, output *RevokeSecurityGroupIngressOutput) { + op := &request.Operation{ + Name: opRevokeSecurityGroupIngress, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &RevokeSecurityGroupIngressInput{} + } + + req = c.newRequest(op, input, output) + output = &RevokeSecurityGroupIngressOutput{} + req.Data = output + return +} + +// Removes one or more ingress rules from a security group. The values that +// you specify in the revoke request (for example, ports) must match the existing +// rule's values for the rule to be removed. +// +// Each rule consists of the protocol and the CIDR range or source security +// group. For the TCP and UDP protocols, you must also specify the destination +// port or range of ports. For the ICMP protocol, you must also specify the +// ICMP type and code. +// +// Rule changes are propagated to instances within the security group as quickly +// as possible. However, a small delay might occur. +func (c *EC2) RevokeSecurityGroupIngress(input *RevokeSecurityGroupIngressInput) (*RevokeSecurityGroupIngressOutput, error) { + req, out := c.RevokeSecurityGroupIngressRequest(input) + err := req.Send() + return out, err +} + +const opRunInstances = "RunInstances" + +// RunInstancesRequest generates a request for the RunInstances operation. 
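+//
+// A minimal illustrative sketch (not part of the generated SDK): building the
+// request first and sending it later, assuming a configured *EC2 client svc
+// and a hypothetical AMI ID:
+//
+//	req, reservation := svc.RunInstancesRequest(&ec2.RunInstancesInput{
+//		ImageId:  aws.String("ami-12345678"), // hypothetical
+//		MinCount: aws.Int64(1),
+//		MaxCount: aws.Int64(1),
+//	})
+//	err := req.Send() // on success, reservation describes the new instances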
+func (c *EC2) RunInstancesRequest(input *RunInstancesInput) (req *request.Request, output *Reservation) { + op := &request.Operation{ + Name: opRunInstances, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &RunInstancesInput{} + } + + req = c.newRequest(op, input, output) + output = &Reservation{} + req.Data = output + return +} + +// Launches the specified number of instances using an AMI for which you have +// permissions. +// +// When you launch an instance, it enters the pending state. After the instance +// is ready for you, it enters the running state. To check the state of your +// instance, call DescribeInstances. +// +// If you don't specify a security group when launching an instance, Amazon +// EC2 uses the default security group. For more information, see Security Groups +// (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-network-security.html) +// in the Amazon Elastic Compute Cloud User Guide. +// +// [EC2-VPC only accounts] If you don't specify a subnet in the request, we +// choose a default subnet from your default VPC for you. +// +// [EC2-Classic accounts] If you're launching into EC2-Classic and you don't +// specify an Availability Zone, we choose one for you. +// +// Linux instances have access to the public key of the key pair at boot. You +// can use this key to provide secure access to the instance. Amazon EC2 public +// images use this feature to provide secure access without passwords. For more +// information, see Key Pairs (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-key-pairs.html) +// in the Amazon Elastic Compute Cloud User Guide. +// +// You can provide optional user data when launching an instance. For more +// information, see Instance Metadata (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/AESDG-chapter-instancedata.html) +// in the Amazon Elastic Compute Cloud User Guide. +// +// If any of the AMIs have a product code attached for which the user has not +// subscribed, RunInstances fails. +// +// T2 instance types can only be launched into a VPC. If you do not have a +// default VPC, or if you do not specify a subnet ID in the request, RunInstances +// fails. +// +// For more information about troubleshooting, see What To Do If An Instance +// Immediately Terminates (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_InstanceStraightToTerminated.html), +// and Troubleshooting Connecting to Your Instance (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/TroubleshootingInstancesConnecting.html) +// in the Amazon Elastic Compute Cloud User Guide. +func (c *EC2) RunInstances(input *RunInstancesInput) (*Reservation, error) { + req, out := c.RunInstancesRequest(input) + err := req.Send() + return out, err +} + +const opRunScheduledInstances = "RunScheduledInstances" + +// RunScheduledInstancesRequest generates a request for the RunScheduledInstances operation. +func (c *EC2) RunScheduledInstancesRequest(input *RunScheduledInstancesInput) (req *request.Request, output *RunScheduledInstancesOutput) { + op := &request.Operation{ + Name: opRunScheduledInstances, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &RunScheduledInstancesInput{} + } + + req = c.newRequest(op, input, output) + output = &RunScheduledInstancesOutput{} + req.Data = output + return +} + +// Launches the specified Scheduled Instances. +// +// Before you can launch a Scheduled Instance, you must purchase it and obtain +// an identifier using PurchaseScheduledInstances. 
+//
+// You must launch a Scheduled Instance during its scheduled time period. You
+// can't stop or reboot a Scheduled Instance, but you can terminate it as needed.
+// If you terminate a Scheduled Instance before the current scheduled time period
+// ends, you can launch it again after a few minutes.
+func (c *EC2) RunScheduledInstances(input *RunScheduledInstancesInput) (*RunScheduledInstancesOutput, error) {
+	req, out := c.RunScheduledInstancesRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+const opStartInstances = "StartInstances"
+
+// StartInstancesRequest generates a request for the StartInstances operation.
+func (c *EC2) StartInstancesRequest(input *StartInstancesInput) (req *request.Request, output *StartInstancesOutput) {
+	op := &request.Operation{
+		Name:       opStartInstances,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &StartInstancesInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &StartInstancesOutput{}
+	req.Data = output
+	return
+}
+
+// Starts an Amazon EBS-backed instance that you've previously stopped.
+//
+// Instances that use Amazon EBS volumes as their root devices can be quickly
+// stopped and started. When an instance is stopped, the compute resources are
+// released and you are not billed for hourly instance usage. However, your
+// root partition Amazon EBS volume remains, continues to persist your data,
+// and you are charged for Amazon EBS volume usage. You can restart your instance
+// at any time. Each time you transition an instance from stopped to started,
+// Amazon EC2 charges a full instance hour, even if transitions happen multiple
+// times within a single hour.
+//
+// Before stopping an instance, make sure it is in a state from which it can
+// be restarted. Stopping an instance does not preserve data stored in RAM.
+//
+// Performing this operation on an instance that uses an instance store as
+// its root device returns an error.
+//
+// For more information, see Stopping Instances (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Stop_Start.html)
+// in the Amazon Elastic Compute Cloud User Guide.
+func (c *EC2) StartInstances(input *StartInstancesInput) (*StartInstancesOutput, error) {
+	req, out := c.StartInstancesRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+const opStopInstances = "StopInstances"
+
+// StopInstancesRequest generates a request for the StopInstances operation.
+func (c *EC2) StopInstancesRequest(input *StopInstancesInput) (req *request.Request, output *StopInstancesOutput) {
+	op := &request.Operation{
+		Name:       opStopInstances,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &StopInstancesInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &StopInstancesOutput{}
+	req.Data = output
+	return
+}
+
+// Stops an Amazon EBS-backed instance. Each time you transition an instance
+// from stopped to started, Amazon EC2 charges a full instance hour, even if
+// transitions happen multiple times within a single hour.
+//
+// You can't start or stop Spot instances.
+//
+// Instances that use Amazon EBS volumes as their root devices can be quickly
+// stopped and started. When an instance is stopped, the compute resources are
+// released and you are not billed for hourly instance usage. However, your
+// root partition Amazon EBS volume remains, continues to persist your data,
+// and you are charged for Amazon EBS volume usage. You can restart your instance
+// at any time.
+// +// Before stopping an instance, make sure it is in a state from which it can +// be restarted. Stopping an instance does not preserve data stored in RAM. +// +// Performing this operation on an instance that uses an instance store as +// its root device returns an error. +// +// You can stop, start, and terminate EBS-backed instances. You can only terminate +// instance store-backed instances. What happens to an instance differs if you +// stop it or terminate it. For example, when you stop an instance, the root +// device and any other devices attached to the instance persist. When you terminate +// an instance, the root device and any other devices attached during the instance +// launch are automatically deleted. For more information about the differences +// between stopping and terminating instances, see Instance Lifecycle (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-instance-lifecycle.html) +// in the Amazon Elastic Compute Cloud User Guide. +// +// For more information about troubleshooting, see Troubleshooting Stopping +// Your Instance (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/TroubleshootingInstancesStopping.html) +// in the Amazon Elastic Compute Cloud User Guide. +func (c *EC2) StopInstances(input *StopInstancesInput) (*StopInstancesOutput, error) { + req, out := c.StopInstancesRequest(input) + err := req.Send() + return out, err +} + +const opTerminateInstances = "TerminateInstances" + +// TerminateInstancesRequest generates a request for the TerminateInstances operation. +func (c *EC2) TerminateInstancesRequest(input *TerminateInstancesInput) (req *request.Request, output *TerminateInstancesOutput) { + op := &request.Operation{ + Name: opTerminateInstances, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &TerminateInstancesInput{} + } + + req = c.newRequest(op, input, output) + output = &TerminateInstancesOutput{} + req.Data = output + return +} + +// Shuts down one or more instances. This operation is idempotent; if you terminate +// an instance more than once, each call succeeds. +// +// Terminated instances remain visible after termination (for approximately +// one hour). +// +// By default, Amazon EC2 deletes all EBS volumes that were attached when the +// instance launched. Volumes attached after instance launch continue running. +// +// You can stop, start, and terminate EBS-backed instances. You can only terminate +// instance store-backed instances. What happens to an instance differs if you +// stop it or terminate it. For example, when you stop an instance, the root +// device and any other devices attached to the instance persist. When you terminate +// an instance, any attached EBS volumes with the DeleteOnTermination block +// device mapping parameter set to true are automatically deleted. For more +// information about the differences between stopping and terminating instances, +// see Instance Lifecycle (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-instance-lifecycle.html) +// in the Amazon Elastic Compute Cloud User Guide. +// +// For more information about troubleshooting, see Troubleshooting Terminating +// Your Instance (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/TroubleshootingInstancesShuttingDown.html) +// in the Amazon Elastic Compute Cloud User Guide. 
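+//
+// A minimal illustrative sketch (not part of the generated SDK), assuming a
+// configured *EC2 client svc and a hypothetical instance ID:
+//
+//	resp, err := svc.TerminateInstances(&ec2.TerminateInstancesInput{
+//		InstanceIds: []*string{aws.String("i-12345678")}, // hypothetical
+//	})
+//	// resp.TerminatingInstances reports the state change for each instance.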
+func (c *EC2) TerminateInstances(input *TerminateInstancesInput) (*TerminateInstancesOutput, error) { + req, out := c.TerminateInstancesRequest(input) + err := req.Send() + return out, err +} + +const opUnassignPrivateIpAddresses = "UnassignPrivateIpAddresses" + +// UnassignPrivateIpAddressesRequest generates a request for the UnassignPrivateIpAddresses operation. +func (c *EC2) UnassignPrivateIpAddressesRequest(input *UnassignPrivateIpAddressesInput) (req *request.Request, output *UnassignPrivateIpAddressesOutput) { + op := &request.Operation{ + Name: opUnassignPrivateIpAddresses, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UnassignPrivateIpAddressesInput{} + } + + req = c.newRequest(op, input, output) + output = &UnassignPrivateIpAddressesOutput{} + req.Data = output + return +} + +// Unassigns one or more secondary private IP addresses from a network interface. +func (c *EC2) UnassignPrivateIpAddresses(input *UnassignPrivateIpAddressesInput) (*UnassignPrivateIpAddressesOutput, error) { + req, out := c.UnassignPrivateIpAddressesRequest(input) + err := req.Send() + return out, err +} + +const opUnmonitorInstances = "UnmonitorInstances" + +// UnmonitorInstancesRequest generates a request for the UnmonitorInstances operation. +func (c *EC2) UnmonitorInstancesRequest(input *UnmonitorInstancesInput) (req *request.Request, output *UnmonitorInstancesOutput) { + op := &request.Operation{ + Name: opUnmonitorInstances, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UnmonitorInstancesInput{} + } + + req = c.newRequest(op, input, output) + output = &UnmonitorInstancesOutput{} + req.Data = output + return +} + +// Disables monitoring for a running instance. For more information about monitoring +// instances, see Monitoring Your Instances and Volumes (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-cloudwatch.html) +// in the Amazon Elastic Compute Cloud User Guide. +func (c *EC2) UnmonitorInstances(input *UnmonitorInstancesInput) (*UnmonitorInstancesOutput, error) { + req, out := c.UnmonitorInstancesRequest(input) + err := req.Send() + return out, err +} + +type AcceptVpcPeeringConnectionInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The ID of the VPC peering connection. + VpcPeeringConnectionId *string `locationName:"vpcPeeringConnectionId" type:"string"` +} + +// String returns the string representation +func (s AcceptVpcPeeringConnectionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AcceptVpcPeeringConnectionInput) GoString() string { + return s.String() +} + +type AcceptVpcPeeringConnectionOutput struct { + _ struct{} `type:"structure"` + + // Information about the VPC peering connection. + VpcPeeringConnection *VpcPeeringConnection `locationName:"vpcPeeringConnection" type:"structure"` +} + +// String returns the string representation +func (s AcceptVpcPeeringConnectionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AcceptVpcPeeringConnectionOutput) GoString() string { + return s.String() +} + +// Describes an account attribute. 
+type AccountAttribute struct { + _ struct{} `type:"structure"` + + // The name of the account attribute. + AttributeName *string `locationName:"attributeName" type:"string"` + + // One or more values for the account attribute. + AttributeValues []*AccountAttributeValue `locationName:"attributeValueSet" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s AccountAttribute) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AccountAttribute) GoString() string { + return s.String() +} + +// Describes a value of an account attribute. +type AccountAttributeValue struct { + _ struct{} `type:"structure"` + + // The value of the attribute. + AttributeValue *string `locationName:"attributeValue" type:"string"` +} + +// String returns the string representation +func (s AccountAttributeValue) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AccountAttributeValue) GoString() string { + return s.String() +} + +// Describes a running instance in a Spot fleet. +type ActiveInstance struct { + _ struct{} `type:"structure"` + + // The ID of the instance. + InstanceId *string `locationName:"instanceId" type:"string"` + + // The instance type. + InstanceType *string `locationName:"instanceType" type:"string"` + + // The ID of the Spot instance request. + SpotInstanceRequestId *string `locationName:"spotInstanceRequestId" type:"string"` +} + +// String returns the string representation +func (s ActiveInstance) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ActiveInstance) GoString() string { + return s.String() +} + +// Describes an Elastic IP address. +type Address struct { + _ struct{} `type:"structure"` + + // The ID representing the allocation of the address for use with EC2-VPC. + AllocationId *string `locationName:"allocationId" type:"string"` + + // The ID representing the association of the address with an instance in a + // VPC. + AssociationId *string `locationName:"associationId" type:"string"` + + // Indicates whether this Elastic IP address is for use with instances in EC2-Classic + // (standard) or instances in a VPC (vpc). + Domain *string `locationName:"domain" type:"string" enum:"DomainType"` + + // The ID of the instance that the address is associated with (if any). + InstanceId *string `locationName:"instanceId" type:"string"` + + // The ID of the network interface. + NetworkInterfaceId *string `locationName:"networkInterfaceId" type:"string"` + + // The ID of the AWS account that owns the network interface. + NetworkInterfaceOwnerId *string `locationName:"networkInterfaceOwnerId" type:"string"` + + // The private IP address associated with the Elastic IP address. + PrivateIpAddress *string `locationName:"privateIpAddress" type:"string"` + + // The Elastic IP address. + PublicIp *string `locationName:"publicIp" type:"string"` +} + +// String returns the string representation +func (s Address) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Address) GoString() string { + return s.String() +} + +type AllocateAddressInput struct { + _ struct{} `type:"structure"` + + // Set to vpc to allocate the address for use with instances in a VPC. + // + // Default: The address is for use with instances in EC2-Classic. 
+ Domain *string `type:"string" enum:"DomainType"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` +} + +// String returns the string representation +func (s AllocateAddressInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AllocateAddressInput) GoString() string { + return s.String() +} + +type AllocateAddressOutput struct { + _ struct{} `type:"structure"` + + // [EC2-VPC] The ID that AWS assigns to represent the allocation of the Elastic + // IP address for use with instances in a VPC. + AllocationId *string `locationName:"allocationId" type:"string"` + + // Indicates whether this Elastic IP address is for use with instances in EC2-Classic + // (standard) or instances in a VPC (vpc). + Domain *string `locationName:"domain" type:"string" enum:"DomainType"` + + // The Elastic IP address. + PublicIp *string `locationName:"publicIp" type:"string"` +} + +// String returns the string representation +func (s AllocateAddressOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AllocateAddressOutput) GoString() string { + return s.String() +} + +type AllocateHostsInput struct { + _ struct{} `type:"structure"` + + // This is enabled by default. This property allows instances to be automatically + // placed onto available Dedicated hosts, when you are launching instances without + // specifying a host ID. + // + // Default: Enabled + AutoPlacement *string `locationName:"autoPlacement" type:"string" enum:"AutoPlacement"` + + // The Availability Zone for the Dedicated hosts. + AvailabilityZone *string `locationName:"availabilityZone" type:"string" required:"true"` + + // Unique, case-sensitive identifier you provide to ensure idempotency of the + // request. For more information, see How to Ensure Idempotency (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Run_Instance_Idempotency.html) + // in the Amazon Elastic Compute Cloud User Guide. + ClientToken *string `locationName:"clientToken" type:"string"` + + // Specify the instance type that you want your Dedicated hosts to be configured + // for. When you specify the instance type, that is the only instance type that + // you can launch onto that host. + InstanceType *string `locationName:"instanceType" type:"string" required:"true"` + + // The number of Dedicated hosts you want to allocate to your account with these + // parameters. + Quantity *int64 `locationName:"quantity" type:"integer" required:"true"` +} + +// String returns the string representation +func (s AllocateHostsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AllocateHostsInput) GoString() string { + return s.String() +} + +type AllocateHostsOutput struct { + _ struct{} `type:"structure"` + + // The ID of the allocated Dedicated host. This is used when you want to launch + // an instance onto a specific host. 
+ HostIds []*string `locationName:"hostIdSet" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s AllocateHostsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AllocateHostsOutput) GoString() string { + return s.String() +} + +type AssignPrivateIpAddressesInput struct { + _ struct{} `type:"structure"` + + // Indicates whether to allow an IP address that is already assigned to another + // network interface or instance to be reassigned to the specified network interface. + AllowReassignment *bool `locationName:"allowReassignment" type:"boolean"` + + // The ID of the network interface. + NetworkInterfaceId *string `locationName:"networkInterfaceId" type:"string" required:"true"` + + // One or more IP addresses to be assigned as a secondary private IP address + // to the network interface. You can't specify this parameter when also specifying + // a number of secondary IP addresses. + // + // If you don't specify an IP address, Amazon EC2 automatically selects an + // IP address within the subnet range. + PrivateIpAddresses []*string `locationName:"privateIpAddress" locationNameList:"PrivateIpAddress" type:"list"` + + // The number of secondary IP addresses to assign to the network interface. + // You can't specify this parameter when also specifying private IP addresses. + SecondaryPrivateIpAddressCount *int64 `locationName:"secondaryPrivateIpAddressCount" type:"integer"` +} + +// String returns the string representation +func (s AssignPrivateIpAddressesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AssignPrivateIpAddressesInput) GoString() string { + return s.String() +} + +type AssignPrivateIpAddressesOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s AssignPrivateIpAddressesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AssignPrivateIpAddressesOutput) GoString() string { + return s.String() +} + +type AssociateAddressInput struct { + _ struct{} `type:"structure"` + + // [EC2-VPC] The allocation ID. This is required for EC2-VPC. + AllocationId *string `type:"string"` + + // [EC2-VPC] For a VPC in an EC2-Classic account, specify true to allow an Elastic + // IP address that is already associated with an instance or network interface + // to be reassociated with the specified instance or network interface. Otherwise, + // the operation fails. In a VPC in an EC2-VPC-only account, reassociation is + // automatic, therefore you can specify false to ensure the operation fails + // if the Elastic IP address is already associated with another resource. + AllowReassociation *bool `locationName:"allowReassociation" type:"boolean"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The ID of the instance. This is required for EC2-Classic. For EC2-VPC, you + // can specify either the instance ID or the network interface ID, but not both. + // The operation fails if you specify an instance ID unless exactly one network + // interface is attached. 
+ InstanceId *string `type:"string"` + + // [EC2-VPC] The ID of the network interface. If the instance has more than + // one network interface, you must specify a network interface ID. + NetworkInterfaceId *string `locationName:"networkInterfaceId" type:"string"` + + // [EC2-VPC] The primary or secondary private IP address to associate with the + // Elastic IP address. If no private IP address is specified, the Elastic IP + // address is associated with the primary private IP address. + PrivateIpAddress *string `locationName:"privateIpAddress" type:"string"` + + // The Elastic IP address. This is required for EC2-Classic. + PublicIp *string `type:"string"` +} + +// String returns the string representation +func (s AssociateAddressInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AssociateAddressInput) GoString() string { + return s.String() +} + +type AssociateAddressOutput struct { + _ struct{} `type:"structure"` + + // [EC2-VPC] The ID that represents the association of the Elastic IP address + // with an instance. + AssociationId *string `locationName:"associationId" type:"string"` +} + +// String returns the string representation +func (s AssociateAddressOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AssociateAddressOutput) GoString() string { + return s.String() +} + +type AssociateDhcpOptionsInput struct { + _ struct{} `type:"structure"` + + // The ID of the DHCP options set, or default to associate no DHCP options with + // the VPC. + DhcpOptionsId *string `type:"string" required:"true"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The ID of the VPC. + VpcId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s AssociateDhcpOptionsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AssociateDhcpOptionsInput) GoString() string { + return s.String() +} + +type AssociateDhcpOptionsOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s AssociateDhcpOptionsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AssociateDhcpOptionsOutput) GoString() string { + return s.String() +} + +type AssociateRouteTableInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The ID of the route table. + RouteTableId *string `locationName:"routeTableId" type:"string" required:"true"` + + // The ID of the subnet. 
+ SubnetId *string `locationName:"subnetId" type:"string" required:"true"` +} + +// String returns the string representation +func (s AssociateRouteTableInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AssociateRouteTableInput) GoString() string { + return s.String() +} + +type AssociateRouteTableOutput struct { + _ struct{} `type:"structure"` + + // The route table association ID (needed to disassociate the route table). + AssociationId *string `locationName:"associationId" type:"string"` +} + +// String returns the string representation +func (s AssociateRouteTableOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AssociateRouteTableOutput) GoString() string { + return s.String() +} + +type AttachClassicLinkVpcInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The ID of one or more of the VPC's security groups. You cannot specify security + // groups from a different VPC. + Groups []*string `locationName:"SecurityGroupId" locationNameList:"groupId" type:"list" required:"true"` + + // The ID of an EC2-Classic instance to link to the ClassicLink-enabled VPC. + InstanceId *string `locationName:"instanceId" type:"string" required:"true"` + + // The ID of a ClassicLink-enabled VPC. + VpcId *string `locationName:"vpcId" type:"string" required:"true"` +} + +// String returns the string representation +func (s AttachClassicLinkVpcInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AttachClassicLinkVpcInput) GoString() string { + return s.String() +} + +type AttachClassicLinkVpcOutput struct { + _ struct{} `type:"structure"` + + // Returns true if the request succeeds; otherwise, it returns an error. + Return *bool `locationName:"return" type:"boolean"` +} + +// String returns the string representation +func (s AttachClassicLinkVpcOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AttachClassicLinkVpcOutput) GoString() string { + return s.String() +} + +type AttachInternetGatewayInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The ID of the Internet gateway. + InternetGatewayId *string `locationName:"internetGatewayId" type:"string" required:"true"` + + // The ID of the VPC. 
+ VpcId *string `locationName:"vpcId" type:"string" required:"true"` +} + +// String returns the string representation +func (s AttachInternetGatewayInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AttachInternetGatewayInput) GoString() string { + return s.String() +} + +type AttachInternetGatewayOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s AttachInternetGatewayOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AttachInternetGatewayOutput) GoString() string { + return s.String() +} + +type AttachNetworkInterfaceInput struct { + _ struct{} `type:"structure"` + + // The index of the device for the network interface attachment. + DeviceIndex *int64 `locationName:"deviceIndex" type:"integer" required:"true"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The ID of the instance. + InstanceId *string `locationName:"instanceId" type:"string" required:"true"` + + // The ID of the network interface. + NetworkInterfaceId *string `locationName:"networkInterfaceId" type:"string" required:"true"` +} + +// String returns the string representation +func (s AttachNetworkInterfaceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AttachNetworkInterfaceInput) GoString() string { + return s.String() +} + +type AttachNetworkInterfaceOutput struct { + _ struct{} `type:"structure"` + + // The ID of the network interface attachment. + AttachmentId *string `locationName:"attachmentId" type:"string"` +} + +// String returns the string representation +func (s AttachNetworkInterfaceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AttachNetworkInterfaceOutput) GoString() string { + return s.String() +} + +type AttachVolumeInput struct { + _ struct{} `type:"structure"` + + // The device name to expose to the instance (for example, /dev/sdh or xvdh). + Device *string `type:"string" required:"true"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The ID of the instance. + InstanceId *string `type:"string" required:"true"` + + // The ID of the EBS volume. The volume and instance must be within the same + // Availability Zone. + VolumeId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s AttachVolumeInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AttachVolumeInput) GoString() string { + return s.String() +} + +type AttachVpnGatewayInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. 
Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The ID of the VPC. + VpcId *string `type:"string" required:"true"` + + // The ID of the virtual private gateway. + VpnGatewayId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s AttachVpnGatewayInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AttachVpnGatewayInput) GoString() string { + return s.String() +} + +type AttachVpnGatewayOutput struct { + _ struct{} `type:"structure"` + + // Information about the attachment. + VpcAttachment *VpcAttachment `locationName:"attachment" type:"structure"` +} + +// String returns the string representation +func (s AttachVpnGatewayOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AttachVpnGatewayOutput) GoString() string { + return s.String() +} + +// The value to use when a resource attribute accepts a Boolean value. +type AttributeBooleanValue struct { + _ struct{} `type:"structure"` + + // Valid values are true or false. + Value *bool `locationName:"value" type:"boolean"` +} + +// String returns the string representation +func (s AttributeBooleanValue) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AttributeBooleanValue) GoString() string { + return s.String() +} + +// The value to use for a resource attribute. +type AttributeValue struct { + _ struct{} `type:"structure"` + + // Valid values are case-sensitive and vary by action. + Value *string `locationName:"value" type:"string"` +} + +// String returns the string representation +func (s AttributeValue) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AttributeValue) GoString() string { + return s.String() +} + +type AuthorizeSecurityGroupEgressInput struct { + _ struct{} `type:"structure"` + + // The CIDR IP address range. We recommend that you specify the CIDR range in + // a set of IP permissions instead. + CidrIp *string `locationName:"cidrIp" type:"string"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The start of port range for the TCP and UDP protocols, or an ICMP type number. + // We recommend that you specify the port range in a set of IP permissions instead. + FromPort *int64 `locationName:"fromPort" type:"integer"` + + // The ID of the security group. + GroupId *string `locationName:"groupId" type:"string" required:"true"` + + // A set of IP permissions. You can't specify a destination security group and + // a CIDR IP address range. + IpPermissions []*IpPermission `locationName:"ipPermissions" locationNameList:"item" type:"list"` + + // The IP protocol name or number. We recommend that you specify the protocol + // in a set of IP permissions instead. + IpProtocol *string `locationName:"ipProtocol" type:"string"` + + // The name of a destination security group. To authorize outbound access to + // a destination security group, we recommend that you use a set of IP permissions + // instead. 
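+ //
+ // Example: a sketch of attaching an EBS volume with AttachVolume; note that
+ // in this SDK the call appears to return a VolumeAttachment rather than a
+ // dedicated output struct (device and IDs are illustrative placeholders):
+ //
+ //   att, err := svc.AttachVolume(&ec2.AttachVolumeInput{
+ //       Device:     aws.String("/dev/sdh"),
+ //       InstanceId: aws.String("i-12345678"),
+ //       VolumeId:   aws.String("vol-12345678"),
+ //   })
+ //   if err == nil {
+ //       fmt.Println(aws.StringValue(att.State))
+ //   }
+ //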
+ SourceSecurityGroupName *string `locationName:"sourceSecurityGroupName" type:"string"`
+
+ // The AWS account number for a destination security group. To authorize outbound
+ // access to a destination security group, we recommend that you use a set of
+ // IP permissions instead.
+ SourceSecurityGroupOwnerId *string `locationName:"sourceSecurityGroupOwnerId" type:"string"`
+
+ // The end of port range for the TCP and UDP protocols, or an ICMP code number.
+ // We recommend that you specify the port range in a set of IP permissions instead.
+ ToPort *int64 `locationName:"toPort" type:"integer"`
+}
+
+// String returns the string representation
+func (s AuthorizeSecurityGroupEgressInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s AuthorizeSecurityGroupEgressInput) GoString() string {
+ return s.String()
+}
+
+type AuthorizeSecurityGroupEgressOutput struct {
+ _ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s AuthorizeSecurityGroupEgressOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s AuthorizeSecurityGroupEgressOutput) GoString() string {
+ return s.String()
+}
+
+type AuthorizeSecurityGroupIngressInput struct {
+ _ struct{} `type:"structure"`
+
+ // The CIDR IP address range. You can't specify this parameter when specifying
+ // a source security group.
+ CidrIp *string `type:"string"`
+
+ // Checks whether you have the required permissions for the action, without
+ // actually making the request, and provides an error response. If you have
+ // the required permissions, the error response is DryRunOperation. Otherwise,
+ // it is UnauthorizedOperation.
+ DryRun *bool `locationName:"dryRun" type:"boolean"`
+
+ // The start of port range for the TCP and UDP protocols, or an ICMP type number.
+ // For the ICMP type number, use -1 to specify all ICMP types.
+ FromPort *int64 `type:"integer"`
+
+ // The ID of the security group. Required for a nondefault VPC.
+ GroupId *string `type:"string"`
+
+ // [EC2-Classic, default VPC] The name of the security group.
+ GroupName *string `type:"string"`
+
+ // A set of IP permissions. Can be used to specify multiple rules in a single
+ // command.
+ IpPermissions []*IpPermission `locationNameList:"item" type:"list"`
+
+ // The IP protocol name (tcp, udp, icmp) or number (see Protocol Numbers (http://www.iana.org/assignments/protocol-numbers/protocol-numbers.xhtml)).
+ // (VPC only) Use -1 to specify all.
+ IpProtocol *string `type:"string"`
+
+ // [EC2-Classic, default VPC] The name of the source security group. You can't
+ // specify this parameter in combination with the following parameters: the
+ // CIDR IP address range, the start of the port range, the IP protocol, and
+ // the end of the port range. For EC2-VPC, the source security group must be
+ // in the same VPC.
+ SourceSecurityGroupName *string `type:"string"`
+
+ // [EC2-Classic, default VPC] The AWS account number for the source security
+ // group. For EC2-VPC, the source security group must be in the same VPC. You
+ // can't specify this parameter in combination with the following parameters:
+ // the CIDR IP address range, the IP protocol, the start of the port range,
+ // and the end of the port range. Creates rules that grant full ICMP, UDP, and
+ // TCP access. To create a rule with a specific IP protocol and port range,
+ // use a set of IP permissions instead.
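+ //
+ // Example: a sketch of the recommended IpPermissions form for
+ // AuthorizeSecurityGroupEgress (the group ID and CIDR are placeholders):
+ //
+ //   _, err := svc.AuthorizeSecurityGroupEgress(&ec2.AuthorizeSecurityGroupEgressInput{
+ //       GroupId: aws.String("sg-12345678"),
+ //       IpPermissions: []*ec2.IpPermission{{
+ //           IpProtocol: aws.String("tcp"),
+ //           FromPort:   aws.Int64(443),
+ //           ToPort:     aws.Int64(443),
+ //           IpRanges:   []*ec2.IpRange{{CidrIp: aws.String("10.0.0.0/16")}},
+ //       }},
+ //   })
+ //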
+ SourceSecurityGroupOwnerId *string `type:"string"`
+
+ // The end of port range for the TCP and UDP protocols, or an ICMP code number.
+ // For the ICMP code number, use -1 to specify all ICMP codes for the ICMP type.
+ ToPort *int64 `type:"integer"`
+}
+
+// String returns the string representation
+func (s AuthorizeSecurityGroupIngressInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s AuthorizeSecurityGroupIngressInput) GoString() string {
+ return s.String()
+}
+
+type AuthorizeSecurityGroupIngressOutput struct {
+ _ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s AuthorizeSecurityGroupIngressOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s AuthorizeSecurityGroupIngressOutput) GoString() string {
+ return s.String()
+}
+
+// Describes an Availability Zone.
+type AvailabilityZone struct {
+ _ struct{} `type:"structure"`
+
+ // Any messages about the Availability Zone.
+ Messages []*AvailabilityZoneMessage `locationName:"messageSet" locationNameList:"item" type:"list"`
+
+ // The name of the region.
+ RegionName *string `locationName:"regionName" type:"string"`
+
+ // The state of the Availability Zone.
+ State *string `locationName:"zoneState" type:"string" enum:"AvailabilityZoneState"`
+
+ // The name of the Availability Zone.
+ ZoneName *string `locationName:"zoneName" type:"string"`
+}
+
+// String returns the string representation
+func (s AvailabilityZone) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s AvailabilityZone) GoString() string {
+ return s.String()
+}
+
+// Describes a message about an Availability Zone.
+type AvailabilityZoneMessage struct {
+ _ struct{} `type:"structure"`
+
+ // The message about the Availability Zone.
+ Message *string `locationName:"message" type:"string"`
+}
+
+// String returns the string representation
+func (s AvailabilityZoneMessage) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s AvailabilityZoneMessage) GoString() string {
+ return s.String()
+}
+
+// The capacity information for instances launched onto the Dedicated host.
+type AvailableCapacity struct {
+ _ struct{} `type:"structure"`
+
+ // The instance capacity available on the Dedicated host, for each supported
+ // instance type.
+ AvailableInstanceCapacity []*InstanceCapacity `locationName:"availableInstanceCapacity" locationNameList:"item" type:"list"`
+
+ // The number of vCPUs available on the Dedicated host.
+ AvailableVCpus *int64 `locationName:"availableVCpus" type:"integer"`
+}
+
+// String returns the string representation
+func (s AvailableCapacity) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s AvailableCapacity) GoString() string {
+ return s.String()
+}
+
+type BlobAttributeValue struct {
+ _ struct{} `type:"structure"`
+
+ Value []byte `locationName:"value" type:"blob"`
+}
+
+// String returns the string representation
+func (s BlobAttributeValue) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s BlobAttributeValue) GoString() string {
+ return s.String()
+}
+
+// Describes a block device mapping.
+type BlockDeviceMapping struct {
+ _ struct{} `type:"structure"`
+
+ // The device name exposed to the instance (for example, /dev/sdh or xvdh).
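+ //
+ // Example: a sketch of the single-rule form of AuthorizeSecurityGroupIngress
+ // (for multiple rules, prefer the IpPermissions form shown above; all values
+ // here are placeholders):
+ //
+ //   _, err := svc.AuthorizeSecurityGroupIngress(&ec2.AuthorizeSecurityGroupIngressInput{
+ //       GroupId:    aws.String("sg-12345678"),
+ //       IpProtocol: aws.String("tcp"),
+ //       FromPort:   aws.Int64(22),
+ //       ToPort:     aws.Int64(22),
+ //       CidrIp:     aws.String("192.0.2.0/24"),
+ //   })
+ //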
+ DeviceName *string `locationName:"deviceName" type:"string"`
+
+ // Parameters used to automatically set up EBS volumes when the instance is
+ // launched.
+ Ebs *EbsBlockDevice `locationName:"ebs" type:"structure"`
+
+ // Suppresses the specified device included in the block device mapping of the
+ // AMI.
+ NoDevice *string `locationName:"noDevice" type:"string"`
+
+ // The virtual device name (ephemeralN). Instance store volumes are numbered
+ // starting from 0. An instance type with 2 available instance store volumes
+ // can specify mappings for ephemeral0 and ephemeral1. The number of available
+ // instance store volumes depends on the instance type. After you connect to
+ // the instance, you must mount the volume.
+ //
+ // Constraints: For M3 instances, you must specify instance store volumes in
+ // the block device mapping for the instance. When you launch an M3 instance,
+ // we ignore any instance store volumes specified in the block device mapping
+ // for the AMI.
+ VirtualName *string `locationName:"virtualName" type:"string"`
+}
+
+// String returns the string representation
+func (s BlockDeviceMapping) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s BlockDeviceMapping) GoString() string {
+ return s.String()
+}
+
+type BundleInstanceInput struct {
+ _ struct{} `type:"structure"`
+
+ // Checks whether you have the required permissions for the action, without
+ // actually making the request, and provides an error response. If you have
+ // the required permissions, the error response is DryRunOperation. Otherwise,
+ // it is UnauthorizedOperation.
+ DryRun *bool `locationName:"dryRun" type:"boolean"`
+
+ // The ID of the instance to bundle.
+ //
+ // Type: String
+ //
+ // Default: None
+ //
+ // Required: Yes
+ InstanceId *string `type:"string" required:"true"`
+
+ // The bucket in which to store the AMI. You can specify a bucket that you already
+ // own or a new bucket that Amazon EC2 creates on your behalf. If you specify
+ // a bucket that belongs to someone else, Amazon EC2 returns an error.
+ Storage *Storage `type:"structure" required:"true"`
+}
+
+// String returns the string representation
+func (s BundleInstanceInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s BundleInstanceInput) GoString() string {
+ return s.String()
+}
+
+type BundleInstanceOutput struct {
+ _ struct{} `type:"structure"`
+
+ // Information about the bundle task.
+ BundleTask *BundleTask `locationName:"bundleInstanceTask" type:"structure"`
+}
+
+// String returns the string representation
+func (s BundleInstanceOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s BundleInstanceOutput) GoString() string {
+ return s.String()
+}
+
+// Describes a bundle task.
+type BundleTask struct {
+ _ struct{} `type:"structure"`
+
+ // The ID of the bundle task.
+ BundleId *string `locationName:"bundleId" type:"string"`
+
+ // If the task fails, a description of the error.
+ BundleTaskError *BundleTaskError `locationName:"error" type:"structure"`
+
+ // The ID of the instance associated with this bundle task.
+ InstanceId *string `locationName:"instanceId" type:"string"`
+
+ // The level of task completion, as a percent (for example, 20%).
+ Progress *string `locationName:"progress" type:"string"`
+
+ // The time this task started.
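+ //
+ // Example: a sketch of a BlockDeviceMapping that requests a 100 GiB gp2
+ // EBS volume deleted on instance termination (field values are illustrative):
+ //
+ //   bdm := &ec2.BlockDeviceMapping{
+ //       DeviceName: aws.String("/dev/sdh"),
+ //       Ebs: &ec2.EbsBlockDevice{
+ //           VolumeSize:          aws.Int64(100),
+ //           VolumeType:          aws.String("gp2"),
+ //           DeleteOnTermination: aws.Bool(true),
+ //       },
+ //   }
+ //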
+ StartTime *time.Time `locationName:"startTime" type:"timestamp" timestampFormat:"iso8601"` + + // The state of the task. + State *string `locationName:"state" type:"string" enum:"BundleTaskState"` + + // The Amazon S3 storage locations. + Storage *Storage `locationName:"storage" type:"structure"` + + // The time of the most recent update for the task. + UpdateTime *time.Time `locationName:"updateTime" type:"timestamp" timestampFormat:"iso8601"` +} + +// String returns the string representation +func (s BundleTask) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s BundleTask) GoString() string { + return s.String() +} + +// Describes an error for BundleInstance. +type BundleTaskError struct { + _ struct{} `type:"structure"` + + // The error code. + Code *string `locationName:"code" type:"string"` + + // The error message. + Message *string `locationName:"message" type:"string"` +} + +// String returns the string representation +func (s BundleTaskError) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s BundleTaskError) GoString() string { + return s.String() +} + +type CancelBundleTaskInput struct { + _ struct{} `type:"structure"` + + // The ID of the bundle task. + BundleId *string `type:"string" required:"true"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` +} + +// String returns the string representation +func (s CancelBundleTaskInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CancelBundleTaskInput) GoString() string { + return s.String() +} + +type CancelBundleTaskOutput struct { + _ struct{} `type:"structure"` + + // Information about the bundle task. + BundleTask *BundleTask `locationName:"bundleInstanceTask" type:"structure"` +} + +// String returns the string representation +func (s CancelBundleTaskOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CancelBundleTaskOutput) GoString() string { + return s.String() +} + +type CancelConversionTaskInput struct { + _ struct{} `type:"structure"` + + // The ID of the conversion task. + ConversionTaskId *string `locationName:"conversionTaskId" type:"string" required:"true"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The reason for canceling the conversion task. 
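+ //
+ // Example: a sketch of canceling a bundle task and inspecting the returned
+ // BundleTask, reusing the svc client from the earlier sketches (the bundle
+ // ID is a placeholder):
+ //
+ //   out, err := svc.CancelBundleTask(&ec2.CancelBundleTaskInput{
+ //       BundleId: aws.String("bun-12345678"),
+ //   })
+ //   if err == nil && out.BundleTask != nil {
+ //       fmt.Println(aws.StringValue(out.BundleTask.State))
+ //   }
+ //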
+ ReasonMessage *string `locationName:"reasonMessage" type:"string"`
+}
+
+// String returns the string representation
+func (s CancelConversionTaskInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CancelConversionTaskInput) GoString() string {
+ return s.String()
+}
+
+type CancelConversionTaskOutput struct {
+ _ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s CancelConversionTaskOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CancelConversionTaskOutput) GoString() string {
+ return s.String()
+}
+
+type CancelExportTaskInput struct {
+ _ struct{} `type:"structure"`
+
+ // The ID of the export task. This is the ID returned by CreateInstanceExportTask.
+ ExportTaskId *string `locationName:"exportTaskId" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s CancelExportTaskInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CancelExportTaskInput) GoString() string {
+ return s.String()
+}
+
+type CancelExportTaskOutput struct {
+ _ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s CancelExportTaskOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CancelExportTaskOutput) GoString() string {
+ return s.String()
+}
+
+type CancelImportTaskInput struct {
+ _ struct{} `type:"structure"`
+
+ // The reason for canceling the task.
+ CancelReason *string `type:"string"`
+
+ // Checks whether you have the required permissions for the action, without
+ // actually making the request, and provides an error response. If you have
+ // the required permissions, the error response is DryRunOperation. Otherwise,
+ // it is UnauthorizedOperation.
+ DryRun *bool `type:"boolean"`
+
+ // The ID of the import image or import snapshot task to be canceled.
+ ImportTaskId *string `type:"string"`
+}
+
+// String returns the string representation
+func (s CancelImportTaskInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CancelImportTaskInput) GoString() string {
+ return s.String()
+}
+
+type CancelImportTaskOutput struct {
+ _ struct{} `type:"structure"`
+
+ // The ID of the task being canceled.
+ ImportTaskId *string `locationName:"importTaskId" type:"string"`
+
+ // The state of the task before the cancellation request.
+ PreviousState *string `locationName:"previousState" type:"string"`
+
+ // The current state of the task being canceled.
+ State *string `locationName:"state" type:"string"`
+}
+
+// String returns the string representation
+func (s CancelImportTaskOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CancelImportTaskOutput) GoString() string {
+ return s.String()
+}
+
+type CancelReservedInstancesListingInput struct {
+ _ struct{} `type:"structure"`
+
+ // The ID of the Reserved Instance listing.
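+ //
+ // Example: a sketch of canceling an import task started by ImportImage or
+ // ImportSnapshot (the task ID and reason are placeholders):
+ //
+ //   out, err := svc.CancelImportTask(&ec2.CancelImportTaskInput{
+ //       ImportTaskId: aws.String("import-ami-12345678"),
+ //       CancelReason: aws.String("no longer needed"),
+ //   })
+ //   if err == nil {
+ //       fmt.Println(aws.StringValue(out.State))
+ //   }
+ //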
+ ReservedInstancesListingId *string `locationName:"reservedInstancesListingId" type:"string" required:"true"` +} + +// String returns the string representation +func (s CancelReservedInstancesListingInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CancelReservedInstancesListingInput) GoString() string { + return s.String() +} + +type CancelReservedInstancesListingOutput struct { + _ struct{} `type:"structure"` + + // The Reserved Instance listing. + ReservedInstancesListings []*ReservedInstancesListing `locationName:"reservedInstancesListingsSet" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s CancelReservedInstancesListingOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CancelReservedInstancesListingOutput) GoString() string { + return s.String() +} + +// Describes a Spot fleet error. +type CancelSpotFleetRequestsError struct { + _ struct{} `type:"structure"` + + // The error code. + Code *string `locationName:"code" type:"string" required:"true" enum:"CancelBatchErrorCode"` + + // The description for the error code. + Message *string `locationName:"message" type:"string" required:"true"` +} + +// String returns the string representation +func (s CancelSpotFleetRequestsError) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CancelSpotFleetRequestsError) GoString() string { + return s.String() +} + +// Describes a Spot fleet request that was not successfully canceled. +type CancelSpotFleetRequestsErrorItem struct { + _ struct{} `type:"structure"` + + // The error. + Error *CancelSpotFleetRequestsError `locationName:"error" type:"structure" required:"true"` + + // The ID of the Spot fleet request. + SpotFleetRequestId *string `locationName:"spotFleetRequestId" type:"string" required:"true"` +} + +// String returns the string representation +func (s CancelSpotFleetRequestsErrorItem) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CancelSpotFleetRequestsErrorItem) GoString() string { + return s.String() +} + +// Contains the parameters for CancelSpotFleetRequests. +type CancelSpotFleetRequestsInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The IDs of the Spot fleet requests. + SpotFleetRequestIds []*string `locationName:"spotFleetRequestId" locationNameList:"item" type:"list" required:"true"` + + // Indicates whether to terminate instances for a Spot fleet request if it is + // canceled successfully. + TerminateInstances *bool `locationName:"terminateInstances" type:"boolean" required:"true"` +} + +// String returns the string representation +func (s CancelSpotFleetRequestsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CancelSpotFleetRequestsInput) GoString() string { + return s.String() +} + +// Contains the output of CancelSpotFleetRequests. +type CancelSpotFleetRequestsOutput struct { + _ struct{} `type:"structure"` + + // Information about the Spot fleet requests that are successfully canceled. 
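+ //
+ // Example: a sketch of canceling Spot fleet requests, terminating their
+ // instances, and reporting any requests that could not be canceled (the
+ // request ID is a placeholder):
+ //
+ //   out, err := svc.CancelSpotFleetRequests(&ec2.CancelSpotFleetRequestsInput{
+ //       SpotFleetRequestIds: []*string{aws.String("sfr-12345678")},
+ //       TerminateInstances:  aws.Bool(true),
+ //   })
+ //   if err == nil {
+ //       for _, e := range out.UnsuccessfulFleetRequests {
+ //           fmt.Println(aws.StringValue(e.SpotFleetRequestId), aws.StringValue(e.Error.Code))
+ //       }
+ //   }
+ //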
+ SuccessfulFleetRequests []*CancelSpotFleetRequestsSuccessItem `locationName:"successfulFleetRequestSet" locationNameList:"item" type:"list"` + + // Information about the Spot fleet requests that are not successfully canceled. + UnsuccessfulFleetRequests []*CancelSpotFleetRequestsErrorItem `locationName:"unsuccessfulFleetRequestSet" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s CancelSpotFleetRequestsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CancelSpotFleetRequestsOutput) GoString() string { + return s.String() +} + +// Describes a Spot fleet request that was successfully canceled. +type CancelSpotFleetRequestsSuccessItem struct { + _ struct{} `type:"structure"` + + // The current state of the Spot fleet request. + CurrentSpotFleetRequestState *string `locationName:"currentSpotFleetRequestState" type:"string" required:"true" enum:"BatchState"` + + // The previous state of the Spot fleet request. + PreviousSpotFleetRequestState *string `locationName:"previousSpotFleetRequestState" type:"string" required:"true" enum:"BatchState"` + + // The ID of the Spot fleet request. + SpotFleetRequestId *string `locationName:"spotFleetRequestId" type:"string" required:"true"` +} + +// String returns the string representation +func (s CancelSpotFleetRequestsSuccessItem) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CancelSpotFleetRequestsSuccessItem) GoString() string { + return s.String() +} + +// Contains the parameters for CancelSpotInstanceRequests. +type CancelSpotInstanceRequestsInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // One or more Spot instance request IDs. + SpotInstanceRequestIds []*string `locationName:"SpotInstanceRequestId" locationNameList:"SpotInstanceRequestId" type:"list" required:"true"` +} + +// String returns the string representation +func (s CancelSpotInstanceRequestsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CancelSpotInstanceRequestsInput) GoString() string { + return s.String() +} + +// Contains the output of CancelSpotInstanceRequests. +type CancelSpotInstanceRequestsOutput struct { + _ struct{} `type:"structure"` + + // One or more Spot instance requests. + CancelledSpotInstanceRequests []*CancelledSpotInstanceRequest `locationName:"spotInstanceRequestSet" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s CancelSpotInstanceRequestsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CancelSpotInstanceRequestsOutput) GoString() string { + return s.String() +} + +// Describes a request to cancel a Spot instance. +type CancelledSpotInstanceRequest struct { + _ struct{} `type:"structure"` + + // The ID of the Spot instance request. + SpotInstanceRequestId *string `locationName:"spotInstanceRequestId" type:"string"` + + // The state of the Spot instance request. 
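+ //
+ // Example: a sketch of canceling Spot instance requests and printing the
+ // resulting request states (IDs are placeholders):
+ //
+ //   out, err := svc.CancelSpotInstanceRequests(&ec2.CancelSpotInstanceRequestsInput{
+ //       SpotInstanceRequestIds: []*string{aws.String("sir-12345678")},
+ //   })
+ //   if err == nil {
+ //       for _, r := range out.CancelledSpotInstanceRequests {
+ //           fmt.Println(aws.StringValue(r.SpotInstanceRequestId), aws.StringValue(r.State))
+ //       }
+ //   }
+ //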
+ State *string `locationName:"state" type:"string" enum:"CancelSpotInstanceRequestState"` +} + +// String returns the string representation +func (s CancelledSpotInstanceRequest) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CancelledSpotInstanceRequest) GoString() string { + return s.String() +} + +// Describes the ClassicLink DNS support status of a VPC. +type ClassicLinkDnsSupport struct { + _ struct{} `type:"structure"` + + // Indicates whether ClassicLink DNS support is enabled for the VPC. + ClassicLinkDnsSupported *bool `locationName:"classicLinkDnsSupported" type:"boolean"` + + // The ID of the VPC. + VpcId *string `locationName:"vpcId" type:"string"` +} + +// String returns the string representation +func (s ClassicLinkDnsSupport) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ClassicLinkDnsSupport) GoString() string { + return s.String() +} + +// Describes a linked EC2-Classic instance. +type ClassicLinkInstance struct { + _ struct{} `type:"structure"` + + // A list of security groups. + Groups []*GroupIdentifier `locationName:"groupSet" locationNameList:"item" type:"list"` + + // The ID of the instance. + InstanceId *string `locationName:"instanceId" type:"string"` + + // Any tags assigned to the instance. + Tags []*Tag `locationName:"tagSet" locationNameList:"item" type:"list"` + + // The ID of the VPC. + VpcId *string `locationName:"vpcId" type:"string"` +} + +// String returns the string representation +func (s ClassicLinkInstance) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ClassicLinkInstance) GoString() string { + return s.String() +} + +// Describes the client-specific data. +type ClientData struct { + _ struct{} `type:"structure"` + + // A user-defined comment about the disk upload. + Comment *string `type:"string"` + + // The time that the disk upload ends. + UploadEnd *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + // The size of the uploaded disk image, in GiB. + UploadSize *float64 `type:"double"` + + // The time that the disk upload starts. + UploadStart *time.Time `type:"timestamp" timestampFormat:"iso8601"` +} + +// String returns the string representation +func (s ClientData) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ClientData) GoString() string { + return s.String() +} + +type ConfirmProductInstanceInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The ID of the instance. + InstanceId *string `type:"string" required:"true"` + + // The product code. This must be a product code that you own. + ProductCode *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s ConfirmProductInstanceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ConfirmProductInstanceInput) GoString() string { + return s.String() +} + +type ConfirmProductInstanceOutput struct { + _ struct{} `type:"structure"` + + // The AWS account ID of the instance owner. 
This is only present if the product + // code is attached to the instance. + OwnerId *string `locationName:"ownerId" type:"string"` + + // The return value of the request. Returns true if the specified product code + // is owned by the requester and associated with the specified instance. + Return *bool `locationName:"return" type:"boolean"` +} + +// String returns the string representation +func (s ConfirmProductInstanceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ConfirmProductInstanceOutput) GoString() string { + return s.String() +} + +// Describes a conversion task. +type ConversionTask struct { + _ struct{} `type:"structure"` + + // The ID of the conversion task. + ConversionTaskId *string `locationName:"conversionTaskId" type:"string" required:"true"` + + // The time when the task expires. If the upload isn't complete before the expiration + // time, we automatically cancel the task. + ExpirationTime *string `locationName:"expirationTime" type:"string"` + + // If the task is for importing an instance, this contains information about + // the import instance task. + ImportInstance *ImportInstanceTaskDetails `locationName:"importInstance" type:"structure"` + + // If the task is for importing a volume, this contains information about the + // import volume task. + ImportVolume *ImportVolumeTaskDetails `locationName:"importVolume" type:"structure"` + + // The state of the conversion task. + State *string `locationName:"state" type:"string" required:"true" enum:"ConversionTaskState"` + + // The status message related to the conversion task. + StatusMessage *string `locationName:"statusMessage" type:"string"` + + // Any tags assigned to the task. + Tags []*Tag `locationName:"tagSet" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s ConversionTask) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ConversionTask) GoString() string { + return s.String() +} + +type CopyImageInput struct { + _ struct{} `type:"structure"` + + // Unique, case-sensitive identifier you provide to ensure idempotency of the + // request. For more information, see How to Ensure Idempotency (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Run_Instance_Idempotency.html) + // in the Amazon Elastic Compute Cloud User Guide. + ClientToken *string `type:"string"` + + // A description for the new AMI in the destination region. + Description *string `type:"string"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // Specifies whether the destination snapshots of the copied image should be + // encrypted. The default CMK for EBS is used unless a non-default AWS Key Management + // Service (AWS KMS) CMK is specified with KmsKeyId. For more information, see + // Amazon EBS Encryption (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSEncryption.html) + // in the Amazon Elastic Compute Cloud User Guide. + Encrypted *bool `locationName:"encrypted" type:"boolean"` + + // The full ARN of the AWS Key Management Service (AWS KMS) CMK to use when + // encrypting the snapshots of an image during a copy operation. 
This parameter + // is only required if you want to use a non-default CMK; if this parameter + // is not specified, the default CMK for EBS is used. The ARN contains the arn:aws:kms + // namespace, followed by the region of the CMK, the AWS account ID of the CMK + // owner, the key namespace, and then the CMK ID. For example, arn:aws:kms:us-east-1:012345678910:key/abcd1234-a123-456a-a12b-a123b4cd56ef. + // The specified CMK must exist in the region that the snapshot is being copied + // to. If a KmsKeyId is specified, the Encrypted flag must also be set. + KmsKeyId *string `locationName:"kmsKeyId" type:"string"` + + // The name of the new AMI in the destination region. + Name *string `type:"string" required:"true"` + + // The ID of the AMI to copy. + SourceImageId *string `type:"string" required:"true"` + + // The name of the region that contains the AMI to copy. + SourceRegion *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s CopyImageInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CopyImageInput) GoString() string { + return s.String() +} + +type CopyImageOutput struct { + _ struct{} `type:"structure"` + + // The ID of the new AMI. + ImageId *string `locationName:"imageId" type:"string"` +} + +// String returns the string representation +func (s CopyImageOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CopyImageOutput) GoString() string { + return s.String() +} + +type CopySnapshotInput struct { + _ struct{} `type:"structure"` + + // A description for the EBS snapshot. + Description *string `type:"string"` + + // The destination region to use in the PresignedUrl parameter of a snapshot + // copy operation. This parameter is only valid for specifying the destination + // region in a PresignedUrl parameter, where it is required. + // + // CopySnapshot sends the snapshot copy to the regional endpoint that you + // send the HTTP request to, such as ec2.us-east-1.amazonaws.com (in the AWS + // CLI, this is specified with the --region parameter or the default region + // in your AWS configuration file). + DestinationRegion *string `locationName:"destinationRegion" type:"string"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // Specifies whether the destination snapshot should be encrypted. There is + // no way to create an unencrypted snapshot copy from an encrypted snapshot; + // however, you can encrypt a copy of an unencrypted snapshot with this flag. + // The default CMK for EBS is used unless a non-default AWS Key Management Service + // (AWS KMS) CMK is specified with KmsKeyId. For more information, see Amazon + // EBS Encryption (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSEncryption.html) + // in the Amazon Elastic Compute Cloud User Guide. + Encrypted *bool `locationName:"encrypted" type:"boolean"` + + // The full ARN of the AWS Key Management Service (AWS KMS) CMK to use when + // creating the snapshot copy. This parameter is only required if you want to + // use a non-default CMK; if this parameter is not specified, the default CMK + // for EBS is used. 
The ARN contains the arn:aws:kms namespace, followed by + // the region of the CMK, the AWS account ID of the CMK owner, the key namespace, + // and then the CMK ID. For example, arn:aws:kms:us-east-1:012345678910:key/abcd1234-a123-456a-a12b-a123b4cd56ef. + // The specified CMK must exist in the region that the snapshot is being copied + // to. If a KmsKeyId is specified, the Encrypted flag must also be set. + KmsKeyId *string `locationName:"kmsKeyId" type:"string"` + + // The pre-signed URL that facilitates copying an encrypted snapshot. This parameter + // is only required when copying an encrypted snapshot with the Amazon EC2 Query + // API; it is available as an optional parameter in all other cases. The PresignedUrl + // should use the snapshot source endpoint, the CopySnapshot action, and include + // the SourceRegion, SourceSnapshotId, and DestinationRegion parameters. The + // PresignedUrl must be signed using AWS Signature Version 4. Because EBS snapshots + // are stored in Amazon S3, the signing algorithm for this parameter uses the + // same logic that is described in Authenticating Requests by Using Query Parameters + // (AWS Signature Version 4) (http://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-query-string-auth.html) + // in the Amazon Simple Storage Service API Reference. An invalid or improperly + // signed PresignedUrl will cause the copy operation to fail asynchronously, + // and the snapshot will move to an error state. + PresignedUrl *string `locationName:"presignedUrl" type:"string"` + + // The ID of the region that contains the snapshot to be copied. + SourceRegion *string `type:"string" required:"true"` + + // The ID of the EBS snapshot to copy. + SourceSnapshotId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s CopySnapshotInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CopySnapshotInput) GoString() string { + return s.String() +} + +type CopySnapshotOutput struct { + _ struct{} `type:"structure"` + + // The ID of the new snapshot. + SnapshotId *string `locationName:"snapshotId" type:"string"` +} + +// String returns the string representation +func (s CopySnapshotOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CopySnapshotOutput) GoString() string { + return s.String() +} + +type CreateCustomerGatewayInput struct { + _ struct{} `type:"structure"` + + // For devices that support BGP, the customer gateway's BGP ASN. + // + // Default: 65000 + BgpAsn *int64 `type:"integer" required:"true"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The Internet-routable IP address for the customer gateway's outside interface. + // The address must be static. + PublicIp *string `locationName:"IpAddress" type:"string" required:"true"` + + // The type of VPN connection that this customer gateway supports (ipsec.1). 
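+ //
+ // Example: a sketch of copying a snapshot between regions; per the comments
+ // above, the client must target the destination region, while SourceRegion
+ // names where the snapshot lives (regions and IDs are placeholders):
+ //
+ //   dst := ec2.New(session.New(&aws.Config{Region: aws.String("us-west-2")}))
+ //   out, err := dst.CopySnapshot(&ec2.CopySnapshotInput{
+ //       SourceRegion:     aws.String("us-east-1"),
+ //       SourceSnapshotId: aws.String("snap-12345678"),
+ //       Description:      aws.String("cross-region copy"),
+ //   })
+ //   if err == nil {
+ //       fmt.Println(aws.StringValue(out.SnapshotId))
+ //   }
+ //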
+ Type *string `type:"string" required:"true" enum:"GatewayType"` +} + +// String returns the string representation +func (s CreateCustomerGatewayInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateCustomerGatewayInput) GoString() string { + return s.String() +} + +type CreateCustomerGatewayOutput struct { + _ struct{} `type:"structure"` + + // Information about the customer gateway. + CustomerGateway *CustomerGateway `locationName:"customerGateway" type:"structure"` +} + +// String returns the string representation +func (s CreateCustomerGatewayOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateCustomerGatewayOutput) GoString() string { + return s.String() +} + +type CreateDhcpOptionsInput struct { + _ struct{} `type:"structure"` + + // A DHCP configuration option. + DhcpConfigurations []*NewDhcpConfiguration `locationName:"dhcpConfiguration" locationNameList:"item" type:"list" required:"true"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` +} + +// String returns the string representation +func (s CreateDhcpOptionsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateDhcpOptionsInput) GoString() string { + return s.String() +} + +type CreateDhcpOptionsOutput struct { + _ struct{} `type:"structure"` + + // A set of DHCP options. + DhcpOptions *DhcpOptions `locationName:"dhcpOptions" type:"structure"` +} + +// String returns the string representation +func (s CreateDhcpOptionsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateDhcpOptionsOutput) GoString() string { + return s.String() +} + +type CreateFlowLogsInput struct { + _ struct{} `type:"structure"` + + // Unique, case-sensitive identifier you provide to ensure the idempotency of + // the request. For more information, see How to Ensure Idempotency (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Run_Instance_Idempotency.html). + ClientToken *string `type:"string"` + + // The ARN for the IAM role that's used to post flow logs to a CloudWatch Logs + // log group. + DeliverLogsPermissionArn *string `type:"string" required:"true"` + + // The name of the CloudWatch log group. + LogGroupName *string `type:"string" required:"true"` + + // One or more subnet, network interface, or VPC IDs. + ResourceIds []*string `locationName:"ResourceId" locationNameList:"item" type:"list" required:"true"` + + // The type of resource on which to create the flow log. + ResourceType *string `type:"string" required:"true" enum:"FlowLogsResourceType"` + + // The type of traffic to log. + TrafficType *string `type:"string" required:"true" enum:"TrafficType"` +} + +// String returns the string representation +func (s CreateFlowLogsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateFlowLogsInput) GoString() string { + return s.String() +} + +type CreateFlowLogsOutput struct { + _ struct{} `type:"structure"` + + // Unique, case-sensitive identifier you provide to ensure the idempotency of + // the request. 
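+ //
+ // Example: a sketch of enabling VPC flow logs delivered to a CloudWatch Logs
+ // group (the role ARN, log group name, and VPC ID are placeholders):
+ //
+ //   out, err := svc.CreateFlowLogs(&ec2.CreateFlowLogsInput{
+ //       DeliverLogsPermissionArn: aws.String("arn:aws:iam::123456789012:role/flow-logs"),
+ //       LogGroupName:             aws.String("my-flow-logs"),
+ //       ResourceIds:              []*string{aws.String("vpc-12345678")},
+ //       ResourceType:             aws.String("VPC"),
+ //       TrafficType:              aws.String("ALL"),
+ //   })
+ //   if err == nil {
+ //       fmt.Println(out.FlowLogIds)
+ //   }
+ //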
+ ClientToken *string `locationName:"clientToken" type:"string"`
+
+ // The IDs of the flow logs.
+ FlowLogIds []*string `locationName:"flowLogIdSet" locationNameList:"item" type:"list"`
+
+ // Information about the flow logs that could not be created successfully.
+ Unsuccessful []*UnsuccessfulItem `locationName:"unsuccessful" locationNameList:"item" type:"list"`
+}
+
+// String returns the string representation
+func (s CreateFlowLogsOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreateFlowLogsOutput) GoString() string {
+ return s.String()
+}
+
+type CreateImageInput struct {
+ _ struct{} `type:"structure"`
+
+ // Information about one or more block device mappings.
+ BlockDeviceMappings []*BlockDeviceMapping `locationName:"blockDeviceMapping" locationNameList:"BlockDeviceMapping" type:"list"`
+
+ // A description for the new image.
+ Description *string `locationName:"description" type:"string"`
+
+ // Checks whether you have the required permissions for the action, without
+ // actually making the request, and provides an error response. If you have
+ // the required permissions, the error response is DryRunOperation. Otherwise,
+ // it is UnauthorizedOperation.
+ DryRun *bool `locationName:"dryRun" type:"boolean"`
+
+ // The ID of the instance.
+ InstanceId *string `locationName:"instanceId" type:"string" required:"true"`
+
+ // A name for the new image.
+ //
+ // Constraints: 3-128 alphanumeric characters, parentheses (()), square brackets
+ // ([]), spaces ( ), periods (.), slashes (/), dashes (-), single quotes ('),
+ // at-signs (@), or underscores (_)
+ Name *string `locationName:"name" type:"string" required:"true"`
+
+ // By default, this parameter is set to false, which means Amazon EC2 attempts
+ // to shut down the instance cleanly before image creation and then reboots
+ // the instance. When the parameter is set to true, Amazon EC2 doesn't shut
+ // down the instance before creating the image. When this option is used, file
+ // system integrity on the created image can't be guaranteed.
+ NoReboot *bool `locationName:"noReboot" type:"boolean"`
+}
+
+// String returns the string representation
+func (s CreateImageInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreateImageInput) GoString() string {
+ return s.String()
+}
+
+type CreateImageOutput struct {
+ _ struct{} `type:"structure"`
+
+ // The ID of the new AMI.
+ ImageId *string `locationName:"imageId" type:"string"`
+}
+
+// String returns the string representation
+func (s CreateImageOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreateImageOutput) GoString() string {
+ return s.String()
+}
+
+type CreateInstanceExportTaskInput struct {
+ _ struct{} `type:"structure"`
+
+ // A description for the conversion task or the resource being exported. The
+ // maximum length is 255 bytes.
+ Description *string `locationName:"description" type:"string"`
+
+ // The format and location for an instance export task.
+ ExportToS3Task *ExportToS3TaskSpecification `locationName:"exportToS3" type:"structure"`
+
+ // The ID of the instance.
+ InstanceId *string `locationName:"instanceId" type:"string" required:"true"`
+
+ // The target virtualization environment.
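+ //
+ // Example: a sketch of creating an AMI from a running instance without
+ // rebooting it; note the file-system integrity caveat above when NoReboot
+ // is set (the instance ID and name are placeholders):
+ //
+ //   out, err := svc.CreateImage(&ec2.CreateImageInput{
+ //       InstanceId: aws.String("i-12345678"),
+ //       Name:       aws.String("my-server-2016-01-29"),
+ //       NoReboot:   aws.Bool(true),
+ //   })
+ //   if err == nil {
+ //       fmt.Println(aws.StringValue(out.ImageId))
+ //   }
+ //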
+ TargetEnvironment *string `locationName:"targetEnvironment" type:"string" enum:"ExportEnvironment"` +} + +// String returns the string representation +func (s CreateInstanceExportTaskInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateInstanceExportTaskInput) GoString() string { + return s.String() +} + +type CreateInstanceExportTaskOutput struct { + _ struct{} `type:"structure"` + + // Information about the instance export task. + ExportTask *ExportTask `locationName:"exportTask" type:"structure"` +} + +// String returns the string representation +func (s CreateInstanceExportTaskOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateInstanceExportTaskOutput) GoString() string { + return s.String() +} + +type CreateInternetGatewayInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` +} + +// String returns the string representation +func (s CreateInternetGatewayInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateInternetGatewayInput) GoString() string { + return s.String() +} + +type CreateInternetGatewayOutput struct { + _ struct{} `type:"structure"` + + // Information about the Internet gateway. + InternetGateway *InternetGateway `locationName:"internetGateway" type:"structure"` +} + +// String returns the string representation +func (s CreateInternetGatewayOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateInternetGatewayOutput) GoString() string { + return s.String() +} + +type CreateKeyPairInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // A unique name for the key pair. + // + // Constraints: Up to 255 ASCII characters + KeyName *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s CreateKeyPairInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateKeyPairInput) GoString() string { + return s.String() +} + +// Describes a key pair. +type CreateKeyPairOutput struct { + _ struct{} `type:"structure"` + + // The SHA-1 digest of the DER encoded private key. + KeyFingerprint *string `locationName:"keyFingerprint" type:"string"` + + // An unencrypted PEM encoded RSA private key. + KeyMaterial *string `locationName:"keyMaterial" type:"string"` + + // The name of the key pair. 
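+ //
+ // Example: a sketch of creating a key pair and saving the returned private
+ // key, which cannot be retrieved again later (the key name and file path are
+ // placeholders; assumes io/ioutil is imported):
+ //
+ //   out, err := svc.CreateKeyPair(&ec2.CreateKeyPairInput{
+ //       KeyName: aws.String("my-key"),
+ //   })
+ //   if err == nil {
+ //       ioutil.WriteFile("my-key.pem", []byte(aws.StringValue(out.KeyMaterial)), 0600)
+ //   }
+ //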
+ KeyName *string `locationName:"keyName" type:"string"` +} + +// String returns the string representation +func (s CreateKeyPairOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateKeyPairOutput) GoString() string { + return s.String() +} + +type CreateNatGatewayInput struct { + _ struct{} `type:"structure"` + + // The allocation ID of an Elastic IP address to associate with the NAT gateway. + // If the Elastic IP address is associated with another resource, you must first + // disassociate it. + AllocationId *string `type:"string" required:"true"` + + // Unique, case-sensitive identifier you provide to ensure the idempotency of + // the request. For more information, see How to Ensure Idempotency (http://docs.aws.amazon.com/AWSEC2/latest/APIReference/Run_Instance_Idempotency.html). + // + // Constraint: Maximum 64 ASCII characters. + ClientToken *string `type:"string"` + + // The subnet in which to create the NAT gateway. + SubnetId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s CreateNatGatewayInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateNatGatewayInput) GoString() string { + return s.String() +} + +type CreateNatGatewayOutput struct { + _ struct{} `type:"structure"` + + // Unique, case-sensitive identifier to ensure the idempotency of the request. + // Only returned if a client token was provided in the request. + ClientToken *string `locationName:"clientToken" type:"string"` + + // Information about the NAT gateway. + NatGateway *NatGateway `locationName:"natGateway" type:"structure"` +} + +// String returns the string representation +func (s CreateNatGatewayOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateNatGatewayOutput) GoString() string { + return s.String() +} + +type CreateNetworkAclEntryInput struct { + _ struct{} `type:"structure"` + + // The network range to allow or deny, in CIDR notation (for example 172.16.0.0/24). + CidrBlock *string `locationName:"cidrBlock" type:"string" required:"true"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // Indicates whether this is an egress rule (rule is applied to traffic leaving + // the subnet). + Egress *bool `locationName:"egress" type:"boolean" required:"true"` + + // ICMP protocol: The ICMP type and code. Required if specifying ICMP for the + // protocol. + IcmpTypeCode *IcmpTypeCode `locationName:"Icmp" type:"structure"` + + // The ID of the network ACL. + NetworkAclId *string `locationName:"networkAclId" type:"string" required:"true"` + + // TCP or UDP protocols: The range of ports the rule applies to. + PortRange *PortRange `locationName:"portRange" type:"structure"` + + // The protocol. A value of -1 means all protocols. + Protocol *string `locationName:"protocol" type:"string" required:"true"` + + // Indicates whether to allow or deny the traffic that matches the rule. + RuleAction *string `locationName:"ruleAction" type:"string" required:"true" enum:"RuleAction"` + + // The rule number for the entry (for example, 100). 
ACL entries are processed + // in ascending order by rule number. + // + // Constraints: Positive integer from 1 to 32766 + RuleNumber *int64 `locationName:"ruleNumber" type:"integer" required:"true"` +} + +// String returns the string representation +func (s CreateNetworkAclEntryInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateNetworkAclEntryInput) GoString() string { + return s.String() +} + +type CreateNetworkAclEntryOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s CreateNetworkAclEntryOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateNetworkAclEntryOutput) GoString() string { + return s.String() +} + +type CreateNetworkAclInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The ID of the VPC. + VpcId *string `locationName:"vpcId" type:"string" required:"true"` +} + +// String returns the string representation +func (s CreateNetworkAclInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateNetworkAclInput) GoString() string { + return s.String() +} + +type CreateNetworkAclOutput struct { + _ struct{} `type:"structure"` + + // Information about the network ACL. + NetworkAcl *NetworkAcl `locationName:"networkAcl" type:"structure"` +} + +// String returns the string representation +func (s CreateNetworkAclOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateNetworkAclOutput) GoString() string { + return s.String() +} + +type CreateNetworkInterfaceInput struct { + _ struct{} `type:"structure"` + + // A description for the network interface. + Description *string `locationName:"description" type:"string"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The IDs of one or more security groups. + Groups []*string `locationName:"SecurityGroupId" locationNameList:"SecurityGroupId" type:"list"` + + // The primary private IP address of the network interface. If you don't specify + // an IP address, Amazon EC2 selects one for you from the subnet range. If you + // specify an IP address, you cannot indicate any IP addresses specified in + // privateIpAddresses as primary (only one IP address can be designated as primary). + PrivateIpAddress *string `locationName:"privateIpAddress" type:"string"` + + // One or more private IP addresses. + PrivateIpAddresses []*PrivateIpAddressSpecification `locationName:"privateIpAddresses" locationNameList:"item" type:"list"` + + // The number of secondary private IP addresses to assign to a network interface. + // When you specify a number of secondary IP addresses, Amazon EC2 selects these + // IP addresses within the subnet range. 
You can't specify this option and specify + // more than one private IP address using privateIpAddresses. + // + // The number of IP addresses you can assign to a network interface varies + // by instance type. For more information, see Private IP Addresses Per ENI + // Per Instance Type (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-eni.html#AvailableIpPerENI) + // in the Amazon Elastic Compute Cloud User Guide. + SecondaryPrivateIpAddressCount *int64 `locationName:"secondaryPrivateIpAddressCount" type:"integer"` + + // The ID of the subnet to associate with the network interface. + SubnetId *string `locationName:"subnetId" type:"string" required:"true"` +} + +// String returns the string representation +func (s CreateNetworkInterfaceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateNetworkInterfaceInput) GoString() string { + return s.String() +} + +type CreateNetworkInterfaceOutput struct { + _ struct{} `type:"structure"` + + // Information about the network interface. + NetworkInterface *NetworkInterface `locationName:"networkInterface" type:"structure"` +} + +// String returns the string representation +func (s CreateNetworkInterfaceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateNetworkInterfaceOutput) GoString() string { + return s.String() +} + +type CreatePlacementGroupInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // A name for the placement group. + // + // Constraints: Up to 255 ASCII characters + GroupName *string `locationName:"groupName" type:"string" required:"true"` + + // The placement strategy. + Strategy *string `locationName:"strategy" type:"string" required:"true" enum:"PlacementStrategy"` +} + +// String returns the string representation +func (s CreatePlacementGroupInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreatePlacementGroupInput) GoString() string { + return s.String() +} + +type CreatePlacementGroupOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s CreatePlacementGroupOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreatePlacementGroupOutput) GoString() string { + return s.String() +} + +type CreateReservedInstancesListingInput struct { + _ struct{} `type:"structure"` + + // Unique, case-sensitive identifier you provide to ensure idempotency of your + // listings. This helps avoid duplicate listings. For more information, see + // Ensuring Idempotency (http://docs.aws.amazon.com/AWSEC2/latest/APIReference/Run_Instance_Idempotency.html). + ClientToken *string `locationName:"clientToken" type:"string" required:"true"` + + // The number of instances that are a part of a Reserved Instance account to + // be listed in the Reserved Instance Marketplace. This number should be less + // than or equal to the instance count associated with the Reserved Instance + // ID specified in this call. 
+ InstanceCount *int64 `locationName:"instanceCount" type:"integer" required:"true"` + + // A list specifying the price of the Reserved Instance for each month remaining + // in the Reserved Instance term. + PriceSchedules []*PriceScheduleSpecification `locationName:"priceSchedules" locationNameList:"item" type:"list" required:"true"` + + // The ID of the active Reserved Instance. + ReservedInstancesId *string `locationName:"reservedInstancesId" type:"string" required:"true"` +} + +// String returns the string representation +func (s CreateReservedInstancesListingInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateReservedInstancesListingInput) GoString() string { + return s.String() +} + +type CreateReservedInstancesListingOutput struct { + _ struct{} `type:"structure"` + + // Information about the Reserved Instance listing. + ReservedInstancesListings []*ReservedInstancesListing `locationName:"reservedInstancesListingsSet" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s CreateReservedInstancesListingOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateReservedInstancesListingOutput) GoString() string { + return s.String() +} + +type CreateRouteInput struct { + _ struct{} `type:"structure"` + + // The CIDR address block used for the destination match. Routing decisions + // are based on the most specific match. + DestinationCidrBlock *string `locationName:"destinationCidrBlock" type:"string" required:"true"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The ID of an Internet gateway or virtual private gateway attached to your + // VPC. + GatewayId *string `locationName:"gatewayId" type:"string"` + + // The ID of a NAT instance in your VPC. The operation fails if you specify + // an instance ID unless exactly one network interface is attached. + InstanceId *string `locationName:"instanceId" type:"string"` + + // The ID of a NAT gateway. + NatGatewayId *string `locationName:"natGatewayId" type:"string"` + + // The ID of a network interface. + NetworkInterfaceId *string `locationName:"networkInterfaceId" type:"string"` + + // The ID of the route table for the route. + RouteTableId *string `locationName:"routeTableId" type:"string" required:"true"` + + // The ID of a VPC peering connection. + VpcPeeringConnectionId *string `locationName:"vpcPeeringConnectionId" type:"string"` +} + +// String returns the string representation +func (s CreateRouteInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateRouteInput) GoString() string { + return s.String() +} + +type CreateRouteOutput struct { + _ struct{} `type:"structure"` + + // Returns true if the request succeeds; otherwise, it returns an error. 
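+ //
+ // Example (illustrative sketch, not generated upstream code): assuming an
+ // *ec2.EC2 client named svc, creating a default route to an Internet gateway
+ // and checking this flag might look like:
+ //
+ //   resp, err := svc.CreateRoute(&ec2.CreateRouteInput{
+ //       RouteTableId:         aws.String("rtb-1a2b3c4d"),
+ //       DestinationCidrBlock: aws.String("0.0.0.0/0"),
+ //       GatewayId:            aws.String("igw-1a2b3c4d"),
+ //   })
+ //   if err == nil && resp.Return != nil && *resp.Return {
+ //       // the route was created
+ //   }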
+ Return *bool `locationName:"return" type:"boolean"` +} + +// String returns the string representation +func (s CreateRouteOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateRouteOutput) GoString() string { + return s.String() +} + +type CreateRouteTableInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The ID of the VPC. + VpcId *string `locationName:"vpcId" type:"string" required:"true"` +} + +// String returns the string representation +func (s CreateRouteTableInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateRouteTableInput) GoString() string { + return s.String() +} + +type CreateRouteTableOutput struct { + _ struct{} `type:"structure"` + + // Information about the route table. + RouteTable *RouteTable `locationName:"routeTable" type:"structure"` +} + +// String returns the string representation +func (s CreateRouteTableOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateRouteTableOutput) GoString() string { + return s.String() +} + +type CreateSecurityGroupInput struct { + _ struct{} `type:"structure"` + + // A description for the security group. This is informational only. + // + // Constraints: Up to 255 characters in length + // + // Constraints for EC2-Classic: ASCII characters + // + // Constraints for EC2-VPC: a-z, A-Z, 0-9, spaces, and ._-:/()#,@[]+=;{}!$* + Description *string `locationName:"GroupDescription" type:"string" required:"true"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The name of the security group. + // + // Constraints: Up to 255 characters in length + // + // Constraints for EC2-Classic: ASCII characters + // + // Constraints for EC2-VPC: a-z, A-Z, 0-9, spaces, and ._-:/()#,@[]+=;{}!$* + GroupName *string `type:"string" required:"true"` + + // [EC2-VPC] The ID of the VPC. Required for EC2-VPC. + VpcId *string `type:"string"` +} + +// String returns the string representation +func (s CreateSecurityGroupInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateSecurityGroupInput) GoString() string { + return s.String() +} + +type CreateSecurityGroupOutput struct { + _ struct{} `type:"structure"` + + // The ID of the security group. + GroupId *string `locationName:"groupId" type:"string"` +} + +// String returns the string representation +func (s CreateSecurityGroupOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateSecurityGroupOutput) GoString() string { + return s.String() +} + +type CreateSnapshotInput struct { + _ struct{} `type:"structure"` + + // A description for the snapshot. 
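+ //
+ // Example (illustrative sketch, not generated upstream code): note that no
+ // CreateSnapshotOutput type follows in this file; the call is assumed to
+ // return the Snapshot structure directly. With an *ec2.EC2 client named svc:
+ //
+ //   snap, err := svc.CreateSnapshot(&ec2.CreateSnapshotInput{
+ //       VolumeId:    aws.String("vol-1a2b3c4d"),
+ //       Description: aws.String("Daily backup"),
+ //   })
+ //   if err != nil {
+ //       log.Fatal(err)
+ //   }
+ //   fmt.Println(*snap.SnapshotId)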
+ Description *string `type:"string"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The ID of the EBS volume. + VolumeId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s CreateSnapshotInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateSnapshotInput) GoString() string { + return s.String() +} + +// Contains the parameters for CreateSpotDatafeedSubscription. +type CreateSpotDatafeedSubscriptionInput struct { + _ struct{} `type:"structure"` + + // The Amazon S3 bucket in which to store the Spot instance data feed. + Bucket *string `locationName:"bucket" type:"string" required:"true"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // A prefix for the data feed file names. + Prefix *string `locationName:"prefix" type:"string"` +} + +// String returns the string representation +func (s CreateSpotDatafeedSubscriptionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateSpotDatafeedSubscriptionInput) GoString() string { + return s.String() +} + +// Contains the output of CreateSpotDatafeedSubscription. +type CreateSpotDatafeedSubscriptionOutput struct { + _ struct{} `type:"structure"` + + // The Spot instance data feed subscription. + SpotDatafeedSubscription *SpotDatafeedSubscription `locationName:"spotDatafeedSubscription" type:"structure"` +} + +// String returns the string representation +func (s CreateSpotDatafeedSubscriptionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateSpotDatafeedSubscriptionOutput) GoString() string { + return s.String() +} + +type CreateSubnetInput struct { + _ struct{} `type:"structure"` + + // The Availability Zone for the subnet. + // + // Default: AWS selects one for you. If you create more than one subnet in + // your VPC, we may not necessarily select a different zone for each subnet. + AvailabilityZone *string `type:"string"` + + // The network range for the subnet, in CIDR notation. For example, 10.0.0.0/24. + CidrBlock *string `type:"string" required:"true"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The ID of the VPC. + VpcId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s CreateSubnetInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateSubnetInput) GoString() string { + return s.String() +} + +type CreateSubnetOutput struct { + _ struct{} `type:"structure"` + + // Information about the subnet. 
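+ //
+ // Example (illustrative sketch, not generated upstream code): assuming an
+ // *ec2.EC2 client named svc, a minimal CreateSubnet call that produces this
+ // output might look like:
+ //
+ //   resp, err := svc.CreateSubnet(&ec2.CreateSubnetInput{
+ //       VpcId:     aws.String("vpc-1a2b3c4d"),
+ //       CidrBlock: aws.String("10.0.1.0/24"),
+ //   })
+ //   if err != nil {
+ //       log.Fatal(err)
+ //   }
+ //   fmt.Println(*resp.Subnet.SubnetId)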
+ Subnet *Subnet `locationName:"subnet" type:"structure"` +} + +// String returns the string representation +func (s CreateSubnetOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateSubnetOutput) GoString() string { + return s.String() +} + +type CreateTagsInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The IDs of one or more resources to tag. For example, ami-1a2b3c4d. + Resources []*string `locationName:"ResourceId" type:"list" required:"true"` + + // One or more tags. The value parameter is required, but if you don't want + // the tag to have a value, specify the parameter with no value, and we set + // the value to an empty string. + Tags []*Tag `locationName:"Tag" locationNameList:"item" type:"list" required:"true"` +} + +// String returns the string representation +func (s CreateTagsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateTagsInput) GoString() string { + return s.String() +} + +type CreateTagsOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s CreateTagsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateTagsOutput) GoString() string { + return s.String() +} + +type CreateVolumeInput struct { + _ struct{} `type:"structure"` + + // The Availability Zone in which to create the volume. Use DescribeAvailabilityZones + // to list the Availability Zones that are currently available to you. + AvailabilityZone *string `type:"string" required:"true"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // Specifies whether the volume should be encrypted. Encrypted Amazon EBS volumes + // may only be attached to instances that support Amazon EBS encryption. Volumes + // that are created from encrypted snapshots are automatically encrypted. There + // is no way to create an encrypted volume from an unencrypted snapshot or vice + // versa. If your AMI uses encrypted volumes, you can only launch it on supported + // instance types. For more information, see Amazon EBS Encryption (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSEncryption.html) + // in the Amazon Elastic Compute Cloud User Guide. + Encrypted *bool `locationName:"encrypted" type:"boolean"` + + // Only valid for Provisioned IOPS (SSD) volumes. The number of I/O operations + // per second (IOPS) to provision for the volume, with a maximum ratio of 30 + // IOPS/GiB. + // + // Constraint: Range is 100 to 20000 for Provisioned IOPS (SSD) volumes + Iops *int64 `type:"integer"` + + // The full ARN of the AWS Key Management Service (AWS KMS) customer master + // key (CMK) to use when creating the encrypted volume. This parameter is only + // required if you want to use a non-default CMK; if this parameter is not specified, + // the default CMK for EBS is used. 
The ARN contains the arn:aws:kms namespace, + // followed by the region of the CMK, the AWS account ID of the CMK owner, the + // key namespace, and then the CMK ID. For example, arn:aws:kms:us-east-1:012345678910:key/abcd1234-a123-456a-a12b-a123b4cd56ef. + // If a KmsKeyId is specified, the Encrypted flag must also be set. + KmsKeyId *string `type:"string"` + + // The size of the volume, in GiBs. + // + // Constraints: 1-1024 for standard volumes, 1-16384 for gp2 volumes, and 4-16384 + // for io1 volumes. If you specify a snapshot, the volume size must be equal + // to or larger than the snapshot size. + // + // Default: If you're creating the volume from a snapshot and don't specify + // a volume size, the default is the snapshot size. + Size *int64 `type:"integer"` + + // The snapshot from which to create the volume. + SnapshotId *string `type:"string"` + + // The volume type. This can be gp2 for General Purpose (SSD) volumes, io1 for + // Provisioned IOPS (SSD) volumes, or standard for Magnetic volumes. + // + // Default: standard + VolumeType *string `type:"string" enum:"VolumeType"` +} + +// String returns the string representation +func (s CreateVolumeInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateVolumeInput) GoString() string { + return s.String() +} + +// Describes the user or group to be added or removed from the permissions for +// a volume. +type CreateVolumePermission struct { + _ struct{} `type:"structure"` + + // The specific group that is to be added or removed from a volume's list of + // create volume permissions. + Group *string `locationName:"group" type:"string" enum:"PermissionGroup"` + + // The specific AWS account ID that is to be added or removed from a volume's + // list of create volume permissions. + UserId *string `locationName:"userId" type:"string"` +} + +// String returns the string representation +func (s CreateVolumePermission) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateVolumePermission) GoString() string { + return s.String() +} + +// Describes modifications to the permissions for a volume. +type CreateVolumePermissionModifications struct { + _ struct{} `type:"structure"` + + // Adds a specific AWS account ID or group to a volume's list of create volume + // permissions. + Add []*CreateVolumePermission `locationNameList:"item" type:"list"` + + // Removes a specific AWS account ID or group from a volume's list of create + // volume permissions. + Remove []*CreateVolumePermission `locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s CreateVolumePermissionModifications) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateVolumePermissionModifications) GoString() string { + return s.String() +} + +type CreateVpcEndpointInput struct { + _ struct{} `type:"structure"` + + // Unique, case-sensitive identifier you provide to ensure the idempotency of + // the request. For more information, see How to Ensure Idempotency (http://docs.aws.amazon.com/AWSEC2/latest/APIReference/Run_Instance_Idempotency.html). + ClientToken *string `type:"string"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. 
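+ //
+ // Example (illustrative sketch, not generated upstream code): the dry-run
+ // pattern, assuming an *ec2.EC2 client named svc and this SDK's awserr
+ // package (github.com/aws/aws-sdk-go/aws/awserr):
+ //
+ //   _, err := svc.CreateVpcEndpoint(&ec2.CreateVpcEndpointInput{
+ //       ServiceName: aws.String("com.amazonaws.us-east-1.s3"),
+ //       VpcId:       aws.String("vpc-1a2b3c4d"),
+ //       DryRun:      aws.Bool(true),
+ //   })
+ //   if awsErr, ok := err.(awserr.Error); ok && awsErr.Code() == "DryRunOperation" {
+ //       // permissions are in place; the real request should succeed
+ //   }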
+ DryRun *bool `type:"boolean"` + + // A policy to attach to the endpoint that controls access to the service. The + // policy must be in valid JSON format. If this parameter is not specified, + // we attach a default policy that allows full access to the service. + PolicyDocument *string `type:"string"` + + // One or more route table IDs. + RouteTableIds []*string `locationName:"RouteTableId" locationNameList:"item" type:"list"` + + // The AWS service name, in the form com.amazonaws.region.service. To get a + // list of available services, use the DescribeVpcEndpointServices request. + ServiceName *string `type:"string" required:"true"` + + // The ID of the VPC in which the endpoint will be used. + VpcId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s CreateVpcEndpointInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateVpcEndpointInput) GoString() string { + return s.String() +} + +type CreateVpcEndpointOutput struct { + _ struct{} `type:"structure"` + + // Unique, case-sensitive identifier you provide to ensure the idempotency of + // the request. + ClientToken *string `locationName:"clientToken" type:"string"` + + // Information about the endpoint. + VpcEndpoint *VpcEndpoint `locationName:"vpcEndpoint" type:"structure"` +} + +// String returns the string representation +func (s CreateVpcEndpointOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateVpcEndpointOutput) GoString() string { + return s.String() +} + +type CreateVpcInput struct { + _ struct{} `type:"structure"` + + // The network range for the VPC, in CIDR notation. For example, 10.0.0.0/16. + CidrBlock *string `type:"string" required:"true"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The supported tenancy options for instances launched into the VPC. A value + // of default means that instances can be launched with any tenancy; a value + // of dedicated means all instances launched into the VPC are launched as dedicated + // tenancy instances regardless of the tenancy assigned to the instance at launch. + // Dedicated tenancy instances run on single-tenant hardware. + // + // Important: The host value cannot be used with this parameter. Use the default + // or dedicated values only. + // + // Default: default + InstanceTenancy *string `locationName:"instanceTenancy" type:"string" enum:"Tenancy"` +} + +// String returns the string representation +func (s CreateVpcInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateVpcInput) GoString() string { + return s.String() +} + +type CreateVpcOutput struct { + _ struct{} `type:"structure"` + + // Information about the VPC. 
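+ //
+ // Example (illustrative sketch, not generated upstream code): assuming an
+ // *ec2.EC2 client named svc, a minimal CreateVpc call that produces this
+ // output might look like:
+ //
+ //   resp, err := svc.CreateVpc(&ec2.CreateVpcInput{
+ //       CidrBlock: aws.String("10.0.0.0/16"),
+ //   })
+ //   if err != nil {
+ //       log.Fatal(err)
+ //   }
+ //   fmt.Println(*resp.Vpc.VpcId)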
+ Vpc *Vpc `locationName:"vpc" type:"structure"`
+}
+
+// String returns the string representation
+func (s CreateVpcOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreateVpcOutput) GoString() string {
+ return s.String()
+}
+
+type CreateVpcPeeringConnectionInput struct {
+ _ struct{} `type:"structure"`
+
+ // Checks whether you have the required permissions for the action, without
+ // actually making the request, and provides an error response. If you have
+ // the required permissions, the error response is DryRunOperation. Otherwise,
+ // it is UnauthorizedOperation.
+ DryRun *bool `locationName:"dryRun" type:"boolean"`
+
+ // The AWS account ID of the owner of the peer VPC.
+ //
+ // Default: Your AWS account ID
+ PeerOwnerId *string `locationName:"peerOwnerId" type:"string"`
+
+ // The ID of the VPC with which you are creating the VPC peering connection.
+ PeerVpcId *string `locationName:"peerVpcId" type:"string"`
+
+ // The ID of the requester VPC.
+ VpcId *string `locationName:"vpcId" type:"string"`
+}
+
+// String returns the string representation
+func (s CreateVpcPeeringConnectionInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreateVpcPeeringConnectionInput) GoString() string {
+ return s.String()
+}
+
+type CreateVpcPeeringConnectionOutput struct {
+ _ struct{} `type:"structure"`
+
+ // Information about the VPC peering connection.
+ VpcPeeringConnection *VpcPeeringConnection `locationName:"vpcPeeringConnection" type:"structure"`
+}
+
+// String returns the string representation
+func (s CreateVpcPeeringConnectionOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreateVpcPeeringConnectionOutput) GoString() string {
+ return s.String()
+}
+
+type CreateVpnConnectionInput struct {
+ _ struct{} `type:"structure"`
+
+ // The ID of the customer gateway.
+ CustomerGatewayId *string `type:"string" required:"true"`
+
+ // Checks whether you have the required permissions for the action, without
+ // actually making the request, and provides an error response. If you have
+ // the required permissions, the error response is DryRunOperation. Otherwise,
+ // it is UnauthorizedOperation.
+ DryRun *bool `locationName:"dryRun" type:"boolean"`
+
+ // The options for the VPN connection. If you are creating a VPN connection
+ // for a device that does not support BGP, you must set the StaticRoutesOnly
+ // option to true.
+ //
+ // Default: StaticRoutesOnly is false
+ Options *VpnConnectionOptionsSpecification `locationName:"options" type:"structure"`
+
+ // The type of VPN connection (ipsec.1).
+ Type *string `type:"string" required:"true"`
+
+ // The ID of the virtual private gateway.
+ VpnGatewayId *string `type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s CreateVpnConnectionInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreateVpnConnectionInput) GoString() string {
+ return s.String()
+}
+
+type CreateVpnConnectionOutput struct {
+ _ struct{} `type:"structure"`
+
+ // Information about the VPN connection.
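+ //
+ // Example (illustrative sketch, not generated upstream code): assuming an
+ // *ec2.EC2 client named svc, creating a static-routes-only VPN connection
+ // might look like:
+ //
+ //   resp, err := svc.CreateVpnConnection(&ec2.CreateVpnConnectionInput{
+ //       Type:              aws.String("ipsec.1"),
+ //       CustomerGatewayId: aws.String("cgw-1a2b3c4d"),
+ //       VpnGatewayId:      aws.String("vgw-1a2b3c4d"),
+ //       Options: &ec2.VpnConnectionOptionsSpecification{
+ //           StaticRoutesOnly: aws.Bool(true),
+ //       },
+ //   })
+ //   if err != nil {
+ //       log.Fatal(err)
+ //   }
+ //   fmt.Println(*resp.VpnConnection.VpnConnectionId)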
+ VpnConnection *VpnConnection `locationName:"vpnConnection" type:"structure"` +} + +// String returns the string representation +func (s CreateVpnConnectionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateVpnConnectionOutput) GoString() string { + return s.String() +} + +type CreateVpnConnectionRouteInput struct { + _ struct{} `type:"structure"` + + // The CIDR block associated with the local subnet of the customer network. + DestinationCidrBlock *string `type:"string" required:"true"` + + // The ID of the VPN connection. + VpnConnectionId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s CreateVpnConnectionRouteInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateVpnConnectionRouteInput) GoString() string { + return s.String() +} + +type CreateVpnConnectionRouteOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s CreateVpnConnectionRouteOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateVpnConnectionRouteOutput) GoString() string { + return s.String() +} + +type CreateVpnGatewayInput struct { + _ struct{} `type:"structure"` + + // The Availability Zone for the virtual private gateway. + AvailabilityZone *string `type:"string"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The type of VPN connection this virtual private gateway supports. + Type *string `type:"string" required:"true" enum:"GatewayType"` +} + +// String returns the string representation +func (s CreateVpnGatewayInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateVpnGatewayInput) GoString() string { + return s.String() +} + +type CreateVpnGatewayOutput struct { + _ struct{} `type:"structure"` + + // Information about the virtual private gateway. + VpnGateway *VpnGateway `locationName:"vpnGateway" type:"structure"` +} + +// String returns the string representation +func (s CreateVpnGatewayOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateVpnGatewayOutput) GoString() string { + return s.String() +} + +// Describes a customer gateway. +type CustomerGateway struct { + _ struct{} `type:"structure"` + + // The customer gateway's Border Gateway Protocol (BGP) Autonomous System Number + // (ASN). + BgpAsn *string `locationName:"bgpAsn" type:"string"` + + // The ID of the customer gateway. + CustomerGatewayId *string `locationName:"customerGatewayId" type:"string"` + + // The Internet-routable IP address of the customer gateway's outside interface. + IpAddress *string `locationName:"ipAddress" type:"string"` + + // The current state of the customer gateway (pending | available | deleting + // | deleted). + State *string `locationName:"state" type:"string"` + + // Any tags assigned to the customer gateway. + Tags []*Tag `locationName:"tagSet" locationNameList:"item" type:"list"` + + // The type of VPN connection the customer gateway supports (ipsec.1). 
+ Type *string `locationName:"type" type:"string"` +} + +// String returns the string representation +func (s CustomerGateway) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CustomerGateway) GoString() string { + return s.String() +} + +type DeleteCustomerGatewayInput struct { + _ struct{} `type:"structure"` + + // The ID of the customer gateway. + CustomerGatewayId *string `type:"string" required:"true"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` +} + +// String returns the string representation +func (s DeleteCustomerGatewayInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteCustomerGatewayInput) GoString() string { + return s.String() +} + +type DeleteCustomerGatewayOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteCustomerGatewayOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteCustomerGatewayOutput) GoString() string { + return s.String() +} + +type DeleteDhcpOptionsInput struct { + _ struct{} `type:"structure"` + + // The ID of the DHCP options set. + DhcpOptionsId *string `type:"string" required:"true"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` +} + +// String returns the string representation +func (s DeleteDhcpOptionsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteDhcpOptionsInput) GoString() string { + return s.String() +} + +type DeleteDhcpOptionsOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteDhcpOptionsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteDhcpOptionsOutput) GoString() string { + return s.String() +} + +type DeleteFlowLogsInput struct { + _ struct{} `type:"structure"` + + // One or more flow log IDs. + FlowLogIds []*string `locationName:"FlowLogId" locationNameList:"item" type:"list" required:"true"` +} + +// String returns the string representation +func (s DeleteFlowLogsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteFlowLogsInput) GoString() string { + return s.String() +} + +type DeleteFlowLogsOutput struct { + _ struct{} `type:"structure"` + + // Information about the flow logs that could not be deleted successfully. 
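+ //
+ // Example (illustrative sketch, not generated upstream code): per-item
+ // failures surface in this list rather than in the returned error, so
+ // callers should check it. Assuming an *ec2.EC2 client named svc:
+ //
+ //   resp, err := svc.DeleteFlowLogs(&ec2.DeleteFlowLogsInput{
+ //       FlowLogIds: []*string{aws.String("fl-1a2b3c4d")},
+ //   })
+ //   if err == nil && len(resp.Unsuccessful) > 0 {
+ //       // inspect the flow logs that could not be deleted
+ //   }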
+ Unsuccessful []*UnsuccessfulItem `locationName:"unsuccessful" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s DeleteFlowLogsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteFlowLogsOutput) GoString() string { + return s.String() +} + +type DeleteInternetGatewayInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The ID of the Internet gateway. + InternetGatewayId *string `locationName:"internetGatewayId" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteInternetGatewayInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteInternetGatewayInput) GoString() string { + return s.String() +} + +type DeleteInternetGatewayOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteInternetGatewayOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteInternetGatewayOutput) GoString() string { + return s.String() +} + +type DeleteKeyPairInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The name of the key pair. + KeyName *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteKeyPairInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteKeyPairInput) GoString() string { + return s.String() +} + +type DeleteKeyPairOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteKeyPairOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteKeyPairOutput) GoString() string { + return s.String() +} + +type DeleteNatGatewayInput struct { + _ struct{} `type:"structure"` + + // The ID of the NAT gateway. + NatGatewayId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteNatGatewayInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteNatGatewayInput) GoString() string { + return s.String() +} + +type DeleteNatGatewayOutput struct { + _ struct{} `type:"structure"` + + // The ID of the NAT gateway. 
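+ //
+ // Example (illustrative sketch, not generated upstream code): the output
+ // echoes the ID of the NAT gateway being deleted. Assuming an *ec2.EC2
+ // client named svc:
+ //
+ //   resp, err := svc.DeleteNatGateway(&ec2.DeleteNatGatewayInput{
+ //       NatGatewayId: aws.String("nat-1a2b3c4d"),
+ //   })
+ //   if err == nil {
+ //       fmt.Println(*resp.NatGatewayId)
+ //   }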
+ NatGatewayId *string `locationName:"natGatewayId" type:"string"` +} + +// String returns the string representation +func (s DeleteNatGatewayOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteNatGatewayOutput) GoString() string { + return s.String() +} + +type DeleteNetworkAclEntryInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // Indicates whether the rule is an egress rule. + Egress *bool `locationName:"egress" type:"boolean" required:"true"` + + // The ID of the network ACL. + NetworkAclId *string `locationName:"networkAclId" type:"string" required:"true"` + + // The rule number of the entry to delete. + RuleNumber *int64 `locationName:"ruleNumber" type:"integer" required:"true"` +} + +// String returns the string representation +func (s DeleteNetworkAclEntryInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteNetworkAclEntryInput) GoString() string { + return s.String() +} + +type DeleteNetworkAclEntryOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteNetworkAclEntryOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteNetworkAclEntryOutput) GoString() string { + return s.String() +} + +type DeleteNetworkAclInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The ID of the network ACL. + NetworkAclId *string `locationName:"networkAclId" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteNetworkAclInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteNetworkAclInput) GoString() string { + return s.String() +} + +type DeleteNetworkAclOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteNetworkAclOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteNetworkAclOutput) GoString() string { + return s.String() +} + +type DeleteNetworkInterfaceInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The ID of the network interface. 
+ NetworkInterfaceId *string `locationName:"networkInterfaceId" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteNetworkInterfaceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteNetworkInterfaceInput) GoString() string { + return s.String() +} + +type DeleteNetworkInterfaceOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteNetworkInterfaceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteNetworkInterfaceOutput) GoString() string { + return s.String() +} + +type DeletePlacementGroupInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The name of the placement group. + GroupName *string `locationName:"groupName" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeletePlacementGroupInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeletePlacementGroupInput) GoString() string { + return s.String() +} + +type DeletePlacementGroupOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeletePlacementGroupOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeletePlacementGroupOutput) GoString() string { + return s.String() +} + +type DeleteRouteInput struct { + _ struct{} `type:"structure"` + + // The CIDR range for the route. The value you specify must match the CIDR for + // the route exactly. + DestinationCidrBlock *string `locationName:"destinationCidrBlock" type:"string" required:"true"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The ID of the route table. + RouteTableId *string `locationName:"routeTableId" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteRouteInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteRouteInput) GoString() string { + return s.String() +} + +type DeleteRouteOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteRouteOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteRouteOutput) GoString() string { + return s.String() +} + +type DeleteRouteTableInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The ID of the route table. 
+ RouteTableId *string `locationName:"routeTableId" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteRouteTableInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteRouteTableInput) GoString() string { + return s.String() +} + +type DeleteRouteTableOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteRouteTableOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteRouteTableOutput) GoString() string { + return s.String() +} + +type DeleteSecurityGroupInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The ID of the security group. Required for a nondefault VPC. + GroupId *string `type:"string"` + + // [EC2-Classic, default VPC] The name of the security group. You can specify + // either the security group name or the security group ID. + GroupName *string `type:"string"` +} + +// String returns the string representation +func (s DeleteSecurityGroupInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteSecurityGroupInput) GoString() string { + return s.String() +} + +type DeleteSecurityGroupOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteSecurityGroupOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteSecurityGroupOutput) GoString() string { + return s.String() +} + +type DeleteSnapshotInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The ID of the EBS snapshot. + SnapshotId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteSnapshotInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteSnapshotInput) GoString() string { + return s.String() +} + +type DeleteSnapshotOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteSnapshotOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteSnapshotOutput) GoString() string { + return s.String() +} + +// Contains the parameters for DeleteSpotDatafeedSubscription. +type DeleteSpotDatafeedSubscriptionInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. 
+ DryRun *bool `locationName:"dryRun" type:"boolean"` +} + +// String returns the string representation +func (s DeleteSpotDatafeedSubscriptionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteSpotDatafeedSubscriptionInput) GoString() string { + return s.String() +} + +type DeleteSpotDatafeedSubscriptionOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteSpotDatafeedSubscriptionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteSpotDatafeedSubscriptionOutput) GoString() string { + return s.String() +} + +type DeleteSubnetInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The ID of the subnet. + SubnetId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteSubnetInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteSubnetInput) GoString() string { + return s.String() +} + +type DeleteSubnetOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteSubnetOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteSubnetOutput) GoString() string { + return s.String() +} + +type DeleteTagsInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The ID of the resource. For example, ami-1a2b3c4d. You can specify more than + // one resource ID. + Resources []*string `locationName:"resourceId" type:"list" required:"true"` + + // One or more tags to delete. If you omit the value parameter, we delete the + // tag regardless of its value. If you specify this parameter with an empty + // string as the value, we delete the key only if its value is an empty string. + Tags []*Tag `locationName:"tag" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s DeleteTagsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteTagsInput) GoString() string { + return s.String() +} + +type DeleteTagsOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteTagsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteTagsOutput) GoString() string { + return s.String() +} + +type DeleteVolumeInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. 
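+ //
+ // Example (illustrative sketch, not generated upstream code): assuming an
+ // *ec2.EC2 client named svc, and a volume that is already detached (in the
+ // available state), a minimal DeleteVolume call might look like:
+ //
+ //   _, err := svc.DeleteVolume(&ec2.DeleteVolumeInput{
+ //       VolumeId: aws.String("vol-1a2b3c4d"),
+ //   })
+ //   if err != nil {
+ //       log.Fatal(err)
+ //   }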
+ DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The ID of the volume. + VolumeId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteVolumeInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteVolumeInput) GoString() string { + return s.String() +} + +type DeleteVolumeOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteVolumeOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteVolumeOutput) GoString() string { + return s.String() +} + +type DeleteVpcEndpointsInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // One or more endpoint IDs. + VpcEndpointIds []*string `locationName:"VpcEndpointId" locationNameList:"item" type:"list" required:"true"` +} + +// String returns the string representation +func (s DeleteVpcEndpointsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteVpcEndpointsInput) GoString() string { + return s.String() +} + +type DeleteVpcEndpointsOutput struct { + _ struct{} `type:"structure"` + + // Information about the endpoints that were not successfully deleted. + Unsuccessful []*UnsuccessfulItem `locationName:"unsuccessful" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s DeleteVpcEndpointsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteVpcEndpointsOutput) GoString() string { + return s.String() +} + +type DeleteVpcInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The ID of the VPC. + VpcId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteVpcInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteVpcInput) GoString() string { + return s.String() +} + +type DeleteVpcOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteVpcOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteVpcOutput) GoString() string { + return s.String() +} + +type DeleteVpcPeeringConnectionInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The ID of the VPC peering connection. 
+ VpcPeeringConnectionId *string `locationName:"vpcPeeringConnectionId" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteVpcPeeringConnectionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteVpcPeeringConnectionInput) GoString() string { + return s.String() +} + +type DeleteVpcPeeringConnectionOutput struct { + _ struct{} `type:"structure"` + + // Returns true if the request succeeds; otherwise, it returns an error. + Return *bool `locationName:"return" type:"boolean"` +} + +// String returns the string representation +func (s DeleteVpcPeeringConnectionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteVpcPeeringConnectionOutput) GoString() string { + return s.String() +} + +type DeleteVpnConnectionInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The ID of the VPN connection. + VpnConnectionId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteVpnConnectionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteVpnConnectionInput) GoString() string { + return s.String() +} + +type DeleteVpnConnectionOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteVpnConnectionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteVpnConnectionOutput) GoString() string { + return s.String() +} + +type DeleteVpnConnectionRouteInput struct { + _ struct{} `type:"structure"` + + // The CIDR block associated with the local subnet of the customer network. + DestinationCidrBlock *string `type:"string" required:"true"` + + // The ID of the VPN connection. + VpnConnectionId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteVpnConnectionRouteInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteVpnConnectionRouteInput) GoString() string { + return s.String() +} + +type DeleteVpnConnectionRouteOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteVpnConnectionRouteOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteVpnConnectionRouteOutput) GoString() string { + return s.String() +} + +type DeleteVpnGatewayInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The ID of the virtual private gateway. 
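Nearly every input in this file repeats the same DryRun contract: a DryRunOperation error code means the caller is authorized, and UnauthorizedOperation means it is not. A sketch of how that reads in practice, assuming the svc client from the first sketch plus the awserr package (github.com/aws/aws-sdk-go/aws/awserr); the VPN connection ID is a placeholder:

    // Use DryRun to check permissions without deleting anything.
    _, err := svc.DeleteVpnConnection(&ec2.DeleteVpnConnectionInput{
        DryRun:          aws.Bool(true),
        VpnConnectionId: aws.String("vpn-44a8938f"), // placeholder ID
    })
    if awsErr, ok := err.(awserr.Error); ok {
        switch awsErr.Code() {
        case "DryRunOperation":
            fmt.Println("authorized: the real call would succeed")
        case "UnauthorizedOperation":
            fmt.Println("not authorized to delete VPN connections")
        }
    }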
+ VpnGatewayId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteVpnGatewayInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteVpnGatewayInput) GoString() string { + return s.String() +} + +type DeleteVpnGatewayOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteVpnGatewayOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteVpnGatewayOutput) GoString() string { + return s.String() +} + +type DeregisterImageInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The ID of the AMI. + ImageId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DeregisterImageInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeregisterImageInput) GoString() string { + return s.String() +} + +type DeregisterImageOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeregisterImageOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeregisterImageOutput) GoString() string { + return s.String() +} + +type DescribeAccountAttributesInput struct { + _ struct{} `type:"structure"` + + // One or more account attribute names. + AttributeNames []*string `locationName:"attributeName" locationNameList:"attributeName" type:"list"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` +} + +// String returns the string representation +func (s DescribeAccountAttributesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeAccountAttributesInput) GoString() string { + return s.String() +} + +type DescribeAccountAttributesOutput struct { + _ struct{} `type:"structure"` + + // Information about one or more account attributes. + AccountAttributes []*AccountAttribute `locationName:"accountAttributeSet" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s DescribeAccountAttributesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeAccountAttributesOutput) GoString() string { + return s.String() +} + +type DescribeAddressesInput struct { + _ struct{} `type:"structure"` + + // [EC2-VPC] One or more allocation IDs. + // + // Default: Describes all your Elastic IP addresses. + AllocationIds []*string `locationName:"AllocationId" locationNameList:"AllocationId" type:"list"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. 
Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // One or more filters. Filter names and values are case-sensitive. + // + // allocation-id - [EC2-VPC] The allocation ID for the address. + // + // association-id - [EC2-VPC] The association ID for the address. + // + // domain - Indicates whether the address is for use in EC2-Classic (standard) + // or in a VPC (vpc). + // + // instance-id - The ID of the instance the address is associated with, if + // any. + // + // network-interface-id - [EC2-VPC] The ID of the network interface that + // the address is associated with, if any. + // + // network-interface-owner-id - The AWS account ID of the owner. + // + // private-ip-address - [EC2-VPC] The private IP address associated with + // the Elastic IP address. + // + // public-ip - The Elastic IP address. + Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` + + // [EC2-Classic] One or more Elastic IP addresses. + // + // Default: Describes all your Elastic IP addresses. + PublicIps []*string `locationName:"PublicIp" locationNameList:"PublicIp" type:"list"` +} + +// String returns the string representation +func (s DescribeAddressesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeAddressesInput) GoString() string { + return s.String() +} + +type DescribeAddressesOutput struct { + _ struct{} `type:"structure"` + + // Information about one or more Elastic IP addresses. + Addresses []*Address `locationName:"addressesSet" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s DescribeAddressesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeAddressesOutput) GoString() string { + return s.String() +} + +type DescribeAvailabilityZonesInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // One or more filters. + // + // message - Information about the Availability Zone. + // + // region-name - The name of the region for the Availability Zone (for example, + // us-east-1). + // + // state - The state of the Availability Zone (available | information | + // impaired | unavailable). + // + // zone-name - The name of the Availability Zone (for example, us-east-1a). + Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` + + // The names of one or more Availability Zones. + ZoneNames []*string `locationName:"ZoneName" locationNameList:"ZoneName" type:"list"` +} + +// String returns the string representation +func (s DescribeAvailabilityZonesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeAvailabilityZonesInput) GoString() string { + return s.String() +} + +type DescribeAvailabilityZonesOutput struct { + _ struct{} `type:"structure"` + + // Information about one or more Availability Zones. 
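The DescribeAddresses filter names above plug into ec2.Filter values (a Name plus a list of Values). A sketch that lists only VPC-domain Elastic IPs, assuming the svc client from the first sketch; Address.PublicIp is assumed from the generated model:

    // Describe only Elastic IPs that live in a VPC (domain = vpc).
    out, err := svc.DescribeAddresses(&ec2.DescribeAddressesInput{
        Filters: []*ec2.Filter{{
            Name:   aws.String("domain"),
            Values: []*string{aws.String("vpc")},
        }},
    })
    if err != nil {
        return err
    }
    for _, addr := range out.Addresses {
        fmt.Println(aws.StringValue(addr.PublicIp))
    }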
+ AvailabilityZones []*AvailabilityZone `locationName:"availabilityZoneInfo" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s DescribeAvailabilityZonesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeAvailabilityZonesOutput) GoString() string { + return s.String() +} + +type DescribeBundleTasksInput struct { + _ struct{} `type:"structure"` + + // One or more bundle task IDs. + // + // Default: Describes all your bundle tasks. + BundleIds []*string `locationName:"BundleId" locationNameList:"BundleId" type:"list"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // One or more filters. + // + // bundle-id - The ID of the bundle task. + // + // error-code - If the task failed, the error code returned. + // + // error-message - If the task failed, the error message returned. + // + // instance-id - The ID of the instance. + // + // progress - The level of task completion, as a percentage (for example, + // 20%). + // + // s3-bucket - The Amazon S3 bucket to store the AMI. + // + // s3-prefix - The beginning of the AMI name. + // + // start-time - The time the task started (for example, 2013-09-15T17:15:20.000Z). + // + // state - The state of the task (pending | waiting-for-shutdown | bundling + // | storing | cancelling | complete | failed). + // + // update-time - The time of the most recent update for the task. + Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` +} + +// String returns the string representation +func (s DescribeBundleTasksInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeBundleTasksInput) GoString() string { + return s.String() +} + +type DescribeBundleTasksOutput struct { + _ struct{} `type:"structure"` + + // Information about one or more bundle tasks. + BundleTasks []*BundleTask `locationName:"bundleInstanceTasksSet" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s DescribeBundleTasksOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeBundleTasksOutput) GoString() string { + return s.String() +} + +type DescribeClassicLinkInstancesInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // One or more filters. + // + // group-id - The ID of a VPC security group that's associated with the instance. + // + // instance-id - The ID of the instance. + // + // tag:key=value - The key/value combination of a tag assigned to the resource. + // + // tag-key - The key of a tag assigned to the resource. This filter is independent + // of the tag-value filter. 
For example, if you use both the filter "tag-key=Purpose" + // and the filter "tag-value=X", you get any resources assigned both the tag + // key Purpose (regardless of what the tag's value is), and the tag value X + // (regardless of what the tag's key is). If you want to list only resources + // where Purpose is X, see the tag:key=value filter. + // + // tag-value - The value of a tag assigned to the resource. This filter is + // independent of the tag-key filter. + // + // vpc-id - The ID of the VPC that the instance is linked to. + Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` + + // One or more instance IDs. Must be instances linked to a VPC through ClassicLink. + InstanceIds []*string `locationName:"InstanceId" locationNameList:"InstanceId" type:"list"` + + // The maximum number of results to return for the request in a single page. + // The remaining results of the initial request can be seen by sending another + // request with the returned NextToken value. This value can be between 5 and + // 1000; if MaxResults is given a value larger than 1000, only 1000 results + // are returned. You cannot specify this parameter and the instance IDs parameter + // in the same request. + // + // Constraint: If the value is greater than 1000, we return only 1000 items. + MaxResults *int64 `locationName:"maxResults" type:"integer"` + + // The token to retrieve the next page of results. + NextToken *string `locationName:"nextToken" type:"string"` +} + +// String returns the string representation +func (s DescribeClassicLinkInstancesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeClassicLinkInstancesInput) GoString() string { + return s.String() +} + +type DescribeClassicLinkInstancesOutput struct { + _ struct{} `type:"structure"` + + // Information about one or more linked EC2-Classic instances. + Instances []*ClassicLinkInstance `locationName:"instancesSet" locationNameList:"item" type:"list"` + + // The token to use to retrieve the next page of results. This value is null + // when there are no more results to return. + NextToken *string `locationName:"nextToken" type:"string"` +} + +// String returns the string representation +func (s DescribeClassicLinkInstancesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeClassicLinkInstancesOutput) GoString() string { + return s.String() +} + +type DescribeConversionTasksInput struct { + _ struct{} `type:"structure"` + + // One or more conversion task IDs. + ConversionTaskIds []*string `locationName:"conversionTaskId" locationNameList:"item" type:"list"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // One or more filters. + Filters []*Filter `locationName:"filter" locationNameList:"Filter" type:"list"` +} + +// String returns the string representation +func (s DescribeConversionTasksInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeConversionTasksInput) GoString() string { + return s.String() +} + +type DescribeConversionTasksOutput struct { + _ struct{} `type:"structure"` + + // Information about the conversion tasks. 
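The MaxResults/NextToken fields above describe the standard pagination contract for these Describe calls: keep resending the request with the returned token until it comes back nil. A sketch, assuming the svc client from the first sketch and a ClassicLinkInstance.InstanceId field per the generated model:

    // Page through all ClassicLink instances, 100 at a time.
    input := &ec2.DescribeClassicLinkInstancesInput{MaxResults: aws.Int64(100)}
    for {
        page, err := svc.DescribeClassicLinkInstances(input)
        if err != nil {
            return err
        }
        for _, inst := range page.Instances {
            fmt.Println(aws.StringValue(inst.InstanceId))
        }
        if page.NextToken == nil {
            break // a nil NextToken means there are no more results
        }
        input.NextToken = page.NextToken
    }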
+ ConversionTasks []*ConversionTask `locationName:"conversionTasks" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s DescribeConversionTasksOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeConversionTasksOutput) GoString() string { + return s.String() +} + +type DescribeCustomerGatewaysInput struct { + _ struct{} `type:"structure"` + + // One or more customer gateway IDs. + // + // Default: Describes all your customer gateways. + CustomerGatewayIds []*string `locationName:"CustomerGatewayId" locationNameList:"CustomerGatewayId" type:"list"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // One or more filters. + // + // bgp-asn - The customer gateway's Border Gateway Protocol (BGP) Autonomous + // System Number (ASN). + // + // customer-gateway-id - The ID of the customer gateway. + // + // ip-address - The IP address of the customer gateway's Internet-routable + // external interface. + // + // state - The state of the customer gateway (pending | available | deleting + // | deleted). + // + // type - The type of customer gateway. Currently, the only supported type + // is ipsec.1. + // + // tag:key=value - The key/value combination of a tag assigned to the resource. + // + // tag-key - The key of a tag assigned to the resource. This filter is independent + // of the tag-value filter. For example, if you use both the filter "tag-key=Purpose" + // and the filter "tag-value=X", you get any resources assigned both the tag + // key Purpose (regardless of what the tag's value is), and the tag value X + // (regardless of what the tag's key is). If you want to list only resources + // where Purpose is X, see the tag:key=value filter. + // + // tag-value - The value of a tag assigned to the resource. This filter is + // independent of the tag-key filter. + Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` +} + +// String returns the string representation +func (s DescribeCustomerGatewaysInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeCustomerGatewaysInput) GoString() string { + return s.String() +} + +type DescribeCustomerGatewaysOutput struct { + _ struct{} `type:"structure"` + + // Information about one or more customer gateways. + CustomerGateways []*CustomerGateway `locationName:"customerGatewaySet" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s DescribeCustomerGatewaysOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeCustomerGatewaysOutput) GoString() string { + return s.String() +} + +type DescribeDhcpOptionsInput struct { + _ struct{} `type:"structure"` + + // The IDs of one or more DHCP options sets. + // + // Default: Describes all your DHCP options sets. + DhcpOptionsIds []*string `locationName:"DhcpOptionsId" locationNameList:"DhcpOptionsId" type:"list"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. 
If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // One or more filters. + // + // dhcp-options-id - The ID of a set of DHCP options. + // + // key - The key for one of the options (for example, domain-name). + // + // value - The value for one of the options. + // + // tag:key=value - The key/value combination of a tag assigned to the resource. + // + // tag-key - The key of a tag assigned to the resource. This filter is independent + // of the tag-value filter. For example, if you use both the filter "tag-key=Purpose" + // and the filter "tag-value=X", you get any resources assigned both the tag + // key Purpose (regardless of what the tag's value is), and the tag value X + // (regardless of what the tag's key is). If you want to list only resources + // where Purpose is X, see the tag:key=value filter. + // + // tag-value - The value of a tag assigned to the resource. This filter is + // independent of the tag-key filter. + Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` +} + +// String returns the string representation +func (s DescribeDhcpOptionsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeDhcpOptionsInput) GoString() string { + return s.String() +} + +type DescribeDhcpOptionsOutput struct { + _ struct{} `type:"structure"` + + // Information about one or more DHCP options sets. + DhcpOptions []*DhcpOptions `locationName:"dhcpOptionsSet" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s DescribeDhcpOptionsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeDhcpOptionsOutput) GoString() string { + return s.String() +} + +type DescribeExportTasksInput struct { + _ struct{} `type:"structure"` + + // One or more export task IDs. + ExportTaskIds []*string `locationName:"exportTaskId" locationNameList:"ExportTaskId" type:"list"` +} + +// String returns the string representation +func (s DescribeExportTasksInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeExportTasksInput) GoString() string { + return s.String() +} + +type DescribeExportTasksOutput struct { + _ struct{} `type:"structure"` + + // Information about the export tasks. + ExportTasks []*ExportTask `locationName:"exportTaskSet" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s DescribeExportTasksOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeExportTasksOutput) GoString() string { + return s.String() +} + +type DescribeFlowLogsInput struct { + _ struct{} `type:"structure"` + + // One or more filters. + // + // deliver-log-status - The status of the logs delivery (SUCCESS | FAILED). + // + // flow-log-id - The ID of the flow log. + // + // log-group-name - The name of the log group. + // + // resource-id - The ID of the VPC, subnet, or network interface. + // + // traffic-type - The type of traffic (ACCEPT | REJECT | ALL) + Filter []*Filter `locationNameList:"Filter" type:"list"` + + // One or more flow log IDs. + FlowLogIds []*string `locationName:"FlowLogId" locationNameList:"item" type:"list"` + + // The maximum number of results to return for the request in a single page. 
+ // The remaining results can be seen by sending another request with the returned
+ // NextToken value. This value can be between 5 and 1000; if MaxResults is given
+ // a value larger than 1000, only 1000 results are returned. You cannot specify
+ // this parameter and the flow log IDs parameter in the same request.
+ MaxResults *int64 `type:"integer"`
+
+ // The token to retrieve the next page of results.
+ NextToken *string `type:"string"`
+}
+
+// String returns the string representation
+func (s DescribeFlowLogsInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeFlowLogsInput) GoString() string {
+ return s.String()
+}
+
+type DescribeFlowLogsOutput struct {
+ _ struct{} `type:"structure"`
+
+ // Information about the flow logs.
+ FlowLogs []*FlowLog `locationName:"flowLogSet" locationNameList:"item" type:"list"`
+
+ // The token to use to retrieve the next page of results. This value is null
+ // when there are no more results to return.
+ NextToken *string `locationName:"nextToken" type:"string"`
+}
+
+// String returns the string representation
+func (s DescribeFlowLogsOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeFlowLogsOutput) GoString() string {
+ return s.String()
+}
+
+type DescribeHostsInput struct {
+ _ struct{} `type:"structure"`
+
+ // One or more filters.
+ //
+ // instance-type - The instance type size that the Dedicated host is configured
+ // to support.
+ //
+ // auto-placement - Whether auto-placement is enabled or disabled (on | off).
+ //
+ // host-reservation-id - The ID of the reservation associated with this host.
+ //
+ // client-token - The idempotency token you provided when you launched the
+ // instance.
+ //
+ // state - The allocation state of the Dedicated host (available | under-assessment
+ // | permanent-failure | released | released-permanent-failure).
+ //
+ // availability-zone - The Availability Zone of the host.
+ Filter []*Filter `locationName:"filter" locationNameList:"Filter" type:"list"`
+
+ // The IDs of the Dedicated hosts. The IDs are used for targeted instance launches.
+ HostIds []*string `locationName:"hostId" locationNameList:"item" type:"list"`
+
+ // The maximum number of results to return for the request in a single page.
+ // The remaining results can be seen by sending another request with the returned
+ // nextToken value. This value can be between 5 and 500; if maxResults is given
+ // a value larger than 500, you will receive an error. You cannot specify this
+ // parameter and the host IDs parameter in the same request.
+ MaxResults *int64 `locationName:"maxResults" type:"integer"`
+
+ // The token to retrieve the next page of results.
+ NextToken *string `locationName:"nextToken" type:"string"`
+}
+
+// String returns the string representation
+func (s DescribeHostsInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeHostsInput) GoString() string {
+ return s.String()
+}
+
+type DescribeHostsOutput struct {
+ _ struct{} `type:"structure"`
+
+ // Information about the Dedicated hosts.
+ Hosts []*Host `locationName:"hostSet" locationNameList:"item" type:"list"`
+
+ // The token to use to retrieve the next page of results. This value is null
+ // when there are no more results to return.
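DescribeHosts takes the same name/value filters, though note the field is Filter rather than Filters here. A sketch that lists Dedicated hosts still in the available allocation state, assuming the svc client from the first sketch and a Host.HostId field per the generated model:

    // Find Dedicated hosts that are available for new launches.
    out, err := svc.DescribeHosts(&ec2.DescribeHostsInput{
        Filter: []*ec2.Filter{{
            Name:   aws.String("state"),
            Values: []*string{aws.String("available")},
        }},
    })
    if err != nil {
        return err
    }
    for _, h := range out.Hosts {
        fmt.Println(aws.StringValue(h.HostId))
    }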
+ NextToken *string `locationName:"nextToken" type:"string"` +} + +// String returns the string representation +func (s DescribeHostsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeHostsOutput) GoString() string { + return s.String() +} + +type DescribeIdFormatInput struct { + _ struct{} `type:"structure"` + + // The type of resource. + Resource *string `type:"string"` +} + +// String returns the string representation +func (s DescribeIdFormatInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeIdFormatInput) GoString() string { + return s.String() +} + +type DescribeIdFormatOutput struct { + _ struct{} `type:"structure"` + + // Information about the ID format for the resource. + Statuses []*IdFormat `locationName:"statusSet" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s DescribeIdFormatOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeIdFormatOutput) GoString() string { + return s.String() +} + +type DescribeImageAttributeInput struct { + _ struct{} `type:"structure"` + + // The AMI attribute. + // + // Note: Depending on your account privileges, the blockDeviceMapping attribute + // may return a Client.AuthFailure error. If this happens, use DescribeImages + // to get information about the block device mapping for the AMI. + Attribute *string `type:"string" required:"true" enum:"ImageAttributeName"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The ID of the AMI. + ImageId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DescribeImageAttributeInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeImageAttributeInput) GoString() string { + return s.String() +} + +// Describes an image attribute. +type DescribeImageAttributeOutput struct { + _ struct{} `type:"structure"` + + // One or more block device mapping entries. + BlockDeviceMappings []*BlockDeviceMapping `locationName:"blockDeviceMapping" locationNameList:"item" type:"list"` + + // A description for the AMI. + Description *AttributeValue `locationName:"description" type:"structure"` + + // The ID of the AMI. + ImageId *string `locationName:"imageId" type:"string"` + + // The kernel ID. + KernelId *AttributeValue `locationName:"kernel" type:"structure"` + + // One or more launch permissions. + LaunchPermissions []*LaunchPermission `locationName:"launchPermission" locationNameList:"item" type:"list"` + + // One or more product codes. + ProductCodes []*ProductCode `locationName:"productCodes" locationNameList:"item" type:"list"` + + // The RAM disk ID. + RamdiskId *AttributeValue `locationName:"ramdisk" type:"structure"` + + // The value to use for a resource attribute. 
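DescribeImageAttribute returns one attribute per call, with Attribute constrained to the ImageAttributeName enum, and, per the note above, blockDeviceMapping can fail with Client.AuthFailure, in which case DescribeImages is the documented fallback. A sketch for the description attribute, assuming the svc client from the first sketch and an AttributeValue.Value field per the generated model:

    // Fetch a single attribute of an AMI.
    out, err := svc.DescribeImageAttribute(&ec2.DescribeImageAttributeInput{
        Attribute: aws.String("description"), // one of the ImageAttributeName values
        ImageId:   aws.String("ami-1a2b3c4d"),
    })
    if err != nil {
        return err
    }
    if out.Description != nil {
        fmt.Println(aws.StringValue(out.Description.Value))
    }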
+ SriovNetSupport *AttributeValue `locationName:"sriovNetSupport" type:"structure"` +} + +// String returns the string representation +func (s DescribeImageAttributeOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeImageAttributeOutput) GoString() string { + return s.String() +} + +type DescribeImagesInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // Scopes the images by users with explicit launch permissions. Specify an AWS + // account ID, self (the sender of the request), or all (public AMIs). + ExecutableUsers []*string `locationName:"ExecutableBy" locationNameList:"ExecutableBy" type:"list"` + + // One or more filters. + // + // architecture - The image architecture (i386 | x86_64). + // + // block-device-mapping.delete-on-termination - A Boolean value that indicates + // whether the Amazon EBS volume is deleted on instance termination. + // + // block-device-mapping.device-name - The device name for the EBS volume + // (for example, /dev/sdh). + // + // block-device-mapping.snapshot-id - The ID of the snapshot used for the + // EBS volume. + // + // block-device-mapping.volume-size - The volume size of the EBS volume, + // in GiB. + // + // block-device-mapping.volume-type - The volume type of the EBS volume (gp2 + // | standard | io1). + // + // description - The description of the image (provided during image creation). + // + // hypervisor - The hypervisor type (ovm | xen). + // + // image-id - The ID of the image. + // + // image-type - The image type (machine | kernel | ramdisk). + // + // is-public - A Boolean that indicates whether the image is public. + // + // kernel-id - The kernel ID. + // + // manifest-location - The location of the image manifest. + // + // name - The name of the AMI (provided during image creation). + // + // owner-alias - The AWS account alias (for example, amazon). + // + // owner-id - The AWS account ID of the image owner. + // + // platform - The platform. To only list Windows-based AMIs, use windows. + // + // product-code - The product code. + // + // product-code.type - The type of the product code (devpay | marketplace). + // + // ramdisk-id - The RAM disk ID. + // + // root-device-name - The name of the root device volume (for example, /dev/sda1). + // + // root-device-type - The type of the root device volume (ebs | instance-store). + // + // state - The state of the image (available | pending | failed). + // + // state-reason-code - The reason code for the state change. + // + // state-reason-message - The message for the state change. + // + // tag:key=value - The key/value combination of a tag assigned to the resource. + // + // tag-key - The key of a tag assigned to the resource. This filter is independent + // of the tag-value filter. For example, if you use both the filter "tag-key=Purpose" + // and the filter "tag-value=X", you get any resources assigned both the tag + // key Purpose (regardless of what the tag's value is), and the tag value X + // (regardless of what the tag's key is). If you want to list only resources + // where Purpose is X, see the tag:key=value filter. + // + // tag-value - The value of a tag assigned to the resource. 
This filter is + // independent of the tag-key filter. + // + // virtualization-type - The virtualization type (paravirtual | hvm). + Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` + + // One or more image IDs. + // + // Default: Describes all images available to you. + ImageIds []*string `locationName:"ImageId" locationNameList:"ImageId" type:"list"` + + // Filters the images by the owner. Specify an AWS account ID, amazon (owner + // is Amazon), aws-marketplace (owner is AWS Marketplace), self (owner is the + // sender of the request). Omitting this option returns all images for which + // you have launch permissions, regardless of ownership. + Owners []*string `locationName:"Owner" locationNameList:"Owner" type:"list"` +} + +// String returns the string representation +func (s DescribeImagesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeImagesInput) GoString() string { + return s.String() +} + +type DescribeImagesOutput struct { + _ struct{} `type:"structure"` + + // Information about one or more images. + Images []*Image `locationName:"imagesSet" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s DescribeImagesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeImagesOutput) GoString() string { + return s.String() +} + +type DescribeImportImageTasksInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // One or more filters. + Filters []*Filter `locationNameList:"Filter" type:"list"` + + // A list of import image task IDs. + ImportTaskIds []*string `locationName:"ImportTaskId" locationNameList:"ImportTaskId" type:"list"` + + // The maximum number of results to return in a single request. + MaxResults *int64 `type:"integer"` + + // A token that indicates the next page of results. + NextToken *string `type:"string"` +} + +// String returns the string representation +func (s DescribeImportImageTasksInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeImportImageTasksInput) GoString() string { + return s.String() +} + +type DescribeImportImageTasksOutput struct { + _ struct{} `type:"structure"` + + // A list of zero or more import image tasks that are currently active or were + // completed or canceled in the previous 7 days. + ImportImageTasks []*ImportImageTask `locationName:"importImageTaskSet" locationNameList:"item" type:"list"` + + // The token to use to get the next page of results. This value is null when + // there are no more results to return. 
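Owners and ExecutableUsers above are what keep a DescribeImages call from returning every public AMI; restricting to self is the common case. A sketch, assuming the svc client from the first sketch and Image.ImageId/Image.Name fields per the generated model:

    // List only the AMIs owned by the calling account.
    out, err := svc.DescribeImages(&ec2.DescribeImagesInput{
        Owners: []*string{aws.String("self")},
    })
    if err != nil {
        return err
    }
    for _, img := range out.Images {
        fmt.Printf("%s %s\n", aws.StringValue(img.ImageId), aws.StringValue(img.Name))
    }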
+ NextToken *string `locationName:"nextToken" type:"string"` +} + +// String returns the string representation +func (s DescribeImportImageTasksOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeImportImageTasksOutput) GoString() string { + return s.String() +} + +type DescribeImportSnapshotTasksInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // One or more filters. + Filters []*Filter `locationNameList:"Filter" type:"list"` + + // A list of import snapshot task IDs. + ImportTaskIds []*string `locationName:"ImportTaskId" locationNameList:"ImportTaskId" type:"list"` + + // The maximum number of results to return in a single request. + MaxResults *int64 `type:"integer"` + + // A token that indicates the next page of results. + NextToken *string `type:"string"` +} + +// String returns the string representation +func (s DescribeImportSnapshotTasksInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeImportSnapshotTasksInput) GoString() string { + return s.String() +} + +type DescribeImportSnapshotTasksOutput struct { + _ struct{} `type:"structure"` + + // A list of zero or more import snapshot tasks that are currently active or + // were completed or canceled in the previous 7 days. + ImportSnapshotTasks []*ImportSnapshotTask `locationName:"importSnapshotTaskSet" locationNameList:"item" type:"list"` + + // The token to use to get the next page of results. This value is null when + // there are no more results to return. + NextToken *string `locationName:"nextToken" type:"string"` +} + +// String returns the string representation +func (s DescribeImportSnapshotTasksOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeImportSnapshotTasksOutput) GoString() string { + return s.String() +} + +type DescribeInstanceAttributeInput struct { + _ struct{} `type:"structure"` + + // The instance attribute. + Attribute *string `locationName:"attribute" type:"string" required:"true" enum:"InstanceAttributeName"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The ID of the instance. + InstanceId *string `locationName:"instanceId" type:"string" required:"true"` +} + +// String returns the string representation +func (s DescribeInstanceAttributeInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeInstanceAttributeInput) GoString() string { + return s.String() +} + +// Describes an instance attribute. +type DescribeInstanceAttributeOutput struct { + _ struct{} `type:"structure"` + + // The block device mapping of the instance. 
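DescribeInstanceAttribute likewise returns a single attribute, and some need post-processing: userData, for one, comes back Base64-encoded (see the UserData field in the output struct below). A sketch, assuming the svc client from the first sketch, encoding/base64 imported, and an AttributeValue.Value field per the generated model; the instance ID is a placeholder:

    // Fetch and decode an instance's user data.
    out, err := svc.DescribeInstanceAttribute(&ec2.DescribeInstanceAttributeInput{
        Attribute:  aws.String("userData"), // an InstanceAttributeName value
        InstanceId: aws.String("i-10a64379"),
    })
    if err != nil {
        return err
    }
    if out.UserData != nil && out.UserData.Value != nil {
        decoded, err := base64.StdEncoding.DecodeString(*out.UserData.Value)
        if err != nil {
            return err
        }
        fmt.Println(string(decoded))
    }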
+ BlockDeviceMappings []*InstanceBlockDeviceMapping `locationName:"blockDeviceMapping" locationNameList:"item" type:"list"` + + // If the value is true, you can't terminate the instance through the Amazon + // EC2 console, CLI, or API; otherwise, you can. + DisableApiTermination *AttributeBooleanValue `locationName:"disableApiTermination" type:"structure"` + + // Indicates whether the instance is optimized for EBS I/O. + EbsOptimized *AttributeBooleanValue `locationName:"ebsOptimized" type:"structure"` + + // The security groups associated with the instance. + Groups []*GroupIdentifier `locationName:"groupSet" locationNameList:"item" type:"list"` + + // The ID of the instance. + InstanceId *string `locationName:"instanceId" type:"string"` + + // Indicates whether an instance stops or terminates when you initiate shutdown + // from the instance (using the operating system command for system shutdown). + InstanceInitiatedShutdownBehavior *AttributeValue `locationName:"instanceInitiatedShutdownBehavior" type:"structure"` + + // The instance type. + InstanceType *AttributeValue `locationName:"instanceType" type:"structure"` + + // The kernel ID. + KernelId *AttributeValue `locationName:"kernel" type:"structure"` + + // A list of product codes. + ProductCodes []*ProductCode `locationName:"productCodes" locationNameList:"item" type:"list"` + + // The RAM disk ID. + RamdiskId *AttributeValue `locationName:"ramdisk" type:"structure"` + + // The name of the root device (for example, /dev/sda1 or /dev/xvda). + RootDeviceName *AttributeValue `locationName:"rootDeviceName" type:"structure"` + + // Indicates whether source/destination checking is enabled. A value of true + // means checking is enabled, and false means checking is disabled. This value + // must be false for a NAT instance to perform NAT. + SourceDestCheck *AttributeBooleanValue `locationName:"sourceDestCheck" type:"structure"` + + // The value to use for a resource attribute. + SriovNetSupport *AttributeValue `locationName:"sriovNetSupport" type:"structure"` + + // The Base64-encoded MIME user data. + UserData *AttributeValue `locationName:"userData" type:"structure"` +} + +// String returns the string representation +func (s DescribeInstanceAttributeOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeInstanceAttributeOutput) GoString() string { + return s.String() +} + +type DescribeInstanceStatusInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // One or more filters. + // + // availability-zone - The Availability Zone of the instance. + // + // event.code - The code for the scheduled event (instance-reboot | system-reboot + // | system-maintenance | instance-retirement | instance-stop). + // + // event.description - A description of the event. + // + // event.not-after - The latest end time for the scheduled event (for example, + // 2014-09-15T17:15:20.000Z). + // + // event.not-before - The earliest start time for the scheduled event (for + // example, 2014-09-15T17:15:20.000Z). + // + // instance-state-code - The code for the instance state, as a 16-bit unsigned + // integer. The high byte is an opaque internal value and should be ignored. 
+ // The low byte is set based on the state represented. The valid values are + // 0 (pending), 16 (running), 32 (shutting-down), 48 (terminated), 64 (stopping), + // and 80 (stopped). + // + // instance-state-name - The state of the instance (pending | running | shutting-down + // | terminated | stopping | stopped). + // + // instance-status.reachability - Filters on instance status where the name + // is reachability (passed | failed | initializing | insufficient-data). + // + // instance-status.status - The status of the instance (ok | impaired | initializing + // | insufficient-data | not-applicable). + // + // system-status.reachability - Filters on system status where the name is + // reachability (passed | failed | initializing | insufficient-data). + // + // system-status.status - The system status of the instance (ok | impaired + // | initializing | insufficient-data | not-applicable). + Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` + + // When true, includes the health status for all instances. When false, includes + // the health status for running instances only. + // + // Default: false + IncludeAllInstances *bool `locationName:"includeAllInstances" type:"boolean"` + + // One or more instance IDs. + // + // Default: Describes all your instances. + // + // Constraints: Maximum 100 explicitly specified instance IDs. + InstanceIds []*string `locationName:"InstanceId" locationNameList:"InstanceId" type:"list"` + + // The maximum number of results to return for the request in a single page. + // The remaining results of the initial request can be seen by sending another + // request with the returned NextToken value. This value can be between 5 and + // 1000; if MaxResults is given a value larger than 1000, only 1000 results + // are returned. You cannot specify this parameter and the instance IDs parameter + // in the same request. + MaxResults *int64 `type:"integer"` + + // The token to retrieve the next page of results. + NextToken *string `type:"string"` +} + +// String returns the string representation +func (s DescribeInstanceStatusInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeInstanceStatusInput) GoString() string { + return s.String() +} + +type DescribeInstanceStatusOutput struct { + _ struct{} `type:"structure"` + + // One or more instance status descriptions. + InstanceStatuses []*InstanceStatus `locationName:"instanceStatusSet" locationNameList:"item" type:"list"` + + // The token to use to retrieve the next page of results. This value is null + // when there are no more results to return. + NextToken *string `locationName:"nextToken" type:"string"` +} + +// String returns the string representation +func (s DescribeInstanceStatusOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeInstanceStatusOutput) GoString() string { + return s.String() +} + +type DescribeInstancesInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // One or more filters. + // + // affinity - The affinity setting for an instance running on a Dedicated + // host (default | host). 
+ // + // architecture - The instance architecture (i386 | x86_64). + // + // availability-zone - The Availability Zone of the instance. + // + // block-device-mapping.attach-time - The attach time for an EBS volume mapped + // to the instance, for example, 2010-09-15T17:15:20.000Z. + // + // block-device-mapping.delete-on-termination - A Boolean that indicates + // whether the EBS volume is deleted on instance termination. + // + // block-device-mapping.device-name - The device name for the EBS volume + // (for example, /dev/sdh or xvdh). + // + // block-device-mapping.status - The status for the EBS volume (attaching + // | attached | detaching | detached). + // + // block-device-mapping.volume-id - The volume ID of the EBS volume. + // + // client-token - The idempotency token you provided when you launched the + // instance. + // + // dns-name - The public DNS name of the instance. + // + // group-id - The ID of the security group for the instance. EC2-Classic + // only. + // + // group-name - The name of the security group for the instance. EC2-Classic + // only. + // + // host-Id - The ID of the Dedicated host on which the instance is running, + // if applicable. + // + // hypervisor - The hypervisor type of the instance (ovm | xen). + // + // iam-instance-profile.arn - The instance profile associated with the instance. + // Specified as an ARN. + // + // image-id - The ID of the image used to launch the instance. + // + // instance-id - The ID of the instance. + // + // instance-lifecycle - Indicates whether this is a Spot Instance (spot). + // + // instance-state-code - The state of the instance, as a 16-bit unsigned + // integer. The high byte is an opaque internal value and should be ignored. + // The low byte is set based on the state represented. The valid values are: + // 0 (pending), 16 (running), 32 (shutting-down), 48 (terminated), 64 (stopping), + // and 80 (stopped). + // + // instance-state-name - The state of the instance (pending | running | shutting-down + // | terminated | stopping | stopped). + // + // instance-type - The type of instance (for example, t2.micro). + // + // instance.group-id - The ID of the security group for the instance. + // + // instance.group-name - The name of the security group for the instance. + // + // ip-address - The public IP address of the instance. + // + // kernel-id - The kernel ID. + // + // key-name - The name of the key pair used when the instance was launched. + // + // launch-index - When launching multiple instances, this is the index for + // the instance in the launch group (for example, 0, 1, 2, and so on). + // + // launch-time - The time when the instance was launched. + // + // monitoring-state - Indicates whether monitoring is enabled for the instance + // (disabled | enabled). + // + // owner-id - The AWS account ID of the instance owner. + // + // placement-group-name - The name of the placement group for the instance. + // + // platform - The platform. Use windows if you have Windows instances; otherwise, + // leave blank. + // + // private-dns-name - The private DNS name of the instance. + // + // private-ip-address - The private IP address of the instance. + // + // product-code - The product code associated with the AMI used to launch + // the instance. + // + // product-code.type - The type of product code (devpay | marketplace). + // + // ramdisk-id - The RAM disk ID. 
+ //
+ // reason - The reason for the current state of the instance (for example,
+ // shows "User Initiated [date]" when you stop or terminate the instance). Similar
+ // to the state-reason-code filter.
+ //
+ // requester-id - The ID of the entity that launched the instance on your
+ // behalf (for example, AWS Management Console, Auto Scaling, and so on).
+ //
+ // reservation-id - The ID of the instance's reservation. A reservation ID
+ // is created any time you launch an instance. A reservation ID has a one-to-one
+ // relationship with an instance launch request, but can be associated with
+ // more than one instance if you launch multiple instances using the same launch
+ // request. For example, if you launch one instance, you'll get one reservation
+ // ID. If you launch ten instances using the same launch request, you'll also
+ // get one reservation ID.
+ //
+ // root-device-name - The name of the root device for the instance (for example,
+ // /dev/sda1 or /dev/xvda).
+ //
+ // root-device-type - The type of root device that the instance uses (ebs
+ // | instance-store).
+ //
+ // source-dest-check - Indicates whether the instance performs source/destination
+ // checking. A value of true means that checking is enabled, and false means
+ // checking is disabled. The value must be false for the instance to perform
+ // network address translation (NAT) in your VPC.
+ //
+ // spot-instance-request-id - The ID of the Spot instance request.
+ //
+ // state-reason-code - The reason code for the state change.
+ //
+ // state-reason-message - A message that describes the state change.
+ //
+ // subnet-id - The ID of the subnet for the instance.
+ //
+ // tag:key=value - The key/value combination of a tag assigned to the resource,
+ // where tag:key is the tag's key.
+ //
+ // tag-key - The key of a tag assigned to the resource. This filter is independent
+ // of the tag-value filter. For example, if you use both the filter "tag-key=Purpose"
+ // and the filter "tag-value=X", you get any resources assigned both the tag
+ // key Purpose (regardless of what the tag's value is), and the tag value X
+ // (regardless of what the tag's key is). If you want to list only resources
+ // where Purpose is X, see the tag:key=value filter.
+ //
+ // tag-value - The value of a tag assigned to the resource. This filter is
+ // independent of the tag-key filter.
+ //
+ // tenancy - The tenancy of an instance (dedicated | default | host).
+ //
+ // virtualization-type - The virtualization type of the instance (paravirtual
+ // | hvm).
+ //
+ // vpc-id - The ID of the VPC that the instance is running in.
+ //
+ // network-interface.description - The description of the network interface.
+ //
+ // network-interface.subnet-id - The ID of the subnet for the network interface.
+ //
+ // network-interface.vpc-id - The ID of the VPC for the network interface.
+ //
+ // network-interface.network-interface-id - The ID of the network interface.
+ //
+ // network-interface.owner-id - The ID of the owner of the network interface.
+ //
+ // network-interface.availability-zone - The Availability Zone for the network
+ // interface.
+ //
+ // network-interface.requester-id - The requester ID for the network interface.
+ //
+ // network-interface.requester-managed - Indicates whether the network interface
+ // is being managed by AWS.
+ //
+ // network-interface.status - The status of the network interface (available
+ // | in-use).
+ //
+ // network-interface.mac-address - The MAC address of the network interface.
+ // + // network-interface.private-dns-name - The private DNS name of the network + // interface. + // + // network-interface.source-dest-check - Whether the network interface performs + // source/destination checking. A value of true means checking is enabled, and + // false means checking is disabled. The value must be false for the network + // interface to perform network address translation (NAT) in your VPC. + // + // network-interface.group-id - The ID of a security group associated with + // the network interface. + // + // network-interface.group-name - The name of a security group associated + // with the network interface. + // + // network-interface.attachment.attachment-id - The ID of the interface attachment. + // + // network-interface.attachment.instance-id - The ID of the instance to which + // the network interface is attached. + // + // network-interface.attachment.instance-owner-id - The owner ID of the instance + // to which the network interface is attached. + // + // network-interface.addresses.private-ip-address - The private IP address + // associated with the network interface. + // + // network-interface.attachment.device-index - The device index to which + // the network interface is attached. + // + // network-interface.attachment.status - The status of the attachment (attaching + // | attached | detaching | detached). + // + // network-interface.attachment.attach-time - The time that the network interface + // was attached to an instance. + // + // network-interface.attachment.delete-on-termination - Specifies whether + // the attachment is deleted when an instance is terminated. + // + // network-interface.addresses.primary - Specifies whether the IP address + // of the network interface is the primary private IP address. + // + // network-interface.addresses.association.public-ip - The ID of the association + // of an Elastic IP address with a network interface. + // + // network-interface.addresses.association.ip-owner-id - The owner ID of + // the private IP address associated with the network interface. + // + // association.public-ip - The address of the Elastic IP address bound to + // the network interface. + // + // association.ip-owner-id - The owner of the Elastic IP address associated + // with the network interface. + // + // association.allocation-id - The allocation ID returned when you allocated + // the Elastic IP address for your network interface. + // + // association.association-id - The association ID returned when the network + // interface was associated with an IP address. + Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` + + // One or more instance IDs. + // + // Default: Describes all your instances. + InstanceIds []*string `locationName:"InstanceId" locationNameList:"InstanceId" type:"list"` + + // The maximum number of results to return for the request in a single page. + // The remaining results of the initial request can be seen by sending another + // request with the returned NextToken value. This value can be between 5 and + // 1000; if MaxResults is given a value larger than 1000, only 1000 results + // are returned. You cannot specify this parameter and the instance IDs parameter + // in the same request. + MaxResults *int64 `locationName:"maxResults" type:"integer"` + + // The token to request the next page of results. 
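Given the size of the filter list above, one concrete combination is worth sketching: running instances carrying a particular tag, paged with NextToken exactly as in the earlier pagination sketch. It assumes the svc client from the first sketch and Reservation.Instances/Instance.InstanceId fields per the generated model; the tag key and value reuse the Purpose/X example from the comments above:

    // All running instances tagged Purpose=X, across all pages.
    input := &ec2.DescribeInstancesInput{
        Filters: []*ec2.Filter{
            {Name: aws.String("instance-state-name"), Values: []*string{aws.String("running")}},
            {Name: aws.String("tag:Purpose"), Values: []*string{aws.String("X")}},
        },
    }
    for {
        page, err := svc.DescribeInstances(input)
        if err != nil {
            return err
        }
        for _, res := range page.Reservations {
            for _, inst := range res.Instances {
                fmt.Println(aws.StringValue(inst.InstanceId))
            }
        }
        if page.NextToken == nil {
            break
        }
        input.NextToken = page.NextToken
    }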
+ NextToken *string `locationName:"nextToken" type:"string"` +} + +// String returns the string representation +func (s DescribeInstancesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeInstancesInput) GoString() string { + return s.String() +} + +type DescribeInstancesOutput struct { + _ struct{} `type:"structure"` + + // The token to use to retrieve the next page of results. This value is null + // when there are no more results to return. + NextToken *string `locationName:"nextToken" type:"string"` + + // Zero or more reservations. + Reservations []*Reservation `locationName:"reservationSet" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s DescribeInstancesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeInstancesOutput) GoString() string { + return s.String() +} + +type DescribeInternetGatewaysInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // One or more filters. + // + // attachment.state - The current state of the attachment between the gateway + // and the VPC (available). Present only if a VPC is attached. + // + // attachment.vpc-id - The ID of an attached VPC. + // + // internet-gateway-id - The ID of the Internet gateway. + // + // tag:key=value - The key/value combination of a tag assigned to the resource. + // + // tag-key - The key of a tag assigned to the resource. This filter is independent + // of the tag-value filter. For example, if you use both the filter "tag-key=Purpose" + // and the filter "tag-value=X", you get any resources assigned both the tag + // key Purpose (regardless of what the tag's value is), and the tag value X + // (regardless of what the tag's key is). If you want to list only resources + // where Purpose is X, see the tag:key=value filter. + // + // tag-value - The value of a tag assigned to the resource. This filter is + // independent of the tag-key filter. + Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` + + // One or more Internet gateway IDs. + // + // Default: Describes all your Internet gateways. + InternetGatewayIds []*string `locationName:"internetGatewayId" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s DescribeInternetGatewaysInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeInternetGatewaysInput) GoString() string { + return s.String() +} + +type DescribeInternetGatewaysOutput struct { + _ struct{} `type:"structure"` + + // Information about one or more Internet gateways. 
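+ //
+ // For instance (a sketch; vpc-11223344 is a placeholder and the client
+ // helpers are assumed as above), the gateways attached to a particular VPC
+ // can be found with the attachment.vpc-id filter documented above:
+ //
+ //	out, err := svc.DescribeInternetGateways(&ec2.DescribeInternetGatewaysInput{
+ //		Filters: []*ec2.Filter{{
+ //			Name:   aws.String("attachment.vpc-id"),
+ //			Values: []*string{aws.String("vpc-11223344")},
+ //		}},
+ //	})
+ //	if err == nil {
+ //		_ = out.InternetGateways
+ //	}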
+ InternetGateways []*InternetGateway `locationName:"internetGatewaySet" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s DescribeInternetGatewaysOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeInternetGatewaysOutput) GoString() string { + return s.String() +} + +type DescribeKeyPairsInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // One or more filters. + // + // fingerprint - The fingerprint of the key pair. + // + // key-name - The name of the key pair. + Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` + + // One or more key pair names. + // + // Default: Describes all your key pairs. + KeyNames []*string `locationName:"KeyName" locationNameList:"KeyName" type:"list"` +} + +// String returns the string representation +func (s DescribeKeyPairsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeKeyPairsInput) GoString() string { + return s.String() +} + +type DescribeKeyPairsOutput struct { + _ struct{} `type:"structure"` + + // Information about one or more key pairs. + KeyPairs []*KeyPairInfo `locationName:"keySet" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s DescribeKeyPairsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeKeyPairsOutput) GoString() string { + return s.String() +} + +type DescribeMovingAddressesInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // One or more filters. + // + // moving-status - The status of the Elastic IP address (MovingToVpc | RestoringToClassic). + Filters []*Filter `locationName:"filter" locationNameList:"Filter" type:"list"` + + // The maximum number of results to return for the request in a single page. + // The remaining results of the initial request can be seen by sending another + // request with the returned NextToken value. This value can be between 5 and + // 1000; if MaxResults is given a value outside of this range, an error is returned. + // + // Default: If no value is provided, the default is 1000. + MaxResults *int64 `locationName:"maxResults" type:"integer"` + + // The token to use to retrieve the next page of results. + NextToken *string `locationName:"nextToken" type:"string"` + + // One or more Elastic IP addresses. 
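+ //
+ // For example (a sketch; aws.String comes from the aws package, and the
+ // address is a documentation placeholder), a single address is passed as:
+ //
+ //	PublicIps: []*string{aws.String("203.0.113.7")}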
+ PublicIps []*string `locationName:"publicIp" locationNameList:"item" type:"list"`
+}
+
+// String returns the string representation
+func (s DescribeMovingAddressesInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeMovingAddressesInput) GoString() string {
+ return s.String()
+}
+
+type DescribeMovingAddressesOutput struct {
+ _ struct{} `type:"structure"`
+
+ // The status for each Elastic IP address.
+ MovingAddressStatuses []*MovingAddressStatus `locationName:"movingAddressStatusSet" locationNameList:"item" type:"list"`
+
+ // The token to use to retrieve the next page of results. This value is null
+ // when there are no more results to return.
+ NextToken *string `locationName:"nextToken" type:"string"`
+}
+
+// String returns the string representation
+func (s DescribeMovingAddressesOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeMovingAddressesOutput) GoString() string {
+ return s.String()
+}
+
+type DescribeNatGatewaysInput struct {
+ _ struct{} `type:"structure"`
+
+ // One or more filters.
+ //
+ // nat-gateway-id - The ID of the NAT gateway.
+ //
+ // state - The state of the NAT gateway (pending | failed | available | deleting
+ // | deleted).
+ //
+ // subnet-id - The ID of the subnet in which the NAT gateway resides.
+ //
+ // vpc-id - The ID of the VPC in which the NAT gateway resides.
+ Filter []*Filter `locationNameList:"Filter" type:"list"`
+
+ // The maximum number of items to return for this request. The request returns
+ // a token that you can specify in a subsequent call to get the next set of
+ // results.
+ //
+ // Constraint: If the value specified is greater than 1000, we return only
+ // 1000 items.
+ MaxResults *int64 `type:"integer"`
+
+ // One or more NAT gateway IDs.
+ NatGatewayIds []*string `locationName:"NatGatewayId" locationNameList:"item" type:"list"`
+
+ // The token to retrieve the next page of results.
+ NextToken *string `type:"string"`
+}
+
+// String returns the string representation
+func (s DescribeNatGatewaysInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeNatGatewaysInput) GoString() string {
+ return s.String()
+}
+
+type DescribeNatGatewaysOutput struct {
+ _ struct{} `type:"structure"`
+
+ // Information about the NAT gateways.
+ NatGateways []*NatGateway `locationName:"natGatewaySet" locationNameList:"item" type:"list"`
+
+ // The token to use to retrieve the next page of results. This value is null
+ // when there are no more results to return.
+ NextToken *string `locationName:"nextToken" type:"string"`
+}
+
+// String returns the string representation
+func (s DescribeNatGatewaysOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeNatGatewaysOutput) GoString() string {
+ return s.String()
+}
+
+type DescribeNetworkAclsInput struct {
+ _ struct{} `type:"structure"`
+
+ // Checks whether you have the required permissions for the action, without
+ // actually making the request, and provides an error response. If you have
+ // the required permissions, the error response is DryRunOperation. Otherwise,
+ // it is UnauthorizedOperation.
+ DryRun *bool `locationName:"dryRun" type:"boolean"`
+
+ // One or more filters.
+ //
+ // association.association-id - The association ID for the network ACL.
+ // + // association.network-acl-id - The ID of the network ACL involved in the + // association. + // + // association.subnet-id - The ID of the subnet involved in the association. + // + // default - Indicates whether the ACL is the default network ACL for the + // VPC. + // + // entry.cidr - The CIDR range specified in the entry. + // + // entry.egress - Indicates whether the entry applies to egress traffic. + // + // entry.icmp.code - The ICMP code specified in the entry, if any. + // + // entry.icmp.type - The ICMP type specified in the entry, if any. + // + // entry.port-range.from - The start of the port range specified in the entry. + // + // entry.port-range.to - The end of the port range specified in the entry. + // + // entry.protocol - The protocol specified in the entry (tcp | udp | icmp + // or a protocol number). + // + // entry.rule-action - Allows or denies the matching traffic (allow | deny). + // + // entry.rule-number - The number of an entry (in other words, rule) in the + // ACL's set of entries. + // + // network-acl-id - The ID of the network ACL. + // + // tag:key=value - The key/value combination of a tag assigned to the resource. + // + // tag-key - The key of a tag assigned to the resource. This filter is independent + // of the tag-value filter. For example, if you use both the filter "tag-key=Purpose" + // and the filter "tag-value=X", you get any resources assigned both the tag + // key Purpose (regardless of what the tag's value is), and the tag value X + // (regardless of what the tag's key is). If you want to list only resources + // where Purpose is X, see the tag:key=value filter. + // + // tag-value - The value of a tag assigned to the resource. This filter is + // independent of the tag-key filter. + // + // vpc-id - The ID of the VPC for the network ACL. + Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` + + // One or more network ACL IDs. + // + // Default: Describes all your network ACLs. + NetworkAclIds []*string `locationName:"NetworkAclId" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s DescribeNetworkAclsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeNetworkAclsInput) GoString() string { + return s.String() +} + +type DescribeNetworkAclsOutput struct { + _ struct{} `type:"structure"` + + // Information about one or more network ACLs. + NetworkAcls []*NetworkAcl `locationName:"networkAclSet" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s DescribeNetworkAclsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeNetworkAclsOutput) GoString() string { + return s.String() +} + +type DescribeNetworkInterfaceAttributeInput struct { + _ struct{} `type:"structure"` + + // The attribute of the network interface. + Attribute *string `locationName:"attribute" type:"string" enum:"NetworkInterfaceAttribute"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The ID of the network interface. 
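+ //
+ // This is the only field tagged as required. A minimal sketch (the ENI ID
+ // is a placeholder, and "sourceDestCheck" is assumed to be one of the
+ // NetworkInterfaceAttribute enum values):
+ //
+ //	out, err := svc.DescribeNetworkInterfaceAttribute(&ec2.DescribeNetworkInterfaceAttributeInput{
+ //		NetworkInterfaceId: aws.String("eni-12345678"),
+ //		Attribute:          aws.String("sourceDestCheck"),
+ //	})
+ //	if err == nil {
+ //		_ = out.SourceDestCheck // the requested attribute value
+ //	}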
+ NetworkInterfaceId *string `locationName:"networkInterfaceId" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s DescribeNetworkInterfaceAttributeInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeNetworkInterfaceAttributeInput) GoString() string {
+ return s.String()
+}
+
+type DescribeNetworkInterfaceAttributeOutput struct {
+ _ struct{} `type:"structure"`
+
+ // The attachment (if any) of the network interface.
+ Attachment *NetworkInterfaceAttachment `locationName:"attachment" type:"structure"`
+
+ // The description of the network interface.
+ Description *AttributeValue `locationName:"description" type:"structure"`
+
+ // The security groups associated with the network interface.
+ Groups []*GroupIdentifier `locationName:"groupSet" locationNameList:"item" type:"list"`
+
+ // The ID of the network interface.
+ NetworkInterfaceId *string `locationName:"networkInterfaceId" type:"string"`
+
+ // Indicates whether source/destination checking is enabled.
+ SourceDestCheck *AttributeBooleanValue `locationName:"sourceDestCheck" type:"structure"`
+}
+
+// String returns the string representation
+func (s DescribeNetworkInterfaceAttributeOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeNetworkInterfaceAttributeOutput) GoString() string {
+ return s.String()
+}
+
+type DescribeNetworkInterfacesInput struct {
+ _ struct{} `type:"structure"`
+
+ // Checks whether you have the required permissions for the action, without
+ // actually making the request, and provides an error response. If you have
+ // the required permissions, the error response is DryRunOperation. Otherwise,
+ // it is UnauthorizedOperation.
+ DryRun *bool `locationName:"dryRun" type:"boolean"`
+
+ // One or more filters.
+ //
+ // addresses.private-ip-address - The private IP addresses associated with
+ // the network interface.
+ //
+ // addresses.primary - Whether the private IP address is the primary IP address
+ // associated with the network interface.
+ //
+ // addresses.association.public-ip - The association ID returned when the
+ // network interface was associated with the Elastic IP address.
+ //
+ // addresses.association.owner-id - The owner ID of the addresses associated
+ // with the network interface.
+ //
+ // association.association-id - The association ID returned when the network
+ // interface was associated with an IP address.
+ //
+ // association.allocation-id - The allocation ID returned when you allocated
+ // the Elastic IP address for your network interface.
+ //
+ // association.ip-owner-id - The owner of the Elastic IP address associated
+ // with the network interface.
+ //
+ // association.public-ip - The address of the Elastic IP address bound to
+ // the network interface.
+ //
+ // association.public-dns-name - The public DNS name for the network interface.
+ //
+ // attachment.attachment-id - The ID of the interface attachment.
+ //
+ // attachment.attach-time - The time that the network interface was attached
+ // to an instance.
+ //
+ // attachment.delete-on-termination - Indicates whether the attachment is
+ // deleted when an instance is terminated.
+ //
+ // attachment.device-index - The device index to which the network interface
+ // is attached.
+ //
+ // attachment.instance-id - The ID of the instance to which the network interface
+ // is attached.
+ //
+ // attachment.instance-owner-id - The owner ID of the instance to which the
+ // network interface is attached.
+ //
+ // attachment.nat-gateway-id - The ID of the NAT gateway to which the network
+ // interface is attached.
+ //
+ // attachment.status - The status of the attachment (attaching | attached
+ // | detaching | detached).
+ //
+ // availability-zone - The Availability Zone of the network interface.
+ //
+ // description - The description of the network interface.
+ //
+ // group-id - The ID of a security group associated with the network interface.
+ //
+ // group-name - The name of a security group associated with the network
+ // interface.
+ //
+ // mac-address - The MAC address of the network interface.
+ //
+ // network-interface-id - The ID of the network interface.
+ //
+ // owner-id - The AWS account ID of the network interface owner.
+ //
+ // private-ip-address - The private IP address or addresses of the network
+ // interface.
+ //
+ // private-dns-name - The private DNS name of the network interface.
+ //
+ // requester-id - The ID of the entity that launched the instance on your
+ // behalf (for example, AWS Management Console, Auto Scaling, and so on).
+ //
+ // requester-managed - Indicates whether the network interface is being managed
+ // by an AWS service (for example, AWS Management Console, Auto Scaling, and
+ // so on).
+ //
+ // source-dest-check - Indicates whether the network interface performs source/destination
+ // checking. A value of true means checking is enabled, and false means checking
+ // is disabled. The value must be false for the network interface to perform
+ // network address translation (NAT) in your VPC.
+ //
+ // status - The status of the network interface. If the network interface
+ // is not attached to an instance, the status is available; if a network interface
+ // is attached to an instance, the status is in-use.
+ //
+ // subnet-id - The ID of the subnet for the network interface.
+ //
+ // tag:key=value - The key/value combination of a tag assigned to the resource.
+ //
+ // tag-key - The key of a tag assigned to the resource. This filter is independent
+ // of the tag-value filter. For example, if you use both the filter "tag-key=Purpose"
+ // and the filter "tag-value=X", you get any resources assigned both the tag
+ // key Purpose (regardless of what the tag's value is), and the tag value X
+ // (regardless of what the tag's key is). If you want to list only resources
+ // where Purpose is X, see the tag:key=value filter.
+ //
+ // tag-value - The value of a tag assigned to the resource. This filter is
+ // independent of the tag-key filter.
+ //
+ // vpc-id - The ID of the VPC for the network interface.
+ Filters []*Filter `locationName:"filter" locationNameList:"Filter" type:"list"`
+
+ // One or more network interface IDs.
+ //
+ // Default: Describes all your network interfaces.
+ NetworkInterfaceIds []*string `locationName:"NetworkInterfaceId" locationNameList:"item" type:"list"`
+}
+
+// String returns the string representation
+func (s DescribeNetworkInterfacesInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeNetworkInterfacesInput) GoString() string {
+ return s.String()
+}
+
+type DescribeNetworkInterfacesOutput struct {
+ _ struct{} `type:"structure"`
+
+ // Information about one or more network interfaces.
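+ //
+ // A usage sketch (client construction elided; the subnet ID is a placeholder):
+ // list the interfaces in one subnet via the subnet-id filter documented
+ // above, then walk this field on the response:
+ //
+ //	out, err := svc.DescribeNetworkInterfaces(&ec2.DescribeNetworkInterfacesInput{
+ //		Filters: []*ec2.Filter{{
+ //			Name:   aws.String("subnet-id"),
+ //			Values: []*string{aws.String("subnet-9d4a7b6c")},
+ //		}},
+ //	})
+ //	if err == nil {
+ //		for _, ni := range out.NetworkInterfaces {
+ //			_ = ni // each entry is a *ec2.NetworkInterface
+ //		}
+ //	}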
+ NetworkInterfaces []*NetworkInterface `locationName:"networkInterfaceSet" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s DescribeNetworkInterfacesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeNetworkInterfacesOutput) GoString() string { + return s.String() +} + +type DescribePlacementGroupsInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // One or more filters. + // + // group-name - The name of the placement group. + // + // state - The state of the placement group (pending | available | deleting + // | deleted). + // + // strategy - The strategy of the placement group (cluster). + Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` + + // One or more placement group names. + // + // Default: Describes all your placement groups, or only those otherwise specified. + GroupNames []*string `locationName:"groupName" type:"list"` +} + +// String returns the string representation +func (s DescribePlacementGroupsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribePlacementGroupsInput) GoString() string { + return s.String() +} + +type DescribePlacementGroupsOutput struct { + _ struct{} `type:"structure"` + + // One or more placement groups. + PlacementGroups []*PlacementGroup `locationName:"placementGroupSet" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s DescribePlacementGroupsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribePlacementGroupsOutput) GoString() string { + return s.String() +} + +type DescribePrefixListsInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // One or more filters. + // + // prefix-list-id: The ID of a prefix list. + // + // prefix-list-name: The name of a prefix list. + Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` + + // The maximum number of items to return for this request. The request returns + // a token that you can specify in a subsequent call to get the next set of + // results. + // + // Constraint: If the value specified is greater than 1000, we return only + // 1000 items. + MaxResults *int64 `type:"integer"` + + // The token for the next set of items to return. (You received this token from + // a prior call.) + NextToken *string `type:"string"` + + // One or more prefix list IDs. 
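+ //
+ // Note that, unlike most Describe calls in this file, the output below
+ // documents an empty (rather than null) NextToken at the end of the result
+ // set, so a paging sketch (helpers assumed as above) checks both:
+ //
+ //	input := &ec2.DescribePrefixListsInput{}
+ //	for {
+ //		out, err := svc.DescribePrefixLists(input)
+ //		if err != nil {
+ //			break // handle the error in real code
+ //		}
+ //		_ = out.PrefixLists // inspect this page
+ //		if out.NextToken == nil || *out.NextToken == "" {
+ //			break
+ //		}
+ //		input.NextToken = out.NextToken
+ //	}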
+ PrefixListIds []*string `locationName:"PrefixListId" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s DescribePrefixListsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribePrefixListsInput) GoString() string { + return s.String() +} + +type DescribePrefixListsOutput struct { + _ struct{} `type:"structure"` + + // The token to use when requesting the next set of items. If there are no additional + // items to return, the string is empty. + NextToken *string `locationName:"nextToken" type:"string"` + + // All available prefix lists. + PrefixLists []*PrefixList `locationName:"prefixListSet" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s DescribePrefixListsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribePrefixListsOutput) GoString() string { + return s.String() +} + +type DescribeRegionsInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // One or more filters. + // + // endpoint - The endpoint of the region (for example, ec2.us-east-1.amazonaws.com). + // + // region-name - The name of the region (for example, us-east-1). + Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` + + // The names of one or more regions. + RegionNames []*string `locationName:"RegionName" locationNameList:"RegionName" type:"list"` +} + +// String returns the string representation +func (s DescribeRegionsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeRegionsInput) GoString() string { + return s.String() +} + +type DescribeRegionsOutput struct { + _ struct{} `type:"structure"` + + // Information about one or more regions. + Regions []*Region `locationName:"regionInfo" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s DescribeRegionsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeRegionsOutput) GoString() string { + return s.String() +} + +type DescribeReservedInstancesInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // One or more filters. + // + // availability-zone - The Availability Zone where the Reserved Instance + // can be used. + // + // duration - The duration of the Reserved Instance (one year or three years), + // in seconds (31536000 | 94608000). + // + // end - The time when the Reserved Instance expires (for example, 2015-08-07T11:54:42.000Z). + // + // fixed-price - The purchase price of the Reserved Instance (for example, + // 9800.0). + // + // instance-type - The instance type that is covered by the reservation. + // + // product-description - The Reserved Instance product platform description. 
+ // Instances that include (Amazon VPC) in the product platform description will
+ // only be displayed to EC2-Classic account holders and are for use with Amazon
+ // VPC (Linux/UNIX | Linux/UNIX (Amazon VPC) | SUSE Linux | SUSE Linux (Amazon
+ // VPC) | Red Hat Enterprise Linux | Red Hat Enterprise Linux (Amazon VPC) |
+ // Windows | Windows (Amazon VPC) | Windows with SQL Server Standard | Windows
+ // with SQL Server Standard (Amazon VPC) | Windows with SQL Server Web | Windows
+ // with SQL Server Web (Amazon VPC) | Windows with SQL Server Enterprise | Windows
+ // with SQL Server Enterprise (Amazon VPC)).
+ //
+ // reserved-instances-id - The ID of the Reserved Instance.
+ //
+ // start - The time at which the Reserved Instance purchase request was placed
+ // (for example, 2014-08-07T11:54:42.000Z).
+ //
+ // state - The state of the Reserved Instance (payment-pending | active |
+ // payment-failed | retired).
+ //
+ // tag:key=value - The key/value combination of a tag assigned to the resource.
+ //
+ // tag-key - The key of a tag assigned to the resource. This filter is independent
+ // of the tag-value filter. For example, if you use both the filter "tag-key=Purpose"
+ // and the filter "tag-value=X", you get any resources assigned both the tag
+ // key Purpose (regardless of what the tag's value is), and the tag value X
+ // (regardless of what the tag's key is). If you want to list only resources
+ // where Purpose is X, see the tag:key=value filter.
+ //
+ // tag-value - The value of a tag assigned to the resource. This filter is
+ // independent of the tag-key filter.
+ //
+ // usage-price - The usage price of the Reserved Instance, per hour (for
+ // example, 0.84).
+ Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"`
+
+ // The Reserved Instance offering type. If you are using tools that predate
+ // the 2011-11-01 API version, you only have access to the Medium Utilization
+ // Reserved Instance offering type.
+ OfferingType *string `locationName:"offeringType" type:"string" enum:"OfferingTypeValues"`
+
+ // One or more Reserved Instance IDs.
+ //
+ // Default: Describes all your Reserved Instances, or only those otherwise
+ // specified.
+ ReservedInstancesIds []*string `locationName:"ReservedInstancesId" locationNameList:"ReservedInstancesId" type:"list"`
+}
+
+// String returns the string representation
+func (s DescribeReservedInstancesInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeReservedInstancesInput) GoString() string {
+ return s.String()
+}
+
+type DescribeReservedInstancesListingsInput struct {
+ _ struct{} `type:"structure"`
+
+ // One or more filters.
+ //
+ // reserved-instances-id - The ID of the Reserved Instances.
+ //
+ // reserved-instances-listing-id - The ID of the Reserved Instances listing.
+ //
+ // status - The status of the Reserved Instance listing (pending | active
+ // | cancelled | closed).
+ //
+ // status-message - The reason for the status.
+ Filters []*Filter `locationName:"filters" locationNameList:"Filter" type:"list"`
+
+ // The ID of the Reserved Instance.
+ ReservedInstancesId *string `locationName:"reservedInstancesId" type:"string"`
+
+ // The ID of the Reserved Instance listing.
+ ReservedInstancesListingId *string `locationName:"reservedInstancesListingId" type:"string"` +} + +// String returns the string representation +func (s DescribeReservedInstancesListingsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeReservedInstancesListingsInput) GoString() string { + return s.String() +} + +type DescribeReservedInstancesListingsOutput struct { + _ struct{} `type:"structure"` + + // Information about the Reserved Instance listing. + ReservedInstancesListings []*ReservedInstancesListing `locationName:"reservedInstancesListingsSet" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s DescribeReservedInstancesListingsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeReservedInstancesListingsOutput) GoString() string { + return s.String() +} + +type DescribeReservedInstancesModificationsInput struct { + _ struct{} `type:"structure"` + + // One or more filters. + // + // client-token - The idempotency token for the modification request. + // + // create-date - The time when the modification request was created. + // + // effective-date - The time when the modification becomes effective. + // + // modification-result.reserved-instances-id - The ID for the Reserved Instances + // created as part of the modification request. This ID is only available when + // the status of the modification is fulfilled. + // + // modification-result.target-configuration.availability-zone - The Availability + // Zone for the new Reserved Instances. + // + // modification-result.target-configuration.instance-count - The number + // of new Reserved Instances. + // + // modification-result.target-configuration.instance-type - The instance + // type of the new Reserved Instances. + // + // modification-result.target-configuration.platform - The network platform + // of the new Reserved Instances (EC2-Classic | EC2-VPC). + // + // reserved-instances-id - The ID of the Reserved Instances modified. + // + // reserved-instances-modification-id - The ID of the modification request. + // + // status - The status of the Reserved Instances modification request (processing + // | fulfilled | failed). + // + // status-message - The reason for the status. + // + // update-date - The time when the modification request was last updated. + Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` + + // The token to retrieve the next page of results. + NextToken *string `locationName:"nextToken" type:"string"` + + // IDs for the submitted modification request. + ReservedInstancesModificationIds []*string `locationName:"ReservedInstancesModificationId" locationNameList:"ReservedInstancesModificationId" type:"list"` +} + +// String returns the string representation +func (s DescribeReservedInstancesModificationsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeReservedInstancesModificationsInput) GoString() string { + return s.String() +} + +type DescribeReservedInstancesModificationsOutput struct { + _ struct{} `type:"structure"` + + // The token to use to retrieve the next page of results. This value is null + // when there are no more results to return. + NextToken *string `locationName:"nextToken" type:"string"` + + // The Reserved Instance modification information. 
+ ReservedInstancesModifications []*ReservedInstancesModification `locationName:"reservedInstancesModificationsSet" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s DescribeReservedInstancesModificationsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeReservedInstancesModificationsOutput) GoString() string { + return s.String() +} + +type DescribeReservedInstancesOfferingsInput struct { + _ struct{} `type:"structure"` + + // The Availability Zone in which the Reserved Instance can be used. + AvailabilityZone *string `type:"string"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // One or more filters. + // + // availability-zone - The Availability Zone where the Reserved Instance + // can be used. + // + // duration - The duration of the Reserved Instance (for example, one year + // or three years), in seconds (31536000 | 94608000). + // + // fixed-price - The purchase price of the Reserved Instance (for example, + // 9800.0). + // + // instance-type - The instance type that is covered by the reservation. + // + // marketplace - Set to true to show only Reserved Instance Marketplace offerings. + // When this filter is not used, which is the default behavior, all offerings + // from both AWS and the Reserved Instance Marketplace are listed. + // + // product-description - The Reserved Instance product platform description. + // Instances that include (Amazon VPC) in the product platform description will + // only be displayed to EC2-Classic account holders and are for use with Amazon + // VPC. (Linux/UNIX | Linux/UNIX (Amazon VPC) | SUSE Linux | SUSE Linux (Amazon + // VPC) | Red Hat Enterprise Linux | Red Hat Enterprise Linux (Amazon VPC) | + // Windows | Windows (Amazon VPC) | Windows with SQL Server Standard | Windows + // with SQL Server Standard (Amazon VPC) | Windows with SQL Server Web | Windows + // with SQL Server Web (Amazon VPC) | Windows with SQL Server Enterprise | Windows + // with SQL Server Enterprise (Amazon VPC)) + // + // reserved-instances-offering-id - The Reserved Instances offering ID. + // + // usage-price - The usage price of the Reserved Instance, per hour (for + // example, 0.84). + Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` + + // Include Reserved Instance Marketplace offerings in the response. + IncludeMarketplace *bool `type:"boolean"` + + // The tenancy of the instances covered by the reservation. A Reserved Instance + // with a tenancy of dedicated is applied to instances that run in a VPC on + // single-tenant hardware (i.e., Dedicated Instances). + // + // Default: default + InstanceTenancy *string `locationName:"instanceTenancy" type:"string" enum:"Tenancy"` + + // The instance type that the reservation will cover (for example, m1.small). + // For more information, see Instance Types (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-types.html) + // in the Amazon Elastic Compute Cloud User Guide. + InstanceType *string `type:"string" enum:"InstanceType"` + + // The maximum duration (in seconds) to filter when searching for offerings. 
+ // + // Default: 94608000 (3 years) + MaxDuration *int64 `type:"long"` + + // The maximum number of instances to filter when searching for offerings. + // + // Default: 20 + MaxInstanceCount *int64 `type:"integer"` + + // The maximum number of results to return for the request in a single page. + // The remaining results of the initial request can be seen by sending another + // request with the returned NextToken value. The maximum is 100. + // + // Default: 100 + MaxResults *int64 `locationName:"maxResults" type:"integer"` + + // The minimum duration (in seconds) to filter when searching for offerings. + // + // Default: 2592000 (1 month) + MinDuration *int64 `type:"long"` + + // The token to retrieve the next page of results. + NextToken *string `locationName:"nextToken" type:"string"` + + // The Reserved Instance offering type. If you are using tools that predate + // the 2011-11-01 API version, you only have access to the Medium Utilization + // Reserved Instance offering type. + OfferingType *string `locationName:"offeringType" type:"string" enum:"OfferingTypeValues"` + + // The Reserved Instance product platform description. Instances that include + // (Amazon VPC) in the description are for use with Amazon VPC. + ProductDescription *string `type:"string" enum:"RIProductDescription"` + + // One or more Reserved Instances offering IDs. + ReservedInstancesOfferingIds []*string `locationName:"ReservedInstancesOfferingId" type:"list"` +} + +// String returns the string representation +func (s DescribeReservedInstancesOfferingsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeReservedInstancesOfferingsInput) GoString() string { + return s.String() +} + +type DescribeReservedInstancesOfferingsOutput struct { + _ struct{} `type:"structure"` + + // The token to use to retrieve the next page of results. This value is null + // when there are no more results to return. + NextToken *string `locationName:"nextToken" type:"string"` + + // A list of Reserved Instances offerings. + ReservedInstancesOfferings []*ReservedInstancesOffering `locationName:"reservedInstancesOfferingsSet" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s DescribeReservedInstancesOfferingsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeReservedInstancesOfferingsOutput) GoString() string { + return s.String() +} + +type DescribeReservedInstancesOutput struct { + _ struct{} `type:"structure"` + + // A list of Reserved Instances. + ReservedInstances []*ReservedInstances `locationName:"reservedInstancesSet" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s DescribeReservedInstancesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeReservedInstancesOutput) GoString() string { + return s.String() +} + +type DescribeRouteTablesInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // One or more filters. 
+ //
+ // association.route-table-association-id - The association ID for the route
+ // table.
+ //
+ // association.route-table-id - The ID of the route table involved in the
+ // association.
+ //
+ // association.subnet-id - The ID of the subnet involved in the association.
+ //
+ // association.main - Indicates whether the route table is the main route
+ // table for the VPC (true | false).
+ //
+ // route-table-id - The ID of the route table.
+ //
+ // route.destination-cidr-block - The CIDR range specified in a route in
+ // the table.
+ //
+ // route.destination-prefix-list-id - The ID (prefix) of the AWS service
+ // specified in a route in the table.
+ //
+ // route.gateway-id - The ID of a gateway specified in a route in the table.
+ //
+ // route.instance-id - The ID of an instance specified in a route in the
+ // table.
+ //
+ // route.nat-gateway-id - The ID of a NAT gateway.
+ //
+ // route.origin - Describes how the route was created. CreateRouteTable indicates
+ // that the route was automatically created when the route table was created;
+ // CreateRoute indicates that the route was manually added to the route table;
+ // EnableVgwRoutePropagation indicates that the route was propagated by route
+ // propagation.
+ //
+ // route.state - The state of a route in the route table (active | blackhole).
+ // The blackhole state indicates that the route's target isn't available (for
+ // example, the specified gateway isn't attached to the VPC, the specified NAT
+ // instance has been terminated, and so on).
+ //
+ // route.vpc-peering-connection-id - The ID of a VPC peering connection specified
+ // in a route in the table.
+ //
+ // tag:key=value - The key/value combination of a tag assigned to the resource.
+ //
+ // tag-key - The key of a tag assigned to the resource. This filter is independent
+ // of the tag-value filter. For example, if you use both the filter "tag-key=Purpose"
+ // and the filter "tag-value=X", you get any resources assigned both the tag
+ // key Purpose (regardless of what the tag's value is), and the tag value X
+ // (regardless of what the tag's key is). If you want to list only resources
+ // where Purpose is X, see the tag:key=value filter.
+ //
+ // tag-value - The value of a tag assigned to the resource. This filter is
+ // independent of the tag-key filter.
+ //
+ // vpc-id - The ID of the VPC for the route table.
+ Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"`
+
+ // One or more route table IDs.
+ //
+ // Default: Describes all your route tables.
+ RouteTableIds []*string `locationName:"RouteTableId" locationNameList:"item" type:"list"`
+}
+
+// String returns the string representation
+func (s DescribeRouteTablesInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeRouteTablesInput) GoString() string {
+ return s.String()
+}
+
+type DescribeRouteTablesOutput struct {
+ _ struct{} `type:"structure"`
+
+ // Information about one or more route tables.
+ RouteTables []*RouteTable `locationName:"routeTableSet" locationNameList:"item" type:"list"`
+}
+
+// String returns the string representation
+func (s DescribeRouteTablesOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeRouteTablesOutput) GoString() string {
+ return s.String()
+}
+
+// Contains the parameters for DescribeScheduledInstanceAvailability.
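+//
+// As a hedged sketch only (the SlotDateTimeRangeRequest and
+// ScheduledInstanceRecurrenceRequest field names below are assumed from the
+// rest of this package, not shown in this hunk), a weekly one-day query over
+// the coming week might look like:
+//
+//	out, err := svc.DescribeScheduledInstanceAvailability(&ec2.DescribeScheduledInstanceAvailabilityInput{
+//		FirstSlotStartTimeRange: &ec2.SlotDateTimeRangeRequest{
+//			EarliestTime: aws.Time(time.Now()),
+//			LatestTime:   aws.Time(time.Now().AddDate(0, 0, 7)),
+//		},
+//		Recurrence: &ec2.ScheduledInstanceRecurrenceRequest{
+//			Frequency:      aws.String("Weekly"),
+//			Interval:       aws.Int64(1),
+//			OccurrenceDays: []*int64{aws.Int64(1)},
+//		},
+//	})
+//	if err == nil {
+//		_ = out.ScheduledInstanceAvailabilitySet // candidate schedules
+//	}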
+type DescribeScheduledInstanceAvailabilityInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // One or more filters. + // + // availability-zone - The Availability Zone (for example, us-west-2a). + // + // instance-type - The instance type (for example, c4.large). + // + // network-platform - The network platform (EC2-Classic or EC2-VPC). + // + // platform - The platform (Linux/UNIX or Windows). + Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` + + // The time period for the first schedule to start. + FirstSlotStartTimeRange *SlotDateTimeRangeRequest `type:"structure" required:"true"` + + // The maximum number of results to return in a single call. To retrieve the + // remaining results, make another call with the returned NextToken value. + MaxResults *int64 `type:"integer"` + + // The maximum available duration, in hours. This value must be greater than + // MinSlotDurationInHours and less than 1,720. + MaxSlotDurationInHours *int64 `type:"integer"` + + // The minimum available duration, in hours. The minimum required duration is + // 1,200 hours per year. For example, the minimum daily schedule is 4 hours, + // the minimum weekly schedule is 24 hours, and the minimum monthly schedule + // is 100 hours. + MinSlotDurationInHours *int64 `type:"integer"` + + // The token for the next set of results. + NextToken *string `type:"string"` + + // The schedule recurrence. + Recurrence *ScheduledInstanceRecurrenceRequest `type:"structure" required:"true"` +} + +// String returns the string representation +func (s DescribeScheduledInstanceAvailabilityInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeScheduledInstanceAvailabilityInput) GoString() string { + return s.String() +} + +// Contains the output of DescribeScheduledInstanceAvailability. +type DescribeScheduledInstanceAvailabilityOutput struct { + _ struct{} `type:"structure"` + + // The token required to retrieve the next set of results. This value is null + // when there are no more results to return. + NextToken *string `locationName:"nextToken" type:"string"` + + // Information about the available Scheduled Instances. + ScheduledInstanceAvailabilitySet []*ScheduledInstanceAvailability `locationName:"scheduledInstanceAvailabilitySet" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s DescribeScheduledInstanceAvailabilityOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeScheduledInstanceAvailabilityOutput) GoString() string { + return s.String() +} + +// Contains the parameters for DescribeScheduledInstances. +type DescribeScheduledInstancesInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // One or more filters. + // + // availability-zone - The Availability Zone (for example, us-west-2a). 
+ // + // instance-type - The instance type (for example, c4.large). + // + // network-platform - The network platform (EC2-Classic or EC2-VPC). + // + // platform - The platform (Linux/UNIX or Windows). + Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` + + // The maximum number of results to return in a single call. To retrieve the + // remaining results, make another call with the returned NextToken value. + MaxResults *int64 `type:"integer"` + + // The token for the next set of results. + NextToken *string `type:"string"` + + // One or more Scheduled Instance IDs. + ScheduledInstanceIds []*string `locationName:"ScheduledInstanceId" locationNameList:"ScheduledInstanceId" type:"list"` + + // The time period for the first schedule to start. + SlotStartTimeRange *SlotStartTimeRangeRequest `type:"structure"` +} + +// String returns the string representation +func (s DescribeScheduledInstancesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeScheduledInstancesInput) GoString() string { + return s.String() +} + +// Contains the output of DescribeScheduledInstances. +type DescribeScheduledInstancesOutput struct { + _ struct{} `type:"structure"` + + // The token required to retrieve the next set of results. This value is null + // when there are no more results to return. + NextToken *string `locationName:"nextToken" type:"string"` + + // Information about the Scheduled Instances. + ScheduledInstanceSet []*ScheduledInstance `locationName:"scheduledInstanceSet" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s DescribeScheduledInstancesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeScheduledInstancesOutput) GoString() string { + return s.String() +} + +type DescribeSecurityGroupsInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // One or more filters. If using multiple filters for rules, the results include + // security groups for which any combination of rules - not necessarily a single + // rule - match all filters. + // + // description - The description of the security group. + // + // egress.ip-permission.prefix-list-id - The ID (prefix) of the AWS service + // to which the security group allows access. + // + // group-id - The ID of the security group. + // + // group-name - The name of the security group. + // + // ip-permission.cidr - A CIDR range that has been granted permission. + // + // ip-permission.from-port - The start of port range for the TCP and UDP + // protocols, or an ICMP type number. + // + // ip-permission.group-id - The ID of a security group that has been granted + // permission. + // + // ip-permission.group-name - The name of a security group that has been + // granted permission. + // + // ip-permission.protocol - The IP protocol for the permission (tcp | udp + // | icmp or a protocol number). + // + // ip-permission.to-port - The end of port range for the TCP and UDP protocols, + // or an ICMP code. + // + // ip-permission.user-id - The ID of an AWS account that has been granted + // permission. 
+ // + // owner-id - The AWS account ID of the owner of the security group. + // + // tag-key - The key of a tag assigned to the security group. + // + // tag-value - The value of a tag assigned to the security group. + // + // vpc-id - The ID of the VPC specified when the security group was created. + Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` + + // One or more security group IDs. Required for security groups in a nondefault + // VPC. + // + // Default: Describes all your security groups. + GroupIds []*string `locationName:"GroupId" locationNameList:"groupId" type:"list"` + + // [EC2-Classic and default VPC only] One or more security group names. You + // can specify either the security group name or the security group ID. For + // security groups in a nondefault VPC, use the group-name filter to describe + // security groups by name. + // + // Default: Describes all your security groups. + GroupNames []*string `locationName:"GroupName" locationNameList:"GroupName" type:"list"` +} + +// String returns the string representation +func (s DescribeSecurityGroupsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeSecurityGroupsInput) GoString() string { + return s.String() +} + +type DescribeSecurityGroupsOutput struct { + _ struct{} `type:"structure"` + + // Information about one or more security groups. + SecurityGroups []*SecurityGroup `locationName:"securityGroupInfo" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s DescribeSecurityGroupsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeSecurityGroupsOutput) GoString() string { + return s.String() +} + +type DescribeSnapshotAttributeInput struct { + _ struct{} `type:"structure"` + + // The snapshot attribute you would like to view. + Attribute *string `type:"string" required:"true" enum:"SnapshotAttributeName"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The ID of the EBS snapshot. + SnapshotId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DescribeSnapshotAttributeInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeSnapshotAttributeInput) GoString() string { + return s.String() +} + +type DescribeSnapshotAttributeOutput struct { + _ struct{} `type:"structure"` + + // A list of permissions for creating volumes from the snapshot. + CreateVolumePermissions []*CreateVolumePermission `locationName:"createVolumePermission" locationNameList:"item" type:"list"` + + // A list of product codes. + ProductCodes []*ProductCode `locationName:"productCodes" locationNameList:"item" type:"list"` + + // The ID of the EBS snapshot. 
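+ //
+ // A sketch of producing this output (the snapshot ID is a placeholder, and
+ // "createVolumePermission" is assumed to be a valid SnapshotAttributeName
+ // value):
+ //
+ //	out, err := svc.DescribeSnapshotAttribute(&ec2.DescribeSnapshotAttributeInput{
+ //		SnapshotId: aws.String("snap-12345678"),
+ //		Attribute:  aws.String("createVolumePermission"),
+ //	})
+ //	if err == nil {
+ //		_ = out.CreateVolumePermissions // who may create volumes from it
+ //	}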
+ SnapshotId *string `locationName:"snapshotId" type:"string"`
+}
+
+// String returns the string representation
+func (s DescribeSnapshotAttributeOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeSnapshotAttributeOutput) GoString() string {
+ return s.String()
+}
+
+type DescribeSnapshotsInput struct {
+ _ struct{} `type:"structure"`
+
+ // Checks whether you have the required permissions for the action, without
+ // actually making the request, and provides an error response. If you have
+ // the required permissions, the error response is DryRunOperation. Otherwise,
+ // it is UnauthorizedOperation.
+ DryRun *bool `locationName:"dryRun" type:"boolean"`
+
+ // One or more filters.
+ //
+ // description - A description of the snapshot.
+ //
+ // owner-alias - The AWS account alias (for example, amazon) that owns the
+ // snapshot.
+ //
+ // owner-id - The ID of the AWS account that owns the snapshot.
+ //
+ // progress - The progress of the snapshot, as a percentage (for example,
+ // 80%).
+ //
+ // snapshot-id - The snapshot ID.
+ //
+ // start-time - The time stamp when the snapshot was initiated.
+ //
+ // status - The status of the snapshot (pending | completed | error).
+ //
+ // tag:key=value - The key/value combination of a tag assigned to the resource.
+ //
+ // tag-key - The key of a tag assigned to the resource. This filter is independent
+ // of the tag-value filter. For example, if you use both the filter "tag-key=Purpose"
+ // and the filter "tag-value=X", you get any resources assigned both the tag
+ // key Purpose (regardless of what the tag's value is), and the tag value X
+ // (regardless of what the tag's key is). If you want to list only resources
+ // where Purpose is X, see the tag:key=value filter.
+ //
+ // tag-value - The value of a tag assigned to the resource. This filter is
+ // independent of the tag-key filter.
+ //
+ // volume-id - The ID of the volume the snapshot is for.
+ //
+ // volume-size - The size of the volume, in GiB.
+ Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"`
+
+ // The maximum number of snapshot results returned by DescribeSnapshots in paginated
+ // output. When this parameter is used, DescribeSnapshots only returns MaxResults
+ // results in a single page along with a NextToken response element. The remaining
+ // results of the initial request can be seen by sending another DescribeSnapshots
+ // request with the returned NextToken value. This value can be between 5 and
+ // 1000; if MaxResults is given a value larger than 1000, only 1000 results
+ // are returned. If this parameter is not used, then DescribeSnapshots returns
+ // all results. You cannot specify this parameter and the snapshot IDs parameter
+ // in the same request.
+ MaxResults *int64 `type:"integer"`
+
+ // The NextToken value returned from a previous paginated DescribeSnapshots
+ // request where MaxResults was used and the results exceeded the value of that
+ // parameter. Pagination continues from the end of the previous results that
+ // returned the NextToken value. This value is null when there are no more results
+ // to return.
+ NextToken *string `type:"string"`
+
+ // Returns the snapshots owned by the specified owner. Multiple owners can be
+ // specified.
+ OwnerIds []*string `locationName:"Owner" locationNameList:"Owner" type:"list"`
+
+ // One or more AWS account IDs that can create volumes from the snapshot.
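+ //
+ // A sketch (helpers assumed as above; per the EC2 API, "self" is accepted
+ // as an owner value) that restricts results to snapshots you own:
+ //
+ //	out, err := svc.DescribeSnapshots(&ec2.DescribeSnapshotsInput{
+ //		OwnerIds: []*string{aws.String("self")},
+ //	})
+ //	if err == nil {
+ //		_ = out.Snapshots
+ //	}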
+ RestorableByUserIds []*string `locationName:"RestorableBy" type:"list"` + + // One or more snapshot IDs. + // + // Default: Describes snapshots for which you have launch permissions. + SnapshotIds []*string `locationName:"SnapshotId" locationNameList:"SnapshotId" type:"list"` +} + +// String returns the string representation +func (s DescribeSnapshotsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeSnapshotsInput) GoString() string { + return s.String() +} + +type DescribeSnapshotsOutput struct { + _ struct{} `type:"structure"` + + // The NextToken value to include in a future DescribeSnapshots request. When + // the results of a DescribeSnapshots request exceed MaxResults, this value + // can be used to retrieve the next page of results. This value is null when + // there are no more results to return. + NextToken *string `locationName:"nextToken" type:"string"` + + // Information about the snapshots. + Snapshots []*Snapshot `locationName:"snapshotSet" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s DescribeSnapshotsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeSnapshotsOutput) GoString() string { + return s.String() +} + +// Contains the parameters for DescribeSpotDatafeedSubscription. +type DescribeSpotDatafeedSubscriptionInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` +} + +// String returns the string representation +func (s DescribeSpotDatafeedSubscriptionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeSpotDatafeedSubscriptionInput) GoString() string { + return s.String() +} + +// Contains the output of DescribeSpotDatafeedSubscription. +type DescribeSpotDatafeedSubscriptionOutput struct { + _ struct{} `type:"structure"` + + // The Spot instance data feed subscription. + SpotDatafeedSubscription *SpotDatafeedSubscription `locationName:"spotDatafeedSubscription" type:"structure"` +} + +// String returns the string representation +func (s DescribeSpotDatafeedSubscriptionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeSpotDatafeedSubscriptionOutput) GoString() string { + return s.String() +} + +// Contains the parameters for DescribeSpotFleetInstances. +type DescribeSpotFleetInstancesInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The maximum number of results to return in a single call. Specify a value + // between 1 and 1000. The default value is 1000. To retrieve the remaining + // results, make another call with the returned NextToken value. + MaxResults *int64 `locationName:"maxResults" type:"integer"` + + // The token for the next set of results. 
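+ //
+ // Editor's sketch (illustrative, not generated code): a typical
+ // MaxResults/NextToken pagination loop, assuming svc := ec2.New(session.New())
+ // and a hypothetical Spot fleet request ID:
+ //
+ //   input := &ec2.DescribeSpotFleetInstancesInput{
+ //       SpotFleetRequestId: aws.String("sfr-example"), // hypothetical
+ //   }
+ //   for {
+ //       page, err := svc.DescribeSpotFleetInstances(input)
+ //       if err != nil {
+ //           break
+ //       }
+ //       // use page.ActiveInstances here
+ //       if page.NextToken == nil {
+ //           break
+ //       }
+ //       input.NextToken = page.NextToken
+ //   }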
+ NextToken *string `locationName:"nextToken" type:"string"` + + // The ID of the Spot fleet request. + SpotFleetRequestId *string `locationName:"spotFleetRequestId" type:"string" required:"true"` +} + +// String returns the string representation +func (s DescribeSpotFleetInstancesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeSpotFleetInstancesInput) GoString() string { + return s.String() +} + +// Contains the output of DescribeSpotFleetInstances. +type DescribeSpotFleetInstancesOutput struct { + _ struct{} `type:"structure"` + + // The running instances. Note that this list is refreshed periodically and + // might be out of date. + ActiveInstances []*ActiveInstance `locationName:"activeInstanceSet" locationNameList:"item" type:"list" required:"true"` + + // The token required to retrieve the next set of results. This value is null + // when there are no more results to return. + NextToken *string `locationName:"nextToken" type:"string"` + + // The ID of the Spot fleet request. + SpotFleetRequestId *string `locationName:"spotFleetRequestId" type:"string" required:"true"` +} + +// String returns the string representation +func (s DescribeSpotFleetInstancesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeSpotFleetInstancesOutput) GoString() string { + return s.String() +} + +// Contains the parameters for DescribeSpotFleetRequestHistory. +type DescribeSpotFleetRequestHistoryInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The type of events to describe. By default, all events are described. + EventType *string `locationName:"eventType" type:"string" enum:"EventType"` + + // The maximum number of results to return in a single call. Specify a value + // between 1 and 1000. The default value is 1000. To retrieve the remaining + // results, make another call with the returned NextToken value. + MaxResults *int64 `locationName:"maxResults" type:"integer"` + + // The token for the next set of results. + NextToken *string `locationName:"nextToken" type:"string"` + + // The ID of the Spot fleet request. + SpotFleetRequestId *string `locationName:"spotFleetRequestId" type:"string" required:"true"` + + // The starting date and time for the events, in UTC format (for example, YYYY-MM-DDTHH:MM:SSZ). + StartTime *time.Time `locationName:"startTime" type:"timestamp" timestampFormat:"iso8601" required:"true"` +} + +// String returns the string representation +func (s DescribeSpotFleetRequestHistoryInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeSpotFleetRequestHistoryInput) GoString() string { + return s.String() +} + +// Contains the output of DescribeSpotFleetRequestHistory. +type DescribeSpotFleetRequestHistoryOutput struct { + _ struct{} `type:"structure"` + + // Information about the events in the history of the Spot fleet request. + HistoryRecords []*HistoryRecord `locationName:"historyRecordSet" locationNameList:"item" type:"list" required:"true"` + + // The last date and time for the events, in UTC format (for example, YYYY-MM-DDTHH:MM:SSZ). 
+ // All records up to this time were retrieved. + // + // If nextToken indicates that there are more results, this value is not present. + LastEvaluatedTime *time.Time `locationName:"lastEvaluatedTime" type:"timestamp" timestampFormat:"iso8601" required:"true"` + + // The token required to retrieve the next set of results. This value is null + // when there are no more results to return. + NextToken *string `locationName:"nextToken" type:"string"` + + // The ID of the Spot fleet request. + SpotFleetRequestId *string `locationName:"spotFleetRequestId" type:"string" required:"true"` + + // The starting date and time for the events, in UTC format (for example, YYYY-MM-DDTHH:MM:SSZ). + StartTime *time.Time `locationName:"startTime" type:"timestamp" timestampFormat:"iso8601" required:"true"` +} + +// String returns the string representation +func (s DescribeSpotFleetRequestHistoryOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeSpotFleetRequestHistoryOutput) GoString() string { + return s.String() +} + +// Contains the parameters for DescribeSpotFleetRequests. +type DescribeSpotFleetRequestsInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The maximum number of results to return in a single call. Specify a value + // between 1 and 1000. The default value is 1000. To retrieve the remaining + // results, make another call with the returned NextToken value. + MaxResults *int64 `locationName:"maxResults" type:"integer"` + + // The token for the next set of results. + NextToken *string `locationName:"nextToken" type:"string"` + + // The IDs of the Spot fleet requests. + SpotFleetRequestIds []*string `locationName:"spotFleetRequestId" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s DescribeSpotFleetRequestsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeSpotFleetRequestsInput) GoString() string { + return s.String() +} + +// Contains the output of DescribeSpotFleetRequests. +type DescribeSpotFleetRequestsOutput struct { + _ struct{} `type:"structure"` + + // The token required to retrieve the next set of results. This value is null + // when there are no more results to return. + NextToken *string `locationName:"nextToken" type:"string"` + + // Information about the configuration of your Spot fleet. + SpotFleetRequestConfigs []*SpotFleetRequestConfig `locationName:"spotFleetRequestConfigSet" locationNameList:"item" type:"list" required:"true"` +} + +// String returns the string representation +func (s DescribeSpotFleetRequestsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeSpotFleetRequestsOutput) GoString() string { + return s.String() +} + +// Contains the parameters for DescribeSpotInstanceRequests. +type DescribeSpotInstanceRequestsInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. 
Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // One or more filters. + // + // availability-zone-group - The Availability Zone group. + // + // create-time - The time stamp when the Spot instance request was created. + // + // fault-code - The fault code related to the request. + // + // fault-message - The fault message related to the request. + // + // instance-id - The ID of the instance that fulfilled the request. + // + // launch-group - The Spot instance launch group. + // + // launch.block-device-mapping.delete-on-termination - Indicates whether + // the Amazon EBS volume is deleted on instance termination. + // + // launch.block-device-mapping.device-name - The device name for the Amazon + // EBS volume (for example, /dev/sdh). + // + // launch.block-device-mapping.snapshot-id - The ID of the snapshot used + // for the Amazon EBS volume. + // + // launch.block-device-mapping.volume-size - The size of the Amazon EBS volume, + // in GiB. + // + // launch.block-device-mapping.volume-type - The type of the Amazon EBS volume + // (gp2 | standard | io1). + // + // launch.group-id - The security group for the instance. + // + // launch.image-id - The ID of the AMI. + // + // launch.instance-type - The type of instance (for example, m3.medium). + // + // launch.kernel-id - The kernel ID. + // + // launch.key-name - The name of the key pair the instance launched with. + // + // launch.monitoring-enabled - Whether monitoring is enabled for the Spot + // instance. + // + // launch.ramdisk-id - The RAM disk ID. + // + // network-interface.network-interface-id - The ID of the network interface. + // + // network-interface.device-index - The index of the device for the network + // interface attachment on the instance. + // + // network-interface.subnet-id - The ID of the subnet for the instance. + // + // network-interface.description - A description of the network interface. + // + // network-interface.private-ip-address - The primary private IP address + // of the network interface. + // + // network-interface.delete-on-termination - Indicates whether the network + // interface is deleted when the instance is terminated. + // + // network-interface.group-id - The ID of the security group associated with + // the network interface. + // + // network-interface.group-name - The name of the security group associated + // with the network interface. + // + // network-interface.addresses.primary - Indicates whether the IP address + // is the primary private IP address. + // + // product-description - The product description associated with the instance + // (Linux/UNIX | Windows). + // + // spot-instance-request-id - The Spot instance request ID. + // + // spot-price - The maximum hourly price for any Spot instance launched to + // fulfill the request. + // + // state - The state of the Spot instance request (open | active | closed + // | cancelled | failed). Spot bid status information can help you track your + // Amazon EC2 Spot instance requests. For more information, see Spot Bid Status + // (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/spot-bid-status.html) + // in the Amazon Elastic Compute Cloud User Guide. + // + // status-code - The short code describing the most recent evaluation of + // your Spot instance request. + // + // status-message - The message explaining the status of the Spot instance + // request. + // + // tag:key=value - The key/value combination of a tag assigned to the resource. 
+ // + // tag-key - The key of a tag assigned to the resource. This filter is independent + // of the tag-value filter. For example, if you use both the filter "tag-key=Purpose" + // and the filter "tag-value=X", you get any resources assigned both the tag + // key Purpose (regardless of what the tag's value is), and the tag value X + // (regardless of what the tag's key is). If you want to list only resources + // where Purpose is X, see the tag:key=value filter. + // + // tag-value - The value of a tag assigned to the resource. This filter is + // independent of the tag-key filter. + // + // type - The type of Spot instance request (one-time | persistent). + // + // launched-availability-zone - The Availability Zone in which the bid is + // launched. + // + // valid-from - The start date of the request. + // + // valid-until - The end date of the request. + Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` + + // One or more Spot instance request IDs. + SpotInstanceRequestIds []*string `locationName:"SpotInstanceRequestId" locationNameList:"SpotInstanceRequestId" type:"list"` +} + +// String returns the string representation +func (s DescribeSpotInstanceRequestsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeSpotInstanceRequestsInput) GoString() string { + return s.String() +} + +// Contains the output of DescribeSpotInstanceRequests. +type DescribeSpotInstanceRequestsOutput struct { + _ struct{} `type:"structure"` + + // One or more Spot instance requests. + SpotInstanceRequests []*SpotInstanceRequest `locationName:"spotInstanceRequestSet" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s DescribeSpotInstanceRequestsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeSpotInstanceRequestsOutput) GoString() string { + return s.String() +} + +// Contains the parameters for DescribeSpotPriceHistory. +type DescribeSpotPriceHistoryInput struct { + _ struct{} `type:"structure"` + + // Filters the results by the specified Availability Zone. + AvailabilityZone *string `locationName:"availabilityZone" type:"string"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The date and time, up to the current date, from which to stop retrieving + // the price history data, in UTC format (for example, YYYY-MM-DDTHH:MM:SSZ). + EndTime *time.Time `locationName:"endTime" type:"timestamp" timestampFormat:"iso8601"` + + // One or more filters. + // + // availability-zone - The Availability Zone for which prices should be returned. + // + // instance-type - The type of instance (for example, m3.medium). + // + // product-description - The product description for the Spot price (Linux/UNIX + // | SUSE Linux | Windows | Linux/UNIX (Amazon VPC) | SUSE Linux (Amazon VPC) + // | Windows (Amazon VPC)). + // + // spot-price - The Spot price. The value must match exactly (or use wildcards; + // greater than or less than comparison is not supported). + // + // timestamp - The timestamp of the Spot price history, in UTC format (for + // example, YYYY-MM-DDTHH:MM:SSZ). You can use wildcards (* and ?). 
Greater + // than or less than comparison is not supported. + Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` + + // Filters the results by the specified instance types. + InstanceTypes []*string `locationName:"InstanceType" type:"list"` + + // The maximum number of results to return in a single call. Specify a value + // between 1 and 1000. The default value is 1000. To retrieve the remaining + // results, make another call with the returned NextToken value. + MaxResults *int64 `locationName:"maxResults" type:"integer"` + + // The token for the next set of results. + NextToken *string `locationName:"nextToken" type:"string"` + + // Filters the results by the specified basic product descriptions. + ProductDescriptions []*string `locationName:"ProductDescription" type:"list"` + + // The date and time, up to the past 90 days, from which to start retrieving + // the price history data, in UTC format (for example, YYYY-MM-DDTHH:MM:SSZ). + StartTime *time.Time `locationName:"startTime" type:"timestamp" timestampFormat:"iso8601"` +} + +// String returns the string representation +func (s DescribeSpotPriceHistoryInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeSpotPriceHistoryInput) GoString() string { + return s.String() +} + +// Contains the output of DescribeSpotPriceHistory. +type DescribeSpotPriceHistoryOutput struct { + _ struct{} `type:"structure"` + + // The token required to retrieve the next set of results. This value is null + // when there are no more results to return. + NextToken *string `locationName:"nextToken" type:"string"` + + // The historical Spot prices. + SpotPriceHistory []*SpotPrice `locationName:"spotPriceHistorySet" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s DescribeSpotPriceHistoryOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeSpotPriceHistoryOutput) GoString() string { + return s.String() +} + +type DescribeSubnetsInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // One or more filters. + // + // availabilityZone - The Availability Zone for the subnet. You can also + // use availability-zone as the filter name. + // + // available-ip-address-count - The number of IP addresses in the subnet + // that are available. + // + // cidrBlock - The CIDR block of the subnet. The CIDR block you specify must + // exactly match the subnet's CIDR block for information to be returned for + // the subnet. You can also use cidr or cidr-block as the filter names. + // + // defaultForAz - Indicates whether this is the default subnet for the Availability + // Zone. You can also use default-for-az as the filter name. + // + // state - The state of the subnet (pending | available). + // + // subnet-id - The ID of the subnet. + // + // tag:key=value - The key/value combination of a tag assigned to the resource. + // + // tag-key - The key of a tag assigned to the resource. This filter is independent + // of the tag-value filter. 
For example, if you use both the filter "tag-key=Purpose"
+ // and the filter "tag-value=X", you get any resources assigned both the tag
+ // key Purpose (regardless of what the tag's value is), and the tag value X
+ // (regardless of what the tag's key is). If you want to list only resources
+ // where Purpose is X, see the tag:key=value filter.
+ //
+ //  tag-value - The value of a tag assigned to the resource. This filter is
+ // independent of the tag-key filter.
+ //
+ //  vpc-id - The ID of the VPC for the subnet.
+ Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"`
+
+ // One or more subnet IDs.
+ //
+ // Default: Describes all your subnets.
+ SubnetIds []*string `locationName:"SubnetId" locationNameList:"SubnetId" type:"list"`
+}
+
+// String returns the string representation
+func (s DescribeSubnetsInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeSubnetsInput) GoString() string {
+ return s.String()
+}
+
+type DescribeSubnetsOutput struct {
+ _ struct{} `type:"structure"`
+
+ // Information about one or more subnets.
+ Subnets []*Subnet `locationName:"subnetSet" locationNameList:"item" type:"list"`
+}
+
+// String returns the string representation
+func (s DescribeSubnetsOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeSubnetsOutput) GoString() string {
+ return s.String()
+}
+
+type DescribeTagsInput struct {
+ _ struct{} `type:"structure"`
+
+ // Checks whether you have the required permissions for the action, without
+ // actually making the request, and provides an error response. If you have
+ // the required permissions, the error response is DryRunOperation. Otherwise,
+ // it is UnauthorizedOperation.
+ DryRun *bool `locationName:"dryRun" type:"boolean"`
+
+ // One or more filters.
+ //
+ //  key - The tag key.
+ //
+ //  resource-id - The resource ID.
+ //
+ //  resource-type - The resource type (customer-gateway | dhcp-options | image
+ // | instance | internet-gateway | network-acl | network-interface | reserved-instances
+ // | route-table | security-group | snapshot | spot-instances-request | subnet
+ // | volume | vpc | vpn-connection | vpn-gateway).
+ //
+ //  value - The tag value.
+ Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"`
+
+ // The maximum number of results to return for the request in a single page.
+ // The remaining results of the initial request can be seen by sending another
+ // request with the returned NextToken value. This value can be between 5 and
+ // 1000; if MaxResults is given a value larger than 1000, only 1000 results
+ // are returned.
+ MaxResults *int64 `locationName:"maxResults" type:"integer"`
+
+ // The token to retrieve the next page of results.
+ NextToken *string `locationName:"nextToken" type:"string"`
+}
+
+// String returns the string representation
+func (s DescribeTagsInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeTagsInput) GoString() string {
+ return s.String()
+}
+
+type DescribeTagsOutput struct {
+ _ struct{} `type:"structure"`
+
+ // The token to use to retrieve the next page of results. This value is null
+ // when there are no more results to return.
+ NextToken *string `locationName:"nextToken" type:"string"`
+
+ // A list of tags.
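+ //
+ // Editor's sketch (illustrative, not generated code): listing instance tags
+ // via DescribeTags, assuming svc := ec2.New(session.New()):
+ //
+ //   out, err := svc.DescribeTags(&ec2.DescribeTagsInput{
+ //       Filters: []*ec2.Filter{{
+ //           Name:   aws.String("resource-type"),
+ //           Values: []*string{aws.String("instance")},
+ //       }},
+ //   })
+ //   if err == nil {
+ //       for _, t := range out.Tags {
+ //           fmt.Println(aws.StringValue(t.Key), aws.StringValue(t.Value))
+ //       }
+ //   }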
+ Tags []*TagDescription `locationName:"tagSet" locationNameList:"item" type:"list"`
+}
+
+// String returns the string representation
+func (s DescribeTagsOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeTagsOutput) GoString() string {
+ return s.String()
+}
+
+type DescribeVolumeAttributeInput struct {
+ _ struct{} `type:"structure"`
+
+ // The volume attribute.
+ Attribute *string `type:"string" enum:"VolumeAttributeName"`
+
+ // Checks whether you have the required permissions for the action, without
+ // actually making the request, and provides an error response. If you have
+ // the required permissions, the error response is DryRunOperation. Otherwise,
+ // it is UnauthorizedOperation.
+ DryRun *bool `locationName:"dryRun" type:"boolean"`
+
+ // The ID of the volume.
+ VolumeId *string `type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s DescribeVolumeAttributeInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeVolumeAttributeInput) GoString() string {
+ return s.String()
+}
+
+type DescribeVolumeAttributeOutput struct {
+ _ struct{} `type:"structure"`
+
+ // The state of the autoEnableIO attribute.
+ AutoEnableIO *AttributeBooleanValue `locationName:"autoEnableIO" type:"structure"`
+
+ // A list of product codes.
+ ProductCodes []*ProductCode `locationName:"productCodes" locationNameList:"item" type:"list"`
+
+ // The ID of the volume.
+ VolumeId *string `locationName:"volumeId" type:"string"`
+}
+
+// String returns the string representation
+func (s DescribeVolumeAttributeOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeVolumeAttributeOutput) GoString() string {
+ return s.String()
+}
+
+type DescribeVolumeStatusInput struct {
+ _ struct{} `type:"structure"`
+
+ // Checks whether you have the required permissions for the action, without
+ // actually making the request, and provides an error response. If you have
+ // the required permissions, the error response is DryRunOperation. Otherwise,
+ // it is UnauthorizedOperation.
+ DryRun *bool `locationName:"dryRun" type:"boolean"`
+
+ // One or more filters.
+ //
+ //  action.code - The action code for the event (for example, enable-volume-io).
+ //
+ //  action.description - A description of the action.
+ //
+ //  action.event-id - The event ID associated with the action.
+ //
+ //  availability-zone - The Availability Zone of the instance.
+ //
+ //  event.description - A description of the event.
+ //
+ //  event.event-id - The event ID.
+ //
+ //  event.event-type - The event type (for io-enabled: passed | failed; for
+ // io-performance: io-performance:degraded | io-performance:severely-degraded
+ // | io-performance:stalled).
+ //
+ //  event.not-after - The latest end time for the event.
+ //
+ //  event.not-before - The earliest start time for the event.
+ //
+ //  volume-status.details-name - The cause for volume-status.status (io-enabled
+ // | io-performance).
+ //
+ //  volume-status.details-status - The status of volume-status.details-name
+ // (for io-enabled: passed | failed; for io-performance: normal | degraded |
+ // severely-degraded | stalled).
+ //
+ //  volume-status.status - The status of the volume (ok | impaired | warning
+ // | insufficient-data).
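+ //
+ // Editor's sketch (illustrative, not generated code): constructing one of
+ // the filters listed above, using only names defined in this package:
+ //
+ //   filters := []*ec2.Filter{{
+ //       Name:   aws.String("volume-status.status"),
+ //       Values: []*string{aws.String("impaired")},
+ //   }}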
+ Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` + + // The maximum number of volume results returned by DescribeVolumeStatus in + // paginated output. When this parameter is used, the request only returns MaxResults + // results in a single page along with a NextToken response element. The remaining + // results of the initial request can be seen by sending another request with + // the returned NextToken value. This value can be between 5 and 1000; if MaxResults + // is given a value larger than 1000, only 1000 results are returned. If this + // parameter is not used, then DescribeVolumeStatus returns all results. You + // cannot specify this parameter and the volume IDs parameter in the same request. + MaxResults *int64 `type:"integer"` + + // The NextToken value to include in a future DescribeVolumeStatus request. + // When the results of the request exceed MaxResults, this value can be used + // to retrieve the next page of results. This value is null when there are no + // more results to return. + NextToken *string `type:"string"` + + // One or more volume IDs. + // + // Default: Describes all your volumes. + VolumeIds []*string `locationName:"VolumeId" locationNameList:"VolumeId" type:"list"` +} + +// String returns the string representation +func (s DescribeVolumeStatusInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeVolumeStatusInput) GoString() string { + return s.String() +} + +type DescribeVolumeStatusOutput struct { + _ struct{} `type:"structure"` + + // The token to use to retrieve the next page of results. This value is null + // when there are no more results to return. + NextToken *string `locationName:"nextToken" type:"string"` + + // A list of volumes. + VolumeStatuses []*VolumeStatusItem `locationName:"volumeStatusSet" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s DescribeVolumeStatusOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeVolumeStatusOutput) GoString() string { + return s.String() +} + +type DescribeVolumesInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // One or more filters. + // + // attachment.attach-time - The time stamp when the attachment initiated. + // + // attachment.delete-on-termination - Whether the volume is deleted on instance + // termination. + // + // attachment.device - The device name that is exposed to the instance (for + // example, /dev/sda1). + // + // attachment.instance-id - The ID of the instance the volume is attached + // to. + // + // attachment.status - The attachment state (attaching | attached | detaching + // | detached). + // + // availability-zone - The Availability Zone in which the volume was created. + // + // create-time - The time stamp when the volume was created. + // + // encrypted - The encryption status of the volume. + // + // size - The size of the volume, in GiB. + // + // snapshot-id - The snapshot from which the volume was created. + // + // status - The status of the volume (creating | available | in-use | deleting + // | deleted | error). 
+ // + // tag:key=value - The key/value combination of a tag assigned to the resource. + // + // tag-key - The key of a tag assigned to the resource. This filter is independent + // of the tag-value filter. For example, if you use both the filter "tag-key=Purpose" + // and the filter "tag-value=X", you get any resources assigned both the tag + // key Purpose (regardless of what the tag's value is), and the tag value X + // (regardless of what the tag's key is). If you want to list only resources + // where Purpose is X, see the tag:key=value filter. + // + // tag-value - The value of a tag assigned to the resource. This filter is + // independent of the tag-key filter. + // + // volume-id - The volume ID. + // + // volume-type - The Amazon EBS volume type. This can be gp2 for General + // Purpose (SSD) volumes, io1 for Provisioned IOPS (SSD) volumes, or standard + // for Magnetic volumes. + Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` + + // The maximum number of volume results returned by DescribeVolumes in paginated + // output. When this parameter is used, DescribeVolumes only returns MaxResults + // results in a single page along with a NextToken response element. The remaining + // results of the initial request can be seen by sending another DescribeVolumes + // request with the returned NextToken value. This value can be between 5 and + // 1000; if MaxResults is given a value larger than 1000, only 1000 results + // are returned. If this parameter is not used, then DescribeVolumes returns + // all results. You cannot specify this parameter and the volume IDs parameter + // in the same request. + MaxResults *int64 `locationName:"maxResults" type:"integer"` + + // The NextToken value returned from a previous paginated DescribeVolumes request + // where MaxResults was used and the results exceeded the value of that parameter. + // Pagination continues from the end of the previous results that returned the + // NextToken value. This value is null when there are no more results to return. + NextToken *string `locationName:"nextToken" type:"string"` + + // One or more volume IDs. + VolumeIds []*string `locationName:"VolumeId" locationNameList:"VolumeId" type:"list"` +} + +// String returns the string representation +func (s DescribeVolumesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeVolumesInput) GoString() string { + return s.String() +} + +type DescribeVolumesOutput struct { + _ struct{} `type:"structure"` + + // The NextToken value to include in a future DescribeVolumes request. When + // the results of a DescribeVolumes request exceed MaxResults, this value can + // be used to retrieve the next page of results. This value is null when there + // are no more results to return. + NextToken *string `locationName:"nextToken" type:"string"` + + // Information about the volumes. + Volumes []*Volume `locationName:"volumeSet" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s DescribeVolumesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeVolumesOutput) GoString() string { + return s.String() +} + +type DescribeVpcAttributeInput struct { + _ struct{} `type:"structure"` + + // The VPC attribute. 
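+ //
+ // Editor's sketch (illustrative, not generated code): querying DNS support
+ // for a VPC, assuming svc := ec2.New(session.New()) and a hypothetical VPC ID:
+ //
+ //   out, err := svc.DescribeVpcAttribute(&ec2.DescribeVpcAttributeInput{
+ //       Attribute: aws.String("enableDnsSupport"), // a VpcAttributeName value
+ //       VpcId:     aws.String("vpc-1a2b3c4d"),     // hypothetical
+ //   })
+ //   if err == nil && out.EnableDnsSupport != nil {
+ //       fmt.Println(aws.BoolValue(out.EnableDnsSupport.Value))
+ //   }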
+ Attribute *string `type:"string" required:"true" enum:"VpcAttributeName"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The ID of the VPC. + VpcId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DescribeVpcAttributeInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeVpcAttributeInput) GoString() string { + return s.String() +} + +type DescribeVpcAttributeOutput struct { + _ struct{} `type:"structure"` + + // Indicates whether the instances launched in the VPC get DNS hostnames. If + // this attribute is true, instances in the VPC get DNS hostnames; otherwise, + // they do not. + EnableDnsHostnames *AttributeBooleanValue `locationName:"enableDnsHostnames" type:"structure"` + + // Indicates whether DNS resolution is enabled for the VPC. If this attribute + // is true, the Amazon DNS server resolves DNS hostnames for your instances + // to their corresponding IP addresses; otherwise, it does not. + EnableDnsSupport *AttributeBooleanValue `locationName:"enableDnsSupport" type:"structure"` + + // The ID of the VPC. + VpcId *string `locationName:"vpcId" type:"string"` +} + +// String returns the string representation +func (s DescribeVpcAttributeOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeVpcAttributeOutput) GoString() string { + return s.String() +} + +type DescribeVpcClassicLinkDnsSupportInput struct { + _ struct{} `type:"structure"` + + // The maximum number of items to return for this request. The request returns + // a token that you can specify in a subsequent call to get the next set of + // results. + MaxResults *int64 `locationName:"maxResults" min:"5" type:"integer"` + + // The token for the next set of items to return. (You received this token from + // a prior call.) + NextToken *string `locationName:"nextToken" min:"1" type:"string"` + + // One or more VPC IDs. + VpcIds []*string `locationNameList:"VpcId" type:"list"` +} + +// String returns the string representation +func (s DescribeVpcClassicLinkDnsSupportInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeVpcClassicLinkDnsSupportInput) GoString() string { + return s.String() +} + +type DescribeVpcClassicLinkDnsSupportOutput struct { + _ struct{} `type:"structure"` + + // The token to use when requesting the next set of items. + NextToken *string `locationName:"nextToken" min:"1" type:"string"` + + // Information about the ClassicLink DNS support status of the VPCs. + Vpcs []*ClassicLinkDnsSupport `locationName:"vpcs" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s DescribeVpcClassicLinkDnsSupportOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeVpcClassicLinkDnsSupportOutput) GoString() string { + return s.String() +} + +type DescribeVpcClassicLinkInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. 
If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // One or more filters. + // + // is-classic-link-enabled - Whether the VPC is enabled for ClassicLink (true + // | false). + // + // tag:key=value - The key/value combination of a tag assigned to the resource. + // + // tag-key - The key of a tag assigned to the resource. This filter is independent + // of the tag-value filter. For example, if you use both the filter "tag-key=Purpose" + // and the filter "tag-value=X", you get any resources assigned both the tag + // key Purpose (regardless of what the tag's value is), and the tag value X + // (regardless of what the tag's key is). If you want to list only resources + // where Purpose is X, see the tag:key=value filter. + // + // tag-value - The value of a tag assigned to the resource. This filter is + // independent of the tag-key filter. + Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` + + // One or more VPCs for which you want to describe the ClassicLink status. + VpcIds []*string `locationName:"VpcId" locationNameList:"VpcId" type:"list"` +} + +// String returns the string representation +func (s DescribeVpcClassicLinkInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeVpcClassicLinkInput) GoString() string { + return s.String() +} + +type DescribeVpcClassicLinkOutput struct { + _ struct{} `type:"structure"` + + // The ClassicLink status of one or more VPCs. + Vpcs []*VpcClassicLink `locationName:"vpcSet" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s DescribeVpcClassicLinkOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeVpcClassicLinkOutput) GoString() string { + return s.String() +} + +type DescribeVpcEndpointServicesInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // The maximum number of items to return for this request. The request returns + // a token that you can specify in a subsequent call to get the next set of + // results. + // + // Constraint: If the value is greater than 1000, we return only 1000 items. + MaxResults *int64 `type:"integer"` + + // The token for the next set of items to return. (You received this token from + // a prior call.) + NextToken *string `type:"string"` +} + +// String returns the string representation +func (s DescribeVpcEndpointServicesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeVpcEndpointServicesInput) GoString() string { + return s.String() +} + +type DescribeVpcEndpointServicesOutput struct { + _ struct{} `type:"structure"` + + // The token to use when requesting the next set of items. If there are no additional + // items to return, the string is empty. + NextToken *string `locationName:"nextToken" type:"string"` + + // A list of supported AWS services. 
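+ //
+ // Editor's sketch (illustrative, not generated code): listing the services
+ // that support VPC endpoints, assuming svc := ec2.New(session.New()):
+ //
+ //   out, err := svc.DescribeVpcEndpointServices(&ec2.DescribeVpcEndpointServicesInput{})
+ //   if err == nil {
+ //       for _, name := range out.ServiceNames {
+ //           fmt.Println(aws.StringValue(name))
+ //       }
+ //   }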
+ ServiceNames []*string `locationName:"serviceNameSet" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s DescribeVpcEndpointServicesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeVpcEndpointServicesOutput) GoString() string { + return s.String() +} + +type DescribeVpcEndpointsInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // One or more filters. + // + // service-name: The name of the AWS service. + // + // vpc-id: The ID of the VPC in which the endpoint resides. + // + // vpc-endpoint-id: The ID of the endpoint. + // + // vpc-endpoint-state: The state of the endpoint. (pending | available | + // deleting | deleted) + Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` + + // The maximum number of items to return for this request. The request returns + // a token that you can specify in a subsequent call to get the next set of + // results. + // + // Constraint: If the value is greater than 1000, we return only 1000 items. + MaxResults *int64 `type:"integer"` + + // The token for the next set of items to return. (You received this token from + // a prior call.) + NextToken *string `type:"string"` + + // One or more endpoint IDs. + VpcEndpointIds []*string `locationName:"VpcEndpointId" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s DescribeVpcEndpointsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeVpcEndpointsInput) GoString() string { + return s.String() +} + +type DescribeVpcEndpointsOutput struct { + _ struct{} `type:"structure"` + + // The token to use when requesting the next set of items. If there are no additional + // items to return, the string is empty. + NextToken *string `locationName:"nextToken" type:"string"` + + // Information about the endpoints. + VpcEndpoints []*VpcEndpoint `locationName:"vpcEndpointSet" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s DescribeVpcEndpointsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeVpcEndpointsOutput) GoString() string { + return s.String() +} + +type DescribeVpcPeeringConnectionsInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // One or more filters. + // + // accepter-vpc-info.cidr-block - The CIDR block of the peer VPC. + // + // accepter-vpc-info.owner-id - The AWS account ID of the owner of the peer + // VPC. + // + // accepter-vpc-info.vpc-id - The ID of the peer VPC. + // + // expiration-time - The expiration date and time for the VPC peering connection. + // + // requester-vpc-info.cidr-block - The CIDR block of the requester's VPC. 
+ // + // requester-vpc-info.owner-id - The AWS account ID of the owner of the requester + // VPC. + // + // requester-vpc-info.vpc-id - The ID of the requester VPC. + // + // status-code - The status of the VPC peering connection (pending-acceptance + // | failed | expired | provisioning | active | deleted | rejected). + // + // status-message - A message that provides more information about the status + // of the VPC peering connection, if applicable. + // + // tag:key=value - The key/value combination of a tag assigned to the resource. + // + // tag-key - The key of a tag assigned to the resource. This filter is independent + // of the tag-value filter. For example, if you use both the filter "tag-key=Purpose" + // and the filter "tag-value=X", you get any resources assigned both the tag + // key Purpose (regardless of what the tag's value is), and the tag value X + // (regardless of what the tag's key is). If you want to list only resources + // where Purpose is X, see the tag:key=value filter. + // + // tag-value - The value of a tag assigned to the resource. This filter is + // independent of the tag-key filter. + // + // vpc-peering-connection-id - The ID of the VPC peering connection. + Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` + + // One or more VPC peering connection IDs. + // + // Default: Describes all your VPC peering connections. + VpcPeeringConnectionIds []*string `locationName:"VpcPeeringConnectionId" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s DescribeVpcPeeringConnectionsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeVpcPeeringConnectionsInput) GoString() string { + return s.String() +} + +type DescribeVpcPeeringConnectionsOutput struct { + _ struct{} `type:"structure"` + + // Information about the VPC peering connections. + VpcPeeringConnections []*VpcPeeringConnection `locationName:"vpcPeeringConnectionSet" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s DescribeVpcPeeringConnectionsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeVpcPeeringConnectionsOutput) GoString() string { + return s.String() +} + +type DescribeVpcsInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // One or more filters. + // + // cidr - The CIDR block of the VPC. The CIDR block you specify must exactly + // match the VPC's CIDR block for information to be returned for the VPC. Must + // contain the slash followed by one or two digits (for example, /28). + // + // dhcp-options-id - The ID of a set of DHCP options. + // + // isDefault - Indicates whether the VPC is the default VPC. + // + // state - The state of the VPC (pending | available). + // + // tag:key=value - The key/value combination of a tag assigned to the resource. + // + // tag-key - The key of a tag assigned to the resource. This filter is independent + // of the tag-value filter. 
For example, if you use both the filter "tag-key=Purpose" + // and the filter "tag-value=X", you get any resources assigned both the tag + // key Purpose (regardless of what the tag's value is), and the tag value X + // (regardless of what the tag's key is). If you want to list only resources + // where Purpose is X, see the tag:key=value filter. + // + // tag-value - The value of a tag assigned to the resource. This filter is + // independent of the tag-key filter. + // + // vpc-id - The ID of the VPC. + Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` + + // One or more VPC IDs. + // + // Default: Describes all your VPCs. + VpcIds []*string `locationName:"VpcId" locationNameList:"VpcId" type:"list"` +} + +// String returns the string representation +func (s DescribeVpcsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeVpcsInput) GoString() string { + return s.String() +} + +type DescribeVpcsOutput struct { + _ struct{} `type:"structure"` + + // Information about one or more VPCs. + Vpcs []*Vpc `locationName:"vpcSet" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s DescribeVpcsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeVpcsOutput) GoString() string { + return s.String() +} + +type DescribeVpnConnectionsInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // One or more filters. + // + // customer-gateway-configuration - The configuration information for the + // customer gateway. + // + // customer-gateway-id - The ID of a customer gateway associated with the + // VPN connection. + // + // state - The state of the VPN connection (pending | available | deleting + // | deleted). + // + // option.static-routes-only - Indicates whether the connection has static + // routes only. Used for devices that do not support Border Gateway Protocol + // (BGP). + // + // route.destination-cidr-block - The destination CIDR block. This corresponds + // to the subnet used in a customer data center. + // + // bgp-asn - The BGP Autonomous System Number (ASN) associated with a BGP + // device. + // + // tag:key=value - The key/value combination of a tag assigned to the resource. + // + // tag-key - The key of a tag assigned to the resource. This filter is independent + // of the tag-value filter. For example, if you use both the filter "tag-key=Purpose" + // and the filter "tag-value=X", you get any resources assigned both the tag + // key Purpose (regardless of what the tag's value is), and the tag value X + // (regardless of what the tag's key is). If you want to list only resources + // where Purpose is X, see the tag:key=value filter. + // + // tag-value - The value of a tag assigned to the resource. This filter is + // independent of the tag-key filter. + // + // type - The type of VPN connection. Currently the only supported type is + // ipsec.1. + // + // vpn-connection-id - The ID of the VPN connection. + // + // vpn-gateway-id - The ID of a virtual private gateway associated with the + // VPN connection. 
+ Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` + + // One or more VPN connection IDs. + // + // Default: Describes your VPN connections. + VpnConnectionIds []*string `locationName:"VpnConnectionId" locationNameList:"VpnConnectionId" type:"list"` +} + +// String returns the string representation +func (s DescribeVpnConnectionsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeVpnConnectionsInput) GoString() string { + return s.String() +} + +type DescribeVpnConnectionsOutput struct { + _ struct{} `type:"structure"` + + // Information about one or more VPN connections. + VpnConnections []*VpnConnection `locationName:"vpnConnectionSet" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s DescribeVpnConnectionsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeVpnConnectionsOutput) GoString() string { + return s.String() +} + +type DescribeVpnGatewaysInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // One or more filters. + // + // attachment.state - The current state of the attachment between the gateway + // and the VPC (attaching | attached | detaching | detached). + // + // attachment.vpc-id - The ID of an attached VPC. + // + // availability-zone - The Availability Zone for the virtual private gateway + // (if applicable). + // + // state - The state of the virtual private gateway (pending | available + // | deleting | deleted). + // + // tag:key=value - The key/value combination of a tag assigned to the resource. + // + // tag-key - The key of a tag assigned to the resource. This filter is independent + // of the tag-value filter. For example, if you use both the filter "tag-key=Purpose" + // and the filter "tag-value=X", you get any resources assigned both the tag + // key Purpose (regardless of what the tag's value is), and the tag value X + // (regardless of what the tag's key is). If you want to list only resources + // where Purpose is X, see the tag:key=value filter. + // + // tag-value - The value of a tag assigned to the resource. This filter is + // independent of the tag-key filter. + // + // type - The type of virtual private gateway. Currently the only supported + // type is ipsec.1. + // + // vpn-gateway-id - The ID of the virtual private gateway. + Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` + + // One or more virtual private gateway IDs. + // + // Default: Describes all your virtual private gateways. + VpnGatewayIds []*string `locationName:"VpnGatewayId" locationNameList:"VpnGatewayId" type:"list"` +} + +// String returns the string representation +func (s DescribeVpnGatewaysInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeVpnGatewaysInput) GoString() string { + return s.String() +} + +type DescribeVpnGatewaysOutput struct { + _ struct{} `type:"structure"` + + // Information about one or more virtual private gateways. 
+ VpnGateways []*VpnGateway `locationName:"vpnGatewaySet" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s DescribeVpnGatewaysOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeVpnGatewaysOutput) GoString() string { + return s.String() +} + +type DetachClassicLinkVpcInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The ID of the instance to unlink from the VPC. + InstanceId *string `locationName:"instanceId" type:"string" required:"true"` + + // The ID of the VPC to which the instance is linked. + VpcId *string `locationName:"vpcId" type:"string" required:"true"` +} + +// String returns the string representation +func (s DetachClassicLinkVpcInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DetachClassicLinkVpcInput) GoString() string { + return s.String() +} + +type DetachClassicLinkVpcOutput struct { + _ struct{} `type:"structure"` + + // Returns true if the request succeeds; otherwise, it returns an error. + Return *bool `locationName:"return" type:"boolean"` +} + +// String returns the string representation +func (s DetachClassicLinkVpcOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DetachClassicLinkVpcOutput) GoString() string { + return s.String() +} + +type DetachInternetGatewayInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The ID of the Internet gateway. + InternetGatewayId *string `locationName:"internetGatewayId" type:"string" required:"true"` + + // The ID of the VPC. + VpcId *string `locationName:"vpcId" type:"string" required:"true"` +} + +// String returns the string representation +func (s DetachInternetGatewayInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DetachInternetGatewayInput) GoString() string { + return s.String() +} + +type DetachInternetGatewayOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DetachInternetGatewayOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DetachInternetGatewayOutput) GoString() string { + return s.String() +} + +type DetachNetworkInterfaceInput struct { + _ struct{} `type:"structure"` + + // The ID of the attachment. + AttachmentId *string `locationName:"attachmentId" type:"string" required:"true"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. 
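+ //
+ // Editor's sketch (illustrative, not generated code): using DryRun to check
+ // permissions without actually detaching, assuming svc := ec2.New(session.New())
+ // and a hypothetical attachment ID; the error code comes back via awserr.Error:
+ //
+ //   _, err := svc.DetachNetworkInterface(&ec2.DetachNetworkInterfaceInput{
+ //       AttachmentId: aws.String("eni-attach-example"), // hypothetical
+ //       DryRun:       aws.Bool(true),
+ //   })
+ //   if awsErr, ok := err.(awserr.Error); ok && awsErr.Code() == "DryRunOperation" {
+ //       // the caller has permission to perform the detach
+ //   }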
+ DryRun *bool `locationName:"dryRun" type:"boolean"` + + // Specifies whether to force a detachment. + Force *bool `locationName:"force" type:"boolean"` +} + +// String returns the string representation +func (s DetachNetworkInterfaceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DetachNetworkInterfaceInput) GoString() string { + return s.String() +} + +type DetachNetworkInterfaceOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DetachNetworkInterfaceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DetachNetworkInterfaceOutput) GoString() string { + return s.String() +} + +type DetachVolumeInput struct { + _ struct{} `type:"structure"` + + // The device name. + Device *string `type:"string"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // Forces detachment if the previous detachment attempt did not occur cleanly + // (for example, logging into an instance, unmounting the volume, and detaching + // normally). This option can lead to data loss or a corrupted file system. + // Use this option only as a last resort to detach a volume from a failed instance. + // The instance won't have an opportunity to flush file system caches or file + // system metadata. If you use this option, you must perform file system check + // and repair procedures. + Force *bool `type:"boolean"` + + // The ID of the instance. + InstanceId *string `type:"string"` + + // The ID of the volume. + VolumeId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DetachVolumeInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DetachVolumeInput) GoString() string { + return s.String() +} + +type DetachVpnGatewayInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The ID of the VPC. + VpcId *string `type:"string" required:"true"` + + // The ID of the virtual private gateway. + VpnGatewayId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DetachVpnGatewayInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DetachVpnGatewayInput) GoString() string { + return s.String() +} + +type DetachVpnGatewayOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DetachVpnGatewayOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DetachVpnGatewayOutput) GoString() string { + return s.String() +} + +// Describes a DHCP configuration option. +type DhcpConfiguration struct { + _ struct{} `type:"structure"` + + // The name of a DHCP option. 
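
Given the data-loss warning on Force, a detach should normally be attempted without it first. A sketch of DetachVolumeInput in use, assuming the same client setup as above (IDs are hypothetical; DetachVolume returns the VolumeAttachment describing the detaching state):

package main

import (
    "fmt"
    "log"

    "github.com/aws/aws-sdk-go/aws"
    "github.com/aws/aws-sdk-go/aws/session"
    "github.com/aws/aws-sdk-go/service/ec2"
)

func main() {
    svc := ec2.New(session.New(&aws.Config{Region: aws.String("us-east-1")}))

    // Detach without Force; Force risks data loss and is a last resort
    // for a failed instance.
    att, err := svc.DetachVolume(&ec2.DetachVolumeInput{
        VolumeId:   aws.String("vol-12345678"), // hypothetical volume ID
        InstanceId: aws.String("i-12345678"),   // hypothetical instance ID
    })
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println("attachment state:", aws.StringValue(att.State))
}
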
+ Key *string `locationName:"key" type:"string"` + + // One or more values for the DHCP option. + Values []*AttributeValue `locationName:"valueSet" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s DhcpConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DhcpConfiguration) GoString() string { + return s.String() +} + +// Describes a set of DHCP options. +type DhcpOptions struct { + _ struct{} `type:"structure"` + + // One or more DHCP options in the set. + DhcpConfigurations []*DhcpConfiguration `locationName:"dhcpConfigurationSet" locationNameList:"item" type:"list"` + + // The ID of the set of DHCP options. + DhcpOptionsId *string `locationName:"dhcpOptionsId" type:"string"` + + // Any tags assigned to the DHCP options set. + Tags []*Tag `locationName:"tagSet" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s DhcpOptions) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DhcpOptions) GoString() string { + return s.String() +} + +type DisableVgwRoutePropagationInput struct { + _ struct{} `type:"structure"` + + // The ID of the virtual private gateway. + GatewayId *string `type:"string" required:"true"` + + // The ID of the route table. + RouteTableId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DisableVgwRoutePropagationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DisableVgwRoutePropagationInput) GoString() string { + return s.String() +} + +type DisableVgwRoutePropagationOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DisableVgwRoutePropagationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DisableVgwRoutePropagationOutput) GoString() string { + return s.String() +} + +type DisableVpcClassicLinkDnsSupportInput struct { + _ struct{} `type:"structure"` + + // The ID of the VPC. + VpcId *string `type:"string"` +} + +// String returns the string representation +func (s DisableVpcClassicLinkDnsSupportInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DisableVpcClassicLinkDnsSupportInput) GoString() string { + return s.String() +} + +type DisableVpcClassicLinkDnsSupportOutput struct { + _ struct{} `type:"structure"` + + // Returns true if the request succeeds; otherwise, it returns an error. + Return *bool `locationName:"return" type:"boolean"` +} + +// String returns the string representation +func (s DisableVpcClassicLinkDnsSupportOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DisableVpcClassicLinkDnsSupportOutput) GoString() string { + return s.String() +} + +type DisableVpcClassicLinkInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The ID of the VPC. 
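
DhcpConfiguration above is a plain key/value-list pair, so reading a DHCP options set is a nested walk over the structs in this file. A sketch using DescribeDhcpOptions (client setup and output handling are illustrative):

package main

import (
    "fmt"
    "log"

    "github.com/aws/aws-sdk-go/aws"
    "github.com/aws/aws-sdk-go/aws/session"
    "github.com/aws/aws-sdk-go/service/ec2"
)

func main() {
    svc := ec2.New(session.New(&aws.Config{Region: aws.String("us-east-1")}))

    out, err := svc.DescribeDhcpOptions(&ec2.DescribeDhcpOptionsInput{})
    if err != nil {
        log.Fatal(err)
    }
    for _, opts := range out.DhcpOptions {
        fmt.Println(aws.StringValue(opts.DhcpOptionsId))
        for _, cfg := range opts.DhcpConfigurations {
            // Each option (e.g. domain-name-servers) carries a list of values.
            fmt.Print("  ", aws.StringValue(cfg.Key), ":")
            for _, v := range cfg.Values {
                fmt.Print(" ", aws.StringValue(v.Value))
            }
            fmt.Println()
        }
    }
}
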
+ VpcId *string `locationName:"vpcId" type:"string" required:"true"` +} + +// String returns the string representation +func (s DisableVpcClassicLinkInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DisableVpcClassicLinkInput) GoString() string { + return s.String() +} + +type DisableVpcClassicLinkOutput struct { + _ struct{} `type:"structure"` + + // Returns true if the request succeeds; otherwise, it returns an error. + Return *bool `locationName:"return" type:"boolean"` +} + +// String returns the string representation +func (s DisableVpcClassicLinkOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DisableVpcClassicLinkOutput) GoString() string { + return s.String() +} + +type DisassociateAddressInput struct { + _ struct{} `type:"structure"` + + // [EC2-VPC] The association ID. Required for EC2-VPC. + AssociationId *string `type:"string"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // [EC2-Classic] The Elastic IP address. Required for EC2-Classic. + PublicIp *string `type:"string"` +} + +// String returns the string representation +func (s DisassociateAddressInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DisassociateAddressInput) GoString() string { + return s.String() +} + +type DisassociateAddressOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DisassociateAddressOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DisassociateAddressOutput) GoString() string { + return s.String() +} + +type DisassociateRouteTableInput struct { + _ struct{} `type:"structure"` + + // The association ID representing the current association between the route + // table and subnet. + AssociationId *string `locationName:"associationId" type:"string" required:"true"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` +} + +// String returns the string representation +func (s DisassociateRouteTableInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DisassociateRouteTableInput) GoString() string { + return s.String() +} + +type DisassociateRouteTableOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DisassociateRouteTableOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DisassociateRouteTableOutput) GoString() string { + return s.String() +} + +// Describes a disk image. +type DiskImage struct { + _ struct{} `type:"structure"` + + // A description of the disk image. + Description *string `type:"string"` + + // Information about the disk image. + Image *DiskImageDetail `type:"structure"` + + // Information about the volume. 
+ Volume *VolumeDetail `type:"structure"` +} + +// String returns the string representation +func (s DiskImage) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DiskImage) GoString() string { + return s.String() +} + +// Describes a disk image. +type DiskImageDescription struct { + _ struct{} `type:"structure"` + + // The checksum computed for the disk image. + Checksum *string `locationName:"checksum" type:"string"` + + // The disk image format. + Format *string `locationName:"format" type:"string" required:"true" enum:"DiskImageFormat"` + + // A presigned URL for the import manifest stored in Amazon S3. For information + // about creating a presigned URL for an Amazon S3 object, read the "Query String + // Request Authentication Alternative" section of the Authenticating REST Requests + // (http://docs.aws.amazon.com/AmazonS3/latest/dev/RESTAuthentication.html) + // topic in the Amazon Simple Storage Service Developer Guide. + ImportManifestUrl *string `locationName:"importManifestUrl" type:"string" required:"true"` + + // The size of the disk image, in GiB. + Size *int64 `locationName:"size" type:"long" required:"true"` +} + +// String returns the string representation +func (s DiskImageDescription) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DiskImageDescription) GoString() string { + return s.String() +} + +// Describes a disk image. +type DiskImageDetail struct { + _ struct{} `type:"structure"` + + // The size of the disk image, in GiB. + Bytes *int64 `locationName:"bytes" type:"long" required:"true"` + + // The disk image format. + Format *string `locationName:"format" type:"string" required:"true" enum:"DiskImageFormat"` + + // A presigned URL for the import manifest stored in Amazon S3 and presented + // here as an Amazon S3 presigned URL. For information about creating a presigned + // URL for an Amazon S3 object, read the "Query String Request Authentication + // Alternative" section of the Authenticating REST Requests (http://docs.aws.amazon.com/AmazonS3/latest/dev/RESTAuthentication.html) + // topic in the Amazon Simple Storage Service Developer Guide. + ImportManifestUrl *string `locationName:"importManifestUrl" type:"string" required:"true"` +} + +// String returns the string representation +func (s DiskImageDetail) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DiskImageDetail) GoString() string { + return s.String() +} + +// Describes a disk image volume. +type DiskImageVolumeDescription struct { + _ struct{} `type:"structure"` + + // The volume identifier. + Id *string `locationName:"id" type:"string" required:"true"` + + // The size of the volume, in GiB. + Size *int64 `locationName:"size" type:"long"` +} + +// String returns the string representation +func (s DiskImageVolumeDescription) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DiskImageVolumeDescription) GoString() string { + return s.String() +} + +// Describes a block device for an EBS volume. +type EbsBlockDevice struct { + _ struct{} `type:"structure"` + + // Indicates whether the EBS volume is deleted on instance termination. + DeleteOnTermination *bool `locationName:"deleteOnTermination" type:"boolean"` + + // Indicates whether the EBS volume is encrypted. Encrypted Amazon EBS volumes + // may only be attached to instances that support Amazon EBS encryption. 
+ Encrypted *bool `locationName:"encrypted" type:"boolean"` + + // The number of I/O operations per second (IOPS) that the volume supports. + // For Provisioned IOPS (SSD) volumes, this represents the number of IOPS that + // are provisioned for the volume. For General Purpose (SSD) volumes, this represents + // the baseline performance of the volume and the rate at which the volume accumulates + // I/O credits for bursting. For more information on General Purpose (SSD) baseline + // performance, I/O credits, and bursting, see Amazon EBS Volume Types (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSVolumeTypes.html) + // in the Amazon Elastic Compute Cloud User Guide. + // + // Constraint: Range is 100 to 20000 for Provisioned IOPS (SSD) volumes and + // 3 to 10000 for General Purpose (SSD) volumes. + // + // Condition: This parameter is required for requests to create io1 volumes; + // it is not used in requests to create standard or gp2 volumes. + Iops *int64 `locationName:"iops" type:"integer"` + + // The ID of the snapshot. + SnapshotId *string `locationName:"snapshotId" type:"string"` + + // The size of the volume, in GiB. + // + // Constraints: 1-1024 for standard volumes, 1-16384 for gp2 volumes, and 4-16384 + // for io1 volumes. If you specify a snapshot, the volume size must be equal + // to or larger than the snapshot size. + // + // Default: If you're creating the volume from a snapshot and don't specify + // a volume size, the default is the snapshot size. + VolumeSize *int64 `locationName:"volumeSize" type:"integer"` + + // The volume type. gp2 for General Purpose (SSD) volumes, io1 for Provisioned + // IOPS (SSD) volumes, and standard for Magnetic volumes. + // + // Default: standard + VolumeType *string `locationName:"volumeType" type:"string" enum:"VolumeType"` +} + +// String returns the string representation +func (s EbsBlockDevice) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s EbsBlockDevice) GoString() string { + return s.String() +} + +// Describes a parameter used to set up an EBS volume in a block device mapping. +type EbsInstanceBlockDevice struct { + _ struct{} `type:"structure"` + + // The time stamp when the attachment initiated. + AttachTime *time.Time `locationName:"attachTime" type:"timestamp" timestampFormat:"iso8601"` + + // Indicates whether the volume is deleted on instance termination. + DeleteOnTermination *bool `locationName:"deleteOnTermination" type:"boolean"` + + // The attachment state. + Status *string `locationName:"status" type:"string" enum:"AttachmentStatus"` + + // The ID of the EBS volume. + VolumeId *string `locationName:"volumeId" type:"string"` +} + +// String returns the string representation +func (s EbsInstanceBlockDevice) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s EbsInstanceBlockDevice) GoString() string { + return s.String() +} + +type EbsInstanceBlockDeviceSpecification struct { + _ struct{} `type:"structure"` + + // Indicates whether the volume is deleted on instance termination. + DeleteOnTermination *bool `locationName:"deleteOnTermination" type:"boolean"` + + // The ID of the EBS volume. 
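
The Iops condition above means the field is set only when creating io1 volumes. A sketch of two block device mappings for a launch request, one gp2 root volume and one io1 data volume (device names and sizes are illustrative):

package main

import (
    "fmt"

    "github.com/aws/aws-sdk-go/aws"
    "github.com/aws/aws-sdk-go/aws/awsutil"
    "github.com/aws/aws-sdk-go/service/ec2"
)

func main() {
    mappings := []*ec2.BlockDeviceMapping{
        {
            DeviceName: aws.String("/dev/sda1"),
            Ebs: &ec2.EbsBlockDevice{
                VolumeType:          aws.String("gp2"), // baseline IOPS derived from size
                VolumeSize:          aws.Int64(20),
                DeleteOnTermination: aws.Bool(true),
            },
        },
        {
            DeviceName: aws.String("/dev/sdf"),
            Ebs: &ec2.EbsBlockDevice{
                VolumeType: aws.String("io1"),
                VolumeSize: aws.Int64(100),
                Iops:       aws.Int64(1000), // required for io1, omitted otherwise
            },
        },
    }
    fmt.Println(awsutil.Prettify(mappings))
}
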
+ VolumeId *string `locationName:"volumeId" type:"string"` +} + +// String returns the string representation +func (s EbsInstanceBlockDeviceSpecification) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s EbsInstanceBlockDeviceSpecification) GoString() string { + return s.String() +} + +type EnableVgwRoutePropagationInput struct { + _ struct{} `type:"structure"` + + // The ID of the virtual private gateway. + GatewayId *string `type:"string" required:"true"` + + // The ID of the route table. + RouteTableId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s EnableVgwRoutePropagationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s EnableVgwRoutePropagationInput) GoString() string { + return s.String() +} + +type EnableVgwRoutePropagationOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s EnableVgwRoutePropagationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s EnableVgwRoutePropagationOutput) GoString() string { + return s.String() +} + +type EnableVolumeIOInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The ID of the volume. + VolumeId *string `locationName:"volumeId" type:"string" required:"true"` +} + +// String returns the string representation +func (s EnableVolumeIOInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s EnableVolumeIOInput) GoString() string { + return s.String() +} + +type EnableVolumeIOOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s EnableVolumeIOOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s EnableVolumeIOOutput) GoString() string { + return s.String() +} + +type EnableVpcClassicLinkDnsSupportInput struct { + _ struct{} `type:"structure"` + + // The ID of the VPC. + VpcId *string `type:"string"` +} + +// String returns the string representation +func (s EnableVpcClassicLinkDnsSupportInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s EnableVpcClassicLinkDnsSupportInput) GoString() string { + return s.String() +} + +type EnableVpcClassicLinkDnsSupportOutput struct { + _ struct{} `type:"structure"` + + // Returns true if the request succeeds; otherwise, it returns an error. + Return *bool `locationName:"return" type:"boolean"` +} + +// String returns the string representation +func (s EnableVpcClassicLinkDnsSupportOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s EnableVpcClassicLinkDnsSupportOutput) GoString() string { + return s.String() +} + +type EnableVpcClassicLinkInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. 
Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The ID of the VPC. + VpcId *string `locationName:"vpcId" type:"string" required:"true"` +} + +// String returns the string representation +func (s EnableVpcClassicLinkInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s EnableVpcClassicLinkInput) GoString() string { + return s.String() +} + +type EnableVpcClassicLinkOutput struct { + _ struct{} `type:"structure"` + + // Returns true if the request succeeds; otherwise, it returns an error. + Return *bool `locationName:"return" type:"boolean"` +} + +// String returns the string representation +func (s EnableVpcClassicLinkOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s EnableVpcClassicLinkOutput) GoString() string { + return s.String() +} + +// Describes a Spot fleet event. +type EventInformation struct { + _ struct{} `type:"structure"` + + // The description of the event. + EventDescription *string `locationName:"eventDescription" type:"string"` + + // The event. + // + // The following are the error events. + // + // iamFleetRoleInvalid - The Spot fleet did not have the required permissions + // either to launch or terminate an instance. + // + // launchSpecTemporarilyBlacklisted - The configuration is not valid and + // several attempts to launch instances have failed. For more information, see + // the description of the event. + // + // spotFleetRequestConfigurationInvalid - The configuration is not valid. + // For more information, see the description of the event. + // + // spotInstanceCountLimitExceeded - You've reached the limit on the number + // of Spot instances that you can launch. + // + // The following are the fleetRequestChange events. + // + // active - The Spot fleet has been validated and Amazon EC2 is attempting + // to maintain the target number of running Spot instances. + // + // cancelled - The Spot fleet is canceled and has no running Spot instances. + // The Spot fleet will be deleted two days after its instances were terminated. + // + // cancelled_running - The Spot fleet is canceled and will not launch additional + // Spot instances, but its existing Spot instances continue to run until they + // are interrupted or terminated. + // + // cancelled_terminating - The Spot fleet is canceled and its Spot instances + // are terminating. + // + // expired - The Spot fleet request has expired. A subsequent event indicates + // that the instances were terminated, if the request was created with TerminateInstancesWithExpiration + // set. + // + // modify_in_progress - A request to modify the Spot fleet request was accepted + // and is in progress. + // + // modify_successful - The Spot fleet request was modified. + // + // price_update - The bid price for a launch configuration was adjusted because + // it was too high. This change is permanent. + // + // submitted - The Spot fleet request is being evaluated and Amazon EC2 is + // preparing to launch the target number of Spot instances. + // + // The following are the instanceChange events. + // + // launched - A bid was fulfilled and a new instance was launched. + // + // terminated - An instance was terminated by the user. + EventSubType *string `locationName:"eventSubType" type:"string"` + + // The ID of the instance. This information is available only for instanceChange + // events. 
+ InstanceId *string `locationName:"instanceId" type:"string"` +} + +// String returns the string representation +func (s EventInformation) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s EventInformation) GoString() string { + return s.String() +} + +// Describes an instance export task. +type ExportTask struct { + _ struct{} `type:"structure"` + + // A description of the resource being exported. + Description *string `locationName:"description" type:"string"` + + // The ID of the export task. + ExportTaskId *string `locationName:"exportTaskId" type:"string"` + + // Information about the export task. + ExportToS3Task *ExportToS3Task `locationName:"exportToS3" type:"structure"` + + // Information about the instance to export. + InstanceExportDetails *InstanceExportDetails `locationName:"instanceExport" type:"structure"` + + // The state of the export task. + State *string `locationName:"state" type:"string" enum:"ExportTaskState"` + + // The status message related to the export task. + StatusMessage *string `locationName:"statusMessage" type:"string"` +} + +// String returns the string representation +func (s ExportTask) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ExportTask) GoString() string { + return s.String() +} + +// Describes the format and location for an instance export task. +type ExportToS3Task struct { + _ struct{} `type:"structure"` + + // The container format used to combine disk images with metadata (such as OVF). + // If absent, only the disk image is exported. + ContainerFormat *string `locationName:"containerFormat" type:"string" enum:"ContainerFormat"` + + // The format for the exported image. + DiskImageFormat *string `locationName:"diskImageFormat" type:"string" enum:"DiskImageFormat"` + + // The S3 bucket for the destination image. The destination bucket must exist + // and grant WRITE and READ_ACP permissions to the AWS account vm-import-export@amazon.com. + S3Bucket *string `locationName:"s3Bucket" type:"string"` + + // The encryption key for your S3 bucket. + S3Key *string `locationName:"s3Key" type:"string"` +} + +// String returns the string representation +func (s ExportToS3Task) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ExportToS3Task) GoString() string { + return s.String() +} + +// Describes an instance export task. +type ExportToS3TaskSpecification struct { + _ struct{} `type:"structure"` + + // The container format used to combine disk images with metadata (such as OVF). + // If absent, only the disk image is exported. + ContainerFormat *string `locationName:"containerFormat" type:"string" enum:"ContainerFormat"` + + // The format for the exported image. + DiskImageFormat *string `locationName:"diskImageFormat" type:"string" enum:"DiskImageFormat"` + + // The S3 bucket for the destination image. The destination bucket must exist + // and grant WRITE and READ_ACP permissions to the AWS account vm-import-export@amazon.com. + S3Bucket *string `locationName:"s3Bucket" type:"string"` + + // The image is written to a single object in the S3 bucket at the S3 key s3prefix + // + exportTaskId + '.' + diskImageFormat. 
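
ExportToS3TaskSpecification above is the request-side twin of ExportToS3Task and is consumed by CreateInstanceExportTask. A sketch, assuming the bucket already grants the documented WRITE and READ_ACP permissions (bucket, instance ID, and prefix are hypothetical):

package main

import (
    "fmt"
    "log"

    "github.com/aws/aws-sdk-go/aws"
    "github.com/aws/aws-sdk-go/aws/session"
    "github.com/aws/aws-sdk-go/service/ec2"
)

func main() {
    svc := ec2.New(session.New(&aws.Config{Region: aws.String("us-east-1")}))

    out, err := svc.CreateInstanceExportTask(&ec2.CreateInstanceExportTaskInput{
        InstanceId:        aws.String("i-12345678"),
        TargetEnvironment: aws.String("vmware"),
        ExportToS3Task: &ec2.ExportToS3TaskSpecification{
            DiskImageFormat: aws.String("VMDK"),
            S3Bucket:        aws.String("my-export-bucket"),
            // Object key becomes S3Prefix + exportTaskId + "." + diskImageFormat.
            S3Prefix: aws.String("exports/"),
        },
    })
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println(aws.StringValue(out.ExportTask.ExportTaskId))
}
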
+ S3Prefix *string `locationName:"s3Prefix" type:"string"` +} + +// String returns the string representation +func (s ExportToS3TaskSpecification) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ExportToS3TaskSpecification) GoString() string { + return s.String() +} + +// A filter name and value pair that is used to return a more specific list +// of results. Filters can be used to match a set of resources by various criteria, +// such as tags, attributes, or IDs. +type Filter struct { + _ struct{} `type:"structure"` + + // The name of the filter. Filter names are case-sensitive. + Name *string `type:"string"` + + // One or more filter values. Filter values are case-sensitive. + Values []*string `locationName:"Value" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s Filter) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Filter) GoString() string { + return s.String() +} + +// Describes a flow log. +type FlowLog struct { + _ struct{} `type:"structure"` + + // The date and time the flow log was created. + CreationTime *time.Time `locationName:"creationTime" type:"timestamp" timestampFormat:"iso8601"` + + // Information about the error that occurred. Rate limited indicates that CloudWatch + // logs throttling has been applied for one or more network interfaces, or that + // you've reached the limit on the number of CloudWatch Logs log groups that + // you can create. Access error indicates that the IAM role associated with + // the flow log does not have sufficient permissions to publish to CloudWatch + // Logs. Unknown error indicates an internal error. + DeliverLogsErrorMessage *string `locationName:"deliverLogsErrorMessage" type:"string"` + + // The ARN of the IAM role that posts logs to CloudWatch Logs. + DeliverLogsPermissionArn *string `locationName:"deliverLogsPermissionArn" type:"string"` + + // The status of the logs delivery (SUCCESS | FAILED). + DeliverLogsStatus *string `locationName:"deliverLogsStatus" type:"string"` + + // The flow log ID. + FlowLogId *string `locationName:"flowLogId" type:"string"` + + // The status of the flow log (ACTIVE). + FlowLogStatus *string `locationName:"flowLogStatus" type:"string"` + + // The name of the flow log group. + LogGroupName *string `locationName:"logGroupName" type:"string"` + + // The ID of the resource on which the flow log was created. + ResourceId *string `locationName:"resourceId" type:"string"` + + // The type of traffic captured for the flow log. + TrafficType *string `locationName:"trafficType" type:"string" enum:"TrafficType"` +} + +// String returns the string representation +func (s FlowLog) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s FlowLog) GoString() string { + return s.String() +} + +type GetConsoleOutputInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The ID of the instance. 
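
Filter above is the generic building block for the Describe* operations in this file. A sketch that narrows DescribeInstances by VPC and state (filter names follow the EC2 filter documentation; the VPC ID is hypothetical):

package main

import (
    "fmt"
    "log"

    "github.com/aws/aws-sdk-go/aws"
    "github.com/aws/aws-sdk-go/aws/session"
    "github.com/aws/aws-sdk-go/service/ec2"
)

func main() {
    svc := ec2.New(session.New(&aws.Config{Region: aws.String("us-east-1")}))

    out, err := svc.DescribeInstances(&ec2.DescribeInstancesInput{
        Filters: []*ec2.Filter{
            {Name: aws.String("vpc-id"), Values: []*string{aws.String("vpc-12345678")}},
            {Name: aws.String("instance-state-name"), Values: []*string{aws.String("running")}},
        },
    })
    if err != nil {
        log.Fatal(err)
    }
    for _, r := range out.Reservations {
        for _, i := range r.Instances {
            fmt.Println(aws.StringValue(i.InstanceId))
        }
    }
}
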
+ InstanceId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s GetConsoleOutputInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetConsoleOutputInput) GoString() string { + return s.String() +} + +type GetConsoleOutputOutput struct { + _ struct{} `type:"structure"` + + // The ID of the instance. + InstanceId *string `locationName:"instanceId" type:"string"` + + // The console output, Base64 encoded. If using a command line tool, the tools + // decode the output for you. + Output *string `locationName:"output" type:"string"` + + // The time the output was last updated. + Timestamp *time.Time `locationName:"timestamp" type:"timestamp" timestampFormat:"iso8601"` +} + +// String returns the string representation +func (s GetConsoleOutputOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetConsoleOutputOutput) GoString() string { + return s.String() +} + +type GetPasswordDataInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The ID of the Windows instance. + InstanceId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s GetPasswordDataInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetPasswordDataInput) GoString() string { + return s.String() +} + +type GetPasswordDataOutput struct { + _ struct{} `type:"structure"` + + // The ID of the Windows instance. + InstanceId *string `locationName:"instanceId" type:"string"` + + // The password of the instance. + PasswordData *string `locationName:"passwordData" type:"string"` + + // The time the data was last updated. + Timestamp *time.Time `locationName:"timestamp" type:"timestamp" timestampFormat:"iso8601"` +} + +// String returns the string representation +func (s GetPasswordDataOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetPasswordDataOutput) GoString() string { + return s.String() +} + +// Describes a security group. +type GroupIdentifier struct { + _ struct{} `type:"structure"` + + // The ID of the security group. + GroupId *string `locationName:"groupId" type:"string"` + + // The name of the security group. + GroupName *string `locationName:"groupName" type:"string"` +} + +// String returns the string representation +func (s GroupIdentifier) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GroupIdentifier) GoString() string { + return s.String() +} + +// Describes an event in the history of the Spot fleet request. +type HistoryRecord struct { + _ struct{} `type:"structure"` + + // Information about the event. + EventInformation *EventInformation `locationName:"eventInformation" type:"structure" required:"true"` + + // The event type. + // + // error - Indicates an error with the Spot fleet request. + // + // fleetRequestChange - Indicates a change in the status or configuration + // of the Spot fleet request. + // + // instanceChange - Indicates that an instance was launched or terminated. 
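
Because Output is Base64 encoded as noted above, callers of the raw SDK decode it themselves. A sketch (instance ID hypothetical):

package main

import (
    "encoding/base64"
    "fmt"
    "log"

    "github.com/aws/aws-sdk-go/aws"
    "github.com/aws/aws-sdk-go/aws/session"
    "github.com/aws/aws-sdk-go/service/ec2"
)

func main() {
    svc := ec2.New(session.New(&aws.Config{Region: aws.String("us-east-1")}))

    out, err := svc.GetConsoleOutput(&ec2.GetConsoleOutputInput{
        InstanceId: aws.String("i-12345678"),
    })
    if err != nil {
        log.Fatal(err)
    }
    // The API returns the console text Base64 encoded; decode it for display.
    text, err := base64.StdEncoding.DecodeString(aws.StringValue(out.Output))
    if err != nil {
        log.Fatal(err)
    }
    fmt.Printf("%s", text)
}
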
+ EventType *string `locationName:"eventType" type:"string" required:"true" enum:"EventType"` + + // The date and time of the event, in UTC format (for example, YYYY-MM-DDTHH:MM:SSZ). + Timestamp *time.Time `locationName:"timestamp" type:"timestamp" timestampFormat:"iso8601" required:"true"` +} + +// String returns the string representation +func (s HistoryRecord) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s HistoryRecord) GoString() string { + return s.String() +} + +// Describes the properties of the Dedicated host. +type Host struct { + _ struct{} `type:"structure"` + + // Whether auto-placement is on or off. + AutoPlacement *string `locationName:"autoPlacement" type:"string" enum:"AutoPlacement"` + + // The Availability Zone of the Dedicated host. + AvailabilityZone *string `locationName:"availabilityZone" type:"string"` + + // The number of new instances that can be launched onto the Dedicated host. + AvailableCapacity *AvailableCapacity `locationName:"availableCapacity" type:"structure"` + + // Unique, case-sensitive identifier you provide to ensure idempotency of the + // request. For more information, see How to Ensure Idempotency (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Run_Instance_Idempotency.html) + // in the Amazon Elastic Compute Cloud User Guide. + ClientToken *string `locationName:"clientToken" type:"string"` + + // The ID of the Dedicated host. + HostId *string `locationName:"hostId" type:"string"` + + // The hardware specifications of the Dedicated host. + HostProperties *HostProperties `locationName:"hostProperties" type:"structure"` + + // The reservation ID of the Dedicated host. This returns a null response if + // the Dedicated host doesn't have an associated reservation. + HostReservationId *string `locationName:"hostReservationId" type:"string"` + + // The IDs and instance type that are currently running on the Dedicated host. + Instances []*HostInstance `locationName:"instances" locationNameList:"item" type:"list"` + + // The Dedicated host's state. + State *string `locationName:"state" type:"string" enum:"AllocationState"` +} + +// String returns the string representation +func (s Host) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Host) GoString() string { + return s.String() +} + +type HostInstance struct { + _ struct{} `type:"structure"` + + // the IDs of instances that are running on the Dedicated host. + InstanceId *string `locationName:"instanceId" type:"string"` + + // The instance type size (e.g., m3.medium) of the running instance. + InstanceType *string `locationName:"instanceType" type:"string"` +} + +// String returns the string representation +func (s HostInstance) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s HostInstance) GoString() string { + return s.String() +} + +type HostProperties struct { + _ struct{} `type:"structure"` + + // The number of cores on the Dedicated host. + Cores *int64 `locationName:"cores" type:"integer"` + + // The instance type size that the Dedicated host supports (e.g., m3.medium). + InstanceType *string `locationName:"instanceType" type:"string"` + + // The number of sockets on the Dedicated host. + Sockets *int64 `locationName:"sockets" type:"integer"` + + // The number of vCPUs on the Dedicated host. 
+ TotalVCpus *int64 `locationName:"totalVCpus" type:"integer"`
+}
+
+// String returns the string representation
+func (s HostProperties) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s HostProperties) GoString() string {
+ return s.String()
+}
+
+// Describes an IAM instance profile.
+type IamInstanceProfile struct {
+ _ struct{} `type:"structure"`
+
+ // The Amazon Resource Name (ARN) of the instance profile.
+ Arn *string `locationName:"arn" type:"string"`
+
+ // The ID of the instance profile.
+ Id *string `locationName:"id" type:"string"`
+}
+
+// String returns the string representation
+func (s IamInstanceProfile) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s IamInstanceProfile) GoString() string {
+ return s.String()
+}
+
+// Describes an IAM instance profile.
+type IamInstanceProfileSpecification struct {
+ _ struct{} `type:"structure"`
+
+ // The Amazon Resource Name (ARN) of the instance profile.
+ Arn *string `locationName:"arn" type:"string"`
+
+ // The name of the instance profile.
+ Name *string `locationName:"name" type:"string"`
+}
+
+// String returns the string representation
+func (s IamInstanceProfileSpecification) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s IamInstanceProfileSpecification) GoString() string {
+ return s.String()
+}
+
+// Describes the ICMP type and code.
+type IcmpTypeCode struct {
+ _ struct{} `type:"structure"`
+
+ // The ICMP code. A value of -1 means all codes for the specified ICMP type.
+ Code *int64 `locationName:"code" type:"integer"`
+
+ // The ICMP type. A value of -1 means all types.
+ Type *int64 `locationName:"type" type:"integer"`
+}
+
+// String returns the string representation
+func (s IcmpTypeCode) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s IcmpTypeCode) GoString() string {
+ return s.String()
+}
+
+// Describes the ID format for a resource.
+type IdFormat struct {
+ _ struct{} `type:"structure"`
+
+ // The date in UTC at which you are permanently switched over to using longer
+ // IDs.
+ Deadline *time.Time `locationName:"deadline" type:"timestamp" timestampFormat:"iso8601"`
+
+ // The type of resource.
+ Resource *string `locationName:"resource" type:"string"`
+
+ // Indicates whether longer IDs (17-character IDs) are enabled for the resource.
+ UseLongIds *bool `locationName:"useLongIds" type:"boolean"`
+}
+
+// String returns the string representation
+func (s IdFormat) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s IdFormat) GoString() string {
+ return s.String()
+}
+
+// Describes an image.
+type Image struct {
+ _ struct{} `type:"structure"`
+
+ // The architecture of the image.
+ Architecture *string `locationName:"architecture" type:"string" enum:"ArchitectureValues"`
+
+ // Any block device mapping entries.
+ BlockDeviceMappings []*BlockDeviceMapping `locationName:"blockDeviceMapping" locationNameList:"item" type:"list"`
+
+ // The date and time the image was created.
+ CreationDate *string `locationName:"creationDate" type:"string"`
+
+ // The description of the AMI that was provided during image creation.
+ Description *string `locationName:"description" type:"string"`
+
+ // The hypervisor type of the image.
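
IcmpTypeCode is consumed by the network ACL operations; with protocol 1 (ICMP), the type/code pair narrows the rule. A sketch that allows inbound echo requests, with a hypothetical ACL ID and rule number:

package main

import (
    "log"

    "github.com/aws/aws-sdk-go/aws"
    "github.com/aws/aws-sdk-go/aws/session"
    "github.com/aws/aws-sdk-go/service/ec2"
)

func main() {
    svc := ec2.New(session.New(&aws.Config{Region: aws.String("us-east-1")}))

    _, err := svc.CreateNetworkAclEntry(&ec2.CreateNetworkAclEntryInput{
        NetworkAclId: aws.String("acl-12345678"),
        RuleNumber:   aws.Int64(110),
        Protocol:     aws.String("1"), // ICMP
        RuleAction:   aws.String("allow"),
        Egress:       aws.Bool(false),
        CidrBlock:    aws.String("0.0.0.0/0"),
        IcmpTypeCode: &ec2.IcmpTypeCode{
            Type: aws.Int64(8),  // echo request
            Code: aws.Int64(-1), // all codes for this type
        },
    })
    if err != nil {
        log.Fatal(err)
    }
}
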
+ Hypervisor *string `locationName:"hypervisor" type:"string" enum:"HypervisorType"` + + // The ID of the AMI. + ImageId *string `locationName:"imageId" type:"string"` + + // The location of the AMI. + ImageLocation *string `locationName:"imageLocation" type:"string"` + + // The AWS account alias (for example, amazon, self) or the AWS account ID of + // the AMI owner. + ImageOwnerAlias *string `locationName:"imageOwnerAlias" type:"string"` + + // The type of image. + ImageType *string `locationName:"imageType" type:"string" enum:"ImageTypeValues"` + + // The kernel associated with the image, if any. Only applicable for machine + // images. + KernelId *string `locationName:"kernelId" type:"string"` + + // The name of the AMI that was provided during image creation. + Name *string `locationName:"name" type:"string"` + + // The AWS account ID of the image owner. + OwnerId *string `locationName:"imageOwnerId" type:"string"` + + // The value is Windows for Windows AMIs; otherwise blank. + Platform *string `locationName:"platform" type:"string" enum:"PlatformValues"` + + // Any product codes associated with the AMI. + ProductCodes []*ProductCode `locationName:"productCodes" locationNameList:"item" type:"list"` + + // Indicates whether the image has public launch permissions. The value is true + // if this image has public launch permissions or false if it has only implicit + // and explicit launch permissions. + Public *bool `locationName:"isPublic" type:"boolean"` + + // The RAM disk associated with the image, if any. Only applicable for machine + // images. + RamdiskId *string `locationName:"ramdiskId" type:"string"` + + // The device name of the root device (for example, /dev/sda1 or /dev/xvda). + RootDeviceName *string `locationName:"rootDeviceName" type:"string"` + + // The type of root device used by the AMI. The AMI can use an EBS volume or + // an instance store volume. + RootDeviceType *string `locationName:"rootDeviceType" type:"string" enum:"DeviceType"` + + // Specifies whether enhanced networking is enabled. + SriovNetSupport *string `locationName:"sriovNetSupport" type:"string"` + + // The current state of the AMI. If the state is available, the image is successfully + // registered and can be used to launch an instance. + State *string `locationName:"imageState" type:"string" enum:"ImageState"` + + // The reason for the state change. + StateReason *StateReason `locationName:"stateReason" type:"structure"` + + // Any tags assigned to the image. + Tags []*Tag `locationName:"tagSet" locationNameList:"item" type:"list"` + + // The type of virtualization of the AMI. + VirtualizationType *string `locationName:"virtualizationType" type:"string" enum:"VirtualizationType"` +} + +// String returns the string representation +func (s Image) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Image) GoString() string { + return s.String() +} + +// Describes the disk container object for an import image task. +type ImageDiskContainer struct { + _ struct{} `type:"structure"` + + // The description of the disk image. + Description *string `type:"string"` + + // The block device mapping for the disk. + DeviceName *string `type:"string"` + + // The format of the disk image being imported. + // + // Valid values: RAW | VHD | VMDK | OVA + Format *string `type:"string"` + + // The ID of the EBS snapshot to be used for importing the snapshot. + SnapshotId *string `type:"string"` + + // The URL to the Amazon S3-based disk image being imported. 
The URL can either + // be a https URL (https://..) or an Amazon S3 URL (s3://..) + Url *string `type:"string"` + + // The S3 bucket for the disk image. + UserBucket *UserBucket `type:"structure"` +} + +// String returns the string representation +func (s ImageDiskContainer) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ImageDiskContainer) GoString() string { + return s.String() +} + +type ImportImageInput struct { + _ struct{} `type:"structure"` + + // The architecture of the virtual machine. + // + // Valid values: i386 | x86_64 + Architecture *string `type:"string"` + + // The client-specific data. + ClientData *ClientData `type:"structure"` + + // The token to enable idempotency for VM import requests. + ClientToken *string `type:"string"` + + // A description string for the import image task. + Description *string `type:"string"` + + // Information about the disk containers. + DiskContainers []*ImageDiskContainer `locationName:"DiskContainer" locationNameList:"item" type:"list"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // The target hypervisor platform. + // + // Valid values: xen + Hypervisor *string `type:"string"` + + // The license type to be used for the Amazon Machine Image (AMI) after importing. + // + // Note: You may only use BYOL if you have existing licenses with rights to + // use these licenses in a third party cloud like AWS. For more information, + // see VM Import/Export Prerequisites (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/VMImportPrerequisites.html) + // in the Amazon Elastic Compute Cloud User Guide. + // + // Valid values: AWS | BYOL + LicenseType *string `type:"string"` + + // The operating system of the virtual machine. + // + // Valid values: Windows | Linux + Platform *string `type:"string"` + + // The name of the role to use when not using the default role, 'vmimport'. + RoleName *string `type:"string"` +} + +// String returns the string representation +func (s ImportImageInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ImportImageInput) GoString() string { + return s.String() +} + +type ImportImageOutput struct { + _ struct{} `type:"structure"` + + // The architecture of the virtual machine. + Architecture *string `locationName:"architecture" type:"string"` + + // A description of the import task. + Description *string `locationName:"description" type:"string"` + + // The target hypervisor of the import task. + Hypervisor *string `locationName:"hypervisor" type:"string"` + + // The ID of the Amazon Machine Image (AMI) created by the import task. + ImageId *string `locationName:"imageId" type:"string"` + + // The task ID of the import image task. + ImportTaskId *string `locationName:"importTaskId" type:"string"` + + // The license type of the virtual machine. + LicenseType *string `locationName:"licenseType" type:"string"` + + // The operating system of the virtual machine. + Platform *string `locationName:"platform" type:"string"` + + // The progress of the task. + Progress *string `locationName:"progress" type:"string"` + + // Information about the snapshots. 
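
A sketch of ImportImageInput assembled from the pieces above, pointing at an OVA through UserBucket rather than a presigned Url (bucket and key are hypothetical; the default 'vmimport' role is assumed to exist per the linked prerequisites):

package main

import (
    "fmt"
    "log"

    "github.com/aws/aws-sdk-go/aws"
    "github.com/aws/aws-sdk-go/aws/session"
    "github.com/aws/aws-sdk-go/service/ec2"
)

func main() {
    svc := ec2.New(session.New(&aws.Config{Region: aws.String("us-east-1")}))

    out, err := svc.ImportImage(&ec2.ImportImageInput{
        Description: aws.String("imported web server"),
        LicenseType: aws.String("AWS"),
        DiskContainers: []*ec2.ImageDiskContainer{
            {
                Format: aws.String("OVA"),
                UserBucket: &ec2.UserBucket{
                    S3Bucket: aws.String("my-import-bucket"),
                    S3Key:    aws.String("images/webserver.ova"),
                },
            },
        },
    })
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println("import task:", aws.StringValue(out.ImportTaskId))
}
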
+ SnapshotDetails []*SnapshotDetail `locationName:"snapshotDetailSet" locationNameList:"item" type:"list"`
+
+ // A brief status of the task.
+ Status *string `locationName:"status" type:"string"`
+
+ // A detailed status message of the import task.
+ StatusMessage *string `locationName:"statusMessage" type:"string"`
+}
+
+// String returns the string representation
+func (s ImportImageOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ImportImageOutput) GoString() string {
+ return s.String()
+}
+
+// Describes an import image task.
+type ImportImageTask struct {
+ _ struct{} `type:"structure"`
+
+ // The architecture of the virtual machine.
+ //
+ // Valid values: i386 | x86_64
+ Architecture *string `locationName:"architecture" type:"string"`
+
+ // A description of the import task.
+ Description *string `locationName:"description" type:"string"`
+
+ // The target hypervisor for the import task.
+ //
+ // Valid values: xen
+ Hypervisor *string `locationName:"hypervisor" type:"string"`
+
+ // The ID of the Amazon Machine Image (AMI) of the imported virtual machine.
+ ImageId *string `locationName:"imageId" type:"string"`
+
+ // The ID of the import image task.
+ ImportTaskId *string `locationName:"importTaskId" type:"string"`
+
+ // The license type of the virtual machine.
+ LicenseType *string `locationName:"licenseType" type:"string"`
+
+ // The operating system of the virtual machine.
+ Platform *string `locationName:"platform" type:"string"`
+
+ // The percentage of progress of the import image task.
+ Progress *string `locationName:"progress" type:"string"`
+
+ // Information about the snapshots.
+ SnapshotDetails []*SnapshotDetail `locationName:"snapshotDetailSet" locationNameList:"item" type:"list"`
+
+ // A brief status for the import image task.
+ Status *string `locationName:"status" type:"string"`
+
+ // A descriptive status message for the import image task.
+ StatusMessage *string `locationName:"statusMessage" type:"string"`
+}
+
+// String returns the string representation
+func (s ImportImageTask) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ImportImageTask) GoString() string {
+ return s.String()
+}
+
+type ImportInstanceInput struct {
+ _ struct{} `type:"structure"`
+
+ // A description for the instance being imported.
+ Description *string `locationName:"description" type:"string"`
+
+ // The disk image.
+ DiskImages []*DiskImage `locationName:"diskImage" type:"list"`
+
+ // Checks whether you have the required permissions for the action, without
+ // actually making the request, and provides an error response. If you have
+ // the required permissions, the error response is DryRunOperation. Otherwise,
+ // it is UnauthorizedOperation.
+ DryRun *bool `locationName:"dryRun" type:"boolean"`
+
+ // The launch specification.
+ LaunchSpecification *ImportInstanceLaunchSpecification `locationName:"launchSpecification" type:"structure"`
+
+ // The instance operating system.
+ Platform *string `locationName:"platform" type:"string" required:"true" enum:"PlatformValues"`
+}
+
+// String returns the string representation
+func (s ImportInstanceInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ImportInstanceInput) GoString() string {
+ return s.String()
+}
+
+// Describes the launch specification for VM import.
+type ImportInstanceLaunchSpecification struct { + _ struct{} `type:"structure"` + + // Reserved. + AdditionalInfo *string `locationName:"additionalInfo" type:"string"` + + // The architecture of the instance. + Architecture *string `locationName:"architecture" type:"string" enum:"ArchitectureValues"` + + // One or more security group IDs. + GroupIds []*string `locationName:"GroupId" locationNameList:"SecurityGroupId" type:"list"` + + // One or more security group names. + GroupNames []*string `locationName:"GroupName" locationNameList:"SecurityGroup" type:"list"` + + // Indicates whether an instance stops or terminates when you initiate shutdown + // from the instance (using the operating system command for system shutdown). + InstanceInitiatedShutdownBehavior *string `locationName:"instanceInitiatedShutdownBehavior" type:"string" enum:"ShutdownBehavior"` + + // The instance type. For more information about the instance types that you + // can import, see Before You Get Started (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/VMImportPrerequisites.html) + // in the Amazon Elastic Compute Cloud User Guide. + InstanceType *string `locationName:"instanceType" type:"string" enum:"InstanceType"` + + // Indicates whether monitoring is enabled. + Monitoring *bool `locationName:"monitoring" type:"boolean"` + + // The placement information for the instance. + Placement *Placement `locationName:"placement" type:"structure"` + + // [EC2-VPC] An available IP address from the IP address range of the subnet. + PrivateIpAddress *string `locationName:"privateIpAddress" type:"string"` + + // [EC2-VPC] The ID of the subnet in which to launch the instance. + SubnetId *string `locationName:"subnetId" type:"string"` + + // The Base64-encoded MIME user data to be made available to the instance. + UserData *UserData `locationName:"userData" type:"structure"` +} + +// String returns the string representation +func (s ImportInstanceLaunchSpecification) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ImportInstanceLaunchSpecification) GoString() string { + return s.String() +} + +type ImportInstanceOutput struct { + _ struct{} `type:"structure"` + + // Information about the conversion task. + ConversionTask *ConversionTask `locationName:"conversionTask" type:"structure"` +} + +// String returns the string representation +func (s ImportInstanceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ImportInstanceOutput) GoString() string { + return s.String() +} + +// Describes an import instance task. +type ImportInstanceTaskDetails struct { + _ struct{} `type:"structure"` + + // A description of the task. + Description *string `locationName:"description" type:"string"` + + // The ID of the instance. + InstanceId *string `locationName:"instanceId" type:"string"` + + // The instance operating system. + Platform *string `locationName:"platform" type:"string" enum:"PlatformValues"` + + // One or more volumes. + Volumes []*ImportInstanceVolumeDetailItem `locationName:"volumes" locationNameList:"item" type:"list" required:"true"` +} + +// String returns the string representation +func (s ImportInstanceTaskDetails) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ImportInstanceTaskDetails) GoString() string { + return s.String() +} + +// Describes an import volume task. 
+type ImportInstanceVolumeDetailItem struct { + _ struct{} `type:"structure"` + + // The Availability Zone where the resulting instance will reside. + AvailabilityZone *string `locationName:"availabilityZone" type:"string" required:"true"` + + // The number of bytes converted so far. + BytesConverted *int64 `locationName:"bytesConverted" type:"long" required:"true"` + + // A description of the task. + Description *string `locationName:"description" type:"string"` + + // The image. + Image *DiskImageDescription `locationName:"image" type:"structure" required:"true"` + + // The status of the import of this particular disk image. + Status *string `locationName:"status" type:"string" required:"true"` + + // The status information or errors related to the disk image. + StatusMessage *string `locationName:"statusMessage" type:"string"` + + // The volume. + Volume *DiskImageVolumeDescription `locationName:"volume" type:"structure" required:"true"` +} + +// String returns the string representation +func (s ImportInstanceVolumeDetailItem) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ImportInstanceVolumeDetailItem) GoString() string { + return s.String() +} + +type ImportKeyPairInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // A unique name for the key pair. + KeyName *string `locationName:"keyName" type:"string" required:"true"` + + // The public key. You must base64 encode the public key material before sending + // it to AWS. + PublicKeyMaterial []byte `locationName:"publicKeyMaterial" type:"blob" required:"true"` +} + +// String returns the string representation +func (s ImportKeyPairInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ImportKeyPairInput) GoString() string { + return s.String() +} + +type ImportKeyPairOutput struct { + _ struct{} `type:"structure"` + + // The MD5 public key fingerprint as specified in section 4 of RFC 4716. + KeyFingerprint *string `locationName:"keyFingerprint" type:"string"` + + // The key pair name you provided. + KeyName *string `locationName:"keyName" type:"string"` +} + +// String returns the string representation +func (s ImportKeyPairOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ImportKeyPairOutput) GoString() string { + return s.String() +} + +type ImportSnapshotInput struct { + _ struct{} `type:"structure"` + + // The client-specific data. + ClientData *ClientData `type:"structure"` + + // Token to enable idempotency for VM import requests. + ClientToken *string `type:"string"` + + // The description string for the import snapshot task. + Description *string `type:"string"` + + // Information about the disk container. + DiskContainer *SnapshotDiskContainer `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. 
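
The note on PublicKeyMaterial describes the wire format; the Go SDK marshals blob fields itself, so raw key bytes can be passed. A sketch (path and key name are illustrative):

package main

import (
    "fmt"
    "io/ioutil"
    "log"

    "github.com/aws/aws-sdk-go/aws"
    "github.com/aws/aws-sdk-go/aws/session"
    "github.com/aws/aws-sdk-go/service/ec2"
)

func main() {
    svc := ec2.New(session.New(&aws.Config{Region: aws.String("us-east-1")}))

    // Raw public key bytes; the SDK base64-encodes blob fields on the wire.
    material, err := ioutil.ReadFile("/home/user/.ssh/id_rsa.pub")
    if err != nil {
        log.Fatal(err)
    }
    out, err := svc.ImportKeyPair(&ec2.ImportKeyPairInput{
        KeyName:           aws.String("deploy-key"),
        PublicKeyMaterial: material,
    })
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println("fingerprint:", aws.StringValue(out.KeyFingerprint))
}
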
+ DryRun *bool `type:"boolean"` + + // The name of the role to use when not using the default role, 'vmimport'. + RoleName *string `type:"string"` +} + +// String returns the string representation +func (s ImportSnapshotInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ImportSnapshotInput) GoString() string { + return s.String() +} + +type ImportSnapshotOutput struct { + _ struct{} `type:"structure"` + + // A description of the import snapshot task. + Description *string `locationName:"description" type:"string"` + + // The ID of the import snapshot task. + ImportTaskId *string `locationName:"importTaskId" type:"string"` + + // Information about the import snapshot task. + SnapshotTaskDetail *SnapshotTaskDetail `locationName:"snapshotTaskDetail" type:"structure"` +} + +// String returns the string representation +func (s ImportSnapshotOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ImportSnapshotOutput) GoString() string { + return s.String() +} + +// Describes an import snapshot task. +type ImportSnapshotTask struct { + _ struct{} `type:"structure"` + + // A description of the import snapshot task. + Description *string `locationName:"description" type:"string"` + + // The ID of the import snapshot task. + ImportTaskId *string `locationName:"importTaskId" type:"string"` + + // Describes an import snapshot task. + SnapshotTaskDetail *SnapshotTaskDetail `locationName:"snapshotTaskDetail" type:"structure"` +} + +// String returns the string representation +func (s ImportSnapshotTask) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ImportSnapshotTask) GoString() string { + return s.String() +} + +type ImportVolumeInput struct { + _ struct{} `type:"structure"` + + // The Availability Zone for the resulting EBS volume. + AvailabilityZone *string `locationName:"availabilityZone" type:"string" required:"true"` + + // A description of the volume. + Description *string `locationName:"description" type:"string"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The disk image. + Image *DiskImageDetail `locationName:"image" type:"structure" required:"true"` + + // The volume size. + Volume *VolumeDetail `locationName:"volume" type:"structure" required:"true"` +} + +// String returns the string representation +func (s ImportVolumeInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ImportVolumeInput) GoString() string { + return s.String() +} + +type ImportVolumeOutput struct { + _ struct{} `type:"structure"` + + // Information about the conversion task. + ConversionTask *ConversionTask `locationName:"conversionTask" type:"structure"` +} + +// String returns the string representation +func (s ImportVolumeOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ImportVolumeOutput) GoString() string { + return s.String() +} + +// Describes an import volume task. +type ImportVolumeTaskDetails struct { + _ struct{} `type:"structure"` + + // The Availability Zone where the resulting volume will reside. 
+ AvailabilityZone *string `locationName:"availabilityZone" type:"string" required:"true"` + + // The number of bytes converted so far. + BytesConverted *int64 `locationName:"bytesConverted" type:"long" required:"true"` + + // The description you provided when starting the import volume task. + Description *string `locationName:"description" type:"string"` + + // The image. + Image *DiskImageDescription `locationName:"image" type:"structure" required:"true"` + + // The volume. + Volume *DiskImageVolumeDescription `locationName:"volume" type:"structure" required:"true"` +} + +// String returns the string representation +func (s ImportVolumeTaskDetails) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ImportVolumeTaskDetails) GoString() string { + return s.String() +} + +// Describes an instance. +type Instance struct { + _ struct{} `type:"structure"` + + // The AMI launch index, which can be used to find this instance in the launch + // group. + AmiLaunchIndex *int64 `locationName:"amiLaunchIndex" type:"integer"` + + // The architecture of the image. + Architecture *string `locationName:"architecture" type:"string" enum:"ArchitectureValues"` + + // Any block device mapping entries for the instance. + BlockDeviceMappings []*InstanceBlockDeviceMapping `locationName:"blockDeviceMapping" locationNameList:"item" type:"list"` + + // The idempotency token you provided when you launched the instance, if applicable. + ClientToken *string `locationName:"clientToken" type:"string"` + + // Indicates whether the instance is optimized for EBS I/O. This optimization + // provides dedicated throughput to Amazon EBS and an optimized configuration + // stack to provide optimal I/O performance. This optimization isn't available + // with all instance types. Additional usage charges apply when using an EBS + // Optimized instance. + EbsOptimized *bool `locationName:"ebsOptimized" type:"boolean"` + + // The hypervisor type of the instance. + Hypervisor *string `locationName:"hypervisor" type:"string" enum:"HypervisorType"` + + // The IAM instance profile associated with the instance, if applicable. + IamInstanceProfile *IamInstanceProfile `locationName:"iamInstanceProfile" type:"structure"` + + // The ID of the AMI used to launch the instance. + ImageId *string `locationName:"imageId" type:"string"` + + // The ID of the instance. + InstanceId *string `locationName:"instanceId" type:"string"` + + // Indicates whether this is a Spot instance. + InstanceLifecycle *string `locationName:"instanceLifecycle" type:"string" enum:"InstanceLifecycleType"` + + // The instance type. + InstanceType *string `locationName:"instanceType" type:"string" enum:"InstanceType"` + + // The kernel associated with this instance, if applicable. + KernelId *string `locationName:"kernelId" type:"string"` + + // The name of the key pair, if this instance was launched with an associated + // key pair. + KeyName *string `locationName:"keyName" type:"string"` + + // The time the instance was launched. + LaunchTime *time.Time `locationName:"launchTime" type:"timestamp" timestampFormat:"iso8601"` + + // The monitoring information for the instance. + Monitoring *Monitoring `locationName:"monitoring" type:"structure"` + + // [EC2-VPC] One or more network interfaces for the instance. + NetworkInterfaces []*InstanceNetworkInterface `locationName:"networkInterfaceSet" locationNameList:"item" type:"list"` + + // The location where the instance launched, if applicable. 
+ Placement *Placement `locationName:"placement" type:"structure"` + + // The value is Windows for Windows instances; otherwise blank. + Platform *string `locationName:"platform" type:"string" enum:"PlatformValues"` + + // The private DNS name assigned to the instance. This DNS name can only be + // used inside the Amazon EC2 network. This name is not available until the + // instance enters the running state. For EC2-VPC, this name is only available + // if you've enabled DNS hostnames for your VPC. + PrivateDnsName *string `locationName:"privateDnsName" type:"string"` + + // The private IP address assigned to the instance. + PrivateIpAddress *string `locationName:"privateIpAddress" type:"string"` + + // The product codes attached to this instance, if applicable. + ProductCodes []*ProductCode `locationName:"productCodes" locationNameList:"item" type:"list"` + + // The public DNS name assigned to the instance. This name is not available + // until the instance enters the running state. For EC2-VPC, this name is only + // available if you've enabled DNS hostnames for your VPC. + PublicDnsName *string `locationName:"dnsName" type:"string"` + + // The public IP address assigned to the instance, if applicable. + PublicIpAddress *string `locationName:"ipAddress" type:"string"` + + // The RAM disk associated with this instance, if applicable. + RamdiskId *string `locationName:"ramdiskId" type:"string"` + + // The root device name (for example, /dev/sda1 or /dev/xvda). + RootDeviceName *string `locationName:"rootDeviceName" type:"string"` + + // The root device type used by the AMI. The AMI can use an EBS volume or an + // instance store volume. + RootDeviceType *string `locationName:"rootDeviceType" type:"string" enum:"DeviceType"` + + // One or more security groups for the instance. + SecurityGroups []*GroupIdentifier `locationName:"groupSet" locationNameList:"item" type:"list"` + + // Specifies whether to enable an instance launched in a VPC to perform NAT. + // This controls whether source/destination checking is enabled on the instance. + // A value of true means checking is enabled, and false means checking is disabled. + // The value must be false for the instance to perform NAT. For more information, + // see NAT Instances (http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_NAT_Instance.html) + // in the Amazon Virtual Private Cloud User Guide. + SourceDestCheck *bool `locationName:"sourceDestCheck" type:"boolean"` + + // If the request is a Spot instance request, the ID of the request. + SpotInstanceRequestId *string `locationName:"spotInstanceRequestId" type:"string"` + + // Specifies whether enhanced networking is enabled. + SriovNetSupport *string `locationName:"sriovNetSupport" type:"string"` + + // The current state of the instance. + State *InstanceState `locationName:"instanceState" type:"structure"` + + // The reason for the most recent state transition. + StateReason *StateReason `locationName:"stateReason" type:"structure"` + + // The reason for the most recent state transition. This might be an empty string. + StateTransitionReason *string `locationName:"reason" type:"string"` + + // [EC2-VPC] The ID of the subnet in which the instance is running. + SubnetId *string `locationName:"subnetId" type:"string"` + + // Any tags assigned to the instance. + Tags []*Tag `locationName:"tagSet" locationNameList:"item" type:"list"` + + // The virtualization type of the instance. 
+ VirtualizationType *string `locationName:"virtualizationType" type:"string" enum:"VirtualizationType"`
+
+ // [EC2-VPC] The ID of the VPC in which the instance is running.
+ VpcId *string `locationName:"vpcId" type:"string"`
+}
+
+// String returns the string representation
+func (s Instance) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s Instance) GoString() string {
+ return s.String()
+}
+
+// Describes a block device mapping.
+type InstanceBlockDeviceMapping struct {
+ _ struct{} `type:"structure"`
+
+ // The device name exposed to the instance (for example, /dev/sdh or xvdh).
+ DeviceName *string `locationName:"deviceName" type:"string"`
+
+ // Parameters used to automatically set up EBS volumes when the instance is
+ // launched.
+ Ebs *EbsInstanceBlockDevice `locationName:"ebs" type:"structure"`
+}
+
+// String returns the string representation
+func (s InstanceBlockDeviceMapping) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s InstanceBlockDeviceMapping) GoString() string {
+ return s.String()
+}
+
+// Describes a block device mapping entry.
+type InstanceBlockDeviceMappingSpecification struct {
+ _ struct{} `type:"structure"`
+
+ // The device name exposed to the instance (for example, /dev/sdh or xvdh).
+ DeviceName *string `locationName:"deviceName" type:"string"`
+
+ // Parameters used to automatically set up EBS volumes when the instance is
+ // launched.
+ Ebs *EbsInstanceBlockDeviceSpecification `locationName:"ebs" type:"structure"`
+
+ // Suppresses the specified device included in the block device mapping.
+ NoDevice *string `locationName:"noDevice" type:"string"`
+
+ // The virtual device name.
+ VirtualName *string `locationName:"virtualName" type:"string"`
+}
+
+// String returns the string representation
+func (s InstanceBlockDeviceMappingSpecification) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s InstanceBlockDeviceMappingSpecification) GoString() string {
+ return s.String()
+}
+
+// Information about the instance type that the Dedicated host supports.
+type InstanceCapacity struct {
+ _ struct{} `type:"structure"`
+
+ // The number of instances that can still be launched onto the Dedicated host.
+ AvailableCapacity *int64 `locationName:"availableCapacity" type:"integer"`
+
+ // The instance type size supported by the Dedicated host.
+ InstanceType *string `locationName:"instanceType" type:"string"`
+
+ // The total number of instances that can be launched onto the Dedicated host.
+ TotalCapacity *int64 `locationName:"totalCapacity" type:"integer"`
+}
+
+// String returns the string representation
+func (s InstanceCapacity) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s InstanceCapacity) GoString() string {
+ return s.String()
+}
+
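+// A minimal sketch of reading InstanceCount values, assuming listing is a
+// *ec2.ReservedInstancesListing returned by DescribeReservedInstancesListings
+// (the variable names are illustrative, not part of this API):
+//
+//     for _, ic := range listing.InstanceCounts {
+//         fmt.Printf("%s: %d\n", aws.StringValue(ic.State), aws.Int64Value(ic.InstanceCount))
+//     }
+
+// Describes a Reserved Instance listing state.
+type InstanceCount struct {
+ _ struct{} `type:"structure"`
+
+ // The number of listed Reserved Instances in the specified state.
+ InstanceCount *int64 `locationName:"instanceCount" type:"integer"`
+
+ // The states of the listed Reserved Instances.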
+ State *string `locationName:"state" type:"string" enum:"ListingState"` +} + +// String returns the string representation +func (s InstanceCount) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s InstanceCount) GoString() string { + return s.String() +} + +// Describes an instance to export. +type InstanceExportDetails struct { + _ struct{} `type:"structure"` + + // The ID of the resource being exported. + InstanceId *string `locationName:"instanceId" type:"string"` + + // The target virtualization environment. + TargetEnvironment *string `locationName:"targetEnvironment" type:"string" enum:"ExportEnvironment"` +} + +// String returns the string representation +func (s InstanceExportDetails) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s InstanceExportDetails) GoString() string { + return s.String() +} + +// Describes the monitoring information of the instance. +type InstanceMonitoring struct { + _ struct{} `type:"structure"` + + // The ID of the instance. + InstanceId *string `locationName:"instanceId" type:"string"` + + // The monitoring information. + Monitoring *Monitoring `locationName:"monitoring" type:"structure"` +} + +// String returns the string representation +func (s InstanceMonitoring) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s InstanceMonitoring) GoString() string { + return s.String() +} + +// Describes a network interface. +type InstanceNetworkInterface struct { + _ struct{} `type:"structure"` + + // The association information for an Elastic IP associated with the network + // interface. + Association *InstanceNetworkInterfaceAssociation `locationName:"association" type:"structure"` + + // The network interface attachment. + Attachment *InstanceNetworkInterfaceAttachment `locationName:"attachment" type:"structure"` + + // The description. + Description *string `locationName:"description" type:"string"` + + // One or more security groups. + Groups []*GroupIdentifier `locationName:"groupSet" locationNameList:"item" type:"list"` + + // The MAC address. + MacAddress *string `locationName:"macAddress" type:"string"` + + // The ID of the network interface. + NetworkInterfaceId *string `locationName:"networkInterfaceId" type:"string"` + + // The ID of the AWS account that created the network interface. + OwnerId *string `locationName:"ownerId" type:"string"` + + // The private DNS name. + PrivateDnsName *string `locationName:"privateDnsName" type:"string"` + + // The IP address of the network interface within the subnet. + PrivateIpAddress *string `locationName:"privateIpAddress" type:"string"` + + // The private IP addresses associated with the network interface. + PrivateIpAddresses []*InstancePrivateIpAddress `locationName:"privateIpAddressesSet" locationNameList:"item" type:"list"` + + // Indicates whether to validate network traffic to or from this network interface. + SourceDestCheck *bool `locationName:"sourceDestCheck" type:"boolean"` + + // The status of the network interface. + Status *string `locationName:"status" type:"string" enum:"NetworkInterfaceStatus"` + + // The ID of the subnet. + SubnetId *string `locationName:"subnetId" type:"string"` + + // The ID of the VPC. 
+ VpcId *string `locationName:"vpcId" type:"string"`
+}
+
+// String returns the string representation
+func (s InstanceNetworkInterface) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s InstanceNetworkInterface) GoString() string {
+ return s.String()
+}
+
+// Describes association information for an Elastic IP address.
+type InstanceNetworkInterfaceAssociation struct {
+ _ struct{} `type:"structure"`
+
+ // The ID of the owner of the Elastic IP address.
+ IpOwnerId *string `locationName:"ipOwnerId" type:"string"`
+
+ // The public DNS name.
+ PublicDnsName *string `locationName:"publicDnsName" type:"string"`
+
+ // The public IP address or Elastic IP address bound to the network interface.
+ PublicIp *string `locationName:"publicIp" type:"string"`
+}
+
+// String returns the string representation
+func (s InstanceNetworkInterfaceAssociation) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s InstanceNetworkInterfaceAssociation) GoString() string {
+ return s.String()
+}
+
+// Describes a network interface attachment.
+type InstanceNetworkInterfaceAttachment struct {
+ _ struct{} `type:"structure"`
+
+ // The time stamp when the attachment was initiated.
+ AttachTime *time.Time `locationName:"attachTime" type:"timestamp" timestampFormat:"iso8601"`
+
+ // The ID of the network interface attachment.
+ AttachmentId *string `locationName:"attachmentId" type:"string"`
+
+ // Indicates whether the network interface is deleted when the instance is terminated.
+ DeleteOnTermination *bool `locationName:"deleteOnTermination" type:"boolean"`
+
+ // The index of the device on the instance for the network interface attachment.
+ DeviceIndex *int64 `locationName:"deviceIndex" type:"integer"`
+
+ // The attachment state.
+ Status *string `locationName:"status" type:"string" enum:"AttachmentStatus"`
+}
+
+// String returns the string representation
+func (s InstanceNetworkInterfaceAttachment) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s InstanceNetworkInterfaceAttachment) GoString() string {
+ return s.String()
+}
+
+// Describes a network interface.
+type InstanceNetworkInterfaceSpecification struct {
+ _ struct{} `type:"structure"`
+
+ // Indicates whether to assign a public IP address to an instance you launch
+ // in a VPC. The public IP address can only be assigned to a network interface
+ // for eth0, and can only be assigned to a new network interface, not an existing
+ // one. You cannot specify more than one network interface in the request. If
+ // launching into a default subnet, the default value is true.
+ AssociatePublicIpAddress *bool `locationName:"associatePublicIpAddress" type:"boolean"`
+
+ // If set to true, the interface is deleted when the instance is terminated.
+ // You can specify true only if creating a new network interface when launching
+ // an instance.
+ DeleteOnTermination *bool `locationName:"deleteOnTermination" type:"boolean"`
+
+ // The description of the network interface. Applies only if creating a network
+ // interface when launching an instance.
+ Description *string `locationName:"description" type:"string"`
+
+ // The index of the device on the instance for the network interface attachment.
+ // If you are specifying a network interface in a RunInstances request, you
+ // must provide the device index.
+ DeviceIndex *int64 `locationName:"deviceIndex" type:"integer"`
+
+ // The IDs of the security groups for the network interface. Applies only if
+ // creating a network interface when launching an instance.
+ Groups []*string `locationName:"SecurityGroupId" locationNameList:"SecurityGroupId" type:"list"`
+
+ // The ID of the network interface.
+ NetworkInterfaceId *string `locationName:"networkInterfaceId" type:"string"`
+
+ // The private IP address of the network interface. Applies only if creating
+ // a network interface when launching an instance.
+ PrivateIpAddress *string `locationName:"privateIpAddress" type:"string"`
+
+ // One or more private IP addresses to assign to the network interface. Only
+ // one private IP address can be designated as primary.
+ PrivateIpAddresses []*PrivateIpAddressSpecification `locationName:"privateIpAddressesSet" queryName:"PrivateIpAddresses" locationNameList:"item" type:"list"`
+
+ // The number of secondary private IP addresses. You can't specify this option
+ // and specify more than one private IP address using the private IP addresses
+ // option.
+ SecondaryPrivateIpAddressCount *int64 `locationName:"secondaryPrivateIpAddressCount" type:"integer"`
+
+ // The ID of the subnet associated with the network interface. Applies only if
+ // creating a network interface when launching an instance.
+ SubnetId *string `locationName:"subnetId" type:"string"`
+}
+
+// String returns the string representation
+func (s InstanceNetworkInterfaceSpecification) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s InstanceNetworkInterfaceSpecification) GoString() string {
+ return s.String()
+}
+
+// Describes a private IP address.
+type InstancePrivateIpAddress struct {
+ _ struct{} `type:"structure"`
+
+ // The association information for an Elastic IP address for the network interface.
+ Association *InstanceNetworkInterfaceAssociation `locationName:"association" type:"structure"`
+
+ // Indicates whether this IP address is the primary private IP address of the
+ // network interface.
+ Primary *bool `locationName:"primary" type:"boolean"`
+
+ // The private DNS name.
+ PrivateDnsName *string `locationName:"privateDnsName" type:"string"`
+
+ // The private IP address of the network interface.
+ PrivateIpAddress *string `locationName:"privateIpAddress" type:"string"`
+}
+
+// String returns the string representation
+func (s InstancePrivateIpAddress) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s InstancePrivateIpAddress) GoString() string {
+ return s.String()
+}
+
+// Describes the current state of the instance.
+type InstanceState struct {
+ _ struct{} `type:"structure"`
+
+ // The low byte represents the state. The high byte is an opaque internal value
+ // and should be ignored.
+ //
+ // 0 : pending
+ //
+ // 16 : running
+ //
+ // 32 : shutting-down
+ //
+ // 48 : terminated
+ //
+ // 64 : stopping
+ //
+ // 80 : stopped
+ Code *int64 `locationName:"code" type:"integer"`
+
+ // The current state of the instance.
+ Name *string `locationName:"name" type:"string" enum:"InstanceStateName"`
+}
+
+// String returns the string representation
+func (s InstanceState) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s InstanceState) GoString() string {
+ return s.String()
+}
+
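+// The Code value above carries the state only in its low byte; a minimal
+// sketch of reading it from a described instance (inst is an assumed
+// *ec2.Instance):
+//
+//     code := aws.Int64Value(inst.State.Code) & 0xFF // mask off the opaque high byte
+//     name := aws.StringValue(inst.State.Name)       // e.g. "running" when code is 16
+
+// Describes an instance state change.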
+type InstanceStateChange struct { + _ struct{} `type:"structure"` + + // The current state of the instance. + CurrentState *InstanceState `locationName:"currentState" type:"structure"` + + // The ID of the instance. + InstanceId *string `locationName:"instanceId" type:"string"` + + // The previous state of the instance. + PreviousState *InstanceState `locationName:"previousState" type:"structure"` +} + +// String returns the string representation +func (s InstanceStateChange) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s InstanceStateChange) GoString() string { + return s.String() +} + +// Describes the status of an instance. +type InstanceStatus struct { + _ struct{} `type:"structure"` + + // The Availability Zone of the instance. + AvailabilityZone *string `locationName:"availabilityZone" type:"string"` + + // Any scheduled events associated with the instance. + Events []*InstanceStatusEvent `locationName:"eventsSet" locationNameList:"item" type:"list"` + + // The ID of the instance. + InstanceId *string `locationName:"instanceId" type:"string"` + + // The intended state of the instance. DescribeInstanceStatus requires that + // an instance be in the running state. + InstanceState *InstanceState `locationName:"instanceState" type:"structure"` + + // Reports impaired functionality that stems from issues internal to the instance, + // such as impaired reachability. + InstanceStatus *InstanceStatusSummary `locationName:"instanceStatus" type:"structure"` + + // Reports impaired functionality that stems from issues related to the systems + // that support an instance, such as hardware failures and network connectivity + // problems. + SystemStatus *InstanceStatusSummary `locationName:"systemStatus" type:"structure"` +} + +// String returns the string representation +func (s InstanceStatus) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s InstanceStatus) GoString() string { + return s.String() +} + +// Describes the instance status. +type InstanceStatusDetails struct { + _ struct{} `type:"structure"` + + // The time when a status check failed. For an instance that was launched and + // impaired, this is the time when the instance was launched. + ImpairedSince *time.Time `locationName:"impairedSince" type:"timestamp" timestampFormat:"iso8601"` + + // The type of instance status. + Name *string `locationName:"name" type:"string" enum:"StatusName"` + + // The status. + Status *string `locationName:"status" type:"string" enum:"StatusType"` +} + +// String returns the string representation +func (s InstanceStatusDetails) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s InstanceStatusDetails) GoString() string { + return s.String() +} + +// Describes a scheduled event for an instance. +type InstanceStatusEvent struct { + _ struct{} `type:"structure"` + + // The event code. + Code *string `locationName:"code" type:"string" enum:"EventCode"` + + // A description of the event. + // + // After a scheduled event is completed, it can still be described for up to + // a week. If the event has been completed, this description starts with the + // following text: [Completed]. + Description *string `locationName:"description" type:"string"` + + // The latest scheduled end time for the event. 
+ NotAfter *time.Time `locationName:"notAfter" type:"timestamp" timestampFormat:"iso8601"`
+
+ // The earliest scheduled start time for the event.
+ NotBefore *time.Time `locationName:"notBefore" type:"timestamp" timestampFormat:"iso8601"`
+}
+
+// String returns the string representation
+func (s InstanceStatusEvent) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s InstanceStatusEvent) GoString() string {
+ return s.String()
+}
+
+// Describes the status of an instance.
+type InstanceStatusSummary struct {
+ _ struct{} `type:"structure"`
+
+ // The system instance health or application instance health.
+ Details []*InstanceStatusDetails `locationName:"details" locationNameList:"item" type:"list"`
+
+ // The status.
+ Status *string `locationName:"status" type:"string" enum:"SummaryStatus"`
+}
+
+// String returns the string representation
+func (s InstanceStatusSummary) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s InstanceStatusSummary) GoString() string {
+ return s.String()
+}
+
+// Describes an Internet gateway.
+type InternetGateway struct {
+ _ struct{} `type:"structure"`
+
+ // Any VPCs attached to the Internet gateway.
+ Attachments []*InternetGatewayAttachment `locationName:"attachmentSet" locationNameList:"item" type:"list"`
+
+ // The ID of the Internet gateway.
+ InternetGatewayId *string `locationName:"internetGatewayId" type:"string"`
+
+ // Any tags assigned to the Internet gateway.
+ Tags []*Tag `locationName:"tagSet" locationNameList:"item" type:"list"`
+}
+
+// String returns the string representation
+func (s InternetGateway) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s InternetGateway) GoString() string {
+ return s.String()
+}
+
+// Describes the attachment of a VPC to an Internet gateway.
+type InternetGatewayAttachment struct {
+ _ struct{} `type:"structure"`
+
+ // The current state of the attachment.
+ State *string `locationName:"state" type:"string" enum:"AttachmentStatus"`
+
+ // The ID of the VPC.
+ VpcId *string `locationName:"vpcId" type:"string"`
+}
+
+// String returns the string representation
+func (s InternetGatewayAttachment) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s InternetGatewayAttachment) GoString() string {
+ return s.String()
+}
+
+// Describes a security group rule.
+type IpPermission struct {
+ _ struct{} `type:"structure"`
+
+ // The start of the port range for the TCP and UDP protocols, or an ICMP type
+ // number. A value of -1 indicates all ICMP types.
+ FromPort *int64 `locationName:"fromPort" type:"integer"`
+
+ // The IP protocol name (for tcp, udp, and icmp) or number (see Protocol Numbers
+ // (http://www.iana.org/assignments/protocol-numbers/protocol-numbers.xhtml)).
+ //
+ // [EC2-VPC only] When you authorize or revoke security group rules, you can
+ // use -1 to specify all.
+ IpProtocol *string `locationName:"ipProtocol" type:"string"`
+
+ // One or more IP ranges.
+ IpRanges []*IpRange `locationName:"ipRanges" locationNameList:"item" type:"list"`
+
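+ // A minimal sketch of an IpPermission describing inbound TCP port 22 from
+ // one CIDR block (all values are illustrative):
+ //
+ //     perm := &ec2.IpPermission{
+ //         IpProtocol: aws.String("tcp"),
+ //         FromPort:   aws.Int64(22),
+ //         ToPort:     aws.Int64(22),
+ //         IpRanges:   []*ec2.IpRange{{CidrIp: aws.String("203.0.113.0/24")}},
+ //     }
+
+ // (Valid for AuthorizeSecurityGroupEgress, RevokeSecurityGroupEgress and DescribeSecurityGroups
+ // only) One or more prefix list IDs for an AWS service. In an AuthorizeSecurityGroupEgress
+ // request, this is the AWS service that you want to access through a VPC endpoint
+ // from instances associated with the security group.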
+ PrefixListIds []*PrefixListId `locationName:"prefixListIds" locationNameList:"item" type:"list"`
+
+ // The end of the port range for the TCP and UDP protocols, or an ICMP code.
+ // A value of -1 indicates all ICMP codes for the specified ICMP type.
+ ToPort *int64 `locationName:"toPort" type:"integer"`
+
+ // One or more security group and AWS account ID pairs.
+ UserIdGroupPairs []*UserIdGroupPair `locationName:"groups" locationNameList:"item" type:"list"`
+}
+
+// String returns the string representation
+func (s IpPermission) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s IpPermission) GoString() string {
+ return s.String()
+}
+
+// Describes an IP range.
+type IpRange struct {
+ _ struct{} `type:"structure"`
+
+ // The CIDR range. You can either specify a CIDR range or a source security
+ // group, not both.
+ CidrIp *string `locationName:"cidrIp" type:"string"`
+}
+
+// String returns the string representation
+func (s IpRange) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s IpRange) GoString() string {
+ return s.String()
+}
+
+// Describes a key pair.
+type KeyPairInfo struct {
+ _ struct{} `type:"structure"`
+
+ // If you used CreateKeyPair to create the key pair, this is the SHA-1 digest
+ // of the DER encoded private key. If you used ImportKeyPair to provide AWS
+ // the public key, this is the MD5 public key fingerprint as specified in section
+ // 4 of RFC 4716.
+ KeyFingerprint *string `locationName:"keyFingerprint" type:"string"`
+
+ // The name of the key pair.
+ KeyName *string `locationName:"keyName" type:"string"`
+}
+
+// String returns the string representation
+func (s KeyPairInfo) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s KeyPairInfo) GoString() string {
+ return s.String()
+}
+
+// Describes a launch permission.
+type LaunchPermission struct {
+ _ struct{} `type:"structure"`
+
+ // The name of the group.
+ Group *string `locationName:"group" type:"string" enum:"PermissionGroup"`
+
+ // The AWS account ID.
+ UserId *string `locationName:"userId" type:"string"`
+}
+
+// String returns the string representation
+func (s LaunchPermission) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s LaunchPermission) GoString() string {
+ return s.String()
+}
+
+// Describes a launch permission modification.
+type LaunchPermissionModifications struct {
+ _ struct{} `type:"structure"`
+
+ // The AWS account ID to add to the list of launch permissions for the AMI.
+ Add []*LaunchPermission `locationNameList:"item" type:"list"`
+
+ // The AWS account ID to remove from the list of launch permissions for the
+ // AMI.
+ Remove []*LaunchPermission `locationNameList:"item" type:"list"`
+}
+
+// String returns the string representation
+func (s LaunchPermissionModifications) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s LaunchPermissionModifications) GoString() string {
+ return s.String()
+}
+
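+// A minimal sketch of sharing an AMI with another account by pairing
+// LaunchPermissionModifications with the ModifyImageAttributeInput defined
+// later in this file; svc is an assumed *ec2.EC2 client and the IDs are
+// illustrative placeholders:
+//
+//     _, err := svc.ModifyImageAttribute(&ec2.ModifyImageAttributeInput{
+//         ImageId: aws.String("ami-12345678"),
+//         LaunchPermission: &ec2.LaunchPermissionModifications{
+//             Add: []*ec2.LaunchPermission{{UserId: aws.String("123456789012")}},
+//         },
+//     })
+
+// Describes the launch specification for an instance.
+type LaunchSpecification struct {
+ _ struct{} `type:"structure"`
+
+ // Deprecated.
+ AddressingType *string `locationName:"addressingType" type:"string"`
+
+ // One or more block device mapping entries.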
+ BlockDeviceMappings []*BlockDeviceMapping `locationName:"blockDeviceMapping" locationNameList:"item" type:"list"` + + // Indicates whether the instance is optimized for EBS I/O. This optimization + // provides dedicated throughput to Amazon EBS and an optimized configuration + // stack to provide optimal EBS I/O performance. This optimization isn't available + // with all instance types. Additional usage charges apply when using an EBS + // Optimized instance. + // + // Default: false + EbsOptimized *bool `locationName:"ebsOptimized" type:"boolean"` + + // The IAM instance profile. + IamInstanceProfile *IamInstanceProfileSpecification `locationName:"iamInstanceProfile" type:"structure"` + + // The ID of the AMI. + ImageId *string `locationName:"imageId" type:"string"` + + // The instance type. + InstanceType *string `locationName:"instanceType" type:"string" enum:"InstanceType"` + + // The ID of the kernel. + KernelId *string `locationName:"kernelId" type:"string"` + + // The name of the key pair. + KeyName *string `locationName:"keyName" type:"string"` + + // Describes the monitoring for the instance. + Monitoring *RunInstancesMonitoringEnabled `locationName:"monitoring" type:"structure"` + + // One or more network interfaces. + NetworkInterfaces []*InstanceNetworkInterfaceSpecification `locationName:"networkInterfaceSet" locationNameList:"item" type:"list"` + + // The placement information for the instance. + Placement *SpotPlacement `locationName:"placement" type:"structure"` + + // The ID of the RAM disk. + RamdiskId *string `locationName:"ramdiskId" type:"string"` + + // One or more security groups. When requesting instances in a VPC, you must + // specify the IDs of the security groups. When requesting instances in EC2-Classic, + // you can specify the names or the IDs of the security groups. + SecurityGroups []*GroupIdentifier `locationName:"groupSet" locationNameList:"item" type:"list"` + + // The ID of the subnet in which to launch the instance. + SubnetId *string `locationName:"subnetId" type:"string"` + + // The Base64-encoded MIME user data to make available to the instances. + UserData *string `locationName:"userData" type:"string"` +} + +// String returns the string representation +func (s LaunchSpecification) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s LaunchSpecification) GoString() string { + return s.String() +} + +type ModifyHostsInput struct { + _ struct{} `type:"structure"` + + // Specify whether to enable or disable auto-placement. + AutoPlacement *string `locationName:"autoPlacement" type:"string" required:"true" enum:"AutoPlacement"` + + // The host IDs of the Dedicated hosts you want to modify. + HostIds []*string `locationName:"hostId" locationNameList:"item" type:"list" required:"true"` +} + +// String returns the string representation +func (s ModifyHostsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModifyHostsInput) GoString() string { + return s.String() +} + +type ModifyHostsOutput struct { + _ struct{} `type:"structure"` + + // The IDs of the Dedicated hosts that were successfully modified. + Successful []*string `locationName:"successful" locationNameList:"item" type:"list"` + + // The IDs of the Dedicated hosts that could not be modified. Check whether + // the setting you requested can be used. 
+ Unsuccessful []*UnsuccessfulItem `locationName:"unsuccessful" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s ModifyHostsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModifyHostsOutput) GoString() string { + return s.String() +} + +type ModifyIdFormatInput struct { + _ struct{} `type:"structure"` + + // The type of resource. + Resource *string `type:"string" required:"true"` + + // Indicate whether the resource should use longer IDs (17-character IDs). + UseLongIds *bool `type:"boolean" required:"true"` +} + +// String returns the string representation +func (s ModifyIdFormatInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModifyIdFormatInput) GoString() string { + return s.String() +} + +type ModifyIdFormatOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s ModifyIdFormatOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModifyIdFormatOutput) GoString() string { + return s.String() +} + +type ModifyImageAttributeInput struct { + _ struct{} `type:"structure"` + + // The name of the attribute to modify. + Attribute *string `type:"string"` + + // A description for the AMI. + Description *AttributeValue `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The ID of the AMI. + ImageId *string `type:"string" required:"true"` + + // A launch permission modification. + LaunchPermission *LaunchPermissionModifications `type:"structure"` + + // The operation type. + OperationType *string `type:"string" enum:"OperationType"` + + // One or more product codes. After you add a product code to an AMI, it can't + // be removed. This is only valid when modifying the productCodes attribute. + ProductCodes []*string `locationName:"ProductCode" locationNameList:"ProductCode" type:"list"` + + // One or more user groups. This is only valid when modifying the launchPermission + // attribute. + UserGroups []*string `locationName:"UserGroup" locationNameList:"UserGroup" type:"list"` + + // One or more AWS account IDs. This is only valid when modifying the launchPermission + // attribute. + UserIds []*string `locationName:"UserId" locationNameList:"UserId" type:"list"` + + // The value of the attribute being modified. This is only valid when modifying + // the description attribute. + Value *string `type:"string"` +} + +// String returns the string representation +func (s ModifyImageAttributeInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModifyImageAttributeInput) GoString() string { + return s.String() +} + +type ModifyImageAttributeOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s ModifyImageAttributeOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModifyImageAttributeOutput) GoString() string { + return s.String() +} + +type ModifyInstanceAttributeInput struct { + _ struct{} `type:"structure"` + + // The name of the attribute. 
+ Attribute *string `locationName:"attribute" type:"string" enum:"InstanceAttributeName"`
+
+ // Modifies the DeleteOnTermination attribute for volumes that are currently
+ // attached. The volume must be owned by the caller. If no value is specified
+ // for DeleteOnTermination, the default is true and the volume is deleted when
+ // the instance is terminated.
+ //
+ // To add instance store volumes to an Amazon EBS-backed instance, you must
+ // add them when you launch the instance. For more information, see Updating
+ // the Block Device Mapping when Launching an Instance (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/block-device-mapping-concepts.html#Using_OverridingAMIBDM)
+ // in the Amazon Elastic Compute Cloud User Guide.
+ BlockDeviceMappings []*InstanceBlockDeviceMappingSpecification `locationName:"blockDeviceMapping" locationNameList:"item" type:"list"`
+
+ // If the value is true, you can't terminate the instance using the Amazon EC2
+ // console, CLI, or API; otherwise, you can. You cannot use this parameter for
+ // Spot Instances.
+ DisableApiTermination *AttributeBooleanValue `locationName:"disableApiTermination" type:"structure"`
+
+ // Checks whether you have the required permissions for the action, without
+ // actually making the request, and provides an error response. If you have
+ // the required permissions, the error response is DryRunOperation. Otherwise,
+ // it is UnauthorizedOperation.
+ DryRun *bool `locationName:"dryRun" type:"boolean"`
+
+ // Specifies whether the instance is optimized for EBS I/O. This optimization
+ // provides dedicated throughput to Amazon EBS and an optimized configuration
+ // stack to provide optimal EBS I/O performance. This optimization isn't available
+ // with all instance types. Additional usage charges apply when using an EBS
+ // Optimized instance.
+ EbsOptimized *AttributeBooleanValue `locationName:"ebsOptimized" type:"structure"`
+
+ // [EC2-VPC] Changes the security groups of the instance. You must specify at
+ // least one security group, even if it's just the default security group for
+ // the VPC. You must specify the security group ID, not the security group name.
+ Groups []*string `locationName:"GroupId" locationNameList:"groupId" type:"list"`
+
+ // The ID of the instance.
+ InstanceId *string `locationName:"instanceId" type:"string" required:"true"`
+
+ // Specifies whether an instance stops or terminates when you initiate shutdown
+ // from the instance (using the operating system command for system shutdown).
+ InstanceInitiatedShutdownBehavior *AttributeValue `locationName:"instanceInitiatedShutdownBehavior" type:"structure"`
+
+ // Changes the instance type to the specified value. For more information, see
+ // Instance Types (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-types.html).
+ // If the instance type is not valid, the error returned is InvalidInstanceAttributeValue.
+ InstanceType *AttributeValue `locationName:"instanceType" type:"structure"`
+
+ // Changes the instance's kernel to the specified value. We recommend that you
+ // use PV-GRUB instead of kernels and RAM disks. For more information, see PV-GRUB
+ // (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/UserProvidedKernels.html).
+ Kernel *AttributeValue `locationName:"kernel" type:"structure"`
+
+ // Changes the instance's RAM disk to the specified value. We recommend that
+ // you use PV-GRUB instead of kernels and RAM disks. For more information, see
+ // PV-GRUB (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/UserProvidedKernels.html).
+ Ramdisk *AttributeValue `locationName:"ramdisk" type:"structure"`
+
+ // Specifies whether source/destination checking is enabled. A value of true
+ // means that checking is enabled, and false means checking is disabled. This
+ // value must be false for a NAT instance to perform NAT.
+ SourceDestCheck *AttributeBooleanValue `type:"structure"`
+
+ // Set to simple to enable enhanced networking for the instance.
+ //
+ // There is no way to disable enhanced networking at this time.
+ //
+ // This option is supported only for HVM instances. Specifying this option
+ // with a PV instance can make it unreachable.
+ SriovNetSupport *AttributeValue `locationName:"sriovNetSupport" type:"structure"`
+
+ // Changes the instance's user data to the specified value.
+ UserData *BlobAttributeValue `locationName:"userData" type:"structure"`
+
+ // A new value for the attribute. Use only with the kernel, ramdisk, userData,
+ // disableApiTermination, or instanceInitiatedShutdownBehavior attribute.
+ Value *string `locationName:"value" type:"string"`
+}
+
+// String returns the string representation
+func (s ModifyInstanceAttributeInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ModifyInstanceAttributeInput) GoString() string {
+ return s.String()
+}
+
+type ModifyInstanceAttributeOutput struct {
+ _ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s ModifyInstanceAttributeOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ModifyInstanceAttributeOutput) GoString() string {
+ return s.String()
+}
+
+type ModifyInstancePlacementInput struct {
+ _ struct{} `type:"structure"`
+
+ // The new affinity setting for the instance.
+ Affinity *string `locationName:"affinity" type:"string" enum:"Affinity"`
+
+ // The ID of the Dedicated host that the instance will have affinity with.
+ HostId *string `locationName:"hostId" type:"string"`
+
+ // The ID of the instance that you are modifying.
+ InstanceId *string `locationName:"instanceId" type:"string" required:"true"`
+
+ // The tenancy of the instance that you are modifying.
+ Tenancy *string `locationName:"tenancy" type:"string" enum:"HostTenancy"`
+}
+
+// String returns the string representation
+func (s ModifyInstancePlacementInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ModifyInstancePlacementInput) GoString() string {
+ return s.String()
+}
+
+type ModifyInstancePlacementOutput struct {
+ _ struct{} `type:"structure"`
+
+ // Returns true if the request succeeds; otherwise, it returns an error.
+ Return *bool `locationName:"return" type:"boolean"`
+}
+
+// String returns the string representation
+func (s ModifyInstancePlacementOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ModifyInstancePlacementOutput) GoString() string {
+ return s.String()
+}
+
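+// A minimal sketch of disabling source/destination checking on an interface,
+// as a NAT instance requires; svc is an assumed *ec2.EC2 client and the
+// interface ID is an illustrative placeholder:
+//
+//     _, err := svc.ModifyNetworkInterfaceAttribute(&ec2.ModifyNetworkInterfaceAttributeInput{
+//         NetworkInterfaceId: aws.String("eni-12345678"),
+//         SourceDestCheck:    &ec2.AttributeBooleanValue{Value: aws.Bool(false)},
+//     })
+
+type ModifyNetworkInterfaceAttributeInput struct {
+ _ struct{} `type:"structure"`
+
+ // Information about the interface attachment. If modifying the 'delete on termination'
+ // attribute, you must specify the ID of the interface attachment.
+ Attachment *NetworkInterfaceAttachmentChanges `locationName:"attachment" type:"structure"`
+
+ // A description for the network interface.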
+ Description *AttributeValue `locationName:"description" type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // Changes the security groups for the network interface. The new set of groups + // you specify replaces the current set. You must specify at least one group, + // even if it's just the default security group in the VPC. You must specify + // the ID of the security group, not the name. + Groups []*string `locationName:"SecurityGroupId" locationNameList:"SecurityGroupId" type:"list"` + + // The ID of the network interface. + NetworkInterfaceId *string `locationName:"networkInterfaceId" type:"string" required:"true"` + + // Indicates whether source/destination checking is enabled. A value of true + // means checking is enabled, and false means checking is disabled. This value + // must be false for a NAT instance to perform NAT. For more information, see + // NAT Instances (http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_NAT_Instance.html) + // in the Amazon Virtual Private Cloud User Guide. + SourceDestCheck *AttributeBooleanValue `locationName:"sourceDestCheck" type:"structure"` +} + +// String returns the string representation +func (s ModifyNetworkInterfaceAttributeInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModifyNetworkInterfaceAttributeInput) GoString() string { + return s.String() +} + +type ModifyNetworkInterfaceAttributeOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s ModifyNetworkInterfaceAttributeOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModifyNetworkInterfaceAttributeOutput) GoString() string { + return s.String() +} + +type ModifyReservedInstancesInput struct { + _ struct{} `type:"structure"` + + // A unique, case-sensitive token you provide to ensure idempotency of your + // modification request. For more information, see Ensuring Idempotency (http://docs.aws.amazon.com/AWSEC2/latest/APIReference/Run_Instance_Idempotency.html). + ClientToken *string `locationName:"clientToken" type:"string"` + + // The IDs of the Reserved Instances to modify. + ReservedInstancesIds []*string `locationName:"ReservedInstancesId" locationNameList:"ReservedInstancesId" type:"list" required:"true"` + + // The configuration settings for the Reserved Instances to modify. + TargetConfigurations []*ReservedInstancesConfiguration `locationName:"ReservedInstancesConfigurationSetItemType" locationNameList:"item" type:"list" required:"true"` +} + +// String returns the string representation +func (s ModifyReservedInstancesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModifyReservedInstancesInput) GoString() string { + return s.String() +} + +type ModifyReservedInstancesOutput struct { + _ struct{} `type:"structure"` + + // The ID for the modification. 
+ ReservedInstancesModificationId *string `locationName:"reservedInstancesModificationId" type:"string"`
+}
+
+// String returns the string representation
+func (s ModifyReservedInstancesOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ModifyReservedInstancesOutput) GoString() string {
+ return s.String()
+}
+
+type ModifySnapshotAttributeInput struct {
+ _ struct{} `type:"structure"`
+
+ // The snapshot attribute to modify.
+ //
+ // Only volume creation permissions may be modified at the customer level.
+ Attribute *string `type:"string" enum:"SnapshotAttributeName"`
+
+ // A JSON representation of the snapshot attribute modification.
+ CreateVolumePermission *CreateVolumePermissionModifications `type:"structure"`
+
+ // Checks whether you have the required permissions for the action, without
+ // actually making the request, and provides an error response. If you have
+ // the required permissions, the error response is DryRunOperation. Otherwise,
+ // it is UnauthorizedOperation.
+ DryRun *bool `locationName:"dryRun" type:"boolean"`
+
+ // The group to modify for the snapshot.
+ GroupNames []*string `locationName:"UserGroup" locationNameList:"GroupName" type:"list"`
+
+ // The type of operation to perform on the attribute.
+ OperationType *string `type:"string" enum:"OperationType"`
+
+ // The ID of the snapshot.
+ SnapshotId *string `type:"string" required:"true"`
+
+ // The account ID to modify for the snapshot.
+ UserIds []*string `locationName:"UserId" locationNameList:"UserId" type:"list"`
+}
+
+// String returns the string representation
+func (s ModifySnapshotAttributeInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ModifySnapshotAttributeInput) GoString() string {
+ return s.String()
+}
+
+type ModifySnapshotAttributeOutput struct {
+ _ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s ModifySnapshotAttributeOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ModifySnapshotAttributeOutput) GoString() string {
+ return s.String()
+}
+
+// Contains the parameters for ModifySpotFleetRequest.
+type ModifySpotFleetRequestInput struct {
+ _ struct{} `type:"structure"`
+
+ // Indicates whether running Spot instances should be terminated if the target
+ // capacity of the Spot fleet request is decreased below the current size of
+ // the Spot fleet.
+ ExcessCapacityTerminationPolicy *string `locationName:"excessCapacityTerminationPolicy" type:"string" enum:"ExcessCapacityTerminationPolicy"`
+
+ // The ID of the Spot fleet request.
+ SpotFleetRequestId *string `locationName:"spotFleetRequestId" type:"string" required:"true"`
+
+ // The size of the fleet.
+ TargetCapacity *int64 `locationName:"targetCapacity" type:"integer"`
+}
+
+// String returns the string representation
+func (s ModifySpotFleetRequestInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ModifySpotFleetRequestInput) GoString() string {
+ return s.String()
+}
+
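+// A minimal sketch of reducing a fleet's target capacity, assuming svc is an
+// *ec2.EC2 client and the request ID is an illustrative placeholder:
+//
+//     _, err := svc.ModifySpotFleetRequest(&ec2.ModifySpotFleetRequestInput{
+//         SpotFleetRequestId: aws.String("sfr-example"),
+//         TargetCapacity:     aws.Int64(2),
+//     })
+
+// Contains the output of ModifySpotFleetRequest.
+type ModifySpotFleetRequestOutput struct {
+ _ struct{} `type:"structure"`
+
+ // Returns true if the request succeeds; otherwise, it returns an error.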
+ Return *bool `locationName:"return" type:"boolean"`
+}
+
+// String returns the string representation
+func (s ModifySpotFleetRequestOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ModifySpotFleetRequestOutput) GoString() string {
+ return s.String()
+}
+
+type ModifySubnetAttributeInput struct {
+ _ struct{} `type:"structure"`
+
+ // Specify true to indicate that instances launched into the specified subnet
+ // should be assigned a public IP address.
+ MapPublicIpOnLaunch *AttributeBooleanValue `type:"structure"`
+
+ // The ID of the subnet.
+ SubnetId *string `locationName:"subnetId" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s ModifySubnetAttributeInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ModifySubnetAttributeInput) GoString() string {
+ return s.String()
+}
+
+type ModifySubnetAttributeOutput struct {
+ _ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s ModifySubnetAttributeOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ModifySubnetAttributeOutput) GoString() string {
+ return s.String()
+}
+
+type ModifyVolumeAttributeInput struct {
+ _ struct{} `type:"structure"`
+
+ // Indicates whether the volume should be auto-enabled for I/O operations.
+ AutoEnableIO *AttributeBooleanValue `type:"structure"`
+
+ // Checks whether you have the required permissions for the action, without
+ // actually making the request, and provides an error response. If you have
+ // the required permissions, the error response is DryRunOperation. Otherwise,
+ // it is UnauthorizedOperation.
+ DryRun *bool `locationName:"dryRun" type:"boolean"`
+
+ // The ID of the volume.
+ VolumeId *string `type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s ModifyVolumeAttributeInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ModifyVolumeAttributeInput) GoString() string {
+ return s.String()
+}
+
+type ModifyVolumeAttributeOutput struct {
+ _ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s ModifyVolumeAttributeOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ModifyVolumeAttributeOutput) GoString() string {
+ return s.String()
+}
+
+type ModifyVpcAttributeInput struct {
+ _ struct{} `type:"structure"`
+
+ // Indicates whether the instances launched in the VPC get DNS hostnames. If
+ // enabled, instances in the VPC get DNS hostnames; otherwise, they do not.
+ //
+ // You cannot modify the DNS resolution and DNS hostnames attributes in the
+ // same request. Use separate requests for each attribute. You can only enable
+ // DNS hostnames if you've enabled DNS support.
+ EnableDnsHostnames *AttributeBooleanValue `type:"structure"`
+
+ // Indicates whether the DNS resolution is supported for the VPC. If enabled,
+ // queries to the Amazon provided DNS server at the 169.254.169.253 IP address,
+ // or the reserved IP address at the base of the VPC network range "plus two"
+ // will succeed. If disabled, the Amazon provided DNS service in the VPC that
+ // resolves public DNS hostnames to IP addresses is not enabled.
+ //
+ // You cannot modify the DNS resolution and DNS hostnames attributes in the
+ // same request. Use separate requests for each attribute.
Use separate requests for each attribute. + EnableDnsSupport *AttributeBooleanValue `type:"structure"` + + // The ID of the VPC. + VpcId *string `locationName:"vpcId" type:"string" required:"true"` +} + +// String returns the string representation +func (s ModifyVpcAttributeInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModifyVpcAttributeInput) GoString() string { + return s.String() +} + +type ModifyVpcAttributeOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s ModifyVpcAttributeOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModifyVpcAttributeOutput) GoString() string { + return s.String() +} + +type ModifyVpcEndpointInput struct { + _ struct{} `type:"structure"` + + // One or more route tables IDs to associate with the endpoint. + AddRouteTableIds []*string `locationName:"AddRouteTableId" locationNameList:"item" type:"list"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // A policy document to attach to the endpoint. The policy must be in valid + // JSON format. + PolicyDocument *string `type:"string"` + + // One or more route table IDs to disassociate from the endpoint. + RemoveRouteTableIds []*string `locationName:"RemoveRouteTableId" locationNameList:"item" type:"list"` + + // Specify true to reset the policy document to the default policy. The default + // policy allows access to the service. + ResetPolicy *bool `type:"boolean"` + + // The ID of the endpoint. + VpcEndpointId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s ModifyVpcEndpointInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModifyVpcEndpointInput) GoString() string { + return s.String() +} + +type ModifyVpcEndpointOutput struct { + _ struct{} `type:"structure"` + + // Returns true if the request succeeds; otherwise, it returns an error. + Return *bool `locationName:"return" type:"boolean"` +} + +// String returns the string representation +func (s ModifyVpcEndpointOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModifyVpcEndpointOutput) GoString() string { + return s.String() +} + +type MonitorInstancesInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // One or more instance IDs. + InstanceIds []*string `locationName:"InstanceId" locationNameList:"InstanceId" type:"list" required:"true"` +} + +// String returns the string representation +func (s MonitorInstancesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s MonitorInstancesInput) GoString() string { + return s.String() +} + +type MonitorInstancesOutput struct { + _ struct{} `type:"structure"` + + // Monitoring information for one or more instances. 
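+ //
+ // Illustrative note (not part of the generated SDK): because the two DNS
+ // attributes on ModifyVpcAttributeInput above cannot be changed in a single
+ // call, enabling both takes two requests, with DNS support enabled first.
+ // A minimal sketch, assuming svc is an *ec2.EC2 client and the VPC ID is a
+ // placeholder:
+ //
+ //     vpcId := aws.String("vpc-12345678")
+ //     enabled := &ec2.AttributeBooleanValue{Value: aws.Bool(true)}
+ //     _, err := svc.ModifyVpcAttribute(&ec2.ModifyVpcAttributeInput{
+ //         VpcId:            vpcId,
+ //         EnableDnsSupport: enabled,
+ //     })
+ //     if err == nil {
+ //         _, err = svc.ModifyVpcAttribute(&ec2.ModifyVpcAttributeInput{
+ //             VpcId:              vpcId,
+ //             EnableDnsHostnames: enabled,
+ //         })
+ //     }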
+ InstanceMonitorings []*InstanceMonitoring `locationName:"instancesSet" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s MonitorInstancesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s MonitorInstancesOutput) GoString() string { + return s.String() +} + +// Describes the monitoring for the instance. +type Monitoring struct { + _ struct{} `type:"structure"` + + // Indicates whether monitoring is enabled for the instance. + State *string `locationName:"state" type:"string" enum:"MonitoringState"` +} + +// String returns the string representation +func (s Monitoring) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Monitoring) GoString() string { + return s.String() +} + +type MoveAddressToVpcInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The Elastic IP address. + PublicIp *string `locationName:"publicIp" type:"string" required:"true"` +} + +// String returns the string representation +func (s MoveAddressToVpcInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s MoveAddressToVpcInput) GoString() string { + return s.String() +} + +type MoveAddressToVpcOutput struct { + _ struct{} `type:"structure"` + + // The allocation ID for the Elastic IP address. + AllocationId *string `locationName:"allocationId" type:"string"` + + // The status of the move of the IP address. + Status *string `locationName:"status" type:"string" enum:"Status"` +} + +// String returns the string representation +func (s MoveAddressToVpcOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s MoveAddressToVpcOutput) GoString() string { + return s.String() +} + +// Describes the status of a moving Elastic IP address. +type MovingAddressStatus struct { + _ struct{} `type:"structure"` + + // The status of the Elastic IP address that's being moved to the EC2-VPC platform, + // or restored to the EC2-Classic platform. + MoveStatus *string `locationName:"moveStatus" type:"string" enum:"MoveStatus"` + + // The Elastic IP address. + PublicIp *string `locationName:"publicIp" type:"string"` +} + +// String returns the string representation +func (s MovingAddressStatus) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s MovingAddressStatus) GoString() string { + return s.String() +} + +// Describes a NAT gateway. +type NatGateway struct { + _ struct{} `type:"structure"` + + // The date and time the NAT gateway was created. + CreateTime *time.Time `locationName:"createTime" type:"timestamp" timestampFormat:"iso8601"` + + // The date and time the NAT gateway was deleted, if applicable. + DeleteTime *time.Time `locationName:"deleteTime" type:"timestamp" timestampFormat:"iso8601"` + + // If the NAT gateway could not be created, specifies the error code for the + // failure. 
(InsufficientFreeAddressesInSubnet | Gateway.NotAttached | InvalidAllocationID.NotFound
+ // | Resource.AlreadyAssociated | InternalError)
+ FailureCode *string `locationName:"failureCode" type:"string"`
+
+ // If the NAT gateway could not be created, specifies the error message for
+ // the failure that corresponds to the error code.
+ //
+ // For InsufficientFreeAddressesInSubnet: Subnet has insufficient free addresses
+ // to create this NAT gateway.
+ // For Gateway.NotAttached: Network vpc-xxxxxxxx has no Internet gateway attached.
+ // For InvalidAllocationID.NotFound: Elastic IP address eipalloc-xxxxxxxx could
+ // not be associated with this NAT gateway.
+ // For Resource.AlreadyAssociated: Elastic IP address eipalloc-xxxxxxxx is already
+ // associated.
+ // For InternalError: Network interface eni-xxxxxxxx, created and used internally
+ // by this NAT gateway, is in an invalid state. Please try again.
+ FailureMessage *string `locationName:"failureMessage" type:"string"`
+
+ // Information about the IP addresses and network interface associated with
+ // the NAT gateway.
+ NatGatewayAddresses []*NatGatewayAddress `locationName:"natGatewayAddressSet" locationNameList:"item" type:"list"`
+
+ // The ID of the NAT gateway.
+ NatGatewayId *string `locationName:"natGatewayId" type:"string"`
+
+ // The state of the NAT gateway.
+ State *string `locationName:"state" type:"string" enum:"NatGatewayState"`
+
+ // The ID of the subnet in which the NAT gateway is located.
+ SubnetId *string `locationName:"subnetId" type:"string"`
+
+ // The ID of the VPC in which the NAT gateway is located.
+ VpcId *string `locationName:"vpcId" type:"string"`
+}
+
+// String returns the string representation
+func (s NatGateway) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s NatGateway) GoString() string {
+ return s.String()
+}
+
+// Describes the IP addresses and network interface associated with a NAT gateway.
+type NatGatewayAddress struct {
+ _ struct{} `type:"structure"`
+
+ // The allocation ID of the Elastic IP address that's associated with the NAT
+ // gateway.
+ AllocationId *string `locationName:"allocationId" type:"string"`
+
+ // The ID of the network interface associated with the NAT gateway.
+ NetworkInterfaceId *string `locationName:"networkInterfaceId" type:"string"`
+
+ // The private IP address associated with the Elastic IP address.
+ PrivateIp *string `locationName:"privateIp" type:"string"`
+
+ // The Elastic IP address associated with the NAT gateway.
+ PublicIp *string `locationName:"publicIp" type:"string"`
+}
+
+// String returns the string representation
+func (s NatGatewayAddress) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s NatGatewayAddress) GoString() string {
+ return s.String()
+}
+
+// Describes a network ACL.
+type NetworkAcl struct {
+ _ struct{} `type:"structure"`
+
+ // Any associations between the network ACL and one or more subnets.
+ Associations []*NetworkAclAssociation `locationName:"associationSet" locationNameList:"item" type:"list"`
+
+ // One or more entries (rules) in the network ACL.
+ Entries []*NetworkAclEntry `locationName:"entrySet" locationNameList:"item" type:"list"`
+
+ // Indicates whether this is the default network ACL for the VPC.
+ IsDefault *bool `locationName:"default" type:"boolean"`
+
+ // The ID of the network ACL.
+ NetworkAclId *string `locationName:"networkAclId" type:"string"`
+
+ // Any tags assigned to the network ACL.
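+ //
+ // Illustrative note (not part of the generated SDK): the FailureCode and
+ // FailureMessage fields on NatGateway above are typically inspected after a
+ // creation attempt. A minimal sketch, assuming svc is an *ec2.EC2 client and
+ // the NAT gateway ID is a placeholder:
+ //
+ //     out, err := svc.DescribeNatGateways(&ec2.DescribeNatGatewaysInput{
+ //         NatGatewayIds: []*string{aws.String("nat-0123456789abcdef0")},
+ //     })
+ //     if err == nil && len(out.NatGateways) > 0 {
+ //         gw := out.NatGateways[0]
+ //         if aws.StringValue(gw.State) == "failed" {
+ //             log.Printf("%s: %s", aws.StringValue(gw.FailureCode),
+ //                 aws.StringValue(gw.FailureMessage))
+ //         }
+ //     }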
+ Tags []*Tag `locationName:"tagSet" locationNameList:"item" type:"list"` + + // The ID of the VPC for the network ACL. + VpcId *string `locationName:"vpcId" type:"string"` +} + +// String returns the string representation +func (s NetworkAcl) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s NetworkAcl) GoString() string { + return s.String() +} + +// Describes an association between a network ACL and a subnet. +type NetworkAclAssociation struct { + _ struct{} `type:"structure"` + + // The ID of the association between a network ACL and a subnet. + NetworkAclAssociationId *string `locationName:"networkAclAssociationId" type:"string"` + + // The ID of the network ACL. + NetworkAclId *string `locationName:"networkAclId" type:"string"` + + // The ID of the subnet. + SubnetId *string `locationName:"subnetId" type:"string"` +} + +// String returns the string representation +func (s NetworkAclAssociation) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s NetworkAclAssociation) GoString() string { + return s.String() +} + +// Describes an entry in a network ACL. +type NetworkAclEntry struct { + _ struct{} `type:"structure"` + + // The network range to allow or deny, in CIDR notation. + CidrBlock *string `locationName:"cidrBlock" type:"string"` + + // Indicates whether the rule is an egress rule (applied to traffic leaving + // the subnet). + Egress *bool `locationName:"egress" type:"boolean"` + + // ICMP protocol: The ICMP type and code. + IcmpTypeCode *IcmpTypeCode `locationName:"icmpTypeCode" type:"structure"` + + // TCP or UDP protocols: The range of ports the rule applies to. + PortRange *PortRange `locationName:"portRange" type:"structure"` + + // The protocol. A value of -1 means all protocols. + Protocol *string `locationName:"protocol" type:"string"` + + // Indicates whether to allow or deny the traffic that matches the rule. + RuleAction *string `locationName:"ruleAction" type:"string" enum:"RuleAction"` + + // The rule number for the entry. ACL entries are processed in ascending order + // by rule number. + RuleNumber *int64 `locationName:"ruleNumber" type:"integer"` +} + +// String returns the string representation +func (s NetworkAclEntry) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s NetworkAclEntry) GoString() string { + return s.String() +} + +// Describes a network interface. +type NetworkInterface struct { + _ struct{} `type:"structure"` + + // The association information for an Elastic IP associated with the network + // interface. + Association *NetworkInterfaceAssociation `locationName:"association" type:"structure"` + + // The network interface attachment. + Attachment *NetworkInterfaceAttachment `locationName:"attachment" type:"structure"` + + // The Availability Zone. + AvailabilityZone *string `locationName:"availabilityZone" type:"string"` + + // A description. + Description *string `locationName:"description" type:"string"` + + // Any security groups for the network interface. + Groups []*GroupIdentifier `locationName:"groupSet" locationNameList:"item" type:"list"` + + // The type of interface. + InterfaceType *string `locationName:"interfaceType" type:"string" enum:"NetworkInterfaceType"` + + // The MAC address. + MacAddress *string `locationName:"macAddress" type:"string"` + + // The ID of the network interface. 
+ NetworkInterfaceId *string `locationName:"networkInterfaceId" type:"string"` + + // The AWS account ID of the owner of the network interface. + OwnerId *string `locationName:"ownerId" type:"string"` + + // The private DNS name. + PrivateDnsName *string `locationName:"privateDnsName" type:"string"` + + // The IP address of the network interface within the subnet. + PrivateIpAddress *string `locationName:"privateIpAddress" type:"string"` + + // The private IP addresses associated with the network interface. + PrivateIpAddresses []*NetworkInterfacePrivateIpAddress `locationName:"privateIpAddressesSet" locationNameList:"item" type:"list"` + + // The ID of the entity that launched the instance on your behalf (for example, + // AWS Management Console or Auto Scaling). + RequesterId *string `locationName:"requesterId" type:"string"` + + // Indicates whether the network interface is being managed by AWS. + RequesterManaged *bool `locationName:"requesterManaged" type:"boolean"` + + // Indicates whether traffic to or from the instance is validated. + SourceDestCheck *bool `locationName:"sourceDestCheck" type:"boolean"` + + // The status of the network interface. + Status *string `locationName:"status" type:"string" enum:"NetworkInterfaceStatus"` + + // The ID of the subnet. + SubnetId *string `locationName:"subnetId" type:"string"` + + // Any tags assigned to the network interface. + TagSet []*Tag `locationName:"tagSet" locationNameList:"item" type:"list"` + + // The ID of the VPC. + VpcId *string `locationName:"vpcId" type:"string"` +} + +// String returns the string representation +func (s NetworkInterface) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s NetworkInterface) GoString() string { + return s.String() +} + +// Describes association information for an Elastic IP address. +type NetworkInterfaceAssociation struct { + _ struct{} `type:"structure"` + + // The allocation ID. + AllocationId *string `locationName:"allocationId" type:"string"` + + // The association ID. + AssociationId *string `locationName:"associationId" type:"string"` + + // The ID of the Elastic IP address owner. + IpOwnerId *string `locationName:"ipOwnerId" type:"string"` + + // The public DNS name. + PublicDnsName *string `locationName:"publicDnsName" type:"string"` + + // The address of the Elastic IP address bound to the network interface. + PublicIp *string `locationName:"publicIp" type:"string"` +} + +// String returns the string representation +func (s NetworkInterfaceAssociation) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s NetworkInterfaceAssociation) GoString() string { + return s.String() +} + +// Describes a network interface attachment. +type NetworkInterfaceAttachment struct { + _ struct{} `type:"structure"` + + // The timestamp indicating when the attachment initiated. + AttachTime *time.Time `locationName:"attachTime" type:"timestamp" timestampFormat:"iso8601"` + + // The ID of the network interface attachment. + AttachmentId *string `locationName:"attachmentId" type:"string"` + + // Indicates whether the network interface is deleted when the instance is terminated. + DeleteOnTermination *bool `locationName:"deleteOnTermination" type:"boolean"` + + // The device index of the network interface attachment on the instance. + DeviceIndex *int64 `locationName:"deviceIndex" type:"integer"` + + // The ID of the instance. 
+ InstanceId *string `locationName:"instanceId" type:"string"`
+
+ // The AWS account ID of the owner of the instance.
+ InstanceOwnerId *string `locationName:"instanceOwnerId" type:"string"`
+
+ // The attachment state.
+ Status *string `locationName:"status" type:"string" enum:"AttachmentStatus"`
+}
+
+// String returns the string representation
+func (s NetworkInterfaceAttachment) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s NetworkInterfaceAttachment) GoString() string {
+ return s.String()
+}
+
+// Describes an attachment change.
+type NetworkInterfaceAttachmentChanges struct {
+ _ struct{} `type:"structure"`
+
+ // The ID of the network interface attachment.
+ AttachmentId *string `locationName:"attachmentId" type:"string"`
+
+ // Indicates whether the network interface is deleted when the instance is terminated.
+ DeleteOnTermination *bool `locationName:"deleteOnTermination" type:"boolean"`
+}
+
+// String returns the string representation
+func (s NetworkInterfaceAttachmentChanges) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s NetworkInterfaceAttachmentChanges) GoString() string {
+ return s.String()
+}
+
+// Describes the private IP address of a network interface.
+type NetworkInterfacePrivateIpAddress struct {
+ _ struct{} `type:"structure"`
+
+ // The association information for an Elastic IP address associated with the
+ // network interface.
+ Association *NetworkInterfaceAssociation `locationName:"association" type:"structure"`
+
+ // Indicates whether this IP address is the primary private IP address of the
+ // network interface.
+ Primary *bool `locationName:"primary" type:"boolean"`
+
+ // The private DNS name.
+ PrivateDnsName *string `locationName:"privateDnsName" type:"string"`
+
+ // The private IP address.
+ PrivateIpAddress *string `locationName:"privateIpAddress" type:"string"`
+}
+
+// String returns the string representation
+func (s NetworkInterfacePrivateIpAddress) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s NetworkInterfacePrivateIpAddress) GoString() string {
+ return s.String()
+}
+
+type NewDhcpConfiguration struct {
+ _ struct{} `type:"structure"`
+
+ Key *string `locationName:"key" type:"string"`
+
+ Values []*string `locationName:"Value" locationNameList:"item" type:"list"`
+}
+
+// String returns the string representation
+func (s NewDhcpConfiguration) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s NewDhcpConfiguration) GoString() string {
+ return s.String()
+}
+
+// Describes the placement for the instance.
+type Placement struct {
+ _ struct{} `type:"structure"`
+
+ // The affinity setting for the instance on the Dedicated host. This parameter
+ // is not supported for the ImportInstance command.
+ Affinity *string `locationName:"affinity" type:"string"`
+
+ // The Availability Zone of the instance.
+ AvailabilityZone *string `locationName:"availabilityZone" type:"string"`
+
+ // The name of the placement group the instance is in (for cluster compute instances).
+ GroupName *string `locationName:"groupName" type:"string"`
+
+ // The ID of the Dedicated host on which the instance resides. This parameter
+ // is not supported for the ImportInstance command.
+ HostId *string `locationName:"hostId" type:"string"`
+
+ // The tenancy of the instance (if the instance is running in a VPC).
An instance + // with a tenancy of dedicated runs on single-tenant hardware. The host tenancy + // is not supported for the ImportInstance command. + Tenancy *string `locationName:"tenancy" type:"string" enum:"Tenancy"` +} + +// String returns the string representation +func (s Placement) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Placement) GoString() string { + return s.String() +} + +// Describes a placement group. +type PlacementGroup struct { + _ struct{} `type:"structure"` + + // The name of the placement group. + GroupName *string `locationName:"groupName" type:"string"` + + // The state of the placement group. + State *string `locationName:"state" type:"string" enum:"PlacementGroupState"` + + // The placement strategy. + Strategy *string `locationName:"strategy" type:"string" enum:"PlacementStrategy"` +} + +// String returns the string representation +func (s PlacementGroup) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PlacementGroup) GoString() string { + return s.String() +} + +// Describes a range of ports. +type PortRange struct { + _ struct{} `type:"structure"` + + // The first port in the range. + From *int64 `locationName:"from" type:"integer"` + + // The last port in the range. + To *int64 `locationName:"to" type:"integer"` +} + +// String returns the string representation +func (s PortRange) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PortRange) GoString() string { + return s.String() +} + +// Describes prefixes for AWS services. +type PrefixList struct { + _ struct{} `type:"structure"` + + // The IP address range of the AWS service. + Cidrs []*string `locationName:"cidrSet" locationNameList:"item" type:"list"` + + // The ID of the prefix. + PrefixListId *string `locationName:"prefixListId" type:"string"` + + // The name of the prefix. + PrefixListName *string `locationName:"prefixListName" type:"string"` +} + +// String returns the string representation +func (s PrefixList) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PrefixList) GoString() string { + return s.String() +} + +// The ID of the prefix. +type PrefixListId struct { + _ struct{} `type:"structure"` + + // The ID of the prefix. + PrefixListId *string `locationName:"prefixListId" type:"string"` +} + +// String returns the string representation +func (s PrefixListId) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PrefixListId) GoString() string { + return s.String() +} + +// Describes the price for a Reserved Instance. +type PriceSchedule struct { + _ struct{} `type:"structure"` + + // The current price schedule, as determined by the term remaining for the Reserved + // Instance in the listing. + // + // A specific price schedule is always in effect, but only one price schedule + // can be active at any time. Take, for example, a Reserved Instance listing + // that has five months remaining in its term. When you specify price schedules + // for five months and two months, this means that schedule 1, covering the + // first three months of the remaining term, will be active during months 5, + // 4, and 3. Then schedule 2, covering the last two months of the term, will + // be active for months 2 and 1. 
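+ //
+ // Illustrative note (not part of the generated SDK): a schedule like the one
+ // described above is created when listing the Reserved Instance. A minimal
+ // sketch, assuming svc is an *ec2.EC2 client and the token, ID, and prices
+ // are placeholders:
+ //
+ //     _, err := svc.CreateReservedInstancesListing(&ec2.CreateReservedInstancesListingInput{
+ //         ClientToken:         aws.String("listing-token-1"),
+ //         InstanceCount:       aws.Int64(1),
+ //         ReservedInstancesId: aws.String("placeholder-reserved-instances-id"),
+ //         PriceSchedules: []*ec2.PriceScheduleSpecification{
+ //             {Term: aws.Int64(5), Price: aws.Float64(1.20)}, // active months 5, 4, 3
+ //             {Term: aws.Int64(2), Price: aws.Float64(0.80)}, // active months 2, 1
+ //         },
+ //     })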
+ Active *bool `locationName:"active" type:"boolean"`
+
+ // The currency for transacting the Reserved Instance resale. At this time,
+ // the only supported currency is USD.
+ CurrencyCode *string `locationName:"currencyCode" type:"string" enum:"CurrencyCodeValues"`
+
+ // The fixed price for the term.
+ Price *float64 `locationName:"price" type:"double"`
+
+ // The number of months remaining in the reservation. For example, 2 is the
+ // second to the last month before the capacity reservation expires.
+ Term *int64 `locationName:"term" type:"long"`
+}
+
+// String returns the string representation
+func (s PriceSchedule) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s PriceSchedule) GoString() string {
+ return s.String()
+}
+
+// Describes the price for a Reserved Instance.
+type PriceScheduleSpecification struct {
+ _ struct{} `type:"structure"`
+
+ // The currency for transacting the Reserved Instance resale. At this time,
+ // the only supported currency is USD.
+ CurrencyCode *string `locationName:"currencyCode" type:"string" enum:"CurrencyCodeValues"`
+
+ // The fixed price for the term.
+ Price *float64 `locationName:"price" type:"double"`
+
+ // The number of months remaining in the reservation. For example, 2 is the
+ // second to the last month before the capacity reservation expires.
+ Term *int64 `locationName:"term" type:"long"`
+}
+
+// String returns the string representation
+func (s PriceScheduleSpecification) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s PriceScheduleSpecification) GoString() string {
+ return s.String()
+}
+
+// Describes a Reserved Instance offering.
+type PricingDetail struct {
+ _ struct{} `type:"structure"`
+
+ // The number of reservations available for the price.
+ Count *int64 `locationName:"count" type:"integer"`
+
+ // The price per instance.
+ Price *float64 `locationName:"price" type:"double"`
+}
+
+// String returns the string representation
+func (s PricingDetail) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s PricingDetail) GoString() string {
+ return s.String()
+}
+
+// Describes a secondary private IP address for a network interface.
+type PrivateIpAddressSpecification struct {
+ _ struct{} `type:"structure"`
+
+ // Indicates whether the private IP address is the primary private IP address.
+ // Only one IP address can be designated as primary.
+ Primary *bool `locationName:"primary" type:"boolean"`
+
+ // The private IP address.
+ PrivateIpAddress *string `locationName:"privateIpAddress" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s PrivateIpAddressSpecification) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s PrivateIpAddressSpecification) GoString() string {
+ return s.String()
+}
+
+// Describes a product code.
+type ProductCode struct {
+ _ struct{} `type:"structure"`
+
+ // The product code.
+ ProductCodeId *string `locationName:"productCode" type:"string"`
+
+ // The type of product code.
+ ProductCodeType *string `locationName:"type" type:"string" enum:"ProductCodeValues"` +} + +// String returns the string representation +func (s ProductCode) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ProductCode) GoString() string { + return s.String() +} + +// Describes a virtual private gateway propagating route. +type PropagatingVgw struct { + _ struct{} `type:"structure"` + + // The ID of the virtual private gateway (VGW). + GatewayId *string `locationName:"gatewayId" type:"string"` +} + +// String returns the string representation +func (s PropagatingVgw) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PropagatingVgw) GoString() string { + return s.String() +} + +// Describes a request to purchase Scheduled Instances. +type PurchaseRequest struct { + _ struct{} `type:"structure"` + + // The number of instances. + InstanceCount *int64 `type:"integer"` + + // The purchase token. + PurchaseToken *string `type:"string"` +} + +// String returns the string representation +func (s PurchaseRequest) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PurchaseRequest) GoString() string { + return s.String() +} + +type PurchaseReservedInstancesOfferingInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The number of Reserved Instances to purchase. + InstanceCount *int64 `type:"integer" required:"true"` + + // Specified for Reserved Instance Marketplace offerings to limit the total + // order and ensure that the Reserved Instances are not purchased at unexpected + // prices. + LimitPrice *ReservedInstanceLimitPrice `locationName:"limitPrice" type:"structure"` + + // The ID of the Reserved Instance offering to purchase. + ReservedInstancesOfferingId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s PurchaseReservedInstancesOfferingInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PurchaseReservedInstancesOfferingInput) GoString() string { + return s.String() +} + +type PurchaseReservedInstancesOfferingOutput struct { + _ struct{} `type:"structure"` + + // The IDs of the purchased Reserved Instances. + ReservedInstancesId *string `locationName:"reservedInstancesId" type:"string"` +} + +// String returns the string representation +func (s PurchaseReservedInstancesOfferingOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PurchaseReservedInstancesOfferingOutput) GoString() string { + return s.String() +} + +// Contains the parameters for PurchaseScheduledInstances. +type PurchaseScheduledInstancesInput struct { + _ struct{} `type:"structure"` + + // Unique, case-sensitive identifier that ensures the idempotency of the request. + // For more information, see Ensuring Idempotency (http://docs.aws.amazon.com/AWSEC2/latest/APIReference/Run_Instance_Idempotency.html). 
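+ //
+ // Illustrative note (not part of the generated SDK): reusing the same client
+ // token on retries makes the purchase idempotent. A minimal sketch, assuming
+ // svc is an *ec2.EC2 client and both tokens are placeholders (the purchase
+ // token would come from DescribeScheduledInstanceAvailability):
+ //
+ //     _, err := svc.PurchaseScheduledInstances(&ec2.PurchaseScheduledInstancesInput{
+ //         ClientToken: aws.String("my-idempotency-token-1"),
+ //         PurchaseRequests: []*ec2.PurchaseRequest{
+ //             {InstanceCount: aws.Int64(1), PurchaseToken: aws.String("placeholder-purchase-token")},
+ //         },
+ //     })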
+ ClientToken *string `type:"string"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // One or more purchase requests. + PurchaseRequests []*PurchaseRequest `locationName:"PurchaseRequest" locationNameList:"PurchaseRequest" type:"list" required:"true"` +} + +// String returns the string representation +func (s PurchaseScheduledInstancesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PurchaseScheduledInstancesInput) GoString() string { + return s.String() +} + +// Contains the output of PurchaseScheduledInstances. +type PurchaseScheduledInstancesOutput struct { + _ struct{} `type:"structure"` + + // Information about the Scheduled Instances. + ScheduledInstanceSet []*ScheduledInstance `locationName:"scheduledInstanceSet" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s PurchaseScheduledInstancesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PurchaseScheduledInstancesOutput) GoString() string { + return s.String() +} + +type RebootInstancesInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // One or more instance IDs. + InstanceIds []*string `locationName:"InstanceId" locationNameList:"InstanceId" type:"list" required:"true"` +} + +// String returns the string representation +func (s RebootInstancesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RebootInstancesInput) GoString() string { + return s.String() +} + +type RebootInstancesOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s RebootInstancesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RebootInstancesOutput) GoString() string { + return s.String() +} + +// Describes a recurring charge. +type RecurringCharge struct { + _ struct{} `type:"structure"` + + // The amount of the recurring charge. + Amount *float64 `locationName:"amount" type:"double"` + + // The frequency of the recurring charge. + Frequency *string `locationName:"frequency" type:"string" enum:"RecurringChargeFrequency"` +} + +// String returns the string representation +func (s RecurringCharge) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RecurringCharge) GoString() string { + return s.String() +} + +// Describes a region. +type Region struct { + _ struct{} `type:"structure"` + + // The region service endpoint. + Endpoint *string `locationName:"regionEndpoint" type:"string"` + + // The name of the region. 
+ RegionName *string `locationName:"regionName" type:"string"`
+}
+
+// String returns the string representation
+func (s Region) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s Region) GoString() string {
+ return s.String()
+}
+
+type RegisterImageInput struct {
+ _ struct{} `type:"structure"`
+
+ // The architecture of the AMI.
+ //
+ // Default: For Amazon EBS-backed AMIs, i386. For instance store-backed AMIs,
+ // the architecture specified in the manifest file.
+ Architecture *string `locationName:"architecture" type:"string" enum:"ArchitectureValues"`
+
+ // One or more block device mapping entries.
+ BlockDeviceMappings []*BlockDeviceMapping `locationName:"BlockDeviceMapping" locationNameList:"BlockDeviceMapping" type:"list"`
+
+ // A description for your AMI.
+ Description *string `locationName:"description" type:"string"`
+
+ // Checks whether you have the required permissions for the action, without
+ // actually making the request, and provides an error response. If you have
+ // the required permissions, the error response is DryRunOperation. Otherwise,
+ // it is UnauthorizedOperation.
+ DryRun *bool `locationName:"dryRun" type:"boolean"`
+
+ // The full path to your AMI manifest in Amazon S3 storage.
+ ImageLocation *string `type:"string"`
+
+ // The ID of the kernel.
+ KernelId *string `locationName:"kernelId" type:"string"`
+
+ // A name for your AMI.
+ //
+ // Constraints: 3-128 alphanumeric characters, parentheses (()), square brackets
+ // ([]), spaces ( ), periods (.), slashes (/), dashes (-), single quotes ('),
+ // at-signs (@), or underscores (_)
+ Name *string `locationName:"name" type:"string" required:"true"`
+
+ // The ID of the RAM disk.
+ RamdiskId *string `locationName:"ramdiskId" type:"string"`
+
+ // The name of the root device (for example, /dev/sda1, or /dev/xvda).
+ RootDeviceName *string `locationName:"rootDeviceName" type:"string"`
+
+ // Set to simple to enable enhanced networking for the AMI and any instances
+ // that you launch from the AMI.
+ //
+ // There is no way to disable enhanced networking at this time.
+ //
+ // This option is supported only for HVM AMIs. Specifying this option with
+ // a PV AMI can make instances launched from the AMI unreachable.
+ SriovNetSupport *string `locationName:"sriovNetSupport" type:"string"`
+
+ // The type of virtualization.
+ //
+ // Default: paravirtual
+ VirtualizationType *string `locationName:"virtualizationType" type:"string"`
+}
+
+// String returns the string representation
+func (s RegisterImageInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s RegisterImageInput) GoString() string {
+ return s.String()
+}
+
+type RegisterImageOutput struct {
+ _ struct{} `type:"structure"`
+
+ // The ID of the newly registered AMI.
+ ImageId *string `locationName:"imageId" type:"string"`
+}
+
+// String returns the string representation
+func (s RegisterImageOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s RegisterImageOutput) GoString() string {
+ return s.String()
+}
+
+type RejectVpcPeeringConnectionInput struct {
+ _ struct{} `type:"structure"`
+
+ // Checks whether you have the required permissions for the action, without
+ // actually making the request, and provides an error response. If you have
+ // the required permissions, the error response is DryRunOperation. Otherwise,
+ // it is UnauthorizedOperation.
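+ //
+ // Illustrative note (not part of the generated SDK): the DryRun pattern
+ // described above is the same for every action in this file. A minimal
+ // sketch, assuming svc is an *ec2.EC2 client, awserr is
+ // github.com/aws/aws-sdk-go/aws/awserr, and the connection ID is a
+ // placeholder:
+ //
+ //     _, err := svc.RejectVpcPeeringConnection(&ec2.RejectVpcPeeringConnectionInput{
+ //         DryRun:                 aws.Bool(true),
+ //         VpcPeeringConnectionId: aws.String("pcx-12345678"),
+ //     })
+ //     if awsErr, ok := err.(awserr.Error); ok && awsErr.Code() == "DryRunOperation" {
+ //         // The caller has the required permissions; no request was made.
+ //     }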
+ DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The ID of the VPC peering connection. + VpcPeeringConnectionId *string `locationName:"vpcPeeringConnectionId" type:"string" required:"true"` +} + +// String returns the string representation +func (s RejectVpcPeeringConnectionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RejectVpcPeeringConnectionInput) GoString() string { + return s.String() +} + +type RejectVpcPeeringConnectionOutput struct { + _ struct{} `type:"structure"` + + // Returns true if the request succeeds; otherwise, it returns an error. + Return *bool `locationName:"return" type:"boolean"` +} + +// String returns the string representation +func (s RejectVpcPeeringConnectionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RejectVpcPeeringConnectionOutput) GoString() string { + return s.String() +} + +type ReleaseAddressInput struct { + _ struct{} `type:"structure"` + + // [EC2-VPC] The allocation ID. Required for EC2-VPC. + AllocationId *string `type:"string"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // [EC2-Classic] The Elastic IP address. Required for EC2-Classic. + PublicIp *string `type:"string"` +} + +// String returns the string representation +func (s ReleaseAddressInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ReleaseAddressInput) GoString() string { + return s.String() +} + +type ReleaseAddressOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s ReleaseAddressOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ReleaseAddressOutput) GoString() string { + return s.String() +} + +type ReleaseHostsInput struct { + _ struct{} `type:"structure"` + + // The IDs of the Dedicated hosts you want to release. + HostIds []*string `locationName:"hostId" locationNameList:"item" type:"list" required:"true"` +} + +// String returns the string representation +func (s ReleaseHostsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ReleaseHostsInput) GoString() string { + return s.String() +} + +type ReleaseHostsOutput struct { + _ struct{} `type:"structure"` + + // The IDs of the Dedicated hosts that were successfully released. + Successful []*string `locationName:"successful" locationNameList:"item" type:"list"` + + // The IDs of the Dedicated hosts that could not be released, including an error + // message. + Unsuccessful []*UnsuccessfulItem `locationName:"unsuccessful" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s ReleaseHostsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ReleaseHostsOutput) GoString() string { + return s.String() +} + +type ReplaceNetworkAclAssociationInput struct { + _ struct{} `type:"structure"` + + // The ID of the current association between the original network ACL and the + // subnet. 
+ AssociationId *string `locationName:"associationId" type:"string" required:"true"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The ID of the new network ACL to associate with the subnet. + NetworkAclId *string `locationName:"networkAclId" type:"string" required:"true"` +} + +// String returns the string representation +func (s ReplaceNetworkAclAssociationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ReplaceNetworkAclAssociationInput) GoString() string { + return s.String() +} + +type ReplaceNetworkAclAssociationOutput struct { + _ struct{} `type:"structure"` + + // The ID of the new association. + NewAssociationId *string `locationName:"newAssociationId" type:"string"` +} + +// String returns the string representation +func (s ReplaceNetworkAclAssociationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ReplaceNetworkAclAssociationOutput) GoString() string { + return s.String() +} + +type ReplaceNetworkAclEntryInput struct { + _ struct{} `type:"structure"` + + // The network range to allow or deny, in CIDR notation. + CidrBlock *string `locationName:"cidrBlock" type:"string" required:"true"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // Indicates whether to replace the egress rule. + // + // Default: If no value is specified, we replace the ingress rule. + Egress *bool `locationName:"egress" type:"boolean" required:"true"` + + // ICMP protocol: The ICMP type and code. Required if specifying 1 (ICMP) for + // the protocol. + IcmpTypeCode *IcmpTypeCode `locationName:"Icmp" type:"structure"` + + // The ID of the ACL. + NetworkAclId *string `locationName:"networkAclId" type:"string" required:"true"` + + // TCP or UDP protocols: The range of ports the rule applies to. Required if + // specifying 6 (TCP) or 17 (UDP) for the protocol. + PortRange *PortRange `locationName:"portRange" type:"structure"` + + // The IP protocol. You can specify all or -1 to mean all protocols. + Protocol *string `locationName:"protocol" type:"string" required:"true"` + + // Indicates whether to allow or deny the traffic that matches the rule. + RuleAction *string `locationName:"ruleAction" type:"string" required:"true" enum:"RuleAction"` + + // The rule number of the entry to replace. 
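+ //
+ // Illustrative note (not part of the generated SDK): replacing rule 110 with
+ // an entry that allows inbound TCP 443 might look like the following sketch,
+ // where svc is assumed to be an *ec2.EC2 client and the ACL ID is a
+ // placeholder:
+ //
+ //     _, err := svc.ReplaceNetworkAclEntry(&ec2.ReplaceNetworkAclEntryInput{
+ //         NetworkAclId: aws.String("acl-12345678"),
+ //         RuleNumber:   aws.Int64(110),
+ //         Egress:       aws.Bool(false),
+ //         Protocol:     aws.String("6"), // TCP, so PortRange is required
+ //         PortRange:    &ec2.PortRange{From: aws.Int64(443), To: aws.Int64(443)},
+ //         RuleAction:   aws.String("allow"),
+ //         CidrBlock:    aws.String("0.0.0.0/0"),
+ //     })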
+ RuleNumber *int64 `locationName:"ruleNumber" type:"integer" required:"true"` +} + +// String returns the string representation +func (s ReplaceNetworkAclEntryInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ReplaceNetworkAclEntryInput) GoString() string { + return s.String() +} + +type ReplaceNetworkAclEntryOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s ReplaceNetworkAclEntryOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ReplaceNetworkAclEntryOutput) GoString() string { + return s.String() +} + +type ReplaceRouteInput struct { + _ struct{} `type:"structure"` + + // The CIDR address block used for the destination match. The value you provide + // must match the CIDR of an existing route in the table. + DestinationCidrBlock *string `locationName:"destinationCidrBlock" type:"string" required:"true"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The ID of an Internet gateway or virtual private gateway. + GatewayId *string `locationName:"gatewayId" type:"string"` + + // The ID of a NAT instance in your VPC. + InstanceId *string `locationName:"instanceId" type:"string"` + + // The ID of a NAT gateway. + NatGatewayId *string `locationName:"natGatewayId" type:"string"` + + // The ID of a network interface. + NetworkInterfaceId *string `locationName:"networkInterfaceId" type:"string"` + + // The ID of the route table. + RouteTableId *string `locationName:"routeTableId" type:"string" required:"true"` + + // The ID of a VPC peering connection. + VpcPeeringConnectionId *string `locationName:"vpcPeeringConnectionId" type:"string"` +} + +// String returns the string representation +func (s ReplaceRouteInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ReplaceRouteInput) GoString() string { + return s.String() +} + +type ReplaceRouteOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s ReplaceRouteOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ReplaceRouteOutput) GoString() string { + return s.String() +} + +type ReplaceRouteTableAssociationInput struct { + _ struct{} `type:"structure"` + + // The association ID. + AssociationId *string `locationName:"associationId" type:"string" required:"true"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The ID of the new route table to associate with the subnet. 
+ RouteTableId *string `locationName:"routeTableId" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s ReplaceRouteTableAssociationInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ReplaceRouteTableAssociationInput) GoString() string {
+ return s.String()
+}
+
+type ReplaceRouteTableAssociationOutput struct {
+ _ struct{} `type:"structure"`
+
+ // The ID of the new association.
+ NewAssociationId *string `locationName:"newAssociationId" type:"string"`
+}
+
+// String returns the string representation
+func (s ReplaceRouteTableAssociationOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ReplaceRouteTableAssociationOutput) GoString() string {
+ return s.String()
+}
+
+type ReportInstanceStatusInput struct {
+ _ struct{} `type:"structure"`
+
+ // Descriptive text about the health state of your instance.
+ Description *string `locationName:"description" type:"string"`
+
+ // Checks whether you have the required permissions for the action, without
+ // actually making the request, and provides an error response. If you have
+ // the required permissions, the error response is DryRunOperation. Otherwise,
+ // it is UnauthorizedOperation.
+ DryRun *bool `locationName:"dryRun" type:"boolean"`
+
+ // The time at which the reported instance health state ended.
+ EndTime *time.Time `locationName:"endTime" type:"timestamp" timestampFormat:"iso8601"`
+
+ // One or more instances.
+ Instances []*string `locationName:"instanceId" locationNameList:"InstanceId" type:"list" required:"true"`
+
+ // One or more reason codes that describe the health state of your instance.
+ //
+ // instance-stuck-in-state: My instance is stuck in a state.
+ //
+ // unresponsive: My instance is unresponsive.
+ //
+ // not-accepting-credentials: My instance is not accepting my credentials.
+ //
+ // password-not-available: A password is not available for my instance.
+ //
+ // performance-network: My instance is experiencing performance problems which
+ // I believe are network related.
+ //
+ // performance-instance-store: My instance is experiencing performance problems
+ // which I believe are related to the instance stores.
+ //
+ // performance-ebs-volume: My instance is experiencing performance problems
+ // which I believe are related to an EBS volume.
+ //
+ // performance-other: My instance is experiencing performance problems.
+ //
+ // other: [explain using the description parameter]
+ ReasonCodes []*string `locationName:"reasonCode" locationNameList:"item" type:"list" required:"true"`
+
+ // The time at which the reported instance health state began.
+ StartTime *time.Time `locationName:"startTime" type:"timestamp" timestampFormat:"iso8601"`
+
+ // The status of all instances listed.
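+ //
+ // Illustrative note (not part of the generated SDK): reporting an unresponsive
+ // instance with one of the reason codes listed above might look like the
+ // following sketch, where svc is assumed to be an *ec2.EC2 client and the
+ // instance ID is a placeholder:
+ //
+ //     _, err := svc.ReportInstanceStatus(&ec2.ReportInstanceStatusInput{
+ //         Instances:   []*string{aws.String("i-12345678")},
+ //         Status:      aws.String("impaired"),
+ //         ReasonCodes: []*string{aws.String("unresponsive")},
+ //         Description: aws.String("No response to SSH or ping."),
+ //     })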
+ Status *string `locationName:"status" type:"string" required:"true" enum:"ReportStatusType"` +} + +// String returns the string representation +func (s ReportInstanceStatusInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ReportInstanceStatusInput) GoString() string { + return s.String() +} + +type ReportInstanceStatusOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s ReportInstanceStatusOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ReportInstanceStatusOutput) GoString() string { + return s.String() +} + +// Contains the parameters for RequestSpotFleet. +type RequestSpotFleetInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The configuration for the Spot fleet request. + SpotFleetRequestConfig *SpotFleetRequestConfigData `locationName:"spotFleetRequestConfig" type:"structure" required:"true"` +} + +// String returns the string representation +func (s RequestSpotFleetInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RequestSpotFleetInput) GoString() string { + return s.String() +} + +// Contains the output of RequestSpotFleet. +type RequestSpotFleetOutput struct { + _ struct{} `type:"structure"` + + // The ID of the Spot fleet request. + SpotFleetRequestId *string `locationName:"spotFleetRequestId" type:"string" required:"true"` +} + +// String returns the string representation +func (s RequestSpotFleetOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RequestSpotFleetOutput) GoString() string { + return s.String() +} + +// Contains the parameters for RequestSpotInstances. +type RequestSpotInstancesInput struct { + _ struct{} `type:"structure"` + + // The user-specified name for a logical grouping of bids. + // + // When you specify an Availability Zone group in a Spot Instance request, + // all Spot instances in the request are launched in the same Availability Zone. + // Instance proximity is maintained with this parameter, but the choice of Availability + // Zone is not. The group applies only to bids for Spot Instances of the same + // instance type. Any additional Spot instance requests that are specified with + // the same Availability Zone group name are launched in that same Availability + // Zone, as long as at least one instance from the group is still active. + // + // If there is no active instance running in the Availability Zone group that + // you specify for a new Spot instance request (all instances are terminated, + // the bid is expired, or the bid falls below current market), then Amazon EC2 + // launches the instance in any Availability Zone where the constraint can be + // met. Consequently, the subsequent set of Spot instances could be placed in + // a different zone from the original request, even if you specified the same + // Availability Zone group. + // + // Default: Instances are launched in any available Availability Zone. 
+ AvailabilityZoneGroup *string `locationName:"availabilityZoneGroup" type:"string"` + + // The required duration for the Spot instances, in minutes. This value must + // be a multiple of 60 (60, 120, 180, 240, 300, or 360). + // + // The duration period starts as soon as your Spot instance receives its instance + // ID. At the end of the duration period, Amazon EC2 marks the Spot instance + // for termination and provides a Spot instance termination notice, which gives + // the instance a two-minute warning before it terminates. + // + // Note that you can't specify an Availability Zone group or a launch group + // if you specify a duration. + BlockDurationMinutes *int64 `locationName:"blockDurationMinutes" type:"integer"` + + // Unique, case-sensitive identifier that you provide to ensure the idempotency + // of the request. For more information, see How to Ensure Idempotency (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Run_Instance_Idempotency.html) + // in the Amazon Elastic Compute Cloud User Guide. + ClientToken *string `locationName:"clientToken" type:"string"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The maximum number of Spot instances to launch. + // + // Default: 1 + InstanceCount *int64 `locationName:"instanceCount" type:"integer"` + + // The instance launch group. Launch groups are Spot instances that launch together + // and terminate together. + // + // Default: Instances are launched and terminated individually + LaunchGroup *string `locationName:"launchGroup" type:"string"` + + // Describes the launch specification for an instance. + LaunchSpecification *RequestSpotLaunchSpecification `type:"structure"` + + // The maximum hourly price (bid) for any Spot instance launched to fulfill + // the request. + SpotPrice *string `locationName:"spotPrice" type:"string" required:"true"` + + // The Spot instance request type. + // + // Default: one-time + Type *string `locationName:"type" type:"string" enum:"SpotInstanceType"` + + // The start date of the request. If this is a one-time request, the request + // becomes active at this date and time and remains active until all instances + // launch, the request expires, or the request is canceled. If the request is + // persistent, the request becomes active at this date and time and remains + // active until it expires or is canceled. + // + // Default: The request is effective indefinitely. + ValidFrom *time.Time `locationName:"validFrom" type:"timestamp" timestampFormat:"iso8601"` + + // The end date of the request. If this is a one-time request, the request remains + // active until all instances launch, the request is canceled, or this date + // is reached. If the request is persistent, it remains active until it is canceled + // or this date and time is reached. + // + // Default: The request is effective indefinitely. + ValidUntil *time.Time `locationName:"validUntil" type:"timestamp" timestampFormat:"iso8601"` +} + +// String returns the string representation +func (s RequestSpotInstancesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RequestSpotInstancesInput) GoString() string { + return s.String() +} + +// Contains the output of RequestSpotInstances. 
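+//
+// Illustrative note (not part of the generated SDK): a one-time request for
+// two Spot instances might look like the following sketch, where svc is
+// assumed to be an *ec2.EC2 client and the AMI ID is a placeholder:
+//
+//     out, err := svc.RequestSpotInstances(&ec2.RequestSpotInstancesInput{
+//         SpotPrice:     aws.String("0.05"),
+//         InstanceCount: aws.Int64(2),
+//         LaunchSpecification: &ec2.RequestSpotLaunchSpecification{
+//             ImageId:      aws.String("ami-12345678"),
+//             InstanceType: aws.String("m3.medium"),
+//         },
+//     })
+//     if err == nil {
+//         for _, r := range out.SpotInstanceRequests {
+//             fmt.Println(aws.StringValue(r.SpotInstanceRequestId))
+//         }
+//     }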
+type RequestSpotInstancesOutput struct {
+ _ struct{} `type:"structure"`
+
+ // One or more Spot instance requests.
+ SpotInstanceRequests []*SpotInstanceRequest `locationName:"spotInstanceRequestSet" locationNameList:"item" type:"list"`
+}
+
+// String returns the string representation
+func (s RequestSpotInstancesOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s RequestSpotInstancesOutput) GoString() string {
+ return s.String()
+}
+
+// Describes the launch specification for an instance.
+type RequestSpotLaunchSpecification struct {
+ _ struct{} `type:"structure"`
+
+ // Deprecated.
+ AddressingType *string `locationName:"addressingType" type:"string"`
+
+ // One or more block device mapping entries.
+ BlockDeviceMappings []*BlockDeviceMapping `locationName:"blockDeviceMapping" locationNameList:"item" type:"list"`
+
+ // Indicates whether the instance is optimized for EBS I/O. This optimization
+ // provides dedicated throughput to Amazon EBS and an optimized configuration
+ // stack to provide optimal EBS I/O performance. This optimization isn't available
+ // with all instance types. Additional usage charges apply when using an EBS-optimized
+ // instance.
+ //
+ // Default: false
+ EbsOptimized *bool `locationName:"ebsOptimized" type:"boolean"`
+
+ // The IAM instance profile.
+ IamInstanceProfile *IamInstanceProfileSpecification `locationName:"iamInstanceProfile" type:"structure"`
+
+ // The ID of the AMI.
+ ImageId *string `locationName:"imageId" type:"string"`
+
+ // The instance type.
+ InstanceType *string `locationName:"instanceType" type:"string" enum:"InstanceType"`
+
+ // The ID of the kernel.
+ KernelId *string `locationName:"kernelId" type:"string"`
+
+ // The name of the key pair.
+ KeyName *string `locationName:"keyName" type:"string"`
+
+ // Describes the monitoring for the instance.
+ Monitoring *RunInstancesMonitoringEnabled `locationName:"monitoring" type:"structure"`
+
+ // One or more network interfaces.
+ NetworkInterfaces []*InstanceNetworkInterfaceSpecification `locationName:"NetworkInterface" locationNameList:"item" type:"list"`
+
+ // The placement information for the instance.
+ Placement *SpotPlacement `locationName:"placement" type:"structure"`
+
+ // The ID of the RAM disk.
+ RamdiskId *string `locationName:"ramdiskId" type:"string"`
+
+ // One or more security group IDs.
+ SecurityGroupIds []*string `locationName:"SecurityGroupId" locationNameList:"item" type:"list"`
+
+ // One or more security group names.
+ SecurityGroups []*string `locationName:"SecurityGroup" locationNameList:"item" type:"list"`
+
+ // The ID of the subnet in which to launch the instance.
+ SubnetId *string `locationName:"subnetId" type:"string"`
+
+ // The Base64-encoded MIME user data to make available to the instances.
+ UserData *string `locationName:"userData" type:"string"`
+}
+
+// String returns the string representation
+func (s RequestSpotLaunchSpecification) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s RequestSpotLaunchSpecification) GoString() string {
+ return s.String()
+}
+
+// Describes a reservation.
+type Reservation struct {
+ _ struct{} `type:"structure"`
+
+ // One or more security groups.
+ Groups []*GroupIdentifier `locationName:"groupSet" locationNameList:"item" type:"list"`
+
+ // One or more instances.
+ Instances []*Instance `locationName:"instancesSet" locationNameList:"item" type:"list"`
+
+ // The ID of the AWS account that owns the reservation.
+ OwnerId *string `locationName:"ownerId" type:"string"` + + // The ID of the requester that launched the instances on your behalf (for example, + // AWS Management Console or Auto Scaling). + RequesterId *string `locationName:"requesterId" type:"string"` + + // The ID of the reservation. + ReservationId *string `locationName:"reservationId" type:"string"` +} + +// String returns the string representation +func (s Reservation) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Reservation) GoString() string { + return s.String() +} + +// Describes the limit price of a Reserved Instance offering. +type ReservedInstanceLimitPrice struct { + _ struct{} `type:"structure"` + + // Used for Reserved Instance Marketplace offerings. Specifies the limit price + // on the total order (instanceCount * price). + Amount *float64 `locationName:"amount" type:"double"` + + // The currency in which the limitPrice amount is specified. At this time, the + // only supported currency is USD. + CurrencyCode *string `locationName:"currencyCode" type:"string" enum:"CurrencyCodeValues"` +} + +// String returns the string representation +func (s ReservedInstanceLimitPrice) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ReservedInstanceLimitPrice) GoString() string { + return s.String() +} + +// Describes a Reserved Instance. +type ReservedInstances struct { + _ struct{} `type:"structure"` + + // The Availability Zone in which the Reserved Instance can be used. + AvailabilityZone *string `locationName:"availabilityZone" type:"string"` + + // The currency of the Reserved Instance. It's specified using ISO 4217 standard + // currency codes. At this time, the only supported currency is USD. + CurrencyCode *string `locationName:"currencyCode" type:"string" enum:"CurrencyCodeValues"` + + // The duration of the Reserved Instance, in seconds. + Duration *int64 `locationName:"duration" type:"long"` + + // The time when the Reserved Instance expires. + End *time.Time `locationName:"end" type:"timestamp" timestampFormat:"iso8601"` + + // The purchase price of the Reserved Instance. + FixedPrice *float64 `locationName:"fixedPrice" type:"float"` + + // The number of reservations purchased. + InstanceCount *int64 `locationName:"instanceCount" type:"integer"` + + // The tenancy of the instance. + InstanceTenancy *string `locationName:"instanceTenancy" type:"string" enum:"Tenancy"` + + // The instance type on which the Reserved Instance can be used. + InstanceType *string `locationName:"instanceType" type:"string" enum:"InstanceType"` + + // The Reserved Instance offering type. + OfferingType *string `locationName:"offeringType" type:"string" enum:"OfferingTypeValues"` + + // The Reserved Instance product platform description. + ProductDescription *string `locationName:"productDescription" type:"string" enum:"RIProductDescription"` + + // The recurring charge tag assigned to the resource. + RecurringCharges []*RecurringCharge `locationName:"recurringCharges" locationNameList:"item" type:"list"` + + // The ID of the Reserved Instance. + ReservedInstancesId *string `locationName:"reservedInstancesId" type:"string"` + + // The date and time the Reserved Instance started. + Start *time.Time `locationName:"start" type:"timestamp" timestampFormat:"iso8601"` + + // The state of the Reserved Instance purchase. 
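
ReservedInstanceLimitPrice above is consumed by PurchaseReservedInstancesOffering, defined earlier in this file. A sketch with a placeholder offering ID, again assuming the svc client from the first example:

    // Cap the total order at USD 100 for a single reservation.
    resp, err := svc.PurchaseReservedInstancesOffering(&ec2.PurchaseReservedInstancesOfferingInput{
        ReservedInstancesOfferingId: aws.String("4b2293b4-5813-4cc8-9ce3-1957fexample"), // placeholder
        InstanceCount:               aws.Int64(1),
        LimitPrice: &ec2.ReservedInstanceLimitPrice{
            Amount:       aws.Float64(100.0),
            CurrencyCode: aws.String("USD"), // the only supported currency, per the field doc
        },
    })
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println(aws.StringValue(resp.ReservedInstancesId))
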
+ State *string `locationName:"state" type:"string" enum:"ReservedInstanceState"` + + // Any tags assigned to the resource. + Tags []*Tag `locationName:"tagSet" locationNameList:"item" type:"list"` + + // The usage price of the Reserved Instance, per hour. + UsagePrice *float64 `locationName:"usagePrice" type:"float"` +} + +// String returns the string representation +func (s ReservedInstances) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ReservedInstances) GoString() string { + return s.String() +} + +// Describes the configuration settings for the modified Reserved Instances. +type ReservedInstancesConfiguration struct { + _ struct{} `type:"structure"` + + // The Availability Zone for the modified Reserved Instances. + AvailabilityZone *string `locationName:"availabilityZone" type:"string"` + + // The number of modified Reserved Instances. + InstanceCount *int64 `locationName:"instanceCount" type:"integer"` + + // The instance type for the modified Reserved Instances. + InstanceType *string `locationName:"instanceType" type:"string" enum:"InstanceType"` + + // The network platform of the modified Reserved Instances, which is either + // EC2-Classic or EC2-VPC. + Platform *string `locationName:"platform" type:"string"` +} + +// String returns the string representation +func (s ReservedInstancesConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ReservedInstancesConfiguration) GoString() string { + return s.String() +} + +// Describes the ID of a Reserved Instance. +type ReservedInstancesId struct { + _ struct{} `type:"structure"` + + // The ID of the Reserved Instance. + ReservedInstancesId *string `locationName:"reservedInstancesId" type:"string"` +} + +// String returns the string representation +func (s ReservedInstancesId) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ReservedInstancesId) GoString() string { + return s.String() +} + +// Describes a Reserved Instance listing. +type ReservedInstancesListing struct { + _ struct{} `type:"structure"` + + // A unique, case-sensitive key supplied by the client to ensure that the request + // is idempotent. For more information, see Ensuring Idempotency (http://docs.aws.amazon.com/AWSEC2/latest/APIReference/Run_Instance_Idempotency.html). + ClientToken *string `locationName:"clientToken" type:"string"` + + // The time the listing was created. + CreateDate *time.Time `locationName:"createDate" type:"timestamp" timestampFormat:"iso8601"` + + // The number of instances in this state. + InstanceCounts []*InstanceCount `locationName:"instanceCounts" locationNameList:"item" type:"list"` + + // The price of the Reserved Instance listing. + PriceSchedules []*PriceSchedule `locationName:"priceSchedules" locationNameList:"item" type:"list"` + + // The ID of the Reserved Instance. + ReservedInstancesId *string `locationName:"reservedInstancesId" type:"string"` + + // The ID of the Reserved Instance listing. + ReservedInstancesListingId *string `locationName:"reservedInstancesListingId" type:"string"` + + // The status of the Reserved Instance listing. + Status *string `locationName:"status" type:"string" enum:"ListingStatus"` + + // The reason for the current status of the Reserved Instance listing. The response + // can be blank. + StatusMessage *string `locationName:"statusMessage" type:"string"` + + // Any tags assigned to the resource. 
+ Tags []*Tag `locationName:"tagSet" locationNameList:"item" type:"list"` + + // The last modified timestamp of the listing. + UpdateDate *time.Time `locationName:"updateDate" type:"timestamp" timestampFormat:"iso8601"` +} + +// String returns the string representation +func (s ReservedInstancesListing) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ReservedInstancesListing) GoString() string { + return s.String() +} + +// Describes a Reserved Instance modification. +type ReservedInstancesModification struct { + _ struct{} `type:"structure"` + + // A unique, case-sensitive key supplied by the client to ensure that the request + // is idempotent. For more information, see Ensuring Idempotency (http://docs.aws.amazon.com/AWSEC2/latest/APIReference/Run_Instance_Idempotency.html). + ClientToken *string `locationName:"clientToken" type:"string"` + + // The time when the modification request was created. + CreateDate *time.Time `locationName:"createDate" type:"timestamp" timestampFormat:"iso8601"` + + // The time for the modification to become effective. + EffectiveDate *time.Time `locationName:"effectiveDate" type:"timestamp" timestampFormat:"iso8601"` + + // Contains target configurations along with their corresponding new Reserved + // Instance IDs. + ModificationResults []*ReservedInstancesModificationResult `locationName:"modificationResultSet" locationNameList:"item" type:"list"` + + // The IDs of one or more Reserved Instances. + ReservedInstancesIds []*ReservedInstancesId `locationName:"reservedInstancesSet" locationNameList:"item" type:"list"` + + // A unique ID for the Reserved Instance modification. + ReservedInstancesModificationId *string `locationName:"reservedInstancesModificationId" type:"string"` + + // The status of the Reserved Instances modification request. + Status *string `locationName:"status" type:"string"` + + // The reason for the status. + StatusMessage *string `locationName:"statusMessage" type:"string"` + + // The time when the modification request was last updated. + UpdateDate *time.Time `locationName:"updateDate" type:"timestamp" timestampFormat:"iso8601"` +} + +// String returns the string representation +func (s ReservedInstancesModification) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ReservedInstancesModification) GoString() string { + return s.String() +} + +type ReservedInstancesModificationResult struct { + _ struct{} `type:"structure"` + + // The ID for the Reserved Instances that were created as part of the modification + // request. This field is only available when the modification is fulfilled. + ReservedInstancesId *string `locationName:"reservedInstancesId" type:"string"` + + // The target Reserved Instances configurations supplied as part of the modification + // request. + TargetConfiguration *ReservedInstancesConfiguration `locationName:"targetConfiguration" type:"structure"` +} + +// String returns the string representation +func (s ReservedInstancesModificationResult) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ReservedInstancesModificationResult) GoString() string { + return s.String() +} + +// Describes a Reserved Instance offering. +type ReservedInstancesOffering struct { + _ struct{} `type:"structure"` + + // The Availability Zone in which the Reserved Instance can be used. 
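
ReservedInstancesConfiguration above is the target shape passed to ModifyReservedInstances, defined earlier in this file. A sketch that moves a single reservation to another Availability Zone; the reservation ID is a placeholder:

    resp, err := svc.ModifyReservedInstances(&ec2.ModifyReservedInstancesInput{
        ReservedInstancesIds: []*string{
            aws.String("d16f7a91-4d0f-4f19-9d7f-a74d2example"), // placeholder
        },
        TargetConfigurations: []*ec2.ReservedInstancesConfiguration{{
            AvailabilityZone: aws.String("us-east-1b"),
            InstanceCount:    aws.Int64(1),
            InstanceType:     aws.String("m3.medium"),
            Platform:         aws.String("EC2-VPC"),
        }},
    })
    if err != nil {
        log.Fatal(err)
    }
    // Poll DescribeReservedInstancesModifications with this ID to track progress.
    fmt.Println(aws.StringValue(resp.ReservedInstancesModificationId))
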
+ AvailabilityZone *string `locationName:"availabilityZone" type:"string"` + + // The currency of the Reserved Instance offering you are purchasing. It's specified + // using ISO 4217 standard currency codes. At this time, the only supported + // currency is USD. + CurrencyCode *string `locationName:"currencyCode" type:"string" enum:"CurrencyCodeValues"` + + // The duration of the Reserved Instance, in seconds. + Duration *int64 `locationName:"duration" type:"long"` + + // The purchase price of the Reserved Instance. + FixedPrice *float64 `locationName:"fixedPrice" type:"float"` + + // The tenancy of the instance. + InstanceTenancy *string `locationName:"instanceTenancy" type:"string" enum:"Tenancy"` + + // The instance type on which the Reserved Instance can be used. + InstanceType *string `locationName:"instanceType" type:"string" enum:"InstanceType"` + + // Indicates whether the offering is available through the Reserved Instance + // Marketplace (resale) or AWS. If it's a Reserved Instance Marketplace offering, + // this is true. + Marketplace *bool `locationName:"marketplace" type:"boolean"` + + // The Reserved Instance offering type. + OfferingType *string `locationName:"offeringType" type:"string" enum:"OfferingTypeValues"` + + // The pricing details of the Reserved Instance offering. + PricingDetails []*PricingDetail `locationName:"pricingDetailsSet" locationNameList:"item" type:"list"` + + // The Reserved Instance product platform description. + ProductDescription *string `locationName:"productDescription" type:"string" enum:"RIProductDescription"` + + // The recurring charge tag assigned to the resource. + RecurringCharges []*RecurringCharge `locationName:"recurringCharges" locationNameList:"item" type:"list"` + + // The ID of the Reserved Instance offering. + ReservedInstancesOfferingId *string `locationName:"reservedInstancesOfferingId" type:"string"` + + // The usage price of the Reserved Instance, per hour. + UsagePrice *float64 `locationName:"usagePrice" type:"float"` +} + +// String returns the string representation +func (s ReservedInstancesOffering) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ReservedInstancesOffering) GoString() string { + return s.String() +} + +type ResetImageAttributeInput struct { + _ struct{} `type:"structure"` + + // The attribute to reset (currently you can only reset the launch permission + // attribute). + Attribute *string `type:"string" required:"true" enum:"ResetImageAttributeName"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The ID of the AMI. 
+ ImageId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s ResetImageAttributeInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ResetImageAttributeInput) GoString() string { + return s.String() +} + +type ResetImageAttributeOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s ResetImageAttributeOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ResetImageAttributeOutput) GoString() string { + return s.String() +} + +type ResetInstanceAttributeInput struct { + _ struct{} `type:"structure"` + + // The attribute to reset. + Attribute *string `locationName:"attribute" type:"string" required:"true" enum:"InstanceAttributeName"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The ID of the instance. + InstanceId *string `locationName:"instanceId" type:"string" required:"true"` +} + +// String returns the string representation +func (s ResetInstanceAttributeInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ResetInstanceAttributeInput) GoString() string { + return s.String() +} + +type ResetInstanceAttributeOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s ResetInstanceAttributeOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ResetInstanceAttributeOutput) GoString() string { + return s.String() +} + +type ResetNetworkInterfaceAttributeInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The ID of the network interface. + NetworkInterfaceId *string `locationName:"networkInterfaceId" type:"string" required:"true"` + + // The source/destination checking attribute. Resets the value to true. + SourceDestCheck *string `locationName:"sourceDestCheck" type:"string"` +} + +// String returns the string representation +func (s ResetNetworkInterfaceAttributeInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ResetNetworkInterfaceAttributeInput) GoString() string { + return s.String() +} + +type ResetNetworkInterfaceAttributeOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s ResetNetworkInterfaceAttributeOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ResetNetworkInterfaceAttributeOutput) GoString() string { + return s.String() +} + +type ResetSnapshotAttributeInput struct { + _ struct{} `type:"structure"` + + // The attribute to reset. Currently, only the attribute for permission to create + // volumes can be reset. 
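
The Reset*AttributeInput shapes above all follow one pattern; a sketch using ResetInstanceAttribute, with a placeholder instance ID (sourceDestCheck is one of the resettable instance attributes, alongside kernel and ramdisk). The output struct is empty, so only the error is of interest:

    // Reset sourceDestCheck back to its default (true) on a NAT-style instance.
    _, err := svc.ResetInstanceAttribute(&ec2.ResetInstanceAttributeInput{
        InstanceId: aws.String("i-12345678"), // placeholder
        Attribute:  aws.String("sourceDestCheck"),
    })
    if err != nil {
        log.Fatal(err)
    }
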
+ Attribute *string `type:"string" required:"true" enum:"SnapshotAttributeName"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The ID of the snapshot. + SnapshotId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s ResetSnapshotAttributeInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ResetSnapshotAttributeInput) GoString() string { + return s.String() +} + +type ResetSnapshotAttributeOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s ResetSnapshotAttributeOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ResetSnapshotAttributeOutput) GoString() string { + return s.String() +} + +type RestoreAddressToClassicInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The Elastic IP address. + PublicIp *string `locationName:"publicIp" type:"string" required:"true"` +} + +// String returns the string representation +func (s RestoreAddressToClassicInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RestoreAddressToClassicInput) GoString() string { + return s.String() +} + +type RestoreAddressToClassicOutput struct { + _ struct{} `type:"structure"` + + // The Elastic IP address. + PublicIp *string `locationName:"publicIp" type:"string"` + + // The move status for the IP address. + Status *string `locationName:"status" type:"string" enum:"Status"` +} + +// String returns the string representation +func (s RestoreAddressToClassicOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RestoreAddressToClassicOutput) GoString() string { + return s.String() +} + +type RevokeSecurityGroupEgressInput struct { + _ struct{} `type:"structure"` + + // The CIDR IP address range. We recommend that you specify the CIDR range in + // a set of IP permissions instead. + CidrIp *string `locationName:"cidrIp" type:"string"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The start of port range for the TCP and UDP protocols, or an ICMP type number. + // We recommend that you specify the port range in a set of IP permissions instead. + FromPort *int64 `locationName:"fromPort" type:"integer"` + + // The ID of the security group. + GroupId *string `locationName:"groupId" type:"string" required:"true"` + + // A set of IP permissions. You can't specify a destination security group and + // a CIDR IP address range. 
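
For RestoreAddressToClassicInput above, a sketch with a placeholder Elastic IP; the output echoes the address and its move status:

    resp, err := svc.RestoreAddressToClassic(&ec2.RestoreAddressToClassicInput{
        PublicIp: aws.String("198.51.100.0"), // placeholder Elastic IP
    })
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println(aws.StringValue(resp.PublicIp), aws.StringValue(resp.Status))
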
+ IpPermissions []*IpPermission `locationName:"ipPermissions" locationNameList:"item" type:"list"` + + // The IP protocol name or number. We recommend that you specify the protocol + // in a set of IP permissions instead. + IpProtocol *string `locationName:"ipProtocol" type:"string"` + + // The name of a destination security group. To revoke outbound access to a + // destination security group, we recommend that you use a set of IP permissions + // instead. + SourceSecurityGroupName *string `locationName:"sourceSecurityGroupName" type:"string"` + + // The AWS account number for a destination security group. To revoke outbound + // access to a destination security group, we recommend that you use a set of + // IP permissions instead. + SourceSecurityGroupOwnerId *string `locationName:"sourceSecurityGroupOwnerId" type:"string"` + + // The end of port range for the TCP and UDP protocols, or an ICMP type number. + // We recommend that you specify the port range in a set of IP permissions instead. + ToPort *int64 `locationName:"toPort" type:"integer"` +} + +// String returns the string representation +func (s RevokeSecurityGroupEgressInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RevokeSecurityGroupEgressInput) GoString() string { + return s.String() +} + +type RevokeSecurityGroupEgressOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s RevokeSecurityGroupEgressOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RevokeSecurityGroupEgressOutput) GoString() string { + return s.String() +} + +type RevokeSecurityGroupIngressInput struct { + _ struct{} `type:"structure"` + + // The CIDR IP address range. You can't specify this parameter when specifying + // a source security group. + CidrIp *string `type:"string"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The start of port range for the TCP and UDP protocols, or an ICMP type number. + // For the ICMP type number, use -1 to specify all ICMP types. + FromPort *int64 `type:"integer"` + + // The ID of the security group. Required for a security group in a nondefault + // VPC. + GroupId *string `type:"string"` + + // [EC2-Classic, default VPC] The name of the security group. + GroupName *string `type:"string"` + + // A set of IP permissions. You can't specify a source security group and a + // CIDR IP address range. + IpPermissions []*IpPermission `locationNameList:"item" type:"list"` + + // The IP protocol name (tcp, udp, icmp) or number (see Protocol Numbers (http://www.iana.org/assignments/protocol-numbers/protocol-numbers.xhtml)). + // Use -1 to specify all. + IpProtocol *string `type:"string"` + + // [EC2-Classic, default VPC] The name of the source security group. You can't + // specify this parameter in combination with the following parameters: the + // CIDR IP address range, the start of the port range, the IP protocol, and + // the end of the port range. For EC2-VPC, the source security group must be + // in the same VPC. + SourceSecurityGroupName *string `type:"string"` + + // [EC2-Classic, default VPC] The AWS account ID of the source security group. 
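
The RevokeSecurityGroupEgressInput docs above repeatedly steer callers toward a set of IP permissions rather than the flat CidrIp/FromPort/ToPort fields; a sketch of that recommended form, with a placeholder group ID (IpPermission and IpRange are defined elsewhere in this file):

    // Revoke an egress rule that allows TCP 443 to 0.0.0.0/0.
    _, err := svc.RevokeSecurityGroupEgress(&ec2.RevokeSecurityGroupEgressInput{
        GroupId: aws.String("sg-12345678"), // placeholder
        IpPermissions: []*ec2.IpPermission{{
            IpProtocol: aws.String("tcp"),
            FromPort:   aws.Int64(443),
            ToPort:     aws.Int64(443),
            IpRanges:   []*ec2.IpRange{{CidrIp: aws.String("0.0.0.0/0")}},
        }},
    })
    if err != nil {
        log.Fatal(err)
    }
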
+ // For EC2-VPC, the source security group must be in the same VPC. You can't
+ // specify this parameter in combination with the following parameters: the
+ // CIDR IP address range, the IP protocol, the start of the port range, and
+ // the end of the port range. To revoke a specific rule for an IP protocol and
+ // port range, use a set of IP permissions instead.
+ SourceSecurityGroupOwnerId *string `type:"string"`
+
+ // The end of port range for the TCP and UDP protocols, or an ICMP code number.
+ // For the ICMP code number, use -1 to specify all ICMP codes for the ICMP type.
+ ToPort *int64 `type:"integer"`
+}
+
+// String returns the string representation
+func (s RevokeSecurityGroupIngressInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s RevokeSecurityGroupIngressInput) GoString() string {
+ return s.String()
+}
+
+type RevokeSecurityGroupIngressOutput struct {
+ _ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s RevokeSecurityGroupIngressOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s RevokeSecurityGroupIngressOutput) GoString() string {
+ return s.String()
+}
+
+// Describes a route in a route table.
+type Route struct {
+ _ struct{} `type:"structure"`
+
+ // The CIDR block used for the destination match.
+ DestinationCidrBlock *string `locationName:"destinationCidrBlock" type:"string"`
+
+ // The prefix of the AWS service.
+ DestinationPrefixListId *string `locationName:"destinationPrefixListId" type:"string"`
+
+ // The ID of a gateway attached to your VPC.
+ GatewayId *string `locationName:"gatewayId" type:"string"`
+
+ // The ID of a NAT instance in your VPC.
+ InstanceId *string `locationName:"instanceId" type:"string"`
+
+ // The AWS account ID of the owner of the instance.
+ InstanceOwnerId *string `locationName:"instanceOwnerId" type:"string"`
+
+ // The ID of a NAT gateway.
+ NatGatewayId *string `locationName:"natGatewayId" type:"string"`
+
+ // The ID of the network interface.
+ NetworkInterfaceId *string `locationName:"networkInterfaceId" type:"string"`
+
+ // Describes how the route was created.
+ //
+ // CreateRouteTable indicates that the route was automatically created when
+ // the route table was created. CreateRoute indicates that the route was manually
+ // added to the route table. EnableVgwRoutePropagation indicates that the route
+ // was propagated by route propagation.
+ Origin *string `locationName:"origin" type:"string" enum:"RouteOrigin"`
+
+ // The state of the route. The blackhole state indicates that the route's target
+ // isn't available (for example, the specified gateway isn't attached to the
+ // VPC, or the specified NAT instance has been terminated).
+ State *string `locationName:"state" type:"string" enum:"RouteState"`
+
+ // The ID of the VPC peering connection.
+ VpcPeeringConnectionId *string `locationName:"vpcPeeringConnectionId" type:"string"`
+}
+
+// String returns the string representation
+func (s Route) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s Route) GoString() string {
+ return s.String()
+}
+
+// Describes a route table.
+type RouteTable struct {
+ _ struct{} `type:"structure"`
+
+ // The associations between the route table and one or more subnets.
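
Route and RouteTable are output-side shapes; a sketch that reads them back through DescribeRouteTables (defined earlier in this file), dereferencing the pointer fields with the aws.StringValue helper:

    resp, err := svc.DescribeRouteTables(&ec2.DescribeRouteTablesInput{})
    if err != nil {
        log.Fatal(err)
    }
    for _, rt := range resp.RouteTables {
        for _, r := range rt.Routes {
            // Origin distinguishes implicit routes (CreateRouteTable) from
            // manual (CreateRoute) and propagated (EnableVgwRoutePropagation) ones.
            fmt.Println(aws.StringValue(r.DestinationCidrBlock),
                aws.StringValue(r.State), aws.StringValue(r.Origin))
        }
    }
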
+ Associations []*RouteTableAssociation `locationName:"associationSet" locationNameList:"item" type:"list"` + + // Any virtual private gateway (VGW) propagating routes. + PropagatingVgws []*PropagatingVgw `locationName:"propagatingVgwSet" locationNameList:"item" type:"list"` + + // The ID of the route table. + RouteTableId *string `locationName:"routeTableId" type:"string"` + + // The routes in the route table. + Routes []*Route `locationName:"routeSet" locationNameList:"item" type:"list"` + + // Any tags assigned to the route table. + Tags []*Tag `locationName:"tagSet" locationNameList:"item" type:"list"` + + // The ID of the VPC. + VpcId *string `locationName:"vpcId" type:"string"` +} + +// String returns the string representation +func (s RouteTable) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RouteTable) GoString() string { + return s.String() +} + +// Describes an association between a route table and a subnet. +type RouteTableAssociation struct { + _ struct{} `type:"structure"` + + // Indicates whether this is the main route table. + Main *bool `locationName:"main" type:"boolean"` + + // The ID of the association between a route table and a subnet. + RouteTableAssociationId *string `locationName:"routeTableAssociationId" type:"string"` + + // The ID of the route table. + RouteTableId *string `locationName:"routeTableId" type:"string"` + + // The ID of the subnet. A subnet ID is not returned for an implicit association. + SubnetId *string `locationName:"subnetId" type:"string"` +} + +// String returns the string representation +func (s RouteTableAssociation) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RouteTableAssociation) GoString() string { + return s.String() +} + +type RunInstancesInput struct { + _ struct{} `type:"structure"` + + // Reserved. + AdditionalInfo *string `locationName:"additionalInfo" type:"string"` + + // The block device mapping. + BlockDeviceMappings []*BlockDeviceMapping `locationName:"BlockDeviceMapping" locationNameList:"BlockDeviceMapping" type:"list"` + + // Unique, case-sensitive identifier you provide to ensure the idempotency of + // the request. For more information, see Ensuring Idempotency (http://docs.aws.amazon.com/AWSEC2/latest/APIReference/Run_Instance_Idempotency.html). + // + // Constraints: Maximum 64 ASCII characters + ClientToken *string `locationName:"clientToken" type:"string"` + + // If you set this parameter to true, you can't terminate the instance using + // the Amazon EC2 console, CLI, or API; otherwise, you can. If you set this + // parameter to true and then later want to be able to terminate the instance, + // you must first change the value of the disableApiTermination attribute to + // false using ModifyInstanceAttribute. Alternatively, if you set InstanceInitiatedShutdownBehavior + // to terminate, you can terminate the instance by running the shutdown command + // from the instance. + // + // Default: false + DisableApiTermination *bool `locationName:"disableApiTermination" type:"boolean"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // Indicates whether the instance is optimized for EBS I/O. 
This optimization + // provides dedicated throughput to Amazon EBS and an optimized configuration + // stack to provide optimal EBS I/O performance. This optimization isn't available + // with all instance types. Additional usage charges apply when using an EBS-optimized + // instance. + // + // Default: false + EbsOptimized *bool `locationName:"ebsOptimized" type:"boolean"` + + // The IAM instance profile. + IamInstanceProfile *IamInstanceProfileSpecification `locationName:"iamInstanceProfile" type:"structure"` + + // The ID of the AMI, which you can get by calling DescribeImages. + ImageId *string `type:"string" required:"true"` + + // Indicates whether an instance stops or terminates when you initiate shutdown + // from the instance (using the operating system command for system shutdown). + // + // Default: stop + InstanceInitiatedShutdownBehavior *string `locationName:"instanceInitiatedShutdownBehavior" type:"string" enum:"ShutdownBehavior"` + + // The instance type. For more information, see Instance Types (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-types.html) + // in the Amazon Elastic Compute Cloud User Guide. + // + // Default: m1.small + InstanceType *string `type:"string" enum:"InstanceType"` + + // The ID of the kernel. + // + // We recommend that you use PV-GRUB instead of kernels and RAM disks. For + // more information, see PV-GRUB (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/UserProvidedkernels.html) + // in the Amazon Elastic Compute Cloud User Guide. + KernelId *string `type:"string"` + + // The name of the key pair. You can create a key pair using CreateKeyPair or + // ImportKeyPair. + // + // If you do not specify a key pair, you can't connect to the instance unless + // you choose an AMI that is configured to allow users another way to log in. + KeyName *string `type:"string"` + + // The maximum number of instances to launch. If you specify more instances + // than Amazon EC2 can launch in the target Availability Zone, Amazon EC2 launches + // the largest possible number of instances above MinCount. + // + // Constraints: Between 1 and the maximum number you're allowed for the specified + // instance type. For more information about the default limits, and how to + // request an increase, see How many instances can I run in Amazon EC2 (http://aws.amazon.com/ec2/faqs/#How_many_instances_can_I_run_in_Amazon_EC2) + // in the Amazon EC2 General FAQ. + MaxCount *int64 `type:"integer" required:"true"` + + // The minimum number of instances to launch. If you specify a minimum that + // is more instances than Amazon EC2 can launch in the target Availability Zone, + // Amazon EC2 launches no instances. + // + // Constraints: Between 1 and the maximum number you're allowed for the specified + // instance type. For more information about the default limits, and how to + // request an increase, see How many instances can I run in Amazon EC2 (http://aws.amazon.com/ec2/faqs/#How_many_instances_can_I_run_in_Amazon_EC2) + // in the Amazon EC2 General FAQ. + MinCount *int64 `type:"integer" required:"true"` + + // The monitoring for the instance. + Monitoring *RunInstancesMonitoringEnabled `type:"structure"` + + // One or more network interfaces. + NetworkInterfaces []*InstanceNetworkInterfaceSpecification `locationName:"networkInterface" locationNameList:"item" type:"list"` + + // The placement for the instance. + Placement *Placement `type:"structure"` + + // [EC2-VPC] The primary IP address. 
You must specify a value from the IP address + // range of the subnet. + // + // Only one private IP address can be designated as primary. Therefore, you + // can't specify this parameter if PrivateIpAddresses.n.Primary is set to true + // and PrivateIpAddresses.n.PrivateIpAddress is set to an IP address. + // + // Default: We select an IP address from the IP address range of the subnet. + PrivateIpAddress *string `locationName:"privateIpAddress" type:"string"` + + // The ID of the RAM disk. + // + // We recommend that you use PV-GRUB instead of kernels and RAM disks. For + // more information, see PV-GRUB (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/UserProvidedkernels.html) + // in the Amazon Elastic Compute Cloud User Guide. + RamdiskId *string `type:"string"` + + // One or more security group IDs. You can create a security group using CreateSecurityGroup. + // + // Default: Amazon EC2 uses the default security group. + SecurityGroupIds []*string `locationName:"SecurityGroupId" locationNameList:"SecurityGroupId" type:"list"` + + // [EC2-Classic, default VPC] One or more security group names. For a nondefault + // VPC, you must use security group IDs instead. + // + // Default: Amazon EC2 uses the default security group. + SecurityGroups []*string `locationName:"SecurityGroup" locationNameList:"SecurityGroup" type:"list"` + + // [EC2-VPC] The ID of the subnet to launch the instance into. + SubnetId *string `type:"string"` + + // Data to configure the instance, or a script to run during instance launch. + // For more information, see Running Commands on Your Linux Instance at Launch + // (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/user-data.html) (Linux) + // and Adding User Data (http://docs.aws.amazon.com/AWSEC2/latest/WindowsGuide/ec2-instance-metadata.html#instancedata-add-user-data) + // (Windows). For API calls, the text must be base64-encoded. Command line tools + // perform encoding for you. + UserData *string `type:"string"` +} + +// String returns the string representation +func (s RunInstancesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RunInstancesInput) GoString() string { + return s.String() +} + +// Describes the monitoring for the instance. +type RunInstancesMonitoringEnabled struct { + _ struct{} `type:"structure"` + + // Indicates whether monitoring is enabled for the instance. + Enabled *bool `locationName:"enabled" type:"boolean" required:"true"` +} + +// String returns the string representation +func (s RunInstancesMonitoringEnabled) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RunInstancesMonitoringEnabled) GoString() string { + return s.String() +} + +// Contains the parameters for RunScheduledInstances. +type RunScheduledInstancesInput struct { + _ struct{} `type:"structure"` + + // Unique, case-sensitive identifier that ensures the idempotency of the request. + // For more information, see Ensuring Idempotency (http://docs.aws.amazon.com/AWSEC2/latest/APIReference/Run_Instance_Idempotency.html). + ClientToken *string `type:"string"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // The number of instances. 
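
For RunInstancesInput above, a sketch that launches exactly one instance: setting MinCount and MaxCount to the same value makes the call all-or-nothing. The AMI, key pair, and subnet IDs are placeholders, and svc is the client from the first example.

    resp, err := svc.RunInstances(&ec2.RunInstancesInput{
        ImageId:      aws.String("ami-12345678"), // placeholder AMI, required
        InstanceType: aws.String("t2.micro"),
        MinCount:     aws.Int64(1), // launch exactly one instance...
        MaxCount:     aws.Int64(1), // ...or fail if capacity is unavailable
        KeyName:      aws.String("my-key"),          // placeholder key pair
        SubnetId:     aws.String("subnet-12345678"), // placeholder [EC2-VPC]
        Monitoring:   &ec2.RunInstancesMonitoringEnabled{Enabled: aws.Bool(true)},
    })
    if err != nil {
        log.Fatal(err)
    }
    // RunInstances returns a Reservation (see the type above).
    for _, inst := range resp.Instances {
        fmt.Println(aws.StringValue(inst.InstanceId))
    }
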
+ //
+ // Default: 1
+ InstanceCount *int64 `type:"integer"`
+
+ // The launch specification.
+ LaunchSpecification *ScheduledInstancesLaunchSpecification `type:"structure" required:"true"`
+
+ // The Scheduled Instance ID.
+ ScheduledInstanceId *string `type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s RunScheduledInstancesInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s RunScheduledInstancesInput) GoString() string {
+ return s.String()
+}
+
+// Contains the output of RunScheduledInstances.
+type RunScheduledInstancesOutput struct {
+ _ struct{} `type:"structure"`
+
+ // The IDs of the newly launched instances.
+ InstanceIdSet []*string `locationName:"instanceIdSet" locationNameList:"item" type:"list"`
+}
+
+// String returns the string representation
+func (s RunScheduledInstancesOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s RunScheduledInstancesOutput) GoString() string {
+ return s.String()
+}
+
+// Describes the storage parameters for Amazon S3 buckets for an instance store-backed
+// AMI.
+type S3Storage struct {
+ _ struct{} `type:"structure"`
+
+ // The access key ID of the owner of the bucket. Before you specify a value
+ // for your access key ID, review and follow the guidance in Best Practices
+ // for Managing AWS Access Keys (http://docs.aws.amazon.com/general/latest/gr/aws-access-keys-best-practices.html).
+ AWSAccessKeyId *string `type:"string"`
+
+ // The bucket in which to store the AMI. You can specify a bucket that you already
+ // own or a new bucket that Amazon EC2 creates on your behalf. If you specify
+ // a bucket that belongs to someone else, Amazon EC2 returns an error.
+ Bucket *string `locationName:"bucket" type:"string"`
+
+ // The beginning of the file name of the AMI.
+ Prefix *string `locationName:"prefix" type:"string"`
+
+ // A Base64-encoded Amazon S3 upload policy that gives Amazon EC2 permission
+ // to upload items into Amazon S3 on your behalf.
+ UploadPolicy []byte `locationName:"uploadPolicy" type:"blob"`
+
+ // The signature of the Base64-encoded JSON document.
+ UploadPolicySignature *string `locationName:"uploadPolicySignature" type:"string"`
+}
+
+// String returns the string representation
+func (s S3Storage) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s S3Storage) GoString() string {
+ return s.String()
+}
+
+// Describes a Scheduled Instance.
+type ScheduledInstance struct {
+ _ struct{} `type:"structure"`
+
+ // The Availability Zone.
+ AvailabilityZone *string `locationName:"availabilityZone" type:"string"`
+
+ // The date when the Scheduled Instance was purchased.
+ CreateDate *time.Time `locationName:"createDate" type:"timestamp" timestampFormat:"iso8601"`
+
+ // The hourly price for a single instance.
+ HourlyPrice *string `locationName:"hourlyPrice" type:"string"`
+
+ // The number of instances.
+ InstanceCount *int64 `locationName:"instanceCount" type:"integer"`
+
+ // The instance type.
+ InstanceType *string `locationName:"instanceType" type:"string"`
+
+ // The network platform (EC2-Classic or EC2-VPC).
+ NetworkPlatform *string `locationName:"networkPlatform" type:"string"`
+
+ // The time for the next schedule to start.
+ NextSlotStartTime *time.Time `locationName:"nextSlotStartTime" type:"timestamp" timestampFormat:"iso8601"`
+
+ // The platform (Linux/UNIX or Windows).
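
For RunScheduledInstancesInput above, a sketch with placeholder IDs; ImageId is the only required field of the launch specification, which is defined later in this file:

    resp, err := svc.RunScheduledInstances(&ec2.RunScheduledInstancesInput{
        ScheduledInstanceId: aws.String("sci-1234-1234-1234-1234-123456789012"), // placeholder
        InstanceCount:       aws.Int64(1),
        LaunchSpecification: &ec2.ScheduledInstancesLaunchSpecification{
            ImageId:      aws.String("ami-12345678"), // placeholder AMI
            InstanceType: aws.String("c4.large"),
            KeyName:      aws.String("my-key"), // placeholder key pair
        },
    })
    if err != nil {
        log.Fatal(err)
    }
    for _, id := range resp.InstanceIdSet {
        fmt.Println(aws.StringValue(id))
    }
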
+ Platform *string `locationName:"platform" type:"string"` + + // The time that the previous schedule ended or will end. + PreviousSlotEndTime *time.Time `locationName:"previousSlotEndTime" type:"timestamp" timestampFormat:"iso8601"` + + // The schedule recurrence. + Recurrence *ScheduledInstanceRecurrence `locationName:"recurrence" type:"structure"` + + // The Scheduled Instance ID. + ScheduledInstanceId *string `locationName:"scheduledInstanceId" type:"string"` + + // The number of hours in the schedule. + SlotDurationInHours *int64 `locationName:"slotDurationInHours" type:"integer"` + + // The end date for the Scheduled Instance. + TermEndDate *time.Time `locationName:"termEndDate" type:"timestamp" timestampFormat:"iso8601"` + + // The start date for the Scheduled Instance. + TermStartDate *time.Time `locationName:"termStartDate" type:"timestamp" timestampFormat:"iso8601"` + + // The total number of hours for a single instance for the entire term. + TotalScheduledInstanceHours *int64 `locationName:"totalScheduledInstanceHours" type:"integer"` +} + +// String returns the string representation +func (s ScheduledInstance) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ScheduledInstance) GoString() string { + return s.String() +} + +// Describes a schedule that is available for your Scheduled Instances. +type ScheduledInstanceAvailability struct { + _ struct{} `type:"structure"` + + // The Availability Zone. + AvailabilityZone *string `locationName:"availabilityZone" type:"string"` + + // The number of available instances. + AvailableInstanceCount *int64 `locationName:"availableInstanceCount" type:"integer"` + + // The time period for the first schedule to start. + FirstSlotStartTime *time.Time `locationName:"firstSlotStartTime" type:"timestamp" timestampFormat:"iso8601"` + + // The hourly price for a single instance. + HourlyPrice *string `locationName:"hourlyPrice" type:"string"` + + // The instance type. You can specify one of the C3, C4, M4, or R3 instance + // types. + InstanceType *string `locationName:"instanceType" type:"string"` + + // The maximum term. The only possible value is 365 days. + MaxTermDurationInDays *int64 `locationName:"maxTermDurationInDays" type:"integer"` + + // The minimum term. The only possible value is 365 days. + MinTermDurationInDays *int64 `locationName:"minTermDurationInDays" type:"integer"` + + // The network platform (EC2-Classic or EC2-VPC). + NetworkPlatform *string `locationName:"networkPlatform" type:"string"` + + // The platform (Linux/UNIX or Windows). + Platform *string `locationName:"platform" type:"string"` + + // The purchase token. This token expires in two hours. + PurchaseToken *string `locationName:"purchaseToken" type:"string"` + + // The schedule recurrence. + Recurrence *ScheduledInstanceRecurrence `locationName:"recurrence" type:"structure"` + + // The number of hours in the schedule. + SlotDurationInHours *int64 `locationName:"slotDurationInHours" type:"integer"` + + // The total number of hours for a single instance for the entire term. + TotalScheduledInstanceHours *int64 `locationName:"totalScheduledInstanceHours" type:"integer"` +} + +// String returns the string representation +func (s ScheduledInstanceAvailability) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ScheduledInstanceAvailability) GoString() string { + return s.String() +} + +// Describes the recurring schedule for a Scheduled Instance. 
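
ScheduledInstanceRecurrence below is the output twin of ScheduledInstanceRecurrenceRequest. A sketch of the request side, querying weekly Sunday capacity through DescribeScheduledInstanceAvailability (defined earlier in this file) together with the SlotDateTimeRangeRequest defined near the end of this section; assumes the standard time package is imported alongside the setup from the first example:

    // Look for capacity that recurs every Sunday, with a first slot starting
    // between one day and one week from now.
    resp, err := svc.DescribeScheduledInstanceAvailability(&ec2.DescribeScheduledInstanceAvailabilityInput{
        FirstSlotStartTimeRange: &ec2.SlotDateTimeRangeRequest{
            EarliestTime: aws.Time(time.Now().Add(24 * time.Hour)),
            LatestTime:   aws.Time(time.Now().Add(7 * 24 * time.Hour)),
        },
        Recurrence: &ec2.ScheduledInstanceRecurrenceRequest{
            Frequency:      aws.String("Weekly"),
            Interval:       aws.Int64(1),
            OccurrenceDays: []*int64{aws.Int64(1)}, // 1 = Sunday, per the field doc
        },
    })
    if err != nil {
        log.Fatal(err)
    }
    for _, a := range resp.ScheduledInstanceAvailabilitySet {
        fmt.Println(aws.StringValue(a.InstanceType), aws.StringValue(a.HourlyPrice))
    }
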
+type ScheduledInstanceRecurrence struct { + _ struct{} `type:"structure"` + + // The frequency (Daily, Weekly, or Monthly). + Frequency *string `locationName:"frequency" type:"string"` + + // The interval quantity. The interval unit depends on the value of frequency. + // For example, every 2 weeks or every 2 months. + Interval *int64 `locationName:"interval" type:"integer"` + + // The days. For a monthly schedule, this is one or more days of the month (1-31). + // For a weekly schedule, this is one or more days of the week (1-7, where 1 + // is Sunday). + OccurrenceDaySet []*int64 `locationName:"occurrenceDaySet" locationNameList:"item" type:"list"` + + // Indicates whether the occurrence is relative to the end of the specified + // week or month. + OccurrenceRelativeToEnd *bool `locationName:"occurrenceRelativeToEnd" type:"boolean"` + + // The unit for occurrenceDaySet (DayOfWeek or DayOfMonth). + OccurrenceUnit *string `locationName:"occurrenceUnit" type:"string"` +} + +// String returns the string representation +func (s ScheduledInstanceRecurrence) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ScheduledInstanceRecurrence) GoString() string { + return s.String() +} + +// Describes the recurring schedule for a Scheduled Instance. +type ScheduledInstanceRecurrenceRequest struct { + _ struct{} `type:"structure"` + + // The frequency (Daily, Weekly, or Monthly). + Frequency *string `type:"string"` + + // The interval quantity. The interval unit depends on the value of Frequency. + // For example, every 2 weeks or every 2 months. + Interval *int64 `type:"integer"` + + // The days. For a monthly schedule, this is one or more days of the month (1-31). + // For a weekly schedule, this is one or more days of the week (1-7, where 1 + // is Sunday). You can't specify this value with a daily schedule. If the occurrence + // is relative to the end of the month, you can specify only a single day. + OccurrenceDays []*int64 `locationName:"OccurrenceDay" locationNameList:"OccurenceDay" type:"list"` + + // Indicates whether the occurrence is relative to the end of the specified + // week or month. You can't specify this value with a daily schedule. + OccurrenceRelativeToEnd *bool `type:"boolean"` + + // The unit for OccurrenceDays (DayOfWeek or DayOfMonth). This value is required + // for a monthly schedule. You can't specify DayOfWeek with a weekly schedule. + // You can't specify this value with a daily schedule. + OccurrenceUnit *string `type:"string"` +} + +// String returns the string representation +func (s ScheduledInstanceRecurrenceRequest) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ScheduledInstanceRecurrenceRequest) GoString() string { + return s.String() +} + +// Describes a block device mapping for a Scheduled Instance. +type ScheduledInstancesBlockDeviceMapping struct { + _ struct{} `type:"structure"` + + // The device name exposed to the instance (for example, /dev/sdh or xvdh). + DeviceName *string `type:"string"` + + // Parameters used to set up EBS volumes automatically when the instance is + // launched. + Ebs *ScheduledInstancesEbs `type:"structure"` + + // Suppresses the specified device included in the block device mapping of the + // AMI. + NoDevice *string `type:"string"` + + // The virtual device name (ephemeralN). Instance store volumes are numbered + // starting from 0. 
An instance type with two available instance store volumes
+ // can specify mappings for ephemeral0 and ephemeral1. The number of available
+ // instance store volumes depends on the instance type. After you connect to
+ // the instance, you must mount the volume.
+ //
+ // Constraints: For M3 instances, you must specify instance store volumes in
+ // the block device mapping for the instance. When you launch an M3 instance,
+ // we ignore any instance store volumes specified in the block device mapping
+ // for the AMI.
+ VirtualName *string `type:"string"`
+}
+
+// String returns the string representation
+func (s ScheduledInstancesBlockDeviceMapping) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ScheduledInstancesBlockDeviceMapping) GoString() string {
+ return s.String()
+}
+
+// Describes an EBS volume for a Scheduled Instance.
+type ScheduledInstancesEbs struct {
+ _ struct{} `type:"structure"`
+
+ // Indicates whether the volume is deleted on instance termination.
+ DeleteOnTermination *bool `type:"boolean"`
+
+ // Indicates whether the volume is encrypted. You can attach encrypted volumes
+ // only to instances that support them.
+ Encrypted *bool `type:"boolean"`
+
+ // The number of I/O operations per second (IOPS) that the volume supports.
+ // For Provisioned IOPS (SSD) volumes, this represents the number of IOPS that
+ // are provisioned for the volume. For General Purpose (SSD) volumes, this represents
+ // the baseline performance of the volume and the rate at which the volume accumulates
+ // I/O credits for bursting. For more information about General Purpose (SSD)
+ // baseline performance, I/O credits, and bursting, see Amazon EBS Volume Types
+ // (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSVolumeTypes.html)
+ // in the Amazon Elastic Compute Cloud User Guide.
+ //
+ // Constraint: Range is 100 to 20000 for Provisioned IOPS (SSD) volumes and
+ // 3 to 10000 for General Purpose (SSD) volumes.
+ //
+ // Condition: This parameter is required for requests to create io1 volumes;
+ // it is not used in requests to create standard or gp2 volumes.
+ Iops *int64 `type:"integer"`
+
+ // The ID of the snapshot.
+ SnapshotId *string `type:"string"`
+
+ // The size of the volume, in GiB.
+ //
+ // Default: If you're creating the volume from a snapshot and don't specify
+ // a volume size, the default is the snapshot size.
+ VolumeSize *int64 `type:"integer"`
+
+ // The volume type. gp2 for General Purpose (SSD) volumes, io1 for Provisioned
+ // IOPS (SSD) volumes, and standard for Magnetic volumes.
+ //
+ // Default: standard
+ VolumeType *string `type:"string"`
+}
+
+// String returns the string representation
+func (s ScheduledInstancesEbs) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ScheduledInstancesEbs) GoString() string {
+ return s.String()
+}
+
+// Describes an IAM instance profile for a Scheduled Instance.
+type ScheduledInstancesIamInstanceProfile struct {
+ _ struct{} `type:"structure"`
+
+ // The Amazon Resource Name (ARN).
+ Arn *string `type:"string"`
+
+ // The name.
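
Tying ScheduledInstancesBlockDeviceMapping and ScheduledInstancesEbs above together: a sketch of a mapping that would slot into the BlockDeviceMappings list of ScheduledInstancesLaunchSpecification (defined just below), staying within the documented Iops/VolumeType constraints. The device name is a placeholder.

    // A 100 GiB gp2 root volume; Iops is omitted because, per the Condition
    // note above, it is only used when creating io1 volumes.
    mapping := &ec2.ScheduledInstancesBlockDeviceMapping{
        DeviceName: aws.String("/dev/sda1"), // placeholder device name
        Ebs: &ec2.ScheduledInstancesEbs{
            DeleteOnTermination: aws.Bool(true),
            VolumeSize:          aws.Int64(100),
            VolumeType:          aws.String("gp2"),
        },
    }
    _ = mapping // would be appended to ScheduledInstancesLaunchSpecification.BlockDeviceMappings
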
+ Name *string `type:"string"` +} + +// String returns the string representation +func (s ScheduledInstancesIamInstanceProfile) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ScheduledInstancesIamInstanceProfile) GoString() string { + return s.String() +} + +// Describes the launch specification for a Scheduled Instance. +type ScheduledInstancesLaunchSpecification struct { + _ struct{} `type:"structure"` + + // One or more block device mapping entries. + BlockDeviceMappings []*ScheduledInstancesBlockDeviceMapping `locationName:"BlockDeviceMapping" locationNameList:"BlockDeviceMapping" type:"list"` + + // Indicates whether the instances are optimized for EBS I/O. This optimization + // provides dedicated throughput to Amazon EBS and an optimized configuration + // stack to provide optimal EBS I/O performance. This optimization isn't available + // with all instance types. Additional usage charges apply when using an EBS-optimized + // instance. + // + // Default: false + EbsOptimized *bool `type:"boolean"` + + // The IAM instance profile. + IamInstanceProfile *ScheduledInstancesIamInstanceProfile `type:"structure"` + + // The ID of the Amazon Machine Image (AMI). + ImageId *string `type:"string" required:"true"` + + // The instance type. + InstanceType *string `type:"string"` + + // The ID of the kernel. + KernelId *string `type:"string"` + + // The name of the key pair. + KeyName *string `type:"string"` + + // Enable or disable monitoring for the instances. + Monitoring *ScheduledInstancesMonitoring `type:"structure"` + + // One or more network interfaces. + NetworkInterfaces []*ScheduledInstancesNetworkInterface `locationName:"NetworkInterface" locationNameList:"NetworkInterface" type:"list"` + + // The placement information. + Placement *ScheduledInstancesPlacement `type:"structure"` + + // The ID of the RAM disk. + RamdiskId *string `type:"string"` + + // The IDs of one or more security groups. + SecurityGroupIds []*string `locationName:"SecurityGroupId" locationNameList:"SecurityGroupId" type:"list"` + + // The ID of the subnet in which to launch the instances. + SubnetId *string `type:"string"` + + // The base64-encoded MIME user data. + UserData *string `type:"string"` +} + +// String returns the string representation +func (s ScheduledInstancesLaunchSpecification) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ScheduledInstancesLaunchSpecification) GoString() string { + return s.String() +} + +// Describes whether monitoring is enabled for a Scheduled Instance. +type ScheduledInstancesMonitoring struct { + _ struct{} `type:"structure"` + + // Indicates whether monitoring is enabled. + Enabled *bool `type:"boolean"` +} + +// String returns the string representation +func (s ScheduledInstancesMonitoring) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ScheduledInstancesMonitoring) GoString() string { + return s.String() +} + +// Describes a network interface for a Scheduled Instance. +type ScheduledInstancesNetworkInterface struct { + _ struct{} `type:"structure"` + + // Indicates whether to assign a public IP address to instances launched in + // a VPC. The public IP address can only be assigned to a network interface + // for eth0, and can only be assigned to a new network interface, not an existing + // one. You cannot specify more than one network interface in the request. 
If + // launching into a default subnet, the default value is true. + AssociatePublicIpAddress *bool `type:"boolean"` + + // Indicates whether to delete the interface when the instance is terminated. + DeleteOnTermination *bool `type:"boolean"` + + // The description. + Description *string `type:"string"` + + // The index of the device for the network interface attachment. + DeviceIndex *int64 `type:"integer"` + + // The IDs of one or more security groups. + Groups []*string `locationName:"Group" locationNameList:"SecurityGroupId" type:"list"` + + // The ID of the network interface. + NetworkInterfaceId *string `type:"string"` + + // The IP address of the network interface within the subnet. + PrivateIpAddress *string `type:"string"` + + // The private IP addresses. + PrivateIpAddressConfigs []*ScheduledInstancesPrivateIpAddressConfig `locationName:"PrivateIpAddressConfig" locationNameList:"PrivateIpAddressConfigSet" type:"list"` + + // The number of secondary private IP addresses. + SecondaryPrivateIpAddressCount *int64 `type:"integer"` + + // The ID of the subnet. + SubnetId *string `type:"string"` +} + +// String returns the string representation +func (s ScheduledInstancesNetworkInterface) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ScheduledInstancesNetworkInterface) GoString() string { + return s.String() +} + +// Describes the placement for a Scheduled Instance. +type ScheduledInstancesPlacement struct { + _ struct{} `type:"structure"` + + // The Availability Zone. + AvailabilityZone *string `type:"string"` + + // The name of the placement group. + GroupName *string `type:"string"` +} + +// String returns the string representation +func (s ScheduledInstancesPlacement) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ScheduledInstancesPlacement) GoString() string { + return s.String() +} + +// Describes a private IP address for a Scheduled Instance. +type ScheduledInstancesPrivateIpAddressConfig struct { + _ struct{} `type:"structure"` + + // Indicates whether this is a primary IP address. Otherwise, this is a secondary + // IP address. + Primary *bool `type:"boolean"` + + // The IP address. + PrivateIpAddress *string `type:"string"` +} + +// String returns the string representation +func (s ScheduledInstancesPrivateIpAddressConfig) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ScheduledInstancesPrivateIpAddressConfig) GoString() string { + return s.String() +} + +// Describes a security group +type SecurityGroup struct { + _ struct{} `type:"structure"` + + // A description of the security group. + Description *string `locationName:"groupDescription" type:"string"` + + // The ID of the security group. + GroupId *string `locationName:"groupId" type:"string"` + + // The name of the security group. + GroupName *string `locationName:"groupName" type:"string"` + + // One or more inbound rules associated with the security group. + IpPermissions []*IpPermission `locationName:"ipPermissions" locationNameList:"item" type:"list"` + + // [EC2-VPC] One or more outbound rules associated with the security group. + IpPermissionsEgress []*IpPermission `locationName:"ipPermissionsEgress" locationNameList:"item" type:"list"` + + // The AWS account ID of the owner of the security group. + OwnerId *string `locationName:"ownerId" type:"string"` + + // Any tags assigned to the security group. 
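+ // For example (illustrative): a tag with key "Name" and value "web-sg".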
+ Tags []*Tag `locationName:"tagSet" locationNameList:"item" type:"list"` + + // [EC2-VPC] The ID of the VPC for the security group. + VpcId *string `locationName:"vpcId" type:"string"` +} + +// String returns the string representation +func (s SecurityGroup) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SecurityGroup) GoString() string { + return s.String() +} + +// Describes the time period for a Scheduled Instance to start its first schedule. +// The time period must span less than one day. +type SlotDateTimeRangeRequest struct { + _ struct{} `type:"structure"` + + // The earliest date and time, in UTC, for the Scheduled Instance to start. + EarliestTime *time.Time `type:"timestamp" timestampFormat:"iso8601" required:"true"` + + // The latest date and time, in UTC, for the Scheduled Instance to start. This + // value must be later than or equal to the earliest date and at most three + // months in the future. + LatestTime *time.Time `type:"timestamp" timestampFormat:"iso8601" required:"true"` +} + +// String returns the string representation +func (s SlotDateTimeRangeRequest) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SlotDateTimeRangeRequest) GoString() string { + return s.String() +} + +// Describes the time period for a Scheduled Instance to start its first schedule. +type SlotStartTimeRangeRequest struct { + _ struct{} `type:"structure"` + + // The earliest date and time, in UTC, for the Scheduled Instance to start. + EarliestTime *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + // The latest date and time, in UTC, for the Scheduled Instance to start. + LatestTime *time.Time `type:"timestamp" timestampFormat:"iso8601"` +} + +// String returns the string representation +func (s SlotStartTimeRangeRequest) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SlotStartTimeRangeRequest) GoString() string { + return s.String() +} + +// Describes a snapshot. +type Snapshot struct { + _ struct{} `type:"structure"` + + // The data encryption key identifier for the snapshot. This value is a unique + // identifier that corresponds to the data encryption key that was used to encrypt + // the original volume or snapshot copy. Because data encryption keys are inherited + // by volumes created from snapshots, and vice versa, if snapshots share the + // same data encryption key identifier, then they belong to the same volume/snapshot + // lineage. This parameter is only returned by the DescribeSnapshots API operation. + DataEncryptionKeyId *string `locationName:"dataEncryptionKeyId" type:"string"` + + // The description for the snapshot. + Description *string `locationName:"description" type:"string"` + + // Indicates whether the snapshot is encrypted. + Encrypted *bool `locationName:"encrypted" type:"boolean"` + + // The full ARN of the AWS Key Management Service (AWS KMS) customer master + // key (CMK) that was used to protect the volume encryption key for the parent + // volume. + KmsKeyId *string `locationName:"kmsKeyId" type:"string"` + + // The AWS account alias (for example, amazon, self) or AWS account ID that + // owns the snapshot. + OwnerAlias *string `locationName:"ownerAlias" type:"string"` + + // The AWS account ID of the EBS snapshot owner. + OwnerId *string `locationName:"ownerId" type:"string"` + + // The progress of the snapshot, as a percentage. 
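+ // For example (illustrative): "87%".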
+ Progress *string `locationName:"progress" type:"string"` + + // The ID of the snapshot. Each snapshot receives a unique identifier when it + // is created. + SnapshotId *string `locationName:"snapshotId" type:"string"` + + // The time stamp when the snapshot was initiated. + StartTime *time.Time `locationName:"startTime" type:"timestamp" timestampFormat:"iso8601"` + + // The snapshot state. + State *string `locationName:"status" type:"string" enum:"SnapshotState"` + + // Encrypted Amazon EBS snapshots are copied asynchronously. If a snapshot copy + // operation fails (for example, if the proper AWS Key Management Service (AWS + // KMS) permissions are not obtained), this field displays error state details + // to help you diagnose why the error occurred. This parameter is only returned + // by the DescribeSnapshots API operation. + StateMessage *string `locationName:"statusMessage" type:"string"` + + // Any tags assigned to the snapshot. + Tags []*Tag `locationName:"tagSet" locationNameList:"item" type:"list"` + + // The ID of the volume that was used to create the snapshot. + VolumeId *string `locationName:"volumeId" type:"string"` + + // The size of the volume, in GiB. + VolumeSize *int64 `locationName:"volumeSize" type:"integer"` +} + +// String returns the string representation +func (s Snapshot) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Snapshot) GoString() string { + return s.String() +} + +// Describes the snapshot created from the imported disk. +type SnapshotDetail struct { + _ struct{} `type:"structure"` + + // A description for the snapshot. + Description *string `locationName:"description" type:"string"` + + // The block device mapping for the snapshot. + DeviceName *string `locationName:"deviceName" type:"string"` + + // The size of the disk in the snapshot, in GiB. + DiskImageSize *float64 `locationName:"diskImageSize" type:"double"` + + // The format of the disk image from which the snapshot is created. + Format *string `locationName:"format" type:"string"` + + // The percentage of progress for the task. + Progress *string `locationName:"progress" type:"string"` + + // The snapshot ID of the disk being imported. + SnapshotId *string `locationName:"snapshotId" type:"string"` + + // A brief status of the snapshot creation. + Status *string `locationName:"status" type:"string"` + + // A detailed status message for the snapshot creation. + StatusMessage *string `locationName:"statusMessage" type:"string"` + + // The URL used to access the disk image. + Url *string `locationName:"url" type:"string"` + + // Describes the S3 bucket for the disk image. + UserBucket *UserBucketDetails `locationName:"userBucket" type:"structure"` +} + +// String returns the string representation +func (s SnapshotDetail) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SnapshotDetail) GoString() string { + return s.String() +} + +// The disk container object for the import snapshot request. +type SnapshotDiskContainer struct { + _ struct{} `type:"structure"` + + // The description of the disk image being imported. + Description *string `type:"string"` + + // The format of the disk image being imported. + // + // Valid values: RAW | VHD | VMDK | OVA + Format *string `type:"string"` + + // The URL to the Amazon S3-based disk image being imported. It can either be + // an https URL (https://..) or an Amazon S3 URL (s3://..).
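+ // For example (illustrative, hypothetical bucket): https://example-bucket.s3.amazonaws.com/disk.vhd + // or s3://example-bucket/disk.vhd.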
+ Url *string `type:"string"` + + // Describes the S3 bucket for the disk image. + UserBucket *UserBucket `type:"structure"` +} + +// String returns the string representation +func (s SnapshotDiskContainer) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SnapshotDiskContainer) GoString() string { + return s.String() +} + +// Details about the import snapshot task. +type SnapshotTaskDetail struct { + _ struct{} `type:"structure"` + + // The description of the snapshot. + Description *string `locationName:"description" type:"string"` + + // The size of the disk in the snapshot, in GiB. + DiskImageSize *float64 `locationName:"diskImageSize" type:"double"` + + // The format of the disk image from which the snapshot is created. + Format *string `locationName:"format" type:"string"` + + // The percentage of completion for the import snapshot task. + Progress *string `locationName:"progress" type:"string"` + + // The snapshot ID of the disk being imported. + SnapshotId *string `locationName:"snapshotId" type:"string"` + + // A brief status for the import snapshot task. + Status *string `locationName:"status" type:"string"` + + // A detailed status message for the import snapshot task. + StatusMessage *string `locationName:"statusMessage" type:"string"` + + // The URL of the disk image from which the snapshot is created. + Url *string `locationName:"url" type:"string"` + + // The S3 bucket for the disk image. + UserBucket *UserBucketDetails `locationName:"userBucket" type:"structure"` +} + +// String returns the string representation +func (s SnapshotTaskDetail) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SnapshotTaskDetail) GoString() string { + return s.String() +} + +// Describes the data feed for a Spot instance. +type SpotDatafeedSubscription struct { + _ struct{} `type:"structure"` + + // The Amazon S3 bucket where the Spot instance data feed is located. + Bucket *string `locationName:"bucket" type:"string"` + + // The fault codes for the Spot instance request, if any. + Fault *SpotInstanceStateFault `locationName:"fault" type:"structure"` + + // The AWS account ID of the account. + OwnerId *string `locationName:"ownerId" type:"string"` + + // The prefix that is prepended to data feed files. + Prefix *string `locationName:"prefix" type:"string"` + + // The state of the Spot instance data feed subscription. + State *string `locationName:"state" type:"string" enum:"DatafeedSubscriptionState"` +} + +// String returns the string representation +func (s SpotDatafeedSubscription) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SpotDatafeedSubscription) GoString() string { + return s.String() +} + +// Describes the launch specification for one or more Spot instances. +type SpotFleetLaunchSpecification struct { + _ struct{} `type:"structure"` + + // Deprecated. + AddressingType *string `locationName:"addressingType" type:"string"` + + // One or more block device mapping entries. + BlockDeviceMappings []*BlockDeviceMapping `locationName:"blockDeviceMapping" locationNameList:"item" type:"list"` + + // Indicates whether the instances are optimized for EBS I/O. This optimization + // provides dedicated throughput to Amazon EBS and an optimized configuration + // stack to provide optimal EBS I/O performance. This optimization isn't available + // with all instance types. 
Additional usage charges apply when using an EBS-optimized + // instance. + // + // Default: false + EbsOptimized *bool `locationName:"ebsOptimized" type:"boolean"` + + // The IAM instance profile. + IamInstanceProfile *IamInstanceProfileSpecification `locationName:"iamInstanceProfile" type:"structure"` + + // The ID of the AMI. + ImageId *string `locationName:"imageId" type:"string"` + + // The instance type. + InstanceType *string `locationName:"instanceType" type:"string" enum:"InstanceType"` + + // The ID of the kernel. + KernelId *string `locationName:"kernelId" type:"string"` + + // The name of the key pair. + KeyName *string `locationName:"keyName" type:"string"` + + // Enable or disable monitoring for the instances. + Monitoring *SpotFleetMonitoring `locationName:"monitoring" type:"structure"` + + // One or more network interfaces. + NetworkInterfaces []*InstanceNetworkInterfaceSpecification `locationName:"networkInterfaceSet" locationNameList:"item" type:"list"` + + // The placement information. + Placement *SpotPlacement `locationName:"placement" type:"structure"` + + // The ID of the RAM disk. + RamdiskId *string `locationName:"ramdiskId" type:"string"` + + // One or more security groups. When requesting instances in a VPC, you must + // specify the IDs of the security groups. When requesting instances in EC2-Classic, + // you can specify the names or the IDs of the security groups. + SecurityGroups []*GroupIdentifier `locationName:"groupSet" locationNameList:"item" type:"list"` + + // The bid price per unit hour for the specified instance type. If this value + // is not specified, the default is the Spot bid price specified for the fleet. + // To determine the bid price per unit hour, divide the Spot bid price by the + // value of WeightedCapacity. + SpotPrice *string `locationName:"spotPrice" type:"string"` + + // The ID of the subnet in which to launch the instances. To specify multiple + // subnets, separate them using commas; for example, "subnet-a61dafcf, subnet-65ea5f08". + SubnetId *string `locationName:"subnetId" type:"string"` + + // The Base64-encoded MIME user data to make available to the instances. + UserData *string `locationName:"userData" type:"string"` + + // The number of units provided by the specified instance type. These are the + // same units that you chose to set the target capacity in terms of (instances + // or a performance characteristic such as vCPUs, memory, or I/O). + // + // If the target capacity divided by this value is not a whole number, we round + // the number of instances to the next whole number. If this value is not specified, + // the default is 1. + WeightedCapacity *float64 `locationName:"weightedCapacity" type:"double"` +} + +// String returns the string representation +func (s SpotFleetLaunchSpecification) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SpotFleetLaunchSpecification) GoString() string { + return s.String() +} + +// Describes whether monitoring is enabled. +type SpotFleetMonitoring struct { + _ struct{} `type:"structure"` + + // Enables monitoring for the instance. + // + // Default: false + Enabled *bool `locationName:"enabled" type:"boolean"` +} + +// String returns the string representation +func (s SpotFleetMonitoring) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SpotFleetMonitoring) GoString() string { + return s.String() +} + +// Describes a Spot fleet request.
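+// +// Reading sketch (editor's illustration; svc is an assumed *ec2.EC2 client): +// out, err := svc.DescribeSpotFleetRequests(&ec2.DescribeSpotFleetRequestsInput{}) +// if err == nil { for _, cfg := range out.SpotFleetRequestConfigs { fmt.Println(*cfg.SpotFleetRequestId, *cfg.SpotFleetRequestState) } }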
+type SpotFleetRequestConfig struct { + _ struct{} `type:"structure"` + + // The creation date and time of the request. + CreateTime *time.Time `locationName:"createTime" type:"timestamp" timestampFormat:"iso8601" required:"true"` + + // Information about the configuration of the Spot fleet request. + SpotFleetRequestConfig *SpotFleetRequestConfigData `locationName:"spotFleetRequestConfig" type:"structure" required:"true"` + + // The ID of the Spot fleet request. + SpotFleetRequestId *string `locationName:"spotFleetRequestId" type:"string" required:"true"` + + // The state of the Spot fleet request. + SpotFleetRequestState *string `locationName:"spotFleetRequestState" type:"string" required:"true" enum:"BatchState"` +} + +// String returns the string representation +func (s SpotFleetRequestConfig) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SpotFleetRequestConfig) GoString() string { + return s.String() +} + +// Describes the configuration of a Spot fleet request. +type SpotFleetRequestConfigData struct { + _ struct{} `type:"structure"` + + // Indicates how to allocate the target capacity across the Spot pools specified + // by the Spot fleet request. The default is lowestPrice. + AllocationStrategy *string `locationName:"allocationStrategy" type:"string" enum:"AllocationStrategy"` + + // A unique, case-sensitive identifier you provide to ensure idempotency of + // your listings. This helps avoid duplicate listings. For more information, + // see Ensuring Idempotency (http://docs.aws.amazon.com/AWSEC2/latest/APIReference/Run_Instance_Idempotency.html). + ClientToken *string `locationName:"clientToken" type:"string"` + + // Indicates whether running Spot instances should be terminated if the target + // capacity of the Spot fleet request is decreased below the current size of + // the Spot fleet. + ExcessCapacityTerminationPolicy *string `locationName:"excessCapacityTerminationPolicy" type:"string" enum:"ExcessCapacityTerminationPolicy"` + + // Grants the Spot fleet permission to terminate Spot instances on your behalf + // when you cancel its Spot fleet request using CancelSpotFleetRequests or when + // the Spot fleet request expires, if you set terminateInstancesWithExpiration. + IamFleetRole *string `locationName:"iamFleetRole" type:"string" required:"true"` + + // Information about the launch specifications for the Spot fleet request. + LaunchSpecifications []*SpotFleetLaunchSpecification `locationName:"launchSpecifications" locationNameList:"item" min:"1" type:"list" required:"true"` + + // The bid price per unit hour. + SpotPrice *string `locationName:"spotPrice" type:"string" required:"true"` + + // The number of units to request. You can choose to set the target capacity + // in terms of instances or a performance characteristic that is important to + // your application workload, such as vCPUs, memory, or I/O. + TargetCapacity *int64 `locationName:"targetCapacity" type:"integer" required:"true"` + + // Indicates whether running Spot instances should be terminated when the Spot + // fleet request expires. + TerminateInstancesWithExpiration *bool `locationName:"terminateInstancesWithExpiration" type:"boolean"` + + // The start date and time of the request, in UTC format (for example, YYYY-MM-DDTHH:MM:SSZ). + // The default is to start fulfilling the request immediately. 
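+ // For example (illustrative): 2016-02-01T00:00:00Z.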
+ ValidFrom *time.Time `locationName:"validFrom" type:"timestamp" timestampFormat:"iso8601"` + + // The end date and time of the request, in UTC format (for example, YYYY-MM-DDTHH:MM:SSZ). + // At this point, no new Spot instance requests are placed or enabled to fulfill + // the request. + ValidUntil *time.Time `locationName:"validUntil" type:"timestamp" timestampFormat:"iso8601"` +} + +// String returns the string representation +func (s SpotFleetRequestConfigData) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SpotFleetRequestConfigData) GoString() string { + return s.String() +} + +// Describes a Spot instance request. +type SpotInstanceRequest struct { + _ struct{} `type:"structure"` + + // If you specified a duration and your Spot instance request was fulfilled, + // this is the fixed hourly price in effect for the Spot instance while it runs. + ActualBlockHourlyPrice *string `locationName:"actualBlockHourlyPrice" type:"string"` + + // The Availability Zone group. If you specify the same Availability Zone group + // for all Spot instance requests, all Spot instances are launched in the same + // Availability Zone. + AvailabilityZoneGroup *string `locationName:"availabilityZoneGroup" type:"string"` + + // The duration for the Spot instance, in minutes. + BlockDurationMinutes *int64 `locationName:"blockDurationMinutes" type:"integer"` + + // The date and time when the Spot instance request was created, in UTC format + // (for example, YYYY-MM-DDTHH:MM:SSZ). + CreateTime *time.Time `locationName:"createTime" type:"timestamp" timestampFormat:"iso8601"` + + // The fault codes for the Spot instance request, if any. + Fault *SpotInstanceStateFault `locationName:"fault" type:"structure"` + + // The instance ID, if an instance has been launched to fulfill the Spot instance + // request. + InstanceId *string `locationName:"instanceId" type:"string"` + + // The instance launch group. Launch groups are Spot instances that launch together + // and terminate together. + LaunchGroup *string `locationName:"launchGroup" type:"string"` + + // Additional information for launching instances. + LaunchSpecification *LaunchSpecification `locationName:"launchSpecification" type:"structure"` + + // The Availability Zone in which the bid is launched. + LaunchedAvailabilityZone *string `locationName:"launchedAvailabilityZone" type:"string"` + + // The product description associated with the Spot instance. + ProductDescription *string `locationName:"productDescription" type:"string" enum:"RIProductDescription"` + + // The ID of the Spot instance request. + SpotInstanceRequestId *string `locationName:"spotInstanceRequestId" type:"string"` + + // The maximum hourly price (bid) for the Spot instance launched to fulfill + // the request. + SpotPrice *string `locationName:"spotPrice" type:"string"` + + // The state of the Spot instance request. Spot bid status information can help + // you track your Spot instance requests. For more information, see Spot Bid + // Status (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/spot-bid-status.html) + // in the Amazon Elastic Compute Cloud User Guide. + State *string `locationName:"state" type:"string" enum:"SpotInstanceState"` + + // The status code and status message describing the Spot instance request. + Status *SpotInstanceStatus `locationName:"status" type:"structure"` + + // Any tags assigned to the resource. 
+ Tags []*Tag `locationName:"tagSet" locationNameList:"item" type:"list"` + + // The Spot instance request type. + Type *string `locationName:"type" type:"string" enum:"SpotInstanceType"` + + // The start date of the request, in UTC format (for example, YYYY-MM-DDTHH:MM:SSZ). + // The request becomes active at this date and time. + ValidFrom *time.Time `locationName:"validFrom" type:"timestamp" timestampFormat:"iso8601"` + + // The end date of the request, in UTC format (for example, YYYY-MM-DDTHH:MM:SSZ). + // If this is a one-time request, it remains active until all instances launch, + // the request is canceled, or this date is reached. If the request is persistent, + // it remains active until it is canceled or this date is reached. + ValidUntil *time.Time `locationName:"validUntil" type:"timestamp" timestampFormat:"iso8601"` +} + +// String returns the string representation +func (s SpotInstanceRequest) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SpotInstanceRequest) GoString() string { + return s.String() +} + +// Describes a Spot instance state change. +type SpotInstanceStateFault struct { + _ struct{} `type:"structure"` + + // The reason code for the Spot instance state change. + Code *string `locationName:"code" type:"string"` + + // The message for the Spot instance state change. + Message *string `locationName:"message" type:"string"` +} + +// String returns the string representation +func (s SpotInstanceStateFault) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SpotInstanceStateFault) GoString() string { + return s.String() +} + +// Describes the status of a Spot instance request. +type SpotInstanceStatus struct { + _ struct{} `type:"structure"` + + // The status code. For a list of status codes, see Spot Bid Status Codes (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/spot-bid-status.html#spot-instance-bid-status-understand) + // in the Amazon Elastic Compute Cloud User Guide. + Code *string `locationName:"code" type:"string"` + + // The description for the status code. + Message *string `locationName:"message" type:"string"` + + // The date and time of the most recent status update, in UTC format (for example, + // YYYY-MM-DDTHH:MM:SSZ). + UpdateTime *time.Time `locationName:"updateTime" type:"timestamp" timestampFormat:"iso8601"` +} + +// String returns the string representation +func (s SpotInstanceStatus) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SpotInstanceStatus) GoString() string { + return s.String() +} + +// Describes Spot instance placement. +type SpotPlacement struct { + _ struct{} `type:"structure"` + + // The Availability Zones. To specify multiple Availability Zones, separate + // them using commas; for example, "us-west-2a, us-west-2b". + AvailabilityZone *string `locationName:"availabilityZone" type:"string"` + + // The name of the placement group (for cluster instances). + GroupName *string `locationName:"groupName" type:"string"` +} + +// String returns the string representation +func (s SpotPlacement) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SpotPlacement) GoString() string { + return s.String() +} + +// Describes the maximum hourly price (bid) for any Spot instance launched to +// fulfill the request. +type SpotPrice struct { + _ struct{} `type:"structure"` + + // The Availability Zone. 
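+ // For example (illustrative): "us-west-2a".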
+ AvailabilityZone *string `locationName:"availabilityZone" type:"string"` + + // The instance type. + InstanceType *string `locationName:"instanceType" type:"string" enum:"InstanceType"` + + // A general description of the AMI. + ProductDescription *string `locationName:"productDescription" type:"string" enum:"RIProductDescription"` + + // The maximum price (bid) that you are willing to pay for a Spot instance. + SpotPrice *string `locationName:"spotPrice" type:"string"` + + // The date and time the request was created, in UTC format (for example, YYYY-MM-DDTHH:MM:SSZ). + Timestamp *time.Time `locationName:"timestamp" type:"timestamp" timestampFormat:"iso8601"` +} + +// String returns the string representation +func (s SpotPrice) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SpotPrice) GoString() string { + return s.String() +} + +type StartInstancesInput struct { + _ struct{} `type:"structure"` + + // Reserved. + AdditionalInfo *string `locationName:"additionalInfo" type:"string"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // One or more instance IDs. + InstanceIds []*string `locationName:"InstanceId" locationNameList:"InstanceId" type:"list" required:"true"` +} + +// String returns the string representation +func (s StartInstancesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s StartInstancesInput) GoString() string { + return s.String() +} + +type StartInstancesOutput struct { + _ struct{} `type:"structure"` + + // Information about one or more started instances. + StartingInstances []*InstanceStateChange `locationName:"instancesSet" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s StartInstancesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s StartInstancesOutput) GoString() string { + return s.String() +} + +// Describes a state change. +type StateReason struct { + _ struct{} `type:"structure"` + + // The reason code for the state change. + Code *string `locationName:"code" type:"string"` + + // The message for the state change. + // + // Server.SpotInstanceTermination: A Spot instance was terminated due to an + // increase in the market price. + // + // Server.InternalError: An internal error occurred during instance launch, + // resulting in termination. + // + // Server.InsufficientInstanceCapacity: There was insufficient instance capacity + // to satisfy the launch request. + // + // Client.InternalError: A client error caused the instance to terminate on + // launch. + // + // Client.InstanceInitiatedShutdown: The instance was shut down using the shutdown + // -h command from the instance. + // + // Client.UserInitiatedShutdown: The instance was shut down using the Amazon + // EC2 API. + // + // Client.VolumeLimitExceeded: The limit on the number of EBS volumes or total + // storage was exceeded. Decrease usage or request an increase in your limits. + // + // Client.InvalidSnapshot.NotFound: The specified snapshot was not found. 
+ Message *string `locationName:"message" type:"string"` +} + +// String returns the string representation +func (s StateReason) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s StateReason) GoString() string { + return s.String() +} + +type StopInstancesInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // Forces the instances to stop. The instances do not have an opportunity to + // flush file system caches or file system metadata. If you use this option, + // you must perform file system check and repair procedures. This option is + // not recommended for Windows instances. + // + // Default: false + Force *bool `locationName:"force" type:"boolean"` + + // One or more instance IDs. + InstanceIds []*string `locationName:"InstanceId" locationNameList:"InstanceId" type:"list" required:"true"` +} + +// String returns the string representation +func (s StopInstancesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s StopInstancesInput) GoString() string { + return s.String() +} + +type StopInstancesOutput struct { + _ struct{} `type:"structure"` + + // Information about one or more stopped instances. + StoppingInstances []*InstanceStateChange `locationName:"instancesSet" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s StopInstancesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s StopInstancesOutput) GoString() string { + return s.String() +} + +// Describes the storage location for an instance store-backed AMI. +type Storage struct { + _ struct{} `type:"structure"` + + // An Amazon S3 storage location. + S3 *S3Storage `type:"structure"` +} + +// String returns the string representation +func (s Storage) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Storage) GoString() string { + return s.String() +} + +// Describes a subnet. +type Subnet struct { + _ struct{} `type:"structure"` + + // The Availability Zone of the subnet. + AvailabilityZone *string `locationName:"availabilityZone" type:"string"` + + // The number of unused IP addresses in the subnet. Note that the IP addresses + // for any stopped instances are considered unavailable. + AvailableIpAddressCount *int64 `locationName:"availableIpAddressCount" type:"integer"` + + // The CIDR block assigned to the subnet. + CidrBlock *string `locationName:"cidrBlock" type:"string"` + + // Indicates whether this is the default subnet for the Availability Zone. + DefaultForAz *bool `locationName:"defaultForAz" type:"boolean"` + + // Indicates whether instances launched in this subnet receive a public IP address. + MapPublicIpOnLaunch *bool `locationName:"mapPublicIpOnLaunch" type:"boolean"` + + // The current state of the subnet. + State *string `locationName:"state" type:"string" enum:"SubnetState"` + + // The ID of the subnet. + SubnetId *string `locationName:"subnetId" type:"string"` + + // Any tags assigned to the subnet. 
+ Tags []*Tag `locationName:"tagSet" locationNameList:"item" type:"list"` + + // The ID of the VPC the subnet is in. + VpcId *string `locationName:"vpcId" type:"string"` +} + +// String returns the string representation +func (s Subnet) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Subnet) GoString() string { + return s.String() +} + +// Describes a tag. +type Tag struct { + _ struct{} `type:"structure"` + + // The key of the tag. + // + // Constraints: Tag keys are case-sensitive and accept a maximum of 127 Unicode + // characters. May not begin with aws: + Key *string `locationName:"key" type:"string"` + + // The value of the tag. + // + // Constraints: Tag values are case-sensitive and accept a maximum of 255 Unicode + // characters. + Value *string `locationName:"value" type:"string"` +} + +// String returns the string representation +func (s Tag) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Tag) GoString() string { + return s.String() +} + +// Describes a tag. +type TagDescription struct { + _ struct{} `type:"structure"` + + // The tag key. + Key *string `locationName:"key" type:"string"` + + // The ID of the resource. For example, ami-1a2b3c4d. + ResourceId *string `locationName:"resourceId" type:"string"` + + // The resource type. + ResourceType *string `locationName:"resourceType" type:"string" enum:"ResourceType"` + + // The tag value. + Value *string `locationName:"value" type:"string"` +} + +// String returns the string representation +func (s TagDescription) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TagDescription) GoString() string { + return s.String() +} + +type TerminateInstancesInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // One or more instance IDs. + InstanceIds []*string `locationName:"InstanceId" locationNameList:"InstanceId" type:"list" required:"true"` +} + +// String returns the string representation +func (s TerminateInstancesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TerminateInstancesInput) GoString() string { + return s.String() +} + +type TerminateInstancesOutput struct { + _ struct{} `type:"structure"` + + // Information about one or more terminated instances. + TerminatingInstances []*InstanceStateChange `locationName:"instancesSet" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s TerminateInstancesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TerminateInstancesOutput) GoString() string { + return s.String() +} + +type UnassignPrivateIpAddressesInput struct { + _ struct{} `type:"structure"` + + // The ID of the network interface. + NetworkInterfaceId *string `locationName:"networkInterfaceId" type:"string" required:"true"` + + // The secondary private IP addresses to unassign from the network interface. + // You can specify this option multiple times to unassign more than one IP address. 
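+ // For example (illustrative): []*string{aws.String("10.0.0.82")}.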
+ PrivateIpAddresses []*string `locationName:"privateIpAddress" locationNameList:"PrivateIpAddress" type:"list" required:"true"` +} + +// String returns the string representation +func (s UnassignPrivateIpAddressesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UnassignPrivateIpAddressesInput) GoString() string { + return s.String() +} + +type UnassignPrivateIpAddressesOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s UnassignPrivateIpAddressesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UnassignPrivateIpAddressesOutput) GoString() string { + return s.String() +} + +type UnmonitorInstancesInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // One or more instance IDs. + InstanceIds []*string `locationName:"InstanceId" locationNameList:"InstanceId" type:"list" required:"true"` +} + +// String returns the string representation +func (s UnmonitorInstancesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UnmonitorInstancesInput) GoString() string { + return s.String() +} + +type UnmonitorInstancesOutput struct { + _ struct{} `type:"structure"` + + // Monitoring information for one or more instances. + InstanceMonitorings []*InstanceMonitoring `locationName:"instancesSet" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s UnmonitorInstancesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UnmonitorInstancesOutput) GoString() string { + return s.String() +} + +// Information about items that were not successfully processed in a batch call. +type UnsuccessfulItem struct { + _ struct{} `type:"structure"` + + // Information about the error. + Error *UnsuccessfulItemError `locationName:"error" type:"structure" required:"true"` + + // The ID of the resource. + ResourceId *string `locationName:"resourceId" type:"string"` +} + +// String returns the string representation +func (s UnsuccessfulItem) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UnsuccessfulItem) GoString() string { + return s.String() +} + +// Information about the error that occurred. For more information about errors, +// see Error Codes (http://docs.aws.amazon.com/AWSEC2/latest/APIReference/errors-overview.html). +type UnsuccessfulItemError struct { + _ struct{} `type:"structure"` + + // The error code. + Code *string `locationName:"code" type:"string" required:"true"` + + // The error message accompanying the error code. + Message *string `locationName:"message" type:"string" required:"true"` +} + +// String returns the string representation +func (s UnsuccessfulItemError) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UnsuccessfulItemError) GoString() string { + return s.String() +} + +// Describes the S3 bucket for the disk image. 
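+// +// Construction sketch (editor's illustration; bucket and key are hypothetical): +// ub := &ec2.UserBucket{S3Bucket: aws.String("my-import-bucket"), S3Key: aws.String("disks/server1.vhd")}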
+type UserBucket struct { + _ struct{} `type:"structure"` + + // The name of the S3 bucket where the disk image is located. + S3Bucket *string `type:"string"` + + // The key for the disk image. + S3Key *string `type:"string"` +} + +// String returns the string representation +func (s UserBucket) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UserBucket) GoString() string { + return s.String() +} + +// Describes the S3 bucket for the disk image. +type UserBucketDetails struct { + _ struct{} `type:"structure"` + + // The S3 bucket from which the disk image was created. + S3Bucket *string `locationName:"s3Bucket" type:"string"` + + // The key from which the disk image was created. + S3Key *string `locationName:"s3Key" type:"string"` +} + +// String returns the string representation +func (s UserBucketDetails) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UserBucketDetails) GoString() string { + return s.String() +} + +// Describes the user data to be made available to an instance. +type UserData struct { + _ struct{} `type:"structure"` + + // The Base64-encoded MIME user data for the instance. + Data *string `locationName:"data" type:"string"` +} + +// String returns the string representation +func (s UserData) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UserData) GoString() string { + return s.String() +} + +// Describes a security group and AWS account ID pair. +type UserIdGroupPair struct { + _ struct{} `type:"structure"` + + // The ID of the security group. + GroupId *string `locationName:"groupId" type:"string"` + + // The name of the security group. In a request, use this parameter for a security + // group in EC2-Classic or a default VPC only. For a security group in a nondefault + // VPC, use GroupId. + GroupName *string `locationName:"groupName" type:"string"` + + // The ID of an AWS account. EC2-Classic only. + UserId *string `locationName:"userId" type:"string"` +} + +// String returns the string representation +func (s UserIdGroupPair) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UserIdGroupPair) GoString() string { + return s.String() +} + +// Describes telemetry for a VPN tunnel. +type VgwTelemetry struct { + _ struct{} `type:"structure"` + + // The number of accepted routes. + AcceptedRouteCount *int64 `locationName:"acceptedRouteCount" type:"integer"` + + // The date and time of the last change in status. + LastStatusChange *time.Time `locationName:"lastStatusChange" type:"timestamp" timestampFormat:"iso8601"` + + // The Internet-routable IP address of the virtual private gateway's outside + // interface. + OutsideIpAddress *string `locationName:"outsideIpAddress" type:"string"` + + // The status of the VPN tunnel. + Status *string `locationName:"status" type:"string" enum:"TelemetryStatus"` + + // If an error occurs, a description of the error. + StatusMessage *string `locationName:"statusMessage" type:"string"` +} + +// String returns the string representation +func (s VgwTelemetry) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s VgwTelemetry) GoString() string { + return s.String() +} + +// Describes a volume. +type Volume struct { + _ struct{} `type:"structure"` + + // Information about the volume attachments. 
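+ // An EBS volume can be attached to one instance at a time, so this list + // holds at most one entry while the volume is in use.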
+ Attachments []*VolumeAttachment `locationName:"attachmentSet" locationNameList:"item" type:"list"` + + // The Availability Zone for the volume. + AvailabilityZone *string `locationName:"availabilityZone" type:"string"` + + // The time stamp when volume creation was initiated. + CreateTime *time.Time `locationName:"createTime" type:"timestamp" timestampFormat:"iso8601"` + + // Indicates whether the volume will be encrypted. + Encrypted *bool `locationName:"encrypted" type:"boolean"` + + // The number of I/O operations per second (IOPS) that the volume supports. + // For Provisioned IOPS (SSD) volumes, this represents the number of IOPS that + // are provisioned for the volume. For General Purpose (SSD) volumes, this represents + // the baseline performance of the volume and the rate at which the volume accumulates + // I/O credits for bursting. For more information on General Purpose (SSD) baseline + // performance, I/O credits, and bursting, see Amazon EBS Volume Types (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSVolumeTypes.html) + // in the Amazon Elastic Compute Cloud User Guide. + // + // Constraint: Range is 100 to 20000 for Provisioned IOPS (SSD) volumes and + // 3 to 10000 for General Purpose (SSD) volumes. + // + // Condition: This parameter is required for requests to create io1 volumes; + // it is not used in requests to create standard or gp2 volumes. + Iops *int64 `locationName:"iops" type:"integer"` + + // The full ARN of the AWS Key Management Service (AWS KMS) customer master + // key (CMK) that was used to protect the volume encryption key for the volume. + KmsKeyId *string `locationName:"kmsKeyId" type:"string"` + + // The size of the volume, in GiBs. + Size *int64 `locationName:"size" type:"integer"` + + // The snapshot from which the volume was created, if applicable. + SnapshotId *string `locationName:"snapshotId" type:"string"` + + // The volume state. + State *string `locationName:"status" type:"string" enum:"VolumeState"` + + // Any tags assigned to the volume. + Tags []*Tag `locationName:"tagSet" locationNameList:"item" type:"list"` + + // The ID of the volume. + VolumeId *string `locationName:"volumeId" type:"string"` + + // The volume type. This can be gp2 for General Purpose (SSD) volumes, io1 for + // Provisioned IOPS (SSD) volumes, or standard for Magnetic volumes. + VolumeType *string `locationName:"volumeType" type:"string" enum:"VolumeType"` +} + +// String returns the string representation +func (s Volume) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Volume) GoString() string { + return s.String() +} + +// Describes volume attachment details. +type VolumeAttachment struct { + _ struct{} `type:"structure"` + + // The time stamp when the attachment initiated. + AttachTime *time.Time `locationName:"attachTime" type:"timestamp" timestampFormat:"iso8601"` + + // Indicates whether the EBS volume is deleted on instance termination. + DeleteOnTermination *bool `locationName:"deleteOnTermination" type:"boolean"` + + // The device name. + Device *string `locationName:"device" type:"string"` + + // The ID of the instance. + InstanceId *string `locationName:"instanceId" type:"string"` + + // The attachment state of the volume. + State *string `locationName:"status" type:"string" enum:"VolumeAttachmentState"` + + // The ID of the volume. 
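+ // For example (illustrative): "vol-1a2b3c4d".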
+ VolumeId *string `locationName:"volumeId" type:"string"` +} + +// String returns the string representation +func (s VolumeAttachment) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s VolumeAttachment) GoString() string { + return s.String() +} + +// Describes an EBS volume. +type VolumeDetail struct { + _ struct{} `type:"structure"` + + // The size of the volume, in GiB. + Size *int64 `locationName:"size" type:"long" required:"true"` +} + +// String returns the string representation +func (s VolumeDetail) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s VolumeDetail) GoString() string { + return s.String() +} + +// Describes a volume status operation code. +type VolumeStatusAction struct { + _ struct{} `type:"structure"` + + // The code identifying the operation, for example, enable-volume-io. + Code *string `locationName:"code" type:"string"` + + // A description of the operation. + Description *string `locationName:"description" type:"string"` + + // The ID of the event associated with this operation. + EventId *string `locationName:"eventId" type:"string"` + + // The event type associated with this operation. + EventType *string `locationName:"eventType" type:"string"` +} + +// String returns the string representation +func (s VolumeStatusAction) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s VolumeStatusAction) GoString() string { + return s.String() +} + +// Describes a volume status. +type VolumeStatusDetails struct { + _ struct{} `type:"structure"` + + // The name of the volume status. + Name *string `locationName:"name" type:"string" enum:"VolumeStatusName"` + + // The intended status of the volume status. + Status *string `locationName:"status" type:"string"` +} + +// String returns the string representation +func (s VolumeStatusDetails) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s VolumeStatusDetails) GoString() string { + return s.String() +} + +// Describes a volume status event. +type VolumeStatusEvent struct { + _ struct{} `type:"structure"` + + // A description of the event. + Description *string `locationName:"description" type:"string"` + + // The ID of this event. + EventId *string `locationName:"eventId" type:"string"` + + // The type of this event. + EventType *string `locationName:"eventType" type:"string"` + + // The latest end time of the event. + NotAfter *time.Time `locationName:"notAfter" type:"timestamp" timestampFormat:"iso8601"` + + // The earliest start time of the event. + NotBefore *time.Time `locationName:"notBefore" type:"timestamp" timestampFormat:"iso8601"` +} + +// String returns the string representation +func (s VolumeStatusEvent) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s VolumeStatusEvent) GoString() string { + return s.String() +} + +// Describes the status of a volume. +type VolumeStatusInfo struct { + _ struct{} `type:"structure"` + + // The details of the volume status. + Details []*VolumeStatusDetails `locationName:"details" locationNameList:"item" type:"list"` + + // The status of the volume. 
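+ // Typical values (from the VolumeStatusInfoStatus enum): ok, impaired, insufficient-data.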
+ Status *string `locationName:"status" type:"string" enum:"VolumeStatusInfoStatus"` +} + +// String returns the string representation +func (s VolumeStatusInfo) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s VolumeStatusInfo) GoString() string { + return s.String() +} + +// Describes the volume status. +type VolumeStatusItem struct { + _ struct{} `type:"structure"` + + // The details of the operation. + Actions []*VolumeStatusAction `locationName:"actionsSet" locationNameList:"item" type:"list"` + + // The Availability Zone of the volume. + AvailabilityZone *string `locationName:"availabilityZone" type:"string"` + + // A list of events associated with the volume. + Events []*VolumeStatusEvent `locationName:"eventsSet" locationNameList:"item" type:"list"` + + // The volume ID. + VolumeId *string `locationName:"volumeId" type:"string"` + + // The volume status. + VolumeStatus *VolumeStatusInfo `locationName:"volumeStatus" type:"structure"` +} + +// String returns the string representation +func (s VolumeStatusItem) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s VolumeStatusItem) GoString() string { + return s.String() +} + +// Describes a VPC. +type Vpc struct { + _ struct{} `type:"structure"` + + // The CIDR block for the VPC. + CidrBlock *string `locationName:"cidrBlock" type:"string"` + + // The ID of the set of DHCP options you've associated with the VPC (or default + // if the default options are associated with the VPC). + DhcpOptionsId *string `locationName:"dhcpOptionsId" type:"string"` + + // The allowed tenancy of instances launched into the VPC. + InstanceTenancy *string `locationName:"instanceTenancy" type:"string" enum:"Tenancy"` + + // Indicates whether the VPC is the default VPC. + IsDefault *bool `locationName:"isDefault" type:"boolean"` + + // The current state of the VPC. + State *string `locationName:"state" type:"string" enum:"VpcState"` + + // Any tags assigned to the VPC. + Tags []*Tag `locationName:"tagSet" locationNameList:"item" type:"list"` + + // The ID of the VPC. + VpcId *string `locationName:"vpcId" type:"string"` +} + +// String returns the string representation +func (s Vpc) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Vpc) GoString() string { + return s.String() +} + +// Describes an attachment between a virtual private gateway and a VPC. +type VpcAttachment struct { + _ struct{} `type:"structure"` + + // The current state of the attachment. + State *string `locationName:"state" type:"string" enum:"AttachmentStatus"` + + // The ID of the VPC. + VpcId *string `locationName:"vpcId" type:"string"` +} + +// String returns the string representation +func (s VpcAttachment) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s VpcAttachment) GoString() string { + return s.String() +} + +// Describes whether a VPC is enabled for ClassicLink. +type VpcClassicLink struct { + _ struct{} `type:"structure"` + + // Indicates whether the VPC is enabled for ClassicLink. + ClassicLinkEnabled *bool `locationName:"classicLinkEnabled" type:"boolean"` + + // Any tags assigned to the VPC. + Tags []*Tag `locationName:"tagSet" locationNameList:"item" type:"list"` + + // The ID of the VPC. 
+ VpcId *string `locationName:"vpcId" type:"string"` +} + +// String returns the string representation +func (s VpcClassicLink) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s VpcClassicLink) GoString() string { + return s.String() +} + +// Describes a VPC endpoint. +type VpcEndpoint struct { + _ struct{} `type:"structure"` + + // The date and time the VPC endpoint was created. + CreationTimestamp *time.Time `locationName:"creationTimestamp" type:"timestamp" timestampFormat:"iso8601"` + + // The policy document associated with the endpoint. + PolicyDocument *string `locationName:"policyDocument" type:"string"` + + // One or more route tables associated with the endpoint. + RouteTableIds []*string `locationName:"routeTableIdSet" locationNameList:"item" type:"list"` + + // The name of the AWS service to which the endpoint is associated. + ServiceName *string `locationName:"serviceName" type:"string"` + + // The state of the VPC endpoint. + State *string `locationName:"state" type:"string" enum:"State"` + + // The ID of the VPC endpoint. + VpcEndpointId *string `locationName:"vpcEndpointId" type:"string"` + + // The ID of the VPC to which the endpoint is associated. + VpcId *string `locationName:"vpcId" type:"string"` +} + +// String returns the string representation +func (s VpcEndpoint) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s VpcEndpoint) GoString() string { + return s.String() +} + +// Describes a VPC peering connection. +type VpcPeeringConnection struct { + _ struct{} `type:"structure"` + + // The information of the peer VPC. + AccepterVpcInfo *VpcPeeringConnectionVpcInfo `locationName:"accepterVpcInfo" type:"structure"` + + // The time that an unaccepted VPC peering connection will expire. + ExpirationTime *time.Time `locationName:"expirationTime" type:"timestamp" timestampFormat:"iso8601"` + + // The information of the requester VPC. + RequesterVpcInfo *VpcPeeringConnectionVpcInfo `locationName:"requesterVpcInfo" type:"structure"` + + // The status of the VPC peering connection. + Status *VpcPeeringConnectionStateReason `locationName:"status" type:"structure"` + + // Any tags assigned to the resource. + Tags []*Tag `locationName:"tagSet" locationNameList:"item" type:"list"` + + // The ID of the VPC peering connection. + VpcPeeringConnectionId *string `locationName:"vpcPeeringConnectionId" type:"string"` +} + +// String returns the string representation +func (s VpcPeeringConnection) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s VpcPeeringConnection) GoString() string { + return s.String() +} + +// Describes the status of a VPC peering connection. +type VpcPeeringConnectionStateReason struct { + _ struct{} `type:"structure"` + + // The status of the VPC peering connection. + Code *string `locationName:"code" type:"string" enum:"VpcPeeringConnectionStateReasonCode"` + + // A message that provides more information about the status, if applicable. + Message *string `locationName:"message" type:"string"` +} + +// String returns the string representation +func (s VpcPeeringConnectionStateReason) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s VpcPeeringConnectionStateReason) GoString() string { + return s.String() +} + +// Describes a VPC in a VPC peering connection. 
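+// +// Reading sketch (editor's illustration; pcx is an assumed *ec2.VpcPeeringConnection): +// if info := pcx.AccepterVpcInfo; info != nil && info.CidrBlock != nil { fmt.Println(*info.CidrBlock) }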
+type VpcPeeringConnectionVpcInfo struct { + _ struct{} `type:"structure"` + + // The CIDR block for the VPC. + CidrBlock *string `locationName:"cidrBlock" type:"string"` + + // The AWS account ID of the VPC owner. + OwnerId *string `locationName:"ownerId" type:"string"` + + // The ID of the VPC. + VpcId *string `locationName:"vpcId" type:"string"` +} + +// String returns the string representation +func (s VpcPeeringConnectionVpcInfo) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s VpcPeeringConnectionVpcInfo) GoString() string { + return s.String() +} + +// Describes a VPN connection. +type VpnConnection struct { + _ struct{} `type:"structure"` + + // The configuration information for the VPN connection's customer gateway (in + // the native XML format). This element is always present in the CreateVpnConnection + // response; however, it's present in the DescribeVpnConnections response only + // if the VPN connection is in the pending or available state. + CustomerGatewayConfiguration *string `locationName:"customerGatewayConfiguration" type:"string"` + + // The ID of the customer gateway at your end of the VPN connection. + CustomerGatewayId *string `locationName:"customerGatewayId" type:"string"` + + // The VPN connection options. + Options *VpnConnectionOptions `locationName:"options" type:"structure"` + + // The static routes associated with the VPN connection. + Routes []*VpnStaticRoute `locationName:"routes" locationNameList:"item" type:"list"` + + // The current state of the VPN connection. + State *string `locationName:"state" type:"string" enum:"VpnState"` + + // Any tags assigned to the VPN connection. + Tags []*Tag `locationName:"tagSet" locationNameList:"item" type:"list"` + + // The type of VPN connection. + Type *string `locationName:"type" type:"string" enum:"GatewayType"` + + // Information about the VPN tunnel. + VgwTelemetry []*VgwTelemetry `locationName:"vgwTelemetry" locationNameList:"item" type:"list"` + + // The ID of the VPN connection. + VpnConnectionId *string `locationName:"vpnConnectionId" type:"string"` + + // The ID of the virtual private gateway at the AWS side of the VPN connection. + VpnGatewayId *string `locationName:"vpnGatewayId" type:"string"` +} + +// String returns the string representation +func (s VpnConnection) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s VpnConnection) GoString() string { + return s.String() +} + +// Describes VPN connection options. +type VpnConnectionOptions struct { + _ struct{} `type:"structure"` + + // Indicates whether the VPN connection uses static routes only. Static routes + // must be used for devices that don't support BGP. + StaticRoutesOnly *bool `locationName:"staticRoutesOnly" type:"boolean"` +} + +// String returns the string representation +func (s VpnConnectionOptions) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s VpnConnectionOptions) GoString() string { + return s.String() +} + +// Describes VPN connection options. +type VpnConnectionOptionsSpecification struct { + _ struct{} `type:"structure"` + + // Indicates whether the VPN connection uses static routes only. Static routes + // must be used for devices that don't support BGP. 
+ StaticRoutesOnly *bool `locationName:"staticRoutesOnly" type:"boolean"` +} + +// String returns the string representation +func (s VpnConnectionOptionsSpecification) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s VpnConnectionOptionsSpecification) GoString() string { + return s.String() +} + +// Describes a virtual private gateway. +type VpnGateway struct { + _ struct{} `type:"structure"` + + // The Availability Zone where the virtual private gateway was created, if applicable. + // This field may be empty or not returned. + AvailabilityZone *string `locationName:"availabilityZone" type:"string"` + + // The current state of the virtual private gateway. + State *string `locationName:"state" type:"string" enum:"VpnState"` + + // Any tags assigned to the virtual private gateway. + Tags []*Tag `locationName:"tagSet" locationNameList:"item" type:"list"` + + // The type of VPN connection the virtual private gateway supports. + Type *string `locationName:"type" type:"string" enum:"GatewayType"` + + // Any VPCs attached to the virtual private gateway. + VpcAttachments []*VpcAttachment `locationName:"attachments" locationNameList:"item" type:"list"` + + // The ID of the virtual private gateway. + VpnGatewayId *string `locationName:"vpnGatewayId" type:"string"` +} + +// String returns the string representation +func (s VpnGateway) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s VpnGateway) GoString() string { + return s.String() +} + +// Describes a static route for a VPN connection. +type VpnStaticRoute struct { + _ struct{} `type:"structure"` + + // The CIDR block associated with the local subnet of the customer data center. + DestinationCidrBlock *string `locationName:"destinationCidrBlock" type:"string"` + + // Indicates how the routes were provided. + Source *string `locationName:"source" type:"string" enum:"VpnStaticRouteSource"` + + // The current state of the static route. 
+ State *string `locationName:"state" type:"string" enum:"VpnState"` +} + +// String returns the string representation +func (s VpnStaticRoute) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s VpnStaticRoute) GoString() string { + return s.String() +} + +const ( + // @enum AccountAttributeName + AccountAttributeNameSupportedPlatforms = "supported-platforms" + // @enum AccountAttributeName + AccountAttributeNameDefaultVpc = "default-vpc" +) + +const ( + // @enum Affinity + AffinityDefault = "default" + // @enum Affinity + AffinityHost = "host" +) + +const ( + // @enum AllocationState + AllocationStateAvailable = "available" + // @enum AllocationState + AllocationStateUnderAssessment = "under-assessment" + // @enum AllocationState + AllocationStatePermanentFailure = "permanent-failure" + // @enum AllocationState + AllocationStateReleased = "released" + // @enum AllocationState + AllocationStateReleasedPermanentFailure = "released-permanent-failure" +) + +const ( + // @enum AllocationStrategy + AllocationStrategyLowestPrice = "lowestPrice" + // @enum AllocationStrategy + AllocationStrategyDiversified = "diversified" +) + +const ( + // @enum ArchitectureValues + ArchitectureValuesI386 = "i386" + // @enum ArchitectureValues + ArchitectureValuesX8664 = "x86_64" +) + +const ( + // @enum AttachmentStatus + AttachmentStatusAttaching = "attaching" + // @enum AttachmentStatus + AttachmentStatusAttached = "attached" + // @enum AttachmentStatus + AttachmentStatusDetaching = "detaching" + // @enum AttachmentStatus + AttachmentStatusDetached = "detached" +) + +const ( + // @enum AutoPlacement + AutoPlacementOn = "on" + // @enum AutoPlacement + AutoPlacementOff = "off" +) + +const ( + // @enum AvailabilityZoneState + AvailabilityZoneStateAvailable = "available" + // @enum AvailabilityZoneState + AvailabilityZoneStateInformation = "information" + // @enum AvailabilityZoneState + AvailabilityZoneStateImpaired = "impaired" + // @enum AvailabilityZoneState + AvailabilityZoneStateUnavailable = "unavailable" +) + +const ( + // @enum BatchState + BatchStateSubmitted = "submitted" + // @enum BatchState + BatchStateActive = "active" + // @enum BatchState + BatchStateCancelled = "cancelled" + // @enum BatchState + BatchStateFailed = "failed" + // @enum BatchState + BatchStateCancelledRunning = "cancelled_running" + // @enum BatchState + BatchStateCancelledTerminating = "cancelled_terminating" + // @enum BatchState + BatchStateModifying = "modifying" +) + +const ( + // @enum BundleTaskState + BundleTaskStatePending = "pending" + // @enum BundleTaskState + BundleTaskStateWaitingForShutdown = "waiting-for-shutdown" + // @enum BundleTaskState + BundleTaskStateBundling = "bundling" + // @enum BundleTaskState + BundleTaskStateStoring = "storing" + // @enum BundleTaskState + BundleTaskStateCancelling = "cancelling" + // @enum BundleTaskState + BundleTaskStateComplete = "complete" + // @enum BundleTaskState + BundleTaskStateFailed = "failed" +) + +const ( + // @enum CancelBatchErrorCode + CancelBatchErrorCodeFleetRequestIdDoesNotExist = "fleetRequestIdDoesNotExist" + // @enum CancelBatchErrorCode + CancelBatchErrorCodeFleetRequestIdMalformed = "fleetRequestIdMalformed" + // @enum CancelBatchErrorCode + CancelBatchErrorCodeFleetRequestNotInCancellableState = "fleetRequestNotInCancellableState" + // @enum CancelBatchErrorCode + CancelBatchErrorCodeUnexpectedError = "unexpectedError" +) + +const ( + // @enum CancelSpotInstanceRequestState + 
CancelSpotInstanceRequestStateActive = "active" + // @enum CancelSpotInstanceRequestState + CancelSpotInstanceRequestStateOpen = "open" + // @enum CancelSpotInstanceRequestState + CancelSpotInstanceRequestStateClosed = "closed" + // @enum CancelSpotInstanceRequestState + CancelSpotInstanceRequestStateCancelled = "cancelled" + // @enum CancelSpotInstanceRequestState + CancelSpotInstanceRequestStateCompleted = "completed" +) + +const ( + // @enum ContainerFormat + ContainerFormatOva = "ova" +) + +const ( + // @enum ConversionTaskState + ConversionTaskStateActive = "active" + // @enum ConversionTaskState + ConversionTaskStateCancelling = "cancelling" + // @enum ConversionTaskState + ConversionTaskStateCancelled = "cancelled" + // @enum ConversionTaskState + ConversionTaskStateCompleted = "completed" +) + +const ( + // @enum CurrencyCodeValues + CurrencyCodeValuesUsd = "USD" +) + +const ( + // @enum DatafeedSubscriptionState + DatafeedSubscriptionStateActive = "Active" + // @enum DatafeedSubscriptionState + DatafeedSubscriptionStateInactive = "Inactive" +) + +const ( + // @enum DeviceType + DeviceTypeEbs = "ebs" + // @enum DeviceType + DeviceTypeInstanceStore = "instance-store" +) + +const ( + // @enum DiskImageFormat + DiskImageFormatVmdk = "VMDK" + // @enum DiskImageFormat + DiskImageFormatRaw = "RAW" + // @enum DiskImageFormat + DiskImageFormatVhd = "VHD" +) + +const ( + // @enum DomainType + DomainTypeVpc = "vpc" + // @enum DomainType + DomainTypeStandard = "standard" +) + +const ( + // @enum EventCode + EventCodeInstanceReboot = "instance-reboot" + // @enum EventCode + EventCodeSystemReboot = "system-reboot" + // @enum EventCode + EventCodeSystemMaintenance = "system-maintenance" + // @enum EventCode + EventCodeInstanceRetirement = "instance-retirement" + // @enum EventCode + EventCodeInstanceStop = "instance-stop" +) + +const ( + // @enum EventType + EventTypeInstanceChange = "instanceChange" + // @enum EventType + EventTypeFleetRequestChange = "fleetRequestChange" + // @enum EventType + EventTypeError = "error" +) + +const ( + // @enum ExcessCapacityTerminationPolicy + ExcessCapacityTerminationPolicyNoTermination = "noTermination" + // @enum ExcessCapacityTerminationPolicy + ExcessCapacityTerminationPolicyDefault = "default" +) + +const ( + // @enum ExportEnvironment + ExportEnvironmentCitrix = "citrix" + // @enum ExportEnvironment + ExportEnvironmentVmware = "vmware" + // @enum ExportEnvironment + ExportEnvironmentMicrosoft = "microsoft" +) + +const ( + // @enum ExportTaskState + ExportTaskStateActive = "active" + // @enum ExportTaskState + ExportTaskStateCancelling = "cancelling" + // @enum ExportTaskState + ExportTaskStateCancelled = "cancelled" + // @enum ExportTaskState + ExportTaskStateCompleted = "completed" +) + +const ( + // @enum FlowLogsResourceType + FlowLogsResourceTypeVpc = "VPC" + // @enum FlowLogsResourceType + FlowLogsResourceTypeSubnet = "Subnet" + // @enum FlowLogsResourceType + FlowLogsResourceTypeNetworkInterface = "NetworkInterface" +) + +const ( + // @enum GatewayType + GatewayTypeIpsec1 = "ipsec.1" +) + +const ( + // @enum HostTenancy + HostTenancyDedicated = "dedicated" + // @enum HostTenancy + HostTenancyHost = "host" +) + +const ( + // @enum HypervisorType + HypervisorTypeOvm = "ovm" + // @enum HypervisorType + HypervisorTypeXen = "xen" +) + +const ( + // @enum ImageAttributeName + ImageAttributeNameDescription = "description" + // @enum ImageAttributeName + ImageAttributeNameKernel = "kernel" + // @enum ImageAttributeName + ImageAttributeNameRamdisk = 
"ramdisk" + // @enum ImageAttributeName + ImageAttributeNameLaunchPermission = "launchPermission" + // @enum ImageAttributeName + ImageAttributeNameProductCodes = "productCodes" + // @enum ImageAttributeName + ImageAttributeNameBlockDeviceMapping = "blockDeviceMapping" + // @enum ImageAttributeName + ImageAttributeNameSriovNetSupport = "sriovNetSupport" +) + +const ( + // @enum ImageState + ImageStatePending = "pending" + // @enum ImageState + ImageStateAvailable = "available" + // @enum ImageState + ImageStateInvalid = "invalid" + // @enum ImageState + ImageStateDeregistered = "deregistered" + // @enum ImageState + ImageStateTransient = "transient" + // @enum ImageState + ImageStateFailed = "failed" + // @enum ImageState + ImageStateError = "error" +) + +const ( + // @enum ImageTypeValues + ImageTypeValuesMachine = "machine" + // @enum ImageTypeValues + ImageTypeValuesKernel = "kernel" + // @enum ImageTypeValues + ImageTypeValuesRamdisk = "ramdisk" +) + +const ( + // @enum InstanceAttributeName + InstanceAttributeNameInstanceType = "instanceType" + // @enum InstanceAttributeName + InstanceAttributeNameKernel = "kernel" + // @enum InstanceAttributeName + InstanceAttributeNameRamdisk = "ramdisk" + // @enum InstanceAttributeName + InstanceAttributeNameUserData = "userData" + // @enum InstanceAttributeName + InstanceAttributeNameDisableApiTermination = "disableApiTermination" + // @enum InstanceAttributeName + InstanceAttributeNameInstanceInitiatedShutdownBehavior = "instanceInitiatedShutdownBehavior" + // @enum InstanceAttributeName + InstanceAttributeNameRootDeviceName = "rootDeviceName" + // @enum InstanceAttributeName + InstanceAttributeNameBlockDeviceMapping = "blockDeviceMapping" + // @enum InstanceAttributeName + InstanceAttributeNameProductCodes = "productCodes" + // @enum InstanceAttributeName + InstanceAttributeNameSourceDestCheck = "sourceDestCheck" + // @enum InstanceAttributeName + InstanceAttributeNameGroupSet = "groupSet" + // @enum InstanceAttributeName + InstanceAttributeNameEbsOptimized = "ebsOptimized" + // @enum InstanceAttributeName + InstanceAttributeNameSriovNetSupport = "sriovNetSupport" +) + +const ( + // @enum InstanceLifecycleType + InstanceLifecycleTypeSpot = "spot" +) + +const ( + // @enum InstanceStateName + InstanceStateNamePending = "pending" + // @enum InstanceStateName + InstanceStateNameRunning = "running" + // @enum InstanceStateName + InstanceStateNameShuttingDown = "shutting-down" + // @enum InstanceStateName + InstanceStateNameTerminated = "terminated" + // @enum InstanceStateName + InstanceStateNameStopping = "stopping" + // @enum InstanceStateName + InstanceStateNameStopped = "stopped" +) + +const ( + // @enum InstanceType + InstanceTypeT1Micro = "t1.micro" + // @enum InstanceType + InstanceTypeM1Small = "m1.small" + // @enum InstanceType + InstanceTypeM1Medium = "m1.medium" + // @enum InstanceType + InstanceTypeM1Large = "m1.large" + // @enum InstanceType + InstanceTypeM1Xlarge = "m1.xlarge" + // @enum InstanceType + InstanceTypeM3Medium = "m3.medium" + // @enum InstanceType + InstanceTypeM3Large = "m3.large" + // @enum InstanceType + InstanceTypeM3Xlarge = "m3.xlarge" + // @enum InstanceType + InstanceTypeM32xlarge = "m3.2xlarge" + // @enum InstanceType + InstanceTypeM4Large = "m4.large" + // @enum InstanceType + InstanceTypeM4Xlarge = "m4.xlarge" + // @enum InstanceType + InstanceTypeM42xlarge = "m4.2xlarge" + // @enum InstanceType + InstanceTypeM44xlarge = "m4.4xlarge" + // @enum InstanceType + InstanceTypeM410xlarge = "m4.10xlarge" + // @enum 
InstanceType + InstanceTypeT2Nano = "t2.nano" + // @enum InstanceType + InstanceTypeT2Micro = "t2.micro" + // @enum InstanceType + InstanceTypeT2Small = "t2.small" + // @enum InstanceType + InstanceTypeT2Medium = "t2.medium" + // @enum InstanceType + InstanceTypeT2Large = "t2.large" + // @enum InstanceType + InstanceTypeM2Xlarge = "m2.xlarge" + // @enum InstanceType + InstanceTypeM22xlarge = "m2.2xlarge" + // @enum InstanceType + InstanceTypeM24xlarge = "m2.4xlarge" + // @enum InstanceType + InstanceTypeCr18xlarge = "cr1.8xlarge" + // @enum InstanceType + InstanceTypeI2Xlarge = "i2.xlarge" + // @enum InstanceType + InstanceTypeI22xlarge = "i2.2xlarge" + // @enum InstanceType + InstanceTypeI24xlarge = "i2.4xlarge" + // @enum InstanceType + InstanceTypeI28xlarge = "i2.8xlarge" + // @enum InstanceType + InstanceTypeHi14xlarge = "hi1.4xlarge" + // @enum InstanceType + InstanceTypeHs18xlarge = "hs1.8xlarge" + // @enum InstanceType + InstanceTypeC1Medium = "c1.medium" + // @enum InstanceType + InstanceTypeC1Xlarge = "c1.xlarge" + // @enum InstanceType + InstanceTypeC3Large = "c3.large" + // @enum InstanceType + InstanceTypeC3Xlarge = "c3.xlarge" + // @enum InstanceType + InstanceTypeC32xlarge = "c3.2xlarge" + // @enum InstanceType + InstanceTypeC34xlarge = "c3.4xlarge" + // @enum InstanceType + InstanceTypeC38xlarge = "c3.8xlarge" + // @enum InstanceType + InstanceTypeC4Large = "c4.large" + // @enum InstanceType + InstanceTypeC4Xlarge = "c4.xlarge" + // @enum InstanceType + InstanceTypeC42xlarge = "c4.2xlarge" + // @enum InstanceType + InstanceTypeC44xlarge = "c4.4xlarge" + // @enum InstanceType + InstanceTypeC48xlarge = "c4.8xlarge" + // @enum InstanceType + InstanceTypeCc14xlarge = "cc1.4xlarge" + // @enum InstanceType + InstanceTypeCc28xlarge = "cc2.8xlarge" + // @enum InstanceType + InstanceTypeG22xlarge = "g2.2xlarge" + // @enum InstanceType + InstanceTypeCg14xlarge = "cg1.4xlarge" + // @enum InstanceType + InstanceTypeR3Large = "r3.large" + // @enum InstanceType + InstanceTypeR3Xlarge = "r3.xlarge" + // @enum InstanceType + InstanceTypeR32xlarge = "r3.2xlarge" + // @enum InstanceType + InstanceTypeR34xlarge = "r3.4xlarge" + // @enum InstanceType + InstanceTypeR38xlarge = "r3.8xlarge" + // @enum InstanceType + InstanceTypeD2Xlarge = "d2.xlarge" + // @enum InstanceType + InstanceTypeD22xlarge = "d2.2xlarge" + // @enum InstanceType + InstanceTypeD24xlarge = "d2.4xlarge" + // @enum InstanceType + InstanceTypeD28xlarge = "d2.8xlarge" +) + +const ( + // @enum ListingState + ListingStateAvailable = "available" + // @enum ListingState + ListingStateSold = "sold" + // @enum ListingState + ListingStateCancelled = "cancelled" + // @enum ListingState + ListingStatePending = "pending" +) + +const ( + // @enum ListingStatus + ListingStatusActive = "active" + // @enum ListingStatus + ListingStatusPending = "pending" + // @enum ListingStatus + ListingStatusCancelled = "cancelled" + // @enum ListingStatus + ListingStatusClosed = "closed" +) + +const ( + // @enum MonitoringState + MonitoringStateDisabled = "disabled" + // @enum MonitoringState + MonitoringStateDisabling = "disabling" + // @enum MonitoringState + MonitoringStateEnabled = "enabled" + // @enum MonitoringState + MonitoringStatePending = "pending" +) + +const ( + // @enum MoveStatus + MoveStatusMovingToVpc = "movingToVpc" + // @enum MoveStatus + MoveStatusRestoringToClassic = "restoringToClassic" +) + +const ( + // @enum NatGatewayState + NatGatewayStatePending = "pending" + // @enum NatGatewayState + NatGatewayStateFailed = "failed" + // @enum 
NatGatewayState + NatGatewayStateAvailable = "available" + // @enum NatGatewayState + NatGatewayStateDeleting = "deleting" + // @enum NatGatewayState + NatGatewayStateDeleted = "deleted" +) + +const ( + // @enum NetworkInterfaceAttribute + NetworkInterfaceAttributeDescription = "description" + // @enum NetworkInterfaceAttribute + NetworkInterfaceAttributeGroupSet = "groupSet" + // @enum NetworkInterfaceAttribute + NetworkInterfaceAttributeSourceDestCheck = "sourceDestCheck" + // @enum NetworkInterfaceAttribute + NetworkInterfaceAttributeAttachment = "attachment" +) + +const ( + // @enum NetworkInterfaceStatus + NetworkInterfaceStatusAvailable = "available" + // @enum NetworkInterfaceStatus + NetworkInterfaceStatusAttaching = "attaching" + // @enum NetworkInterfaceStatus + NetworkInterfaceStatusInUse = "in-use" + // @enum NetworkInterfaceStatus + NetworkInterfaceStatusDetaching = "detaching" +) + +const ( + // @enum NetworkInterfaceType + NetworkInterfaceTypeInterface = "interface" + // @enum NetworkInterfaceType + NetworkInterfaceTypeNatGateway = "natGateway" +) + +const ( + // @enum OfferingTypeValues + OfferingTypeValuesHeavyUtilization = "Heavy Utilization" + // @enum OfferingTypeValues + OfferingTypeValuesMediumUtilization = "Medium Utilization" + // @enum OfferingTypeValues + OfferingTypeValuesLightUtilization = "Light Utilization" + // @enum OfferingTypeValues + OfferingTypeValuesNoUpfront = "No Upfront" + // @enum OfferingTypeValues + OfferingTypeValuesPartialUpfront = "Partial Upfront" + // @enum OfferingTypeValues + OfferingTypeValuesAllUpfront = "All Upfront" +) + +const ( + // @enum OperationType + OperationTypeAdd = "add" + // @enum OperationType + OperationTypeRemove = "remove" +) + +const ( + // @enum PermissionGroup + PermissionGroupAll = "all" +) + +const ( + // @enum PlacementGroupState + PlacementGroupStatePending = "pending" + // @enum PlacementGroupState + PlacementGroupStateAvailable = "available" + // @enum PlacementGroupState + PlacementGroupStateDeleting = "deleting" + // @enum PlacementGroupState + PlacementGroupStateDeleted = "deleted" +) + +const ( + // @enum PlacementStrategy + PlacementStrategyCluster = "cluster" +) + +const ( + // @enum PlatformValues + PlatformValuesWindows = "Windows" +) + +const ( + // @enum ProductCodeValues + ProductCodeValuesDevpay = "devpay" + // @enum ProductCodeValues + ProductCodeValuesMarketplace = "marketplace" +) + +const ( + // @enum RIProductDescription + RIProductDescriptionLinuxUnix = "Linux/UNIX" + // @enum RIProductDescription + RIProductDescriptionLinuxUnixamazonVpc = "Linux/UNIX (Amazon VPC)" + // @enum RIProductDescription + RIProductDescriptionWindows = "Windows" + // @enum RIProductDescription + RIProductDescriptionWindowsAmazonVpc = "Windows (Amazon VPC)" +) + +const ( + // @enum RecurringChargeFrequency + RecurringChargeFrequencyHourly = "Hourly" +) + +const ( + // @enum ReportInstanceReasonCodes + ReportInstanceReasonCodesInstanceStuckInState = "instance-stuck-in-state" + // @enum ReportInstanceReasonCodes + ReportInstanceReasonCodesUnresponsive = "unresponsive" + // @enum ReportInstanceReasonCodes + ReportInstanceReasonCodesNotAcceptingCredentials = "not-accepting-credentials" + // @enum ReportInstanceReasonCodes + ReportInstanceReasonCodesPasswordNotAvailable = "password-not-available" + // @enum ReportInstanceReasonCodes + ReportInstanceReasonCodesPerformanceNetwork = "performance-network" + // @enum ReportInstanceReasonCodes + ReportInstanceReasonCodesPerformanceInstanceStore = "performance-instance-store" + // 
@enum ReportInstanceReasonCodes + ReportInstanceReasonCodesPerformanceEbsVolume = "performance-ebs-volume" + // @enum ReportInstanceReasonCodes + ReportInstanceReasonCodesPerformanceOther = "performance-other" + // @enum ReportInstanceReasonCodes + ReportInstanceReasonCodesOther = "other" +) + +const ( + // @enum ReportStatusType + ReportStatusTypeOk = "ok" + // @enum ReportStatusType + ReportStatusTypeImpaired = "impaired" +) + +const ( + // @enum ReservedInstanceState + ReservedInstanceStatePaymentPending = "payment-pending" + // @enum ReservedInstanceState + ReservedInstanceStateActive = "active" + // @enum ReservedInstanceState + ReservedInstanceStatePaymentFailed = "payment-failed" + // @enum ReservedInstanceState + ReservedInstanceStateRetired = "retired" +) + +const ( + // @enum ResetImageAttributeName + ResetImageAttributeNameLaunchPermission = "launchPermission" +) + +const ( + // @enum ResourceType + ResourceTypeCustomerGateway = "customer-gateway" + // @enum ResourceType + ResourceTypeDhcpOptions = "dhcp-options" + // @enum ResourceType + ResourceTypeImage = "image" + // @enum ResourceType + ResourceTypeInstance = "instance" + // @enum ResourceType + ResourceTypeInternetGateway = "internet-gateway" + // @enum ResourceType + ResourceTypeNetworkAcl = "network-acl" + // @enum ResourceType + ResourceTypeNetworkInterface = "network-interface" + // @enum ResourceType + ResourceTypeReservedInstances = "reserved-instances" + // @enum ResourceType + ResourceTypeRouteTable = "route-table" + // @enum ResourceType + ResourceTypeSnapshot = "snapshot" + // @enum ResourceType + ResourceTypeSpotInstancesRequest = "spot-instances-request" + // @enum ResourceType + ResourceTypeSubnet = "subnet" + // @enum ResourceType + ResourceTypeSecurityGroup = "security-group" + // @enum ResourceType + ResourceTypeVolume = "volume" + // @enum ResourceType + ResourceTypeVpc = "vpc" + // @enum ResourceType + ResourceTypeVpnConnection = "vpn-connection" + // @enum ResourceType + ResourceTypeVpnGateway = "vpn-gateway" +) + +const ( + // @enum RouteOrigin + RouteOriginCreateRouteTable = "CreateRouteTable" + // @enum RouteOrigin + RouteOriginCreateRoute = "CreateRoute" + // @enum RouteOrigin + RouteOriginEnableVgwRoutePropagation = "EnableVgwRoutePropagation" +) + +const ( + // @enum RouteState + RouteStateActive = "active" + // @enum RouteState + RouteStateBlackhole = "blackhole" +) + +const ( + // @enum RuleAction + RuleActionAllow = "allow" + // @enum RuleAction + RuleActionDeny = "deny" +) + +const ( + // @enum ShutdownBehavior + ShutdownBehaviorStop = "stop" + // @enum ShutdownBehavior + ShutdownBehaviorTerminate = "terminate" +) + +const ( + // @enum SnapshotAttributeName + SnapshotAttributeNameProductCodes = "productCodes" + // @enum SnapshotAttributeName + SnapshotAttributeNameCreateVolumePermission = "createVolumePermission" +) + +const ( + // @enum SnapshotState + SnapshotStatePending = "pending" + // @enum SnapshotState + SnapshotStateCompleted = "completed" + // @enum SnapshotState + SnapshotStateError = "error" +) + +const ( + // @enum SpotInstanceState + SpotInstanceStateOpen = "open" + // @enum SpotInstanceState + SpotInstanceStateActive = "active" + // @enum SpotInstanceState + SpotInstanceStateClosed = "closed" + // @enum SpotInstanceState + SpotInstanceStateCancelled = "cancelled" + // @enum SpotInstanceState + SpotInstanceStateFailed = "failed" +) + +const ( + // @enum SpotInstanceType + SpotInstanceTypeOneTime = "one-time" + // @enum SpotInstanceType + SpotInstanceTypePersistent = "persistent" 
+) + +const ( + // @enum State + StatePending = "Pending" + // @enum State + StateAvailable = "Available" + // @enum State + StateDeleting = "Deleting" + // @enum State + StateDeleted = "Deleted" +) + +const ( + // @enum Status + StatusMoveInProgress = "MoveInProgress" + // @enum Status + StatusInVpc = "InVpc" + // @enum Status + StatusInClassic = "InClassic" +) + +const ( + // @enum StatusName + StatusNameReachability = "reachability" +) + +const ( + // @enum StatusType + StatusTypePassed = "passed" + // @enum StatusType + StatusTypeFailed = "failed" + // @enum StatusType + StatusTypeInsufficientData = "insufficient-data" + // @enum StatusType + StatusTypeInitializing = "initializing" +) + +const ( + // @enum SubnetState + SubnetStatePending = "pending" + // @enum SubnetState + SubnetStateAvailable = "available" +) + +const ( + // @enum SummaryStatus + SummaryStatusOk = "ok" + // @enum SummaryStatus + SummaryStatusImpaired = "impaired" + // @enum SummaryStatus + SummaryStatusInsufficientData = "insufficient-data" + // @enum SummaryStatus + SummaryStatusNotApplicable = "not-applicable" + // @enum SummaryStatus + SummaryStatusInitializing = "initializing" +) + +const ( + // @enum TelemetryStatus + TelemetryStatusUp = "UP" + // @enum TelemetryStatus + TelemetryStatusDown = "DOWN" +) + +const ( + // @enum Tenancy + TenancyDefault = "default" + // @enum Tenancy + TenancyDedicated = "dedicated" + // @enum Tenancy + TenancyHost = "host" +) + +const ( + // @enum TrafficType + TrafficTypeAccept = "ACCEPT" + // @enum TrafficType + TrafficTypeReject = "REJECT" + // @enum TrafficType + TrafficTypeAll = "ALL" +) + +const ( + // @enum VirtualizationType + VirtualizationTypeHvm = "hvm" + // @enum VirtualizationType + VirtualizationTypeParavirtual = "paravirtual" +) + +const ( + // @enum VolumeAttachmentState + VolumeAttachmentStateAttaching = "attaching" + // @enum VolumeAttachmentState + VolumeAttachmentStateAttached = "attached" + // @enum VolumeAttachmentState + VolumeAttachmentStateDetaching = "detaching" + // @enum VolumeAttachmentState + VolumeAttachmentStateDetached = "detached" +) + +const ( + // @enum VolumeAttributeName + VolumeAttributeNameAutoEnableIo = "autoEnableIO" + // @enum VolumeAttributeName + VolumeAttributeNameProductCodes = "productCodes" +) + +const ( + // @enum VolumeState + VolumeStateCreating = "creating" + // @enum VolumeState + VolumeStateAvailable = "available" + // @enum VolumeState + VolumeStateInUse = "in-use" + // @enum VolumeState + VolumeStateDeleting = "deleting" + // @enum VolumeState + VolumeStateDeleted = "deleted" + // @enum VolumeState + VolumeStateError = "error" +) + +const ( + // @enum VolumeStatusInfoStatus + VolumeStatusInfoStatusOk = "ok" + // @enum VolumeStatusInfoStatus + VolumeStatusInfoStatusImpaired = "impaired" + // @enum VolumeStatusInfoStatus + VolumeStatusInfoStatusInsufficientData = "insufficient-data" +) + +const ( + // @enum VolumeStatusName + VolumeStatusNameIoEnabled = "io-enabled" + // @enum VolumeStatusName + VolumeStatusNameIoPerformance = "io-performance" +) + +const ( + // @enum VolumeType + VolumeTypeStandard = "standard" + // @enum VolumeType + VolumeTypeIo1 = "io1" + // @enum VolumeType + VolumeTypeGp2 = "gp2" +) + +const ( + // @enum VpcAttributeName + VpcAttributeNameEnableDnsSupport = "enableDnsSupport" + // @enum VpcAttributeName + VpcAttributeNameEnableDnsHostnames = "enableDnsHostnames" +) + +const ( + // @enum VpcPeeringConnectionStateReasonCode + VpcPeeringConnectionStateReasonCodeInitiatingRequest = "initiating-request" + // 
@enum VpcPeeringConnectionStateReasonCode + VpcPeeringConnectionStateReasonCodePendingAcceptance = "pending-acceptance" + // @enum VpcPeeringConnectionStateReasonCode + VpcPeeringConnectionStateReasonCodeActive = "active" + // @enum VpcPeeringConnectionStateReasonCode + VpcPeeringConnectionStateReasonCodeDeleted = "deleted" + // @enum VpcPeeringConnectionStateReasonCode + VpcPeeringConnectionStateReasonCodeRejected = "rejected" + // @enum VpcPeeringConnectionStateReasonCode + VpcPeeringConnectionStateReasonCodeFailed = "failed" + // @enum VpcPeeringConnectionStateReasonCode + VpcPeeringConnectionStateReasonCodeExpired = "expired" + // @enum VpcPeeringConnectionStateReasonCode + VpcPeeringConnectionStateReasonCodeProvisioning = "provisioning" + // @enum VpcPeeringConnectionStateReasonCode + VpcPeeringConnectionStateReasonCodeDeleting = "deleting" +) + +const ( + // @enum VpcState + VpcStatePending = "pending" + // @enum VpcState + VpcStateAvailable = "available" +) + +const ( + // @enum VpnState + VpnStatePending = "pending" + // @enum VpnState + VpnStateAvailable = "available" + // @enum VpnState + VpnStateDeleting = "deleting" + // @enum VpnState + VpnStateDeleted = "deleted" +) + +const ( + // @enum VpnStaticRouteSource + VpnStaticRouteSourceStatic = "Static" +) diff --git a/vendor/github.com/aws/aws-sdk-go/service/ec2/customizations.go b/vendor/github.com/aws/aws-sdk-go/service/ec2/customizations.go new file mode 100644 index 000000000..9e94fe671 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/ec2/customizations.go @@ -0,0 +1,55 @@ +package ec2 + +import ( + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awsutil" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/endpoints" +) + +func init() { + initRequest = func(r *request.Request) { + if r.Operation.Name == opCopySnapshot { // fill the PresignedURL parameter + r.Handlers.Build.PushFront(fillPresignedURL) + } + } +} + +func fillPresignedURL(r *request.Request) { + if !r.ParamsFilled() { + return + } + + origParams := r.Params.(*CopySnapshotInput) + + // Stop if PresignedURL/DestinationRegion is set + if origParams.PresignedUrl != nil || origParams.DestinationRegion != nil { + return + } + + origParams.DestinationRegion = r.Config.Region + newParams := awsutil.CopyOf(r.Params).(*CopySnapshotInput) + + // Create a new request based on the existing request. We will use this to + // presign the CopySnapshot request against the source region. + cfg := r.Config.Copy(aws.NewConfig(). + WithEndpoint(""). + WithRegion(aws.StringValue(origParams.SourceRegion))) + + clientInfo := r.ClientInfo + clientInfo.Endpoint, clientInfo.SigningRegion = endpoints.EndpointForRegion( + clientInfo.ServiceName, aws.StringValue(cfg.Region), aws.BoolValue(cfg.DisableSSL)) + + // Presign a CopySnapshot request with modified params + req := request.New(*cfg, clientInfo, r.Handlers, r.Retryer, r.Operation, newParams, r.Data) + url, err := req.Presign(5 * time.Minute) // 5 minutes should be enough. 
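+	// The URL is signed for the source region (see cfg above), so EC2 in
+	// that region can verify the caller is allowed to read the source
+	// snapshot; a presigned URL is required when copying encrypted snapshots.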
+ if err != nil { // bubble error back up to original request + r.Error = err + return + } + + // We have our URL, set it on params + origParams.PresignedUrl = &url +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/ec2/customizations_test.go b/vendor/github.com/aws/aws-sdk-go/service/ec2/customizations_test.go new file mode 100644 index 000000000..195d9b55b --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/ec2/customizations_test.go @@ -0,0 +1,35 @@ +package ec2_test + +import ( + "io/ioutil" + "net/url" + "testing" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/awstesting/unit" + "github.com/aws/aws-sdk-go/service/ec2" + "github.com/stretchr/testify/assert" +) + +func TestCopySnapshotPresignedURL(t *testing.T) { + svc := ec2.New(unit.Session, &aws.Config{Region: aws.String("us-west-2")}) + + assert.NotPanics(t, func() { + // Doesn't panic on nil input + req, _ := svc.CopySnapshotRequest(nil) + req.Sign() + }) + + req, _ := svc.CopySnapshotRequest(&ec2.CopySnapshotInput{ + SourceRegion: aws.String("us-west-1"), + SourceSnapshotId: aws.String("snap-id"), + }) + req.Sign() + + b, _ := ioutil.ReadAll(req.HTTPRequest.Body) + q, _ := url.ParseQuery(string(b)) + u, _ := url.QueryUnescape(q.Get("PresignedUrl")) + assert.Equal(t, "us-west-2", q.Get("DestinationRegion")) + assert.Equal(t, "us-west-1", q.Get("SourceRegion")) + assert.Regexp(t, `^https://ec2\.us-west-1\.amazonaws\.com/.+&DestinationRegion=us-west-2`, u) +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/ec2/ec2iface/interface.go b/vendor/github.com/aws/aws-sdk-go/service/ec2/ec2iface/interface.go new file mode 100644 index 000000000..3091c64d5 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/ec2/ec2iface/interface.go @@ -0,0 +1,832 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +// Package ec2iface provides an interface for the Amazon Elastic Compute Cloud. +package ec2iface + +import ( + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/service/ec2" +) + +// EC2API is the interface type for ec2.EC2. 
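+//
+// Consumers can depend on this interface instead of the concrete *ec2.EC2
+// client so that tests can substitute a stub. A minimal sketch (mockEC2 and
+// its DescribeVpcs override are hypothetical, not part of this package):
+//
+//	type mockEC2 struct {
+//		ec2iface.EC2API // embedded so the full interface is satisfied
+//	}
+//
+//	func (m mockEC2) DescribeVpcs(in *ec2.DescribeVpcsInput) (*ec2.DescribeVpcsOutput, error) {
+//		return &ec2.DescribeVpcsOutput{}, nil // canned response for tests
+//	}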
+type EC2API interface { + AcceptVpcPeeringConnectionRequest(*ec2.AcceptVpcPeeringConnectionInput) (*request.Request, *ec2.AcceptVpcPeeringConnectionOutput) + + AcceptVpcPeeringConnection(*ec2.AcceptVpcPeeringConnectionInput) (*ec2.AcceptVpcPeeringConnectionOutput, error) + + AllocateAddressRequest(*ec2.AllocateAddressInput) (*request.Request, *ec2.AllocateAddressOutput) + + AllocateAddress(*ec2.AllocateAddressInput) (*ec2.AllocateAddressOutput, error) + + AllocateHostsRequest(*ec2.AllocateHostsInput) (*request.Request, *ec2.AllocateHostsOutput) + + AllocateHosts(*ec2.AllocateHostsInput) (*ec2.AllocateHostsOutput, error) + + AssignPrivateIpAddressesRequest(*ec2.AssignPrivateIpAddressesInput) (*request.Request, *ec2.AssignPrivateIpAddressesOutput) + + AssignPrivateIpAddresses(*ec2.AssignPrivateIpAddressesInput) (*ec2.AssignPrivateIpAddressesOutput, error) + + AssociateAddressRequest(*ec2.AssociateAddressInput) (*request.Request, *ec2.AssociateAddressOutput) + + AssociateAddress(*ec2.AssociateAddressInput) (*ec2.AssociateAddressOutput, error) + + AssociateDhcpOptionsRequest(*ec2.AssociateDhcpOptionsInput) (*request.Request, *ec2.AssociateDhcpOptionsOutput) + + AssociateDhcpOptions(*ec2.AssociateDhcpOptionsInput) (*ec2.AssociateDhcpOptionsOutput, error) + + AssociateRouteTableRequest(*ec2.AssociateRouteTableInput) (*request.Request, *ec2.AssociateRouteTableOutput) + + AssociateRouteTable(*ec2.AssociateRouteTableInput) (*ec2.AssociateRouteTableOutput, error) + + AttachClassicLinkVpcRequest(*ec2.AttachClassicLinkVpcInput) (*request.Request, *ec2.AttachClassicLinkVpcOutput) + + AttachClassicLinkVpc(*ec2.AttachClassicLinkVpcInput) (*ec2.AttachClassicLinkVpcOutput, error) + + AttachInternetGatewayRequest(*ec2.AttachInternetGatewayInput) (*request.Request, *ec2.AttachInternetGatewayOutput) + + AttachInternetGateway(*ec2.AttachInternetGatewayInput) (*ec2.AttachInternetGatewayOutput, error) + + AttachNetworkInterfaceRequest(*ec2.AttachNetworkInterfaceInput) (*request.Request, *ec2.AttachNetworkInterfaceOutput) + + AttachNetworkInterface(*ec2.AttachNetworkInterfaceInput) (*ec2.AttachNetworkInterfaceOutput, error) + + AttachVolumeRequest(*ec2.AttachVolumeInput) (*request.Request, *ec2.VolumeAttachment) + + AttachVolume(*ec2.AttachVolumeInput) (*ec2.VolumeAttachment, error) + + AttachVpnGatewayRequest(*ec2.AttachVpnGatewayInput) (*request.Request, *ec2.AttachVpnGatewayOutput) + + AttachVpnGateway(*ec2.AttachVpnGatewayInput) (*ec2.AttachVpnGatewayOutput, error) + + AuthorizeSecurityGroupEgressRequest(*ec2.AuthorizeSecurityGroupEgressInput) (*request.Request, *ec2.AuthorizeSecurityGroupEgressOutput) + + AuthorizeSecurityGroupEgress(*ec2.AuthorizeSecurityGroupEgressInput) (*ec2.AuthorizeSecurityGroupEgressOutput, error) + + AuthorizeSecurityGroupIngressRequest(*ec2.AuthorizeSecurityGroupIngressInput) (*request.Request, *ec2.AuthorizeSecurityGroupIngressOutput) + + AuthorizeSecurityGroupIngress(*ec2.AuthorizeSecurityGroupIngressInput) (*ec2.AuthorizeSecurityGroupIngressOutput, error) + + BundleInstanceRequest(*ec2.BundleInstanceInput) (*request.Request, *ec2.BundleInstanceOutput) + + BundleInstance(*ec2.BundleInstanceInput) (*ec2.BundleInstanceOutput, error) + + CancelBundleTaskRequest(*ec2.CancelBundleTaskInput) (*request.Request, *ec2.CancelBundleTaskOutput) + + CancelBundleTask(*ec2.CancelBundleTaskInput) (*ec2.CancelBundleTaskOutput, error) + + CancelConversionTaskRequest(*ec2.CancelConversionTaskInput) (*request.Request, *ec2.CancelConversionTaskOutput) + + 
CancelConversionTask(*ec2.CancelConversionTaskInput) (*ec2.CancelConversionTaskOutput, error) + + CancelExportTaskRequest(*ec2.CancelExportTaskInput) (*request.Request, *ec2.CancelExportTaskOutput) + + CancelExportTask(*ec2.CancelExportTaskInput) (*ec2.CancelExportTaskOutput, error) + + CancelImportTaskRequest(*ec2.CancelImportTaskInput) (*request.Request, *ec2.CancelImportTaskOutput) + + CancelImportTask(*ec2.CancelImportTaskInput) (*ec2.CancelImportTaskOutput, error) + + CancelReservedInstancesListingRequest(*ec2.CancelReservedInstancesListingInput) (*request.Request, *ec2.CancelReservedInstancesListingOutput) + + CancelReservedInstancesListing(*ec2.CancelReservedInstancesListingInput) (*ec2.CancelReservedInstancesListingOutput, error) + + CancelSpotFleetRequestsRequest(*ec2.CancelSpotFleetRequestsInput) (*request.Request, *ec2.CancelSpotFleetRequestsOutput) + + CancelSpotFleetRequests(*ec2.CancelSpotFleetRequestsInput) (*ec2.CancelSpotFleetRequestsOutput, error) + + CancelSpotInstanceRequestsRequest(*ec2.CancelSpotInstanceRequestsInput) (*request.Request, *ec2.CancelSpotInstanceRequestsOutput) + + CancelSpotInstanceRequests(*ec2.CancelSpotInstanceRequestsInput) (*ec2.CancelSpotInstanceRequestsOutput, error) + + ConfirmProductInstanceRequest(*ec2.ConfirmProductInstanceInput) (*request.Request, *ec2.ConfirmProductInstanceOutput) + + ConfirmProductInstance(*ec2.ConfirmProductInstanceInput) (*ec2.ConfirmProductInstanceOutput, error) + + CopyImageRequest(*ec2.CopyImageInput) (*request.Request, *ec2.CopyImageOutput) + + CopyImage(*ec2.CopyImageInput) (*ec2.CopyImageOutput, error) + + CopySnapshotRequest(*ec2.CopySnapshotInput) (*request.Request, *ec2.CopySnapshotOutput) + + CopySnapshot(*ec2.CopySnapshotInput) (*ec2.CopySnapshotOutput, error) + + CreateCustomerGatewayRequest(*ec2.CreateCustomerGatewayInput) (*request.Request, *ec2.CreateCustomerGatewayOutput) + + CreateCustomerGateway(*ec2.CreateCustomerGatewayInput) (*ec2.CreateCustomerGatewayOutput, error) + + CreateDhcpOptionsRequest(*ec2.CreateDhcpOptionsInput) (*request.Request, *ec2.CreateDhcpOptionsOutput) + + CreateDhcpOptions(*ec2.CreateDhcpOptionsInput) (*ec2.CreateDhcpOptionsOutput, error) + + CreateFlowLogsRequest(*ec2.CreateFlowLogsInput) (*request.Request, *ec2.CreateFlowLogsOutput) + + CreateFlowLogs(*ec2.CreateFlowLogsInput) (*ec2.CreateFlowLogsOutput, error) + + CreateImageRequest(*ec2.CreateImageInput) (*request.Request, *ec2.CreateImageOutput) + + CreateImage(*ec2.CreateImageInput) (*ec2.CreateImageOutput, error) + + CreateInstanceExportTaskRequest(*ec2.CreateInstanceExportTaskInput) (*request.Request, *ec2.CreateInstanceExportTaskOutput) + + CreateInstanceExportTask(*ec2.CreateInstanceExportTaskInput) (*ec2.CreateInstanceExportTaskOutput, error) + + CreateInternetGatewayRequest(*ec2.CreateInternetGatewayInput) (*request.Request, *ec2.CreateInternetGatewayOutput) + + CreateInternetGateway(*ec2.CreateInternetGatewayInput) (*ec2.CreateInternetGatewayOutput, error) + + CreateKeyPairRequest(*ec2.CreateKeyPairInput) (*request.Request, *ec2.CreateKeyPairOutput) + + CreateKeyPair(*ec2.CreateKeyPairInput) (*ec2.CreateKeyPairOutput, error) + + CreateNatGatewayRequest(*ec2.CreateNatGatewayInput) (*request.Request, *ec2.CreateNatGatewayOutput) + + CreateNatGateway(*ec2.CreateNatGatewayInput) (*ec2.CreateNatGatewayOutput, error) + + CreateNetworkAclRequest(*ec2.CreateNetworkAclInput) (*request.Request, *ec2.CreateNetworkAclOutput) + + CreateNetworkAcl(*ec2.CreateNetworkAclInput) (*ec2.CreateNetworkAclOutput, error) + + 
CreateNetworkAclEntryRequest(*ec2.CreateNetworkAclEntryInput) (*request.Request, *ec2.CreateNetworkAclEntryOutput) + + CreateNetworkAclEntry(*ec2.CreateNetworkAclEntryInput) (*ec2.CreateNetworkAclEntryOutput, error) + + CreateNetworkInterfaceRequest(*ec2.CreateNetworkInterfaceInput) (*request.Request, *ec2.CreateNetworkInterfaceOutput) + + CreateNetworkInterface(*ec2.CreateNetworkInterfaceInput) (*ec2.CreateNetworkInterfaceOutput, error) + + CreatePlacementGroupRequest(*ec2.CreatePlacementGroupInput) (*request.Request, *ec2.CreatePlacementGroupOutput) + + CreatePlacementGroup(*ec2.CreatePlacementGroupInput) (*ec2.CreatePlacementGroupOutput, error) + + CreateReservedInstancesListingRequest(*ec2.CreateReservedInstancesListingInput) (*request.Request, *ec2.CreateReservedInstancesListingOutput) + + CreateReservedInstancesListing(*ec2.CreateReservedInstancesListingInput) (*ec2.CreateReservedInstancesListingOutput, error) + + CreateRouteRequest(*ec2.CreateRouteInput) (*request.Request, *ec2.CreateRouteOutput) + + CreateRoute(*ec2.CreateRouteInput) (*ec2.CreateRouteOutput, error) + + CreateRouteTableRequest(*ec2.CreateRouteTableInput) (*request.Request, *ec2.CreateRouteTableOutput) + + CreateRouteTable(*ec2.CreateRouteTableInput) (*ec2.CreateRouteTableOutput, error) + + CreateSecurityGroupRequest(*ec2.CreateSecurityGroupInput) (*request.Request, *ec2.CreateSecurityGroupOutput) + + CreateSecurityGroup(*ec2.CreateSecurityGroupInput) (*ec2.CreateSecurityGroupOutput, error) + + CreateSnapshotRequest(*ec2.CreateSnapshotInput) (*request.Request, *ec2.Snapshot) + + CreateSnapshot(*ec2.CreateSnapshotInput) (*ec2.Snapshot, error) + + CreateSpotDatafeedSubscriptionRequest(*ec2.CreateSpotDatafeedSubscriptionInput) (*request.Request, *ec2.CreateSpotDatafeedSubscriptionOutput) + + CreateSpotDatafeedSubscription(*ec2.CreateSpotDatafeedSubscriptionInput) (*ec2.CreateSpotDatafeedSubscriptionOutput, error) + + CreateSubnetRequest(*ec2.CreateSubnetInput) (*request.Request, *ec2.CreateSubnetOutput) + + CreateSubnet(*ec2.CreateSubnetInput) (*ec2.CreateSubnetOutput, error) + + CreateTagsRequest(*ec2.CreateTagsInput) (*request.Request, *ec2.CreateTagsOutput) + + CreateTags(*ec2.CreateTagsInput) (*ec2.CreateTagsOutput, error) + + CreateVolumeRequest(*ec2.CreateVolumeInput) (*request.Request, *ec2.Volume) + + CreateVolume(*ec2.CreateVolumeInput) (*ec2.Volume, error) + + CreateVpcRequest(*ec2.CreateVpcInput) (*request.Request, *ec2.CreateVpcOutput) + + CreateVpc(*ec2.CreateVpcInput) (*ec2.CreateVpcOutput, error) + + CreateVpcEndpointRequest(*ec2.CreateVpcEndpointInput) (*request.Request, *ec2.CreateVpcEndpointOutput) + + CreateVpcEndpoint(*ec2.CreateVpcEndpointInput) (*ec2.CreateVpcEndpointOutput, error) + + CreateVpcPeeringConnectionRequest(*ec2.CreateVpcPeeringConnectionInput) (*request.Request, *ec2.CreateVpcPeeringConnectionOutput) + + CreateVpcPeeringConnection(*ec2.CreateVpcPeeringConnectionInput) (*ec2.CreateVpcPeeringConnectionOutput, error) + + CreateVpnConnectionRequest(*ec2.CreateVpnConnectionInput) (*request.Request, *ec2.CreateVpnConnectionOutput) + + CreateVpnConnection(*ec2.CreateVpnConnectionInput) (*ec2.CreateVpnConnectionOutput, error) + + CreateVpnConnectionRouteRequest(*ec2.CreateVpnConnectionRouteInput) (*request.Request, *ec2.CreateVpnConnectionRouteOutput) + + CreateVpnConnectionRoute(*ec2.CreateVpnConnectionRouteInput) (*ec2.CreateVpnConnectionRouteOutput, error) + + CreateVpnGatewayRequest(*ec2.CreateVpnGatewayInput) (*request.Request, *ec2.CreateVpnGatewayOutput) + + 
CreateVpnGateway(*ec2.CreateVpnGatewayInput) (*ec2.CreateVpnGatewayOutput, error) + + DeleteCustomerGatewayRequest(*ec2.DeleteCustomerGatewayInput) (*request.Request, *ec2.DeleteCustomerGatewayOutput) + + DeleteCustomerGateway(*ec2.DeleteCustomerGatewayInput) (*ec2.DeleteCustomerGatewayOutput, error) + + DeleteDhcpOptionsRequest(*ec2.DeleteDhcpOptionsInput) (*request.Request, *ec2.DeleteDhcpOptionsOutput) + + DeleteDhcpOptions(*ec2.DeleteDhcpOptionsInput) (*ec2.DeleteDhcpOptionsOutput, error) + + DeleteFlowLogsRequest(*ec2.DeleteFlowLogsInput) (*request.Request, *ec2.DeleteFlowLogsOutput) + + DeleteFlowLogs(*ec2.DeleteFlowLogsInput) (*ec2.DeleteFlowLogsOutput, error) + + DeleteInternetGatewayRequest(*ec2.DeleteInternetGatewayInput) (*request.Request, *ec2.DeleteInternetGatewayOutput) + + DeleteInternetGateway(*ec2.DeleteInternetGatewayInput) (*ec2.DeleteInternetGatewayOutput, error) + + DeleteKeyPairRequest(*ec2.DeleteKeyPairInput) (*request.Request, *ec2.DeleteKeyPairOutput) + + DeleteKeyPair(*ec2.DeleteKeyPairInput) (*ec2.DeleteKeyPairOutput, error) + + DeleteNatGatewayRequest(*ec2.DeleteNatGatewayInput) (*request.Request, *ec2.DeleteNatGatewayOutput) + + DeleteNatGateway(*ec2.DeleteNatGatewayInput) (*ec2.DeleteNatGatewayOutput, error) + + DeleteNetworkAclRequest(*ec2.DeleteNetworkAclInput) (*request.Request, *ec2.DeleteNetworkAclOutput) + + DeleteNetworkAcl(*ec2.DeleteNetworkAclInput) (*ec2.DeleteNetworkAclOutput, error) + + DeleteNetworkAclEntryRequest(*ec2.DeleteNetworkAclEntryInput) (*request.Request, *ec2.DeleteNetworkAclEntryOutput) + + DeleteNetworkAclEntry(*ec2.DeleteNetworkAclEntryInput) (*ec2.DeleteNetworkAclEntryOutput, error) + + DeleteNetworkInterfaceRequest(*ec2.DeleteNetworkInterfaceInput) (*request.Request, *ec2.DeleteNetworkInterfaceOutput) + + DeleteNetworkInterface(*ec2.DeleteNetworkInterfaceInput) (*ec2.DeleteNetworkInterfaceOutput, error) + + DeletePlacementGroupRequest(*ec2.DeletePlacementGroupInput) (*request.Request, *ec2.DeletePlacementGroupOutput) + + DeletePlacementGroup(*ec2.DeletePlacementGroupInput) (*ec2.DeletePlacementGroupOutput, error) + + DeleteRouteRequest(*ec2.DeleteRouteInput) (*request.Request, *ec2.DeleteRouteOutput) + + DeleteRoute(*ec2.DeleteRouteInput) (*ec2.DeleteRouteOutput, error) + + DeleteRouteTableRequest(*ec2.DeleteRouteTableInput) (*request.Request, *ec2.DeleteRouteTableOutput) + + DeleteRouteTable(*ec2.DeleteRouteTableInput) (*ec2.DeleteRouteTableOutput, error) + + DeleteSecurityGroupRequest(*ec2.DeleteSecurityGroupInput) (*request.Request, *ec2.DeleteSecurityGroupOutput) + + DeleteSecurityGroup(*ec2.DeleteSecurityGroupInput) (*ec2.DeleteSecurityGroupOutput, error) + + DeleteSnapshotRequest(*ec2.DeleteSnapshotInput) (*request.Request, *ec2.DeleteSnapshotOutput) + + DeleteSnapshot(*ec2.DeleteSnapshotInput) (*ec2.DeleteSnapshotOutput, error) + + DeleteSpotDatafeedSubscriptionRequest(*ec2.DeleteSpotDatafeedSubscriptionInput) (*request.Request, *ec2.DeleteSpotDatafeedSubscriptionOutput) + + DeleteSpotDatafeedSubscription(*ec2.DeleteSpotDatafeedSubscriptionInput) (*ec2.DeleteSpotDatafeedSubscriptionOutput, error) + + DeleteSubnetRequest(*ec2.DeleteSubnetInput) (*request.Request, *ec2.DeleteSubnetOutput) + + DeleteSubnet(*ec2.DeleteSubnetInput) (*ec2.DeleteSubnetOutput, error) + + DeleteTagsRequest(*ec2.DeleteTagsInput) (*request.Request, *ec2.DeleteTagsOutput) + + DeleteTags(*ec2.DeleteTagsInput) (*ec2.DeleteTagsOutput, error) + + DeleteVolumeRequest(*ec2.DeleteVolumeInput) (*request.Request, *ec2.DeleteVolumeOutput) + + 
DeleteVolume(*ec2.DeleteVolumeInput) (*ec2.DeleteVolumeOutput, error) + + DeleteVpcRequest(*ec2.DeleteVpcInput) (*request.Request, *ec2.DeleteVpcOutput) + + DeleteVpc(*ec2.DeleteVpcInput) (*ec2.DeleteVpcOutput, error) + + DeleteVpcEndpointsRequest(*ec2.DeleteVpcEndpointsInput) (*request.Request, *ec2.DeleteVpcEndpointsOutput) + + DeleteVpcEndpoints(*ec2.DeleteVpcEndpointsInput) (*ec2.DeleteVpcEndpointsOutput, error) + + DeleteVpcPeeringConnectionRequest(*ec2.DeleteVpcPeeringConnectionInput) (*request.Request, *ec2.DeleteVpcPeeringConnectionOutput) + + DeleteVpcPeeringConnection(*ec2.DeleteVpcPeeringConnectionInput) (*ec2.DeleteVpcPeeringConnectionOutput, error) + + DeleteVpnConnectionRequest(*ec2.DeleteVpnConnectionInput) (*request.Request, *ec2.DeleteVpnConnectionOutput) + + DeleteVpnConnection(*ec2.DeleteVpnConnectionInput) (*ec2.DeleteVpnConnectionOutput, error) + + DeleteVpnConnectionRouteRequest(*ec2.DeleteVpnConnectionRouteInput) (*request.Request, *ec2.DeleteVpnConnectionRouteOutput) + + DeleteVpnConnectionRoute(*ec2.DeleteVpnConnectionRouteInput) (*ec2.DeleteVpnConnectionRouteOutput, error) + + DeleteVpnGatewayRequest(*ec2.DeleteVpnGatewayInput) (*request.Request, *ec2.DeleteVpnGatewayOutput) + + DeleteVpnGateway(*ec2.DeleteVpnGatewayInput) (*ec2.DeleteVpnGatewayOutput, error) + + DeregisterImageRequest(*ec2.DeregisterImageInput) (*request.Request, *ec2.DeregisterImageOutput) + + DeregisterImage(*ec2.DeregisterImageInput) (*ec2.DeregisterImageOutput, error) + + DescribeAccountAttributesRequest(*ec2.DescribeAccountAttributesInput) (*request.Request, *ec2.DescribeAccountAttributesOutput) + + DescribeAccountAttributes(*ec2.DescribeAccountAttributesInput) (*ec2.DescribeAccountAttributesOutput, error) + + DescribeAddressesRequest(*ec2.DescribeAddressesInput) (*request.Request, *ec2.DescribeAddressesOutput) + + DescribeAddresses(*ec2.DescribeAddressesInput) (*ec2.DescribeAddressesOutput, error) + + DescribeAvailabilityZonesRequest(*ec2.DescribeAvailabilityZonesInput) (*request.Request, *ec2.DescribeAvailabilityZonesOutput) + + DescribeAvailabilityZones(*ec2.DescribeAvailabilityZonesInput) (*ec2.DescribeAvailabilityZonesOutput, error) + + DescribeBundleTasksRequest(*ec2.DescribeBundleTasksInput) (*request.Request, *ec2.DescribeBundleTasksOutput) + + DescribeBundleTasks(*ec2.DescribeBundleTasksInput) (*ec2.DescribeBundleTasksOutput, error) + + DescribeClassicLinkInstancesRequest(*ec2.DescribeClassicLinkInstancesInput) (*request.Request, *ec2.DescribeClassicLinkInstancesOutput) + + DescribeClassicLinkInstances(*ec2.DescribeClassicLinkInstancesInput) (*ec2.DescribeClassicLinkInstancesOutput, error) + + DescribeConversionTasksRequest(*ec2.DescribeConversionTasksInput) (*request.Request, *ec2.DescribeConversionTasksOutput) + + DescribeConversionTasks(*ec2.DescribeConversionTasksInput) (*ec2.DescribeConversionTasksOutput, error) + + DescribeCustomerGatewaysRequest(*ec2.DescribeCustomerGatewaysInput) (*request.Request, *ec2.DescribeCustomerGatewaysOutput) + + DescribeCustomerGateways(*ec2.DescribeCustomerGatewaysInput) (*ec2.DescribeCustomerGatewaysOutput, error) + + DescribeDhcpOptionsRequest(*ec2.DescribeDhcpOptionsInput) (*request.Request, *ec2.DescribeDhcpOptionsOutput) + + DescribeDhcpOptions(*ec2.DescribeDhcpOptionsInput) (*ec2.DescribeDhcpOptionsOutput, error) + + DescribeExportTasksRequest(*ec2.DescribeExportTasksInput) (*request.Request, *ec2.DescribeExportTasksOutput) + + DescribeExportTasks(*ec2.DescribeExportTasksInput) (*ec2.DescribeExportTasksOutput, error) + + 
DescribeFlowLogsRequest(*ec2.DescribeFlowLogsInput) (*request.Request, *ec2.DescribeFlowLogsOutput) + + DescribeFlowLogs(*ec2.DescribeFlowLogsInput) (*ec2.DescribeFlowLogsOutput, error) + + DescribeHostsRequest(*ec2.DescribeHostsInput) (*request.Request, *ec2.DescribeHostsOutput) + + DescribeHosts(*ec2.DescribeHostsInput) (*ec2.DescribeHostsOutput, error) + + DescribeIdFormatRequest(*ec2.DescribeIdFormatInput) (*request.Request, *ec2.DescribeIdFormatOutput) + + DescribeIdFormat(*ec2.DescribeIdFormatInput) (*ec2.DescribeIdFormatOutput, error) + + DescribeImageAttributeRequest(*ec2.DescribeImageAttributeInput) (*request.Request, *ec2.DescribeImageAttributeOutput) + + DescribeImageAttribute(*ec2.DescribeImageAttributeInput) (*ec2.DescribeImageAttributeOutput, error) + + DescribeImagesRequest(*ec2.DescribeImagesInput) (*request.Request, *ec2.DescribeImagesOutput) + + DescribeImages(*ec2.DescribeImagesInput) (*ec2.DescribeImagesOutput, error) + + DescribeImportImageTasksRequest(*ec2.DescribeImportImageTasksInput) (*request.Request, *ec2.DescribeImportImageTasksOutput) + + DescribeImportImageTasks(*ec2.DescribeImportImageTasksInput) (*ec2.DescribeImportImageTasksOutput, error) + + DescribeImportSnapshotTasksRequest(*ec2.DescribeImportSnapshotTasksInput) (*request.Request, *ec2.DescribeImportSnapshotTasksOutput) + + DescribeImportSnapshotTasks(*ec2.DescribeImportSnapshotTasksInput) (*ec2.DescribeImportSnapshotTasksOutput, error) + + DescribeInstanceAttributeRequest(*ec2.DescribeInstanceAttributeInput) (*request.Request, *ec2.DescribeInstanceAttributeOutput) + + DescribeInstanceAttribute(*ec2.DescribeInstanceAttributeInput) (*ec2.DescribeInstanceAttributeOutput, error) + + DescribeInstanceStatusRequest(*ec2.DescribeInstanceStatusInput) (*request.Request, *ec2.DescribeInstanceStatusOutput) + + DescribeInstanceStatus(*ec2.DescribeInstanceStatusInput) (*ec2.DescribeInstanceStatusOutput, error) + + DescribeInstanceStatusPages(*ec2.DescribeInstanceStatusInput, func(*ec2.DescribeInstanceStatusOutput, bool) bool) error + + DescribeInstancesRequest(*ec2.DescribeInstancesInput) (*request.Request, *ec2.DescribeInstancesOutput) + + DescribeInstances(*ec2.DescribeInstancesInput) (*ec2.DescribeInstancesOutput, error) + + DescribeInstancesPages(*ec2.DescribeInstancesInput, func(*ec2.DescribeInstancesOutput, bool) bool) error + + DescribeInternetGatewaysRequest(*ec2.DescribeInternetGatewaysInput) (*request.Request, *ec2.DescribeInternetGatewaysOutput) + + DescribeInternetGateways(*ec2.DescribeInternetGatewaysInput) (*ec2.DescribeInternetGatewaysOutput, error) + + DescribeKeyPairsRequest(*ec2.DescribeKeyPairsInput) (*request.Request, *ec2.DescribeKeyPairsOutput) + + DescribeKeyPairs(*ec2.DescribeKeyPairsInput) (*ec2.DescribeKeyPairsOutput, error) + + DescribeMovingAddressesRequest(*ec2.DescribeMovingAddressesInput) (*request.Request, *ec2.DescribeMovingAddressesOutput) + + DescribeMovingAddresses(*ec2.DescribeMovingAddressesInput) (*ec2.DescribeMovingAddressesOutput, error) + + DescribeNatGatewaysRequest(*ec2.DescribeNatGatewaysInput) (*request.Request, *ec2.DescribeNatGatewaysOutput) + + DescribeNatGateways(*ec2.DescribeNatGatewaysInput) (*ec2.DescribeNatGatewaysOutput, error) + + DescribeNetworkAclsRequest(*ec2.DescribeNetworkAclsInput) (*request.Request, *ec2.DescribeNetworkAclsOutput) + + DescribeNetworkAcls(*ec2.DescribeNetworkAclsInput) (*ec2.DescribeNetworkAclsOutput, error) + + DescribeNetworkInterfaceAttributeRequest(*ec2.DescribeNetworkInterfaceAttributeInput) (*request.Request, 
*ec2.DescribeNetworkInterfaceAttributeOutput) + + DescribeNetworkInterfaceAttribute(*ec2.DescribeNetworkInterfaceAttributeInput) (*ec2.DescribeNetworkInterfaceAttributeOutput, error) + + DescribeNetworkInterfacesRequest(*ec2.DescribeNetworkInterfacesInput) (*request.Request, *ec2.DescribeNetworkInterfacesOutput) + + DescribeNetworkInterfaces(*ec2.DescribeNetworkInterfacesInput) (*ec2.DescribeNetworkInterfacesOutput, error) + + DescribePlacementGroupsRequest(*ec2.DescribePlacementGroupsInput) (*request.Request, *ec2.DescribePlacementGroupsOutput) + + DescribePlacementGroups(*ec2.DescribePlacementGroupsInput) (*ec2.DescribePlacementGroupsOutput, error) + + DescribePrefixListsRequest(*ec2.DescribePrefixListsInput) (*request.Request, *ec2.DescribePrefixListsOutput) + + DescribePrefixLists(*ec2.DescribePrefixListsInput) (*ec2.DescribePrefixListsOutput, error) + + DescribeRegionsRequest(*ec2.DescribeRegionsInput) (*request.Request, *ec2.DescribeRegionsOutput) + + DescribeRegions(*ec2.DescribeRegionsInput) (*ec2.DescribeRegionsOutput, error) + + DescribeReservedInstancesRequest(*ec2.DescribeReservedInstancesInput) (*request.Request, *ec2.DescribeReservedInstancesOutput) + + DescribeReservedInstances(*ec2.DescribeReservedInstancesInput) (*ec2.DescribeReservedInstancesOutput, error) + + DescribeReservedInstancesListingsRequest(*ec2.DescribeReservedInstancesListingsInput) (*request.Request, *ec2.DescribeReservedInstancesListingsOutput) + + DescribeReservedInstancesListings(*ec2.DescribeReservedInstancesListingsInput) (*ec2.DescribeReservedInstancesListingsOutput, error) + + DescribeReservedInstancesModificationsRequest(*ec2.DescribeReservedInstancesModificationsInput) (*request.Request, *ec2.DescribeReservedInstancesModificationsOutput) + + DescribeReservedInstancesModifications(*ec2.DescribeReservedInstancesModificationsInput) (*ec2.DescribeReservedInstancesModificationsOutput, error) + + DescribeReservedInstancesModificationsPages(*ec2.DescribeReservedInstancesModificationsInput, func(*ec2.DescribeReservedInstancesModificationsOutput, bool) bool) error + + DescribeReservedInstancesOfferingsRequest(*ec2.DescribeReservedInstancesOfferingsInput) (*request.Request, *ec2.DescribeReservedInstancesOfferingsOutput) + + DescribeReservedInstancesOfferings(*ec2.DescribeReservedInstancesOfferingsInput) (*ec2.DescribeReservedInstancesOfferingsOutput, error) + + DescribeReservedInstancesOfferingsPages(*ec2.DescribeReservedInstancesOfferingsInput, func(*ec2.DescribeReservedInstancesOfferingsOutput, bool) bool) error + + DescribeRouteTablesRequest(*ec2.DescribeRouteTablesInput) (*request.Request, *ec2.DescribeRouteTablesOutput) + + DescribeRouteTables(*ec2.DescribeRouteTablesInput) (*ec2.DescribeRouteTablesOutput, error) + + DescribeScheduledInstanceAvailabilityRequest(*ec2.DescribeScheduledInstanceAvailabilityInput) (*request.Request, *ec2.DescribeScheduledInstanceAvailabilityOutput) + + DescribeScheduledInstanceAvailability(*ec2.DescribeScheduledInstanceAvailabilityInput) (*ec2.DescribeScheduledInstanceAvailabilityOutput, error) + + DescribeScheduledInstancesRequest(*ec2.DescribeScheduledInstancesInput) (*request.Request, *ec2.DescribeScheduledInstancesOutput) + + DescribeScheduledInstances(*ec2.DescribeScheduledInstancesInput) (*ec2.DescribeScheduledInstancesOutput, error) + + DescribeSecurityGroupsRequest(*ec2.DescribeSecurityGroupsInput) (*request.Request, *ec2.DescribeSecurityGroupsOutput) + + DescribeSecurityGroups(*ec2.DescribeSecurityGroupsInput) (*ec2.DescribeSecurityGroupsOutput, error) + + 
DescribeSnapshotAttributeRequest(*ec2.DescribeSnapshotAttributeInput) (*request.Request, *ec2.DescribeSnapshotAttributeOutput) + + DescribeSnapshotAttribute(*ec2.DescribeSnapshotAttributeInput) (*ec2.DescribeSnapshotAttributeOutput, error) + + DescribeSnapshotsRequest(*ec2.DescribeSnapshotsInput) (*request.Request, *ec2.DescribeSnapshotsOutput) + + DescribeSnapshots(*ec2.DescribeSnapshotsInput) (*ec2.DescribeSnapshotsOutput, error) + + DescribeSnapshotsPages(*ec2.DescribeSnapshotsInput, func(*ec2.DescribeSnapshotsOutput, bool) bool) error + + DescribeSpotDatafeedSubscriptionRequest(*ec2.DescribeSpotDatafeedSubscriptionInput) (*request.Request, *ec2.DescribeSpotDatafeedSubscriptionOutput) + + DescribeSpotDatafeedSubscription(*ec2.DescribeSpotDatafeedSubscriptionInput) (*ec2.DescribeSpotDatafeedSubscriptionOutput, error) + + DescribeSpotFleetInstancesRequest(*ec2.DescribeSpotFleetInstancesInput) (*request.Request, *ec2.DescribeSpotFleetInstancesOutput) + + DescribeSpotFleetInstances(*ec2.DescribeSpotFleetInstancesInput) (*ec2.DescribeSpotFleetInstancesOutput, error) + + DescribeSpotFleetRequestHistoryRequest(*ec2.DescribeSpotFleetRequestHistoryInput) (*request.Request, *ec2.DescribeSpotFleetRequestHistoryOutput) + + DescribeSpotFleetRequestHistory(*ec2.DescribeSpotFleetRequestHistoryInput) (*ec2.DescribeSpotFleetRequestHistoryOutput, error) + + DescribeSpotFleetRequestsRequest(*ec2.DescribeSpotFleetRequestsInput) (*request.Request, *ec2.DescribeSpotFleetRequestsOutput) + + DescribeSpotFleetRequests(*ec2.DescribeSpotFleetRequestsInput) (*ec2.DescribeSpotFleetRequestsOutput, error) + + DescribeSpotInstanceRequestsRequest(*ec2.DescribeSpotInstanceRequestsInput) (*request.Request, *ec2.DescribeSpotInstanceRequestsOutput) + + DescribeSpotInstanceRequests(*ec2.DescribeSpotInstanceRequestsInput) (*ec2.DescribeSpotInstanceRequestsOutput, error) + + DescribeSpotPriceHistoryRequest(*ec2.DescribeSpotPriceHistoryInput) (*request.Request, *ec2.DescribeSpotPriceHistoryOutput) + + DescribeSpotPriceHistory(*ec2.DescribeSpotPriceHistoryInput) (*ec2.DescribeSpotPriceHistoryOutput, error) + + DescribeSpotPriceHistoryPages(*ec2.DescribeSpotPriceHistoryInput, func(*ec2.DescribeSpotPriceHistoryOutput, bool) bool) error + + DescribeSubnetsRequest(*ec2.DescribeSubnetsInput) (*request.Request, *ec2.DescribeSubnetsOutput) + + DescribeSubnets(*ec2.DescribeSubnetsInput) (*ec2.DescribeSubnetsOutput, error) + + DescribeTagsRequest(*ec2.DescribeTagsInput) (*request.Request, *ec2.DescribeTagsOutput) + + DescribeTags(*ec2.DescribeTagsInput) (*ec2.DescribeTagsOutput, error) + + DescribeTagsPages(*ec2.DescribeTagsInput, func(*ec2.DescribeTagsOutput, bool) bool) error + + DescribeVolumeAttributeRequest(*ec2.DescribeVolumeAttributeInput) (*request.Request, *ec2.DescribeVolumeAttributeOutput) + + DescribeVolumeAttribute(*ec2.DescribeVolumeAttributeInput) (*ec2.DescribeVolumeAttributeOutput, error) + + DescribeVolumeStatusRequest(*ec2.DescribeVolumeStatusInput) (*request.Request, *ec2.DescribeVolumeStatusOutput) + + DescribeVolumeStatus(*ec2.DescribeVolumeStatusInput) (*ec2.DescribeVolumeStatusOutput, error) + + DescribeVolumeStatusPages(*ec2.DescribeVolumeStatusInput, func(*ec2.DescribeVolumeStatusOutput, bool) bool) error + + DescribeVolumesRequest(*ec2.DescribeVolumesInput) (*request.Request, *ec2.DescribeVolumesOutput) + + DescribeVolumes(*ec2.DescribeVolumesInput) (*ec2.DescribeVolumesOutput, error) + + DescribeVolumesPages(*ec2.DescribeVolumesInput, func(*ec2.DescribeVolumesOutput, bool) bool) error + + 
DescribeVpcAttributeRequest(*ec2.DescribeVpcAttributeInput) (*request.Request, *ec2.DescribeVpcAttributeOutput) + + DescribeVpcAttribute(*ec2.DescribeVpcAttributeInput) (*ec2.DescribeVpcAttributeOutput, error) + + DescribeVpcClassicLinkRequest(*ec2.DescribeVpcClassicLinkInput) (*request.Request, *ec2.DescribeVpcClassicLinkOutput) + + DescribeVpcClassicLink(*ec2.DescribeVpcClassicLinkInput) (*ec2.DescribeVpcClassicLinkOutput, error) + + DescribeVpcClassicLinkDnsSupportRequest(*ec2.DescribeVpcClassicLinkDnsSupportInput) (*request.Request, *ec2.DescribeVpcClassicLinkDnsSupportOutput) + + DescribeVpcClassicLinkDnsSupport(*ec2.DescribeVpcClassicLinkDnsSupportInput) (*ec2.DescribeVpcClassicLinkDnsSupportOutput, error) + + DescribeVpcEndpointServicesRequest(*ec2.DescribeVpcEndpointServicesInput) (*request.Request, *ec2.DescribeVpcEndpointServicesOutput) + + DescribeVpcEndpointServices(*ec2.DescribeVpcEndpointServicesInput) (*ec2.DescribeVpcEndpointServicesOutput, error) + + DescribeVpcEndpointsRequest(*ec2.DescribeVpcEndpointsInput) (*request.Request, *ec2.DescribeVpcEndpointsOutput) + + DescribeVpcEndpoints(*ec2.DescribeVpcEndpointsInput) (*ec2.DescribeVpcEndpointsOutput, error) + + DescribeVpcPeeringConnectionsRequest(*ec2.DescribeVpcPeeringConnectionsInput) (*request.Request, *ec2.DescribeVpcPeeringConnectionsOutput) + + DescribeVpcPeeringConnections(*ec2.DescribeVpcPeeringConnectionsInput) (*ec2.DescribeVpcPeeringConnectionsOutput, error) + + DescribeVpcsRequest(*ec2.DescribeVpcsInput) (*request.Request, *ec2.DescribeVpcsOutput) + + DescribeVpcs(*ec2.DescribeVpcsInput) (*ec2.DescribeVpcsOutput, error) + + DescribeVpnConnectionsRequest(*ec2.DescribeVpnConnectionsInput) (*request.Request, *ec2.DescribeVpnConnectionsOutput) + + DescribeVpnConnections(*ec2.DescribeVpnConnectionsInput) (*ec2.DescribeVpnConnectionsOutput, error) + + DescribeVpnGatewaysRequest(*ec2.DescribeVpnGatewaysInput) (*request.Request, *ec2.DescribeVpnGatewaysOutput) + + DescribeVpnGateways(*ec2.DescribeVpnGatewaysInput) (*ec2.DescribeVpnGatewaysOutput, error) + + DetachClassicLinkVpcRequest(*ec2.DetachClassicLinkVpcInput) (*request.Request, *ec2.DetachClassicLinkVpcOutput) + + DetachClassicLinkVpc(*ec2.DetachClassicLinkVpcInput) (*ec2.DetachClassicLinkVpcOutput, error) + + DetachInternetGatewayRequest(*ec2.DetachInternetGatewayInput) (*request.Request, *ec2.DetachInternetGatewayOutput) + + DetachInternetGateway(*ec2.DetachInternetGatewayInput) (*ec2.DetachInternetGatewayOutput, error) + + DetachNetworkInterfaceRequest(*ec2.DetachNetworkInterfaceInput) (*request.Request, *ec2.DetachNetworkInterfaceOutput) + + DetachNetworkInterface(*ec2.DetachNetworkInterfaceInput) (*ec2.DetachNetworkInterfaceOutput, error) + + DetachVolumeRequest(*ec2.DetachVolumeInput) (*request.Request, *ec2.VolumeAttachment) + + DetachVolume(*ec2.DetachVolumeInput) (*ec2.VolumeAttachment, error) + + DetachVpnGatewayRequest(*ec2.DetachVpnGatewayInput) (*request.Request, *ec2.DetachVpnGatewayOutput) + + DetachVpnGateway(*ec2.DetachVpnGatewayInput) (*ec2.DetachVpnGatewayOutput, error) + + DisableVgwRoutePropagationRequest(*ec2.DisableVgwRoutePropagationInput) (*request.Request, *ec2.DisableVgwRoutePropagationOutput) + + DisableVgwRoutePropagation(*ec2.DisableVgwRoutePropagationInput) (*ec2.DisableVgwRoutePropagationOutput, error) + + DisableVpcClassicLinkRequest(*ec2.DisableVpcClassicLinkInput) (*request.Request, *ec2.DisableVpcClassicLinkOutput) + + DisableVpcClassicLink(*ec2.DisableVpcClassicLinkInput) (*ec2.DisableVpcClassicLinkOutput, error) + + 
DisableVpcClassicLinkDnsSupportRequest(*ec2.DisableVpcClassicLinkDnsSupportInput) (*request.Request, *ec2.DisableVpcClassicLinkDnsSupportOutput) + + DisableVpcClassicLinkDnsSupport(*ec2.DisableVpcClassicLinkDnsSupportInput) (*ec2.DisableVpcClassicLinkDnsSupportOutput, error) + + DisassociateAddressRequest(*ec2.DisassociateAddressInput) (*request.Request, *ec2.DisassociateAddressOutput) + + DisassociateAddress(*ec2.DisassociateAddressInput) (*ec2.DisassociateAddressOutput, error) + + DisassociateRouteTableRequest(*ec2.DisassociateRouteTableInput) (*request.Request, *ec2.DisassociateRouteTableOutput) + + DisassociateRouteTable(*ec2.DisassociateRouteTableInput) (*ec2.DisassociateRouteTableOutput, error) + + EnableVgwRoutePropagationRequest(*ec2.EnableVgwRoutePropagationInput) (*request.Request, *ec2.EnableVgwRoutePropagationOutput) + + EnableVgwRoutePropagation(*ec2.EnableVgwRoutePropagationInput) (*ec2.EnableVgwRoutePropagationOutput, error) + + EnableVolumeIORequest(*ec2.EnableVolumeIOInput) (*request.Request, *ec2.EnableVolumeIOOutput) + + EnableVolumeIO(*ec2.EnableVolumeIOInput) (*ec2.EnableVolumeIOOutput, error) + + EnableVpcClassicLinkRequest(*ec2.EnableVpcClassicLinkInput) (*request.Request, *ec2.EnableVpcClassicLinkOutput) + + EnableVpcClassicLink(*ec2.EnableVpcClassicLinkInput) (*ec2.EnableVpcClassicLinkOutput, error) + + EnableVpcClassicLinkDnsSupportRequest(*ec2.EnableVpcClassicLinkDnsSupportInput) (*request.Request, *ec2.EnableVpcClassicLinkDnsSupportOutput) + + EnableVpcClassicLinkDnsSupport(*ec2.EnableVpcClassicLinkDnsSupportInput) (*ec2.EnableVpcClassicLinkDnsSupportOutput, error) + + GetConsoleOutputRequest(*ec2.GetConsoleOutputInput) (*request.Request, *ec2.GetConsoleOutputOutput) + + GetConsoleOutput(*ec2.GetConsoleOutputInput) (*ec2.GetConsoleOutputOutput, error) + + GetPasswordDataRequest(*ec2.GetPasswordDataInput) (*request.Request, *ec2.GetPasswordDataOutput) + + GetPasswordData(*ec2.GetPasswordDataInput) (*ec2.GetPasswordDataOutput, error) + + ImportImageRequest(*ec2.ImportImageInput) (*request.Request, *ec2.ImportImageOutput) + + ImportImage(*ec2.ImportImageInput) (*ec2.ImportImageOutput, error) + + ImportInstanceRequest(*ec2.ImportInstanceInput) (*request.Request, *ec2.ImportInstanceOutput) + + ImportInstance(*ec2.ImportInstanceInput) (*ec2.ImportInstanceOutput, error) + + ImportKeyPairRequest(*ec2.ImportKeyPairInput) (*request.Request, *ec2.ImportKeyPairOutput) + + ImportKeyPair(*ec2.ImportKeyPairInput) (*ec2.ImportKeyPairOutput, error) + + ImportSnapshotRequest(*ec2.ImportSnapshotInput) (*request.Request, *ec2.ImportSnapshotOutput) + + ImportSnapshot(*ec2.ImportSnapshotInput) (*ec2.ImportSnapshotOutput, error) + + ImportVolumeRequest(*ec2.ImportVolumeInput) (*request.Request, *ec2.ImportVolumeOutput) + + ImportVolume(*ec2.ImportVolumeInput) (*ec2.ImportVolumeOutput, error) + + ModifyHostsRequest(*ec2.ModifyHostsInput) (*request.Request, *ec2.ModifyHostsOutput) + + ModifyHosts(*ec2.ModifyHostsInput) (*ec2.ModifyHostsOutput, error) + + ModifyIdFormatRequest(*ec2.ModifyIdFormatInput) (*request.Request, *ec2.ModifyIdFormatOutput) + + ModifyIdFormat(*ec2.ModifyIdFormatInput) (*ec2.ModifyIdFormatOutput, error) + + ModifyImageAttributeRequest(*ec2.ModifyImageAttributeInput) (*request.Request, *ec2.ModifyImageAttributeOutput) + + ModifyImageAttribute(*ec2.ModifyImageAttributeInput) (*ec2.ModifyImageAttributeOutput, error) + + ModifyInstanceAttributeRequest(*ec2.ModifyInstanceAttributeInput) (*request.Request, *ec2.ModifyInstanceAttributeOutput) + + 
ModifyInstanceAttribute(*ec2.ModifyInstanceAttributeInput) (*ec2.ModifyInstanceAttributeOutput, error) + + ModifyInstancePlacementRequest(*ec2.ModifyInstancePlacementInput) (*request.Request, *ec2.ModifyInstancePlacementOutput) + + ModifyInstancePlacement(*ec2.ModifyInstancePlacementInput) (*ec2.ModifyInstancePlacementOutput, error) + + ModifyNetworkInterfaceAttributeRequest(*ec2.ModifyNetworkInterfaceAttributeInput) (*request.Request, *ec2.ModifyNetworkInterfaceAttributeOutput) + + ModifyNetworkInterfaceAttribute(*ec2.ModifyNetworkInterfaceAttributeInput) (*ec2.ModifyNetworkInterfaceAttributeOutput, error) + + ModifyReservedInstancesRequest(*ec2.ModifyReservedInstancesInput) (*request.Request, *ec2.ModifyReservedInstancesOutput) + + ModifyReservedInstances(*ec2.ModifyReservedInstancesInput) (*ec2.ModifyReservedInstancesOutput, error) + + ModifySnapshotAttributeRequest(*ec2.ModifySnapshotAttributeInput) (*request.Request, *ec2.ModifySnapshotAttributeOutput) + + ModifySnapshotAttribute(*ec2.ModifySnapshotAttributeInput) (*ec2.ModifySnapshotAttributeOutput, error) + + ModifySpotFleetRequestRequest(*ec2.ModifySpotFleetRequestInput) (*request.Request, *ec2.ModifySpotFleetRequestOutput) + + ModifySpotFleetRequest(*ec2.ModifySpotFleetRequestInput) (*ec2.ModifySpotFleetRequestOutput, error) + + ModifySubnetAttributeRequest(*ec2.ModifySubnetAttributeInput) (*request.Request, *ec2.ModifySubnetAttributeOutput) + + ModifySubnetAttribute(*ec2.ModifySubnetAttributeInput) (*ec2.ModifySubnetAttributeOutput, error) + + ModifyVolumeAttributeRequest(*ec2.ModifyVolumeAttributeInput) (*request.Request, *ec2.ModifyVolumeAttributeOutput) + + ModifyVolumeAttribute(*ec2.ModifyVolumeAttributeInput) (*ec2.ModifyVolumeAttributeOutput, error) + + ModifyVpcAttributeRequest(*ec2.ModifyVpcAttributeInput) (*request.Request, *ec2.ModifyVpcAttributeOutput) + + ModifyVpcAttribute(*ec2.ModifyVpcAttributeInput) (*ec2.ModifyVpcAttributeOutput, error) + + ModifyVpcEndpointRequest(*ec2.ModifyVpcEndpointInput) (*request.Request, *ec2.ModifyVpcEndpointOutput) + + ModifyVpcEndpoint(*ec2.ModifyVpcEndpointInput) (*ec2.ModifyVpcEndpointOutput, error) + + MonitorInstancesRequest(*ec2.MonitorInstancesInput) (*request.Request, *ec2.MonitorInstancesOutput) + + MonitorInstances(*ec2.MonitorInstancesInput) (*ec2.MonitorInstancesOutput, error) + + MoveAddressToVpcRequest(*ec2.MoveAddressToVpcInput) (*request.Request, *ec2.MoveAddressToVpcOutput) + + MoveAddressToVpc(*ec2.MoveAddressToVpcInput) (*ec2.MoveAddressToVpcOutput, error) + + PurchaseReservedInstancesOfferingRequest(*ec2.PurchaseReservedInstancesOfferingInput) (*request.Request, *ec2.PurchaseReservedInstancesOfferingOutput) + + PurchaseReservedInstancesOffering(*ec2.PurchaseReservedInstancesOfferingInput) (*ec2.PurchaseReservedInstancesOfferingOutput, error) + + PurchaseScheduledInstancesRequest(*ec2.PurchaseScheduledInstancesInput) (*request.Request, *ec2.PurchaseScheduledInstancesOutput) + + PurchaseScheduledInstances(*ec2.PurchaseScheduledInstancesInput) (*ec2.PurchaseScheduledInstancesOutput, error) + + RebootInstancesRequest(*ec2.RebootInstancesInput) (*request.Request, *ec2.RebootInstancesOutput) + + RebootInstances(*ec2.RebootInstancesInput) (*ec2.RebootInstancesOutput, error) + + RegisterImageRequest(*ec2.RegisterImageInput) (*request.Request, *ec2.RegisterImageOutput) + + RegisterImage(*ec2.RegisterImageInput) (*ec2.RegisterImageOutput, error) + + RejectVpcPeeringConnectionRequest(*ec2.RejectVpcPeeringConnectionInput) (*request.Request, *ec2.RejectVpcPeeringConnectionOutput) 
+ + RejectVpcPeeringConnection(*ec2.RejectVpcPeeringConnectionInput) (*ec2.RejectVpcPeeringConnectionOutput, error) + + ReleaseAddressRequest(*ec2.ReleaseAddressInput) (*request.Request, *ec2.ReleaseAddressOutput) + + ReleaseAddress(*ec2.ReleaseAddressInput) (*ec2.ReleaseAddressOutput, error) + + ReleaseHostsRequest(*ec2.ReleaseHostsInput) (*request.Request, *ec2.ReleaseHostsOutput) + + ReleaseHosts(*ec2.ReleaseHostsInput) (*ec2.ReleaseHostsOutput, error) + + ReplaceNetworkAclAssociationRequest(*ec2.ReplaceNetworkAclAssociationInput) (*request.Request, *ec2.ReplaceNetworkAclAssociationOutput) + + ReplaceNetworkAclAssociation(*ec2.ReplaceNetworkAclAssociationInput) (*ec2.ReplaceNetworkAclAssociationOutput, error) + + ReplaceNetworkAclEntryRequest(*ec2.ReplaceNetworkAclEntryInput) (*request.Request, *ec2.ReplaceNetworkAclEntryOutput) + + ReplaceNetworkAclEntry(*ec2.ReplaceNetworkAclEntryInput) (*ec2.ReplaceNetworkAclEntryOutput, error) + + ReplaceRouteRequest(*ec2.ReplaceRouteInput) (*request.Request, *ec2.ReplaceRouteOutput) + + ReplaceRoute(*ec2.ReplaceRouteInput) (*ec2.ReplaceRouteOutput, error) + + ReplaceRouteTableAssociationRequest(*ec2.ReplaceRouteTableAssociationInput) (*request.Request, *ec2.ReplaceRouteTableAssociationOutput) + + ReplaceRouteTableAssociation(*ec2.ReplaceRouteTableAssociationInput) (*ec2.ReplaceRouteTableAssociationOutput, error) + + ReportInstanceStatusRequest(*ec2.ReportInstanceStatusInput) (*request.Request, *ec2.ReportInstanceStatusOutput) + + ReportInstanceStatus(*ec2.ReportInstanceStatusInput) (*ec2.ReportInstanceStatusOutput, error) + + RequestSpotFleetRequest(*ec2.RequestSpotFleetInput) (*request.Request, *ec2.RequestSpotFleetOutput) + + RequestSpotFleet(*ec2.RequestSpotFleetInput) (*ec2.RequestSpotFleetOutput, error) + + RequestSpotInstancesRequest(*ec2.RequestSpotInstancesInput) (*request.Request, *ec2.RequestSpotInstancesOutput) + + RequestSpotInstances(*ec2.RequestSpotInstancesInput) (*ec2.RequestSpotInstancesOutput, error) + + ResetImageAttributeRequest(*ec2.ResetImageAttributeInput) (*request.Request, *ec2.ResetImageAttributeOutput) + + ResetImageAttribute(*ec2.ResetImageAttributeInput) (*ec2.ResetImageAttributeOutput, error) + + ResetInstanceAttributeRequest(*ec2.ResetInstanceAttributeInput) (*request.Request, *ec2.ResetInstanceAttributeOutput) + + ResetInstanceAttribute(*ec2.ResetInstanceAttributeInput) (*ec2.ResetInstanceAttributeOutput, error) + + ResetNetworkInterfaceAttributeRequest(*ec2.ResetNetworkInterfaceAttributeInput) (*request.Request, *ec2.ResetNetworkInterfaceAttributeOutput) + + ResetNetworkInterfaceAttribute(*ec2.ResetNetworkInterfaceAttributeInput) (*ec2.ResetNetworkInterfaceAttributeOutput, error) + + ResetSnapshotAttributeRequest(*ec2.ResetSnapshotAttributeInput) (*request.Request, *ec2.ResetSnapshotAttributeOutput) + + ResetSnapshotAttribute(*ec2.ResetSnapshotAttributeInput) (*ec2.ResetSnapshotAttributeOutput, error) + + RestoreAddressToClassicRequest(*ec2.RestoreAddressToClassicInput) (*request.Request, *ec2.RestoreAddressToClassicOutput) + + RestoreAddressToClassic(*ec2.RestoreAddressToClassicInput) (*ec2.RestoreAddressToClassicOutput, error) + + RevokeSecurityGroupEgressRequest(*ec2.RevokeSecurityGroupEgressInput) (*request.Request, *ec2.RevokeSecurityGroupEgressOutput) + + RevokeSecurityGroupEgress(*ec2.RevokeSecurityGroupEgressInput) (*ec2.RevokeSecurityGroupEgressOutput, error) + + RevokeSecurityGroupIngressRequest(*ec2.RevokeSecurityGroupIngressInput) (*request.Request, *ec2.RevokeSecurityGroupIngressOutput) + + 
RevokeSecurityGroupIngress(*ec2.RevokeSecurityGroupIngressInput) (*ec2.RevokeSecurityGroupIngressOutput, error) + + RunInstancesRequest(*ec2.RunInstancesInput) (*request.Request, *ec2.Reservation) + + RunInstances(*ec2.RunInstancesInput) (*ec2.Reservation, error) + + RunScheduledInstancesRequest(*ec2.RunScheduledInstancesInput) (*request.Request, *ec2.RunScheduledInstancesOutput) + + RunScheduledInstances(*ec2.RunScheduledInstancesInput) (*ec2.RunScheduledInstancesOutput, error) + + StartInstancesRequest(*ec2.StartInstancesInput) (*request.Request, *ec2.StartInstancesOutput) + + StartInstances(*ec2.StartInstancesInput) (*ec2.StartInstancesOutput, error) + + StopInstancesRequest(*ec2.StopInstancesInput) (*request.Request, *ec2.StopInstancesOutput) + + StopInstances(*ec2.StopInstancesInput) (*ec2.StopInstancesOutput, error) + + TerminateInstancesRequest(*ec2.TerminateInstancesInput) (*request.Request, *ec2.TerminateInstancesOutput) + + TerminateInstances(*ec2.TerminateInstancesInput) (*ec2.TerminateInstancesOutput, error) + + UnassignPrivateIpAddressesRequest(*ec2.UnassignPrivateIpAddressesInput) (*request.Request, *ec2.UnassignPrivateIpAddressesOutput) + + UnassignPrivateIpAddresses(*ec2.UnassignPrivateIpAddressesInput) (*ec2.UnassignPrivateIpAddressesOutput, error) + + UnmonitorInstancesRequest(*ec2.UnmonitorInstancesInput) (*request.Request, *ec2.UnmonitorInstancesOutput) + + UnmonitorInstances(*ec2.UnmonitorInstancesInput) (*ec2.UnmonitorInstancesOutput, error) +} + +var _ EC2API = (*ec2.EC2)(nil) diff --git a/vendor/github.com/aws/aws-sdk-go/service/ec2/examples_test.go b/vendor/github.com/aws/aws-sdk-go/service/ec2/examples_test.go new file mode 100644 index 000000000..a834cf82b --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/ec2/examples_test.go @@ -0,0 +1,5695 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +package ec2_test + +import ( + "bytes" + "fmt" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/session" + "github.com/aws/aws-sdk-go/service/ec2" +) + +var _ time.Duration +var _ bytes.Buffer + +func ExampleEC2_AcceptVpcPeeringConnection() { + svc := ec2.New(session.New()) + + params := &ec2.AcceptVpcPeeringConnectionInput{ + DryRun: aws.Bool(true), + VpcPeeringConnectionId: aws.String("String"), + } + resp, err := svc.AcceptVpcPeeringConnection(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_AllocateAddress() { + svc := ec2.New(session.New()) + + params := &ec2.AllocateAddressInput{ + Domain: aws.String("DomainType"), + DryRun: aws.Bool(true), + } + resp, err := svc.AllocateAddress(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_AllocateHosts() { + svc := ec2.New(session.New()) + + params := &ec2.AllocateHostsInput{ + AvailabilityZone: aws.String("String"), // Required + InstanceType: aws.String("String"), // Required + Quantity: aws.Int64(1), // Required + AutoPlacement: aws.String("AutoPlacement"), + ClientToken: aws.String("String"), + } + resp, err := svc.AllocateHosts(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. 
+ fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_AssignPrivateIpAddresses() { + svc := ec2.New(session.New()) + + params := &ec2.AssignPrivateIpAddressesInput{ + NetworkInterfaceId: aws.String("String"), // Required + AllowReassignment: aws.Bool(true), + PrivateIpAddresses: []*string{ + aws.String("String"), // Required + // More values... + }, + SecondaryPrivateIpAddressCount: aws.Int64(1), + } + resp, err := svc.AssignPrivateIpAddresses(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_AssociateAddress() { + svc := ec2.New(session.New()) + + params := &ec2.AssociateAddressInput{ + AllocationId: aws.String("String"), + AllowReassociation: aws.Bool(true), + DryRun: aws.Bool(true), + InstanceId: aws.String("String"), + NetworkInterfaceId: aws.String("String"), + PrivateIpAddress: aws.String("String"), + PublicIp: aws.String("String"), + } + resp, err := svc.AssociateAddress(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_AssociateDhcpOptions() { + svc := ec2.New(session.New()) + + params := &ec2.AssociateDhcpOptionsInput{ + DhcpOptionsId: aws.String("String"), // Required + VpcId: aws.String("String"), // Required + DryRun: aws.Bool(true), + } + resp, err := svc.AssociateDhcpOptions(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_AssociateRouteTable() { + svc := ec2.New(session.New()) + + params := &ec2.AssociateRouteTableInput{ + RouteTableId: aws.String("String"), // Required + SubnetId: aws.String("String"), // Required + DryRun: aws.Bool(true), + } + resp, err := svc.AssociateRouteTable(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_AttachClassicLinkVpc() { + svc := ec2.New(session.New()) + + params := &ec2.AttachClassicLinkVpcInput{ + Groups: []*string{ // Required + aws.String("String"), // Required + // More values... + }, + InstanceId: aws.String("String"), // Required + VpcId: aws.String("String"), // Required + DryRun: aws.Bool(true), + } + resp, err := svc.AttachClassicLinkVpc(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_AttachInternetGateway() { + svc := ec2.New(session.New()) + + params := &ec2.AttachInternetGatewayInput{ + InternetGatewayId: aws.String("String"), // Required + VpcId: aws.String("String"), // Required + DryRun: aws.Bool(true), + } + resp, err := svc.AttachInternetGateway(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleEC2_AttachNetworkInterface() { + svc := ec2.New(session.New()) + + params := &ec2.AttachNetworkInterfaceInput{ + DeviceIndex: aws.Int64(1), // Required + InstanceId: aws.String("String"), // Required + NetworkInterfaceId: aws.String("String"), // Required + DryRun: aws.Bool(true), + } + resp, err := svc.AttachNetworkInterface(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_AttachVolume() { + svc := ec2.New(session.New()) + + params := &ec2.AttachVolumeInput{ + Device: aws.String("String"), // Required + InstanceId: aws.String("String"), // Required + VolumeId: aws.String("String"), // Required + DryRun: aws.Bool(true), + } + resp, err := svc.AttachVolume(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_AttachVpnGateway() { + svc := ec2.New(session.New()) + + params := &ec2.AttachVpnGatewayInput{ + VpcId: aws.String("String"), // Required + VpnGatewayId: aws.String("String"), // Required + DryRun: aws.Bool(true), + } + resp, err := svc.AttachVpnGateway(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_AuthorizeSecurityGroupEgress() { + svc := ec2.New(session.New()) + + params := &ec2.AuthorizeSecurityGroupEgressInput{ + GroupId: aws.String("String"), // Required + CidrIp: aws.String("String"), + DryRun: aws.Bool(true), + FromPort: aws.Int64(1), + IpPermissions: []*ec2.IpPermission{ + { // Required + FromPort: aws.Int64(1), + IpProtocol: aws.String("String"), + IpRanges: []*ec2.IpRange{ + { // Required + CidrIp: aws.String("String"), + }, + // More values... + }, + PrefixListIds: []*ec2.PrefixListId{ + { // Required + PrefixListId: aws.String("String"), + }, + // More values... + }, + ToPort: aws.Int64(1), + UserIdGroupPairs: []*ec2.UserIdGroupPair{ + { // Required + GroupId: aws.String("String"), + GroupName: aws.String("String"), + UserId: aws.String("String"), + }, + // More values... + }, + }, + // More values... + }, + IpProtocol: aws.String("String"), + SourceSecurityGroupName: aws.String("String"), + SourceSecurityGroupOwnerId: aws.String("String"), + ToPort: aws.Int64(1), + } + resp, err := svc.AuthorizeSecurityGroupEgress(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_AuthorizeSecurityGroupIngress() { + svc := ec2.New(session.New()) + + params := &ec2.AuthorizeSecurityGroupIngressInput{ + CidrIp: aws.String("String"), + DryRun: aws.Bool(true), + FromPort: aws.Int64(1), + GroupId: aws.String("String"), + GroupName: aws.String("String"), + IpPermissions: []*ec2.IpPermission{ + { // Required + FromPort: aws.Int64(1), + IpProtocol: aws.String("String"), + IpRanges: []*ec2.IpRange{ + { // Required + CidrIp: aws.String("String"), + }, + // More values... + }, + PrefixListIds: []*ec2.PrefixListId{ + { // Required + PrefixListId: aws.String("String"), + }, + // More values... 
+ }, + ToPort: aws.Int64(1), + UserIdGroupPairs: []*ec2.UserIdGroupPair{ + { // Required + GroupId: aws.String("String"), + GroupName: aws.String("String"), + UserId: aws.String("String"), + }, + // More values... + }, + }, + // More values... + }, + IpProtocol: aws.String("String"), + SourceSecurityGroupName: aws.String("String"), + SourceSecurityGroupOwnerId: aws.String("String"), + ToPort: aws.Int64(1), + } + resp, err := svc.AuthorizeSecurityGroupIngress(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_BundleInstance() { + svc := ec2.New(session.New()) + + params := &ec2.BundleInstanceInput{ + InstanceId: aws.String("String"), // Required + Storage: &ec2.Storage{ // Required + S3: &ec2.S3Storage{ + AWSAccessKeyId: aws.String("String"), + Bucket: aws.String("String"), + Prefix: aws.String("String"), + UploadPolicy: []byte("PAYLOAD"), + UploadPolicySignature: aws.String("String"), + }, + }, + DryRun: aws.Bool(true), + } + resp, err := svc.BundleInstance(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_CancelBundleTask() { + svc := ec2.New(session.New()) + + params := &ec2.CancelBundleTaskInput{ + BundleId: aws.String("String"), // Required + DryRun: aws.Bool(true), + } + resp, err := svc.CancelBundleTask(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_CancelConversionTask() { + svc := ec2.New(session.New()) + + params := &ec2.CancelConversionTaskInput{ + ConversionTaskId: aws.String("String"), // Required + DryRun: aws.Bool(true), + ReasonMessage: aws.String("String"), + } + resp, err := svc.CancelConversionTask(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_CancelExportTask() { + svc := ec2.New(session.New()) + + params := &ec2.CancelExportTaskInput{ + ExportTaskId: aws.String("String"), // Required + } + resp, err := svc.CancelExportTask(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_CancelImportTask() { + svc := ec2.New(session.New()) + + params := &ec2.CancelImportTaskInput{ + CancelReason: aws.String("String"), + DryRun: aws.Bool(true), + ImportTaskId: aws.String("String"), + } + resp, err := svc.CancelImportTask(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleEC2_CancelReservedInstancesListing() { + svc := ec2.New(session.New()) + + params := &ec2.CancelReservedInstancesListingInput{ + ReservedInstancesListingId: aws.String("String"), // Required + } + resp, err := svc.CancelReservedInstancesListing(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_CancelSpotFleetRequests() { + svc := ec2.New(session.New()) + + params := &ec2.CancelSpotFleetRequestsInput{ + SpotFleetRequestIds: []*string{ // Required + aws.String("String"), // Required + // More values... + }, + TerminateInstances: aws.Bool(true), // Required + DryRun: aws.Bool(true), + } + resp, err := svc.CancelSpotFleetRequests(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_CancelSpotInstanceRequests() { + svc := ec2.New(session.New()) + + params := &ec2.CancelSpotInstanceRequestsInput{ + SpotInstanceRequestIds: []*string{ // Required + aws.String("String"), // Required + // More values... + }, + DryRun: aws.Bool(true), + } + resp, err := svc.CancelSpotInstanceRequests(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_ConfirmProductInstance() { + svc := ec2.New(session.New()) + + params := &ec2.ConfirmProductInstanceInput{ + InstanceId: aws.String("String"), // Required + ProductCode: aws.String("String"), // Required + DryRun: aws.Bool(true), + } + resp, err := svc.ConfirmProductInstance(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_CopyImage() { + svc := ec2.New(session.New()) + + params := &ec2.CopyImageInput{ + Name: aws.String("String"), // Required + SourceImageId: aws.String("String"), // Required + SourceRegion: aws.String("String"), // Required + ClientToken: aws.String("String"), + Description: aws.String("String"), + DryRun: aws.Bool(true), + Encrypted: aws.Bool(true), + KmsKeyId: aws.String("String"), + } + resp, err := svc.CopyImage(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_CopySnapshot() { + svc := ec2.New(session.New()) + + params := &ec2.CopySnapshotInput{ + SourceRegion: aws.String("String"), // Required + SourceSnapshotId: aws.String("String"), // Required + Description: aws.String("String"), + DestinationRegion: aws.String("String"), + DryRun: aws.Bool(true), + Encrypted: aws.Bool(true), + KmsKeyId: aws.String("String"), + PresignedUrl: aws.String("String"), + } + resp, err := svc.CopySnapshot(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleEC2_CreateCustomerGateway() { + svc := ec2.New(session.New()) + + params := &ec2.CreateCustomerGatewayInput{ + BgpAsn: aws.Int64(1), // Required + PublicIp: aws.String("String"), // Required + Type: aws.String("GatewayType"), // Required + DryRun: aws.Bool(true), + } + resp, err := svc.CreateCustomerGateway(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_CreateDhcpOptions() { + svc := ec2.New(session.New()) + + params := &ec2.CreateDhcpOptionsInput{ + DhcpConfigurations: []*ec2.NewDhcpConfiguration{ // Required + { // Required + Key: aws.String("String"), + Values: []*string{ + aws.String("String"), // Required + // More values... + }, + }, + // More values... + }, + DryRun: aws.Bool(true), + } + resp, err := svc.CreateDhcpOptions(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_CreateFlowLogs() { + svc := ec2.New(session.New()) + + params := &ec2.CreateFlowLogsInput{ + DeliverLogsPermissionArn: aws.String("String"), // Required + LogGroupName: aws.String("String"), // Required + ResourceIds: []*string{ // Required + aws.String("String"), // Required + // More values... + }, + ResourceType: aws.String("FlowLogsResourceType"), // Required + TrafficType: aws.String("TrafficType"), // Required + ClientToken: aws.String("String"), + } + resp, err := svc.CreateFlowLogs(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_CreateImage() { + svc := ec2.New(session.New()) + + params := &ec2.CreateImageInput{ + InstanceId: aws.String("String"), // Required + Name: aws.String("String"), // Required + BlockDeviceMappings: []*ec2.BlockDeviceMapping{ + { // Required + DeviceName: aws.String("String"), + Ebs: &ec2.EbsBlockDevice{ + DeleteOnTermination: aws.Bool(true), + Encrypted: aws.Bool(true), + Iops: aws.Int64(1), + SnapshotId: aws.String("String"), + VolumeSize: aws.Int64(1), + VolumeType: aws.String("VolumeType"), + }, + NoDevice: aws.String("String"), + VirtualName: aws.String("String"), + }, + // More values... + }, + Description: aws.String("String"), + DryRun: aws.Bool(true), + NoReboot: aws.Bool(true), + } + resp, err := svc.CreateImage(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleEC2_CreateInstanceExportTask() { + svc := ec2.New(session.New()) + + params := &ec2.CreateInstanceExportTaskInput{ + InstanceId: aws.String("String"), // Required + Description: aws.String("String"), + ExportToS3Task: &ec2.ExportToS3TaskSpecification{ + ContainerFormat: aws.String("ContainerFormat"), + DiskImageFormat: aws.String("DiskImageFormat"), + S3Bucket: aws.String("String"), + S3Prefix: aws.String("String"), + }, + TargetEnvironment: aws.String("ExportEnvironment"), + } + resp, err := svc.CreateInstanceExportTask(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_CreateInternetGateway() { + svc := ec2.New(session.New()) + + params := &ec2.CreateInternetGatewayInput{ + DryRun: aws.Bool(true), + } + resp, err := svc.CreateInternetGateway(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_CreateKeyPair() { + svc := ec2.New(session.New()) + + params := &ec2.CreateKeyPairInput{ + KeyName: aws.String("String"), // Required + DryRun: aws.Bool(true), + } + resp, err := svc.CreateKeyPair(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_CreateNatGateway() { + svc := ec2.New(session.New()) + + params := &ec2.CreateNatGatewayInput{ + AllocationId: aws.String("String"), // Required + SubnetId: aws.String("String"), // Required + ClientToken: aws.String("String"), + } + resp, err := svc.CreateNatGateway(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_CreateNetworkAcl() { + svc := ec2.New(session.New()) + + params := &ec2.CreateNetworkAclInput{ + VpcId: aws.String("String"), // Required + DryRun: aws.Bool(true), + } + resp, err := svc.CreateNetworkAcl(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_CreateNetworkAclEntry() { + svc := ec2.New(session.New()) + + params := &ec2.CreateNetworkAclEntryInput{ + CidrBlock: aws.String("String"), // Required + Egress: aws.Bool(true), // Required + NetworkAclId: aws.String("String"), // Required + Protocol: aws.String("String"), // Required + RuleAction: aws.String("RuleAction"), // Required + RuleNumber: aws.Int64(1), // Required + DryRun: aws.Bool(true), + IcmpTypeCode: &ec2.IcmpTypeCode{ + Code: aws.Int64(1), + Type: aws.Int64(1), + }, + PortRange: &ec2.PortRange{ + From: aws.Int64(1), + To: aws.Int64(1), + }, + } + resp, err := svc.CreateNetworkAclEntry(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleEC2_CreateNetworkInterface() { + svc := ec2.New(session.New()) + + params := &ec2.CreateNetworkInterfaceInput{ + SubnetId: aws.String("String"), // Required + Description: aws.String("String"), + DryRun: aws.Bool(true), + Groups: []*string{ + aws.String("String"), // Required + // More values... + }, + PrivateIpAddress: aws.String("String"), + PrivateIpAddresses: []*ec2.PrivateIpAddressSpecification{ + { // Required + PrivateIpAddress: aws.String("String"), // Required + Primary: aws.Bool(true), + }, + // More values... + }, + SecondaryPrivateIpAddressCount: aws.Int64(1), + } + resp, err := svc.CreateNetworkInterface(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_CreatePlacementGroup() { + svc := ec2.New(session.New()) + + params := &ec2.CreatePlacementGroupInput{ + GroupName: aws.String("String"), // Required + Strategy: aws.String("PlacementStrategy"), // Required + DryRun: aws.Bool(true), + } + resp, err := svc.CreatePlacementGroup(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_CreateReservedInstancesListing() { + svc := ec2.New(session.New()) + + params := &ec2.CreateReservedInstancesListingInput{ + ClientToken: aws.String("String"), // Required + InstanceCount: aws.Int64(1), // Required + PriceSchedules: []*ec2.PriceScheduleSpecification{ // Required + { // Required + CurrencyCode: aws.String("CurrencyCodeValues"), + Price: aws.Float64(1.0), + Term: aws.Int64(1), + }, + // More values... + }, + ReservedInstancesId: aws.String("String"), // Required + } + resp, err := svc.CreateReservedInstancesListing(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_CreateRoute() { + svc := ec2.New(session.New()) + + params := &ec2.CreateRouteInput{ + DestinationCidrBlock: aws.String("String"), // Required + RouteTableId: aws.String("String"), // Required + DryRun: aws.Bool(true), + GatewayId: aws.String("String"), + InstanceId: aws.String("String"), + NatGatewayId: aws.String("String"), + NetworkInterfaceId: aws.String("String"), + VpcPeeringConnectionId: aws.String("String"), + } + resp, err := svc.CreateRoute(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_CreateRouteTable() { + svc := ec2.New(session.New()) + + params := &ec2.CreateRouteTableInput{ + VpcId: aws.String("String"), // Required + DryRun: aws.Bool(true), + } + resp, err := svc.CreateRouteTable(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleEC2_CreateSecurityGroup() { + svc := ec2.New(session.New()) + + params := &ec2.CreateSecurityGroupInput{ + Description: aws.String("String"), // Required + GroupName: aws.String("String"), // Required + DryRun: aws.Bool(true), + VpcId: aws.String("String"), + } + resp, err := svc.CreateSecurityGroup(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_CreateSnapshot() { + svc := ec2.New(session.New()) + + params := &ec2.CreateSnapshotInput{ + VolumeId: aws.String("String"), // Required + Description: aws.String("String"), + DryRun: aws.Bool(true), + } + resp, err := svc.CreateSnapshot(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_CreateSpotDatafeedSubscription() { + svc := ec2.New(session.New()) + + params := &ec2.CreateSpotDatafeedSubscriptionInput{ + Bucket: aws.String("String"), // Required + DryRun: aws.Bool(true), + Prefix: aws.String("String"), + } + resp, err := svc.CreateSpotDatafeedSubscription(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_CreateSubnet() { + svc := ec2.New(session.New()) + + params := &ec2.CreateSubnetInput{ + CidrBlock: aws.String("String"), // Required + VpcId: aws.String("String"), // Required + AvailabilityZone: aws.String("String"), + DryRun: aws.Bool(true), + } + resp, err := svc.CreateSubnet(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_CreateTags() { + svc := ec2.New(session.New()) + + params := &ec2.CreateTagsInput{ + Resources: []*string{ // Required + aws.String("String"), // Required + // More values... + }, + Tags: []*ec2.Tag{ // Required + { // Required + Key: aws.String("String"), + Value: aws.String("String"), + }, + // More values... + }, + DryRun: aws.Bool(true), + } + resp, err := svc.CreateTags(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_CreateVolume() { + svc := ec2.New(session.New()) + + params := &ec2.CreateVolumeInput{ + AvailabilityZone: aws.String("String"), // Required + DryRun: aws.Bool(true), + Encrypted: aws.Bool(true), + Iops: aws.Int64(1), + KmsKeyId: aws.String("String"), + Size: aws.Int64(1), + SnapshotId: aws.String("String"), + VolumeType: aws.String("VolumeType"), + } + resp, err := svc.CreateVolume(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleEC2_CreateVpc() { + svc := ec2.New(session.New()) + + params := &ec2.CreateVpcInput{ + CidrBlock: aws.String("String"), // Required + DryRun: aws.Bool(true), + InstanceTenancy: aws.String("Tenancy"), + } + resp, err := svc.CreateVpc(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_CreateVpcEndpoint() { + svc := ec2.New(session.New()) + + params := &ec2.CreateVpcEndpointInput{ + ServiceName: aws.String("String"), // Required + VpcId: aws.String("String"), // Required + ClientToken: aws.String("String"), + DryRun: aws.Bool(true), + PolicyDocument: aws.String("String"), + RouteTableIds: []*string{ + aws.String("String"), // Required + // More values... + }, + } + resp, err := svc.CreateVpcEndpoint(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_CreateVpcPeeringConnection() { + svc := ec2.New(session.New()) + + params := &ec2.CreateVpcPeeringConnectionInput{ + DryRun: aws.Bool(true), + PeerOwnerId: aws.String("String"), + PeerVpcId: aws.String("String"), + VpcId: aws.String("String"), + } + resp, err := svc.CreateVpcPeeringConnection(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_CreateVpnConnection() { + svc := ec2.New(session.New()) + + params := &ec2.CreateVpnConnectionInput{ + CustomerGatewayId: aws.String("String"), // Required + Type: aws.String("String"), // Required + VpnGatewayId: aws.String("String"), // Required + DryRun: aws.Bool(true), + Options: &ec2.VpnConnectionOptionsSpecification{ + StaticRoutesOnly: aws.Bool(true), + }, + } + resp, err := svc.CreateVpnConnection(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_CreateVpnConnectionRoute() { + svc := ec2.New(session.New()) + + params := &ec2.CreateVpnConnectionRouteInput{ + DestinationCidrBlock: aws.String("String"), // Required + VpnConnectionId: aws.String("String"), // Required + } + resp, err := svc.CreateVpnConnectionRoute(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_CreateVpnGateway() { + svc := ec2.New(session.New()) + + params := &ec2.CreateVpnGatewayInput{ + Type: aws.String("GatewayType"), // Required + AvailabilityZone: aws.String("String"), + DryRun: aws.Bool(true), + } + resp, err := svc.CreateVpnGateway(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleEC2_DeleteCustomerGateway() { + svc := ec2.New(session.New()) + + params := &ec2.DeleteCustomerGatewayInput{ + CustomerGatewayId: aws.String("String"), // Required + DryRun: aws.Bool(true), + } + resp, err := svc.DeleteCustomerGateway(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_DeleteDhcpOptions() { + svc := ec2.New(session.New()) + + params := &ec2.DeleteDhcpOptionsInput{ + DhcpOptionsId: aws.String("String"), // Required + DryRun: aws.Bool(true), + } + resp, err := svc.DeleteDhcpOptions(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_DeleteFlowLogs() { + svc := ec2.New(session.New()) + + params := &ec2.DeleteFlowLogsInput{ + FlowLogIds: []*string{ // Required + aws.String("String"), // Required + // More values... + }, + } + resp, err := svc.DeleteFlowLogs(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_DeleteInternetGateway() { + svc := ec2.New(session.New()) + + params := &ec2.DeleteInternetGatewayInput{ + InternetGatewayId: aws.String("String"), // Required + DryRun: aws.Bool(true), + } + resp, err := svc.DeleteInternetGateway(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_DeleteKeyPair() { + svc := ec2.New(session.New()) + + params := &ec2.DeleteKeyPairInput{ + KeyName: aws.String("String"), // Required + DryRun: aws.Bool(true), + } + resp, err := svc.DeleteKeyPair(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_DeleteNatGateway() { + svc := ec2.New(session.New()) + + params := &ec2.DeleteNatGatewayInput{ + NatGatewayId: aws.String("String"), // Required + } + resp, err := svc.DeleteNatGateway(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_DeleteNetworkAcl() { + svc := ec2.New(session.New()) + + params := &ec2.DeleteNetworkAclInput{ + NetworkAclId: aws.String("String"), // Required + DryRun: aws.Bool(true), + } + resp, err := svc.DeleteNetworkAcl(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleEC2_DeleteNetworkAclEntry() { + svc := ec2.New(session.New()) + + params := &ec2.DeleteNetworkAclEntryInput{ + Egress: aws.Bool(true), // Required + NetworkAclId: aws.String("String"), // Required + RuleNumber: aws.Int64(1), // Required + DryRun: aws.Bool(true), + } + resp, err := svc.DeleteNetworkAclEntry(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_DeleteNetworkInterface() { + svc := ec2.New(session.New()) + + params := &ec2.DeleteNetworkInterfaceInput{ + NetworkInterfaceId: aws.String("String"), // Required + DryRun: aws.Bool(true), + } + resp, err := svc.DeleteNetworkInterface(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_DeletePlacementGroup() { + svc := ec2.New(session.New()) + + params := &ec2.DeletePlacementGroupInput{ + GroupName: aws.String("String"), // Required + DryRun: aws.Bool(true), + } + resp, err := svc.DeletePlacementGroup(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_DeleteRoute() { + svc := ec2.New(session.New()) + + params := &ec2.DeleteRouteInput{ + DestinationCidrBlock: aws.String("String"), // Required + RouteTableId: aws.String("String"), // Required + DryRun: aws.Bool(true), + } + resp, err := svc.DeleteRoute(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_DeleteRouteTable() { + svc := ec2.New(session.New()) + + params := &ec2.DeleteRouteTableInput{ + RouteTableId: aws.String("String"), // Required + DryRun: aws.Bool(true), + } + resp, err := svc.DeleteRouteTable(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_DeleteSecurityGroup() { + svc := ec2.New(session.New()) + + params := &ec2.DeleteSecurityGroupInput{ + DryRun: aws.Bool(true), + GroupId: aws.String("String"), + GroupName: aws.String("String"), + } + resp, err := svc.DeleteSecurityGroup(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_DeleteSnapshot() { + svc := ec2.New(session.New()) + + params := &ec2.DeleteSnapshotInput{ + SnapshotId: aws.String("String"), // Required + DryRun: aws.Bool(true), + } + resp, err := svc.DeleteSnapshot(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleEC2_DeleteSpotDatafeedSubscription() { + svc := ec2.New(session.New()) + + params := &ec2.DeleteSpotDatafeedSubscriptionInput{ + DryRun: aws.Bool(true), + } + resp, err := svc.DeleteSpotDatafeedSubscription(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_DeleteSubnet() { + svc := ec2.New(session.New()) + + params := &ec2.DeleteSubnetInput{ + SubnetId: aws.String("String"), // Required + DryRun: aws.Bool(true), + } + resp, err := svc.DeleteSubnet(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_DeleteTags() { + svc := ec2.New(session.New()) + + params := &ec2.DeleteTagsInput{ + Resources: []*string{ // Required + aws.String("String"), // Required + // More values... + }, + DryRun: aws.Bool(true), + Tags: []*ec2.Tag{ + { // Required + Key: aws.String("String"), + Value: aws.String("String"), + }, + // More values... + }, + } + resp, err := svc.DeleteTags(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_DeleteVolume() { + svc := ec2.New(session.New()) + + params := &ec2.DeleteVolumeInput{ + VolumeId: aws.String("String"), // Required + DryRun: aws.Bool(true), + } + resp, err := svc.DeleteVolume(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_DeleteVpc() { + svc := ec2.New(session.New()) + + params := &ec2.DeleteVpcInput{ + VpcId: aws.String("String"), // Required + DryRun: aws.Bool(true), + } + resp, err := svc.DeleteVpc(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_DeleteVpcEndpoints() { + svc := ec2.New(session.New()) + + params := &ec2.DeleteVpcEndpointsInput{ + VpcEndpointIds: []*string{ // Required + aws.String("String"), // Required + // More values... + }, + DryRun: aws.Bool(true), + } + resp, err := svc.DeleteVpcEndpoints(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_DeleteVpcPeeringConnection() { + svc := ec2.New(session.New()) + + params := &ec2.DeleteVpcPeeringConnectionInput{ + VpcPeeringConnectionId: aws.String("String"), // Required + DryRun: aws.Bool(true), + } + resp, err := svc.DeleteVpcPeeringConnection(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleEC2_DeleteVpnConnection() { + svc := ec2.New(session.New()) + + params := &ec2.DeleteVpnConnectionInput{ + VpnConnectionId: aws.String("String"), // Required + DryRun: aws.Bool(true), + } + resp, err := svc.DeleteVpnConnection(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_DeleteVpnConnectionRoute() { + svc := ec2.New(session.New()) + + params := &ec2.DeleteVpnConnectionRouteInput{ + DestinationCidrBlock: aws.String("String"), // Required + VpnConnectionId: aws.String("String"), // Required + } + resp, err := svc.DeleteVpnConnectionRoute(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_DeleteVpnGateway() { + svc := ec2.New(session.New()) + + params := &ec2.DeleteVpnGatewayInput{ + VpnGatewayId: aws.String("String"), // Required + DryRun: aws.Bool(true), + } + resp, err := svc.DeleteVpnGateway(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_DeregisterImage() { + svc := ec2.New(session.New()) + + params := &ec2.DeregisterImageInput{ + ImageId: aws.String("String"), // Required + DryRun: aws.Bool(true), + } + resp, err := svc.DeregisterImage(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_DescribeAccountAttributes() { + svc := ec2.New(session.New()) + + params := &ec2.DescribeAccountAttributesInput{ + AttributeNames: []*string{ + aws.String("AccountAttributeName"), // Required + // More values... + }, + DryRun: aws.Bool(true), + } + resp, err := svc.DescribeAccountAttributes(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_DescribeAddresses() { + svc := ec2.New(session.New()) + + params := &ec2.DescribeAddressesInput{ + AllocationIds: []*string{ + aws.String("String"), // Required + // More values... + }, + DryRun: aws.Bool(true), + Filters: []*ec2.Filter{ + { // Required + Name: aws.String("String"), + Values: []*string{ + aws.String("String"), // Required + // More values... + }, + }, + // More values... + }, + PublicIps: []*string{ + aws.String("String"), // Required + // More values... + }, + } + resp, err := svc.DescribeAddresses(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_DescribeAvailabilityZones() { + svc := ec2.New(session.New()) + + params := &ec2.DescribeAvailabilityZonesInput{ + DryRun: aws.Bool(true), + Filters: []*ec2.Filter{ + { // Required + Name: aws.String("String"), + Values: []*string{ + aws.String("String"), // Required + // More values... + }, + }, + // More values... 
+ }, + ZoneNames: []*string{ + aws.String("String"), // Required + // More values... + }, + } + resp, err := svc.DescribeAvailabilityZones(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_DescribeBundleTasks() { + svc := ec2.New(session.New()) + + params := &ec2.DescribeBundleTasksInput{ + BundleIds: []*string{ + aws.String("String"), // Required + // More values... + }, + DryRun: aws.Bool(true), + Filters: []*ec2.Filter{ + { // Required + Name: aws.String("String"), + Values: []*string{ + aws.String("String"), // Required + // More values... + }, + }, + // More values... + }, + } + resp, err := svc.DescribeBundleTasks(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_DescribeClassicLinkInstances() { + svc := ec2.New(session.New()) + + params := &ec2.DescribeClassicLinkInstancesInput{ + DryRun: aws.Bool(true), + Filters: []*ec2.Filter{ + { // Required + Name: aws.String("String"), + Values: []*string{ + aws.String("String"), // Required + // More values... + }, + }, + // More values... + }, + InstanceIds: []*string{ + aws.String("String"), // Required + // More values... + }, + MaxResults: aws.Int64(1), + NextToken: aws.String("String"), + } + resp, err := svc.DescribeClassicLinkInstances(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_DescribeConversionTasks() { + svc := ec2.New(session.New()) + + params := &ec2.DescribeConversionTasksInput{ + ConversionTaskIds: []*string{ + aws.String("String"), // Required + // More values... + }, + DryRun: aws.Bool(true), + Filters: []*ec2.Filter{ + { // Required + Name: aws.String("String"), + Values: []*string{ + aws.String("String"), // Required + // More values... + }, + }, + // More values... + }, + } + resp, err := svc.DescribeConversionTasks(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_DescribeCustomerGateways() { + svc := ec2.New(session.New()) + + params := &ec2.DescribeCustomerGatewaysInput{ + CustomerGatewayIds: []*string{ + aws.String("String"), // Required + // More values... + }, + DryRun: aws.Bool(true), + Filters: []*ec2.Filter{ + { // Required + Name: aws.String("String"), + Values: []*string{ + aws.String("String"), // Required + // More values... + }, + }, + // More values... + }, + } + resp, err := svc.DescribeCustomerGateways(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_DescribeDhcpOptions() { + svc := ec2.New(session.New()) + + params := &ec2.DescribeDhcpOptionsInput{ + DhcpOptionsIds: []*string{ + aws.String("String"), // Required + // More values... 
+ }, + DryRun: aws.Bool(true), + Filters: []*ec2.Filter{ + { // Required + Name: aws.String("String"), + Values: []*string{ + aws.String("String"), // Required + // More values... + }, + }, + // More values... + }, + } + resp, err := svc.DescribeDhcpOptions(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_DescribeExportTasks() { + svc := ec2.New(session.New()) + + params := &ec2.DescribeExportTasksInput{ + ExportTaskIds: []*string{ + aws.String("String"), // Required + // More values... + }, + } + resp, err := svc.DescribeExportTasks(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_DescribeFlowLogs() { + svc := ec2.New(session.New()) + + params := &ec2.DescribeFlowLogsInput{ + Filter: []*ec2.Filter{ + { // Required + Name: aws.String("String"), + Values: []*string{ + aws.String("String"), // Required + // More values... + }, + }, + // More values... + }, + FlowLogIds: []*string{ + aws.String("String"), // Required + // More values... + }, + MaxResults: aws.Int64(1), + NextToken: aws.String("String"), + } + resp, err := svc.DescribeFlowLogs(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_DescribeHosts() { + svc := ec2.New(session.New()) + + params := &ec2.DescribeHostsInput{ + Filter: []*ec2.Filter{ + { // Required + Name: aws.String("String"), + Values: []*string{ + aws.String("String"), // Required + // More values... + }, + }, + // More values... + }, + HostIds: []*string{ + aws.String("String"), // Required + // More values... + }, + MaxResults: aws.Int64(1), + NextToken: aws.String("String"), + } + resp, err := svc.DescribeHosts(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_DescribeIdFormat() { + svc := ec2.New(session.New()) + + params := &ec2.DescribeIdFormatInput{ + Resource: aws.String("String"), + } + resp, err := svc.DescribeIdFormat(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_DescribeImageAttribute() { + svc := ec2.New(session.New()) + + params := &ec2.DescribeImageAttributeInput{ + Attribute: aws.String("ImageAttributeName"), // Required + ImageId: aws.String("String"), // Required + DryRun: aws.Bool(true), + } + resp, err := svc.DescribeImageAttribute(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_DescribeImages() { + svc := ec2.New(session.New()) + + params := &ec2.DescribeImagesInput{ + DryRun: aws.Bool(true), + ExecutableUsers: []*string{ + aws.String("String"), // Required + // More values... 
+ }, + Filters: []*ec2.Filter{ + { // Required + Name: aws.String("String"), + Values: []*string{ + aws.String("String"), // Required + // More values... + }, + }, + // More values... + }, + ImageIds: []*string{ + aws.String("String"), // Required + // More values... + }, + Owners: []*string{ + aws.String("String"), // Required + // More values... + }, + } + resp, err := svc.DescribeImages(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_DescribeImportImageTasks() { + svc := ec2.New(session.New()) + + params := &ec2.DescribeImportImageTasksInput{ + DryRun: aws.Bool(true), + Filters: []*ec2.Filter{ + { // Required + Name: aws.String("String"), + Values: []*string{ + aws.String("String"), // Required + // More values... + }, + }, + // More values... + }, + ImportTaskIds: []*string{ + aws.String("String"), // Required + // More values... + }, + MaxResults: aws.Int64(1), + NextToken: aws.String("String"), + } + resp, err := svc.DescribeImportImageTasks(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_DescribeImportSnapshotTasks() { + svc := ec2.New(session.New()) + + params := &ec2.DescribeImportSnapshotTasksInput{ + DryRun: aws.Bool(true), + Filters: []*ec2.Filter{ + { // Required + Name: aws.String("String"), + Values: []*string{ + aws.String("String"), // Required + // More values... + }, + }, + // More values... + }, + ImportTaskIds: []*string{ + aws.String("String"), // Required + // More values... + }, + MaxResults: aws.Int64(1), + NextToken: aws.String("String"), + } + resp, err := svc.DescribeImportSnapshotTasks(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_DescribeInstanceAttribute() { + svc := ec2.New(session.New()) + + params := &ec2.DescribeInstanceAttributeInput{ + Attribute: aws.String("InstanceAttributeName"), // Required + InstanceId: aws.String("String"), // Required + DryRun: aws.Bool(true), + } + resp, err := svc.DescribeInstanceAttribute(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_DescribeInstanceStatus() { + svc := ec2.New(session.New()) + + params := &ec2.DescribeInstanceStatusInput{ + DryRun: aws.Bool(true), + Filters: []*ec2.Filter{ + { // Required + Name: aws.String("String"), + Values: []*string{ + aws.String("String"), // Required + // More values... + }, + }, + // More values... + }, + IncludeAllInstances: aws.Bool(true), + InstanceIds: []*string{ + aws.String("String"), // Required + // More values... + }, + MaxResults: aws.Int64(1), + NextToken: aws.String("String"), + } + resp, err := svc.DescribeInstanceStatus(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleEC2_DescribeInstances() { + svc := ec2.New(session.New()) + + params := &ec2.DescribeInstancesInput{ + DryRun: aws.Bool(true), + Filters: []*ec2.Filter{ + { // Required + Name: aws.String("String"), + Values: []*string{ + aws.String("String"), // Required + // More values... + }, + }, + // More values... + }, + InstanceIds: []*string{ + aws.String("String"), // Required + // More values... + }, + MaxResults: aws.Int64(1), + NextToken: aws.String("String"), + } + resp, err := svc.DescribeInstances(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_DescribeInternetGateways() { + svc := ec2.New(session.New()) + + params := &ec2.DescribeInternetGatewaysInput{ + DryRun: aws.Bool(true), + Filters: []*ec2.Filter{ + { // Required + Name: aws.String("String"), + Values: []*string{ + aws.String("String"), // Required + // More values... + }, + }, + // More values... + }, + InternetGatewayIds: []*string{ + aws.String("String"), // Required + // More values... + }, + } + resp, err := svc.DescribeInternetGateways(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_DescribeKeyPairs() { + svc := ec2.New(session.New()) + + params := &ec2.DescribeKeyPairsInput{ + DryRun: aws.Bool(true), + Filters: []*ec2.Filter{ + { // Required + Name: aws.String("String"), + Values: []*string{ + aws.String("String"), // Required + // More values... + }, + }, + // More values... + }, + KeyNames: []*string{ + aws.String("String"), // Required + // More values... + }, + } + resp, err := svc.DescribeKeyPairs(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_DescribeMovingAddresses() { + svc := ec2.New(session.New()) + + params := &ec2.DescribeMovingAddressesInput{ + DryRun: aws.Bool(true), + Filters: []*ec2.Filter{ + { // Required + Name: aws.String("String"), + Values: []*string{ + aws.String("String"), // Required + // More values... + }, + }, + // More values... + }, + MaxResults: aws.Int64(1), + NextToken: aws.String("String"), + PublicIps: []*string{ + aws.String("String"), // Required + // More values... + }, + } + resp, err := svc.DescribeMovingAddresses(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_DescribeNatGateways() { + svc := ec2.New(session.New()) + + params := &ec2.DescribeNatGatewaysInput{ + Filter: []*ec2.Filter{ + { // Required + Name: aws.String("String"), + Values: []*string{ + aws.String("String"), // Required + // More values... + }, + }, + // More values... + }, + MaxResults: aws.Int64(1), + NatGatewayIds: []*string{ + aws.String("String"), // Required + // More values... + }, + NextToken: aws.String("String"), + } + resp, err := svc.DescribeNatGateways(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. 
+ fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_DescribeNetworkAcls() { + svc := ec2.New(session.New()) + + params := &ec2.DescribeNetworkAclsInput{ + DryRun: aws.Bool(true), + Filters: []*ec2.Filter{ + { // Required + Name: aws.String("String"), + Values: []*string{ + aws.String("String"), // Required + // More values... + }, + }, + // More values... + }, + NetworkAclIds: []*string{ + aws.String("String"), // Required + // More values... + }, + } + resp, err := svc.DescribeNetworkAcls(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_DescribeNetworkInterfaceAttribute() { + svc := ec2.New(session.New()) + + params := &ec2.DescribeNetworkInterfaceAttributeInput{ + NetworkInterfaceId: aws.String("String"), // Required + Attribute: aws.String("NetworkInterfaceAttribute"), + DryRun: aws.Bool(true), + } + resp, err := svc.DescribeNetworkInterfaceAttribute(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_DescribeNetworkInterfaces() { + svc := ec2.New(session.New()) + + params := &ec2.DescribeNetworkInterfacesInput{ + DryRun: aws.Bool(true), + Filters: []*ec2.Filter{ + { // Required + Name: aws.String("String"), + Values: []*string{ + aws.String("String"), // Required + // More values... + }, + }, + // More values... + }, + NetworkInterfaceIds: []*string{ + aws.String("String"), // Required + // More values... + }, + } + resp, err := svc.DescribeNetworkInterfaces(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_DescribePlacementGroups() { + svc := ec2.New(session.New()) + + params := &ec2.DescribePlacementGroupsInput{ + DryRun: aws.Bool(true), + Filters: []*ec2.Filter{ + { // Required + Name: aws.String("String"), + Values: []*string{ + aws.String("String"), // Required + // More values... + }, + }, + // More values... + }, + GroupNames: []*string{ + aws.String("String"), // Required + // More values... + }, + } + resp, err := svc.DescribePlacementGroups(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_DescribePrefixLists() { + svc := ec2.New(session.New()) + + params := &ec2.DescribePrefixListsInput{ + DryRun: aws.Bool(true), + Filters: []*ec2.Filter{ + { // Required + Name: aws.String("String"), + Values: []*string{ + aws.String("String"), // Required + // More values... + }, + }, + // More values... + }, + MaxResults: aws.Int64(1), + NextToken: aws.String("String"), + PrefixListIds: []*string{ + aws.String("String"), // Required + // More values... + }, + } + resp, err := svc.DescribePrefixLists(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleEC2_DescribeRegions() { + svc := ec2.New(session.New()) + + params := &ec2.DescribeRegionsInput{ + DryRun: aws.Bool(true), + Filters: []*ec2.Filter{ + { // Required + Name: aws.String("String"), + Values: []*string{ + aws.String("String"), // Required + // More values... + }, + }, + // More values... + }, + RegionNames: []*string{ + aws.String("String"), // Required + // More values... + }, + } + resp, err := svc.DescribeRegions(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_DescribeReservedInstances() { + svc := ec2.New(session.New()) + + params := &ec2.DescribeReservedInstancesInput{ + DryRun: aws.Bool(true), + Filters: []*ec2.Filter{ + { // Required + Name: aws.String("String"), + Values: []*string{ + aws.String("String"), // Required + // More values... + }, + }, + // More values... + }, + OfferingType: aws.String("OfferingTypeValues"), + ReservedInstancesIds: []*string{ + aws.String("String"), // Required + // More values... + }, + } + resp, err := svc.DescribeReservedInstances(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_DescribeReservedInstancesListings() { + svc := ec2.New(session.New()) + + params := &ec2.DescribeReservedInstancesListingsInput{ + Filters: []*ec2.Filter{ + { // Required + Name: aws.String("String"), + Values: []*string{ + aws.String("String"), // Required + // More values... + }, + }, + // More values... + }, + ReservedInstancesId: aws.String("String"), + ReservedInstancesListingId: aws.String("String"), + } + resp, err := svc.DescribeReservedInstancesListings(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_DescribeReservedInstancesModifications() { + svc := ec2.New(session.New()) + + params := &ec2.DescribeReservedInstancesModificationsInput{ + Filters: []*ec2.Filter{ + { // Required + Name: aws.String("String"), + Values: []*string{ + aws.String("String"), // Required + // More values... + }, + }, + // More values... + }, + NextToken: aws.String("String"), + ReservedInstancesModificationIds: []*string{ + aws.String("String"), // Required + // More values... + }, + } + resp, err := svc.DescribeReservedInstancesModifications(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_DescribeReservedInstancesOfferings() { + svc := ec2.New(session.New()) + + params := &ec2.DescribeReservedInstancesOfferingsInput{ + AvailabilityZone: aws.String("String"), + DryRun: aws.Bool(true), + Filters: []*ec2.Filter{ + { // Required + Name: aws.String("String"), + Values: []*string{ + aws.String("String"), // Required + // More values... + }, + }, + // More values... 
+ }, + IncludeMarketplace: aws.Bool(true), + InstanceTenancy: aws.String("Tenancy"), + InstanceType: aws.String("InstanceType"), + MaxDuration: aws.Int64(1), + MaxInstanceCount: aws.Int64(1), + MaxResults: aws.Int64(1), + MinDuration: aws.Int64(1), + NextToken: aws.String("String"), + OfferingType: aws.String("OfferingTypeValues"), + ProductDescription: aws.String("RIProductDescription"), + ReservedInstancesOfferingIds: []*string{ + aws.String("String"), // Required + // More values... + }, + } + resp, err := svc.DescribeReservedInstancesOfferings(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_DescribeRouteTables() { + svc := ec2.New(session.New()) + + params := &ec2.DescribeRouteTablesInput{ + DryRun: aws.Bool(true), + Filters: []*ec2.Filter{ + { // Required + Name: aws.String("String"), + Values: []*string{ + aws.String("String"), // Required + // More values... + }, + }, + // More values... + }, + RouteTableIds: []*string{ + aws.String("String"), // Required + // More values... + }, + } + resp, err := svc.DescribeRouteTables(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_DescribeScheduledInstanceAvailability() { + svc := ec2.New(session.New()) + + params := &ec2.DescribeScheduledInstanceAvailabilityInput{ + FirstSlotStartTimeRange: &ec2.SlotDateTimeRangeRequest{ // Required + EarliestTime: aws.Time(time.Now()), // Required + LatestTime: aws.Time(time.Now()), // Required + }, + Recurrence: &ec2.ScheduledInstanceRecurrenceRequest{ // Required + Frequency: aws.String("String"), + Interval: aws.Int64(1), + OccurrenceDays: []*int64{ + aws.Int64(1), // Required + // More values... + }, + OccurrenceRelativeToEnd: aws.Bool(true), + OccurrenceUnit: aws.String("String"), + }, + DryRun: aws.Bool(true), + Filters: []*ec2.Filter{ + { // Required + Name: aws.String("String"), + Values: []*string{ + aws.String("String"), // Required + // More values... + }, + }, + // More values... + }, + MaxResults: aws.Int64(1), + MaxSlotDurationInHours: aws.Int64(1), + MinSlotDurationInHours: aws.Int64(1), + NextToken: aws.String("String"), + } + resp, err := svc.DescribeScheduledInstanceAvailability(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_DescribeScheduledInstances() { + svc := ec2.New(session.New()) + + params := &ec2.DescribeScheduledInstancesInput{ + DryRun: aws.Bool(true), + Filters: []*ec2.Filter{ + { // Required + Name: aws.String("String"), + Values: []*string{ + aws.String("String"), // Required + // More values... + }, + }, + // More values... + }, + MaxResults: aws.Int64(1), + NextToken: aws.String("String"), + ScheduledInstanceIds: []*string{ + aws.String("String"), // Required + // More values... + }, + SlotStartTimeRange: &ec2.SlotStartTimeRangeRequest{ + EarliestTime: aws.Time(time.Now()), + LatestTime: aws.Time(time.Now()), + }, + } + resp, err := svc.DescribeScheduledInstances(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. 
+ fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_DescribeSecurityGroups() { + svc := ec2.New(session.New()) + + params := &ec2.DescribeSecurityGroupsInput{ + DryRun: aws.Bool(true), + Filters: []*ec2.Filter{ + { // Required + Name: aws.String("String"), + Values: []*string{ + aws.String("String"), // Required + // More values... + }, + }, + // More values... + }, + GroupIds: []*string{ + aws.String("String"), // Required + // More values... + }, + GroupNames: []*string{ + aws.String("String"), // Required + // More values... + }, + } + resp, err := svc.DescribeSecurityGroups(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_DescribeSnapshotAttribute() { + svc := ec2.New(session.New()) + + params := &ec2.DescribeSnapshotAttributeInput{ + Attribute: aws.String("SnapshotAttributeName"), // Required + SnapshotId: aws.String("String"), // Required + DryRun: aws.Bool(true), + } + resp, err := svc.DescribeSnapshotAttribute(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_DescribeSnapshots() { + svc := ec2.New(session.New()) + + params := &ec2.DescribeSnapshotsInput{ + DryRun: aws.Bool(true), + Filters: []*ec2.Filter{ + { // Required + Name: aws.String("String"), + Values: []*string{ + aws.String("String"), // Required + // More values... + }, + }, + // More values... + }, + MaxResults: aws.Int64(1), + NextToken: aws.String("String"), + OwnerIds: []*string{ + aws.String("String"), // Required + // More values... + }, + RestorableByUserIds: []*string{ + aws.String("String"), // Required + // More values... + }, + SnapshotIds: []*string{ + aws.String("String"), // Required + // More values... + }, + } + resp, err := svc.DescribeSnapshots(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_DescribeSpotDatafeedSubscription() { + svc := ec2.New(session.New()) + + params := &ec2.DescribeSpotDatafeedSubscriptionInput{ + DryRun: aws.Bool(true), + } + resp, err := svc.DescribeSpotDatafeedSubscription(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_DescribeSpotFleetInstances() { + svc := ec2.New(session.New()) + + params := &ec2.DescribeSpotFleetInstancesInput{ + SpotFleetRequestId: aws.String("String"), // Required + DryRun: aws.Bool(true), + MaxResults: aws.Int64(1), + NextToken: aws.String("String"), + } + resp, err := svc.DescribeSpotFleetInstances(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleEC2_DescribeSpotFleetRequestHistory() { + svc := ec2.New(session.New()) + + params := &ec2.DescribeSpotFleetRequestHistoryInput{ + SpotFleetRequestId: aws.String("String"), // Required + StartTime: aws.Time(time.Now()), // Required + DryRun: aws.Bool(true), + EventType: aws.String("EventType"), + MaxResults: aws.Int64(1), + NextToken: aws.String("String"), + } + resp, err := svc.DescribeSpotFleetRequestHistory(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_DescribeSpotFleetRequests() { + svc := ec2.New(session.New()) + + params := &ec2.DescribeSpotFleetRequestsInput{ + DryRun: aws.Bool(true), + MaxResults: aws.Int64(1), + NextToken: aws.String("String"), + SpotFleetRequestIds: []*string{ + aws.String("String"), // Required + // More values... + }, + } + resp, err := svc.DescribeSpotFleetRequests(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_DescribeSpotInstanceRequests() { + svc := ec2.New(session.New()) + + params := &ec2.DescribeSpotInstanceRequestsInput{ + DryRun: aws.Bool(true), + Filters: []*ec2.Filter{ + { // Required + Name: aws.String("String"), + Values: []*string{ + aws.String("String"), // Required + // More values... + }, + }, + // More values... + }, + SpotInstanceRequestIds: []*string{ + aws.String("String"), // Required + // More values... + }, + } + resp, err := svc.DescribeSpotInstanceRequests(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_DescribeSpotPriceHistory() { + svc := ec2.New(session.New()) + + params := &ec2.DescribeSpotPriceHistoryInput{ + AvailabilityZone: aws.String("String"), + DryRun: aws.Bool(true), + EndTime: aws.Time(time.Now()), + Filters: []*ec2.Filter{ + { // Required + Name: aws.String("String"), + Values: []*string{ + aws.String("String"), // Required + // More values... + }, + }, + // More values... + }, + InstanceTypes: []*string{ + aws.String("InstanceType"), // Required + // More values... + }, + MaxResults: aws.Int64(1), + NextToken: aws.String("String"), + ProductDescriptions: []*string{ + aws.String("String"), // Required + // More values... + }, + StartTime: aws.Time(time.Now()), + } + resp, err := svc.DescribeSpotPriceHistory(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_DescribeSubnets() { + svc := ec2.New(session.New()) + + params := &ec2.DescribeSubnetsInput{ + DryRun: aws.Bool(true), + Filters: []*ec2.Filter{ + { // Required + Name: aws.String("String"), + Values: []*string{ + aws.String("String"), // Required + // More values... + }, + }, + // More values... + }, + SubnetIds: []*string{ + aws.String("String"), // Required + // More values... + }, + } + resp, err := svc.DescribeSubnets(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. 
+ fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_DescribeTags() { + svc := ec2.New(session.New()) + + params := &ec2.DescribeTagsInput{ + DryRun: aws.Bool(true), + Filters: []*ec2.Filter{ + { // Required + Name: aws.String("String"), + Values: []*string{ + aws.String("String"), // Required + // More values... + }, + }, + // More values... + }, + MaxResults: aws.Int64(1), + NextToken: aws.String("String"), + } + resp, err := svc.DescribeTags(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_DescribeVolumeAttribute() { + svc := ec2.New(session.New()) + + params := &ec2.DescribeVolumeAttributeInput{ + VolumeId: aws.String("String"), // Required + Attribute: aws.String("VolumeAttributeName"), + DryRun: aws.Bool(true), + } + resp, err := svc.DescribeVolumeAttribute(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_DescribeVolumeStatus() { + svc := ec2.New(session.New()) + + params := &ec2.DescribeVolumeStatusInput{ + DryRun: aws.Bool(true), + Filters: []*ec2.Filter{ + { // Required + Name: aws.String("String"), + Values: []*string{ + aws.String("String"), // Required + // More values... + }, + }, + // More values... + }, + MaxResults: aws.Int64(1), + NextToken: aws.String("String"), + VolumeIds: []*string{ + aws.String("String"), // Required + // More values... + }, + } + resp, err := svc.DescribeVolumeStatus(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_DescribeVolumes() { + svc := ec2.New(session.New()) + + params := &ec2.DescribeVolumesInput{ + DryRun: aws.Bool(true), + Filters: []*ec2.Filter{ + { // Required + Name: aws.String("String"), + Values: []*string{ + aws.String("String"), // Required + // More values... + }, + }, + // More values... + }, + MaxResults: aws.Int64(1), + NextToken: aws.String("String"), + VolumeIds: []*string{ + aws.String("String"), // Required + // More values... + }, + } + resp, err := svc.DescribeVolumes(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_DescribeVpcAttribute() { + svc := ec2.New(session.New()) + + params := &ec2.DescribeVpcAttributeInput{ + Attribute: aws.String("VpcAttributeName"), // Required + VpcId: aws.String("String"), // Required + DryRun: aws.Bool(true), + } + resp, err := svc.DescribeVpcAttribute(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleEC2_DescribeVpcClassicLink() { + svc := ec2.New(session.New()) + + params := &ec2.DescribeVpcClassicLinkInput{ + DryRun: aws.Bool(true), + Filters: []*ec2.Filter{ + { // Required + Name: aws.String("String"), + Values: []*string{ + aws.String("String"), // Required + // More values... + }, + }, + // More values... + }, + VpcIds: []*string{ + aws.String("String"), // Required + // More values... + }, + } + resp, err := svc.DescribeVpcClassicLink(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_DescribeVpcClassicLinkDnsSupport() { + svc := ec2.New(session.New()) + + params := &ec2.DescribeVpcClassicLinkDnsSupportInput{ + MaxResults: aws.Int64(1), + NextToken: aws.String("NextToken"), + VpcIds: []*string{ + aws.String("String"), // Required + // More values... + }, + } + resp, err := svc.DescribeVpcClassicLinkDnsSupport(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_DescribeVpcEndpointServices() { + svc := ec2.New(session.New()) + + params := &ec2.DescribeVpcEndpointServicesInput{ + DryRun: aws.Bool(true), + MaxResults: aws.Int64(1), + NextToken: aws.String("String"), + } + resp, err := svc.DescribeVpcEndpointServices(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_DescribeVpcEndpoints() { + svc := ec2.New(session.New()) + + params := &ec2.DescribeVpcEndpointsInput{ + DryRun: aws.Bool(true), + Filters: []*ec2.Filter{ + { // Required + Name: aws.String("String"), + Values: []*string{ + aws.String("String"), // Required + // More values... + }, + }, + // More values... + }, + MaxResults: aws.Int64(1), + NextToken: aws.String("String"), + VpcEndpointIds: []*string{ + aws.String("String"), // Required + // More values... + }, + } + resp, err := svc.DescribeVpcEndpoints(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_DescribeVpcPeeringConnections() { + svc := ec2.New(session.New()) + + params := &ec2.DescribeVpcPeeringConnectionsInput{ + DryRun: aws.Bool(true), + Filters: []*ec2.Filter{ + { // Required + Name: aws.String("String"), + Values: []*string{ + aws.String("String"), // Required + // More values... + }, + }, + // More values... + }, + VpcPeeringConnectionIds: []*string{ + aws.String("String"), // Required + // More values... + }, + } + resp, err := svc.DescribeVpcPeeringConnections(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_DescribeVpcs() { + svc := ec2.New(session.New()) + + params := &ec2.DescribeVpcsInput{ + DryRun: aws.Bool(true), + Filters: []*ec2.Filter{ + { // Required + Name: aws.String("String"), + Values: []*string{ + aws.String("String"), // Required + // More values... 
+ }, + }, + // More values... + }, + VpcIds: []*string{ + aws.String("String"), // Required + // More values... + }, + } + resp, err := svc.DescribeVpcs(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_DescribeVpnConnections() { + svc := ec2.New(session.New()) + + params := &ec2.DescribeVpnConnectionsInput{ + DryRun: aws.Bool(true), + Filters: []*ec2.Filter{ + { // Required + Name: aws.String("String"), + Values: []*string{ + aws.String("String"), // Required + // More values... + }, + }, + // More values... + }, + VpnConnectionIds: []*string{ + aws.String("String"), // Required + // More values... + }, + } + resp, err := svc.DescribeVpnConnections(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_DescribeVpnGateways() { + svc := ec2.New(session.New()) + + params := &ec2.DescribeVpnGatewaysInput{ + DryRun: aws.Bool(true), + Filters: []*ec2.Filter{ + { // Required + Name: aws.String("String"), + Values: []*string{ + aws.String("String"), // Required + // More values... + }, + }, + // More values... + }, + VpnGatewayIds: []*string{ + aws.String("String"), // Required + // More values... + }, + } + resp, err := svc.DescribeVpnGateways(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_DetachClassicLinkVpc() { + svc := ec2.New(session.New()) + + params := &ec2.DetachClassicLinkVpcInput{ + InstanceId: aws.String("String"), // Required + VpcId: aws.String("String"), // Required + DryRun: aws.Bool(true), + } + resp, err := svc.DetachClassicLinkVpc(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_DetachInternetGateway() { + svc := ec2.New(session.New()) + + params := &ec2.DetachInternetGatewayInput{ + InternetGatewayId: aws.String("String"), // Required + VpcId: aws.String("String"), // Required + DryRun: aws.Bool(true), + } + resp, err := svc.DetachInternetGateway(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_DetachNetworkInterface() { + svc := ec2.New(session.New()) + + params := &ec2.DetachNetworkInterfaceInput{ + AttachmentId: aws.String("String"), // Required + DryRun: aws.Bool(true), + Force: aws.Bool(true), + } + resp, err := svc.DetachNetworkInterface(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleEC2_DetachVolume() { + svc := ec2.New(session.New()) + + params := &ec2.DetachVolumeInput{ + VolumeId: aws.String("String"), // Required + Device: aws.String("String"), + DryRun: aws.Bool(true), + Force: aws.Bool(true), + InstanceId: aws.String("String"), + } + resp, err := svc.DetachVolume(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_DetachVpnGateway() { + svc := ec2.New(session.New()) + + params := &ec2.DetachVpnGatewayInput{ + VpcId: aws.String("String"), // Required + VpnGatewayId: aws.String("String"), // Required + DryRun: aws.Bool(true), + } + resp, err := svc.DetachVpnGateway(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_DisableVgwRoutePropagation() { + svc := ec2.New(session.New()) + + params := &ec2.DisableVgwRoutePropagationInput{ + GatewayId: aws.String("String"), // Required + RouteTableId: aws.String("String"), // Required + } + resp, err := svc.DisableVgwRoutePropagation(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_DisableVpcClassicLink() { + svc := ec2.New(session.New()) + + params := &ec2.DisableVpcClassicLinkInput{ + VpcId: aws.String("String"), // Required + DryRun: aws.Bool(true), + } + resp, err := svc.DisableVpcClassicLink(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_DisableVpcClassicLinkDnsSupport() { + svc := ec2.New(session.New()) + + params := &ec2.DisableVpcClassicLinkDnsSupportInput{ + VpcId: aws.String("String"), + } + resp, err := svc.DisableVpcClassicLinkDnsSupport(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_DisassociateAddress() { + svc := ec2.New(session.New()) + + params := &ec2.DisassociateAddressInput{ + AssociationId: aws.String("String"), + DryRun: aws.Bool(true), + PublicIp: aws.String("String"), + } + resp, err := svc.DisassociateAddress(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_DisassociateRouteTable() { + svc := ec2.New(session.New()) + + params := &ec2.DisassociateRouteTableInput{ + AssociationId: aws.String("String"), // Required + DryRun: aws.Bool(true), + } + resp, err := svc.DisassociateRouteTable(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleEC2_EnableVgwRoutePropagation() { + svc := ec2.New(session.New()) + + params := &ec2.EnableVgwRoutePropagationInput{ + GatewayId: aws.String("String"), // Required + RouteTableId: aws.String("String"), // Required + } + resp, err := svc.EnableVgwRoutePropagation(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_EnableVolumeIO() { + svc := ec2.New(session.New()) + + params := &ec2.EnableVolumeIOInput{ + VolumeId: aws.String("String"), // Required + DryRun: aws.Bool(true), + } + resp, err := svc.EnableVolumeIO(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_EnableVpcClassicLink() { + svc := ec2.New(session.New()) + + params := &ec2.EnableVpcClassicLinkInput{ + VpcId: aws.String("String"), // Required + DryRun: aws.Bool(true), + } + resp, err := svc.EnableVpcClassicLink(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_EnableVpcClassicLinkDnsSupport() { + svc := ec2.New(session.New()) + + params := &ec2.EnableVpcClassicLinkDnsSupportInput{ + VpcId: aws.String("String"), + } + resp, err := svc.EnableVpcClassicLinkDnsSupport(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_GetConsoleOutput() { + svc := ec2.New(session.New()) + + params := &ec2.GetConsoleOutputInput{ + InstanceId: aws.String("String"), // Required + DryRun: aws.Bool(true), + } + resp, err := svc.GetConsoleOutput(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_GetPasswordData() { + svc := ec2.New(session.New()) + + params := &ec2.GetPasswordDataInput{ + InstanceId: aws.String("String"), // Required + DryRun: aws.Bool(true), + } + resp, err := svc.GetPasswordData(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_ImportImage() { + svc := ec2.New(session.New()) + + params := &ec2.ImportImageInput{ + Architecture: aws.String("String"), + ClientData: &ec2.ClientData{ + Comment: aws.String("String"), + UploadEnd: aws.Time(time.Now()), + UploadSize: aws.Float64(1.0), + UploadStart: aws.Time(time.Now()), + }, + ClientToken: aws.String("String"), + Description: aws.String("String"), + DiskContainers: []*ec2.ImageDiskContainer{ + { // Required + Description: aws.String("String"), + DeviceName: aws.String("String"), + Format: aws.String("String"), + SnapshotId: aws.String("String"), + Url: aws.String("String"), + UserBucket: &ec2.UserBucket{ + S3Bucket: aws.String("String"), + S3Key: aws.String("String"), + }, + }, + // More values... 
+ }, + DryRun: aws.Bool(true), + Hypervisor: aws.String("String"), + LicenseType: aws.String("String"), + Platform: aws.String("String"), + RoleName: aws.String("String"), + } + resp, err := svc.ImportImage(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_ImportInstance() { + svc := ec2.New(session.New()) + + params := &ec2.ImportInstanceInput{ + Platform: aws.String("PlatformValues"), // Required + Description: aws.String("String"), + DiskImages: []*ec2.DiskImage{ + { // Required + Description: aws.String("String"), + Image: &ec2.DiskImageDetail{ + Bytes: aws.Int64(1), // Required + Format: aws.String("DiskImageFormat"), // Required + ImportManifestUrl: aws.String("String"), // Required + }, + Volume: &ec2.VolumeDetail{ + Size: aws.Int64(1), // Required + }, + }, + // More values... + }, + DryRun: aws.Bool(true), + LaunchSpecification: &ec2.ImportInstanceLaunchSpecification{ + AdditionalInfo: aws.String("String"), + Architecture: aws.String("ArchitectureValues"), + GroupIds: []*string{ + aws.String("String"), // Required + // More values... + }, + GroupNames: []*string{ + aws.String("String"), // Required + // More values... + }, + InstanceInitiatedShutdownBehavior: aws.String("ShutdownBehavior"), + InstanceType: aws.String("InstanceType"), + Monitoring: aws.Bool(true), + Placement: &ec2.Placement{ + Affinity: aws.String("String"), + AvailabilityZone: aws.String("String"), + GroupName: aws.String("String"), + HostId: aws.String("String"), + Tenancy: aws.String("Tenancy"), + }, + PrivateIpAddress: aws.String("String"), + SubnetId: aws.String("String"), + UserData: &ec2.UserData{ + Data: aws.String("String"), + }, + }, + } + resp, err := svc.ImportInstance(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_ImportKeyPair() { + svc := ec2.New(session.New()) + + params := &ec2.ImportKeyPairInput{ + KeyName: aws.String("String"), // Required + PublicKeyMaterial: []byte("PAYLOAD"), // Required + DryRun: aws.Bool(true), + } + resp, err := svc.ImportKeyPair(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_ImportSnapshot() { + svc := ec2.New(session.New()) + + params := &ec2.ImportSnapshotInput{ + ClientData: &ec2.ClientData{ + Comment: aws.String("String"), + UploadEnd: aws.Time(time.Now()), + UploadSize: aws.Float64(1.0), + UploadStart: aws.Time(time.Now()), + }, + ClientToken: aws.String("String"), + Description: aws.String("String"), + DiskContainer: &ec2.SnapshotDiskContainer{ + Description: aws.String("String"), + Format: aws.String("String"), + Url: aws.String("String"), + UserBucket: &ec2.UserBucket{ + S3Bucket: aws.String("String"), + S3Key: aws.String("String"), + }, + }, + DryRun: aws.Bool(true), + RoleName: aws.String("String"), + } + resp, err := svc.ImportSnapshot(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleEC2_ImportVolume() { + svc := ec2.New(session.New()) + + params := &ec2.ImportVolumeInput{ + AvailabilityZone: aws.String("String"), // Required + Image: &ec2.DiskImageDetail{ // Required + Bytes: aws.Int64(1), // Required + Format: aws.String("DiskImageFormat"), // Required + ImportManifestUrl: aws.String("String"), // Required + }, + Volume: &ec2.VolumeDetail{ // Required + Size: aws.Int64(1), // Required + }, + Description: aws.String("String"), + DryRun: aws.Bool(true), + } + resp, err := svc.ImportVolume(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_ModifyHosts() { + svc := ec2.New(session.New()) + + params := &ec2.ModifyHostsInput{ + AutoPlacement: aws.String("AutoPlacement"), // Required + HostIds: []*string{ // Required + aws.String("String"), // Required + // More values... + }, + } + resp, err := svc.ModifyHosts(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_ModifyIdFormat() { + svc := ec2.New(session.New()) + + params := &ec2.ModifyIdFormatInput{ + Resource: aws.String("String"), // Required + UseLongIds: aws.Bool(true), // Required + } + resp, err := svc.ModifyIdFormat(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_ModifyImageAttribute() { + svc := ec2.New(session.New()) + + params := &ec2.ModifyImageAttributeInput{ + ImageId: aws.String("String"), // Required + Attribute: aws.String("String"), + Description: &ec2.AttributeValue{ + Value: aws.String("String"), + }, + DryRun: aws.Bool(true), + LaunchPermission: &ec2.LaunchPermissionModifications{ + Add: []*ec2.LaunchPermission{ + { // Required + Group: aws.String("PermissionGroup"), + UserId: aws.String("String"), + }, + // More values... + }, + Remove: []*ec2.LaunchPermission{ + { // Required + Group: aws.String("PermissionGroup"), + UserId: aws.String("String"), + }, + // More values... + }, + }, + OperationType: aws.String("OperationType"), + ProductCodes: []*string{ + aws.String("String"), // Required + // More values... + }, + UserGroups: []*string{ + aws.String("String"), // Required + // More values... + }, + UserIds: []*string{ + aws.String("String"), // Required + // More values... + }, + Value: aws.String("String"), + } + resp, err := svc.ModifyImageAttribute(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleEC2_ModifyInstanceAttribute() { + svc := ec2.New(session.New()) + + params := &ec2.ModifyInstanceAttributeInput{ + InstanceId: aws.String("String"), // Required + Attribute: aws.String("InstanceAttributeName"), + BlockDeviceMappings: []*ec2.InstanceBlockDeviceMappingSpecification{ + { // Required + DeviceName: aws.String("String"), + Ebs: &ec2.EbsInstanceBlockDeviceSpecification{ + DeleteOnTermination: aws.Bool(true), + VolumeId: aws.String("String"), + }, + NoDevice: aws.String("String"), + VirtualName: aws.String("String"), + }, + // More values... + }, + DisableApiTermination: &ec2.AttributeBooleanValue{ + Value: aws.Bool(true), + }, + DryRun: aws.Bool(true), + EbsOptimized: &ec2.AttributeBooleanValue{ + Value: aws.Bool(true), + }, + Groups: []*string{ + aws.String("String"), // Required + // More values... + }, + InstanceInitiatedShutdownBehavior: &ec2.AttributeValue{ + Value: aws.String("String"), + }, + InstanceType: &ec2.AttributeValue{ + Value: aws.String("String"), + }, + Kernel: &ec2.AttributeValue{ + Value: aws.String("String"), + }, + Ramdisk: &ec2.AttributeValue{ + Value: aws.String("String"), + }, + SourceDestCheck: &ec2.AttributeBooleanValue{ + Value: aws.Bool(true), + }, + SriovNetSupport: &ec2.AttributeValue{ + Value: aws.String("String"), + }, + UserData: &ec2.BlobAttributeValue{ + Value: []byte("PAYLOAD"), + }, + Value: aws.String("String"), + } + resp, err := svc.ModifyInstanceAttribute(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_ModifyInstancePlacement() { + svc := ec2.New(session.New()) + + params := &ec2.ModifyInstancePlacementInput{ + InstanceId: aws.String("String"), // Required + Affinity: aws.String("Affinity"), + HostId: aws.String("String"), + Tenancy: aws.String("HostTenancy"), + } + resp, err := svc.ModifyInstancePlacement(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_ModifyNetworkInterfaceAttribute() { + svc := ec2.New(session.New()) + + params := &ec2.ModifyNetworkInterfaceAttributeInput{ + NetworkInterfaceId: aws.String("String"), // Required + Attachment: &ec2.NetworkInterfaceAttachmentChanges{ + AttachmentId: aws.String("String"), + DeleteOnTermination: aws.Bool(true), + }, + Description: &ec2.AttributeValue{ + Value: aws.String("String"), + }, + DryRun: aws.Bool(true), + Groups: []*string{ + aws.String("String"), // Required + // More values... + }, + SourceDestCheck: &ec2.AttributeBooleanValue{ + Value: aws.Bool(true), + }, + } + resp, err := svc.ModifyNetworkInterfaceAttribute(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_ModifyReservedInstances() { + svc := ec2.New(session.New()) + + params := &ec2.ModifyReservedInstancesInput{ + ReservedInstancesIds: []*string{ // Required + aws.String("String"), // Required + // More values... 
+ }, + TargetConfigurations: []*ec2.ReservedInstancesConfiguration{ // Required + { // Required + AvailabilityZone: aws.String("String"), + InstanceCount: aws.Int64(1), + InstanceType: aws.String("InstanceType"), + Platform: aws.String("String"), + }, + // More values... + }, + ClientToken: aws.String("String"), + } + resp, err := svc.ModifyReservedInstances(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_ModifySnapshotAttribute() { + svc := ec2.New(session.New()) + + params := &ec2.ModifySnapshotAttributeInput{ + SnapshotId: aws.String("String"), // Required + Attribute: aws.String("SnapshotAttributeName"), + CreateVolumePermission: &ec2.CreateVolumePermissionModifications{ + Add: []*ec2.CreateVolumePermission{ + { // Required + Group: aws.String("PermissionGroup"), + UserId: aws.String("String"), + }, + // More values... + }, + Remove: []*ec2.CreateVolumePermission{ + { // Required + Group: aws.String("PermissionGroup"), + UserId: aws.String("String"), + }, + // More values... + }, + }, + DryRun: aws.Bool(true), + GroupNames: []*string{ + aws.String("String"), // Required + // More values... + }, + OperationType: aws.String("OperationType"), + UserIds: []*string{ + aws.String("String"), // Required + // More values... + }, + } + resp, err := svc.ModifySnapshotAttribute(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_ModifySpotFleetRequest() { + svc := ec2.New(session.New()) + + params := &ec2.ModifySpotFleetRequestInput{ + SpotFleetRequestId: aws.String("String"), // Required + ExcessCapacityTerminationPolicy: aws.String("ExcessCapacityTerminationPolicy"), + TargetCapacity: aws.Int64(1), + } + resp, err := svc.ModifySpotFleetRequest(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_ModifySubnetAttribute() { + svc := ec2.New(session.New()) + + params := &ec2.ModifySubnetAttributeInput{ + SubnetId: aws.String("String"), // Required + MapPublicIpOnLaunch: &ec2.AttributeBooleanValue{ + Value: aws.Bool(true), + }, + } + resp, err := svc.ModifySubnetAttribute(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_ModifyVolumeAttribute() { + svc := ec2.New(session.New()) + + params := &ec2.ModifyVolumeAttributeInput{ + VolumeId: aws.String("String"), // Required + AutoEnableIO: &ec2.AttributeBooleanValue{ + Value: aws.Bool(true), + }, + DryRun: aws.Bool(true), + } + resp, err := svc.ModifyVolumeAttribute(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleEC2_ModifyVpcAttribute() { + svc := ec2.New(session.New()) + + params := &ec2.ModifyVpcAttributeInput{ + VpcId: aws.String("String"), // Required + EnableDnsHostnames: &ec2.AttributeBooleanValue{ + Value: aws.Bool(true), + }, + EnableDnsSupport: &ec2.AttributeBooleanValue{ + Value: aws.Bool(true), + }, + } + resp, err := svc.ModifyVpcAttribute(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_ModifyVpcEndpoint() { + svc := ec2.New(session.New()) + + params := &ec2.ModifyVpcEndpointInput{ + VpcEndpointId: aws.String("String"), // Required + AddRouteTableIds: []*string{ + aws.String("String"), // Required + // More values... + }, + DryRun: aws.Bool(true), + PolicyDocument: aws.String("String"), + RemoveRouteTableIds: []*string{ + aws.String("String"), // Required + // More values... + }, + ResetPolicy: aws.Bool(true), + } + resp, err := svc.ModifyVpcEndpoint(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_MonitorInstances() { + svc := ec2.New(session.New()) + + params := &ec2.MonitorInstancesInput{ + InstanceIds: []*string{ // Required + aws.String("String"), // Required + // More values... + }, + DryRun: aws.Bool(true), + } + resp, err := svc.MonitorInstances(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_MoveAddressToVpc() { + svc := ec2.New(session.New()) + + params := &ec2.MoveAddressToVpcInput{ + PublicIp: aws.String("String"), // Required + DryRun: aws.Bool(true), + } + resp, err := svc.MoveAddressToVpc(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_PurchaseReservedInstancesOffering() { + svc := ec2.New(session.New()) + + params := &ec2.PurchaseReservedInstancesOfferingInput{ + InstanceCount: aws.Int64(1), // Required + ReservedInstancesOfferingId: aws.String("String"), // Required + DryRun: aws.Bool(true), + LimitPrice: &ec2.ReservedInstanceLimitPrice{ + Amount: aws.Float64(1.0), + CurrencyCode: aws.String("CurrencyCodeValues"), + }, + } + resp, err := svc.PurchaseReservedInstancesOffering(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_PurchaseScheduledInstances() { + svc := ec2.New(session.New()) + + params := &ec2.PurchaseScheduledInstancesInput{ + PurchaseRequests: []*ec2.PurchaseRequest{ // Required + { // Required + InstanceCount: aws.Int64(1), + PurchaseToken: aws.String("String"), + }, + // More values... + }, + ClientToken: aws.String("String"), + DryRun: aws.Bool(true), + } + resp, err := svc.PurchaseScheduledInstances(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. 
+ fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_RebootInstances() { + svc := ec2.New(session.New()) + + params := &ec2.RebootInstancesInput{ + InstanceIds: []*string{ // Required + aws.String("String"), // Required + // More values... + }, + DryRun: aws.Bool(true), + } + resp, err := svc.RebootInstances(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_RegisterImage() { + svc := ec2.New(session.New()) + + params := &ec2.RegisterImageInput{ + Name: aws.String("String"), // Required + Architecture: aws.String("ArchitectureValues"), + BlockDeviceMappings: []*ec2.BlockDeviceMapping{ + { // Required + DeviceName: aws.String("String"), + Ebs: &ec2.EbsBlockDevice{ + DeleteOnTermination: aws.Bool(true), + Encrypted: aws.Bool(true), + Iops: aws.Int64(1), + SnapshotId: aws.String("String"), + VolumeSize: aws.Int64(1), + VolumeType: aws.String("VolumeType"), + }, + NoDevice: aws.String("String"), + VirtualName: aws.String("String"), + }, + // More values... + }, + Description: aws.String("String"), + DryRun: aws.Bool(true), + ImageLocation: aws.String("String"), + KernelId: aws.String("String"), + RamdiskId: aws.String("String"), + RootDeviceName: aws.String("String"), + SriovNetSupport: aws.String("String"), + VirtualizationType: aws.String("String"), + } + resp, err := svc.RegisterImage(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_RejectVpcPeeringConnection() { + svc := ec2.New(session.New()) + + params := &ec2.RejectVpcPeeringConnectionInput{ + VpcPeeringConnectionId: aws.String("String"), // Required + DryRun: aws.Bool(true), + } + resp, err := svc.RejectVpcPeeringConnection(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_ReleaseAddress() { + svc := ec2.New(session.New()) + + params := &ec2.ReleaseAddressInput{ + AllocationId: aws.String("String"), + DryRun: aws.Bool(true), + PublicIp: aws.String("String"), + } + resp, err := svc.ReleaseAddress(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_ReleaseHosts() { + svc := ec2.New(session.New()) + + params := &ec2.ReleaseHostsInput{ + HostIds: []*string{ // Required + aws.String("String"), // Required + // More values... + }, + } + resp, err := svc.ReleaseHosts(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleEC2_ReplaceNetworkAclAssociation() { + svc := ec2.New(session.New()) + + params := &ec2.ReplaceNetworkAclAssociationInput{ + AssociationId: aws.String("String"), // Required + NetworkAclId: aws.String("String"), // Required + DryRun: aws.Bool(true), + } + resp, err := svc.ReplaceNetworkAclAssociation(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_ReplaceNetworkAclEntry() { + svc := ec2.New(session.New()) + + params := &ec2.ReplaceNetworkAclEntryInput{ + CidrBlock: aws.String("String"), // Required + Egress: aws.Bool(true), // Required + NetworkAclId: aws.String("String"), // Required + Protocol: aws.String("String"), // Required + RuleAction: aws.String("RuleAction"), // Required + RuleNumber: aws.Int64(1), // Required + DryRun: aws.Bool(true), + IcmpTypeCode: &ec2.IcmpTypeCode{ + Code: aws.Int64(1), + Type: aws.Int64(1), + }, + PortRange: &ec2.PortRange{ + From: aws.Int64(1), + To: aws.Int64(1), + }, + } + resp, err := svc.ReplaceNetworkAclEntry(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_ReplaceRoute() { + svc := ec2.New(session.New()) + + params := &ec2.ReplaceRouteInput{ + DestinationCidrBlock: aws.String("String"), // Required + RouteTableId: aws.String("String"), // Required + DryRun: aws.Bool(true), + GatewayId: aws.String("String"), + InstanceId: aws.String("String"), + NatGatewayId: aws.String("String"), + NetworkInterfaceId: aws.String("String"), + VpcPeeringConnectionId: aws.String("String"), + } + resp, err := svc.ReplaceRoute(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_ReplaceRouteTableAssociation() { + svc := ec2.New(session.New()) + + params := &ec2.ReplaceRouteTableAssociationInput{ + AssociationId: aws.String("String"), // Required + RouteTableId: aws.String("String"), // Required + DryRun: aws.Bool(true), + } + resp, err := svc.ReplaceRouteTableAssociation(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_ReportInstanceStatus() { + svc := ec2.New(session.New()) + + params := &ec2.ReportInstanceStatusInput{ + Instances: []*string{ // Required + aws.String("String"), // Required + // More values... + }, + ReasonCodes: []*string{ // Required + aws.String("ReportInstanceReasonCodes"), // Required + // More values... + }, + Status: aws.String("ReportStatusType"), // Required + Description: aws.String("String"), + DryRun: aws.Bool(true), + EndTime: aws.Time(time.Now()), + StartTime: aws.Time(time.Now()), + } + resp, err := svc.ReportInstanceStatus(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleEC2_RequestSpotFleet() { + svc := ec2.New(session.New()) + + params := &ec2.RequestSpotFleetInput{ + SpotFleetRequestConfig: &ec2.SpotFleetRequestConfigData{ // Required + IamFleetRole: aws.String("String"), // Required + LaunchSpecifications: []*ec2.SpotFleetLaunchSpecification{ // Required + { // Required + AddressingType: aws.String("String"), + BlockDeviceMappings: []*ec2.BlockDeviceMapping{ + { // Required + DeviceName: aws.String("String"), + Ebs: &ec2.EbsBlockDevice{ + DeleteOnTermination: aws.Bool(true), + Encrypted: aws.Bool(true), + Iops: aws.Int64(1), + SnapshotId: aws.String("String"), + VolumeSize: aws.Int64(1), + VolumeType: aws.String("VolumeType"), + }, + NoDevice: aws.String("String"), + VirtualName: aws.String("String"), + }, + // More values... + }, + EbsOptimized: aws.Bool(true), + IamInstanceProfile: &ec2.IamInstanceProfileSpecification{ + Arn: aws.String("String"), + Name: aws.String("String"), + }, + ImageId: aws.String("String"), + InstanceType: aws.String("InstanceType"), + KernelId: aws.String("String"), + KeyName: aws.String("String"), + Monitoring: &ec2.SpotFleetMonitoring{ + Enabled: aws.Bool(true), + }, + NetworkInterfaces: []*ec2.InstanceNetworkInterfaceSpecification{ + { // Required + AssociatePublicIpAddress: aws.Bool(true), + DeleteOnTermination: aws.Bool(true), + Description: aws.String("String"), + DeviceIndex: aws.Int64(1), + Groups: []*string{ + aws.String("String"), // Required + // More values... + }, + NetworkInterfaceId: aws.String("String"), + PrivateIpAddress: aws.String("String"), + PrivateIpAddresses: []*ec2.PrivateIpAddressSpecification{ + { // Required + PrivateIpAddress: aws.String("String"), // Required + Primary: aws.Bool(true), + }, + // More values... + }, + SecondaryPrivateIpAddressCount: aws.Int64(1), + SubnetId: aws.String("String"), + }, + // More values... + }, + Placement: &ec2.SpotPlacement{ + AvailabilityZone: aws.String("String"), + GroupName: aws.String("String"), + }, + RamdiskId: aws.String("String"), + SecurityGroups: []*ec2.GroupIdentifier{ + { // Required + GroupId: aws.String("String"), + GroupName: aws.String("String"), + }, + // More values... + }, + SpotPrice: aws.String("String"), + SubnetId: aws.String("String"), + UserData: aws.String("String"), + WeightedCapacity: aws.Float64(1.0), + }, + // More values... + }, + SpotPrice: aws.String("String"), // Required + TargetCapacity: aws.Int64(1), // Required + AllocationStrategy: aws.String("AllocationStrategy"), + ClientToken: aws.String("String"), + ExcessCapacityTerminationPolicy: aws.String("ExcessCapacityTerminationPolicy"), + TerminateInstancesWithExpiration: aws.Bool(true), + ValidFrom: aws.Time(time.Now()), + ValidUntil: aws.Time(time.Now()), + }, + DryRun: aws.Bool(true), + } + resp, err := svc.RequestSpotFleet(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleEC2_RequestSpotInstances() { + svc := ec2.New(session.New()) + + params := &ec2.RequestSpotInstancesInput{ + SpotPrice: aws.String("String"), // Required + AvailabilityZoneGroup: aws.String("String"), + BlockDurationMinutes: aws.Int64(1), + ClientToken: aws.String("String"), + DryRun: aws.Bool(true), + InstanceCount: aws.Int64(1), + LaunchGroup: aws.String("String"), + LaunchSpecification: &ec2.RequestSpotLaunchSpecification{ + AddressingType: aws.String("String"), + BlockDeviceMappings: []*ec2.BlockDeviceMapping{ + { // Required + DeviceName: aws.String("String"), + Ebs: &ec2.EbsBlockDevice{ + DeleteOnTermination: aws.Bool(true), + Encrypted: aws.Bool(true), + Iops: aws.Int64(1), + SnapshotId: aws.String("String"), + VolumeSize: aws.Int64(1), + VolumeType: aws.String("VolumeType"), + }, + NoDevice: aws.String("String"), + VirtualName: aws.String("String"), + }, + // More values... + }, + EbsOptimized: aws.Bool(true), + IamInstanceProfile: &ec2.IamInstanceProfileSpecification{ + Arn: aws.String("String"), + Name: aws.String("String"), + }, + ImageId: aws.String("String"), + InstanceType: aws.String("InstanceType"), + KernelId: aws.String("String"), + KeyName: aws.String("String"), + Monitoring: &ec2.RunInstancesMonitoringEnabled{ + Enabled: aws.Bool(true), // Required + }, + NetworkInterfaces: []*ec2.InstanceNetworkInterfaceSpecification{ + { // Required + AssociatePublicIpAddress: aws.Bool(true), + DeleteOnTermination: aws.Bool(true), + Description: aws.String("String"), + DeviceIndex: aws.Int64(1), + Groups: []*string{ + aws.String("String"), // Required + // More values... + }, + NetworkInterfaceId: aws.String("String"), + PrivateIpAddress: aws.String("String"), + PrivateIpAddresses: []*ec2.PrivateIpAddressSpecification{ + { // Required + PrivateIpAddress: aws.String("String"), // Required + Primary: aws.Bool(true), + }, + // More values... + }, + SecondaryPrivateIpAddressCount: aws.Int64(1), + SubnetId: aws.String("String"), + }, + // More values... + }, + Placement: &ec2.SpotPlacement{ + AvailabilityZone: aws.String("String"), + GroupName: aws.String("String"), + }, + RamdiskId: aws.String("String"), + SecurityGroupIds: []*string{ + aws.String("String"), // Required + // More values... + }, + SecurityGroups: []*string{ + aws.String("String"), // Required + // More values... + }, + SubnetId: aws.String("String"), + UserData: aws.String("String"), + }, + Type: aws.String("SpotInstanceType"), + ValidFrom: aws.Time(time.Now()), + ValidUntil: aws.Time(time.Now()), + } + resp, err := svc.RequestSpotInstances(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_ResetImageAttribute() { + svc := ec2.New(session.New()) + + params := &ec2.ResetImageAttributeInput{ + Attribute: aws.String("ResetImageAttributeName"), // Required + ImageId: aws.String("String"), // Required + DryRun: aws.Bool(true), + } + resp, err := svc.ResetImageAttribute(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleEC2_ResetInstanceAttribute() { + svc := ec2.New(session.New()) + + params := &ec2.ResetInstanceAttributeInput{ + Attribute: aws.String("InstanceAttributeName"), // Required + InstanceId: aws.String("String"), // Required + DryRun: aws.Bool(true), + } + resp, err := svc.ResetInstanceAttribute(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_ResetNetworkInterfaceAttribute() { + svc := ec2.New(session.New()) + + params := &ec2.ResetNetworkInterfaceAttributeInput{ + NetworkInterfaceId: aws.String("String"), // Required + DryRun: aws.Bool(true), + SourceDestCheck: aws.String("String"), + } + resp, err := svc.ResetNetworkInterfaceAttribute(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_ResetSnapshotAttribute() { + svc := ec2.New(session.New()) + + params := &ec2.ResetSnapshotAttributeInput{ + Attribute: aws.String("SnapshotAttributeName"), // Required + SnapshotId: aws.String("String"), // Required + DryRun: aws.Bool(true), + } + resp, err := svc.ResetSnapshotAttribute(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_RestoreAddressToClassic() { + svc := ec2.New(session.New()) + + params := &ec2.RestoreAddressToClassicInput{ + PublicIp: aws.String("String"), // Required + DryRun: aws.Bool(true), + } + resp, err := svc.RestoreAddressToClassic(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_RevokeSecurityGroupEgress() { + svc := ec2.New(session.New()) + + params := &ec2.RevokeSecurityGroupEgressInput{ + GroupId: aws.String("String"), // Required + CidrIp: aws.String("String"), + DryRun: aws.Bool(true), + FromPort: aws.Int64(1), + IpPermissions: []*ec2.IpPermission{ + { // Required + FromPort: aws.Int64(1), + IpProtocol: aws.String("String"), + IpRanges: []*ec2.IpRange{ + { // Required + CidrIp: aws.String("String"), + }, + // More values... + }, + PrefixListIds: []*ec2.PrefixListId{ + { // Required + PrefixListId: aws.String("String"), + }, + // More values... + }, + ToPort: aws.Int64(1), + UserIdGroupPairs: []*ec2.UserIdGroupPair{ + { // Required + GroupId: aws.String("String"), + GroupName: aws.String("String"), + UserId: aws.String("String"), + }, + // More values... + }, + }, + // More values... + }, + IpProtocol: aws.String("String"), + SourceSecurityGroupName: aws.String("String"), + SourceSecurityGroupOwnerId: aws.String("String"), + ToPort: aws.Int64(1), + } + resp, err := svc.RevokeSecurityGroupEgress(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleEC2_RevokeSecurityGroupIngress() { + svc := ec2.New(session.New()) + + params := &ec2.RevokeSecurityGroupIngressInput{ + CidrIp: aws.String("String"), + DryRun: aws.Bool(true), + FromPort: aws.Int64(1), + GroupId: aws.String("String"), + GroupName: aws.String("String"), + IpPermissions: []*ec2.IpPermission{ + { // Required + FromPort: aws.Int64(1), + IpProtocol: aws.String("String"), + IpRanges: []*ec2.IpRange{ + { // Required + CidrIp: aws.String("String"), + }, + // More values... + }, + PrefixListIds: []*ec2.PrefixListId{ + { // Required + PrefixListId: aws.String("String"), + }, + // More values... + }, + ToPort: aws.Int64(1), + UserIdGroupPairs: []*ec2.UserIdGroupPair{ + { // Required + GroupId: aws.String("String"), + GroupName: aws.String("String"), + UserId: aws.String("String"), + }, + // More values... + }, + }, + // More values... + }, + IpProtocol: aws.String("String"), + SourceSecurityGroupName: aws.String("String"), + SourceSecurityGroupOwnerId: aws.String("String"), + ToPort: aws.Int64(1), + } + resp, err := svc.RevokeSecurityGroupIngress(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_RunInstances() { + svc := ec2.New(session.New()) + + params := &ec2.RunInstancesInput{ + ImageId: aws.String("String"), // Required + MaxCount: aws.Int64(1), // Required + MinCount: aws.Int64(1), // Required + AdditionalInfo: aws.String("String"), + BlockDeviceMappings: []*ec2.BlockDeviceMapping{ + { // Required + DeviceName: aws.String("String"), + Ebs: &ec2.EbsBlockDevice{ + DeleteOnTermination: aws.Bool(true), + Encrypted: aws.Bool(true), + Iops: aws.Int64(1), + SnapshotId: aws.String("String"), + VolumeSize: aws.Int64(1), + VolumeType: aws.String("VolumeType"), + }, + NoDevice: aws.String("String"), + VirtualName: aws.String("String"), + }, + // More values... + }, + ClientToken: aws.String("String"), + DisableApiTermination: aws.Bool(true), + DryRun: aws.Bool(true), + EbsOptimized: aws.Bool(true), + IamInstanceProfile: &ec2.IamInstanceProfileSpecification{ + Arn: aws.String("String"), + Name: aws.String("String"), + }, + InstanceInitiatedShutdownBehavior: aws.String("ShutdownBehavior"), + InstanceType: aws.String("InstanceType"), + KernelId: aws.String("String"), + KeyName: aws.String("String"), + Monitoring: &ec2.RunInstancesMonitoringEnabled{ + Enabled: aws.Bool(true), // Required + }, + NetworkInterfaces: []*ec2.InstanceNetworkInterfaceSpecification{ + { // Required + AssociatePublicIpAddress: aws.Bool(true), + DeleteOnTermination: aws.Bool(true), + Description: aws.String("String"), + DeviceIndex: aws.Int64(1), + Groups: []*string{ + aws.String("String"), // Required + // More values... + }, + NetworkInterfaceId: aws.String("String"), + PrivateIpAddress: aws.String("String"), + PrivateIpAddresses: []*ec2.PrivateIpAddressSpecification{ + { // Required + PrivateIpAddress: aws.String("String"), // Required + Primary: aws.Bool(true), + }, + // More values... + }, + SecondaryPrivateIpAddressCount: aws.Int64(1), + SubnetId: aws.String("String"), + }, + // More values... 
+ }, + Placement: &ec2.Placement{ + Affinity: aws.String("String"), + AvailabilityZone: aws.String("String"), + GroupName: aws.String("String"), + HostId: aws.String("String"), + Tenancy: aws.String("Tenancy"), + }, + PrivateIpAddress: aws.String("String"), + RamdiskId: aws.String("String"), + SecurityGroupIds: []*string{ + aws.String("String"), // Required + // More values... + }, + SecurityGroups: []*string{ + aws.String("String"), // Required + // More values... + }, + SubnetId: aws.String("String"), + UserData: aws.String("String"), + } + resp, err := svc.RunInstances(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_RunScheduledInstances() { + svc := ec2.New(session.New()) + + params := &ec2.RunScheduledInstancesInput{ + LaunchSpecification: &ec2.ScheduledInstancesLaunchSpecification{ // Required + ImageId: aws.String("String"), // Required + BlockDeviceMappings: []*ec2.ScheduledInstancesBlockDeviceMapping{ + { // Required + DeviceName: aws.String("String"), + Ebs: &ec2.ScheduledInstancesEbs{ + DeleteOnTermination: aws.Bool(true), + Encrypted: aws.Bool(true), + Iops: aws.Int64(1), + SnapshotId: aws.String("String"), + VolumeSize: aws.Int64(1), + VolumeType: aws.String("String"), + }, + NoDevice: aws.String("String"), + VirtualName: aws.String("String"), + }, + // More values... + }, + EbsOptimized: aws.Bool(true), + IamInstanceProfile: &ec2.ScheduledInstancesIamInstanceProfile{ + Arn: aws.String("String"), + Name: aws.String("String"), + }, + InstanceType: aws.String("String"), + KernelId: aws.String("String"), + KeyName: aws.String("String"), + Monitoring: &ec2.ScheduledInstancesMonitoring{ + Enabled: aws.Bool(true), + }, + NetworkInterfaces: []*ec2.ScheduledInstancesNetworkInterface{ + { // Required + AssociatePublicIpAddress: aws.Bool(true), + DeleteOnTermination: aws.Bool(true), + Description: aws.String("String"), + DeviceIndex: aws.Int64(1), + Groups: []*string{ + aws.String("String"), // Required + // More values... + }, + NetworkInterfaceId: aws.String("String"), + PrivateIpAddress: aws.String("String"), + PrivateIpAddressConfigs: []*ec2.ScheduledInstancesPrivateIpAddressConfig{ + { // Required + Primary: aws.Bool(true), + PrivateIpAddress: aws.String("String"), + }, + // More values... + }, + SecondaryPrivateIpAddressCount: aws.Int64(1), + SubnetId: aws.String("String"), + }, + // More values... + }, + Placement: &ec2.ScheduledInstancesPlacement{ + AvailabilityZone: aws.String("String"), + GroupName: aws.String("String"), + }, + RamdiskId: aws.String("String"), + SecurityGroupIds: []*string{ + aws.String("String"), // Required + // More values... + }, + SubnetId: aws.String("String"), + UserData: aws.String("String"), + }, + ScheduledInstanceId: aws.String("String"), // Required + ClientToken: aws.String("String"), + DryRun: aws.Bool(true), + InstanceCount: aws.Int64(1), + } + resp, err := svc.RunScheduledInstances(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_StartInstances() { + svc := ec2.New(session.New()) + + params := &ec2.StartInstancesInput{ + InstanceIds: []*string{ // Required + aws.String("String"), // Required + // More values... 
+ }, + AdditionalInfo: aws.String("String"), + DryRun: aws.Bool(true), + } + resp, err := svc.StartInstances(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_StopInstances() { + svc := ec2.New(session.New()) + + params := &ec2.StopInstancesInput{ + InstanceIds: []*string{ // Required + aws.String("String"), // Required + // More values... + }, + DryRun: aws.Bool(true), + Force: aws.Bool(true), + } + resp, err := svc.StopInstances(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_TerminateInstances() { + svc := ec2.New(session.New()) + + params := &ec2.TerminateInstancesInput{ + InstanceIds: []*string{ // Required + aws.String("String"), // Required + // More values... + }, + DryRun: aws.Bool(true), + } + resp, err := svc.TerminateInstances(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_UnassignPrivateIpAddresses() { + svc := ec2.New(session.New()) + + params := &ec2.UnassignPrivateIpAddressesInput{ + NetworkInterfaceId: aws.String("String"), // Required + PrivateIpAddresses: []*string{ // Required + aws.String("String"), // Required + // More values... + }, + } + resp, err := svc.UnassignPrivateIpAddresses(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_UnmonitorInstances() { + svc := ec2.New(session.New()) + + params := &ec2.UnmonitorInstancesInput{ + InstanceIds: []*string{ // Required + aws.String("String"), // Required + // More values... + }, + DryRun: aws.Bool(true), + } + resp, err := svc.UnmonitorInstances(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/ec2/service.go b/vendor/github.com/aws/aws-sdk-go/service/ec2/service.go new file mode 100644 index 000000000..2ff4220f7 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/ec2/service.go @@ -0,0 +1,89 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +package ec2 + +import ( + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/client" + "github.com/aws/aws-sdk-go/aws/client/metadata" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol/ec2query" + "github.com/aws/aws-sdk-go/private/signer/v4" +) + +// Amazon Elastic Compute Cloud (Amazon EC2) provides resizable computing capacity +// in the Amazon Web Services (AWS) cloud. Using Amazon EC2 eliminates your +// need to invest in hardware up front, so you can develop and deploy applications +// faster. +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. 
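+//
+// Since operations do not mutate client state, a single client can be shared
+// across goroutines. A minimal sketch (the inputs are illustrative only):
+//
+//	svc := ec2.New(session.New())
+//	go svc.DescribeInstances(&ec2.DescribeInstancesInput{})
+//	go svc.DescribeVolumes(&ec2.DescribeVolumesInput{})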
+type EC2 struct { + *client.Client +} + +// Used for custom client initialization logic +var initClient func(*client.Client) + +// Used for custom request initialization logic +var initRequest func(*request.Request) + +// A ServiceName is the name of the service the client will make API calls to. +const ServiceName = "ec2" + +// New creates a new instance of the EC2 client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a EC2 client from just a session. +// svc := ec2.New(mySession) +// +// // Create a EC2 client with additional configuration +// svc := ec2.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func New(p client.ConfigProvider, cfgs ...*aws.Config) *EC2 { + c := p.ClientConfig(ServiceName, cfgs...) + return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *EC2 { + svc := &EC2{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: ServiceName, + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "2015-10-01", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Build.PushBack(ec2query.Build) + svc.Handlers.Unmarshal.PushBack(ec2query.Unmarshal) + svc.Handlers.UnmarshalMeta.PushBack(ec2query.UnmarshalMeta) + svc.Handlers.UnmarshalError.PushBack(ec2query.UnmarshalError) + + // Run custom client initialization if present + if initClient != nil { + initClient(svc.Client) + } + + return svc +} + +// newRequest creates a new request for a EC2 operation and runs any +// custom request initialization. +func (c *EC2) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + // Run custom request initialization if present + if initRequest != nil { + initRequest(req) + } + + return req +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/ec2/waiters.go b/vendor/github.com/aws/aws-sdk-go/service/ec2/waiters.go new file mode 100644 index 000000000..1b28317a1 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/ec2/waiters.go @@ -0,0 +1,761 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. 
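+//
+// Each WaitUntil* method below polls its Describe* operation until one of the
+// configured acceptors matches or MaxAttempts is exhausted. A minimal usage
+// sketch, assuming a configured session and a hypothetical instance ID:
+//
+//	svc := ec2.New(session.New())
+//	input := &ec2.DescribeInstancesInput{
+//		InstanceIds: []*string{aws.String("i-0123456789abcdef0")},
+//	}
+//	if err := svc.WaitUntilInstanceRunning(input); err != nil {
+//		// a failure acceptor matched or the waiter ran out of attempts
+//	}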
+ +package ec2 + +import ( + "github.com/aws/aws-sdk-go/private/waiter" +) + +func (c *EC2) WaitUntilBundleTaskComplete(input *DescribeBundleTasksInput) error { + waiterCfg := waiter.Config{ + Operation: "DescribeBundleTasks", + Delay: 15, + MaxAttempts: 40, + Acceptors: []waiter.WaitAcceptor{ + { + State: "success", + Matcher: "pathAll", + Argument: "BundleTasks[].State", + Expected: "complete", + }, + { + State: "failure", + Matcher: "pathAny", + Argument: "BundleTasks[].State", + Expected: "failed", + }, + }, + } + + w := waiter.Waiter{ + Client: c, + Input: input, + Config: waiterCfg, + } + return w.Wait() +} + +func (c *EC2) WaitUntilConversionTaskCancelled(input *DescribeConversionTasksInput) error { + waiterCfg := waiter.Config{ + Operation: "DescribeConversionTasks", + Delay: 15, + MaxAttempts: 40, + Acceptors: []waiter.WaitAcceptor{ + { + State: "success", + Matcher: "pathAll", + Argument: "ConversionTasks[].State", + Expected: "cancelled", + }, + }, + } + + w := waiter.Waiter{ + Client: c, + Input: input, + Config: waiterCfg, + } + return w.Wait() +} + +func (c *EC2) WaitUntilConversionTaskCompleted(input *DescribeConversionTasksInput) error { + waiterCfg := waiter.Config{ + Operation: "DescribeConversionTasks", + Delay: 15, + MaxAttempts: 40, + Acceptors: []waiter.WaitAcceptor{ + { + State: "success", + Matcher: "pathAll", + Argument: "ConversionTasks[].State", + Expected: "completed", + }, + { + State: "failure", + Matcher: "pathAny", + Argument: "ConversionTasks[].State", + Expected: "cancelled", + }, + { + State: "failure", + Matcher: "pathAny", + Argument: "ConversionTasks[].State", + Expected: "cancelling", + }, + }, + } + + w := waiter.Waiter{ + Client: c, + Input: input, + Config: waiterCfg, + } + return w.Wait() +} + +func (c *EC2) WaitUntilConversionTaskDeleted(input *DescribeConversionTasksInput) error { + waiterCfg := waiter.Config{ + Operation: "DescribeConversionTasks", + Delay: 15, + MaxAttempts: 40, + Acceptors: []waiter.WaitAcceptor{ + { + State: "success", + Matcher: "pathAll", + Argument: "ConversionTasks[].State", + Expected: "deleted", + }, + }, + } + + w := waiter.Waiter{ + Client: c, + Input: input, + Config: waiterCfg, + } + return w.Wait() +} + +func (c *EC2) WaitUntilCustomerGatewayAvailable(input *DescribeCustomerGatewaysInput) error { + waiterCfg := waiter.Config{ + Operation: "DescribeCustomerGateways", + Delay: 15, + MaxAttempts: 40, + Acceptors: []waiter.WaitAcceptor{ + { + State: "success", + Matcher: "pathAll", + Argument: "CustomerGateways[].State", + Expected: "available", + }, + { + State: "failure", + Matcher: "pathAny", + Argument: "CustomerGateways[].State", + Expected: "deleted", + }, + { + State: "failure", + Matcher: "pathAny", + Argument: "CustomerGateways[].State", + Expected: "deleting", + }, + }, + } + + w := waiter.Waiter{ + Client: c, + Input: input, + Config: waiterCfg, + } + return w.Wait() +} + +func (c *EC2) WaitUntilExportTaskCancelled(input *DescribeExportTasksInput) error { + waiterCfg := waiter.Config{ + Operation: "DescribeExportTasks", + Delay: 15, + MaxAttempts: 40, + Acceptors: []waiter.WaitAcceptor{ + { + State: "success", + Matcher: "pathAll", + Argument: "ExportTasks[].State", + Expected: "cancelled", + }, + }, + } + + w := waiter.Waiter{ + Client: c, + Input: input, + Config: waiterCfg, + } + return w.Wait() +} + +func (c *EC2) WaitUntilExportTaskCompleted(input *DescribeExportTasksInput) error { + waiterCfg := waiter.Config{ + Operation: "DescribeExportTasks", + Delay: 15, + MaxAttempts: 40, + Acceptors: 
[]waiter.WaitAcceptor{ + { + State: "success", + Matcher: "pathAll", + Argument: "ExportTasks[].State", + Expected: "completed", + }, + }, + } + + w := waiter.Waiter{ + Client: c, + Input: input, + Config: waiterCfg, + } + return w.Wait() +} + +func (c *EC2) WaitUntilImageAvailable(input *DescribeImagesInput) error { + waiterCfg := waiter.Config{ + Operation: "DescribeImages", + Delay: 15, + MaxAttempts: 40, + Acceptors: []waiter.WaitAcceptor{ + { + State: "success", + Matcher: "pathAll", + Argument: "Images[].State", + Expected: "available", + }, + { + State: "failure", + Matcher: "pathAny", + Argument: "Images[].State", + Expected: "failed", + }, + }, + } + + w := waiter.Waiter{ + Client: c, + Input: input, + Config: waiterCfg, + } + return w.Wait() +} + +func (c *EC2) WaitUntilInstanceExists(input *DescribeInstancesInput) error { + waiterCfg := waiter.Config{ + Operation: "DescribeInstances", + Delay: 5, + MaxAttempts: 40, + Acceptors: []waiter.WaitAcceptor{ + { + State: "success", + Matcher: "status", + Argument: "", + Expected: 200, + }, + { + State: "retry", + Matcher: "error", + Argument: "", + Expected: "InvalidInstanceIDNotFound", + }, + }, + } + + w := waiter.Waiter{ + Client: c, + Input: input, + Config: waiterCfg, + } + return w.Wait() +} + +func (c *EC2) WaitUntilInstanceRunning(input *DescribeInstancesInput) error { + waiterCfg := waiter.Config{ + Operation: "DescribeInstances", + Delay: 15, + MaxAttempts: 40, + Acceptors: []waiter.WaitAcceptor{ + { + State: "success", + Matcher: "pathAll", + Argument: "Reservations[].Instances[].State.Name", + Expected: "running", + }, + { + State: "failure", + Matcher: "pathAny", + Argument: "Reservations[].Instances[].State.Name", + Expected: "shutting-down", + }, + { + State: "failure", + Matcher: "pathAny", + Argument: "Reservations[].Instances[].State.Name", + Expected: "terminated", + }, + { + State: "failure", + Matcher: "pathAny", + Argument: "Reservations[].Instances[].State.Name", + Expected: "stopping", + }, + }, + } + + w := waiter.Waiter{ + Client: c, + Input: input, + Config: waiterCfg, + } + return w.Wait() +} + +func (c *EC2) WaitUntilInstanceStatusOk(input *DescribeInstanceStatusInput) error { + waiterCfg := waiter.Config{ + Operation: "DescribeInstanceStatus", + Delay: 15, + MaxAttempts: 40, + Acceptors: []waiter.WaitAcceptor{ + { + State: "success", + Matcher: "pathAll", + Argument: "InstanceStatuses[].InstanceStatus.Status", + Expected: "ok", + }, + }, + } + + w := waiter.Waiter{ + Client: c, + Input: input, + Config: waiterCfg, + } + return w.Wait() +} + +func (c *EC2) WaitUntilInstanceStopped(input *DescribeInstancesInput) error { + waiterCfg := waiter.Config{ + Operation: "DescribeInstances", + Delay: 15, + MaxAttempts: 40, + Acceptors: []waiter.WaitAcceptor{ + { + State: "success", + Matcher: "pathAll", + Argument: "Reservations[].Instances[].State.Name", + Expected: "stopped", + }, + { + State: "failure", + Matcher: "pathAny", + Argument: "Reservations[].Instances[].State.Name", + Expected: "pending", + }, + { + State: "failure", + Matcher: "pathAny", + Argument: "Reservations[].Instances[].State.Name", + Expected: "terminated", + }, + }, + } + + w := waiter.Waiter{ + Client: c, + Input: input, + Config: waiterCfg, + } + return w.Wait() +} + +func (c *EC2) WaitUntilInstanceTerminated(input *DescribeInstancesInput) error { + waiterCfg := waiter.Config{ + Operation: "DescribeInstances", + Delay: 15, + MaxAttempts: 40, + Acceptors: []waiter.WaitAcceptor{ + { + State: "success", + Matcher: "pathAll", + Argument: 
"Reservations[].Instances[].State.Name", + Expected: "terminated", + }, + { + State: "failure", + Matcher: "pathAny", + Argument: "Reservations[].Instances[].State.Name", + Expected: "pending", + }, + { + State: "failure", + Matcher: "pathAny", + Argument: "Reservations[].Instances[].State.Name", + Expected: "stopping", + }, + }, + } + + w := waiter.Waiter{ + Client: c, + Input: input, + Config: waiterCfg, + } + return w.Wait() +} + +func (c *EC2) WaitUntilKeyPairExists(input *DescribeKeyPairsInput) error { + waiterCfg := waiter.Config{ + Operation: "DescribeKeyPairs", + Delay: 5, + MaxAttempts: 6, + Acceptors: []waiter.WaitAcceptor{ + { + State: "success", + Matcher: "pathAll", + Argument: "length(KeyPairs[].KeyName) > `0`", + Expected: true, + }, + { + State: "retry", + Matcher: "error", + Argument: "", + Expected: "InvalidKeyPairNotFound", + }, + }, + } + + w := waiter.Waiter{ + Client: c, + Input: input, + Config: waiterCfg, + } + return w.Wait() +} + +func (c *EC2) WaitUntilNetworkInterfaceAvailable(input *DescribeNetworkInterfacesInput) error { + waiterCfg := waiter.Config{ + Operation: "DescribeNetworkInterfaces", + Delay: 20, + MaxAttempts: 10, + Acceptors: []waiter.WaitAcceptor{ + { + State: "success", + Matcher: "pathAll", + Argument: "NetworkInterfaces[].Status", + Expected: "available", + }, + { + State: "failure", + Matcher: "error", + Argument: "", + Expected: "InvalidNetworkInterfaceIDNotFound", + }, + }, + } + + w := waiter.Waiter{ + Client: c, + Input: input, + Config: waiterCfg, + } + return w.Wait() +} + +func (c *EC2) WaitUntilPasswordDataAvailable(input *GetPasswordDataInput) error { + waiterCfg := waiter.Config{ + Operation: "GetPasswordData", + Delay: 15, + MaxAttempts: 40, + Acceptors: []waiter.WaitAcceptor{ + { + State: "success", + Matcher: "path", + Argument: "length(PasswordData) > `0`", + Expected: true, + }, + }, + } + + w := waiter.Waiter{ + Client: c, + Input: input, + Config: waiterCfg, + } + return w.Wait() +} + +func (c *EC2) WaitUntilSnapshotCompleted(input *DescribeSnapshotsInput) error { + waiterCfg := waiter.Config{ + Operation: "DescribeSnapshots", + Delay: 15, + MaxAttempts: 40, + Acceptors: []waiter.WaitAcceptor{ + { + State: "success", + Matcher: "pathAll", + Argument: "Snapshots[].State", + Expected: "completed", + }, + }, + } + + w := waiter.Waiter{ + Client: c, + Input: input, + Config: waiterCfg, + } + return w.Wait() +} + +func (c *EC2) WaitUntilSpotInstanceRequestFulfilled(input *DescribeSpotInstanceRequestsInput) error { + waiterCfg := waiter.Config{ + Operation: "DescribeSpotInstanceRequests", + Delay: 15, + MaxAttempts: 40, + Acceptors: []waiter.WaitAcceptor{ + { + State: "success", + Matcher: "pathAll", + Argument: "SpotInstanceRequests[].Status.Code", + Expected: "fulfilled", + }, + { + State: "failure", + Matcher: "pathAny", + Argument: "SpotInstanceRequests[].Status.Code", + Expected: "schedule-expired", + }, + { + State: "failure", + Matcher: "pathAny", + Argument: "SpotInstanceRequests[].Status.Code", + Expected: "canceled-before-fulfillment", + }, + { + State: "failure", + Matcher: "pathAny", + Argument: "SpotInstanceRequests[].Status.Code", + Expected: "bad-parameters", + }, + { + State: "failure", + Matcher: "pathAny", + Argument: "SpotInstanceRequests[].Status.Code", + Expected: "system-error", + }, + }, + } + + w := waiter.Waiter{ + Client: c, + Input: input, + Config: waiterCfg, + } + return w.Wait() +} + +func (c *EC2) WaitUntilSubnetAvailable(input *DescribeSubnetsInput) error { + waiterCfg := waiter.Config{ + Operation: 
"DescribeSubnets", + Delay: 15, + MaxAttempts: 40, + Acceptors: []waiter.WaitAcceptor{ + { + State: "success", + Matcher: "pathAll", + Argument: "Subnets[].State", + Expected: "available", + }, + }, + } + + w := waiter.Waiter{ + Client: c, + Input: input, + Config: waiterCfg, + } + return w.Wait() +} + +func (c *EC2) WaitUntilSystemStatusOk(input *DescribeInstanceStatusInput) error { + waiterCfg := waiter.Config{ + Operation: "DescribeInstanceStatus", + Delay: 15, + MaxAttempts: 40, + Acceptors: []waiter.WaitAcceptor{ + { + State: "success", + Matcher: "pathAll", + Argument: "InstanceStatuses[].SystemStatus.Status", + Expected: "ok", + }, + }, + } + + w := waiter.Waiter{ + Client: c, + Input: input, + Config: waiterCfg, + } + return w.Wait() +} + +func (c *EC2) WaitUntilVolumeAvailable(input *DescribeVolumesInput) error { + waiterCfg := waiter.Config{ + Operation: "DescribeVolumes", + Delay: 15, + MaxAttempts: 40, + Acceptors: []waiter.WaitAcceptor{ + { + State: "success", + Matcher: "pathAll", + Argument: "Volumes[].State", + Expected: "available", + }, + { + State: "failure", + Matcher: "pathAny", + Argument: "Volumes[].State", + Expected: "deleted", + }, + }, + } + + w := waiter.Waiter{ + Client: c, + Input: input, + Config: waiterCfg, + } + return w.Wait() +} + +func (c *EC2) WaitUntilVolumeDeleted(input *DescribeVolumesInput) error { + waiterCfg := waiter.Config{ + Operation: "DescribeVolumes", + Delay: 15, + MaxAttempts: 40, + Acceptors: []waiter.WaitAcceptor{ + { + State: "success", + Matcher: "pathAll", + Argument: "Volumes[].State", + Expected: "deleted", + }, + { + State: "success", + Matcher: "error", + Argument: "", + Expected: "InvalidVolumeNotFound", + }, + }, + } + + w := waiter.Waiter{ + Client: c, + Input: input, + Config: waiterCfg, + } + return w.Wait() +} + +func (c *EC2) WaitUntilVolumeInUse(input *DescribeVolumesInput) error { + waiterCfg := waiter.Config{ + Operation: "DescribeVolumes", + Delay: 15, + MaxAttempts: 40, + Acceptors: []waiter.WaitAcceptor{ + { + State: "success", + Matcher: "pathAll", + Argument: "Volumes[].State", + Expected: "in-use", + }, + { + State: "failure", + Matcher: "pathAny", + Argument: "Volumes[].State", + Expected: "deleted", + }, + }, + } + + w := waiter.Waiter{ + Client: c, + Input: input, + Config: waiterCfg, + } + return w.Wait() +} + +func (c *EC2) WaitUntilVpcAvailable(input *DescribeVpcsInput) error { + waiterCfg := waiter.Config{ + Operation: "DescribeVpcs", + Delay: 15, + MaxAttempts: 40, + Acceptors: []waiter.WaitAcceptor{ + { + State: "success", + Matcher: "pathAll", + Argument: "Vpcs[].State", + Expected: "available", + }, + }, + } + + w := waiter.Waiter{ + Client: c, + Input: input, + Config: waiterCfg, + } + return w.Wait() +} + +func (c *EC2) WaitUntilVpnConnectionAvailable(input *DescribeVpnConnectionsInput) error { + waiterCfg := waiter.Config{ + Operation: "DescribeVpnConnections", + Delay: 15, + MaxAttempts: 40, + Acceptors: []waiter.WaitAcceptor{ + { + State: "success", + Matcher: "pathAll", + Argument: "VpnConnections[].State", + Expected: "available", + }, + { + State: "failure", + Matcher: "pathAny", + Argument: "VpnConnections[].State", + Expected: "deleting", + }, + { + State: "failure", + Matcher: "pathAny", + Argument: "VpnConnections[].State", + Expected: "deleted", + }, + }, + } + + w := waiter.Waiter{ + Client: c, + Input: input, + Config: waiterCfg, + } + return w.Wait() +} + +func (c *EC2) WaitUntilVpnConnectionDeleted(input *DescribeVpnConnectionsInput) error { + waiterCfg := waiter.Config{ + Operation: 
"DescribeVpnConnections", + Delay: 15, + MaxAttempts: 40, + Acceptors: []waiter.WaitAcceptor{ + { + State: "success", + Matcher: "pathAll", + Argument: "VpnConnections[].State", + Expected: "deleted", + }, + { + State: "failure", + Matcher: "pathAny", + Argument: "VpnConnections[].State", + Expected: "pending", + }, + }, + } + + w := waiter.Waiter{ + Client: c, + Input: input, + Config: waiterCfg, + } + return w.Wait() +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/ecr/api.go b/vendor/github.com/aws/aws-sdk-go/service/ecr/api.go new file mode 100644 index 000000000..73bd81f8d --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/ecr/api.go @@ -0,0 +1,1433 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +// Package ecr provides a client for Amazon EC2 Container Registry. +package ecr + +import ( + "time" + + "github.com/aws/aws-sdk-go/aws/awsutil" + "github.com/aws/aws-sdk-go/aws/request" +) + +const opBatchCheckLayerAvailability = "BatchCheckLayerAvailability" + +// BatchCheckLayerAvailabilityRequest generates a request for the BatchCheckLayerAvailability operation. +func (c *ECR) BatchCheckLayerAvailabilityRequest(input *BatchCheckLayerAvailabilityInput) (req *request.Request, output *BatchCheckLayerAvailabilityOutput) { + op := &request.Operation{ + Name: opBatchCheckLayerAvailability, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &BatchCheckLayerAvailabilityInput{} + } + + req = c.newRequest(op, input, output) + output = &BatchCheckLayerAvailabilityOutput{} + req.Data = output + return +} + +// Check the availability of multiple image layers in a specified registry and +// repository. +// +// This operation is used by the Amazon ECR proxy, and it is not intended +// for general use by customers. Use the docker CLI to pull, tag, and push images. +func (c *ECR) BatchCheckLayerAvailability(input *BatchCheckLayerAvailabilityInput) (*BatchCheckLayerAvailabilityOutput, error) { + req, out := c.BatchCheckLayerAvailabilityRequest(input) + err := req.Send() + return out, err +} + +const opBatchDeleteImage = "BatchDeleteImage" + +// BatchDeleteImageRequest generates a request for the BatchDeleteImage operation. +func (c *ECR) BatchDeleteImageRequest(input *BatchDeleteImageInput) (req *request.Request, output *BatchDeleteImageOutput) { + op := &request.Operation{ + Name: opBatchDeleteImage, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &BatchDeleteImageInput{} + } + + req = c.newRequest(op, input, output) + output = &BatchDeleteImageOutput{} + req.Data = output + return +} + +// Deletes a list of specified images within a specified repository. Images +// are specified with either imageTag or imageDigest. +func (c *ECR) BatchDeleteImage(input *BatchDeleteImageInput) (*BatchDeleteImageOutput, error) { + req, out := c.BatchDeleteImageRequest(input) + err := req.Send() + return out, err +} + +const opBatchGetImage = "BatchGetImage" + +// BatchGetImageRequest generates a request for the BatchGetImage operation. +func (c *ECR) BatchGetImageRequest(input *BatchGetImageInput) (req *request.Request, output *BatchGetImageOutput) { + op := &request.Operation{ + Name: opBatchGetImage, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &BatchGetImageInput{} + } + + req = c.newRequest(op, input, output) + output = &BatchGetImageOutput{} + req.Data = output + return +} + +// Gets detailed information for specified images within a specified repository. 
+// Images are specified with either imageTag or imageDigest. +func (c *ECR) BatchGetImage(input *BatchGetImageInput) (*BatchGetImageOutput, error) { + req, out := c.BatchGetImageRequest(input) + err := req.Send() + return out, err +} + +const opCompleteLayerUpload = "CompleteLayerUpload" + +// CompleteLayerUploadRequest generates a request for the CompleteLayerUpload operation. +func (c *ECR) CompleteLayerUploadRequest(input *CompleteLayerUploadInput) (req *request.Request, output *CompleteLayerUploadOutput) { + op := &request.Operation{ + Name: opCompleteLayerUpload, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CompleteLayerUploadInput{} + } + + req = c.newRequest(op, input, output) + output = &CompleteLayerUploadOutput{} + req.Data = output + return +} + +// Inform Amazon ECR that the image layer upload for a specified registry, repository +// name, and upload ID, has completed. You can optionally provide a sha256 digest +// of the image layer for data validation purposes. +// +// This operation is used by the Amazon ECR proxy, and it is not intended +// for general use by customers. Use the docker CLI to pull, tag, and push images. +func (c *ECR) CompleteLayerUpload(input *CompleteLayerUploadInput) (*CompleteLayerUploadOutput, error) { + req, out := c.CompleteLayerUploadRequest(input) + err := req.Send() + return out, err +} + +const opCreateRepository = "CreateRepository" + +// CreateRepositoryRequest generates a request for the CreateRepository operation. +func (c *ECR) CreateRepositoryRequest(input *CreateRepositoryInput) (req *request.Request, output *CreateRepositoryOutput) { + op := &request.Operation{ + Name: opCreateRepository, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateRepositoryInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateRepositoryOutput{} + req.Data = output + return +} + +// Creates an image repository. +func (c *ECR) CreateRepository(input *CreateRepositoryInput) (*CreateRepositoryOutput, error) { + req, out := c.CreateRepositoryRequest(input) + err := req.Send() + return out, err +} + +const opDeleteRepository = "DeleteRepository" + +// DeleteRepositoryRequest generates a request for the DeleteRepository operation. +func (c *ECR) DeleteRepositoryRequest(input *DeleteRepositoryInput) (req *request.Request, output *DeleteRepositoryOutput) { + op := &request.Operation{ + Name: opDeleteRepository, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteRepositoryInput{} + } + + req = c.newRequest(op, input, output) + output = &DeleteRepositoryOutput{} + req.Data = output + return +} + +// Deletes an existing image repository. If a repository contains images, you +// must use the force option to delete it. +func (c *ECR) DeleteRepository(input *DeleteRepositoryInput) (*DeleteRepositoryOutput, error) { + req, out := c.DeleteRepositoryRequest(input) + err := req.Send() + return out, err +} + +const opDeleteRepositoryPolicy = "DeleteRepositoryPolicy" + +// DeleteRepositoryPolicyRequest generates a request for the DeleteRepositoryPolicy operation. 
+func (c *ECR) DeleteRepositoryPolicyRequest(input *DeleteRepositoryPolicyInput) (req *request.Request, output *DeleteRepositoryPolicyOutput) { + op := &request.Operation{ + Name: opDeleteRepositoryPolicy, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteRepositoryPolicyInput{} + } + + req = c.newRequest(op, input, output) + output = &DeleteRepositoryPolicyOutput{} + req.Data = output + return +} + +// Deletes the repository policy from a specified repository. +func (c *ECR) DeleteRepositoryPolicy(input *DeleteRepositoryPolicyInput) (*DeleteRepositoryPolicyOutput, error) { + req, out := c.DeleteRepositoryPolicyRequest(input) + err := req.Send() + return out, err +} + +const opDescribeRepositories = "DescribeRepositories" + +// DescribeRepositoriesRequest generates a request for the DescribeRepositories operation. +func (c *ECR) DescribeRepositoriesRequest(input *DescribeRepositoriesInput) (req *request.Request, output *DescribeRepositoriesOutput) { + op := &request.Operation{ + Name: opDescribeRepositories, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeRepositoriesInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeRepositoriesOutput{} + req.Data = output + return +} + +// Describes image repositories in a registry. +func (c *ECR) DescribeRepositories(input *DescribeRepositoriesInput) (*DescribeRepositoriesOutput, error) { + req, out := c.DescribeRepositoriesRequest(input) + err := req.Send() + return out, err +} + +const opGetAuthorizationToken = "GetAuthorizationToken" + +// GetAuthorizationTokenRequest generates a request for the GetAuthorizationToken operation. +func (c *ECR) GetAuthorizationTokenRequest(input *GetAuthorizationTokenInput) (req *request.Request, output *GetAuthorizationTokenOutput) { + op := &request.Operation{ + Name: opGetAuthorizationToken, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetAuthorizationTokenInput{} + } + + req = c.newRequest(op, input, output) + output = &GetAuthorizationTokenOutput{} + req.Data = output + return +} + +// Retrieves a token that is valid for a specified registry for 12 hours. This +// command allows you to use the docker CLI to push and pull images with Amazon +// ECR. If you do not specify a registry, the default registry is assumed. +// +// The authorizationToken returned for each registry specified is a base64 +// encoded string that can be decoded and used in a docker login command to +// authenticate to a registry. The AWS CLI offers an aws ecr get-login command +// that simplifies the login process. +func (c *ECR) GetAuthorizationToken(input *GetAuthorizationTokenInput) (*GetAuthorizationTokenOutput, error) { + req, out := c.GetAuthorizationTokenRequest(input) + err := req.Send() + return out, err +} + +const opGetDownloadUrlForLayer = "GetDownloadUrlForLayer" + +// GetDownloadUrlForLayerRequest generates a request for the GetDownloadUrlForLayer operation. +func (c *ECR) GetDownloadUrlForLayerRequest(input *GetDownloadUrlForLayerInput) (req *request.Request, output *GetDownloadUrlForLayerOutput) { + op := &request.Operation{ + Name: opGetDownloadUrlForLayer, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetDownloadUrlForLayerInput{} + } + + req = c.newRequest(op, input, output) + output = &GetDownloadUrlForLayerOutput{} + req.Data = output + return +} + +// Retrieves the pre-signed Amazon S3 download URL corresponding to an image +// layer. 
You can only get URLs for image layers that are referenced in an image. +// +// This operation is used by the Amazon ECR proxy, and it is not intended +// for general use by customers. Use the docker CLI to pull, tag, and push images. +func (c *ECR) GetDownloadUrlForLayer(input *GetDownloadUrlForLayerInput) (*GetDownloadUrlForLayerOutput, error) { + req, out := c.GetDownloadUrlForLayerRequest(input) + err := req.Send() + return out, err +} + +const opGetRepositoryPolicy = "GetRepositoryPolicy" + +// GetRepositoryPolicyRequest generates a request for the GetRepositoryPolicy operation. +func (c *ECR) GetRepositoryPolicyRequest(input *GetRepositoryPolicyInput) (req *request.Request, output *GetRepositoryPolicyOutput) { + op := &request.Operation{ + Name: opGetRepositoryPolicy, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetRepositoryPolicyInput{} + } + + req = c.newRequest(op, input, output) + output = &GetRepositoryPolicyOutput{} + req.Data = output + return +} + +// Retrieves the repository policy for a specified repository. +func (c *ECR) GetRepositoryPolicy(input *GetRepositoryPolicyInput) (*GetRepositoryPolicyOutput, error) { + req, out := c.GetRepositoryPolicyRequest(input) + err := req.Send() + return out, err +} + +const opInitiateLayerUpload = "InitiateLayerUpload" + +// InitiateLayerUploadRequest generates a request for the InitiateLayerUpload operation. +func (c *ECR) InitiateLayerUploadRequest(input *InitiateLayerUploadInput) (req *request.Request, output *InitiateLayerUploadOutput) { + op := &request.Operation{ + Name: opInitiateLayerUpload, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &InitiateLayerUploadInput{} + } + + req = c.newRequest(op, input, output) + output = &InitiateLayerUploadOutput{} + req.Data = output + return +} + +// Notify Amazon ECR that you intend to upload an image layer. +// +// This operation is used by the Amazon ECR proxy, and it is not intended +// for general use by customers. Use the docker CLI to pull, tag, and push images. +func (c *ECR) InitiateLayerUpload(input *InitiateLayerUploadInput) (*InitiateLayerUploadOutput, error) { + req, out := c.InitiateLayerUploadRequest(input) + err := req.Send() + return out, err +} + +const opListImages = "ListImages" + +// ListImagesRequest generates a request for the ListImages operation. +func (c *ECR) ListImagesRequest(input *ListImagesInput) (req *request.Request, output *ListImagesOutput) { + op := &request.Operation{ + Name: opListImages, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ListImagesInput{} + } + + req = c.newRequest(op, input, output) + output = &ListImagesOutput{} + req.Data = output + return +} + +// Lists all the image IDs for a given repository. +func (c *ECR) ListImages(input *ListImagesInput) (*ListImagesOutput, error) { + req, out := c.ListImagesRequest(input) + err := req.Send() + return out, err +} + +const opPutImage = "PutImage" + +// PutImageRequest generates a request for the PutImage operation. +func (c *ECR) PutImageRequest(input *PutImageInput) (req *request.Request, output *PutImageOutput) { + op := &request.Operation{ + Name: opPutImage, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &PutImageInput{} + } + + req = c.newRequest(op, input, output) + output = &PutImageOutput{} + req.Data = output + return +} + +// Creates or updates the image manifest associated with an image. 
+// +// This operation is used by the Amazon ECR proxy, and it is not intended +// for general use by customers. Use the docker CLI to pull, tag, and push images. +func (c *ECR) PutImage(input *PutImageInput) (*PutImageOutput, error) { + req, out := c.PutImageRequest(input) + err := req.Send() + return out, err +} + +const opSetRepositoryPolicy = "SetRepositoryPolicy" + +// SetRepositoryPolicyRequest generates a request for the SetRepositoryPolicy operation. +func (c *ECR) SetRepositoryPolicyRequest(input *SetRepositoryPolicyInput) (req *request.Request, output *SetRepositoryPolicyOutput) { + op := &request.Operation{ + Name: opSetRepositoryPolicy, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &SetRepositoryPolicyInput{} + } + + req = c.newRequest(op, input, output) + output = &SetRepositoryPolicyOutput{} + req.Data = output + return +} + +// Applies a repository policy on a specified repository to control access permissions. +func (c *ECR) SetRepositoryPolicy(input *SetRepositoryPolicyInput) (*SetRepositoryPolicyOutput, error) { + req, out := c.SetRepositoryPolicyRequest(input) + err := req.Send() + return out, err +} + +const opUploadLayerPart = "UploadLayerPart" + +// UploadLayerPartRequest generates a request for the UploadLayerPart operation. +func (c *ECR) UploadLayerPartRequest(input *UploadLayerPartInput) (req *request.Request, output *UploadLayerPartOutput) { + op := &request.Operation{ + Name: opUploadLayerPart, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UploadLayerPartInput{} + } + + req = c.newRequest(op, input, output) + output = &UploadLayerPartOutput{} + req.Data = output + return +} + +// Uploads an image layer part to Amazon ECR. +// +// This operation is used by the Amazon ECR proxy, and it is not intended +// for general use by customers. Use the docker CLI to pull, tag, and push images. +func (c *ECR) UploadLayerPart(input *UploadLayerPartInput) (*UploadLayerPartOutput, error) { + req, out := c.UploadLayerPartRequest(input) + err := req.Send() + return out, err +} + +// An object representing authorization data for an Amazon ECR registry. +type AuthorizationData struct { + _ struct{} `type:"structure"` + + // A base64-encoded string that contains authorization data for the specified + // Amazon ECR registry. When the string is decoded, it is presented in the format + // user:password for private registry authentication using docker login. + AuthorizationToken *string `locationName:"authorizationToken" type:"string"` + + // The Unix time in seconds and milliseconds when the authorization token expires. + // Authorization tokens are valid for 12 hours. + ExpiresAt *time.Time `locationName:"expiresAt" type:"timestamp" timestampFormat:"unix"` + + // The registry URL to use for this authorization token in a docker login command. + // The Amazon ECR registry URL format is https://aws_account_id.dkr.ecr.region.amazonaws.com. + // For example, https://012345678910.dkr.ecr.us-east-1.amazonaws.com. + ProxyEndpoint *string `locationName:"proxyEndpoint" type:"string"` +} + +// String returns the string representation +func (s AuthorizationData) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AuthorizationData) GoString() string { + return s.String() +} + +type BatchCheckLayerAvailabilityInput struct { + _ struct{} `type:"structure"` + + // The digests of the image layers to check. 
+ LayerDigests []*string `locationName:"layerDigests" min:"1" type:"list" required:"true"` + + // The AWS account ID associated with the registry that contains the image layers + // to check. If you do not specify a registry, the default registry is assumed. + RegistryId *string `locationName:"registryId" type:"string"` + + // The name of the repository that is associated with the image layers to check. + RepositoryName *string `locationName:"repositoryName" min:"2" type:"string" required:"true"` +} + +// String returns the string representation +func (s BatchCheckLayerAvailabilityInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s BatchCheckLayerAvailabilityInput) GoString() string { + return s.String() +} + +type BatchCheckLayerAvailabilityOutput struct { + _ struct{} `type:"structure"` + + // Any failures associated with the call. + Failures []*LayerFailure `locationName:"failures" type:"list"` + + // A list of image layer objects corresponding to the image layer references + // in the request. + Layers []*Layer `locationName:"layers" type:"list"` +} + +// String returns the string representation +func (s BatchCheckLayerAvailabilityOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s BatchCheckLayerAvailabilityOutput) GoString() string { + return s.String() +} + +// Deletes specified images within a specified repository. Images are specified +// with either the imageTag or imageDigest. +type BatchDeleteImageInput struct { + _ struct{} `type:"structure"` + + // A list of image ID references that correspond to images to delete. The format + // of the imageIds reference is imageTag=tag or imageDigest=digest. + ImageIds []*ImageIdentifier `locationName:"imageIds" min:"1" type:"list" required:"true"` + + // The AWS account ID associated with the registry that contains the image to + // delete. If you do not specify a registry, the default registry is assumed. + RegistryId *string `locationName:"registryId" type:"string"` + + // The repository that contains the image to delete. + RepositoryName *string `locationName:"repositoryName" min:"2" type:"string" required:"true"` +} + +// String returns the string representation +func (s BatchDeleteImageInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s BatchDeleteImageInput) GoString() string { + return s.String() +} + +type BatchDeleteImageOutput struct { + _ struct{} `type:"structure"` + + // Any failures associated with the call. + Failures []*ImageFailure `locationName:"failures" type:"list"` + + // The image IDs of the deleted images. + ImageIds []*ImageIdentifier `locationName:"imageIds" min:"1" type:"list"` +} + +// String returns the string representation +func (s BatchDeleteImageOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s BatchDeleteImageOutput) GoString() string { + return s.String() +} + +type BatchGetImageInput struct { + _ struct{} `type:"structure"` + + // A list of image ID references that correspond to images to describe. The + // format of the imageIds reference is imageTag=tag or imageDigest=digest. + ImageIds []*ImageIdentifier `locationName:"imageIds" min:"1" type:"list" required:"true"` + + // The AWS account ID associated with the registry that contains the images + // to describe. If you do not specify a registry, the default registry is assumed. 
+ RegistryId *string `locationName:"registryId" type:"string"` + + // The repository that contains the images to describe. + RepositoryName *string `locationName:"repositoryName" min:"2" type:"string" required:"true"` +} + +// String returns the string representation +func (s BatchGetImageInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s BatchGetImageInput) GoString() string { + return s.String() +} + +type BatchGetImageOutput struct { + _ struct{} `type:"structure"` + + // Any failures associated with the call. + Failures []*ImageFailure `locationName:"failures" type:"list"` + + // A list of image objects corresponding to the image references in the request. + Images []*Image `locationName:"images" type:"list"` +} + +// String returns the string representation +func (s BatchGetImageOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s BatchGetImageOutput) GoString() string { + return s.String() +} + +type CompleteLayerUploadInput struct { + _ struct{} `type:"structure"` + + // The sha256 digest of the image layer. + LayerDigests []*string `locationName:"layerDigests" min:"1" type:"list" required:"true"` + + // The AWS account ID associated with the registry to which to upload layers. + // If you do not specify a registry, the default registry is assumed. + RegistryId *string `locationName:"registryId" type:"string"` + + // The name of the repository to associate with the image layer. + RepositoryName *string `locationName:"repositoryName" min:"2" type:"string" required:"true"` + + // The upload ID from a previous InitiateLayerUpload operation to associate + // with the image layer. + UploadId *string `locationName:"uploadId" type:"string" required:"true"` +} + +// String returns the string representation +func (s CompleteLayerUploadInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CompleteLayerUploadInput) GoString() string { + return s.String() +} + +type CompleteLayerUploadOutput struct { + _ struct{} `type:"structure"` + + // The sha256 digest of the image layer. + LayerDigest *string `locationName:"layerDigest" type:"string"` + + // The registry ID associated with the request. + RegistryId *string `locationName:"registryId" type:"string"` + + // The repository name associated with the request. + RepositoryName *string `locationName:"repositoryName" min:"2" type:"string"` + + // The upload ID associated with the layer. + UploadId *string `locationName:"uploadId" type:"string"` +} + +// String returns the string representation +func (s CompleteLayerUploadOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CompleteLayerUploadOutput) GoString() string { + return s.String() +} + +type CreateRepositoryInput struct { + _ struct{} `type:"structure"` + + // The name to use for the repository. The repository name may be specified + // on its own (such as nginx-web-app) or it can be prepended with a namespace + // to group the repository into a category (such as project-a/nginx-web-app). 
+ RepositoryName *string `locationName:"repositoryName" min:"2" type:"string" required:"true"` +} + +// String returns the string representation +func (s CreateRepositoryInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateRepositoryInput) GoString() string { + return s.String() +} + +type CreateRepositoryOutput struct { + _ struct{} `type:"structure"` + + // Object representing a repository. + Repository *Repository `locationName:"repository" type:"structure"` +} + +// String returns the string representation +func (s CreateRepositoryOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateRepositoryOutput) GoString() string { + return s.String() +} + +type DeleteRepositoryInput struct { + _ struct{} `type:"structure"` + + // Force the deletion of the repository if it contains images. + Force *bool `locationName:"force" type:"boolean"` + + // The AWS account ID associated with the registry that contains the repository + // to delete. If you do not specify a registry, the default registry is assumed. + RegistryId *string `locationName:"registryId" type:"string"` + + // The name of the repository to delete. + RepositoryName *string `locationName:"repositoryName" min:"2" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteRepositoryInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteRepositoryInput) GoString() string { + return s.String() +} + +type DeleteRepositoryOutput struct { + _ struct{} `type:"structure"` + + // Object representing a repository. + Repository *Repository `locationName:"repository" type:"structure"` +} + +// String returns the string representation +func (s DeleteRepositoryOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteRepositoryOutput) GoString() string { + return s.String() +} + +type DeleteRepositoryPolicyInput struct { + _ struct{} `type:"structure"` + + // The AWS account ID associated with the registry that contains the repository + // policy to delete. If you do not specify a registry, the default registry + // is assumed. + RegistryId *string `locationName:"registryId" type:"string"` + + // The name of the repository that is associated with the repository policy + // to delete. + RepositoryName *string `locationName:"repositoryName" min:"2" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteRepositoryPolicyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteRepositoryPolicyInput) GoString() string { + return s.String() +} + +type DeleteRepositoryPolicyOutput struct { + _ struct{} `type:"structure"` + + // The JSON repository policy that was deleted from the repository. + PolicyText *string `locationName:"policyText" type:"string"` + + // The registry ID associated with the request. + RegistryId *string `locationName:"registryId" type:"string"` + + // The repository name associated with the request. 
+ RepositoryName *string `locationName:"repositoryName" min:"2" type:"string"` +} + +// String returns the string representation +func (s DeleteRepositoryPolicyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteRepositoryPolicyOutput) GoString() string { + return s.String() +} + +type DescribeRepositoriesInput struct { + _ struct{} `type:"structure"` + + // The maximum number of repository results returned by DescribeRepositories + // in paginated output. When this parameter is used, DescribeRepositories only + // returns maxResults results in a single page along with a nextToken response + // element. The remaining results of the initial request can be seen by sending + // another DescribeRepositories request with the returned nextToken value. This + // value can be between 1 and 100. If this parameter is not used, then DescribeRepositories + // returns up to 100 results and a nextToken value, if applicable. + MaxResults *int64 `locationName:"maxResults" min:"1" type:"integer"` + + // The nextToken value returned from a previous paginated DescribeRepositories + // request where maxResults was used and the results exceeded the value of that + // parameter. Pagination continues from the end of the previous results that + // returned the nextToken value. This value is null when there are no more results + // to return. + NextToken *string `locationName:"nextToken" type:"string"` + + // The AWS account ID associated with the registry that contains the repositories + // to be described. If you do not specify a registry, the default registry is + // assumed. + RegistryId *string `locationName:"registryId" type:"string"` + + // A list of repositories to describe. If this parameter is omitted, then all + // repositories in a registry are described. + RepositoryNames []*string `locationName:"repositoryNames" min:"1" type:"list"` +} + +// String returns the string representation +func (s DescribeRepositoriesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeRepositoriesInput) GoString() string { + return s.String() +} + +type DescribeRepositoriesOutput struct { + _ struct{} `type:"structure"` + + // The nextToken value to include in a future DescribeRepositories request. + // When the results of a DescribeRepositories request exceed maxResults, this + // value can be used to retrieve the next page of results. This value is null + // when there are no more results to return. + NextToken *string `locationName:"nextToken" type:"string"` + + // A list of repository objects corresponding to valid repositories. + Repositories []*Repository `locationName:"repositories" type:"list"` +} + +// String returns the string representation +func (s DescribeRepositoriesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeRepositoriesOutput) GoString() string { + return s.String() +} + +type GetAuthorizationTokenInput struct { + _ struct{} `type:"structure"` + + // A list of AWS account IDs that are associated with the registries for which + // to get authorization tokens. If you do not specify a registry, the default + // registry is assumed. 
+ RegistryIds []*string `locationName:"registryIds" min:"1" type:"list"` +} + +// String returns the string representation +func (s GetAuthorizationTokenInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetAuthorizationTokenInput) GoString() string { + return s.String() +} + +type GetAuthorizationTokenOutput struct { + _ struct{} `type:"structure"` + + // A list of authorization token data objects that correspond to the registryIds + // values in the request. + AuthorizationData []*AuthorizationData `locationName:"authorizationData" type:"list"` +} + +// String returns the string representation +func (s GetAuthorizationTokenOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetAuthorizationTokenOutput) GoString() string { + return s.String() +} + +type GetDownloadUrlForLayerInput struct { + _ struct{} `type:"structure"` + + // The digest of the image layer to download. + LayerDigest *string `locationName:"layerDigest" type:"string" required:"true"` + + // The AWS account ID associated with the registry that contains the image layer + // to download. If you do not specify a registry, the default registry is assumed. + RegistryId *string `locationName:"registryId" type:"string"` + + // The name of the repository that is associated with the image layer to download. + RepositoryName *string `locationName:"repositoryName" min:"2" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetDownloadUrlForLayerInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetDownloadUrlForLayerInput) GoString() string { + return s.String() +} + +type GetDownloadUrlForLayerOutput struct { + _ struct{} `type:"structure"` + + // The pre-signed Amazon S3 download URL for the requested layer. + DownloadUrl *string `locationName:"downloadUrl" type:"string"` + + // The digest of the image layer to download. + LayerDigest *string `locationName:"layerDigest" type:"string"` +} + +// String returns the string representation +func (s GetDownloadUrlForLayerOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetDownloadUrlForLayerOutput) GoString() string { + return s.String() +} + +type GetRepositoryPolicyInput struct { + _ struct{} `type:"structure"` + + // The AWS account ID associated with the registry that contains the repository. + // If you do not specify a registry, the default registry is assumed. + RegistryId *string `locationName:"registryId" type:"string"` + + // The name of the repository whose policy you want to retrieve. + RepositoryName *string `locationName:"repositoryName" min:"2" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetRepositoryPolicyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetRepositoryPolicyInput) GoString() string { + return s.String() +} + +type GetRepositoryPolicyOutput struct { + _ struct{} `type:"structure"` + + // The JSON repository policy text associated with the repository. + PolicyText *string `locationName:"policyText" type:"string"` + + // The registry ID associated with the request. + RegistryId *string `locationName:"registryId" type:"string"` + + // The repository name associated with the request. 
+ RepositoryName *string `locationName:"repositoryName" min:"2" type:"string"` +} + +// String returns the string representation +func (s GetRepositoryPolicyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetRepositoryPolicyOutput) GoString() string { + return s.String() +} + +// Object representing an image. +type Image struct { + _ struct{} `type:"structure"` + + // An object containing the image tag and image digest associated with an image. + ImageId *ImageIdentifier `locationName:"imageId" type:"structure"` + + // The image manifest associated with the image. + ImageManifest *string `locationName:"imageManifest" type:"string"` + + // The AWS account ID associated with the registry containing the image. + RegistryId *string `locationName:"registryId" type:"string"` + + // The name of the repository associated with the image. + RepositoryName *string `locationName:"repositoryName" min:"2" type:"string"` +} + +// String returns the string representation +func (s Image) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Image) GoString() string { + return s.String() +} + +type ImageFailure struct { + _ struct{} `type:"structure"` + + // The code associated with the failure. + FailureCode *string `locationName:"failureCode" type:"string" enum:"ImageFailureCode"` + + // The reason for the failure. + FailureReason *string `locationName:"failureReason" type:"string"` + + // The image ID associated with the failure. + ImageId *ImageIdentifier `locationName:"imageId" type:"structure"` +} + +// String returns the string representation +func (s ImageFailure) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ImageFailure) GoString() string { + return s.String() +} + +type ImageIdentifier struct { + _ struct{} `type:"structure"` + + // The sha256 digest of the image manifest. + ImageDigest *string `locationName:"imageDigest" type:"string"` + + // The tag used for the image. + ImageTag *string `locationName:"imageTag" type:"string"` +} + +// String returns the string representation +func (s ImageIdentifier) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ImageIdentifier) GoString() string { + return s.String() +} + +type InitiateLayerUploadInput struct { + _ struct{} `type:"structure"` + + // The AWS account ID associated with the registry that you intend to upload + // layers to. If you do not specify a registry, the default registry is assumed. + RegistryId *string `locationName:"registryId" type:"string"` + + // The name of the repository that you intend to upload layers to. + RepositoryName *string `locationName:"repositoryName" min:"2" type:"string" required:"true"` +} + +// String returns the string representation +func (s InitiateLayerUploadInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s InitiateLayerUploadInput) GoString() string { + return s.String() +} + +type InitiateLayerUploadOutput struct { + _ struct{} `type:"structure"` + + // The size, in bytes, that Amazon ECR expects future layer part uploads to + // be. + PartSize *int64 `locationName:"partSize" type:"long"` + + // The upload ID for the layer upload. This parameter is passed to further UploadLayerPart + // and CompleteLayerUpload operations. 
+ UploadId *string `locationName:"uploadId" type:"string"` +} + +// String returns the string representation +func (s InitiateLayerUploadOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s InitiateLayerUploadOutput) GoString() string { + return s.String() +} + +type Layer struct { + _ struct{} `type:"structure"` + + // The availability status of the image layer. Valid values are AVAILABLE and + // UNAVAILABLE. + LayerAvailability *string `locationName:"layerAvailability" type:"string" enum:"LayerAvailability"` + + // The sha256 digest of the image layer. + LayerDigest *string `locationName:"layerDigest" type:"string"` + + // The size, in bytes, of the image layer. + LayerSize *int64 `locationName:"layerSize" type:"long"` +} + +// String returns the string representation +func (s Layer) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Layer) GoString() string { + return s.String() +} + +type LayerFailure struct { + _ struct{} `type:"structure"` + + // The failure code associated with the failure. + FailureCode *string `locationName:"failureCode" type:"string" enum:"LayerFailureCode"` + + // The reason for the failure. + FailureReason *string `locationName:"failureReason" type:"string"` + + // The layer digest associated with the failure. + LayerDigest *string `locationName:"layerDigest" type:"string"` +} + +// String returns the string representation +func (s LayerFailure) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s LayerFailure) GoString() string { + return s.String() +} + +type ListImagesInput struct { + _ struct{} `type:"structure"` + + // The maximum number of image results returned by ListImages in paginated output. + // When this parameter is used, ListImages only returns maxResults results in + // a single page along with a nextToken response element. The remaining results + // of the initial request can be seen by sending another ListImages request + // with the returned nextToken value. This value can be between 1 and 100. If + // this parameter is not used, then ListImages returns up to 100 results and + // a nextToken value, if applicable. + MaxResults *int64 `locationName:"maxResults" min:"1" type:"integer"` + + // The nextToken value returned from a previous paginated ListImages request + // where maxResults was used and the results exceeded the value of that parameter. + // Pagination continues from the end of the previous results that returned the + // nextToken value. This value is null when there are no more results to return. + NextToken *string `locationName:"nextToken" type:"string"` + + // The AWS account ID associated with the registry that contains the repository + // to list images in. If you do not specify a registry, the default registry + // is assumed. + RegistryId *string `locationName:"registryId" type:"string"` + + // The repository whose image IDs are to be listed. + RepositoryName *string `locationName:"repositoryName" min:"2" type:"string" required:"true"` +} + +// String returns the string representation +func (s ListImagesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListImagesInput) GoString() string { + return s.String() +} + +type ListImagesOutput struct { + _ struct{} `type:"structure"` + + // The list of image IDs for the requested repository. 
+ ImageIds []*ImageIdentifier `locationName:"imageIds" min:"1" type:"list"` + + // The nextToken value to include in a future ListImages request. When the results + // of a ListImages request exceed maxResults, this value can be used to retrieve + // the next page of results. This value is null when there are no more results + // to return. + NextToken *string `locationName:"nextToken" type:"string"` +} + +// String returns the string representation +func (s ListImagesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListImagesOutput) GoString() string { + return s.String() +} + +type PutImageInput struct { + _ struct{} `type:"structure"` + + // The image manifest corresponding to the image to be uploaded. + ImageManifest *string `locationName:"imageManifest" type:"string" required:"true"` + + // The AWS account ID associated with the registry that contains the repository + // in which to put the image. If you do not specify a registry, the default + // registry is assumed. + RegistryId *string `locationName:"registryId" type:"string"` + + // The name of the repository in which to put the image. + RepositoryName *string `locationName:"repositoryName" min:"2" type:"string" required:"true"` +} + +// String returns the string representation +func (s PutImageInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutImageInput) GoString() string { + return s.String() +} + +type PutImageOutput struct { + _ struct{} `type:"structure"` + + // Details of the image uploaded. + Image *Image `locationName:"image" type:"structure"` +} + +// String returns the string representation +func (s PutImageOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutImageOutput) GoString() string { + return s.String() +} + +// Object representing a repository. +type Repository struct { + _ struct{} `type:"structure"` + + // The AWS account ID associated with the registry that contains the repository. + RegistryId *string `locationName:"registryId" type:"string"` + + // The Amazon Resource Name (ARN) that identifies the repository. The ARN contains + // the arn:aws:ecr namespace, followed by the region of the repository, the + // AWS account ID of the repository owner, the repository namespace, and then + // the repository name. For example, arn:aws:ecr:region:012345678910:repository/test. + RepositoryArn *string `locationName:"repositoryArn" type:"string"` + + // The name of the repository. + RepositoryName *string `locationName:"repositoryName" min:"2" type:"string"` +} + +// String returns the string representation +func (s Repository) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Repository) GoString() string { + return s.String() +} + +type SetRepositoryPolicyInput struct { + _ struct{} `type:"structure"` + + // If the policy you are attempting to set on a repository policy would prevent + // you from setting another policy in the future, you must force the SetRepositoryPolicy + // operation. This is intended to prevent accidental repository lock outs. + Force *bool `locationName:"force" type:"boolean"` + + // The JSON repository policy text to apply to the repository. + PolicyText *string `locationName:"policyText" type:"string" required:"true"` + + // The AWS account ID associated with the registry that contains the repository. 
+ // If you do not specify a registry, the default registry is assumed. + RegistryId *string `locationName:"registryId" type:"string"` + + // The name of the repository to receive the policy. + RepositoryName *string `locationName:"repositoryName" min:"2" type:"string" required:"true"` +} + +// String returns the string representation +func (s SetRepositoryPolicyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SetRepositoryPolicyInput) GoString() string { + return s.String() +} + +type SetRepositoryPolicyOutput struct { + _ struct{} `type:"structure"` + + // The JSON repository policy text applied to the repository. + PolicyText *string `locationName:"policyText" type:"string"` + + // The registry ID associated with the request. + RegistryId *string `locationName:"registryId" type:"string"` + + // The repository name associated with the request. + RepositoryName *string `locationName:"repositoryName" min:"2" type:"string"` +} + +// String returns the string representation +func (s SetRepositoryPolicyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SetRepositoryPolicyOutput) GoString() string { + return s.String() +} + +type UploadLayerPartInput struct { + _ struct{} `type:"structure"` + + // The base64-encoded layer part payload. + LayerPartBlob []byte `locationName:"layerPartBlob" type:"blob" required:"true"` + + // The integer value of the first byte of the layer part. + PartFirstByte *int64 `locationName:"partFirstByte" type:"long" required:"true"` + + // The integer value of the last byte of the layer part. + PartLastByte *int64 `locationName:"partLastByte" type:"long" required:"true"` + + // The AWS account ID associated with the registry that you are uploading layer + // parts to. If you do not specify a registry, the default registry is assumed. + RegistryId *string `locationName:"registryId" type:"string"` + + // The name of the repository that you are uploading layer parts to. + RepositoryName *string `locationName:"repositoryName" min:"2" type:"string" required:"true"` + + // The upload ID from a previous InitiateLayerUpload operation to associate + // with the layer part upload. + UploadId *string `locationName:"uploadId" type:"string" required:"true"` +} + +// String returns the string representation +func (s UploadLayerPartInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UploadLayerPartInput) GoString() string { + return s.String() +} + +type UploadLayerPartOutput struct { + _ struct{} `type:"structure"` + + // The integer value of the last byte received in the request. + LastByteReceived *int64 `locationName:"lastByteReceived" type:"long"` + + // The registry ID associated with the request. + RegistryId *string `locationName:"registryId" type:"string"` + + // The repository name associated with the request. + RepositoryName *string `locationName:"repositoryName" min:"2" type:"string"` + + // The upload ID associated with the request. 
+ UploadId *string `locationName:"uploadId" type:"string"` +} + +// String returns the string representation +func (s UploadLayerPartOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UploadLayerPartOutput) GoString() string { + return s.String() +} + +const ( + // @enum ImageFailureCode + ImageFailureCodeInvalidImageDigest = "InvalidImageDigest" + // @enum ImageFailureCode + ImageFailureCodeInvalidImageTag = "InvalidImageTag" + // @enum ImageFailureCode + ImageFailureCodeImageTagDoesNotMatchDigest = "ImageTagDoesNotMatchDigest" + // @enum ImageFailureCode + ImageFailureCodeImageNotFound = "ImageNotFound" + // @enum ImageFailureCode + ImageFailureCodeMissingDigestAndTag = "MissingDigestAndTag" +) + +const ( + // @enum LayerAvailability + LayerAvailabilityAvailable = "AVAILABLE" + // @enum LayerAvailability + LayerAvailabilityUnavailable = "UNAVAILABLE" +) + +const ( + // @enum LayerFailureCode + LayerFailureCodeInvalidLayerDigest = "InvalidLayerDigest" + // @enum LayerFailureCode + LayerFailureCodeMissingLayerDigest = "MissingLayerDigest" +) diff --git a/vendor/github.com/aws/aws-sdk-go/service/ecr/ecriface/interface.go b/vendor/github.com/aws/aws-sdk-go/service/ecr/ecriface/interface.go new file mode 100644 index 000000000..f98fda647 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/ecr/ecriface/interface.go @@ -0,0 +1,78 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +// Package ecriface provides an interface for the Amazon EC2 Container Registry. +package ecriface + +import ( + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/service/ecr" +) + +// ECRAPI is the interface type for ecr.ECR. +type ECRAPI interface { + BatchCheckLayerAvailabilityRequest(*ecr.BatchCheckLayerAvailabilityInput) (*request.Request, *ecr.BatchCheckLayerAvailabilityOutput) + + BatchCheckLayerAvailability(*ecr.BatchCheckLayerAvailabilityInput) (*ecr.BatchCheckLayerAvailabilityOutput, error) + + BatchDeleteImageRequest(*ecr.BatchDeleteImageInput) (*request.Request, *ecr.BatchDeleteImageOutput) + + BatchDeleteImage(*ecr.BatchDeleteImageInput) (*ecr.BatchDeleteImageOutput, error) + + BatchGetImageRequest(*ecr.BatchGetImageInput) (*request.Request, *ecr.BatchGetImageOutput) + + BatchGetImage(*ecr.BatchGetImageInput) (*ecr.BatchGetImageOutput, error) + + CompleteLayerUploadRequest(*ecr.CompleteLayerUploadInput) (*request.Request, *ecr.CompleteLayerUploadOutput) + + CompleteLayerUpload(*ecr.CompleteLayerUploadInput) (*ecr.CompleteLayerUploadOutput, error) + + CreateRepositoryRequest(*ecr.CreateRepositoryInput) (*request.Request, *ecr.CreateRepositoryOutput) + + CreateRepository(*ecr.CreateRepositoryInput) (*ecr.CreateRepositoryOutput, error) + + DeleteRepositoryRequest(*ecr.DeleteRepositoryInput) (*request.Request, *ecr.DeleteRepositoryOutput) + + DeleteRepository(*ecr.DeleteRepositoryInput) (*ecr.DeleteRepositoryOutput, error) + + DeleteRepositoryPolicyRequest(*ecr.DeleteRepositoryPolicyInput) (*request.Request, *ecr.DeleteRepositoryPolicyOutput) + + DeleteRepositoryPolicy(*ecr.DeleteRepositoryPolicyInput) (*ecr.DeleteRepositoryPolicyOutput, error) + + DescribeRepositoriesRequest(*ecr.DescribeRepositoriesInput) (*request.Request, *ecr.DescribeRepositoriesOutput) + + DescribeRepositories(*ecr.DescribeRepositoriesInput) (*ecr.DescribeRepositoriesOutput, error) + + GetAuthorizationTokenRequest(*ecr.GetAuthorizationTokenInput) (*request.Request, *ecr.GetAuthorizationTokenOutput) + + 
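The GetAuthorizationToken documentation above notes that the returned authorizationToken is a base64-encoded user:password string suitable for docker login. A minimal sketch of that decode step, assuming default credentials are available in the environment; the code relies only on the documented user:password shape:

    package main

    import (
        "encoding/base64"
        "fmt"
        "strings"

        "github.com/aws/aws-sdk-go/aws/session"
        "github.com/aws/aws-sdk-go/service/ecr"
    )

    func main() {
        svc := ecr.New(session.New())

        resp, err := svc.GetAuthorizationToken(&ecr.GetAuthorizationTokenInput{})
        if err != nil {
            fmt.Println(err)
            return
        }

        for _, data := range resp.AuthorizationData {
            decoded, err := base64.StdEncoding.DecodeString(*data.AuthorizationToken)
            if err != nil {
                fmt.Println(err)
                continue
            }
            // Documented format is user:password; split on the first colon only,
            // since the password itself may contain colons.
            parts := strings.SplitN(string(decoded), ":", 2)
            if len(parts) != 2 {
                continue
            }
            fmt.Printf("docker login -u %s -p <password elided> %s\n",
                parts[0], *data.ProxyEndpoint)
        }
    }

The aws ecr get-login command mentioned in the same comment performs this decoding on the caller's behalf.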
GetAuthorizationToken(*ecr.GetAuthorizationTokenInput) (*ecr.GetAuthorizationTokenOutput, error) + + GetDownloadUrlForLayerRequest(*ecr.GetDownloadUrlForLayerInput) (*request.Request, *ecr.GetDownloadUrlForLayerOutput) + + GetDownloadUrlForLayer(*ecr.GetDownloadUrlForLayerInput) (*ecr.GetDownloadUrlForLayerOutput, error) + + GetRepositoryPolicyRequest(*ecr.GetRepositoryPolicyInput) (*request.Request, *ecr.GetRepositoryPolicyOutput) + + GetRepositoryPolicy(*ecr.GetRepositoryPolicyInput) (*ecr.GetRepositoryPolicyOutput, error) + + InitiateLayerUploadRequest(*ecr.InitiateLayerUploadInput) (*request.Request, *ecr.InitiateLayerUploadOutput) + + InitiateLayerUpload(*ecr.InitiateLayerUploadInput) (*ecr.InitiateLayerUploadOutput, error) + + ListImagesRequest(*ecr.ListImagesInput) (*request.Request, *ecr.ListImagesOutput) + + ListImages(*ecr.ListImagesInput) (*ecr.ListImagesOutput, error) + + PutImageRequest(*ecr.PutImageInput) (*request.Request, *ecr.PutImageOutput) + + PutImage(*ecr.PutImageInput) (*ecr.PutImageOutput, error) + + SetRepositoryPolicyRequest(*ecr.SetRepositoryPolicyInput) (*request.Request, *ecr.SetRepositoryPolicyOutput) + + SetRepositoryPolicy(*ecr.SetRepositoryPolicyInput) (*ecr.SetRepositoryPolicyOutput, error) + + UploadLayerPartRequest(*ecr.UploadLayerPartInput) (*request.Request, *ecr.UploadLayerPartOutput) + + UploadLayerPart(*ecr.UploadLayerPartInput) (*ecr.UploadLayerPartOutput, error) +} + +var _ ECRAPI = (*ecr.ECR)(nil) diff --git a/vendor/github.com/aws/aws-sdk-go/service/ecr/examples_test.go b/vendor/github.com/aws/aws-sdk-go/service/ecr/examples_test.go new file mode 100644 index 000000000..95e088bae --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/ecr/examples_test.go @@ -0,0 +1,376 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +package ecr_test + +import ( + "bytes" + "fmt" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/session" + "github.com/aws/aws-sdk-go/service/ecr" +) + +var _ time.Duration +var _ bytes.Buffer + +func ExampleECR_BatchCheckLayerAvailability() { + svc := ecr.New(session.New()) + + params := &ecr.BatchCheckLayerAvailabilityInput{ + LayerDigests: []*string{ // Required + aws.String("BatchedOperationLayerDigest"), // Required + // More values... + }, + RepositoryName: aws.String("RepositoryName"), // Required + RegistryId: aws.String("RegistryId"), + } + resp, err := svc.BatchCheckLayerAvailability(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleECR_BatchDeleteImage() { + svc := ecr.New(session.New()) + + params := &ecr.BatchDeleteImageInput{ + ImageIds: []*ecr.ImageIdentifier{ // Required + { // Required + ImageDigest: aws.String("ImageDigest"), + ImageTag: aws.String("ImageTag"), + }, + // More values... + }, + RepositoryName: aws.String("RepositoryName"), // Required + RegistryId: aws.String("RegistryId"), + } + resp, err := svc.BatchDeleteImage(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
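The ECRAPI interface above, together with the var _ ECRAPI = (*ecr.ECR)(nil) assertion pinning the real client to it, lets consumers accept the interface rather than the concrete *ecr.ECR and swap in test doubles. A hypothetical sketch of that pattern; the function and stub names are invented for illustration:

    package myservice

    import (
        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/service/ecr"
        "github.com/aws/aws-sdk-go/service/ecr/ecriface"
    )

    // RepositoryNames lists repository names through any ECRAPI implementation.
    func RepositoryNames(api ecriface.ECRAPI) ([]string, error) {
        out, err := api.DescribeRepositories(&ecr.DescribeRepositoriesInput{})
        if err != nil {
            return nil, err
        }
        names := make([]string, 0, len(out.Repositories))
        for _, r := range out.Repositories {
            names = append(names, aws.StringValue(r.RepositoryName))
        }
        return names, nil
    }

    // stubECR overrides only DescribeRepositories for tests. Embedding the
    // interface satisfies ECRAPI; calling any method that is not overridden
    // panics on the nil embedded value, which is the usual trade-off of
    // this embedding trick.
    type stubECR struct {
        ecriface.ECRAPI
    }

    func (s stubECR) DescribeRepositories(*ecr.DescribeRepositoriesInput) (*ecr.DescribeRepositoriesOutput, error) {
        return &ecr.DescribeRepositoriesOutput{
            Repositories: []*ecr.Repository{
                {RepositoryName: aws.String("project-a/nginx-web-app")},
            },
        }, nil
    }

    // Compile-time check mirroring the one in the vendored package.
    var _ ecriface.ECRAPI = stubECR{}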
+ fmt.Println(resp) +} + +func ExampleECR_BatchGetImage() { + svc := ecr.New(session.New()) + + params := &ecr.BatchGetImageInput{ + ImageIds: []*ecr.ImageIdentifier{ // Required + { // Required + ImageDigest: aws.String("ImageDigest"), + ImageTag: aws.String("ImageTag"), + }, + // More values... + }, + RepositoryName: aws.String("RepositoryName"), // Required + RegistryId: aws.String("RegistryId"), + } + resp, err := svc.BatchGetImage(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleECR_CompleteLayerUpload() { + svc := ecr.New(session.New()) + + params := &ecr.CompleteLayerUploadInput{ + LayerDigests: []*string{ // Required + aws.String("LayerDigest"), // Required + // More values... + }, + RepositoryName: aws.String("RepositoryName"), // Required + UploadId: aws.String("UploadId"), // Required + RegistryId: aws.String("RegistryId"), + } + resp, err := svc.CompleteLayerUpload(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleECR_CreateRepository() { + svc := ecr.New(session.New()) + + params := &ecr.CreateRepositoryInput{ + RepositoryName: aws.String("RepositoryName"), // Required + } + resp, err := svc.CreateRepository(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleECR_DeleteRepository() { + svc := ecr.New(session.New()) + + params := &ecr.DeleteRepositoryInput{ + RepositoryName: aws.String("RepositoryName"), // Required + Force: aws.Bool(true), + RegistryId: aws.String("RegistryId"), + } + resp, err := svc.DeleteRepository(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleECR_DeleteRepositoryPolicy() { + svc := ecr.New(session.New()) + + params := &ecr.DeleteRepositoryPolicyInput{ + RepositoryName: aws.String("RepositoryName"), // Required + RegistryId: aws.String("RegistryId"), + } + resp, err := svc.DeleteRepositoryPolicy(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleECR_DescribeRepositories() { + svc := ecr.New(session.New()) + + params := &ecr.DescribeRepositoriesInput{ + MaxResults: aws.Int64(1), + NextToken: aws.String("NextToken"), + RegistryId: aws.String("RegistryId"), + RepositoryNames: []*string{ + aws.String("RepositoryName"), // Required + // More values... + }, + } + resp, err := svc.DescribeRepositories(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleECR_GetAuthorizationToken() { + svc := ecr.New(session.New()) + + params := &ecr.GetAuthorizationTokenInput{ + RegistryIds: []*string{ + aws.String("RegistryId"), // Required + // More values... 
+ }, + } + resp, err := svc.GetAuthorizationToken(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleECR_GetDownloadUrlForLayer() { + svc := ecr.New(session.New()) + + params := &ecr.GetDownloadUrlForLayerInput{ + LayerDigest: aws.String("LayerDigest"), // Required + RepositoryName: aws.String("RepositoryName"), // Required + RegistryId: aws.String("RegistryId"), + } + resp, err := svc.GetDownloadUrlForLayer(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleECR_GetRepositoryPolicy() { + svc := ecr.New(session.New()) + + params := &ecr.GetRepositoryPolicyInput{ + RepositoryName: aws.String("RepositoryName"), // Required + RegistryId: aws.String("RegistryId"), + } + resp, err := svc.GetRepositoryPolicy(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleECR_InitiateLayerUpload() { + svc := ecr.New(session.New()) + + params := &ecr.InitiateLayerUploadInput{ + RepositoryName: aws.String("RepositoryName"), // Required + RegistryId: aws.String("RegistryId"), + } + resp, err := svc.InitiateLayerUpload(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleECR_ListImages() { + svc := ecr.New(session.New()) + + params := &ecr.ListImagesInput{ + RepositoryName: aws.String("RepositoryName"), // Required + MaxResults: aws.Int64(1), + NextToken: aws.String("NextToken"), + RegistryId: aws.String("RegistryId"), + } + resp, err := svc.ListImages(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleECR_PutImage() { + svc := ecr.New(session.New()) + + params := &ecr.PutImageInput{ + ImageManifest: aws.String("ImageManifest"), // Required + RepositoryName: aws.String("RepositoryName"), // Required + RegistryId: aws.String("RegistryId"), + } + resp, err := svc.PutImage(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleECR_SetRepositoryPolicy() { + svc := ecr.New(session.New()) + + params := &ecr.SetRepositoryPolicyInput{ + PolicyText: aws.String("RepositoryPolicyText"), // Required + RepositoryName: aws.String("RepositoryName"), // Required + Force: aws.Bool(true), + RegistryId: aws.String("RegistryId"), + } + resp, err := svc.SetRepositoryPolicy(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+	fmt.Println(resp)
+}
+
+func ExampleECR_UploadLayerPart() {
+	svc := ecr.New(session.New())
+
+	params := &ecr.UploadLayerPartInput{
+		LayerPartBlob:  []byte("PAYLOAD"),            // Required
+		PartFirstByte:  aws.Int64(1),                 // Required
+		PartLastByte:   aws.Int64(1),                 // Required
+		RepositoryName: aws.String("RepositoryName"), // Required
+		UploadId:       aws.String("UploadId"),       // Required
+		RegistryId:     aws.String("RegistryId"),
+	}
+	resp, err := svc.UploadLayerPart(params)
+
+	if err != nil {
+		// Print the error, cast err to awserr.Error to get the Code and
+		// Message from an error.
+		fmt.Println(err.Error())
+		return
+	}
+
+	// Pretty-print the response data.
+	fmt.Println(resp)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/service/ecr/service.go b/vendor/github.com/aws/aws-sdk-go/service/ecr/service.go
new file mode 100644
index 000000000..f6ca31c2a
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/ecr/service.go
@@ -0,0 +1,93 @@
+// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT.
+
+package ecr
+
+import (
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/aws/client"
+	"github.com/aws/aws-sdk-go/aws/client/metadata"
+	"github.com/aws/aws-sdk-go/aws/request"
+	"github.com/aws/aws-sdk-go/private/protocol/jsonrpc"
+	"github.com/aws/aws-sdk-go/private/signer/v4"
+)
+
+// Amazon EC2 Container Registry (Amazon ECR) is a managed AWS Docker registry
+// service. Customers can use the familiar Docker CLI to push, pull, and manage
+// images. Amazon ECR provides a secure, scalable, and reliable registry. Amazon
+// ECR supports private Docker repositories with resource-based permissions
+// using AWS IAM so that specific users or Amazon EC2 instances can access repositories
+// and images. Developers can use the Docker CLI to author and manage images.
+// The service client's operations are safe to be used concurrently.
+// It is not safe to mutate any of the client's properties though.
+type ECR struct {
+	*client.Client
+}
+
+// Used for custom client initialization logic
+var initClient func(*client.Client)
+
+// Used for custom request initialization logic
+var initRequest func(*request.Request)
+
+// A ServiceName is the name of the service the client will make API calls to.
+const ServiceName = "ecr"
+
+// New creates a new instance of the ECR client with a session.
+// If additional configuration is needed for the client instance, use the optional
+// aws.Config parameter to add your extra config.
+//
+// Example:
+//     // Create an ECR client from just a session.
+//     svc := ecr.New(mySession)
+//
+//     // Create an ECR client with additional configuration
+//     svc := ecr.New(mySession, aws.NewConfig().WithRegion("us-west-2"))
+func New(p client.ConfigProvider, cfgs ...*aws.Config) *ECR {
+	c := p.ClientConfig(ServiceName, cfgs...)
+	return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion)
+}
+
+// newClient creates, initializes and returns a new service client instance.
+func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *ECR { + svc := &ECR{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: ServiceName, + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "2015-09-21", + JSONVersion: "1.1", + TargetPrefix: "AmazonEC2ContainerRegistry_V20150921", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Build.PushBack(jsonrpc.Build) + svc.Handlers.Unmarshal.PushBack(jsonrpc.Unmarshal) + svc.Handlers.UnmarshalMeta.PushBack(jsonrpc.UnmarshalMeta) + svc.Handlers.UnmarshalError.PushBack(jsonrpc.UnmarshalError) + + // Run custom client initialization if present + if initClient != nil { + initClient(svc.Client) + } + + return svc +} + +// newRequest creates a new request for a ECR operation and runs any +// custom request initialization. +func (c *ECR) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + // Run custom request initialization if present + if initRequest != nil { + initRequest(req) + } + + return req +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/ecs/api.go b/vendor/github.com/aws/aws-sdk-go/service/ecs/api.go new file mode 100644 index 000000000..18398532c --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/ecs/api.go @@ -0,0 +1,3638 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +// Package ecs provides a client for Amazon EC2 Container Service. +package ecs + +import ( + "time" + + "github.com/aws/aws-sdk-go/aws/awsutil" + "github.com/aws/aws-sdk-go/aws/request" +) + +const opCreateCluster = "CreateCluster" + +// CreateClusterRequest generates a request for the CreateCluster operation. +func (c *ECS) CreateClusterRequest(input *CreateClusterInput) (req *request.Request, output *CreateClusterOutput) { + op := &request.Operation{ + Name: opCreateCluster, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateClusterInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateClusterOutput{} + req.Data = output + return +} + +// Creates a new Amazon ECS cluster. By default, your account receives a default +// cluster when you launch your first container instance. However, you can create +// your own cluster with a unique name with the CreateCluster action. +func (c *ECS) CreateCluster(input *CreateClusterInput) (*CreateClusterOutput, error) { + req, out := c.CreateClusterRequest(input) + err := req.Send() + return out, err +} + +const opCreateService = "CreateService" + +// CreateServiceRequest generates a request for the CreateService operation. +func (c *ECS) CreateServiceRequest(input *CreateServiceInput) (req *request.Request, output *CreateServiceOutput) { + op := &request.Operation{ + Name: opCreateService, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateServiceInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateServiceOutput{} + req.Data = output + return +} + +// Runs and maintains a desired number of tasks from a specified task definition. +// If the number of tasks running in a service drops below desiredCount, Amazon +// ECS spawns another instantiation of the task in the specified cluster. To +// update an existing service, see UpdateService. +// +// You can optionally specify a deployment configuration for your service. 
+// During a deployment (which is triggered by changing the task definition of
+// a service with an UpdateService operation), the service scheduler uses the
+// minimumHealthyPercent and maximumPercent parameters to determine the deployment
+// strategy.
+//
+// If the minimumHealthyPercent is below 100%, the scheduler can ignore the
+// desiredCount temporarily during a deployment. For example, if your service
+// has a desiredCount of four tasks, a minimumHealthyPercent of 50% allows the
+// scheduler to stop two existing tasks before starting two new tasks. Tasks
+// for services that do not use a load balancer are considered healthy if they
+// are in the RUNNING state; tasks for services that do use a load balancer
+// are considered healthy if they are in the RUNNING state and the container
+// instances they are hosted on are reported as healthy by the load balancer. The
+// default value for minimumHealthyPercent is 50% in the console and 100% for
+// the AWS CLI, the AWS SDKs, and the APIs.
+//
+// The maximumPercent parameter represents an upper limit on the number of
+// running tasks during a deployment, which enables you to define the deployment
+// batch size. For example, if your service has a desiredCount of four tasks,
+// a maximumPercent value of 200% starts four new tasks before stopping the
+// four older tasks (provided that the cluster resources required to do this
+// are available). The default value for maximumPercent is 200%.
+//
+// When the service scheduler launches new tasks, it attempts to balance them
+// across the Availability Zones in your cluster with the following logic:
+//
+// Determine which of the container instances in your cluster can support
+// your service's task definition (for example, they have the required CPU,
+// memory, ports, and container instance attributes).
+//
+// Sort the valid container instances by the fewest number of running tasks
+// for this service in the same Availability Zone as the instance. For example,
+// if zone A has one running service task and zones B and C each have zero,
+// valid container instances in either zone B or C are considered optimal for
+// placement.
+//
+// Place the new service task on a valid container instance in an optimal
+// Availability Zone (based on the previous steps), favoring container instances
+// with the fewest number of running tasks for this service.
+func (c *ECS) CreateService(input *CreateServiceInput) (*CreateServiceOutput, error) {
+	req, out := c.CreateServiceRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+const opDeleteCluster = "DeleteCluster"
+
+// DeleteClusterRequest generates a request for the DeleteCluster operation.
+func (c *ECS) DeleteClusterRequest(input *DeleteClusterInput) (req *request.Request, output *DeleteClusterOutput) {
+	op := &request.Operation{
+		Name:       opDeleteCluster,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &DeleteClusterInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &DeleteClusterOutput{}
+	req.Data = output
+	return
+}
+
+// Deletes the specified cluster. You must deregister all container instances
+// from this cluster before you may delete it. You can list the container instances
+// in a cluster with ListContainerInstances and deregister them with DeregisterContainerInstance.
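+//
+// A minimal usage sketch, in the style of the generated examples earlier in
+// this patch; the cluster name "my-cluster" is a placeholder, not a value from
+// this SDK:
+//
+//    svc := ecs.New(session.New())
+//    resp, err := svc.DeleteCluster(&ecs.DeleteClusterInput{
+//        Cluster: aws.String("my-cluster"), // hypothetical cluster name or ARN
+//    })
+//    if err != nil {
+//        fmt.Println(err.Error())
+//        return
+//    }
+//    fmt.Println(resp)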
+func (c *ECS) DeleteCluster(input *DeleteClusterInput) (*DeleteClusterOutput, error) { + req, out := c.DeleteClusterRequest(input) + err := req.Send() + return out, err +} + +const opDeleteService = "DeleteService" + +// DeleteServiceRequest generates a request for the DeleteService operation. +func (c *ECS) DeleteServiceRequest(input *DeleteServiceInput) (req *request.Request, output *DeleteServiceOutput) { + op := &request.Operation{ + Name: opDeleteService, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteServiceInput{} + } + + req = c.newRequest(op, input, output) + output = &DeleteServiceOutput{} + req.Data = output + return +} + +// Deletes a specified service within a cluster. You can delete a service if +// you have no running tasks in it and the desired task count is zero. If the +// service is actively maintaining tasks, you cannot delete it, and you must +// update the service to a desired task count of zero. For more information, +// see UpdateService. +// +// When you delete a service, if there are still running tasks that require +// cleanup, the service status moves from ACTIVE to DRAINING, and the service +// is no longer visible in the console or in ListServices API operations. After +// the tasks have stopped, then the service status moves from DRAINING to INACTIVE. +// Services in the DRAINING or INACTIVE status can still be viewed with DescribeServices +// API operations; however, in the future, INACTIVE services may be cleaned +// up and purged from Amazon ECS record keeping, and DescribeServices API operations +// on those services will return a ServiceNotFoundException error. +func (c *ECS) DeleteService(input *DeleteServiceInput) (*DeleteServiceOutput, error) { + req, out := c.DeleteServiceRequest(input) + err := req.Send() + return out, err +} + +const opDeregisterContainerInstance = "DeregisterContainerInstance" + +// DeregisterContainerInstanceRequest generates a request for the DeregisterContainerInstance operation. +func (c *ECS) DeregisterContainerInstanceRequest(input *DeregisterContainerInstanceInput) (req *request.Request, output *DeregisterContainerInstanceOutput) { + op := &request.Operation{ + Name: opDeregisterContainerInstance, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeregisterContainerInstanceInput{} + } + + req = c.newRequest(op, input, output) + output = &DeregisterContainerInstanceOutput{} + req.Data = output + return +} + +// Deregisters an Amazon ECS container instance from the specified cluster. +// This instance is no longer available to run tasks. +// +// If you intend to use the container instance for some other purpose after +// deregistration, you should stop all of the tasks running on the container +// instance before deregistration to avoid any orphaned tasks from consuming +// resources. +// +// Deregistering a container instance removes the instance from a cluster, +// but it does not terminate the EC2 instance; if you are finished using the +// instance, be sure to terminate it in the Amazon EC2 console to stop billing. +// +// When you terminate a container instance, it is automatically deregistered +// from your cluster. 
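+//
+// A minimal usage sketch; the cluster name and container instance identifier
+// below are placeholders:
+//
+//    svc := ecs.New(session.New())
+//    resp, err := svc.DeregisterContainerInstance(&ecs.DeregisterContainerInstanceInput{
+//        ContainerInstance: aws.String("container-instance-id"), // Required; ID or full ARN
+//        Cluster:           aws.String("default"),
+//        Force:             aws.Bool(true), // deregister even if tasks are still running
+//    })
+//    if err != nil {
+//        fmt.Println(err.Error())
+//        return
+//    }
+//    fmt.Println(resp)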
+func (c *ECS) DeregisterContainerInstance(input *DeregisterContainerInstanceInput) (*DeregisterContainerInstanceOutput, error) { + req, out := c.DeregisterContainerInstanceRequest(input) + err := req.Send() + return out, err +} + +const opDeregisterTaskDefinition = "DeregisterTaskDefinition" + +// DeregisterTaskDefinitionRequest generates a request for the DeregisterTaskDefinition operation. +func (c *ECS) DeregisterTaskDefinitionRequest(input *DeregisterTaskDefinitionInput) (req *request.Request, output *DeregisterTaskDefinitionOutput) { + op := &request.Operation{ + Name: opDeregisterTaskDefinition, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeregisterTaskDefinitionInput{} + } + + req = c.newRequest(op, input, output) + output = &DeregisterTaskDefinitionOutput{} + req.Data = output + return +} + +// Deregisters the specified task definition by family and revision. Upon deregistration, +// the task definition is marked as INACTIVE. Existing tasks and services that +// reference an INACTIVE task definition continue to run without disruption. +// Existing services that reference an INACTIVE task definition can still scale +// up or down by modifying the service's desired count. +// +// You cannot use an INACTIVE task definition to run new tasks or create new +// services, and you cannot update an existing service to reference an INACTIVE +// task definition (although there may be up to a 10 minute window following +// deregistration where these restrictions have not yet taken effect). +func (c *ECS) DeregisterTaskDefinition(input *DeregisterTaskDefinitionInput) (*DeregisterTaskDefinitionOutput, error) { + req, out := c.DeregisterTaskDefinitionRequest(input) + err := req.Send() + return out, err +} + +const opDescribeClusters = "DescribeClusters" + +// DescribeClustersRequest generates a request for the DescribeClusters operation. +func (c *ECS) DescribeClustersRequest(input *DescribeClustersInput) (req *request.Request, output *DescribeClustersOutput) { + op := &request.Operation{ + Name: opDescribeClusters, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeClustersInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeClustersOutput{} + req.Data = output + return +} + +// Describes one or more of your clusters. +func (c *ECS) DescribeClusters(input *DescribeClustersInput) (*DescribeClustersOutput, error) { + req, out := c.DescribeClustersRequest(input) + err := req.Send() + return out, err +} + +const opDescribeContainerInstances = "DescribeContainerInstances" + +// DescribeContainerInstancesRequest generates a request for the DescribeContainerInstances operation. +func (c *ECS) DescribeContainerInstancesRequest(input *DescribeContainerInstancesInput) (req *request.Request, output *DescribeContainerInstancesOutput) { + op := &request.Operation{ + Name: opDescribeContainerInstances, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeContainerInstancesInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeContainerInstancesOutput{} + req.Data = output + return +} + +// Describes Amazon EC2 Container Service container instances. Returns metadata +// about registered and remaining resources on each container instance requested. 
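+//
+// A minimal usage sketch; the identifiers below are placeholders:
+//
+//    svc := ecs.New(session.New())
+//    resp, err := svc.DescribeContainerInstances(&ecs.DescribeContainerInstancesInput{
+//        ContainerInstances: []*string{aws.String("container-instance-id")}, // Required
+//        Cluster:            aws.String("default"),
+//    })
+//    if err != nil {
+//        fmt.Println(err.Error())
+//        return
+//    }
+//    fmt.Println(resp)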
+func (c *ECS) DescribeContainerInstances(input *DescribeContainerInstancesInput) (*DescribeContainerInstancesOutput, error) { + req, out := c.DescribeContainerInstancesRequest(input) + err := req.Send() + return out, err +} + +const opDescribeServices = "DescribeServices" + +// DescribeServicesRequest generates a request for the DescribeServices operation. +func (c *ECS) DescribeServicesRequest(input *DescribeServicesInput) (req *request.Request, output *DescribeServicesOutput) { + op := &request.Operation{ + Name: opDescribeServices, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeServicesInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeServicesOutput{} + req.Data = output + return +} + +// Describes the specified services running in your cluster. +func (c *ECS) DescribeServices(input *DescribeServicesInput) (*DescribeServicesOutput, error) { + req, out := c.DescribeServicesRequest(input) + err := req.Send() + return out, err +} + +const opDescribeTaskDefinition = "DescribeTaskDefinition" + +// DescribeTaskDefinitionRequest generates a request for the DescribeTaskDefinition operation. +func (c *ECS) DescribeTaskDefinitionRequest(input *DescribeTaskDefinitionInput) (req *request.Request, output *DescribeTaskDefinitionOutput) { + op := &request.Operation{ + Name: opDescribeTaskDefinition, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeTaskDefinitionInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeTaskDefinitionOutput{} + req.Data = output + return +} + +// Describes a task definition. You can specify a family and revision to find +// information about a specific task definition, or you can simply specify the +// family to find the latest ACTIVE revision in that family. +// +// You can only describe INACTIVE task definitions while an active task or +// service references them. +func (c *ECS) DescribeTaskDefinition(input *DescribeTaskDefinitionInput) (*DescribeTaskDefinitionOutput, error) { + req, out := c.DescribeTaskDefinitionRequest(input) + err := req.Send() + return out, err +} + +const opDescribeTasks = "DescribeTasks" + +// DescribeTasksRequest generates a request for the DescribeTasks operation. +func (c *ECS) DescribeTasksRequest(input *DescribeTasksInput) (req *request.Request, output *DescribeTasksOutput) { + op := &request.Operation{ + Name: opDescribeTasks, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeTasksInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeTasksOutput{} + req.Data = output + return +} + +// Describes a specified task or tasks. +func (c *ECS) DescribeTasks(input *DescribeTasksInput) (*DescribeTasksOutput, error) { + req, out := c.DescribeTasksRequest(input) + err := req.Send() + return out, err +} + +const opDiscoverPollEndpoint = "DiscoverPollEndpoint" + +// DiscoverPollEndpointRequest generates a request for the DiscoverPollEndpoint operation. 
+func (c *ECS) DiscoverPollEndpointRequest(input *DiscoverPollEndpointInput) (req *request.Request, output *DiscoverPollEndpointOutput) { + op := &request.Operation{ + Name: opDiscoverPollEndpoint, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DiscoverPollEndpointInput{} + } + + req = c.newRequest(op, input, output) + output = &DiscoverPollEndpointOutput{} + req.Data = output + return +} + +// This action is only used by the Amazon EC2 Container Service agent, and it +// is not intended for use outside of the agent. +// +// Returns an endpoint for the Amazon EC2 Container Service agent to poll for +// updates. +func (c *ECS) DiscoverPollEndpoint(input *DiscoverPollEndpointInput) (*DiscoverPollEndpointOutput, error) { + req, out := c.DiscoverPollEndpointRequest(input) + err := req.Send() + return out, err +} + +const opListClusters = "ListClusters" + +// ListClustersRequest generates a request for the ListClusters operation. +func (c *ECS) ListClustersRequest(input *ListClustersInput) (req *request.Request, output *ListClustersOutput) { + op := &request.Operation{ + Name: opListClusters, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"nextToken"}, + OutputTokens: []string{"nextToken"}, + LimitToken: "maxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListClustersInput{} + } + + req = c.newRequest(op, input, output) + output = &ListClustersOutput{} + req.Data = output + return +} + +// Returns a list of existing clusters. +func (c *ECS) ListClusters(input *ListClustersInput) (*ListClustersOutput, error) { + req, out := c.ListClustersRequest(input) + err := req.Send() + return out, err +} + +func (c *ECS) ListClustersPages(input *ListClustersInput, fn func(p *ListClustersOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.ListClustersRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*ListClustersOutput), lastPage) + }) +} + +const opListContainerInstances = "ListContainerInstances" + +// ListContainerInstancesRequest generates a request for the ListContainerInstances operation. +func (c *ECS) ListContainerInstancesRequest(input *ListContainerInstancesInput) (req *request.Request, output *ListContainerInstancesOutput) { + op := &request.Operation{ + Name: opListContainerInstances, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"nextToken"}, + OutputTokens: []string{"nextToken"}, + LimitToken: "maxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListContainerInstancesInput{} + } + + req = c.newRequest(op, input, output) + output = &ListContainerInstancesOutput{} + req.Data = output + return +} + +// Returns a list of container instances in a specified cluster. 
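+//
+// Because this operation is paginated, a sketch using the ListContainerInstancesPages
+// helper defined below may be more useful than a single call; the cluster name
+// is a placeholder:
+//
+//    svc := ecs.New(session.New())
+//    err := svc.ListContainerInstancesPages(&ecs.ListContainerInstancesInput{
+//        Cluster: aws.String("default"),
+//    }, func(page *ecs.ListContainerInstancesOutput, lastPage bool) bool {
+//        fmt.Println(page.ContainerInstanceArns)
+//        return true // keep paging; EachPage stops after the last page
+//    })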
+func (c *ECS) ListContainerInstances(input *ListContainerInstancesInput) (*ListContainerInstancesOutput, error) { + req, out := c.ListContainerInstancesRequest(input) + err := req.Send() + return out, err +} + +func (c *ECS) ListContainerInstancesPages(input *ListContainerInstancesInput, fn func(p *ListContainerInstancesOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.ListContainerInstancesRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*ListContainerInstancesOutput), lastPage) + }) +} + +const opListServices = "ListServices" + +// ListServicesRequest generates a request for the ListServices operation. +func (c *ECS) ListServicesRequest(input *ListServicesInput) (req *request.Request, output *ListServicesOutput) { + op := &request.Operation{ + Name: opListServices, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"nextToken"}, + OutputTokens: []string{"nextToken"}, + LimitToken: "maxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListServicesInput{} + } + + req = c.newRequest(op, input, output) + output = &ListServicesOutput{} + req.Data = output + return +} + +// Lists the services that are running in a specified cluster. +func (c *ECS) ListServices(input *ListServicesInput) (*ListServicesOutput, error) { + req, out := c.ListServicesRequest(input) + err := req.Send() + return out, err +} + +func (c *ECS) ListServicesPages(input *ListServicesInput, fn func(p *ListServicesOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.ListServicesRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*ListServicesOutput), lastPage) + }) +} + +const opListTaskDefinitionFamilies = "ListTaskDefinitionFamilies" + +// ListTaskDefinitionFamiliesRequest generates a request for the ListTaskDefinitionFamilies operation. +func (c *ECS) ListTaskDefinitionFamiliesRequest(input *ListTaskDefinitionFamiliesInput) (req *request.Request, output *ListTaskDefinitionFamiliesOutput) { + op := &request.Operation{ + Name: opListTaskDefinitionFamilies, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"nextToken"}, + OutputTokens: []string{"nextToken"}, + LimitToken: "maxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListTaskDefinitionFamiliesInput{} + } + + req = c.newRequest(op, input, output) + output = &ListTaskDefinitionFamiliesOutput{} + req.Data = output + return +} + +// Returns a list of task definition families that are registered to your account +// (which may include task definition families that no longer have any ACTIVE +// task definitions). You can filter the results with the familyPrefix parameter. 
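+//
+// A minimal usage sketch; the family prefix "my-family" is a placeholder:
+//
+//    svc := ecs.New(session.New())
+//    resp, err := svc.ListTaskDefinitionFamilies(&ecs.ListTaskDefinitionFamiliesInput{
+//        FamilyPrefix: aws.String("my-family"), // hypothetical prefix
+//    })
+//    if err != nil {
+//        fmt.Println(err.Error())
+//        return
+//    }
+//    fmt.Println(resp.Families)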
+func (c *ECS) ListTaskDefinitionFamilies(input *ListTaskDefinitionFamiliesInput) (*ListTaskDefinitionFamiliesOutput, error) { + req, out := c.ListTaskDefinitionFamiliesRequest(input) + err := req.Send() + return out, err +} + +func (c *ECS) ListTaskDefinitionFamiliesPages(input *ListTaskDefinitionFamiliesInput, fn func(p *ListTaskDefinitionFamiliesOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.ListTaskDefinitionFamiliesRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*ListTaskDefinitionFamiliesOutput), lastPage) + }) +} + +const opListTaskDefinitions = "ListTaskDefinitions" + +// ListTaskDefinitionsRequest generates a request for the ListTaskDefinitions operation. +func (c *ECS) ListTaskDefinitionsRequest(input *ListTaskDefinitionsInput) (req *request.Request, output *ListTaskDefinitionsOutput) { + op := &request.Operation{ + Name: opListTaskDefinitions, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"nextToken"}, + OutputTokens: []string{"nextToken"}, + LimitToken: "maxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListTaskDefinitionsInput{} + } + + req = c.newRequest(op, input, output) + output = &ListTaskDefinitionsOutput{} + req.Data = output + return +} + +// Returns a list of task definitions that are registered to your account. You +// can filter the results by family name with the familyPrefix parameter or +// by status with the status parameter. +func (c *ECS) ListTaskDefinitions(input *ListTaskDefinitionsInput) (*ListTaskDefinitionsOutput, error) { + req, out := c.ListTaskDefinitionsRequest(input) + err := req.Send() + return out, err +} + +func (c *ECS) ListTaskDefinitionsPages(input *ListTaskDefinitionsInput, fn func(p *ListTaskDefinitionsOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.ListTaskDefinitionsRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*ListTaskDefinitionsOutput), lastPage) + }) +} + +const opListTasks = "ListTasks" + +// ListTasksRequest generates a request for the ListTasks operation. +func (c *ECS) ListTasksRequest(input *ListTasksInput) (req *request.Request, output *ListTasksOutput) { + op := &request.Operation{ + Name: opListTasks, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"nextToken"}, + OutputTokens: []string{"nextToken"}, + LimitToken: "maxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListTasksInput{} + } + + req = c.newRequest(op, input, output) + output = &ListTasksOutput{} + req.Data = output + return +} + +// Returns a list of tasks for a specified cluster. You can filter the results +// by family name, by a particular container instance, or by the desired status +// of the task with the family, containerInstance, and desiredStatus parameters. 
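+//
+// A minimal usage sketch that filters by cluster and desired status; both
+// values below are placeholders:
+//
+//    svc := ecs.New(session.New())
+//    resp, err := svc.ListTasks(&ecs.ListTasksInput{
+//        Cluster:       aws.String("default"),
+//        DesiredStatus: aws.String("RUNNING"),
+//    })
+//    if err != nil {
+//        fmt.Println(err.Error())
+//        return
+//    }
+//    fmt.Println(resp.TaskArns)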
+func (c *ECS) ListTasks(input *ListTasksInput) (*ListTasksOutput, error) { + req, out := c.ListTasksRequest(input) + err := req.Send() + return out, err +} + +func (c *ECS) ListTasksPages(input *ListTasksInput, fn func(p *ListTasksOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.ListTasksRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*ListTasksOutput), lastPage) + }) +} + +const opRegisterContainerInstance = "RegisterContainerInstance" + +// RegisterContainerInstanceRequest generates a request for the RegisterContainerInstance operation. +func (c *ECS) RegisterContainerInstanceRequest(input *RegisterContainerInstanceInput) (req *request.Request, output *RegisterContainerInstanceOutput) { + op := &request.Operation{ + Name: opRegisterContainerInstance, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &RegisterContainerInstanceInput{} + } + + req = c.newRequest(op, input, output) + output = &RegisterContainerInstanceOutput{} + req.Data = output + return +} + +// This action is only used by the Amazon EC2 Container Service agent, and it +// is not intended for use outside of the agent. +// +// Registers an EC2 instance into the specified cluster. This instance becomes +// available to place containers on. +func (c *ECS) RegisterContainerInstance(input *RegisterContainerInstanceInput) (*RegisterContainerInstanceOutput, error) { + req, out := c.RegisterContainerInstanceRequest(input) + err := req.Send() + return out, err +} + +const opRegisterTaskDefinition = "RegisterTaskDefinition" + +// RegisterTaskDefinitionRequest generates a request for the RegisterTaskDefinition operation. +func (c *ECS) RegisterTaskDefinitionRequest(input *RegisterTaskDefinitionInput) (req *request.Request, output *RegisterTaskDefinitionOutput) { + op := &request.Operation{ + Name: opRegisterTaskDefinition, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &RegisterTaskDefinitionInput{} + } + + req = c.newRequest(op, input, output) + output = &RegisterTaskDefinitionOutput{} + req.Data = output + return +} + +// Registers a new task definition from the supplied family and containerDefinitions. +// Optionally, you can add data volumes to your containers with the volumes +// parameter. For more information about task definition parameters and defaults, +// see Amazon ECS Task Definitions (http://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_defintions.html) +// in the Amazon EC2 Container Service Developer Guide. +func (c *ECS) RegisterTaskDefinition(input *RegisterTaskDefinitionInput) (*RegisterTaskDefinitionOutput, error) { + req, out := c.RegisterTaskDefinitionRequest(input) + err := req.Send() + return out, err +} + +const opRunTask = "RunTask" + +// RunTaskRequest generates a request for the RunTask operation. +func (c *ECS) RunTaskRequest(input *RunTaskInput) (req *request.Request, output *RunTaskOutput) { + op := &request.Operation{ + Name: opRunTask, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &RunTaskInput{} + } + + req = c.newRequest(op, input, output) + output = &RunTaskOutput{} + req.Data = output + return +} + +// Start a task using random placement and the default Amazon ECS scheduler. +// To use your own scheduler or place a task on a specific container instance, +// use StartTask instead. +// +// The count parameter is limited to 10 tasks per call. 
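+//
+// A minimal usage sketch; the task definition name and count below are
+// illustrative:
+//
+//    svc := ecs.New(session.New())
+//    resp, err := svc.RunTask(&ecs.RunTaskInput{
+//        TaskDefinition: aws.String("my-task:1"), // Required; family:revision or ARN
+//        Cluster:        aws.String("default"),
+//        Count:          aws.Int64(2), // must be 10 or fewer per call
+//    })
+//    if err != nil {
+//        fmt.Println(err.Error())
+//        return
+//    }
+//    fmt.Println(resp.Tasks)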
+func (c *ECS) RunTask(input *RunTaskInput) (*RunTaskOutput, error) { + req, out := c.RunTaskRequest(input) + err := req.Send() + return out, err +} + +const opStartTask = "StartTask" + +// StartTaskRequest generates a request for the StartTask operation. +func (c *ECS) StartTaskRequest(input *StartTaskInput) (req *request.Request, output *StartTaskOutput) { + op := &request.Operation{ + Name: opStartTask, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &StartTaskInput{} + } + + req = c.newRequest(op, input, output) + output = &StartTaskOutput{} + req.Data = output + return +} + +// Starts a new task from the specified task definition on the specified container +// instance or instances. To use the default Amazon ECS scheduler to place your +// task, use RunTask instead. +// +// The list of container instances to start tasks on is limited to 10. +func (c *ECS) StartTask(input *StartTaskInput) (*StartTaskOutput, error) { + req, out := c.StartTaskRequest(input) + err := req.Send() + return out, err +} + +const opStopTask = "StopTask" + +// StopTaskRequest generates a request for the StopTask operation. +func (c *ECS) StopTaskRequest(input *StopTaskInput) (req *request.Request, output *StopTaskOutput) { + op := &request.Operation{ + Name: opStopTask, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &StopTaskInput{} + } + + req = c.newRequest(op, input, output) + output = &StopTaskOutput{} + req.Data = output + return +} + +// Stops a running task. +// +// When StopTask is called on a task, the equivalent of docker stop is issued +// to the containers running in the task. This results in a SIGTERM and a 30-second +// timeout, after which SIGKILL is sent and the containers are forcibly stopped. +// If the container handles the SIGTERM gracefully and exits within 30 seconds +// from receiving it, no SIGKILL is sent. +func (c *ECS) StopTask(input *StopTaskInput) (*StopTaskOutput, error) { + req, out := c.StopTaskRequest(input) + err := req.Send() + return out, err +} + +const opSubmitContainerStateChange = "SubmitContainerStateChange" + +// SubmitContainerStateChangeRequest generates a request for the SubmitContainerStateChange operation. +func (c *ECS) SubmitContainerStateChangeRequest(input *SubmitContainerStateChangeInput) (req *request.Request, output *SubmitContainerStateChangeOutput) { + op := &request.Operation{ + Name: opSubmitContainerStateChange, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &SubmitContainerStateChangeInput{} + } + + req = c.newRequest(op, input, output) + output = &SubmitContainerStateChangeOutput{} + req.Data = output + return +} + +// This action is only used by the Amazon EC2 Container Service agent, and it +// is not intended for use outside of the agent. +// +// Sent to acknowledge that a container changed states. +func (c *ECS) SubmitContainerStateChange(input *SubmitContainerStateChangeInput) (*SubmitContainerStateChangeOutput, error) { + req, out := c.SubmitContainerStateChangeRequest(input) + err := req.Send() + return out, err +} + +const opSubmitTaskStateChange = "SubmitTaskStateChange" + +// SubmitTaskStateChangeRequest generates a request for the SubmitTaskStateChange operation. 
+func (c *ECS) SubmitTaskStateChangeRequest(input *SubmitTaskStateChangeInput) (req *request.Request, output *SubmitTaskStateChangeOutput) { + op := &request.Operation{ + Name: opSubmitTaskStateChange, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &SubmitTaskStateChangeInput{} + } + + req = c.newRequest(op, input, output) + output = &SubmitTaskStateChangeOutput{} + req.Data = output + return +} + +// This action is only used by the Amazon EC2 Container Service agent, and it +// is not intended for use outside of the agent. +// +// Sent to acknowledge that a task changed states. +func (c *ECS) SubmitTaskStateChange(input *SubmitTaskStateChangeInput) (*SubmitTaskStateChangeOutput, error) { + req, out := c.SubmitTaskStateChangeRequest(input) + err := req.Send() + return out, err +} + +const opUpdateContainerAgent = "UpdateContainerAgent" + +// UpdateContainerAgentRequest generates a request for the UpdateContainerAgent operation. +func (c *ECS) UpdateContainerAgentRequest(input *UpdateContainerAgentInput) (req *request.Request, output *UpdateContainerAgentOutput) { + op := &request.Operation{ + Name: opUpdateContainerAgent, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UpdateContainerAgentInput{} + } + + req = c.newRequest(op, input, output) + output = &UpdateContainerAgentOutput{} + req.Data = output + return +} + +// Updates the Amazon ECS container agent on a specified container instance. +// Updating the Amazon ECS container agent does not interrupt running tasks +// or services on the container instance. The process for updating the agent +// differs depending on whether your container instance was launched with the +// Amazon ECS-optimized AMI or another operating system. +// +// UpdateContainerAgent requires the Amazon ECS-optimized AMI or Amazon Linux +// with the ecs-init service installed and running. For help updating the Amazon +// ECS container agent on other operating systems, see Manually Updating the +// Amazon ECS Container Agent (http://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-agent-update.html#manually_update_agent) +// in the Amazon EC2 Container Service Developer Guide. +func (c *ECS) UpdateContainerAgent(input *UpdateContainerAgentInput) (*UpdateContainerAgentOutput, error) { + req, out := c.UpdateContainerAgentRequest(input) + err := req.Send() + return out, err +} + +const opUpdateService = "UpdateService" + +// UpdateServiceRequest generates a request for the UpdateService operation. +func (c *ECS) UpdateServiceRequest(input *UpdateServiceInput) (req *request.Request, output *UpdateServiceOutput) { + op := &request.Operation{ + Name: opUpdateService, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UpdateServiceInput{} + } + + req = c.newRequest(op, input, output) + output = &UpdateServiceOutput{} + req.Data = output + return +} + +// Modifies the desired count, deployment configuration, or task definition +// used in a service. +// +// You can add to or subtract from the number of instantiations of a task definition +// in a service by specifying the cluster that the service is running in and +// a new desiredCount parameter. +// +// You can use UpdateService to modify your task definition and deploy a new +// version of your service. +// +// You can also update the deployment configuration of a service. 
When a deployment
+// is triggered by updating the task definition of a service, the service scheduler
+// uses the deployment configuration parameters, minimumHealthyPercent and maximumPercent,
+// to determine the deployment strategy.
+//
+// If the minimumHealthyPercent is below 100%, the scheduler can ignore the
+// desiredCount temporarily during a deployment. For example, if your service
+// has a desiredCount of four tasks, a minimumHealthyPercent of 50% allows the
+// scheduler to stop two existing tasks before starting two new tasks. Tasks
+// for services that do not use a load balancer are considered healthy if they
+// are in the RUNNING state; tasks for services that do use a load balancer
+// are considered healthy if they are in the RUNNING state and the container
+// instances they are hosted on are reported as healthy by the load balancer.
+//
+// The maximumPercent parameter represents an upper limit on the number of
+// running tasks during a deployment, which enables you to define the deployment
+// batch size. For example, if your service has a desiredCount of four tasks,
+// a maximumPercent value of 200% starts four new tasks before stopping the
+// four older tasks (provided that the cluster resources required to do this
+// are available).
+//
+// When UpdateService stops a task during a deployment, the equivalent of docker
+// stop is issued to the containers running in the task. This results in a SIGTERM
+// and a 30-second timeout, after which SIGKILL is sent and the containers are
+// forcibly stopped. If the container handles the SIGTERM gracefully and exits
+// within 30 seconds from receiving it, no SIGKILL is sent.
+//
+// When the service scheduler launches new tasks, it attempts to balance them
+// across the Availability Zones in your cluster with the following logic:
+//
+// Determine which of the container instances in your cluster can support
+// your service's task definition (for example, they have the required CPU,
+// memory, ports, and container instance attributes).
+//
+// Sort the valid container instances by the fewest number of running tasks
+// for this service in the same Availability Zone as the instance. For example,
+// if zone A has one running service task and zones B and C each have zero,
+// valid container instances in either zone B or C are considered optimal for
+// placement.
+//
+// Place the new service task on a valid container instance in an optimal
+// Availability Zone (based on the previous steps), favoring container instances
+// with the fewest number of running tasks for this service.
+func (c *ECS) UpdateService(input *UpdateServiceInput) (*UpdateServiceOutput, error) {
+	req, out := c.UpdateServiceRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+// The attributes applicable to a container instance when it is registered.
+type Attribute struct {
+	_ struct{} `type:"structure"`
+
+	// The name of the container instance attribute.
+	Name *string `locationName:"name" type:"string" required:"true"`
+
+	// The value of the container instance attribute (at this time, the value here
+	// is Null, but this could change in future revisions for expandability).
+ Value *string `locationName:"value" type:"string"` +} + +// String returns the string representation +func (s Attribute) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Attribute) GoString() string { + return s.String() +} + +// A regional grouping of one or more container instances on which you can run +// task requests. Each account receives a default cluster the first time you +// use the Amazon ECS service, but you may also create other clusters. Clusters +// may contain more than one instance type simultaneously. +type Cluster struct { + _ struct{} `type:"structure"` + + // The number of services that are running on the cluster in an ACTIVE state. + // You can view these services with ListServices. + ActiveServicesCount *int64 `locationName:"activeServicesCount" type:"integer"` + + // The Amazon Resource Name (ARN) that identifies the cluster. The ARN contains + // the arn:aws:ecs namespace, followed by the region of the cluster, the AWS + // account ID of the cluster owner, the cluster namespace, and then the cluster + // name. For example, arn:aws:ecs:region:012345678910:cluster/test. + ClusterArn *string `locationName:"clusterArn" type:"string"` + + // A user-generated string that you use to identify your cluster. + ClusterName *string `locationName:"clusterName" type:"string"` + + // The number of tasks in the cluster that are in the PENDING state. + PendingTasksCount *int64 `locationName:"pendingTasksCount" type:"integer"` + + // The number of container instances registered into the cluster. + RegisteredContainerInstancesCount *int64 `locationName:"registeredContainerInstancesCount" type:"integer"` + + // The number of tasks in the cluster that are in the RUNNING state. + RunningTasksCount *int64 `locationName:"runningTasksCount" type:"integer"` + + // The status of the cluster. The valid values are ACTIVE or INACTIVE. ACTIVE + // indicates that you can register container instances with the cluster and + // the associated instances can accept tasks. + Status *string `locationName:"status" type:"string"` +} + +// String returns the string representation +func (s Cluster) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Cluster) GoString() string { + return s.String() +} + +// A Docker container that is part of a task. +type Container struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the container. + ContainerArn *string `locationName:"containerArn" type:"string"` + + // The exit code returned from the container. + ExitCode *int64 `locationName:"exitCode" type:"integer"` + + // The last known status of the container. + LastStatus *string `locationName:"lastStatus" type:"string"` + + // The name of the container. + Name *string `locationName:"name" type:"string"` + + // The network bindings associated with the container. + NetworkBindings []*NetworkBinding `locationName:"networkBindings" type:"list"` + + // A short (255 max characters) human-readable string to provide additional + // detail about a running or stopped container. + Reason *string `locationName:"reason" type:"string"` + + // The Amazon Resource Name (ARN) of the task. 
+ TaskArn *string `locationName:"taskArn" type:"string"` +} + +// String returns the string representation +func (s Container) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Container) GoString() string { + return s.String() +} + +// Container definitions are used in task definitions to describe the different +// containers that are launched as part of a task. +type ContainerDefinition struct { + _ struct{} `type:"structure"` + + // The command that is passed to the container. This parameter maps to Cmd in + // the Create a container (https://docs.docker.com/reference/api/docker_remote_api_v1.19/#create-a-container) + // section of the Docker Remote API (https://docs.docker.com/reference/api/docker_remote_api_v1.19/) + // and the COMMAND parameter to docker run (https://docs.docker.com/reference/commandline/run/). + // For more information, see https://docs.docker.com/reference/builder/#cmd + // (https://docs.docker.com/reference/builder/#cmd). + Command []*string `locationName:"command" type:"list"` + + // The number of cpu units reserved for the container. A container instance + // has 1,024 cpu units for every CPU core. This parameter specifies the minimum + // amount of CPU to reserve for a container, and containers share unallocated + // CPU units with other containers on the instance with the same ratio as their + // allocated amount. This parameter maps to CpuShares in the Create a container + // (https://docs.docker.com/reference/api/docker_remote_api_v1.19/#create-a-container) + // section of the Docker Remote API (https://docs.docker.com/reference/api/docker_remote_api_v1.19/) + // and the --cpu-shares option to docker run (https://docs.docker.com/reference/commandline/run/). + // + // You can determine the number of CPU units that are available per EC2 instance + // type by multiplying the vCPUs listed for that instance type on the Amazon + // EC2 Instances (http://aws.amazon.com/ec2/instance-types/) detail page by + // 1,024. + // + // For example, if you run a single-container task on a single-core instance + // type with 512 CPU units specified for that container, and that is the only + // task running on the container instance, that container could use the full + // 1,024 CPU unit share at any given time. However, if you launched another + // copy of the same task on that container instance, each task would be guaranteed + // a minimum of 512 CPU units when needed, and each container could float to + // higher CPU usage if the other container was not using it, but if both tasks + // were 100% active all of the time, they would be limited to 512 CPU units. + // + // The Docker daemon on the container instance uses the CPU value to calculate + // the relative CPU share ratios for running containers. For more information, + // see CPU share constraint (https://docs.docker.com/reference/run/#cpu-share-constraint) + // in the Docker documentation. The minimum valid CPU share value that the Linux + // kernel allows is 2; however, the CPU parameter is not required, and you can + // use CPU values below 2 in your container definitions. For CPU values below + // 2 (including null), the behavior varies based on your Amazon ECS container + // agent version: + // + // Agent versions less than or equal to 1.1.0: Null and zero CPU values are + // passed to Docker as 0, which Docker then converts to 1,024 CPU shares. CPU + // values of 1 are passed to Docker as 1, which the Linux kernel converts to + // 2 CPU shares. 
Agent versions greater than or equal to 1.2.0: Null, zero, + // and CPU values of 1 are passed to Docker as 2. + Cpu *int64 `locationName:"cpu" type:"integer"` + + // When this parameter is true, networking is disabled within the container. + // This parameter maps to NetworkDisabled in the Create a container (https://docs.docker.com/reference/api/docker_remote_api_v1.19/#create-a-container) + // section of the Docker Remote API (https://docs.docker.com/reference/api/docker_remote_api_v1.19/). + DisableNetworking *bool `locationName:"disableNetworking" type:"boolean"` + + // A list of DNS search domains that are presented to the container. This parameter + // maps to DnsSearch in the Create a container (https://docs.docker.com/reference/api/docker_remote_api_v1.19/#create-a-container) + // section of the Docker Remote API (https://docs.docker.com/reference/api/docker_remote_api_v1.19/) + // and the --dns-search option to docker run (https://docs.docker.com/reference/commandline/run/). + DnsSearchDomains []*string `locationName:"dnsSearchDomains" type:"list"` + + // A list of DNS servers that are presented to the container. This parameter + // maps to Dns in the Create a container (https://docs.docker.com/reference/api/docker_remote_api_v1.19/#create-a-container) + // section of the Docker Remote API (https://docs.docker.com/reference/api/docker_remote_api_v1.19/) + // and the --dns option to docker run (https://docs.docker.com/reference/commandline/run/). + DnsServers []*string `locationName:"dnsServers" type:"list"` + + // A key/value map of labels to add to the container. This parameter maps to + // Labels in the Create a container (https://docs.docker.com/reference/api/docker_remote_api_v1.19/#create-a-container) + // section of the Docker Remote API (https://docs.docker.com/reference/api/docker_remote_api_v1.19/) + // and the --label option to docker run (https://docs.docker.com/reference/commandline/run/). + // This parameter requires version 1.18 of the Docker Remote API or greater + // on your container instance. To check the Docker Remote API version on your + // container instance, log into your container instance and run the following + // command: sudo docker version | grep "Server API version" + DockerLabels map[string]*string `locationName:"dockerLabels" type:"map"` + + // A list of strings to provide custom labels for SELinux and AppArmor multi-level + // security systems. This parameter maps to SecurityOpt in the Create a container + // (https://docs.docker.com/reference/api/docker_remote_api_v1.19/#create-a-container) + // section of the Docker Remote API (https://docs.docker.com/reference/api/docker_remote_api_v1.19/) + // and the --security-opt option to docker run (https://docs.docker.com/reference/commandline/run/). + // + // The Amazon ECS container agent running on a container instance must register + // with the ECS_SELINUX_CAPABLE=true or ECS_APPARMOR_CAPABLE=true environment + // variables before containers placed on that instance can use these security + // options. For more information, see Amazon ECS Container Agent Configuration + // (http://docs.aws.amazon.com/AmazonECS/latest/developerguide/developerguide/ecs-agent-config.html) + // in the Amazon EC2 Container Service Developer Guide. + DockerSecurityOptions []*string `locationName:"dockerSecurityOptions" type:"list"` + + // Early versions of the Amazon ECS container agent do not properly handle entryPoint + // parameters. 
If you have problems using entryPoint, update your container + // agent or enter your commands and arguments as command array items instead. + // + // The entry point that is passed to the container. This parameter maps to + // Entrypoint in the Create a container (https://docs.docker.com/reference/api/docker_remote_api_v1.19/#create-a-container) + // section of the Docker Remote API (https://docs.docker.com/reference/api/docker_remote_api_v1.19/) + // and the --entrypoint option to docker run (https://docs.docker.com/reference/commandline/run/). + // For more information, see https://docs.docker.com/reference/builder/#entrypoint + // (https://docs.docker.com/reference/builder/#entrypoint). + EntryPoint []*string `locationName:"entryPoint" type:"list"` + + // The environment variables to pass to a container. This parameter maps to + // Env in the Create a container (https://docs.docker.com/reference/api/docker_remote_api_v1.19/#create-a-container) + // section of the Docker Remote API (https://docs.docker.com/reference/api/docker_remote_api_v1.19/) + // and the --env option to docker run (https://docs.docker.com/reference/commandline/run/). + // + // We do not recommend using plain text environment variables for sensitive + // information, such as credential data. + Environment []*KeyValuePair `locationName:"environment" type:"list"` + + // If the essential parameter of a container is marked as true, the failure + // of that container stops the task. If the essential parameter of a container + // is marked as false, then its failure does not affect the rest of the containers + // in a task. If this parameter is omitted, a container is assumed to be essential. + // + // All tasks must have at least one essential container. + Essential *bool `locationName:"essential" type:"boolean"` + + // A list of hostnames and IP address mappings to append to the /etc/hosts file + // on the container. This parameter maps to ExtraHosts in the Create a container + // (https://docs.docker.com/reference/api/docker_remote_api_v1.19/#create-a-container) + // section of the Docker Remote API (https://docs.docker.com/reference/api/docker_remote_api_v1.19/) + // and the --add-host option to docker run (https://docs.docker.com/reference/commandline/run/). + ExtraHosts []*HostEntry `locationName:"extraHosts" type:"list"` + + // The hostname to use for your container. This parameter maps to Hostname in + // the Create a container (https://docs.docker.com/reference/api/docker_remote_api_v1.19/#create-a-container) + // section of the Docker Remote API (https://docs.docker.com/reference/api/docker_remote_api_v1.19/) + // and the --hostname option to docker run (https://docs.docker.com/reference/commandline/run/). + Hostname *string `locationName:"hostname" type:"string"` + + // The image used to start a container. This string is passed directly to the + // Docker daemon. Images in the Docker Hub registry are available by default. + // Other repositories are specified with repository-url/image:tag. Up to 255 + // letters (uppercase and lowercase), numbers, hyphens, underscores, colons, + // periods, forward slashes, and number signs are allowed. This parameter maps + // to Image in the Create a container (https://docs.docker.com/reference/api/docker_remote_api_v1.19/#create-a-container) + // section of the Docker Remote API (https://docs.docker.com/reference/api/docker_remote_api_v1.19/) + // and the IMAGE parameter of docker run (https://docs.docker.com/reference/commandline/run/). 
+ // + // Images in official repositories on Docker Hub use a single name (for example, + // ubuntu or mongo). Images in other repositories on Docker Hub are qualified + // with an organization name (for example, amazon/amazon-ecs-agent). Images + // in other online repositories are qualified further by a domain name (for + // example, quay.io/assemblyline/ubuntu). + Image *string `locationName:"image" type:"string"` + + // The link parameter allows containers to communicate with each other without + // the need for port mappings, using the name parameter and optionally, an alias + // for the link. This construct is analogous to name:alias in Docker links. + // Up to 255 letters (uppercase and lowercase), numbers, hyphens, and underscores + // are allowed for each name and alias. For more information on linking Docker + // containers, see https://docs.docker.com/userguide/dockerlinks/ (https://docs.docker.com/userguide/dockerlinks/). + // This parameter maps to Links in the Create a container (https://docs.docker.com/reference/api/docker_remote_api_v1.19/#create-a-container) + // section of the Docker Remote API (https://docs.docker.com/reference/api/docker_remote_api_v1.19/) + // and the --link option to docker run (https://docs.docker.com/reference/commandline/run/). + // + // Containers that are collocated on a single container instance may be able + // to communicate with each other without requiring links or host port mappings. + // Network isolation is achieved on the container instance using security groups + // and VPC settings. + Links []*string `locationName:"links" type:"list"` + + // The log configuration specification for the container. This parameter maps + // to LogConfig in the Create a container (https://docs.docker.com/reference/api/docker_remote_api_v1.19/#create-a-container) + // section of the Docker Remote API (https://docs.docker.com/reference/api/docker_remote_api_v1.19/) + // and the --log-driver option to docker run (https://docs.docker.com/reference/commandline/run/). + // Valid log drivers are displayed in the LogConfiguration data type. This parameter + // requires version 1.18 of the Docker Remote API or greater on your container + // instance. To check the Docker Remote API version on your container instance, + // log into your container instance and run the following command: sudo docker + // version | grep "Server API version" + // + // The Amazon ECS container agent running on a container instance must register + // the logging drivers available on that instance with the ECS_AVAILABLE_LOGGING_DRIVERS + // environment variable before containers placed on that instance can use these + // log configuration options. For more information, see Amazon ECS Container + // Agent Configuration (http://docs.aws.amazon.com/AmazonECS/latest/developerguide/developerguide/ecs-agent-config.html) + // in the Amazon EC2 Container Service Developer Guide. + LogConfiguration *LogConfiguration `locationName:"logConfiguration" type:"structure"` + + // The number of MiB of memory to reserve for the container. You must specify + // a non-zero integer for this parameter; the Docker daemon reserves a minimum + // of 4 MiB of memory for a container, so you should not specify fewer than + // 4 MiB of memory for your containers. If your container attempts to exceed + // the memory allocated here, the container is killed. 
This parameter maps to + // Memory in the Create a container (https://docs.docker.com/reference/api/docker_remote_api_v1.19/#create-a-container) + // section of the Docker Remote API (https://docs.docker.com/reference/api/docker_remote_api_v1.19/) + // and the --memory option to docker run (https://docs.docker.com/reference/commandline/run/). + Memory *int64 `locationName:"memory" type:"integer"` + + // The mount points for data volumes in your container. This parameter maps + // to Volumes in the Create a container (https://docs.docker.com/reference/api/docker_remote_api_v1.19/#create-a-container) + // section of the Docker Remote API (https://docs.docker.com/reference/api/docker_remote_api_v1.19/) + // and the --volume option to docker run (https://docs.docker.com/reference/commandline/run/). + MountPoints []*MountPoint `locationName:"mountPoints" type:"list"` + + // The name of a container. If you are linking multiple containers together + // in a task definition, the name of one container can be entered in the links + // of another container to connect the containers. Up to 255 letters (uppercase + // and lowercase), numbers, hyphens, and underscores are allowed. This parameter + // maps to name in the Create a container (https://docs.docker.com/reference/api/docker_remote_api_v1.19/#create-a-container) + // section of the Docker Remote API (https://docs.docker.com/reference/api/docker_remote_api_v1.19/) + // and the --name option to docker run (https://docs.docker.com/reference/commandline/run/). + Name *string `locationName:"name" type:"string"` + + // The list of port mappings for the container. Port mappings allow containers + // to access ports on the host container instance to send or receive traffic. + // This parameter maps to PortBindings in the Create a container (https://docs.docker.com/reference/api/docker_remote_api_v1.19/#create-a-container) + // section of the Docker Remote API (https://docs.docker.com/reference/api/docker_remote_api_v1.19/) + // and the --publish option to docker run (https://docs.docker.com/reference/commandline/run/). + // + // After a task reaches the RUNNING status, manual and automatic host and + // container port assignments are visible in the Network Bindings section of + // a container description of a selected task in the Amazon ECS console, or + // the networkBindings section of DescribeTasks responses. + PortMappings []*PortMapping `locationName:"portMappings" type:"list"` + + // When this parameter is true, the container is given elevated privileges on + // the host container instance (similar to the root user). This parameter maps + // to Privileged in the Create a container (https://docs.docker.com/reference/api/docker_remote_api_v1.19/#create-a-container) + // section of the Docker Remote API (https://docs.docker.com/reference/api/docker_remote_api_v1.19/) + // and the --privileged option to docker run (https://docs.docker.com/reference/commandline/run/). + Privileged *bool `locationName:"privileged" type:"boolean"` + + // When this parameter is true, the container is given read-only access to its + // root file system. This parameter maps to ReadonlyRootfs in the Create a container + // (https://docs.docker.com/reference/api/docker_remote_api_v1.19/#create-a-container) + // section of the Docker Remote API (https://docs.docker.com/reference/api/docker_remote_api_v1.19/) + // and the --read-only option to docker run.
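+ //
+ // As a sketch of how the container parameters above fit together (names and
+ // values are illustrative, and the aws helper package is assumed to be
+ // imported from this vendored SDK):
+ //
+ //    def := &ecs.ContainerDefinition{
+ //        Name:       aws.String("web"),
+ //        Image:      aws.String("nginx:1.9"),
+ //        Memory:     aws.Int64(128), // MiB; do not specify fewer than 4
+ //        Essential:  aws.Bool(true),
+ //        EntryPoint: []*string{aws.String("nginx")},
+ //        Command:    []*string{aws.String("-g"), aws.String("daemon off;")},
+ //        PortMappings: []*ecs.PortMapping{{
+ //            ContainerPort: aws.Int64(80),
+ //            Protocol:      aws.String("tcp"),
+ //        }},
+ //    }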
+ ReadonlyRootFilesystem *bool `locationName:"readonlyRootFilesystem" type:"boolean"` + + // A list of ulimits to set in the container. This parameter maps to Ulimits + // in the Create a container (https://docs.docker.com/reference/api/docker_remote_api_v1.19/#create-a-container) + // section of the Docker Remote API (https://docs.docker.com/reference/api/docker_remote_api_v1.19/) + // and the --ulimit option to docker run (https://docs.docker.com/reference/commandline/run/). + // Valid naming values are displayed in the Ulimit data type. This parameter + // requires version 1.18 of the Docker Remote API or greater on your container + // instance. To check the Docker Remote API version on your container instance, + // log into your container instance and run the following command: sudo docker + // version | grep "Server API version" + Ulimits []*Ulimit `locationName:"ulimits" type:"list"` + + // The user name to use inside the container. This parameter maps to User in + // the Create a container (https://docs.docker.com/reference/api/docker_remote_api_v1.19/#create-a-container) + // section of the Docker Remote API (https://docs.docker.com/reference/api/docker_remote_api_v1.19/) + // and the --user option to docker run (https://docs.docker.com/reference/commandline/run/). + User *string `locationName:"user" type:"string"` + + // Data volumes to mount from another container. This parameter maps to VolumesFrom + // in the Create a container (https://docs.docker.com/reference/api/docker_remote_api_v1.19/#create-a-container) + // section of the Docker Remote API (https://docs.docker.com/reference/api/docker_remote_api_v1.19/) + // and the --volumes-from option to docker run (https://docs.docker.com/reference/commandline/run/). + VolumesFrom []*VolumeFrom `locationName:"volumesFrom" type:"list"` + + // The working directory in which to run commands inside the container. This + // parameter maps to WorkingDir in the Create a container (https://docs.docker.com/reference/api/docker_remote_api_v1.19/#create-a-container) + // section of the Docker Remote API (https://docs.docker.com/reference/api/docker_remote_api_v1.19/) + // and the --workdir option to docker run (https://docs.docker.com/reference/commandline/run/). + WorkingDirectory *string `locationName:"workingDirectory" type:"string"` +} + +// String returns the string representation +func (s ContainerDefinition) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ContainerDefinition) GoString() string { + return s.String() +} + +// An EC2 instance that is running the Amazon ECS agent and has been registered +// with a cluster. +type ContainerInstance struct { + _ struct{} `type:"structure"` + + // This parameter returns true if the agent is actually connected to Amazon + // ECS. Registered instances with an agent that may be unhealthy or stopped + // return false, and instances without a connected agent cannot accept placement + // requests. + AgentConnected *bool `locationName:"agentConnected" type:"boolean"` + + // The status of the most recent agent update. If an update has never been requested, + // this value is NULL. + AgentUpdateStatus *string `locationName:"agentUpdateStatus" type:"string" enum:"AgentUpdateStatus"` + + // The attributes set for the container instance by the Amazon ECS container + // agent at instance registration. + Attributes []*Attribute `locationName:"attributes" type:"list"` + + // The Amazon Resource Name (ARN) of the container instance. 
The ARN contains + // the arn:aws:ecs namespace, followed by the region of the container instance, + // the AWS account ID of the container instance owner, the container-instance + // namespace, and then the container instance ID. For example, arn:aws:ecs:region:aws_account_id:container-instance/container_instance_ID. + ContainerInstanceArn *string `locationName:"containerInstanceArn" type:"string"` + + // The EC2 instance ID of the container instance. + Ec2InstanceId *string `locationName:"ec2InstanceId" type:"string"` + + // The number of tasks on the container instance that are in the PENDING status. + PendingTasksCount *int64 `locationName:"pendingTasksCount" type:"integer"` + + // The registered resources on the container instance that are in use by current + // tasks. + RegisteredResources []*Resource `locationName:"registeredResources" type:"list"` + + // The remaining resources of the container instance that are available for + // new tasks. + RemainingResources []*Resource `locationName:"remainingResources" type:"list"` + + // The number of tasks on the container instance that are in the RUNNING status. + RunningTasksCount *int64 `locationName:"runningTasksCount" type:"integer"` + + // The status of the container instance. The valid values are ACTIVE or INACTIVE. + // ACTIVE indicates that the container instance can accept tasks. + Status *string `locationName:"status" type:"string"` + + // The version information for the Amazon ECS container agent and Docker daemon + // running on the container instance. + VersionInfo *VersionInfo `locationName:"versionInfo" type:"structure"` +} + +// String returns the string representation +func (s ContainerInstance) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ContainerInstance) GoString() string { + return s.String() +} + +// The overrides that should be sent to a container. +type ContainerOverride struct { + _ struct{} `type:"structure"` + + // The command to send to the container that overrides the default command from + // the Docker image or the task definition. + Command []*string `locationName:"command" type:"list"` + + // The environment variables to send to the container. You can add new environment + // variables, which are added to the container at launch, or you can override + // the existing environment variables from the Docker image or the task definition. + Environment []*KeyValuePair `locationName:"environment" type:"list"` + + // The name of the container that receives the override. + Name *string `locationName:"name" type:"string"` +} + +// String returns the string representation +func (s ContainerOverride) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ContainerOverride) GoString() string { + return s.String() +} + +type CreateClusterInput struct { + _ struct{} `type:"structure"` + + // The name of your cluster. If you do not specify a name for your cluster, + // you create a cluster named default. Up to 255 letters (uppercase and lowercase), + // numbers, hyphens, and underscores are allowed. 
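+ //
+ // A minimal sketch of creating a cluster with this input (the region, the
+ // cluster name, and the client setup are illustrative; assumes the aws,
+ // session, and ecs packages from this vendored SDK):
+ //
+ //    sess := session.New(&aws.Config{Region: aws.String("us-east-1")})
+ //    svc := ecs.New(sess)
+ //    out, err := svc.CreateCluster(&ecs.CreateClusterInput{
+ //        ClusterName: aws.String("my-cluster"),
+ //    })
+ //    if err != nil {
+ //        // handle error
+ //    }
+ //    _ = out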
+ ClusterName *string `locationName:"clusterName" type:"string"` +} + +// String returns the string representation +func (s CreateClusterInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateClusterInput) GoString() string { + return s.String() +} + +type CreateClusterOutput struct { + _ struct{} `type:"structure"` + + // The full description of your new cluster. + Cluster *Cluster `locationName:"cluster" type:"structure"` +} + +// String returns the string representation +func (s CreateClusterOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateClusterOutput) GoString() string { + return s.String() +} + +type CreateServiceInput struct { + _ struct{} `type:"structure"` + + // Unique, case-sensitive identifier you provide to ensure the idempotency of + // the request. Up to 32 ASCII characters are allowed. + ClientToken *string `locationName:"clientToken" type:"string"` + + // The short name or full Amazon Resource Name (ARN) of the cluster on which + // to run your service. If you do not specify a cluster, the default cluster + // is assumed. + Cluster *string `locationName:"cluster" type:"string"` + + // Optional deployment parameters that control how many tasks run during the + // deployment and the ordering of stopping and starting tasks. + DeploymentConfiguration *DeploymentConfiguration `locationName:"deploymentConfiguration" type:"structure"` + + // The number of instantiations of the specified task definition to place and + // keep running on your cluster. + DesiredCount *int64 `locationName:"desiredCount" type:"integer" required:"true"` + + // A list of load balancer objects, containing the load balancer name, the container + // name (as it appears in a container definition), and the container port to + // access from the load balancer. + LoadBalancers []*LoadBalancer `locationName:"loadBalancers" type:"list"` + + // The name or full Amazon Resource Name (ARN) of the IAM role that allows your + // Amazon ECS container agent to make calls to your load balancer on your behalf. + // This parameter is only required if you are using a load balancer with your + // service. + Role *string `locationName:"role" type:"string"` + + // The name of your service. Up to 255 letters (uppercase and lowercase), numbers, + // hyphens, and underscores are allowed. Service names must be unique within + // a cluster, but you can have similarly named services in multiple clusters + // within a region or across multiple regions. + ServiceName *string `locationName:"serviceName" type:"string" required:"true"` + + // The family and revision (family:revision) or full Amazon Resource Name (ARN) + // of the task definition to run in your service. If a revision is not specified, + // the latest ACTIVE revision is used. + TaskDefinition *string `locationName:"taskDefinition" type:"string" required:"true"` +} + +// String returns the string representation +func (s CreateServiceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateServiceInput) GoString() string { + return s.String() +} + +type CreateServiceOutput struct { + _ struct{} `type:"structure"` + + // The full description of your service following the create call. 
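+ //
+ // A minimal sketch of a service creation request, setting only the required
+ // parameters (assumes the svc client from the earlier sketch; the names,
+ // revision, and count are illustrative):
+ //
+ //    out, err := svc.CreateService(&ecs.CreateServiceInput{
+ //        ServiceName:    aws.String("web"),
+ //        TaskDefinition: aws.String("web:1"),
+ //        DesiredCount:   aws.Int64(2),
+ //    })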
+ Service *Service `locationName:"service" type:"structure"` +} + +// String returns the string representation +func (s CreateServiceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateServiceOutput) GoString() string { + return s.String() +} + +type DeleteClusterInput struct { + _ struct{} `type:"structure"` + + // The short name or full Amazon Resource Name (ARN) of the cluster to delete. + Cluster *string `locationName:"cluster" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteClusterInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteClusterInput) GoString() string { + return s.String() +} + +type DeleteClusterOutput struct { + _ struct{} `type:"structure"` + + // The full description of the deleted cluster. + Cluster *Cluster `locationName:"cluster" type:"structure"` +} + +// String returns the string representation +func (s DeleteClusterOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteClusterOutput) GoString() string { + return s.String() +} + +type DeleteServiceInput struct { + _ struct{} `type:"structure"` + + // The name of the cluster that hosts the service to delete. If you do not specify + // a cluster, the default cluster is assumed. + Cluster *string `locationName:"cluster" type:"string"` + + // The name of the service to delete. + Service *string `locationName:"service" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteServiceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteServiceInput) GoString() string { + return s.String() +} + +type DeleteServiceOutput struct { + _ struct{} `type:"structure"` + + // The full description of the deleted service. + Service *Service `locationName:"service" type:"structure"` +} + +// String returns the string representation +func (s DeleteServiceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteServiceOutput) GoString() string { + return s.String() +} + +// The details of an Amazon ECS service deployment. +type Deployment struct { + _ struct{} `type:"structure"` + + // The Unix time in seconds and milliseconds when the service was created. + CreatedAt *time.Time `locationName:"createdAt" type:"timestamp" timestampFormat:"unix"` + + // The most recent desired count of tasks that was specified for the service + // to deploy or maintain. + DesiredCount *int64 `locationName:"desiredCount" type:"integer"` + + // The ID of the deployment. + Id *string `locationName:"id" type:"string"` + + // The number of tasks in the deployment that are in the PENDING status. + PendingCount *int64 `locationName:"pendingCount" type:"integer"` + + // The number of tasks in the deployment that are in the RUNNING status. + RunningCount *int64 `locationName:"runningCount" type:"integer"` + + // The status of the deployment. Valid values are PRIMARY (for the most recent + // deployment), ACTIVE (for previous deployments that still have tasks running, + // but are being replaced with the PRIMARY deployment), and INACTIVE (for deployments + // that have been completely replaced). + Status *string `locationName:"status" type:"string"` + + // The most recent task definition that was specified for the service to use. 
+ TaskDefinition *string `locationName:"taskDefinition" type:"string"` + + // The Unix time in seconds and milliseconds when the service was last updated. + UpdatedAt *time.Time `locationName:"updatedAt" type:"timestamp" timestampFormat:"unix"` +} + +// String returns the string representation +func (s Deployment) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Deployment) GoString() string { + return s.String() +} + +// Optional deployment parameters that control how many tasks run during the +// deployment and the ordering of stopping and starting tasks. +type DeploymentConfiguration struct { + _ struct{} `type:"structure"` + + // The upper limit (as a percentage of the service's desiredCount) of the number + // of running tasks that can be running in a service during a deployment. The + // maximum number of tasks during a deployment is the desiredCount multiplied + // by the maximumPercent/100, rounded down to the nearest integer value. + MaximumPercent *int64 `locationName:"maximumPercent" type:"integer"` + + // The lower limit (as a percentage of the service's desiredCount) of the number + // of running tasks that must remain running and healthy in a service during + // a deployment. The minimum healthy tasks during a deployment is the desiredCount + // multiplied by the minimumHealthyPercent/100, rounded up to the nearest integer + // value. + MinimumHealthyPercent *int64 `locationName:"minimumHealthyPercent" type:"integer"` +} + +// String returns the string representation +func (s DeploymentConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeploymentConfiguration) GoString() string { + return s.String() +} + +type DeregisterContainerInstanceInput struct { + _ struct{} `type:"structure"` + + // The short name or full Amazon Resource Name (ARN) of the cluster that hosts + // the container instance to deregister. If you do not specify a cluster, the + // default cluster is assumed. + Cluster *string `locationName:"cluster" type:"string"` + + // The container instance ID or full Amazon Resource Name (ARN) of the container + // instance to deregister. The ARN contains the arn:aws:ecs namespace, followed + // by the region of the container instance, the AWS account ID of the container + // instance owner, the container-instance namespace, and then the container + // instance ID. For example, arn:aws:ecs:region:aws_account_id:container-instance/container_instance_ID. + ContainerInstance *string `locationName:"containerInstance" type:"string" required:"true"` + + // Forces the deregistration of the container instance. If you have tasks running + // on the container instance when you deregister it with the force option, these + // tasks remain running and they continue to pass Elastic Load Balancing load + // balancer health checks until you terminate the instance or the tasks stop + // through some other means, but they are orphaned (no longer monitored or accounted + // for by Amazon ECS). If an orphaned task on your container instance is part + // of an Amazon ECS service, then the service scheduler starts another copy + // of that task, on a different container instance if possible. 
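+ //
+ // As a worked illustration of the DeploymentConfiguration percentages above
+ // (values are illustrative): with a desiredCount of 4, a maximumPercent of
+ // 200 allows at most floor(4 * 200/100) = 8 running tasks during a
+ // deployment, and a minimumHealthyPercent of 50 requires at least
+ // ceil(4 * 50/100) = 2 healthy tasks to remain running.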
+ Force *bool `locationName:"force" type:"boolean"` +} + +// String returns the string representation +func (s DeregisterContainerInstanceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeregisterContainerInstanceInput) GoString() string { + return s.String() +} + +type DeregisterContainerInstanceOutput struct { + _ struct{} `type:"structure"` + + // An EC2 instance that is running the Amazon ECS agent and has been registered + // with a cluster. + ContainerInstance *ContainerInstance `locationName:"containerInstance" type:"structure"` +} + +// String returns the string representation +func (s DeregisterContainerInstanceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeregisterContainerInstanceOutput) GoString() string { + return s.String() +} + +type DeregisterTaskDefinitionInput struct { + _ struct{} `type:"structure"` + + // The family and revision (family:revision) or full Amazon Resource Name (ARN) + // of the task definition to deregister. You must specify a revision. + TaskDefinition *string `locationName:"taskDefinition" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeregisterTaskDefinitionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeregisterTaskDefinitionInput) GoString() string { + return s.String() +} + +type DeregisterTaskDefinitionOutput struct { + _ struct{} `type:"structure"` + + // The full description of the deregistered task. + TaskDefinition *TaskDefinition `locationName:"taskDefinition" type:"structure"` +} + +// String returns the string representation +func (s DeregisterTaskDefinitionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeregisterTaskDefinitionOutput) GoString() string { + return s.String() +} + +type DescribeClustersInput struct { + _ struct{} `type:"structure"` + + // A space-separated list of cluster names or full cluster Amazon Resource Name + // (ARN) entries. If you do not specify a cluster, the default cluster is assumed. + Clusters []*string `locationName:"clusters" type:"list"` +} + +// String returns the string representation +func (s DescribeClustersInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeClustersInput) GoString() string { + return s.String() +} + +type DescribeClustersOutput struct { + _ struct{} `type:"structure"` + + // The list of clusters. + Clusters []*Cluster `locationName:"clusters" type:"list"` + + // Any failures associated with the call. + Failures []*Failure `locationName:"failures" type:"list"` +} + +// String returns the string representation +func (s DescribeClustersOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeClustersOutput) GoString() string { + return s.String() +} + +type DescribeContainerInstancesInput struct { + _ struct{} `type:"structure"` + + // The short name or full Amazon Resource Name (ARN) of the cluster that hosts + // the container instances to describe. If you do not specify a cluster, the + // default cluster is assumed. + Cluster *string `locationName:"cluster" type:"string"` + + // A space-separated list of container instance IDs or full Amazon Resource + // Name (ARN) entries. 
+ ContainerInstances []*string `locationName:"containerInstances" type:"list" required:"true"` +} + +// String returns the string representation +func (s DescribeContainerInstancesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeContainerInstancesInput) GoString() string { + return s.String() +} + +type DescribeContainerInstancesOutput struct { + _ struct{} `type:"structure"` + + // The list of container instances. + ContainerInstances []*ContainerInstance `locationName:"containerInstances" type:"list"` + + // Any failures associated with the call. + Failures []*Failure `locationName:"failures" type:"list"` +} + +// String returns the string representation +func (s DescribeContainerInstancesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeContainerInstancesOutput) GoString() string { + return s.String() +} + +type DescribeServicesInput struct { + _ struct{} `type:"structure"` + + // The name of the cluster that hosts the service to describe. If you do not + // specify a cluster, the default cluster is assumed. + Cluster *string `locationName:"cluster" type:"string"` + + // A list of services to describe. + Services []*string `locationName:"services" type:"list" required:"true"` +} + +// String returns the string representation +func (s DescribeServicesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeServicesInput) GoString() string { + return s.String() +} + +type DescribeServicesOutput struct { + _ struct{} `type:"structure"` + + // Any failures associated with the call. + Failures []*Failure `locationName:"failures" type:"list"` + + // The list of services described. + Services []*Service `locationName:"services" type:"list"` +} + +// String returns the string representation +func (s DescribeServicesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeServicesOutput) GoString() string { + return s.String() +} + +type DescribeTaskDefinitionInput struct { + _ struct{} `type:"structure"` + + // The family for the latest ACTIVE revision, family and revision (family:revision) + // for a specific revision in the family, or full Amazon Resource Name (ARN) + // of the task definition to describe. + TaskDefinition *string `locationName:"taskDefinition" type:"string" required:"true"` +} + +// String returns the string representation +func (s DescribeTaskDefinitionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeTaskDefinitionInput) GoString() string { + return s.String() +} + +type DescribeTaskDefinitionOutput struct { + _ struct{} `type:"structure"` + + // The full task definition description. + TaskDefinition *TaskDefinition `locationName:"taskDefinition" type:"structure"` +} + +// String returns the string representation +func (s DescribeTaskDefinitionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeTaskDefinitionOutput) GoString() string { + return s.String() +} + +type DescribeTasksInput struct { + _ struct{} `type:"structure"` + + // The short name or full Amazon Resource Name (ARN) of the cluster that hosts + // the task to describe. If you do not specify a cluster, the default cluster + // is assumed. 
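+ //
+ // A minimal sketch of describing tasks (assumes the svc client from the
+ // earlier sketch; the cluster name and task identifier are illustrative):
+ //
+ //    out, err := svc.DescribeTasks(&ecs.DescribeTasksInput{
+ //        Cluster: aws.String("default"),
+ //        Tasks:   []*string{aws.String("7f6f8e95-example-task-id")},
+ //    })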
+ Cluster *string `locationName:"cluster" type:"string"` + + // A space-separated list of task IDs or full Amazon Resource Name (ARN) entries. + Tasks []*string `locationName:"tasks" type:"list" required:"true"` +} + +// String returns the string representation +func (s DescribeTasksInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeTasksInput) GoString() string { + return s.String() +} + +type DescribeTasksOutput struct { + _ struct{} `type:"structure"` + + // Any failures associated with the call. + Failures []*Failure `locationName:"failures" type:"list"` + + // The list of tasks. + Tasks []*Task `locationName:"tasks" type:"list"` +} + +// String returns the string representation +func (s DescribeTasksOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeTasksOutput) GoString() string { + return s.String() +} + +type DiscoverPollEndpointInput struct { + _ struct{} `type:"structure"` + + // The cluster that the container instance belongs to. + Cluster *string `locationName:"cluster" type:"string"` + + // The container instance ID or full Amazon Resource Name (ARN) of the container + // instance. The ARN contains the arn:aws:ecs namespace, followed by the region + // of the container instance, the AWS account ID of the container instance owner, + // the container-instance namespace, and then the container instance ID. For + // example, arn:aws:ecs:region:aws_account_id:container-instance/container_instance_ID. + ContainerInstance *string `locationName:"containerInstance" type:"string"` +} + +// String returns the string representation +func (s DiscoverPollEndpointInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DiscoverPollEndpointInput) GoString() string { + return s.String() +} + +type DiscoverPollEndpointOutput struct { + _ struct{} `type:"structure"` + + // The endpoint for the Amazon ECS agent to poll. + Endpoint *string `locationName:"endpoint" type:"string"` + + // The telemetry endpoint for the Amazon ECS agent. + TelemetryEndpoint *string `locationName:"telemetryEndpoint" type:"string"` +} + +// String returns the string representation +func (s DiscoverPollEndpointOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DiscoverPollEndpointOutput) GoString() string { + return s.String() +} + +// A failed resource. +type Failure struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the failed resource. + Arn *string `locationName:"arn" type:"string"` + + // The reason for the failure. + Reason *string `locationName:"reason" type:"string"` +} + +// String returns the string representation +func (s Failure) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Failure) GoString() string { + return s.String() +} + +// Hostnames and IP address entries that are added to the /etc/hosts file of +// a container via the extraHosts parameter of its ContainerDefinition. +type HostEntry struct { + _ struct{} `type:"structure"` + + // The hostname to use in the /etc/hosts entry. + Hostname *string `locationName:"hostname" type:"string" required:"true"` + + // The IP address to use in the /etc/hosts entry. 
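+ //
+ // A sketch of a host entry as it would be supplied in a container
+ // definition's extraHosts list (values are illustrative):
+ //
+ //    entry := &ecs.HostEntry{
+ //        Hostname:  aws.String("db.internal"),
+ //        IpAddress: aws.String("10.0.0.12"),
+ //    }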
+ IpAddress *string `locationName:"ipAddress" type:"string" required:"true"` +} + +// String returns the string representation +func (s HostEntry) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s HostEntry) GoString() string { + return s.String() +} + +// Details on a container instance host volume. +type HostVolumeProperties struct { + _ struct{} `type:"structure"` + + // The path on the host container instance that is presented to the container. + // If this parameter is empty, then the Docker daemon has assigned a host path + // for you. If the host parameter contains a sourcePath file location, then + // the data volume persists at the specified location on the host container + // instance until you delete it manually. If the sourcePath value does not exist + // on the host container instance, the Docker daemon creates it. If the location + // does exist, the contents of the source path folder are exported. + SourcePath *string `locationName:"sourcePath" type:"string"` +} + +// String returns the string representation +func (s HostVolumeProperties) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s HostVolumeProperties) GoString() string { + return s.String() +} + +// A key and value pair object. +type KeyValuePair struct { + _ struct{} `type:"structure"` + + // The name of the key value pair. For environment variables, this is the name + // of the environment variable. + Name *string `locationName:"name" type:"string"` + + // The value of the key value pair. For environment variables, this is the value + // of the environment variable. + Value *string `locationName:"value" type:"string"` +} + +// String returns the string representation +func (s KeyValuePair) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s KeyValuePair) GoString() string { + return s.String() +} + +type ListClustersInput struct { + _ struct{} `type:"structure"` + + // The maximum number of cluster results returned by ListClusters in paginated + // output. When this parameter is used, ListClusters only returns maxResults + // results in a single page along with a nextToken response element. The remaining + // results of the initial request can be seen by sending another ListClusters + // request with the returned nextToken value. This value can be between 1 and + // 100. If this parameter is not used, then ListClusters returns up to 100 results + // and a nextToken value if applicable. + MaxResults *int64 `locationName:"maxResults" type:"integer"` + + // The nextToken value returned from a previous paginated ListClusters request + // where maxResults was used and the results exceeded the value of that parameter. + // Pagination continues from the end of the previous results that returned the + // nextToken value. This value is null when there are no more results to return. + NextToken *string `locationName:"nextToken" type:"string"` +} + +// String returns the string representation +func (s ListClustersInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListClustersInput) GoString() string { + return s.String() +} + +type ListClustersOutput struct { + _ struct{} `type:"structure"` + + // The list of full Amazon Resource Name (ARN) entries for each cluster associated + // with your account. 
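+ //
+ // A sketch of the nextToken pagination protocol described above (assumes the
+ // svc client from the earlier sketch):
+ //
+ //    input := &ecs.ListClustersInput{}
+ //    var arns []*string
+ //    for {
+ //        page, err := svc.ListClusters(input)
+ //        if err != nil {
+ //            break // handle error
+ //        }
+ //        arns = append(arns, page.ClusterArns...)
+ //        if page.NextToken == nil {
+ //            break
+ //        }
+ //        input.NextToken = page.NextToken
+ //    }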
+ ClusterArns []*string `locationName:"clusterArns" type:"list"` + + // The nextToken value to include in a future ListClusters request. When the + // results of a ListClusters request exceed maxResults, this value can be used + // to retrieve the next page of results. This value is null when there are no + // more results to return. + NextToken *string `locationName:"nextToken" type:"string"` +} + +// String returns the string representation +func (s ListClustersOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListClustersOutput) GoString() string { + return s.String() +} + +type ListContainerInstancesInput struct { + _ struct{} `type:"structure"` + + // The short name or full Amazon Resource Name (ARN) of the cluster that hosts + // the container instances to list. If you do not specify a cluster, the default + // cluster is assumed. + Cluster *string `locationName:"cluster" type:"string"` + + // The maximum number of container instance results returned by ListContainerInstances + // in paginated output. When this parameter is used, ListContainerInstances + // only returns maxResults results in a single page along with a nextToken response + // element. The remaining results of the initial request can be seen by sending + // another ListContainerInstances request with the returned nextToken value. + // This value can be between 1 and 100. If this parameter is not used, then + // ListContainerInstances returns up to 100 results and a nextToken value if + // applicable. + MaxResults *int64 `locationName:"maxResults" type:"integer"` + + // The nextToken value returned from a previous paginated ListContainerInstances + // request where maxResults was used and the results exceeded the value of that + // parameter. Pagination continues from the end of the previous results that + // returned the nextToken value. This value is null when there are no more results + // to return. + NextToken *string `locationName:"nextToken" type:"string"` +} + +// String returns the string representation +func (s ListContainerInstancesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListContainerInstancesInput) GoString() string { + return s.String() +} + +type ListContainerInstancesOutput struct { + _ struct{} `type:"structure"` + + // The list of container instances with full Amazon Resource Name (ARN) entries + // for each container instance associated with the specified cluster. + ContainerInstanceArns []*string `locationName:"containerInstanceArns" type:"list"` + + // The nextToken value to include in a future ListContainerInstances request. + // When the results of a ListContainerInstances request exceed maxResults, this + // value can be used to retrieve the next page of results. This value is null + // when there are no more results to return. + NextToken *string `locationName:"nextToken" type:"string"` +} + +// String returns the string representation +func (s ListContainerInstancesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListContainerInstancesOutput) GoString() string { + return s.String() +} + +type ListServicesInput struct { + _ struct{} `type:"structure"` + + // The short name or full Amazon Resource Name (ARN) of the cluster that hosts + // the services to list. If you do not specify a cluster, the default cluster + // is assumed.
+ Cluster *string `locationName:"cluster" type:"string"` + + // The maximum number of service results returned by ListServices + // in paginated output. When this parameter is used, ListServices only returns + // maxResults results in a single page along with a nextToken response element. + // The remaining results of the initial request can be seen by sending another + // ListServices request with the returned nextToken value. This value can be + // between 1 and 10. If this parameter is not used, then ListServices returns + // up to 10 results and a nextToken value if applicable. + MaxResults *int64 `locationName:"maxResults" type:"integer"` + + // The nextToken value returned from a previous paginated ListServices request + // where maxResults was used and the results exceeded the value of that parameter. + // Pagination continues from the end of the previous results that returned the + // nextToken value. This value is null when there are no more results to return. + NextToken *string `locationName:"nextToken" type:"string"` +} + +// String returns the string representation +func (s ListServicesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListServicesInput) GoString() string { + return s.String() +} + +type ListServicesOutput struct { + _ struct{} `type:"structure"` + + // The nextToken value to include in a future ListServices request. When the + // results of a ListServices request exceed maxResults, this value can be used + // to retrieve the next page of results. This value is null when there are no + // more results to return. + NextToken *string `locationName:"nextToken" type:"string"` + + // The list of full Amazon Resource Name (ARN) entries for each service associated + // with the specified cluster. + ServiceArns []*string `locationName:"serviceArns" type:"list"` +} + +// String returns the string representation +func (s ListServicesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListServicesOutput) GoString() string { + return s.String() +} + +type ListTaskDefinitionFamiliesInput struct { + _ struct{} `type:"structure"` + + // The familyPrefix is a string that is used to filter the results of ListTaskDefinitionFamilies. + // If you specify a familyPrefix, only task definition family names that begin + // with the familyPrefix string are returned. + FamilyPrefix *string `locationName:"familyPrefix" type:"string"` + + // The maximum number of task definition family results returned by ListTaskDefinitionFamilies + // in paginated output. When this parameter is used, ListTaskDefinitionFamilies only + // returns maxResults results in a single page along with a nextToken response + // element. The remaining results of the initial request can be seen by sending + // another ListTaskDefinitionFamilies request with the returned nextToken value. + // This value can be between 1 and 100. If this parameter is not used, then + // ListTaskDefinitionFamilies returns up to 100 results and a nextToken value + // if applicable. + MaxResults *int64 `locationName:"maxResults" type:"integer"` + + // The nextToken value returned from a previous paginated ListTaskDefinitionFamilies + // request where maxResults was used and the results exceeded the value of that + // parameter. Pagination continues from the end of the previous results that + // returned the nextToken value. This value is null when there are no more results + // to return.
+ NextToken *string `locationName:"nextToken" type:"string"` +} + +// String returns the string representation +func (s ListTaskDefinitionFamiliesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListTaskDefinitionFamiliesInput) GoString() string { + return s.String() +} + +type ListTaskDefinitionFamiliesOutput struct { + _ struct{} `type:"structure"` + + // The list of task definition family names that match the ListTaskDefinitionFamilies + // request. + Families []*string `locationName:"families" type:"list"` + + // The nextToken value to include in a future ListTaskDefinitionFamilies request. + // When the results of a ListTaskDefinitionFamilies request exceed maxResults, + // this value can be used to retrieve the next page of results. This value is + // null when there are no more results to return. + NextToken *string `locationName:"nextToken" type:"string"` +} + +// String returns the string representation +func (s ListTaskDefinitionFamiliesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListTaskDefinitionFamiliesOutput) GoString() string { + return s.String() +} + +type ListTaskDefinitionsInput struct { + _ struct{} `type:"structure"` + + // The full family name with which to filter the ListTaskDefinitions results. + // Specifying a familyPrefix limits the listed task definitions to task definition + // revisions that belong to that family. + FamilyPrefix *string `locationName:"familyPrefix" type:"string"` + + // The maximum number of task definition results returned by ListTaskDefinitions + // in paginated output. When this parameter is used, ListTaskDefinitions only + // returns maxResults results in a single page along with a nextToken response + // element. The remaining results of the initial request can be seen by sending + // another ListTaskDefinitions request with the returned nextToken value. This + // value can be between 1 and 100. If this parameter is not used, then ListTaskDefinitions + // returns up to 100 results and a nextToken value if applicable. + MaxResults *int64 `locationName:"maxResults" type:"integer"` + + // The nextToken value returned from a previous paginated ListTaskDefinitions + // request where maxResults was used and the results exceeded the value of that + // parameter. Pagination continues from the end of the previous results that + // returned the nextToken value. This value is null when there are no more results + // to return. + NextToken *string `locationName:"nextToken" type:"string"` + + // The order in which to sort the results. Valid values are ASC and DESC. By + // default (ASC), task definitions are listed lexicographically by family name + // and in ascending numerical order by revision so that the newest task definitions + // in a family are listed last. Setting this parameter to DESC reverses the + // sort order on family name and revision so that the newest task definitions + // in a family are listed first. + Sort *string `locationName:"sort" type:"string" enum:"SortOrder"` + + // The task definition status with which to filter the ListTaskDefinitions results. + // By default, only ACTIVE task definitions are listed. By setting this parameter + // to INACTIVE, you can view task definitions that are INACTIVE as long as an + // active task or service still references them. If you paginate the resulting + // output, be sure to keep the status value constant in each subsequent request. 
+ Status *string `locationName:"status" type:"string" enum:"TaskDefinitionStatus"` +} + +// String returns the string representation +func (s ListTaskDefinitionsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListTaskDefinitionsInput) GoString() string { + return s.String() +} + +type ListTaskDefinitionsOutput struct { + _ struct{} `type:"structure"` + + // The nextToken value to include in a future ListTaskDefinitions request. When + // the results of a ListTaskDefinitions request exceed maxResults, this value + // can be used to retrieve the next page of results. This value is null when + // there are no more results to return. + NextToken *string `locationName:"nextToken" type:"string"` + + // The list of task definition Amazon Resource Name (ARN) entries for the ListTaskDefinitions + // request. + TaskDefinitionArns []*string `locationName:"taskDefinitionArns" type:"list"` +} + +// String returns the string representation +func (s ListTaskDefinitionsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListTaskDefinitionsOutput) GoString() string { + return s.String() +} + +type ListTasksInput struct { + _ struct{} `type:"structure"` + + // The short name or full Amazon Resource Name (ARN) of the cluster that hosts + // the tasks to list. If you do not specify a cluster, the default cluster is + // assumed. + Cluster *string `locationName:"cluster" type:"string"` + + // The container instance ID or full Amazon Resource Name (ARN) of the container + // instance with which to filter the ListTasks results. Specifying a containerInstance + // limits the results to tasks that belong to that container instance. + ContainerInstance *string `locationName:"containerInstance" type:"string"` + + // The task status with which to filter the ListTasks results. Specifying a + // desiredStatus of STOPPED limits the results to tasks that are in the STOPPED + // status, which can be useful for debugging tasks that are not starting properly + // or have died or finished. The default status filter is RUNNING. + DesiredStatus *string `locationName:"desiredStatus" type:"string" enum:"DesiredStatus"` + + // The name of the family with which to filter the ListTasks results. Specifying + // a family limits the results to tasks that belong to that family. + Family *string `locationName:"family" type:"string"` + + // The maximum number of task results returned by ListTasks in paginated output. + // When this parameter is used, ListTasks only returns maxResults results in + // a single page along with a nextToken response element. The remaining results + // of the initial request can be seen by sending another ListTasks request with + // the returned nextToken value. This value can be between 1 and 100. If this + // parameter is not used, then ListTasks returns up to 100 results and a nextToken + // value if applicable. + MaxResults *int64 `locationName:"maxResults" type:"integer"` + + // The nextToken value returned from a previous paginated ListTasks request + // where maxResults was used and the results exceeded the value of that parameter. + // Pagination continues from the end of the previous results that returned the + // nextToken value. This value is null when there are no more results to return. + NextToken *string `locationName:"nextToken" type:"string"` + + // The name of the service with which to filter the ListTasks results.
Specifying + // a serviceName limits the results to tasks that belong to that service. + ServiceName *string `locationName:"serviceName" type:"string"` + + // The startedBy value with which to filter the task results. Specifying a startedBy + // value limits the results to tasks that were started with that value. + StartedBy *string `locationName:"startedBy" type:"string"` +} + +// String returns the string representation +func (s ListTasksInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListTasksInput) GoString() string { + return s.String() +} + +type ListTasksOutput struct { + _ struct{} `type:"structure"` + + // The nextToken value to include in a future ListTasks request. When the results + // of a ListTasks request exceed maxResults, this value can be used to retrieve + // the next page of results. This value is null when there are no more results + // to return. + NextToken *string `locationName:"nextToken" type:"string"` + + // The list of task Amazon Resource Name (ARN) entries for the ListTasks request. + TaskArns []*string `locationName:"taskArns" type:"list"` +} + +// String returns the string representation +func (s ListTasksOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListTasksOutput) GoString() string { + return s.String() +} + +// Details on a load balancer that is used with a service. +type LoadBalancer struct { + _ struct{} `type:"structure"` + + // The name of the container (as it appears in a container definition) to associate + // with the load balancer. + ContainerName *string `locationName:"containerName" type:"string"` + + // The port on the container to associate with the load balancer. This port + // must correspond to a containerPort in the service's task definition. Your + // container instances must allow ingress traffic on the hostPort of the port + // mapping. + ContainerPort *int64 `locationName:"containerPort" type:"integer"` + + // The name of the load balancer. + LoadBalancerName *string `locationName:"loadBalancerName" type:"string"` +} + +// String returns the string representation +func (s LoadBalancer) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s LoadBalancer) GoString() string { + return s.String() +} + +// Log configuration options to send to a custom log driver for the container. +type LogConfiguration struct { + _ struct{} `type:"structure"` + + // The log driver to use for the container. This parameter requires version + // 1.18 of the Docker Remote API or greater on your container instance. To check + // the Docker Remote API version on your container instance, log into your container + // instance and run the following command: sudo docker version | grep "Server + // API version" + LogDriver *string `locationName:"logDriver" type:"string" required:"true" enum:"LogDriver"` + + // The configuration options to send to the log driver. This parameter requires + // version 1.19 of the Docker Remote API or greater on your container instance. 
+ // To check the Docker Remote API version on your container instance, log into + // your container instance and run the following command: sudo docker version + // | grep "Server API version" + Options map[string]*string `locationName:"options" type:"map"` +} + +// String returns the string representation +func (s LogConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s LogConfiguration) GoString() string { + return s.String() +} + +// Details on a volume mount point that is used in a container definition. +type MountPoint struct { + _ struct{} `type:"structure"` + + // The path on the container to mount the host volume at. + ContainerPath *string `locationName:"containerPath" type:"string"` + + // If this value is true, the container has read-only access to the volume. + // If this value is false, then the container can write to the volume. The default + // value is false. + ReadOnly *bool `locationName:"readOnly" type:"boolean"` + + // The name of the volume to mount. + SourceVolume *string `locationName:"sourceVolume" type:"string"` +} + +// String returns the string representation +func (s MountPoint) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s MountPoint) GoString() string { + return s.String() +} + +// Details on the network bindings between a container and its host container +// instance. After a task reaches the RUNNING status, manual and automatic host +// and container port assignments are visible in the networkBindings section +// of DescribeTasks API responses. +type NetworkBinding struct { + _ struct{} `type:"structure"` + + // The IP address that the container is bound to on the container instance. + BindIP *string `locationName:"bindIP" type:"string"` + + // The port number on the container that is used with the network binding. + ContainerPort *int64 `locationName:"containerPort" type:"integer"` + + // The port number on the host that is used with the network binding. + HostPort *int64 `locationName:"hostPort" type:"integer"` + + // The protocol used for the network binding. + Protocol *string `locationName:"protocol" type:"string" enum:"TransportProtocol"` +} + +// String returns the string representation +func (s NetworkBinding) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s NetworkBinding) GoString() string { + return s.String() +} + +// Port mappings allow containers to access ports on the host container instance +// to send or receive traffic. Port mappings are specified as part of the container +// definition. After a task reaches the RUNNING status, manual and automatic +// host and container port assignments are visible in the networkBindings section +// of DescribeTasks API responses. +type PortMapping struct { + _ struct{} `type:"structure"` + + // The port number on the container that is bound to the user-specified or automatically + // assigned host port. If you specify a container port and not a host port, + // your container automatically receives a host port in the ephemeral port range + // (for more information, see hostPort). + ContainerPort *int64 `locationName:"containerPort" type:"integer"` + + // The port number on the container instance to reserve for your container.
+ // You can specify a non-reserved host port for your container port mapping, + // or you can omit the hostPort (or set it to 0) while specifying a containerPort + // and your container automatically receives a port in the ephemeral port range + // for your container instance operating system and Docker version. + // + // The default ephemeral port range is 49153 to 65535, and this range is used + // for Docker versions prior to 1.6.0. For Docker version 1.6.0 and later, the + // Docker daemon tries to read the ephemeral port range from /proc/sys/net/ipv4/ip_local_port_range; + // if this kernel parameter is unavailable, the default ephemeral port range + // is used. You should not attempt to specify a host port in the ephemeral port + // range, because these are reserved for automatic assignment. In general, ports + // below 32768 are outside of the ephemeral port range. + // + // The default reserved ports are 22 for SSH, the Docker ports 2375 and 2376, + // and the Amazon ECS container agent port 51678. Any host port that was previously + // specified in a running task is also reserved while the task is running (after + // a task stops, the host port is released). The current reserved ports are displayed + // in the remainingResources of DescribeContainerInstances output, and a container + // instance may have up to 50 reserved ports at a time, including the default + // reserved ports (automatically assigned ports do not count toward this limit). + HostPort *int64 `locationName:"hostPort" type:"integer"` + + // The protocol used for the port mapping. Valid values are tcp and udp. The + // default is tcp. + Protocol *string `locationName:"protocol" type:"string" enum:"TransportProtocol"` +} + +// String returns the string representation +func (s PortMapping) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PortMapping) GoString() string { + return s.String() +} + +type RegisterContainerInstanceInput struct { + _ struct{} `type:"structure"` + + // The container instance attributes that this container instance supports. + Attributes []*Attribute `locationName:"attributes" type:"list"` + + // The short name or full Amazon Resource Name (ARN) of the cluster with which + // to register your container instance. If you do not specify a cluster, the + // default cluster is assumed. + Cluster *string `locationName:"cluster" type:"string"` + + // The Amazon Resource Name (ARN) of the container instance (if it was previously + // registered). + ContainerInstanceArn *string `locationName:"containerInstanceArn" type:"string"` + + // The instance identity document for the EC2 instance to register. This document + // can be found by running the following command from the instance: curl http://169.254.169.254/latest/dynamic/instance-identity/document/ + InstanceIdentityDocument *string `locationName:"instanceIdentityDocument" type:"string"` + + // The instance identity document signature for the EC2 instance to register. + // This signature can be found by running the following command from the instance: + // curl http://169.254.169.254/latest/dynamic/instance-identity/signature/ + InstanceIdentityDocumentSignature *string `locationName:"instanceIdentityDocumentSignature" type:"string"` + + // The resources available on the instance. + TotalResources []*Resource `locationName:"totalResources" type:"list"` + + // The version information for the Amazon ECS container agent and Docker daemon + // running on the container instance.
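+ //
+ // A sketch of the host-port behavior described for PortMapping above: setting
+ // hostPort to 0 (or omitting it) asks Docker for an ephemeral host port
+ // (values are illustrative):
+ //
+ //    pm := &ecs.PortMapping{
+ //        ContainerPort: aws.Int64(8080),
+ //        HostPort:      aws.Int64(0), // assigned from the ephemeral range
+ //        Protocol:      aws.String("tcp"),
+ //    }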
+	VersionInfo *VersionInfo `locationName:"versionInfo" type:"structure"`
+}
+
+// String returns the string representation
+func (s RegisterContainerInstanceInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s RegisterContainerInstanceInput) GoString() string {
+	return s.String()
+}
+
+type RegisterContainerInstanceOutput struct {
+	_ struct{} `type:"structure"`
+
+	// An EC2 instance that is running the Amazon ECS agent and has been registered
+	// with a cluster.
+	ContainerInstance *ContainerInstance `locationName:"containerInstance" type:"structure"`
+}
+
+// String returns the string representation
+func (s RegisterContainerInstanceOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s RegisterContainerInstanceOutput) GoString() string {
+	return s.String()
+}
+
+type RegisterTaskDefinitionInput struct {
+	_ struct{} `type:"structure"`
+
+	// A list of container definitions in JSON format that describe the different
+	// containers that make up your task.
+	ContainerDefinitions []*ContainerDefinition `locationName:"containerDefinitions" type:"list" required:"true"`
+
+	// You must specify a family for a task definition, which allows you to track
+	// multiple versions of the same task definition. The family is used as a name
+	// for your task definition. Up to 255 letters (uppercase and lowercase), numbers,
+	// hyphens, and underscores are allowed.
+	Family *string `locationName:"family" type:"string" required:"true"`
+
+	// A list of volume definitions in JSON format that containers in your task
+	// may use.
+	Volumes []*Volume `locationName:"volumes" type:"list"`
+}
+
+// String returns the string representation
+func (s RegisterTaskDefinitionInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s RegisterTaskDefinitionInput) GoString() string {
+	return s.String()
+}
+
+type RegisterTaskDefinitionOutput struct {
+	_ struct{} `type:"structure"`
+
+	// The full description of the registered task definition.
+	TaskDefinition *TaskDefinition `locationName:"taskDefinition" type:"structure"`
+}
+
+// String returns the string representation
+func (s RegisterTaskDefinitionOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s RegisterTaskDefinitionOutput) GoString() string {
+	return s.String()
+}
+
+// Describes the resources available for a container instance.
+type Resource struct {
+	_ struct{} `type:"structure"`
+
+	// When the doubleValue type is set, the value of the resource must be a double
+	// precision floating-point type.
+	DoubleValue *float64 `locationName:"doubleValue" type:"double"`
+
+	// When the integerValue type is set, the value of the resource must be an integer.
+	IntegerValue *int64 `locationName:"integerValue" type:"integer"`
+
+	// When the longValue type is set, the value of the resource must be a 64-bit
+	// integer (long) type.
+	LongValue *int64 `locationName:"longValue" type:"long"`
+
+	// The name of the resource, such as CPU, MEMORY, PORTS, or a user-defined resource.
+	Name *string `locationName:"name" type:"string"`
+
+	// When the stringSetValue type is set, the value of the resource must be a
+	// string type.
+	StringSetValue []*string `locationName:"stringSetValue" type:"list"`
+
+	// The type of the resource, such as INTEGER, DOUBLE, LONG, or STRINGSET.
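+	// For example, the built-in PORTS resource is a STRINGSET whose value is the
+	// list of host ports currently reserved on the container instance (see the
+	// remainingResources of DescribeContainerInstances output, described above).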
+	Type *string `locationName:"type" type:"string"`
+}
+
+// String returns the string representation
+func (s Resource) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s Resource) GoString() string {
+	return s.String()
+}
+
+type RunTaskInput struct {
+	_ struct{} `type:"structure"`
+
+	// The short name or full Amazon Resource Name (ARN) of the cluster on which
+	// to run your task. If you do not specify a cluster, the default cluster is
+	// assumed.
+	Cluster *string `locationName:"cluster" type:"string"`
+
+	// The number of instantiations of the specified task to place on your cluster.
+	//
+	// The count parameter is limited to 10 tasks per call.
+	Count *int64 `locationName:"count" type:"integer"`
+
+	// A list of container overrides in JSON format that specify the name of a container
+	// in the specified task definition and the overrides it should receive. You
+	// can override the default command for a container (that is specified in the
+	// task definition or Docker image) with a command override. You can also override
+	// existing environment variables (that are specified in the task definition
+	// or Docker image) on a container or add new environment variables to it with
+	// an environment override.
+	//
+	// A total of 8192 characters are allowed for overrides. This limit includes
+	// the JSON formatting characters of the override structure.
+	Overrides *TaskOverride `locationName:"overrides" type:"structure"`
+
+	// An optional tag specified when a task is started. For example, if you automatically
+	// trigger a task to run a batch process job, you could apply a unique identifier
+	// for that job to your task with the startedBy parameter. You can then identify
+	// which tasks belong to that job by filtering the results of a ListTasks call
+	// with the startedBy value.
+	//
+	// If a task is started by an Amazon ECS service, then the startedBy parameter
+	// contains the deployment ID of the service that starts it.
+	StartedBy *string `locationName:"startedBy" type:"string"`
+
+	// The family and revision (family:revision) or full Amazon Resource Name (ARN)
+	// of the task definition to run. If a revision is not specified, the latest
+	// ACTIVE revision is used.
+	TaskDefinition *string `locationName:"taskDefinition" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s RunTaskInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s RunTaskInput) GoString() string {
+	return s.String()
+}
+
+type RunTaskOutput struct {
+	_ struct{} `type:"structure"`
+
+	// Any failures associated with the call.
+	Failures []*Failure `locationName:"failures" type:"list"`
+
+	// A full description of the tasks that were run. Each task that was successfully
+	// placed on your cluster is described here.
+	Tasks []*Task `locationName:"tasks" type:"list"`
+}
+
+// String returns the string representation
+func (s RunTaskOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s RunTaskOutput) GoString() string {
+	return s.String()
+}
+
+// Details on a service within a cluster.
+type Service struct {
+	_ struct{} `type:"structure"`
+
+	// The Amazon Resource Name (ARN) of the cluster that hosts the service.
+ ClusterArn *string `locationName:"clusterArn" type:"string"` + + // Optional deployment parameters that control how many tasks run during the + // deployment and the ordering of stopping and starting tasks. + DeploymentConfiguration *DeploymentConfiguration `locationName:"deploymentConfiguration" type:"structure"` + + // The current state of deployments for the service. + Deployments []*Deployment `locationName:"deployments" type:"list"` + + // The desired number of instantiations of the task definition to keep running + // on the service. This value is specified when the service is created with + // CreateService, and it can be modified with UpdateService. + DesiredCount *int64 `locationName:"desiredCount" type:"integer"` + + // The event stream for your service. A maximum of 100 of the latest events + // are displayed. + Events []*ServiceEvent `locationName:"events" type:"list"` + + // A list of load balancer objects, containing the load balancer name, the container + // name (as it appears in a container definition), and the container port to + // access from the load balancer. + LoadBalancers []*LoadBalancer `locationName:"loadBalancers" type:"list"` + + // The number of tasks in the cluster that are in the PENDING state. + PendingCount *int64 `locationName:"pendingCount" type:"integer"` + + // The Amazon Resource Name (ARN) of the IAM role associated with the service + // that allows the Amazon ECS container agent to register container instances + // with a load balancer. + RoleArn *string `locationName:"roleArn" type:"string"` + + // The number of tasks in the cluster that are in the RUNNING state. + RunningCount *int64 `locationName:"runningCount" type:"integer"` + + // The Amazon Resource Name (ARN) that identifies the service. The ARN contains + // the arn:aws:ecs namespace, followed by the region of the service, the AWS + // account ID of the service owner, the service namespace, and then the service + // name. For example, arn:aws:ecs:region:012345678910:service/my-service. + ServiceArn *string `locationName:"serviceArn" type:"string"` + + // The name of your service. Up to 255 letters (uppercase and lowercase), numbers, + // hyphens, and underscores are allowed. Service names must be unique within + // a cluster, but you can have similarly named services in multiple clusters + // within a region or across multiple regions. + ServiceName *string `locationName:"serviceName" type:"string"` + + // The status of the service. The valid values are ACTIVE, DRAINING, or INACTIVE. + Status *string `locationName:"status" type:"string"` + + // The task definition to use for tasks in the service. This value is specified + // when the service is created with CreateService, and it can be modified with + // UpdateService. + TaskDefinition *string `locationName:"taskDefinition" type:"string"` +} + +// String returns the string representation +func (s Service) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Service) GoString() string { + return s.String() +} + +// Details on an event associated with a service. +type ServiceEvent struct { + _ struct{} `type:"structure"` + + // The Unix time in seconds and milliseconds when the event was triggered. + CreatedAt *time.Time `locationName:"createdAt" type:"timestamp" timestampFormat:"unix"` + + // The ID string of the event. + Id *string `locationName:"id" type:"string"` + + // The event message. 
+	Message *string `locationName:"message" type:"string"`
+}
+
+// String returns the string representation
+func (s ServiceEvent) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ServiceEvent) GoString() string {
+	return s.String()
+}
+
+type StartTaskInput struct {
+	_ struct{} `type:"structure"`
+
+	// The short name or full Amazon Resource Name (ARN) of the cluster on which
+	// to start your task. If you do not specify a cluster, the default cluster
+	// is assumed.
+	Cluster *string `locationName:"cluster" type:"string"`
+
+	// The container instance IDs or full Amazon Resource Name (ARN) entries for
+	// the container instances on which you would like to place your task.
+	//
+	// The list of container instances to start tasks on is limited to 10.
+	ContainerInstances []*string `locationName:"containerInstances" type:"list" required:"true"`
+
+	// A list of container overrides in JSON format that specify the name of a container
+	// in the specified task definition and the overrides it should receive. You
+	// can override the default command for a container (that is specified in the
+	// task definition or Docker image) with a command override. You can also override
+	// existing environment variables (that are specified in the task definition
+	// or Docker image) on a container or add new environment variables to it with
+	// an environment override.
+	//
+	// A total of 8192 characters are allowed for overrides. This limit includes
+	// the JSON formatting characters of the override structure.
+	Overrides *TaskOverride `locationName:"overrides" type:"structure"`
+
+	// An optional tag specified when a task is started. For example, if you automatically
+	// trigger a task to run a batch process job, you could apply a unique identifier
+	// for that job to your task with the startedBy parameter. You can then identify
+	// which tasks belong to that job by filtering the results of a ListTasks call
+	// with the startedBy value.
+	//
+	// If a task is started by an Amazon ECS service, then the startedBy parameter
+	// contains the deployment ID of the service that starts it.
+	StartedBy *string `locationName:"startedBy" type:"string"`
+
+	// The family and revision (family:revision) or full Amazon Resource Name (ARN)
+	// of the task definition to start. If a revision is not specified, the latest
+	// ACTIVE revision is used.
+	TaskDefinition *string `locationName:"taskDefinition" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s StartTaskInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s StartTaskInput) GoString() string {
+	return s.String()
+}
+
+type StartTaskOutput struct {
+	_ struct{} `type:"structure"`
+
+	// Any failures associated with the call.
+	Failures []*Failure `locationName:"failures" type:"list"`
+
+	// A full description of the tasks that were started. Each task that was successfully
+	// placed on your container instances is described here.
+	Tasks []*Task `locationName:"tasks" type:"list"`
+}
+
+// String returns the string representation
+func (s StartTaskOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s StartTaskOutput) GoString() string {
+	return s.String()
+}
+
+type StopTaskInput struct {
+	_ struct{} `type:"structure"`
+
+	// The short name or full Amazon Resource Name (ARN) of the cluster that hosts
+	// the task to stop. If you do not specify a cluster, the default cluster is
+	// assumed.
+	Cluster *string `locationName:"cluster" type:"string"`
+
+	// An optional message specified when a task is stopped. For example, if you
+	// are using a custom scheduler, you can use this parameter to specify the reason
+	// for stopping the task here, and the message will appear in subsequent DescribeTasks
+	// API operations on this task. Up to 255 characters are allowed in this message.
+	Reason *string `locationName:"reason" type:"string"`
+
+	// The task ID or full Amazon Resource Name (ARN) entry of the task to stop.
+	Task *string `locationName:"task" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s StopTaskInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s StopTaskInput) GoString() string {
+	return s.String()
+}
+
+type StopTaskOutput struct {
+	_ struct{} `type:"structure"`
+
+	// Details on a task in a cluster.
+	Task *Task `locationName:"task" type:"structure"`
+}
+
+// String returns the string representation
+func (s StopTaskOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s StopTaskOutput) GoString() string {
+	return s.String()
+}
+
+type SubmitContainerStateChangeInput struct {
+	_ struct{} `type:"structure"`
+
+	// The short name or full Amazon Resource Name (ARN) of the cluster that hosts
+	// the container.
+	Cluster *string `locationName:"cluster" type:"string"`
+
+	// The name of the container.
+	ContainerName *string `locationName:"containerName" type:"string"`
+
+	// The exit code returned for the state change request.
+	ExitCode *int64 `locationName:"exitCode" type:"integer"`
+
+	// The network bindings of the container.
+	NetworkBindings []*NetworkBinding `locationName:"networkBindings" type:"list"`
+
+	// The reason for the state change request.
+	Reason *string `locationName:"reason" type:"string"`
+
+	// The status of the state change request.
+	Status *string `locationName:"status" type:"string"`
+
+	// The task ID or full Amazon Resource Name (ARN) of the task that hosts the
+	// container.
+	Task *string `locationName:"task" type:"string"`
+}
+
+// String returns the string representation
+func (s SubmitContainerStateChangeInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s SubmitContainerStateChangeInput) GoString() string {
+	return s.String()
+}
+
+type SubmitContainerStateChangeOutput struct {
+	_ struct{} `type:"structure"`
+
+	// Acknowledgement of the state change.
+	Acknowledgment *string `locationName:"acknowledgment" type:"string"`
+}
+
+// String returns the string representation
+func (s SubmitContainerStateChangeOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s SubmitContainerStateChangeOutput) GoString() string {
+	return s.String()
+}
+
+type SubmitTaskStateChangeInput struct {
+	_ struct{} `type:"structure"`
+
+	// The short name or full Amazon Resource Name (ARN) of the cluster that hosts
+	// the task.
+	Cluster *string `locationName:"cluster" type:"string"`
+
+	// The reason for the state change request.
+	Reason *string `locationName:"reason" type:"string"`
+
+	// The status of the state change request.
+	Status *string `locationName:"status" type:"string"`
+
+	// The task ID or full Amazon Resource Name (ARN) of the task in the state change
+	// request.
+	Task *string `locationName:"task" type:"string"`
+}
+
+// String returns the string representation
+func (s SubmitTaskStateChangeInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s SubmitTaskStateChangeInput) GoString() string {
+	return s.String()
+}
+
+type SubmitTaskStateChangeOutput struct {
+	_ struct{} `type:"structure"`
+
+	// Acknowledgement of the state change.
+	Acknowledgment *string `locationName:"acknowledgment" type:"string"`
+}
+
+// String returns the string representation
+func (s SubmitTaskStateChangeOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s SubmitTaskStateChangeOutput) GoString() string {
+	return s.String()
+}
+
+// Details on a task in a cluster.
+type Task struct {
+	_ struct{} `type:"structure"`
+
+	// The Amazon Resource Name (ARN) of the cluster that hosts the task.
+	ClusterArn *string `locationName:"clusterArn" type:"string"`
+
+	// The Amazon Resource Name (ARN) of the container instance that hosts the task.
+	ContainerInstanceArn *string `locationName:"containerInstanceArn" type:"string"`
+
+	// The containers associated with the task.
+	Containers []*Container `locationName:"containers" type:"list"`
+
+	// The Unix time in seconds and milliseconds when the task was created (the
+	// task entered the PENDING state).
+	CreatedAt *time.Time `locationName:"createdAt" type:"timestamp" timestampFormat:"unix"`
+
+	// The desired status of the task.
+	DesiredStatus *string `locationName:"desiredStatus" type:"string"`
+
+	// The last known status of the task.
+	LastStatus *string `locationName:"lastStatus" type:"string"`
+
+	// One or more container overrides.
+	Overrides *TaskOverride `locationName:"overrides" type:"structure"`
+
+	// The Unix time in seconds and milliseconds when the task was started (the
+	// task transitioned from the PENDING state to the RUNNING state).
+	StartedAt *time.Time `locationName:"startedAt" type:"timestamp" timestampFormat:"unix"`
+
+	// The tag specified when a task is started. If the task is started by an Amazon
+	// ECS service, then the startedBy parameter contains the deployment ID of the
+	// service that starts it.
+	StartedBy *string `locationName:"startedBy" type:"string"`
+
+	// The Unix time in seconds and milliseconds when the task was stopped (the
+	// task transitioned from the RUNNING state to the STOPPED state).
+	StoppedAt *time.Time `locationName:"stoppedAt" type:"timestamp" timestampFormat:"unix"`
+
+	// The reason the task was stopped.
+	StoppedReason *string `locationName:"stoppedReason" type:"string"`
+
+	// The Amazon Resource Name (ARN) of the task.
+	TaskArn *string `locationName:"taskArn" type:"string"`
+
+	// The Amazon Resource Name (ARN) of the task definition that creates the task.
+	TaskDefinitionArn *string `locationName:"taskDefinitionArn" type:"string"`
+}
+
+// String returns the string representation
+func (s Task) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s Task) GoString() string {
+	return s.String()
+}
+
+// Details of a task definition.
+type TaskDefinition struct {
+	_ struct{} `type:"structure"`
+
+	// A list of container definitions in JSON format that describe the different
+	// containers that make up your task. For more information about container definition
+	// parameters and defaults, see Amazon ECS Task Definitions (http://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_defintions.html)
+	// in the Amazon EC2 Container Service Developer Guide.
+	ContainerDefinitions []*ContainerDefinition `locationName:"containerDefinitions" type:"list"`
+
+	// The family of your task definition, used as the definition name.
+	Family *string `locationName:"family" type:"string"`
+
+	// The container instance attributes required by your task.
+	RequiresAttributes []*Attribute `locationName:"requiresAttributes" type:"list"`
+
+	// The revision of the task in a particular family. The revision is a version
+	// number of a task definition in a family. When you register a task definition
+	// for the first time, the revision is 1; each time you register a new revision
+	// of a task definition in the same family, the revision value always increases
+	// by one (even if you have deregistered previous revisions in this family).
+	Revision *int64 `locationName:"revision" type:"integer"`
+
+	// The status of the task definition.
+	Status *string `locationName:"status" type:"string" enum:"TaskDefinitionStatus"`
+
+	// The full Amazon Resource Name (ARN) of the task definition.
+	TaskDefinitionArn *string `locationName:"taskDefinitionArn" type:"string"`
+
+	// The list of volumes in a task. For more information about volume definition
+	// parameters and defaults, see Amazon ECS Task Definitions (http://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_defintions.html)
+	// in the Amazon EC2 Container Service Developer Guide.
+	Volumes []*Volume `locationName:"volumes" type:"list"`
+}
+
+// String returns the string representation
+func (s TaskDefinition) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s TaskDefinition) GoString() string {
+	return s.String()
+}
+
+// The overrides associated with a task.
+type TaskOverride struct {
+	_ struct{} `type:"structure"`
+
+	// One or more container overrides sent to a task.
+	ContainerOverrides []*ContainerOverride `locationName:"containerOverrides" type:"list"`
+}
+
+// String returns the string representation
+func (s TaskOverride) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s TaskOverride) GoString() string {
+	return s.String()
+}
+
+// The ulimit settings to pass to the container.
+type Ulimit struct {
+	_ struct{} `type:"structure"`
+
+	// The hard limit for the ulimit type.
+	HardLimit *int64 `locationName:"hardLimit" type:"integer" required:"true"`
+
+	// The type of the ulimit.
+	Name *string `locationName:"name" type:"string" required:"true" enum:"UlimitName"`
+
+	// The soft limit for the ulimit type.
+	SoftLimit *int64 `locationName:"softLimit" type:"integer" required:"true"`
+}
+
+// String returns the string representation
+func (s Ulimit) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s Ulimit) GoString() string {
+	return s.String()
+}
+
+type UpdateContainerAgentInput struct {
+	_ struct{} `type:"structure"`
+
+	// The short name or full Amazon Resource Name (ARN) of the cluster that your
+	// container instance is running on. If you do not specify a cluster, the default
+	// cluster is assumed.
+	Cluster *string `locationName:"cluster" type:"string"`
+
+	// The container instance ID or full Amazon Resource Name (ARN) entry for
+	// the container instance on which you would like to update the Amazon ECS container
+	// agent.
+	ContainerInstance *string `locationName:"containerInstance" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s UpdateContainerAgentInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s UpdateContainerAgentInput) GoString() string {
+	return s.String()
+}
+
+type UpdateContainerAgentOutput struct {
+	_ struct{} `type:"structure"`
+
+	// An EC2 instance that is running the Amazon ECS agent and has been registered
+	// with a cluster.
+	ContainerInstance *ContainerInstance `locationName:"containerInstance" type:"structure"`
+}
+
+// String returns the string representation
+func (s UpdateContainerAgentOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s UpdateContainerAgentOutput) GoString() string {
+	return s.String()
+}
+
+type UpdateServiceInput struct {
+	_ struct{} `type:"structure"`
+
+	// The short name or full Amazon Resource Name (ARN) of the cluster that your
+	// service is running on. If you do not specify a cluster, the default cluster
+	// is assumed.
+	Cluster *string `locationName:"cluster" type:"string"`
+
+	// Optional deployment parameters that control how many tasks run during the
+	// deployment and the ordering of stopping and starting tasks.
+	DeploymentConfiguration *DeploymentConfiguration `locationName:"deploymentConfiguration" type:"structure"`
+
+	// The number of instantiations of the task to place and keep running in your
+	// service.
+	DesiredCount *int64 `locationName:"desiredCount" type:"integer"`
+
+	// The name of the service to update.
+	Service *string `locationName:"service" type:"string" required:"true"`
+
+	// The family and revision (family:revision) or full Amazon Resource Name (ARN)
+	// of the task definition to run in your service. If a revision is not specified,
+	// the latest ACTIVE revision is used. If you modify the task definition with
+	// UpdateService, Amazon ECS spawns a task with the new version of the task
+	// definition and then stops an old task after the new version is running.
+	TaskDefinition *string `locationName:"taskDefinition" type:"string"`
+}
+
+// String returns the string representation
+func (s UpdateServiceInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s UpdateServiceInput) GoString() string {
+	return s.String()
+}
+
+type UpdateServiceOutput struct {
+	_ struct{} `type:"structure"`
+
+	// The full description of your service following the update call.
+	Service *Service `locationName:"service" type:"structure"`
+}
+
+// String returns the string representation
+func (s UpdateServiceOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s UpdateServiceOutput) GoString() string {
+	return s.String()
+}
+
+// The Docker and Amazon ECS container agent version information about a container
+// instance.
+type VersionInfo struct {
+	_ struct{} `type:"structure"`
+
+	// The Git commit hash for the Amazon ECS container agent build on the amazon-ecs-agent
+	// (https://github.com/aws/amazon-ecs-agent/commits/master) GitHub repository.
+ AgentHash *string `locationName:"agentHash" type:"string"` + + // The version number of the Amazon ECS container agent. + AgentVersion *string `locationName:"agentVersion" type:"string"` + + // The Docker version running on the container instance. + DockerVersion *string `locationName:"dockerVersion" type:"string"` +} + +// String returns the string representation +func (s VersionInfo) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s VersionInfo) GoString() string { + return s.String() +} + +// A data volume used in a task definition. +type Volume struct { + _ struct{} `type:"structure"` + + // The contents of the host parameter determine whether your data volume persists + // on the host container instance and where it is stored. If the host parameter + // is empty, then the Docker daemon assigns a host path for your data volume, + // but the data is not guaranteed to persist after the containers associated + // with it stop running. + Host *HostVolumeProperties `locationName:"host" type:"structure"` + + // The name of the volume. Up to 255 letters (uppercase and lowercase), numbers, + // hyphens, and underscores are allowed. This name is referenced in the sourceVolume + // parameter of container definition mountPoints. + Name *string `locationName:"name" type:"string"` +} + +// String returns the string representation +func (s Volume) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Volume) GoString() string { + return s.String() +} + +// Details on a data volume from another container. +type VolumeFrom struct { + _ struct{} `type:"structure"` + + // If this value is true, the container has read-only access to the volume. + // If this value is false, then the container can write to the volume. The default + // value is false. + ReadOnly *bool `locationName:"readOnly" type:"boolean"` + + // The name of the container to mount volumes from. 
+ SourceContainer *string `locationName:"sourceContainer" type:"string"` +} + +// String returns the string representation +func (s VolumeFrom) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s VolumeFrom) GoString() string { + return s.String() +} + +const ( + // @enum AgentUpdateStatus + AgentUpdateStatusPending = "PENDING" + // @enum AgentUpdateStatus + AgentUpdateStatusStaging = "STAGING" + // @enum AgentUpdateStatus + AgentUpdateStatusStaged = "STAGED" + // @enum AgentUpdateStatus + AgentUpdateStatusUpdating = "UPDATING" + // @enum AgentUpdateStatus + AgentUpdateStatusUpdated = "UPDATED" + // @enum AgentUpdateStatus + AgentUpdateStatusFailed = "FAILED" +) + +const ( + // @enum DesiredStatus + DesiredStatusRunning = "RUNNING" + // @enum DesiredStatus + DesiredStatusPending = "PENDING" + // @enum DesiredStatus + DesiredStatusStopped = "STOPPED" +) + +const ( + // @enum LogDriver + LogDriverJsonFile = "json-file" + // @enum LogDriver + LogDriverSyslog = "syslog" + // @enum LogDriver + LogDriverJournald = "journald" + // @enum LogDriver + LogDriverGelf = "gelf" + // @enum LogDriver + LogDriverFluentd = "fluentd" +) + +const ( + // @enum SortOrder + SortOrderAsc = "ASC" + // @enum SortOrder + SortOrderDesc = "DESC" +) + +const ( + // @enum TaskDefinitionStatus + TaskDefinitionStatusActive = "ACTIVE" + // @enum TaskDefinitionStatus + TaskDefinitionStatusInactive = "INACTIVE" +) + +const ( + // @enum TransportProtocol + TransportProtocolTcp = "tcp" + // @enum TransportProtocol + TransportProtocolUdp = "udp" +) + +const ( + // @enum UlimitName + UlimitNameCore = "core" + // @enum UlimitName + UlimitNameCpu = "cpu" + // @enum UlimitName + UlimitNameData = "data" + // @enum UlimitName + UlimitNameFsize = "fsize" + // @enum UlimitName + UlimitNameLocks = "locks" + // @enum UlimitName + UlimitNameMemlock = "memlock" + // @enum UlimitName + UlimitNameMsgqueue = "msgqueue" + // @enum UlimitName + UlimitNameNice = "nice" + // @enum UlimitName + UlimitNameNofile = "nofile" + // @enum UlimitName + UlimitNameNproc = "nproc" + // @enum UlimitName + UlimitNameRss = "rss" + // @enum UlimitName + UlimitNameRtprio = "rtprio" + // @enum UlimitName + UlimitNameRttime = "rttime" + // @enum UlimitName + UlimitNameSigpending = "sigpending" + // @enum UlimitName + UlimitNameStack = "stack" +) diff --git a/vendor/github.com/aws/aws-sdk-go/service/ecs/ecsiface/interface.go b/vendor/github.com/aws/aws-sdk-go/service/ecs/ecsiface/interface.go new file mode 100644 index 000000000..58a4aecef --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/ecs/ecsiface/interface.go @@ -0,0 +1,134 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +// Package ecsiface provides an interface for the Amazon EC2 Container Service. +package ecsiface + +import ( + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/service/ecs" +) + +// ECSAPI is the interface type for ecs.ECS. 
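+//
+// Because the interface mirrors every operation on the concrete client, code
+// that depends on ECSAPI instead of *ecs.ECS can swap in a stub for unit
+// tests. A minimal sketch (the mockECS type and the values it returns are
+// illustrative, not part of the SDK):
+//
+//	type mockECS struct {
+//		ecsiface.ECSAPI // embed; unimplemented methods panic if called
+//	}
+//
+//	func (m mockECS) ListClusters(in *ecs.ListClustersInput) (*ecs.ListClustersOutput, error) {
+//		arn := "arn:aws:ecs:us-east-1:123456789012:cluster/default"
+//		return &ecs.ListClustersOutput{ClusterArns: []*string{&arn}}, nil
+//	}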
+type ECSAPI interface { + CreateClusterRequest(*ecs.CreateClusterInput) (*request.Request, *ecs.CreateClusterOutput) + + CreateCluster(*ecs.CreateClusterInput) (*ecs.CreateClusterOutput, error) + + CreateServiceRequest(*ecs.CreateServiceInput) (*request.Request, *ecs.CreateServiceOutput) + + CreateService(*ecs.CreateServiceInput) (*ecs.CreateServiceOutput, error) + + DeleteClusterRequest(*ecs.DeleteClusterInput) (*request.Request, *ecs.DeleteClusterOutput) + + DeleteCluster(*ecs.DeleteClusterInput) (*ecs.DeleteClusterOutput, error) + + DeleteServiceRequest(*ecs.DeleteServiceInput) (*request.Request, *ecs.DeleteServiceOutput) + + DeleteService(*ecs.DeleteServiceInput) (*ecs.DeleteServiceOutput, error) + + DeregisterContainerInstanceRequest(*ecs.DeregisterContainerInstanceInput) (*request.Request, *ecs.DeregisterContainerInstanceOutput) + + DeregisterContainerInstance(*ecs.DeregisterContainerInstanceInput) (*ecs.DeregisterContainerInstanceOutput, error) + + DeregisterTaskDefinitionRequest(*ecs.DeregisterTaskDefinitionInput) (*request.Request, *ecs.DeregisterTaskDefinitionOutput) + + DeregisterTaskDefinition(*ecs.DeregisterTaskDefinitionInput) (*ecs.DeregisterTaskDefinitionOutput, error) + + DescribeClustersRequest(*ecs.DescribeClustersInput) (*request.Request, *ecs.DescribeClustersOutput) + + DescribeClusters(*ecs.DescribeClustersInput) (*ecs.DescribeClustersOutput, error) + + DescribeContainerInstancesRequest(*ecs.DescribeContainerInstancesInput) (*request.Request, *ecs.DescribeContainerInstancesOutput) + + DescribeContainerInstances(*ecs.DescribeContainerInstancesInput) (*ecs.DescribeContainerInstancesOutput, error) + + DescribeServicesRequest(*ecs.DescribeServicesInput) (*request.Request, *ecs.DescribeServicesOutput) + + DescribeServices(*ecs.DescribeServicesInput) (*ecs.DescribeServicesOutput, error) + + DescribeTaskDefinitionRequest(*ecs.DescribeTaskDefinitionInput) (*request.Request, *ecs.DescribeTaskDefinitionOutput) + + DescribeTaskDefinition(*ecs.DescribeTaskDefinitionInput) (*ecs.DescribeTaskDefinitionOutput, error) + + DescribeTasksRequest(*ecs.DescribeTasksInput) (*request.Request, *ecs.DescribeTasksOutput) + + DescribeTasks(*ecs.DescribeTasksInput) (*ecs.DescribeTasksOutput, error) + + DiscoverPollEndpointRequest(*ecs.DiscoverPollEndpointInput) (*request.Request, *ecs.DiscoverPollEndpointOutput) + + DiscoverPollEndpoint(*ecs.DiscoverPollEndpointInput) (*ecs.DiscoverPollEndpointOutput, error) + + ListClustersRequest(*ecs.ListClustersInput) (*request.Request, *ecs.ListClustersOutput) + + ListClusters(*ecs.ListClustersInput) (*ecs.ListClustersOutput, error) + + ListClustersPages(*ecs.ListClustersInput, func(*ecs.ListClustersOutput, bool) bool) error + + ListContainerInstancesRequest(*ecs.ListContainerInstancesInput) (*request.Request, *ecs.ListContainerInstancesOutput) + + ListContainerInstances(*ecs.ListContainerInstancesInput) (*ecs.ListContainerInstancesOutput, error) + + ListContainerInstancesPages(*ecs.ListContainerInstancesInput, func(*ecs.ListContainerInstancesOutput, bool) bool) error + + ListServicesRequest(*ecs.ListServicesInput) (*request.Request, *ecs.ListServicesOutput) + + ListServices(*ecs.ListServicesInput) (*ecs.ListServicesOutput, error) + + ListServicesPages(*ecs.ListServicesInput, func(*ecs.ListServicesOutput, bool) bool) error + + ListTaskDefinitionFamiliesRequest(*ecs.ListTaskDefinitionFamiliesInput) (*request.Request, *ecs.ListTaskDefinitionFamiliesOutput) + + ListTaskDefinitionFamilies(*ecs.ListTaskDefinitionFamiliesInput) 
(*ecs.ListTaskDefinitionFamiliesOutput, error) + + ListTaskDefinitionFamiliesPages(*ecs.ListTaskDefinitionFamiliesInput, func(*ecs.ListTaskDefinitionFamiliesOutput, bool) bool) error + + ListTaskDefinitionsRequest(*ecs.ListTaskDefinitionsInput) (*request.Request, *ecs.ListTaskDefinitionsOutput) + + ListTaskDefinitions(*ecs.ListTaskDefinitionsInput) (*ecs.ListTaskDefinitionsOutput, error) + + ListTaskDefinitionsPages(*ecs.ListTaskDefinitionsInput, func(*ecs.ListTaskDefinitionsOutput, bool) bool) error + + ListTasksRequest(*ecs.ListTasksInput) (*request.Request, *ecs.ListTasksOutput) + + ListTasks(*ecs.ListTasksInput) (*ecs.ListTasksOutput, error) + + ListTasksPages(*ecs.ListTasksInput, func(*ecs.ListTasksOutput, bool) bool) error + + RegisterContainerInstanceRequest(*ecs.RegisterContainerInstanceInput) (*request.Request, *ecs.RegisterContainerInstanceOutput) + + RegisterContainerInstance(*ecs.RegisterContainerInstanceInput) (*ecs.RegisterContainerInstanceOutput, error) + + RegisterTaskDefinitionRequest(*ecs.RegisterTaskDefinitionInput) (*request.Request, *ecs.RegisterTaskDefinitionOutput) + + RegisterTaskDefinition(*ecs.RegisterTaskDefinitionInput) (*ecs.RegisterTaskDefinitionOutput, error) + + RunTaskRequest(*ecs.RunTaskInput) (*request.Request, *ecs.RunTaskOutput) + + RunTask(*ecs.RunTaskInput) (*ecs.RunTaskOutput, error) + + StartTaskRequest(*ecs.StartTaskInput) (*request.Request, *ecs.StartTaskOutput) + + StartTask(*ecs.StartTaskInput) (*ecs.StartTaskOutput, error) + + StopTaskRequest(*ecs.StopTaskInput) (*request.Request, *ecs.StopTaskOutput) + + StopTask(*ecs.StopTaskInput) (*ecs.StopTaskOutput, error) + + SubmitContainerStateChangeRequest(*ecs.SubmitContainerStateChangeInput) (*request.Request, *ecs.SubmitContainerStateChangeOutput) + + SubmitContainerStateChange(*ecs.SubmitContainerStateChangeInput) (*ecs.SubmitContainerStateChangeOutput, error) + + SubmitTaskStateChangeRequest(*ecs.SubmitTaskStateChangeInput) (*request.Request, *ecs.SubmitTaskStateChangeOutput) + + SubmitTaskStateChange(*ecs.SubmitTaskStateChangeInput) (*ecs.SubmitTaskStateChangeOutput, error) + + UpdateContainerAgentRequest(*ecs.UpdateContainerAgentInput) (*request.Request, *ecs.UpdateContainerAgentOutput) + + UpdateContainerAgent(*ecs.UpdateContainerAgentInput) (*ecs.UpdateContainerAgentOutput, error) + + UpdateServiceRequest(*ecs.UpdateServiceInput) (*request.Request, *ecs.UpdateServiceOutput) + + UpdateService(*ecs.UpdateServiceInput) (*ecs.UpdateServiceOutput, error) +} + +var _ ECSAPI = (*ecs.ECS)(nil) diff --git a/vendor/github.com/aws/aws-sdk-go/service/ecs/examples_test.go b/vendor/github.com/aws/aws-sdk-go/service/ecs/examples_test.go new file mode 100644 index 000000000..2afd3a8af --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/ecs/examples_test.go @@ -0,0 +1,791 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +package ecs_test + +import ( + "bytes" + "fmt" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/session" + "github.com/aws/aws-sdk-go/service/ecs" +) + +var _ time.Duration +var _ bytes.Buffer + +func ExampleECS_CreateCluster() { + svc := ecs.New(session.New()) + + params := &ecs.CreateClusterInput{ + ClusterName: aws.String("String"), + } + resp, err := svc.CreateCluster(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleECS_CreateService() { + svc := ecs.New(session.New()) + + params := &ecs.CreateServiceInput{ + DesiredCount: aws.Int64(1), // Required + ServiceName: aws.String("String"), // Required + TaskDefinition: aws.String("String"), // Required + ClientToken: aws.String("String"), + Cluster: aws.String("String"), + DeploymentConfiguration: &ecs.DeploymentConfiguration{ + MaximumPercent: aws.Int64(1), + MinimumHealthyPercent: aws.Int64(1), + }, + LoadBalancers: []*ecs.LoadBalancer{ + { // Required + ContainerName: aws.String("String"), + ContainerPort: aws.Int64(1), + LoadBalancerName: aws.String("String"), + }, + // More values... + }, + Role: aws.String("String"), + } + resp, err := svc.CreateService(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleECS_DeleteCluster() { + svc := ecs.New(session.New()) + + params := &ecs.DeleteClusterInput{ + Cluster: aws.String("String"), // Required + } + resp, err := svc.DeleteCluster(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleECS_DeleteService() { + svc := ecs.New(session.New()) + + params := &ecs.DeleteServiceInput{ + Service: aws.String("String"), // Required + Cluster: aws.String("String"), + } + resp, err := svc.DeleteService(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleECS_DeregisterContainerInstance() { + svc := ecs.New(session.New()) + + params := &ecs.DeregisterContainerInstanceInput{ + ContainerInstance: aws.String("String"), // Required + Cluster: aws.String("String"), + Force: aws.Bool(true), + } + resp, err := svc.DeregisterContainerInstance(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleECS_DeregisterTaskDefinition() { + svc := ecs.New(session.New()) + + params := &ecs.DeregisterTaskDefinitionInput{ + TaskDefinition: aws.String("String"), // Required + } + resp, err := svc.DeregisterTaskDefinition(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleECS_DescribeClusters() { + svc := ecs.New(session.New()) + + params := &ecs.DescribeClustersInput{ + Clusters: []*string{ + aws.String("String"), // Required + // More values... + }, + } + resp, err := svc.DescribeClusters(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleECS_DescribeContainerInstances() { + svc := ecs.New(session.New()) + + params := &ecs.DescribeContainerInstancesInput{ + ContainerInstances: []*string{ // Required + aws.String("String"), // Required + // More values... 
+ }, + Cluster: aws.String("String"), + } + resp, err := svc.DescribeContainerInstances(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleECS_DescribeServices() { + svc := ecs.New(session.New()) + + params := &ecs.DescribeServicesInput{ + Services: []*string{ // Required + aws.String("String"), // Required + // More values... + }, + Cluster: aws.String("String"), + } + resp, err := svc.DescribeServices(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleECS_DescribeTaskDefinition() { + svc := ecs.New(session.New()) + + params := &ecs.DescribeTaskDefinitionInput{ + TaskDefinition: aws.String("String"), // Required + } + resp, err := svc.DescribeTaskDefinition(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleECS_DescribeTasks() { + svc := ecs.New(session.New()) + + params := &ecs.DescribeTasksInput{ + Tasks: []*string{ // Required + aws.String("String"), // Required + // More values... + }, + Cluster: aws.String("String"), + } + resp, err := svc.DescribeTasks(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleECS_DiscoverPollEndpoint() { + svc := ecs.New(session.New()) + + params := &ecs.DiscoverPollEndpointInput{ + Cluster: aws.String("String"), + ContainerInstance: aws.String("String"), + } + resp, err := svc.DiscoverPollEndpoint(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleECS_ListClusters() { + svc := ecs.New(session.New()) + + params := &ecs.ListClustersInput{ + MaxResults: aws.Int64(1), + NextToken: aws.String("String"), + } + resp, err := svc.ListClusters(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleECS_ListContainerInstances() { + svc := ecs.New(session.New()) + + params := &ecs.ListContainerInstancesInput{ + Cluster: aws.String("String"), + MaxResults: aws.Int64(1), + NextToken: aws.String("String"), + } + resp, err := svc.ListContainerInstances(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleECS_ListServices() { + svc := ecs.New(session.New()) + + params := &ecs.ListServicesInput{ + Cluster: aws.String("String"), + MaxResults: aws.Int64(1), + NextToken: aws.String("String"), + } + resp, err := svc.ListServices(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. 
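+		// For instance, a sketch of that cast (assuming the awserr package,
+		// github.com/aws/aws-sdk-go/aws/awserr, is imported):
+		//
+		//	if aerr, ok := err.(awserr.Error); ok {
+		//		fmt.Println(aerr.Code(), aerr.Message())
+		//	}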
+ fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleECS_ListTaskDefinitionFamilies() { + svc := ecs.New(session.New()) + + params := &ecs.ListTaskDefinitionFamiliesInput{ + FamilyPrefix: aws.String("String"), + MaxResults: aws.Int64(1), + NextToken: aws.String("String"), + } + resp, err := svc.ListTaskDefinitionFamilies(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleECS_ListTaskDefinitions() { + svc := ecs.New(session.New()) + + params := &ecs.ListTaskDefinitionsInput{ + FamilyPrefix: aws.String("String"), + MaxResults: aws.Int64(1), + NextToken: aws.String("String"), + Sort: aws.String("SortOrder"), + Status: aws.String("TaskDefinitionStatus"), + } + resp, err := svc.ListTaskDefinitions(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleECS_ListTasks() { + svc := ecs.New(session.New()) + + params := &ecs.ListTasksInput{ + Cluster: aws.String("String"), + ContainerInstance: aws.String("String"), + DesiredStatus: aws.String("DesiredStatus"), + Family: aws.String("String"), + MaxResults: aws.Int64(1), + NextToken: aws.String("String"), + ServiceName: aws.String("String"), + StartedBy: aws.String("String"), + } + resp, err := svc.ListTasks(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleECS_RegisterContainerInstance() { + svc := ecs.New(session.New()) + + params := &ecs.RegisterContainerInstanceInput{ + Attributes: []*ecs.Attribute{ + { // Required + Name: aws.String("String"), // Required + Value: aws.String("String"), + }, + // More values... + }, + Cluster: aws.String("String"), + ContainerInstanceArn: aws.String("String"), + InstanceIdentityDocument: aws.String("String"), + InstanceIdentityDocumentSignature: aws.String("String"), + TotalResources: []*ecs.Resource{ + { // Required + DoubleValue: aws.Float64(1.0), + IntegerValue: aws.Int64(1), + LongValue: aws.Int64(1), + Name: aws.String("String"), + StringSetValue: []*string{ + aws.String("String"), // Required + // More values... + }, + Type: aws.String("String"), + }, + // More values... + }, + VersionInfo: &ecs.VersionInfo{ + AgentHash: aws.String("String"), + AgentVersion: aws.String("String"), + DockerVersion: aws.String("String"), + }, + } + resp, err := svc.RegisterContainerInstance(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleECS_RegisterTaskDefinition() { + svc := ecs.New(session.New()) + + params := &ecs.RegisterTaskDefinitionInput{ + ContainerDefinitions: []*ecs.ContainerDefinition{ // Required + { // Required + Command: []*string{ + aws.String("String"), // Required + // More values... + }, + Cpu: aws.Int64(1), + DisableNetworking: aws.Bool(true), + DnsSearchDomains: []*string{ + aws.String("String"), // Required + // More values... 
+ }, + DnsServers: []*string{ + aws.String("String"), // Required + // More values... + }, + DockerLabels: map[string]*string{ + "Key": aws.String("String"), // Required + // More values... + }, + DockerSecurityOptions: []*string{ + aws.String("String"), // Required + // More values... + }, + EntryPoint: []*string{ + aws.String("String"), // Required + // More values... + }, + Environment: []*ecs.KeyValuePair{ + { // Required + Name: aws.String("String"), + Value: aws.String("String"), + }, + // More values... + }, + Essential: aws.Bool(true), + ExtraHosts: []*ecs.HostEntry{ + { // Required + Hostname: aws.String("String"), // Required + IpAddress: aws.String("String"), // Required + }, + // More values... + }, + Hostname: aws.String("String"), + Image: aws.String("String"), + Links: []*string{ + aws.String("String"), // Required + // More values... + }, + LogConfiguration: &ecs.LogConfiguration{ + LogDriver: aws.String("LogDriver"), // Required + Options: map[string]*string{ + "Key": aws.String("String"), // Required + // More values... + }, + }, + Memory: aws.Int64(1), + MountPoints: []*ecs.MountPoint{ + { // Required + ContainerPath: aws.String("String"), + ReadOnly: aws.Bool(true), + SourceVolume: aws.String("String"), + }, + // More values... + }, + Name: aws.String("String"), + PortMappings: []*ecs.PortMapping{ + { // Required + ContainerPort: aws.Int64(1), + HostPort: aws.Int64(1), + Protocol: aws.String("TransportProtocol"), + }, + // More values... + }, + Privileged: aws.Bool(true), + ReadonlyRootFilesystem: aws.Bool(true), + Ulimits: []*ecs.Ulimit{ + { // Required + HardLimit: aws.Int64(1), // Required + Name: aws.String("UlimitName"), // Required + SoftLimit: aws.Int64(1), // Required + }, + // More values... + }, + User: aws.String("String"), + VolumesFrom: []*ecs.VolumeFrom{ + { // Required + ReadOnly: aws.Bool(true), + SourceContainer: aws.String("String"), + }, + // More values... + }, + WorkingDirectory: aws.String("String"), + }, + // More values... + }, + Family: aws.String("String"), // Required + Volumes: []*ecs.Volume{ + { // Required + Host: &ecs.HostVolumeProperties{ + SourcePath: aws.String("String"), + }, + Name: aws.String("String"), + }, + // More values... + }, + } + resp, err := svc.RegisterTaskDefinition(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleECS_RunTask() { + svc := ecs.New(session.New()) + + params := &ecs.RunTaskInput{ + TaskDefinition: aws.String("String"), // Required + Cluster: aws.String("String"), + Count: aws.Int64(1), + Overrides: &ecs.TaskOverride{ + ContainerOverrides: []*ecs.ContainerOverride{ + { // Required + Command: []*string{ + aws.String("String"), // Required + // More values... + }, + Environment: []*ecs.KeyValuePair{ + { // Required + Name: aws.String("String"), + Value: aws.String("String"), + }, + // More values... + }, + Name: aws.String("String"), + }, + // More values... + }, + }, + StartedBy: aws.String("String"), + } + resp, err := svc.RunTask(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
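+	// Note that RunTask reports tasks it could not place in resp.Failures
+	// rather than through err, so a caller would typically also inspect that
+	// slice; a sketch (each Failure carries Arn and Reason fields):
+	//
+	//	for _, f := range resp.Failures {
+	//		fmt.Println(*f.Arn, *f.Reason)
+	//	}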
+ fmt.Println(resp) +} + +func ExampleECS_StartTask() { + svc := ecs.New(session.New()) + + params := &ecs.StartTaskInput{ + ContainerInstances: []*string{ // Required + aws.String("String"), // Required + // More values... + }, + TaskDefinition: aws.String("String"), // Required + Cluster: aws.String("String"), + Overrides: &ecs.TaskOverride{ + ContainerOverrides: []*ecs.ContainerOverride{ + { // Required + Command: []*string{ + aws.String("String"), // Required + // More values... + }, + Environment: []*ecs.KeyValuePair{ + { // Required + Name: aws.String("String"), + Value: aws.String("String"), + }, + // More values... + }, + Name: aws.String("String"), + }, + // More values... + }, + }, + StartedBy: aws.String("String"), + } + resp, err := svc.StartTask(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleECS_StopTask() { + svc := ecs.New(session.New()) + + params := &ecs.StopTaskInput{ + Task: aws.String("String"), // Required + Cluster: aws.String("String"), + Reason: aws.String("String"), + } + resp, err := svc.StopTask(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleECS_SubmitContainerStateChange() { + svc := ecs.New(session.New()) + + params := &ecs.SubmitContainerStateChangeInput{ + Cluster: aws.String("String"), + ContainerName: aws.String("String"), + ExitCode: aws.Int64(1), + NetworkBindings: []*ecs.NetworkBinding{ + { // Required + BindIP: aws.String("String"), + ContainerPort: aws.Int64(1), + HostPort: aws.Int64(1), + Protocol: aws.String("TransportProtocol"), + }, + // More values... + }, + Reason: aws.String("String"), + Status: aws.String("String"), + Task: aws.String("String"), + } + resp, err := svc.SubmitContainerStateChange(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleECS_SubmitTaskStateChange() { + svc := ecs.New(session.New()) + + params := &ecs.SubmitTaskStateChangeInput{ + Cluster: aws.String("String"), + Reason: aws.String("String"), + Status: aws.String("String"), + Task: aws.String("String"), + } + resp, err := svc.SubmitTaskStateChange(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleECS_UpdateContainerAgent() { + svc := ecs.New(session.New()) + + params := &ecs.UpdateContainerAgentInput{ + ContainerInstance: aws.String("String"), // Required + Cluster: aws.String("String"), + } + resp, err := svc.UpdateContainerAgent(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+	fmt.Println(resp)
+}
+
+func ExampleECS_UpdateService() {
+	svc := ecs.New(session.New())
+
+	params := &ecs.UpdateServiceInput{
+		Service: aws.String("String"), // Required
+		Cluster: aws.String("String"),
+		DeploymentConfiguration: &ecs.DeploymentConfiguration{
+			MaximumPercent:        aws.Int64(1),
+			MinimumHealthyPercent: aws.Int64(1),
+		},
+		DesiredCount:   aws.Int64(1),
+		TaskDefinition: aws.String("String"),
+	}
+	resp, err := svc.UpdateService(params)
+
+	if err != nil {
+		// Print the error, cast err to awserr.Error to get the Code and
+		// Message from an error.
+		fmt.Println(err.Error())
+		return
+	}
+
+	// Pretty-print the response data.
+	fmt.Println(resp)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/service/ecs/service.go b/vendor/github.com/aws/aws-sdk-go/service/ecs/service.go
new file mode 100644
index 000000000..04b9f7180
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/ecs/service.go
@@ -0,0 +1,99 @@
+// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT.
+
+package ecs
+
+import (
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/aws/client"
+	"github.com/aws/aws-sdk-go/aws/client/metadata"
+	"github.com/aws/aws-sdk-go/aws/request"
+	"github.com/aws/aws-sdk-go/private/protocol/jsonrpc"
+	"github.com/aws/aws-sdk-go/private/signer/v4"
+)
+
+// Amazon EC2 Container Service (Amazon ECS) is a highly scalable, fast, container
+// management service that makes it easy to run, stop, and manage Docker containers
+// on a cluster of EC2 instances. Amazon ECS lets you launch and stop container-enabled
+// applications with simple API calls, allows you to get the state of your cluster
+// from a centralized service, and gives you access to many familiar Amazon
+// EC2 features like security groups, Amazon EBS volumes, and IAM roles.
+//
+// You can use Amazon ECS to schedule the placement of containers across your
+// cluster based on your resource needs, isolation policies, and availability
+// requirements. Amazon EC2 Container Service eliminates the need for you to
+// operate your own cluster management and configuration management systems
+// or worry about scaling your management infrastructure.
+//
+// The service client's operations are safe to be used concurrently.
+// It is not safe to mutate any of the client's properties though.
+type ECS struct {
+	*client.Client
+}
+
+// Used for custom client initialization logic
+var initClient func(*client.Client)
+
+// Used for custom request initialization logic
+var initRequest func(*request.Request)
+
+// A ServiceName is the name of the service the client will make API calls to.
+const ServiceName = "ecs"
+
+// New creates a new instance of the ECS client with a session.
+// If additional configuration is needed for the client instance use the optional
+// aws.Config parameter to add your extra config.
+//
+// Example:
+//     // Create an ECS client from just a session.
+//     svc := ecs.New(mySession)
+//
+//     // Create an ECS client with additional configuration
+//     svc := ecs.New(mySession, aws.NewConfig().WithRegion("us-west-2"))
+func New(p client.ConfigProvider, cfgs ...*aws.Config) *ECS {
+	c := p.ClientConfig(ServiceName, cfgs...)
+	return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion)
+}
+
+// newClient creates, initializes and returns a new service client instance.
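+// The handler chain assembled below is what wires the client to the ECS wire
+// protocol: jsonrpc builds and unmarshals the JSON 1.1 request and response
+// bodies, and v4.Sign signs each outgoing request with AWS Signature Version 4.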
+func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *ECS { + svc := &ECS{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: ServiceName, + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "2014-11-13", + JSONVersion: "1.1", + TargetPrefix: "AmazonEC2ContainerServiceV20141113", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Build.PushBack(jsonrpc.Build) + svc.Handlers.Unmarshal.PushBack(jsonrpc.Unmarshal) + svc.Handlers.UnmarshalMeta.PushBack(jsonrpc.UnmarshalMeta) + svc.Handlers.UnmarshalError.PushBack(jsonrpc.UnmarshalError) + + // Run custom client initialization if present + if initClient != nil { + initClient(svc.Client) + } + + return svc +} + +// newRequest creates a new request for a ECS operation and runs any +// custom request initialization. +func (c *ECS) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + // Run custom request initialization if present + if initRequest != nil { + initRequest(req) + } + + return req +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/ecs/waiters.go b/vendor/github.com/aws/aws-sdk-go/service/ecs/waiters.go new file mode 100644 index 000000000..155f7c098 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/ecs/waiters.go @@ -0,0 +1,135 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +package ecs + +import ( + "github.com/aws/aws-sdk-go/private/waiter" +) + +func (c *ECS) WaitUntilServicesInactive(input *DescribeServicesInput) error { + waiterCfg := waiter.Config{ + Operation: "DescribeServices", + Delay: 15, + MaxAttempts: 40, + Acceptors: []waiter.WaitAcceptor{ + { + State: "failure", + Matcher: "pathAny", + Argument: "failures[].reason", + Expected: "MISSING", + }, + { + State: "success", + Matcher: "pathAny", + Argument: "services[].status", + Expected: "INACTIVE", + }, + }, + } + + w := waiter.Waiter{ + Client: c, + Input: input, + Config: waiterCfg, + } + return w.Wait() +} + +func (c *ECS) WaitUntilServicesStable(input *DescribeServicesInput) error { + waiterCfg := waiter.Config{ + Operation: "DescribeServices", + Delay: 15, + MaxAttempts: 40, + Acceptors: []waiter.WaitAcceptor{ + { + State: "failure", + Matcher: "pathAny", + Argument: "failures[].reason", + Expected: "MISSING", + }, + { + State: "failure", + Matcher: "pathAny", + Argument: "services[].status", + Expected: "DRAINING", + }, + { + State: "failure", + Matcher: "pathAny", + Argument: "services[].status", + Expected: "INACTIVE", + }, + { + State: "success", + Matcher: "path", + Argument: "services | [@[?length(deployments)!=`1`], @[?desiredCount!=runningCount]][] | length(@) == `0`", + Expected: true, + }, + }, + } + + w := waiter.Waiter{ + Client: c, + Input: input, + Config: waiterCfg, + } + return w.Wait() +} + +func (c *ECS) WaitUntilTasksRunning(input *DescribeTasksInput) error { + waiterCfg := waiter.Config{ + Operation: "DescribeTasks", + Delay: 6, + MaxAttempts: 100, + Acceptors: []waiter.WaitAcceptor{ + { + State: "failure", + Matcher: "pathAny", + Argument: "tasks[].lastStatus", + Expected: "STOPPED", + }, + { + State: "failure", + Matcher: "pathAny", + Argument: "failures[].reason", + Expected: "MISSING", + }, + { + State: "success", + Matcher: "pathAll", + Argument: "tasks[].lastStatus", + Expected: "RUNNING", + }, + }, + } + + w := waiter.Waiter{ + Client: c, + Input: input, + Config: waiterCfg, + } + return w.Wait() +} + +func (c *ECS) 
WaitUntilTasksStopped(input *DescribeTasksInput) error {
+	waiterCfg := waiter.Config{
+		Operation:   "DescribeTasks",
+		Delay:       6,
+		MaxAttempts: 100,
+		Acceptors: []waiter.WaitAcceptor{
+			{
+				State:    "success",
+				Matcher:  "pathAll",
+				Argument: "tasks[].lastStatus",
+				Expected: "STOPPED",
+			},
+		},
+	}
+
+	w := waiter.Waiter{
+		Client: c,
+		Input:  input,
+		Config: waiterCfg,
+	}
+	return w.Wait()
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/service/efs/api.go b/vendor/github.com/aws/aws-sdk-go/service/efs/api.go
new file mode 100644
index 000000000..8b32301b1
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/efs/api.go
@@ -0,0 +1,1083 @@
+// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT.
+
+// Package efs provides a client for Amazon Elastic File System.
+package efs
+
+import (
+	"time"
+
+	"github.com/aws/aws-sdk-go/aws/awsutil"
+	"github.com/aws/aws-sdk-go/aws/request"
+)
+
+const opCreateFileSystem = "CreateFileSystem"
+
+// CreateFileSystemRequest generates a request for the CreateFileSystem operation.
+func (c *EFS) CreateFileSystemRequest(input *CreateFileSystemInput) (req *request.Request, output *FileSystemDescription) {
+	op := &request.Operation{
+		Name:       opCreateFileSystem,
+		HTTPMethod: "POST",
+		HTTPPath:   "/2015-02-01/file-systems",
+	}
+
+	if input == nil {
+		input = &CreateFileSystemInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &FileSystemDescription{}
+	req.Data = output
+	return
+}
+
+// Creates a new, empty file system. The operation requires a creation token
+// in the request that Amazon EFS uses to ensure idempotent creation (calling
+// the operation with the same creation token has no effect). If a file system
+// owned by the caller's AWS account does not currently exist with the specified
+// creation token, this operation does the following:
+//
+// Creates a new, empty file system. The file system will have an Amazon EFS
+// assigned ID, and an initial lifecycle state "creating". Returns with the
+// description of the created file system. Otherwise, this operation returns
+// a FileSystemAlreadyExists error with the ID of the existing file system.
+//
+// For basic use cases, you can use a randomly generated UUID for the creation
+// token. The idempotent operation allows you to retry a CreateFileSystem call
+// without risk of creating an extra file system. This can happen when an initial
+// call fails in a way that leaves it uncertain whether or not a file system
+// was actually created. An example might be that a transport level timeout
+// occurred or your connection was reset. As long as you use the same creation
+// token, if the initial call had succeeded in creating a file system, the client
+// can learn of its existence from the FileSystemAlreadyExists error.
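+//
+// A minimal sketch of this retry pattern, assuming an initialized *EFS client
+// named svc (the token value here is only illustrative):
+//
+//	fs, err := svc.CreateFileSystem(&efs.CreateFileSystemInput{
+//		CreationToken: aws.String("my-app-fs-token"),
+//	})
+//	// Retrying with the same token either succeeds with the same file system
+//	// or returns FileSystemAlreadyExists, which carries the existing ID.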
+//
+// The CreateFileSystem call returns while the file system's lifecycle state
+// is still "creating". You can check the file system creation status by calling
+// the DescribeFileSystems API, which among other things returns the file system
+// state. After the file system is fully created, Amazon EFS sets its lifecycle
+// state to "available", at which point you can create one or more mount targets
+// for the file system (CreateMountTarget) in your VPC. You mount your Amazon
+// EFS file system on EC2 instances in your VPC via the mount target. For
+// more information, see Amazon EFS: How it Works (http://docs.aws.amazon.com/efs/latest/ug/how-it-works.html).
+//
+// This operation requires permission for the elasticfilesystem:CreateFileSystem
+// action.
+func (c *EFS) CreateFileSystem(input *CreateFileSystemInput) (*FileSystemDescription, error) {
+	req, out := c.CreateFileSystemRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+const opCreateMountTarget = "CreateMountTarget"
+
+// CreateMountTargetRequest generates a request for the CreateMountTarget operation.
+func (c *EFS) CreateMountTargetRequest(input *CreateMountTargetInput) (req *request.Request, output *MountTargetDescription) {
+	op := &request.Operation{
+		Name:       opCreateMountTarget,
+		HTTPMethod: "POST",
+		HTTPPath:   "/2015-02-01/mount-targets",
+	}
+
+	if input == nil {
+		input = &CreateMountTargetInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &MountTargetDescription{}
+	req.Data = output
+	return
+}
+
+// Creates a mount target for a file system. You can then mount the file system
+// on EC2 instances via the mount target.
+//
+// You can create one mount target in each Availability Zone in your VPC. All
+// EC2 instances in a VPC within a given Availability Zone share a single mount
+// target for a given file system. If you have multiple subnets in an Availability
+// Zone, you create a mount target in one of the subnets. EC2 instances do not
+// need to be in the same subnet as the mount target in order to access their
+// file system. For more information, see Amazon EFS: How it Works (http://docs.aws.amazon.com/efs/latest/ug/how-it-works.html).
+//
+// In the request, you also specify a file system ID for which you are creating
+// the mount target, and the file system's lifecycle state must be "available"
+// (see DescribeFileSystems).
+//
+// In the request, you also provide a subnet ID, which serves several purposes:
+//
+// It determines the VPC in which Amazon EFS creates the mount target. It
+// determines the Availability Zone in which Amazon EFS creates the mount target.
+// It determines the IP address range from which Amazon EFS selects the IP
+// address of the mount target if you don't specify an IP address in the request.
+// After creating the mount target, Amazon EFS returns a response that includes
+// a MountTargetId and an IpAddress. You use this IP address when mounting the
+// file system in an EC2 instance. You can also use the mount target's DNS name
+// when mounting the file system. The EC2 instance on which you mount the file
+// system via the mount target can resolve the mount target's DNS name to its
+// IP address. For more information, see How it Works: Implementation Overview
+// (http://docs.aws.amazon.com/efs/latest/ug/how-it-works.html#how-it-works-implementation).
+//
+// Note that you can create mount targets for a file system in only one VPC,
+// and there can be only one mount target per Availability Zone. That is, if
+// the file system already has one or more mount targets created for it, the
+// request to add another mount target must meet the following requirements:
+//
+// The subnet specified in the request must belong to the same VPC as the
+// subnets of the existing mount targets.
+//
+// The subnet specified in the request must not be in the same Availability
+// Zone as any of the subnets of the existing mount targets. If the request
+// satisfies the requirements, Amazon EFS does the following:
+//
+// Creates a new mount target in the specified subnet. Also creates a new
+// network interface in the subnet as follows: If the request provides an IpAddress,
+// Amazon EFS assigns that IP address to the network interface. Otherwise, Amazon
+// EFS assigns a free address in the subnet (in the same way that the Amazon
+// EC2 CreateNetworkInterface call does when a request does not specify a primary
+// private IP address). If the request provides SecurityGroups, this network
+// interface is associated with those security groups. Otherwise, it belongs
+// to the default security group for the subnet's VPC. Assigns the description
+// "Mount target fsmt-id for file system fs-id" where fsmt-id is the mount target
+// ID, and fs-id is the FileSystemId. Sets the requesterManaged property of
+// the network interface to "true", and the requesterId value to "EFS". Each
+// Amazon EFS mount target has one corresponding requester-managed EC2 network
+// interface. After the network interface is created, Amazon EFS sets the NetworkInterfaceId
+// field in the mount target's description to the network interface ID, and
+// the IpAddress field to its address. If network interface creation fails,
+// the entire CreateMountTarget operation fails.
+//
+// The CreateMountTarget call returns only after creating the network interface,
+// but while the mount target state is still "creating". You can check the mount
+// target creation status by calling the DescribeFileSystems API, which among
+// other things returns the mount target state. We recommend you create a mount
+// target in each of the Availability Zones. There are cost considerations for
+// using a file system in an Availability Zone through a mount target created
+// in another Availability Zone. For more information, go to the Amazon EFS
+// (http://aws.amazon.com/efs/) product detail page. In addition, by always
+// using a mount target local to the instance's Availability Zone, you eliminate
+// a partial failure scenario; if the Availability Zone in which your mount target
+// is created goes down, then you won't be able to access your file system through
+// that mount target.
+//
+// This operation requires permission for the following action on the file
+// system:
+//
+// elasticfilesystem:CreateMountTarget This operation also requires permission
+// for the following Amazon EC2 actions:
+//
+// ec2:DescribeSubnets ec2:DescribeNetworkInterfaces ec2:CreateNetworkInterface
+func (c *EFS) CreateMountTarget(input *CreateMountTargetInput) (*MountTargetDescription, error) {
+	req, out := c.CreateMountTargetRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+const opCreateTags = "CreateTags"
+
+// CreateTagsRequest generates a request for the CreateTags operation.
+func (c *EFS) CreateTagsRequest(input *CreateTagsInput) (req *request.Request, output *CreateTagsOutput) {
+	op := &request.Operation{
+		Name:       opCreateTags,
+		HTTPMethod: "POST",
+		HTTPPath:   "/2015-02-01/create-tags/{FileSystemId}",
+	}
+
+	if input == nil {
+		input = &CreateTagsInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &CreateTagsOutput{}
+	req.Data = output
+	return
+}
+
+// Creates or overwrites tags associated with a file system. Each tag is a key-value
+// pair. If a tag key specified in the request already exists on the file system,
+// this operation overwrites its value with the value provided in the request.
+// If you add the "Name" tag to your file system, Amazon EFS returns it in the
+// response to the DescribeFileSystems API.
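+//
+// A minimal sketch, assuming an initialized *EFS client named svc and a
+// hypothetical file system ID:
+//
+//	_, err := svc.CreateTags(&efs.CreateTagsInput{
+//		FileSystemId: aws.String("fs-12345678"), // hypothetical ID
+//		Tags: []*efs.Tag{
+//			{Key: aws.String("Name"), Value: aws.String("my-file-system")},
+//		},
+//	})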
+//
+// This operation requires permission for the elasticfilesystem:CreateTags
+// action.
+func (c *EFS) CreateTags(input *CreateTagsInput) (*CreateTagsOutput, error) {
+	req, out := c.CreateTagsRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+const opDeleteFileSystem = "DeleteFileSystem"
+
+// DeleteFileSystemRequest generates a request for the DeleteFileSystem operation.
+func (c *EFS) DeleteFileSystemRequest(input *DeleteFileSystemInput) (req *request.Request, output *DeleteFileSystemOutput) {
+	op := &request.Operation{
+		Name:       opDeleteFileSystem,
+		HTTPMethod: "DELETE",
+		HTTPPath:   "/2015-02-01/file-systems/{FileSystemId}",
+	}
+
+	if input == nil {
+		input = &DeleteFileSystemInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &DeleteFileSystemOutput{}
+	req.Data = output
+	return
+}
+
+// Deletes a file system, permanently severing access to its contents. Upon
+// return, the file system no longer exists and you will not be able to access
+// any contents of the deleted file system.
+//
+// You cannot delete a file system that is in use. That is, if the file system
+// has any mount targets, you must first delete them. For more information,
+// see DescribeMountTargets and DeleteMountTarget.
+//
+// The DeleteFileSystem call returns while the file system state is still "deleting".
+// You can check the file system deletion status by calling the DescribeFileSystems
+// API, which returns a list of file systems in your account. If you pass the
+// file system ID or creation token for the deleted file system, DescribeFileSystems
+// will return a 404 "FileSystemNotFound" error.
+//
+// This operation requires permission for the elasticfilesystem:DeleteFileSystem
+// action.
+func (c *EFS) DeleteFileSystem(input *DeleteFileSystemInput) (*DeleteFileSystemOutput, error) {
+	req, out := c.DeleteFileSystemRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+const opDeleteMountTarget = "DeleteMountTarget"
+
+// DeleteMountTargetRequest generates a request for the DeleteMountTarget operation.
+func (c *EFS) DeleteMountTargetRequest(input *DeleteMountTargetInput) (req *request.Request, output *DeleteMountTargetOutput) {
+	op := &request.Operation{
+		Name:       opDeleteMountTarget,
+		HTTPMethod: "DELETE",
+		HTTPPath:   "/2015-02-01/mount-targets/{MountTargetId}",
+	}
+
+	if input == nil {
+		input = &DeleteMountTargetInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &DeleteMountTargetOutput{}
+	req.Data = output
+	return
+}
+
+// Deletes the specified mount target.
+//
+// This operation forcibly breaks any mounts of the file system via the mount
+// target being deleted, which might disrupt instances or applications using
+// those mounts. To avoid applications getting cut off abruptly, you might consider
+// unmounting any mounts of the mount target, if feasible. The operation also
+// deletes the associated network interface. Uncommitted writes may be lost,
+// but breaking a mount target using this operation does not corrupt the file
+// system itself. The file system you created remains; you can mount it from
+// an EC2 instance in your VPC using another mount target.
+//
+// This operation requires permission for the following action on the file
+// system:
+//
+// elasticfilesystem:DeleteMountTarget The DeleteMountTarget call returns
+// while the mount target state is still "deleting". You can check the mount
+// target deletion by calling the DescribeMountTargets API, which returns a
+// list of mount target descriptions for the given file system. The operation
+// also requires permission for the following Amazon EC2 action on the mount
+// target's network interface:
+//
+// ec2:DeleteNetworkInterface
+func (c *EFS) DeleteMountTarget(input *DeleteMountTargetInput) (*DeleteMountTargetOutput, error) {
+	req, out := c.DeleteMountTargetRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+const opDeleteTags = "DeleteTags"
+
+// DeleteTagsRequest generates a request for the DeleteTags operation.
+func (c *EFS) DeleteTagsRequest(input *DeleteTagsInput) (req *request.Request, output *DeleteTagsOutput) {
+	op := &request.Operation{
+		Name:       opDeleteTags,
+		HTTPMethod: "POST",
+		HTTPPath:   "/2015-02-01/delete-tags/{FileSystemId}",
+	}
+
+	if input == nil {
+		input = &DeleteTagsInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &DeleteTagsOutput{}
+	req.Data = output
+	return
+}
+
+// Deletes the specified tags from a file system. If the DeleteTags request
+// includes a tag key that does not exist, Amazon EFS ignores it; it is not
+// an error. For more information about tags and related restrictions, go to
+// Tag Restrictions (http://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/cost-alloc-tags.html)
+// in the AWS Billing and Cost Management User Guide.
+//
+// This operation requires permission for the elasticfilesystem:DeleteTags
+// action.
+func (c *EFS) DeleteTags(input *DeleteTagsInput) (*DeleteTagsOutput, error) {
+	req, out := c.DeleteTagsRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+const opDescribeFileSystems = "DescribeFileSystems"
+
+// DescribeFileSystemsRequest generates a request for the DescribeFileSystems operation.
+func (c *EFS) DescribeFileSystemsRequest(input *DescribeFileSystemsInput) (req *request.Request, output *DescribeFileSystemsOutput) {
+	op := &request.Operation{
+		Name:       opDescribeFileSystems,
+		HTTPMethod: "GET",
+		HTTPPath:   "/2015-02-01/file-systems",
+	}
+
+	if input == nil {
+		input = &DescribeFileSystemsInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &DescribeFileSystemsOutput{}
+	req.Data = output
+	return
+}
+
+// Returns the description of a specific Amazon EFS file system if either the
+// file system CreationToken or the FileSystemId is provided; otherwise, returns
+// descriptions of all file systems owned by the caller's AWS account in the
+// AWS region of the endpoint that you're calling.
+//
+// When retrieving all file system descriptions, you can optionally specify
+// the MaxItems parameter to limit the number of descriptions in a response.
+// If more file system descriptions remain, Amazon EFS returns a NextMarker,
+// an opaque token, in the response. In this case, you should send a subsequent
+// request with the Marker request parameter set to the value of NextMarker.
+//
+// So to retrieve a list of your file system descriptions, the expected usage
+// of this API is an iterative process of first calling DescribeFileSystems
+// without the Marker and then continuing to call it with the Marker parameter
+// set to the value of the NextMarker from the previous response until the response
+// has no NextMarker.
+//
+// Note that the implementation may return fewer than MaxItems file system
+// descriptions while still including a NextMarker value.
+//
+// The order of file systems returned in the response of one DescribeFileSystems
+// call, and the order of file systems returned across the responses of a multi-call
+// iteration, is unspecified.
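+//
+// A minimal sketch of that loop, assuming an initialized *EFS client named
+// svc:
+//
+//	params := &efs.DescribeFileSystemsInput{}
+//	for {
+//		page, err := svc.DescribeFileSystems(params)
+//		if err != nil {
+//			return err // or handle the error as appropriate
+//		}
+//		// ... use page.FileSystems ...
+//		if page.NextMarker == nil {
+//			break
+//		}
+//		params.Marker = page.NextMarker
+//	}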
+// +// This operation requires permission for the elasticfilesystem:DescribeFileSystems +// action. +func (c *EFS) DescribeFileSystems(input *DescribeFileSystemsInput) (*DescribeFileSystemsOutput, error) { + req, out := c.DescribeFileSystemsRequest(input) + err := req.Send() + return out, err +} + +const opDescribeMountTargetSecurityGroups = "DescribeMountTargetSecurityGroups" + +// DescribeMountTargetSecurityGroupsRequest generates a request for the DescribeMountTargetSecurityGroups operation. +func (c *EFS) DescribeMountTargetSecurityGroupsRequest(input *DescribeMountTargetSecurityGroupsInput) (req *request.Request, output *DescribeMountTargetSecurityGroupsOutput) { + op := &request.Operation{ + Name: opDescribeMountTargetSecurityGroups, + HTTPMethod: "GET", + HTTPPath: "/2015-02-01/mount-targets/{MountTargetId}/security-groups", + } + + if input == nil { + input = &DescribeMountTargetSecurityGroupsInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeMountTargetSecurityGroupsOutput{} + req.Data = output + return +} + +// Returns the security groups currently in effect for a mount target. This +// operation requires that the network interface of the mount target has been +// created and the life cycle state of the mount target is not "deleted". +// +// This operation requires permissions for the following actions: +// +// elasticfilesystem:DescribeMountTargetSecurityGroups action on the mount +// target's file system. ec2:DescribeNetworkInterfaceAttribute action on the +// mount target's network interface. +func (c *EFS) DescribeMountTargetSecurityGroups(input *DescribeMountTargetSecurityGroupsInput) (*DescribeMountTargetSecurityGroupsOutput, error) { + req, out := c.DescribeMountTargetSecurityGroupsRequest(input) + err := req.Send() + return out, err +} + +const opDescribeMountTargets = "DescribeMountTargets" + +// DescribeMountTargetsRequest generates a request for the DescribeMountTargets operation. +func (c *EFS) DescribeMountTargetsRequest(input *DescribeMountTargetsInput) (req *request.Request, output *DescribeMountTargetsOutput) { + op := &request.Operation{ + Name: opDescribeMountTargets, + HTTPMethod: "GET", + HTTPPath: "/2015-02-01/mount-targets", + } + + if input == nil { + input = &DescribeMountTargetsInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeMountTargetsOutput{} + req.Data = output + return +} + +// Returns the descriptions of all the current mount targets, or a specific +// mount target, for a file system. When requesting all of the current mount +// targets, the order of mount targets returned in the response is unspecified. +// +// This operation requires permission for the elasticfilesystem:DescribeMountTargets +// action, on either the file system id that you specify in FileSystemId, or +// on the file system of the mount target that you specify in MountTargetId. +func (c *EFS) DescribeMountTargets(input *DescribeMountTargetsInput) (*DescribeMountTargetsOutput, error) { + req, out := c.DescribeMountTargetsRequest(input) + err := req.Send() + return out, err +} + +const opDescribeTags = "DescribeTags" + +// DescribeTagsRequest generates a request for the DescribeTags operation. 
+func (c *EFS) DescribeTagsRequest(input *DescribeTagsInput) (req *request.Request, output *DescribeTagsOutput) { + op := &request.Operation{ + Name: opDescribeTags, + HTTPMethod: "GET", + HTTPPath: "/2015-02-01/tags/{FileSystemId}/", + } + + if input == nil { + input = &DescribeTagsInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeTagsOutput{} + req.Data = output + return +} + +// Returns the tags associated with a file system. The order of tags returned +// in the response of one DescribeTags call, and the order of tags returned +// across the responses of a multi-call iteration (when using pagination), is +// unspecified. +// +// This operation requires permission for the elasticfilesystem:DescribeTags +// action. +func (c *EFS) DescribeTags(input *DescribeTagsInput) (*DescribeTagsOutput, error) { + req, out := c.DescribeTagsRequest(input) + err := req.Send() + return out, err +} + +const opModifyMountTargetSecurityGroups = "ModifyMountTargetSecurityGroups" + +// ModifyMountTargetSecurityGroupsRequest generates a request for the ModifyMountTargetSecurityGroups operation. +func (c *EFS) ModifyMountTargetSecurityGroupsRequest(input *ModifyMountTargetSecurityGroupsInput) (req *request.Request, output *ModifyMountTargetSecurityGroupsOutput) { + op := &request.Operation{ + Name: opModifyMountTargetSecurityGroups, + HTTPMethod: "PUT", + HTTPPath: "/2015-02-01/mount-targets/{MountTargetId}/security-groups", + } + + if input == nil { + input = &ModifyMountTargetSecurityGroupsInput{} + } + + req = c.newRequest(op, input, output) + output = &ModifyMountTargetSecurityGroupsOutput{} + req.Data = output + return +} + +// Modifies the set of security groups in effect for a mount target. +// +// When you create a mount target, Amazon EFS also creates a new network interface +// (see CreateMountTarget). This operation replaces the security groups in effect +// for the network interface associated with a mount target, with the SecurityGroups +// provided in the request. This operation requires that the network interface +// of the mount target has been created and the life cycle state of the mount +// target is not "deleted". +// +// The operation requires permissions for the following actions: +// +// elasticfilesystem:ModifyMountTargetSecurityGroups action on the mount +// target's file system. ec2:ModifyNetworkInterfaceAttribute action on the +// mount target's network interface. +func (c *EFS) ModifyMountTargetSecurityGroups(input *ModifyMountTargetSecurityGroupsInput) (*ModifyMountTargetSecurityGroupsOutput, error) { + req, out := c.ModifyMountTargetSecurityGroupsRequest(input) + err := req.Send() + return out, err +} + +type CreateFileSystemInput struct { + _ struct{} `type:"structure"` + + // String of up to 64 ASCII characters. Amazon EFS uses this to ensure idempotent + // creation. + CreationToken *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s CreateFileSystemInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateFileSystemInput) GoString() string { + return s.String() +} + +type CreateMountTargetInput struct { + _ struct{} `type:"structure"` + + // The ID of the file system for which to create the mount target. + FileSystemId *string `type:"string" required:"true"` + + // A valid IPv4 address within the address range of the specified subnet. + IpAddress *string `type:"string"` + + // Up to 5 VPC security group IDs, of the form "sg-xxxxxxxx". 
These must be
+	// for the same VPC as the subnet specified.
+	SecurityGroups []*string `type:"list"`
+
+	// The ID of the subnet to add the mount target in.
+	SubnetId *string `type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s CreateMountTargetInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreateMountTargetInput) GoString() string {
+	return s.String()
+}
+
+type CreateTagsInput struct {
+	_ struct{} `type:"structure"`
+
+	// String. The ID of the file system whose tags you want to modify. This operation
+	// modifies only the tags and not the file system.
+	FileSystemId *string `location:"uri" locationName:"FileSystemId" type:"string" required:"true"`
+
+	// An array of Tag objects to add. Each Tag object is a key-value pair.
+	Tags []*Tag `type:"list" required:"true"`
+}
+
+// String returns the string representation
+func (s CreateTagsInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreateTagsInput) GoString() string {
+	return s.String()
+}
+
+type CreateTagsOutput struct {
+	_ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s CreateTagsOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreateTagsOutput) GoString() string {
+	return s.String()
+}
+
+type DeleteFileSystemInput struct {
+	_ struct{} `type:"structure"`
+
+	// The ID of the file system you want to delete.
+	FileSystemId *string `location:"uri" locationName:"FileSystemId" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s DeleteFileSystemInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteFileSystemInput) GoString() string {
+	return s.String()
+}
+
+type DeleteFileSystemOutput struct {
+	_ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s DeleteFileSystemOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteFileSystemOutput) GoString() string {
+	return s.String()
+}
+
+type DeleteMountTargetInput struct {
+	_ struct{} `type:"structure"`
+
+	// String. The ID of the mount target to delete.
+	MountTargetId *string `location:"uri" locationName:"MountTargetId" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s DeleteMountTargetInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteMountTargetInput) GoString() string {
+	return s.String()
+}
+
+type DeleteMountTargetOutput struct {
+	_ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s DeleteMountTargetOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteMountTargetOutput) GoString() string {
+	return s.String()
+}
+
+type DeleteTagsInput struct {
+	_ struct{} `type:"structure"`
+
+	// String. The ID of the file system whose tags you want to delete.
+	FileSystemId *string `location:"uri" locationName:"FileSystemId" type:"string" required:"true"`
+
+	// A list of tag keys to delete.
+	TagKeys []*string `type:"list" required:"true"`
+}
+
+// String returns the string representation
+func (s DeleteTagsInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteTagsInput) GoString() string {
+	return s.String()
+}
+
+type DeleteTagsOutput struct {
+	_ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s DeleteTagsOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteTagsOutput) GoString() string {
+	return s.String()
+}
+
+type DescribeFileSystemsInput struct {
+	_ struct{} `type:"structure"`
+
+	// Optional string. Restricts the list to the file system with this creation
+	// token (you specify a creation token at the time of creating an Amazon EFS
+	// file system).
+	CreationToken *string `location:"querystring" locationName:"CreationToken" min:"1" type:"string"`
+
+	// Optional string. File system ID whose description you want to retrieve.
+	FileSystemId *string `location:"querystring" locationName:"FileSystemId" type:"string"`
+
+	// Optional string. Opaque pagination token returned from a previous DescribeFileSystems
+	// operation. If present, specifies to continue the list from where the previous
+	// call left off.
+	Marker *string `location:"querystring" locationName:"Marker" type:"string"`
+
+	// Optional integer. Specifies the maximum number of file systems to return
+	// in the response. This parameter value must be greater than 0. The number
+	// of items Amazon EFS returns will be the minimum of the MaxItems parameter
+	// specified in the request and the service's internal maximum number of items
+	// per page.
+	MaxItems *int64 `location:"querystring" locationName:"MaxItems" min:"1" type:"integer"`
+}
+
+// String returns the string representation
+func (s DescribeFileSystemsInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeFileSystemsInput) GoString() string {
+	return s.String()
+}
+
+type DescribeFileSystemsOutput struct {
+	_ struct{} `type:"structure"`
+
+	// An array of file system descriptions.
+	FileSystems []*FileSystemDescription `type:"list"`
+
+	// A string, present if provided by the caller in the request.
+	Marker *string `type:"string"`
+
+	// A string, present if there are more file systems than returned in the response.
+	// You can use the NextMarker in the subsequent request to fetch the descriptions.
+	NextMarker *string `type:"string"`
+}
+
+// String returns the string representation
+func (s DescribeFileSystemsOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeFileSystemsOutput) GoString() string {
+	return s.String()
+}
+
+type DescribeMountTargetSecurityGroupsInput struct {
+	_ struct{} `type:"structure"`
+
+	// The ID of the mount target whose security groups you want to retrieve.
+	MountTargetId *string `location:"uri" locationName:"MountTargetId" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s DescribeMountTargetSecurityGroupsInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeMountTargetSecurityGroupsInput) GoString() string {
+	return s.String()
+}
+
+type DescribeMountTargetSecurityGroupsOutput struct {
+	_ struct{} `type:"structure"`
+
+	// An array of security groups.
+ SecurityGroups []*string `type:"list" required:"true"` +} + +// String returns the string representation +func (s DescribeMountTargetSecurityGroupsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeMountTargetSecurityGroupsOutput) GoString() string { + return s.String() +} + +type DescribeMountTargetsInput struct { + _ struct{} `type:"structure"` + + // Optional. String. The ID of the file system whose mount targets you want + // to list. It must be included in your request if MountTargetId is not included. + FileSystemId *string `location:"querystring" locationName:"FileSystemId" type:"string"` + + // Optional. String. Opaque pagination token returned from a previous DescribeMountTargets + // operation. If present, it specifies to continue the list from where the previous + // returning call left off. + Marker *string `location:"querystring" locationName:"Marker" type:"string"` + + // Optional. Maximum number of mount targets to return in the response. It must + // be an integer with a value greater than zero. + MaxItems *int64 `location:"querystring" locationName:"MaxItems" min:"1" type:"integer"` + + // Optional. String. The ID of the mount target that you want to have described. + // It must be included in your request if FileSystemId is not included. + MountTargetId *string `location:"querystring" locationName:"MountTargetId" type:"string"` +} + +// String returns the string representation +func (s DescribeMountTargetsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeMountTargetsInput) GoString() string { + return s.String() +} + +type DescribeMountTargetsOutput struct { + _ struct{} `type:"structure"` + + // If the request included the Marker, the response returns that value in this + // field. + Marker *string `type:"string"` + + // Returns the file system's mount targets as an array of MountTargetDescription + // objects. + MountTargets []*MountTargetDescription `type:"list"` + + // If a value is present, there are more mount targets to return. In a subsequent + // request, you can provide Marker in your request with this value to retrieve + // the next set of mount targets. + NextMarker *string `type:"string"` +} + +// String returns the string representation +func (s DescribeMountTargetsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeMountTargetsOutput) GoString() string { + return s.String() +} + +type DescribeTagsInput struct { + _ struct{} `type:"structure"` + + // The ID of the file system whose tag set you want to retrieve. + FileSystemId *string `location:"uri" locationName:"FileSystemId" type:"string" required:"true"` + + // Optional. String. Opaque pagination token returned from a previous DescribeTags + // operation. If present, it specifies to continue the list from where the previous + // call left off. + Marker *string `location:"querystring" locationName:"Marker" type:"string"` + + // Optional. Maximum number of file system tags to return in the response. It + // must be an integer with a value greater than zero. 
+ MaxItems *int64 `location:"querystring" locationName:"MaxItems" min:"1" type:"integer"` +} + +// String returns the string representation +func (s DescribeTagsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeTagsInput) GoString() string { + return s.String() +} + +type DescribeTagsOutput struct { + _ struct{} `type:"structure"` + + // If the request included a Marker, the response returns that value in this + // field. + Marker *string `type:"string"` + + // If a value is present, there are more tags to return. In a subsequent request, + // you can provide the value of NextMarker as the value of the Marker parameter + // in your next request to retrieve the next set of tags. + NextMarker *string `type:"string"` + + // Returns tags associated with the file system as an array of Tag objects. + Tags []*Tag `type:"list" required:"true"` +} + +// String returns the string representation +func (s DescribeTagsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeTagsOutput) GoString() string { + return s.String() +} + +// This object provides description of a file system. +type FileSystemDescription struct { + _ struct{} `type:"structure"` + + // The time at which the file system was created, in seconds, since 1970-01-01T00:00:00Z. + CreationTime *time.Time `type:"timestamp" timestampFormat:"unix" required:"true"` + + // Opaque string specified in the request. + CreationToken *string `min:"1" type:"string" required:"true"` + + // The file system ID assigned by Amazon EFS. + FileSystemId *string `type:"string" required:"true"` + + // A predefined string value that indicates the lifecycle phase of the file + // system. + LifeCycleState *string `type:"string" required:"true" enum:"LifeCycleState"` + + // You can add tags to a file system (see CreateTags) including a "Name" tag. + // If the file system has a "Name" tag, Amazon EFS returns the value in this + // field. + Name *string `type:"string"` + + // The current number of mount targets (see CreateMountTarget) the file system + // has. + NumberOfMountTargets *int64 `type:"integer" required:"true"` + + // The AWS account that created the file system. If the file system was created + // by an IAM user, the parent account to which the user belongs is the owner. + OwnerId *string `type:"string" required:"true"` + + // This object provides the latest known metered size of data stored in the + // file system, in bytes, in its Value field, and the time at which that size + // was determined in its Timestamp field. The Timestamp value is the integer + // number of seconds since 1970-01-01T00:00:00Z. Note that the value does not + // represent the size of a consistent snapshot of the file system, but it is + // eventually consistent when there are no writes to the file system. That is, + // the value will represent actual size only if the file system is not modified + // for a period longer than a couple of hours. Otherwise, the value is not the + // exact size the file system was at any instant in time. 
+ SizeInBytes *FileSystemSize `type:"structure" required:"true"` +} + +// String returns the string representation +func (s FileSystemDescription) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s FileSystemDescription) GoString() string { + return s.String() +} + +// This object provides the latest known metered size, in bytes, of data stored +// in the file system, in its Value field, and the time at which that size was +// determined in its Timestamp field. Note that the value does not represent +// the size of a consistent snapshot of the file system, but it is eventually +// consistent when there are no writes to the file system. That is, the value +// will represent the actual size only if the file system is not modified for +// a period longer than a couple of hours. Otherwise, the value is not necessarily +// the exact size the file system was at any instant in time. +type FileSystemSize struct { + _ struct{} `type:"structure"` + + // The time at which the size of data, returned in the Value field, was determined. + // The value is the integer number of seconds since 1970-01-01T00:00:00Z. + Timestamp *time.Time `type:"timestamp" timestampFormat:"unix"` + + // The latest known metered size, in bytes, of data stored in the file system. + Value *int64 `type:"long" required:"true"` +} + +// String returns the string representation +func (s FileSystemSize) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s FileSystemSize) GoString() string { + return s.String() +} + +type ModifyMountTargetSecurityGroupsInput struct { + _ struct{} `type:"structure"` + + // The ID of the mount target whose security groups you want to modify. + MountTargetId *string `location:"uri" locationName:"MountTargetId" type:"string" required:"true"` + + // An array of up to five VPC security group IDs. + SecurityGroups []*string `type:"list"` +} + +// String returns the string representation +func (s ModifyMountTargetSecurityGroupsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModifyMountTargetSecurityGroupsInput) GoString() string { + return s.String() +} + +type ModifyMountTargetSecurityGroupsOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s ModifyMountTargetSecurityGroupsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModifyMountTargetSecurityGroupsOutput) GoString() string { + return s.String() +} + +// This object provides description of a mount target. +type MountTargetDescription struct { + _ struct{} `type:"structure"` + + // The ID of the file system for which the mount target is intended. + FileSystemId *string `type:"string" required:"true"` + + // The address at which the file system may be mounted via the mount target. + IpAddress *string `type:"string"` + + // The lifecycle state the mount target is in. + LifeCycleState *string `type:"string" required:"true" enum:"LifeCycleState"` + + // The system-assigned mount target ID. + MountTargetId *string `type:"string" required:"true"` + + // The ID of the network interface that Amazon EFS created when it created the + // mount target. + NetworkInterfaceId *string `type:"string"` + + // The AWS account ID that owns the resource. + OwnerId *string `type:"string"` + + // The ID of the subnet that the mount target is in. 
+ SubnetId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s MountTargetDescription) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s MountTargetDescription) GoString() string { + return s.String() +} + +// A tag is a pair of key and value. The allowed characters in keys and values +// are letters, whitespace, and numbers, representable in UTF-8, and the characters +// '+', '-', '=', '.', '_', ':', and '/'. +type Tag struct { + _ struct{} `type:"structure"` + + // Tag key, a string. The key must not start with "aws:". + Key *string `min:"1" type:"string" required:"true"` + + // Value of the tag key. + Value *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s Tag) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Tag) GoString() string { + return s.String() +} + +const ( + // @enum LifeCycleState + LifeCycleStateCreating = "creating" + // @enum LifeCycleState + LifeCycleStateAvailable = "available" + // @enum LifeCycleState + LifeCycleStateDeleting = "deleting" + // @enum LifeCycleState + LifeCycleStateDeleted = "deleted" +) diff --git a/vendor/github.com/aws/aws-sdk-go/service/efs/efsiface/interface.go b/vendor/github.com/aws/aws-sdk-go/service/efs/efsiface/interface.go new file mode 100644 index 000000000..53542f18d --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/efs/efsiface/interface.go @@ -0,0 +1,58 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +// Package efsiface provides an interface for the Amazon Elastic File System. +package efsiface + +import ( + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/service/efs" +) + +// EFSAPI is the interface type for efs.EFS. 
+type EFSAPI interface { + CreateFileSystemRequest(*efs.CreateFileSystemInput) (*request.Request, *efs.FileSystemDescription) + + CreateFileSystem(*efs.CreateFileSystemInput) (*efs.FileSystemDescription, error) + + CreateMountTargetRequest(*efs.CreateMountTargetInput) (*request.Request, *efs.MountTargetDescription) + + CreateMountTarget(*efs.CreateMountTargetInput) (*efs.MountTargetDescription, error) + + CreateTagsRequest(*efs.CreateTagsInput) (*request.Request, *efs.CreateTagsOutput) + + CreateTags(*efs.CreateTagsInput) (*efs.CreateTagsOutput, error) + + DeleteFileSystemRequest(*efs.DeleteFileSystemInput) (*request.Request, *efs.DeleteFileSystemOutput) + + DeleteFileSystem(*efs.DeleteFileSystemInput) (*efs.DeleteFileSystemOutput, error) + + DeleteMountTargetRequest(*efs.DeleteMountTargetInput) (*request.Request, *efs.DeleteMountTargetOutput) + + DeleteMountTarget(*efs.DeleteMountTargetInput) (*efs.DeleteMountTargetOutput, error) + + DeleteTagsRequest(*efs.DeleteTagsInput) (*request.Request, *efs.DeleteTagsOutput) + + DeleteTags(*efs.DeleteTagsInput) (*efs.DeleteTagsOutput, error) + + DescribeFileSystemsRequest(*efs.DescribeFileSystemsInput) (*request.Request, *efs.DescribeFileSystemsOutput) + + DescribeFileSystems(*efs.DescribeFileSystemsInput) (*efs.DescribeFileSystemsOutput, error) + + DescribeMountTargetSecurityGroupsRequest(*efs.DescribeMountTargetSecurityGroupsInput) (*request.Request, *efs.DescribeMountTargetSecurityGroupsOutput) + + DescribeMountTargetSecurityGroups(*efs.DescribeMountTargetSecurityGroupsInput) (*efs.DescribeMountTargetSecurityGroupsOutput, error) + + DescribeMountTargetsRequest(*efs.DescribeMountTargetsInput) (*request.Request, *efs.DescribeMountTargetsOutput) + + DescribeMountTargets(*efs.DescribeMountTargetsInput) (*efs.DescribeMountTargetsOutput, error) + + DescribeTagsRequest(*efs.DescribeTagsInput) (*request.Request, *efs.DescribeTagsOutput) + + DescribeTags(*efs.DescribeTagsInput) (*efs.DescribeTagsOutput, error) + + ModifyMountTargetSecurityGroupsRequest(*efs.ModifyMountTargetSecurityGroupsInput) (*request.Request, *efs.ModifyMountTargetSecurityGroupsOutput) + + ModifyMountTargetSecurityGroups(*efs.ModifyMountTargetSecurityGroupsInput) (*efs.ModifyMountTargetSecurityGroupsOutput, error) +} + +var _ EFSAPI = (*efs.EFS)(nil) diff --git a/vendor/github.com/aws/aws-sdk-go/service/efs/examples_test.go b/vendor/github.com/aws/aws-sdk-go/service/efs/examples_test.go new file mode 100644 index 000000000..85907f936 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/efs/examples_test.go @@ -0,0 +1,254 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +package efs_test + +import ( + "bytes" + "fmt" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/session" + "github.com/aws/aws-sdk-go/service/efs" +) + +var _ time.Duration +var _ bytes.Buffer + +func ExampleEFS_CreateFileSystem() { + svc := efs.New(session.New()) + + params := &efs.CreateFileSystemInput{ + CreationToken: aws.String("CreationToken"), // Required + } + resp, err := svc.CreateFileSystem(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleEFS_CreateMountTarget() { + svc := efs.New(session.New()) + + params := &efs.CreateMountTargetInput{ + FileSystemId: aws.String("FileSystemId"), // Required + SubnetId: aws.String("SubnetId"), // Required + IpAddress: aws.String("IpAddress"), + SecurityGroups: []*string{ + aws.String("SecurityGroup"), // Required + // More values... + }, + } + resp, err := svc.CreateMountTarget(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEFS_CreateTags() { + svc := efs.New(session.New()) + + params := &efs.CreateTagsInput{ + FileSystemId: aws.String("FileSystemId"), // Required + Tags: []*efs.Tag{ // Required + { // Required + Key: aws.String("TagKey"), // Required + Value: aws.String("TagValue"), // Required + }, + // More values... + }, + } + resp, err := svc.CreateTags(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEFS_DeleteFileSystem() { + svc := efs.New(session.New()) + + params := &efs.DeleteFileSystemInput{ + FileSystemId: aws.String("FileSystemId"), // Required + } + resp, err := svc.DeleteFileSystem(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEFS_DeleteMountTarget() { + svc := efs.New(session.New()) + + params := &efs.DeleteMountTargetInput{ + MountTargetId: aws.String("MountTargetId"), // Required + } + resp, err := svc.DeleteMountTarget(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEFS_DeleteTags() { + svc := efs.New(session.New()) + + params := &efs.DeleteTagsInput{ + FileSystemId: aws.String("FileSystemId"), // Required + TagKeys: []*string{ // Required + aws.String("TagKey"), // Required + // More values... + }, + } + resp, err := svc.DeleteTags(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEFS_DescribeFileSystems() { + svc := efs.New(session.New()) + + params := &efs.DescribeFileSystemsInput{ + CreationToken: aws.String("CreationToken"), + FileSystemId: aws.String("FileSystemId"), + Marker: aws.String("Marker"), + MaxItems: aws.Int64(1), + } + resp, err := svc.DescribeFileSystems(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEFS_DescribeMountTargetSecurityGroups() { + svc := efs.New(session.New()) + + params := &efs.DescribeMountTargetSecurityGroupsInput{ + MountTargetId: aws.String("MountTargetId"), // Required + } + resp, err := svc.DescribeMountTargetSecurityGroups(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. 
+		fmt.Println(err.Error())
+		return
+	}
+
+	// Pretty-print the response data.
+	fmt.Println(resp)
+}
+
+func ExampleEFS_DescribeMountTargets() {
+	svc := efs.New(session.New())
+
+	params := &efs.DescribeMountTargetsInput{
+		FileSystemId:  aws.String("FileSystemId"),
+		Marker:        aws.String("Marker"),
+		MaxItems:      aws.Int64(1),
+		MountTargetId: aws.String("MountTargetId"),
+	}
+	resp, err := svc.DescribeMountTargets(params)
+
+	if err != nil {
+		// Print the error, cast err to awserr.Error to get the Code and
+		// Message from an error.
+		fmt.Println(err.Error())
+		return
+	}
+
+	// Pretty-print the response data.
+	fmt.Println(resp)
+}
+
+func ExampleEFS_DescribeTags() {
+	svc := efs.New(session.New())
+
+	params := &efs.DescribeTagsInput{
+		FileSystemId: aws.String("FileSystemId"), // Required
+		Marker:       aws.String("Marker"),
+		MaxItems:     aws.Int64(1),
+	}
+	resp, err := svc.DescribeTags(params)
+
+	if err != nil {
+		// Print the error, cast err to awserr.Error to get the Code and
+		// Message from an error.
+		fmt.Println(err.Error())
+		return
+	}
+
+	// Pretty-print the response data.
+	fmt.Println(resp)
+}
+
+func ExampleEFS_ModifyMountTargetSecurityGroups() {
+	svc := efs.New(session.New())
+
+	params := &efs.ModifyMountTargetSecurityGroupsInput{
+		MountTargetId: aws.String("MountTargetId"), // Required
+		SecurityGroups: []*string{
+			aws.String("SecurityGroup"), // Required
+			// More values...
+		},
+	}
+	resp, err := svc.ModifyMountTargetSecurityGroups(params)
+
+	if err != nil {
+		// Print the error, cast err to awserr.Error to get the Code and
+		// Message from an error.
+		fmt.Println(err.Error())
+		return
+	}
+
+	// Pretty-print the response data.
+	fmt.Println(resp)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/service/efs/service.go b/vendor/github.com/aws/aws-sdk-go/service/efs/service.go
new file mode 100644
index 000000000..307d09cf3
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/efs/service.go
@@ -0,0 +1,85 @@
+// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT.
+
+package efs
+
+import (
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/aws/client"
+	"github.com/aws/aws-sdk-go/aws/client/metadata"
+	"github.com/aws/aws-sdk-go/aws/request"
+	"github.com/aws/aws-sdk-go/private/protocol/restjson"
+	"github.com/aws/aws-sdk-go/private/signer/v4"
+)
+
+// The service client's operations are safe to be used concurrently.
+// It is not safe to mutate any of the client's properties, though.
+type EFS struct {
+	*client.Client
+}
+
+// Used for custom client initialization logic
+var initClient func(*client.Client)
+
+// Used for custom request initialization logic
+var initRequest func(*request.Request)
+
+// A ServiceName is the name of the service the client will make API calls to.
+const ServiceName = "elasticfilesystem"
+
+// New creates a new instance of the EFS client with a session.
+// If additional configuration is needed for the client instance use the optional
+// aws.Config parameter to add your extra config.
+//
+// Example:
+//     // Create an EFS client from just a session.
+//     svc := efs.New(mySession)
+//
+//     // Create an EFS client with additional configuration
+//     svc := efs.New(mySession, aws.NewConfig().WithRegion("us-west-2"))
+func New(p client.ConfigProvider, cfgs ...*aws.Config) *EFS {
+	c := p.ClientConfig(ServiceName, cfgs...)
+	return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion)
+}
+
+// newClient creates, initializes and returns a new service client instance.
+func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *EFS {
+	svc := &EFS{
+		Client: client.New(
+			cfg,
+			metadata.ClientInfo{
+				ServiceName:   ServiceName,
+				SigningRegion: signingRegion,
+				Endpoint:      endpoint,
+				APIVersion:    "2015-02-01",
+			},
+			handlers,
+		),
+	}
+
+	// Handlers
+	svc.Handlers.Sign.PushBack(v4.Sign)
+	svc.Handlers.Build.PushBack(restjson.Build)
+	svc.Handlers.Unmarshal.PushBack(restjson.Unmarshal)
+	svc.Handlers.UnmarshalMeta.PushBack(restjson.UnmarshalMeta)
+	svc.Handlers.UnmarshalError.PushBack(restjson.UnmarshalError)
+
+	// Run custom client initialization if present
+	if initClient != nil {
+		initClient(svc.Client)
+	}
+
+	return svc
+}
+
+// newRequest creates a new request for an EFS operation and runs any
+// custom request initialization.
+func (c *EFS) newRequest(op *request.Operation, params, data interface{}) *request.Request {
+	req := c.NewRequest(op, params, data)
+
+	// Run custom request initialization if present
+	if initRequest != nil {
+		initRequest(req)
+	}
+
+	return req
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/service/elasticache/api.go b/vendor/github.com/aws/aws-sdk-go/service/elasticache/api.go
new file mode 100644
index 000000000..ea8233b7c
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/elasticache/api.go
@@ -0,0 +1,4891 @@
+// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT.
+
+// Package elasticache provides a client for Amazon ElastiCache.
+package elasticache
+
+import (
+	"time"
+
+	"github.com/aws/aws-sdk-go/aws/awsutil"
+	"github.com/aws/aws-sdk-go/aws/request"
+)
+
+const opAddTagsToResource = "AddTagsToResource"
+
+// AddTagsToResourceRequest generates a request for the AddTagsToResource operation.
+func (c *ElastiCache) AddTagsToResourceRequest(input *AddTagsToResourceInput) (req *request.Request, output *TagListMessage) {
+	op := &request.Operation{
+		Name:       opAddTagsToResource,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &AddTagsToResourceInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &TagListMessage{}
+	req.Data = output
+	return
+}
+
+// The AddTagsToResource action adds up to 10 cost allocation tags to the named
+// resource. A cost allocation tag is a key-value pair where the key and value
+// are case-sensitive. Cost allocation tags can be used to categorize and track
+// your AWS costs.
+//
+// When you apply tags to your ElastiCache resources, AWS generates a cost
+// allocation report as a comma-separated value (CSV) file with your usage and
+// costs aggregated by your tags. You can apply tags that represent business
+// categories (such as cost centers, application names, or owners) to organize
+// your costs across multiple services. For more information, see Using Cost
+// Allocation Tags in Amazon ElastiCache (http://docs.aws.amazon.com/AmazonElastiCache/latest/UserGuide/Tagging.html).
+func (c *ElastiCache) AddTagsToResource(input *AddTagsToResourceInput) (*TagListMessage, error) {
+	req, out := c.AddTagsToResourceRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+const opAuthorizeCacheSecurityGroupIngress = "AuthorizeCacheSecurityGroupIngress"
+
+// AuthorizeCacheSecurityGroupIngressRequest generates a request for the AuthorizeCacheSecurityGroupIngress operation.
+func (c *ElastiCache) AuthorizeCacheSecurityGroupIngressRequest(input *AuthorizeCacheSecurityGroupIngressInput) (req *request.Request, output *AuthorizeCacheSecurityGroupIngressOutput) { + op := &request.Operation{ + Name: opAuthorizeCacheSecurityGroupIngress, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &AuthorizeCacheSecurityGroupIngressInput{} + } + + req = c.newRequest(op, input, output) + output = &AuthorizeCacheSecurityGroupIngressOutput{} + req.Data = output + return +} + +// The AuthorizeCacheSecurityGroupIngress action allows network ingress to a +// cache security group. Applications using ElastiCache must be running on Amazon +// EC2, and Amazon EC2 security groups are used as the authorization mechanism. +// +// You cannot authorize ingress from an Amazon EC2 security group in one region +// to an ElastiCache cluster in another region. +func (c *ElastiCache) AuthorizeCacheSecurityGroupIngress(input *AuthorizeCacheSecurityGroupIngressInput) (*AuthorizeCacheSecurityGroupIngressOutput, error) { + req, out := c.AuthorizeCacheSecurityGroupIngressRequest(input) + err := req.Send() + return out, err +} + +const opCopySnapshot = "CopySnapshot" + +// CopySnapshotRequest generates a request for the CopySnapshot operation. +func (c *ElastiCache) CopySnapshotRequest(input *CopySnapshotInput) (req *request.Request, output *CopySnapshotOutput) { + op := &request.Operation{ + Name: opCopySnapshot, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CopySnapshotInput{} + } + + req = c.newRequest(op, input, output) + output = &CopySnapshotOutput{} + req.Data = output + return +} + +// The CopySnapshot action makes a copy of an existing snapshot. +func (c *ElastiCache) CopySnapshot(input *CopySnapshotInput) (*CopySnapshotOutput, error) { + req, out := c.CopySnapshotRequest(input) + err := req.Send() + return out, err +} + +const opCreateCacheCluster = "CreateCacheCluster" + +// CreateCacheClusterRequest generates a request for the CreateCacheCluster operation. +func (c *ElastiCache) CreateCacheClusterRequest(input *CreateCacheClusterInput) (req *request.Request, output *CreateCacheClusterOutput) { + op := &request.Operation{ + Name: opCreateCacheCluster, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateCacheClusterInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateCacheClusterOutput{} + req.Data = output + return +} + +// The CreateCacheCluster action creates a cache cluster. All nodes in the cache +// cluster run the same protocol-compliant cache engine software, either Memcached +// or Redis. +func (c *ElastiCache) CreateCacheCluster(input *CreateCacheClusterInput) (*CreateCacheClusterOutput, error) { + req, out := c.CreateCacheClusterRequest(input) + err := req.Send() + return out, err +} + +const opCreateCacheParameterGroup = "CreateCacheParameterGroup" + +// CreateCacheParameterGroupRequest generates a request for the CreateCacheParameterGroup operation. +func (c *ElastiCache) CreateCacheParameterGroupRequest(input *CreateCacheParameterGroupInput) (req *request.Request, output *CreateCacheParameterGroupOutput) { + op := &request.Operation{ + Name: opCreateCacheParameterGroup, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateCacheParameterGroupInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateCacheParameterGroupOutput{} + req.Data = output + return +} + +// The CreateCacheParameterGroup action creates a new cache parameter group. 
+// A cache parameter group is a collection of parameters that you apply to all +// of the nodes in a cache cluster. +func (c *ElastiCache) CreateCacheParameterGroup(input *CreateCacheParameterGroupInput) (*CreateCacheParameterGroupOutput, error) { + req, out := c.CreateCacheParameterGroupRequest(input) + err := req.Send() + return out, err +} + +const opCreateCacheSecurityGroup = "CreateCacheSecurityGroup" + +// CreateCacheSecurityGroupRequest generates a request for the CreateCacheSecurityGroup operation. +func (c *ElastiCache) CreateCacheSecurityGroupRequest(input *CreateCacheSecurityGroupInput) (req *request.Request, output *CreateCacheSecurityGroupOutput) { + op := &request.Operation{ + Name: opCreateCacheSecurityGroup, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateCacheSecurityGroupInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateCacheSecurityGroupOutput{} + req.Data = output + return +} + +// The CreateCacheSecurityGroup action creates a new cache security group. Use +// a cache security group to control access to one or more cache clusters. +// +// Cache security groups are only used when you are creating a cache cluster +// outside of an Amazon Virtual Private Cloud (VPC). If you are creating a cache +// cluster inside of a VPC, use a cache subnet group instead. For more information, +// see CreateCacheSubnetGroup (http://docs.aws.amazon.com/AmazonElastiCache/latest/APIReference/API_CreateCacheSubnetGroup.html). +func (c *ElastiCache) CreateCacheSecurityGroup(input *CreateCacheSecurityGroupInput) (*CreateCacheSecurityGroupOutput, error) { + req, out := c.CreateCacheSecurityGroupRequest(input) + err := req.Send() + return out, err +} + +const opCreateCacheSubnetGroup = "CreateCacheSubnetGroup" + +// CreateCacheSubnetGroupRequest generates a request for the CreateCacheSubnetGroup operation. +func (c *ElastiCache) CreateCacheSubnetGroupRequest(input *CreateCacheSubnetGroupInput) (req *request.Request, output *CreateCacheSubnetGroupOutput) { + op := &request.Operation{ + Name: opCreateCacheSubnetGroup, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateCacheSubnetGroupInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateCacheSubnetGroupOutput{} + req.Data = output + return +} + +// The CreateCacheSubnetGroup action creates a new cache subnet group. +// +// Use this parameter only when you are creating a cluster in an Amazon Virtual +// Private Cloud (VPC). +func (c *ElastiCache) CreateCacheSubnetGroup(input *CreateCacheSubnetGroupInput) (*CreateCacheSubnetGroupOutput, error) { + req, out := c.CreateCacheSubnetGroupRequest(input) + err := req.Send() + return out, err +} + +const opCreateReplicationGroup = "CreateReplicationGroup" + +// CreateReplicationGroupRequest generates a request for the CreateReplicationGroup operation. +func (c *ElastiCache) CreateReplicationGroupRequest(input *CreateReplicationGroupInput) (req *request.Request, output *CreateReplicationGroupOutput) { + op := &request.Operation{ + Name: opCreateReplicationGroup, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateReplicationGroupInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateReplicationGroupOutput{} + req.Data = output + return +} + +// The CreateReplicationGroup action creates a replication group. 
A replication
+// group is a collection of cache clusters, where one of the cache clusters
+// is a read/write primary and the others are read-only replicas. Writes to
+// the primary are automatically propagated to the replicas.
+//
+// When you create a replication group, you must specify an existing cache
+// cluster that is in the primary role. When the replication group has been
+// successfully created, you can add one or more read replicas to it, up to
+// a total of five read replicas.
+//
+// Note: This action is valid only for Redis.
+func (c *ElastiCache) CreateReplicationGroup(input *CreateReplicationGroupInput) (*CreateReplicationGroupOutput, error) {
+	req, out := c.CreateReplicationGroupRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+const opCreateSnapshot = "CreateSnapshot"
+
+// CreateSnapshotRequest generates a request for the CreateSnapshot operation.
+func (c *ElastiCache) CreateSnapshotRequest(input *CreateSnapshotInput) (req *request.Request, output *CreateSnapshotOutput) {
+	op := &request.Operation{
+		Name:       opCreateSnapshot,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &CreateSnapshotInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &CreateSnapshotOutput{}
+	req.Data = output
+	return
+}
+
+// The CreateSnapshot action creates a copy of an entire cache cluster at a
+// specific moment in time.
+func (c *ElastiCache) CreateSnapshot(input *CreateSnapshotInput) (*CreateSnapshotOutput, error) {
+	req, out := c.CreateSnapshotRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+const opDeleteCacheCluster = "DeleteCacheCluster"
+
+// DeleteCacheClusterRequest generates a request for the DeleteCacheCluster operation.
+func (c *ElastiCache) DeleteCacheClusterRequest(input *DeleteCacheClusterInput) (req *request.Request, output *DeleteCacheClusterOutput) {
+	op := &request.Operation{
+		Name:       opDeleteCacheCluster,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &DeleteCacheClusterInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &DeleteCacheClusterOutput{}
+	req.Data = output
+	return
+}
+
+// The DeleteCacheCluster action deletes a previously provisioned cache cluster.
+// DeleteCacheCluster deletes all associated cache nodes, node endpoints and
+// the cache cluster itself. When you receive a successful response from this
+// action, Amazon ElastiCache immediately begins deleting the cache cluster;
+// you cannot cancel or revert this action.
+//
+// This API cannot be used to delete a cache cluster that is the last read
+// replica of a replication group that has Multi-AZ mode enabled.
+func (c *ElastiCache) DeleteCacheCluster(input *DeleteCacheClusterInput) (*DeleteCacheClusterOutput, error) {
+	req, out := c.DeleteCacheClusterRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+const opDeleteCacheParameterGroup = "DeleteCacheParameterGroup"
+
+// DeleteCacheParameterGroupRequest generates a request for the DeleteCacheParameterGroup operation.
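+//
+// Editor's note, a hedged sketch (not generated code; the group name is a
+// placeholder). Deleting a group that is still associated with a cache
+// cluster returns an error, per the doc comment below.
+//
+//	svc := elasticache.New(session.New())
+//	_, err := svc.DeleteCacheParameterGroup(&elasticache.DeleteCacheParameterGroupInput{
+//		CacheParameterGroupName: aws.String("my-params"),
+//	})
+//	if err != nil {
+//		fmt.Println(err.Error())
+//	}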
+func (c *ElastiCache) DeleteCacheParameterGroupRequest(input *DeleteCacheParameterGroupInput) (req *request.Request, output *DeleteCacheParameterGroupOutput) { + op := &request.Operation{ + Name: opDeleteCacheParameterGroup, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteCacheParameterGroupInput{} + } + + req = c.newRequest(op, input, output) + output = &DeleteCacheParameterGroupOutput{} + req.Data = output + return +} + +// The DeleteCacheParameterGroup action deletes the specified cache parameter +// group. You cannot delete a cache parameter group if it is associated with +// any cache clusters. +func (c *ElastiCache) DeleteCacheParameterGroup(input *DeleteCacheParameterGroupInput) (*DeleteCacheParameterGroupOutput, error) { + req, out := c.DeleteCacheParameterGroupRequest(input) + err := req.Send() + return out, err +} + +const opDeleteCacheSecurityGroup = "DeleteCacheSecurityGroup" + +// DeleteCacheSecurityGroupRequest generates a request for the DeleteCacheSecurityGroup operation. +func (c *ElastiCache) DeleteCacheSecurityGroupRequest(input *DeleteCacheSecurityGroupInput) (req *request.Request, output *DeleteCacheSecurityGroupOutput) { + op := &request.Operation{ + Name: opDeleteCacheSecurityGroup, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteCacheSecurityGroupInput{} + } + + req = c.newRequest(op, input, output) + output = &DeleteCacheSecurityGroupOutput{} + req.Data = output + return +} + +// The DeleteCacheSecurityGroup action deletes a cache security group. +// +// You cannot delete a cache security group if it is associated with any cache +// clusters. +func (c *ElastiCache) DeleteCacheSecurityGroup(input *DeleteCacheSecurityGroupInput) (*DeleteCacheSecurityGroupOutput, error) { + req, out := c.DeleteCacheSecurityGroupRequest(input) + err := req.Send() + return out, err +} + +const opDeleteCacheSubnetGroup = "DeleteCacheSubnetGroup" + +// DeleteCacheSubnetGroupRequest generates a request for the DeleteCacheSubnetGroup operation. +func (c *ElastiCache) DeleteCacheSubnetGroupRequest(input *DeleteCacheSubnetGroupInput) (req *request.Request, output *DeleteCacheSubnetGroupOutput) { + op := &request.Operation{ + Name: opDeleteCacheSubnetGroup, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteCacheSubnetGroupInput{} + } + + req = c.newRequest(op, input, output) + output = &DeleteCacheSubnetGroupOutput{} + req.Data = output + return +} + +// The DeleteCacheSubnetGroup action deletes a cache subnet group. +// +// You cannot delete a cache subnet group if it is associated with any cache +// clusters. +func (c *ElastiCache) DeleteCacheSubnetGroup(input *DeleteCacheSubnetGroupInput) (*DeleteCacheSubnetGroupOutput, error) { + req, out := c.DeleteCacheSubnetGroupRequest(input) + err := req.Send() + return out, err +} + +const opDeleteReplicationGroup = "DeleteReplicationGroup" + +// DeleteReplicationGroupRequest generates a request for the DeleteReplicationGroup operation. +func (c *ElastiCache) DeleteReplicationGroupRequest(input *DeleteReplicationGroupInput) (req *request.Request, output *DeleteReplicationGroupOutput) { + op := &request.Operation{ + Name: opDeleteReplicationGroup, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteReplicationGroupInput{} + } + + req = c.newRequest(op, input, output) + output = &DeleteReplicationGroupOutput{} + req.Data = output + return +} + +// The DeleteReplicationGroup action deletes an existing replication group. 
+// By default, this action deletes the entire replication group, including the
+// primary cluster and all of the read replicas. You can optionally delete only
+// the read replicas, while retaining the primary cluster.
+//
+// When you receive a successful response from this action, Amazon ElastiCache
+// immediately begins deleting the selected resources; you cannot cancel or
+// revert this action.
+func (c *ElastiCache) DeleteReplicationGroup(input *DeleteReplicationGroupInput) (*DeleteReplicationGroupOutput, error) {
+	req, out := c.DeleteReplicationGroupRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+const opDeleteSnapshot = "DeleteSnapshot"
+
+// DeleteSnapshotRequest generates a request for the DeleteSnapshot operation.
+func (c *ElastiCache) DeleteSnapshotRequest(input *DeleteSnapshotInput) (req *request.Request, output *DeleteSnapshotOutput) {
+	op := &request.Operation{
+		Name:       opDeleteSnapshot,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &DeleteSnapshotInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &DeleteSnapshotOutput{}
+	req.Data = output
+	return
+}
+
+// The DeleteSnapshot action deletes an existing snapshot. When you receive
+// a successful response from this action, ElastiCache immediately begins deleting
+// the snapshot; you cannot cancel or revert this action.
+func (c *ElastiCache) DeleteSnapshot(input *DeleteSnapshotInput) (*DeleteSnapshotOutput, error) {
+	req, out := c.DeleteSnapshotRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+const opDescribeCacheClusters = "DescribeCacheClusters"
+
+// DescribeCacheClustersRequest generates a request for the DescribeCacheClusters operation.
+func (c *ElastiCache) DescribeCacheClustersRequest(input *DescribeCacheClustersInput) (req *request.Request, output *DescribeCacheClustersOutput) {
+	op := &request.Operation{
+		Name:       opDescribeCacheClusters,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+		Paginator: &request.Paginator{
+			InputTokens:     []string{"Marker"},
+			OutputTokens:    []string{"Marker"},
+			LimitToken:      "MaxRecords",
+			TruncationToken: "",
+		},
+	}
+
+	if input == nil {
+		input = &DescribeCacheClustersInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &DescribeCacheClustersOutput{}
+	req.Data = output
+	return
+}
+
+// The DescribeCacheClusters action returns information about all provisioned
+// cache clusters if no cache cluster identifier is specified, or about a specific
+// cache cluster if a cache cluster identifier is supplied.
+//
+// By default, abbreviated information about the cache cluster(s) will be
+// returned. You can use the optional ShowDetails flag to retrieve detailed
+// information about the cache nodes associated with the cache clusters. These
+// details include the DNS address and port for the cache node endpoint.
+//
+// If the cluster is in the CREATING state, only cluster level information
+// will be displayed until all of the nodes are successfully provisioned.
+//
+// If the cluster is in the DELETING state, only cluster level information
+// will be displayed.
+//
+// If cache nodes are currently being added to the cache cluster, node endpoint
+// information and creation time for the additional nodes will not be displayed
+// until they are completely provisioned. When the cache cluster state is available,
+// the cluster is ready for use.
+//
+// If cache nodes are currently being removed from the cache cluster, no endpoint
+// information for the removed nodes is displayed.
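+//
+// Editor's note, a hedged sketch (not generated code): the Marker/MaxRecords
+// paginator declared above is normally consumed through the Pages variant
+// defined below; the CacheClusters field name is an assumption about the
+// output type.
+//
+//	svc := elasticache.New(session.New())
+//	err := svc.DescribeCacheClustersPages(&elasticache.DescribeCacheClustersInput{},
+//		func(page *elasticache.DescribeCacheClustersOutput, lastPage bool) bool {
+//			fmt.Println(len(page.CacheClusters), "clusters on this page")
+//			return true // keep paging until the final page
+//		})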
+func (c *ElastiCache) DescribeCacheClusters(input *DescribeCacheClustersInput) (*DescribeCacheClustersOutput, error) { + req, out := c.DescribeCacheClustersRequest(input) + err := req.Send() + return out, err +} + +func (c *ElastiCache) DescribeCacheClustersPages(input *DescribeCacheClustersInput, fn func(p *DescribeCacheClustersOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.DescribeCacheClustersRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*DescribeCacheClustersOutput), lastPage) + }) +} + +const opDescribeCacheEngineVersions = "DescribeCacheEngineVersions" + +// DescribeCacheEngineVersionsRequest generates a request for the DescribeCacheEngineVersions operation. +func (c *ElastiCache) DescribeCacheEngineVersionsRequest(input *DescribeCacheEngineVersionsInput) (req *request.Request, output *DescribeCacheEngineVersionsOutput) { + op := &request.Operation{ + Name: opDescribeCacheEngineVersions, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"Marker"}, + OutputTokens: []string{"Marker"}, + LimitToken: "MaxRecords", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeCacheEngineVersionsInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeCacheEngineVersionsOutput{} + req.Data = output + return +} + +// The DescribeCacheEngineVersions action returns a list of the available cache +// engines and their versions. +func (c *ElastiCache) DescribeCacheEngineVersions(input *DescribeCacheEngineVersionsInput) (*DescribeCacheEngineVersionsOutput, error) { + req, out := c.DescribeCacheEngineVersionsRequest(input) + err := req.Send() + return out, err +} + +func (c *ElastiCache) DescribeCacheEngineVersionsPages(input *DescribeCacheEngineVersionsInput, fn func(p *DescribeCacheEngineVersionsOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.DescribeCacheEngineVersionsRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*DescribeCacheEngineVersionsOutput), lastPage) + }) +} + +const opDescribeCacheParameterGroups = "DescribeCacheParameterGroups" + +// DescribeCacheParameterGroupsRequest generates a request for the DescribeCacheParameterGroups operation. +func (c *ElastiCache) DescribeCacheParameterGroupsRequest(input *DescribeCacheParameterGroupsInput) (req *request.Request, output *DescribeCacheParameterGroupsOutput) { + op := &request.Operation{ + Name: opDescribeCacheParameterGroups, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"Marker"}, + OutputTokens: []string{"Marker"}, + LimitToken: "MaxRecords", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeCacheParameterGroupsInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeCacheParameterGroupsOutput{} + req.Data = output + return +} + +// The DescribeCacheParameterGroups action returns a list of cache parameter +// group descriptions. If a cache parameter group name is specified, the list +// will contain only the descriptions for that group. 
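+//
+// Editor's note, a hedged sketch (not generated code): returning false from
+// the page callback stops iteration early. The CacheParameterGroups field
+// name is an assumption about the output type.
+//
+//	svc := elasticache.New(session.New())
+//	_ = svc.DescribeCacheParameterGroupsPages(&elasticache.DescribeCacheParameterGroupsInput{},
+//		func(page *elasticache.DescribeCacheParameterGroupsOutput, lastPage bool) bool {
+//			for _, g := range page.CacheParameterGroups {
+//				fmt.Println(*g.CacheParameterGroupName)
+//			}
+//			return false // stop after the first page
+//		})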
+func (c *ElastiCache) DescribeCacheParameterGroups(input *DescribeCacheParameterGroupsInput) (*DescribeCacheParameterGroupsOutput, error) { + req, out := c.DescribeCacheParameterGroupsRequest(input) + err := req.Send() + return out, err +} + +func (c *ElastiCache) DescribeCacheParameterGroupsPages(input *DescribeCacheParameterGroupsInput, fn func(p *DescribeCacheParameterGroupsOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.DescribeCacheParameterGroupsRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*DescribeCacheParameterGroupsOutput), lastPage) + }) +} + +const opDescribeCacheParameters = "DescribeCacheParameters" + +// DescribeCacheParametersRequest generates a request for the DescribeCacheParameters operation. +func (c *ElastiCache) DescribeCacheParametersRequest(input *DescribeCacheParametersInput) (req *request.Request, output *DescribeCacheParametersOutput) { + op := &request.Operation{ + Name: opDescribeCacheParameters, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"Marker"}, + OutputTokens: []string{"Marker"}, + LimitToken: "MaxRecords", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeCacheParametersInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeCacheParametersOutput{} + req.Data = output + return +} + +// The DescribeCacheParameters action returns the detailed parameter list for +// a particular cache parameter group. +func (c *ElastiCache) DescribeCacheParameters(input *DescribeCacheParametersInput) (*DescribeCacheParametersOutput, error) { + req, out := c.DescribeCacheParametersRequest(input) + err := req.Send() + return out, err +} + +func (c *ElastiCache) DescribeCacheParametersPages(input *DescribeCacheParametersInput, fn func(p *DescribeCacheParametersOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.DescribeCacheParametersRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*DescribeCacheParametersOutput), lastPage) + }) +} + +const opDescribeCacheSecurityGroups = "DescribeCacheSecurityGroups" + +// DescribeCacheSecurityGroupsRequest generates a request for the DescribeCacheSecurityGroups operation. +func (c *ElastiCache) DescribeCacheSecurityGroupsRequest(input *DescribeCacheSecurityGroupsInput) (req *request.Request, output *DescribeCacheSecurityGroupsOutput) { + op := &request.Operation{ + Name: opDescribeCacheSecurityGroups, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"Marker"}, + OutputTokens: []string{"Marker"}, + LimitToken: "MaxRecords", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeCacheSecurityGroupsInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeCacheSecurityGroupsOutput{} + req.Data = output + return +} + +// The DescribeCacheSecurityGroups action returns a list of cache security group +// descriptions. If a cache security group name is specified, the list will +// contain only the description of that group. 
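+//
+// Editor's note, a hedged sketch (not generated code; the group name is a
+// placeholder). Passing a name narrows the result to that single group, as
+// described above.
+//
+//	svc := elasticache.New(session.New())
+//	resp, err := svc.DescribeCacheSecurityGroups(&elasticache.DescribeCacheSecurityGroupsInput{
+//		CacheSecurityGroupName: aws.String("my-cache-sg"),
+//	})
+//	if err == nil {
+//		fmt.Println(resp)
+//	}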
+func (c *ElastiCache) DescribeCacheSecurityGroups(input *DescribeCacheSecurityGroupsInput) (*DescribeCacheSecurityGroupsOutput, error) { + req, out := c.DescribeCacheSecurityGroupsRequest(input) + err := req.Send() + return out, err +} + +func (c *ElastiCache) DescribeCacheSecurityGroupsPages(input *DescribeCacheSecurityGroupsInput, fn func(p *DescribeCacheSecurityGroupsOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.DescribeCacheSecurityGroupsRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*DescribeCacheSecurityGroupsOutput), lastPage) + }) +} + +const opDescribeCacheSubnetGroups = "DescribeCacheSubnetGroups" + +// DescribeCacheSubnetGroupsRequest generates a request for the DescribeCacheSubnetGroups operation. +func (c *ElastiCache) DescribeCacheSubnetGroupsRequest(input *DescribeCacheSubnetGroupsInput) (req *request.Request, output *DescribeCacheSubnetGroupsOutput) { + op := &request.Operation{ + Name: opDescribeCacheSubnetGroups, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"Marker"}, + OutputTokens: []string{"Marker"}, + LimitToken: "MaxRecords", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeCacheSubnetGroupsInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeCacheSubnetGroupsOutput{} + req.Data = output + return +} + +// The DescribeCacheSubnetGroups action returns a list of cache subnet group +// descriptions. If a subnet group name is specified, the list will contain +// only the description of that group. +func (c *ElastiCache) DescribeCacheSubnetGroups(input *DescribeCacheSubnetGroupsInput) (*DescribeCacheSubnetGroupsOutput, error) { + req, out := c.DescribeCacheSubnetGroupsRequest(input) + err := req.Send() + return out, err +} + +func (c *ElastiCache) DescribeCacheSubnetGroupsPages(input *DescribeCacheSubnetGroupsInput, fn func(p *DescribeCacheSubnetGroupsOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.DescribeCacheSubnetGroupsRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*DescribeCacheSubnetGroupsOutput), lastPage) + }) +} + +const opDescribeEngineDefaultParameters = "DescribeEngineDefaultParameters" + +// DescribeEngineDefaultParametersRequest generates a request for the DescribeEngineDefaultParameters operation. +func (c *ElastiCache) DescribeEngineDefaultParametersRequest(input *DescribeEngineDefaultParametersInput) (req *request.Request, output *DescribeEngineDefaultParametersOutput) { + op := &request.Operation{ + Name: opDescribeEngineDefaultParameters, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"Marker"}, + OutputTokens: []string{"EngineDefaults.Marker"}, + LimitToken: "MaxRecords", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeEngineDefaultParametersInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeEngineDefaultParametersOutput{} + req.Data = output + return +} + +// The DescribeEngineDefaultParameters action returns the default engine and +// system parameter information for the specified cache engine. 
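+//
+// Editor's note, a hedged sketch (not generated code): "redis2.8" is an
+// assumed parameter group family, and the EngineDefaults field name is
+// inferred from the EngineDefaults.Marker output token declared above.
+//
+//	svc := elasticache.New(session.New())
+//	resp, err := svc.DescribeEngineDefaultParameters(&elasticache.DescribeEngineDefaultParametersInput{
+//		CacheParameterGroupFamily: aws.String("redis2.8"),
+//	})
+//	if err == nil {
+//		fmt.Println(resp.EngineDefaults)
+//	}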
+func (c *ElastiCache) DescribeEngineDefaultParameters(input *DescribeEngineDefaultParametersInput) (*DescribeEngineDefaultParametersOutput, error) { + req, out := c.DescribeEngineDefaultParametersRequest(input) + err := req.Send() + return out, err +} + +func (c *ElastiCache) DescribeEngineDefaultParametersPages(input *DescribeEngineDefaultParametersInput, fn func(p *DescribeEngineDefaultParametersOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.DescribeEngineDefaultParametersRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*DescribeEngineDefaultParametersOutput), lastPage) + }) +} + +const opDescribeEvents = "DescribeEvents" + +// DescribeEventsRequest generates a request for the DescribeEvents operation. +func (c *ElastiCache) DescribeEventsRequest(input *DescribeEventsInput) (req *request.Request, output *DescribeEventsOutput) { + op := &request.Operation{ + Name: opDescribeEvents, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"Marker"}, + OutputTokens: []string{"Marker"}, + LimitToken: "MaxRecords", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeEventsInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeEventsOutput{} + req.Data = output + return +} + +// The DescribeEvents action returns events related to cache clusters, cache +// security groups, and cache parameter groups. You can obtain events specific +// to a particular cache cluster, cache security group, or cache parameter group +// by providing the name as a parameter. +// +// By default, only the events occurring within the last hour are returned; +// however, you can retrieve up to 14 days' worth of events if necessary. +func (c *ElastiCache) DescribeEvents(input *DescribeEventsInput) (*DescribeEventsOutput, error) { + req, out := c.DescribeEventsRequest(input) + err := req.Send() + return out, err +} + +func (c *ElastiCache) DescribeEventsPages(input *DescribeEventsInput, fn func(p *DescribeEventsOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.DescribeEventsRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*DescribeEventsOutput), lastPage) + }) +} + +const opDescribeReplicationGroups = "DescribeReplicationGroups" + +// DescribeReplicationGroupsRequest generates a request for the DescribeReplicationGroups operation. +func (c *ElastiCache) DescribeReplicationGroupsRequest(input *DescribeReplicationGroupsInput) (req *request.Request, output *DescribeReplicationGroupsOutput) { + op := &request.Operation{ + Name: opDescribeReplicationGroups, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"Marker"}, + OutputTokens: []string{"Marker"}, + LimitToken: "MaxRecords", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeReplicationGroupsInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeReplicationGroupsOutput{} + req.Data = output + return +} + +// The DescribeReplicationGroups action returns information about a particular +// replication group. If no identifier is specified, DescribeReplicationGroups +// returns information about all replication groups. 
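+//
+// Editor's note, a hedged sketch (not generated code; the group ID is a
+// placeholder). Omitting ReplicationGroupId lists every replication group,
+// as described above.
+//
+//	svc := elasticache.New(session.New())
+//	resp, err := svc.DescribeReplicationGroups(&elasticache.DescribeReplicationGroupsInput{
+//		ReplicationGroupId: aws.String("my-repl-group"),
+//	})
+//	if err == nil {
+//		fmt.Println(resp)
+//	}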
+func (c *ElastiCache) DescribeReplicationGroups(input *DescribeReplicationGroupsInput) (*DescribeReplicationGroupsOutput, error) { + req, out := c.DescribeReplicationGroupsRequest(input) + err := req.Send() + return out, err +} + +func (c *ElastiCache) DescribeReplicationGroupsPages(input *DescribeReplicationGroupsInput, fn func(p *DescribeReplicationGroupsOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.DescribeReplicationGroupsRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*DescribeReplicationGroupsOutput), lastPage) + }) +} + +const opDescribeReservedCacheNodes = "DescribeReservedCacheNodes" + +// DescribeReservedCacheNodesRequest generates a request for the DescribeReservedCacheNodes operation. +func (c *ElastiCache) DescribeReservedCacheNodesRequest(input *DescribeReservedCacheNodesInput) (req *request.Request, output *DescribeReservedCacheNodesOutput) { + op := &request.Operation{ + Name: opDescribeReservedCacheNodes, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"Marker"}, + OutputTokens: []string{"Marker"}, + LimitToken: "MaxRecords", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeReservedCacheNodesInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeReservedCacheNodesOutput{} + req.Data = output + return +} + +// The DescribeReservedCacheNodes action returns information about reserved +// cache nodes for this account, or about a specified reserved cache node. +func (c *ElastiCache) DescribeReservedCacheNodes(input *DescribeReservedCacheNodesInput) (*DescribeReservedCacheNodesOutput, error) { + req, out := c.DescribeReservedCacheNodesRequest(input) + err := req.Send() + return out, err +} + +func (c *ElastiCache) DescribeReservedCacheNodesPages(input *DescribeReservedCacheNodesInput, fn func(p *DescribeReservedCacheNodesOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.DescribeReservedCacheNodesRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*DescribeReservedCacheNodesOutput), lastPage) + }) +} + +const opDescribeReservedCacheNodesOfferings = "DescribeReservedCacheNodesOfferings" + +// DescribeReservedCacheNodesOfferingsRequest generates a request for the DescribeReservedCacheNodesOfferings operation. +func (c *ElastiCache) DescribeReservedCacheNodesOfferingsRequest(input *DescribeReservedCacheNodesOfferingsInput) (req *request.Request, output *DescribeReservedCacheNodesOfferingsOutput) { + op := &request.Operation{ + Name: opDescribeReservedCacheNodesOfferings, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"Marker"}, + OutputTokens: []string{"Marker"}, + LimitToken: "MaxRecords", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeReservedCacheNodesOfferingsInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeReservedCacheNodesOfferingsOutput{} + req.Data = output + return +} + +// The DescribeReservedCacheNodesOfferings action lists available reserved cache +// node offerings. 
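+//
+// Editor's note, a hedged sketch (not generated code): offerings are walked
+// a page at a time via the Pages variant defined below; the
+// ReservedCacheNodesOfferings field name is an assumption.
+//
+//	svc := elasticache.New(session.New())
+//	_ = svc.DescribeReservedCacheNodesOfferingsPages(
+//		&elasticache.DescribeReservedCacheNodesOfferingsInput{},
+//		func(page *elasticache.DescribeReservedCacheNodesOfferingsOutput, lastPage bool) bool {
+//			fmt.Println(len(page.ReservedCacheNodesOfferings), "offerings")
+//			return true
+//		})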
+func (c *ElastiCache) DescribeReservedCacheNodesOfferings(input *DescribeReservedCacheNodesOfferingsInput) (*DescribeReservedCacheNodesOfferingsOutput, error) { + req, out := c.DescribeReservedCacheNodesOfferingsRequest(input) + err := req.Send() + return out, err +} + +func (c *ElastiCache) DescribeReservedCacheNodesOfferingsPages(input *DescribeReservedCacheNodesOfferingsInput, fn func(p *DescribeReservedCacheNodesOfferingsOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.DescribeReservedCacheNodesOfferingsRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*DescribeReservedCacheNodesOfferingsOutput), lastPage) + }) +} + +const opDescribeSnapshots = "DescribeSnapshots" + +// DescribeSnapshotsRequest generates a request for the DescribeSnapshots operation. +func (c *ElastiCache) DescribeSnapshotsRequest(input *DescribeSnapshotsInput) (req *request.Request, output *DescribeSnapshotsOutput) { + op := &request.Operation{ + Name: opDescribeSnapshots, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"Marker"}, + OutputTokens: []string{"Marker"}, + LimitToken: "MaxRecords", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeSnapshotsInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeSnapshotsOutput{} + req.Data = output + return +} + +// The DescribeSnapshots action returns information about cache cluster snapshots. +// By default, DescribeSnapshots lists all of your snapshots; it can optionally +// describe a single snapshot, or just the snapshots associated with a particular +// cache cluster. +func (c *ElastiCache) DescribeSnapshots(input *DescribeSnapshotsInput) (*DescribeSnapshotsOutput, error) { + req, out := c.DescribeSnapshotsRequest(input) + err := req.Send() + return out, err +} + +func (c *ElastiCache) DescribeSnapshotsPages(input *DescribeSnapshotsInput, fn func(p *DescribeSnapshotsOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.DescribeSnapshotsRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*DescribeSnapshotsOutput), lastPage) + }) +} + +const opListTagsForResource = "ListTagsForResource" + +// ListTagsForResourceRequest generates a request for the ListTagsForResource operation. +func (c *ElastiCache) ListTagsForResourceRequest(input *ListTagsForResourceInput) (req *request.Request, output *TagListMessage) { + op := &request.Operation{ + Name: opListTagsForResource, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ListTagsForResourceInput{} + } + + req = c.newRequest(op, input, output) + output = &TagListMessage{} + req.Data = output + return +} + +// The ListTagsForResource action lists all cost allocation tags currently on +// the named resource. A cost allocation tag is a key-value pair where the key +// is case-sensitive and the value is optional. Cost allocation tags can be +// used to categorize and track your AWS costs. +// +// You can have a maximum of 10 cost allocation tags on an ElastiCache resource. +// For more information, see Using Cost Allocation Tags in Amazon ElastiCache +// (http://docs.aws.amazon.com/AmazonElastiCache/latest/UserGuide/BestPractices.html). 
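+//
+// Editor's note, a hedged sketch (not generated code): the ARN follows the
+// arn:aws:elasticache:... form shown in AddTagsToResourceInput; the TagList
+// field name is an assumption about TagListMessage.
+//
+//	svc := elasticache.New(session.New())
+//	resp, err := svc.ListTagsForResource(&elasticache.ListTagsForResourceInput{
+//		ResourceName: aws.String("arn:aws:elasticache:us-west-2:0123456789:cluster:myCluster"),
+//	})
+//	if err == nil {
+//		fmt.Println(resp.TagList)
+//	}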
+func (c *ElastiCache) ListTagsForResource(input *ListTagsForResourceInput) (*TagListMessage, error) {
+	req, out := c.ListTagsForResourceRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+const opModifyCacheCluster = "ModifyCacheCluster"
+
+// ModifyCacheClusterRequest generates a request for the ModifyCacheCluster operation.
+func (c *ElastiCache) ModifyCacheClusterRequest(input *ModifyCacheClusterInput) (req *request.Request, output *ModifyCacheClusterOutput) {
+	op := &request.Operation{
+		Name:       opModifyCacheCluster,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &ModifyCacheClusterInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &ModifyCacheClusterOutput{}
+	req.Data = output
+	return
+}
+
+// The ModifyCacheCluster action modifies the settings for a cache cluster.
+// You can use this action to change one or more cluster configuration parameters
+// by specifying the parameters and the new values.
+func (c *ElastiCache) ModifyCacheCluster(input *ModifyCacheClusterInput) (*ModifyCacheClusterOutput, error) {
+	req, out := c.ModifyCacheClusterRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+const opModifyCacheParameterGroup = "ModifyCacheParameterGroup"
+
+// ModifyCacheParameterGroupRequest generates a request for the ModifyCacheParameterGroup operation.
+func (c *ElastiCache) ModifyCacheParameterGroupRequest(input *ModifyCacheParameterGroupInput) (req *request.Request, output *CacheParameterGroupNameMessage) {
+	op := &request.Operation{
+		Name:       opModifyCacheParameterGroup,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &ModifyCacheParameterGroupInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &CacheParameterGroupNameMessage{}
+	req.Data = output
+	return
+}
+
+// The ModifyCacheParameterGroup action modifies the parameters of a cache parameter
+// group. You can modify up to 20 parameters in a single request by submitting
+// a list of parameter name and value pairs.
+func (c *ElastiCache) ModifyCacheParameterGroup(input *ModifyCacheParameterGroupInput) (*CacheParameterGroupNameMessage, error) {
+	req, out := c.ModifyCacheParameterGroupRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+const opModifyCacheSubnetGroup = "ModifyCacheSubnetGroup"
+
+// ModifyCacheSubnetGroupRequest generates a request for the ModifyCacheSubnetGroup operation.
+func (c *ElastiCache) ModifyCacheSubnetGroupRequest(input *ModifyCacheSubnetGroupInput) (req *request.Request, output *ModifyCacheSubnetGroupOutput) {
+	op := &request.Operation{
+		Name:       opModifyCacheSubnetGroup,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &ModifyCacheSubnetGroupInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &ModifyCacheSubnetGroupOutput{}
+	req.Data = output
+	return
+}
+
+// The ModifyCacheSubnetGroup action modifies an existing cache subnet group.
+func (c *ElastiCache) ModifyCacheSubnetGroup(input *ModifyCacheSubnetGroupInput) (*ModifyCacheSubnetGroupOutput, error) {
+	req, out := c.ModifyCacheSubnetGroupRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+const opModifyReplicationGroup = "ModifyReplicationGroup"
+
+// ModifyReplicationGroupRequest generates a request for the ModifyReplicationGroup operation.
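+//
+// Editor's note, a hedged sketch (not generated code): ReplicationGroupId is
+// grounded in this package, while ApplyImmediately is an assumed optional
+// field on the modify-style input.
+//
+//	svc := elasticache.New(session.New())
+//	resp, err := svc.ModifyReplicationGroup(&elasticache.ModifyReplicationGroupInput{
+//		ReplicationGroupId: aws.String("my-repl-group"),
+//		ApplyImmediately:   aws.Bool(true),
+//	})
+//	if err == nil {
+//		fmt.Println(resp)
+//	}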
+func (c *ElastiCache) ModifyReplicationGroupRequest(input *ModifyReplicationGroupInput) (req *request.Request, output *ModifyReplicationGroupOutput) { + op := &request.Operation{ + Name: opModifyReplicationGroup, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ModifyReplicationGroupInput{} + } + + req = c.newRequest(op, input, output) + output = &ModifyReplicationGroupOutput{} + req.Data = output + return +} + +// The ModifyReplicationGroup action modifies the settings for a replication +// group. +func (c *ElastiCache) ModifyReplicationGroup(input *ModifyReplicationGroupInput) (*ModifyReplicationGroupOutput, error) { + req, out := c.ModifyReplicationGroupRequest(input) + err := req.Send() + return out, err +} + +const opPurchaseReservedCacheNodesOffering = "PurchaseReservedCacheNodesOffering" + +// PurchaseReservedCacheNodesOfferingRequest generates a request for the PurchaseReservedCacheNodesOffering operation. +func (c *ElastiCache) PurchaseReservedCacheNodesOfferingRequest(input *PurchaseReservedCacheNodesOfferingInput) (req *request.Request, output *PurchaseReservedCacheNodesOfferingOutput) { + op := &request.Operation{ + Name: opPurchaseReservedCacheNodesOffering, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &PurchaseReservedCacheNodesOfferingInput{} + } + + req = c.newRequest(op, input, output) + output = &PurchaseReservedCacheNodesOfferingOutput{} + req.Data = output + return +} + +// The PurchaseReservedCacheNodesOffering action allows you to purchase a reserved +// cache node offering. +func (c *ElastiCache) PurchaseReservedCacheNodesOffering(input *PurchaseReservedCacheNodesOfferingInput) (*PurchaseReservedCacheNodesOfferingOutput, error) { + req, out := c.PurchaseReservedCacheNodesOfferingRequest(input) + err := req.Send() + return out, err +} + +const opRebootCacheCluster = "RebootCacheCluster" + +// RebootCacheClusterRequest generates a request for the RebootCacheCluster operation. +func (c *ElastiCache) RebootCacheClusterRequest(input *RebootCacheClusterInput) (req *request.Request, output *RebootCacheClusterOutput) { + op := &request.Operation{ + Name: opRebootCacheCluster, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &RebootCacheClusterInput{} + } + + req = c.newRequest(op, input, output) + output = &RebootCacheClusterOutput{} + req.Data = output + return +} + +// The RebootCacheCluster action reboots some, or all, of the cache nodes within +// a provisioned cache cluster. This API will apply any modified cache parameter +// groups to the cache cluster. The reboot action takes place as soon as possible, +// and results in a momentary outage to the cache cluster. During the reboot, +// the cache cluster status is set to REBOOTING. +// +// The reboot causes the contents of the cache (for each cache node being rebooted) +// to be lost. +// +// When the reboot is complete, a cache cluster event is created. +func (c *ElastiCache) RebootCacheCluster(input *RebootCacheClusterInput) (*RebootCacheClusterOutput, error) { + req, out := c.RebootCacheClusterRequest(input) + err := req.Send() + return out, err +} + +const opRemoveTagsFromResource = "RemoveTagsFromResource" + +// RemoveTagsFromResourceRequest generates a request for the RemoveTagsFromResource operation. 
+func (c *ElastiCache) RemoveTagsFromResourceRequest(input *RemoveTagsFromResourceInput) (req *request.Request, output *TagListMessage) { + op := &request.Operation{ + Name: opRemoveTagsFromResource, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &RemoveTagsFromResourceInput{} + } + + req = c.newRequest(op, input, output) + output = &TagListMessage{} + req.Data = output + return +} + +// The RemoveTagsFromResource action removes the tags identified by the TagKeys +// list from the named resource. +func (c *ElastiCache) RemoveTagsFromResource(input *RemoveTagsFromResourceInput) (*TagListMessage, error) { + req, out := c.RemoveTagsFromResourceRequest(input) + err := req.Send() + return out, err +} + +const opResetCacheParameterGroup = "ResetCacheParameterGroup" + +// ResetCacheParameterGroupRequest generates a request for the ResetCacheParameterGroup operation. +func (c *ElastiCache) ResetCacheParameterGroupRequest(input *ResetCacheParameterGroupInput) (req *request.Request, output *CacheParameterGroupNameMessage) { + op := &request.Operation{ + Name: opResetCacheParameterGroup, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ResetCacheParameterGroupInput{} + } + + req = c.newRequest(op, input, output) + output = &CacheParameterGroupNameMessage{} + req.Data = output + return +} + +// The ResetCacheParameterGroup action modifies the parameters of a cache parameter +// group to the engine or system default value. You can reset specific parameters +// by submitting a list of parameter names. To reset the entire cache parameter +// group, specify the ResetAllParameters and CacheParameterGroupName parameters. +func (c *ElastiCache) ResetCacheParameterGroup(input *ResetCacheParameterGroupInput) (*CacheParameterGroupNameMessage, error) { + req, out := c.ResetCacheParameterGroupRequest(input) + err := req.Send() + return out, err +} + +const opRevokeCacheSecurityGroupIngress = "RevokeCacheSecurityGroupIngress" + +// RevokeCacheSecurityGroupIngressRequest generates a request for the RevokeCacheSecurityGroupIngress operation. +func (c *ElastiCache) RevokeCacheSecurityGroupIngressRequest(input *RevokeCacheSecurityGroupIngressInput) (req *request.Request, output *RevokeCacheSecurityGroupIngressOutput) { + op := &request.Operation{ + Name: opRevokeCacheSecurityGroupIngress, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &RevokeCacheSecurityGroupIngressInput{} + } + + req = c.newRequest(op, input, output) + output = &RevokeCacheSecurityGroupIngressOutput{} + req.Data = output + return +} + +// The RevokeCacheSecurityGroupIngress action revokes ingress from a cache security +// group. Use this action to disallow access from an Amazon EC2 security group +// that had been previously authorized. +func (c *ElastiCache) RevokeCacheSecurityGroupIngress(input *RevokeCacheSecurityGroupIngressInput) (*RevokeCacheSecurityGroupIngressOutput, error) { + req, out := c.RevokeCacheSecurityGroupIngressRequest(input) + err := req.Send() + return out, err +} + +// Represents the input of an AddTagsToResource action. +type AddTagsToResourceInput struct { + _ struct{} `type:"structure"` + + // The name of the resource to which the tags are to be added, for example arn:aws:elasticache:us-west-2:0123456789:cluster:myCluster. + ResourceName *string `type:"string" required:"true"` + + // A list of cost allocation tags to be added to this resource. A tag is a key-value + // pair. A tag key must be accompanied by a tag value. 
+ Tags []*Tag `locationNameList:"Tag" type:"list" required:"true"` +} + +// String returns the string representation +func (s AddTagsToResourceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AddTagsToResourceInput) GoString() string { + return s.String() +} + +// Represents the input of an AuthorizeCacheSecurityGroupIngress action. +type AuthorizeCacheSecurityGroupIngressInput struct { + _ struct{} `type:"structure"` + + // The cache security group which will allow network ingress. + CacheSecurityGroupName *string `type:"string" required:"true"` + + // The Amazon EC2 security group to be authorized for ingress to the cache security + // group. + EC2SecurityGroupName *string `type:"string" required:"true"` + + // The AWS account number of the Amazon EC2 security group owner. Note that + // this is not the same thing as an AWS access key ID - you must provide a valid + // AWS account number for this parameter. + EC2SecurityGroupOwnerId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s AuthorizeCacheSecurityGroupIngressInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AuthorizeCacheSecurityGroupIngressInput) GoString() string { + return s.String() +} + +type AuthorizeCacheSecurityGroupIngressOutput struct { + _ struct{} `type:"structure"` + + // Represents the output of one of the following actions: + // + // AuthorizeCacheSecurityGroupIngress CreateCacheSecurityGroup RevokeCacheSecurityGroupIngress + CacheSecurityGroup *CacheSecurityGroup `type:"structure"` +} + +// String returns the string representation +func (s AuthorizeCacheSecurityGroupIngressOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AuthorizeCacheSecurityGroupIngressOutput) GoString() string { + return s.String() +} + +// Describes an Availability Zone in which the cache cluster is launched. +type AvailabilityZone struct { + _ struct{} `type:"structure"` + + // The name of the Availability Zone. + Name *string `type:"string"` +} + +// String returns the string representation +func (s AvailabilityZone) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AvailabilityZone) GoString() string { + return s.String() +} + +// Contains all of the attributes of a specific cache cluster. +type CacheCluster struct { + _ struct{} `type:"structure"` + + // This parameter is currently disabled. + AutoMinorVersionUpgrade *bool `type:"boolean"` + + // The date and time when the cache cluster was created. + CacheClusterCreateTime *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + // The user-supplied identifier of the cache cluster. This identifier is a unique + // key that identifies a cache cluster. + CacheClusterId *string `type:"string"` + + // The current state of this cache cluster, one of the following values: available, + // creating, deleted, deleting, incompatible-network, modifying, rebooting cache + // cluster nodes, restore-failed, or snapshotting. + CacheClusterStatus *string `type:"string"` + + // The name of the compute and memory capacity node type for the cache cluster. 
+	//
+	// Valid node types are as follows:
+	//
+	//   General purpose:
+	//     Current generation: cache.t2.micro, cache.t2.small, cache.t2.medium,
+	//     cache.m3.medium, cache.m3.large, cache.m3.xlarge, cache.m3.2xlarge
+	//     Previous generation: cache.t1.micro, cache.m1.small, cache.m1.medium,
+	//     cache.m1.large, cache.m1.xlarge
+	//   Compute optimized: cache.c1.xlarge
+	//   Memory optimized:
+	//     Current generation: cache.r3.large, cache.r3.xlarge, cache.r3.2xlarge,
+	//     cache.r3.4xlarge, cache.r3.8xlarge
+	//     Previous generation: cache.m2.xlarge, cache.m2.2xlarge, cache.m2.4xlarge
+	//
+	// Notes:
+	//
+	//   All t2 instances are created in an Amazon Virtual Private Cloud (VPC).
+	//   Redis backup/restore is not supported for t2 instances.
+	//   Redis Append-only files (AOF) functionality is not supported for t1 or
+	//   t2 instances.
+	//
+	// For a complete listing of cache node types and specifications, see Amazon
+	// ElastiCache Product Features and Details (http://aws.amazon.com/elasticache/details)
+	// and Cache Node Type-Specific Parameters for Memcached (http://docs.aws.amazon.com/AmazonElastiCache/latest/UserGuide/CacheParameterGroups.Memcached.html#CacheParameterGroups.Memcached.NodeSpecific)
+	// or Cache Node Type-Specific Parameters for Redis (http://docs.aws.amazon.com/AmazonElastiCache/latest/UserGuide/CacheParameterGroups.Redis.html#CacheParameterGroups.Redis.NodeSpecific).
+	CacheNodeType *string `type:"string"`
+
+	// A list of cache nodes that are members of the cache cluster.
+	CacheNodes []*CacheNode `locationNameList:"CacheNode" type:"list"`
+
+	// The status of the cache parameter group.
+	CacheParameterGroup *CacheParameterGroupStatus `type:"structure"`
+
+	// A list of cache security group elements, composed of name and status sub-elements.
+	CacheSecurityGroups []*CacheSecurityGroupMembership `locationNameList:"CacheSecurityGroup" type:"list"`
+
+	// The name of the cache subnet group associated with the cache cluster.
+	CacheSubnetGroupName *string `type:"string"`
+
+	// The URL of the web page where you can download the latest ElastiCache client
+	// library.
+	ClientDownloadLandingPage *string `type:"string"`
+
+	// Represents the information required for client programs to connect to a cache
+	// node.
+	ConfigurationEndpoint *Endpoint `type:"structure"`
+
+	// The name of the cache engine (memcached or redis) to be used for this cache
+	// cluster.
+	Engine *string `type:"string"`
+
+	// The version of the cache engine that is used in this cache cluster.
+	EngineVersion *string `type:"string"`
+
+	// Describes a notification topic and its status. Notification topics are used
+	// for publishing ElastiCache events to subscribers using Amazon Simple Notification
+	// Service (SNS).
+	NotificationConfiguration *NotificationConfiguration `type:"structure"`
+
+	// The number of cache nodes in the cache cluster.
+	//
+	// For clusters running Redis, this value must be 1. For clusters running Memcached,
+	// this value must be between 1 and 20.
+	NumCacheNodes *int64 `type:"integer"`
+
+	// A group of settings that will be applied to the cache cluster in the future,
+	// or that are currently being applied.
+	PendingModifiedValues *PendingModifiedValues `type:"structure"`
+
+	// The name of the Availability Zone in which the cache cluster is located or
+	// "Multiple" if the cache nodes are located in different Availability Zones.
+	PreferredAvailabilityZone *string `type:"string"`
+
+	// Specifies the weekly time range during which maintenance on the cache cluster
+	// is performed. It is specified as a range in the format ddd:hh24:mi-ddd:hh24:mi
+	// (24H Clock UTC). The minimum maintenance window is a 60 minute period.
+	//
+	// Valid values for ddd are: sun | mon | tue | wed | thu | fri | sat
+	//
+	// Example: sun:05:00-sun:09:00
+	PreferredMaintenanceWindow *string `type:"string"`
+
+	// The replication group to which this cache cluster belongs. If this field
+	// is empty, the cache cluster is not associated with any replication group.
+	ReplicationGroupId *string `type:"string"`
+
+	// A list of VPC Security Groups associated with the cache cluster.
+	SecurityGroups []*SecurityGroupMembership `type:"list"`
+
+	// The number of days for which ElastiCache will retain automatic cache cluster
+	// snapshots before deleting them. For example, if you set SnapshotRetentionLimit
+	// to 5, then a snapshot that was taken today will be retained for 5 days before
+	// being deleted.
+	//
+	// Important: If the value of SnapshotRetentionLimit is set to zero (0), backups
+	// are turned off.
+	SnapshotRetentionLimit *int64 `type:"integer"`
+
+	// The daily time range (in UTC) during which ElastiCache will begin taking
+	// a daily snapshot of your cache cluster.
+	//
+	// Example: 05:00-09:00
+	SnapshotWindow *string `type:"string"`
+}
+
+// String returns the string representation
+func (s CacheCluster) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CacheCluster) GoString() string {
+	return s.String()
+}
+
+// Provides all of the details about a particular cache engine version.
+type CacheEngineVersion struct {
+	_ struct{} `type:"structure"`
+
+	// The description of the cache engine.
+	CacheEngineDescription *string `type:"string"`
+
+	// The description of the cache engine version.
+	CacheEngineVersionDescription *string `type:"string"`
+
+	// The name of the cache parameter group family associated with this cache engine.
+	CacheParameterGroupFamily *string `type:"string"`
+
+	// The name of the cache engine.
+	Engine *string `type:"string"`
+
+	// The version number of the cache engine.
+	EngineVersion *string `type:"string"`
+}
+
+// String returns the string representation
+func (s CacheEngineVersion) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CacheEngineVersion) GoString() string {
+	return s.String()
+}
+
+// Represents an individual cache node within a cache cluster. Each cache node
+// runs its own instance of the cluster's protocol-compliant caching software
+// - either Memcached or Redis.
+//
+// Valid node types are as follows:
+//
+//   General purpose:
+//     Current generation: cache.t2.micro, cache.t2.small, cache.t2.medium,
+//     cache.m3.medium, cache.m3.large, cache.m3.xlarge, cache.m3.2xlarge
+//     Previous generation: cache.t1.micro, cache.m1.small, cache.m1.medium,
+//     cache.m1.large, cache.m1.xlarge
+//   Compute optimized: cache.c1.xlarge
+//   Memory optimized:
+//     Current generation: cache.r3.large, cache.r3.xlarge, cache.r3.2xlarge,
+//     cache.r3.4xlarge, cache.r3.8xlarge
+//     Previous generation: cache.m2.xlarge, cache.m2.2xlarge, cache.m2.4xlarge
+//
+// Notes:
+//
+//   All t2 instances are created in an Amazon Virtual Private Cloud (VPC).
+//   Redis backup/restore is not supported for t2 instances.
+//   Redis Append-only files (AOF) functionality is not supported for t1 or
+//   t2 instances.
+//
+// For a complete listing of cache node types and specifications, see Amazon
+// ElastiCache Product Features and Details (http://aws.amazon.com/elasticache/details)
+// and Cache Node Type-Specific Parameters for Memcached (http://docs.aws.amazon.com/AmazonElastiCache/latest/UserGuide/CacheParameterGroups.Memcached.html#CacheParameterGroups.Memcached.NodeSpecific)
+// or Cache Node Type-Specific Parameters for Redis (http://docs.aws.amazon.com/AmazonElastiCache/latest/UserGuide/CacheParameterGroups.Redis.html#CacheParameterGroups.Redis.NodeSpecific).
+type CacheNode struct {
+	_ struct{} `type:"structure"`
+
+	// The date and time when the cache node was created.
+	CacheNodeCreateTime *time.Time `type:"timestamp" timestampFormat:"iso8601"`
+
+	// The cache node identifier. A node ID is a numeric identifier (0001, 0002,
+	// etc.). The combination of cluster ID and node ID uniquely identifies every
+	// cache node used in a customer's AWS account.
+	CacheNodeId *string `type:"string"`
+
+	// The current state of this cache node.
+	CacheNodeStatus *string `type:"string"`
+
+	// The Availability Zone where this node was created and now resides.
+	CustomerAvailabilityZone *string `type:"string"`
+
+	// The hostname for connecting to this cache node.
+	Endpoint *Endpoint `type:"structure"`
+
+	// The status of the parameter group applied to this cache node.
+	ParameterGroupStatus *string `type:"string"`
+
+	// The ID of the primary node to which this read replica node is synchronized.
+	// If this field is empty, then this node is not associated with a primary cache
+	// cluster.
+	SourceCacheNodeId *string `type:"string"`
+}
+
+// String returns the string representation
+func (s CacheNode) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CacheNode) GoString() string {
+	return s.String()
+}
+
+// A parameter that has a different value for each cache node type it is applied
+// to. For example, in a Redis cache cluster, a cache.m1.large cache node type
+// would have a larger maxmemory value than a cache.m1.small type.
+type CacheNodeTypeSpecificParameter struct {
+	_ struct{} `type:"structure"`
+
+	// The valid range of values for the parameter.
+	AllowedValues *string `type:"string"`
+
+	// A list of cache node types and their corresponding values for this parameter.
+	CacheNodeTypeSpecificValues []*CacheNodeTypeSpecificValue `locationNameList:"CacheNodeTypeSpecificValue" type:"list"`
+
+	// The valid data type for the parameter.
+	DataType *string `type:"string"`
+
+	// A description of the parameter.
+	Description *string `type:"string"`
+
+	// Indicates whether (true) or not (false) the parameter can be modified. Some
+	// parameters have security or operational implications that prevent them from
+	// being changed.
+	IsModifiable *bool `type:"boolean"`
+
+	// The earliest cache engine version to which the parameter can apply.
+	MinimumEngineVersion *string `type:"string"`
+
+	// The name of the parameter.
+	ParameterName *string `type:"string"`
+
+	// The source of the parameter value.
+	Source *string `type:"string"`
+}
+
+// String returns the string representation
+func (s CacheNodeTypeSpecificParameter) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CacheNodeTypeSpecificParameter) GoString() string {
+	return s.String()
+}
+
+// A value that applies only to a certain cache node type.
+type CacheNodeTypeSpecificValue struct {
+	_ struct{} `type:"structure"`
+
+	// The cache node type for which this value applies.
+	CacheNodeType *string `type:"string"`
+
+	// The value for the cache node type.
+	Value *string `type:"string"`
+}
+
+// String returns the string representation
+func (s CacheNodeTypeSpecificValue) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CacheNodeTypeSpecificValue) GoString() string {
+	return s.String()
+}
+
+// Represents the output of a CreateCacheParameterGroup action.
+type CacheParameterGroup struct {
+	_ struct{} `type:"structure"`
+
+	// The name of the cache parameter group family that this cache parameter group
+	// is compatible with.
+	CacheParameterGroupFamily *string `type:"string"`
+
+	// The name of the cache parameter group.
+	CacheParameterGroupName *string `type:"string"`
+
+	// The description for this cache parameter group.
+	Description *string `type:"string"`
+}
+
+// String returns the string representation
+func (s CacheParameterGroup) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CacheParameterGroup) GoString() string {
+	return s.String()
+}
+
+// Represents the output of one of the following actions:
+//
+//   ModifyCacheParameterGroup
+//   ResetCacheParameterGroup
+type CacheParameterGroupNameMessage struct {
+	_ struct{} `type:"structure"`
+
+	// The name of the cache parameter group.
+	CacheParameterGroupName *string `type:"string"`
+}
+
+// String returns the string representation
+func (s CacheParameterGroupNameMessage) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CacheParameterGroupNameMessage) GoString() string {
+	return s.String()
+}
+
+// The status of the cache parameter group.
+type CacheParameterGroupStatus struct {
+	_ struct{} `type:"structure"`
+
+	// A list of the cache node IDs which need to be rebooted for parameter changes
+	// to be applied. A node ID is a numeric identifier (0001, 0002, etc.).
+	CacheNodeIdsToReboot []*string `locationNameList:"CacheNodeId" type:"list"`
+
+	// The name of the cache parameter group.
+	CacheParameterGroupName *string `type:"string"`
+
+	// The status of parameter updates.
+	ParameterApplyStatus *string `type:"string"`
+}
+
+// String returns the string representation
+func (s CacheParameterGroupStatus) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CacheParameterGroupStatus) GoString() string {
+	return s.String()
+}
+
+// Represents the output of one of the following actions:
+//
+//   AuthorizeCacheSecurityGroupIngress
+//   CreateCacheSecurityGroup
+//   RevokeCacheSecurityGroupIngress
+type CacheSecurityGroup struct {
+	_ struct{} `type:"structure"`
+
+	// The name of the cache security group.
+	CacheSecurityGroupName *string `type:"string"`
+
+	// The description of the cache security group.
+	Description *string `type:"string"`
+
+	// A list of Amazon EC2 security groups that are associated with this cache
+	// security group.
+	EC2SecurityGroups []*EC2SecurityGroup `locationNameList:"EC2SecurityGroup" type:"list"`
+
+	// The AWS account ID of the cache security group owner.
+	OwnerId *string `type:"string"`
+}
+
+// String returns the string representation
+func (s CacheSecurityGroup) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CacheSecurityGroup) GoString() string {
+	return s.String()
+}
+
+// Represents a cache cluster's status within a particular cache security group.
+type CacheSecurityGroupMembership struct {
+	_ struct{} `type:"structure"`
+
+	// The name of the cache security group.
+	CacheSecurityGroupName *string `type:"string"`
+
+	// The membership status in the cache security group. The status changes when
+	// a cache security group is modified, or when the cache security groups assigned
+	// to a cache cluster are modified.
+	Status *string `type:"string"`
+}
+
+// String returns the string representation
+func (s CacheSecurityGroupMembership) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CacheSecurityGroupMembership) GoString() string {
+	return s.String()
+}
+
+// Represents the output of one of the following actions:
+//
+//   CreateCacheSubnetGroup
+//   ModifyCacheSubnetGroup
+type CacheSubnetGroup struct {
+	_ struct{} `type:"structure"`
+
+	// The description of the cache subnet group.
+	CacheSubnetGroupDescription *string `type:"string"`
+
+	// The name of the cache subnet group.
+	CacheSubnetGroupName *string `type:"string"`
+
+	// A list of subnets associated with the cache subnet group.
+	Subnets []*Subnet `locationNameList:"Subnet" type:"list"`
+
+	// The Amazon Virtual Private Cloud identifier (VPC ID) of the cache subnet
+	// group.
+	VpcId *string `type:"string"`
+}
+
+// String returns the string representation
+func (s CacheSubnetGroup) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CacheSubnetGroup) GoString() string {
+	return s.String()
+}
+
+// Represents the input of a CopySnapshot action.
+type CopySnapshotInput struct {
+	_ struct{} `type:"structure"`
+
+	// The name of an existing snapshot from which to copy.
+	SourceSnapshotName *string `type:"string" required:"true"`
+
+	// A name for the copied snapshot.
+	TargetSnapshotName *string `type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s CopySnapshotInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CopySnapshotInput) GoString() string {
+	return s.String()
+}
+
+type CopySnapshotOutput struct {
+	_ struct{} `type:"structure"`
+
+	// Represents a copy of an entire cache cluster as of the time when the snapshot
+	// was taken.
+	Snapshot *Snapshot `type:"structure"`
+}
+
+// String returns the string representation
+func (s CopySnapshotOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CopySnapshotOutput) GoString() string {
+	return s.String()
+}
+
+// Represents the input of a CreateCacheCluster action.
+type CreateCacheClusterInput struct {
+	_ struct{} `type:"structure"`
+
+	// Specifies whether the nodes in this Memcached node group are created in a
+	// single Availability Zone or created across multiple Availability Zones in
+	// the cluster's region.
+	//
+	// This parameter is only supported for Memcached cache clusters.
+	//
+	// If the AZMode and PreferredAvailabilityZones are not specified, ElastiCache
+	// assumes single-az mode.
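+	//
+	// Editor's sketch (not generated API documentation): a cross-AZ Memcached
+	// cluster request might look roughly like this, assuming "cross-az" is a
+	// valid AZMode value and svc is an ElastiCache client from this package:
+	//
+	//	resp, err := svc.CreateCacheCluster(&elasticache.CreateCacheClusterInput{
+	//		CacheClusterId: aws.String("my-memcached"),
+	//		Engine:         aws.String("memcached"),
+	//		CacheNodeType:  aws.String("cache.m3.medium"),
+	//		NumCacheNodes:  aws.Int64(3),
+	//		AZMode:         aws.String("cross-az"),
+	//	})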
+	AZMode *string `type:"string" enum:"AZMode"`
+
+	// This parameter is currently disabled.
+	AutoMinorVersionUpgrade *bool `type:"boolean"`
+
+	// The node group identifier. This parameter is stored as a lowercase string.
+	//
+	// Constraints:
+	//
+	//   A name must contain from 1 to 20 alphanumeric characters or hyphens.
+	//   The first character must be a letter.
+	//   A name cannot end with a hyphen or contain two consecutive hyphens.
+	CacheClusterId *string `type:"string" required:"true"`
+
+	// The compute and memory capacity of the nodes in the node group.
+	//
+	// Valid node types are as follows:
+	//
+	//   General purpose:
+	//     Current generation: cache.t2.micro, cache.t2.small, cache.t2.medium,
+	//     cache.m3.medium, cache.m3.large, cache.m3.xlarge, cache.m3.2xlarge
+	//     Previous generation: cache.t1.micro, cache.m1.small, cache.m1.medium,
+	//     cache.m1.large, cache.m1.xlarge
+	//   Compute optimized: cache.c1.xlarge
+	//   Memory optimized:
+	//     Current generation: cache.r3.large, cache.r3.xlarge, cache.r3.2xlarge,
+	//     cache.r3.4xlarge, cache.r3.8xlarge
+	//     Previous generation: cache.m2.xlarge, cache.m2.2xlarge, cache.m2.4xlarge
+	//
+	// Notes:
+	//
+	//   All t2 instances are created in an Amazon Virtual Private Cloud (VPC).
+	//   Redis backup/restore is not supported for t2 instances.
+	//   Redis Append-only files (AOF) functionality is not supported for t1 or
+	//   t2 instances.
+	//
+	// For a complete listing of cache node types and specifications, see Amazon
+	// ElastiCache Product Features and Details (http://aws.amazon.com/elasticache/details)
+	// and Cache Node Type-Specific Parameters for Memcached (http://docs.aws.amazon.com/AmazonElastiCache/latest/UserGuide/CacheParameterGroups.Memcached.html#CacheParameterGroups.Memcached.NodeSpecific)
+	// or Cache Node Type-Specific Parameters for Redis (http://docs.aws.amazon.com/AmazonElastiCache/latest/UserGuide/CacheParameterGroups.Redis.html#CacheParameterGroups.Redis.NodeSpecific).
+	CacheNodeType *string `type:"string"`
+
+	// The name of the parameter group to associate with this cache cluster. If
+	// this argument is omitted, the default parameter group for the specified engine
+	// is used.
+	CacheParameterGroupName *string `type:"string"`
+
+	// A list of security group names to associate with this cache cluster.
+	//
+	// Use this parameter only when you are creating a cache cluster outside of
+	// an Amazon Virtual Private Cloud (VPC).
+	CacheSecurityGroupNames []*string `locationNameList:"CacheSecurityGroupName" type:"list"`
+
+	// The name of the subnet group to be used for the cache cluster.
+	//
+	// Use this parameter only when you are creating a cache cluster in an Amazon
+	// Virtual Private Cloud (VPC).
+	CacheSubnetGroupName *string `type:"string"`
+
+	// The name of the cache engine to be used for this cache cluster.
+	//
+	// Valid values for this parameter are:
+	//
+	// memcached | redis
+	Engine *string `type:"string"`
+
+	// The version number of the cache engine to be used for this cache cluster.
+	// To view the supported cache engine versions, use the DescribeCacheEngineVersions
+	// action.
+	EngineVersion *string `type:"string"`
+
+	// The Amazon Resource Name (ARN) of the Amazon Simple Notification Service
+	// (SNS) topic to which notifications will be sent.
+	//
+	// The Amazon SNS topic owner must be the same as the cache cluster owner.
+	NotificationTopicArn *string `type:"string"`
+
+	// The initial number of cache nodes that the cache cluster will have.
+	//
+	// For clusters running Redis, this value must be 1. For clusters running Memcached,
+	// this value must be between 1 and 20.
+	//
+	// If you need more than 20 nodes for your Memcached cluster, please fill out
+	// the ElastiCache Limit Increase Request form at http://aws.amazon.com/contact-us/elasticache-node-limit-request/.
+	NumCacheNodes *int64 `type:"integer"`
+
+	// The port number on which each of the cache nodes will accept connections.
+	Port *int64 `type:"integer"`
+
+	// The EC2 Availability Zone in which the cache cluster will be created.
+	//
+	// All nodes belonging to this Memcached cache cluster are placed in the preferred
+	// Availability Zone. If you want to create your nodes across multiple Availability
+	// Zones, use PreferredAvailabilityZones.
+	//
+	// Default: System chosen Availability Zone.
+	PreferredAvailabilityZone *string `type:"string"`
+
+	// A list of the Availability Zones in which cache nodes will be created. The
+	// order of the zones in the list is not important.
+	//
+	// This option is only supported on Memcached.
+	//
+	// If you are creating your cache cluster in an Amazon VPC (recommended) you
+	// can only locate nodes in Availability Zones that are associated with the
+	// subnets in the selected subnet group.
+	//
+	// The number of Availability Zones listed must equal the value of NumCacheNodes.
+	//
+	// If you want all the nodes in the same Availability Zone, use PreferredAvailabilityZone
+	// instead, or repeat the Availability Zone multiple times in the list.
+	//
+	// Default: System chosen Availability Zones.
+	//
+	// Example: One Memcached node in each of three different Availability Zones:
+	// PreferredAvailabilityZones.member.1=us-west-2a&PreferredAvailabilityZones.member.2=us-west-2b&PreferredAvailabilityZones.member.3=us-west-2c
+	//
+	// Example: All three Memcached nodes in one Availability Zone: PreferredAvailabilityZones.member.1=us-west-2a&PreferredAvailabilityZones.member.2=us-west-2a&PreferredAvailabilityZones.member.3=us-west-2a
+	PreferredAvailabilityZones []*string `locationNameList:"PreferredAvailabilityZone" type:"list"`
+
+	// Specifies the weekly time range during which maintenance on the cache cluster
+	// is performed. It is specified as a range in the format ddd:hh24:mi-ddd:hh24:mi
+	// (24H Clock UTC). The minimum maintenance window is a 60 minute period.
+	//
+	// Valid values for ddd are: sun | mon | tue | wed | thu | fri | sat
+	//
+	// Example: sun:05:00-sun:09:00
+	PreferredMaintenanceWindow *string `type:"string"`
+
+	// The ID of the replication group to which this cache cluster should belong.
+	// If this parameter is specified, the cache cluster will be added to the specified
+	// replication group as a read replica; otherwise, the cache cluster will be
+	// a standalone primary that is not part of any replication group.
+	//
+	// If the specified replication group is Multi-AZ enabled and the availability
+	// zone is not specified, the cache cluster will be created in availability
+	// zones that provide the best spread of read replicas across availability zones.
+	//
+	// Note: This parameter is only valid if the Engine parameter is redis.
+	ReplicationGroupId *string `type:"string"`
+
+	// One or more VPC security groups associated with the cache cluster.
+	//
+	// Use this parameter only when you are creating a cache cluster in an Amazon
+	// Virtual Private Cloud (VPC).
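+	//
+	// Editor's sketch (not generated API documentation): creating a single-node
+	// Redis cluster inside a VPC might look roughly like this; the subnet group
+	// name and security group ID are placeholders:
+	//
+	//	resp, err := svc.CreateCacheCluster(&elasticache.CreateCacheClusterInput{
+	//		CacheClusterId:       aws.String("my-redis"),
+	//		Engine:               aws.String("redis"),
+	//		CacheNodeType:        aws.String("cache.m3.medium"),
+	//		NumCacheNodes:        aws.Int64(1),
+	//		CacheSubnetGroupName: aws.String("my-subnet-group"),
+	//		SecurityGroupIds:     []*string{aws.String("sg-12345678")},
+	//	})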
+ SecurityGroupIds []*string `locationNameList:"SecurityGroupId" type:"list"` + + // A single-element string list containing an Amazon Resource Name (ARN) that + // uniquely identifies a Redis RDB snapshot file stored in Amazon S3. The snapshot + // file will be used to populate the node group. The Amazon S3 object name in + // the ARN cannot contain any commas. + // + // Note: This parameter is only valid if the Engine parameter is redis. + // + // Example of an Amazon S3 ARN: arn:aws:s3:::my_bucket/snapshot1.rdb + SnapshotArns []*string `locationNameList:"SnapshotArn" type:"list"` + + // The name of a snapshot from which to restore data into the new node group. + // The snapshot status changes to restoring while the new node group is being + // created. + // + // Note: This parameter is only valid if the Engine parameter is redis. + SnapshotName *string `type:"string"` + + // The number of days for which ElastiCache will retain automatic snapshots + // before deleting them. For example, if you set SnapshotRetentionLimit to 5, + // then a snapshot that was taken today will be retained for 5 days before being + // deleted. + // + // Note: This parameter is only valid if the Engine parameter is redis. + // + // Default: 0 (i.e., automatic backups are disabled for this cache cluster). + SnapshotRetentionLimit *int64 `type:"integer"` + + // The daily time range (in UTC) during which ElastiCache will begin taking + // a daily snapshot of your node group. + // + // Example: 05:00-09:00 + // + // If you do not specify this parameter, then ElastiCache will automatically + // choose an appropriate time range. + // + // Note: This parameter is only valid if the Engine parameter is redis. + SnapshotWindow *string `type:"string"` + + // A list of cost allocation tags to be added to this resource. A tag is a key-value + // pair. A tag key must be accompanied by a tag value. + Tags []*Tag `locationNameList:"Tag" type:"list"` +} + +// String returns the string representation +func (s CreateCacheClusterInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateCacheClusterInput) GoString() string { + return s.String() +} + +type CreateCacheClusterOutput struct { + _ struct{} `type:"structure"` + + // Contains all of the attributes of a specific cache cluster. + CacheCluster *CacheCluster `type:"structure"` +} + +// String returns the string representation +func (s CreateCacheClusterOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateCacheClusterOutput) GoString() string { + return s.String() +} + +// Represents the input of a CreateCacheParameterGroup action. +type CreateCacheParameterGroupInput struct { + _ struct{} `type:"structure"` + + // The name of the cache parameter group family the cache parameter group can + // be used with. + // + // Valid values are: memcached1.4 | redis2.6 | redis2.8 + CacheParameterGroupFamily *string `type:"string" required:"true"` + + // A user-specified name for the cache parameter group. + CacheParameterGroupName *string `type:"string" required:"true"` + + // A user-specified description for the cache parameter group. 
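+	//
+	// Editor's sketch (not generated API documentation): creating a parameter
+	// group for the redis2.8 family might look roughly like this:
+	//
+	//	resp, err := svc.CreateCacheParameterGroup(&elasticache.CreateCacheParameterGroupInput{
+	//		CacheParameterGroupFamily: aws.String("redis2.8"),
+	//		CacheParameterGroupName:   aws.String("my-params"),
+	//		Description:               aws.String("Tuned parameters for my workload"),
+	//	})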
+	Description *string `type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s CreateCacheParameterGroupInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreateCacheParameterGroupInput) GoString() string {
+	return s.String()
+}
+
+type CreateCacheParameterGroupOutput struct {
+	_ struct{} `type:"structure"`
+
+	// Represents the output of a CreateCacheParameterGroup action.
+	CacheParameterGroup *CacheParameterGroup `type:"structure"`
+}
+
+// String returns the string representation
+func (s CreateCacheParameterGroupOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreateCacheParameterGroupOutput) GoString() string {
+	return s.String()
+}
+
+// Represents the input of a CreateCacheSecurityGroup action.
+type CreateCacheSecurityGroupInput struct {
+	_ struct{} `type:"structure"`
+
+	// A name for the cache security group. This value is stored as a lowercase
+	// string.
+	//
+	// Constraints: Must contain no more than 255 alphanumeric characters. Cannot
+	// be the word "Default".
+	//
+	// Example: mysecuritygroup
+	CacheSecurityGroupName *string `type:"string" required:"true"`
+
+	// A description for the cache security group.
+	Description *string `type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s CreateCacheSecurityGroupInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreateCacheSecurityGroupInput) GoString() string {
+	return s.String()
+}
+
+type CreateCacheSecurityGroupOutput struct {
+	_ struct{} `type:"structure"`
+
+	// Represents the output of one of the following actions:
+	//
+	//   AuthorizeCacheSecurityGroupIngress
+	//   CreateCacheSecurityGroup
+	//   RevokeCacheSecurityGroupIngress
+	CacheSecurityGroup *CacheSecurityGroup `type:"structure"`
+}
+
+// String returns the string representation
+func (s CreateCacheSecurityGroupOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreateCacheSecurityGroupOutput) GoString() string {
+	return s.String()
+}
+
+// Represents the input of a CreateCacheSubnetGroup action.
+type CreateCacheSubnetGroupInput struct {
+	_ struct{} `type:"structure"`
+
+	// A description for the cache subnet group.
+	CacheSubnetGroupDescription *string `type:"string" required:"true"`
+
+	// A name for the cache subnet group. This value is stored as a lowercase string.
+	//
+	// Constraints: Must contain no more than 255 alphanumeric characters or hyphens.
+	//
+	// Example: mysubnetgroup
+	CacheSubnetGroupName *string `type:"string" required:"true"`
+
+	// A list of VPC subnet IDs for the cache subnet group.
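+	//
+	// Editor's sketch (not generated API documentation): the subnet IDs below
+	// are placeholders for subnets that already exist in your VPC:
+	//
+	//	resp, err := svc.CreateCacheSubnetGroup(&elasticache.CreateCacheSubnetGroupInput{
+	//		CacheSubnetGroupName:        aws.String("mysubnetgroup"),
+	//		CacheSubnetGroupDescription: aws.String("Subnets for my cache"),
+	//		SubnetIds:                   []*string{aws.String("subnet-11111111"), aws.String("subnet-22222222")},
+	//	})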
+	SubnetIds []*string `locationNameList:"SubnetIdentifier" type:"list" required:"true"`
+}
+
+// String returns the string representation
+func (s CreateCacheSubnetGroupInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreateCacheSubnetGroupInput) GoString() string {
+	return s.String()
+}
+
+type CreateCacheSubnetGroupOutput struct {
+	_ struct{} `type:"structure"`
+
+	// Represents the output of one of the following actions:
+	//
+	//   CreateCacheSubnetGroup
+	//   ModifyCacheSubnetGroup
+	CacheSubnetGroup *CacheSubnetGroup `type:"structure"`
+}
+
+// String returns the string representation
+func (s CreateCacheSubnetGroupOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreateCacheSubnetGroupOutput) GoString() string {
+	return s.String()
+}
+
+// Represents the input of a CreateReplicationGroup action.
+type CreateReplicationGroupInput struct {
+	_ struct{} `type:"structure"`
+
+	// This parameter is currently disabled.
+	AutoMinorVersionUpgrade *bool `type:"boolean"`
+
+	// Specifies whether a read-only replica will be automatically promoted to read/write
+	// primary if the existing primary fails.
+	//
+	// If true, Multi-AZ is enabled for this replication group. If false, Multi-AZ
+	// is disabled for this replication group.
+	//
+	// Default: false
+	//
+	// ElastiCache Multi-AZ replication groups are not supported on:
+	//
+	//   Redis versions earlier than 2.8.6.
+	//   T1 and T2 cache node types.
+	AutomaticFailoverEnabled *bool `type:"boolean"`
+
+	// The compute and memory capacity of the nodes in the node group.
+	//
+	// Valid node types are as follows:
+	//
+	//   General purpose:
+	//     Current generation: cache.t2.micro, cache.t2.small, cache.t2.medium,
+	//     cache.m3.medium, cache.m3.large, cache.m3.xlarge, cache.m3.2xlarge
+	//     Previous generation: cache.t1.micro, cache.m1.small, cache.m1.medium,
+	//     cache.m1.large, cache.m1.xlarge
+	//   Compute optimized: cache.c1.xlarge
+	//   Memory optimized:
+	//     Current generation: cache.r3.large, cache.r3.xlarge, cache.r3.2xlarge,
+	//     cache.r3.4xlarge, cache.r3.8xlarge
+	//     Previous generation: cache.m2.xlarge, cache.m2.2xlarge, cache.m2.4xlarge
+	//
+	// Notes:
+	//
+	//   All t2 instances are created in an Amazon Virtual Private Cloud (VPC).
+	//   Redis backup/restore is not supported for t2 instances.
+	//   Redis Append-only files (AOF) functionality is not supported for t1 or
+	//   t2 instances.
+	//
+	// For a complete listing of cache node types and specifications, see Amazon
+	// ElastiCache Product Features and Details (http://aws.amazon.com/elasticache/details)
+	// and Cache Node Type-Specific Parameters for Memcached (http://docs.aws.amazon.com/AmazonElastiCache/latest/UserGuide/CacheParameterGroups.Memcached.html#CacheParameterGroups.Memcached.NodeSpecific)
+	// or Cache Node Type-Specific Parameters for Redis (http://docs.aws.amazon.com/AmazonElastiCache/latest/UserGuide/CacheParameterGroups.Redis.html#CacheParameterGroups.Redis.NodeSpecific).
+	CacheNodeType *string `type:"string"`
+
+	// The name of the parameter group to associate with this replication group.
+	// If this argument is omitted, the default cache parameter group for the specified
+	// engine is used.
+	CacheParameterGroupName *string `type:"string"`
+
+	// A list of cache security group names to associate with this replication group.
+	CacheSecurityGroupNames []*string `locationNameList:"CacheSecurityGroupName" type:"list"`
+
+	// The name of the cache subnet group to be used for the replication group.
+	CacheSubnetGroupName *string `type:"string"`
+
+	// The name of the cache engine to be used for the cache clusters in this replication
+	// group.
+	//
+	// Default: redis
+	Engine *string `type:"string"`
+
+	// The version number of the cache engine to be used for the cache clusters
+	// in this replication group. To view the supported cache engine versions, use
+	// the DescribeCacheEngineVersions action.
+	EngineVersion *string `type:"string"`
+
+	// The Amazon Resource Name (ARN) of the Amazon Simple Notification Service
+	// (SNS) topic to which notifications will be sent.
+	//
+	// The Amazon SNS topic owner must be the same as the cache cluster owner.
+	NotificationTopicArn *string `type:"string"`
+
+	// The number of cache clusters this replication group will initially have.
+	//
+	// If Multi-AZ is enabled, the value of this parameter must be at least 2.
+	//
+	// The maximum permitted value for NumCacheClusters is 6 (primary plus 5 replicas).
+	// If you need to exceed this limit, please fill out the ElastiCache Limit Increase
+	// Request form at http://aws.amazon.com/contact-us/elasticache-node-limit-request.
+	NumCacheClusters *int64 `type:"integer"`
+
+	// The port number on which each member of the replication group will accept
+	// connections.
+	Port *int64 `type:"integer"`
+
+	// A list of EC2 availability zones in which the replication group's cache clusters
+	// will be created. The order of the availability zones in the list is not important.
+	//
+	// If you are creating your replication group in an Amazon VPC (recommended),
+	// you can only locate cache clusters in availability zones associated with
+	// the subnets in the selected subnet group. The number of availability zones
+	// listed must equal the value of NumCacheClusters.
+	//
+	// Default: system chosen availability zones.
+	//
+	// Example: One Redis cache cluster in each of three availability zones. PreferredAvailabilityZones.member.1=us-west-2a
+	// PreferredAvailabilityZones.member.2=us-west-2b PreferredAvailabilityZones.member.3=us-west-2c
+	PreferredCacheClusterAZs []*string `locationNameList:"AvailabilityZone" type:"list"`
+
+	// Specifies the weekly time range during which maintenance on the cache cluster
+	// is performed. It is specified as a range in the format ddd:hh24:mi-ddd:hh24:mi
+	// (24H Clock UTC). The minimum maintenance window is a 60 minute period.
+	//
+	// Valid values for ddd are: sun | mon | tue | wed | thu | fri | sat
+	//
+	// Example: sun:05:00-sun:09:00
+	PreferredMaintenanceWindow *string `type:"string"`
+
+	// The identifier of the cache cluster that will serve as the primary for this
+	// replication group. This cache cluster must already exist and have a status
+	// of available.
+	//
+	// This parameter is not required if NumCacheClusters is specified.
+	PrimaryClusterId *string `type:"string"`
+
+	// A user-created description for the replication group.
+	ReplicationGroupDescription *string `type:"string" required:"true"`
+
+	// The replication group identifier. This parameter is stored as a lowercase
+	// string.
+	//
+	// Constraints:
+	//
+	//   A name must contain from 1 to 20 alphanumeric characters or hyphens.
+	//   The first character must be a letter.
+	//   A name cannot end with a hyphen or contain two consecutive hyphens.
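+	//
+	// Editor's sketch (not generated API documentation): a two-node Redis
+	// replication group with automatic failover might look roughly like this:
+	//
+	//	resp, err := svc.CreateReplicationGroup(&elasticache.CreateReplicationGroupInput{
+	//		ReplicationGroupId:          aws.String("my-repl-group"),
+	//		ReplicationGroupDescription: aws.String("Primary plus one replica"),
+	//		Engine:                      aws.String("redis"),
+	//		CacheNodeType:               aws.String("cache.m3.medium"),
+	//		NumCacheClusters:            aws.Int64(2),
+	//		AutomaticFailoverEnabled:    aws.Bool(true),
+	//	})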
+ ReplicationGroupId *string `type:"string" required:"true"` + + // One or more Amazon VPC security groups associated with this replication group. + // + // Use this parameter only when you are creating a replication group in an + // Amazon Virtual Private Cloud (VPC). + SecurityGroupIds []*string `locationNameList:"SecurityGroupId" type:"list"` + + // A single-element string list containing an Amazon Resource Name (ARN) that + // uniquely identifies a Redis RDB snapshot file stored in Amazon S3. The snapshot + // file will be used to populate the node group. The Amazon S3 object name in + // the ARN cannot contain any commas. + // + // Note: This parameter is only valid if the Engine parameter is redis. + // + // Example of an Amazon S3 ARN: arn:aws:s3:::my_bucket/snapshot1.rdb + SnapshotArns []*string `locationNameList:"SnapshotArn" type:"list"` + + // The name of a snapshot from which to restore data into the new node group. + // The snapshot status changes to restoring while the new node group is being + // created. + // + // Note: This parameter is only valid if the Engine parameter is redis. + SnapshotName *string `type:"string"` + + // The number of days for which ElastiCache will retain automatic snapshots + // before deleting them. For example, if you set SnapshotRetentionLimit to 5, + // then a snapshot that was taken today will be retained for 5 days before being + // deleted. + // + // Note: This parameter is only valid if the Engine parameter is redis. + // + // Default: 0 (i.e., automatic backups are disabled for this cache cluster). + SnapshotRetentionLimit *int64 `type:"integer"` + + // The daily time range (in UTC) during which ElastiCache will begin taking + // a daily snapshot of your node group. + // + // Example: 05:00-09:00 + // + // If you do not specify this parameter, then ElastiCache will automatically + // choose an appropriate time range. + // + // Note: This parameter is only valid if the Engine parameter is redis. + SnapshotWindow *string `type:"string"` + + // A list of cost allocation tags to be added to this resource. A tag is a key-value + // pair. A tag key must be accompanied by a tag value. + Tags []*Tag `locationNameList:"Tag" type:"list"` +} + +// String returns the string representation +func (s CreateReplicationGroupInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateReplicationGroupInput) GoString() string { + return s.String() +} + +type CreateReplicationGroupOutput struct { + _ struct{} `type:"structure"` + + // Contains all of the attributes of a specific replication group. + ReplicationGroup *ReplicationGroup `type:"structure"` +} + +// String returns the string representation +func (s CreateReplicationGroupOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateReplicationGroupOutput) GoString() string { + return s.String() +} + +// Represents the input of a CreateSnapshot action. +type CreateSnapshotInput struct { + _ struct{} `type:"structure"` + + // The identifier of an existing cache cluster. The snapshot will be created + // from this cache cluster. + CacheClusterId *string `type:"string" required:"true"` + + // A name for the snapshot being created. 
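+	//
+	// Editor's sketch (not generated API documentation): snapshotting an
+	// existing Redis cluster might look roughly like this:
+	//
+	//	resp, err := svc.CreateSnapshot(&elasticache.CreateSnapshotInput{
+	//		CacheClusterId: aws.String("my-redis"),
+	//		SnapshotName:   aws.String("my-redis-2016-01-29"),
+	//	})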
+	SnapshotName *string `type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s CreateSnapshotInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreateSnapshotInput) GoString() string {
+	return s.String()
+}
+
+type CreateSnapshotOutput struct {
+	_ struct{} `type:"structure"`
+
+	// Represents a copy of an entire cache cluster as of the time when the snapshot
+	// was taken.
+	Snapshot *Snapshot `type:"structure"`
+}
+
+// String returns the string representation
+func (s CreateSnapshotOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreateSnapshotOutput) GoString() string {
+	return s.String()
+}
+
+// Represents the input of a DeleteCacheCluster action.
+type DeleteCacheClusterInput struct {
+	_ struct{} `type:"structure"`
+
+	// The cache cluster identifier for the cluster to be deleted. This parameter
+	// is not case sensitive.
+	CacheClusterId *string `type:"string" required:"true"`
+
+	// The user-supplied name of a final cache cluster snapshot. This is the unique
+	// name that identifies the snapshot. ElastiCache creates the snapshot, and
+	// then deletes the cache cluster immediately afterward.
+	FinalSnapshotIdentifier *string `type:"string"`
+}
+
+// String returns the string representation
+func (s DeleteCacheClusterInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteCacheClusterInput) GoString() string {
+	return s.String()
+}
+
+type DeleteCacheClusterOutput struct {
+	_ struct{} `type:"structure"`
+
+	// Contains all of the attributes of a specific cache cluster.
+	CacheCluster *CacheCluster `type:"structure"`
+}
+
+// String returns the string representation
+func (s DeleteCacheClusterOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteCacheClusterOutput) GoString() string {
+	return s.String()
+}
+
+// Represents the input of a DeleteCacheParameterGroup action.
+type DeleteCacheParameterGroupInput struct {
+	_ struct{} `type:"structure"`
+
+	// The name of the cache parameter group to delete.
+	//
+	// The specified cache parameter group must not be associated with any cache
+	// clusters.
+	CacheParameterGroupName *string `type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s DeleteCacheParameterGroupInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteCacheParameterGroupInput) GoString() string {
+	return s.String()
+}
+
+type DeleteCacheParameterGroupOutput struct {
+	_ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s DeleteCacheParameterGroupOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteCacheParameterGroupOutput) GoString() string {
+	return s.String()
+}
+
+// Represents the input of a DeleteCacheSecurityGroup action.
+type DeleteCacheSecurityGroupInput struct {
+	_ struct{} `type:"structure"`
+
+	// The name of the cache security group to delete.
+	//
+	// You cannot delete the default security group.
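+	//
+	// Editor's sketch (not generated API documentation): deleting a security
+	// group you created earlier might look roughly like this:
+	//
+	//	_, err := svc.DeleteCacheSecurityGroup(&elasticache.DeleteCacheSecurityGroupInput{
+	//		CacheSecurityGroupName: aws.String("mysecuritygroup"),
+	//	})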
+ CacheSecurityGroupName *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteCacheSecurityGroupInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteCacheSecurityGroupInput) GoString() string { + return s.String() +} + +type DeleteCacheSecurityGroupOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteCacheSecurityGroupOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteCacheSecurityGroupOutput) GoString() string { + return s.String() +} + +// Represents the input of a DeleteCacheSubnetGroup action. +type DeleteCacheSubnetGroupInput struct { + _ struct{} `type:"structure"` + + // The name of the cache subnet group to delete. + // + // Constraints: Must contain no more than 255 alphanumeric characters or hyphens. + CacheSubnetGroupName *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteCacheSubnetGroupInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteCacheSubnetGroupInput) GoString() string { + return s.String() +} + +type DeleteCacheSubnetGroupOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteCacheSubnetGroupOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteCacheSubnetGroupOutput) GoString() string { + return s.String() +} + +// Represents the input of a DeleteReplicationGroup action. +type DeleteReplicationGroupInput struct { + _ struct{} `type:"structure"` + + // The name of a final node group snapshot. ElastiCache creates the snapshot + // from the primary node in the cluster, rather than one of the replicas; this + // is to ensure that it captures the freshest data. After the final snapshot + // is taken, the cluster is immediately deleted. + FinalSnapshotIdentifier *string `type:"string"` + + // The identifier for the cluster to be deleted. This parameter is not case + // sensitive. + ReplicationGroupId *string `type:"string" required:"true"` + + // If set to true, all of the read replicas will be deleted, but the primary + // node will be retained. + RetainPrimaryCluster *bool `type:"boolean"` +} + +// String returns the string representation +func (s DeleteReplicationGroupInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteReplicationGroupInput) GoString() string { + return s.String() +} + +type DeleteReplicationGroupOutput struct { + _ struct{} `type:"structure"` + + // Contains all of the attributes of a specific replication group. + ReplicationGroup *ReplicationGroup `type:"structure"` +} + +// String returns the string representation +func (s DeleteReplicationGroupOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteReplicationGroupOutput) GoString() string { + return s.String() +} + +// Represents the input of a DeleteSnapshot action. +type DeleteSnapshotInput struct { + _ struct{} `type:"structure"` + + // The name of the snapshot to be deleted. 
+	SnapshotName *string `type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s DeleteSnapshotInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteSnapshotInput) GoString() string {
+	return s.String()
+}
+
+type DeleteSnapshotOutput struct {
+	_ struct{} `type:"structure"`
+
+	// Represents a copy of an entire cache cluster as of the time when the snapshot
+	// was taken.
+	Snapshot *Snapshot `type:"structure"`
+}
+
+// String returns the string representation
+func (s DeleteSnapshotOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteSnapshotOutput) GoString() string {
+	return s.String()
+}
+
+// Represents the input of a DescribeCacheClusters action.
+type DescribeCacheClustersInput struct {
+	_ struct{} `type:"structure"`
+
+	// The user-supplied cluster identifier. If this parameter is specified, only
+	// information about that specific cache cluster is returned. This parameter
+	// isn't case sensitive.
+	CacheClusterId *string `type:"string"`
+
+	// An optional marker returned from a prior request. Use this marker for pagination
+	// of results from this action. If this parameter is specified, the response
+	// includes only records beyond the marker, up to the value specified by MaxRecords.
+	Marker *string `type:"string"`
+
+	// The maximum number of records to include in the response. If more records
+	// exist than the specified MaxRecords value, a marker is included in the response
+	// so that the remaining results can be retrieved.
+	//
+	// Default: 100
+	//
+	// Constraints: minimum 20; maximum 100.
+	MaxRecords *int64 `type:"integer"`
+
+	// An optional flag that can be included in the DescribeCacheClusters request
+	// to retrieve information about the individual cache nodes.
+	ShowCacheNodeInfo *bool `type:"boolean"`
+}
+
+// String returns the string representation
+func (s DescribeCacheClustersInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeCacheClustersInput) GoString() string {
+	return s.String()
+}
+
+// Represents the output of a DescribeCacheClusters action.
+type DescribeCacheClustersOutput struct {
+	_ struct{} `type:"structure"`
+
+	// A list of cache clusters. Each item in the list contains detailed information
+	// about one cache cluster.
+	CacheClusters []*CacheCluster `locationNameList:"CacheCluster" type:"list"`
+
+	// Provides an identifier to allow retrieval of paginated results.
+	Marker *string `type:"string"`
+}
+
+// String returns the string representation
+func (s DescribeCacheClustersOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeCacheClustersOutput) GoString() string {
+	return s.String()
+}
+
+// Represents the input of a DescribeCacheEngineVersions action.
+type DescribeCacheEngineVersionsInput struct {
+	_ struct{} `type:"structure"`
+
+	// The name of a specific cache parameter group family to return details for.
+	//
+	// Constraints:
+	//
+	//   Must be 1 to 255 alphanumeric characters.
+	//   First character must be a letter.
+	//   Cannot end with a hyphen or contain two consecutive hyphens.
+	CacheParameterGroupFamily *string `type:"string"`
+
+	// If true, specifies that only the default version of the specified engine
+	// or engine and major version combination is to be returned.
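+	//
+	// Editor's sketch (not generated API documentation): listing only the
+	// default Redis engine version might look roughly like this:
+	//
+	//	resp, err := svc.DescribeCacheEngineVersions(&elasticache.DescribeCacheEngineVersionsInput{
+	//		Engine:      aws.String("redis"),
+	//		DefaultOnly: aws.Bool(true),
+	//	})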
+ DefaultOnly *bool `type:"boolean"` + + // The cache engine to return. Valid values: memcached | redis + Engine *string `type:"string"` + + // The cache engine version to return. + // + // Example: 1.4.14 + EngineVersion *string `type:"string"` + + // An optional marker returned from a prior request. Use this marker for pagination + // of results from this action. If this parameter is specified, the response + // includes only records beyond the marker, up to the value specified by MaxRecords. + Marker *string `type:"string"` + + // The maximum number of records to include in the response. If more records + // exist than the specified MaxRecords value, a marker is included in the response + // so that the remaining results can be retrieved. + // + // Default: 100 + // + // Constraints: minimum 20; maximum 100. + MaxRecords *int64 `type:"integer"` +} + +// String returns the string representation +func (s DescribeCacheEngineVersionsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeCacheEngineVersionsInput) GoString() string { + return s.String() +} + +// Represents the output of a DescribeCacheEngineVersions action. +type DescribeCacheEngineVersionsOutput struct { + _ struct{} `type:"structure"` + + // A list of cache engine version details. Each element in the list contains + // detailed information about one cache engine version. + CacheEngineVersions []*CacheEngineVersion `locationNameList:"CacheEngineVersion" type:"list"` + + // Provides an identifier to allow retrieval of paginated results. + Marker *string `type:"string"` +} + +// String returns the string representation +func (s DescribeCacheEngineVersionsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeCacheEngineVersionsOutput) GoString() string { + return s.String() +} + +// Represents the input of a DescribeCacheParameterGroups action. +type DescribeCacheParameterGroupsInput struct { + _ struct{} `type:"structure"` + + // The name of a specific cache parameter group to return details for. + CacheParameterGroupName *string `type:"string"` + + // An optional marker returned from a prior request. Use this marker for pagination + // of results from this action. If this parameter is specified, the response + // includes only records beyond the marker, up to the value specified by MaxRecords. + Marker *string `type:"string"` + + // The maximum number of records to include in the response. If more records + // exist than the specified MaxRecords value, a marker is included in the response + // so that the remaining results can be retrieved. + // + // Default: 100 + // + // Constraints: minimum 20; maximum 100. + MaxRecords *int64 `type:"integer"` +} + +// String returns the string representation +func (s DescribeCacheParameterGroupsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeCacheParameterGroupsInput) GoString() string { + return s.String() +} + +// Represents the output of a DescribeCacheParameterGroups action. +type DescribeCacheParameterGroupsOutput struct { + _ struct{} `type:"structure"` + + // A list of cache parameter groups. Each element in the list contains detailed + // information about one cache parameter group. + CacheParameterGroups []*CacheParameterGroup `locationNameList:"CacheParameterGroup" type:"list"` + + // Provides an identifier to allow retrieval of paginated results. 
+ Marker *string `type:"string"` +} + +// String returns the string representation +func (s DescribeCacheParameterGroupsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeCacheParameterGroupsOutput) GoString() string { + return s.String() +} + +// Represents the input of a DescribeCacheParameters action. +type DescribeCacheParametersInput struct { + _ struct{} `type:"structure"` + + // The name of a specific cache parameter group to return details for. + CacheParameterGroupName *string `type:"string" required:"true"` + + // An optional marker returned from a prior request. Use this marker for pagination + // of results from this action. If this parameter is specified, the response + // includes only records beyond the marker, up to the value specified by MaxRecords. + Marker *string `type:"string"` + + // The maximum number of records to include in the response. If more records + // exist than the specified MaxRecords value, a marker is included in the response + // so that the remaining results can be retrieved. + // + // Default: 100 + // + // Constraints: minimum 20; maximum 100. + MaxRecords *int64 `type:"integer"` + + // The parameter types to return. + // + // Valid values: user | system | engine-default + Source *string `type:"string"` +} + +// String returns the string representation +func (s DescribeCacheParametersInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeCacheParametersInput) GoString() string { + return s.String() +} + +// Represents the output of a DescribeCacheParameters action. +type DescribeCacheParametersOutput struct { + _ struct{} `type:"structure"` + + // A list of parameters specific to a particular cache node type. Each element + // in the list contains detailed information about one parameter. + CacheNodeTypeSpecificParameters []*CacheNodeTypeSpecificParameter `locationNameList:"CacheNodeTypeSpecificParameter" type:"list"` + + // Provides an identifier to allow retrieval of paginated results. + Marker *string `type:"string"` + + // A list of Parameter instances. + Parameters []*Parameter `locationNameList:"Parameter" type:"list"` +} + +// String returns the string representation +func (s DescribeCacheParametersOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeCacheParametersOutput) GoString() string { + return s.String() +} + +// Represents the input of a DescribeCacheSecurityGroups action. +type DescribeCacheSecurityGroupsInput struct { + _ struct{} `type:"structure"` + + // The name of the cache security group to return details for. + CacheSecurityGroupName *string `type:"string"` + + // An optional marker returned from a prior request. Use this marker for pagination + // of results from this action. If this parameter is specified, the response + // includes only records beyond the marker, up to the value specified by MaxRecords. + Marker *string `type:"string"` + + // The maximum number of records to include in the response. If more records + // exist than the specified MaxRecords value, a marker is included in the response + // so that the remaining results can be retrieved. + // + // Default: 100 + // + // Constraints: minimum 20; maximum 100. 
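+	//
+	// Editor's sketch (not generated API documentation): Marker/MaxRecords
+	// pagination for this action might look roughly like this:
+	//
+	//	input := &elasticache.DescribeCacheSecurityGroupsInput{MaxRecords: aws.Int64(20)}
+	//	for {
+	//		resp, err := svc.DescribeCacheSecurityGroups(input)
+	//		if err != nil {
+	//			break // handle the error in real code
+	//		}
+	//		// ... consume resp.CacheSecurityGroups ...
+	//		if resp.Marker == nil {
+	//			break // no more pages
+	//		}
+	//		input.Marker = resp.Marker
+	//	}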
+ MaxRecords *int64 `type:"integer"` +} + +// String returns the string representation +func (s DescribeCacheSecurityGroupsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeCacheSecurityGroupsInput) GoString() string { + return s.String() +} + +// Represents the output of a DescribeCacheSecurityGroups action. +type DescribeCacheSecurityGroupsOutput struct { + _ struct{} `type:"structure"` + + // A list of cache security groups. Each element in the list contains detailed + // information about one group. + CacheSecurityGroups []*CacheSecurityGroup `locationNameList:"CacheSecurityGroup" type:"list"` + + // Provides an identifier to allow retrieval of paginated results. + Marker *string `type:"string"` +} + +// String returns the string representation +func (s DescribeCacheSecurityGroupsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeCacheSecurityGroupsOutput) GoString() string { + return s.String() +} + +// Represents the input of a DescribeCacheSubnetGroups action. +type DescribeCacheSubnetGroupsInput struct { + _ struct{} `type:"structure"` + + // The name of the cache subnet group to return details for. + CacheSubnetGroupName *string `type:"string"` + + // An optional marker returned from a prior request. Use this marker for pagination + // of results from this action. If this parameter is specified, the response + // includes only records beyond the marker, up to the value specified by MaxRecords. + Marker *string `type:"string"` + + // The maximum number of records to include in the response. If more records + // exist than the specified MaxRecords value, a marker is included in the response + // so that the remaining results can be retrieved. + // + // Default: 100 + // + // Constraints: minimum 20; maximum 100. + MaxRecords *int64 `type:"integer"` +} + +// String returns the string representation +func (s DescribeCacheSubnetGroupsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeCacheSubnetGroupsInput) GoString() string { + return s.String() +} + +// Represents the output of a DescribeCacheSubnetGroups action. +type DescribeCacheSubnetGroupsOutput struct { + _ struct{} `type:"structure"` + + // A list of cache subnet groups. Each element in the list contains detailed + // information about one group. + CacheSubnetGroups []*CacheSubnetGroup `locationNameList:"CacheSubnetGroup" type:"list"` + + // Provides an identifier to allow retrieval of paginated results. + Marker *string `type:"string"` +} + +// String returns the string representation +func (s DescribeCacheSubnetGroupsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeCacheSubnetGroupsOutput) GoString() string { + return s.String() +} + +// Represents the input of a DescribeEngineDefaultParameters action. +type DescribeEngineDefaultParametersInput struct { + _ struct{} `type:"structure"` + + // The name of the cache parameter group family. Valid values are: memcached1.4 + // | redis2.6 | redis2.8 + CacheParameterGroupFamily *string `type:"string" required:"true"` + + // An optional marker returned from a prior request. Use this marker for pagination + // of results from this action. If this parameter is specified, the response + // includes only records beyond the marker, up to the value specified by MaxRecords. 
+ Marker *string `type:"string"` + + // The maximum number of records to include in the response. If more records + // exist than the specified MaxRecords value, a marker is included in the response + // so that the remaining results can be retrieved. + // + // Default: 100 + // + // Constraints: minimum 20; maximum 100. + MaxRecords *int64 `type:"integer"` +} + +// String returns the string representation +func (s DescribeEngineDefaultParametersInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeEngineDefaultParametersInput) GoString() string { + return s.String() +} + +type DescribeEngineDefaultParametersOutput struct { + _ struct{} `type:"structure"` + + // Represents the output of a DescribeEngineDefaultParameters action. + EngineDefaults *EngineDefaults `type:"structure"` +} + +// String returns the string representation +func (s DescribeEngineDefaultParametersOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeEngineDefaultParametersOutput) GoString() string { + return s.String() +} + +// Represents the input of a DescribeEvents action. +type DescribeEventsInput struct { + _ struct{} `type:"structure"` + + // The number of minutes' worth of events to retrieve. + Duration *int64 `type:"integer"` + + // The end of the time interval for which to retrieve events, specified in ISO + // 8601 format. + EndTime *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + // An optional marker returned from a prior request. Use this marker for pagination + // of results from this action. If this parameter is specified, the response + // includes only records beyond the marker, up to the value specified by MaxRecords. + Marker *string `type:"string"` + + // The maximum number of records to include in the response. If more records + // exist than the specified MaxRecords value, a marker is included in the response + // so that the remaining results can be retrieved. + // + // Default: 100 + // + // Constraints: minimum 20; maximum 100. + MaxRecords *int64 `type:"integer"` + + // The identifier of the event source for which events will be returned. If + // not specified, then all sources are included in the response. + SourceIdentifier *string `type:"string"` + + // The event source to retrieve events for. If no value is specified, all events + // are returned. + // + // Valid values are: cache-cluster | cache-parameter-group | cache-security-group + // | cache-subnet-group + SourceType *string `type:"string" enum:"SourceType"` + + // The beginning of the time interval to retrieve events for, specified in ISO + // 8601 format. + StartTime *time.Time `type:"timestamp" timestampFormat:"iso8601"` +} + +// String returns the string representation +func (s DescribeEventsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeEventsInput) GoString() string { + return s.String() +} + +// Represents the output of a DescribeEvents action. +type DescribeEventsOutput struct { + _ struct{} `type:"structure"` + + // A list of events. Each element in the list contains detailed information + // about one event. + Events []*Event `locationNameList:"Event" type:"list"` + + // Provides an identifier to allow retrieval of paginated results. 
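+	//
+	// Editor's sketch (not generated API documentation): fetching the last
+	// hour of events for a single cluster, then reading resp.Events, might
+	// look roughly like this:
+	//
+	//	resp, err := svc.DescribeEvents(&elasticache.DescribeEventsInput{
+	//		SourceType:       aws.String("cache-cluster"),
+	//		SourceIdentifier: aws.String("my-redis"),
+	//		Duration:         aws.Int64(60),
+	//	})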
+ Marker *string `type:"string"`
+}
+
+// String returns the string representation
+func (s DescribeEventsOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeEventsOutput) GoString() string {
+ return s.String()
+}
+
+// Represents the input of a DescribeReplicationGroups action.
+type DescribeReplicationGroupsInput struct {
+ _ struct{} `type:"structure"`
+
+ // An optional marker returned from a prior request. Use this marker for pagination
+ // of results from this action. If this parameter is specified, the response
+ // includes only records beyond the marker, up to the value specified by MaxRecords.
+ Marker *string `type:"string"`
+
+ // The maximum number of records to include in the response. If more records
+ // exist than the specified MaxRecords value, a marker is included in the response
+ // so that the remaining results can be retrieved.
+ //
+ // Default: 100
+ //
+ // Constraints: minimum 20; maximum 100.
+ MaxRecords *int64 `type:"integer"`
+
+ // The identifier for the replication group to be described. This parameter
+ // is not case sensitive.
+ //
+ // If you do not specify this parameter, information about all replication
+ // groups is returned.
+ ReplicationGroupId *string `type:"string"`
+}
+
+// String returns the string representation
+func (s DescribeReplicationGroupsInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeReplicationGroupsInput) GoString() string {
+ return s.String()
+}
+
+// Represents the output of a DescribeReplicationGroups action.
+type DescribeReplicationGroupsOutput struct {
+ _ struct{} `type:"structure"`
+
+ // Provides an identifier to allow retrieval of paginated results.
+ Marker *string `type:"string"`
+
+ // A list of replication groups. Each item in the list contains detailed information
+ // about one replication group.
+ ReplicationGroups []*ReplicationGroup `locationNameList:"ReplicationGroup" type:"list"`
+}
+
+// String returns the string representation
+func (s DescribeReplicationGroupsOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeReplicationGroupsOutput) GoString() string {
+ return s.String()
+}
+
+// Represents the input of a DescribeReservedCacheNodes action.
+type DescribeReservedCacheNodesInput struct {
+ _ struct{} `type:"structure"`
+
+ // The cache node type filter value. Use this parameter to show only those reservations
+ // matching the specified cache node type.
+ //
+ // Valid node types are as follows:
+ //
+ //   General purpose:
+ //     Current generation: cache.t2.micro, cache.t2.small, cache.t2.medium,
+ //       cache.m3.medium, cache.m3.large, cache.m3.xlarge, cache.m3.2xlarge
+ //     Previous generation: cache.t1.micro, cache.m1.small, cache.m1.medium,
+ //       cache.m1.large, cache.m1.xlarge
+ //   Compute optimized: cache.c1.xlarge
+ //   Memory optimized:
+ //     Current generation: cache.r3.large, cache.r3.xlarge, cache.r3.2xlarge,
+ //       cache.r3.4xlarge, cache.r3.8xlarge
+ //     Previous generation: cache.m2.xlarge, cache.m2.2xlarge, cache.m2.4xlarge
+ //
+ // Notes:
+ //
+ //   - All t2 instances are created in an Amazon Virtual Private Cloud (VPC).
+ //   - Redis backup/restore is not supported for t2 instances.
+ //   - Redis Append-only files (AOF) functionality is not supported for t1
+ //     or t2 instances.
+ //
+ // For a complete listing of cache node types and specifications, see Amazon
+ // ElastiCache Product Features and Details (http://aws.amazon.com/elasticache/details)
+ // and Cache Node Type-Specific Parameters for Memcached (http://docs.aws.amazon.com/AmazonElastiCache/latest/UserGuide/CacheParameterGroups.Memcached.html#CacheParameterGroups.Memcached.NodeSpecific)
+ // or Cache Node Type-Specific Parameters for Redis (http://docs.aws.amazon.com/AmazonElastiCache/latest/UserGuide/CacheParameterGroups.Redis.html#CacheParameterGroups.Redis.NodeSpecific).
+ CacheNodeType *string `type:"string"`
+
+ // The duration filter value, specified in years or seconds. Use this parameter
+ // to show only reservations for this duration.
+ //
+ // Valid Values: 1 | 3 | 31536000 | 94608000
+ Duration *string `type:"string"`
+
+ // An optional marker returned from a prior request. Use this marker for pagination
+ // of results from this action. If this parameter is specified, the response
+ // includes only records beyond the marker, up to the value specified by MaxRecords.
+ Marker *string `type:"string"`
+
+ // The maximum number of records to include in the response. If more records
+ // exist than the specified MaxRecords value, a marker is included in the response
+ // so that the remaining results can be retrieved.
+ //
+ // Default: 100
+ //
+ // Constraints: minimum 20; maximum 100.
+ MaxRecords *int64 `type:"integer"`
+
+ // The offering type filter value. Use this parameter to show only the available
+ // offerings matching the specified offering type.
+ //
+ // Valid values: "Light Utilization"|"Medium Utilization"|"Heavy Utilization"
+ OfferingType *string `type:"string"`
+
+ // The product description filter value. Use this parameter to show only those
+ // reservations matching the specified product description.
+ ProductDescription *string `type:"string"`
+
+ // The reserved cache node identifier filter value. Use this parameter to show
+ // only the reservation that matches the specified reservation ID.
+ ReservedCacheNodeId *string `type:"string"`
+
+ // The offering identifier filter value. Use this parameter to show only purchased
+ // reservations matching the specified offering identifier.
+ ReservedCacheNodesOfferingId *string `type:"string"`
+}
+
+// String returns the string representation
+func (s DescribeReservedCacheNodesInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeReservedCacheNodesInput) GoString() string {
+ return s.String()
+}
+
+// Represents the input of a DescribeReservedCacheNodesOfferings action.
+type DescribeReservedCacheNodesOfferingsInput struct {
+ _ struct{} `type:"structure"`
+
+ // The cache node type filter value. Use this parameter to show only the available
+ // offerings matching the specified cache node type.
+ //
+ // Valid node types are as follows:
+ //
+ //   General purpose:
+ //     Current generation: cache.t2.micro, cache.t2.small, cache.t2.medium,
+ //       cache.m3.medium, cache.m3.large, cache.m3.xlarge, cache.m3.2xlarge
+ //     Previous generation: cache.t1.micro, cache.m1.small, cache.m1.medium,
+ //       cache.m1.large, cache.m1.xlarge
+ //   Compute optimized: cache.c1.xlarge
+ //   Memory optimized:
+ //     Current generation: cache.r3.large, cache.r3.xlarge, cache.r3.2xlarge,
+ //       cache.r3.4xlarge, cache.r3.8xlarge
+ //     Previous generation: cache.m2.xlarge, cache.m2.2xlarge, cache.m2.4xlarge
+ //
+ // Notes:
+ //
+ //   - All t2 instances are created in an Amazon Virtual Private Cloud (VPC).
+ //   - Redis backup/restore is not supported for t2 instances.
+ //   - Redis Append-only files (AOF) functionality is not supported for t1
+ //     or t2 instances.
+ //
+ // For a complete listing of cache node types and specifications, see Amazon
+ // ElastiCache Product Features and Details (http://aws.amazon.com/elasticache/details)
+ // and Cache Node Type-Specific Parameters for Memcached (http://docs.aws.amazon.com/AmazonElastiCache/latest/UserGuide/CacheParameterGroups.Memcached.html#CacheParameterGroups.Memcached.NodeSpecific)
+ // or Cache Node Type-Specific Parameters for Redis (http://docs.aws.amazon.com/AmazonElastiCache/latest/UserGuide/CacheParameterGroups.Redis.html#CacheParameterGroups.Redis.NodeSpecific).
+ CacheNodeType *string `type:"string"`
+
+ // Duration filter value, specified in years or seconds. Use this parameter
+ // to show only reservations for a given duration.
+ //
+ // Valid Values: 1 | 3 | 31536000 | 94608000
+ Duration *string `type:"string"`
+
+ // An optional marker returned from a prior request. Use this marker for pagination
+ // of results from this action. If this parameter is specified, the response
+ // includes only records beyond the marker, up to the value specified by MaxRecords.
+ Marker *string `type:"string"`
+
+ // The maximum number of records to include in the response. If more records
+ // exist than the specified MaxRecords value, a marker is included in the response
+ // so that the remaining results can be retrieved.
+ //
+ // Default: 100
+ //
+ // Constraints: minimum 20; maximum 100.
+ MaxRecords *int64 `type:"integer"`
+
+ // The offering type filter value. Use this parameter to show only the available
+ // offerings matching the specified offering type.
+ //
+ // Valid Values: "Light Utilization"|"Medium Utilization"|"Heavy Utilization"
+ OfferingType *string `type:"string"`
+
+ // The product description filter value. Use this parameter to show only the
+ // available offerings matching the specified product description.
+ ProductDescription *string `type:"string"`
+
+ // The offering identifier filter value. Use this parameter to show only the
+ // available offering that matches the specified reservation identifier.
+ //
+ // Example: 438012d3-4052-4cc7-b2e3-8d3372e0e706
+ ReservedCacheNodesOfferingId *string `type:"string"`
+}
+
+// String returns the string representation
+func (s DescribeReservedCacheNodesOfferingsInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeReservedCacheNodesOfferingsInput) GoString() string {
+ return s.String()
+}
+
+// Represents the output of a DescribeReservedCacheNodesOfferings action.
+type DescribeReservedCacheNodesOfferingsOutput struct {
+ _ struct{} `type:"structure"`
+
+ // Provides an identifier to allow retrieval of paginated results.
+ Marker *string `type:"string"`
+
+ // A list of reserved cache node offerings. Each element in the list contains
+ // detailed information about one offering.
+ ReservedCacheNodesOfferings []*ReservedCacheNodesOffering `locationNameList:"ReservedCacheNodesOffering" type:"list"`
+}
+
+// String returns the string representation
+func (s DescribeReservedCacheNodesOfferingsOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeReservedCacheNodesOfferingsOutput) GoString() string {
+ return s.String()
+}
+
+// Represents the output of a DescribeReservedCacheNodes action.
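+//
+// Editor's illustrative sketch (not part of the generated API; it assumes a
+// client value svc of type *ElastiCache, e.g. from
+// elasticache.New(session.New()), plus the usual aws/fmt/log imports):
+// listing Heavy Utilization offerings for one node type.
+//
+//	out, err := svc.DescribeReservedCacheNodesOfferings(&elasticache.DescribeReservedCacheNodesOfferingsInput{
+//	    CacheNodeType: aws.String("cache.r3.large"),
+//	    OfferingType:  aws.String("Heavy Utilization"),
+//	})
+//	if err != nil {
+//	    log.Fatal(err)
+//	}
+//	for _, o := range out.ReservedCacheNodesOfferings {
+//	    fmt.Println(aws.StringValue(o.ReservedCacheNodesOfferingId), aws.Float64Value(o.FixedPrice))
+//	}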
+type DescribeReservedCacheNodesOutput struct {
+ _ struct{} `type:"structure"`
+
+ // Provides an identifier to allow retrieval of paginated results.
+ Marker *string `type:"string"`
+
+ // A list of reserved cache nodes. Each element in the list contains detailed
+ // information about one node.
+ ReservedCacheNodes []*ReservedCacheNode `locationNameList:"ReservedCacheNode" type:"list"`
+}
+
+// String returns the string representation
+func (s DescribeReservedCacheNodesOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeReservedCacheNodesOutput) GoString() string {
+ return s.String()
+}
+
+// Represents the input of a DescribeSnapshots action.
+type DescribeSnapshotsInput struct {
+ _ struct{} `type:"structure"`
+
+ // A user-supplied cluster identifier. If this parameter is specified, only
+ // snapshots associated with that specific cache cluster will be described.
+ CacheClusterId *string `type:"string"`
+
+ // An optional marker returned from a prior request. Use this marker for pagination
+ // of results from this action. If this parameter is specified, the response
+ // includes only records beyond the marker, up to the value specified by MaxRecords.
+ Marker *string `type:"string"`
+
+ // The maximum number of records to include in the response. If more records
+ // exist than the specified MaxRecords value, a marker is included in the response
+ // so that the remaining results can be retrieved.
+ //
+ // Default: 50
+ //
+ // Constraints: minimum 20; maximum 50.
+ MaxRecords *int64 `type:"integer"`
+
+ // A user-supplied name of the snapshot. If this parameter is specified, only
+ // this snapshot will be described.
+ SnapshotName *string `type:"string"`
+
+ // If set to system, the output shows snapshots that were automatically created
+ // by ElastiCache. If set to user, the output shows snapshots that were manually
+ // created. If omitted, the output shows both automatically and manually created
+ // snapshots.
+ SnapshotSource *string `type:"string"`
+}
+
+// String returns the string representation
+func (s DescribeSnapshotsInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeSnapshotsInput) GoString() string {
+ return s.String()
+}
+
+// Represents the output of a DescribeSnapshots action.
+type DescribeSnapshotsOutput struct {
+ _ struct{} `type:"structure"`
+
+ // An optional marker returned from a prior request. Use this marker for pagination
+ // of results from this action. If this parameter is specified, the response
+ // includes only records beyond the marker, up to the value specified by MaxRecords.
+ Marker *string `type:"string"`
+
+ // A list of snapshots. Each item in the list contains detailed information
+ // about one snapshot.
+ Snapshots []*Snapshot `locationNameList:"Snapshot" type:"list"`
+}
+
+// String returns the string representation
+func (s DescribeSnapshotsOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeSnapshotsOutput) GoString() string {
+ return s.String()
+}
+
+// Provides ownership and status information for an Amazon EC2 security group.
+type EC2SecurityGroup struct {
+ _ struct{} `type:"structure"`
+
+ // The name of the Amazon EC2 security group.
+ EC2SecurityGroupName *string `type:"string"`
+
+ // The AWS account ID of the Amazon EC2 security group owner.
+ EC2SecurityGroupOwnerId *string `type:"string"` + + // The status of the Amazon EC2 security group. + Status *string `type:"string"` +} + +// String returns the string representation +func (s EC2SecurityGroup) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s EC2SecurityGroup) GoString() string { + return s.String() +} + +// Represents the information required for client programs to connect to a cache +// node. +type Endpoint struct { + _ struct{} `type:"structure"` + + // The DNS hostname of the cache node. + Address *string `type:"string"` + + // The port number that the cache engine is listening on. + Port *int64 `type:"integer"` +} + +// String returns the string representation +func (s Endpoint) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Endpoint) GoString() string { + return s.String() +} + +// Represents the output of a DescribeEngineDefaultParameters action. +type EngineDefaults struct { + _ struct{} `type:"structure"` + + // A list of parameters specific to a particular cache node type. Each element + // in the list contains detailed information about one parameter. + CacheNodeTypeSpecificParameters []*CacheNodeTypeSpecificParameter `locationNameList:"CacheNodeTypeSpecificParameter" type:"list"` + + // Specifies the name of the cache parameter group family to which the engine + // default parameters apply. + CacheParameterGroupFamily *string `type:"string"` + + // Provides an identifier to allow retrieval of paginated results. + Marker *string `type:"string"` + + // Contains a list of engine default parameters. + Parameters []*Parameter `locationNameList:"Parameter" type:"list"` +} + +// String returns the string representation +func (s EngineDefaults) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s EngineDefaults) GoString() string { + return s.String() +} + +// Represents a single occurrence of something interesting within the system. +// Some examples of events are creating a cache cluster, adding or removing +// a cache node, or rebooting a node. +type Event struct { + _ struct{} `type:"structure"` + + // The date and time when the event occurred. + Date *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + // The text of the event. + Message *string `type:"string"` + + // The identifier for the source of the event. For example, if the event occurred + // at the cache cluster level, the identifier would be the name of the cache + // cluster. + SourceIdentifier *string `type:"string"` + + // Specifies the origin of this event - a cache cluster, a parameter group, + // a security group, etc. + SourceType *string `type:"string" enum:"SourceType"` +} + +// String returns the string representation +func (s Event) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Event) GoString() string { + return s.String() +} + +// The input parameters for the ListTagsForResource action. +type ListTagsForResourceInput struct { + _ struct{} `type:"structure"` + + // The name of the resource for which you want the list of tags, for example + // arn:aws:elasticache:us-west-2:0123456789:cluster:myCluster. 
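+ //
+ // Editor's illustrative sketch (not part of the generated API; svc is an
+ // assumed *ElastiCache client, and the output's TagList field is the shared
+ // tag-list type defined at the end of this file):
+ //
+ //	out, err := svc.ListTagsForResource(&elasticache.ListTagsForResourceInput{
+ //	    ResourceName: aws.String("arn:aws:elasticache:us-west-2:0123456789:cluster:myCluster"),
+ //	})
+ //	if err != nil {
+ //	    log.Fatal(err)
+ //	}
+ //	for _, t := range out.TagList {
+ //	    fmt.Printf("%s=%s\n", aws.StringValue(t.Key), aws.StringValue(t.Value))
+ //	}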
+ ResourceName *string `type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s ListTagsForResourceInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListTagsForResourceInput) GoString() string {
+ return s.String()
+}
+
+// Represents the input of a ModifyCacheCluster action.
+type ModifyCacheClusterInput struct {
+ _ struct{} `type:"structure"`
+
+ // Specifies whether the new nodes in this Memcached cache cluster are all created
+ // in a single Availability Zone or created across multiple Availability Zones.
+ //
+ // Valid values: single-az | cross-az.
+ //
+ // This option is only supported for Memcached cache clusters.
+ //
+ // You cannot specify single-az if the Memcached cache cluster already has
+ // cache nodes in different Availability Zones. If cross-az is specified, existing
+ // Memcached nodes remain in their current Availability Zone.
+ //
+ // Only newly created nodes will be located in different Availability Zones.
+ // For instructions on how to move existing Memcached nodes to different Availability
+ // Zones, see the Availability Zone Considerations section of Cache Node Considerations
+ // for Memcached (http://docs.aws.amazon.com/AmazonElastiCache/latest/UserGuide/CacheNode.Memcached.html).
+ AZMode *string `type:"string" enum:"AZMode"`
+
+ // If true, this parameter causes the modifications in this request and any
+ // pending modifications to be applied, asynchronously and as soon as possible,
+ // regardless of the PreferredMaintenanceWindow setting for the cache cluster.
+ //
+ // If false, then changes to the cache cluster are applied on the next maintenance
+ // reboot, or the next failure reboot, whichever occurs first.
+ //
+ // If you perform a ModifyCacheCluster before a pending modification is applied,
+ // the pending modification is replaced by the newer modification.
+ //
+ // Valid values: true | false
+ //
+ // Default: false
+ ApplyImmediately *bool `type:"boolean"`
+
+ // This parameter is currently disabled.
+ AutoMinorVersionUpgrade *bool `type:"boolean"`
+
+ // The cache cluster identifier. This value is stored as a lowercase string.
+ CacheClusterId *string `type:"string" required:"true"`
+
+ // A list of cache node IDs to be removed. A node ID is a numeric identifier
+ // (0001, 0002, etc.). This parameter is only valid when NumCacheNodes is less
+ // than the existing number of cache nodes. The number of cache node IDs supplied
+ // in this parameter must match the difference between the existing number of
+ // cache nodes in the cluster or pending cache nodes, whichever is greater,
+ // and the value of NumCacheNodes in the request.
+ //
+ // For example: If you have 3 active cache nodes, 7 pending cache nodes, and
+ // the number of cache nodes in this ModifyCacheCluster call is 5, you must list
+ // 2 (7 - 5) cache node IDs to remove.
+ CacheNodeIdsToRemove []*string `locationNameList:"CacheNodeId" type:"list"`
+
+ // The name of the cache parameter group to apply to this cache cluster. This
+ // change is asynchronously applied as soon as possible for parameters when
+ // the ApplyImmediately parameter is specified as true for this request.
+ CacheParameterGroupName *string `type:"string"`
+
+ // A list of cache security group names to authorize on this cache cluster.
+ // This change is asynchronously applied as soon as possible.
+ //
+ // This parameter can be used only with clusters that are created outside of
+ // an Amazon Virtual Private Cloud (VPC).
+ //
+ // Constraints: Must contain no more than 255 alphanumeric characters. Must
+ // not be "Default".
+ CacheSecurityGroupNames []*string `locationNameList:"CacheSecurityGroupName" type:"list"`
+
+ // The upgraded version of the cache engine to be run on the cache nodes.
+ EngineVersion *string `type:"string"`
+
+ // The list of Availability Zones where the new Memcached cache nodes will be
+ // created.
+ //
+ // This parameter is only valid when NumCacheNodes in the request is greater
+ // than the sum of the number of active cache nodes and the number of cache
+ // nodes pending creation (which may be zero). The number of Availability Zones
+ // supplied in this list must match the cache nodes being added in this request.
+ //
+ // This option is only supported on Memcached clusters.
+ //
+ // Scenarios:
+ //
+ //   Scenario 1: You have 3 active nodes and wish to add 2 nodes. Specify
+ //     NumCacheNodes=5 (3 + 2) and optionally specify two Availability Zones
+ //     for the two new nodes.
+ //   Scenario 2: You have 3 active nodes and 2 nodes pending creation (from
+ //     the scenario 1 call) and want to add 1 more node. Specify
+ //     NumCacheNodes=6 ((3 + 2) + 1) and optionally specify an Availability
+ //     Zone for the new node.
+ //   Scenario 3: You want to cancel all pending actions. Specify
+ //     NumCacheNodes=3 to cancel all pending actions.
+ //
+ // The Availability Zone placement of nodes pending creation cannot be modified.
+ // If you wish to cancel any nodes pending creation, add 0 nodes by setting
+ // NumCacheNodes to the number of current nodes.
+ //
+ // If cross-az is specified, existing Memcached nodes remain in their current
+ // Availability Zone. Only newly created nodes can be located in different Availability
+ // Zones. For guidance on how to move existing Memcached nodes to different
+ // Availability Zones, see the Availability Zone Considerations section of Cache
+ // Node Considerations for Memcached (http://docs.aws.amazon.com/AmazonElastiCache/latest/UserGuide/CacheNode.Memcached.html).
+ //
+ // Impact of new add/remove requests upon pending requests:
+ //
+ //   Scenario-1: Pending action Delete, new request Delete. The new delete,
+ //     pending or immediate, replaces the pending delete.
+ //   Scenario-2: Pending action Delete, new request Create. The new create,
+ //     pending or immediate, replaces the pending delete.
+ //   Scenario-3: Pending action Create, new request Delete. The new delete,
+ //     pending or immediate, replaces the pending create.
+ //   Scenario-4: Pending action Create, new request Create. The new create is
+ //     added to the pending create. Important: If the new create request is
+ //     Apply Immediately - Yes, all creates are performed immediately. If the
+ //     new create request is Apply Immediately - No, all creates are pending.
+ //
+ // Example: NewAvailabilityZones.member.1=us-west-2a&NewAvailabilityZones.member.2=us-west-2b&NewAvailabilityZones.member.3=us-west-2c
+ NewAvailabilityZones []*string `locationNameList:"PreferredAvailabilityZone" type:"list"`
+
+ // The Amazon Resource Name (ARN) of the Amazon SNS topic to which notifications
+ // will be sent.
+ //
+ // The Amazon SNS topic owner must be the same as the cache cluster owner.
+ NotificationTopicArn *string `type:"string"`
+
+ // The status of the Amazon SNS notification topic. Notifications are sent only
+ // if the status is active.
+ //
+ // Valid values: active | inactive
+ NotificationTopicStatus *string `type:"string"`
+
+ // The number of cache nodes that the cache cluster should have. If the value
+ // for NumCacheNodes is greater than the sum of the number of current cache
+ // nodes and the number of cache nodes pending creation (which may be zero),
+ // then more nodes will be added. If the value is less than the number of existing
+ // cache nodes, then nodes will be removed. If the value is equal to the number
+ // of current cache nodes, then any pending add or remove requests are canceled.
+ //
+ // If you are removing cache nodes, you must use the CacheNodeIdsToRemove parameter
+ // to provide the IDs of the specific cache nodes to remove.
+ //
+ // For clusters running Redis, this value must be 1. For clusters running Memcached,
+ // this value must be between 1 and 20.
+ //
+ // Note: Adding or removing Memcached cache nodes can be applied immediately
+ // or as a pending action. See ApplyImmediately. A pending action to modify
+ // the number of cache nodes in a cluster during its maintenance window, whether
+ // by adding or removing nodes in accordance with the scale out architecture,
+ // is not queued. The customer's latest request to add or remove nodes to the
+ // cluster overrides any previous pending actions to modify the number of cache
+ // nodes in the cluster. For example, a request to remove 2 nodes would override
+ // a previous pending action to remove 3 nodes. Similarly, a request to add
+ // 2 nodes would override a previous pending action to remove 3 nodes and vice
+ // versa.
+ //
+ // As Memcached cache nodes may now be provisioned in different Availability
+ // Zones with flexible cache node placement, a request to add nodes does not
+ // automatically override a previous pending action to add nodes. The customer
+ // can modify the previous pending action to add more nodes or explicitly cancel
+ // the pending request and retry the new request. To cancel pending actions
+ // to modify the number of cache nodes in a cluster, use the ModifyCacheCluster
+ // request and set NumCacheNodes equal to the number of cache nodes currently
+ // in the cache cluster.
+ NumCacheNodes *int64 `type:"integer"`
+
+ // Specifies the weekly time range during which maintenance on the cache cluster
+ // is performed. It is specified as a range in the format ddd:hh24:mi-ddd:hh24:mi
+ // (24H Clock UTC). The minimum maintenance window is a 60 minute period.
+ //
+ // Valid values for ddd are: sun | mon | tue | wed | thu | fri | sat
+ //
+ // Example: sun:05:00-sun:09:00
+ PreferredMaintenanceWindow *string `type:"string"`
+
+ // Specifies the VPC Security Groups associated with the cache cluster.
+ //
+ // This parameter can be used only with clusters that are created in an Amazon
+ // Virtual Private Cloud (VPC).
+ SecurityGroupIds []*string `locationNameList:"SecurityGroupId" type:"list"`
+
+ // The number of days for which ElastiCache will retain automatic cache cluster
+ // snapshots before deleting them. For example, if you set SnapshotRetentionLimit
+ // to 5, then a snapshot that was taken today will be retained for 5 days before
+ // being deleted.
+ //
+ // Important: If the value of SnapshotRetentionLimit is set to zero (0), backups
+ // are turned off.
+ SnapshotRetentionLimit *int64 `type:"integer"`
+
+ // The daily time range (in UTC) during which ElastiCache will begin taking
+ // a daily snapshot of your cache cluster.
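+ //
+ // Editor's illustrative sketch (not part of the generated API; svc is an
+ // assumed *ElastiCache client): growing a Memcached cluster to 5 nodes and
+ // applying the change immediately rather than at the maintenance window.
+ //
+ //	out, err := svc.ModifyCacheCluster(&elasticache.ModifyCacheClusterInput{
+ //	    CacheClusterId:   aws.String("my-memcached"),
+ //	    NumCacheNodes:    aws.Int64(5),
+ //	    ApplyImmediately: aws.Bool(true),
+ //	})
+ //	if err != nil {
+ //	    log.Fatal(err)
+ //	}
+ //	// The returned CacheCluster reflects the in-progress modification.
+ //	fmt.Println(aws.StringValue(out.CacheCluster.CacheClusterStatus))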
+ SnapshotWindow *string `type:"string"`
+}
+
+// String returns the string representation
+func (s ModifyCacheClusterInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ModifyCacheClusterInput) GoString() string {
+ return s.String()
+}
+
+type ModifyCacheClusterOutput struct {
+ _ struct{} `type:"structure"`
+
+ // Contains all of the attributes of a specific cache cluster.
+ CacheCluster *CacheCluster `type:"structure"`
+}
+
+// String returns the string representation
+func (s ModifyCacheClusterOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ModifyCacheClusterOutput) GoString() string {
+ return s.String()
+}
+
+// Represents the input of a ModifyCacheParameterGroup action.
+type ModifyCacheParameterGroupInput struct {
+ _ struct{} `type:"structure"`
+
+ // The name of the cache parameter group to modify.
+ CacheParameterGroupName *string `type:"string" required:"true"`
+
+ // An array of parameter names and values for the parameter update. You must
+ // supply at least one parameter name and value; subsequent arguments are optional.
+ // A maximum of 20 parameters may be modified per request.
+ ParameterNameValues []*ParameterNameValue `locationNameList:"ParameterNameValue" type:"list" required:"true"`
+}
+
+// String returns the string representation
+func (s ModifyCacheParameterGroupInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ModifyCacheParameterGroupInput) GoString() string {
+ return s.String()
+}
+
+// Represents the input of a ModifyCacheSubnetGroup action.
+type ModifyCacheSubnetGroupInput struct {
+ _ struct{} `type:"structure"`
+
+ // A description for the cache subnet group.
+ CacheSubnetGroupDescription *string `type:"string"`
+
+ // The name for the cache subnet group. This value is stored as a lowercase
+ // string.
+ //
+ // Constraints: Must contain no more than 255 alphanumeric characters or hyphens.
+ //
+ // Example: mysubnetgroup
+ CacheSubnetGroupName *string `type:"string" required:"true"`
+
+ // The EC2 subnet IDs for the cache subnet group.
+ SubnetIds []*string `locationNameList:"SubnetIdentifier" type:"list"`
+}
+
+// String returns the string representation
+func (s ModifyCacheSubnetGroupInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ModifyCacheSubnetGroupInput) GoString() string {
+ return s.String()
+}
+
+type ModifyCacheSubnetGroupOutput struct {
+ _ struct{} `type:"structure"`
+
+ // Represents the output of one of the following actions:
+ //
+ //   CreateCacheSubnetGroup
+ //   ModifyCacheSubnetGroup
+ CacheSubnetGroup *CacheSubnetGroup `type:"structure"`
+}
+
+// String returns the string representation
+func (s ModifyCacheSubnetGroupOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ModifyCacheSubnetGroupOutput) GoString() string {
+ return s.String()
+}
+
+// Represents the input of a ModifyReplicationGroup action.
+type ModifyReplicationGroupInput struct {
+ _ struct{} `type:"structure"`
+
+ // If true, this parameter causes the modifications in this request and any
+ // pending modifications to be applied, asynchronously and as soon as possible,
+ // regardless of the PreferredMaintenanceWindow setting for the replication
+ // group.
+ //
+ // If false, then changes to the nodes in the replication group are applied
+ // on the next maintenance reboot, or the next failure reboot, whichever occurs
+ // first.
+ //
+ // Valid values: true | false
+ //
+ // Default: false
+ ApplyImmediately *bool `type:"boolean"`
+
+ // This parameter is currently disabled.
+ AutoMinorVersionUpgrade *bool `type:"boolean"`
+
+ // Whether a read replica will be automatically promoted to read/write primary
+ // if the existing primary encounters a failure.
+ //
+ // Valid values: true | false
+ //
+ // ElastiCache Multi-AZ replication groups are not supported on:
+ //
+ //   - Redis versions earlier than 2.8.6
+ //   - T1 and T2 cache node types
+ AutomaticFailoverEnabled *bool `type:"boolean"`
+
+ // The name of the cache parameter group to apply to all of the clusters in
+ // this replication group. This change is asynchronously applied as soon as
+ // possible for parameters when the ApplyImmediately parameter is specified
+ // as true for this request.
+ CacheParameterGroupName *string `type:"string"`
+
+ // A list of cache security group names to authorize for the clusters in this
+ // replication group. This change is asynchronously applied as soon as possible.
+ //
+ // This parameter can be used only with a replication group containing cache
+ // clusters running outside of an Amazon Virtual Private Cloud (VPC).
+ //
+ // Constraints: Must contain no more than 255 alphanumeric characters. Must
+ // not be "Default".
+ CacheSecurityGroupNames []*string `locationNameList:"CacheSecurityGroupName" type:"list"`
+
+ // The upgraded version of the cache engine to be run on the cache clusters
+ // in the replication group.
+ EngineVersion *string `type:"string"`
+
+ // The Amazon Resource Name (ARN) of the Amazon SNS topic to which notifications
+ // will be sent.
+ //
+ // The Amazon SNS topic owner must be the same as the replication group owner.
+ NotificationTopicArn *string `type:"string"`
+
+ // The status of the Amazon SNS notification topic for the replication group.
+ // Notifications are sent only if the status is active.
+ //
+ // Valid values: active | inactive
+ NotificationTopicStatus *string `type:"string"`
+
+ // Specifies the weekly time range during which maintenance on the cache cluster
+ // is performed. It is specified as a range in the format ddd:hh24:mi-ddd:hh24:mi
+ // (24H Clock UTC). The minimum maintenance window is a 60 minute period.
+ //
+ // Valid values for ddd are: sun | mon | tue | wed | thu | fri | sat
+ //
+ // Example: sun:05:00-sun:09:00
+ PreferredMaintenanceWindow *string `type:"string"`
+
+ // If this parameter is specified, ElastiCache will promote the specified cache
+ // cluster in the specified replication group to the primary role. The nodes
+ // of all other cache clusters in the replication group will be read replicas.
+ PrimaryClusterId *string `type:"string"`
+
+ // A description for the replication group. Maximum length is 255 characters.
+ ReplicationGroupDescription *string `type:"string"`
+
+ // The identifier of the replication group to modify.
+ ReplicationGroupId *string `type:"string" required:"true"`
+
+ // Specifies the VPC Security Groups associated with the cache clusters in the
+ // replication group.
+ //
+ // This parameter can be used only with a replication group containing cache
+ // clusters running in an Amazon Virtual Private Cloud (VPC).
+ SecurityGroupIds []*string `locationNameList:"SecurityGroupId" type:"list"`
+
+ // The number of days for which ElastiCache will retain automatic node group
+ // snapshots before deleting them. For example, if you set SnapshotRetentionLimit
+ // to 5, then a snapshot that was taken today will be retained for 5 days before
+ // being deleted.
+ //
+ // Important: If the value of SnapshotRetentionLimit is set to zero (0), backups
+ // are turned off.
+ SnapshotRetentionLimit *int64 `type:"integer"`
+
+ // The daily time range (in UTC) during which ElastiCache will begin taking
+ // a daily snapshot of the node group specified by SnapshottingClusterId.
+ //
+ // Example: 05:00-09:00
+ //
+ // If you do not specify this parameter, then ElastiCache will automatically
+ // choose an appropriate time range.
+ SnapshotWindow *string `type:"string"`
+
+ // The cache cluster ID that will be used as the daily snapshot source for the
+ // replication group.
+ SnapshottingClusterId *string `type:"string"`
+}
+
+// String returns the string representation
+func (s ModifyReplicationGroupInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ModifyReplicationGroupInput) GoString() string {
+ return s.String()
+}
+
+type ModifyReplicationGroupOutput struct {
+ _ struct{} `type:"structure"`
+
+ // Contains all of the attributes of a specific replication group.
+ ReplicationGroup *ReplicationGroup `type:"structure"`
+}
+
+// String returns the string representation
+func (s ModifyReplicationGroupOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ModifyReplicationGroupOutput) GoString() string {
+ return s.String()
+}
+
+// Represents a collection of cache nodes in a replication group.
+type NodeGroup struct {
+ _ struct{} `type:"structure"`
+
+ // The identifier for the node group. A replication group contains only one
+ // node group; therefore, the node group ID is 0001.
+ NodeGroupId *string `type:"string"`
+
+ // A list containing information about individual nodes within the node group.
+ NodeGroupMembers []*NodeGroupMember `locationNameList:"NodeGroupMember" type:"list"`
+
+ // Represents the information required for client programs to connect to a cache
+ // node.
+ PrimaryEndpoint *Endpoint `type:"structure"`
+
+ // The current state of this replication group - creating, available, etc.
+ Status *string `type:"string"`
+}
+
+// String returns the string representation
+func (s NodeGroup) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s NodeGroup) GoString() string {
+ return s.String()
+}
+
+// Represents a single node within a node group.
+type NodeGroupMember struct {
+ _ struct{} `type:"structure"`
+
+ // The ID of the cache cluster to which the node belongs.
+ CacheClusterId *string `type:"string"`
+
+ // The ID of the node within its cache cluster. A node ID is a numeric identifier
+ // (0001, 0002, etc.).
+ CacheNodeId *string `type:"string"`
+
+ // The role that is currently assigned to the node - primary or replica.
+ CurrentRole *string `type:"string"`
+
+ // The name of the Availability Zone in which the node is located.
+ PreferredAvailabilityZone *string `type:"string"`
+
+ // Represents the information required for client programs to connect to a cache
+ // node.
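+ //
+ // Editor's illustrative sketch (not part of the generated API; svc is an
+ // assumed *ElastiCache client): reading a replication group's primary
+ // endpoint from the structures above.
+ //
+ //	out, err := svc.DescribeReplicationGroups(&elasticache.DescribeReplicationGroupsInput{
+ //	    ReplicationGroupId: aws.String("my-group"),
+ //	})
+ //	if err != nil {
+ //	    log.Fatal(err)
+ //	}
+ //	// A replication group has a single node group (ID 0001).
+ //	ep := out.ReplicationGroups[0].NodeGroups[0].PrimaryEndpoint
+ //	fmt.Printf("%s:%d\n", aws.StringValue(ep.Address), aws.Int64Value(ep.Port))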
+ ReadEndpoint *Endpoint `type:"structure"` +} + +// String returns the string representation +func (s NodeGroupMember) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s NodeGroupMember) GoString() string { + return s.String() +} + +// Represents an individual cache node in a snapshot of a cache cluster. +type NodeSnapshot struct { + _ struct{} `type:"structure"` + + // The date and time when the cache node was created in the source cache cluster. + CacheNodeCreateTime *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + // The cache node identifier for the node in the source cache cluster. + CacheNodeId *string `type:"string"` + + // The size of the cache on the source cache node. + CacheSize *string `type:"string"` + + // The date and time when the source node's metadata and cache data set was + // obtained for the snapshot. + SnapshotCreateTime *time.Time `type:"timestamp" timestampFormat:"iso8601"` +} + +// String returns the string representation +func (s NodeSnapshot) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s NodeSnapshot) GoString() string { + return s.String() +} + +// Describes a notification topic and its status. Notification topics are used +// for publishing ElastiCache events to subscribers using Amazon Simple Notification +// Service (SNS). +type NotificationConfiguration struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) that identifies the topic. + TopicArn *string `type:"string"` + + // The current state of the topic. + TopicStatus *string `type:"string"` +} + +// String returns the string representation +func (s NotificationConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s NotificationConfiguration) GoString() string { + return s.String() +} + +// Describes an individual setting that controls some aspect of ElastiCache +// behavior. +type Parameter struct { + _ struct{} `type:"structure"` + + // The valid range of values for the parameter. + AllowedValues *string `type:"string"` + + // The valid data type for the parameter. + DataType *string `type:"string"` + + // A description of the parameter. + Description *string `type:"string"` + + // Indicates whether (true) or not (false) the parameter can be modified. Some + // parameters have security or operational implications that prevent them from + // being changed. + IsModifiable *bool `type:"boolean"` + + // The earliest cache engine version to which the parameter can apply. + MinimumEngineVersion *string `type:"string"` + + // The name of the parameter. + ParameterName *string `type:"string"` + + // The value of the parameter. + ParameterValue *string `type:"string"` + + // The source of the parameter. + Source *string `type:"string"` +} + +// String returns the string representation +func (s Parameter) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Parameter) GoString() string { + return s.String() +} + +// Describes a name-value pair that is used to update the value of a parameter. +type ParameterNameValue struct { + _ struct{} `type:"structure"` + + // The name of the parameter. + ParameterName *string `type:"string"` + + // The value of the parameter. 
+ ParameterValue *string `type:"string"` +} + +// String returns the string representation +func (s ParameterNameValue) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ParameterNameValue) GoString() string { + return s.String() +} + +// A group of settings that will be applied to the cache cluster in the future, +// or that are currently being applied. +type PendingModifiedValues struct { + _ struct{} `type:"structure"` + + // A list of cache node IDs that are being removed (or will be removed) from + // the cache cluster. A node ID is a numeric identifier (0001, 0002, etc.). + CacheNodeIdsToRemove []*string `locationNameList:"CacheNodeId" type:"list"` + + // The new cache engine version that the cache cluster will run. + EngineVersion *string `type:"string"` + + // The new number of cache nodes for the cache cluster. + // + // For clusters running Redis, this value must be 1. For clusters running Memcached, + // this value must be between 1 and 20. + NumCacheNodes *int64 `type:"integer"` +} + +// String returns the string representation +func (s PendingModifiedValues) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PendingModifiedValues) GoString() string { + return s.String() +} + +// Represents the input of a PurchaseReservedCacheNodesOffering action. +type PurchaseReservedCacheNodesOfferingInput struct { + _ struct{} `type:"structure"` + + // The number of cache node instances to reserve. + // + // Default: 1 + CacheNodeCount *int64 `type:"integer"` + + // A customer-specified identifier to track this reservation. + // + // Example: myreservationID + ReservedCacheNodeId *string `type:"string"` + + // The ID of the reserved cache node offering to purchase. + // + // Example: 438012d3-4052-4cc7-b2e3-8d3372e0e706 + ReservedCacheNodesOfferingId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s PurchaseReservedCacheNodesOfferingInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PurchaseReservedCacheNodesOfferingInput) GoString() string { + return s.String() +} + +type PurchaseReservedCacheNodesOfferingOutput struct { + _ struct{} `type:"structure"` + + // Represents the output of a PurchaseReservedCacheNodesOffering action. + ReservedCacheNode *ReservedCacheNode `type:"structure"` +} + +// String returns the string representation +func (s PurchaseReservedCacheNodesOfferingOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PurchaseReservedCacheNodesOfferingOutput) GoString() string { + return s.String() +} + +// Represents the input of a RebootCacheCluster action. +type RebootCacheClusterInput struct { + _ struct{} `type:"structure"` + + // The cache cluster identifier. This parameter is stored as a lowercase string. + CacheClusterId *string `type:"string" required:"true"` + + // A list of cache node IDs to reboot. A node ID is a numeric identifier (0001, + // 0002, etc.). To reboot an entire cache cluster, specify all of the cache + // node IDs. 
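+ //
+ // Editor's illustrative sketch (not part of the generated API; svc is an
+ // assumed *ElastiCache client): rebooting two nodes of a cluster by ID.
+ //
+ //	_, err := svc.RebootCacheCluster(&elasticache.RebootCacheClusterInput{
+ //	    CacheClusterId:       aws.String("my-cluster"),
+ //	    CacheNodeIdsToReboot: []*string{aws.String("0001"), aws.String("0002")},
+ //	})
+ //	if err != nil {
+ //	    log.Fatal(err)
+ //	}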
+ CacheNodeIdsToReboot []*string `locationNameList:"CacheNodeId" type:"list" required:"true"`
+}
+
+// String returns the string representation
+func (s RebootCacheClusterInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s RebootCacheClusterInput) GoString() string {
+ return s.String()
+}
+
+type RebootCacheClusterOutput struct {
+ _ struct{} `type:"structure"`
+
+ // Contains all of the attributes of a specific cache cluster.
+ CacheCluster *CacheCluster `type:"structure"`
+}
+
+// String returns the string representation
+func (s RebootCacheClusterOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s RebootCacheClusterOutput) GoString() string {
+ return s.String()
+}
+
+// Contains the specific price and frequency of a recurring charge for a reserved
+// cache node, or for a reserved cache node offering.
+type RecurringCharge struct {
+ _ struct{} `type:"structure"`
+
+ // The monetary amount of the recurring charge.
+ RecurringChargeAmount *float64 `type:"double"`
+
+ // The frequency of the recurring charge.
+ RecurringChargeFrequency *string `type:"string"`
+}
+
+// String returns the string representation
+func (s RecurringCharge) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s RecurringCharge) GoString() string {
+ return s.String()
+}
+
+// Represents the input of a RemoveTagsFromResource action.
+type RemoveTagsFromResourceInput struct {
+ _ struct{} `type:"structure"`
+
+ // The name of the ElastiCache resource from which you want the listed tags
+ // removed, for example arn:aws:elasticache:us-west-2:0123456789:cluster:myCluster.
+ ResourceName *string `type:"string" required:"true"`
+
+ // A list of TagKeys identifying the tags you want removed from the named resource.
+ // For example, TagKeys.member.1=Region removes the cost allocation tag with
+ // the key name Region from the resource named by the ResourceName parameter.
+ TagKeys []*string `type:"list" required:"true"`
+}
+
+// String returns the string representation
+func (s RemoveTagsFromResourceInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s RemoveTagsFromResourceInput) GoString() string {
+ return s.String()
+}
+
+// Contains all of the attributes of a specific replication group.
+type ReplicationGroup struct {
+ _ struct{} `type:"structure"`
+
+ // Indicates the status of Multi-AZ for this replication group.
+ //
+ // ElastiCache Multi-AZ replication groups are not supported on:
+ //
+ //   - Redis versions earlier than 2.8.6
+ //   - T1 and T2 cache node types
+ AutomaticFailover *string `type:"string" enum:"AutomaticFailoverStatus"`
+
+ // The description of the replication group.
+ Description *string `type:"string"`
+
+ // The names of all the cache clusters that are part of this replication group.
+ MemberClusters []*string `locationNameList:"ClusterId" type:"list"`
+
+ // A single element list with information about the nodes in the replication
+ // group.
+ NodeGroups []*NodeGroup `locationNameList:"NodeGroup" type:"list"`
+
+ // A group of settings to be applied to the replication group, either immediately
+ // or during the next maintenance window.
+ PendingModifiedValues *ReplicationGroupPendingModifiedValues `type:"structure"`
+
+ // The identifier for the replication group.
+ ReplicationGroupId *string `type:"string"`
+
+ // The cache cluster ID that is used as the daily snapshot source for the replication
+ // group.
+ SnapshottingClusterId *string `type:"string"`
+
+ // The current state of this replication group - creating, available, etc.
+ Status *string `type:"string"`
+}
+
+// String returns the string representation
+func (s ReplicationGroup) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ReplicationGroup) GoString() string {
+ return s.String()
+}
+
+// The settings to be applied to the replication group, either immediately or
+// during the next maintenance window.
+type ReplicationGroupPendingModifiedValues struct {
+ _ struct{} `type:"structure"`
+
+ // Indicates the status of Multi-AZ for this replication group.
+ //
+ // ElastiCache Multi-AZ replication groups are not supported on:
+ //
+ //   - Redis versions earlier than 2.8.6
+ //   - T1 and T2 cache node types
+ AutomaticFailoverStatus *string `type:"string" enum:"PendingAutomaticFailoverStatus"`
+
+ // The primary cluster ID which will be applied immediately (if --apply-immediately
+ // was specified), or during the next maintenance window.
+ PrimaryClusterId *string `type:"string"`
+}
+
+// String returns the string representation
+func (s ReplicationGroupPendingModifiedValues) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ReplicationGroupPendingModifiedValues) GoString() string {
+ return s.String()
+}
+
+// Represents the output of a PurchaseReservedCacheNodesOffering action.
+type ReservedCacheNode struct {
+ _ struct{} `type:"structure"`
+
+ // The number of cache nodes that have been reserved.
+ CacheNodeCount *int64 `type:"integer"`
+
+ // The cache node type for the reserved cache nodes.
+ //
+ // Valid node types are as follows:
+ //
+ //   General purpose:
+ //     Current generation: cache.t2.micro, cache.t2.small, cache.t2.medium,
+ //       cache.m3.medium, cache.m3.large, cache.m3.xlarge, cache.m3.2xlarge
+ //     Previous generation: cache.t1.micro, cache.m1.small, cache.m1.medium,
+ //       cache.m1.large, cache.m1.xlarge
+ //   Compute optimized: cache.c1.xlarge
+ //   Memory optimized:
+ //     Current generation: cache.r3.large, cache.r3.xlarge, cache.r3.2xlarge,
+ //       cache.r3.4xlarge, cache.r3.8xlarge
+ //     Previous generation: cache.m2.xlarge, cache.m2.2xlarge, cache.m2.4xlarge
+ //
+ // Notes:
+ //
+ //   - All t2 instances are created in an Amazon Virtual Private Cloud (VPC).
+ //   - Redis backup/restore is not supported for t2 instances.
+ //   - Redis Append-only files (AOF) functionality is not supported for t1
+ //     or t2 instances.
+ //
+ // For a complete listing of cache node types and specifications, see Amazon
+ // ElastiCache Product Features and Details (http://aws.amazon.com/elasticache/details)
+ // and Cache Node Type-Specific Parameters for Memcached (http://docs.aws.amazon.com/AmazonElastiCache/latest/UserGuide/CacheParameterGroups.Memcached.html#CacheParameterGroups.Memcached.NodeSpecific)
+ // or Cache Node Type-Specific Parameters for Redis (http://docs.aws.amazon.com/AmazonElastiCache/latest/UserGuide/CacheParameterGroups.Redis.html#CacheParameterGroups.Redis.NodeSpecific).
+ CacheNodeType *string `type:"string"`
+
+ // The duration of the reservation in seconds.
+ Duration *int64 `type:"integer"`
+
+ // The fixed price charged for this reserved cache node.
+ FixedPrice *float64 `type:"double"`
+
+ // The offering type of this reserved cache node.
+ OfferingType *string `type:"string"`
+
+ // The description of the reserved cache node.
+ ProductDescription *string `type:"string"`
+
+ // The recurring price charged to run this reserved cache node.
+ RecurringCharges []*RecurringCharge `locationNameList:"RecurringCharge" type:"list"`
+
+ // The unique identifier for the reservation.
+ ReservedCacheNodeId *string `type:"string"`
+
+ // The offering identifier.
+ ReservedCacheNodesOfferingId *string `type:"string"`
+
+ // The time the reservation started.
+ StartTime *time.Time `type:"timestamp" timestampFormat:"iso8601"`
+
+ // The state of the reserved cache node.
+ State *string `type:"string"`
+
+ // The hourly price charged for this reserved cache node.
+ UsagePrice *float64 `type:"double"`
+}
+
+// String returns the string representation
+func (s ReservedCacheNode) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ReservedCacheNode) GoString() string {
+ return s.String()
+}
+
+// Describes all of the attributes of a reserved cache node offering.
+type ReservedCacheNodesOffering struct {
+ _ struct{} `type:"structure"`
+
+ // The cache node type for the reserved cache node.
+ //
+ // Valid node types are as follows:
+ //
+ //   General purpose:
+ //     Current generation: cache.t2.micro, cache.t2.small, cache.t2.medium,
+ //       cache.m3.medium, cache.m3.large, cache.m3.xlarge, cache.m3.2xlarge
+ //     Previous generation: cache.t1.micro, cache.m1.small, cache.m1.medium,
+ //       cache.m1.large, cache.m1.xlarge
+ //   Compute optimized: cache.c1.xlarge
+ //   Memory optimized:
+ //     Current generation: cache.r3.large, cache.r3.xlarge, cache.r3.2xlarge,
+ //       cache.r3.4xlarge, cache.r3.8xlarge
+ //     Previous generation: cache.m2.xlarge, cache.m2.2xlarge, cache.m2.4xlarge
+ //
+ // Notes:
+ //
+ //   - All t2 instances are created in an Amazon Virtual Private Cloud (VPC).
+ //   - Redis backup/restore is not supported for t2 instances.
+ //   - Redis Append-only files (AOF) functionality is not supported for t1
+ //     or t2 instances.
+ //
+ // For a complete listing of cache node types and specifications, see Amazon
+ // ElastiCache Product Features and Details (http://aws.amazon.com/elasticache/details)
+ // and Cache Node Type-Specific Parameters for Memcached (http://docs.aws.amazon.com/AmazonElastiCache/latest/UserGuide/CacheParameterGroups.Memcached.html#CacheParameterGroups.Memcached.NodeSpecific)
+ // or Cache Node Type-Specific Parameters for Redis (http://docs.aws.amazon.com/AmazonElastiCache/latest/UserGuide/CacheParameterGroups.Redis.html#CacheParameterGroups.Redis.NodeSpecific).
+ CacheNodeType *string `type:"string"`
+
+ // The duration of the offering, in seconds.
+ Duration *int64 `type:"integer"`
+
+ // The fixed price charged for this offering.
+ FixedPrice *float64 `type:"double"`
+
+ // The offering type.
+ OfferingType *string `type:"string"`
+
+ // The cache engine used by the offering.
+ ProductDescription *string `type:"string"`
+
+ // The recurring price charged to run this reserved cache node.
+ RecurringCharges []*RecurringCharge `locationNameList:"RecurringCharge" type:"list"`
+
+ // A unique identifier for the reserved cache node offering.
+ ReservedCacheNodesOfferingId *string `type:"string"`
+
+ // The hourly price charged for this offering.
+ UsagePrice *float64 `type:"double"`
+}
+
+// String returns the string representation
+func (s ReservedCacheNodesOffering) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ReservedCacheNodesOffering) GoString() string {
+ return s.String()
+}
+
+// Represents the input of a ResetCacheParameterGroup action.
+type ResetCacheParameterGroupInput struct {
+ _ struct{} `type:"structure"`
+
+ // The name of the cache parameter group to reset.
+ CacheParameterGroupName *string `type:"string" required:"true"`
+
+ // An array of parameter names to be reset. If you are not resetting the entire
+ // cache parameter group, you must specify at least one parameter name.
+ ParameterNameValues []*ParameterNameValue `locationNameList:"ParameterNameValue" type:"list" required:"true"`
+
+ // If true, all parameters in the cache parameter group will be reset to default
+ // values. If false, no such action occurs.
+ //
+ // Valid values: true | false
+ ResetAllParameters *bool `type:"boolean"`
+}
+
+// String returns the string representation
+func (s ResetCacheParameterGroupInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ResetCacheParameterGroupInput) GoString() string {
+ return s.String()
+}
+
+// Represents the input of a RevokeCacheSecurityGroupIngress action.
+type RevokeCacheSecurityGroupIngressInput struct {
+ _ struct{} `type:"structure"`
+
+ // The name of the cache security group to revoke ingress from.
+ CacheSecurityGroupName *string `type:"string" required:"true"`
+
+ // The name of the Amazon EC2 security group to revoke access from.
+ EC2SecurityGroupName *string `type:"string" required:"true"`
+
+ // The AWS account number of the Amazon EC2 security group owner. Note that
+ // this is not the same thing as an AWS access key ID - you must provide a valid
+ // AWS account number for this parameter.
+ EC2SecurityGroupOwnerId *string `type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s RevokeCacheSecurityGroupIngressInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s RevokeCacheSecurityGroupIngressInput) GoString() string {
+ return s.String()
+}
+
+type RevokeCacheSecurityGroupIngressOutput struct {
+ _ struct{} `type:"structure"`
+
+ // Represents the output of one of the following actions:
+ //
+ //   AuthorizeCacheSecurityGroupIngress
+ //   CreateCacheSecurityGroup
+ //   RevokeCacheSecurityGroupIngress
+ CacheSecurityGroup *CacheSecurityGroup `type:"structure"`
+}
+
+// String returns the string representation
+func (s RevokeCacheSecurityGroupIngressOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s RevokeCacheSecurityGroupIngressOutput) GoString() string {
+ return s.String()
+}
+
+// Represents a single cache security group and its status.
+type SecurityGroupMembership struct {
+ _ struct{} `type:"structure"`
+
+ // The identifier of the cache security group.
+ SecurityGroupId *string `type:"string"`
+
+ // The status of the cache security group membership. The status changes whenever
+ // a cache security group is modified, or when the cache security groups assigned
+ // to a cache cluster are modified.
+    Status *string `type:"string"`
+}
+
+// String returns the string representation
+func (s SecurityGroupMembership) String() string {
+    return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s SecurityGroupMembership) GoString() string {
+    return s.String()
+}
+
+// Represents a copy of an entire cache cluster as of the time when the snapshot
+// was taken.
+type Snapshot struct {
+    _ struct{} `type:"structure"`
+
+    // This parameter is currently disabled.
+    AutoMinorVersionUpgrade *bool `type:"boolean"`
+
+    // The date and time when the source cache cluster was created.
+    CacheClusterCreateTime *time.Time `type:"timestamp" timestampFormat:"iso8601"`
+
+    // The user-supplied identifier of the source cache cluster.
+    CacheClusterId *string `type:"string"`
+
+    // The name of the compute and memory capacity node type for the source cache
+    // cluster.
+    //
+    // Valid node types are as follows:
+    //
+    //   General purpose:
+    //     Current generation: cache.t2.micro, cache.t2.small, cache.t2.medium,
+    //       cache.m3.medium, cache.m3.large, cache.m3.xlarge, cache.m3.2xlarge
+    //     Previous generation: cache.t1.micro, cache.m1.small, cache.m1.medium,
+    //       cache.m1.large, cache.m1.xlarge
+    //   Compute optimized: cache.c1.xlarge
+    //   Memory optimized:
+    //     Current generation: cache.r3.large, cache.r3.xlarge, cache.r3.2xlarge,
+    //       cache.r3.4xlarge, cache.r3.8xlarge
+    //     Previous generation: cache.m2.xlarge, cache.m2.2xlarge, cache.m2.4xlarge
+    //
+    // Notes:
+    //
+    //   - All t2 instances are created in an Amazon Virtual Private Cloud (VPC).
+    //   - Redis backup/restore is not supported for t2 instances.
+    //   - Redis Append-only files (AOF) functionality is not supported for t1
+    //     or t2 instances.
+    //
+    // For a complete listing of cache node types and specifications, see Amazon
+    // ElastiCache Product Features and Details (http://aws.amazon.com/elasticache/details)
+    // and Cache Node Type-Specific Parameters for Memcached (http://docs.aws.amazon.com/AmazonElastiCache/latest/UserGuide/CacheParameterGroups.Memcached.html#CacheParameterGroups.Memcached.NodeSpecific)
+    // or Cache Node Type-Specific Parameters for Redis (http://docs.aws.amazon.com/AmazonElastiCache/latest/UserGuide/CacheParameterGroups.Redis.html#CacheParameterGroups.Redis.NodeSpecific).
+    CacheNodeType *string `type:"string"`
+
+    // The cache parameter group that is associated with the source cache cluster.
+    CacheParameterGroupName *string `type:"string"`
+
+    // The name of the cache subnet group associated with the source cache cluster.
+    CacheSubnetGroupName *string `type:"string"`
+
+    // The name of the cache engine (memcached or redis) used by the source cache
+    // cluster.
+    Engine *string `type:"string"`
+
+    // The version of the cache engine used by the source cache cluster.
+    EngineVersion *string `type:"string"`
+
+    // A list of the cache nodes in the source cache cluster.
+    NodeSnapshots []*NodeSnapshot `locationNameList:"NodeSnapshot" type:"list"`
+
+    // The number of cache nodes in the source cache cluster.
+    //
+    // For clusters running Redis, this value must be 1. For clusters running Memcached,
+    // this value must be between 1 and 20.
+    NumCacheNodes *int64 `type:"integer"`
+
+    // The port number used by each cache node in the source cache cluster.
+    Port *int64 `type:"integer"`
+
+    // The name of the Availability Zone in which the source cache cluster is located.
+    PreferredAvailabilityZone *string `type:"string"`
+
+    // Specifies the weekly time range during which maintenance on the cache cluster
+    // is performed. It is specified as a range in the format ddd:hh24:mi-ddd:hh24:mi
+    // (24H Clock UTC). The minimum maintenance window is a 60 minute period.
+    //
+    // Valid values for ddd are: sun, mon, tue, wed, thu, fri, sat.
+    //
+    // Example: sun:05:00-sun:09:00
+    PreferredMaintenanceWindow *string `type:"string"`
+
+    // The name of a snapshot. For an automatic snapshot, the name is system-generated;
+    // for a manual snapshot, this is the user-provided name.
+    SnapshotName *string `type:"string"`
+
+    // For an automatic snapshot, the number of days for which ElastiCache will
+    // retain the snapshot before deleting it.
+    //
+    // For manual snapshots, this field reflects the SnapshotRetentionLimit for
+    // the source cache cluster when the snapshot was created. This field is otherwise
+    // ignored: Manual snapshots do not expire, and can only be deleted using the
+    // DeleteSnapshot action.
+    //
+    // Important: If the value of SnapshotRetentionLimit is set to zero (0), backups
+    // are turned off.
+    SnapshotRetentionLimit *int64 `type:"integer"`
+
+    // Indicates whether the snapshot is from an automatic backup (automated) or
+    // was created manually (manual).
+    SnapshotSource *string `type:"string"`
+
+    // The status of the snapshot. Valid values: creating | available | restoring
+    // | copying | deleting.
+    SnapshotStatus *string `type:"string"`
+
+    // The daily time range during which ElastiCache takes daily snapshots of the
+    // source cache cluster.
+    SnapshotWindow *string `type:"string"`
+
+    // The Amazon Resource Name (ARN) for the topic used by the source cache cluster
+    // for publishing notifications.
+    TopicArn *string `type:"string"`
+
+    // The Amazon Virtual Private Cloud identifier (VPC ID) of the cache subnet
+    // group for the source cache cluster.
+    VpcId *string `type:"string"`
+}
+
+// String returns the string representation
+func (s Snapshot) String() string {
+    return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s Snapshot) GoString() string {
+    return s.String()
+}
+
+// Represents the subnet associated with a cache cluster. This parameter refers
+// to subnets defined in Amazon Virtual Private Cloud (Amazon VPC) and used
+// with ElastiCache.
+type Subnet struct {
+    _ struct{} `type:"structure"`
+
+    // The Availability Zone associated with the subnet.
+    SubnetAvailabilityZone *AvailabilityZone `type:"structure"`
+
+    // The unique identifier for the subnet.
+    SubnetIdentifier *string `type:"string"`
+}
+
+// String returns the string representation
+func (s Subnet) String() string {
+    return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s Subnet) GoString() string {
+    return s.String()
+}
+
+// A cost allocation Tag that can be added to an ElastiCache cluster or replication
+// group. Tags are composed of a Key/Value pair. A tag with a null Value is
+// permitted.
+type Tag struct {
+    _ struct{} `type:"structure"`
+
+    // The key for the tag.
+    Key *string `type:"string"`
+
+    // The tag's value. May be null.
+    Value *string `type:"string"`
+}
+
+// String returns the string representation
+func (s Tag) String() string {
+    return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s Tag) GoString() string {
+    return s.String()
+}
+
+// Represents the output from the AddTagsToResource, ListTagsOnResource, and
+// RemoveTagsFromResource actions.
+type TagListMessage struct {
+    _ struct{} `type:"structure"`
+
+    // A list of cost allocation tags as key-value pairs.
+    TagList []*Tag `locationNameList:"Tag" type:"list"`
+}
+
+// String returns the string representation
+func (s TagListMessage) String() string {
+    return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s TagListMessage) GoString() string {
+    return s.String()
+}
+
+const (
+    // @enum AZMode
+    AZModeSingleAz = "single-az"
+    // @enum AZMode
+    AZModeCrossAz = "cross-az"
+)
+
+const (
+    // @enum AutomaticFailoverStatus
+    AutomaticFailoverStatusEnabled = "enabled"
+    // @enum AutomaticFailoverStatus
+    AutomaticFailoverStatusDisabled = "disabled"
+    // @enum AutomaticFailoverStatus
+    AutomaticFailoverStatusEnabling = "enabling"
+    // @enum AutomaticFailoverStatus
+    AutomaticFailoverStatusDisabling = "disabling"
+)
+
+const (
+    // @enum PendingAutomaticFailoverStatus
+    PendingAutomaticFailoverStatusEnabled = "enabled"
+    // @enum PendingAutomaticFailoverStatus
+    PendingAutomaticFailoverStatusDisabled = "disabled"
+)
+
+const (
+    // @enum SourceType
+    SourceTypeCacheCluster = "cache-cluster"
+    // @enum SourceType
+    SourceTypeCacheParameterGroup = "cache-parameter-group"
+    // @enum SourceType
+    SourceTypeCacheSecurityGroup = "cache-security-group"
+    // @enum SourceType
+    SourceTypeCacheSubnetGroup = "cache-subnet-group"
+)
diff --git a/vendor/github.com/aws/aws-sdk-go/service/elasticache/elasticacheiface/interface.go b/vendor/github.com/aws/aws-sdk-go/service/elasticache/elasticacheiface/interface.go
new file mode 100644
index 000000000..6ea5142f4
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/elasticache/elasticacheiface/interface.go
@@ -0,0 +1,186 @@
+// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT.
+
+// Package elasticacheiface provides an interface for Amazon ElastiCache.
+package elasticacheiface
+
+import (
+    "github.com/aws/aws-sdk-go/aws/request"
+    "github.com/aws/aws-sdk-go/service/elasticache"
+)
+
+// ElastiCacheAPI is the interface type for elasticache.ElastiCache.
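+//
+// Its main use is letting calling code depend on this abstraction instead of
+// the concrete client, so the service can be mocked out in unit tests. A
+// minimal sketch, assuming a hypothetical mockElastiCacheClient type that is
+// not part of this package:
+//
+//     type mockElastiCacheClient struct {
+//         elasticacheiface.ElastiCacheAPI
+//     }
+//
+//     func (m *mockElastiCacheClient) DescribeCacheClusters(in *elasticache.DescribeCacheClustersInput) (*elasticache.DescribeCacheClustersOutput, error) {
+//         // Return a canned response instead of calling AWS.
+//         return &elasticache.DescribeCacheClustersOutput{}, nil
+//     }
+//
+// Embedding the interface means a test only has to implement the methods it
+// actually exercises; calling any other method would panic on the nil embedded
+// value.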
+type ElastiCacheAPI interface { + AddTagsToResourceRequest(*elasticache.AddTagsToResourceInput) (*request.Request, *elasticache.TagListMessage) + + AddTagsToResource(*elasticache.AddTagsToResourceInput) (*elasticache.TagListMessage, error) + + AuthorizeCacheSecurityGroupIngressRequest(*elasticache.AuthorizeCacheSecurityGroupIngressInput) (*request.Request, *elasticache.AuthorizeCacheSecurityGroupIngressOutput) + + AuthorizeCacheSecurityGroupIngress(*elasticache.AuthorizeCacheSecurityGroupIngressInput) (*elasticache.AuthorizeCacheSecurityGroupIngressOutput, error) + + CopySnapshotRequest(*elasticache.CopySnapshotInput) (*request.Request, *elasticache.CopySnapshotOutput) + + CopySnapshot(*elasticache.CopySnapshotInput) (*elasticache.CopySnapshotOutput, error) + + CreateCacheClusterRequest(*elasticache.CreateCacheClusterInput) (*request.Request, *elasticache.CreateCacheClusterOutput) + + CreateCacheCluster(*elasticache.CreateCacheClusterInput) (*elasticache.CreateCacheClusterOutput, error) + + CreateCacheParameterGroupRequest(*elasticache.CreateCacheParameterGroupInput) (*request.Request, *elasticache.CreateCacheParameterGroupOutput) + + CreateCacheParameterGroup(*elasticache.CreateCacheParameterGroupInput) (*elasticache.CreateCacheParameterGroupOutput, error) + + CreateCacheSecurityGroupRequest(*elasticache.CreateCacheSecurityGroupInput) (*request.Request, *elasticache.CreateCacheSecurityGroupOutput) + + CreateCacheSecurityGroup(*elasticache.CreateCacheSecurityGroupInput) (*elasticache.CreateCacheSecurityGroupOutput, error) + + CreateCacheSubnetGroupRequest(*elasticache.CreateCacheSubnetGroupInput) (*request.Request, *elasticache.CreateCacheSubnetGroupOutput) + + CreateCacheSubnetGroup(*elasticache.CreateCacheSubnetGroupInput) (*elasticache.CreateCacheSubnetGroupOutput, error) + + CreateReplicationGroupRequest(*elasticache.CreateReplicationGroupInput) (*request.Request, *elasticache.CreateReplicationGroupOutput) + + CreateReplicationGroup(*elasticache.CreateReplicationGroupInput) (*elasticache.CreateReplicationGroupOutput, error) + + CreateSnapshotRequest(*elasticache.CreateSnapshotInput) (*request.Request, *elasticache.CreateSnapshotOutput) + + CreateSnapshot(*elasticache.CreateSnapshotInput) (*elasticache.CreateSnapshotOutput, error) + + DeleteCacheClusterRequest(*elasticache.DeleteCacheClusterInput) (*request.Request, *elasticache.DeleteCacheClusterOutput) + + DeleteCacheCluster(*elasticache.DeleteCacheClusterInput) (*elasticache.DeleteCacheClusterOutput, error) + + DeleteCacheParameterGroupRequest(*elasticache.DeleteCacheParameterGroupInput) (*request.Request, *elasticache.DeleteCacheParameterGroupOutput) + + DeleteCacheParameterGroup(*elasticache.DeleteCacheParameterGroupInput) (*elasticache.DeleteCacheParameterGroupOutput, error) + + DeleteCacheSecurityGroupRequest(*elasticache.DeleteCacheSecurityGroupInput) (*request.Request, *elasticache.DeleteCacheSecurityGroupOutput) + + DeleteCacheSecurityGroup(*elasticache.DeleteCacheSecurityGroupInput) (*elasticache.DeleteCacheSecurityGroupOutput, error) + + DeleteCacheSubnetGroupRequest(*elasticache.DeleteCacheSubnetGroupInput) (*request.Request, *elasticache.DeleteCacheSubnetGroupOutput) + + DeleteCacheSubnetGroup(*elasticache.DeleteCacheSubnetGroupInput) (*elasticache.DeleteCacheSubnetGroupOutput, error) + + DeleteReplicationGroupRequest(*elasticache.DeleteReplicationGroupInput) (*request.Request, *elasticache.DeleteReplicationGroupOutput) + + DeleteReplicationGroup(*elasticache.DeleteReplicationGroupInput) 
(*elasticache.DeleteReplicationGroupOutput, error) + + DeleteSnapshotRequest(*elasticache.DeleteSnapshotInput) (*request.Request, *elasticache.DeleteSnapshotOutput) + + DeleteSnapshot(*elasticache.DeleteSnapshotInput) (*elasticache.DeleteSnapshotOutput, error) + + DescribeCacheClustersRequest(*elasticache.DescribeCacheClustersInput) (*request.Request, *elasticache.DescribeCacheClustersOutput) + + DescribeCacheClusters(*elasticache.DescribeCacheClustersInput) (*elasticache.DescribeCacheClustersOutput, error) + + DescribeCacheClustersPages(*elasticache.DescribeCacheClustersInput, func(*elasticache.DescribeCacheClustersOutput, bool) bool) error + + DescribeCacheEngineVersionsRequest(*elasticache.DescribeCacheEngineVersionsInput) (*request.Request, *elasticache.DescribeCacheEngineVersionsOutput) + + DescribeCacheEngineVersions(*elasticache.DescribeCacheEngineVersionsInput) (*elasticache.DescribeCacheEngineVersionsOutput, error) + + DescribeCacheEngineVersionsPages(*elasticache.DescribeCacheEngineVersionsInput, func(*elasticache.DescribeCacheEngineVersionsOutput, bool) bool) error + + DescribeCacheParameterGroupsRequest(*elasticache.DescribeCacheParameterGroupsInput) (*request.Request, *elasticache.DescribeCacheParameterGroupsOutput) + + DescribeCacheParameterGroups(*elasticache.DescribeCacheParameterGroupsInput) (*elasticache.DescribeCacheParameterGroupsOutput, error) + + DescribeCacheParameterGroupsPages(*elasticache.DescribeCacheParameterGroupsInput, func(*elasticache.DescribeCacheParameterGroupsOutput, bool) bool) error + + DescribeCacheParametersRequest(*elasticache.DescribeCacheParametersInput) (*request.Request, *elasticache.DescribeCacheParametersOutput) + + DescribeCacheParameters(*elasticache.DescribeCacheParametersInput) (*elasticache.DescribeCacheParametersOutput, error) + + DescribeCacheParametersPages(*elasticache.DescribeCacheParametersInput, func(*elasticache.DescribeCacheParametersOutput, bool) bool) error + + DescribeCacheSecurityGroupsRequest(*elasticache.DescribeCacheSecurityGroupsInput) (*request.Request, *elasticache.DescribeCacheSecurityGroupsOutput) + + DescribeCacheSecurityGroups(*elasticache.DescribeCacheSecurityGroupsInput) (*elasticache.DescribeCacheSecurityGroupsOutput, error) + + DescribeCacheSecurityGroupsPages(*elasticache.DescribeCacheSecurityGroupsInput, func(*elasticache.DescribeCacheSecurityGroupsOutput, bool) bool) error + + DescribeCacheSubnetGroupsRequest(*elasticache.DescribeCacheSubnetGroupsInput) (*request.Request, *elasticache.DescribeCacheSubnetGroupsOutput) + + DescribeCacheSubnetGroups(*elasticache.DescribeCacheSubnetGroupsInput) (*elasticache.DescribeCacheSubnetGroupsOutput, error) + + DescribeCacheSubnetGroupsPages(*elasticache.DescribeCacheSubnetGroupsInput, func(*elasticache.DescribeCacheSubnetGroupsOutput, bool) bool) error + + DescribeEngineDefaultParametersRequest(*elasticache.DescribeEngineDefaultParametersInput) (*request.Request, *elasticache.DescribeEngineDefaultParametersOutput) + + DescribeEngineDefaultParameters(*elasticache.DescribeEngineDefaultParametersInput) (*elasticache.DescribeEngineDefaultParametersOutput, error) + + DescribeEngineDefaultParametersPages(*elasticache.DescribeEngineDefaultParametersInput, func(*elasticache.DescribeEngineDefaultParametersOutput, bool) bool) error + + DescribeEventsRequest(*elasticache.DescribeEventsInput) (*request.Request, *elasticache.DescribeEventsOutput) + + DescribeEvents(*elasticache.DescribeEventsInput) (*elasticache.DescribeEventsOutput, error) + + 
DescribeEventsPages(*elasticache.DescribeEventsInput, func(*elasticache.DescribeEventsOutput, bool) bool) error + + DescribeReplicationGroupsRequest(*elasticache.DescribeReplicationGroupsInput) (*request.Request, *elasticache.DescribeReplicationGroupsOutput) + + DescribeReplicationGroups(*elasticache.DescribeReplicationGroupsInput) (*elasticache.DescribeReplicationGroupsOutput, error) + + DescribeReplicationGroupsPages(*elasticache.DescribeReplicationGroupsInput, func(*elasticache.DescribeReplicationGroupsOutput, bool) bool) error + + DescribeReservedCacheNodesRequest(*elasticache.DescribeReservedCacheNodesInput) (*request.Request, *elasticache.DescribeReservedCacheNodesOutput) + + DescribeReservedCacheNodes(*elasticache.DescribeReservedCacheNodesInput) (*elasticache.DescribeReservedCacheNodesOutput, error) + + DescribeReservedCacheNodesPages(*elasticache.DescribeReservedCacheNodesInput, func(*elasticache.DescribeReservedCacheNodesOutput, bool) bool) error + + DescribeReservedCacheNodesOfferingsRequest(*elasticache.DescribeReservedCacheNodesOfferingsInput) (*request.Request, *elasticache.DescribeReservedCacheNodesOfferingsOutput) + + DescribeReservedCacheNodesOfferings(*elasticache.DescribeReservedCacheNodesOfferingsInput) (*elasticache.DescribeReservedCacheNodesOfferingsOutput, error) + + DescribeReservedCacheNodesOfferingsPages(*elasticache.DescribeReservedCacheNodesOfferingsInput, func(*elasticache.DescribeReservedCacheNodesOfferingsOutput, bool) bool) error + + DescribeSnapshotsRequest(*elasticache.DescribeSnapshotsInput) (*request.Request, *elasticache.DescribeSnapshotsOutput) + + DescribeSnapshots(*elasticache.DescribeSnapshotsInput) (*elasticache.DescribeSnapshotsOutput, error) + + DescribeSnapshotsPages(*elasticache.DescribeSnapshotsInput, func(*elasticache.DescribeSnapshotsOutput, bool) bool) error + + ListTagsForResourceRequest(*elasticache.ListTagsForResourceInput) (*request.Request, *elasticache.TagListMessage) + + ListTagsForResource(*elasticache.ListTagsForResourceInput) (*elasticache.TagListMessage, error) + + ModifyCacheClusterRequest(*elasticache.ModifyCacheClusterInput) (*request.Request, *elasticache.ModifyCacheClusterOutput) + + ModifyCacheCluster(*elasticache.ModifyCacheClusterInput) (*elasticache.ModifyCacheClusterOutput, error) + + ModifyCacheParameterGroupRequest(*elasticache.ModifyCacheParameterGroupInput) (*request.Request, *elasticache.CacheParameterGroupNameMessage) + + ModifyCacheParameterGroup(*elasticache.ModifyCacheParameterGroupInput) (*elasticache.CacheParameterGroupNameMessage, error) + + ModifyCacheSubnetGroupRequest(*elasticache.ModifyCacheSubnetGroupInput) (*request.Request, *elasticache.ModifyCacheSubnetGroupOutput) + + ModifyCacheSubnetGroup(*elasticache.ModifyCacheSubnetGroupInput) (*elasticache.ModifyCacheSubnetGroupOutput, error) + + ModifyReplicationGroupRequest(*elasticache.ModifyReplicationGroupInput) (*request.Request, *elasticache.ModifyReplicationGroupOutput) + + ModifyReplicationGroup(*elasticache.ModifyReplicationGroupInput) (*elasticache.ModifyReplicationGroupOutput, error) + + PurchaseReservedCacheNodesOfferingRequest(*elasticache.PurchaseReservedCacheNodesOfferingInput) (*request.Request, *elasticache.PurchaseReservedCacheNodesOfferingOutput) + + PurchaseReservedCacheNodesOffering(*elasticache.PurchaseReservedCacheNodesOfferingInput) (*elasticache.PurchaseReservedCacheNodesOfferingOutput, error) + + RebootCacheClusterRequest(*elasticache.RebootCacheClusterInput) (*request.Request, *elasticache.RebootCacheClusterOutput) + + 
RebootCacheCluster(*elasticache.RebootCacheClusterInput) (*elasticache.RebootCacheClusterOutput, error) + + RemoveTagsFromResourceRequest(*elasticache.RemoveTagsFromResourceInput) (*request.Request, *elasticache.TagListMessage) + + RemoveTagsFromResource(*elasticache.RemoveTagsFromResourceInput) (*elasticache.TagListMessage, error) + + ResetCacheParameterGroupRequest(*elasticache.ResetCacheParameterGroupInput) (*request.Request, *elasticache.CacheParameterGroupNameMessage) + + ResetCacheParameterGroup(*elasticache.ResetCacheParameterGroupInput) (*elasticache.CacheParameterGroupNameMessage, error) + + RevokeCacheSecurityGroupIngressRequest(*elasticache.RevokeCacheSecurityGroupIngressInput) (*request.Request, *elasticache.RevokeCacheSecurityGroupIngressOutput) + + RevokeCacheSecurityGroupIngress(*elasticache.RevokeCacheSecurityGroupIngressInput) (*elasticache.RevokeCacheSecurityGroupIngressOutput, error) +} + +var _ ElastiCacheAPI = (*elasticache.ElastiCache)(nil) diff --git a/vendor/github.com/aws/aws-sdk-go/service/elasticache/examples_test.go b/vendor/github.com/aws/aws-sdk-go/service/elasticache/examples_test.go new file mode 100644 index 000000000..886c75597 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/elasticache/examples_test.go @@ -0,0 +1,943 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +package elasticache_test + +import ( + "bytes" + "fmt" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/session" + "github.com/aws/aws-sdk-go/service/elasticache" +) + +var _ time.Duration +var _ bytes.Buffer + +func ExampleElastiCache_AddTagsToResource() { + svc := elasticache.New(session.New()) + + params := &elasticache.AddTagsToResourceInput{ + ResourceName: aws.String("String"), // Required + Tags: []*elasticache.Tag{ // Required + { // Required + Key: aws.String("String"), + Value: aws.String("String"), + }, + // More values... + }, + } + resp, err := svc.AddTagsToResource(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleElastiCache_AuthorizeCacheSecurityGroupIngress() { + svc := elasticache.New(session.New()) + + params := &elasticache.AuthorizeCacheSecurityGroupIngressInput{ + CacheSecurityGroupName: aws.String("String"), // Required + EC2SecurityGroupName: aws.String("String"), // Required + EC2SecurityGroupOwnerId: aws.String("String"), // Required + } + resp, err := svc.AuthorizeCacheSecurityGroupIngress(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleElastiCache_CopySnapshot() { + svc := elasticache.New(session.New()) + + params := &elasticache.CopySnapshotInput{ + SourceSnapshotName: aws.String("String"), // Required + TargetSnapshotName: aws.String("String"), // Required + } + resp, err := svc.CopySnapshot(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
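+	// (resp is a *elasticache.CopySnapshotOutput here; the readable output
+	// comes from the generated String method on the output type, which formats
+	// the struct with awsutil.Prettify, as seen in api.go above.)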
+ fmt.Println(resp) +} + +func ExampleElastiCache_CreateCacheCluster() { + svc := elasticache.New(session.New()) + + params := &elasticache.CreateCacheClusterInput{ + CacheClusterId: aws.String("String"), // Required + AZMode: aws.String("AZMode"), + AutoMinorVersionUpgrade: aws.Bool(true), + CacheNodeType: aws.String("String"), + CacheParameterGroupName: aws.String("String"), + CacheSecurityGroupNames: []*string{ + aws.String("String"), // Required + // More values... + }, + CacheSubnetGroupName: aws.String("String"), + Engine: aws.String("String"), + EngineVersion: aws.String("String"), + NotificationTopicArn: aws.String("String"), + NumCacheNodes: aws.Int64(1), + Port: aws.Int64(1), + PreferredAvailabilityZone: aws.String("String"), + PreferredAvailabilityZones: []*string{ + aws.String("String"), // Required + // More values... + }, + PreferredMaintenanceWindow: aws.String("String"), + ReplicationGroupId: aws.String("String"), + SecurityGroupIds: []*string{ + aws.String("String"), // Required + // More values... + }, + SnapshotArns: []*string{ + aws.String("String"), // Required + // More values... + }, + SnapshotName: aws.String("String"), + SnapshotRetentionLimit: aws.Int64(1), + SnapshotWindow: aws.String("String"), + Tags: []*elasticache.Tag{ + { // Required + Key: aws.String("String"), + Value: aws.String("String"), + }, + // More values... + }, + } + resp, err := svc.CreateCacheCluster(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleElastiCache_CreateCacheParameterGroup() { + svc := elasticache.New(session.New()) + + params := &elasticache.CreateCacheParameterGroupInput{ + CacheParameterGroupFamily: aws.String("String"), // Required + CacheParameterGroupName: aws.String("String"), // Required + Description: aws.String("String"), // Required + } + resp, err := svc.CreateCacheParameterGroup(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleElastiCache_CreateCacheSecurityGroup() { + svc := elasticache.New(session.New()) + + params := &elasticache.CreateCacheSecurityGroupInput{ + CacheSecurityGroupName: aws.String("String"), // Required + Description: aws.String("String"), // Required + } + resp, err := svc.CreateCacheSecurityGroup(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleElastiCache_CreateCacheSubnetGroup() { + svc := elasticache.New(session.New()) + + params := &elasticache.CreateCacheSubnetGroupInput{ + CacheSubnetGroupDescription: aws.String("String"), // Required + CacheSubnetGroupName: aws.String("String"), // Required + SubnetIds: []*string{ // Required + aws.String("String"), // Required + // More values... + }, + } + resp, err := svc.CreateCacheSubnetGroup(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleElastiCache_CreateReplicationGroup() { + svc := elasticache.New(session.New()) + + params := &elasticache.CreateReplicationGroupInput{ + ReplicationGroupDescription: aws.String("String"), // Required + ReplicationGroupId: aws.String("String"), // Required + AutoMinorVersionUpgrade: aws.Bool(true), + AutomaticFailoverEnabled: aws.Bool(true), + CacheNodeType: aws.String("String"), + CacheParameterGroupName: aws.String("String"), + CacheSecurityGroupNames: []*string{ + aws.String("String"), // Required + // More values... + }, + CacheSubnetGroupName: aws.String("String"), + Engine: aws.String("String"), + EngineVersion: aws.String("String"), + NotificationTopicArn: aws.String("String"), + NumCacheClusters: aws.Int64(1), + Port: aws.Int64(1), + PreferredCacheClusterAZs: []*string{ + aws.String("String"), // Required + // More values... + }, + PreferredMaintenanceWindow: aws.String("String"), + PrimaryClusterId: aws.String("String"), + SecurityGroupIds: []*string{ + aws.String("String"), // Required + // More values... + }, + SnapshotArns: []*string{ + aws.String("String"), // Required + // More values... + }, + SnapshotName: aws.String("String"), + SnapshotRetentionLimit: aws.Int64(1), + SnapshotWindow: aws.String("String"), + Tags: []*elasticache.Tag{ + { // Required + Key: aws.String("String"), + Value: aws.String("String"), + }, + // More values... + }, + } + resp, err := svc.CreateReplicationGroup(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleElastiCache_CreateSnapshot() { + svc := elasticache.New(session.New()) + + params := &elasticache.CreateSnapshotInput{ + CacheClusterId: aws.String("String"), // Required + SnapshotName: aws.String("String"), // Required + } + resp, err := svc.CreateSnapshot(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleElastiCache_DeleteCacheCluster() { + svc := elasticache.New(session.New()) + + params := &elasticache.DeleteCacheClusterInput{ + CacheClusterId: aws.String("String"), // Required + FinalSnapshotIdentifier: aws.String("String"), + } + resp, err := svc.DeleteCacheCluster(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleElastiCache_DeleteCacheParameterGroup() { + svc := elasticache.New(session.New()) + + params := &elasticache.DeleteCacheParameterGroupInput{ + CacheParameterGroupName: aws.String("String"), // Required + } + resp, err := svc.DeleteCacheParameterGroup(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleElastiCache_DeleteCacheSecurityGroup() { + svc := elasticache.New(session.New()) + + params := &elasticache.DeleteCacheSecurityGroupInput{ + CacheSecurityGroupName: aws.String("String"), // Required + } + resp, err := svc.DeleteCacheSecurityGroup(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleElastiCache_DeleteCacheSubnetGroup() { + svc := elasticache.New(session.New()) + + params := &elasticache.DeleteCacheSubnetGroupInput{ + CacheSubnetGroupName: aws.String("String"), // Required + } + resp, err := svc.DeleteCacheSubnetGroup(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleElastiCache_DeleteReplicationGroup() { + svc := elasticache.New(session.New()) + + params := &elasticache.DeleteReplicationGroupInput{ + ReplicationGroupId: aws.String("String"), // Required + FinalSnapshotIdentifier: aws.String("String"), + RetainPrimaryCluster: aws.Bool(true), + } + resp, err := svc.DeleteReplicationGroup(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleElastiCache_DeleteSnapshot() { + svc := elasticache.New(session.New()) + + params := &elasticache.DeleteSnapshotInput{ + SnapshotName: aws.String("String"), // Required + } + resp, err := svc.DeleteSnapshot(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleElastiCache_DescribeCacheClusters() { + svc := elasticache.New(session.New()) + + params := &elasticache.DescribeCacheClustersInput{ + CacheClusterId: aws.String("String"), + Marker: aws.String("String"), + MaxRecords: aws.Int64(1), + ShowCacheNodeInfo: aws.Bool(true), + } + resp, err := svc.DescribeCacheClusters(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleElastiCache_DescribeCacheEngineVersions() { + svc := elasticache.New(session.New()) + + params := &elasticache.DescribeCacheEngineVersionsInput{ + CacheParameterGroupFamily: aws.String("String"), + DefaultOnly: aws.Bool(true), + Engine: aws.String("String"), + EngineVersion: aws.String("String"), + Marker: aws.String("String"), + MaxRecords: aws.Int64(1), + } + resp, err := svc.DescribeCacheEngineVersions(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleElastiCache_DescribeCacheParameterGroups() { + svc := elasticache.New(session.New()) + + params := &elasticache.DescribeCacheParameterGroupsInput{ + CacheParameterGroupName: aws.String("String"), + Marker: aws.String("String"), + MaxRecords: aws.Int64(1), + } + resp, err := svc.DescribeCacheParameterGroups(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleElastiCache_DescribeCacheParameters() { + svc := elasticache.New(session.New()) + + params := &elasticache.DescribeCacheParametersInput{ + CacheParameterGroupName: aws.String("String"), // Required + Marker: aws.String("String"), + MaxRecords: aws.Int64(1), + Source: aws.String("String"), + } + resp, err := svc.DescribeCacheParameters(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleElastiCache_DescribeCacheSecurityGroups() { + svc := elasticache.New(session.New()) + + params := &elasticache.DescribeCacheSecurityGroupsInput{ + CacheSecurityGroupName: aws.String("String"), + Marker: aws.String("String"), + MaxRecords: aws.Int64(1), + } + resp, err := svc.DescribeCacheSecurityGroups(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleElastiCache_DescribeCacheSubnetGroups() { + svc := elasticache.New(session.New()) + + params := &elasticache.DescribeCacheSubnetGroupsInput{ + CacheSubnetGroupName: aws.String("String"), + Marker: aws.String("String"), + MaxRecords: aws.Int64(1), + } + resp, err := svc.DescribeCacheSubnetGroups(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleElastiCache_DescribeEngineDefaultParameters() { + svc := elasticache.New(session.New()) + + params := &elasticache.DescribeEngineDefaultParametersInput{ + CacheParameterGroupFamily: aws.String("String"), // Required + Marker: aws.String("String"), + MaxRecords: aws.Int64(1), + } + resp, err := svc.DescribeEngineDefaultParameters(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleElastiCache_DescribeEvents() { + svc := elasticache.New(session.New()) + + params := &elasticache.DescribeEventsInput{ + Duration: aws.Int64(1), + EndTime: aws.Time(time.Now()), + Marker: aws.String("String"), + MaxRecords: aws.Int64(1), + SourceIdentifier: aws.String("String"), + SourceType: aws.String("SourceType"), + StartTime: aws.Time(time.Now()), + } + resp, err := svc.DescribeEvents(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleElastiCache_DescribeReplicationGroups() { + svc := elasticache.New(session.New()) + + params := &elasticache.DescribeReplicationGroupsInput{ + Marker: aws.String("String"), + MaxRecords: aws.Int64(1), + ReplicationGroupId: aws.String("String"), + } + resp, err := svc.DescribeReplicationGroups(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleElastiCache_DescribeReservedCacheNodes() { + svc := elasticache.New(session.New()) + + params := &elasticache.DescribeReservedCacheNodesInput{ + CacheNodeType: aws.String("String"), + Duration: aws.String("String"), + Marker: aws.String("String"), + MaxRecords: aws.Int64(1), + OfferingType: aws.String("String"), + ProductDescription: aws.String("String"), + ReservedCacheNodeId: aws.String("String"), + ReservedCacheNodesOfferingId: aws.String("String"), + } + resp, err := svc.DescribeReservedCacheNodes(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleElastiCache_DescribeReservedCacheNodesOfferings() { + svc := elasticache.New(session.New()) + + params := &elasticache.DescribeReservedCacheNodesOfferingsInput{ + CacheNodeType: aws.String("String"), + Duration: aws.String("String"), + Marker: aws.String("String"), + MaxRecords: aws.Int64(1), + OfferingType: aws.String("String"), + ProductDescription: aws.String("String"), + ReservedCacheNodesOfferingId: aws.String("String"), + } + resp, err := svc.DescribeReservedCacheNodesOfferings(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleElastiCache_DescribeSnapshots() { + svc := elasticache.New(session.New()) + + params := &elasticache.DescribeSnapshotsInput{ + CacheClusterId: aws.String("String"), + Marker: aws.String("String"), + MaxRecords: aws.Int64(1), + SnapshotName: aws.String("String"), + SnapshotSource: aws.String("String"), + } + resp, err := svc.DescribeSnapshots(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleElastiCache_ListTagsForResource() { + svc := elasticache.New(session.New()) + + params := &elasticache.ListTagsForResourceInput{ + ResourceName: aws.String("String"), // Required + } + resp, err := svc.ListTagsForResource(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleElastiCache_ModifyCacheCluster() { + svc := elasticache.New(session.New()) + + params := &elasticache.ModifyCacheClusterInput{ + CacheClusterId: aws.String("String"), // Required + AZMode: aws.String("AZMode"), + ApplyImmediately: aws.Bool(true), + AutoMinorVersionUpgrade: aws.Bool(true), + CacheNodeIdsToRemove: []*string{ + aws.String("String"), // Required + // More values... 
+ }, + CacheParameterGroupName: aws.String("String"), + CacheSecurityGroupNames: []*string{ + aws.String("String"), // Required + // More values... + }, + EngineVersion: aws.String("String"), + NewAvailabilityZones: []*string{ + aws.String("String"), // Required + // More values... + }, + NotificationTopicArn: aws.String("String"), + NotificationTopicStatus: aws.String("String"), + NumCacheNodes: aws.Int64(1), + PreferredMaintenanceWindow: aws.String("String"), + SecurityGroupIds: []*string{ + aws.String("String"), // Required + // More values... + }, + SnapshotRetentionLimit: aws.Int64(1), + SnapshotWindow: aws.String("String"), + } + resp, err := svc.ModifyCacheCluster(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleElastiCache_ModifyCacheParameterGroup() { + svc := elasticache.New(session.New()) + + params := &elasticache.ModifyCacheParameterGroupInput{ + CacheParameterGroupName: aws.String("String"), // Required + ParameterNameValues: []*elasticache.ParameterNameValue{ // Required + { // Required + ParameterName: aws.String("String"), + ParameterValue: aws.String("String"), + }, + // More values... + }, + } + resp, err := svc.ModifyCacheParameterGroup(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleElastiCache_ModifyCacheSubnetGroup() { + svc := elasticache.New(session.New()) + + params := &elasticache.ModifyCacheSubnetGroupInput{ + CacheSubnetGroupName: aws.String("String"), // Required + CacheSubnetGroupDescription: aws.String("String"), + SubnetIds: []*string{ + aws.String("String"), // Required + // More values... + }, + } + resp, err := svc.ModifyCacheSubnetGroup(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleElastiCache_ModifyReplicationGroup() { + svc := elasticache.New(session.New()) + + params := &elasticache.ModifyReplicationGroupInput{ + ReplicationGroupId: aws.String("String"), // Required + ApplyImmediately: aws.Bool(true), + AutoMinorVersionUpgrade: aws.Bool(true), + AutomaticFailoverEnabled: aws.Bool(true), + CacheParameterGroupName: aws.String("String"), + CacheSecurityGroupNames: []*string{ + aws.String("String"), // Required + // More values... + }, + EngineVersion: aws.String("String"), + NotificationTopicArn: aws.String("String"), + NotificationTopicStatus: aws.String("String"), + PreferredMaintenanceWindow: aws.String("String"), + PrimaryClusterId: aws.String("String"), + ReplicationGroupDescription: aws.String("String"), + SecurityGroupIds: []*string{ + aws.String("String"), // Required + // More values... + }, + SnapshotRetentionLimit: aws.Int64(1), + SnapshotWindow: aws.String("String"), + SnapshottingClusterId: aws.String("String"), + } + resp, err := svc.ModifyReplicationGroup(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleElastiCache_PurchaseReservedCacheNodesOffering() { + svc := elasticache.New(session.New()) + + params := &elasticache.PurchaseReservedCacheNodesOfferingInput{ + ReservedCacheNodesOfferingId: aws.String("String"), // Required + CacheNodeCount: aws.Int64(1), + ReservedCacheNodeId: aws.String("String"), + } + resp, err := svc.PurchaseReservedCacheNodesOffering(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleElastiCache_RebootCacheCluster() { + svc := elasticache.New(session.New()) + + params := &elasticache.RebootCacheClusterInput{ + CacheClusterId: aws.String("String"), // Required + CacheNodeIdsToReboot: []*string{ // Required + aws.String("String"), // Required + // More values... + }, + } + resp, err := svc.RebootCacheCluster(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleElastiCache_RemoveTagsFromResource() { + svc := elasticache.New(session.New()) + + params := &elasticache.RemoveTagsFromResourceInput{ + ResourceName: aws.String("String"), // Required + TagKeys: []*string{ // Required + aws.String("String"), // Required + // More values... + }, + } + resp, err := svc.RemoveTagsFromResource(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleElastiCache_ResetCacheParameterGroup() { + svc := elasticache.New(session.New()) + + params := &elasticache.ResetCacheParameterGroupInput{ + CacheParameterGroupName: aws.String("String"), // Required + ParameterNameValues: []*elasticache.ParameterNameValue{ // Required + { // Required + ParameterName: aws.String("String"), + ParameterValue: aws.String("String"), + }, + // More values... + }, + ResetAllParameters: aws.Bool(true), + } + resp, err := svc.ResetCacheParameterGroup(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleElastiCache_RevokeCacheSecurityGroupIngress() { + svc := elasticache.New(session.New()) + + params := &elasticache.RevokeCacheSecurityGroupIngressInput{ + CacheSecurityGroupName: aws.String("String"), // Required + EC2SecurityGroupName: aws.String("String"), // Required + EC2SecurityGroupOwnerId: aws.String("String"), // Required + } + resp, err := svc.RevokeCacheSecurityGroupIngress(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/elasticache/service.go b/vendor/github.com/aws/aws-sdk-go/service/elasticache/service.go new file mode 100644 index 000000000..820bd48ff --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/elasticache/service.go @@ -0,0 +1,96 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. 
+
+package elasticache
+
+import (
+    "github.com/aws/aws-sdk-go/aws"
+    "github.com/aws/aws-sdk-go/aws/client"
+    "github.com/aws/aws-sdk-go/aws/client/metadata"
+    "github.com/aws/aws-sdk-go/aws/request"
+    "github.com/aws/aws-sdk-go/private/protocol/query"
+    "github.com/aws/aws-sdk-go/private/signer/v4"
+)
+
+// Amazon ElastiCache is a web service that makes it easier to set up, operate,
+// and scale a distributed cache in the cloud.
+//
+// With ElastiCache, customers gain all of the benefits of a high-performance,
+// in-memory cache with far less of the administrative burden of launching and
+// managing a distributed cache. The service makes setup, scaling, and cluster
+// failure handling much simpler than in a self-managed cache deployment.
+//
+// In addition, through integration with Amazon CloudWatch, customers get enhanced
+// visibility into the key performance statistics associated with their cache
+// and can receive alarms if a part of their cache runs hot.
+//
+// The service client's operations are safe to be used concurrently.
+// It is not safe to mutate any of the client's properties though.
+type ElastiCache struct {
+    *client.Client
+}
+
+// Used for custom client initialization logic
+var initClient func(*client.Client)
+
+// Used for custom request initialization logic
+var initRequest func(*request.Request)
+
+// A ServiceName is the name of the service the client will make API calls to.
+const ServiceName = "elasticache"
+
+// New creates a new instance of the ElastiCache client with a session.
+// If additional configuration is needed for the client instance use the optional
+// aws.Config parameter to add your extra config.
+//
+// Example:
+//     // Create an ElastiCache client from just a session.
+//     svc := elasticache.New(mySession)
+//
+//     // Create an ElastiCache client with additional configuration
+//     svc := elasticache.New(mySession, aws.NewConfig().WithRegion("us-west-2"))
+func New(p client.ConfigProvider, cfgs ...*aws.Config) *ElastiCache {
+    c := p.ClientConfig(ServiceName, cfgs...)
+    return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion)
+}
+
+// newClient creates, initializes and returns a new service client instance.
+func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *ElastiCache {
+    svc := &ElastiCache{
+        Client: client.New(
+            cfg,
+            metadata.ClientInfo{
+                ServiceName:   ServiceName,
+                SigningRegion: signingRegion,
+                Endpoint:      endpoint,
+                APIVersion:    "2015-02-02",
+            },
+            handlers,
+        ),
+    }
+
+    // Handlers
+    svc.Handlers.Sign.PushBack(v4.Sign)
+    svc.Handlers.Build.PushBack(query.Build)
+    svc.Handlers.Unmarshal.PushBack(query.Unmarshal)
+    svc.Handlers.UnmarshalMeta.PushBack(query.UnmarshalMeta)
+    svc.Handlers.UnmarshalError.PushBack(query.UnmarshalError)
+
+    // Run custom client initialization if present
+    if initClient != nil {
+        initClient(svc.Client)
+    }
+
+    return svc
+}
+
+// newRequest creates a new request for an ElastiCache operation and runs any
+// custom request initialization.
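+// The initRequest hook it consults is the package-level variable declared
+// above; it remains nil unless a service customization sets it.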
+func (c *ElastiCache) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + // Run custom request initialization if present + if initRequest != nil { + initRequest(req) + } + + return req +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/elasticache/waiters.go b/vendor/github.com/aws/aws-sdk-go/service/elasticache/waiters.go new file mode 100644 index 000000000..0f594a65f --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/elasticache/waiters.go @@ -0,0 +1,183 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +package elasticache + +import ( + "github.com/aws/aws-sdk-go/private/waiter" +) + +func (c *ElastiCache) WaitUntilCacheClusterAvailable(input *DescribeCacheClustersInput) error { + waiterCfg := waiter.Config{ + Operation: "DescribeCacheClusters", + Delay: 15, + MaxAttempts: 40, + Acceptors: []waiter.WaitAcceptor{ + { + State: "success", + Matcher: "pathAll", + Argument: "CacheClusters[].CacheClusterStatus", + Expected: "available", + }, + { + State: "failure", + Matcher: "pathAny", + Argument: "CacheClusters[].CacheClusterStatus", + Expected: "deleted", + }, + { + State: "failure", + Matcher: "pathAny", + Argument: "CacheClusters[].CacheClusterStatus", + Expected: "deleting", + }, + { + State: "failure", + Matcher: "pathAny", + Argument: "CacheClusters[].CacheClusterStatus", + Expected: "incompatible-network", + }, + { + State: "failure", + Matcher: "pathAny", + Argument: "CacheClusters[].CacheClusterStatus", + Expected: "restore-failed", + }, + }, + } + + w := waiter.Waiter{ + Client: c, + Input: input, + Config: waiterCfg, + } + return w.Wait() +} + +func (c *ElastiCache) WaitUntilCacheClusterDeleted(input *DescribeCacheClustersInput) error { + waiterCfg := waiter.Config{ + Operation: "DescribeCacheClusters", + Delay: 15, + MaxAttempts: 40, + Acceptors: []waiter.WaitAcceptor{ + { + State: "success", + Matcher: "pathAll", + Argument: "CacheClusters[].CacheClusterStatus", + Expected: "deleted", + }, + { + State: "success", + Matcher: "error", + Argument: "", + Expected: "CacheClusterNotFound", + }, + { + State: "failure", + Matcher: "pathAny", + Argument: "CacheClusters[].CacheClusterStatus", + Expected: "available", + }, + { + State: "failure", + Matcher: "pathAny", + Argument: "CacheClusters[].CacheClusterStatus", + Expected: "creating", + }, + { + State: "failure", + Matcher: "pathAny", + Argument: "CacheClusters[].CacheClusterStatus", + Expected: "incompatible-network", + }, + { + State: "failure", + Matcher: "pathAny", + Argument: "CacheClusters[].CacheClusterStatus", + Expected: "modifying", + }, + { + State: "failure", + Matcher: "pathAny", + Argument: "CacheClusters[].CacheClusterStatus", + Expected: "restore-failed", + }, + { + State: "failure", + Matcher: "pathAny", + Argument: "CacheClusters[].CacheClusterStatus", + Expected: "snapshotting", + }, + }, + } + + w := waiter.Waiter{ + Client: c, + Input: input, + Config: waiterCfg, + } + return w.Wait() +} + +func (c *ElastiCache) WaitUntilReplicationGroupAvailable(input *DescribeReplicationGroupsInput) error { + waiterCfg := waiter.Config{ + Operation: "DescribeReplicationGroups", + Delay: 15, + MaxAttempts: 40, + Acceptors: []waiter.WaitAcceptor{ + { + State: "success", + Matcher: "pathAll", + Argument: "ReplicationGroups[].Status", + Expected: "available", + }, + { + State: "failure", + Matcher: "pathAny", + Argument: "ReplicationGroups[].Status", + Expected: "deleted", + }, + }, + } + + w := waiter.Waiter{ + Client: c, + Input: 
input,
+        Config: waiterCfg,
+    }
+    return w.Wait()
+}
+
+func (c *ElastiCache) WaitUntilReplicationGroupDeleted(input *DescribeReplicationGroupsInput) error {
+    waiterCfg := waiter.Config{
+        Operation:   "DescribeReplicationGroups",
+        Delay:       15,
+        MaxAttempts: 40,
+        Acceptors: []waiter.WaitAcceptor{
+            {
+                State:    "success",
+                Matcher:  "pathAll",
+                Argument: "ReplicationGroups[].Status",
+                Expected: "deleted",
+            },
+            {
+                State:    "failure",
+                Matcher:  "pathAny",
+                Argument: "ReplicationGroups[].Status",
+                Expected: "available",
+            },
+            {
+                State:    "success",
+                Matcher:  "error",
+                Argument: "",
+                Expected: "ReplicationGroupNotFoundFault",
+            },
+        },
+    }
+
+    w := waiter.Waiter{
+        Client: c,
+        Input:  input,
+        Config: waiterCfg,
+    }
+    return w.Wait()
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/service/elasticsearchservice/api.go b/vendor/github.com/aws/aws-sdk-go/service/elasticsearchservice/api.go
new file mode 100644
index 000000000..7faca62dc
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/elasticsearchservice/api.go
@@ -0,0 +1,1143 @@
+// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT.
+
+// Package elasticsearchservice provides a client for Amazon Elasticsearch Service.
+package elasticsearchservice
+
+import (
+    "time"
+
+    "github.com/aws/aws-sdk-go/aws/awsutil"
+    "github.com/aws/aws-sdk-go/aws/request"
+)
+
+const opAddTags = "AddTags"
+
+// AddTagsRequest generates a request for the AddTags operation.
+func (c *ElasticsearchService) AddTagsRequest(input *AddTagsInput) (req *request.Request, output *AddTagsOutput) {
+    op := &request.Operation{
+        Name:       opAddTags,
+        HTTPMethod: "POST",
+        HTTPPath:   "/2015-01-01/tags",
+    }
+
+    if input == nil {
+        input = &AddTagsInput{}
+    }
+
+    req = c.newRequest(op, input, output)
+    output = &AddTagsOutput{}
+    req.Data = output
+    return
+}
+
+// Attaches tags to an existing Elasticsearch domain. Tags are a set of case-sensitive
+// key-value pairs. An Elasticsearch domain may have up to 10 tags. See Tagging
+// Amazon Elasticsearch Service Domains (http://docs.aws.amazon.com/elasticsearch-service/latest/developerguide/es-managedomains.html#es-managedomains-awsresorcetagging)
+// for more information.
+func (c *ElasticsearchService) AddTags(input *AddTagsInput) (*AddTagsOutput, error) {
+    req, out := c.AddTagsRequest(input)
+    err := req.Send()
+    return out, err
+}
+
+const opCreateElasticsearchDomain = "CreateElasticsearchDomain"
+
+// CreateElasticsearchDomainRequest generates a request for the CreateElasticsearchDomain operation.
+func (c *ElasticsearchService) CreateElasticsearchDomainRequest(input *CreateElasticsearchDomainInput) (req *request.Request, output *CreateElasticsearchDomainOutput) {
+    op := &request.Operation{
+        Name:       opCreateElasticsearchDomain,
+        HTTPMethod: "POST",
+        HTTPPath:   "/2015-01-01/es/domain",
+    }
+
+    if input == nil {
+        input = &CreateElasticsearchDomainInput{}
+    }
+
+    req = c.newRequest(op, input, output)
+    output = &CreateElasticsearchDomainOutput{}
+    req.Data = output
+    return
+}
+
+// Creates a new Elasticsearch domain. For more information, see Creating Elasticsearch
+// Domains (http://docs.aws.amazon.com/elasticsearch-service/latest/developerguide/es-createupdatedomains.html#es-createdomains)
+// in the Amazon Elasticsearch Service Developer Guide.
+func (c *ElasticsearchService) CreateElasticsearchDomain(input *CreateElasticsearchDomainInput) (*CreateElasticsearchDomainOutput, error) { + req, out := c.CreateElasticsearchDomainRequest(input) + err := req.Send() + return out, err +} + +const opDeleteElasticsearchDomain = "DeleteElasticsearchDomain" + +// DeleteElasticsearchDomainRequest generates a request for the DeleteElasticsearchDomain operation. +func (c *ElasticsearchService) DeleteElasticsearchDomainRequest(input *DeleteElasticsearchDomainInput) (req *request.Request, output *DeleteElasticsearchDomainOutput) { + op := &request.Operation{ + Name: opDeleteElasticsearchDomain, + HTTPMethod: "DELETE", + HTTPPath: "/2015-01-01/es/domain/{DomainName}", + } + + if input == nil { + input = &DeleteElasticsearchDomainInput{} + } + + req = c.newRequest(op, input, output) + output = &DeleteElasticsearchDomainOutput{} + req.Data = output + return +} + +// Permanently deletes the specified Elasticsearch domain and all of its data. +// Once a domain is deleted, it cannot be recovered. +func (c *ElasticsearchService) DeleteElasticsearchDomain(input *DeleteElasticsearchDomainInput) (*DeleteElasticsearchDomainOutput, error) { + req, out := c.DeleteElasticsearchDomainRequest(input) + err := req.Send() + return out, err +} + +const opDescribeElasticsearchDomain = "DescribeElasticsearchDomain" + +// DescribeElasticsearchDomainRequest generates a request for the DescribeElasticsearchDomain operation. +func (c *ElasticsearchService) DescribeElasticsearchDomainRequest(input *DescribeElasticsearchDomainInput) (req *request.Request, output *DescribeElasticsearchDomainOutput) { + op := &request.Operation{ + Name: opDescribeElasticsearchDomain, + HTTPMethod: "GET", + HTTPPath: "/2015-01-01/es/domain/{DomainName}", + } + + if input == nil { + input = &DescribeElasticsearchDomainInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeElasticsearchDomainOutput{} + req.Data = output + return +} + +// Returns domain configuration information about the specified Elasticsearch +// domain, including the domain ID, domain endpoint, and domain ARN. +func (c *ElasticsearchService) DescribeElasticsearchDomain(input *DescribeElasticsearchDomainInput) (*DescribeElasticsearchDomainOutput, error) { + req, out := c.DescribeElasticsearchDomainRequest(input) + err := req.Send() + return out, err +} + +const opDescribeElasticsearchDomainConfig = "DescribeElasticsearchDomainConfig" + +// DescribeElasticsearchDomainConfigRequest generates a request for the DescribeElasticsearchDomainConfig operation. +func (c *ElasticsearchService) DescribeElasticsearchDomainConfigRequest(input *DescribeElasticsearchDomainConfigInput) (req *request.Request, output *DescribeElasticsearchDomainConfigOutput) { + op := &request.Operation{ + Name: opDescribeElasticsearchDomainConfig, + HTTPMethod: "GET", + HTTPPath: "/2015-01-01/es/domain/{DomainName}/config", + } + + if input == nil { + input = &DescribeElasticsearchDomainConfigInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeElasticsearchDomainConfigOutput{} + req.Data = output + return +} + +// Provides cluster configuration information about the specified Elasticsearch +// domain, such as the state, creation date, update version, and update date +// for cluster options. 
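+// Unlike DescribeElasticsearchDomain, which returns the domain's current
+// settings, each option here is paired with status metadata (see the
+// OptionStatus references on types such as AccessPoliciesStatus below), so
+// callers can tell whether a configuration change is still being applied.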
+func (c *ElasticsearchService) DescribeElasticsearchDomainConfig(input *DescribeElasticsearchDomainConfigInput) (*DescribeElasticsearchDomainConfigOutput, error) {
+	req, out := c.DescribeElasticsearchDomainConfigRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+const opDescribeElasticsearchDomains = "DescribeElasticsearchDomains"
+
+// DescribeElasticsearchDomainsRequest generates a request for the DescribeElasticsearchDomains operation.
+func (c *ElasticsearchService) DescribeElasticsearchDomainsRequest(input *DescribeElasticsearchDomainsInput) (req *request.Request, output *DescribeElasticsearchDomainsOutput) {
+	op := &request.Operation{
+		Name:       opDescribeElasticsearchDomains,
+		HTTPMethod: "POST",
+		HTTPPath:   "/2015-01-01/es/domain-info",
+	}
+
+	if input == nil {
+		input = &DescribeElasticsearchDomainsInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &DescribeElasticsearchDomainsOutput{}
+	req.Data = output
+	return
+}
+
+// Returns domain configuration information about the specified Elasticsearch
+// domains, including the domain ID, domain endpoint, and domain ARN.
+func (c *ElasticsearchService) DescribeElasticsearchDomains(input *DescribeElasticsearchDomainsInput) (*DescribeElasticsearchDomainsOutput, error) {
+	req, out := c.DescribeElasticsearchDomainsRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+const opListDomainNames = "ListDomainNames"
+
+// ListDomainNamesRequest generates a request for the ListDomainNames operation.
+func (c *ElasticsearchService) ListDomainNamesRequest(input *ListDomainNamesInput) (req *request.Request, output *ListDomainNamesOutput) {
+	op := &request.Operation{
+		Name:       opListDomainNames,
+		HTTPMethod: "GET",
+		HTTPPath:   "/2015-01-01/domain",
+	}
+
+	if input == nil {
+		input = &ListDomainNamesInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &ListDomainNamesOutput{}
+	req.Data = output
+	return
+}
+
+// Returns the names of all Elasticsearch domains owned by the current user's
+// account.
+func (c *ElasticsearchService) ListDomainNames(input *ListDomainNamesInput) (*ListDomainNamesOutput, error) {
+	req, out := c.ListDomainNamesRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+const opListTags = "ListTags"
+
+// ListTagsRequest generates a request for the ListTags operation.
+func (c *ElasticsearchService) ListTagsRequest(input *ListTagsInput) (req *request.Request, output *ListTagsOutput) {
+	op := &request.Operation{
+		Name:       opListTags,
+		HTTPMethod: "GET",
+		HTTPPath:   "/2015-01-01/tags/",
+	}
+
+	if input == nil {
+		input = &ListTagsInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &ListTagsOutput{}
+	req.Data = output
+	return
+}
+
+// Returns all tags for the given Elasticsearch domain.
+func (c *ElasticsearchService) ListTags(input *ListTagsInput) (*ListTagsOutput, error) {
+	req, out := c.ListTagsRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+const opRemoveTags = "RemoveTags"
+
+// RemoveTagsRequest generates a request for the RemoveTags operation.
+func (c *ElasticsearchService) RemoveTagsRequest(input *RemoveTagsInput) (req *request.Request, output *RemoveTagsOutput) {
+	op := &request.Operation{
+		Name:       opRemoveTags,
+		HTTPMethod: "POST",
+		HTTPPath:   "/2015-01-01/tags-removal",
+	}
+
+	if input == nil {
+		input = &RemoveTagsInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &RemoveTagsOutput{}
+	req.Data = output
+	return
+}
+
+// Removes the specified set of tags from the specified Elasticsearch domain.
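+//
+// A minimal usage sketch (the ARN and tag key are placeholders):
+//
+//	svc := elasticsearchservice.New(session.New())
+//	_, err := svc.RemoveTags(&elasticsearchservice.RemoveTagsInput{
+//		ARN:     aws.String("arn:aws:es:us-east-1:123456789012:domain/my-domain"),
+//		TagKeys: []*string{aws.String("project")},
+//	})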
+func (c *ElasticsearchService) RemoveTags(input *RemoveTagsInput) (*RemoveTagsOutput, error) {
+	req, out := c.RemoveTagsRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+const opUpdateElasticsearchDomainConfig = "UpdateElasticsearchDomainConfig"
+
+// UpdateElasticsearchDomainConfigRequest generates a request for the UpdateElasticsearchDomainConfig operation.
+func (c *ElasticsearchService) UpdateElasticsearchDomainConfigRequest(input *UpdateElasticsearchDomainConfigInput) (req *request.Request, output *UpdateElasticsearchDomainConfigOutput) {
+	op := &request.Operation{
+		Name:       opUpdateElasticsearchDomainConfig,
+		HTTPMethod: "POST",
+		HTTPPath:   "/2015-01-01/es/domain/{DomainName}/config",
+	}
+
+	if input == nil {
+		input = &UpdateElasticsearchDomainConfigInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &UpdateElasticsearchDomainConfigOutput{}
+	req.Data = output
+	return
+}
+
+// Modifies the cluster configuration of the specified Elasticsearch domain,
+// such as setting the instance type and the number of instances.
+func (c *ElasticsearchService) UpdateElasticsearchDomainConfig(input *UpdateElasticsearchDomainConfigInput) (*UpdateElasticsearchDomainConfigOutput, error) {
+	req, out := c.UpdateElasticsearchDomainConfigRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+// The configured access rules for the domain's document and search endpoints,
+// and the current status of those rules.
+type AccessPoliciesStatus struct {
+	_ struct{} `type:"structure"`
+
+	// The access policy configured for the Elasticsearch domain. Access policies
+	// may be resource-based, IP-based, or IAM-based. See Configuring Access Policies
+	// (http://docs.aws.amazon.com/elasticsearch-service/latest/developerguide/es-createupdatedomains.html#es-createdomain-configure-access-policies)
+	// for more information.
+	Options *string `type:"string" required:"true"`
+
+	// The status of the access policy for the Elasticsearch domain. See OptionStatus
+	// for the status information that's included.
+	Status *OptionStatus `type:"structure" required:"true"`
+}
+
+// String returns the string representation
+func (s AccessPoliciesStatus) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s AccessPoliciesStatus) GoString() string {
+	return s.String()
+}
+
+// Container for the parameters to the AddTags operation. Specify the tags that
+// you want to attach to the Elasticsearch domain.
+type AddTagsInput struct {
+	_ struct{} `type:"structure"`
+
+	// Specify the ARN for which you want to add the tags.
+	ARN *string `type:"string" required:"true"`
+
+	// List of Tag that need to be added for the Elasticsearch domain.
+	TagList []*Tag `type:"list" required:"true"`
+}
+
+// String returns the string representation
+func (s AddTagsInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s AddTagsInput) GoString() string {
+	return s.String()
+}
+
+type AddTagsOutput struct {
+	_ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s AddTagsOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s AddTagsOutput) GoString() string {
+	return s.String()
+}
+
+// Status of the advanced options for the specified Elasticsearch domain. Currently,
+// the following advanced options are available:
+//
+//   - Option to allow references to indices in an HTTP request body. Must be
+//     false when configuring access to individual sub-resources. By default,
+//     the value is true.
+//   - Option to specify the percentage of heap space that is allocated to
+//     field data. By default, this setting is unbounded.
+//
+// For more information, see Configuring Advanced Options (http://docs.aws.amazon.com/elasticsearch-service/latest/developerguide/es-createupdatedomains.html#es-createdomain-configure-advanced-options).
+type AdvancedOptionsStatus struct {
+	_ struct{} `type:"structure"`
+
+	// Specifies the status of advanced options for the specified Elasticsearch
+	// domain.
+	Options map[string]*string `type:"map" required:"true"`
+
+	// Specifies the status of OptionStatus for advanced options for the specified
+	// Elasticsearch domain.
+	Status *OptionStatus `type:"structure" required:"true"`
+}
+
+// String returns the string representation
+func (s AdvancedOptionsStatus) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s AdvancedOptionsStatus) GoString() string {
+	return s.String()
+}
+
+type CreateElasticsearchDomainInput struct {
+	_ struct{} `type:"structure"`
+
+	// IAM access policy as a JSON-formatted string.
+	AccessPolicies *string `type:"string"`
+
+	// Option to allow references to indices in an HTTP request body. Must be false
+	// when configuring access to individual sub-resources. By default, the value
+	// is true. See Configuring Advanced Options (http://docs.aws.amazon.com/elasticsearch-service/latest/developerguide/es-createupdatedomains.html#es-createdomain-configure-advanced-options)
+	// for more information.
+	AdvancedOptions map[string]*string `type:"map"`
+
+	// The name of the Elasticsearch domain that you are creating. Domain names
+	// are unique across the domains owned by an account within an AWS region. Domain
+	// names must start with a letter or number and can contain the following characters:
+	// a-z (lowercase), 0-9, and - (hyphen).
+	DomainName *string `min:"3" type:"string" required:"true"`
+
+	// Options to enable, disable, and specify the type and size of EBS storage volumes.
+	EBSOptions *EBSOptions `type:"structure"`
+
+	// Configuration options for an Elasticsearch domain. Specifies the instance
+	// type and number of instances in the domain cluster.
+	ElasticsearchClusterConfig *ElasticsearchClusterConfig `type:"structure"`
+
+	// Option to set time, in UTC format, of the daily automated snapshot. Default
+	// value is 0 hours.
+	SnapshotOptions *SnapshotOptions `type:"structure"`
+}
+
+// String returns the string representation
+func (s CreateElasticsearchDomainInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreateElasticsearchDomainInput) GoString() string {
+	return s.String()
+}
+
+// The result of a CreateElasticsearchDomain operation. Contains the status
+// of the newly created Elasticsearch domain.
+type CreateElasticsearchDomainOutput struct {
+	_ struct{} `type:"structure"`
+
+	// The status of the newly created Elasticsearch domain.
+ DomainStatus *ElasticsearchDomainStatus `type:"structure"` +} + +// String returns the string representation +func (s CreateElasticsearchDomainOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateElasticsearchDomainOutput) GoString() string { + return s.String() +} + +// Container for the parameters to the DeleteElasticsearchDomain operation. +// Specifies the name of the Elasticsearch domain that you want to delete. +type DeleteElasticsearchDomainInput struct { + _ struct{} `type:"structure"` + + // The name of the Elasticsearch domain that you want to permanently delete. + DomainName *string `location:"uri" locationName:"DomainName" min:"3" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteElasticsearchDomainInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteElasticsearchDomainInput) GoString() string { + return s.String() +} + +// The result of a DeleteElasticsearchDomain request. Contains the status of +// the pending deletion, or no status if the domain and all of its resources +// have been deleted. +type DeleteElasticsearchDomainOutput struct { + _ struct{} `type:"structure"` + + // The status of the Elasticsearch domain being deleted. + DomainStatus *ElasticsearchDomainStatus `type:"structure"` +} + +// String returns the string representation +func (s DeleteElasticsearchDomainOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteElasticsearchDomainOutput) GoString() string { + return s.String() +} + +// Container for the parameters to the DescribeElasticsearchDomainConfig operation. +// Specifies the domain name for which you want configuration information. +type DescribeElasticsearchDomainConfigInput struct { + _ struct{} `type:"structure"` + + // The Elasticsearch domain that you want to get information about. + DomainName *string `location:"uri" locationName:"DomainName" min:"3" type:"string" required:"true"` +} + +// String returns the string representation +func (s DescribeElasticsearchDomainConfigInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeElasticsearchDomainConfigInput) GoString() string { + return s.String() +} + +// The result of a DescribeElasticsearchDomainConfig request. Contains the configuration +// information of the requested domain. +type DescribeElasticsearchDomainConfigOutput struct { + _ struct{} `type:"structure"` + + // The configuration information of the domain requested in the DescribeElasticsearchDomainConfig + // request. + DomainConfig *ElasticsearchDomainConfig `type:"structure" required:"true"` +} + +// String returns the string representation +func (s DescribeElasticsearchDomainConfigOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeElasticsearchDomainConfigOutput) GoString() string { + return s.String() +} + +// Container for the parameters to the DescribeElasticsearchDomain operation. +type DescribeElasticsearchDomainInput struct { + _ struct{} `type:"structure"` + + // The name of the Elasticsearch domain for which you want information. 
+	DomainName *string `location:"uri" locationName:"DomainName" min:"3" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s DescribeElasticsearchDomainInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeElasticsearchDomainInput) GoString() string {
+	return s.String()
+}
+
+// The result of a DescribeElasticsearchDomain request. Contains the status
+// of the domain specified in the request.
+type DescribeElasticsearchDomainOutput struct {
+	_ struct{} `type:"structure"`
+
+	// The current status of the Elasticsearch domain.
+	DomainStatus *ElasticsearchDomainStatus `type:"structure" required:"true"`
+}
+
+// String returns the string representation
+func (s DescribeElasticsearchDomainOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeElasticsearchDomainOutput) GoString() string {
+	return s.String()
+}
+
+// Container for the parameters to the DescribeElasticsearchDomains operation.
+// By default, the API returns the status of all Elasticsearch domains.
+type DescribeElasticsearchDomainsInput struct {
+	_ struct{} `type:"structure"`
+
+	// The Elasticsearch domains for which you want information.
+	DomainNames []*string `type:"list" required:"true"`
+}
+
+// String returns the string representation
+func (s DescribeElasticsearchDomainsInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeElasticsearchDomainsInput) GoString() string {
+	return s.String()
+}
+
+// The result of a DescribeElasticsearchDomains request. Contains the status
+// of the specified domains or all domains owned by the account.
+type DescribeElasticsearchDomainsOutput struct {
+	_ struct{} `type:"structure"`
+
+	// The status of the domains requested in the DescribeElasticsearchDomains request.
+	DomainStatusList []*ElasticsearchDomainStatus `type:"list" required:"true"`
+}
+
+// String returns the string representation
+func (s DescribeElasticsearchDomainsOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeElasticsearchDomainsOutput) GoString() string {
+	return s.String()
+}
+
+type DomainInfo struct {
+	_ struct{} `type:"structure"`
+
+	// Specifies the DomainName.
+	DomainName *string `min:"3" type:"string"`
+}
+
+// String returns the string representation
+func (s DomainInfo) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DomainInfo) GoString() string {
+	return s.String()
+}
+
+// Options to enable, disable, and specify the properties of EBS storage volumes.
+// For more information, see Configuring EBS-based Storage (http://docs.aws.amazon.com/elasticsearch-service/latest/developerguide/es-createupdatedomains.html#es-createdomain-configure-ebs).
+type EBSOptions struct {
+	_ struct{} `type:"structure"`
+
+	// Specifies whether EBS-based storage is enabled.
+	EBSEnabled *bool `type:"boolean"`
+
+	// Specifies the IOPS for a Provisioned IOPS EBS volume (SSD).
+	Iops *int64 `type:"integer"`
+
+	// Integer to specify the size of an EBS volume.
+	VolumeSize *int64 `type:"integer"`
+
+	// Specifies the volume type for EBS-based storage.
+	VolumeType *string `type:"string" enum:"VolumeType"`
+}
+
+// String returns the string representation
+func (s EBSOptions) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s EBSOptions) GoString() string {
+	return s.String()
+}
+
+// Status of the EBS options for the specified Elasticsearch domain.
+type EBSOptionsStatus struct {
+	_ struct{} `type:"structure"`
+
+	// Specifies the EBS options for the specified Elasticsearch domain.
+	Options *EBSOptions `type:"structure" required:"true"`
+
+	// Specifies the status of the EBS options for the specified Elasticsearch domain.
+	Status *OptionStatus `type:"structure" required:"true"`
+}
+
+// String returns the string representation
+func (s EBSOptionsStatus) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s EBSOptionsStatus) GoString() string {
+	return s.String()
+}
+
+// Specifies the configuration for the domain cluster, such as the type and
+// number of instances.
+type ElasticsearchClusterConfig struct {
+	_ struct{} `type:"structure"`
+
+	// Total number of dedicated master nodes, active and on standby, for the cluster.
+	DedicatedMasterCount *int64 `type:"integer"`
+
+	// A boolean value to indicate whether a dedicated master node is enabled. See
+	// About Dedicated Master Nodes (http://docs.aws.amazon.com/elasticsearch-service/latest/developerguide/es-managedomains.html#es-managedomains-dedicatedmasternodes)
+	// for more information.
+	DedicatedMasterEnabled *bool `type:"boolean"`
+
+	// The instance type for a dedicated master node.
+	DedicatedMasterType *string `type:"string" enum:"ESPartitionInstanceType"`
+
+	// The number of instances in the specified domain cluster.
+	InstanceCount *int64 `type:"integer"`
+
+	// The instance type for an Elasticsearch cluster.
+	InstanceType *string `type:"string" enum:"ESPartitionInstanceType"`
+
+	// A boolean value to indicate whether zone awareness is enabled. See About
+	// Zone Awareness (http://docs.aws.amazon.com/elasticsearch-service/latest/developerguide/es-managedomains.html#es-managedomains-zoneawareness)
+	// for more information.
+	ZoneAwarenessEnabled *bool `type:"boolean"`
+}
+
+// String returns the string representation
+func (s ElasticsearchClusterConfig) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ElasticsearchClusterConfig) GoString() string {
+	return s.String()
+}
+
+// Specifies the configuration status for the specified Elasticsearch domain.
+type ElasticsearchClusterConfigStatus struct {
+	_ struct{} `type:"structure"`
+
+	// Specifies the cluster configuration for the specified Elasticsearch domain.
+	Options *ElasticsearchClusterConfig `type:"structure" required:"true"`
+
+	// Specifies the status of the configuration for the specified Elasticsearch
+	// domain.
+	Status *OptionStatus `type:"structure" required:"true"`
+}
+
+// String returns the string representation
+func (s ElasticsearchClusterConfigStatus) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ElasticsearchClusterConfigStatus) GoString() string {
+	return s.String()
+}
+
+// The configuration of an Elasticsearch domain.
+type ElasticsearchDomainConfig struct {
+	_ struct{} `type:"structure"`
+
+	// IAM access policy as a JSON-formatted string.
+	AccessPolicies *AccessPoliciesStatus `type:"structure"`
+
+	// Specifies the AdvancedOptions for the domain. See Configuring Advanced Options
+	// (http://docs.aws.amazon.com/elasticsearch-service/latest/developerguide/es-createupdatedomains.html#es-createdomain-configure-advanced-options)
+	// for more information.
+	AdvancedOptions *AdvancedOptionsStatus `type:"structure"`
+
+	// Specifies the EBSOptions for the Elasticsearch domain.
+	EBSOptions *EBSOptionsStatus `type:"structure"`
+
+	// Specifies the ElasticsearchClusterConfig for the Elasticsearch domain.
+	ElasticsearchClusterConfig *ElasticsearchClusterConfigStatus `type:"structure"`
+
+	// Specifies the SnapshotOptions for the Elasticsearch domain.
+	SnapshotOptions *SnapshotOptionsStatus `type:"structure"`
+}
+
+// String returns the string representation
+func (s ElasticsearchDomainConfig) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ElasticsearchDomainConfig) GoString() string {
+	return s.String()
+}
+
+// The current status of an Elasticsearch domain.
+type ElasticsearchDomainStatus struct {
+	_ struct{} `type:"structure"`
+
+	// The Amazon Resource Name (ARN) of an Elasticsearch domain. See Identifiers
+	// for IAM Entities (http://docs.aws.amazon.com/IAM/latest/UserGuide/index.html?Using_Identifiers.html)
+	// in Using AWS Identity and Access Management for more information.
+	ARN *string `type:"string" required:"true"`
+
+	// IAM access policy as a JSON-formatted string.
+	AccessPolicies *string `type:"string"`
+
+	// Specifies the status of the AdvancedOptions.
+	AdvancedOptions map[string]*string `type:"map"`
+
+	// The domain creation status. True if the creation of an Elasticsearch domain
+	// is complete. False if domain creation is still in progress.
+	Created *bool `type:"boolean"`
+
+	// The domain deletion status. True if a delete request has been received for
+	// the domain but resource cleanup is still in progress. False if the domain
+	// has not been deleted. Once domain deletion is complete, the status of the
+	// domain is no longer returned.
+	Deleted *bool `type:"boolean"`
+
+	// The unique identifier for the specified Elasticsearch domain.
+	DomainId *string `min:"1" type:"string" required:"true"`
+
+	// The name of an Elasticsearch domain. Domain names are unique across the domains
+	// owned by an account within an AWS region. Domain names start with a letter
+	// or number and can contain the following characters: a-z (lowercase), 0-9,
+	// and - (hyphen).
+	DomainName *string `min:"3" type:"string" required:"true"`
+
+	// The EBSOptions for the specified domain. See Configuring EBS-based Storage
+	// (http://docs.aws.amazon.com/elasticsearch-service/latest/developerguide/es-createupdatedomains.html#es-createdomain-configure-ebs)
+	// for more information.
+	EBSOptions *EBSOptions `type:"structure"`
+
+	// The type and number of instances in the domain cluster.
+	ElasticsearchClusterConfig *ElasticsearchClusterConfig `type:"structure" required:"true"`
+
+	// The Elasticsearch domain endpoint that you use to submit index and search
+	// requests.
+	Endpoint *string `type:"string"`
+
+	// The status of the Elasticsearch domain configuration. True if Amazon Elasticsearch
+	// Service is processing configuration changes. False if the configuration is
+	// active.
+	Processing *bool `type:"boolean"`
+
+	// Specifies the status of the SnapshotOptions.
+	SnapshotOptions *SnapshotOptions `type:"structure"`
+}
+
+// String returns the string representation
+func (s ElasticsearchDomainStatus) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ElasticsearchDomainStatus) GoString() string {
+	return s.String()
+}
+
+type ListDomainNamesInput struct {
+	_ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s ListDomainNamesInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListDomainNamesInput) GoString() string {
+	return s.String()
+}
+
+// The result of a ListDomainNames operation. Contains the names of all Elasticsearch
+// domains owned by this account.
+type ListDomainNamesOutput struct {
+	_ struct{} `type:"structure"`
+
+	// List of Elasticsearch domain names.
+	DomainNames []*DomainInfo `type:"list"`
+}
+
+// String returns the string representation
+func (s ListDomainNamesOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListDomainNamesOutput) GoString() string {
+	return s.String()
+}
+
+// Container for the parameters to the ListTags operation. Specify the ARN
+// of the Elasticsearch domain whose attached tags you want to view.
+type ListTagsInput struct {
+	_ struct{} `type:"structure"`
+
+	// Specify the ARN of the Elasticsearch domain whose attached tags you want
+	// to view.
+	ARN *string `location:"querystring" locationName:"arn" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s ListTagsInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListTagsInput) GoString() string {
+	return s.String()
+}
+
+// The result of a ListTags operation. Contains tags for all requested Elasticsearch
+// domains.
+type ListTagsOutput struct {
+	_ struct{} `type:"structure"`
+
+	// List of Tag for the requested Elasticsearch domain.
+	TagList []*Tag `type:"list"`
+}
+
+// String returns the string representation
+func (s ListTagsOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListTagsOutput) GoString() string {
+	return s.String()
+}
+
+// Provides the current status of the entity.
+type OptionStatus struct {
+	_ struct{} `type:"structure"`
+
+	// The timestamp of when the entity was created.
+	CreationDate *time.Time `type:"timestamp" timestampFormat:"unix" required:"true"`
+
+	// Indicates whether the Elasticsearch domain is being deleted.
+	PendingDeletion *bool `type:"boolean"`
+
+	// Provides the OptionState for the Elasticsearch domain.
+	State *string `type:"string" required:"true" enum:"OptionState"`
+
+	// The timestamp of the last time the entity was updated.
+	UpdateDate *time.Time `type:"timestamp" timestampFormat:"unix" required:"true"`
+
+	// Specifies the latest version for the entity.
+	UpdateVersion *int64 `type:"integer"`
+}
+
+// String returns the string representation
+func (s OptionStatus) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s OptionStatus) GoString() string {
+	return s.String()
+}
+
+// Container for the parameters to the RemoveTags operation. Specify the ARN
+// for the Elasticsearch domain from which you want to remove the specified
+// TagKey.
+type RemoveTagsInput struct {
+	_ struct{} `type:"structure"`
+
+	// Specifies the ARN for the Elasticsearch domain from which you want to delete
+	// the specified tags.
+	ARN *string `type:"string" required:"true"`
+
+	// Specifies the TagKey list which you want to remove from the Elasticsearch
+	// domain.
+	TagKeys []*string `type:"list" required:"true"`
+}
+
+// String returns the string representation
+func (s RemoveTagsInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s RemoveTagsInput) GoString() string {
+	return s.String()
+}
+
+type RemoveTagsOutput struct {
+	_ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s RemoveTagsOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s RemoveTagsOutput) GoString() string {
+	return s.String()
+}
+
+// Specifies the time, in UTC format, when the service takes a daily automated
+// snapshot of the specified Elasticsearch domain. Default value is 0 hours.
+type SnapshotOptions struct {
+	_ struct{} `type:"structure"`
+
+	// Specifies the time, in UTC format, when the service takes a daily automated
+	// snapshot of the specified Elasticsearch domain. Default value is 0 hours.
+	AutomatedSnapshotStartHour *int64 `type:"integer"`
+}
+
+// String returns the string representation
+func (s SnapshotOptions) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s SnapshotOptions) GoString() string {
+	return s.String()
+}
+
+// Status of a daily automated snapshot.
+type SnapshotOptionsStatus struct {
+	_ struct{} `type:"structure"`
+
+	// Specifies the daily snapshot options specified for the Elasticsearch domain.
+	Options *SnapshotOptions `type:"structure" required:"true"`
+
+	// Specifies the status of a daily automated snapshot.
+	Status *OptionStatus `type:"structure" required:"true"`
+}
+
+// String returns the string representation
+func (s SnapshotOptionsStatus) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s SnapshotOptionsStatus) GoString() string {
+	return s.String()
+}
+
+// Specifies a key-value pair for a resource tag.
+type Tag struct {
+	_ struct{} `type:"structure"`
+
+	// Specifies the TagKey, the name of the tag. Tag keys must be unique for the
+	// Elasticsearch domain to which they are attached.
+	Key *string `min:"1" type:"string" required:"true"`
+
+	// Specifies the TagValue, the value assigned to the corresponding tag key.
+	// Tag values can be null and do not have to be unique in a tag set. For example,
+	// you can have a key-value pair in a tag set of project : Trinity and cost-center
+	// : Trinity.
+	Value *string `type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s Tag) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s Tag) GoString() string {
+	return s.String()
+}
+
+// Container for the parameters to the UpdateElasticsearchDomainConfig operation.
+// Specifies the type and number of instances in the domain cluster.
+type UpdateElasticsearchDomainConfigInput struct {
+	_ struct{} `type:"structure"`
+
+	// IAM access policy as a JSON-formatted string.
+	AccessPolicies *string `type:"string"`
+
+	// Modifies the advanced option to allow references to indices in an HTTP request
+	// body. Must be false when configuring access to individual sub-resources.
+	// By default, the value is true. See Configuring Advanced Options (http://docs.aws.amazon.com/elasticsearch-service/latest/developerguide/es-createupdatedomains.html#es-createdomain-configure-advanced-options)
+	// for more information.
+	AdvancedOptions map[string]*string `type:"map"`
+
+	// The name of the Elasticsearch domain that you are updating.
+	DomainName *string `location:"uri" locationName:"DomainName" min:"3" type:"string" required:"true"`
+
+	// Specify the type and size of the EBS volume that you want to use.
+	EBSOptions *EBSOptions `type:"structure"`
+
+	// The type and number of instances to instantiate for the domain cluster.
+	ElasticsearchClusterConfig *ElasticsearchClusterConfig `type:"structure"`
+
+	// Option to set the time, in UTC format, for the daily automated snapshot.
+	// Default value is 0 hours.
+	SnapshotOptions *SnapshotOptions `type:"structure"`
+}
+
+// String returns the string representation
+func (s UpdateElasticsearchDomainConfigInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s UpdateElasticsearchDomainConfigInput) GoString() string {
+	return s.String()
+}
+
+// The result of an UpdateElasticsearchDomainConfig request. Contains the status
+// of the Elasticsearch domain being updated.
+type UpdateElasticsearchDomainConfigOutput struct {
+	_ struct{} `type:"structure"`
+
+	// The status of the updated Elasticsearch domain.
+	DomainConfig *ElasticsearchDomainConfig `type:"structure" required:"true"`
+}
+
+// String returns the string representation
+func (s UpdateElasticsearchDomainConfigOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s UpdateElasticsearchDomainConfigOutput) GoString() string {
+	return s.String()
+}
+
+const (
+	// @enum ESPartitionInstanceType
+	ESPartitionInstanceTypeM3MediumElasticsearch = "m3.medium.elasticsearch"
+	// @enum ESPartitionInstanceType
+	ESPartitionInstanceTypeM3LargeElasticsearch = "m3.large.elasticsearch"
+	// @enum ESPartitionInstanceType
+	ESPartitionInstanceTypeM3XlargeElasticsearch = "m3.xlarge.elasticsearch"
+	// @enum ESPartitionInstanceType
+	ESPartitionInstanceTypeM32xlargeElasticsearch = "m3.2xlarge.elasticsearch"
+	// @enum ESPartitionInstanceType
+	ESPartitionInstanceTypeT2MicroElasticsearch = "t2.micro.elasticsearch"
+	// @enum ESPartitionInstanceType
+	ESPartitionInstanceTypeT2SmallElasticsearch = "t2.small.elasticsearch"
+	// @enum ESPartitionInstanceType
+	ESPartitionInstanceTypeT2MediumElasticsearch = "t2.medium.elasticsearch"
+	// @enum ESPartitionInstanceType
+	ESPartitionInstanceTypeR3LargeElasticsearch = "r3.large.elasticsearch"
+	// @enum ESPartitionInstanceType
+	ESPartitionInstanceTypeR3XlargeElasticsearch = "r3.xlarge.elasticsearch"
+	// @enum ESPartitionInstanceType
+	ESPartitionInstanceTypeR32xlargeElasticsearch = "r3.2xlarge.elasticsearch"
+	// @enum ESPartitionInstanceType
+	ESPartitionInstanceTypeR34xlargeElasticsearch = "r3.4xlarge.elasticsearch"
+	// @enum ESPartitionInstanceType
+	ESPartitionInstanceTypeR38xlargeElasticsearch = "r3.8xlarge.elasticsearch"
+	// @enum ESPartitionInstanceType
+	ESPartitionInstanceTypeI2XlargeElasticsearch = "i2.xlarge.elasticsearch"
+	// @enum ESPartitionInstanceType
+	ESPartitionInstanceTypeI22xlargeElasticsearch = "i2.2xlarge.elasticsearch"
+)
+
+// The state of a requested change. One of the following:
+//
+//   - Processing: The requested change is still in progress.
+//   - Active: The requested change has been processed and deployed to the
+//     Elasticsearch domain.
+const (
+	// @enum OptionState
+	OptionStateRequiresIndexDocuments = "RequiresIndexDocuments"
+	// @enum OptionState
+	OptionStateProcessing = "Processing"
+	// @enum OptionState
+	OptionStateActive = "Active"
+)
+
+// The type of EBS volume: standard, gp2, or io1. See Configuring EBS-based
+// Storage (http://docs.aws.amazon.com/elasticsearch-service/latest/developerguide/es-createupdatedomains.html#es-createdomain-configure-ebs)
+// for more information.
+const (
+	// @enum VolumeType
+	VolumeTypeStandard = "standard"
+	// @enum VolumeType
+	VolumeTypeGp2 = "gp2"
+	// @enum VolumeType
+	VolumeTypeIo1 = "io1"
+)
diff --git a/vendor/github.com/aws/aws-sdk-go/service/elasticsearchservice/elasticsearchserviceiface/interface.go b/vendor/github.com/aws/aws-sdk-go/service/elasticsearchservice/elasticsearchserviceiface/interface.go
new file mode 100644
index 000000000..974a57852
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/elasticsearchservice/elasticsearchserviceiface/interface.go
@@ -0,0 +1,54 @@
+// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT.
+
+// Package elasticsearchserviceiface provides an interface for the Amazon Elasticsearch Service.
+package elasticsearchserviceiface
+
+import (
+	"github.com/aws/aws-sdk-go/aws/request"
+	"github.com/aws/aws-sdk-go/service/elasticsearchservice"
+)
+
+// ElasticsearchServiceAPI is the interface type for elasticsearchservice.ElasticsearchService.
+type ElasticsearchServiceAPI interface {
+	AddTagsRequest(*elasticsearchservice.AddTagsInput) (*request.Request, *elasticsearchservice.AddTagsOutput)
+
+	AddTags(*elasticsearchservice.AddTagsInput) (*elasticsearchservice.AddTagsOutput, error)
+
+	CreateElasticsearchDomainRequest(*elasticsearchservice.CreateElasticsearchDomainInput) (*request.Request, *elasticsearchservice.CreateElasticsearchDomainOutput)
+
+	CreateElasticsearchDomain(*elasticsearchservice.CreateElasticsearchDomainInput) (*elasticsearchservice.CreateElasticsearchDomainOutput, error)
+
+	DeleteElasticsearchDomainRequest(*elasticsearchservice.DeleteElasticsearchDomainInput) (*request.Request, *elasticsearchservice.DeleteElasticsearchDomainOutput)
+
+	DeleteElasticsearchDomain(*elasticsearchservice.DeleteElasticsearchDomainInput) (*elasticsearchservice.DeleteElasticsearchDomainOutput, error)
+
+	DescribeElasticsearchDomainRequest(*elasticsearchservice.DescribeElasticsearchDomainInput) (*request.Request, *elasticsearchservice.DescribeElasticsearchDomainOutput)
+
+	DescribeElasticsearchDomain(*elasticsearchservice.DescribeElasticsearchDomainInput) (*elasticsearchservice.DescribeElasticsearchDomainOutput, error)
+
+	DescribeElasticsearchDomainConfigRequest(*elasticsearchservice.DescribeElasticsearchDomainConfigInput) (*request.Request, *elasticsearchservice.DescribeElasticsearchDomainConfigOutput)
+
+	DescribeElasticsearchDomainConfig(*elasticsearchservice.DescribeElasticsearchDomainConfigInput) (*elasticsearchservice.DescribeElasticsearchDomainConfigOutput, error)
+
+	DescribeElasticsearchDomainsRequest(*elasticsearchservice.DescribeElasticsearchDomainsInput) (*request.Request, *elasticsearchservice.DescribeElasticsearchDomainsOutput)
+
+	DescribeElasticsearchDomains(*elasticsearchservice.DescribeElasticsearchDomainsInput) (*elasticsearchservice.DescribeElasticsearchDomainsOutput, error)
+
+	ListDomainNamesRequest(*elasticsearchservice.ListDomainNamesInput) (*request.Request, *elasticsearchservice.ListDomainNamesOutput)
+
+	ListDomainNames(*elasticsearchservice.ListDomainNamesInput) (*elasticsearchservice.ListDomainNamesOutput, error)
+
+	ListTagsRequest(*elasticsearchservice.ListTagsInput) (*request.Request, *elasticsearchservice.ListTagsOutput)
+
+	ListTags(*elasticsearchservice.ListTagsInput) (*elasticsearchservice.ListTagsOutput, error)
+
+	RemoveTagsRequest(*elasticsearchservice.RemoveTagsInput) (*request.Request, *elasticsearchservice.RemoveTagsOutput)
+
+	RemoveTags(*elasticsearchservice.RemoveTagsInput) (*elasticsearchservice.RemoveTagsOutput, error)
+
+	UpdateElasticsearchDomainConfigRequest(*elasticsearchservice.UpdateElasticsearchDomainConfigInput) (*request.Request, *elasticsearchservice.UpdateElasticsearchDomainConfigOutput)
+
+	UpdateElasticsearchDomainConfig(*elasticsearchservice.UpdateElasticsearchDomainConfigInput) (*elasticsearchservice.UpdateElasticsearchDomainConfigOutput, error)
+}
+
+var _ ElasticsearchServiceAPI = (*elasticsearchservice.ElasticsearchService)(nil)
diff --git a/vendor/github.com/aws/aws-sdk-go/service/elasticsearchservice/examples_test.go b/vendor/github.com/aws/aws-sdk-go/service/elasticsearchservice/examples_test.go
new file mode 100644
index 000000000..8ca0ea7fc
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/elasticsearchservice/examples_test.go
@@ -0,0 +1,262 @@
+// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT.
+
+package elasticsearchservice_test
+
+import (
+	"bytes"
+	"fmt"
+	"time"
+
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/aws/session"
+	"github.com/aws/aws-sdk-go/service/elasticsearchservice"
+)
+
+var _ time.Duration
+var _ bytes.Buffer
+
+func ExampleElasticsearchService_AddTags() {
+	svc := elasticsearchservice.New(session.New())
+
+	params := &elasticsearchservice.AddTagsInput{
+		ARN: aws.String("ARN"), // Required
+		TagList: []*elasticsearchservice.Tag{ // Required
+			{ // Required
+				Key:   aws.String("TagKey"),   // Required
+				Value: aws.String("TagValue"), // Required
+			},
+			// More values...
+		},
+	}
+	resp, err := svc.AddTags(params)
+
+	if err != nil {
+		// Print the error, cast err to awserr.Error to get the Code and
+		// Message from an error.
+		fmt.Println(err.Error())
+		return
+	}
+
+	// Pretty-print the response data.
+	fmt.Println(resp)
+}
+
+func ExampleElasticsearchService_CreateElasticsearchDomain() {
+	svc := elasticsearchservice.New(session.New())
+
+	params := &elasticsearchservice.CreateElasticsearchDomainInput{
+		DomainName:     aws.String("DomainName"), // Required
+		AccessPolicies: aws.String("PolicyDocument"),
+		AdvancedOptions: map[string]*string{
+			"Key": aws.String("String"), // Required
+			// More values...
+ }, + EBSOptions: &elasticsearchservice.EBSOptions{ + EBSEnabled: aws.Bool(true), + Iops: aws.Int64(1), + VolumeSize: aws.Int64(1), + VolumeType: aws.String("VolumeType"), + }, + ElasticsearchClusterConfig: &elasticsearchservice.ElasticsearchClusterConfig{ + DedicatedMasterCount: aws.Int64(1), + DedicatedMasterEnabled: aws.Bool(true), + DedicatedMasterType: aws.String("ESPartitionInstanceType"), + InstanceCount: aws.Int64(1), + InstanceType: aws.String("ESPartitionInstanceType"), + ZoneAwarenessEnabled: aws.Bool(true), + }, + SnapshotOptions: &elasticsearchservice.SnapshotOptions{ + AutomatedSnapshotStartHour: aws.Int64(1), + }, + } + resp, err := svc.CreateElasticsearchDomain(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleElasticsearchService_DeleteElasticsearchDomain() { + svc := elasticsearchservice.New(session.New()) + + params := &elasticsearchservice.DeleteElasticsearchDomainInput{ + DomainName: aws.String("DomainName"), // Required + } + resp, err := svc.DeleteElasticsearchDomain(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleElasticsearchService_DescribeElasticsearchDomain() { + svc := elasticsearchservice.New(session.New()) + + params := &elasticsearchservice.DescribeElasticsearchDomainInput{ + DomainName: aws.String("DomainName"), // Required + } + resp, err := svc.DescribeElasticsearchDomain(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleElasticsearchService_DescribeElasticsearchDomainConfig() { + svc := elasticsearchservice.New(session.New()) + + params := &elasticsearchservice.DescribeElasticsearchDomainConfigInput{ + DomainName: aws.String("DomainName"), // Required + } + resp, err := svc.DescribeElasticsearchDomainConfig(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleElasticsearchService_DescribeElasticsearchDomains() { + svc := elasticsearchservice.New(session.New()) + + params := &elasticsearchservice.DescribeElasticsearchDomainsInput{ + DomainNames: []*string{ // Required + aws.String("DomainName"), // Required + // More values... + }, + } + resp, err := svc.DescribeElasticsearchDomains(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleElasticsearchService_ListDomainNames() { + svc := elasticsearchservice.New(session.New()) + + var params *elasticsearchservice.ListDomainNamesInput + resp, err := svc.ListDomainNames(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+	fmt.Println(resp)
+}
+
+func ExampleElasticsearchService_ListTags() {
+	svc := elasticsearchservice.New(session.New())
+
+	params := &elasticsearchservice.ListTagsInput{
+		ARN: aws.String("ARN"), // Required
+	}
+	resp, err := svc.ListTags(params)
+
+	if err != nil {
+		// Print the error, cast err to awserr.Error to get the Code and
+		// Message from an error.
+		fmt.Println(err.Error())
+		return
+	}
+
+	// Pretty-print the response data.
+	fmt.Println(resp)
+}
+
+func ExampleElasticsearchService_RemoveTags() {
+	svc := elasticsearchservice.New(session.New())
+
+	params := &elasticsearchservice.RemoveTagsInput{
+		ARN: aws.String("ARN"), // Required
+		TagKeys: []*string{ // Required
+			aws.String("String"), // Required
+			// More values...
+		},
+	}
+	resp, err := svc.RemoveTags(params)
+
+	if err != nil {
+		// Print the error, cast err to awserr.Error to get the Code and
+		// Message from an error.
+		fmt.Println(err.Error())
+		return
+	}
+
+	// Pretty-print the response data.
+	fmt.Println(resp)
+}
+
+func ExampleElasticsearchService_UpdateElasticsearchDomainConfig() {
+	svc := elasticsearchservice.New(session.New())
+
+	params := &elasticsearchservice.UpdateElasticsearchDomainConfigInput{
+		DomainName:     aws.String("DomainName"), // Required
+		AccessPolicies: aws.String("PolicyDocument"),
+		AdvancedOptions: map[string]*string{
+			"Key": aws.String("String"), // Required
+			// More values...
+		},
+		EBSOptions: &elasticsearchservice.EBSOptions{
+			EBSEnabled: aws.Bool(true),
+			Iops:       aws.Int64(1),
+			VolumeSize: aws.Int64(1),
+			VolumeType: aws.String("VolumeType"),
+		},
+		ElasticsearchClusterConfig: &elasticsearchservice.ElasticsearchClusterConfig{
+			DedicatedMasterCount:   aws.Int64(1),
+			DedicatedMasterEnabled: aws.Bool(true),
+			DedicatedMasterType:    aws.String("ESPartitionInstanceType"),
+			InstanceCount:          aws.Int64(1),
+			InstanceType:           aws.String("ESPartitionInstanceType"),
+			ZoneAwarenessEnabled:   aws.Bool(true),
+		},
+		SnapshotOptions: &elasticsearchservice.SnapshotOptions{
+			AutomatedSnapshotStartHour: aws.Int64(1),
+		},
+	}
+	resp, err := svc.UpdateElasticsearchDomainConfig(params)
+
+	if err != nil {
+		// Print the error, cast err to awserr.Error to get the Code and
+		// Message from an error.
+		fmt.Println(err.Error())
+		return
+	}
+
+	// Pretty-print the response data.
+	fmt.Println(resp)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/service/elasticsearchservice/service.go b/vendor/github.com/aws/aws-sdk-go/service/elasticsearchservice/service.go
new file mode 100644
index 000000000..082211fce
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/elasticsearchservice/service.go
@@ -0,0 +1,92 @@
+// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT.
+
+package elasticsearchservice
+
+import (
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/aws/client"
+	"github.com/aws/aws-sdk-go/aws/client/metadata"
+	"github.com/aws/aws-sdk-go/aws/request"
+	"github.com/aws/aws-sdk-go/private/protocol/restjson"
+	"github.com/aws/aws-sdk-go/private/signer/v4"
+)
+
+// Use the Amazon Elasticsearch configuration API to create, configure, and
+// manage Elasticsearch domains.
+//
+// The endpoint for configuration service requests is region-specific: es.region.amazonaws.com.
+// For example, es.us-east-1.amazonaws.com. For a current list of supported
+// regions and endpoints, see Regions and Endpoints (http://docs.aws.amazon.com/general/latest/gr/rande.html#cloudsearch_region).
+// The service client's operations are safe to be used concurrently.
+// It is not safe to mutate any of the client's properties though. +type ElasticsearchService struct { + *client.Client +} + +// Used for custom client initialization logic +var initClient func(*client.Client) + +// Used for custom request initialization logic +var initRequest func(*request.Request) + +// A ServiceName is the name of the service the client will make API calls to. +const ServiceName = "es" + +// New creates a new instance of the ElasticsearchService client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a ElasticsearchService client from just a session. +// svc := elasticsearchservice.New(mySession) +// +// // Create a ElasticsearchService client with additional configuration +// svc := elasticsearchservice.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func New(p client.ConfigProvider, cfgs ...*aws.Config) *ElasticsearchService { + c := p.ClientConfig(ServiceName, cfgs...) + return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *ElasticsearchService { + svc := &ElasticsearchService{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: ServiceName, + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "2015-01-01", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Build.PushBack(restjson.Build) + svc.Handlers.Unmarshal.PushBack(restjson.Unmarshal) + svc.Handlers.UnmarshalMeta.PushBack(restjson.UnmarshalMeta) + svc.Handlers.UnmarshalError.PushBack(restjson.UnmarshalError) + + // Run custom client initialization if present + if initClient != nil { + initClient(svc.Client) + } + + return svc +} + +// newRequest creates a new request for a ElasticsearchService operation and runs any +// custom request initialization. +func (c *ElasticsearchService) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + // Run custom request initialization if present + if initRequest != nil { + initRequest(req) + } + + return req +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/elb/api.go b/vendor/github.com/aws/aws-sdk-go/service/elb/api.go new file mode 100644 index 000000000..a1c7b7e60 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/elb/api.go @@ -0,0 +1,2784 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +// Package elb provides a client for Elastic Load Balancing. +package elb + +import ( + "time" + + "github.com/aws/aws-sdk-go/aws/awsutil" + "github.com/aws/aws-sdk-go/aws/request" +) + +const opAddTags = "AddTags" + +// AddTagsRequest generates a request for the AddTags operation. +func (c *ELB) AddTagsRequest(input *AddTagsInput) (req *request.Request, output *AddTagsOutput) { + op := &request.Operation{ + Name: opAddTags, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &AddTagsInput{} + } + + req = c.newRequest(op, input, output) + output = &AddTagsOutput{} + req.Data = output + return +} + +// Adds the specified tags to the specified load balancer. Each load balancer +// can have a maximum of 10 tags. +// +// Each tag consists of a key and an optional value. If a tag with the same +// key is already associated with the load balancer, AddTags updates its value. 
+// +// For more information, see Tag Your Load Balancer (http://docs.aws.amazon.com/ElasticLoadBalancing/latest/DeveloperGuide/add-remove-tags.html) +// in the Elastic Load Balancing Developer Guide. +func (c *ELB) AddTags(input *AddTagsInput) (*AddTagsOutput, error) { + req, out := c.AddTagsRequest(input) + err := req.Send() + return out, err +} + +const opApplySecurityGroupsToLoadBalancer = "ApplySecurityGroupsToLoadBalancer" + +// ApplySecurityGroupsToLoadBalancerRequest generates a request for the ApplySecurityGroupsToLoadBalancer operation. +func (c *ELB) ApplySecurityGroupsToLoadBalancerRequest(input *ApplySecurityGroupsToLoadBalancerInput) (req *request.Request, output *ApplySecurityGroupsToLoadBalancerOutput) { + op := &request.Operation{ + Name: opApplySecurityGroupsToLoadBalancer, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ApplySecurityGroupsToLoadBalancerInput{} + } + + req = c.newRequest(op, input, output) + output = &ApplySecurityGroupsToLoadBalancerOutput{} + req.Data = output + return +} + +// Associates one or more security groups with your load balancer in a virtual +// private cloud (VPC). The specified security groups override the previously +// associated security groups. +// +// For more information, see Security Groups for Load Balancers in a VPC (http://docs.aws.amazon.com/ElasticLoadBalancing/latest/DeveloperGuide/elb-security-groups.html#elb-vpc-security-groups) +// in the Elastic Load Balancing Developer Guide. +func (c *ELB) ApplySecurityGroupsToLoadBalancer(input *ApplySecurityGroupsToLoadBalancerInput) (*ApplySecurityGroupsToLoadBalancerOutput, error) { + req, out := c.ApplySecurityGroupsToLoadBalancerRequest(input) + err := req.Send() + return out, err +} + +const opAttachLoadBalancerToSubnets = "AttachLoadBalancerToSubnets" + +// AttachLoadBalancerToSubnetsRequest generates a request for the AttachLoadBalancerToSubnets operation. +func (c *ELB) AttachLoadBalancerToSubnetsRequest(input *AttachLoadBalancerToSubnetsInput) (req *request.Request, output *AttachLoadBalancerToSubnetsOutput) { + op := &request.Operation{ + Name: opAttachLoadBalancerToSubnets, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &AttachLoadBalancerToSubnetsInput{} + } + + req = c.newRequest(op, input, output) + output = &AttachLoadBalancerToSubnetsOutput{} + req.Data = output + return +} + +// Adds one or more subnets to the set of configured subnets for the specified +// load balancer. +// +// The load balancer evenly distributes requests across all registered subnets. +// For more information, see Add or Remove Subnets for Your Load Balancer in +// a VPC (http://docs.aws.amazon.com/ElasticLoadBalancing/latest/DeveloperGuide/elb-manage-subnets.html) +// in the Elastic Load Balancing Developer Guide. +func (c *ELB) AttachLoadBalancerToSubnets(input *AttachLoadBalancerToSubnetsInput) (*AttachLoadBalancerToSubnetsOutput, error) { + req, out := c.AttachLoadBalancerToSubnetsRequest(input) + err := req.Send() + return out, err +} + +const opConfigureHealthCheck = "ConfigureHealthCheck" + +// ConfigureHealthCheckRequest generates a request for the ConfigureHealthCheck operation. 
+func (c *ELB) ConfigureHealthCheckRequest(input *ConfigureHealthCheckInput) (req *request.Request, output *ConfigureHealthCheckOutput) {
+	op := &request.Operation{
+		Name:       opConfigureHealthCheck,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &ConfigureHealthCheckInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &ConfigureHealthCheckOutput{}
+	req.Data = output
+	return
+}
+
+// Specifies the health check settings to use when evaluating the health state
+// of your back-end instances.
+//
+// For more information, see Configure Health Checks (http://docs.aws.amazon.com/ElasticLoadBalancing/latest/DeveloperGuide/elb-healthchecks.html)
+// in the Elastic Load Balancing Developer Guide.
+func (c *ELB) ConfigureHealthCheck(input *ConfigureHealthCheckInput) (*ConfigureHealthCheckOutput, error) {
+	req, out := c.ConfigureHealthCheckRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+const opCreateAppCookieStickinessPolicy = "CreateAppCookieStickinessPolicy"
+
+// CreateAppCookieStickinessPolicyRequest generates a request for the CreateAppCookieStickinessPolicy operation.
+func (c *ELB) CreateAppCookieStickinessPolicyRequest(input *CreateAppCookieStickinessPolicyInput) (req *request.Request, output *CreateAppCookieStickinessPolicyOutput) {
+	op := &request.Operation{
+		Name:       opCreateAppCookieStickinessPolicy,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &CreateAppCookieStickinessPolicyInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &CreateAppCookieStickinessPolicyOutput{}
+	req.Data = output
+	return
+}
+
+// Generates a stickiness policy with sticky session lifetimes that follow the
+// lifetime of an application-generated cookie. This policy can be associated
+// only with HTTP/HTTPS listeners.
+//
+// This policy is similar to the policy created by CreateLBCookieStickinessPolicy,
+// except that the lifetime of the special Elastic Load Balancing cookie, AWSELB,
+// follows the lifetime of the application-generated cookie specified in the
+// policy configuration. The load balancer only inserts a new stickiness cookie
+// when the application response includes a new application cookie.
+//
+// If the application cookie is explicitly removed or expires, the session
+// stops being sticky until a new application cookie is issued.
+//
+// For more information, see Application-Controlled Session Stickiness (http://docs.aws.amazon.com/ElasticLoadBalancing/latest/DeveloperGuide/elb-sticky-sessions.html#enable-sticky-sessions-application)
+// in the Elastic Load Balancing Developer Guide.
+func (c *ELB) CreateAppCookieStickinessPolicy(input *CreateAppCookieStickinessPolicyInput) (*CreateAppCookieStickinessPolicyOutput, error) {
+	req, out := c.CreateAppCookieStickinessPolicyRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+const opCreateLBCookieStickinessPolicy = "CreateLBCookieStickinessPolicy"
+
+// CreateLBCookieStickinessPolicyRequest generates a request for the CreateLBCookieStickinessPolicy operation.
+func (c *ELB) CreateLBCookieStickinessPolicyRequest(input *CreateLBCookieStickinessPolicyInput) (req *request.Request, output *CreateLBCookieStickinessPolicyOutput) { + op := &request.Operation{ + Name: opCreateLBCookieStickinessPolicy, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateLBCookieStickinessPolicyInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateLBCookieStickinessPolicyOutput{} + req.Data = output + return +} + +// Generates a stickiness policy with sticky session lifetimes controlled by +// the lifetime of the browser (user-agent) or a specified expiration period. +// This policy can be associated only with HTTP/HTTPS listeners. +// +// When a load balancer implements this policy, the load balancer uses a special +// cookie to track the back-end server instance for each request. When the load +// balancer receives a request, it first checks to see if this cookie is present +// in the request. If so, the load balancer sends the request to the application +// server specified in the cookie. If not, the load balancer sends the request +// to a server that is chosen based on the existing load-balancing algorithm. +// +// A cookie is inserted into the response for binding subsequent requests from +// the same user to that server. The validity of the cookie is based on the +// cookie expiration time, which is specified in the policy configuration. +// +// For more information, see Duration-Based Session Stickiness (http://docs.aws.amazon.com/ElasticLoadBalancing/latest/DeveloperGuide/elb-sticky-sessions.html#enable-sticky-sessions-duration) +// in the Elastic Load Balancing Developer Guide. +func (c *ELB) CreateLBCookieStickinessPolicy(input *CreateLBCookieStickinessPolicyInput) (*CreateLBCookieStickinessPolicyOutput, error) { + req, out := c.CreateLBCookieStickinessPolicyRequest(input) + err := req.Send() + return out, err +} + +const opCreateLoadBalancer = "CreateLoadBalancer" + +// CreateLoadBalancerRequest generates a request for the CreateLoadBalancer operation. +func (c *ELB) CreateLoadBalancerRequest(input *CreateLoadBalancerInput) (req *request.Request, output *CreateLoadBalancerOutput) { + op := &request.Operation{ + Name: opCreateLoadBalancer, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateLoadBalancerInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateLoadBalancerOutput{} + req.Data = output + return +} + +// Creates a load balancer. +// +// If the call completes successfully, a new load balancer is created with +// a unique Domain Name Service (DNS) name. The load balancer receives incoming +// traffic and routes it to the registered instances. For more information, +// see How Elastic Load Balancing Works (http://docs.aws.amazon.com/ElasticLoadBalancing/latest/DeveloperGuide/how-elb-works.html) +// in the Elastic Load Balancing Developer Guide. +// +// You can create up to 20 load balancers per region per account. You can request +// an increase for the number of load balancers for your account. For more information, +// see Elastic Load Balancing Limits (http://docs.aws.amazon.com/ElasticLoadBalancing/latest/DeveloperGuide/elb-limits.html) +// in the Elastic Load Balancing Developer Guide. 
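+//
+// Example usage (an illustrative sketch; the name, Availability Zone, and
+// ports are assumed values, and the aws, session, elb, and fmt packages are
+// assumed to be imported):
+//
+//	svc := elb.New(session.New())
+//	resp, err := svc.CreateLoadBalancer(&elb.CreateLoadBalancerInput{
+//		LoadBalancerName:  aws.String("my-load-balancer"),
+//		AvailabilityZones: []*string{aws.String("us-east-1a")},
+//		Listeners: []*elb.Listener{{
+//			Protocol:         aws.String("HTTP"),
+//			LoadBalancerPort: aws.Int64(80),
+//			InstanceProtocol: aws.String("HTTP"),
+//			InstancePort:     aws.Int64(8080),
+//		}},
+//	})
+//	if err == nil {
+//		fmt.Println(*resp.DNSName) // DNS name assigned to the new load balancer
+//	}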
+func (c *ELB) CreateLoadBalancer(input *CreateLoadBalancerInput) (*CreateLoadBalancerOutput, error) {
+	req, out := c.CreateLoadBalancerRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+const opCreateLoadBalancerListeners = "CreateLoadBalancerListeners"
+
+// CreateLoadBalancerListenersRequest generates a request for the CreateLoadBalancerListeners operation.
+func (c *ELB) CreateLoadBalancerListenersRequest(input *CreateLoadBalancerListenersInput) (req *request.Request, output *CreateLoadBalancerListenersOutput) {
+	op := &request.Operation{
+		Name:       opCreateLoadBalancerListeners,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &CreateLoadBalancerListenersInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &CreateLoadBalancerListenersOutput{}
+	req.Data = output
+	return
+}
+
+// Creates one or more listeners for the specified load balancer. If a listener
+// with the specified port does not already exist, it is created; otherwise,
+// the properties of the new listener must match the properties of the existing
+// listener.
+//
+// For more information, see Add a Listener to Your Load Balancer (http://docs.aws.amazon.com/ElasticLoadBalancing/latest/DeveloperGuide/us-add-listener.html)
+// in the Elastic Load Balancing Developer Guide.
+func (c *ELB) CreateLoadBalancerListeners(input *CreateLoadBalancerListenersInput) (*CreateLoadBalancerListenersOutput, error) {
+	req, out := c.CreateLoadBalancerListenersRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+const opCreateLoadBalancerPolicy = "CreateLoadBalancerPolicy"
+
+// CreateLoadBalancerPolicyRequest generates a request for the CreateLoadBalancerPolicy operation.
+func (c *ELB) CreateLoadBalancerPolicyRequest(input *CreateLoadBalancerPolicyInput) (req *request.Request, output *CreateLoadBalancerPolicyOutput) {
+	op := &request.Operation{
+		Name:       opCreateLoadBalancerPolicy,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &CreateLoadBalancerPolicyInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &CreateLoadBalancerPolicyOutput{}
+	req.Data = output
+	return
+}
+
+// Creates a policy with the specified attributes for the specified load balancer.
+//
+// Policies are settings that are saved for your load balancer and that can
+// be applied to the front-end listener or the back-end application server,
+// depending on the policy type.
+func (c *ELB) CreateLoadBalancerPolicy(input *CreateLoadBalancerPolicyInput) (*CreateLoadBalancerPolicyOutput, error) {
+	req, out := c.CreateLoadBalancerPolicyRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+const opDeleteLoadBalancer = "DeleteLoadBalancer"
+
+// DeleteLoadBalancerRequest generates a request for the DeleteLoadBalancer operation.
+func (c *ELB) DeleteLoadBalancerRequest(input *DeleteLoadBalancerInput) (req *request.Request, output *DeleteLoadBalancerOutput) {
+	op := &request.Operation{
+		Name:       opDeleteLoadBalancer,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &DeleteLoadBalancerInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &DeleteLoadBalancerOutput{}
+	req.Data = output
+	return
+}
+
+// Deletes the specified load balancer.
+//
+// If you are attempting to recreate a load balancer, you must reconfigure
+// all settings. The DNS name associated with a deleted load balancer is no
+// longer usable.
The name and associated DNS record of the deleted load balancer +// no longer exist and traffic sent to any of its IP addresses is no longer +// delivered to back-end instances. +// +// If the load balancer does not exist or has already been deleted, the call +// to DeleteLoadBalancer still succeeds. +func (c *ELB) DeleteLoadBalancer(input *DeleteLoadBalancerInput) (*DeleteLoadBalancerOutput, error) { + req, out := c.DeleteLoadBalancerRequest(input) + err := req.Send() + return out, err +} + +const opDeleteLoadBalancerListeners = "DeleteLoadBalancerListeners" + +// DeleteLoadBalancerListenersRequest generates a request for the DeleteLoadBalancerListeners operation. +func (c *ELB) DeleteLoadBalancerListenersRequest(input *DeleteLoadBalancerListenersInput) (req *request.Request, output *DeleteLoadBalancerListenersOutput) { + op := &request.Operation{ + Name: opDeleteLoadBalancerListeners, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteLoadBalancerListenersInput{} + } + + req = c.newRequest(op, input, output) + output = &DeleteLoadBalancerListenersOutput{} + req.Data = output + return +} + +// Deletes the specified listeners from the specified load balancer. +func (c *ELB) DeleteLoadBalancerListeners(input *DeleteLoadBalancerListenersInput) (*DeleteLoadBalancerListenersOutput, error) { + req, out := c.DeleteLoadBalancerListenersRequest(input) + err := req.Send() + return out, err +} + +const opDeleteLoadBalancerPolicy = "DeleteLoadBalancerPolicy" + +// DeleteLoadBalancerPolicyRequest generates a request for the DeleteLoadBalancerPolicy operation. +func (c *ELB) DeleteLoadBalancerPolicyRequest(input *DeleteLoadBalancerPolicyInput) (req *request.Request, output *DeleteLoadBalancerPolicyOutput) { + op := &request.Operation{ + Name: opDeleteLoadBalancerPolicy, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteLoadBalancerPolicyInput{} + } + + req = c.newRequest(op, input, output) + output = &DeleteLoadBalancerPolicyOutput{} + req.Data = output + return +} + +// Deletes the specified policy from the specified load balancer. This policy +// must not be enabled for any listeners. +func (c *ELB) DeleteLoadBalancerPolicy(input *DeleteLoadBalancerPolicyInput) (*DeleteLoadBalancerPolicyOutput, error) { + req, out := c.DeleteLoadBalancerPolicyRequest(input) + err := req.Send() + return out, err +} + +const opDeregisterInstancesFromLoadBalancer = "DeregisterInstancesFromLoadBalancer" + +// DeregisterInstancesFromLoadBalancerRequest generates a request for the DeregisterInstancesFromLoadBalancer operation. +func (c *ELB) DeregisterInstancesFromLoadBalancerRequest(input *DeregisterInstancesFromLoadBalancerInput) (req *request.Request, output *DeregisterInstancesFromLoadBalancerOutput) { + op := &request.Operation{ + Name: opDeregisterInstancesFromLoadBalancer, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeregisterInstancesFromLoadBalancerInput{} + } + + req = c.newRequest(op, input, output) + output = &DeregisterInstancesFromLoadBalancerOutput{} + req.Data = output + return +} + +// Deregisters the specified instances from the specified load balancer. After +// the instance is deregistered, it no longer receives traffic from the load +// balancer. +// +// You can use DescribeLoadBalancers to verify that the instance is deregistered +// from the load balancer. 
+// +// For more information, see Deregister and Register Amazon EC2 Instances (http://docs.aws.amazon.com/ElasticLoadBalancing/latest/DeveloperGuide/US_DeReg_Reg_Instances.html) +// in the Elastic Load Balancing Developer Guide. +func (c *ELB) DeregisterInstancesFromLoadBalancer(input *DeregisterInstancesFromLoadBalancerInput) (*DeregisterInstancesFromLoadBalancerOutput, error) { + req, out := c.DeregisterInstancesFromLoadBalancerRequest(input) + err := req.Send() + return out, err +} + +const opDescribeInstanceHealth = "DescribeInstanceHealth" + +// DescribeInstanceHealthRequest generates a request for the DescribeInstanceHealth operation. +func (c *ELB) DescribeInstanceHealthRequest(input *DescribeInstanceHealthInput) (req *request.Request, output *DescribeInstanceHealthOutput) { + op := &request.Operation{ + Name: opDescribeInstanceHealth, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeInstanceHealthInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeInstanceHealthOutput{} + req.Data = output + return +} + +// Describes the state of the specified instances with respect to the specified +// load balancer. If no instances are specified, the call describes the state +// of all instances that are currently registered with the load balancer. If +// instances are specified, their state is returned even if they are no longer +// registered with the load balancer. The state of terminated instances is not +// returned. +func (c *ELB) DescribeInstanceHealth(input *DescribeInstanceHealthInput) (*DescribeInstanceHealthOutput, error) { + req, out := c.DescribeInstanceHealthRequest(input) + err := req.Send() + return out, err +} + +const opDescribeLoadBalancerAttributes = "DescribeLoadBalancerAttributes" + +// DescribeLoadBalancerAttributesRequest generates a request for the DescribeLoadBalancerAttributes operation. +func (c *ELB) DescribeLoadBalancerAttributesRequest(input *DescribeLoadBalancerAttributesInput) (req *request.Request, output *DescribeLoadBalancerAttributesOutput) { + op := &request.Operation{ + Name: opDescribeLoadBalancerAttributes, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeLoadBalancerAttributesInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeLoadBalancerAttributesOutput{} + req.Data = output + return +} + +// Describes the attributes for the specified load balancer. +func (c *ELB) DescribeLoadBalancerAttributes(input *DescribeLoadBalancerAttributesInput) (*DescribeLoadBalancerAttributesOutput, error) { + req, out := c.DescribeLoadBalancerAttributesRequest(input) + err := req.Send() + return out, err +} + +const opDescribeLoadBalancerPolicies = "DescribeLoadBalancerPolicies" + +// DescribeLoadBalancerPoliciesRequest generates a request for the DescribeLoadBalancerPolicies operation. +func (c *ELB) DescribeLoadBalancerPoliciesRequest(input *DescribeLoadBalancerPoliciesInput) (req *request.Request, output *DescribeLoadBalancerPoliciesOutput) { + op := &request.Operation{ + Name: opDescribeLoadBalancerPolicies, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeLoadBalancerPoliciesInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeLoadBalancerPoliciesOutput{} + req.Data = output + return +} + +// Describes the specified policies. +// +// If you specify a load balancer name, the action returns the descriptions +// of all policies created for the load balancer. 
If you specify a policy name
+// associated with your load balancer, the action returns the description of
+// that policy. If you don't specify a load balancer name, the action returns
+// descriptions of the specified sample policies, or descriptions of all sample
+// policies. The names of the sample policies have the ELBSample- prefix.
+func (c *ELB) DescribeLoadBalancerPolicies(input *DescribeLoadBalancerPoliciesInput) (*DescribeLoadBalancerPoliciesOutput, error) {
+	req, out := c.DescribeLoadBalancerPoliciesRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+const opDescribeLoadBalancerPolicyTypes = "DescribeLoadBalancerPolicyTypes"
+
+// DescribeLoadBalancerPolicyTypesRequest generates a request for the DescribeLoadBalancerPolicyTypes operation.
+func (c *ELB) DescribeLoadBalancerPolicyTypesRequest(input *DescribeLoadBalancerPolicyTypesInput) (req *request.Request, output *DescribeLoadBalancerPolicyTypesOutput) {
+	op := &request.Operation{
+		Name:       opDescribeLoadBalancerPolicyTypes,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &DescribeLoadBalancerPolicyTypesInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &DescribeLoadBalancerPolicyTypesOutput{}
+	req.Data = output
+	return
+}
+
+// Describes the specified load balancer policy types.
+//
+// You can use these policy types with CreateLoadBalancerPolicy to create policy
+// configurations for a load balancer.
+func (c *ELB) DescribeLoadBalancerPolicyTypes(input *DescribeLoadBalancerPolicyTypesInput) (*DescribeLoadBalancerPolicyTypesOutput, error) {
+	req, out := c.DescribeLoadBalancerPolicyTypesRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+const opDescribeLoadBalancers = "DescribeLoadBalancers"
+
+// DescribeLoadBalancersRequest generates a request for the DescribeLoadBalancers operation.
+func (c *ELB) DescribeLoadBalancersRequest(input *DescribeLoadBalancersInput) (req *request.Request, output *DescribeLoadBalancersOutput) {
+	op := &request.Operation{
+		Name:       opDescribeLoadBalancers,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+		Paginator: &request.Paginator{
+			InputTokens:     []string{"Marker"},
+			OutputTokens:    []string{"NextMarker"},
+			LimitToken:      "",
+			TruncationToken: "",
+		},
+	}
+
+	if input == nil {
+		input = &DescribeLoadBalancersInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &DescribeLoadBalancersOutput{}
+	req.Data = output
+	return
+}
+
+// Describes the specified load balancers. If no load balancers are specified,
+// the call describes all of your load balancers.
+func (c *ELB) DescribeLoadBalancers(input *DescribeLoadBalancersInput) (*DescribeLoadBalancersOutput, error) {
+	req, out := c.DescribeLoadBalancersRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+func (c *ELB) DescribeLoadBalancersPages(input *DescribeLoadBalancersInput, fn func(p *DescribeLoadBalancersOutput, lastPage bool) (shouldContinue bool)) error {
+	page, _ := c.DescribeLoadBalancersRequest(input)
+	page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator"))
+	return page.EachPage(func(p interface{}, lastPage bool) bool {
+		return fn(p.(*DescribeLoadBalancersOutput), lastPage)
+	})
+}
+
+const opDescribeTags = "DescribeTags"
+
+// DescribeTagsRequest generates a request for the DescribeTags operation.
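+//
+// Example usage of the corresponding DescribeTags method (an illustrative
+// sketch; the load balancer name is an assumed value):
+//
+//	svc := elb.New(session.New())
+//	resp, err := svc.DescribeTags(&elb.DescribeTagsInput{
+//		LoadBalancerNames: []*string{aws.String("my-load-balancer")},
+//	})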
+func (c *ELB) DescribeTagsRequest(input *DescribeTagsInput) (req *request.Request, output *DescribeTagsOutput) { + op := &request.Operation{ + Name: opDescribeTags, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeTagsInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeTagsOutput{} + req.Data = output + return +} + +// Describes the tags associated with the specified load balancers. +func (c *ELB) DescribeTags(input *DescribeTagsInput) (*DescribeTagsOutput, error) { + req, out := c.DescribeTagsRequest(input) + err := req.Send() + return out, err +} + +const opDetachLoadBalancerFromSubnets = "DetachLoadBalancerFromSubnets" + +// DetachLoadBalancerFromSubnetsRequest generates a request for the DetachLoadBalancerFromSubnets operation. +func (c *ELB) DetachLoadBalancerFromSubnetsRequest(input *DetachLoadBalancerFromSubnetsInput) (req *request.Request, output *DetachLoadBalancerFromSubnetsOutput) { + op := &request.Operation{ + Name: opDetachLoadBalancerFromSubnets, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DetachLoadBalancerFromSubnetsInput{} + } + + req = c.newRequest(op, input, output) + output = &DetachLoadBalancerFromSubnetsOutput{} + req.Data = output + return +} + +// Removes the specified subnets from the set of configured subnets for the +// load balancer. +// +// After a subnet is removed, all EC2 instances registered with the load balancer +// in the removed subnet go into the OutOfService state. Then, the load balancer +// balances the traffic among the remaining routable subnets. +func (c *ELB) DetachLoadBalancerFromSubnets(input *DetachLoadBalancerFromSubnetsInput) (*DetachLoadBalancerFromSubnetsOutput, error) { + req, out := c.DetachLoadBalancerFromSubnetsRequest(input) + err := req.Send() + return out, err +} + +const opDisableAvailabilityZonesForLoadBalancer = "DisableAvailabilityZonesForLoadBalancer" + +// DisableAvailabilityZonesForLoadBalancerRequest generates a request for the DisableAvailabilityZonesForLoadBalancer operation. +func (c *ELB) DisableAvailabilityZonesForLoadBalancerRequest(input *DisableAvailabilityZonesForLoadBalancerInput) (req *request.Request, output *DisableAvailabilityZonesForLoadBalancerOutput) { + op := &request.Operation{ + Name: opDisableAvailabilityZonesForLoadBalancer, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DisableAvailabilityZonesForLoadBalancerInput{} + } + + req = c.newRequest(op, input, output) + output = &DisableAvailabilityZonesForLoadBalancerOutput{} + req.Data = output + return +} + +// Removes the specified Availability Zones from the set of Availability Zones +// for the specified load balancer. +// +// There must be at least one Availability Zone registered with a load balancer +// at all times. After an Availability Zone is removed, all instances registered +// with the load balancer that are in the removed Availability Zone go into +// the OutOfService state. Then, the load balancer attempts to equally balance +// the traffic among its remaining Availability Zones. +// +// For more information, see Disable an Availability Zone from a Load-Balanced +// Application (http://docs.aws.amazon.com/ElasticLoadBalancing/latest/DeveloperGuide/US_ShrinkLBApp04.html) +// in the Elastic Load Balancing Developer Guide. 
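+//
+// Example usage of the corresponding DisableAvailabilityZonesForLoadBalancer
+// method (an illustrative sketch; the name and zone are assumed values):
+//
+//	svc := elb.New(session.New())
+//	resp, err := svc.DisableAvailabilityZonesForLoadBalancer(&elb.DisableAvailabilityZonesForLoadBalancerInput{
+//		LoadBalancerName:  aws.String("my-load-balancer"),
+//		AvailabilityZones: []*string{aws.String("us-east-1a")},
+//	})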
+func (c *ELB) DisableAvailabilityZonesForLoadBalancer(input *DisableAvailabilityZonesForLoadBalancerInput) (*DisableAvailabilityZonesForLoadBalancerOutput, error) { + req, out := c.DisableAvailabilityZonesForLoadBalancerRequest(input) + err := req.Send() + return out, err +} + +const opEnableAvailabilityZonesForLoadBalancer = "EnableAvailabilityZonesForLoadBalancer" + +// EnableAvailabilityZonesForLoadBalancerRequest generates a request for the EnableAvailabilityZonesForLoadBalancer operation. +func (c *ELB) EnableAvailabilityZonesForLoadBalancerRequest(input *EnableAvailabilityZonesForLoadBalancerInput) (req *request.Request, output *EnableAvailabilityZonesForLoadBalancerOutput) { + op := &request.Operation{ + Name: opEnableAvailabilityZonesForLoadBalancer, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &EnableAvailabilityZonesForLoadBalancerInput{} + } + + req = c.newRequest(op, input, output) + output = &EnableAvailabilityZonesForLoadBalancerOutput{} + req.Data = output + return +} + +// Adds the specified Availability Zones to the set of Availability Zones for +// the specified load balancer. +// +// The load balancer evenly distributes requests across all its registered +// Availability Zones that contain instances. +// +// For more information, see Add Availability Zone (http://docs.aws.amazon.com/ElasticLoadBalancing/latest/DeveloperGuide/US_AddLBAvailabilityZone.html) +// in the Elastic Load Balancing Developer Guide. +func (c *ELB) EnableAvailabilityZonesForLoadBalancer(input *EnableAvailabilityZonesForLoadBalancerInput) (*EnableAvailabilityZonesForLoadBalancerOutput, error) { + req, out := c.EnableAvailabilityZonesForLoadBalancerRequest(input) + err := req.Send() + return out, err +} + +const opModifyLoadBalancerAttributes = "ModifyLoadBalancerAttributes" + +// ModifyLoadBalancerAttributesRequest generates a request for the ModifyLoadBalancerAttributes operation. +func (c *ELB) ModifyLoadBalancerAttributesRequest(input *ModifyLoadBalancerAttributesInput) (req *request.Request, output *ModifyLoadBalancerAttributesOutput) { + op := &request.Operation{ + Name: opModifyLoadBalancerAttributes, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ModifyLoadBalancerAttributesInput{} + } + + req = c.newRequest(op, input, output) + output = &ModifyLoadBalancerAttributesOutput{} + req.Data = output + return +} + +// Modifies the attributes of the specified load balancer. +// +// You can modify the load balancer attributes, such as AccessLogs, ConnectionDraining, +// and CrossZoneLoadBalancing by either enabling or disabling them. Or, you +// can modify the load balancer attribute ConnectionSettings by specifying an +// idle connection timeout value for your load balancer. 
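+//
+// For example, cross-zone load balancing can be enabled as follows (an
+// illustrative sketch; the load balancer name is an assumed value, and the
+// aws, session, and elb packages are assumed to be imported):
+//
+//	svc := elb.New(session.New())
+//	_, err := svc.ModifyLoadBalancerAttributes(&elb.ModifyLoadBalancerAttributesInput{
+//		LoadBalancerName: aws.String("my-load-balancer"),
+//		LoadBalancerAttributes: &elb.LoadBalancerAttributes{
+//			CrossZoneLoadBalancing: &elb.CrossZoneLoadBalancing{Enabled: aws.Bool(true)},
+//		},
+//	})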
+//
+// For more information, see the following in the Elastic Load Balancing Developer
+// Guide:
+//
+//  Cross-Zone Load Balancing (http://docs.aws.amazon.com/ElasticLoadBalancing/latest/DeveloperGuide/TerminologyandKeyConcepts.html#request-routing)
+//  Connection Draining (http://docs.aws.amazon.com/ElasticLoadBalancing/latest/DeveloperGuide/TerminologyandKeyConcepts.html#conn-drain)
+//  Access Logs (http://docs.aws.amazon.com/ElasticLoadBalancing/latest/DeveloperGuide/access-log-collection.html)
+//  Idle Connection Timeout (http://docs.aws.amazon.com/ElasticLoadBalancing/latest/DeveloperGuide/TerminologyandKeyConcepts.html#idle-timeout)
+func (c *ELB) ModifyLoadBalancerAttributes(input *ModifyLoadBalancerAttributesInput) (*ModifyLoadBalancerAttributesOutput, error) {
+	req, out := c.ModifyLoadBalancerAttributesRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+const opRegisterInstancesWithLoadBalancer = "RegisterInstancesWithLoadBalancer"
+
+// RegisterInstancesWithLoadBalancerRequest generates a request for the RegisterInstancesWithLoadBalancer operation.
+func (c *ELB) RegisterInstancesWithLoadBalancerRequest(input *RegisterInstancesWithLoadBalancerInput) (req *request.Request, output *RegisterInstancesWithLoadBalancerOutput) {
+	op := &request.Operation{
+		Name:       opRegisterInstancesWithLoadBalancer,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &RegisterInstancesWithLoadBalancerInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &RegisterInstancesWithLoadBalancerOutput{}
+	req.Data = output
+	return
+}
+
+// Adds the specified instances to the specified load balancer.
+//
+// The instance must be a running instance in the same network as the load
+// balancer (EC2-Classic or the same VPC). If you have EC2-Classic instances
+// and a load balancer in a VPC with ClassicLink enabled, you can link the EC2-Classic
+// instances to that VPC and then register the linked EC2-Classic instances
+// with the load balancer in the VPC.
+//
+// Note that RegisterInstancesWithLoadBalancer completes when the request has
+// been registered. Instance registration takes a little time to complete. To
+// check the state of the registered instances, use DescribeLoadBalancers or
+// DescribeInstanceHealth.
+//
+// After the instance is registered, it starts receiving traffic and requests
+// from the load balancer. Any instance that is not in one of the Availability
+// Zones registered for the load balancer is moved to the OutOfService state.
+// If an Availability Zone is added to the load balancer later, any instances
+// registered with the load balancer move to the InService state.
+//
+// If you stop an instance registered with a load balancer and then start it,
+// the IP addresses associated with the instance change. Elastic Load Balancing
+// cannot recognize the new IP address, which prevents it from routing traffic
+// to the instances. We recommend that you use the following sequence: stop
+// the instance, deregister the instance, start the instance, and then register
+// the instance. To deregister instances from a load balancer, use DeregisterInstancesFromLoadBalancer.
+//
+// For more information, see Deregister and Register EC2 Instances (http://docs.aws.amazon.com/ElasticLoadBalancing/latest/DeveloperGuide/US_DeReg_Reg_Instances.html)
+// in the Elastic Load Balancing Developer Guide.
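+//
+// Example usage (an illustrative sketch; the instance ID and load balancer
+// name are assumed values):
+//
+//	svc := elb.New(session.New())
+//	resp, err := svc.RegisterInstancesWithLoadBalancer(&elb.RegisterInstancesWithLoadBalancerInput{
+//		LoadBalancerName: aws.String("my-load-balancer"),
+//		Instances:        []*elb.Instance{{InstanceId: aws.String("i-12345678")}},
+//	})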
+func (c *ELB) RegisterInstancesWithLoadBalancer(input *RegisterInstancesWithLoadBalancerInput) (*RegisterInstancesWithLoadBalancerOutput, error) { + req, out := c.RegisterInstancesWithLoadBalancerRequest(input) + err := req.Send() + return out, err +} + +const opRemoveTags = "RemoveTags" + +// RemoveTagsRequest generates a request for the RemoveTags operation. +func (c *ELB) RemoveTagsRequest(input *RemoveTagsInput) (req *request.Request, output *RemoveTagsOutput) { + op := &request.Operation{ + Name: opRemoveTags, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &RemoveTagsInput{} + } + + req = c.newRequest(op, input, output) + output = &RemoveTagsOutput{} + req.Data = output + return +} + +// Removes one or more tags from the specified load balancer. +func (c *ELB) RemoveTags(input *RemoveTagsInput) (*RemoveTagsOutput, error) { + req, out := c.RemoveTagsRequest(input) + err := req.Send() + return out, err +} + +const opSetLoadBalancerListenerSSLCertificate = "SetLoadBalancerListenerSSLCertificate" + +// SetLoadBalancerListenerSSLCertificateRequest generates a request for the SetLoadBalancerListenerSSLCertificate operation. +func (c *ELB) SetLoadBalancerListenerSSLCertificateRequest(input *SetLoadBalancerListenerSSLCertificateInput) (req *request.Request, output *SetLoadBalancerListenerSSLCertificateOutput) { + op := &request.Operation{ + Name: opSetLoadBalancerListenerSSLCertificate, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &SetLoadBalancerListenerSSLCertificateInput{} + } + + req = c.newRequest(op, input, output) + output = &SetLoadBalancerListenerSSLCertificateOutput{} + req.Data = output + return +} + +// Sets the certificate that terminates the specified listener's SSL connections. +// The specified certificate replaces any prior certificate that was used on +// the same load balancer and port. +// +// For more information about updating your SSL certificate, see Updating an +// SSL Certificate for a Load Balancer (http://docs.aws.amazon.com/ElasticLoadBalancing/latest/DeveloperGuide/US_UpdatingLoadBalancerSSL.html) +// in the Elastic Load Balancing Developer Guide. +func (c *ELB) SetLoadBalancerListenerSSLCertificate(input *SetLoadBalancerListenerSSLCertificateInput) (*SetLoadBalancerListenerSSLCertificateOutput, error) { + req, out := c.SetLoadBalancerListenerSSLCertificateRequest(input) + err := req.Send() + return out, err +} + +const opSetLoadBalancerPoliciesForBackendServer = "SetLoadBalancerPoliciesForBackendServer" + +// SetLoadBalancerPoliciesForBackendServerRequest generates a request for the SetLoadBalancerPoliciesForBackendServer operation. +func (c *ELB) SetLoadBalancerPoliciesForBackendServerRequest(input *SetLoadBalancerPoliciesForBackendServerInput) (req *request.Request, output *SetLoadBalancerPoliciesForBackendServerOutput) { + op := &request.Operation{ + Name: opSetLoadBalancerPoliciesForBackendServer, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &SetLoadBalancerPoliciesForBackendServerInput{} + } + + req = c.newRequest(op, input, output) + output = &SetLoadBalancerPoliciesForBackendServerOutput{} + req.Data = output + return +} + +// Replaces the set of policies associated with the specified port on which +// the back-end server is listening with a new set of policies. At this time, +// only the back-end server authentication policy type can be applied to the +// back-end ports; this policy type is composed of multiple public key policies. 
+// +// Each time you use SetLoadBalancerPoliciesForBackendServer to enable the +// policies, use the PolicyNames parameter to list the policies that you want +// to enable. +// +// You can use DescribeLoadBalancers or DescribeLoadBalancerPolicies to verify +// that the policy is associated with the back-end server. +func (c *ELB) SetLoadBalancerPoliciesForBackendServer(input *SetLoadBalancerPoliciesForBackendServerInput) (*SetLoadBalancerPoliciesForBackendServerOutput, error) { + req, out := c.SetLoadBalancerPoliciesForBackendServerRequest(input) + err := req.Send() + return out, err +} + +const opSetLoadBalancerPoliciesOfListener = "SetLoadBalancerPoliciesOfListener" + +// SetLoadBalancerPoliciesOfListenerRequest generates a request for the SetLoadBalancerPoliciesOfListener operation. +func (c *ELB) SetLoadBalancerPoliciesOfListenerRequest(input *SetLoadBalancerPoliciesOfListenerInput) (req *request.Request, output *SetLoadBalancerPoliciesOfListenerOutput) { + op := &request.Operation{ + Name: opSetLoadBalancerPoliciesOfListener, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &SetLoadBalancerPoliciesOfListenerInput{} + } + + req = c.newRequest(op, input, output) + output = &SetLoadBalancerPoliciesOfListenerOutput{} + req.Data = output + return +} + +// Associates, updates, or disables a policy with a listener for the specified +// load balancer. You can associate multiple policies with a listener. +func (c *ELB) SetLoadBalancerPoliciesOfListener(input *SetLoadBalancerPoliciesOfListenerInput) (*SetLoadBalancerPoliciesOfListenerOutput, error) { + req, out := c.SetLoadBalancerPoliciesOfListenerRequest(input) + err := req.Send() + return out, err +} + +// Information about the AccessLog attribute. +type AccessLog struct { + _ struct{} `type:"structure"` + + // The interval for publishing the access logs. You can specify an interval + // of either 5 minutes or 60 minutes. + // + // Default: 60 minutes + EmitInterval *int64 `type:"integer"` + + // Specifies whether access log is enabled for the load balancer. + Enabled *bool `type:"boolean" required:"true"` + + // The name of the Amazon S3 bucket where the access logs are stored. + S3BucketName *string `type:"string"` + + // The logical hierarchy you created for your Amazon S3 bucket, for example + // my-bucket-prefix/prod. If the prefix is not provided, the log is placed at + // the root level of the bucket. + S3BucketPrefix *string `type:"string"` +} + +// String returns the string representation +func (s AccessLog) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AccessLog) GoString() string { + return s.String() +} + +type AddTagsInput struct { + _ struct{} `type:"structure"` + + // The name of the load balancer. You can specify one load balancer only. + LoadBalancerNames []*string `type:"list" required:"true"` + + // The tags. + Tags []*Tag `min:"1" type:"list" required:"true"` +} + +// String returns the string representation +func (s AddTagsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AddTagsInput) GoString() string { + return s.String() +} + +type AddTagsOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s AddTagsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AddTagsOutput) GoString() string { + return s.String() +} + +// This data type is reserved. 
+type AdditionalAttribute struct { + _ struct{} `type:"structure"` + + // This parameter is reserved. + Key *string `type:"string"` + + // This parameter is reserved. + Value *string `type:"string"` +} + +// String returns the string representation +func (s AdditionalAttribute) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AdditionalAttribute) GoString() string { + return s.String() +} + +// Information about a policy for application-controlled session stickiness. +type AppCookieStickinessPolicy struct { + _ struct{} `type:"structure"` + + // The name of the application cookie used for stickiness. + CookieName *string `type:"string"` + + // The mnemonic name for the policy being created. The name must be unique within + // a set of policies for this load balancer. + PolicyName *string `type:"string"` +} + +// String returns the string representation +func (s AppCookieStickinessPolicy) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AppCookieStickinessPolicy) GoString() string { + return s.String() +} + +type ApplySecurityGroupsToLoadBalancerInput struct { + _ struct{} `type:"structure"` + + // The name of the load balancer. + LoadBalancerName *string `type:"string" required:"true"` + + // The IDs of the security groups to associate with the load balancer. Note + // that you cannot specify the name of the security group. + SecurityGroups []*string `type:"list" required:"true"` +} + +// String returns the string representation +func (s ApplySecurityGroupsToLoadBalancerInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ApplySecurityGroupsToLoadBalancerInput) GoString() string { + return s.String() +} + +type ApplySecurityGroupsToLoadBalancerOutput struct { + _ struct{} `type:"structure"` + + // The IDs of the security groups associated with the load balancer. + SecurityGroups []*string `type:"list"` +} + +// String returns the string representation +func (s ApplySecurityGroupsToLoadBalancerOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ApplySecurityGroupsToLoadBalancerOutput) GoString() string { + return s.String() +} + +type AttachLoadBalancerToSubnetsInput struct { + _ struct{} `type:"structure"` + + // The name of the load balancer. + LoadBalancerName *string `type:"string" required:"true"` + + // The IDs of the subnets to add for the load balancer. You can add only one + // subnet per Availability Zone. + Subnets []*string `type:"list" required:"true"` +} + +// String returns the string representation +func (s AttachLoadBalancerToSubnetsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AttachLoadBalancerToSubnetsInput) GoString() string { + return s.String() +} + +type AttachLoadBalancerToSubnetsOutput struct { + _ struct{} `type:"structure"` + + // The IDs of the subnets attached to the load balancer. + Subnets []*string `type:"list"` +} + +// String returns the string representation +func (s AttachLoadBalancerToSubnetsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AttachLoadBalancerToSubnetsOutput) GoString() string { + return s.String() +} + +// Information about the configuration of a back-end server. 
+type BackendServerDescription struct { + _ struct{} `type:"structure"` + + // The port on which the back-end server is listening. + InstancePort *int64 `min:"1" type:"integer"` + + // The names of the policies enabled for the back-end server. + PolicyNames []*string `type:"list"` +} + +// String returns the string representation +func (s BackendServerDescription) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s BackendServerDescription) GoString() string { + return s.String() +} + +type ConfigureHealthCheckInput struct { + _ struct{} `type:"structure"` + + // The configuration information for the new health check. + HealthCheck *HealthCheck `type:"structure" required:"true"` + + // The name of the load balancer. + LoadBalancerName *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s ConfigureHealthCheckInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ConfigureHealthCheckInput) GoString() string { + return s.String() +} + +type ConfigureHealthCheckOutput struct { + _ struct{} `type:"structure"` + + // The updated health check. + HealthCheck *HealthCheck `type:"structure"` +} + +// String returns the string representation +func (s ConfigureHealthCheckOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ConfigureHealthCheckOutput) GoString() string { + return s.String() +} + +// Information about the ConnectionDraining attribute. +type ConnectionDraining struct { + _ struct{} `type:"structure"` + + // Specifies whether connection draining is enabled for the load balancer. + Enabled *bool `type:"boolean" required:"true"` + + // The maximum time, in seconds, to keep the existing connections open before + // deregistering the instances. + Timeout *int64 `type:"integer"` +} + +// String returns the string representation +func (s ConnectionDraining) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ConnectionDraining) GoString() string { + return s.String() +} + +// Information about the ConnectionSettings attribute. +type ConnectionSettings struct { + _ struct{} `type:"structure"` + + // The time, in seconds, that the connection is allowed to be idle (no data + // has been sent over the connection) before it is closed by the load balancer. + IdleTimeout *int64 `min:"1" type:"integer" required:"true"` +} + +// String returns the string representation +func (s ConnectionSettings) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ConnectionSettings) GoString() string { + return s.String() +} + +type CreateAppCookieStickinessPolicyInput struct { + _ struct{} `type:"structure"` + + // The name of the application cookie used for stickiness. + CookieName *string `type:"string" required:"true"` + + // The name of the load balancer. + LoadBalancerName *string `type:"string" required:"true"` + + // The name of the policy being created. Policy names must consist of alphanumeric + // characters and dashes (-). This name must be unique within the set of policies + // for this load balancer. 
+ PolicyName *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s CreateAppCookieStickinessPolicyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateAppCookieStickinessPolicyInput) GoString() string { + return s.String() +} + +type CreateAppCookieStickinessPolicyOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s CreateAppCookieStickinessPolicyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateAppCookieStickinessPolicyOutput) GoString() string { + return s.String() +} + +type CreateLBCookieStickinessPolicyInput struct { + _ struct{} `type:"structure"` + + // The time period, in seconds, after which the cookie should be considered + // stale. If you do not specify this parameter, the sticky session lasts for + // the duration of the browser session. + CookieExpirationPeriod *int64 `type:"long"` + + // The name of the load balancer. + LoadBalancerName *string `type:"string" required:"true"` + + // The name of the policy being created. Policy names must consist of alphanumeric + // characters and dashes (-). This name must be unique within the set of policies + // for this load balancer. + PolicyName *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s CreateLBCookieStickinessPolicyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateLBCookieStickinessPolicyInput) GoString() string { + return s.String() +} + +type CreateLBCookieStickinessPolicyOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s CreateLBCookieStickinessPolicyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateLBCookieStickinessPolicyOutput) GoString() string { + return s.String() +} + +type CreateLoadBalancerInput struct { + _ struct{} `type:"structure"` + + // One or more Availability Zones from the same region as the load balancer. + // Traffic is equally distributed across all specified Availability Zones. + // + // You must specify at least one Availability Zone. + // + // You can add more Availability Zones after you create the load balancer using + // EnableAvailabilityZonesForLoadBalancer. + AvailabilityZones []*string `type:"list"` + + // The listeners. + // + // For more information, see Listeners for Your Load Balancer (http://docs.aws.amazon.com/ElasticLoadBalancing/latest/DeveloperGuide/elb-listener-config.html) + // in the Elastic Load Balancing Developer Guide. + Listeners []*Listener `type:"list" required:"true"` + + // The name of the load balancer. + // + // This name must be unique within your set of load balancers for the region, + // must have a maximum of 32 characters, must contain only alphanumeric characters + // or hyphens, and cannot begin or end with a hyphen. + LoadBalancerName *string `type:"string" required:"true"` + + // The type of a load balancer. Valid only for load balancers in a VPC. + // + // By default, Elastic Load Balancing creates an Internet-facing load balancer + // with a publicly resolvable DNS name, which resolves to public IP addresses. 
+ // For more information about Internet-facing and Internal load balancers, see + // Internet-facing and Internal Load Balancers (http://docs.aws.amazon.com/ElasticLoadBalancing/latest/DeveloperGuide/vpc-loadbalancer-types.html) + // in the Elastic Load Balancing Developer Guide. + // + // Specify internal to create an internal load balancer with a DNS name that + // resolves to private IP addresses. + Scheme *string `type:"string"` + + // The IDs of the security groups to assign to the load balancer. + SecurityGroups []*string `type:"list"` + + // The IDs of the subnets in your VPC to attach to the load balancer. Specify + // one subnet per Availability Zone specified in AvailabilityZones. + Subnets []*string `type:"list"` + + // A list of tags to assign to the load balancer. + // + // For more information about tagging your load balancer, see Tagging (http://docs.aws.amazon.com/ElasticLoadBalancing/latest/DeveloperGuide/TerminologyandKeyConcepts.html#tagging-elb) + // in the Elastic Load Balancing Developer Guide. + Tags []*Tag `min:"1" type:"list"` +} + +// String returns the string representation +func (s CreateLoadBalancerInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateLoadBalancerInput) GoString() string { + return s.String() +} + +type CreateLoadBalancerListenersInput struct { + _ struct{} `type:"structure"` + + // The listeners. + Listeners []*Listener `type:"list" required:"true"` + + // The name of the load balancer. + LoadBalancerName *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s CreateLoadBalancerListenersInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateLoadBalancerListenersInput) GoString() string { + return s.String() +} + +type CreateLoadBalancerListenersOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s CreateLoadBalancerListenersOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateLoadBalancerListenersOutput) GoString() string { + return s.String() +} + +type CreateLoadBalancerOutput struct { + _ struct{} `type:"structure"` + + // The DNS name of the load balancer. + DNSName *string `type:"string"` +} + +// String returns the string representation +func (s CreateLoadBalancerOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateLoadBalancerOutput) GoString() string { + return s.String() +} + +type CreateLoadBalancerPolicyInput struct { + _ struct{} `type:"structure"` + + // The name of the load balancer. + LoadBalancerName *string `type:"string" required:"true"` + + // The attributes for the policy. + PolicyAttributes []*PolicyAttribute `type:"list"` + + // The name of the load balancer policy to be created. This name must be unique + // within the set of policies for this load balancer. + PolicyName *string `type:"string" required:"true"` + + // The name of the base policy type. To get the list of policy types, use DescribeLoadBalancerPolicyTypes. 
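+	// (One such policy type, for example, is SSLNegotiationPolicyType.)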
+	PolicyTypeName *string `type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s CreateLoadBalancerPolicyInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreateLoadBalancerPolicyInput) GoString() string {
+	return s.String()
+}
+
+type CreateLoadBalancerPolicyOutput struct {
+	_ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s CreateLoadBalancerPolicyOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreateLoadBalancerPolicyOutput) GoString() string {
+	return s.String()
+}
+
+// Information about the CrossZoneLoadBalancing attribute.
+type CrossZoneLoadBalancing struct {
+	_ struct{} `type:"structure"`
+
+	// Specifies whether cross-zone load balancing is enabled for the load balancer.
+	Enabled *bool `type:"boolean" required:"true"`
+}
+
+// String returns the string representation
+func (s CrossZoneLoadBalancing) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CrossZoneLoadBalancing) GoString() string {
+	return s.String()
+}
+
+type DeleteLoadBalancerInput struct {
+	_ struct{} `type:"structure"`
+
+	// The name of the load balancer.
+	LoadBalancerName *string `type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s DeleteLoadBalancerInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteLoadBalancerInput) GoString() string {
+	return s.String()
+}
+
+type DeleteLoadBalancerListenersInput struct {
+	_ struct{} `type:"structure"`
+
+	// The name of the load balancer.
+	LoadBalancerName *string `type:"string" required:"true"`
+
+	// The client port numbers of the listeners.
+	LoadBalancerPorts []*int64 `type:"list" required:"true"`
+}
+
+// String returns the string representation
+func (s DeleteLoadBalancerListenersInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteLoadBalancerListenersInput) GoString() string {
+	return s.String()
+}
+
+type DeleteLoadBalancerListenersOutput struct {
+	_ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s DeleteLoadBalancerListenersOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteLoadBalancerListenersOutput) GoString() string {
+	return s.String()
+}
+
+type DeleteLoadBalancerOutput struct {
+	_ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s DeleteLoadBalancerOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteLoadBalancerOutput) GoString() string {
+	return s.String()
+}
+
+type DeleteLoadBalancerPolicyInput struct {
+	_ struct{} `type:"structure"`
+
+	// The name of the load balancer.
+	LoadBalancerName *string `type:"string" required:"true"`
+
+	// The name of the policy.
+ PolicyName *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteLoadBalancerPolicyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteLoadBalancerPolicyInput) GoString() string { + return s.String() +} + +type DeleteLoadBalancerPolicyOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteLoadBalancerPolicyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteLoadBalancerPolicyOutput) GoString() string { + return s.String() +} + +type DeregisterInstancesFromLoadBalancerInput struct { + _ struct{} `type:"structure"` + + // The IDs of the instances. + Instances []*Instance `type:"list" required:"true"` + + // The name of the load balancer. + LoadBalancerName *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DeregisterInstancesFromLoadBalancerInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeregisterInstancesFromLoadBalancerInput) GoString() string { + return s.String() +} + +type DeregisterInstancesFromLoadBalancerOutput struct { + _ struct{} `type:"structure"` + + // The remaining instances registered with the load balancer. + Instances []*Instance `type:"list"` +} + +// String returns the string representation +func (s DeregisterInstancesFromLoadBalancerOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeregisterInstancesFromLoadBalancerOutput) GoString() string { + return s.String() +} + +type DescribeInstanceHealthInput struct { + _ struct{} `type:"structure"` + + // The IDs of the instances. + Instances []*Instance `type:"list"` + + // The name of the load balancer. + LoadBalancerName *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DescribeInstanceHealthInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeInstanceHealthInput) GoString() string { + return s.String() +} + +type DescribeInstanceHealthOutput struct { + _ struct{} `type:"structure"` + + // Information about the health of the instances. + InstanceStates []*InstanceState `type:"list"` +} + +// String returns the string representation +func (s DescribeInstanceHealthOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeInstanceHealthOutput) GoString() string { + return s.String() +} + +type DescribeLoadBalancerAttributesInput struct { + _ struct{} `type:"structure"` + + // The name of the load balancer. + LoadBalancerName *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DescribeLoadBalancerAttributesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeLoadBalancerAttributesInput) GoString() string { + return s.String() +} + +type DescribeLoadBalancerAttributesOutput struct { + _ struct{} `type:"structure"` + + // Information about the load balancer attributes. 
+ LoadBalancerAttributes *LoadBalancerAttributes `type:"structure"` +} + +// String returns the string representation +func (s DescribeLoadBalancerAttributesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeLoadBalancerAttributesOutput) GoString() string { + return s.String() +} + +type DescribeLoadBalancerPoliciesInput struct { + _ struct{} `type:"structure"` + + // The name of the load balancer. + LoadBalancerName *string `type:"string"` + + // The names of the policies. + PolicyNames []*string `type:"list"` +} + +// String returns the string representation +func (s DescribeLoadBalancerPoliciesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeLoadBalancerPoliciesInput) GoString() string { + return s.String() +} + +type DescribeLoadBalancerPoliciesOutput struct { + _ struct{} `type:"structure"` + + // Information about the policies. + PolicyDescriptions []*PolicyDescription `type:"list"` +} + +// String returns the string representation +func (s DescribeLoadBalancerPoliciesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeLoadBalancerPoliciesOutput) GoString() string { + return s.String() +} + +type DescribeLoadBalancerPolicyTypesInput struct { + _ struct{} `type:"structure"` + + // The names of the policy types. If no names are specified, describes all policy + // types defined by Elastic Load Balancing. + PolicyTypeNames []*string `type:"list"` +} + +// String returns the string representation +func (s DescribeLoadBalancerPolicyTypesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeLoadBalancerPolicyTypesInput) GoString() string { + return s.String() +} + +type DescribeLoadBalancerPolicyTypesOutput struct { + _ struct{} `type:"structure"` + + // Information about the policy types. + PolicyTypeDescriptions []*PolicyTypeDescription `type:"list"` +} + +// String returns the string representation +func (s DescribeLoadBalancerPolicyTypesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeLoadBalancerPolicyTypesOutput) GoString() string { + return s.String() +} + +type DescribeLoadBalancersInput struct { + _ struct{} `type:"structure"` + + // The names of the load balancers. + LoadBalancerNames []*string `type:"list"` + + // The marker for the next set of results. (You received this marker from a + // previous call.) + Marker *string `type:"string"` + + // The maximum number of results to return with this call (a number from 1 to + // 400). The default is 400. + PageSize *int64 `min:"1" type:"integer"` +} + +// String returns the string representation +func (s DescribeLoadBalancersInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeLoadBalancersInput) GoString() string { + return s.String() +} + +type DescribeLoadBalancersOutput struct { + _ struct{} `type:"structure"` + + // Information about the load balancers. + LoadBalancerDescriptions []*LoadBalancerDescription `type:"list"` + + // The marker to use when requesting the next set of results. If there are no + // additional results, the string is empty. 
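+	// (To retrieve the next page, pass this value in the Marker parameter of a
+	// subsequent DescribeLoadBalancers call.)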
+ NextMarker *string `type:"string"` +} + +// String returns the string representation +func (s DescribeLoadBalancersOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeLoadBalancersOutput) GoString() string { + return s.String() +} + +type DescribeTagsInput struct { + _ struct{} `type:"structure"` + + // The names of the load balancers. + LoadBalancerNames []*string `min:"1" type:"list" required:"true"` +} + +// String returns the string representation +func (s DescribeTagsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeTagsInput) GoString() string { + return s.String() +} + +type DescribeTagsOutput struct { + _ struct{} `type:"structure"` + + // Information about the tags. + TagDescriptions []*TagDescription `type:"list"` +} + +// String returns the string representation +func (s DescribeTagsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeTagsOutput) GoString() string { + return s.String() +} + +type DetachLoadBalancerFromSubnetsInput struct { + _ struct{} `type:"structure"` + + // The name of the load balancer. + LoadBalancerName *string `type:"string" required:"true"` + + // The IDs of the subnets. + Subnets []*string `type:"list" required:"true"` +} + +// String returns the string representation +func (s DetachLoadBalancerFromSubnetsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DetachLoadBalancerFromSubnetsInput) GoString() string { + return s.String() +} + +type DetachLoadBalancerFromSubnetsOutput struct { + _ struct{} `type:"structure"` + + // The IDs of the remaining subnets for the load balancer. + Subnets []*string `type:"list"` +} + +// String returns the string representation +func (s DetachLoadBalancerFromSubnetsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DetachLoadBalancerFromSubnetsOutput) GoString() string { + return s.String() +} + +type DisableAvailabilityZonesForLoadBalancerInput struct { + _ struct{} `type:"structure"` + + // The Availability Zones. + AvailabilityZones []*string `type:"list" required:"true"` + + // The name of the load balancer. + LoadBalancerName *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DisableAvailabilityZonesForLoadBalancerInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DisableAvailabilityZonesForLoadBalancerInput) GoString() string { + return s.String() +} + +type DisableAvailabilityZonesForLoadBalancerOutput struct { + _ struct{} `type:"structure"` + + // The remaining Availability Zones for the load balancer. + AvailabilityZones []*string `type:"list"` +} + +// String returns the string representation +func (s DisableAvailabilityZonesForLoadBalancerOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DisableAvailabilityZonesForLoadBalancerOutput) GoString() string { + return s.String() +} + +type EnableAvailabilityZonesForLoadBalancerInput struct { + _ struct{} `type:"structure"` + + // The Availability Zones. These must be in the same region as the load balancer. + AvailabilityZones []*string `type:"list" required:"true"` + + // The name of the load balancer. 
+	LoadBalancerName *string `type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s EnableAvailabilityZonesForLoadBalancerInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s EnableAvailabilityZonesForLoadBalancerInput) GoString() string {
+	return s.String()
+}
+
+type EnableAvailabilityZonesForLoadBalancerOutput struct {
+	_ struct{} `type:"structure"`
+
+	// The updated list of Availability Zones for the load balancer.
+	AvailabilityZones []*string `type:"list"`
+}
+
+// String returns the string representation
+func (s EnableAvailabilityZonesForLoadBalancerOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s EnableAvailabilityZonesForLoadBalancerOutput) GoString() string {
+	return s.String()
+}
+
+// Information about a health check.
+type HealthCheck struct {
+	_ struct{} `type:"structure"`
+
+	// The number of consecutive health check successes required before moving
+	// the instance to the Healthy state.
+	HealthyThreshold *int64 `min:"2" type:"integer" required:"true"`
+
+	// The approximate interval, in seconds, between health checks of an individual
+	// instance.
+	Interval *int64 `min:"1" type:"integer" required:"true"`
+
+	// The instance being checked. The protocol is either TCP, HTTP, HTTPS, or SSL.
+	// The range of valid ports is one (1) through 65535.
+	//
+	// TCP is the default, specified as a TCP:port pair, for example "TCP:5000".
+	// In this case, a health check simply attempts to open a TCP connection to
+	// the instance on the specified port. Failure to connect within the configured
+	// timeout is considered unhealthy.
+	//
+	// SSL is also specified as an SSL:port pair, for example "SSL:5000".
+	//
+	// For HTTP/HTTPS, you must include a ping path in the string. HTTP is specified
+	// as an HTTP:port/PathToPing grouping, for example "HTTP:80/weather/us/wa/seattle".
+	// In this case, an HTTP GET request is issued to the instance on the given port
+	// and path. Any answer other than "200 OK" within the timeout period is considered
+	// unhealthy.
+	//
+	// The total length of the HTTP ping target must be 1024 16-bit Unicode characters
+	// or less.
+	Target *string `type:"string" required:"true"`
+
+	// The amount of time, in seconds, during which no response means a failed health
+	// check.
+	//
+	// This value must be less than the Interval value.
+	Timeout *int64 `min:"1" type:"integer" required:"true"`
+
+	// The number of consecutive health check failures required before moving the
+	// instance to the Unhealthy state.
+	UnhealthyThreshold *int64 `min:"2" type:"integer" required:"true"`
+}
+
+// String returns the string representation
+func (s HealthCheck) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s HealthCheck) GoString() string {
+	return s.String()
+}
+
+// The ID of a back-end instance.
+type Instance struct {
+	_ struct{} `type:"structure"`
+
+	// The ID of the instance.
+	InstanceId *string `type:"string"`
+}
+
+// String returns the string representation
+func (s Instance) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s Instance) GoString() string {
+	return s.String()
+}
+
+// Information about the state of a back-end instance.
+type InstanceState struct {
+	_ struct{} `type:"structure"`
+
+	// A description of the instance state. This string can contain one or more
+	// of the following messages.
+	//
+	//  N/A
+	//
+	//  A transient error occurred. Please try again later.
+	//
+	//  Instance has failed at least the UnhealthyThreshold number of health checks
+	// consecutively.
+	//
+	//  Instance has not passed the configured HealthyThreshold number of health
+	// checks consecutively.
+	//
+	//  Instance registration is still in progress.
+	//
+	//  Instance is in the EC2 Availability Zone for which LoadBalancer is not
+	// configured to route traffic to.
+	//
+	//  Instance is not currently registered with the LoadBalancer.
+	//
+	//  Instance deregistration currently in progress.
+	//
+	//  Disable Availability Zone is currently in progress.
+	//
+	//  Instance is in pending state.
+	//
+	//  Instance is in stopped state.
+	//
+	//  Instance is in terminated state.
+	Description *string `type:"string"`
+
+	// The ID of the instance.
+	InstanceId *string `type:"string"`
+
+	// Information about the cause of OutOfService instances. Specifically, whether
+	// the cause is Elastic Load Balancing or the instance.
+	//
+	// Valid values: ELB | Instance | N/A
+	ReasonCode *string `type:"string"`
+
+	// The current state of the instance.
+	//
+	// Valid values: InService | OutOfService | Unknown
+	State *string `type:"string"`
+}
+
+// String returns the string representation
+func (s InstanceState) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s InstanceState) GoString() string {
+	return s.String()
+}
+
+// Information about a policy for duration-based session stickiness.
+type LBCookieStickinessPolicy struct {
+	_ struct{} `type:"structure"`
+
+	// The time period, in seconds, after which the cookie should be considered
+	// stale. If this parameter is not specified, the sticky session lasts for
+	// the duration of the browser session.
+	CookieExpirationPeriod *int64 `type:"long"`
+
+	// The name for the policy being created. The name must be unique within the
+	// set of policies for this load balancer.
+	PolicyName *string `type:"string"`
+}
+
+// String returns the string representation
+func (s LBCookieStickinessPolicy) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s LBCookieStickinessPolicy) GoString() string {
+	return s.String()
+}
+
+// Information about a listener.
+//
+// For information about the protocols and the ports supported by Elastic Load
+// Balancing, see Listener Configurations for Elastic Load Balancing (http://docs.aws.amazon.com/ElasticLoadBalancing/latest/DeveloperGuide/elb-listener-config.html)
+// in the Elastic Load Balancing Developer Guide.
+type Listener struct {
+	_ struct{} `type:"structure"`
+
+	// The port on which the instance is listening.
+	InstancePort *int64 `min:"1" type:"integer" required:"true"`
+
+	// The protocol to use for routing traffic to back-end instances: HTTP, HTTPS,
+	// TCP, or SSL.
+	//
+	// If the front-end protocol is HTTP, HTTPS, TCP, or SSL, InstanceProtocol
+	// must be the same protocol.
+	//
+	// If there is another listener with the same InstancePort whose InstanceProtocol
+	// is secure (HTTPS or SSL), the listener's InstanceProtocol must also be secure.
+	//
+	// If there is another listener with the same InstancePort whose InstanceProtocol
+	// is HTTP or TCP, the listener's InstanceProtocol must be HTTP or TCP.
+	InstanceProtocol *string `type:"string"`
+
+	// The port on which the load balancer is listening.
On EC2-VPC, you can specify + // any port from the range 1-65535. On EC2-Classic, you can specify any port + // from the following list: 25, 80, 443, 465, 587, 1024-65535. + LoadBalancerPort *int64 `type:"integer" required:"true"` + + // The load balancer transport protocol to use for routing: HTTP, HTTPS, TCP, + // or SSL. + Protocol *string `type:"string" required:"true"` + + // The Amazon Resource Name (ARN) of the server certificate. + SSLCertificateId *string `type:"string"` +} + +// String returns the string representation +func (s Listener) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Listener) GoString() string { + return s.String() +} + +// The policies enabled for a listener. +type ListenerDescription struct { + _ struct{} `type:"structure"` + + // Information about a listener. + // + // For information about the protocols and the ports supported by Elastic Load + // Balancing, see Listener Configurations for Elastic Load Balancing (http://docs.aws.amazon.com/ElasticLoadBalancing/latest/DeveloperGuide/elb-listener-config.html) + // in the Elastic Load Balancing Developer Guide. + Listener *Listener `type:"structure"` + + // The policies. If there are no policies enabled, the list is empty. + PolicyNames []*string `type:"list"` +} + +// String returns the string representation +func (s ListenerDescription) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListenerDescription) GoString() string { + return s.String() +} + +// The attributes for a load balancer. +type LoadBalancerAttributes struct { + _ struct{} `type:"structure"` + + // If enabled, the load balancer captures detailed information of all requests + // and delivers the information to the Amazon S3 bucket that you specify. + // + // For more information, see Enable Access Logs (http://docs.aws.amazon.com/ElasticLoadBalancing/latest/DeveloperGuide/enable-access-logs.html) + // in the Elastic Load Balancing Developer Guide. + AccessLog *AccessLog `type:"structure"` + + // This parameter is reserved. + AdditionalAttributes []*AdditionalAttribute `type:"list"` + + // If enabled, the load balancer allows existing requests to complete before + // the load balancer shifts traffic away from a deregistered or unhealthy back-end + // instance. + // + // For more information, see Enable Connection Draining (http://docs.aws.amazon.com/ElasticLoadBalancing/latest/DeveloperGuide/config-conn-drain.html) + // in the Elastic Load Balancing Developer Guide. + ConnectionDraining *ConnectionDraining `type:"structure"` + + // If enabled, the load balancer allows the connections to remain idle (no data + // is sent over the connection) for the specified duration. + // + // By default, Elastic Load Balancing maintains a 60-second idle connection + // timeout for both front-end and back-end connections of your load balancer. + // For more information, see Configure Idle Connection Timeout (http://docs.aws.amazon.com/ElasticLoadBalancing/latest/DeveloperGuide/config-idle-timeout.html) + // in the Elastic Load Balancing Developer Guide. + ConnectionSettings *ConnectionSettings `type:"structure"` + + // If enabled, the load balancer routes the request traffic evenly across all + // back-end instances regardless of the Availability Zones. 
+ // + // For more information, see Enable Cross-Zone Load Balancing (http://docs.aws.amazon.com/ElasticLoadBalancing/latest/DeveloperGuide/enable-disable-crosszone-lb.html) + // in the Elastic Load Balancing Developer Guide. + CrossZoneLoadBalancing *CrossZoneLoadBalancing `type:"structure"` +} + +// String returns the string representation +func (s LoadBalancerAttributes) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s LoadBalancerAttributes) GoString() string { + return s.String() +} + +// Information about a load balancer. +type LoadBalancerDescription struct { + _ struct{} `type:"structure"` + + // The Availability Zones for the load balancer. + AvailabilityZones []*string `type:"list"` + + // Information about the back-end servers. + BackendServerDescriptions []*BackendServerDescription `type:"list"` + + // The Amazon Route 53 hosted zone associated with the load balancer. + // + // For more information, see Using Domain Names With Elastic Load Balancing + // (http://docs.aws.amazon.com/ElasticLoadBalancing/latest/DeveloperGuide/using-domain-names-with-elb.html) + // in the Elastic Load Balancing Developer Guide. + CanonicalHostedZoneName *string `type:"string"` + + // The ID of the Amazon Route 53 hosted zone name associated with the load balancer. + CanonicalHostedZoneNameID *string `type:"string"` + + // The date and time the load balancer was created. + CreatedTime *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + // The external DNS name of the load balancer. + DNSName *string `type:"string"` + + // Information about the health checks conducted on the load balancer. + HealthCheck *HealthCheck `type:"structure"` + + // The IDs of the instances for the load balancer. + Instances []*Instance `type:"list"` + + // The listeners for the load balancer. + ListenerDescriptions []*ListenerDescription `type:"list"` + + // The name of the load balancer. + LoadBalancerName *string `type:"string"` + + // The policies defined for the load balancer. + Policies *Policies `type:"structure"` + + // The type of load balancer. Valid only for load balancers in a VPC. + // + // If Scheme is internet-facing, the load balancer has a public DNS name that + // resolves to a public IP address. + // + // If Scheme is internal, the load balancer has a public DNS name that resolves + // to a private IP address. + Scheme *string `type:"string"` + + // The security groups for the load balancer. Valid only for load balancers + // in a VPC. + SecurityGroups []*string `type:"list"` + + // The security group that you can use as part of your inbound rules for your + // load balancer's back-end application instances. To only allow traffic from + // load balancers, add a security group rule to your back end instance that + // specifies this source security group as the inbound source. + SourceSecurityGroup *SourceSecurityGroup `type:"structure"` + + // The IDs of the subnets for the load balancer. + Subnets []*string `type:"list"` + + // The ID of the VPC for the load balancer. + VPCId *string `type:"string"` +} + +// String returns the string representation +func (s LoadBalancerDescription) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s LoadBalancerDescription) GoString() string { + return s.String() +} + +type ModifyLoadBalancerAttributesInput struct { + _ struct{} `type:"structure"` + + // The attributes of the load balancer. 
+	LoadBalancerAttributes *LoadBalancerAttributes `type:"structure" required:"true"`
+
+	// The name of the load balancer.
+	LoadBalancerName *string `type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s ModifyLoadBalancerAttributesInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ModifyLoadBalancerAttributesInput) GoString() string {
+	return s.String()
+}
+
+type ModifyLoadBalancerAttributesOutput struct {
+	_ struct{} `type:"structure"`
+
+	// The attributes for a load balancer.
+	LoadBalancerAttributes *LoadBalancerAttributes `type:"structure"`
+
+	// The name of the load balancer.
+	LoadBalancerName *string `type:"string"`
+}
+
+// String returns the string representation
+func (s ModifyLoadBalancerAttributesOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ModifyLoadBalancerAttributesOutput) GoString() string {
+	return s.String()
+}
+
+// The policies for a load balancer.
+type Policies struct {
+	_ struct{} `type:"structure"`
+
+	// The stickiness policies created using CreateAppCookieStickinessPolicy.
+	AppCookieStickinessPolicies []*AppCookieStickinessPolicy `type:"list"`
+
+	// The stickiness policies created using CreateLBCookieStickinessPolicy.
+	LBCookieStickinessPolicies []*LBCookieStickinessPolicy `type:"list"`
+
+	// The policies other than the stickiness policies.
+	OtherPolicies []*string `type:"list"`
+}
+
+// String returns the string representation
+func (s Policies) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s Policies) GoString() string {
+	return s.String()
+}
+
+// Information about a policy attribute.
+type PolicyAttribute struct {
+	_ struct{} `type:"structure"`
+
+	// The name of the attribute.
+	AttributeName *string `type:"string"`
+
+	// The value of the attribute.
+	AttributeValue *string `type:"string"`
+}
+
+// String returns the string representation
+func (s PolicyAttribute) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s PolicyAttribute) GoString() string {
+	return s.String()
+}
+
+// Information about a policy attribute.
+type PolicyAttributeDescription struct {
+	_ struct{} `type:"structure"`
+
+	// The name of the attribute.
+	AttributeName *string `type:"string"`
+
+	// The value of the attribute.
+	AttributeValue *string `type:"string"`
+}
+
+// String returns the string representation
+func (s PolicyAttributeDescription) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s PolicyAttributeDescription) GoString() string {
+	return s.String()
+}
+
+// Information about a policy attribute type.
+type PolicyAttributeTypeDescription struct {
+	_ struct{} `type:"structure"`
+
+	// The name of the attribute.
+	AttributeName *string `type:"string"`
+
+	// The type of the attribute. For example, Boolean or Integer.
+	AttributeType *string `type:"string"`
+
+	// The cardinality of the attribute.
+	//
+	// Valid values:
+	//
+	//  ONE(1) : Single value required
+	//
+	//  ZERO_OR_ONE(0..1) : Up to one value can be supplied
+	//
+	//  ZERO_OR_MORE(0..*) : Optional. Multiple values are allowed
+	//
+	//  ONE_OR_MORE(1..*) : Required. Multiple values are allowed
+	Cardinality *string `type:"string"`
+
+	// The default value of the attribute, if applicable.
+	DefaultValue *string `type:"string"`
+
+	// A description of the attribute.
+ Description *string `type:"string"` +} + +// String returns the string representation +func (s PolicyAttributeTypeDescription) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PolicyAttributeTypeDescription) GoString() string { + return s.String() +} + +// Information about a policy. +type PolicyDescription struct { + _ struct{} `type:"structure"` + + // The policy attributes. + PolicyAttributeDescriptions []*PolicyAttributeDescription `type:"list"` + + // The name of the policy. + PolicyName *string `type:"string"` + + // The name of the policy type. + PolicyTypeName *string `type:"string"` +} + +// String returns the string representation +func (s PolicyDescription) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PolicyDescription) GoString() string { + return s.String() +} + +// Information about a policy type. +type PolicyTypeDescription struct { + _ struct{} `type:"structure"` + + // A description of the policy type. + Description *string `type:"string"` + + // The description of the policy attributes associated with the policies defined + // by Elastic Load Balancing. + PolicyAttributeTypeDescriptions []*PolicyAttributeTypeDescription `type:"list"` + + // The name of the policy type. + PolicyTypeName *string `type:"string"` +} + +// String returns the string representation +func (s PolicyTypeDescription) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PolicyTypeDescription) GoString() string { + return s.String() +} + +type RegisterInstancesWithLoadBalancerInput struct { + _ struct{} `type:"structure"` + + // The IDs of the instances. + Instances []*Instance `type:"list" required:"true"` + + // The name of the load balancer. + LoadBalancerName *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s RegisterInstancesWithLoadBalancerInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RegisterInstancesWithLoadBalancerInput) GoString() string { + return s.String() +} + +type RegisterInstancesWithLoadBalancerOutput struct { + _ struct{} `type:"structure"` + + // The updated list of instances for the load balancer. + Instances []*Instance `type:"list"` +} + +// String returns the string representation +func (s RegisterInstancesWithLoadBalancerOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RegisterInstancesWithLoadBalancerOutput) GoString() string { + return s.String() +} + +type RemoveTagsInput struct { + _ struct{} `type:"structure"` + + // The name of the load balancer. You can specify a maximum of one load balancer + // name. + LoadBalancerNames []*string `type:"list" required:"true"` + + // The list of tag keys to remove. 
+ Tags []*TagKeyOnly `min:"1" type:"list" required:"true"` +} + +// String returns the string representation +func (s RemoveTagsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RemoveTagsInput) GoString() string { + return s.String() +} + +type RemoveTagsOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s RemoveTagsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RemoveTagsOutput) GoString() string { + return s.String() +} + +type SetLoadBalancerListenerSSLCertificateInput struct { + _ struct{} `type:"structure"` + + // The name of the load balancer. + LoadBalancerName *string `type:"string" required:"true"` + + // The port that uses the specified SSL certificate. + LoadBalancerPort *int64 `type:"integer" required:"true"` + + // The Amazon Resource Name (ARN) of the SSL certificate. + SSLCertificateId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s SetLoadBalancerListenerSSLCertificateInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SetLoadBalancerListenerSSLCertificateInput) GoString() string { + return s.String() +} + +type SetLoadBalancerListenerSSLCertificateOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s SetLoadBalancerListenerSSLCertificateOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SetLoadBalancerListenerSSLCertificateOutput) GoString() string { + return s.String() +} + +type SetLoadBalancerPoliciesForBackendServerInput struct { + _ struct{} `type:"structure"` + + // The port number associated with the back-end server. + InstancePort *int64 `type:"integer" required:"true"` + + // The name of the load balancer. + LoadBalancerName *string `type:"string" required:"true"` + + // The names of the policies. If the list is empty, then all current polices + // are removed from the back-end server. + PolicyNames []*string `type:"list" required:"true"` +} + +// String returns the string representation +func (s SetLoadBalancerPoliciesForBackendServerInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SetLoadBalancerPoliciesForBackendServerInput) GoString() string { + return s.String() +} + +type SetLoadBalancerPoliciesForBackendServerOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s SetLoadBalancerPoliciesForBackendServerOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SetLoadBalancerPoliciesForBackendServerOutput) GoString() string { + return s.String() +} + +type SetLoadBalancerPoliciesOfListenerInput struct { + _ struct{} `type:"structure"` + + // The name of the load balancer. + LoadBalancerName *string `type:"string" required:"true"` + + // The external port of the load balancer for the policy. + LoadBalancerPort *int64 `type:"integer" required:"true"` + + // The names of the policies. If the list is empty, the current policy is removed + // from the listener. 
+	PolicyNames []*string `type:"list" required:"true"`
+}
+
+// String returns the string representation
+func (s SetLoadBalancerPoliciesOfListenerInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s SetLoadBalancerPoliciesOfListenerInput) GoString() string {
+	return s.String()
+}
+
+type SetLoadBalancerPoliciesOfListenerOutput struct {
+	_ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s SetLoadBalancerPoliciesOfListenerOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s SetLoadBalancerPoliciesOfListenerOutput) GoString() string {
+	return s.String()
+}
+
+// Information about a source security group.
+type SourceSecurityGroup struct {
+	_ struct{} `type:"structure"`
+
+	// The name of the security group.
+	GroupName *string `type:"string"`
+
+	// The owner of the security group.
+	OwnerAlias *string `type:"string"`
+}
+
+// String returns the string representation
+func (s SourceSecurityGroup) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s SourceSecurityGroup) GoString() string {
+	return s.String()
+}
+
+// Information about a tag.
+type Tag struct {
+	_ struct{} `type:"structure"`
+
+	// The key of the tag.
+	Key *string `min:"1" type:"string" required:"true"`
+
+	// The value of the tag.
+	Value *string `type:"string"`
+}
+
+// String returns the string representation
+func (s Tag) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s Tag) GoString() string {
+	return s.String()
+}
+
+// The tags associated with a load balancer.
+type TagDescription struct {
+	_ struct{} `type:"structure"`
+
+	// The name of the load balancer.
+	LoadBalancerName *string `type:"string"`
+
+	// The tags.
+	Tags []*Tag `min:"1" type:"list"`
+}
+
+// String returns the string representation
+func (s TagDescription) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s TagDescription) GoString() string {
+	return s.String()
+}
+
+// The key of a tag.
+type TagKeyOnly struct {
+	_ struct{} `type:"structure"`
+
+	// The name of the key.
+	Key *string `min:"1" type:"string"`
+}
+
+// String returns the string representation
+func (s TagKeyOnly) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s TagKeyOnly) GoString() string {
+	return s.String()
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/service/elb/elbiface/interface.go b/vendor/github.com/aws/aws-sdk-go/service/elb/elbiface/interface.go
new file mode 100644
index 000000000..5674133f7
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/elb/elbiface/interface.go
@@ -0,0 +1,128 @@
+// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT.
+
+// Package elbiface provides an interface for Elastic Load Balancing.
+package elbiface
+
+import (
+	"github.com/aws/aws-sdk-go/aws/request"
+	"github.com/aws/aws-sdk-go/service/elb"
+)
+
+// ELBAPI is the interface type for elb.ELB.
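+//
+// A common reason to depend on this interface rather than on *elb.ELB directly
+// is to substitute a test double in unit tests. The sketch below is an
+// editor's illustration only (mockELB and its stubbed return value are
+// hypothetical consumer-side code, not part of this package):
+//
+//	// mockELB satisfies ELBAPI by embedding it and overriding only the
+//	// methods the test exercises; calling any other method panics.
+//	type mockELB struct {
+//		elbiface.ELBAPI
+//	}
+//
+//	func (m mockELB) DescribeTags(in *elb.DescribeTagsInput) (*elb.DescribeTagsOutput, error) {
+//		return &elb.DescribeTagsOutput{}, nil
+//	}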
+type ELBAPI interface { + AddTagsRequest(*elb.AddTagsInput) (*request.Request, *elb.AddTagsOutput) + + AddTags(*elb.AddTagsInput) (*elb.AddTagsOutput, error) + + ApplySecurityGroupsToLoadBalancerRequest(*elb.ApplySecurityGroupsToLoadBalancerInput) (*request.Request, *elb.ApplySecurityGroupsToLoadBalancerOutput) + + ApplySecurityGroupsToLoadBalancer(*elb.ApplySecurityGroupsToLoadBalancerInput) (*elb.ApplySecurityGroupsToLoadBalancerOutput, error) + + AttachLoadBalancerToSubnetsRequest(*elb.AttachLoadBalancerToSubnetsInput) (*request.Request, *elb.AttachLoadBalancerToSubnetsOutput) + + AttachLoadBalancerToSubnets(*elb.AttachLoadBalancerToSubnetsInput) (*elb.AttachLoadBalancerToSubnetsOutput, error) + + ConfigureHealthCheckRequest(*elb.ConfigureHealthCheckInput) (*request.Request, *elb.ConfigureHealthCheckOutput) + + ConfigureHealthCheck(*elb.ConfigureHealthCheckInput) (*elb.ConfigureHealthCheckOutput, error) + + CreateAppCookieStickinessPolicyRequest(*elb.CreateAppCookieStickinessPolicyInput) (*request.Request, *elb.CreateAppCookieStickinessPolicyOutput) + + CreateAppCookieStickinessPolicy(*elb.CreateAppCookieStickinessPolicyInput) (*elb.CreateAppCookieStickinessPolicyOutput, error) + + CreateLBCookieStickinessPolicyRequest(*elb.CreateLBCookieStickinessPolicyInput) (*request.Request, *elb.CreateLBCookieStickinessPolicyOutput) + + CreateLBCookieStickinessPolicy(*elb.CreateLBCookieStickinessPolicyInput) (*elb.CreateLBCookieStickinessPolicyOutput, error) + + CreateLoadBalancerRequest(*elb.CreateLoadBalancerInput) (*request.Request, *elb.CreateLoadBalancerOutput) + + CreateLoadBalancer(*elb.CreateLoadBalancerInput) (*elb.CreateLoadBalancerOutput, error) + + CreateLoadBalancerListenersRequest(*elb.CreateLoadBalancerListenersInput) (*request.Request, *elb.CreateLoadBalancerListenersOutput) + + CreateLoadBalancerListeners(*elb.CreateLoadBalancerListenersInput) (*elb.CreateLoadBalancerListenersOutput, error) + + CreateLoadBalancerPolicyRequest(*elb.CreateLoadBalancerPolicyInput) (*request.Request, *elb.CreateLoadBalancerPolicyOutput) + + CreateLoadBalancerPolicy(*elb.CreateLoadBalancerPolicyInput) (*elb.CreateLoadBalancerPolicyOutput, error) + + DeleteLoadBalancerRequest(*elb.DeleteLoadBalancerInput) (*request.Request, *elb.DeleteLoadBalancerOutput) + + DeleteLoadBalancer(*elb.DeleteLoadBalancerInput) (*elb.DeleteLoadBalancerOutput, error) + + DeleteLoadBalancerListenersRequest(*elb.DeleteLoadBalancerListenersInput) (*request.Request, *elb.DeleteLoadBalancerListenersOutput) + + DeleteLoadBalancerListeners(*elb.DeleteLoadBalancerListenersInput) (*elb.DeleteLoadBalancerListenersOutput, error) + + DeleteLoadBalancerPolicyRequest(*elb.DeleteLoadBalancerPolicyInput) (*request.Request, *elb.DeleteLoadBalancerPolicyOutput) + + DeleteLoadBalancerPolicy(*elb.DeleteLoadBalancerPolicyInput) (*elb.DeleteLoadBalancerPolicyOutput, error) + + DeregisterInstancesFromLoadBalancerRequest(*elb.DeregisterInstancesFromLoadBalancerInput) (*request.Request, *elb.DeregisterInstancesFromLoadBalancerOutput) + + DeregisterInstancesFromLoadBalancer(*elb.DeregisterInstancesFromLoadBalancerInput) (*elb.DeregisterInstancesFromLoadBalancerOutput, error) + + DescribeInstanceHealthRequest(*elb.DescribeInstanceHealthInput) (*request.Request, *elb.DescribeInstanceHealthOutput) + + DescribeInstanceHealth(*elb.DescribeInstanceHealthInput) (*elb.DescribeInstanceHealthOutput, error) + + DescribeLoadBalancerAttributesRequest(*elb.DescribeLoadBalancerAttributesInput) (*request.Request, *elb.DescribeLoadBalancerAttributesOutput) + + 
DescribeLoadBalancerAttributes(*elb.DescribeLoadBalancerAttributesInput) (*elb.DescribeLoadBalancerAttributesOutput, error) + + DescribeLoadBalancerPoliciesRequest(*elb.DescribeLoadBalancerPoliciesInput) (*request.Request, *elb.DescribeLoadBalancerPoliciesOutput) + + DescribeLoadBalancerPolicies(*elb.DescribeLoadBalancerPoliciesInput) (*elb.DescribeLoadBalancerPoliciesOutput, error) + + DescribeLoadBalancerPolicyTypesRequest(*elb.DescribeLoadBalancerPolicyTypesInput) (*request.Request, *elb.DescribeLoadBalancerPolicyTypesOutput) + + DescribeLoadBalancerPolicyTypes(*elb.DescribeLoadBalancerPolicyTypesInput) (*elb.DescribeLoadBalancerPolicyTypesOutput, error) + + DescribeLoadBalancersRequest(*elb.DescribeLoadBalancersInput) (*request.Request, *elb.DescribeLoadBalancersOutput) + + DescribeLoadBalancers(*elb.DescribeLoadBalancersInput) (*elb.DescribeLoadBalancersOutput, error) + + DescribeLoadBalancersPages(*elb.DescribeLoadBalancersInput, func(*elb.DescribeLoadBalancersOutput, bool) bool) error + + DescribeTagsRequest(*elb.DescribeTagsInput) (*request.Request, *elb.DescribeTagsOutput) + + DescribeTags(*elb.DescribeTagsInput) (*elb.DescribeTagsOutput, error) + + DetachLoadBalancerFromSubnetsRequest(*elb.DetachLoadBalancerFromSubnetsInput) (*request.Request, *elb.DetachLoadBalancerFromSubnetsOutput) + + DetachLoadBalancerFromSubnets(*elb.DetachLoadBalancerFromSubnetsInput) (*elb.DetachLoadBalancerFromSubnetsOutput, error) + + DisableAvailabilityZonesForLoadBalancerRequest(*elb.DisableAvailabilityZonesForLoadBalancerInput) (*request.Request, *elb.DisableAvailabilityZonesForLoadBalancerOutput) + + DisableAvailabilityZonesForLoadBalancer(*elb.DisableAvailabilityZonesForLoadBalancerInput) (*elb.DisableAvailabilityZonesForLoadBalancerOutput, error) + + EnableAvailabilityZonesForLoadBalancerRequest(*elb.EnableAvailabilityZonesForLoadBalancerInput) (*request.Request, *elb.EnableAvailabilityZonesForLoadBalancerOutput) + + EnableAvailabilityZonesForLoadBalancer(*elb.EnableAvailabilityZonesForLoadBalancerInput) (*elb.EnableAvailabilityZonesForLoadBalancerOutput, error) + + ModifyLoadBalancerAttributesRequest(*elb.ModifyLoadBalancerAttributesInput) (*request.Request, *elb.ModifyLoadBalancerAttributesOutput) + + ModifyLoadBalancerAttributes(*elb.ModifyLoadBalancerAttributesInput) (*elb.ModifyLoadBalancerAttributesOutput, error) + + RegisterInstancesWithLoadBalancerRequest(*elb.RegisterInstancesWithLoadBalancerInput) (*request.Request, *elb.RegisterInstancesWithLoadBalancerOutput) + + RegisterInstancesWithLoadBalancer(*elb.RegisterInstancesWithLoadBalancerInput) (*elb.RegisterInstancesWithLoadBalancerOutput, error) + + RemoveTagsRequest(*elb.RemoveTagsInput) (*request.Request, *elb.RemoveTagsOutput) + + RemoveTags(*elb.RemoveTagsInput) (*elb.RemoveTagsOutput, error) + + SetLoadBalancerListenerSSLCertificateRequest(*elb.SetLoadBalancerListenerSSLCertificateInput) (*request.Request, *elb.SetLoadBalancerListenerSSLCertificateOutput) + + SetLoadBalancerListenerSSLCertificate(*elb.SetLoadBalancerListenerSSLCertificateInput) (*elb.SetLoadBalancerListenerSSLCertificateOutput, error) + + SetLoadBalancerPoliciesForBackendServerRequest(*elb.SetLoadBalancerPoliciesForBackendServerInput) (*request.Request, *elb.SetLoadBalancerPoliciesForBackendServerOutput) + + SetLoadBalancerPoliciesForBackendServer(*elb.SetLoadBalancerPoliciesForBackendServerInput) (*elb.SetLoadBalancerPoliciesForBackendServerOutput, error) + + SetLoadBalancerPoliciesOfListenerRequest(*elb.SetLoadBalancerPoliciesOfListenerInput) (*request.Request, 
*elb.SetLoadBalancerPoliciesOfListenerOutput) + + SetLoadBalancerPoliciesOfListener(*elb.SetLoadBalancerPoliciesOfListenerInput) (*elb.SetLoadBalancerPoliciesOfListenerOutput, error) +} + +var _ ELBAPI = (*elb.ELB)(nil) diff --git a/vendor/github.com/aws/aws-sdk-go/service/elb/examples_test.go b/vendor/github.com/aws/aws-sdk-go/service/elb/examples_test.go new file mode 100644 index 000000000..d1da802e6 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/elb/examples_test.go @@ -0,0 +1,722 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +package elb_test + +import ( + "bytes" + "fmt" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/session" + "github.com/aws/aws-sdk-go/service/elb" +) + +var _ time.Duration +var _ bytes.Buffer + +func ExampleELB_AddTags() { + svc := elb.New(session.New()) + + params := &elb.AddTagsInput{ + LoadBalancerNames: []*string{ // Required + aws.String("AccessPointName"), // Required + // More values... + }, + Tags: []*elb.Tag{ // Required + { // Required + Key: aws.String("TagKey"), // Required + Value: aws.String("TagValue"), + }, + // More values... + }, + } + resp, err := svc.AddTags(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleELB_ApplySecurityGroupsToLoadBalancer() { + svc := elb.New(session.New()) + + params := &elb.ApplySecurityGroupsToLoadBalancerInput{ + LoadBalancerName: aws.String("AccessPointName"), // Required + SecurityGroups: []*string{ // Required + aws.String("SecurityGroupId"), // Required + // More values... + }, + } + resp, err := svc.ApplySecurityGroupsToLoadBalancer(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleELB_AttachLoadBalancerToSubnets() { + svc := elb.New(session.New()) + + params := &elb.AttachLoadBalancerToSubnetsInput{ + LoadBalancerName: aws.String("AccessPointName"), // Required + Subnets: []*string{ // Required + aws.String("SubnetId"), // Required + // More values... + }, + } + resp, err := svc.AttachLoadBalancerToSubnets(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleELB_ConfigureHealthCheck() { + svc := elb.New(session.New()) + + params := &elb.ConfigureHealthCheckInput{ + HealthCheck: &elb.HealthCheck{ // Required + HealthyThreshold: aws.Int64(1), // Required + Interval: aws.Int64(1), // Required + Target: aws.String("HealthCheckTarget"), // Required + Timeout: aws.Int64(1), // Required + UnhealthyThreshold: aws.Int64(1), // Required + }, + LoadBalancerName: aws.String("AccessPointName"), // Required + } + resp, err := svc.ConfigureHealthCheck(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleELB_CreateAppCookieStickinessPolicy() { + svc := elb.New(session.New()) + + params := &elb.CreateAppCookieStickinessPolicyInput{ + CookieName: aws.String("CookieName"), // Required + LoadBalancerName: aws.String("AccessPointName"), // Required + PolicyName: aws.String("PolicyName"), // Required + } + resp, err := svc.CreateAppCookieStickinessPolicy(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleELB_CreateLBCookieStickinessPolicy() { + svc := elb.New(session.New()) + + params := &elb.CreateLBCookieStickinessPolicyInput{ + LoadBalancerName: aws.String("AccessPointName"), // Required + PolicyName: aws.String("PolicyName"), // Required + CookieExpirationPeriod: aws.Int64(1), + } + resp, err := svc.CreateLBCookieStickinessPolicy(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleELB_CreateLoadBalancer() { + svc := elb.New(session.New()) + + params := &elb.CreateLoadBalancerInput{ + Listeners: []*elb.Listener{ // Required + { // Required + InstancePort: aws.Int64(1), // Required + LoadBalancerPort: aws.Int64(1), // Required + Protocol: aws.String("Protocol"), // Required + InstanceProtocol: aws.String("Protocol"), + SSLCertificateId: aws.String("SSLCertificateId"), + }, + // More values... + }, + LoadBalancerName: aws.String("AccessPointName"), // Required + AvailabilityZones: []*string{ + aws.String("AvailabilityZone"), // Required + // More values... + }, + Scheme: aws.String("LoadBalancerScheme"), + SecurityGroups: []*string{ + aws.String("SecurityGroupId"), // Required + // More values... + }, + Subnets: []*string{ + aws.String("SubnetId"), // Required + // More values... + }, + Tags: []*elb.Tag{ + { // Required + Key: aws.String("TagKey"), // Required + Value: aws.String("TagValue"), + }, + // More values... + }, + } + resp, err := svc.CreateLoadBalancer(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleELB_CreateLoadBalancerListeners() { + svc := elb.New(session.New()) + + params := &elb.CreateLoadBalancerListenersInput{ + Listeners: []*elb.Listener{ // Required + { // Required + InstancePort: aws.Int64(1), // Required + LoadBalancerPort: aws.Int64(1), // Required + Protocol: aws.String("Protocol"), // Required + InstanceProtocol: aws.String("Protocol"), + SSLCertificateId: aws.String("SSLCertificateId"), + }, + // More values... + }, + LoadBalancerName: aws.String("AccessPointName"), // Required + } + resp, err := svc.CreateLoadBalancerListeners(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleELB_CreateLoadBalancerPolicy() { + svc := elb.New(session.New()) + + params := &elb.CreateLoadBalancerPolicyInput{ + LoadBalancerName: aws.String("AccessPointName"), // Required + PolicyName: aws.String("PolicyName"), // Required + PolicyTypeName: aws.String("PolicyTypeName"), // Required + PolicyAttributes: []*elb.PolicyAttribute{ + { // Required + AttributeName: aws.String("AttributeName"), + AttributeValue: aws.String("AttributeValue"), + }, + // More values... + }, + } + resp, err := svc.CreateLoadBalancerPolicy(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleELB_DeleteLoadBalancer() { + svc := elb.New(session.New()) + + params := &elb.DeleteLoadBalancerInput{ + LoadBalancerName: aws.String("AccessPointName"), // Required + } + resp, err := svc.DeleteLoadBalancer(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleELB_DeleteLoadBalancerListeners() { + svc := elb.New(session.New()) + + params := &elb.DeleteLoadBalancerListenersInput{ + LoadBalancerName: aws.String("AccessPointName"), // Required + LoadBalancerPorts: []*int64{ // Required + aws.Int64(1), // Required + // More values... + }, + } + resp, err := svc.DeleteLoadBalancerListeners(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleELB_DeleteLoadBalancerPolicy() { + svc := elb.New(session.New()) + + params := &elb.DeleteLoadBalancerPolicyInput{ + LoadBalancerName: aws.String("AccessPointName"), // Required + PolicyName: aws.String("PolicyName"), // Required + } + resp, err := svc.DeleteLoadBalancerPolicy(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleELB_DeregisterInstancesFromLoadBalancer() { + svc := elb.New(session.New()) + + params := &elb.DeregisterInstancesFromLoadBalancerInput{ + Instances: []*elb.Instance{ // Required + { // Required + InstanceId: aws.String("InstanceId"), + }, + // More values... + }, + LoadBalancerName: aws.String("AccessPointName"), // Required + } + resp, err := svc.DeregisterInstancesFromLoadBalancer(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleELB_DescribeInstanceHealth() { + svc := elb.New(session.New()) + + params := &elb.DescribeInstanceHealthInput{ + LoadBalancerName: aws.String("AccessPointName"), // Required + Instances: []*elb.Instance{ + { // Required + InstanceId: aws.String("InstanceId"), + }, + // More values... + }, + } + resp, err := svc.DescribeInstanceHealth(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleELB_DescribeLoadBalancerAttributes() { + svc := elb.New(session.New()) + + params := &elb.DescribeLoadBalancerAttributesInput{ + LoadBalancerName: aws.String("AccessPointName"), // Required + } + resp, err := svc.DescribeLoadBalancerAttributes(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleELB_DescribeLoadBalancerPolicies() { + svc := elb.New(session.New()) + + params := &elb.DescribeLoadBalancerPoliciesInput{ + LoadBalancerName: aws.String("AccessPointName"), + PolicyNames: []*string{ + aws.String("PolicyName"), // Required + // More values... + }, + } + resp, err := svc.DescribeLoadBalancerPolicies(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleELB_DescribeLoadBalancerPolicyTypes() { + svc := elb.New(session.New()) + + params := &elb.DescribeLoadBalancerPolicyTypesInput{ + PolicyTypeNames: []*string{ + aws.String("PolicyTypeName"), // Required + // More values... + }, + } + resp, err := svc.DescribeLoadBalancerPolicyTypes(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleELB_DescribeLoadBalancers() { + svc := elb.New(session.New()) + + params := &elb.DescribeLoadBalancersInput{ + LoadBalancerNames: []*string{ + aws.String("AccessPointName"), // Required + // More values... + }, + Marker: aws.String("Marker"), + PageSize: aws.Int64(1), + } + resp, err := svc.DescribeLoadBalancers(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleELB_DescribeTags() { + svc := elb.New(session.New()) + + params := &elb.DescribeTagsInput{ + LoadBalancerNames: []*string{ // Required + aws.String("AccessPointName"), // Required + // More values... + }, + } + resp, err := svc.DescribeTags(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleELB_DetachLoadBalancerFromSubnets() { + svc := elb.New(session.New()) + + params := &elb.DetachLoadBalancerFromSubnetsInput{ + LoadBalancerName: aws.String("AccessPointName"), // Required + Subnets: []*string{ // Required + aws.String("SubnetId"), // Required + // More values... + }, + } + resp, err := svc.DetachLoadBalancerFromSubnets(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleELB_DisableAvailabilityZonesForLoadBalancer() { + svc := elb.New(session.New()) + + params := &elb.DisableAvailabilityZonesForLoadBalancerInput{ + AvailabilityZones: []*string{ // Required + aws.String("AvailabilityZone"), // Required + // More values... 
+ }, + LoadBalancerName: aws.String("AccessPointName"), // Required + } + resp, err := svc.DisableAvailabilityZonesForLoadBalancer(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleELB_EnableAvailabilityZonesForLoadBalancer() { + svc := elb.New(session.New()) + + params := &elb.EnableAvailabilityZonesForLoadBalancerInput{ + AvailabilityZones: []*string{ // Required + aws.String("AvailabilityZone"), // Required + // More values... + }, + LoadBalancerName: aws.String("AccessPointName"), // Required + } + resp, err := svc.EnableAvailabilityZonesForLoadBalancer(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleELB_ModifyLoadBalancerAttributes() { + svc := elb.New(session.New()) + + params := &elb.ModifyLoadBalancerAttributesInput{ + LoadBalancerAttributes: &elb.LoadBalancerAttributes{ // Required + AccessLog: &elb.AccessLog{ + Enabled: aws.Bool(true), // Required + EmitInterval: aws.Int64(1), + S3BucketName: aws.String("S3BucketName"), + S3BucketPrefix: aws.String("AccessLogPrefix"), + }, + AdditionalAttributes: []*elb.AdditionalAttribute{ + { // Required + Key: aws.String("StringVal"), + Value: aws.String("StringVal"), + }, + // More values... + }, + ConnectionDraining: &elb.ConnectionDraining{ + Enabled: aws.Bool(true), // Required + Timeout: aws.Int64(1), + }, + ConnectionSettings: &elb.ConnectionSettings{ + IdleTimeout: aws.Int64(1), // Required + }, + CrossZoneLoadBalancing: &elb.CrossZoneLoadBalancing{ + Enabled: aws.Bool(true), // Required + }, + }, + LoadBalancerName: aws.String("AccessPointName"), // Required + } + resp, err := svc.ModifyLoadBalancerAttributes(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleELB_RegisterInstancesWithLoadBalancer() { + svc := elb.New(session.New()) + + params := &elb.RegisterInstancesWithLoadBalancerInput{ + Instances: []*elb.Instance{ // Required + { // Required + InstanceId: aws.String("InstanceId"), + }, + // More values... + }, + LoadBalancerName: aws.String("AccessPointName"), // Required + } + resp, err := svc.RegisterInstancesWithLoadBalancer(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleELB_RemoveTags() { + svc := elb.New(session.New()) + + params := &elb.RemoveTagsInput{ + LoadBalancerNames: []*string{ // Required + aws.String("AccessPointName"), // Required + // More values... + }, + Tags: []*elb.TagKeyOnly{ // Required + { // Required + Key: aws.String("TagKey"), + }, + // More values... + }, + } + resp, err := svc.RemoveTags(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+	fmt.Println(resp)
+}
+
+func ExampleELB_SetLoadBalancerListenerSSLCertificate() {
+	svc := elb.New(session.New())
+
+	params := &elb.SetLoadBalancerListenerSSLCertificateInput{
+		LoadBalancerName: aws.String("AccessPointName"),    // Required
+		LoadBalancerPort: aws.Int64(1),                     // Required
+		SSLCertificateId: aws.String("SSLCertificateId"),   // Required
+	}
+	resp, err := svc.SetLoadBalancerListenerSSLCertificate(params)
+
+	if err != nil {
+		// Print the error, cast err to awserr.Error to get the Code and
+		// Message from an error.
+		fmt.Println(err.Error())
+		return
+	}
+
+	// Pretty-print the response data.
+	fmt.Println(resp)
+}
+
+func ExampleELB_SetLoadBalancerPoliciesForBackendServer() {
+	svc := elb.New(session.New())
+
+	params := &elb.SetLoadBalancerPoliciesForBackendServerInput{
+		InstancePort:     aws.Int64(1),                  // Required
+		LoadBalancerName: aws.String("AccessPointName"), // Required
+		PolicyNames: []*string{ // Required
+			aws.String("PolicyName"), // Required
+			// More values...
+		},
+	}
+	resp, err := svc.SetLoadBalancerPoliciesForBackendServer(params)
+
+	if err != nil {
+		// Print the error, cast err to awserr.Error to get the Code and
+		// Message from an error.
+		fmt.Println(err.Error())
+		return
+	}
+
+	// Pretty-print the response data.
+	fmt.Println(resp)
+}
+
+func ExampleELB_SetLoadBalancerPoliciesOfListener() {
+	svc := elb.New(session.New())
+
+	params := &elb.SetLoadBalancerPoliciesOfListenerInput{
+		LoadBalancerName: aws.String("AccessPointName"), // Required
+		LoadBalancerPort: aws.Int64(1),                  // Required
+		PolicyNames: []*string{ // Required
+			aws.String("PolicyName"), // Required
+			// More values...
+		},
+	}
+	resp, err := svc.SetLoadBalancerPoliciesOfListener(params)
+
+	if err != nil {
+		// Print the error, cast err to awserr.Error to get the Code and
+		// Message from an error.
+		fmt.Println(err.Error())
+		return
+	}
+
+	// Pretty-print the response data.
+	fmt.Println(resp)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/service/elb/service.go b/vendor/github.com/aws/aws-sdk-go/service/elb/service.go
new file mode 100644
index 000000000..428fea0ac
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/elb/service.go
@@ -0,0 +1,98 @@
+// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT.
+
+package elb
+
+import (
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/aws/client"
+	"github.com/aws/aws-sdk-go/aws/client/metadata"
+	"github.com/aws/aws-sdk-go/aws/request"
+	"github.com/aws/aws-sdk-go/private/protocol/query"
+	"github.com/aws/aws-sdk-go/private/signer/v4"
+)
+
+// Elastic Load Balancing distributes incoming traffic across your EC2 instances.
+//
+// For information about the features of Elastic Load Balancing, see What Is
+// Elastic Load Balancing? (http://docs.aws.amazon.com/ElasticLoadBalancing/latest/DeveloperGuide/elastic-load-balancing.html)
+// in the Elastic Load Balancing Developer Guide.
+//
+// For information about the AWS regions supported by Elastic Load Balancing,
+// see Regions and Endpoints - Elastic Load Balancing (http://docs.aws.amazon.com/general/latest/gr/rande.html#elb_region)
+// in the Amazon Web Services General Reference.
+//
+// All Elastic Load Balancing operations are idempotent, which means that they
+// complete at most one time. If you repeat an operation, it succeeds with a
+// 200 OK response code.
+//
+// The service client's operations are safe to be used concurrently.
+// It is not safe to mutate any of the client's properties though.
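+//
+// As an editor's sketch of that concurrent use (the goroutine pattern and the
+// load balancer names below are illustrative, not part of the generated code),
+// a single client may be shared across goroutines:
+//
+//	svc := elb.New(session.New())
+//	var wg sync.WaitGroup
+//	for _, name := range []string{"lb-a", "lb-b"} {
+//		wg.Add(1)
+//		go func(n string) {
+//			defer wg.Done()
+//			_, err := svc.DescribeLoadBalancerAttributes(&elb.DescribeLoadBalancerAttributesInput{
+//				LoadBalancerName: aws.String(n),
+//			})
+//			if err != nil {
+//				fmt.Println(err)
+//			}
+//		}(name)
+//	}
+//	wg.Wait()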
+type ELB struct { + *client.Client +} + +// Used for custom client initialization logic +var initClient func(*client.Client) + +// Used for custom request initialization logic +var initRequest func(*request.Request) + +// A ServiceName is the name of the service the client will make API calls to. +const ServiceName = "elasticloadbalancing" + +// New creates a new instance of the ELB client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a ELB client from just a session. +// svc := elb.New(mySession) +// +// // Create a ELB client with additional configuration +// svc := elb.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func New(p client.ConfigProvider, cfgs ...*aws.Config) *ELB { + c := p.ClientConfig(ServiceName, cfgs...) + return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *ELB { + svc := &ELB{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: ServiceName, + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "2012-06-01", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Build.PushBack(query.Build) + svc.Handlers.Unmarshal.PushBack(query.Unmarshal) + svc.Handlers.UnmarshalMeta.PushBack(query.UnmarshalMeta) + svc.Handlers.UnmarshalError.PushBack(query.UnmarshalError) + + // Run custom client initialization if present + if initClient != nil { + initClient(svc.Client) + } + + return svc +} + +// newRequest creates a new request for a ELB operation and runs any +// custom request initialization. +func (c *ELB) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + // Run custom request initialization if present + if initRequest != nil { + initRequest(req) + } + + return req +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/elb/waiters.go b/vendor/github.com/aws/aws-sdk-go/service/elb/waiters.go new file mode 100644 index 000000000..5d5755d04 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/elb/waiters.go @@ -0,0 +1,53 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. 
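+//
+// The waiters defined below poll DescribeInstanceHealth (every 15 seconds,
+// up to 40 attempts) until the registered instances report the expected
+// state. A minimal usage sketch, added here for illustration (the load
+// balancer name is a placeholder, not part of the generated code):
+//
+//	svc := elb.New(session.New())
+//	err := svc.WaitUntilInstanceInService(&elb.DescribeInstanceHealthInput{
+//		LoadBalancerName: aws.String("my-load-balancer"),
+//	})
+//	if err != nil {
+//		// A request failed, or the instances never all reached InService
+//		// within the waiter's retry budget.
+//	}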
+ +package elb + +import ( + "github.com/aws/aws-sdk-go/private/waiter" +) + +func (c *ELB) WaitUntilAnyInstanceInService(input *DescribeInstanceHealthInput) error { + waiterCfg := waiter.Config{ + Operation: "DescribeInstanceHealth", + Delay: 15, + MaxAttempts: 40, + Acceptors: []waiter.WaitAcceptor{ + { + State: "success", + Matcher: "pathAny", + Argument: "InstanceStates[].State", + Expected: "InService", + }, + }, + } + + w := waiter.Waiter{ + Client: c, + Input: input, + Config: waiterCfg, + } + return w.Wait() +} + +func (c *ELB) WaitUntilInstanceInService(input *DescribeInstanceHealthInput) error { + waiterCfg := waiter.Config{ + Operation: "DescribeInstanceHealth", + Delay: 15, + MaxAttempts: 40, + Acceptors: []waiter.WaitAcceptor{ + { + State: "success", + Matcher: "pathAll", + Argument: "InstanceStates[].State", + Expected: "InService", + }, + }, + } + + w := waiter.Waiter{ + Client: c, + Input: input, + Config: waiterCfg, + } + return w.Wait() +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/firehose/api.go b/vendor/github.com/aws/aws-sdk-go/service/firehose/api.go new file mode 100644 index 000000000..87392dcc2 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/firehose/api.go @@ -0,0 +1,1163 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +// Package firehose provides a client for Amazon Kinesis Firehose. +package firehose + +import ( + "time" + + "github.com/aws/aws-sdk-go/aws/awsutil" + "github.com/aws/aws-sdk-go/aws/request" +) + +const opCreateDeliveryStream = "CreateDeliveryStream" + +// CreateDeliveryStreamRequest generates a request for the CreateDeliveryStream operation. +func (c *Firehose) CreateDeliveryStreamRequest(input *CreateDeliveryStreamInput) (req *request.Request, output *CreateDeliveryStreamOutput) { + op := &request.Operation{ + Name: opCreateDeliveryStream, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateDeliveryStreamInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateDeliveryStreamOutput{} + req.Data = output + return +} + +// Creates a delivery stream. +// +// CreateDeliveryStream is an asynchronous operation that immediately returns. +// The initial status of the delivery stream is CREATING. After the delivery +// stream is created, its status is ACTIVE and it now accepts data. Attempts +// to send data to a delivery stream that is not in the ACTIVE state cause an +// exception. To check the state of a delivery stream, use DescribeDeliveryStream. +// +// The name of a delivery stream identifies it. You can't have two delivery +// streams with the same name in the same region. Two delivery streams in different +// AWS accounts or different regions in the same AWS account can have the same +// name. +// +// By default, you can create up to 5 delivery streams per region. +// +// A delivery stream can only be configured with a single destination, Amazon +// S3 or Amazon Redshift. For correct CreateDeliveryStream request syntax, specify +// only one destination configuration parameter: either RedshiftDestinationConfiguration +// or S3DestinationConfiguration +// +// As part of S3DestinationConfiguration, optional values BufferingHints, EncryptionConfiguration, +// and CompressionFormat can be provided. By default, if no BufferingHints value +// is provided, Amazon Kinesis Firehose buffers data up to 5 MB or for 5 minutes, +// whichever condition is satisfied first. 
+// Note that BufferingHints is a hint,
+// so there are some cases where the service cannot adhere to these conditions
+// strictly; for example, record boundaries are such that the size is a little
+// over or under the configured buffering size. By default, no encryption is
+// performed. We strongly recommend that you enable encryption to ensure secure
+// data storage in Amazon S3.
+//
+// A few notes about RedshiftDestinationConfiguration:
+//
+// An Amazon Redshift destination requires an S3 bucket as an intermediate location,
+// as Amazon Kinesis Firehose first delivers data to S3 and then uses COPY syntax
+// to load data into an Amazon Redshift table. This is specified in the RedshiftDestinationConfiguration.S3Configuration
+// parameter element. The compression formats SNAPPY or ZIP cannot be specified
+// in RedshiftDestinationConfiguration.S3Configuration because the Amazon Redshift
+// COPY operation that reads from the S3 bucket doesn't support these compression
+// formats. We strongly recommend that the username and password provided are
+// used exclusively for Amazon Kinesis Firehose purposes, and that the permissions
+// for the account are restricted to Amazon Redshift INSERT permissions. Amazon
+// Kinesis Firehose assumes the IAM role that is configured as part of the destination.
+// The IAM role should allow the Amazon Kinesis Firehose principal to assume
+// the role, and the role should have permissions that allow the service to
+// deliver the data. For more information, see Amazon S3 Bucket Access (http://docs.aws.amazon.com/firehose/latest/dev/controlling-access.html#using-iam-s3)
+// in the Amazon Kinesis Firehose Developer Guide.
+func (c *Firehose) CreateDeliveryStream(input *CreateDeliveryStreamInput) (*CreateDeliveryStreamOutput, error) {
+	req, out := c.CreateDeliveryStreamRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+const opDeleteDeliveryStream = "DeleteDeliveryStream"
+
+// DeleteDeliveryStreamRequest generates a request for the DeleteDeliveryStream operation.
+func (c *Firehose) DeleteDeliveryStreamRequest(input *DeleteDeliveryStreamInput) (req *request.Request, output *DeleteDeliveryStreamOutput) {
+	op := &request.Operation{
+		Name:       opDeleteDeliveryStream,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &DeleteDeliveryStreamInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &DeleteDeliveryStreamOutput{}
+	req.Data = output
+	return
+}
+
+// Deletes a delivery stream and its data.
+//
+// You can delete a delivery stream only if it is in the ACTIVE or DELETING state,
+// and not in the CREATING state. While the deletion request is in process,
+// the delivery stream is in the DELETING state.
+//
+// To check the state of a delivery stream, use DescribeDeliveryStream.
+//
+// While the delivery stream is in the DELETING state, the service may continue to
+// accept the records, but the service doesn't make any guarantees with respect
+// to delivering the data. Therefore, as a best practice, you should first stop
+// any applications that are sending records before deleting a delivery stream.
+func (c *Firehose) DeleteDeliveryStream(input *DeleteDeliveryStreamInput) (*DeleteDeliveryStreamOutput, error) {
+	req, out := c.DeleteDeliveryStreamRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+const opDescribeDeliveryStream = "DescribeDeliveryStream"
+
+// DescribeDeliveryStreamRequest generates a request for the DescribeDeliveryStream operation.
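+//
+// A sketch of the request/send pattern these generated methods share (the
+// stream name is an illustrative assumption):
+//
+//	req, out := svc.DescribeDeliveryStreamRequest(&firehose.DescribeDeliveryStreamInput{
+//		DeliveryStreamName: aws.String("example-stream"), // hypothetical
+//	})
+//	if err := req.Send(); err == nil {
+//		fmt.Println(out) // out is populated once Send succeeds
+//	}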
+func (c *Firehose) DescribeDeliveryStreamRequest(input *DescribeDeliveryStreamInput) (req *request.Request, output *DescribeDeliveryStreamOutput) { + op := &request.Operation{ + Name: opDescribeDeliveryStream, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeDeliveryStreamInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeDeliveryStreamOutput{} + req.Data = output + return +} + +// Describes the specified delivery stream and gets the status. For example, +// after your delivery stream is created, call DescribeDeliveryStream to see +// if the delivery stream is ACTIVE and therefore ready for data to be sent +// to it. +func (c *Firehose) DescribeDeliveryStream(input *DescribeDeliveryStreamInput) (*DescribeDeliveryStreamOutput, error) { + req, out := c.DescribeDeliveryStreamRequest(input) + err := req.Send() + return out, err +} + +const opListDeliveryStreams = "ListDeliveryStreams" + +// ListDeliveryStreamsRequest generates a request for the ListDeliveryStreams operation. +func (c *Firehose) ListDeliveryStreamsRequest(input *ListDeliveryStreamsInput) (req *request.Request, output *ListDeliveryStreamsOutput) { + op := &request.Operation{ + Name: opListDeliveryStreams, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ListDeliveryStreamsInput{} + } + + req = c.newRequest(op, input, output) + output = &ListDeliveryStreamsOutput{} + req.Data = output + return +} + +// Lists your delivery streams. +// +// The number of delivery streams might be too large to return using a single +// call to ListDeliveryStreams. You can limit the number of delivery streams +// returned, using the Limit parameter. To determine whether there are more +// delivery streams to list, check the value of HasMoreDeliveryStreams in the +// output. If there are more delivery streams to list, you can request them +// by specifying the name of the last delivery stream returned in the call in +// the ExclusiveStartDeliveryStreamName parameter of a subsequent call. +func (c *Firehose) ListDeliveryStreams(input *ListDeliveryStreamsInput) (*ListDeliveryStreamsOutput, error) { + req, out := c.ListDeliveryStreamsRequest(input) + err := req.Send() + return out, err +} + +const opPutRecord = "PutRecord" + +// PutRecordRequest generates a request for the PutRecord operation. +func (c *Firehose) PutRecordRequest(input *PutRecordInput) (req *request.Request, output *PutRecordOutput) { + op := &request.Operation{ + Name: opPutRecord, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &PutRecordInput{} + } + + req = c.newRequest(op, input, output) + output = &PutRecordOutput{} + req.Data = output + return +} + +// Writes a single data record into an Amazon Kinesis Firehose delivery stream. +// To write multiple data records into a delivery stream, use PutRecordBatch. +// Applications using these operations are referred to as producers. +// +// By default, each delivery stream can take in up to 2,000 transactions per +// second, 5,000 records per second, or 5 MB per second. Note that if you use +// PutRecord and PutRecordBatch, the limits are an aggregate across these two +// operations for each delivery stream. For more information about limits and +// how to request an increase, see Amazon Kinesis Firehose Limits (http://docs.aws.amazon.com/firehose/latest/dev/limits.html). +// +// You must specify the name of the delivery stream and the data record when +// using PutRecord. 
+// The data record consists of a data blob that can be up to
+// 1,000 KB in size, and any kind of data, for example, a segment from a log
+// file, geographic location data, web site clickstream data, etc.
+//
+// Amazon Kinesis Firehose buffers records before delivering them to the destination.
+// To disambiguate the data blobs at the destination, a common solution is to
+// use delimiters in the data, such as a newline (\n) or some other character
+// unique within the data. This allows the consumer application(s) to parse
+// individual data items when reading the data from the destination.
+//
+// Amazon Kinesis Firehose does not maintain data record ordering. If the destination
+// data needs to be re-ordered by the consumer application, the producer should
+// include some form of sequence number in each data record.
+//
+// The PutRecord operation returns a RecordId, which is a unique string assigned
+// to each record. Producer applications can use this ID for purposes such as
+// auditability and investigation.
+//
+// If the PutRecord operation throws a ServiceUnavailableException, back off
+// and retry. If the exception persists, it is possible that the throughput
+// limits have been exceeded for the delivery stream.
+//
+// Data records sent to Amazon Kinesis Firehose are stored for 24 hours from
+// the time they are added to a delivery stream as it attempts to send the records
+// to the destination. If the destination is unreachable for more than 24 hours,
+// the data is no longer available.
+func (c *Firehose) PutRecord(input *PutRecordInput) (*PutRecordOutput, error) {
+	req, out := c.PutRecordRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+const opPutRecordBatch = "PutRecordBatch"
+
+// PutRecordBatchRequest generates a request for the PutRecordBatch operation.
+func (c *Firehose) PutRecordBatchRequest(input *PutRecordBatchInput) (req *request.Request, output *PutRecordBatchOutput) {
+	op := &request.Operation{
+		Name:       opPutRecordBatch,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &PutRecordBatchInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &PutRecordBatchOutput{}
+	req.Data = output
+	return
+}
+
+// Writes multiple data records into a delivery stream in a single call, which
+// can achieve higher throughput per producer than when writing single records.
+// To write single data records into a delivery stream, use PutRecord. Applications
+// using these operations are referred to as producers.
+//
+// Each PutRecordBatch request supports up to 500 records. Each record in the
+// request can be as large as 1,000 KB (before base64 encoding), up to a limit
+// of 4 MB for the entire request. By default, each delivery stream can take
+// in up to 2,000 transactions per second, 5,000 records per second, or 5 MB
+// per second. Note that if you use PutRecord and PutRecordBatch, the limits
+// are an aggregate across these two operations for each delivery stream. For
+// more information about limits and how to request an increase, see Amazon
+// Kinesis Firehose Limits (http://docs.aws.amazon.com/firehose/latest/dev/limits.html).
+//
+// You must specify the name of the delivery stream and the data records when
+// using PutRecordBatch. Each data record consists of a data blob that can be up to
+// 1,000 KB in size, and any kind of data, for example, a segment from a log
+// file, geographic location data, web site clickstream data, and so on.
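+//
+// A sketch of a batch put with newline-delimited payloads, per the
+// disambiguation guidance below (stream name and payloads are illustrative
+// assumptions):
+//
+//	resp, err := svc.PutRecordBatch(&firehose.PutRecordBatchInput{
+//		DeliveryStreamName: aws.String("example-stream"), // hypothetical
+//		Records: []*firehose.Record{
+//			{Data: []byte("event-1\n")},
+//			{Data: []byte("event-2\n")},
+//		},
+//	})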
+//
+// Amazon Kinesis Firehose buffers records before delivering them to the destination.
+// To disambiguate the data blobs at the destination, a common solution is to
+// use delimiters in the data, such as a newline (\n) or some other character
+// unique within the data. This allows the consumer application(s) to parse
+// individual data items when reading the data from the destination.
+//
+// The PutRecordBatch response includes a count of any failed records, FailedPutCount,
+// and an array of responses, RequestResponses. The FailedPutCount value is
+// a count of records that failed. Each entry in the RequestResponses array
+// gives additional information about the processed record. Each entry in RequestResponses
+// directly correlates with a record in the request array using the same ordering,
+// from the top to the bottom of the request and response. RequestResponses
+// always includes the same number of records as the request array. RequestResponses
+// includes both successfully and unsuccessfully processed records. Amazon Kinesis Firehose
+// attempts to process all records in each PutRecordBatch request. A single
+// record failure does not stop the processing of subsequent records.
+//
+// A successfully processed record includes a RecordId value, which is a unique
+// identifier for the record. An unsuccessfully processed record includes
+// ErrorCode and ErrorMessage values. ErrorCode reflects the type of error and
+// is one of the following values: ServiceUnavailable or InternalFailure. ErrorMessage
+// provides more detailed information about the error.
+//
+// If FailedPutCount is greater than 0 (zero), retry the request. A retry of
+// the entire batch of records is possible; however, we strongly recommend that
+// you inspect the entire response and resend only those records that failed
+// processing. This minimizes duplicate records and also reduces the total bytes
+// sent (and corresponding charges).
+//
+// If the PutRecordBatch operation throws a ServiceUnavailableException, back
+// off and retry. If the exception persists, it is possible that the throughput
+// limits have been exceeded for the delivery stream.
+//
+// Data records sent to Amazon Kinesis Firehose are stored for 24 hours from
+// the time they are added to a delivery stream as it attempts to send the records
+// to the destination. If the destination is unreachable for more than 24 hours,
+// the data is no longer available.
+func (c *Firehose) PutRecordBatch(input *PutRecordBatchInput) (*PutRecordBatchOutput, error) {
+	req, out := c.PutRecordBatchRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+const opUpdateDestination = "UpdateDestination"
+
+// UpdateDestinationRequest generates a request for the UpdateDestination operation.
+func (c *Firehose) UpdateDestinationRequest(input *UpdateDestinationInput) (req *request.Request, output *UpdateDestinationOutput) {
+	op := &request.Operation{
+		Name:       opUpdateDestination,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &UpdateDestinationInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &UpdateDestinationOutput{}
+	req.Data = output
+	return
+}
+
+// Updates the specified destination of the specified delivery stream.
+//
+// This operation can be used to change the destination type (for example,
+// to replace the Amazon S3 destination with Amazon Redshift) or change the
+// parameters associated with a given destination (for example, to change the
+// bucket name of the Amazon S3 destination).
+// The update may not occur immediately.
+// The target delivery stream remains active while the configurations are updated,
+// so data writes to the delivery stream can continue during this process. The
+// updated configurations are normally effective within a few minutes.
+//
+// If the destination type is the same, Amazon Kinesis Firehose merges the
+// configuration parameters specified in the UpdateDestination request with
+// the destination configuration that already exists on the delivery stream.
+// If any of the parameters are not specified in the update request, then the
+// existing configuration parameters are retained. For example, in the Amazon
+// S3 destination, if EncryptionConfiguration is not specified, then the existing
+// EncryptionConfiguration is maintained on the destination.
+//
+// If the destination type is not the same, for example, changing the destination
+// from Amazon S3 to Amazon Redshift, Amazon Kinesis Firehose does not merge
+// any parameters. In this case, all parameters must be specified.
+//
+// Amazon Kinesis Firehose uses the CurrentDeliveryStreamVersionId to avoid
+// race conditions and conflicting merges. This is a required field in every
+// request, and the service only updates the configuration if the existing configuration
+// matches the VersionId. After the update is applied successfully, the VersionId
+// is updated, and can be retrieved with the DescribeDeliveryStream operation.
+// The new VersionId should be used to set CurrentDeliveryStreamVersionId in
+// the next UpdateDestination operation.
+func (c *Firehose) UpdateDestination(input *UpdateDestinationInput) (*UpdateDestinationOutput, error) {
+	req, out := c.UpdateDestinationRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+// Describes the buffering to perform before delivering data to the destination.
+type BufferingHints struct {
+	_ struct{} `type:"structure"`
+
+	// Buffer incoming data for the specified period of time, in seconds, before
+	// delivering it to the destination. The default value is 300.
+	IntervalInSeconds *int64 `min:"60" type:"integer"`
+
+	// Buffer incoming data to the specified size, in MBs, before delivering it
+	// to the destination. The default value is 5.
+	//
+	// We recommend setting SizeInMBs to a value greater than the amount of data
+	// you typically ingest into the delivery stream in 10 seconds. For example,
+	// if you typically ingest data at 1 MB/sec, set SizeInMBs to be 10 MB or higher.
+	SizeInMBs *int64 `min:"1" type:"integer"`
+}
+
+// String returns the string representation
+func (s BufferingHints) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s BufferingHints) GoString() string {
+	return s.String()
+}
+
+// Describes a COPY command for Amazon Redshift.
+type CopyCommand struct {
+	_ struct{} `type:"structure"`
+
+	// Optional parameters to use with the Amazon Redshift COPY command. For more
+	// information, see the "Optional Parameters" section of Amazon Redshift COPY
+	// command (http://docs.aws.amazon.com/redshift/latest/dg/r_COPY.html). Some
+	// possible examples that would apply to Amazon Kinesis Firehose are as follows.
+	//
+	// delimiter '\t' lzop; - fields are delimited with "\t" (TAB character) and
+	// compressed using lzop.
+	//
+	// delimiter '|' - fields are delimited with "|" (this is the default delimiter).
+	//
+	// delimiter '|' escape - the delimiter should be escaped.
+	//
+	// fixedwidth 'venueid:3,venuename:25,venuecity:12,venuestate:2,venueseats:6'
+	// - fields are fixed width in the source, with each width specified after every
+	// column in the table.
+	//
+	// JSON 's3://mybucket/jsonpaths.txt' - data is in JSON format, and the path
+	// specified is the format of the data.
+	//
+	// For more examples, see Amazon Redshift COPY command examples (http://docs.aws.amazon.com/redshift/latest/dg/r_COPY_command_examples.html).
+	CopyOptions *string `type:"string"`
+
+	// A comma-separated list of column names.
+	DataTableColumns *string `type:"string"`
+
+	// The name of the target table. The table must already exist in the database.
+	DataTableName *string `min:"1" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s CopyCommand) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CopyCommand) GoString() string {
+	return s.String()
+}
+
+// Contains the parameters for CreateDeliveryStream.
+type CreateDeliveryStreamInput struct {
+	_ struct{} `type:"structure"`
+
+	// The name of the delivery stream.
+	DeliveryStreamName *string `min:"1" type:"string" required:"true"`
+
+	// The destination in Amazon Redshift. This value cannot be specified if Amazon
+	// S3 is the desired destination (see restrictions listed above).
+	RedshiftDestinationConfiguration *RedshiftDestinationConfiguration `type:"structure"`
+
+	// The destination in Amazon S3. This value must be specified if RedshiftDestinationConfiguration
+	// is specified (see restrictions listed above).
+	S3DestinationConfiguration *S3DestinationConfiguration `type:"structure"`
+}
+
+// String returns the string representation
+func (s CreateDeliveryStreamInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreateDeliveryStreamInput) GoString() string {
+	return s.String()
+}
+
+// Contains the output of CreateDeliveryStream.
+type CreateDeliveryStreamOutput struct {
+	_ struct{} `type:"structure"`
+
+	// The ARN of the delivery stream.
+	DeliveryStreamARN *string `type:"string"`
+}
+
+// String returns the string representation
+func (s CreateDeliveryStreamOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreateDeliveryStreamOutput) GoString() string {
+	return s.String()
+}
+
+// Contains the parameters for DeleteDeliveryStream.
+type DeleteDeliveryStreamInput struct {
+	_ struct{} `type:"structure"`
+
+	// The name of the delivery stream.
+	DeliveryStreamName *string `min:"1" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s DeleteDeliveryStreamInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteDeliveryStreamInput) GoString() string {
+	return s.String()
+}
+
+// Contains the output of DeleteDeliveryStream.
+type DeleteDeliveryStreamOutput struct {
+	_ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s DeleteDeliveryStreamOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteDeliveryStreamOutput) GoString() string {
+	return s.String()
+}
+
+// Contains information about a delivery stream.
+type DeliveryStreamDescription struct {
+	_ struct{} `type:"structure"`
+
+	// The date and time that the delivery stream was created.
+ CreateTimestamp *time.Time `type:"timestamp" timestampFormat:"unix"` + + // The Amazon Resource Name (ARN) of the delivery stream. + DeliveryStreamARN *string `type:"string" required:"true"` + + // The name of the delivery stream. + DeliveryStreamName *string `min:"1" type:"string" required:"true"` + + // The status of the delivery stream. + DeliveryStreamStatus *string `type:"string" required:"true" enum:"DeliveryStreamStatus"` + + // The destinations. + Destinations []*DestinationDescription `type:"list" required:"true"` + + // Indicates whether there are more destinations available to list. + HasMoreDestinations *bool `type:"boolean" required:"true"` + + // The date and time that the delivery stream was last updated. + LastUpdateTimestamp *time.Time `type:"timestamp" timestampFormat:"unix"` + + // Used when calling the UpdateDestination operation. Each time the destination + // is updated for the delivery stream, the VersionId is changed, and the current + // VersionId is required when updating the destination. This is so that the + // service knows it is applying the changes to the correct version of the delivery + // stream. + VersionId *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeliveryStreamDescription) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeliveryStreamDescription) GoString() string { + return s.String() +} + +// Contains the parameters for DescribeDeliveryStream. +type DescribeDeliveryStreamInput struct { + _ struct{} `type:"structure"` + + // The name of the delivery stream. + DeliveryStreamName *string `min:"1" type:"string" required:"true"` + + // Specifies the destination ID to start returning the destination information. + // Currently Amazon Kinesis Firehose supports one destination per delivery stream. + ExclusiveStartDestinationId *string `min:"1" type:"string"` + + // The limit on the number of destinations to return. Currently, you can have + // one destination per delivery stream. + Limit *int64 `min:"1" type:"integer"` +} + +// String returns the string representation +func (s DescribeDeliveryStreamInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeDeliveryStreamInput) GoString() string { + return s.String() +} + +// Contains the output of DescribeDeliveryStream. +type DescribeDeliveryStreamOutput struct { + _ struct{} `type:"structure"` + + // Information about the delivery stream. + DeliveryStreamDescription *DeliveryStreamDescription `type:"structure" required:"true"` +} + +// String returns the string representation +func (s DescribeDeliveryStreamOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeDeliveryStreamOutput) GoString() string { + return s.String() +} + +// Describes the destination for a delivery stream. +type DestinationDescription struct { + _ struct{} `type:"structure"` + + // The ID of the destination. + DestinationId *string `min:"1" type:"string" required:"true"` + + // The destination in Amazon Redshift. + RedshiftDestinationDescription *RedshiftDestinationDescription `type:"structure"` + + // The Amazon S3 destination. 
+ S3DestinationDescription *S3DestinationDescription `type:"structure"` +} + +// String returns the string representation +func (s DestinationDescription) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DestinationDescription) GoString() string { + return s.String() +} + +// Describes the encryption for a destination in Amazon S3. +type EncryptionConfiguration struct { + _ struct{} `type:"structure"` + + // The encryption key. + KMSEncryptionConfig *KMSEncryptionConfig `type:"structure"` + + // Specifically override existing encryption information to ensure no encryption + // is used. + NoEncryptionConfig *string `type:"string" enum:"NoEncryptionConfig"` +} + +// String returns the string representation +func (s EncryptionConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s EncryptionConfiguration) GoString() string { + return s.String() +} + +// Describes an encryption key for a destination in Amazon S3. +type KMSEncryptionConfig struct { + _ struct{} `type:"structure"` + + // The ARN of the encryption key. Must belong to the same region as the destination + // Amazon S3 bucket. + AWSKMSKeyARN *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s KMSEncryptionConfig) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s KMSEncryptionConfig) GoString() string { + return s.String() +} + +// Contains the parameters for ListDeliveryStreams. +type ListDeliveryStreamsInput struct { + _ struct{} `type:"structure"` + + // The name of the delivery stream to start the list with. + ExclusiveStartDeliveryStreamName *string `min:"1" type:"string"` + + // The maximum number of delivery streams to list. + Limit *int64 `min:"1" type:"integer"` +} + +// String returns the string representation +func (s ListDeliveryStreamsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListDeliveryStreamsInput) GoString() string { + return s.String() +} + +// Contains the output of ListDeliveryStreams. +type ListDeliveryStreamsOutput struct { + _ struct{} `type:"structure"` + + // The names of the delivery streams. + DeliveryStreamNames []*string `type:"list" required:"true"` + + // Indicates whether there are more delivery streams available to list. + HasMoreDeliveryStreams *bool `type:"boolean" required:"true"` +} + +// String returns the string representation +func (s ListDeliveryStreamsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListDeliveryStreamsOutput) GoString() string { + return s.String() +} + +// Contains the parameters for PutRecordBatch. +type PutRecordBatchInput struct { + _ struct{} `type:"structure"` + + // The name of the delivery stream. + DeliveryStreamName *string `min:"1" type:"string" required:"true"` + + // One or more records. + Records []*Record `min:"1" type:"list" required:"true"` +} + +// String returns the string representation +func (s PutRecordBatchInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutRecordBatchInput) GoString() string { + return s.String() +} + +// Contains the output of PutRecordBatch. +type PutRecordBatchOutput struct { + _ struct{} `type:"structure"` + + // The number of unsuccessfully written records. 
+ FailedPutCount *int64 `type:"integer" required:"true"` + + // The results for the individual records. The index of each element matches + // the same index in which records were sent. + RequestResponses []*PutRecordBatchResponseEntry `min:"1" type:"list" required:"true"` +} + +// String returns the string representation +func (s PutRecordBatchOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutRecordBatchOutput) GoString() string { + return s.String() +} + +// Contains the result for an individual record from a PutRecordBatch request. +// If the record is successfully added to your delivery stream, it receives +// a record ID. If the record fails to be added to your delivery stream, the +// result includes an error code and an error message. +type PutRecordBatchResponseEntry struct { + _ struct{} `type:"structure"` + + // The error code for an individual record result. + ErrorCode *string `type:"string"` + + // The error message for an individual record result. + ErrorMessage *string `type:"string"` + + // The ID of the record. + RecordId *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s PutRecordBatchResponseEntry) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutRecordBatchResponseEntry) GoString() string { + return s.String() +} + +// Contains the parameters for PutRecord. +type PutRecordInput struct { + _ struct{} `type:"structure"` + + // The name of the delivery stream. + DeliveryStreamName *string `min:"1" type:"string" required:"true"` + + // The record. + Record *Record `type:"structure" required:"true"` +} + +// String returns the string representation +func (s PutRecordInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutRecordInput) GoString() string { + return s.String() +} + +// Contains the output of PutRecord. +type PutRecordOutput struct { + _ struct{} `type:"structure"` + + // The ID of the record. + RecordId *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s PutRecordOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutRecordOutput) GoString() string { + return s.String() +} + +// The unit of data in a delivery stream. +type Record struct { + _ struct{} `type:"structure"` + + // The data blob, which is base64-encoded when the blob is serialized. The maximum + // size of the data blob, before base64-encoding, is 1,000 KB. + Data []byte `type:"blob" required:"true"` +} + +// String returns the string representation +func (s Record) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Record) GoString() string { + return s.String() +} + +// Describes the configuration of a destination in Amazon Redshift. +type RedshiftDestinationConfiguration struct { + _ struct{} `type:"structure"` + + // The database connection string. + ClusterJDBCURL *string `min:"1" type:"string" required:"true"` + + // The COPY command. + CopyCommand *CopyCommand `type:"structure" required:"true"` + + // The user password. + Password *string `min:"6" type:"string" required:"true"` + + // The ARN of the AWS credentials. + RoleARN *string `min:"1" type:"string" required:"true"` + + // The S3 configuration for the intermediate location from which Amazon Redshift + // obtains data. 
Restrictions are described in the topic for CreateDeliveryStream. + // + // The compression formats SNAPPY or ZIP cannot be specified in RedshiftDestinationConfiguration.S3Configuration + // because the Amazon Redshift COPY operation that reads from the S3 bucket + // doesn't support these compression formats. + S3Configuration *S3DestinationConfiguration `type:"structure" required:"true"` + + // The name of the user. + Username *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s RedshiftDestinationConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RedshiftDestinationConfiguration) GoString() string { + return s.String() +} + +// Describes a destination in Amazon Redshift. +type RedshiftDestinationDescription struct { + _ struct{} `type:"structure"` + + // The database connection string. + ClusterJDBCURL *string `min:"1" type:"string" required:"true"` + + // The COPY command. + CopyCommand *CopyCommand `type:"structure" required:"true"` + + // The ARN of the AWS credentials. + RoleARN *string `min:"1" type:"string" required:"true"` + + // The Amazon S3 destination. + S3DestinationDescription *S3DestinationDescription `type:"structure" required:"true"` + + // The name of the user. + Username *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s RedshiftDestinationDescription) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RedshiftDestinationDescription) GoString() string { + return s.String() +} + +// Describes an update for a destination in Amazon Redshift. +type RedshiftDestinationUpdate struct { + _ struct{} `type:"structure"` + + // The database connection string. + ClusterJDBCURL *string `min:"1" type:"string"` + + // The COPY command. + CopyCommand *CopyCommand `type:"structure"` + + // The user password. + Password *string `min:"6" type:"string"` + + // The ARN of the AWS credentials. + RoleARN *string `min:"1" type:"string"` + + // The Amazon S3 destination. + // + // The compression formats SNAPPY or ZIP cannot be specified in RedshiftDestinationUpdate.S3Update + // because the Amazon Redshift COPY operation that reads from the S3 bucket + // doesn't support these compression formats. + S3Update *S3DestinationUpdate `type:"structure"` + + // The name of the user. + Username *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s RedshiftDestinationUpdate) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RedshiftDestinationUpdate) GoString() string { + return s.String() +} + +// Describes the configuration of a destination in Amazon S3. +type S3DestinationConfiguration struct { + _ struct{} `type:"structure"` + + // The ARN of the S3 bucket. + BucketARN *string `min:"1" type:"string" required:"true"` + + // The buffering option. If no value is specified, BufferingHints object default + // values are used. + BufferingHints *BufferingHints `type:"structure"` + + // The compression format. If no value is specified, the default is UNCOMPRESSED. + // + // The compression formats SNAPPY or ZIP cannot be specified for Amazon Redshift + // destinations because they are not supported by the Amazon Redshift COPY operation + // that reads from the S3 bucket. + CompressionFormat *string `type:"string" enum:"CompressionFormat"` + + // The encryption configuration. 
+	// If no value is specified, the default is no
+	// encryption.
+	EncryptionConfiguration *EncryptionConfiguration `type:"structure"`
+
+	// The "YYYY/MM/DD/HH" time format prefix is automatically used for delivered
+	// S3 files. You can specify an extra prefix to be added in front of the time
+	// format prefix. Note that if the prefix ends with a slash, it appears as a
+	// folder in the S3 bucket. For more information, see Amazon S3 Object Name
+	// Format (http://docs.aws.amazon.com/firehose/latest/dev/basic-deliver.html)
+	// in the Amazon Kinesis Firehose Developer Guide (http://docs.aws.amazon.com/firehose/latest/dev/).
+	Prefix *string `type:"string"`
+
+	// The ARN of the AWS credentials.
+	RoleARN *string `min:"1" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s S3DestinationConfiguration) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s S3DestinationConfiguration) GoString() string {
+	return s.String()
+}
+
+// Describes a destination in Amazon S3.
+type S3DestinationDescription struct {
+	_ struct{} `type:"structure"`
+
+	// The ARN of the S3 bucket.
+	BucketARN *string `min:"1" type:"string" required:"true"`
+
+	// The buffering option. If no value is specified, BufferingHints object default
+	// values are used.
+	BufferingHints *BufferingHints `type:"structure" required:"true"`
+
+	// The compression format. If no value is specified, the default is UNCOMPRESSED.
+	CompressionFormat *string `type:"string" required:"true" enum:"CompressionFormat"`
+
+	// The encryption configuration. If no value is specified, the default is no
+	// encryption.
+	EncryptionConfiguration *EncryptionConfiguration `type:"structure" required:"true"`
+
+	// The "YYYY/MM/DD/HH" time format prefix is automatically used for delivered
+	// S3 files. You can specify an extra prefix to be added in front of the time
+	// format prefix. Note that if the prefix ends with a slash, it appears as a
+	// folder in the S3 bucket. For more information, see Amazon S3 Object Name
+	// Format (http://docs.aws.amazon.com/firehose/latest/dev/basic-deliver.html)
+	// in the Amazon Kinesis Firehose Developer Guide (http://docs.aws.amazon.com/firehose/latest/dev/).
+	Prefix *string `type:"string"`
+
+	// The ARN of the AWS credentials.
+	RoleARN *string `min:"1" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s S3DestinationDescription) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s S3DestinationDescription) GoString() string {
+	return s.String()
+}
+
+// Describes an update for a destination in Amazon S3.
+type S3DestinationUpdate struct {
+	_ struct{} `type:"structure"`
+
+	// The ARN of the S3 bucket.
+	BucketARN *string `min:"1" type:"string"`
+
+	// The buffering option. If no value is specified, BufferingHints object default
+	// values are used.
+	BufferingHints *BufferingHints `type:"structure"`
+
+	// The compression format. If no value is specified, the default is UNCOMPRESSED.
+	//
+	// The compression formats SNAPPY or ZIP cannot be specified for Amazon Redshift
+	// destinations because they are not supported by the Amazon Redshift COPY operation
+	// that reads from the S3 bucket.
+	CompressionFormat *string `type:"string" enum:"CompressionFormat"`
+
+	// The encryption configuration. If no value is specified, the default is no
+	// encryption.
+	EncryptionConfiguration *EncryptionConfiguration `type:"structure"`
+
+	// The "YYYY/MM/DD/HH" time format prefix is automatically used for delivered
+	// S3 files. You can specify an extra prefix to be added in front of the time
+	// format prefix. Note that if the prefix ends with a slash, it appears as a
+	// folder in the S3 bucket. For more information, see Amazon S3 Object Name
+	// Format (http://docs.aws.amazon.com/firehose/latest/dev/basic-deliver.html)
+	// in the Amazon Kinesis Firehose Developer Guide (http://docs.aws.amazon.com/firehose/latest/dev/).
+	Prefix *string `type:"string"`
+
+	// The ARN of the AWS credentials.
+	RoleARN *string `min:"1" type:"string"`
+}
+
+// String returns the string representation
+func (s S3DestinationUpdate) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s S3DestinationUpdate) GoString() string {
+	return s.String()
+}
+
+// Contains the parameters for UpdateDestination.
+type UpdateDestinationInput struct {
+	_ struct{} `type:"structure"`
+
+	// Obtain this value from the VersionId result of the DeliveryStreamDescription
+	// operation. This value is required, and helps the service to perform conditional
+	// operations. For example, if there is an interleaving update and this value
+	// is null, then the update destination fails. After the update is successful,
+	// the VersionId value is updated. The service then performs a merge of the
+	// old configuration with the new configuration.
+	CurrentDeliveryStreamVersionId *string `min:"1" type:"string" required:"true"`
+
+	// The name of the delivery stream.
+	DeliveryStreamName *string `min:"1" type:"string" required:"true"`
+
+	// The ID of the destination.
+	DestinationId *string `min:"1" type:"string" required:"true"`
+
+	// Describes an update for a destination in Amazon Redshift.
+	RedshiftDestinationUpdate *RedshiftDestinationUpdate `type:"structure"`
+
+	// Describes an update for a destination in Amazon S3.
+	S3DestinationUpdate *S3DestinationUpdate `type:"structure"`
+}
+
+// String returns the string representation
+func (s UpdateDestinationInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s UpdateDestinationInput) GoString() string {
+	return s.String()
+}
+
+// Contains the output of UpdateDestination.
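+//
+// A sketch of the versioned round trip that produces this output (the stream
+// name and prefix are illustrative assumptions):
+//
+//	desc, _ := svc.DescribeDeliveryStream(&firehose.DescribeDeliveryStreamInput{
+//		DeliveryStreamName: aws.String("example-stream"), // hypothetical
+//	})
+//	d := desc.DeliveryStreamDescription
+//	_, err := svc.UpdateDestination(&firehose.UpdateDestinationInput{
+//		CurrentDeliveryStreamVersionId: d.VersionId,
+//		DeliveryStreamName:             aws.String("example-stream"),
+//		DestinationId:                  d.Destinations[0].DestinationId,
+//		S3DestinationUpdate:            &firehose.S3DestinationUpdate{Prefix: aws.String("logs/")},
+//	})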
+type UpdateDestinationOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s UpdateDestinationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateDestinationOutput) GoString() string { + return s.String() +} + +const ( + // @enum CompressionFormat + CompressionFormatUncompressed = "UNCOMPRESSED" + // @enum CompressionFormat + CompressionFormatGzip = "GZIP" + // @enum CompressionFormat + CompressionFormatZip = "ZIP" + // @enum CompressionFormat + CompressionFormatSnappy = "Snappy" +) + +const ( + // @enum DeliveryStreamStatus + DeliveryStreamStatusCreating = "CREATING" + // @enum DeliveryStreamStatus + DeliveryStreamStatusDeleting = "DELETING" + // @enum DeliveryStreamStatus + DeliveryStreamStatusActive = "ACTIVE" +) + +const ( + // @enum NoEncryptionConfig + NoEncryptionConfigNoEncryption = "NoEncryption" +) diff --git a/vendor/github.com/aws/aws-sdk-go/service/firehose/examples_test.go b/vendor/github.com/aws/aws-sdk-go/service/firehose/examples_test.go new file mode 100644 index 000000000..9cff4e0af --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/firehose/examples_test.go @@ -0,0 +1,249 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +package firehose_test + +import ( + "bytes" + "fmt" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/session" + "github.com/aws/aws-sdk-go/service/firehose" +) + +var _ time.Duration +var _ bytes.Buffer + +func ExampleFirehose_CreateDeliveryStream() { + svc := firehose.New(session.New()) + + params := &firehose.CreateDeliveryStreamInput{ + DeliveryStreamName: aws.String("DeliveryStreamName"), // Required + RedshiftDestinationConfiguration: &firehose.RedshiftDestinationConfiguration{ + ClusterJDBCURL: aws.String("ClusterJDBCURL"), // Required + CopyCommand: &firehose.CopyCommand{ // Required + DataTableName: aws.String("DataTableName"), // Required + CopyOptions: aws.String("CopyOptions"), + DataTableColumns: aws.String("DataTableColumns"), + }, + Password: aws.String("Password"), // Required + RoleARN: aws.String("RoleARN"), // Required + S3Configuration: &firehose.S3DestinationConfiguration{ // Required + BucketARN: aws.String("BucketARN"), // Required + RoleARN: aws.String("RoleARN"), // Required + BufferingHints: &firehose.BufferingHints{ + IntervalInSeconds: aws.Int64(1), + SizeInMBs: aws.Int64(1), + }, + CompressionFormat: aws.String("CompressionFormat"), + EncryptionConfiguration: &firehose.EncryptionConfiguration{ + KMSEncryptionConfig: &firehose.KMSEncryptionConfig{ + AWSKMSKeyARN: aws.String("AWSKMSKeyARN"), // Required + }, + NoEncryptionConfig: aws.String("NoEncryptionConfig"), + }, + Prefix: aws.String("Prefix"), + }, + Username: aws.String("Username"), // Required + }, + S3DestinationConfiguration: &firehose.S3DestinationConfiguration{ + BucketARN: aws.String("BucketARN"), // Required + RoleARN: aws.String("RoleARN"), // Required + BufferingHints: &firehose.BufferingHints{ + IntervalInSeconds: aws.Int64(1), + SizeInMBs: aws.Int64(1), + }, + CompressionFormat: aws.String("CompressionFormat"), + EncryptionConfiguration: &firehose.EncryptionConfiguration{ + KMSEncryptionConfig: &firehose.KMSEncryptionConfig{ + AWSKMSKeyARN: aws.String("AWSKMSKeyARN"), // Required + }, + NoEncryptionConfig: aws.String("NoEncryptionConfig"), + }, + Prefix: aws.String("Prefix"), + }, + } + resp, err := svc.CreateDeliveryStream(params) + + if err != nil { + // Print the error, cast err to 
awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleFirehose_DeleteDeliveryStream() { + svc := firehose.New(session.New()) + + params := &firehose.DeleteDeliveryStreamInput{ + DeliveryStreamName: aws.String("DeliveryStreamName"), // Required + } + resp, err := svc.DeleteDeliveryStream(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleFirehose_DescribeDeliveryStream() { + svc := firehose.New(session.New()) + + params := &firehose.DescribeDeliveryStreamInput{ + DeliveryStreamName: aws.String("DeliveryStreamName"), // Required + ExclusiveStartDestinationId: aws.String("DestinationId"), + Limit: aws.Int64(1), + } + resp, err := svc.DescribeDeliveryStream(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleFirehose_ListDeliveryStreams() { + svc := firehose.New(session.New()) + + params := &firehose.ListDeliveryStreamsInput{ + ExclusiveStartDeliveryStreamName: aws.String("DeliveryStreamName"), + Limit: aws.Int64(1), + } + resp, err := svc.ListDeliveryStreams(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleFirehose_PutRecord() { + svc := firehose.New(session.New()) + + params := &firehose.PutRecordInput{ + DeliveryStreamName: aws.String("DeliveryStreamName"), // Required + Record: &firehose.Record{ // Required + Data: []byte("PAYLOAD"), // Required + }, + } + resp, err := svc.PutRecord(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleFirehose_PutRecordBatch() { + svc := firehose.New(session.New()) + + params := &firehose.PutRecordBatchInput{ + DeliveryStreamName: aws.String("DeliveryStreamName"), // Required + Records: []*firehose.Record{ // Required + { // Required + Data: []byte("PAYLOAD"), // Required + }, + // More values... + }, + } + resp, err := svc.PutRecordBatch(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
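+	// (A real producer would typically also inspect resp.FailedPutCount and
+	// resend only the failed entries, as the PutRecordBatch docs recommend.)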
+ fmt.Println(resp) +} + +func ExampleFirehose_UpdateDestination() { + svc := firehose.New(session.New()) + + params := &firehose.UpdateDestinationInput{ + CurrentDeliveryStreamVersionId: aws.String("DeliveryStreamVersionId"), // Required + DeliveryStreamName: aws.String("DeliveryStreamName"), // Required + DestinationId: aws.String("DestinationId"), // Required + RedshiftDestinationUpdate: &firehose.RedshiftDestinationUpdate{ + ClusterJDBCURL: aws.String("ClusterJDBCURL"), + CopyCommand: &firehose.CopyCommand{ + DataTableName: aws.String("DataTableName"), // Required + CopyOptions: aws.String("CopyOptions"), + DataTableColumns: aws.String("DataTableColumns"), + }, + Password: aws.String("Password"), + RoleARN: aws.String("RoleARN"), + S3Update: &firehose.S3DestinationUpdate{ + BucketARN: aws.String("BucketARN"), + BufferingHints: &firehose.BufferingHints{ + IntervalInSeconds: aws.Int64(1), + SizeInMBs: aws.Int64(1), + }, + CompressionFormat: aws.String("CompressionFormat"), + EncryptionConfiguration: &firehose.EncryptionConfiguration{ + KMSEncryptionConfig: &firehose.KMSEncryptionConfig{ + AWSKMSKeyARN: aws.String("AWSKMSKeyARN"), // Required + }, + NoEncryptionConfig: aws.String("NoEncryptionConfig"), + }, + Prefix: aws.String("Prefix"), + RoleARN: aws.String("RoleARN"), + }, + Username: aws.String("Username"), + }, + S3DestinationUpdate: &firehose.S3DestinationUpdate{ + BucketARN: aws.String("BucketARN"), + BufferingHints: &firehose.BufferingHints{ + IntervalInSeconds: aws.Int64(1), + SizeInMBs: aws.Int64(1), + }, + CompressionFormat: aws.String("CompressionFormat"), + EncryptionConfiguration: &firehose.EncryptionConfiguration{ + KMSEncryptionConfig: &firehose.KMSEncryptionConfig{ + AWSKMSKeyARN: aws.String("AWSKMSKeyARN"), // Required + }, + NoEncryptionConfig: aws.String("NoEncryptionConfig"), + }, + Prefix: aws.String("Prefix"), + RoleARN: aws.String("RoleARN"), + }, + } + resp, err := svc.UpdateDestination(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/firehose/firehoseiface/interface.go b/vendor/github.com/aws/aws-sdk-go/service/firehose/firehoseiface/interface.go new file mode 100644 index 000000000..0c18342d0 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/firehose/firehoseiface/interface.go @@ -0,0 +1,42 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +// Package firehoseiface provides an interface for the Amazon Kinesis Firehose. +package firehoseiface + +import ( + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/service/firehose" +) + +// FirehoseAPI is the interface type for firehose.Firehose. 
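+//
+// A sketch of the intended use: code that depends on this interface can be
+// tested against a stub instead of the real client (the mock type and ingest
+// helper are illustrative assumptions):
+//
+//	type mockFirehose struct {
+//		firehoseiface.FirehoseAPI // embed and override only what a test needs
+//	}
+//
+//	func ingest(api firehoseiface.FirehoseAPI, rec *firehose.Record) error {
+//		_, err := api.PutRecord(&firehose.PutRecordInput{
+//			DeliveryStreamName: aws.String("example-stream"), // hypothetical
+//			Record:             rec,
+//		})
+//		return err
+//	}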
+type FirehoseAPI interface {
+	CreateDeliveryStreamRequest(*firehose.CreateDeliveryStreamInput) (*request.Request, *firehose.CreateDeliveryStreamOutput)
+
+	CreateDeliveryStream(*firehose.CreateDeliveryStreamInput) (*firehose.CreateDeliveryStreamOutput, error)
+
+	DeleteDeliveryStreamRequest(*firehose.DeleteDeliveryStreamInput) (*request.Request, *firehose.DeleteDeliveryStreamOutput)
+
+	DeleteDeliveryStream(*firehose.DeleteDeliveryStreamInput) (*firehose.DeleteDeliveryStreamOutput, error)
+
+	DescribeDeliveryStreamRequest(*firehose.DescribeDeliveryStreamInput) (*request.Request, *firehose.DescribeDeliveryStreamOutput)
+
+	DescribeDeliveryStream(*firehose.DescribeDeliveryStreamInput) (*firehose.DescribeDeliveryStreamOutput, error)
+
+	ListDeliveryStreamsRequest(*firehose.ListDeliveryStreamsInput) (*request.Request, *firehose.ListDeliveryStreamsOutput)
+
+	ListDeliveryStreams(*firehose.ListDeliveryStreamsInput) (*firehose.ListDeliveryStreamsOutput, error)
+
+	PutRecordRequest(*firehose.PutRecordInput) (*request.Request, *firehose.PutRecordOutput)
+
+	PutRecord(*firehose.PutRecordInput) (*firehose.PutRecordOutput, error)
+
+	PutRecordBatchRequest(*firehose.PutRecordBatchInput) (*request.Request, *firehose.PutRecordBatchOutput)
+
+	PutRecordBatch(*firehose.PutRecordBatchInput) (*firehose.PutRecordBatchOutput, error)
+
+	UpdateDestinationRequest(*firehose.UpdateDestinationInput) (*request.Request, *firehose.UpdateDestinationOutput)
+
+	UpdateDestination(*firehose.UpdateDestinationInput) (*firehose.UpdateDestinationOutput, error)
+}
+
+var _ FirehoseAPI = (*firehose.Firehose)(nil)
diff --git a/vendor/github.com/aws/aws-sdk-go/service/firehose/service.go b/vendor/github.com/aws/aws-sdk-go/service/firehose/service.go
new file mode 100644
index 000000000..cda2888c6
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/firehose/service.go
@@ -0,0 +1,89 @@
+// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT.
+
+package firehose
+
+import (
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/aws/client"
+	"github.com/aws/aws-sdk-go/aws/client/metadata"
+	"github.com/aws/aws-sdk-go/aws/request"
+	"github.com/aws/aws-sdk-go/private/protocol/jsonrpc"
+	"github.com/aws/aws-sdk-go/private/signer/v4"
+)
+
+// Amazon Kinesis Firehose is a fully managed service that delivers real-time
+// streaming data to destinations such as Amazon S3 and Amazon Redshift.
+// The service client's operations are safe to be used concurrently.
+// It is not safe to mutate any of the client's properties though.
+type Firehose struct {
+	*client.Client
+}
+
+// Used for custom client initialization logic
+var initClient func(*client.Client)
+
+// Used for custom request initialization logic
+var initRequest func(*request.Request)
+
+// A ServiceName is the name of the service the client will make API calls to.
+const ServiceName = "firehose"
+
+// New creates a new instance of the Firehose client with a session.
+// If additional configuration is needed for the client instance, use the optional
+// aws.Config parameter to add your extra config.
+//
+// Example:
+//     // Create a Firehose client from just a session.
+//     svc := firehose.New(mySession)
+//
+//     // Create a Firehose client with additional configuration
+//     svc := firehose.New(mySession, aws.NewConfig().WithRegion("us-west-2"))
+func New(p client.ConfigProvider, cfgs ...*aws.Config) *Firehose {
+	c := p.ClientConfig(ServiceName, cfgs...)
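+	// c now carries the resolved endpoint, signing region, and handlers for
+	// this service, as derived from the session/provider configuration.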
+ return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *Firehose { + svc := &Firehose{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: ServiceName, + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "2015-08-04", + JSONVersion: "1.1", + TargetPrefix: "Firehose_20150804", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Build.PushBack(jsonrpc.Build) + svc.Handlers.Unmarshal.PushBack(jsonrpc.Unmarshal) + svc.Handlers.UnmarshalMeta.PushBack(jsonrpc.UnmarshalMeta) + svc.Handlers.UnmarshalError.PushBack(jsonrpc.UnmarshalError) + + // Run custom client initialization if present + if initClient != nil { + initClient(svc.Client) + } + + return svc +} + +// newRequest creates a new request for a Firehose operation and runs any +// custom request initialization. +func (c *Firehose) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + // Run custom request initialization if present + if initRequest != nil { + initRequest(req) + } + + return req +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/glacier/api.go b/vendor/github.com/aws/aws-sdk-go/service/glacier/api.go new file mode 100644 index 000000000..ce8764c50 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/glacier/api.go @@ -0,0 +1,3548 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +// Package glacier provides a client for Amazon Glacier. +package glacier + +import ( + "io" + + "github.com/aws/aws-sdk-go/aws/awsutil" + "github.com/aws/aws-sdk-go/aws/request" +) + +const opAbortMultipartUpload = "AbortMultipartUpload" + +// AbortMultipartUploadRequest generates a request for the AbortMultipartUpload operation. +func (c *Glacier) AbortMultipartUploadRequest(input *AbortMultipartUploadInput) (req *request.Request, output *AbortMultipartUploadOutput) { + op := &request.Operation{ + Name: opAbortMultipartUpload, + HTTPMethod: "DELETE", + HTTPPath: "/{accountId}/vaults/{vaultName}/multipart-uploads/{uploadId}", + } + + if input == nil { + input = &AbortMultipartUploadInput{} + } + + req = c.newRequest(op, input, output) + output = &AbortMultipartUploadOutput{} + req.Data = output + return +} + +// This operation aborts a multipart upload identified by the upload ID. +// +// After the Abort Multipart Upload request succeeds, you cannot upload any +// more parts to the multipart upload or complete the multipart upload. Aborting +// a completed upload fails. However, aborting an already-aborted upload will +// succeed, for a short time. For more information about uploading a part and +// completing a multipart upload, see UploadMultipartPart and CompleteMultipartUpload. +// +// This operation is idempotent. +// +// An AWS account has full permission to perform all operations (actions). +// However, AWS Identity and Access Management (IAM) users don't have any permissions +// by default. You must grant them explicit permission to perform specific actions. +// For more information, see Access Control Using AWS Identity and Access Management +// (IAM) (http://docs.aws.amazon.com/amazonglacier/latest/dev/using-iam-with-amazon-glacier.html). 
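+//
+// A minimal sketch of aborting an in-progress upload (the vault name and
+// upload ID are illustrative; "-" means the account of the credentials in use):
+//
+//	_, err := svc.AbortMultipartUpload(&glacier.AbortMultipartUploadInput{
+//		AccountId: aws.String("-"),
+//		VaultName: aws.String("example-vault"), // hypothetical
+//		UploadId:  aws.String(uploadID),        // as returned by InitiateMultipartUpload
+//	})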
+// +// For conceptual information and underlying REST API, go to Working with +// Archives in Amazon Glacier (http://docs.aws.amazon.com/amazonglacier/latest/dev/working-with-archives.html) +// and Abort Multipart Upload (http://docs.aws.amazon.com/amazonglacier/latest/dev/api-multipart-abort-upload.html) +// in the Amazon Glacier Developer Guide. +func (c *Glacier) AbortMultipartUpload(input *AbortMultipartUploadInput) (*AbortMultipartUploadOutput, error) { + req, out := c.AbortMultipartUploadRequest(input) + err := req.Send() + return out, err +} + +const opAbortVaultLock = "AbortVaultLock" + +// AbortVaultLockRequest generates a request for the AbortVaultLock operation. +func (c *Glacier) AbortVaultLockRequest(input *AbortVaultLockInput) (req *request.Request, output *AbortVaultLockOutput) { + op := &request.Operation{ + Name: opAbortVaultLock, + HTTPMethod: "DELETE", + HTTPPath: "/{accountId}/vaults/{vaultName}/lock-policy", + } + + if input == nil { + input = &AbortVaultLockInput{} + } + + req = c.newRequest(op, input, output) + output = &AbortVaultLockOutput{} + req.Data = output + return +} + +// This operation aborts the vault locking process if the vault lock is not +// in the Locked state. If the vault lock is in the Locked state when this operation +// is requested, the operation returns an AccessDeniedException error. Aborting +// the vault locking process removes the vault lock policy from the specified +// vault. +// +// A vault lock is put into the InProgress state by calling InitiateVaultLock. +// A vault lock is put into the Locked state by calling CompleteVaultLock. You +// can get the state of a vault lock by calling GetVaultLock. For more information +// about the vault locking process, see Amazon Glacier Vault Lock (http://docs.aws.amazon.com/amazonglacier/latest/dev/vault-lock.html). +// For more information about vault lock policies, see Amazon Glacier Access +// Control with Vault Lock Policies (http://docs.aws.amazon.com/amazonglacier/latest/dev/vault-lock-policy.html). +// +// This operation is idempotent. You can successfully invoke this operation +// multiple times, if the vault lock is in the InProgress state or if there +// is no policy associated with the vault. +func (c *Glacier) AbortVaultLock(input *AbortVaultLockInput) (*AbortVaultLockOutput, error) { + req, out := c.AbortVaultLockRequest(input) + err := req.Send() + return out, err +} + +const opAddTagsToVault = "AddTagsToVault" + +// AddTagsToVaultRequest generates a request for the AddTagsToVault operation. +func (c *Glacier) AddTagsToVaultRequest(input *AddTagsToVaultInput) (req *request.Request, output *AddTagsToVaultOutput) { + op := &request.Operation{ + Name: opAddTagsToVault, + HTTPMethod: "POST", + HTTPPath: "/{accountId}/vaults/{vaultName}/tags?operation=add", + } + + if input == nil { + input = &AddTagsToVaultInput{} + } + + req = c.newRequest(op, input, output) + output = &AddTagsToVaultOutput{} + req.Data = output + return +} + +// This operation adds the specified tags to a vault. Each tag is composed of +// a key and a value. Each vault can have up to 10 tags. If your request would +// cause the tag limit for the vault to be exceeded, the operation throws the +// LimitExceededException error. If a tag already exists on the vault under +// a specified key, the existing key value will be overwritten. For more information +// about tags, see Tagging Amazon Glacier Resources (http://docs.aws.amazon.com/amazonglacier/latest/dev/tagging.html). 
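+//
+// A short sketch (the vault name and tag set are illustrative assumptions):
+//
+//	_, err := svc.AddTagsToVault(&glacier.AddTagsToVaultInput{
+//		AccountId: aws.String("-"), // the account of the credentials in use
+//		VaultName: aws.String("example-vault"),
+//		Tags:      map[string]*string{"env": aws.String("prod")},
+//	})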
+func (c *Glacier) AddTagsToVault(input *AddTagsToVaultInput) (*AddTagsToVaultOutput, error) {
+ req, out := c.AddTagsToVaultRequest(input)
+ err := req.Send()
+ return out, err
+}
+
+const opCompleteMultipartUpload = "CompleteMultipartUpload"
+
+// CompleteMultipartUploadRequest generates a request for the CompleteMultipartUpload operation.
+func (c *Glacier) CompleteMultipartUploadRequest(input *CompleteMultipartUploadInput) (req *request.Request, output *ArchiveCreationOutput) {
+ op := &request.Operation{
+ Name: opCompleteMultipartUpload,
+ HTTPMethod: "POST",
+ HTTPPath: "/{accountId}/vaults/{vaultName}/multipart-uploads/{uploadId}",
+ }
+
+ if input == nil {
+ input = &CompleteMultipartUploadInput{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &ArchiveCreationOutput{}
+ req.Data = output
+ return
+}
+
+// You call this operation to inform Amazon Glacier that all the archive parts
+// have been uploaded and that Amazon Glacier can now assemble the archive from
+// the uploaded parts. After assembling and saving the archive to the vault,
+// Amazon Glacier returns the URI path of the newly created archive resource.
+// Using the URI path, you can then access the archive. After you upload an
+// archive, you should save the archive ID returned to retrieve the archive
+// at a later point. You can also get the vault inventory to obtain a list of
+// archive IDs in a vault. For more information, see InitiateJob.
+//
+// In the request, you must include the computed SHA256 tree hash of the entire
+// archive you have uploaded. For information about computing a SHA256 tree
+// hash, see Computing Checksums (http://docs.aws.amazon.com/amazonglacier/latest/dev/checksum-calculations.html).
+// On the server side, Amazon Glacier also constructs the SHA256 tree hash of
+// the assembled archive. If the values match, Amazon Glacier saves the archive
+// to the vault; otherwise, it returns an error, and the operation fails. The
+// ListParts operation returns a list of parts uploaded for a specific multipart
+// upload. It includes checksum information for each uploaded part that can
+// be used to debug a bad checksum issue.
+//
+// Additionally, Amazon Glacier also checks for any missing content ranges
+// when assembling the archive; if missing content ranges are found, Amazon
+// Glacier returns an error and the operation fails.
+//
+// Complete Multipart Upload is an idempotent operation. After your first successful
+// complete multipart upload, if you call the operation again within a short
+// period, the operation will succeed and return the same archive ID. This is
+// useful in the event you experience a network issue that causes an aborted
+// connection or receive a 500 server error, in which case you can repeat your
+// Complete Multipart Upload request and get the same archive ID without creating
+// duplicate archives. Note, however, that after the multipart upload completes,
+// you cannot call the List Parts operation and the multipart upload will not
+// appear in the List Multipart Uploads response, even if idempotent complete is
+// possible.
+//
+// An AWS account has full permission to perform all operations (actions).
+// However, AWS Identity and Access Management (IAM) users don't have any permissions
+// by default. You must grant them explicit permission to perform specific actions.
+// For more information, see Access Control Using AWS Identity and Access Management
+// (IAM) (http://docs.aws.amazon.com/amazonglacier/latest/dev/using-iam-with-amazon-glacier.html).
+//
+// For conceptual information and underlying REST API, go to Uploading Large
+// Archives in Parts (Multipart Upload) (http://docs.aws.amazon.com/amazonglacier/latest/dev/uploading-archive-mpu.html)
+// and Complete Multipart Upload (http://docs.aws.amazon.com/amazonglacier/latest/dev/api-multipart-complete-upload.html)
+// in the Amazon Glacier Developer Guide.
+func (c *Glacier) CompleteMultipartUpload(input *CompleteMultipartUploadInput) (*ArchiveCreationOutput, error) {
+ req, out := c.CompleteMultipartUploadRequest(input)
+ err := req.Send()
+ return out, err
+}
+
+const opCompleteVaultLock = "CompleteVaultLock"
+
+// CompleteVaultLockRequest generates a request for the CompleteVaultLock operation.
+func (c *Glacier) CompleteVaultLockRequest(input *CompleteVaultLockInput) (req *request.Request, output *CompleteVaultLockOutput) {
+ op := &request.Operation{
+ Name: opCompleteVaultLock,
+ HTTPMethod: "POST",
+ HTTPPath: "/{accountId}/vaults/{vaultName}/lock-policy/{lockId}",
+ }
+
+ if input == nil {
+ input = &CompleteVaultLockInput{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &CompleteVaultLockOutput{}
+ req.Data = output
+ return
+}
+
+// This operation completes the vault locking process by transitioning the vault
+// lock from the InProgress state to the Locked state, which causes the vault
+// lock policy to become unchangeable. A vault lock is put into the InProgress
+// state by calling InitiateVaultLock. You can obtain the state of the vault
+// lock by calling GetVaultLock. For more information about the vault locking
+// process, see Amazon Glacier Vault Lock (http://docs.aws.amazon.com/amazonglacier/latest/dev/vault-lock.html).
+//
+// This operation is idempotent. This request is always successful if the vault
+// lock is in the Locked state and the provided lock ID matches the lock ID
+// originally used to lock the vault.
+//
+// If an invalid lock ID is passed in the request when the vault lock is in
+// the Locked state, the operation returns an AccessDeniedException error. If
+// an invalid lock ID is passed in the request when the vault lock is in the
+// InProgress state, the operation throws an InvalidParameter error.
+func (c *Glacier) CompleteVaultLock(input *CompleteVaultLockInput) (*CompleteVaultLockOutput, error) {
+ req, out := c.CompleteVaultLockRequest(input)
+ err := req.Send()
+ return out, err
+}
+
+const opCreateVault = "CreateVault"
+
+// CreateVaultRequest generates a request for the CreateVault operation.
+func (c *Glacier) CreateVaultRequest(input *CreateVaultInput) (req *request.Request, output *CreateVaultOutput) {
+ op := &request.Operation{
+ Name: opCreateVault,
+ HTTPMethod: "PUT",
+ HTTPPath: "/{accountId}/vaults/{vaultName}",
+ }
+
+ if input == nil {
+ input = &CreateVaultInput{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &CreateVaultOutput{}
+ req.Data = output
+ return
+}
+
+// This operation creates a new vault with the specified name. The name of the
+// vault must be unique within a region for an AWS account. You can create up
+// to 1,000 vaults per account. If you need to create more vaults, contact Amazon
+// Glacier.
+//
+// You must follow these guidelines when naming a vault.
+//
+// Names can be between 1 and 255 characters long.
+//
+// Allowed characters are a-z, A-Z, 0-9, '_' (underscore), '-' (hyphen),
+// and '.' (period).
+//
+// This operation is idempotent.
+//
+// An AWS account has full permission to perform all operations (actions).
+// However, AWS Identity and Access Management (IAM) users don't have any permissions
+// by default. You must grant them explicit permission to perform specific actions.
+// For more information, see Access Control Using AWS Identity and Access Management
+// (IAM) (http://docs.aws.amazon.com/amazonglacier/latest/dev/using-iam-with-amazon-glacier.html).
+//
+// For conceptual information and underlying REST API, go to Creating a Vault
+// in Amazon Glacier (http://docs.aws.amazon.com/amazonglacier/latest/dev/creating-vaults.html)
+// and Create Vault (http://docs.aws.amazon.com/amazonglacier/latest/dev/api-vault-put.html)
+// in the Amazon Glacier Developer Guide.
+func (c *Glacier) CreateVault(input *CreateVaultInput) (*CreateVaultOutput, error) {
+ req, out := c.CreateVaultRequest(input)
+ err := req.Send()
+ return out, err
+}
+
+const opDeleteArchive = "DeleteArchive"
+
+// DeleteArchiveRequest generates a request for the DeleteArchive operation.
+func (c *Glacier) DeleteArchiveRequest(input *DeleteArchiveInput) (req *request.Request, output *DeleteArchiveOutput) {
+ op := &request.Operation{
+ Name: opDeleteArchive,
+ HTTPMethod: "DELETE",
+ HTTPPath: "/{accountId}/vaults/{vaultName}/archives/{archiveId}",
+ }
+
+ if input == nil {
+ input = &DeleteArchiveInput{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &DeleteArchiveOutput{}
+ req.Data = output
+ return
+}
+
+// This operation deletes an archive from a vault. Subsequent requests to initiate
+// a retrieval of this archive will fail. Archive retrievals that are in progress
+// for this archive ID may or may not succeed according to the following scenarios:
+//
+// If the archive retrieval job is actively preparing the data for download
+// when Amazon Glacier receives the delete archive request, the archive retrieval
+// operation might fail.
+//
+// If the archive retrieval job has successfully prepared the archive for download
+// when Amazon Glacier receives the delete archive request, you will be able
+// to download the output.
+//
+// This operation is idempotent. Attempting to delete an already-deleted archive
+// does not result in an error.
+//
+// An AWS account has full permission to perform all operations (actions).
+// However, AWS Identity and Access Management (IAM) users don't have any permissions
+// by default. You must grant them explicit permission to perform specific actions.
+// For more information, see Access Control Using AWS Identity and Access Management
+// (IAM) (http://docs.aws.amazon.com/amazonglacier/latest/dev/using-iam-with-amazon-glacier.html).
+//
+// For conceptual information and underlying REST API, go to Deleting an Archive
+// in Amazon Glacier (http://docs.aws.amazon.com/amazonglacier/latest/dev/deleting-an-archive.html)
+// and Delete Archive (http://docs.aws.amazon.com/amazonglacier/latest/dev/api-archive-delete.html)
+// in the Amazon Glacier Developer Guide.
+func (c *Glacier) DeleteArchive(input *DeleteArchiveInput) (*DeleteArchiveOutput, error) {
+ req, out := c.DeleteArchiveRequest(input)
+ err := req.Send()
+ return out, err
+}
+
+const opDeleteVault = "DeleteVault"
+
+// DeleteVaultRequest generates a request for the DeleteVault operation.
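+//
+// The Request form returns the request object without sending it, which lets
+// callers attach handlers before transmission; a sketch (names illustrative,
+// assumes a configured svc):
+//
+//    req, out := svc.DeleteVaultRequest(&glacier.DeleteVaultInput{
+//        AccountId: aws.String("-"),
+//        VaultName: aws.String("my-vault"),
+//    })
+//    err := req.Send() // out is valid once Send returns nil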
+func (c *Glacier) DeleteVaultRequest(input *DeleteVaultInput) (req *request.Request, output *DeleteVaultOutput) { + op := &request.Operation{ + Name: opDeleteVault, + HTTPMethod: "DELETE", + HTTPPath: "/{accountId}/vaults/{vaultName}", + } + + if input == nil { + input = &DeleteVaultInput{} + } + + req = c.newRequest(op, input, output) + output = &DeleteVaultOutput{} + req.Data = output + return +} + +// This operation deletes a vault. Amazon Glacier will delete a vault only if +// there are no archives in the vault as of the last inventory and there have +// been no writes to the vault since the last inventory. If either of these +// conditions is not satisfied, the vault deletion fails (that is, the vault +// is not removed) and Amazon Glacier returns an error. You can use DescribeVault +// to return the number of archives in a vault, and you can use Initiate a Job +// (POST jobs) (http://docs.aws.amazon.com/amazonglacier/latest/dev/api-initiate-job-post.html) +// to initiate a new inventory retrieval for a vault. The inventory contains +// the archive IDs you use to delete archives using Delete Archive (DELETE archive) +// (http://docs.aws.amazon.com/amazonglacier/latest/dev/api-archive-delete.html). +// +// This operation is idempotent. +// +// An AWS account has full permission to perform all operations (actions). +// However, AWS Identity and Access Management (IAM) users don't have any permissions +// by default. You must grant them explicit permission to perform specific actions. +// For more information, see Access Control Using AWS Identity and Access Management +// (IAM) (http://docs.aws.amazon.com/amazonglacier/latest/dev/using-iam-with-amazon-glacier.html). +// +// For conceptual information and underlying REST API, go to Deleting a Vault +// in Amazon Glacier (http://docs.aws.amazon.com/amazonglacier/latest/dev/deleting-vaults.html) +// and Delete Vault (http://docs.aws.amazon.com/amazonglacier/latest/dev/api-vault-delete.html) +// in the Amazon Glacier Developer Guide. +func (c *Glacier) DeleteVault(input *DeleteVaultInput) (*DeleteVaultOutput, error) { + req, out := c.DeleteVaultRequest(input) + err := req.Send() + return out, err +} + +const opDeleteVaultAccessPolicy = "DeleteVaultAccessPolicy" + +// DeleteVaultAccessPolicyRequest generates a request for the DeleteVaultAccessPolicy operation. +func (c *Glacier) DeleteVaultAccessPolicyRequest(input *DeleteVaultAccessPolicyInput) (req *request.Request, output *DeleteVaultAccessPolicyOutput) { + op := &request.Operation{ + Name: opDeleteVaultAccessPolicy, + HTTPMethod: "DELETE", + HTTPPath: "/{accountId}/vaults/{vaultName}/access-policy", + } + + if input == nil { + input = &DeleteVaultAccessPolicyInput{} + } + + req = c.newRequest(op, input, output) + output = &DeleteVaultAccessPolicyOutput{} + req.Data = output + return +} + +// This operation deletes the access policy associated with the specified vault. +// The operation is eventually consistent; that is, it might take some time +// for Amazon Glacier to completely remove the access policy, and you might +// still see the effect of the policy for a short time after you send the delete +// request. +// +// This operation is idempotent. You can invoke delete multiple times, even +// if there is no policy associated with the vault. For more information about +// vault access policies, see Amazon Glacier Access Control with Vault Access +// Policies (http://docs.aws.amazon.com/amazonglacier/latest/dev/vault-access-policy.html). 
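+//
+// A minimal sketch (vault name illustrative; assumes a configured svc):
+//
+//    _, err := svc.DeleteVaultAccessPolicy(&glacier.DeleteVaultAccessPolicyInput{
+//        AccountId: aws.String("-"),
+//        VaultName: aws.String("my-vault"),
+//    })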
+func (c *Glacier) DeleteVaultAccessPolicy(input *DeleteVaultAccessPolicyInput) (*DeleteVaultAccessPolicyOutput, error) {
+ req, out := c.DeleteVaultAccessPolicyRequest(input)
+ err := req.Send()
+ return out, err
+}
+
+const opDeleteVaultNotifications = "DeleteVaultNotifications"
+
+// DeleteVaultNotificationsRequest generates a request for the DeleteVaultNotifications operation.
+func (c *Glacier) DeleteVaultNotificationsRequest(input *DeleteVaultNotificationsInput) (req *request.Request, output *DeleteVaultNotificationsOutput) {
+ op := &request.Operation{
+ Name: opDeleteVaultNotifications,
+ HTTPMethod: "DELETE",
+ HTTPPath: "/{accountId}/vaults/{vaultName}/notification-configuration",
+ }
+
+ if input == nil {
+ input = &DeleteVaultNotificationsInput{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &DeleteVaultNotificationsOutput{}
+ req.Data = output
+ return
+}
+
+// This operation deletes the notification configuration set for a vault. The
+// operation is eventually consistent; that is, it might take some time for
+// Amazon Glacier to completely disable the notifications and you might still
+// receive some notifications for a short time after you send the delete request.
+//
+// An AWS account has full permission to perform all operations (actions).
+// However, AWS Identity and Access Management (IAM) users don't have any permissions
+// by default. You must grant them explicit permission to perform specific actions.
+// For more information, see Access Control Using AWS Identity and Access Management
+// (IAM) (http://docs.aws.amazon.com/amazonglacier/latest/dev/using-iam-with-amazon-glacier.html).
+//
+// For conceptual information and underlying REST API, go to Configuring Vault
+// Notifications in Amazon Glacier (http://docs.aws.amazon.com/amazonglacier/latest/dev/configuring-notifications.html)
+// and Delete Vault Notification Configuration (http://docs.aws.amazon.com/amazonglacier/latest/dev/api-vault-notifications-delete.html)
+// in the Amazon Glacier Developer Guide.
+func (c *Glacier) DeleteVaultNotifications(input *DeleteVaultNotificationsInput) (*DeleteVaultNotificationsOutput, error) {
+ req, out := c.DeleteVaultNotificationsRequest(input)
+ err := req.Send()
+ return out, err
+}
+
+const opDescribeJob = "DescribeJob"
+
+// DescribeJobRequest generates a request for the DescribeJob operation.
+func (c *Glacier) DescribeJobRequest(input *DescribeJobInput) (req *request.Request, output *JobDescription) {
+ op := &request.Operation{
+ Name: opDescribeJob,
+ HTTPMethod: "GET",
+ HTTPPath: "/{accountId}/vaults/{vaultName}/jobs/{jobId}",
+ }
+
+ if input == nil {
+ input = &DescribeJobInput{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &JobDescription{}
+ req.Data = output
+ return
+}
+
+// This operation returns information about a job you previously initiated,
+// including the job initiation date, the user who initiated the job, the job
+// status code/message, and the Amazon SNS topic to notify after Amazon Glacier
+// completes the job. For more information about initiating a job, see InitiateJob.
+//
+// This operation enables you to check the status of your job. However, it
+// is strongly recommended that you set up an Amazon SNS topic and specify it
+// in your initiate job request so that Amazon Glacier can notify the topic
+// after it completes the job.
+//
+// A job ID will not expire for at least 24 hours after Amazon Glacier completes
+// the job.
+//
+// An AWS account has full permission to perform all operations (actions).
+// However, AWS Identity and Access Management (IAM) users don't have any permissions
+// by default. You must grant them explicit permission to perform specific actions.
+// For more information, see Access Control Using AWS Identity and Access Management
+// (IAM) (http://docs.aws.amazon.com/amazonglacier/latest/dev/using-iam-with-amazon-glacier.html).
+//
+// For information about the underlying REST API, go to Describe Job
+// (http://docs.aws.amazon.com/amazonglacier/latest/dev/api-describe-job-get.html)
+// in the Amazon Glacier Developer Guide.
+func (c *Glacier) DescribeJob(input *DescribeJobInput) (*JobDescription, error) {
+ req, out := c.DescribeJobRequest(input)
+ err := req.Send()
+ return out, err
+}
+
+const opDescribeVault = "DescribeVault"
+
+// DescribeVaultRequest generates a request for the DescribeVault operation.
+func (c *Glacier) DescribeVaultRequest(input *DescribeVaultInput) (req *request.Request, output *DescribeVaultOutput) {
+ op := &request.Operation{
+ Name: opDescribeVault,
+ HTTPMethod: "GET",
+ HTTPPath: "/{accountId}/vaults/{vaultName}",
+ }
+
+ if input == nil {
+ input = &DescribeVaultInput{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &DescribeVaultOutput{}
+ req.Data = output
+ return
+}
+
+// This operation returns information about a vault, including the vault's Amazon
+// Resource Name (ARN), the date the vault was created, the number of archives
+// it contains, and the total size of all the archives in the vault. The number
+// of archives and their total size are as of the last inventory generation.
+// This means that if you add or remove an archive from a vault, and then immediately
+// use Describe Vault, the change in contents will not be immediately reflected.
+// If you want to retrieve the latest inventory of the vault, use InitiateJob.
+// Amazon Glacier generates vault inventories approximately daily. For more
+// information, see Downloading a Vault Inventory in Amazon Glacier (http://docs.aws.amazon.com/amazonglacier/latest/dev/vault-inventory.html).
+//
+// An AWS account has full permission to perform all operations (actions).
+// However, AWS Identity and Access Management (IAM) users don't have any permissions
+// by default. You must grant them explicit permission to perform specific actions.
+// For more information, see Access Control Using AWS Identity and Access Management
+// (IAM) (http://docs.aws.amazon.com/amazonglacier/latest/dev/using-iam-with-amazon-glacier.html).
+//
+// For conceptual information and underlying REST API, go to Retrieving Vault
+// Metadata in Amazon Glacier (http://docs.aws.amazon.com/amazonglacier/latest/dev/retrieving-vault-info.html)
+// and Describe Vault (http://docs.aws.amazon.com/amazonglacier/latest/dev/api-vault-get.html)
+// in the Amazon Glacier Developer Guide.
+func (c *Glacier) DescribeVault(input *DescribeVaultInput) (*DescribeVaultOutput, error) {
+ req, out := c.DescribeVaultRequest(input)
+ err := req.Send()
+ return out, err
+}
+
+const opGetDataRetrievalPolicy = "GetDataRetrievalPolicy"
+
+// GetDataRetrievalPolicyRequest generates a request for the GetDataRetrievalPolicy operation.
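+//
+// A hedged sketch of fetching and inspecting the policy (output field names
+// such as Policy.Rules and rule.Strategy are assumptions about this package's
+// types; fmt import and a configured svc assumed):
+//
+//    req, out := svc.GetDataRetrievalPolicyRequest(&glacier.GetDataRetrievalPolicyInput{
+//        AccountId: aws.String("-"),
+//    })
+//    if err := req.Send(); err == nil && out.Policy != nil {
+//        for _, rule := range out.Policy.Rules {
+//            fmt.Println(aws.StringValue(rule.Strategy)) // e.g. "BytesPerHour"
+//        }
+//    }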
+func (c *Glacier) GetDataRetrievalPolicyRequest(input *GetDataRetrievalPolicyInput) (req *request.Request, output *GetDataRetrievalPolicyOutput) {
+ op := &request.Operation{
+ Name: opGetDataRetrievalPolicy,
+ HTTPMethod: "GET",
+ HTTPPath: "/{accountId}/policies/data-retrieval",
+ }
+
+ if input == nil {
+ input = &GetDataRetrievalPolicyInput{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &GetDataRetrievalPolicyOutput{}
+ req.Data = output
+ return
+}
+
+// This operation returns the current data retrieval policy for the account
+// and region specified in the GET request. For more information about data
+// retrieval policies, see Amazon Glacier Data Retrieval Policies (http://docs.aws.amazon.com/amazonglacier/latest/dev/data-retrieval-policy.html).
+func (c *Glacier) GetDataRetrievalPolicy(input *GetDataRetrievalPolicyInput) (*GetDataRetrievalPolicyOutput, error) {
+ req, out := c.GetDataRetrievalPolicyRequest(input)
+ err := req.Send()
+ return out, err
+}
+
+const opGetJobOutput = "GetJobOutput"
+
+// GetJobOutputRequest generates a request for the GetJobOutput operation.
+func (c *Glacier) GetJobOutputRequest(input *GetJobOutputInput) (req *request.Request, output *GetJobOutputOutput) {
+ op := &request.Operation{
+ Name: opGetJobOutput,
+ HTTPMethod: "GET",
+ HTTPPath: "/{accountId}/vaults/{vaultName}/jobs/{jobId}/output",
+ }
+
+ if input == nil {
+ input = &GetJobOutputInput{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &GetJobOutputOutput{}
+ req.Data = output
+ return
+}
+
+// This operation downloads the output of the job you initiated using InitiateJob.
+// Depending on the job type you specified when you initiated the job, the output
+// will be either the content of an archive or a vault inventory.
+//
+// A job ID will not expire for at least 24 hours after Amazon Glacier completes
+// the job. That is, you can download the job output within the 24-hour period
+// after Amazon Glacier completes the job.
+//
+// If the job output is large, then you can use the Range request header to
+// retrieve a portion of the output. This allows you to download the entire
+// output in smaller chunks of bytes. For example, suppose you have 1 GB of
+// job output you want to download and you decide to download 128 MB chunks
+// of data at a time, which is a total of eight Get Job Output requests. You
+// use the following process to download the job output:
+//
+// 1. Download a 128 MB chunk of output by specifying the appropriate byte
+// range using the Range header.
+//
+// 2. Along with the data, the response includes a SHA256 tree hash of the
+// payload. You compute the checksum of the payload on the client and compare
+// it with the checksum you received in the response to ensure you received
+// all the expected data.
+//
+// 3. Repeat steps 1 and 2 for all the eight 128 MB chunks of output data,
+// each time specifying the appropriate byte range.
+//
+// 4. After downloading all the parts of the job output, you have a list of
+// eight checksum values. Compute the tree hash of these values to find the
+// checksum of the entire output. Using the DescribeJob API, obtain job information
+// of the job that provided you the output. The response includes the checksum
+// of the entire archive stored in Amazon Glacier. You compare this value with
+// the checksum you computed to ensure you have downloaded the entire archive
+// content with no errors.
+//
+// An AWS account has full permission to perform all operations (actions).
+// However, AWS Identity and Access Management (IAM) users don't have any permissions
+// by default. You must grant them explicit permission to perform specific actions.
+// For more information, see Access Control Using AWS Identity and Access Management
+// (IAM) (http://docs.aws.amazon.com/amazonglacier/latest/dev/using-iam-with-amazon-glacier.html).
+//
+// For conceptual information and the underlying REST API, go to Downloading
+// a Vault Inventory (http://docs.aws.amazon.com/amazonglacier/latest/dev/vault-inventory.html),
+// Downloading an Archive (http://docs.aws.amazon.com/amazonglacier/latest/dev/downloading-an-archive.html),
+// and Get Job Output (http://docs.aws.amazon.com/amazonglacier/latest/dev/api-job-output-get.html)
+func (c *Glacier) GetJobOutput(input *GetJobOutputInput) (*GetJobOutputOutput, error) {
+ req, out := c.GetJobOutputRequest(input)
+ err := req.Send()
+ return out, err
+}
+
+const opGetVaultAccessPolicy = "GetVaultAccessPolicy"
+
+// GetVaultAccessPolicyRequest generates a request for the GetVaultAccessPolicy operation.
+func (c *Glacier) GetVaultAccessPolicyRequest(input *GetVaultAccessPolicyInput) (req *request.Request, output *GetVaultAccessPolicyOutput) {
+ op := &request.Operation{
+ Name: opGetVaultAccessPolicy,
+ HTTPMethod: "GET",
+ HTTPPath: "/{accountId}/vaults/{vaultName}/access-policy",
+ }
+
+ if input == nil {
+ input = &GetVaultAccessPolicyInput{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &GetVaultAccessPolicyOutput{}
+ req.Data = output
+ return
+}
+
+// This operation retrieves the access-policy subresource set on the vault;
+// for more information on setting this subresource, see Set Vault Access Policy
+// (PUT access-policy) (http://docs.aws.amazon.com/amazonglacier/latest/dev/api-SetVaultAccessPolicy.html).
+// If there is no access policy set on the vault, the operation returns a 404
+// Not found error. For more information about vault access policies, see Amazon
+// Glacier Access Control with Vault Access Policies (http://docs.aws.amazon.com/amazonglacier/latest/dev/vault-access-policy.html).
+func (c *Glacier) GetVaultAccessPolicy(input *GetVaultAccessPolicyInput) (*GetVaultAccessPolicyOutput, error) {
+ req, out := c.GetVaultAccessPolicyRequest(input)
+ err := req.Send()
+ return out, err
+}
+
+const opGetVaultLock = "GetVaultLock"
+
+// GetVaultLockRequest generates a request for the GetVaultLock operation.
+func (c *Glacier) GetVaultLockRequest(input *GetVaultLockInput) (req *request.Request, output *GetVaultLockOutput) {
+ op := &request.Operation{
+ Name: opGetVaultLock,
+ HTTPMethod: "GET",
+ HTTPPath: "/{accountId}/vaults/{vaultName}/lock-policy",
+ }
+
+ if input == nil {
+ input = &GetVaultLockInput{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &GetVaultLockOutput{}
+ req.Data = output
+ return
+}
+
+// This operation retrieves the following attributes from the lock-policy subresource
+// set on the specified vault:
+//
+// The vault lock policy set on the vault.
+//
+// The state of the vault lock, which is either InProgress or Locked.
+//
+// When the lock ID expires. The lock ID is used to complete the vault locking
+// process.
+//
+// When the vault lock was initiated and put into the InProgress state.
+//
+// A vault lock is put into the InProgress state by calling InitiateVaultLock.
+// A vault lock is put into the Locked state by calling CompleteVaultLock. You
+// can abort the vault locking process by calling AbortVaultLock.
For more information
+// about the vault locking process, see Amazon Glacier Vault Lock (http://docs.aws.amazon.com/amazonglacier/latest/dev/vault-lock.html).
+//
+// If there is no vault lock policy set on the vault, the operation returns
+// a 404 Not found error. For more information about vault lock policies, see Amazon
+// Glacier Access Control with Vault Lock Policies (http://docs.aws.amazon.com/amazonglacier/latest/dev/vault-lock-policy.html).
+func (c *Glacier) GetVaultLock(input *GetVaultLockInput) (*GetVaultLockOutput, error) {
+ req, out := c.GetVaultLockRequest(input)
+ err := req.Send()
+ return out, err
+}
+
+const opGetVaultNotifications = "GetVaultNotifications"
+
+// GetVaultNotificationsRequest generates a request for the GetVaultNotifications operation.
+func (c *Glacier) GetVaultNotificationsRequest(input *GetVaultNotificationsInput) (req *request.Request, output *GetVaultNotificationsOutput) {
+ op := &request.Operation{
+ Name: opGetVaultNotifications,
+ HTTPMethod: "GET",
+ HTTPPath: "/{accountId}/vaults/{vaultName}/notification-configuration",
+ }
+
+ if input == nil {
+ input = &GetVaultNotificationsInput{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &GetVaultNotificationsOutput{}
+ req.Data = output
+ return
+}
+
+// This operation retrieves the notification-configuration subresource of the
+// specified vault.
+//
+// For information about setting a notification configuration on a vault, see
+// SetVaultNotifications. If a notification configuration for a vault is not
+// set, the operation returns a 404 Not Found error. For more information about
+// vault notifications, see Configuring Vault Notifications in Amazon Glacier
+// (http://docs.aws.amazon.com/amazonglacier/latest/dev/configuring-notifications.html).
+//
+// An AWS account has full permission to perform all operations (actions).
+// However, AWS Identity and Access Management (IAM) users don't have any permissions
+// by default. You must grant them explicit permission to perform specific actions.
+// For more information, see Access Control Using AWS Identity and Access Management
+// (IAM) (http://docs.aws.amazon.com/amazonglacier/latest/dev/using-iam-with-amazon-glacier.html).
+//
+// For conceptual information and underlying REST API, go to Configuring Vault
+// Notifications in Amazon Glacier (http://docs.aws.amazon.com/amazonglacier/latest/dev/configuring-notifications.html)
+// and Get Vault Notification Configuration (http://docs.aws.amazon.com/amazonglacier/latest/dev/api-vault-notifications-get.html)
+// in the Amazon Glacier Developer Guide.
+func (c *Glacier) GetVaultNotifications(input *GetVaultNotificationsInput) (*GetVaultNotificationsOutput, error) {
+ req, out := c.GetVaultNotificationsRequest(input)
+ err := req.Send()
+ return out, err
+}
+
+const opInitiateJob = "InitiateJob"
+
+// InitiateJobRequest generates a request for the InitiateJob operation.
+func (c *Glacier) InitiateJobRequest(input *InitiateJobInput) (req *request.Request, output *InitiateJobOutput) {
+ op := &request.Operation{
+ Name: opInitiateJob,
+ HTTPMethod: "POST",
+ HTTPPath: "/{accountId}/vaults/{vaultName}/jobs",
+ }
+
+ if input == nil {
+ input = &InitiateJobInput{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &InitiateJobOutput{}
+ req.Data = output
+ return
+}
+
+// This operation initiates a job of the specified type. In this release, you
+// can initiate a job to retrieve either an archive or a vault inventory (a
+// list of archives in a vault).
+//
+// Retrieving data from Amazon Glacier is a two-step process:
+//
+// Initiate a retrieval job.
+//
+// A data retrieval policy can cause your initiate retrieval job request to
+// fail with a PolicyEnforcedException exception. For more information about
+// data retrieval policies, see Amazon Glacier Data Retrieval Policies (http://docs.aws.amazon.com/amazonglacier/latest/dev/data-retrieval-policy.html).
+// For more information about the PolicyEnforcedException exception, see Error
+// Responses (http://docs.aws.amazon.com/amazonglacier/latest/dev/api-error-responses.html).
+//
+// After the job completes, download the bytes.
+//
+// The retrieval request is executed asynchronously. When you initiate a retrieval
+// job, Amazon Glacier creates a job and returns a job ID in the response. When
+// Amazon Glacier completes the job, you can get the job output (archive or
+// inventory data). For information about getting job output, see the GetJobOutput
+// operation.
+//
+// The job must complete before you can get its output. To determine when a
+// job is complete, you have the following options:
+//
+// Use Amazon SNS Notification You can specify an Amazon Simple Notification
+// Service (Amazon SNS) topic to which Amazon Glacier can post a notification
+// after the job is completed. You can specify an SNS topic per job request.
+// The notification is sent only after Amazon Glacier completes the job. In
+// addition to specifying an SNS topic per job request, you can configure vault
+// notifications for a vault so that job notifications are always sent. For
+// more information, see SetVaultNotifications.
+//
+// Get job details You can make a DescribeJob request to obtain job status
+// information while a job is in progress. However, it is more efficient to
+// use an Amazon SNS notification to determine when a job is complete.
+//
+// The information you get via notification is the same as what you get by
+// calling DescribeJob.
+//
+// If, for a specific event, you add both the notification configuration on
+// the vault and also specify an SNS topic in your initiate job request, Amazon
+// Glacier sends both notifications. For more information, see SetVaultNotifications.
+//
+// An AWS account has full permission to perform all operations (actions).
+// However, AWS Identity and Access Management (IAM) users don't have any permissions
+// by default. You must grant them explicit permission to perform specific actions.
+// For more information, see Access Control Using AWS Identity and Access Management
+// (IAM) (http://docs.aws.amazon.com/amazonglacier/latest/dev/using-iam-with-amazon-glacier.html).
+//
+// About the Vault Inventory
+//
+// Amazon Glacier prepares an inventory for each vault periodically, every
+// 24 hours. When you initiate a job for a vault inventory, Amazon Glacier returns
+// the last inventory for the vault. The inventory data you get might be up
+// to a day or two old. Also, the initiate inventory job might take some
+// time to complete before you can download the vault inventory. So you do not
+// want to retrieve a vault inventory for each vault operation. However, in
+// some scenarios, you might find the vault inventory useful. For example, when
+// you upload an archive, you can provide an archive description but not an
+// archive name. Amazon Glacier provides you a unique archive ID, an opaque
+// string of characters. So, you might maintain your own database that maps
+// archive names to their corresponding Amazon Glacier assigned archive IDs.
+// You might find the vault inventory useful in the event you need to reconcile
+// information in your database with the actual vault inventory.
+//
+// Range Inventory Retrieval
+//
+// You can limit the number of inventory items retrieved by filtering on the
+// archive creation date or by setting a limit.
+//
+// Filtering by Archive Creation Date
+//
+// You can retrieve inventory items for archives created between StartDate
+// and EndDate by specifying values for these parameters in the InitiateJob
+// request. Archives created on or after the StartDate and before the EndDate
+// will be returned. If you only provide the StartDate without the EndDate,
+// you will retrieve the inventory for all archives created on or after the
+// StartDate. If you only provide the EndDate without the StartDate, you will
+// get back the inventory for all archives created before the EndDate.
+//
+// Limiting Inventory Items per Retrieval
+//
+// You can limit the number of inventory items returned by setting the Limit
+// parameter in the InitiateJob request. The inventory job output will contain
+// inventory items up to the specified Limit. If there are more inventory items
+// available, the result is paginated. After a job is complete, you can use the
+// DescribeJob operation to get a marker that you use in a subsequent InitiateJob
+// request. The marker will indicate the starting point to retrieve the next
+// set of inventory items. You can page through your entire inventory by repeatedly
+// making InitiateJob requests with the marker from the previous DescribeJob
+// output, until you get a marker from DescribeJob that returns null, indicating
+// that there are no more inventory items available.
+//
+// You can use the Limit parameter together with the date range parameters.
+//
+// About Ranged Archive Retrieval
+//
+// You can initiate an archive retrieval for the whole archive or a range
+// of the archive. In the case of ranged archive retrieval, you specify a byte
+// range to return or the whole archive. The range specified must be megabyte
+// (MB) aligned; that is, the range start value must be divisible by 1 MB, and
+// the range end value plus 1 must be divisible by 1 MB or equal the end of the
+// archive. If the ranged archive retrieval is not megabyte aligned, this operation
+// returns a 400 response. Furthermore, to ensure you get checksum values for
+// data you download using the Get Job Output API, the range must be tree hash aligned.
+//
+// An AWS account has full permission to perform all operations (actions).
+// However, AWS Identity and Access Management (IAM) users don't have any permissions
+// by default. You must grant them explicit permission to perform specific actions.
+// For more information, see Access Control Using AWS Identity and Access Management
+// (IAM) (http://docs.aws.amazon.com/amazonglacier/latest/dev/using-iam-with-amazon-glacier.html).
+//
+// For conceptual information and the underlying REST API, go to Initiate a
+// Job (http://docs.aws.amazon.com/amazonglacier/latest/dev/api-initiate-job-post.html)
+// and Downloading a Vault Inventory (http://docs.aws.amazon.com/amazonglacier/latest/dev/vault-inventory.html)
+func (c *Glacier) InitiateJob(input *InitiateJobInput) (*InitiateJobOutput, error) {
+ req, out := c.InitiateJobRequest(input)
+ err := req.Send()
+ return out, err
+}
+
+const opInitiateMultipartUpload = "InitiateMultipartUpload"
+
+// InitiateMultipartUploadRequest generates a request for the InitiateMultipartUpload operation.
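+//
+// A sketch of starting a multipart upload with a 1 MB part size (values
+// illustrative; note that PartSize is a decimal string in this API, and a
+// configured svc is assumed):
+//
+//    out, err := svc.InitiateMultipartUpload(&glacier.InitiateMultipartUploadInput{
+//        AccountId:          aws.String("-"),
+//        VaultName:          aws.String("my-vault"),
+//        ArchiveDescription: aws.String("nightly backup"),
+//        PartSize:           aws.String("1048576"), // 1 MB; must be 1 MB times a power of 2
+//    })
+//    // on success, out.UploadId feeds subsequent UploadMultipartPart calls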
+func (c *Glacier) InitiateMultipartUploadRequest(input *InitiateMultipartUploadInput) (req *request.Request, output *InitiateMultipartUploadOutput) {
+ op := &request.Operation{
+ Name: opInitiateMultipartUpload,
+ HTTPMethod: "POST",
+ HTTPPath: "/{accountId}/vaults/{vaultName}/multipart-uploads",
+ }
+
+ if input == nil {
+ input = &InitiateMultipartUploadInput{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &InitiateMultipartUploadOutput{}
+ req.Data = output
+ return
+}
+
+// This operation initiates a multipart upload. Amazon Glacier creates a multipart
+// upload resource and returns its ID in the response. The multipart upload
+// ID is used in subsequent requests to upload parts of an archive (see UploadMultipartPart).
+//
+// When you initiate a multipart upload, you specify the part size in number
+// of bytes. The part size must be a megabyte (1024 KB) multiplied by a power
+// of 2; for example, 1048576 (1 MB), 2097152 (2 MB), 4194304 (4 MB), 8388608
+// (8 MB), and so on. The minimum allowable part size is 1 MB, and the maximum
+// is 4 GB.
+//
+// Every part you upload to this resource (see UploadMultipartPart), except
+// the last one, must have the same size. The last one can be the same size
+// or smaller. For example, suppose you want to upload a 16.2 MB file. If you
+// initiate the multipart upload with a part size of 4 MB, you will upload four
+// parts of 4 MB each and one part of 0.2 MB.
+//
+// You don't need to know the size of the archive when you start a multipart
+// upload because Amazon Glacier does not require you to specify the overall
+// archive size.
+//
+// After you complete the multipart upload, Amazon Glacier removes the multipart
+// upload resource referenced by the ID. Amazon Glacier also removes the multipart
+// upload resource if you cancel the multipart upload or it may be removed if
+// there is no activity for a period of 24 hours.
+//
+// An AWS account has full permission to perform all operations (actions).
+// However, AWS Identity and Access Management (IAM) users don't have any permissions
+// by default. You must grant them explicit permission to perform specific actions.
+// For more information, see Access Control Using AWS Identity and Access Management
+// (IAM) (http://docs.aws.amazon.com/amazonglacier/latest/dev/using-iam-with-amazon-glacier.html).
+//
+// For conceptual information and underlying REST API, go to Uploading Large
+// Archives in Parts (Multipart Upload) (http://docs.aws.amazon.com/amazonglacier/latest/dev/uploading-archive-mpu.html)
+// and Initiate Multipart Upload (http://docs.aws.amazon.com/amazonglacier/latest/dev/api-multipart-initiate-upload.html)
+// in the Amazon Glacier Developer Guide.
+func (c *Glacier) InitiateMultipartUpload(input *InitiateMultipartUploadInput) (*InitiateMultipartUploadOutput, error) {
+ req, out := c.InitiateMultipartUploadRequest(input)
+ err := req.Send()
+ return out, err
+}
+
+const opInitiateVaultLock = "InitiateVaultLock"
+
+// InitiateVaultLockRequest generates a request for the InitiateVaultLock operation.
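+//
+// A hedged sketch of the two-step lock flow, InitiateVaultLock followed by
+// CompleteVaultLock with the returned lock ID (policyJSON is a placeholder
+// policy document and the VaultLockPolicy wrapper reflects this package's
+// types; error checks elided):
+//
+//    out, err := svc.InitiateVaultLock(&glacier.InitiateVaultLockInput{
+//        AccountId: aws.String("-"),
+//        VaultName: aws.String("my-vault"),
+//        Policy:    &glacier.VaultLockPolicy{Policy: aws.String(policyJSON)},
+//    })
+//    // must happen within 24 hours, while the lock is still InProgress:
+//    _, err = svc.CompleteVaultLock(&glacier.CompleteVaultLockInput{
+//        AccountId: aws.String("-"),
+//        VaultName: aws.String("my-vault"),
+//        LockId:    out.LockId,
+//    })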
+func (c *Glacier) InitiateVaultLockRequest(input *InitiateVaultLockInput) (req *request.Request, output *InitiateVaultLockOutput) {
+ op := &request.Operation{
+ Name: opInitiateVaultLock,
+ HTTPMethod: "POST",
+ HTTPPath: "/{accountId}/vaults/{vaultName}/lock-policy",
+ }
+
+ if input == nil {
+ input = &InitiateVaultLockInput{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &InitiateVaultLockOutput{}
+ req.Data = output
+ return
+}
+
+// This operation initiates the vault locking process by doing the following:
+//
+// Installing a vault lock policy on the specified vault.
+//
+// Setting the lock state of vault lock to InProgress.
+//
+// Returning a lock ID, which is used to complete the vault locking process.
+//
+// You can set one vault lock policy for each vault and this policy can
+// be up to 20 KB in size. For more information about vault lock policies, see
+// Amazon Glacier Access Control with Vault Lock Policies (http://docs.aws.amazon.com/amazonglacier/latest/dev/vault-lock-policy.html).
+//
+// You must complete the vault locking process within 24 hours after the vault
+// lock enters the InProgress state. After the 24 hour window ends, the lock
+// ID expires, the vault automatically exits the InProgress state, and the vault
+// lock policy is removed from the vault. You call CompleteVaultLock to complete
+// the vault locking process by setting the state of the vault lock to Locked.
+//
+// After a vault lock is in the Locked state, you cannot initiate a new vault
+// lock for the vault.
+//
+// You can abort the vault locking process by calling AbortVaultLock. You can
+// get the state of the vault lock by calling GetVaultLock. For more information
+// about the vault locking process, see Amazon Glacier Vault Lock (http://docs.aws.amazon.com/amazonglacier/latest/dev/vault-lock.html).
+//
+// If this operation is called when the vault lock is in the InProgress state,
+// the operation returns an AccessDeniedException error. When the vault lock
+// is in the InProgress state, you must call AbortVaultLock before you can initiate
+// a new vault lock policy.
+func (c *Glacier) InitiateVaultLock(input *InitiateVaultLockInput) (*InitiateVaultLockOutput, error) {
+ req, out := c.InitiateVaultLockRequest(input)
+ err := req.Send()
+ return out, err
+}
+
+const opListJobs = "ListJobs"
+
+// ListJobsRequest generates a request for the ListJobs operation.
+func (c *Glacier) ListJobsRequest(input *ListJobsInput) (req *request.Request, output *ListJobsOutput) {
+ op := &request.Operation{
+ Name: opListJobs,
+ HTTPMethod: "GET",
+ HTTPPath: "/{accountId}/vaults/{vaultName}/jobs",
+ Paginator: &request.Paginator{
+ InputTokens: []string{"marker"},
+ OutputTokens: []string{"Marker"},
+ LimitToken: "limit",
+ TruncationToken: "",
+ },
+ }
+
+ if input == nil {
+ input = &ListJobsInput{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &ListJobsOutput{}
+ req.Data = output
+ return
+}
+
+// This operation lists jobs for a vault, including jobs that are in progress
+// and jobs that have recently finished.
+//
+// Amazon Glacier retains recently completed jobs for a period before deleting
+// them; however, it eventually removes completed jobs. The output of completed
+// jobs can be retrieved. Retaining completed jobs for a period of time after
+// they have completed enables you to get a job output in the event you miss
+// the job completion notification or your first attempt to download it fails.
+// For example, suppose you start an archive retrieval job to download an archive.
+// After the job completes, you start to download the archive but encounter
+// a network error. In this scenario, you can retry and download the archive
+// while the job exists.
+//
+// To retrieve an archive or retrieve a vault inventory from Amazon Glacier,
+// you first initiate a job, and after the job completes, you download the data.
+// For an archive retrieval, the output is the archive data, and for an inventory
+// retrieval, it is the inventory list. The List Jobs operation returns a list
+// of these jobs sorted by job initiation time.
+//
+// This List Jobs operation supports pagination. By default, this operation
+// returns up to 1,000 jobs in the response. You should always check the response
+// for a marker at which to continue the list; if there are no more items, the
+// marker is null. To return a list of jobs that begins at a specific job, set
+// the marker request parameter to the value you obtained from a previous List
+// Jobs request. You can also limit the number of jobs returned in the response
+// by specifying the limit parameter in the request.
+//
+// Additionally, you can filter the jobs list returned by specifying an optional
+// statuscode (InProgress, Succeeded, or Failed) and completed (true, false)
+// parameter. The statuscode allows you to specify that only jobs that match
+// a specified status are returned. The completed parameter allows you to specify
+// that only jobs in a specific completion state are returned.
+//
+// An AWS account has full permission to perform all operations (actions).
+// However, AWS Identity and Access Management (IAM) users don't have any permissions
+// by default. You must grant them explicit permission to perform specific actions.
+// For more information, see Access Control Using AWS Identity and Access Management
+// (IAM) (http://docs.aws.amazon.com/amazonglacier/latest/dev/using-iam-with-amazon-glacier.html).
+//
+// For the underlying REST API, go to List Jobs (http://docs.aws.amazon.com/amazonglacier/latest/dev/api-jobs-get.html)
+func (c *Glacier) ListJobs(input *ListJobsInput) (*ListJobsOutput, error) {
+ req, out := c.ListJobsRequest(input)
+ err := req.Send()
+ return out, err
+}
+
+// ListJobsPages iterates over the pages of a ListJobs operation, calling fn
+// with each page of results until fn returns false or there are no more pages.
+func (c *Glacier) ListJobsPages(input *ListJobsInput, fn func(p *ListJobsOutput, lastPage bool) (shouldContinue bool)) error {
+ page, _ := c.ListJobsRequest(input)
+ page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator"))
+ return page.EachPage(func(p interface{}, lastPage bool) bool {
+ return fn(p.(*ListJobsOutput), lastPage)
+ })
+}
+
+const opListMultipartUploads = "ListMultipartUploads"
+
+// ListMultipartUploadsRequest generates a request for the ListMultipartUploads operation.
+func (c *Glacier) ListMultipartUploadsRequest(input *ListMultipartUploadsInput) (req *request.Request, output *ListMultipartUploadsOutput) {
+ op := &request.Operation{
+ Name: opListMultipartUploads,
+ HTTPMethod: "GET",
+ HTTPPath: "/{accountId}/vaults/{vaultName}/multipart-uploads",
+ Paginator: &request.Paginator{
+ InputTokens: []string{"marker"},
+ OutputTokens: []string{"Marker"},
+ LimitToken: "limit",
+ TruncationToken: "",
+ },
+ }
+
+ if input == nil {
+ input = &ListMultipartUploadsInput{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &ListMultipartUploadsOutput{}
+ req.Data = output
+ return
+}
+
+// This operation lists in-progress multipart uploads for the specified vault.
+// An in-progress multipart upload is a multipart upload that has been initiated
+// by an InitiateMultipartUpload request, but has not yet been completed or
+// aborted. The list returned in the List Multipart Uploads response has no guaranteed
+// order.
+//
+// The List Multipart Uploads operation supports pagination. By default, this
+// operation returns up to 1,000 multipart uploads in the response. You should
+// always check the response for a marker at which to continue the list; if
+// there are no more items, the marker is null. To return a list of multipart
+// uploads that begins at a specific upload, set the marker request parameter
+// to the value you obtained from a previous List Multipart Uploads request.
+// You can also limit the number of uploads returned in the response by specifying
+// the limit parameter in the request.
+//
+// Note the difference between this operation and listing parts (ListParts).
+// The List Multipart Uploads operation lists all multipart uploads for a vault
+// and does not require a multipart upload ID. The List Parts operation requires
+// a multipart upload ID since parts are associated with a single upload.
+//
+// An AWS account has full permission to perform all operations (actions).
+// However, AWS Identity and Access Management (IAM) users don't have any permissions
+// by default. You must grant them explicit permission to perform specific actions.
+// For more information, see Access Control Using AWS Identity and Access Management
+// (IAM) (http://docs.aws.amazon.com/amazonglacier/latest/dev/using-iam-with-amazon-glacier.html).
+//
+// For conceptual information and the underlying REST API, go to Working with
+// Archives in Amazon Glacier (http://docs.aws.amazon.com/amazonglacier/latest/dev/working-with-archives.html)
+// and List Multipart Uploads (http://docs.aws.amazon.com/amazonglacier/latest/dev/api-multipart-list-uploads.html)
+// in the Amazon Glacier Developer Guide.
+func (c *Glacier) ListMultipartUploads(input *ListMultipartUploadsInput) (*ListMultipartUploadsOutput, error) {
+ req, out := c.ListMultipartUploadsRequest(input)
+ err := req.Send()
+ return out, err
+}
+
+// ListMultipartUploadsPages iterates over the pages of a ListMultipartUploads
+// operation, calling fn with each page of results until fn returns false or
+// there are no more pages.
+func (c *Glacier) ListMultipartUploadsPages(input *ListMultipartUploadsInput, fn func(p *ListMultipartUploadsOutput, lastPage bool) (shouldContinue bool)) error {
+ page, _ := c.ListMultipartUploadsRequest(input)
+ page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator"))
+ return page.EachPage(func(p interface{}, lastPage bool) bool {
+ return fn(p.(*ListMultipartUploadsOutput), lastPage)
+ })
+}
+
+const opListParts = "ListParts"
+
+// ListPartsRequest generates a request for the ListParts operation.
+func (c *Glacier) ListPartsRequest(input *ListPartsInput) (req *request.Request, output *ListPartsOutput) {
+ op := &request.Operation{
+ Name: opListParts,
+ HTTPMethod: "GET",
+ HTTPPath: "/{accountId}/vaults/{vaultName}/multipart-uploads/{uploadId}",
+ Paginator: &request.Paginator{
+ InputTokens: []string{"marker"},
+ OutputTokens: []string{"Marker"},
+ LimitToken: "limit",
+ TruncationToken: "",
+ },
+ }
+
+ if input == nil {
+ input = &ListPartsInput{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &ListPartsOutput{}
+ req.Data = output
+ return
+}
+
+// This operation lists the parts of an archive that have been uploaded in a
+// specific multipart upload. You can make this request at any time during an
+// in-progress multipart upload before you complete the upload (see CompleteMultipartUpload).
+// List Parts returns an error for completed uploads. The list returned in the
+// List Parts response is sorted by part range.
+//
+// The List Parts operation supports pagination. By default, this operation
+// returns up to 1,000 uploaded parts in the response. You should always check
+// the response for a marker at which to continue the list; if there are no
+// more items, the marker is null. To return a list of parts that begins at a
+// specific part, set the marker request parameter to the value you obtained
+// from a previous List Parts request. You can also limit the number of parts
+// returned in the response by specifying the limit parameter in the request.
+//
+// An AWS account has full permission to perform all operations (actions).
+// However, AWS Identity and Access Management (IAM) users don't have any permissions
+// by default. You must grant them explicit permission to perform specific actions.
+// For more information, see Access Control Using AWS Identity and Access Management
+// (IAM) (http://docs.aws.amazon.com/amazonglacier/latest/dev/using-iam-with-amazon-glacier.html).
+//
+// For conceptual information and the underlying REST API, go to Working with
+// Archives in Amazon Glacier (http://docs.aws.amazon.com/amazonglacier/latest/dev/working-with-archives.html)
+// and List Parts (http://docs.aws.amazon.com/amazonglacier/latest/dev/api-multipart-list-parts.html)
+// in the Amazon Glacier Developer Guide.
+func (c *Glacier) ListParts(input *ListPartsInput) (*ListPartsOutput, error) {
+ req, out := c.ListPartsRequest(input)
+ err := req.Send()
+ return out, err
+}
+
+// ListPartsPages iterates over the pages of a ListParts operation, calling fn
+// with each page of results until fn returns false or there are no more pages.
+func (c *Glacier) ListPartsPages(input *ListPartsInput, fn func(p *ListPartsOutput, lastPage bool) (shouldContinue bool)) error {
+ page, _ := c.ListPartsRequest(input)
+ page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator"))
+ return page.EachPage(func(p interface{}, lastPage bool) bool {
+ return fn(p.(*ListPartsOutput), lastPage)
+ })
+}
+
+const opListTagsForVault = "ListTagsForVault"
+
+// ListTagsForVaultRequest generates a request for the ListTagsForVault operation.
+func (c *Glacier) ListTagsForVaultRequest(input *ListTagsForVaultInput) (req *request.Request, output *ListTagsForVaultOutput) {
+ op := &request.Operation{
+ Name: opListTagsForVault,
+ HTTPMethod: "GET",
+ HTTPPath: "/{accountId}/vaults/{vaultName}/tags",
+ }
+
+ if input == nil {
+ input = &ListTagsForVaultInput{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &ListTagsForVaultOutput{}
+ req.Data = output
+ return
+}
+
+// This operation lists all the tags attached to a vault. The operation returns
+// an empty map if there are no tags. For more information about tags, see Tagging
+// Amazon Glacier Resources (http://docs.aws.amazon.com/amazonglacier/latest/dev/tagging.html).
+func (c *Glacier) ListTagsForVault(input *ListTagsForVaultInput) (*ListTagsForVaultOutput, error) {
+ req, out := c.ListTagsForVaultRequest(input)
+ err := req.Send()
+ return out, err
+}
+
+const opListVaults = "ListVaults"
+
+// ListVaultsRequest generates a request for the ListVaults operation.
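+//
+// A sketch of walking every vault with the generated ListVaultsPages helper
+// (fmt import and a configured svc assumed; returning true from the callback
+// keeps paging):
+//
+//    err := svc.ListVaultsPages(&glacier.ListVaultsInput{AccountId: aws.String("-")},
+//        func(page *glacier.ListVaultsOutput, lastPage bool) bool {
+//            for _, v := range page.VaultList {
+//                fmt.Println(aws.StringValue(v.VaultName))
+//            }
+//            return true
+//        })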
+func (c *Glacier) ListVaultsRequest(input *ListVaultsInput) (req *request.Request, output *ListVaultsOutput) {
+ op := &request.Operation{
+ Name: opListVaults,
+ HTTPMethod: "GET",
+ HTTPPath: "/{accountId}/vaults",
+ Paginator: &request.Paginator{
+ InputTokens: []string{"marker"},
+ OutputTokens: []string{"Marker"},
+ LimitToken: "limit",
+ TruncationToken: "",
+ },
+ }
+
+ if input == nil {
+ input = &ListVaultsInput{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &ListVaultsOutput{}
+ req.Data = output
+ return
+}
+
+// This operation lists all vaults owned by the calling user's account. The
+// list returned in the response is ASCII-sorted by vault name.
+//
+// By default, this operation returns up to 1,000 items. If there are more
+// vaults to list, the response marker field contains the vault Amazon Resource
+// Name (ARN) at which to continue the list with a new List Vaults request;
+// otherwise, the marker field is null. To return a list of vaults that begins
+// at a specific vault, set the marker request parameter to the vault ARN you
+// obtained from a previous List Vaults request. You can also limit the number
+// of vaults returned in the response by specifying the limit parameter in the
+// request.
+//
+// An AWS account has full permission to perform all operations (actions).
+// However, AWS Identity and Access Management (IAM) users don't have any permissions
+// by default. You must grant them explicit permission to perform specific actions.
+// For more information, see Access Control Using AWS Identity and Access Management
+// (IAM) (http://docs.aws.amazon.com/amazonglacier/latest/dev/using-iam-with-amazon-glacier.html).
+//
+// For conceptual information and underlying REST API, go to Retrieving Vault
+// Metadata in Amazon Glacier (http://docs.aws.amazon.com/amazonglacier/latest/dev/retrieving-vault-info.html)
+// and List Vaults (http://docs.aws.amazon.com/amazonglacier/latest/dev/api-vaults-get.html)
+// in the Amazon Glacier Developer Guide.
+func (c *Glacier) ListVaults(input *ListVaultsInput) (*ListVaultsOutput, error) {
+ req, out := c.ListVaultsRequest(input)
+ err := req.Send()
+ return out, err
+}
+
+// ListVaultsPages iterates over the pages of a ListVaults operation, calling
+// fn with each page of results until fn returns false or there are no more pages.
+func (c *Glacier) ListVaultsPages(input *ListVaultsInput, fn func(p *ListVaultsOutput, lastPage bool) (shouldContinue bool)) error {
+ page, _ := c.ListVaultsRequest(input)
+ page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator"))
+ return page.EachPage(func(p interface{}, lastPage bool) bool {
+ return fn(p.(*ListVaultsOutput), lastPage)
+ })
+}
+
+const opRemoveTagsFromVault = "RemoveTagsFromVault"
+
+// RemoveTagsFromVaultRequest generates a request for the RemoveTagsFromVault operation.
+func (c *Glacier) RemoveTagsFromVaultRequest(input *RemoveTagsFromVaultInput) (req *request.Request, output *RemoveTagsFromVaultOutput) {
+ op := &request.Operation{
+ Name: opRemoveTagsFromVault,
+ HTTPMethod: "POST",
+ HTTPPath: "/{accountId}/vaults/{vaultName}/tags?operation=remove",
+ }
+
+ if input == nil {
+ input = &RemoveTagsFromVaultInput{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &RemoveTagsFromVaultOutput{}
+ req.Data = output
+ return
+}
+
+// This operation removes one or more tags from the set of tags attached to
+// a vault. For more information about tags, see Tagging Amazon Glacier Resources
+// (http://docs.aws.amazon.com/amazonglacier/latest/dev/tagging.html). This
+// operation is idempotent. The operation will be successful, even if there
+// are no tags attached to the vault.
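+//
+// A minimal sketch (tag key illustrative; assumes a configured svc):
+//
+//    _, err := svc.RemoveTagsFromVault(&glacier.RemoveTagsFromVaultInput{
+//        AccountId: aws.String("-"),
+//        VaultName: aws.String("my-vault"),
+//        TagKeys:   []*string{aws.String("environment")},
+//    })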
+func (c *Glacier) RemoveTagsFromVault(input *RemoveTagsFromVaultInput) (*RemoveTagsFromVaultOutput, error) { + req, out := c.RemoveTagsFromVaultRequest(input) + err := req.Send() + return out, err +} + +const opSetDataRetrievalPolicy = "SetDataRetrievalPolicy" + +// SetDataRetrievalPolicyRequest generates a request for the SetDataRetrievalPolicy operation. +func (c *Glacier) SetDataRetrievalPolicyRequest(input *SetDataRetrievalPolicyInput) (req *request.Request, output *SetDataRetrievalPolicyOutput) { + op := &request.Operation{ + Name: opSetDataRetrievalPolicy, + HTTPMethod: "PUT", + HTTPPath: "/{accountId}/policies/data-retrieval", + } + + if input == nil { + input = &SetDataRetrievalPolicyInput{} + } + + req = c.newRequest(op, input, output) + output = &SetDataRetrievalPolicyOutput{} + req.Data = output + return +} + +// This operation sets and then enacts a data retrieval policy in the region +// specified in the PUT request. You can set one policy per region for an AWS +// account. The policy is enacted within a few minutes of a successful PUT operation. +// +// The set policy operation does not affect retrieval jobs that were in progress +// before the policy was enacted. For more information about data retrieval +// policies, see Amazon Glacier Data Retrieval Policies (http://docs.aws.amazon.com/amazonglacier/latest/dev/data-retrieval-policy.html). +func (c *Glacier) SetDataRetrievalPolicy(input *SetDataRetrievalPolicyInput) (*SetDataRetrievalPolicyOutput, error) { + req, out := c.SetDataRetrievalPolicyRequest(input) + err := req.Send() + return out, err +} + +const opSetVaultAccessPolicy = "SetVaultAccessPolicy" + +// SetVaultAccessPolicyRequest generates a request for the SetVaultAccessPolicy operation. +func (c *Glacier) SetVaultAccessPolicyRequest(input *SetVaultAccessPolicyInput) (req *request.Request, output *SetVaultAccessPolicyOutput) { + op := &request.Operation{ + Name: opSetVaultAccessPolicy, + HTTPMethod: "PUT", + HTTPPath: "/{accountId}/vaults/{vaultName}/access-policy", + } + + if input == nil { + input = &SetVaultAccessPolicyInput{} + } + + req = c.newRequest(op, input, output) + output = &SetVaultAccessPolicyOutput{} + req.Data = output + return +} + +// This operation configures an access policy for a vault and will overwrite +// an existing policy. To configure a vault access policy, send a PUT request +// to the access-policy subresource of the vault. An access policy is specific +// to a vault and is also called a vault subresource. You can set one access +// policy per vault and the policy can be up to 20 KB in size. For more information +// about vault access policies, see Amazon Glacier Access Control with Vault +// Access Policies (http://docs.aws.amazon.com/amazonglacier/latest/dev/vault-access-policy.html). +func (c *Glacier) SetVaultAccessPolicy(input *SetVaultAccessPolicyInput) (*SetVaultAccessPolicyOutput, error) { + req, out := c.SetVaultAccessPolicyRequest(input) + err := req.Send() + return out, err +} + +const opSetVaultNotifications = "SetVaultNotifications" + +// SetVaultNotificationsRequest generates a request for the SetVaultNotifications operation. 
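+//
+// For illustration (not part of the generated documentation), the request can
+// be built and sent in two steps, assuming the SetVaultNotificationsInput
+// VaultNotificationConfig field declared later in this file and a prepared
+// config value cfg:
+//
+//	req, _ := c.SetVaultNotificationsRequest(&SetVaultNotificationsInput{
+//		AccountId:               &accountID,
+//		VaultName:               &vault,
+//		VaultNotificationConfig: cfg,
+//	})
+//	err := req.Send()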
+func (c *Glacier) SetVaultNotificationsRequest(input *SetVaultNotificationsInput) (req *request.Request, output *SetVaultNotificationsOutput) {
+	op := &request.Operation{
+		Name:       opSetVaultNotifications,
+		HTTPMethod: "PUT",
+		HTTPPath:   "/{accountId}/vaults/{vaultName}/notification-configuration",
+	}
+
+	if input == nil {
+		input = &SetVaultNotificationsInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &SetVaultNotificationsOutput{}
+	req.Data = output
+	return
+}
+
+// This operation configures notifications that will be sent when specific events
+// happen to a vault. By default, you don't get any notifications.
+//
+// To configure vault notifications, send a PUT request to the notification-configuration
+// subresource of the vault. The request should include a JSON document that
+// provides an Amazon SNS topic and specific events for which you want Amazon
+// Glacier to send notifications to the topic.
+//
+// Amazon SNS topics must grant permission to the vault to be allowed to publish
+// notifications to the topic. You can configure a vault to publish a notification
+// for the following vault events:
+//
+//   ArchiveRetrievalCompleted: This event occurs when a job that was initiated
+//   for an archive retrieval is completed (InitiateJob). The status of the
+//   completed job can be "Succeeded" or "Failed". The notification sent to the
+//   SNS topic is the same output as returned from DescribeJob.
+//
+//   InventoryRetrievalCompleted: This event occurs when a job that was initiated
+//   for an inventory retrieval is completed (InitiateJob). The status of the
+//   completed job can be "Succeeded" or "Failed". The notification sent to the
+//   SNS topic is the same output as returned from DescribeJob.
+//
+// An AWS account has full permission to perform all operations (actions).
+// However, AWS Identity and Access Management (IAM) users don't have any permissions
+// by default. You must grant them explicit permission to perform specific actions.
+// For more information, see Access Control Using AWS Identity and Access Management
+// (IAM) (http://docs.aws.amazon.com/amazonglacier/latest/dev/using-iam-with-amazon-glacier.html).
+//
+// For conceptual information and underlying REST API, go to Configuring Vault
+// Notifications in Amazon Glacier (http://docs.aws.amazon.com/amazonglacier/latest/dev/configuring-notifications.html)
+// and Set Vault Notification Configuration (http://docs.aws.amazon.com/amazonglacier/latest/dev/api-vault-notifications-put.html)
+// in the Amazon Glacier Developer Guide.
+func (c *Glacier) SetVaultNotifications(input *SetVaultNotificationsInput) (*SetVaultNotificationsOutput, error) {
+	req, out := c.SetVaultNotificationsRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+const opUploadArchive = "UploadArchive"
+
+// UploadArchiveRequest generates a request for the UploadArchive operation.
+func (c *Glacier) UploadArchiveRequest(input *UploadArchiveInput) (req *request.Request, output *ArchiveCreationOutput) {
+	op := &request.Operation{
+		Name:       opUploadArchive,
+		HTTPMethod: "POST",
+		HTTPPath:   "/{accountId}/vaults/{vaultName}/archives",
+	}
+
+	if input == nil {
+		input = &UploadArchiveInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &ArchiveCreationOutput{}
+	req.Data = output
+	return
+}
+
+// This operation adds an archive to a vault. This is a synchronous operation,
+// and for a successful upload, your data is durably persisted. Amazon Glacier
+// returns the archive ID in the x-amz-archive-id header of the response.
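+//
+// As an illustrative sketch (not part of the original documentation), a
+// minimal call looks like the following; it assumes the input's Body field
+// (an io.ReadSeeker, declared later in this file) and that the SDK's Glacier
+// customizations fill in the required tree-hash checksum header when none is
+// supplied:
+//
+//	accountID, vault := "-", "examplevault"
+//	out, err := c.UploadArchive(&UploadArchiveInput{
+//		AccountId: &accountID,
+//		VaultName: &vault,
+//		Body:      body, // io.ReadSeeker with the archive data
+//	})
+//	// On success, out.ArchiveId identifies the new archive.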
+//
+// You must use the archive ID to access your data in Amazon Glacier. After
+// you upload an archive, you should save the archive ID returned so that you
+// can retrieve or delete the archive later. Besides saving the archive ID,
+// you can also index it and give it a friendly name to allow for better searching.
+// You can also use the optional archive description field to specify how the
+// archive is referred to in an external index of archives, such as you might
+// create in Amazon DynamoDB. You can also get the vault inventory to obtain
+// a list of archive IDs in a vault. For more information, see InitiateJob.
+//
+// You must provide a SHA256 tree hash of the data you are uploading. For information
+// about computing a SHA256 tree hash, see Computing Checksums (http://docs.aws.amazon.com/amazonglacier/latest/dev/checksum-calculations.html).
+//
+// You can optionally specify an archive description of up to 1,024 printable
+// ASCII characters. You can get the archive description when you either retrieve
+// the archive or get the vault inventory. For more information, see InitiateJob.
+// Amazon Glacier does not interpret the description in any way. An archive
+// description does not need to be unique. You cannot use the description to
+// retrieve or sort the archive list.
+//
+// Archives are immutable. After you upload an archive, you cannot edit the
+// archive or its description.
+//
+// An AWS account has full permission to perform all operations (actions).
+// However, AWS Identity and Access Management (IAM) users don't have any permissions
+// by default. You must grant them explicit permission to perform specific actions.
+// For more information, see Access Control Using AWS Identity and Access Management
+// (IAM) (http://docs.aws.amazon.com/amazonglacier/latest/dev/using-iam-with-amazon-glacier.html).
+//
+// For conceptual information and underlying REST API, go to Uploading an
+// Archive in Amazon Glacier (http://docs.aws.amazon.com/amazonglacier/latest/dev/uploading-an-archive.html)
+// and Upload Archive (http://docs.aws.amazon.com/amazonglacier/latest/dev/api-archive-post.html)
+// in the Amazon Glacier Developer Guide.
+func (c *Glacier) UploadArchive(input *UploadArchiveInput) (*ArchiveCreationOutput, error) {
+	req, out := c.UploadArchiveRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+const opUploadMultipartPart = "UploadMultipartPart"
+
+// UploadMultipartPartRequest generates a request for the UploadMultipartPart operation.
+func (c *Glacier) UploadMultipartPartRequest(input *UploadMultipartPartInput) (req *request.Request, output *UploadMultipartPartOutput) {
+	op := &request.Operation{
+		Name:       opUploadMultipartPart,
+		HTTPMethod: "PUT",
+		HTTPPath:   "/{accountId}/vaults/{vaultName}/multipart-uploads/{uploadId}",
+	}
+
+	if input == nil {
+		input = &UploadMultipartPartInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &UploadMultipartPartOutput{}
+	req.Data = output
+	return
+}
+
+// This operation uploads a part of an archive. You can upload archive parts
+// in any order. You can also upload them in parallel. You can upload up to
+// 10,000 parts for a multipart upload.
+//
+// Amazon Glacier rejects your upload part request if any of the following
+// conditions is true:
+//
+//   SHA256 tree hash does not match: To ensure that part data is not corrupted
+//   in transmission, you compute a SHA256 tree hash of the part and include it
+//   in your request.
+//   Upon receiving the part data, Amazon Glacier also computes a SHA256 tree
+//   hash. If these hash values don't match, the operation fails. For information
+//   about computing a SHA256 tree hash, see Computing Checksums
+//   (http://docs.aws.amazon.com/amazonglacier/latest/dev/checksum-calculations.html).
+//
+//   Part size does not match: The size of each part except the last must match
+//   the size specified in the corresponding InitiateMultipartUpload request.
+//   The size of the last part must be the same size as, or smaller than, the
+//   specified size.
+//
+//   If you upload a part whose size is smaller than the part size you specified
+//   in your initiate multipart upload request and that part is not the last part,
+//   then the upload part request will succeed. However, the subsequent Complete
+//   Multipart Upload request will fail.
+//
+//   Range does not align: The byte range value in the request does not align
+//   with the part size specified in the corresponding initiate request. For example,
+//   if you specify a part size of 4194304 bytes (4 MB), then 0 to 4194303 bytes
+//   (4 MB - 1) and 4194304 (4 MB) to 8388607 (8 MB - 1) are valid part ranges.
+//   However, if you set a range value of 2 MB to 6 MB, the range does not align
+//   with the part size and the upload will fail.
+//
+// This operation is idempotent. If you upload the same part multiple times,
+// the data included in the most recent request overwrites the previously
+// uploaded data.
+//
+// An AWS account has full permission to perform all operations (actions).
+// However, AWS Identity and Access Management (IAM) users don't have any permissions
+// by default. You must grant them explicit permission to perform specific actions.
+// For more information, see Access Control Using AWS Identity and Access Management
+// (IAM) (http://docs.aws.amazon.com/amazonglacier/latest/dev/using-iam-with-amazon-glacier.html).
+//
+// For conceptual information and underlying REST API, go to Uploading Large
+// Archives in Parts (Multipart Upload) (http://docs.aws.amazon.com/amazonglacier/latest/dev/uploading-archive-mpu.html)
+// and Upload Part (http://docs.aws.amazon.com/amazonglacier/latest/dev/api-upload-part.html)
+// in the Amazon Glacier Developer Guide.
+func (c *Glacier) UploadMultipartPart(input *UploadMultipartPartInput) (*UploadMultipartPartOutput, error) {
+	req, out := c.UploadMultipartPartRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+// Provides options to abort a multipart upload identified by the upload ID.
+//
+// For information about the underlying REST API, go to Abort Multipart Upload
+// (http://docs.aws.amazon.com/amazonglacier/latest/dev/api-multipart-abort-upload.html).
+// For conceptual information, go to Working with Archives in Amazon Glacier
+// (http://docs.aws.amazon.com/amazonglacier/latest/dev/working-with-archives.html).
+type AbortMultipartUploadInput struct {
+	_ struct{} `type:"structure"`
+
+	// The AccountId value is the AWS account ID of the account that owns the vault.
+	// You can either specify an AWS account ID or optionally a single '-'
+	// (hyphen), in which case Amazon Glacier uses the AWS account ID associated
+	// with the credentials used to sign the request. If you use an account ID,
+	// do not include any hyphens ('-') in the ID.
+	AccountId *string `location:"uri" locationName:"accountId" type:"string" required:"true"`
+
+	// The upload ID of the multipart upload to delete.
+	UploadId *string `location:"uri" locationName:"uploadId" type:"string" required:"true"`
+
+	// The name of the vault.
+	VaultName *string `location:"uri" locationName:"vaultName" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s AbortMultipartUploadInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s AbortMultipartUploadInput) GoString() string {
+	return s.String()
+}
+
+type AbortMultipartUploadOutput struct {
+	_ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s AbortMultipartUploadOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s AbortMultipartUploadOutput) GoString() string {
+	return s.String()
+}
+
+// The input values for AbortVaultLock.
+type AbortVaultLockInput struct {
+	_ struct{} `type:"structure"`
+
+	// The AccountId value is the AWS account ID. This value must match the AWS
+	// account ID associated with the credentials used to sign the request. You
+	// can either specify an AWS account ID or optionally a single '-' (hyphen),
+	// in which case Amazon Glacier uses the AWS account ID associated with the
+	// credentials used to sign the request. If you specify your account ID, do
+	// not include any hyphens ('-') in the ID.
+	AccountId *string `location:"uri" locationName:"accountId" type:"string" required:"true"`
+
+	// The name of the vault.
+	VaultName *string `location:"uri" locationName:"vaultName" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s AbortVaultLockInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s AbortVaultLockInput) GoString() string {
+	return s.String()
+}
+
+type AbortVaultLockOutput struct {
+	_ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s AbortVaultLockOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s AbortVaultLockOutput) GoString() string {
+	return s.String()
+}
+
+// The input values for AddTagsToVault.
+type AddTagsToVaultInput struct {
+	_ struct{} `type:"structure"`
+
+	// The AccountId value is the AWS account ID of the account that owns the vault.
+	// You can either specify an AWS account ID or optionally a single '-'
+	// (hyphen), in which case Amazon Glacier uses the AWS account ID associated
+	// with the credentials used to sign the request. If you use an account ID,
+	// do not include any hyphens ('-') in the ID.
+	AccountId *string `location:"uri" locationName:"accountId" type:"string" required:"true"`
+
+	// The tags to add to the vault. Each tag is composed of a key and a value.
+	// The value can be an empty string.
+	Tags map[string]*string `type:"map"`
+
+	// The name of the vault.
+	VaultName *string `location:"uri" locationName:"vaultName" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s AddTagsToVaultInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s AddTagsToVaultInput) GoString() string {
+	return s.String()
+}
+
+type AddTagsToVaultOutput struct {
+	_ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s AddTagsToVaultOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s AddTagsToVaultOutput) GoString() string {
+	return s.String()
+}
+
+// Contains the Amazon Glacier response to your request.
+//
+// For information about the underlying REST API, go to Upload Archive (http://docs.aws.amazon.com/amazonglacier/latest/dev/api-archive-post.html).
+// For conceptual information, go to Working with Archives in Amazon Glacier
+// (http://docs.aws.amazon.com/amazonglacier/latest/dev/working-with-archives.html).
+type ArchiveCreationOutput struct {
+	_ struct{} `type:"structure"`
+
+	// The ID of the archive. This value is also included as part of the location.
+	ArchiveId *string `location:"header" locationName:"x-amz-archive-id" type:"string"`
+
+	// The checksum of the archive computed by Amazon Glacier.
+	Checksum *string `location:"header" locationName:"x-amz-sha256-tree-hash" type:"string"`
+
+	// The relative URI path of the newly added archive resource.
+	Location *string `location:"header" locationName:"Location" type:"string"`
+}
+
+// String returns the string representation
+func (s ArchiveCreationOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ArchiveCreationOutput) GoString() string {
+	return s.String()
+}
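+
+// uploadInPartsSketch is an illustrative sketch, not part of the generated
+// API: it chains InitiateMultipartUpload, UploadMultipartPart, and
+// CompleteMultipartUpload into the flow that produces the ArchiveCreationOutput
+// above. It assumes the UploadMultipartPartInput Body (io.ReadSeeker) and
+// Range fields declared later in this file, and that checksums may be left
+// empty because the SDK's Glacier customizations compute tree hashes; the
+// caller supplies Content-Range strings that follow the alignment rules in
+// the UploadMultipartPart documentation.
+func uploadInPartsSketch(c *Glacier, vault, partSize, archiveSize string, parts []io.ReadSeeker, ranges []string) (*ArchiveCreationOutput, error) {
+	accountID := "-" // "-" means the account that signed the request
+	initOut, err := c.InitiateMultipartUpload(&InitiateMultipartUploadInput{
+		AccountId: &accountID,
+		VaultName: &vault,
+		PartSize:  &partSize, // e.g. "4194304" for 4 MB parts
+	})
+	if err != nil {
+		return nil, err
+	}
+	for i, part := range parts {
+		_, err = c.UploadMultipartPart(&UploadMultipartPartInput{
+			AccountId: &accountID,
+			VaultName: &vault,
+			UploadId:  initOut.UploadId,
+			Range:     &ranges[i], // e.g. "bytes 0-4194303/*"
+			Body:      part,
+		})
+		if err != nil {
+			return nil, err
+		}
+	}
+	return c.CompleteMultipartUpload(&CompleteMultipartUploadInput{
+		AccountId:   &accountID,
+		VaultName:   &vault,
+		UploadId:    initOut.UploadId,
+		ArchiveSize: &archiveSize, // total size in bytes, as a string
+	})
+}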
+
+// Provides options to complete a multipart upload operation. This informs Amazon
+// Glacier that all the archive parts have been uploaded and Amazon Glacier
+// can now assemble the archive from the uploaded parts. After assembling and
+// saving the archive to the vault, Amazon Glacier returns the URI path of the
+// newly created archive resource.
+type CompleteMultipartUploadInput struct {
+	_ struct{} `type:"structure"`
+
+	// The AccountId value is the AWS account ID of the account that owns the vault.
+	// You can either specify an AWS account ID or optionally a single '-'
+	// (hyphen), in which case Amazon Glacier uses the AWS account ID associated
+	// with the credentials used to sign the request. If you use an account ID,
+	// do not include any hyphens ('-') in the ID.
+	AccountId *string `location:"uri" locationName:"accountId" type:"string" required:"true"`
+
+	// The total size, in bytes, of the entire archive. This value should be the
+	// sum of all the sizes of the individual parts that you uploaded.
+	ArchiveSize *string `location:"header" locationName:"x-amz-archive-size" type:"string"`
+
+	// The SHA256 tree hash of the entire archive. It is the tree hash of the
+	// SHA256 tree hashes of the individual parts. If the value you specify in
+	// the request does not match the SHA256 tree hash of the final assembled
+	// archive as computed by Amazon Glacier, Amazon Glacier returns an error and
+	// the request fails.
+	Checksum *string `location:"header" locationName:"x-amz-sha256-tree-hash" type:"string"`
+
+	// The upload ID of the multipart upload.
+	UploadId *string `location:"uri" locationName:"uploadId" type:"string" required:"true"`
+
+	// The name of the vault.
+	VaultName *string `location:"uri" locationName:"vaultName" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s CompleteMultipartUploadInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CompleteMultipartUploadInput) GoString() string {
+	return s.String()
+}
+
+// The input values for CompleteVaultLock.
+type CompleteVaultLockInput struct {
+	_ struct{} `type:"structure"`
+
+	// The AccountId value is the AWS account ID. This value must match the AWS
+	// account ID associated with the credentials used to sign the request. You
+	// can either specify an AWS account ID or optionally a single '-' (hyphen),
+	// in which case Amazon Glacier uses the AWS account ID associated with the
+	// credentials used to sign the request. If you specify your account ID, do
+	// not include any hyphens ('-') in the ID.
+	AccountId *string `location:"uri" locationName:"accountId" type:"string" required:"true"`
+
+	// The lockId value is the lock ID obtained from an InitiateVaultLock request.
+	LockId *string `location:"uri" locationName:"lockId" type:"string" required:"true"`
+
+	// The name of the vault.
+	VaultName *string `location:"uri" locationName:"vaultName" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s CompleteVaultLockInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CompleteVaultLockInput) GoString() string {
+	return s.String()
+}
+
+type CompleteVaultLockOutput struct {
+	_ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s CompleteVaultLockOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CompleteVaultLockOutput) GoString() string {
+	return s.String()
+}
+
+// Provides options to create a vault.
+type CreateVaultInput struct {
+	_ struct{} `type:"structure"`
+
+	// The AccountId value is the AWS account ID. This value must match the AWS
+	// account ID associated with the credentials used to sign the request. You
+	// can either specify an AWS account ID or optionally a single '-' (hyphen),
+	// in which case Amazon Glacier uses the AWS account ID associated with the
+	// credentials used to sign the request. If you specify your account ID, do
+	// not include any hyphens ('-') in the ID.
+	AccountId *string `location:"uri" locationName:"accountId" type:"string" required:"true"`
+
+	// The name of the vault.
+	VaultName *string `location:"uri" locationName:"vaultName" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s CreateVaultInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreateVaultInput) GoString() string {
+	return s.String()
+}
+
+// Contains the Amazon Glacier response to your request.
+type CreateVaultOutput struct {
+	_ struct{} `type:"structure"`
+
+	// The URI of the vault that was created.
+	Location *string `location:"header" locationName:"Location" type:"string"`
+}
+
+// String returns the string representation
+func (s CreateVaultOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreateVaultOutput) GoString() string {
+	return s.String()
+}
+
+// Data retrieval policy.
+type DataRetrievalPolicy struct {
+	_ struct{} `type:"structure"`
+
+	// The policy rule. Although this is a list type, currently there must be only
+	// one rule, which contains a Strategy field and optionally a BytesPerHour field.
+	Rules []*DataRetrievalRule `type:"list"`
+}
+
+// String returns the string representation
+func (s DataRetrievalPolicy) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DataRetrievalPolicy) GoString() string {
+	return s.String()
+}
+
+// Data retrieval policy rule.
+type DataRetrievalRule struct {
+	_ struct{} `type:"structure"`
+
+	// The maximum number of bytes that can be retrieved in an hour.
+	//
+	// This field is required only if the value of the Strategy field is BytesPerHour.
+	// Your PUT operation will be rejected if the Strategy field is not set to BytesPerHour
+	// and you set this field.
+	BytesPerHour *int64 `type:"long"`
+
+	// The type of data retrieval policy to set.
+	//
+	// Valid values: BytesPerHour|FreeTier|None
+	Strategy *string `type:"string"`
+}
+
+// String returns the string representation
+func (s DataRetrievalRule) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DataRetrievalRule) GoString() string {
+	return s.String()
+}
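+
+// bytesPerHourPolicySketch is an illustrative sketch, not part of the
+// generated API: it builds the single-rule policy described above and applies
+// it with SetDataRetrievalPolicy. It assumes the SetDataRetrievalPolicyInput
+// Policy field declared later in this file.
+func bytesPerHourPolicySketch(c *Glacier, bytesPerHour int64) error {
+	accountID := "-" // the account that signs the request
+	strategy := "BytesPerHour"
+	_, err := c.SetDataRetrievalPolicy(&SetDataRetrievalPolicyInput{
+		AccountId: &accountID,
+		Policy: &DataRetrievalPolicy{
+			// Currently the list must contain exactly one rule.
+			Rules: []*DataRetrievalRule{{
+				Strategy:     &strategy,
+				BytesPerHour: &bytesPerHour,
+			}},
+		},
+	})
+	return err
+}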
+
+// Provides options for deleting an archive from an Amazon Glacier vault.
+type DeleteArchiveInput struct {
+	_ struct{} `type:"structure"`
+
+	// The AccountId value is the AWS account ID of the account that owns the vault.
+	// You can either specify an AWS account ID or optionally a single '-'
+	// (hyphen), in which case Amazon Glacier uses the AWS account ID associated
+	// with the credentials used to sign the request. If you use an account ID,
+	// do not include any hyphens ('-') in the ID.
+	AccountId *string `location:"uri" locationName:"accountId" type:"string" required:"true"`
+
+	// The ID of the archive to delete.
+	ArchiveId *string `location:"uri" locationName:"archiveId" type:"string" required:"true"`
+
+	// The name of the vault.
+	VaultName *string `location:"uri" locationName:"vaultName" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s DeleteArchiveInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteArchiveInput) GoString() string {
+	return s.String()
+}
+
+type DeleteArchiveOutput struct {
+	_ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s DeleteArchiveOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteArchiveOutput) GoString() string {
+	return s.String()
+}
+
+// DeleteVaultAccessPolicy input.
+type DeleteVaultAccessPolicyInput struct {
+	_ struct{} `type:"structure"`
+
+	// The AccountId value is the AWS account ID of the account that owns the vault.
+	// You can either specify an AWS account ID or optionally a single '-'
+	// (hyphen), in which case Amazon Glacier uses the AWS account ID associated
+	// with the credentials used to sign the request. If you use an account ID,
+	// do not include any hyphens ('-') in the ID.
+	AccountId *string `location:"uri" locationName:"accountId" type:"string" required:"true"`
+
+	// The name of the vault.
+	VaultName *string `location:"uri" locationName:"vaultName" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s DeleteVaultAccessPolicyInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteVaultAccessPolicyInput) GoString() string {
+	return s.String()
+}
+
+type DeleteVaultAccessPolicyOutput struct {
+	_ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s DeleteVaultAccessPolicyOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteVaultAccessPolicyOutput) GoString() string {
+	return s.String()
+}
+
+// Provides options for deleting a vault from Amazon Glacier.
+type DeleteVaultInput struct {
+	_ struct{} `type:"structure"`
+
+	// The AccountId value is the AWS account ID of the account that owns the vault.
+	// You can either specify an AWS account ID or optionally a single '-'
+	// (hyphen), in which case Amazon Glacier uses the AWS account ID associated
+	// with the credentials used to sign the request. If you use an account ID,
+	// do not include any hyphens ('-') in the ID.
+	AccountId *string `location:"uri" locationName:"accountId" type:"string" required:"true"`
+
+	// The name of the vault.
+	VaultName *string `location:"uri" locationName:"vaultName" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s DeleteVaultInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteVaultInput) GoString() string {
+	return s.String()
+}
+
+// Provides options for deleting a vault notification configuration from an
+// Amazon Glacier vault.
+type DeleteVaultNotificationsInput struct {
+	_ struct{} `type:"structure"`
+
+	// The AccountId value is the AWS account ID of the account that owns the vault.
+	// You can either specify an AWS account ID or optionally a single '-'
+	// (hyphen), in which case Amazon Glacier uses the AWS account ID associated
+	// with the credentials used to sign the request. If you use an account ID,
+	// do not include any hyphens ('-') in the ID.
+	AccountId *string `location:"uri" locationName:"accountId" type:"string" required:"true"`
+
+	// The name of the vault.
+	VaultName *string `location:"uri" locationName:"vaultName" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s DeleteVaultNotificationsInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteVaultNotificationsInput) GoString() string {
+	return s.String()
+}
+
+type DeleteVaultNotificationsOutput struct {
+	_ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s DeleteVaultNotificationsOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteVaultNotificationsOutput) GoString() string {
+	return s.String()
+}
+
+type DeleteVaultOutput struct {
+	_ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s DeleteVaultOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteVaultOutput) GoString() string {
+	return s.String()
+}
+
+// Provides options for retrieving a job description.
+type DescribeJobInput struct {
+	_ struct{} `type:"structure"`
+
+	// The AccountId value is the AWS account ID of the account that owns the vault.
+	// You can either specify an AWS account ID or optionally a single '-'
+	// (hyphen), in which case Amazon Glacier uses the AWS account ID associated
+	// with the credentials used to sign the request. If you use an account ID,
+	// do not include any hyphens ('-') in the ID.
+	AccountId *string `location:"uri" locationName:"accountId" type:"string" required:"true"`
+
+	// The ID of the job to describe.
+	JobId *string `location:"uri" locationName:"jobId" type:"string" required:"true"`
+
+	// The name of the vault.
+	VaultName *string `location:"uri" locationName:"vaultName" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s DescribeJobInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeJobInput) GoString() string {
+	return s.String()
+}
+
+// Provides options for retrieving metadata for a specific vault in Amazon Glacier.
+type DescribeVaultInput struct {
+	_ struct{} `type:"structure"`
+
+	// The AccountId value is the AWS account ID of the account that owns the vault.
+	// You can either specify an AWS account ID or optionally a single '-'
+	// (hyphen), in which case Amazon Glacier uses the AWS account ID associated
+	// with the credentials used to sign the request. If you use an account ID,
+	// do not include any hyphens ('-') in the ID.
+	AccountId *string `location:"uri" locationName:"accountId" type:"string" required:"true"`
+
+	// The name of the vault.
+	VaultName *string `location:"uri" locationName:"vaultName" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s DescribeVaultInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeVaultInput) GoString() string {
+	return s.String()
+}
+
+// Contains the Amazon Glacier response to your request.
+type DescribeVaultOutput struct {
+	_ struct{} `type:"structure"`
+
+	// The UTC date when the vault was created. A string representation of ISO 8601
+	// date format, for example, "2012-03-20T17:03:43.221Z".
+	CreationDate *string `type:"string"`
+
+	// The UTC date when Amazon Glacier completed the last vault inventory. A string
+	// representation of ISO 8601 date format, for example, "2012-03-20T17:03:43.221Z".
+	LastInventoryDate *string `type:"string"`
+
+	// The number of archives in the vault as of the last inventory date. This field
+	// will return null if an inventory has not yet run on the vault, for example,
+	// if you just created the vault.
+	NumberOfArchives *int64 `type:"long"`
+
+	// Total size, in bytes, of the archives in the vault as of the last inventory
+	// date. This field will return null if an inventory has not yet run on the
+	// vault, for example, if you just created the vault.
+	SizeInBytes *int64 `type:"long"`
+
+	// The Amazon Resource Name (ARN) of the vault.
+	VaultARN *string `type:"string"`
+
+	// The name of the vault.
+	VaultName *string `type:"string"`
+}
+
+// String returns the string representation
+func (s DescribeVaultOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeVaultOutput) GoString() string {
+	return s.String()
+}
+
+// Input for GetDataRetrievalPolicy.
+type GetDataRetrievalPolicyInput struct {
+	_ struct{} `type:"structure"`
+
+	// The AccountId value is the AWS account ID. This value must match the AWS
+	// account ID associated with the credentials used to sign the request. You
+	// can either specify an AWS account ID or optionally a single '-' (hyphen),
+	// in which case Amazon Glacier uses the AWS account ID associated with the
+	// credentials used to sign the request. If you specify your account ID, do
+	// not include any hyphens ('-') in the ID.
+	AccountId *string `location:"uri" locationName:"accountId" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s GetDataRetrievalPolicyInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetDataRetrievalPolicyInput) GoString() string {
+	return s.String()
+}
+
+// Contains the Amazon Glacier response to the GetDataRetrievalPolicy request.
+type GetDataRetrievalPolicyOutput struct {
+	_ struct{} `type:"structure"`
+
+	// Contains the returned data retrieval policy in JSON format.
+	Policy *DataRetrievalPolicy `type:"structure"`
+}
+
+// String returns the string representation
+func (s GetDataRetrievalPolicyOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetDataRetrievalPolicyOutput) GoString() string {
+	return s.String()
+}
+
+// Provides options for downloading output of an Amazon Glacier job.
+type GetJobOutputInput struct {
+	_ struct{} `type:"structure"`
+
+	// The AccountId value is the AWS account ID of the account that owns the vault.
+	// You can either specify an AWS account ID or optionally a single '-'
+	// (hyphen), in which case Amazon Glacier uses the AWS account ID associated
+	// with the credentials used to sign the request. If you use an account ID,
+	// do not include any hyphens ('-') in the ID.
+	AccountId *string `location:"uri" locationName:"accountId" type:"string" required:"true"`
+
+	// The job ID whose data is downloaded.
+	JobId *string `location:"uri" locationName:"jobId" type:"string" required:"true"`
+
+	// The range of bytes to retrieve from the output. For example, if you want
+	// to download the first 1,048,576 bytes, specify "Range: bytes=0-1048575".
+	// By default, this operation downloads the entire output.
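+	//
+	// For illustration (not in the original documentation), requesting only the
+	// second megabyte of the output would look like the following, where input
+	// is a hypothetical *GetJobOutputInput being built:
+	//
+	//	r := "bytes=1048576-2097151"
+	//	input.Range = &r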
+	Range *string `location:"header" locationName:"Range" type:"string"`
+
+	// The name of the vault.
+	VaultName *string `location:"uri" locationName:"vaultName" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s GetJobOutputInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetJobOutputInput) GoString() string {
+	return s.String()
+}
+
+// Contains the Amazon Glacier response to your request.
+type GetJobOutputOutput struct {
+	_ struct{} `type:"structure" payload:"Body"`
+
+	// Indicates the range units accepted. For more information, go to RFC2616 (http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html).
+	AcceptRanges *string `location:"header" locationName:"Accept-Ranges" type:"string"`
+
+	// The description of an archive.
+	ArchiveDescription *string `location:"header" locationName:"x-amz-archive-description" type:"string"`
+
+	// The job data, either archive data or inventory data.
+	Body io.ReadCloser `locationName:"body" type:"blob"`
+
+	// The checksum of the data in the response. This header is returned only when
+	// retrieving the output for an archive retrieval job. Furthermore, this header
+	// appears only under the following conditions:
+	//
+	//   You get the entire range of the archive.
+	//
+	//   You request a range to return of the archive that starts and ends on a
+	//   multiple of 1 MB. For example, if you have a 3.1 MB archive and you specify
+	//   a range to return that starts at 1 MB and ends at 2 MB, then the
+	//   x-amz-sha256-tree-hash is returned as a response header.
+	//
+	//   You request a range of the archive to return that starts on a multiple
+	//   of 1 MB and goes to the end of the archive. For example, if you have a
+	//   3.1 MB archive and you specify a range that starts at 2 MB and ends at
+	//   3.1 MB (the end of the archive), then the x-amz-sha256-tree-hash is
+	//   returned as a response header.
+	Checksum *string `location:"header" locationName:"x-amz-sha256-tree-hash" type:"string"`
+
+	// The range of bytes returned by Amazon Glacier. If only partial output is
+	// downloaded, the response provides the range of bytes Amazon Glacier returned.
+	// For example, bytes 0-1048575/8388608 returns the first 1 MB from 8 MB.
+	ContentRange *string `location:"header" locationName:"Content-Range" type:"string"`
+
+	// The Content-Type depends on whether the job output is an archive or a vault
+	// inventory. For archive data, the Content-Type is application/octet-stream.
+	// For vault inventory, if you requested CSV format when you initiated the job,
+	// the Content-Type is text/csv. Otherwise, by default, vault inventory is returned
+	// as JSON, and the Content-Type is application/json.
+	ContentType *string `location:"header" locationName:"Content-Type" type:"string"`
+
+	// The HTTP response code for a job output request. The value depends on whether
+	// a range was specified in the request.
+	Status *int64 `location:"statusCode" locationName:"status" type:"integer"`
+}
+
+// String returns the string representation
+func (s GetJobOutputOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetJobOutputOutput) GoString() string {
+	return s.String()
+}
+
+// Input for GetVaultAccessPolicy.
+type GetVaultAccessPolicyInput struct {
+	_ struct{} `type:"structure"`
+
+	// The AccountId value is the AWS account ID of the account that owns the vault.
+	// You can either specify an AWS account ID or optionally a single '-'
+	// (hyphen), in which case Amazon Glacier uses the AWS account ID associated
+	// with the credentials used to sign the request. If you use an account ID,
+	// do not include any hyphens ('-') in the ID.
+	AccountId *string `location:"uri" locationName:"accountId" type:"string" required:"true"`
+
+	// The name of the vault.
+	VaultName *string `location:"uri" locationName:"vaultName" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s GetVaultAccessPolicyInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetVaultAccessPolicyInput) GoString() string {
+	return s.String()
+}
+
+// Output for GetVaultAccessPolicy.
+type GetVaultAccessPolicyOutput struct {
+	_ struct{} `type:"structure" payload:"Policy"`
+
+	// Contains the returned vault access policy as a JSON string.
+	Policy *VaultAccessPolicy `locationName:"policy" type:"structure"`
+}
+
+// String returns the string representation
+func (s GetVaultAccessPolicyOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetVaultAccessPolicyOutput) GoString() string {
+	return s.String()
+}
+
+// The input values for GetVaultLock.
+type GetVaultLockInput struct {
+	_ struct{} `type:"structure"`
+
+	// The AccountId value is the AWS account ID of the account that owns the vault.
+	// You can either specify an AWS account ID or optionally a single '-'
+	// (hyphen), in which case Amazon Glacier uses the AWS account ID associated
+	// with the credentials used to sign the request. If you use an account ID,
+	// do not include any hyphens ('-') in the ID.
+	AccountId *string `location:"uri" locationName:"accountId" type:"string" required:"true"`
+
+	// The name of the vault.
+	VaultName *string `location:"uri" locationName:"vaultName" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s GetVaultLockInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetVaultLockInput) GoString() string {
+	return s.String()
+}
+
+// Contains the Amazon Glacier response to your request.
+type GetVaultLockOutput struct {
+	_ struct{} `type:"structure"`
+
+	// The UTC date and time at which the vault lock was put into the InProgress
+	// state.
+	CreationDate *string `type:"string"`
+
+	// The UTC date and time at which the lock ID expires. This value can be null
+	// if the vault lock is in a Locked state.
+	ExpirationDate *string `type:"string"`
+
+	// The vault lock policy as a JSON string, which uses "\" as an escape character.
+	Policy *string `type:"string"`
+
+	// The state of the vault lock. InProgress or Locked.
+	State *string `type:"string"`
+}
+
+// String returns the string representation
+func (s GetVaultLockOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetVaultLockOutput) GoString() string {
+	return s.String()
+}
+
+// Provides options for retrieving the notification configuration set on an
+// Amazon Glacier vault.
+type GetVaultNotificationsInput struct {
+	_ struct{} `type:"structure"`
+
+	// The AccountId value is the AWS account ID of the account that owns the vault.
+	// You can either specify an AWS account ID or optionally a single '-'
+	// (hyphen), in which case Amazon Glacier uses the AWS account ID associated
+	// with the credentials used to sign the request. If you use an account ID,
+	// do not include any hyphens ('-') in the ID.
+	AccountId *string `location:"uri" locationName:"accountId" type:"string" required:"true"`
+
+	// The name of the vault.
+	VaultName *string `location:"uri" locationName:"vaultName" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s GetVaultNotificationsInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetVaultNotificationsInput) GoString() string {
+	return s.String()
+}
+
+// Contains the Amazon Glacier response to your request.
+type GetVaultNotificationsOutput struct {
+	_ struct{} `type:"structure" payload:"VaultNotificationConfig"`
+
+	// Returns the notification configuration set on the vault.
+	VaultNotificationConfig *VaultNotificationConfig `locationName:"vaultNotificationConfig" type:"structure"`
+}
+
+// String returns the string representation
+func (s GetVaultNotificationsOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetVaultNotificationsOutput) GoString() string {
+	return s.String()
+}
+
+// Provides options for initiating an Amazon Glacier job.
+type InitiateJobInput struct {
+	_ struct{} `type:"structure" payload:"JobParameters"`
+
+	// The AccountId value is the AWS account ID of the account that owns the vault.
+	// You can either specify an AWS account ID or optionally a single '-'
+	// (hyphen), in which case Amazon Glacier uses the AWS account ID associated
+	// with the credentials used to sign the request. If you use an account ID,
+	// do not include any hyphens ('-') in the ID.
+	AccountId *string `location:"uri" locationName:"accountId" type:"string" required:"true"`
+
+	// Provides options for specifying job information.
+	JobParameters *JobParameters `locationName:"jobParameters" type:"structure"`
+
+	// The name of the vault.
+	VaultName *string `location:"uri" locationName:"vaultName" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s InitiateJobInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s InitiateJobInput) GoString() string {
+	return s.String()
+}
+
+// Contains the Amazon Glacier response to your request.
+type InitiateJobOutput struct {
+	_ struct{} `type:"structure"`
+
+	// The ID of the job.
+	JobId *string `location:"header" locationName:"x-amz-job-id" type:"string"`
+
+	// The relative URI path of the job.
+	Location *string `location:"header" locationName:"Location" type:"string"`
+}
+
+// String returns the string representation
+func (s InitiateJobOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s InitiateJobOutput) GoString() string {
+	return s.String()
+}
+
+// Provides options for initiating a multipart upload to an Amazon Glacier vault.
+type InitiateMultipartUploadInput struct {
+	_ struct{} `type:"structure"`
+
+	// The AccountId value is the AWS account ID of the account that owns the vault.
+	// You can either specify an AWS account ID or optionally a single '-'
+	// (hyphen), in which case Amazon Glacier uses the AWS account ID associated
+	// with the credentials used to sign the request.
+	// If you use an account ID, do not include any hyphens ('-') in the ID.
+	AccountId *string `location:"uri" locationName:"accountId" type:"string" required:"true"`
+
+	// The description of the archive that you are uploading in parts.
+	ArchiveDescription *string `location:"header" locationName:"x-amz-archive-description" type:"string"`
+
+	// The size of each part except the last, in bytes. The last part can be smaller
+	// than this part size.
+	//
+	// The part size must be a megabyte (1024 KB) multiplied by a power of 2, for
+	// example 1048576 (1 MB), 2097152 (2 MB), 4194304 (4 MB), 8388608 (8 MB), and
+	// so on. The minimum allowable part size is 1 MB, and the maximum is 4 GB (4096
+	// MB).
+	PartSize *string `location:"header" locationName:"x-amz-part-size" type:"string"`
+
+	// The name of the vault.
+	VaultName *string `location:"uri" locationName:"vaultName" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s InitiateMultipartUploadInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s InitiateMultipartUploadInput) GoString() string {
+	return s.String()
+}
+
+// The Amazon Glacier response to your request.
+type InitiateMultipartUploadOutput struct {
+	_ struct{} `type:"structure"`
+
+	// The relative URI path of the multipart upload ID Amazon Glacier created.
+	Location *string `location:"header" locationName:"Location" type:"string"`
+
+	// The ID of the multipart upload. This value is also included as part of the
+	// location.
+	UploadId *string `location:"header" locationName:"x-amz-multipart-upload-id" type:"string"`
+}
+
+// String returns the string representation
+func (s InitiateMultipartUploadOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s InitiateMultipartUploadOutput) GoString() string {
+	return s.String()
+}
+
+// The input values for InitiateVaultLock.
+type InitiateVaultLockInput struct {
+	_ struct{} `type:"structure" payload:"Policy"`
+
+	// The AccountId value is the AWS account ID. This value must match the AWS
+	// account ID associated with the credentials used to sign the request. You
+	// can either specify an AWS account ID or optionally a single '-' (hyphen),
+	// in which case Amazon Glacier uses the AWS account ID associated with the
+	// credentials used to sign the request. If you specify your account ID, do
+	// not include any hyphens ('-') in the ID.
+	AccountId *string `location:"uri" locationName:"accountId" type:"string" required:"true"`
+
+	// The vault lock policy as a JSON string, which uses "\" as an escape character.
+	Policy *VaultLockPolicy `locationName:"policy" type:"structure"`
+
+	// The name of the vault.
+	VaultName *string `location:"uri" locationName:"vaultName" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s InitiateVaultLockInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s InitiateVaultLockInput) GoString() string {
+	return s.String()
+}
+
+// Contains the Amazon Glacier response to your request.
+type InitiateVaultLockOutput struct {
+	_ struct{} `type:"structure"`
+
+	// The lock ID, which is used to complete the vault locking process.
+	LockId *string `location:"header" locationName:"x-amz-lock-id" type:"string"`
+}
+
+// String returns the string representation
+func (s InitiateVaultLockOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s InitiateVaultLockOutput) GoString() string {
+	return s.String()
+}
+
+// Describes the options for a range inventory retrieval job.
+type InventoryRetrievalJobDescription struct {
+	_ struct{} `type:"structure"`
+
+	// The end of the date range in UTC for vault inventory retrieval that includes
+	// archives created before this date. A string representation of ISO 8601 date
+	// format, for example, 2013-03-20T17:03:43Z.
+	EndDate *string `type:"string"`
+
+	// The output format for the vault inventory list, which is set by the InitiateJob
+	// request when initiating a job to retrieve a vault inventory. Valid values
+	// are "CSV" and "JSON".
+	Format *string `type:"string"`
+
+	// Specifies the maximum number of inventory items returned per vault inventory
+	// retrieval request. This limit is set when initiating the job with an InitiateJob
+	// request.
+	Limit *string `type:"string"`
+
+	// An opaque string that represents where to continue pagination of the vault
+	// inventory retrieval results. You use the marker in a new InitiateJob request
+	// to obtain additional inventory items. If there are no more inventory items,
+	// this value is null. For more information, see Range Inventory Retrieval
+	// (http://docs.aws.amazon.com/amazonglacier/latest/dev/api-initiate-job-post.html#api-initiate-job-post-vault-inventory-list-filtering).
+	Marker *string `type:"string"`
+
+	// The start of the date range in UTC for vault inventory retrieval that includes
+	// archives created on or after this date. A string representation of ISO 8601
+	// date format, for example, 2013-03-20T17:03:43Z.
+	StartDate *string `type:"string"`
+}
+
+// String returns the string representation
+func (s InventoryRetrievalJobDescription) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s InventoryRetrievalJobDescription) GoString() string {
+	return s.String()
+}
+
+// Provides options for specifying a range inventory retrieval job.
+type InventoryRetrievalJobInput struct {
+	_ struct{} `type:"structure"`
+
+	// The end of the date range in UTC for vault inventory retrieval that includes
+	// archives created before this date. A string representation of ISO 8601 date
+	// format, for example, 2013-03-20T17:03:43Z.
+	EndDate *string `type:"string"`
+
+	// Specifies the maximum number of inventory items returned per vault inventory
+	// retrieval request. Valid values are greater than or equal to 1.
+	Limit *string `type:"string"`
+
+	// An opaque string that represents where to continue pagination of the vault
+	// inventory retrieval results. You use the marker in a new InitiateJob request
+	// to obtain additional inventory items. If there are no more inventory items,
+	// this value is null.
+	Marker *string `type:"string"`
+
+	// The start of the date range in UTC for vault inventory retrieval that includes
+	// archives created on or after this date. A string representation of ISO 8601
+	// date format, for example, 2013-03-20T17:03:43Z.
+	StartDate *string `type:"string"`
+}
+
+// String returns the string representation
+func (s InventoryRetrievalJobInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s InventoryRetrievalJobInput) GoString() string {
+	return s.String()
+}
+
+// Describes an Amazon Glacier job.
+type JobDescription struct {
+	_ struct{} `type:"structure"`
+
+	// The job type. It is either ArchiveRetrieval or InventoryRetrieval.
+	Action *string `type:"string" enum:"ActionCode"`
+
+	// For an ArchiveRetrieval job, this is the archive ID requested for download.
+	// Otherwise, this field is null.
+	ArchiveId *string `type:"string"`
+
+	// The SHA256 tree hash of the entire archive for an archive retrieval. For
+	// inventory retrieval jobs, this field is null.
+	ArchiveSHA256TreeHash *string `type:"string"`
+
+	// For an ArchiveRetrieval job, this is the size in bytes of the archive being
+	// requested for download. For the InventoryRetrieval job, the value is null.
+	ArchiveSizeInBytes *int64 `type:"long"`
+
+	// The job status. When a job is completed, you get the job's output.
+	Completed *bool `type:"boolean"`
+
+	// The UTC time that the archive retrieval request completed. While the job
+	// is in progress, the value will be null.
+	CompletionDate *string `type:"string"`
+
+	// The UTC date when the job was created. A string representation of ISO 8601
+	// date format, for example, "2012-03-20T17:03:43.221Z".
+	CreationDate *string `type:"string"`
+
+	// Parameters used for range inventory retrieval.
+	InventoryRetrievalParameters *InventoryRetrievalJobDescription `type:"structure"`
+
+	// For an InventoryRetrieval job, this is the size in bytes of the inventory
+	// requested for download. For the ArchiveRetrieval job, the value is null.
+	InventorySizeInBytes *int64 `type:"long"`
+
+	// The job description you provided when you initiated the job.
+	JobDescription *string `type:"string"`
+
+	// An opaque string that identifies an Amazon Glacier job.
+	JobId *string `type:"string"`
+
+	// The retrieved byte range for archive retrieval jobs in the form "StartByteValue-EndByteValue".
+	// If no range was specified in the archive retrieval, then the whole archive
+	// is retrieved and StartByteValue equals 0 and EndByteValue equals the size
+	// of the archive minus 1. For inventory retrieval jobs this field is null.
+	RetrievalByteRange *string `type:"string"`
+
+	// For an ArchiveRetrieval job, it is the checksum of the archive. Otherwise,
+	// the value is null.
+	//
+	// The SHA256 tree hash value for the requested range of an archive. If the
+	// Initiate a Job request for an archive specified a tree-hash aligned range,
+	// then this field returns a value.
+	//
+	// For the specific case when the whole archive is retrieved, this value is
+	// the same as the ArchiveSHA256TreeHash value.
+	//
+	// This field is null in the following situations:
+	//
+	//   Archive retrieval jobs that specify a range that is not tree-hash aligned.
+	//
+	//   Archival jobs that specify a range that is equal to the whole archive
+	//   and the job status is InProgress.
+	//
+	//   Inventory jobs.
+	SHA256TreeHash *string `type:"string"`
+
+	// An Amazon Simple Notification Service (Amazon SNS) topic that receives notifications.
+	SNSTopic *string `type:"string"`
+
+	// The status code can be InProgress, Succeeded, or Failed, and indicates the
+	// status of the job.
+	StatusCode *string `type:"string" enum:"StatusCode"`
+
+	// A friendly message that describes the job status.
+	StatusMessage *string `type:"string"`
+
+	// The Amazon Resource Name (ARN) of the vault from which the archive retrieval
+	// was requested.
+	VaultARN *string `type:"string"`
+}
+
+// String returns the string representation
+func (s JobDescription) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s JobDescription) GoString() string {
+	return s.String()
+}
+
+// Provides options for defining a job.
+type JobParameters struct {
+	_ struct{} `type:"structure"`
+
+	// The ID of the archive that you want to retrieve. This field is required only
+	// if Type is set to archive-retrieval. An error occurs if you specify this
+	// request parameter for an inventory retrieval job request.
+	ArchiveId *string `type:"string"`
+
+	// The optional description for the job. The description must be less than or
+	// equal to 1,024 bytes. The allowable characters are 7-bit ASCII without control
+	// codes; specifically, ASCII values 32-126 decimal or 0x20-0x7E hexadecimal.
+	Description *string `type:"string"`
+
+	// When initiating a job to retrieve a vault inventory, you can optionally add
+	// this parameter to your request to specify the output format. If you are initiating
+	// an inventory job and do not specify a Format field, JSON is the default format.
+	// Valid values are "CSV" and "JSON".
+	Format *string `type:"string"`
+
+	// Input parameters used for range inventory retrieval.
+	InventoryRetrievalParameters *InventoryRetrievalJobInput `type:"structure"`
+
+	// The byte range to retrieve for an archive retrieval, in the form "StartByteValue-EndByteValue".
+	// If not specified, the whole archive is retrieved. If specified, the byte
+	// range must be megabyte (1024*1024) aligned, which means that StartByteValue
+	// must be divisible by 1 MB and EndByteValue plus 1 must be divisible by 1
+	// MB or be the end of the archive specified as the archive byte size value
+	// minus 1. If RetrievalByteRange is not megabyte aligned, this operation returns
+	// a 400 response.
+	//
+	// An error occurs if you specify this field for an inventory retrieval job
+	// request.
+	RetrievalByteRange *string `type:"string"`
+
+	// The Amazon SNS topic ARN to which Amazon Glacier sends a notification when
+	// the job is completed and the output is ready for you to download. The specified
+	// topic publishes the notification to its subscribers. The SNS topic must exist.
+	SNSTopic *string `type:"string"`
+
+	// The job type. You can initiate a job to retrieve an archive or get an inventory
+	// of a vault. Valid values are "archive-retrieval" and "inventory-retrieval".
+	Type *string `type:"string"`
+}
+
+// String returns the string representation
+func (s JobParameters) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s JobParameters) GoString() string {
+	return s.String()
+}
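+
+// initiateArchiveRetrievalSketch is an illustrative sketch, not part of the
+// generated API: it shows how JobParameters is typically populated for an
+// archive-retrieval job and passed to InitiateJob (defined earlier in this
+// file). The vault name and archive ID are caller-supplied placeholders.
+func initiateArchiveRetrievalSketch(c *Glacier, vault, archiveID string) (*InitiateJobOutput, error) {
+	accountID := "-" // the account that signs the request
+	jobType := "archive-retrieval"
+	return c.InitiateJob(&InitiateJobInput{
+		AccountId: &accountID,
+		VaultName: &vault,
+		JobParameters: &JobParameters{
+			Type:      &jobType,
+			ArchiveId: &archiveID,
+		},
+	})
+}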
+ AccountId *string `location:"uri" locationName:"accountId" type:"string" required:"true"`
+
+ // Specifies the state of the jobs to return. You can specify true or false.
+ Completed *string `location:"querystring" locationName:"completed" type:"string"`
+
+ // Specifies that the response be limited to the specified number of items or
+ // fewer. If not specified, the List Jobs operation returns up to 1,000 jobs.
+ Limit *string `location:"querystring" locationName:"limit" type:"string"`
+
+ // An opaque string used for pagination. This value specifies the job at which
+ // the listing of jobs should begin. Get the marker value from a previous List
+ // Jobs response. You need only include the marker if you are continuing the
+ // pagination of results started in a previous List Jobs request.
+ Marker *string `location:"querystring" locationName:"marker" type:"string"`
+
+ // Specifies the type of job status to return. You can specify the following
+ // values: "InProgress", "Succeeded", or "Failed".
+ Statuscode *string `location:"querystring" locationName:"statuscode" type:"string"`
+
+ // The name of the vault.
+ VaultName *string `location:"uri" locationName:"vaultName" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s ListJobsInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListJobsInput) GoString() string {
+ return s.String()
+}
+
+// Contains the Amazon Glacier response to your request.
+type ListJobsOutput struct {
+ _ struct{} `type:"structure"`
+
+ // A list of job objects. Each job object contains metadata describing the job.
+ JobList []*JobDescription `type:"list"`
+
+ // An opaque string that represents where to continue pagination of the results.
+ // You use this value in a new List Jobs request to obtain more jobs in the
+ // list. If there are no more jobs, this value is null.
+ Marker *string `type:"string"`
+}
+
+// String returns the string representation
+func (s ListJobsOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListJobsOutput) GoString() string {
+ return s.String()
+}
+
+// Provides options for retrieving a list of in-progress multipart uploads for
+// an Amazon Glacier vault.
+type ListMultipartUploadsInput struct {
+ _ struct{} `type:"structure"`
+
+ // The AccountId value is the AWS account ID of the account that owns the vault.
+ // You can either specify an AWS account ID or optionally a single '-'
+ // (hyphen), in which case Amazon Glacier uses the AWS account ID associated
+ // with the credentials used to sign the request. If you use an account ID,
+ // do not include any hyphens ('-') in the ID.
+ AccountId *string `location:"uri" locationName:"accountId" type:"string" required:"true"`
+
+ // Specifies the maximum number of uploads returned in the response body. If
+ // this value is not specified, the List Uploads operation returns up to 1,000
+ // uploads.
+ Limit *string `location:"querystring" locationName:"limit" type:"string"`
+
+ // An opaque string used for pagination. This value specifies the upload at
+ // which the listing of uploads should begin. Get the marker value from a previous
+ // List Uploads response. You need only include the marker if you are continuing
+ // the pagination of results started in a previous List Uploads request.
+ Marker *string `location:"querystring" locationName:"marker" type:"string"`
+
+ // The name of the vault.
+ VaultName *string `location:"uri" locationName:"vaultName" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s ListMultipartUploadsInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListMultipartUploadsInput) GoString() string {
+ return s.String()
+}
+
+// Contains the Amazon Glacier response to your request.
+type ListMultipartUploadsOutput struct {
+ _ struct{} `type:"structure"`
+
+ // An opaque string that represents where to continue pagination of the results.
+ // You use the marker in a new List Multipart Uploads request to obtain more
+ // uploads in the list. If there are no more uploads, this value is null.
+ Marker *string `type:"string"`
+
+ // A list of in-progress multipart uploads.
+ UploadsList []*UploadListElement `type:"list"`
+}
+
+// String returns the string representation
+func (s ListMultipartUploadsOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListMultipartUploadsOutput) GoString() string {
+ return s.String()
+}
+
+// Provides options for retrieving a list of parts of an archive that have been
+// uploaded in a specific multipart upload.
+type ListPartsInput struct {
+ _ struct{} `type:"structure"`
+
+ // The AccountId value is the AWS account ID of the account that owns the vault.
+ // You can either specify an AWS account ID or optionally a single '-'
+ // (hyphen), in which case Amazon Glacier uses the AWS account ID associated
+ // with the credentials used to sign the request. If you use an account ID,
+ // do not include any hyphens ('-') in the ID.
+ AccountId *string `location:"uri" locationName:"accountId" type:"string" required:"true"`
+
+ // Specifies the maximum number of parts returned in the response body. If this
+ // value is not specified, the List Parts operation returns up to 1,000 parts.
+ Limit *string `location:"querystring" locationName:"limit" type:"string"`
+
+ // An opaque string used for pagination. This value specifies the part at which
+ // the listing of parts should begin. Get the marker value from a previous List
+ // Parts response. You need only include the marker if you are continuing the
+ // pagination of results started in a previous List Parts request.
+ Marker *string `location:"querystring" locationName:"marker" type:"string"`
+
+ // The upload ID of the multipart upload.
+ UploadId *string `location:"uri" locationName:"uploadId" type:"string" required:"true"`
+
+ // The name of the vault.
+ VaultName *string `location:"uri" locationName:"vaultName" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s ListPartsInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListPartsInput) GoString() string {
+ return s.String()
+}
+
+// Contains the Amazon Glacier response to your request.
+type ListPartsOutput struct {
+ _ struct{} `type:"structure"`
+
+ // The description of the archive that was specified in the Initiate Multipart
+ // Upload request.
+ ArchiveDescription *string `type:"string"`
+
+ // The UTC time at which the multipart upload was initiated.
+ CreationDate *string `type:"string"`
+
+ // An opaque string that represents where to continue pagination of the results.
+ // You use the marker in a new List Parts request to obtain more parts in the
+ // list. If there are no more parts, this value is null.
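+ //
+ // Instead of threading this marker by hand, the ListPartsPages helper can
+ // walk every page (a sketch):
+ //
+ //    err := svc.ListPartsPages(input,
+ //        func(page *glacier.ListPartsOutput, lastPage bool) bool {
+ //            parts = append(parts, page.Parts...) // accumulate across pages
+ //            return true                          // returning false stops early
+ //        })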
+ Marker *string `type:"string"`
+
+ // The ID of the upload to which the parts are associated.
+ MultipartUploadId *string `type:"string"`
+
+ // The part size in bytes.
+ PartSizeInBytes *int64 `type:"long"`
+
+ // A list of the part sizes of the multipart upload.
+ Parts []*PartListElement `type:"list"`
+
+ // The Amazon Resource Name (ARN) of the vault to which the multipart upload
+ // was initiated.
+ VaultARN *string `type:"string"`
+}
+
+// String returns the string representation
+func (s ListPartsOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListPartsOutput) GoString() string {
+ return s.String()
+}
+
+// The input value for ListTagsForVaultInput.
+type ListTagsForVaultInput struct {
+ _ struct{} `type:"structure"`
+
+ // The AccountId value is the AWS account ID of the account that owns the vault.
+ // You can either specify an AWS account ID or optionally a single '-'
+ // (hyphen), in which case Amazon Glacier uses the AWS account ID associated
+ // with the credentials used to sign the request. If you use an account ID,
+ // do not include any hyphens ('-') in the ID.
+ AccountId *string `location:"uri" locationName:"accountId" type:"string" required:"true"`
+
+ // The name of the vault.
+ VaultName *string `location:"uri" locationName:"vaultName" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s ListTagsForVaultInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListTagsForVaultInput) GoString() string {
+ return s.String()
+}
+
+// Contains the Amazon Glacier response to your request.
+type ListTagsForVaultOutput struct {
+ _ struct{} `type:"structure"`
+
+ // The tags attached to the vault. Each tag is composed of a key and a value.
+ Tags map[string]*string `type:"map"`
+}
+
+// String returns the string representation
+func (s ListTagsForVaultOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListTagsForVaultOutput) GoString() string {
+ return s.String()
+}
+
+// Provides options to retrieve the vault list owned by the calling user's account.
+// The list provides metadata information for each vault.
+type ListVaultsInput struct {
+ _ struct{} `type:"structure"`
+
+ // The AccountId value is the AWS account ID. This value must match the AWS
+ // account ID associated with the credentials used to sign the request. You
+ // can either specify an AWS account ID or optionally a single '-' (hyphen),
+ // in which case Amazon Glacier uses the AWS account ID associated with the
+ // credentials used to sign the request. If you specify your account ID, do
+ // not include any hyphens ('-') in the ID.
+ AccountId *string `location:"uri" locationName:"accountId" type:"string" required:"true"`
+
+ // The maximum number of items returned in the response. If you don't specify
+ // a value, the List Vaults operation returns up to 1,000 items.
+ Limit *string `location:"querystring" locationName:"limit" type:"string"`
+
+ // A string used for pagination. The marker specifies the vault ARN after which
+ // the listing of vaults should begin.
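+ //
+ // A sketch of manual pagination with this marker (ListVaultsPages wraps the
+ // same loop as a helper):
+ //
+ //    params := &glacier.ListVaultsInput{AccountId: aws.String("-")}
+ //    for {
+ //        resp, err := svc.ListVaults(params)
+ //        if err != nil {
+ //            break
+ //        }
+ //        // ...consume resp.VaultList...
+ //        if resp.Marker == nil {
+ //            break // no more pages
+ //        }
+ //        params.Marker = resp.Marker
+ //    }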
+ Marker *string `location:"querystring" locationName:"marker" type:"string"`
+}
+
+// String returns the string representation
+func (s ListVaultsInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListVaultsInput) GoString() string {
+ return s.String()
+}
+
+// Contains the Amazon Glacier response to your request.
+type ListVaultsOutput struct {
+ _ struct{} `type:"structure"`
+
+ // The vault ARN at which to continue pagination of the results. You use the
+ // marker in another List Vaults request to obtain more vaults in the list.
+ Marker *string `type:"string"`
+
+ // List of vaults.
+ VaultList []*DescribeVaultOutput `type:"list"`
+}
+
+// String returns the string representation
+func (s ListVaultsOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListVaultsOutput) GoString() string {
+ return s.String()
+}
+
+// A list of the part sizes of the multipart upload.
+type PartListElement struct {
+ _ struct{} `type:"structure"`
+
+ // The byte range of a part, inclusive of the upper value of the range.
+ RangeInBytes *string `type:"string"`
+
+ // The SHA256 tree hash value that Amazon Glacier calculated for the part. This
+ // field is never null.
+ SHA256TreeHash *string `type:"string"`
+}
+
+// String returns the string representation
+func (s PartListElement) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s PartListElement) GoString() string {
+ return s.String()
+}
+
+// The input value for RemoveTagsFromVaultInput.
+type RemoveTagsFromVaultInput struct {
+ _ struct{} `type:"structure"`
+
+ // The AccountId value is the AWS account ID of the account that owns the vault.
+ // You can either specify an AWS account ID or optionally a single '-'
+ // (hyphen), in which case Amazon Glacier uses the AWS account ID associated
+ // with the credentials used to sign the request. If you use an account ID,
+ // do not include any hyphens ('-') in the ID.
+ AccountId *string `location:"uri" locationName:"accountId" type:"string" required:"true"`
+
+ // A list of tag keys. Each corresponding tag is removed from the vault.
+ TagKeys []*string `type:"list"`
+
+ // The name of the vault.
+ VaultName *string `location:"uri" locationName:"vaultName" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s RemoveTagsFromVaultInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s RemoveTagsFromVaultInput) GoString() string {
+ return s.String()
+}
+
+type RemoveTagsFromVaultOutput struct {
+ _ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s RemoveTagsFromVaultOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s RemoveTagsFromVaultOutput) GoString() string {
+ return s.String()
+}
+
+// SetDataRetrievalPolicy input.
+type SetDataRetrievalPolicyInput struct {
+ _ struct{} `type:"structure"`
+
+ // The AccountId value is the AWS account ID. This value must match the AWS
+ // account ID associated with the credentials used to sign the request. You
+ // can either specify an AWS account ID or optionally a single '-' (hyphen),
+ // in which case Amazon Glacier uses the AWS account ID associated with the
+ // credentials used to sign the request. If you specify your account ID, do
+ // not include any hyphens ('-') in the ID.
+ AccountId *string `location:"uri" locationName:"accountId" type:"string" required:"true"`
+
+ // The data retrieval policy in JSON format.
+ Policy *DataRetrievalPolicy `type:"structure"`
+}
+
+// String returns the string representation
+func (s SetDataRetrievalPolicyInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s SetDataRetrievalPolicyInput) GoString() string {
+ return s.String()
+}
+
+type SetDataRetrievalPolicyOutput struct {
+ _ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s SetDataRetrievalPolicyOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s SetDataRetrievalPolicyOutput) GoString() string {
+ return s.String()
+}
+
+// SetVaultAccessPolicy input.
+type SetVaultAccessPolicyInput struct {
+ _ struct{} `type:"structure" payload:"Policy"`
+
+ // The AccountId value is the AWS account ID of the account that owns the vault.
+ // You can either specify an AWS account ID or optionally a single '-'
+ // (hyphen), in which case Amazon Glacier uses the AWS account ID associated
+ // with the credentials used to sign the request. If you use an account ID,
+ // do not include any hyphens ('-') in the ID.
+ AccountId *string `location:"uri" locationName:"accountId" type:"string" required:"true"`
+
+ // The vault access policy as a JSON string.
+ Policy *VaultAccessPolicy `locationName:"policy" type:"structure"`
+
+ // The name of the vault.
+ VaultName *string `location:"uri" locationName:"vaultName" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s SetVaultAccessPolicyInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s SetVaultAccessPolicyInput) GoString() string {
+ return s.String()
+}
+
+type SetVaultAccessPolicyOutput struct {
+ _ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s SetVaultAccessPolicyOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s SetVaultAccessPolicyOutput) GoString() string {
+ return s.String()
+}
+
+// Provides options to configure notifications that will be sent when specific
+// events happen to a vault.
+type SetVaultNotificationsInput struct {
+ _ struct{} `type:"structure" payload:"VaultNotificationConfig"`
+
+ // The AccountId value is the AWS account ID of the account that owns the vault.
+ // You can either specify an AWS account ID or optionally a single '-'
+ // (hyphen), in which case Amazon Glacier uses the AWS account ID associated
+ // with the credentials used to sign the request. If you use an account ID,
+ // do not include any hyphens ('-') in the ID.
+ AccountId *string `location:"uri" locationName:"accountId" type:"string" required:"true"`
+
+ // The name of the vault.
+ VaultName *string `location:"uri" locationName:"vaultName" type:"string" required:"true"`
+
+ // Provides options for specifying notification configuration.
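+ //
+ // A sketch of a complete request (the vault name and topic ARN are
+ // illustrative):
+ //
+ //    svc.SetVaultNotifications(&glacier.SetVaultNotificationsInput{
+ //        AccountId: aws.String("-"),
+ //        VaultName: aws.String("examplevault"),
+ //        VaultNotificationConfig: &glacier.VaultNotificationConfig{
+ //            SNSTopic: aws.String("arn:aws:sns:us-west-2:111122223333:vault-events"),
+ //            Events:   []*string{aws.String("ArchiveRetrievalCompleted")},
+ //        },
+ //    })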
+ VaultNotificationConfig *VaultNotificationConfig `locationName:"vaultNotificationConfig" type:"structure"`
+}
+
+// String returns the string representation
+func (s SetVaultNotificationsInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s SetVaultNotificationsInput) GoString() string {
+ return s.String()
+}
+
+type SetVaultNotificationsOutput struct {
+ _ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s SetVaultNotificationsOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s SetVaultNotificationsOutput) GoString() string {
+ return s.String()
+}
+
+// Provides options to add an archive to a vault.
+type UploadArchiveInput struct {
+ _ struct{} `type:"structure" payload:"Body"`
+
+ // The AccountId value is the AWS account ID of the account that owns the vault.
+ // You can either specify an AWS account ID or optionally a single '-'
+ // (hyphen), in which case Amazon Glacier uses the AWS account ID associated
+ // with the credentials used to sign the request. If you use an account ID,
+ // do not include any hyphens ('-') in the ID.
+ AccountId *string `location:"uri" locationName:"accountId" type:"string" required:"true"`
+
+ // The optional description of the archive you are uploading.
+ ArchiveDescription *string `location:"header" locationName:"x-amz-archive-description" type:"string"`
+
+ // The data to upload.
+ Body io.ReadSeeker `locationName:"body" type:"blob"`
+
+ // The SHA256 tree hash of the data being uploaded.
+ Checksum *string `location:"header" locationName:"x-amz-sha256-tree-hash" type:"string"`
+
+ // The name of the vault.
+ VaultName *string `location:"uri" locationName:"vaultName" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s UploadArchiveInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s UploadArchiveInput) GoString() string {
+ return s.String()
+}
+
+// A list of in-progress multipart uploads for a vault.
+type UploadListElement struct {
+ _ struct{} `type:"structure"`
+
+ // The description of the archive that was specified in the Initiate Multipart
+ // Upload request.
+ ArchiveDescription *string `type:"string"`
+
+ // The UTC time at which the multipart upload was initiated.
+ CreationDate *string `type:"string"`
+
+ // The ID of a multipart upload.
+ MultipartUploadId *string `type:"string"`
+
+ // The part size, in bytes, specified in the Initiate Multipart Upload request.
+ // This is the size of all the parts in the upload except the last part, which
+ // may be smaller than this size.
+ PartSizeInBytes *int64 `type:"long"`
+
+ // The Amazon Resource Name (ARN) of the vault that contains the archive.
+ VaultARN *string `type:"string"`
+}
+
+// String returns the string representation
+func (s UploadListElement) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s UploadListElement) GoString() string {
+ return s.String()
+}
+
+// Provides options to upload a part of an archive in a multipart upload operation.
+type UploadMultipartPartInput struct {
+ _ struct{} `type:"structure" payload:"Body"`
+
+ // The AccountId value is the AWS account ID of the account that owns the vault.
+ // You can either specify an AWS account ID or optionally a single '-'
+ // (hyphen), in which case Amazon Glacier uses the AWS account ID associated
+ // with the credentials used to sign the request. If you use an account ID,
+ // do not include any hyphens ('-') in the ID.
+ AccountId *string `location:"uri" locationName:"accountId" type:"string" required:"true"`
+
+ // The data to upload.
+ Body io.ReadSeeker `locationName:"body" type:"blob"`
+
+ // The SHA256 tree hash of the data being uploaded.
+ Checksum *string `location:"header" locationName:"x-amz-sha256-tree-hash" type:"string"`
+
+ // Identifies the range of bytes in the assembled archive that will be uploaded
+ // in this part. Amazon Glacier uses this information to assemble the archive
+ // in the proper sequence. The format of this header follows RFC 2616. An example
+ // header is Content-Range:bytes 0-4194303/*.
+ Range *string `location:"header" locationName:"Content-Range" type:"string"`
+
+ // The upload ID of the multipart upload.
+ UploadId *string `location:"uri" locationName:"uploadId" type:"string" required:"true"`
+
+ // The name of the vault.
+ VaultName *string `location:"uri" locationName:"vaultName" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s UploadMultipartPartInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s UploadMultipartPartInput) GoString() string {
+ return s.String()
+}
+
+// Contains the Amazon Glacier response to your request.
+type UploadMultipartPartOutput struct {
+ _ struct{} `type:"structure"`
+
+ // The SHA256 tree hash that Amazon Glacier computed for the uploaded part.
+ Checksum *string `location:"header" locationName:"x-amz-sha256-tree-hash" type:"string"`
+}
+
+// String returns the string representation
+func (s UploadMultipartPartOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s UploadMultipartPartOutput) GoString() string {
+ return s.String()
+}
+
+// Contains the vault access policy.
+type VaultAccessPolicy struct {
+ _ struct{} `type:"structure"`
+
+ // The vault access policy.
+ Policy *string `type:"string"`
+}
+
+// String returns the string representation
+func (s VaultAccessPolicy) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s VaultAccessPolicy) GoString() string {
+ return s.String()
+}
+
+// Contains the vault lock policy.
+type VaultLockPolicy struct {
+ _ struct{} `type:"structure"`
+
+ // The vault lock policy.
+ Policy *string `type:"string"`
+}
+
+// String returns the string representation
+func (s VaultLockPolicy) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s VaultLockPolicy) GoString() string {
+ return s.String()
+}
+
+// Represents a vault's notification configuration.
+type VaultNotificationConfig struct {
+ _ struct{} `type:"structure"`
+
+ // A list of one or more events for which Amazon Glacier will send a notification
+ // to the specified Amazon SNS topic.
+ Events []*string `type:"list"`
+
+ // The Amazon Simple Notification Service (Amazon SNS) topic Amazon Resource
+ // Name (ARN).
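+ // An illustrative (not real) value: "arn:aws:sns:us-west-2:111122223333:vault-notifications".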
+ SNSTopic *string `type:"string"` +} + +// String returns the string representation +func (s VaultNotificationConfig) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s VaultNotificationConfig) GoString() string { + return s.String() +} + +const ( + // @enum ActionCode + ActionCodeArchiveRetrieval = "ArchiveRetrieval" + // @enum ActionCode + ActionCodeInventoryRetrieval = "InventoryRetrieval" +) + +const ( + // @enum StatusCode + StatusCodeInProgress = "InProgress" + // @enum StatusCode + StatusCodeSucceeded = "Succeeded" + // @enum StatusCode + StatusCodeFailed = "Failed" +) diff --git a/vendor/github.com/aws/aws-sdk-go/service/glacier/customizations.go b/vendor/github.com/aws/aws-sdk-go/service/glacier/customizations.go new file mode 100644 index 000000000..fd5701724 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/glacier/customizations.go @@ -0,0 +1,58 @@ +package glacier + +import ( + "encoding/hex" + "reflect" + + "github.com/aws/aws-sdk-go/aws/awsutil" + "github.com/aws/aws-sdk-go/aws/request" +) + +var ( + defaultAccountID = "-" +) + +func init() { + initRequest = func(r *request.Request) { + r.Handlers.Validate.PushFront(addAccountID) + r.Handlers.Validate.PushFront(copyParams) // this happens first + r.Handlers.Build.PushBack(addChecksum) + r.Handlers.Build.PushBack(addAPIVersion) + } +} + +func copyParams(r *request.Request) { + r.Params = awsutil.CopyOf(r.Params) +} + +func addAccountID(r *request.Request) { + if !r.ParamsFilled() { + return + } + + v := reflect.Indirect(reflect.ValueOf(r.Params)) + if f := v.FieldByName("AccountId"); f.IsNil() { + f.Set(reflect.ValueOf(&defaultAccountID)) + } +} + +func addChecksum(r *request.Request) { + if r.Body == nil { + return + } + + h := ComputeHashes(r.Body) + + if r.HTTPRequest.Header.Get("X-Amz-Content-Sha256") == "" { + hstr := hex.EncodeToString(h.LinearHash) + r.HTTPRequest.Header.Set("X-Amz-Content-Sha256", hstr) + } + if r.HTTPRequest.Header.Get("X-Amz-Sha256-Tree-Hash") == "" { + hstr := hex.EncodeToString(h.TreeHash) + r.HTTPRequest.Header.Set("X-Amz-Sha256-Tree-Hash", hstr) + } +} + +func addAPIVersion(r *request.Request) { + r.HTTPRequest.Header.Set("X-Amz-Glacier-Version", r.ClientInfo.APIVersion) +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/glacier/customizations_test.go b/vendor/github.com/aws/aws-sdk-go/service/glacier/customizations_test.go new file mode 100644 index 000000000..2825b81c7 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/glacier/customizations_test.go @@ -0,0 +1,77 @@ +// +build !integration + +package glacier_test + +import ( + "bytes" + "testing" + + "github.com/stretchr/testify/assert" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awsutil" + "github.com/aws/aws-sdk-go/awstesting/unit" + "github.com/aws/aws-sdk-go/service/glacier" +) + +var ( + payloadBuf = func() *bytes.Reader { + buf := make([]byte, 5767168) // 5.5MB buffer + for i := range buf { + buf[i] = '0' // Fill with zero characters + } + return bytes.NewReader(buf) + }() + + svc = glacier.New(unit.Session) +) + +func TestCustomizations(t *testing.T) { + req, _ := svc.UploadArchiveRequest(&glacier.UploadArchiveInput{ + VaultName: aws.String("vault"), + Body: payloadBuf, + }) + err := req.Build() + assert.NoError(t, err) + + // Sets API version + assert.Equal(t, req.ClientInfo.APIVersion, req.HTTPRequest.Header.Get("x-amz-glacier-version")) + + // Sets Account ID + v, _ := awsutil.ValuesAtPath(req.Params, "AccountId") + 
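+ // addAccountID (customizations.go) should have injected the default "-"
+ // account ID, since the input above left AccountId nil.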
assert.Equal(t, "-", *(v[0].(*string))) + + // Computes checksums + linear := "68aff0c5a91aa0491752bfb96e3fef33eb74953804f6a2f7b708d5bcefa8ff6b" + tree := "154e26c78fd74d0c2c9b3cc4644191619dc4f2cd539ae2a74d5fd07957a3ee6a" + assert.Equal(t, linear, req.HTTPRequest.Header.Get("x-amz-content-sha256")) + assert.Equal(t, tree, req.HTTPRequest.Header.Get("x-amz-sha256-tree-hash")) +} + +func TestShortcircuitTreehash(t *testing.T) { + req, _ := svc.UploadArchiveRequest(&glacier.UploadArchiveInput{ + VaultName: aws.String("vault"), + Body: payloadBuf, + Checksum: aws.String("000"), + }) + err := req.Build() + assert.NoError(t, err) + + assert.Equal(t, "000", req.HTTPRequest.Header.Get("x-amz-sha256-tree-hash")) +} + +func TestFillAccountIDWithNilStruct(t *testing.T) { + req, _ := svc.ListVaultsRequest(nil) + err := req.Build() + assert.NoError(t, err) + + empty := "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855" + + // Sets Account ID + v, _ := awsutil.ValuesAtPath(req.Params, "AccountId") + assert.Equal(t, "-", *(v[0].(*string))) + + // Does not set tree hash + assert.Equal(t, empty, req.HTTPRequest.Header.Get("x-amz-content-sha256")) + assert.Equal(t, "", req.HTTPRequest.Header.Get("x-amz-sha256-tree-hash")) +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/glacier/examples_test.go b/vendor/github.com/aws/aws-sdk-go/service/glacier/examples_test.go new file mode 100644 index 000000000..9384d3dae --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/glacier/examples_test.go @@ -0,0 +1,706 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +package glacier_test + +import ( + "bytes" + "fmt" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/session" + "github.com/aws/aws-sdk-go/service/glacier" +) + +var _ time.Duration +var _ bytes.Buffer + +func ExampleGlacier_AbortMultipartUpload() { + svc := glacier.New(session.New()) + + params := &glacier.AbortMultipartUploadInput{ + AccountId: aws.String("string"), // Required + UploadId: aws.String("string"), // Required + VaultName: aws.String("string"), // Required + } + resp, err := svc.AbortMultipartUpload(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleGlacier_AbortVaultLock() { + svc := glacier.New(session.New()) + + params := &glacier.AbortVaultLockInput{ + AccountId: aws.String("string"), // Required + VaultName: aws.String("string"), // Required + } + resp, err := svc.AbortVaultLock(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleGlacier_AddTagsToVault() { + svc := glacier.New(session.New()) + + params := &glacier.AddTagsToVaultInput{ + AccountId: aws.String("string"), // Required + VaultName: aws.String("string"), // Required + Tags: map[string]*string{ + "Key": aws.String("TagValue"), // Required + // More values... + }, + } + resp, err := svc.AddTagsToVault(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleGlacier_CompleteMultipartUpload() { + svc := glacier.New(session.New()) + + params := &glacier.CompleteMultipartUploadInput{ + AccountId: aws.String("string"), // Required + UploadId: aws.String("string"), // Required + VaultName: aws.String("string"), // Required + ArchiveSize: aws.String("string"), + Checksum: aws.String("string"), + } + resp, err := svc.CompleteMultipartUpload(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleGlacier_CompleteVaultLock() { + svc := glacier.New(session.New()) + + params := &glacier.CompleteVaultLockInput{ + AccountId: aws.String("string"), // Required + LockId: aws.String("string"), // Required + VaultName: aws.String("string"), // Required + } + resp, err := svc.CompleteVaultLock(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleGlacier_CreateVault() { + svc := glacier.New(session.New()) + + params := &glacier.CreateVaultInput{ + AccountId: aws.String("string"), // Required + VaultName: aws.String("string"), // Required + } + resp, err := svc.CreateVault(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleGlacier_DeleteArchive() { + svc := glacier.New(session.New()) + + params := &glacier.DeleteArchiveInput{ + AccountId: aws.String("string"), // Required + ArchiveId: aws.String("string"), // Required + VaultName: aws.String("string"), // Required + } + resp, err := svc.DeleteArchive(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleGlacier_DeleteVault() { + svc := glacier.New(session.New()) + + params := &glacier.DeleteVaultInput{ + AccountId: aws.String("string"), // Required + VaultName: aws.String("string"), // Required + } + resp, err := svc.DeleteVault(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleGlacier_DeleteVaultAccessPolicy() { + svc := glacier.New(session.New()) + + params := &glacier.DeleteVaultAccessPolicyInput{ + AccountId: aws.String("string"), // Required + VaultName: aws.String("string"), // Required + } + resp, err := svc.DeleteVaultAccessPolicy(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleGlacier_DeleteVaultNotifications() { + svc := glacier.New(session.New()) + + params := &glacier.DeleteVaultNotificationsInput{ + AccountId: aws.String("string"), // Required + VaultName: aws.String("string"), // Required + } + resp, err := svc.DeleteVaultNotifications(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. 
+ fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleGlacier_DescribeJob() { + svc := glacier.New(session.New()) + + params := &glacier.DescribeJobInput{ + AccountId: aws.String("string"), // Required + JobId: aws.String("string"), // Required + VaultName: aws.String("string"), // Required + } + resp, err := svc.DescribeJob(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleGlacier_DescribeVault() { + svc := glacier.New(session.New()) + + params := &glacier.DescribeVaultInput{ + AccountId: aws.String("string"), // Required + VaultName: aws.String("string"), // Required + } + resp, err := svc.DescribeVault(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleGlacier_GetDataRetrievalPolicy() { + svc := glacier.New(session.New()) + + params := &glacier.GetDataRetrievalPolicyInput{ + AccountId: aws.String("string"), // Required + } + resp, err := svc.GetDataRetrievalPolicy(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleGlacier_GetJobOutput() { + svc := glacier.New(session.New()) + + params := &glacier.GetJobOutputInput{ + AccountId: aws.String("string"), // Required + JobId: aws.String("string"), // Required + VaultName: aws.String("string"), // Required + Range: aws.String("string"), + } + resp, err := svc.GetJobOutput(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleGlacier_GetVaultAccessPolicy() { + svc := glacier.New(session.New()) + + params := &glacier.GetVaultAccessPolicyInput{ + AccountId: aws.String("string"), // Required + VaultName: aws.String("string"), // Required + } + resp, err := svc.GetVaultAccessPolicy(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleGlacier_GetVaultLock() { + svc := glacier.New(session.New()) + + params := &glacier.GetVaultLockInput{ + AccountId: aws.String("string"), // Required + VaultName: aws.String("string"), // Required + } + resp, err := svc.GetVaultLock(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleGlacier_GetVaultNotifications() { + svc := glacier.New(session.New()) + + params := &glacier.GetVaultNotificationsInput{ + AccountId: aws.String("string"), // Required + VaultName: aws.String("string"), // Required + } + resp, err := svc.GetVaultNotifications(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleGlacier_InitiateJob() { + svc := glacier.New(session.New()) + + params := &glacier.InitiateJobInput{ + AccountId: aws.String("string"), // Required + VaultName: aws.String("string"), // Required + JobParameters: &glacier.JobParameters{ + ArchiveId: aws.String("string"), + Description: aws.String("string"), + Format: aws.String("string"), + InventoryRetrievalParameters: &glacier.InventoryRetrievalJobInput{ + EndDate: aws.String("string"), + Limit: aws.String("string"), + Marker: aws.String("string"), + StartDate: aws.String("string"), + }, + RetrievalByteRange: aws.String("string"), + SNSTopic: aws.String("string"), + Type: aws.String("string"), + }, + } + resp, err := svc.InitiateJob(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleGlacier_InitiateMultipartUpload() { + svc := glacier.New(session.New()) + + params := &glacier.InitiateMultipartUploadInput{ + AccountId: aws.String("string"), // Required + VaultName: aws.String("string"), // Required + ArchiveDescription: aws.String("string"), + PartSize: aws.String("string"), + } + resp, err := svc.InitiateMultipartUpload(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleGlacier_InitiateVaultLock() { + svc := glacier.New(session.New()) + + params := &glacier.InitiateVaultLockInput{ + AccountId: aws.String("string"), // Required + VaultName: aws.String("string"), // Required + Policy: &glacier.VaultLockPolicy{ + Policy: aws.String("string"), + }, + } + resp, err := svc.InitiateVaultLock(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleGlacier_ListJobs() { + svc := glacier.New(session.New()) + + params := &glacier.ListJobsInput{ + AccountId: aws.String("string"), // Required + VaultName: aws.String("string"), // Required + Completed: aws.String("string"), + Limit: aws.String("string"), + Marker: aws.String("string"), + Statuscode: aws.String("string"), + } + resp, err := svc.ListJobs(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleGlacier_ListMultipartUploads() { + svc := glacier.New(session.New()) + + params := &glacier.ListMultipartUploadsInput{ + AccountId: aws.String("string"), // Required + VaultName: aws.String("string"), // Required + Limit: aws.String("string"), + Marker: aws.String("string"), + } + resp, err := svc.ListMultipartUploads(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleGlacier_ListParts() { + svc := glacier.New(session.New()) + + params := &glacier.ListPartsInput{ + AccountId: aws.String("string"), // Required + UploadId: aws.String("string"), // Required + VaultName: aws.String("string"), // Required + Limit: aws.String("string"), + Marker: aws.String("string"), + } + resp, err := svc.ListParts(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleGlacier_ListTagsForVault() { + svc := glacier.New(session.New()) + + params := &glacier.ListTagsForVaultInput{ + AccountId: aws.String("string"), // Required + VaultName: aws.String("string"), // Required + } + resp, err := svc.ListTagsForVault(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleGlacier_ListVaults() { + svc := glacier.New(session.New()) + + params := &glacier.ListVaultsInput{ + AccountId: aws.String("string"), // Required + Limit: aws.String("string"), + Marker: aws.String("string"), + } + resp, err := svc.ListVaults(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleGlacier_RemoveTagsFromVault() { + svc := glacier.New(session.New()) + + params := &glacier.RemoveTagsFromVaultInput{ + AccountId: aws.String("string"), // Required + VaultName: aws.String("string"), // Required + TagKeys: []*string{ + aws.String("string"), // Required + // More values... + }, + } + resp, err := svc.RemoveTagsFromVault(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleGlacier_SetDataRetrievalPolicy() { + svc := glacier.New(session.New()) + + params := &glacier.SetDataRetrievalPolicyInput{ + AccountId: aws.String("string"), // Required + Policy: &glacier.DataRetrievalPolicy{ + Rules: []*glacier.DataRetrievalRule{ + { // Required + BytesPerHour: aws.Int64(1), + Strategy: aws.String("string"), + }, + // More values... + }, + }, + } + resp, err := svc.SetDataRetrievalPolicy(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleGlacier_SetVaultAccessPolicy() { + svc := glacier.New(session.New()) + + params := &glacier.SetVaultAccessPolicyInput{ + AccountId: aws.String("string"), // Required + VaultName: aws.String("string"), // Required + Policy: &glacier.VaultAccessPolicy{ + Policy: aws.String("string"), + }, + } + resp, err := svc.SetVaultAccessPolicy(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleGlacier_SetVaultNotifications() { + svc := glacier.New(session.New()) + + params := &glacier.SetVaultNotificationsInput{ + AccountId: aws.String("string"), // Required + VaultName: aws.String("string"), // Required + VaultNotificationConfig: &glacier.VaultNotificationConfig{ + Events: []*string{ + aws.String("string"), // Required + // More values... + }, + SNSTopic: aws.String("string"), + }, + } + resp, err := svc.SetVaultNotifications(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleGlacier_UploadArchive() { + svc := glacier.New(session.New()) + + params := &glacier.UploadArchiveInput{ + AccountId: aws.String("string"), // Required + VaultName: aws.String("string"), // Required + ArchiveDescription: aws.String("string"), + Body: bytes.NewReader([]byte("PAYLOAD")), + Checksum: aws.String("string"), + } + resp, err := svc.UploadArchive(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleGlacier_UploadMultipartPart() { + svc := glacier.New(session.New()) + + params := &glacier.UploadMultipartPartInput{ + AccountId: aws.String("string"), // Required + UploadId: aws.String("string"), // Required + VaultName: aws.String("string"), // Required + Body: bytes.NewReader([]byte("PAYLOAD")), + Checksum: aws.String("string"), + Range: aws.String("string"), + } + resp, err := svc.UploadMultipartPart(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/glacier/glacieriface/interface.go b/vendor/github.com/aws/aws-sdk-go/service/glacier/glacieriface/interface.go new file mode 100644 index 000000000..7b2f76798 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/glacier/glacieriface/interface.go @@ -0,0 +1,146 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +// Package glacieriface provides an interface for the Amazon Glacier. +package glacieriface + +import ( + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/service/glacier" +) + +// GlacierAPI is the interface type for glacier.Glacier. 
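+//
+// GlacierAPI can be embedded in a stub to mock the client in tests, overriding
+// only the methods a test needs. A sketch (mockGlacier is hypothetical):
+//
+//    type mockGlacier struct {
+//        glacieriface.GlacierAPI
+//    }
+//
+//    func (m *mockGlacier) ListVaults(in *glacier.ListVaultsInput) (*glacier.ListVaultsOutput, error) {
+//        return &glacier.ListVaultsOutput{}, nil // canned response for the test
+//    }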
+type GlacierAPI interface { + AbortMultipartUploadRequest(*glacier.AbortMultipartUploadInput) (*request.Request, *glacier.AbortMultipartUploadOutput) + + AbortMultipartUpload(*glacier.AbortMultipartUploadInput) (*glacier.AbortMultipartUploadOutput, error) + + AbortVaultLockRequest(*glacier.AbortVaultLockInput) (*request.Request, *glacier.AbortVaultLockOutput) + + AbortVaultLock(*glacier.AbortVaultLockInput) (*glacier.AbortVaultLockOutput, error) + + AddTagsToVaultRequest(*glacier.AddTagsToVaultInput) (*request.Request, *glacier.AddTagsToVaultOutput) + + AddTagsToVault(*glacier.AddTagsToVaultInput) (*glacier.AddTagsToVaultOutput, error) + + CompleteMultipartUploadRequest(*glacier.CompleteMultipartUploadInput) (*request.Request, *glacier.ArchiveCreationOutput) + + CompleteMultipartUpload(*glacier.CompleteMultipartUploadInput) (*glacier.ArchiveCreationOutput, error) + + CompleteVaultLockRequest(*glacier.CompleteVaultLockInput) (*request.Request, *glacier.CompleteVaultLockOutput) + + CompleteVaultLock(*glacier.CompleteVaultLockInput) (*glacier.CompleteVaultLockOutput, error) + + CreateVaultRequest(*glacier.CreateVaultInput) (*request.Request, *glacier.CreateVaultOutput) + + CreateVault(*glacier.CreateVaultInput) (*glacier.CreateVaultOutput, error) + + DeleteArchiveRequest(*glacier.DeleteArchiveInput) (*request.Request, *glacier.DeleteArchiveOutput) + + DeleteArchive(*glacier.DeleteArchiveInput) (*glacier.DeleteArchiveOutput, error) + + DeleteVaultRequest(*glacier.DeleteVaultInput) (*request.Request, *glacier.DeleteVaultOutput) + + DeleteVault(*glacier.DeleteVaultInput) (*glacier.DeleteVaultOutput, error) + + DeleteVaultAccessPolicyRequest(*glacier.DeleteVaultAccessPolicyInput) (*request.Request, *glacier.DeleteVaultAccessPolicyOutput) + + DeleteVaultAccessPolicy(*glacier.DeleteVaultAccessPolicyInput) (*glacier.DeleteVaultAccessPolicyOutput, error) + + DeleteVaultNotificationsRequest(*glacier.DeleteVaultNotificationsInput) (*request.Request, *glacier.DeleteVaultNotificationsOutput) + + DeleteVaultNotifications(*glacier.DeleteVaultNotificationsInput) (*glacier.DeleteVaultNotificationsOutput, error) + + DescribeJobRequest(*glacier.DescribeJobInput) (*request.Request, *glacier.JobDescription) + + DescribeJob(*glacier.DescribeJobInput) (*glacier.JobDescription, error) + + DescribeVaultRequest(*glacier.DescribeVaultInput) (*request.Request, *glacier.DescribeVaultOutput) + + DescribeVault(*glacier.DescribeVaultInput) (*glacier.DescribeVaultOutput, error) + + GetDataRetrievalPolicyRequest(*glacier.GetDataRetrievalPolicyInput) (*request.Request, *glacier.GetDataRetrievalPolicyOutput) + + GetDataRetrievalPolicy(*glacier.GetDataRetrievalPolicyInput) (*glacier.GetDataRetrievalPolicyOutput, error) + + GetJobOutputRequest(*glacier.GetJobOutputInput) (*request.Request, *glacier.GetJobOutputOutput) + + GetJobOutput(*glacier.GetJobOutputInput) (*glacier.GetJobOutputOutput, error) + + GetVaultAccessPolicyRequest(*glacier.GetVaultAccessPolicyInput) (*request.Request, *glacier.GetVaultAccessPolicyOutput) + + GetVaultAccessPolicy(*glacier.GetVaultAccessPolicyInput) (*glacier.GetVaultAccessPolicyOutput, error) + + GetVaultLockRequest(*glacier.GetVaultLockInput) (*request.Request, *glacier.GetVaultLockOutput) + + GetVaultLock(*glacier.GetVaultLockInput) (*glacier.GetVaultLockOutput, error) + + GetVaultNotificationsRequest(*glacier.GetVaultNotificationsInput) (*request.Request, *glacier.GetVaultNotificationsOutput) + + GetVaultNotifications(*glacier.GetVaultNotificationsInput) (*glacier.GetVaultNotificationsOutput, 
error) + + InitiateJobRequest(*glacier.InitiateJobInput) (*request.Request, *glacier.InitiateJobOutput) + + InitiateJob(*glacier.InitiateJobInput) (*glacier.InitiateJobOutput, error) + + InitiateMultipartUploadRequest(*glacier.InitiateMultipartUploadInput) (*request.Request, *glacier.InitiateMultipartUploadOutput) + + InitiateMultipartUpload(*glacier.InitiateMultipartUploadInput) (*glacier.InitiateMultipartUploadOutput, error) + + InitiateVaultLockRequest(*glacier.InitiateVaultLockInput) (*request.Request, *glacier.InitiateVaultLockOutput) + + InitiateVaultLock(*glacier.InitiateVaultLockInput) (*glacier.InitiateVaultLockOutput, error) + + ListJobsRequest(*glacier.ListJobsInput) (*request.Request, *glacier.ListJobsOutput) + + ListJobs(*glacier.ListJobsInput) (*glacier.ListJobsOutput, error) + + ListJobsPages(*glacier.ListJobsInput, func(*glacier.ListJobsOutput, bool) bool) error + + ListMultipartUploadsRequest(*glacier.ListMultipartUploadsInput) (*request.Request, *glacier.ListMultipartUploadsOutput) + + ListMultipartUploads(*glacier.ListMultipartUploadsInput) (*glacier.ListMultipartUploadsOutput, error) + + ListMultipartUploadsPages(*glacier.ListMultipartUploadsInput, func(*glacier.ListMultipartUploadsOutput, bool) bool) error + + ListPartsRequest(*glacier.ListPartsInput) (*request.Request, *glacier.ListPartsOutput) + + ListParts(*glacier.ListPartsInput) (*glacier.ListPartsOutput, error) + + ListPartsPages(*glacier.ListPartsInput, func(*glacier.ListPartsOutput, bool) bool) error + + ListTagsForVaultRequest(*glacier.ListTagsForVaultInput) (*request.Request, *glacier.ListTagsForVaultOutput) + + ListTagsForVault(*glacier.ListTagsForVaultInput) (*glacier.ListTagsForVaultOutput, error) + + ListVaultsRequest(*glacier.ListVaultsInput) (*request.Request, *glacier.ListVaultsOutput) + + ListVaults(*glacier.ListVaultsInput) (*glacier.ListVaultsOutput, error) + + ListVaultsPages(*glacier.ListVaultsInput, func(*glacier.ListVaultsOutput, bool) bool) error + + RemoveTagsFromVaultRequest(*glacier.RemoveTagsFromVaultInput) (*request.Request, *glacier.RemoveTagsFromVaultOutput) + + RemoveTagsFromVault(*glacier.RemoveTagsFromVaultInput) (*glacier.RemoveTagsFromVaultOutput, error) + + SetDataRetrievalPolicyRequest(*glacier.SetDataRetrievalPolicyInput) (*request.Request, *glacier.SetDataRetrievalPolicyOutput) + + SetDataRetrievalPolicy(*glacier.SetDataRetrievalPolicyInput) (*glacier.SetDataRetrievalPolicyOutput, error) + + SetVaultAccessPolicyRequest(*glacier.SetVaultAccessPolicyInput) (*request.Request, *glacier.SetVaultAccessPolicyOutput) + + SetVaultAccessPolicy(*glacier.SetVaultAccessPolicyInput) (*glacier.SetVaultAccessPolicyOutput, error) + + SetVaultNotificationsRequest(*glacier.SetVaultNotificationsInput) (*request.Request, *glacier.SetVaultNotificationsOutput) + + SetVaultNotifications(*glacier.SetVaultNotificationsInput) (*glacier.SetVaultNotificationsOutput, error) + + UploadArchiveRequest(*glacier.UploadArchiveInput) (*request.Request, *glacier.ArchiveCreationOutput) + + UploadArchive(*glacier.UploadArchiveInput) (*glacier.ArchiveCreationOutput, error) + + UploadMultipartPartRequest(*glacier.UploadMultipartPartInput) (*request.Request, *glacier.UploadMultipartPartOutput) + + UploadMultipartPart(*glacier.UploadMultipartPartInput) (*glacier.UploadMultipartPartOutput, error) +} + +var _ GlacierAPI = (*glacier.Glacier)(nil) diff --git a/vendor/github.com/aws/aws-sdk-go/service/glacier/service.go b/vendor/github.com/aws/aws-sdk-go/service/glacier/service.go new file mode 100644 index 
000000000..c71840e8e --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/glacier/service.go @@ -0,0 +1,116 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +package glacier + +import ( + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/client" + "github.com/aws/aws-sdk-go/aws/client/metadata" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol/restjson" + "github.com/aws/aws-sdk-go/private/signer/v4" +) + +// Amazon Glacier is a storage solution for "cold data." +// +// Amazon Glacier is an extremely low-cost storage service that provides secure, +// durable, and easy-to-use storage for data backup and archival. With Amazon +// Glacier, customers can store their data cost effectively for months, years, +// or decades. Amazon Glacier also enables customers to offload the administrative +// burdens of operating and scaling storage to AWS, so they don't have to worry +// about capacity planning, hardware provisioning, data replication, hardware +// failure and recovery, or time-consuming hardware migrations. +// +// Amazon Glacier is a great storage choice when low storage cost is paramount, +// your data is rarely retrieved, and retrieval latency of several hours is +// acceptable. If your application requires fast or frequent access to your +// data, consider using Amazon S3. For more information, go to Amazon Simple +// Storage Service (Amazon S3) (http://aws.amazon.com/s3/). +// +// You can store any kind of data in any format. There is no maximum limit +// on the total amount of data you can store in Amazon Glacier. +// +// If you are a first-time user of Amazon Glacier, we recommend that you begin +// by reading the following sections in the Amazon Glacier Developer Guide: +// +// What is Amazon Glacier (http://docs.aws.amazon.com/amazonglacier/latest/dev/introduction.html) +// - This section of the Developer Guide describes the underlying data model, +// the operations it supports, and the AWS SDKs that you can use to interact +// with the service. +// +// Getting Started with Amazon Glacier (http://docs.aws.amazon.com/amazonglacier/latest/dev/amazon-glacier-getting-started.html) +// - The Getting Started section walks you through the process of creating a +// vault, uploading archives, creating jobs to download archives, retrieving +// the job output, and deleting archives. +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type Glacier struct { + *client.Client +} + +// Used for custom client initialization logic +var initClient func(*client.Client) + +// Used for custom request initialization logic +var initRequest func(*request.Request) + +// A ServiceName is the name of the service the client will make API calls to. +const ServiceName = "glacier" + +// New creates a new instance of the Glacier client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a Glacier client from just a session. +// svc := glacier.New(mySession) +// +// // Create a Glacier client with additional configuration +// svc := glacier.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func New(p client.ConfigProvider, cfgs ...*aws.Config) *Glacier { + c := p.ClientConfig(ServiceName, cfgs...) 
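+ // c carries the resolved per-service settings (merged aws.Config, handler
+ // list, endpoint, and signing region) that newClient wires into the client.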
+ return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *Glacier { + svc := &Glacier{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: ServiceName, + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "2012-06-01", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Build.PushBack(restjson.Build) + svc.Handlers.Unmarshal.PushBack(restjson.Unmarshal) + svc.Handlers.UnmarshalMeta.PushBack(restjson.UnmarshalMeta) + svc.Handlers.UnmarshalError.PushBack(restjson.UnmarshalError) + + // Run custom client initialization if present + if initClient != nil { + initClient(svc.Client) + } + + return svc +} + +// newRequest creates a new request for a Glacier operation and runs any +// custom request initialization. +func (c *Glacier) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + // Run custom request initialization if present + if initRequest != nil { + initRequest(req) + } + + return req +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/glacier/treehash.go b/vendor/github.com/aws/aws-sdk-go/service/glacier/treehash.go new file mode 100644 index 000000000..dac44baf5 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/glacier/treehash.go @@ -0,0 +1,71 @@ +package glacier + +import ( + "crypto/sha256" + "io" +) + +const bufsize = 1024 * 1024 + +// Hash contains information about the tree-hash and linear hash of a +// Glacier payload. This structure is generated by ComputeHashes(). +type Hash struct { + TreeHash []byte + LinearHash []byte +} + +// ComputeHashes computes the tree-hash and linear hash of a seekable reader r. +func ComputeHashes(r io.ReadSeeker) Hash { + r.Seek(0, 0) // Read the whole stream + defer r.Seek(0, 0) // Rewind stream at end + + buf := make([]byte, bufsize) + hashes := [][]byte{} + hsh := sha256.New() + + for { + // Build leaf nodes in 1MB chunks + n, err := io.ReadAtLeast(r, buf, bufsize) + if n == 0 { + break + } + + tmpHash := sha256.Sum256(buf[:n]) + hashes = append(hashes, tmpHash[:]) + hsh.Write(buf[:n]) // Track linear hash while we're at it + + if err != nil { + break // This is the last chunk + } + } + + return Hash{ + LinearHash: hsh.Sum(nil), + TreeHash: buildHashTree(hashes), + } +} + +// buildHashTree builds a hash tree root node given a set of hashes. +func buildHashTree(hashes [][]byte) []byte { + if hashes == nil || len(hashes) == 0 { + return nil + } + + for len(hashes) > 1 { + tmpHashes := [][]byte{} + + for i := 0; i < len(hashes); i += 2 { + if i+1 <= len(hashes)-1 { + tmpHash := append(append([]byte{}, hashes[i]...), hashes[i+1]...) 
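+ // Each parent node is the SHA-256 of its two children concatenated; an odd
+ // node left over at the end of a level is promoted unchanged (else branch).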
+ tmpSum := sha256.Sum256(tmpHash) + tmpHashes = append(tmpHashes, tmpSum[:]) + } else { + tmpHashes = append(tmpHashes, hashes[i]) + } + } + + hashes = tmpHashes + } + + return hashes[0] +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/glacier/treehash_test.go b/vendor/github.com/aws/aws-sdk-go/service/glacier/treehash_test.go new file mode 100644 index 000000000..970a083b9 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/glacier/treehash_test.go @@ -0,0 +1,28 @@ +package glacier_test + +import ( + "bytes" + "fmt" + + "github.com/aws/aws-sdk-go/service/glacier" +) + +func ExampleComputeHashes() { + buf := make([]byte, 5767168) // 5.5MB buffer + for i := range buf { + buf[i] = '0' // Fill with zero characters + } + + r := bytes.NewReader(buf) + h := glacier.ComputeHashes(r) + n, _ := r.Seek(0, 1) // Check position after checksumming + + fmt.Printf("linear: %x\n", h.LinearHash) + fmt.Printf("tree: %x\n", h.TreeHash) + fmt.Printf("pos: %d\n", n) + + // Output: + // linear: 68aff0c5a91aa0491752bfb96e3fef33eb74953804f6a2f7b708d5bcefa8ff6b + // tree: 154e26c78fd74d0c2c9b3cc4644191619dc4f2cd539ae2a74d5fd07957a3ee6a + // pos: 0 +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/glacier/waiters.go b/vendor/github.com/aws/aws-sdk-go/service/glacier/waiters.go new file mode 100644 index 000000000..e6fbedfa1 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/glacier/waiters.go @@ -0,0 +1,65 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +package glacier + +import ( + "github.com/aws/aws-sdk-go/private/waiter" +) + +func (c *Glacier) WaitUntilVaultExists(input *DescribeVaultInput) error { + waiterCfg := waiter.Config{ + Operation: "DescribeVault", + Delay: 3, + MaxAttempts: 15, + Acceptors: []waiter.WaitAcceptor{ + { + State: "success", + Matcher: "status", + Argument: "", + Expected: 200, + }, + { + State: "retry", + Matcher: "error", + Argument: "", + Expected: "ResourceNotFoundException", + }, + }, + } + + w := waiter.Waiter{ + Client: c, + Input: input, + Config: waiterCfg, + } + return w.Wait() +} + +func (c *Glacier) WaitUntilVaultNotExists(input *DescribeVaultInput) error { + waiterCfg := waiter.Config{ + Operation: "DescribeVault", + Delay: 3, + MaxAttempts: 15, + Acceptors: []waiter.WaitAcceptor{ + { + State: "retry", + Matcher: "status", + Argument: "", + Expected: 200, + }, + { + State: "success", + Matcher: "error", + Argument: "", + Expected: "ResourceNotFoundException", + }, + }, + } + + w := waiter.Waiter{ + Client: c, + Input: input, + Config: waiterCfg, + } + return w.Wait() +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/iam/api.go b/vendor/github.com/aws/aws-sdk-go/service/iam/api.go new file mode 100644 index 000000000..89b862c27 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/iam/api.go @@ -0,0 +1,11168 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +// Package iam provides a client for AWS Identity and Access Management. +package iam + +import ( + "time" + + "github.com/aws/aws-sdk-go/aws/awsutil" + "github.com/aws/aws-sdk-go/aws/request" +) + +const opAddClientIDToOpenIDConnectProvider = "AddClientIDToOpenIDConnectProvider" + +// AddClientIDToOpenIDConnectProviderRequest generates a request for the AddClientIDToOpenIDConnectProvider operation. 
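+//
+// All generated operations follow this Request/Send split; a minimal usage
+// sketch of the pattern (svc and input here are hypothetical, pre-built
+// values, not part of the generated file):
+//
+//	req, out := svc.AddClientIDToOpenIDConnectProviderRequest(input)
+//	err := req.Send() // out is populated once Send returns without error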
+func (c *IAM) AddClientIDToOpenIDConnectProviderRequest(input *AddClientIDToOpenIDConnectProviderInput) (req *request.Request, output *AddClientIDToOpenIDConnectProviderOutput) { + op := &request.Operation{ + Name: opAddClientIDToOpenIDConnectProvider, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &AddClientIDToOpenIDConnectProviderInput{} + } + + req = c.newRequest(op, input, output) + output = &AddClientIDToOpenIDConnectProviderOutput{} + req.Data = output + return +} + +// Adds a new client ID (also known as audience) to the list of client IDs already +// registered for the specified IAM OpenID Connect provider. +// +// This action is idempotent; it does not fail or return an error if you add +// an existing client ID to the provider. +func (c *IAM) AddClientIDToOpenIDConnectProvider(input *AddClientIDToOpenIDConnectProviderInput) (*AddClientIDToOpenIDConnectProviderOutput, error) { + req, out := c.AddClientIDToOpenIDConnectProviderRequest(input) + err := req.Send() + return out, err +} + +const opAddRoleToInstanceProfile = "AddRoleToInstanceProfile" + +// AddRoleToInstanceProfileRequest generates a request for the AddRoleToInstanceProfile operation. +func (c *IAM) AddRoleToInstanceProfileRequest(input *AddRoleToInstanceProfileInput) (req *request.Request, output *AddRoleToInstanceProfileOutput) { + op := &request.Operation{ + Name: opAddRoleToInstanceProfile, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &AddRoleToInstanceProfileInput{} + } + + req = c.newRequest(op, input, output) + output = &AddRoleToInstanceProfileOutput{} + req.Data = output + return +} + +// Adds the specified role to the specified instance profile. For more information +// about roles, go to Working with Roles (http://docs.aws.amazon.com/IAM/latest/UserGuide/WorkingWithRoles.html). +// For more information about instance profiles, go to About Instance Profiles +// (http://docs.aws.amazon.com/IAM/latest/UserGuide/AboutInstanceProfiles.html). +func (c *IAM) AddRoleToInstanceProfile(input *AddRoleToInstanceProfileInput) (*AddRoleToInstanceProfileOutput, error) { + req, out := c.AddRoleToInstanceProfileRequest(input) + err := req.Send() + return out, err +} + +const opAddUserToGroup = "AddUserToGroup" + +// AddUserToGroupRequest generates a request for the AddUserToGroup operation. +func (c *IAM) AddUserToGroupRequest(input *AddUserToGroupInput) (req *request.Request, output *AddUserToGroupOutput) { + op := &request.Operation{ + Name: opAddUserToGroup, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &AddUserToGroupInput{} + } + + req = c.newRequest(op, input, output) + output = &AddUserToGroupOutput{} + req.Data = output + return +} + +// Adds the specified user to the specified group. +func (c *IAM) AddUserToGroup(input *AddUserToGroupInput) (*AddUserToGroupOutput, error) { + req, out := c.AddUserToGroupRequest(input) + err := req.Send() + return out, err +} + +const opAttachGroupPolicy = "AttachGroupPolicy" + +// AttachGroupPolicyRequest generates a request for the AttachGroupPolicy operation. 
+func (c *IAM) AttachGroupPolicyRequest(input *AttachGroupPolicyInput) (req *request.Request, output *AttachGroupPolicyOutput) { + op := &request.Operation{ + Name: opAttachGroupPolicy, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &AttachGroupPolicyInput{} + } + + req = c.newRequest(op, input, output) + output = &AttachGroupPolicyOutput{} + req.Data = output + return +} + +// Attaches the specified managed policy to the specified group. +// +// You use this API to attach a managed policy to a group. To embed an inline +// policy in a group, use PutGroupPolicy. +// +// For more information about policies, refer to Managed Policies and Inline +// Policies (http://docs.aws.amazon.com/IAM/latest/UserGuide/policies-managed-vs-inline.html) +// in the IAM User Guide. +func (c *IAM) AttachGroupPolicy(input *AttachGroupPolicyInput) (*AttachGroupPolicyOutput, error) { + req, out := c.AttachGroupPolicyRequest(input) + err := req.Send() + return out, err +} + +const opAttachRolePolicy = "AttachRolePolicy" + +// AttachRolePolicyRequest generates a request for the AttachRolePolicy operation. +func (c *IAM) AttachRolePolicyRequest(input *AttachRolePolicyInput) (req *request.Request, output *AttachRolePolicyOutput) { + op := &request.Operation{ + Name: opAttachRolePolicy, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &AttachRolePolicyInput{} + } + + req = c.newRequest(op, input, output) + output = &AttachRolePolicyOutput{} + req.Data = output + return +} + +// Attaches the specified managed policy to the specified role. +// +// When you attach a managed policy to a role, the managed policy is used as +// the role's access (permissions) policy. You cannot use a managed policy as +// the role's trust policy. The role's trust policy is created at the same time +// as the role, using CreateRole. You can update a role's trust policy using +// UpdateAssumeRolePolicy. +// +// Use this API to attach a managed policy to a role. To embed an inline policy +// in a role, use PutRolePolicy. For more information about policies, refer +// to Managed Policies and Inline Policies (http://docs.aws.amazon.com/IAM/latest/UserGuide/policies-managed-vs-inline.html) +// in the IAM User Guide. +func (c *IAM) AttachRolePolicy(input *AttachRolePolicyInput) (*AttachRolePolicyOutput, error) { + req, out := c.AttachRolePolicyRequest(input) + err := req.Send() + return out, err +} + +const opAttachUserPolicy = "AttachUserPolicy" + +// AttachUserPolicyRequest generates a request for the AttachUserPolicy operation. +func (c *IAM) AttachUserPolicyRequest(input *AttachUserPolicyInput) (req *request.Request, output *AttachUserPolicyOutput) { + op := &request.Operation{ + Name: opAttachUserPolicy, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &AttachUserPolicyInput{} + } + + req = c.newRequest(op, input, output) + output = &AttachUserPolicyOutput{} + req.Data = output + return +} + +// Attaches the specified managed policy to the specified user. +// +// You use this API to attach a managed policy to a user. To embed an inline +// policy in a user, use PutUserPolicy. +// +// For more information about policies, refer to Managed Policies and Inline +// Policies (http://docs.aws.amazon.com/IAM/latest/UserGuide/policies-managed-vs-inline.html) +// in the IAM User Guide. 
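+//
+// A minimal usage sketch, assuming svc is an already-constructed *iam.IAM
+// client and the user name and policy ARN are illustrative:
+//
+//	_, err := svc.AttachUserPolicy(&iam.AttachUserPolicyInput{
+//		UserName:  aws.String("alice"),
+//		PolicyArn: aws.String("arn:aws:iam::aws:policy/ReadOnlyAccess"),
+//	})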
+func (c *IAM) AttachUserPolicy(input *AttachUserPolicyInput) (*AttachUserPolicyOutput, error) { + req, out := c.AttachUserPolicyRequest(input) + err := req.Send() + return out, err +} + +const opChangePassword = "ChangePassword" + +// ChangePasswordRequest generates a request for the ChangePassword operation. +func (c *IAM) ChangePasswordRequest(input *ChangePasswordInput) (req *request.Request, output *ChangePasswordOutput) { + op := &request.Operation{ + Name: opChangePassword, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ChangePasswordInput{} + } + + req = c.newRequest(op, input, output) + output = &ChangePasswordOutput{} + req.Data = output + return +} + +// Changes the password of the IAM user who is calling this action. The root +// account password is not affected by this action. +// +// To change the password for a different user, see UpdateLoginProfile. For +// more information about modifying passwords, see Managing Passwords (http://docs.aws.amazon.com/IAM/latest/UserGuide/Using_ManagingLogins.html) +// in the IAM User Guide. +func (c *IAM) ChangePassword(input *ChangePasswordInput) (*ChangePasswordOutput, error) { + req, out := c.ChangePasswordRequest(input) + err := req.Send() + return out, err +} + +const opCreateAccessKey = "CreateAccessKey" + +// CreateAccessKeyRequest generates a request for the CreateAccessKey operation. +func (c *IAM) CreateAccessKeyRequest(input *CreateAccessKeyInput) (req *request.Request, output *CreateAccessKeyOutput) { + op := &request.Operation{ + Name: opCreateAccessKey, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateAccessKeyInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateAccessKeyOutput{} + req.Data = output + return +} + +// Creates a new AWS secret access key and corresponding AWS access key ID for +// the specified user. The default status for new keys is Active. +// +// If you do not specify a user name, IAM determines the user name implicitly +// based on the AWS access key ID signing the request. Because this action works +// for access keys under the AWS account, you can use this action to manage +// root credentials even if the AWS account has no associated users. +// +// For information about limits on the number of keys you can create, see +// Limitations on IAM Entities (http://docs.aws.amazon.com/IAM/latest/UserGuide/LimitationsOnEntities.html) +// in the IAM User Guide. +// +// To ensure the security of your AWS account, the secret access key is accessible +// only during key and user creation. You must save the key (for example, in +// a text file) if you want to be able to access it again. If a secret key is +// lost, you can delete the access keys for the associated user and then create +// new keys. +func (c *IAM) CreateAccessKey(input *CreateAccessKeyInput) (*CreateAccessKeyOutput, error) { + req, out := c.CreateAccessKeyRequest(input) + err := req.Send() + return out, err +} + +const opCreateAccountAlias = "CreateAccountAlias" + +// CreateAccountAliasRequest generates a request for the CreateAccountAlias operation. 
+func (c *IAM) CreateAccountAliasRequest(input *CreateAccountAliasInput) (req *request.Request, output *CreateAccountAliasOutput) { + op := &request.Operation{ + Name: opCreateAccountAlias, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateAccountAliasInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateAccountAliasOutput{} + req.Data = output + return +} + +// Creates an alias for your AWS account. For information about using an AWS +// account alias, see Using an Alias for Your AWS Account ID (http://docs.aws.amazon.com/IAM/latest/UserGuide/AccountAlias.html) +// in the IAM User Guide. +func (c *IAM) CreateAccountAlias(input *CreateAccountAliasInput) (*CreateAccountAliasOutput, error) { + req, out := c.CreateAccountAliasRequest(input) + err := req.Send() + return out, err +} + +const opCreateGroup = "CreateGroup" + +// CreateGroupRequest generates a request for the CreateGroup operation. +func (c *IAM) CreateGroupRequest(input *CreateGroupInput) (req *request.Request, output *CreateGroupOutput) { + op := &request.Operation{ + Name: opCreateGroup, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateGroupInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateGroupOutput{} + req.Data = output + return +} + +// Creates a new group. +// +// For information about the number of groups you can create, see Limitations +// on IAM Entities (http://docs.aws.amazon.com/IAM/latest/UserGuide/LimitationsOnEntities.html) +// in the IAM User Guide. +func (c *IAM) CreateGroup(input *CreateGroupInput) (*CreateGroupOutput, error) { + req, out := c.CreateGroupRequest(input) + err := req.Send() + return out, err +} + +const opCreateInstanceProfile = "CreateInstanceProfile" + +// CreateInstanceProfileRequest generates a request for the CreateInstanceProfile operation. +func (c *IAM) CreateInstanceProfileRequest(input *CreateInstanceProfileInput) (req *request.Request, output *CreateInstanceProfileOutput) { + op := &request.Operation{ + Name: opCreateInstanceProfile, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateInstanceProfileInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateInstanceProfileOutput{} + req.Data = output + return +} + +// Creates a new instance profile. For information about instance profiles, +// go to About Instance Profiles (http://docs.aws.amazon.com/IAM/latest/UserGuide/AboutInstanceProfiles.html). +// +// For information about the number of instance profiles you can create, see +// Limitations on IAM Entities (http://docs.aws.amazon.com/IAM/latest/UserGuide/LimitationsOnEntities.html) +// in the IAM User Guide. +func (c *IAM) CreateInstanceProfile(input *CreateInstanceProfileInput) (*CreateInstanceProfileOutput, error) { + req, out := c.CreateInstanceProfileRequest(input) + err := req.Send() + return out, err +} + +const opCreateLoginProfile = "CreateLoginProfile" + +// CreateLoginProfileRequest generates a request for the CreateLoginProfile operation. 
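+//
+// A minimal usage sketch (user name and password are illustrative; svc is
+// an *iam.IAM client):
+//
+//	_, err := svc.CreateLoginProfile(&iam.CreateLoginProfileInput{
+//		UserName: aws.String("alice"),
+//		Password: aws.String("an-initial-console-password"),
+//	})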
+func (c *IAM) CreateLoginProfileRequest(input *CreateLoginProfileInput) (req *request.Request, output *CreateLoginProfileOutput) { + op := &request.Operation{ + Name: opCreateLoginProfile, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateLoginProfileInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateLoginProfileOutput{} + req.Data = output + return +} + +// Creates a password for the specified user, giving the user the ability to +// access AWS services through the AWS Management Console. For more information +// about managing passwords, see Managing Passwords (http://docs.aws.amazon.com/IAM/latest/UserGuide/Using_ManagingLogins.html) +// in the Using IAM guide. +func (c *IAM) CreateLoginProfile(input *CreateLoginProfileInput) (*CreateLoginProfileOutput, error) { + req, out := c.CreateLoginProfileRequest(input) + err := req.Send() + return out, err +} + +const opCreateOpenIDConnectProvider = "CreateOpenIDConnectProvider" + +// CreateOpenIDConnectProviderRequest generates a request for the CreateOpenIDConnectProvider operation. +func (c *IAM) CreateOpenIDConnectProviderRequest(input *CreateOpenIDConnectProviderInput) (req *request.Request, output *CreateOpenIDConnectProviderOutput) { + op := &request.Operation{ + Name: opCreateOpenIDConnectProvider, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateOpenIDConnectProviderInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateOpenIDConnectProviderOutput{} + req.Data = output + return +} + +// Creates an IAM entity to describe an identity provider (IdP) that supports +// OpenID Connect (OIDC) (http://openid.net/connect/). +// +// The OIDC provider that you create with this operation can be used as a principal +// in a role's trust policy to establish a trust relationship between AWS and +// the OIDC provider. +// +// When you create the IAM OIDC provider, you specify the URL of the OIDC identity +// provider (IdP) to trust, a list of client IDs (also known as audiences) that +// identify the application or applications that are allowed to authenticate +// using the OIDC provider, and a list of thumbprints of the server certificate(s) +// that the IdP uses. You get all of this information from the OIDC IdP that +// you want to use for access to AWS. +// +// Because trust for the OIDC provider is ultimately derived from the IAM provider +// that this action creates, it is a best practice to limit access to the CreateOpenIDConnectProvider +// action to highly-privileged users. +func (c *IAM) CreateOpenIDConnectProvider(input *CreateOpenIDConnectProviderInput) (*CreateOpenIDConnectProviderOutput, error) { + req, out := c.CreateOpenIDConnectProviderRequest(input) + err := req.Send() + return out, err +} + +const opCreatePolicy = "CreatePolicy" + +// CreatePolicyRequest generates a request for the CreatePolicy operation. +func (c *IAM) CreatePolicyRequest(input *CreatePolicyInput) (req *request.Request, output *CreatePolicyOutput) { + op := &request.Operation{ + Name: opCreatePolicy, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreatePolicyInput{} + } + + req = c.newRequest(op, input, output) + output = &CreatePolicyOutput{} + req.Data = output + return +} + +// Creates a new managed policy for your AWS account. +// +// This operation creates a policy version with a version identifier of v1 +// and sets v1 as the policy's default version. 
For more information about policy +// versions, see Versioning for Managed Policies (http://docs.aws.amazon.com/IAM/latest/UserGuide/policies-managed-versions.html) +// in the IAM User Guide. +// +// For more information about managed policies in general, refer to Managed +// Policies and Inline Policies (http://docs.aws.amazon.com/IAM/latest/UserGuide/policies-managed-vs-inline.html) +// in the IAM User Guide. +func (c *IAM) CreatePolicy(input *CreatePolicyInput) (*CreatePolicyOutput, error) { + req, out := c.CreatePolicyRequest(input) + err := req.Send() + return out, err +} + +const opCreatePolicyVersion = "CreatePolicyVersion" + +// CreatePolicyVersionRequest generates a request for the CreatePolicyVersion operation. +func (c *IAM) CreatePolicyVersionRequest(input *CreatePolicyVersionInput) (req *request.Request, output *CreatePolicyVersionOutput) { + op := &request.Operation{ + Name: opCreatePolicyVersion, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreatePolicyVersionInput{} + } + + req = c.newRequest(op, input, output) + output = &CreatePolicyVersionOutput{} + req.Data = output + return +} + +// Creates a new version of the specified managed policy. To update a managed +// policy, you create a new policy version. A managed policy can have up to +// five versions. If the policy has five versions, you must delete an existing +// version using DeletePolicyVersion before you create a new version. +// +// Optionally, you can set the new version as the policy's default version. +// The default version is the operative version; that is, the version that is +// in effect for the IAM users, groups, and roles that the policy is attached +// to. +// +// For more information about managed policy versions, see Versioning for Managed +// Policies (http://docs.aws.amazon.com/IAM/latest/UserGuide/policies-managed-versions.html) +// in the IAM User Guide. +func (c *IAM) CreatePolicyVersion(input *CreatePolicyVersionInput) (*CreatePolicyVersionOutput, error) { + req, out := c.CreatePolicyVersionRequest(input) + err := req.Send() + return out, err +} + +const opCreateRole = "CreateRole" + +// CreateRoleRequest generates a request for the CreateRole operation. +func (c *IAM) CreateRoleRequest(input *CreateRoleInput) (req *request.Request, output *CreateRoleOutput) { + op := &request.Operation{ + Name: opCreateRole, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateRoleInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateRoleOutput{} + req.Data = output + return +} + +// Creates a new role for your AWS account. For more information about roles, +// go to Working with Roles (http://docs.aws.amazon.com/IAM/latest/UserGuide/WorkingWithRoles.html). +// For information about limitations on role names and the number of roles you +// can create, go to Limitations on IAM Entities (http://docs.aws.amazon.com/IAM/latest/UserGuide/LimitationsOnEntities.html) +// in the IAM User Guide. +func (c *IAM) CreateRole(input *CreateRoleInput) (*CreateRoleOutput, error) { + req, out := c.CreateRoleRequest(input) + err := req.Send() + return out, err +} + +const opCreateSAMLProvider = "CreateSAMLProvider" + +// CreateSAMLProviderRequest generates a request for the CreateSAMLProvider operation. 
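+//
+// A minimal usage sketch (provider name and metadata contents are
+// illustrative; metadataXML is assumed to hold the IdP's metadata document):
+//
+//	out, err := svc.CreateSAMLProvider(&iam.CreateSAMLProviderInput{
+//		Name:                 aws.String("corp-idp"),
+//		SAMLMetadataDocument: aws.String(metadataXML),
+//	})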
+func (c *IAM) CreateSAMLProviderRequest(input *CreateSAMLProviderInput) (req *request.Request, output *CreateSAMLProviderOutput) { + op := &request.Operation{ + Name: opCreateSAMLProvider, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateSAMLProviderInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateSAMLProviderOutput{} + req.Data = output + return +} + +// Creates an IAM entity to describe an identity provider (IdP) that supports +// SAML 2.0. +// +// The SAML provider that you create with this operation can be used as a +// principal in a role's trust policy to establish a trust relationship between +// AWS and a SAML identity provider. You can create an IAM role that supports +// Web-based single sign-on (SSO) to the AWS Management Console or one that +// supports API access to AWS. +// +// When you create the SAML provider, you upload a SAML metadata document +// that you get from your IdP and that includes the issuer's name, expiration +// information, and keys that can be used to validate the SAML authentication +// response (assertions) that are received from the IdP. You must generate the +// metadata document using the identity management software that is used as +// your organization's IdP. +// +// This operation requires Signature Version 4 (http://docs.aws.amazon.com/general/latest/gr/signature-version-4.html). +// For more information, see Enabling SAML 2.0 Federated Users to Access the +// AWS Management Console (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_enable-console-saml.html) +// and About SAML 2.0-based Federation (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_saml.html) +// in the IAM User Guide. +func (c *IAM) CreateSAMLProvider(input *CreateSAMLProviderInput) (*CreateSAMLProviderOutput, error) { + req, out := c.CreateSAMLProviderRequest(input) + err := req.Send() + return out, err +} + +const opCreateUser = "CreateUser" + +// CreateUserRequest generates a request for the CreateUser operation. +func (c *IAM) CreateUserRequest(input *CreateUserInput) (req *request.Request, output *CreateUserOutput) { + op := &request.Operation{ + Name: opCreateUser, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateUserInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateUserOutput{} + req.Data = output + return +} + +// Creates a new user for your AWS account. +// +// For information about limitations on the number of users you can create, +// see Limitations on IAM Entities (http://docs.aws.amazon.com/IAM/latest/UserGuide/LimitationsOnEntities.html) +// in the IAM User Guide. +func (c *IAM) CreateUser(input *CreateUserInput) (*CreateUserOutput, error) { + req, out := c.CreateUserRequest(input) + err := req.Send() + return out, err +} + +const opCreateVirtualMFADevice = "CreateVirtualMFADevice" + +// CreateVirtualMFADeviceRequest generates a request for the CreateVirtualMFADevice operation. +func (c *IAM) CreateVirtualMFADeviceRequest(input *CreateVirtualMFADeviceInput) (req *request.Request, output *CreateVirtualMFADeviceOutput) { + op := &request.Operation{ + Name: opCreateVirtualMFADevice, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateVirtualMFADeviceInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateVirtualMFADeviceOutput{} + req.Data = output + return +} + +// Creates a new virtual MFA device for the AWS account.
After creating the +// virtual MFA, use EnableMFADevice to attach the MFA device to an IAM user. +// For more information about creating and working with virtual MFA devices, +// go to Using a Virtual MFA Device (http://docs.aws.amazon.com/IAM/latest/UserGuide/Using_VirtualMFA.html) +// in the Using IAM guide. +// +// For information about limits on the number of MFA devices you can create, +// see Limitations on Entities (http://docs.aws.amazon.com/IAM/latest/UserGuide/LimitationsOnEntities.html) +// in the Using IAM guide. +// +// The seed information contained in the QR code and the Base32 string should +// be treated like any other secret access information, such as your AWS access +// keys or your passwords. After you provision your virtual device, you should +// ensure that the information is destroyed following secure procedures. +func (c *IAM) CreateVirtualMFADevice(input *CreateVirtualMFADeviceInput) (*CreateVirtualMFADeviceOutput, error) { + req, out := c.CreateVirtualMFADeviceRequest(input) + err := req.Send() + return out, err +} + +const opDeactivateMFADevice = "DeactivateMFADevice" + +// DeactivateMFADeviceRequest generates a request for the DeactivateMFADevice operation. +func (c *IAM) DeactivateMFADeviceRequest(input *DeactivateMFADeviceInput) (req *request.Request, output *DeactivateMFADeviceOutput) { + op := &request.Operation{ + Name: opDeactivateMFADevice, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeactivateMFADeviceInput{} + } + + req = c.newRequest(op, input, output) + output = &DeactivateMFADeviceOutput{} + req.Data = output + return +} + +// Deactivates the specified MFA device and removes it from association with +// the user name for which it was originally enabled. +// +// For more information about creating and working with virtual MFA devices, +// go to Using a Virtual MFA Device (http://docs.aws.amazon.com/IAM/latest/UserGuide/Using_VirtualMFA.html) +// in the Using IAM guide. +func (c *IAM) DeactivateMFADevice(input *DeactivateMFADeviceInput) (*DeactivateMFADeviceOutput, error) { + req, out := c.DeactivateMFADeviceRequest(input) + err := req.Send() + return out, err +} + +const opDeleteAccessKey = "DeleteAccessKey" + +// DeleteAccessKeyRequest generates a request for the DeleteAccessKey operation. +func (c *IAM) DeleteAccessKeyRequest(input *DeleteAccessKeyInput) (req *request.Request, output *DeleteAccessKeyOutput) { + op := &request.Operation{ + Name: opDeleteAccessKey, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteAccessKeyInput{} + } + + req = c.newRequest(op, input, output) + output = &DeleteAccessKeyOutput{} + req.Data = output + return +} + +// Deletes the access key associated with the specified user. +// +// If you do not specify a user name, IAM determines the user name implicitly +// based on the AWS access key ID signing the request. Because this action works +// for access keys under the AWS account, you can use this action to manage +// root credentials even if the AWS account has no associated users. +func (c *IAM) DeleteAccessKey(input *DeleteAccessKeyInput) (*DeleteAccessKeyOutput, error) { + req, out := c.DeleteAccessKeyRequest(input) + err := req.Send() + return out, err +} + +const opDeleteAccountAlias = "DeleteAccountAlias" + +// DeleteAccountAliasRequest generates a request for the DeleteAccountAlias operation. 
+func (c *IAM) DeleteAccountAliasRequest(input *DeleteAccountAliasInput) (req *request.Request, output *DeleteAccountAliasOutput) { + op := &request.Operation{ + Name: opDeleteAccountAlias, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteAccountAliasInput{} + } + + req = c.newRequest(op, input, output) + output = &DeleteAccountAliasOutput{} + req.Data = output + return +} + +// Deletes the specified AWS account alias. For information about using an AWS +// account alias, see Using an Alias for Your AWS Account ID (http://docs.aws.amazon.com/IAM/latest/UserGuide/AccountAlias.html) +// in the IAM User Guide. +func (c *IAM) DeleteAccountAlias(input *DeleteAccountAliasInput) (*DeleteAccountAliasOutput, error) { + req, out := c.DeleteAccountAliasRequest(input) + err := req.Send() + return out, err +} + +const opDeleteAccountPasswordPolicy = "DeleteAccountPasswordPolicy" + +// DeleteAccountPasswordPolicyRequest generates a request for the DeleteAccountPasswordPolicy operation. +func (c *IAM) DeleteAccountPasswordPolicyRequest(input *DeleteAccountPasswordPolicyInput) (req *request.Request, output *DeleteAccountPasswordPolicyOutput) { + op := &request.Operation{ + Name: opDeleteAccountPasswordPolicy, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteAccountPasswordPolicyInput{} + } + + req = c.newRequest(op, input, output) + output = &DeleteAccountPasswordPolicyOutput{} + req.Data = output + return +} + +// Deletes the password policy for the AWS account. +func (c *IAM) DeleteAccountPasswordPolicy(input *DeleteAccountPasswordPolicyInput) (*DeleteAccountPasswordPolicyOutput, error) { + req, out := c.DeleteAccountPasswordPolicyRequest(input) + err := req.Send() + return out, err +} + +const opDeleteGroup = "DeleteGroup" + +// DeleteGroupRequest generates a request for the DeleteGroup operation. +func (c *IAM) DeleteGroupRequest(input *DeleteGroupInput) (req *request.Request, output *DeleteGroupOutput) { + op := &request.Operation{ + Name: opDeleteGroup, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteGroupInput{} + } + + req = c.newRequest(op, input, output) + output = &DeleteGroupOutput{} + req.Data = output + return +} + +// Deletes the specified group. The group must not contain any users or have +// any attached policies. +func (c *IAM) DeleteGroup(input *DeleteGroupInput) (*DeleteGroupOutput, error) { + req, out := c.DeleteGroupRequest(input) + err := req.Send() + return out, err +} + +const opDeleteGroupPolicy = "DeleteGroupPolicy" + +// DeleteGroupPolicyRequest generates a request for the DeleteGroupPolicy operation. +func (c *IAM) DeleteGroupPolicyRequest(input *DeleteGroupPolicyInput) (req *request.Request, output *DeleteGroupPolicyOutput) { + op := &request.Operation{ + Name: opDeleteGroupPolicy, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteGroupPolicyInput{} + } + + req = c.newRequest(op, input, output) + output = &DeleteGroupPolicyOutput{} + req.Data = output + return +} + +// Deletes the specified inline policy that is embedded in the specified group. +// +// A group can also have managed policies attached to it. To detach a managed +// policy from a group, use DetachGroupPolicy. For more information about policies, +// refer to Managed Policies and Inline Policies (http://docs.aws.amazon.com/IAM/latest/UserGuide/policies-managed-vs-inline.html) +// in the IAM User Guide. 
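+//
+// A minimal usage sketch (group and inline policy names are illustrative;
+// svc is an *iam.IAM client):
+//
+//	_, err := svc.DeleteGroupPolicy(&iam.DeleteGroupPolicyInput{
+//		GroupName:  aws.String("admins"),
+//		PolicyName: aws.String("inline-s3-access"),
+//	})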
+func (c *IAM) DeleteGroupPolicy(input *DeleteGroupPolicyInput) (*DeleteGroupPolicyOutput, error) { + req, out := c.DeleteGroupPolicyRequest(input) + err := req.Send() + return out, err +} + +const opDeleteInstanceProfile = "DeleteInstanceProfile" + +// DeleteInstanceProfileRequest generates a request for the DeleteInstanceProfile operation. +func (c *IAM) DeleteInstanceProfileRequest(input *DeleteInstanceProfileInput) (req *request.Request, output *DeleteInstanceProfileOutput) { + op := &request.Operation{ + Name: opDeleteInstanceProfile, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteInstanceProfileInput{} + } + + req = c.newRequest(op, input, output) + output = &DeleteInstanceProfileOutput{} + req.Data = output + return +} + +// Deletes the specified instance profile. The instance profile must not have +// an associated role. +// +// Make sure you do not have any Amazon EC2 instances running with the instance +// profile you are about to delete. Deleting a role or instance profile that +// is associated with a running instance will break any applications running +// on the instance. For more information about instance profiles, go to About +// Instance Profiles (http://docs.aws.amazon.com/IAM/latest/UserGuide/AboutInstanceProfiles.html). +func (c *IAM) DeleteInstanceProfile(input *DeleteInstanceProfileInput) (*DeleteInstanceProfileOutput, error) { + req, out := c.DeleteInstanceProfileRequest(input) + err := req.Send() + return out, err +} + +const opDeleteLoginProfile = "DeleteLoginProfile" + +// DeleteLoginProfileRequest generates a request for the DeleteLoginProfile operation. +func (c *IAM) DeleteLoginProfileRequest(input *DeleteLoginProfileInput) (req *request.Request, output *DeleteLoginProfileOutput) { + op := &request.Operation{ + Name: opDeleteLoginProfile, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteLoginProfileInput{} + } + + req = c.newRequest(op, input, output) + output = &DeleteLoginProfileOutput{} + req.Data = output + return +} + +// Deletes the password for the specified user, which terminates the user's +// ability to access AWS services through the AWS Management Console. +// +// Deleting a user's password does not prevent a user from accessing IAM through +// the command line interface or the API. To prevent all user access you must +// also either make the access key inactive or delete it. For more information +// about making keys inactive or deleting them, see UpdateAccessKey and DeleteAccessKey. +func (c *IAM) DeleteLoginProfile(input *DeleteLoginProfileInput) (*DeleteLoginProfileOutput, error) { + req, out := c.DeleteLoginProfileRequest(input) + err := req.Send() + return out, err +} + +const opDeleteOpenIDConnectProvider = "DeleteOpenIDConnectProvider" + +// DeleteOpenIDConnectProviderRequest generates a request for the DeleteOpenIDConnectProvider operation. +func (c *IAM) DeleteOpenIDConnectProviderRequest(input *DeleteOpenIDConnectProviderInput) (req *request.Request, output *DeleteOpenIDConnectProviderOutput) { + op := &request.Operation{ + Name: opDeleteOpenIDConnectProvider, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteOpenIDConnectProviderInput{} + } + + req = c.newRequest(op, input, output) + output = &DeleteOpenIDConnectProviderOutput{} + req.Data = output + return +} + +// Deletes an IAM OpenID Connect identity provider. 
+// +// Deleting an OIDC provider does not update any roles that reference the provider +// as a principal in their trust policies. Any attempt to assume a role that +// references a provider that has been deleted will fail. +// +// This action is idempotent; it does not fail or return an error if you call +// the action for a provider that was already deleted. +func (c *IAM) DeleteOpenIDConnectProvider(input *DeleteOpenIDConnectProviderInput) (*DeleteOpenIDConnectProviderOutput, error) { + req, out := c.DeleteOpenIDConnectProviderRequest(input) + err := req.Send() + return out, err +} + +const opDeletePolicy = "DeletePolicy" + +// DeletePolicyRequest generates a request for the DeletePolicy operation. +func (c *IAM) DeletePolicyRequest(input *DeletePolicyInput) (req *request.Request, output *DeletePolicyOutput) { + op := &request.Operation{ + Name: opDeletePolicy, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeletePolicyInput{} + } + + req = c.newRequest(op, input, output) + output = &DeletePolicyOutput{} + req.Data = output + return +} + +// Deletes the specified managed policy. +// +// Before you can delete a managed policy, you must detach the policy from +// all users, groups, and roles that it is attached to, and you must delete +// all of the policy's versions. The following steps describe the process for +// deleting a managed policy: +// +// 1. Detach the policy from all users, groups, and roles that the policy +// is attached to, using the DetachUserPolicy, DetachGroupPolicy, or DetachRolePolicy +// APIs. To list all the users, groups, and roles that a policy is attached to, +// use ListEntitiesForPolicy. +// +// 2. Delete all versions of the policy using DeletePolicyVersion. To list +// the policy's versions, use ListPolicyVersions. You cannot use DeletePolicyVersion +// to delete the version that is marked as the default version; you delete the +// policy's default version in the next step of the process. +// +// 3. Delete the policy (this automatically deletes the policy's default version) +// using this API. +// +// For information about managed policies, refer to Managed Policies and Inline +// Policies (http://docs.aws.amazon.com/IAM/latest/UserGuide/policies-managed-vs-inline.html) +// in the IAM User Guide. +func (c *IAM) DeletePolicy(input *DeletePolicyInput) (*DeletePolicyOutput, error) { + req, out := c.DeletePolicyRequest(input) + err := req.Send() + return out, err +} + +const opDeletePolicyVersion = "DeletePolicyVersion" + +// DeletePolicyVersionRequest generates a request for the DeletePolicyVersion operation. +func (c *IAM) DeletePolicyVersionRequest(input *DeletePolicyVersionInput) (req *request.Request, output *DeletePolicyVersionOutput) { + op := &request.Operation{ + Name: opDeletePolicyVersion, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeletePolicyVersionInput{} + } + + req = c.newRequest(op, input, output) + output = &DeletePolicyVersionOutput{} + req.Data = output + return +} + +// Deletes the specified version of the specified managed policy. +// +// You cannot delete the default version of a policy using this API. To delete +// the default version of a policy, use DeletePolicy. To find out which version +// of a policy is marked as the default version, use ListPolicyVersions. +// +// For information about versions for managed policies, refer to Versioning +// for Managed Policies (http://docs.aws.amazon.com/IAM/latest/UserGuide/policies-managed-versions.html) +// in the IAM User Guide.
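+//
+// A minimal usage sketch (policy ARN and version ID are illustrative; svc is
+// an *iam.IAM client):
+//
+//	_, err := svc.DeletePolicyVersion(&iam.DeletePolicyVersionInput{
+//		PolicyArn: aws.String("arn:aws:iam::123456789012:policy/my-policy"),
+//		VersionId: aws.String("v2"),
+//	})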
+func (c *IAM) DeletePolicyVersion(input *DeletePolicyVersionInput) (*DeletePolicyVersionOutput, error) { + req, out := c.DeletePolicyVersionRequest(input) + err := req.Send() + return out, err +} + +const opDeleteRole = "DeleteRole" + +// DeleteRoleRequest generates a request for the DeleteRole operation. +func (c *IAM) DeleteRoleRequest(input *DeleteRoleInput) (req *request.Request, output *DeleteRoleOutput) { + op := &request.Operation{ + Name: opDeleteRole, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteRoleInput{} + } + + req = c.newRequest(op, input, output) + output = &DeleteRoleOutput{} + req.Data = output + return +} + +// Deletes the specified role. The role must not have any policies attached. +// For more information about roles, go to Working with Roles (http://docs.aws.amazon.com/IAM/latest/UserGuide/WorkingWithRoles.html). +// +// Make sure you do not have any Amazon EC2 instances running with the role +// you are about to delete. Deleting a role or instance profile that is associated +// with a running instance will break any applications running on the instance. +func (c *IAM) DeleteRole(input *DeleteRoleInput) (*DeleteRoleOutput, error) { + req, out := c.DeleteRoleRequest(input) + err := req.Send() + return out, err +} + +const opDeleteRolePolicy = "DeleteRolePolicy" + +// DeleteRolePolicyRequest generates a request for the DeleteRolePolicy operation. +func (c *IAM) DeleteRolePolicyRequest(input *DeleteRolePolicyInput) (req *request.Request, output *DeleteRolePolicyOutput) { + op := &request.Operation{ + Name: opDeleteRolePolicy, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteRolePolicyInput{} + } + + req = c.newRequest(op, input, output) + output = &DeleteRolePolicyOutput{} + req.Data = output + return +} + +// Deletes the specified inline policy that is embedded in the specified role. +// +// A role can also have managed policies attached to it. To detach a managed +// policy from a role, use DetachRolePolicy. For more information about policies, +// refer to Managed Policies and Inline Policies (http://docs.aws.amazon.com/IAM/latest/UserGuide/policies-managed-vs-inline.html) +// in the IAM User Guide. +func (c *IAM) DeleteRolePolicy(input *DeleteRolePolicyInput) (*DeleteRolePolicyOutput, error) { + req, out := c.DeleteRolePolicyRequest(input) + err := req.Send() + return out, err +} + +const opDeleteSAMLProvider = "DeleteSAMLProvider" + +// DeleteSAMLProviderRequest generates a request for the DeleteSAMLProvider operation. +func (c *IAM) DeleteSAMLProviderRequest(input *DeleteSAMLProviderInput) (req *request.Request, output *DeleteSAMLProviderOutput) { + op := &request.Operation{ + Name: opDeleteSAMLProvider, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteSAMLProviderInput{} + } + + req = c.newRequest(op, input, output) + output = &DeleteSAMLProviderOutput{} + req.Data = output + return +} + +// Deletes a SAML provider. +// +// Deleting the provider does not update any roles that reference the SAML +// provider as a principal in their trust policies. Any attempt to assume a +// role that references a SAML provider that has been deleted will fail. +// +// This operation requires Signature Version 4 (http://docs.aws.amazon.com/general/latest/gr/signature-version-4.html). 
+func (c *IAM) DeleteSAMLProvider(input *DeleteSAMLProviderInput) (*DeleteSAMLProviderOutput, error) { + req, out := c.DeleteSAMLProviderRequest(input) + err := req.Send() + return out, err +} + +const opDeleteSSHPublicKey = "DeleteSSHPublicKey" + +// DeleteSSHPublicKeyRequest generates a request for the DeleteSSHPublicKey operation. +func (c *IAM) DeleteSSHPublicKeyRequest(input *DeleteSSHPublicKeyInput) (req *request.Request, output *DeleteSSHPublicKeyOutput) { + op := &request.Operation{ + Name: opDeleteSSHPublicKey, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteSSHPublicKeyInput{} + } + + req = c.newRequest(op, input, output) + output = &DeleteSSHPublicKeyOutput{} + req.Data = output + return +} + +// Deletes the specified SSH public key. +// +// The SSH public key deleted by this action is used only for authenticating +// the associated IAM user to an AWS CodeCommit repository. For more information +// about using SSH keys to authenticate to an AWS CodeCommit repository, see +// Set up AWS CodeCommit for SSH Connections (http://docs.aws.amazon.com/codecommit/latest/userguide/setting-up-credentials-ssh.html) +// in the AWS CodeCommit User Guide. +func (c *IAM) DeleteSSHPublicKey(input *DeleteSSHPublicKeyInput) (*DeleteSSHPublicKeyOutput, error) { + req, out := c.DeleteSSHPublicKeyRequest(input) + err := req.Send() + return out, err +} + +const opDeleteServerCertificate = "DeleteServerCertificate" + +// DeleteServerCertificateRequest generates a request for the DeleteServerCertificate operation. +func (c *IAM) DeleteServerCertificateRequest(input *DeleteServerCertificateInput) (req *request.Request, output *DeleteServerCertificateOutput) { + op := &request.Operation{ + Name: opDeleteServerCertificate, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteServerCertificateInput{} + } + + req = c.newRequest(op, input, output) + output = &DeleteServerCertificateOutput{} + req.Data = output + return +} + +// Deletes the specified server certificate. +// +// For more information about working with server certificates, including a +// list of AWS services that can use the server certificates that you manage +// with IAM, go to Working with Server Certificates (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_server-certs.html) +// in the IAM User Guide. +// +// If you are using a server certificate with Elastic Load Balancing, deleting +// the certificate could have implications for your application. If Elastic +// Load Balancing doesn't detect the deletion of bound certificates, it may +// continue to use the certificates. This could cause Elastic Load Balancing +// to stop accepting traffic. We recommend that you remove the reference to +// the certificate from Elastic Load Balancing before using this command to +// delete the certificate. For more information, go to DeleteLoadBalancerListeners +// (http://docs.aws.amazon.com/ElasticLoadBalancing/latest/APIReference/API_DeleteLoadBalancerListeners.html) +// in the Elastic Load Balancing API Reference. +func (c *IAM) DeleteServerCertificate(input *DeleteServerCertificateInput) (*DeleteServerCertificateOutput, error) { + req, out := c.DeleteServerCertificateRequest(input) + err := req.Send() + return out, err +} + +const opDeleteSigningCertificate = "DeleteSigningCertificate" + +// DeleteSigningCertificateRequest generates a request for the DeleteSigningCertificate operation. 
+func (c *IAM) DeleteSigningCertificateRequest(input *DeleteSigningCertificateInput) (req *request.Request, output *DeleteSigningCertificateOutput) { + op := &request.Operation{ + Name: opDeleteSigningCertificate, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteSigningCertificateInput{} + } + + req = c.newRequest(op, input, output) + output = &DeleteSigningCertificateOutput{} + req.Data = output + return +} + +// Deletes the specified signing certificate associated with the specified user. +// +// If you do not specify a user name, IAM determines the user name implicitly +// based on the AWS access key ID signing the request. Because this action works +// for access keys under the AWS account, you can use this action to manage +// root credentials even if the AWS account has no associated users. +func (c *IAM) DeleteSigningCertificate(input *DeleteSigningCertificateInput) (*DeleteSigningCertificateOutput, error) { + req, out := c.DeleteSigningCertificateRequest(input) + err := req.Send() + return out, err +} + +const opDeleteUser = "DeleteUser" + +// DeleteUserRequest generates a request for the DeleteUser operation. +func (c *IAM) DeleteUserRequest(input *DeleteUserInput) (req *request.Request, output *DeleteUserOutput) { + op := &request.Operation{ + Name: opDeleteUser, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteUserInput{} + } + + req = c.newRequest(op, input, output) + output = &DeleteUserOutput{} + req.Data = output + return +} + +// Deletes the specified user. The user must not belong to any groups, have +// any keys or signing certificates, or have any attached policies. +func (c *IAM) DeleteUser(input *DeleteUserInput) (*DeleteUserOutput, error) { + req, out := c.DeleteUserRequest(input) + err := req.Send() + return out, err +} + +const opDeleteUserPolicy = "DeleteUserPolicy" + +// DeleteUserPolicyRequest generates a request for the DeleteUserPolicy operation. +func (c *IAM) DeleteUserPolicyRequest(input *DeleteUserPolicyInput) (req *request.Request, output *DeleteUserPolicyOutput) { + op := &request.Operation{ + Name: opDeleteUserPolicy, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteUserPolicyInput{} + } + + req = c.newRequest(op, input, output) + output = &DeleteUserPolicyOutput{} + req.Data = output + return +} + +// Deletes the specified inline policy that is embedded in the specified user. +// +// A user can also have managed policies attached to it. To detach a managed +// policy from a user, use DetachUserPolicy. For more information about policies, +// refer to Managed Policies and Inline Policies (http://docs.aws.amazon.com/IAM/latest/UserGuide/policies-managed-vs-inline.html) +// in the IAM User Guide. +func (c *IAM) DeleteUserPolicy(input *DeleteUserPolicyInput) (*DeleteUserPolicyOutput, error) { + req, out := c.DeleteUserPolicyRequest(input) + err := req.Send() + return out, err +} + +const opDeleteVirtualMFADevice = "DeleteVirtualMFADevice" + +// DeleteVirtualMFADeviceRequest generates a request for the DeleteVirtualMFADevice operation. 
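+//
+// A minimal sketch of the deactivate-then-delete flow (user name and serial
+// number are illustrative; svc is an *iam.IAM client):
+//
+//	serial := aws.String("arn:aws:iam::123456789012:mfa/alice")
+//	_, err := svc.DeactivateMFADevice(&iam.DeactivateMFADeviceInput{
+//		UserName:     aws.String("alice"),
+//		SerialNumber: serial,
+//	})
+//	if err == nil {
+//		_, err = svc.DeleteVirtualMFADevice(&iam.DeleteVirtualMFADeviceInput{
+//			SerialNumber: serial,
+//		})
+//	}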
+func (c *IAM) DeleteVirtualMFADeviceRequest(input *DeleteVirtualMFADeviceInput) (req *request.Request, output *DeleteVirtualMFADeviceOutput) { + op := &request.Operation{ + Name: opDeleteVirtualMFADevice, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteVirtualMFADeviceInput{} + } + + req = c.newRequest(op, input, output) + output = &DeleteVirtualMFADeviceOutput{} + req.Data = output + return +} + +// Deletes a virtual MFA device. +// +// You must deactivate a user's virtual MFA device before you can delete it. +// For information about deactivating MFA devices, see DeactivateMFADevice. +func (c *IAM) DeleteVirtualMFADevice(input *DeleteVirtualMFADeviceInput) (*DeleteVirtualMFADeviceOutput, error) { + req, out := c.DeleteVirtualMFADeviceRequest(input) + err := req.Send() + return out, err +} + +const opDetachGroupPolicy = "DetachGroupPolicy" + +// DetachGroupPolicyRequest generates a request for the DetachGroupPolicy operation. +func (c *IAM) DetachGroupPolicyRequest(input *DetachGroupPolicyInput) (req *request.Request, output *DetachGroupPolicyOutput) { + op := &request.Operation{ + Name: opDetachGroupPolicy, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DetachGroupPolicyInput{} + } + + req = c.newRequest(op, input, output) + output = &DetachGroupPolicyOutput{} + req.Data = output + return +} + +// Removes the specified managed policy from the specified group. +// +// A group can also have inline policies embedded with it. To delete an inline +// policy, use the DeleteGroupPolicy API. For information about policies, refer +// to Managed Policies and Inline Policies (http://docs.aws.amazon.com/IAM/latest/UserGuide/policies-managed-vs-inline.html) +// in the IAM User Guide. +func (c *IAM) DetachGroupPolicy(input *DetachGroupPolicyInput) (*DetachGroupPolicyOutput, error) { + req, out := c.DetachGroupPolicyRequest(input) + err := req.Send() + return out, err +} + +const opDetachRolePolicy = "DetachRolePolicy" + +// DetachRolePolicyRequest generates a request for the DetachRolePolicy operation. +func (c *IAM) DetachRolePolicyRequest(input *DetachRolePolicyInput) (req *request.Request, output *DetachRolePolicyOutput) { + op := &request.Operation{ + Name: opDetachRolePolicy, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DetachRolePolicyInput{} + } + + req = c.newRequest(op, input, output) + output = &DetachRolePolicyOutput{} + req.Data = output + return +} + +// Removes the specified managed policy from the specified role. +// +// A role can also have inline policies embedded with it. To delete an inline +// policy, use the DeleteRolePolicy API. For information about policies, refer +// to Managed Policies and Inline Policies (http://docs.aws.amazon.com/IAM/latest/UserGuide/policies-managed-vs-inline.html) +// in the IAM User Guide. +func (c *IAM) DetachRolePolicy(input *DetachRolePolicyInput) (*DetachRolePolicyOutput, error) { + req, out := c.DetachRolePolicyRequest(input) + err := req.Send() + return out, err +} + +const opDetachUserPolicy = "DetachUserPolicy" + +// DetachUserPolicyRequest generates a request for the DetachUserPolicy operation. 
+func (c *IAM) DetachUserPolicyRequest(input *DetachUserPolicyInput) (req *request.Request, output *DetachUserPolicyOutput) { + op := &request.Operation{ + Name: opDetachUserPolicy, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DetachUserPolicyInput{} + } + + req = c.newRequest(op, input, output) + output = &DetachUserPolicyOutput{} + req.Data = output + return +} + +// Removes the specified managed policy from the specified user. +// +// A user can also have inline policies embedded with it. To delete an inline +// policy, use the DeleteUserPolicy API. For information about policies, refer +// to Managed Policies and Inline Policies (http://docs.aws.amazon.com/IAM/latest/UserGuide/policies-managed-vs-inline.html) +// in the IAM User Guide. +func (c *IAM) DetachUserPolicy(input *DetachUserPolicyInput) (*DetachUserPolicyOutput, error) { + req, out := c.DetachUserPolicyRequest(input) + err := req.Send() + return out, err +} + +const opEnableMFADevice = "EnableMFADevice" + +// EnableMFADeviceRequest generates a request for the EnableMFADevice operation. +func (c *IAM) EnableMFADeviceRequest(input *EnableMFADeviceInput) (req *request.Request, output *EnableMFADeviceOutput) { + op := &request.Operation{ + Name: opEnableMFADevice, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &EnableMFADeviceInput{} + } + + req = c.newRequest(op, input, output) + output = &EnableMFADeviceOutput{} + req.Data = output + return +} + +// Enables the specified MFA device and associates it with the specified user +// name. When enabled, the MFA device is required for every subsequent login +// by the user name associated with the device. +func (c *IAM) EnableMFADevice(input *EnableMFADeviceInput) (*EnableMFADeviceOutput, error) { + req, out := c.EnableMFADeviceRequest(input) + err := req.Send() + return out, err +} + +const opGenerateCredentialReport = "GenerateCredentialReport" + +// GenerateCredentialReportRequest generates a request for the GenerateCredentialReport operation. +func (c *IAM) GenerateCredentialReportRequest(input *GenerateCredentialReportInput) (req *request.Request, output *GenerateCredentialReportOutput) { + op := &request.Operation{ + Name: opGenerateCredentialReport, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GenerateCredentialReportInput{} + } + + req = c.newRequest(op, input, output) + output = &GenerateCredentialReportOutput{} + req.Data = output + return +} + +// Generates a credential report for the AWS account. For more information about +// the credential report, see Getting Credential Reports (http://docs.aws.amazon.com/IAM/latest/UserGuide/credential-reports.html) +// in the IAM User Guide. +func (c *IAM) GenerateCredentialReport(input *GenerateCredentialReportInput) (*GenerateCredentialReportOutput, error) { + req, out := c.GenerateCredentialReportRequest(input) + err := req.Send() + return out, err +} + +const opGetAccessKeyLastUsed = "GetAccessKeyLastUsed" + +// GetAccessKeyLastUsedRequest generates a request for the GetAccessKeyLastUsed operation. 
+func (c *IAM) GetAccessKeyLastUsedRequest(input *GetAccessKeyLastUsedInput) (req *request.Request, output *GetAccessKeyLastUsedOutput) { + op := &request.Operation{ + Name: opGetAccessKeyLastUsed, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetAccessKeyLastUsedInput{} + } + + req = c.newRequest(op, input, output) + output = &GetAccessKeyLastUsedOutput{} + req.Data = output + return +} + +// Retrieves information about when the specified access key was last used. +// The information includes the date and time of last use, along with the AWS +// service and region that were specified in the last request made with that +// key. +func (c *IAM) GetAccessKeyLastUsed(input *GetAccessKeyLastUsedInput) (*GetAccessKeyLastUsedOutput, error) { + req, out := c.GetAccessKeyLastUsedRequest(input) + err := req.Send() + return out, err +} + +const opGetAccountAuthorizationDetails = "GetAccountAuthorizationDetails" + +// GetAccountAuthorizationDetailsRequest generates a request for the GetAccountAuthorizationDetails operation. +func (c *IAM) GetAccountAuthorizationDetailsRequest(input *GetAccountAuthorizationDetailsInput) (req *request.Request, output *GetAccountAuthorizationDetailsOutput) { + op := &request.Operation{ + Name: opGetAccountAuthorizationDetails, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"Marker"}, + OutputTokens: []string{"Marker"}, + LimitToken: "MaxItems", + TruncationToken: "IsTruncated", + }, + } + + if input == nil { + input = &GetAccountAuthorizationDetailsInput{} + } + + req = c.newRequest(op, input, output) + output = &GetAccountAuthorizationDetailsOutput{} + req.Data = output + return +} + +// Retrieves information about all IAM users, groups, roles, and policies in +// your account, including their relationships to one another. Use this API +// to obtain a snapshot of the configuration of IAM permissions (users, groups, +// roles, and policies) in your account. +// +// You can optionally filter the results using the Filter parameter. You can +// paginate the results using the MaxItems and Marker parameters. +func (c *IAM) GetAccountAuthorizationDetails(input *GetAccountAuthorizationDetailsInput) (*GetAccountAuthorizationDetailsOutput, error) { + req, out := c.GetAccountAuthorizationDetailsRequest(input) + err := req.Send() + return out, err +} + +func (c *IAM) GetAccountAuthorizationDetailsPages(input *GetAccountAuthorizationDetailsInput, fn func(p *GetAccountAuthorizationDetailsOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.GetAccountAuthorizationDetailsRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*GetAccountAuthorizationDetailsOutput), lastPage) + }) +} + +const opGetAccountPasswordPolicy = "GetAccountPasswordPolicy" + +// GetAccountPasswordPolicyRequest generates a request for the GetAccountPasswordPolicy operation. +func (c *IAM) GetAccountPasswordPolicyRequest(input *GetAccountPasswordPolicyInput) (req *request.Request, output *GetAccountPasswordPolicyOutput) { + op := &request.Operation{ + Name: opGetAccountPasswordPolicy, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetAccountPasswordPolicyInput{} + } + + req = c.newRequest(op, input, output) + output = &GetAccountPasswordPolicyOutput{} + req.Data = output + return +} + +// Retrieves the password policy for the AWS account. 
For more information about
+// using a password policy, go to Managing an IAM Password Policy (http://docs.aws.amazon.com/IAM/latest/UserGuide/Using_ManagingPasswordPolicies.html).
+func (c *IAM) GetAccountPasswordPolicy(input *GetAccountPasswordPolicyInput) (*GetAccountPasswordPolicyOutput, error) {
+	req, out := c.GetAccountPasswordPolicyRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+const opGetAccountSummary = "GetAccountSummary"
+
+// GetAccountSummaryRequest generates a request for the GetAccountSummary operation.
+func (c *IAM) GetAccountSummaryRequest(input *GetAccountSummaryInput) (req *request.Request, output *GetAccountSummaryOutput) {
+	op := &request.Operation{
+		Name:       opGetAccountSummary,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &GetAccountSummaryInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &GetAccountSummaryOutput{}
+	req.Data = output
+	return
+}
+
+// Retrieves information about IAM entity usage and IAM quotas in the AWS account.
+//
+// For information about limitations on IAM entities, see Limitations on IAM
+// Entities (http://docs.aws.amazon.com/IAM/latest/UserGuide/LimitationsOnEntities.html)
+// in the IAM User Guide.
+func (c *IAM) GetAccountSummary(input *GetAccountSummaryInput) (*GetAccountSummaryOutput, error) {
+	req, out := c.GetAccountSummaryRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+const opGetContextKeysForCustomPolicy = "GetContextKeysForCustomPolicy"
+
+// GetContextKeysForCustomPolicyRequest generates a request for the GetContextKeysForCustomPolicy operation.
+func (c *IAM) GetContextKeysForCustomPolicyRequest(input *GetContextKeysForCustomPolicyInput) (req *request.Request, output *GetContextKeysForPolicyResponse) {
+	op := &request.Operation{
+		Name:       opGetContextKeysForCustomPolicy,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &GetContextKeysForCustomPolicyInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &GetContextKeysForPolicyResponse{}
+	req.Data = output
+	return
+}
+
+// Gets a list of all of the context keys referenced in Condition elements in
+// the input policies. The policies are supplied as a list of one or more strings.
+// To get the context keys from policies associated with an IAM user, group,
+// or role, use GetContextKeysForPrincipalPolicy.
+//
+// Context keys are variables maintained by AWS and its services that provide
+// details about the context of an API query request, and can be evaluated by
+// using the Condition element of an IAM policy. Use GetContextKeysForCustomPolicy
+// to understand what key names and values you must supply when you call SimulateCustomPolicy.
+// Note that all parameters are shown in unencoded form here for clarity, but
+// must be URL encoded to be included as a part of a real HTTP request.
+func (c *IAM) GetContextKeysForCustomPolicy(input *GetContextKeysForCustomPolicyInput) (*GetContextKeysForPolicyResponse, error) {
+	req, out := c.GetContextKeysForCustomPolicyRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+const opGetContextKeysForPrincipalPolicy = "GetContextKeysForPrincipalPolicy"
+
+// GetContextKeysForPrincipalPolicyRequest generates a request for the GetContextKeysForPrincipalPolicy operation.
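+//
+// An illustrative sketch, not part of the generated code; svc and the source
+// ARN are assumed placeholders:
+//
+//    resp, err := svc.GetContextKeysForPrincipalPolicy(&iam.GetContextKeysForPrincipalPolicyInput{
+//        PolicySourceArn: aws.String("arn:aws:iam::123456789012:user/example"),
+//    })
+//    if err == nil {
+//        for _, key := range resp.ContextKeyNames {
+//            fmt.Println(aws.StringValue(key))
+//        }
+//    }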
+func (c *IAM) GetContextKeysForPrincipalPolicyRequest(input *GetContextKeysForPrincipalPolicyInput) (req *request.Request, output *GetContextKeysForPolicyResponse) {
+	op := &request.Operation{
+		Name:       opGetContextKeysForPrincipalPolicy,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &GetContextKeysForPrincipalPolicyInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &GetContextKeysForPolicyResponse{}
+	req.Data = output
+	return
+}
+
+// Gets a list of all of the context keys referenced in Condition elements in
+// all of the IAM policies attached to the specified IAM entity. The entity
+// can be an IAM user, group, or role. If you specify a user, then the request
+// also includes all of the policies attached to groups that the user is a member
+// of.
+//
+// You can optionally include a list of one or more additional policies, specified
+// as strings. If you want to include only a list of policies by string, use
+// GetContextKeysForCustomPolicy instead.
+//
+// Note: This API discloses information about the permissions granted to other
+// users. If you do not want users to see other users' permissions, then consider
+// allowing them to use GetContextKeysForCustomPolicy instead.
+//
+// Context keys are variables maintained by AWS and its services that provide
+// details about the context of an API query request, and can be evaluated by
+// using the Condition element of an IAM policy. Use GetContextKeysForPrincipalPolicy
+// to understand what key names and values you must supply when you call SimulatePrincipalPolicy.
+func (c *IAM) GetContextKeysForPrincipalPolicy(input *GetContextKeysForPrincipalPolicyInput) (*GetContextKeysForPolicyResponse, error) {
+	req, out := c.GetContextKeysForPrincipalPolicyRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+const opGetCredentialReport = "GetCredentialReport"
+
+// GetCredentialReportRequest generates a request for the GetCredentialReport operation.
+func (c *IAM) GetCredentialReportRequest(input *GetCredentialReportInput) (req *request.Request, output *GetCredentialReportOutput) {
+	op := &request.Operation{
+		Name:       opGetCredentialReport,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &GetCredentialReportInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &GetCredentialReportOutput{}
+	req.Data = output
+	return
+}
+
+// Retrieves a credential report for the AWS account. For more information about
+// the credential report, see Getting Credential Reports (http://docs.aws.amazon.com/IAM/latest/UserGuide/credential-reports.html)
+// in the IAM User Guide.
+func (c *IAM) GetCredentialReport(input *GetCredentialReportInput) (*GetCredentialReportOutput, error) {
+	req, out := c.GetCredentialReportRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+const opGetGroup = "GetGroup"
+
+// GetGroupRequest generates a request for the GetGroup operation.
+func (c *IAM) GetGroupRequest(input *GetGroupInput) (req *request.Request, output *GetGroupOutput) {
+	op := &request.Operation{
+		Name:       opGetGroup,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+		Paginator: &request.Paginator{
+			InputTokens:     []string{"Marker"},
+			OutputTokens:    []string{"Marker"},
+			LimitToken:      "MaxItems",
+			TruncationToken: "IsTruncated",
+		},
+	}
+
+	if input == nil {
+		input = &GetGroupInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &GetGroupOutput{}
+	req.Data = output
+	return
+}
+
+// Returns a list of users that are in the specified group.
You can paginate +// the results using the MaxItems and Marker parameters. +func (c *IAM) GetGroup(input *GetGroupInput) (*GetGroupOutput, error) { + req, out := c.GetGroupRequest(input) + err := req.Send() + return out, err +} + +func (c *IAM) GetGroupPages(input *GetGroupInput, fn func(p *GetGroupOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.GetGroupRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*GetGroupOutput), lastPage) + }) +} + +const opGetGroupPolicy = "GetGroupPolicy" + +// GetGroupPolicyRequest generates a request for the GetGroupPolicy operation. +func (c *IAM) GetGroupPolicyRequest(input *GetGroupPolicyInput) (req *request.Request, output *GetGroupPolicyOutput) { + op := &request.Operation{ + Name: opGetGroupPolicy, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetGroupPolicyInput{} + } + + req = c.newRequest(op, input, output) + output = &GetGroupPolicyOutput{} + req.Data = output + return +} + +// Retrieves the specified inline policy document that is embedded in the specified +// group. +// +// A group can also have managed policies attached to it. To retrieve a managed +// policy document that is attached to a group, use GetPolicy to determine the +// policy's default version, then use GetPolicyVersion to retrieve the policy +// document. +// +// For more information about policies, refer to Managed Policies and Inline +// Policies (http://docs.aws.amazon.com/IAM/latest/UserGuide/policies-managed-vs-inline.html) +// in the IAM User Guide. +func (c *IAM) GetGroupPolicy(input *GetGroupPolicyInput) (*GetGroupPolicyOutput, error) { + req, out := c.GetGroupPolicyRequest(input) + err := req.Send() + return out, err +} + +const opGetInstanceProfile = "GetInstanceProfile" + +// GetInstanceProfileRequest generates a request for the GetInstanceProfile operation. +func (c *IAM) GetInstanceProfileRequest(input *GetInstanceProfileInput) (req *request.Request, output *GetInstanceProfileOutput) { + op := &request.Operation{ + Name: opGetInstanceProfile, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetInstanceProfileInput{} + } + + req = c.newRequest(op, input, output) + output = &GetInstanceProfileOutput{} + req.Data = output + return +} + +// Retrieves information about the specified instance profile, including the +// instance profile's path, GUID, ARN, and role. For more information about +// instance profiles, go to About Instance Profiles (http://docs.aws.amazon.com/IAM/latest/UserGuide/AboutInstanceProfiles.html). +// For more information about ARNs, go to ARNs (http://docs.aws.amazon.com/IAM/latest/UserGuide/Using_Identifiers.html#Identifiers_ARNs). +func (c *IAM) GetInstanceProfile(input *GetInstanceProfileInput) (*GetInstanceProfileOutput, error) { + req, out := c.GetInstanceProfileRequest(input) + err := req.Send() + return out, err +} + +const opGetLoginProfile = "GetLoginProfile" + +// GetLoginProfileRequest generates a request for the GetLoginProfile operation. 
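+//
+// An illustrative sketch, not part of the generated code; svc and the user
+// name are assumed placeholders. A NoSuchEntity error code indicates the
+// user has no password assigned:
+//
+//    profile, err := svc.GetLoginProfile(&iam.GetLoginProfileInput{
+//        UserName: aws.String("example-user"),
+//    })
+//    if awsErr, ok := err.(awserr.Error); ok && awsErr.Code() == "NoSuchEntity" {
+//        // the user has no password
+//    } else if err == nil {
+//        fmt.Println(profile.LoginProfile.CreateDate)
+//    }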
+func (c *IAM) GetLoginProfileRequest(input *GetLoginProfileInput) (req *request.Request, output *GetLoginProfileOutput) { + op := &request.Operation{ + Name: opGetLoginProfile, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetLoginProfileInput{} + } + + req = c.newRequest(op, input, output) + output = &GetLoginProfileOutput{} + req.Data = output + return +} + +// Retrieves the user name and password-creation date for the specified user. +// If the user has not been assigned a password, the action returns a 404 (NoSuchEntity) +// error. +func (c *IAM) GetLoginProfile(input *GetLoginProfileInput) (*GetLoginProfileOutput, error) { + req, out := c.GetLoginProfileRequest(input) + err := req.Send() + return out, err +} + +const opGetOpenIDConnectProvider = "GetOpenIDConnectProvider" + +// GetOpenIDConnectProviderRequest generates a request for the GetOpenIDConnectProvider operation. +func (c *IAM) GetOpenIDConnectProviderRequest(input *GetOpenIDConnectProviderInput) (req *request.Request, output *GetOpenIDConnectProviderOutput) { + op := &request.Operation{ + Name: opGetOpenIDConnectProvider, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetOpenIDConnectProviderInput{} + } + + req = c.newRequest(op, input, output) + output = &GetOpenIDConnectProviderOutput{} + req.Data = output + return +} + +// Returns information about the specified OpenID Connect provider. +func (c *IAM) GetOpenIDConnectProvider(input *GetOpenIDConnectProviderInput) (*GetOpenIDConnectProviderOutput, error) { + req, out := c.GetOpenIDConnectProviderRequest(input) + err := req.Send() + return out, err +} + +const opGetPolicy = "GetPolicy" + +// GetPolicyRequest generates a request for the GetPolicy operation. +func (c *IAM) GetPolicyRequest(input *GetPolicyInput) (req *request.Request, output *GetPolicyOutput) { + op := &request.Operation{ + Name: opGetPolicy, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetPolicyInput{} + } + + req = c.newRequest(op, input, output) + output = &GetPolicyOutput{} + req.Data = output + return +} + +// Retrieves information about the specified managed policy, including the policy's +// default version and the total number of users, groups, and roles that the +// policy is attached to. For a list of the specific users, groups, and roles +// that the policy is attached to, use the ListEntitiesForPolicy API. This API +// returns metadata about the policy. To retrieve the policy document for a +// specific version of the policy, use GetPolicyVersion. +// +// This API retrieves information about managed policies. To retrieve information +// about an inline policy that is embedded with a user, group, or role, use +// the GetUserPolicy, GetGroupPolicy, or GetRolePolicy API. +// +// For more information about policies, refer to Managed Policies and Inline +// Policies (http://docs.aws.amazon.com/IAM/latest/UserGuide/policies-managed-vs-inline.html) +// in the IAM User Guide. +func (c *IAM) GetPolicy(input *GetPolicyInput) (*GetPolicyOutput, error) { + req, out := c.GetPolicyRequest(input) + err := req.Send() + return out, err +} + +const opGetPolicyVersion = "GetPolicyVersion" + +// GetPolicyVersionRequest generates a request for the GetPolicyVersion operation. 
+func (c *IAM) GetPolicyVersionRequest(input *GetPolicyVersionInput) (req *request.Request, output *GetPolicyVersionOutput) { + op := &request.Operation{ + Name: opGetPolicyVersion, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetPolicyVersionInput{} + } + + req = c.newRequest(op, input, output) + output = &GetPolicyVersionOutput{} + req.Data = output + return +} + +// Retrieves information about the specified version of the specified managed +// policy, including the policy document. +// +// To list the available versions for a policy, use ListPolicyVersions. +// +// This API retrieves information about managed policies. To retrieve information +// about an inline policy that is embedded in a user, group, or role, use the +// GetUserPolicy, GetGroupPolicy, or GetRolePolicy API. +// +// For more information about the types of policies, refer to Managed Policies +// and Inline Policies (http://docs.aws.amazon.com/IAM/latest/UserGuide/policies-managed-vs-inline.html) +// in the IAM User Guide. +func (c *IAM) GetPolicyVersion(input *GetPolicyVersionInput) (*GetPolicyVersionOutput, error) { + req, out := c.GetPolicyVersionRequest(input) + err := req.Send() + return out, err +} + +const opGetRole = "GetRole" + +// GetRoleRequest generates a request for the GetRole operation. +func (c *IAM) GetRoleRequest(input *GetRoleInput) (req *request.Request, output *GetRoleOutput) { + op := &request.Operation{ + Name: opGetRole, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetRoleInput{} + } + + req = c.newRequest(op, input, output) + output = &GetRoleOutput{} + req.Data = output + return +} + +// Retrieves information about the specified role, including the role's path, +// GUID, ARN, and the policy granting permission to assume the role. For more +// information about ARNs, go to ARNs (http://docs.aws.amazon.com/IAM/latest/UserGuide/Using_Identifiers.html#Identifiers_ARNs). +// For more information about roles, go to Working with Roles (http://docs.aws.amazon.com/IAM/latest/UserGuide/WorkingWithRoles.html). +func (c *IAM) GetRole(input *GetRoleInput) (*GetRoleOutput, error) { + req, out := c.GetRoleRequest(input) + err := req.Send() + return out, err +} + +const opGetRolePolicy = "GetRolePolicy" + +// GetRolePolicyRequest generates a request for the GetRolePolicy operation. +func (c *IAM) GetRolePolicyRequest(input *GetRolePolicyInput) (req *request.Request, output *GetRolePolicyOutput) { + op := &request.Operation{ + Name: opGetRolePolicy, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetRolePolicyInput{} + } + + req = c.newRequest(op, input, output) + output = &GetRolePolicyOutput{} + req.Data = output + return +} + +// Retrieves the specified inline policy document that is embedded with the +// specified role. +// +// A role can also have managed policies attached to it. To retrieve a managed +// policy document that is attached to a role, use GetPolicy to determine the +// policy's default version, then use GetPolicyVersion to retrieve the policy +// document. +// +// For more information about policies, refer to Managed Policies and Inline +// Policies (http://docs.aws.amazon.com/IAM/latest/UserGuide/policies-managed-vs-inline.html) +// in the IAM User Guide. +// +// For more information about roles, go to Using Roles to Delegate Permissions +// and Federate Identities (http://docs.aws.amazon.com/IAM/latest/UserGuide/roles-toplevel.html). 
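+//
+// An illustrative sketch, not part of the generated code; svc and both names
+// are assumed placeholders. IAM typically returns the policy document
+// URL-encoded, so it is decoded here with net/url before printing:
+//
+//    out, err := svc.GetRolePolicy(&iam.GetRolePolicyInput{
+//        RoleName:   aws.String("example-role"),
+//        PolicyName: aws.String("example-policy"),
+//    })
+//    if err == nil {
+//        doc, _ := url.QueryUnescape(aws.StringValue(out.PolicyDocument))
+//        fmt.Println(doc)
+//    }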
+func (c *IAM) GetRolePolicy(input *GetRolePolicyInput) (*GetRolePolicyOutput, error) { + req, out := c.GetRolePolicyRequest(input) + err := req.Send() + return out, err +} + +const opGetSAMLProvider = "GetSAMLProvider" + +// GetSAMLProviderRequest generates a request for the GetSAMLProvider operation. +func (c *IAM) GetSAMLProviderRequest(input *GetSAMLProviderInput) (req *request.Request, output *GetSAMLProviderOutput) { + op := &request.Operation{ + Name: opGetSAMLProvider, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetSAMLProviderInput{} + } + + req = c.newRequest(op, input, output) + output = &GetSAMLProviderOutput{} + req.Data = output + return +} + +// Returns the SAML provider metadocument that was uploaded when the provider +// was created or updated. +// +// This operation requires Signature Version 4 (http://docs.aws.amazon.com/general/latest/gr/signature-version-4.html). +func (c *IAM) GetSAMLProvider(input *GetSAMLProviderInput) (*GetSAMLProviderOutput, error) { + req, out := c.GetSAMLProviderRequest(input) + err := req.Send() + return out, err +} + +const opGetSSHPublicKey = "GetSSHPublicKey" + +// GetSSHPublicKeyRequest generates a request for the GetSSHPublicKey operation. +func (c *IAM) GetSSHPublicKeyRequest(input *GetSSHPublicKeyInput) (req *request.Request, output *GetSSHPublicKeyOutput) { + op := &request.Operation{ + Name: opGetSSHPublicKey, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetSSHPublicKeyInput{} + } + + req = c.newRequest(op, input, output) + output = &GetSSHPublicKeyOutput{} + req.Data = output + return +} + +// Retrieves the specified SSH public key, including metadata about the key. +// +// The SSH public key retrieved by this action is used only for authenticating +// the associated IAM user to an AWS CodeCommit repository. For more information +// about using SSH keys to authenticate to an AWS CodeCommit repository, see +// Set up AWS CodeCommit for SSH Connections (http://docs.aws.amazon.com/codecommit/latest/userguide/setting-up-credentials-ssh.html) +// in the AWS CodeCommit User Guide. +func (c *IAM) GetSSHPublicKey(input *GetSSHPublicKeyInput) (*GetSSHPublicKeyOutput, error) { + req, out := c.GetSSHPublicKeyRequest(input) + err := req.Send() + return out, err +} + +const opGetServerCertificate = "GetServerCertificate" + +// GetServerCertificateRequest generates a request for the GetServerCertificate operation. +func (c *IAM) GetServerCertificateRequest(input *GetServerCertificateInput) (req *request.Request, output *GetServerCertificateOutput) { + op := &request.Operation{ + Name: opGetServerCertificate, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetServerCertificateInput{} + } + + req = c.newRequest(op, input, output) + output = &GetServerCertificateOutput{} + req.Data = output + return +} + +// Retrieves information about the specified server certificate. +// +// For more information about working with server certificates, including a +// list of AWS services that can use the server certificates that you manage +// with IAM, go to Working with Server Certificates (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_server-certs.html) +// in the IAM User Guide. 
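+//
+// An illustrative sketch, not part of the generated code; svc and the
+// certificate name are assumed placeholders:
+//
+//    out, err := svc.GetServerCertificate(&iam.GetServerCertificateInput{
+//        ServerCertificateName: aws.String("example-cert"),
+//    })
+//    if err == nil {
+//        fmt.Println(aws.StringValue(out.ServerCertificate.ServerCertificateMetadata.Arn))
+//    }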
+func (c *IAM) GetServerCertificate(input *GetServerCertificateInput) (*GetServerCertificateOutput, error) { + req, out := c.GetServerCertificateRequest(input) + err := req.Send() + return out, err +} + +const opGetUser = "GetUser" + +// GetUserRequest generates a request for the GetUser operation. +func (c *IAM) GetUserRequest(input *GetUserInput) (req *request.Request, output *GetUserOutput) { + op := &request.Operation{ + Name: opGetUser, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetUserInput{} + } + + req = c.newRequest(op, input, output) + output = &GetUserOutput{} + req.Data = output + return +} + +// Retrieves information about the specified user, including the user's creation +// date, path, unique ID, and ARN. +// +// If you do not specify a user name, IAM determines the user name implicitly +// based on the AWS access key ID used to sign the request. +func (c *IAM) GetUser(input *GetUserInput) (*GetUserOutput, error) { + req, out := c.GetUserRequest(input) + err := req.Send() + return out, err +} + +const opGetUserPolicy = "GetUserPolicy" + +// GetUserPolicyRequest generates a request for the GetUserPolicy operation. +func (c *IAM) GetUserPolicyRequest(input *GetUserPolicyInput) (req *request.Request, output *GetUserPolicyOutput) { + op := &request.Operation{ + Name: opGetUserPolicy, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetUserPolicyInput{} + } + + req = c.newRequest(op, input, output) + output = &GetUserPolicyOutput{} + req.Data = output + return +} + +// Retrieves the specified inline policy document that is embedded in the specified +// user. +// +// A user can also have managed policies attached to it. To retrieve a managed +// policy document that is attached to a user, use GetPolicy to determine the +// policy's default version, then use GetPolicyVersion to retrieve the policy +// document. +// +// For more information about policies, refer to Managed Policies and Inline +// Policies (http://docs.aws.amazon.com/IAM/latest/UserGuide/policies-managed-vs-inline.html) +// in the IAM User Guide. +func (c *IAM) GetUserPolicy(input *GetUserPolicyInput) (*GetUserPolicyOutput, error) { + req, out := c.GetUserPolicyRequest(input) + err := req.Send() + return out, err +} + +const opListAccessKeys = "ListAccessKeys" + +// ListAccessKeysRequest generates a request for the ListAccessKeys operation. +func (c *IAM) ListAccessKeysRequest(input *ListAccessKeysInput) (req *request.Request, output *ListAccessKeysOutput) { + op := &request.Operation{ + Name: opListAccessKeys, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"Marker"}, + OutputTokens: []string{"Marker"}, + LimitToken: "MaxItems", + TruncationToken: "IsTruncated", + }, + } + + if input == nil { + input = &ListAccessKeysInput{} + } + + req = c.newRequest(op, input, output) + output = &ListAccessKeysOutput{} + req.Data = output + return +} + +// Returns information about the access key IDs associated with the specified +// user. If there are none, the action returns an empty list. +// +// Although each user is limited to a small number of keys, you can still paginate +// the results using the MaxItems and Marker parameters. +// +// If the UserName field is not specified, the UserName is determined implicitly +// based on the AWS access key ID used to sign the request. 
Because this action +// works for access keys under the AWS account, you can use this action to manage +// root credentials even if the AWS account has no associated users. +// +// To ensure the security of your AWS account, the secret access key is accessible +// only during key and user creation. +func (c *IAM) ListAccessKeys(input *ListAccessKeysInput) (*ListAccessKeysOutput, error) { + req, out := c.ListAccessKeysRequest(input) + err := req.Send() + return out, err +} + +func (c *IAM) ListAccessKeysPages(input *ListAccessKeysInput, fn func(p *ListAccessKeysOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.ListAccessKeysRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*ListAccessKeysOutput), lastPage) + }) +} + +const opListAccountAliases = "ListAccountAliases" + +// ListAccountAliasesRequest generates a request for the ListAccountAliases operation. +func (c *IAM) ListAccountAliasesRequest(input *ListAccountAliasesInput) (req *request.Request, output *ListAccountAliasesOutput) { + op := &request.Operation{ + Name: opListAccountAliases, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"Marker"}, + OutputTokens: []string{"Marker"}, + LimitToken: "MaxItems", + TruncationToken: "IsTruncated", + }, + } + + if input == nil { + input = &ListAccountAliasesInput{} + } + + req = c.newRequest(op, input, output) + output = &ListAccountAliasesOutput{} + req.Data = output + return +} + +// Lists the account alias associated with the account (Note: you can have only +// one). For information about using an AWS account alias, see Using an Alias +// for Your AWS Account ID (http://docs.aws.amazon.com/IAM/latest/UserGuide/AccountAlias.html) +// in the IAM User Guide. +func (c *IAM) ListAccountAliases(input *ListAccountAliasesInput) (*ListAccountAliasesOutput, error) { + req, out := c.ListAccountAliasesRequest(input) + err := req.Send() + return out, err +} + +func (c *IAM) ListAccountAliasesPages(input *ListAccountAliasesInput, fn func(p *ListAccountAliasesOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.ListAccountAliasesRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*ListAccountAliasesOutput), lastPage) + }) +} + +const opListAttachedGroupPolicies = "ListAttachedGroupPolicies" + +// ListAttachedGroupPoliciesRequest generates a request for the ListAttachedGroupPolicies operation. +func (c *IAM) ListAttachedGroupPoliciesRequest(input *ListAttachedGroupPoliciesInput) (req *request.Request, output *ListAttachedGroupPoliciesOutput) { + op := &request.Operation{ + Name: opListAttachedGroupPolicies, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"Marker"}, + OutputTokens: []string{"Marker"}, + LimitToken: "MaxItems", + TruncationToken: "IsTruncated", + }, + } + + if input == nil { + input = &ListAttachedGroupPoliciesInput{} + } + + req = c.newRequest(op, input, output) + output = &ListAttachedGroupPoliciesOutput{} + req.Data = output + return +} + +// Lists all managed policies that are attached to the specified group. +// +// A group can also have inline policies embedded with it. To list the inline +// policies for a group, use the ListGroupPolicies API. 
For information about +// policies, refer to Managed Policies and Inline Policies (http://docs.aws.amazon.com/IAM/latest/UserGuide/policies-managed-vs-inline.html) +// in the IAM User Guide. +// +// You can paginate the results using the MaxItems and Marker parameters. You +// can use the PathPrefix parameter to limit the list of policies to only those +// matching the specified path prefix. If there are no policies attached to +// the specified group (or none that match the specified path prefix), the action +// returns an empty list. +func (c *IAM) ListAttachedGroupPolicies(input *ListAttachedGroupPoliciesInput) (*ListAttachedGroupPoliciesOutput, error) { + req, out := c.ListAttachedGroupPoliciesRequest(input) + err := req.Send() + return out, err +} + +func (c *IAM) ListAttachedGroupPoliciesPages(input *ListAttachedGroupPoliciesInput, fn func(p *ListAttachedGroupPoliciesOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.ListAttachedGroupPoliciesRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*ListAttachedGroupPoliciesOutput), lastPage) + }) +} + +const opListAttachedRolePolicies = "ListAttachedRolePolicies" + +// ListAttachedRolePoliciesRequest generates a request for the ListAttachedRolePolicies operation. +func (c *IAM) ListAttachedRolePoliciesRequest(input *ListAttachedRolePoliciesInput) (req *request.Request, output *ListAttachedRolePoliciesOutput) { + op := &request.Operation{ + Name: opListAttachedRolePolicies, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"Marker"}, + OutputTokens: []string{"Marker"}, + LimitToken: "MaxItems", + TruncationToken: "IsTruncated", + }, + } + + if input == nil { + input = &ListAttachedRolePoliciesInput{} + } + + req = c.newRequest(op, input, output) + output = &ListAttachedRolePoliciesOutput{} + req.Data = output + return +} + +// Lists all managed policies that are attached to the specified role. +// +// A role can also have inline policies embedded with it. To list the inline +// policies for a role, use the ListRolePolicies API. For information about +// policies, refer to Managed Policies and Inline Policies (http://docs.aws.amazon.com/IAM/latest/UserGuide/policies-managed-vs-inline.html) +// in the IAM User Guide. +// +// You can paginate the results using the MaxItems and Marker parameters. You +// can use the PathPrefix parameter to limit the list of policies to only those +// matching the specified path prefix. If there are no policies attached to +// the specified role (or none that match the specified path prefix), the action +// returns an empty list. 
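+//
+// An illustrative sketch, not part of the generated code: the Pages variant
+// below drives the Marker/IsTruncated pagination automatically, invoking the
+// callback once per page until it returns false or the last page is reached.
+// svc and the role name are assumed placeholders.
+//
+//    err := svc.ListAttachedRolePoliciesPages(
+//        &iam.ListAttachedRolePoliciesInput{RoleName: aws.String("example-role")},
+//        func(page *iam.ListAttachedRolePoliciesOutput, lastPage bool) bool {
+//            for _, p := range page.AttachedPolicies {
+//                fmt.Println(aws.StringValue(p.PolicyName))
+//            }
+//            return true // keep going until the last page
+//        })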
+func (c *IAM) ListAttachedRolePolicies(input *ListAttachedRolePoliciesInput) (*ListAttachedRolePoliciesOutput, error) {
+	req, out := c.ListAttachedRolePoliciesRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+func (c *IAM) ListAttachedRolePoliciesPages(input *ListAttachedRolePoliciesInput, fn func(p *ListAttachedRolePoliciesOutput, lastPage bool) (shouldContinue bool)) error {
+	page, _ := c.ListAttachedRolePoliciesRequest(input)
+	page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator"))
+	return page.EachPage(func(p interface{}, lastPage bool) bool {
+		return fn(p.(*ListAttachedRolePoliciesOutput), lastPage)
+	})
+}
+
+const opListAttachedUserPolicies = "ListAttachedUserPolicies"
+
+// ListAttachedUserPoliciesRequest generates a request for the ListAttachedUserPolicies operation.
+func (c *IAM) ListAttachedUserPoliciesRequest(input *ListAttachedUserPoliciesInput) (req *request.Request, output *ListAttachedUserPoliciesOutput) {
+	op := &request.Operation{
+		Name:       opListAttachedUserPolicies,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+		Paginator: &request.Paginator{
+			InputTokens:     []string{"Marker"},
+			OutputTokens:    []string{"Marker"},
+			LimitToken:      "MaxItems",
+			TruncationToken: "IsTruncated",
+		},
+	}
+
+	if input == nil {
+		input = &ListAttachedUserPoliciesInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &ListAttachedUserPoliciesOutput{}
+	req.Data = output
+	return
+}
+
+// Lists all managed policies that are attached to the specified user.
+//
+// A user can also have inline policies embedded with it. To list the inline
+// policies for a user, use the ListUserPolicies API. For information about
+// policies, refer to Managed Policies and Inline Policies (http://docs.aws.amazon.com/IAM/latest/UserGuide/policies-managed-vs-inline.html)
+// in the IAM User Guide.
+//
+// You can paginate the results using the MaxItems and Marker parameters. You
+// can use the PathPrefix parameter to limit the list of policies to only those
+// matching the specified path prefix. If there are no policies attached to
+// the specified user (or none that match the specified path prefix), the action
+// returns an empty list.
+func (c *IAM) ListAttachedUserPolicies(input *ListAttachedUserPoliciesInput) (*ListAttachedUserPoliciesOutput, error) {
+	req, out := c.ListAttachedUserPoliciesRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+func (c *IAM) ListAttachedUserPoliciesPages(input *ListAttachedUserPoliciesInput, fn func(p *ListAttachedUserPoliciesOutput, lastPage bool) (shouldContinue bool)) error {
+	page, _ := c.ListAttachedUserPoliciesRequest(input)
+	page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator"))
+	return page.EachPage(func(p interface{}, lastPage bool) bool {
+		return fn(p.(*ListAttachedUserPoliciesOutput), lastPage)
+	})
+}
+
+const opListEntitiesForPolicy = "ListEntitiesForPolicy"
+
+// ListEntitiesForPolicyRequest generates a request for the ListEntitiesForPolicy operation.
+func (c *IAM) ListEntitiesForPolicyRequest(input *ListEntitiesForPolicyInput) (req *request.Request, output *ListEntitiesForPolicyOutput) { + op := &request.Operation{ + Name: opListEntitiesForPolicy, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"Marker"}, + OutputTokens: []string{"Marker"}, + LimitToken: "MaxItems", + TruncationToken: "IsTruncated", + }, + } + + if input == nil { + input = &ListEntitiesForPolicyInput{} + } + + req = c.newRequest(op, input, output) + output = &ListEntitiesForPolicyOutput{} + req.Data = output + return +} + +// Lists all users, groups, and roles that the specified managed policy is attached +// to. +// +// You can use the optional EntityFilter parameter to limit the results to +// a particular type of entity (users, groups, or roles). For example, to list +// only the roles that are attached to the specified policy, set EntityFilter +// to Role. +// +// You can paginate the results using the MaxItems and Marker parameters. +func (c *IAM) ListEntitiesForPolicy(input *ListEntitiesForPolicyInput) (*ListEntitiesForPolicyOutput, error) { + req, out := c.ListEntitiesForPolicyRequest(input) + err := req.Send() + return out, err +} + +func (c *IAM) ListEntitiesForPolicyPages(input *ListEntitiesForPolicyInput, fn func(p *ListEntitiesForPolicyOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.ListEntitiesForPolicyRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*ListEntitiesForPolicyOutput), lastPage) + }) +} + +const opListGroupPolicies = "ListGroupPolicies" + +// ListGroupPoliciesRequest generates a request for the ListGroupPolicies operation. +func (c *IAM) ListGroupPoliciesRequest(input *ListGroupPoliciesInput) (req *request.Request, output *ListGroupPoliciesOutput) { + op := &request.Operation{ + Name: opListGroupPolicies, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"Marker"}, + OutputTokens: []string{"Marker"}, + LimitToken: "MaxItems", + TruncationToken: "IsTruncated", + }, + } + + if input == nil { + input = &ListGroupPoliciesInput{} + } + + req = c.newRequest(op, input, output) + output = &ListGroupPoliciesOutput{} + req.Data = output + return +} + +// Lists the names of the inline policies that are embedded in the specified +// group. +// +// A group can also have managed policies attached to it. To list the managed +// policies that are attached to a group, use ListAttachedGroupPolicies. For +// more information about policies, refer to Managed Policies and Inline Policies +// (http://docs.aws.amazon.com/IAM/latest/UserGuide/policies-managed-vs-inline.html) +// in the IAM User Guide. +// +// You can paginate the results using the MaxItems and Marker parameters. If +// there are no inline policies embedded with the specified group, the action +// returns an empty list. 
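+//
+// An illustrative sketch, not part of the generated code, showing the manual
+// Marker/IsTruncated pagination described above; svc and the group name are
+// assumed placeholders:
+//
+//    in := &iam.ListGroupPoliciesInput{GroupName: aws.String("example-group")}
+//    for {
+//        out, err := svc.ListGroupPolicies(in)
+//        if err != nil {
+//            break
+//        }
+//        for _, name := range out.PolicyNames {
+//            fmt.Println(aws.StringValue(name))
+//        }
+//        if !aws.BoolValue(out.IsTruncated) {
+//            break
+//        }
+//        in.Marker = out.Marker // resume where the last page left off
+//    }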
+func (c *IAM) ListGroupPolicies(input *ListGroupPoliciesInput) (*ListGroupPoliciesOutput, error) { + req, out := c.ListGroupPoliciesRequest(input) + err := req.Send() + return out, err +} + +func (c *IAM) ListGroupPoliciesPages(input *ListGroupPoliciesInput, fn func(p *ListGroupPoliciesOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.ListGroupPoliciesRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*ListGroupPoliciesOutput), lastPage) + }) +} + +const opListGroups = "ListGroups" + +// ListGroupsRequest generates a request for the ListGroups operation. +func (c *IAM) ListGroupsRequest(input *ListGroupsInput) (req *request.Request, output *ListGroupsOutput) { + op := &request.Operation{ + Name: opListGroups, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"Marker"}, + OutputTokens: []string{"Marker"}, + LimitToken: "MaxItems", + TruncationToken: "IsTruncated", + }, + } + + if input == nil { + input = &ListGroupsInput{} + } + + req = c.newRequest(op, input, output) + output = &ListGroupsOutput{} + req.Data = output + return +} + +// Lists the groups that have the specified path prefix. +// +// You can paginate the results using the MaxItems and Marker parameters. +func (c *IAM) ListGroups(input *ListGroupsInput) (*ListGroupsOutput, error) { + req, out := c.ListGroupsRequest(input) + err := req.Send() + return out, err +} + +func (c *IAM) ListGroupsPages(input *ListGroupsInput, fn func(p *ListGroupsOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.ListGroupsRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*ListGroupsOutput), lastPage) + }) +} + +const opListGroupsForUser = "ListGroupsForUser" + +// ListGroupsForUserRequest generates a request for the ListGroupsForUser operation. +func (c *IAM) ListGroupsForUserRequest(input *ListGroupsForUserInput) (req *request.Request, output *ListGroupsForUserOutput) { + op := &request.Operation{ + Name: opListGroupsForUser, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"Marker"}, + OutputTokens: []string{"Marker"}, + LimitToken: "MaxItems", + TruncationToken: "IsTruncated", + }, + } + + if input == nil { + input = &ListGroupsForUserInput{} + } + + req = c.newRequest(op, input, output) + output = &ListGroupsForUserOutput{} + req.Data = output + return +} + +// Lists the groups the specified user belongs to. +// +// You can paginate the results using the MaxItems and Marker parameters. +func (c *IAM) ListGroupsForUser(input *ListGroupsForUserInput) (*ListGroupsForUserOutput, error) { + req, out := c.ListGroupsForUserRequest(input) + err := req.Send() + return out, err +} + +func (c *IAM) ListGroupsForUserPages(input *ListGroupsForUserInput, fn func(p *ListGroupsForUserOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.ListGroupsForUserRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*ListGroupsForUserOutput), lastPage) + }) +} + +const opListInstanceProfiles = "ListInstanceProfiles" + +// ListInstanceProfilesRequest generates a request for the ListInstanceProfiles operation. 
+func (c *IAM) ListInstanceProfilesRequest(input *ListInstanceProfilesInput) (req *request.Request, output *ListInstanceProfilesOutput) { + op := &request.Operation{ + Name: opListInstanceProfiles, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"Marker"}, + OutputTokens: []string{"Marker"}, + LimitToken: "MaxItems", + TruncationToken: "IsTruncated", + }, + } + + if input == nil { + input = &ListInstanceProfilesInput{} + } + + req = c.newRequest(op, input, output) + output = &ListInstanceProfilesOutput{} + req.Data = output + return +} + +// Lists the instance profiles that have the specified path prefix. If there +// are none, the action returns an empty list. For more information about instance +// profiles, go to About Instance Profiles (http://docs.aws.amazon.com/IAM/latest/UserGuide/AboutInstanceProfiles.html). +// +// You can paginate the results using the MaxItems and Marker parameters. +func (c *IAM) ListInstanceProfiles(input *ListInstanceProfilesInput) (*ListInstanceProfilesOutput, error) { + req, out := c.ListInstanceProfilesRequest(input) + err := req.Send() + return out, err +} + +func (c *IAM) ListInstanceProfilesPages(input *ListInstanceProfilesInput, fn func(p *ListInstanceProfilesOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.ListInstanceProfilesRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*ListInstanceProfilesOutput), lastPage) + }) +} + +const opListInstanceProfilesForRole = "ListInstanceProfilesForRole" + +// ListInstanceProfilesForRoleRequest generates a request for the ListInstanceProfilesForRole operation. +func (c *IAM) ListInstanceProfilesForRoleRequest(input *ListInstanceProfilesForRoleInput) (req *request.Request, output *ListInstanceProfilesForRoleOutput) { + op := &request.Operation{ + Name: opListInstanceProfilesForRole, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"Marker"}, + OutputTokens: []string{"Marker"}, + LimitToken: "MaxItems", + TruncationToken: "IsTruncated", + }, + } + + if input == nil { + input = &ListInstanceProfilesForRoleInput{} + } + + req = c.newRequest(op, input, output) + output = &ListInstanceProfilesForRoleOutput{} + req.Data = output + return +} + +// Lists the instance profiles that have the specified associated role. If there +// are none, the action returns an empty list. For more information about instance +// profiles, go to About Instance Profiles (http://docs.aws.amazon.com/IAM/latest/UserGuide/AboutInstanceProfiles.html). +// +// You can paginate the results using the MaxItems and Marker parameters. 
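+//
+// An illustrative sketch, not part of the generated code; svc and the role
+// name are assumed placeholders:
+//
+//    out, err := svc.ListInstanceProfilesForRole(&iam.ListInstanceProfilesForRoleInput{
+//        RoleName: aws.String("example-role"),
+//    })
+//    if err == nil {
+//        for _, ip := range out.InstanceProfiles {
+//            fmt.Println(aws.StringValue(ip.InstanceProfileName))
+//        }
+//    }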
+func (c *IAM) ListInstanceProfilesForRole(input *ListInstanceProfilesForRoleInput) (*ListInstanceProfilesForRoleOutput, error) { + req, out := c.ListInstanceProfilesForRoleRequest(input) + err := req.Send() + return out, err +} + +func (c *IAM) ListInstanceProfilesForRolePages(input *ListInstanceProfilesForRoleInput, fn func(p *ListInstanceProfilesForRoleOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.ListInstanceProfilesForRoleRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*ListInstanceProfilesForRoleOutput), lastPage) + }) +} + +const opListMFADevices = "ListMFADevices" + +// ListMFADevicesRequest generates a request for the ListMFADevices operation. +func (c *IAM) ListMFADevicesRequest(input *ListMFADevicesInput) (req *request.Request, output *ListMFADevicesOutput) { + op := &request.Operation{ + Name: opListMFADevices, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"Marker"}, + OutputTokens: []string{"Marker"}, + LimitToken: "MaxItems", + TruncationToken: "IsTruncated", + }, + } + + if input == nil { + input = &ListMFADevicesInput{} + } + + req = c.newRequest(op, input, output) + output = &ListMFADevicesOutput{} + req.Data = output + return +} + +// Lists the MFA devices. If the request includes the user name, then this action +// lists all the MFA devices associated with the specified user name. If you +// do not specify a user name, IAM determines the user name implicitly based +// on the AWS access key ID signing the request. +// +// You can paginate the results using the MaxItems and Marker parameters. +func (c *IAM) ListMFADevices(input *ListMFADevicesInput) (*ListMFADevicesOutput, error) { + req, out := c.ListMFADevicesRequest(input) + err := req.Send() + return out, err +} + +func (c *IAM) ListMFADevicesPages(input *ListMFADevicesInput, fn func(p *ListMFADevicesOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.ListMFADevicesRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*ListMFADevicesOutput), lastPage) + }) +} + +const opListOpenIDConnectProviders = "ListOpenIDConnectProviders" + +// ListOpenIDConnectProvidersRequest generates a request for the ListOpenIDConnectProviders operation. +func (c *IAM) ListOpenIDConnectProvidersRequest(input *ListOpenIDConnectProvidersInput) (req *request.Request, output *ListOpenIDConnectProvidersOutput) { + op := &request.Operation{ + Name: opListOpenIDConnectProviders, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ListOpenIDConnectProvidersInput{} + } + + req = c.newRequest(op, input, output) + output = &ListOpenIDConnectProvidersOutput{} + req.Data = output + return +} + +// Lists information about the OpenID Connect providers in the AWS account. +func (c *IAM) ListOpenIDConnectProviders(input *ListOpenIDConnectProvidersInput) (*ListOpenIDConnectProvidersOutput, error) { + req, out := c.ListOpenIDConnectProvidersRequest(input) + err := req.Send() + return out, err +} + +const opListPolicies = "ListPolicies" + +// ListPoliciesRequest generates a request for the ListPolicies operation. 
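+//
+// An illustrative sketch, not part of the generated code; svc is an assumed,
+// already-constructed *IAM client:
+//
+//    // List only customer managed policies that are currently attached.
+//    out, err := svc.ListPolicies(&iam.ListPoliciesInput{
+//        Scope:        aws.String("Local"),
+//        OnlyAttached: aws.Bool(true),
+//    })
+//    if err == nil {
+//        for _, p := range out.Policies {
+//            fmt.Println(aws.StringValue(p.PolicyName))
+//        }
+//    }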
+func (c *IAM) ListPoliciesRequest(input *ListPoliciesInput) (req *request.Request, output *ListPoliciesOutput) { + op := &request.Operation{ + Name: opListPolicies, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"Marker"}, + OutputTokens: []string{"Marker"}, + LimitToken: "MaxItems", + TruncationToken: "IsTruncated", + }, + } + + if input == nil { + input = &ListPoliciesInput{} + } + + req = c.newRequest(op, input, output) + output = &ListPoliciesOutput{} + req.Data = output + return +} + +// Lists all the managed policies that are available to your account, including +// your own customer managed policies and all AWS managed policies. +// +// You can filter the list of policies that is returned using the optional +// OnlyAttached, Scope, and PathPrefix parameters. For example, to list only +// the customer managed policies in your AWS account, set Scope to Local. To +// list only AWS managed policies, set Scope to AWS. +// +// You can paginate the results using the MaxItems and Marker parameters. +// +// For more information about managed policies, refer to Managed Policies and +// Inline Policies (http://docs.aws.amazon.com/IAM/latest/UserGuide/policies-managed-vs-inline.html) +// in the IAM User Guide. +func (c *IAM) ListPolicies(input *ListPoliciesInput) (*ListPoliciesOutput, error) { + req, out := c.ListPoliciesRequest(input) + err := req.Send() + return out, err +} + +func (c *IAM) ListPoliciesPages(input *ListPoliciesInput, fn func(p *ListPoliciesOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.ListPoliciesRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*ListPoliciesOutput), lastPage) + }) +} + +const opListPolicyVersions = "ListPolicyVersions" + +// ListPolicyVersionsRequest generates a request for the ListPolicyVersions operation. +func (c *IAM) ListPolicyVersionsRequest(input *ListPolicyVersionsInput) (req *request.Request, output *ListPolicyVersionsOutput) { + op := &request.Operation{ + Name: opListPolicyVersions, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ListPolicyVersionsInput{} + } + + req = c.newRequest(op, input, output) + output = &ListPolicyVersionsOutput{} + req.Data = output + return +} + +// Lists information about the versions of the specified managed policy, including +// the version that is set as the policy's default version. +// +// For more information about managed policies, refer to Managed Policies and +// Inline Policies (http://docs.aws.amazon.com/IAM/latest/UserGuide/policies-managed-vs-inline.html) +// in the IAM User Guide. +func (c *IAM) ListPolicyVersions(input *ListPolicyVersionsInput) (*ListPolicyVersionsOutput, error) { + req, out := c.ListPolicyVersionsRequest(input) + err := req.Send() + return out, err +} + +const opListRolePolicies = "ListRolePolicies" + +// ListRolePoliciesRequest generates a request for the ListRolePolicies operation. 
+func (c *IAM) ListRolePoliciesRequest(input *ListRolePoliciesInput) (req *request.Request, output *ListRolePoliciesOutput) { + op := &request.Operation{ + Name: opListRolePolicies, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"Marker"}, + OutputTokens: []string{"Marker"}, + LimitToken: "MaxItems", + TruncationToken: "IsTruncated", + }, + } + + if input == nil { + input = &ListRolePoliciesInput{} + } + + req = c.newRequest(op, input, output) + output = &ListRolePoliciesOutput{} + req.Data = output + return +} + +// Lists the names of the inline policies that are embedded in the specified +// role. +// +// A role can also have managed policies attached to it. To list the managed +// policies that are attached to a role, use ListAttachedRolePolicies. For more +// information about policies, refer to Managed Policies and Inline Policies +// (http://docs.aws.amazon.com/IAM/latest/UserGuide/policies-managed-vs-inline.html) +// in the IAM User Guide. +// +// You can paginate the results using the MaxItems and Marker parameters. If +// there are no inline policies embedded with the specified role, the action +// returns an empty list. +func (c *IAM) ListRolePolicies(input *ListRolePoliciesInput) (*ListRolePoliciesOutput, error) { + req, out := c.ListRolePoliciesRequest(input) + err := req.Send() + return out, err +} + +func (c *IAM) ListRolePoliciesPages(input *ListRolePoliciesInput, fn func(p *ListRolePoliciesOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.ListRolePoliciesRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*ListRolePoliciesOutput), lastPage) + }) +} + +const opListRoles = "ListRoles" + +// ListRolesRequest generates a request for the ListRoles operation. +func (c *IAM) ListRolesRequest(input *ListRolesInput) (req *request.Request, output *ListRolesOutput) { + op := &request.Operation{ + Name: opListRoles, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"Marker"}, + OutputTokens: []string{"Marker"}, + LimitToken: "MaxItems", + TruncationToken: "IsTruncated", + }, + } + + if input == nil { + input = &ListRolesInput{} + } + + req = c.newRequest(op, input, output) + output = &ListRolesOutput{} + req.Data = output + return +} + +// Lists the roles that have the specified path prefix. If there are none, the +// action returns an empty list. For more information about roles, go to Working +// with Roles (http://docs.aws.amazon.com/IAM/latest/UserGuide/WorkingWithRoles.html). +// +// You can paginate the results using the MaxItems and Marker parameters. +func (c *IAM) ListRoles(input *ListRolesInput) (*ListRolesOutput, error) { + req, out := c.ListRolesRequest(input) + err := req.Send() + return out, err +} + +func (c *IAM) ListRolesPages(input *ListRolesInput, fn func(p *ListRolesOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.ListRolesRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*ListRolesOutput), lastPage) + }) +} + +const opListSAMLProviders = "ListSAMLProviders" + +// ListSAMLProvidersRequest generates a request for the ListSAMLProviders operation. 
+func (c *IAM) ListSAMLProvidersRequest(input *ListSAMLProvidersInput) (req *request.Request, output *ListSAMLProvidersOutput) { + op := &request.Operation{ + Name: opListSAMLProviders, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ListSAMLProvidersInput{} + } + + req = c.newRequest(op, input, output) + output = &ListSAMLProvidersOutput{} + req.Data = output + return +} + +// Lists the SAML providers in the account. +// +// This operation requires Signature Version 4 (http://docs.aws.amazon.com/general/latest/gr/signature-version-4.html). +func (c *IAM) ListSAMLProviders(input *ListSAMLProvidersInput) (*ListSAMLProvidersOutput, error) { + req, out := c.ListSAMLProvidersRequest(input) + err := req.Send() + return out, err +} + +const opListSSHPublicKeys = "ListSSHPublicKeys" + +// ListSSHPublicKeysRequest generates a request for the ListSSHPublicKeys operation. +func (c *IAM) ListSSHPublicKeysRequest(input *ListSSHPublicKeysInput) (req *request.Request, output *ListSSHPublicKeysOutput) { + op := &request.Operation{ + Name: opListSSHPublicKeys, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ListSSHPublicKeysInput{} + } + + req = c.newRequest(op, input, output) + output = &ListSSHPublicKeysOutput{} + req.Data = output + return +} + +// Returns information about the SSH public keys associated with the specified +// IAM user. If there are none, the action returns an empty list. +// +// The SSH public keys returned by this action are used only for authenticating +// the IAM user to an AWS CodeCommit repository. For more information about +// using SSH keys to authenticate to an AWS CodeCommit repository, see Set up +// AWS CodeCommit for SSH Connections (http://docs.aws.amazon.com/codecommit/latest/userguide/setting-up-credentials-ssh.html) +// in the AWS CodeCommit User Guide. +// +// Although each user is limited to a small number of keys, you can still paginate +// the results using the MaxItems and Marker parameters. +func (c *IAM) ListSSHPublicKeys(input *ListSSHPublicKeysInput) (*ListSSHPublicKeysOutput, error) { + req, out := c.ListSSHPublicKeysRequest(input) + err := req.Send() + return out, err +} + +const opListServerCertificates = "ListServerCertificates" + +// ListServerCertificatesRequest generates a request for the ListServerCertificates operation. +func (c *IAM) ListServerCertificatesRequest(input *ListServerCertificatesInput) (req *request.Request, output *ListServerCertificatesOutput) { + op := &request.Operation{ + Name: opListServerCertificates, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"Marker"}, + OutputTokens: []string{"Marker"}, + LimitToken: "MaxItems", + TruncationToken: "IsTruncated", + }, + } + + if input == nil { + input = &ListServerCertificatesInput{} + } + + req = c.newRequest(op, input, output) + output = &ListServerCertificatesOutput{} + req.Data = output + return +} + +// Lists the server certificates that have the specified path prefix. If none +// exist, the action returns an empty list. +// +// You can paginate the results using the MaxItems and Marker parameters. +// +// For more information about working with server certificates, including a +// list of AWS services that can use the server certificates that you manage +// with IAM, go to Working with Server Certificates (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_server-certs.html) +// in the IAM User Guide. 
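+//
+// An illustrative sketch, not part of the generated code; svc and the path
+// prefix are assumed placeholders:
+//
+//    out, err := svc.ListServerCertificates(&iam.ListServerCertificatesInput{
+//        PathPrefix: aws.String("/company/servercerts/"),
+//    })
+//    if err == nil {
+//        for _, m := range out.ServerCertificateMetadataList {
+//            fmt.Println(aws.StringValue(m.ServerCertificateName))
+//        }
+//    }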
+func (c *IAM) ListServerCertificates(input *ListServerCertificatesInput) (*ListServerCertificatesOutput, error) { + req, out := c.ListServerCertificatesRequest(input) + err := req.Send() + return out, err +} + +func (c *IAM) ListServerCertificatesPages(input *ListServerCertificatesInput, fn func(p *ListServerCertificatesOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.ListServerCertificatesRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*ListServerCertificatesOutput), lastPage) + }) +} + +const opListSigningCertificates = "ListSigningCertificates" + +// ListSigningCertificatesRequest generates a request for the ListSigningCertificates operation. +func (c *IAM) ListSigningCertificatesRequest(input *ListSigningCertificatesInput) (req *request.Request, output *ListSigningCertificatesOutput) { + op := &request.Operation{ + Name: opListSigningCertificates, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"Marker"}, + OutputTokens: []string{"Marker"}, + LimitToken: "MaxItems", + TruncationToken: "IsTruncated", + }, + } + + if input == nil { + input = &ListSigningCertificatesInput{} + } + + req = c.newRequest(op, input, output) + output = &ListSigningCertificatesOutput{} + req.Data = output + return +} + +// Returns information about the signing certificates associated with the specified +// user. If there are none, the action returns an empty list. +// +// Although each user is limited to a small number of signing certificates, +// you can still paginate the results using the MaxItems and Marker parameters. +// +// If the UserName field is not specified, the user name is determined implicitly +// based on the AWS access key ID used to sign the request. Because this action +// works for access keys under the AWS account, you can use this action to manage +// root credentials even if the AWS account has no associated users. +func (c *IAM) ListSigningCertificates(input *ListSigningCertificatesInput) (*ListSigningCertificatesOutput, error) { + req, out := c.ListSigningCertificatesRequest(input) + err := req.Send() + return out, err +} + +func (c *IAM) ListSigningCertificatesPages(input *ListSigningCertificatesInput, fn func(p *ListSigningCertificatesOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.ListSigningCertificatesRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*ListSigningCertificatesOutput), lastPage) + }) +} + +const opListUserPolicies = "ListUserPolicies" + +// ListUserPoliciesRequest generates a request for the ListUserPolicies operation. +func (c *IAM) ListUserPoliciesRequest(input *ListUserPoliciesInput) (req *request.Request, output *ListUserPoliciesOutput) { + op := &request.Operation{ + Name: opListUserPolicies, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"Marker"}, + OutputTokens: []string{"Marker"}, + LimitToken: "MaxItems", + TruncationToken: "IsTruncated", + }, + } + + if input == nil { + input = &ListUserPoliciesInput{} + } + + req = c.newRequest(op, input, output) + output = &ListUserPoliciesOutput{} + req.Data = output + return +} + +// Lists the names of the inline policies embedded in the specified user. +// +// A user can also have managed policies attached to it. 
To list the managed +// policies that are attached to a user, use ListAttachedUserPolicies. For more +// information about policies, refer to Managed Policies and Inline Policies +// (http://docs.aws.amazon.com/IAM/latest/UserGuide/policies-managed-vs-inline.html) +// in the IAM User Guide. +// +// You can paginate the results using the MaxItems and Marker parameters. If +// there are no inline policies embedded with the specified user, the action +// returns an empty list. +func (c *IAM) ListUserPolicies(input *ListUserPoliciesInput) (*ListUserPoliciesOutput, error) { + req, out := c.ListUserPoliciesRequest(input) + err := req.Send() + return out, err +} + +func (c *IAM) ListUserPoliciesPages(input *ListUserPoliciesInput, fn func(p *ListUserPoliciesOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.ListUserPoliciesRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*ListUserPoliciesOutput), lastPage) + }) +} + +const opListUsers = "ListUsers" + +// ListUsersRequest generates a request for the ListUsers operation. +func (c *IAM) ListUsersRequest(input *ListUsersInput) (req *request.Request, output *ListUsersOutput) { + op := &request.Operation{ + Name: opListUsers, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"Marker"}, + OutputTokens: []string{"Marker"}, + LimitToken: "MaxItems", + TruncationToken: "IsTruncated", + }, + } + + if input == nil { + input = &ListUsersInput{} + } + + req = c.newRequest(op, input, output) + output = &ListUsersOutput{} + req.Data = output + return +} + +// Lists the IAM users that have the specified path prefix. If no path prefix +// is specified, the action returns all users in the AWS account. If there are +// none, the action returns an empty list. +// +// You can paginate the results using the MaxItems and Marker parameters. +func (c *IAM) ListUsers(input *ListUsersInput) (*ListUsersOutput, error) { + req, out := c.ListUsersRequest(input) + err := req.Send() + return out, err +} + +func (c *IAM) ListUsersPages(input *ListUsersInput, fn func(p *ListUsersOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.ListUsersRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*ListUsersOutput), lastPage) + }) +} + +const opListVirtualMFADevices = "ListVirtualMFADevices" + +// ListVirtualMFADevicesRequest generates a request for the ListVirtualMFADevices operation. +func (c *IAM) ListVirtualMFADevicesRequest(input *ListVirtualMFADevicesInput) (req *request.Request, output *ListVirtualMFADevicesOutput) { + op := &request.Operation{ + Name: opListVirtualMFADevices, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"Marker"}, + OutputTokens: []string{"Marker"}, + LimitToken: "MaxItems", + TruncationToken: "IsTruncated", + }, + } + + if input == nil { + input = &ListVirtualMFADevicesInput{} + } + + req = c.newRequest(op, input, output) + output = &ListVirtualMFADevicesOutput{} + req.Data = output + return +} + +// Lists the virtual MFA devices under the AWS account by assignment status. +// If you do not specify an assignment status, the action returns a list of +// all virtual MFA devices. Assignment status can be Assigned, Unassigned, or +// Any. 
+//
+// You can paginate the results using the MaxItems and Marker parameters.
+func (c *IAM) ListVirtualMFADevices(input *ListVirtualMFADevicesInput) (*ListVirtualMFADevicesOutput, error) {
+	req, out := c.ListVirtualMFADevicesRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+func (c *IAM) ListVirtualMFADevicesPages(input *ListVirtualMFADevicesInput, fn func(p *ListVirtualMFADevicesOutput, lastPage bool) (shouldContinue bool)) error {
+	page, _ := c.ListVirtualMFADevicesRequest(input)
+	page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator"))
+	return page.EachPage(func(p interface{}, lastPage bool) bool {
+		return fn(p.(*ListVirtualMFADevicesOutput), lastPage)
+	})
+}
+
+const opPutGroupPolicy = "PutGroupPolicy"
+
+// PutGroupPolicyRequest generates a request for the PutGroupPolicy operation.
+func (c *IAM) PutGroupPolicyRequest(input *PutGroupPolicyInput) (req *request.Request, output *PutGroupPolicyOutput) {
+	op := &request.Operation{
+		Name:       opPutGroupPolicy,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &PutGroupPolicyInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &PutGroupPolicyOutput{}
+	req.Data = output
+	return
+}
+
+// Adds (or updates) an inline policy document that is embedded in the specified
+// group.
+//
+// A group can also have managed policies attached to it. To attach a managed
+// policy to a group, use AttachGroupPolicy. To create a new managed policy,
+// use CreatePolicy. For information about policies, refer to Managed Policies
+// and Inline Policies (http://docs.aws.amazon.com/IAM/latest/UserGuide/policies-managed-vs-inline.html)
+// in the IAM User Guide.
+//
+// For information about limits on the number of inline policies that you can
+// embed in a group, see Limitations on IAM Entities (http://docs.aws.amazon.com/IAM/latest/UserGuide/LimitationsOnEntities.html)
+// in the IAM User Guide.
+//
+// Because policy documents can be large, you should use POST rather than GET
+// when calling PutGroupPolicy. For general information about using the Query
+// API with IAM, go to Making Query Requests (http://docs.aws.amazon.com/IAM/latest/UserGuide/IAM_UsingQueryAPI.html)
+// in the Using IAM guide.
+func (c *IAM) PutGroupPolicy(input *PutGroupPolicyInput) (*PutGroupPolicyOutput, error) {
+	req, out := c.PutGroupPolicyRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+const opPutRolePolicy = "PutRolePolicy"
+
+// PutRolePolicyRequest generates a request for the PutRolePolicy operation.
+func (c *IAM) PutRolePolicyRequest(input *PutRolePolicyInput) (req *request.Request, output *PutRolePolicyOutput) {
+	op := &request.Operation{
+		Name:       opPutRolePolicy,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &PutRolePolicyInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &PutRolePolicyOutput{}
+	req.Data = output
+	return
+}
+
+// Adds (or updates) an inline policy document that is embedded in the specified
+// role.
+//
+// When you embed an inline policy in a role, the inline policy is used as
+// the role's access (permissions) policy. The role's trust policy is created
+// at the same time as the role, using CreateRole. You can update a role's trust
+// policy using UpdateAssumeRolePolicy. For more information about roles, go
+// to Using Roles to Delegate Permissions and Federate Identities (http://docs.aws.amazon.com/IAM/latest/UserGuide/roles-toplevel.html).
+//
+// A role can also have a managed policy attached to it.
To attach a managed +// policy to a role, use AttachRolePolicy. To create a new managed policy, use +// CreatePolicy. For information about policies, refer to Managed Policies and +// Inline Policies (http://docs.aws.amazon.com/IAM/latest/UserGuide/policies-managed-vs-inline.html) +// in the IAM User Guide. +// +// For information about limits on the number of inline policies that you can +// embed with a role, see Limitations on IAM Entities (http://docs.aws.amazon.com/IAM/latest/UserGuide/LimitationsOnEntities.html) +// in the IAM User Guide. +// +// Because policy documents can be large, you should use POST rather than GET +// when calling PutRolePolicy. For general information about using the Query +// API with IAM, go to Making Query Requests (http://docs.aws.amazon.com/IAM/latest/UserGuide/IAM_UsingQueryAPI.html) +// in the Using IAM guide. +func (c *IAM) PutRolePolicy(input *PutRolePolicyInput) (*PutRolePolicyOutput, error) { + req, out := c.PutRolePolicyRequest(input) + err := req.Send() + return out, err +} + +const opPutUserPolicy = "PutUserPolicy" + +// PutUserPolicyRequest generates a request for the PutUserPolicy operation. +func (c *IAM) PutUserPolicyRequest(input *PutUserPolicyInput) (req *request.Request, output *PutUserPolicyOutput) { + op := &request.Operation{ + Name: opPutUserPolicy, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &PutUserPolicyInput{} + } + + req = c.newRequest(op, input, output) + output = &PutUserPolicyOutput{} + req.Data = output + return +} + +// Adds (or updates) an inline policy document that is embedded in the specified +// user. +// +// A user can also have a managed policy attached to it. To attach a managed +// policy to a user, use AttachUserPolicy. To create a new managed policy, use +// CreatePolicy. For information about policies, refer to Managed Policies and +// Inline Policies (http://docs.aws.amazon.com/IAM/latest/UserGuide/policies-managed-vs-inline.html) +// in the IAM User Guide. +// +// For information about limits on the number of inline policies that you can +// embed in a user, see Limitations on IAM Entities (http://docs.aws.amazon.com/IAM/latest/UserGuide/LimitationsOnEntities.html) +// in the IAM User Guide. +// +// Because policy documents can be large, you should use POST rather than GET +// when calling PutUserPolicy. For general information about using the Query +// API with IAM, go to Making Query Requests (http://docs.aws.amazon.com/IAM/latest/UserGuide/IAM_UsingQueryAPI.html) +// in the Using IAM guide. +func (c *IAM) PutUserPolicy(input *PutUserPolicyInput) (*PutUserPolicyOutput, error) { + req, out := c.PutUserPolicyRequest(input) + err := req.Send() + return out, err +} + +const opRemoveClientIDFromOpenIDConnectProvider = "RemoveClientIDFromOpenIDConnectProvider" + +// RemoveClientIDFromOpenIDConnectProviderRequest generates a request for the RemoveClientIDFromOpenIDConnectProvider operation. 
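+//
+// A hedged sketch of a direct call (the provider ARN and client ID are
+// placeholders; svc is an assumed *IAM client):
+//
+//    _, err := svc.RemoveClientIDFromOpenIDConnectProvider(&RemoveClientIDFromOpenIDConnectProviderInput{
+//        OpenIDConnectProviderArn: aws.String("arn:aws:iam::123456789012:oidc-provider/server.example.com"),
+//        ClientID:                 aws.String("example-client-id"),
+//    })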
+func (c *IAM) RemoveClientIDFromOpenIDConnectProviderRequest(input *RemoveClientIDFromOpenIDConnectProviderInput) (req *request.Request, output *RemoveClientIDFromOpenIDConnectProviderOutput) { + op := &request.Operation{ + Name: opRemoveClientIDFromOpenIDConnectProvider, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &RemoveClientIDFromOpenIDConnectProviderInput{} + } + + req = c.newRequest(op, input, output) + output = &RemoveClientIDFromOpenIDConnectProviderOutput{} + req.Data = output + return +} + +// Removes the specified client ID (also known as audience) from the list of +// client IDs registered for the specified IAM OpenID Connect provider. +// +// This action is idempotent; it does not fail or return an error if you try +// to remove a client ID that was removed previously. +func (c *IAM) RemoveClientIDFromOpenIDConnectProvider(input *RemoveClientIDFromOpenIDConnectProviderInput) (*RemoveClientIDFromOpenIDConnectProviderOutput, error) { + req, out := c.RemoveClientIDFromOpenIDConnectProviderRequest(input) + err := req.Send() + return out, err +} + +const opRemoveRoleFromInstanceProfile = "RemoveRoleFromInstanceProfile" + +// RemoveRoleFromInstanceProfileRequest generates a request for the RemoveRoleFromInstanceProfile operation. +func (c *IAM) RemoveRoleFromInstanceProfileRequest(input *RemoveRoleFromInstanceProfileInput) (req *request.Request, output *RemoveRoleFromInstanceProfileOutput) { + op := &request.Operation{ + Name: opRemoveRoleFromInstanceProfile, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &RemoveRoleFromInstanceProfileInput{} + } + + req = c.newRequest(op, input, output) + output = &RemoveRoleFromInstanceProfileOutput{} + req.Data = output + return +} + +// Removes the specified role from the specified instance profile. +// +// Make sure you do not have any Amazon EC2 instances running with the role +// you are about to remove from the instance profile. Removing a role from an +// instance profile that is associated with a running instance will break any +// applications running on the instance. For more information about roles, +// go to Working with Roles (http://docs.aws.amazon.com/IAM/latest/UserGuide/WorkingWithRoles.html). +// For more information about instance profiles, go to About Instance Profiles +// (http://docs.aws.amazon.com/IAM/latest/UserGuide/AboutInstanceProfiles.html). +func (c *IAM) RemoveRoleFromInstanceProfile(input *RemoveRoleFromInstanceProfileInput) (*RemoveRoleFromInstanceProfileOutput, error) { + req, out := c.RemoveRoleFromInstanceProfileRequest(input) + err := req.Send() + return out, err +} + +const opRemoveUserFromGroup = "RemoveUserFromGroup" + +// RemoveUserFromGroupRequest generates a request for the RemoveUserFromGroup operation. +func (c *IAM) RemoveUserFromGroupRequest(input *RemoveUserFromGroupInput) (req *request.Request, output *RemoveUserFromGroupOutput) { + op := &request.Operation{ + Name: opRemoveUserFromGroup, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &RemoveUserFromGroupInput{} + } + + req = c.newRequest(op, input, output) + output = &RemoveUserFromGroupOutput{} + req.Data = output + return +} + +// Removes the specified user from the specified group. 
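+//
+// A minimal sketch (the group and user names are illustrative; svc is an
+// assumed *IAM client):
+//
+//    _, err := svc.RemoveUserFromGroup(&RemoveUserFromGroupInput{
+//        GroupName: aws.String("Admins"),
+//        UserName:  aws.String("alice"),
+//    })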
+func (c *IAM) RemoveUserFromGroup(input *RemoveUserFromGroupInput) (*RemoveUserFromGroupOutput, error) { + req, out := c.RemoveUserFromGroupRequest(input) + err := req.Send() + return out, err +} + +const opResyncMFADevice = "ResyncMFADevice" + +// ResyncMFADeviceRequest generates a request for the ResyncMFADevice operation. +func (c *IAM) ResyncMFADeviceRequest(input *ResyncMFADeviceInput) (req *request.Request, output *ResyncMFADeviceOutput) { + op := &request.Operation{ + Name: opResyncMFADevice, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ResyncMFADeviceInput{} + } + + req = c.newRequest(op, input, output) + output = &ResyncMFADeviceOutput{} + req.Data = output + return +} + +// Synchronizes the specified MFA device with AWS servers. +// +// For more information about creating and working with virtual MFA devices, +// go to Using a Virtual MFA Device (http://docs.aws.amazon.com/IAM/latest/UserGuide/Using_VirtualMFA.html) +// in the Using IAM guide. +func (c *IAM) ResyncMFADevice(input *ResyncMFADeviceInput) (*ResyncMFADeviceOutput, error) { + req, out := c.ResyncMFADeviceRequest(input) + err := req.Send() + return out, err +} + +const opSetDefaultPolicyVersion = "SetDefaultPolicyVersion" + +// SetDefaultPolicyVersionRequest generates a request for the SetDefaultPolicyVersion operation. +func (c *IAM) SetDefaultPolicyVersionRequest(input *SetDefaultPolicyVersionInput) (req *request.Request, output *SetDefaultPolicyVersionOutput) { + op := &request.Operation{ + Name: opSetDefaultPolicyVersion, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &SetDefaultPolicyVersionInput{} + } + + req = c.newRequest(op, input, output) + output = &SetDefaultPolicyVersionOutput{} + req.Data = output + return +} + +// Sets the specified version of the specified policy as the policy's default +// (operative) version. +// +// This action affects all users, groups, and roles that the policy is attached +// to. To list the users, groups, and roles that the policy is attached to, +// use the ListEntitiesForPolicy API. +// +// For information about managed policies, refer to Managed Policies and Inline +// Policies (http://docs.aws.amazon.com/IAM/latest/UserGuide/policies-managed-vs-inline.html) +// in the IAM User Guide. +func (c *IAM) SetDefaultPolicyVersion(input *SetDefaultPolicyVersionInput) (*SetDefaultPolicyVersionOutput, error) { + req, out := c.SetDefaultPolicyVersionRequest(input) + err := req.Send() + return out, err +} + +const opSimulateCustomPolicy = "SimulateCustomPolicy" + +// SimulateCustomPolicyRequest generates a request for the SimulateCustomPolicy operation. +func (c *IAM) SimulateCustomPolicyRequest(input *SimulateCustomPolicyInput) (req *request.Request, output *SimulatePolicyResponse) { + op := &request.Operation{ + Name: opSimulateCustomPolicy, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &SimulateCustomPolicyInput{} + } + + req = c.newRequest(op, input, output) + output = &SimulatePolicyResponse{} + req.Data = output + return +} + +// Simulate how a set of IAM policies and optionally a resource-based policy +// works with a list of API actions and AWS resources to determine the policies' +// effective permissions. The policies are provided as strings. +// +// The simulation does not perform the API actions; it only checks the authorization +// to determine if the simulated policies allow or deny the actions. 
+//
+// If you want to simulate existing policies attached to an IAM user, group,
+// or role, use SimulatePrincipalPolicy instead.
+//
+// Context keys are variables maintained by AWS and its services that provide
+// details about the context of an API query request. You can use the Condition
+// element of an IAM policy to evaluate context keys. To get the list of context
+// keys that the policies require for correct simulation, use GetContextKeysForCustomPolicy.
+//
+// If the output is long, you can use the MaxItems and Marker parameters to
+// paginate the results.
+func (c *IAM) SimulateCustomPolicy(input *SimulateCustomPolicyInput) (*SimulatePolicyResponse, error) {
+	req, out := c.SimulateCustomPolicyRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+const opSimulatePrincipalPolicy = "SimulatePrincipalPolicy"
+
+// SimulatePrincipalPolicyRequest generates a request for the SimulatePrincipalPolicy operation.
+func (c *IAM) SimulatePrincipalPolicyRequest(input *SimulatePrincipalPolicyInput) (req *request.Request, output *SimulatePolicyResponse) {
+	op := &request.Operation{
+		Name:       opSimulatePrincipalPolicy,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &SimulatePrincipalPolicyInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &SimulatePolicyResponse{}
+	req.Data = output
+	return
+}
+
+// Simulate how a set of IAM policies attached to an IAM entity works with a
+// list of API actions and AWS resources to determine the policies' effective
+// permissions. The entity can be an IAM user, group, or role. If you specify
+// a user, then the simulation also includes all of the policies that are attached
+// to groups that the user belongs to.
+//
+// You can optionally include a list of one or more additional policies specified
+// as strings to include in the simulation. If you want to simulate only policies
+// specified as strings, use SimulateCustomPolicy instead.
+//
+// You can also optionally include one resource-based policy to be evaluated
+// with each of the resources included in the simulation.
+//
+// The simulation does not perform the API actions; it only checks the authorization
+// to determine if the simulated policies allow or deny the actions.
+//
+// Note: This API discloses information about the permissions granted to other
+// users. If you do not want users to see other users' permissions, then consider
+// allowing them to use SimulateCustomPolicy instead.
+//
+// Context keys are variables maintained by AWS and its services that provide
+// details about the context of an API query request. You can use the Condition
+// element of an IAM policy to evaluate context keys. To get the list of context
+// keys that the policies require for correct simulation, use GetContextKeysForPrincipalPolicy.
+//
+// If the output is long, you can use the MaxItems and Marker parameters to
+// paginate the results.
+func (c *IAM) SimulatePrincipalPolicy(input *SimulatePrincipalPolicyInput) (*SimulatePolicyResponse, error) {
+	req, out := c.SimulatePrincipalPolicyRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+const opUpdateAccessKey = "UpdateAccessKey"
+
+// UpdateAccessKeyRequest generates a request for the UpdateAccessKey operation.
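+//
+// A hedged key-rotation sketch using the UpdateAccessKey wrapper below (the
+// key ID is a placeholder; svc is an assumed *IAM client):
+//
+//    _, err := svc.UpdateAccessKey(&UpdateAccessKeyInput{
+//        AccessKeyId: aws.String("AKIAIOSFODNN7EXAMPLE"),
+//        Status:      aws.String("Inactive"), // retire the old key before deleting it
+//    })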
+func (c *IAM) UpdateAccessKeyRequest(input *UpdateAccessKeyInput) (req *request.Request, output *UpdateAccessKeyOutput) { + op := &request.Operation{ + Name: opUpdateAccessKey, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UpdateAccessKeyInput{} + } + + req = c.newRequest(op, input, output) + output = &UpdateAccessKeyOutput{} + req.Data = output + return +} + +// Changes the status of the specified access key from Active to Inactive, or +// vice versa. This action can be used to disable a user's key as part of a +// key rotation work flow. +// +// If the UserName field is not specified, the UserName is determined implicitly +// based on the AWS access key ID used to sign the request. Because this action +// works for access keys under the AWS account, you can use this action to manage +// root credentials even if the AWS account has no associated users. +// +// For information about rotating keys, see Managing Keys and Certificates +// (http://docs.aws.amazon.com/IAM/latest/UserGuide/ManagingCredentials.html) +// in the IAM User Guide. +func (c *IAM) UpdateAccessKey(input *UpdateAccessKeyInput) (*UpdateAccessKeyOutput, error) { + req, out := c.UpdateAccessKeyRequest(input) + err := req.Send() + return out, err +} + +const opUpdateAccountPasswordPolicy = "UpdateAccountPasswordPolicy" + +// UpdateAccountPasswordPolicyRequest generates a request for the UpdateAccountPasswordPolicy operation. +func (c *IAM) UpdateAccountPasswordPolicyRequest(input *UpdateAccountPasswordPolicyInput) (req *request.Request, output *UpdateAccountPasswordPolicyOutput) { + op := &request.Operation{ + Name: opUpdateAccountPasswordPolicy, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UpdateAccountPasswordPolicyInput{} + } + + req = c.newRequest(op, input, output) + output = &UpdateAccountPasswordPolicyOutput{} + req.Data = output + return +} + +// Updates the password policy settings for the AWS account. +// +// This action does not support partial updates. No parameters are required, +// but if you do not specify a parameter, that parameter's value reverts to +// its default value. See the Request Parameters section for each parameter's +// default value. +// +// For more information about using a password policy, see Managing an IAM +// Password Policy (http://docs.aws.amazon.com/IAM/latest/UserGuide/Using_ManagingPasswordPolicies.html) +// in the IAM User Guide. +func (c *IAM) UpdateAccountPasswordPolicy(input *UpdateAccountPasswordPolicyInput) (*UpdateAccountPasswordPolicyOutput, error) { + req, out := c.UpdateAccountPasswordPolicyRequest(input) + err := req.Send() + return out, err +} + +const opUpdateAssumeRolePolicy = "UpdateAssumeRolePolicy" + +// UpdateAssumeRolePolicyRequest generates a request for the UpdateAssumeRolePolicy operation. +func (c *IAM) UpdateAssumeRolePolicyRequest(input *UpdateAssumeRolePolicyInput) (req *request.Request, output *UpdateAssumeRolePolicyOutput) { + op := &request.Operation{ + Name: opUpdateAssumeRolePolicy, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UpdateAssumeRolePolicyInput{} + } + + req = c.newRequest(op, input, output) + output = &UpdateAssumeRolePolicyOutput{} + req.Data = output + return +} + +// Updates the policy that grants an entity permission to assume a role. For +// more information about roles, go to Using Roles to Delegate Permissions and +// Federate Identities (http://docs.aws.amazon.com/IAM/latest/UserGuide/roles-toplevel.html). 
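+//
+// A minimal trust-policy update sketch (the role name and document are
+// illustrative; svc is an assumed *IAM client):
+//
+//    trust := `{"Version":"2012-10-17","Statement":[{"Effect":"Allow","Principal":{"Service":"ec2.amazonaws.com"},"Action":"sts:AssumeRole"}]}`
+//    _, err := svc.UpdateAssumeRolePolicy(&UpdateAssumeRolePolicyInput{
+//        RoleName:       aws.String("example-role"),
+//        PolicyDocument: aws.String(trust),
+//    })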
+func (c *IAM) UpdateAssumeRolePolicy(input *UpdateAssumeRolePolicyInput) (*UpdateAssumeRolePolicyOutput, error) {
+	req, out := c.UpdateAssumeRolePolicyRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+const opUpdateGroup = "UpdateGroup"
+
+// UpdateGroupRequest generates a request for the UpdateGroup operation.
+func (c *IAM) UpdateGroupRequest(input *UpdateGroupInput) (req *request.Request, output *UpdateGroupOutput) {
+	op := &request.Operation{
+		Name:       opUpdateGroup,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &UpdateGroupInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &UpdateGroupOutput{}
+	req.Data = output
+	return
+}
+
+// Updates the name and/or the path of the specified group.
+//
+// You should understand the implications of changing a group's path or name.
+// For more information, see Renaming Users and Groups (http://docs.aws.amazon.com/IAM/latest/UserGuide/Using_WorkingWithGroupsAndUsers.html)
+// in the IAM User Guide. To change a group name the requester must have appropriate
+// permissions on both the source object and the target object. For example,
+// to change Managers to MGRs, the entity making the request must have permission
+// on Managers and MGRs, or must have permission on all (*). For more information
+// about permissions, see Permissions and Policies (http://docs.aws.amazon.com/IAM/latest/UserGuide/PermissionsAndPolicies.html).
+func (c *IAM) UpdateGroup(input *UpdateGroupInput) (*UpdateGroupOutput, error) {
+	req, out := c.UpdateGroupRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+const opUpdateLoginProfile = "UpdateLoginProfile"
+
+// UpdateLoginProfileRequest generates a request for the UpdateLoginProfile operation.
+func (c *IAM) UpdateLoginProfileRequest(input *UpdateLoginProfileInput) (req *request.Request, output *UpdateLoginProfileOutput) {
+	op := &request.Operation{
+		Name:       opUpdateLoginProfile,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &UpdateLoginProfileInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &UpdateLoginProfileOutput{}
+	req.Data = output
+	return
+}
+
+// Changes the password for the specified user.
+//
+// Users can change their own passwords by calling ChangePassword. For more
+// information about modifying passwords, see Managing Passwords (http://docs.aws.amazon.com/IAM/latest/UserGuide/Using_ManagingLogins.html)
+// in the IAM User Guide.
+func (c *IAM) UpdateLoginProfile(input *UpdateLoginProfileInput) (*UpdateLoginProfileOutput, error) {
+	req, out := c.UpdateLoginProfileRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+const opUpdateOpenIDConnectProviderThumbprint = "UpdateOpenIDConnectProviderThumbprint"
+
+// UpdateOpenIDConnectProviderThumbprintRequest generates a request for the UpdateOpenIDConnectProviderThumbprint operation.
+func (c *IAM) UpdateOpenIDConnectProviderThumbprintRequest(input *UpdateOpenIDConnectProviderThumbprintInput) (req *request.Request, output *UpdateOpenIDConnectProviderThumbprintOutput) {
+	op := &request.Operation{
+		Name:       opUpdateOpenIDConnectProviderThumbprint,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &UpdateOpenIDConnectProviderThumbprintInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &UpdateOpenIDConnectProviderThumbprintOutput{}
+	req.Data = output
+	return
+}
+
+// Replaces the existing list of server certificate thumbprints with a new list.
+// +// The list that you pass with this action completely replaces the existing +// list of thumbprints. (The lists are not merged.) +// +// Typically, you need to update a thumbprint only when the identity provider's +// certificate changes, which occurs rarely. However, if the provider's certificate +// does change, any attempt to assume an IAM role that specifies the OIDC provider +// as a principal will fail until the certificate thumbprint is updated. +// +// Because trust for the OpenID Connect provider is ultimately derived from +// the provider's certificate and is validated by the thumbprint, it is a best +// practice to limit access to the UpdateOpenIDConnectProviderThumbprint action +// to highly-privileged users. +func (c *IAM) UpdateOpenIDConnectProviderThumbprint(input *UpdateOpenIDConnectProviderThumbprintInput) (*UpdateOpenIDConnectProviderThumbprintOutput, error) { + req, out := c.UpdateOpenIDConnectProviderThumbprintRequest(input) + err := req.Send() + return out, err +} + +const opUpdateSAMLProvider = "UpdateSAMLProvider" + +// UpdateSAMLProviderRequest generates a request for the UpdateSAMLProvider operation. +func (c *IAM) UpdateSAMLProviderRequest(input *UpdateSAMLProviderInput) (req *request.Request, output *UpdateSAMLProviderOutput) { + op := &request.Operation{ + Name: opUpdateSAMLProvider, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UpdateSAMLProviderInput{} + } + + req = c.newRequest(op, input, output) + output = &UpdateSAMLProviderOutput{} + req.Data = output + return +} + +// Updates the metadata document for an existing SAML provider. +// +// This operation requires Signature Version 4 (http://docs.aws.amazon.com/general/latest/gr/signature-version-4.html). +func (c *IAM) UpdateSAMLProvider(input *UpdateSAMLProviderInput) (*UpdateSAMLProviderOutput, error) { + req, out := c.UpdateSAMLProviderRequest(input) + err := req.Send() + return out, err +} + +const opUpdateSSHPublicKey = "UpdateSSHPublicKey" + +// UpdateSSHPublicKeyRequest generates a request for the UpdateSSHPublicKey operation. +func (c *IAM) UpdateSSHPublicKeyRequest(input *UpdateSSHPublicKeyInput) (req *request.Request, output *UpdateSSHPublicKeyOutput) { + op := &request.Operation{ + Name: opUpdateSSHPublicKey, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UpdateSSHPublicKeyInput{} + } + + req = c.newRequest(op, input, output) + output = &UpdateSSHPublicKeyOutput{} + req.Data = output + return +} + +// Sets the status of the specified SSH public key to active or inactive. SSH +// public keys that are inactive cannot be used for authentication. This action +// can be used to disable a user's SSH public key as part of a key rotation +// work flow. +// +// The SSH public key affected by this action is used only for authenticating +// the associated IAM user to an AWS CodeCommit repository. For more information +// about using SSH keys to authenticate to an AWS CodeCommit repository, see +// Set up AWS CodeCommit for SSH Connections (http://docs.aws.amazon.com/codecommit/latest/userguide/setting-up-credentials-ssh.html) +// in the AWS CodeCommit User Guide. +func (c *IAM) UpdateSSHPublicKey(input *UpdateSSHPublicKeyInput) (*UpdateSSHPublicKeyOutput, error) { + req, out := c.UpdateSSHPublicKeyRequest(input) + err := req.Send() + return out, err +} + +const opUpdateServerCertificate = "UpdateServerCertificate" + +// UpdateServerCertificateRequest generates a request for the UpdateServerCertificate operation. 
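+//
+// A hedged renaming sketch using the UpdateServerCertificate wrapper below
+// (certificate names are placeholders; svc is an assumed *IAM client):
+//
+//    _, err := svc.UpdateServerCertificate(&UpdateServerCertificateInput{
+//        ServerCertificateName:    aws.String("ProductionCert"),
+//        NewServerCertificateName: aws.String("ProdCert"),
+//    })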
+func (c *IAM) UpdateServerCertificateRequest(input *UpdateServerCertificateInput) (req *request.Request, output *UpdateServerCertificateOutput) { + op := &request.Operation{ + Name: opUpdateServerCertificate, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UpdateServerCertificateInput{} + } + + req = c.newRequest(op, input, output) + output = &UpdateServerCertificateOutput{} + req.Data = output + return +} + +// Updates the name and/or the path of the specified server certificate. +// +// For more information about working with server certificates, including a +// list of AWS services that can use the server certificates that you manage +// with IAM, go to Working with Server Certificates (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_server-certs.html) +// in the IAM User Guide. +// +// You should understand the implications of changing a server certificate's +// path or name. For more information, see Renaming a Server Certificate (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_server-certs_manage.html#RenamingServerCerts) +// in the IAM User Guide. To change a server certificate name the requester +// must have appropriate permissions on both the source object and the target +// object. For example, to change the name from ProductionCert to ProdCert, +// the entity making the request must have permission on ProductionCert and +// ProdCert, or must have permission on all (*). For more information about +// permissions, see Access Management (http://docs.aws.amazon.com/IAM/latest/UserGuide/access.html) +// in the IAM User Guide. +func (c *IAM) UpdateServerCertificate(input *UpdateServerCertificateInput) (*UpdateServerCertificateOutput, error) { + req, out := c.UpdateServerCertificateRequest(input) + err := req.Send() + return out, err +} + +const opUpdateSigningCertificate = "UpdateSigningCertificate" + +// UpdateSigningCertificateRequest generates a request for the UpdateSigningCertificate operation. +func (c *IAM) UpdateSigningCertificateRequest(input *UpdateSigningCertificateInput) (req *request.Request, output *UpdateSigningCertificateOutput) { + op := &request.Operation{ + Name: opUpdateSigningCertificate, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UpdateSigningCertificateInput{} + } + + req = c.newRequest(op, input, output) + output = &UpdateSigningCertificateOutput{} + req.Data = output + return +} + +// Changes the status of the specified signing certificate from active to disabled, +// or vice versa. This action can be used to disable a user's signing certificate +// as part of a certificate rotation work flow. +// +// If the UserName field is not specified, the UserName is determined implicitly +// based on the AWS access key ID used to sign the request. Because this action +// works for access keys under the AWS account, you can use this action to manage +// root credentials even if the AWS account has no associated users. +func (c *IAM) UpdateSigningCertificate(input *UpdateSigningCertificateInput) (*UpdateSigningCertificateOutput, error) { + req, out := c.UpdateSigningCertificateRequest(input) + err := req.Send() + return out, err +} + +const opUpdateUser = "UpdateUser" + +// UpdateUserRequest generates a request for the UpdateUser operation. 
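+//
+// A minimal renaming sketch using the UpdateUser wrapper below (user names
+// are illustrative; svc is an assumed *IAM client):
+//
+//    _, err := svc.UpdateUser(&UpdateUserInput{
+//        UserName:    aws.String("Bob"),
+//        NewUserName: aws.String("Robert"),
+//    })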
+func (c *IAM) UpdateUserRequest(input *UpdateUserInput) (req *request.Request, output *UpdateUserOutput) {
+	op := &request.Operation{
+		Name:       opUpdateUser,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &UpdateUserInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &UpdateUserOutput{}
+	req.Data = output
+	return
+}
+
+// Updates the name and/or the path of the specified user.
+//
+// You should understand the implications of changing a user's path or name.
+// For more information, see Renaming Users and Groups (http://docs.aws.amazon.com/IAM/latest/UserGuide/Using_WorkingWithGroupsAndUsers.html)
+// in the IAM User Guide. To change a user name the requester must have appropriate
+// permissions on both the source object and the target object. For example,
+// to change Bob to Robert, the entity making the request must have permission
+// on Bob and Robert, or must have permission on all (*). For more information
+// about permissions, see Permissions and Policies (http://docs.aws.amazon.com/IAM/latest/UserGuide/PermissionsAndPolicies.html).
+func (c *IAM) UpdateUser(input *UpdateUserInput) (*UpdateUserOutput, error) {
+	req, out := c.UpdateUserRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+const opUploadSSHPublicKey = "UploadSSHPublicKey"
+
+// UploadSSHPublicKeyRequest generates a request for the UploadSSHPublicKey operation.
+func (c *IAM) UploadSSHPublicKeyRequest(input *UploadSSHPublicKeyInput) (req *request.Request, output *UploadSSHPublicKeyOutput) {
+	op := &request.Operation{
+		Name:       opUploadSSHPublicKey,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &UploadSSHPublicKeyInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &UploadSSHPublicKeyOutput{}
+	req.Data = output
+	return
+}
+
+// Uploads an SSH public key and associates it with the specified IAM user.
+//
+// The SSH public key uploaded by this action can be used only for authenticating
+// the associated IAM user to an AWS CodeCommit repository. For more information
+// about using SSH keys to authenticate to an AWS CodeCommit repository, see
+// Set up AWS CodeCommit for SSH Connections (http://docs.aws.amazon.com/codecommit/latest/userguide/setting-up-credentials-ssh.html)
+// in the AWS CodeCommit User Guide.
+func (c *IAM) UploadSSHPublicKey(input *UploadSSHPublicKeyInput) (*UploadSSHPublicKeyOutput, error) {
+	req, out := c.UploadSSHPublicKeyRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+const opUploadServerCertificate = "UploadServerCertificate"
+
+// UploadServerCertificateRequest generates a request for the UploadServerCertificate operation.
+func (c *IAM) UploadServerCertificateRequest(input *UploadServerCertificateInput) (req *request.Request, output *UploadServerCertificateOutput) {
+	op := &request.Operation{
+		Name:       opUploadServerCertificate,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &UploadServerCertificateInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &UploadServerCertificateOutput{}
+	req.Data = output
+	return
+}
+
+// Uploads a server certificate entity for the AWS account. The server certificate
+// entity includes a public key certificate, a private key, and an optional
+// certificate chain, which should all be PEM-encoded.
+//
+// For more information about working with server certificates, including a
+// list of AWS services that can use the server certificates that you manage
+// with IAM, go to Working with Server Certificates (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_server-certs.html)
+// in the IAM User Guide.
+//
+// For information about the number of server certificates you can upload,
+// see Limitations on IAM Entities and Objects (http://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html)
+// in the IAM User Guide.
+//
+// Because the body of the public key certificate, private key, and the certificate
+// chain can be large, you should use POST rather than GET when calling UploadServerCertificate.
+// For information about setting up signatures and authorization through the
+// API, go to Signing AWS API Requests (http://docs.aws.amazon.com/general/latest/gr/signing_aws_api_requests.html)
+// in the AWS General Reference. For general information about using the Query
+// API with IAM, go to Calling the API by Making HTTP Query Requests (http://docs.aws.amazon.com/IAM/latest/UserGuide/programming.html)
+// in the IAM User Guide.
+func (c *IAM) UploadServerCertificate(input *UploadServerCertificateInput) (*UploadServerCertificateOutput, error) {
+	req, out := c.UploadServerCertificateRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+const opUploadSigningCertificate = "UploadSigningCertificate"
+
+// UploadSigningCertificateRequest generates a request for the UploadSigningCertificate operation.
+func (c *IAM) UploadSigningCertificateRequest(input *UploadSigningCertificateInput) (req *request.Request, output *UploadSigningCertificateOutput) {
+	op := &request.Operation{
+		Name:       opUploadSigningCertificate,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &UploadSigningCertificateInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &UploadSigningCertificateOutput{}
+	req.Data = output
+	return
+}
+
+// Uploads an X.509 signing certificate and associates it with the specified
+// user. Some AWS services use X.509 signing certificates to validate requests
+// that are signed with a corresponding private key. When you upload the certificate,
+// its default status is Active.
+//
+// If the UserName field is not specified, the user name is determined implicitly
+// based on the AWS access key ID used to sign the request. Because this action
+// works for access keys under the AWS account, you can use this action to manage
+// root credentials even if the AWS account has no associated users.
+//
+// Because the body of an X.509 certificate can be large, you should use POST
+// rather than GET when calling UploadSigningCertificate. For information about
+// setting up signatures and authorization through the API, go to Signing AWS
+// API Requests (http://docs.aws.amazon.com/general/latest/gr/signing_aws_api_requests.html)
+// in the AWS General Reference. For general information about using the Query
+// API with IAM, go to Making Query Requests (http://docs.aws.amazon.com/IAM/latest/UserGuide/IAM_UsingQueryAPI.html)
+// in the Using IAM guide.
+func (c *IAM) UploadSigningCertificate(input *UploadSigningCertificateInput) (*UploadSigningCertificateOutput, error) {
+	req, out := c.UploadSigningCertificateRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+// Contains information about an AWS access key.
+//
+// This data type is used as a response element in the CreateAccessKey and
+// ListAccessKeys actions.
+//
+// The SecretAccessKey value is returned only in response to CreateAccessKey.
+// You can get a secret access key only when you first create an access key;
+// you cannot recover the secret access key later. If you lose a secret access
+// key, you must create a new access key.
+type AccessKey struct {
+	_ struct{} `type:"structure"`
+
+	// The ID for this access key.
+	AccessKeyId *string `min:"16" type:"string" required:"true"`
+
+	// The date when the access key was created.
+	CreateDate *time.Time `type:"timestamp" timestampFormat:"iso8601"`
+
+	// The secret key used to sign requests.
+	SecretAccessKey *string `type:"string" required:"true"`
+
+	// The status of the access key. Active means the key is valid for API calls,
+	// while Inactive means it is not.
+	Status *string `type:"string" required:"true" enum:"statusType"`
+
+	// The name of the IAM user that the access key is associated with.
+	UserName *string `min:"1" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s AccessKey) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s AccessKey) GoString() string {
+	return s.String()
+}
+
+// Contains information about the last time an AWS access key was used.
+//
+// This data type is used as a response element in the GetAccessKeyLastUsed
+// action.
+type AccessKeyLastUsed struct {
+	_ struct{} `type:"structure"`
+
+	// The date and time, in ISO 8601 date-time format (http://www.iso.org/iso/iso8601),
+	// when the access key was most recently used. This field is null when:
+	//
+	// The user does not have an access key.
+	//
+	// An access key exists but has never been used, at least not since IAM started
+	// tracking this information on April 22nd, 2015.
+	//
+	// There is no sign-in data associated with the user.
+	LastUsedDate *time.Time `type:"timestamp" timestampFormat:"iso8601" required:"true"`
+
+	// The AWS region where this access key was most recently used. This field is
+	// null when:
+	//
+	// The user does not have an access key.
+	//
+	// An access key exists but has never been used, at least not since IAM started
+	// tracking this information on April 22nd, 2015.
+	//
+	// There is no sign-in data associated with the user.
+	//
+	// For more information about AWS regions, see Regions and Endpoints (http://docs.aws.amazon.com/general/latest/gr/rande.html)
+	// in the Amazon Web Services General Reference.
+	Region *string `type:"string" required:"true"`
+
+	// The name of the AWS service with which this access key was most recently
+	// used. This field is null when:
+	//
+	// The user does not have an access key.
+	//
+	// An access key exists but has never been used, at least not since IAM started
+	// tracking this information on April 22nd, 2015.
+	//
+	// There is no sign-in data associated with the user.
+	ServiceName *string `type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s AccessKeyLastUsed) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s AccessKeyLastUsed) GoString() string {
+	return s.String()
+}
+
+// Contains information about an AWS access key, without its secret key.
+//
+// This data type is used as a response element in the ListAccessKeys action.
+type AccessKeyMetadata struct {
+	_ struct{} `type:"structure"`
+
+	// The ID for this access key.
+	AccessKeyId *string `min:"16" type:"string"`
+
+	// The date when the access key was created.
+ CreateDate *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + // The status of the access key. Active means the key is valid for API calls; + // Inactive means it is not. + Status *string `type:"string" enum:"statusType"` + + // The name of the IAM user that the key is associated with. + UserName *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s AccessKeyMetadata) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AccessKeyMetadata) GoString() string { + return s.String() +} + +type AddClientIDToOpenIDConnectProviderInput struct { + _ struct{} `type:"structure"` + + // The client ID (also known as audience) to add to the IAM OpenID Connect provider. + ClientID *string `min:"1" type:"string" required:"true"` + + // The Amazon Resource Name (ARN) of the IAM OpenID Connect (OIDC) provider + // to add the client ID to. You can get a list of OIDC provider ARNs by using + // the ListOpenIDConnectProviders action. + OpenIDConnectProviderArn *string `min:"20" type:"string" required:"true"` +} + +// String returns the string representation +func (s AddClientIDToOpenIDConnectProviderInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AddClientIDToOpenIDConnectProviderInput) GoString() string { + return s.String() +} + +type AddClientIDToOpenIDConnectProviderOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s AddClientIDToOpenIDConnectProviderOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AddClientIDToOpenIDConnectProviderOutput) GoString() string { + return s.String() +} + +type AddRoleToInstanceProfileInput struct { + _ struct{} `type:"structure"` + + // The name of the instance profile to update. + InstanceProfileName *string `min:"1" type:"string" required:"true"` + + // The name of the role to add. + RoleName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s AddRoleToInstanceProfileInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AddRoleToInstanceProfileInput) GoString() string { + return s.String() +} + +type AddRoleToInstanceProfileOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s AddRoleToInstanceProfileOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AddRoleToInstanceProfileOutput) GoString() string { + return s.String() +} + +type AddUserToGroupInput struct { + _ struct{} `type:"structure"` + + // The name of the group to update. + GroupName *string `min:"1" type:"string" required:"true"` + + // The name of the user to add. 
+ UserName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s AddUserToGroupInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AddUserToGroupInput) GoString() string { + return s.String() +} + +type AddUserToGroupOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s AddUserToGroupOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AddUserToGroupOutput) GoString() string { + return s.String() +} + +type AttachGroupPolicyInput struct { + _ struct{} `type:"structure"` + + // The name (friendly name, not ARN) of the group to attach the policy to. + GroupName *string `min:"1" type:"string" required:"true"` + + // The Amazon Resource Name (ARN). ARNs are unique identifiers for AWS resources. + // + // For more information about ARNs, go to Amazon Resource Names (ARNs) and + // AWS Service Namespaces (http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) + // in the AWS General Reference. + PolicyArn *string `min:"20" type:"string" required:"true"` +} + +// String returns the string representation +func (s AttachGroupPolicyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AttachGroupPolicyInput) GoString() string { + return s.String() +} + +type AttachGroupPolicyOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s AttachGroupPolicyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AttachGroupPolicyOutput) GoString() string { + return s.String() +} + +type AttachRolePolicyInput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN). ARNs are unique identifiers for AWS resources. + // + // For more information about ARNs, go to Amazon Resource Names (ARNs) and + // AWS Service Namespaces (http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) + // in the AWS General Reference. + PolicyArn *string `min:"20" type:"string" required:"true"` + + // The name (friendly name, not ARN) of the role to attach the policy to. + RoleName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s AttachRolePolicyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AttachRolePolicyInput) GoString() string { + return s.String() +} + +type AttachRolePolicyOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s AttachRolePolicyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AttachRolePolicyOutput) GoString() string { + return s.String() +} + +type AttachUserPolicyInput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN). ARNs are unique identifiers for AWS resources. + // + // For more information about ARNs, go to Amazon Resource Names (ARNs) and + // AWS Service Namespaces (http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) + // in the AWS General Reference. + PolicyArn *string `min:"20" type:"string" required:"true"` + + // The name (friendly name, not ARN) of the user to attach the policy to. 
+ UserName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s AttachUserPolicyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AttachUserPolicyInput) GoString() string { + return s.String() +} + +type AttachUserPolicyOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s AttachUserPolicyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AttachUserPolicyOutput) GoString() string { + return s.String() +} + +// Contains information about an attached policy. +// +// An attached policy is a managed policy that has been attached to a user, +// group, or role. This data type is used as a response element in the ListAttachedGroupPolicies, +// ListAttachedRolePolicies, ListAttachedUserPolicies, and GetAccountAuthorizationDetails +// actions. +// +// For more information about managed policies, refer to Managed Policies and +// Inline Policies (http://docs.aws.amazon.com/IAM/latest/UserGuide/policies-managed-vs-inline.html) +// in the Using IAM guide. +type AttachedPolicy struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN). ARNs are unique identifiers for AWS resources. + // + // For more information about ARNs, go to Amazon Resource Names (ARNs) and + // AWS Service Namespaces (http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) + // in the AWS General Reference. + PolicyArn *string `min:"20" type:"string"` + + // The friendly name of the attached policy. + PolicyName *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s AttachedPolicy) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AttachedPolicy) GoString() string { + return s.String() +} + +type ChangePasswordInput struct { + _ struct{} `type:"structure"` + + // The new password. The new password must conform to the AWS account's password + // policy, if one exists. + NewPassword *string `min:"1" type:"string" required:"true"` + + // The IAM user's current password. + OldPassword *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s ChangePasswordInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ChangePasswordInput) GoString() string { + return s.String() +} + +type ChangePasswordOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s ChangePasswordOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ChangePasswordOutput) GoString() string { + return s.String() +} + +// Contains information about a condition context key. It includes the name +// of the key and specifies the value (or values, if the context key supports +// multiple values) to use in the simulation. This information is used when +// evaluating the Condition elements of the input policies. +// +// This data type is used as an input parameter to SimulatePolicy. +type ContextEntry struct { + _ struct{} `type:"structure"` + + // The full name of a condition context key, including the service prefix. For + // example, aws:SourceIp or s3:VersionId. 
+ ContextKeyName *string `min:"5" type:"string"` + + // The data type of the value (or values) specified in the ContextKeyValues + // parameter. + ContextKeyType *string `type:"string" enum:"ContextKeyTypeEnum"` + + // The value (or values, if the condition context key supports multiple values) + // to provide to the simulation for use when the key is referenced by a Condition + // element in an input policy. + ContextKeyValues []*string `type:"list"` +} + +// String returns the string representation +func (s ContextEntry) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ContextEntry) GoString() string { + return s.String() +} + +type CreateAccessKeyInput struct { + _ struct{} `type:"structure"` + + // The user name that the new key will belong to. + UserName *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s CreateAccessKeyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateAccessKeyInput) GoString() string { + return s.String() +} + +// Contains the response to a successful CreateAccessKey request. +type CreateAccessKeyOutput struct { + _ struct{} `type:"structure"` + + // Information about the access key. + AccessKey *AccessKey `type:"structure" required:"true"` +} + +// String returns the string representation +func (s CreateAccessKeyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateAccessKeyOutput) GoString() string { + return s.String() +} + +type CreateAccountAliasInput struct { + _ struct{} `type:"structure"` + + // The account alias to create. + AccountAlias *string `min:"3" type:"string" required:"true"` +} + +// String returns the string representation +func (s CreateAccountAliasInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateAccountAliasInput) GoString() string { + return s.String() +} + +type CreateAccountAliasOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s CreateAccountAliasOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateAccountAliasOutput) GoString() string { + return s.String() +} + +type CreateGroupInput struct { + _ struct{} `type:"structure"` + + // The name of the group to create. Do not include the path in this value. + GroupName *string `min:"1" type:"string" required:"true"` + + // The path to the group. For more information about paths, see IAM Identifiers + // (http://docs.aws.amazon.com/IAM/latest/UserGuide/Using_Identifiers.html) + // in the Using IAM guide. + // + // This parameter is optional. If it is not included, it defaults to a slash + // (/). + Path *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s CreateGroupInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateGroupInput) GoString() string { + return s.String() +} + +// Contains the response to a successful CreateGroup request. +type CreateGroupOutput struct { + _ struct{} `type:"structure"` + + // Information about the group. 
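+ //
+ // A rough usage sketch, assuming an *iam.IAM client named svc; the group
+ // name and the error handling are illustrative:
+ //
+ //    out, err := svc.CreateGroup(&iam.CreateGroupInput{
+ //        GroupName: aws.String("developers"),
+ //    })
+ //    if err == nil {
+ //        fmt.Println(aws.StringValue(out.Group.Arn))
+ //    }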
+ Group *Group `type:"structure" required:"true"`
+}
+
+// String returns the string representation
+func (s CreateGroupOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreateGroupOutput) GoString() string {
+ return s.String()
+}
+
+type CreateInstanceProfileInput struct {
+ _ struct{} `type:"structure"`
+
+ // The name of the instance profile to create.
+ InstanceProfileName *string `min:"1" type:"string" required:"true"`
+
+ // The path to the instance profile. For more information about paths, see IAM
+ // Identifiers (http://docs.aws.amazon.com/IAM/latest/UserGuide/Using_Identifiers.html)
+ // in the Using IAM guide.
+ //
+ // This parameter is optional. If it is not included, it defaults to a slash
+ // (/).
+ Path *string `min:"1" type:"string"`
+}
+
+// String returns the string representation
+func (s CreateInstanceProfileInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreateInstanceProfileInput) GoString() string {
+ return s.String()
+}
+
+// Contains the response to a successful CreateInstanceProfile request.
+type CreateInstanceProfileOutput struct {
+ _ struct{} `type:"structure"`
+
+ // Information about the instance profile.
+ InstanceProfile *InstanceProfile `type:"structure" required:"true"`
+}
+
+// String returns the string representation
+func (s CreateInstanceProfileOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreateInstanceProfileOutput) GoString() string {
+ return s.String()
+}
+
+type CreateLoginProfileInput struct {
+ _ struct{} `type:"structure"`
+
+ // The new password for the user.
+ Password *string `min:"1" type:"string" required:"true"`
+
+ // Specifies whether the user is required to set a new password on next sign-in.
+ PasswordResetRequired *bool `type:"boolean"`
+
+ // The name of the user to create a password for.
+ UserName *string `min:"1" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s CreateLoginProfileInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreateLoginProfileInput) GoString() string {
+ return s.String()
+}
+
+// Contains the response to a successful CreateLoginProfile request.
+type CreateLoginProfileOutput struct {
+ _ struct{} `type:"structure"`
+
+ // The user name and password create date.
+ LoginProfile *LoginProfile `type:"structure" required:"true"`
+}
+
+// String returns the string representation
+func (s CreateLoginProfileOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreateLoginProfileOutput) GoString() string {
+ return s.String()
+}
+
+type CreateOpenIDConnectProviderInput struct {
+ _ struct{} `type:"structure"`
+
+ // A list of client IDs (also known as audiences). When a mobile or web app
+ // registers with an OpenID Connect provider, it establishes a value that identifies
+ // the application. (This is the value that's sent as the client_id parameter
+ // on OAuth requests.)
+ //
+ // You can register multiple client IDs with the same provider. For example,
+ // you might have multiple applications that use the same OIDC provider. You
+ // cannot register more than 100 client IDs with a single IAM OIDC provider.
+ //
+ // There is no defined format for a client ID.
The CreateOpenIDConnectProviderRequest + // action accepts client IDs up to 255 characters long. + ClientIDList []*string `type:"list"` + + // A list of server certificate thumbprints for the OpenID Connect (OIDC) identity + // provider's server certificate(s). Typically this list includes only one entry. + // However, IAM lets you have up to five thumbprints for an OIDC provider. This + // lets you maintain multiple thumbprints if the identity provider is rotating + // certificates. + // + // The server certificate thumbprint is the hex-encoded SHA-1 hash value of + // the X.509 certificate used by the domain where the OpenID Connect provider + // makes its keys available. It is always a 40-character string. + // + // You must provide at least one thumbprint when creating an IAM OIDC provider. + // For example, if the OIDC provider is server.example.com and the provider + // stores its keys at "https://keys.server.example.com/openid-connect", the + // thumbprint string would be the hex-encoded SHA-1 hash value of the certificate + // used by https://keys.server.example.com. + // + // For more information about obtaining the OIDC provider's thumbprint, see + // Obtaining the Thumbprint for an OpenID Connect Provider (http://docs.aws.amazon.com/IAM/latest/UserGuide/identity-providers-oidc-obtain-thumbprint.html) + // in the IAM User Guide. + ThumbprintList []*string `type:"list" required:"true"` + + // The URL of the identity provider. The URL must begin with "https://" and + // should correspond to the iss claim in the provider's OpenID Connect ID tokens. + // Per the OIDC standard, path components are allowed but query parameters are + // not. Typically the URL consists of only a host name, like "https://server.example.org" + // or "https://example.com". + // + // You cannot register the same provider multiple times in a single AWS account. + // If you try to submit a URL that has already been used for an OpenID Connect + // provider in the AWS account, you will get an error. + Url *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s CreateOpenIDConnectProviderInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateOpenIDConnectProviderInput) GoString() string { + return s.String() +} + +// Contains the response to a successful CreateOpenIDConnectProvider request. +type CreateOpenIDConnectProviderOutput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the IAM OpenID Connect provider that was + // created. For more information, see OpenIDConnectProviderListEntry. + OpenIDConnectProviderArn *string `min:"20" type:"string"` +} + +// String returns the string representation +func (s CreateOpenIDConnectProviderOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateOpenIDConnectProviderOutput) GoString() string { + return s.String() +} + +type CreatePolicyInput struct { + _ struct{} `type:"structure"` + + // A friendly description of the policy. + // + // Typically used to store information about the permissions defined in the + // policy. For example, "Grants access to production DynamoDB tables." + // + // The policy description is immutable. After a value is assigned, it cannot + // be changed. + Description *string `type:"string"` + + // The path for the policy. 
+ // + // For more information about paths, see IAM Identifiers (http://docs.aws.amazon.com/IAM/latest/UserGuide/Using_Identifiers.html) + // in the IAM User Guide. + // + // This parameter is optional. If it is not included, it defaults to a slash + // (/). + Path *string `type:"string"` + + // The policy document. + PolicyDocument *string `min:"1" type:"string" required:"true"` + + // The name of the policy document. + PolicyName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s CreatePolicyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreatePolicyInput) GoString() string { + return s.String() +} + +// Contains the response to a successful CreatePolicy request. +type CreatePolicyOutput struct { + _ struct{} `type:"structure"` + + // Information about the policy. + Policy *Policy `type:"structure"` +} + +// String returns the string representation +func (s CreatePolicyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreatePolicyOutput) GoString() string { + return s.String() +} + +type CreatePolicyVersionInput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN). ARNs are unique identifiers for AWS resources. + // + // For more information about ARNs, go to Amazon Resource Names (ARNs) and + // AWS Service Namespaces (http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) + // in the AWS General Reference. + PolicyArn *string `min:"20" type:"string" required:"true"` + + // The policy document. + PolicyDocument *string `min:"1" type:"string" required:"true"` + + // Specifies whether to set this version as the policy's default version. + // + // When this parameter is true, the new policy version becomes the operative + // version; that is, the version that is in effect for the IAM users, groups, + // and roles that the policy is attached to. + // + // For more information about managed policy versions, see Versioning for Managed + // Policies (http://docs.aws.amazon.com/IAM/latest/UserGuide/policies-managed-versions.html) + // in the IAM User Guide. + SetAsDefault *bool `type:"boolean"` +} + +// String returns the string representation +func (s CreatePolicyVersionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreatePolicyVersionInput) GoString() string { + return s.String() +} + +// Contains the response to a successful CreatePolicyVersion request. +type CreatePolicyVersionOutput struct { + _ struct{} `type:"structure"` + + // Information about the policy version. + PolicyVersion *PolicyVersion `type:"structure"` +} + +// String returns the string representation +func (s CreatePolicyVersionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreatePolicyVersionOutput) GoString() string { + return s.String() +} + +type CreateRoleInput struct { + _ struct{} `type:"structure"` + + // The trust relationship policy document that grants an entity permission to + // assume the role. + AssumeRolePolicyDocument *string `min:"1" type:"string" required:"true"` + + // The path to the role. For more information about paths, see IAM Identifiers + // (http://docs.aws.amazon.com/IAM/latest/UserGuide/Using_Identifiers.html) + // in the Using IAM guide. + // + // This parameter is optional. 
If it is not included, it defaults to a slash
+ // (/).
+ Path *string `min:"1" type:"string"`
+
+ // The name of the role to create.
+ RoleName *string `min:"1" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s CreateRoleInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreateRoleInput) GoString() string {
+ return s.String()
+}
+
+// Contains the response to a successful CreateRole request.
+type CreateRoleOutput struct {
+ _ struct{} `type:"structure"`
+
+ // Information about the role.
+ Role *Role `type:"structure" required:"true"`
+}
+
+// String returns the string representation
+func (s CreateRoleOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreateRoleOutput) GoString() string {
+ return s.String()
+}
+
+type CreateSAMLProviderInput struct {
+ _ struct{} `type:"structure"`
+
+ // The name of the provider to create.
+ Name *string `min:"1" type:"string" required:"true"`
+
+ // An XML document generated by an identity provider (IdP) that supports SAML
+ // 2.0. The document includes the issuer's name, expiration information, and
+ // keys that can be used to validate the SAML authentication response (assertions)
+ // that are received from the IdP. You must generate the metadata document using
+ // the identity management software that is used as your organization's IdP.
+ //
+ // For more information, see About SAML 2.0-based Federation (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_saml.html)
+ // in the IAM User Guide.
+ SAMLMetadataDocument *string `min:"1000" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s CreateSAMLProviderInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreateSAMLProviderInput) GoString() string {
+ return s.String()
+}
+
+// Contains the response to a successful CreateSAMLProvider request.
+type CreateSAMLProviderOutput struct {
+ _ struct{} `type:"structure"`
+
+ // The Amazon Resource Name (ARN) of the SAML provider.
+ SAMLProviderArn *string `min:"20" type:"string"`
+}
+
+// String returns the string representation
+func (s CreateSAMLProviderOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreateSAMLProviderOutput) GoString() string {
+ return s.String()
+}
+
+type CreateUserInput struct {
+ _ struct{} `type:"structure"`
+
+ // The path for the user name. For more information about paths, see IAM Identifiers
+ // (http://docs.aws.amazon.com/IAM/latest/UserGuide/Using_Identifiers.html)
+ // in the Using IAM guide.
+ //
+ // This parameter is optional. If it is not included, it defaults to a slash
+ // (/).
+ Path *string `min:"1" type:"string"`
+
+ // The name of the user to create.
+ UserName *string `min:"1" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s CreateUserInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreateUserInput) GoString() string {
+ return s.String()
+}
+
+// Contains the response to a successful CreateUser request.
+type CreateUserOutput struct {
+ _ struct{} `type:"structure"`
+
+ // Information about the user.
+ User *User `type:"structure"` +} + +// String returns the string representation +func (s CreateUserOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateUserOutput) GoString() string { + return s.String() +} + +type CreateVirtualMFADeviceInput struct { + _ struct{} `type:"structure"` + + // The path for the virtual MFA device. For more information about paths, see + // IAM Identifiers (http://docs.aws.amazon.com/IAM/latest/UserGuide/Using_Identifiers.html) + // in the Using IAM guide. + // + // This parameter is optional. If it is not included, it defaults to a slash + // (/). + Path *string `min:"1" type:"string"` + + // The name of the virtual MFA device. Use with path to uniquely identify a + // virtual MFA device. + VirtualMFADeviceName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s CreateVirtualMFADeviceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateVirtualMFADeviceInput) GoString() string { + return s.String() +} + +// Contains the response to a successful CreateVirtualMFADevice request. +type CreateVirtualMFADeviceOutput struct { + _ struct{} `type:"structure"` + + // A newly created virtual MFA device. + VirtualMFADevice *VirtualMFADevice `type:"structure" required:"true"` +} + +// String returns the string representation +func (s CreateVirtualMFADeviceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateVirtualMFADeviceOutput) GoString() string { + return s.String() +} + +type DeactivateMFADeviceInput struct { + _ struct{} `type:"structure"` + + // The serial number that uniquely identifies the MFA device. For virtual MFA + // devices, the serial number is the device ARN. + SerialNumber *string `min:"9" type:"string" required:"true"` + + // The name of the user whose MFA device you want to deactivate. + UserName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeactivateMFADeviceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeactivateMFADeviceInput) GoString() string { + return s.String() +} + +type DeactivateMFADeviceOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeactivateMFADeviceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeactivateMFADeviceOutput) GoString() string { + return s.String() +} + +type DeleteAccessKeyInput struct { + _ struct{} `type:"structure"` + + // The access key ID for the access key ID and secret access key you want to + // delete. + AccessKeyId *string `min:"16" type:"string" required:"true"` + + // The name of the user whose key you want to delete. 
+ UserName *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s DeleteAccessKeyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteAccessKeyInput) GoString() string { + return s.String() +} + +type DeleteAccessKeyOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteAccessKeyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteAccessKeyOutput) GoString() string { + return s.String() +} + +type DeleteAccountAliasInput struct { + _ struct{} `type:"structure"` + + // The name of the account alias to delete. + AccountAlias *string `min:"3" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteAccountAliasInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteAccountAliasInput) GoString() string { + return s.String() +} + +type DeleteAccountAliasOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteAccountAliasOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteAccountAliasOutput) GoString() string { + return s.String() +} + +type DeleteAccountPasswordPolicyInput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteAccountPasswordPolicyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteAccountPasswordPolicyInput) GoString() string { + return s.String() +} + +type DeleteAccountPasswordPolicyOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteAccountPasswordPolicyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteAccountPasswordPolicyOutput) GoString() string { + return s.String() +} + +type DeleteGroupInput struct { + _ struct{} `type:"structure"` + + // The name of the group to delete. + GroupName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteGroupInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteGroupInput) GoString() string { + return s.String() +} + +type DeleteGroupOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteGroupOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteGroupOutput) GoString() string { + return s.String() +} + +type DeleteGroupPolicyInput struct { + _ struct{} `type:"structure"` + + // The name (friendly name, not ARN) identifying the group that the policy is + // embedded in. + GroupName *string `min:"1" type:"string" required:"true"` + + // The name identifying the policy document to delete. 
+ PolicyName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteGroupPolicyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteGroupPolicyInput) GoString() string { + return s.String() +} + +type DeleteGroupPolicyOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteGroupPolicyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteGroupPolicyOutput) GoString() string { + return s.String() +} + +type DeleteInstanceProfileInput struct { + _ struct{} `type:"structure"` + + // The name of the instance profile to delete. + InstanceProfileName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteInstanceProfileInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteInstanceProfileInput) GoString() string { + return s.String() +} + +type DeleteInstanceProfileOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteInstanceProfileOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteInstanceProfileOutput) GoString() string { + return s.String() +} + +type DeleteLoginProfileInput struct { + _ struct{} `type:"structure"` + + // The name of the user whose password you want to delete. + UserName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteLoginProfileInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteLoginProfileInput) GoString() string { + return s.String() +} + +type DeleteLoginProfileOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteLoginProfileOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteLoginProfileOutput) GoString() string { + return s.String() +} + +type DeleteOpenIDConnectProviderInput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the IAM OpenID Connect provider to delete. + // You can get a list of OpenID Connect provider ARNs by using the ListOpenIDConnectProviders + // action. + OpenIDConnectProviderArn *string `min:"20" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteOpenIDConnectProviderInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteOpenIDConnectProviderInput) GoString() string { + return s.String() +} + +type DeleteOpenIDConnectProviderOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteOpenIDConnectProviderOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteOpenIDConnectProviderOutput) GoString() string { + return s.String() +} + +type DeletePolicyInput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN). ARNs are unique identifiers for AWS resources. 
+ // + // For more information about ARNs, go to Amazon Resource Names (ARNs) and + // AWS Service Namespaces (http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) + // in the AWS General Reference. + PolicyArn *string `min:"20" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeletePolicyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeletePolicyInput) GoString() string { + return s.String() +} + +type DeletePolicyOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeletePolicyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeletePolicyOutput) GoString() string { + return s.String() +} + +type DeletePolicyVersionInput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN). ARNs are unique identifiers for AWS resources. + // + // For more information about ARNs, go to Amazon Resource Names (ARNs) and + // AWS Service Namespaces (http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) + // in the AWS General Reference. + PolicyArn *string `min:"20" type:"string" required:"true"` + + // The policy version to delete. + // + // For more information about managed policy versions, see Versioning for Managed + // Policies (http://docs.aws.amazon.com/IAM/latest/UserGuide/policies-managed-versions.html) + // in the IAM User Guide. + VersionId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DeletePolicyVersionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeletePolicyVersionInput) GoString() string { + return s.String() +} + +type DeletePolicyVersionOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeletePolicyVersionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeletePolicyVersionOutput) GoString() string { + return s.String() +} + +type DeleteRoleInput struct { + _ struct{} `type:"structure"` + + // The name of the role to delete. + RoleName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteRoleInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteRoleInput) GoString() string { + return s.String() +} + +type DeleteRoleOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteRoleOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteRoleOutput) GoString() string { + return s.String() +} + +type DeleteRolePolicyInput struct { + _ struct{} `type:"structure"` + + // The name identifying the policy document to delete. + PolicyName *string `min:"1" type:"string" required:"true"` + + // The name (friendly name, not ARN) identifying the role that the policy is + // embedded in. 
+ RoleName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteRolePolicyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteRolePolicyInput) GoString() string { + return s.String() +} + +type DeleteRolePolicyOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteRolePolicyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteRolePolicyOutput) GoString() string { + return s.String() +} + +type DeleteSAMLProviderInput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the SAML provider to delete. + SAMLProviderArn *string `min:"20" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteSAMLProviderInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteSAMLProviderInput) GoString() string { + return s.String() +} + +type DeleteSAMLProviderOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteSAMLProviderOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteSAMLProviderOutput) GoString() string { + return s.String() +} + +type DeleteSSHPublicKeyInput struct { + _ struct{} `type:"structure"` + + // The unique identifier for the SSH public key. + SSHPublicKeyId *string `min:"20" type:"string" required:"true"` + + // The name of the IAM user associated with the SSH public key. + UserName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteSSHPublicKeyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteSSHPublicKeyInput) GoString() string { + return s.String() +} + +type DeleteSSHPublicKeyOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteSSHPublicKeyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteSSHPublicKeyOutput) GoString() string { + return s.String() +} + +type DeleteServerCertificateInput struct { + _ struct{} `type:"structure"` + + // The name of the server certificate you want to delete. + ServerCertificateName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteServerCertificateInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteServerCertificateInput) GoString() string { + return s.String() +} + +type DeleteServerCertificateOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteServerCertificateOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteServerCertificateOutput) GoString() string { + return s.String() +} + +type DeleteSigningCertificateInput struct { + _ struct{} `type:"structure"` + + // The ID of the signing certificate to delete. + CertificateId *string `min:"24" type:"string" required:"true"` + + // The name of the user the signing certificate belongs to. 
+ UserName *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s DeleteSigningCertificateInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteSigningCertificateInput) GoString() string { + return s.String() +} + +type DeleteSigningCertificateOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteSigningCertificateOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteSigningCertificateOutput) GoString() string { + return s.String() +} + +type DeleteUserInput struct { + _ struct{} `type:"structure"` + + // The name of the user to delete. + UserName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteUserInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteUserInput) GoString() string { + return s.String() +} + +type DeleteUserOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteUserOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteUserOutput) GoString() string { + return s.String() +} + +type DeleteUserPolicyInput struct { + _ struct{} `type:"structure"` + + // The name identifying the policy document to delete. + PolicyName *string `min:"1" type:"string" required:"true"` + + // The name (friendly name, not ARN) identifying the user that the policy is + // embedded in. + UserName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteUserPolicyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteUserPolicyInput) GoString() string { + return s.String() +} + +type DeleteUserPolicyOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteUserPolicyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteUserPolicyOutput) GoString() string { + return s.String() +} + +type DeleteVirtualMFADeviceInput struct { + _ struct{} `type:"structure"` + + // The serial number that uniquely identifies the MFA device. For virtual MFA + // devices, the serial number is the same as the ARN. + SerialNumber *string `min:"9" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteVirtualMFADeviceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteVirtualMFADeviceInput) GoString() string { + return s.String() +} + +type DeleteVirtualMFADeviceOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteVirtualMFADeviceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteVirtualMFADeviceOutput) GoString() string { + return s.String() +} + +type DetachGroupPolicyInput struct { + _ struct{} `type:"structure"` + + // The name (friendly name, not ARN) of the group to detach the policy from. + GroupName *string `min:"1" type:"string" required:"true"` + + // The Amazon Resource Name (ARN). ARNs are unique identifiers for AWS resources. 
+ // + // For more information about ARNs, go to Amazon Resource Names (ARNs) and + // AWS Service Namespaces (http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) + // in the AWS General Reference. + PolicyArn *string `min:"20" type:"string" required:"true"` +} + +// String returns the string representation +func (s DetachGroupPolicyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DetachGroupPolicyInput) GoString() string { + return s.String() +} + +type DetachGroupPolicyOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DetachGroupPolicyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DetachGroupPolicyOutput) GoString() string { + return s.String() +} + +type DetachRolePolicyInput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN). ARNs are unique identifiers for AWS resources. + // + // For more information about ARNs, go to Amazon Resource Names (ARNs) and + // AWS Service Namespaces (http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) + // in the AWS General Reference. + PolicyArn *string `min:"20" type:"string" required:"true"` + + // The name (friendly name, not ARN) of the role to detach the policy from. + RoleName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DetachRolePolicyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DetachRolePolicyInput) GoString() string { + return s.String() +} + +type DetachRolePolicyOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DetachRolePolicyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DetachRolePolicyOutput) GoString() string { + return s.String() +} + +type DetachUserPolicyInput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN). ARNs are unique identifiers for AWS resources. + // + // For more information about ARNs, go to Amazon Resource Names (ARNs) and + // AWS Service Namespaces (http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) + // in the AWS General Reference. + PolicyArn *string `min:"20" type:"string" required:"true"` + + // The name (friendly name, not ARN) of the user to detach the policy from. + UserName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DetachUserPolicyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DetachUserPolicyInput) GoString() string { + return s.String() +} + +type DetachUserPolicyOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DetachUserPolicyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DetachUserPolicyOutput) GoString() string { + return s.String() +} + +type EnableMFADeviceInput struct { + _ struct{} `type:"structure"` + + // An authentication code emitted by the device. + AuthenticationCode1 *string `min:"6" type:"string" required:"true"` + + // A subsequent authentication code emitted by the device. 
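+ //
+ // An enablement sketch, assuming an *iam.IAM client named svc; all values
+ // are illustrative, and the two codes are a first and a subsequent code
+ // emitted by the device:
+ //
+ //    _, err := svc.EnableMFADevice(&iam.EnableMFADeviceInput{
+ //        UserName:            aws.String("alice"),
+ //        SerialNumber:        aws.String("arn:aws:iam::123456789012:mfa/alice"),
+ //        AuthenticationCode1: aws.String("123456"),
+ //        AuthenticationCode2: aws.String("789012"),
+ //    })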
+ AuthenticationCode2 *string `min:"6" type:"string" required:"true"` + + // The serial number that uniquely identifies the MFA device. For virtual MFA + // devices, the serial number is the device ARN. + SerialNumber *string `min:"9" type:"string" required:"true"` + + // The name of the user for whom you want to enable the MFA device. + UserName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s EnableMFADeviceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s EnableMFADeviceInput) GoString() string { + return s.String() +} + +type EnableMFADeviceOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s EnableMFADeviceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s EnableMFADeviceOutput) GoString() string { + return s.String() +} + +// Contains the results of a simulation. +// +// This data type is used by the return parameter of SimulatePolicy. +type EvaluationResult struct { + _ struct{} `type:"structure"` + + // The name of the API action tested on the indicated resource. + EvalActionName *string `min:"3" type:"string" required:"true"` + + // The result of the simulation. + EvalDecision *string `type:"string" required:"true" enum:"PolicyEvaluationDecisionType"` + + // Additional details about the results of the evaluation decision. When there + // are both IAM policies and resource policies, this parameter explains how + // each set of policies contributes to the final evaluation decision. When simulating + // cross-account access to a resource, both the resource-based policy and the + // caller's IAM policy must grant access. See How IAM Roles Differ from Resource-based + // Policies (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_compare-resource-policies.html) + EvalDecisionDetails map[string]*string `type:"map"` + + // The ARN of the resource that the indicated API action was tested on. + EvalResourceName *string `min:"1" type:"string"` + + // A list of the statements in the input policies that determine the result + // for this scenario. Remember that even if multiple statements allow the action + // on the resource, if only one statement denies that action, then the explicit + // deny overrides any allow, and the deny statement is the only entry included + // in the result. + MatchedStatements []*Statement `type:"list"` + + // A list of context keys that are required by the included input policies but + // that were not provided by one of the input parameters. To discover the context + // keys used by a set of policies, you can call GetContextKeysForCustomPolicy + // or GetContextKeysForPrincipalPolicy. + // + // If the response includes any keys in this list, then the reported results + // might be untrustworthy because the simulation could not completely evaluate + // all of the conditions specified in the policies that would occur in a real + // world request. + MissingContextValues []*string `type:"list"` + + // The individual results of the simulation of the API action specified in EvalActionName + // on each resource. 
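+ //
+ // A consumption sketch, assuming resp is the response of a policy simulation
+ // call (for example SimulateCustomPolicy) whose EvaluationResults field
+ // carries these entries:
+ //
+ //    for _, r := range resp.EvaluationResults {
+ //        fmt.Printf("%s -> %s\n",
+ //            aws.StringValue(r.EvalActionName),
+ //            aws.StringValue(r.EvalDecision))
+ //    }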
+ ResourceSpecificResults []*ResourceSpecificResult `type:"list"` +} + +// String returns the string representation +func (s EvaluationResult) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s EvaluationResult) GoString() string { + return s.String() +} + +type GenerateCredentialReportInput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s GenerateCredentialReportInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GenerateCredentialReportInput) GoString() string { + return s.String() +} + +// Contains the response to a successful GenerateCredentialReport request. +type GenerateCredentialReportOutput struct { + _ struct{} `type:"structure"` + + // Information about the credential report. + Description *string `type:"string"` + + // Information about the state of the credential report. + State *string `type:"string" enum:"ReportStateType"` +} + +// String returns the string representation +func (s GenerateCredentialReportOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GenerateCredentialReportOutput) GoString() string { + return s.String() +} + +type GetAccessKeyLastUsedInput struct { + _ struct{} `type:"structure"` + + // The identifier of an access key. + AccessKeyId *string `min:"16" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetAccessKeyLastUsedInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetAccessKeyLastUsedInput) GoString() string { + return s.String() +} + +// Contains the response to a successful GetAccessKeyLastUsed request. It is +// also returned as a member of the AccessKeyMetaData structure returned by +// the ListAccessKeys action. +type GetAccessKeyLastUsedOutput struct { + _ struct{} `type:"structure"` + + // Contains information about the last time the access key was used. + AccessKeyLastUsed *AccessKeyLastUsed `type:"structure"` + + // The name of the AWS IAM user that owns this access key. + UserName *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s GetAccessKeyLastUsedOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetAccessKeyLastUsedOutput) GoString() string { + return s.String() +} + +type GetAccountAuthorizationDetailsInput struct { + _ struct{} `type:"structure"` + + // A list of entity types (user, group, role, local managed policy, or AWS managed + // policy) for filtering the results. + Filter []*string `type:"list"` + + // Use this parameter only when paginating results and only after you receive + // a response indicating that the results are truncated. Set it to the value + // of the Marker element in the response that you received to indicate where + // the next call should start. + Marker *string `min:"1" type:"string"` + + // Use this only when paginating results to indicate the maximum number of items + // you want in the response. If additional items exist beyond the maximum you + // specify, the IsTruncated response element is true. + // + // This parameter is optional. If you do not include it, it defaults to 100. + // Note that IAM might return fewer results, even when there are more results + // available. 
In that case, the IsTruncated response element returns true and + // Marker contains a value to include in the subsequent call that tells the + // service where to continue from. + MaxItems *int64 `min:"1" type:"integer"` +} + +// String returns the string representation +func (s GetAccountAuthorizationDetailsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetAccountAuthorizationDetailsInput) GoString() string { + return s.String() +} + +// Contains the response to a successful GetAccountAuthorizationDetails request. +type GetAccountAuthorizationDetailsOutput struct { + _ struct{} `type:"structure"` + + // A list containing information about IAM groups. + GroupDetailList []*GroupDetail `type:"list"` + + // A flag that indicates whether there are more items to return. If your results + // were truncated, you can make a subsequent pagination request using the Marker + // request parameter to retrieve more items. Note that IAM might return fewer + // than the MaxItems number of results even when there are more results available. + // We recommend that you check IsTruncated after every call to ensure that you + // receive all of your results. + IsTruncated *bool `type:"boolean"` + + // When IsTruncated is true, this element is present and contains the value + // to use for the Marker parameter in a subsequent pagination request. + Marker *string `min:"1" type:"string"` + + // A list containing information about managed policies. + Policies []*ManagedPolicyDetail `type:"list"` + + // A list containing information about IAM roles. + RoleDetailList []*RoleDetail `type:"list"` + + // A list containing information about IAM users. + UserDetailList []*UserDetail `type:"list"` +} + +// String returns the string representation +func (s GetAccountAuthorizationDetailsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetAccountAuthorizationDetailsOutput) GoString() string { + return s.String() +} + +type GetAccountPasswordPolicyInput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s GetAccountPasswordPolicyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetAccountPasswordPolicyInput) GoString() string { + return s.String() +} + +// Contains the response to a successful GetAccountPasswordPolicy request. +type GetAccountPasswordPolicyOutput struct { + _ struct{} `type:"structure"` + + // Contains information about the account password policy. + // + // This data type is used as a response element in the GetAccountPasswordPolicy + // action. + PasswordPolicy *PasswordPolicy `type:"structure" required:"true"` +} + +// String returns the string representation +func (s GetAccountPasswordPolicyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetAccountPasswordPolicyOutput) GoString() string { + return s.String() +} + +type GetAccountSummaryInput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s GetAccountSummaryInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetAccountSummaryInput) GoString() string { + return s.String() +} + +// Contains the response to a successful GetAccountSummary request. 
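+//
+// A reading sketch, assuming an *iam.IAM client named svc; "Users" is one of
+// the SummaryMap keys documented below:
+//
+//    out, err := svc.GetAccountSummary(&iam.GetAccountSummaryInput{})
+//    if err == nil {
+//        fmt.Println("IAM users:", aws.Int64Value(out.SummaryMap["Users"]))
+//    }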
+type GetAccountSummaryOutput struct { + _ struct{} `type:"structure"` + + // A set of key value pairs containing information about IAM entity usage and + // IAM quotas. + // + // SummaryMap contains the following keys: AccessKeysPerUserQuota + // + // The maximum number of active access keys allowed for each IAM user. + // + // AccountAccessKeysPresent + // + // This value is 1 if the AWS account (root) has an access key, otherwise it + // is 0. + // + // AccountMFAEnabled + // + // This value is 1 if the AWS account (root) has an MFA device assigned, otherwise + // it is 0. + // + // AccountSigningCertificatesPresent + // + // This value is 1 if the AWS account (root) has a signing certificate, otherwise + // it is 0. + // + // AssumeRolePolicySizeQuota + // + // The maximum allowed size for assume role policy documents (trust policies), + // in non-whitespace characters. + // + // AttachedPoliciesPerGroupQuota + // + // The maximum number of managed policies that can be attached to an IAM group. + // + // AttachedPoliciesPerRoleQuota + // + // The maximum number of managed policies that can be attached to an IAM role. + // + // AttachedPoliciesPerUserQuota + // + // The maximum number of managed policies that can be attached to an IAM user. + // + // GroupPolicySizeQuota + // + // The maximum allowed size for the aggregate of all inline policies embedded + // in an IAM group, in non-whitespace characters. + // + // Groups + // + // The number of IAM groups in the AWS account. + // + // GroupsPerUserQuota + // + // The maximum number of IAM groups each IAM user can belong to. + // + // GroupsQuota + // + // The maximum number of IAM groups allowed in the AWS account. + // + // InstanceProfiles + // + // The number of instance profiles in the AWS account. + // + // InstanceProfilesQuota + // + // The maximum number of instance profiles allowed in the AWS account. + // + // MFADevices + // + // The number of MFA devices in the AWS account, including those assigned and + // unassigned. + // + // MFADevicesInUse + // + // The number of MFA devices that have been assigned to an IAM user or to the + // AWS account (root). + // + // Policies + // + // The number of customer managed policies in the AWS account. + // + // PoliciesQuota + // + // The maximum number of customer managed policies allowed in the AWS account. + // + // PolicySizeQuota + // + // The maximum allowed size of a customer managed policy, in non-whitespace + // characters. + // + // PolicyVersionsInUse + // + // The number of managed policies that are attached to IAM users, groups, or + // roles in the AWS account. + // + // PolicyVersionsInUseQuota + // + // The maximum number of managed policies that can be attached to IAM users, + // groups, or roles in the AWS account. + // + // Providers + // + // The number of identity providers in the AWS account. + // + // RolePolicySizeQuota + // + // The maximum allowed size for the aggregate of all inline policies (access + // policies, not the trust policy) embedded in an IAM role, in non-whitespace + // characters. + // + // Roles + // + // The number of IAM roles in the AWS account. + // + // RolesQuota + // + // The maximum number of IAM roles allowed in the AWS account. + // + // ServerCertificates + // + // The number of server certificates in the AWS account. + // + // ServerCertificatesQuota + // + // The maximum number of server certificates allowed in the AWS account. 
+ //
+ // SigningCertificatesPerUserQuota
+ //
+ // The maximum number of X.509 signing certificates allowed for each IAM user.
+ //
+ // UserPolicySizeQuota
+ //
+ // The maximum allowed size for the aggregate of all inline policies embedded
+ // in an IAM user, in non-whitespace characters.
+ //
+ // Users
+ //
+ // The number of IAM users in the AWS account.
+ //
+ // UsersQuota
+ //
+ // The maximum number of IAM users allowed in the AWS account.
+ //
+ // VersionsPerPolicyQuota
+ //
+ // The maximum number of policy versions allowed for each managed policy.
+ SummaryMap map[string]*int64 `type:"map"`
+}
+
+// String returns the string representation
+func (s GetAccountSummaryOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetAccountSummaryOutput) GoString() string {
+ return s.String()
+}
+
+type GetContextKeysForCustomPolicyInput struct {
+ _ struct{} `type:"structure"`
+
+ // A list of policies for which you want the list of context keys used in Condition
+ // elements. Each document is specified as a string containing the complete,
+ // valid JSON text of an IAM policy.
+ PolicyInputList []*string `type:"list" required:"true"`
+}
+
+// String returns the string representation
+func (s GetContextKeysForCustomPolicyInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetContextKeysForCustomPolicyInput) GoString() string {
+ return s.String()
+}
+
+// Contains the response to a successful GetContextKeysForPrincipalPolicy or
+// GetContextKeysForCustomPolicy request.
+type GetContextKeysForPolicyResponse struct {
+ _ struct{} `type:"structure"`
+
+ // The list of context keys that are used in the Condition elements of the input
+ // policies.
+ ContextKeyNames []*string `type:"list"`
+}
+
+// String returns the string representation
+func (s GetContextKeysForPolicyResponse) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetContextKeysForPolicyResponse) GoString() string {
+ return s.String()
+}
+
+type GetContextKeysForPrincipalPolicyInput struct {
+ _ struct{} `type:"structure"`
+
+ // An optional list of additional policies for which you want the list of
+ // context keys used in Condition elements.
+ PolicyInputList []*string `type:"list"`
+
+ // The ARN of a user, group, or role whose policies contain the context keys
+ // that you want listed. If you specify a user, the list includes context keys
+ // that are found in all policies attached to the user as well as to all groups
+ // that the user is a member of. If you pick a group or a role, then it includes
+ // only those context keys that are found in policies attached to that entity.
+ // Note that all parameters are shown in unencoded form here for clarity, but
+ // must be URL encoded to be included as a part of a real HTTP request.
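+ //
+ // A call sketch, assuming an *iam.IAM client named svc; the source ARN is
+ // an illustrative assumption:
+ //
+ //    out, err := svc.GetContextKeysForPrincipalPolicy(
+ //        &iam.GetContextKeysForPrincipalPolicyInput{
+ //            PolicySourceArn: aws.String("arn:aws:iam::123456789012:user/alice"),
+ //        })
+ //    // out.ContextKeyNames holds the discovered context keys.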
+ PolicySourceArn *string `min:"20" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetContextKeysForPrincipalPolicyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetContextKeysForPrincipalPolicyInput) GoString() string { + return s.String() +} + +type GetCredentialReportInput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s GetCredentialReportInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetCredentialReportInput) GoString() string { + return s.String() +} + +// Contains the response to a successful GetCredentialReport request. +type GetCredentialReportOutput struct { + _ struct{} `type:"structure"` + + // Contains the credential report. The report is Base64-encoded. + Content []byte `type:"blob"` + + // The date and time when the credential report was created, in ISO 8601 date-time + // format (http://www.iso.org/iso/iso8601). + GeneratedTime *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + // The format (MIME type) of the credential report. + ReportFormat *string `type:"string" enum:"ReportFormatType"` +} + +// String returns the string representation +func (s GetCredentialReportOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetCredentialReportOutput) GoString() string { + return s.String() +} + +type GetGroupInput struct { + _ struct{} `type:"structure"` + + // The name of the group. + GroupName *string `min:"1" type:"string" required:"true"` + + // Use this parameter only when paginating results and only after you receive + // a response indicating that the results are truncated. Set it to the value + // of the Marker element in the response that you received to indicate where + // the next call should start. + Marker *string `min:"1" type:"string"` + + // Use this only when paginating results to indicate the maximum number of items + // you want in the response. If additional items exist beyond the maximum you + // specify, the IsTruncated response element is true. + // + // This parameter is optional. If you do not include it, it defaults to 100. + // Note that IAM might return fewer results, even when there are more results + // available. In that case, the IsTruncated response element returns true and + // Marker contains a value to include in the subsequent call that tells the + // service where to continue from. + MaxItems *int64 `min:"1" type:"integer"` +} + +// String returns the string representation +func (s GetGroupInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetGroupInput) GoString() string { + return s.String() +} + +// Contains the response to a successful GetGroup request. +type GetGroupOutput struct { + _ struct{} `type:"structure"` + + // Information about the group. + Group *Group `type:"structure" required:"true"` + + // A flag that indicates whether there are more items to return. If your results + // were truncated, you can make a subsequent pagination request using the Marker + // request parameter to retrieve more items. Note that IAM might return fewer + // than the MaxItems number of results even when there are more results available. + // We recommend that you check IsTruncated after every call to ensure that you + // receive all of your results. 
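+ //
+ // A pagination sketch following the Marker/IsTruncated contract described
+ // here; the client named svc, the group name, and the error handling are
+ // illustrative assumptions:
+ //
+ //    in := &iam.GetGroupInput{GroupName: aws.String("developers")}
+ //    for {
+ //        out, err := svc.GetGroup(in)
+ //        if err != nil {
+ //            break
+ //        }
+ //        // ... consume out.Users ...
+ //        if !aws.BoolValue(out.IsTruncated) {
+ //            break
+ //        }
+ //        in.Marker = out.Marker
+ //    }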
+ IsTruncated *bool `type:"boolean"`
+
+ // When IsTruncated is true, this element is present and contains the value
+ // to use for the Marker parameter in a subsequent pagination request.
+ Marker *string `min:"1" type:"string"`
+
+ // A list of users in the group.
+ Users []*User `type:"list" required:"true"`
+}
+
+// String returns the string representation
+func (s GetGroupOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetGroupOutput) GoString() string {
+ return s.String()
+}
+
+type GetGroupPolicyInput struct {
+ _ struct{} `type:"structure"`
+
+ // The name of the group the policy is associated with.
+ GroupName *string `min:"1" type:"string" required:"true"`
+
+ // The name of the policy document to get.
+ PolicyName *string `min:"1" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s GetGroupPolicyInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetGroupPolicyInput) GoString() string {
+ return s.String()
+}
+
+// Contains the response to a successful GetGroupPolicy request.
+type GetGroupPolicyOutput struct {
+ _ struct{} `type:"structure"`
+
+ // The group the policy is associated with.
+ GroupName *string `min:"1" type:"string" required:"true"`
+
+ // The policy document.
+ PolicyDocument *string `min:"1" type:"string" required:"true"`
+
+ // The name of the policy.
+ PolicyName *string `min:"1" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s GetGroupPolicyOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetGroupPolicyOutput) GoString() string {
+ return s.String()
+}
+
+type GetInstanceProfileInput struct {
+ _ struct{} `type:"structure"`
+
+ // The name of the instance profile to get information about.
+ InstanceProfileName *string `min:"1" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s GetInstanceProfileInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetInstanceProfileInput) GoString() string {
+ return s.String()
+}
+
+// Contains the response to a successful GetInstanceProfile request.
+type GetInstanceProfileOutput struct {
+ _ struct{} `type:"structure"`
+
+ // Information about the instance profile.
+ InstanceProfile *InstanceProfile `type:"structure" required:"true"`
+}
+
+// String returns the string representation
+func (s GetInstanceProfileOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetInstanceProfileOutput) GoString() string {
+ return s.String()
+}
+
+type GetLoginProfileInput struct {
+ _ struct{} `type:"structure"`
+
+ // The name of the user whose login profile you want to retrieve.
+ UserName *string `min:"1" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s GetLoginProfileInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetLoginProfileInput) GoString() string {
+ return s.String()
+}
+
+// Contains the response to a successful GetLoginProfile request.
+type GetLoginProfileOutput struct {
+ _ struct{} `type:"structure"`
+
+ // The user name and the password creation date for the user.
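+ //
+ // Illustrative only, assuming an *iam.IAM client named svc and a hypothetical
+ // user name:
+ //
+ //	resp, err := svc.GetLoginProfile(&iam.GetLoginProfileInput{
+ //		UserName: aws.String("alice"),
+ //	})
+ //	if err == nil {
+ //		fmt.Println(resp.LoginProfile.CreateDate)
+ //	}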
+ LoginProfile *LoginProfile `type:"structure" required:"true"` +} + +// String returns the string representation +func (s GetLoginProfileOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetLoginProfileOutput) GoString() string { + return s.String() +} + +type GetOpenIDConnectProviderInput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the IAM OpenID Connect (OIDC) provider + // to get information for. You can get a list of OIDC provider ARNs by using + // the ListOpenIDConnectProviders action. + OpenIDConnectProviderArn *string `min:"20" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetOpenIDConnectProviderInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetOpenIDConnectProviderInput) GoString() string { + return s.String() +} + +// Contains the response to a successful GetOpenIDConnectProvider request. +type GetOpenIDConnectProviderOutput struct { + _ struct{} `type:"structure"` + + // A list of client IDs (also known as audiences) that are associated with the + // specified IAM OpenID Connect provider. For more information, see CreateOpenIDConnectProvider. + ClientIDList []*string `type:"list"` + + // The date and time when the IAM OpenID Connect provider entity was created + // in the AWS account. + CreateDate *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + // A list of certificate thumbprints that are associated with the specified + // IAM OpenID Connect provider. For more information, see CreateOpenIDConnectProvider. + ThumbprintList []*string `type:"list"` + + // The URL that the IAM OpenID Connect provider is associated with. For more + // information, see CreateOpenIDConnectProvider. + Url *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s GetOpenIDConnectProviderOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetOpenIDConnectProviderOutput) GoString() string { + return s.String() +} + +type GetPolicyInput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN). ARNs are unique identifiers for AWS resources. + // + // For more information about ARNs, go to Amazon Resource Names (ARNs) and + // AWS Service Namespaces (http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) + // in the AWS General Reference. + PolicyArn *string `min:"20" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetPolicyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetPolicyInput) GoString() string { + return s.String() +} + +// Contains the response to a successful GetPolicy request. +type GetPolicyOutput struct { + _ struct{} `type:"structure"` + + // Information about the policy. + Policy *Policy `type:"structure"` +} + +// String returns the string representation +func (s GetPolicyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetPolicyOutput) GoString() string { + return s.String() +} + +type GetPolicyVersionInput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN). ARNs are unique identifiers for AWS resources. 
+ // + // For more information about ARNs, go to Amazon Resource Names (ARNs) and + // AWS Service Namespaces (http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) + // in the AWS General Reference. + PolicyArn *string `min:"20" type:"string" required:"true"` + + // Identifies the policy version to retrieve. + VersionId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s GetPolicyVersionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetPolicyVersionInput) GoString() string { + return s.String() +} + +// Contains the response to a successful GetPolicyVersion request. +type GetPolicyVersionOutput struct { + _ struct{} `type:"structure"` + + // Information about the policy version. + // + // For more information about managed policy versions, see Versioning for Managed + // Policies (http://docs.aws.amazon.com/IAM/latest/UserGuide/policies-managed-versions.html) + // in the IAM User Guide. + PolicyVersion *PolicyVersion `type:"structure"` +} + +// String returns the string representation +func (s GetPolicyVersionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetPolicyVersionOutput) GoString() string { + return s.String() +} + +type GetRoleInput struct { + _ struct{} `type:"structure"` + + // The name of the role to get information about. + RoleName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetRoleInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetRoleInput) GoString() string { + return s.String() +} + +// Contains the response to a successful GetRole request. +type GetRoleOutput struct { + _ struct{} `type:"structure"` + + // Information about the role. + Role *Role `type:"structure" required:"true"` +} + +// String returns the string representation +func (s GetRoleOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetRoleOutput) GoString() string { + return s.String() +} + +type GetRolePolicyInput struct { + _ struct{} `type:"structure"` + + // The name of the policy document to get. + PolicyName *string `min:"1" type:"string" required:"true"` + + // The name of the role associated with the policy. + RoleName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetRolePolicyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetRolePolicyInput) GoString() string { + return s.String() +} + +// Contains the response to a successful GetRolePolicy request. +type GetRolePolicyOutput struct { + _ struct{} `type:"structure"` + + // The policy document. + PolicyDocument *string `min:"1" type:"string" required:"true"` + + // The name of the policy. + PolicyName *string `min:"1" type:"string" required:"true"` + + // The role the policy is associated with. 
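+ //
+ // Editorial sketch: IAM returns inline policy documents URL encoded, so a
+ // caller typically unescapes them before use (svc is assumed to be an
+ // *iam.IAM client; names are hypothetical):
+ //
+ //	resp, err := svc.GetRolePolicy(&iam.GetRolePolicyInput{
+ //		RoleName:   aws.String("example-role"),
+ //		PolicyName: aws.String("example-policy"),
+ //	})
+ //	if err == nil {
+ //		doc, _ := url.QueryUnescape(*resp.PolicyDocument)
+ //		fmt.Println(doc)
+ //	}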
+ RoleName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetRolePolicyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetRolePolicyOutput) GoString() string { + return s.String() +} + +type GetSAMLProviderInput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the SAML provider to get information about. + SAMLProviderArn *string `min:"20" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetSAMLProviderInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetSAMLProviderInput) GoString() string { + return s.String() +} + +// Contains the response to a successful GetSAMLProvider request. +type GetSAMLProviderOutput struct { + _ struct{} `type:"structure"` + + // The date and time when the SAML provider was created. + CreateDate *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + // The XML metadata document that includes information about an identity provider. + SAMLMetadataDocument *string `min:"1000" type:"string"` + + // The expiration date and time for the SAML provider. + ValidUntil *time.Time `type:"timestamp" timestampFormat:"iso8601"` +} + +// String returns the string representation +func (s GetSAMLProviderOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetSAMLProviderOutput) GoString() string { + return s.String() +} + +type GetSSHPublicKeyInput struct { + _ struct{} `type:"structure"` + + // Specifies the public key encoding format to use in the response. To retrieve + // the public key in ssh-rsa format, use SSH. To retrieve the public key in + // PEM format, use PEM. + Encoding *string `type:"string" required:"true" enum:"encodingType"` + + // The unique identifier for the SSH public key. + SSHPublicKeyId *string `min:"20" type:"string" required:"true"` + + // The name of the IAM user associated with the SSH public key. + UserName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetSSHPublicKeyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetSSHPublicKeyInput) GoString() string { + return s.String() +} + +// Contains the response to a successful GetSSHPublicKey request. +type GetSSHPublicKeyOutput struct { + _ struct{} `type:"structure"` + + // Information about the SSH public key. + SSHPublicKey *SSHPublicKey `type:"structure"` +} + +// String returns the string representation +func (s GetSSHPublicKeyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetSSHPublicKeyOutput) GoString() string { + return s.String() +} + +type GetServerCertificateInput struct { + _ struct{} `type:"structure"` + + // The name of the server certificate you want to retrieve information about. + ServerCertificateName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetServerCertificateInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetServerCertificateInput) GoString() string { + return s.String() +} + +// Contains the response to a successful GetServerCertificate request. 
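+//
+// A minimal usage sketch (editorial; the certificate name is hypothetical and
+// svc is assumed to be an *iam.IAM client):
+//
+//	resp, err := svc.GetServerCertificate(&iam.GetServerCertificateInput{
+//		ServerCertificateName: aws.String("prod-cert"),
+//	})
+//	if err == nil {
+//		fmt.Println(*resp.ServerCertificate.ServerCertificateMetadata.Arn)
+//	}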
+type GetServerCertificateOutput struct {
+ _ struct{} `type:"structure"`
+
+ // Information about the server certificate.
+ ServerCertificate *ServerCertificate `type:"structure" required:"true"`
+}
+
+// String returns the string representation
+func (s GetServerCertificateOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetServerCertificateOutput) GoString() string {
+ return s.String()
+}
+
+type GetUserInput struct {
+ _ struct{} `type:"structure"`
+
+ // The name of the user to get information about.
+ //
+ // This parameter is optional. If it is not included, it defaults to the user
+ // making the request.
+ UserName *string `min:"1" type:"string"`
+}
+
+// String returns the string representation
+func (s GetUserInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetUserInput) GoString() string {
+ return s.String()
+}
+
+// Contains the response to a successful GetUser request.
+type GetUserOutput struct {
+ _ struct{} `type:"structure"`
+
+ // Information about the user.
+ User *User `type:"structure" required:"true"`
+}
+
+// String returns the string representation
+func (s GetUserOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetUserOutput) GoString() string {
+ return s.String()
+}
+
+type GetUserPolicyInput struct {
+ _ struct{} `type:"structure"`
+
+ // The name of the policy document to get.
+ PolicyName *string `min:"1" type:"string" required:"true"`
+
+ // The name of the user the policy is associated with.
+ UserName *string `min:"1" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s GetUserPolicyInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetUserPolicyInput) GoString() string {
+ return s.String()
+}
+
+// Contains the response to a successful GetUserPolicy request.
+type GetUserPolicyOutput struct {
+ _ struct{} `type:"structure"`
+
+ // The policy document.
+ PolicyDocument *string `min:"1" type:"string" required:"true"`
+
+ // The name of the policy.
+ PolicyName *string `min:"1" type:"string" required:"true"`
+
+ // The user the policy is associated with.
+ UserName *string `min:"1" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s GetUserPolicyOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetUserPolicyOutput) GoString() string {
+ return s.String()
+}
+
+// Contains information about an IAM group entity.
+//
+// This data type is used as a response element in the following actions:
+//
+// CreateGroup
+//
+// GetGroup
+//
+// ListGroups
+type Group struct {
+ _ struct{} `type:"structure"`
+
+ // The Amazon Resource Name (ARN) specifying the group. For more information
+ // about ARNs and how to use them in policies, see IAM Identifiers (http://docs.aws.amazon.com/IAM/latest/UserGuide/Using_Identifiers.html)
+ // in the Using IAM guide.
+ Arn *string `min:"20" type:"string" required:"true"`
+
+ // The date and time, in ISO 8601 date-time format (http://www.iso.org/iso/iso8601),
+ // when the group was created.
+ CreateDate *time.Time `type:"timestamp" timestampFormat:"iso8601" required:"true"`
+
+ // The stable and unique string identifying the group.
For more information + // about IDs, see IAM Identifiers (http://docs.aws.amazon.com/IAM/latest/UserGuide/Using_Identifiers.html) + // in the Using IAM guide. + GroupId *string `min:"16" type:"string" required:"true"` + + // The friendly name that identifies the group. + GroupName *string `min:"1" type:"string" required:"true"` + + // The path to the group. For more information about paths, see IAM Identifiers + // (http://docs.aws.amazon.com/IAM/latest/UserGuide/Using_Identifiers.html) + // in the Using IAM guide. + Path *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s Group) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Group) GoString() string { + return s.String() +} + +// Contains information about an IAM group, including all of the group's policies. +// +// This data type is used as a response element in the GetAccountAuthorizationDetails +// action. +type GroupDetail struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN). ARNs are unique identifiers for AWS resources. + // + // For more information about ARNs, go to Amazon Resource Names (ARNs) and + // AWS Service Namespaces (http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) + // in the AWS General Reference. + Arn *string `min:"20" type:"string"` + + // A list of the managed policies attached to the group. + AttachedManagedPolicies []*AttachedPolicy `type:"list"` + + // The date and time, in ISO 8601 date-time format (http://www.iso.org/iso/iso8601), + // when the group was created. + CreateDate *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + // The stable and unique string identifying the group. For more information + // about IDs, see IAM Identifiers (http://docs.aws.amazon.com/IAM/latest/UserGuide/Using_Identifiers.html) + // in the Using IAM guide. + GroupId *string `min:"16" type:"string"` + + // The friendly name that identifies the group. + GroupName *string `min:"1" type:"string"` + + // A list of the inline policies embedded in the group. + GroupPolicyList []*PolicyDetail `type:"list"` + + // The path to the group. For more information about paths, see IAM Identifiers + // (http://docs.aws.amazon.com/IAM/latest/UserGuide/Using_Identifiers.html) + // in the Using IAM guide. + Path *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s GroupDetail) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GroupDetail) GoString() string { + return s.String() +} + +// Contains information about an instance profile. +// +// This data type is used as a response element in the following actions: +// +// CreateInstanceProfile +// +// GetInstanceProfile +// +// ListInstanceProfiles +// +// ListInstanceProfilesForRole +type InstanceProfile struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) specifying the instance profile. For more + // information about ARNs and how to use them in policies, see IAM Identifiers + // (http://docs.aws.amazon.com/IAM/latest/UserGuide/Using_Identifiers.html) + // in the Using IAM guide. + Arn *string `min:"20" type:"string" required:"true"` + + // The date when the instance profile was created. + CreateDate *time.Time `type:"timestamp" timestampFormat:"iso8601" required:"true"` + + // The stable and unique string identifying the instance profile. 
For more information + // about IDs, see IAM Identifiers (http://docs.aws.amazon.com/IAM/latest/UserGuide/Using_Identifiers.html) + // in the Using IAM guide. + InstanceProfileId *string `min:"16" type:"string" required:"true"` + + // The name identifying the instance profile. + InstanceProfileName *string `min:"1" type:"string" required:"true"` + + // The path to the instance profile. For more information about paths, see IAM + // Identifiers (http://docs.aws.amazon.com/IAM/latest/UserGuide/Using_Identifiers.html) + // in the Using IAM guide. + Path *string `min:"1" type:"string" required:"true"` + + // The role associated with the instance profile. + Roles []*Role `type:"list" required:"true"` +} + +// String returns the string representation +func (s InstanceProfile) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s InstanceProfile) GoString() string { + return s.String() +} + +type ListAccessKeysInput struct { + _ struct{} `type:"structure"` + + // Use this parameter only when paginating results and only after you receive + // a response indicating that the results are truncated. Set it to the value + // of the Marker element in the response that you received to indicate where + // the next call should start. + Marker *string `min:"1" type:"string"` + + // Use this only when paginating results to indicate the maximum number of items + // you want in the response. If additional items exist beyond the maximum you + // specify, the IsTruncated response element is true. + // + // This parameter is optional. If you do not include it, it defaults to 100. + // Note that IAM might return fewer results, even when there are more results + // available. In that case, the IsTruncated response element returns true and + // Marker contains a value to include in the subsequent call that tells the + // service where to continue from. + MaxItems *int64 `min:"1" type:"integer"` + + // The name of the user. + UserName *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s ListAccessKeysInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListAccessKeysInput) GoString() string { + return s.String() +} + +// Contains the response to a successful ListAccessKeys request. +type ListAccessKeysOutput struct { + _ struct{} `type:"structure"` + + // A list of access key metadata. + AccessKeyMetadata []*AccessKeyMetadata `type:"list" required:"true"` + + // A flag that indicates whether there are more items to return. If your results + // were truncated, you can make a subsequent pagination request using the Marker + // request parameter to retrieve more items. Note that IAM might return fewer + // than the MaxItems number of results even when there are more results available. + // We recommend that you check IsTruncated after every call to ensure that you + // receive all of your results. + IsTruncated *bool `type:"boolean"` + + // When IsTruncated is true, this element is present and contains the value + // to use for the Marker parameter in a subsequent pagination request. 
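+ //
+ // Editorial note: rather than driving Marker by hand, the generated
+ // ListAccessKeysPages helper can walk every page (a sketch; svc is assumed
+ // to be an *iam.IAM client):
+ //
+ //	err := svc.ListAccessKeysPages(&iam.ListAccessKeysInput{},
+ //		func(page *iam.ListAccessKeysOutput, lastPage bool) bool {
+ //			for _, md := range page.AccessKeyMetadata {
+ //				fmt.Println(*md.AccessKeyId)
+ //			}
+ //			return true // keep paging
+ //		})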
+ Marker *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s ListAccessKeysOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListAccessKeysOutput) GoString() string { + return s.String() +} + +type ListAccountAliasesInput struct { + _ struct{} `type:"structure"` + + // Use this parameter only when paginating results and only after you receive + // a response indicating that the results are truncated. Set it to the value + // of the Marker element in the response that you received to indicate where + // the next call should start. + Marker *string `min:"1" type:"string"` + + // Use this only when paginating results to indicate the maximum number of items + // you want in the response. If additional items exist beyond the maximum you + // specify, the IsTruncated response element is true. + // + // This parameter is optional. If you do not include it, it defaults to 100. + // Note that IAM might return fewer results, even when there are more results + // available. In that case, the IsTruncated response element returns true and + // Marker contains a value to include in the subsequent call that tells the + // service where to continue from. + MaxItems *int64 `min:"1" type:"integer"` +} + +// String returns the string representation +func (s ListAccountAliasesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListAccountAliasesInput) GoString() string { + return s.String() +} + +// Contains the response to a successful ListAccountAliases request. +type ListAccountAliasesOutput struct { + _ struct{} `type:"structure"` + + // A list of aliases associated with the account. AWS supports only one alias + // per account. + AccountAliases []*string `type:"list" required:"true"` + + // A flag that indicates whether there are more items to return. If your results + // were truncated, you can make a subsequent pagination request using the Marker + // request parameter to retrieve more items. Note that IAM might return fewer + // than the MaxItems number of results even when there are more results available. + // We recommend that you check IsTruncated after every call to ensure that you + // receive all of your results. + IsTruncated *bool `type:"boolean"` + + // When IsTruncated is true, this element is present and contains the value + // to use for the Marker parameter in a subsequent pagination request. + Marker *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s ListAccountAliasesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListAccountAliasesOutput) GoString() string { + return s.String() +} + +type ListAttachedGroupPoliciesInput struct { + _ struct{} `type:"structure"` + + // The name (friendly name, not ARN) of the group to list attached policies + // for. + GroupName *string `min:"1" type:"string" required:"true"` + + // Use this parameter only when paginating results and only after you receive + // a response indicating that the results are truncated. Set it to the value + // of the Marker element in the response that you received to indicate where + // the next call should start. + Marker *string `min:"1" type:"string"` + + // Use this only when paginating results to indicate the maximum number of items + // you want in the response. 
If additional items exist beyond the maximum you + // specify, the IsTruncated response element is true. + // + // This parameter is optional. If you do not include it, it defaults to 100. + // Note that IAM might return fewer results, even when there are more results + // available. In that case, the IsTruncated response element returns true and + // Marker contains a value to include in the subsequent call that tells the + // service where to continue from. + MaxItems *int64 `min:"1" type:"integer"` + + // The path prefix for filtering the results. This parameter is optional. If + // it is not included, it defaults to a slash (/), listing all policies. + PathPrefix *string `type:"string"` +} + +// String returns the string representation +func (s ListAttachedGroupPoliciesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListAttachedGroupPoliciesInput) GoString() string { + return s.String() +} + +// Contains the response to a successful ListAttachedGroupPolicies request. +type ListAttachedGroupPoliciesOutput struct { + _ struct{} `type:"structure"` + + // A list of the attached policies. + AttachedPolicies []*AttachedPolicy `type:"list"` + + // A flag that indicates whether there are more items to return. If your results + // were truncated, you can make a subsequent pagination request using the Marker + // request parameter to retrieve more items. Note that IAM might return fewer + // than the MaxItems number of results even when there are more results available. + // We recommend that you check IsTruncated after every call to ensure that you + // receive all of your results. + IsTruncated *bool `type:"boolean"` + + // When IsTruncated is true, this element is present and contains the value + // to use for the Marker parameter in a subsequent pagination request. + Marker *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s ListAttachedGroupPoliciesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListAttachedGroupPoliciesOutput) GoString() string { + return s.String() +} + +type ListAttachedRolePoliciesInput struct { + _ struct{} `type:"structure"` + + // Use this parameter only when paginating results and only after you receive + // a response indicating that the results are truncated. Set it to the value + // of the Marker element in the response that you received to indicate where + // the next call should start. + Marker *string `min:"1" type:"string"` + + // Use this only when paginating results to indicate the maximum number of items + // you want in the response. If additional items exist beyond the maximum you + // specify, the IsTruncated response element is true. + // + // This parameter is optional. If you do not include it, it defaults to 100. + // Note that IAM might return fewer results, even when there are more results + // available. In that case, the IsTruncated response element returns true and + // Marker contains a value to include in the subsequent call that tells the + // service where to continue from. + MaxItems *int64 `min:"1" type:"integer"` + + // The path prefix for filtering the results. This parameter is optional. If + // it is not included, it defaults to a slash (/), listing all policies. + PathPrefix *string `type:"string"` + + // The name (friendly name, not ARN) of the role to list attached policies for. 
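+ //
+ // A hedged example with a hypothetical role name (svc is assumed to be an
+ // *iam.IAM client):
+ //
+ //	resp, err := svc.ListAttachedRolePolicies(&iam.ListAttachedRolePoliciesInput{
+ //		RoleName: aws.String("example-role"),
+ //	})
+ //	if err == nil {
+ //		for _, p := range resp.AttachedPolicies {
+ //			fmt.Println(*p.PolicyName, *p.PolicyArn)
+ //		}
+ //	}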
+ RoleName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s ListAttachedRolePoliciesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListAttachedRolePoliciesInput) GoString() string { + return s.String() +} + +// Contains the response to a successful ListAttachedRolePolicies request. +type ListAttachedRolePoliciesOutput struct { + _ struct{} `type:"structure"` + + // A list of the attached policies. + AttachedPolicies []*AttachedPolicy `type:"list"` + + // A flag that indicates whether there are more items to return. If your results + // were truncated, you can make a subsequent pagination request using the Marker + // request parameter to retrieve more items. Note that IAM might return fewer + // than the MaxItems number of results even when there are more results available. + // We recommend that you check IsTruncated after every call to ensure that you + // receive all of your results. + IsTruncated *bool `type:"boolean"` + + // When IsTruncated is true, this element is present and contains the value + // to use for the Marker parameter in a subsequent pagination request. + Marker *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s ListAttachedRolePoliciesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListAttachedRolePoliciesOutput) GoString() string { + return s.String() +} + +type ListAttachedUserPoliciesInput struct { + _ struct{} `type:"structure"` + + // Use this parameter only when paginating results and only after you receive + // a response indicating that the results are truncated. Set it to the value + // of the Marker element in the response that you received to indicate where + // the next call should start. + Marker *string `min:"1" type:"string"` + + // Use this only when paginating results to indicate the maximum number of items + // you want in the response. If additional items exist beyond the maximum you + // specify, the IsTruncated response element is true. + // + // This parameter is optional. If you do not include it, it defaults to 100. + // Note that IAM might return fewer results, even when there are more results + // available. In that case, the IsTruncated response element returns true and + // Marker contains a value to include in the subsequent call that tells the + // service where to continue from. + MaxItems *int64 `min:"1" type:"integer"` + + // The path prefix for filtering the results. This parameter is optional. If + // it is not included, it defaults to a slash (/), listing all policies. + PathPrefix *string `type:"string"` + + // The name (friendly name, not ARN) of the user to list attached policies for. + UserName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s ListAttachedUserPoliciesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListAttachedUserPoliciesInput) GoString() string { + return s.String() +} + +// Contains the response to a successful ListAttachedUserPolicies request. +type ListAttachedUserPoliciesOutput struct { + _ struct{} `type:"structure"` + + // A list of the attached policies. + AttachedPolicies []*AttachedPolicy `type:"list"` + + // A flag that indicates whether there are more items to return. 
If your results + // were truncated, you can make a subsequent pagination request using the Marker + // request parameter to retrieve more items. Note that IAM might return fewer + // than the MaxItems number of results even when there are more results available. + // We recommend that you check IsTruncated after every call to ensure that you + // receive all of your results. + IsTruncated *bool `type:"boolean"` + + // When IsTruncated is true, this element is present and contains the value + // to use for the Marker parameter in a subsequent pagination request. + Marker *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s ListAttachedUserPoliciesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListAttachedUserPoliciesOutput) GoString() string { + return s.String() +} + +type ListEntitiesForPolicyInput struct { + _ struct{} `type:"structure"` + + // The entity type to use for filtering the results. + // + // For example, when EntityFilter is Role, only the roles that are attached + // to the specified policy are returned. This parameter is optional. If it is + // not included, all attached entities (users, groups, and roles) are returned. + EntityFilter *string `type:"string" enum:"EntityType"` + + // Use this parameter only when paginating results and only after you receive + // a response indicating that the results are truncated. Set it to the value + // of the Marker element in the response that you received to indicate where + // the next call should start. + Marker *string `min:"1" type:"string"` + + // Use this only when paginating results to indicate the maximum number of items + // you want in the response. If additional items exist beyond the maximum you + // specify, the IsTruncated response element is true. + // + // This parameter is optional. If you do not include it, it defaults to 100. + // Note that IAM might return fewer results, even when there are more results + // available. In that case, the IsTruncated response element returns true and + // Marker contains a value to include in the subsequent call that tells the + // service where to continue from. + MaxItems *int64 `min:"1" type:"integer"` + + // The path prefix for filtering the results. This parameter is optional. If + // it is not included, it defaults to a slash (/), listing all entities. + PathPrefix *string `min:"1" type:"string"` + + // The Amazon Resource Name (ARN). ARNs are unique identifiers for AWS resources. + // + // For more information about ARNs, go to Amazon Resource Names (ARNs) and + // AWS Service Namespaces (http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) + // in the AWS General Reference. + PolicyArn *string `min:"20" type:"string" required:"true"` +} + +// String returns the string representation +func (s ListEntitiesForPolicyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListEntitiesForPolicyInput) GoString() string { + return s.String() +} + +// Contains the response to a successful ListEntitiesForPolicy request. +type ListEntitiesForPolicyOutput struct { + _ struct{} `type:"structure"` + + // A flag that indicates whether there are more items to return. If your results + // were truncated, you can make a subsequent pagination request using the Marker + // request parameter to retrieve more items. 
Note that IAM might return fewer + // than the MaxItems number of results even when there are more results available. + // We recommend that you check IsTruncated after every call to ensure that you + // receive all of your results. + IsTruncated *bool `type:"boolean"` + + // When IsTruncated is true, this element is present and contains the value + // to use for the Marker parameter in a subsequent pagination request. + Marker *string `min:"1" type:"string"` + + // A list of groups that the policy is attached to. + PolicyGroups []*PolicyGroup `type:"list"` + + // A list of roles that the policy is attached to. + PolicyRoles []*PolicyRole `type:"list"` + + // A list of users that the policy is attached to. + PolicyUsers []*PolicyUser `type:"list"` +} + +// String returns the string representation +func (s ListEntitiesForPolicyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListEntitiesForPolicyOutput) GoString() string { + return s.String() +} + +type ListGroupPoliciesInput struct { + _ struct{} `type:"structure"` + + // The name of the group to list policies for. + GroupName *string `min:"1" type:"string" required:"true"` + + // Use this parameter only when paginating results and only after you receive + // a response indicating that the results are truncated. Set it to the value + // of the Marker element in the response that you received to indicate where + // the next call should start. + Marker *string `min:"1" type:"string"` + + // Use this only when paginating results to indicate the maximum number of items + // you want in the response. If additional items exist beyond the maximum you + // specify, the IsTruncated response element is true. + // + // This parameter is optional. If you do not include it, it defaults to 100. + // Note that IAM might return fewer results, even when there are more results + // available. In that case, the IsTruncated response element returns true and + // Marker contains a value to include in the subsequent call that tells the + // service where to continue from. + MaxItems *int64 `min:"1" type:"integer"` +} + +// String returns the string representation +func (s ListGroupPoliciesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListGroupPoliciesInput) GoString() string { + return s.String() +} + +// Contains the response to a successful ListGroupPolicies request. +type ListGroupPoliciesOutput struct { + _ struct{} `type:"structure"` + + // A flag that indicates whether there are more items to return. If your results + // were truncated, you can make a subsequent pagination request using the Marker + // request parameter to retrieve more items. Note that IAM might return fewer + // than the MaxItems number of results even when there are more results available. + // We recommend that you check IsTruncated after every call to ensure that you + // receive all of your results. + IsTruncated *bool `type:"boolean"` + + // When IsTruncated is true, this element is present and contains the value + // to use for the Marker parameter in a subsequent pagination request. + Marker *string `min:"1" type:"string"` + + // A list of policy names. 
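+ //
+ // A short sketch (editorial; the group name is hypothetical and svc is
+ // assumed to be an *iam.IAM client):
+ //
+ //	resp, err := svc.ListGroupPolicies(&iam.ListGroupPoliciesInput{
+ //		GroupName: aws.String("Admins"),
+ //	})
+ //	if err == nil {
+ //		fmt.Println(aws.StringValueSlice(resp.PolicyNames))
+ //	}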
+ PolicyNames []*string `type:"list" required:"true"` +} + +// String returns the string representation +func (s ListGroupPoliciesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListGroupPoliciesOutput) GoString() string { + return s.String() +} + +type ListGroupsForUserInput struct { + _ struct{} `type:"structure"` + + // Use this parameter only when paginating results and only after you receive + // a response indicating that the results are truncated. Set it to the value + // of the Marker element in the response that you received to indicate where + // the next call should start. + Marker *string `min:"1" type:"string"` + + // Use this only when paginating results to indicate the maximum number of items + // you want in the response. If additional items exist beyond the maximum you + // specify, the IsTruncated response element is true. + // + // This parameter is optional. If you do not include it, it defaults to 100. + // Note that IAM might return fewer results, even when there are more results + // available. In that case, the IsTruncated response element returns true and + // Marker contains a value to include in the subsequent call that tells the + // service where to continue from. + MaxItems *int64 `min:"1" type:"integer"` + + // The name of the user to list groups for. + UserName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s ListGroupsForUserInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListGroupsForUserInput) GoString() string { + return s.String() +} + +// Contains the response to a successful ListGroupsForUser request. +type ListGroupsForUserOutput struct { + _ struct{} `type:"structure"` + + // A list of groups. + Groups []*Group `type:"list" required:"true"` + + // A flag that indicates whether there are more items to return. If your results + // were truncated, you can make a subsequent pagination request using the Marker + // request parameter to retrieve more items. Note that IAM might return fewer + // than the MaxItems number of results even when there are more results available. + // We recommend that you check IsTruncated after every call to ensure that you + // receive all of your results. + IsTruncated *bool `type:"boolean"` + + // When IsTruncated is true, this element is present and contains the value + // to use for the Marker parameter in a subsequent pagination request. + Marker *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s ListGroupsForUserOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListGroupsForUserOutput) GoString() string { + return s.String() +} + +type ListGroupsInput struct { + _ struct{} `type:"structure"` + + // Use this parameter only when paginating results and only after you receive + // a response indicating that the results are truncated. Set it to the value + // of the Marker element in the response that you received to indicate where + // the next call should start. + Marker *string `min:"1" type:"string"` + + // Use this only when paginating results to indicate the maximum number of items + // you want in the response. If additional items exist beyond the maximum you + // specify, the IsTruncated response element is true. + // + // This parameter is optional. If you do not include it, it defaults to 100. 
+ // Note that IAM might return fewer results, even when there are more results + // available. In that case, the IsTruncated response element returns true and + // Marker contains a value to include in the subsequent call that tells the + // service where to continue from. + MaxItems *int64 `min:"1" type:"integer"` + + // The path prefix for filtering the results. For example, the prefix /division_abc/subdivision_xyz/ + // gets all groups whose path starts with /division_abc/subdivision_xyz/. + // + // This parameter is optional. If it is not included, it defaults to a slash + // (/), listing all groups. + PathPrefix *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s ListGroupsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListGroupsInput) GoString() string { + return s.String() +} + +// Contains the response to a successful ListGroups request. +type ListGroupsOutput struct { + _ struct{} `type:"structure"` + + // A list of groups. + Groups []*Group `type:"list" required:"true"` + + // A flag that indicates whether there are more items to return. If your results + // were truncated, you can make a subsequent pagination request using the Marker + // request parameter to retrieve more items. Note that IAM might return fewer + // than the MaxItems number of results even when there are more results available. + // We recommend that you check IsTruncated after every call to ensure that you + // receive all of your results. + IsTruncated *bool `type:"boolean"` + + // When IsTruncated is true, this element is present and contains the value + // to use for the Marker parameter in a subsequent pagination request. + Marker *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s ListGroupsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListGroupsOutput) GoString() string { + return s.String() +} + +type ListInstanceProfilesForRoleInput struct { + _ struct{} `type:"structure"` + + // Use this parameter only when paginating results and only after you receive + // a response indicating that the results are truncated. Set it to the value + // of the Marker element in the response that you received to indicate where + // the next call should start. + Marker *string `min:"1" type:"string"` + + // Use this only when paginating results to indicate the maximum number of items + // you want in the response. If additional items exist beyond the maximum you + // specify, the IsTruncated response element is true. + // + // This parameter is optional. If you do not include it, it defaults to 100. + // Note that IAM might return fewer results, even when there are more results + // available. In that case, the IsTruncated response element returns true and + // Marker contains a value to include in the subsequent call that tells the + // service where to continue from. + MaxItems *int64 `min:"1" type:"integer"` + + // The name of the role to list instance profiles for. + RoleName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s ListInstanceProfilesForRoleInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListInstanceProfilesForRoleInput) GoString() string { + return s.String() +} + +// Contains the response to a successful ListInstanceProfilesForRole request. 
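+//
+// A minimal sketch (editorial; the role name is hypothetical and svc is
+// assumed to be an *iam.IAM client):
+//
+//	resp, err := svc.ListInstanceProfilesForRole(&iam.ListInstanceProfilesForRoleInput{
+//		RoleName: aws.String("web-role"),
+//	})
+//	if err == nil {
+//		for _, ip := range resp.InstanceProfiles {
+//			fmt.Println(*ip.InstanceProfileName)
+//		}
+//	}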
+type ListInstanceProfilesForRoleOutput struct { + _ struct{} `type:"structure"` + + // A list of instance profiles. + InstanceProfiles []*InstanceProfile `type:"list" required:"true"` + + // A flag that indicates whether there are more items to return. If your results + // were truncated, you can make a subsequent pagination request using the Marker + // request parameter to retrieve more items. Note that IAM might return fewer + // than the MaxItems number of results even when there are more results available. + // We recommend that you check IsTruncated after every call to ensure that you + // receive all of your results. + IsTruncated *bool `type:"boolean"` + + // When IsTruncated is true, this element is present and contains the value + // to use for the Marker parameter in a subsequent pagination request. + Marker *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s ListInstanceProfilesForRoleOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListInstanceProfilesForRoleOutput) GoString() string { + return s.String() +} + +type ListInstanceProfilesInput struct { + _ struct{} `type:"structure"` + + // Use this parameter only when paginating results and only after you receive + // a response indicating that the results are truncated. Set it to the value + // of the Marker element in the response that you received to indicate where + // the next call should start. + Marker *string `min:"1" type:"string"` + + // Use this only when paginating results to indicate the maximum number of items + // you want in the response. If additional items exist beyond the maximum you + // specify, the IsTruncated response element is true. + // + // This parameter is optional. If you do not include it, it defaults to 100. + // Note that IAM might return fewer results, even when there are more results + // available. In that case, the IsTruncated response element returns true and + // Marker contains a value to include in the subsequent call that tells the + // service where to continue from. + MaxItems *int64 `min:"1" type:"integer"` + + // The path prefix for filtering the results. For example, the prefix /application_abc/component_xyz/ + // gets all instance profiles whose path starts with /application_abc/component_xyz/. + // + // This parameter is optional. If it is not included, it defaults to a slash + // (/), listing all instance profiles. + PathPrefix *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s ListInstanceProfilesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListInstanceProfilesInput) GoString() string { + return s.String() +} + +// Contains the response to a successful ListInstanceProfiles request. +type ListInstanceProfilesOutput struct { + _ struct{} `type:"structure"` + + // A list of instance profiles. + InstanceProfiles []*InstanceProfile `type:"list" required:"true"` + + // A flag that indicates whether there are more items to return. If your results + // were truncated, you can make a subsequent pagination request using the Marker + // request parameter to retrieve more items. Note that IAM might return fewer + // than the MaxItems number of results even when there are more results available. + // We recommend that you check IsTruncated after every call to ensure that you + // receive all of your results. 
+ IsTruncated *bool `type:"boolean"` + + // When IsTruncated is true, this element is present and contains the value + // to use for the Marker parameter in a subsequent pagination request. + Marker *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s ListInstanceProfilesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListInstanceProfilesOutput) GoString() string { + return s.String() +} + +type ListMFADevicesInput struct { + _ struct{} `type:"structure"` + + // Use this parameter only when paginating results and only after you receive + // a response indicating that the results are truncated. Set it to the value + // of the Marker element in the response that you received to indicate where + // the next call should start. + Marker *string `min:"1" type:"string"` + + // Use this only when paginating results to indicate the maximum number of items + // you want in the response. If additional items exist beyond the maximum you + // specify, the IsTruncated response element is true. + // + // This parameter is optional. If you do not include it, it defaults to 100. + // Note that IAM might return fewer results, even when there are more results + // available. In that case, the IsTruncated response element returns true and + // Marker contains a value to include in the subsequent call that tells the + // service where to continue from. + MaxItems *int64 `min:"1" type:"integer"` + + // The name of the user whose MFA devices you want to list. + UserName *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s ListMFADevicesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListMFADevicesInput) GoString() string { + return s.String() +} + +// Contains the response to a successful ListMFADevices request. +type ListMFADevicesOutput struct { + _ struct{} `type:"structure"` + + // A flag that indicates whether there are more items to return. If your results + // were truncated, you can make a subsequent pagination request using the Marker + // request parameter to retrieve more items. Note that IAM might return fewer + // than the MaxItems number of results even when there are more results available. + // We recommend that you check IsTruncated after every call to ensure that you + // receive all of your results. + IsTruncated *bool `type:"boolean"` + + // A list of MFA devices. + MFADevices []*MFADevice `type:"list" required:"true"` + + // When IsTruncated is true, this element is present and contains the value + // to use for the Marker parameter in a subsequent pagination request. + Marker *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s ListMFADevicesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListMFADevicesOutput) GoString() string { + return s.String() +} + +type ListOpenIDConnectProvidersInput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s ListOpenIDConnectProvidersInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListOpenIDConnectProvidersInput) GoString() string { + return s.String() +} + +// Contains the response to a successful ListOpenIDConnectProviders request. 
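+//
+// Editorial sketch; the operation takes no parameters (svc is assumed to be
+// an *iam.IAM client):
+//
+//	resp, err := svc.ListOpenIDConnectProviders(&iam.ListOpenIDConnectProvidersInput{})
+//	if err == nil {
+//		for _, p := range resp.OpenIDConnectProviderList {
+//			fmt.Println(*p.Arn)
+//		}
+//	}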
+type ListOpenIDConnectProvidersOutput struct { + _ struct{} `type:"structure"` + + // The list of IAM OpenID Connect providers in the AWS account. + OpenIDConnectProviderList []*OpenIDConnectProviderListEntry `type:"list"` +} + +// String returns the string representation +func (s ListOpenIDConnectProvidersOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListOpenIDConnectProvidersOutput) GoString() string { + return s.String() +} + +type ListPoliciesInput struct { + _ struct{} `type:"structure"` + + // Use this parameter only when paginating results and only after you receive + // a response indicating that the results are truncated. Set it to the value + // of the Marker element in the response that you received to indicate where + // the next call should start. + Marker *string `min:"1" type:"string"` + + // Use this only when paginating results to indicate the maximum number of items + // you want in the response. If additional items exist beyond the maximum you + // specify, the IsTruncated response element is true. + // + // This parameter is optional. If you do not include it, it defaults to 100. + // Note that IAM might return fewer results, even when there are more results + // available. In that case, the IsTruncated response element returns true and + // Marker contains a value to include in the subsequent call that tells the + // service where to continue from. + MaxItems *int64 `min:"1" type:"integer"` + + // A flag to filter the results to only the attached policies. + // + // When OnlyAttached is true, the returned list contains only the policies + // that are attached to a user, group, or role. When OnlyAttached is false, + // or when the parameter is not included, all policies are returned. + OnlyAttached *bool `type:"boolean"` + + // The path prefix for filtering the results. This parameter is optional. If + // it is not included, it defaults to a slash (/), listing all policies. + PathPrefix *string `type:"string"` + + // The scope to use for filtering the results. + // + // To list only AWS managed policies, set Scope to AWS. To list only the customer + // managed policies in your AWS account, set Scope to Local. + // + // This parameter is optional. If it is not included, or if it is set to All, + // all policies are returned. + Scope *string `type:"string" enum:"policyScopeType"` +} + +// String returns the string representation +func (s ListPoliciesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListPoliciesInput) GoString() string { + return s.String() +} + +// Contains the response to a successful ListPolicies request. +type ListPoliciesOutput struct { + _ struct{} `type:"structure"` + + // A flag that indicates whether there are more items to return. If your results + // were truncated, you can make a subsequent pagination request using the Marker + // request parameter to retrieve more items. Note that IAM might return fewer + // than the MaxItems number of results even when there are more results available. + // We recommend that you check IsTruncated after every call to ensure that you + // receive all of your results. + IsTruncated *bool `type:"boolean"` + + // When IsTruncated is true, this element is present and contains the value + // to use for the Marker parameter in a subsequent pagination request. + Marker *string `min:"1" type:"string"` + + // A list of policies. 
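+ //
+ // A sketch of the Scope and OnlyAttached filters described above (editorial;
+ // svc is assumed to be an *iam.IAM client):
+ //
+ //	resp, err := svc.ListPolicies(&iam.ListPoliciesInput{
+ //		Scope:        aws.String("Local"), // customer managed policies only
+ //		OnlyAttached: aws.Bool(true),      // skip unattached policies
+ //	})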
+ Policies []*Policy `type:"list"` +} + +// String returns the string representation +func (s ListPoliciesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListPoliciesOutput) GoString() string { + return s.String() +} + +type ListPolicyVersionsInput struct { + _ struct{} `type:"structure"` + + // Use this parameter only when paginating results and only after you receive + // a response indicating that the results are truncated. Set it to the value + // of the Marker element in the response that you received to indicate where + // the next call should start. + Marker *string `min:"1" type:"string"` + + // Use this only when paginating results to indicate the maximum number of items + // you want in the response. If additional items exist beyond the maximum you + // specify, the IsTruncated response element is true. + // + // This parameter is optional. If you do not include it, it defaults to 100. + // Note that IAM might return fewer results, even when there are more results + // available. In that case, the IsTruncated response element returns true and + // Marker contains a value to include in the subsequent call that tells the + // service where to continue from. + MaxItems *int64 `min:"1" type:"integer"` + + // The Amazon Resource Name (ARN). ARNs are unique identifiers for AWS resources. + // + // For more information about ARNs, go to Amazon Resource Names (ARNs) and + // AWS Service Namespaces (http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) + // in the AWS General Reference. + PolicyArn *string `min:"20" type:"string" required:"true"` +} + +// String returns the string representation +func (s ListPolicyVersionsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListPolicyVersionsInput) GoString() string { + return s.String() +} + +// Contains the response to a successful ListPolicyVersions request. +type ListPolicyVersionsOutput struct { + _ struct{} `type:"structure"` + + // A flag that indicates whether there are more items to return. If your results + // were truncated, you can make a subsequent pagination request using the Marker + // request parameter to retrieve more items. Note that IAM might return fewer + // than the MaxItems number of results even when there are more results available. + // We recommend that you check IsTruncated after every call to ensure that you + // receive all of your results. + IsTruncated *bool `type:"boolean"` + + // When IsTruncated is true, this element is present and contains the value + // to use for the Marker parameter in a subsequent pagination request. + Marker *string `min:"1" type:"string"` + + // A list of policy versions. + // + // For more information about managed policy versions, see Versioning for Managed + // Policies (http://docs.aws.amazon.com/IAM/latest/UserGuide/policies-managed-versions.html) + // in the IAM User Guide. + Versions []*PolicyVersion `type:"list"` +} + +// String returns the string representation +func (s ListPolicyVersionsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListPolicyVersionsOutput) GoString() string { + return s.String() +} + +type ListRolePoliciesInput struct { + _ struct{} `type:"structure"` + + // Use this parameter only when paginating results and only after you receive + // a response indicating that the results are truncated. 
Set it to the value + // of the Marker element in the response that you received to indicate where + // the next call should start. + Marker *string `min:"1" type:"string"` + + // Use this only when paginating results to indicate the maximum number of items + // you want in the response. If additional items exist beyond the maximum you + // specify, the IsTruncated response element is true. + // + // This parameter is optional. If you do not include it, it defaults to 100. + // Note that IAM might return fewer results, even when there are more results + // available. In that case, the IsTruncated response element returns true and + // Marker contains a value to include in the subsequent call that tells the + // service where to continue from. + MaxItems *int64 `min:"1" type:"integer"` + + // The name of the role to list policies for. + RoleName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s ListRolePoliciesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListRolePoliciesInput) GoString() string { + return s.String() +} + +// Contains the response to a successful ListRolePolicies request. +type ListRolePoliciesOutput struct { + _ struct{} `type:"structure"` + + // A flag that indicates whether there are more items to return. If your results + // were truncated, you can make a subsequent pagination request using the Marker + // request parameter to retrieve more items. Note that IAM might return fewer + // than the MaxItems number of results even when there are more results available. + // We recommend that you check IsTruncated after every call to ensure that you + // receive all of your results. + IsTruncated *bool `type:"boolean"` + + // When IsTruncated is true, this element is present and contains the value + // to use for the Marker parameter in a subsequent pagination request. + Marker *string `min:"1" type:"string"` + + // A list of policy names. + PolicyNames []*string `type:"list" required:"true"` +} + +// String returns the string representation +func (s ListRolePoliciesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListRolePoliciesOutput) GoString() string { + return s.String() +} + +type ListRolesInput struct { + _ struct{} `type:"structure"` + + // Use this parameter only when paginating results and only after you receive + // a response indicating that the results are truncated. Set it to the value + // of the Marker element in the response that you received to indicate where + // the next call should start. + Marker *string `min:"1" type:"string"` + + // Use this only when paginating results to indicate the maximum number of items + // you want in the response. If additional items exist beyond the maximum you + // specify, the IsTruncated response element is true. + // + // This parameter is optional. If you do not include it, it defaults to 100. + // Note that IAM might return fewer results, even when there are more results + // available. In that case, the IsTruncated response element returns true and + // Marker contains a value to include in the subsequent call that tells the + // service where to continue from. + MaxItems *int64 `min:"1" type:"integer"` + + // The path prefix for filtering the results. For example, the prefix /application_abc/component_xyz/ + // gets all roles whose path starts with /application_abc/component_xyz/. + // + // This parameter is optional. 
If it is not included, it defaults to a slash + // (/), listing all roles. + PathPrefix *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s ListRolesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListRolesInput) GoString() string { + return s.String() +} + +// Contains the response to a successful ListRoles request. +type ListRolesOutput struct { + _ struct{} `type:"structure"` + + // A flag that indicates whether there are more items to return. If your results + // were truncated, you can make a subsequent pagination request using the Marker + // request parameter to retrieve more items. Note that IAM might return fewer + // than the MaxItems number of results even when there are more results available. + // We recommend that you check IsTruncated after every call to ensure that you + // receive all of your results. + IsTruncated *bool `type:"boolean"` + + // When IsTruncated is true, this element is present and contains the value + // to use for the Marker parameter in a subsequent pagination request. + Marker *string `min:"1" type:"string"` + + // A list of roles. + Roles []*Role `type:"list" required:"true"` +} + +// String returns the string representation +func (s ListRolesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListRolesOutput) GoString() string { + return s.String() +} + +type ListSAMLProvidersInput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s ListSAMLProvidersInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListSAMLProvidersInput) GoString() string { + return s.String() +} + +// Contains the response to a successful ListSAMLProviders request. +type ListSAMLProvidersOutput struct { + _ struct{} `type:"structure"` + + // The list of SAML providers for this account. + SAMLProviderList []*SAMLProviderListEntry `type:"list"` +} + +// String returns the string representation +func (s ListSAMLProvidersOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListSAMLProvidersOutput) GoString() string { + return s.String() +} + +type ListSSHPublicKeysInput struct { + _ struct{} `type:"structure"` + + // Use this parameter only when paginating results and only after you receive + // a response indicating that the results are truncated. Set it to the value + // of the Marker element in the response that you received to indicate where + // the next call should start. + Marker *string `min:"1" type:"string"` + + // Use this only when paginating results to indicate the maximum number of items + // you want in the response. If additional items exist beyond the maximum you + // specify, the IsTruncated response element is true. + // + // This parameter is optional. If you do not include it, it defaults to 100. + // Note that IAM might return fewer results, even when there are more results + // available. In that case, the IsTruncated response element returns true and + // Marker contains a value to include in the subsequent call that tells the + // service where to continue from. + MaxItems *int64 `min:"1" type:"integer"` + + // The name of the IAM user to list SSH public keys for. If none is specified, + // the UserName field is determined implicitly based on the AWS access key used + // to sign the request. 
+ UserName *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s ListSSHPublicKeysInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListSSHPublicKeysInput) GoString() string { + return s.String() +} + +// Contains the response to a successful ListSSHPublicKeys request. +type ListSSHPublicKeysOutput struct { + _ struct{} `type:"structure"` + + // A flag that indicates whether there are more items to return. If your results + // were truncated, you can make a subsequent pagination request using the Marker + // request parameter to retrieve more items. Note that IAM might return fewer + // than the MaxItems number of results even when there are more results available. + // We recommend that you check IsTruncated after every call to ensure that you + // receive all of your results. + IsTruncated *bool `type:"boolean"` + + // When IsTruncated is true, this element is present and contains the value + // to use for the Marker parameter in a subsequent pagination request. + Marker *string `min:"1" type:"string"` + + // A list of SSH public keys. + SSHPublicKeys []*SSHPublicKeyMetadata `type:"list"` +} + +// String returns the string representation +func (s ListSSHPublicKeysOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListSSHPublicKeysOutput) GoString() string { + return s.String() +} + +type ListServerCertificatesInput struct { + _ struct{} `type:"structure"` + + // Use this parameter only when paginating results and only after you receive + // a response indicating that the results are truncated. Set it to the value + // of the Marker element in the response that you received to indicate where + // the next call should start. + Marker *string `min:"1" type:"string"` + + // Use this only when paginating results to indicate the maximum number of items + // you want in the response. If additional items exist beyond the maximum you + // specify, the IsTruncated response element is true. + // + // This parameter is optional. If you do not include it, it defaults to 100. + // Note that IAM might return fewer results, even when there are more results + // available. In that case, the IsTruncated response element returns true and + // Marker contains a value to include in the subsequent call that tells the + // service where to continue from. + MaxItems *int64 `min:"1" type:"integer"` + + // The path prefix for filtering the results. For example: /company/servercerts + // would get all server certificates for which the path starts with /company/servercerts. + // + // This parameter is optional. If it is not included, it defaults to a slash + // (/), listing all server certificates. + PathPrefix *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s ListServerCertificatesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListServerCertificatesInput) GoString() string { + return s.String() +} + +// Contains the response to a successful ListServerCertificates request. +type ListServerCertificatesOutput struct { + _ struct{} `type:"structure"` + + // A flag that indicates whether there are more items to return. If your results + // were truncated, you can make a subsequent pagination request using the Marker + // request parameter to retrieve more items. 
Note that IAM might return fewer + // than the MaxItems number of results even when there are more results available. + // We recommend that you check IsTruncated after every call to ensure that you + // receive all of your results. + IsTruncated *bool `type:"boolean"` + + // When IsTruncated is true, this element is present and contains the value + // to use for the Marker parameter in a subsequent pagination request. + Marker *string `min:"1" type:"string"` + + // A list of server certificates. + ServerCertificateMetadataList []*ServerCertificateMetadata `type:"list" required:"true"` +} + +// String returns the string representation +func (s ListServerCertificatesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListServerCertificatesOutput) GoString() string { + return s.String() +} + +type ListSigningCertificatesInput struct { + _ struct{} `type:"structure"` + + // Use this parameter only when paginating results and only after you receive + // a response indicating that the results are truncated. Set it to the value + // of the Marker element in the response that you received to indicate where + // the next call should start. + Marker *string `min:"1" type:"string"` + + // Use this only when paginating results to indicate the maximum number of items + // you want in the response. If additional items exist beyond the maximum you + // specify, the IsTruncated response element is true. + // + // This parameter is optional. If you do not include it, it defaults to 100. + // Note that IAM might return fewer results, even when there are more results + // available. In that case, the IsTruncated response element returns true and + // Marker contains a value to include in the subsequent call that tells the + // service where to continue from. + MaxItems *int64 `min:"1" type:"integer"` + + // The name of the user. + UserName *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s ListSigningCertificatesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListSigningCertificatesInput) GoString() string { + return s.String() +} + +// Contains the response to a successful ListSigningCertificates request. +type ListSigningCertificatesOutput struct { + _ struct{} `type:"structure"` + + // A list of the user's signing certificate information. + Certificates []*SigningCertificate `type:"list" required:"true"` + + // A flag that indicates whether there are more items to return. If your results + // were truncated, you can make a subsequent pagination request using the Marker + // request parameter to retrieve more items. Note that IAM might return fewer + // than the MaxItems number of results even when there are more results available. + // We recommend that you check IsTruncated after every call to ensure that you + // receive all of your results. + IsTruncated *bool `type:"boolean"` + + // When IsTruncated is true, this element is present and contains the value + // to use for the Marker parameter in a subsequent pagination request. 
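+ //
+ // Editor's note: instead of threading Marker by hand, the SDK's generated
+ // paginator can walk the pages; a hedged sketch with an assumed *iam.IAM
+ // client named svc (not part of the generated SDK):
+ //
+ //    err := svc.ListSigningCertificatesPages(&iam.ListSigningCertificatesInput{},
+ //        func(page *iam.ListSigningCertificatesOutput, lastPage bool) bool {
+ //            // ... use page.Certificates ...
+ //            return true // returning false stops pagination early
+ //        })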
+ Marker *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s ListSigningCertificatesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListSigningCertificatesOutput) GoString() string { + return s.String() +} + +type ListUserPoliciesInput struct { + _ struct{} `type:"structure"` + + // Use this parameter only when paginating results and only after you receive + // a response indicating that the results are truncated. Set it to the value + // of the Marker element in the response that you received to indicate where + // the next call should start. + Marker *string `min:"1" type:"string"` + + // Use this only when paginating results to indicate the maximum number of items + // you want in the response. If additional items exist beyond the maximum you + // specify, the IsTruncated response element is true. + // + // This parameter is optional. If you do not include it, it defaults to 100. + // Note that IAM might return fewer results, even when there are more results + // available. In that case, the IsTruncated response element returns true and + // Marker contains a value to include in the subsequent call that tells the + // service where to continue from. + MaxItems *int64 `min:"1" type:"integer"` + + // The name of the user to list policies for. + UserName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s ListUserPoliciesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListUserPoliciesInput) GoString() string { + return s.String() +} + +// Contains the response to a successful ListUserPolicies request. +type ListUserPoliciesOutput struct { + _ struct{} `type:"structure"` + + // A flag that indicates whether there are more items to return. If your results + // were truncated, you can make a subsequent pagination request using the Marker + // request parameter to retrieve more items. Note that IAM might return fewer + // than the MaxItems number of results even when there are more results available. + // We recommend that you check IsTruncated after every call to ensure that you + // receive all of your results. + IsTruncated *bool `type:"boolean"` + + // When IsTruncated is true, this element is present and contains the value + // to use for the Marker parameter in a subsequent pagination request. + Marker *string `min:"1" type:"string"` + + // A list of policy names. + PolicyNames []*string `type:"list" required:"true"` +} + +// String returns the string representation +func (s ListUserPoliciesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListUserPoliciesOutput) GoString() string { + return s.String() +} + +type ListUsersInput struct { + _ struct{} `type:"structure"` + + // Use this parameter only when paginating results and only after you receive + // a response indicating that the results are truncated. Set it to the value + // of the Marker element in the response that you received to indicate where + // the next call should start. + Marker *string `min:"1" type:"string"` + + // Use this only when paginating results to indicate the maximum number of items + // you want in the response. If additional items exist beyond the maximum you + // specify, the IsTruncated response element is true. + // + // This parameter is optional. If you do not include it, it defaults to 100. 
+ // Note that IAM might return fewer results, even when there are more results
+ // available. In that case, the IsTruncated response element returns true and
+ // Marker contains a value to include in the subsequent call that tells the
+ // service where to continue from.
+ MaxItems *int64 `min:"1" type:"integer"`
+
+ // The path prefix for filtering the results. For example: /division_abc/subdivision_xyz/,
+ // which would get all user names whose path starts with /division_abc/subdivision_xyz/.
+ //
+ // This parameter is optional. If it is not included, it defaults to a slash
+ // (/), listing all user names.
+ PathPrefix *string `min:"1" type:"string"`
+}
+
+// String returns the string representation
+func (s ListUsersInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListUsersInput) GoString() string {
+ return s.String()
+}
+
+// Contains the response to a successful ListUsers request.
+type ListUsersOutput struct {
+ _ struct{} `type:"structure"`
+
+ // A flag that indicates whether there are more items to return. If your results
+ // were truncated, you can make a subsequent pagination request using the Marker
+ // request parameter to retrieve more items. Note that IAM might return fewer
+ // than the MaxItems number of results even when there are more results available.
+ // We recommend that you check IsTruncated after every call to ensure that you
+ // receive all of your results.
+ IsTruncated *bool `type:"boolean"`
+
+ // When IsTruncated is true, this element is present and contains the value
+ // to use for the Marker parameter in a subsequent pagination request.
+ Marker *string `min:"1" type:"string"`
+
+ // A list of users.
+ Users []*User `type:"list" required:"true"`
+}
+
+// String returns the string representation
+func (s ListUsersOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListUsersOutput) GoString() string {
+ return s.String()
+}
+
+type ListVirtualMFADevicesInput struct {
+ _ struct{} `type:"structure"`
+
+ // The status (unassigned or assigned) of the devices to list. If you do not
+ // specify an AssignmentStatus, the action defaults to Any, which lists both
+ // assigned and unassigned virtual MFA devices.
+ AssignmentStatus *string `type:"string" enum:"assignmentStatusType"`
+
+ // Use this parameter only when paginating results and only after you receive
+ // a response indicating that the results are truncated. Set it to the value
+ // of the Marker element in the response that you received to indicate where
+ // the next call should start.
+ Marker *string `min:"1" type:"string"`
+
+ // Use this only when paginating results to indicate the maximum number of items
+ // you want in the response. If additional items exist beyond the maximum you
+ // specify, the IsTruncated response element is true.
+ //
+ // This parameter is optional. If you do not include it, it defaults to 100.
+ // Note that IAM might return fewer results, even when there are more results
+ // available. In that case, the IsTruncated response element returns true and
+ // Marker contains a value to include in the subsequent call that tells the
+ // service where to continue from.
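+ //
+ // Editor's note: a hedged sketch combining AssignmentStatus and MaxItems;
+ // svc is an assumed *iam.IAM client (not part of the generated SDK):
+ //
+ //    out, err := svc.ListVirtualMFADevices(&iam.ListVirtualMFADevicesInput{
+ //        AssignmentStatus: aws.String("Unassigned"),
+ //        MaxItems:         aws.Int64(20),
+ //    })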
+ MaxItems *int64 `min:"1" type:"integer"` +} + +// String returns the string representation +func (s ListVirtualMFADevicesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListVirtualMFADevicesInput) GoString() string { + return s.String() +} + +// Contains the response to a successful ListVirtualMFADevices request. +type ListVirtualMFADevicesOutput struct { + _ struct{} `type:"structure"` + + // A flag that indicates whether there are more items to return. If your results + // were truncated, you can make a subsequent pagination request using the Marker + // request parameter to retrieve more items. Note that IAM might return fewer + // than the MaxItems number of results even when there are more results available. + // We recommend that you check IsTruncated after every call to ensure that you + // receive all of your results. + IsTruncated *bool `type:"boolean"` + + // When IsTruncated is true, this element is present and contains the value + // to use for the Marker parameter in a subsequent pagination request. + Marker *string `min:"1" type:"string"` + + // The list of virtual MFA devices in the current account that match the AssignmentStatus + // value that was passed in the request. + VirtualMFADevices []*VirtualMFADevice `type:"list" required:"true"` +} + +// String returns the string representation +func (s ListVirtualMFADevicesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListVirtualMFADevicesOutput) GoString() string { + return s.String() +} + +// Contains the user name and password create date for a user. +// +// This data type is used as a response element in the CreateLoginProfile +// and GetLoginProfile actions. +type LoginProfile struct { + _ struct{} `type:"structure"` + + // The date when the password for the user was created. + CreateDate *time.Time `type:"timestamp" timestampFormat:"iso8601" required:"true"` + + // Specifies whether the user is required to set a new password on next sign-in. + PasswordResetRequired *bool `type:"boolean"` + + // The name of the user, which can be used for signing in to the AWS Management + // Console. + UserName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s LoginProfile) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s LoginProfile) GoString() string { + return s.String() +} + +// Contains information about an MFA device. +// +// This data type is used as a response element in the ListMFADevices action. +type MFADevice struct { + _ struct{} `type:"structure"` + + // The date when the MFA device was enabled for the user. + EnableDate *time.Time `type:"timestamp" timestampFormat:"iso8601" required:"true"` + + // The serial number that uniquely identifies the MFA device. For virtual MFA + // devices, the serial number is the device ARN. + SerialNumber *string `min:"9" type:"string" required:"true"` + + // The user with whom the MFA device is associated. 
+ UserName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s MFADevice) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s MFADevice) GoString() string { + return s.String() +} + +// Contains information about a managed policy, including the policy's ARN, +// versions, and the number of principal entities (users, groups, and roles) +// that the policy is attached to. +// +// This data type is used as a response element in the GetAccountAuthorizationDetails +// action. +// +// For more information about managed policies, see Managed Policies and Inline +// Policies (http://docs.aws.amazon.com/IAM/latest/UserGuide/policies-managed-vs-inline.html) +// in the Using IAM guide. +type ManagedPolicyDetail struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN). ARNs are unique identifiers for AWS resources. + // + // For more information about ARNs, go to Amazon Resource Names (ARNs) and + // AWS Service Namespaces (http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) + // in the AWS General Reference. + Arn *string `min:"20" type:"string"` + + // The number of principal entities (users, groups, and roles) that the policy + // is attached to. + AttachmentCount *int64 `type:"integer"` + + // The date and time, in ISO 8601 date-time format (http://www.iso.org/iso/iso8601), + // when the policy was created. + CreateDate *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + // The identifier for the version of the policy that is set as the default (operative) + // version. + // + // For more information about policy versions, see Versioning for Managed Policies + // (http://docs.aws.amazon.com/IAM/latest/UserGuide/policies-managed-versions.html) + // in the Using IAM guide. + DefaultVersionId *string `type:"string"` + + // A friendly description of the policy. + Description *string `type:"string"` + + // Specifies whether the policy can be attached to an IAM user, group, or role. + IsAttachable *bool `type:"boolean"` + + // The path to the policy. + // + // For more information about paths, see IAM Identifiers (http://docs.aws.amazon.com/IAM/latest/UserGuide/Using_Identifiers.html) + // in the Using IAM guide. + Path *string `type:"string"` + + // The stable and unique string identifying the policy. + // + // For more information about IDs, see IAM Identifiers (http://docs.aws.amazon.com/IAM/latest/UserGuide/Using_Identifiers.html) + // in the Using IAM guide. + PolicyId *string `min:"16" type:"string"` + + // The friendly name (not ARN) identifying the policy. + PolicyName *string `min:"1" type:"string"` + + // A list containing information about the versions of the policy. + PolicyVersionList []*PolicyVersion `type:"list"` + + // The date and time, in ISO 8601 date-time format (http://www.iso.org/iso/iso8601), + // when the policy was last updated. + // + // When a policy has only one version, this field contains the date and time + // when the policy was created. When a policy has more than one version, this + // field contains the date and time when the most recent policy version was + // created. 
+ UpdateDate *time.Time `type:"timestamp" timestampFormat:"iso8601"`
+}
+
+// String returns the string representation
+func (s ManagedPolicyDetail) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ManagedPolicyDetail) GoString() string {
+ return s.String()
+}
+
+// Contains the Amazon Resource Name (ARN) for an IAM OpenID Connect provider.
+type OpenIDConnectProviderListEntry struct {
+ _ struct{} `type:"structure"`
+
+ // The Amazon Resource Name (ARN). ARNs are unique identifiers for AWS resources.
+ //
+ // For more information about ARNs, go to Amazon Resource Names (ARNs) and
+ // AWS Service Namespaces (http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html)
+ // in the AWS General Reference.
+ Arn *string `min:"20" type:"string"`
+}
+
+// String returns the string representation
+func (s OpenIDConnectProviderListEntry) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s OpenIDConnectProviderListEntry) GoString() string {
+ return s.String()
+}
+
+// Contains information about the account password policy.
+//
+// This data type is used as a response element in the GetAccountPasswordPolicy
+// action.
+type PasswordPolicy struct {
+ _ struct{} `type:"structure"`
+
+ // Specifies whether IAM users are allowed to change their own password.
+ AllowUsersToChangePassword *bool `type:"boolean"`
+
+ // Indicates whether passwords in the account expire. Returns true if MaxPasswordAge
+ // contains a value greater than 0. Returns false if MaxPasswordAge is 0 or
+ // not present.
+ ExpirePasswords *bool `type:"boolean"`
+
+ // Specifies whether IAM users are prevented from setting a new password after
+ // their password has expired.
+ HardExpiry *bool `type:"boolean"`
+
+ // The number of days that an IAM user password is valid.
+ MaxPasswordAge *int64 `min:"1" type:"integer"`
+
+ // The minimum length required for IAM user passwords.
+ MinimumPasswordLength *int64 `min:"6" type:"integer"`
+
+ // Specifies the number of previous passwords that IAM users are prevented from
+ // reusing.
+ PasswordReusePrevention *int64 `min:"1" type:"integer"`
+
+ // Specifies whether to require lowercase characters for IAM user passwords.
+ RequireLowercaseCharacters *bool `type:"boolean"`
+
+ // Specifies whether to require numbers for IAM user passwords.
+ RequireNumbers *bool `type:"boolean"`
+
+ // Specifies whether to require symbols for IAM user passwords.
+ RequireSymbols *bool `type:"boolean"`
+
+ // Specifies whether to require uppercase characters for IAM user passwords.
+ RequireUppercaseCharacters *bool `type:"boolean"`
+}
+
+// String returns the string representation
+func (s PasswordPolicy) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s PasswordPolicy) GoString() string {
+ return s.String()
+}
+
+// Contains information about a managed policy.
+//
+// This data type is used as a response element in the CreatePolicy, GetPolicy,
+// and ListPolicies actions.
+//
+// For more information about managed policies, refer to Managed Policies and
+// Inline Policies (http://docs.aws.amazon.com/IAM/latest/UserGuide/policies-managed-vs-inline.html)
+// in the Using IAM guide.
+type Policy struct {
+ _ struct{} `type:"structure"`
+
+ // The Amazon Resource Name (ARN). ARNs are unique identifiers for AWS resources.
+ // + // For more information about ARNs, go to Amazon Resource Names (ARNs) and + // AWS Service Namespaces (http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) + // in the AWS General Reference. + Arn *string `min:"20" type:"string"` + + // The number of entities (users, groups, and roles) that the policy is attached + // to. + AttachmentCount *int64 `type:"integer"` + + // The date and time, in ISO 8601 date-time format (http://www.iso.org/iso/iso8601), + // when the policy was created. + CreateDate *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + // The identifier for the version of the policy that is set as the default version. + DefaultVersionId *string `type:"string"` + + // A friendly description of the policy. + // + // This element is included in the response to the GetPolicy operation. It + // is not included in the response to the ListPolicies operation. + Description *string `type:"string"` + + // Specifies whether the policy can be attached to an IAM user, group, or role. + IsAttachable *bool `type:"boolean"` + + // The path to the policy. + // + // For more information about paths, see IAM Identifiers (http://docs.aws.amazon.com/IAM/latest/UserGuide/Using_Identifiers.html) + // in the Using IAM guide. + Path *string `type:"string"` + + // The stable and unique string identifying the policy. + // + // For more information about IDs, see IAM Identifiers (http://docs.aws.amazon.com/IAM/latest/UserGuide/Using_Identifiers.html) + // in the Using IAM guide. + PolicyId *string `min:"16" type:"string"` + + // The friendly name (not ARN) identifying the policy. + PolicyName *string `min:"1" type:"string"` + + // The date and time, in ISO 8601 date-time format (http://www.iso.org/iso/iso8601), + // when the policy was last updated. + // + // When a policy has only one version, this field contains the date and time + // when the policy was created. When a policy has more than one version, this + // field contains the date and time when the most recent policy version was + // created. + UpdateDate *time.Time `type:"timestamp" timestampFormat:"iso8601"` +} + +// String returns the string representation +func (s Policy) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Policy) GoString() string { + return s.String() +} + +// Contains information about an IAM policy, including the policy document. +// +// This data type is used as a response element in the GetAccountAuthorizationDetails +// action. +type PolicyDetail struct { + _ struct{} `type:"structure"` + + // The policy document. + PolicyDocument *string `min:"1" type:"string"` + + // The name of the policy. + PolicyName *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s PolicyDetail) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PolicyDetail) GoString() string { + return s.String() +} + +// Contains information about a group that a managed policy is attached to. +// +// This data type is used as a response element in the ListEntitiesForPolicy +// action. +// +// For more information about managed policies, refer to Managed Policies and +// Inline Policies (http://docs.aws.amazon.com/IAM/latest/UserGuide/policies-managed-vs-inline.html) +// in the Using IAM guide. +type PolicyGroup struct { + _ struct{} `type:"structure"` + + // The name (friendly name, not ARN) identifying the group. 
+ GroupName *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s PolicyGroup) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PolicyGroup) GoString() string { + return s.String() +} + +// Contains information about a role that a managed policy is attached to. +// +// This data type is used as a response element in the ListEntitiesForPolicy +// action. +// +// For more information about managed policies, refer to Managed Policies and +// Inline Policies (http://docs.aws.amazon.com/IAM/latest/UserGuide/policies-managed-vs-inline.html) +// in the Using IAM guide. +type PolicyRole struct { + _ struct{} `type:"structure"` + + // The name (friendly name, not ARN) identifying the role. + RoleName *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s PolicyRole) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PolicyRole) GoString() string { + return s.String() +} + +// Contains information about a user that a managed policy is attached to. +// +// This data type is used as a response element in the ListEntitiesForPolicy +// action. +// +// For more information about managed policies, refer to Managed Policies and +// Inline Policies (http://docs.aws.amazon.com/IAM/latest/UserGuide/policies-managed-vs-inline.html) +// in the Using IAM guide. +type PolicyUser struct { + _ struct{} `type:"structure"` + + // The name (friendly name, not ARN) identifying the user. + UserName *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s PolicyUser) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PolicyUser) GoString() string { + return s.String() +} + +// Contains information about a version of a managed policy. +// +// This data type is used as a response element in the CreatePolicyVersion, +// GetPolicyVersion, ListPolicyVersions, and GetAccountAuthorizationDetails +// actions. +// +// For more information about managed policies, refer to Managed Policies and +// Inline Policies (http://docs.aws.amazon.com/IAM/latest/UserGuide/policies-managed-vs-inline.html) +// in the Using IAM guide. +type PolicyVersion struct { + _ struct{} `type:"structure"` + + // The date and time, in ISO 8601 date-time format (http://www.iso.org/iso/iso8601), + // when the policy version was created. + CreateDate *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + // The policy document. + // + // The policy document is returned in the response to the GetPolicyVersion + // and GetAccountAuthorizationDetails operations. It is not returned in the + // response to the CreatePolicyVersion or ListPolicyVersions operations. + Document *string `min:"1" type:"string"` + + // Specifies whether the policy version is set as the policy's default version. + IsDefaultVersion *bool `type:"boolean"` + + // The identifier for the policy version. + // + // Policy version identifiers always begin with v (always lowercase). When + // a policy is created, the first policy version is v1. + VersionId *string `type:"string"` +} + +// String returns the string representation +func (s PolicyVersion) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PolicyVersion) GoString() string { + return s.String() +} + +// Contains the row and column of a location of a Statement element in a policy +// document. 
+// +// This data type is used as a member of the Statement type. +type Position struct { + _ struct{} `type:"structure"` + + // The column in the line containing the specified position in the document. + Column *int64 `type:"integer"` + + // The line containing the specified position in the document. + Line *int64 `type:"integer"` +} + +// String returns the string representation +func (s Position) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Position) GoString() string { + return s.String() +} + +type PutGroupPolicyInput struct { + _ struct{} `type:"structure"` + + // The name of the group to associate the policy with. + GroupName *string `min:"1" type:"string" required:"true"` + + // The policy document. + PolicyDocument *string `min:"1" type:"string" required:"true"` + + // The name of the policy document. + PolicyName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s PutGroupPolicyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutGroupPolicyInput) GoString() string { + return s.String() +} + +type PutGroupPolicyOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s PutGroupPolicyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutGroupPolicyOutput) GoString() string { + return s.String() +} + +type PutRolePolicyInput struct { + _ struct{} `type:"structure"` + + // The policy document. + PolicyDocument *string `min:"1" type:"string" required:"true"` + + // The name of the policy document. + PolicyName *string `min:"1" type:"string" required:"true"` + + // The name of the role to associate the policy with. + RoleName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s PutRolePolicyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutRolePolicyInput) GoString() string { + return s.String() +} + +type PutRolePolicyOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s PutRolePolicyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutRolePolicyOutput) GoString() string { + return s.String() +} + +type PutUserPolicyInput struct { + _ struct{} `type:"structure"` + + // The policy document. + PolicyDocument *string `min:"1" type:"string" required:"true"` + + // The name of the policy document. + PolicyName *string `min:"1" type:"string" required:"true"` + + // The name of the user to associate the policy with. 
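+ //
+ // Editor's note: a hedged sketch of putting an inline user policy; svc is
+ // an assumed *iam.IAM client and the user, policy name, and document are
+ // placeholders (not part of the generated SDK):
+ //
+ //    doc := `{"Version":"2012-10-17","Statement":[{"Effect":"Allow","Action":"s3:ListBucket","Resource":"*"}]}`
+ //    _, err := svc.PutUserPolicy(&iam.PutUserPolicyInput{
+ //        UserName:       aws.String("alice"),
+ //        PolicyName:     aws.String("s3-list"),
+ //        PolicyDocument: aws.String(doc),
+ //    })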
+ UserName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s PutUserPolicyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutUserPolicyInput) GoString() string { + return s.String() +} + +type PutUserPolicyOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s PutUserPolicyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutUserPolicyOutput) GoString() string { + return s.String() +} + +type RemoveClientIDFromOpenIDConnectProviderInput struct { + _ struct{} `type:"structure"` + + // The client ID (also known as audience) to remove from the IAM OpenID Connect + // provider. For more information about client IDs, see CreateOpenIDConnectProvider. + ClientID *string `min:"1" type:"string" required:"true"` + + // The Amazon Resource Name (ARN) of the IAM OpenID Connect (OIDC) provider + // to remove the client ID from. You can get a list of OIDC provider ARNs by + // using the ListOpenIDConnectProviders action. + OpenIDConnectProviderArn *string `min:"20" type:"string" required:"true"` +} + +// String returns the string representation +func (s RemoveClientIDFromOpenIDConnectProviderInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RemoveClientIDFromOpenIDConnectProviderInput) GoString() string { + return s.String() +} + +type RemoveClientIDFromOpenIDConnectProviderOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s RemoveClientIDFromOpenIDConnectProviderOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RemoveClientIDFromOpenIDConnectProviderOutput) GoString() string { + return s.String() +} + +type RemoveRoleFromInstanceProfileInput struct { + _ struct{} `type:"structure"` + + // The name of the instance profile to update. + InstanceProfileName *string `min:"1" type:"string" required:"true"` + + // The name of the role to remove. + RoleName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s RemoveRoleFromInstanceProfileInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RemoveRoleFromInstanceProfileInput) GoString() string { + return s.String() +} + +type RemoveRoleFromInstanceProfileOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s RemoveRoleFromInstanceProfileOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RemoveRoleFromInstanceProfileOutput) GoString() string { + return s.String() +} + +type RemoveUserFromGroupInput struct { + _ struct{} `type:"structure"` + + // The name of the group to update. + GroupName *string `min:"1" type:"string" required:"true"` + + // The name of the user to remove. 
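+ //
+ // Editor's note: a minimal sketch; svc is an assumed *iam.IAM client and the
+ // group and user names are placeholders (not part of the generated SDK):
+ //
+ //    _, err := svc.RemoveUserFromGroup(&iam.RemoveUserFromGroupInput{
+ //        GroupName: aws.String("admins"),
+ //        UserName:  aws.String("alice"),
+ //    })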
+ UserName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s RemoveUserFromGroupInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RemoveUserFromGroupInput) GoString() string { + return s.String() +} + +type RemoveUserFromGroupOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s RemoveUserFromGroupOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RemoveUserFromGroupOutput) GoString() string { + return s.String() +} + +// Contains the result of the simulation of a single API action call on a single +// resource. +// +// This data type is used by a member of the EvaluationResult data type. +type ResourceSpecificResult struct { + _ struct{} `type:"structure"` + + // Additional details about the results of the evaluation decision. When there + // are both IAM policies and resource policies, this parameter explains how + // each set of policies contributes to the final evaluation decision. When simulating + // cross-account access to a resource, both the resource-based policy and the + // caller's IAM policy must grant access. + EvalDecisionDetails map[string]*string `type:"map"` + + // The result of the simulation of the simulated API action on the resource + // specified in EvalResourceName. + EvalResourceDecision *string `type:"string" required:"true" enum:"PolicyEvaluationDecisionType"` + + // The name of the simulated resource, in Amazon Resource Name (ARN) format. + EvalResourceName *string `min:"1" type:"string" required:"true"` + + // A list of the statements in the input policies that determine the result + // for this part of the simulation. Remember that even if multiple statements + // allow the action on the resource, if any statement denies that action, then + // the explicit deny overrides any allow, and the deny statement is the only + // entry included in the result. + MatchedStatements []*Statement `type:"list"` + + // A list of context keys that are required by the included input policies but + // that were not provided by one of the input parameters. To discover the context + // keys used by a set of policies, you can call GetContextKeysForCustomPolicy + // or GetContextKeysForPrincipalPolicy. + MissingContextValues []*string `type:"list"` +} + +// String returns the string representation +func (s ResourceSpecificResult) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ResourceSpecificResult) GoString() string { + return s.String() +} + +type ResyncMFADeviceInput struct { + _ struct{} `type:"structure"` + + // An authentication code emitted by the device. + AuthenticationCode1 *string `min:"6" type:"string" required:"true"` + + // A subsequent authentication code emitted by the device. + AuthenticationCode2 *string `min:"6" type:"string" required:"true"` + + // Serial number that uniquely identifies the MFA device. + SerialNumber *string `min:"9" type:"string" required:"true"` + + // The name of the user whose MFA device you want to resynchronize. 
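+ //
+ // Editor's note: a hedged sketch; svc is an assumed *iam.IAM client, the
+ // serial number and codes are placeholders, and the two codes should be
+ // consecutive codes from the device (not part of the generated SDK):
+ //
+ //    _, err := svc.ResyncMFADevice(&iam.ResyncMFADeviceInput{
+ //        UserName:            aws.String("alice"),
+ //        SerialNumber:        aws.String("arn:aws:iam::123456789012:mfa/alice"),
+ //        AuthenticationCode1: aws.String("123456"),
+ //        AuthenticationCode2: aws.String("789012"),
+ //    })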
+ UserName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s ResyncMFADeviceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ResyncMFADeviceInput) GoString() string { + return s.String() +} + +type ResyncMFADeviceOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s ResyncMFADeviceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ResyncMFADeviceOutput) GoString() string { + return s.String() +} + +// Contains information about an IAM role. +// +// This data type is used as a response element in the following actions: +// +// CreateRole +// +// GetRole +// +// ListRoles +type Role struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) specifying the role. For more information + // about ARNs and how to use them in policies, see IAM Identifiers (http://docs.aws.amazon.com/IAM/latest/UserGuide/Using_Identifiers.html) + // in the Using IAM guide. + Arn *string `min:"20" type:"string" required:"true"` + + // The policy that grants an entity permission to assume the role. + AssumeRolePolicyDocument *string `min:"1" type:"string"` + + // The date and time, in ISO 8601 date-time format (http://www.iso.org/iso/iso8601), + // when the role was created. + CreateDate *time.Time `type:"timestamp" timestampFormat:"iso8601" required:"true"` + + // The path to the role. For more information about paths, see IAM Identifiers + // (http://docs.aws.amazon.com/IAM/latest/UserGuide/Using_Identifiers.html) + // in the Using IAM guide. + Path *string `min:"1" type:"string" required:"true"` + + // The stable and unique string identifying the role. For more information about + // IDs, see IAM Identifiers (http://docs.aws.amazon.com/IAM/latest/UserGuide/Using_Identifiers.html) + // in the Using IAM guide. + RoleId *string `min:"16" type:"string" required:"true"` + + // The friendly name that identifies the role. + RoleName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s Role) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Role) GoString() string { + return s.String() +} + +// Contains information about an IAM role, including all of the role's policies. +// +// This data type is used as a response element in the GetAccountAuthorizationDetails +// action. +type RoleDetail struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN). ARNs are unique identifiers for AWS resources. + // + // For more information about ARNs, go to Amazon Resource Names (ARNs) and + // AWS Service Namespaces (http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) + // in the AWS General Reference. + Arn *string `min:"20" type:"string"` + + // The trust policy that grants permission to assume the role. + AssumeRolePolicyDocument *string `min:"1" type:"string"` + + // A list of managed policies attached to the role. These policies are the role's + // access (permissions) policies. + AttachedManagedPolicies []*AttachedPolicy `type:"list"` + + // The date and time, in ISO 8601 date-time format (http://www.iso.org/iso/iso8601), + // when the role was created. + CreateDate *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + // Contains a list of instance profiles. 
+ InstanceProfileList []*InstanceProfile `type:"list"` + + // The path to the role. For more information about paths, see IAM Identifiers + // (http://docs.aws.amazon.com/IAM/latest/UserGuide/Using_Identifiers.html) + // in the Using IAM guide. + Path *string `min:"1" type:"string"` + + // The stable and unique string identifying the role. For more information about + // IDs, see IAM Identifiers (http://docs.aws.amazon.com/IAM/latest/UserGuide/Using_Identifiers.html) + // in the Using IAM guide. + RoleId *string `min:"16" type:"string"` + + // The friendly name that identifies the role. + RoleName *string `min:"1" type:"string"` + + // A list of inline policies embedded in the role. These policies are the role's + // access (permissions) policies. + RolePolicyList []*PolicyDetail `type:"list"` +} + +// String returns the string representation +func (s RoleDetail) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RoleDetail) GoString() string { + return s.String() +} + +// Contains the list of SAML providers for this account. +type SAMLProviderListEntry struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the SAML provider. + Arn *string `min:"20" type:"string"` + + // The date and time when the SAML provider was created. + CreateDate *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + // The expiration date and time for the SAML provider. + ValidUntil *time.Time `type:"timestamp" timestampFormat:"iso8601"` +} + +// String returns the string representation +func (s SAMLProviderListEntry) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SAMLProviderListEntry) GoString() string { + return s.String() +} + +// Contains information about an SSH public key. +// +// This data type is used as a response element in the GetSSHPublicKey and +// UploadSSHPublicKey actions. +type SSHPublicKey struct { + _ struct{} `type:"structure"` + + // The MD5 message digest of the SSH public key. + Fingerprint *string `min:"48" type:"string" required:"true"` + + // The SSH public key. + SSHPublicKeyBody *string `min:"1" type:"string" required:"true"` + + // The unique identifier for the SSH public key. + SSHPublicKeyId *string `min:"20" type:"string" required:"true"` + + // The status of the SSH public key. Active means the key can be used for authentication + // with an AWS CodeCommit repository. Inactive means the key cannot be used. + Status *string `type:"string" required:"true" enum:"statusType"` + + // The date and time, in ISO 8601 date-time format (http://www.iso.org/iso/iso8601), + // when the SSH public key was uploaded. + UploadDate *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + // The name of the IAM user associated with the SSH public key. + UserName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s SSHPublicKey) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SSHPublicKey) GoString() string { + return s.String() +} + +// Contains information about an SSH public key, without the key's body or fingerprint. +// +// This data type is used as a response element in the ListSSHPublicKeys action. +type SSHPublicKeyMetadata struct { + _ struct{} `type:"structure"` + + // The unique identifier for the SSH public key. + SSHPublicKeyId *string `min:"20" type:"string" required:"true"` + + // The status of the SSH public key. 
Active means the key can be used for authentication + // with an AWS CodeCommit repository. Inactive means the key cannot be used. + Status *string `type:"string" required:"true" enum:"statusType"` + + // The date and time, in ISO 8601 date-time format (http://www.iso.org/iso/iso8601), + // when the SSH public key was uploaded. + UploadDate *time.Time `type:"timestamp" timestampFormat:"iso8601" required:"true"` + + // The name of the IAM user associated with the SSH public key. + UserName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s SSHPublicKeyMetadata) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SSHPublicKeyMetadata) GoString() string { + return s.String() +} + +// Contains information about a server certificate. +// +// This data type is used as a response element in the GetServerCertificate +// action. +type ServerCertificate struct { + _ struct{} `type:"structure"` + + // The contents of the public key certificate. + CertificateBody *string `min:"1" type:"string" required:"true"` + + // The contents of the public key certificate chain. + CertificateChain *string `min:"1" type:"string"` + + // The meta information of the server certificate, such as its name, path, ID, + // and ARN. + ServerCertificateMetadata *ServerCertificateMetadata `type:"structure" required:"true"` +} + +// String returns the string representation +func (s ServerCertificate) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ServerCertificate) GoString() string { + return s.String() +} + +// Contains information about a server certificate without its certificate body, +// certificate chain, and private key. +// +// This data type is used as a response element in the UploadServerCertificate +// and ListServerCertificates actions. +type ServerCertificateMetadata struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) specifying the server certificate. For more + // information about ARNs and how to use them in policies, see IAM Identifiers + // (http://docs.aws.amazon.com/IAM/latest/UserGuide/Using_Identifiers.html) + // in the Using IAM guide. + Arn *string `min:"20" type:"string" required:"true"` + + // The date on which the certificate is set to expire. + Expiration *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + // The path to the server certificate. For more information about paths, see + // IAM Identifiers (http://docs.aws.amazon.com/IAM/latest/UserGuide/Using_Identifiers.html) + // in the Using IAM guide. + Path *string `min:"1" type:"string" required:"true"` + + // The stable and unique string identifying the server certificate. For more + // information about IDs, see IAM Identifiers (http://docs.aws.amazon.com/IAM/latest/UserGuide/Using_Identifiers.html) + // in the Using IAM guide. + ServerCertificateId *string `min:"16" type:"string" required:"true"` + + // The name that identifies the server certificate. + ServerCertificateName *string `min:"1" type:"string" required:"true"` + + // The date when the server certificate was uploaded. 
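+	// Like the other timestamps in this API, the value is serialized in ISO
+	// 8601 date-time format, as indicated by the field's timestampFormat tag.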
+ UploadDate *time.Time `type:"timestamp" timestampFormat:"iso8601"` +} + +// String returns the string representation +func (s ServerCertificateMetadata) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ServerCertificateMetadata) GoString() string { + return s.String() +} + +type SetDefaultPolicyVersionInput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN). ARNs are unique identifiers for AWS resources. + // + // For more information about ARNs, go to Amazon Resource Names (ARNs) and + // AWS Service Namespaces (http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) + // in the AWS General Reference. + PolicyArn *string `min:"20" type:"string" required:"true"` + + // The version of the policy to set as the default (operative) version. + // + // For more information about managed policy versions, see Versioning for Managed + // Policies (http://docs.aws.amazon.com/IAM/latest/UserGuide/policies-managed-versions.html) + // in the IAM User Guide. + VersionId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s SetDefaultPolicyVersionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SetDefaultPolicyVersionInput) GoString() string { + return s.String() +} + +type SetDefaultPolicyVersionOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s SetDefaultPolicyVersionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SetDefaultPolicyVersionOutput) GoString() string { + return s.String() +} + +// Contains information about an X.509 signing certificate. +// +// This data type is used as a response element in the UploadSigningCertificate +// and ListSigningCertificates actions. +type SigningCertificate struct { + _ struct{} `type:"structure"` + + // The contents of the signing certificate. + CertificateBody *string `min:"1" type:"string" required:"true"` + + // The ID for the signing certificate. + CertificateId *string `min:"24" type:"string" required:"true"` + + // The status of the signing certificate. Active means the key is valid for + // API calls, while Inactive means it is not. + Status *string `type:"string" required:"true" enum:"statusType"` + + // The date when the signing certificate was uploaded. + UploadDate *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + // The name of the user the signing certificate is associated with. + UserName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s SigningCertificate) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SigningCertificate) GoString() string { + return s.String() +} + +type SimulateCustomPolicyInput struct { + _ struct{} `type:"structure"` + + // A list of names of API actions to evaluate in the simulation. Each action + // is evaluated against each resource. Each action must include the service + // identifier, such as iam:CreateUser. + ActionNames []*string `type:"list" required:"true"` + + // The ARN of the user that you want to use as the simulated caller of the APIs. + // CallerArn is required if you include a ResourcePolicy so that the policy's + // Principal element has a value to use in evaluating the policy. + // + // You can specify only the ARN of an IAM user. 
You cannot specify the ARN + // of an assumed role, federated user, or a service principal. + CallerArn *string `min:"1" type:"string"` + + // A list of context keys and corresponding values for the simulation to use. + // Whenever a context key is evaluated by a Condition element in one of the + // simulated IAM permission policies, the corresponding value is supplied. + ContextEntries []*ContextEntry `type:"list"` + + // Use this parameter only when paginating results and only after you receive + // a response indicating that the results are truncated. Set it to the value + // of the Marker element in the response that you received to indicate where + // the next call should start. + Marker *string `min:"1" type:"string"` + + // Use this only when paginating results to indicate the maximum number of items + // you want in the response. If additional items exist beyond the maximum you + // specify, the IsTruncated response element is true. + // + // This parameter is optional. If you do not include it, it defaults to 100. + // Note that IAM might return fewer results, even when there are more results + // available. In that case, the IsTruncated response element returns true and + // Marker contains a value to include in the subsequent call that tells the + // service where to continue from. + MaxItems *int64 `min:"1" type:"integer"` + + // A list of policy documents to include in the simulation. Each document is + // specified as a string containing the complete, valid JSON text of an IAM + // policy. Do not include any resource-based policies in this parameter. Any + // resource-based policy must be submitted with the ResourcePolicy parameter. + // The policies cannot be "scope-down" policies, such as you could include in + // a call to GetFederationToken (http://docs.aws.amazon.com/IAM/latest/APIReference/API_GetFederationToken.html) + // or one of the AssumeRole (http://docs.aws.amazon.com/IAM/latest/APIReference/API_AssumeRole.html) + // APIs to restrict what a user can do while using the temporary credentials. + PolicyInputList []*string `type:"list" required:"true"` + + // A list of ARNs of AWS resources to include in the simulation. If this parameter + // is not provided then the value defaults to * (all resources). Each API in + // the ActionNames parameter is evaluated for each resource in this list. The + // simulation determines the access result (allowed or denied) of each combination + // and reports it in the response. + // + // The simulation does not automatically retrieve policies for the specified + // resources. If you want to include a resource policy in the simulation, then + // you must include the policy as a string in the ResourcePolicy parameter. + // + // If you include a ResourcePolicy, then it must be applicable to all of the + // resources included in the simulation or you receive an invalid input error. + ResourceArns []*string `type:"list"` + + // Specifies the type of simulation to run. Different APIs that support resource-based + // policies require different combinations of resources. By specifying the type + // of simulation to run, you enable the policy simulator to enforce the presence + // of the required resources to ensure reliable simulation results. If your + // simulation does not match one of the following scenarios, then you can omit + // this parameter. The following list shows each of the supported scenario values + // and the resources that you must define to run the simulation. 
+	//
+	// Each of the EC2 scenarios requires that you specify instance, image, and
+	// security-group resources. If your scenario includes an EBS volume, then you
+	// must specify that volume as a resource. If the EC2 scenario includes VPC,
+	// then you must supply the network-interface resource. If it includes an IP
+	// subnet, then you must specify the subnet resource. For more information on
+	// the EC2 scenario options, see Supported Platforms (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-supported-platforms.html)
+	// in the AWS EC2 User Guide.
+	//
+	// EC2-Classic-InstanceStore
+	//
+	// instance, image, security-group
+	//
+	// EC2-Classic-EBS
+	//
+	// instance, image, security-group, volume
+	//
+	// EC2-VPC-InstanceStore
+	//
+	// instance, image, security-group, network-interface
+	//
+	// EC2-VPC-InstanceStore-Subnet
+	//
+	// instance, image, security-group, network-interface, subnet
+	//
+	// EC2-VPC-EBS
+	//
+	// instance, image, security-group, network-interface, volume
+	//
+	// EC2-VPC-EBS-Subnet
+	//
+	// instance, image, security-group, network-interface, subnet, volume
+	ResourceHandlingOption *string `min:"1" type:"string"`
+
+	// An AWS account ID that specifies the owner of any simulated resource that
+	// does not identify its owner in the resource ARN, such as an S3 bucket or
+	// object. If ResourceOwner is specified, it is also used as the account owner
+	// of any ResourcePolicy included in the simulation. If the ResourceOwner parameter
+	// is not specified, then the owner of the resources and the resource policy
+	// defaults to the account of the identity provided in CallerArn. This parameter
+	// is required only if you specify a resource-based policy and the account that
+	// owns the resource is different from the account that owns the simulated calling
+	// user (CallerArn).
+	ResourceOwner *string `min:"1" type:"string"`
+
+	// A resource-based policy to include in the simulation provided as a string.
+	// Each resource in the simulation is treated as if it had this policy attached.
+	// You can include only one resource-based policy in a simulation.
+	ResourcePolicy *string `min:"1" type:"string"`
+}
+
+// String returns the string representation
+func (s SimulateCustomPolicyInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s SimulateCustomPolicyInput) GoString() string {
+	return s.String()
+}
+
+// Contains the response to a successful SimulatePrincipalPolicy or SimulateCustomPolicy
+// request.
+type SimulatePolicyResponse struct {
+	_ struct{} `type:"structure"`
+
+	// The results of the simulation.
+	EvaluationResults []*EvaluationResult `type:"list"`
+
+	// A flag that indicates whether there are more items to return. If your results
+	// were truncated, you can make a subsequent pagination request using the Marker
+	// request parameter to retrieve more items. Note that IAM might return fewer
+	// than the MaxItems number of results even when there are more results available.
+	// We recommend that you check IsTruncated after every call to ensure that you
+	// receive all of your results.
+	IsTruncated *bool `type:"boolean"`
+
+	// When IsTruncated is true, this element is present and contains the value
+	// to use for the Marker parameter in a subsequent pagination request.
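+	//
+	// A minimal, hypothetical pagination sketch (svc is an *iam.IAM client;
+	// error handling elided for brevity):
+	//
+	//	params := &iam.SimulateCustomPolicyInput{ /* ... */ }
+	//	for {
+	//		resp, _ := svc.SimulateCustomPolicy(params)
+	//		// ... consume resp.EvaluationResults ...
+	//		if resp.IsTruncated == nil || !*resp.IsTruncated {
+	//			break
+	//		}
+	//		params.Marker = resp.Marker
+	//	}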
+ Marker *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s SimulatePolicyResponse) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SimulatePolicyResponse) GoString() string { + return s.String() +} + +type SimulatePrincipalPolicyInput struct { + _ struct{} `type:"structure"` + + // A list of names of API actions to evaluate in the simulation. Each action + // is evaluated for each resource. Each action must include the service identifier, + // such as iam:CreateUser. + ActionNames []*string `type:"list" required:"true"` + + // The ARN of the user that you want to specify as the simulated caller of the + // APIs. If you do not specify a CallerArn, it defaults to the ARN of the user + // that you specify in PolicySourceArn, if you specified a user. If you include + // both a PolicySourceArn (for example, arn:aws:iam::123456789012:user/David) + // and a CallerArn (for example, arn:aws:iam::123456789012:user/Bob), the result + // is that you simulate calling the APIs as Bob, as if Bob had David's policies. + // + // You can specify only the ARN of an IAM user. You cannot specify the ARN + // of an assumed role, federated user, or a service principal. + // + // CallerArn is required if you include a ResourcePolicy and the PolicySourceArn + // is not the ARN for an IAM user. This is required so that the resource-based + // policy's Principal element has a value to use in evaluating the policy. + CallerArn *string `min:"1" type:"string"` + + // A list of context keys and corresponding values for the simulation to use. + // Whenever a context key is evaluated by a Condition element in one of the + // simulated policies, the corresponding value is supplied. + ContextEntries []*ContextEntry `type:"list"` + + // Use this parameter only when paginating results and only after you receive + // a response indicating that the results are truncated. Set it to the value + // of the Marker element in the response that you received to indicate where + // the next call should start. + Marker *string `min:"1" type:"string"` + + // Use this only when paginating results to indicate the maximum number of items + // you want in the response. If additional items exist beyond the maximum you + // specify, the IsTruncated response element is true. + // + // This parameter is optional. If you do not include it, it defaults to 100. + // Note that IAM might return fewer results, even when there are more results + // available. In that case, the IsTruncated response element returns true and + // Marker contains a value to include in the subsequent call that tells the + // service where to continue from. + MaxItems *int64 `min:"1" type:"integer"` + + // An optional list of additional policy documents to include in the simulation. + // Each document is specified as a string containing the complete, valid JSON + // text of an IAM policy. + PolicyInputList []*string `type:"list"` + + // The Amazon Resource Name (ARN) of a user, group, or role whose policies you + // want to include in the simulation. If you specify a user, group, or role, + // the simulation includes all policies that are associated with that entity. + // If you specify a user, the simulation also includes all policies that are + // attached to any groups the user belongs to. + PolicySourceArn *string `min:"20" type:"string" required:"true"` + + // A list of ARNs of AWS resources to include in the simulation. 
If this parameter
+	// is not provided then the value defaults to * (all resources). Each API in
+	// the ActionNames parameter is evaluated for each resource in this list. The
+	// simulation determines the access result (allowed or denied) of each combination
+	// and reports it in the response.
+	//
+	// The simulation does not automatically retrieve policies for the specified
+	// resources. If you want to include a resource policy in the simulation, then
+	// you must include the policy as a string in the ResourcePolicy parameter.
+	ResourceArns []*string `type:"list"`
+
+	// Specifies the type of simulation to run. Different APIs that support resource-based
+	// policies require different combinations of resources. By specifying the type
+	// of simulation to run, you enable the policy simulator to enforce the presence
+	// of the required resources to ensure reliable simulation results. If your
+	// simulation does not match one of the following scenarios, then you can omit
+	// this parameter. The following list shows each of the supported scenario values
+	// and the resources that you must define to run the simulation.
+	//
+	// Each of the EC2 scenarios requires that you specify instance, image, and
+	// security-group resources. If your scenario includes an EBS volume, then you
+	// must specify that volume as a resource. If the EC2 scenario includes VPC,
+	// then you must supply the network-interface resource. If it includes an IP
+	// subnet, then you must specify the subnet resource. For more information on
+	// the EC2 scenario options, see Supported Platforms (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-supported-platforms.html)
+	// in the AWS EC2 User Guide.
+	//
+	// EC2-Classic-InstanceStore
+	//
+	// instance, image, security-group
+	//
+	// EC2-Classic-EBS
+	//
+	// instance, image, security-group, volume
+	//
+	// EC2-VPC-InstanceStore
+	//
+	// instance, image, security-group, network-interface
+	//
+	// EC2-VPC-InstanceStore-Subnet
+	//
+	// instance, image, security-group, network-interface, subnet
+	//
+	// EC2-VPC-EBS
+	//
+	// instance, image, security-group, network-interface, volume
+	//
+	// EC2-VPC-EBS-Subnet
+	//
+	// instance, image, security-group, network-interface, subnet, volume
+	ResourceHandlingOption *string `min:"1" type:"string"`
+
+	// An AWS account ID that specifies the owner of any simulated resource that
+	// does not identify its owner in the resource ARN, such as an S3 bucket or
+	// object. If ResourceOwner is specified, it is also used as the account owner
+	// of any ResourcePolicy included in the simulation. If the ResourceOwner parameter
+	// is not specified, then the owner of the resources and the resource policy
+	// defaults to the account of the identity provided in CallerArn. This parameter
+	// is required only if you specify a resource-based policy and the account that
+	// owns the resource is different from the account that owns the simulated calling
+	// user (CallerArn).
+	ResourceOwner *string `min:"1" type:"string"`
+
+	// A resource-based policy to include in the simulation provided as a string.
+	// Each resource in the simulation is treated as if it had this policy attached.
+	// You can include only one resource-based policy in a simulation.
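+	//
+	// Illustrative only; the JSON below is a placeholder, not a complete policy:
+	//
+	//	ResourcePolicy: aws.String(`{"Version":"2012-10-17","Statement":[...]}`),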
+ ResourcePolicy *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s SimulatePrincipalPolicyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SimulatePrincipalPolicyInput) GoString() string { + return s.String() +} + +// Contains a reference to a Statement element in a policy document that determines +// the result of the simulation. +// +// This data type is used by the MatchedStatements member of the EvaluationResult +// type. +type Statement struct { + _ struct{} `type:"structure"` + + // The row and column of the end of a Statement in an IAM policy. + EndPosition *Position `type:"structure"` + + // The identifier of the policy that was provided as an input. + SourcePolicyId *string `type:"string"` + + // The type of the policy. + SourcePolicyType *string `type:"string" enum:"PolicySourceType"` + + // The row and column of the beginning of the Statement in an IAM policy. + StartPosition *Position `type:"structure"` +} + +// String returns the string representation +func (s Statement) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Statement) GoString() string { + return s.String() +} + +type UpdateAccessKeyInput struct { + _ struct{} `type:"structure"` + + // The access key ID of the secret access key you want to update. + AccessKeyId *string `min:"16" type:"string" required:"true"` + + // The status you want to assign to the secret access key. Active means the + // key can be used for API calls to AWS, while Inactive means the key cannot + // be used. + Status *string `type:"string" required:"true" enum:"statusType"` + + // The name of the user whose key you want to update. + UserName *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s UpdateAccessKeyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateAccessKeyInput) GoString() string { + return s.String() +} + +type UpdateAccessKeyOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s UpdateAccessKeyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateAccessKeyOutput) GoString() string { + return s.String() +} + +type UpdateAccountPasswordPolicyInput struct { + _ struct{} `type:"structure"` + + // Allows all IAM users in your account to use the AWS Management Console to + // change their own passwords. For more information, see Letting IAM Users Change + // Their Own Passwords (http://docs.aws.amazon.com/IAM/latest/UserGuide/HowToPwdIAMUser.html) + // in the IAM User Guide. + // + // Default value: false + AllowUsersToChangePassword *bool `type:"boolean"` + + // Prevents IAM users from setting a new password after their password has expired. + // + // Default value: false + HardExpiry *bool `type:"boolean"` + + // The number of days that an IAM user password is valid. The default value + // of 0 means IAM user passwords never expire. + // + // Default value: 0 + MaxPasswordAge *int64 `min:"1" type:"integer"` + + // The minimum number of characters allowed in an IAM user password. + // + // Default value: 6 + MinimumPasswordLength *int64 `min:"6" type:"integer"` + + // Specifies the number of previous passwords that IAM users are prevented from + // reusing. 
The default value of 0 means IAM users are not prevented from reusing + // previous passwords. + // + // Default value: 0 + PasswordReusePrevention *int64 `min:"1" type:"integer"` + + // Specifies whether IAM user passwords must contain at least one lowercase + // character from the ISO basic Latin alphabet (a to z). + // + // Default value: false + RequireLowercaseCharacters *bool `type:"boolean"` + + // Specifies whether IAM user passwords must contain at least one numeric character + // (0 to 9). + // + // Default value: false + RequireNumbers *bool `type:"boolean"` + + // Specifies whether IAM user passwords must contain at least one of the following + // non-alphanumeric characters: + // + // ! @ # $ % ^ & * ( ) _ + - = [ ] { } | ' + // + // Default value: false + RequireSymbols *bool `type:"boolean"` + + // Specifies whether IAM user passwords must contain at least one uppercase + // character from the ISO basic Latin alphabet (A to Z). + // + // Default value: false + RequireUppercaseCharacters *bool `type:"boolean"` +} + +// String returns the string representation +func (s UpdateAccountPasswordPolicyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateAccountPasswordPolicyInput) GoString() string { + return s.String() +} + +type UpdateAccountPasswordPolicyOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s UpdateAccountPasswordPolicyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateAccountPasswordPolicyOutput) GoString() string { + return s.String() +} + +type UpdateAssumeRolePolicyInput struct { + _ struct{} `type:"structure"` + + // The policy that grants an entity permission to assume the role. + PolicyDocument *string `min:"1" type:"string" required:"true"` + + // The name of the role to update. + RoleName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s UpdateAssumeRolePolicyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateAssumeRolePolicyInput) GoString() string { + return s.String() +} + +type UpdateAssumeRolePolicyOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s UpdateAssumeRolePolicyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateAssumeRolePolicyOutput) GoString() string { + return s.String() +} + +type UpdateGroupInput struct { + _ struct{} `type:"structure"` + + // Name of the group to update. If you're changing the name of the group, this + // is the original name. + GroupName *string `min:"1" type:"string" required:"true"` + + // New name for the group. Only include this if changing the group's name. + NewGroupName *string `min:"1" type:"string"` + + // New path for the group. Only include this if changing the group's path. 
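+	// IAM paths begin and end with a forward slash, for example
+	// /division_abc/subdivision_xyz/.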
+ NewPath *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s UpdateGroupInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateGroupInput) GoString() string { + return s.String() +} + +type UpdateGroupOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s UpdateGroupOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateGroupOutput) GoString() string { + return s.String() +} + +type UpdateLoginProfileInput struct { + _ struct{} `type:"structure"` + + // The new password for the specified user. + Password *string `min:"1" type:"string"` + + // Require the specified user to set a new password on next sign-in. + PasswordResetRequired *bool `type:"boolean"` + + // The name of the user whose password you want to update. + UserName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s UpdateLoginProfileInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateLoginProfileInput) GoString() string { + return s.String() +} + +type UpdateLoginProfileOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s UpdateLoginProfileOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateLoginProfileOutput) GoString() string { + return s.String() +} + +type UpdateOpenIDConnectProviderThumbprintInput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the IAM OpenID Connect (OIDC) provider + // to update the thumbprint for. You can get a list of OIDC provider ARNs by + // using the ListOpenIDConnectProviders action. + OpenIDConnectProviderArn *string `min:"20" type:"string" required:"true"` + + // A list of certificate thumbprints that are associated with the specified + // IAM OpenID Connect provider. For more information, see CreateOpenIDConnectProvider. + ThumbprintList []*string `type:"list" required:"true"` +} + +// String returns the string representation +func (s UpdateOpenIDConnectProviderThumbprintInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateOpenIDConnectProviderThumbprintInput) GoString() string { + return s.String() +} + +type UpdateOpenIDConnectProviderThumbprintOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s UpdateOpenIDConnectProviderThumbprintOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateOpenIDConnectProviderThumbprintOutput) GoString() string { + return s.String() +} + +type UpdateSAMLProviderInput struct { + _ struct{} `type:"structure"` + + // An XML document generated by an identity provider (IdP) that supports SAML + // 2.0. The document includes the issuer's name, expiration information, and + // keys that can be used to validate the SAML authentication response (assertions) + // that are received from the IdP. You must generate the metadata document using + // the identity management software that is used as your organization's IdP. + SAMLMetadataDocument *string `min:"1000" type:"string" required:"true"` + + // The Amazon Resource Name (ARN) of the SAML provider to update. 
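+	// For illustration only: SAML provider ARNs take the form
+	// arn:aws:iam::123456789012:saml-provider/MyProviderName.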
+ SAMLProviderArn *string `min:"20" type:"string" required:"true"` +} + +// String returns the string representation +func (s UpdateSAMLProviderInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateSAMLProviderInput) GoString() string { + return s.String() +} + +// Contains the response to a successful UpdateSAMLProvider request. +type UpdateSAMLProviderOutput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the SAML provider that was updated. + SAMLProviderArn *string `min:"20" type:"string"` +} + +// String returns the string representation +func (s UpdateSAMLProviderOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateSAMLProviderOutput) GoString() string { + return s.String() +} + +type UpdateSSHPublicKeyInput struct { + _ struct{} `type:"structure"` + + // The unique identifier for the SSH public key. + SSHPublicKeyId *string `min:"20" type:"string" required:"true"` + + // The status to assign to the SSH public key. Active means the key can be used + // for authentication with an AWS CodeCommit repository. Inactive means the + // key cannot be used. + Status *string `type:"string" required:"true" enum:"statusType"` + + // The name of the IAM user associated with the SSH public key. + UserName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s UpdateSSHPublicKeyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateSSHPublicKeyInput) GoString() string { + return s.String() +} + +type UpdateSSHPublicKeyOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s UpdateSSHPublicKeyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateSSHPublicKeyOutput) GoString() string { + return s.String() +} + +type UpdateServerCertificateInput struct { + _ struct{} `type:"structure"` + + // The new path for the server certificate. Include this only if you are updating + // the server certificate's path. + NewPath *string `min:"1" type:"string"` + + // The new name for the server certificate. Include this only if you are updating + // the server certificate's name. The name of the certificate cannot contain + // any spaces. + NewServerCertificateName *string `min:"1" type:"string"` + + // The name of the server certificate that you want to update. + ServerCertificateName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s UpdateServerCertificateInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateServerCertificateInput) GoString() string { + return s.String() +} + +type UpdateServerCertificateOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s UpdateServerCertificateOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateServerCertificateOutput) GoString() string { + return s.String() +} + +type UpdateSigningCertificateInput struct { + _ struct{} `type:"structure"` + + // The ID of the signing certificate you want to update. + CertificateId *string `min:"24" type:"string" required:"true"` + + // The status you want to assign to the certificate. 
Active means the certificate + // can be used for API calls to AWS, while Inactive means the certificate cannot + // be used. + Status *string `type:"string" required:"true" enum:"statusType"` + + // The name of the user the signing certificate belongs to. + UserName *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s UpdateSigningCertificateInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateSigningCertificateInput) GoString() string { + return s.String() +} + +type UpdateSigningCertificateOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s UpdateSigningCertificateOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateSigningCertificateOutput) GoString() string { + return s.String() +} + +type UpdateUserInput struct { + _ struct{} `type:"structure"` + + // New path for the user. Include this parameter only if you're changing the + // user's path. + NewPath *string `min:"1" type:"string"` + + // New name for the user. Include this parameter only if you're changing the + // user's name. + NewUserName *string `min:"1" type:"string"` + + // Name of the user to update. If you're changing the name of the user, this + // is the original user name. + UserName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s UpdateUserInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateUserInput) GoString() string { + return s.String() +} + +type UpdateUserOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s UpdateUserOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateUserOutput) GoString() string { + return s.String() +} + +type UploadSSHPublicKeyInput struct { + _ struct{} `type:"structure"` + + // The SSH public key. The public key must be encoded in ssh-rsa format or PEM + // format. + SSHPublicKeyBody *string `min:"1" type:"string" required:"true"` + + // The name of the IAM user to associate the SSH public key with. + UserName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s UploadSSHPublicKeyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UploadSSHPublicKeyInput) GoString() string { + return s.String() +} + +// Contains the response to a successful UploadSSHPublicKey request. +type UploadSSHPublicKeyOutput struct { + _ struct{} `type:"structure"` + + // Contains information about the SSH public key. + SSHPublicKey *SSHPublicKey `type:"structure"` +} + +// String returns the string representation +func (s UploadSSHPublicKeyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UploadSSHPublicKeyOutput) GoString() string { + return s.String() +} + +type UploadServerCertificateInput struct { + _ struct{} `type:"structure"` + + // The contents of the public key certificate in PEM-encoded format. + CertificateBody *string `min:"1" type:"string" required:"true"` + + // The contents of the certificate chain. This is typically a concatenation + // of the PEM-encoded public key certificates of the chain. 
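+	// Conventionally the chain is ordered starting with the certificate of the
+	// CA that issued the server certificate and ending with the root CA certificate.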
+ CertificateChain *string `min:"1" type:"string"` + + // The path for the server certificate. For more information about paths, see + // IAM Identifiers (http://docs.aws.amazon.com/IAM/latest/UserGuide/Using_Identifiers.html) + // in the Using IAM guide. + // + // This parameter is optional. If it is not included, it defaults to a slash + // (/). + // + // If you are uploading a server certificate specifically for use with Amazon + // CloudFront distributions, you must specify a path using the --path option. + // The path must begin with /cloudfront and must include a trailing slash (for + // example, /cloudfront/test/). + Path *string `min:"1" type:"string"` + + // The contents of the private key in PEM-encoded format. + PrivateKey *string `min:"1" type:"string" required:"true"` + + // The name for the server certificate. Do not include the path in this value. + // The name of the certificate cannot contain any spaces. + ServerCertificateName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s UploadServerCertificateInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UploadServerCertificateInput) GoString() string { + return s.String() +} + +// Contains the response to a successful UploadServerCertificate request. +type UploadServerCertificateOutput struct { + _ struct{} `type:"structure"` + + // The meta information of the uploaded server certificate without its certificate + // body, certificate chain, and private key. + ServerCertificateMetadata *ServerCertificateMetadata `type:"structure"` +} + +// String returns the string representation +func (s UploadServerCertificateOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UploadServerCertificateOutput) GoString() string { + return s.String() +} + +type UploadSigningCertificateInput struct { + _ struct{} `type:"structure"` + + // The contents of the signing certificate. + CertificateBody *string `min:"1" type:"string" required:"true"` + + // The name of the user the signing certificate is for. + UserName *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s UploadSigningCertificateInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UploadSigningCertificateInput) GoString() string { + return s.String() +} + +// Contains the response to a successful UploadSigningCertificate request. +type UploadSigningCertificateOutput struct { + _ struct{} `type:"structure"` + + // Information about the certificate. + Certificate *SigningCertificate `type:"structure" required:"true"` +} + +// String returns the string representation +func (s UploadSigningCertificateOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UploadSigningCertificateOutput) GoString() string { + return s.String() +} + +// Contains information about an IAM user entity. +// +// This data type is used as a response element in the following actions: +// +// CreateUser +// +// GetUser +// +// ListUsers +type User struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) that identifies the user. For more information + // about ARNs and how to use ARNs in policies, see IAM Identifiers (http://docs.aws.amazon.com/IAM/latest/UserGuide/Using_Identifiers.html) + // in the Using IAM guide. 
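+	// For example, arn:aws:iam::123456789012:user/Bob (the illustrative account
+	// and user name also used in the SimulatePrincipalPolicyInput documentation
+	// above).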
+	Arn *string `min:"20" type:"string" required:"true"`
+
+	// The date and time, in ISO 8601 date-time format (http://www.iso.org/iso/iso8601),
+	// when the user was created.
+	CreateDate *time.Time `type:"timestamp" timestampFormat:"iso8601" required:"true"`
+
+	// The date and time, in ISO 8601 date-time format (http://www.iso.org/iso/iso8601),
+	// when the user's password was last used to sign in to an AWS website. For
+	// a list of AWS websites that capture a user's last sign-in time, see the Credential
+	// Reports (http://docs.aws.amazon.com/IAM/latest/UserGuide/credential-reports.html)
+	// topic in the Using IAM guide. If a password is used more than once in a five-minute
+	// span, only the first use is returned in this field. This field is null (not
+	// present) when:
+	//
+	// The user does not have a password
+	//
+	// The password exists but has never been used (at least not since IAM started
+	// tracking this information on October 20th, 2014)
+	//
+	// There is no sign-in data associated with the user
+	//
+	// This value is returned only in the GetUser and ListUsers actions.
+	PasswordLastUsed *time.Time `type:"timestamp" timestampFormat:"iso8601"`
+
+	// The path to the user. For more information about paths, see IAM Identifiers
+	// (http://docs.aws.amazon.com/IAM/latest/UserGuide/Using_Identifiers.html)
+	// in the Using IAM guide.
+	Path *string `min:"1" type:"string" required:"true"`
+
+	// The stable and unique string identifying the user. For more information about
+	// IDs, see IAM Identifiers (http://docs.aws.amazon.com/IAM/latest/UserGuide/Using_Identifiers.html)
+	// in the Using IAM guide.
+	UserId *string `min:"16" type:"string" required:"true"`
+
+	// The friendly name identifying the user.
+	UserName *string `min:"1" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s User) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s User) GoString() string {
+	return s.String()
+}
+
+// Contains information about an IAM user, including all the user's policies
+// and all the IAM groups the user is in.
+//
+// This data type is used as a response element in the GetAccountAuthorizationDetails
+// action.
+type UserDetail struct {
+	_ struct{} `type:"structure"`
+
+	// The Amazon Resource Name (ARN). ARNs are unique identifiers for AWS resources.
+	//
+	// For more information about ARNs, go to Amazon Resource Names (ARNs) and
+	// AWS Service Namespaces (http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html)
+	// in the AWS General Reference.
+	Arn *string `min:"20" type:"string"`
+
+	// A list of the managed policies attached to the user.
+	AttachedManagedPolicies []*AttachedPolicy `type:"list"`
+
+	// The date and time, in ISO 8601 date-time format (http://www.iso.org/iso/iso8601),
+	// when the user was created.
+	CreateDate *time.Time `type:"timestamp" timestampFormat:"iso8601"`
+
+	// A list of IAM groups that the user is in.
+	GroupList []*string `type:"list"`
+
+	// The path to the user. For more information about paths, see IAM Identifiers
+	// (http://docs.aws.amazon.com/IAM/latest/UserGuide/Using_Identifiers.html)
+	// in the Using IAM guide.
+	Path *string `min:"1" type:"string"`
+
+	// The stable and unique string identifying the user. For more information about
+	// IDs, see IAM Identifiers (http://docs.aws.amazon.com/IAM/latest/UserGuide/Using_Identifiers.html)
+	// in the Using IAM guide.
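+	// (As a rule of thumb, unique IDs for IAM users begin with the prefix AIDA.)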
+	UserId *string `min:"16" type:"string"`
+
+	// The friendly name identifying the user.
+	UserName *string `min:"1" type:"string"`
+
+	// A list of the inline policies embedded in the user.
+	UserPolicyList []*PolicyDetail `type:"list"`
+}
+
+// String returns the string representation
+func (s UserDetail) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s UserDetail) GoString() string {
+	return s.String()
+}
+
+// Contains information about a virtual MFA device.
+type VirtualMFADevice struct {
+	_ struct{} `type:"structure"`
+
+	// The Base32 seed defined as specified in RFC3548 (http://www.ietf.org/rfc/rfc3548.txt).
+	// The Base32StringSeed is Base64-encoded.
+	Base32StringSeed []byte `type:"blob"`
+
+	// The date and time on which the virtual MFA device was enabled.
+	EnableDate *time.Time `type:"timestamp" timestampFormat:"iso8601"`
+
+	// A QR code PNG image that encodes otpauth://totp/$virtualMFADeviceName@$AccountName?secret=$Base32String
+	// where $virtualMFADeviceName is one of the create call arguments, AccountName
+	// is the user name if set (otherwise, the account ID), and Base32String
+	// is the seed in Base32 format. The Base32String value is Base64-encoded.
+	QRCodePNG []byte `type:"blob"`
+
+	// The serial number associated with VirtualMFADevice.
+	SerialNumber *string `min:"9" type:"string" required:"true"`
+
+	// Contains information about an IAM user entity.
+	//
+	// This data type is used as a response element in the following actions:
+	//
+	// CreateUser
+	//
+	// GetUser
+	//
+	// ListUsers
+	User *User `type:"structure"`
+}
+
+// String returns the string representation
+func (s VirtualMFADevice) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s VirtualMFADevice) GoString() string {
+	return s.String()
+}
+
+const (
+	// @enum ContextKeyTypeEnum
+	ContextKeyTypeEnumString = "string"
+	// @enum ContextKeyTypeEnum
+	ContextKeyTypeEnumStringList = "stringList"
+	// @enum ContextKeyTypeEnum
+	ContextKeyTypeEnumNumeric = "numeric"
+	// @enum ContextKeyTypeEnum
+	ContextKeyTypeEnumNumericList = "numericList"
+	// @enum ContextKeyTypeEnum
+	ContextKeyTypeEnumBoolean = "boolean"
+	// @enum ContextKeyTypeEnum
+	ContextKeyTypeEnumBooleanList = "booleanList"
+	// @enum ContextKeyTypeEnum
+	ContextKeyTypeEnumIp = "ip"
+	// @enum ContextKeyTypeEnum
+	ContextKeyTypeEnumIpList = "ipList"
+	// @enum ContextKeyTypeEnum
+	ContextKeyTypeEnumBinary = "binary"
+	// @enum ContextKeyTypeEnum
+	ContextKeyTypeEnumBinaryList = "binaryList"
+	// @enum ContextKeyTypeEnum
+	ContextKeyTypeEnumDate = "date"
+	// @enum ContextKeyTypeEnum
+	ContextKeyTypeEnumDateList = "dateList"
+)
+
+const (
+	// @enum EntityType
+	EntityTypeUser = "User"
+	// @enum EntityType
+	EntityTypeRole = "Role"
+	// @enum EntityType
+	EntityTypeGroup = "Group"
+	// @enum EntityType
+	EntityTypeLocalManagedPolicy = "LocalManagedPolicy"
+	// @enum EntityType
+	EntityTypeAwsmanagedPolicy = "AWSManagedPolicy"
+)
+
+const (
+	// @enum PolicyEvaluationDecisionType
+	PolicyEvaluationDecisionTypeAllowed = "allowed"
+	// @enum PolicyEvaluationDecisionType
+	PolicyEvaluationDecisionTypeExplicitDeny = "explicitDeny"
+	// @enum PolicyEvaluationDecisionType
+	PolicyEvaluationDecisionTypeImplicitDeny = "implicitDeny"
+)
+
+const (
+	// @enum PolicySourceType
+	PolicySourceTypeUser = "user"
+	// @enum PolicySourceType
+	PolicySourceTypeGroup = "group"
+	// @enum PolicySourceType
+	PolicySourceTypeRole =
"role" + // @enum PolicySourceType + PolicySourceTypeAwsManaged = "aws-managed" + // @enum PolicySourceType + PolicySourceTypeUserManaged = "user-managed" + // @enum PolicySourceType + PolicySourceTypeResource = "resource" + // @enum PolicySourceType + PolicySourceTypeNone = "none" +) + +const ( + // @enum ReportFormatType + ReportFormatTypeTextCsv = "text/csv" +) + +const ( + // @enum ReportStateType + ReportStateTypeStarted = "STARTED" + // @enum ReportStateType + ReportStateTypeInprogress = "INPROGRESS" + // @enum ReportStateType + ReportStateTypeComplete = "COMPLETE" +) + +const ( + // @enum assignmentStatusType + AssignmentStatusTypeAssigned = "Assigned" + // @enum assignmentStatusType + AssignmentStatusTypeUnassigned = "Unassigned" + // @enum assignmentStatusType + AssignmentStatusTypeAny = "Any" +) + +const ( + // @enum encodingType + EncodingTypeSsh = "SSH" + // @enum encodingType + EncodingTypePem = "PEM" +) + +const ( + // @enum policyScopeType + PolicyScopeTypeAll = "All" + // @enum policyScopeType + PolicyScopeTypeAws = "AWS" + // @enum policyScopeType + PolicyScopeTypeLocal = "Local" +) + +const ( + // @enum statusType + StatusTypeActive = "Active" + // @enum statusType + StatusTypeInactive = "Inactive" +) + +const ( + // @enum summaryKeyType + SummaryKeyTypeUsers = "Users" + // @enum summaryKeyType + SummaryKeyTypeUsersQuota = "UsersQuota" + // @enum summaryKeyType + SummaryKeyTypeGroups = "Groups" + // @enum summaryKeyType + SummaryKeyTypeGroupsQuota = "GroupsQuota" + // @enum summaryKeyType + SummaryKeyTypeServerCertificates = "ServerCertificates" + // @enum summaryKeyType + SummaryKeyTypeServerCertificatesQuota = "ServerCertificatesQuota" + // @enum summaryKeyType + SummaryKeyTypeUserPolicySizeQuota = "UserPolicySizeQuota" + // @enum summaryKeyType + SummaryKeyTypeGroupPolicySizeQuota = "GroupPolicySizeQuota" + // @enum summaryKeyType + SummaryKeyTypeGroupsPerUserQuota = "GroupsPerUserQuota" + // @enum summaryKeyType + SummaryKeyTypeSigningCertificatesPerUserQuota = "SigningCertificatesPerUserQuota" + // @enum summaryKeyType + SummaryKeyTypeAccessKeysPerUserQuota = "AccessKeysPerUserQuota" + // @enum summaryKeyType + SummaryKeyTypeMfadevices = "MFADevices" + // @enum summaryKeyType + SummaryKeyTypeMfadevicesInUse = "MFADevicesInUse" + // @enum summaryKeyType + SummaryKeyTypeAccountMfaenabled = "AccountMFAEnabled" + // @enum summaryKeyType + SummaryKeyTypeAccountAccessKeysPresent = "AccountAccessKeysPresent" + // @enum summaryKeyType + SummaryKeyTypeAccountSigningCertificatesPresent = "AccountSigningCertificatesPresent" + // @enum summaryKeyType + SummaryKeyTypeAttachedPoliciesPerGroupQuota = "AttachedPoliciesPerGroupQuota" + // @enum summaryKeyType + SummaryKeyTypeAttachedPoliciesPerRoleQuota = "AttachedPoliciesPerRoleQuota" + // @enum summaryKeyType + SummaryKeyTypeAttachedPoliciesPerUserQuota = "AttachedPoliciesPerUserQuota" + // @enum summaryKeyType + SummaryKeyTypePolicies = "Policies" + // @enum summaryKeyType + SummaryKeyTypePoliciesQuota = "PoliciesQuota" + // @enum summaryKeyType + SummaryKeyTypePolicySizeQuota = "PolicySizeQuota" + // @enum summaryKeyType + SummaryKeyTypePolicyVersionsInUse = "PolicyVersionsInUse" + // @enum summaryKeyType + SummaryKeyTypePolicyVersionsInUseQuota = "PolicyVersionsInUseQuota" + // @enum summaryKeyType + SummaryKeyTypeVersionsPerPolicyQuota = "VersionsPerPolicyQuota" +) diff --git a/vendor/github.com/aws/aws-sdk-go/service/iam/examples_test.go b/vendor/github.com/aws/aws-sdk-go/service/iam/examples_test.go new file mode 100644 index 
000000000..1aebec8cd --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/iam/examples_test.go @@ -0,0 +1,2366 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +package iam_test + +import ( + "bytes" + "fmt" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/session" + "github.com/aws/aws-sdk-go/service/iam" +) + +var _ time.Duration +var _ bytes.Buffer + +func ExampleIAM_AddClientIDToOpenIDConnectProvider() { + svc := iam.New(session.New()) + + params := &iam.AddClientIDToOpenIDConnectProviderInput{ + ClientID: aws.String("clientIDType"), // Required + OpenIDConnectProviderArn: aws.String("arnType"), // Required + } + resp, err := svc.AddClientIDToOpenIDConnectProvider(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleIAM_AddRoleToInstanceProfile() { + svc := iam.New(session.New()) + + params := &iam.AddRoleToInstanceProfileInput{ + InstanceProfileName: aws.String("instanceProfileNameType"), // Required + RoleName: aws.String("roleNameType"), // Required + } + resp, err := svc.AddRoleToInstanceProfile(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleIAM_AddUserToGroup() { + svc := iam.New(session.New()) + + params := &iam.AddUserToGroupInput{ + GroupName: aws.String("groupNameType"), // Required + UserName: aws.String("existingUserNameType"), // Required + } + resp, err := svc.AddUserToGroup(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleIAM_AttachGroupPolicy() { + svc := iam.New(session.New()) + + params := &iam.AttachGroupPolicyInput{ + GroupName: aws.String("groupNameType"), // Required + PolicyArn: aws.String("arnType"), // Required + } + resp, err := svc.AttachGroupPolicy(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleIAM_AttachRolePolicy() { + svc := iam.New(session.New()) + + params := &iam.AttachRolePolicyInput{ + PolicyArn: aws.String("arnType"), // Required + RoleName: aws.String("roleNameType"), // Required + } + resp, err := svc.AttachRolePolicy(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleIAM_AttachUserPolicy() { + svc := iam.New(session.New()) + + params := &iam.AttachUserPolicyInput{ + PolicyArn: aws.String("arnType"), // Required + UserName: aws.String("userNameType"), // Required + } + resp, err := svc.AttachUserPolicy(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
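+	// resp implements Stringer via awsutil.Prettify, so nested pointer
+	// fields print legibly.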
+ fmt.Println(resp) +} + +func ExampleIAM_ChangePassword() { + svc := iam.New(session.New()) + + params := &iam.ChangePasswordInput{ + NewPassword: aws.String("passwordType"), // Required + OldPassword: aws.String("passwordType"), // Required + } + resp, err := svc.ChangePassword(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleIAM_CreateAccessKey() { + svc := iam.New(session.New()) + + params := &iam.CreateAccessKeyInput{ + UserName: aws.String("existingUserNameType"), + } + resp, err := svc.CreateAccessKey(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleIAM_CreateAccountAlias() { + svc := iam.New(session.New()) + + params := &iam.CreateAccountAliasInput{ + AccountAlias: aws.String("accountAliasType"), // Required + } + resp, err := svc.CreateAccountAlias(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleIAM_CreateGroup() { + svc := iam.New(session.New()) + + params := &iam.CreateGroupInput{ + GroupName: aws.String("groupNameType"), // Required + Path: aws.String("pathType"), + } + resp, err := svc.CreateGroup(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleIAM_CreateInstanceProfile() { + svc := iam.New(session.New()) + + params := &iam.CreateInstanceProfileInput{ + InstanceProfileName: aws.String("instanceProfileNameType"), // Required + Path: aws.String("pathType"), + } + resp, err := svc.CreateInstanceProfile(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleIAM_CreateLoginProfile() { + svc := iam.New(session.New()) + + params := &iam.CreateLoginProfileInput{ + Password: aws.String("passwordType"), // Required + UserName: aws.String("userNameType"), // Required + PasswordResetRequired: aws.Bool(true), + } + resp, err := svc.CreateLoginProfile(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleIAM_CreateOpenIDConnectProvider() { + svc := iam.New(session.New()) + + params := &iam.CreateOpenIDConnectProviderInput{ + ThumbprintList: []*string{ // Required + aws.String("thumbprintType"), // Required + // More values... + }, + Url: aws.String("OpenIDConnectProviderUrlType"), // Required + ClientIDList: []*string{ + aws.String("clientIDType"), // Required + // More values... + }, + } + resp, err := svc.CreateOpenIDConnectProvider(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleIAM_CreatePolicy() { + svc := iam.New(session.New()) + + params := &iam.CreatePolicyInput{ + PolicyDocument: aws.String("policyDocumentType"), // Required + PolicyName: aws.String("policyNameType"), // Required + Description: aws.String("policyDescriptionType"), + Path: aws.String("policyPathType"), + } + resp, err := svc.CreatePolicy(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleIAM_CreatePolicyVersion() { + svc := iam.New(session.New()) + + params := &iam.CreatePolicyVersionInput{ + PolicyArn: aws.String("arnType"), // Required + PolicyDocument: aws.String("policyDocumentType"), // Required + SetAsDefault: aws.Bool(true), + } + resp, err := svc.CreatePolicyVersion(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleIAM_CreateRole() { + svc := iam.New(session.New()) + + params := &iam.CreateRoleInput{ + AssumeRolePolicyDocument: aws.String("policyDocumentType"), // Required + RoleName: aws.String("roleNameType"), // Required + Path: aws.String("pathType"), + } + resp, err := svc.CreateRole(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleIAM_CreateSAMLProvider() { + svc := iam.New(session.New()) + + params := &iam.CreateSAMLProviderInput{ + Name: aws.String("SAMLProviderNameType"), // Required + SAMLMetadataDocument: aws.String("SAMLMetadataDocumentType"), // Required + } + resp, err := svc.CreateSAMLProvider(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleIAM_CreateUser() { + svc := iam.New(session.New()) + + params := &iam.CreateUserInput{ + UserName: aws.String("userNameType"), // Required + Path: aws.String("pathType"), + } + resp, err := svc.CreateUser(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleIAM_CreateVirtualMFADevice() { + svc := iam.New(session.New()) + + params := &iam.CreateVirtualMFADeviceInput{ + VirtualMFADeviceName: aws.String("virtualMFADeviceName"), // Required + Path: aws.String("pathType"), + } + resp, err := svc.CreateVirtualMFADevice(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleIAM_DeactivateMFADevice() { + svc := iam.New(session.New()) + + params := &iam.DeactivateMFADeviceInput{ + SerialNumber: aws.String("serialNumberType"), // Required + UserName: aws.String("existingUserNameType"), // Required + } + resp, err := svc.DeactivateMFADevice(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. 
+ fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleIAM_DeleteAccessKey() { + svc := iam.New(session.New()) + + params := &iam.DeleteAccessKeyInput{ + AccessKeyId: aws.String("accessKeyIdType"), // Required + UserName: aws.String("existingUserNameType"), + } + resp, err := svc.DeleteAccessKey(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleIAM_DeleteAccountAlias() { + svc := iam.New(session.New()) + + params := &iam.DeleteAccountAliasInput{ + AccountAlias: aws.String("accountAliasType"), // Required + } + resp, err := svc.DeleteAccountAlias(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleIAM_DeleteAccountPasswordPolicy() { + svc := iam.New(session.New()) + + var params *iam.DeleteAccountPasswordPolicyInput + resp, err := svc.DeleteAccountPasswordPolicy(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleIAM_DeleteGroup() { + svc := iam.New(session.New()) + + params := &iam.DeleteGroupInput{ + GroupName: aws.String("groupNameType"), // Required + } + resp, err := svc.DeleteGroup(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleIAM_DeleteGroupPolicy() { + svc := iam.New(session.New()) + + params := &iam.DeleteGroupPolicyInput{ + GroupName: aws.String("groupNameType"), // Required + PolicyName: aws.String("policyNameType"), // Required + } + resp, err := svc.DeleteGroupPolicy(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleIAM_DeleteInstanceProfile() { + svc := iam.New(session.New()) + + params := &iam.DeleteInstanceProfileInput{ + InstanceProfileName: aws.String("instanceProfileNameType"), // Required + } + resp, err := svc.DeleteInstanceProfile(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleIAM_DeleteLoginProfile() { + svc := iam.New(session.New()) + + params := &iam.DeleteLoginProfileInput{ + UserName: aws.String("userNameType"), // Required + } + resp, err := svc.DeleteLoginProfile(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleIAM_DeleteOpenIDConnectProvider() { + svc := iam.New(session.New()) + + params := &iam.DeleteOpenIDConnectProviderInput{ + OpenIDConnectProviderArn: aws.String("arnType"), // Required + } + resp, err := svc.DeleteOpenIDConnectProvider(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleIAM_DeletePolicy() { + svc := iam.New(session.New()) + + params := &iam.DeletePolicyInput{ + PolicyArn: aws.String("arnType"), // Required + } + resp, err := svc.DeletePolicy(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleIAM_DeletePolicyVersion() { + svc := iam.New(session.New()) + + params := &iam.DeletePolicyVersionInput{ + PolicyArn: aws.String("arnType"), // Required + VersionId: aws.String("policyVersionIdType"), // Required + } + resp, err := svc.DeletePolicyVersion(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleIAM_DeleteRole() { + svc := iam.New(session.New()) + + params := &iam.DeleteRoleInput{ + RoleName: aws.String("roleNameType"), // Required + } + resp, err := svc.DeleteRole(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleIAM_DeleteRolePolicy() { + svc := iam.New(session.New()) + + params := &iam.DeleteRolePolicyInput{ + PolicyName: aws.String("policyNameType"), // Required + RoleName: aws.String("roleNameType"), // Required + } + resp, err := svc.DeleteRolePolicy(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleIAM_DeleteSAMLProvider() { + svc := iam.New(session.New()) + + params := &iam.DeleteSAMLProviderInput{ + SAMLProviderArn: aws.String("arnType"), // Required + } + resp, err := svc.DeleteSAMLProvider(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleIAM_DeleteSSHPublicKey() { + svc := iam.New(session.New()) + + params := &iam.DeleteSSHPublicKeyInput{ + SSHPublicKeyId: aws.String("publicKeyIdType"), // Required + UserName: aws.String("userNameType"), // Required + } + resp, err := svc.DeleteSSHPublicKey(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleIAM_DeleteServerCertificate() { + svc := iam.New(session.New()) + + params := &iam.DeleteServerCertificateInput{ + ServerCertificateName: aws.String("serverCertificateNameType"), // Required + } + resp, err := svc.DeleteServerCertificate(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleIAM_DeleteSigningCertificate() { + svc := iam.New(session.New()) + + params := &iam.DeleteSigningCertificateInput{ + CertificateId: aws.String("certificateIdType"), // Required + UserName: aws.String("existingUserNameType"), + } + resp, err := svc.DeleteSigningCertificate(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleIAM_DeleteUser() { + svc := iam.New(session.New()) + + params := &iam.DeleteUserInput{ + UserName: aws.String("existingUserNameType"), // Required + } + resp, err := svc.DeleteUser(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleIAM_DeleteUserPolicy() { + svc := iam.New(session.New()) + + params := &iam.DeleteUserPolicyInput{ + PolicyName: aws.String("policyNameType"), // Required + UserName: aws.String("existingUserNameType"), // Required + } + resp, err := svc.DeleteUserPolicy(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleIAM_DeleteVirtualMFADevice() { + svc := iam.New(session.New()) + + params := &iam.DeleteVirtualMFADeviceInput{ + SerialNumber: aws.String("serialNumberType"), // Required + } + resp, err := svc.DeleteVirtualMFADevice(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleIAM_DetachGroupPolicy() { + svc := iam.New(session.New()) + + params := &iam.DetachGroupPolicyInput{ + GroupName: aws.String("groupNameType"), // Required + PolicyArn: aws.String("arnType"), // Required + } + resp, err := svc.DetachGroupPolicy(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleIAM_DetachRolePolicy() { + svc := iam.New(session.New()) + + params := &iam.DetachRolePolicyInput{ + PolicyArn: aws.String("arnType"), // Required + RoleName: aws.String("roleNameType"), // Required + } + resp, err := svc.DetachRolePolicy(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleIAM_DetachUserPolicy() { + svc := iam.New(session.New()) + + params := &iam.DetachUserPolicyInput{ + PolicyArn: aws.String("arnType"), // Required + UserName: aws.String("userNameType"), // Required + } + resp, err := svc.DetachUserPolicy(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleIAM_EnableMFADevice() { + svc := iam.New(session.New()) + + params := &iam.EnableMFADeviceInput{ + AuthenticationCode1: aws.String("authenticationCodeType"), // Required + AuthenticationCode2: aws.String("authenticationCodeType"), // Required + SerialNumber: aws.String("serialNumberType"), // Required + UserName: aws.String("existingUserNameType"), // Required + } + resp, err := svc.EnableMFADevice(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleIAM_GenerateCredentialReport() { + svc := iam.New(session.New()) + + var params *iam.GenerateCredentialReportInput + resp, err := svc.GenerateCredentialReport(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleIAM_GetAccessKeyLastUsed() { + svc := iam.New(session.New()) + + params := &iam.GetAccessKeyLastUsedInput{ + AccessKeyId: aws.String("accessKeyIdType"), // Required + } + resp, err := svc.GetAccessKeyLastUsed(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleIAM_GetAccountAuthorizationDetails() { + svc := iam.New(session.New()) + + params := &iam.GetAccountAuthorizationDetailsInput{ + Filter: []*string{ + aws.String("EntityType"), // Required + // More values... + }, + Marker: aws.String("markerType"), + MaxItems: aws.Int64(1), + } + resp, err := svc.GetAccountAuthorizationDetails(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleIAM_GetAccountPasswordPolicy() { + svc := iam.New(session.New()) + + var params *iam.GetAccountPasswordPolicyInput + resp, err := svc.GetAccountPasswordPolicy(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleIAM_GetAccountSummary() { + svc := iam.New(session.New()) + + var params *iam.GetAccountSummaryInput + resp, err := svc.GetAccountSummary(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleIAM_GetContextKeysForCustomPolicy() { + svc := iam.New(session.New()) + + params := &iam.GetContextKeysForCustomPolicyInput{ + PolicyInputList: []*string{ // Required + aws.String("policyDocumentType"), // Required + // More values... + }, + } + resp, err := svc.GetContextKeysForCustomPolicy(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleIAM_GetContextKeysForPrincipalPolicy() { + svc := iam.New(session.New()) + + params := &iam.GetContextKeysForPrincipalPolicyInput{ + PolicySourceArn: aws.String("arnType"), // Required + PolicyInputList: []*string{ + aws.String("policyDocumentType"), // Required + // More values... + }, + } + resp, err := svc.GetContextKeysForPrincipalPolicy(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleIAM_GetCredentialReport() { + svc := iam.New(session.New()) + + var params *iam.GetCredentialReportInput + resp, err := svc.GetCredentialReport(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleIAM_GetGroup() { + svc := iam.New(session.New()) + + params := &iam.GetGroupInput{ + GroupName: aws.String("groupNameType"), // Required + Marker: aws.String("markerType"), + MaxItems: aws.Int64(1), + } + resp, err := svc.GetGroup(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleIAM_GetGroupPolicy() { + svc := iam.New(session.New()) + + params := &iam.GetGroupPolicyInput{ + GroupName: aws.String("groupNameType"), // Required + PolicyName: aws.String("policyNameType"), // Required + } + resp, err := svc.GetGroupPolicy(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleIAM_GetInstanceProfile() { + svc := iam.New(session.New()) + + params := &iam.GetInstanceProfileInput{ + InstanceProfileName: aws.String("instanceProfileNameType"), // Required + } + resp, err := svc.GetInstanceProfile(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleIAM_GetLoginProfile() { + svc := iam.New(session.New()) + + params := &iam.GetLoginProfileInput{ + UserName: aws.String("userNameType"), // Required + } + resp, err := svc.GetLoginProfile(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleIAM_GetOpenIDConnectProvider() { + svc := iam.New(session.New()) + + params := &iam.GetOpenIDConnectProviderInput{ + OpenIDConnectProviderArn: aws.String("arnType"), // Required + } + resp, err := svc.GetOpenIDConnectProvider(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleIAM_GetPolicy() { + svc := iam.New(session.New()) + + params := &iam.GetPolicyInput{ + PolicyArn: aws.String("arnType"), // Required + } + resp, err := svc.GetPolicy(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleIAM_GetPolicyVersion() { + svc := iam.New(session.New()) + + params := &iam.GetPolicyVersionInput{ + PolicyArn: aws.String("arnType"), // Required + VersionId: aws.String("policyVersionIdType"), // Required + } + resp, err := svc.GetPolicyVersion(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleIAM_GetRole() { + svc := iam.New(session.New()) + + params := &iam.GetRoleInput{ + RoleName: aws.String("roleNameType"), // Required + } + resp, err := svc.GetRole(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleIAM_GetRolePolicy() { + svc := iam.New(session.New()) + + params := &iam.GetRolePolicyInput{ + PolicyName: aws.String("policyNameType"), // Required + RoleName: aws.String("roleNameType"), // Required + } + resp, err := svc.GetRolePolicy(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleIAM_GetSAMLProvider() { + svc := iam.New(session.New()) + + params := &iam.GetSAMLProviderInput{ + SAMLProviderArn: aws.String("arnType"), // Required + } + resp, err := svc.GetSAMLProvider(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleIAM_GetSSHPublicKey() { + svc := iam.New(session.New()) + + params := &iam.GetSSHPublicKeyInput{ + Encoding: aws.String("encodingType"), // Required + SSHPublicKeyId: aws.String("publicKeyIdType"), // Required + UserName: aws.String("userNameType"), // Required + } + resp, err := svc.GetSSHPublicKey(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleIAM_GetServerCertificate() { + svc := iam.New(session.New()) + + params := &iam.GetServerCertificateInput{ + ServerCertificateName: aws.String("serverCertificateNameType"), // Required + } + resp, err := svc.GetServerCertificate(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleIAM_GetUser() { + svc := iam.New(session.New()) + + params := &iam.GetUserInput{ + UserName: aws.String("existingUserNameType"), + } + resp, err := svc.GetUser(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleIAM_GetUserPolicy() { + svc := iam.New(session.New()) + + params := &iam.GetUserPolicyInput{ + PolicyName: aws.String("policyNameType"), // Required + UserName: aws.String("existingUserNameType"), // Required + } + resp, err := svc.GetUserPolicy(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleIAM_ListAccessKeys() { + svc := iam.New(session.New()) + + params := &iam.ListAccessKeysInput{ + Marker: aws.String("markerType"), + MaxItems: aws.Int64(1), + UserName: aws.String("existingUserNameType"), + } + resp, err := svc.ListAccessKeys(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleIAM_ListAccountAliases() { + svc := iam.New(session.New()) + + params := &iam.ListAccountAliasesInput{ + Marker: aws.String("markerType"), + MaxItems: aws.Int64(1), + } + resp, err := svc.ListAccountAliases(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleIAM_ListAttachedGroupPolicies() { + svc := iam.New(session.New()) + + params := &iam.ListAttachedGroupPoliciesInput{ + GroupName: aws.String("groupNameType"), // Required + Marker: aws.String("markerType"), + MaxItems: aws.Int64(1), + PathPrefix: aws.String("policyPathType"), + } + resp, err := svc.ListAttachedGroupPolicies(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleIAM_ListAttachedRolePolicies() { + svc := iam.New(session.New()) + + params := &iam.ListAttachedRolePoliciesInput{ + RoleName: aws.String("roleNameType"), // Required + Marker: aws.String("markerType"), + MaxItems: aws.Int64(1), + PathPrefix: aws.String("policyPathType"), + } + resp, err := svc.ListAttachedRolePolicies(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleIAM_ListAttachedUserPolicies() { + svc := iam.New(session.New()) + + params := &iam.ListAttachedUserPoliciesInput{ + UserName: aws.String("userNameType"), // Required + Marker: aws.String("markerType"), + MaxItems: aws.Int64(1), + PathPrefix: aws.String("policyPathType"), + } + resp, err := svc.ListAttachedUserPolicies(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleIAM_ListEntitiesForPolicy() { + svc := iam.New(session.New()) + + params := &iam.ListEntitiesForPolicyInput{ + PolicyArn: aws.String("arnType"), // Required + EntityFilter: aws.String("EntityType"), + Marker: aws.String("markerType"), + MaxItems: aws.Int64(1), + PathPrefix: aws.String("pathType"), + } + resp, err := svc.ListEntitiesForPolicy(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleIAM_ListGroupPolicies() { + svc := iam.New(session.New()) + + params := &iam.ListGroupPoliciesInput{ + GroupName: aws.String("groupNameType"), // Required + Marker: aws.String("markerType"), + MaxItems: aws.Int64(1), + } + resp, err := svc.ListGroupPolicies(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleIAM_ListGroups() { + svc := iam.New(session.New()) + + params := &iam.ListGroupsInput{ + Marker: aws.String("markerType"), + MaxItems: aws.Int64(1), + PathPrefix: aws.String("pathPrefixType"), + } + resp, err := svc.ListGroups(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleIAM_ListGroupsForUser() { + svc := iam.New(session.New()) + + params := &iam.ListGroupsForUserInput{ + UserName: aws.String("existingUserNameType"), // Required + Marker: aws.String("markerType"), + MaxItems: aws.Int64(1), + } + resp, err := svc.ListGroupsForUser(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleIAM_ListInstanceProfiles() { + svc := iam.New(session.New()) + + params := &iam.ListInstanceProfilesInput{ + Marker: aws.String("markerType"), + MaxItems: aws.Int64(1), + PathPrefix: aws.String("pathPrefixType"), + } + resp, err := svc.ListInstanceProfiles(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleIAM_ListInstanceProfilesForRole() { + svc := iam.New(session.New()) + + params := &iam.ListInstanceProfilesForRoleInput{ + RoleName: aws.String("roleNameType"), // Required + Marker: aws.String("markerType"), + MaxItems: aws.Int64(1), + } + resp, err := svc.ListInstanceProfilesForRole(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleIAM_ListMFADevices() { + svc := iam.New(session.New()) + + params := &iam.ListMFADevicesInput{ + Marker: aws.String("markerType"), + MaxItems: aws.Int64(1), + UserName: aws.String("existingUserNameType"), + } + resp, err := svc.ListMFADevices(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleIAM_ListOpenIDConnectProviders() { + svc := iam.New(session.New()) + + var params *iam.ListOpenIDConnectProvidersInput + resp, err := svc.ListOpenIDConnectProviders(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleIAM_ListPolicies() { + svc := iam.New(session.New()) + + params := &iam.ListPoliciesInput{ + Marker: aws.String("markerType"), + MaxItems: aws.Int64(1), + OnlyAttached: aws.Bool(true), + PathPrefix: aws.String("policyPathType"), + Scope: aws.String("policyScopeType"), + } + resp, err := svc.ListPolicies(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleIAM_ListPolicyVersions() { + svc := iam.New(session.New()) + + params := &iam.ListPolicyVersionsInput{ + PolicyArn: aws.String("arnType"), // Required + Marker: aws.String("markerType"), + MaxItems: aws.Int64(1), + } + resp, err := svc.ListPolicyVersions(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleIAM_ListRolePolicies() { + svc := iam.New(session.New()) + + params := &iam.ListRolePoliciesInput{ + RoleName: aws.String("roleNameType"), // Required + Marker: aws.String("markerType"), + MaxItems: aws.Int64(1), + } + resp, err := svc.ListRolePolicies(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleIAM_ListRoles() { + svc := iam.New(session.New()) + + params := &iam.ListRolesInput{ + Marker: aws.String("markerType"), + MaxItems: aws.Int64(1), + PathPrefix: aws.String("pathPrefixType"), + } + resp, err := svc.ListRoles(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleIAM_ListSAMLProviders() { + svc := iam.New(session.New()) + + var params *iam.ListSAMLProvidersInput + resp, err := svc.ListSAMLProviders(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleIAM_ListSSHPublicKeys() { + svc := iam.New(session.New()) + + params := &iam.ListSSHPublicKeysInput{ + Marker: aws.String("markerType"), + MaxItems: aws.Int64(1), + UserName: aws.String("userNameType"), + } + resp, err := svc.ListSSHPublicKeys(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleIAM_ListServerCertificates() { + svc := iam.New(session.New()) + + params := &iam.ListServerCertificatesInput{ + Marker: aws.String("markerType"), + MaxItems: aws.Int64(1), + PathPrefix: aws.String("pathPrefixType"), + } + resp, err := svc.ListServerCertificates(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleIAM_ListSigningCertificates() { + svc := iam.New(session.New()) + + params := &iam.ListSigningCertificatesInput{ + Marker: aws.String("markerType"), + MaxItems: aws.Int64(1), + UserName: aws.String("existingUserNameType"), + } + resp, err := svc.ListSigningCertificates(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleIAM_ListUserPolicies() { + svc := iam.New(session.New()) + + params := &iam.ListUserPoliciesInput{ + UserName: aws.String("existingUserNameType"), // Required + Marker: aws.String("markerType"), + MaxItems: aws.Int64(1), + } + resp, err := svc.ListUserPolicies(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleIAM_ListUsers() { + svc := iam.New(session.New()) + + params := &iam.ListUsersInput{ + Marker: aws.String("markerType"), + MaxItems: aws.Int64(1), + PathPrefix: aws.String("pathPrefixType"), + } + resp, err := svc.ListUsers(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleIAM_ListVirtualMFADevices() { + svc := iam.New(session.New()) + + params := &iam.ListVirtualMFADevicesInput{ + AssignmentStatus: aws.String("assignmentStatusType"), + Marker: aws.String("markerType"), + MaxItems: aws.Int64(1), + } + resp, err := svc.ListVirtualMFADevices(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleIAM_PutGroupPolicy() { + svc := iam.New(session.New()) + + params := &iam.PutGroupPolicyInput{ + GroupName: aws.String("groupNameType"), // Required + PolicyDocument: aws.String("policyDocumentType"), // Required + PolicyName: aws.String("policyNameType"), // Required + } + resp, err := svc.PutGroupPolicy(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleIAM_PutRolePolicy() { + svc := iam.New(session.New()) + + params := &iam.PutRolePolicyInput{ + PolicyDocument: aws.String("policyDocumentType"), // Required + PolicyName: aws.String("policyNameType"), // Required + RoleName: aws.String("roleNameType"), // Required + } + resp, err := svc.PutRolePolicy(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleIAM_PutUserPolicy() { + svc := iam.New(session.New()) + + params := &iam.PutUserPolicyInput{ + PolicyDocument: aws.String("policyDocumentType"), // Required + PolicyName: aws.String("policyNameType"), // Required + UserName: aws.String("existingUserNameType"), // Required + } + resp, err := svc.PutUserPolicy(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleIAM_RemoveClientIDFromOpenIDConnectProvider() { + svc := iam.New(session.New()) + + params := &iam.RemoveClientIDFromOpenIDConnectProviderInput{ + ClientID: aws.String("clientIDType"), // Required + OpenIDConnectProviderArn: aws.String("arnType"), // Required + } + resp, err := svc.RemoveClientIDFromOpenIDConnectProvider(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleIAM_RemoveRoleFromInstanceProfile() { + svc := iam.New(session.New()) + + params := &iam.RemoveRoleFromInstanceProfileInput{ + InstanceProfileName: aws.String("instanceProfileNameType"), // Required + RoleName: aws.String("roleNameType"), // Required + } + resp, err := svc.RemoveRoleFromInstanceProfile(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleIAM_RemoveUserFromGroup() { + svc := iam.New(session.New()) + + params := &iam.RemoveUserFromGroupInput{ + GroupName: aws.String("groupNameType"), // Required + UserName: aws.String("existingUserNameType"), // Required + } + resp, err := svc.RemoveUserFromGroup(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleIAM_ResyncMFADevice() { + svc := iam.New(session.New()) + + params := &iam.ResyncMFADeviceInput{ + AuthenticationCode1: aws.String("authenticationCodeType"), // Required + AuthenticationCode2: aws.String("authenticationCodeType"), // Required + SerialNumber: aws.String("serialNumberType"), // Required + UserName: aws.String("existingUserNameType"), // Required + } + resp, err := svc.ResyncMFADevice(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleIAM_SetDefaultPolicyVersion() { + svc := iam.New(session.New()) + + params := &iam.SetDefaultPolicyVersionInput{ + PolicyArn: aws.String("arnType"), // Required + VersionId: aws.String("policyVersionIdType"), // Required + } + resp, err := svc.SetDefaultPolicyVersion(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleIAM_SimulateCustomPolicy() { + svc := iam.New(session.New()) + + params := &iam.SimulateCustomPolicyInput{ + ActionNames: []*string{ // Required + aws.String("ActionNameType"), // Required + // More values... + }, + PolicyInputList: []*string{ // Required + aws.String("policyDocumentType"), // Required + // More values... + }, + CallerArn: aws.String("ResourceNameType"), + ContextEntries: []*iam.ContextEntry{ + { // Required + ContextKeyName: aws.String("ContextKeyNameType"), + ContextKeyType: aws.String("ContextKeyTypeEnum"), + ContextKeyValues: []*string{ + aws.String("ContextKeyValueType"), // Required + // More values... + }, + }, + // More values... + }, + Marker: aws.String("markerType"), + MaxItems: aws.Int64(1), + ResourceArns: []*string{ + aws.String("ResourceNameType"), // Required + // More values... + }, + ResourceHandlingOption: aws.String("ResourceHandlingOptionType"), + ResourceOwner: aws.String("ResourceNameType"), + ResourcePolicy: aws.String("policyDocumentType"), + } + resp, err := svc.SimulateCustomPolicy(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleIAM_SimulatePrincipalPolicy() { + svc := iam.New(session.New()) + + params := &iam.SimulatePrincipalPolicyInput{ + ActionNames: []*string{ // Required + aws.String("ActionNameType"), // Required + // More values... + }, + PolicySourceArn: aws.String("arnType"), // Required + CallerArn: aws.String("ResourceNameType"), + ContextEntries: []*iam.ContextEntry{ + { // Required + ContextKeyName: aws.String("ContextKeyNameType"), + ContextKeyType: aws.String("ContextKeyTypeEnum"), + ContextKeyValues: []*string{ + aws.String("ContextKeyValueType"), // Required + // More values... + }, + }, + // More values... + }, + Marker: aws.String("markerType"), + MaxItems: aws.Int64(1), + PolicyInputList: []*string{ + aws.String("policyDocumentType"), // Required + // More values... + }, + ResourceArns: []*string{ + aws.String("ResourceNameType"), // Required + // More values... 
+ }, + ResourceHandlingOption: aws.String("ResourceHandlingOptionType"), + ResourceOwner: aws.String("ResourceNameType"), + ResourcePolicy: aws.String("policyDocumentType"), + } + resp, err := svc.SimulatePrincipalPolicy(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleIAM_UpdateAccessKey() { + svc := iam.New(session.New()) + + params := &iam.UpdateAccessKeyInput{ + AccessKeyId: aws.String("accessKeyIdType"), // Required + Status: aws.String("statusType"), // Required + UserName: aws.String("existingUserNameType"), + } + resp, err := svc.UpdateAccessKey(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleIAM_UpdateAccountPasswordPolicy() { + svc := iam.New(session.New()) + + params := &iam.UpdateAccountPasswordPolicyInput{ + AllowUsersToChangePassword: aws.Bool(true), + HardExpiry: aws.Bool(true), + MaxPasswordAge: aws.Int64(1), + MinimumPasswordLength: aws.Int64(1), + PasswordReusePrevention: aws.Int64(1), + RequireLowercaseCharacters: aws.Bool(true), + RequireNumbers: aws.Bool(true), + RequireSymbols: aws.Bool(true), + RequireUppercaseCharacters: aws.Bool(true), + } + resp, err := svc.UpdateAccountPasswordPolicy(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleIAM_UpdateAssumeRolePolicy() { + svc := iam.New(session.New()) + + params := &iam.UpdateAssumeRolePolicyInput{ + PolicyDocument: aws.String("policyDocumentType"), // Required + RoleName: aws.String("roleNameType"), // Required + } + resp, err := svc.UpdateAssumeRolePolicy(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleIAM_UpdateGroup() { + svc := iam.New(session.New()) + + params := &iam.UpdateGroupInput{ + GroupName: aws.String("groupNameType"), // Required + NewGroupName: aws.String("groupNameType"), + NewPath: aws.String("pathType"), + } + resp, err := svc.UpdateGroup(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleIAM_UpdateLoginProfile() { + svc := iam.New(session.New()) + + params := &iam.UpdateLoginProfileInput{ + UserName: aws.String("userNameType"), // Required + Password: aws.String("passwordType"), + PasswordResetRequired: aws.Bool(true), + } + resp, err := svc.UpdateLoginProfile(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleIAM_UpdateOpenIDConnectProviderThumbprint() { + svc := iam.New(session.New()) + + params := &iam.UpdateOpenIDConnectProviderThumbprintInput{ + OpenIDConnectProviderArn: aws.String("arnType"), // Required + ThumbprintList: []*string{ // Required + aws.String("thumbprintType"), // Required + // More values... + }, + } + resp, err := svc.UpdateOpenIDConnectProviderThumbprint(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleIAM_UpdateSAMLProvider() { + svc := iam.New(session.New()) + + params := &iam.UpdateSAMLProviderInput{ + SAMLMetadataDocument: aws.String("SAMLMetadataDocumentType"), // Required + SAMLProviderArn: aws.String("arnType"), // Required + } + resp, err := svc.UpdateSAMLProvider(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleIAM_UpdateSSHPublicKey() { + svc := iam.New(session.New()) + + params := &iam.UpdateSSHPublicKeyInput{ + SSHPublicKeyId: aws.String("publicKeyIdType"), // Required + Status: aws.String("statusType"), // Required + UserName: aws.String("userNameType"), // Required + } + resp, err := svc.UpdateSSHPublicKey(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleIAM_UpdateServerCertificate() { + svc := iam.New(session.New()) + + params := &iam.UpdateServerCertificateInput{ + ServerCertificateName: aws.String("serverCertificateNameType"), // Required + NewPath: aws.String("pathType"), + NewServerCertificateName: aws.String("serverCertificateNameType"), + } + resp, err := svc.UpdateServerCertificate(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleIAM_UpdateSigningCertificate() { + svc := iam.New(session.New()) + + params := &iam.UpdateSigningCertificateInput{ + CertificateId: aws.String("certificateIdType"), // Required + Status: aws.String("statusType"), // Required + UserName: aws.String("existingUserNameType"), + } + resp, err := svc.UpdateSigningCertificate(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleIAM_UpdateUser() { + svc := iam.New(session.New()) + + params := &iam.UpdateUserInput{ + UserName: aws.String("existingUserNameType"), // Required + NewPath: aws.String("pathType"), + NewUserName: aws.String("userNameType"), + } + resp, err := svc.UpdateUser(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleIAM_UploadSSHPublicKey() { + svc := iam.New(session.New()) + + params := &iam.UploadSSHPublicKeyInput{ + SSHPublicKeyBody: aws.String("publicKeyMaterialType"), // Required + UserName: aws.String("userNameType"), // Required + } + resp, err := svc.UploadSSHPublicKey(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleIAM_UploadServerCertificate() { + svc := iam.New(session.New()) + + params := &iam.UploadServerCertificateInput{ + CertificateBody: aws.String("certificateBodyType"), // Required + PrivateKey: aws.String("privateKeyType"), // Required + ServerCertificateName: aws.String("serverCertificateNameType"), // Required + CertificateChain: aws.String("certificateChainType"), + Path: aws.String("pathType"), + } + resp, err := svc.UploadServerCertificate(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleIAM_UploadSigningCertificate() { + svc := iam.New(session.New()) + + params := &iam.UploadSigningCertificateInput{ + CertificateBody: aws.String("certificateBodyType"), // Required + UserName: aws.String("existingUserNameType"), + } + resp, err := svc.UploadSigningCertificate(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/iam/iamiface/interface.go b/vendor/github.com/aws/aws-sdk-go/service/iam/iamiface/interface.go new file mode 100644 index 000000000..3df15410a --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/iam/iamiface/interface.go @@ -0,0 +1,510 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +// Package iamiface provides an interface for the AWS Identity and Access Management. +package iamiface + +import ( + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/service/iam" +) + +// IAMAPI is the interface type for iam.IAM. 
+type IAMAPI interface { + AddClientIDToOpenIDConnectProviderRequest(*iam.AddClientIDToOpenIDConnectProviderInput) (*request.Request, *iam.AddClientIDToOpenIDConnectProviderOutput) + + AddClientIDToOpenIDConnectProvider(*iam.AddClientIDToOpenIDConnectProviderInput) (*iam.AddClientIDToOpenIDConnectProviderOutput, error) + + AddRoleToInstanceProfileRequest(*iam.AddRoleToInstanceProfileInput) (*request.Request, *iam.AddRoleToInstanceProfileOutput) + + AddRoleToInstanceProfile(*iam.AddRoleToInstanceProfileInput) (*iam.AddRoleToInstanceProfileOutput, error) + + AddUserToGroupRequest(*iam.AddUserToGroupInput) (*request.Request, *iam.AddUserToGroupOutput) + + AddUserToGroup(*iam.AddUserToGroupInput) (*iam.AddUserToGroupOutput, error) + + AttachGroupPolicyRequest(*iam.AttachGroupPolicyInput) (*request.Request, *iam.AttachGroupPolicyOutput) + + AttachGroupPolicy(*iam.AttachGroupPolicyInput) (*iam.AttachGroupPolicyOutput, error) + + AttachRolePolicyRequest(*iam.AttachRolePolicyInput) (*request.Request, *iam.AttachRolePolicyOutput) + + AttachRolePolicy(*iam.AttachRolePolicyInput) (*iam.AttachRolePolicyOutput, error) + + AttachUserPolicyRequest(*iam.AttachUserPolicyInput) (*request.Request, *iam.AttachUserPolicyOutput) + + AttachUserPolicy(*iam.AttachUserPolicyInput) (*iam.AttachUserPolicyOutput, error) + + ChangePasswordRequest(*iam.ChangePasswordInput) (*request.Request, *iam.ChangePasswordOutput) + + ChangePassword(*iam.ChangePasswordInput) (*iam.ChangePasswordOutput, error) + + CreateAccessKeyRequest(*iam.CreateAccessKeyInput) (*request.Request, *iam.CreateAccessKeyOutput) + + CreateAccessKey(*iam.CreateAccessKeyInput) (*iam.CreateAccessKeyOutput, error) + + CreateAccountAliasRequest(*iam.CreateAccountAliasInput) (*request.Request, *iam.CreateAccountAliasOutput) + + CreateAccountAlias(*iam.CreateAccountAliasInput) (*iam.CreateAccountAliasOutput, error) + + CreateGroupRequest(*iam.CreateGroupInput) (*request.Request, *iam.CreateGroupOutput) + + CreateGroup(*iam.CreateGroupInput) (*iam.CreateGroupOutput, error) + + CreateInstanceProfileRequest(*iam.CreateInstanceProfileInput) (*request.Request, *iam.CreateInstanceProfileOutput) + + CreateInstanceProfile(*iam.CreateInstanceProfileInput) (*iam.CreateInstanceProfileOutput, error) + + CreateLoginProfileRequest(*iam.CreateLoginProfileInput) (*request.Request, *iam.CreateLoginProfileOutput) + + CreateLoginProfile(*iam.CreateLoginProfileInput) (*iam.CreateLoginProfileOutput, error) + + CreateOpenIDConnectProviderRequest(*iam.CreateOpenIDConnectProviderInput) (*request.Request, *iam.CreateOpenIDConnectProviderOutput) + + CreateOpenIDConnectProvider(*iam.CreateOpenIDConnectProviderInput) (*iam.CreateOpenIDConnectProviderOutput, error) + + CreatePolicyRequest(*iam.CreatePolicyInput) (*request.Request, *iam.CreatePolicyOutput) + + CreatePolicy(*iam.CreatePolicyInput) (*iam.CreatePolicyOutput, error) + + CreatePolicyVersionRequest(*iam.CreatePolicyVersionInput) (*request.Request, *iam.CreatePolicyVersionOutput) + + CreatePolicyVersion(*iam.CreatePolicyVersionInput) (*iam.CreatePolicyVersionOutput, error) + + CreateRoleRequest(*iam.CreateRoleInput) (*request.Request, *iam.CreateRoleOutput) + + CreateRole(*iam.CreateRoleInput) (*iam.CreateRoleOutput, error) + + CreateSAMLProviderRequest(*iam.CreateSAMLProviderInput) (*request.Request, *iam.CreateSAMLProviderOutput) + + CreateSAMLProvider(*iam.CreateSAMLProviderInput) (*iam.CreateSAMLProviderOutput, error) + + CreateUserRequest(*iam.CreateUserInput) (*request.Request, *iam.CreateUserOutput) + + 
CreateUser(*iam.CreateUserInput) (*iam.CreateUserOutput, error) + + CreateVirtualMFADeviceRequest(*iam.CreateVirtualMFADeviceInput) (*request.Request, *iam.CreateVirtualMFADeviceOutput) + + CreateVirtualMFADevice(*iam.CreateVirtualMFADeviceInput) (*iam.CreateVirtualMFADeviceOutput, error) + + DeactivateMFADeviceRequest(*iam.DeactivateMFADeviceInput) (*request.Request, *iam.DeactivateMFADeviceOutput) + + DeactivateMFADevice(*iam.DeactivateMFADeviceInput) (*iam.DeactivateMFADeviceOutput, error) + + DeleteAccessKeyRequest(*iam.DeleteAccessKeyInput) (*request.Request, *iam.DeleteAccessKeyOutput) + + DeleteAccessKey(*iam.DeleteAccessKeyInput) (*iam.DeleteAccessKeyOutput, error) + + DeleteAccountAliasRequest(*iam.DeleteAccountAliasInput) (*request.Request, *iam.DeleteAccountAliasOutput) + + DeleteAccountAlias(*iam.DeleteAccountAliasInput) (*iam.DeleteAccountAliasOutput, error) + + DeleteAccountPasswordPolicyRequest(*iam.DeleteAccountPasswordPolicyInput) (*request.Request, *iam.DeleteAccountPasswordPolicyOutput) + + DeleteAccountPasswordPolicy(*iam.DeleteAccountPasswordPolicyInput) (*iam.DeleteAccountPasswordPolicyOutput, error) + + DeleteGroupRequest(*iam.DeleteGroupInput) (*request.Request, *iam.DeleteGroupOutput) + + DeleteGroup(*iam.DeleteGroupInput) (*iam.DeleteGroupOutput, error) + + DeleteGroupPolicyRequest(*iam.DeleteGroupPolicyInput) (*request.Request, *iam.DeleteGroupPolicyOutput) + + DeleteGroupPolicy(*iam.DeleteGroupPolicyInput) (*iam.DeleteGroupPolicyOutput, error) + + DeleteInstanceProfileRequest(*iam.DeleteInstanceProfileInput) (*request.Request, *iam.DeleteInstanceProfileOutput) + + DeleteInstanceProfile(*iam.DeleteInstanceProfileInput) (*iam.DeleteInstanceProfileOutput, error) + + DeleteLoginProfileRequest(*iam.DeleteLoginProfileInput) (*request.Request, *iam.DeleteLoginProfileOutput) + + DeleteLoginProfile(*iam.DeleteLoginProfileInput) (*iam.DeleteLoginProfileOutput, error) + + DeleteOpenIDConnectProviderRequest(*iam.DeleteOpenIDConnectProviderInput) (*request.Request, *iam.DeleteOpenIDConnectProviderOutput) + + DeleteOpenIDConnectProvider(*iam.DeleteOpenIDConnectProviderInput) (*iam.DeleteOpenIDConnectProviderOutput, error) + + DeletePolicyRequest(*iam.DeletePolicyInput) (*request.Request, *iam.DeletePolicyOutput) + + DeletePolicy(*iam.DeletePolicyInput) (*iam.DeletePolicyOutput, error) + + DeletePolicyVersionRequest(*iam.DeletePolicyVersionInput) (*request.Request, *iam.DeletePolicyVersionOutput) + + DeletePolicyVersion(*iam.DeletePolicyVersionInput) (*iam.DeletePolicyVersionOutput, error) + + DeleteRoleRequest(*iam.DeleteRoleInput) (*request.Request, *iam.DeleteRoleOutput) + + DeleteRole(*iam.DeleteRoleInput) (*iam.DeleteRoleOutput, error) + + DeleteRolePolicyRequest(*iam.DeleteRolePolicyInput) (*request.Request, *iam.DeleteRolePolicyOutput) + + DeleteRolePolicy(*iam.DeleteRolePolicyInput) (*iam.DeleteRolePolicyOutput, error) + + DeleteSAMLProviderRequest(*iam.DeleteSAMLProviderInput) (*request.Request, *iam.DeleteSAMLProviderOutput) + + DeleteSAMLProvider(*iam.DeleteSAMLProviderInput) (*iam.DeleteSAMLProviderOutput, error) + + DeleteSSHPublicKeyRequest(*iam.DeleteSSHPublicKeyInput) (*request.Request, *iam.DeleteSSHPublicKeyOutput) + + DeleteSSHPublicKey(*iam.DeleteSSHPublicKeyInput) (*iam.DeleteSSHPublicKeyOutput, error) + + DeleteServerCertificateRequest(*iam.DeleteServerCertificateInput) (*request.Request, *iam.DeleteServerCertificateOutput) + + DeleteServerCertificate(*iam.DeleteServerCertificateInput) (*iam.DeleteServerCertificateOutput, error) + + 
DeleteSigningCertificateRequest(*iam.DeleteSigningCertificateInput) (*request.Request, *iam.DeleteSigningCertificateOutput) + + DeleteSigningCertificate(*iam.DeleteSigningCertificateInput) (*iam.DeleteSigningCertificateOutput, error) + + DeleteUserRequest(*iam.DeleteUserInput) (*request.Request, *iam.DeleteUserOutput) + + DeleteUser(*iam.DeleteUserInput) (*iam.DeleteUserOutput, error) + + DeleteUserPolicyRequest(*iam.DeleteUserPolicyInput) (*request.Request, *iam.DeleteUserPolicyOutput) + + DeleteUserPolicy(*iam.DeleteUserPolicyInput) (*iam.DeleteUserPolicyOutput, error) + + DeleteVirtualMFADeviceRequest(*iam.DeleteVirtualMFADeviceInput) (*request.Request, *iam.DeleteVirtualMFADeviceOutput) + + DeleteVirtualMFADevice(*iam.DeleteVirtualMFADeviceInput) (*iam.DeleteVirtualMFADeviceOutput, error) + + DetachGroupPolicyRequest(*iam.DetachGroupPolicyInput) (*request.Request, *iam.DetachGroupPolicyOutput) + + DetachGroupPolicy(*iam.DetachGroupPolicyInput) (*iam.DetachGroupPolicyOutput, error) + + DetachRolePolicyRequest(*iam.DetachRolePolicyInput) (*request.Request, *iam.DetachRolePolicyOutput) + + DetachRolePolicy(*iam.DetachRolePolicyInput) (*iam.DetachRolePolicyOutput, error) + + DetachUserPolicyRequest(*iam.DetachUserPolicyInput) (*request.Request, *iam.DetachUserPolicyOutput) + + DetachUserPolicy(*iam.DetachUserPolicyInput) (*iam.DetachUserPolicyOutput, error) + + EnableMFADeviceRequest(*iam.EnableMFADeviceInput) (*request.Request, *iam.EnableMFADeviceOutput) + + EnableMFADevice(*iam.EnableMFADeviceInput) (*iam.EnableMFADeviceOutput, error) + + GenerateCredentialReportRequest(*iam.GenerateCredentialReportInput) (*request.Request, *iam.GenerateCredentialReportOutput) + + GenerateCredentialReport(*iam.GenerateCredentialReportInput) (*iam.GenerateCredentialReportOutput, error) + + GetAccessKeyLastUsedRequest(*iam.GetAccessKeyLastUsedInput) (*request.Request, *iam.GetAccessKeyLastUsedOutput) + + GetAccessKeyLastUsed(*iam.GetAccessKeyLastUsedInput) (*iam.GetAccessKeyLastUsedOutput, error) + + GetAccountAuthorizationDetailsRequest(*iam.GetAccountAuthorizationDetailsInput) (*request.Request, *iam.GetAccountAuthorizationDetailsOutput) + + GetAccountAuthorizationDetails(*iam.GetAccountAuthorizationDetailsInput) (*iam.GetAccountAuthorizationDetailsOutput, error) + + GetAccountAuthorizationDetailsPages(*iam.GetAccountAuthorizationDetailsInput, func(*iam.GetAccountAuthorizationDetailsOutput, bool) bool) error + + GetAccountPasswordPolicyRequest(*iam.GetAccountPasswordPolicyInput) (*request.Request, *iam.GetAccountPasswordPolicyOutput) + + GetAccountPasswordPolicy(*iam.GetAccountPasswordPolicyInput) (*iam.GetAccountPasswordPolicyOutput, error) + + GetAccountSummaryRequest(*iam.GetAccountSummaryInput) (*request.Request, *iam.GetAccountSummaryOutput) + + GetAccountSummary(*iam.GetAccountSummaryInput) (*iam.GetAccountSummaryOutput, error) + + GetContextKeysForCustomPolicyRequest(*iam.GetContextKeysForCustomPolicyInput) (*request.Request, *iam.GetContextKeysForPolicyResponse) + + GetContextKeysForCustomPolicy(*iam.GetContextKeysForCustomPolicyInput) (*iam.GetContextKeysForPolicyResponse, error) + + GetContextKeysForPrincipalPolicyRequest(*iam.GetContextKeysForPrincipalPolicyInput) (*request.Request, *iam.GetContextKeysForPolicyResponse) + + GetContextKeysForPrincipalPolicy(*iam.GetContextKeysForPrincipalPolicyInput) (*iam.GetContextKeysForPolicyResponse, error) + + GetCredentialReportRequest(*iam.GetCredentialReportInput) (*request.Request, *iam.GetCredentialReportOutput) + + 
GetCredentialReport(*iam.GetCredentialReportInput) (*iam.GetCredentialReportOutput, error) + + GetGroupRequest(*iam.GetGroupInput) (*request.Request, *iam.GetGroupOutput) + + GetGroup(*iam.GetGroupInput) (*iam.GetGroupOutput, error) + + GetGroupPages(*iam.GetGroupInput, func(*iam.GetGroupOutput, bool) bool) error + + GetGroupPolicyRequest(*iam.GetGroupPolicyInput) (*request.Request, *iam.GetGroupPolicyOutput) + + GetGroupPolicy(*iam.GetGroupPolicyInput) (*iam.GetGroupPolicyOutput, error) + + GetInstanceProfileRequest(*iam.GetInstanceProfileInput) (*request.Request, *iam.GetInstanceProfileOutput) + + GetInstanceProfile(*iam.GetInstanceProfileInput) (*iam.GetInstanceProfileOutput, error) + + GetLoginProfileRequest(*iam.GetLoginProfileInput) (*request.Request, *iam.GetLoginProfileOutput) + + GetLoginProfile(*iam.GetLoginProfileInput) (*iam.GetLoginProfileOutput, error) + + GetOpenIDConnectProviderRequest(*iam.GetOpenIDConnectProviderInput) (*request.Request, *iam.GetOpenIDConnectProviderOutput) + + GetOpenIDConnectProvider(*iam.GetOpenIDConnectProviderInput) (*iam.GetOpenIDConnectProviderOutput, error) + + GetPolicyRequest(*iam.GetPolicyInput) (*request.Request, *iam.GetPolicyOutput) + + GetPolicy(*iam.GetPolicyInput) (*iam.GetPolicyOutput, error) + + GetPolicyVersionRequest(*iam.GetPolicyVersionInput) (*request.Request, *iam.GetPolicyVersionOutput) + + GetPolicyVersion(*iam.GetPolicyVersionInput) (*iam.GetPolicyVersionOutput, error) + + GetRoleRequest(*iam.GetRoleInput) (*request.Request, *iam.GetRoleOutput) + + GetRole(*iam.GetRoleInput) (*iam.GetRoleOutput, error) + + GetRolePolicyRequest(*iam.GetRolePolicyInput) (*request.Request, *iam.GetRolePolicyOutput) + + GetRolePolicy(*iam.GetRolePolicyInput) (*iam.GetRolePolicyOutput, error) + + GetSAMLProviderRequest(*iam.GetSAMLProviderInput) (*request.Request, *iam.GetSAMLProviderOutput) + + GetSAMLProvider(*iam.GetSAMLProviderInput) (*iam.GetSAMLProviderOutput, error) + + GetSSHPublicKeyRequest(*iam.GetSSHPublicKeyInput) (*request.Request, *iam.GetSSHPublicKeyOutput) + + GetSSHPublicKey(*iam.GetSSHPublicKeyInput) (*iam.GetSSHPublicKeyOutput, error) + + GetServerCertificateRequest(*iam.GetServerCertificateInput) (*request.Request, *iam.GetServerCertificateOutput) + + GetServerCertificate(*iam.GetServerCertificateInput) (*iam.GetServerCertificateOutput, error) + + GetUserRequest(*iam.GetUserInput) (*request.Request, *iam.GetUserOutput) + + GetUser(*iam.GetUserInput) (*iam.GetUserOutput, error) + + GetUserPolicyRequest(*iam.GetUserPolicyInput) (*request.Request, *iam.GetUserPolicyOutput) + + GetUserPolicy(*iam.GetUserPolicyInput) (*iam.GetUserPolicyOutput, error) + + ListAccessKeysRequest(*iam.ListAccessKeysInput) (*request.Request, *iam.ListAccessKeysOutput) + + ListAccessKeys(*iam.ListAccessKeysInput) (*iam.ListAccessKeysOutput, error) + + ListAccessKeysPages(*iam.ListAccessKeysInput, func(*iam.ListAccessKeysOutput, bool) bool) error + + ListAccountAliasesRequest(*iam.ListAccountAliasesInput) (*request.Request, *iam.ListAccountAliasesOutput) + + ListAccountAliases(*iam.ListAccountAliasesInput) (*iam.ListAccountAliasesOutput, error) + + ListAccountAliasesPages(*iam.ListAccountAliasesInput, func(*iam.ListAccountAliasesOutput, bool) bool) error + + ListAttachedGroupPoliciesRequest(*iam.ListAttachedGroupPoliciesInput) (*request.Request, *iam.ListAttachedGroupPoliciesOutput) + + ListAttachedGroupPolicies(*iam.ListAttachedGroupPoliciesInput) (*iam.ListAttachedGroupPoliciesOutput, error) + + 
ListAttachedGroupPoliciesPages(*iam.ListAttachedGroupPoliciesInput, func(*iam.ListAttachedGroupPoliciesOutput, bool) bool) error + + ListAttachedRolePoliciesRequest(*iam.ListAttachedRolePoliciesInput) (*request.Request, *iam.ListAttachedRolePoliciesOutput) + + ListAttachedRolePolicies(*iam.ListAttachedRolePoliciesInput) (*iam.ListAttachedRolePoliciesOutput, error) + + ListAttachedRolePoliciesPages(*iam.ListAttachedRolePoliciesInput, func(*iam.ListAttachedRolePoliciesOutput, bool) bool) error + + ListAttachedUserPoliciesRequest(*iam.ListAttachedUserPoliciesInput) (*request.Request, *iam.ListAttachedUserPoliciesOutput) + + ListAttachedUserPolicies(*iam.ListAttachedUserPoliciesInput) (*iam.ListAttachedUserPoliciesOutput, error) + + ListAttachedUserPoliciesPages(*iam.ListAttachedUserPoliciesInput, func(*iam.ListAttachedUserPoliciesOutput, bool) bool) error + + ListEntitiesForPolicyRequest(*iam.ListEntitiesForPolicyInput) (*request.Request, *iam.ListEntitiesForPolicyOutput) + + ListEntitiesForPolicy(*iam.ListEntitiesForPolicyInput) (*iam.ListEntitiesForPolicyOutput, error) + + ListEntitiesForPolicyPages(*iam.ListEntitiesForPolicyInput, func(*iam.ListEntitiesForPolicyOutput, bool) bool) error + + ListGroupPoliciesRequest(*iam.ListGroupPoliciesInput) (*request.Request, *iam.ListGroupPoliciesOutput) + + ListGroupPolicies(*iam.ListGroupPoliciesInput) (*iam.ListGroupPoliciesOutput, error) + + ListGroupPoliciesPages(*iam.ListGroupPoliciesInput, func(*iam.ListGroupPoliciesOutput, bool) bool) error + + ListGroupsRequest(*iam.ListGroupsInput) (*request.Request, *iam.ListGroupsOutput) + + ListGroups(*iam.ListGroupsInput) (*iam.ListGroupsOutput, error) + + ListGroupsPages(*iam.ListGroupsInput, func(*iam.ListGroupsOutput, bool) bool) error + + ListGroupsForUserRequest(*iam.ListGroupsForUserInput) (*request.Request, *iam.ListGroupsForUserOutput) + + ListGroupsForUser(*iam.ListGroupsForUserInput) (*iam.ListGroupsForUserOutput, error) + + ListGroupsForUserPages(*iam.ListGroupsForUserInput, func(*iam.ListGroupsForUserOutput, bool) bool) error + + ListInstanceProfilesRequest(*iam.ListInstanceProfilesInput) (*request.Request, *iam.ListInstanceProfilesOutput) + + ListInstanceProfiles(*iam.ListInstanceProfilesInput) (*iam.ListInstanceProfilesOutput, error) + + ListInstanceProfilesPages(*iam.ListInstanceProfilesInput, func(*iam.ListInstanceProfilesOutput, bool) bool) error + + ListInstanceProfilesForRoleRequest(*iam.ListInstanceProfilesForRoleInput) (*request.Request, *iam.ListInstanceProfilesForRoleOutput) + + ListInstanceProfilesForRole(*iam.ListInstanceProfilesForRoleInput) (*iam.ListInstanceProfilesForRoleOutput, error) + + ListInstanceProfilesForRolePages(*iam.ListInstanceProfilesForRoleInput, func(*iam.ListInstanceProfilesForRoleOutput, bool) bool) error + + ListMFADevicesRequest(*iam.ListMFADevicesInput) (*request.Request, *iam.ListMFADevicesOutput) + + ListMFADevices(*iam.ListMFADevicesInput) (*iam.ListMFADevicesOutput, error) + + ListMFADevicesPages(*iam.ListMFADevicesInput, func(*iam.ListMFADevicesOutput, bool) bool) error + + ListOpenIDConnectProvidersRequest(*iam.ListOpenIDConnectProvidersInput) (*request.Request, *iam.ListOpenIDConnectProvidersOutput) + + ListOpenIDConnectProviders(*iam.ListOpenIDConnectProvidersInput) (*iam.ListOpenIDConnectProvidersOutput, error) + + ListPoliciesRequest(*iam.ListPoliciesInput) (*request.Request, *iam.ListPoliciesOutput) + + ListPolicies(*iam.ListPoliciesInput) (*iam.ListPoliciesOutput, error) + + ListPoliciesPages(*iam.ListPoliciesInput, func(*iam.ListPoliciesOutput, 
bool) bool) error + + ListPolicyVersionsRequest(*iam.ListPolicyVersionsInput) (*request.Request, *iam.ListPolicyVersionsOutput) + + ListPolicyVersions(*iam.ListPolicyVersionsInput) (*iam.ListPolicyVersionsOutput, error) + + ListRolePoliciesRequest(*iam.ListRolePoliciesInput) (*request.Request, *iam.ListRolePoliciesOutput) + + ListRolePolicies(*iam.ListRolePoliciesInput) (*iam.ListRolePoliciesOutput, error) + + ListRolePoliciesPages(*iam.ListRolePoliciesInput, func(*iam.ListRolePoliciesOutput, bool) bool) error + + ListRolesRequest(*iam.ListRolesInput) (*request.Request, *iam.ListRolesOutput) + + ListRoles(*iam.ListRolesInput) (*iam.ListRolesOutput, error) + + ListRolesPages(*iam.ListRolesInput, func(*iam.ListRolesOutput, bool) bool) error + + ListSAMLProvidersRequest(*iam.ListSAMLProvidersInput) (*request.Request, *iam.ListSAMLProvidersOutput) + + ListSAMLProviders(*iam.ListSAMLProvidersInput) (*iam.ListSAMLProvidersOutput, error) + + ListSSHPublicKeysRequest(*iam.ListSSHPublicKeysInput) (*request.Request, *iam.ListSSHPublicKeysOutput) + + ListSSHPublicKeys(*iam.ListSSHPublicKeysInput) (*iam.ListSSHPublicKeysOutput, error) + + ListServerCertificatesRequest(*iam.ListServerCertificatesInput) (*request.Request, *iam.ListServerCertificatesOutput) + + ListServerCertificates(*iam.ListServerCertificatesInput) (*iam.ListServerCertificatesOutput, error) + + ListServerCertificatesPages(*iam.ListServerCertificatesInput, func(*iam.ListServerCertificatesOutput, bool) bool) error + + ListSigningCertificatesRequest(*iam.ListSigningCertificatesInput) (*request.Request, *iam.ListSigningCertificatesOutput) + + ListSigningCertificates(*iam.ListSigningCertificatesInput) (*iam.ListSigningCertificatesOutput, error) + + ListSigningCertificatesPages(*iam.ListSigningCertificatesInput, func(*iam.ListSigningCertificatesOutput, bool) bool) error + + ListUserPoliciesRequest(*iam.ListUserPoliciesInput) (*request.Request, *iam.ListUserPoliciesOutput) + + ListUserPolicies(*iam.ListUserPoliciesInput) (*iam.ListUserPoliciesOutput, error) + + ListUserPoliciesPages(*iam.ListUserPoliciesInput, func(*iam.ListUserPoliciesOutput, bool) bool) error + + ListUsersRequest(*iam.ListUsersInput) (*request.Request, *iam.ListUsersOutput) + + ListUsers(*iam.ListUsersInput) (*iam.ListUsersOutput, error) + + ListUsersPages(*iam.ListUsersInput, func(*iam.ListUsersOutput, bool) bool) error + + ListVirtualMFADevicesRequest(*iam.ListVirtualMFADevicesInput) (*request.Request, *iam.ListVirtualMFADevicesOutput) + + ListVirtualMFADevices(*iam.ListVirtualMFADevicesInput) (*iam.ListVirtualMFADevicesOutput, error) + + ListVirtualMFADevicesPages(*iam.ListVirtualMFADevicesInput, func(*iam.ListVirtualMFADevicesOutput, bool) bool) error + + PutGroupPolicyRequest(*iam.PutGroupPolicyInput) (*request.Request, *iam.PutGroupPolicyOutput) + + PutGroupPolicy(*iam.PutGroupPolicyInput) (*iam.PutGroupPolicyOutput, error) + + PutRolePolicyRequest(*iam.PutRolePolicyInput) (*request.Request, *iam.PutRolePolicyOutput) + + PutRolePolicy(*iam.PutRolePolicyInput) (*iam.PutRolePolicyOutput, error) + + PutUserPolicyRequest(*iam.PutUserPolicyInput) (*request.Request, *iam.PutUserPolicyOutput) + + PutUserPolicy(*iam.PutUserPolicyInput) (*iam.PutUserPolicyOutput, error) + + RemoveClientIDFromOpenIDConnectProviderRequest(*iam.RemoveClientIDFromOpenIDConnectProviderInput) (*request.Request, *iam.RemoveClientIDFromOpenIDConnectProviderOutput) + + RemoveClientIDFromOpenIDConnectProvider(*iam.RemoveClientIDFromOpenIDConnectProviderInput) 
(*iam.RemoveClientIDFromOpenIDConnectProviderOutput, error) + + RemoveRoleFromInstanceProfileRequest(*iam.RemoveRoleFromInstanceProfileInput) (*request.Request, *iam.RemoveRoleFromInstanceProfileOutput) + + RemoveRoleFromInstanceProfile(*iam.RemoveRoleFromInstanceProfileInput) (*iam.RemoveRoleFromInstanceProfileOutput, error) + + RemoveUserFromGroupRequest(*iam.RemoveUserFromGroupInput) (*request.Request, *iam.RemoveUserFromGroupOutput) + + RemoveUserFromGroup(*iam.RemoveUserFromGroupInput) (*iam.RemoveUserFromGroupOutput, error) + + ResyncMFADeviceRequest(*iam.ResyncMFADeviceInput) (*request.Request, *iam.ResyncMFADeviceOutput) + + ResyncMFADevice(*iam.ResyncMFADeviceInput) (*iam.ResyncMFADeviceOutput, error) + + SetDefaultPolicyVersionRequest(*iam.SetDefaultPolicyVersionInput) (*request.Request, *iam.SetDefaultPolicyVersionOutput) + + SetDefaultPolicyVersion(*iam.SetDefaultPolicyVersionInput) (*iam.SetDefaultPolicyVersionOutput, error) + + SimulateCustomPolicyRequest(*iam.SimulateCustomPolicyInput) (*request.Request, *iam.SimulatePolicyResponse) + + SimulateCustomPolicy(*iam.SimulateCustomPolicyInput) (*iam.SimulatePolicyResponse, error) + + SimulatePrincipalPolicyRequest(*iam.SimulatePrincipalPolicyInput) (*request.Request, *iam.SimulatePolicyResponse) + + SimulatePrincipalPolicy(*iam.SimulatePrincipalPolicyInput) (*iam.SimulatePolicyResponse, error) + + UpdateAccessKeyRequest(*iam.UpdateAccessKeyInput) (*request.Request, *iam.UpdateAccessKeyOutput) + + UpdateAccessKey(*iam.UpdateAccessKeyInput) (*iam.UpdateAccessKeyOutput, error) + + UpdateAccountPasswordPolicyRequest(*iam.UpdateAccountPasswordPolicyInput) (*request.Request, *iam.UpdateAccountPasswordPolicyOutput) + + UpdateAccountPasswordPolicy(*iam.UpdateAccountPasswordPolicyInput) (*iam.UpdateAccountPasswordPolicyOutput, error) + + UpdateAssumeRolePolicyRequest(*iam.UpdateAssumeRolePolicyInput) (*request.Request, *iam.UpdateAssumeRolePolicyOutput) + + UpdateAssumeRolePolicy(*iam.UpdateAssumeRolePolicyInput) (*iam.UpdateAssumeRolePolicyOutput, error) + + UpdateGroupRequest(*iam.UpdateGroupInput) (*request.Request, *iam.UpdateGroupOutput) + + UpdateGroup(*iam.UpdateGroupInput) (*iam.UpdateGroupOutput, error) + + UpdateLoginProfileRequest(*iam.UpdateLoginProfileInput) (*request.Request, *iam.UpdateLoginProfileOutput) + + UpdateLoginProfile(*iam.UpdateLoginProfileInput) (*iam.UpdateLoginProfileOutput, error) + + UpdateOpenIDConnectProviderThumbprintRequest(*iam.UpdateOpenIDConnectProviderThumbprintInput) (*request.Request, *iam.UpdateOpenIDConnectProviderThumbprintOutput) + + UpdateOpenIDConnectProviderThumbprint(*iam.UpdateOpenIDConnectProviderThumbprintInput) (*iam.UpdateOpenIDConnectProviderThumbprintOutput, error) + + UpdateSAMLProviderRequest(*iam.UpdateSAMLProviderInput) (*request.Request, *iam.UpdateSAMLProviderOutput) + + UpdateSAMLProvider(*iam.UpdateSAMLProviderInput) (*iam.UpdateSAMLProviderOutput, error) + + UpdateSSHPublicKeyRequest(*iam.UpdateSSHPublicKeyInput) (*request.Request, *iam.UpdateSSHPublicKeyOutput) + + UpdateSSHPublicKey(*iam.UpdateSSHPublicKeyInput) (*iam.UpdateSSHPublicKeyOutput, error) + + UpdateServerCertificateRequest(*iam.UpdateServerCertificateInput) (*request.Request, *iam.UpdateServerCertificateOutput) + + UpdateServerCertificate(*iam.UpdateServerCertificateInput) (*iam.UpdateServerCertificateOutput, error) + + UpdateSigningCertificateRequest(*iam.UpdateSigningCertificateInput) (*request.Request, *iam.UpdateSigningCertificateOutput) + + UpdateSigningCertificate(*iam.UpdateSigningCertificateInput) 
(*iam.UpdateSigningCertificateOutput, error)
+
+	UpdateUserRequest(*iam.UpdateUserInput) (*request.Request, *iam.UpdateUserOutput)
+
+	UpdateUser(*iam.UpdateUserInput) (*iam.UpdateUserOutput, error)
+
+	UploadSSHPublicKeyRequest(*iam.UploadSSHPublicKeyInput) (*request.Request, *iam.UploadSSHPublicKeyOutput)
+
+	UploadSSHPublicKey(*iam.UploadSSHPublicKeyInput) (*iam.UploadSSHPublicKeyOutput, error)
+
+	UploadServerCertificateRequest(*iam.UploadServerCertificateInput) (*request.Request, *iam.UploadServerCertificateOutput)
+
+	UploadServerCertificate(*iam.UploadServerCertificateInput) (*iam.UploadServerCertificateOutput, error)
+
+	UploadSigningCertificateRequest(*iam.UploadSigningCertificateInput) (*request.Request, *iam.UploadSigningCertificateOutput)
+
+	UploadSigningCertificate(*iam.UploadSigningCertificateInput) (*iam.UploadSigningCertificateOutput, error)
+}
+
+var _ IAMAPI = (*iam.IAM)(nil)
diff --git a/vendor/github.com/aws/aws-sdk-go/service/iam/service.go b/vendor/github.com/aws/aws-sdk-go/service/iam/service.go
new file mode 100644
index 000000000..e8cfb9c6f
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/iam/service.go
@@ -0,0 +1,133 @@
+// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT.
+
+package iam
+
+import (
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/aws/client"
+	"github.com/aws/aws-sdk-go/aws/client/metadata"
+	"github.com/aws/aws-sdk-go/aws/request"
+	"github.com/aws/aws-sdk-go/private/protocol/query"
+	"github.com/aws/aws-sdk-go/private/signer/v4"
+)
+
+// AWS Identity and Access Management (IAM) is a web service that you can use
+// to manage users and user permissions under your AWS account. This guide provides
+// descriptions of IAM actions that you can call programmatically. For general
+// information about IAM, see AWS Identity and Access Management (IAM) (http://aws.amazon.com/iam/).
+// For the user guide for IAM, see Using IAM (http://docs.aws.amazon.com/IAM/latest/UserGuide/).
+//
+// AWS provides SDKs that consist of libraries and sample code for various
+// programming languages and platforms (Java, Ruby, .NET, iOS, Android, etc.).
+// The SDKs provide a convenient way to create programmatic access to IAM and
+// AWS. For example, the SDKs take care of tasks such as cryptographically signing
+// requests (see below), managing errors, and retrying requests automatically.
+// For information about the AWS SDKs, including how to download and install
+// them, see the Tools for Amazon Web Services (http://aws.amazon.com/tools/)
+// page. We recommend that you use the AWS SDKs to make programmatic API calls
+// to IAM. However, you can also use the IAM Query API to make direct calls
+// to the IAM web service. To learn more about the IAM Query API, see Making
+// Query Requests (http://docs.aws.amazon.com/IAM/latest/UserGuide/IAM_UsingQueryAPI.html)
+// in the Using IAM guide. IAM supports GET and POST requests for all actions.
+// That is, the API does not require you to use GET for some actions and POST
+// for others. However, GET requests are subject to the size limitation of a
+// URL. Therefore, for operations that require larger sizes, use a POST request.
+//
+// Signing Requests
+//
+// Requests must be signed using an access key ID and a secret access key.
+// We strongly recommend that you do not use your AWS account access key ID
+// and secret access key for everyday work with IAM. You can use the access
+// key ID and secret access key for an IAM user or you can use the AWS Security
+// Token Service to generate temporary security credentials and use those to
+// sign requests.
+//
+// To sign requests, we recommend that you use Signature Version 4 (http://docs.aws.amazon.com/general/latest/gr/signature-version-4.html).
+// If you have an existing application that uses Signature Version 2, you do
+// not have to update it to use Signature Version 4. However, some operations
+// now require Signature Version 4. The documentation for operations that require
+// version 4 indicates this requirement.
+//
+// Additional Resources
+//
+// For more information, see the following:
+//
+// AWS Security Credentials (http://docs.aws.amazon.com/general/latest/gr/aws-security-credentials.html).
+// This topic provides general information about the types of credentials used
+// for accessing AWS. IAM Best Practices (http://docs.aws.amazon.com/IAM/latest/UserGuide/IAMBestPractices.html).
+// This topic presents a list of suggestions for using the IAM service to help
+// secure your AWS resources. Signing AWS API Requests (http://docs.aws.amazon.com/general/latest/gr/signing_aws_api_requests.html).
+// This set of topics walks you through the process of signing a request using
+// an access key ID and secret access key.
+//
+// The service client's operations are safe to be used concurrently.
+// It is not safe to mutate any of the client's properties though.
+type IAM struct {
+	*client.Client
+}
+
+// Used for custom client initialization logic
+var initClient func(*client.Client)
+
+// Used for custom request initialization logic
+var initRequest func(*request.Request)
+
+// A ServiceName is the name of the service the client will make API calls to.
+const ServiceName = "iam"
+
+// New creates a new instance of the IAM client with a session.
+// If additional configuration is needed for the client instance, use the optional
+// aws.Config parameter to add your extra config.
+//
+// Example:
+//     // Create an IAM client from just a session.
+//     svc := iam.New(mySession)
+//
+//     // Create an IAM client with additional configuration
+//     svc := iam.New(mySession, aws.NewConfig().WithRegion("us-west-2"))
+func New(p client.ConfigProvider, cfgs ...*aws.Config) *IAM {
+	c := p.ClientConfig(ServiceName, cfgs...)
+	return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion)
+}
+
+// newClient creates, initializes and returns a new service client instance.
+func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *IAM {
+	svc := &IAM{
+		Client: client.New(
+			cfg,
+			metadata.ClientInfo{
+				ServiceName:   ServiceName,
+				SigningRegion: signingRegion,
+				Endpoint:      endpoint,
+				APIVersion:    "2010-05-08",
+			},
+			handlers,
+		),
+	}
+
+	// Handlers
+	svc.Handlers.Sign.PushBack(v4.Sign)
+	svc.Handlers.Build.PushBack(query.Build)
+	svc.Handlers.Unmarshal.PushBack(query.Unmarshal)
+	svc.Handlers.UnmarshalMeta.PushBack(query.UnmarshalMeta)
+	svc.Handlers.UnmarshalError.PushBack(query.UnmarshalError)
+
+	// Run custom client initialization if present
+	if initClient != nil {
+		initClient(svc.Client)
+	}
+
+	return svc
+}
+
+// newRequest creates a new request for an IAM operation and runs any
+// custom request initialization.
+func (c *IAM) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + // Run custom request initialization if present + if initRequest != nil { + initRequest(req) + } + + return req +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/iam/waiters.go b/vendor/github.com/aws/aws-sdk-go/service/iam/waiters.go new file mode 100644 index 000000000..b27303052 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/iam/waiters.go @@ -0,0 +1,65 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +package iam + +import ( + "github.com/aws/aws-sdk-go/private/waiter" +) + +func (c *IAM) WaitUntilInstanceProfileExists(input *GetInstanceProfileInput) error { + waiterCfg := waiter.Config{ + Operation: "GetInstanceProfile", + Delay: 1, + MaxAttempts: 40, + Acceptors: []waiter.WaitAcceptor{ + { + State: "success", + Matcher: "status", + Argument: "", + Expected: 200, + }, + { + State: "retry", + Matcher: "status", + Argument: "", + Expected: 404, + }, + }, + } + + w := waiter.Waiter{ + Client: c, + Input: input, + Config: waiterCfg, + } + return w.Wait() +} + +func (c *IAM) WaitUntilUserExists(input *GetUserInput) error { + waiterCfg := waiter.Config{ + Operation: "GetUser", + Delay: 1, + MaxAttempts: 20, + Acceptors: []waiter.WaitAcceptor{ + { + State: "success", + Matcher: "status", + Argument: "", + Expected: 200, + }, + { + State: "retry", + Matcher: "error", + Argument: "", + Expected: "NoSuchEntity", + }, + }, + } + + w := waiter.Waiter{ + Client: c, + Input: input, + Config: waiterCfg, + } + return w.Wait() +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/kinesis/api.go b/vendor/github.com/aws/aws-sdk-go/service/kinesis/api.go new file mode 100644 index 000000000..a40f4481e --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/kinesis/api.go @@ -0,0 +1,1747 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +// Package kinesis provides a client for Amazon Kinesis. +package kinesis + +import ( + "time" + + "github.com/aws/aws-sdk-go/aws/awsutil" + "github.com/aws/aws-sdk-go/aws/request" +) + +const opAddTagsToStream = "AddTagsToStream" + +// AddTagsToStreamRequest generates a request for the AddTagsToStream operation. +func (c *Kinesis) AddTagsToStreamRequest(input *AddTagsToStreamInput) (req *request.Request, output *AddTagsToStreamOutput) { + op := &request.Operation{ + Name: opAddTagsToStream, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &AddTagsToStreamInput{} + } + + req = c.newRequest(op, input, output) + output = &AddTagsToStreamOutput{} + req.Data = output + return +} + +// Adds or updates tags for the specified Amazon Kinesis stream. Each stream +// can have up to 10 tags. +// +// If tags have already been assigned to the stream, AddTagsToStream overwrites +// any existing tags that correspond to the specified tag keys. +func (c *Kinesis) AddTagsToStream(input *AddTagsToStreamInput) (*AddTagsToStreamOutput, error) { + req, out := c.AddTagsToStreamRequest(input) + err := req.Send() + return out, err +} + +const opCreateStream = "CreateStream" + +// CreateStreamRequest generates a request for the CreateStream operation. 
+func (c *Kinesis) CreateStreamRequest(input *CreateStreamInput) (req *request.Request, output *CreateStreamOutput) {
+	op := &request.Operation{
+		Name:       opCreateStream,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &CreateStreamInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &CreateStreamOutput{}
+	req.Data = output
+	return
+}
+
+// Creates an Amazon Kinesis stream. A stream captures and transports data records
+// that are continuously emitted from different data sources or producers. Scale-out
+// within an Amazon Kinesis stream is explicitly supported by means of shards,
+// which are uniquely identified groups of data records in an Amazon Kinesis
+// stream.
+//
+// You specify and control the number of shards that a stream is composed of.
+// Each shard can support reads up to 5 transactions per second, up to a maximum
+// data read total of 2 MB per second. Each shard can support writes up to 1,000
+// records per second, up to a maximum data write total of 1 MB per second.
+// You can add shards to a stream if the amount of data input increases and
+// you can remove shards if the amount of data input decreases.
+//
+// The stream name identifies the stream. The name is scoped to the AWS account
+// used by the application. It is also scoped by region. That is, two streams
+// in two different accounts can have the same name, and two streams in the
+// same account, but in two different regions, can have the same name.
+//
+// CreateStream is an asynchronous operation. Upon receiving a CreateStream
+// request, Amazon Kinesis immediately returns and sets the stream status to
+// CREATING. After the stream is created, Amazon Kinesis sets the stream status
+// to ACTIVE. You should perform read and write operations only on an ACTIVE
+// stream.
+//
+// You receive a LimitExceededException when making a CreateStream request
+// if you try to do one of the following:
+//
+// Have more than five streams in the CREATING state at any point in time.
+// Create more shards than are authorized for your account. For the default
+// shard limit for an AWS account, see Amazon Kinesis Limits (http://docs.aws.amazon.com/kinesis/latest/dev/service-sizes-and-limits.html).
+// If you need to increase this limit, contact AWS Support (http://docs.aws.amazon.com/general/latest/gr/aws_service_limits.html).
+//
+// You can use DescribeStream to check the stream status, which is returned
+// in StreamStatus.
+//
+// CreateStream has a limit of 5 transactions per second per account.
+func (c *Kinesis) CreateStream(input *CreateStreamInput) (*CreateStreamOutput, error) {
+	req, out := c.CreateStreamRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+const opDecreaseStreamRetentionPeriod = "DecreaseStreamRetentionPeriod"
+
+// DecreaseStreamRetentionPeriodRequest generates a request for the DecreaseStreamRetentionPeriod operation.
+func (c *Kinesis) DecreaseStreamRetentionPeriodRequest(input *DecreaseStreamRetentionPeriodInput) (req *request.Request, output *DecreaseStreamRetentionPeriodOutput) {
+	op := &request.Operation{
+		Name:       opDecreaseStreamRetentionPeriod,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &DecreaseStreamRetentionPeriodInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &DecreaseStreamRetentionPeriodOutput{}
+	req.Data = output
+	return
+}
+
+// Decreases the stream's retention period, which is the length of time data
+// records are accessible after they are added to the stream. The minimum value
+// of a stream’s retention period is 24 hours.
+//
+// This operation may result in lost data. For example, if the stream's retention
+// period is 48 hours and is decreased to 24 hours, any data already in the
+// stream that is older than 24 hours is inaccessible.
+func (c *Kinesis) DecreaseStreamRetentionPeriod(input *DecreaseStreamRetentionPeriodInput) (*DecreaseStreamRetentionPeriodOutput, error) {
+	req, out := c.DecreaseStreamRetentionPeriodRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+const opDeleteStream = "DeleteStream"
+
+// DeleteStreamRequest generates a request for the DeleteStream operation.
+func (c *Kinesis) DeleteStreamRequest(input *DeleteStreamInput) (req *request.Request, output *DeleteStreamOutput) {
+	op := &request.Operation{
+		Name:       opDeleteStream,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &DeleteStreamInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &DeleteStreamOutput{}
+	req.Data = output
+	return
+}
+
+// Deletes a stream and all its shards and data. You must shut down any applications
+// that are operating on the stream before you delete the stream. If an application
+// attempts to operate on a deleted stream, it will receive the exception ResourceNotFoundException.
+//
+// If the stream is in the ACTIVE state, you can delete it. After a DeleteStream
+// request, the specified stream is in the DELETING state until Amazon Kinesis
+// completes the deletion.
+//
+// Note: Amazon Kinesis might continue to accept data read and write operations,
+// such as PutRecord, PutRecords, and GetRecords, on a stream in the DELETING
+// state until the stream deletion is complete.
+//
+// When you delete a stream, any shards in that stream are also deleted, and
+// any tags are dissociated from the stream.
+//
+// You can use the DescribeStream operation to check the state of the stream,
+// which is returned in StreamStatus.
+//
+// DeleteStream has a limit of 5 transactions per second per account.
+func (c *Kinesis) DeleteStream(input *DeleteStreamInput) (*DeleteStreamOutput, error) {
+	req, out := c.DeleteStreamRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+const opDescribeStream = "DescribeStream"
+
+// DescribeStreamRequest generates a request for the DescribeStream operation.
+func (c *Kinesis) DescribeStreamRequest(input *DescribeStreamInput) (req *request.Request, output *DescribeStreamOutput) {
+	op := &request.Operation{
+		Name:       opDescribeStream,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+		Paginator: &request.Paginator{
+			InputTokens:     []string{"ExclusiveStartShardId"},
+			OutputTokens:    []string{"StreamDescription.Shards[-1].ShardId"},
+			LimitToken:      "Limit",
+			TruncationToken: "StreamDescription.HasMoreShards",
+		},
+	}
+
+	if input == nil {
+		input = &DescribeStreamInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &DescribeStreamOutput{}
+	req.Data = output
+	return
+}
+
+// Describes the specified stream.
+//
+// The information about the stream includes its current status, its Amazon
+// Resource Name (ARN), and an array of shard objects. For each shard object,
+// there is information about the hash key and sequence number ranges that the
+// shard spans, and the IDs of any earlier shards that played a role in creating
+// the shard. A sequence number is the identifier associated with every record
+// ingested in the Amazon Kinesis stream. The sequence number is assigned when
+// a record is put into the stream.
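+//
+// Example (editor's sketch, not part of the generated documentation): a
+// hypothetical status check built on DescribeStream, where `svc` is assumed
+// to be a *kinesis.Kinesis client and the stream name is a placeholder:
+//
+//     out, err := svc.DescribeStream(&kinesis.DescribeStreamInput{
+//         StreamName: aws.String("example-stream"), // hypothetical stream name
+//     })
+//     if err != nil {
+//         // handle error
+//     }
+//     status := aws.StringValue(out.StreamDescription.StreamStatus)
+//     // status is one of "CREATING", "DELETING", "ACTIVE", or "UPDATING".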
+// +// You can limit the number of returned shards using the Limit parameter. The +// number of shards in a stream may be too large to return from a single call +// to DescribeStream. You can detect this by using the HasMoreShards flag in +// the returned output. HasMoreShards is set to true when there is more data +// available. +// +// DescribeStream is a paginated operation. If there are more shards available, +// you can request them using the shard ID of the last shard returned. Specify +// this ID in the ExclusiveStartShardId parameter in a subsequent request to +// DescribeStream. +// +// DescribeStream has a limit of 10 transactions per second per account. +func (c *Kinesis) DescribeStream(input *DescribeStreamInput) (*DescribeStreamOutput, error) { + req, out := c.DescribeStreamRequest(input) + err := req.Send() + return out, err +} + +func (c *Kinesis) DescribeStreamPages(input *DescribeStreamInput, fn func(p *DescribeStreamOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.DescribeStreamRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*DescribeStreamOutput), lastPage) + }) +} + +const opGetRecords = "GetRecords" + +// GetRecordsRequest generates a request for the GetRecords operation. +func (c *Kinesis) GetRecordsRequest(input *GetRecordsInput) (req *request.Request, output *GetRecordsOutput) { + op := &request.Operation{ + Name: opGetRecords, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetRecordsInput{} + } + + req = c.newRequest(op, input, output) + output = &GetRecordsOutput{} + req.Data = output + return +} + +// Gets data records from a shard. +// +// Specify a shard iterator using the ShardIterator parameter. The shard iterator +// specifies the position in the shard from which you want to start reading +// data records sequentially. If there are no records available in the portion +// of the shard that the iterator points to, GetRecords returns an empty list. +// Note that it might take multiple calls to get to a portion of the shard that +// contains records. +// +// You can scale by provisioning multiple shards. Your application should have +// one thread per shard, each reading continuously from its stream. To read +// from a stream continually, call GetRecords in a loop. Use GetShardIterator +// to get the shard iterator to specify in the first GetRecords call. GetRecords +// returns a new shard iterator in NextShardIterator. Specify the shard iterator +// returned in NextShardIterator in subsequent calls to GetRecords. Note that +// if the shard has been closed, the shard iterator can't return more data and +// GetRecords returns null in NextShardIterator. You can terminate the loop +// when the shard is closed, or when the shard iterator reaches the record with +// the sequence number or other attribute that marks it as the last record to +// process. +// +// Each data record can be up to 1 MB in size, and each shard can read up to +// 2 MB per second. You can ensure that your calls don't exceed the maximum +// supported size or throughput by using the Limit parameter to specify the +// maximum number of records that GetRecords can return. Consider your average +// record size when determining this limit. +// +// The size of the data returned by GetRecords will vary depending on the utilization +// of the shard. The maximum size of data that GetRecords can return is 10 MB. 
+// If a call returns this amount of data, subsequent calls made within the next +// 5 seconds throw ProvisionedThroughputExceededException. If there is insufficient +// provisioned throughput on the shard, subsequent calls made within the next +// 1 second throw ProvisionedThroughputExceededException. Note that GetRecords +// won't return any data when it throws an exception. For this reason, we recommend +// that you wait one second between calls to GetRecords; however, it's possible +// that the application will get exceptions for longer than 1 second. +// +// To detect whether the application is falling behind in processing, you can +// use the MillisBehindLatest response attribute. You can also monitor the stream +// using CloudWatch metrics (see Monitoring Amazon Kinesis (http://docs.aws.amazon.com/kinesis/latest/dev/monitoring.html) +// in the Amazon Kinesis Developer Guide). +// +// Each Amazon Kinesis record includes a value, ApproximateArrivalTimestamp, +// that is set when an Amazon Kinesis stream successfully receives and stores +// a record. This is commonly referred to as a server-side timestamp, which +// is different than a client-side timestamp, where the timestamp is set when +// a data producer creates or sends the record to a stream. The timestamp has +// millisecond precision. There are no guarantees about the timestamp accuracy, +// or that the timestamp is always increasing. For example, records in a shard +// or across a stream might have timestamps that are out of order. +func (c *Kinesis) GetRecords(input *GetRecordsInput) (*GetRecordsOutput, error) { + req, out := c.GetRecordsRequest(input) + err := req.Send() + return out, err +} + +const opGetShardIterator = "GetShardIterator" + +// GetShardIteratorRequest generates a request for the GetShardIterator operation. +func (c *Kinesis) GetShardIteratorRequest(input *GetShardIteratorInput) (req *request.Request, output *GetShardIteratorOutput) { + op := &request.Operation{ + Name: opGetShardIterator, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetShardIteratorInput{} + } + + req = c.newRequest(op, input, output) + output = &GetShardIteratorOutput{} + req.Data = output + return +} + +// Gets a shard iterator. A shard iterator expires five minutes after it is +// returned to the requester. +// +// A shard iterator specifies the position in the shard from which to start +// reading data records sequentially. A shard iterator specifies this position +// using the sequence number of a data record in a shard. A sequence number +// is the identifier associated with every record ingested in the Amazon Kinesis +// stream. The sequence number is assigned when a record is put into the stream. +// +// You must specify the shard iterator type. For example, you can set the ShardIteratorType +// parameter to read exactly from the position denoted by a specific sequence +// number by using the AT_SEQUENCE_NUMBER shard iterator type, or right after +// the sequence number by using the AFTER_SEQUENCE_NUMBER shard iterator type, +// using sequence numbers returned by earlier calls to PutRecord, PutRecords, +// GetRecords, or DescribeStream. You can specify the shard iterator type TRIM_HORIZON +// in the request to cause ShardIterator to point to the last untrimmed record +// in the shard in the system, which is the oldest data record in the shard. 
+// Or you can point to just after the most recent record in the shard, by using
+// the shard iterator type LATEST, so that you always read the most recent data
+// in the shard.
+//
+// When you repeatedly read from an Amazon Kinesis stream, use a GetShardIterator
+// request to get the first shard iterator for use in your first GetRecords
+// request and then use the shard iterator returned by the GetRecords request
+// in NextShardIterator for subsequent reads. A new shard iterator is returned
+// by every GetRecords request in NextShardIterator, which you use in the ShardIterator
+// parameter of the next GetRecords request.
+//
+// If a GetShardIterator request is made too often, you receive a ProvisionedThroughputExceededException.
+// For more information about throughput limits, see GetRecords.
+//
+// If the shard is closed, the iterator can't return more data, and GetShardIterator
+// returns null for its ShardIterator. A shard can be closed using SplitShard
+// or MergeShards.
+//
+// GetShardIterator has a limit of 5 transactions per second per account per
+// open shard.
+func (c *Kinesis) GetShardIterator(input *GetShardIteratorInput) (*GetShardIteratorOutput, error) {
+	req, out := c.GetShardIteratorRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+const opIncreaseStreamRetentionPeriod = "IncreaseStreamRetentionPeriod"
+
+// IncreaseStreamRetentionPeriodRequest generates a request for the IncreaseStreamRetentionPeriod operation.
+func (c *Kinesis) IncreaseStreamRetentionPeriodRequest(input *IncreaseStreamRetentionPeriodInput) (req *request.Request, output *IncreaseStreamRetentionPeriodOutput) {
+	op := &request.Operation{
+		Name:       opIncreaseStreamRetentionPeriod,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &IncreaseStreamRetentionPeriodInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &IncreaseStreamRetentionPeriodOutput{}
+	req.Data = output
+	return
+}
+
+// Increases the stream's retention period, which is the length of time data
+// records are accessible after they are added to the stream. The maximum value
+// of a stream’s retention period is 168 hours (7 days).
+//
+// Upon choosing a longer stream retention period, this operation increases
+// the time period during which records that have not yet expired are accessible.
+// However, it will not make previous data that has expired (older than the stream’s
+// previous retention period) accessible after the operation has been called.
+// For example, if a stream’s retention period is set to 24 hours and is increased
+// to 168 hours, any data that is older than 24 hours will remain inaccessible
+// to consumer applications.
+func (c *Kinesis) IncreaseStreamRetentionPeriod(input *IncreaseStreamRetentionPeriodInput) (*IncreaseStreamRetentionPeriodOutput, error) {
+	req, out := c.IncreaseStreamRetentionPeriodRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+const opListStreams = "ListStreams"
+
+// ListStreamsRequest generates a request for the ListStreams operation.
+func (c *Kinesis) ListStreamsRequest(input *ListStreamsInput) (req *request.Request, output *ListStreamsOutput) { + op := &request.Operation{ + Name: opListStreams, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"ExclusiveStartStreamName"}, + OutputTokens: []string{"StreamNames[-1]"}, + LimitToken: "Limit", + TruncationToken: "HasMoreStreams", + }, + } + + if input == nil { + input = &ListStreamsInput{} + } + + req = c.newRequest(op, input, output) + output = &ListStreamsOutput{} + req.Data = output + return +} + +// Lists your streams. +// +// The number of streams may be too large to return from a single call to +// ListStreams. You can limit the number of returned streams using the Limit +// parameter. If you do not specify a value for the Limit parameter, Amazon +// Kinesis uses the default limit, which is currently 10. +// +// You can detect if there are more streams available to list by using the +// HasMoreStreams flag from the returned output. If there are more streams available, +// you can request more streams by using the name of the last stream returned +// by the ListStreams request in the ExclusiveStartStreamName parameter in a +// subsequent request to ListStreams. The group of stream names returned by +// the subsequent request is then added to the list. You can continue this process +// until all the stream names have been collected in the list. +// +// ListStreams has a limit of 5 transactions per second per account. +func (c *Kinesis) ListStreams(input *ListStreamsInput) (*ListStreamsOutput, error) { + req, out := c.ListStreamsRequest(input) + err := req.Send() + return out, err +} + +func (c *Kinesis) ListStreamsPages(input *ListStreamsInput, fn func(p *ListStreamsOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.ListStreamsRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*ListStreamsOutput), lastPage) + }) +} + +const opListTagsForStream = "ListTagsForStream" + +// ListTagsForStreamRequest generates a request for the ListTagsForStream operation. +func (c *Kinesis) ListTagsForStreamRequest(input *ListTagsForStreamInput) (req *request.Request, output *ListTagsForStreamOutput) { + op := &request.Operation{ + Name: opListTagsForStream, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ListTagsForStreamInput{} + } + + req = c.newRequest(op, input, output) + output = &ListTagsForStreamOutput{} + req.Data = output + return +} + +// Lists the tags for the specified Amazon Kinesis stream. +func (c *Kinesis) ListTagsForStream(input *ListTagsForStreamInput) (*ListTagsForStreamOutput, error) { + req, out := c.ListTagsForStreamRequest(input) + err := req.Send() + return out, err +} + +const opMergeShards = "MergeShards" + +// MergeShardsRequest generates a request for the MergeShards operation. +func (c *Kinesis) MergeShardsRequest(input *MergeShardsInput) (req *request.Request, output *MergeShardsOutput) { + op := &request.Operation{ + Name: opMergeShards, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &MergeShardsInput{} + } + + req = c.newRequest(op, input, output) + output = &MergeShardsOutput{} + req.Data = output + return +} + +// Merges two adjacent shards in a stream and combines them into a single shard +// to reduce the stream's capacity to ingest and transport data. 
Two shards
+// are considered adjacent if the union of the hash key ranges for the two shards
+// forms a contiguous set with no gaps. For example, if you have two shards,
+// one with a hash key range of 276...381 and the other with a hash key range
+// of 382...454, then you could merge these two shards into a single shard that
+// would have a hash key range of 276...454. After the merge, the single child
+// shard receives data for all hash key values covered by the two parent shards.
+//
+// MergeShards is called when there is a need to reduce the overall capacity
+// of a stream because of excess capacity that is not being used. You must specify
+// the shard to be merged and the adjacent shard for a stream. For more information
+// about merging shards, see Merge Two Shards (http://docs.aws.amazon.com/kinesis/latest/dev/kinesis-using-sdk-java-resharding-merge.html)
+// in the Amazon Kinesis Developer Guide.
+//
+// If the stream is in the ACTIVE state, you can call MergeShards. If a stream
+// is in the CREATING, UPDATING, or DELETING state, MergeShards returns a ResourceInUseException.
+// If the specified stream does not exist, MergeShards returns a ResourceNotFoundException.
+//
+// You can use DescribeStream to check the state of the stream, which is returned
+// in StreamStatus.
+//
+// MergeShards is an asynchronous operation. Upon receiving a MergeShards request,
+// Amazon Kinesis immediately returns a response and sets the StreamStatus to
+// UPDATING. After the operation is completed, Amazon Kinesis sets the StreamStatus
+// to ACTIVE. Read and write operations continue to work while the stream is
+// in the UPDATING state.
+//
+// You use DescribeStream to determine the shard IDs that are specified in
+// the MergeShards request.
+//
+// If you try to operate on too many streams in parallel using CreateStream,
+// DeleteStream, MergeShards or SplitShard, you will receive a LimitExceededException.
+//
+// MergeShards has a limit of 5 transactions per second per account.
+func (c *Kinesis) MergeShards(input *MergeShardsInput) (*MergeShardsOutput, error) {
+	req, out := c.MergeShardsRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+const opPutRecord = "PutRecord"
+
+// PutRecordRequest generates a request for the PutRecord operation.
+func (c *Kinesis) PutRecordRequest(input *PutRecordInput) (req *request.Request, output *PutRecordOutput) {
+	op := &request.Operation{
+		Name:       opPutRecord,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &PutRecordInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &PutRecordOutput{}
+	req.Data = output
+	return
+}
+
+// Writes a single data record from a producer into an Amazon Kinesis stream.
+// Call PutRecord to send data from the producer into the Amazon Kinesis stream
+// for real-time ingestion and subsequent processing, one record at a time.
+// Each shard can support writes up to 1,000 records per second, up to a maximum
+// data write total of 1 MB per second.
+//
+// You must specify the name of the stream that captures, stores, and transports
+// the data; a partition key; and the data blob itself.
+//
+// The data blob can be any type of data; for example, a segment from a log
+// file, geographic/location data, website clickstream data, and so on.
+//
+// The partition key is used by Amazon Kinesis to distribute data across shards.
+// Amazon Kinesis segregates the data records that belong to a data stream into +// multiple shards, using the partition key associated with each data record +// to determine which shard a given data record belongs to. +// +// Partition keys are Unicode strings, with a maximum length limit of 256 characters +// for each key. An MD5 hash function is used to map partition keys to 128-bit +// integer values and to map associated data records to shards using the hash +// key ranges of the shards. You can override hashing the partition key to determine +// the shard by explicitly specifying a hash value using the ExplicitHashKey +// parameter. For more information, see Adding Data to a Stream (http://docs.aws.amazon.com/kinesis/latest/dev/developing-producers-with-sdk.html#kinesis-using-sdk-java-add-data-to-stream) +// in the Amazon Kinesis Developer Guide. +// +// PutRecord returns the shard ID of where the data record was placed and the +// sequence number that was assigned to the data record. +// +// Sequence numbers generally increase over time. To guarantee strictly increasing +// ordering, use the SequenceNumberForOrdering parameter. For more information, +// see Adding Data to a Stream (http://docs.aws.amazon.com/kinesis/latest/dev/developing-producers-with-sdk.html#kinesis-using-sdk-java-add-data-to-stream) +// in the Amazon Kinesis Developer Guide. +// +// If a PutRecord request cannot be processed because of insufficient provisioned +// throughput on the shard involved in the request, PutRecord throws ProvisionedThroughputExceededException. +// +// By default, data records are accessible for only 24 hours from the time +// that they are added to an Amazon Kinesis stream. This retention period can +// be modified using the DecreaseStreamRetentionPeriod and IncreaseStreamRetentionPeriod +// operations. +func (c *Kinesis) PutRecord(input *PutRecordInput) (*PutRecordOutput, error) { + req, out := c.PutRecordRequest(input) + err := req.Send() + return out, err +} + +const opPutRecords = "PutRecords" + +// PutRecordsRequest generates a request for the PutRecords operation. +func (c *Kinesis) PutRecordsRequest(input *PutRecordsInput) (req *request.Request, output *PutRecordsOutput) { + op := &request.Operation{ + Name: opPutRecords, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &PutRecordsInput{} + } + + req = c.newRequest(op, input, output) + output = &PutRecordsOutput{} + req.Data = output + return +} + +// Writes multiple data records from a producer into an Amazon Kinesis stream +// in a single call (also referred to as a PutRecords request). Use this operation +// to send data from a data producer into the Amazon Kinesis stream for data +// ingestion and processing. +// +// Each PutRecords request can support up to 500 records. Each record in the +// request can be as large as 1 MB, up to a limit of 5 MB for the entire request, +// including partition keys. Each shard can support writes up to 1,000 records +// per second, up to a maximum data write total of 1 MB per second. +// +// You must specify the name of the stream that captures, stores, and transports +// the data; and an array of request Records, with each record in the array +// requiring a partition key and data blob. The record size limit applies to +// the total size of the partition key and data blob. +// +// The data blob can be any type of data; for example, a segment from a log +// file, geographic/location data, website clickstream data, and so on. 
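+//
+// Example (editor's sketch, not part of the generated documentation): a
+// hypothetical batched put, where `svc` is assumed to be a *kinesis.Kinesis
+// client and the stream name and payloads are placeholders:
+//
+//     out, err := svc.PutRecords(&kinesis.PutRecordsInput{
+//         StreamName: aws.String("example-stream"), // hypothetical stream name
+//         Records: []*kinesis.PutRecordsRequestEntry{
+//             {Data: []byte("payload-1"), PartitionKey: aws.String("key-1")},
+//             {Data: []byte("payload-2"), PartitionKey: aws.String("key-2")},
+//         },
+//     })
+//     if err == nil && aws.Int64Value(out.FailedRecordCount) > 0 {
+//         // Inspect out.Records for per-record ErrorCode/ErrorMessage and retry.
+//     }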
+//
+// The partition key is used by Amazon Kinesis as input to a hash function
+// that maps the partition key and associated data to a specific shard. An MD5
+// hash function is used to map partition keys to 128-bit integer values and
+// to map associated data records to shards. As a result of this hashing mechanism,
+// all data records with the same partition key map to the same shard within
+// the stream. For more information, see Adding Data to a Stream (http://docs.aws.amazon.com/kinesis/latest/dev/developing-producers-with-sdk.html#kinesis-using-sdk-java-add-data-to-stream)
+// in the Amazon Kinesis Developer Guide.
+//
+// Each record in the Records array may include an optional parameter, ExplicitHashKey,
+// which overrides the partition key to shard mapping. This parameter allows
+// a data producer to determine explicitly the shard where the record is stored.
+// For more information, see Adding Multiple Records with PutRecords (http://docs.aws.amazon.com/kinesis/latest/dev/developing-producers-with-sdk.html#kinesis-using-sdk-java-putrecords)
+// in the Amazon Kinesis Developer Guide.
+//
+// The PutRecords response includes an array of response Records. Each record
+// in the response array directly correlates with a record in the request array
+// using natural ordering, from the top to the bottom of the request and response.
+// The response Records array always includes the same number of records as
+// the request array.
+//
+// The response Records array includes both successfully and unsuccessfully
+// processed records. Amazon Kinesis attempts to process all records in each
+// PutRecords request. A single record failure does not stop the processing
+// of subsequent records.
+//
+// A successfully processed record includes ShardId and SequenceNumber values.
+// The ShardId parameter identifies the shard in the stream where the record
+// is stored. The SequenceNumber parameter is an identifier assigned to the
+// put record, unique to all records in the stream.
+//
+// An unsuccessfully processed record includes ErrorCode and ErrorMessage values.
+// ErrorCode reflects the type of error and can be one of the following values:
+// ProvisionedThroughputExceededException or InternalFailure. ErrorMessage provides
+// more detailed information about the ProvisionedThroughputExceededException
+// exception, including the account ID, stream name, and shard ID of the record
+// that was throttled. For more information about partially successful responses,
+// see Adding Multiple Records with PutRecords (http://docs.aws.amazon.com/kinesis/latest/dev/kinesis-using-sdk-java-add-data-to-stream.html#kinesis-using-sdk-java-putrecords)
+// in the Amazon Kinesis Developer Guide.
+//
+// By default, data records are accessible for only 24 hours from the time
+// that they are added to an Amazon Kinesis stream. This retention period can
+// be modified using the DecreaseStreamRetentionPeriod and IncreaseStreamRetentionPeriod
+// operations.
+func (c *Kinesis) PutRecords(input *PutRecordsInput) (*PutRecordsOutput, error) {
+	req, out := c.PutRecordsRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+const opRemoveTagsFromStream = "RemoveTagsFromStream"
+
+// RemoveTagsFromStreamRequest generates a request for the RemoveTagsFromStream operation.
+func (c *Kinesis) RemoveTagsFromStreamRequest(input *RemoveTagsFromStreamInput) (req *request.Request, output *RemoveTagsFromStreamOutput) {
+ op := &request.Operation{
+ Name: opRemoveTagsFromStream,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &RemoveTagsFromStreamInput{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &RemoveTagsFromStreamOutput{}
+ req.Data = output
+ return
+}
+
+// Deletes tags from the specified Amazon Kinesis stream.
+//
+// If you specify a tag that does not exist, it is ignored.
+func (c *Kinesis) RemoveTagsFromStream(input *RemoveTagsFromStreamInput) (*RemoveTagsFromStreamOutput, error) {
+ req, out := c.RemoveTagsFromStreamRequest(input)
+ err := req.Send()
+ return out, err
+}
+
+const opSplitShard = "SplitShard"
+
+// SplitShardRequest generates a request for the SplitShard operation.
+func (c *Kinesis) SplitShardRequest(input *SplitShardInput) (req *request.Request, output *SplitShardOutput) {
+ op := &request.Operation{
+ Name: opSplitShard,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &SplitShardInput{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &SplitShardOutput{}
+ req.Data = output
+ return
+}
+
+// Splits a shard into two new shards in the stream, to increase the stream's
+// capacity to ingest and transport data. SplitShard is called when there is
+// a need to increase the overall capacity of a stream because of an expected
+// increase in the volume of data records being ingested.
+//
+// You can also use SplitShard when a shard appears to be approaching its maximum
+// utilization, for example, when the set of producers sending data into the
+// specific shard are suddenly sending more than previously anticipated. You
+// can also call SplitShard to increase stream capacity, so that more Amazon
+// Kinesis applications can simultaneously read data from the stream for real-time
+// processing.
+//
+// You must specify the shard to be split and the new hash key, which is the
+// position in the shard where the shard gets split in two. In many cases, the
+// new hash key might simply be the average of the beginning and ending hash
+// key, but it can be any hash key value in the range being mapped into the
+// shard. For more information about splitting shards, see Split a Shard (http://docs.aws.amazon.com/kinesis/latest/dev/kinesis-using-sdk-java-resharding-split.html)
+// in the Amazon Kinesis Developer Guide.
+//
+// You can use DescribeStream to determine the shard ID and hash key values
+// for the ShardToSplit and NewStartingHashKey parameters that are specified
+// in the SplitShard request.
+//
+// SplitShard is an asynchronous operation. Upon receiving a SplitShard request,
+// Amazon Kinesis immediately returns a response and sets the stream status
+// to UPDATING. After the operation is completed, Amazon Kinesis sets the stream
+// status to ACTIVE. Read and write operations continue to work while the stream
+// is in the UPDATING state.
+//
+// You can use DescribeStream to check the status of the stream, which is returned
+// in StreamStatus. If the stream is in the ACTIVE state, you can call SplitShard.
+// If a stream is in the CREATING, UPDATING, or DELETING state, DescribeStream
+// returns a ResourceInUseException.
+//
+// If the specified stream does not exist, DescribeStream returns a ResourceNotFoundException.
+// If you try to create more shards than are authorized for your account, you
+// receive a LimitExceededException.
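+//
+// Illustrative sketch (not part of the generated documentation): because hash
+// keys are decimal strings for 128-bit values, the midpoint split point for a
+// shard can be computed with math/big, e.g.:
+//
+//    lo, _ := new(big.Int).SetString(*shard.HashKeyRange.StartingHashKey, 10)
+//    hi, _ := new(big.Int).SetString(*shard.HashKeyRange.EndingHashKey, 10)
+//    mid := new(big.Int).Rsh(new(big.Int).Add(lo, hi), 1) // (lo+hi)/2
+//    newStartingHashKey := mid.String()
+//
+// where shard is a *Shard returned by DescribeStream; math/big is assumed.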
+//
+// For the default shard limit for an AWS account, see Amazon Kinesis Limits
+// (http://docs.aws.amazon.com/kinesis/latest/dev/service-sizes-and-limits.html).
+// If you need to increase this limit, contact AWS Support (http://docs.aws.amazon.com/general/latest/gr/aws_service_limits.html).
+//
+// If you try to operate on too many streams in parallel using CreateStream,
+// DeleteStream, MergeShards or SplitShard, you receive a LimitExceededException.
+//
+// SplitShard has a limit of 5 transactions per second per account.
+func (c *Kinesis) SplitShard(input *SplitShardInput) (*SplitShardOutput, error) {
+ req, out := c.SplitShardRequest(input)
+ err := req.Send()
+ return out, err
+}
+
+// Represents the input for AddTagsToStream.
+type AddTagsToStreamInput struct {
+ _ struct{} `type:"structure"`
+
+ // The name of the stream.
+ StreamName *string `min:"1" type:"string" required:"true"`
+
+ // The set of key-value pairs to use to create the tags.
+ Tags map[string]*string `min:"1" type:"map" required:"true"`
+}
+
+// String returns the string representation
+func (s AddTagsToStreamInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s AddTagsToStreamInput) GoString() string {
+ return s.String()
+}
+
+type AddTagsToStreamOutput struct {
+ _ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s AddTagsToStreamOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s AddTagsToStreamOutput) GoString() string {
+ return s.String()
+}
+
+// Represents the input for CreateStream.
+type CreateStreamInput struct {
+ _ struct{} `type:"structure"`
+
+ // The number of shards that the stream will use. The throughput of the stream
+ // is a function of the number of shards; more shards are required for greater
+ // provisioned throughput.
+ ShardCount *int64 `min:"1" type:"integer" required:"true"`
+
+ // A name to identify the stream. The stream name is scoped to the AWS account
+ // used by the application that creates the stream. It is also scoped by region.
+ // That is, two streams in two different AWS accounts can have the same name,
+ // and two streams in the same AWS account, but in two different regions, can
+ // have the same name.
+ StreamName *string `min:"1" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s CreateStreamInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreateStreamInput) GoString() string {
+ return s.String()
+}
+
+type CreateStreamOutput struct {
+ _ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s CreateStreamOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreateStreamOutput) GoString() string {
+ return s.String()
+}
+
+// Represents the input for DecreaseStreamRetentionPeriod.
+type DecreaseStreamRetentionPeriodInput struct {
+ _ struct{} `type:"structure"`
+
+ // The new retention period of the stream, in hours. Must be less than the current
+ // retention period.
+ RetentionPeriodHours *int64 `min:"24" type:"integer" required:"true"`
+
+ // The name of the stream to modify.
+ StreamName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DecreaseStreamRetentionPeriodInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DecreaseStreamRetentionPeriodInput) GoString() string { + return s.String() +} + +type DecreaseStreamRetentionPeriodOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DecreaseStreamRetentionPeriodOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DecreaseStreamRetentionPeriodOutput) GoString() string { + return s.String() +} + +// Represents the input for DeleteStream. +type DeleteStreamInput struct { + _ struct{} `type:"structure"` + + // The name of the stream to delete. + StreamName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteStreamInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteStreamInput) GoString() string { + return s.String() +} + +type DeleteStreamOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteStreamOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteStreamOutput) GoString() string { + return s.String() +} + +// Represents the input for DescribeStream. +type DescribeStreamInput struct { + _ struct{} `type:"structure"` + + // The shard ID of the shard to start with. + ExclusiveStartShardId *string `min:"1" type:"string"` + + // The maximum number of shards to return. + Limit *int64 `min:"1" type:"integer"` + + // The name of the stream to describe. + StreamName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DescribeStreamInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeStreamInput) GoString() string { + return s.String() +} + +// Represents the output for DescribeStream. +type DescribeStreamOutput struct { + _ struct{} `type:"structure"` + + // The current status of the stream, the stream ARN, an array of shard objects + // that comprise the stream, and states whether there are more shards available. + StreamDescription *StreamDescription `type:"structure" required:"true"` +} + +// String returns the string representation +func (s DescribeStreamOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeStreamOutput) GoString() string { + return s.String() +} + +// Represents the input for GetRecords. +type GetRecordsInput struct { + _ struct{} `type:"structure"` + + // The maximum number of records to return. Specify a value of up to 10,000. + // If you specify a value that is greater than 10,000, GetRecords throws InvalidArgumentException. + Limit *int64 `min:"1" type:"integer"` + + // The position in the shard from which you want to start sequentially reading + // data records. A shard iterator specifies this position using the sequence + // number of a data record in the shard. 
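+ //
+ // Illustrative sketch (not part of the generated documentation): a typical
+ // consumer loop feeds NextShardIterator from each response back into the
+ // next call, stopping when the shard is closed, e.g.:
+ //
+ //    it := iterOut.ShardIterator // from a prior GetShardIterator call
+ //    for it != nil {
+ //        out, err := svc.GetRecords(&GetRecordsInput{ShardIterator: it})
+ //        if err != nil {
+ //            break
+ //        }
+ //        handle(out.Records) // handle is a hypothetical callback
+ //        it = out.NextShardIterator
+ //    }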
+ ShardIterator *string `min:"1" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s GetRecordsInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetRecordsInput) GoString() string {
+ return s.String()
+}
+
+// Represents the output for GetRecords.
+type GetRecordsOutput struct {
+ _ struct{} `type:"structure"`
+
+ // The number of milliseconds the GetRecords response is from the tip of the
+ // stream, indicating how far behind current time the consumer is. A value of
+ // zero indicates record processing is caught up, and there are no new records
+ // to process at this moment.
+ MillisBehindLatest *int64 `type:"long"`
+
+ // The next position in the shard from which to start sequentially reading data
+ // records. If set to null, the shard has been closed and the requested iterator
+ // will not return any more data.
+ NextShardIterator *string `min:"1" type:"string"`
+
+ // The data records retrieved from the shard.
+ Records []*Record `type:"list" required:"true"`
+}
+
+// String returns the string representation
+func (s GetRecordsOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetRecordsOutput) GoString() string {
+ return s.String()
+}
+
+// Represents the input for GetShardIterator.
+type GetShardIteratorInput struct {
+ _ struct{} `type:"structure"`
+
+ // The shard ID of the shard to get the iterator for.
+ ShardId *string `min:"1" type:"string" required:"true"`
+
+ // Determines how the shard iterator is used to start reading data records from
+ // the shard.
+ //
+ // The following are the valid shard iterator types:
+ //
+ // AT_SEQUENCE_NUMBER - Start reading exactly from the position denoted by
+ // a specific sequence number.
+ //
+ // AFTER_SEQUENCE_NUMBER - Start reading right after the position denoted
+ // by a specific sequence number.
+ //
+ // TRIM_HORIZON - Start reading at the last untrimmed record in the shard
+ // in the system, which is the oldest data record in the shard.
+ //
+ // LATEST - Start reading just after the most recent record in the shard,
+ // so that you always read the most recent data in the shard.
+ ShardIteratorType *string `type:"string" required:"true" enum:"ShardIteratorType"`
+
+ // The sequence number of the data record in the shard from which to start
+ // reading.
+ StartingSequenceNumber *string `type:"string"`
+
+ // The name of the stream.
+ StreamName *string `min:"1" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s GetShardIteratorInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetShardIteratorInput) GoString() string {
+ return s.String()
+}
+
+// Represents the output for GetShardIterator.
+type GetShardIteratorOutput struct {
+ _ struct{} `type:"structure"`
+
+ // The position in the shard from which to start reading data records sequentially.
+ // A shard iterator specifies this position using the sequence number of a data
+ // record in a shard.
+ ShardIterator *string `min:"1" type:"string"`
+}
+
+// String returns the string representation
+func (s GetShardIteratorOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetShardIteratorOutput) GoString() string {
+ return s.String()
+}
+
+// The range of possible hash key values for the shard, which is a set of ordered
+// contiguous positive integers.
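+//
+// Illustrative sketch (not part of the generated documentation): to check
+// whether a partition key's MD5 hash falls inside a shard's range, the 16-byte
+// digest can be read as a big-endian 128-bit integer, e.g.:
+//
+//    sum := md5.Sum([]byte(partitionKey)) // crypto/md5
+//    h := new(big.Int).SetBytes(sum[:])   // math/big
+//    lo, _ := new(big.Int).SetString(*r.StartingHashKey, 10)
+//    hi, _ := new(big.Int).SetString(*r.EndingHashKey, 10)
+//    inRange := h.Cmp(lo) >= 0 && h.Cmp(hi) <= 0
+//
+// where r is a *HashKeyRange (for example Shard.HashKeyRange) and partitionKey
+// is a hypothetical string.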
+type HashKeyRange struct { + _ struct{} `type:"structure"` + + // The ending hash key of the hash key range. + EndingHashKey *string `type:"string" required:"true"` + + // The starting hash key of the hash key range. + StartingHashKey *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s HashKeyRange) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s HashKeyRange) GoString() string { + return s.String() +} + +// Represents the input for IncreaseStreamRetentionPeriod. +type IncreaseStreamRetentionPeriodInput struct { + _ struct{} `type:"structure"` + + // The new retention period of the stream, in hours. Must be more than the current + // retention period. + RetentionPeriodHours *int64 `min:"24" type:"integer" required:"true"` + + // The name of the stream to modify. + StreamName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s IncreaseStreamRetentionPeriodInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s IncreaseStreamRetentionPeriodInput) GoString() string { + return s.String() +} + +type IncreaseStreamRetentionPeriodOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s IncreaseStreamRetentionPeriodOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s IncreaseStreamRetentionPeriodOutput) GoString() string { + return s.String() +} + +// Represents the input for ListStreams. +type ListStreamsInput struct { + _ struct{} `type:"structure"` + + // The name of the stream to start the list with. + ExclusiveStartStreamName *string `min:"1" type:"string"` + + // The maximum number of streams to list. + Limit *int64 `min:"1" type:"integer"` +} + +// String returns the string representation +func (s ListStreamsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListStreamsInput) GoString() string { + return s.String() +} + +// Represents the output for ListStreams. +type ListStreamsOutput struct { + _ struct{} `type:"structure"` + + // If set to true, there are more streams available to list. + HasMoreStreams *bool `type:"boolean" required:"true"` + + // The names of the streams that are associated with the AWS account making + // the ListStreams request. + StreamNames []*string `type:"list" required:"true"` +} + +// String returns the string representation +func (s ListStreamsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListStreamsOutput) GoString() string { + return s.String() +} + +// Represents the input for ListTagsForStream. +type ListTagsForStreamInput struct { + _ struct{} `type:"structure"` + + // The key to use as the starting point for the list of tags. If this parameter + // is set, ListTagsForStream gets all tags that occur after ExclusiveStartTagKey. + ExclusiveStartTagKey *string `min:"1" type:"string"` + + // The number of tags to return. If this number is less than the total number + // of tags associated with the stream, HasMoreTags is set to true. To list additional + // tags, set ExclusiveStartTagKey to the last key in the response. + Limit *int64 `min:"1" type:"integer"` + + // The name of the stream. 
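+ //
+ // Illustrative sketch (not part of the generated documentation): all tags
+ // for a stream can be collected by paging with ExclusiveStartTagKey until
+ // HasMoreTags is false, e.g.:
+ //
+ //    in := &ListTagsForStreamInput{StreamName: name} // name is assumed
+ //    for {
+ //        out, err := svc.ListTagsForStream(in)
+ //        if err != nil {
+ //            break
+ //        }
+ //        tags = append(tags, out.Tags...) // tags is a hypothetical accumulator
+ //        if out.HasMoreTags == nil || !*out.HasMoreTags {
+ //            break
+ //        }
+ //        in.ExclusiveStartTagKey = out.Tags[len(out.Tags)-1].Key
+ //    }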
+ StreamName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s ListTagsForStreamInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListTagsForStreamInput) GoString() string { + return s.String() +} + +// Represents the output for ListTagsForStream. +type ListTagsForStreamOutput struct { + _ struct{} `type:"structure"` + + // If set to true, more tags are available. To request additional tags, set + // ExclusiveStartTagKey to the key of the last tag returned. + HasMoreTags *bool `type:"boolean" required:"true"` + + // A list of tags associated with StreamName, starting with the first tag after + // ExclusiveStartTagKey and up to the specified Limit. + Tags []*Tag `type:"list" required:"true"` +} + +// String returns the string representation +func (s ListTagsForStreamOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListTagsForStreamOutput) GoString() string { + return s.String() +} + +// Represents the input for MergeShards. +type MergeShardsInput struct { + _ struct{} `type:"structure"` + + // The shard ID of the adjacent shard for the merge. + AdjacentShardToMerge *string `min:"1" type:"string" required:"true"` + + // The shard ID of the shard to combine with the adjacent shard for the merge. + ShardToMerge *string `min:"1" type:"string" required:"true"` + + // The name of the stream for the merge. + StreamName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s MergeShardsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s MergeShardsInput) GoString() string { + return s.String() +} + +type MergeShardsOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s MergeShardsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s MergeShardsOutput) GoString() string { + return s.String() +} + +// Represents the input for PutRecord. +type PutRecordInput struct { + _ struct{} `type:"structure"` + + // The data blob to put into the record, which is base64-encoded when the blob + // is serialized. When the data blob (the payload before base64-encoding) is + // added to the partition key size, the total size must not exceed the maximum + // record size (1 MB). + Data []byte `type:"blob" required:"true"` + + // The hash value used to explicitly determine the shard the data record is + // assigned to by overriding the partition key hash. + ExplicitHashKey *string `type:"string"` + + // Determines which shard in the stream the data record is assigned to. Partition + // keys are Unicode strings with a maximum length limit of 256 characters for + // each key. Amazon Kinesis uses the partition key as input to a hash function + // that maps the partition key and associated data to a specific shard. Specifically, + // an MD5 hash function is used to map partition keys to 128-bit integer values + // and to map associated data records to shards. As a result of this hashing + // mechanism, all data records with the same partition key will map to the same + // shard within the stream. + PartitionKey *string `min:"1" type:"string" required:"true"` + + // Guarantees strictly increasing sequence numbers, for puts from the same client + // and to the same partition key. 
Usage: set the SequenceNumberForOrdering of
+ // record n to the sequence number of record n-1 (as returned in the result
+ // when putting record n-1). If this parameter is not set, records will be coarsely
+ // ordered based on arrival time.
+ SequenceNumberForOrdering *string `type:"string"`
+
+ // The name of the stream to put the data record into.
+ StreamName *string `min:"1" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s PutRecordInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s PutRecordInput) GoString() string {
+ return s.String()
+}
+
+// Represents the output for PutRecord.
+type PutRecordOutput struct {
+ _ struct{} `type:"structure"`
+
+ // The sequence number identifier that was assigned to the put data record.
+ // The sequence number for the record is unique across all records in the stream.
+ // A sequence number is the identifier associated with every record put into
+ // the stream.
+ SequenceNumber *string `type:"string" required:"true"`
+
+ // The shard ID of the shard where the data record was placed.
+ ShardId *string `min:"1" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s PutRecordOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s PutRecordOutput) GoString() string {
+ return s.String()
+}
+
+// A PutRecords request.
+type PutRecordsInput struct {
+ _ struct{} `type:"structure"`
+
+ // The records associated with the request.
+ Records []*PutRecordsRequestEntry `min:"1" type:"list" required:"true"`
+
+ // The stream name associated with the request.
+ StreamName *string `min:"1" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s PutRecordsInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s PutRecordsInput) GoString() string {
+ return s.String()
+}
+
+// PutRecords results.
+type PutRecordsOutput struct {
+ _ struct{} `type:"structure"`
+
+ // The number of unsuccessfully processed records in a PutRecords request.
+ FailedRecordCount *int64 `min:"1" type:"integer"`
+
+ // An array of successfully and unsuccessfully processed record results, correlated
+ // with the request by natural ordering. A record that is successfully added
+ // to your Amazon Kinesis stream includes SequenceNumber and ShardId in the
+ // result. A record that fails to be added to your Amazon Kinesis stream includes
+ // ErrorCode and ErrorMessage in the result.
+ Records []*PutRecordsResultEntry `min:"1" type:"list" required:"true"`
+}
+
+// String returns the string representation
+func (s PutRecordsOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s PutRecordsOutput) GoString() string {
+ return s.String()
+}
+
+// Represents a single record in a PutRecords request.
+type PutRecordsRequestEntry struct {
+ _ struct{} `type:"structure"`
+
+ // The data blob to put into the record, which is base64-encoded when the blob
+ // is serialized. When the data blob (the payload before base64-encoding) is
+ // added to the partition key size, the total size must not exceed the maximum
+ // record size (1 MB).
+ Data []byte `type:"blob" required:"true"`
+
+ // The hash value used to determine explicitly the shard that the data record
+ // is assigned to by overriding the partition key hash.
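+ //
+ // Illustrative sketch (not part of the generated documentation): a producer
+ // that wants to pin an entry to a known shard can reuse that shard's starting
+ // hash key, e.g.:
+ //
+ //    entry.ExplicitHashKey = shard.HashKeyRange.StartingHashKey
+ //
+ // where entry is a *PutRecordsRequestEntry and shard is a *Shard obtained
+ // from DescribeStream.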
+ ExplicitHashKey *string `type:"string"` + + // Determines which shard in the stream the data record is assigned to. Partition + // keys are Unicode strings with a maximum length limit of 256 characters for + // each key. Amazon Kinesis uses the partition key as input to a hash function + // that maps the partition key and associated data to a specific shard. Specifically, + // an MD5 hash function is used to map partition keys to 128-bit integer values + // and to map associated data records to shards. As a result of this hashing + // mechanism, all data records with the same partition key map to the same shard + // within the stream. + PartitionKey *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s PutRecordsRequestEntry) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutRecordsRequestEntry) GoString() string { + return s.String() +} + +// Represents the result of an individual record from a PutRecords request. +// A record that is successfully added to your Amazon Kinesis stream includes +// SequenceNumber and ShardId in the result. A record that fails to be added +// to your Amazon Kinesis stream includes ErrorCode and ErrorMessage in the +// result. +type PutRecordsResultEntry struct { + _ struct{} `type:"structure"` + + // The error code for an individual record result. ErrorCodes can be either + // ProvisionedThroughputExceededException or InternalFailure. + ErrorCode *string `type:"string"` + + // The error message for an individual record result. An ErrorCode value of + // ProvisionedThroughputExceededException has an error message that includes + // the account ID, stream name, and shard ID. An ErrorCode value of InternalFailure + // has the error message "Internal Service Failure". + ErrorMessage *string `type:"string"` + + // The sequence number for an individual record result. + SequenceNumber *string `type:"string"` + + // The shard ID for an individual record result. + ShardId *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s PutRecordsResultEntry) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutRecordsResultEntry) GoString() string { + return s.String() +} + +// The unit of data of the Amazon Kinesis stream, which is composed of a sequence +// number, a partition key, and a data blob. +type Record struct { + _ struct{} `type:"structure"` + + // The approximate time that the record was inserted into the stream. + ApproximateArrivalTimestamp *time.Time `type:"timestamp" timestampFormat:"unix"` + + // The data blob. The data in the blob is both opaque and immutable to the Amazon + // Kinesis service, which does not inspect, interpret, or change the data in + // the blob in any way. When the data blob (the payload before base64-encoding) + // is added to the partition key size, the total size must not exceed the maximum + // record size (1 MB). + Data []byte `type:"blob" required:"true"` + + // Identifies which shard in the stream the data record is assigned to. + PartitionKey *string `min:"1" type:"string" required:"true"` + + // The unique identifier of the record in the stream. 
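+ //
+ // Illustrative sketch (not part of the generated documentation): sequence
+ // numbers are decimal strings that can exceed 64 bits, so ordering two
+ // records by sequence number calls for math/big, e.g.:
+ //
+ //    a, _ := new(big.Int).SetString(*r1.SequenceNumber, 10)
+ //    b, _ := new(big.Int).SetString(*r2.SequenceNumber, 10)
+ //    firstIsOlder := a.Cmp(b) < 0
+ //
+ // where r1 and r2 are hypothetical *Record values from the same shard.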
+ SequenceNumber *string `type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s Record) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s Record) GoString() string {
+ return s.String()
+}
+
+// Represents the input for RemoveTagsFromStream.
+type RemoveTagsFromStreamInput struct {
+ _ struct{} `type:"structure"`
+
+ // The name of the stream.
+ StreamName *string `min:"1" type:"string" required:"true"`
+
+ // A list of tag keys. Each corresponding tag is removed from the stream.
+ TagKeys []*string `min:"1" type:"list" required:"true"`
+}
+
+// String returns the string representation
+func (s RemoveTagsFromStreamInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s RemoveTagsFromStreamInput) GoString() string {
+ return s.String()
+}
+
+type RemoveTagsFromStreamOutput struct {
+ _ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s RemoveTagsFromStreamOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s RemoveTagsFromStreamOutput) GoString() string {
+ return s.String()
+}
+
+// The range of possible sequence numbers for the shard.
+type SequenceNumberRange struct {
+ _ struct{} `type:"structure"`
+
+ // The ending sequence number for the range. Shards that are in the OPEN state
+ // have an ending sequence number of null.
+ EndingSequenceNumber *string `type:"string"`
+
+ // The starting sequence number for the range.
+ StartingSequenceNumber *string `type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s SequenceNumberRange) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s SequenceNumberRange) GoString() string {
+ return s.String()
+}
+
+// A uniquely identified group of data records in an Amazon Kinesis stream.
+type Shard struct {
+ _ struct{} `type:"structure"`
+
+ // The shard ID of the shard adjacent to the shard's parent.
+ AdjacentParentShardId *string `min:"1" type:"string"`
+
+ // The range of possible hash key values for the shard, which is a set of ordered
+ // contiguous positive integers.
+ HashKeyRange *HashKeyRange `type:"structure" required:"true"`
+
+ // The shard ID of the shard's parent.
+ ParentShardId *string `min:"1" type:"string"`
+
+ // The range of possible sequence numbers for the shard.
+ SequenceNumberRange *SequenceNumberRange `type:"structure" required:"true"`
+
+ // The unique identifier of the shard within the Amazon Kinesis stream.
+ ShardId *string `min:"1" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s Shard) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s Shard) GoString() string {
+ return s.String()
+}
+
+// Represents the input for SplitShard.
+type SplitShardInput struct {
+ _ struct{} `type:"structure"`
+
+ // A hash key value for the starting hash key of one of the child shards created
+ // by the split. The hash key range for a given shard constitutes a set of ordered
+ // contiguous positive integers. The value for NewStartingHashKey must be in
+ // the range of hash keys being mapped into the shard. The NewStartingHashKey
+ // hash key value and all higher hash key values in the hash key range are distributed
+ // to one of the child shards.
All the lower hash key values in the range are
+ // distributed to the other child shard.
+ NewStartingHashKey *string `type:"string" required:"true"`
+
+ // The shard ID of the shard to split.
+ ShardToSplit *string `min:"1" type:"string" required:"true"`
+
+ // The name of the stream for the shard split.
+ StreamName *string `min:"1" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s SplitShardInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s SplitShardInput) GoString() string {
+ return s.String()
+}
+
+type SplitShardOutput struct {
+ _ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s SplitShardOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s SplitShardOutput) GoString() string {
+ return s.String()
+}
+
+// Represents the output for DescribeStream.
+type StreamDescription struct {
+ _ struct{} `type:"structure"`
+
+ // If set to true, more shards in the stream are available to describe.
+ HasMoreShards *bool `type:"boolean" required:"true"`
+
+ // The current retention period, in hours.
+ RetentionPeriodHours *int64 `min:"24" type:"integer" required:"true"`
+
+ // The shards that comprise the stream.
+ Shards []*Shard `type:"list" required:"true"`
+
+ // The Amazon Resource Name (ARN) for the stream being described.
+ StreamARN *string `type:"string" required:"true"`
+
+ // The name of the stream being described.
+ StreamName *string `min:"1" type:"string" required:"true"`
+
+ // The current status of the stream being described.
+ //
+ // The stream status is one of the following states:
+ //
+ // CREATING - The stream is being created. Amazon Kinesis immediately returns
+ // and sets StreamStatus to CREATING.
+ //
+ // DELETING - The stream is being deleted. The specified stream is in the
+ // DELETING state until Amazon Kinesis completes the deletion.
+ //
+ // ACTIVE - The stream exists and is ready for read and write operations
+ // or deletion. You should perform read and write operations only on an ACTIVE
+ // stream.
+ //
+ // UPDATING - Shards in the stream are being merged or split. Read and write
+ // operations continue to work while the stream is in the UPDATING state.
+ StreamStatus *string `type:"string" required:"true" enum:"StreamStatus"`
+}
+
+// String returns the string representation
+func (s StreamDescription) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s StreamDescription) GoString() string {
+ return s.String()
+}
+
+// Metadata assigned to the stream, consisting of a key-value pair.
+type Tag struct {
+ _ struct{} `type:"structure"`
+
+ // A unique identifier for the tag. Maximum length: 128 characters. Valid characters:
+ // Unicode letters, digits, white space, _ . / = + - % @
+ Key *string `min:"1" type:"string" required:"true"`
+
+ // An optional string, typically used to describe or define the tag. Maximum
+ // length: 256 characters. Valid characters: Unicode letters, digits, white
+ // space, _ .
/ = + - % @ + Value *string `type:"string"` +} + +// String returns the string representation +func (s Tag) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Tag) GoString() string { + return s.String() +} + +const ( + // @enum ShardIteratorType + ShardIteratorTypeAtSequenceNumber = "AT_SEQUENCE_NUMBER" + // @enum ShardIteratorType + ShardIteratorTypeAfterSequenceNumber = "AFTER_SEQUENCE_NUMBER" + // @enum ShardIteratorType + ShardIteratorTypeTrimHorizon = "TRIM_HORIZON" + // @enum ShardIteratorType + ShardIteratorTypeLatest = "LATEST" +) + +const ( + // @enum StreamStatus + StreamStatusCreating = "CREATING" + // @enum StreamStatus + StreamStatusDeleting = "DELETING" + // @enum StreamStatus + StreamStatusActive = "ACTIVE" + // @enum StreamStatus + StreamStatusUpdating = "UPDATING" +) diff --git a/vendor/github.com/aws/aws-sdk-go/service/kinesis/examples_test.go b/vendor/github.com/aws/aws-sdk-go/service/kinesis/examples_test.go new file mode 100644 index 000000000..ea90cea6f --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/kinesis/examples_test.go @@ -0,0 +1,337 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +package kinesis_test + +import ( + "bytes" + "fmt" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/session" + "github.com/aws/aws-sdk-go/service/kinesis" +) + +var _ time.Duration +var _ bytes.Buffer + +func ExampleKinesis_AddTagsToStream() { + svc := kinesis.New(session.New()) + + params := &kinesis.AddTagsToStreamInput{ + StreamName: aws.String("StreamName"), // Required + Tags: map[string]*string{ // Required + "Key": aws.String("TagValue"), // Required + // More values... + }, + } + resp, err := svc.AddTagsToStream(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleKinesis_CreateStream() { + svc := kinesis.New(session.New()) + + params := &kinesis.CreateStreamInput{ + ShardCount: aws.Int64(1), // Required + StreamName: aws.String("StreamName"), // Required + } + resp, err := svc.CreateStream(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleKinesis_DecreaseStreamRetentionPeriod() { + svc := kinesis.New(session.New()) + + params := &kinesis.DecreaseStreamRetentionPeriodInput{ + RetentionPeriodHours: aws.Int64(1), // Required + StreamName: aws.String("StreamName"), // Required + } + resp, err := svc.DecreaseStreamRetentionPeriod(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleKinesis_DeleteStream() { + svc := kinesis.New(session.New()) + + params := &kinesis.DeleteStreamInput{ + StreamName: aws.String("StreamName"), // Required + } + resp, err := svc.DeleteStream(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleKinesis_DescribeStream() { + svc := kinesis.New(session.New()) + + params := &kinesis.DescribeStreamInput{ + StreamName: aws.String("StreamName"), // Required + ExclusiveStartShardId: aws.String("ShardId"), + Limit: aws.Int64(1), + } + resp, err := svc.DescribeStream(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleKinesis_GetRecords() { + svc := kinesis.New(session.New()) + + params := &kinesis.GetRecordsInput{ + ShardIterator: aws.String("ShardIterator"), // Required + Limit: aws.Int64(1), + } + resp, err := svc.GetRecords(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleKinesis_GetShardIterator() { + svc := kinesis.New(session.New()) + + params := &kinesis.GetShardIteratorInput{ + ShardId: aws.String("ShardId"), // Required + ShardIteratorType: aws.String("ShardIteratorType"), // Required + StreamName: aws.String("StreamName"), // Required + StartingSequenceNumber: aws.String("SequenceNumber"), + } + resp, err := svc.GetShardIterator(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleKinesis_IncreaseStreamRetentionPeriod() { + svc := kinesis.New(session.New()) + + params := &kinesis.IncreaseStreamRetentionPeriodInput{ + RetentionPeriodHours: aws.Int64(1), // Required + StreamName: aws.String("StreamName"), // Required + } + resp, err := svc.IncreaseStreamRetentionPeriod(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleKinesis_ListStreams() { + svc := kinesis.New(session.New()) + + params := &kinesis.ListStreamsInput{ + ExclusiveStartStreamName: aws.String("StreamName"), + Limit: aws.Int64(1), + } + resp, err := svc.ListStreams(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleKinesis_ListTagsForStream() { + svc := kinesis.New(session.New()) + + params := &kinesis.ListTagsForStreamInput{ + StreamName: aws.String("StreamName"), // Required + ExclusiveStartTagKey: aws.String("TagKey"), + Limit: aws.Int64(1), + } + resp, err := svc.ListTagsForStream(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleKinesis_MergeShards() { + svc := kinesis.New(session.New()) + + params := &kinesis.MergeShardsInput{ + AdjacentShardToMerge: aws.String("ShardId"), // Required + ShardToMerge: aws.String("ShardId"), // Required + StreamName: aws.String("StreamName"), // Required + } + resp, err := svc.MergeShards(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. 
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleKinesis_PutRecord() {
+ svc := kinesis.New(session.New())
+
+ params := &kinesis.PutRecordInput{
+ Data: []byte("PAYLOAD"), // Required
+ PartitionKey: aws.String("PartitionKey"), // Required
+ StreamName: aws.String("StreamName"), // Required
+ ExplicitHashKey: aws.String("HashKey"),
+ SequenceNumberForOrdering: aws.String("SequenceNumber"),
+ }
+ resp, err := svc.PutRecord(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleKinesis_PutRecords() {
+ svc := kinesis.New(session.New())
+
+ params := &kinesis.PutRecordsInput{
+ Records: []*kinesis.PutRecordsRequestEntry{ // Required
+ { // Required
+ Data: []byte("PAYLOAD"), // Required
+ PartitionKey: aws.String("PartitionKey"), // Required
+ ExplicitHashKey: aws.String("HashKey"),
+ },
+ // More values...
+ },
+ StreamName: aws.String("StreamName"), // Required
+ }
+ resp, err := svc.PutRecords(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleKinesis_RemoveTagsFromStream() {
+ svc := kinesis.New(session.New())
+
+ params := &kinesis.RemoveTagsFromStreamInput{
+ StreamName: aws.String("StreamName"), // Required
+ TagKeys: []*string{ // Required
+ aws.String("TagKey"), // Required
+ // More values...
+ },
+ }
+ resp, err := svc.RemoveTagsFromStream(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleKinesis_SplitShard() {
+ svc := kinesis.New(session.New())
+
+ params := &kinesis.SplitShardInput{
+ NewStartingHashKey: aws.String("HashKey"), // Required
+ ShardToSplit: aws.String("ShardId"), // Required
+ StreamName: aws.String("StreamName"), // Required
+ }
+ resp, err := svc.SplitShard(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/service/kinesis/kinesisiface/interface.go b/vendor/github.com/aws/aws-sdk-go/service/kinesis/kinesisiface/interface.go
new file mode 100644
index 000000000..014eeee52
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/kinesis/kinesisiface/interface.go
@@ -0,0 +1,78 @@
+// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT.
+
+// Package kinesisiface provides an interface for Amazon Kinesis.
+package kinesisiface
+
+import (
+ "github.com/aws/aws-sdk-go/aws/request"
+ "github.com/aws/aws-sdk-go/service/kinesis"
+)
+
+// KinesisAPI is the interface type for kinesis.Kinesis.
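+//
+// Illustrative sketch (not part of the generated file): code that consumes
+// KinesisAPI can be unit-tested with a hand-rolled mock that embeds the
+// interface and overrides only the methods under test, e.g.:
+//
+//    type mockKinesis struct {
+//        kinesisiface.KinesisAPI
+//    }
+//
+//    func (m mockKinesis) ListStreams(in *kinesis.ListStreamsInput) (*kinesis.ListStreamsOutput, error) {
+//        return &kinesis.ListStreamsOutput{StreamNames: []*string{}}, nil
+//    }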
+type KinesisAPI interface { + AddTagsToStreamRequest(*kinesis.AddTagsToStreamInput) (*request.Request, *kinesis.AddTagsToStreamOutput) + + AddTagsToStream(*kinesis.AddTagsToStreamInput) (*kinesis.AddTagsToStreamOutput, error) + + CreateStreamRequest(*kinesis.CreateStreamInput) (*request.Request, *kinesis.CreateStreamOutput) + + CreateStream(*kinesis.CreateStreamInput) (*kinesis.CreateStreamOutput, error) + + DecreaseStreamRetentionPeriodRequest(*kinesis.DecreaseStreamRetentionPeriodInput) (*request.Request, *kinesis.DecreaseStreamRetentionPeriodOutput) + + DecreaseStreamRetentionPeriod(*kinesis.DecreaseStreamRetentionPeriodInput) (*kinesis.DecreaseStreamRetentionPeriodOutput, error) + + DeleteStreamRequest(*kinesis.DeleteStreamInput) (*request.Request, *kinesis.DeleteStreamOutput) + + DeleteStream(*kinesis.DeleteStreamInput) (*kinesis.DeleteStreamOutput, error) + + DescribeStreamRequest(*kinesis.DescribeStreamInput) (*request.Request, *kinesis.DescribeStreamOutput) + + DescribeStream(*kinesis.DescribeStreamInput) (*kinesis.DescribeStreamOutput, error) + + DescribeStreamPages(*kinesis.DescribeStreamInput, func(*kinesis.DescribeStreamOutput, bool) bool) error + + GetRecordsRequest(*kinesis.GetRecordsInput) (*request.Request, *kinesis.GetRecordsOutput) + + GetRecords(*kinesis.GetRecordsInput) (*kinesis.GetRecordsOutput, error) + + GetShardIteratorRequest(*kinesis.GetShardIteratorInput) (*request.Request, *kinesis.GetShardIteratorOutput) + + GetShardIterator(*kinesis.GetShardIteratorInput) (*kinesis.GetShardIteratorOutput, error) + + IncreaseStreamRetentionPeriodRequest(*kinesis.IncreaseStreamRetentionPeriodInput) (*request.Request, *kinesis.IncreaseStreamRetentionPeriodOutput) + + IncreaseStreamRetentionPeriod(*kinesis.IncreaseStreamRetentionPeriodInput) (*kinesis.IncreaseStreamRetentionPeriodOutput, error) + + ListStreamsRequest(*kinesis.ListStreamsInput) (*request.Request, *kinesis.ListStreamsOutput) + + ListStreams(*kinesis.ListStreamsInput) (*kinesis.ListStreamsOutput, error) + + ListStreamsPages(*kinesis.ListStreamsInput, func(*kinesis.ListStreamsOutput, bool) bool) error + + ListTagsForStreamRequest(*kinesis.ListTagsForStreamInput) (*request.Request, *kinesis.ListTagsForStreamOutput) + + ListTagsForStream(*kinesis.ListTagsForStreamInput) (*kinesis.ListTagsForStreamOutput, error) + + MergeShardsRequest(*kinesis.MergeShardsInput) (*request.Request, *kinesis.MergeShardsOutput) + + MergeShards(*kinesis.MergeShardsInput) (*kinesis.MergeShardsOutput, error) + + PutRecordRequest(*kinesis.PutRecordInput) (*request.Request, *kinesis.PutRecordOutput) + + PutRecord(*kinesis.PutRecordInput) (*kinesis.PutRecordOutput, error) + + PutRecordsRequest(*kinesis.PutRecordsInput) (*request.Request, *kinesis.PutRecordsOutput) + + PutRecords(*kinesis.PutRecordsInput) (*kinesis.PutRecordsOutput, error) + + RemoveTagsFromStreamRequest(*kinesis.RemoveTagsFromStreamInput) (*request.Request, *kinesis.RemoveTagsFromStreamOutput) + + RemoveTagsFromStream(*kinesis.RemoveTagsFromStreamInput) (*kinesis.RemoveTagsFromStreamOutput, error) + + SplitShardRequest(*kinesis.SplitShardInput) (*request.Request, *kinesis.SplitShardOutput) + + SplitShard(*kinesis.SplitShardInput) (*kinesis.SplitShardOutput, error) +} + +var _ KinesisAPI = (*kinesis.Kinesis)(nil) diff --git a/vendor/github.com/aws/aws-sdk-go/service/kinesis/service.go b/vendor/github.com/aws/aws-sdk-go/service/kinesis/service.go new file mode 100644 index 000000000..da32f7556 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/kinesis/service.go @@ -0,0 
+1,89 @@
+// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT.
+
+package kinesis
+
+import (
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/client"
+ "github.com/aws/aws-sdk-go/aws/client/metadata"
+ "github.com/aws/aws-sdk-go/aws/request"
+ "github.com/aws/aws-sdk-go/private/protocol/jsonrpc"
+ "github.com/aws/aws-sdk-go/private/signer/v4"
+)
+
+// Amazon Kinesis is a managed service that scales elastically for real-time
+// processing of streaming big data.
+//
+// The service client's operations are safe to be used concurrently.
+// It is not safe to mutate any of the client's properties though.
+type Kinesis struct {
+ *client.Client
+}
+
+// Used for custom client initialization logic
+var initClient func(*client.Client)
+
+// Used for custom request initialization logic
+var initRequest func(*request.Request)
+
+// A ServiceName is the name of the service the client will make API calls to.
+const ServiceName = "kinesis"
+
+// New creates a new instance of the Kinesis client with a session.
+// If additional configuration is needed for the client instance use the optional
+// aws.Config parameter to add your extra config.
+//
+// Example:
+//     // Create a Kinesis client from just a session.
+//     svc := kinesis.New(mySession)
+//
+//     // Create a Kinesis client with additional configuration
+//     svc := kinesis.New(mySession, aws.NewConfig().WithRegion("us-west-2"))
+func New(p client.ConfigProvider, cfgs ...*aws.Config) *Kinesis {
+ c := p.ClientConfig(ServiceName, cfgs...)
+ return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion)
+}
+
+// newClient creates, initializes and returns a new service client instance.
+func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *Kinesis {
+ svc := &Kinesis{
+ Client: client.New(
+ cfg,
+ metadata.ClientInfo{
+ ServiceName: ServiceName,
+ SigningRegion: signingRegion,
+ Endpoint: endpoint,
+ APIVersion: "2013-12-02",
+ JSONVersion: "1.1",
+ TargetPrefix: "Kinesis_20131202",
+ },
+ handlers,
+ ),
+ }
+
+ // Handlers
+ svc.Handlers.Sign.PushBack(v4.Sign)
+ svc.Handlers.Build.PushBack(jsonrpc.Build)
+ svc.Handlers.Unmarshal.PushBack(jsonrpc.Unmarshal)
+ svc.Handlers.UnmarshalMeta.PushBack(jsonrpc.UnmarshalMeta)
+ svc.Handlers.UnmarshalError.PushBack(jsonrpc.UnmarshalError)
+
+ // Run custom client initialization if present
+ if initClient != nil {
+ initClient(svc.Client)
+ }
+
+ return svc
+}
+
+// newRequest creates a new request for a Kinesis operation and runs any
+// custom request initialization.
+func (c *Kinesis) newRequest(op *request.Operation, params, data interface{}) *request.Request {
+ req := c.NewRequest(op, params, data)
+
+ // Run custom request initialization if present
+ if initRequest != nil {
+ initRequest(req)
+ }
+
+ return req
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/service/kinesis/waiters.go b/vendor/github.com/aws/aws-sdk-go/service/kinesis/waiters.go
new file mode 100644
index 000000000..383a2e0b7
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/kinesis/waiters.go
@@ -0,0 +1,30 @@
+// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT.
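+//
+// Illustrative usage sketch (not part of the generated file): a caller that
+// has just issued CreateStream can block until the stream reaches ACTIVE via
+// the waiter defined below, e.g.:
+//
+//    svc := kinesis.New(session.New()) // aws/session is assumed
+//    err := svc.WaitUntilStreamExists(&kinesis.DescribeStreamInput{
+//        StreamName: aws.String("my-stream"), // hypothetical stream name
+//    })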
+
+package kinesis
+
+import (
+ "github.com/aws/aws-sdk-go/private/waiter"
+)
+
+func (c *Kinesis) WaitUntilStreamExists(input *DescribeStreamInput) error {
+ waiterCfg := waiter.Config{
+ Operation: "DescribeStream",
+ Delay: 10,
+ MaxAttempts: 18,
+ Acceptors: []waiter.WaitAcceptor{
+ {
+ State: "success",
+ Matcher: "path",
+ Argument: "StreamDescription.StreamStatus",
+ Expected: "ACTIVE",
+ },
+ },
+ }
+
+ w := waiter.Waiter{
+ Client: c,
+ Input: input,
+ Config: waiterCfg,
+ }
+ return w.Wait()
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/service/lambda/api.go b/vendor/github.com/aws/aws-sdk-go/service/lambda/api.go
new file mode 100644
index 000000000..fb3b9be05
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/lambda/api.go
@@ -0,0 +1,2150 @@
+// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT.
+
+// Package lambda provides a client for AWS Lambda.
+package lambda
+
+import (
+ "io"
+ "time"
+
+ "github.com/aws/aws-sdk-go/aws/awsutil"
+ "github.com/aws/aws-sdk-go/aws/request"
+)
+
+const opAddPermission = "AddPermission"
+
+// AddPermissionRequest generates a request for the AddPermission operation.
+func (c *Lambda) AddPermissionRequest(input *AddPermissionInput) (req *request.Request, output *AddPermissionOutput) {
+ op := &request.Operation{
+ Name: opAddPermission,
+ HTTPMethod: "POST",
+ HTTPPath: "/2015-03-31/functions/{FunctionName}/policy",
+ }
+
+ if input == nil {
+ input = &AddPermissionInput{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &AddPermissionOutput{}
+ req.Data = output
+ return
+}
+
+// Adds a permission to the resource policy associated with the specified AWS
+// Lambda function. You use resource policies to grant permissions to event
+// sources that use the "push" model. In the "push" model, event sources (such
+// as Amazon S3 and custom applications) invoke your Lambda function. Each
+// permission you add to the resource policy allows an event source permission
+// to invoke the Lambda function.
+//
+// For information about the push model, see AWS Lambda: How it Works (http://docs.aws.amazon.com/lambda/latest/dg/lambda-introduction.html).
+//
+// If you are using the versioning feature (see AWS Lambda Function Versioning
+// and Aliases (http://docs.aws.amazon.com/lambda/latest/dg/versioning-aliases-v2.html)),
+// a Lambda function can have multiple ARNs that can be used to invoke the function.
+// Note that each permission you add to the resource policy using this API is
+// specific to an ARN, specified using the Qualifier parameter.
+//
+// This operation requires permission for the lambda:AddPermission action.
+func (c *Lambda) AddPermission(input *AddPermissionInput) (*AddPermissionOutput, error) {
+ req, out := c.AddPermissionRequest(input)
+ err := req.Send()
+ return out, err
+}
+
+const opCreateAlias = "CreateAlias"
+
+// CreateAliasRequest generates a request for the CreateAlias operation.
+func (c *Lambda) CreateAliasRequest(input *CreateAliasInput) (req *request.Request, output *AliasConfiguration) {
+ op := &request.Operation{
+ Name: opCreateAlias,
+ HTTPMethod: "POST",
+ HTTPPath: "/2015-03-31/functions/{FunctionName}/aliases",
+ }
+
+ if input == nil {
+ input = &CreateAliasInput{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &AliasConfiguration{}
+ req.Data = output
+ return
+}
+
+// Creates an alias to the specified Lambda function version.
For more information,
+// see Introduction to AWS Lambda Aliases (http://docs.aws.amazon.com/lambda/latest/dg/versioning-v2-intro-aliases.html).
+//
+// This requires permission for the lambda:CreateAlias action.
+func (c *Lambda) CreateAlias(input *CreateAliasInput) (*AliasConfiguration, error) {
+ req, out := c.CreateAliasRequest(input)
+ err := req.Send()
+ return out, err
+}
+
+const opCreateEventSourceMapping = "CreateEventSourceMapping"
+
+// CreateEventSourceMappingRequest generates a request for the CreateEventSourceMapping operation.
+func (c *Lambda) CreateEventSourceMappingRequest(input *CreateEventSourceMappingInput) (req *request.Request, output *EventSourceMappingConfiguration) {
+ op := &request.Operation{
+ Name: opCreateEventSourceMapping,
+ HTTPMethod: "POST",
+ HTTPPath: "/2015-03-31/event-source-mappings/",
+ }
+
+ if input == nil {
+ input = &CreateEventSourceMappingInput{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &EventSourceMappingConfiguration{}
+ req.Data = output
+ return
+}
+
+// Identifies a stream as an event source for a Lambda function. It can be either
+// an Amazon Kinesis stream or an Amazon DynamoDB stream. AWS Lambda invokes
+// the specified function when records are posted to the stream.
+//
+// This is the pull model, where AWS Lambda invokes the function. For more
+// information, go to AWS Lambda: How it Works (http://docs.aws.amazon.com/lambda/latest/dg/lambda-introduction.html)
+// in the AWS Lambda Developer Guide.
+//
+// This association between an Amazon Kinesis stream and a Lambda function
+// is called the event source mapping. You provide the configuration information
+// (for example, which stream to read from and which Lambda function to invoke)
+// for the event source mapping in the request body.
+//
+// Each event source, such as an Amazon Kinesis or a DynamoDB stream, can
+// be associated with multiple AWS Lambda functions. A given Lambda function
+// can be associated with multiple AWS event sources.
+//
+// This operation requires permission for the lambda:CreateEventSourceMapping
+// action.
+func (c *Lambda) CreateEventSourceMapping(input *CreateEventSourceMappingInput) (*EventSourceMappingConfiguration, error) {
+ req, out := c.CreateEventSourceMappingRequest(input)
+ err := req.Send()
+ return out, err
+}
+
+const opCreateFunction = "CreateFunction"
+
+// CreateFunctionRequest generates a request for the CreateFunction operation.
+func (c *Lambda) CreateFunctionRequest(input *CreateFunctionInput) (req *request.Request, output *FunctionConfiguration) {
+ op := &request.Operation{
+ Name: opCreateFunction,
+ HTTPMethod: "POST",
+ HTTPPath: "/2015-03-31/functions",
+ }
+
+ if input == nil {
+ input = &CreateFunctionInput{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &FunctionConfiguration{}
+ req.Data = output
+ return
+}
+
+// Creates a new Lambda function. The function metadata is created from the
+// request parameters, and the code for the function is provided by a .zip file
+// in the request body. If the function name already exists, the operation will
+// fail. Note that the function name is case-sensitive.
+//
+// This operation requires permission for the lambda:CreateFunction action.
+func (c *Lambda) CreateFunction(input *CreateFunctionInput) (*FunctionConfiguration, error) {
+ req, out := c.CreateFunctionRequest(input)
+ err := req.Send()
+ return out, err
+}
+
+const opDeleteAlias = "DeleteAlias"
+
+// DeleteAliasRequest generates a request for the DeleteAlias operation.
+const opDeleteAlias = "DeleteAlias"
+
+// DeleteAliasRequest generates a request for the DeleteAlias operation.
+func (c *Lambda) DeleteAliasRequest(input *DeleteAliasInput) (req *request.Request, output *DeleteAliasOutput) {
+	op := &request.Operation{
+		Name:       opDeleteAlias,
+		HTTPMethod: "DELETE",
+		HTTPPath:   "/2015-03-31/functions/{FunctionName}/aliases/{Name}",
+	}
+
+	if input == nil {
+		input = &DeleteAliasInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &DeleteAliasOutput{}
+	req.Data = output
+	return
+}
+
+// Deletes the specified Lambda function alias. For more information, see Introduction
+// to AWS Lambda Aliases (http://docs.aws.amazon.com/lambda/latest/dg/versioning-v2-intro-aliases.html).
+//
+// This requires permission for the lambda:DeleteAlias action.
+func (c *Lambda) DeleteAlias(input *DeleteAliasInput) (*DeleteAliasOutput, error) {
+	req, out := c.DeleteAliasRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+const opDeleteEventSourceMapping = "DeleteEventSourceMapping"
+
+// DeleteEventSourceMappingRequest generates a request for the DeleteEventSourceMapping operation.
+func (c *Lambda) DeleteEventSourceMappingRequest(input *DeleteEventSourceMappingInput) (req *request.Request, output *EventSourceMappingConfiguration) {
+	op := &request.Operation{
+		Name:       opDeleteEventSourceMapping,
+		HTTPMethod: "DELETE",
+		HTTPPath:   "/2015-03-31/event-source-mappings/{UUID}",
+	}
+
+	if input == nil {
+		input = &DeleteEventSourceMappingInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &EventSourceMappingConfiguration{}
+	req.Data = output
+	return
+}
+
+// Removes an event source mapping. This means AWS Lambda will no longer invoke
+// the function for events in the associated source.
+//
+// This operation requires permission for the lambda:DeleteEventSourceMapping
+// action.
+func (c *Lambda) DeleteEventSourceMapping(input *DeleteEventSourceMappingInput) (*EventSourceMappingConfiguration, error) {
+	req, out := c.DeleteEventSourceMappingRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+const opDeleteFunction = "DeleteFunction"
+
+// DeleteFunctionRequest generates a request for the DeleteFunction operation.
+func (c *Lambda) DeleteFunctionRequest(input *DeleteFunctionInput) (req *request.Request, output *DeleteFunctionOutput) {
+	op := &request.Operation{
+		Name:       opDeleteFunction,
+		HTTPMethod: "DELETE",
+		HTTPPath:   "/2015-03-31/functions/{FunctionName}",
+	}
+
+	if input == nil {
+		input = &DeleteFunctionInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &DeleteFunctionOutput{}
+	req.Data = output
+	return
+}
+
+// Deletes the specified Lambda function code and configuration.
+//
+// If you don't specify a function version, AWS Lambda will delete the function,
+// including all its versions, and any aliases pointing to the function versions.
+//
+// When you delete a function, the associated resource policy is also deleted.
+// You will need to delete the event source mappings explicitly.
+//
+// For information about function versioning, see AWS Lambda Function Versioning
+// and Aliases (http://docs.aws.amazon.com/lambda/latest/dg/versioning-aliases-v2.html).
+//
+// This operation requires permission for the lambda:DeleteFunction action.
+func (c *Lambda) DeleteFunction(input *DeleteFunctionInput) (*DeleteFunctionOutput, error) {
+	req, out := c.DeleteFunctionRequest(input)
+	err := req.Send()
+	return out, err
+}
+
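A sketch of deleting one published version through the optional Qualifier, assuming a svc client built as in the earlier examples; the function name and version are placeholders:

// Delete only version "2"; aliases pointing at it must be removed first,
// and event source mappings always have to be deleted explicitly.
_, err := svc.DeleteFunction(&lambda.DeleteFunctionInput{
	FunctionName: aws.String("Thumbnail"), // placeholder
	Qualifier:    aws.String("2"),         // omit to delete the function and every version
})
if err != nil {
	log.Fatal(err)
}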
+const opGetAlias = "GetAlias"
+
+// GetAliasRequest generates a request for the GetAlias operation.
+func (c *Lambda) GetAliasRequest(input *GetAliasInput) (req *request.Request, output *AliasConfiguration) {
+	op := &request.Operation{
+		Name:       opGetAlias,
+		HTTPMethod: "GET",
+		HTTPPath:   "/2015-03-31/functions/{FunctionName}/aliases/{Name}",
+	}
+
+	if input == nil {
+		input = &GetAliasInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &AliasConfiguration{}
+	req.Data = output
+	return
+}
+
+// Returns the specified alias information such as the alias ARN, description,
+// and function version it is pointing to. For more information, see Introduction
+// to AWS Lambda Aliases (http://docs.aws.amazon.com/lambda/latest/dg/versioning-v2-intro-aliases.html).
+//
+// This requires permission for the lambda:GetAlias action.
+func (c *Lambda) GetAlias(input *GetAliasInput) (*AliasConfiguration, error) {
+	req, out := c.GetAliasRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+const opGetEventSourceMapping = "GetEventSourceMapping"
+
+// GetEventSourceMappingRequest generates a request for the GetEventSourceMapping operation.
+func (c *Lambda) GetEventSourceMappingRequest(input *GetEventSourceMappingInput) (req *request.Request, output *EventSourceMappingConfiguration) {
+	op := &request.Operation{
+		Name:       opGetEventSourceMapping,
+		HTTPMethod: "GET",
+		HTTPPath:   "/2015-03-31/event-source-mappings/{UUID}",
+	}
+
+	if input == nil {
+		input = &GetEventSourceMappingInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &EventSourceMappingConfiguration{}
+	req.Data = output
+	return
+}
+
+// Returns configuration information for the specified event source mapping
+// (see CreateEventSourceMapping).
+//
+// This operation requires permission for the lambda:GetEventSourceMapping
+// action.
+func (c *Lambda) GetEventSourceMapping(input *GetEventSourceMappingInput) (*EventSourceMappingConfiguration, error) {
+	req, out := c.GetEventSourceMappingRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+const opGetFunction = "GetFunction"
+
+// GetFunctionRequest generates a request for the GetFunction operation.
+func (c *Lambda) GetFunctionRequest(input *GetFunctionInput) (req *request.Request, output *GetFunctionOutput) {
+	op := &request.Operation{
+		Name:       opGetFunction,
+		HTTPMethod: "GET",
+		HTTPPath:   "/2015-03-31/functions/{FunctionName}",
+	}
+
+	if input == nil {
+		input = &GetFunctionInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &GetFunctionOutput{}
+	req.Data = output
+	return
+}
+
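A sketch of the GetFunction operation documented just below, assuming the same svc client plus net/http and io/ioutil imports; it fetches the function metadata and downloads the code through the presigned URL, which is valid for about 10 minutes:

out, err := svc.GetFunction(&lambda.GetFunctionInput{
	FunctionName: aws.String("Thumbnail"), // placeholder
})
if err != nil {
	log.Fatal(err)
}
// Code.Location is a presigned S3 URL; a plain http.Get is enough to fetch it.
resp, err := http.Get(aws.StringValue(out.Code.Location))
if err != nil {
	log.Fatal(err)
}
defer resp.Body.Close()
zip, _ := ioutil.ReadAll(resp.Body)
fmt.Printf("downloaded %d bytes for %s\n", len(zip),
	aws.StringValue(out.Configuration.FunctionName))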
+// Returns the configuration information of the Lambda function and a presigned
+// URL link to the .zip file you uploaded with CreateFunction so you can download
+// the .zip file. Note that the URL is valid for up to 10 minutes. The configuration
+// information is the same information you provided as parameters when uploading
+// the function.
+//
+// Using the optional Qualifier parameter, you can specify a specific function
+// version for which you want this information. If you don't specify this parameter,
+// the API uses the unqualified function ARN, which returns information about the
+// $LATEST version of the Lambda function. For more information, see AWS Lambda
+// Function Versioning and Aliases (http://docs.aws.amazon.com/lambda/latest/dg/versioning-aliases-v2.html).
+//
+// This operation requires permission for the lambda:GetFunction action.
+func (c *Lambda) GetFunction(input *GetFunctionInput) (*GetFunctionOutput, error) {
+	req, out := c.GetFunctionRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+const opGetFunctionConfiguration = "GetFunctionConfiguration"
+
+// GetFunctionConfigurationRequest generates a request for the GetFunctionConfiguration operation.
+func (c *Lambda) GetFunctionConfigurationRequest(input *GetFunctionConfigurationInput) (req *request.Request, output *FunctionConfiguration) {
+	op := &request.Operation{
+		Name:       opGetFunctionConfiguration,
+		HTTPMethod: "GET",
+		HTTPPath:   "/2015-03-31/functions/{FunctionName}/configuration",
+	}
+
+	if input == nil {
+		input = &GetFunctionConfigurationInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &FunctionConfiguration{}
+	req.Data = output
+	return
+}
+
+// Returns the configuration information of the Lambda function. This is the
+// same information you provided as parameters when uploading the function by
+// using CreateFunction.
+//
+// You can use the optional Qualifier parameter to retrieve configuration information
+// for a specific Lambda function version. If you don't provide it, the API
+// returns information about the $LATEST version of the function. For more information
+// about versioning, see AWS Lambda Function Versioning and Aliases (http://docs.aws.amazon.com/lambda/latest/dg/versioning-aliases-v2.html).
+//
+// This operation requires permission for the lambda:GetFunctionConfiguration
+// action.
+func (c *Lambda) GetFunctionConfiguration(input *GetFunctionConfigurationInput) (*FunctionConfiguration, error) {
+	req, out := c.GetFunctionConfigurationRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+const opGetPolicy = "GetPolicy"
+
+// GetPolicyRequest generates a request for the GetPolicy operation.
+func (c *Lambda) GetPolicyRequest(input *GetPolicyInput) (req *request.Request, output *GetPolicyOutput) {
+	op := &request.Operation{
+		Name:       opGetPolicy,
+		HTTPMethod: "GET",
+		HTTPPath:   "/2015-03-31/functions/{FunctionName}/policy",
+	}
+
+	if input == nil {
+		input = &GetPolicyInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &GetPolicyOutput{}
+	req.Data = output
+	return
+}
+
+// Returns the resource policy, containing a list of permissions that apply
+// to the specific ARN that you specify via the Qualifier parameter.
+//
+// For information about adding permissions, see AddPermission.
+//
+// You need permission for the lambda:GetPolicy action.
+func (c *Lambda) GetPolicy(input *GetPolicyInput) (*GetPolicyOutput, error) {
+	req, out := c.GetPolicyRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+const opInvoke = "Invoke"
+
+// InvokeRequest generates a request for the Invoke operation.
+func (c *Lambda) InvokeRequest(input *InvokeInput) (req *request.Request, output *InvokeOutput) {
+	op := &request.Operation{
+		Name:       opInvoke,
+		HTTPMethod: "POST",
+		HTTPPath:   "/2015-03-31/functions/{FunctionName}/invocations",
+	}
+
+	if input == nil {
+		input = &InvokeInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &InvokeOutput{}
+	req.Data = output
+	return
+}
+
+// Invokes a specific Lambda function version.
+//
+// If you don't provide the Qualifier parameter, it uses the unqualified function
+// ARN, which results in invocation of the $LATEST version of the Lambda function
+// (when you create a Lambda function, the $LATEST is the version). The AWS
+// Lambda versioning and aliases feature allows you to publish multiple versions
+// of a Lambda function and also create aliases for each function version. So
+// each of your Lambda function versions can be invoked using multiple ARNs. For
+// more information, see AWS Lambda Function Versioning and Aliases (http://docs.aws.amazon.com/lambda/latest/dg/versioning-aliases-v2.html).
+// Using the Qualifier parameter, you can specify a function version or alias
+// name to invoke a specific function version. If you specify a function version,
+// the API uses the qualified function ARN to invoke a specific function version.
+// If you specify an alias name, the API uses the alias ARN to invoke the function
+// version to which the alias points.
+//
+// This operation requires permission for the lambda:InvokeFunction action.
+func (c *Lambda) Invoke(input *InvokeInput) (*InvokeOutput, error) {
+	req, out := c.InvokeRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+const opInvokeAsync = "InvokeAsync"
+
+// InvokeAsyncRequest generates a request for the InvokeAsync operation.
+func (c *Lambda) InvokeAsyncRequest(input *InvokeAsyncInput) (req *request.Request, output *InvokeAsyncOutput) {
+	op := &request.Operation{
+		Name:       opInvokeAsync,
+		HTTPMethod: "POST",
+		HTTPPath:   "/2014-11-13/functions/{FunctionName}/invoke-async/",
+	}
+
+	if input == nil {
+		input = &InvokeAsyncInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &InvokeAsyncOutput{}
+	req.Data = output
+	return
+}
+
+// This API is deprecated. We recommend you use the Invoke API (see Invoke).
+// Submits an invocation request to AWS Lambda. Upon receiving the request,
+// Lambda executes the specified function asynchronously. To see the logs
+// generated by the Lambda function execution, see the CloudWatch logs console.
+//
+// This operation requires permission for the lambda:InvokeFunction action.
+func (c *Lambda) InvokeAsync(input *InvokeAsyncInput) (*InvokeAsyncOutput, error) {
+	req, out := c.InvokeAsyncRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+const opListAliases = "ListAliases"
+
+// ListAliasesRequest generates a request for the ListAliases operation.
+func (c *Lambda) ListAliasesRequest(input *ListAliasesInput) (req *request.Request, output *ListAliasesOutput) {
+	op := &request.Operation{
+		Name:       opListAliases,
+		HTTPMethod: "GET",
+		HTTPPath:   "/2015-03-31/functions/{FunctionName}/aliases",
+	}
+
+	if input == nil {
+		input = &ListAliasesInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &ListAliasesOutput{}
+	req.Data = output
+	return
+}
+
+// Returns a list of aliases created for a Lambda function. For each alias, the
+// response includes information such as the alias ARN, description, alias name,
+// and the function version to which it points. For more information, see Introduction
+// to AWS Lambda Aliases (http://docs.aws.amazon.com/lambda/latest/dg/versioning-v2-intro-aliases.html).
+//
+// This requires permission for the lambda:ListAliases action.
+func (c *Lambda) ListAliases(input *ListAliasesInput) (*ListAliasesOutput, error) {
+	req, out := c.ListAliasesRequest(input)
+	err := req.Send()
+	return out, err
+}
+
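A sketch of the Invoke operation above, assuming the same svc client plus encoding/base64 imported; it invokes through an alias ARN, asks for the log tail, and inspects the error and log headers on the way back. The payload, function name, and alias are placeholders:

resp, err := svc.Invoke(&lambda.InvokeInput{
	FunctionName:   aws.String("Thumbnail"),          // placeholder
	Payload:        []byte(`{"key": "value"}`),       // placeholder JSON input
	InvocationType: aws.String("RequestResponse"),
	LogType:        aws.String("Tail"),               // last 4 KB of logs, base64-encoded
	Qualifier:      aws.String("PROD"),               // invoke via an alias ARN
})
if err != nil {
	log.Fatal(err)
}
if resp.FunctionError != nil {
	// Handled errors come from the function; Unhandled from AWS Lambda itself.
	log.Printf("function error (%s): %s", *resp.FunctionError, resp.Payload)
}
logs, _ := base64.StdEncoding.DecodeString(aws.StringValue(resp.LogResult))
fmt.Printf("status %d, logs:\n%s\n", aws.Int64Value(resp.StatusCode), logs)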
+const opListEventSourceMappings = "ListEventSourceMappings"
+
+// ListEventSourceMappingsRequest generates a request for the ListEventSourceMappings operation.
+func (c *Lambda) ListEventSourceMappingsRequest(input *ListEventSourceMappingsInput) (req *request.Request, output *ListEventSourceMappingsOutput) {
+	op := &request.Operation{
+		Name:       opListEventSourceMappings,
+		HTTPMethod: "GET",
+		HTTPPath:   "/2015-03-31/event-source-mappings/",
+		Paginator: &request.Paginator{
+			InputTokens:     []string{"Marker"},
+			OutputTokens:    []string{"NextMarker"},
+			LimitToken:      "MaxItems",
+			TruncationToken: "",
+		},
+	}
+
+	if input == nil {
+		input = &ListEventSourceMappingsInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &ListEventSourceMappingsOutput{}
+	req.Data = output
+	return
+}
+
+// Returns a list of event source mappings you created using the CreateEventSourceMapping
+// (see CreateEventSourceMapping), where you identify a stream as an event source.
+// This list does not include Amazon S3 event sources.
+//
+// For each mapping, the API returns configuration information. You can optionally
+// specify filters to retrieve specific event source mappings.
+//
+// This operation requires permission for the lambda:ListEventSourceMappings
+// action.
+func (c *Lambda) ListEventSourceMappings(input *ListEventSourceMappingsInput) (*ListEventSourceMappingsOutput, error) {
+	req, out := c.ListEventSourceMappingsRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+func (c *Lambda) ListEventSourceMappingsPages(input *ListEventSourceMappingsInput, fn func(p *ListEventSourceMappingsOutput, lastPage bool) (shouldContinue bool)) error {
+	page, _ := c.ListEventSourceMappingsRequest(input)
+	page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator"))
+	return page.EachPage(func(p interface{}, lastPage bool) bool {
+		return fn(p.(*ListEventSourceMappingsOutput), lastPage)
+	})
+}
+
+const opListFunctions = "ListFunctions"
+
+// ListFunctionsRequest generates a request for the ListFunctions operation.
+func (c *Lambda) ListFunctionsRequest(input *ListFunctionsInput) (req *request.Request, output *ListFunctionsOutput) {
+	op := &request.Operation{
+		Name:       opListFunctions,
+		HTTPMethod: "GET",
+		HTTPPath:   "/2015-03-31/functions/",
+		Paginator: &request.Paginator{
+			InputTokens:     []string{"Marker"},
+			OutputTokens:    []string{"NextMarker"},
+			LimitToken:      "MaxItems",
+			TruncationToken: "",
+		},
+	}
+
+	if input == nil {
+		input = &ListFunctionsInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &ListFunctionsOutput{}
+	req.Data = output
+	return
+}
+
+// Returns a list of your Lambda functions. For each function, the response
+// includes the function configuration information. You must use GetFunction
+// to retrieve the code for your function.
+//
+// This operation requires permission for the lambda:ListFunctions action.
+func (c *Lambda) ListFunctions(input *ListFunctionsInput) (*ListFunctionsOutput, error) {
+	req, out := c.ListFunctionsRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+func (c *Lambda) ListFunctionsPages(input *ListFunctionsInput, fn func(p *ListFunctionsOutput, lastPage bool) (shouldContinue bool)) error {
+	page, _ := c.ListFunctionsRequest(input)
+	page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator"))
+	return page.EachPage(func(p interface{}, lastPage bool) bool {
+		return fn(p.(*ListFunctionsOutput), lastPage)
+	})
+}
+
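Both List operations above carry Marker/NextMarker paginator metadata, so the generated Pages helpers can drive the iteration for you. A sketch, assuming the same svc client:

err := svc.ListFunctionsPages(&lambda.ListFunctionsInput{
	MaxItems: aws.Int64(50), // page size; Marker/NextMarker are handled internally
}, func(page *lambda.ListFunctionsOutput, lastPage bool) bool {
	for _, fn := range page.Functions {
		fmt.Println(aws.StringValue(fn.FunctionName), aws.StringValue(fn.Runtime))
	}
	return true // returning false would stop before NextMarker is exhausted
})
if err != nil {
	log.Fatal(err)
}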
+const opListVersionsByFunction = "ListVersionsByFunction"
+
+// ListVersionsByFunctionRequest generates a request for the ListVersionsByFunction operation.
+func (c *Lambda) ListVersionsByFunctionRequest(input *ListVersionsByFunctionInput) (req *request.Request, output *ListVersionsByFunctionOutput) {
+	op := &request.Operation{
+		Name:       opListVersionsByFunction,
+		HTTPMethod: "GET",
+		HTTPPath:   "/2015-03-31/functions/{FunctionName}/versions",
+	}
+
+	if input == nil {
+		input = &ListVersionsByFunctionInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &ListVersionsByFunctionOutput{}
+	req.Data = output
+	return
+}
+
+// Lists all versions of a function.
+func (c *Lambda) ListVersionsByFunction(input *ListVersionsByFunctionInput) (*ListVersionsByFunctionOutput, error) {
+	req, out := c.ListVersionsByFunctionRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+const opPublishVersion = "PublishVersion"
+
+// PublishVersionRequest generates a request for the PublishVersion operation.
+func (c *Lambda) PublishVersionRequest(input *PublishVersionInput) (req *request.Request, output *FunctionConfiguration) {
+	op := &request.Operation{
+		Name:       opPublishVersion,
+		HTTPMethod: "POST",
+		HTTPPath:   "/2015-03-31/functions/{FunctionName}/versions",
+	}
+
+	if input == nil {
+		input = &PublishVersionInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &FunctionConfiguration{}
+	req.Data = output
+	return
+}
+
+// Publishes a version of your function from the current snapshot of HEAD. That
+// is, AWS Lambda takes a snapshot of the function code and configuration information
+// from HEAD and publishes a new version. The code and handler of this specific
+// Lambda function version cannot be modified after publication, but you can
+// modify the configuration information.
+func (c *Lambda) PublishVersion(input *PublishVersionInput) (*FunctionConfiguration, error) {
+	req, out := c.PublishVersionRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+const opRemovePermission = "RemovePermission"
+
+// RemovePermissionRequest generates a request for the RemovePermission operation.
+func (c *Lambda) RemovePermissionRequest(input *RemovePermissionInput) (req *request.Request, output *RemovePermissionOutput) {
+	op := &request.Operation{
+		Name:       opRemovePermission,
+		HTTPMethod: "DELETE",
+		HTTPPath:   "/2015-03-31/functions/{FunctionName}/policy/{StatementId}",
+	}
+
+	if input == nil {
+		input = &RemovePermissionInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &RemovePermissionOutput{}
+	req.Data = output
+	return
+}
+
+// You can remove individual permissions from a resource policy associated
+// with a Lambda function by providing a statement ID that you provided when
+// you added the permission. The API removes the corresponding permission that
+// is associated with the specific ARN identified by the Qualifier parameter.
+//
+// Note that removal of a permission will cause an active event source to lose
+// permission to the function.
+//
+// You need permission for the lambda:RemovePermission action.
+func (c *Lambda) RemovePermission(input *RemovePermissionInput) (*RemovePermissionOutput, error) {
+	req, out := c.RemovePermissionRequest(input)
+	err := req.Send()
+	return out, err
+}
+
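A sketch of PublishVersion, assuming the same svc client; the returned FunctionConfiguration carries the new immutable version number and the code hash:

// Snapshot the current code and configuration as an immutable new version.
ver, err := svc.PublishVersion(&lambda.PublishVersionInput{
	FunctionName: aws.String("Thumbnail"), // placeholder
})
if err != nil {
	log.Fatal(err)
}
fmt.Printf("published version %s (sha256 %s)\n",
	aws.StringValue(ver.Version), aws.StringValue(ver.CodeSha256))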
+const opUpdateAlias = "UpdateAlias"
+
+// UpdateAliasRequest generates a request for the UpdateAlias operation.
+func (c *Lambda) UpdateAliasRequest(input *UpdateAliasInput) (req *request.Request, output *AliasConfiguration) {
+	op := &request.Operation{
+		Name:       opUpdateAlias,
+		HTTPMethod: "PUT",
+		HTTPPath:   "/2015-03-31/functions/{FunctionName}/aliases/{Name}",
+	}
+
+	if input == nil {
+		input = &UpdateAliasInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &AliasConfiguration{}
+	req.Data = output
+	return
+}
+
+// Using this API you can update the function version to which the alias points
+// and the alias description. For more information, see Introduction to AWS Lambda
+// Aliases (http://docs.aws.amazon.com/lambda/latest/dg/versioning-v2-intro-aliases.html).
+//
+// This requires permission for the lambda:UpdateAlias action.
+func (c *Lambda) UpdateAlias(input *UpdateAliasInput) (*AliasConfiguration, error) {
+	req, out := c.UpdateAliasRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+const opUpdateEventSourceMapping = "UpdateEventSourceMapping"
+
+// UpdateEventSourceMappingRequest generates a request for the UpdateEventSourceMapping operation.
+func (c *Lambda) UpdateEventSourceMappingRequest(input *UpdateEventSourceMappingInput) (req *request.Request, output *EventSourceMappingConfiguration) {
+	op := &request.Operation{
+		Name:       opUpdateEventSourceMapping,
+		HTTPMethod: "PUT",
+		HTTPPath:   "/2015-03-31/event-source-mappings/{UUID}",
+	}
+
+	if input == nil {
+		input = &UpdateEventSourceMappingInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &EventSourceMappingConfiguration{}
+	req.Data = output
+	return
+}
+
+// You can update an event source mapping. This is useful if you want to change
+// the parameters of the existing mapping without losing your position in the
+// stream. You can change which function will receive the stream records, but
+// to change the stream itself, you must create a new mapping.
+//
+// This operation requires permission for the lambda:UpdateEventSourceMapping
+// action.
+func (c *Lambda) UpdateEventSourceMapping(input *UpdateEventSourceMappingInput) (*EventSourceMappingConfiguration, error) {
+	req, out := c.UpdateEventSourceMappingRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+const opUpdateFunctionCode = "UpdateFunctionCode"
+
+// UpdateFunctionCodeRequest generates a request for the UpdateFunctionCode operation.
+func (c *Lambda) UpdateFunctionCodeRequest(input *UpdateFunctionCodeInput) (req *request.Request, output *FunctionConfiguration) {
+	op := &request.Operation{
+		Name:       opUpdateFunctionCode,
+		HTTPMethod: "PUT",
+		HTTPPath:   "/2015-03-31/functions/{FunctionName}/code",
+	}
+
+	if input == nil {
+		input = &UpdateFunctionCodeInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &FunctionConfiguration{}
+	req.Data = output
+	return
+}
+
+// Updates the code for the specified Lambda function. This operation must only
+// be used on an existing Lambda function and cannot be used to update the function
+// configuration.
+//
+// This operation requires permission for the lambda:UpdateFunctionCode action.
+func (c *Lambda) UpdateFunctionCode(input *UpdateFunctionCodeInput) (*FunctionConfiguration, error) {
+	req, out := c.UpdateFunctionCodeRequest(input)
+	err := req.Send()
+	return out, err
+}
+
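A sketch of UpdateFunctionCode, assuming the same svc client plus io/ioutil, and assuming UpdateFunctionCodeInput mirrors FunctionCode's ZipFile field and offers a Publish flag (assumptions, since that input struct is defined further down this file than the excerpt shows):

zip, err := ioutil.ReadFile("thumbnail.zip") // placeholder path
if err != nil {
	log.Fatal(err)
}
cfg, err := svc.UpdateFunctionCode(&lambda.UpdateFunctionCodeInput{
	FunctionName: aws.String("Thumbnail"), // placeholder
	ZipFile:      zip,                     // the serializer base64-encodes blob fields
	Publish:      aws.Bool(true),          // assumed field: also snapshot a new version
})
if err != nil {
	log.Fatal(err)
}
fmt.Println(aws.StringValue(cfg.CodeSha256))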
+const opUpdateFunctionConfiguration = "UpdateFunctionConfiguration"
+
+// UpdateFunctionConfigurationRequest generates a request for the UpdateFunctionConfiguration operation.
+func (c *Lambda) UpdateFunctionConfigurationRequest(input *UpdateFunctionConfigurationInput) (req *request.Request, output *FunctionConfiguration) {
+	op := &request.Operation{
+		Name:       opUpdateFunctionConfiguration,
+		HTTPMethod: "PUT",
+		HTTPPath:   "/2015-03-31/functions/{FunctionName}/configuration",
+	}
+
+	if input == nil {
+		input = &UpdateFunctionConfigurationInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &FunctionConfiguration{}
+	req.Data = output
+	return
+}
+
+// Updates the configuration parameters for the specified Lambda function by
+// using the values provided in the request. You provide only the parameters
+// you want to change. This operation must only be used on an existing Lambda
+// function and cannot be used to update the function's code.
+//
+// This operation requires permission for the lambda:UpdateFunctionConfiguration
+// action.
+func (c *Lambda) UpdateFunctionConfiguration(input *UpdateFunctionConfigurationInput) (*FunctionConfiguration, error) {
+	req, out := c.UpdateFunctionConfigurationRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+type AddPermissionInput struct {
+	_ struct{} `type:"structure"`
+
+	// The AWS Lambda action you want to allow in this statement. Each Lambda action
+	// is a string starting with "lambda:" followed by the API name (see Operations).
+	// For example, "lambda:CreateFunction". You can use a wildcard ("lambda:*")
+	// to grant permission for all AWS Lambda actions.
+	Action *string `type:"string" required:"true"`
+
+	// Name of the Lambda function whose resource policy you are updating by adding
+	// a new permission.
+	//
+	// You can specify an unqualified function name (for example, "Thumbnail")
+	// or you can specify Amazon Resource Name (ARN) of the function (for example,
+	// "arn:aws:lambda:us-west-2:account-id:function:ThumbNail"). AWS Lambda also
+	// allows you to specify only the account ID qualifier (for example, "account-id:Thumbnail").
+	// Note that the length constraint applies only to the ARN. If you specify only
+	// the function name, it is limited to 64 characters in length.
+	FunctionName *string `location:"uri" locationName:"FunctionName" min:"1" type:"string" required:"true"`
+
+	// The principal who is getting this permission. It can be Amazon S3 service
+	// Principal ("s3.amazonaws.com") if you want Amazon S3 to invoke the function,
+	// an AWS account ID if you are granting cross-account permission, or any valid
+	// AWS service principal such as "sns.amazonaws.com". For example, you might
+	// want to allow a custom application in another AWS account to push events
+	// to AWS Lambda by invoking your function.
+	Principal *string `type:"string" required:"true"`
+
+	// You can specify this optional query parameter to specify a function version
+	// or alias name. The permission will then apply to the specific qualified ARN.
+	// For example, if you specify function version 2 as the qualifier, then permission
+	// applies only when the request is made using the qualified function ARN:
+	//
+	// arn:aws:lambda:aws-region:acct-id:function:function-name:2
+	//
+	// If you specify an alias name, for example "PROD", then the permission is
+	// valid only for requests made using the alias ARN:
+	//
+	// arn:aws:lambda:aws-region:acct-id:function:function-name:PROD
+	//
+	// If the qualifier is not specified, the permission is valid only when the
+	// request is made using the unqualified function ARN.
+	//
+	// arn:aws:lambda:aws-region:acct-id:function:function-name
+	Qualifier *string `location:"querystring" locationName:"Qualifier" min:"1" type:"string"`
+
+	// The AWS account ID (without a hyphen) of the source owner. For example, if
+	// the SourceArn identifies a bucket, then this is the bucket owner's account
+	// ID. You can use this additional condition to ensure the bucket you specify
+	// is owned by a specific account (it is possible the bucket owner deleted the
+	// bucket and some other AWS account created the bucket). You can also use this
+	// condition to specify all sources (that is, you don't specify the SourceArn)
+	// owned by a specific account.
+	SourceAccount *string `type:"string"`
+
+	// This is optional; however, when granting Amazon S3 permission to invoke your
+	// function, you should specify this field with the bucket Amazon Resource Name
+	// (ARN) as its value. This ensures that only events generated from the specified
+	// bucket can invoke the function.
+	//
+	// If you add a permission for the Amazon S3 principal without providing the
+	// source ARN, any AWS account that creates a mapping to your function ARN can
+	// send events to invoke your Lambda function from Amazon S3.
+	SourceArn *string `type:"string"`
+
+	// A unique statement identifier.
+	StatementId *string `min:"1" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s AddPermissionInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s AddPermissionInput) GoString() string {
+	return s.String()
+}
+
+type AddPermissionOutput struct {
+	_ struct{} `type:"structure"`
+
+	// The permission statement you specified in the request. The response returns
+	// it as a string using "\" as an escape character in the JSON.
+	Statement *string `type:"string"`
+}
+
+// String returns the string representation
+func (s AddPermissionOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s AddPermissionOutput) GoString() string {
+	return s.String()
+}
+
+// Provides configuration information about a Lambda function version alias.
+type AliasConfiguration struct {
+	_ struct{} `type:"structure"`
+
+	// Lambda function ARN that is qualified using the alias name as the suffix.
+	// For example, if you create an alias "BETA" pointing to a helloworld function
+	// version, the ARN is arn:aws:lambda:aws-region:acct-id:function:helloworld:BETA.
+	AliasArn *string `type:"string"`
+
+	// Alias description.
+	Description *string `type:"string"`
+
+	// Function version to which the alias points.
+	FunctionVersion *string `min:"1" type:"string"`
+
+	// Alias name.
+	Name *string `min:"1" type:"string"`
+}
+
+// String returns the string representation
+func (s AliasConfiguration) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s AliasConfiguration) GoString() string {
+	return s.String()
+}
+
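A sketch tying the AddPermissionInput fields above together for the common S3 case, assuming the same svc client; the account ID, bucket, alias, and names are placeholders:

_, err := svc.AddPermission(&lambda.AddPermissionInput{
	Action:        aws.String("lambda:InvokeFunction"),
	FunctionName:  aws.String("Thumbnail"),                   // placeholder
	Principal:     aws.String("s3.amazonaws.com"),
	SourceArn:     aws.String("arn:aws:s3:::example-bucket"), // restrict to one bucket
	SourceAccount: aws.String("123456789012"),                // guard against bucket re-creation
	StatementId:   aws.String("s3-invoke-1"),
	Qualifier:     aws.String("PROD"),                        // scope the grant to the alias ARN
})
if err != nil {
	log.Fatal(err)
}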
+type CreateAliasInput struct {
+	_ struct{} `type:"structure"`
+
+	// Description of the alias.
+	Description *string `type:"string"`
+
+	// Name of the Lambda function for which you want to create an alias.
+	FunctionName *string `location:"uri" locationName:"FunctionName" min:"1" type:"string" required:"true"`
+
+	// Lambda function version for which you are creating the alias.
+	FunctionVersion *string `min:"1" type:"string" required:"true"`
+
+	// Name for the alias you're creating.
+	Name *string `min:"1" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s CreateAliasInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreateAliasInput) GoString() string {
+	return s.String()
+}
+
+type CreateEventSourceMappingInput struct {
+	_ struct{} `type:"structure"`
+
+	// The largest number of records that AWS Lambda will retrieve from your event
+	// source at the time of invoking your function. Your function receives an event
+	// with all the retrieved records. The default is 100 records.
+	BatchSize *int64 `min:"1" type:"integer"`
+
+	// Indicates whether AWS Lambda should begin polling the event source. By default,
+	// Enabled is true.
+	Enabled *bool `type:"boolean"`
+
+	// The Amazon Resource Name (ARN) of the Amazon Kinesis or the Amazon DynamoDB
+	// stream that is the event source. Any record added to this stream could cause
+	// AWS Lambda to invoke your Lambda function, depending on the BatchSize. AWS
+	// Lambda POSTs the Amazon Kinesis event, containing records, to your Lambda
+	// function as JSON.
+	EventSourceArn *string `type:"string" required:"true"`
+
+	// The Lambda function to invoke when AWS Lambda detects an event on the stream.
+	//
+	// You can specify an unqualified function name (for example, "Thumbnail")
+	// or you can specify Amazon Resource Name (ARN) of the function (for example,
+	// "arn:aws:lambda:us-west-2:account-id:function:ThumbNail"). AWS Lambda also
+	// allows you to specify only the account ID qualifier (for example, "account-id:Thumbnail").
+	// Note that the length constraint applies only to the ARN. If you specify only
+	// the function name, it is limited to 64 characters in length.
+	FunctionName *string `min:"1" type:"string" required:"true"`
+
+	// The position in the stream where AWS Lambda should start reading. For more
+	// information, go to ShardIteratorType (http://docs.aws.amazon.com/kinesis/latest/APIReference/API_GetShardIterator.html#Kinesis-GetShardIterator-request-ShardIteratorType)
+	// in the Amazon Kinesis API Reference.
+	StartingPosition *string `type:"string" required:"true" enum:"EventSourcePosition"`
+}
+
+// String returns the string representation
+func (s CreateEventSourceMappingInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreateEventSourceMappingInput) GoString() string {
+	return s.String()
+}
+
+type CreateFunctionInput struct {
+	_ struct{} `type:"structure"`
+
+	// The code for the Lambda function.
+	Code *FunctionCode `type:"structure" required:"true"`
+
+	// A short, user-defined function description. Lambda does not use this value.
+	// Assign a meaningful description as you see fit.
+	Description *string `type:"string"`
+
+	// The name you want to assign to the function you are uploading. You can specify
+	// an unqualified function name (for example, "Thumbnail") or you can specify
+	// Amazon Resource Name (ARN) of the function (for example, "arn:aws:lambda:us-west-2:account-id:function:ThumbNail").
+	// AWS Lambda also allows you to specify only the account ID qualifier (for
+	// example, "account-id:Thumbnail"). Note that the length constraint applies
+	// only to the ARN. If you specify only the function name, it is limited to
+	// 64 characters in length. The function names appear in the console and are
+	// returned in the ListFunctions API. Function names are used to specify functions
+	// to other AWS Lambda APIs, such as Invoke.
+	FunctionName *string `min:"1" type:"string" required:"true"`
+
+	// The function within your code that Lambda calls to begin execution. For Node.js,
+	// it is the module-name.export value in your function. For Java, it can be
+	// package.class-name::handler or package.class-name. For more information,
+	// see Lambda Function Handler (Java) (http://docs.aws.amazon.com/lambda/latest/dg/java-programming-model-handler-types.html).
+	Handler *string `type:"string" required:"true"`
+
+	// The amount of memory, in MB, your Lambda function is given. Lambda uses this
+	// memory size to infer the amount of CPU and memory allocated to your function.
+	// Your function use-case determines your CPU and memory requirements. For example,
+	// a database operation might need less memory compared to an image processing
+	// function. The default value is 128 MB. The value must be a multiple of 64
+	// MB.
+	MemorySize *int64 `min:"128" type:"integer"`
+
+	// This boolean parameter can be used to request AWS Lambda to create the Lambda
+	// function and publish a version as an atomic operation.
+	Publish *bool `type:"boolean"`
+
+	// The Amazon Resource Name (ARN) of the IAM role that Lambda assumes when it
+	// executes your function to access any other Amazon Web Services (AWS) resources.
+	// For more information, see AWS Lambda: How it Works (http://docs.aws.amazon.com/lambda/latest/dg/lambda-introduction.html).
+	Role *string `type:"string" required:"true"`
+
+	// The runtime environment for the Lambda function you are uploading. Currently,
+	// Lambda supports "java" and "nodejs" as the runtime.
+	Runtime *string `type:"string" required:"true" enum:"Runtime"`
+
+	// The function execution time at which Lambda should terminate the function.
+	// Because the execution time has cost implications, we recommend you set this
+	// value based on your expected execution time. The default is 3 seconds.
+	Timeout *int64 `min:"1" type:"integer"`
+}
+
+// String returns the string representation
+func (s CreateFunctionInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreateFunctionInput) GoString() string {
+	return s.String()
+}
+
+type DeleteAliasInput struct {
+	_ struct{} `type:"structure"`
+
+	// The Lambda function name for which the alias is created.
+	FunctionName *string `location:"uri" locationName:"FunctionName" min:"1" type:"string" required:"true"`
+
+	// Name of the alias to delete.
+	Name *string `location:"uri" locationName:"Name" min:"1" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s DeleteAliasInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteAliasInput) GoString() string {
+	return s.String()
+}
+
+type DeleteAliasOutput struct {
+	_ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s DeleteAliasOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteAliasOutput) GoString() string {
+	return s.String()
+}
+
+type DeleteEventSourceMappingInput struct {
+	_ struct{} `type:"structure"`
+
+	// The event source mapping ID.
+	UUID *string `location:"uri" locationName:"UUID" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s DeleteEventSourceMappingInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteEventSourceMappingInput) GoString() string {
+	return s.String()
+}
+
+type DeleteFunctionInput struct {
+	_ struct{} `type:"structure"`
+
+	// The Lambda function to delete.
+	//
+	// You can specify an unqualified function name (for example, "Thumbnail")
+	// or you can specify Amazon Resource Name (ARN) of the function (for example,
+	// "arn:aws:lambda:us-west-2:account-id:function:ThumbNail"). AWS Lambda also
+	// allows you to specify only the account ID qualifier (for example, "account-id:Thumbnail").
+	// Note that the length constraint applies only to the ARN. If you specify only
+	// the function name, it is limited to 64 characters in length.
+	FunctionName *string `location:"uri" locationName:"FunctionName" min:"1" type:"string" required:"true"`
+
+	// Using this optional parameter you can specify a function version (but not
+	// the $LATEST version) to direct AWS Lambda to delete a specific function version.
+	// If the function version has one or more aliases pointing to it, you will
+	// get an error, because you cannot delete a version that aliases point to.
+	// You can delete any function version but not the $LATEST, that is, you cannot
+	// specify $LATEST as the value of this parameter. The $LATEST version can be
+	// deleted only when you want to delete all the function versions and aliases.
+	//
+	// You can only specify a function version, not an alias name, using this parameter.
+	// You cannot delete a function version using its alias.
+	//
+	// If you don't specify this parameter, AWS Lambda will delete the function,
+	// including all its versions and aliases.
+	Qualifier *string `location:"querystring" locationName:"Qualifier" min:"1" type:"string"`
+}
+
+// String returns the string representation
+func (s DeleteFunctionInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteFunctionInput) GoString() string {
+	return s.String()
+}
+
+type DeleteFunctionOutput struct {
+	_ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s DeleteFunctionOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteFunctionOutput) GoString() string {
+	return s.String()
+}
+
+// Describes a mapping between an Amazon Kinesis stream and a Lambda function.
+type EventSourceMappingConfiguration struct {
+	_ struct{} `type:"structure"`
+
+	// The largest number of records that AWS Lambda will retrieve from your event
+	// source at the time of invoking your function. Your function receives an event
+	// with all the retrieved records.
+	BatchSize *int64 `min:"1" type:"integer"`
+
+	// The Amazon Resource Name (ARN) of the Amazon Kinesis stream that is the source
+	// of events.
+	EventSourceArn *string `type:"string"`
+
+	// The Lambda function to invoke when AWS Lambda detects an event on the stream.
+	FunctionArn *string `type:"string"`
+
+	// The UTC time string indicating the last time the event mapping was updated.
+	LastModified *time.Time `type:"timestamp" timestampFormat:"unix"`
+
+	// The result of the last AWS Lambda invocation of your Lambda function.
+	LastProcessingResult *string `type:"string"`
+
+	// The state of the event source mapping. It can be "Creating", "Enabled", "Disabled",
+	// "Enabling", "Disabling", "Updating", or "Deleting".
+	State *string `type:"string"`
+
+	// The reason the event source mapping is in its current state. It is either
+	// user-requested or an AWS Lambda-initiated state transition.
+	StateTransitionReason *string `type:"string"`
+
+	// The AWS Lambda assigned opaque identifier for the mapping.
+	UUID *string `type:"string"`
+}
+
+// String returns the string representation
+func (s EventSourceMappingConfiguration) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s EventSourceMappingConfiguration) GoString() string {
+	return s.String()
+}
+
+// The code for the Lambda function.
+type FunctionCode struct {
+	_ struct{} `type:"structure"`
+
+	// Amazon S3 bucket name where the .zip file containing your deployment package
+	// is stored. This bucket must reside in the same AWS region where you are creating
+	// the Lambda function.
+	S3Bucket *string `min:"3" type:"string"`
+
+	// The Amazon S3 object (the deployment package) key name you want to upload.
+	S3Key *string `min:"1" type:"string"`
+
+	// The Amazon S3 object (the deployment package) version you want to upload.
+	S3ObjectVersion *string `min:"1" type:"string"`
+
+	// A base64-encoded .zip file containing your deployment package. For more information
+	// about creating a .zip file, go to Execution Permissions (http://docs.aws.amazon.com/lambda/latest/dg/intro-permission-model.html#lambda-intro-execution-role.html)
+	// in the AWS Lambda Developer Guide.
+	ZipFile []byte `type:"blob"`
+}
+
+// String returns the string representation
+func (s FunctionCode) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s FunctionCode) GoString() string {
+	return s.String()
+}
+
+// The object for the Lambda function location.
+type FunctionCodeLocation struct {
+	_ struct{} `type:"structure"`
+
+	// The presigned URL you can use to download the function's .zip file that you
+	// previously uploaded. The URL is valid for up to 10 minutes.
+	Location *string `type:"string"`
+
+	// The repository from which you can download the function.
+	RepositoryType *string `type:"string"`
+}
+
+// String returns the string representation
+func (s FunctionCodeLocation) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s FunctionCodeLocation) GoString() string {
+	return s.String()
+}
+
+// A complex type that describes function metadata.
+type FunctionConfiguration struct {
+	_ struct{} `type:"structure"`
+
+	// It is the SHA256 hash of your function deployment package.
+	CodeSha256 *string `type:"string"`
+
+	// The size, in bytes, of the function .zip file you uploaded.
+	CodeSize *int64 `type:"long"`
+
+	// The user-provided description.
+	Description *string `type:"string"`
+
+	// The Amazon Resource Name (ARN) assigned to the function.
+	FunctionArn *string `type:"string"`
+
+	// The name of the function.
+	FunctionName *string `min:"1" type:"string"`
+
+	// The function Lambda calls to begin executing your function.
+	Handler *string `type:"string"`
+
+	// The timestamp of the last time you updated the function.
+	LastModified *string `type:"string"`
+
+	// The memory size, in MB, you configured for the function. Must be a multiple
+	// of 64 MB.
+	MemorySize *int64 `min:"128" type:"integer"`
+
+	// The Amazon Resource Name (ARN) of the IAM role that Lambda assumes when it
+	// executes your function to access any other Amazon Web Services (AWS) resources.
+	Role *string `type:"string"`
+
+	// The runtime environment for the Lambda function.
+	Runtime *string `type:"string" enum:"Runtime"`
+
+	// The function execution time at which Lambda should terminate the function.
+	// Because the execution time has cost implications, we recommend you set this
+	// value based on your expected execution time. The default is 3 seconds.
+	Timeout *int64 `min:"1" type:"integer"`
+
+	// The version of the Lambda function.
+	Version *string `min:"1" type:"string"`
+}
+
+// String returns the string representation
+func (s FunctionConfiguration) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s FunctionConfiguration) GoString() string {
+	return s.String()
+}
+
+type GetAliasInput struct {
+	_ struct{} `type:"structure"`
+
+	// Function name for which the alias is created. An alias is a subresource that
+	// exists only in the context of an existing Lambda function. So you must specify
+	// the function name.
+	FunctionName *string `location:"uri" locationName:"FunctionName" min:"1" type:"string" required:"true"`
+
+	// Name of the alias for which you want to retrieve information.
+	Name *string `location:"uri" locationName:"Name" min:"1" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s GetAliasInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetAliasInput) GoString() string {
+	return s.String()
+}
+
+type GetEventSourceMappingInput struct {
+	_ struct{} `type:"structure"`
+
+	// The AWS Lambda assigned ID of the event source mapping.
+	UUID *string `location:"uri" locationName:"UUID" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s GetEventSourceMappingInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetEventSourceMappingInput) GoString() string {
+	return s.String()
+}
+
+type GetFunctionConfigurationInput struct {
+	_ struct{} `type:"structure"`
+
+	// The name of the Lambda function for which you want to retrieve the configuration
+	// information.
+	//
+	// You can specify an unqualified function name (for example, "Thumbnail")
+	// or you can specify Amazon Resource Name (ARN) of the function (for example,
+	// "arn:aws:lambda:us-west-2:account-id:function:ThumbNail"). AWS Lambda also
+	// allows you to specify only the account ID qualifier (for example, "account-id:Thumbnail").
+	// Note that the length constraint applies only to the ARN. If you specify only
+	// the function name, it is limited to 64 characters in length.
+	FunctionName *string `location:"uri" locationName:"FunctionName" min:"1" type:"string" required:"true"`
+
+	// Using this optional parameter you can specify a function version or alias
+	// name. If you specify a function version, the API uses the qualified function
+	// ARN and returns information about the specific function version. If you specify
+	// an alias name, the API uses the alias ARN and returns information about the
+	// function version to which the alias points.
+	//
+	// If you don't specify this parameter, the API uses the unqualified function
+	// ARN, and returns information about the $LATEST function version.
+	Qualifier *string `location:"querystring" locationName:"Qualifier" min:"1" type:"string"`
+}
+
+// String returns the string representation
+func (s GetFunctionConfigurationInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetFunctionConfigurationInput) GoString() string {
+	return s.String()
+}
+
+type GetFunctionInput struct {
+	_ struct{} `type:"structure"`
+
+	// The Lambda function name.
+	//
+	// You can specify an unqualified function name (for example, "Thumbnail")
+	// or you can specify Amazon Resource Name (ARN) of the function (for example,
+	// "arn:aws:lambda:us-west-2:account-id:function:ThumbNail"). AWS Lambda also
+	// allows you to specify only the account ID qualifier (for example, "account-id:Thumbnail").
+	// Note that the length constraint applies only to the ARN. If you specify only
+	// the function name, it is limited to 64 characters in length.
+	FunctionName *string `location:"uri" locationName:"FunctionName" min:"1" type:"string" required:"true"`
+
+	// Use this optional parameter to specify a function version or alias name.
+	// If you specify a function version, the API uses the qualified function ARN
+	// for the request and returns information about the specific Lambda function
+	// version. If you specify an alias name, the API uses the alias ARN and returns
+	// information about the function version to which the alias points. If you
+	// don't provide this parameter, the API uses the unqualified function ARN and
+	// returns information about the $LATEST version of the Lambda function.
+	Qualifier *string `location:"querystring" locationName:"Qualifier" min:"1" type:"string"`
+}
+
+// String returns the string representation
+func (s GetFunctionInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetFunctionInput) GoString() string {
+	return s.String()
+}
+
+// This response contains the object for the Lambda function location (see API_FunctionCodeLocation).
+type GetFunctionOutput struct {
+	_ struct{} `type:"structure"`
+
+	// The object for the Lambda function location.
+	Code *FunctionCodeLocation `type:"structure"`
+
+	// A complex type that describes function metadata.
+	Configuration *FunctionConfiguration `type:"structure"`
+}
+
+// String returns the string representation
+func (s GetFunctionOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetFunctionOutput) GoString() string {
+	return s.String()
+}
+
+type GetPolicyInput struct {
+	_ struct{} `type:"structure"`
+
+	// Function name whose resource policy you want to retrieve.
+	//
+	// You can specify an unqualified function name (for example, "Thumbnail")
+	// or you can specify Amazon Resource Name (ARN) of the function (for example,
+	// "arn:aws:lambda:us-west-2:account-id:function:ThumbNail"). AWS Lambda also
+	// allows you to specify only the account ID qualifier (for example, "account-id:Thumbnail").
+	// Note that the length constraint applies only to the ARN. If you specify only
+	// the function name, it is limited to 64 characters in length.
+	FunctionName *string `location:"uri" locationName:"FunctionName" min:"1" type:"string" required:"true"`
+
+	// You can specify this optional query parameter to specify a function version
+	// or alias name, in which case this API will return all permissions associated
+	// with the specific ARN. If you don't provide this parameter, the API will
+	// return permissions that apply to the unqualified function ARN.
+	Qualifier *string `min:"1" type:"string"`
+}
+
+// String returns the string representation
+func (s GetPolicyInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetPolicyInput) GoString() string {
+	return s.String()
+}
+
+type GetPolicyOutput struct {
+	_ struct{} `type:"structure"`
+
+	// The resource policy associated with the specified function. The response
+	// returns it as a string using "\" as an escape character in the JSON.
+	Policy *string `type:"string"`
+}
+
+// String returns the string representation
+func (s GetPolicyOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetPolicyOutput) GoString() string {
+	return s.String()
+}
+
+type InvokeAsyncInput struct {
+	_ struct{} `type:"structure" payload:"InvokeArgs"`
+
+	// The Lambda function name.
+	FunctionName *string `location:"uri" locationName:"FunctionName" min:"1" type:"string" required:"true"`
+
+	// JSON that you want to provide to your Lambda function as input.
+	InvokeArgs io.ReadSeeker `type:"blob" required:"true"`
+}
+
+// String returns the string representation
+func (s InvokeAsyncInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s InvokeAsyncInput) GoString() string {
+	return s.String()
+}
+
+// Upon success, it returns an empty response. Otherwise, throws an exception.
+type InvokeAsyncOutput struct {
+	_ struct{} `type:"structure"`
+
+	// It will be 202 upon success.
+	Status *int64 `location:"statusCode" type:"integer"`
+}
+
+// String returns the string representation
+func (s InvokeAsyncOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s InvokeAsyncOutput) GoString() string {
+	return s.String()
+}
+
+type InvokeInput struct {
+	_ struct{} `type:"structure" payload:"Payload"`
+
+	// Using the ClientContext you can pass client-specific information to the Lambda
+	// function you are invoking. You can then process the client information in
+	// your Lambda function as you choose through the context variable. For an example
+	// of a ClientContext JSON, go to PutEvents (http://docs.aws.amazon.com/mobileanalytics/latest/ug/PutEvents.html)
+	// in the Amazon Mobile Analytics API Reference and User Guide.
+	//
+	// The ClientContext JSON must be base64-encoded.
+	ClientContext *string `location:"header" locationName:"X-Amz-Client-Context" type:"string"`
+
+	// The Lambda function name.
+	//
+	// You can specify an unqualified function name (for example, "Thumbnail")
+	// or you can specify Amazon Resource Name (ARN) of the function (for example,
+	// "arn:aws:lambda:us-west-2:account-id:function:ThumbNail"). AWS Lambda also
+	// allows you to specify only the account ID qualifier (for example, "account-id:Thumbnail").
+	// Note that the length constraint applies only to the ARN. If you specify only
+	// the function name, it is limited to 64 characters in length.
+	FunctionName *string `location:"uri" locationName:"FunctionName" min:"1" type:"string" required:"true"`
+
+	// By default, the Invoke API assumes the "RequestResponse" invocation type. You
+	// can optionally request asynchronous execution by specifying "Event" as the
+	// InvocationType. You can also use this parameter to request AWS Lambda to
+	// not execute the function but do some verification, such as if the caller
+	// is authorized to invoke the function and if the inputs are valid. You request
+	// this by specifying "DryRun" as the InvocationType. This is useful in a cross-account
+	// scenario when you want to verify access to a function without running it.
+	InvocationType *string `location:"header" locationName:"X-Amz-Invocation-Type" type:"string" enum:"InvocationType"`
+
+	// You can set this optional parameter to "Tail" in the request only if you
+	// specify the InvocationType parameter with value "RequestResponse". In this
+	// case, AWS Lambda returns the base64-encoded last 4 KB of log data produced
+	// by your Lambda function in the x-amz-log-results header.
+	LogType *string `location:"header" locationName:"X-Amz-Log-Type" type:"string" enum:"LogType"`
+
+	// JSON that you want to provide to your Lambda function as input.
+	Payload []byte `type:"blob"`
+
+	// You can use this optional parameter to specify a Lambda function version or
+	// alias name. If you specify a function version, the API uses the qualified
+	// function ARN to invoke a specific Lambda function. If you specify an alias
+	// name, the API uses the alias ARN to invoke the Lambda function version to
+	// which the alias points.
+	//
+	// If you don't provide this parameter, then the API uses the unqualified function
+	// ARN, which results in invocation of the $LATEST version.
+	Qualifier *string `location:"querystring" locationName:"Qualifier" min:"1" type:"string"`
+}
+
+// String returns the string representation
+func (s InvokeInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s InvokeInput) GoString() string {
+	return s.String()
+}
+
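A sketch of the "DryRun" invocation type described above, assuming the same svc client; it verifies that the caller may invoke the function without executing it, which is the documented cross-account use case:

resp, err := svc.Invoke(&lambda.InvokeInput{
	FunctionName:   aws.String("Thumbnail"), // placeholder
	InvocationType: aws.String("DryRun"),    // verify access only; do not run
})
if err == nil && aws.Int64Value(resp.StatusCode) == 204 {
	fmt.Println("caller is authorized to invoke the function")
}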
For the "DryRun" invocation + // type the status code will be 204. + StatusCode *int64 `location:"statusCode" type:"integer"` +} + +// String returns the string representation +func (s InvokeOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s InvokeOutput) GoString() string { + return s.String() +} + +type ListAliasesInput struct { + _ struct{} `type:"structure"` + + // Lambda function name for which the alias is created. + FunctionName *string `location:"uri" locationName:"FunctionName" min:"1" type:"string" required:"true"` + + // If you specify this optional parameter, the API returns only the aliases + // pointing to the specific Lambda function version, otherwise returns all aliases + // created for the Lambda function. + FunctionVersion *string `location:"querystring" locationName:"FunctionVersion" min:"1" type:"string"` + + // Optional string. An opaque pagination token returned from a previous ListAliases + // operation. If present, indicates where to continue the listing. + Marker *string `location:"querystring" locationName:"Marker" type:"string"` + + // Optional integer. Specifies the maximum number of aliases to return in response. + // This parameter value must be greater than 0. + MaxItems *int64 `location:"querystring" locationName:"MaxItems" min:"1" type:"integer"` +} + +// String returns the string representation +func (s ListAliasesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListAliasesInput) GoString() string { + return s.String() +} + +type ListAliasesOutput struct { + _ struct{} `type:"structure"` + + // An list of alises. + Aliases []*AliasConfiguration `type:"list"` + + // A string, present if there are more aliases. + NextMarker *string `type:"string"` +} + +// String returns the string representation +func (s ListAliasesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListAliasesOutput) GoString() string { + return s.String() +} + +type ListEventSourceMappingsInput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the Amazon Kinesis stream. + EventSourceArn *string `location:"querystring" locationName:"EventSourceArn" type:"string"` + + // The name of the Lambda function. + // + // You can specify an unqualified function name (for example, "Thumbnail") + // or you can specify Amazon Resource Name (ARN) of the function (for example, + // "arn:aws:lambda:us-west-2:account-id:function:ThumbNail"). AWS Lambda also + // allows you to specify only the account ID qualifier (for example, "account-id:Thumbnail"). + // Note that the length constraint applies only to the ARN. If you specify only + // the function name, it is limited to 64 character in length. + FunctionName *string `location:"querystring" locationName:"FunctionName" min:"1" type:"string"` + + // Optional string. An opaque pagination token returned from a previous ListEventSourceMappings + // operation. If present, specifies to continue the list from where the returning + // call left off. + Marker *string `location:"querystring" locationName:"Marker" type:"string"` + + // Optional integer. Specifies the maximum number of event sources to return + // in response. This value must be greater than 0. 
+	MaxItems *int64 `location:"querystring" locationName:"MaxItems" min:"1" type:"integer"`
+}
+
+// String returns the string representation
+func (s ListEventSourceMappingsInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListEventSourceMappingsInput) GoString() string {
+	return s.String()
+}
+
+// Contains a list of event sources (see EventSourceMappingConfiguration).
+type ListEventSourceMappingsOutput struct {
+	_ struct{} `type:"structure"`
+
+	// An array of EventSourceMappingConfiguration objects.
+	EventSourceMappings []*EventSourceMappingConfiguration `type:"list"`
+
+	// A string, present if there are more event source mappings.
+	NextMarker *string `type:"string"`
+}
+
+// String returns the string representation
+func (s ListEventSourceMappingsOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListEventSourceMappingsOutput) GoString() string {
+	return s.String()
+}
+
+type ListFunctionsInput struct {
+	_ struct{} `type:"structure"`
+
+	// Optional string. An opaque pagination token returned from a previous ListFunctions
+	// operation. If present, indicates where to continue the listing.
+	Marker *string `location:"querystring" locationName:"Marker" type:"string"`
+
+	// Optional integer. Specifies the maximum number of AWS Lambda functions to
+	// return in response. This parameter value must be greater than 0.
+	MaxItems *int64 `location:"querystring" locationName:"MaxItems" min:"1" type:"integer"`
+}
+
+// String returns the string representation
+func (s ListFunctionsInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListFunctionsInput) GoString() string {
+	return s.String()
+}
+
+// Contains a list of AWS Lambda function configurations (see FunctionConfiguration).
+type ListFunctionsOutput struct {
+	_ struct{} `type:"structure"`
+
+	// A list of Lambda functions.
+	Functions []*FunctionConfiguration `type:"list"`
+
+	// A string, present if there are more functions.
+	NextMarker *string `type:"string"`
+}
+
+// String returns the string representation
+func (s ListFunctionsOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListFunctionsOutput) GoString() string {
+	return s.String()
+}
+
+type ListVersionsByFunctionInput struct {
+	_ struct{} `type:"structure"`
+
+	// The function name whose versions you want to list. You can specify an unqualified
+	// function name (for example, "Thumbnail") or you can specify the Amazon Resource
+	// Name (ARN) of the function (for example, "arn:aws:lambda:us-west-2:account-id:function:ThumbNail").
+	// AWS Lambda also allows you to specify only the account ID qualifier (for
+	// example, "account-id:Thumbnail"). Note that the length constraint applies
+	// only to the ARN. If you specify only the function name, it is limited to
+	// 64 characters in length.
+	FunctionName *string `location:"uri" locationName:"FunctionName" min:"1" type:"string" required:"true"`
+
+	// Optional string. An opaque pagination token returned from a previous ListVersionsByFunction
+	// operation. If present, indicates where to continue the listing.
+	Marker *string `location:"querystring" locationName:"Marker" type:"string"`
+
+	// Optional integer. Specifies the maximum number of AWS Lambda function versions
+	// to return in response. This parameter value must be greater than 0.
+	MaxItems *int64 `location:"querystring" locationName:"MaxItems" min:"1" type:"integer"`
+}
+
+// String returns the string representation
+func (s ListVersionsByFunctionInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListVersionsByFunctionInput) GoString() string {
+	return s.String()
+}
+
+type ListVersionsByFunctionOutput struct {
+	_ struct{} `type:"structure"`
+
+	// A string, present if there are more function versions.
+	NextMarker *string `type:"string"`
+
+	// A list of Lambda function versions.
+	Versions []*FunctionConfiguration `type:"list"`
+}
+
+// String returns the string representation
+func (s ListVersionsByFunctionOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListVersionsByFunctionOutput) GoString() string {
+	return s.String()
+}
+
+type PublishVersionInput struct {
+	_ struct{} `type:"structure"`
+
+	// The SHA256 hash of the deployment package you want to publish. This provides
+	// validation on the code you are publishing. If you provide this parameter,
+	// the value must match the SHA256 of the HEAD version for the publication to
+	// succeed.
+	CodeSha256 *string `type:"string"`
+
+	// The description for the version you are publishing. If not provided, AWS
+	// Lambda copies the description from the HEAD version.
+	Description *string `type:"string"`
+
+	// The Lambda function name. You can specify an unqualified function name (for
+	// example, "Thumbnail") or you can specify the Amazon Resource Name (ARN) of
+	// the function (for example, "arn:aws:lambda:us-west-2:account-id:function:ThumbNail").
+	// AWS Lambda also allows you to specify only the account ID qualifier (for
+	// example, "account-id:Thumbnail"). Note that the length constraint applies
+	// only to the ARN. If you specify only the function name, it is limited to
+	// 64 characters in length.
+	FunctionName *string `location:"uri" locationName:"FunctionName" min:"1" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s PublishVersionInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s PublishVersionInput) GoString() string {
+	return s.String()
+}
+
+type RemovePermissionInput struct {
+	_ struct{} `type:"structure"`
+
+	// The Lambda function whose resource policy you want to remove a permission from.
+	//
+	// You can specify an unqualified function name (for example, "Thumbnail")
+	// or you can specify the Amazon Resource Name (ARN) of the function (for example,
+	// "arn:aws:lambda:us-west-2:account-id:function:ThumbNail"). AWS Lambda also
+	// allows you to specify only the account ID qualifier (for example, "account-id:Thumbnail").
+	// Note that the length constraint applies only to the ARN. If you specify only
+	// the function name, it is limited to 64 characters in length.
+	FunctionName *string `location:"uri" locationName:"FunctionName" min:"1" type:"string" required:"true"`
+
+	// You can specify this optional parameter to remove the permission associated
+	// with a specific function version or function alias. The value of this parameter
+	// is the function version or alias name. If you don't specify this parameter,
+	// the API removes the permission associated with the unqualified function ARN.
+	Qualifier *string `location:"querystring" locationName:"Qualifier" min:"1" type:"string"`
+
+	// Statement ID of the permission to remove.
+	StatementId *string `location:"uri" locationName:"StatementId" min:"1" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s RemovePermissionInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s RemovePermissionInput) GoString() string {
+	return s.String()
+}
+
+type RemovePermissionOutput struct {
+	_ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s RemovePermissionOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s RemovePermissionOutput) GoString() string {
+	return s.String()
+}
+
+type UpdateAliasInput struct {
+	_ struct{} `type:"structure"`
+
+	// You can optionally change the description of the alias using this parameter.
+	Description *string `type:"string"`
+
+	// The function name for which the alias is created.
+	FunctionName *string `location:"uri" locationName:"FunctionName" min:"1" type:"string" required:"true"`
+
+	// Using this parameter you can optionally change the Lambda function version
+	// to which the alias points.
+	FunctionVersion *string `min:"1" type:"string"`
+
+	// The alias name.
+	Name *string `location:"uri" locationName:"Name" min:"1" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s UpdateAliasInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s UpdateAliasInput) GoString() string {
+	return s.String()
+}
+
+type UpdateEventSourceMappingInput struct {
+	_ struct{} `type:"structure"`
+
+	// The maximum number of stream records that can be sent to your Lambda function
+	// for a single invocation.
+	BatchSize *int64 `min:"1" type:"integer"`
+
+	// Specifies whether AWS Lambda should actively poll the stream or not. If disabled,
+	// AWS Lambda will not poll the stream.
+	Enabled *bool `type:"boolean"`
+
+	// The Lambda function to which you want the stream records sent.
+	//
+	// You can specify an unqualified function name (for example, "Thumbnail")
+	// or you can specify the Amazon Resource Name (ARN) of the function (for example,
+	// "arn:aws:lambda:us-west-2:account-id:function:ThumbNail"). AWS Lambda also
+	// allows you to specify only the account ID qualifier (for example, "account-id:Thumbnail").
+	// Note that the length constraint applies only to the ARN. If you specify only
+	// the function name, it is limited to 64 characters in length.
+	FunctionName *string `min:"1" type:"string"`
+
+	// The event source mapping identifier.
+	UUID *string `location:"uri" locationName:"UUID" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s UpdateEventSourceMappingInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s UpdateEventSourceMappingInput) GoString() string {
+	return s.String()
+}
+
+type UpdateFunctionCodeInput struct {
+	_ struct{} `type:"structure"`
+
+	// The existing Lambda function name whose code you want to replace.
+	//
+	// You can specify an unqualified function name (for example, "Thumbnail")
+	// or you can specify the Amazon Resource Name (ARN) of the function (for example,
+	// "arn:aws:lambda:us-west-2:account-id:function:ThumbNail"). AWS Lambda also
+	// allows you to specify only the account ID qualifier (for example, "account-id:Thumbnail").
+	// Note that the length constraint applies only to the ARN.
+	// If you specify only the function name, it is limited to 64 characters in length.
+	FunctionName *string `location:"uri" locationName:"FunctionName" min:"1" type:"string" required:"true"`
+
+	// This boolean parameter can be used to request AWS Lambda to update the Lambda
+	// function and publish a version as an atomic operation.
+	Publish *bool `type:"boolean"`
+
+	// Amazon S3 bucket name where the .zip file containing your deployment package
+	// is stored. This bucket must reside in the same AWS region where you are creating
+	// the Lambda function.
+	S3Bucket *string `min:"3" type:"string"`
+
+	// The Amazon S3 object (the deployment package) key name you want to upload.
+	S3Key *string `min:"1" type:"string"`
+
+	// The Amazon S3 object (the deployment package) version you want to upload.
+	S3ObjectVersion *string `min:"1" type:"string"`
+
+	// Base64-encoded .zip file containing your packaged source code.
+	ZipFile []byte `type:"blob"`
+}
+
+// String returns the string representation
+func (s UpdateFunctionCodeInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s UpdateFunctionCodeInput) GoString() string {
+	return s.String()
+}
+
+type UpdateFunctionConfigurationInput struct {
+	_ struct{} `type:"structure"`
+
+	// A short user-defined function description. AWS Lambda does not use this value.
+	// Assign a meaningful description as you see fit.
+	Description *string `type:"string"`
+
+	// The name of the Lambda function.
+	//
+	// You can specify an unqualified function name (for example, "Thumbnail")
+	// or you can specify the Amazon Resource Name (ARN) of the function (for example,
+	// "arn:aws:lambda:us-west-2:account-id:function:ThumbNail"). AWS Lambda also
+	// allows you to specify only the account ID qualifier (for example, "account-id:Thumbnail").
+	// Note that the length constraint applies only to the ARN. If you specify only
+	// the function name, it is limited to 64 characters in length.
+	FunctionName *string `location:"uri" locationName:"FunctionName" min:"1" type:"string" required:"true"`
+
+	// The function that Lambda calls to begin executing your function. For Node.js,
+	// it is the module-name.export value in your function.
+	Handler *string `type:"string"`
+
+	// The amount of memory, in MB, your Lambda function is given. AWS Lambda uses
+	// this memory size to infer the amount of CPU allocated to your function. Your
+	// function use-case determines your CPU and memory requirements. For example,
+	// a database operation might need less memory compared to an image processing
+	// function. The default value is 128 MB. The value must be a multiple of 64
+	// MB.
+	MemorySize *int64 `min:"128" type:"integer"`
+
+	// The Amazon Resource Name (ARN) of the IAM role that Lambda will assume when
+	// it executes your function.
+	Role *string `type:"string"`
+
+	// The function execution time at which AWS Lambda should terminate the function.
+	// Because the execution time has cost implications, we recommend you set this
+	// value based on your expected execution time. The default is 3 seconds.
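+	//
+	// Editor's illustration (not generated documentation): a function that needs
+	// roughly 20 seconds and 256 MB could be updated with
+	//
+	//	svc.UpdateFunctionConfiguration(&lambda.UpdateFunctionConfigurationInput{
+	//		FunctionName: aws.String("Thumbnail"),
+	//		MemorySize:   aws.Int64(256), // a multiple of 64 MB, minimum 128
+	//		Timeout:      aws.Int64(30),  // seconds
+	//	})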
+ Timeout *int64 `min:"1" type:"integer"` +} + +// String returns the string representation +func (s UpdateFunctionConfigurationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateFunctionConfigurationInput) GoString() string { + return s.String() +} + +const ( + // @enum EventSourcePosition + EventSourcePositionTrimHorizon = "TRIM_HORIZON" + // @enum EventSourcePosition + EventSourcePositionLatest = "LATEST" +) + +const ( + // @enum InvocationType + InvocationTypeEvent = "Event" + // @enum InvocationType + InvocationTypeRequestResponse = "RequestResponse" + // @enum InvocationType + InvocationTypeDryRun = "DryRun" +) + +const ( + // @enum LogType + LogTypeNone = "None" + // @enum LogType + LogTypeTail = "Tail" +) + +const ( + // @enum Runtime + RuntimeNodejs = "nodejs" + // @enum Runtime + RuntimeJava8 = "java8" + // @enum Runtime + RuntimePython27 = "python2.7" +) diff --git a/vendor/github.com/aws/aws-sdk-go/service/lambda/examples_test.go b/vendor/github.com/aws/aws-sdk-go/service/lambda/examples_test.go new file mode 100644 index 000000000..01566a2f5 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/lambda/examples_test.go @@ -0,0 +1,539 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +package lambda_test + +import ( + "bytes" + "fmt" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/session" + "github.com/aws/aws-sdk-go/service/lambda" +) + +var _ time.Duration +var _ bytes.Buffer + +func ExampleLambda_AddPermission() { + svc := lambda.New(session.New()) + + params := &lambda.AddPermissionInput{ + Action: aws.String("Action"), // Required + FunctionName: aws.String("FunctionName"), // Required + Principal: aws.String("Principal"), // Required + StatementId: aws.String("StatementId"), // Required + Qualifier: aws.String("Qualifier"), + SourceAccount: aws.String("SourceOwner"), + SourceArn: aws.String("Arn"), + } + resp, err := svc.AddPermission(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleLambda_CreateAlias() { + svc := lambda.New(session.New()) + + params := &lambda.CreateAliasInput{ + FunctionName: aws.String("FunctionName"), // Required + FunctionVersion: aws.String("Version"), // Required + Name: aws.String("Alias"), // Required + Description: aws.String("Description"), + } + resp, err := svc.CreateAlias(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleLambda_CreateEventSourceMapping() { + svc := lambda.New(session.New()) + + params := &lambda.CreateEventSourceMappingInput{ + EventSourceArn: aws.String("Arn"), // Required + FunctionName: aws.String("FunctionName"), // Required + StartingPosition: aws.String("EventSourcePosition"), // Required + BatchSize: aws.Int64(1), + Enabled: aws.Bool(true), + } + resp, err := svc.CreateEventSourceMapping(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleLambda_CreateFunction() { + svc := lambda.New(session.New()) + + params := &lambda.CreateFunctionInput{ + Code: &lambda.FunctionCode{ // Required + S3Bucket: aws.String("S3Bucket"), + S3Key: aws.String("S3Key"), + S3ObjectVersion: aws.String("S3ObjectVersion"), + ZipFile: []byte("PAYLOAD"), + }, + FunctionName: aws.String("FunctionName"), // Required + Handler: aws.String("Handler"), // Required + Role: aws.String("RoleArn"), // Required + Runtime: aws.String("Runtime"), // Required + Description: aws.String("Description"), + MemorySize: aws.Int64(1), + Publish: aws.Bool(true), + Timeout: aws.Int64(1), + } + resp, err := svc.CreateFunction(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleLambda_DeleteAlias() { + svc := lambda.New(session.New()) + + params := &lambda.DeleteAliasInput{ + FunctionName: aws.String("FunctionName"), // Required + Name: aws.String("Alias"), // Required + } + resp, err := svc.DeleteAlias(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleLambda_DeleteEventSourceMapping() { + svc := lambda.New(session.New()) + + params := &lambda.DeleteEventSourceMappingInput{ + UUID: aws.String("String"), // Required + } + resp, err := svc.DeleteEventSourceMapping(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleLambda_DeleteFunction() { + svc := lambda.New(session.New()) + + params := &lambda.DeleteFunctionInput{ + FunctionName: aws.String("FunctionName"), // Required + Qualifier: aws.String("Qualifier"), + } + resp, err := svc.DeleteFunction(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleLambda_GetAlias() { + svc := lambda.New(session.New()) + + params := &lambda.GetAliasInput{ + FunctionName: aws.String("FunctionName"), // Required + Name: aws.String("Alias"), // Required + } + resp, err := svc.GetAlias(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleLambda_GetEventSourceMapping() { + svc := lambda.New(session.New()) + + params := &lambda.GetEventSourceMappingInput{ + UUID: aws.String("String"), // Required + } + resp, err := svc.GetEventSourceMapping(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleLambda_GetFunction() { + svc := lambda.New(session.New()) + + params := &lambda.GetFunctionInput{ + FunctionName: aws.String("FunctionName"), // Required + Qualifier: aws.String("Qualifier"), + } + resp, err := svc.GetFunction(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleLambda_GetFunctionConfiguration() { + svc := lambda.New(session.New()) + + params := &lambda.GetFunctionConfigurationInput{ + FunctionName: aws.String("FunctionName"), // Required + Qualifier: aws.String("Qualifier"), + } + resp, err := svc.GetFunctionConfiguration(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleLambda_GetPolicy() { + svc := lambda.New(session.New()) + + params := &lambda.GetPolicyInput{ + FunctionName: aws.String("FunctionName"), // Required + Qualifier: aws.String("Qualifier"), + } + resp, err := svc.GetPolicy(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleLambda_Invoke() { + svc := lambda.New(session.New()) + + params := &lambda.InvokeInput{ + FunctionName: aws.String("FunctionName"), // Required + ClientContext: aws.String("String"), + InvocationType: aws.String("InvocationType"), + LogType: aws.String("LogType"), + Payload: []byte("PAYLOAD"), + Qualifier: aws.String("Qualifier"), + } + resp, err := svc.Invoke(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleLambda_InvokeAsync() { + svc := lambda.New(session.New()) + + params := &lambda.InvokeAsyncInput{ + FunctionName: aws.String("FunctionName"), // Required + InvokeArgs: bytes.NewReader([]byte("PAYLOAD")), // Required + } + resp, err := svc.InvokeAsync(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleLambda_ListAliases() { + svc := lambda.New(session.New()) + + params := &lambda.ListAliasesInput{ + FunctionName: aws.String("FunctionName"), // Required + FunctionVersion: aws.String("Version"), + Marker: aws.String("String"), + MaxItems: aws.Int64(1), + } + resp, err := svc.ListAliases(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleLambda_ListEventSourceMappings() { + svc := lambda.New(session.New()) + + params := &lambda.ListEventSourceMappingsInput{ + EventSourceArn: aws.String("Arn"), + FunctionName: aws.String("FunctionName"), + Marker: aws.String("String"), + MaxItems: aws.Int64(1), + } + resp, err := svc.ListEventSourceMappings(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. 
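+		// For instance (editor's note; awserr lives in
+		// github.com/aws/aws-sdk-go/aws/awserr):
+		//
+		//	if awsErr, ok := err.(awserr.Error); ok {
+		//		fmt.Println(awsErr.Code(), awsErr.Message())
+		//	}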
+ fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleLambda_ListFunctions() { + svc := lambda.New(session.New()) + + params := &lambda.ListFunctionsInput{ + Marker: aws.String("String"), + MaxItems: aws.Int64(1), + } + resp, err := svc.ListFunctions(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleLambda_ListVersionsByFunction() { + svc := lambda.New(session.New()) + + params := &lambda.ListVersionsByFunctionInput{ + FunctionName: aws.String("FunctionName"), // Required + Marker: aws.String("String"), + MaxItems: aws.Int64(1), + } + resp, err := svc.ListVersionsByFunction(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleLambda_PublishVersion() { + svc := lambda.New(session.New()) + + params := &lambda.PublishVersionInput{ + FunctionName: aws.String("FunctionName"), // Required + CodeSha256: aws.String("String"), + Description: aws.String("Description"), + } + resp, err := svc.PublishVersion(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleLambda_RemovePermission() { + svc := lambda.New(session.New()) + + params := &lambda.RemovePermissionInput{ + FunctionName: aws.String("FunctionName"), // Required + StatementId: aws.String("StatementId"), // Required + Qualifier: aws.String("Qualifier"), + } + resp, err := svc.RemovePermission(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleLambda_UpdateAlias() { + svc := lambda.New(session.New()) + + params := &lambda.UpdateAliasInput{ + FunctionName: aws.String("FunctionName"), // Required + Name: aws.String("Alias"), // Required + Description: aws.String("Description"), + FunctionVersion: aws.String("Version"), + } + resp, err := svc.UpdateAlias(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleLambda_UpdateEventSourceMapping() { + svc := lambda.New(session.New()) + + params := &lambda.UpdateEventSourceMappingInput{ + UUID: aws.String("String"), // Required + BatchSize: aws.Int64(1), + Enabled: aws.Bool(true), + FunctionName: aws.String("FunctionName"), + } + resp, err := svc.UpdateEventSourceMapping(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+	fmt.Println(resp)
+}
+
+func ExampleLambda_UpdateFunctionCode() {
+	svc := lambda.New(session.New())
+
+	params := &lambda.UpdateFunctionCodeInput{
+		FunctionName:    aws.String("FunctionName"), // Required
+		Publish:         aws.Bool(true),
+		S3Bucket:        aws.String("S3Bucket"),
+		S3Key:           aws.String("S3Key"),
+		S3ObjectVersion: aws.String("S3ObjectVersion"),
+		ZipFile:         []byte("PAYLOAD"),
+	}
+	resp, err := svc.UpdateFunctionCode(params)
+
+	if err != nil {
+		// Print the error, cast err to awserr.Error to get the Code and
+		// Message from an error.
+		fmt.Println(err.Error())
+		return
+	}
+
+	// Pretty-print the response data.
+	fmt.Println(resp)
+}
+
+func ExampleLambda_UpdateFunctionConfiguration() {
+	svc := lambda.New(session.New())
+
+	params := &lambda.UpdateFunctionConfigurationInput{
+		FunctionName: aws.String("FunctionName"), // Required
+		Description:  aws.String("Description"),
+		Handler:      aws.String("Handler"),
+		MemorySize:   aws.Int64(1),
+		Role:         aws.String("RoleArn"),
+		Timeout:      aws.Int64(1),
+	}
+	resp, err := svc.UpdateFunctionConfiguration(params)
+
+	if err != nil {
+		// Print the error, cast err to awserr.Error to get the Code and
+		// Message from an error.
+		fmt.Println(err.Error())
+		return
+	}
+
+	// Pretty-print the response data.
+	fmt.Println(resp)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/service/lambda/lambdaiface/interface.go b/vendor/github.com/aws/aws-sdk-go/service/lambda/lambdaiface/interface.go
new file mode 100644
index 000000000..03c2a40d9
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/lambda/lambdaiface/interface.go
@@ -0,0 +1,114 @@
+// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT.
+
+// Package lambdaiface provides an interface for AWS Lambda.
+package lambdaiface
+
+import (
+	"github.com/aws/aws-sdk-go/aws/request"
+	"github.com/aws/aws-sdk-go/service/lambda"
+)
+
+// LambdaAPI is the interface type for lambda.Lambda.
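+//
+// A sketch of the intended use (editor's illustration; mockLambdaClient is a
+// hypothetical name): embed LambdaAPI in a test double and override only the
+// operations under test:
+//
+//	type mockLambdaClient struct {
+//		lambdaiface.LambdaAPI
+//	}
+//
+//	func (m *mockLambdaClient) ListFunctions(in *lambda.ListFunctionsInput) (*lambda.ListFunctionsOutput, error) {
+//		return &lambda.ListFunctionsOutput{}, nil
+//	}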
+type LambdaAPI interface {
+	AddPermissionRequest(*lambda.AddPermissionInput) (*request.Request, *lambda.AddPermissionOutput)
+
+	AddPermission(*lambda.AddPermissionInput) (*lambda.AddPermissionOutput, error)
+
+	CreateAliasRequest(*lambda.CreateAliasInput) (*request.Request, *lambda.AliasConfiguration)
+
+	CreateAlias(*lambda.CreateAliasInput) (*lambda.AliasConfiguration, error)
+
+	CreateEventSourceMappingRequest(*lambda.CreateEventSourceMappingInput) (*request.Request, *lambda.EventSourceMappingConfiguration)
+
+	CreateEventSourceMapping(*lambda.CreateEventSourceMappingInput) (*lambda.EventSourceMappingConfiguration, error)
+
+	CreateFunctionRequest(*lambda.CreateFunctionInput) (*request.Request, *lambda.FunctionConfiguration)
+
+	CreateFunction(*lambda.CreateFunctionInput) (*lambda.FunctionConfiguration, error)
+
+	DeleteAliasRequest(*lambda.DeleteAliasInput) (*request.Request, *lambda.DeleteAliasOutput)
+
+	DeleteAlias(*lambda.DeleteAliasInput) (*lambda.DeleteAliasOutput, error)
+
+	DeleteEventSourceMappingRequest(*lambda.DeleteEventSourceMappingInput) (*request.Request, *lambda.EventSourceMappingConfiguration)
+
+	DeleteEventSourceMapping(*lambda.DeleteEventSourceMappingInput) (*lambda.EventSourceMappingConfiguration, error)
+
+	DeleteFunctionRequest(*lambda.DeleteFunctionInput) (*request.Request, *lambda.DeleteFunctionOutput)
+
+	DeleteFunction(*lambda.DeleteFunctionInput) (*lambda.DeleteFunctionOutput, error)
+
+	GetAliasRequest(*lambda.GetAliasInput) (*request.Request, *lambda.AliasConfiguration)
+
+	GetAlias(*lambda.GetAliasInput) (*lambda.AliasConfiguration, error)
+
+	GetEventSourceMappingRequest(*lambda.GetEventSourceMappingInput) (*request.Request, *lambda.EventSourceMappingConfiguration)
+
+	GetEventSourceMapping(*lambda.GetEventSourceMappingInput) (*lambda.EventSourceMappingConfiguration, error)
+
+	GetFunctionRequest(*lambda.GetFunctionInput) (*request.Request, *lambda.GetFunctionOutput)
+
+	GetFunction(*lambda.GetFunctionInput) (*lambda.GetFunctionOutput, error)
+
+	GetFunctionConfigurationRequest(*lambda.GetFunctionConfigurationInput) (*request.Request, *lambda.FunctionConfiguration)
+
+	GetFunctionConfiguration(*lambda.GetFunctionConfigurationInput) (*lambda.FunctionConfiguration, error)
+
+	GetPolicyRequest(*lambda.GetPolicyInput) (*request.Request, *lambda.GetPolicyOutput)
+
+	GetPolicy(*lambda.GetPolicyInput) (*lambda.GetPolicyOutput, error)
+
+	InvokeRequest(*lambda.InvokeInput) (*request.Request, *lambda.InvokeOutput)
+
+	Invoke(*lambda.InvokeInput) (*lambda.InvokeOutput, error)
+
+	InvokeAsyncRequest(*lambda.InvokeAsyncInput) (*request.Request, *lambda.InvokeAsyncOutput)
+
+	InvokeAsync(*lambda.InvokeAsyncInput) (*lambda.InvokeAsyncOutput, error)
+
+	ListAliasesRequest(*lambda.ListAliasesInput) (*request.Request, *lambda.ListAliasesOutput)
+
+	ListAliases(*lambda.ListAliasesInput) (*lambda.ListAliasesOutput, error)
+
+	ListEventSourceMappingsRequest(*lambda.ListEventSourceMappingsInput) (*request.Request, *lambda.ListEventSourceMappingsOutput)
+
+	ListEventSourceMappings(*lambda.ListEventSourceMappingsInput) (*lambda.ListEventSourceMappingsOutput, error)
+
+	ListEventSourceMappingsPages(*lambda.ListEventSourceMappingsInput, func(*lambda.ListEventSourceMappingsOutput, bool) bool) error
+
+	ListFunctionsRequest(*lambda.ListFunctionsInput) (*request.Request, *lambda.ListFunctionsOutput)
+
+	ListFunctions(*lambda.ListFunctionsInput) (*lambda.ListFunctionsOutput, error)
+
+	ListFunctionsPages(*lambda.ListFunctionsInput, func(*lambda.ListFunctionsOutput, bool) bool) error
+
+	ListVersionsByFunctionRequest(*lambda.ListVersionsByFunctionInput) (*request.Request, *lambda.ListVersionsByFunctionOutput)
+
+	ListVersionsByFunction(*lambda.ListVersionsByFunctionInput) (*lambda.ListVersionsByFunctionOutput, error)
+
+	PublishVersionRequest(*lambda.PublishVersionInput) (*request.Request, *lambda.FunctionConfiguration)
+
+	PublishVersion(*lambda.PublishVersionInput) (*lambda.FunctionConfiguration, error)
+
+	RemovePermissionRequest(*lambda.RemovePermissionInput) (*request.Request, *lambda.RemovePermissionOutput)
+
+	RemovePermission(*lambda.RemovePermissionInput) (*lambda.RemovePermissionOutput, error)
+
+	UpdateAliasRequest(*lambda.UpdateAliasInput) (*request.Request, *lambda.AliasConfiguration)
+
+	UpdateAlias(*lambda.UpdateAliasInput) (*lambda.AliasConfiguration, error)
+
+	UpdateEventSourceMappingRequest(*lambda.UpdateEventSourceMappingInput) (*request.Request, *lambda.EventSourceMappingConfiguration)
+
+	UpdateEventSourceMapping(*lambda.UpdateEventSourceMappingInput) (*lambda.EventSourceMappingConfiguration, error)
+
+	UpdateFunctionCodeRequest(*lambda.UpdateFunctionCodeInput) (*request.Request, *lambda.FunctionConfiguration)
+
+	UpdateFunctionCode(*lambda.UpdateFunctionCodeInput) (*lambda.FunctionConfiguration, error)
+
+	UpdateFunctionConfigurationRequest(*lambda.UpdateFunctionConfigurationInput) (*request.Request, *lambda.FunctionConfiguration)
+
+	UpdateFunctionConfiguration(*lambda.UpdateFunctionConfigurationInput) (*lambda.FunctionConfiguration, error)
+}
+
+var _ LambdaAPI = (*lambda.Lambda)(nil)
diff --git a/vendor/github.com/aws/aws-sdk-go/service/lambda/service.go b/vendor/github.com/aws/aws-sdk-go/service/lambda/service.go
new file mode 100644
index 000000000..bac7c3af7
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/lambda/service.go
@@ -0,0 +1,92 @@
+// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT.
+
+package lambda
+
+import (
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/aws/client"
+	"github.com/aws/aws-sdk-go/aws/client/metadata"
+	"github.com/aws/aws-sdk-go/aws/request"
+	"github.com/aws/aws-sdk-go/private/protocol/restjson"
+	"github.com/aws/aws-sdk-go/private/signer/v4"
+)
+
+// Overview
+//
+// This is the AWS Lambda API Reference. The AWS Lambda Developer Guide provides
+// additional information. For the service overview, go to What is AWS Lambda
+// (http://docs.aws.amazon.com/lambda/latest/dg/welcome.html), and for information
+// about how the service works, go to AWS Lambda: How it Works (http://docs.aws.amazon.com/lambda/latest/dg/lambda-introduction.html)
+// in the AWS Lambda Developer Guide.
+//
+// The service client's operations are safe to be used concurrently.
+// It is not safe to mutate any of the client's properties though.
+type Lambda struct {
+	*client.Client
+}
+
+// Used for custom client initialization logic
+var initClient func(*client.Client)
+
+// Used for custom request initialization logic
+var initRequest func(*request.Request)
+
+// A ServiceName is the name of the service the client will make API calls to.
+const ServiceName = "lambda"
+
+// New creates a new instance of the Lambda client with a session.
+// If additional configuration is needed for the client instance use the optional
+// aws.Config parameter to add your extra config.
+//
+// Example:
+//	// Create a Lambda client from just a session.
+//	svc := lambda.New(mySession)
+//
+//	// Create a Lambda client with additional configuration
+//	svc := lambda.New(mySession, aws.NewConfig().WithRegion("us-west-2"))
+func New(p client.ConfigProvider, cfgs ...*aws.Config) *Lambda {
+	c := p.ClientConfig(ServiceName, cfgs...)
+	return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion)
+}
+
+// newClient creates, initializes and returns a new service client instance.
+func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *Lambda {
+	svc := &Lambda{
+		Client: client.New(
+			cfg,
+			metadata.ClientInfo{
+				ServiceName:   ServiceName,
+				SigningRegion: signingRegion,
+				Endpoint:      endpoint,
+				APIVersion:    "2015-03-31",
+			},
+			handlers,
+		),
+	}
+
+	// Handlers
+	svc.Handlers.Sign.PushBack(v4.Sign)
+	svc.Handlers.Build.PushBack(restjson.Build)
+	svc.Handlers.Unmarshal.PushBack(restjson.Unmarshal)
+	svc.Handlers.UnmarshalMeta.PushBack(restjson.UnmarshalMeta)
+	svc.Handlers.UnmarshalError.PushBack(restjson.UnmarshalError)
+
+	// Run custom client initialization if present
+	if initClient != nil {
+		initClient(svc.Client)
+	}
+
+	return svc
+}
+
+// newRequest creates a new request for a Lambda operation and runs any
+// custom request initialization.
+func (c *Lambda) newRequest(op *request.Operation, params, data interface{}) *request.Request {
+	req := c.NewRequest(op, params, data)
+
+	// Run custom request initialization if present
+	if initRequest != nil {
+		initRequest(req)
+	}
+
+	return req
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/service/opsworks/api.go b/vendor/github.com/aws/aws-sdk-go/service/opsworks/api.go
new file mode 100644
index 000000000..eef0f2612
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/opsworks/api.go
@@ -0,0 +1,7712 @@
+// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT.
+
+// Package opsworks provides a client for AWS OpsWorks.
+package opsworks
+
+import (
+	"github.com/aws/aws-sdk-go/aws/awsutil"
+	"github.com/aws/aws-sdk-go/aws/request"
+)
+
+const opAssignInstance = "AssignInstance"
+
+// AssignInstanceRequest generates a request for the AssignInstance operation.
+func (c *OpsWorks) AssignInstanceRequest(input *AssignInstanceInput) (req *request.Request, output *AssignInstanceOutput) {
+	op := &request.Operation{
+		Name:       opAssignInstance,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &AssignInstanceInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &AssignInstanceOutput{}
+	req.Data = output
+	return
+}
+
+// Assigns a registered instance to a layer.
+//
+// You can assign registered on-premises instances to any layer type. You
+// can assign registered Amazon EC2 instances only to custom layers. You cannot
+// use this action with instances that were created with AWS OpsWorks. Required
+// Permissions: To use this action, an AWS Identity and Access Management (IAM)
+// user must have a Manage permissions level for the stack or an attached policy
+// that explicitly grants permissions. For more information on user permissions,
+// see Managing User Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html).
+func (c *OpsWorks) AssignInstance(input *AssignInstanceInput) (*AssignInstanceOutput, error) {
+	req, out := c.AssignInstanceRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+const opAssignVolume = "AssignVolume"
+
+// AssignVolumeRequest generates a request for the AssignVolume operation.
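+//
+// A usage sketch (editor's illustration; the volume ID value is hypothetical):
+// the Request form returns the request and output without sending it, so you
+// can inspect or alter the request before calling Send yourself:
+//
+//	req, out := svc.AssignVolumeRequest(&opsworks.AssignVolumeInput{
+//		VolumeId: aws.String("vol-id"),
+//	})
+//	if err := req.Send(); err == nil {
+//		fmt.Println(out)
+//	}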
+func (c *OpsWorks) AssignVolumeRequest(input *AssignVolumeInput) (req *request.Request, output *AssignVolumeOutput) { + op := &request.Operation{ + Name: opAssignVolume, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &AssignVolumeInput{} + } + + req = c.newRequest(op, input, output) + output = &AssignVolumeOutput{} + req.Data = output + return +} + +// Assigns one of the stack's registered Amazon EBS volumes to a specified instance. +// The volume must first be registered with the stack by calling RegisterVolume. +// After you register the volume, you must call UpdateVolume to specify a mount +// point before calling AssignVolume. For more information, see Resource Management +// (http://docs.aws.amazon.com/opsworks/latest/userguide/resources.html). +// +// Required Permissions: To use this action, an IAM user must have a Manage +// permissions level for the stack, or an attached policy that explicitly grants +// permissions. For more information on user permissions, see Managing User +// Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). +func (c *OpsWorks) AssignVolume(input *AssignVolumeInput) (*AssignVolumeOutput, error) { + req, out := c.AssignVolumeRequest(input) + err := req.Send() + return out, err +} + +const opAssociateElasticIp = "AssociateElasticIp" + +// AssociateElasticIpRequest generates a request for the AssociateElasticIp operation. +func (c *OpsWorks) AssociateElasticIpRequest(input *AssociateElasticIpInput) (req *request.Request, output *AssociateElasticIpOutput) { + op := &request.Operation{ + Name: opAssociateElasticIp, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &AssociateElasticIpInput{} + } + + req = c.newRequest(op, input, output) + output = &AssociateElasticIpOutput{} + req.Data = output + return +} + +// Associates one of the stack's registered Elastic IP addresses with a specified +// instance. The address must first be registered with the stack by calling +// RegisterElasticIp. For more information, see Resource Management (http://docs.aws.amazon.com/opsworks/latest/userguide/resources.html). +// +// Required Permissions: To use this action, an IAM user must have a Manage +// permissions level for the stack, or an attached policy that explicitly grants +// permissions. For more information on user permissions, see Managing User +// Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). +func (c *OpsWorks) AssociateElasticIp(input *AssociateElasticIpInput) (*AssociateElasticIpOutput, error) { + req, out := c.AssociateElasticIpRequest(input) + err := req.Send() + return out, err +} + +const opAttachElasticLoadBalancer = "AttachElasticLoadBalancer" + +// AttachElasticLoadBalancerRequest generates a request for the AttachElasticLoadBalancer operation. +func (c *OpsWorks) AttachElasticLoadBalancerRequest(input *AttachElasticLoadBalancerInput) (req *request.Request, output *AttachElasticLoadBalancerOutput) { + op := &request.Operation{ + Name: opAttachElasticLoadBalancer, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &AttachElasticLoadBalancerInput{} + } + + req = c.newRequest(op, input, output) + output = &AttachElasticLoadBalancerOutput{} + req.Data = output + return +} + +// Attaches an Elastic Load Balancing load balancer to a specified layer. For +// more information, see Elastic Load Balancing (http://docs.aws.amazon.com/opsworks/latest/userguide/load-balancer-elb.html). 
+// +// You must create the Elastic Load Balancing instance separately, by using +// the Elastic Load Balancing console, API, or CLI. For more information, see +// Elastic Load Balancing Developer Guide (http://docs.aws.amazon.com/ElasticLoadBalancing/latest/DeveloperGuide/Welcome.html). +// +// Required Permissions: To use this action, an IAM user must have a Manage +// permissions level for the stack, or an attached policy that explicitly grants +// permissions. For more information on user permissions, see Managing User +// Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). +func (c *OpsWorks) AttachElasticLoadBalancer(input *AttachElasticLoadBalancerInput) (*AttachElasticLoadBalancerOutput, error) { + req, out := c.AttachElasticLoadBalancerRequest(input) + err := req.Send() + return out, err +} + +const opCloneStack = "CloneStack" + +// CloneStackRequest generates a request for the CloneStack operation. +func (c *OpsWorks) CloneStackRequest(input *CloneStackInput) (req *request.Request, output *CloneStackOutput) { + op := &request.Operation{ + Name: opCloneStack, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CloneStackInput{} + } + + req = c.newRequest(op, input, output) + output = &CloneStackOutput{} + req.Data = output + return +} + +// Creates a clone of a specified stack. For more information, see Clone a Stack +// (http://docs.aws.amazon.com/opsworks/latest/userguide/workingstacks-cloning.html). +// By default, all parameters are set to the values used by the parent stack. +// +// Required Permissions: To use this action, an IAM user must have an attached +// policy that explicitly grants permissions. For more information on user permissions, +// see Managing User Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). +func (c *OpsWorks) CloneStack(input *CloneStackInput) (*CloneStackOutput, error) { + req, out := c.CloneStackRequest(input) + err := req.Send() + return out, err +} + +const opCreateApp = "CreateApp" + +// CreateAppRequest generates a request for the CreateApp operation. +func (c *OpsWorks) CreateAppRequest(input *CreateAppInput) (req *request.Request, output *CreateAppOutput) { + op := &request.Operation{ + Name: opCreateApp, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateAppInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateAppOutput{} + req.Data = output + return +} + +// Creates an app for a specified stack. For more information, see Creating +// Apps (http://docs.aws.amazon.com/opsworks/latest/userguide/workingapps-creating.html). +// +// Required Permissions: To use this action, an IAM user must have a Manage +// permissions level for the stack, or an attached policy that explicitly grants +// permissions. For more information on user permissions, see Managing User +// Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). +func (c *OpsWorks) CreateApp(input *CreateAppInput) (*CreateAppOutput, error) { + req, out := c.CreateAppRequest(input) + err := req.Send() + return out, err +} + +const opCreateDeployment = "CreateDeployment" + +// CreateDeploymentRequest generates a request for the CreateDeployment operation. 
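+//
+// Editor's sketch (illustrative values; see the OpsWorks model types later in
+// this file for the full input shape):
+//
+//	resp, err := svc.CreateDeployment(&opsworks.CreateDeploymentInput{
+//		StackId: aws.String("stack-id"),
+//		AppId:   aws.String("app-id"),
+//		Command: &opsworks.DeploymentCommand{Name: aws.String("deploy")},
+//	})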
+func (c *OpsWorks) CreateDeploymentRequest(input *CreateDeploymentInput) (req *request.Request, output *CreateDeploymentOutput) { + op := &request.Operation{ + Name: opCreateDeployment, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateDeploymentInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateDeploymentOutput{} + req.Data = output + return +} + +// Runs deployment or stack commands. For more information, see Deploying Apps +// (http://docs.aws.amazon.com/opsworks/latest/userguide/workingapps-deploying.html) +// and Run Stack Commands (http://docs.aws.amazon.com/opsworks/latest/userguide/workingstacks-commands.html). +// +// Required Permissions: To use this action, an IAM user must have a Deploy +// or Manage permissions level for the stack, or an attached policy that explicitly +// grants permissions. For more information on user permissions, see Managing +// User Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). +func (c *OpsWorks) CreateDeployment(input *CreateDeploymentInput) (*CreateDeploymentOutput, error) { + req, out := c.CreateDeploymentRequest(input) + err := req.Send() + return out, err +} + +const opCreateInstance = "CreateInstance" + +// CreateInstanceRequest generates a request for the CreateInstance operation. +func (c *OpsWorks) CreateInstanceRequest(input *CreateInstanceInput) (req *request.Request, output *CreateInstanceOutput) { + op := &request.Operation{ + Name: opCreateInstance, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateInstanceInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateInstanceOutput{} + req.Data = output + return +} + +// Creates an instance in a specified stack. For more information, see Adding +// an Instance to a Layer (http://docs.aws.amazon.com/opsworks/latest/userguide/workinginstances-add.html). +// +// Required Permissions: To use this action, an IAM user must have a Manage +// permissions level for the stack, or an attached policy that explicitly grants +// permissions. For more information on user permissions, see Managing User +// Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). +func (c *OpsWorks) CreateInstance(input *CreateInstanceInput) (*CreateInstanceOutput, error) { + req, out := c.CreateInstanceRequest(input) + err := req.Send() + return out, err +} + +const opCreateLayer = "CreateLayer" + +// CreateLayerRequest generates a request for the CreateLayer operation. +func (c *OpsWorks) CreateLayerRequest(input *CreateLayerInput) (req *request.Request, output *CreateLayerOutput) { + op := &request.Operation{ + Name: opCreateLayer, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateLayerInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateLayerOutput{} + req.Data = output + return +} + +// Creates a layer. For more information, see How to Create a Layer (http://docs.aws.amazon.com/opsworks/latest/userguide/workinglayers-basics-create.html). +// +// You should use CreateLayer for noncustom layer types such as PHP App Server +// only if the stack does not have an existing layer of that type. A stack can +// have at most one instance of each noncustom layer; if you attempt to create +// a second instance, CreateLayer fails. A stack can have an arbitrary number +// of custom layers, so you can call CreateLayer as many times as you like for +// that layer type. 
+// +// Required Permissions: To use this action, an IAM user must have a Manage +// permissions level for the stack, or an attached policy that explicitly grants +// permissions. For more information on user permissions, see Managing User +// Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). +func (c *OpsWorks) CreateLayer(input *CreateLayerInput) (*CreateLayerOutput, error) { + req, out := c.CreateLayerRequest(input) + err := req.Send() + return out, err +} + +const opCreateStack = "CreateStack" + +// CreateStackRequest generates a request for the CreateStack operation. +func (c *OpsWorks) CreateStackRequest(input *CreateStackInput) (req *request.Request, output *CreateStackOutput) { + op := &request.Operation{ + Name: opCreateStack, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateStackInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateStackOutput{} + req.Data = output + return +} + +// Creates a new stack. For more information, see Create a New Stack (http://docs.aws.amazon.com/opsworks/latest/userguide/workingstacks-edit.html). +// +// Required Permissions: To use this action, an IAM user must have an attached +// policy that explicitly grants permissions. For more information on user permissions, +// see Managing User Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). +func (c *OpsWorks) CreateStack(input *CreateStackInput) (*CreateStackOutput, error) { + req, out := c.CreateStackRequest(input) + err := req.Send() + return out, err +} + +const opCreateUserProfile = "CreateUserProfile" + +// CreateUserProfileRequest generates a request for the CreateUserProfile operation. +func (c *OpsWorks) CreateUserProfileRequest(input *CreateUserProfileInput) (req *request.Request, output *CreateUserProfileOutput) { + op := &request.Operation{ + Name: opCreateUserProfile, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateUserProfileInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateUserProfileOutput{} + req.Data = output + return +} + +// Creates a new user profile. +// +// Required Permissions: To use this action, an IAM user must have an attached +// policy that explicitly grants permissions. For more information on user permissions, +// see Managing User Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). +func (c *OpsWorks) CreateUserProfile(input *CreateUserProfileInput) (*CreateUserProfileOutput, error) { + req, out := c.CreateUserProfileRequest(input) + err := req.Send() + return out, err +} + +const opDeleteApp = "DeleteApp" + +// DeleteAppRequest generates a request for the DeleteApp operation. +func (c *OpsWorks) DeleteAppRequest(input *DeleteAppInput) (req *request.Request, output *DeleteAppOutput) { + op := &request.Operation{ + Name: opDeleteApp, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteAppInput{} + } + + req = c.newRequest(op, input, output) + output = &DeleteAppOutput{} + req.Data = output + return +} + +// Deletes a specified app. +// +// Required Permissions: To use this action, an IAM user must have a Manage +// permissions level for the stack, or an attached policy that explicitly grants +// permissions. For more information on user permissions, see Managing User +// Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). 
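+//
+// For example (editor's illustration; the app ID is hypothetical):
+//
+//	_, err := svc.DeleteApp(&opsworks.DeleteAppInput{AppId: aws.String("app-id")})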
+func (c *OpsWorks) DeleteApp(input *DeleteAppInput) (*DeleteAppOutput, error) { + req, out := c.DeleteAppRequest(input) + err := req.Send() + return out, err +} + +const opDeleteInstance = "DeleteInstance" + +// DeleteInstanceRequest generates a request for the DeleteInstance operation. +func (c *OpsWorks) DeleteInstanceRequest(input *DeleteInstanceInput) (req *request.Request, output *DeleteInstanceOutput) { + op := &request.Operation{ + Name: opDeleteInstance, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteInstanceInput{} + } + + req = c.newRequest(op, input, output) + output = &DeleteInstanceOutput{} + req.Data = output + return +} + +// Deletes a specified instance, which terminates the associated Amazon EC2 +// instance. You must stop an instance before you can delete it. +// +// For more information, see Deleting Instances (http://docs.aws.amazon.com/opsworks/latest/userguide/workinginstances-delete.html). +// +// Required Permissions: To use this action, an IAM user must have a Manage +// permissions level for the stack, or an attached policy that explicitly grants +// permissions. For more information on user permissions, see Managing User +// Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). +func (c *OpsWorks) DeleteInstance(input *DeleteInstanceInput) (*DeleteInstanceOutput, error) { + req, out := c.DeleteInstanceRequest(input) + err := req.Send() + return out, err +} + +const opDeleteLayer = "DeleteLayer" + +// DeleteLayerRequest generates a request for the DeleteLayer operation. +func (c *OpsWorks) DeleteLayerRequest(input *DeleteLayerInput) (req *request.Request, output *DeleteLayerOutput) { + op := &request.Operation{ + Name: opDeleteLayer, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteLayerInput{} + } + + req = c.newRequest(op, input, output) + output = &DeleteLayerOutput{} + req.Data = output + return +} + +// Deletes a specified layer. You must first stop and then delete all associated +// instances or unassign registered instances. For more information, see How +// to Delete a Layer (http://docs.aws.amazon.com/opsworks/latest/userguide/workinglayers-basics-delete.html). +// +// Required Permissions: To use this action, an IAM user must have a Manage +// permissions level for the stack, or an attached policy that explicitly grants +// permissions. For more information on user permissions, see Managing User +// Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). +func (c *OpsWorks) DeleteLayer(input *DeleteLayerInput) (*DeleteLayerOutput, error) { + req, out := c.DeleteLayerRequest(input) + err := req.Send() + return out, err +} + +const opDeleteStack = "DeleteStack" + +// DeleteStackRequest generates a request for the DeleteStack operation. +func (c *OpsWorks) DeleteStackRequest(input *DeleteStackInput) (req *request.Request, output *DeleteStackOutput) { + op := &request.Operation{ + Name: opDeleteStack, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteStackInput{} + } + + req = c.newRequest(op, input, output) + output = &DeleteStackOutput{} + req.Data = output + return +} + +// Deletes a specified stack. You must first delete all instances, layers, and +// apps or deregister registered instances. For more information, see Shut Down +// a Stack (http://docs.aws.amazon.com/opsworks/latest/userguide/workingstacks-shutting.html). 
+//
+// Required Permissions: To use this action, an IAM user must have a Manage
+// permissions level for the stack, or an attached policy that explicitly grants
+// permissions. For more information on user permissions, see Managing User
+// Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html).
+func (c *OpsWorks) DeleteStack(input *DeleteStackInput) (*DeleteStackOutput, error) {
+	req, out := c.DeleteStackRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+const opDeleteUserProfile = "DeleteUserProfile"
+
+// DeleteUserProfileRequest generates a request for the DeleteUserProfile operation.
+func (c *OpsWorks) DeleteUserProfileRequest(input *DeleteUserProfileInput) (req *request.Request, output *DeleteUserProfileOutput) {
+	op := &request.Operation{
+		Name:       opDeleteUserProfile,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &DeleteUserProfileInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &DeleteUserProfileOutput{}
+	req.Data = output
+	return
+}
+
+// Deletes a user profile.
+//
+// Required Permissions: To use this action, an IAM user must have an attached
+// policy that explicitly grants permissions. For more information on user permissions,
+// see Managing User Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html).
+func (c *OpsWorks) DeleteUserProfile(input *DeleteUserProfileInput) (*DeleteUserProfileOutput, error) {
+	req, out := c.DeleteUserProfileRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+const opDeregisterEcsCluster = "DeregisterEcsCluster"
+
+// DeregisterEcsClusterRequest generates a request for the DeregisterEcsCluster operation.
+func (c *OpsWorks) DeregisterEcsClusterRequest(input *DeregisterEcsClusterInput) (req *request.Request, output *DeregisterEcsClusterOutput) {
+	op := &request.Operation{
+		Name:       opDeregisterEcsCluster,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &DeregisterEcsClusterInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &DeregisterEcsClusterOutput{}
+	req.Data = output
+	return
+}
+
+// Deregisters a specified Amazon ECS cluster from a stack. For more information,
+// see Resource Management (http://docs.aws.amazon.com/opsworks/latest/userguide/workinglayers-ecscluster.html#workinglayers-ecscluster-delete).
+//
+// Required Permissions: To use this action, an IAM user must have a Manage
+// permissions level for the stack or an attached policy that explicitly grants
+// permissions. For more information on user permissions, see Managing User
+// Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html).
+func (c *OpsWorks) DeregisterEcsCluster(input *DeregisterEcsClusterInput) (*DeregisterEcsClusterOutput, error) {
+	req, out := c.DeregisterEcsClusterRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+const opDeregisterElasticIp = "DeregisterElasticIp"
+
+// DeregisterElasticIpRequest generates a request for the DeregisterElasticIp operation.
+func (c *OpsWorks) DeregisterElasticIpRequest(input *DeregisterElasticIpInput) (req *request.Request, output *DeregisterElasticIpOutput) {
+	op := &request.Operation{
+		Name:       opDeregisterElasticIp,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &DeregisterElasticIpInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &DeregisterElasticIpOutput{}
+	req.Data = output
+	return
+}
+
+// Deregisters a specified Elastic IP address. The address can then be registered
+// by another stack. For more information, see Resource Management (http://docs.aws.amazon.com/opsworks/latest/userguide/resources.html).
+//
+// Required Permissions: To use this action, an IAM user must have a Manage
+// permissions level for the stack, or an attached policy that explicitly grants
+// permissions. For more information on user permissions, see Managing User
+// Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html).
+func (c *OpsWorks) DeregisterElasticIp(input *DeregisterElasticIpInput) (*DeregisterElasticIpOutput, error) {
+	req, out := c.DeregisterElasticIpRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+const opDeregisterInstance = "DeregisterInstance"
+
+// DeregisterInstanceRequest generates a request for the DeregisterInstance operation.
+func (c *OpsWorks) DeregisterInstanceRequest(input *DeregisterInstanceInput) (req *request.Request, output *DeregisterInstanceOutput) {
+	op := &request.Operation{
+		Name:       opDeregisterInstance,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &DeregisterInstanceInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &DeregisterInstanceOutput{}
+	req.Data = output
+	return
+}
+
+// Deregisters a registered Amazon EC2 or on-premises instance. This action removes
+// the instance from the stack and returns it to your control. This action cannot
+// be used with instances that were created with AWS OpsWorks.
+//
+// Required Permissions: To use this action, an IAM user must have a Manage
+// permissions level for the stack or an attached policy that explicitly grants
+// permissions. For more information on user permissions, see Managing User
+// Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html).
+func (c *OpsWorks) DeregisterInstance(input *DeregisterInstanceInput) (*DeregisterInstanceOutput, error) {
+	req, out := c.DeregisterInstanceRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+const opDeregisterRdsDbInstance = "DeregisterRdsDbInstance"
+
+// DeregisterRdsDbInstanceRequest generates a request for the DeregisterRdsDbInstance operation.
+func (c *OpsWorks) DeregisterRdsDbInstanceRequest(input *DeregisterRdsDbInstanceInput) (req *request.Request, output *DeregisterRdsDbInstanceOutput) {
+	op := &request.Operation{
+		Name:       opDeregisterRdsDbInstance,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &DeregisterRdsDbInstanceInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &DeregisterRdsDbInstanceOutput{}
+	req.Data = output
+	return
+}
+
+// Deregisters an Amazon RDS instance.
+//
+// Required Permissions: To use this action, an IAM user must have a Manage
+// permissions level for the stack, or an attached policy that explicitly grants
+// permissions. For more information on user permissions, see Managing User
+// Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html).
+func (c *OpsWorks) DeregisterRdsDbInstance(input *DeregisterRdsDbInstanceInput) (*DeregisterRdsDbInstanceOutput, error) {
+	req, out := c.DeregisterRdsDbInstanceRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+const opDeregisterVolume = "DeregisterVolume"
+
+// DeregisterVolumeRequest generates a request for the DeregisterVolume operation.
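+//
+// A sketch of the request/Send pattern this method supports; the one-shot
+// DeregisterVolume wrapper performs the same two steps. The volume ID is a
+// hypothetical placeholder and svc is assumed to be a configured *OpsWorks client:
+//
+//	req, out := svc.DeregisterVolumeRequest(&opsworks.DeregisterVolumeInput{
+//		VolumeId: aws.String("vol-1234"), // hypothetical OpsWorks volume ID
+//	})
+//	err := req.Send() // out is populated on success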
+func (c *OpsWorks) DeregisterVolumeRequest(input *DeregisterVolumeInput) (req *request.Request, output *DeregisterVolumeOutput) { + op := &request.Operation{ + Name: opDeregisterVolume, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeregisterVolumeInput{} + } + + req = c.newRequest(op, input, output) + output = &DeregisterVolumeOutput{} + req.Data = output + return +} + +// Deregisters an Amazon EBS volume. The volume can then be registered by another +// stack. For more information, see Resource Management (http://docs.aws.amazon.com/opsworks/latest/userguide/resources.html). +// +// Required Permissions: To use this action, an IAM user must have a Manage +// permissions level for the stack, or an attached policy that explicitly grants +// permissions. For more information on user permissions, see Managing User +// Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). +func (c *OpsWorks) DeregisterVolume(input *DeregisterVolumeInput) (*DeregisterVolumeOutput, error) { + req, out := c.DeregisterVolumeRequest(input) + err := req.Send() + return out, err +} + +const opDescribeAgentVersions = "DescribeAgentVersions" + +// DescribeAgentVersionsRequest generates a request for the DescribeAgentVersions operation. +func (c *OpsWorks) DescribeAgentVersionsRequest(input *DescribeAgentVersionsInput) (req *request.Request, output *DescribeAgentVersionsOutput) { + op := &request.Operation{ + Name: opDescribeAgentVersions, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeAgentVersionsInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeAgentVersionsOutput{} + req.Data = output + return +} + +// Describes the available AWS OpsWorks agent versions. You must specify a stack +// ID or a configuration manager. DescribeAgentVersions returns a list of available +// agent versions for the specified stack or configuration manager. +func (c *OpsWorks) DescribeAgentVersions(input *DescribeAgentVersionsInput) (*DescribeAgentVersionsOutput, error) { + req, out := c.DescribeAgentVersionsRequest(input) + err := req.Send() + return out, err +} + +const opDescribeApps = "DescribeApps" + +// DescribeAppsRequest generates a request for the DescribeApps operation. +func (c *OpsWorks) DescribeAppsRequest(input *DescribeAppsInput) (req *request.Request, output *DescribeAppsOutput) { + op := &request.Operation{ + Name: opDescribeApps, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeAppsInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeAppsOutput{} + req.Data = output + return +} + +// Requests a description of a specified set of apps. +// +// You must specify at least one of the parameters. +// +// Required Permissions: To use this action, an IAM user must have a Show, +// Deploy, or Manage permissions level for the stack, or an attached policy +// that explicitly grants permissions. For more information on user permissions, +// see Managing User Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). +func (c *OpsWorks) DescribeApps(input *DescribeAppsInput) (*DescribeAppsOutput, error) { + req, out := c.DescribeAppsRequest(input) + err := req.Send() + return out, err +} + +const opDescribeCommands = "DescribeCommands" + +// DescribeCommandsRequest generates a request for the DescribeCommands operation. 
+func (c *OpsWorks) DescribeCommandsRequest(input *DescribeCommandsInput) (req *request.Request, output *DescribeCommandsOutput) { + op := &request.Operation{ + Name: opDescribeCommands, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeCommandsInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeCommandsOutput{} + req.Data = output + return +} + +// Describes the results of specified commands. +// +// You must specify at least one of the parameters. +// +// Required Permissions: To use this action, an IAM user must have a Show, +// Deploy, or Manage permissions level for the stack, or an attached policy +// that explicitly grants permissions. For more information on user permissions, +// see Managing User Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). +func (c *OpsWorks) DescribeCommands(input *DescribeCommandsInput) (*DescribeCommandsOutput, error) { + req, out := c.DescribeCommandsRequest(input) + err := req.Send() + return out, err +} + +const opDescribeDeployments = "DescribeDeployments" + +// DescribeDeploymentsRequest generates a request for the DescribeDeployments operation. +func (c *OpsWorks) DescribeDeploymentsRequest(input *DescribeDeploymentsInput) (req *request.Request, output *DescribeDeploymentsOutput) { + op := &request.Operation{ + Name: opDescribeDeployments, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeDeploymentsInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeDeploymentsOutput{} + req.Data = output + return +} + +// Requests a description of a specified set of deployments. +// +// You must specify at least one of the parameters. +// +// Required Permissions: To use this action, an IAM user must have a Show, +// Deploy, or Manage permissions level for the stack, or an attached policy +// that explicitly grants permissions. For more information on user permissions, +// see Managing User Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). +func (c *OpsWorks) DescribeDeployments(input *DescribeDeploymentsInput) (*DescribeDeploymentsOutput, error) { + req, out := c.DescribeDeploymentsRequest(input) + err := req.Send() + return out, err +} + +const opDescribeEcsClusters = "DescribeEcsClusters" + +// DescribeEcsClustersRequest generates a request for the DescribeEcsClusters operation. +func (c *OpsWorks) DescribeEcsClustersRequest(input *DescribeEcsClustersInput) (req *request.Request, output *DescribeEcsClustersOutput) { + op := &request.Operation{ + Name: opDescribeEcsClusters, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeEcsClustersInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeEcsClustersOutput{} + req.Data = output + return +} + +// Describes Amazon ECS clusters that are registered with a stack. If you specify +// only a stack ID, you can use the MaxResults and NextToken parameters to paginate +// the response. However, AWS OpsWorks currently supports only one cluster per +// layer, so the result set has a maximum of one element. +// +// Required Permissions: To use this action, an IAM user must have a Show, +// Deploy, or Manage permissions level for the stack or an attached policy that +// explicitly grants permission. 
For more information on user permissions, see +// Managing User Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). +func (c *OpsWorks) DescribeEcsClusters(input *DescribeEcsClustersInput) (*DescribeEcsClustersOutput, error) { + req, out := c.DescribeEcsClustersRequest(input) + err := req.Send() + return out, err +} + +func (c *OpsWorks) DescribeEcsClustersPages(input *DescribeEcsClustersInput, fn func(p *DescribeEcsClustersOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.DescribeEcsClustersRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*DescribeEcsClustersOutput), lastPage) + }) +} + +const opDescribeElasticIps = "DescribeElasticIps" + +// DescribeElasticIpsRequest generates a request for the DescribeElasticIps operation. +func (c *OpsWorks) DescribeElasticIpsRequest(input *DescribeElasticIpsInput) (req *request.Request, output *DescribeElasticIpsOutput) { + op := &request.Operation{ + Name: opDescribeElasticIps, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeElasticIpsInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeElasticIpsOutput{} + req.Data = output + return +} + +// Describes Elastic IP addresses (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/elastic-ip-addresses-eip.html). +// +// You must specify at least one of the parameters. +// +// Required Permissions: To use this action, an IAM user must have a Show, +// Deploy, or Manage permissions level for the stack, or an attached policy +// that explicitly grants permissions. For more information on user permissions, +// see Managing User Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). +func (c *OpsWorks) DescribeElasticIps(input *DescribeElasticIpsInput) (*DescribeElasticIpsOutput, error) { + req, out := c.DescribeElasticIpsRequest(input) + err := req.Send() + return out, err +} + +const opDescribeElasticLoadBalancers = "DescribeElasticLoadBalancers" + +// DescribeElasticLoadBalancersRequest generates a request for the DescribeElasticLoadBalancers operation. +func (c *OpsWorks) DescribeElasticLoadBalancersRequest(input *DescribeElasticLoadBalancersInput) (req *request.Request, output *DescribeElasticLoadBalancersOutput) { + op := &request.Operation{ + Name: opDescribeElasticLoadBalancers, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeElasticLoadBalancersInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeElasticLoadBalancersOutput{} + req.Data = output + return +} + +// Describes a stack's Elastic Load Balancing instances. +// +// You must specify at least one of the parameters. +// +// Required Permissions: To use this action, an IAM user must have a Show, +// Deploy, or Manage permissions level for the stack, or an attached policy +// that explicitly grants permissions. For more information on user permissions, +// see Managing User Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). 
+func (c *OpsWorks) DescribeElasticLoadBalancers(input *DescribeElasticLoadBalancersInput) (*DescribeElasticLoadBalancersOutput, error) { + req, out := c.DescribeElasticLoadBalancersRequest(input) + err := req.Send() + return out, err +} + +const opDescribeInstances = "DescribeInstances" + +// DescribeInstancesRequest generates a request for the DescribeInstances operation. +func (c *OpsWorks) DescribeInstancesRequest(input *DescribeInstancesInput) (req *request.Request, output *DescribeInstancesOutput) { + op := &request.Operation{ + Name: opDescribeInstances, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeInstancesInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeInstancesOutput{} + req.Data = output + return +} + +// Requests a description of a set of instances. +// +// You must specify at least one of the parameters. +// +// Required Permissions: To use this action, an IAM user must have a Show, +// Deploy, or Manage permissions level for the stack, or an attached policy +// that explicitly grants permissions. For more information on user permissions, +// see Managing User Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). +func (c *OpsWorks) DescribeInstances(input *DescribeInstancesInput) (*DescribeInstancesOutput, error) { + req, out := c.DescribeInstancesRequest(input) + err := req.Send() + return out, err +} + +const opDescribeLayers = "DescribeLayers" + +// DescribeLayersRequest generates a request for the DescribeLayers operation. +func (c *OpsWorks) DescribeLayersRequest(input *DescribeLayersInput) (req *request.Request, output *DescribeLayersOutput) { + op := &request.Operation{ + Name: opDescribeLayers, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeLayersInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeLayersOutput{} + req.Data = output + return +} + +// Requests a description of one or more layers in a specified stack. +// +// You must specify at least one of the parameters. +// +// Required Permissions: To use this action, an IAM user must have a Show, +// Deploy, or Manage permissions level for the stack, or an attached policy +// that explicitly grants permissions. For more information on user permissions, +// see Managing User Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). +func (c *OpsWorks) DescribeLayers(input *DescribeLayersInput) (*DescribeLayersOutput, error) { + req, out := c.DescribeLayersRequest(input) + err := req.Send() + return out, err +} + +const opDescribeLoadBasedAutoScaling = "DescribeLoadBasedAutoScaling" + +// DescribeLoadBasedAutoScalingRequest generates a request for the DescribeLoadBasedAutoScaling operation. +func (c *OpsWorks) DescribeLoadBasedAutoScalingRequest(input *DescribeLoadBasedAutoScalingInput) (req *request.Request, output *DescribeLoadBasedAutoScalingOutput) { + op := &request.Operation{ + Name: opDescribeLoadBasedAutoScaling, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeLoadBasedAutoScalingInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeLoadBasedAutoScalingOutput{} + req.Data = output + return +} + +// Describes load-based auto scaling configurations for specified layers. +// +// You must specify at least one of the parameters. 
+// +// Required Permissions: To use this action, an IAM user must have a Show, +// Deploy, or Manage permissions level for the stack, or an attached policy +// that explicitly grants permissions. For more information on user permissions, +// see Managing User Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). +func (c *OpsWorks) DescribeLoadBasedAutoScaling(input *DescribeLoadBasedAutoScalingInput) (*DescribeLoadBasedAutoScalingOutput, error) { + req, out := c.DescribeLoadBasedAutoScalingRequest(input) + err := req.Send() + return out, err +} + +const opDescribeMyUserProfile = "DescribeMyUserProfile" + +// DescribeMyUserProfileRequest generates a request for the DescribeMyUserProfile operation. +func (c *OpsWorks) DescribeMyUserProfileRequest(input *DescribeMyUserProfileInput) (req *request.Request, output *DescribeMyUserProfileOutput) { + op := &request.Operation{ + Name: opDescribeMyUserProfile, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeMyUserProfileInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeMyUserProfileOutput{} + req.Data = output + return +} + +// Describes a user's SSH information. +// +// Required Permissions: To use this action, an IAM user must have self-management +// enabled or an attached policy that explicitly grants permissions. For more +// information on user permissions, see Managing User Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). +func (c *OpsWorks) DescribeMyUserProfile(input *DescribeMyUserProfileInput) (*DescribeMyUserProfileOutput, error) { + req, out := c.DescribeMyUserProfileRequest(input) + err := req.Send() + return out, err +} + +const opDescribePermissions = "DescribePermissions" + +// DescribePermissionsRequest generates a request for the DescribePermissions operation. +func (c *OpsWorks) DescribePermissionsRequest(input *DescribePermissionsInput) (req *request.Request, output *DescribePermissionsOutput) { + op := &request.Operation{ + Name: opDescribePermissions, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribePermissionsInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribePermissionsOutput{} + req.Data = output + return +} + +// Describes the permissions for a specified stack. +// +// Required Permissions: To use this action, an IAM user must have a Manage +// permissions level for the stack, or an attached policy that explicitly grants +// permissions. For more information on user permissions, see Managing User +// Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). +func (c *OpsWorks) DescribePermissions(input *DescribePermissionsInput) (*DescribePermissionsOutput, error) { + req, out := c.DescribePermissionsRequest(input) + err := req.Send() + return out, err +} + +const opDescribeRaidArrays = "DescribeRaidArrays" + +// DescribeRaidArraysRequest generates a request for the DescribeRaidArrays operation. +func (c *OpsWorks) DescribeRaidArraysRequest(input *DescribeRaidArraysInput) (req *request.Request, output *DescribeRaidArraysOutput) { + op := &request.Operation{ + Name: opDescribeRaidArrays, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeRaidArraysInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeRaidArraysOutput{} + req.Data = output + return +} + +// Describe an instance's RAID arrays. 
+// +// You must specify at least one of the parameters. +// +// Required Permissions: To use this action, an IAM user must have a Show, +// Deploy, or Manage permissions level for the stack, or an attached policy +// that explicitly grants permissions. For more information on user permissions, +// see Managing User Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). +func (c *OpsWorks) DescribeRaidArrays(input *DescribeRaidArraysInput) (*DescribeRaidArraysOutput, error) { + req, out := c.DescribeRaidArraysRequest(input) + err := req.Send() + return out, err +} + +const opDescribeRdsDbInstances = "DescribeRdsDbInstances" + +// DescribeRdsDbInstancesRequest generates a request for the DescribeRdsDbInstances operation. +func (c *OpsWorks) DescribeRdsDbInstancesRequest(input *DescribeRdsDbInstancesInput) (req *request.Request, output *DescribeRdsDbInstancesOutput) { + op := &request.Operation{ + Name: opDescribeRdsDbInstances, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeRdsDbInstancesInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeRdsDbInstancesOutput{} + req.Data = output + return +} + +// Describes Amazon RDS instances. +// +// Required Permissions: To use this action, an IAM user must have a Show, +// Deploy, or Manage permissions level for the stack, or an attached policy +// that explicitly grants permissions. For more information on user permissions, +// see Managing User Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). +func (c *OpsWorks) DescribeRdsDbInstances(input *DescribeRdsDbInstancesInput) (*DescribeRdsDbInstancesOutput, error) { + req, out := c.DescribeRdsDbInstancesRequest(input) + err := req.Send() + return out, err +} + +const opDescribeServiceErrors = "DescribeServiceErrors" + +// DescribeServiceErrorsRequest generates a request for the DescribeServiceErrors operation. +func (c *OpsWorks) DescribeServiceErrorsRequest(input *DescribeServiceErrorsInput) (req *request.Request, output *DescribeServiceErrorsOutput) { + op := &request.Operation{ + Name: opDescribeServiceErrors, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeServiceErrorsInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeServiceErrorsOutput{} + req.Data = output + return +} + +// Describes AWS OpsWorks service errors. +// +// Required Permissions: To use this action, an IAM user must have a Show, +// Deploy, or Manage permissions level for the stack, or an attached policy +// that explicitly grants permissions. For more information on user permissions, +// see Managing User Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). +func (c *OpsWorks) DescribeServiceErrors(input *DescribeServiceErrorsInput) (*DescribeServiceErrorsOutput, error) { + req, out := c.DescribeServiceErrorsRequest(input) + err := req.Send() + return out, err +} + +const opDescribeStackProvisioningParameters = "DescribeStackProvisioningParameters" + +// DescribeStackProvisioningParametersRequest generates a request for the DescribeStackProvisioningParameters operation. 
+func (c *OpsWorks) DescribeStackProvisioningParametersRequest(input *DescribeStackProvisioningParametersInput) (req *request.Request, output *DescribeStackProvisioningParametersOutput) { + op := &request.Operation{ + Name: opDescribeStackProvisioningParameters, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeStackProvisioningParametersInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeStackProvisioningParametersOutput{} + req.Data = output + return +} + +// Requests a description of a stack's provisioning parameters. +// +// Required Permissions: To use this action, an IAM user must have a Show, +// Deploy, or Manage permissions level for the stack or an attached policy that +// explicitly grants permissions. For more information on user permissions, +// see Managing User Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). +func (c *OpsWorks) DescribeStackProvisioningParameters(input *DescribeStackProvisioningParametersInput) (*DescribeStackProvisioningParametersOutput, error) { + req, out := c.DescribeStackProvisioningParametersRequest(input) + err := req.Send() + return out, err +} + +const opDescribeStackSummary = "DescribeStackSummary" + +// DescribeStackSummaryRequest generates a request for the DescribeStackSummary operation. +func (c *OpsWorks) DescribeStackSummaryRequest(input *DescribeStackSummaryInput) (req *request.Request, output *DescribeStackSummaryOutput) { + op := &request.Operation{ + Name: opDescribeStackSummary, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeStackSummaryInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeStackSummaryOutput{} + req.Data = output + return +} + +// Describes the number of layers and apps in a specified stack, and the number +// of instances in each state, such as running_setup or online. +// +// Required Permissions: To use this action, an IAM user must have a Show, +// Deploy, or Manage permissions level for the stack, or an attached policy +// that explicitly grants permissions. For more information on user permissions, +// see Managing User Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). +func (c *OpsWorks) DescribeStackSummary(input *DescribeStackSummaryInput) (*DescribeStackSummaryOutput, error) { + req, out := c.DescribeStackSummaryRequest(input) + err := req.Send() + return out, err +} + +const opDescribeStacks = "DescribeStacks" + +// DescribeStacksRequest generates a request for the DescribeStacks operation. +func (c *OpsWorks) DescribeStacksRequest(input *DescribeStacksInput) (req *request.Request, output *DescribeStacksOutput) { + op := &request.Operation{ + Name: opDescribeStacks, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeStacksInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeStacksOutput{} + req.Data = output + return +} + +// Requests a description of one or more stacks. +// +// Required Permissions: To use this action, an IAM user must have a Show, +// Deploy, or Manage permissions level for the stack, or an attached policy +// that explicitly grants permissions. For more information on user permissions, +// see Managing User Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). 
+func (c *OpsWorks) DescribeStacks(input *DescribeStacksInput) (*DescribeStacksOutput, error) { + req, out := c.DescribeStacksRequest(input) + err := req.Send() + return out, err +} + +const opDescribeTimeBasedAutoScaling = "DescribeTimeBasedAutoScaling" + +// DescribeTimeBasedAutoScalingRequest generates a request for the DescribeTimeBasedAutoScaling operation. +func (c *OpsWorks) DescribeTimeBasedAutoScalingRequest(input *DescribeTimeBasedAutoScalingInput) (req *request.Request, output *DescribeTimeBasedAutoScalingOutput) { + op := &request.Operation{ + Name: opDescribeTimeBasedAutoScaling, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeTimeBasedAutoScalingInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeTimeBasedAutoScalingOutput{} + req.Data = output + return +} + +// Describes time-based auto scaling configurations for specified instances. +// +// You must specify at least one of the parameters. +// +// Required Permissions: To use this action, an IAM user must have a Show, +// Deploy, or Manage permissions level for the stack, or an attached policy +// that explicitly grants permissions. For more information on user permissions, +// see Managing User Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). +func (c *OpsWorks) DescribeTimeBasedAutoScaling(input *DescribeTimeBasedAutoScalingInput) (*DescribeTimeBasedAutoScalingOutput, error) { + req, out := c.DescribeTimeBasedAutoScalingRequest(input) + err := req.Send() + return out, err +} + +const opDescribeUserProfiles = "DescribeUserProfiles" + +// DescribeUserProfilesRequest generates a request for the DescribeUserProfiles operation. +func (c *OpsWorks) DescribeUserProfilesRequest(input *DescribeUserProfilesInput) (req *request.Request, output *DescribeUserProfilesOutput) { + op := &request.Operation{ + Name: opDescribeUserProfiles, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeUserProfilesInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeUserProfilesOutput{} + req.Data = output + return +} + +// Describe specified users. +// +// Required Permissions: To use this action, an IAM user must have an attached +// policy that explicitly grants permissions. For more information on user permissions, +// see Managing User Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). +func (c *OpsWorks) DescribeUserProfiles(input *DescribeUserProfilesInput) (*DescribeUserProfilesOutput, error) { + req, out := c.DescribeUserProfilesRequest(input) + err := req.Send() + return out, err +} + +const opDescribeVolumes = "DescribeVolumes" + +// DescribeVolumesRequest generates a request for the DescribeVolumes operation. +func (c *OpsWorks) DescribeVolumesRequest(input *DescribeVolumesInput) (req *request.Request, output *DescribeVolumesOutput) { + op := &request.Operation{ + Name: opDescribeVolumes, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeVolumesInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeVolumesOutput{} + req.Data = output + return +} + +// Describes an instance's Amazon EBS volumes. +// +// You must specify at least one of the parameters. +// +// Required Permissions: To use this action, an IAM user must have a Show, +// Deploy, or Manage permissions level for the stack, or an attached policy +// that explicitly grants permissions. 
For more information on user permissions, +// see Managing User Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). +func (c *OpsWorks) DescribeVolumes(input *DescribeVolumesInput) (*DescribeVolumesOutput, error) { + req, out := c.DescribeVolumesRequest(input) + err := req.Send() + return out, err +} + +const opDetachElasticLoadBalancer = "DetachElasticLoadBalancer" + +// DetachElasticLoadBalancerRequest generates a request for the DetachElasticLoadBalancer operation. +func (c *OpsWorks) DetachElasticLoadBalancerRequest(input *DetachElasticLoadBalancerInput) (req *request.Request, output *DetachElasticLoadBalancerOutput) { + op := &request.Operation{ + Name: opDetachElasticLoadBalancer, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DetachElasticLoadBalancerInput{} + } + + req = c.newRequest(op, input, output) + output = &DetachElasticLoadBalancerOutput{} + req.Data = output + return +} + +// Detaches a specified Elastic Load Balancing instance from its layer. +// +// Required Permissions: To use this action, an IAM user must have a Manage +// permissions level for the stack, or an attached policy that explicitly grants +// permissions. For more information on user permissions, see Managing User +// Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). +func (c *OpsWorks) DetachElasticLoadBalancer(input *DetachElasticLoadBalancerInput) (*DetachElasticLoadBalancerOutput, error) { + req, out := c.DetachElasticLoadBalancerRequest(input) + err := req.Send() + return out, err +} + +const opDisassociateElasticIp = "DisassociateElasticIp" + +// DisassociateElasticIpRequest generates a request for the DisassociateElasticIp operation. +func (c *OpsWorks) DisassociateElasticIpRequest(input *DisassociateElasticIpInput) (req *request.Request, output *DisassociateElasticIpOutput) { + op := &request.Operation{ + Name: opDisassociateElasticIp, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DisassociateElasticIpInput{} + } + + req = c.newRequest(op, input, output) + output = &DisassociateElasticIpOutput{} + req.Data = output + return +} + +// Disassociates an Elastic IP address from its instance. The address remains +// registered with the stack. For more information, see Resource Management +// (http://docs.aws.amazon.com/opsworks/latest/userguide/resources.html). +// +// Required Permissions: To use this action, an IAM user must have a Manage +// permissions level for the stack, or an attached policy that explicitly grants +// permissions. For more information on user permissions, see Managing User +// Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). +func (c *OpsWorks) DisassociateElasticIp(input *DisassociateElasticIpInput) (*DisassociateElasticIpOutput, error) { + req, out := c.DisassociateElasticIpRequest(input) + err := req.Send() + return out, err +} + +const opGetHostnameSuggestion = "GetHostnameSuggestion" + +// GetHostnameSuggestionRequest generates a request for the GetHostnameSuggestion operation. 
+func (c *OpsWorks) GetHostnameSuggestionRequest(input *GetHostnameSuggestionInput) (req *request.Request, output *GetHostnameSuggestionOutput) { + op := &request.Operation{ + Name: opGetHostnameSuggestion, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetHostnameSuggestionInput{} + } + + req = c.newRequest(op, input, output) + output = &GetHostnameSuggestionOutput{} + req.Data = output + return +} + +// Gets a generated host name for the specified layer, based on the current +// host name theme. +// +// Required Permissions: To use this action, an IAM user must have a Manage +// permissions level for the stack, or an attached policy that explicitly grants +// permissions. For more information on user permissions, see Managing User +// Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). +func (c *OpsWorks) GetHostnameSuggestion(input *GetHostnameSuggestionInput) (*GetHostnameSuggestionOutput, error) { + req, out := c.GetHostnameSuggestionRequest(input) + err := req.Send() + return out, err +} + +const opGrantAccess = "GrantAccess" + +// GrantAccessRequest generates a request for the GrantAccess operation. +func (c *OpsWorks) GrantAccessRequest(input *GrantAccessInput) (req *request.Request, output *GrantAccessOutput) { + op := &request.Operation{ + Name: opGrantAccess, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GrantAccessInput{} + } + + req = c.newRequest(op, input, output) + output = &GrantAccessOutput{} + req.Data = output + return +} + +// This action can be used only with Windows stacks. Grants RDP access to a +// Windows instance for a specified time period. +func (c *OpsWorks) GrantAccess(input *GrantAccessInput) (*GrantAccessOutput, error) { + req, out := c.GrantAccessRequest(input) + err := req.Send() + return out, err +} + +const opRebootInstance = "RebootInstance" + +// RebootInstanceRequest generates a request for the RebootInstance operation. +func (c *OpsWorks) RebootInstanceRequest(input *RebootInstanceInput) (req *request.Request, output *RebootInstanceOutput) { + op := &request.Operation{ + Name: opRebootInstance, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &RebootInstanceInput{} + } + + req = c.newRequest(op, input, output) + output = &RebootInstanceOutput{} + req.Data = output + return +} + +// Reboots a specified instance. For more information, see Starting, Stopping, +// and Rebooting Instances (http://docs.aws.amazon.com/opsworks/latest/userguide/workinginstances-starting.html). +// +// Required Permissions: To use this action, an IAM user must have a Manage +// permissions level for the stack, or an attached policy that explicitly grants +// permissions. For more information on user permissions, see Managing User +// Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). +func (c *OpsWorks) RebootInstance(input *RebootInstanceInput) (*RebootInstanceOutput, error) { + req, out := c.RebootInstanceRequest(input) + err := req.Send() + return out, err +} + +const opRegisterEcsCluster = "RegisterEcsCluster" + +// RegisterEcsClusterRequest generates a request for the RegisterEcsCluster operation. 
+func (c *OpsWorks) RegisterEcsClusterRequest(input *RegisterEcsClusterInput) (req *request.Request, output *RegisterEcsClusterOutput) { + op := &request.Operation{ + Name: opRegisterEcsCluster, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &RegisterEcsClusterInput{} + } + + req = c.newRequest(op, input, output) + output = &RegisterEcsClusterOutput{} + req.Data = output + return +} + +// Registers a specified Amazon ECS cluster with a stack. You can register only +// one cluster with a stack. A cluster can be registered with only one stack. +// For more information, see Resource Management (http://docs.aws.amazon.com/opsworks/latest/userguide/workinglayers-ecscluster.html). +// +// Required Permissions: To use this action, an IAM user must have a Manage +// permissions level for the stack or an attached policy that explicitly grants +// permissions. For more information on user permissions, see Managing User +// Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). +func (c *OpsWorks) RegisterEcsCluster(input *RegisterEcsClusterInput) (*RegisterEcsClusterOutput, error) { + req, out := c.RegisterEcsClusterRequest(input) + err := req.Send() + return out, err +} + +const opRegisterElasticIp = "RegisterElasticIp" + +// RegisterElasticIpRequest generates a request for the RegisterElasticIp operation. +func (c *OpsWorks) RegisterElasticIpRequest(input *RegisterElasticIpInput) (req *request.Request, output *RegisterElasticIpOutput) { + op := &request.Operation{ + Name: opRegisterElasticIp, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &RegisterElasticIpInput{} + } + + req = c.newRequest(op, input, output) + output = &RegisterElasticIpOutput{} + req.Data = output + return +} + +// Registers an Elastic IP address with a specified stack. An address can be +// registered with only one stack at a time. If the address is already registered, +// you must first deregister it by calling DeregisterElasticIp. For more information, +// see Resource Management (http://docs.aws.amazon.com/opsworks/latest/userguide/resources.html). +// +// Required Permissions: To use this action, an IAM user must have a Manage +// permissions level for the stack, or an attached policy that explicitly grants +// permissions. For more information on user permissions, see Managing User +// Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). +func (c *OpsWorks) RegisterElasticIp(input *RegisterElasticIpInput) (*RegisterElasticIpOutput, error) { + req, out := c.RegisterElasticIpRequest(input) + err := req.Send() + return out, err +} + +const opRegisterInstance = "RegisterInstance" + +// RegisterInstanceRequest generates a request for the RegisterInstance operation. +func (c *OpsWorks) RegisterInstanceRequest(input *RegisterInstanceInput) (req *request.Request, output *RegisterInstanceOutput) { + op := &request.Operation{ + Name: opRegisterInstance, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &RegisterInstanceInput{} + } + + req = c.newRequest(op, input, output) + output = &RegisterInstanceOutput{} + req.Data = output + return +} + +// Registers instances with a specified stack that were created outside of AWS +// OpsWorks. +// +// We do not recommend using this action to register instances. The complete +// registration operation has two primary steps, installing the AWS OpsWorks +// agent on the instance and registering the instance with the stack. 
RegisterInstance +// handles only the second step. You should instead use the AWS CLI register +// command, which performs the entire registration operation. For more information, +// see Registering an Instance with an AWS OpsWorks Stack (http://docs.aws.amazon.com/opsworks/latest/userguide/registered-instances-register.html). +// Required Permissions: To use this action, an IAM user must have a Manage +// permissions level for the stack or an attached policy that explicitly grants +// permissions. For more information on user permissions, see Managing User +// Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). +func (c *OpsWorks) RegisterInstance(input *RegisterInstanceInput) (*RegisterInstanceOutput, error) { + req, out := c.RegisterInstanceRequest(input) + err := req.Send() + return out, err +} + +const opRegisterRdsDbInstance = "RegisterRdsDbInstance" + +// RegisterRdsDbInstanceRequest generates a request for the RegisterRdsDbInstance operation. +func (c *OpsWorks) RegisterRdsDbInstanceRequest(input *RegisterRdsDbInstanceInput) (req *request.Request, output *RegisterRdsDbInstanceOutput) { + op := &request.Operation{ + Name: opRegisterRdsDbInstance, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &RegisterRdsDbInstanceInput{} + } + + req = c.newRequest(op, input, output) + output = &RegisterRdsDbInstanceOutput{} + req.Data = output + return +} + +// Registers an Amazon RDS instance with a stack. +// +// Required Permissions: To use this action, an IAM user must have a Manage +// permissions level for the stack, or an attached policy that explicitly grants +// permissions. For more information on user permissions, see Managing User +// Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). +func (c *OpsWorks) RegisterRdsDbInstance(input *RegisterRdsDbInstanceInput) (*RegisterRdsDbInstanceOutput, error) { + req, out := c.RegisterRdsDbInstanceRequest(input) + err := req.Send() + return out, err +} + +const opRegisterVolume = "RegisterVolume" + +// RegisterVolumeRequest generates a request for the RegisterVolume operation. +func (c *OpsWorks) RegisterVolumeRequest(input *RegisterVolumeInput) (req *request.Request, output *RegisterVolumeOutput) { + op := &request.Operation{ + Name: opRegisterVolume, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &RegisterVolumeInput{} + } + + req = c.newRequest(op, input, output) + output = &RegisterVolumeOutput{} + req.Data = output + return +} + +// Registers an Amazon EBS volume with a specified stack. A volume can be registered +// with only one stack at a time. If the volume is already registered, you must +// first deregister it by calling DeregisterVolume. For more information, see +// Resource Management (http://docs.aws.amazon.com/opsworks/latest/userguide/resources.html). +// +// Required Permissions: To use this action, an IAM user must have a Manage +// permissions level for the stack, or an attached policy that explicitly grants +// permissions. For more information on user permissions, see Managing User +// Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). 
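+//
+// A usage sketch (the EC2 volume and stack IDs are hypothetical):
+//
+//	out, err := svc.RegisterVolume(&opsworks.RegisterVolumeInput{
+//		Ec2VolumeId: aws.String("vol-0example"),
+//		StackId:     aws.String("stack-1234"),
+//	})
+//	// On success, out.VolumeId holds the OpsWorks ID assigned to the volume.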
+func (c *OpsWorks) RegisterVolume(input *RegisterVolumeInput) (*RegisterVolumeOutput, error) { + req, out := c.RegisterVolumeRequest(input) + err := req.Send() + return out, err +} + +const opSetLoadBasedAutoScaling = "SetLoadBasedAutoScaling" + +// SetLoadBasedAutoScalingRequest generates a request for the SetLoadBasedAutoScaling operation. +func (c *OpsWorks) SetLoadBasedAutoScalingRequest(input *SetLoadBasedAutoScalingInput) (req *request.Request, output *SetLoadBasedAutoScalingOutput) { + op := &request.Operation{ + Name: opSetLoadBasedAutoScaling, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &SetLoadBasedAutoScalingInput{} + } + + req = c.newRequest(op, input, output) + output = &SetLoadBasedAutoScalingOutput{} + req.Data = output + return +} + +// Specify the load-based auto scaling configuration for a specified layer. +// For more information, see Managing Load with Time-based and Load-based Instances +// (http://docs.aws.amazon.com/opsworks/latest/userguide/workinginstances-autoscaling.html). +// +// To use load-based auto scaling, you must create a set of load-based auto +// scaling instances. Load-based auto scaling operates only on the instances +// from that set, so you must ensure that you have created enough instances +// to handle the maximum anticipated load. +// +// Required Permissions: To use this action, an IAM user must have a Manage +// permissions level for the stack, or an attached policy that explicitly grants +// permissions. For more information on user permissions, see Managing User +// Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). +func (c *OpsWorks) SetLoadBasedAutoScaling(input *SetLoadBasedAutoScalingInput) (*SetLoadBasedAutoScalingOutput, error) { + req, out := c.SetLoadBasedAutoScalingRequest(input) + err := req.Send() + return out, err +} + +const opSetPermission = "SetPermission" + +// SetPermissionRequest generates a request for the SetPermission operation. +func (c *OpsWorks) SetPermissionRequest(input *SetPermissionInput) (req *request.Request, output *SetPermissionOutput) { + op := &request.Operation{ + Name: opSetPermission, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &SetPermissionInput{} + } + + req = c.newRequest(op, input, output) + output = &SetPermissionOutput{} + req.Data = output + return +} + +// Specifies a user's permissions. For more information, see Security and Permissions +// (http://docs.aws.amazon.com/opsworks/latest/userguide/workingsecurity.html). +// +// Required Permissions: To use this action, an IAM user must have a Manage +// permissions level for the stack, or an attached policy that explicitly grants +// permissions. For more information on user permissions, see Managing User +// Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). +func (c *OpsWorks) SetPermission(input *SetPermissionInput) (*SetPermissionOutput, error) { + req, out := c.SetPermissionRequest(input) + err := req.Send() + return out, err +} + +const opSetTimeBasedAutoScaling = "SetTimeBasedAutoScaling" + +// SetTimeBasedAutoScalingRequest generates a request for the SetTimeBasedAutoScaling operation. 
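+//
+// A sketch of a schedule that keeps an instance on for three hours on Mondays;
+// the schedule maps hour strings to "on"/"off", and the instance ID is a
+// hypothetical placeholder:
+//
+//	_, err := svc.SetTimeBasedAutoScaling(&opsworks.SetTimeBasedAutoScalingInput{
+//		InstanceId: aws.String("instance-1234"), // hypothetical
+//		AutoScalingSchedule: &opsworks.WeeklyAutoScalingSchedule{
+//			Monday: map[string]*string{
+//				"9":  aws.String("on"),
+//				"10": aws.String("on"),
+//				"11": aws.String("on"),
+//			},
+//		},
+//	})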
+func (c *OpsWorks) SetTimeBasedAutoScalingRequest(input *SetTimeBasedAutoScalingInput) (req *request.Request, output *SetTimeBasedAutoScalingOutput) { + op := &request.Operation{ + Name: opSetTimeBasedAutoScaling, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &SetTimeBasedAutoScalingInput{} + } + + req = c.newRequest(op, input, output) + output = &SetTimeBasedAutoScalingOutput{} + req.Data = output + return +} + +// Specify the time-based auto scaling configuration for a specified instance. +// For more information, see Managing Load with Time-based and Load-based Instances +// (http://docs.aws.amazon.com/opsworks/latest/userguide/workinginstances-autoscaling.html). +// +// Required Permissions: To use this action, an IAM user must have a Manage +// permissions level for the stack, or an attached policy that explicitly grants +// permissions. For more information on user permissions, see Managing User +// Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). +func (c *OpsWorks) SetTimeBasedAutoScaling(input *SetTimeBasedAutoScalingInput) (*SetTimeBasedAutoScalingOutput, error) { + req, out := c.SetTimeBasedAutoScalingRequest(input) + err := req.Send() + return out, err +} + +const opStartInstance = "StartInstance" + +// StartInstanceRequest generates a request for the StartInstance operation. +func (c *OpsWorks) StartInstanceRequest(input *StartInstanceInput) (req *request.Request, output *StartInstanceOutput) { + op := &request.Operation{ + Name: opStartInstance, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &StartInstanceInput{} + } + + req = c.newRequest(op, input, output) + output = &StartInstanceOutput{} + req.Data = output + return +} + +// Starts a specified instance. For more information, see Starting, Stopping, +// and Rebooting Instances (http://docs.aws.amazon.com/opsworks/latest/userguide/workinginstances-starting.html). +// +// Required Permissions: To use this action, an IAM user must have a Manage +// permissions level for the stack, or an attached policy that explicitly grants +// permissions. For more information on user permissions, see Managing User +// Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). +func (c *OpsWorks) StartInstance(input *StartInstanceInput) (*StartInstanceOutput, error) { + req, out := c.StartInstanceRequest(input) + err := req.Send() + return out, err +} + +const opStartStack = "StartStack" + +// StartStackRequest generates a request for the StartStack operation. +func (c *OpsWorks) StartStackRequest(input *StartStackInput) (req *request.Request, output *StartStackOutput) { + op := &request.Operation{ + Name: opStartStack, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &StartStackInput{} + } + + req = c.newRequest(op, input, output) + output = &StartStackOutput{} + req.Data = output + return +} + +// Starts a stack's instances. +// +// Required Permissions: To use this action, an IAM user must have a Manage +// permissions level for the stack, or an attached policy that explicitly grants +// permissions. For more information on user permissions, see Managing User +// Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). 
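+//
+// A minimal usage sketch (hypothetical stack ID):
+//
+//	_, err := svc.StartStack(&opsworks.StartStackInput{
+//		StackId: aws.String("stack-1234"), // hypothetical
+//	})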
+func (c *OpsWorks) StartStack(input *StartStackInput) (*StartStackOutput, error) {
+	req, out := c.StartStackRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+const opStopInstance = "StopInstance"
+
+// StopInstanceRequest generates a request for the StopInstance operation.
+func (c *OpsWorks) StopInstanceRequest(input *StopInstanceInput) (req *request.Request, output *StopInstanceOutput) {
+	op := &request.Operation{
+		Name:       opStopInstance,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &StopInstanceInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &StopInstanceOutput{}
+	req.Data = output
+	return
+}
+
+// Stops a specified instance. When you stop a standard instance, the data disappears
+// and must be reinstalled when you restart the instance. You can stop an Amazon
+// EBS-backed instance without losing data. For more information, see Starting,
+// Stopping, and Rebooting Instances (http://docs.aws.amazon.com/opsworks/latest/userguide/workinginstances-starting.html).
+//
+// Required Permissions: To use this action, an IAM user must have a Manage
+// permissions level for the stack, or an attached policy that explicitly grants
+// permissions. For more information on user permissions, see Managing User
+// Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html).
+func (c *OpsWorks) StopInstance(input *StopInstanceInput) (*StopInstanceOutput, error) {
+	req, out := c.StopInstanceRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+const opStopStack = "StopStack"
+
+// StopStackRequest generates a request for the StopStack operation.
+func (c *OpsWorks) StopStackRequest(input *StopStackInput) (req *request.Request, output *StopStackOutput) {
+	op := &request.Operation{
+		Name:       opStopStack,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &StopStackInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &StopStackOutput{}
+	req.Data = output
+	return
+}
+
+// Stops a specified stack.
+//
+// Required Permissions: To use this action, an IAM user must have a Manage
+// permissions level for the stack, or an attached policy that explicitly grants
+// permissions. For more information on user permissions, see Managing User
+// Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html).
+func (c *OpsWorks) StopStack(input *StopStackInput) (*StopStackOutput, error) {
+	req, out := c.StopStackRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+const opUnassignInstance = "UnassignInstance"
+
+// UnassignInstanceRequest generates a request for the UnassignInstance operation.
+func (c *OpsWorks) UnassignInstanceRequest(input *UnassignInstanceInput) (req *request.Request, output *UnassignInstanceOutput) {
+	op := &request.Operation{
+		Name:       opUnassignInstance,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &UnassignInstanceInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &UnassignInstanceOutput{}
+	req.Data = output
+	return
+}
+
+// Unassigns a registered instance from all of its layers. The instance remains
+// in the stack as an unassigned instance and can be assigned to another layer,
+// as needed. You cannot use this action with instances that were created with
+// AWS OpsWorks.
+//
+// Required Permissions: To use this action, an IAM user must have a Manage
+// permissions level for the stack or an attached policy that explicitly grants
+// permissions.
For more information on user permissions, see Managing User +// Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). +func (c *OpsWorks) UnassignInstance(input *UnassignInstanceInput) (*UnassignInstanceOutput, error) { + req, out := c.UnassignInstanceRequest(input) + err := req.Send() + return out, err +} + +const opUnassignVolume = "UnassignVolume" + +// UnassignVolumeRequest generates a request for the UnassignVolume operation. +func (c *OpsWorks) UnassignVolumeRequest(input *UnassignVolumeInput) (req *request.Request, output *UnassignVolumeOutput) { + op := &request.Operation{ + Name: opUnassignVolume, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UnassignVolumeInput{} + } + + req = c.newRequest(op, input, output) + output = &UnassignVolumeOutput{} + req.Data = output + return +} + +// Unassigns an assigned Amazon EBS volume. The volume remains registered with +// the stack. For more information, see Resource Management (http://docs.aws.amazon.com/opsworks/latest/userguide/resources.html). +// +// Required Permissions: To use this action, an IAM user must have a Manage +// permissions level for the stack, or an attached policy that explicitly grants +// permissions. For more information on user permissions, see Managing User +// Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). +func (c *OpsWorks) UnassignVolume(input *UnassignVolumeInput) (*UnassignVolumeOutput, error) { + req, out := c.UnassignVolumeRequest(input) + err := req.Send() + return out, err +} + +const opUpdateApp = "UpdateApp" + +// UpdateAppRequest generates a request for the UpdateApp operation. +func (c *OpsWorks) UpdateAppRequest(input *UpdateAppInput) (req *request.Request, output *UpdateAppOutput) { + op := &request.Operation{ + Name: opUpdateApp, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UpdateAppInput{} + } + + req = c.newRequest(op, input, output) + output = &UpdateAppOutput{} + req.Data = output + return +} + +// Updates a specified app. +// +// Required Permissions: To use this action, an IAM user must have a Deploy +// or Manage permissions level for the stack, or an attached policy that explicitly +// grants permissions. For more information on user permissions, see Managing +// User Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). +func (c *OpsWorks) UpdateApp(input *UpdateAppInput) (*UpdateAppOutput, error) { + req, out := c.UpdateAppRequest(input) + err := req.Send() + return out, err +} + +const opUpdateElasticIp = "UpdateElasticIp" + +// UpdateElasticIpRequest generates a request for the UpdateElasticIp operation. +func (c *OpsWorks) UpdateElasticIpRequest(input *UpdateElasticIpInput) (req *request.Request, output *UpdateElasticIpOutput) { + op := &request.Operation{ + Name: opUpdateElasticIp, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UpdateElasticIpInput{} + } + + req = c.newRequest(op, input, output) + output = &UpdateElasticIpOutput{} + req.Data = output + return +} + +// Updates a registered Elastic IP address's name. For more information, see +// Resource Management (http://docs.aws.amazon.com/opsworks/latest/userguide/resources.html). +// +// Required Permissions: To use this action, an IAM user must have a Manage +// permissions level for the stack, or an attached policy that explicitly grants +// permissions. 
For more information on user permissions, see Managing User +// Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). +func (c *OpsWorks) UpdateElasticIp(input *UpdateElasticIpInput) (*UpdateElasticIpOutput, error) { + req, out := c.UpdateElasticIpRequest(input) + err := req.Send() + return out, err +} + +const opUpdateInstance = "UpdateInstance" + +// UpdateInstanceRequest generates a request for the UpdateInstance operation. +func (c *OpsWorks) UpdateInstanceRequest(input *UpdateInstanceInput) (req *request.Request, output *UpdateInstanceOutput) { + op := &request.Operation{ + Name: opUpdateInstance, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UpdateInstanceInput{} + } + + req = c.newRequest(op, input, output) + output = &UpdateInstanceOutput{} + req.Data = output + return +} + +// Updates a specified instance. +// +// Required Permissions: To use this action, an IAM user must have a Manage +// permissions level for the stack, or an attached policy that explicitly grants +// permissions. For more information on user permissions, see Managing User +// Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). +func (c *OpsWorks) UpdateInstance(input *UpdateInstanceInput) (*UpdateInstanceOutput, error) { + req, out := c.UpdateInstanceRequest(input) + err := req.Send() + return out, err +} + +const opUpdateLayer = "UpdateLayer" + +// UpdateLayerRequest generates a request for the UpdateLayer operation. +func (c *OpsWorks) UpdateLayerRequest(input *UpdateLayerInput) (req *request.Request, output *UpdateLayerOutput) { + op := &request.Operation{ + Name: opUpdateLayer, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UpdateLayerInput{} + } + + req = c.newRequest(op, input, output) + output = &UpdateLayerOutput{} + req.Data = output + return +} + +// Updates a specified layer. +// +// Required Permissions: To use this action, an IAM user must have a Manage +// permissions level for the stack, or an attached policy that explicitly grants +// permissions. For more information on user permissions, see Managing User +// Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). +func (c *OpsWorks) UpdateLayer(input *UpdateLayerInput) (*UpdateLayerOutput, error) { + req, out := c.UpdateLayerRequest(input) + err := req.Send() + return out, err +} + +const opUpdateMyUserProfile = "UpdateMyUserProfile" + +// UpdateMyUserProfileRequest generates a request for the UpdateMyUserProfile operation. +func (c *OpsWorks) UpdateMyUserProfileRequest(input *UpdateMyUserProfileInput) (req *request.Request, output *UpdateMyUserProfileOutput) { + op := &request.Operation{ + Name: opUpdateMyUserProfile, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UpdateMyUserProfileInput{} + } + + req = c.newRequest(op, input, output) + output = &UpdateMyUserProfileOutput{} + req.Data = output + return +} + +// Updates a user's SSH public key. +// +// Required Permissions: To use this action, an IAM user must have self-management +// enabled or an attached policy that explicitly grants permissions. For more +// information on user permissions, see Managing User Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). 
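+//
+// A minimal usage sketch (not from the upstream docs; assumes the opsworks
+// client svc from the StartStack sketch earlier in this file, and the key
+// value is a placeholder):
+//
+//	_, err := svc.UpdateMyUserProfile(&opsworks.UpdateMyUserProfileInput{
+//		SshPublicKey: aws.String("ssh-rsa AAAA... user@example.com"), // placeholder key
+//	})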
+func (c *OpsWorks) UpdateMyUserProfile(input *UpdateMyUserProfileInput) (*UpdateMyUserProfileOutput, error) { + req, out := c.UpdateMyUserProfileRequest(input) + err := req.Send() + return out, err +} + +const opUpdateRdsDbInstance = "UpdateRdsDbInstance" + +// UpdateRdsDbInstanceRequest generates a request for the UpdateRdsDbInstance operation. +func (c *OpsWorks) UpdateRdsDbInstanceRequest(input *UpdateRdsDbInstanceInput) (req *request.Request, output *UpdateRdsDbInstanceOutput) { + op := &request.Operation{ + Name: opUpdateRdsDbInstance, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UpdateRdsDbInstanceInput{} + } + + req = c.newRequest(op, input, output) + output = &UpdateRdsDbInstanceOutput{} + req.Data = output + return +} + +// Updates an Amazon RDS instance. +// +// Required Permissions: To use this action, an IAM user must have a Manage +// permissions level for the stack, or an attached policy that explicitly grants +// permissions. For more information on user permissions, see Managing User +// Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). +func (c *OpsWorks) UpdateRdsDbInstance(input *UpdateRdsDbInstanceInput) (*UpdateRdsDbInstanceOutput, error) { + req, out := c.UpdateRdsDbInstanceRequest(input) + err := req.Send() + return out, err +} + +const opUpdateStack = "UpdateStack" + +// UpdateStackRequest generates a request for the UpdateStack operation. +func (c *OpsWorks) UpdateStackRequest(input *UpdateStackInput) (req *request.Request, output *UpdateStackOutput) { + op := &request.Operation{ + Name: opUpdateStack, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UpdateStackInput{} + } + + req = c.newRequest(op, input, output) + output = &UpdateStackOutput{} + req.Data = output + return +} + +// Updates a specified stack. +// +// Required Permissions: To use this action, an IAM user must have a Manage +// permissions level for the stack, or an attached policy that explicitly grants +// permissions. For more information on user permissions, see Managing User +// Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). +func (c *OpsWorks) UpdateStack(input *UpdateStackInput) (*UpdateStackOutput, error) { + req, out := c.UpdateStackRequest(input) + err := req.Send() + return out, err +} + +const opUpdateUserProfile = "UpdateUserProfile" + +// UpdateUserProfileRequest generates a request for the UpdateUserProfile operation. +func (c *OpsWorks) UpdateUserProfileRequest(input *UpdateUserProfileInput) (req *request.Request, output *UpdateUserProfileOutput) { + op := &request.Operation{ + Name: opUpdateUserProfile, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UpdateUserProfileInput{} + } + + req = c.newRequest(op, input, output) + output = &UpdateUserProfileOutput{} + req.Data = output + return +} + +// Updates a specified user profile. +// +// Required Permissions: To use this action, an IAM user must have an attached +// policy that explicitly grants permissions. For more information on user permissions, +// see Managing User Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). 
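+//
+// A minimal usage sketch (not from the upstream docs; assumes the opsworks
+// client svc from the StartStack sketch earlier in this file, and the IAM
+// ARN is a placeholder):
+//
+//	_, err := svc.UpdateUserProfile(&opsworks.UpdateUserProfileInput{
+//		IamUserArn:          aws.String("arn:aws:iam::111122223333:user/example"), // placeholder ARN
+//		AllowSelfManagement: aws.Bool(true),
+//	})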
+func (c *OpsWorks) UpdateUserProfile(input *UpdateUserProfileInput) (*UpdateUserProfileOutput, error) { + req, out := c.UpdateUserProfileRequest(input) + err := req.Send() + return out, err +} + +const opUpdateVolume = "UpdateVolume" + +// UpdateVolumeRequest generates a request for the UpdateVolume operation. +func (c *OpsWorks) UpdateVolumeRequest(input *UpdateVolumeInput) (req *request.Request, output *UpdateVolumeOutput) { + op := &request.Operation{ + Name: opUpdateVolume, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UpdateVolumeInput{} + } + + req = c.newRequest(op, input, output) + output = &UpdateVolumeOutput{} + req.Data = output + return +} + +// Updates an Amazon EBS volume's name or mount point. For more information, +// see Resource Management (http://docs.aws.amazon.com/opsworks/latest/userguide/resources.html). +// +// Required Permissions: To use this action, an IAM user must have a Manage +// permissions level for the stack, or an attached policy that explicitly grants +// permissions. For more information on user permissions, see Managing User +// Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). +func (c *OpsWorks) UpdateVolume(input *UpdateVolumeInput) (*UpdateVolumeOutput, error) { + req, out := c.UpdateVolumeRequest(input) + err := req.Send() + return out, err +} + +// Describes an agent version. +type AgentVersion struct { + _ struct{} `type:"structure"` + + // The configuration manager. + ConfigurationManager *StackConfigurationManager `type:"structure"` + + // The agent version. + Version *string `type:"string"` +} + +// String returns the string representation +func (s AgentVersion) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AgentVersion) GoString() string { + return s.String() +} + +// A description of the app. +type App struct { + _ struct{} `type:"structure"` + + // The app ID. + AppId *string `type:"string"` + + // A Source object that describes the app repository. + AppSource *Source `type:"structure"` + + // The stack attributes. + Attributes map[string]*string `type:"map"` + + // When the app was created. + CreatedAt *string `type:"string"` + + // The app's data sources. + DataSources []*DataSource `type:"list"` + + // A description of the app. + Description *string `type:"string"` + + // The app vhost settings with multiple domains separated by commas. For example: + // 'www.example.com, example.com' + Domains []*string `type:"list"` + + // Whether to enable SSL for the app. + EnableSsl *bool `type:"boolean"` + + // An array of EnvironmentVariable objects that specify environment variables + // to be associated with the app. After you deploy the app, these variables + // are defined on the associated app server instances. For more information, + // see Environment Variables (http://docs.aws.amazon.com/opsworks/latest/userguide/workingapps-creating.html#workingapps-creating-environment). + // + // There is no specific limit on the number of environment variables. However, + // the size of the associated data structure - which includes the variables' + // names, values, and protected flag values - cannot exceed 10 KB (10240 Bytes). + // This limit should accommodate most if not all use cases, but if you do exceed + // it, you will cause an exception (API) with an "Environment: is too large + // (maximum is 10KB)" message. + Environment []*EnvironmentVariable `type:"list"` + + // The app name. 
+ Name *string `type:"string"` + + // The app's short name. + Shortname *string `type:"string"` + + // An SslConfiguration object with the SSL configuration. + SslConfiguration *SslConfiguration `type:"structure"` + + // The app stack ID. + StackId *string `type:"string"` + + // The app type. + Type *string `type:"string" enum:"AppType"` +} + +// String returns the string representation +func (s App) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s App) GoString() string { + return s.String() +} + +type AssignInstanceInput struct { + _ struct{} `type:"structure"` + + // The instance ID. + InstanceId *string `type:"string" required:"true"` + + // The layer ID, which must correspond to a custom layer. You cannot assign + // a registered instance to a built-in layer. + LayerIds []*string `type:"list" required:"true"` +} + +// String returns the string representation +func (s AssignInstanceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AssignInstanceInput) GoString() string { + return s.String() +} + +type AssignInstanceOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s AssignInstanceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AssignInstanceOutput) GoString() string { + return s.String() +} + +type AssignVolumeInput struct { + _ struct{} `type:"structure"` + + // The instance ID. + InstanceId *string `type:"string"` + + // The volume ID. + VolumeId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s AssignVolumeInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AssignVolumeInput) GoString() string { + return s.String() +} + +type AssignVolumeOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s AssignVolumeOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AssignVolumeOutput) GoString() string { + return s.String() +} + +type AssociateElasticIpInput struct { + _ struct{} `type:"structure"` + + // The Elastic IP address. + ElasticIp *string `type:"string" required:"true"` + + // The instance ID. + InstanceId *string `type:"string"` +} + +// String returns the string representation +func (s AssociateElasticIpInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AssociateElasticIpInput) GoString() string { + return s.String() +} + +type AssociateElasticIpOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s AssociateElasticIpOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AssociateElasticIpOutput) GoString() string { + return s.String() +} + +type AttachElasticLoadBalancerInput struct { + _ struct{} `type:"structure"` + + // The Elastic Load Balancing instance's name. + ElasticLoadBalancerName *string `type:"string" required:"true"` + + // The ID of the layer that the Elastic Load Balancing instance is to be attached + // to. 
+ LayerId *string `type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s AttachElasticLoadBalancerInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s AttachElasticLoadBalancerInput) GoString() string {
+ return s.String()
+}
+
+type AttachElasticLoadBalancerOutput struct {
+ _ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s AttachElasticLoadBalancerOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s AttachElasticLoadBalancerOutput) GoString() string {
+ return s.String()
+}
+
+// Describes a load-based auto scaling upscaling or downscaling threshold configuration,
+// which specifies when AWS OpsWorks starts or stops load-based instances.
+type AutoScalingThresholds struct {
+ _ struct{} `type:"structure"`
+
+ // Custom CloudWatch auto scaling alarms, to be used as thresholds. This parameter
+ // takes a list of up to five alarm names, which are case sensitive and must
+ // be in the same region as the stack.
+ //
+ // To use custom alarms, you must update your service role to allow cloudwatch:DescribeAlarms.
+ // You can either have AWS OpsWorks update the role for you when you first use
+ // this feature or you can edit the role manually. For more information, see
+ // Allowing AWS OpsWorks to Act on Your Behalf (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-servicerole.html).
+ Alarms []*string `type:"list"`
+
+ // The CPU utilization threshold, as a percent of the available CPU. A value
+ // of -1 disables the threshold.
+ CpuThreshold *float64 `type:"double"`
+
+ // The amount of time (in minutes) after a scaling event occurs that AWS OpsWorks
+ // should ignore metrics and suppress additional scaling events. For example,
+ // AWS OpsWorks adds new instances following an upscaling event but the instances
+ // won't start reducing the load until they have been booted and configured.
+ // There is no point in raising additional scaling events during that operation,
+ // which typically takes several minutes. IgnoreMetricsTime allows you to direct
+ // AWS OpsWorks to suppress scaling events long enough to get the new instances
+ // online.
+ IgnoreMetricsTime *int64 `min:"1" type:"integer"`
+
+ // The number of instances to add or remove when the load exceeds a threshold.
+ InstanceCount *int64 `type:"integer"`
+
+ // The load threshold. A value of -1 disables the threshold. For more information
+ // about how load is computed, see Load (computing) (http://en.wikipedia.org/wiki/Load_%28computing%29).
+ LoadThreshold *float64 `type:"double"`
+
+ // The memory utilization threshold, as a percent of the available memory. A
+ // value of -1 disables the threshold.
+ MemoryThreshold *float64 `type:"double"`
+
+ // The amount of time, in minutes, that the load must exceed a threshold before
+ // more instances are added or removed.
+ ThresholdsWaitTime *int64 `min:"1" type:"integer"`
+}
+
+// String returns the string representation
+func (s AutoScalingThresholds) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s AutoScalingThresholds) GoString() string {
+ return s.String()
+}
+
+// Describes a block device mapping. This data type maps directly to the Amazon
+// EC2 BlockDeviceMapping (http://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_BlockDeviceMapping.html)
+// data type.
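+//
+// A minimal construction sketch (not from the upstream docs; EbsBlockDevice
+// and its fields are assumed from the rest of this generated package, and
+// the values are placeholders):
+//
+//	mapping := &opsworks.BlockDeviceMapping{
+//		DeviceName: aws.String("ROOT_DEVICE"),
+//		Ebs: &opsworks.EbsBlockDevice{
+//			VolumeSize:          aws.Int64(20), // size in GiB, placeholder
+//			DeleteOnTermination: aws.Bool(true),
+//		},
+//	}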
+type BlockDeviceMapping struct { + _ struct{} `type:"structure"` + + // The device name that is exposed to the instance, such as /dev/sdh. For the + // root device, you can use the explicit device name or you can set this parameter + // to ROOT_DEVICE and AWS OpsWorks will provide the correct device name. + DeviceName *string `type:"string"` + + // An EBSBlockDevice that defines how to configure an Amazon EBS volume when + // the instance is launched. + Ebs *EbsBlockDevice `type:"structure"` + + // Suppresses the specified device included in the AMI's block device mapping. + NoDevice *string `type:"string"` + + // The virtual device name. For more information, see BlockDeviceMapping (http://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_BlockDeviceMapping.html). + VirtualName *string `type:"string"` +} + +// String returns the string representation +func (s BlockDeviceMapping) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s BlockDeviceMapping) GoString() string { + return s.String() +} + +// Describes the Chef configuration. +type ChefConfiguration struct { + _ struct{} `type:"structure"` + + // The Berkshelf version. + BerkshelfVersion *string `type:"string"` + + // Whether to enable Berkshelf. + ManageBerkshelf *bool `type:"boolean"` +} + +// String returns the string representation +func (s ChefConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ChefConfiguration) GoString() string { + return s.String() +} + +type CloneStackInput struct { + _ struct{} `type:"structure"` + + // The default AWS OpsWorks agent version. You have the following options: + // + // Auto-update - Set this parameter to LATEST. AWS OpsWorks automatically + // installs new agent versions on the stack's instances as soon as they are + // available. Fixed version - Set this parameter to your preferred agent version. + // To update the agent version, you must edit the stack configuration and specify + // a new version. AWS OpsWorks then automatically installs that version on the + // stack's instances. The default setting is LATEST. To specify an agent version, + // you must use the complete version number, not the abbreviated number shown + // on the console. For a list of available agent version numbers, call DescribeAgentVersions. + // + // You can also specify an agent version when you create or update an instance, + // which overrides the stack's default setting. + AgentVersion *string `type:"string"` + + // A list of stack attributes and values as key/value pairs to be added to the + // cloned stack. + Attributes map[string]*string `type:"map"` + + // A ChefConfiguration object that specifies whether to enable Berkshelf and + // the Berkshelf version on Chef 11.10 stacks. For more information, see Create + // a New Stack (http://docs.aws.amazon.com/opsworks/latest/userguide/workingstacks-creating.html). + ChefConfiguration *ChefConfiguration `type:"structure"` + + // A list of source stack app IDs to be included in the cloned stack. + CloneAppIds []*string `type:"list"` + + // Whether to clone the source stack's permissions. + ClonePermissions *bool `type:"boolean"` + + // The configuration manager. When you clone a stack we recommend that you use + // the configuration manager to specify the Chef version: 12, 11.10, or 11.4 + // for Linux stacks, or 12.2 for Windows stacks. The default value for Linux + // stacks is currently 11.4. 
+ ConfigurationManager *StackConfigurationManager `type:"structure"` + + // Contains the information required to retrieve an app or cookbook from a repository. + // For more information, see Creating Apps (http://docs.aws.amazon.com/opsworks/latest/userguide/workingapps-creating.html) + // or Custom Recipes and Cookbooks (http://docs.aws.amazon.com/opsworks/latest/userguide/workingcookbook.html). + CustomCookbooksSource *Source `type:"structure"` + + // A string that contains user-defined, custom JSON. It is used to override + // the corresponding default stack configuration JSON values. The string should + // be in the following format and must escape characters such as '"': + // + // "{\"key1\": \"value1\", \"key2\": \"value2\",...}" + // + // For more information on custom JSON, see Use Custom JSON to Modify the Stack + // Configuration Attributes (http://docs.aws.amazon.com/opsworks/latest/userguide/workingstacks-json.html) + CustomJson *string `type:"string"` + + // The cloned stack's default Availability Zone, which must be in the specified + // region. For more information, see Regions and Endpoints (http://docs.aws.amazon.com/general/latest/gr/rande.html). + // If you also specify a value for DefaultSubnetId, the subnet must be in the + // same zone. For more information, see the VpcId parameter description. + DefaultAvailabilityZone *string `type:"string"` + + // The Amazon Resource Name (ARN) of an IAM profile that is the default profile + // for all of the stack's EC2 instances. For more information about IAM ARNs, + // see Using Identifiers (http://docs.aws.amazon.com/IAM/latest/UserGuide/Using_Identifiers.html). + DefaultInstanceProfileArn *string `type:"string"` + + // The stack's operating system, which must be set to one of the following. + // + // A supported Linux operating system: An Amazon Linux version, such as Amazon + // Linux 2015.03, Red Hat Enterprise Linux 7, Ubuntu 12.04 LTS, or Ubuntu 14.04 + // LTS. Microsoft Windows Server 2012 R2 Base. A custom AMI: Custom. You specify + // the custom AMI you want to use when you create instances. For more information + // on how to use custom AMIs with OpsWorks, see Using Custom AMIs (http://docs.aws.amazon.com/opsworks/latest/userguide/workinginstances-custom-ami.html). + // The default option is the parent stack's operating system. For more information + // on the supported operating systems, see AWS OpsWorks Operating Systems (http://docs.aws.amazon.com/opsworks/latest/userguide/workinginstances-os.html). + // + // You can specify a different Linux operating system for the cloned stack, + // but you cannot change from Linux to Windows or Windows to Linux. + DefaultOs *string `type:"string"` + + // The default root device type. This value is used by default for all instances + // in the cloned stack, but you can override it when you create an instance. + // For more information, see Storage for the Root Device (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ComponentsAMIs.html#storage-for-the-root-device). + DefaultRootDeviceType *string `type:"string" enum:"RootDeviceType"` + + // A default Amazon EC2 key pair name. The default value is none. If you specify + // a key pair name, AWS OpsWorks installs the public key on the instance and + // you can use the private key with an SSH client to log in to the instance. 
+ // For more information, see Using SSH to Communicate with an Instance (http://docs.aws.amazon.com/opsworks/latest/userguide/workinginstances-ssh.html)
+ // and Managing SSH Access (http://docs.aws.amazon.com/opsworks/latest/userguide/security-ssh-access.html).
+ // You can override this setting by specifying a different key pair, or no key
+ // pair, when you create an instance (http://docs.aws.amazon.com/opsworks/latest/userguide/workinginstances-add.html).
+ DefaultSshKeyName *string `type:"string"`
+
+ // The stack's default VPC subnet ID. This parameter is required if you specify
+ // a value for the VpcId parameter. All instances are launched into this subnet
+ // unless you specify otherwise when you create the instance. If you also specify
+ // a value for DefaultAvailabilityZone, the subnet must be in that zone. For
+ // information on default values and when this parameter is required, see the
+ // VpcId parameter description.
+ DefaultSubnetId *string `type:"string"`
+
+ // The stack's host name theme, with spaces replaced by underscores. The
+ // theme is used to generate host names for the stack's instances. By default,
+ // HostnameTheme is set to Layer_Dependent, which creates host names by appending
+ // integers to the layer's short name. The other themes are:
+ //
+ // Baked_Goods Clouds Europe_Cities Fruits Greek_Deities Legendary_creatures_from_Japan
+ // Planets_and_Moons Roman_Deities Scottish_Islands US_Cities Wild_Cats
+ // To obtain a generated host name, call GetHostNameSuggestion, which returns
+ // a host name based on the current theme.
+ HostnameTheme *string `type:"string"`
+
+ // The cloned stack name.
+ Name *string `type:"string"`
+
+ // The cloned stack AWS region, such as "us-east-1". For more information about
+ // AWS regions, see Regions and Endpoints (http://docs.aws.amazon.com/general/latest/gr/rande.html).
+ Region *string `type:"string"`
+
+ // The stack AWS Identity and Access Management (IAM) role, which allows AWS
+ // OpsWorks to work with AWS resources on your behalf. You must set this parameter
+ // to the Amazon Resource Name (ARN) for an existing IAM role. If you create
+ // a stack by using the AWS OpsWorks console, it creates the role for you. You
+ // can obtain an existing stack's IAM ARN programmatically by calling DescribePermissions.
+ // For more information about IAM ARNs, see Using Identifiers (http://docs.aws.amazon.com/IAM/latest/UserGuide/Using_Identifiers.html).
+ //
+ // You must set this parameter to a valid service role ARN or the action will
+ // fail; there is no default value. You can specify the source stack's service
+ // role ARN, if you prefer, but you must do so explicitly.
+ ServiceRoleArn *string `type:"string" required:"true"`
+
+ // The source stack ID.
+ SourceStackId *string `type:"string" required:"true"`
+
+ // Whether to use custom cookbooks.
+ UseCustomCookbooks *bool `type:"boolean"`
+
+ // Whether to associate the AWS OpsWorks built-in security groups with the stack's
+ // layers.
+ //
+ // AWS OpsWorks provides a standard set of built-in security groups, one for
+ // each layer, which are associated with layers by default. With UseOpsworksSecurityGroups
+ // you can instead provide your own custom security groups. UseOpsworksSecurityGroups
+ // has the following settings:
+ //
+ // True - AWS OpsWorks automatically associates the appropriate built-in security
+ // group with each layer (default setting).
You can associate additional security + // groups with a layer after you create it but you cannot delete the built-in + // security group. False - AWS OpsWorks does not associate built-in security + // groups with layers. You must create appropriate Amazon Elastic Compute Cloud + // (Amazon EC2) security groups and associate a security group with each layer + // that you create. However, you can still manually associate a built-in security + // group with a layer on creation; custom security groups are required only + // for those layers that need custom settings. For more information, see Create + // a New Stack (http://docs.aws.amazon.com/opsworks/latest/userguide/workingstacks-creating.html). + UseOpsworksSecurityGroups *bool `type:"boolean"` + + // The ID of the VPC that the cloned stack is to be launched into. It must be + // in the specified region. All instances are launched into this VPC, and you + // cannot change the ID later. + // + // If your account supports EC2 Classic, the default value is no VPC. If your + // account does not support EC2 Classic, the default value is the default VPC + // for the specified region. If the VPC ID corresponds to a default VPC and + // you have specified either the DefaultAvailabilityZone or the DefaultSubnetId + // parameter only, AWS OpsWorks infers the value of the other parameter. If + // you specify neither parameter, AWS OpsWorks sets these parameters to the + // first valid Availability Zone for the specified region and the corresponding + // default VPC subnet ID, respectively. + // + // If you specify a nondefault VPC ID, note the following: + // + // It must belong to a VPC in your account that is in the specified region. + // You must specify a value for DefaultSubnetId. For more information on how + // to use AWS OpsWorks with a VPC, see Running a Stack in a VPC (http://docs.aws.amazon.com/opsworks/latest/userguide/workingstacks-vpc.html). + // For more information on default VPC and EC2 Classic, see Supported Platforms + // (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-supported-platforms.html). + VpcId *string `type:"string"` +} + +// String returns the string representation +func (s CloneStackInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CloneStackInput) GoString() string { + return s.String() +} + +// Contains the response to a CloneStack request. +type CloneStackOutput struct { + _ struct{} `type:"structure"` + + // The cloned stack ID. + StackId *string `type:"string"` +} + +// String returns the string representation +func (s CloneStackOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CloneStackOutput) GoString() string { + return s.String() +} + +// Describes a command. +type Command struct { + _ struct{} `type:"structure"` + + // Date and time when the command was acknowledged. + AcknowledgedAt *string `type:"string"` + + // The command ID. + CommandId *string `type:"string"` + + // Date when the command completed. + CompletedAt *string `type:"string"` + + // Date and time when the command was run. + CreatedAt *string `type:"string"` + + // The command deployment ID. + DeploymentId *string `type:"string"` + + // The command exit code. + ExitCode *int64 `type:"integer"` + + // The ID of the instance where the command was executed. + InstanceId *string `type:"string"` + + // The URL of the command log. 
+ LogUrl *string `type:"string"` + + // The command status: + // + // failed successful skipped pending + Status *string `type:"string"` + + // The command type: + // + // deploy rollback start stop restart undeploy update_dependencies + // install_dependencies update_custom_cookbooks execute_recipes + Type *string `type:"string"` +} + +// String returns the string representation +func (s Command) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Command) GoString() string { + return s.String() +} + +type CreateAppInput struct { + _ struct{} `type:"structure"` + + // A Source object that specifies the app repository. + AppSource *Source `type:"structure"` + + // One or more user-defined key/value pairs to be added to the stack attributes. + Attributes map[string]*string `type:"map"` + + // The app's data source. + DataSources []*DataSource `type:"list"` + + // A description of the app. + Description *string `type:"string"` + + // The app virtual host settings, with multiple domains separated by commas. + // For example: 'www.example.com, example.com' + Domains []*string `type:"list"` + + // Whether to enable SSL for the app. + EnableSsl *bool `type:"boolean"` + + // An array of EnvironmentVariable objects that specify environment variables + // to be associated with the app. After you deploy the app, these variables + // are defined on the associated app server instance. For more information, + // see Environment Variables (http://docs.aws.amazon.com/opsworks/latest/userguide/workingapps-creating.html#workingapps-creating-environment). + // + // There is no specific limit on the number of environment variables. However, + // the size of the associated data structure - which includes the variables' + // names, values, and protected flag values - cannot exceed 10 KB (10240 Bytes). + // This limit should accommodate most if not all use cases. Exceeding it will + // cause an exception with the message, "Environment: is too large (maximum + // is 10KB)." + // + // This parameter is supported only by Chef 11.10 stacks. If you have specified + // one or more environment variables, you cannot modify the stack's Chef version. + Environment []*EnvironmentVariable `type:"list"` + + // The app name. + Name *string `type:"string" required:"true"` + + // The app's short name. + Shortname *string `type:"string"` + + // An SslConfiguration object with the SSL configuration. + SslConfiguration *SslConfiguration `type:"structure"` + + // The stack ID. + StackId *string `type:"string" required:"true"` + + // The app type. Each supported type is associated with a particular layer. + // For example, PHP applications are associated with a PHP layer. AWS OpsWorks + // deploys an application to those instances that are members of the corresponding + // layer. If your app isn't one of the standard types, or you prefer to implement + // your own Deploy recipes, specify other. + Type *string `type:"string" required:"true" enum:"AppType"` +} + +// String returns the string representation +func (s CreateAppInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateAppInput) GoString() string { + return s.String() +} + +// Contains the response to a CreateApp request. +type CreateAppOutput struct { + _ struct{} `type:"structure"` + + // The app ID. 
+ AppId *string `type:"string"` +} + +// String returns the string representation +func (s CreateAppOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateAppOutput) GoString() string { + return s.String() +} + +type CreateDeploymentInput struct { + _ struct{} `type:"structure"` + + // The app ID. This parameter is required for app deployments, but not for other + // deployment commands. + AppId *string `type:"string"` + + // A DeploymentCommand object that specifies the deployment command and any + // associated arguments. + Command *DeploymentCommand `type:"structure" required:"true"` + + // A user-defined comment. + Comment *string `type:"string"` + + // A string that contains user-defined, custom JSON. It is used to override + // the corresponding default stack configuration JSON values. The string should + // be in the following format and must escape characters such as '"': + // + // "{\"key1\": \"value1\", \"key2\": \"value2\",...}" + // + // For more information on custom JSON, see Use Custom JSON to Modify the Stack + // Configuration Attributes (http://docs.aws.amazon.com/opsworks/latest/userguide/workingstacks-json.html). + CustomJson *string `type:"string"` + + // The instance IDs for the deployment targets. + InstanceIds []*string `type:"list"` + + // The stack ID. + StackId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s CreateDeploymentInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateDeploymentInput) GoString() string { + return s.String() +} + +// Contains the response to a CreateDeployment request. +type CreateDeploymentOutput struct { + _ struct{} `type:"structure"` + + // The deployment ID, which can be used with other requests to identify the + // deployment. + DeploymentId *string `type:"string"` +} + +// String returns the string representation +func (s CreateDeploymentOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateDeploymentOutput) GoString() string { + return s.String() +} + +type CreateInstanceInput struct { + _ struct{} `type:"structure"` + + // The default AWS OpsWorks agent version. You have the following options: + // + // INHERIT - Use the stack's default agent version setting. version_number + // - Use the specified agent version. This value overrides the stack's default + // setting. To update the agent version, edit the instance configuration and + // specify a new version. AWS OpsWorks then automatically installs that version + // on the instance. The default setting is INHERIT. To specify an agent version, + // you must use the complete version number, not the abbreviated number shown + // on the console. For a list of available agent version numbers, call DescribeAgentVersions. + AgentVersion *string `type:"string"` + + // A custom AMI ID to be used to create the instance. The AMI should be based + // on one of the supported operating systems. For more information, see Using + // Custom AMIs (http://docs.aws.amazon.com/opsworks/latest/userguide/workinginstances-custom-ami.html). + // + // If you specify a custom AMI, you must set Os to Custom. + AmiId *string `type:"string"` + + // The instance architecture. The default option is x86_64. Instance types do + // not necessarily support both architectures. 
For a list of the architectures
+ // that are supported by the different instance types, see Instance Families
+ // and Types (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-types.html).
+ Architecture *string `type:"string" enum:"Architecture"`
+
+ // For load-based or time-based instances, the type. Windows stacks can use
+ // only time-based instances.
+ AutoScalingType *string `type:"string" enum:"AutoScalingType"`
+
+ // The instance Availability Zone. For more information, see Regions and Endpoints
+ // (http://docs.aws.amazon.com/general/latest/gr/rande.html).
+ AvailabilityZone *string `type:"string"`
+
+ // An array of BlockDeviceMapping objects that specify the instance's block
+ // devices. For more information, see Block Device Mapping (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/block-device-mapping-concepts.html).
+ // Note that block device mappings are not supported for custom AMIs.
+ BlockDeviceMappings []*BlockDeviceMapping `type:"list"`
+
+ // Whether to create an Amazon EBS-optimized instance.
+ EbsOptimized *bool `type:"boolean"`
+
+ // The instance host name.
+ Hostname *string `type:"string"`
+
+ // Whether to install operating system and package updates when the instance
+ // boots. The default value is true. To control when updates are installed,
+ // set this value to false. You must then update your instances manually by
+ // using CreateDeployment to run the update_dependencies stack command or by
+ // manually running yum (Amazon Linux) or apt-get (Ubuntu) on the instances.
+ //
+ // We strongly recommend using the default value of true to ensure that your
+ // instances have the latest security updates.
+ InstallUpdatesOnBoot *bool `type:"boolean"`
+
+ // The instance type, such as t2.micro. For a list of supported instance types,
+ // open the stack in the console, choose Instances, and choose + Instance. The
+ // Size list contains the currently supported types. For more information, see
+ // Instance Families and Types (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-types.html).
+ // The parameter values that you use to specify the various types are in the
+ // API Name column of the Available Instance Types table.
+ InstanceType *string `type:"string" required:"true"`
+
+ // An array that contains the instance's layer IDs.
+ LayerIds []*string `type:"list" required:"true"`
+
+ // The instance's operating system, which must be set to one of the following.
+ //
+ // A supported Linux operating system: An Amazon Linux version, such as Amazon
+ // Linux 2015.03, Red Hat Enterprise Linux 7, Ubuntu 12.04 LTS, or Ubuntu 14.04
+ // LTS. Microsoft Windows Server 2012 R2 Base. A custom AMI: Custom. For more
+ // information on the supported operating systems, see AWS OpsWorks Operating
+ // Systems (http://docs.aws.amazon.com/opsworks/latest/userguide/workinginstances-os.html).
+ //
+ // The default option is the current Amazon Linux version. If you set this
+ // parameter to Custom, you must use the CreateInstance action's AmiId parameter
+ // to specify the custom AMI that you want to use. Block device mappings are
+ // not supported if the value is Custom. For more information on the supported
+ // operating systems, see Operating Systems (http://docs.aws.amazon.com/opsworks/latest/userguide/workinginstances-os.html).
+ // For more information on how to use custom AMIs with AWS OpsWorks, see Using Custom
+ // AMIs (http://docs.aws.amazon.com/opsworks/latest/userguide/workinginstances-custom-ami.html).
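+ //
+ // A minimal sketch of a custom-AMI request (not from the upstream docs;
+ // assumes a configured opsworks client svc, and the IDs are placeholders):
+ //
+ //	_, err := svc.CreateInstance(&opsworks.CreateInstanceInput{
+ //		StackId:      aws.String("STACK_ID"), // placeholder
+ //		LayerIds:     []*string{aws.String("LAYER_ID")},
+ //		InstanceType: aws.String("t2.micro"),
+ //		Os:           aws.String("Custom"),
+ //		AmiId:        aws.String("ami-12345678"), // placeholder
+ //	})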
+ Os *string `type:"string"` + + // The instance root device type. For more information, see Storage for the + // Root Device (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ComponentsAMIs.html#storage-for-the-root-device). + RootDeviceType *string `type:"string" enum:"RootDeviceType"` + + // The instance's Amazon EC2 key-pair name. + SshKeyName *string `type:"string"` + + // The stack ID. + StackId *string `type:"string" required:"true"` + + // The ID of the instance's subnet. If the stack is running in a VPC, you can + // use this parameter to override the stack's default subnet ID value and direct + // AWS OpsWorks to launch the instance in a different subnet. + SubnetId *string `type:"string"` + + // The instance's virtualization type, paravirtual or hvm. + VirtualizationType *string `type:"string"` +} + +// String returns the string representation +func (s CreateInstanceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateInstanceInput) GoString() string { + return s.String() +} + +// Contains the response to a CreateInstance request. +type CreateInstanceOutput struct { + _ struct{} `type:"structure"` + + // The instance ID. + InstanceId *string `type:"string"` +} + +// String returns the string representation +func (s CreateInstanceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateInstanceOutput) GoString() string { + return s.String() +} + +type CreateLayerInput struct { + _ struct{} `type:"structure"` + + // One or more user-defined key-value pairs to be added to the stack attributes. + // + // To create a cluster layer, set the EcsClusterArn attribute to the cluster's + // ARN. + Attributes map[string]*string `type:"map"` + + // Whether to automatically assign an Elastic IP address (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/elastic-ip-addresses-eip.html) + // to the layer's instances. For more information, see How to Edit a Layer (http://docs.aws.amazon.com/opsworks/latest/userguide/workinglayers-basics-edit.html). + AutoAssignElasticIps *bool `type:"boolean"` + + // For stacks that are running in a VPC, whether to automatically assign a public + // IP address to the layer's instances. For more information, see How to Edit + // a Layer (http://docs.aws.amazon.com/opsworks/latest/userguide/workinglayers-basics-edit.html). + AutoAssignPublicIps *bool `type:"boolean"` + + // The ARN of an IAM profile to be used for the layer's EC2 instances. For more + // information about IAM ARNs, see Using Identifiers (http://docs.aws.amazon.com/IAM/latest/UserGuide/Using_Identifiers.html). + CustomInstanceProfileArn *string `type:"string"` + + // A JSON-formatted string containing custom stack configuration and deployment + // attributes to be installed on the layer's instances. For more information, + // see Using Custom JSON (http://docs.aws.amazon.com/opsworks/latest/userguide/workingcookbook-json-override.html). + // This feature is supported as of version 1.7.42 of the AWS CLI. + CustomJson *string `type:"string"` + + // A LayerCustomRecipes object that specifies the layer custom recipes. + CustomRecipes *Recipes `type:"structure"` + + // An array containing the layer custom security group IDs. + CustomSecurityGroupIds []*string `type:"list"` + + // Whether to disable auto healing for the layer. + EnableAutoHealing *bool `type:"boolean"` + + // Whether to install operating system and package updates when the instance + // boots. 
The default value is true. To control when updates are installed, + // set this value to false. You must then update your instances manually by + // using CreateDeployment to run the update_dependencies stack command or by + // manually running yum (Amazon Linux) or apt-get (Ubuntu) on the instances. + // + // To ensure that your instances have the latest security updates, we strongly + // recommend using the default value of true. + InstallUpdatesOnBoot *bool `type:"boolean"` + + // A LifeCycleEventConfiguration object that you can use to configure the Shutdown + // event to specify an execution timeout and enable or disable Elastic Load + // Balancer connection draining. + LifecycleEventConfiguration *LifecycleEventConfiguration `type:"structure"` + + // The layer name, which is used by the console. + Name *string `type:"string" required:"true"` + + // An array of Package objects that describes the layer packages. + Packages []*string `type:"list"` + + // For custom layers only, use this parameter to specify the layer's short name, + // which is used internally by AWS OpsWorks and by Chef recipes. The short name + // is also used as the name for the directory where your app files are installed. + // It can have a maximum of 200 characters, which are limited to the alphanumeric + // characters, '-', '_', and '.'. + // + // The built-in layers' short names are defined by AWS OpsWorks. For more information, + // see the Layer Reference (http://docs.aws.amazon.com/opsworks/latest/userguide/layers.html). + Shortname *string `type:"string" required:"true"` + + // The layer stack ID. + StackId *string `type:"string" required:"true"` + + // The layer type. A stack cannot have more than one built-in layer of the same + // type. It can have any number of custom layers. + Type *string `type:"string" required:"true" enum:"LayerType"` + + // Whether to use Amazon EBS-optimized instances. + UseEbsOptimizedInstances *bool `type:"boolean"` + + // A VolumeConfigurations object that describes the layer's Amazon EBS volumes. + VolumeConfigurations []*VolumeConfiguration `type:"list"` +} + +// String returns the string representation +func (s CreateLayerInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateLayerInput) GoString() string { + return s.String() +} + +// Contains the response to a CreateLayer request. +type CreateLayerOutput struct { + _ struct{} `type:"structure"` + + // The layer ID. + LayerId *string `type:"string"` +} + +// String returns the string representation +func (s CreateLayerOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateLayerOutput) GoString() string { + return s.String() +} + +type CreateStackInput struct { + _ struct{} `type:"structure"` + + // The default AWS OpsWorks agent version. You have the following options: + // + // Auto-update - Set this parameter to LATEST. AWS OpsWorks automatically + // installs new agent versions on the stack's instances as soon as they are + // available. Fixed version - Set this parameter to your preferred agent version. + // To update the agent version, you must edit the stack configuration and specify + // a new version. AWS OpsWorks then automatically installs that version on the + // stack's instances. The default setting is the most recent release of the + // agent. To specify an agent version, you must use the complete version number, + // not the abbreviated number shown on the console. 
For a list of available
+ // agent version numbers, call DescribeAgentVersions.
+ //
+ // You can also specify an agent version when you create or update an instance,
+ // which overrides the stack's default setting.
+ AgentVersion *string `type:"string"`
+
+ // One or more user-defined key-value pairs to be added to the stack attributes.
+ Attributes map[string]*string `type:"map"`
+
+ // A ChefConfiguration object that specifies whether to enable Berkshelf and
+ // the Berkshelf version on Chef 11.10 stacks. For more information, see Create
+ // a New Stack (http://docs.aws.amazon.com/opsworks/latest/userguide/workingstacks-creating.html).
+ ChefConfiguration *ChefConfiguration `type:"structure"`
+
+ // The configuration manager. When you create a stack we recommend that you
+ // use the configuration manager to specify the Chef version: 12, 11.10, or
+ // 11.4 for Linux stacks, or 12.2 for Windows stacks. The default value for
+ // Linux stacks is currently 11.4.
+ ConfigurationManager *StackConfigurationManager `type:"structure"`
+
+ // Contains the information required to retrieve an app or cookbook from a repository.
+ // For more information, see Creating Apps (http://docs.aws.amazon.com/opsworks/latest/userguide/workingapps-creating.html)
+ // or Custom Recipes and Cookbooks (http://docs.aws.amazon.com/opsworks/latest/userguide/workingcookbook.html).
+ CustomCookbooksSource *Source `type:"structure"`
+
+ // A string that contains user-defined, custom JSON. It can be used to override
+ // the corresponding default stack configuration attribute values or to pass
+ // data to recipes. The string should be in the following format and must escape
+ // characters such as '"':
+ //
+ // "{\"key1\": \"value1\", \"key2\": \"value2\",...}"
+ //
+ // For more information on custom JSON, see Use Custom JSON to Modify the Stack
+ // Configuration Attributes (http://docs.aws.amazon.com/opsworks/latest/userguide/workingstacks-json.html).
+ CustomJson *string `type:"string"`
+
+ // The stack's default Availability Zone, which must be in the specified region.
+ // For more information, see Regions and Endpoints (http://docs.aws.amazon.com/general/latest/gr/rande.html).
+ // If you also specify a value for DefaultSubnetId, the subnet must be in the
+ // same zone. For more information, see the VpcId parameter description.
+ DefaultAvailabilityZone *string `type:"string"`
+
+ // The Amazon Resource Name (ARN) of an IAM profile that is the default profile
+ // for all of the stack's EC2 instances. For more information about IAM ARNs,
+ // see Using Identifiers (http://docs.aws.amazon.com/IAM/latest/UserGuide/Using_Identifiers.html).
+ DefaultInstanceProfileArn *string `type:"string" required:"true"`
+
+ // The stack's default operating system, which is installed on every instance
+ // unless you specify a different operating system when you create the instance.
+ // You can specify one of the following.
+ //
+ // A supported Linux operating system: An Amazon Linux version, such as Amazon
+ // Linux 2015.03, Red Hat Enterprise Linux 7, Ubuntu 12.04 LTS, or Ubuntu 14.04
+ // LTS. Microsoft Windows Server 2012 R2 Base. A custom AMI: Custom. You specify
+ // the custom AMI you want to use when you create instances. For more information,
+ // see Using Custom AMIs (http://docs.aws.amazon.com/opsworks/latest/userguide/workinginstances-custom-ami.html).
+ // The default option is the current Amazon Linux version.
For more information + // on the supported operating systems, see AWS OpsWorks Operating Systems (http://docs.aws.amazon.com/opsworks/latest/userguide/workinginstances-os.html). + DefaultOs *string `type:"string"` + + // The default root device type. This value is the default for all instances + // in the stack, but you can override it when you create an instance. The default + // option is instance-store. For more information, see Storage for the Root + // Device (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ComponentsAMIs.html#storage-for-the-root-device). + DefaultRootDeviceType *string `type:"string" enum:"RootDeviceType"` + + // A default Amazon EC2 key pair name. The default value is none. If you specify + // a key pair name, AWS OpsWorks installs the public key on the instance and + // you can use the private key with an SSH client to log in to the instance. + // For more information, see Using SSH to Communicate with an Instance (http://docs.aws.amazon.com/opsworks/latest/userguide/workinginstances-ssh.html) + // and Managing SSH Access (http://docs.aws.amazon.com/opsworks/latest/userguide/security-ssh-access.html). + // You can override this setting by specifying a different key pair, or no key + // pair, when you create an instance (http://docs.aws.amazon.com/opsworks/latest/userguide/workinginstances-add.html). + DefaultSshKeyName *string `type:"string"` + + // The stack's default VPC subnet ID. This parameter is required if you specify + // a value for the VpcId parameter. All instances are launched into this subnet + // unless you specify otherwise when you create the instance. If you also specify + // a value for DefaultAvailabilityZone, the subnet must be in that zone. For + // information on default values and when this parameter is required, see the + // VpcId parameter description. + DefaultSubnetId *string `type:"string"` + + // The stack's host name theme, with spaces replaced by underscores. The theme + // is used to generate host names for the stack's instances. By default, HostnameTheme + // is set to Layer_Dependent, which creates host names by appending integers + // to the layer's short name. The other themes are: + // + // Baked_Goods Clouds Europe_Cities Fruits Greek_Deities Legendary_creatures_from_Japan + // Planets_and_Moons Roman_Deities Scottish_Islands US_Cities Wild_Cats + // To obtain a generated host name, call GetHostNameSuggestion, which returns + // a host name based on the current theme. + HostnameTheme *string `type:"string"` + + // The stack name. + Name *string `type:"string" required:"true"` + + // The stack's AWS region, such as "us-east-1". For more information about Amazon + // regions, see Regions and Endpoints (http://docs.aws.amazon.com/general/latest/gr/rande.html). + Region *string `type:"string" required:"true"` + + // The stack's AWS Identity and Access Management (IAM) role, which allows AWS + // OpsWorks to work with AWS resources on your behalf. You must set this parameter + // to the Amazon Resource Name (ARN) for an existing IAM role. For more information + // about IAM ARNs, see Using Identifiers (http://docs.aws.amazon.com/IAM/latest/UserGuide/Using_Identifiers.html). + ServiceRoleArn *string `type:"string" required:"true"` + + // Whether the stack uses custom cookbooks. + UseCustomCookbooks *bool `type:"boolean"` + + // Whether to associate the AWS OpsWorks built-in security groups with the stack's + // layers. 
+ // + // AWS OpsWorks provides a standard set of built-in security groups, one for + // each layer, which are associated with layers by default. With UseOpsworksSecurityGroups + // you can instead provide your own custom security groups. UseOpsworksSecurityGroups + // has the following settings: + // + // True - AWS OpsWorks automatically associates the appropriate built-in security + // group with each layer (default setting). You can associate additional security + // groups with a layer after you create it, but you cannot delete the built-in + // security group. False - AWS OpsWorks does not associate built-in security + // groups with layers. You must create appropriate EC2 security groups and associate + // a security group with each layer that you create. However, you can still + // manually associate a built-in security group with a layer on creation; custom + // security groups are required only for those layers that need custom settings. + // For more information, see Create a New Stack (http://docs.aws.amazon.com/opsworks/latest/userguide/workingstacks-creating.html). + UseOpsworksSecurityGroups *bool `type:"boolean"` + + // The ID of the VPC that the stack is to be launched into. The VPC must be + // in the stack's region. All instances are launched into this VPC. You cannot + // change the ID later. + // + // If your account supports EC2-Classic, the default value is no VPC. If your + // account does not support EC2-Classic, the default value is the default VPC + // for the specified region. If the VPC ID corresponds to a default VPC and + // you have specified either the DefaultAvailabilityZone or the DefaultSubnetId + // parameter only, AWS OpsWorks infers the value of the other parameter. If + // you specify neither parameter, AWS OpsWorks sets these parameters to the + // first valid Availability Zone for the specified region and the corresponding + // default VPC subnet ID, respectively. + // + // If you specify a nondefault VPC ID, note the following: + // + // It must belong to a VPC in your account that is in the specified region. + // You must specify a value for DefaultSubnetId. For more information on how + // to use AWS OpsWorks with a VPC, see Running a Stack in a VPC (http://docs.aws.amazon.com/opsworks/latest/userguide/workingstacks-vpc.html). + // For more information on default VPC and EC2-Classic, see Supported Platforms + // (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-supported-platforms.html). + VpcId *string `type:"string"` +} + +// String returns the string representation +func (s CreateStackInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateStackInput) GoString() string { + return s.String() +} + +// Contains the response to a CreateStack request. +type CreateStackOutput struct { + _ struct{} `type:"structure"` + + // The stack ID, which is an opaque string that you use to identify the stack + // when performing actions such as DescribeStacks. + StackId *string `type:"string"` +} + +// String returns the string representation +func (s CreateStackOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateStackOutput) GoString() string { + return s.String() +} + +type CreateUserProfileInput struct { + _ struct{} `type:"structure"` + + // Whether users can specify their own SSH public key through the My Settings + // page. 
For more information, see Setting an IAM User's Public SSH Key (http://docs.aws.amazon.com/opsworks/latest/userguide/security-settingsshkey.html). + AllowSelfManagement *bool `type:"boolean"` + + // The user's IAM ARN. + IamUserArn *string `type:"string" required:"true"` + + // The user's public SSH key. + SshPublicKey *string `type:"string"` + + // The user's SSH user name. The allowable characters are [a-z], [A-Z], [0-9], + // '-', and '_'. If the specified name includes other punctuation marks, AWS + // OpsWorks removes them. For example, my.name will be changed to myname. If + // you do not specify an SSH user name, AWS OpsWorks generates one from the + // IAM user name. + SshUsername *string `type:"string"` +} + +// String returns the string representation +func (s CreateUserProfileInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateUserProfileInput) GoString() string { + return s.String() +} + +// Contains the response to a CreateUserProfile request. +type CreateUserProfileOutput struct { + _ struct{} `type:"structure"` + + // The user's IAM ARN. + IamUserArn *string `type:"string"` +} + +// String returns the string representation +func (s CreateUserProfileOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateUserProfileOutput) GoString() string { + return s.String() +} + +// Describes an app's data source. +type DataSource struct { + _ struct{} `type:"structure"` + + // The data source's ARN. + Arn *string `type:"string"` + + // The database name. + DatabaseName *string `type:"string"` + + // The data source's type, AutoSelectOpsworksMysqlInstance, OpsworksMysqlInstance, + // or RdsDbInstance. + Type *string `type:"string"` +} + +// String returns the string representation +func (s DataSource) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DataSource) GoString() string { + return s.String() +} + +type DeleteAppInput struct { + _ struct{} `type:"structure"` + + // The app ID. + AppId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteAppInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteAppInput) GoString() string { + return s.String() +} + +type DeleteAppOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteAppOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteAppOutput) GoString() string { + return s.String() +} + +type DeleteInstanceInput struct { + _ struct{} `type:"structure"` + + // Whether to delete the instance Elastic IP address. + DeleteElasticIp *bool `type:"boolean"` + + // Whether to delete the instance's Amazon EBS volumes. + DeleteVolumes *bool `type:"boolean"` + + // The instance ID. 
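+ //
+ // A minimal usage sketch (svc is an assumed, already-initialized
+ // *opsworks.OpsWorks client; the instance ID below is a placeholder):
+ //
+ //    _, err := svc.DeleteInstance(&opsworks.DeleteInstanceInput{
+ //        InstanceId:    aws.String("instance-id"),
+ //        DeleteVolumes: aws.Bool(true),
+ //    })
+ //    if err != nil {
+ //        // handle the error
+ //    }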
+ InstanceId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteInstanceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteInstanceInput) GoString() string { + return s.String() +} + +type DeleteInstanceOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteInstanceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteInstanceOutput) GoString() string { + return s.String() +} + +type DeleteLayerInput struct { + _ struct{} `type:"structure"` + + // The layer ID. + LayerId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteLayerInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteLayerInput) GoString() string { + return s.String() +} + +type DeleteLayerOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteLayerOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteLayerOutput) GoString() string { + return s.String() +} + +type DeleteStackInput struct { + _ struct{} `type:"structure"` + + // The stack ID. + StackId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteStackInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteStackInput) GoString() string { + return s.String() +} + +type DeleteStackOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteStackOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteStackOutput) GoString() string { + return s.String() +} + +type DeleteUserProfileInput struct { + _ struct{} `type:"structure"` + + // The user's IAM ARN. + IamUserArn *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteUserProfileInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteUserProfileInput) GoString() string { + return s.String() +} + +type DeleteUserProfileOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteUserProfileOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteUserProfileOutput) GoString() string { + return s.String() +} + +// Describes a deployment of a stack or app. +type Deployment struct { + _ struct{} `type:"structure"` + + // The app ID. + AppId *string `type:"string"` + + // Used to specify a stack or deployment command. + Command *DeploymentCommand `type:"structure"` + + // A user-defined comment. + Comment *string `type:"string"` + + // Date when the deployment completed. + CompletedAt *string `type:"string"` + + // Date when the deployment was created. + CreatedAt *string `type:"string"` + + // A string that contains user-defined custom JSON. It can be used to override + // the corresponding default stack configuration attribute values for stack + // or to pass data to recipes. 
The string should be in the following format
+ // and must escape characters such as '"':
+ //
+ // "{\"key1\": \"value1\", \"key2\": \"value2\",...}"
+ //
+ // For more information on custom JSON, see Use Custom JSON to Modify the Stack
+ // Configuration Attributes (http://docs.aws.amazon.com/opsworks/latest/userguide/workingstacks-json.html).
+ CustomJson *string `type:"string"`
+
+ // The deployment ID.
+ DeploymentId *string `type:"string"`
+
+ // The deployment duration.
+ Duration *int64 `type:"integer"`
+
+ // The user's IAM ARN.
+ IamUserArn *string `type:"string"`
+
+ // The IDs of the target instances.
+ InstanceIds []*string `type:"list"`
+
+ // The stack ID.
+ StackId *string `type:"string"`
+
+ // The deployment status:
+ //
+ // running successful failed
+ Status *string `type:"string"`
+}
+
+// String returns the string representation
+func (s Deployment) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s Deployment) GoString() string {
+ return s.String()
+}
+
+// Used to specify a stack or deployment command.
+type DeploymentCommand struct {
+ _ struct{} `type:"structure"`
+
+ // The arguments of those commands that take arguments. It should be set to
+ // a JSON object with the following format:
+ //
+ // {"arg_name1" : ["value1", "value2", ...], "arg_name2" : ["value1", "value2",
+ // ...], ...}
+ //
+ // The update_dependencies command takes two arguments:
+ //
+ // upgrade_os_to - Specifies the desired Amazon Linux version for instances
+ // whose OS you want to upgrade, such as Amazon Linux 2014.09. You must also
+ // set the allow_reboot argument to true. allow_reboot - Specifies whether to
+ // allow AWS OpsWorks to reboot the instances if necessary, after installing
+ // the updates. This argument can be set to either true or false. The default
+ // value is false. For example, to upgrade an instance to Amazon Linux 2014.09,
+ // set Args to the following.
+ //
+ // { "upgrade_os_to":["Amazon Linux 2014.09"], "allow_reboot":["true"] }
+ Args map[string][]*string `type:"map"`
+
+ // Specifies the operation. You can specify only one command.
+ //
+ // For stacks, the following commands are available:
+ //
+ // execute_recipes: Execute one or more recipes. To specify the recipes, set
+ // an Args parameter named recipes to the list of recipes to be executed. For
+ // example, to execute phpapp::appsetup, set Args to {"recipes":["phpapp::appsetup"]}.
+ // install_dependencies: Install the stack's dependencies. update_custom_cookbooks:
+ // Update the stack's custom cookbooks. update_dependencies: Update the stack's
+ // dependencies. The update_dependencies and install_dependencies commands
+ // are supported only for Linux instances. You can run the commands successfully
+ // on Windows instances, but they do nothing. For apps, the following commands
+ // are available:
+ //
+ // deploy: Deploy an app. Ruby on Rails apps have an optional Args parameter
+ // named migrate. Set Args to {"migrate":["true"]} to migrate the database.
+ // The default setting is {"migrate":["false"]}. rollback: Roll the app back
+ // to the previous version. When you update an app, AWS OpsWorks stores the
+ // previous version, up to a maximum of five versions. You can use this command
+ // to roll an app back as many as four versions. start: Start the app's web
+ // or application server. stop: Stop the app's web or application server. restart:
+ // Restart the app's web or application server. undeploy: Undeploy the app.
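+ //
+ // As a sketch, the execute_recipes example above could be issued through
+ // CreateDeployment like this (svc is an assumed, already-initialized
+ // *opsworks.OpsWorks client; the stack ID is a placeholder):
+ //
+ //    _, err := svc.CreateDeployment(&opsworks.CreateDeploymentInput{
+ //        StackId: aws.String("stack-id"),
+ //        Command: &opsworks.DeploymentCommand{
+ //            Name: aws.String("execute_recipes"),
+ //            Args: map[string][]*string{
+ //                "recipes": {aws.String("phpapp::appsetup")},
+ //            },
+ //        },
+ //    })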
+ Name *string `type:"string" required:"true" enum:"DeploymentCommandName"` +} + +// String returns the string representation +func (s DeploymentCommand) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeploymentCommand) GoString() string { + return s.String() +} + +type DeregisterEcsClusterInput struct { + _ struct{} `type:"structure"` + + // The cluster's ARN. + EcsClusterArn *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DeregisterEcsClusterInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeregisterEcsClusterInput) GoString() string { + return s.String() +} + +type DeregisterEcsClusterOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeregisterEcsClusterOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeregisterEcsClusterOutput) GoString() string { + return s.String() +} + +type DeregisterElasticIpInput struct { + _ struct{} `type:"structure"` + + // The Elastic IP address. + ElasticIp *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DeregisterElasticIpInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeregisterElasticIpInput) GoString() string { + return s.String() +} + +type DeregisterElasticIpOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeregisterElasticIpOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeregisterElasticIpOutput) GoString() string { + return s.String() +} + +type DeregisterInstanceInput struct { + _ struct{} `type:"structure"` + + // The instance ID. + InstanceId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DeregisterInstanceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeregisterInstanceInput) GoString() string { + return s.String() +} + +type DeregisterInstanceOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeregisterInstanceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeregisterInstanceOutput) GoString() string { + return s.String() +} + +type DeregisterRdsDbInstanceInput struct { + _ struct{} `type:"structure"` + + // The Amazon RDS instance's ARN. 
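+ //
+ // A minimal usage sketch (svc is an assumed, already-initialized
+ // *opsworks.OpsWorks client; the ARN below is a placeholder):
+ //
+ //    _, err := svc.DeregisterRdsDbInstance(&opsworks.DeregisterRdsDbInstanceInput{
+ //        RdsDbInstanceArn: aws.String("arn:aws:rds:us-west-2:123456789012:db:mydb"),
+ //    })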
+ RdsDbInstanceArn *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DeregisterRdsDbInstanceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeregisterRdsDbInstanceInput) GoString() string { + return s.String() +} + +type DeregisterRdsDbInstanceOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeregisterRdsDbInstanceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeregisterRdsDbInstanceOutput) GoString() string { + return s.String() +} + +type DeregisterVolumeInput struct { + _ struct{} `type:"structure"` + + // The AWS OpsWorks volume ID, which is the GUID that AWS OpsWorks assigned + // to the instance when you registered the volume with the stack, not the Amazon + // EC2 volume ID. + VolumeId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DeregisterVolumeInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeregisterVolumeInput) GoString() string { + return s.String() +} + +type DeregisterVolumeOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeregisterVolumeOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeregisterVolumeOutput) GoString() string { + return s.String() +} + +type DescribeAgentVersionsInput struct { + _ struct{} `type:"structure"` + + // The configuration manager. + ConfigurationManager *StackConfigurationManager `type:"structure"` + + // The stack ID. + StackId *string `type:"string"` +} + +// String returns the string representation +func (s DescribeAgentVersionsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeAgentVersionsInput) GoString() string { + return s.String() +} + +// Contains the response to a DescribeAgentVersions request. +type DescribeAgentVersionsOutput struct { + _ struct{} `type:"structure"` + + // The agent versions for the specified stack or configuration manager. Note + // that this value is the complete version number, not the abbreviated number + // used by the console. + AgentVersions []*AgentVersion `type:"list"` +} + +// String returns the string representation +func (s DescribeAgentVersionsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeAgentVersionsOutput) GoString() string { + return s.String() +} + +type DescribeAppsInput struct { + _ struct{} `type:"structure"` + + // An array of app IDs for the apps to be described. If you use this parameter, + // DescribeApps returns a description of the specified apps. Otherwise, it returns + // a description of every app. + AppIds []*string `type:"list"` + + // The app stack ID. If you use this parameter, DescribeApps returns a description + // of the apps in the specified stack. + StackId *string `type:"string"` +} + +// String returns the string representation +func (s DescribeAppsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeAppsInput) GoString() string { + return s.String() +} + +// Contains the response to a DescribeApps request. 
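+//
+// A usage sketch (svc is an assumed, already-initialized *opsworks.OpsWorks
+// client; the stack ID is a placeholder): listing every app in one stack:
+//
+//    resp, err := svc.DescribeApps(&opsworks.DescribeAppsInput{
+//        StackId: aws.String("stack-id"),
+//    })
+//    if err == nil {
+//        for _, app := range resp.Apps {
+//            // ... use aws.StringValue(app.Name) ...
+//        }
+//    }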
+type DescribeAppsOutput struct { + _ struct{} `type:"structure"` + + // An array of App objects that describe the specified apps. + Apps []*App `type:"list"` +} + +// String returns the string representation +func (s DescribeAppsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeAppsOutput) GoString() string { + return s.String() +} + +type DescribeCommandsInput struct { + _ struct{} `type:"structure"` + + // An array of command IDs. If you include this parameter, DescribeCommands + // returns a description of the specified commands. Otherwise, it returns a + // description of every command. + CommandIds []*string `type:"list"` + + // The deployment ID. If you include this parameter, DescribeCommands returns + // a description of the commands associated with the specified deployment. + DeploymentId *string `type:"string"` + + // The instance ID. If you include this parameter, DescribeCommands returns + // a description of the commands associated with the specified instance. + InstanceId *string `type:"string"` +} + +// String returns the string representation +func (s DescribeCommandsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeCommandsInput) GoString() string { + return s.String() +} + +// Contains the response to a DescribeCommands request. +type DescribeCommandsOutput struct { + _ struct{} `type:"structure"` + + // An array of Command objects that describe each of the specified commands. + Commands []*Command `type:"list"` +} + +// String returns the string representation +func (s DescribeCommandsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeCommandsOutput) GoString() string { + return s.String() +} + +type DescribeDeploymentsInput struct { + _ struct{} `type:"structure"` + + // The app ID. If you include this parameter, DescribeDeployments returns a + // description of the commands associated with the specified app. + AppId *string `type:"string"` + + // An array of deployment IDs to be described. If you include this parameter, + // DescribeDeployments returns a description of the specified deployments. Otherwise, + // it returns a description of every deployment. + DeploymentIds []*string `type:"list"` + + // The stack ID. If you include this parameter, DescribeDeployments returns + // a description of the commands associated with the specified stack. + StackId *string `type:"string"` +} + +// String returns the string representation +func (s DescribeDeploymentsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeDeploymentsInput) GoString() string { + return s.String() +} + +// Contains the response to a DescribeDeployments request. +type DescribeDeploymentsOutput struct { + _ struct{} `type:"structure"` + + // An array of Deployment objects that describe the deployments. + Deployments []*Deployment `type:"list"` +} + +// String returns the string representation +func (s DescribeDeploymentsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeDeploymentsOutput) GoString() string { + return s.String() +} + +type DescribeEcsClustersInput struct { + _ struct{} `type:"structure"` + + // A list of ARNs, one for each cluster to be described. 
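+ //
+ // A pagination sketch that drives the MaxResults and NextToken parameters
+ // described below (svc is an assumed, already-initialized *opsworks.OpsWorks
+ // client; the stack ID is a placeholder):
+ //
+ //    input := &opsworks.DescribeEcsClustersInput{
+ //        StackId:    aws.String("stack-id"),
+ //        MaxResults: aws.Int64(10),
+ //    }
+ //    for {
+ //        page, err := svc.DescribeEcsClusters(input)
+ //        if err != nil {
+ //            break // handle the error in real code
+ //        }
+ //        // ... use page.EcsClusters ...
+ //        if page.NextToken == nil {
+ //            break
+ //        }
+ //        input.NextToken = page.NextToken
+ //    }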
+ EcsClusterArns []*string `type:"list"`
+
+ // To receive a paginated response, use this parameter to specify the maximum
+ // number of results to be returned with a single call. If the number of available
+ // results exceeds this maximum, the response includes a NextToken value that
+ // you can assign to the NextToken request parameter to get the next set of
+ // results.
+ MaxResults *int64 `type:"integer"`
+
+ // If the previous paginated request did not return all of the remaining results,
+ // the response object's NextToken parameter value is set to a token. To retrieve
+ // the next set of results, call DescribeEcsClusters again and assign that token
+ // to the request object's NextToken parameter. If there are no remaining results,
+ // the previous response object's NextToken parameter is set to null.
+ NextToken *string `type:"string"`
+
+ // A stack ID. DescribeEcsClusters returns a description of the cluster that
+ // is registered with the stack.
+ StackId *string `type:"string"`
+}
+
+// String returns the string representation
+func (s DescribeEcsClustersInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeEcsClustersInput) GoString() string {
+ return s.String()
+}
+
+// Contains the response to a DescribeEcsClusters request.
+type DescribeEcsClustersOutput struct {
+ _ struct{} `type:"structure"`
+
+ // A list of EcsCluster objects containing the cluster descriptions.
+ EcsClusters []*EcsCluster `type:"list"`
+
+ // If a paginated request does not return all of the remaining results, this
+ // parameter is set to a token that you can assign to the request object's NextToken
+ // parameter to retrieve the next set of results. If the previous paginated
+ // request returned all of the remaining results, this parameter is set to null.
+ NextToken *string `type:"string"`
+}
+
+// String returns the string representation
+func (s DescribeEcsClustersOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeEcsClustersOutput) GoString() string {
+ return s.String()
+}
+
+type DescribeElasticIpsInput struct {
+ _ struct{} `type:"structure"`
+
+ // The instance ID. If you include this parameter, DescribeElasticIps returns
+ // a description of the Elastic IP addresses associated with the specified instance.
+ InstanceId *string `type:"string"`
+
+ // An array of Elastic IP addresses to be described. If you include this parameter,
+ // DescribeElasticIps returns a description of the specified Elastic IP addresses.
+ // Otherwise, it returns a description of every Elastic IP address.
+ Ips []*string `type:"list"`
+
+ // A stack ID. If you include this parameter, DescribeElasticIps returns a description
+ // of the Elastic IP addresses that are registered with the specified stack.
+ StackId *string `type:"string"`
+}
+
+// String returns the string representation
+func (s DescribeElasticIpsInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeElasticIpsInput) GoString() string {
+ return s.String()
+}
+
+// Contains the response to a DescribeElasticIps request.
+type DescribeElasticIpsOutput struct {
+ _ struct{} `type:"structure"`
+
+ // An ElasticIps object that describes the specified Elastic IP addresses.
+ ElasticIps []*ElasticIp `type:"list"` +} + +// String returns the string representation +func (s DescribeElasticIpsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeElasticIpsOutput) GoString() string { + return s.String() +} + +type DescribeElasticLoadBalancersInput struct { + _ struct{} `type:"structure"` + + // A list of layer IDs. The action describes the Elastic Load Balancing instances + // for the specified layers. + LayerIds []*string `type:"list"` + + // A stack ID. The action describes the stack's Elastic Load Balancing instances. + StackId *string `type:"string"` +} + +// String returns the string representation +func (s DescribeElasticLoadBalancersInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeElasticLoadBalancersInput) GoString() string { + return s.String() +} + +// Contains the response to a DescribeElasticLoadBalancers request. +type DescribeElasticLoadBalancersOutput struct { + _ struct{} `type:"structure"` + + // A list of ElasticLoadBalancer objects that describe the specified Elastic + // Load Balancing instances. + ElasticLoadBalancers []*ElasticLoadBalancer `type:"list"` +} + +// String returns the string representation +func (s DescribeElasticLoadBalancersOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeElasticLoadBalancersOutput) GoString() string { + return s.String() +} + +type DescribeInstancesInput struct { + _ struct{} `type:"structure"` + + // An array of instance IDs to be described. If you use this parameter, DescribeInstances + // returns a description of the specified instances. Otherwise, it returns a + // description of every instance. + InstanceIds []*string `type:"list"` + + // A layer ID. If you use this parameter, DescribeInstances returns descriptions + // of the instances associated with the specified layer. + LayerId *string `type:"string"` + + // A stack ID. If you use this parameter, DescribeInstances returns descriptions + // of the instances associated with the specified stack. + StackId *string `type:"string"` +} + +// String returns the string representation +func (s DescribeInstancesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeInstancesInput) GoString() string { + return s.String() +} + +// Contains the response to a DescribeInstances request. +type DescribeInstancesOutput struct { + _ struct{} `type:"structure"` + + // An array of Instance objects that describe the instances. + Instances []*Instance `type:"list"` +} + +// String returns the string representation +func (s DescribeInstancesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeInstancesOutput) GoString() string { + return s.String() +} + +type DescribeLayersInput struct { + _ struct{} `type:"structure"` + + // An array of layer IDs that specify the layers to be described. If you omit + // this parameter, DescribeLayers returns a description of every layer in the + // specified stack. + LayerIds []*string `type:"list"` + + // The stack ID. 
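+ //
+ // A minimal usage sketch (svc is an assumed, already-initialized
+ // *opsworks.OpsWorks client; the stack ID is a placeholder). Omitting
+ // LayerIds returns every layer in the stack:
+ //
+ //    resp, err := svc.DescribeLayers(&opsworks.DescribeLayersInput{
+ //        StackId: aws.String("stack-id"),
+ //    })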
+ StackId *string `type:"string"` +} + +// String returns the string representation +func (s DescribeLayersInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeLayersInput) GoString() string { + return s.String() +} + +// Contains the response to a DescribeLayers request. +type DescribeLayersOutput struct { + _ struct{} `type:"structure"` + + // An array of Layer objects that describe the layers. + Layers []*Layer `type:"list"` +} + +// String returns the string representation +func (s DescribeLayersOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeLayersOutput) GoString() string { + return s.String() +} + +type DescribeLoadBasedAutoScalingInput struct { + _ struct{} `type:"structure"` + + // An array of layer IDs. + LayerIds []*string `type:"list" required:"true"` +} + +// String returns the string representation +func (s DescribeLoadBasedAutoScalingInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeLoadBasedAutoScalingInput) GoString() string { + return s.String() +} + +// Contains the response to a DescribeLoadBasedAutoScaling request. +type DescribeLoadBasedAutoScalingOutput struct { + _ struct{} `type:"structure"` + + // An array of LoadBasedAutoScalingConfiguration objects that describe each + // layer's configuration. + LoadBasedAutoScalingConfigurations []*LoadBasedAutoScalingConfiguration `type:"list"` +} + +// String returns the string representation +func (s DescribeLoadBasedAutoScalingOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeLoadBasedAutoScalingOutput) GoString() string { + return s.String() +} + +type DescribeMyUserProfileInput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DescribeMyUserProfileInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeMyUserProfileInput) GoString() string { + return s.String() +} + +// Contains the response to a DescribeMyUserProfile request. +type DescribeMyUserProfileOutput struct { + _ struct{} `type:"structure"` + + // A UserProfile object that describes the user's SSH information. + UserProfile *SelfUserProfile `type:"structure"` +} + +// String returns the string representation +func (s DescribeMyUserProfileOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeMyUserProfileOutput) GoString() string { + return s.String() +} + +type DescribePermissionsInput struct { + _ struct{} `type:"structure"` + + // The user's IAM ARN. For more information about IAM ARNs, see Using Identifiers + // (http://docs.aws.amazon.com/IAM/latest/UserGuide/Using_Identifiers.html). + IamUserArn *string `type:"string"` + + // The stack ID. + StackId *string `type:"string"` +} + +// String returns the string representation +func (s DescribePermissionsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribePermissionsInput) GoString() string { + return s.String() +} + +// Contains the response to a DescribePermissions request. +type DescribePermissionsOutput struct { + _ struct{} `type:"structure"` + + // An array of Permission objects that describe the stack permissions. 
+ //
+ // If the request object contains only a stack ID, the array contains a Permission
+ // object with permissions for each of the stack IAM ARNs. If the request object
+ // contains only an IAM ARN, the array contains a Permission object with permissions
+ // for each of the user's stack IDs. If the request contains a stack ID and
+ // an IAM ARN, the array contains a single Permission object with permissions
+ // for the specified stack and IAM ARN.
+ Permissions []*Permission `type:"list"`
+}
+
+// String returns the string representation
+func (s DescribePermissionsOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribePermissionsOutput) GoString() string {
+ return s.String()
+}
+
+type DescribeRaidArraysInput struct {
+ _ struct{} `type:"structure"`
+
+ // The instance ID. If you use this parameter, DescribeRaidArrays returns descriptions
+ // of the RAID arrays associated with the specified instance.
+ InstanceId *string `type:"string"`
+
+ // An array of RAID array IDs. If you use this parameter, DescribeRaidArrays
+ // returns descriptions of the specified arrays. Otherwise, it returns a description
+ // of every array.
+ RaidArrayIds []*string `type:"list"`
+
+ // The stack ID.
+ StackId *string `type:"string"`
+}
+
+// String returns the string representation
+func (s DescribeRaidArraysInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeRaidArraysInput) GoString() string {
+ return s.String()
+}
+
+// Contains the response to a DescribeRaidArrays request.
+type DescribeRaidArraysOutput struct {
+ _ struct{} `type:"structure"`
+
+ // A RaidArrays object that describes the specified RAID arrays.
+ RaidArrays []*RaidArray `type:"list"`
+}
+
+// String returns the string representation
+func (s DescribeRaidArraysOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeRaidArraysOutput) GoString() string {
+ return s.String()
+}
+
+type DescribeRdsDbInstancesInput struct {
+ _ struct{} `type:"structure"`
+
+ // An array containing the ARNs of the instances to be described.
+ RdsDbInstanceArns []*string `type:"list"`
+
+ // The stack ID that the instances are registered with. The operation returns
+ // descriptions of all registered Amazon RDS instances.
+ StackId *string `type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s DescribeRdsDbInstancesInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeRdsDbInstancesInput) GoString() string {
+ return s.String()
+}
+
+// Contains the response to a DescribeRdsDbInstances request.
+type DescribeRdsDbInstancesOutput struct {
+ _ struct{} `type:"structure"`
+
+ // An array of RdsDbInstance objects that describe the instances.
+ RdsDbInstances []*RdsDbInstance `type:"list"`
+}
+
+// String returns the string representation
+func (s DescribeRdsDbInstancesOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeRdsDbInstancesOutput) GoString() string {
+ return s.String()
+}
+
+type DescribeServiceErrorsInput struct {
+ _ struct{} `type:"structure"`
+
+ // The instance ID. If you use this parameter, DescribeServiceErrors returns
+ // descriptions of the errors associated with the specified instance.
+ InstanceId *string `type:"string"`
+
+ // An array of service error IDs. If you use this parameter, DescribeServiceErrors
+ // returns descriptions of the specified errors. Otherwise, it returns a description
+ // of every error.
+ ServiceErrorIds []*string `type:"list"`
+
+ // The stack ID. If you use this parameter, DescribeServiceErrors returns descriptions
+ // of the errors associated with the specified stack.
+ StackId *string `type:"string"`
+}
+
+// String returns the string representation
+func (s DescribeServiceErrorsInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeServiceErrorsInput) GoString() string {
+ return s.String()
+}
+
+// Contains the response to a DescribeServiceErrors request.
+type DescribeServiceErrorsOutput struct {
+ _ struct{} `type:"structure"`
+
+ // An array of ServiceError objects that describe the specified service errors.
+ ServiceErrors []*ServiceError `type:"list"`
+}
+
+// String returns the string representation
+func (s DescribeServiceErrorsOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeServiceErrorsOutput) GoString() string {
+ return s.String()
+}
+
+type DescribeStackProvisioningParametersInput struct {
+ _ struct{} `type:"structure"`
+
+ // The stack ID.
+ StackId *string `type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s DescribeStackProvisioningParametersInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeStackProvisioningParametersInput) GoString() string {
+ return s.String()
+}
+
+// Contains the response to a DescribeStackProvisioningParameters request.
+type DescribeStackProvisioningParametersOutput struct {
+ _ struct{} `type:"structure"`
+
+ // The AWS OpsWorks agent installer's URL.
+ AgentInstallerUrl *string `type:"string"`
+
+ // An embedded object that contains the provisioning parameters.
+ Parameters map[string]*string `type:"map"`
+}
+
+// String returns the string representation
+func (s DescribeStackProvisioningParametersOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeStackProvisioningParametersOutput) GoString() string {
+ return s.String()
+}
+
+type DescribeStackSummaryInput struct {
+ _ struct{} `type:"structure"`
+
+ // The stack ID.
+ StackId *string `type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s DescribeStackSummaryInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeStackSummaryInput) GoString() string {
+ return s.String()
+}
+
+// Contains the response to a DescribeStackSummary request.
+type DescribeStackSummaryOutput struct {
+ _ struct{} `type:"structure"`
+
+ // A StackSummary object that contains the results.
+ StackSummary *StackSummary `type:"structure"`
+}
+
+// String returns the string representation
+func (s DescribeStackSummaryOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeStackSummaryOutput) GoString() string {
+ return s.String()
+}
+
+type DescribeStacksInput struct {
+ _ struct{} `type:"structure"`
+
+ // An array of stack IDs that specify the stacks to be described. If you omit
+ // this parameter, DescribeStacks returns a description of every stack.
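+ //
+ // A minimal usage sketch (svc is an assumed, already-initialized
+ // *opsworks.OpsWorks client). An empty input describes every stack:
+ //
+ //    resp, err := svc.DescribeStacks(&opsworks.DescribeStacksInput{})
+ //    if err == nil {
+ //        for _, stack := range resp.Stacks {
+ //            // ... use aws.StringValue(stack.Name) ...
+ //        }
+ //    }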
+ StackIds []*string `type:"list"`
+}
+
+// String returns the string representation
+func (s DescribeStacksInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeStacksInput) GoString() string {
+ return s.String()
+}
+
+// Contains the response to a DescribeStacks request.
+type DescribeStacksOutput struct {
+ _ struct{} `type:"structure"`
+
+ // An array of Stack objects that describe the stacks.
+ Stacks []*Stack `type:"list"`
+}
+
+// String returns the string representation
+func (s DescribeStacksOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeStacksOutput) GoString() string {
+ return s.String()
+}
+
+type DescribeTimeBasedAutoScalingInput struct {
+ _ struct{} `type:"structure"`
+
+ // An array of instance IDs.
+ InstanceIds []*string `type:"list" required:"true"`
+}
+
+// String returns the string representation
+func (s DescribeTimeBasedAutoScalingInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeTimeBasedAutoScalingInput) GoString() string {
+ return s.String()
+}
+
+// Contains the response to a DescribeTimeBasedAutoScaling request.
+type DescribeTimeBasedAutoScalingOutput struct {
+ _ struct{} `type:"structure"`
+
+ // An array of TimeBasedAutoScalingConfiguration objects that describe the configuration
+ // for the specified instances.
+ TimeBasedAutoScalingConfigurations []*TimeBasedAutoScalingConfiguration `type:"list"`
+}
+
+// String returns the string representation
+func (s DescribeTimeBasedAutoScalingOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeTimeBasedAutoScalingOutput) GoString() string {
+ return s.String()
+}
+
+type DescribeUserProfilesInput struct {
+ _ struct{} `type:"structure"`
+
+ // An array of IAM user ARNs that identify the users to be described.
+ IamUserArns []*string `type:"list"`
+}
+
+// String returns the string representation
+func (s DescribeUserProfilesInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeUserProfilesInput) GoString() string {
+ return s.String()
+}
+
+// Contains the response to a DescribeUserProfiles request.
+type DescribeUserProfilesOutput struct {
+ _ struct{} `type:"structure"`
+
+ // A Users object that describes the specified users.
+ UserProfiles []*UserProfile `type:"list"`
+}
+
+// String returns the string representation
+func (s DescribeUserProfilesOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeUserProfilesOutput) GoString() string {
+ return s.String()
+}
+
+type DescribeVolumesInput struct {
+ _ struct{} `type:"structure"`
+
+ // The instance ID. If you use this parameter, DescribeVolumes returns descriptions
+ // of the volumes associated with the specified instance.
+ InstanceId *string `type:"string"`
+
+ // The RAID array ID. If you use this parameter, DescribeVolumes returns descriptions
+ // of the volumes associated with the specified RAID array.
+ RaidArrayId *string `type:"string"`
+
+ // A stack ID. The action describes the stack's registered Amazon EBS volumes.
+ StackId *string `type:"string"`
+
+ // An array of volume IDs. If you use this parameter, DescribeVolumes returns
+ // descriptions of the specified volumes.
Otherwise, it returns a description
+ // of every volume.
+ VolumeIds []*string `type:"list"`
+}
+
+// String returns the string representation
+func (s DescribeVolumesInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeVolumesInput) GoString() string {
+ return s.String()
+}
+
+// Contains the response to a DescribeVolumes request.
+type DescribeVolumesOutput struct {
+ _ struct{} `type:"structure"`
+
+ // An array of Volume objects that describe the specified volumes.
+ Volumes []*Volume `type:"list"`
+}
+
+// String returns the string representation
+func (s DescribeVolumesOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeVolumesOutput) GoString() string {
+ return s.String()
+}
+
+type DetachElasticLoadBalancerInput struct {
+ _ struct{} `type:"structure"`
+
+ // The Elastic Load Balancing instance's name.
+ ElasticLoadBalancerName *string `type:"string" required:"true"`
+
+ // The ID of the layer that the Elastic Load Balancing instance is attached
+ // to.
+ LayerId *string `type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s DetachElasticLoadBalancerInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DetachElasticLoadBalancerInput) GoString() string {
+ return s.String()
+}
+
+type DetachElasticLoadBalancerOutput struct {
+ _ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s DetachElasticLoadBalancerOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DetachElasticLoadBalancerOutput) GoString() string {
+ return s.String()
+}
+
+type DisassociateElasticIpInput struct {
+ _ struct{} `type:"structure"`
+
+ // The Elastic IP address.
+ ElasticIp *string `type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s DisassociateElasticIpInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DisassociateElasticIpInput) GoString() string {
+ return s.String()
+}
+
+type DisassociateElasticIpOutput struct {
+ _ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s DisassociateElasticIpOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DisassociateElasticIpOutput) GoString() string {
+ return s.String()
+}
+
+// Describes an Amazon EBS volume. This data type maps directly to the Amazon
+// EC2 EbsBlockDevice (http://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_EbsBlockDevice.html)
+// data type.
+type EbsBlockDevice struct {
+ _ struct{} `type:"structure"`
+
+ // Whether the volume is deleted on instance termination.
+ DeleteOnTermination *bool `type:"boolean"`
+
+ // The number of I/O operations per second (IOPS) that the volume supports.
+ // For more information, see EbsBlockDevice (http://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_EbsBlockDevice.html).
+ Iops *int64 `type:"integer"`
+
+ // The snapshot ID.
+ SnapshotId *string `type:"string"`
+
+ // The volume size, in GiB. For more information, see EbsBlockDevice (http://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_EbsBlockDevice.html).
+ VolumeSize *int64 `type:"integer"`
+
+ // The volume type. gp2 for General Purpose (SSD) volumes, io1 for Provisioned
+ // IOPS (SSD) volumes, and standard for Magnetic volumes.
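+ //
+ // A construction sketch; the values below are illustrative, not recommendations:
+ //
+ //    ebs := &opsworks.EbsBlockDevice{
+ //        DeleteOnTermination: aws.Bool(true),
+ //        Iops:                aws.Int64(100),
+ //        VolumeSize:          aws.Int64(100),
+ //        VolumeType:          aws.String("io1"),
+ //    }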
+ VolumeType *string `type:"string" enum:"VolumeType"` +} + +// String returns the string representation +func (s EbsBlockDevice) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s EbsBlockDevice) GoString() string { + return s.String() +} + +// Describes a registered Amazon ECS cluster. +type EcsCluster struct { + _ struct{} `type:"structure"` + + // The cluster's ARN. + EcsClusterArn *string `type:"string"` + + // The cluster name. + EcsClusterName *string `type:"string"` + + // The time and date that the cluster was registered with the stack. + RegisteredAt *string `type:"string"` + + // The stack ID. + StackId *string `type:"string"` +} + +// String returns the string representation +func (s EcsCluster) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s EcsCluster) GoString() string { + return s.String() +} + +// Describes an Elastic IP address. +type ElasticIp struct { + _ struct{} `type:"structure"` + + // The domain. + Domain *string `type:"string"` + + // The ID of the instance that the address is attached to. + InstanceId *string `type:"string"` + + // The IP address. + Ip *string `type:"string"` + + // The name. + Name *string `type:"string"` + + // The AWS region. For more information, see Regions and Endpoints (http://docs.aws.amazon.com/general/latest/gr/rande.html). + Region *string `type:"string"` +} + +// String returns the string representation +func (s ElasticIp) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ElasticIp) GoString() string { + return s.String() +} + +// Describes an Elastic Load Balancing instance. +type ElasticLoadBalancer struct { + _ struct{} `type:"structure"` + + // A list of Availability Zones. + AvailabilityZones []*string `type:"list"` + + // The instance's public DNS name. + DnsName *string `type:"string"` + + // A list of the EC2 instances that the Elastic Load Balancing instance is managing + // traffic for. + Ec2InstanceIds []*string `type:"list"` + + // The Elastic Load Balancing instance's name. + ElasticLoadBalancerName *string `type:"string"` + + // The ID of the layer that the instance is attached to. + LayerId *string `type:"string"` + + // The instance's AWS region. + Region *string `type:"string"` + + // The ID of the stack that the instance is associated with. + StackId *string `type:"string"` + + // A list of subnet IDs, if the stack is running in a VPC. + SubnetIds []*string `type:"list"` + + // The VPC ID. + VpcId *string `type:"string"` +} + +// String returns the string representation +func (s ElasticLoadBalancer) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ElasticLoadBalancer) GoString() string { + return s.String() +} + +// Represents an app's environment variable. +type EnvironmentVariable struct { + _ struct{} `type:"structure"` + + // (Required) The environment variable's name, which can consist of up to 64 + // characters and must be specified. The name can contain upper- and lowercase + // letters, numbers, and underscores (_), but it must start with a letter or + // underscore. + Key *string `type:"string" required:"true"` + + // (Optional) Whether the variable's value will be returned by the DescribeApps + // action. To conceal an environment variable's value, set Secure to true. DescribeApps + // then returns *****FILTERED***** instead of the actual value. 
The default
+ // value for Secure is false.
+ Secure *bool `type:"boolean"`
+
+ // (Optional) The environment variable's value, which can be left empty. If
+ // you specify a value, it can contain up to 256 characters, which must all
+ // be printable.
+ Value *string `type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s EnvironmentVariable) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s EnvironmentVariable) GoString() string {
+ return s.String()
+}
+
+type GetHostnameSuggestionInput struct {
+ _ struct{} `type:"structure"`
+
+ // The layer ID.
+ LayerId *string `type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s GetHostnameSuggestionInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetHostnameSuggestionInput) GoString() string {
+ return s.String()
+}
+
+// Contains the response to a GetHostnameSuggestion request.
+type GetHostnameSuggestionOutput struct {
+ _ struct{} `type:"structure"`
+
+ // The generated host name.
+ Hostname *string `type:"string"`
+
+ // The layer ID.
+ LayerId *string `type:"string"`
+}
+
+// String returns the string representation
+func (s GetHostnameSuggestionOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetHostnameSuggestionOutput) GoString() string {
+ return s.String()
+}
+
+type GrantAccessInput struct {
+ _ struct{} `type:"structure"`
+
+ // The instance's AWS OpsWorks ID.
+ InstanceId *string `type:"string" required:"true"`
+
+ // The length of time (in minutes) that the grant is valid. When the grant expires
+ // at the end of this period, the user will no longer be able to use the credentials
+ // to log in. If the user is logged in at the time, he or she automatically
+ // will be logged out.
+ ValidForInMinutes *int64 `min:"60" type:"integer"`
+}
+
+// String returns the string representation
+func (s GrantAccessInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GrantAccessInput) GoString() string {
+ return s.String()
+}
+
+// Contains the response to a GrantAccess request.
+type GrantAccessOutput struct {
+ _ struct{} `type:"structure"`
+
+ // A TemporaryCredential object that contains the data needed to log in to the
+ // instance by RDP clients, such as the Microsoft Remote Desktop Connection.
+ TemporaryCredential *TemporaryCredential `type:"structure"`
+}
+
+// String returns the string representation
+func (s GrantAccessOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GrantAccessOutput) GoString() string {
+ return s.String()
+}
+
+// Describes an instance.
+type Instance struct {
+ _ struct{} `type:"structure"`
+
+ // The agent version. This parameter is set to INHERIT if the instance inherits
+ // the default stack setting or to a version number for a fixed agent version.
+ AgentVersion *string `type:"string"`
+
+ // A custom AMI ID to be used to create the instance. For more information,
+ // see Instances (http://docs.aws.amazon.com/opsworks/latest/userguide/workinginstances-custom-ami.html)
+ AmiId *string `type:"string"`
+
+ // The instance architecture: "i386" or "x86_64".
+ Architecture *string `type:"string" enum:"Architecture"`
+
+ // For load-based or time-based instances, the type.
+ AutoScalingType *string `type:"string" enum:"AutoScalingType"`
+
+ // The instance Availability Zone. For more information, see Regions and Endpoints
+ // (http://docs.aws.amazon.com/general/latest/gr/rande.html).
+ AvailabilityZone *string `type:"string"`
+
+ // An array of BlockDeviceMapping objects that specify the instance's block
+ // device mappings.
+ BlockDeviceMappings []*BlockDeviceMapping `type:"list"`
+
+ // The time that the instance was created.
+ CreatedAt *string `type:"string"`
+
+ // Whether this is an Amazon EBS-optimized instance.
+ EbsOptimized *bool `type:"boolean"`
+
+ // The ID of the associated Amazon EC2 instance.
+ Ec2InstanceId *string `type:"string"`
+
+ // For container instances, the Amazon ECS cluster's ARN.
+ EcsClusterArn *string `type:"string"`
+
+ // For container instances, the instance's ARN.
+ EcsContainerInstanceArn *string `type:"string"`
+
+ // The instance Elastic IP address (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/elastic-ip-addresses-eip.html).
+ ElasticIp *string `type:"string"`
+
+ // The instance host name.
+ Hostname *string `type:"string"`
+
+ // For registered instances, the infrastructure class: ec2 or on-premises.
+ InfrastructureClass *string `type:"string"`
+
+ // Whether to install operating system and package updates when the instance
+ // boots. The default value is true. If this value is set to false, you must
+ // then update your instances manually by using CreateDeployment to run the
+ // update_dependencies stack command or by manually running yum (Amazon Linux)
+ // or apt-get (Ubuntu) on the instances.
+ //
+ // We strongly recommend using the default value of true, to ensure that your
+ // instances have the latest security updates.
+ InstallUpdatesOnBoot *bool `type:"boolean"`
+
+ // The instance ID.
+ InstanceId *string `type:"string"`
+
+ // The ARN of the instance's IAM profile. For more information about IAM ARNs,
+ // see Using Identifiers (http://docs.aws.amazon.com/IAM/latest/UserGuide/Using_Identifiers.html).
+ InstanceProfileArn *string `type:"string"`
+
+ // The instance type, such as t2.micro.
+ InstanceType *string `type:"string"`
+
+ // The ID of the last service error. For more information, call DescribeServiceErrors.
+ LastServiceErrorId *string `type:"string"`
+
+ // An array containing the instance layer IDs.
+ LayerIds []*string `type:"list"`
+
+ // The instance's operating system.
+ Os *string `type:"string"`
+
+ // The instance's platform.
+ Platform *string `type:"string"`
+
+ // The instance's private DNS name.
+ PrivateDns *string `type:"string"`
+
+ // The instance's private IP address.
+ PrivateIp *string `type:"string"`
+
+ // The instance public DNS name.
+ PublicDns *string `type:"string"`
+
+ // The instance public IP address.
+ PublicIp *string `type:"string"`
+
+ // For registered instances, who performed the registration.
+ RegisteredBy *string `type:"string"`
+
+ // The instance's reported AWS OpsWorks agent version.
+ ReportedAgentVersion *string `type:"string"`
+
+ // For registered instances, the reported operating system.
+ ReportedOs *ReportedOs `type:"structure"`
+
+ // The instance's root device type. For more information, see Storage for the
+ // Root Device (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ComponentsAMIs.html#storage-for-the-root-device).
+ RootDeviceType *string `type:"string" enum:"RootDeviceType"`
+
+ // The root device volume ID.
+ RootDeviceVolumeId *string `type:"string"`
+
+ // An array containing the instance security group IDs.
+	SecurityGroupIds []*string `type:"list"`
+
+	// The SSH key's Digital Signature Algorithm (DSA) fingerprint.
+	SshHostDsaKeyFingerprint *string `type:"string"`
+
+	// The SSH key's RSA fingerprint.
+	SshHostRsaKeyFingerprint *string `type:"string"`
+
+	// The instance's Amazon EC2 key-pair name.
+	SshKeyName *string `type:"string"`
+
+	// The stack ID.
+	StackId *string `type:"string"`
+
+	// The instance status:
+	//
+	//   booting, connection_lost, online, pending, rebooting, requested,
+	//   running_setup, setup_failed, shutting_down, start_failed, stopped,
+	//   stopping, terminated, terminating
+	Status *string `type:"string"`
+
+	// The instance's subnet ID; applicable only if the stack is running in a VPC.
+	SubnetId *string `type:"string"`
+
+	// The instance's virtualization type: paravirtual or hvm.
+	VirtualizationType *string `type:"string" enum:"VirtualizationType"`
+}
+
+// String returns the string representation
+func (s Instance) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s Instance) GoString() string {
+	return s.String()
+}
+
+// Contains a description of an Amazon EC2 instance from the Amazon EC2 metadata
+// service. For more information, see Instance Metadata and User Data (http://docs.aws.amazon.com/sdkfornet/latest/apidocs/Index.html).
+type InstanceIdentity struct {
+	_ struct{} `type:"structure"`
+
+	// A JSON document that contains the metadata.
+	Document *string `type:"string"`
+
+	// A signature that can be used to verify the document's accuracy and authenticity.
+	Signature *string `type:"string"`
+}
+
+// String returns the string representation
+func (s InstanceIdentity) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s InstanceIdentity) GoString() string {
+	return s.String()
+}
+
+// Describes how many instances a stack has for each status.
+type InstancesCount struct {
+	_ struct{} `type:"structure"`
+
+	// The number of instances in the Assigning state.
+	Assigning *int64 `type:"integer"`
+
+	// The number of instances with booting status.
+	Booting *int64 `type:"integer"`
+
+	// The number of instances with connection_lost status.
+	ConnectionLost *int64 `type:"integer"`
+
+	// The number of instances in the Deregistering state.
+	Deregistering *int64 `type:"integer"`
+
+	// The number of instances with online status.
+	Online *int64 `type:"integer"`
+
+	// The number of instances with pending status.
+	Pending *int64 `type:"integer"`
+
+	// The number of instances with rebooting status.
+	Rebooting *int64 `type:"integer"`
+
+	// The number of instances in the Registered state.
+	Registered *int64 `type:"integer"`
+
+	// The number of instances in the Registering state.
+	Registering *int64 `type:"integer"`
+
+	// The number of instances with requested status.
+	Requested *int64 `type:"integer"`
+
+	// The number of instances with running_setup status.
+	RunningSetup *int64 `type:"integer"`
+
+	// The number of instances with setup_failed status.
+	SetupFailed *int64 `type:"integer"`
+
+	// The number of instances with shutting_down status.
+	ShuttingDown *int64 `type:"integer"`
+
+	// The number of instances with start_failed status.
+	StartFailed *int64 `type:"integer"`
+
+	// The number of instances with stopped status.
+	Stopped *int64 `type:"integer"`
+
+	// The number of instances with stopping status.
+	Stopping *int64 `type:"integer"`
+
+	// The number of instances with terminated status.
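+	//
+	// Editor's note: a hypothetical sketch, not generated documentation. Because
+	// every counter in this structure is an *int64, the aws.Int64Value helper
+	// (from "github.com/aws/aws-sdk-go/aws") treats a missing counter as zero
+	// when totalling a stack's instances:
+	//
+	//	// counts is an *opsworks.InstancesCount, e.g. from a stack summary.
+	//	running := aws.Int64Value(counts.Online) + aws.Int64Value(counts.Booting)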
+	Terminated *int64 `type:"integer"`
+
+	// The number of instances with terminating status.
+	Terminating *int64 `type:"integer"`
+
+	// The number of instances in the Unassigning state.
+	Unassigning *int64 `type:"integer"`
+}
+
+// String returns the string representation
+func (s InstancesCount) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s InstancesCount) GoString() string {
+	return s.String()
+}
+
+// Describes a layer.
+type Layer struct {
+	_ struct{} `type:"structure"`
+
+	// The layer attributes.
+	//
+	// For the HaproxyStatsPassword, MysqlRootPassword, and GangliaPassword attributes,
+	// AWS OpsWorks returns *****FILTERED***** instead of the actual value.
+	//
+	// For an ECS Cluster layer, AWS OpsWorks sets the EcsClusterArn attribute
+	// to the cluster's ARN.
+	Attributes map[string]*string `type:"map"`
+
+	// Whether to automatically assign an Elastic IP address (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/elastic-ip-addresses-eip.html)
+	// to the layer's instances. For more information, see How to Edit a Layer (http://docs.aws.amazon.com/opsworks/latest/userguide/workinglayers-basics-edit.html).
+	AutoAssignElasticIps *bool `type:"boolean"`
+
+	// For stacks that are running in a VPC, whether to automatically assign a public
+	// IP address to the layer's instances. For more information, see How to Edit
+	// a Layer (http://docs.aws.amazon.com/opsworks/latest/userguide/workinglayers-basics-edit.html).
+	AutoAssignPublicIps *bool `type:"boolean"`
+
+	// Date when the layer was created.
+	CreatedAt *string `type:"string"`
+
+	// The ARN of the default IAM profile to be used for the layer's EC2 instances.
+	// For more information about IAM ARNs, see Using Identifiers (http://docs.aws.amazon.com/IAM/latest/UserGuide/Using_Identifiers.html).
+	CustomInstanceProfileArn *string `type:"string"`
+
+	// A JSON formatted string containing the layer's custom stack configuration
+	// and deployment attributes.
+	CustomJson *string `type:"string"`
+
+	// A LayerCustomRecipes object that specifies the layer's custom recipes.
+	CustomRecipes *Recipes `type:"structure"`
+
+	// An array containing the layer's custom security group IDs.
+	CustomSecurityGroupIds []*string `type:"list"`
+
+	// AWS OpsWorks supports five lifecycle events: setup, configuration, deploy,
+	// undeploy, and shutdown. For each layer, AWS OpsWorks runs a set of standard
+	// recipes for each event. In addition, you can provide custom recipes for any
+	// or all layers and events. AWS OpsWorks runs custom event recipes after the
+	// standard recipes. LayerCustomRecipes specifies the custom recipes for a particular
+	// layer to be run in response to each of the five events.
+	//
+	// To specify a recipe, use the cookbook's directory name in the repository
+	// followed by two colons and the recipe name, which is the recipe's file name
+	// without the .rb extension. For example: phpapp2::dbsetup specifies the dbsetup.rb
+	// recipe in the repository's phpapp2 folder.
+	DefaultRecipes *Recipes `type:"structure"`
+
+	// An array containing the layer's security group names.
+	DefaultSecurityGroupNames []*string `type:"list"`
+
+	// Whether auto healing is disabled for the layer.
+	EnableAutoHealing *bool `type:"boolean"`
+
+	// Whether to install operating system and package updates when the instance
+	// boots. The default value is true.
If this value is set to false, you must + // then update your instances manually by using CreateDeployment to run the + // update_dependencies stack command or manually running yum (Amazon Linux) + // or apt-get (Ubuntu) on the instances. + // + // We strongly recommend using the default value of true, to ensure that your + // instances have the latest security updates. + InstallUpdatesOnBoot *bool `type:"boolean"` + + // The layer ID. + LayerId *string `type:"string"` + + // A LifeCycleEventConfiguration object that specifies the Shutdown event configuration. + LifecycleEventConfiguration *LifecycleEventConfiguration `type:"structure"` + + // The layer name. + Name *string `type:"string"` + + // An array of Package objects that describe the layer's packages. + Packages []*string `type:"list"` + + // The layer short name. + Shortname *string `type:"string"` + + // The layer stack ID. + StackId *string `type:"string"` + + // The layer type. + Type *string `type:"string" enum:"LayerType"` + + // Whether the layer uses Amazon EBS-optimized instances. + UseEbsOptimizedInstances *bool `type:"boolean"` + + // A VolumeConfigurations object that describes the layer's Amazon EBS volumes. + VolumeConfigurations []*VolumeConfiguration `type:"list"` +} + +// String returns the string representation +func (s Layer) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Layer) GoString() string { + return s.String() +} + +// Specifies the lifecycle event configuration +type LifecycleEventConfiguration struct { + _ struct{} `type:"structure"` + + // A ShutdownEventConfiguration object that specifies the Shutdown event configuration. + Shutdown *ShutdownEventConfiguration `type:"structure"` +} + +// String returns the string representation +func (s LifecycleEventConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s LifecycleEventConfiguration) GoString() string { + return s.String() +} + +// Describes a layer's load-based auto scaling configuration. +type LoadBasedAutoScalingConfiguration struct { + _ struct{} `type:"structure"` + + // An AutoScalingThresholds object that describes the downscaling configuration, + // which defines how and when AWS OpsWorks reduces the number of instances. + DownScaling *AutoScalingThresholds `type:"structure"` + + // Whether load-based auto scaling is enabled for the layer. + Enable *bool `type:"boolean"` + + // The layer ID. + LayerId *string `type:"string"` + + // An AutoScalingThresholds object that describes the upscaling configuration, + // which defines how and when AWS OpsWorks increases the number of instances. + UpScaling *AutoScalingThresholds `type:"structure"` +} + +// String returns the string representation +func (s LoadBasedAutoScalingConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s LoadBasedAutoScalingConfiguration) GoString() string { + return s.String() +} + +// Describes stack or user permissions. +type Permission struct { + _ struct{} `type:"structure"` + + // Whether the user can use SSH. + AllowSsh *bool `type:"boolean"` + + // Whether the user can use sudo. + AllowSudo *bool `type:"boolean"` + + // The Amazon Resource Name (ARN) for an AWS Identity and Access Management + // (IAM) role. For more information about IAM ARNs, see Using Identifiers (http://docs.aws.amazon.com/IAM/latest/UserGuide/Using_Identifiers.html). 
+	IamUserArn *string `type:"string"`
+
+	// The user's permission level, which must be one of the following:
+	//
+	//   deny, show, deploy, manage, iam_only
+	//
+	// For more information on the permissions associated with these levels, see
+	// Managing User Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html).
+	Level *string `type:"string"`
+
+	// A stack ID.
+	StackId *string `type:"string"`
+}
+
+// String returns the string representation
+func (s Permission) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s Permission) GoString() string {
+	return s.String()
+}
+
+// Describes an instance's RAID array.
+type RaidArray struct {
+	_ struct{} `type:"structure"`
+
+	// The array's Availability Zone. For more information, see Regions and Endpoints
+	// (http://docs.aws.amazon.com/general/latest/gr/rande.html).
+	AvailabilityZone *string `type:"string"`
+
+	// When the RAID array was created.
+	CreatedAt *string `type:"string"`
+
+	// The array's Linux device. For example /dev/md0.
+	Device *string `type:"string"`
+
+	// The instance ID.
+	InstanceId *string `type:"string"`
+
+	// For PIOPS volumes, the IOPS per disk.
+	Iops *int64 `type:"integer"`
+
+	// The array's mount point.
+	MountPoint *string `type:"string"`
+
+	// The array name.
+	Name *string `type:"string"`
+
+	// The number of disks in the array.
+	NumberOfDisks *int64 `type:"integer"`
+
+	// The array ID.
+	RaidArrayId *string `type:"string"`
+
+	// The RAID level (http://en.wikipedia.org/wiki/Standard_RAID_levels).
+	RaidLevel *int64 `type:"integer"`
+
+	// The array's size.
+	Size *int64 `type:"integer"`
+
+	// The stack ID.
+	StackId *string `type:"string"`
+
+	// The volume type, standard or PIOPS.
+	VolumeType *string `type:"string"`
+}
+
+// String returns the string representation
+func (s RaidArray) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s RaidArray) GoString() string {
+	return s.String()
+}
+
+// Describes an Amazon RDS instance.
+type RdsDbInstance struct {
+	_ struct{} `type:"structure"`
+
+	// The instance's address.
+	Address *string `type:"string"`
+
+	// The DB instance identifier.
+	DbInstanceIdentifier *string `type:"string"`
+
+	// AWS OpsWorks returns *****FILTERED***** instead of the actual value.
+	DbPassword *string `type:"string"`
+
+	// The master user name.
+	DbUser *string `type:"string"`
+
+	// The instance's database engine.
+	Engine *string `type:"string"`
+
+	// Set to true if AWS OpsWorks was unable to discover the Amazon RDS instance.
+	// AWS OpsWorks attempts to discover the instance only once. If this value is
+	// set to true, you must deregister the instance and then register it again.
+	MissingOnRds *bool `type:"boolean"`
+
+	// The instance's ARN.
+	RdsDbInstanceArn *string `type:"string"`
+
+	// The instance's AWS region.
+	Region *string `type:"string"`
+
+	// The ID of the stack that the instance is registered with.
+	StackId *string `type:"string"`
+}
+
+// String returns the string representation
+func (s RdsDbInstance) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s RdsDbInstance) GoString() string {
+	return s.String()
+}
+
+type RebootInstanceInput struct {
+	_ struct{} `type:"structure"`
+
+	// The instance ID.
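+	//
+	// Editor's note: a hypothetical usage sketch, not generated documentation.
+	// It assumes a service client created elsewhere (for example, svc :=
+	// opsworks.New(...)), a placeholder instance ID, and the aws.String helper
+	// from "github.com/aws/aws-sdk-go/aws":
+	//
+	//	_, err := svc.RebootInstance(&opsworks.RebootInstanceInput{
+	//		InstanceId: aws.String("your-instance-id"),
+	//	})
+	//	if err != nil {
+	//		log.Fatal(err)
+	//	}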
+	InstanceId *string `type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s RebootInstanceInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s RebootInstanceInput) GoString() string {
+	return s.String()
+}
+
+type RebootInstanceOutput struct {
+	_ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s RebootInstanceOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s RebootInstanceOutput) GoString() string {
+	return s.String()
+}
+
+// AWS OpsWorks supports five lifecycle events: setup, configuration, deploy,
+// undeploy, and shutdown. For each layer, AWS OpsWorks runs a set of standard
+// recipes for each event. In addition, you can provide custom recipes for any
+// or all layers and events. AWS OpsWorks runs custom event recipes after the
+// standard recipes. LayerCustomRecipes specifies the custom recipes for a particular
+// layer to be run in response to each of the five events.
+//
+// To specify a recipe, use the cookbook's directory name in the repository
+// followed by two colons and the recipe name, which is the recipe's file name
+// without the .rb extension. For example: phpapp2::dbsetup specifies the dbsetup.rb
+// recipe in the repository's phpapp2 folder.
+type Recipes struct {
+	_ struct{} `type:"structure"`
+
+	// An array of custom recipe names to be run following a configure event.
+	Configure []*string `type:"list"`
+
+	// An array of custom recipe names to be run following a deploy event.
+	Deploy []*string `type:"list"`
+
+	// An array of custom recipe names to be run following a setup event.
+	Setup []*string `type:"list"`
+
+	// An array of custom recipe names to be run following a shutdown event.
+	Shutdown []*string `type:"list"`
+
+	// An array of custom recipe names to be run following an undeploy event.
+	Undeploy []*string `type:"list"`
+}
+
+// String returns the string representation
+func (s Recipes) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s Recipes) GoString() string {
+	return s.String()
+}
+
+type RegisterEcsClusterInput struct {
+	_ struct{} `type:"structure"`
+
+	// The cluster's ARN.
+	EcsClusterArn *string `type:"string" required:"true"`
+
+	// The stack ID.
+	StackId *string `type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s RegisterEcsClusterInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s RegisterEcsClusterInput) GoString() string {
+	return s.String()
+}
+
+// Contains the response to a RegisterEcsCluster request.
+type RegisterEcsClusterOutput struct {
+	_ struct{} `type:"structure"`
+
+	// The cluster's ARN.
+	EcsClusterArn *string `type:"string"`
+}
+
+// String returns the string representation
+func (s RegisterEcsClusterOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s RegisterEcsClusterOutput) GoString() string {
+	return s.String()
+}
+
+type RegisterElasticIpInput struct {
+	_ struct{} `type:"structure"`
+
+	// The Elastic IP address.
+	ElasticIp *string `type:"string" required:"true"`
+
+	// The stack ID.
+	StackId *string `type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s RegisterElasticIpInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s RegisterElasticIpInput) GoString() string {
+	return s.String()
+}
+
+// Contains the response to a RegisterElasticIp request.
+type RegisterElasticIpOutput struct {
+	_ struct{} `type:"structure"`
+
+	// The Elastic IP address.
+	ElasticIp *string `type:"string"`
+}
+
+// String returns the string representation
+func (s RegisterElasticIpOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s RegisterElasticIpOutput) GoString() string {
+	return s.String()
+}
+
+type RegisterInstanceInput struct {
+	_ struct{} `type:"structure"`
+
+	// The instance's hostname.
+	Hostname *string `type:"string"`
+
+	// An InstanceIdentity object that contains the instance's identity.
+	InstanceIdentity *InstanceIdentity `type:"structure"`
+
+	// The instance's private IP address.
+	PrivateIp *string `type:"string"`
+
+	// The instance's public IP address.
+	PublicIp *string `type:"string"`
+
+	// The instance's public RSA key. This key is used to encrypt communication between
+	// the instance and the service.
+	RsaPublicKey *string `type:"string"`
+
+	// The instance's public RSA key fingerprint.
+	RsaPublicKeyFingerprint *string `type:"string"`
+
+	// The ID of the stack that the instance is to be registered with.
+	StackId *string `type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s RegisterInstanceInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s RegisterInstanceInput) GoString() string {
+	return s.String()
+}
+
+// Contains the response to a RegisterInstance request.
+type RegisterInstanceOutput struct {
+	_ struct{} `type:"structure"`
+
+	// The registered instance's AWS OpsWorks ID.
+	InstanceId *string `type:"string"`
+}
+
+// String returns the string representation
+func (s RegisterInstanceOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s RegisterInstanceOutput) GoString() string {
+	return s.String()
+}
+
+type RegisterRdsDbInstanceInput struct {
+	_ struct{} `type:"structure"`
+
+	// The database password.
+	DbPassword *string `type:"string" required:"true"`
+
+	// The database's master user name.
+	DbUser *string `type:"string" required:"true"`
+
+	// The Amazon RDS instance's ARN.
+	RdsDbInstanceArn *string `type:"string" required:"true"`
+
+	// The stack ID.
+	StackId *string `type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s RegisterRdsDbInstanceInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s RegisterRdsDbInstanceInput) GoString() string {
+	return s.String()
+}
+
+type RegisterRdsDbInstanceOutput struct {
+	_ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s RegisterRdsDbInstanceOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s RegisterRdsDbInstanceOutput) GoString() string {
+	return s.String()
+}
+
+type RegisterVolumeInput struct {
+	_ struct{} `type:"structure"`
+
+	// The Amazon EBS volume ID.
+	Ec2VolumeId *string `type:"string"`
+
+	// The stack ID.
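+	//
+	// Editor's note: a hypothetical sketch, not generated documentation; svc and
+	// both IDs are placeholders:
+	//
+	//	resp, err := svc.RegisterVolume(&opsworks.RegisterVolumeInput{
+	//		Ec2VolumeId: aws.String("vol-12345678"),
+	//		StackId:     aws.String("your-stack-id"),
+	//	})
+	//	// On success, resp.VolumeId holds the AWS OpsWorks volume ID.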
+ StackId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s RegisterVolumeInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RegisterVolumeInput) GoString() string { + return s.String() +} + +// Contains the response to a RegisterVolume request. +type RegisterVolumeOutput struct { + _ struct{} `type:"structure"` + + // The volume ID. + VolumeId *string `type:"string"` +} + +// String returns the string representation +func (s RegisterVolumeOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RegisterVolumeOutput) GoString() string { + return s.String() +} + +// A registered instance's reported operating system. +type ReportedOs struct { + _ struct{} `type:"structure"` + + // The operating system family. + Family *string `type:"string"` + + // The operating system name. + Name *string `type:"string"` + + // The operating system version. + Version *string `type:"string"` +} + +// String returns the string representation +func (s ReportedOs) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ReportedOs) GoString() string { + return s.String() +} + +// Describes a user's SSH information. +type SelfUserProfile struct { + _ struct{} `type:"structure"` + + // The user's IAM ARN. + IamUserArn *string `type:"string"` + + // The user's name. + Name *string `type:"string"` + + // The user's SSH public key. + SshPublicKey *string `type:"string"` + + // The user's SSH user name. + SshUsername *string `type:"string"` +} + +// String returns the string representation +func (s SelfUserProfile) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SelfUserProfile) GoString() string { + return s.String() +} + +// Describes an AWS OpsWorks service error. +type ServiceError struct { + _ struct{} `type:"structure"` + + // When the error occurred. + CreatedAt *string `type:"string"` + + // The instance ID. + InstanceId *string `type:"string"` + + // A message that describes the error. + Message *string `type:"string"` + + // The error ID. + ServiceErrorId *string `type:"string"` + + // The stack ID. + StackId *string `type:"string"` + + // The error type. + Type *string `type:"string"` +} + +// String returns the string representation +func (s ServiceError) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ServiceError) GoString() string { + return s.String() +} + +type SetLoadBasedAutoScalingInput struct { + _ struct{} `type:"structure"` + + // An AutoScalingThresholds object with the downscaling threshold configuration. + // If the load falls below these thresholds for a specified amount of time, + // AWS OpsWorks stops a specified number of instances. + DownScaling *AutoScalingThresholds `type:"structure"` + + // Enables load-based auto scaling for the layer. + Enable *bool `type:"boolean"` + + // The layer ID. + LayerId *string `type:"string" required:"true"` + + // An AutoScalingThresholds object with the upscaling threshold configuration. + // If the load exceeds these thresholds for a specified amount of time, AWS + // OpsWorks starts a specified number of instances. 
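+	//
+	// Editor's note: a hypothetical sketch, not generated documentation. The
+	// AutoScalingThresholds field names used here follow the type declared
+	// earlier in this file; svc and the layer ID are placeholders:
+	//
+	//	_, err := svc.SetLoadBasedAutoScaling(&opsworks.SetLoadBasedAutoScalingInput{
+	//		LayerId: aws.String("your-layer-id"),
+	//		Enable:  aws.Bool(true),
+	//		UpScaling: &opsworks.AutoScalingThresholds{
+	//			InstanceCount: aws.Int64(2),
+	//			CpuThreshold:  aws.Float64(80.0),
+	//		},
+	//	})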
+ UpScaling *AutoScalingThresholds `type:"structure"` +} + +// String returns the string representation +func (s SetLoadBasedAutoScalingInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SetLoadBasedAutoScalingInput) GoString() string { + return s.String() +} + +type SetLoadBasedAutoScalingOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s SetLoadBasedAutoScalingOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SetLoadBasedAutoScalingOutput) GoString() string { + return s.String() +} + +type SetPermissionInput struct { + _ struct{} `type:"structure"` + + // The user is allowed to use SSH to communicate with the instance. + AllowSsh *bool `type:"boolean"` + + // The user is allowed to use sudo to elevate privileges. + AllowSudo *bool `type:"boolean"` + + // The user's IAM ARN. + IamUserArn *string `type:"string" required:"true"` + + // The user's permission level, which must be set to one of the following strings. + // You cannot set your own permissions level. + // + // deny show deploy manage iam_only For more information on the + // permissions associated with these levels, see Managing User Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). + Level *string `type:"string"` + + // The stack ID. + StackId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s SetPermissionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SetPermissionInput) GoString() string { + return s.String() +} + +type SetPermissionOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s SetPermissionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SetPermissionOutput) GoString() string { + return s.String() +} + +type SetTimeBasedAutoScalingInput struct { + _ struct{} `type:"structure"` + + // An AutoScalingSchedule with the instance schedule. + AutoScalingSchedule *WeeklyAutoScalingSchedule `type:"structure"` + + // The instance ID. + InstanceId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s SetTimeBasedAutoScalingInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SetTimeBasedAutoScalingInput) GoString() string { + return s.String() +} + +type SetTimeBasedAutoScalingOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s SetTimeBasedAutoScalingOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SetTimeBasedAutoScalingOutput) GoString() string { + return s.String() +} + +// The Shutdown event configuration. +type ShutdownEventConfiguration struct { + _ struct{} `type:"structure"` + + // Whether to enable Elastic Load Balancing connection draining. For more information, + // see Connection Draining (http://docs.aws.amazon.com/ElasticLoadBalancing/latest/DeveloperGuide/TerminologyandKeyConcepts.html#conn-drain) + DelayUntilElbConnectionsDrained *bool `type:"boolean"` + + // The time, in seconds, that AWS OpsWorks will wait after triggering a Shutdown + // event before shutting down an instance. 
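+	//
+	// Editor's note: a hypothetical sketch, not generated documentation, showing
+	// how this structure nests under a layer's lifecycle configuration (all
+	// values are placeholders):
+	//
+	//	cfg := &opsworks.LifecycleEventConfiguration{
+	//		Shutdown: &opsworks.ShutdownEventConfiguration{
+	//			ExecutionTimeout:                aws.Int64(120),
+	//			DelayUntilElbConnectionsDrained: aws.Bool(true),
+	//		},
+	//	}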
+	ExecutionTimeout *int64 `type:"integer"`
+}
+
+// String returns the string representation
+func (s ShutdownEventConfiguration) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ShutdownEventConfiguration) GoString() string {
+	return s.String()
+}
+
+// Contains the information required to retrieve an app or cookbook from a repository.
+// For more information, see Creating Apps (http://docs.aws.amazon.com/opsworks/latest/userguide/workingapps-creating.html)
+// or Custom Recipes and Cookbooks (http://docs.aws.amazon.com/opsworks/latest/userguide/workingcookbook.html).
+type Source struct {
+	_ struct{} `type:"structure"`
+
+	// When included in a request, the parameter depends on the repository type.
+	//
+	// For Amazon S3 bundles, set Password to the appropriate IAM secret access
+	// key. For HTTP bundles and Subversion repositories, set Password to the password.
+	// For more information on how to safely handle IAM credentials, see .
+	//
+	// In responses, AWS OpsWorks returns *****FILTERED***** instead of the actual
+	// value.
+	Password *string `type:"string"`
+
+	// The application's version. AWS OpsWorks enables you to easily deploy new
+	// versions of an application. One of the simplest approaches is to have branches
+	// or revisions in your repository that represent different versions that can
+	// potentially be deployed.
+	Revision *string `type:"string"`
+
+	// In requests, the repository's SSH key.
+	//
+	// In responses, AWS OpsWorks returns *****FILTERED***** instead of the actual
+	// value.
+	SshKey *string `type:"string"`
+
+	// The repository type.
+	Type *string `type:"string" enum:"SourceType"`
+
+	// The source URL.
+	Url *string `type:"string"`
+
+	// This parameter depends on the repository type.
+	//
+	// For Amazon S3 bundles, set Username to the appropriate IAM access key ID.
+	// For HTTP bundles, Git repositories, and Subversion repositories, set Username
+	// to the user name.
+	Username *string `type:"string"`
+}
+
+// String returns the string representation
+func (s Source) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s Source) GoString() string {
+	return s.String()
+}
+
+// Describes an app's SSL configuration.
+type SslConfiguration struct {
+	_ struct{} `type:"structure"`
+
+	// The contents of the certificate's domain.crt file.
+	Certificate *string `type:"string" required:"true"`
+
+	// Optional. Can be used to specify an intermediate certificate authority key
+	// or client authentication.
+	Chain *string `type:"string"`
+
+	// The private key; the contents of the certificate's domain.key file.
+	PrivateKey *string `type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s SslConfiguration) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s SslConfiguration) GoString() string {
+	return s.String()
+}
+
+// Describes a stack.
+type Stack struct {
+	_ struct{} `type:"structure"`
+
+	// The agent version. This parameter is set to LATEST for auto-update or to
+	// a version number for a fixed agent version.
+	AgentVersion *string `type:"string"`
+
+	// The stack's ARN.
+	Arn *string `type:"string"`
+
+	// The stack's attributes.
+	Attributes map[string]*string `type:"map"`
+
+	// A ChefConfiguration object that specifies whether to enable Berkshelf and
+	// the Berkshelf version.
For more information, see Create a New Stack (http://docs.aws.amazon.com/opsworks/latest/userguide/workingstacks-creating.html). + ChefConfiguration *ChefConfiguration `type:"structure"` + + // The configuration manager. + ConfigurationManager *StackConfigurationManager `type:"structure"` + + // The date when the stack was created. + CreatedAt *string `type:"string"` + + // Contains the information required to retrieve an app or cookbook from a repository. + // For more information, see Creating Apps (http://docs.aws.amazon.com/opsworks/latest/userguide/workingapps-creating.html) + // or Custom Recipes and Cookbooks (http://docs.aws.amazon.com/opsworks/latest/userguide/workingcookbook.html). + CustomCookbooksSource *Source `type:"structure"` + + // A JSON object that contains user-defined attributes to be added to the stack + // configuration and deployment attributes. You can use custom JSON to override + // the corresponding default stack configuration attribute values or to pass + // data to recipes. The string should be in the following format and must escape + // characters such as '"': + // + // "{\"key1\": \"value1\", \"key2\": \"value2\",...}" + // + // For more information on custom JSON, see Use Custom JSON to Modify the Stack + // Configuration Attributes (http://docs.aws.amazon.com/opsworks/latest/userguide/workingstacks-json.html). + CustomJson *string `type:"string"` + + // The stack's default Availability Zone. For more information, see Regions + // and Endpoints (http://docs.aws.amazon.com/general/latest/gr/rande.html). + DefaultAvailabilityZone *string `type:"string"` + + // The ARN of an IAM profile that is the default profile for all of the stack's + // EC2 instances. For more information about IAM ARNs, see Using Identifiers + // (http://docs.aws.amazon.com/IAM/latest/UserGuide/Using_Identifiers.html). + DefaultInstanceProfileArn *string `type:"string"` + + // The stack's default operating system. + DefaultOs *string `type:"string"` + + // The default root device type. This value is used by default for all instances + // in the stack, but you can override it when you create an instance. For more + // information, see Storage for the Root Device (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ComponentsAMIs.html#storage-for-the-root-device). + DefaultRootDeviceType *string `type:"string" enum:"RootDeviceType"` + + // A default Amazon EC2 key pair for the stack's instances. You can override + // this value when you create or update an instance. + DefaultSshKeyName *string `type:"string"` + + // The default subnet ID; applicable only if the stack is running in a VPC. + DefaultSubnetId *string `type:"string"` + + // The stack host name theme, with spaces replaced by underscores. + HostnameTheme *string `type:"string"` + + // The stack name. + Name *string `type:"string"` + + // The stack AWS region, such as "us-east-1". For more information about AWS + // regions, see Regions and Endpoints (http://docs.aws.amazon.com/general/latest/gr/rande.html). + Region *string `type:"string"` + + // The stack AWS Identity and Access Management (IAM) role. + ServiceRoleArn *string `type:"string"` + + // The stack ID. + StackId *string `type:"string"` + + // Whether the stack uses custom cookbooks. + UseCustomCookbooks *bool `type:"boolean"` + + // Whether the stack automatically associates the AWS OpsWorks built-in security + // groups with the stack's layers. + UseOpsworksSecurityGroups *bool `type:"boolean"` + + // The VPC ID; applicable only if the stack is running in a VPC. 
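+	//
+	// Editor's note: a hypothetical sketch, not generated documentation. One way
+	// to produce the escaped CustomJson format described above is to marshal a
+	// map with encoding/json rather than escaping quotes by hand:
+	//
+	//	raw, _ := json.Marshal(map[string]string{"key1": "value1", "key2": "value2"})
+	//	customJson := aws.String(string(raw)) // "{\"key1\":\"value1\",\"key2\":\"value2\"}"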
+ VpcId *string `type:"string"` +} + +// String returns the string representation +func (s Stack) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Stack) GoString() string { + return s.String() +} + +// Describes the configuration manager. +type StackConfigurationManager struct { + _ struct{} `type:"structure"` + + // The name. This parameter must be set to "Chef". + Name *string `type:"string"` + + // The Chef version. This parameter must be set to 12, 11.10, or 11.4 for Linux + // stacks, and to 12.2 for Windows stacks. The default value for Linux stacks + // is 11.4. + Version *string `type:"string"` +} + +// String returns the string representation +func (s StackConfigurationManager) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s StackConfigurationManager) GoString() string { + return s.String() +} + +// Summarizes the number of layers, instances, and apps in a stack. +type StackSummary struct { + _ struct{} `type:"structure"` + + // The number of apps. + AppsCount *int64 `type:"integer"` + + // The stack's ARN. + Arn *string `type:"string"` + + // An InstancesCount object with the number of instances in each status. + InstancesCount *InstancesCount `type:"structure"` + + // The number of layers. + LayersCount *int64 `type:"integer"` + + // The stack name. + Name *string `type:"string"` + + // The stack ID. + StackId *string `type:"string"` +} + +// String returns the string representation +func (s StackSummary) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s StackSummary) GoString() string { + return s.String() +} + +type StartInstanceInput struct { + _ struct{} `type:"structure"` + + // The instance ID. + InstanceId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s StartInstanceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s StartInstanceInput) GoString() string { + return s.String() +} + +type StartInstanceOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s StartInstanceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s StartInstanceOutput) GoString() string { + return s.String() +} + +type StartStackInput struct { + _ struct{} `type:"structure"` + + // The stack ID. + StackId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s StartStackInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s StartStackInput) GoString() string { + return s.String() +} + +type StartStackOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s StartStackOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s StartStackOutput) GoString() string { + return s.String() +} + +type StopInstanceInput struct { + _ struct{} `type:"structure"` + + // The instance ID. 
+	InstanceId *string `type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s StopInstanceInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s StopInstanceInput) GoString() string {
+	return s.String()
+}
+
+type StopInstanceOutput struct {
+	_ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s StopInstanceOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s StopInstanceOutput) GoString() string {
+	return s.String()
+}
+
+type StopStackInput struct {
+	_ struct{} `type:"structure"`
+
+	// The stack ID.
+	StackId *string `type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s StopStackInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s StopStackInput) GoString() string {
+	return s.String()
+}
+
+type StopStackOutput struct {
+	_ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s StopStackOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s StopStackOutput) GoString() string {
+	return s.String()
+}
+
+// Contains the data needed by RDP clients such as the Microsoft Remote Desktop
+// Connection to log in to the instance.
+type TemporaryCredential struct {
+	_ struct{} `type:"structure"`
+
+	// The instance's AWS OpsWorks ID.
+	InstanceId *string `type:"string"`
+
+	// The password.
+	Password *string `type:"string"`
+
+	// The user name.
+	Username *string `type:"string"`
+
+	// The length of time (in minutes) that the grant is valid. When the grant expires
+	// at the end of this period, the user will no longer be able to use the credentials
+	// to log in. If they are logged in at the time, they will be automatically
+	// logged out.
+	ValidForInMinutes *int64 `type:"integer"`
+}
+
+// String returns the string representation
+func (s TemporaryCredential) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s TemporaryCredential) GoString() string {
+	return s.String()
+}
+
+// Describes an instance's time-based auto scaling configuration.
+type TimeBasedAutoScalingConfiguration struct {
+	_ struct{} `type:"structure"`
+
+	// A WeeklyAutoScalingSchedule object with the instance schedule.
+	AutoScalingSchedule *WeeklyAutoScalingSchedule `type:"structure"`
+
+	// The instance ID.
+	InstanceId *string `type:"string"`
+}
+
+// String returns the string representation
+func (s TimeBasedAutoScalingConfiguration) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s TimeBasedAutoScalingConfiguration) GoString() string {
+	return s.String()
+}
+
+type UnassignInstanceInput struct {
+	_ struct{} `type:"structure"`
+
+	// The instance ID.
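+	//
+	// Editor's note: a hypothetical sketch, not generated documentation, for the
+	// SetTimeBasedAutoScaling call defined earlier in this file. The
+	// WeeklyAutoScalingSchedule field names and "on" values follow the schedule
+	// type declared elsewhere in this file; svc and the instance ID are placeholders:
+	//
+	//	_, err := svc.SetTimeBasedAutoScaling(&opsworks.SetTimeBasedAutoScalingInput{
+	//		InstanceId: aws.String("your-instance-id"),
+	//		AutoScalingSchedule: &opsworks.WeeklyAutoScalingSchedule{
+	//			Monday: map[string]*string{"9": aws.String("on"), "10": aws.String("on")},
+	//		},
+	//	})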
+	InstanceId *string `type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s UnassignInstanceInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s UnassignInstanceInput) GoString() string {
+	return s.String()
+}
+
+type UnassignInstanceOutput struct {
+	_ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s UnassignInstanceOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s UnassignInstanceOutput) GoString() string {
+	return s.String()
+}
+
+type UnassignVolumeInput struct {
+	_ struct{} `type:"structure"`
+
+	// The volume ID.
+	VolumeId *string `type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s UnassignVolumeInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s UnassignVolumeInput) GoString() string {
+	return s.String()
+}
+
+type UnassignVolumeOutput struct {
+	_ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s UnassignVolumeOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s UnassignVolumeOutput) GoString() string {
+	return s.String()
+}
+
+type UpdateAppInput struct {
+	_ struct{} `type:"structure"`
+
+	// The app ID.
+	AppId *string `type:"string" required:"true"`
+
+	// A Source object that specifies the app repository.
+	AppSource *Source `type:"structure"`
+
+	// One or more user-defined key/value pairs to be added to the stack attributes.
+	Attributes map[string]*string `type:"map"`
+
+	// The app's data sources.
+	DataSources []*DataSource `type:"list"`
+
+	// A description of the app.
+	Description *string `type:"string"`
+
+	// The app's virtual host settings, with multiple domains separated by commas.
+	// For example: 'www.example.com, example.com'
+	Domains []*string `type:"list"`
+
+	// Whether SSL is enabled for the app.
+	EnableSsl *bool `type:"boolean"`
+
+	// An array of EnvironmentVariable objects that specify environment variables
+	// to be associated with the app. After you deploy the app, these variables
+	// are defined on the associated app server instances. For more information,
+	// see Environment Variables (http://docs.aws.amazon.com/opsworks/latest/userguide/workingapps-creating.html#workingapps-creating-environment).
+	//
+	// There is no specific limit on the number of environment variables. However,
+	// the size of the associated data structure, which includes the variables'
+	// names, values, and protected flag values, cannot exceed 10 KB (10240 bytes).
+	// This limit should accommodate most if not all use cases. Exceeding it will
+	// cause an exception with the message, "Environment: is too large (maximum
+	// is 10KB)."
+	//
+	// This parameter is supported only by Chef 11.10 stacks. If you have specified
+	// one or more environment variables, you cannot modify the stack's Chef version.
+	Environment []*EnvironmentVariable `type:"list"`
+
+	// The app name.
+	Name *string `type:"string"`
+
+	// An SslConfiguration object with the SSL configuration.
+	SslConfiguration *SslConfiguration `type:"structure"`
+
+	// The app type.
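+	//
+	// Editor's note: a hypothetical sketch, not generated documentation, tying
+	// this structure to the EnvironmentVariable type defined earlier in this
+	// file; svc, the app ID, and the variable values are placeholders:
+	//
+	//	_, err := svc.UpdateApp(&opsworks.UpdateAppInput{
+	//		AppId: aws.String("your-app-id"),
+	//		Environment: []*opsworks.EnvironmentVariable{{
+	//			Key:    aws.String("RAILS_ENV"),
+	//			Value:  aws.String("production"),
+	//			Secure: aws.Bool(false),
+	//		}},
+	//	})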
+ Type *string `type:"string" enum:"AppType"` +} + +// String returns the string representation +func (s UpdateAppInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateAppInput) GoString() string { + return s.String() +} + +type UpdateAppOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s UpdateAppOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateAppOutput) GoString() string { + return s.String() +} + +type UpdateElasticIpInput struct { + _ struct{} `type:"structure"` + + // The address. + ElasticIp *string `type:"string" required:"true"` + + // The new name. + Name *string `type:"string"` +} + +// String returns the string representation +func (s UpdateElasticIpInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateElasticIpInput) GoString() string { + return s.String() +} + +type UpdateElasticIpOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s UpdateElasticIpOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateElasticIpOutput) GoString() string { + return s.String() +} + +type UpdateInstanceInput struct { + _ struct{} `type:"structure"` + + // The default AWS OpsWorks agent version. You have the following options: + // + // INHERIT - Use the stack's default agent version setting. version_number + // - Use the specified agent version. This value overrides the stack's default + // setting. To update the agent version, you must edit the instance configuration + // and specify a new version. AWS OpsWorks then automatically installs that + // version on the instance. The default setting is INHERIT. To specify an agent + // version, you must use the complete version number, not the abbreviated number + // shown on the console. For a list of available agent version numbers, call + // DescribeAgentVersions. + AgentVersion *string `type:"string"` + + // A custom AMI ID to be used to create the instance. The AMI must be based + // on one of the supported operating systems. For more information, see Instances + // (http://docs.aws.amazon.com/opsworks/latest/userguide/workinginstances-custom-ami.html) + // + // If you specify a custom AMI, you must set Os to Custom. + AmiId *string `type:"string"` + + // The instance architecture. Instance types do not necessarily support both + // architectures. For a list of the architectures that are supported by the + // different instance types, see Instance Families and Types (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-types.html). + Architecture *string `type:"string" enum:"Architecture"` + + // For load-based or time-based instances, the type. Windows stacks can use + // only time-based instances. + AutoScalingType *string `type:"string" enum:"AutoScalingType"` + + // This property cannot be updated. + EbsOptimized *bool `type:"boolean"` + + // The instance host name. + Hostname *string `type:"string"` + + // Whether to install operating system and package updates when the instance + // boots. The default value is true. To control when updates are installed, + // set this value to false. 
You must then update your instances manually by + // using CreateDeployment to run the update_dependencies stack command or by + // manually running yum (Amazon Linux) or apt-get (Ubuntu) on the instances. + // + // We strongly recommend using the default value of true, to ensure that your + // instances have the latest security updates. + InstallUpdatesOnBoot *bool `type:"boolean"` + + // The instance ID. + InstanceId *string `type:"string" required:"true"` + + // The instance type, such as t2.micro. For a list of supported instance types, + // open the stack in the console, choose Instances, and choose + Instance. The + // Size list contains the currently supported types. For more information, see + // Instance Families and Types (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-types.html). + // The parameter values that you use to specify the various types are in the + // API Name column of the Available Instance Types table. + InstanceType *string `type:"string"` + + // The instance's layer IDs. + LayerIds []*string `type:"list"` + + // The instance's operating system, which must be set to one of the following. + // + // A supported Linux operating system: An Amazon Linux version, such as Amazon + // Linux 2015.03, Red Hat Enterprise Linux 7, Ubuntu 12.04 LTS, or Ubuntu 14.04 + // LTS. Microsoft Windows Server 2012 R2 Base. A custom AMI: Custom. For more + // information on the supported operating systems, see AWS OpsWorks Operating + // Systems (http://docs.aws.amazon.com/opsworks/latest/userguide/workinginstances-os.html). + // + // The default option is the current Amazon Linux version. If you set this + // parameter to Custom, you must use the AmiId parameter to specify the custom + // AMI that you want to use. For more information on the supported operating + // systems, see Operating Systems (http://docs.aws.amazon.com/opsworks/latest/userguide/workinginstances-os.html). + // For more information on how to use custom AMIs with OpsWorks, see Using Custom + // AMIs (http://docs.aws.amazon.com/opsworks/latest/userguide/workinginstances-custom-ami.html). + // + // You can specify a different Linux operating system for the updated stack, + // but you cannot change from Linux to Windows or Windows to Linux. + Os *string `type:"string"` + + // The instance's Amazon EC2 key name. + SshKeyName *string `type:"string"` +} + +// String returns the string representation +func (s UpdateInstanceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateInstanceInput) GoString() string { + return s.String() +} + +type UpdateInstanceOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s UpdateInstanceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateInstanceOutput) GoString() string { + return s.String() +} + +type UpdateLayerInput struct { + _ struct{} `type:"structure"` + + // One or more user-defined key/value pairs to be added to the stack attributes. + Attributes map[string]*string `type:"map"` + + // Whether to automatically assign an Elastic IP address (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/elastic-ip-addresses-eip.html) + // to the layer's instances. For more information, see How to Edit a Layer (http://docs.aws.amazon.com/opsworks/latest/userguide/workinglayers-basics-edit.html). 
+	AutoAssignElasticIps *bool `type:"boolean"`
+
+	// For stacks that are running in a VPC, whether to automatically assign a public
+	// IP address to the layer's instances. For more information, see How to Edit
+	// a Layer (http://docs.aws.amazon.com/opsworks/latest/userguide/workinglayers-basics-edit.html).
+	AutoAssignPublicIps *bool `type:"boolean"`
+
+	// The ARN of an IAM profile to be used for all of the layer's EC2 instances.
+	// For more information about IAM ARNs, see Using Identifiers (http://docs.aws.amazon.com/IAM/latest/UserGuide/Using_Identifiers.html).
+	CustomInstanceProfileArn *string `type:"string"`
+
+	// A JSON-formatted string containing custom stack configuration and deployment
+	// attributes to be installed on the layer's instances. For more information,
+	// see Using Custom JSON (http://docs.aws.amazon.com/opsworks/latest/userguide/workingcookbook-json-override.html).
+	CustomJson *string `type:"string"`
+
+	// A LayerCustomRecipes object that specifies the layer's custom recipes.
+	CustomRecipes *Recipes `type:"structure"`
+
+	// An array containing the layer's custom security group IDs.
+	CustomSecurityGroupIds []*string `type:"list"`
+
+	// Whether to disable auto healing for the layer.
+	EnableAutoHealing *bool `type:"boolean"`
+
+	// Whether to install operating system and package updates when the instance
+	// boots. The default value is true. To control when updates are installed,
+	// set this value to false. You must then update your instances manually by
+	// using CreateDeployment to run the update_dependencies stack command or manually
+	// running yum (Amazon Linux) or apt-get (Ubuntu) on the instances.
+	//
+	// We strongly recommend using the default value of true, to ensure that your
+	// instances have the latest security updates.
+	InstallUpdatesOnBoot *bool `type:"boolean"`
+
+	// The layer ID.
+	LayerId *string `type:"string" required:"true"`
+
+	LifecycleEventConfiguration *LifecycleEventConfiguration `type:"structure"`
+
+	// The layer name, which is used by the console.
+	Name *string `type:"string"`
+
+	// An array of Package objects that describe the layer's packages.
+	Packages []*string `type:"list"`
+
+	// For custom layers only, use this parameter to specify the layer's short name,
+	// which is used internally by AWS OpsWorks and by Chef. The short name is also
+	// used as the name for the directory where your app files are installed. It
+	// can have a maximum of 200 characters and must be in the following format:
+	// /\A[a-z0-9\-\_\.]+\Z/.
+	//
+	// The built-in layers' short names are defined by AWS OpsWorks. For more information,
+	// see the Layer Reference (http://docs.aws.amazon.com/opsworks/latest/userguide/layers.html).
+	Shortname *string `type:"string"`
+
+	// Whether to use Amazon EBS-optimized instances.
+	UseEbsOptimizedInstances *bool `type:"boolean"`
+
+	// A VolumeConfigurations object that describes the layer's Amazon EBS volumes.
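+	//
+	// Editor's note: a hypothetical sketch, not generated documentation, using
+	// the cookbook::recipe naming convention described for the Recipes type in
+	// this file; svc and the layer ID are placeholders:
+	//
+	//	_, err := svc.UpdateLayer(&opsworks.UpdateLayerInput{
+	//		LayerId: aws.String("your-layer-id"),
+	//		CustomRecipes: &opsworks.Recipes{
+	//			Deploy: []*string{aws.String("phpapp2::dbsetup")},
+	//		},
+	//	})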
+ VolumeConfigurations []*VolumeConfiguration `type:"list"` +} + +// String returns the string representation +func (s UpdateLayerInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateLayerInput) GoString() string { + return s.String() +} + +type UpdateLayerOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s UpdateLayerOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateLayerOutput) GoString() string { + return s.String() +} + +type UpdateMyUserProfileInput struct { + _ struct{} `type:"structure"` + + // The user's SSH public key. + SshPublicKey *string `type:"string"` +} + +// String returns the string representation +func (s UpdateMyUserProfileInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateMyUserProfileInput) GoString() string { + return s.String() +} + +type UpdateMyUserProfileOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s UpdateMyUserProfileOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateMyUserProfileOutput) GoString() string { + return s.String() +} + +type UpdateRdsDbInstanceInput struct { + _ struct{} `type:"structure"` + + // The database password. + DbPassword *string `type:"string"` + + // The master user name. + DbUser *string `type:"string"` + + // The Amazon RDS instance's ARN. + RdsDbInstanceArn *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s UpdateRdsDbInstanceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateRdsDbInstanceInput) GoString() string { + return s.String() +} + +type UpdateRdsDbInstanceOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s UpdateRdsDbInstanceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateRdsDbInstanceOutput) GoString() string { + return s.String() +} + +type UpdateStackInput struct { + _ struct{} `type:"structure"` + + // The default AWS OpsWorks agent version. You have the following options: + // + // Auto-update - Set this parameter to LATEST. AWS OpsWorks automatically + // installs new agent versions on the stack's instances as soon as they are + // available. Fixed version - Set this parameter to your preferred agent version. + // To update the agent version, you must edit the stack configuration and specify + // a new version. AWS OpsWorks then automatically installs that version on the + // stack's instances. The default setting is LATEST. To specify an agent version, + // you must use the complete version number, not the abbreviated number shown + // on the console. For a list of available agent version numbers, call DescribeAgentVersions. + // + // You can also specify an agent version when you create or update an instance, + // which overrides the stack's default setting. + AgentVersion *string `type:"string"` + + // One or more user-defined key-value pairs to be added to the stack attributes. + Attributes map[string]*string `type:"map"` + + // A ChefConfiguration object that specifies whether to enable Berkshelf and + // the Berkshelf version on Chef 11.10 stacks. 
For more information, see Create + // a New Stack (http://docs.aws.amazon.com/opsworks/latest/userguide/workingstacks-creating.html). + ChefConfiguration *ChefConfiguration `type:"structure"` + + // The configuration manager. When you update a stack, we recommend that you + // use the configuration manager to specify the Chef version: 12, 11.10, or + // 11.4 for Linux stacks, or 12.2 for Windows stacks. The default value for + // Linux stacks is currently 11.4. + ConfigurationManager *StackConfigurationManager `type:"structure"` + + // Contains the information required to retrieve an app or cookbook from a repository. + // For more information, see Creating Apps (http://docs.aws.amazon.com/opsworks/latest/userguide/workingapps-creating.html) + // or Custom Recipes and Cookbooks (http://docs.aws.amazon.com/opsworks/latest/userguide/workingcookbook.html). + CustomCookbooksSource *Source `type:"structure"` + + // A string that contains user-defined, custom JSON. It can be used to override + // the corresponding default stack configuration JSON values or to pass data + // to recipes. The string should be in the following format and escape characters + // such as '"': + // + // "{\"key1\": \"value1\", \"key2\": \"value2\",...}" + // + // For more information on custom JSON, see Use Custom JSON to Modify the Stack + // Configuration Attributes (http://docs.aws.amazon.com/opsworks/latest/userguide/workingstacks-json.html). + CustomJson *string `type:"string"` + + // The stack's default Availability Zone, which must be in the stack's region. + // For more information, see Regions and Endpoints (http://docs.aws.amazon.com/general/latest/gr/rande.html). + // If you also specify a value for DefaultSubnetId, the subnet must be in the + // same zone. For more information, see CreateStack. + DefaultAvailabilityZone *string `type:"string"` + + // The ARN of an IAM profile that is the default profile for all of the stack's + // EC2 instances. For more information about IAM ARNs, see Using Identifiers + // (http://docs.aws.amazon.com/IAM/latest/UserGuide/Using_Identifiers.html). + DefaultInstanceProfileArn *string `type:"string"` + + // The stack's operating system, which must be set to one of the following: + // + // A supported Linux operating system: An Amazon Linux version, such as Amazon + // Linux 2015.03, Red Hat Enterprise Linux 7, Ubuntu 12.04 LTS, or Ubuntu 14.04 + // LTS. Microsoft Windows Server 2012 R2 Base. A custom AMI: Custom. You specify + // the custom AMI you want to use when you create instances. For more information + // on how to use custom AMIs with OpsWorks, see Using Custom AMIs (http://docs.aws.amazon.com/opsworks/latest/userguide/workinginstances-custom-ami.html). + // The default option is the stack's current operating system. For more information + // on the supported operating systems, see AWS OpsWorks Operating Systems (http://docs.aws.amazon.com/opsworks/latest/userguide/workinginstances-os.html). + DefaultOs *string `type:"string"` + + // The default root device type. This value is used by default for all instances + // in the stack, but you can override it when you create an instance. For more + // information, see Storage for the Root Device (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ComponentsAMIs.html#storage-for-the-root-device). + DefaultRootDeviceType *string `type:"string" enum:"RootDeviceType"` + + // A default Amazon EC2 key-pair name. The default value is none. 
If you specify
+ // a key-pair name, AWS OpsWorks installs the public key on the instance and
+ // you can use the private key with an SSH client to log in to the instance.
+ // For more information, see Using SSH to Communicate with an Instance (http://docs.aws.amazon.com/opsworks/latest/userguide/workinginstances-ssh.html)
+ // and Managing SSH Access (http://docs.aws.amazon.com/opsworks/latest/userguide/security-ssh-access.html).
+ // You can override this setting by specifying a different key pair, or no key
+ // pair, when you create an instance (http://docs.aws.amazon.com/opsworks/latest/userguide/workinginstances-add.html).
+ DefaultSshKeyName *string `type:"string"`
+
+ // The stack's default VPC subnet ID. This parameter is required if you specify
+ // a value for the VpcId parameter. All instances are launched into this subnet
+ // unless you specify otherwise when you create the instance. If you also specify
+ // a value for DefaultAvailabilityZone, the subnet must be in that zone. For
+ // information on default values and when this parameter is required, see the
+ // VpcId parameter description.
+ DefaultSubnetId *string `type:"string"`
+
+ // The stack's new host name theme, with spaces replaced by underscores. The
+ // theme is used to generate host names for the stack's instances. By default,
+ // HostnameTheme is set to Layer_Dependent, which creates host names by appending
+ // integers to the layer's short name. The other themes are:
+ //
+ // Baked_Goods Clouds Europe_Cities Fruits Greek_Deities Legendary_creatures_from_Japan
+ // Planets_and_Moons Roman_Deities Scottish_Islands US_Cities Wild_Cats
+ // To obtain a generated host name, call GetHostnameSuggestion, which returns
+ // a host name based on the current theme.
+ HostnameTheme *string `type:"string"`
+
+ // The stack's new name.
+ Name *string `type:"string"`
+
+ // Do not use this parameter. You cannot update a stack's service role.
+ ServiceRoleArn *string `type:"string"`
+
+ // The stack ID.
+ StackId *string `type:"string" required:"true"`
+
+ // Whether the stack uses custom cookbooks.
+ UseCustomCookbooks *bool `type:"boolean"`
+
+ // Whether to associate the AWS OpsWorks built-in security groups with the stack's
+ // layers.
+ //
+ // AWS OpsWorks provides a standard set of built-in security groups, one for
+ // each layer, which are associated with layers by default. UseOpsworksSecurityGroups
+ // allows you to provide your own custom security groups instead of using the
+ // built-in groups. UseOpsworksSecurityGroups has the following settings:
+ //
+ // True - AWS OpsWorks automatically associates the appropriate built-in security
+ // group with each layer (default setting). You can associate additional security
+ // groups with a layer after you create it, but you cannot delete the built-in
+ // security group. False - AWS OpsWorks does not associate built-in security
+ // groups with layers. You must create appropriate EC2 security groups and associate
+ // a security group with each layer that you create. However, you can still
+ // manually associate a built-in security group with a layer on creation. Custom security
+ // groups are required only for those layers that need custom settings. For
+ // more information, see Create a New Stack (http://docs.aws.amazon.com/opsworks/latest/userguide/workingstacks-creating.html).
+ UseOpsworksSecurityGroups *bool `type:"boolean"` +} + +// String returns the string representation +func (s UpdateStackInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateStackInput) GoString() string { + return s.String() +} + +type UpdateStackOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s UpdateStackOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateStackOutput) GoString() string { + return s.String() +} + +type UpdateUserProfileInput struct { + _ struct{} `type:"structure"` + + // Whether users can specify their own SSH public key through the My Settings + // page. For more information, see Managing User Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/security-settingsshkey.html). + AllowSelfManagement *bool `type:"boolean"` + + // The user IAM ARN. + IamUserArn *string `type:"string" required:"true"` + + // The user's new SSH public key. + SshPublicKey *string `type:"string"` + + // The user's SSH user name. The allowable characters are [a-z], [A-Z], [0-9], + // '-', and '_'. If the specified name includes other punctuation marks, AWS + // OpsWorks removes them. For example, my.name will be changed to myname. If + // you do not specify an SSH user name, AWS OpsWorks generates one from the + // IAM user name. + SshUsername *string `type:"string"` +} + +// String returns the string representation +func (s UpdateUserProfileInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateUserProfileInput) GoString() string { + return s.String() +} + +type UpdateUserProfileOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s UpdateUserProfileOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateUserProfileOutput) GoString() string { + return s.String() +} + +type UpdateVolumeInput struct { + _ struct{} `type:"structure"` + + // The new mount point. + MountPoint *string `type:"string"` + + // The new name. + Name *string `type:"string"` + + // The volume ID. + VolumeId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s UpdateVolumeInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateVolumeInput) GoString() string { + return s.String() +} + +type UpdateVolumeOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s UpdateVolumeOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateVolumeOutput) GoString() string { + return s.String() +} + +// Describes a user's SSH information. +type UserProfile struct { + _ struct{} `type:"structure"` + + // Whether users can specify their own SSH public key through the My Settings + // page. For more information, see Managing User Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/security-settingsshkey.html). + AllowSelfManagement *bool `type:"boolean"` + + // The user's IAM ARN. + IamUserArn *string `type:"string"` + + // The user's name. + Name *string `type:"string"` + + // The user's SSH public key. + SshPublicKey *string `type:"string"` + + // The user's SSH user name. 
+ SshUsername *string `type:"string"`
+}
+
+// String returns the string representation
+func (s UserProfile) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s UserProfile) GoString() string {
+ return s.String()
+}
+
+// Describes an instance's Amazon EBS volume.
+type Volume struct {
+ _ struct{} `type:"structure"`
+
+ // The volume Availability Zone. For more information, see Regions and Endpoints
+ // (http://docs.aws.amazon.com/general/latest/gr/rande.html).
+ AvailabilityZone *string `type:"string"`
+
+ // The device name.
+ Device *string `type:"string"`
+
+ // The Amazon EC2 volume ID.
+ Ec2VolumeId *string `type:"string"`
+
+ // The instance ID.
+ InstanceId *string `type:"string"`
+
+ // For PIOPS volumes, the IOPS per disk.
+ Iops *int64 `type:"integer"`
+
+ // The volume mount point. For example "/dev/sdh".
+ MountPoint *string `type:"string"`
+
+ // The volume name.
+ Name *string `type:"string"`
+
+ // The RAID array ID.
+ RaidArrayId *string `type:"string"`
+
+ // The AWS region. For more information about AWS regions, see Regions and Endpoints
+ // (http://docs.aws.amazon.com/general/latest/gr/rande.html).
+ Region *string `type:"string"`
+
+ // The volume size.
+ Size *int64 `type:"integer"`
+
+ // The value returned by DescribeVolumes (http://docs.aws.amazon.com/AWSEC2/latest/APIReference/ApiReference-query-DescribeVolumes.html).
+ Status *string `type:"string"`
+
+ // The volume ID.
+ VolumeId *string `type:"string"`
+
+ // The volume type, standard or PIOPS.
+ VolumeType *string `type:"string"`
+}
+
+// String returns the string representation
+func (s Volume) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s Volume) GoString() string {
+ return s.String()
+}
+
+// Describes an Amazon EBS volume configuration.
+type VolumeConfiguration struct {
+ _ struct{} `type:"structure"`
+
+ // For PIOPS volumes, the IOPS per disk.
+ Iops *int64 `type:"integer"`
+
+ // The volume mount point. For example "/dev/sdh".
+ MountPoint *string `type:"string" required:"true"`
+
+ // The number of disks in the volume.
+ NumberOfDisks *int64 `type:"integer" required:"true"`
+
+ // The volume RAID level (http://en.wikipedia.org/wiki/Standard_RAID_levels).
+ RaidLevel *int64 `type:"integer"`
+
+ // The volume size.
+ Size *int64 `type:"integer" required:"true"`
+
+ // The volume type:
+ //
+ // standard - Magnetic io1 - Provisioned IOPS (SSD) gp2 - General Purpose
+ // (SSD)
+ VolumeType *string `type:"string"`
+}
+
+// String returns the string representation
+func (s VolumeConfiguration) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s VolumeConfiguration) GoString() string {
+ return s.String()
+}
+
+// Describes a time-based instance's auto scaling schedule. The schedule consists
+// of a set of key-value pairs.
+//
+// The key is the time period (a UTC hour) and must be an integer from 0 -
+// 23. The value indicates whether the instance should be online or offline
+// for the specified period, and must be set to "on" or "off". The default setting
+// for all time periods is off, so you use the following parameters primarily
+// to specify the online periods. You don't have to explicitly specify offline
+// periods unless you want to change an online period to an offline period.
+//
+// The following example specifies that the instance should be online for four
+// hours, from UTC 1200 - 1600.
It will be off for the remainder of the day. +// +// { "12":"on", "13":"on", "14":"on", "15":"on" } +type WeeklyAutoScalingSchedule struct { + _ struct{} `type:"structure"` + + // The schedule for Friday. + Friday map[string]*string `type:"map"` + + // The schedule for Monday. + Monday map[string]*string `type:"map"` + + // The schedule for Saturday. + Saturday map[string]*string `type:"map"` + + // The schedule for Sunday. + Sunday map[string]*string `type:"map"` + + // The schedule for Thursday. + Thursday map[string]*string `type:"map"` + + // The schedule for Tuesday. + Tuesday map[string]*string `type:"map"` + + // The schedule for Wednesday. + Wednesday map[string]*string `type:"map"` +} + +// String returns the string representation +func (s WeeklyAutoScalingSchedule) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s WeeklyAutoScalingSchedule) GoString() string { + return s.String() +} + +const ( + // @enum AppAttributesKeys + AppAttributesKeysDocumentRoot = "DocumentRoot" + // @enum AppAttributesKeys + AppAttributesKeysRailsEnv = "RailsEnv" + // @enum AppAttributesKeys + AppAttributesKeysAutoBundleOnDeploy = "AutoBundleOnDeploy" + // @enum AppAttributesKeys + AppAttributesKeysAwsFlowRubySettings = "AwsFlowRubySettings" +) + +const ( + // @enum AppType + AppTypeAwsFlowRuby = "aws-flow-ruby" + // @enum AppType + AppTypeJava = "java" + // @enum AppType + AppTypeRails = "rails" + // @enum AppType + AppTypePhp = "php" + // @enum AppType + AppTypeNodejs = "nodejs" + // @enum AppType + AppTypeStatic = "static" + // @enum AppType + AppTypeOther = "other" +) + +const ( + // @enum Architecture + ArchitectureX8664 = "x86_64" + // @enum Architecture + ArchitectureI386 = "i386" +) + +const ( + // @enum AutoScalingType + AutoScalingTypeLoad = "load" + // @enum AutoScalingType + AutoScalingTypeTimer = "timer" +) + +const ( + // @enum DeploymentCommandName + DeploymentCommandNameInstallDependencies = "install_dependencies" + // @enum DeploymentCommandName + DeploymentCommandNameUpdateDependencies = "update_dependencies" + // @enum DeploymentCommandName + DeploymentCommandNameUpdateCustomCookbooks = "update_custom_cookbooks" + // @enum DeploymentCommandName + DeploymentCommandNameExecuteRecipes = "execute_recipes" + // @enum DeploymentCommandName + DeploymentCommandNameConfigure = "configure" + // @enum DeploymentCommandName + DeploymentCommandNameSetup = "setup" + // @enum DeploymentCommandName + DeploymentCommandNameDeploy = "deploy" + // @enum DeploymentCommandName + DeploymentCommandNameRollback = "rollback" + // @enum DeploymentCommandName + DeploymentCommandNameStart = "start" + // @enum DeploymentCommandName + DeploymentCommandNameStop = "stop" + // @enum DeploymentCommandName + DeploymentCommandNameRestart = "restart" + // @enum DeploymentCommandName + DeploymentCommandNameUndeploy = "undeploy" +) + +const ( + // @enum LayerAttributesKeys + LayerAttributesKeysEcsClusterArn = "EcsClusterArn" + // @enum LayerAttributesKeys + LayerAttributesKeysEnableHaproxyStats = "EnableHaproxyStats" + // @enum LayerAttributesKeys + LayerAttributesKeysHaproxyStatsUrl = "HaproxyStatsUrl" + // @enum LayerAttributesKeys + LayerAttributesKeysHaproxyStatsUser = "HaproxyStatsUser" + // @enum LayerAttributesKeys + LayerAttributesKeysHaproxyStatsPassword = "HaproxyStatsPassword" + // @enum LayerAttributesKeys + LayerAttributesKeysHaproxyHealthCheckUrl = "HaproxyHealthCheckUrl" + // @enum LayerAttributesKeys + LayerAttributesKeysHaproxyHealthCheckMethod = 
"HaproxyHealthCheckMethod" + // @enum LayerAttributesKeys + LayerAttributesKeysMysqlRootPassword = "MysqlRootPassword" + // @enum LayerAttributesKeys + LayerAttributesKeysMysqlRootPasswordUbiquitous = "MysqlRootPasswordUbiquitous" + // @enum LayerAttributesKeys + LayerAttributesKeysGangliaUrl = "GangliaUrl" + // @enum LayerAttributesKeys + LayerAttributesKeysGangliaUser = "GangliaUser" + // @enum LayerAttributesKeys + LayerAttributesKeysGangliaPassword = "GangliaPassword" + // @enum LayerAttributesKeys + LayerAttributesKeysMemcachedMemory = "MemcachedMemory" + // @enum LayerAttributesKeys + LayerAttributesKeysNodejsVersion = "NodejsVersion" + // @enum LayerAttributesKeys + LayerAttributesKeysRubyVersion = "RubyVersion" + // @enum LayerAttributesKeys + LayerAttributesKeysRubygemsVersion = "RubygemsVersion" + // @enum LayerAttributesKeys + LayerAttributesKeysManageBundler = "ManageBundler" + // @enum LayerAttributesKeys + LayerAttributesKeysBundlerVersion = "BundlerVersion" + // @enum LayerAttributesKeys + LayerAttributesKeysRailsStack = "RailsStack" + // @enum LayerAttributesKeys + LayerAttributesKeysPassengerVersion = "PassengerVersion" + // @enum LayerAttributesKeys + LayerAttributesKeysJvm = "Jvm" + // @enum LayerAttributesKeys + LayerAttributesKeysJvmVersion = "JvmVersion" + // @enum LayerAttributesKeys + LayerAttributesKeysJvmOptions = "JvmOptions" + // @enum LayerAttributesKeys + LayerAttributesKeysJavaAppServer = "JavaAppServer" + // @enum LayerAttributesKeys + LayerAttributesKeysJavaAppServerVersion = "JavaAppServerVersion" +) + +const ( + // @enum LayerType + LayerTypeAwsFlowRuby = "aws-flow-ruby" + // @enum LayerType + LayerTypeEcsCluster = "ecs-cluster" + // @enum LayerType + LayerTypeJavaApp = "java-app" + // @enum LayerType + LayerTypeLb = "lb" + // @enum LayerType + LayerTypeWeb = "web" + // @enum LayerType + LayerTypePhpApp = "php-app" + // @enum LayerType + LayerTypeRailsApp = "rails-app" + // @enum LayerType + LayerTypeNodejsApp = "nodejs-app" + // @enum LayerType + LayerTypeMemcached = "memcached" + // @enum LayerType + LayerTypeDbMaster = "db-master" + // @enum LayerType + LayerTypeMonitoringMaster = "monitoring-master" + // @enum LayerType + LayerTypeCustom = "custom" +) + +const ( + // @enum RootDeviceType + RootDeviceTypeEbs = "ebs" + // @enum RootDeviceType + RootDeviceTypeInstanceStore = "instance-store" +) + +const ( + // @enum SourceType + SourceTypeGit = "git" + // @enum SourceType + SourceTypeSvn = "svn" + // @enum SourceType + SourceTypeArchive = "archive" + // @enum SourceType + SourceTypeS3 = "s3" +) + +const ( + // @enum StackAttributesKeys + StackAttributesKeysColor = "Color" +) + +const ( + // @enum VirtualizationType + VirtualizationTypeParavirtual = "paravirtual" + // @enum VirtualizationType + VirtualizationTypeHvm = "hvm" +) + +const ( + // @enum VolumeType + VolumeTypeGp2 = "gp2" + // @enum VolumeType + VolumeTypeIo1 = "io1" + // @enum VolumeType + VolumeTypeStandard = "standard" +) diff --git a/vendor/github.com/aws/aws-sdk-go/service/opsworks/examples_test.go b/vendor/github.com/aws/aws-sdk-go/service/opsworks/examples_test.go new file mode 100644 index 000000000..604caeb67 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/opsworks/examples_test.go @@ -0,0 +1,1890 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. 
+ +package opsworks_test + +import ( + "bytes" + "fmt" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/session" + "github.com/aws/aws-sdk-go/service/opsworks" +) + +var _ time.Duration +var _ bytes.Buffer + +func ExampleOpsWorks_AssignInstance() { + svc := opsworks.New(session.New()) + + params := &opsworks.AssignInstanceInput{ + InstanceId: aws.String("String"), // Required + LayerIds: []*string{ // Required + aws.String("String"), // Required + // More values... + }, + } + resp, err := svc.AssignInstance(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleOpsWorks_AssignVolume() { + svc := opsworks.New(session.New()) + + params := &opsworks.AssignVolumeInput{ + VolumeId: aws.String("String"), // Required + InstanceId: aws.String("String"), + } + resp, err := svc.AssignVolume(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleOpsWorks_AssociateElasticIp() { + svc := opsworks.New(session.New()) + + params := &opsworks.AssociateElasticIpInput{ + ElasticIp: aws.String("String"), // Required + InstanceId: aws.String("String"), + } + resp, err := svc.AssociateElasticIp(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleOpsWorks_AttachElasticLoadBalancer() { + svc := opsworks.New(session.New()) + + params := &opsworks.AttachElasticLoadBalancerInput{ + ElasticLoadBalancerName: aws.String("String"), // Required + LayerId: aws.String("String"), // Required + } + resp, err := svc.AttachElasticLoadBalancer(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleOpsWorks_CloneStack() { + svc := opsworks.New(session.New()) + + params := &opsworks.CloneStackInput{ + ServiceRoleArn: aws.String("String"), // Required + SourceStackId: aws.String("String"), // Required + AgentVersion: aws.String("String"), + Attributes: map[string]*string{ + "Key": aws.String("String"), // Required + // More values... + }, + ChefConfiguration: &opsworks.ChefConfiguration{ + BerkshelfVersion: aws.String("String"), + ManageBerkshelf: aws.Bool(true), + }, + CloneAppIds: []*string{ + aws.String("String"), // Required + // More values... 
+ }, + ClonePermissions: aws.Bool(true), + ConfigurationManager: &opsworks.StackConfigurationManager{ + Name: aws.String("String"), + Version: aws.String("String"), + }, + CustomCookbooksSource: &opsworks.Source{ + Password: aws.String("String"), + Revision: aws.String("String"), + SshKey: aws.String("String"), + Type: aws.String("SourceType"), + Url: aws.String("String"), + Username: aws.String("String"), + }, + CustomJson: aws.String("String"), + DefaultAvailabilityZone: aws.String("String"), + DefaultInstanceProfileArn: aws.String("String"), + DefaultOs: aws.String("String"), + DefaultRootDeviceType: aws.String("RootDeviceType"), + DefaultSshKeyName: aws.String("String"), + DefaultSubnetId: aws.String("String"), + HostnameTheme: aws.String("String"), + Name: aws.String("String"), + Region: aws.String("String"), + UseCustomCookbooks: aws.Bool(true), + UseOpsworksSecurityGroups: aws.Bool(true), + VpcId: aws.String("String"), + } + resp, err := svc.CloneStack(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleOpsWorks_CreateApp() { + svc := opsworks.New(session.New()) + + params := &opsworks.CreateAppInput{ + Name: aws.String("String"), // Required + StackId: aws.String("String"), // Required + Type: aws.String("AppType"), // Required + AppSource: &opsworks.Source{ + Password: aws.String("String"), + Revision: aws.String("String"), + SshKey: aws.String("String"), + Type: aws.String("SourceType"), + Url: aws.String("String"), + Username: aws.String("String"), + }, + Attributes: map[string]*string{ + "Key": aws.String("String"), // Required + // More values... + }, + DataSources: []*opsworks.DataSource{ + { // Required + Arn: aws.String("String"), + DatabaseName: aws.String("String"), + Type: aws.String("String"), + }, + // More values... + }, + Description: aws.String("String"), + Domains: []*string{ + aws.String("String"), // Required + // More values... + }, + EnableSsl: aws.Bool(true), + Environment: []*opsworks.EnvironmentVariable{ + { // Required + Key: aws.String("String"), // Required + Value: aws.String("String"), // Required + Secure: aws.Bool(true), + }, + // More values... + }, + Shortname: aws.String("String"), + SslConfiguration: &opsworks.SslConfiguration{ + Certificate: aws.String("String"), // Required + PrivateKey: aws.String("String"), // Required + Chain: aws.String("String"), + }, + } + resp, err := svc.CreateApp(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleOpsWorks_CreateDeployment() { + svc := opsworks.New(session.New()) + + params := &opsworks.CreateDeploymentInput{ + Command: &opsworks.DeploymentCommand{ // Required + Name: aws.String("DeploymentCommandName"), // Required + Args: map[string][]*string{ + "Key": { // Required + aws.String("String"), // Required + // More values... + }, + // More values... + }, + }, + StackId: aws.String("String"), // Required + AppId: aws.String("String"), + Comment: aws.String("String"), + CustomJson: aws.String("String"), + InstanceIds: []*string{ + aws.String("String"), // Required + // More values... 
+ }, + } + resp, err := svc.CreateDeployment(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleOpsWorks_CreateInstance() { + svc := opsworks.New(session.New()) + + params := &opsworks.CreateInstanceInput{ + InstanceType: aws.String("String"), // Required + LayerIds: []*string{ // Required + aws.String("String"), // Required + // More values... + }, + StackId: aws.String("String"), // Required + AgentVersion: aws.String("String"), + AmiId: aws.String("String"), + Architecture: aws.String("Architecture"), + AutoScalingType: aws.String("AutoScalingType"), + AvailabilityZone: aws.String("String"), + BlockDeviceMappings: []*opsworks.BlockDeviceMapping{ + { // Required + DeviceName: aws.String("String"), + Ebs: &opsworks.EbsBlockDevice{ + DeleteOnTermination: aws.Bool(true), + Iops: aws.Int64(1), + SnapshotId: aws.String("String"), + VolumeSize: aws.Int64(1), + VolumeType: aws.String("VolumeType"), + }, + NoDevice: aws.String("String"), + VirtualName: aws.String("String"), + }, + // More values... + }, + EbsOptimized: aws.Bool(true), + Hostname: aws.String("String"), + InstallUpdatesOnBoot: aws.Bool(true), + Os: aws.String("String"), + RootDeviceType: aws.String("RootDeviceType"), + SshKeyName: aws.String("String"), + SubnetId: aws.String("String"), + VirtualizationType: aws.String("String"), + } + resp, err := svc.CreateInstance(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleOpsWorks_CreateLayer() { + svc := opsworks.New(session.New()) + + params := &opsworks.CreateLayerInput{ + Name: aws.String("String"), // Required + Shortname: aws.String("String"), // Required + StackId: aws.String("String"), // Required + Type: aws.String("LayerType"), // Required + Attributes: map[string]*string{ + "Key": aws.String("String"), // Required + // More values... + }, + AutoAssignElasticIps: aws.Bool(true), + AutoAssignPublicIps: aws.Bool(true), + CustomInstanceProfileArn: aws.String("String"), + CustomJson: aws.String("String"), + CustomRecipes: &opsworks.Recipes{ + Configure: []*string{ + aws.String("String"), // Required + // More values... + }, + Deploy: []*string{ + aws.String("String"), // Required + // More values... + }, + Setup: []*string{ + aws.String("String"), // Required + // More values... + }, + Shutdown: []*string{ + aws.String("String"), // Required + // More values... + }, + Undeploy: []*string{ + aws.String("String"), // Required + // More values... + }, + }, + CustomSecurityGroupIds: []*string{ + aws.String("String"), // Required + // More values... + }, + EnableAutoHealing: aws.Bool(true), + InstallUpdatesOnBoot: aws.Bool(true), + LifecycleEventConfiguration: &opsworks.LifecycleEventConfiguration{ + Shutdown: &opsworks.ShutdownEventConfiguration{ + DelayUntilElbConnectionsDrained: aws.Bool(true), + ExecutionTimeout: aws.Int64(1), + }, + }, + Packages: []*string{ + aws.String("String"), // Required + // More values... 
+ }, + UseEbsOptimizedInstances: aws.Bool(true), + VolumeConfigurations: []*opsworks.VolumeConfiguration{ + { // Required + MountPoint: aws.String("String"), // Required + NumberOfDisks: aws.Int64(1), // Required + Size: aws.Int64(1), // Required + Iops: aws.Int64(1), + RaidLevel: aws.Int64(1), + VolumeType: aws.String("String"), + }, + // More values... + }, + } + resp, err := svc.CreateLayer(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleOpsWorks_CreateStack() { + svc := opsworks.New(session.New()) + + params := &opsworks.CreateStackInput{ + DefaultInstanceProfileArn: aws.String("String"), // Required + Name: aws.String("String"), // Required + Region: aws.String("String"), // Required + ServiceRoleArn: aws.String("String"), // Required + AgentVersion: aws.String("String"), + Attributes: map[string]*string{ + "Key": aws.String("String"), // Required + // More values... + }, + ChefConfiguration: &opsworks.ChefConfiguration{ + BerkshelfVersion: aws.String("String"), + ManageBerkshelf: aws.Bool(true), + }, + ConfigurationManager: &opsworks.StackConfigurationManager{ + Name: aws.String("String"), + Version: aws.String("String"), + }, + CustomCookbooksSource: &opsworks.Source{ + Password: aws.String("String"), + Revision: aws.String("String"), + SshKey: aws.String("String"), + Type: aws.String("SourceType"), + Url: aws.String("String"), + Username: aws.String("String"), + }, + CustomJson: aws.String("String"), + DefaultAvailabilityZone: aws.String("String"), + DefaultOs: aws.String("String"), + DefaultRootDeviceType: aws.String("RootDeviceType"), + DefaultSshKeyName: aws.String("String"), + DefaultSubnetId: aws.String("String"), + HostnameTheme: aws.String("String"), + UseCustomCookbooks: aws.Bool(true), + UseOpsworksSecurityGroups: aws.Bool(true), + VpcId: aws.String("String"), + } + resp, err := svc.CreateStack(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleOpsWorks_CreateUserProfile() { + svc := opsworks.New(session.New()) + + params := &opsworks.CreateUserProfileInput{ + IamUserArn: aws.String("String"), // Required + AllowSelfManagement: aws.Bool(true), + SshPublicKey: aws.String("String"), + SshUsername: aws.String("String"), + } + resp, err := svc.CreateUserProfile(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleOpsWorks_DeleteApp() { + svc := opsworks.New(session.New()) + + params := &opsworks.DeleteAppInput{ + AppId: aws.String("String"), // Required + } + resp, err := svc.DeleteApp(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleOpsWorks_DeleteInstance() { + svc := opsworks.New(session.New()) + + params := &opsworks.DeleteInstanceInput{ + InstanceId: aws.String("String"), // Required + DeleteElasticIp: aws.Bool(true), + DeleteVolumes: aws.Bool(true), + } + resp, err := svc.DeleteInstance(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleOpsWorks_DeleteLayer() { + svc := opsworks.New(session.New()) + + params := &opsworks.DeleteLayerInput{ + LayerId: aws.String("String"), // Required + } + resp, err := svc.DeleteLayer(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleOpsWorks_DeleteStack() { + svc := opsworks.New(session.New()) + + params := &opsworks.DeleteStackInput{ + StackId: aws.String("String"), // Required + } + resp, err := svc.DeleteStack(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleOpsWorks_DeleteUserProfile() { + svc := opsworks.New(session.New()) + + params := &opsworks.DeleteUserProfileInput{ + IamUserArn: aws.String("String"), // Required + } + resp, err := svc.DeleteUserProfile(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleOpsWorks_DeregisterEcsCluster() { + svc := opsworks.New(session.New()) + + params := &opsworks.DeregisterEcsClusterInput{ + EcsClusterArn: aws.String("String"), // Required + } + resp, err := svc.DeregisterEcsCluster(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleOpsWorks_DeregisterElasticIp() { + svc := opsworks.New(session.New()) + + params := &opsworks.DeregisterElasticIpInput{ + ElasticIp: aws.String("String"), // Required + } + resp, err := svc.DeregisterElasticIp(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleOpsWorks_DeregisterInstance() { + svc := opsworks.New(session.New()) + + params := &opsworks.DeregisterInstanceInput{ + InstanceId: aws.String("String"), // Required + } + resp, err := svc.DeregisterInstance(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleOpsWorks_DeregisterRdsDbInstance() { + svc := opsworks.New(session.New()) + + params := &opsworks.DeregisterRdsDbInstanceInput{ + RdsDbInstanceArn: aws.String("String"), // Required + } + resp, err := svc.DeregisterRdsDbInstance(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. 
+ fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleOpsWorks_DeregisterVolume() { + svc := opsworks.New(session.New()) + + params := &opsworks.DeregisterVolumeInput{ + VolumeId: aws.String("String"), // Required + } + resp, err := svc.DeregisterVolume(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleOpsWorks_DescribeAgentVersions() { + svc := opsworks.New(session.New()) + + params := &opsworks.DescribeAgentVersionsInput{ + ConfigurationManager: &opsworks.StackConfigurationManager{ + Name: aws.String("String"), + Version: aws.String("String"), + }, + StackId: aws.String("String"), + } + resp, err := svc.DescribeAgentVersions(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleOpsWorks_DescribeApps() { + svc := opsworks.New(session.New()) + + params := &opsworks.DescribeAppsInput{ + AppIds: []*string{ + aws.String("String"), // Required + // More values... + }, + StackId: aws.String("String"), + } + resp, err := svc.DescribeApps(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleOpsWorks_DescribeCommands() { + svc := opsworks.New(session.New()) + + params := &opsworks.DescribeCommandsInput{ + CommandIds: []*string{ + aws.String("String"), // Required + // More values... + }, + DeploymentId: aws.String("String"), + InstanceId: aws.String("String"), + } + resp, err := svc.DescribeCommands(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleOpsWorks_DescribeDeployments() { + svc := opsworks.New(session.New()) + + params := &opsworks.DescribeDeploymentsInput{ + AppId: aws.String("String"), + DeploymentIds: []*string{ + aws.String("String"), // Required + // More values... + }, + StackId: aws.String("String"), + } + resp, err := svc.DescribeDeployments(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleOpsWorks_DescribeEcsClusters() { + svc := opsworks.New(session.New()) + + params := &opsworks.DescribeEcsClustersInput{ + EcsClusterArns: []*string{ + aws.String("String"), // Required + // More values... + }, + MaxResults: aws.Int64(1), + NextToken: aws.String("String"), + StackId: aws.String("String"), + } + resp, err := svc.DescribeEcsClusters(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleOpsWorks_DescribeElasticIps() { + svc := opsworks.New(session.New()) + + params := &opsworks.DescribeElasticIpsInput{ + InstanceId: aws.String("String"), + Ips: []*string{ + aws.String("String"), // Required + // More values... 
+ }, + StackId: aws.String("String"), + } + resp, err := svc.DescribeElasticIps(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleOpsWorks_DescribeElasticLoadBalancers() { + svc := opsworks.New(session.New()) + + params := &opsworks.DescribeElasticLoadBalancersInput{ + LayerIds: []*string{ + aws.String("String"), // Required + // More values... + }, + StackId: aws.String("String"), + } + resp, err := svc.DescribeElasticLoadBalancers(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleOpsWorks_DescribeInstances() { + svc := opsworks.New(session.New()) + + params := &opsworks.DescribeInstancesInput{ + InstanceIds: []*string{ + aws.String("String"), // Required + // More values... + }, + LayerId: aws.String("String"), + StackId: aws.String("String"), + } + resp, err := svc.DescribeInstances(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleOpsWorks_DescribeLayers() { + svc := opsworks.New(session.New()) + + params := &opsworks.DescribeLayersInput{ + LayerIds: []*string{ + aws.String("String"), // Required + // More values... + }, + StackId: aws.String("String"), + } + resp, err := svc.DescribeLayers(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleOpsWorks_DescribeLoadBasedAutoScaling() { + svc := opsworks.New(session.New()) + + params := &opsworks.DescribeLoadBasedAutoScalingInput{ + LayerIds: []*string{ // Required + aws.String("String"), // Required + // More values... + }, + } + resp, err := svc.DescribeLoadBasedAutoScaling(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleOpsWorks_DescribeMyUserProfile() { + svc := opsworks.New(session.New()) + + var params *opsworks.DescribeMyUserProfileInput + resp, err := svc.DescribeMyUserProfile(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleOpsWorks_DescribePermissions() { + svc := opsworks.New(session.New()) + + params := &opsworks.DescribePermissionsInput{ + IamUserArn: aws.String("String"), + StackId: aws.String("String"), + } + resp, err := svc.DescribePermissions(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleOpsWorks_DescribeRaidArrays() { + svc := opsworks.New(session.New()) + + params := &opsworks.DescribeRaidArraysInput{ + InstanceId: aws.String("String"), + RaidArrayIds: []*string{ + aws.String("String"), // Required + // More values... + }, + StackId: aws.String("String"), + } + resp, err := svc.DescribeRaidArrays(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleOpsWorks_DescribeRdsDbInstances() { + svc := opsworks.New(session.New()) + + params := &opsworks.DescribeRdsDbInstancesInput{ + StackId: aws.String("String"), // Required + RdsDbInstanceArns: []*string{ + aws.String("String"), // Required + // More values... + }, + } + resp, err := svc.DescribeRdsDbInstances(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleOpsWorks_DescribeServiceErrors() { + svc := opsworks.New(session.New()) + + params := &opsworks.DescribeServiceErrorsInput{ + InstanceId: aws.String("String"), + ServiceErrorIds: []*string{ + aws.String("String"), // Required + // More values... + }, + StackId: aws.String("String"), + } + resp, err := svc.DescribeServiceErrors(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleOpsWorks_DescribeStackProvisioningParameters() { + svc := opsworks.New(session.New()) + + params := &opsworks.DescribeStackProvisioningParametersInput{ + StackId: aws.String("String"), // Required + } + resp, err := svc.DescribeStackProvisioningParameters(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleOpsWorks_DescribeStackSummary() { + svc := opsworks.New(session.New()) + + params := &opsworks.DescribeStackSummaryInput{ + StackId: aws.String("String"), // Required + } + resp, err := svc.DescribeStackSummary(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleOpsWorks_DescribeStacks() { + svc := opsworks.New(session.New()) + + params := &opsworks.DescribeStacksInput{ + StackIds: []*string{ + aws.String("String"), // Required + // More values... + }, + } + resp, err := svc.DescribeStacks(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleOpsWorks_DescribeTimeBasedAutoScaling() { + svc := opsworks.New(session.New()) + + params := &opsworks.DescribeTimeBasedAutoScalingInput{ + InstanceIds: []*string{ // Required + aws.String("String"), // Required + // More values... + }, + } + resp, err := svc.DescribeTimeBasedAutoScaling(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. 
+ fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleOpsWorks_DescribeUserProfiles() { + svc := opsworks.New(session.New()) + + params := &opsworks.DescribeUserProfilesInput{ + IamUserArns: []*string{ + aws.String("String"), // Required + // More values... + }, + } + resp, err := svc.DescribeUserProfiles(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleOpsWorks_DescribeVolumes() { + svc := opsworks.New(session.New()) + + params := &opsworks.DescribeVolumesInput{ + InstanceId: aws.String("String"), + RaidArrayId: aws.String("String"), + StackId: aws.String("String"), + VolumeIds: []*string{ + aws.String("String"), // Required + // More values... + }, + } + resp, err := svc.DescribeVolumes(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleOpsWorks_DetachElasticLoadBalancer() { + svc := opsworks.New(session.New()) + + params := &opsworks.DetachElasticLoadBalancerInput{ + ElasticLoadBalancerName: aws.String("String"), // Required + LayerId: aws.String("String"), // Required + } + resp, err := svc.DetachElasticLoadBalancer(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleOpsWorks_DisassociateElasticIp() { + svc := opsworks.New(session.New()) + + params := &opsworks.DisassociateElasticIpInput{ + ElasticIp: aws.String("String"), // Required + } + resp, err := svc.DisassociateElasticIp(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleOpsWorks_GetHostnameSuggestion() { + svc := opsworks.New(session.New()) + + params := &opsworks.GetHostnameSuggestionInput{ + LayerId: aws.String("String"), // Required + } + resp, err := svc.GetHostnameSuggestion(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleOpsWorks_GrantAccess() { + svc := opsworks.New(session.New()) + + params := &opsworks.GrantAccessInput{ + InstanceId: aws.String("String"), // Required + ValidForInMinutes: aws.Int64(1), + } + resp, err := svc.GrantAccess(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleOpsWorks_RebootInstance() { + svc := opsworks.New(session.New()) + + params := &opsworks.RebootInstanceInput{ + InstanceId: aws.String("String"), // Required + } + resp, err := svc.RebootInstance(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleOpsWorks_RegisterEcsCluster() { + svc := opsworks.New(session.New()) + + params := &opsworks.RegisterEcsClusterInput{ + EcsClusterArn: aws.String("String"), // Required + StackId: aws.String("String"), // Required + } + resp, err := svc.RegisterEcsCluster(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleOpsWorks_RegisterElasticIp() { + svc := opsworks.New(session.New()) + + params := &opsworks.RegisterElasticIpInput{ + ElasticIp: aws.String("String"), // Required + StackId: aws.String("String"), // Required + } + resp, err := svc.RegisterElasticIp(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleOpsWorks_RegisterInstance() { + svc := opsworks.New(session.New()) + + params := &opsworks.RegisterInstanceInput{ + StackId: aws.String("String"), // Required + Hostname: aws.String("String"), + InstanceIdentity: &opsworks.InstanceIdentity{ + Document: aws.String("String"), + Signature: aws.String("String"), + }, + PrivateIp: aws.String("String"), + PublicIp: aws.String("String"), + RsaPublicKey: aws.String("String"), + RsaPublicKeyFingerprint: aws.String("String"), + } + resp, err := svc.RegisterInstance(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleOpsWorks_RegisterRdsDbInstance() { + svc := opsworks.New(session.New()) + + params := &opsworks.RegisterRdsDbInstanceInput{ + DbPassword: aws.String("String"), // Required + DbUser: aws.String("String"), // Required + RdsDbInstanceArn: aws.String("String"), // Required + StackId: aws.String("String"), // Required + } + resp, err := svc.RegisterRdsDbInstance(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleOpsWorks_RegisterVolume() { + svc := opsworks.New(session.New()) + + params := &opsworks.RegisterVolumeInput{ + StackId: aws.String("String"), // Required + Ec2VolumeId: aws.String("String"), + } + resp, err := svc.RegisterVolume(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleOpsWorks_SetLoadBasedAutoScaling() { + svc := opsworks.New(session.New()) + + params := &opsworks.SetLoadBasedAutoScalingInput{ + LayerId: aws.String("String"), // Required + DownScaling: &opsworks.AutoScalingThresholds{ + Alarms: []*string{ + aws.String("String"), // Required + // More values... + }, + CpuThreshold: aws.Float64(1.0), + IgnoreMetricsTime: aws.Int64(1), + InstanceCount: aws.Int64(1), + LoadThreshold: aws.Float64(1.0), + MemoryThreshold: aws.Float64(1.0), + ThresholdsWaitTime: aws.Int64(1), + }, + Enable: aws.Bool(true), + UpScaling: &opsworks.AutoScalingThresholds{ + Alarms: []*string{ + aws.String("String"), // Required + // More values... 
+ }, + CpuThreshold: aws.Float64(1.0), + IgnoreMetricsTime: aws.Int64(1), + InstanceCount: aws.Int64(1), + LoadThreshold: aws.Float64(1.0), + MemoryThreshold: aws.Float64(1.0), + ThresholdsWaitTime: aws.Int64(1), + }, + } + resp, err := svc.SetLoadBasedAutoScaling(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleOpsWorks_SetPermission() { + svc := opsworks.New(session.New()) + + params := &opsworks.SetPermissionInput{ + IamUserArn: aws.String("String"), // Required + StackId: aws.String("String"), // Required + AllowSsh: aws.Bool(true), + AllowSudo: aws.Bool(true), + Level: aws.String("String"), + } + resp, err := svc.SetPermission(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleOpsWorks_SetTimeBasedAutoScaling() { + svc := opsworks.New(session.New()) + + params := &opsworks.SetTimeBasedAutoScalingInput{ + InstanceId: aws.String("String"), // Required + AutoScalingSchedule: &opsworks.WeeklyAutoScalingSchedule{ + Friday: map[string]*string{ + "Key": aws.String("Switch"), // Required + // More values... + }, + Monday: map[string]*string{ + "Key": aws.String("Switch"), // Required + // More values... + }, + Saturday: map[string]*string{ + "Key": aws.String("Switch"), // Required + // More values... + }, + Sunday: map[string]*string{ + "Key": aws.String("Switch"), // Required + // More values... + }, + Thursday: map[string]*string{ + "Key": aws.String("Switch"), // Required + // More values... + }, + Tuesday: map[string]*string{ + "Key": aws.String("Switch"), // Required + // More values... + }, + Wednesday: map[string]*string{ + "Key": aws.String("Switch"), // Required + // More values... + }, + }, + } + resp, err := svc.SetTimeBasedAutoScaling(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleOpsWorks_StartInstance() { + svc := opsworks.New(session.New()) + + params := &opsworks.StartInstanceInput{ + InstanceId: aws.String("String"), // Required + } + resp, err := svc.StartInstance(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleOpsWorks_StartStack() { + svc := opsworks.New(session.New()) + + params := &opsworks.StartStackInput{ + StackId: aws.String("String"), // Required + } + resp, err := svc.StartStack(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleOpsWorks_StopInstance() { + svc := opsworks.New(session.New()) + + params := &opsworks.StopInstanceInput{ + InstanceId: aws.String("String"), // Required + } + resp, err := svc.StopInstance(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleOpsWorks_StopStack() { + svc := opsworks.New(session.New()) + + params := &opsworks.StopStackInput{ + StackId: aws.String("String"), // Required + } + resp, err := svc.StopStack(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleOpsWorks_UnassignInstance() { + svc := opsworks.New(session.New()) + + params := &opsworks.UnassignInstanceInput{ + InstanceId: aws.String("String"), // Required + } + resp, err := svc.UnassignInstance(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleOpsWorks_UnassignVolume() { + svc := opsworks.New(session.New()) + + params := &opsworks.UnassignVolumeInput{ + VolumeId: aws.String("String"), // Required + } + resp, err := svc.UnassignVolume(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleOpsWorks_UpdateApp() { + svc := opsworks.New(session.New()) + + params := &opsworks.UpdateAppInput{ + AppId: aws.String("String"), // Required + AppSource: &opsworks.Source{ + Password: aws.String("String"), + Revision: aws.String("String"), + SshKey: aws.String("String"), + Type: aws.String("SourceType"), + Url: aws.String("String"), + Username: aws.String("String"), + }, + Attributes: map[string]*string{ + "Key": aws.String("String"), // Required + // More values... + }, + DataSources: []*opsworks.DataSource{ + { // Required + Arn: aws.String("String"), + DatabaseName: aws.String("String"), + Type: aws.String("String"), + }, + // More values... + }, + Description: aws.String("String"), + Domains: []*string{ + aws.String("String"), // Required + // More values... + }, + EnableSsl: aws.Bool(true), + Environment: []*opsworks.EnvironmentVariable{ + { // Required + Key: aws.String("String"), // Required + Value: aws.String("String"), // Required + Secure: aws.Bool(true), + }, + // More values... + }, + Name: aws.String("String"), + SslConfiguration: &opsworks.SslConfiguration{ + Certificate: aws.String("String"), // Required + PrivateKey: aws.String("String"), // Required + Chain: aws.String("String"), + }, + Type: aws.String("AppType"), + } + resp, err := svc.UpdateApp(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleOpsWorks_UpdateElasticIp() { + svc := opsworks.New(session.New()) + + params := &opsworks.UpdateElasticIpInput{ + ElasticIp: aws.String("String"), // Required + Name: aws.String("String"), + } + resp, err := svc.UpdateElasticIp(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleOpsWorks_UpdateInstance() { + svc := opsworks.New(session.New()) + + params := &opsworks.UpdateInstanceInput{ + InstanceId: aws.String("String"), // Required + AgentVersion: aws.String("String"), + AmiId: aws.String("String"), + Architecture: aws.String("Architecture"), + AutoScalingType: aws.String("AutoScalingType"), + EbsOptimized: aws.Bool(true), + Hostname: aws.String("String"), + InstallUpdatesOnBoot: aws.Bool(true), + InstanceType: aws.String("String"), + LayerIds: []*string{ + aws.String("String"), // Required + // More values... + }, + Os: aws.String("String"), + SshKeyName: aws.String("String"), + } + resp, err := svc.UpdateInstance(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleOpsWorks_UpdateLayer() { + svc := opsworks.New(session.New()) + + params := &opsworks.UpdateLayerInput{ + LayerId: aws.String("String"), // Required + Attributes: map[string]*string{ + "Key": aws.String("String"), // Required + // More values... + }, + AutoAssignElasticIps: aws.Bool(true), + AutoAssignPublicIps: aws.Bool(true), + CustomInstanceProfileArn: aws.String("String"), + CustomJson: aws.String("String"), + CustomRecipes: &opsworks.Recipes{ + Configure: []*string{ + aws.String("String"), // Required + // More values... + }, + Deploy: []*string{ + aws.String("String"), // Required + // More values... + }, + Setup: []*string{ + aws.String("String"), // Required + // More values... + }, + Shutdown: []*string{ + aws.String("String"), // Required + // More values... + }, + Undeploy: []*string{ + aws.String("String"), // Required + // More values... + }, + }, + CustomSecurityGroupIds: []*string{ + aws.String("String"), // Required + // More values... + }, + EnableAutoHealing: aws.Bool(true), + InstallUpdatesOnBoot: aws.Bool(true), + LifecycleEventConfiguration: &opsworks.LifecycleEventConfiguration{ + Shutdown: &opsworks.ShutdownEventConfiguration{ + DelayUntilElbConnectionsDrained: aws.Bool(true), + ExecutionTimeout: aws.Int64(1), + }, + }, + Name: aws.String("String"), + Packages: []*string{ + aws.String("String"), // Required + // More values... + }, + Shortname: aws.String("String"), + UseEbsOptimizedInstances: aws.Bool(true), + VolumeConfigurations: []*opsworks.VolumeConfiguration{ + { // Required + MountPoint: aws.String("String"), // Required + NumberOfDisks: aws.Int64(1), // Required + Size: aws.Int64(1), // Required + Iops: aws.Int64(1), + RaidLevel: aws.Int64(1), + VolumeType: aws.String("String"), + }, + // More values... + }, + } + resp, err := svc.UpdateLayer(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleOpsWorks_UpdateMyUserProfile() { + svc := opsworks.New(session.New()) + + params := &opsworks.UpdateMyUserProfileInput{ + SshPublicKey: aws.String("String"), + } + resp, err := svc.UpdateMyUserProfile(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleOpsWorks_UpdateRdsDbInstance() { + svc := opsworks.New(session.New()) + + params := &opsworks.UpdateRdsDbInstanceInput{ + RdsDbInstanceArn: aws.String("String"), // Required + DbPassword: aws.String("String"), + DbUser: aws.String("String"), + } + resp, err := svc.UpdateRdsDbInstance(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleOpsWorks_UpdateStack() { + svc := opsworks.New(session.New()) + + params := &opsworks.UpdateStackInput{ + StackId: aws.String("String"), // Required + AgentVersion: aws.String("String"), + Attributes: map[string]*string{ + "Key": aws.String("String"), // Required + // More values... + }, + ChefConfiguration: &opsworks.ChefConfiguration{ + BerkshelfVersion: aws.String("String"), + ManageBerkshelf: aws.Bool(true), + }, + ConfigurationManager: &opsworks.StackConfigurationManager{ + Name: aws.String("String"), + Version: aws.String("String"), + }, + CustomCookbooksSource: &opsworks.Source{ + Password: aws.String("String"), + Revision: aws.String("String"), + SshKey: aws.String("String"), + Type: aws.String("SourceType"), + Url: aws.String("String"), + Username: aws.String("String"), + }, + CustomJson: aws.String("String"), + DefaultAvailabilityZone: aws.String("String"), + DefaultInstanceProfileArn: aws.String("String"), + DefaultOs: aws.String("String"), + DefaultRootDeviceType: aws.String("RootDeviceType"), + DefaultSshKeyName: aws.String("String"), + DefaultSubnetId: aws.String("String"), + HostnameTheme: aws.String("String"), + Name: aws.String("String"), + ServiceRoleArn: aws.String("String"), + UseCustomCookbooks: aws.Bool(true), + UseOpsworksSecurityGroups: aws.Bool(true), + } + resp, err := svc.UpdateStack(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleOpsWorks_UpdateUserProfile() { + svc := opsworks.New(session.New()) + + params := &opsworks.UpdateUserProfileInput{ + IamUserArn: aws.String("String"), // Required + AllowSelfManagement: aws.Bool(true), + SshPublicKey: aws.String("String"), + SshUsername: aws.String("String"), + } + resp, err := svc.UpdateUserProfile(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleOpsWorks_UpdateVolume() { + svc := opsworks.New(session.New()) + + params := &opsworks.UpdateVolumeInput{ + VolumeId: aws.String("String"), // Required + MountPoint: aws.String("String"), + Name: aws.String("String"), + } + resp, err := svc.UpdateVolume(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/opsworks/opsworksiface/interface.go b/vendor/github.com/aws/aws-sdk-go/service/opsworks/opsworksiface/interface.go new file mode 100644 index 000000000..9e477674a --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/opsworks/opsworksiface/interface.go @@ -0,0 +1,296 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. 
DO NOT EDIT. + +// Package opsworksiface provides an interface for the AWS OpsWorks. +package opsworksiface + +import ( + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/service/opsworks" +) + +// OpsWorksAPI is the interface type for opsworks.OpsWorks. +type OpsWorksAPI interface { + AssignInstanceRequest(*opsworks.AssignInstanceInput) (*request.Request, *opsworks.AssignInstanceOutput) + + AssignInstance(*opsworks.AssignInstanceInput) (*opsworks.AssignInstanceOutput, error) + + AssignVolumeRequest(*opsworks.AssignVolumeInput) (*request.Request, *opsworks.AssignVolumeOutput) + + AssignVolume(*opsworks.AssignVolumeInput) (*opsworks.AssignVolumeOutput, error) + + AssociateElasticIpRequest(*opsworks.AssociateElasticIpInput) (*request.Request, *opsworks.AssociateElasticIpOutput) + + AssociateElasticIp(*opsworks.AssociateElasticIpInput) (*opsworks.AssociateElasticIpOutput, error) + + AttachElasticLoadBalancerRequest(*opsworks.AttachElasticLoadBalancerInput) (*request.Request, *opsworks.AttachElasticLoadBalancerOutput) + + AttachElasticLoadBalancer(*opsworks.AttachElasticLoadBalancerInput) (*opsworks.AttachElasticLoadBalancerOutput, error) + + CloneStackRequest(*opsworks.CloneStackInput) (*request.Request, *opsworks.CloneStackOutput) + + CloneStack(*opsworks.CloneStackInput) (*opsworks.CloneStackOutput, error) + + CreateAppRequest(*opsworks.CreateAppInput) (*request.Request, *opsworks.CreateAppOutput) + + CreateApp(*opsworks.CreateAppInput) (*opsworks.CreateAppOutput, error) + + CreateDeploymentRequest(*opsworks.CreateDeploymentInput) (*request.Request, *opsworks.CreateDeploymentOutput) + + CreateDeployment(*opsworks.CreateDeploymentInput) (*opsworks.CreateDeploymentOutput, error) + + CreateInstanceRequest(*opsworks.CreateInstanceInput) (*request.Request, *opsworks.CreateInstanceOutput) + + CreateInstance(*opsworks.CreateInstanceInput) (*opsworks.CreateInstanceOutput, error) + + CreateLayerRequest(*opsworks.CreateLayerInput) (*request.Request, *opsworks.CreateLayerOutput) + + CreateLayer(*opsworks.CreateLayerInput) (*opsworks.CreateLayerOutput, error) + + CreateStackRequest(*opsworks.CreateStackInput) (*request.Request, *opsworks.CreateStackOutput) + + CreateStack(*opsworks.CreateStackInput) (*opsworks.CreateStackOutput, error) + + CreateUserProfileRequest(*opsworks.CreateUserProfileInput) (*request.Request, *opsworks.CreateUserProfileOutput) + + CreateUserProfile(*opsworks.CreateUserProfileInput) (*opsworks.CreateUserProfileOutput, error) + + DeleteAppRequest(*opsworks.DeleteAppInput) (*request.Request, *opsworks.DeleteAppOutput) + + DeleteApp(*opsworks.DeleteAppInput) (*opsworks.DeleteAppOutput, error) + + DeleteInstanceRequest(*opsworks.DeleteInstanceInput) (*request.Request, *opsworks.DeleteInstanceOutput) + + DeleteInstance(*opsworks.DeleteInstanceInput) (*opsworks.DeleteInstanceOutput, error) + + DeleteLayerRequest(*opsworks.DeleteLayerInput) (*request.Request, *opsworks.DeleteLayerOutput) + + DeleteLayer(*opsworks.DeleteLayerInput) (*opsworks.DeleteLayerOutput, error) + + DeleteStackRequest(*opsworks.DeleteStackInput) (*request.Request, *opsworks.DeleteStackOutput) + + DeleteStack(*opsworks.DeleteStackInput) (*opsworks.DeleteStackOutput, error) + + DeleteUserProfileRequest(*opsworks.DeleteUserProfileInput) (*request.Request, *opsworks.DeleteUserProfileOutput) + + DeleteUserProfile(*opsworks.DeleteUserProfileInput) (*opsworks.DeleteUserProfileOutput, error) + + DeregisterEcsClusterRequest(*opsworks.DeregisterEcsClusterInput) (*request.Request, 
*opsworks.DeregisterEcsClusterOutput) + + DeregisterEcsCluster(*opsworks.DeregisterEcsClusterInput) (*opsworks.DeregisterEcsClusterOutput, error) + + DeregisterElasticIpRequest(*opsworks.DeregisterElasticIpInput) (*request.Request, *opsworks.DeregisterElasticIpOutput) + + DeregisterElasticIp(*opsworks.DeregisterElasticIpInput) (*opsworks.DeregisterElasticIpOutput, error) + + DeregisterInstanceRequest(*opsworks.DeregisterInstanceInput) (*request.Request, *opsworks.DeregisterInstanceOutput) + + DeregisterInstance(*opsworks.DeregisterInstanceInput) (*opsworks.DeregisterInstanceOutput, error) + + DeregisterRdsDbInstanceRequest(*opsworks.DeregisterRdsDbInstanceInput) (*request.Request, *opsworks.DeregisterRdsDbInstanceOutput) + + DeregisterRdsDbInstance(*opsworks.DeregisterRdsDbInstanceInput) (*opsworks.DeregisterRdsDbInstanceOutput, error) + + DeregisterVolumeRequest(*opsworks.DeregisterVolumeInput) (*request.Request, *opsworks.DeregisterVolumeOutput) + + DeregisterVolume(*opsworks.DeregisterVolumeInput) (*opsworks.DeregisterVolumeOutput, error) + + DescribeAgentVersionsRequest(*opsworks.DescribeAgentVersionsInput) (*request.Request, *opsworks.DescribeAgentVersionsOutput) + + DescribeAgentVersions(*opsworks.DescribeAgentVersionsInput) (*opsworks.DescribeAgentVersionsOutput, error) + + DescribeAppsRequest(*opsworks.DescribeAppsInput) (*request.Request, *opsworks.DescribeAppsOutput) + + DescribeApps(*opsworks.DescribeAppsInput) (*opsworks.DescribeAppsOutput, error) + + DescribeCommandsRequest(*opsworks.DescribeCommandsInput) (*request.Request, *opsworks.DescribeCommandsOutput) + + DescribeCommands(*opsworks.DescribeCommandsInput) (*opsworks.DescribeCommandsOutput, error) + + DescribeDeploymentsRequest(*opsworks.DescribeDeploymentsInput) (*request.Request, *opsworks.DescribeDeploymentsOutput) + + DescribeDeployments(*opsworks.DescribeDeploymentsInput) (*opsworks.DescribeDeploymentsOutput, error) + + DescribeEcsClustersRequest(*opsworks.DescribeEcsClustersInput) (*request.Request, *opsworks.DescribeEcsClustersOutput) + + DescribeEcsClusters(*opsworks.DescribeEcsClustersInput) (*opsworks.DescribeEcsClustersOutput, error) + + DescribeEcsClustersPages(*opsworks.DescribeEcsClustersInput, func(*opsworks.DescribeEcsClustersOutput, bool) bool) error + + DescribeElasticIpsRequest(*opsworks.DescribeElasticIpsInput) (*request.Request, *opsworks.DescribeElasticIpsOutput) + + DescribeElasticIps(*opsworks.DescribeElasticIpsInput) (*opsworks.DescribeElasticIpsOutput, error) + + DescribeElasticLoadBalancersRequest(*opsworks.DescribeElasticLoadBalancersInput) (*request.Request, *opsworks.DescribeElasticLoadBalancersOutput) + + DescribeElasticLoadBalancers(*opsworks.DescribeElasticLoadBalancersInput) (*opsworks.DescribeElasticLoadBalancersOutput, error) + + DescribeInstancesRequest(*opsworks.DescribeInstancesInput) (*request.Request, *opsworks.DescribeInstancesOutput) + + DescribeInstances(*opsworks.DescribeInstancesInput) (*opsworks.DescribeInstancesOutput, error) + + DescribeLayersRequest(*opsworks.DescribeLayersInput) (*request.Request, *opsworks.DescribeLayersOutput) + + DescribeLayers(*opsworks.DescribeLayersInput) (*opsworks.DescribeLayersOutput, error) + + DescribeLoadBasedAutoScalingRequest(*opsworks.DescribeLoadBasedAutoScalingInput) (*request.Request, *opsworks.DescribeLoadBasedAutoScalingOutput) + + DescribeLoadBasedAutoScaling(*opsworks.DescribeLoadBasedAutoScalingInput) (*opsworks.DescribeLoadBasedAutoScalingOutput, error) + + DescribeMyUserProfileRequest(*opsworks.DescribeMyUserProfileInput) 
(*request.Request, *opsworks.DescribeMyUserProfileOutput) + + DescribeMyUserProfile(*opsworks.DescribeMyUserProfileInput) (*opsworks.DescribeMyUserProfileOutput, error) + + DescribePermissionsRequest(*opsworks.DescribePermissionsInput) (*request.Request, *opsworks.DescribePermissionsOutput) + + DescribePermissions(*opsworks.DescribePermissionsInput) (*opsworks.DescribePermissionsOutput, error) + + DescribeRaidArraysRequest(*opsworks.DescribeRaidArraysInput) (*request.Request, *opsworks.DescribeRaidArraysOutput) + + DescribeRaidArrays(*opsworks.DescribeRaidArraysInput) (*opsworks.DescribeRaidArraysOutput, error) + + DescribeRdsDbInstancesRequest(*opsworks.DescribeRdsDbInstancesInput) (*request.Request, *opsworks.DescribeRdsDbInstancesOutput) + + DescribeRdsDbInstances(*opsworks.DescribeRdsDbInstancesInput) (*opsworks.DescribeRdsDbInstancesOutput, error) + + DescribeServiceErrorsRequest(*opsworks.DescribeServiceErrorsInput) (*request.Request, *opsworks.DescribeServiceErrorsOutput) + + DescribeServiceErrors(*opsworks.DescribeServiceErrorsInput) (*opsworks.DescribeServiceErrorsOutput, error) + + DescribeStackProvisioningParametersRequest(*opsworks.DescribeStackProvisioningParametersInput) (*request.Request, *opsworks.DescribeStackProvisioningParametersOutput) + + DescribeStackProvisioningParameters(*opsworks.DescribeStackProvisioningParametersInput) (*opsworks.DescribeStackProvisioningParametersOutput, error) + + DescribeStackSummaryRequest(*opsworks.DescribeStackSummaryInput) (*request.Request, *opsworks.DescribeStackSummaryOutput) + + DescribeStackSummary(*opsworks.DescribeStackSummaryInput) (*opsworks.DescribeStackSummaryOutput, error) + + DescribeStacksRequest(*opsworks.DescribeStacksInput) (*request.Request, *opsworks.DescribeStacksOutput) + + DescribeStacks(*opsworks.DescribeStacksInput) (*opsworks.DescribeStacksOutput, error) + + DescribeTimeBasedAutoScalingRequest(*opsworks.DescribeTimeBasedAutoScalingInput) (*request.Request, *opsworks.DescribeTimeBasedAutoScalingOutput) + + DescribeTimeBasedAutoScaling(*opsworks.DescribeTimeBasedAutoScalingInput) (*opsworks.DescribeTimeBasedAutoScalingOutput, error) + + DescribeUserProfilesRequest(*opsworks.DescribeUserProfilesInput) (*request.Request, *opsworks.DescribeUserProfilesOutput) + + DescribeUserProfiles(*opsworks.DescribeUserProfilesInput) (*opsworks.DescribeUserProfilesOutput, error) + + DescribeVolumesRequest(*opsworks.DescribeVolumesInput) (*request.Request, *opsworks.DescribeVolumesOutput) + + DescribeVolumes(*opsworks.DescribeVolumesInput) (*opsworks.DescribeVolumesOutput, error) + + DetachElasticLoadBalancerRequest(*opsworks.DetachElasticLoadBalancerInput) (*request.Request, *opsworks.DetachElasticLoadBalancerOutput) + + DetachElasticLoadBalancer(*opsworks.DetachElasticLoadBalancerInput) (*opsworks.DetachElasticLoadBalancerOutput, error) + + DisassociateElasticIpRequest(*opsworks.DisassociateElasticIpInput) (*request.Request, *opsworks.DisassociateElasticIpOutput) + + DisassociateElasticIp(*opsworks.DisassociateElasticIpInput) (*opsworks.DisassociateElasticIpOutput, error) + + GetHostnameSuggestionRequest(*opsworks.GetHostnameSuggestionInput) (*request.Request, *opsworks.GetHostnameSuggestionOutput) + + GetHostnameSuggestion(*opsworks.GetHostnameSuggestionInput) (*opsworks.GetHostnameSuggestionOutput, error) + + GrantAccessRequest(*opsworks.GrantAccessInput) (*request.Request, *opsworks.GrantAccessOutput) + + GrantAccess(*opsworks.GrantAccessInput) (*opsworks.GrantAccessOutput, error) + + 
RebootInstanceRequest(*opsworks.RebootInstanceInput) (*request.Request, *opsworks.RebootInstanceOutput) + + RebootInstance(*opsworks.RebootInstanceInput) (*opsworks.RebootInstanceOutput, error) + + RegisterEcsClusterRequest(*opsworks.RegisterEcsClusterInput) (*request.Request, *opsworks.RegisterEcsClusterOutput) + + RegisterEcsCluster(*opsworks.RegisterEcsClusterInput) (*opsworks.RegisterEcsClusterOutput, error) + + RegisterElasticIpRequest(*opsworks.RegisterElasticIpInput) (*request.Request, *opsworks.RegisterElasticIpOutput) + + RegisterElasticIp(*opsworks.RegisterElasticIpInput) (*opsworks.RegisterElasticIpOutput, error) + + RegisterInstanceRequest(*opsworks.RegisterInstanceInput) (*request.Request, *opsworks.RegisterInstanceOutput) + + RegisterInstance(*opsworks.RegisterInstanceInput) (*opsworks.RegisterInstanceOutput, error) + + RegisterRdsDbInstanceRequest(*opsworks.RegisterRdsDbInstanceInput) (*request.Request, *opsworks.RegisterRdsDbInstanceOutput) + + RegisterRdsDbInstance(*opsworks.RegisterRdsDbInstanceInput) (*opsworks.RegisterRdsDbInstanceOutput, error) + + RegisterVolumeRequest(*opsworks.RegisterVolumeInput) (*request.Request, *opsworks.RegisterVolumeOutput) + + RegisterVolume(*opsworks.RegisterVolumeInput) (*opsworks.RegisterVolumeOutput, error) + + SetLoadBasedAutoScalingRequest(*opsworks.SetLoadBasedAutoScalingInput) (*request.Request, *opsworks.SetLoadBasedAutoScalingOutput) + + SetLoadBasedAutoScaling(*opsworks.SetLoadBasedAutoScalingInput) (*opsworks.SetLoadBasedAutoScalingOutput, error) + + SetPermissionRequest(*opsworks.SetPermissionInput) (*request.Request, *opsworks.SetPermissionOutput) + + SetPermission(*opsworks.SetPermissionInput) (*opsworks.SetPermissionOutput, error) + + SetTimeBasedAutoScalingRequest(*opsworks.SetTimeBasedAutoScalingInput) (*request.Request, *opsworks.SetTimeBasedAutoScalingOutput) + + SetTimeBasedAutoScaling(*opsworks.SetTimeBasedAutoScalingInput) (*opsworks.SetTimeBasedAutoScalingOutput, error) + + StartInstanceRequest(*opsworks.StartInstanceInput) (*request.Request, *opsworks.StartInstanceOutput) + + StartInstance(*opsworks.StartInstanceInput) (*opsworks.StartInstanceOutput, error) + + StartStackRequest(*opsworks.StartStackInput) (*request.Request, *opsworks.StartStackOutput) + + StartStack(*opsworks.StartStackInput) (*opsworks.StartStackOutput, error) + + StopInstanceRequest(*opsworks.StopInstanceInput) (*request.Request, *opsworks.StopInstanceOutput) + + StopInstance(*opsworks.StopInstanceInput) (*opsworks.StopInstanceOutput, error) + + StopStackRequest(*opsworks.StopStackInput) (*request.Request, *opsworks.StopStackOutput) + + StopStack(*opsworks.StopStackInput) (*opsworks.StopStackOutput, error) + + UnassignInstanceRequest(*opsworks.UnassignInstanceInput) (*request.Request, *opsworks.UnassignInstanceOutput) + + UnassignInstance(*opsworks.UnassignInstanceInput) (*opsworks.UnassignInstanceOutput, error) + + UnassignVolumeRequest(*opsworks.UnassignVolumeInput) (*request.Request, *opsworks.UnassignVolumeOutput) + + UnassignVolume(*opsworks.UnassignVolumeInput) (*opsworks.UnassignVolumeOutput, error) + + UpdateAppRequest(*opsworks.UpdateAppInput) (*request.Request, *opsworks.UpdateAppOutput) + + UpdateApp(*opsworks.UpdateAppInput) (*opsworks.UpdateAppOutput, error) + + UpdateElasticIpRequest(*opsworks.UpdateElasticIpInput) (*request.Request, *opsworks.UpdateElasticIpOutput) + + UpdateElasticIp(*opsworks.UpdateElasticIpInput) (*opsworks.UpdateElasticIpOutput, error) + + UpdateInstanceRequest(*opsworks.UpdateInstanceInput) (*request.Request, 
*opsworks.UpdateInstanceOutput) + + UpdateInstance(*opsworks.UpdateInstanceInput) (*opsworks.UpdateInstanceOutput, error) + + UpdateLayerRequest(*opsworks.UpdateLayerInput) (*request.Request, *opsworks.UpdateLayerOutput) + + UpdateLayer(*opsworks.UpdateLayerInput) (*opsworks.UpdateLayerOutput, error) + + UpdateMyUserProfileRequest(*opsworks.UpdateMyUserProfileInput) (*request.Request, *opsworks.UpdateMyUserProfileOutput) + + UpdateMyUserProfile(*opsworks.UpdateMyUserProfileInput) (*opsworks.UpdateMyUserProfileOutput, error) + + UpdateRdsDbInstanceRequest(*opsworks.UpdateRdsDbInstanceInput) (*request.Request, *opsworks.UpdateRdsDbInstanceOutput) + + UpdateRdsDbInstance(*opsworks.UpdateRdsDbInstanceInput) (*opsworks.UpdateRdsDbInstanceOutput, error) + + UpdateStackRequest(*opsworks.UpdateStackInput) (*request.Request, *opsworks.UpdateStackOutput) + + UpdateStack(*opsworks.UpdateStackInput) (*opsworks.UpdateStackOutput, error) + + UpdateUserProfileRequest(*opsworks.UpdateUserProfileInput) (*request.Request, *opsworks.UpdateUserProfileOutput) + + UpdateUserProfile(*opsworks.UpdateUserProfileInput) (*opsworks.UpdateUserProfileOutput, error) + + UpdateVolumeRequest(*opsworks.UpdateVolumeInput) (*request.Request, *opsworks.UpdateVolumeOutput) + + UpdateVolume(*opsworks.UpdateVolumeInput) (*opsworks.UpdateVolumeOutput, error) +} + +var _ OpsWorksAPI = (*opsworks.OpsWorks)(nil) diff --git a/vendor/github.com/aws/aws-sdk-go/service/opsworks/service.go b/vendor/github.com/aws/aws-sdk-go/service/opsworks/service.go new file mode 100644 index 000000000..70ea0a0a5 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/opsworks/service.go @@ -0,0 +1,124 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +package opsworks + +import ( + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/client" + "github.com/aws/aws-sdk-go/aws/client/metadata" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol/jsonrpc" + "github.com/aws/aws-sdk-go/private/signer/v4" +) + +// Welcome to the AWS OpsWorks API Reference. This guide provides descriptions, +// syntax, and usage examples about AWS OpsWorks actions and data types, including +// common parameters and error codes. +// +// AWS OpsWorks is an application management service that provides an integrated +// experience for overseeing the complete application lifecycle. For information +// about this product, go to the AWS OpsWorks (http://aws.amazon.com/opsworks/) +// details page. +// +// SDKs and CLI +// +// The most common way to use the AWS OpsWorks API is by using the AWS Command +// Line Interface (CLI) or by using one of the AWS SDKs to implement applications +// in your preferred language. 
For more information, see:
+//
+//  AWS CLI (http://docs.aws.amazon.com/cli/latest/userguide/cli-chap-welcome.html)
+//  AWS SDK for Java (http://docs.aws.amazon.com/AWSJavaSDK/latest/javadoc/com/amazonaws/services/opsworks/AWSOpsWorksClient.html)
+//  AWS SDK for .NET (http://docs.aws.amazon.com/sdkfornet/latest/apidocs/html/N_Amazon_OpsWorks.htm)
+//  AWS SDK for PHP 2 (http://docs.aws.amazon.com/aws-sdk-php-2/latest/class-Aws.OpsWorks.OpsWorksClient.html)
+//  AWS SDK for Ruby (http://docs.aws.amazon.com/AWSRubySDK/latest/AWS/OpsWorks/Client.html)
+//  AWS SDK for Node.js (http://aws.amazon.com/documentation/sdkforjavascript/)
+//  AWS SDK for Python(Boto) (http://docs.pythonboto.org/en/latest/ref/opsworks.html)
+//
+// Endpoints
+//
+// AWS OpsWorks supports only one endpoint, opsworks.us-east-1.amazonaws.com
+// (HTTPS), so you must connect to that endpoint. You can then use the API to
+// direct AWS OpsWorks to create stacks in any AWS Region.
+//
+// Chef Versions
+//
+// When you call CreateStack, CloneStack, or UpdateStack we recommend you use
+// the ConfigurationManager parameter to specify the Chef version. The recommended
+// value for Linux stacks is currently 12 (the default is 11.4). Windows stacks
+// use Chef 12.2. For more information, see Chef Versions (http://docs.aws.amazon.com/opsworks/latest/userguide/workingcookbook-chef11.html).
+//
+// You can specify Chef 12, 11.10, or 11.4 for your Linux stack. We recommend
+// migrating your existing Linux stacks to Chef 12 as soon as possible.
+//
+// The service client's operations are safe to be used concurrently.
+// It is not safe to mutate any of the client's properties though.
+type OpsWorks struct {
+	*client.Client
+}
+
+// Used for custom client initialization logic
+var initClient func(*client.Client)
+
+// Used for custom request initialization logic
+var initRequest func(*request.Request)
+
+// A ServiceName is the name of the service the client will make API calls to.
+const ServiceName = "opsworks"
+
+// New creates a new instance of the OpsWorks client with a session.
+// If additional configuration is needed for the client instance use the optional
+// aws.Config parameter to add your extra config.
+//
+// Example:
+//	// Create a OpsWorks client from just a session.
+//	svc := opsworks.New(mySession)
+//
+//	// Create a OpsWorks client with additional configuration
+//	svc := opsworks.New(mySession, aws.NewConfig().WithRegion("us-west-2"))
+func New(p client.ConfigProvider, cfgs ...*aws.Config) *OpsWorks {
+	c := p.ClientConfig(ServiceName, cfgs...)
+	return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion)
+}
+
+// newClient creates, initializes and returns a new service client instance.
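+//
+// A minimal sketch of a client built this way in use; the region, instance
+// ID, and error handling below are illustrative placeholders, not defaults:
+//
+//	svc := opsworks.New(session.New(), aws.NewConfig().WithRegion("us-east-1"))
+//	if _, err := svc.StartInstance(&opsworks.StartInstanceInput{
+//		InstanceId: aws.String("i-example"), // placeholder ID
+//	}); err == nil {
+//		// Block until the instance reports "online" (see waiters.go).
+//		_ = svc.WaitUntilInstanceOnline(&opsworks.DescribeInstancesInput{
+//			InstanceIds: []*string{aws.String("i-example")},
+//		})
+//	}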
+func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *OpsWorks { + svc := &OpsWorks{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: ServiceName, + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "2013-02-18", + JSONVersion: "1.1", + TargetPrefix: "OpsWorks_20130218", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Build.PushBack(jsonrpc.Build) + svc.Handlers.Unmarshal.PushBack(jsonrpc.Unmarshal) + svc.Handlers.UnmarshalMeta.PushBack(jsonrpc.UnmarshalMeta) + svc.Handlers.UnmarshalError.PushBack(jsonrpc.UnmarshalError) + + // Run custom client initialization if present + if initClient != nil { + initClient(svc.Client) + } + + return svc +} + +// newRequest creates a new request for a OpsWorks operation and runs any +// custom request initialization. +func (c *OpsWorks) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + // Run custom request initialization if present + if initRequest != nil { + initRequest(req) + } + + return req +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/opsworks/waiters.go b/vendor/github.com/aws/aws-sdk-go/service/opsworks/waiters.go new file mode 100644 index 000000000..8d07ac87e --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/opsworks/waiters.go @@ -0,0 +1,290 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +package opsworks + +import ( + "github.com/aws/aws-sdk-go/private/waiter" +) + +func (c *OpsWorks) WaitUntilAppExists(input *DescribeAppsInput) error { + waiterCfg := waiter.Config{ + Operation: "DescribeApps", + Delay: 1, + MaxAttempts: 40, + Acceptors: []waiter.WaitAcceptor{ + { + State: "success", + Matcher: "status", + Argument: "", + Expected: 200, + }, + { + State: "failure", + Matcher: "status", + Argument: "", + Expected: 400, + }, + }, + } + + w := waiter.Waiter{ + Client: c, + Input: input, + Config: waiterCfg, + } + return w.Wait() +} + +func (c *OpsWorks) WaitUntilDeploymentSuccessful(input *DescribeDeploymentsInput) error { + waiterCfg := waiter.Config{ + Operation: "DescribeDeployments", + Delay: 15, + MaxAttempts: 40, + Acceptors: []waiter.WaitAcceptor{ + { + State: "success", + Matcher: "pathAll", + Argument: "Deployments[].Status", + Expected: "successful", + }, + { + State: "failure", + Matcher: "pathAny", + Argument: "Deployments[].Status", + Expected: "failed", + }, + }, + } + + w := waiter.Waiter{ + Client: c, + Input: input, + Config: waiterCfg, + } + return w.Wait() +} + +func (c *OpsWorks) WaitUntilInstanceOnline(input *DescribeInstancesInput) error { + waiterCfg := waiter.Config{ + Operation: "DescribeInstances", + Delay: 15, + MaxAttempts: 40, + Acceptors: []waiter.WaitAcceptor{ + { + State: "success", + Matcher: "pathAll", + Argument: "Instances[].Status", + Expected: "online", + }, + { + State: "failure", + Matcher: "pathAny", + Argument: "Instances[].Status", + Expected: "setup_failed", + }, + { + State: "failure", + Matcher: "pathAny", + Argument: "Instances[].Status", + Expected: "shutting_down", + }, + { + State: "failure", + Matcher: "pathAny", + Argument: "Instances[].Status", + Expected: "start_failed", + }, + { + State: "failure", + Matcher: "pathAny", + Argument: "Instances[].Status", + Expected: "stopped", + }, + { + State: "failure", + Matcher: "pathAny", + Argument: "Instances[].Status", + Expected: "stopping", + }, + { + State: "failure", + Matcher: "pathAny", + Argument: "Instances[].Status", + 
Expected: "terminating", + }, + { + State: "failure", + Matcher: "pathAny", + Argument: "Instances[].Status", + Expected: "terminated", + }, + { + State: "failure", + Matcher: "pathAny", + Argument: "Instances[].Status", + Expected: "stop_failed", + }, + }, + } + + w := waiter.Waiter{ + Client: c, + Input: input, + Config: waiterCfg, + } + return w.Wait() +} + +func (c *OpsWorks) WaitUntilInstanceStopped(input *DescribeInstancesInput) error { + waiterCfg := waiter.Config{ + Operation: "DescribeInstances", + Delay: 15, + MaxAttempts: 40, + Acceptors: []waiter.WaitAcceptor{ + { + State: "success", + Matcher: "pathAll", + Argument: "Instances[].Status", + Expected: "stopped", + }, + { + State: "failure", + Matcher: "pathAny", + Argument: "Instances[].Status", + Expected: "booting", + }, + { + State: "failure", + Matcher: "pathAny", + Argument: "Instances[].Status", + Expected: "online", + }, + { + State: "failure", + Matcher: "pathAny", + Argument: "Instances[].Status", + Expected: "pending", + }, + { + State: "failure", + Matcher: "pathAny", + Argument: "Instances[].Status", + Expected: "rebooting", + }, + { + State: "failure", + Matcher: "pathAny", + Argument: "Instances[].Status", + Expected: "requested", + }, + { + State: "failure", + Matcher: "pathAny", + Argument: "Instances[].Status", + Expected: "running_setup", + }, + { + State: "failure", + Matcher: "pathAny", + Argument: "Instances[].Status", + Expected: "setup_failed", + }, + { + State: "failure", + Matcher: "pathAny", + Argument: "Instances[].Status", + Expected: "start_failed", + }, + { + State: "failure", + Matcher: "pathAny", + Argument: "Instances[].Status", + Expected: "stop_failed", + }, + }, + } + + w := waiter.Waiter{ + Client: c, + Input: input, + Config: waiterCfg, + } + return w.Wait() +} + +func (c *OpsWorks) WaitUntilInstanceTerminated(input *DescribeInstancesInput) error { + waiterCfg := waiter.Config{ + Operation: "DescribeInstances", + Delay: 15, + MaxAttempts: 40, + Acceptors: []waiter.WaitAcceptor{ + { + State: "success", + Matcher: "pathAll", + Argument: "Instances[].Status", + Expected: "terminated", + }, + { + State: "success", + Matcher: "error", + Argument: "", + Expected: "ResourceNotFoundException", + }, + { + State: "failure", + Matcher: "pathAny", + Argument: "Instances[].Status", + Expected: "booting", + }, + { + State: "failure", + Matcher: "pathAny", + Argument: "Instances[].Status", + Expected: "online", + }, + { + State: "failure", + Matcher: "pathAny", + Argument: "Instances[].Status", + Expected: "pending", + }, + { + State: "failure", + Matcher: "pathAny", + Argument: "Instances[].Status", + Expected: "rebooting", + }, + { + State: "failure", + Matcher: "pathAny", + Argument: "Instances[].Status", + Expected: "requested", + }, + { + State: "failure", + Matcher: "pathAny", + Argument: "Instances[].Status", + Expected: "running_setup", + }, + { + State: "failure", + Matcher: "pathAny", + Argument: "Instances[].Status", + Expected: "setup_failed", + }, + { + State: "failure", + Matcher: "pathAny", + Argument: "Instances[].Status", + Expected: "start_failed", + }, + }, + } + + w := waiter.Waiter{ + Client: c, + Input: input, + Config: waiterCfg, + } + return w.Wait() +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/rds/api.go b/vendor/github.com/aws/aws-sdk-go/service/rds/api.go new file mode 100644 index 000000000..66b2f2c34 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/rds/api.go @@ -0,0 +1,10572 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. 
+ +// Package rds provides a client for Amazon Relational Database Service. +package rds + +import ( + "time" + + "github.com/aws/aws-sdk-go/aws/awsutil" + "github.com/aws/aws-sdk-go/aws/request" +) + +const opAddSourceIdentifierToSubscription = "AddSourceIdentifierToSubscription" + +// AddSourceIdentifierToSubscriptionRequest generates a request for the AddSourceIdentifierToSubscription operation. +func (c *RDS) AddSourceIdentifierToSubscriptionRequest(input *AddSourceIdentifierToSubscriptionInput) (req *request.Request, output *AddSourceIdentifierToSubscriptionOutput) { + op := &request.Operation{ + Name: opAddSourceIdentifierToSubscription, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &AddSourceIdentifierToSubscriptionInput{} + } + + req = c.newRequest(op, input, output) + output = &AddSourceIdentifierToSubscriptionOutput{} + req.Data = output + return +} + +// Adds a source identifier to an existing RDS event notification subscription. +func (c *RDS) AddSourceIdentifierToSubscription(input *AddSourceIdentifierToSubscriptionInput) (*AddSourceIdentifierToSubscriptionOutput, error) { + req, out := c.AddSourceIdentifierToSubscriptionRequest(input) + err := req.Send() + return out, err +} + +const opAddTagsToResource = "AddTagsToResource" + +// AddTagsToResourceRequest generates a request for the AddTagsToResource operation. +func (c *RDS) AddTagsToResourceRequest(input *AddTagsToResourceInput) (req *request.Request, output *AddTagsToResourceOutput) { + op := &request.Operation{ + Name: opAddTagsToResource, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &AddTagsToResourceInput{} + } + + req = c.newRequest(op, input, output) + output = &AddTagsToResourceOutput{} + req.Data = output + return +} + +// Adds metadata tags to an Amazon RDS resource. These tags can also be used +// with cost allocation reporting to track cost associated with Amazon RDS resources, +// or used in a Condition statement in an IAM policy for Amazon RDS. +// +// For an overview on tagging Amazon RDS resources, see Tagging Amazon RDS +// Resources (http://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Overview.Tagging.html). +func (c *RDS) AddTagsToResource(input *AddTagsToResourceInput) (*AddTagsToResourceOutput, error) { + req, out := c.AddTagsToResourceRequest(input) + err := req.Send() + return out, err +} + +const opApplyPendingMaintenanceAction = "ApplyPendingMaintenanceAction" + +// ApplyPendingMaintenanceActionRequest generates a request for the ApplyPendingMaintenanceAction operation. +func (c *RDS) ApplyPendingMaintenanceActionRequest(input *ApplyPendingMaintenanceActionInput) (req *request.Request, output *ApplyPendingMaintenanceActionOutput) { + op := &request.Operation{ + Name: opApplyPendingMaintenanceAction, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ApplyPendingMaintenanceActionInput{} + } + + req = c.newRequest(op, input, output) + output = &ApplyPendingMaintenanceActionOutput{} + req.Data = output + return +} + +// Applies a pending maintenance action to a resource (for example, to a DB +// instance). 
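+//
+// A sketch of a typical call, assuming svc is an *RDS client; the ARN,
+// action, and opt-in values are placeholders:
+//
+//	out, err := svc.ApplyPendingMaintenanceAction(&ApplyPendingMaintenanceActionInput{
+//		ResourceIdentifier: aws.String("arn:aws:rds:us-east-1:123456789012:db:mydb"),
+//		ApplyAction:        aws.String("system-update"),
+//		OptInType:          aws.String("immediate"),
+//	})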
+func (c *RDS) ApplyPendingMaintenanceAction(input *ApplyPendingMaintenanceActionInput) (*ApplyPendingMaintenanceActionOutput, error) { + req, out := c.ApplyPendingMaintenanceActionRequest(input) + err := req.Send() + return out, err +} + +const opAuthorizeDBSecurityGroupIngress = "AuthorizeDBSecurityGroupIngress" + +// AuthorizeDBSecurityGroupIngressRequest generates a request for the AuthorizeDBSecurityGroupIngress operation. +func (c *RDS) AuthorizeDBSecurityGroupIngressRequest(input *AuthorizeDBSecurityGroupIngressInput) (req *request.Request, output *AuthorizeDBSecurityGroupIngressOutput) { + op := &request.Operation{ + Name: opAuthorizeDBSecurityGroupIngress, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &AuthorizeDBSecurityGroupIngressInput{} + } + + req = c.newRequest(op, input, output) + output = &AuthorizeDBSecurityGroupIngressOutput{} + req.Data = output + return +} + +// Enables ingress to a DBSecurityGroup using one of two forms of authorization. +// First, EC2 or VPC security groups can be added to the DBSecurityGroup if +// the application using the database is running on EC2 or VPC instances. Second, +// IP ranges are available if the application accessing your database is running +// on the Internet. Required parameters for this API are one of CIDR range, +// EC2SecurityGroupId for VPC, or (EC2SecurityGroupOwnerId and either EC2SecurityGroupName +// or EC2SecurityGroupId for non-VPC). +// +// You cannot authorize ingress from an EC2 security group in one region to +// an Amazon RDS DB instance in another. You cannot authorize ingress from a +// VPC security group in one VPC to an Amazon RDS DB instance in another. For +// an overview of CIDR ranges, go to the Wikipedia Tutorial (http://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing). +func (c *RDS) AuthorizeDBSecurityGroupIngress(input *AuthorizeDBSecurityGroupIngressInput) (*AuthorizeDBSecurityGroupIngressOutput, error) { + req, out := c.AuthorizeDBSecurityGroupIngressRequest(input) + err := req.Send() + return out, err +} + +const opCopyDBClusterSnapshot = "CopyDBClusterSnapshot" + +// CopyDBClusterSnapshotRequest generates a request for the CopyDBClusterSnapshot operation. +func (c *RDS) CopyDBClusterSnapshotRequest(input *CopyDBClusterSnapshotInput) (req *request.Request, output *CopyDBClusterSnapshotOutput) { + op := &request.Operation{ + Name: opCopyDBClusterSnapshot, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CopyDBClusterSnapshotInput{} + } + + req = c.newRequest(op, input, output) + output = &CopyDBClusterSnapshotOutput{} + req.Data = output + return +} + +// Creates a snapshot of a DB cluster. For more information on Amazon Aurora, +// see Aurora on Amazon RDS (http://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/CHAP_Aurora.html) +// in the Amazon RDS User Guide. +func (c *RDS) CopyDBClusterSnapshot(input *CopyDBClusterSnapshotInput) (*CopyDBClusterSnapshotOutput, error) { + req, out := c.CopyDBClusterSnapshotRequest(input) + err := req.Send() + return out, err +} + +const opCopyDBParameterGroup = "CopyDBParameterGroup" + +// CopyDBParameterGroupRequest generates a request for the CopyDBParameterGroup operation. 
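+//
+// Like every generated *Request method, it returns an unsent request plus its
+// output placeholder, so the request can be adjusted before sending; a sketch
+// (the Retryable tweak is optional and illustrative):
+//
+//	req, out := svc.CopyDBParameterGroupRequest(input)
+//	req.Retryable = aws.Bool(false) // e.g. disable retries for this call
+//	err := req.Send()               // on success, out is populated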
+func (c *RDS) CopyDBParameterGroupRequest(input *CopyDBParameterGroupInput) (req *request.Request, output *CopyDBParameterGroupOutput) { + op := &request.Operation{ + Name: opCopyDBParameterGroup, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CopyDBParameterGroupInput{} + } + + req = c.newRequest(op, input, output) + output = &CopyDBParameterGroupOutput{} + req.Data = output + return +} + +// Copies the specified DB parameter group. +func (c *RDS) CopyDBParameterGroup(input *CopyDBParameterGroupInput) (*CopyDBParameterGroupOutput, error) { + req, out := c.CopyDBParameterGroupRequest(input) + err := req.Send() + return out, err +} + +const opCopyDBSnapshot = "CopyDBSnapshot" + +// CopyDBSnapshotRequest generates a request for the CopyDBSnapshot operation. +func (c *RDS) CopyDBSnapshotRequest(input *CopyDBSnapshotInput) (req *request.Request, output *CopyDBSnapshotOutput) { + op := &request.Operation{ + Name: opCopyDBSnapshot, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CopyDBSnapshotInput{} + } + + req = c.newRequest(op, input, output) + output = &CopyDBSnapshotOutput{} + req.Data = output + return +} + +// Copies the specified DBSnapshot. The source DB snapshot must be in the "available" +// state. +// +// If you are copying from a shared manual DB snapshot, the SourceDBSnapshotIdentifier +// must be the ARN of the shared DB snapshot. +func (c *RDS) CopyDBSnapshot(input *CopyDBSnapshotInput) (*CopyDBSnapshotOutput, error) { + req, out := c.CopyDBSnapshotRequest(input) + err := req.Send() + return out, err +} + +const opCopyOptionGroup = "CopyOptionGroup" + +// CopyOptionGroupRequest generates a request for the CopyOptionGroup operation. +func (c *RDS) CopyOptionGroupRequest(input *CopyOptionGroupInput) (req *request.Request, output *CopyOptionGroupOutput) { + op := &request.Operation{ + Name: opCopyOptionGroup, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CopyOptionGroupInput{} + } + + req = c.newRequest(op, input, output) + output = &CopyOptionGroupOutput{} + req.Data = output + return +} + +// Copies the specified option group. +func (c *RDS) CopyOptionGroup(input *CopyOptionGroupInput) (*CopyOptionGroupOutput, error) { + req, out := c.CopyOptionGroupRequest(input) + err := req.Send() + return out, err +} + +const opCreateDBCluster = "CreateDBCluster" + +// CreateDBClusterRequest generates a request for the CreateDBCluster operation. +func (c *RDS) CreateDBClusterRequest(input *CreateDBClusterInput) (req *request.Request, output *CreateDBClusterOutput) { + op := &request.Operation{ + Name: opCreateDBCluster, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateDBClusterInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateDBClusterOutput{} + req.Data = output + return +} + +// Creates a new Amazon Aurora DB cluster. For more information on Amazon Aurora, +// see Aurora on Amazon RDS (http://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/CHAP_Aurora.html) +// in the Amazon RDS User Guide. +func (c *RDS) CreateDBCluster(input *CreateDBClusterInput) (*CreateDBClusterOutput, error) { + req, out := c.CreateDBClusterRequest(input) + err := req.Send() + return out, err +} + +const opCreateDBClusterParameterGroup = "CreateDBClusterParameterGroup" + +// CreateDBClusterParameterGroupRequest generates a request for the CreateDBClusterParameterGroup operation. 
+func (c *RDS) CreateDBClusterParameterGroupRequest(input *CreateDBClusterParameterGroupInput) (req *request.Request, output *CreateDBClusterParameterGroupOutput) { + op := &request.Operation{ + Name: opCreateDBClusterParameterGroup, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateDBClusterParameterGroupInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateDBClusterParameterGroupOutput{} + req.Data = output + return +} + +// Creates a new DB cluster parameter group. +// +// Parameters in a DB cluster parameter group apply to all of the instances +// in a DB cluster. +// +// A DB cluster parameter group is initially created with the default parameters +// for the database engine used by instances in the DB cluster. To provide custom +// values for any of the parameters, you must modify the group after creating +// it using ModifyDBClusterParameterGroup. Once you've created a DB cluster +// parameter group, you need to associate it with your DB cluster using ModifyDBCluster. +// When you associate a new DB cluster parameter group with a running DB cluster, +// you need to reboot the DB instances in the DB cluster without failover for +// the new DB cluster parameter group and associated settings to take effect. +// +// After you create a DB cluster parameter group, you should wait at least +// 5 minutes before creating your first DB cluster that uses that DB cluster +// parameter group as the default parameter group. This allows Amazon RDS to +// fully complete the create action before the DB cluster parameter group is +// used as the default for a new DB cluster. This is especially important for +// parameters that are critical when creating the default database for a DB +// cluster, such as the character set for the default database defined by the +// character_set_database parameter. You can use the Parameter Groups option +// of the Amazon RDS console (https://console.aws.amazon.com/rds/) or the DescribeDBClusterParameters +// command to verify that your DB cluster parameter group has been created or +// modified. +// +// For more information on Amazon Aurora, see Aurora on Amazon RDS (http://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/CHAP_Aurora.html) +// in the Amazon RDS User Guide. +func (c *RDS) CreateDBClusterParameterGroup(input *CreateDBClusterParameterGroupInput) (*CreateDBClusterParameterGroupOutput, error) { + req, out := c.CreateDBClusterParameterGroupRequest(input) + err := req.Send() + return out, err +} + +const opCreateDBClusterSnapshot = "CreateDBClusterSnapshot" + +// CreateDBClusterSnapshotRequest generates a request for the CreateDBClusterSnapshot operation. +func (c *RDS) CreateDBClusterSnapshotRequest(input *CreateDBClusterSnapshotInput) (req *request.Request, output *CreateDBClusterSnapshotOutput) { + op := &request.Operation{ + Name: opCreateDBClusterSnapshot, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateDBClusterSnapshotInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateDBClusterSnapshotOutput{} + req.Data = output + return +} + +// Creates a snapshot of a DB cluster. For more information on Amazon Aurora, +// see Aurora on Amazon RDS (http://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/CHAP_Aurora.html) +// in the Amazon RDS User Guide. 
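+//
+// A sketch; both identifiers are required and the values are placeholders:
+//
+//	out, err := svc.CreateDBClusterSnapshot(&CreateDBClusterSnapshotInput{
+//		DBClusterIdentifier:         aws.String("my-aurora-cluster"),
+//		DBClusterSnapshotIdentifier: aws.String("my-cluster-snapshot-1"),
+//	})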
+func (c *RDS) CreateDBClusterSnapshot(input *CreateDBClusterSnapshotInput) (*CreateDBClusterSnapshotOutput, error) { + req, out := c.CreateDBClusterSnapshotRequest(input) + err := req.Send() + return out, err +} + +const opCreateDBInstance = "CreateDBInstance" + +// CreateDBInstanceRequest generates a request for the CreateDBInstance operation. +func (c *RDS) CreateDBInstanceRequest(input *CreateDBInstanceInput) (req *request.Request, output *CreateDBInstanceOutput) { + op := &request.Operation{ + Name: opCreateDBInstance, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateDBInstanceInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateDBInstanceOutput{} + req.Data = output + return +} + +// Creates a new DB instance. +func (c *RDS) CreateDBInstance(input *CreateDBInstanceInput) (*CreateDBInstanceOutput, error) { + req, out := c.CreateDBInstanceRequest(input) + err := req.Send() + return out, err +} + +const opCreateDBInstanceReadReplica = "CreateDBInstanceReadReplica" + +// CreateDBInstanceReadReplicaRequest generates a request for the CreateDBInstanceReadReplica operation. +func (c *RDS) CreateDBInstanceReadReplicaRequest(input *CreateDBInstanceReadReplicaInput) (req *request.Request, output *CreateDBInstanceReadReplicaOutput) { + op := &request.Operation{ + Name: opCreateDBInstanceReadReplica, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateDBInstanceReadReplicaInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateDBInstanceReadReplicaOutput{} + req.Data = output + return +} + +// Creates a DB instance for a DB instance running MySQL, MariaDB, or PostgreSQL +// that acts as a Read Replica of a source DB instance. +// +// All Read Replica DB instances are created as Single-AZ deployments with +// backups disabled. All other DB instance attributes (including DB security +// groups and DB parameter groups) are inherited from the source DB instance, +// except as specified below. +// +// The source DB instance must have backup retention enabled. +func (c *RDS) CreateDBInstanceReadReplica(input *CreateDBInstanceReadReplicaInput) (*CreateDBInstanceReadReplicaOutput, error) { + req, out := c.CreateDBInstanceReadReplicaRequest(input) + err := req.Send() + return out, err +} + +const opCreateDBParameterGroup = "CreateDBParameterGroup" + +// CreateDBParameterGroupRequest generates a request for the CreateDBParameterGroup operation. +func (c *RDS) CreateDBParameterGroupRequest(input *CreateDBParameterGroupInput) (req *request.Request, output *CreateDBParameterGroupOutput) { + op := &request.Operation{ + Name: opCreateDBParameterGroup, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateDBParameterGroupInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateDBParameterGroupOutput{} + req.Data = output + return +} + +// Creates a new DB parameter group. +// +// A DB parameter group is initially created with the default parameters for +// the database engine used by the DB instance. To provide custom values for +// any of the parameters, you must modify the group after creating it using +// ModifyDBParameterGroup. Once you've created a DB parameter group, you need +// to associate it with your DB instance using ModifyDBInstance. When you associate +// a new DB parameter group with a running DB instance, you need to reboot the +// DB instance without failover for the new DB parameter group and associated +// settings to take effect. 
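+//
+// A sketch of that create-modify-associate flow; the names are placeholders
+// and the Modify* inputs are elided:
+//
+//	_, err := svc.CreateDBParameterGroup(&CreateDBParameterGroupInput{
+//		DBParameterGroupName:   aws.String("my-params"),
+//		DBParameterGroupFamily: aws.String("mysql5.6"),
+//		Description:            aws.String("custom settings"),
+//	})
+//	// ...then ModifyDBParameterGroup, ModifyDBInstance, and a reboot
+//	// without failover, as described above.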
+// +// After you create a DB parameter group, you should wait at least 5 minutes +// before creating your first DB instance that uses that DB parameter group +// as the default parameter group. This allows Amazon RDS to fully complete +// the create action before the parameter group is used as the default for a +// new DB instance. This is especially important for parameters that are critical +// when creating the default database for a DB instance, such as the character +// set for the default database defined by the character_set_database parameter. +// You can use the Parameter Groups option of the Amazon RDS console (https://console.aws.amazon.com/rds/) +// or the DescribeDBParameters command to verify that your DB parameter group +// has been created or modified. +func (c *RDS) CreateDBParameterGroup(input *CreateDBParameterGroupInput) (*CreateDBParameterGroupOutput, error) { + req, out := c.CreateDBParameterGroupRequest(input) + err := req.Send() + return out, err +} + +const opCreateDBSecurityGroup = "CreateDBSecurityGroup" + +// CreateDBSecurityGroupRequest generates a request for the CreateDBSecurityGroup operation. +func (c *RDS) CreateDBSecurityGroupRequest(input *CreateDBSecurityGroupInput) (req *request.Request, output *CreateDBSecurityGroupOutput) { + op := &request.Operation{ + Name: opCreateDBSecurityGroup, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateDBSecurityGroupInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateDBSecurityGroupOutput{} + req.Data = output + return +} + +// Creates a new DB security group. DB security groups control access to a DB +// instance. +func (c *RDS) CreateDBSecurityGroup(input *CreateDBSecurityGroupInput) (*CreateDBSecurityGroupOutput, error) { + req, out := c.CreateDBSecurityGroupRequest(input) + err := req.Send() + return out, err +} + +const opCreateDBSnapshot = "CreateDBSnapshot" + +// CreateDBSnapshotRequest generates a request for the CreateDBSnapshot operation. +func (c *RDS) CreateDBSnapshotRequest(input *CreateDBSnapshotInput) (req *request.Request, output *CreateDBSnapshotOutput) { + op := &request.Operation{ + Name: opCreateDBSnapshot, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateDBSnapshotInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateDBSnapshotOutput{} + req.Data = output + return +} + +// Creates a DBSnapshot. The source DBInstance must be in "available" state. +func (c *RDS) CreateDBSnapshot(input *CreateDBSnapshotInput) (*CreateDBSnapshotOutput, error) { + req, out := c.CreateDBSnapshotRequest(input) + err := req.Send() + return out, err +} + +const opCreateDBSubnetGroup = "CreateDBSubnetGroup" + +// CreateDBSubnetGroupRequest generates a request for the CreateDBSubnetGroup operation. +func (c *RDS) CreateDBSubnetGroupRequest(input *CreateDBSubnetGroupInput) (req *request.Request, output *CreateDBSubnetGroupOutput) { + op := &request.Operation{ + Name: opCreateDBSubnetGroup, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateDBSubnetGroupInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateDBSubnetGroupOutput{} + req.Data = output + return +} + +// Creates a new DB subnet group. DB subnet groups must contain at least one +// subnet in at least two AZs in the region. 
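+//
+// A sketch satisfying the two-AZ requirement; the group name and subnet IDs
+// are placeholders:
+//
+//	out, err := svc.CreateDBSubnetGroup(&CreateDBSubnetGroupInput{
+//		DBSubnetGroupName:        aws.String("my-db-subnet-group"),
+//		DBSubnetGroupDescription: aws.String("spans two availability zones"),
+//		SubnetIds: []*string{
+//			aws.String("subnet-aaaa1111"), // first AZ
+//			aws.String("subnet-bbbb2222"), // second AZ
+//		},
+//	})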
+func (c *RDS) CreateDBSubnetGroup(input *CreateDBSubnetGroupInput) (*CreateDBSubnetGroupOutput, error) {
+ req, out := c.CreateDBSubnetGroupRequest(input)
+ err := req.Send()
+ return out, err
+}
+
+const opCreateEventSubscription = "CreateEventSubscription"
+
+// CreateEventSubscriptionRequest generates a request for the CreateEventSubscription operation.
+func (c *RDS) CreateEventSubscriptionRequest(input *CreateEventSubscriptionInput) (req *request.Request, output *CreateEventSubscriptionOutput) {
+ op := &request.Operation{
+ Name: opCreateEventSubscription,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &CreateEventSubscriptionInput{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &CreateEventSubscriptionOutput{}
+ req.Data = output
+ return
+}
+
+// Creates an RDS event notification subscription. This action requires a topic
+// ARN (Amazon Resource Name) created by either the RDS console, the SNS console,
+// or the SNS API. To obtain an ARN with SNS, you must create a topic in Amazon
+// SNS and subscribe to the topic. The ARN is displayed in the SNS console.
+//
+// You can specify the type of source (SourceType) you want to be notified
+// of, provide a list of RDS sources (SourceIds) that trigger the events, and
+// provide a list of event categories (EventCategories) for events you want
+// to be notified of. For example, you can specify SourceType = db-instance,
+// SourceIds = mydbinstance1, mydbinstance2 and EventCategories = Availability,
+// Backup.
+//
+// If you specify both the SourceType and SourceIds, such as SourceType = db-instance
+// and SourceIdentifier = myDBInstance1, you will be notified of all the db-instance
+// events for the specified source. If you specify a SourceType but do not specify
+// a SourceIdentifier, you will receive notice of the events for that source
+// type for all your RDS sources. If you specify neither the SourceType nor
+// the SourceIdentifier, you will be notified of events generated from all
+// RDS sources belonging to your customer account.
+func (c *RDS) CreateEventSubscription(input *CreateEventSubscriptionInput) (*CreateEventSubscriptionOutput, error) {
+ req, out := c.CreateEventSubscriptionRequest(input)
+ err := req.Send()
+ return out, err
+}
+
+const opCreateOptionGroup = "CreateOptionGroup"
+
+// CreateOptionGroupRequest generates a request for the CreateOptionGroup operation.
+func (c *RDS) CreateOptionGroupRequest(input *CreateOptionGroupInput) (req *request.Request, output *CreateOptionGroupOutput) {
+ op := &request.Operation{
+ Name: opCreateOptionGroup,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &CreateOptionGroupInput{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &CreateOptionGroupOutput{}
+ req.Data = output
+ return
+}
+
+// Creates a new option group. You can create up to 20 option groups.
+func (c *RDS) CreateOptionGroup(input *CreateOptionGroupInput) (*CreateOptionGroupOutput, error) {
+ req, out := c.CreateOptionGroupRequest(input)
+ err := req.Send()
+ return out, err
+}
+
+const opDeleteDBCluster = "DeleteDBCluster"
+
+// DeleteDBClusterRequest generates a request for the DeleteDBCluster operation.
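+//
+// The generated methods all share the same two-step pattern: build a request,
+// then Send it. A sketch using this operation (svc and the identifiers are
+// placeholder assumptions):
+//
+//    req, out := svc.DeleteDBClusterRequest(&rds.DeleteDBClusterInput{
+//        DBClusterIdentifier:       aws.String("example-cluster"),
+//        FinalDBSnapshotIdentifier: aws.String("example-cluster-final"),
+//    })
+//    err := req.Send() // out is only populated once Send returns without error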
+func (c *RDS) DeleteDBClusterRequest(input *DeleteDBClusterInput) (req *request.Request, output *DeleteDBClusterOutput) { + op := &request.Operation{ + Name: opDeleteDBCluster, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteDBClusterInput{} + } + + req = c.newRequest(op, input, output) + output = &DeleteDBClusterOutput{} + req.Data = output + return +} + +// The DeleteDBCluster action deletes a previously provisioned DB cluster. A +// successful response from the web service indicates the request was received +// correctly. When you delete a DB cluster, all automated backups for that DB +// cluster are deleted and cannot be recovered. Manual DB cluster snapshots +// of the DB cluster to be deleted are not deleted. +// +// For more information on Amazon Aurora, see Aurora on Amazon RDS (http://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/CHAP_Aurora.html) +// in the Amazon RDS User Guide. +func (c *RDS) DeleteDBCluster(input *DeleteDBClusterInput) (*DeleteDBClusterOutput, error) { + req, out := c.DeleteDBClusterRequest(input) + err := req.Send() + return out, err +} + +const opDeleteDBClusterParameterGroup = "DeleteDBClusterParameterGroup" + +// DeleteDBClusterParameterGroupRequest generates a request for the DeleteDBClusterParameterGroup operation. +func (c *RDS) DeleteDBClusterParameterGroupRequest(input *DeleteDBClusterParameterGroupInput) (req *request.Request, output *DeleteDBClusterParameterGroupOutput) { + op := &request.Operation{ + Name: opDeleteDBClusterParameterGroup, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteDBClusterParameterGroupInput{} + } + + req = c.newRequest(op, input, output) + output = &DeleteDBClusterParameterGroupOutput{} + req.Data = output + return +} + +// Deletes a specified DB cluster parameter group. The DB cluster parameter +// group to be deleted cannot be associated with any DB clusters. +// +// For more information on Amazon Aurora, see Aurora on Amazon RDS (http://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/CHAP_Aurora.html) +// in the Amazon RDS User Guide. +func (c *RDS) DeleteDBClusterParameterGroup(input *DeleteDBClusterParameterGroupInput) (*DeleteDBClusterParameterGroupOutput, error) { + req, out := c.DeleteDBClusterParameterGroupRequest(input) + err := req.Send() + return out, err +} + +const opDeleteDBClusterSnapshot = "DeleteDBClusterSnapshot" + +// DeleteDBClusterSnapshotRequest generates a request for the DeleteDBClusterSnapshot operation. +func (c *RDS) DeleteDBClusterSnapshotRequest(input *DeleteDBClusterSnapshotInput) (req *request.Request, output *DeleteDBClusterSnapshotOutput) { + op := &request.Operation{ + Name: opDeleteDBClusterSnapshot, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteDBClusterSnapshotInput{} + } + + req = c.newRequest(op, input, output) + output = &DeleteDBClusterSnapshotOutput{} + req.Data = output + return +} + +// Deletes a DB cluster snapshot. If the snapshot is being copied, the copy +// operation is terminated. +// +// The DB cluster snapshot must be in the available state to be deleted. For +// more information on Amazon Aurora, see Aurora on Amazon RDS (http://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/CHAP_Aurora.html) +// in the Amazon RDS User Guide. 
+func (c *RDS) DeleteDBClusterSnapshot(input *DeleteDBClusterSnapshotInput) (*DeleteDBClusterSnapshotOutput, error) {
+ req, out := c.DeleteDBClusterSnapshotRequest(input)
+ err := req.Send()
+ return out, err
+}
+
+const opDeleteDBInstance = "DeleteDBInstance"
+
+// DeleteDBInstanceRequest generates a request for the DeleteDBInstance operation.
+func (c *RDS) DeleteDBInstanceRequest(input *DeleteDBInstanceInput) (req *request.Request, output *DeleteDBInstanceOutput) {
+ op := &request.Operation{
+ Name: opDeleteDBInstance,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &DeleteDBInstanceInput{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &DeleteDBInstanceOutput{}
+ req.Data = output
+ return
+}
+
+// The DeleteDBInstance action deletes a previously provisioned DB instance.
+// A successful response from the web service indicates the request was received
+// correctly. When you delete a DB instance, all automated backups for that
+// instance are deleted and cannot be recovered. Manual DB snapshots of the
+// DB instance to be deleted are not deleted.
+//
+// If a final DB snapshot is requested, the status of the RDS instance will
+// be "deleting" until the DB snapshot is created. The API action DescribeDBInstances
+// is used to monitor the status of this operation. The action cannot be canceled
+// or reverted once submitted.
+//
+// Note that when a DB instance is in a failure state and has a status of 'failed',
+// 'incompatible-restore', or 'incompatible-network', it can only be deleted
+// when the SkipFinalSnapshot parameter is set to "true".
+func (c *RDS) DeleteDBInstance(input *DeleteDBInstanceInput) (*DeleteDBInstanceOutput, error) {
+ req, out := c.DeleteDBInstanceRequest(input)
+ err := req.Send()
+ return out, err
+}
+
+const opDeleteDBParameterGroup = "DeleteDBParameterGroup"
+
+// DeleteDBParameterGroupRequest generates a request for the DeleteDBParameterGroup operation.
+func (c *RDS) DeleteDBParameterGroupRequest(input *DeleteDBParameterGroupInput) (req *request.Request, output *DeleteDBParameterGroupOutput) {
+ op := &request.Operation{
+ Name: opDeleteDBParameterGroup,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &DeleteDBParameterGroupInput{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &DeleteDBParameterGroupOutput{}
+ req.Data = output
+ return
+}
+
+// Deletes a specified DBParameterGroup. The DBParameterGroup to be deleted
+// cannot be associated with any DB instances.
+func (c *RDS) DeleteDBParameterGroup(input *DeleteDBParameterGroupInput) (*DeleteDBParameterGroupOutput, error) {
+ req, out := c.DeleteDBParameterGroupRequest(input)
+ err := req.Send()
+ return out, err
+}
+
+const opDeleteDBSecurityGroup = "DeleteDBSecurityGroup"
+
+// DeleteDBSecurityGroupRequest generates a request for the DeleteDBSecurityGroup operation.
+func (c *RDS) DeleteDBSecurityGroupRequest(input *DeleteDBSecurityGroupInput) (req *request.Request, output *DeleteDBSecurityGroupOutput) {
+ op := &request.Operation{
+ Name: opDeleteDBSecurityGroup,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &DeleteDBSecurityGroupInput{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &DeleteDBSecurityGroupOutput{}
+ req.Data = output
+ return
+}
+
+// Deletes a DB security group.
+//
+// The specified DB security group must not be associated with any DB instances.
+func (c *RDS) DeleteDBSecurityGroup(input *DeleteDBSecurityGroupInput) (*DeleteDBSecurityGroupOutput, error) { + req, out := c.DeleteDBSecurityGroupRequest(input) + err := req.Send() + return out, err +} + +const opDeleteDBSnapshot = "DeleteDBSnapshot" + +// DeleteDBSnapshotRequest generates a request for the DeleteDBSnapshot operation. +func (c *RDS) DeleteDBSnapshotRequest(input *DeleteDBSnapshotInput) (req *request.Request, output *DeleteDBSnapshotOutput) { + op := &request.Operation{ + Name: opDeleteDBSnapshot, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteDBSnapshotInput{} + } + + req = c.newRequest(op, input, output) + output = &DeleteDBSnapshotOutput{} + req.Data = output + return +} + +// Deletes a DBSnapshot. If the snapshot is being copied, the copy operation +// is terminated. +// +// The DBSnapshot must be in the available state to be deleted. +func (c *RDS) DeleteDBSnapshot(input *DeleteDBSnapshotInput) (*DeleteDBSnapshotOutput, error) { + req, out := c.DeleteDBSnapshotRequest(input) + err := req.Send() + return out, err +} + +const opDeleteDBSubnetGroup = "DeleteDBSubnetGroup" + +// DeleteDBSubnetGroupRequest generates a request for the DeleteDBSubnetGroup operation. +func (c *RDS) DeleteDBSubnetGroupRequest(input *DeleteDBSubnetGroupInput) (req *request.Request, output *DeleteDBSubnetGroupOutput) { + op := &request.Operation{ + Name: opDeleteDBSubnetGroup, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteDBSubnetGroupInput{} + } + + req = c.newRequest(op, input, output) + output = &DeleteDBSubnetGroupOutput{} + req.Data = output + return +} + +// Deletes a DB subnet group. +// +// The specified database subnet group must not be associated with any DB instances. +func (c *RDS) DeleteDBSubnetGroup(input *DeleteDBSubnetGroupInput) (*DeleteDBSubnetGroupOutput, error) { + req, out := c.DeleteDBSubnetGroupRequest(input) + err := req.Send() + return out, err +} + +const opDeleteEventSubscription = "DeleteEventSubscription" + +// DeleteEventSubscriptionRequest generates a request for the DeleteEventSubscription operation. +func (c *RDS) DeleteEventSubscriptionRequest(input *DeleteEventSubscriptionInput) (req *request.Request, output *DeleteEventSubscriptionOutput) { + op := &request.Operation{ + Name: opDeleteEventSubscription, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteEventSubscriptionInput{} + } + + req = c.newRequest(op, input, output) + output = &DeleteEventSubscriptionOutput{} + req.Data = output + return +} + +// Deletes an RDS event notification subscription. +func (c *RDS) DeleteEventSubscription(input *DeleteEventSubscriptionInput) (*DeleteEventSubscriptionOutput, error) { + req, out := c.DeleteEventSubscriptionRequest(input) + err := req.Send() + return out, err +} + +const opDeleteOptionGroup = "DeleteOptionGroup" + +// DeleteOptionGroupRequest generates a request for the DeleteOptionGroup operation. +func (c *RDS) DeleteOptionGroupRequest(input *DeleteOptionGroupInput) (req *request.Request, output *DeleteOptionGroupOutput) { + op := &request.Operation{ + Name: opDeleteOptionGroup, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteOptionGroupInput{} + } + + req = c.newRequest(op, input, output) + output = &DeleteOptionGroupOutput{} + req.Data = output + return +} + +// Deletes an existing option group. 
+func (c *RDS) DeleteOptionGroup(input *DeleteOptionGroupInput) (*DeleteOptionGroupOutput, error) { + req, out := c.DeleteOptionGroupRequest(input) + err := req.Send() + return out, err +} + +const opDescribeAccountAttributes = "DescribeAccountAttributes" + +// DescribeAccountAttributesRequest generates a request for the DescribeAccountAttributes operation. +func (c *RDS) DescribeAccountAttributesRequest(input *DescribeAccountAttributesInput) (req *request.Request, output *DescribeAccountAttributesOutput) { + op := &request.Operation{ + Name: opDescribeAccountAttributes, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeAccountAttributesInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeAccountAttributesOutput{} + req.Data = output + return +} + +// Lists all of the attributes for a customer account. The attributes include +// Amazon RDS quotas for the account, such as the number of DB instances allowed. +// The description for a quota includes the quota name, current usage toward +// that quota, and the quota's maximum value. +// +// This command does not take any parameters. +func (c *RDS) DescribeAccountAttributes(input *DescribeAccountAttributesInput) (*DescribeAccountAttributesOutput, error) { + req, out := c.DescribeAccountAttributesRequest(input) + err := req.Send() + return out, err +} + +const opDescribeCertificates = "DescribeCertificates" + +// DescribeCertificatesRequest generates a request for the DescribeCertificates operation. +func (c *RDS) DescribeCertificatesRequest(input *DescribeCertificatesInput) (req *request.Request, output *DescribeCertificatesOutput) { + op := &request.Operation{ + Name: opDescribeCertificates, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeCertificatesInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeCertificatesOutput{} + req.Data = output + return +} + +// Lists the set of CA certificates provided by Amazon RDS for this AWS account. +func (c *RDS) DescribeCertificates(input *DescribeCertificatesInput) (*DescribeCertificatesOutput, error) { + req, out := c.DescribeCertificatesRequest(input) + err := req.Send() + return out, err +} + +const opDescribeDBClusterParameterGroups = "DescribeDBClusterParameterGroups" + +// DescribeDBClusterParameterGroupsRequest generates a request for the DescribeDBClusterParameterGroups operation. +func (c *RDS) DescribeDBClusterParameterGroupsRequest(input *DescribeDBClusterParameterGroupsInput) (req *request.Request, output *DescribeDBClusterParameterGroupsOutput) { + op := &request.Operation{ + Name: opDescribeDBClusterParameterGroups, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeDBClusterParameterGroupsInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeDBClusterParameterGroupsOutput{} + req.Data = output + return +} + +// Returns a list of DBClusterParameterGroup descriptions. If a DBClusterParameterGroupName +// parameter is specified, the list will contain only the description of the +// specified DB cluster parameter group. +// +// For more information on Amazon Aurora, see Aurora on Amazon RDS (http://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/CHAP_Aurora.html) +// in the Amazon RDS User Guide. 
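+//
+// Illustrative sketch (the group name is a placeholder): setting
+// DBClusterParameterGroupName narrows the result to one group, while leaving
+// it nil returns every group in the account.
+//
+//    out, err := svc.DescribeDBClusterParameterGroups(&rds.DescribeDBClusterParameterGroupsInput{
+//        DBClusterParameterGroupName: aws.String("example-aurora-params"),
+//    })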
+func (c *RDS) DescribeDBClusterParameterGroups(input *DescribeDBClusterParameterGroupsInput) (*DescribeDBClusterParameterGroupsOutput, error) { + req, out := c.DescribeDBClusterParameterGroupsRequest(input) + err := req.Send() + return out, err +} + +const opDescribeDBClusterParameters = "DescribeDBClusterParameters" + +// DescribeDBClusterParametersRequest generates a request for the DescribeDBClusterParameters operation. +func (c *RDS) DescribeDBClusterParametersRequest(input *DescribeDBClusterParametersInput) (req *request.Request, output *DescribeDBClusterParametersOutput) { + op := &request.Operation{ + Name: opDescribeDBClusterParameters, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeDBClusterParametersInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeDBClusterParametersOutput{} + req.Data = output + return +} + +// Returns the detailed parameter list for a particular DB cluster parameter +// group. +// +// For more information on Amazon Aurora, see Aurora on Amazon RDS (http://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/CHAP_Aurora.html) +// in the Amazon RDS User Guide. +func (c *RDS) DescribeDBClusterParameters(input *DescribeDBClusterParametersInput) (*DescribeDBClusterParametersOutput, error) { + req, out := c.DescribeDBClusterParametersRequest(input) + err := req.Send() + return out, err +} + +const opDescribeDBClusterSnapshots = "DescribeDBClusterSnapshots" + +// DescribeDBClusterSnapshotsRequest generates a request for the DescribeDBClusterSnapshots operation. +func (c *RDS) DescribeDBClusterSnapshotsRequest(input *DescribeDBClusterSnapshotsInput) (req *request.Request, output *DescribeDBClusterSnapshotsOutput) { + op := &request.Operation{ + Name: opDescribeDBClusterSnapshots, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeDBClusterSnapshotsInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeDBClusterSnapshotsOutput{} + req.Data = output + return +} + +// Returns information about DB cluster snapshots. This API supports pagination. +// +// For more information on Amazon Aurora, see Aurora on Amazon RDS (http://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/CHAP_Aurora.html) +// in the Amazon RDS User Guide. +func (c *RDS) DescribeDBClusterSnapshots(input *DescribeDBClusterSnapshotsInput) (*DescribeDBClusterSnapshotsOutput, error) { + req, out := c.DescribeDBClusterSnapshotsRequest(input) + err := req.Send() + return out, err +} + +const opDescribeDBClusters = "DescribeDBClusters" + +// DescribeDBClustersRequest generates a request for the DescribeDBClusters operation. +func (c *RDS) DescribeDBClustersRequest(input *DescribeDBClustersInput) (req *request.Request, output *DescribeDBClustersOutput) { + op := &request.Operation{ + Name: opDescribeDBClusters, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeDBClustersInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeDBClustersOutput{} + req.Data = output + return +} + +// Returns information about provisioned Aurora DB clusters. This API supports +// pagination. +// +// For more information on Amazon Aurora, see Aurora on Amazon RDS (http://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/CHAP_Aurora.html) +// in the Amazon RDS User Guide. 
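+//
+// No Pages helper is generated for this operation, so a caller can walk the
+// result set with the Marker token directly. A sketch, assuming the output's
+// DBClusters and Marker members:
+//
+//    input := &rds.DescribeDBClustersInput{}
+//    for {
+//        out, err := svc.DescribeDBClusters(input)
+//        if err != nil {
+//            break
+//        }
+//        // ... inspect out.DBClusters ...
+//        if out.Marker == nil {
+//            break // no further pages
+//        }
+//        input.Marker = out.Marker
+//    }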
+func (c *RDS) DescribeDBClusters(input *DescribeDBClustersInput) (*DescribeDBClustersOutput, error) { + req, out := c.DescribeDBClustersRequest(input) + err := req.Send() + return out, err +} + +const opDescribeDBEngineVersions = "DescribeDBEngineVersions" + +// DescribeDBEngineVersionsRequest generates a request for the DescribeDBEngineVersions operation. +func (c *RDS) DescribeDBEngineVersionsRequest(input *DescribeDBEngineVersionsInput) (req *request.Request, output *DescribeDBEngineVersionsOutput) { + op := &request.Operation{ + Name: opDescribeDBEngineVersions, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"Marker"}, + OutputTokens: []string{"Marker"}, + LimitToken: "MaxRecords", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeDBEngineVersionsInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeDBEngineVersionsOutput{} + req.Data = output + return +} + +// Returns a list of the available DB engines. +func (c *RDS) DescribeDBEngineVersions(input *DescribeDBEngineVersionsInput) (*DescribeDBEngineVersionsOutput, error) { + req, out := c.DescribeDBEngineVersionsRequest(input) + err := req.Send() + return out, err +} + +func (c *RDS) DescribeDBEngineVersionsPages(input *DescribeDBEngineVersionsInput, fn func(p *DescribeDBEngineVersionsOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.DescribeDBEngineVersionsRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*DescribeDBEngineVersionsOutput), lastPage) + }) +} + +const opDescribeDBInstances = "DescribeDBInstances" + +// DescribeDBInstancesRequest generates a request for the DescribeDBInstances operation. +func (c *RDS) DescribeDBInstancesRequest(input *DescribeDBInstancesInput) (req *request.Request, output *DescribeDBInstancesOutput) { + op := &request.Operation{ + Name: opDescribeDBInstances, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"Marker"}, + OutputTokens: []string{"Marker"}, + LimitToken: "MaxRecords", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeDBInstancesInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeDBInstancesOutput{} + req.Data = output + return +} + +// Returns information about provisioned RDS instances. This API supports pagination. +func (c *RDS) DescribeDBInstances(input *DescribeDBInstancesInput) (*DescribeDBInstancesOutput, error) { + req, out := c.DescribeDBInstancesRequest(input) + err := req.Send() + return out, err +} + +func (c *RDS) DescribeDBInstancesPages(input *DescribeDBInstancesInput, fn func(p *DescribeDBInstancesOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.DescribeDBInstancesRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*DescribeDBInstancesOutput), lastPage) + }) +} + +const opDescribeDBLogFiles = "DescribeDBLogFiles" + +// DescribeDBLogFilesRequest generates a request for the DescribeDBLogFiles operation. 
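+//
+// A sketch of consuming this operation through its Pages helper (defined
+// below); the instance identifier and the output member names are assumptions:
+//
+//    err := svc.DescribeDBLogFilesPages(&rds.DescribeDBLogFilesInput{
+//        DBInstanceIdentifier: aws.String("example-instance"),
+//    }, func(p *rds.DescribeDBLogFilesOutput, lastPage bool) bool {
+//        for _, f := range p.DescribeDBLogFiles {
+//            fmt.Println(aws.StringValue(f.LogFileName))
+//        }
+//        return true // keep paging until the marker is exhausted
+//    })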
+func (c *RDS) DescribeDBLogFilesRequest(input *DescribeDBLogFilesInput) (req *request.Request, output *DescribeDBLogFilesOutput) { + op := &request.Operation{ + Name: opDescribeDBLogFiles, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"Marker"}, + OutputTokens: []string{"Marker"}, + LimitToken: "MaxRecords", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeDBLogFilesInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeDBLogFilesOutput{} + req.Data = output + return +} + +// Returns a list of DB log files for the DB instance. +func (c *RDS) DescribeDBLogFiles(input *DescribeDBLogFilesInput) (*DescribeDBLogFilesOutput, error) { + req, out := c.DescribeDBLogFilesRequest(input) + err := req.Send() + return out, err +} + +func (c *RDS) DescribeDBLogFilesPages(input *DescribeDBLogFilesInput, fn func(p *DescribeDBLogFilesOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.DescribeDBLogFilesRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*DescribeDBLogFilesOutput), lastPage) + }) +} + +const opDescribeDBParameterGroups = "DescribeDBParameterGroups" + +// DescribeDBParameterGroupsRequest generates a request for the DescribeDBParameterGroups operation. +func (c *RDS) DescribeDBParameterGroupsRequest(input *DescribeDBParameterGroupsInput) (req *request.Request, output *DescribeDBParameterGroupsOutput) { + op := &request.Operation{ + Name: opDescribeDBParameterGroups, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"Marker"}, + OutputTokens: []string{"Marker"}, + LimitToken: "MaxRecords", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeDBParameterGroupsInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeDBParameterGroupsOutput{} + req.Data = output + return +} + +// Returns a list of DBParameterGroup descriptions. If a DBParameterGroupName +// is specified, the list will contain only the description of the specified +// DB parameter group. +func (c *RDS) DescribeDBParameterGroups(input *DescribeDBParameterGroupsInput) (*DescribeDBParameterGroupsOutput, error) { + req, out := c.DescribeDBParameterGroupsRequest(input) + err := req.Send() + return out, err +} + +func (c *RDS) DescribeDBParameterGroupsPages(input *DescribeDBParameterGroupsInput, fn func(p *DescribeDBParameterGroupsOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.DescribeDBParameterGroupsRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*DescribeDBParameterGroupsOutput), lastPage) + }) +} + +const opDescribeDBParameters = "DescribeDBParameters" + +// DescribeDBParametersRequest generates a request for the DescribeDBParameters operation. 
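+//
+// Sketch (the group name is a placeholder; DBParameterGroupName is required
+// by the API):
+//
+//    err := svc.DescribeDBParametersPages(&rds.DescribeDBParametersInput{
+//        DBParameterGroupName: aws.String("example-params"),
+//    }, func(p *rds.DescribeDBParametersOutput, lastPage bool) bool {
+//        // ... inspect p.Parameters ...
+//        return true
+//    })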
+func (c *RDS) DescribeDBParametersRequest(input *DescribeDBParametersInput) (req *request.Request, output *DescribeDBParametersOutput) { + op := &request.Operation{ + Name: opDescribeDBParameters, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"Marker"}, + OutputTokens: []string{"Marker"}, + LimitToken: "MaxRecords", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeDBParametersInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeDBParametersOutput{} + req.Data = output + return +} + +// Returns the detailed parameter list for a particular DB parameter group. +func (c *RDS) DescribeDBParameters(input *DescribeDBParametersInput) (*DescribeDBParametersOutput, error) { + req, out := c.DescribeDBParametersRequest(input) + err := req.Send() + return out, err +} + +func (c *RDS) DescribeDBParametersPages(input *DescribeDBParametersInput, fn func(p *DescribeDBParametersOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.DescribeDBParametersRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*DescribeDBParametersOutput), lastPage) + }) +} + +const opDescribeDBSecurityGroups = "DescribeDBSecurityGroups" + +// DescribeDBSecurityGroupsRequest generates a request for the DescribeDBSecurityGroups operation. +func (c *RDS) DescribeDBSecurityGroupsRequest(input *DescribeDBSecurityGroupsInput) (req *request.Request, output *DescribeDBSecurityGroupsOutput) { + op := &request.Operation{ + Name: opDescribeDBSecurityGroups, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"Marker"}, + OutputTokens: []string{"Marker"}, + LimitToken: "MaxRecords", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeDBSecurityGroupsInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeDBSecurityGroupsOutput{} + req.Data = output + return +} + +// Returns a list of DBSecurityGroup descriptions. If a DBSecurityGroupName +// is specified, the list will contain only the descriptions of the specified +// DB security group. +func (c *RDS) DescribeDBSecurityGroups(input *DescribeDBSecurityGroupsInput) (*DescribeDBSecurityGroupsOutput, error) { + req, out := c.DescribeDBSecurityGroupsRequest(input) + err := req.Send() + return out, err +} + +func (c *RDS) DescribeDBSecurityGroupsPages(input *DescribeDBSecurityGroupsInput, fn func(p *DescribeDBSecurityGroupsOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.DescribeDBSecurityGroupsRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*DescribeDBSecurityGroupsOutput), lastPage) + }) +} + +const opDescribeDBSnapshotAttributes = "DescribeDBSnapshotAttributes" + +// DescribeDBSnapshotAttributesRequest generates a request for the DescribeDBSnapshotAttributes operation. 
+func (c *RDS) DescribeDBSnapshotAttributesRequest(input *DescribeDBSnapshotAttributesInput) (req *request.Request, output *DescribeDBSnapshotAttributesOutput) { + op := &request.Operation{ + Name: opDescribeDBSnapshotAttributes, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeDBSnapshotAttributesInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeDBSnapshotAttributesOutput{} + req.Data = output + return +} + +// Returns a list of DB snapshot attribute names and values for a manual DB +// snapshot. +// +// When sharing snapshots with other AWS accounts, DescribeDBSnapshotAttributes +// returns the restore attribute and a list of the AWS account ids that are +// authorized to copy or restore the manual DB snapshot. If all is included +// in the list of values for the restore attribute, then the manual DB snapshot +// is public and can be copied or restored by all AWS accounts. +// +// To add or remove access for an AWS account to copy or restore a manual DB +// snapshot, or to make the manual DB snapshot public or private, use the ModifyDBSnapshotAttribute +// API. +func (c *RDS) DescribeDBSnapshotAttributes(input *DescribeDBSnapshotAttributesInput) (*DescribeDBSnapshotAttributesOutput, error) { + req, out := c.DescribeDBSnapshotAttributesRequest(input) + err := req.Send() + return out, err +} + +const opDescribeDBSnapshots = "DescribeDBSnapshots" + +// DescribeDBSnapshotsRequest generates a request for the DescribeDBSnapshots operation. +func (c *RDS) DescribeDBSnapshotsRequest(input *DescribeDBSnapshotsInput) (req *request.Request, output *DescribeDBSnapshotsOutput) { + op := &request.Operation{ + Name: opDescribeDBSnapshots, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"Marker"}, + OutputTokens: []string{"Marker"}, + LimitToken: "MaxRecords", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeDBSnapshotsInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeDBSnapshotsOutput{} + req.Data = output + return +} + +// Returns information about DB snapshots. This API supports pagination. +func (c *RDS) DescribeDBSnapshots(input *DescribeDBSnapshotsInput) (*DescribeDBSnapshotsOutput, error) { + req, out := c.DescribeDBSnapshotsRequest(input) + err := req.Send() + return out, err +} + +func (c *RDS) DescribeDBSnapshotsPages(input *DescribeDBSnapshotsInput, fn func(p *DescribeDBSnapshotsOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.DescribeDBSnapshotsRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*DescribeDBSnapshotsOutput), lastPage) + }) +} + +const opDescribeDBSubnetGroups = "DescribeDBSubnetGroups" + +// DescribeDBSubnetGroupsRequest generates a request for the DescribeDBSubnetGroups operation. 
+func (c *RDS) DescribeDBSubnetGroupsRequest(input *DescribeDBSubnetGroupsInput) (req *request.Request, output *DescribeDBSubnetGroupsOutput) { + op := &request.Operation{ + Name: opDescribeDBSubnetGroups, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"Marker"}, + OutputTokens: []string{"Marker"}, + LimitToken: "MaxRecords", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeDBSubnetGroupsInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeDBSubnetGroupsOutput{} + req.Data = output + return +} + +// Returns a list of DBSubnetGroup descriptions. If a DBSubnetGroupName is specified, +// the list will contain only the descriptions of the specified DBSubnetGroup. +// +// For an overview of CIDR ranges, go to the Wikipedia Tutorial (http://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing). +func (c *RDS) DescribeDBSubnetGroups(input *DescribeDBSubnetGroupsInput) (*DescribeDBSubnetGroupsOutput, error) { + req, out := c.DescribeDBSubnetGroupsRequest(input) + err := req.Send() + return out, err +} + +func (c *RDS) DescribeDBSubnetGroupsPages(input *DescribeDBSubnetGroupsInput, fn func(p *DescribeDBSubnetGroupsOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.DescribeDBSubnetGroupsRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*DescribeDBSubnetGroupsOutput), lastPage) + }) +} + +const opDescribeEngineDefaultClusterParameters = "DescribeEngineDefaultClusterParameters" + +// DescribeEngineDefaultClusterParametersRequest generates a request for the DescribeEngineDefaultClusterParameters operation. +func (c *RDS) DescribeEngineDefaultClusterParametersRequest(input *DescribeEngineDefaultClusterParametersInput) (req *request.Request, output *DescribeEngineDefaultClusterParametersOutput) { + op := &request.Operation{ + Name: opDescribeEngineDefaultClusterParameters, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeEngineDefaultClusterParametersInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeEngineDefaultClusterParametersOutput{} + req.Data = output + return +} + +// Returns the default engine and system parameter information for the cluster +// database engine. +// +// For more information on Amazon Aurora, see Aurora on Amazon RDS (http://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/CHAP_Aurora.html) +// in the Amazon RDS User Guide. +func (c *RDS) DescribeEngineDefaultClusterParameters(input *DescribeEngineDefaultClusterParametersInput) (*DescribeEngineDefaultClusterParametersOutput, error) { + req, out := c.DescribeEngineDefaultClusterParametersRequest(input) + err := req.Send() + return out, err +} + +const opDescribeEngineDefaultParameters = "DescribeEngineDefaultParameters" + +// DescribeEngineDefaultParametersRequest generates a request for the DescribeEngineDefaultParameters operation. 
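+//
+// Unlike most Describe operations here, the continuation token is nested: the
+// paginator below reads EngineDefaults.Marker, and results arrive under the
+// EngineDefaults member. A sketch (the family value is a placeholder):
+//
+//    out, err := svc.DescribeEngineDefaultParameters(&rds.DescribeEngineDefaultParametersInput{
+//        DBParameterGroupFamily: aws.String("mysql5.6"),
+//    })
+//    // out.EngineDefaults.Parameters holds the defaults;
+//    // out.EngineDefaults.Marker continues the listing.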
+func (c *RDS) DescribeEngineDefaultParametersRequest(input *DescribeEngineDefaultParametersInput) (req *request.Request, output *DescribeEngineDefaultParametersOutput) { + op := &request.Operation{ + Name: opDescribeEngineDefaultParameters, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"Marker"}, + OutputTokens: []string{"EngineDefaults.Marker"}, + LimitToken: "MaxRecords", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeEngineDefaultParametersInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeEngineDefaultParametersOutput{} + req.Data = output + return +} + +// Returns the default engine and system parameter information for the specified +// database engine. +func (c *RDS) DescribeEngineDefaultParameters(input *DescribeEngineDefaultParametersInput) (*DescribeEngineDefaultParametersOutput, error) { + req, out := c.DescribeEngineDefaultParametersRequest(input) + err := req.Send() + return out, err +} + +func (c *RDS) DescribeEngineDefaultParametersPages(input *DescribeEngineDefaultParametersInput, fn func(p *DescribeEngineDefaultParametersOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.DescribeEngineDefaultParametersRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*DescribeEngineDefaultParametersOutput), lastPage) + }) +} + +const opDescribeEventCategories = "DescribeEventCategories" + +// DescribeEventCategoriesRequest generates a request for the DescribeEventCategories operation. +func (c *RDS) DescribeEventCategoriesRequest(input *DescribeEventCategoriesInput) (req *request.Request, output *DescribeEventCategoriesOutput) { + op := &request.Operation{ + Name: opDescribeEventCategories, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeEventCategoriesInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeEventCategoriesOutput{} + req.Data = output + return +} + +// Displays a list of categories for all event source types, or, if specified, +// for a specified source type. You can see a list of the event categories and +// source types in the Events (http://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_Events.html) +// topic in the Amazon RDS User Guide. +func (c *RDS) DescribeEventCategories(input *DescribeEventCategoriesInput) (*DescribeEventCategoriesOutput, error) { + req, out := c.DescribeEventCategoriesRequest(input) + err := req.Send() + return out, err +} + +const opDescribeEventSubscriptions = "DescribeEventSubscriptions" + +// DescribeEventSubscriptionsRequest generates a request for the DescribeEventSubscriptions operation. +func (c *RDS) DescribeEventSubscriptionsRequest(input *DescribeEventSubscriptionsInput) (req *request.Request, output *DescribeEventSubscriptionsOutput) { + op := &request.Operation{ + Name: opDescribeEventSubscriptions, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"Marker"}, + OutputTokens: []string{"Marker"}, + LimitToken: "MaxRecords", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeEventSubscriptionsInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeEventSubscriptionsOutput{} + req.Data = output + return +} + +// Lists all the subscription descriptions for a customer account. 
The description
+// for a subscription includes SubscriptionName, SNSTopicARN, CustomerID, SourceType,
+// SourceID, CreationTime, and Status.
+//
+// If you specify a SubscriptionName, only the description for that subscription
+// is returned.
+func (c *RDS) DescribeEventSubscriptions(input *DescribeEventSubscriptionsInput) (*DescribeEventSubscriptionsOutput, error) {
+ req, out := c.DescribeEventSubscriptionsRequest(input)
+ err := req.Send()
+ return out, err
+}
+
+func (c *RDS) DescribeEventSubscriptionsPages(input *DescribeEventSubscriptionsInput, fn func(p *DescribeEventSubscriptionsOutput, lastPage bool) (shouldContinue bool)) error {
+ page, _ := c.DescribeEventSubscriptionsRequest(input)
+ page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator"))
+ return page.EachPage(func(p interface{}, lastPage bool) bool {
+ return fn(p.(*DescribeEventSubscriptionsOutput), lastPage)
+ })
+}
+
+const opDescribeEvents = "DescribeEvents"
+
+// DescribeEventsRequest generates a request for the DescribeEvents operation.
+func (c *RDS) DescribeEventsRequest(input *DescribeEventsInput) (req *request.Request, output *DescribeEventsOutput) {
+ op := &request.Operation{
+ Name: opDescribeEvents,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ Paginator: &request.Paginator{
+ InputTokens: []string{"Marker"},
+ OutputTokens: []string{"Marker"},
+ LimitToken: "MaxRecords",
+ TruncationToken: "",
+ },
+ }
+
+ if input == nil {
+ input = &DescribeEventsInput{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &DescribeEventsOutput{}
+ req.Data = output
+ return
+}
+
+// Returns events related to DB instances, DB security groups, DB snapshots,
+// and DB parameter groups for the past 14 days. Events specific to a particular
+// DB instance, DB security group, database snapshot, or DB parameter group
+// can be obtained by providing the name as a parameter. By default, events
+// from the past hour are returned.
+func (c *RDS) DescribeEvents(input *DescribeEventsInput) (*DescribeEventsOutput, error) {
+ req, out := c.DescribeEventsRequest(input)
+ err := req.Send()
+ return out, err
+}
+
+func (c *RDS) DescribeEventsPages(input *DescribeEventsInput, fn func(p *DescribeEventsOutput, lastPage bool) (shouldContinue bool)) error {
+ page, _ := c.DescribeEventsRequest(input)
+ page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator"))
+ return page.EachPage(func(p interface{}, lastPage bool) bool {
+ return fn(p.(*DescribeEventsOutput), lastPage)
+ })
+}
+
+const opDescribeOptionGroupOptions = "DescribeOptionGroupOptions"
+
+// DescribeOptionGroupOptionsRequest generates a request for the DescribeOptionGroupOptions operation.
+func (c *RDS) DescribeOptionGroupOptionsRequest(input *DescribeOptionGroupOptionsInput) (req *request.Request, output *DescribeOptionGroupOptionsOutput) {
+ op := &request.Operation{
+ Name: opDescribeOptionGroupOptions,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ Paginator: &request.Paginator{
+ InputTokens: []string{"Marker"},
+ OutputTokens: []string{"Marker"},
+ LimitToken: "MaxRecords",
+ TruncationToken: "",
+ },
+ }
+
+ if input == nil {
+ input = &DescribeOptionGroupOptionsInput{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &DescribeOptionGroupOptionsOutput{}
+ req.Data = output
+ return
+}
+
+// Describes all available options.
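+//
+// Illustrative call; EngineName is required by the API and the value here is
+// a placeholder:
+//
+//    out, err := svc.DescribeOptionGroupOptions(&rds.DescribeOptionGroupOptionsInput{
+//        EngineName: aws.String("mysql"),
+//    })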
+func (c *RDS) DescribeOptionGroupOptions(input *DescribeOptionGroupOptionsInput) (*DescribeOptionGroupOptionsOutput, error) { + req, out := c.DescribeOptionGroupOptionsRequest(input) + err := req.Send() + return out, err +} + +func (c *RDS) DescribeOptionGroupOptionsPages(input *DescribeOptionGroupOptionsInput, fn func(p *DescribeOptionGroupOptionsOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.DescribeOptionGroupOptionsRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*DescribeOptionGroupOptionsOutput), lastPage) + }) +} + +const opDescribeOptionGroups = "DescribeOptionGroups" + +// DescribeOptionGroupsRequest generates a request for the DescribeOptionGroups operation. +func (c *RDS) DescribeOptionGroupsRequest(input *DescribeOptionGroupsInput) (req *request.Request, output *DescribeOptionGroupsOutput) { + op := &request.Operation{ + Name: opDescribeOptionGroups, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"Marker"}, + OutputTokens: []string{"Marker"}, + LimitToken: "MaxRecords", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeOptionGroupsInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeOptionGroupsOutput{} + req.Data = output + return +} + +// Describes the available option groups. +func (c *RDS) DescribeOptionGroups(input *DescribeOptionGroupsInput) (*DescribeOptionGroupsOutput, error) { + req, out := c.DescribeOptionGroupsRequest(input) + err := req.Send() + return out, err +} + +func (c *RDS) DescribeOptionGroupsPages(input *DescribeOptionGroupsInput, fn func(p *DescribeOptionGroupsOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.DescribeOptionGroupsRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*DescribeOptionGroupsOutput), lastPage) + }) +} + +const opDescribeOrderableDBInstanceOptions = "DescribeOrderableDBInstanceOptions" + +// DescribeOrderableDBInstanceOptionsRequest generates a request for the DescribeOrderableDBInstanceOptions operation. +func (c *RDS) DescribeOrderableDBInstanceOptionsRequest(input *DescribeOrderableDBInstanceOptionsInput) (req *request.Request, output *DescribeOrderableDBInstanceOptionsOutput) { + op := &request.Operation{ + Name: opDescribeOrderableDBInstanceOptions, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"Marker"}, + OutputTokens: []string{"Marker"}, + LimitToken: "MaxRecords", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeOrderableDBInstanceOptionsInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeOrderableDBInstanceOptionsOutput{} + req.Data = output + return +} + +// Returns a list of orderable DB instance options for the specified engine. 
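+//
+// A sketch (the engine value is a placeholder; Engine is the required field):
+//
+//    out, err := svc.DescribeOrderableDBInstanceOptions(&rds.DescribeOrderableDBInstanceOptionsInput{
+//        Engine: aws.String("postgres"),
+//    })
+//    // out.OrderableDBInstanceOptions lists the instance class / AZ combinations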
+func (c *RDS) DescribeOrderableDBInstanceOptions(input *DescribeOrderableDBInstanceOptionsInput) (*DescribeOrderableDBInstanceOptionsOutput, error) { + req, out := c.DescribeOrderableDBInstanceOptionsRequest(input) + err := req.Send() + return out, err +} + +func (c *RDS) DescribeOrderableDBInstanceOptionsPages(input *DescribeOrderableDBInstanceOptionsInput, fn func(p *DescribeOrderableDBInstanceOptionsOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.DescribeOrderableDBInstanceOptionsRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*DescribeOrderableDBInstanceOptionsOutput), lastPage) + }) +} + +const opDescribePendingMaintenanceActions = "DescribePendingMaintenanceActions" + +// DescribePendingMaintenanceActionsRequest generates a request for the DescribePendingMaintenanceActions operation. +func (c *RDS) DescribePendingMaintenanceActionsRequest(input *DescribePendingMaintenanceActionsInput) (req *request.Request, output *DescribePendingMaintenanceActionsOutput) { + op := &request.Operation{ + Name: opDescribePendingMaintenanceActions, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribePendingMaintenanceActionsInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribePendingMaintenanceActionsOutput{} + req.Data = output + return +} + +// Returns a list of resources (for example, DB instances) that have at least +// one pending maintenance action. +func (c *RDS) DescribePendingMaintenanceActions(input *DescribePendingMaintenanceActionsInput) (*DescribePendingMaintenanceActionsOutput, error) { + req, out := c.DescribePendingMaintenanceActionsRequest(input) + err := req.Send() + return out, err +} + +const opDescribeReservedDBInstances = "DescribeReservedDBInstances" + +// DescribeReservedDBInstancesRequest generates a request for the DescribeReservedDBInstances operation. +func (c *RDS) DescribeReservedDBInstancesRequest(input *DescribeReservedDBInstancesInput) (req *request.Request, output *DescribeReservedDBInstancesOutput) { + op := &request.Operation{ + Name: opDescribeReservedDBInstances, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"Marker"}, + OutputTokens: []string{"Marker"}, + LimitToken: "MaxRecords", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeReservedDBInstancesInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeReservedDBInstancesOutput{} + req.Data = output + return +} + +// Returns information about reserved DB instances for this account, or about +// a specified reserved DB instance. 
+func (c *RDS) DescribeReservedDBInstances(input *DescribeReservedDBInstancesInput) (*DescribeReservedDBInstancesOutput, error) { + req, out := c.DescribeReservedDBInstancesRequest(input) + err := req.Send() + return out, err +} + +func (c *RDS) DescribeReservedDBInstancesPages(input *DescribeReservedDBInstancesInput, fn func(p *DescribeReservedDBInstancesOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.DescribeReservedDBInstancesRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*DescribeReservedDBInstancesOutput), lastPage) + }) +} + +const opDescribeReservedDBInstancesOfferings = "DescribeReservedDBInstancesOfferings" + +// DescribeReservedDBInstancesOfferingsRequest generates a request for the DescribeReservedDBInstancesOfferings operation. +func (c *RDS) DescribeReservedDBInstancesOfferingsRequest(input *DescribeReservedDBInstancesOfferingsInput) (req *request.Request, output *DescribeReservedDBInstancesOfferingsOutput) { + op := &request.Operation{ + Name: opDescribeReservedDBInstancesOfferings, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"Marker"}, + OutputTokens: []string{"Marker"}, + LimitToken: "MaxRecords", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeReservedDBInstancesOfferingsInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeReservedDBInstancesOfferingsOutput{} + req.Data = output + return +} + +// Lists available reserved DB instance offerings. +func (c *RDS) DescribeReservedDBInstancesOfferings(input *DescribeReservedDBInstancesOfferingsInput) (*DescribeReservedDBInstancesOfferingsOutput, error) { + req, out := c.DescribeReservedDBInstancesOfferingsRequest(input) + err := req.Send() + return out, err +} + +func (c *RDS) DescribeReservedDBInstancesOfferingsPages(input *DescribeReservedDBInstancesOfferingsInput, fn func(p *DescribeReservedDBInstancesOfferingsOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.DescribeReservedDBInstancesOfferingsRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*DescribeReservedDBInstancesOfferingsOutput), lastPage) + }) +} + +const opDownloadDBLogFilePortion = "DownloadDBLogFilePortion" + +// DownloadDBLogFilePortionRequest generates a request for the DownloadDBLogFilePortion operation. +func (c *RDS) DownloadDBLogFilePortionRequest(input *DownloadDBLogFilePortionInput) (req *request.Request, output *DownloadDBLogFilePortionOutput) { + op := &request.Operation{ + Name: opDownloadDBLogFilePortion, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"Marker"}, + OutputTokens: []string{"Marker"}, + LimitToken: "NumberOfLines", + TruncationToken: "AdditionalDataPending", + }, + } + + if input == nil { + input = &DownloadDBLogFilePortionInput{} + } + + req = c.newRequest(op, input, output) + output = &DownloadDBLogFilePortionOutput{} + req.Data = output + return +} + +// Downloads all or a portion of the specified log file, up to 1 MB in size. 
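+//
+// A sketch of streaming a log file through the Pages helper defined below;
+// the identifiers are placeholders, and paging continues while the service
+// reports AdditionalDataPending:
+//
+//    err := svc.DownloadDBLogFilePortionPages(&rds.DownloadDBLogFilePortionInput{
+//        DBInstanceIdentifier: aws.String("example-instance"),
+//        LogFileName:          aws.String("error/mysql-error.log"),
+//    }, func(p *rds.DownloadDBLogFilePortionOutput, lastPage bool) bool {
+//        fmt.Print(aws.StringValue(p.LogFileData))
+//        return !lastPage
+//    })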
+func (c *RDS) DownloadDBLogFilePortion(input *DownloadDBLogFilePortionInput) (*DownloadDBLogFilePortionOutput, error) { + req, out := c.DownloadDBLogFilePortionRequest(input) + err := req.Send() + return out, err +} + +func (c *RDS) DownloadDBLogFilePortionPages(input *DownloadDBLogFilePortionInput, fn func(p *DownloadDBLogFilePortionOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.DownloadDBLogFilePortionRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*DownloadDBLogFilePortionOutput), lastPage) + }) +} + +const opFailoverDBCluster = "FailoverDBCluster" + +// FailoverDBClusterRequest generates a request for the FailoverDBCluster operation. +func (c *RDS) FailoverDBClusterRequest(input *FailoverDBClusterInput) (req *request.Request, output *FailoverDBClusterOutput) { + op := &request.Operation{ + Name: opFailoverDBCluster, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &FailoverDBClusterInput{} + } + + req = c.newRequest(op, input, output) + output = &FailoverDBClusterOutput{} + req.Data = output + return +} + +// Forces a failover for a DB cluster. +// +// A failover for a DB cluster promotes one of the read-only instances in the +// DB cluster to the master DB instance (the cluster writer) and deletes the +// current primary instance. +// +// Amazon Aurora will automatically fail over to a read-only instance, if one +// exists, when the primary instance fails. You can force a failover when you +// want to simulate a failure of a DB instance for testing. Because each instance +// in a DB cluster has its own endpoint address, you will need to clean up and +// re-establish any existing connections that use those endpoint addresses when +// the failover is complete. +// +// For more information on Amazon Aurora, see Aurora on Amazon RDS (http://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/CHAP_Aurora.html) +// in the Amazon RDS User Guide. +func (c *RDS) FailoverDBCluster(input *FailoverDBClusterInput) (*FailoverDBClusterOutput, error) { + req, out := c.FailoverDBClusterRequest(input) + err := req.Send() + return out, err +} + +const opListTagsForResource = "ListTagsForResource" + +// ListTagsForResourceRequest generates a request for the ListTagsForResource operation. +func (c *RDS) ListTagsForResourceRequest(input *ListTagsForResourceInput) (req *request.Request, output *ListTagsForResourceOutput) { + op := &request.Operation{ + Name: opListTagsForResource, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ListTagsForResourceInput{} + } + + req = c.newRequest(op, input, output) + output = &ListTagsForResourceOutput{} + req.Data = output + return +} + +// Lists all tags on an Amazon RDS resource. +// +// For an overview on tagging an Amazon RDS resource, see Tagging Amazon RDS +// Resources (http://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Overview.Tagging.html). +func (c *RDS) ListTagsForResource(input *ListTagsForResourceInput) (*ListTagsForResourceOutput, error) { + req, out := c.ListTagsForResourceRequest(input) + err := req.Send() + return out, err +} + +const opModifyDBCluster = "ModifyDBCluster" + +// ModifyDBClusterRequest generates a request for the ModifyDBCluster operation. 
+func (c *RDS) ModifyDBClusterRequest(input *ModifyDBClusterInput) (req *request.Request, output *ModifyDBClusterOutput) { + op := &request.Operation{ + Name: opModifyDBCluster, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ModifyDBClusterInput{} + } + + req = c.newRequest(op, input, output) + output = &ModifyDBClusterOutput{} + req.Data = output + return +} + +// Modify a setting for an Amazon Aurora DB cluster. You can change one or more +// database configuration parameters by specifying these parameters and the +// new values in the request. For more information on Amazon Aurora, see Aurora +// on Amazon RDS (http://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/CHAP_Aurora.html) +// in the Amazon RDS User Guide. +func (c *RDS) ModifyDBCluster(input *ModifyDBClusterInput) (*ModifyDBClusterOutput, error) { + req, out := c.ModifyDBClusterRequest(input) + err := req.Send() + return out, err +} + +const opModifyDBClusterParameterGroup = "ModifyDBClusterParameterGroup" + +// ModifyDBClusterParameterGroupRequest generates a request for the ModifyDBClusterParameterGroup operation. +func (c *RDS) ModifyDBClusterParameterGroupRequest(input *ModifyDBClusterParameterGroupInput) (req *request.Request, output *DBClusterParameterGroupNameMessage) { + op := &request.Operation{ + Name: opModifyDBClusterParameterGroup, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ModifyDBClusterParameterGroupInput{} + } + + req = c.newRequest(op, input, output) + output = &DBClusterParameterGroupNameMessage{} + req.Data = output + return +} + +// Modifies the parameters of a DB cluster parameter group. To modify more than +// one parameter, submit a list of the following: ParameterName, ParameterValue, +// and ApplyMethod. A maximum of 20 parameters can be modified in a single request. +// +// For more information on Amazon Aurora, see Aurora on Amazon RDS (http://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/CHAP_Aurora.html) +// in the Amazon RDS User Guide. +// +// Changes to dynamic parameters are applied immediately. Changes to static +// parameters require a reboot without failover to the DB cluster associated +// with the parameter group before the change can take effect. +// +// After you create a DB cluster parameter group, you should wait at least +// 5 minutes before creating your first DB cluster that uses that DB cluster +// parameter group as the default parameter group. This allows Amazon RDS to +// fully complete the create action before the parameter group is used as the +// default for a new DB cluster. This is especially important for parameters +// that are critical when creating the default database for a DB cluster, such +// as the character set for the default database defined by the character_set_database +// parameter. You can use the Parameter Groups option of the Amazon RDS console +// (https://console.aws.amazon.com/rds/) or the DescribeDBClusterParameters +// command to verify that your DB cluster parameter group has been created or +// modified. +func (c *RDS) ModifyDBClusterParameterGroup(input *ModifyDBClusterParameterGroupInput) (*DBClusterParameterGroupNameMessage, error) { + req, out := c.ModifyDBClusterParameterGroupRequest(input) + err := req.Send() + return out, err +} + +const opModifyDBInstance = "ModifyDBInstance" + +// ModifyDBInstanceRequest generates a request for the ModifyDBInstance operation. 
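+//
+// A sketch (identifier and class are placeholders): without ApplyImmediately,
+// changes such as a new instance class wait for the next maintenance window.
+//
+//    out, err := svc.ModifyDBInstance(&rds.ModifyDBInstanceInput{
+//        DBInstanceIdentifier: aws.String("example-instance"),
+//        DBInstanceClass:      aws.String("db.m4.large"),
+//        ApplyImmediately:     aws.Bool(true),
+//    })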
+func (c *RDS) ModifyDBInstanceRequest(input *ModifyDBInstanceInput) (req *request.Request, output *ModifyDBInstanceOutput) { + op := &request.Operation{ + Name: opModifyDBInstance, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ModifyDBInstanceInput{} + } + + req = c.newRequest(op, input, output) + output = &ModifyDBInstanceOutput{} + req.Data = output + return +} + +// Modify settings for a DB instance. You can change one or more database configuration +// parameters by specifying these parameters and the new values in the request. +func (c *RDS) ModifyDBInstance(input *ModifyDBInstanceInput) (*ModifyDBInstanceOutput, error) { + req, out := c.ModifyDBInstanceRequest(input) + err := req.Send() + return out, err +} + +const opModifyDBParameterGroup = "ModifyDBParameterGroup" + +// ModifyDBParameterGroupRequest generates a request for the ModifyDBParameterGroup operation. +func (c *RDS) ModifyDBParameterGroupRequest(input *ModifyDBParameterGroupInput) (req *request.Request, output *DBParameterGroupNameMessage) { + op := &request.Operation{ + Name: opModifyDBParameterGroup, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ModifyDBParameterGroupInput{} + } + + req = c.newRequest(op, input, output) + output = &DBParameterGroupNameMessage{} + req.Data = output + return +} + +// Modifies the parameters of a DB parameter group. To modify more than one +// parameter, submit a list of the following: ParameterName, ParameterValue, +// and ApplyMethod. A maximum of 20 parameters can be modified in a single request. +// +// Changes to dynamic parameters are applied immediately. Changes to static +// parameters require a reboot without failover to the DB instance associated +// with the parameter group before the change can take effect. +// +// After you modify a DB parameter group, you should wait at least 5 minutes +// before creating your first DB instance that uses that DB parameter group +// as the default parameter group. This allows Amazon RDS to fully complete +// the modify action before the parameter group is used as the default for a +// new DB instance. This is especially important for parameters that are critical +// when creating the default database for a DB instance, such as the character +// set for the default database defined by the character_set_database parameter. +// You can use the Parameter Groups option of the Amazon RDS console (https://console.aws.amazon.com/rds/) +// or the DescribeDBParameters command to verify that your DB parameter group +// has been created or modified. +func (c *RDS) ModifyDBParameterGroup(input *ModifyDBParameterGroupInput) (*DBParameterGroupNameMessage, error) { + req, out := c.ModifyDBParameterGroupRequest(input) + err := req.Send() + return out, err +} + +const opModifyDBSnapshotAttribute = "ModifyDBSnapshotAttribute" + +// ModifyDBSnapshotAttributeRequest generates a request for the ModifyDBSnapshotAttribute operation. +func (c *RDS) ModifyDBSnapshotAttributeRequest(input *ModifyDBSnapshotAttributeInput) (req *request.Request, output *ModifyDBSnapshotAttributeOutput) { + op := &request.Operation{ + Name: opModifyDBSnapshotAttribute, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ModifyDBSnapshotAttributeInput{} + } + + req = c.newRequest(op, input, output) + output = &ModifyDBSnapshotAttributeOutput{} + req.Data = output + return +} + +// Adds an attribute and values to, or removes an attribute and values from +// a manual DB snapshot. 
+//
+// To share a manual DB snapshot with other AWS accounts, specify restore as
+// the AttributeName and use the ValuesToAdd parameter to add a list of the
+// AWS account IDs that are authorized to restore the manual DB snapshot. Use
+// the value all to make the manual DB snapshot public, which means that it
+// can be copied or restored by all AWS accounts. Do not add the all value for
+// any manual DB snapshots that contain private information that you do not
+// want to be available to all AWS accounts.
+//
+// To view which AWS accounts have access to copy or restore a manual DB snapshot,
+// or whether a manual DB snapshot is public or private, use the DescribeDBSnapshotAttributes
+// API.
+//
+// If the manual DB snapshot is encrypted, it cannot be shared.
+func (c *RDS) ModifyDBSnapshotAttribute(input *ModifyDBSnapshotAttributeInput) (*ModifyDBSnapshotAttributeOutput, error) {
+	req, out := c.ModifyDBSnapshotAttributeRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+const opModifyDBSubnetGroup = "ModifyDBSubnetGroup"
+
+// ModifyDBSubnetGroupRequest generates a request for the ModifyDBSubnetGroup operation.
+func (c *RDS) ModifyDBSubnetGroupRequest(input *ModifyDBSubnetGroupInput) (req *request.Request, output *ModifyDBSubnetGroupOutput) {
+	op := &request.Operation{
+		Name:       opModifyDBSubnetGroup,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &ModifyDBSubnetGroupInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &ModifyDBSubnetGroupOutput{}
+	req.Data = output
+	return
+}
+
+// Modifies an existing DB subnet group. DB subnet groups must contain at least
+// one subnet in at least two AZs in the region.
+func (c *RDS) ModifyDBSubnetGroup(input *ModifyDBSubnetGroupInput) (*ModifyDBSubnetGroupOutput, error) {
+	req, out := c.ModifyDBSubnetGroupRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+const opModifyEventSubscription = "ModifyEventSubscription"
+
+// ModifyEventSubscriptionRequest generates a request for the ModifyEventSubscription operation.
+func (c *RDS) ModifyEventSubscriptionRequest(input *ModifyEventSubscriptionInput) (req *request.Request, output *ModifyEventSubscriptionOutput) {
+	op := &request.Operation{
+		Name:       opModifyEventSubscription,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &ModifyEventSubscriptionInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &ModifyEventSubscriptionOutput{}
+	req.Data = output
+	return
+}
+
+// Modifies an existing RDS event notification subscription. Note that you cannot
+// modify the source identifiers using this call; to change source identifiers
+// for a subscription, use the AddSourceIdentifierToSubscription and RemoveSourceIdentifierFromSubscription
+// calls.
+//
+// You can see a list of the event categories for a given SourceType in the
+// Events (http://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_Events.html)
+// topic in the Amazon RDS User Guide or by using the DescribeEventCategories
+// action.
+func (c *RDS) ModifyEventSubscription(input *ModifyEventSubscriptionInput) (*ModifyEventSubscriptionOutput, error) {
+	req, out := c.ModifyEventSubscriptionRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+const opModifyOptionGroup = "ModifyOptionGroup"
+
+// ModifyOptionGroupRequest generates a request for the ModifyOptionGroup operation.
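+//
+// Example (sketch; same assumptions as the earlier examples, with MEMCACHED
+// as a stand-in option name):
+//
+//	req, resp := svc.ModifyOptionGroupRequest(&rds.ModifyOptionGroupInput{
+//		OptionGroupName:  aws.String("my-option-group"),
+//		OptionsToRemove:  []*string{aws.String("MEMCACHED")},
+//		ApplyImmediately: aws.Bool(true),
+//	})
+//	if err := req.Send(); err == nil {
+//		fmt.Println(resp)
+//	}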
+func (c *RDS) ModifyOptionGroupRequest(input *ModifyOptionGroupInput) (req *request.Request, output *ModifyOptionGroupOutput) { + op := &request.Operation{ + Name: opModifyOptionGroup, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ModifyOptionGroupInput{} + } + + req = c.newRequest(op, input, output) + output = &ModifyOptionGroupOutput{} + req.Data = output + return +} + +// Modifies an existing option group. +func (c *RDS) ModifyOptionGroup(input *ModifyOptionGroupInput) (*ModifyOptionGroupOutput, error) { + req, out := c.ModifyOptionGroupRequest(input) + err := req.Send() + return out, err +} + +const opPromoteReadReplica = "PromoteReadReplica" + +// PromoteReadReplicaRequest generates a request for the PromoteReadReplica operation. +func (c *RDS) PromoteReadReplicaRequest(input *PromoteReadReplicaInput) (req *request.Request, output *PromoteReadReplicaOutput) { + op := &request.Operation{ + Name: opPromoteReadReplica, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &PromoteReadReplicaInput{} + } + + req = c.newRequest(op, input, output) + output = &PromoteReadReplicaOutput{} + req.Data = output + return +} + +// Promotes a Read Replica DB instance to a standalone DB instance. +// +// We recommend that you enable automated backups on your Read Replica before +// promoting the Read Replica. This ensures that no backup is taken during the +// promotion process. Once the instance is promoted to a primary instance, backups +// are taken based on your backup settings. +func (c *RDS) PromoteReadReplica(input *PromoteReadReplicaInput) (*PromoteReadReplicaOutput, error) { + req, out := c.PromoteReadReplicaRequest(input) + err := req.Send() + return out, err +} + +const opPurchaseReservedDBInstancesOffering = "PurchaseReservedDBInstancesOffering" + +// PurchaseReservedDBInstancesOfferingRequest generates a request for the PurchaseReservedDBInstancesOffering operation. +func (c *RDS) PurchaseReservedDBInstancesOfferingRequest(input *PurchaseReservedDBInstancesOfferingInput) (req *request.Request, output *PurchaseReservedDBInstancesOfferingOutput) { + op := &request.Operation{ + Name: opPurchaseReservedDBInstancesOffering, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &PurchaseReservedDBInstancesOfferingInput{} + } + + req = c.newRequest(op, input, output) + output = &PurchaseReservedDBInstancesOfferingOutput{} + req.Data = output + return +} + +// Purchases a reserved DB instance offering. +func (c *RDS) PurchaseReservedDBInstancesOffering(input *PurchaseReservedDBInstancesOfferingInput) (*PurchaseReservedDBInstancesOfferingOutput, error) { + req, out := c.PurchaseReservedDBInstancesOfferingRequest(input) + err := req.Send() + return out, err +} + +const opRebootDBInstance = "RebootDBInstance" + +// RebootDBInstanceRequest generates a request for the RebootDBInstance operation. +func (c *RDS) RebootDBInstanceRequest(input *RebootDBInstanceInput) (req *request.Request, output *RebootDBInstanceOutput) { + op := &request.Operation{ + Name: opRebootDBInstance, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &RebootDBInstanceInput{} + } + + req = c.newRequest(op, input, output) + output = &RebootDBInstanceOutput{} + req.Data = output + return +} + +// Rebooting a DB instance restarts the database engine service. A reboot also +// applies to the DB instance any modifications to the associated DB parameter +// group that were pending. 
Rebooting a DB instance results in a momentary outage +// of the instance, during which the DB instance status is set to rebooting. +// If the RDS instance is configured for MultiAZ, it is possible that the reboot +// will be conducted through a failover. An Amazon RDS event is created when +// the reboot is completed. +// +// If your DB instance is deployed in multiple Availability Zones, you can +// force a failover from one AZ to the other during the reboot. You might force +// a failover to test the availability of your DB instance deployment or to +// restore operations to the original AZ after a failover occurs. +// +// The time required to reboot is a function of the specific database engine's +// crash recovery process. To improve the reboot time, we recommend that you +// reduce database activities as much as possible during the reboot process +// to reduce rollback activity for in-transit transactions. +func (c *RDS) RebootDBInstance(input *RebootDBInstanceInput) (*RebootDBInstanceOutput, error) { + req, out := c.RebootDBInstanceRequest(input) + err := req.Send() + return out, err +} + +const opRemoveSourceIdentifierFromSubscription = "RemoveSourceIdentifierFromSubscription" + +// RemoveSourceIdentifierFromSubscriptionRequest generates a request for the RemoveSourceIdentifierFromSubscription operation. +func (c *RDS) RemoveSourceIdentifierFromSubscriptionRequest(input *RemoveSourceIdentifierFromSubscriptionInput) (req *request.Request, output *RemoveSourceIdentifierFromSubscriptionOutput) { + op := &request.Operation{ + Name: opRemoveSourceIdentifierFromSubscription, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &RemoveSourceIdentifierFromSubscriptionInput{} + } + + req = c.newRequest(op, input, output) + output = &RemoveSourceIdentifierFromSubscriptionOutput{} + req.Data = output + return +} + +// Removes a source identifier from an existing RDS event notification subscription. +func (c *RDS) RemoveSourceIdentifierFromSubscription(input *RemoveSourceIdentifierFromSubscriptionInput) (*RemoveSourceIdentifierFromSubscriptionOutput, error) { + req, out := c.RemoveSourceIdentifierFromSubscriptionRequest(input) + err := req.Send() + return out, err +} + +const opRemoveTagsFromResource = "RemoveTagsFromResource" + +// RemoveTagsFromResourceRequest generates a request for the RemoveTagsFromResource operation. +func (c *RDS) RemoveTagsFromResourceRequest(input *RemoveTagsFromResourceInput) (req *request.Request, output *RemoveTagsFromResourceOutput) { + op := &request.Operation{ + Name: opRemoveTagsFromResource, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &RemoveTagsFromResourceInput{} + } + + req = c.newRequest(op, input, output) + output = &RemoveTagsFromResourceOutput{} + req.Data = output + return +} + +// Removes metadata tags from an Amazon RDS resource. +// +// For an overview on tagging an Amazon RDS resource, see Tagging Amazon RDS +// Resources (http://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Overview.Tagging.html). +func (c *RDS) RemoveTagsFromResource(input *RemoveTagsFromResourceInput) (*RemoveTagsFromResourceOutput, error) { + req, out := c.RemoveTagsFromResourceRequest(input) + err := req.Send() + return out, err +} + +const opResetDBClusterParameterGroup = "ResetDBClusterParameterGroup" + +// ResetDBClusterParameterGroupRequest generates a request for the ResetDBClusterParameterGroup operation. 
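+//
+// Example (sketch; same assumptions as the earlier examples): resetting every
+// parameter in a cluster parameter group back to its default.
+//
+//	req, resp := svc.ResetDBClusterParameterGroupRequest(&rds.ResetDBClusterParameterGroupInput{
+//		DBClusterParameterGroupName: aws.String("my-cluster-param-group"),
+//		ResetAllParameters:          aws.Bool(true),
+//	})
+//	if err := req.Send(); err == nil {
+//		fmt.Println(resp)
+//	}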
+func (c *RDS) ResetDBClusterParameterGroupRequest(input *ResetDBClusterParameterGroupInput) (req *request.Request, output *DBClusterParameterGroupNameMessage) { + op := &request.Operation{ + Name: opResetDBClusterParameterGroup, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ResetDBClusterParameterGroupInput{} + } + + req = c.newRequest(op, input, output) + output = &DBClusterParameterGroupNameMessage{} + req.Data = output + return +} + +// Modifies the parameters of a DB cluster parameter group to the default value. +// To reset specific parameters submit a list of the following: ParameterName +// and ApplyMethod. To reset the entire DB cluster parameter group, specify +// the DBClusterParameterGroupName and ResetAllParameters parameters. +// +// When resetting the entire group, dynamic parameters are updated immediately +// and static parameters are set to pending-reboot to take effect on the next +// DB instance restart or RebootDBInstance request. You must call RebootDBInstance +// for every DB instance in your DB cluster that you want the updated static +// parameter to apply to. +// +// For more information on Amazon Aurora, see Aurora on Amazon RDS (http://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/CHAP_Aurora.html) +// in the Amazon RDS User Guide. +func (c *RDS) ResetDBClusterParameterGroup(input *ResetDBClusterParameterGroupInput) (*DBClusterParameterGroupNameMessage, error) { + req, out := c.ResetDBClusterParameterGroupRequest(input) + err := req.Send() + return out, err +} + +const opResetDBParameterGroup = "ResetDBParameterGroup" + +// ResetDBParameterGroupRequest generates a request for the ResetDBParameterGroup operation. +func (c *RDS) ResetDBParameterGroupRequest(input *ResetDBParameterGroupInput) (req *request.Request, output *DBParameterGroupNameMessage) { + op := &request.Operation{ + Name: opResetDBParameterGroup, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ResetDBParameterGroupInput{} + } + + req = c.newRequest(op, input, output) + output = &DBParameterGroupNameMessage{} + req.Data = output + return +} + +// Modifies the parameters of a DB parameter group to the engine/system default +// value. To reset specific parameters submit a list of the following: ParameterName +// and ApplyMethod. To reset the entire DB parameter group, specify the DBParameterGroup +// name and ResetAllParameters parameters. When resetting the entire group, +// dynamic parameters are updated immediately and static parameters are set +// to pending-reboot to take effect on the next DB instance restart or RebootDBInstance +// request. +func (c *RDS) ResetDBParameterGroup(input *ResetDBParameterGroupInput) (*DBParameterGroupNameMessage, error) { + req, out := c.ResetDBParameterGroupRequest(input) + err := req.Send() + return out, err +} + +const opRestoreDBClusterFromSnapshot = "RestoreDBClusterFromSnapshot" + +// RestoreDBClusterFromSnapshotRequest generates a request for the RestoreDBClusterFromSnapshot operation. 
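+//
+// Example (sketch; same assumptions as the earlier examples):
+//
+//	req, resp := svc.RestoreDBClusterFromSnapshotRequest(&rds.RestoreDBClusterFromSnapshotInput{
+//		DBClusterIdentifier: aws.String("my-restored-cluster"),
+//		SnapshotIdentifier:  aws.String("my-cluster-snapshot1"),
+//		Engine:              aws.String("aurora"),
+//	})
+//	if err := req.Send(); err == nil {
+//		fmt.Println(resp)
+//	}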
+func (c *RDS) RestoreDBClusterFromSnapshotRequest(input *RestoreDBClusterFromSnapshotInput) (req *request.Request, output *RestoreDBClusterFromSnapshotOutput) { + op := &request.Operation{ + Name: opRestoreDBClusterFromSnapshot, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &RestoreDBClusterFromSnapshotInput{} + } + + req = c.newRequest(op, input, output) + output = &RestoreDBClusterFromSnapshotOutput{} + req.Data = output + return +} + +// Creates a new DB cluster from a DB cluster snapshot. The target DB cluster +// is created from the source DB cluster restore point with the same configuration +// as the original source DB cluster, except that the new DB cluster is created +// with the default security group. +// +// For more information on Amazon Aurora, see Aurora on Amazon RDS (http://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/CHAP_Aurora.html) +// in the Amazon RDS User Guide. +func (c *RDS) RestoreDBClusterFromSnapshot(input *RestoreDBClusterFromSnapshotInput) (*RestoreDBClusterFromSnapshotOutput, error) { + req, out := c.RestoreDBClusterFromSnapshotRequest(input) + err := req.Send() + return out, err +} + +const opRestoreDBClusterToPointInTime = "RestoreDBClusterToPointInTime" + +// RestoreDBClusterToPointInTimeRequest generates a request for the RestoreDBClusterToPointInTime operation. +func (c *RDS) RestoreDBClusterToPointInTimeRequest(input *RestoreDBClusterToPointInTimeInput) (req *request.Request, output *RestoreDBClusterToPointInTimeOutput) { + op := &request.Operation{ + Name: opRestoreDBClusterToPointInTime, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &RestoreDBClusterToPointInTimeInput{} + } + + req = c.newRequest(op, input, output) + output = &RestoreDBClusterToPointInTimeOutput{} + req.Data = output + return +} + +// Restores a DB cluster to an arbitrary point in time. Users can restore to +// any point in time before LatestRestorableTime for up to BackupRetentionPeriod +// days. The target DB cluster is created from the source DB cluster with the +// same configuration as the original DB cluster, except that the new DB cluster +// is created with the default DB security group. +// +// For more information on Amazon Aurora, see Aurora on Amazon RDS (http://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/CHAP_Aurora.html) +// in the Amazon RDS User Guide. +func (c *RDS) RestoreDBClusterToPointInTime(input *RestoreDBClusterToPointInTimeInput) (*RestoreDBClusterToPointInTimeOutput, error) { + req, out := c.RestoreDBClusterToPointInTimeRequest(input) + err := req.Send() + return out, err +} + +const opRestoreDBInstanceFromDBSnapshot = "RestoreDBInstanceFromDBSnapshot" + +// RestoreDBInstanceFromDBSnapshotRequest generates a request for the RestoreDBInstanceFromDBSnapshot operation. +func (c *RDS) RestoreDBInstanceFromDBSnapshotRequest(input *RestoreDBInstanceFromDBSnapshotInput) (req *request.Request, output *RestoreDBInstanceFromDBSnapshotOutput) { + op := &request.Operation{ + Name: opRestoreDBInstanceFromDBSnapshot, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &RestoreDBInstanceFromDBSnapshotInput{} + } + + req = c.newRequest(op, input, output) + output = &RestoreDBInstanceFromDBSnapshotOutput{} + req.Data = output + return +} + +// Creates a new DB instance from a DB snapshot. 
The target database is created
+// from the source database restore point with most of the original configuration,
+// but in a system-chosen availability zone with the default security group,
+// the default subnet group, and the default DB parameter group. By default,
+// the new DB instance is created as a single-AZ deployment except when the
+// instance is a SQL Server instance that has an option group that is associated
+// with mirroring; in this case, the instance becomes a mirrored AZ deployment
+// and not a single-AZ deployment.
+//
+// If your intent is to replace your original DB instance with the new, restored
+// DB instance, then rename your original DB instance before you call the RestoreDBInstanceFromDBSnapshot
+// action. RDS does not allow two DB instances with the same name. Once you
+// have renamed your original DB instance with a different identifier, you
+// can pass the original name of the DB instance as the DBInstanceIdentifier
+// in the call to the RestoreDBInstanceFromDBSnapshot action. The result is
+// that you will replace the original DB instance with the DB instance created
+// from the snapshot.
+//
+// If you are restoring from a shared manual DB snapshot, the DBSnapshotIdentifier
+// must be the ARN of the shared DB snapshot.
+func (c *RDS) RestoreDBInstanceFromDBSnapshot(input *RestoreDBInstanceFromDBSnapshotInput) (*RestoreDBInstanceFromDBSnapshotOutput, error) {
+	req, out := c.RestoreDBInstanceFromDBSnapshotRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+const opRestoreDBInstanceToPointInTime = "RestoreDBInstanceToPointInTime"
+
+// RestoreDBInstanceToPointInTimeRequest generates a request for the RestoreDBInstanceToPointInTime operation.
+func (c *RDS) RestoreDBInstanceToPointInTimeRequest(input *RestoreDBInstanceToPointInTimeInput) (req *request.Request, output *RestoreDBInstanceToPointInTimeOutput) {
+	op := &request.Operation{
+		Name:       opRestoreDBInstanceToPointInTime,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &RestoreDBInstanceToPointInTimeInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &RestoreDBInstanceToPointInTimeOutput{}
+	req.Data = output
+	return
+}
+
+// Restores a DB instance to an arbitrary point in time. Users can restore to
+// any point in time before the LatestRestorableTime for up to BackupRetentionPeriod
+// days. The target database is created with most of the original configuration,
+// but in a system-chosen availability zone with the default security group,
+// the default subnet group, and the default DB parameter group. By default,
+// the new DB instance is created as a single-AZ deployment except when the
+// instance is a SQL Server instance that has an option group that is associated
+// with mirroring; in this case, the instance becomes a mirrored deployment
+// and not a single-AZ deployment.
+func (c *RDS) RestoreDBInstanceToPointInTime(input *RestoreDBInstanceToPointInTimeInput) (*RestoreDBInstanceToPointInTimeOutput, error) {
+	req, out := c.RestoreDBInstanceToPointInTimeRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+const opRevokeDBSecurityGroupIngress = "RevokeDBSecurityGroupIngress"
+
+// RevokeDBSecurityGroupIngressRequest generates a request for the RevokeDBSecurityGroupIngress operation.
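+//
+// Example (sketch; same assumptions as the earlier examples, revoking a
+// previously authorized CIDR range):
+//
+//	req, resp := svc.RevokeDBSecurityGroupIngressRequest(&rds.RevokeDBSecurityGroupIngressInput{
+//		DBSecurityGroupName: aws.String("mydbsecuritygroup"),
+//		CIDRIP:              aws.String("203.0.113.0/24"), // placeholder documentation range
+//	})
+//	if err := req.Send(); err == nil {
+//		fmt.Println(resp)
+//	}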
+func (c *RDS) RevokeDBSecurityGroupIngressRequest(input *RevokeDBSecurityGroupIngressInput) (req *request.Request, output *RevokeDBSecurityGroupIngressOutput) { + op := &request.Operation{ + Name: opRevokeDBSecurityGroupIngress, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &RevokeDBSecurityGroupIngressInput{} + } + + req = c.newRequest(op, input, output) + output = &RevokeDBSecurityGroupIngressOutput{} + req.Data = output + return +} + +// Revokes ingress from a DBSecurityGroup for previously authorized IP ranges +// or EC2 or VPC Security Groups. Required parameters for this API are one of +// CIDRIP, EC2SecurityGroupId for VPC, or (EC2SecurityGroupOwnerId and either +// EC2SecurityGroupName or EC2SecurityGroupId). +func (c *RDS) RevokeDBSecurityGroupIngress(input *RevokeDBSecurityGroupIngressInput) (*RevokeDBSecurityGroupIngressOutput, error) { + req, out := c.RevokeDBSecurityGroupIngressRequest(input) + err := req.Send() + return out, err +} + +// Describes a quota for an AWS account, for example, the number of DB instances +// allowed. +type AccountQuota struct { + _ struct{} `type:"structure"` + + // The name of the Amazon RDS quota for this AWS account. + AccountQuotaName *string `type:"string"` + + // The maximum allowed value for the quota. + Max *int64 `type:"long"` + + // The amount currently used toward the quota maximum. + Used *int64 `type:"long"` +} + +// String returns the string representation +func (s AccountQuota) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AccountQuota) GoString() string { + return s.String() +} + +type AddSourceIdentifierToSubscriptionInput struct { + _ struct{} `type:"structure"` + + // The identifier of the event source to be added. An identifier must begin + // with a letter and must contain only ASCII letters, digits, and hyphens; it + // cannot end with a hyphen or contain two consecutive hyphens. + // + // Constraints: + // + // If the source type is a DB instance, then a DBInstanceIdentifier must be + // supplied. If the source type is a DB security group, a DBSecurityGroupName + // must be supplied. If the source type is a DB parameter group, a DBParameterGroupName + // must be supplied. If the source type is a DB snapshot, a DBSnapshotIdentifier + // must be supplied. + SourceIdentifier *string `type:"string" required:"true"` + + // The name of the RDS event notification subscription you want to add a source + // identifier to. + SubscriptionName *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s AddSourceIdentifierToSubscriptionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AddSourceIdentifierToSubscriptionInput) GoString() string { + return s.String() +} + +type AddSourceIdentifierToSubscriptionOutput struct { + _ struct{} `type:"structure"` + + // Contains the results of a successful invocation of the DescribeEventSubscriptions + // action. + EventSubscription *EventSubscription `type:"structure"` +} + +// String returns the string representation +func (s AddSourceIdentifierToSubscriptionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AddSourceIdentifierToSubscriptionOutput) GoString() string { + return s.String() +} + +type AddTagsToResourceInput struct { + _ struct{} `type:"structure"` + + // The Amazon RDS resource the tags will be added to. 
This value is an Amazon + // Resource Name (ARN). For information about creating an ARN, see Constructing + // an RDS Amazon Resource Name (ARN) (http://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_Tagging.html#USER_Tagging.ARN). + ResourceName *string `type:"string" required:"true"` + + // The tags to be assigned to the Amazon RDS resource. + Tags []*Tag `locationNameList:"Tag" type:"list" required:"true"` +} + +// String returns the string representation +func (s AddTagsToResourceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AddTagsToResourceInput) GoString() string { + return s.String() +} + +type AddTagsToResourceOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s AddTagsToResourceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AddTagsToResourceOutput) GoString() string { + return s.String() +} + +type ApplyPendingMaintenanceActionInput struct { + _ struct{} `type:"structure"` + + // The pending maintenance action to apply to this resource. + ApplyAction *string `type:"string" required:"true"` + + // A value that specifies the type of opt-in request, or undoes an opt-in request. + // An opt-in request of type immediate cannot be undone. + // + // Valid values: + // + // immediate - Apply the maintenance action immediately. next-maintenance + // - Apply the maintenance action during the next maintenance window for the + // resource. undo-opt-in - Cancel any existing next-maintenance opt-in requests. + OptInType *string `type:"string" required:"true"` + + // The RDS Amazon Resource Name (ARN) of the resource that the pending maintenance + // action applies to. For information about creating an ARN, see Constructing + // an RDS Amazon Resource Name (ARN) (http://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_Tagging.html#USER_Tagging.ARN). + ResourceIdentifier *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s ApplyPendingMaintenanceActionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ApplyPendingMaintenanceActionInput) GoString() string { + return s.String() +} + +type ApplyPendingMaintenanceActionOutput struct { + _ struct{} `type:"structure"` + + // Describes the pending maintenance actions for a resource. + ResourcePendingMaintenanceActions *ResourcePendingMaintenanceActions `type:"structure"` +} + +// String returns the string representation +func (s ApplyPendingMaintenanceActionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ApplyPendingMaintenanceActionOutput) GoString() string { + return s.String() +} + +type AuthorizeDBSecurityGroupIngressInput struct { + _ struct{} `type:"structure"` + + // The IP range to authorize. + CIDRIP *string `type:"string"` + + // The name of the DB security group to add authorization to. + DBSecurityGroupName *string `type:"string" required:"true"` + + // Id of the EC2 security group to authorize. For VPC DB security groups, EC2SecurityGroupId + // must be provided. Otherwise, EC2SecurityGroupOwnerId and either EC2SecurityGroupName + // or EC2SecurityGroupId must be provided. + EC2SecurityGroupId *string `type:"string"` + + // Name of the EC2 security group to authorize. For VPC DB security groups, + // EC2SecurityGroupId must be provided. 
Otherwise, EC2SecurityGroupOwnerId and + // either EC2SecurityGroupName or EC2SecurityGroupId must be provided. + EC2SecurityGroupName *string `type:"string"` + + // AWS account number of the owner of the EC2 security group specified in the + // EC2SecurityGroupName parameter. The AWS Access Key ID is not an acceptable + // value. For VPC DB security groups, EC2SecurityGroupId must be provided. Otherwise, + // EC2SecurityGroupOwnerId and either EC2SecurityGroupName or EC2SecurityGroupId + // must be provided. + EC2SecurityGroupOwnerId *string `type:"string"` +} + +// String returns the string representation +func (s AuthorizeDBSecurityGroupIngressInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AuthorizeDBSecurityGroupIngressInput) GoString() string { + return s.String() +} + +type AuthorizeDBSecurityGroupIngressOutput struct { + _ struct{} `type:"structure"` + + // Contains the result of a successful invocation of the following actions: + // + // DescribeDBSecurityGroups AuthorizeDBSecurityGroupIngress CreateDBSecurityGroup + // RevokeDBSecurityGroupIngress This data type is used as a response element + // in the DescribeDBSecurityGroups action. + DBSecurityGroup *DBSecurityGroup `type:"structure"` +} + +// String returns the string representation +func (s AuthorizeDBSecurityGroupIngressOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AuthorizeDBSecurityGroupIngressOutput) GoString() string { + return s.String() +} + +// Contains Availability Zone information. +// +// This data type is used as an element in the following data type: OrderableDBInstanceOption +type AvailabilityZone struct { + _ struct{} `type:"structure"` + + // The name of the availability zone. + Name *string `type:"string"` +} + +// String returns the string representation +func (s AvailabilityZone) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AvailabilityZone) GoString() string { + return s.String() +} + +// A CA certificate for an AWS account. +type Certificate struct { + _ struct{} `type:"structure"` + + // The unique key that identifies a certificate. + CertificateIdentifier *string `type:"string"` + + // The type of the certificate. + CertificateType *string `type:"string"` + + // The thumbprint of the certificate. + Thumbprint *string `type:"string"` + + // The starting date from which the certificate is valid. + ValidFrom *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + // The final date that the certificate continues to be valid. + ValidTill *time.Time `type:"timestamp" timestampFormat:"iso8601"` +} + +// String returns the string representation +func (s Certificate) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Certificate) GoString() string { + return s.String() +} + +// This data type is used as a response element in the action DescribeDBEngineVersions. +type CharacterSet struct { + _ struct{} `type:"structure"` + + // The description of the character set. + CharacterSetDescription *string `type:"string"` + + // The name of the character set. 
+ CharacterSetName *string `type:"string"` +} + +// String returns the string representation +func (s CharacterSet) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CharacterSet) GoString() string { + return s.String() +} + +type CopyDBClusterSnapshotInput struct { + _ struct{} `type:"structure"` + + // The identifier of the DB cluster snapshot to copy. This parameter is not + // case-sensitive. + // + // Constraints: + // + // Must contain from 1 to 63 alphanumeric characters or hyphens. First character + // must be a letter. Cannot end with a hyphen or contain two consecutive hyphens. + // Example: my-cluster-snapshot1 + SourceDBClusterSnapshotIdentifier *string `type:"string" required:"true"` + + // A list of tags. + Tags []*Tag `locationNameList:"Tag" type:"list"` + + // The identifier of the new DB cluster snapshot to create from the source DB + // cluster snapshot. This parameter is not case-sensitive. + // + // Constraints: + // + // Must contain from 1 to 63 alphanumeric characters or hyphens. First character + // must be a letter. Cannot end with a hyphen or contain two consecutive hyphens. + // Example: my-cluster-snapshot2 + TargetDBClusterSnapshotIdentifier *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s CopyDBClusterSnapshotInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CopyDBClusterSnapshotInput) GoString() string { + return s.String() +} + +type CopyDBClusterSnapshotOutput struct { + _ struct{} `type:"structure"` + + // Contains the result of a successful invocation of the following actions: + // + // CreateDBClusterSnapshot DeleteDBClusterSnapshot This data type is + // used as a response element in the DescribeDBClusterSnapshots action. + DBClusterSnapshot *DBClusterSnapshot `type:"structure"` +} + +// String returns the string representation +func (s CopyDBClusterSnapshotOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CopyDBClusterSnapshotOutput) GoString() string { + return s.String() +} + +type CopyDBParameterGroupInput struct { + _ struct{} `type:"structure"` + + // The identifier or ARN for the source DB parameter group. For information + // about creating an ARN, see Constructing an RDS Amazon Resource Name (ARN) + // (http://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_Tagging.html#USER_Tagging.ARN). + // + // Constraints: + // + // Must specify a valid DB parameter group. If the source DB parameter group + // is in the same region as the copy, specify a valid DB parameter group identifier, + // for example my-db-param-group, or a valid ARN. If the source DB parameter + // group is in a different region than the copy, specify a valid DB parameter + // group ARN, for example arn:aws:rds:us-west-2:123456789012:pg:special-parameters. + SourceDBParameterGroupIdentifier *string `type:"string" required:"true"` + + // A list of tags. + Tags []*Tag `locationNameList:"Tag" type:"list"` + + // A description for the copied DB parameter group. + TargetDBParameterGroupDescription *string `type:"string" required:"true"` + + // The identifier for the copied DB parameter group. 
+ // + // Constraints: + // + // Cannot be null, empty, or blank Must contain from 1 to 255 alphanumeric + // characters or hyphens First character must be a letter Cannot end with a + // hyphen or contain two consecutive hyphens Example: my-db-parameter-group + TargetDBParameterGroupIdentifier *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s CopyDBParameterGroupInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CopyDBParameterGroupInput) GoString() string { + return s.String() +} + +type CopyDBParameterGroupOutput struct { + _ struct{} `type:"structure"` + + // Contains the result of a successful invocation of the CreateDBParameterGroup + // action. + // + // This data type is used as a request parameter in the DeleteDBParameterGroup + // action, and as a response element in the DescribeDBParameterGroups action. + DBParameterGroup *DBParameterGroup `type:"structure"` +} + +// String returns the string representation +func (s CopyDBParameterGroupOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CopyDBParameterGroupOutput) GoString() string { + return s.String() +} + +type CopyDBSnapshotInput struct { + _ struct{} `type:"structure"` + + // True to copy all tags from the source DB snapshot to the target DB snapshot; + // otherwise false. The default is false. + CopyTags *bool `type:"boolean"` + + // The identifier for the source DB snapshot. + // + // If you are copying from a shared manual DB snapshot, this must be the ARN + // of the shared DB snapshot. + // + // Constraints: + // + // Must specify a valid system snapshot in the "available" state. If the source + // snapshot is in the same region as the copy, specify a valid DB snapshot identifier. + // If the source snapshot is in a different region than the copy, specify a + // valid DB snapshot ARN. For more information, go to Copying a DB Snapshot + // (http://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_CopySnapshot.html). + // Example: rds:mydb-2012-04-02-00-01 + // + // Example: arn:aws:rds:rr-regn-1:123456789012:snapshot:mysql-instance1-snapshot-20130805 + SourceDBSnapshotIdentifier *string `type:"string" required:"true"` + + // A list of tags. + Tags []*Tag `locationNameList:"Tag" type:"list"` + + // The identifier for the copied snapshot. + // + // Constraints: + // + // Cannot be null, empty, or blank Must contain from 1 to 255 alphanumeric + // characters or hyphens First character must be a letter Cannot end with a + // hyphen or contain two consecutive hyphens Example: my-db-snapshot + TargetDBSnapshotIdentifier *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s CopyDBSnapshotInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CopyDBSnapshotInput) GoString() string { + return s.String() +} + +type CopyDBSnapshotOutput struct { + _ struct{} `type:"structure"` + + // Contains the result of a successful invocation of the following actions: + // + // CreateDBSnapshot DeleteDBSnapshot This data type is used as a response + // element in the DescribeDBSnapshots action. 
+ DBSnapshot *DBSnapshot `type:"structure"` +} + +// String returns the string representation +func (s CopyDBSnapshotOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CopyDBSnapshotOutput) GoString() string { + return s.String() +} + +type CopyOptionGroupInput struct { + _ struct{} `type:"structure"` + + // The identifier or ARN for the source option group. For information about + // creating an ARN, see Constructing an RDS Amazon Resource Name (ARN) (http://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_Tagging.html#USER_Tagging.ARN). + // + // Constraints: + // + // Must specify a valid option group. If the source option group is in the + // same region as the copy, specify a valid option group identifier, for example + // my-option-group, or a valid ARN. If the source option group is in a different + // region than the copy, specify a valid option group ARN, for example arn:aws:rds:us-west-2:123456789012:og:special-options. + SourceOptionGroupIdentifier *string `type:"string" required:"true"` + + // A list of tags. + Tags []*Tag `locationNameList:"Tag" type:"list"` + + // The description for the copied option group. + TargetOptionGroupDescription *string `type:"string" required:"true"` + + // The identifier for the copied option group. + // + // Constraints: + // + // Cannot be null, empty, or blank Must contain from 1 to 255 alphanumeric + // characters or hyphens First character must be a letter Cannot end with a + // hyphen or contain two consecutive hyphens Example: my-option-group + TargetOptionGroupIdentifier *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s CopyOptionGroupInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CopyOptionGroupInput) GoString() string { + return s.String() +} + +type CopyOptionGroupOutput struct { + _ struct{} `type:"structure"` + + OptionGroup *OptionGroup `type:"structure"` +} + +// String returns the string representation +func (s CopyOptionGroupOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CopyOptionGroupOutput) GoString() string { + return s.String() +} + +type CreateDBClusterInput struct { + _ struct{} `type:"structure"` + + // A list of EC2 Availability Zones that instances in the DB cluster can be + // created in. For information on regions and Availability Zones, see Regions + // and Availability Zones (http://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Concepts.RegionsAndAvailabilityZones.html). + AvailabilityZones []*string `locationNameList:"AvailabilityZone" type:"list"` + + // The number of days for which automated backups are retained. You must specify + // a minimum value of 1. + // + // Default: 1 + // + // Constraints: + // + // Must be a value from 1 to 35 + BackupRetentionPeriod *int64 `type:"integer"` + + // A value that indicates that the DB cluster should be associated with the + // specified CharacterSet. + CharacterSetName *string `type:"string"` + + // The DB cluster identifier. This parameter is stored as a lowercase string. + // + // Constraints: + // + // Must contain from 1 to 63 alphanumeric characters or hyphens. First character + // must be a letter. Cannot end with a hyphen or contain two consecutive hyphens. 
+	// Example: my-cluster1
+	DBClusterIdentifier *string `type:"string" required:"true"`
+
+	// The name of the DB cluster parameter group to associate with this DB cluster.
+	// If this argument is omitted, default.aurora5.6 for the specified engine will
+	// be used.
+	//
+	// Constraints:
+	//
+	// Must be 1 to 255 alphanumeric characters First character must be a letter
+	// Cannot end with a hyphen or contain two consecutive hyphens
+	DBClusterParameterGroupName *string `type:"string"`
+
+	// A DB subnet group to associate with this DB cluster.
+	DBSubnetGroupName *string `type:"string"`
+
+	// The name for your database of up to 8 alpha-numeric characters. If you do
+	// not provide a name, Amazon RDS will not create a database in the DB cluster
+	// you are creating.
+	DatabaseName *string `type:"string"`
+
+	// The name of the database engine to be used for this DB cluster.
+	//
+	// Valid Values: aurora
+	Engine *string `type:"string" required:"true"`
+
+	// The version number of the database engine to use.
+	//
+	// Aurora
+	//
+	// Example: 5.6.10a
+	EngineVersion *string `type:"string"`
+
+	// The KMS key identifier for an encrypted DB cluster.
+	//
+	// The KMS key identifier is the Amazon Resource Name (ARN) for the KMS encryption
+	// key. If you are creating a DB cluster with the same AWS account that owns
+	// the KMS encryption key used to encrypt the new DB cluster, then you can use
+	// the KMS key alias instead of the ARN for the KMS encryption key.
+	//
+	// If the StorageEncrypted parameter is true, and you do not specify a value
+	// for the KmsKeyId parameter, then Amazon RDS will use your default encryption
+	// key. AWS KMS creates the default encryption key for your AWS account. Your
+	// AWS account has a different default encryption key for each AWS region.
+	KmsKeyId *string `type:"string"`
+
+	// The password for the master database user. This password can contain any
+	// printable ASCII character except "/", "\"", or "@".
+	//
+	// Constraints: Must contain from 8 to 41 characters.
+	MasterUserPassword *string `type:"string" required:"true"`
+
+	// The name of the master user for the DB cluster.
+	//
+	// Constraints:
+	//
+	// Must be 1 to 16 alphanumeric characters. First character must be a letter.
+	// Cannot be a reserved word for the chosen database engine.
+	MasterUsername *string `type:"string" required:"true"`
+
+	// A value that indicates that the DB cluster should be associated with the
+	// specified option group.
+	//
+	// Permanent options cannot be removed from an option group. The option group
+	// cannot be removed from a DB cluster once it is associated with a DB cluster.
+	OptionGroupName *string `type:"string"`
+
+	// The port number on which the instances in the DB cluster accept connections.
+	//
+	// Default: 3306
+	Port *int64 `type:"integer"`
+
+	// The daily time range during which automated backups are created if automated
+	// backups are enabled using the BackupRetentionPeriod parameter.
+	//
+	// Default: A 30-minute window selected at random from an 8-hour block of time
+	// per region. To see the time blocks available, see Adjusting the Preferred
+	// Maintenance Window (http://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/AdjustingTheMaintenanceWindow.html)
+	// in the Amazon RDS User Guide.
+	//
+	// Constraints:
+	//
+	// Must be in the format hh24:mi-hh24:mi. Times should be in Universal Coordinated
+	// Time (UTC). Must not conflict with the preferred maintenance window. Must
+	// be at least 30 minutes.
+ PreferredBackupWindow *string `type:"string"` + + // The weekly time range during which system maintenance can occur, in Universal + // Coordinated Time (UTC). + // + // Format: ddd:hh24:mi-ddd:hh24:mi + // + // Default: A 30-minute window selected at random from an 8-hour block of time + // per region, occurring on a random day of the week. To see the time blocks + // available, see Adjusting the Preferred Maintenance Window (http://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/AdjustingTheMaintenanceWindow.html) + // in the Amazon RDS User Guide. + // + // Valid Days: Mon, Tue, Wed, Thu, Fri, Sat, Sun + // + // Constraints: Minimum 30-minute window. + PreferredMaintenanceWindow *string `type:"string"` + + // Specifies whether the DB cluster is encrypted. + StorageEncrypted *bool `type:"boolean"` + + // A list of tags. + Tags []*Tag `locationNameList:"Tag" type:"list"` + + // A list of EC2 VPC security groups to associate with this DB cluster. + VpcSecurityGroupIds []*string `locationNameList:"VpcSecurityGroupId" type:"list"` +} + +// String returns the string representation +func (s CreateDBClusterInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateDBClusterInput) GoString() string { + return s.String() +} + +type CreateDBClusterOutput struct { + _ struct{} `type:"structure"` + + // Contains the result of a successful invocation of the following actions: + // + // CreateDBCluster DeleteDBCluster FailoverDBCluster ModifyDBCluster + // RestoreDBClusterFromSnapshot This data type is used as a response element + // in the DescribeDBClusters action. + DBCluster *DBCluster `type:"structure"` +} + +// String returns the string representation +func (s CreateDBClusterOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateDBClusterOutput) GoString() string { + return s.String() +} + +type CreateDBClusterParameterGroupInput struct { + _ struct{} `type:"structure"` + + // The name of the DB cluster parameter group. + // + // Constraints: + // + // Must be 1 to 255 alphanumeric characters First character must be a letter + // Cannot end with a hyphen or contain two consecutive hyphens This value is + // stored as a lowercase string. + DBClusterParameterGroupName *string `type:"string" required:"true"` + + // The DB cluster parameter group family name. A DB cluster parameter group + // can be associated with one and only one DB cluster parameter group family, + // and can be applied only to a DB cluster running a database engine and engine + // version compatible with that DB cluster parameter group family. + DBParameterGroupFamily *string `type:"string" required:"true"` + + // The description for the DB cluster parameter group. + Description *string `type:"string" required:"true"` + + // A list of tags. + Tags []*Tag `locationNameList:"Tag" type:"list"` +} + +// String returns the string representation +func (s CreateDBClusterParameterGroupInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateDBClusterParameterGroupInput) GoString() string { + return s.String() +} + +type CreateDBClusterParameterGroupOutput struct { + _ struct{} `type:"structure"` + + // Contains the result of a successful invocation of the CreateDBClusterParameterGroup + // action. 
+ // + // This data type is used as a request parameter in the DeleteDBClusterParameterGroup + // action, and as a response element in the DescribeDBClusterParameterGroups + // action. + DBClusterParameterGroup *DBClusterParameterGroup `type:"structure"` +} + +// String returns the string representation +func (s CreateDBClusterParameterGroupOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateDBClusterParameterGroupOutput) GoString() string { + return s.String() +} + +type CreateDBClusterSnapshotInput struct { + _ struct{} `type:"structure"` + + // The identifier of the DB cluster to create a snapshot for. This parameter + // is not case-sensitive. + // + // Constraints: + // + // Must contain from 1 to 63 alphanumeric characters or hyphens. First character + // must be a letter. Cannot end with a hyphen or contain two consecutive hyphens. + // Example: my-cluster1 + DBClusterIdentifier *string `type:"string" required:"true"` + + // The identifier of the DB cluster snapshot. This parameter is stored as a + // lowercase string. + // + // Constraints: + // + // Must contain from 1 to 63 alphanumeric characters or hyphens. First character + // must be a letter. Cannot end with a hyphen or contain two consecutive hyphens. + // Example: my-cluster1-snapshot1 + DBClusterSnapshotIdentifier *string `type:"string" required:"true"` + + // The tags to be assigned to the DB cluster snapshot. + Tags []*Tag `locationNameList:"Tag" type:"list"` +} + +// String returns the string representation +func (s CreateDBClusterSnapshotInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateDBClusterSnapshotInput) GoString() string { + return s.String() +} + +type CreateDBClusterSnapshotOutput struct { + _ struct{} `type:"structure"` + + // Contains the result of a successful invocation of the following actions: + // + // CreateDBClusterSnapshot DeleteDBClusterSnapshot This data type is + // used as a response element in the DescribeDBClusterSnapshots action. + DBClusterSnapshot *DBClusterSnapshot `type:"structure"` +} + +// String returns the string representation +func (s CreateDBClusterSnapshotOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateDBClusterSnapshotOutput) GoString() string { + return s.String() +} + +type CreateDBInstanceInput struct { + _ struct{} `type:"structure"` + + // The amount of storage (in gigabytes) to be initially allocated for the database + // instance. + // + // Type: Integer + // + // MySQL + // + // Constraints: Must be an integer from 5 to 6144. + // + // MariaDB + // + // Constraints: Must be an integer from 5 to 6144. + // + // PostgreSQL + // + // Constraints: Must be an integer from 5 to 6144. + // + // Oracle + // + // Constraints: Must be an integer from 10 to 6144. + // + // SQL Server + // + // Constraints: Must be an integer from 200 to 4096 (Standard Edition and + // Enterprise Edition) or from 20 to 4096 (Express Edition and Web Edition) + AllocatedStorage *int64 `type:"integer"` + + // Indicates that minor engine upgrades will be applied automatically to the + // DB instance during the maintenance window. + // + // Default: true + AutoMinorVersionUpgrade *bool `type:"boolean"` + + // The EC2 Availability Zone that the database instance will be created in. 
+ // For information on regions and Availability Zones, see Regions and Availability + // Zones (http://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Concepts.RegionsAndAvailabilityZones.html). + // + // Default: A random, system-chosen Availability Zone in the endpoint's region. + // + // Example: us-east-1d + // + // Constraint: The AvailabilityZone parameter cannot be specified if the MultiAZ + // parameter is set to true. The specified Availability Zone must be in the + // same region as the current endpoint. + AvailabilityZone *string `type:"string"` + + // The number of days for which automated backups are retained. Setting this + // parameter to a positive number enables backups. Setting this parameter to + // 0 disables automated backups. + // + // Default: 1 + // + // Constraints: + // + // Must be a value from 0 to 35 Cannot be set to 0 if the DB instance is a + // source to Read Replicas + BackupRetentionPeriod *int64 `type:"integer"` + + // For supported engines, indicates that the DB instance should be associated + // with the specified CharacterSet. + CharacterSetName *string `type:"string"` + + // True to copy all tags from the DB instance to snapshots of the DB instance; + // otherwise false. The default is false. + CopyTagsToSnapshot *bool `type:"boolean"` + + // The identifier of the DB cluster that the instance will belong to. + // + // For information on creating a DB cluster, see CreateDBCluster. + // + // Type: String + DBClusterIdentifier *string `type:"string"` + + // The compute and memory capacity of the DB instance. + // + // Valid Values: db.t1.micro | db.m1.small | db.m1.medium | db.m1.large | + // db.m1.xlarge | db.m2.xlarge |db.m2.2xlarge | db.m2.4xlarge | db.m3.medium + // | db.m3.large | db.m3.xlarge | db.m3.2xlarge | db.m4.large | db.m4.xlarge + // | db.m4.2xlarge | db.m4.4xlarge | db.m4.10xlarge | db.r3.large | db.r3.xlarge + // | db.r3.2xlarge | db.r3.4xlarge | db.r3.8xlarge | db.t2.micro | db.t2.small + // | db.t2.medium | db.t2.large + DBInstanceClass *string `type:"string" required:"true"` + + // The DB instance identifier. This parameter is stored as a lowercase string. + // + // Constraints: + // + // Must contain from 1 to 63 alphanumeric characters or hyphens (1 to 15 for + // SQL Server). First character must be a letter. Cannot end with a hyphen or + // contain two consecutive hyphens. Example: mydbinstance + DBInstanceIdentifier *string `type:"string" required:"true"` + + // The meaning of this parameter differs according to the database engine you + // use. + // + // Type: String + // + // MySQL + // + // The name of the database to create when the DB instance is created. If this + // parameter is not specified, no database is created in the DB instance. + // + // Constraints: + // + // Must contain 1 to 64 alphanumeric characters Cannot be a word reserved + // by the specified database engine MariaDB + // + // The name of the database to create when the DB instance is created. If this + // parameter is not specified, no database is created in the DB instance. + // + // Constraints: + // + // Must contain 1 to 64 alphanumeric characters Cannot be a word reserved + // by the specified database engine PostgreSQL + // + // The name of the database to create when the DB instance is created. If this + // parameter is not specified, the default "postgres" database is created in + // the DB instance. + // + // Constraints: + // + // Must contain 1 to 63 alphanumeric characters Must begin with a letter or + // an underscore. 
Subsequent characters can be letters, underscores, or digits + // (0-9). Cannot be a word reserved by the specified database engine Oracle + // + // The Oracle System ID (SID) of the created DB instance. + // + // Default: ORCL + // + // Constraints: + // + // Cannot be longer than 8 characters SQL Server + // + // Not applicable. Must be null. + // + // Amazon Aurora + // + // The name of the database to create when the primary instance of the DB cluster + // is created. If this parameter is not specified, no database is created in + // the DB instance. + // + // Constraints: + // + // Must contain 1 to 64 alphanumeric characters Cannot be a word reserved + // by the specified database engine + DBName *string `type:"string"` + + // The name of the DB parameter group to associate with this DB instance. If + // this argument is omitted, the default DBParameterGroup for the specified + // engine will be used. + // + // Constraints: + // + // Must be 1 to 255 alphanumeric characters First character must be a letter + // Cannot end with a hyphen or contain two consecutive hyphens + DBParameterGroupName *string `type:"string"` + + // A list of DB security groups to associate with this DB instance. + // + // Default: The default DB security group for the database engine. + DBSecurityGroups []*string `locationNameList:"DBSecurityGroupName" type:"list"` + + // A DB subnet group to associate with this DB instance. + // + // If there is no DB subnet group, then it is a non-VPC DB instance. + DBSubnetGroupName *string `type:"string"` + + // The name of the database engine to be used for this instance. + // + // Valid Values: MySQL | mariadb | oracle-se1 | oracle-se | oracle-ee | sqlserver-ee + // | sqlserver-se | sqlserver-ex | sqlserver-web | postgres | aurora + // + // Not every database engine is available for every AWS region. + Engine *string `type:"string" required:"true"` + + // The version number of the database engine to use. + // + // The following are the database engines and major and minor versions that + // are available with Amazon RDS. Not every database engine is available for + // every AWS region. 
+ // + // MySQL + // + // Version 5.1 (Only available in the following regions: ap-northeast-1, ap-southeast-1, + // ap-southeast-2, eu-west-1, sa-east-1, us-west-1, us-west-2): 5.1.73a | 5.1.73b + // Version 5.5 (Only available in the following regions: ap-northeast-1, ap-southeast-1, + // ap-southeast-2, eu-west-1, sa-east-1, us-west-1, us-west-2): 5.5.40 | 5.5.40a + // Version 5.5 (Available in all regions): 5.5.40b | 5.5.41 | 5.5.42 Version + // 5.6 (Available in all regions): 5.6.19a | 5.6.19b | 5.6.21 | 5.6.21b | 5.6.22 + // | 5.6.23 MariaDB + // + // Version 10.0 (Available in all regions except AWS GovCloud (US) Region + // (us-gov-west-1)): 10.0.17 Oracle Database Enterprise Edition (oracle-ee) + // + // Version 11.2 (Only available in the following regions: ap-northeast-1, + // ap-southeast-1, ap-southeast-2, eu-west-1, sa-east-1, us-west-1, us-west-2): + // 11.2.0.2.v3 | 11.2.0.2.v4 | 11.2.0.2.v5 | 11.2.0.2.v6 | 11.2.0.2.v7 Version + // 11.2 (Available in all regions): 11.2.0.3.v1 | 11.2.0.3.v2 | 11.2.0.3.v3 + // | 11.2.0.4.v1 | 11.2.0.4.v3 | 11.2.0.4.v4 Version 12.1 (Available in all + // regions): 12.1.0.1.v1 | 12.1.0.1.v2 | 12.1.0.2.v1 Oracle Database Standard + // Edition (oracle-se) + // + // Version 11.2 (Only available in the following regions: us-west-1): 11.2.0.2.v3 + // | 11.2.0.2.v4 | 11.2.0.2.v5 | 11.2.0.2.v6 | 11.2.0.2.v7 Version 11.2 (Only + // available in the following regions: eu-central-1, us-west-1): 11.2.0.3.v1 + // | 11.2.0.3.v2 | 11.2.0.3.v3 | 11.2.0.4.v1 | 11.2.0.4.v3 | 11.2.0.4.v4 Version + // 12.1 (Only available in the following regions: eu-central-1, us-west-1): + // 12.1.0.1.v1 | 12.1.0.1.v2 Oracle Database Standard Edition One (oracle-se1) + // + // Version 11.2 (Only available in the following regions: us-west-1): 11.2.0.2.v3 + // | 11.2.0.2.v4 | 11.2.0.2.v5 | 11.2.0.2.v6 | 11.2.0.2.v7 Version 11.2 (Only + // available in the following regions: eu-central-1, us-west-1): 11.2.0.3.v1 + // | 11.2.0.3.v2 | 11.2.0.3.v3 | 11.2.0.4.v1 | 11.2.0.4.v3 | 11.2.0.4.v4 Version + // 12.1 (Only available in the following regions: eu-central-1, us-west-1): + // 12.1.0.1.v1 | 12.1.0.1.v2 PostgreSQL + // + // Version 9.3 (Only available in the following regions: ap-northeast-1, ap-southeast-1, + // ap-southeast-2, eu-west-1, sa-east-1, us-west-1, us-west-2): 9.3.1 | 9.3.2 + // Version 9.3 (Available in all regions): 9.3.3 | 9.3.5 | 9.3.6 | 9.3.9 | + // 9.3.10 Version 9.4 (Available in all regions): 9.4.1 | 9.4.4 | 9.4.5 Microsoft + // SQL Server Enterprise Edition (sqlserver-ee) + // + // Version 10.50 (Available in all regions): 10.50.2789.0.v1 Version 10.50 + // (Available in all regions): 10.50.6000.34.v1 Version 11.00 (Available in + // all regions): 11.00.2100.60.v1 Version 11.00 (Available in all regions): + // 11.00.5058.0.v1 Microsoft SQL Server Express Edition (sqlserver-ex) + // + // Version 10.50 (Available in all regions): 10.50.2789.0.v1 Version 10.50 + // (Available in all regions): 10.50.6000.34.v1 Version 11.00 (Available in + // all regions): 11.00.2100.60.v1 Version 11.00 (Available in all regions): + // 11.00.5058.0.v1 Version 12.00 (Available in all regions): 12.00.4422.0.v1 + // Microsoft SQL Server Standard Edition (sqlserver-se) + // + // Version 10.50 (Available in all regions): 10.50.2789.0.v1 Version 10.50 + // (Available in all regions): 10.50.6000.34.v1 Version 11.00 (Available in + // all regions): 11.00.2100.60.v1 Version 11.00 (Available in all regions): + // 11.00.5058.0.v1 Version 12.00 (Available in all regions): 12.00.4422.0.v1 + // 
Microsoft SQL Server Web Edition (sqlserver-web) + // + // Version 10.50 (Available in all regions): 10.50.2789.0.v1 Version 10.50 + // (Available in all regions): 10.50.6000.34.v1 Version 11.00 (Available in + // all regions): 11.00.2100.60.v1 Version 11.00 (Available in all regions): + // 11.00.5058.0.v1 Version 12.00 (Available in all regions): 12.00.4422.0.v1 + EngineVersion *string `type:"string"` + + // The amount of Provisioned IOPS (input/output operations per second) to be + // initially allocated for the DB instance. + // + // Constraints: To use PIOPS, this value must be an integer greater than 1000. + Iops *int64 `type:"integer"` + + // The KMS key identifier for an encrypted DB instance. + // + // The KMS key identifier is the Amazon Resource Name (ARN) for the KMS encryption + // key. If you are creating a DB instance with the same AWS account that owns + // the KMS encryption key used to encrypt the new DB instance, then you can + // use the KMS key alias instead of the ARN for the KMS encryption key. + // + // If the StorageEncrypted parameter is true, and you do not specify a value + // for the KmsKeyId parameter, then Amazon RDS will use your default encryption + // key. AWS KMS creates the default encryption key for your AWS account. Your + // AWS account has a different default encryption key for each AWS region. + KmsKeyId *string `type:"string"` + + // License model information for this DB instance. + // + // Valid values: license-included | bring-your-own-license | general-public-license + LicenseModel *string `type:"string"` + + // The password for the master database user. Can be any printable ASCII character + // except "/", """, or "@". + // + // Type: String + // + // MySQL + // + // Constraints: Must contain from 8 to 41 characters. + // + // MariaDB + // + // Constraints: Must contain from 8 to 41 characters. + // + // Oracle + // + // Constraints: Must contain from 8 to 30 characters. + // + // SQL Server + // + // Constraints: Must contain from 8 to 128 characters. + // + // PostgreSQL + // + // Constraints: Must contain from 8 to 128 characters. + // + // Amazon Aurora + // + // Constraints: Must contain from 8 to 41 characters. + MasterUserPassword *string `type:"string"` + + // The name of the master user for the client DB instance. + // + // MySQL + // + // Constraints: + // + // Must be 1 to 16 alphanumeric characters. First character must be a letter. + // Cannot be a reserved word for the chosen database engine. MariaDB + // + // Constraints: + // + // Must be 1 to 16 alphanumeric characters. Cannot be a reserved word for + // the chosen database engine. Type: String + // + // Oracle + // + // Constraints: + // + // Must be 1 to 30 alphanumeric characters. First character must be a letter. + // Cannot be a reserved word for the chosen database engine. SQL Server + // + // Constraints: + // + // Must be 1 to 128 alphanumeric characters. First character must be a letter. + // Cannot be a reserved word for the chosen database engine. PostgreSQL + // + // Constraints: + // + // Must be 1 to 63 alphanumeric characters. First character must be a letter. + // Cannot be a reserved word for the chosen database engine. + MasterUsername *string `type:"string"` + + // The interval, in seconds, between points when Enhanced Monitoring metrics + // are collected for the DB instance. To disable collecting Enhanced Monitoring + // metrics, specify 0. The default is 60.
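+ // + // (A minimal sketch, not part of the generated AWS documentation: pairing + // this field with MonitoringRoleArn when building a request. The role ARN + // below is hypothetical.) + // + //    input := &rds.CreateDBInstanceInput{ + //        // ... required identifier, class, and engine fields ... + //        MonitoringInterval: aws.Int64(30), + //        MonitoringRoleArn:  aws.String("arn:aws:iam::123456789012:role/emaccess"), + //    }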
+ // + // If MonitoringRoleArn is specified, then you must also set MonitoringInterval + // to a value other than 0. + // + // Valid Values: 0, 1, 5, 10, 15, 30, 60 + MonitoringInterval *int64 `type:"integer"` + + // The ARN for the IAM role that permits RDS to send enhanced monitoring metrics + // to CloudWatch Logs. For example, arn:aws:iam:123456789012:role/emaccess. + // For information on creating a monitoring role, go to To create an IAM role + // for Amazon RDS Enhanced Monitoring (http://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_Monitoring.html#USER_Monitoring.OS.IAMRole). + // + // If MonitoringInterval is set to a value other than 0, then you must supply + // a MonitoringRoleArn value. + MonitoringRoleArn *string `type:"string"` + + // Specifies if the DB instance is a Multi-AZ deployment. You cannot set the + // AvailabilityZone parameter if the MultiAZ parameter is set to true. Do not + // set this value if you want a Multi-AZ deployment for a SQL Server DB instance. + // Multi-AZ for SQL Server is set using the Mirroring option in an option group. + MultiAZ *bool `type:"boolean"` + + // Indicates that the DB instance should be associated with the specified option + // group. + // + // Permanent options, such as the TDE option for Oracle Advanced Security + // TDE, cannot be removed from an option group, and that option group cannot + // be removed from a DB instance once it is associated with a DB instance + OptionGroupName *string `type:"string"` + + // The port number on which the database accepts connections. + // + // MySQL + // + // Default: 3306 + // + // Valid Values: 1150-65535 + // + // Type: Integer + // + // MariaDB + // + // Default: 3306 + // + // Valid Values: 1150-65535 + // + // Type: Integer + // + // PostgreSQL + // + // Default: 5432 + // + // Valid Values: 1150-65535 + // + // Type: Integer + // + // Oracle + // + // Default: 1521 + // + // Valid Values: 1150-65535 + // + // SQL Server + // + // Default: 1433 + // + // Valid Values: 1150-65535 except for 1434, 3389, 47001, 49152, and 49152 + // through 49156. + // + // Amazon Aurora + // + // Default: 3306 + // + // Valid Values: 1150-65535 + // + // Type: Integer + Port *int64 `type:"integer"` + + // The daily time range during which automated backups are created if automated + // backups are enabled, using the BackupRetentionPeriod parameter. For more + // information, see DB Instance Backups (http://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Overview.BackingUpAndRestoringAmazonRDSInstances.html). + // + // Default: A 30-minute window selected at random from an 8-hour block of + // time per region. To see the time blocks available, see Adjusting the Preferred + // Maintenance Window (http://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/AdjustingTheMaintenanceWindow.html) + // in the Amazon RDS User Guide. + // + // Constraints: + // + // Must be in the format hh24:mi-hh24:mi. Times should be in Universal Coordinated + // Time (UTC). Must not conflict with the preferred maintenance window. Must + // be at least 30 minutes. + PreferredBackupWindow *string `type:"string"` + + // The weekly time range during which system maintenance can occur, in Universal + // Coordinated Time (UTC). For more information, see DB Instance Maintenance + // (http://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Concepts.DBMaintenance.html). 
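+ // + // (A hypothetical example value, assuming the ddd:hh24:mi-ddd:hh24:mi format + // documented below: a 30-minute window early Sunday morning, UTC.) + // + //    PreferredMaintenanceWindow: aws.String("sun:04:30-sun:05:00"),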
+ // + // Format: ddd:hh24:mi-ddd:hh24:mi + // + // Default: A 30-minute window selected at random from an 8-hour block of + // time per region, occurring on a random day of the week. To see the time blocks + // available, see Adjusting the Preferred Maintenance Window (http://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/AdjustingTheMaintenanceWindow.html) + // in the Amazon RDS User Guide. + // + // Valid Days: Mon, Tue, Wed, Thu, Fri, Sat, Sun + // + // Constraints: Minimum 30-minute window. + PreferredMaintenanceWindow *string `type:"string"` + + // Specifies the accessibility options for the DB instance. A value of true + // specifies an Internet-facing instance with a publicly resolvable DNS name, + // which resolves to a public IP address. A value of false specifies an internal + // instance with a DNS name that resolves to a private IP address. + // + // Default: The default behavior varies depending on whether a VPC has been + // requested or not. The following list shows the default behavior in each case. + // + // Default VPC: true VPC: false If no DB subnet group has been specified + // as part of the request and the PubliclyAccessible value has not been set, + // the DB instance will be publicly accessible. If a specific DB subnet group + // has been specified as part of the request and the PubliclyAccessible value + // has not been set, the DB instance will be private. + PubliclyAccessible *bool `type:"boolean"` + + // Specifies whether the DB instance is encrypted. + // + // Default: false + StorageEncrypted *bool `type:"boolean"` + + // Specifies the storage type to be associated with the DB instance. + // + // Valid values: standard | gp2 | io1 + // + // If you specify io1, you must also include a value for the Iops parameter. + // + // Default: io1 if the Iops parameter is specified; otherwise standard + StorageType *string `type:"string"` + + // A list of tags. + Tags []*Tag `locationNameList:"Tag" type:"list"` + + // The ARN from the Key Store with which to associate the instance for TDE encryption. + TdeCredentialArn *string `type:"string"` + + // The password for the given ARN from the Key Store in order to access the + // device. + TdeCredentialPassword *string `type:"string"` + + // A list of EC2 VPC security groups to associate with this DB instance. + // + // Default: The default EC2 VPC security group for the DB subnet group's VPC. + VpcSecurityGroupIds []*string `locationNameList:"VpcSecurityGroupId" type:"list"` +} + +// String returns the string representation +func (s CreateDBInstanceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateDBInstanceInput) GoString() string { + return s.String() +} + +type CreateDBInstanceOutput struct { + _ struct{} `type:"structure"` + + // Contains the result of a successful invocation of the following actions: + // + // CreateDBInstance DeleteDBInstance ModifyDBInstance This data type + // is used as a response element in the DescribeDBInstances action. + DBInstance *DBInstance `type:"structure"` +} + +// String returns the string representation +func (s CreateDBInstanceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateDBInstanceOutput) GoString() string { + return s.String() +} + +type CreateDBInstanceReadReplicaInput struct { + _ struct{} `type:"structure"` + + // Indicates that minor engine upgrades will be applied automatically to the + // Read Replica during the maintenance window. 
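+ // + // (A minimal sketch of a read-replica request that leaves optional fields such + // as this one unset so that they inherit from the source DB instance; the + // identifiers are hypothetical, and svc is assumed to be an *rds.RDS client, + // for example from rds.New(session.New()).) + // + //    replica, err := svc.CreateDBInstanceReadReplica(&rds.CreateDBInstanceReadReplicaInput{ + //        DBInstanceIdentifier:       aws.String("mydbinstance-replica1"), + //        SourceDBInstanceIdentifier: aws.String("mydbinstance"), + //    })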
+ // + // Default: Inherits from the source DB instance + AutoMinorVersionUpgrade *bool `type:"boolean"` + + // The Amazon EC2 Availability Zone that the Read Replica will be created in. + // + // Default: A random, system-chosen Availability Zone in the endpoint's region. + // + // Example: us-east-1d + AvailabilityZone *string `type:"string"` + + // True to copy all tags from the Read Replica to snapshots of the Read Replica; + // otherwise false. The default is false. + CopyTagsToSnapshot *bool `type:"boolean"` + + // The compute and memory capacity of the Read Replica. + // + // Valid Values: db.m1.small | db.m1.medium | db.m1.large | db.m1.xlarge | + // db.m2.xlarge |db.m2.2xlarge | db.m2.4xlarge | db.m3.medium | db.m3.large + // | db.m3.xlarge | db.m3.2xlarge | db.m4.large | db.m4.xlarge | db.m4.2xlarge + // | db.m4.4xlarge | db.m4.10xlarge | db.r3.large | db.r3.xlarge | db.r3.2xlarge + // | db.r3.4xlarge | db.r3.8xlarge | db.t2.micro | db.t2.small | db.t2.medium + // | db.t2.large + // + // Default: Inherits from the source DB instance. + DBInstanceClass *string `type:"string"` + + // The DB instance identifier of the Read Replica. This identifier is the unique + // key that identifies a DB instance. This parameter is stored as a lowercase + // string. + DBInstanceIdentifier *string `type:"string" required:"true"` + + // Specifies a DB subnet group for the DB instance. The new DB instance will + // be created in the VPC associated with the DB subnet group. If no DB subnet + // group is specified, then the new DB instance is not created in a VPC. + // + // Constraints: + // + // Can only be specified if the source DB instance identifier specifies a + // DB instance in another region. The specified DB subnet group must be in the + // same region in which the operation is running. All Read Replicas in one + // region that are created from the same source DB instance must either: Specify + // DB subnet groups from the same VPC (all these Read Replicas will be created + // in the same VPC), or not specify a DB subnet group (all these Read Replicas + // will be created outside of any VPC). + DBSubnetGroupName *string `type:"string"` + + // The amount of Provisioned IOPS (input/output operations per second) to be + // initially allocated for the DB instance. + Iops *int64 `type:"integer"` + + // The interval, in seconds, between points when Enhanced Monitoring metrics + // are collected for the Read Replica. To disable collecting Enhanced Monitoring + // metrics, specify 0. The default is 60. + // + // If MonitoringRoleArn is specified, then you must also set MonitoringInterval + // to a value other than 0. + // + // Valid Values: 0, 1, 5, 10, 15, 30, 60 + MonitoringInterval *int64 `type:"integer"` + + // The ARN for the IAM role that permits RDS to send enhanced monitoring metrics + // to CloudWatch Logs. For example, arn:aws:iam:123456789012:role/emaccess. + // For information on creating a monitoring role, go to To create an IAM role + // for Amazon RDS Enhanced Monitoring (http://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_Monitoring.html#USER_Monitoring.OS.IAMRole). + // + // If MonitoringInterval is set to a value other than 0, then you must supply + // a MonitoringRoleArn value. + MonitoringRoleArn *string `type:"string"` + + // The option group the DB instance will be associated with. If omitted, the + // default option group for the engine specified will be used. + OptionGroupName *string `type:"string"` + + // The port number that the DB instance uses for connections.
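+ // + // (A sketch under the same assumptions as above: overriding the inherited port + // with a hypothetical value when building the request.) + // + //    Port: aws.Int64(3307),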
+ // + // Default: Inherits from the source DB instance + // + // Valid Values: 1150-65535 + Port *int64 `type:"integer"` + + // Specifies the accessibility options for the DB instance. A value of true + // specifies an Internet-facing instance with a publicly resolvable DNS name, + // which resolves to a public IP address. A value of false specifies an internal + // instance with a DNS name that resolves to a private IP address. + // + // Default: The default behavior varies depending on whether a VPC has been + // requested or not. The following list shows the default behavior in each case. + // + // Default VPC:true VPC:false If no DB subnet group has been specified + // as part of the request and the PubliclyAccessible value has not been set, + // the DB instance will be publicly accessible. If a specific DB subnet group + // has been specified as part of the request and the PubliclyAccessible value + // has not been set, the DB instance will be private. + PubliclyAccessible *bool `type:"boolean"` + + // The identifier of the DB instance that will act as the source for the Read + // Replica. Each DB instance can have up to five Read Replicas. + // + // Constraints: + // + // Must be the identifier of an existing MySQL, MariaDB, or PostgreSQL DB + // instance. Can specify a DB instance that is a MySQL Read Replica only if + // the source is running MySQL 5.6. Can specify a DB instance that is a PostgreSQL + // Read Replica only if the source is running PostgreSQL 9.3.5. The specified + // DB instance must have automatic backups enabled; that is, its backup retention + // period must be greater than 0. If the source DB instance is in the same region as + // the Read Replica, specify a valid DB instance identifier. If the source DB + // instance is in a different region than the Read Replica, specify a valid + // DB instance ARN. For more information, go to Constructing an Amazon RDS Amazon + // Resource Name (ARN) (http://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_Tagging.html#USER_Tagging.ARN). + SourceDBInstanceIdentifier *string `type:"string" required:"true"` + + // Specifies the storage type to be associated with the Read Replica. + // + // Valid values: standard | gp2 | io1 + // + // If you specify io1, you must also include a value for the Iops parameter. + // + // Default: io1 if the Iops parameter is specified; otherwise standard + StorageType *string `type:"string"` + + // A list of tags. + Tags []*Tag `locationNameList:"Tag" type:"list"` +} + +// String returns the string representation +func (s CreateDBInstanceReadReplicaInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateDBInstanceReadReplicaInput) GoString() string { + return s.String() +} + +type CreateDBInstanceReadReplicaOutput struct { + _ struct{} `type:"structure"` + + // Contains the result of a successful invocation of the following actions: + // + // CreateDBInstance DeleteDBInstance ModifyDBInstance This data type + // is used as a response element in the DescribeDBInstances action. + DBInstance *DBInstance `type:"structure"` +} + +// String returns the string representation +func (s CreateDBInstanceReadReplicaOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateDBInstanceReadReplicaOutput) GoString() string { + return s.String() +} + +type CreateDBParameterGroupInput struct { + _ struct{} `type:"structure"` + + // The DB parameter group family name.
A DB parameter group can be associated + // with one and only one DB parameter group family, and can be applied only + // to a DB instance running a database engine and engine version compatible + // with that DB parameter group family. + DBParameterGroupFamily *string `type:"string" required:"true"` + + // The name of the DB parameter group. + // + // Constraints: + // + // Must be 1 to 255 alphanumeric characters First character must be a letter + // Cannot end with a hyphen or contain two consecutive hyphens This value is + // stored as a lowercase string. + DBParameterGroupName *string `type:"string" required:"true"` + + // The description for the DB parameter group. + Description *string `type:"string" required:"true"` + + // A list of tags. + Tags []*Tag `locationNameList:"Tag" type:"list"` +} + +// String returns the string representation +func (s CreateDBParameterGroupInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateDBParameterGroupInput) GoString() string { + return s.String() +} + +type CreateDBParameterGroupOutput struct { + _ struct{} `type:"structure"` + + // Contains the result of a successful invocation of the CreateDBParameterGroup + // action. + // + // This data type is used as a request parameter in the DeleteDBParameterGroup + // action, and as a response element in the DescribeDBParameterGroups action. + DBParameterGroup *DBParameterGroup `type:"structure"` +} + +// String returns the string representation +func (s CreateDBParameterGroupOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateDBParameterGroupOutput) GoString() string { + return s.String() +} + +type CreateDBSecurityGroupInput struct { + _ struct{} `type:"structure"` + + // The description for the DB security group. + DBSecurityGroupDescription *string `type:"string" required:"true"` + + // The name for the DB security group. This value is stored as a lowercase string. + // + // Constraints: + // + // Must be 1 to 255 alphanumeric characters First character must be a letter + // Cannot end with a hyphen or contain two consecutive hyphens Must not be "Default" + // Cannot contain spaces Example: mysecuritygroup + DBSecurityGroupName *string `type:"string" required:"true"` + + // A list of tags. + Tags []*Tag `locationNameList:"Tag" type:"list"` +} + +// String returns the string representation +func (s CreateDBSecurityGroupInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateDBSecurityGroupInput) GoString() string { + return s.String() +} + +type CreateDBSecurityGroupOutput struct { + _ struct{} `type:"structure"` + + // Contains the result of a successful invocation of the following actions: + // + // DescribeDBSecurityGroups AuthorizeDBSecurityGroupIngress CreateDBSecurityGroup + // RevokeDBSecurityGroupIngress This data type is used as a response element + // in the DescribeDBSecurityGroups action. + DBSecurityGroup *DBSecurityGroup `type:"structure"` +} + +// String returns the string representation +func (s CreateDBSecurityGroupOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateDBSecurityGroupOutput) GoString() string { + return s.String() +} + +type CreateDBSnapshotInput struct { + _ struct{} `type:"structure"` + + // The DB instance identifier. This is the unique key that identifies a DB instance. 
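+ // + // (A minimal sketch of a manual-snapshot request built from this input type; + // the identifiers are hypothetical, and svc is assumed to be an *rds.RDS + // client.) + // + //    snap, err := svc.CreateDBSnapshot(&rds.CreateDBSnapshotInput{ + //        DBInstanceIdentifier: aws.String("mydbinstance"), + //        DBSnapshotIdentifier: aws.String("my-snapshot-id"), + //    })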
+ // + // Constraints: + // + // Must contain from 1 to 63 alphanumeric characters or hyphens First character + // must be a letter Cannot end with a hyphen or contain two consecutive hyphens + DBInstanceIdentifier *string `type:"string" required:"true"` + + // The identifier for the DB snapshot. + // + // Constraints: + // + // Cannot be null, empty, or blank Must contain from 1 to 255 alphanumeric + // characters or hyphens First character must be a letter Cannot end with a + // hyphen or contain two consecutive hyphens Example: my-snapshot-id + DBSnapshotIdentifier *string `type:"string" required:"true"` + + // A list of tags. + Tags []*Tag `locationNameList:"Tag" type:"list"` +} + +// String returns the string representation +func (s CreateDBSnapshotInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateDBSnapshotInput) GoString() string { + return s.String() +} + +type CreateDBSnapshotOutput struct { + _ struct{} `type:"structure"` + + // Contains the result of a successful invocation of the following actions: + // + // CreateDBSnapshot DeleteDBSnapshot This data type is used as a response + // element in the DescribeDBSnapshots action. + DBSnapshot *DBSnapshot `type:"structure"` +} + +// String returns the string representation +func (s CreateDBSnapshotOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateDBSnapshotOutput) GoString() string { + return s.String() +} + +type CreateDBSubnetGroupInput struct { + _ struct{} `type:"structure"` + + // The description for the DB subnet group. + DBSubnetGroupDescription *string `type:"string" required:"true"` + + // The name for the DB subnet group. This value is stored as a lowercase string. + // + // Constraints: Must contain no more than 255 alphanumeric characters, periods, + // underscores, or hyphens. Must not be default. + // + // Example: mySubnetgroup + DBSubnetGroupName *string `type:"string" required:"true"` + + // The EC2 Subnet IDs for the DB subnet group. + SubnetIds []*string `locationNameList:"SubnetIdentifier" type:"list" required:"true"` + + // A list of tags. + Tags []*Tag `locationNameList:"Tag" type:"list"` +} + +// String returns the string representation +func (s CreateDBSubnetGroupInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateDBSubnetGroupInput) GoString() string { + return s.String() +} + +type CreateDBSubnetGroupOutput struct { + _ struct{} `type:"structure"` + + // Contains the result of a successful invocation of the following actions: + // + // CreateDBSubnetGroup ModifyDBSubnetGroup DescribeDBSubnetGroups DeleteDBSubnetGroup + // This data type is used as a response element in the DescribeDBSubnetGroups + // action. + DBSubnetGroup *DBSubnetGroup `type:"structure"` +} + +// String returns the string representation +func (s CreateDBSubnetGroupOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateDBSubnetGroupOutput) GoString() string { + return s.String() +} + +type CreateEventSubscriptionInput struct { + _ struct{} `type:"structure"` + + // A Boolean value; set to true to activate the subscription, set to false to + // create the subscription but not activate it. + Enabled *bool `type:"boolean"` + + // A list of event categories for a SourceType that you want to subscribe to.
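+ // (For a db-instance source, a hypothetical value for this list would be + // []*string{aws.String("backup"), aws.String("failure")}.)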
+ // You can see a list of the categories for a given SourceType in the Events + // (http://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_Events.html) + // topic in the Amazon RDS User Guide or by using the DescribeEventCategories + // action. + EventCategories []*string `locationNameList:"EventCategory" type:"list"` + + // The Amazon Resource Name (ARN) of the SNS topic created for event notification. + // The ARN is created by Amazon SNS when you create a topic and subscribe to + // it. + SnsTopicArn *string `type:"string" required:"true"` + + // The list of identifiers of the event sources for which events will be returned. + // If not specified, then all sources are included in the response. An identifier + // must begin with a letter and must contain only ASCII letters, digits, and + // hyphens; it cannot end with a hyphen or contain two consecutive hyphens. + // + // Constraints: + // + // If SourceIds are supplied, SourceType must also be provided. If the source + // type is a DB instance, then a DBInstanceIdentifier must be supplied. If the + // source type is a DB security group, a DBSecurityGroupName must be supplied. + // If the source type is a DB parameter group, a DBParameterGroupName must be + // supplied. If the source type is a DB snapshot, a DBSnapshotIdentifier must + // be supplied. + SourceIds []*string `locationNameList:"SourceId" type:"list"` + + // The type of source that will be generating the events. For example, if you + // want to be notified of events generated by a DB instance, you would set this + // parameter to db-instance. If this value is not specified, all events are + // returned. + // + // Valid values: db-instance | db-parameter-group | db-security-group | db-snapshot + SourceType *string `type:"string"` + + // The name of the subscription. + // + // Constraints: The name must be less than 255 characters. + SubscriptionName *string `type:"string" required:"true"` + + // A list of tags. + Tags []*Tag `locationNameList:"Tag" type:"list"` +} + +// String returns the string representation +func (s CreateEventSubscriptionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateEventSubscriptionInput) GoString() string { + return s.String() +} + +type CreateEventSubscriptionOutput struct { + _ struct{} `type:"structure"` + + // Contains the results of a successful invocation of the DescribeEventSubscriptions + // action. + EventSubscription *EventSubscription `type:"structure"` +} + +// String returns the string representation +func (s CreateEventSubscriptionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateEventSubscriptionOutput) GoString() string { + return s.String() +} + +type CreateOptionGroupInput struct { + _ struct{} `type:"structure"` + + // Specifies the name of the engine that this option group should be associated + // with. + EngineName *string `type:"string" required:"true"` + + // Specifies the major version of the engine that this option group should be + // associated with. + MajorEngineVersion *string `type:"string" required:"true"` + + // The description of the option group. + OptionGroupDescription *string `type:"string" required:"true"` + + // Specifies the name of the option group to be created.
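+ // + // (A minimal sketch with the four required fields of this input type set; the + // values are hypothetical, and svc is assumed to be an *rds.RDS client.) + // + //    og, err := svc.CreateOptionGroup(&rds.CreateOptionGroupInput{ + //        EngineName:             aws.String("mysql"), + //        MajorEngineVersion:     aws.String("5.6"), + //        OptionGroupDescription: aws.String("my option group"), + //        OptionGroupName:        aws.String("myoptiongroup"), + //    })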
+ // + // Constraints: + // + // Must be 1 to 255 alphanumeric characters or hyphens First character must + // be a letter Cannot end with a hyphen or contain two consecutive hyphens + // Example: myoptiongroup + OptionGroupName *string `type:"string" required:"true"` + + // A list of tags. + Tags []*Tag `locationNameList:"Tag" type:"list"` +} + +// String returns the string representation +func (s CreateOptionGroupInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateOptionGroupInput) GoString() string { + return s.String() +} + +type CreateOptionGroupOutput struct { + _ struct{} `type:"structure"` + + OptionGroup *OptionGroup `type:"structure"` +} + +// String returns the string representation +func (s CreateOptionGroupOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateOptionGroupOutput) GoString() string { + return s.String() +} + +// Contains the result of a successful invocation of the following actions: +// +// CreateDBCluster DeleteDBCluster FailoverDBCluster ModifyDBCluster +// RestoreDBClusterFromSnapshot This data type is used as a response element +// in the DescribeDBClusters action. +type DBCluster struct { + _ struct{} `type:"structure"` + + // Specifies the allocated storage size in gigabytes (GB). + AllocatedStorage *int64 `type:"integer"` + + // Provides the list of EC2 Availability Zones that instances in the DB cluster + // can be created in. + AvailabilityZones []*string `locationNameList:"AvailabilityZone" type:"list"` + + // Specifies the number of days for which automatic DB snapshots are retained. + BackupRetentionPeriod *int64 `type:"integer"` + + // If present, specifies the name of the character set that this cluster is + // associated with. + CharacterSetName *string `type:"string"` + + // Contains a user-supplied DB cluster identifier. This identifier is the unique + // key that identifies a DB cluster. + DBClusterIdentifier *string `type:"string"` + + // Provides the list of instances that make up the DB cluster. + DBClusterMembers []*DBClusterMember `locationNameList:"DBClusterMember" type:"list"` + + // Provides the list of option group memberships for this DB cluster. + DBClusterOptionGroupMemberships []*DBClusterOptionGroupStatus `locationNameList:"DBClusterOptionGroup" type:"list"` + + // Specifies the name of the DB cluster parameter group for the DB cluster. + DBClusterParameterGroup *string `type:"string"` + + // Specifies information on the subnet group associated with the DB cluster, + // including the name, description, and subnets in the subnet group. + DBSubnetGroup *string `type:"string"` + + // Contains the name of the initial database of this DB cluster that was provided + // at create time, if one was specified when the DB cluster was created. This + // same name is returned for the life of the DB cluster. + DatabaseName *string `type:"string"` + + // If StorageEncrypted is true, the region-unique, immutable identifier for + // the encrypted DB cluster. This identifier is found in AWS CloudTrail log + // entries whenever the KMS key for the DB cluster is accessed. + DbClusterResourceId *string `type:"string"` + + // Specifies the earliest time to which a database can be restored with point-in-time + // restore. + EarliestRestorableTime *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + // Specifies the connection endpoint for the primary instance of the DB cluster. 
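+ // (A hypothetical example, assuming the usual RDS naming scheme: + // "mydbcluster.cluster-abcdefgh1234.us-east-1.rds.amazonaws.com".)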
+ Endpoint *string `type:"string"` + + // Provides the name of the database engine to be used for this DB cluster. + Engine *string `type:"string"` + + // Indicates the database engine version. + EngineVersion *string `type:"string"` + + // Specifies the ID that Amazon Route 53 assigns when you create a hosted zone. + HostedZoneId *string `type:"string"` + + // If StorageEncrypted is true, the KMS key identifier for the encrypted DB + // cluster. + KmsKeyId *string `type:"string"` + + // Specifies the latest time to which a database can be restored with point-in-time + // restore. + LatestRestorableTime *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + // Contains the master username for the DB cluster. + MasterUsername *string `type:"string"` + + // Specifies the progress of the operation as a percentage. + PercentProgress *string `type:"string"` + + // Specifies the port that the database engine is listening on. + Port *int64 `type:"integer"` + + // Specifies the daily time range during which automated backups are created + // if automated backups are enabled, as determined by the BackupRetentionPeriod. + PreferredBackupWindow *string `type:"string"` + + // Specifies the weekly time range during which system maintenance can occur, + // in Universal Coordinated Time (UTC). + PreferredMaintenanceWindow *string `type:"string"` + + // Specifies the current state of this DB cluster. + Status *string `type:"string"` + + // Specifies whether the DB cluster is encrypted. + StorageEncrypted *bool `type:"boolean"` + + // Provides a list of VPC security groups that the DB cluster belongs to. + VpcSecurityGroups []*VpcSecurityGroupMembership `locationNameList:"VpcSecurityGroupMembership" type:"list"` +} + +// String returns the string representation +func (s DBCluster) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DBCluster) GoString() string { + return s.String() +} + +// Contains information about an instance that is part of a DB cluster. +type DBClusterMember struct { + _ struct{} `type:"structure"` + + // Specifies the status of the DB cluster parameter group for this member of + // the DB cluster. + DBClusterParameterGroupStatus *string `type:"string"` + + // Specifies the instance identifier for this member of the DB cluster. + DBInstanceIdentifier *string `type:"string"` + + // Value that is true if the cluster member is the primary instance for the + // DB cluster and false otherwise. + IsClusterWriter *bool `type:"boolean"` +} + +// String returns the string representation +func (s DBClusterMember) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DBClusterMember) GoString() string { + return s.String() +} + +// Contains status information for a DB cluster option group. +type DBClusterOptionGroupStatus struct { + _ struct{} `type:"structure"` + + // Specifies the name of the DB cluster option group. + DBClusterOptionGroupName *string `type:"string"` + + // Specifies the status of the DB cluster option group. + Status *string `type:"string"` +} + +// String returns the string representation +func (s DBClusterOptionGroupStatus) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DBClusterOptionGroupStatus) GoString() string { + return s.String() +} + +// Contains the result of a successful invocation of the CreateDBClusterParameterGroup +// action. 
+// +// This data type is used as a request parameter in the DeleteDBClusterParameterGroup +// action, and as a response element in the DescribeDBClusterParameterGroups +// action. +type DBClusterParameterGroup struct { + _ struct{} `type:"structure"` + + // Provides the name of the DB cluster parameter group. + DBClusterParameterGroupName *string `type:"string"` + + // Provides the name of the DB parameter group family that this DB cluster parameter + // group is compatible with. + DBParameterGroupFamily *string `type:"string"` + + // Provides the customer-specified description for this DB cluster parameter + // group. + Description *string `type:"string"` +} + +// String returns the string representation +func (s DBClusterParameterGroup) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DBClusterParameterGroup) GoString() string { + return s.String() +} + +type DBClusterParameterGroupNameMessage struct { + _ struct{} `type:"structure"` + + // The name of the DB cluster parameter group. + // + // Constraints: + // + // Must be 1 to 255 alphanumeric characters First character must be a letter + // Cannot end with a hyphen or contain two consecutive hyphens This value is + // stored as a lowercase string. + DBClusterParameterGroupName *string `type:"string"` +} + +// String returns the string representation +func (s DBClusterParameterGroupNameMessage) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DBClusterParameterGroupNameMessage) GoString() string { + return s.String() +} + +// Contains the result of a successful invocation of the following actions: +// +// CreateDBClusterSnapshot DeleteDBClusterSnapshot This data type is +// used as a response element in the DescribeDBClusterSnapshots action. +type DBClusterSnapshot struct { + _ struct{} `type:"structure"` + + // Specifies the allocated storage size in gigabytes (GB). + AllocatedStorage *int64 `type:"integer"` + + // Provides the list of EC2 Availability Zones that instances in the DB cluster + // snapshot can be restored in. + AvailabilityZones []*string `locationNameList:"AvailabilityZone" type:"list"` + + // Specifies the time when the DB cluster was created, in Universal Coordinated + // Time (UTC). + ClusterCreateTime *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + // Specifies the DB cluster identifier of the DB cluster that this DB cluster + // snapshot was created from. + DBClusterIdentifier *string `type:"string"` + + // Specifies the identifier for the DB cluster snapshot. + DBClusterSnapshotIdentifier *string `type:"string"` + + // Specifies the name of the database engine. + Engine *string `type:"string"` + + // Provides the version of the database engine for this DB cluster snapshot. + EngineVersion *string `type:"string"` + + // If StorageEncrypted is true, the KMS key identifier for the encrypted DB + // cluster snapshot. + KmsKeyId *string `type:"string"` + + // Provides the license model information for this DB cluster snapshot. + LicenseModel *string `type:"string"` + + // Provides the master username for the DB cluster snapshot. + MasterUsername *string `type:"string"` + + // Specifies the percentage of the estimated data that has been transferred. + PercentProgress *int64 `type:"integer"` + + // Specifies the port that the DB cluster was listening on at the time of the + // snapshot. 
+ Port *int64 `type:"integer"` + + // Provides the time when the snapshot was taken, in Universal Coordinated Time + // (UTC). + SnapshotCreateTime *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + // Provides the type of the DB cluster snapshot. + SnapshotType *string `type:"string"` + + // Specifies the status of this DB cluster snapshot. + Status *string `type:"string"` + + // Specifies whether the DB cluster snapshot is encrypted. + StorageEncrypted *bool `type:"boolean"` + + // Provides the VPC ID associated with the DB cluster snapshot. + VpcId *string `type:"string"` +} + +// String returns the string representation +func (s DBClusterSnapshot) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DBClusterSnapshot) GoString() string { + return s.String() +} + +// This data type is used as a response element in the action DescribeDBEngineVersions. +type DBEngineVersion struct { + _ struct{} `type:"structure"` + + // The description of the database engine. + DBEngineDescription *string `type:"string"` + + // The description of the database engine version. + DBEngineVersionDescription *string `type:"string"` + + // The name of the DB parameter group family for the database engine. + DBParameterGroupFamily *string `type:"string"` + + // The default character set for new instances of this engine version, if the + // CharacterSetName parameter of the CreateDBInstance API is not specified. + DefaultCharacterSet *CharacterSet `type:"structure"` + + // The name of the database engine. + Engine *string `type:"string"` + + // The version number of the database engine. + EngineVersion *string `type:"string"` + + // A list of the character sets supported by this engine for the CharacterSetName + // parameter of the CreateDBInstance API. + SupportedCharacterSets []*CharacterSet `locationNameList:"CharacterSet" type:"list"` + + // A list of engine versions that this database engine version can be upgraded + // to. + ValidUpgradeTarget []*UpgradeTarget `locationNameList:"UpgradeTarget" type:"list"` +} + +// String returns the string representation +func (s DBEngineVersion) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DBEngineVersion) GoString() string { + return s.String() +} + +// Contains the result of a successful invocation of the following actions: +// +// CreateDBInstance DeleteDBInstance ModifyDBInstance This data type +// is used as a response element in the DescribeDBInstances action. +type DBInstance struct { + _ struct{} `type:"structure"` + + // Specifies the allocated storage size specified in gigabytes. + AllocatedStorage *int64 `type:"integer"` + + // Indicates that minor version patches are applied automatically. + AutoMinorVersionUpgrade *bool `type:"boolean"` + + // Specifies the name of the Availability Zone the DB instance is located in. + AvailabilityZone *string `type:"string"` + + // Specifies the number of days for which automatic DB snapshots are retained. + BackupRetentionPeriod *int64 `type:"integer"` + + // The identifier of the CA certificate for this DB instance. + CACertificateIdentifier *string `type:"string"` + + // If present, specifies the name of the character set that this instance is + // associated with. + CharacterSetName *string `type:"string"` + + // Specifies whether tags are copied from the DB instance to snapshots of the + // DB instance. 
+ CopyTagsToSnapshot *bool `type:"boolean"` + + // If the DB instance is a member of a DB cluster, contains the name of the + // DB cluster that the DB instance is a member of. + DBClusterIdentifier *string `type:"string"` + + // Contains the name of the compute and memory capacity class of the DB instance. + DBInstanceClass *string `type:"string"` + + // Contains a user-supplied database identifier. This identifier is the unique + // key that identifies a DB instance. + DBInstanceIdentifier *string `type:"string"` + + // Specifies the current state of this database. + DBInstanceStatus *string `type:"string"` + + // The meaning of this parameter differs according to the database engine you + // use. For example, this value returns MySQL, MariaDB, or PostgreSQL information + // when returning values from CreateDBInstanceReadReplica since Read Replicas + // are only supported for these engines. + // + // MySQL, MariaDB, SQL Server, PostgreSQL, Amazon Aurora + // + // Contains the name of the initial database of this instance that was provided + // at create time, if one was specified when the DB instance was created. This + // same name is returned for the life of the DB instance. + // + // Type: String + // + // Oracle + // + // Contains the Oracle System ID (SID) of the created DB instance. Not shown + // when the returned parameters do not apply to an Oracle DB instance. + DBName *string `type:"string"` + + // Provides the list of DB parameter groups applied to this DB instance. + DBParameterGroups []*DBParameterGroupStatus `locationNameList:"DBParameterGroup" type:"list"` + + // Provides a list of DB security group elements containing only DBSecurityGroup.Name + // and DBSecurityGroup.Status subelements. + DBSecurityGroups []*DBSecurityGroupMembership `locationNameList:"DBSecurityGroup" type:"list"` + + // Specifies information on the subnet group associated with the DB instance, + // including the name, description, and subnets in the subnet group. + DBSubnetGroup *DBSubnetGroup `type:"structure"` + + // Specifies the port that the DB instance listens on. If the DB instance is + // part of a DB cluster, this can be a different port than the DB cluster port. + DbInstancePort *int64 `type:"integer"` + + // If StorageEncrypted is true, the region-unique, immutable identifier for + // the encrypted DB instance. This identifier is found in AWS CloudTrail log + // entries whenever the KMS key for the DB instance is accessed. + DbiResourceId *string `type:"string"` + + // Specifies the connection endpoint. + Endpoint *Endpoint `type:"structure"` + + // Provides the name of the database engine to be used for this DB instance. + Engine *string `type:"string"` + + // Indicates the database engine version. + EngineVersion *string `type:"string"` + + // The Amazon Resource Name (ARN) of the Amazon CloudWatch Logs log stream that + // receives the Enhanced Monitoring metrics data for the DB instance. + EnhancedMonitoringResourceArn *string `type:"string"` + + // Provides the date and time the DB instance was created. + InstanceCreateTime *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + // Specifies the Provisioned IOPS (I/O operations per second) value. + Iops *int64 `type:"integer"` + + // If StorageEncrypted is true, the KMS key identifier for the encrypted DB + // instance. + KmsKeyId *string `type:"string"` + + // Specifies the latest time to which a database can be restored with point-in-time + // restore.
+ LatestRestorableTime *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + // License model information for this DB instance. + LicenseModel *string `type:"string"` + + // Contains the master username for the DB instance. + MasterUsername *string `type:"string"` + + // The interval, in seconds, between points when Enhanced Monitoring metrics + // are collected for the DB instance. + MonitoringInterval *int64 `type:"integer"` + + // The ARN for the IAM role that permits RDS to send Enhanced Monitoring metrics + // to CloudWatch Logs. + MonitoringRoleArn *string `type:"string"` + + // Specifies if the DB instance is a Multi-AZ deployment. + MultiAZ *bool `type:"boolean"` + + // Provides the list of option group memberships for this DB instance. + OptionGroupMemberships []*OptionGroupMembership `locationNameList:"OptionGroupMembership" type:"list"` + + // Specifies that changes to the DB instance are pending. This element is only + // included when changes are pending. Specific changes are identified by subelements. + PendingModifiedValues *PendingModifiedValues `type:"structure"` + + // Specifies the daily time range during which automated backups are created + // if automated backups are enabled, as determined by the BackupRetentionPeriod. + PreferredBackupWindow *string `type:"string"` + + // Specifies the weekly time range during which system maintenance can occur, + // in Universal Coordinated Time (UTC). + PreferredMaintenanceWindow *string `type:"string"` + + // Specifies the accessibility options for the DB instance. A value of true + // specifies an Internet-facing instance with a publicly resolvable DNS name, + // which resolves to a public IP address. A value of false specifies an internal + // instance with a DNS name that resolves to a private IP address. + // + // Default: The default behavior varies depending on whether a VPC has been + // requested or not. The following list shows the default behavior in each case. + // + // Default VPC:true VPC:false If no DB subnet group has been specified + // as part of the request and the PubliclyAccessible value has not been set, + // the DB instance will be publicly accessible. If a specific DB subnet group + // has been specified as part of the request and the PubliclyAccessible value + // has not been set, the DB instance will be private. + PubliclyAccessible *bool `type:"boolean"` + + // Contains one or more identifiers of the Read Replicas associated with this + // DB instance. + ReadReplicaDBInstanceIdentifiers []*string `locationNameList:"ReadReplicaDBInstanceIdentifier" type:"list"` + + // Contains the identifier of the source DB instance if this DB instance is + // a Read Replica. + ReadReplicaSourceDBInstanceIdentifier *string `type:"string"` + + // If present, specifies the name of the secondary Availability Zone for a DB + // instance with multi-AZ support. + SecondaryAvailabilityZone *string `type:"string"` + + // The status of a Read Replica. If the instance is not a Read Replica, this + // will be blank. + StatusInfos []*DBInstanceStatusInfo `locationNameList:"DBInstanceStatusInfo" type:"list"` + + // Specifies whether the DB instance is encrypted. + StorageEncrypted *bool `type:"boolean"` + + // Specifies the storage type associated with the DB instance. + StorageType *string `type:"string"` + + // The ARN from the Key Store with which the instance is associated for TDE + // encryption. + TdeCredentialArn *string `type:"string"` + + // Provides a list of VPC security group elements that the DB instance belongs + // to.
+ VpcSecurityGroups []*VpcSecurityGroupMembership `locationNameList:"VpcSecurityGroupMembership" type:"list"` +} + +// String returns the string representation +func (s DBInstance) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DBInstance) GoString() string { + return s.String() +} + +// Provides a list of status information for a DB instance. +type DBInstanceStatusInfo struct { + _ struct{} `type:"structure"` + + // Details of the error if there is an error for the instance. If the instance + // is not in an error state, this value is blank. + Message *string `type:"string"` + + // Boolean value that is true if the instance is operating normally, or false + // if the instance is in an error state. + Normal *bool `type:"boolean"` + + // Status of the DB instance. For a StatusType of read replica, the values can + // be replicating, error, stopped, or terminated. + Status *string `type:"string"` + + // This value is currently "read replication." + StatusType *string `type:"string"` +} + +// String returns the string representation +func (s DBInstanceStatusInfo) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DBInstanceStatusInfo) GoString() string { + return s.String() +} + +// Contains the result of a successful invocation of the CreateDBParameterGroup +// action. +// +// This data type is used as a request parameter in the DeleteDBParameterGroup +// action, and as a response element in the DescribeDBParameterGroups action. +type DBParameterGroup struct { + _ struct{} `type:"structure"` + + // Provides the name of the DB parameter group family that this DB parameter + // group is compatible with. + DBParameterGroupFamily *string `type:"string"` + + // Provides the name of the DB parameter group. + DBParameterGroupName *string `type:"string"` + + // Provides the customer-specified description for this DB parameter group. + Description *string `type:"string"` +} + +// String returns the string representation +func (s DBParameterGroup) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DBParameterGroup) GoString() string { + return s.String() +} + +// Contains the result of a successful invocation of the ModifyDBParameterGroup +// or ResetDBParameterGroup action. +type DBParameterGroupNameMessage struct { + _ struct{} `type:"structure"` + + // Provides the name of the DB parameter group. + DBParameterGroupName *string `type:"string"` +} + +// String returns the string representation +func (s DBParameterGroupNameMessage) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DBParameterGroupNameMessage) GoString() string { + return s.String() +} + +// The status of the DB parameter group. +// +// This data type is used as a response element in the following actions: +// +// CreateDBInstance CreateDBInstanceReadReplica DeleteDBInstance ModifyDBInstance +// RebootDBInstance RestoreDBInstanceFromDBSnapshot +type DBParameterGroupStatus struct { + _ struct{} `type:"structure"` + + // The name of the DB parameter group. + DBParameterGroupName *string `type:"string"` + + // The status of parameter updates.
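+ // (Typical values are assumed to include in-sync and pending-reboot; this is + // not an exhaustive list.)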
+ ParameterApplyStatus *string `type:"string"` +} + +// String returns the string representation +func (s DBParameterGroupStatus) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DBParameterGroupStatus) GoString() string { + return s.String() +} + +// Contains the result of a successful invocation of the following actions: +// +// DescribeDBSecurityGroups AuthorizeDBSecurityGroupIngress CreateDBSecurityGroup +// RevokeDBSecurityGroupIngress This data type is used as a response element +// in the DescribeDBSecurityGroups action. +type DBSecurityGroup struct { + _ struct{} `type:"structure"` + + // Provides the description of the DB security group. + DBSecurityGroupDescription *string `type:"string"` + + // Specifies the name of the DB security group. + DBSecurityGroupName *string `type:"string"` + + // Contains a list of EC2SecurityGroup elements. + EC2SecurityGroups []*EC2SecurityGroup `locationNameList:"EC2SecurityGroup" type:"list"` + + // Contains a list of IPRange elements. + IPRanges []*IPRange `locationNameList:"IPRange" type:"list"` + + // Provides the AWS ID of the owner of a specific DB security group. + OwnerId *string `type:"string"` + + // Provides the VpcId of the DB security group. + VpcId *string `type:"string"` +} + +// String returns the string representation +func (s DBSecurityGroup) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DBSecurityGroup) GoString() string { + return s.String() +} + +// This data type is used as a response element in the following actions: +// +// ModifyDBInstance RebootDBInstance RestoreDBInstanceFromDBSnapshot +// RestoreDBInstanceToPointInTime +type DBSecurityGroupMembership struct { + _ struct{} `type:"structure"` + + // The name of the DB security group. + DBSecurityGroupName *string `type:"string"` + + // The status of the DB security group. + Status *string `type:"string"` +} + +// String returns the string representation +func (s DBSecurityGroupMembership) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DBSecurityGroupMembership) GoString() string { + return s.String() +} + +// Contains the result of a successful invocation of the following actions: +// +// CreateDBSnapshot DeleteDBSnapshot This data type is used as a response +// element in the DescribeDBSnapshots action. +type DBSnapshot struct { + _ struct{} `type:"structure"` + + // Specifies the allocated storage size in gigabytes (GB). + AllocatedStorage *int64 `type:"integer"` + + // Specifies the name of the Availability Zone the DB instance was located in + // at the time of the DB snapshot. + AvailabilityZone *string `type:"string"` + + // Specifies the DB instance identifier of the DB instance this DB snapshot + // was created from. + DBInstanceIdentifier *string `type:"string"` + + // Specifies the identifier for the DB snapshot. + DBSnapshotIdentifier *string `type:"string"` + + // Specifies whether the DB snapshot is encrypted. + Encrypted *bool `type:"boolean"` + + // Specifies the name of the database engine. + Engine *string `type:"string"` + + // Specifies the version of the database engine. + EngineVersion *string `type:"string"` + + // Specifies the time when the snapshot was taken, in Universal Coordinated + // Time (UTC). 
+ InstanceCreateTime *time.Time `type:"timestamp" timestampFormat:"iso8601"`
+
+ // Specifies the Provisioned IOPS (I/O operations per second) value of the DB
+ // instance at the time of the snapshot.
+ Iops *int64 `type:"integer"`
+
+ // If Encrypted is true, the KMS key identifier for the encrypted DB snapshot.
+ KmsKeyId *string `type:"string"`
+
+ // License model information for the restored DB instance.
+ LicenseModel *string `type:"string"`
+
+ // Provides the master username for the DB snapshot.
+ MasterUsername *string `type:"string"`
+
+ // Provides the option group name for the DB snapshot.
+ OptionGroupName *string `type:"string"`
+
+ // The percentage of the estimated data that has been transferred.
+ PercentProgress *int64 `type:"integer"`
+
+ // Specifies the port that the database engine was listening on at the time
+ // of the snapshot.
+ Port *int64 `type:"integer"`
+
+ // Provides the time when the snapshot was taken, in Universal Coordinated Time
+ // (UTC).
+ SnapshotCreateTime *time.Time `type:"timestamp" timestampFormat:"iso8601"`
+
+ // Provides the type of the DB snapshot.
+ SnapshotType *string `type:"string"`
+
+ // The DB snapshot ARN that the DB snapshot was copied from. It only has a value
+ // in the case of a cross-customer or cross-region copy.
+ SourceDBSnapshotIdentifier *string `type:"string"`
+
+ // The region that the DB snapshot was created in or copied from.
+ SourceRegion *string `type:"string"`
+
+ // Specifies the status of this DB snapshot.
+ Status *string `type:"string"`
+
+ // Specifies the storage type associated with the DB snapshot.
+ StorageType *string `type:"string"`
+
+ // The ARN from the Key Store with which to associate the instance for TDE encryption.
+ TdeCredentialArn *string `type:"string"`
+
+ // Provides the VPC ID associated with the DB snapshot.
+ VpcId *string `type:"string"`
+}
+
+// String returns the string representation
+func (s DBSnapshot) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DBSnapshot) GoString() string {
+ return s.String()
+}
+
+// Contains the name and values of a manual DB snapshot attribute.
+//
+// Manual DB snapshot attributes are used to authorize other AWS accounts to
+// restore a manual DB snapshot. For more information, see the ModifyDBSnapshotAttribute
+// API.
+type DBSnapshotAttribute struct {
+ _ struct{} `type:"structure"`
+
+ // The name of the manual DB snapshot attribute.
+ //
+ // An attribute name of restore applies to the list of AWS accounts that have
+ // permission to copy or restore the manual DB snapshot.
+ AttributeName *string `type:"string"`
+
+ // The value(s) for the manual DB snapshot attribute.
+ //
+ // If the AttributeName field is restore, then this field returns a list of
+ // AWS account ids that are authorized to copy or restore the manual DB snapshot.
+ // If a value of all is in the list, then the manual DB snapshot is public and
+ // available for any AWS account to copy or restore.
+ AttributeValues []*string `locationNameList:"AttributeValue" type:"list"`
+}
+
+// String returns the string representation
+func (s DBSnapshotAttribute) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DBSnapshotAttribute) GoString() string {
+ return s.String()
+}
+
+// Contains the results of a successful call to the DescribeDBSnapshotAttributes
+// API.
+// +// Manual DB snapshot attributes are used to authorize other AWS accounts to +// copy or restore a manual DB snapshot. For more information, see the ModifyDBSnapshotAttribute +// API. +type DBSnapshotAttributesResult struct { + _ struct{} `type:"structure"` + + // The list of attributes and values for the manual DB snapshot. + DBSnapshotAttributes []*DBSnapshotAttribute `locationNameList:"DBSnapshotAttribute" type:"list"` + + // The identifier of the manual DB snapshot that the attributes apply to. + DBSnapshotIdentifier *string `type:"string"` +} + +// String returns the string representation +func (s DBSnapshotAttributesResult) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DBSnapshotAttributesResult) GoString() string { + return s.String() +} + +// Contains the result of a successful invocation of the following actions: +// +// CreateDBSubnetGroup ModifyDBSubnetGroup DescribeDBSubnetGroups DeleteDBSubnetGroup +// This data type is used as a response element in the DescribeDBSubnetGroups +// action. +type DBSubnetGroup struct { + _ struct{} `type:"structure"` + + // Provides the description of the DB subnet group. + DBSubnetGroupDescription *string `type:"string"` + + // Specifies the name of the DB subnet group. + DBSubnetGroupName *string `type:"string"` + + // Provides the status of the DB subnet group. + SubnetGroupStatus *string `type:"string"` + + // Contains a list of Subnet elements. + Subnets []*Subnet `locationNameList:"Subnet" type:"list"` + + // Provides the VpcId of the DB subnet group. + VpcId *string `type:"string"` +} + +// String returns the string representation +func (s DBSubnetGroup) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DBSubnetGroup) GoString() string { + return s.String() +} + +type DeleteDBClusterInput struct { + _ struct{} `type:"structure"` + + // The DB cluster identifier for the DB cluster to be deleted. This parameter + // isn't case-sensitive. + // + // Constraints: + // + // Must contain from 1 to 63 alphanumeric characters or hyphens First character + // must be a letter Cannot end with a hyphen or contain two consecutive hyphens + DBClusterIdentifier *string `type:"string" required:"true"` + + // The DB cluster snapshot identifier of the new DB cluster snapshot created + // when SkipFinalSnapshot is set to false. + // + // Specifying this parameter and also setting the SkipFinalShapshot parameter + // to true results in an error. Constraints: + // + // Must be 1 to 255 alphanumeric characters First character must be a letter + // Cannot end with a hyphen or contain two consecutive hyphens + FinalDBSnapshotIdentifier *string `type:"string"` + + // Determines whether a final DB cluster snapshot is created before the DB cluster + // is deleted. If true is specified, no DB cluster snapshot is created. If false + // is specified, a DB cluster snapshot is created before the DB cluster is deleted. + // + // You must specify a FinalDBSnapshotIdentifier parameter if SkipFinalSnapshot + // is false. 
Default: false + SkipFinalSnapshot *bool `type:"boolean"` +} + +// String returns the string representation +func (s DeleteDBClusterInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteDBClusterInput) GoString() string { + return s.String() +} + +type DeleteDBClusterOutput struct { + _ struct{} `type:"structure"` + + // Contains the result of a successful invocation of the following actions: + // + // CreateDBCluster DeleteDBCluster FailoverDBCluster ModifyDBCluster + // RestoreDBClusterFromSnapshot This data type is used as a response element + // in the DescribeDBClusters action. + DBCluster *DBCluster `type:"structure"` +} + +// String returns the string representation +func (s DeleteDBClusterOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteDBClusterOutput) GoString() string { + return s.String() +} + +type DeleteDBClusterParameterGroupInput struct { + _ struct{} `type:"structure"` + + // The name of the DB cluster parameter group. + // + // Constraints: + // + // Must be the name of an existing DB cluster parameter group. You cannot + // delete a default DB cluster parameter group. Cannot be associated with any + // DB clusters. + DBClusterParameterGroupName *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteDBClusterParameterGroupInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteDBClusterParameterGroupInput) GoString() string { + return s.String() +} + +type DeleteDBClusterParameterGroupOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteDBClusterParameterGroupOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteDBClusterParameterGroupOutput) GoString() string { + return s.String() +} + +type DeleteDBClusterSnapshotInput struct { + _ struct{} `type:"structure"` + + // The identifier of the DB cluster snapshot to delete. + // + // Constraints: Must be the name of an existing DB cluster snapshot in the + // available state. + DBClusterSnapshotIdentifier *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteDBClusterSnapshotInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteDBClusterSnapshotInput) GoString() string { + return s.String() +} + +type DeleteDBClusterSnapshotOutput struct { + _ struct{} `type:"structure"` + + // Contains the result of a successful invocation of the following actions: + // + // CreateDBClusterSnapshot DeleteDBClusterSnapshot This data type is + // used as a response element in the DescribeDBClusterSnapshots action. + DBClusterSnapshot *DBClusterSnapshot `type:"structure"` +} + +// String returns the string representation +func (s DeleteDBClusterSnapshotOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteDBClusterSnapshotOutput) GoString() string { + return s.String() +} + +type DeleteDBInstanceInput struct { + _ struct{} `type:"structure"` + + // The DB instance identifier for the DB instance to be deleted. This parameter + // isn't case-sensitive. 
+ //
+ // Constraints:
+ //
+ // Must contain from 1 to 63 alphanumeric characters or hyphens First character
+ // must be a letter Cannot end with a hyphen or contain two consecutive hyphens
+ DBInstanceIdentifier *string `type:"string" required:"true"`
+
+ // The DBSnapshotIdentifier of the new DBSnapshot created when SkipFinalSnapshot
+ // is set to false.
+ //
+ // Specifying this parameter and also setting the SkipFinalSnapshot parameter
+ // to true results in an error. Constraints:
+ //
+ // Must be 1 to 255 alphanumeric characters First character must be a letter
+ // Cannot end with a hyphen or contain two consecutive hyphens Cannot be specified
+ // when deleting a Read Replica.
+ FinalDBSnapshotIdentifier *string `type:"string"`
+
+ // Determines whether a final DB snapshot is created before the DB instance
+ // is deleted. If true is specified, no DBSnapshot is created. If false is specified,
+ // a DB snapshot is created before the DB instance is deleted.
+ //
+ // Note that when a DB instance is in a failure state and has a status of 'failed',
+ // 'incompatible-restore', or 'incompatible-network', it can only be deleted
+ // when the SkipFinalSnapshot parameter is set to "true".
+ //
+ // Specify true when deleting a Read Replica.
+ //
+ // The FinalDBSnapshotIdentifier parameter must be specified if SkipFinalSnapshot
+ // is false. Default: false
+ SkipFinalSnapshot *bool `type:"boolean"`
+}
+
+// String returns the string representation
+func (s DeleteDBInstanceInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteDBInstanceInput) GoString() string {
+ return s.String()
+}
+
+type DeleteDBInstanceOutput struct {
+ _ struct{} `type:"structure"`
+
+ // Contains the result of a successful invocation of the following actions:
+ //
+ // CreateDBInstance DeleteDBInstance ModifyDBInstance This data type
+ // is used as a response element in the DescribeDBInstances action.
+ DBInstance *DBInstance `type:"structure"`
+}
+
+// String returns the string representation
+func (s DeleteDBInstanceOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteDBInstanceOutput) GoString() string {
+ return s.String()
+}
+
+type DeleteDBParameterGroupInput struct {
+ _ struct{} `type:"structure"`
+
+ // The name of the DB parameter group.
+ //
+ // Constraints:
+ //
+ // Must be the name of an existing DB parameter group You cannot delete a
+ // default DB parameter group Cannot be associated with any DB instances
+ DBParameterGroupName *string `type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s DeleteDBParameterGroupInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteDBParameterGroupInput) GoString() string {
+ return s.String()
+}
+
+type DeleteDBParameterGroupOutput struct {
+ _ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s DeleteDBParameterGroupOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteDBParameterGroupOutput) GoString() string {
+ return s.String()
+}
+
+type DeleteDBSecurityGroupInput struct {
+ _ struct{} `type:"structure"`
+
+ // The name of the DB security group to delete.
+ //
+ // You cannot delete the default DB security group. 
Constraints: + // + // Must be 1 to 255 alphanumeric characters First character must be a letter + // Cannot end with a hyphen or contain two consecutive hyphens Must not be "Default" + // Cannot contain spaces + DBSecurityGroupName *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteDBSecurityGroupInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteDBSecurityGroupInput) GoString() string { + return s.String() +} + +type DeleteDBSecurityGroupOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteDBSecurityGroupOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteDBSecurityGroupOutput) GoString() string { + return s.String() +} + +type DeleteDBSnapshotInput struct { + _ struct{} `type:"structure"` + + // The DBSnapshot identifier. + // + // Constraints: Must be the name of an existing DB snapshot in the available + // state. + DBSnapshotIdentifier *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteDBSnapshotInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteDBSnapshotInput) GoString() string { + return s.String() +} + +type DeleteDBSnapshotOutput struct { + _ struct{} `type:"structure"` + + // Contains the result of a successful invocation of the following actions: + // + // CreateDBSnapshot DeleteDBSnapshot This data type is used as a response + // element in the DescribeDBSnapshots action. + DBSnapshot *DBSnapshot `type:"structure"` +} + +// String returns the string representation +func (s DeleteDBSnapshotOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteDBSnapshotOutput) GoString() string { + return s.String() +} + +type DeleteDBSubnetGroupInput struct { + _ struct{} `type:"structure"` + + // The name of the database subnet group to delete. + // + // You cannot delete the default subnet group. Constraints: + // + // Must be 1 to 255 alphanumeric characters First character must be a letter + // Cannot end with a hyphen or contain two consecutive hyphens + DBSubnetGroupName *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteDBSubnetGroupInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteDBSubnetGroupInput) GoString() string { + return s.String() +} + +type DeleteDBSubnetGroupOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteDBSubnetGroupOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteDBSubnetGroupOutput) GoString() string { + return s.String() +} + +type DeleteEventSubscriptionInput struct { + _ struct{} `type:"structure"` + + // The name of the RDS event notification subscription you want to delete. 
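+ //
+ // Minimal usage sketch (illustrative only; svc is an assumed *RDS client
+ // and the subscription name is a placeholder):
+ //
+ //   _, err := svc.DeleteEventSubscription(&rds.DeleteEventSubscriptionInput{
+ //       SubscriptionName: aws.String("my-subscription"),
+ //   })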
+ SubscriptionName *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteEventSubscriptionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteEventSubscriptionInput) GoString() string { + return s.String() +} + +type DeleteEventSubscriptionOutput struct { + _ struct{} `type:"structure"` + + // Contains the results of a successful invocation of the DescribeEventSubscriptions + // action. + EventSubscription *EventSubscription `type:"structure"` +} + +// String returns the string representation +func (s DeleteEventSubscriptionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteEventSubscriptionOutput) GoString() string { + return s.String() +} + +type DeleteOptionGroupInput struct { + _ struct{} `type:"structure"` + + // The name of the option group to be deleted. + // + // You cannot delete default option groups. + OptionGroupName *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteOptionGroupInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteOptionGroupInput) GoString() string { + return s.String() +} + +type DeleteOptionGroupOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteOptionGroupOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteOptionGroupOutput) GoString() string { + return s.String() +} + +type DescribeAccountAttributesInput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DescribeAccountAttributesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeAccountAttributesInput) GoString() string { + return s.String() +} + +// Data returned by the DescribeAccountAttributes action. +type DescribeAccountAttributesOutput struct { + _ struct{} `type:"structure"` + + // A list of AccountQuota objects. Within this list, each quota has a name, + // a count of usage toward the quota maximum, and a maximum value for the quota. + AccountQuotas []*AccountQuota `locationNameList:"AccountQuota" type:"list"` +} + +// String returns the string representation +func (s DescribeAccountAttributesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeAccountAttributesOutput) GoString() string { + return s.String() +} + +type DescribeCertificatesInput struct { + _ struct{} `type:"structure"` + + // The user-supplied certificate identifier. If this parameter is specified, + // information for only the identified certificate is returned. This parameter + // isn't case-sensitive. + // + // Constraints: + // + // Must contain from 1 to 63 alphanumeric characters or hyphens First character + // must be a letter Cannot end with a hyphen or contain two consecutive hyphens + CertificateIdentifier *string `type:"string"` + + // This parameter is not currently supported. + Filters []*Filter `locationNameList:"Filter" type:"list"` + + // An optional pagination token provided by a previous DescribeCertificates + // request. If this parameter is specified, the response includes only records + // beyond the marker, up to the value specified by MaxRecords. 
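+ //
+ // Hedged pagination sketch (illustrative only; svc is an assumed *RDS
+ // client): keep re-issuing the request with the returned Marker until it
+ // comes back nil:
+ //
+ //   input := &rds.DescribeCertificatesInput{MaxRecords: aws.Int64(20)}
+ //   for {
+ //       out, err := svc.DescribeCertificates(input)
+ //       if err != nil {
+ //           break // handle the error in real code
+ //       }
+ //       // ... process out.Certificates ...
+ //       if out.Marker == nil {
+ //           break
+ //       }
+ //       input.Marker = out.Marker
+ //   }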
+ Marker *string `type:"string"` + + // The maximum number of records to include in the response. If more records + // exist than the specified MaxRecords value, a pagination token called a marker + // is included in the response so that the remaining results can be retrieved. + // + // Default: 100 + // + // Constraints: Minimum 20, maximum 100. + MaxRecords *int64 `type:"integer"` +} + +// String returns the string representation +func (s DescribeCertificatesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeCertificatesInput) GoString() string { + return s.String() +} + +// Data returned by the DescribeCertificates action. +type DescribeCertificatesOutput struct { + _ struct{} `type:"structure"` + + // The list of Certificate objects for the AWS account. + Certificates []*Certificate `locationNameList:"Certificate" type:"list"` + + // An optional pagination token provided by a previous DescribeCertificates + // request. If this parameter is specified, the response includes only records + // beyond the marker, up to the value specified by MaxRecords . + Marker *string `type:"string"` +} + +// String returns the string representation +func (s DescribeCertificatesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeCertificatesOutput) GoString() string { + return s.String() +} + +type DescribeDBClusterParameterGroupsInput struct { + _ struct{} `type:"structure"` + + // The name of a specific DB cluster parameter group to return details for. + // + // Constraints: + // + // Must be 1 to 255 alphanumeric characters First character must be a letter + // Cannot end with a hyphen or contain two consecutive hyphens + DBClusterParameterGroupName *string `type:"string"` + + // This parameter is not currently supported. + Filters []*Filter `locationNameList:"Filter" type:"list"` + + // An optional pagination token provided by a previous DescribeDBClusterParameterGroups + // request. If this parameter is specified, the response includes only records + // beyond the marker, up to the value specified by MaxRecords. + Marker *string `type:"string"` + + // The maximum number of records to include in the response. If more records + // exist than the specified MaxRecords value, a pagination token called a marker + // is included in the response so that the remaining results can be retrieved. + // + // Default: 100 + // + // Constraints: Minimum 20, maximum 100. + MaxRecords *int64 `type:"integer"` +} + +// String returns the string representation +func (s DescribeDBClusterParameterGroupsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeDBClusterParameterGroupsInput) GoString() string { + return s.String() +} + +type DescribeDBClusterParameterGroupsOutput struct { + _ struct{} `type:"structure"` + + // A list of DB cluster parameter groups. + DBClusterParameterGroups []*DBClusterParameterGroup `locationNameList:"DBClusterParameterGroup" type:"list"` + + // An optional pagination token provided by a previous DescribeDBClusterParameterGroups + // request. If this parameter is specified, the response includes only records + // beyond the marker, up to the value specified by MaxRecords. 
+ Marker *string `type:"string"` +} + +// String returns the string representation +func (s DescribeDBClusterParameterGroupsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeDBClusterParameterGroupsOutput) GoString() string { + return s.String() +} + +type DescribeDBClusterParametersInput struct { + _ struct{} `type:"structure"` + + // The name of a specific DB cluster parameter group to return parameter details + // for. + // + // Constraints: + // + // Must be 1 to 255 alphanumeric characters First character must be a letter + // Cannot end with a hyphen or contain two consecutive hyphens + DBClusterParameterGroupName *string `type:"string" required:"true"` + + // This parameter is not currently supported. + Filters []*Filter `locationNameList:"Filter" type:"list"` + + // An optional pagination token provided by a previous DescribeDBClusterParameters + // request. If this parameter is specified, the response includes only records + // beyond the marker, up to the value specified by MaxRecords. + Marker *string `type:"string"` + + // The maximum number of records to include in the response. If more records + // exist than the specified MaxRecords value, a pagination token called a marker + // is included in the response so that the remaining results can be retrieved. + // + // Default: 100 + // + // Constraints: Minimum 20, maximum 100. + MaxRecords *int64 `type:"integer"` + + // A value that indicates to return only parameters for a specific source. Parameter + // sources can be engine, service, or customer. + Source *string `type:"string"` +} + +// String returns the string representation +func (s DescribeDBClusterParametersInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeDBClusterParametersInput) GoString() string { + return s.String() +} + +// Provides details about a DB cluster parameter group including the parameters +// in the DB cluster parameter group. +type DescribeDBClusterParametersOutput struct { + _ struct{} `type:"structure"` + + // An optional pagination token provided by a previous DescribeDBClusterParameters + // request. If this parameter is specified, the response includes only records + // beyond the marker, up to the value specified by MaxRecords . + Marker *string `type:"string"` + + // Provides a list of parameters for the DB cluster parameter group. + Parameters []*Parameter `locationNameList:"Parameter" type:"list"` +} + +// String returns the string representation +func (s DescribeDBClusterParametersOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeDBClusterParametersOutput) GoString() string { + return s.String() +} + +type DescribeDBClusterSnapshotsInput struct { + _ struct{} `type:"structure"` + + // A DB cluster identifier to retrieve the list of DB cluster snapshots for. + // This parameter cannot be used in conjunction with the DBClusterSnapshotIdentifier + // parameter. This parameter is not case-sensitive. + // + // Constraints: + // + // Must contain from 1 to 63 alphanumeric characters or hyphens First character + // must be a letter Cannot end with a hyphen or contain two consecutive hyphens + DBClusterIdentifier *string `type:"string"` + + // A specific DB cluster snapshot identifier to describe. This parameter cannot + // be used in conjunction with the DBClusterIdentifier parameter. This value + // is stored as a lowercase string. 
+ // + // Constraints: + // + // Must be 1 to 255 alphanumeric characters First character must be a letter + // Cannot end with a hyphen or contain two consecutive hyphens If this is the + // identifier of an automated snapshot, the SnapshotType parameter must also + // be specified. + DBClusterSnapshotIdentifier *string `type:"string"` + + // This parameter is not currently supported. + Filters []*Filter `locationNameList:"Filter" type:"list"` + + // An optional pagination token provided by a previous DescribeDBClusterSnapshots + // request. If this parameter is specified, the response includes only records + // beyond the marker, up to the value specified by MaxRecords. + Marker *string `type:"string"` + + // The maximum number of records to include in the response. If more records + // exist than the specified MaxRecords value, a pagination token called a marker + // is included in the response so that the remaining results can be retrieved. + // + // Default: 100 + // + // Constraints: Minimum 20, maximum 100. + MaxRecords *int64 `type:"integer"` + + // The type of DB cluster snapshots that will be returned. Values can be automated + // or manual. If this parameter is not specified, the returned results will + // include all snapshot types. + SnapshotType *string `type:"string"` +} + +// String returns the string representation +func (s DescribeDBClusterSnapshotsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeDBClusterSnapshotsInput) GoString() string { + return s.String() +} + +// Provides a list of DB cluster snapshots for the user as the result of a call +// to the DescribeDBClusterSnapshots action. +type DescribeDBClusterSnapshotsOutput struct { + _ struct{} `type:"structure"` + + // Provides a list of DB cluster snapshots for the user. + DBClusterSnapshots []*DBClusterSnapshot `locationNameList:"DBClusterSnapshot" type:"list"` + + // An optional pagination token provided by a previous DescribeDBClusterSnapshots + // request. If this parameter is specified, the response includes only records + // beyond the marker, up to the value specified by MaxRecords. + Marker *string `type:"string"` +} + +// String returns the string representation +func (s DescribeDBClusterSnapshotsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeDBClusterSnapshotsOutput) GoString() string { + return s.String() +} + +type DescribeDBClustersInput struct { + _ struct{} `type:"structure"` + + // The user-supplied DB cluster identifier. If this parameter is specified, + // information from only the specific DB cluster is returned. This parameter + // isn't case-sensitive. + // + // Constraints: + // + // Must contain from 1 to 63 alphanumeric characters or hyphens First character + // must be a letter Cannot end with a hyphen or contain two consecutive hyphens + DBClusterIdentifier *string `type:"string"` + + // This parameter is not currently supported. + Filters []*Filter `locationNameList:"Filter" type:"list"` + + // An optional pagination token provided by a previous DescribeDBClusters request. + // If this parameter is specified, the response includes only records beyond + // the marker, up to the value specified by MaxRecords. + Marker *string `type:"string"` + + // The maximum number of records to include in the response. 
If more records + // exist than the specified MaxRecords value, a pagination token called a marker + // is included in the response so that the remaining results can be retrieved. + // + // Default: 100 + // + // Constraints: Minimum 20, maximum 100. + MaxRecords *int64 `type:"integer"` +} + +// String returns the string representation +func (s DescribeDBClustersInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeDBClustersInput) GoString() string { + return s.String() +} + +// Contains the result of a successful invocation of the DescribeDBClusters +// action. +type DescribeDBClustersOutput struct { + _ struct{} `type:"structure"` + + // Contains a list of DB clusters for the user. + DBClusters []*DBCluster `locationNameList:"DBCluster" type:"list"` + + // A pagination token that can be used in a subsequent DescribeDBClusters request. + Marker *string `type:"string"` +} + +// String returns the string representation +func (s DescribeDBClustersOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeDBClustersOutput) GoString() string { + return s.String() +} + +type DescribeDBEngineVersionsInput struct { + _ struct{} `type:"structure"` + + // The name of a specific DB parameter group family to return details for. + // + // Constraints: + // + // Must be 1 to 255 alphanumeric characters First character must be a letter + // Cannot end with a hyphen or contain two consecutive hyphens + DBParameterGroupFamily *string `type:"string"` + + // Indicates that only the default version of the specified engine or engine + // and major version combination is returned. + DefaultOnly *bool `type:"boolean"` + + // The database engine to return. + Engine *string `type:"string"` + + // The database engine version to return. + // + // Example: 5.1.49 + EngineVersion *string `type:"string"` + + // Not currently supported. + Filters []*Filter `locationNameList:"Filter" type:"list"` + + // If this parameter is specified, and if the requested engine supports the + // CharacterSetName parameter for CreateDBInstance, the response includes a + // list of supported character sets for each engine version. + ListSupportedCharacterSets *bool `type:"boolean"` + + // An optional pagination token provided by a previous request. If this parameter + // is specified, the response includes only records beyond the marker, up to + // the value specified by MaxRecords. + Marker *string `type:"string"` + + // The maximum number of records to include in the response. If more than the + // MaxRecords value is available, a pagination token called a marker is included + // in the response so that the following results can be retrieved. + // + // Default: 100 + // + // Constraints: Minimum 20, maximum 100. + MaxRecords *int64 `type:"integer"` +} + +// String returns the string representation +func (s DescribeDBEngineVersionsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeDBEngineVersionsInput) GoString() string { + return s.String() +} + +// Contains the result of a successful invocation of the DescribeDBEngineVersions +// action. +type DescribeDBEngineVersionsOutput struct { + _ struct{} `type:"structure"` + + // A list of DBEngineVersion elements. + DBEngineVersions []*DBEngineVersion `locationNameList:"DBEngineVersion" type:"list"` + + // An optional pagination token provided by a previous request. 
If this parameter + // is specified, the response includes only records beyond the marker, up to + // the value specified by MaxRecords. + Marker *string `type:"string"` +} + +// String returns the string representation +func (s DescribeDBEngineVersionsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeDBEngineVersionsOutput) GoString() string { + return s.String() +} + +type DescribeDBInstancesInput struct { + _ struct{} `type:"structure"` + + // The user-supplied instance identifier. If this parameter is specified, information + // from only the specific DB instance is returned. This parameter isn't case-sensitive. + // + // Constraints: + // + // Must contain from 1 to 63 alphanumeric characters or hyphens First character + // must be a letter Cannot end with a hyphen or contain two consecutive hyphens + DBInstanceIdentifier *string `type:"string"` + + // This parameter is not currently supported. + Filters []*Filter `locationNameList:"Filter" type:"list"` + + // An optional pagination token provided by a previous DescribeDBInstances request. + // If this parameter is specified, the response includes only records beyond + // the marker, up to the value specified by MaxRecords. + Marker *string `type:"string"` + + // The maximum number of records to include in the response. If more records + // exist than the specified MaxRecords value, a pagination token called a marker + // is included in the response so that the remaining results can be retrieved. + // + // Default: 100 + // + // Constraints: Minimum 20, maximum 100. + MaxRecords *int64 `type:"integer"` +} + +// String returns the string representation +func (s DescribeDBInstancesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeDBInstancesInput) GoString() string { + return s.String() +} + +// Contains the result of a successful invocation of the DescribeDBInstances +// action. +type DescribeDBInstancesOutput struct { + _ struct{} `type:"structure"` + + // A list of DBInstance instances. + DBInstances []*DBInstance `locationNameList:"DBInstance" type:"list"` + + // An optional pagination token provided by a previous request. If this parameter + // is specified, the response includes only records beyond the marker, up to + // the value specified by MaxRecords . + Marker *string `type:"string"` +} + +// String returns the string representation +func (s DescribeDBInstancesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeDBInstancesOutput) GoString() string { + return s.String() +} + +// This data type is used as a response element to DescribeDBLogFiles. +type DescribeDBLogFilesDetails struct { + _ struct{} `type:"structure"` + + // A POSIX timestamp when the last log entry was written. + LastWritten *int64 `type:"long"` + + // The name of the log file for the specified DB instance. + LogFileName *string `type:"string"` + + // The size, in bytes, of the log file for the specified DB instance. 
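+ //
+ // Illustrative sketch (svc is an assumed *RDS client; the instance
+ // identifier is a placeholder): listing log files and their sizes:
+ //
+ //   out, err := svc.DescribeDBLogFiles(&rds.DescribeDBLogFilesInput{
+ //       DBInstanceIdentifier: aws.String("mydbinstance"),
+ //   })
+ //   if err == nil {
+ //       for _, f := range out.DescribeDBLogFiles {
+ //           fmt.Println(aws.StringValue(f.LogFileName), aws.Int64Value(f.Size))
+ //       }
+ //   }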
+ Size *int64 `type:"long"` +} + +// String returns the string representation +func (s DescribeDBLogFilesDetails) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeDBLogFilesDetails) GoString() string { + return s.String() +} + +type DescribeDBLogFilesInput struct { + _ struct{} `type:"structure"` + + // The customer-assigned name of the DB instance that contains the log files + // you want to list. + // + // Constraints: + // + // Must contain from 1 to 63 alphanumeric characters or hyphens First character + // must be a letter Cannot end with a hyphen or contain two consecutive hyphens + DBInstanceIdentifier *string `type:"string" required:"true"` + + // Filters the available log files for files written since the specified date, + // in POSIX timestamp format with milliseconds. + FileLastWritten *int64 `type:"long"` + + // Filters the available log files for files larger than the specified size. + FileSize *int64 `type:"long"` + + // Filters the available log files for log file names that contain the specified + // string. + FilenameContains *string `type:"string"` + + // This parameter is not currently supported. + Filters []*Filter `locationNameList:"Filter" type:"list"` + + // The pagination token provided in the previous request. If this parameter + // is specified the response includes only records beyond the marker, up to + // MaxRecords. + Marker *string `type:"string"` + + // The maximum number of records to include in the response. If more records + // exist than the specified MaxRecords value, a pagination token called a marker + // is included in the response so that the remaining results can be retrieved. + MaxRecords *int64 `type:"integer"` +} + +// String returns the string representation +func (s DescribeDBLogFilesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeDBLogFilesInput) GoString() string { + return s.String() +} + +// The response from a call to DescribeDBLogFiles. +type DescribeDBLogFilesOutput struct { + _ struct{} `type:"structure"` + + // The DB log files returned. + DescribeDBLogFiles []*DescribeDBLogFilesDetails `locationNameList:"DescribeDBLogFilesDetails" type:"list"` + + // A pagination token that can be used in a subsequent DescribeDBLogFiles request. + Marker *string `type:"string"` +} + +// String returns the string representation +func (s DescribeDBLogFilesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeDBLogFilesOutput) GoString() string { + return s.String() +} + +type DescribeDBParameterGroupsInput struct { + _ struct{} `type:"structure"` + + // The name of a specific DB parameter group to return details for. + // + // Constraints: + // + // Must be 1 to 255 alphanumeric characters First character must be a letter + // Cannot end with a hyphen or contain two consecutive hyphens + DBParameterGroupName *string `type:"string"` + + // This parameter is not currently supported. + Filters []*Filter `locationNameList:"Filter" type:"list"` + + // An optional pagination token provided by a previous DescribeDBParameterGroups + // request. If this parameter is specified, the response includes only records + // beyond the marker, up to the value specified by MaxRecords. + Marker *string `type:"string"` + + // The maximum number of records to include in the response. 
If more records + // exist than the specified MaxRecords value, a pagination token called a marker + // is included in the response so that the remaining results can be retrieved. + // + // Default: 100 + // + // Constraints: Minimum 20, maximum 100. + MaxRecords *int64 `type:"integer"` +} + +// String returns the string representation +func (s DescribeDBParameterGroupsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeDBParameterGroupsInput) GoString() string { + return s.String() +} + +// Contains the result of a successful invocation of the DescribeDBParameterGroups +// action. +type DescribeDBParameterGroupsOutput struct { + _ struct{} `type:"structure"` + + // A list of DBParameterGroup instances. + DBParameterGroups []*DBParameterGroup `locationNameList:"DBParameterGroup" type:"list"` + + // An optional pagination token provided by a previous request. If this parameter + // is specified, the response includes only records beyond the marker, up to + // the value specified by MaxRecords. + Marker *string `type:"string"` +} + +// String returns the string representation +func (s DescribeDBParameterGroupsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeDBParameterGroupsOutput) GoString() string { + return s.String() +} + +type DescribeDBParametersInput struct { + _ struct{} `type:"structure"` + + // The name of a specific DB parameter group to return details for. + // + // Constraints: + // + // Must be 1 to 255 alphanumeric characters First character must be a letter + // Cannot end with a hyphen or contain two consecutive hyphens + DBParameterGroupName *string `type:"string" required:"true"` + + // This parameter is not currently supported. + Filters []*Filter `locationNameList:"Filter" type:"list"` + + // An optional pagination token provided by a previous DescribeDBParameters + // request. If this parameter is specified, the response includes only records + // beyond the marker, up to the value specified by MaxRecords. + Marker *string `type:"string"` + + // The maximum number of records to include in the response. If more records + // exist than the specified MaxRecords value, a pagination token called a marker + // is included in the response so that the remaining results can be retrieved. + // + // Default: 100 + // + // Constraints: Minimum 20, maximum 100. + MaxRecords *int64 `type:"integer"` + + // The parameter types to return. + // + // Default: All parameter types returned + // + // Valid Values: user | system | engine-default + Source *string `type:"string"` +} + +// String returns the string representation +func (s DescribeDBParametersInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeDBParametersInput) GoString() string { + return s.String() +} + +// Contains the result of a successful invocation of the DescribeDBParameters +// action. +type DescribeDBParametersOutput struct { + _ struct{} `type:"structure"` + + // An optional pagination token provided by a previous request. If this parameter + // is specified, the response includes only records beyond the marker, up to + // the value specified by MaxRecords. + Marker *string `type:"string"` + + // A list of Parameter values. 
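+ //
+ // Illustrative sketch (out is an assumed *DescribeDBParametersOutput):
+ //
+ //   for _, p := range out.Parameters {
+ //       fmt.Println(aws.StringValue(p.ParameterName), aws.StringValue(p.ParameterValue))
+ //   }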
+ Parameters []*Parameter `locationNameList:"Parameter" type:"list"`
+}
+
+// String returns the string representation
+func (s DescribeDBParametersOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeDBParametersOutput) GoString() string {
+ return s.String()
+}
+
+type DescribeDBSecurityGroupsInput struct {
+ _ struct{} `type:"structure"`
+
+ // The name of the DB security group to return details for.
+ DBSecurityGroupName *string `type:"string"`
+
+ // This parameter is not currently supported.
+ Filters []*Filter `locationNameList:"Filter" type:"list"`
+
+ // An optional pagination token provided by a previous DescribeDBSecurityGroups
+ // request. If this parameter is specified, the response includes only records
+ // beyond the marker, up to the value specified by MaxRecords.
+ Marker *string `type:"string"`
+
+ // The maximum number of records to include in the response. If more records
+ // exist than the specified MaxRecords value, a pagination token called a marker
+ // is included in the response so that the remaining results can be retrieved.
+ //
+ // Default: 100
+ //
+ // Constraints: Minimum 20, maximum 100.
+ MaxRecords *int64 `type:"integer"`
+}
+
+// String returns the string representation
+func (s DescribeDBSecurityGroupsInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeDBSecurityGroupsInput) GoString() string {
+ return s.String()
+}
+
+// Contains the result of a successful invocation of the DescribeDBSecurityGroups
+// action.
+type DescribeDBSecurityGroupsOutput struct {
+ _ struct{} `type:"structure"`
+
+ // A list of DBSecurityGroup instances.
+ DBSecurityGroups []*DBSecurityGroup `locationNameList:"DBSecurityGroup" type:"list"`
+
+ // An optional pagination token provided by a previous request. If this parameter
+ // is specified, the response includes only records beyond the marker, up to
+ // the value specified by MaxRecords.
+ Marker *string `type:"string"`
+}
+
+// String returns the string representation
+func (s DescribeDBSecurityGroupsOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeDBSecurityGroupsOutput) GoString() string {
+ return s.String()
+}
+
+type DescribeDBSnapshotAttributesInput struct {
+ _ struct{} `type:"structure"`
+
+ // The identifier for the DB snapshot to describe the attributes for.
+ DBSnapshotIdentifier *string `type:"string"`
+}
+
+// String returns the string representation
+func (s DescribeDBSnapshotAttributesInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeDBSnapshotAttributesInput) GoString() string {
+ return s.String()
+}
+
+type DescribeDBSnapshotAttributesOutput struct {
+ _ struct{} `type:"structure"`
+
+ // Contains the results of a successful call to the DescribeDBSnapshotAttributes
+ // API.
+ //
+ // Manual DB snapshot attributes are used to authorize other AWS accounts to
+ // copy or restore a manual DB snapshot. For more information, see the ModifyDBSnapshotAttribute
+ // API.
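+ //
+ // Hedged sketch (out is an assumed *DescribeDBSnapshotAttributesOutput):
+ // checking whether the snapshot's restore attribute marks it public, per
+ // the attribute semantics documented on DBSnapshotAttribute:
+ //
+ //   for _, a := range out.DBSnapshotAttributesResult.DBSnapshotAttributes {
+ //       if aws.StringValue(a.AttributeName) != "restore" {
+ //           continue
+ //       }
+ //       for _, v := range a.AttributeValues {
+ //           if aws.StringValue(v) == "all" {
+ //               // the manual snapshot is public
+ //           }
+ //       }
+ //   }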
+ DBSnapshotAttributesResult *DBSnapshotAttributesResult `type:"structure"`
+}
+
+// String returns the string representation
+func (s DescribeDBSnapshotAttributesOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeDBSnapshotAttributesOutput) GoString() string {
+ return s.String()
+}
+
+type DescribeDBSnapshotsInput struct {
+ _ struct{} `type:"structure"`
+
+ // A DB instance identifier to retrieve the list of DB snapshots for. This parameter
+ // cannot be used in conjunction with DBSnapshotIdentifier. This parameter is
+ // not case-sensitive.
+ //
+ // Constraints:
+ //
+ // Must contain from 1 to 63 alphanumeric characters or hyphens First character
+ // must be a letter Cannot end with a hyphen or contain two consecutive hyphens
+ DBInstanceIdentifier *string `type:"string"`
+
+ // A specific DB snapshot identifier to describe. This parameter cannot be used
+ // in conjunction with DBInstanceIdentifier. This value is stored as a lowercase
+ // string.
+ //
+ // Constraints:
+ //
+ // Must be 1 to 255 alphanumeric characters. First character must be a letter.
+ // Cannot end with a hyphen or contain two consecutive hyphens. If this is the
+ // identifier of an automated snapshot, the SnapshotType parameter must also
+ // be specified.
+ DBSnapshotIdentifier *string `type:"string"`
+
+ // This parameter is not currently supported.
+ Filters []*Filter `locationNameList:"Filter" type:"list"`
+
+ // True to include manual DB snapshots that are public and can be copied or
+ // restored by any AWS account; otherwise false. The default is false.
+ //
+ // A manual DB snapshot is shared as public by the ModifyDBSnapshotAttribute
+ // API.
+ IncludePublic *bool `type:"boolean"`
+
+ // True to include shared manual DB snapshots from other AWS accounts that this
+ // AWS account has been given permission to copy or restore; otherwise false.
+ // The default is false.
+ //
+ // An AWS account is given permission to restore a manual DB snapshot from
+ // another AWS account by the ModifyDBSnapshotAttribute API.
+ IncludeShared *bool `type:"boolean"`
+
+ // An optional pagination token provided by a previous DescribeDBSnapshots request.
+ // If this parameter is specified, the response includes only records beyond
+ // the marker, up to the value specified by MaxRecords.
+ Marker *string `type:"string"`
+
+ // The maximum number of records to include in the response. If more records
+ // exist than the specified MaxRecords value, a pagination token called a marker
+ // is included in the response so that the remaining results can be retrieved.
+ //
+ // Default: 100
+ //
+ // Constraints: Minimum 20, maximum 100.
+ MaxRecords *int64 `type:"integer"`
+
+ // The type of snapshots that will be returned. You can specify one of the following
+ // values:
+ //
+ // automated - Return all DB snapshots that have been automatically taken
+ // by Amazon RDS for my AWS account. manual - Return all DB snapshots that have
+ // been taken by my AWS account. shared - Return all manual DB snapshots that
+ // have been shared to my AWS account. public - Return all DB snapshots that
+ // have been marked as public. If you do not specify a SnapshotType, then both
+ // automated and manual snapshots are returned. You can include shared snapshots
+ // with these results by setting the IncludeShared parameter to true. You can
+ // include public snapshots with these results by setting the IncludePublic
+ // parameter to true. 
+ //
+ // The IncludeShared and IncludePublic parameters do not apply for SnapshotType
+ // values of manual or automated. The IncludePublic parameter does not apply
+ // when SnapshotType is set to shared. The IncludeShared parameter does not
+ // apply when SnapshotType is set to public.
+ SnapshotType *string `type:"string"`
+}
+
+// String returns the string representation
+func (s DescribeDBSnapshotsInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeDBSnapshotsInput) GoString() string {
+ return s.String()
+}
+
+// Contains the result of a successful invocation of the DescribeDBSnapshots
+// action.
+type DescribeDBSnapshotsOutput struct {
+ _ struct{} `type:"structure"`
+
+ // A list of DBSnapshot instances.
+ DBSnapshots []*DBSnapshot `locationNameList:"DBSnapshot" type:"list"`
+
+ // An optional pagination token provided by a previous request. If this parameter
+ // is specified, the response includes only records beyond the marker, up to
+ // the value specified by MaxRecords.
+ Marker *string `type:"string"`
+}
+
+// String returns the string representation
+func (s DescribeDBSnapshotsOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeDBSnapshotsOutput) GoString() string {
+ return s.String()
+}
+
+type DescribeDBSubnetGroupsInput struct {
+ _ struct{} `type:"structure"`
+
+ // The name of the DB subnet group to return details for.
+ DBSubnetGroupName *string `type:"string"`
+
+ // This parameter is not currently supported.
+ Filters []*Filter `locationNameList:"Filter" type:"list"`
+
+ // An optional pagination token provided by a previous DescribeDBSubnetGroups
+ // request. If this parameter is specified, the response includes only records
+ // beyond the marker, up to the value specified by MaxRecords.
+ Marker *string `type:"string"`
+
+ // The maximum number of records to include in the response. If more records
+ // exist than the specified MaxRecords value, a pagination token called a marker
+ // is included in the response so that the remaining results can be retrieved.
+ //
+ // Default: 100
+ //
+ // Constraints: Minimum 20, maximum 100.
+ MaxRecords *int64 `type:"integer"`
+}
+
+// String returns the string representation
+func (s DescribeDBSubnetGroupsInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeDBSubnetGroupsInput) GoString() string {
+ return s.String()
+}
+
+// Contains the result of a successful invocation of the DescribeDBSubnetGroups
+// action.
+type DescribeDBSubnetGroupsOutput struct {
+ _ struct{} `type:"structure"`
+
+ // A list of DBSubnetGroup instances.
+ DBSubnetGroups []*DBSubnetGroup `locationNameList:"DBSubnetGroup" type:"list"`
+
+ // An optional pagination token provided by a previous request. If this parameter
+ // is specified, the response includes only records beyond the marker, up to
+ // the value specified by MaxRecords.
+ Marker *string `type:"string"`
+}
+
+// String returns the string representation
+func (s DescribeDBSubnetGroupsOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeDBSubnetGroupsOutput) GoString() string {
+ return s.String()
+}
+
+type DescribeEngineDefaultClusterParametersInput struct {
+ _ struct{} `type:"structure"`
+
+ // The name of the DB cluster parameter group family to return engine parameter
+ // information for.
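+ //
+ // Minimal usage sketch (illustrative only; svc is an assumed *RDS client
+ // and "aurora5.6" is just an example family name):
+ //
+ //   out, err := svc.DescribeEngineDefaultClusterParameters(
+ //       &rds.DescribeEngineDefaultClusterParametersInput{
+ //           DBParameterGroupFamily: aws.String("aurora5.6"),
+ //       })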
+ DBParameterGroupFamily *string `type:"string" required:"true"` + + // This parameter is not currently supported. + Filters []*Filter `locationNameList:"Filter" type:"list"` + + // An optional pagination token provided by a previous DescribeEngineDefaultClusterParameters + // request. If this parameter is specified, the response includes only records + // beyond the marker, up to the value specified by MaxRecords. + Marker *string `type:"string"` + + // The maximum number of records to include in the response. If more records + // exist than the specified MaxRecords value, a pagination token called a marker + // is included in the response so that the remaining results can be retrieved. + // + // Default: 100 + // + // Constraints: Minimum 20, maximum 100. + MaxRecords *int64 `type:"integer"` +} + +// String returns the string representation +func (s DescribeEngineDefaultClusterParametersInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeEngineDefaultClusterParametersInput) GoString() string { + return s.String() +} + +type DescribeEngineDefaultClusterParametersOutput struct { + _ struct{} `type:"structure"` + + // Contains the result of a successful invocation of the DescribeEngineDefaultParameters + // action. + EngineDefaults *EngineDefaults `type:"structure"` +} + +// String returns the string representation +func (s DescribeEngineDefaultClusterParametersOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeEngineDefaultClusterParametersOutput) GoString() string { + return s.String() +} + +type DescribeEngineDefaultParametersInput struct { + _ struct{} `type:"structure"` + + // The name of the DB parameter group family. + DBParameterGroupFamily *string `type:"string" required:"true"` + + // Not currently supported. + Filters []*Filter `locationNameList:"Filter" type:"list"` + + // An optional pagination token provided by a previous DescribeEngineDefaultParameters + // request. If this parameter is specified, the response includes only records + // beyond the marker, up to the value specified by MaxRecords. + Marker *string `type:"string"` + + // The maximum number of records to include in the response. If more records + // exist than the specified MaxRecords value, a pagination token called a marker + // is included in the response so that the remaining results can be retrieved. + // + // Default: 100 + // + // Constraints: Minimum 20, maximum 100. + MaxRecords *int64 `type:"integer"` +} + +// String returns the string representation +func (s DescribeEngineDefaultParametersInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeEngineDefaultParametersInput) GoString() string { + return s.String() +} + +type DescribeEngineDefaultParametersOutput struct { + _ struct{} `type:"structure"` + + // Contains the result of a successful invocation of the DescribeEngineDefaultParameters + // action. + EngineDefaults *EngineDefaults `type:"structure"` +} + +// String returns the string representation +func (s DescribeEngineDefaultParametersOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeEngineDefaultParametersOutput) GoString() string { + return s.String() +} + +type DescribeEventCategoriesInput struct { + _ struct{} `type:"structure"` + + // This parameter is not currently supported. 
+ Filters []*Filter `locationNameList:"Filter" type:"list"`
+
+ // The type of source that will be generating the events.
+ //
+ // Valid values: db-instance | db-parameter-group | db-security-group | db-snapshot
+ SourceType *string `type:"string"`
+}
+
+// String returns the string representation
+func (s DescribeEventCategoriesInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeEventCategoriesInput) GoString() string {
+ return s.String()
+}
+
+// Data returned from the DescribeEventCategories action.
+type DescribeEventCategoriesOutput struct {
+ _ struct{} `type:"structure"`
+
+ // A list of EventCategoriesMap data types.
+ EventCategoriesMapList []*EventCategoriesMap `locationNameList:"EventCategoriesMap" type:"list"`
+}
+
+// String returns the string representation
+func (s DescribeEventCategoriesOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeEventCategoriesOutput) GoString() string {
+ return s.String()
+}
+
+type DescribeEventSubscriptionsInput struct {
+ _ struct{} `type:"structure"`
+
+ // This parameter is not currently supported.
+ Filters []*Filter `locationNameList:"Filter" type:"list"`
+
+ // An optional pagination token provided by a previous DescribeEventSubscriptions
+ // request. If this parameter is specified, the response includes only records
+ // beyond the marker, up to the value specified by MaxRecords.
+ Marker *string `type:"string"`
+
+ // The maximum number of records to include in the response. If more records
+ // exist than the specified MaxRecords value, a pagination token called a marker
+ // is included in the response so that the remaining results can be retrieved.
+ //
+ // Default: 100
+ //
+ // Constraints: Minimum 20, maximum 100.
+ MaxRecords *int64 `type:"integer"`
+
+ // The name of the RDS event notification subscription you want to describe.
+ SubscriptionName *string `type:"string"`
+}
+
+// String returns the string representation
+func (s DescribeEventSubscriptionsInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeEventSubscriptionsInput) GoString() string {
+ return s.String()
+}
+
+// Data returned by the DescribeEventSubscriptions action.
+type DescribeEventSubscriptionsOutput struct {
+ _ struct{} `type:"structure"`
+
+ // A list of EventSubscriptions data types.
+ EventSubscriptionsList []*EventSubscription `locationNameList:"EventSubscription" type:"list"`
+
+ // An optional pagination token provided by a previous DescribeEventSubscriptions
+ // request. If this parameter is specified, the response includes only records
+ // beyond the marker, up to the value specified by MaxRecords.
+ Marker *string `type:"string"`
+}
+
+// String returns the string representation
+func (s DescribeEventSubscriptionsOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeEventSubscriptionsOutput) GoString() string {
+ return s.String()
+}
+
+type DescribeEventsInput struct {
+ _ struct{} `type:"structure"`
+
+ // The number of minutes to retrieve events for.
+ //
+ // Default: 60
+ Duration *int64 `type:"integer"`
+
+ // The end of the time interval for which to retrieve events, specified in ISO
+ // 8601 format. For more information about ISO 8601, go to the ISO8601 Wikipedia
+ // page.
(http://en.wikipedia.org/wiki/ISO_8601)
+ //
+ // Example: 2009-07-08T18:00Z
+ EndTime *time.Time `type:"timestamp" timestampFormat:"iso8601"`
+
+ // A list of event categories that trigger notifications for an event notification
+ // subscription.
+ EventCategories []*string `locationNameList:"EventCategory" type:"list"`
+
+ // This parameter is not currently supported.
+ Filters []*Filter `locationNameList:"Filter" type:"list"`
+
+ // An optional pagination token provided by a previous DescribeEvents request.
+ // If this parameter is specified, the response includes only records beyond
+ // the marker, up to the value specified by MaxRecords.
+ Marker *string `type:"string"`
+
+ // The maximum number of records to include in the response. If more records
+ // exist than the specified MaxRecords value, a pagination token called a marker
+ // is included in the response so that the remaining results can be retrieved.
+ //
+ // Default: 100
+ //
+ // Constraints: Minimum 20, maximum 100.
+ MaxRecords *int64 `type:"integer"`
+
+ // The identifier of the event source for which events will be returned. If
+ // not specified, then all sources are included in the response.
+ //
+ // Constraints:
+ //
+ // If SourceIdentifier is supplied, SourceType must also be provided. If the
+ // source type is DBInstance, then a DBInstanceIdentifier must be supplied.
+ // If the source type is DBSecurityGroup, a DBSecurityGroupName must be supplied.
+ // If the source type is DBParameterGroup, a DBParameterGroupName must be supplied.
+ // If the source type is DBSnapshot, a DBSnapshotIdentifier must be supplied.
+ // Cannot end with a hyphen or contain two consecutive hyphens.
+ SourceIdentifier *string `type:"string"`
+
+ // The event source to retrieve events for. If no value is specified, all events
+ // are returned.
+ SourceType *string `type:"string" enum:"SourceType"`
+
+ // The beginning of the time interval to retrieve events for, specified in ISO
+ // 8601 format. For more information about ISO 8601, go to the ISO8601 Wikipedia
+ // page. (http://en.wikipedia.org/wiki/ISO_8601)
+ //
+ // Example: 2009-07-08T18:00Z
+ StartTime *time.Time `type:"timestamp" timestampFormat:"iso8601"`
+}
+
+// String returns the string representation
+func (s DescribeEventsInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeEventsInput) GoString() string {
+ return s.String()
+}
+
+// Contains the result of a successful invocation of the DescribeEvents action.
+type DescribeEventsOutput struct {
+ _ struct{} `type:"structure"`
+
+ // A list of Event instances.
+ Events []*Event `locationNameList:"Event" type:"list"`
+
+ // An optional pagination token provided by a previous DescribeEvents request.
+ // If this parameter is specified, the response includes only records beyond
+ // the marker, up to the value specified by MaxRecords.
+ Marker *string `type:"string"`
+}
+
+// String returns the string representation
+func (s DescribeEventsOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeEventsOutput) GoString() string {
+ return s.String()
+}
+
+type DescribeOptionGroupOptionsInput struct {
+ _ struct{} `type:"structure"`
+
+ // A required parameter. Options available for the given engine name will be
+ // described.
+ EngineName *string `type:"string" required:"true"`
+
+ // This parameter is not currently supported.
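+ //
+ // Editor's illustrative sketch (the engine and version values are assumptions;
+ // svc is assumed to be an *rds.RDS client):
+ //
+ //    out, err := svc.DescribeOptionGroupOptions(&rds.DescribeOptionGroupOptionsInput{
+ //        EngineName:         aws.String("mysql"),
+ //        MajorEngineVersion: aws.String("5.6"),
+ //    })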
+ Filters []*Filter `locationNameList:"Filter" type:"list"` + + // If specified, filters the results to include only options for the specified + // major engine version. + MajorEngineVersion *string `type:"string"` + + // An optional pagination token provided by a previous request. If this parameter + // is specified, the response includes only records beyond the marker, up to + // the value specified by MaxRecords. + Marker *string `type:"string"` + + // The maximum number of records to include in the response. If more records + // exist than the specified MaxRecords value, a pagination token called a marker + // is included in the response so that the remaining results can be retrieved. + // + // Default: 100 + // + // Constraints: Minimum 20, maximum 100. + MaxRecords *int64 `type:"integer"` +} + +// String returns the string representation +func (s DescribeOptionGroupOptionsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeOptionGroupOptionsInput) GoString() string { + return s.String() +} + +type DescribeOptionGroupOptionsOutput struct { + _ struct{} `type:"structure"` + + // An optional pagination token provided by a previous request. If this parameter + // is specified, the response includes only records beyond the marker, up to + // the value specified by MaxRecords. + Marker *string `type:"string"` + + // List of available option group options. + OptionGroupOptions []*OptionGroupOption `locationNameList:"OptionGroupOption" type:"list"` +} + +// String returns the string representation +func (s DescribeOptionGroupOptionsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeOptionGroupOptionsOutput) GoString() string { + return s.String() +} + +type DescribeOptionGroupsInput struct { + _ struct{} `type:"structure"` + + // Filters the list of option groups to only include groups associated with + // a specific database engine. + EngineName *string `type:"string"` + + // This parameter is not currently supported. + Filters []*Filter `locationNameList:"Filter" type:"list"` + + // Filters the list of option groups to only include groups associated with + // a specific database engine version. If specified, then EngineName must also + // be specified. + MajorEngineVersion *string `type:"string"` + + // An optional pagination token provided by a previous DescribeOptionGroups + // request. If this parameter is specified, the response includes only records + // beyond the marker, up to the value specified by MaxRecords. + Marker *string `type:"string"` + + // The maximum number of records to include in the response. If more records + // exist than the specified MaxRecords value, a pagination token called a marker + // is included in the response so that the remaining results can be retrieved. + // + // Default: 100 + // + // Constraints: Minimum 20, maximum 100. + MaxRecords *int64 `type:"integer"` + + // The name of the option group to describe. Cannot be supplied together with + // EngineName or MajorEngineVersion. + OptionGroupName *string `type:"string"` +} + +// String returns the string representation +func (s DescribeOptionGroupsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeOptionGroupsInput) GoString() string { + return s.String() +} + +// List of option groups. 
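+//
+// A hedged sketch of the Marker/MaxRecords pagination pattern these fields
+// describe (svc is assumed to be an *rds.RDS client; error handling elided):
+//
+//    in := &rds.DescribeOptionGroupsInput{MaxRecords: aws.Int64(20)}
+//    for {
+//        page, err := svc.DescribeOptionGroups(in)
+//        if err != nil {
+//            break // handle the error in real code
+//        }
+//        // ... consume page.OptionGroupsList ...
+//        if page.Marker == nil {
+//            break // no more records beyond this page
+//        }
+//        in.Marker = page.Marker
+//    }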
+type DescribeOptionGroupsOutput struct {
+ _ struct{} `type:"structure"`
+
+ // An optional pagination token provided by a previous request. If this parameter
+ // is specified, the response includes only records beyond the marker, up to
+ // the value specified by MaxRecords.
+ Marker *string `type:"string"`
+
+ // List of option groups.
+ OptionGroupsList []*OptionGroup `locationNameList:"OptionGroup" type:"list"`
+}
+
+// String returns the string representation
+func (s DescribeOptionGroupsOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeOptionGroupsOutput) GoString() string {
+ return s.String()
+}
+
+type DescribeOrderableDBInstanceOptionsInput struct {
+ _ struct{} `type:"structure"`
+
+ // The DB instance class filter value. Specify this parameter to show only the
+ // available offerings matching the specified DB instance class.
+ DBInstanceClass *string `type:"string"`
+
+ // The name of the engine to retrieve DB instance options for.
+ Engine *string `type:"string" required:"true"`
+
+ // The engine version filter value. Specify this parameter to show only the
+ // available offerings matching the specified engine version.
+ EngineVersion *string `type:"string"`
+
+ // This parameter is not currently supported.
+ Filters []*Filter `locationNameList:"Filter" type:"list"`
+
+ // The license model filter value. Specify this parameter to show only the available
+ // offerings matching the specified license model.
+ LicenseModel *string `type:"string"`
+
+ // An optional pagination token provided by a previous DescribeOrderableDBInstanceOptions
+ // request. If this parameter is specified, the response includes only records
+ // beyond the marker, up to the value specified by MaxRecords.
+ Marker *string `type:"string"`
+
+ // The maximum number of records to include in the response. If more records
+ // exist than the specified MaxRecords value, a pagination token called a marker
+ // is included in the response so that the remaining results can be retrieved.
+ //
+ // Default: 100
+ //
+ // Constraints: Minimum 20, maximum 100.
+ MaxRecords *int64 `type:"integer"`
+
+ // The VPC filter value. Specify this parameter to show only the available VPC
+ // or non-VPC offerings.
+ Vpc *bool `type:"boolean"`
+}
+
+// String returns the string representation
+func (s DescribeOrderableDBInstanceOptionsInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeOrderableDBInstanceOptionsInput) GoString() string {
+ return s.String()
+}
+
+// Contains the result of a successful invocation of the DescribeOrderableDBInstanceOptions
+// action.
+type DescribeOrderableDBInstanceOptionsOutput struct {
+ _ struct{} `type:"structure"`
+
+ // An optional pagination token provided by a previous OrderableDBInstanceOptions
+ // request. If this parameter is specified, the response includes only records
+ // beyond the marker, up to the value specified by MaxRecords.
+ Marker *string `type:"string"`
+
+ // An OrderableDBInstanceOption structure containing information about orderable
+ // options for the DB instance.
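+ //
+ // Editor's illustrative sketch of the request that produces this list (the
+ // engine name is an assumption; svc is assumed to be an *rds.RDS client):
+ //
+ //    out, err := svc.DescribeOrderableDBInstanceOptions(
+ //        &rds.DescribeOrderableDBInstanceOptionsInput{
+ //            Engine: aws.String("postgres"),
+ //            Vpc:    aws.Bool(true),
+ //        })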
+ OrderableDBInstanceOptions []*OrderableDBInstanceOption `locationNameList:"OrderableDBInstanceOption" type:"list"`
+}
+
+// String returns the string representation
+func (s DescribeOrderableDBInstanceOptionsOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeOrderableDBInstanceOptionsOutput) GoString() string {
+ return s.String()
+}
+
+type DescribePendingMaintenanceActionsInput struct {
+ _ struct{} `type:"structure"`
+
+ // A filter that specifies one or more resources to return pending maintenance
+ // actions for.
+ //
+ // Supported filters:
+ //
+ // db-instance-id - Accepts DB instance identifiers and DB instance Amazon
+ // Resource Names (ARNs). The results list will only include pending maintenance
+ // actions for the DB instances identified by these ARNs.
+ Filters []*Filter `locationNameList:"Filter" type:"list"`
+
+ // An optional pagination token provided by a previous DescribePendingMaintenanceActions
+ // request. If this parameter is specified, the response includes only records
+ // beyond the marker, up to a number of records specified by MaxRecords.
+ Marker *string `type:"string"`
+
+ // The maximum number of records to include in the response. If more records
+ // exist than the specified MaxRecords value, a pagination token called a marker
+ // is included in the response so that the remaining results can be retrieved.
+ //
+ // Default: 100
+ //
+ // Constraints: Minimum 20, maximum 100.
+ MaxRecords *int64 `type:"integer"`
+
+ // The ARN of a resource to return pending maintenance actions for.
+ ResourceIdentifier *string `type:"string"`
+}
+
+// String returns the string representation
+func (s DescribePendingMaintenanceActionsInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribePendingMaintenanceActionsInput) GoString() string {
+ return s.String()
+}
+
+// Data returned from the DescribePendingMaintenanceActions action.
+type DescribePendingMaintenanceActionsOutput struct {
+ _ struct{} `type:"structure"`
+
+ // An optional pagination token provided by a previous DescribePendingMaintenanceActions
+ // request. If this parameter is specified, the response includes only records
+ // beyond the marker, up to a number of records specified by MaxRecords.
+ Marker *string `type:"string"`
+
+ // A list of the pending maintenance actions for the resource.
+ PendingMaintenanceActions []*ResourcePendingMaintenanceActions `locationNameList:"ResourcePendingMaintenanceActions" type:"list"`
+}
+
+// String returns the string representation
+func (s DescribePendingMaintenanceActionsOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribePendingMaintenanceActionsOutput) GoString() string {
+ return s.String()
+}
+
+type DescribeReservedDBInstancesInput struct {
+ _ struct{} `type:"structure"`
+
+ // The DB instance class filter value. Specify this parameter to show only those
+ // reservations matching the specified DB instance class.
+ DBInstanceClass *string `type:"string"`
+
+ // The duration filter value, specified in years or seconds. Specify this parameter
+ // to show only reservations for this duration.
+ //
+ // Valid Values: 1 | 3 | 31536000 | 94608000
+ Duration *string `type:"string"`
+
+ // This parameter is not currently supported.
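+ //
+ // Editor's illustrative sketch using the valid values documented above for
+ // Duration and OfferingType (svc is assumed to be an *rds.RDS client):
+ //
+ //    out, err := svc.DescribeReservedDBInstances(&rds.DescribeReservedDBInstancesInput{
+ //        Duration:     aws.String("31536000"), // one year, expressed in seconds
+ //        OfferingType: aws.String("No Upfront"),
+ //    })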
+ Filters []*Filter `locationNameList:"Filter" type:"list"` + + // An optional pagination token provided by a previous request. If this parameter + // is specified, the response includes only records beyond the marker, up to + // the value specified by MaxRecords. + Marker *string `type:"string"` + + // The maximum number of records to include in the response. If more than the + // MaxRecords value is available, a pagination token called a marker is included + // in the response so that the following results can be retrieved. + // + // Default: 100 + // + // Constraints: Minimum 20, maximum 100. + MaxRecords *int64 `type:"integer"` + + // The Multi-AZ filter value. Specify this parameter to show only those reservations + // matching the specified Multi-AZ parameter. + MultiAZ *bool `type:"boolean"` + + // The offering type filter value. Specify this parameter to show only the available + // offerings matching the specified offering type. + // + // Valid Values: "Partial Upfront" | "All Upfront" | "No Upfront" + OfferingType *string `type:"string"` + + // The product description filter value. Specify this parameter to show only + // those reservations matching the specified product description. + ProductDescription *string `type:"string"` + + // The reserved DB instance identifier filter value. Specify this parameter + // to show only the reservation that matches the specified reservation ID. + ReservedDBInstanceId *string `type:"string"` + + // The offering identifier filter value. Specify this parameter to show only + // purchased reservations matching the specified offering identifier. + ReservedDBInstancesOfferingId *string `type:"string"` +} + +// String returns the string representation +func (s DescribeReservedDBInstancesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeReservedDBInstancesInput) GoString() string { + return s.String() +} + +type DescribeReservedDBInstancesOfferingsInput struct { + _ struct{} `type:"structure"` + + // The DB instance class filter value. Specify this parameter to show only the + // available offerings matching the specified DB instance class. + DBInstanceClass *string `type:"string"` + + // Duration filter value, specified in years or seconds. Specify this parameter + // to show only reservations for this duration. + // + // Valid Values: 1 | 3 | 31536000 | 94608000 + Duration *string `type:"string"` + + // This parameter is not currently supported. + Filters []*Filter `locationNameList:"Filter" type:"list"` + + // An optional pagination token provided by a previous request. If this parameter + // is specified, the response includes only records beyond the marker, up to + // the value specified by MaxRecords. + Marker *string `type:"string"` + + // The maximum number of records to include in the response. If more than the + // MaxRecords value is available, a pagination token called a marker is included + // in the response so that the following results can be retrieved. + // + // Default: 100 + // + // Constraints: Minimum 20, maximum 100. + MaxRecords *int64 `type:"integer"` + + // The Multi-AZ filter value. Specify this parameter to show only the available + // offerings matching the specified Multi-AZ parameter. + MultiAZ *bool `type:"boolean"` + + // The offering type filter value. Specify this parameter to show only the available + // offerings matching the specified offering type. 
+ //
+ // Valid Values: "Partial Upfront" | "All Upfront" | "No Upfront"
+ OfferingType *string `type:"string"`
+
+ // Product description filter value. Specify this parameter to show only the
+ // available offerings matching the specified product description.
+ ProductDescription *string `type:"string"`
+
+ // The offering identifier filter value. Specify this parameter to show only
+ // the available offering that matches the specified reservation identifier.
+ //
+ // Example: 438012d3-4052-4cc7-b2e3-8d3372e0e706
+ ReservedDBInstancesOfferingId *string `type:"string"`
+}
+
+// String returns the string representation
+func (s DescribeReservedDBInstancesOfferingsInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeReservedDBInstancesOfferingsInput) GoString() string {
+ return s.String()
+}
+
+// Contains the result of a successful invocation of the DescribeReservedDBInstancesOfferings
+// action.
+type DescribeReservedDBInstancesOfferingsOutput struct {
+ _ struct{} `type:"structure"`
+
+ // An optional pagination token provided by a previous request. If this parameter
+ // is specified, the response includes only records beyond the marker, up to
+ // the value specified by MaxRecords.
+ Marker *string `type:"string"`
+
+ // A list of reserved DB instance offerings.
+ ReservedDBInstancesOfferings []*ReservedDBInstancesOffering `locationNameList:"ReservedDBInstancesOffering" type:"list"`
+}
+
+// String returns the string representation
+func (s DescribeReservedDBInstancesOfferingsOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeReservedDBInstancesOfferingsOutput) GoString() string {
+ return s.String()
+}
+
+// Contains the result of a successful invocation of the DescribeReservedDBInstances
+// action.
+type DescribeReservedDBInstancesOutput struct {
+ _ struct{} `type:"structure"`
+
+ // An optional pagination token provided by a previous request. If this parameter
+ // is specified, the response includes only records beyond the marker, up to
+ // the value specified by MaxRecords.
+ Marker *string `type:"string"`
+
+ // A list of reserved DB instances.
+ ReservedDBInstances []*ReservedDBInstance `locationNameList:"ReservedDBInstance" type:"list"`
+}
+
+// String returns the string representation
+func (s DescribeReservedDBInstancesOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeReservedDBInstancesOutput) GoString() string {
+ return s.String()
+}
+
+type DownloadDBLogFilePortionInput struct {
+ _ struct{} `type:"structure"`
+
+ // The customer-assigned name of the DB instance that contains the log files
+ // you want to list.
+ //
+ // Constraints:
+ //
+ // Must contain from 1 to 63 alphanumeric characters or hyphens First character
+ // must be a letter Cannot end with a hyphen or contain two consecutive hyphens
+ DBInstanceIdentifier *string `type:"string" required:"true"`
+
+ // The name of the log file to be downloaded.
+ LogFileName *string `type:"string" required:"true"`
+
+ // The pagination token provided in the previous request or "0". If the Marker
+ // parameter is specified, the response includes only records beyond the marker
+ // until the end of the file or up to NumberOfLines.
+ Marker *string `type:"string"`
+
+ // The number of lines to download.
If the number of lines specified results
+ // in a file over 1 MB in size, the file will be truncated at 1 MB in size.
+ //
+ // If the NumberOfLines parameter is specified, then the block of lines returned
+ // can be from the beginning or the end of the log file, depending on the value
+ // of the Marker parameter. If neither Marker nor NumberOfLines is specified,
+ // the entire log file is returned.
+ //
+ // If NumberOfLines is specified and Marker is not specified, then the most
+ // recent lines from the end of the log file are returned.
+ //
+ // If Marker is specified as "0", then the specified number of lines from the
+ // beginning of the log file are returned.
+ //
+ // You can download the log file in blocks of lines by specifying the size of
+ // the block using the NumberOfLines parameter, and by specifying a value of
+ // "0" for the Marker parameter in your first request. Include the Marker value
+ // returned in the response as the Marker value for the next request, continuing
+ // until the AdditionalDataPending response element returns false.
+ NumberOfLines *int64 `type:"integer"`
+}
+
+// String returns the string representation
+func (s DownloadDBLogFilePortionInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DownloadDBLogFilePortionInput) GoString() string {
+ return s.String()
+}
+
+// This data type is used as a response element to DownloadDBLogFilePortion.
+type DownloadDBLogFilePortionOutput struct {
+ _ struct{} `type:"structure"`
+
+ // Boolean value that, if true, indicates there is more data to be downloaded.
+ AdditionalDataPending *bool `type:"boolean"`
+
+ // Entries from the specified log file.
+ LogFileData *string `type:"string"`
+
+ // A pagination token that can be used in a subsequent DownloadDBLogFilePortion
+ // request.
+ Marker *string `type:"string"`
+}
+
+// String returns the string representation
+func (s DownloadDBLogFilePortionOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DownloadDBLogFilePortionOutput) GoString() string {
+ return s.String()
+}
+
+// This data type is used as a response element in the following actions:
+//
+// AuthorizeDBSecurityGroupIngress DescribeDBSecurityGroups RevokeDBSecurityGroupIngress
+type EC2SecurityGroup struct {
+ _ struct{} `type:"structure"`
+
+ // Specifies the ID of the EC2 security group.
+ EC2SecurityGroupId *string `type:"string"`
+
+ // Specifies the name of the EC2 security group.
+ EC2SecurityGroupName *string `type:"string"`
+
+ // Specifies the AWS ID of the owner of the EC2 security group specified in
+ // the EC2SecurityGroupName field.
+ EC2SecurityGroupOwnerId *string `type:"string"`
+
+ // Provides the status of the EC2 security group. Status can be "authorizing",
+ // "authorized", "revoking", and "revoked".
+ Status *string `type:"string"`
+}
+
+// String returns the string representation
+func (s EC2SecurityGroup) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s EC2SecurityGroup) GoString() string {
+ return s.String()
+}
+
+// This data type is used as a response element in the following actions:
+//
+// CreateDBInstance DescribeDBInstances DeleteDBInstance
+type Endpoint struct {
+ _ struct{} `type:"structure"`
+
+ // Specifies the DNS address of the DB instance.
+ Address *string `type:"string"`
+
+ // Specifies the ID that Amazon Route 53 assigns when you create a hosted zone.
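+ //
+ // Editor's sketch of how callers typically consume an Endpoint returned by
+ // DescribeDBInstances (the variable names are assumptions):
+ //
+ //    ep := dbInstance.Endpoint
+ //    hostPort := fmt.Sprintf("%s:%d",
+ //        aws.StringValue(ep.Address), aws.Int64Value(ep.Port))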
+ HostedZoneId *string `type:"string"`
+
+ // Specifies the port that the database engine is listening on.
+ Port *int64 `type:"integer"`
+}
+
+// String returns the string representation
+func (s Endpoint) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s Endpoint) GoString() string {
+ return s.String()
+}
+
+// Contains the result of a successful invocation of the DescribeEngineDefaultParameters
+// action.
+type EngineDefaults struct {
+ _ struct{} `type:"structure"`
+
+ // Specifies the name of the DB parameter group family that the engine default
+ // parameters apply to.
+ DBParameterGroupFamily *string `type:"string"`
+
+ // An optional pagination token provided by a previous EngineDefaults request.
+ // If this parameter is specified, the response includes only records beyond
+ // the marker, up to the value specified by MaxRecords.
+ Marker *string `type:"string"`
+
+ // Contains a list of engine default parameters.
+ Parameters []*Parameter `locationNameList:"Parameter" type:"list"`
+}
+
+// String returns the string representation
+func (s EngineDefaults) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s EngineDefaults) GoString() string {
+ return s.String()
+}
+
+// This data type is used as a response element in the DescribeEvents action.
+type Event struct {
+ _ struct{} `type:"structure"`
+
+ // Specifies the date and time of the event.
+ Date *time.Time `type:"timestamp" timestampFormat:"iso8601"`
+
+ // Specifies the category for the event.
+ EventCategories []*string `locationNameList:"EventCategory" type:"list"`
+
+ // Provides the text of this event.
+ Message *string `type:"string"`
+
+ // Provides the identifier for the source of the event.
+ SourceIdentifier *string `type:"string"`
+
+ // Specifies the source type for this event.
+ SourceType *string `type:"string" enum:"SourceType"`
+}
+
+// String returns the string representation
+func (s Event) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s Event) GoString() string {
+ return s.String()
+}
+
+// Contains the results of a successful invocation of the DescribeEventCategories
+// action.
+type EventCategoriesMap struct {
+ _ struct{} `type:"structure"`
+
+ // The event categories for the specified source type.
+ EventCategories []*string `locationNameList:"EventCategory" type:"list"`
+
+ // The source type that the returned categories belong to.
+ SourceType *string `type:"string"`
+}
+
+// String returns the string representation
+func (s EventCategoriesMap) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s EventCategoriesMap) GoString() string {
+ return s.String()
+}
+
+// Contains the results of a successful invocation of the DescribeEventSubscriptions
+// action.
+type EventSubscription struct {
+ _ struct{} `type:"structure"`
+
+ // The RDS event notification subscription Id.
+ CustSubscriptionId *string `type:"string"`
+
+ // The AWS customer account associated with the RDS event notification subscription.
+ CustomerAwsId *string `type:"string"`
+
+ // A Boolean value indicating if the subscription is enabled. True indicates
+ // the subscription is enabled.
+ Enabled *bool `type:"boolean"`
+
+ // A list of event categories for the RDS event notification subscription.
+ EventCategoriesList []*string `locationNameList:"EventCategory" type:"list"` + + // The topic ARN of the RDS event notification subscription. + SnsTopicArn *string `type:"string"` + + // A list of source IDs for the RDS event notification subscription. + SourceIdsList []*string `locationNameList:"SourceId" type:"list"` + + // The source type for the RDS event notification subscription. + SourceType *string `type:"string"` + + // The status of the RDS event notification subscription. + // + // Constraints: + // + // Can be one of the following: creating | modifying | deleting | active | + // no-permission | topic-not-exist + // + // The status "no-permission" indicates that RDS no longer has permission to + // post to the SNS topic. The status "topic-not-exist" indicates that the topic + // was deleted after the subscription was created. + Status *string `type:"string"` + + // The time the RDS event notification subscription was created. + SubscriptionCreationTime *string `type:"string"` +} + +// String returns the string representation +func (s EventSubscription) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s EventSubscription) GoString() string { + return s.String() +} + +type FailoverDBClusterInput struct { + _ struct{} `type:"structure"` + + // A DB cluster identifier to force a failover for. This parameter is not case-sensitive. + // + // Constraints: + // + // Must contain from 1 to 63 alphanumeric characters or hyphens First character + // must be a letter Cannot end with a hyphen or contain two consecutive hyphens + DBClusterIdentifier *string `type:"string"` +} + +// String returns the string representation +func (s FailoverDBClusterInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s FailoverDBClusterInput) GoString() string { + return s.String() +} + +type FailoverDBClusterOutput struct { + _ struct{} `type:"structure"` + + // Contains the result of a successful invocation of the following actions: + // + // CreateDBCluster DeleteDBCluster FailoverDBCluster ModifyDBCluster + // RestoreDBClusterFromSnapshot This data type is used as a response element + // in the DescribeDBClusters action. + DBCluster *DBCluster `type:"structure"` +} + +// String returns the string representation +func (s FailoverDBClusterOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s FailoverDBClusterOutput) GoString() string { + return s.String() +} + +type Filter struct { + _ struct{} `type:"structure"` + + // This parameter is not currently supported. + Name *string `type:"string" required:"true"` + + // This parameter is not currently supported. + Values []*string `locationNameList:"Value" type:"list" required:"true"` +} + +// String returns the string representation +func (s Filter) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Filter) GoString() string { + return s.String() +} + +// This data type is used as a response element in the DescribeDBSecurityGroups +// action. +type IPRange struct { + _ struct{} `type:"structure"` + + // Specifies the IP range. + CIDRIP *string `type:"string"` + + // Specifies the status of the IP range. Status can be "authorizing", "authorized", + // "revoking", and "revoked". 
+ Status *string `type:"string"` +} + +// String returns the string representation +func (s IPRange) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s IPRange) GoString() string { + return s.String() +} + +type ListTagsForResourceInput struct { + _ struct{} `type:"structure"` + + // This parameter is not currently supported. + Filters []*Filter `locationNameList:"Filter" type:"list"` + + // The Amazon RDS resource with tags to be listed. This value is an Amazon Resource + // Name (ARN). For information about creating an ARN, see Constructing an RDS + // Amazon Resource Name (ARN) (http://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_Tagging.html#USER_Tagging.ARN). + ResourceName *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s ListTagsForResourceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListTagsForResourceInput) GoString() string { + return s.String() +} + +type ListTagsForResourceOutput struct { + _ struct{} `type:"structure"` + + // List of tags returned by the ListTagsForResource operation. + TagList []*Tag `locationNameList:"Tag" type:"list"` +} + +// String returns the string representation +func (s ListTagsForResourceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListTagsForResourceOutput) GoString() string { + return s.String() +} + +type ModifyDBClusterInput struct { + _ struct{} `type:"structure"` + + // A value that specifies whether the modifications in this request and any + // pending modifications are asynchronously applied as soon as possible, regardless + // of the PreferredMaintenanceWindow setting for the DB cluster. + // + // If this parameter is set to false, changes to the DB cluster are applied + // during the next maintenance window. + // + // Default: false + ApplyImmediately *bool `type:"boolean"` + + // The number of days for which automated backups are retained. You must specify + // a minimum value of 1. + // + // Default: 1 + // + // Constraints: + // + // Must be a value from 1 to 35 + BackupRetentionPeriod *int64 `type:"integer"` + + // The DB cluster identifier for the cluster being modified. This parameter + // is not case-sensitive. + // + // Constraints: + // + // Must be the identifier for an existing DB cluster. Must contain from 1 + // to 63 alphanumeric characters or hyphens. First character must be a letter. + // Cannot end with a hyphen or contain two consecutive hyphens. + DBClusterIdentifier *string `type:"string" required:"true"` + + // The name of the DB cluster parameter group to use for the DB cluster. + DBClusterParameterGroupName *string `type:"string"` + + // The new password for the master database user. This password can contain + // any printable ASCII character except "/", """, or "@". + // + // Constraints: Must contain from 8 to 41 characters. + MasterUserPassword *string `type:"string"` + + // The new DB cluster identifier for the DB cluster when renaming a DB cluster. + // This value is stored as a lowercase string. 
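+ //
+ // Editor's illustrative sketch of a rename (the identifiers are placeholders,
+ // with my-cluster2 taken from the example below; svc is assumed to be an
+ // *rds.RDS client):
+ //
+ //    _, err := svc.ModifyDBCluster(&rds.ModifyDBClusterInput{
+ //        DBClusterIdentifier:    aws.String("my-cluster"),
+ //        NewDBClusterIdentifier: aws.String("my-cluster2"),
+ //        ApplyImmediately:       aws.Bool(true),
+ //    })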
+ //
+ // Constraints:
+ //
+ // Must contain from 1 to 63 alphanumeric characters or hyphens First character
+ // must be a letter Cannot end with a hyphen or contain two consecutive hyphens
+ // Example: my-cluster2
+ NewDBClusterIdentifier *string `type:"string"`
+
+ // A value that indicates that the DB cluster should be associated with the
+ // specified option group. Changing this parameter does not result in an outage
+ // except in the following case, and the change is applied during the next maintenance
+ // window unless the ApplyImmediately parameter is set to true for this request.
+ // If the parameter change results in an option group that enables OEM, this
+ // change can cause a brief (sub-second) period during which new connections
+ // are rejected but existing connections are not interrupted.
+ //
+ // Permanent options cannot be removed from an option group. The option group
+ // cannot be removed from a DB cluster once it is associated with a DB cluster.
+ OptionGroupName *string `type:"string"`
+
+ // The port number on which the DB cluster accepts connections.
+ //
+ // Constraints: Value must be 1150-65535
+ //
+ // Default: The same port as the original DB cluster.
+ Port *int64 `type:"integer"`
+
+ // The daily time range during which automated backups are created if automated
+ // backups are enabled, using the BackupRetentionPeriod parameter.
+ //
+ // Default: A 30-minute window selected at random from an 8-hour block of time
+ // per region. To see the time blocks available, see Adjusting the Preferred
+ // Maintenance Window (http://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/AdjustingTheMaintenanceWindow.html)
+ // in the Amazon RDS User Guide.
+ //
+ // Constraints:
+ //
+ // Must be in the format hh24:mi-hh24:mi. Times should be in Universal Coordinated
+ // Time (UTC). Must not conflict with the preferred maintenance window. Must
+ // be at least 30 minutes.
+ PreferredBackupWindow *string `type:"string"`
+
+ // The weekly time range during which system maintenance can occur, in Universal
+ // Coordinated Time (UTC).
+ //
+ // Format: ddd:hh24:mi-ddd:hh24:mi
+ //
+ // Default: A 30-minute window selected at random from an 8-hour block of time
+ // per region, occurring on a random day of the week. To see the time blocks
+ // available, see Adjusting the Preferred Maintenance Window (http://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/AdjustingTheMaintenanceWindow.html)
+ // in the Amazon RDS User Guide.
+ //
+ // Valid Days: Mon, Tue, Wed, Thu, Fri, Sat, Sun
+ //
+ // Constraints: Minimum 30-minute window.
+ PreferredMaintenanceWindow *string `type:"string"`
+
+ // A list of VPC security groups that the DB cluster will belong to.
+ VpcSecurityGroupIds []*string `locationNameList:"VpcSecurityGroupId" type:"list"`
+}
+
+// String returns the string representation
+func (s ModifyDBClusterInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ModifyDBClusterInput) GoString() string {
+ return s.String()
+}
+
+type ModifyDBClusterOutput struct {
+ _ struct{} `type:"structure"`
+
+ // Contains the result of a successful invocation of the following actions:
+ //
+ // CreateDBCluster DeleteDBCluster FailoverDBCluster ModifyDBCluster
+ // RestoreDBClusterFromSnapshot This data type is used as a response element
+ // in the DescribeDBClusters action.
+ DBCluster *DBCluster `type:"structure"` +} + +// String returns the string representation +func (s ModifyDBClusterOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModifyDBClusterOutput) GoString() string { + return s.String() +} + +type ModifyDBClusterParameterGroupInput struct { + _ struct{} `type:"structure"` + + // The name of the DB cluster parameter group to modify. + DBClusterParameterGroupName *string `type:"string" required:"true"` + + // A list of parameters in the DB cluster parameter group to modify. + Parameters []*Parameter `locationNameList:"Parameter" type:"list" required:"true"` +} + +// String returns the string representation +func (s ModifyDBClusterParameterGroupInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModifyDBClusterParameterGroupInput) GoString() string { + return s.String() +} + +type ModifyDBInstanceInput struct { + _ struct{} `type:"structure"` + + // The new storage capacity of the RDS instance. Changing this setting does + // not result in an outage and the change is applied during the next maintenance + // window unless ApplyImmediately is set to true for this request. + // + // MySQL + // + // Default: Uses existing setting + // + // Valid Values: 5-6144 + // + // Constraints: Value supplied must be at least 10% greater than the current + // value. Values that are not at least 10% greater than the existing value are + // rounded up so that they are 10% greater than the current value. + // + // Type: Integer + // + // MariaDB + // + // Default: Uses existing setting + // + // Valid Values: 5-6144 + // + // Constraints: Value supplied must be at least 10% greater than the current + // value. Values that are not at least 10% greater than the existing value are + // rounded up so that they are 10% greater than the current value. + // + // Type: Integer + // + // PostgreSQL + // + // Default: Uses existing setting + // + // Valid Values: 5-6144 + // + // Constraints: Value supplied must be at least 10% greater than the current + // value. Values that are not at least 10% greater than the existing value are + // rounded up so that they are 10% greater than the current value. + // + // Type: Integer + // + // Oracle + // + // Default: Uses existing setting + // + // Valid Values: 10-6144 + // + // Constraints: Value supplied must be at least 10% greater than the current + // value. Values that are not at least 10% greater than the existing value are + // rounded up so that they are 10% greater than the current value. + // + // SQL Server + // + // Cannot be modified. + // + // If you choose to migrate your DB instance from using standard storage to + // using Provisioned IOPS, or from using Provisioned IOPS to using standard + // storage, the process can take time. The duration of the migration depends + // on several factors such as database load, storage size, storage type (standard + // or Provisioned IOPS), amount of IOPS provisioned (if any), and the number + // of prior scale storage operations. Typical migration times are under 24 hours, + // but the process can take up to several days in some cases. During the migration, + // the DB instance will be available for use, but might experience performance + // degradation. While the migration takes place, nightly backups for the instance + // will be suspended. 
No other Amazon RDS operations can take place for the + // instance, including modifying the instance, rebooting the instance, deleting + // the instance, creating a Read Replica for the instance, and creating a DB + // snapshot of the instance. + AllocatedStorage *int64 `type:"integer"` + + // Indicates that major version upgrades are allowed. Changing this parameter + // does not result in an outage and the change is asynchronously applied as + // soon as possible. + // + // Constraints: This parameter must be set to true when specifying a value + // for the EngineVersion parameter that is a different major version than the + // DB instance's current version. + AllowMajorVersionUpgrade *bool `type:"boolean"` + + // Specifies whether the modifications in this request and any pending modifications + // are asynchronously applied as soon as possible, regardless of the PreferredMaintenanceWindow + // setting for the DB instance. + // + // If this parameter is set to false, changes to the DB instance are applied + // during the next maintenance window. Some parameter changes can cause an outage + // and will be applied on the next call to RebootDBInstance, or the next failure + // reboot. Review the table of parameters in Modifying a DB Instance and Using + // the Apply Immediately Parameter (http://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Overview.DBInstance.Modifying.html) + // to see the impact that setting ApplyImmediately to true or false has for + // each modified parameter and to determine when the changes will be applied. + // + // Default: false + ApplyImmediately *bool `type:"boolean"` + + // Indicates that minor version upgrades will be applied automatically to the + // DB instance during the maintenance window. Changing this parameter does not + // result in an outage except in the following case and the change is asynchronously + // applied as soon as possible. An outage will result if this parameter is set + // to true during the maintenance window, and a newer minor version is available, + // and RDS has enabled auto patching for that engine version. + AutoMinorVersionUpgrade *bool `type:"boolean"` + + // The number of days to retain automated backups. Setting this parameter to + // a positive number enables backups. Setting this parameter to 0 disables automated + // backups. + // + // Changing this parameter can result in an outage if you change from 0 to + // a non-zero value or from a non-zero value to 0. These changes are applied + // during the next maintenance window unless the ApplyImmediately parameter + // is set to true for this request. If you change the parameter from one non-zero + // value to another non-zero value, the change is asynchronously applied as + // soon as possible. + // + // Default: Uses existing setting + // + // Constraints: + // + // Must be a value from 0 to 35 Can be specified for a MySQL Read Replica + // only if the source is running MySQL 5.6 Can be specified for a PostgreSQL + // Read Replica only if the source is running PostgreSQL 9.3.5 Cannot be set + // to 0 if the DB instance is a source to Read Replicas + BackupRetentionPeriod *int64 `type:"integer"` + + // Indicates the certificate that needs to be associated with the instance. + CACertificateIdentifier *string `type:"string"` + + // True to copy all tags from the DB instance to snapshots of the DB instance; + // otherwise false. The default is false. + CopyTagsToSnapshot *bool `type:"boolean"` + + // The new compute and memory capacity of the DB instance. 
To determine the
+ // instance classes that are available for a particular DB engine, use the DescribeOrderableDBInstanceOptions
+ // action.
+ //
+ // Passing a value for this setting causes an outage during the change and
+ // is applied during the next maintenance window, unless ApplyImmediately is
+ // specified as true for this request.
+ //
+ // Default: Uses existing setting
+ //
+ // Valid Values: db.t1.micro | db.m1.small | db.m1.medium | db.m1.large | db.m1.xlarge
+ // | db.m2.xlarge | db.m2.2xlarge | db.m2.4xlarge | db.m3.medium | db.m3.large
+ // | db.m3.xlarge | db.m3.2xlarge | db.m4.large | db.m4.xlarge | db.m4.2xlarge
+ // | db.m4.4xlarge | db.m4.10xlarge | db.r3.large | db.r3.xlarge | db.r3.2xlarge
+ // | db.r3.4xlarge | db.r3.8xlarge | db.t2.micro | db.t2.small | db.t2.medium
+ // | db.t2.large
+ DBInstanceClass *string `type:"string"`
+
+ // The DB instance identifier. This value is stored as a lowercase string.
+ //
+ // Constraints:
+ //
+ // Must be the identifier for an existing DB instance Must contain from 1
+ // to 63 alphanumeric characters or hyphens First character must be a letter
+ // Cannot end with a hyphen or contain two consecutive hyphens
+ DBInstanceIdentifier *string `type:"string" required:"true"`
+
+ // The name of the DB parameter group to apply to the DB instance. Changing
+ // this setting does not result in an outage. The parameter group name itself
+ // is changed immediately, but the actual parameter changes are not applied
+ // until you reboot the instance without failover. The DB instance will NOT
+ // be rebooted automatically and the parameter changes will NOT be applied during
+ // the next maintenance window.
+ //
+ // Default: Uses existing setting
+ //
+ // Constraints: The DB parameter group must be in the same DB parameter group
+ // family as this DB instance.
+ DBParameterGroupName *string `type:"string"`
+
+ // The port number on which the database accepts connections.
+ //
+ // The value of the DBPortNumber parameter must not match any of the port values
+ // specified for options in the option group for the DB instance.
+ //
+ // Your database will restart when you change the DBPortNumber value regardless
+ // of the value of the ApplyImmediately parameter.
+ //
+ // MySQL
+ //
+ // Default: 3306
+ //
+ // Valid Values: 1150-65535
+ //
+ // MariaDB
+ //
+ // Default: 3306
+ //
+ // Valid Values: 1150-65535
+ //
+ // PostgreSQL
+ //
+ // Default: 5432
+ //
+ // Valid Values: 1150-65535
+ //
+ // Oracle
+ //
+ // Default: 1521
+ //
+ // Valid Values: 1150-65535
+ //
+ // SQL Server
+ //
+ // Default: 1433
+ //
+ // Valid Values: 1150-65535 except for 1434, 3389, 47001, and 49152 through
+ // 49156.
+ //
+ // Amazon Aurora
+ //
+ // Default: 3306
+ //
+ // Valid Values: 1150-65535
+ DBPortNumber *int64 `type:"integer"`
+
+ // A list of DB security groups to authorize on this DB instance. Changing this
+ // setting does not result in an outage and the change is asynchronously applied
+ // as soon as possible.
+ //
+ // Constraints:
+ //
+ // Must be 1 to 255 alphanumeric characters First character must be a letter
+ // Cannot end with a hyphen or contain two consecutive hyphens
+ DBSecurityGroups []*string `locationNameList:"DBSecurityGroupName" type:"list"`
+
+ // The version number of the database engine to upgrade to. Changing this parameter
+ // results in an outage and the change is applied during the next maintenance
+ // window unless the ApplyImmediately parameter is set to true for this request.
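+ //
+ // Editor's illustrative sketch of a major version upgrade (the identifier and
+ // version are placeholders; svc is assumed to be an *rds.RDS client):
+ //
+ //    _, err := svc.ModifyDBInstance(&rds.ModifyDBInstanceInput{
+ //        DBInstanceIdentifier:     aws.String("mydbinstance"),
+ //        EngineVersion:            aws.String("5.6.27"),
+ //        AllowMajorVersionUpgrade: aws.Bool(true),
+ //        ApplyImmediately:         aws.Bool(true),
+ //    })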
+ // + // For major version upgrades, if a non-default DB parameter group is currently + // in use, a new DB parameter group in the DB parameter group family for the + // new engine version must be specified. The new DB parameter group can be the + // default for that DB parameter group family. + // + // For a list of valid engine versions, see CreateDBInstance. + EngineVersion *string `type:"string"` + + // The new Provisioned IOPS (I/O operations per second) value for the RDS instance. + // Changing this setting does not result in an outage and the change is applied + // during the next maintenance window unless the ApplyImmediately parameter + // is set to true for this request. + // + // Default: Uses existing setting + // + // Constraints: Value supplied must be at least 10% greater than the current + // value. Values that are not at least 10% greater than the existing value are + // rounded up so that they are 10% greater than the current value. If you are + // migrating from Provisioned IOPS to standard storage, set this value to 0. + // The DB instance will require a reboot for the change in storage type to take + // effect. + // + // SQL Server + // + // Setting the IOPS value for the SQL Server database engine is not supported. + // + // Type: Integer + // + // If you choose to migrate your DB instance from using standard storage to + // using Provisioned IOPS, or from using Provisioned IOPS to using standard + // storage, the process can take time. The duration of the migration depends + // on several factors such as database load, storage size, storage type (standard + // or Provisioned IOPS), amount of IOPS provisioned (if any), and the number + // of prior scale storage operations. Typical migration times are under 24 hours, + // but the process can take up to several days in some cases. During the migration, + // the DB instance will be available for use, but might experience performance + // degradation. While the migration takes place, nightly backups for the instance + // will be suspended. No other Amazon RDS operations can take place for the + // instance, including modifying the instance, rebooting the instance, deleting + // the instance, creating a Read Replica for the instance, and creating a DB + // snapshot of the instance. + Iops *int64 `type:"integer"` + + // The new password for the DB instance master user. Can be any printable ASCII + // character except "/", """, or "@". + // + // Changing this parameter does not result in an outage and the change is + // asynchronously applied as soon as possible. Between the time of the request + // and the completion of the request, the MasterUserPassword element exists + // in the PendingModifiedValues element of the operation response. + // + // Default: Uses existing setting + // + // Constraints: Must be 8 to 41 alphanumeric characters (MySQL, MariaDB, and + // Amazon Aurora), 8 to 30 alphanumeric characters (Oracle), or 8 to 128 alphanumeric + // characters (SQL Server). + // + // Amazon RDS API actions never return the password, so this action provides + // a way to regain access to a primary instance user if the password is lost. + // This includes restoring privileges that might have been accidentally revoked. + MasterUserPassword *string `type:"string"` + + // The interval, in seconds, between points when Enhanced Monitoring metrics + // are collected for the DB instance. To disable collecting Enhanced Monitoring + // metrics, specify 0. The default is 60. 
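+ //
+ // Editor's sketch: enabling Enhanced Monitoring at 60-second granularity (the
+ // identifier and role ARN are placeholders; svc is assumed to be an *rds.RDS
+ // client):
+ //
+ //    _, err := svc.ModifyDBInstance(&rds.ModifyDBInstanceInput{
+ //        DBInstanceIdentifier: aws.String("mydbinstance"),
+ //        MonitoringInterval:   aws.Int64(60),
+ //        MonitoringRoleArn:    aws.String("arn:aws:iam::123456789012:role/emaccess"),
+ //    })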
+ //
+ // If MonitoringRoleArn is specified, then you must also set MonitoringInterval
+ // to a value other than 0.
+ //
+ // Valid Values: 0, 1, 5, 10, 15, 30, 60
+ MonitoringInterval *int64 `type:"integer"`
+
+ // The ARN for the IAM role that permits RDS to send enhanced monitoring metrics
+ // to CloudWatch Logs. For example, arn:aws:iam::123456789012:role/emaccess.
+ // For information on creating a monitoring role, go to To create an IAM role
+ // for Amazon RDS Enhanced Monitoring (http://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_Monitoring.html#USER_Monitoring.OS.IAMRole).
+ //
+ // If MonitoringInterval is set to a value other than 0, then you must supply
+ // a MonitoringRoleArn value.
+ MonitoringRoleArn *string `type:"string"`
+
+ // Specifies if the DB instance is a Multi-AZ deployment. Changing this parameter
+ // does not result in an outage and the change is applied during the next maintenance
+ // window unless the ApplyImmediately parameter is set to true for this request.
+ //
+ // Constraints: Cannot be specified if the DB instance is a Read Replica. This
+ // parameter cannot be used with SQL Server DB instances. Multi-AZ for SQL Server
+ // DB instances is set using the Mirroring option in an option group associated
+ // with the DB instance.
+ MultiAZ *bool `type:"boolean"`
+
+ // The new DB instance identifier for the DB instance when renaming a DB instance.
+ // When you change the DB instance identifier, an instance reboot will occur
+ // immediately if you set Apply Immediately to true, or will occur during the
+ // next maintenance window if you set Apply Immediately to false. This value
+ // is stored as a lowercase string.
+ //
+ // Constraints:
+ //
+ // Must contain from 1 to 63 alphanumeric characters or hyphens First character
+ // must be a letter Cannot end with a hyphen or contain two consecutive hyphens
+ NewDBInstanceIdentifier *string `type:"string"`
+
+ // Indicates that the DB instance should be associated with the specified option
+ // group. Changing this parameter does not result in an outage except in the
+ // following case and the change is applied during the next maintenance window
+ // unless the ApplyImmediately parameter is set to true for this request. If
+ // the parameter change results in an option group that enables OEM, this change
+ // can cause a brief (sub-second) period during which new connections are rejected
+ // but existing connections are not interrupted.
+ //
+ // Permanent options, such as the TDE option for Oracle Advanced Security
+ // TDE, cannot be removed from an option group, and that option group cannot
+ // be removed from a DB instance once it is associated with a DB instance.
+ OptionGroupName *string `type:"string"`
+
+ // The daily time range during which automated backups are created if automated
+ // backups are enabled, as determined by the BackupRetentionPeriod parameter.
+ // Changing this parameter does not result in an outage and the change is asynchronously
+ // applied as soon as possible.
+ //
+ // Constraints:
+ //
+ // Must be in the format hh24:mi-hh24:mi Times should be in Universal Time
+ // Coordinated (UTC) Must not conflict with the preferred maintenance window
+ // Must be at least 30 minutes
+ PreferredBackupWindow *string `type:"string"`
+
+ // The weekly time range (in UTC) during which system maintenance can occur,
+ // which might result in an outage.
Changing this parameter does not result + // in an outage, except in the following situation, and the change is asynchronously + // applied as soon as possible. If there are pending actions that cause a reboot, + // and the maintenance window is changed to include the current time, then changing + // this parameter will cause a reboot of the DB instance. If moving this window + // to the current time, there must be at least 30 minutes between the current + // time and end of the window to ensure pending changes are applied. + // + // Default: Uses existing setting + // + // Format: ddd:hh24:mi-ddd:hh24:mi + // + // Valid Days: Mon | Tue | Wed | Thu | Fri | Sat | Sun + // + // Constraints: Must be at least 30 minutes + PreferredMaintenanceWindow *string `type:"string"` + + // True to make the DB instance Internet-facing with a publicly resolvable DNS + // name, which resolves to a public IP address. False to make the DB instance + // internal with a DNS name that resolves to a private IP address. + // + // PubliclyAccessible only applies to DB instances in a VPC. The DB instance + // must be part of a public subnet and PubliclyAccessible must be true in order + // for it to be publicly accessible. + // + // Changes to the PubliclyAccessible parameter are applied immediately regardless + // of the value of the ApplyImmediately parameter. + // + // Default: false + PubliclyAccessible *bool `type:"boolean"` + + // Specifies the storage type to be associated with the DB instance. + // + // Valid values: standard | gp2 | io1 + // + // If you specify io1, you must also include a value for the Iops parameter. + // + // Default: io1 if the Iops parameter is specified; otherwise standard + StorageType *string `type:"string"` + + // The ARN from the Key Store with which to associate the instance for TDE encryption. + TdeCredentialArn *string `type:"string"` + + // The password for the given ARN from the Key Store in order to access the + // device. + TdeCredentialPassword *string `type:"string"` + + // A list of EC2 VPC security groups to authorize on this DB instance. This + // change is asynchronously applied as soon as possible. + // + // Constraints: + // + // Must be 1 to 255 alphanumeric characters First character must be a letter + // Cannot end with a hyphen or contain two consecutive hyphens + VpcSecurityGroupIds []*string `locationNameList:"VpcSecurityGroupId" type:"list"` +} + +// String returns the string representation +func (s ModifyDBInstanceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModifyDBInstanceInput) GoString() string { + return s.String() +} + +type ModifyDBInstanceOutput struct { + _ struct{} `type:"structure"` + + // Contains the result of a successful invocation of the following actions: + // + // CreateDBInstance DeleteDBInstance ModifyDBInstance This data type + // is used as a response element in the DescribeDBInstances action. + DBInstance *DBInstance `type:"structure"` +} + +// String returns the string representation +func (s ModifyDBInstanceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModifyDBInstanceOutput) GoString() string { + return s.String() +} + +type ModifyDBParameterGroupInput struct { + _ struct{} `type:"structure"` + + // The name of the DB parameter group. 
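The ModifyDBInstanceInput/Output pair above is driven through the generated RDS client. A minimal sketch of resizing an instance with ApplyImmediately, assuming default credential and region configuration; the instance identifier and target class are hypothetical:

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/rds"
)

func main() {
	svc := rds.New(session.New())

	// Resize a hypothetical instance and apply the change now instead of
	// waiting for the next maintenance window.
	out, err := svc.ModifyDBInstance(&rds.ModifyDBInstanceInput{
		DBInstanceIdentifier: aws.String("mydb"), // hypothetical
		DBInstanceClass:      aws.String("db.m4.large"),
		ApplyImmediately:     aws.Bool(true),
	})
	if err != nil {
		log.Fatal(err)
	}
	// PendingModifiedValues on the returned DBInstance reflects any changes
	// that have not yet been applied.
	fmt.Println(out)
}

The sketches that follow reuse this svc client and elide the package boilerplate.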
+ // + // Constraints: + // + // Must be the name of an existing DB parameter group Must be 1 to 255 alphanumeric + // characters First character must be a letter Cannot end with a hyphen or contain + // two consecutive hyphens + DBParameterGroupName *string `type:"string" required:"true"` + + // An array of parameter names, values, and the apply method for the parameter + // update. At least one parameter name, value, and apply method must be supplied; + // subsequent arguments are optional. A maximum of 20 parameters can be modified + // in a single request. + // + // Valid Values (for the application method): immediate | pending-reboot + // + // You can use the immediate value with dynamic parameters only. You can use + // the pending-reboot value for both dynamic and static parameters, and changes + // are applied when you reboot the DB instance without failover. + Parameters []*Parameter `locationNameList:"Parameter" type:"list" required:"true"` +} + +// String returns the string representation +func (s ModifyDBParameterGroupInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModifyDBParameterGroupInput) GoString() string { + return s.String() +} + +type ModifyDBSnapshotAttributeInput struct { + _ struct{} `type:"structure"` + + // The name of the DB snapshot attribute to modify. + // + // To manage authorization for other AWS accounts to copy or restore a manual + // DB snapshot, this value is restore. + AttributeName *string `type:"string"` + + // The identifier for the DB snapshot to modify the attributes for. + DBSnapshotIdentifier *string `type:"string" required:"true"` + + // A list of DB snapshot attributes to add to the attribute specified by AttributeName. + // + // To authorize other AWS Accounts to copy or restore a manual snapshot, this + // is one or more AWS account identifiers, or all to make the manual DB snapshot + // restorable by any AWS account. Do not add the all value for any manual DB + // snapshots that contain private information that you do not want to be available + // to all AWS accounts. + ValuesToAdd []*string `locationNameList:"AttributeValue" type:"list"` + + // A list of DB snapshot attributes to remove from the attribute specified by + // AttributeName. + // + // To remove authorization for other AWS Accounts to copy or restore a manual + // snapshot, this is one or more AWS account identifiers, or all to remove authorization + // for any AWS account to copy or restore the DB snapshot. If you specify all, + // AWS accounts that have their account identifier explicitly added to the restore + // attribute can still copy or restore the manual DB snapshot. + ValuesToRemove []*string `locationNameList:"AttributeValue" type:"list"` +} + +// String returns the string representation +func (s ModifyDBSnapshotAttributeInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModifyDBSnapshotAttributeInput) GoString() string { + return s.String() +} + +type ModifyDBSnapshotAttributeOutput struct { + _ struct{} `type:"structure"` + + // Contains the results of a successful call to the DescribeDBSnapshotAttributes + // API. + // + // Manual DB snapshot attributes are used to authorize other AWS accounts to + // copy or restore a manual DB snapshot. For more information, see the ModifyDBSnapshotAttribute + // API. 
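A minimal sketch of the immediate/pending-reboot apply methods described above, reusing the svc client from the earlier sketch; the group and parameter names are hypothetical:

// A static parameter must use pending-reboot; it takes effect only after
// the instance is rebooted without failover.
_, err := svc.ModifyDBParameterGroup(&rds.ModifyDBParameterGroupInput{
	DBParameterGroupName: aws.String("my-param-group"), // hypothetical
	Parameters: []*rds.Parameter{{
		ParameterName:  aws.String("max_connections"), // hypothetical example
		ParameterValue: aws.String("250"),
		ApplyMethod:    aws.String("pending-reboot"),
	}},
})
if err != nil {
	log.Fatal(err)
}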
+ DBSnapshotAttributesResult *DBSnapshotAttributesResult `type:"structure"`
+}
+
+// String returns the string representation
+func (s ModifyDBSnapshotAttributeOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ModifyDBSnapshotAttributeOutput) GoString() string {
+ return s.String()
+}
+
+type ModifyDBSubnetGroupInput struct {
+ _ struct{} `type:"structure"`
+
+ // The description for the DB subnet group.
+ DBSubnetGroupDescription *string `type:"string"`
+
+ // The name for the DB subnet group. This value is stored as a lowercase string.
+ //
+ // Constraints: Must contain no more than 255 alphanumeric characters or hyphens.
+ // Must not be "Default".
+ //
+ // Example: mySubnetgroup
+ DBSubnetGroupName *string `type:"string" required:"true"`
+
+ // The EC2 subnet IDs for the DB subnet group.
+ SubnetIds []*string `locationNameList:"SubnetIdentifier" type:"list" required:"true"`
+}
+
+// String returns the string representation
+func (s ModifyDBSubnetGroupInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ModifyDBSubnetGroupInput) GoString() string {
+ return s.String()
+}
+
+type ModifyDBSubnetGroupOutput struct {
+ _ struct{} `type:"structure"`
+
+ // Contains the result of a successful invocation of the following actions:
+ //
+ // CreateDBSubnetGroup ModifyDBSubnetGroup DescribeDBSubnetGroups DeleteDBSubnetGroup
+ // This data type is used as a response element in the DescribeDBSubnetGroups
+ // action.
+ DBSubnetGroup *DBSubnetGroup `type:"structure"`
+}
+
+// String returns the string representation
+func (s ModifyDBSubnetGroupOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ModifyDBSubnetGroupOutput) GoString() string {
+ return s.String()
+}
+
+type ModifyEventSubscriptionInput struct {
+ _ struct{} `type:"structure"`
+
+ // A Boolean value; set to true to activate the subscription.
+ Enabled *bool `type:"boolean"`
+
+ // A list of event categories for a SourceType that you want to subscribe to.
+ // You can see a list of the categories for a given SourceType in the Events
+ // (http://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_Events.html)
+ // topic in the Amazon RDS User Guide or by using the DescribeEventCategories
+ // action.
+ EventCategories []*string `locationNameList:"EventCategory" type:"list"`
+
+ // The Amazon Resource Name (ARN) of the SNS topic created for event notification.
+ // The ARN is created by Amazon SNS when you create a topic and subscribe to
+ // it.
+ SnsTopicArn *string `type:"string"`
+
+ // The type of source that will be generating the events. For example, if you
+ // want to be notified of events generated by a DB instance, you would set this
+ // parameter to db-instance. If this value is not specified, all events are
+ // returned.
+ //
+ // Valid values: db-instance | db-parameter-group | db-security-group | db-snapshot
+ SourceType *string `type:"string"`
+
+ // The name of the RDS event notification subscription.
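To authorize another account to restore a manual snapshot, the restore attribute described above is the hook. A sketch, again reusing svc; both identifiers are hypothetical:

_, err := svc.ModifyDBSnapshotAttribute(&rds.ModifyDBSnapshotAttributeInput{
	DBSnapshotIdentifier: aws.String("my-manual-snapshot"),      // hypothetical
	AttributeName:        aws.String("restore"),
	ValuesToAdd:          []*string{aws.String("123456789012")}, // hypothetical account ID
})
if err != nil {
	log.Fatal(err)
}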
+ SubscriptionName *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s ModifyEventSubscriptionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModifyEventSubscriptionInput) GoString() string { + return s.String() +} + +type ModifyEventSubscriptionOutput struct { + _ struct{} `type:"structure"` + + // Contains the results of a successful invocation of the DescribeEventSubscriptions + // action. + EventSubscription *EventSubscription `type:"structure"` +} + +// String returns the string representation +func (s ModifyEventSubscriptionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModifyEventSubscriptionOutput) GoString() string { + return s.String() +} + +type ModifyOptionGroupInput struct { + _ struct{} `type:"structure"` + + // Indicates whether the changes should be applied immediately, or during the + // next maintenance window for each instance associated with the option group. + ApplyImmediately *bool `type:"boolean"` + + // The name of the option group to be modified. + // + // Permanent options, such as the TDE option for Oracle Advanced Security + // TDE, cannot be removed from an option group, and that option group cannot + // be removed from a DB instance once it is associated with a DB instance + OptionGroupName *string `type:"string" required:"true"` + + // Options in this list are added to the option group or, if already present, + // the specified configuration is used to update the existing configuration. + OptionsToInclude []*OptionConfiguration `locationNameList:"OptionConfiguration" type:"list"` + + // Options in this list are removed from the option group. + OptionsToRemove []*string `type:"list"` +} + +// String returns the string representation +func (s ModifyOptionGroupInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModifyOptionGroupInput) GoString() string { + return s.String() +} + +type ModifyOptionGroupOutput struct { + _ struct{} `type:"structure"` + + OptionGroup *OptionGroup `type:"structure"` +} + +// String returns the string representation +func (s ModifyOptionGroupOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModifyOptionGroupOutput) GoString() string { + return s.String() +} + +// Option details. +type Option struct { + _ struct{} `type:"structure"` + + // If the option requires access to a port, then this DB security group allows + // access to the port. + DBSecurityGroupMemberships []*DBSecurityGroupMembership `locationNameList:"DBSecurityGroup" type:"list"` + + // The description of the option. + OptionDescription *string `type:"string"` + + // The name of the option. + OptionName *string `type:"string"` + + // The option settings for this option. + OptionSettings []*OptionSetting `locationNameList:"OptionSetting" type:"list"` + + // Indicate if this option is permanent. + Permanent *bool `type:"boolean"` + + // Indicate if this option is persistent. + Persistent *bool `type:"boolean"` + + // If required, the port configured for this option to use. + Port *int64 `type:"integer"` + + // If the option requires access to a port, then this VPC security group allows + // access to the port. 
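A sketch of adding one option through the ModifyOptionGroup input above; the option group name, option name, and port are illustrative only, since real option names are engine-specific:

out, err := svc.ModifyOptionGroup(&rds.ModifyOptionGroupInput{
	OptionGroupName:  aws.String("my-option-group"), // hypothetical
	ApplyImmediately: aws.Bool(true),
	OptionsToInclude: []*rds.OptionConfiguration{{
		OptionName: aws.String("MEMCACHED"), // engine-specific; illustrative
		Port:       aws.Int64(11211),
	}},
})
if err != nil {
	log.Fatal(err)
}
fmt.Println(out.OptionGroup)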
+ VpcSecurityGroupMemberships []*VpcSecurityGroupMembership `locationNameList:"VpcSecurityGroupMembership" type:"list"`
+}
+
+// String returns the string representation
+func (s Option) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s Option) GoString() string {
+ return s.String()
+}
+
+// A list of all available options
+type OptionConfiguration struct {
+ _ struct{} `type:"structure"`
+
+ // A list of DBSecurityGroupMembership name strings used for this option.
+ DBSecurityGroupMemberships []*string `locationNameList:"DBSecurityGroupName" type:"list"`
+
+ // The configuration of options to include in a group.
+ OptionName *string `type:"string" required:"true"`
+
+ // The option settings to include in an option group.
+ OptionSettings []*OptionSetting `locationNameList:"OptionSetting" type:"list"`
+
+ // The optional port for the option.
+ Port *int64 `type:"integer"`
+
+ // A list of VpcSecurityGroupMembership name strings used for this option.
+ VpcSecurityGroupMemberships []*string `locationNameList:"VpcSecurityGroupId" type:"list"`
+}
+
+// String returns the string representation
+func (s OptionConfiguration) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s OptionConfiguration) GoString() string {
+ return s.String()
+}
+
+type OptionGroup struct {
+ _ struct{} `type:"structure"`
+
+ // Indicates whether this option group can be applied to both VPC and non-VPC
+ // instances. The value true indicates the option group can be applied to both
+ // VPC and non-VPC instances.
+ AllowsVpcAndNonVpcInstanceMemberships *bool `type:"boolean"`
+
+ // Indicates the name of the engine that this option group can be applied to.
+ EngineName *string `type:"string"`
+
+ // Indicates the major engine version associated with this option group.
+ MajorEngineVersion *string `type:"string"`
+
+ // Provides a description of the option group.
+ OptionGroupDescription *string `type:"string"`
+
+ // Specifies the name of the option group.
+ OptionGroupName *string `type:"string"`
+
+ // Indicates what options are available in the option group.
+ Options []*Option `locationNameList:"Option" type:"list"`
+
+ // If AllowsVpcAndNonVpcInstanceMemberships is false, this field is blank. If
+ // AllowsVpcAndNonVpcInstanceMemberships is true and this field is blank, then
+ // this option group can be applied to both VPC and non-VPC instances. If this
+ // field contains a value, then this option group can only be applied to instances
+ // that are in the VPC indicated by this field.
+ VpcId *string `type:"string"`
+}
+
+// String returns the string representation
+func (s OptionGroup) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s OptionGroup) GoString() string {
+ return s.String()
+}
+
+// Provides information on the option groups the DB instance is a member of.
+type OptionGroupMembership struct {
+ _ struct{} `type:"structure"`
+
+ // The name of the option group that the instance belongs to.
+ OptionGroupName *string `type:"string"`
+
+ // The status of the DB instance's option group membership (e.g. in-sync, pending,
+ // pending-maintenance, applying).
+ Status *string `type:"string"` +} + +// String returns the string representation +func (s OptionGroupMembership) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s OptionGroupMembership) GoString() string { + return s.String() +} + +// Available option. +type OptionGroupOption struct { + _ struct{} `type:"structure"` + + // If the option requires a port, specifies the default port for the option. + DefaultPort *int64 `type:"integer"` + + // The description of the option. + Description *string `type:"string"` + + // The name of the engine that this option can be applied to. + EngineName *string `type:"string"` + + // Indicates the major engine version that the option is available for. + MajorEngineVersion *string `type:"string"` + + // The minimum required engine version for the option to be applied. + MinimumRequiredMinorEngineVersion *string `type:"string"` + + // The name of the option. + Name *string `type:"string"` + + // Specifies the option settings that are available (and the default value) + // for each option in an option group. + OptionGroupOptionSettings []*OptionGroupOptionSetting `locationNameList:"OptionGroupOptionSetting" type:"list"` + + // List of all options that are prerequisites for this option. + OptionsDependedOn []*string `locationNameList:"OptionName" type:"list"` + + // A permanent option cannot be removed from the option group once the option + // group is used, and it cannot be removed from the db instance after assigning + // an option group with this permanent option. + Permanent *bool `type:"boolean"` + + // A persistent option cannot be removed from the option group once the option + // group is used, but this option can be removed from the db instance while + // modifying the related data and assigning another option group without this + // option. + Persistent *bool `type:"boolean"` + + // Specifies whether the option requires a port. + PortRequired *bool `type:"boolean"` +} + +// String returns the string representation +func (s OptionGroupOption) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s OptionGroupOption) GoString() string { + return s.String() +} + +// Option group option settings are used to display settings available for each +// option with their default values and other information. These values are +// used with the DescribeOptionGroupOptions action. +type OptionGroupOptionSetting struct { + _ struct{} `type:"structure"` + + // Indicates the acceptable values for the option group option. + AllowedValues *string `type:"string"` + + // The DB engine specific parameter type for the option group option. + ApplyType *string `type:"string"` + + // The default value for the option group option. + DefaultValue *string `type:"string"` + + // Boolean value where true indicates that this option group option can be changed + // from the default value. + IsModifiable *bool `type:"boolean"` + + // The description of the option group option. + SettingDescription *string `type:"string"` + + // The name of the option group option. + SettingName *string `type:"string"` +} + +// String returns the string representation +func (s OptionGroupOptionSetting) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s OptionGroupOptionSetting) GoString() string { + return s.String() +} + +// Option settings are the actual settings being applied or configured for that +// option. 
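Since OptionGroupOptionSetting carries both the default value and the modifiability flag, a small local helper (a sketch only; it makes no API calls) can surface which settings of an available option are tunable:

// printModifiableSettings lists the settings of one available option that
// can be changed from their defaults, per the IsModifiable flag above.
func printModifiableSettings(opt *rds.OptionGroupOption) {
	for _, s := range opt.OptionGroupOptionSettings {
		if s.IsModifiable != nil && *s.IsModifiable {
			fmt.Printf("%s: default %q, allowed %q\n",
				aws.StringValue(s.SettingName),
				aws.StringValue(s.DefaultValue),
				aws.StringValue(s.AllowedValues))
		}
	}
}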
It is used when you modify an option group or describe option groups. +// For example, the NATIVE_NETWORK_ENCRYPTION option has a setting called SQLNET.ENCRYPTION_SERVER +// that can have several different values. +type OptionSetting struct { + _ struct{} `type:"structure"` + + // The allowed values of the option setting. + AllowedValues *string `type:"string"` + + // The DB engine specific parameter type. + ApplyType *string `type:"string"` + + // The data type of the option setting. + DataType *string `type:"string"` + + // The default value of the option setting. + DefaultValue *string `type:"string"` + + // The description of the option setting. + Description *string `type:"string"` + + // Indicates if the option setting is part of a collection. + IsCollection *bool `type:"boolean"` + + // A Boolean value that, when true, indicates the option setting can be modified + // from the default. + IsModifiable *bool `type:"boolean"` + + // The name of the option that has settings that you can set. + Name *string `type:"string"` + + // The current value of the option setting. + Value *string `type:"string"` +} + +// String returns the string representation +func (s OptionSetting) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s OptionSetting) GoString() string { + return s.String() +} + +// Contains a list of available options for a DB instance +// +// This data type is used as a response element in the DescribeOrderableDBInstanceOptions +// action. +type OrderableDBInstanceOption struct { + _ struct{} `type:"structure"` + + // A list of Availability Zones for the orderable DB instance. + AvailabilityZones []*AvailabilityZone `locationNameList:"AvailabilityZone" type:"list"` + + // The DB instance class for the orderable DB instance. + DBInstanceClass *string `type:"string"` + + // The engine type of the orderable DB instance. + Engine *string `type:"string"` + + // The engine version of the orderable DB instance. + EngineVersion *string `type:"string"` + + // The license model for the orderable DB instance. + LicenseModel *string `type:"string"` + + // Indicates whether this orderable DB instance is multi-AZ capable. + MultiAZCapable *bool `type:"boolean"` + + // Indicates whether this orderable DB instance can have a Read Replica. + ReadReplicaCapable *bool `type:"boolean"` + + // Indicates the storage type for this orderable DB instance. + StorageType *string `type:"string"` + + // Indicates whether the DB instance supports enhanced monitoring at intervals + // from 1 to 60 seconds. + SupportsEnhancedMonitoring *bool `type:"boolean"` + + // Indicates whether this orderable DB instance supports provisioned IOPS. + SupportsIops *bool `type:"boolean"` + + // Indicates whether this orderable DB instance supports encrypted storage. + SupportsStorageEncryption *bool `type:"boolean"` + + // Indicates whether this is a VPC orderable DB instance. + Vpc *bool `type:"boolean"` +} + +// String returns the string representation +func (s OrderableDBInstanceOption) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s OrderableDBInstanceOption) GoString() string { + return s.String() +} + +// This data type is used as a request parameter in the ModifyDBParameterGroup +// and ResetDBParameterGroup actions. +// +// This data type is used as a response element in the DescribeEngineDefaultParameters +// and DescribeDBParameters actions. 
+type Parameter struct { + _ struct{} `type:"structure"` + + // Specifies the valid range of values for the parameter. + AllowedValues *string `type:"string"` + + // Indicates when to apply parameter updates. + ApplyMethod *string `type:"string" enum:"ApplyMethod"` + + // Specifies the engine specific parameters type. + ApplyType *string `type:"string"` + + // Specifies the valid data type for the parameter. + DataType *string `type:"string"` + + // Provides a description of the parameter. + Description *string `type:"string"` + + // Indicates whether (true) or not (false) the parameter can be modified. Some + // parameters have security or operational implications that prevent them from + // being changed. + IsModifiable *bool `type:"boolean"` + + // The earliest engine version to which the parameter can apply. + MinimumEngineVersion *string `type:"string"` + + // Specifies the name of the parameter. + ParameterName *string `type:"string"` + + // Specifies the value of the parameter. + ParameterValue *string `type:"string"` + + // Indicates the source of the parameter value. + Source *string `type:"string"` +} + +// String returns the string representation +func (s Parameter) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Parameter) GoString() string { + return s.String() +} + +// Provides information about a pending maintenance action for a resource. +type PendingMaintenanceAction struct { + _ struct{} `type:"structure"` + + // The type of pending maintenance action that is available for the resource. + Action *string `type:"string"` + + // The date of the maintenance window when the action will be applied. The maintenance + // action will be applied to the resource during its first maintenance window + // after this date. If this date is specified, any next-maintenance opt-in requests + // are ignored. + AutoAppliedAfterDate *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + // The effective date when the pending maintenance action will be applied to + // the resource. This date takes into account opt-in requests received from + // the ApplyPendingMaintenanceAction API, the AutoAppliedAfterDate, and the + // ForcedApplyDate. This value is blank if an opt-in request has not been received + // and nothing has been specified as AutoAppliedAfterDate or ForcedApplyDate. + CurrentApplyDate *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + // A description providing more detail about the maintenance action. + Description *string `type:"string"` + + // The date when the maintenance action will be automatically applied. The maintenance + // action will be applied to the resource on this date regardless of the maintenance + // window for the resource. If this date is specified, any immediate opt-in + // requests are ignored. + ForcedApplyDate *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + // Indicates the type of opt-in request that has been received for the resource. + OptInStatus *string `type:"string"` +} + +// String returns the string representation +func (s PendingMaintenanceAction) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PendingMaintenanceAction) GoString() string { + return s.String() +} + +// This data type is used as a response element in the ModifyDBInstance action. 
+type PendingModifiedValues struct { + _ struct{} `type:"structure"` + + // Contains the new AllocatedStorage size for the DB instance that will be applied + // or is in progress. + AllocatedStorage *int64 `type:"integer"` + + // Specifies the pending number of days for which automated backups are retained. + BackupRetentionPeriod *int64 `type:"integer"` + + // Specifies the identifier of the CA certificate for the DB instance. + CACertificateIdentifier *string `type:"string"` + + // Contains the new DBInstanceClass for the DB instance that will be applied + // or is in progress. + DBInstanceClass *string `type:"string"` + + // Contains the new DBInstanceIdentifier for the DB instance that will be applied + // or is in progress. + DBInstanceIdentifier *string `type:"string"` + + // Indicates the database engine version. + EngineVersion *string `type:"string"` + + // Specifies the new Provisioned IOPS value for the DB instance that will be + // applied or is being applied. + Iops *int64 `type:"integer"` + + // Contains the pending or in-progress change of the master credentials for + // the DB instance. + MasterUserPassword *string `type:"string"` + + // Indicates that the Single-AZ DB instance is to change to a Multi-AZ deployment. + MultiAZ *bool `type:"boolean"` + + // Specifies the pending port for the DB instance. + Port *int64 `type:"integer"` + + // Specifies the storage type to be associated with the DB instance. + StorageType *string `type:"string"` +} + +// String returns the string representation +func (s PendingModifiedValues) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PendingModifiedValues) GoString() string { + return s.String() +} + +type PromoteReadReplicaInput struct { + _ struct{} `type:"structure"` + + // The number of days to retain automated backups. Setting this parameter to + // a positive number enables backups. Setting this parameter to 0 disables automated + // backups. + // + // Default: 1 + // + // Constraints: + // + // Must be a value from 0 to 8 + BackupRetentionPeriod *int64 `type:"integer"` + + // The DB instance identifier. This value is stored as a lowercase string. + // + // Constraints: + // + // Must be the identifier for an existing Read Replica DB instance Must contain + // from 1 to 63 alphanumeric characters or hyphens First character must be a + // letter Cannot end with a hyphen or contain two consecutive hyphens Example: + // mydbinstance + DBInstanceIdentifier *string `type:"string" required:"true"` + + // The daily time range during which automated backups are created if automated + // backups are enabled, using the BackupRetentionPeriod parameter. + // + // Default: A 30-minute window selected at random from an 8-hour block of + // time per region. To see the time blocks available, see Adjusting the Preferred + // Maintenance Window (http://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/AdjustingTheMaintenanceWindow.html) + // in the Amazon RDS User Guide. + // + // Constraints: + // + // Must be in the format hh24:mi-hh24:mi. Times should be in Universal Coordinated + // Time (UTC). Must not conflict with the preferred maintenance window. Must + // be at least 30 minutes. 
+ PreferredBackupWindow *string `type:"string"` +} + +// String returns the string representation +func (s PromoteReadReplicaInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PromoteReadReplicaInput) GoString() string { + return s.String() +} + +type PromoteReadReplicaOutput struct { + _ struct{} `type:"structure"` + + // Contains the result of a successful invocation of the following actions: + // + // CreateDBInstance DeleteDBInstance ModifyDBInstance This data type + // is used as a response element in the DescribeDBInstances action. + DBInstance *DBInstance `type:"structure"` +} + +// String returns the string representation +func (s PromoteReadReplicaOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PromoteReadReplicaOutput) GoString() string { + return s.String() +} + +type PurchaseReservedDBInstancesOfferingInput struct { + _ struct{} `type:"structure"` + + // The number of instances to reserve. + // + // Default: 1 + DBInstanceCount *int64 `type:"integer"` + + // Customer-specified identifier to track this reservation. + // + // Example: myreservationID + ReservedDBInstanceId *string `type:"string"` + + // The ID of the Reserved DB instance offering to purchase. + // + // Example: 438012d3-4052-4cc7-b2e3-8d3372e0e706 + ReservedDBInstancesOfferingId *string `type:"string" required:"true"` + + // A list of tags. + Tags []*Tag `locationNameList:"Tag" type:"list"` +} + +// String returns the string representation +func (s PurchaseReservedDBInstancesOfferingInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PurchaseReservedDBInstancesOfferingInput) GoString() string { + return s.String() +} + +type PurchaseReservedDBInstancesOfferingOutput struct { + _ struct{} `type:"structure"` + + // This data type is used as a response element in the DescribeReservedDBInstances + // and PurchaseReservedDBInstancesOffering actions. + ReservedDBInstance *ReservedDBInstance `type:"structure"` +} + +// String returns the string representation +func (s PurchaseReservedDBInstancesOfferingOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PurchaseReservedDBInstancesOfferingOutput) GoString() string { + return s.String() +} + +type RebootDBInstanceInput struct { + _ struct{} `type:"structure"` + + // The DB instance identifier. This parameter is stored as a lowercase string. + // + // Constraints: + // + // Must contain from 1 to 63 alphanumeric characters or hyphens First character + // must be a letter Cannot end with a hyphen or contain two consecutive hyphens + DBInstanceIdentifier *string `type:"string" required:"true"` + + // When true, the reboot will be conducted through a MultiAZ failover. + // + // Constraint: You cannot specify true if the instance is not configured for + // MultiAZ. 
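A sketch of promoting a Read Replica while enabling automated backups on the resulting standalone instance, per the BackupRetentionPeriod notes above; the identifier is hypothetical:

out, err := svc.PromoteReadReplica(&rds.PromoteReadReplicaInput{
	DBInstanceIdentifier:  aws.String("mydb-replica"), // hypothetical replica
	BackupRetentionPeriod: aws.Int64(7),               // 0 would disable automated backups
})
if err != nil {
	log.Fatal(err)
}
fmt.Println(out.DBInstance)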
+ ForceFailover *bool `type:"boolean"` +} + +// String returns the string representation +func (s RebootDBInstanceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RebootDBInstanceInput) GoString() string { + return s.String() +} + +type RebootDBInstanceOutput struct { + _ struct{} `type:"structure"` + + // Contains the result of a successful invocation of the following actions: + // + // CreateDBInstance DeleteDBInstance ModifyDBInstance This data type + // is used as a response element in the DescribeDBInstances action. + DBInstance *DBInstance `type:"structure"` +} + +// String returns the string representation +func (s RebootDBInstanceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RebootDBInstanceOutput) GoString() string { + return s.String() +} + +// This data type is used as a response element in the DescribeReservedDBInstances +// and DescribeReservedDBInstancesOfferings actions. +type RecurringCharge struct { + _ struct{} `type:"structure"` + + // The amount of the recurring charge. + RecurringChargeAmount *float64 `type:"double"` + + // The frequency of the recurring charge. + RecurringChargeFrequency *string `type:"string"` +} + +// String returns the string representation +func (s RecurringCharge) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RecurringCharge) GoString() string { + return s.String() +} + +type RemoveSourceIdentifierFromSubscriptionInput struct { + _ struct{} `type:"structure"` + + // The source identifier to be removed from the subscription, such as the DB + // instance identifier for a DB instance or the name of a security group. + SourceIdentifier *string `type:"string" required:"true"` + + // The name of the RDS event notification subscription you want to remove a + // source identifier from. + SubscriptionName *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s RemoveSourceIdentifierFromSubscriptionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RemoveSourceIdentifierFromSubscriptionInput) GoString() string { + return s.String() +} + +type RemoveSourceIdentifierFromSubscriptionOutput struct { + _ struct{} `type:"structure"` + + // Contains the results of a successful invocation of the DescribeEventSubscriptions + // action. + EventSubscription *EventSubscription `type:"structure"` +} + +// String returns the string representation +func (s RemoveSourceIdentifierFromSubscriptionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RemoveSourceIdentifierFromSubscriptionOutput) GoString() string { + return s.String() +} + +type RemoveTagsFromResourceInput struct { + _ struct{} `type:"structure"` + + // The Amazon RDS resource the tags will be removed from. This value is an Amazon + // Resource Name (ARN). For information about creating an ARN, see Constructing + // an RDS Amazon Resource Name (ARN) (http://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_Tagging.html#USER_Tagging.ARN). + ResourceName *string `type:"string" required:"true"` + + // The tag key (name) of the tag to be removed. 
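A sketch of a reboot that exercises the Multi-AZ failover path described above; ForceFailover is only valid when the (hypothetical) instance is configured for Multi-AZ:

out, err := svc.RebootDBInstance(&rds.RebootDBInstanceInput{
	DBInstanceIdentifier: aws.String("mydb"), // hypothetical
	ForceFailover:        aws.Bool(true),     // requires a Multi-AZ instance
})
if err != nil {
	log.Fatal(err)
}
fmt.Println(out.DBInstance)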
+ TagKeys []*string `type:"list" required:"true"` +} + +// String returns the string representation +func (s RemoveTagsFromResourceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RemoveTagsFromResourceInput) GoString() string { + return s.String() +} + +type RemoveTagsFromResourceOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s RemoveTagsFromResourceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RemoveTagsFromResourceOutput) GoString() string { + return s.String() +} + +// This data type is used as a response element in the DescribeReservedDBInstances +// and PurchaseReservedDBInstancesOffering actions. +type ReservedDBInstance struct { + _ struct{} `type:"structure"` + + // The currency code for the reserved DB instance. + CurrencyCode *string `type:"string"` + + // The DB instance class for the reserved DB instance. + DBInstanceClass *string `type:"string"` + + // The number of reserved DB instances. + DBInstanceCount *int64 `type:"integer"` + + // The duration of the reservation in seconds. + Duration *int64 `type:"integer"` + + // The fixed price charged for this reserved DB instance. + FixedPrice *float64 `type:"double"` + + // Indicates if the reservation applies to Multi-AZ deployments. + MultiAZ *bool `type:"boolean"` + + // The offering type of this reserved DB instance. + OfferingType *string `type:"string"` + + // The description of the reserved DB instance. + ProductDescription *string `type:"string"` + + // The recurring price charged to run this reserved DB instance. + RecurringCharges []*RecurringCharge `locationNameList:"RecurringCharge" type:"list"` + + // The unique identifier for the reservation. + ReservedDBInstanceId *string `type:"string"` + + // The offering identifier. + ReservedDBInstancesOfferingId *string `type:"string"` + + // The time the reservation started. + StartTime *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + // The state of the reserved DB instance. + State *string `type:"string"` + + // The hourly price charged for this reserved DB instance. + UsagePrice *float64 `type:"double"` +} + +// String returns the string representation +func (s ReservedDBInstance) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ReservedDBInstance) GoString() string { + return s.String() +} + +// This data type is used as a response element in the DescribeReservedDBInstancesOfferings +// action. +type ReservedDBInstancesOffering struct { + _ struct{} `type:"structure"` + + // The currency code for the reserved DB instance offering. + CurrencyCode *string `type:"string"` + + // The DB instance class for the reserved DB instance. + DBInstanceClass *string `type:"string"` + + // The duration of the offering in seconds. + Duration *int64 `type:"integer"` + + // The fixed price charged for this offering. + FixedPrice *float64 `type:"double"` + + // Indicates if the offering applies to Multi-AZ deployments. + MultiAZ *bool `type:"boolean"` + + // The offering type. + OfferingType *string `type:"string"` + + // The database engine used by the offering. + ProductDescription *string `type:"string"` + + // The recurring price charged to run this reserved DB instance. + RecurringCharges []*RecurringCharge `locationNameList:"RecurringCharge" type:"list"` + + // The offering identifier. 
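The tag-removal input above takes a full resource ARN plus bare key names. A sketch with a hypothetical ARN and key:

_, err := svc.RemoveTagsFromResource(&rds.RemoveTagsFromResourceInput{
	ResourceName: aws.String("arn:aws:rds:us-east-1:123456789012:db:mydb"), // hypothetical
	TagKeys:      []*string{aws.String("environment")},
})
if err != nil {
	log.Fatal(err)
}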
+ ReservedDBInstancesOfferingId *string `type:"string"`
+
+ // The hourly price charged for this offering.
+ UsagePrice *float64 `type:"double"`
+}
+
+// String returns the string representation
+func (s ReservedDBInstancesOffering) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ReservedDBInstancesOffering) GoString() string {
+ return s.String()
+}
+
+type ResetDBClusterParameterGroupInput struct {
+ _ struct{} `type:"structure"`
+
+ // The name of the DB cluster parameter group to reset.
+ DBClusterParameterGroupName *string `type:"string" required:"true"`
+
+ // A list of parameter names in the DB cluster parameter group to reset to the
+ // default values. You cannot use this parameter if the ResetAllParameters parameter
+ // is set to true.
+ Parameters []*Parameter `locationNameList:"Parameter" type:"list"`
+
+ // A value that is set to true to reset all parameters in the DB cluster parameter
+ // group to their default values, and false otherwise. You cannot use this parameter
+ // if there is a list of parameter names specified for the Parameters parameter.
+ ResetAllParameters *bool `type:"boolean"`
+}
+
+// String returns the string representation
+func (s ResetDBClusterParameterGroupInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ResetDBClusterParameterGroupInput) GoString() string {
+ return s.String()
+}
+
+type ResetDBParameterGroupInput struct {
+ _ struct{} `type:"structure"`
+
+ // The name of the DB parameter group.
+ //
+ // Constraints:
+ //
+ // Must be 1 to 255 alphanumeric characters First character must be a letter
+ // Cannot end with a hyphen or contain two consecutive hyphens
+ DBParameterGroupName *string `type:"string" required:"true"`
+
+ // An array of parameter names, values, and the apply method for the parameter
+ // update. At least one parameter name, value, and apply method must be supplied;
+ // subsequent arguments are optional. A maximum of 20 parameters can be modified
+ // in a single request.
+ //
+ // MySQL
+ //
+ // Valid Values (for Apply method): immediate | pending-reboot
+ //
+ // You can use the immediate value with dynamic parameters only. You can use
+ // the pending-reboot value for both dynamic and static parameters, and changes
+ // are applied when the DB instance reboots.
+ //
+ // MariaDB
+ //
+ // Valid Values (for Apply method): immediate | pending-reboot
+ //
+ // You can use the immediate value with dynamic parameters only. You can use
+ // the pending-reboot value for both dynamic and static parameters, and changes
+ // are applied when the DB instance reboots.
+ //
+ // Oracle
+ //
+ // Valid Values (for Apply method): pending-reboot
+ Parameters []*Parameter `locationNameList:"Parameter" type:"list"`
+
+ // Specifies whether (true) or not (false) to reset all parameters in the DB
+ // parameter group to default values.
+ //
+ // Default: true
+ ResetAllParameters *bool `type:"boolean"`
+}
+
+// String returns the string representation
+func (s ResetDBParameterGroupInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ResetDBParameterGroupInput) GoString() string {
+ return s.String()
+}
+
+// Describes the pending maintenance actions for a resource.
+type ResourcePendingMaintenanceActions struct {
+ _ struct{} `type:"structure"`
+
+ // A list that provides details about the pending maintenance actions for the
+ // resource.
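A sketch of resetting a single parameter rather than the whole group, matching the ResetAllParameters semantics described above; the names are hypothetical:

_, err := svc.ResetDBParameterGroup(&rds.ResetDBParameterGroupInput{
	DBParameterGroupName: aws.String("my-param-group"), // hypothetical
	ResetAllParameters:   aws.Bool(false),
	Parameters: []*rds.Parameter{{
		ParameterName: aws.String("max_connections"), // hypothetical
		ApplyMethod:   aws.String("pending-reboot"),
	}},
})
if err != nil {
	log.Fatal(err)
}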
+ PendingMaintenanceActionDetails []*PendingMaintenanceAction `locationNameList:"PendingMaintenanceAction" type:"list"` + + // The ARN of the resource that has pending maintenance actions. + ResourceIdentifier *string `type:"string"` +} + +// String returns the string representation +func (s ResourcePendingMaintenanceActions) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ResourcePendingMaintenanceActions) GoString() string { + return s.String() +} + +type RestoreDBClusterFromSnapshotInput struct { + _ struct{} `type:"structure"` + + // Provides the list of EC2 Availability Zones that instances in the restored + // DB cluster can be created in. + AvailabilityZones []*string `locationNameList:"AvailabilityZone" type:"list"` + + // The name of the DB cluster to create from the DB cluster snapshot. This parameter + // isn't case-sensitive. + // + // Constraints: + // + // Must contain from 1 to 255 alphanumeric characters or hyphens First character + // must be a letter Cannot end with a hyphen or contain two consecutive hyphens + // Example: my-snapshot-id + DBClusterIdentifier *string `type:"string" required:"true"` + + // The name of the DB subnet group to use for the new DB cluster. + DBSubnetGroupName *string `type:"string"` + + // The database name for the restored DB cluster. + DatabaseName *string `type:"string"` + + // The database engine to use for the new DB cluster. + // + // Default: The same as source + // + // Constraint: Must be compatible with the engine of the source + Engine *string `type:"string" required:"true"` + + // The version of the database engine to use for the new DB cluster. + EngineVersion *string `type:"string"` + + // The KMS key identifier to use when restoring an encrypted DB cluster from + // an encrypted DB cluster snapshot. + // + // The KMS key identifier is the Amazon Resource Name (ARN) for the KMS encryption + // key. If you are restoring a DB cluster with the same AWS account that owns + // the KMS encryption key used to encrypt the new DB cluster, then you can use + // the KMS key alias instead of the ARN for the KMS encryption key. + // + // If you do not specify a value for the KmsKeyId parameter, then the following + // will occur: + // + // If the DB cluster snapshot is encrypted, then the restored DB cluster is + // encrypted using the KMS key that was used to encrypt the DB cluster snapshot. + // If the DB cluster snapshot is not encrypted, then the restored DB cluster + // is not encrypted. If SnapshotIdentifier refers to a DB cluster snapshot + // that is not encrypted, and you specify a value for the KmsKeyId parameter, + // then the restore request is rejected. + KmsKeyId *string `type:"string"` + + // The name of the option group to use for the restored DB cluster. + OptionGroupName *string `type:"string"` + + // The port number on which the new DB cluster accepts connections. + // + // Constraints: Value must be 1150-65535 + // + // Default: The same port as the original DB cluster. + Port *int64 `type:"integer"` + + // The identifier for the DB cluster snapshot to restore from. + // + // Constraints: + // + // Must contain from 1 to 63 alphanumeric characters or hyphens First character + // must be a letter Cannot end with a hyphen or contain two consecutive hyphens + SnapshotIdentifier *string `type:"string" required:"true"` + + // The tags to be assigned to the restored DB cluster. 
+ Tags []*Tag `locationNameList:"Tag" type:"list"`
+
+ // A list of VPC security groups that the new DB cluster will belong to.
+ VpcSecurityGroupIds []*string `locationNameList:"VpcSecurityGroupId" type:"list"`
+}
+
+// String returns the string representation
+func (s RestoreDBClusterFromSnapshotInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s RestoreDBClusterFromSnapshotInput) GoString() string {
+ return s.String()
+}
+
+type RestoreDBClusterFromSnapshotOutput struct {
+ _ struct{} `type:"structure"`
+
+ // Contains the result of a successful invocation of the following actions:
+ //
+ // CreateDBCluster DeleteDBCluster FailoverDBCluster ModifyDBCluster
+ // RestoreDBClusterFromSnapshot This data type is used as a response element
+ // in the DescribeDBClusters action.
+ DBCluster *DBCluster `type:"structure"`
+}
+
+// String returns the string representation
+func (s RestoreDBClusterFromSnapshotOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s RestoreDBClusterFromSnapshotOutput) GoString() string {
+ return s.String()
+}
+
+type RestoreDBClusterToPointInTimeInput struct {
+ _ struct{} `type:"structure"`
+
+ // The name of the new DB cluster to be created.
+ //
+ // Constraints:
+ //
+ // Must contain from 1 to 63 alphanumeric characters or hyphens First character
+ // must be a letter Cannot end with a hyphen or contain two consecutive hyphens
+ DBClusterIdentifier *string `type:"string" required:"true"`
+
+ // The DB subnet group name to use for the new DB cluster.
+ DBSubnetGroupName *string `type:"string"`
+
+ // The KMS key identifier to use when restoring an encrypted DB cluster from
+ // an encrypted DB cluster.
+ //
+ // The KMS key identifier is the Amazon Resource Name (ARN) for the KMS encryption
+ // key. If you are restoring a DB cluster with the same AWS account that owns
+ // the KMS encryption key used to encrypt the new DB cluster, then you can use
+ // the KMS key alias instead of the ARN for the KMS encryption key.
+ //
+ // You can restore to a new DB cluster and encrypt the new DB cluster with
+ // a KMS key that is different than the KMS key used to encrypt the source DB
+ // cluster. The new DB cluster will be encrypted with the KMS key identified
+ // by the KmsKeyId parameter.
+ //
+ // If you do not specify a value for the KmsKeyId parameter, then the following
+ // will occur:
+ //
+ // If the DB cluster is encrypted, then the restored DB cluster is encrypted
+ // using the KMS key that was used to encrypt the source DB cluster. If the
+ // DB cluster is not encrypted, then the restored DB cluster is not encrypted.
+ // If DBClusterIdentifier refers to a DB cluster that is not encrypted, then
+ // the restore request is rejected.
+ KmsKeyId *string `type:"string"`
+
+ // The name of the option group for the new DB cluster.
+ OptionGroupName *string `type:"string"`
+
+ // The port number on which the new DB cluster accepts connections.
+ //
+ // Constraints: Value must be 1150-65535
+ //
+ // Default: The same port as the original DB cluster.
+ Port *int64 `type:"integer"`
+
+ // The date and time to restore the DB cluster to.
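A sketch of the cluster snapshot restore above; Engine is required and must be compatible with the source, and all identifiers here are hypothetical:

out, err := svc.RestoreDBClusterFromSnapshot(&rds.RestoreDBClusterFromSnapshotInput{
	DBClusterIdentifier: aws.String("restored-cluster"),    // hypothetical
	SnapshotIdentifier:  aws.String("my-cluster-snapshot"), // hypothetical
	Engine:              aws.String("aurora"),
})
if err != nil {
	log.Fatal(err)
}
fmt.Println(out.DBCluster)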
+ //
+ // Valid Values: Value must be a time in Universal Coordinated Time (UTC) format
+ //
+ // Constraints:
+ //
+ // Must be before the latest restorable time for the DB instance Cannot be
+ // specified if UseLatestRestorableTime parameter is true Example: 2015-03-07T23:45:00Z
+ RestoreToTime *time.Time `type:"timestamp" timestampFormat:"iso8601"`
+
+ // The identifier of the source DB cluster from which to restore.
+ //
+ // Constraints:
+ //
+ // Must be the identifier of an existing DB cluster Must contain from
+ // 1 to 63 alphanumeric characters or hyphens First character must be a letter
+ // Cannot end with a hyphen or contain two consecutive hyphens
+ SourceDBClusterIdentifier *string `type:"string" required:"true"`
+
+ // A list of tags.
+ Tags []*Tag `locationNameList:"Tag" type:"list"`
+
+ // A value that is set to true to restore the DB cluster to the latest restorable
+ // backup time, and false otherwise.
+ //
+ // Default: false
+ //
+ // Constraints: Cannot be specified if RestoreToTime parameter is provided.
+ UseLatestRestorableTime *bool `type:"boolean"`
+
+ // A list of VPC security groups that the new DB cluster belongs to.
+ VpcSecurityGroupIds []*string `locationNameList:"VpcSecurityGroupId" type:"list"`
+}
+
+// String returns the string representation
+func (s RestoreDBClusterToPointInTimeInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s RestoreDBClusterToPointInTimeInput) GoString() string {
+ return s.String()
+}
+
+type RestoreDBClusterToPointInTimeOutput struct {
+ _ struct{} `type:"structure"`
+
+ // Contains the result of a successful invocation of the following actions:
+ //
+ // CreateDBCluster DeleteDBCluster FailoverDBCluster ModifyDBCluster
+ // RestoreDBClusterFromSnapshot This data type is used as a response element
+ // in the DescribeDBClusters action.
+ DBCluster *DBCluster `type:"structure"`
+}
+
+// String returns the string representation
+func (s RestoreDBClusterToPointInTimeOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s RestoreDBClusterToPointInTimeOutput) GoString() string {
+ return s.String()
+}
+
+type RestoreDBInstanceFromDBSnapshotInput struct {
+ _ struct{} `type:"structure"`
+
+ // Indicates that minor version upgrades will be applied automatically to the
+ // DB instance during the maintenance window.
+ AutoMinorVersionUpgrade *bool `type:"boolean"`
+
+ // The EC2 Availability Zone that the database instance will be created in.
+ //
+ // Default: A random, system-chosen Availability Zone.
+ //
+ // Constraint: You cannot specify the AvailabilityZone parameter if the MultiAZ
+ // parameter is set to true.
+ //
+ // Example: us-east-1a
+ AvailabilityZone *string `type:"string"`
+
+ // True to copy all tags from the restored DB instance to snapshots of the DB
+ // instance; otherwise false. The default is false.
+ CopyTagsToSnapshot *bool `type:"boolean"`
+
+ // The compute and memory capacity of the Amazon RDS DB instance.
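A sketch of a point-in-time restore to the latest restorable time; per the constraints above, UseLatestRestorableTime and RestoreToTime are mutually exclusive. The identifiers are hypothetical:

out, err := svc.RestoreDBClusterToPointInTime(&rds.RestoreDBClusterToPointInTimeInput{
	DBClusterIdentifier:       aws.String("restored-cluster"), // hypothetical
	SourceDBClusterIdentifier: aws.String("source-cluster"),   // hypothetical
	UseLatestRestorableTime:   aws.Bool(true),
})
if err != nil {
	log.Fatal(err)
}
fmt.Println(out.DBCluster)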
+ // + // Valid Values: db.t1.micro | db.m1.small | db.m1.medium | db.m1.large | db.m1.xlarge + // | db.m2.2xlarge | db.m2.4xlarge | db.m3.medium | db.m3.large | db.m3.xlarge + // | db.m3.2xlarge | db.m4.large | db.m4.xlarge | db.m4.2xlarge | db.m4.4xlarge + // | db.m4.10xlarge | db.r3.large | db.r3.xlarge | db.r3.2xlarge | db.r3.4xlarge + // | db.r3.8xlarge | db.t2.micro | db.t2.small | db.t2.medium | db.t2.large + DBInstanceClass *string `type:"string"` + + // Name of the DB instance to create from the DB snapshot. This parameter isn't + // case-sensitive. + // + // Constraints: + // + // Must contain from 1 to 63 alphanumeric characters or hyphens (1 to 15 for + // SQL Server) First character must be a letter Cannot end with a hyphen or + // contain two consecutive hyphens Example: my-snapshot-id + DBInstanceIdentifier *string `type:"string" required:"true"` + + // The database name for the restored DB instance. + // + // This parameter doesn't apply to the MySQL or MariaDB engines. + DBName *string `type:"string"` + + // The identifier for the DB snapshot to restore from. + // + // Constraints: + // + // Must contain from 1 to 255 alphanumeric characters or hyphens First character + // must be a letter Cannot end with a hyphen or contain two consecutive hyphens + // If you are restoring from a shared manual DB snapshot, the DBSnapshotIdentifier + // must be the ARN of the shared DB snapshot. + DBSnapshotIdentifier *string `type:"string" required:"true"` + + // The DB subnet group name to use for the new instance. + DBSubnetGroupName *string `type:"string"` + + // The database engine to use for the new instance. + // + // Default: The same as source + // + // Constraint: Must be compatible with the engine of the source + // + // Valid Values: MySQL | mariadb | oracle-se1 | oracle-se | oracle-ee | sqlserver-ee + // | sqlserver-se | sqlserver-ex | sqlserver-web | postgres | aurora + Engine *string `type:"string"` + + // Specifies the amount of provisioned IOPS for the DB instance, expressed in + // I/O operations per second. If this parameter is not specified, the IOPS value + // will be taken from the backup. If this parameter is set to 0, the new instance + // will be converted to a non-PIOPS instance, which will take additional time, + // though your DB instance will be available for connections before the conversion + // starts. + // + // Constraints: Must be an integer greater than 1000. + // + // SQL Server + // + // Setting the IOPS value for the SQL Server database engine is not supported. + Iops *int64 `type:"integer"` + + // License model information for the restored DB instance. + // + // Default: Same as source. + // + // Valid values: license-included | bring-your-own-license | general-public-license + LicenseModel *string `type:"string"` + + // Specifies if the DB instance is a Multi-AZ deployment. + // + // Constraint: You cannot specify the AvailabilityZone parameter if the MultiAZ + // parameter is set to true. + MultiAZ *bool `type:"boolean"` + + // The name of the option group to be used for the restored DB instance. + // + // Permanent options, such as the TDE option for Oracle Advanced Security TDE, + // cannot be removed from an option group, and that option group cannot be removed + // from a DB instance once it is associated with a DB instance + OptionGroupName *string `type:"string"` + + // The port number on which the database accepts connections. 
+ // + // Default: The same port as the original DB instance + // + // Constraints: Value must be 1150-65535 + Port *int64 `type:"integer"` + + // Specifies the accessibility options for the DB instance. A value of true + // specifies an Internet-facing instance with a publicly resolvable DNS name, + // which resolves to a public IP address. A value of false specifies an internal + // instance with a DNS name that resolves to a private IP address. + // + // Default: The default behavior varies depending on whether a VPC has been + // requested or not. The following list shows the default behavior in each case. + // + // Default VPC: true VPC: false If no DB subnet group has been specified + // as part of the request and the PubliclyAccessible value has not been set, + // the DB instance will be publicly accessible. If a specific DB subnet group + // has been specified as part of the request and the PubliclyAccessible value + // has not been set, the DB instance will be private. + PubliclyAccessible *bool `type:"boolean"` + + // Specifies the storage type to be associated with the DB instance. + // + // Valid values: standard | gp2 | io1 + // + // If you specify io1, you must also include a value for the Iops parameter. + // + // Default: io1 if the Iops parameter is specified; otherwise standard + StorageType *string `type:"string"` + + // A list of tags. + Tags []*Tag `locationNameList:"Tag" type:"list"` + + // The ARN from the Key Store with which to associate the instance for TDE encryption. + TdeCredentialArn *string `type:"string"` + + // The password for the given ARN from the Key Store in order to access the + // device. + TdeCredentialPassword *string `type:"string"` +} + +// String returns the string representation +func (s RestoreDBInstanceFromDBSnapshotInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RestoreDBInstanceFromDBSnapshotInput) GoString() string { + return s.String() +} + +type RestoreDBInstanceFromDBSnapshotOutput struct { + _ struct{} `type:"structure"` + + // Contains the result of a successful invocation of the following actions: + // + // CreateDBInstance DeleteDBInstance ModifyDBInstance This data type + // is used as a response element in the DescribeDBInstances action. + DBInstance *DBInstance `type:"structure"` +} + +// String returns the string representation +func (s RestoreDBInstanceFromDBSnapshotOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RestoreDBInstanceFromDBSnapshotOutput) GoString() string { + return s.String() +} + +type RestoreDBInstanceToPointInTimeInput struct { + _ struct{} `type:"structure"` + + // Indicates that minor version upgrades will be applied automatically to the + // DB instance during the maintenance window. + AutoMinorVersionUpgrade *bool `type:"boolean"` + + // The EC2 Availability Zone that the database instance will be created in. + // + // Default: A random, system-chosen Availability Zone. + // + // Constraint: You cannot specify the AvailabilityZone parameter if the MultiAZ + // parameter is set to true. + // + // Example: us-east-1a + AvailabilityZone *string `type:"string"` + + // True to copy all tags from the restored DB instance to snapshots of the DB + // instance; otherwise false. The default is false. + CopyTagsToSnapshot *bool `type:"boolean"` + + // The compute and memory capacity of the Amazon RDS DB instance. 
+ //
+ // Valid Values: db.t1.micro | db.m1.small | db.m1.medium | db.m1.large | db.m1.xlarge
+ // | db.m2.2xlarge | db.m2.4xlarge | db.m3.medium | db.m3.large | db.m3.xlarge
+ // | db.m3.2xlarge | db.m4.large | db.m4.xlarge | db.m4.2xlarge | db.m4.4xlarge
+ // | db.m4.10xlarge | db.r3.large | db.r3.xlarge | db.r3.2xlarge | db.r3.4xlarge
+ // | db.r3.8xlarge | db.t2.micro | db.t2.small | db.t2.medium | db.t2.large
+ //
+ // Default: The same DBInstanceClass as the original DB instance.
+ DBInstanceClass *string `type:"string"`
+
+ // The database name for the restored DB instance.
+ //
+ // This parameter is not used for the MySQL or MariaDB engines.
+ DBName *string `type:"string"`
+
+ // The DB subnet group name to use for the new instance.
+ DBSubnetGroupName *string `type:"string"`
+
+ // The database engine to use for the new instance.
+ //
+ // Default: The same as source
+ //
+ // Constraint: Must be compatible with the engine of the source
+ //
+ // Valid Values: MySQL | mariadb | oracle-se1 | oracle-se | oracle-ee | sqlserver-ee
+ // | sqlserver-se | sqlserver-ex | sqlserver-web | postgres | aurora
+ Engine *string `type:"string"`
+
+ // The amount of Provisioned IOPS (input/output operations per second) to be
+ // initially allocated for the DB instance.
+ //
+ // Constraints: Must be an integer greater than 1000.
+ //
+ // SQL Server
+ //
+ // Setting the IOPS value for the SQL Server database engine is not supported.
+ Iops *int64 `type:"integer"`
+
+ // License model information for the restored DB instance.
+ //
+ // Default: Same as source.
+ //
+ // Valid values: license-included | bring-your-own-license | general-public-license
+ LicenseModel *string `type:"string"`
+
+ // Specifies if the DB instance is a Multi-AZ deployment.
+ //
+ // Constraint: You cannot specify the AvailabilityZone parameter if the MultiAZ
+ // parameter is set to true.
+ MultiAZ *bool `type:"boolean"`
+
+ // The name of the option group to be used for the restored DB instance.
+ //
+ // Permanent options, such as the TDE option for Oracle Advanced Security TDE,
+ // cannot be removed from an option group, and that option group cannot be removed
+ // from a DB instance once it is associated with a DB instance
+ OptionGroupName *string `type:"string"`
+
+ // The port number on which the database accepts connections.
+ //
+ // Constraints: Value must be 1150-65535
+ //
+ // Default: The same port as the original DB instance.
+ Port *int64 `type:"integer"`
+
+ // Specifies the accessibility options for the DB instance. A value of true
+ // specifies an Internet-facing instance with a publicly resolvable DNS name,
+ // which resolves to a public IP address. A value of false specifies an internal
+ // instance with a DNS name that resolves to a private IP address.
+ //
+ // Default: The default behavior varies depending on whether a VPC has been
+ // requested or not. The following list shows the default behavior in each case.
+ //
+ // Default VPC: true VPC: false If no DB subnet group has been specified
+ // as part of the request and the PubliclyAccessible value has not been set,
+ // the DB instance will be publicly accessible. If a specific DB subnet group
+ // has been specified as part of the request and the PubliclyAccessible value
+ // has not been set, the DB instance will be private.
+ PubliclyAccessible *bool `type:"boolean"`
+
+ // The date and time to restore from.
+ // + // Valid Values: Value must be a time in Universal Coordinated Time (UTC) format + // + // Constraints: + // + // Must be before the latest restorable time for the DB instance Cannot be + // specified if UseLatestRestorableTime parameter is true Example: 2009-09-07T23:45:00Z + RestoreTime *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + // The identifier of the source DB instance from which to restore. + // + // Constraints: + // + // Must be the identifier of an existing database instance Must contain from + // 1 to 63 alphanumeric characters or hyphens First character must be a letter + // Cannot end with a hyphen or contain two consecutive hyphens + SourceDBInstanceIdentifier *string `type:"string" required:"true"` + + // Specifies the storage type to be associated with the DB instance. + // + // Valid values: standard | gp2 | io1 + // + // If you specify io1, you must also include a value for the Iops parameter. + // + // Default: io1 if the Iops parameter is specified; otherwise standard + StorageType *string `type:"string"` + + // A list of tags. + Tags []*Tag `locationNameList:"Tag" type:"list"` + + // The name of the new database instance to be created. + // + // Constraints: + // + // Must contain from 1 to 63 alphanumeric characters or hyphens First character + // must be a letter Cannot end with a hyphen or contain two consecutive hyphens + TargetDBInstanceIdentifier *string `type:"string" required:"true"` + + // The ARN from the Key Store with which to associate the instance for TDE encryption. + TdeCredentialArn *string `type:"string"` + + // The password for the given ARN from the Key Store in order to access the + // device. + TdeCredentialPassword *string `type:"string"` + + // Specifies whether (true) or not (false) the DB instance is restored from + // the latest backup time. + // + // Default: false + // + // Constraints: Cannot be specified if RestoreTime parameter is provided. + UseLatestRestorableTime *bool `type:"boolean"` +} + +// String returns the string representation +func (s RestoreDBInstanceToPointInTimeInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RestoreDBInstanceToPointInTimeInput) GoString() string { + return s.String() +} + +type RestoreDBInstanceToPointInTimeOutput struct { + _ struct{} `type:"structure"` + + // Contains the result of a successful invocation of the following actions: + // + // CreateDBInstance DeleteDBInstance ModifyDBInstance This data type + // is used as a response element in the DescribeDBInstances action. + DBInstance *DBInstance `type:"structure"` +} + +// String returns the string representation +func (s RestoreDBInstanceToPointInTimeOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RestoreDBInstanceToPointInTimeOutput) GoString() string { + return s.String() +} + +type RevokeDBSecurityGroupIngressInput struct { + _ struct{} `type:"structure"` + + // The IP range to revoke access from. Must be a valid CIDR range. If CIDRIP + // is specified, EC2SecurityGroupName, EC2SecurityGroupId and EC2SecurityGroupOwnerId + // cannot be provided. + CIDRIP *string `type:"string"` + + // The name of the DB security group to revoke ingress from. + DBSecurityGroupName *string `type:"string" required:"true"` + + // The id of the EC2 security group to revoke access from. For VPC DB security + // groups, EC2SecurityGroupId must be provided. 
Otherwise, EC2SecurityGroupOwnerId
+ // and either EC2SecurityGroupName or EC2SecurityGroupId must be provided.
+ EC2SecurityGroupId *string `type:"string"`
+
+ // The name of the EC2 security group to revoke access from. For VPC DB security
+ // groups, EC2SecurityGroupId must be provided. Otherwise, EC2SecurityGroupOwnerId
+ // and either EC2SecurityGroupName or EC2SecurityGroupId must be provided.
+ EC2SecurityGroupName *string `type:"string"`
+
+ // The AWS Account Number of the owner of the EC2 security group specified in
+ // the EC2SecurityGroupName parameter. The AWS Access Key ID is not an acceptable
+ // value. For VPC DB security groups, EC2SecurityGroupId must be provided. Otherwise,
+ // EC2SecurityGroupOwnerId and either EC2SecurityGroupName or EC2SecurityGroupId
+ // must be provided.
+ EC2SecurityGroupOwnerId *string `type:"string"`
+}
+
+// String returns the string representation
+func (s RevokeDBSecurityGroupIngressInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s RevokeDBSecurityGroupIngressInput) GoString() string {
+ return s.String()
+}
+
+type RevokeDBSecurityGroupIngressOutput struct {
+ _ struct{} `type:"structure"`
+
+ // Contains the result of a successful invocation of the following actions:
+ //
+ // DescribeDBSecurityGroups AuthorizeDBSecurityGroupIngress CreateDBSecurityGroup
+ // RevokeDBSecurityGroupIngress This data type is used as a response element
+ // in the DescribeDBSecurityGroups action.
+ DBSecurityGroup *DBSecurityGroup `type:"structure"`
+}
+
+// String returns the string representation
+func (s RevokeDBSecurityGroupIngressOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s RevokeDBSecurityGroupIngressOutput) GoString() string {
+ return s.String()
+}
+
+// This data type is used as a response element in the DescribeDBSubnetGroups
+// action.
+type Subnet struct {
+ _ struct{} `type:"structure"`
+
+ // Contains Availability Zone information.
+ //
+ // This data type is used as an element in the following data type: OrderableDBInstanceOption
+ SubnetAvailabilityZone *AvailabilityZone `type:"structure"`
+
+ // Specifies the identifier of the subnet.
+ SubnetIdentifier *string `type:"string"`
+
+ // Specifies the status of the subnet.
+ SubnetStatus *string `type:"string"`
+}
+
+// String returns the string representation
+func (s Subnet) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s Subnet) GoString() string {
+ return s.String()
+}
+
+// Metadata assigned to an Amazon RDS resource consisting of a key-value pair.
+type Tag struct {
+ _ struct{} `type:"structure"`
+
+ // A key is the required name of the tag. The string value can be from 1 to
+ // 128 Unicode characters in length and cannot be prefixed with "aws:" or "rds:".
+ // The string can only contain the set of Unicode letters, digits, white-space,
+ // '_', '.', '/', '=', '+', '-' (Java regex: "^([\\p{L}\\p{Z}\\p{N}_.:/=+\\-]*)$").
+ Key *string `type:"string"`
+
+ // A value is the optional value of the tag. The string value can be from 1
+ // to 256 Unicode characters in length and cannot be prefixed with "aws:" or
+ // "rds:". The string can only contain the set of Unicode letters, digits,
+ // white-space, '_', '.', '/', '=', '+', '-' (Java regex: "^([\\p{L}\\p{Z}\\p{N}_.:/=+\\-]*)$").
+ Value *string `type:"string"` +} + +// String returns the string representation +func (s Tag) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Tag) GoString() string { + return s.String() +} + +// The version of the database engine that a DB instance can be upgraded to. +type UpgradeTarget struct { + _ struct{} `type:"structure"` + + // A value that indicates whether the target version will be applied to any + // source DB instances that have AutoMinorVersionUpgrade set to true. + AutoUpgrade *bool `type:"boolean"` + + // The version of the database engine that a DB instance can be upgraded to. + Description *string `type:"string"` + + // The name of the upgrade target database engine. + Engine *string `type:"string"` + + // The version number of the upgrade target database engine. + EngineVersion *string `type:"string"` + + // A value that indicates whether a database engine will be upgraded to a major + // version. + IsMajorVersionUpgrade *bool `type:"boolean"` +} + +// String returns the string representation +func (s UpgradeTarget) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpgradeTarget) GoString() string { + return s.String() +} + +// This data type is used as a response element for queries on VPC security +// group membership. +type VpcSecurityGroupMembership struct { + _ struct{} `type:"structure"` + + // The status of the VPC security group. + Status *string `type:"string"` + + // The name of the VPC security group. + VpcSecurityGroupId *string `type:"string"` +} + +// String returns the string representation +func (s VpcSecurityGroupMembership) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s VpcSecurityGroupMembership) GoString() string { + return s.String() +} + +const ( + // @enum ApplyMethod + ApplyMethodImmediate = "immediate" + // @enum ApplyMethod + ApplyMethodPendingReboot = "pending-reboot" +) + +const ( + // @enum SourceType + SourceTypeDbInstance = "db-instance" + // @enum SourceType + SourceTypeDbParameterGroup = "db-parameter-group" + // @enum SourceType + SourceTypeDbSecurityGroup = "db-security-group" + // @enum SourceType + SourceTypeDbSnapshot = "db-snapshot" + // @enum SourceType + SourceTypeDbCluster = "db-cluster" + // @enum SourceType + SourceTypeDbClusterSnapshot = "db-cluster-snapshot" +) diff --git a/vendor/github.com/aws/aws-sdk-go/service/rds/examples_test.go b/vendor/github.com/aws/aws-sdk-go/service/rds/examples_test.go new file mode 100644 index 000000000..64e5b9a62 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/rds/examples_test.go @@ -0,0 +1,2340 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +package rds_test + +import ( + "bytes" + "fmt" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/session" + "github.com/aws/aws-sdk-go/service/rds" +) + +var _ time.Duration +var _ bytes.Buffer + +func ExampleRDS_AddSourceIdentifierToSubscription() { + svc := rds.New(session.New()) + + params := &rds.AddSourceIdentifierToSubscriptionInput{ + SourceIdentifier: aws.String("String"), // Required + SubscriptionName: aws.String("String"), // Required + } + resp, err := svc.AddSourceIdentifierToSubscription(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleRDS_AddTagsToResource() { + svc := rds.New(session.New()) + + params := &rds.AddTagsToResourceInput{ + ResourceName: aws.String("String"), // Required + Tags: []*rds.Tag{ // Required + { // Required + Key: aws.String("String"), + Value: aws.String("String"), + }, + // More values... + }, + } + resp, err := svc.AddTagsToResource(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRDS_ApplyPendingMaintenanceAction() { + svc := rds.New(session.New()) + + params := &rds.ApplyPendingMaintenanceActionInput{ + ApplyAction: aws.String("String"), // Required + OptInType: aws.String("String"), // Required + ResourceIdentifier: aws.String("String"), // Required + } + resp, err := svc.ApplyPendingMaintenanceAction(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRDS_AuthorizeDBSecurityGroupIngress() { + svc := rds.New(session.New()) + + params := &rds.AuthorizeDBSecurityGroupIngressInput{ + DBSecurityGroupName: aws.String("String"), // Required + CIDRIP: aws.String("String"), + EC2SecurityGroupId: aws.String("String"), + EC2SecurityGroupName: aws.String("String"), + EC2SecurityGroupOwnerId: aws.String("String"), + } + resp, err := svc.AuthorizeDBSecurityGroupIngress(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRDS_CopyDBClusterSnapshot() { + svc := rds.New(session.New()) + + params := &rds.CopyDBClusterSnapshotInput{ + SourceDBClusterSnapshotIdentifier: aws.String("String"), // Required + TargetDBClusterSnapshotIdentifier: aws.String("String"), // Required + Tags: []*rds.Tag{ + { // Required + Key: aws.String("String"), + Value: aws.String("String"), + }, + // More values... + }, + } + resp, err := svc.CopyDBClusterSnapshot(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRDS_CopyDBParameterGroup() { + svc := rds.New(session.New()) + + params := &rds.CopyDBParameterGroupInput{ + SourceDBParameterGroupIdentifier: aws.String("String"), // Required + TargetDBParameterGroupDescription: aws.String("String"), // Required + TargetDBParameterGroupIdentifier: aws.String("String"), // Required + Tags: []*rds.Tag{ + { // Required + Key: aws.String("String"), + Value: aws.String("String"), + }, + // More values... + }, + } + resp, err := svc.CopyDBParameterGroup(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleRDS_CopyDBSnapshot() { + svc := rds.New(session.New()) + + params := &rds.CopyDBSnapshotInput{ + SourceDBSnapshotIdentifier: aws.String("String"), // Required + TargetDBSnapshotIdentifier: aws.String("String"), // Required + CopyTags: aws.Bool(true), + Tags: []*rds.Tag{ + { // Required + Key: aws.String("String"), + Value: aws.String("String"), + }, + // More values... + }, + } + resp, err := svc.CopyDBSnapshot(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRDS_CopyOptionGroup() { + svc := rds.New(session.New()) + + params := &rds.CopyOptionGroupInput{ + SourceOptionGroupIdentifier: aws.String("String"), // Required + TargetOptionGroupDescription: aws.String("String"), // Required + TargetOptionGroupIdentifier: aws.String("String"), // Required + Tags: []*rds.Tag{ + { // Required + Key: aws.String("String"), + Value: aws.String("String"), + }, + // More values... + }, + } + resp, err := svc.CopyOptionGroup(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRDS_CreateDBCluster() { + svc := rds.New(session.New()) + + params := &rds.CreateDBClusterInput{ + DBClusterIdentifier: aws.String("String"), // Required + Engine: aws.String("String"), // Required + MasterUserPassword: aws.String("String"), // Required + MasterUsername: aws.String("String"), // Required + AvailabilityZones: []*string{ + aws.String("String"), // Required + // More values... + }, + BackupRetentionPeriod: aws.Int64(1), + CharacterSetName: aws.String("String"), + DBClusterParameterGroupName: aws.String("String"), + DBSubnetGroupName: aws.String("String"), + DatabaseName: aws.String("String"), + EngineVersion: aws.String("String"), + KmsKeyId: aws.String("String"), + OptionGroupName: aws.String("String"), + Port: aws.Int64(1), + PreferredBackupWindow: aws.String("String"), + PreferredMaintenanceWindow: aws.String("String"), + StorageEncrypted: aws.Bool(true), + Tags: []*rds.Tag{ + { // Required + Key: aws.String("String"), + Value: aws.String("String"), + }, + // More values... + }, + VpcSecurityGroupIds: []*string{ + aws.String("String"), // Required + // More values... + }, + } + resp, err := svc.CreateDBCluster(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRDS_CreateDBClusterParameterGroup() { + svc := rds.New(session.New()) + + params := &rds.CreateDBClusterParameterGroupInput{ + DBClusterParameterGroupName: aws.String("String"), // Required + DBParameterGroupFamily: aws.String("String"), // Required + Description: aws.String("String"), // Required + Tags: []*rds.Tag{ + { // Required + Key: aws.String("String"), + Value: aws.String("String"), + }, + // More values... + }, + } + resp, err := svc.CreateDBClusterParameterGroup(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleRDS_CreateDBClusterSnapshot() { + svc := rds.New(session.New()) + + params := &rds.CreateDBClusterSnapshotInput{ + DBClusterIdentifier: aws.String("String"), // Required + DBClusterSnapshotIdentifier: aws.String("String"), // Required + Tags: []*rds.Tag{ + { // Required + Key: aws.String("String"), + Value: aws.String("String"), + }, + // More values... + }, + } + resp, err := svc.CreateDBClusterSnapshot(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRDS_CreateDBInstance() { + svc := rds.New(session.New()) + + params := &rds.CreateDBInstanceInput{ + DBInstanceClass: aws.String("String"), // Required + DBInstanceIdentifier: aws.String("String"), // Required + Engine: aws.String("String"), // Required + AllocatedStorage: aws.Int64(1), + AutoMinorVersionUpgrade: aws.Bool(true), + AvailabilityZone: aws.String("String"), + BackupRetentionPeriod: aws.Int64(1), + CharacterSetName: aws.String("String"), + CopyTagsToSnapshot: aws.Bool(true), + DBClusterIdentifier: aws.String("String"), + DBName: aws.String("String"), + DBParameterGroupName: aws.String("String"), + DBSecurityGroups: []*string{ + aws.String("String"), // Required + // More values... + }, + DBSubnetGroupName: aws.String("String"), + EngineVersion: aws.String("String"), + Iops: aws.Int64(1), + KmsKeyId: aws.String("String"), + LicenseModel: aws.String("String"), + MasterUserPassword: aws.String("String"), + MasterUsername: aws.String("String"), + MonitoringInterval: aws.Int64(1), + MonitoringRoleArn: aws.String("String"), + MultiAZ: aws.Bool(true), + OptionGroupName: aws.String("String"), + Port: aws.Int64(1), + PreferredBackupWindow: aws.String("String"), + PreferredMaintenanceWindow: aws.String("String"), + PubliclyAccessible: aws.Bool(true), + StorageEncrypted: aws.Bool(true), + StorageType: aws.String("String"), + Tags: []*rds.Tag{ + { // Required + Key: aws.String("String"), + Value: aws.String("String"), + }, + // More values... + }, + TdeCredentialArn: aws.String("String"), + TdeCredentialPassword: aws.String("String"), + VpcSecurityGroupIds: []*string{ + aws.String("String"), // Required + // More values... + }, + } + resp, err := svc.CreateDBInstance(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRDS_CreateDBInstanceReadReplica() { + svc := rds.New(session.New()) + + params := &rds.CreateDBInstanceReadReplicaInput{ + DBInstanceIdentifier: aws.String("String"), // Required + SourceDBInstanceIdentifier: aws.String("String"), // Required + AutoMinorVersionUpgrade: aws.Bool(true), + AvailabilityZone: aws.String("String"), + CopyTagsToSnapshot: aws.Bool(true), + DBInstanceClass: aws.String("String"), + DBSubnetGroupName: aws.String("String"), + Iops: aws.Int64(1), + MonitoringInterval: aws.Int64(1), + MonitoringRoleArn: aws.String("String"), + OptionGroupName: aws.String("String"), + Port: aws.Int64(1), + PubliclyAccessible: aws.Bool(true), + StorageType: aws.String("String"), + Tags: []*rds.Tag{ + { // Required + Key: aws.String("String"), + Value: aws.String("String"), + }, + // More values... 
+ }, + } + resp, err := svc.CreateDBInstanceReadReplica(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRDS_CreateDBParameterGroup() { + svc := rds.New(session.New()) + + params := &rds.CreateDBParameterGroupInput{ + DBParameterGroupFamily: aws.String("String"), // Required + DBParameterGroupName: aws.String("String"), // Required + Description: aws.String("String"), // Required + Tags: []*rds.Tag{ + { // Required + Key: aws.String("String"), + Value: aws.String("String"), + }, + // More values... + }, + } + resp, err := svc.CreateDBParameterGroup(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRDS_CreateDBSecurityGroup() { + svc := rds.New(session.New()) + + params := &rds.CreateDBSecurityGroupInput{ + DBSecurityGroupDescription: aws.String("String"), // Required + DBSecurityGroupName: aws.String("String"), // Required + Tags: []*rds.Tag{ + { // Required + Key: aws.String("String"), + Value: aws.String("String"), + }, + // More values... + }, + } + resp, err := svc.CreateDBSecurityGroup(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRDS_CreateDBSnapshot() { + svc := rds.New(session.New()) + + params := &rds.CreateDBSnapshotInput{ + DBInstanceIdentifier: aws.String("String"), // Required + DBSnapshotIdentifier: aws.String("String"), // Required + Tags: []*rds.Tag{ + { // Required + Key: aws.String("String"), + Value: aws.String("String"), + }, + // More values... + }, + } + resp, err := svc.CreateDBSnapshot(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRDS_CreateDBSubnetGroup() { + svc := rds.New(session.New()) + + params := &rds.CreateDBSubnetGroupInput{ + DBSubnetGroupDescription: aws.String("String"), // Required + DBSubnetGroupName: aws.String("String"), // Required + SubnetIds: []*string{ // Required + aws.String("String"), // Required + // More values... + }, + Tags: []*rds.Tag{ + { // Required + Key: aws.String("String"), + Value: aws.String("String"), + }, + // More values... + }, + } + resp, err := svc.CreateDBSubnetGroup(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRDS_CreateEventSubscription() { + svc := rds.New(session.New()) + + params := &rds.CreateEventSubscriptionInput{ + SnsTopicArn: aws.String("String"), // Required + SubscriptionName: aws.String("String"), // Required + Enabled: aws.Bool(true), + EventCategories: []*string{ + aws.String("String"), // Required + // More values... + }, + SourceIds: []*string{ + aws.String("String"), // Required + // More values... + }, + SourceType: aws.String("String"), + Tags: []*rds.Tag{ + { // Required + Key: aws.String("String"), + Value: aws.String("String"), + }, + // More values... 
+ }, + } + resp, err := svc.CreateEventSubscription(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRDS_CreateOptionGroup() { + svc := rds.New(session.New()) + + params := &rds.CreateOptionGroupInput{ + EngineName: aws.String("String"), // Required + MajorEngineVersion: aws.String("String"), // Required + OptionGroupDescription: aws.String("String"), // Required + OptionGroupName: aws.String("String"), // Required + Tags: []*rds.Tag{ + { // Required + Key: aws.String("String"), + Value: aws.String("String"), + }, + // More values... + }, + } + resp, err := svc.CreateOptionGroup(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRDS_DeleteDBCluster() { + svc := rds.New(session.New()) + + params := &rds.DeleteDBClusterInput{ + DBClusterIdentifier: aws.String("String"), // Required + FinalDBSnapshotIdentifier: aws.String("String"), + SkipFinalSnapshot: aws.Bool(true), + } + resp, err := svc.DeleteDBCluster(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRDS_DeleteDBClusterParameterGroup() { + svc := rds.New(session.New()) + + params := &rds.DeleteDBClusterParameterGroupInput{ + DBClusterParameterGroupName: aws.String("String"), // Required + } + resp, err := svc.DeleteDBClusterParameterGroup(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRDS_DeleteDBClusterSnapshot() { + svc := rds.New(session.New()) + + params := &rds.DeleteDBClusterSnapshotInput{ + DBClusterSnapshotIdentifier: aws.String("String"), // Required + } + resp, err := svc.DeleteDBClusterSnapshot(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRDS_DeleteDBInstance() { + svc := rds.New(session.New()) + + params := &rds.DeleteDBInstanceInput{ + DBInstanceIdentifier: aws.String("String"), // Required + FinalDBSnapshotIdentifier: aws.String("String"), + SkipFinalSnapshot: aws.Bool(true), + } + resp, err := svc.DeleteDBInstance(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRDS_DeleteDBParameterGroup() { + svc := rds.New(session.New()) + + params := &rds.DeleteDBParameterGroupInput{ + DBParameterGroupName: aws.String("String"), // Required + } + resp, err := svc.DeleteDBParameterGroup(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleRDS_DeleteDBSecurityGroup() { + svc := rds.New(session.New()) + + params := &rds.DeleteDBSecurityGroupInput{ + DBSecurityGroupName: aws.String("String"), // Required + } + resp, err := svc.DeleteDBSecurityGroup(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRDS_DeleteDBSnapshot() { + svc := rds.New(session.New()) + + params := &rds.DeleteDBSnapshotInput{ + DBSnapshotIdentifier: aws.String("String"), // Required + } + resp, err := svc.DeleteDBSnapshot(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRDS_DeleteDBSubnetGroup() { + svc := rds.New(session.New()) + + params := &rds.DeleteDBSubnetGroupInput{ + DBSubnetGroupName: aws.String("String"), // Required + } + resp, err := svc.DeleteDBSubnetGroup(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRDS_DeleteEventSubscription() { + svc := rds.New(session.New()) + + params := &rds.DeleteEventSubscriptionInput{ + SubscriptionName: aws.String("String"), // Required + } + resp, err := svc.DeleteEventSubscription(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRDS_DeleteOptionGroup() { + svc := rds.New(session.New()) + + params := &rds.DeleteOptionGroupInput{ + OptionGroupName: aws.String("String"), // Required + } + resp, err := svc.DeleteOptionGroup(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRDS_DescribeAccountAttributes() { + svc := rds.New(session.New()) + + var params *rds.DescribeAccountAttributesInput + resp, err := svc.DescribeAccountAttributes(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRDS_DescribeCertificates() { + svc := rds.New(session.New()) + + params := &rds.DescribeCertificatesInput{ + CertificateIdentifier: aws.String("String"), + Filters: []*rds.Filter{ + { // Required + Name: aws.String("String"), // Required + Values: []*string{ // Required + aws.String("String"), // Required + // More values... + }, + }, + // More values... + }, + Marker: aws.String("String"), + MaxRecords: aws.Int64(1), + } + resp, err := svc.DescribeCertificates(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleRDS_DescribeDBClusterParameterGroups() { + svc := rds.New(session.New()) + + params := &rds.DescribeDBClusterParameterGroupsInput{ + DBClusterParameterGroupName: aws.String("String"), + Filters: []*rds.Filter{ + { // Required + Name: aws.String("String"), // Required + Values: []*string{ // Required + aws.String("String"), // Required + // More values... + }, + }, + // More values... + }, + Marker: aws.String("String"), + MaxRecords: aws.Int64(1), + } + resp, err := svc.DescribeDBClusterParameterGroups(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRDS_DescribeDBClusterParameters() { + svc := rds.New(session.New()) + + params := &rds.DescribeDBClusterParametersInput{ + DBClusterParameterGroupName: aws.String("String"), // Required + Filters: []*rds.Filter{ + { // Required + Name: aws.String("String"), // Required + Values: []*string{ // Required + aws.String("String"), // Required + // More values... + }, + }, + // More values... + }, + Marker: aws.String("String"), + MaxRecords: aws.Int64(1), + Source: aws.String("String"), + } + resp, err := svc.DescribeDBClusterParameters(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRDS_DescribeDBClusterSnapshots() { + svc := rds.New(session.New()) + + params := &rds.DescribeDBClusterSnapshotsInput{ + DBClusterIdentifier: aws.String("String"), + DBClusterSnapshotIdentifier: aws.String("String"), + Filters: []*rds.Filter{ + { // Required + Name: aws.String("String"), // Required + Values: []*string{ // Required + aws.String("String"), // Required + // More values... + }, + }, + // More values... + }, + Marker: aws.String("String"), + MaxRecords: aws.Int64(1), + SnapshotType: aws.String("String"), + } + resp, err := svc.DescribeDBClusterSnapshots(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRDS_DescribeDBClusters() { + svc := rds.New(session.New()) + + params := &rds.DescribeDBClustersInput{ + DBClusterIdentifier: aws.String("String"), + Filters: []*rds.Filter{ + { // Required + Name: aws.String("String"), // Required + Values: []*string{ // Required + aws.String("String"), // Required + // More values... + }, + }, + // More values... + }, + Marker: aws.String("String"), + MaxRecords: aws.Int64(1), + } + resp, err := svc.DescribeDBClusters(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRDS_DescribeDBEngineVersions() { + svc := rds.New(session.New()) + + params := &rds.DescribeDBEngineVersionsInput{ + DBParameterGroupFamily: aws.String("String"), + DefaultOnly: aws.Bool(true), + Engine: aws.String("String"), + EngineVersion: aws.String("String"), + Filters: []*rds.Filter{ + { // Required + Name: aws.String("String"), // Required + Values: []*string{ // Required + aws.String("String"), // Required + // More values... + }, + }, + // More values... 
+ }, + ListSupportedCharacterSets: aws.Bool(true), + Marker: aws.String("String"), + MaxRecords: aws.Int64(1), + } + resp, err := svc.DescribeDBEngineVersions(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRDS_DescribeDBInstances() { + svc := rds.New(session.New()) + + params := &rds.DescribeDBInstancesInput{ + DBInstanceIdentifier: aws.String("String"), + Filters: []*rds.Filter{ + { // Required + Name: aws.String("String"), // Required + Values: []*string{ // Required + aws.String("String"), // Required + // More values... + }, + }, + // More values... + }, + Marker: aws.String("String"), + MaxRecords: aws.Int64(1), + } + resp, err := svc.DescribeDBInstances(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRDS_DescribeDBLogFiles() { + svc := rds.New(session.New()) + + params := &rds.DescribeDBLogFilesInput{ + DBInstanceIdentifier: aws.String("String"), // Required + FileLastWritten: aws.Int64(1), + FileSize: aws.Int64(1), + FilenameContains: aws.String("String"), + Filters: []*rds.Filter{ + { // Required + Name: aws.String("String"), // Required + Values: []*string{ // Required + aws.String("String"), // Required + // More values... + }, + }, + // More values... + }, + Marker: aws.String("String"), + MaxRecords: aws.Int64(1), + } + resp, err := svc.DescribeDBLogFiles(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRDS_DescribeDBParameterGroups() { + svc := rds.New(session.New()) + + params := &rds.DescribeDBParameterGroupsInput{ + DBParameterGroupName: aws.String("String"), + Filters: []*rds.Filter{ + { // Required + Name: aws.String("String"), // Required + Values: []*string{ // Required + aws.String("String"), // Required + // More values... + }, + }, + // More values... + }, + Marker: aws.String("String"), + MaxRecords: aws.Int64(1), + } + resp, err := svc.DescribeDBParameterGroups(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRDS_DescribeDBParameters() { + svc := rds.New(session.New()) + + params := &rds.DescribeDBParametersInput{ + DBParameterGroupName: aws.String("String"), // Required + Filters: []*rds.Filter{ + { // Required + Name: aws.String("String"), // Required + Values: []*string{ // Required + aws.String("String"), // Required + // More values... + }, + }, + // More values... + }, + Marker: aws.String("String"), + MaxRecords: aws.Int64(1), + Source: aws.String("String"), + } + resp, err := svc.DescribeDBParameters(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleRDS_DescribeDBSecurityGroups() { + svc := rds.New(session.New()) + + params := &rds.DescribeDBSecurityGroupsInput{ + DBSecurityGroupName: aws.String("String"), + Filters: []*rds.Filter{ + { // Required + Name: aws.String("String"), // Required + Values: []*string{ // Required + aws.String("String"), // Required + // More values... + }, + }, + // More values... + }, + Marker: aws.String("String"), + MaxRecords: aws.Int64(1), + } + resp, err := svc.DescribeDBSecurityGroups(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRDS_DescribeDBSnapshotAttributes() { + svc := rds.New(session.New()) + + params := &rds.DescribeDBSnapshotAttributesInput{ + DBSnapshotIdentifier: aws.String("String"), + } + resp, err := svc.DescribeDBSnapshotAttributes(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRDS_DescribeDBSnapshots() { + svc := rds.New(session.New()) + + params := &rds.DescribeDBSnapshotsInput{ + DBInstanceIdentifier: aws.String("String"), + DBSnapshotIdentifier: aws.String("String"), + Filters: []*rds.Filter{ + { // Required + Name: aws.String("String"), // Required + Values: []*string{ // Required + aws.String("String"), // Required + // More values... + }, + }, + // More values... + }, + IncludePublic: aws.Bool(true), + IncludeShared: aws.Bool(true), + Marker: aws.String("String"), + MaxRecords: aws.Int64(1), + SnapshotType: aws.String("String"), + } + resp, err := svc.DescribeDBSnapshots(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRDS_DescribeDBSubnetGroups() { + svc := rds.New(session.New()) + + params := &rds.DescribeDBSubnetGroupsInput{ + DBSubnetGroupName: aws.String("String"), + Filters: []*rds.Filter{ + { // Required + Name: aws.String("String"), // Required + Values: []*string{ // Required + aws.String("String"), // Required + // More values... + }, + }, + // More values... + }, + Marker: aws.String("String"), + MaxRecords: aws.Int64(1), + } + resp, err := svc.DescribeDBSubnetGroups(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRDS_DescribeEngineDefaultClusterParameters() { + svc := rds.New(session.New()) + + params := &rds.DescribeEngineDefaultClusterParametersInput{ + DBParameterGroupFamily: aws.String("String"), // Required + Filters: []*rds.Filter{ + { // Required + Name: aws.String("String"), // Required + Values: []*string{ // Required + aws.String("String"), // Required + // More values... + }, + }, + // More values... + }, + Marker: aws.String("String"), + MaxRecords: aws.Int64(1), + } + resp, err := svc.DescribeEngineDefaultClusterParameters(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleRDS_DescribeEngineDefaultParameters() { + svc := rds.New(session.New()) + + params := &rds.DescribeEngineDefaultParametersInput{ + DBParameterGroupFamily: aws.String("String"), // Required + Filters: []*rds.Filter{ + { // Required + Name: aws.String("String"), // Required + Values: []*string{ // Required + aws.String("String"), // Required + // More values... + }, + }, + // More values... + }, + Marker: aws.String("String"), + MaxRecords: aws.Int64(1), + } + resp, err := svc.DescribeEngineDefaultParameters(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRDS_DescribeEventCategories() { + svc := rds.New(session.New()) + + params := &rds.DescribeEventCategoriesInput{ + Filters: []*rds.Filter{ + { // Required + Name: aws.String("String"), // Required + Values: []*string{ // Required + aws.String("String"), // Required + // More values... + }, + }, + // More values... + }, + SourceType: aws.String("String"), + } + resp, err := svc.DescribeEventCategories(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRDS_DescribeEventSubscriptions() { + svc := rds.New(session.New()) + + params := &rds.DescribeEventSubscriptionsInput{ + Filters: []*rds.Filter{ + { // Required + Name: aws.String("String"), // Required + Values: []*string{ // Required + aws.String("String"), // Required + // More values... + }, + }, + // More values... + }, + Marker: aws.String("String"), + MaxRecords: aws.Int64(1), + SubscriptionName: aws.String("String"), + } + resp, err := svc.DescribeEventSubscriptions(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRDS_DescribeEvents() { + svc := rds.New(session.New()) + + params := &rds.DescribeEventsInput{ + Duration: aws.Int64(1), + EndTime: aws.Time(time.Now()), + EventCategories: []*string{ + aws.String("String"), // Required + // More values... + }, + Filters: []*rds.Filter{ + { // Required + Name: aws.String("String"), // Required + Values: []*string{ // Required + aws.String("String"), // Required + // More values... + }, + }, + // More values... + }, + Marker: aws.String("String"), + MaxRecords: aws.Int64(1), + SourceIdentifier: aws.String("String"), + SourceType: aws.String("SourceType"), + StartTime: aws.Time(time.Now()), + } + resp, err := svc.DescribeEvents(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRDS_DescribeOptionGroupOptions() { + svc := rds.New(session.New()) + + params := &rds.DescribeOptionGroupOptionsInput{ + EngineName: aws.String("String"), // Required + Filters: []*rds.Filter{ + { // Required + Name: aws.String("String"), // Required + Values: []*string{ // Required + aws.String("String"), // Required + // More values... + }, + }, + // More values... 
+ }, + MajorEngineVersion: aws.String("String"), + Marker: aws.String("String"), + MaxRecords: aws.Int64(1), + } + resp, err := svc.DescribeOptionGroupOptions(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRDS_DescribeOptionGroups() { + svc := rds.New(session.New()) + + params := &rds.DescribeOptionGroupsInput{ + EngineName: aws.String("String"), + Filters: []*rds.Filter{ + { // Required + Name: aws.String("String"), // Required + Values: []*string{ // Required + aws.String("String"), // Required + // More values... + }, + }, + // More values... + }, + MajorEngineVersion: aws.String("String"), + Marker: aws.String("String"), + MaxRecords: aws.Int64(1), + OptionGroupName: aws.String("String"), + } + resp, err := svc.DescribeOptionGroups(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRDS_DescribeOrderableDBInstanceOptions() { + svc := rds.New(session.New()) + + params := &rds.DescribeOrderableDBInstanceOptionsInput{ + Engine: aws.String("String"), // Required + DBInstanceClass: aws.String("String"), + EngineVersion: aws.String("String"), + Filters: []*rds.Filter{ + { // Required + Name: aws.String("String"), // Required + Values: []*string{ // Required + aws.String("String"), // Required + // More values... + }, + }, + // More values... + }, + LicenseModel: aws.String("String"), + Marker: aws.String("String"), + MaxRecords: aws.Int64(1), + Vpc: aws.Bool(true), + } + resp, err := svc.DescribeOrderableDBInstanceOptions(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRDS_DescribePendingMaintenanceActions() { + svc := rds.New(session.New()) + + params := &rds.DescribePendingMaintenanceActionsInput{ + Filters: []*rds.Filter{ + { // Required + Name: aws.String("String"), // Required + Values: []*string{ // Required + aws.String("String"), // Required + // More values... + }, + }, + // More values... + }, + Marker: aws.String("String"), + MaxRecords: aws.Int64(1), + ResourceIdentifier: aws.String("String"), + } + resp, err := svc.DescribePendingMaintenanceActions(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRDS_DescribeReservedDBInstances() { + svc := rds.New(session.New()) + + params := &rds.DescribeReservedDBInstancesInput{ + DBInstanceClass: aws.String("String"), + Duration: aws.String("String"), + Filters: []*rds.Filter{ + { // Required + Name: aws.String("String"), // Required + Values: []*string{ // Required + aws.String("String"), // Required + // More values... + }, + }, + // More values... 
+ }, + Marker: aws.String("String"), + MaxRecords: aws.Int64(1), + MultiAZ: aws.Bool(true), + OfferingType: aws.String("String"), + ProductDescription: aws.String("String"), + ReservedDBInstanceId: aws.String("String"), + ReservedDBInstancesOfferingId: aws.String("String"), + } + resp, err := svc.DescribeReservedDBInstances(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRDS_DescribeReservedDBInstancesOfferings() { + svc := rds.New(session.New()) + + params := &rds.DescribeReservedDBInstancesOfferingsInput{ + DBInstanceClass: aws.String("String"), + Duration: aws.String("String"), + Filters: []*rds.Filter{ + { // Required + Name: aws.String("String"), // Required + Values: []*string{ // Required + aws.String("String"), // Required + // More values... + }, + }, + // More values... + }, + Marker: aws.String("String"), + MaxRecords: aws.Int64(1), + MultiAZ: aws.Bool(true), + OfferingType: aws.String("String"), + ProductDescription: aws.String("String"), + ReservedDBInstancesOfferingId: aws.String("String"), + } + resp, err := svc.DescribeReservedDBInstancesOfferings(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRDS_DownloadDBLogFilePortion() { + svc := rds.New(session.New()) + + params := &rds.DownloadDBLogFilePortionInput{ + DBInstanceIdentifier: aws.String("String"), // Required + LogFileName: aws.String("String"), // Required + Marker: aws.String("String"), + NumberOfLines: aws.Int64(1), + } + resp, err := svc.DownloadDBLogFilePortion(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRDS_FailoverDBCluster() { + svc := rds.New(session.New()) + + params := &rds.FailoverDBClusterInput{ + DBClusterIdentifier: aws.String("String"), + } + resp, err := svc.FailoverDBCluster(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRDS_ListTagsForResource() { + svc := rds.New(session.New()) + + params := &rds.ListTagsForResourceInput{ + ResourceName: aws.String("String"), // Required + Filters: []*rds.Filter{ + { // Required + Name: aws.String("String"), // Required + Values: []*string{ // Required + aws.String("String"), // Required + // More values... + }, + }, + // More values... + }, + } + resp, err := svc.ListTagsForResource(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleRDS_ModifyDBCluster() { + svc := rds.New(session.New()) + + params := &rds.ModifyDBClusterInput{ + DBClusterIdentifier: aws.String("String"), // Required + ApplyImmediately: aws.Bool(true), + BackupRetentionPeriod: aws.Int64(1), + DBClusterParameterGroupName: aws.String("String"), + MasterUserPassword: aws.String("String"), + NewDBClusterIdentifier: aws.String("String"), + OptionGroupName: aws.String("String"), + Port: aws.Int64(1), + PreferredBackupWindow: aws.String("String"), + PreferredMaintenanceWindow: aws.String("String"), + VpcSecurityGroupIds: []*string{ + aws.String("String"), // Required + // More values... + }, + } + resp, err := svc.ModifyDBCluster(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRDS_ModifyDBClusterParameterGroup() { + svc := rds.New(session.New()) + + params := &rds.ModifyDBClusterParameterGroupInput{ + DBClusterParameterGroupName: aws.String("String"), // Required + Parameters: []*rds.Parameter{ // Required + { // Required + AllowedValues: aws.String("String"), + ApplyMethod: aws.String("ApplyMethod"), + ApplyType: aws.String("String"), + DataType: aws.String("String"), + Description: aws.String("String"), + IsModifiable: aws.Bool(true), + MinimumEngineVersion: aws.String("String"), + ParameterName: aws.String("String"), + ParameterValue: aws.String("String"), + Source: aws.String("String"), + }, + // More values... + }, + } + resp, err := svc.ModifyDBClusterParameterGroup(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRDS_ModifyDBInstance() { + svc := rds.New(session.New()) + + params := &rds.ModifyDBInstanceInput{ + DBInstanceIdentifier: aws.String("String"), // Required + AllocatedStorage: aws.Int64(1), + AllowMajorVersionUpgrade: aws.Bool(true), + ApplyImmediately: aws.Bool(true), + AutoMinorVersionUpgrade: aws.Bool(true), + BackupRetentionPeriod: aws.Int64(1), + CACertificateIdentifier: aws.String("String"), + CopyTagsToSnapshot: aws.Bool(true), + DBInstanceClass: aws.String("String"), + DBParameterGroupName: aws.String("String"), + DBPortNumber: aws.Int64(1), + DBSecurityGroups: []*string{ + aws.String("String"), // Required + // More values... + }, + EngineVersion: aws.String("String"), + Iops: aws.Int64(1), + MasterUserPassword: aws.String("String"), + MonitoringInterval: aws.Int64(1), + MonitoringRoleArn: aws.String("String"), + MultiAZ: aws.Bool(true), + NewDBInstanceIdentifier: aws.String("String"), + OptionGroupName: aws.String("String"), + PreferredBackupWindow: aws.String("String"), + PreferredMaintenanceWindow: aws.String("String"), + PubliclyAccessible: aws.Bool(true), + StorageType: aws.String("String"), + TdeCredentialArn: aws.String("String"), + TdeCredentialPassword: aws.String("String"), + VpcSecurityGroupIds: []*string{ + aws.String("String"), // Required + // More values... + }, + } + resp, err := svc.ModifyDBInstance(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleRDS_ModifyDBParameterGroup() { + svc := rds.New(session.New()) + + params := &rds.ModifyDBParameterGroupInput{ + DBParameterGroupName: aws.String("String"), // Required + Parameters: []*rds.Parameter{ // Required + { // Required + AllowedValues: aws.String("String"), + ApplyMethod: aws.String("ApplyMethod"), + ApplyType: aws.String("String"), + DataType: aws.String("String"), + Description: aws.String("String"), + IsModifiable: aws.Bool(true), + MinimumEngineVersion: aws.String("String"), + ParameterName: aws.String("String"), + ParameterValue: aws.String("String"), + Source: aws.String("String"), + }, + // More values... + }, + } + resp, err := svc.ModifyDBParameterGroup(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRDS_ModifyDBSnapshotAttribute() { + svc := rds.New(session.New()) + + params := &rds.ModifyDBSnapshotAttributeInput{ + DBSnapshotIdentifier: aws.String("String"), // Required + AttributeName: aws.String("String"), + ValuesToAdd: []*string{ + aws.String("String"), // Required + // More values... + }, + ValuesToRemove: []*string{ + aws.String("String"), // Required + // More values... + }, + } + resp, err := svc.ModifyDBSnapshotAttribute(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRDS_ModifyDBSubnetGroup() { + svc := rds.New(session.New()) + + params := &rds.ModifyDBSubnetGroupInput{ + DBSubnetGroupName: aws.String("String"), // Required + SubnetIds: []*string{ // Required + aws.String("String"), // Required + // More values... + }, + DBSubnetGroupDescription: aws.String("String"), + } + resp, err := svc.ModifyDBSubnetGroup(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRDS_ModifyEventSubscription() { + svc := rds.New(session.New()) + + params := &rds.ModifyEventSubscriptionInput{ + SubscriptionName: aws.String("String"), // Required + Enabled: aws.Bool(true), + EventCategories: []*string{ + aws.String("String"), // Required + // More values... + }, + SnsTopicArn: aws.String("String"), + SourceType: aws.String("String"), + } + resp, err := svc.ModifyEventSubscription(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRDS_ModifyOptionGroup() { + svc := rds.New(session.New()) + + params := &rds.ModifyOptionGroupInput{ + OptionGroupName: aws.String("String"), // Required + ApplyImmediately: aws.Bool(true), + OptionsToInclude: []*rds.OptionConfiguration{ + { // Required + OptionName: aws.String("String"), // Required + DBSecurityGroupMemberships: []*string{ + aws.String("String"), // Required + // More values... 
+ }, + OptionSettings: []*rds.OptionSetting{ + { // Required + AllowedValues: aws.String("String"), + ApplyType: aws.String("String"), + DataType: aws.String("String"), + DefaultValue: aws.String("String"), + Description: aws.String("String"), + IsCollection: aws.Bool(true), + IsModifiable: aws.Bool(true), + Name: aws.String("String"), + Value: aws.String("String"), + }, + // More values... + }, + Port: aws.Int64(1), + VpcSecurityGroupMemberships: []*string{ + aws.String("String"), // Required + // More values... + }, + }, + // More values... + }, + OptionsToRemove: []*string{ + aws.String("String"), // Required + // More values... + }, + } + resp, err := svc.ModifyOptionGroup(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRDS_PromoteReadReplica() { + svc := rds.New(session.New()) + + params := &rds.PromoteReadReplicaInput{ + DBInstanceIdentifier: aws.String("String"), // Required + BackupRetentionPeriod: aws.Int64(1), + PreferredBackupWindow: aws.String("String"), + } + resp, err := svc.PromoteReadReplica(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRDS_PurchaseReservedDBInstancesOffering() { + svc := rds.New(session.New()) + + params := &rds.PurchaseReservedDBInstancesOfferingInput{ + ReservedDBInstancesOfferingId: aws.String("String"), // Required + DBInstanceCount: aws.Int64(1), + ReservedDBInstanceId: aws.String("String"), + Tags: []*rds.Tag{ + { // Required + Key: aws.String("String"), + Value: aws.String("String"), + }, + // More values... + }, + } + resp, err := svc.PurchaseReservedDBInstancesOffering(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRDS_RebootDBInstance() { + svc := rds.New(session.New()) + + params := &rds.RebootDBInstanceInput{ + DBInstanceIdentifier: aws.String("String"), // Required + ForceFailover: aws.Bool(true), + } + resp, err := svc.RebootDBInstance(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRDS_RemoveSourceIdentifierFromSubscription() { + svc := rds.New(session.New()) + + params := &rds.RemoveSourceIdentifierFromSubscriptionInput{ + SourceIdentifier: aws.String("String"), // Required + SubscriptionName: aws.String("String"), // Required + } + resp, err := svc.RemoveSourceIdentifierFromSubscription(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRDS_RemoveTagsFromResource() { + svc := rds.New(session.New()) + + params := &rds.RemoveTagsFromResourceInput{ + ResourceName: aws.String("String"), // Required + TagKeys: []*string{ // Required + aws.String("String"), // Required + // More values... 
+ }, + } + resp, err := svc.RemoveTagsFromResource(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRDS_ResetDBClusterParameterGroup() { + svc := rds.New(session.New()) + + params := &rds.ResetDBClusterParameterGroupInput{ + DBClusterParameterGroupName: aws.String("String"), // Required + Parameters: []*rds.Parameter{ + { // Required + AllowedValues: aws.String("String"), + ApplyMethod: aws.String("ApplyMethod"), + ApplyType: aws.String("String"), + DataType: aws.String("String"), + Description: aws.String("String"), + IsModifiable: aws.Bool(true), + MinimumEngineVersion: aws.String("String"), + ParameterName: aws.String("String"), + ParameterValue: aws.String("String"), + Source: aws.String("String"), + }, + // More values... + }, + ResetAllParameters: aws.Bool(true), + } + resp, err := svc.ResetDBClusterParameterGroup(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRDS_ResetDBParameterGroup() { + svc := rds.New(session.New()) + + params := &rds.ResetDBParameterGroupInput{ + DBParameterGroupName: aws.String("String"), // Required + Parameters: []*rds.Parameter{ + { // Required + AllowedValues: aws.String("String"), + ApplyMethod: aws.String("ApplyMethod"), + ApplyType: aws.String("String"), + DataType: aws.String("String"), + Description: aws.String("String"), + IsModifiable: aws.Bool(true), + MinimumEngineVersion: aws.String("String"), + ParameterName: aws.String("String"), + ParameterValue: aws.String("String"), + Source: aws.String("String"), + }, + // More values... + }, + ResetAllParameters: aws.Bool(true), + } + resp, err := svc.ResetDBParameterGroup(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRDS_RestoreDBClusterFromSnapshot() { + svc := rds.New(session.New()) + + params := &rds.RestoreDBClusterFromSnapshotInput{ + DBClusterIdentifier: aws.String("String"), // Required + Engine: aws.String("String"), // Required + SnapshotIdentifier: aws.String("String"), // Required + AvailabilityZones: []*string{ + aws.String("String"), // Required + // More values... + }, + DBSubnetGroupName: aws.String("String"), + DatabaseName: aws.String("String"), + EngineVersion: aws.String("String"), + KmsKeyId: aws.String("String"), + OptionGroupName: aws.String("String"), + Port: aws.Int64(1), + Tags: []*rds.Tag{ + { // Required + Key: aws.String("String"), + Value: aws.String("String"), + }, + // More values... + }, + VpcSecurityGroupIds: []*string{ + aws.String("String"), // Required + // More values... + }, + } + resp, err := svc.RestoreDBClusterFromSnapshot(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleRDS_RestoreDBClusterToPointInTime() { + svc := rds.New(session.New()) + + params := &rds.RestoreDBClusterToPointInTimeInput{ + DBClusterIdentifier: aws.String("String"), // Required + SourceDBClusterIdentifier: aws.String("String"), // Required + DBSubnetGroupName: aws.String("String"), + KmsKeyId: aws.String("String"), + OptionGroupName: aws.String("String"), + Port: aws.Int64(1), + RestoreToTime: aws.Time(time.Now()), + Tags: []*rds.Tag{ + { // Required + Key: aws.String("String"), + Value: aws.String("String"), + }, + // More values... + }, + UseLatestRestorableTime: aws.Bool(true), + VpcSecurityGroupIds: []*string{ + aws.String("String"), // Required + // More values... + }, + } + resp, err := svc.RestoreDBClusterToPointInTime(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRDS_RestoreDBInstanceFromDBSnapshot() { + svc := rds.New(session.New()) + + params := &rds.RestoreDBInstanceFromDBSnapshotInput{ + DBInstanceIdentifier: aws.String("String"), // Required + DBSnapshotIdentifier: aws.String("String"), // Required + AutoMinorVersionUpgrade: aws.Bool(true), + AvailabilityZone: aws.String("String"), + CopyTagsToSnapshot: aws.Bool(true), + DBInstanceClass: aws.String("String"), + DBName: aws.String("String"), + DBSubnetGroupName: aws.String("String"), + Engine: aws.String("String"), + Iops: aws.Int64(1), + LicenseModel: aws.String("String"), + MultiAZ: aws.Bool(true), + OptionGroupName: aws.String("String"), + Port: aws.Int64(1), + PubliclyAccessible: aws.Bool(true), + StorageType: aws.String("String"), + Tags: []*rds.Tag{ + { // Required + Key: aws.String("String"), + Value: aws.String("String"), + }, + // More values... + }, + TdeCredentialArn: aws.String("String"), + TdeCredentialPassword: aws.String("String"), + } + resp, err := svc.RestoreDBInstanceFromDBSnapshot(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRDS_RestoreDBInstanceToPointInTime() { + svc := rds.New(session.New()) + + params := &rds.RestoreDBInstanceToPointInTimeInput{ + SourceDBInstanceIdentifier: aws.String("String"), // Required + TargetDBInstanceIdentifier: aws.String("String"), // Required + AutoMinorVersionUpgrade: aws.Bool(true), + AvailabilityZone: aws.String("String"), + CopyTagsToSnapshot: aws.Bool(true), + DBInstanceClass: aws.String("String"), + DBName: aws.String("String"), + DBSubnetGroupName: aws.String("String"), + Engine: aws.String("String"), + Iops: aws.Int64(1), + LicenseModel: aws.String("String"), + MultiAZ: aws.Bool(true), + OptionGroupName: aws.String("String"), + Port: aws.Int64(1), + PubliclyAccessible: aws.Bool(true), + RestoreTime: aws.Time(time.Now()), + StorageType: aws.String("String"), + Tags: []*rds.Tag{ + { // Required + Key: aws.String("String"), + Value: aws.String("String"), + }, + // More values... + }, + TdeCredentialArn: aws.String("String"), + TdeCredentialPassword: aws.String("String"), + UseLatestRestorableTime: aws.Bool(true), + } + resp, err := svc.RestoreDBInstanceToPointInTime(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. 
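+		// For instance (a sketch; awserr would need to be imported from
+		// github.com/aws/aws-sdk-go/aws/awserr):
+		//	if awsErr, ok := err.(awserr.Error); ok {
+		//		fmt.Println(awsErr.Code(), awsErr.Message())
+		//	}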
+ fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRDS_RevokeDBSecurityGroupIngress() { + svc := rds.New(session.New()) + + params := &rds.RevokeDBSecurityGroupIngressInput{ + DBSecurityGroupName: aws.String("String"), // Required + CIDRIP: aws.String("String"), + EC2SecurityGroupId: aws.String("String"), + EC2SecurityGroupName: aws.String("String"), + EC2SecurityGroupOwnerId: aws.String("String"), + } + resp, err := svc.RevokeDBSecurityGroupIngress(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/rds/rdsiface/interface.go b/vendor/github.com/aws/aws-sdk-go/service/rds/rdsiface/interface.go new file mode 100644 index 000000000..db350eeea --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/rds/rdsiface/interface.go @@ -0,0 +1,360 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +// Package rdsiface provides an interface for the Amazon Relational Database Service. +package rdsiface + +import ( + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/service/rds" +) + +// RDSAPI is the interface type for rds.RDS. +type RDSAPI interface { + AddSourceIdentifierToSubscriptionRequest(*rds.AddSourceIdentifierToSubscriptionInput) (*request.Request, *rds.AddSourceIdentifierToSubscriptionOutput) + + AddSourceIdentifierToSubscription(*rds.AddSourceIdentifierToSubscriptionInput) (*rds.AddSourceIdentifierToSubscriptionOutput, error) + + AddTagsToResourceRequest(*rds.AddTagsToResourceInput) (*request.Request, *rds.AddTagsToResourceOutput) + + AddTagsToResource(*rds.AddTagsToResourceInput) (*rds.AddTagsToResourceOutput, error) + + ApplyPendingMaintenanceActionRequest(*rds.ApplyPendingMaintenanceActionInput) (*request.Request, *rds.ApplyPendingMaintenanceActionOutput) + + ApplyPendingMaintenanceAction(*rds.ApplyPendingMaintenanceActionInput) (*rds.ApplyPendingMaintenanceActionOutput, error) + + AuthorizeDBSecurityGroupIngressRequest(*rds.AuthorizeDBSecurityGroupIngressInput) (*request.Request, *rds.AuthorizeDBSecurityGroupIngressOutput) + + AuthorizeDBSecurityGroupIngress(*rds.AuthorizeDBSecurityGroupIngressInput) (*rds.AuthorizeDBSecurityGroupIngressOutput, error) + + CopyDBClusterSnapshotRequest(*rds.CopyDBClusterSnapshotInput) (*request.Request, *rds.CopyDBClusterSnapshotOutput) + + CopyDBClusterSnapshot(*rds.CopyDBClusterSnapshotInput) (*rds.CopyDBClusterSnapshotOutput, error) + + CopyDBParameterGroupRequest(*rds.CopyDBParameterGroupInput) (*request.Request, *rds.CopyDBParameterGroupOutput) + + CopyDBParameterGroup(*rds.CopyDBParameterGroupInput) (*rds.CopyDBParameterGroupOutput, error) + + CopyDBSnapshotRequest(*rds.CopyDBSnapshotInput) (*request.Request, *rds.CopyDBSnapshotOutput) + + CopyDBSnapshot(*rds.CopyDBSnapshotInput) (*rds.CopyDBSnapshotOutput, error) + + CopyOptionGroupRequest(*rds.CopyOptionGroupInput) (*request.Request, *rds.CopyOptionGroupOutput) + + CopyOptionGroup(*rds.CopyOptionGroupInput) (*rds.CopyOptionGroupOutput, error) + + CreateDBClusterRequest(*rds.CreateDBClusterInput) (*request.Request, *rds.CreateDBClusterOutput) + + CreateDBCluster(*rds.CreateDBClusterInput) (*rds.CreateDBClusterOutput, error) + + CreateDBClusterParameterGroupRequest(*rds.CreateDBClusterParameterGroupInput) (*request.Request, *rds.CreateDBClusterParameterGroupOutput) + + 
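// Each API action is exposed in two forms: XxxRequest returns a
+	// *request.Request so callers can customize sending and retry behavior,
+	// while plain Xxx builds the request, sends it, and returns the unmarshaled
+	// output. Paginated actions additionally expose an XxxPages form that
+	// invokes a callback once per page. The interface also makes the client
+	// easy to stub in tests; a minimal sketch (stubRDS is a hypothetical test
+	// type, not part of this package):
+	//
+	//	type stubRDS struct{ rdsiface.RDSAPI }
+	//
+	//	func (s stubRDS) DescribeDBInstances(in *rds.DescribeDBInstancesInput) (*rds.DescribeDBInstancesOutput, error) {
+	//		return &rds.DescribeDBInstancesOutput{}, nil
+	//	}
+
+	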
CreateDBClusterParameterGroup(*rds.CreateDBClusterParameterGroupInput) (*rds.CreateDBClusterParameterGroupOutput, error) + + CreateDBClusterSnapshotRequest(*rds.CreateDBClusterSnapshotInput) (*request.Request, *rds.CreateDBClusterSnapshotOutput) + + CreateDBClusterSnapshot(*rds.CreateDBClusterSnapshotInput) (*rds.CreateDBClusterSnapshotOutput, error) + + CreateDBInstanceRequest(*rds.CreateDBInstanceInput) (*request.Request, *rds.CreateDBInstanceOutput) + + CreateDBInstance(*rds.CreateDBInstanceInput) (*rds.CreateDBInstanceOutput, error) + + CreateDBInstanceReadReplicaRequest(*rds.CreateDBInstanceReadReplicaInput) (*request.Request, *rds.CreateDBInstanceReadReplicaOutput) + + CreateDBInstanceReadReplica(*rds.CreateDBInstanceReadReplicaInput) (*rds.CreateDBInstanceReadReplicaOutput, error) + + CreateDBParameterGroupRequest(*rds.CreateDBParameterGroupInput) (*request.Request, *rds.CreateDBParameterGroupOutput) + + CreateDBParameterGroup(*rds.CreateDBParameterGroupInput) (*rds.CreateDBParameterGroupOutput, error) + + CreateDBSecurityGroupRequest(*rds.CreateDBSecurityGroupInput) (*request.Request, *rds.CreateDBSecurityGroupOutput) + + CreateDBSecurityGroup(*rds.CreateDBSecurityGroupInput) (*rds.CreateDBSecurityGroupOutput, error) + + CreateDBSnapshotRequest(*rds.CreateDBSnapshotInput) (*request.Request, *rds.CreateDBSnapshotOutput) + + CreateDBSnapshot(*rds.CreateDBSnapshotInput) (*rds.CreateDBSnapshotOutput, error) + + CreateDBSubnetGroupRequest(*rds.CreateDBSubnetGroupInput) (*request.Request, *rds.CreateDBSubnetGroupOutput) + + CreateDBSubnetGroup(*rds.CreateDBSubnetGroupInput) (*rds.CreateDBSubnetGroupOutput, error) + + CreateEventSubscriptionRequest(*rds.CreateEventSubscriptionInput) (*request.Request, *rds.CreateEventSubscriptionOutput) + + CreateEventSubscription(*rds.CreateEventSubscriptionInput) (*rds.CreateEventSubscriptionOutput, error) + + CreateOptionGroupRequest(*rds.CreateOptionGroupInput) (*request.Request, *rds.CreateOptionGroupOutput) + + CreateOptionGroup(*rds.CreateOptionGroupInput) (*rds.CreateOptionGroupOutput, error) + + DeleteDBClusterRequest(*rds.DeleteDBClusterInput) (*request.Request, *rds.DeleteDBClusterOutput) + + DeleteDBCluster(*rds.DeleteDBClusterInput) (*rds.DeleteDBClusterOutput, error) + + DeleteDBClusterParameterGroupRequest(*rds.DeleteDBClusterParameterGroupInput) (*request.Request, *rds.DeleteDBClusterParameterGroupOutput) + + DeleteDBClusterParameterGroup(*rds.DeleteDBClusterParameterGroupInput) (*rds.DeleteDBClusterParameterGroupOutput, error) + + DeleteDBClusterSnapshotRequest(*rds.DeleteDBClusterSnapshotInput) (*request.Request, *rds.DeleteDBClusterSnapshotOutput) + + DeleteDBClusterSnapshot(*rds.DeleteDBClusterSnapshotInput) (*rds.DeleteDBClusterSnapshotOutput, error) + + DeleteDBInstanceRequest(*rds.DeleteDBInstanceInput) (*request.Request, *rds.DeleteDBInstanceOutput) + + DeleteDBInstance(*rds.DeleteDBInstanceInput) (*rds.DeleteDBInstanceOutput, error) + + DeleteDBParameterGroupRequest(*rds.DeleteDBParameterGroupInput) (*request.Request, *rds.DeleteDBParameterGroupOutput) + + DeleteDBParameterGroup(*rds.DeleteDBParameterGroupInput) (*rds.DeleteDBParameterGroupOutput, error) + + DeleteDBSecurityGroupRequest(*rds.DeleteDBSecurityGroupInput) (*request.Request, *rds.DeleteDBSecurityGroupOutput) + + DeleteDBSecurityGroup(*rds.DeleteDBSecurityGroupInput) (*rds.DeleteDBSecurityGroupOutput, error) + + DeleteDBSnapshotRequest(*rds.DeleteDBSnapshotInput) (*request.Request, *rds.DeleteDBSnapshotOutput) + + DeleteDBSnapshot(*rds.DeleteDBSnapshotInput) 
(*rds.DeleteDBSnapshotOutput, error) + + DeleteDBSubnetGroupRequest(*rds.DeleteDBSubnetGroupInput) (*request.Request, *rds.DeleteDBSubnetGroupOutput) + + DeleteDBSubnetGroup(*rds.DeleteDBSubnetGroupInput) (*rds.DeleteDBSubnetGroupOutput, error) + + DeleteEventSubscriptionRequest(*rds.DeleteEventSubscriptionInput) (*request.Request, *rds.DeleteEventSubscriptionOutput) + + DeleteEventSubscription(*rds.DeleteEventSubscriptionInput) (*rds.DeleteEventSubscriptionOutput, error) + + DeleteOptionGroupRequest(*rds.DeleteOptionGroupInput) (*request.Request, *rds.DeleteOptionGroupOutput) + + DeleteOptionGroup(*rds.DeleteOptionGroupInput) (*rds.DeleteOptionGroupOutput, error) + + DescribeAccountAttributesRequest(*rds.DescribeAccountAttributesInput) (*request.Request, *rds.DescribeAccountAttributesOutput) + + DescribeAccountAttributes(*rds.DescribeAccountAttributesInput) (*rds.DescribeAccountAttributesOutput, error) + + DescribeCertificatesRequest(*rds.DescribeCertificatesInput) (*request.Request, *rds.DescribeCertificatesOutput) + + DescribeCertificates(*rds.DescribeCertificatesInput) (*rds.DescribeCertificatesOutput, error) + + DescribeDBClusterParameterGroupsRequest(*rds.DescribeDBClusterParameterGroupsInput) (*request.Request, *rds.DescribeDBClusterParameterGroupsOutput) + + DescribeDBClusterParameterGroups(*rds.DescribeDBClusterParameterGroupsInput) (*rds.DescribeDBClusterParameterGroupsOutput, error) + + DescribeDBClusterParametersRequest(*rds.DescribeDBClusterParametersInput) (*request.Request, *rds.DescribeDBClusterParametersOutput) + + DescribeDBClusterParameters(*rds.DescribeDBClusterParametersInput) (*rds.DescribeDBClusterParametersOutput, error) + + DescribeDBClusterSnapshotsRequest(*rds.DescribeDBClusterSnapshotsInput) (*request.Request, *rds.DescribeDBClusterSnapshotsOutput) + + DescribeDBClusterSnapshots(*rds.DescribeDBClusterSnapshotsInput) (*rds.DescribeDBClusterSnapshotsOutput, error) + + DescribeDBClustersRequest(*rds.DescribeDBClustersInput) (*request.Request, *rds.DescribeDBClustersOutput) + + DescribeDBClusters(*rds.DescribeDBClustersInput) (*rds.DescribeDBClustersOutput, error) + + DescribeDBEngineVersionsRequest(*rds.DescribeDBEngineVersionsInput) (*request.Request, *rds.DescribeDBEngineVersionsOutput) + + DescribeDBEngineVersions(*rds.DescribeDBEngineVersionsInput) (*rds.DescribeDBEngineVersionsOutput, error) + + DescribeDBEngineVersionsPages(*rds.DescribeDBEngineVersionsInput, func(*rds.DescribeDBEngineVersionsOutput, bool) bool) error + + DescribeDBInstancesRequest(*rds.DescribeDBInstancesInput) (*request.Request, *rds.DescribeDBInstancesOutput) + + DescribeDBInstances(*rds.DescribeDBInstancesInput) (*rds.DescribeDBInstancesOutput, error) + + DescribeDBInstancesPages(*rds.DescribeDBInstancesInput, func(*rds.DescribeDBInstancesOutput, bool) bool) error + + DescribeDBLogFilesRequest(*rds.DescribeDBLogFilesInput) (*request.Request, *rds.DescribeDBLogFilesOutput) + + DescribeDBLogFiles(*rds.DescribeDBLogFilesInput) (*rds.DescribeDBLogFilesOutput, error) + + DescribeDBLogFilesPages(*rds.DescribeDBLogFilesInput, func(*rds.DescribeDBLogFilesOutput, bool) bool) error + + DescribeDBParameterGroupsRequest(*rds.DescribeDBParameterGroupsInput) (*request.Request, *rds.DescribeDBParameterGroupsOutput) + + DescribeDBParameterGroups(*rds.DescribeDBParameterGroupsInput) (*rds.DescribeDBParameterGroupsOutput, error) + + DescribeDBParameterGroupsPages(*rds.DescribeDBParameterGroupsInput, func(*rds.DescribeDBParameterGroupsOutput, bool) bool) error + + 
DescribeDBParametersRequest(*rds.DescribeDBParametersInput) (*request.Request, *rds.DescribeDBParametersOutput) + + DescribeDBParameters(*rds.DescribeDBParametersInput) (*rds.DescribeDBParametersOutput, error) + + DescribeDBParametersPages(*rds.DescribeDBParametersInput, func(*rds.DescribeDBParametersOutput, bool) bool) error + + DescribeDBSecurityGroupsRequest(*rds.DescribeDBSecurityGroupsInput) (*request.Request, *rds.DescribeDBSecurityGroupsOutput) + + DescribeDBSecurityGroups(*rds.DescribeDBSecurityGroupsInput) (*rds.DescribeDBSecurityGroupsOutput, error) + + DescribeDBSecurityGroupsPages(*rds.DescribeDBSecurityGroupsInput, func(*rds.DescribeDBSecurityGroupsOutput, bool) bool) error + + DescribeDBSnapshotAttributesRequest(*rds.DescribeDBSnapshotAttributesInput) (*request.Request, *rds.DescribeDBSnapshotAttributesOutput) + + DescribeDBSnapshotAttributes(*rds.DescribeDBSnapshotAttributesInput) (*rds.DescribeDBSnapshotAttributesOutput, error) + + DescribeDBSnapshotsRequest(*rds.DescribeDBSnapshotsInput) (*request.Request, *rds.DescribeDBSnapshotsOutput) + + DescribeDBSnapshots(*rds.DescribeDBSnapshotsInput) (*rds.DescribeDBSnapshotsOutput, error) + + DescribeDBSnapshotsPages(*rds.DescribeDBSnapshotsInput, func(*rds.DescribeDBSnapshotsOutput, bool) bool) error + + DescribeDBSubnetGroupsRequest(*rds.DescribeDBSubnetGroupsInput) (*request.Request, *rds.DescribeDBSubnetGroupsOutput) + + DescribeDBSubnetGroups(*rds.DescribeDBSubnetGroupsInput) (*rds.DescribeDBSubnetGroupsOutput, error) + + DescribeDBSubnetGroupsPages(*rds.DescribeDBSubnetGroupsInput, func(*rds.DescribeDBSubnetGroupsOutput, bool) bool) error + + DescribeEngineDefaultClusterParametersRequest(*rds.DescribeEngineDefaultClusterParametersInput) (*request.Request, *rds.DescribeEngineDefaultClusterParametersOutput) + + DescribeEngineDefaultClusterParameters(*rds.DescribeEngineDefaultClusterParametersInput) (*rds.DescribeEngineDefaultClusterParametersOutput, error) + + DescribeEngineDefaultParametersRequest(*rds.DescribeEngineDefaultParametersInput) (*request.Request, *rds.DescribeEngineDefaultParametersOutput) + + DescribeEngineDefaultParameters(*rds.DescribeEngineDefaultParametersInput) (*rds.DescribeEngineDefaultParametersOutput, error) + + DescribeEngineDefaultParametersPages(*rds.DescribeEngineDefaultParametersInput, func(*rds.DescribeEngineDefaultParametersOutput, bool) bool) error + + DescribeEventCategoriesRequest(*rds.DescribeEventCategoriesInput) (*request.Request, *rds.DescribeEventCategoriesOutput) + + DescribeEventCategories(*rds.DescribeEventCategoriesInput) (*rds.DescribeEventCategoriesOutput, error) + + DescribeEventSubscriptionsRequest(*rds.DescribeEventSubscriptionsInput) (*request.Request, *rds.DescribeEventSubscriptionsOutput) + + DescribeEventSubscriptions(*rds.DescribeEventSubscriptionsInput) (*rds.DescribeEventSubscriptionsOutput, error) + + DescribeEventSubscriptionsPages(*rds.DescribeEventSubscriptionsInput, func(*rds.DescribeEventSubscriptionsOutput, bool) bool) error + + DescribeEventsRequest(*rds.DescribeEventsInput) (*request.Request, *rds.DescribeEventsOutput) + + DescribeEvents(*rds.DescribeEventsInput) (*rds.DescribeEventsOutput, error) + + DescribeEventsPages(*rds.DescribeEventsInput, func(*rds.DescribeEventsOutput, bool) bool) error + + DescribeOptionGroupOptionsRequest(*rds.DescribeOptionGroupOptionsInput) (*request.Request, *rds.DescribeOptionGroupOptionsOutput) + + DescribeOptionGroupOptions(*rds.DescribeOptionGroupOptionsInput) (*rds.DescribeOptionGroupOptionsOutput, error) + + 
DescribeOptionGroupOptionsPages(*rds.DescribeOptionGroupOptionsInput, func(*rds.DescribeOptionGroupOptionsOutput, bool) bool) error + + DescribeOptionGroupsRequest(*rds.DescribeOptionGroupsInput) (*request.Request, *rds.DescribeOptionGroupsOutput) + + DescribeOptionGroups(*rds.DescribeOptionGroupsInput) (*rds.DescribeOptionGroupsOutput, error) + + DescribeOptionGroupsPages(*rds.DescribeOptionGroupsInput, func(*rds.DescribeOptionGroupsOutput, bool) bool) error + + DescribeOrderableDBInstanceOptionsRequest(*rds.DescribeOrderableDBInstanceOptionsInput) (*request.Request, *rds.DescribeOrderableDBInstanceOptionsOutput) + + DescribeOrderableDBInstanceOptions(*rds.DescribeOrderableDBInstanceOptionsInput) (*rds.DescribeOrderableDBInstanceOptionsOutput, error) + + DescribeOrderableDBInstanceOptionsPages(*rds.DescribeOrderableDBInstanceOptionsInput, func(*rds.DescribeOrderableDBInstanceOptionsOutput, bool) bool) error + + DescribePendingMaintenanceActionsRequest(*rds.DescribePendingMaintenanceActionsInput) (*request.Request, *rds.DescribePendingMaintenanceActionsOutput) + + DescribePendingMaintenanceActions(*rds.DescribePendingMaintenanceActionsInput) (*rds.DescribePendingMaintenanceActionsOutput, error) + + DescribeReservedDBInstancesRequest(*rds.DescribeReservedDBInstancesInput) (*request.Request, *rds.DescribeReservedDBInstancesOutput) + + DescribeReservedDBInstances(*rds.DescribeReservedDBInstancesInput) (*rds.DescribeReservedDBInstancesOutput, error) + + DescribeReservedDBInstancesPages(*rds.DescribeReservedDBInstancesInput, func(*rds.DescribeReservedDBInstancesOutput, bool) bool) error + + DescribeReservedDBInstancesOfferingsRequest(*rds.DescribeReservedDBInstancesOfferingsInput) (*request.Request, *rds.DescribeReservedDBInstancesOfferingsOutput) + + DescribeReservedDBInstancesOfferings(*rds.DescribeReservedDBInstancesOfferingsInput) (*rds.DescribeReservedDBInstancesOfferingsOutput, error) + + DescribeReservedDBInstancesOfferingsPages(*rds.DescribeReservedDBInstancesOfferingsInput, func(*rds.DescribeReservedDBInstancesOfferingsOutput, bool) bool) error + + DownloadDBLogFilePortionRequest(*rds.DownloadDBLogFilePortionInput) (*request.Request, *rds.DownloadDBLogFilePortionOutput) + + DownloadDBLogFilePortion(*rds.DownloadDBLogFilePortionInput) (*rds.DownloadDBLogFilePortionOutput, error) + + DownloadDBLogFilePortionPages(*rds.DownloadDBLogFilePortionInput, func(*rds.DownloadDBLogFilePortionOutput, bool) bool) error + + FailoverDBClusterRequest(*rds.FailoverDBClusterInput) (*request.Request, *rds.FailoverDBClusterOutput) + + FailoverDBCluster(*rds.FailoverDBClusterInput) (*rds.FailoverDBClusterOutput, error) + + ListTagsForResourceRequest(*rds.ListTagsForResourceInput) (*request.Request, *rds.ListTagsForResourceOutput) + + ListTagsForResource(*rds.ListTagsForResourceInput) (*rds.ListTagsForResourceOutput, error) + + ModifyDBClusterRequest(*rds.ModifyDBClusterInput) (*request.Request, *rds.ModifyDBClusterOutput) + + ModifyDBCluster(*rds.ModifyDBClusterInput) (*rds.ModifyDBClusterOutput, error) + + ModifyDBClusterParameterGroupRequest(*rds.ModifyDBClusterParameterGroupInput) (*request.Request, *rds.DBClusterParameterGroupNameMessage) + + ModifyDBClusterParameterGroup(*rds.ModifyDBClusterParameterGroupInput) (*rds.DBClusterParameterGroupNameMessage, error) + + ModifyDBInstanceRequest(*rds.ModifyDBInstanceInput) (*request.Request, *rds.ModifyDBInstanceOutput) + + ModifyDBInstance(*rds.ModifyDBInstanceInput) (*rds.ModifyDBInstanceOutput, error) + + 
ModifyDBParameterGroupRequest(*rds.ModifyDBParameterGroupInput) (*request.Request, *rds.DBParameterGroupNameMessage) + + ModifyDBParameterGroup(*rds.ModifyDBParameterGroupInput) (*rds.DBParameterGroupNameMessage, error) + + ModifyDBSnapshotAttributeRequest(*rds.ModifyDBSnapshotAttributeInput) (*request.Request, *rds.ModifyDBSnapshotAttributeOutput) + + ModifyDBSnapshotAttribute(*rds.ModifyDBSnapshotAttributeInput) (*rds.ModifyDBSnapshotAttributeOutput, error) + + ModifyDBSubnetGroupRequest(*rds.ModifyDBSubnetGroupInput) (*request.Request, *rds.ModifyDBSubnetGroupOutput) + + ModifyDBSubnetGroup(*rds.ModifyDBSubnetGroupInput) (*rds.ModifyDBSubnetGroupOutput, error) + + ModifyEventSubscriptionRequest(*rds.ModifyEventSubscriptionInput) (*request.Request, *rds.ModifyEventSubscriptionOutput) + + ModifyEventSubscription(*rds.ModifyEventSubscriptionInput) (*rds.ModifyEventSubscriptionOutput, error) + + ModifyOptionGroupRequest(*rds.ModifyOptionGroupInput) (*request.Request, *rds.ModifyOptionGroupOutput) + + ModifyOptionGroup(*rds.ModifyOptionGroupInput) (*rds.ModifyOptionGroupOutput, error) + + PromoteReadReplicaRequest(*rds.PromoteReadReplicaInput) (*request.Request, *rds.PromoteReadReplicaOutput) + + PromoteReadReplica(*rds.PromoteReadReplicaInput) (*rds.PromoteReadReplicaOutput, error) + + PurchaseReservedDBInstancesOfferingRequest(*rds.PurchaseReservedDBInstancesOfferingInput) (*request.Request, *rds.PurchaseReservedDBInstancesOfferingOutput) + + PurchaseReservedDBInstancesOffering(*rds.PurchaseReservedDBInstancesOfferingInput) (*rds.PurchaseReservedDBInstancesOfferingOutput, error) + + RebootDBInstanceRequest(*rds.RebootDBInstanceInput) (*request.Request, *rds.RebootDBInstanceOutput) + + RebootDBInstance(*rds.RebootDBInstanceInput) (*rds.RebootDBInstanceOutput, error) + + RemoveSourceIdentifierFromSubscriptionRequest(*rds.RemoveSourceIdentifierFromSubscriptionInput) (*request.Request, *rds.RemoveSourceIdentifierFromSubscriptionOutput) + + RemoveSourceIdentifierFromSubscription(*rds.RemoveSourceIdentifierFromSubscriptionInput) (*rds.RemoveSourceIdentifierFromSubscriptionOutput, error) + + RemoveTagsFromResourceRequest(*rds.RemoveTagsFromResourceInput) (*request.Request, *rds.RemoveTagsFromResourceOutput) + + RemoveTagsFromResource(*rds.RemoveTagsFromResourceInput) (*rds.RemoveTagsFromResourceOutput, error) + + ResetDBClusterParameterGroupRequest(*rds.ResetDBClusterParameterGroupInput) (*request.Request, *rds.DBClusterParameterGroupNameMessage) + + ResetDBClusterParameterGroup(*rds.ResetDBClusterParameterGroupInput) (*rds.DBClusterParameterGroupNameMessage, error) + + ResetDBParameterGroupRequest(*rds.ResetDBParameterGroupInput) (*request.Request, *rds.DBParameterGroupNameMessage) + + ResetDBParameterGroup(*rds.ResetDBParameterGroupInput) (*rds.DBParameterGroupNameMessage, error) + + RestoreDBClusterFromSnapshotRequest(*rds.RestoreDBClusterFromSnapshotInput) (*request.Request, *rds.RestoreDBClusterFromSnapshotOutput) + + RestoreDBClusterFromSnapshot(*rds.RestoreDBClusterFromSnapshotInput) (*rds.RestoreDBClusterFromSnapshotOutput, error) + + RestoreDBClusterToPointInTimeRequest(*rds.RestoreDBClusterToPointInTimeInput) (*request.Request, *rds.RestoreDBClusterToPointInTimeOutput) + + RestoreDBClusterToPointInTime(*rds.RestoreDBClusterToPointInTimeInput) (*rds.RestoreDBClusterToPointInTimeOutput, error) + + RestoreDBInstanceFromDBSnapshotRequest(*rds.RestoreDBInstanceFromDBSnapshotInput) (*request.Request, *rds.RestoreDBInstanceFromDBSnapshotOutput) + + 
RestoreDBInstanceFromDBSnapshot(*rds.RestoreDBInstanceFromDBSnapshotInput) (*rds.RestoreDBInstanceFromDBSnapshotOutput, error)
+
+	RestoreDBInstanceToPointInTimeRequest(*rds.RestoreDBInstanceToPointInTimeInput) (*request.Request, *rds.RestoreDBInstanceToPointInTimeOutput)
+
+	RestoreDBInstanceToPointInTime(*rds.RestoreDBInstanceToPointInTimeInput) (*rds.RestoreDBInstanceToPointInTimeOutput, error)
+
+	RevokeDBSecurityGroupIngressRequest(*rds.RevokeDBSecurityGroupIngressInput) (*request.Request, *rds.RevokeDBSecurityGroupIngressOutput)
+
+	RevokeDBSecurityGroupIngress(*rds.RevokeDBSecurityGroupIngressInput) (*rds.RevokeDBSecurityGroupIngressOutput, error)
+}
+
+var _ RDSAPI = (*rds.RDS)(nil)
diff --git a/vendor/github.com/aws/aws-sdk-go/service/rds/service.go b/vendor/github.com/aws/aws-sdk-go/service/rds/service.go
new file mode 100644
index 000000000..906277204
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/rds/service.go
@@ -0,0 +1,109 @@
+// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT.
+
+package rds
+
+import (
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/aws/client"
+	"github.com/aws/aws-sdk-go/aws/client/metadata"
+	"github.com/aws/aws-sdk-go/aws/request"
+	"github.com/aws/aws-sdk-go/private/protocol/query"
+	"github.com/aws/aws-sdk-go/private/signer/v4"
+)
+
+// Amazon Relational Database Service (Amazon RDS) is a web service that makes
+// it easier to set up, operate, and scale a relational database in the cloud.
+// It provides cost-efficient, resizeable capacity for an industry-standard
+// relational database and manages common database administration tasks, freeing
+// up developers to focus on what makes their applications and businesses unique.
+//
+// Amazon RDS gives you access to the capabilities of a MySQL, MariaDB, PostgreSQL,
+// Microsoft SQL Server, Oracle, or Aurora database server. This means the code,
+// applications, and tools you already use today with your existing databases
+// work with Amazon RDS without modification. Amazon RDS automatically backs
+// up your database and maintains the database software that powers your DB
+// instance. Amazon RDS is flexible: you can scale your database instance's
+// compute resources and storage capacity to meet your application's demand.
+// As with all Amazon Web Services, there are no up-front investments, and you
+// pay only for the resources you use.
+//
+// This is an interface reference for Amazon RDS. It contains documentation
+// for a programming or command line interface you can use to manage Amazon
+// RDS. Note that Amazon RDS is asynchronous, which means that some interfaces
+// might require techniques such as polling or callback functions to determine
+// when a command has been applied. In this reference, the parameter descriptions
+// indicate whether a command is applied immediately, on the next instance reboot,
+// or during the maintenance window. For a summary of the Amazon RDS interfaces,
+// go to Available RDS Interfaces (http://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Welcome.html#Welcome.Interfaces).
+// The service client's operations are safe to be used concurrently.
+// It is not safe to mutate any of the client's properties though.
+type RDS struct {
+	*client.Client
+}
+
+// Used for custom client initialization logic
+var initClient func(*client.Client)
+
+// Used for custom request initialization logic
+var initRequest func(*request.Request)
+
+// A ServiceName is the name of the service the client will make API calls to.
+const ServiceName = "rds"
+
+// New creates a new instance of the RDS client with a session.
+// If additional configuration is needed for the client instance use the optional
+// aws.Config parameter to add your extra config.
+//
+// Example:
+//     // Create an RDS client from just a session.
+//     svc := rds.New(mySession)
+//
+//     // Create an RDS client with additional configuration
+//     svc := rds.New(mySession, aws.NewConfig().WithRegion("us-west-2"))
+func New(p client.ConfigProvider, cfgs ...*aws.Config) *RDS {
+	c := p.ClientConfig(ServiceName, cfgs...)
+	return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion)
+}
+
+// newClient creates, initializes and returns a new service client instance.
+func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *RDS {
+	svc := &RDS{
+		Client: client.New(
+			cfg,
+			metadata.ClientInfo{
+				ServiceName:   ServiceName,
+				SigningRegion: signingRegion,
+				Endpoint:      endpoint,
+				APIVersion:    "2014-10-31",
+			},
+			handlers,
+		),
+	}
+
+	// Handlers
+	svc.Handlers.Sign.PushBack(v4.Sign)
+	svc.Handlers.Build.PushBack(query.Build)
+	svc.Handlers.Unmarshal.PushBack(query.Unmarshal)
+	svc.Handlers.UnmarshalMeta.PushBack(query.UnmarshalMeta)
+	svc.Handlers.UnmarshalError.PushBack(query.UnmarshalError)
+
+	// Run custom client initialization if present
+	if initClient != nil {
+		initClient(svc.Client)
+	}
+
+	return svc
+}
+
+// newRequest creates a new request for an RDS operation and runs any
+// custom request initialization.
+func (c *RDS) newRequest(op *request.Operation, params, data interface{}) *request.Request {
+	req := c.NewRequest(op, params, data)
+
+	// Run custom request initialization if present
+	if initRequest != nil {
+		initRequest(req)
+	}
+
+	return req
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/service/rds/waiters.go b/vendor/github.com/aws/aws-sdk-go/service/rds/waiters.go
new file mode 100644
index 000000000..3c24ea392
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/rds/waiters.go
@@ -0,0 +1,119 @@
+// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT.
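+//
+// A minimal waiter call might look like this (a sketch; the identifier is a
+// placeholder, and the aws and session packages are assumed to be imported):
+//
+//	svc := rds.New(session.New())
+//	err := svc.WaitUntilDBInstanceAvailable(&rds.DescribeDBInstancesInput{
+//		DBInstanceIdentifier: aws.String("mydb"),
+//	})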
+
+package rds
+
+import (
+	"github.com/aws/aws-sdk-go/private/waiter"
+)
+
+func (c *RDS) WaitUntilDBInstanceAvailable(input *DescribeDBInstancesInput) error {
+	waiterCfg := waiter.Config{
+		Operation:   "DescribeDBInstances",
+		Delay:       30,
+		MaxAttempts: 60,
+		Acceptors: []waiter.WaitAcceptor{
+			{
+				State:    "success",
+				Matcher:  "pathAll",
+				Argument: "DBInstances[].DBInstanceStatus",
+				Expected: "available",
+			},
+			{
+				State:    "failure",
+				Matcher:  "pathAny",
+				Argument: "DBInstances[].DBInstanceStatus",
+				Expected: "deleted",
+			},
+			{
+				State:    "failure",
+				Matcher:  "pathAny",
+				Argument: "DBInstances[].DBInstanceStatus",
+				Expected: "deleting",
+			},
+			{
+				State:    "failure",
+				Matcher:  "pathAny",
+				Argument: "DBInstances[].DBInstanceStatus",
+				Expected: "failed",
+			},
+			{
+				State:    "failure",
+				Matcher:  "pathAny",
+				Argument: "DBInstances[].DBInstanceStatus",
+				Expected: "incompatible-restore",
+			},
+			{
+				State:    "failure",
+				Matcher:  "pathAny",
+				Argument: "DBInstances[].DBInstanceStatus",
+				Expected: "incompatible-parameters",
+			},
+		},
+	}
+
+	w := waiter.Waiter{
+		Client: c,
+		Input:  input,
+		Config: waiterCfg,
+	}
+	return w.Wait()
+}
+
+func (c *RDS) WaitUntilDBInstanceDeleted(input *DescribeDBInstancesInput) error {
+	waiterCfg := waiter.Config{
+		Operation:   "DescribeDBInstances",
+		Delay:       30,
+		MaxAttempts: 60,
+		Acceptors: []waiter.WaitAcceptor{
+			{
+				State:    "success",
+				Matcher:  "pathAll",
+				Argument: "DBInstances[].DBInstanceStatus",
+				Expected: "deleted",
+			},
+			{
+				State:    "success",
+				Matcher:  "error",
+				Argument: "",
+				Expected: "DBInstanceNotFound",
+			},
+			{
+				State:    "failure",
+				Matcher:  "pathAny",
+				Argument: "DBInstances[].DBInstanceStatus",
+				Expected: "creating",
+			},
+			{
+				State:    "failure",
+				Matcher:  "pathAny",
+				Argument: "DBInstances[].DBInstanceStatus",
+				Expected: "modifying",
+			},
+			{
+				State:    "failure",
+				Matcher:  "pathAny",
+				Argument: "DBInstances[].DBInstanceStatus",
+				Expected: "rebooting",
+			},
+			{
+				State:    "failure",
+				Matcher:  "pathAny",
+				Argument: "DBInstances[].DBInstanceStatus",
+				Expected: "resetting-master-credentials",
+			},
+		},
+	}
+
+	w := waiter.Waiter{
+		Client: c,
+		Input:  input,
+		Config: waiterCfg,
+	}
+	return w.Wait()
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/service/redshift/api.go b/vendor/github.com/aws/aws-sdk-go/service/redshift/api.go
new file mode 100644
index 000000000..a23ff66fe
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/redshift/api.go
@@ -0,0 +1,7171 @@
+// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT.
+
+// Package redshift provides a client for Amazon Redshift.
+package redshift
+
+import (
+	"time"
+
+	"github.com/aws/aws-sdk-go/aws/awsutil"
+	"github.com/aws/aws-sdk-go/aws/request"
+)
+
+const opAuthorizeClusterSecurityGroupIngress = "AuthorizeClusterSecurityGroupIngress"
+
+// AuthorizeClusterSecurityGroupIngressRequest generates a request for the AuthorizeClusterSecurityGroupIngress operation.
+func (c *Redshift) AuthorizeClusterSecurityGroupIngressRequest(input *AuthorizeClusterSecurityGroupIngressInput) (req *request.Request, output *AuthorizeClusterSecurityGroupIngressOutput) { + op := &request.Operation{ + Name: opAuthorizeClusterSecurityGroupIngress, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &AuthorizeClusterSecurityGroupIngressInput{} + } + + req = c.newRequest(op, input, output) + output = &AuthorizeClusterSecurityGroupIngressOutput{} + req.Data = output + return +} + +// Adds an inbound (ingress) rule to an Amazon Redshift security group. Depending +// on whether the application accessing your cluster is running on the Internet +// or an EC2 instance, you can authorize inbound access to either a Classless +// Interdomain Routing (CIDR) IP address range or an EC2 security group. You +// can add as many as 20 ingress rules to an Amazon Redshift security group. +// +// The EC2 security group must be defined in the AWS region where the cluster +// resides. For an overview of CIDR blocks, see the Wikipedia article on Classless +// Inter-Domain Routing (http://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing). +// +// You must also associate the security group with a cluster so that clients +// running on these IP addresses or the EC2 instance are authorized to connect +// to the cluster. For information about managing security groups, go to Working +// with Security Groups (http://docs.aws.amazon.com/redshift/latest/mgmt/working-with-security-groups.html) +// in the Amazon Redshift Cluster Management Guide. +func (c *Redshift) AuthorizeClusterSecurityGroupIngress(input *AuthorizeClusterSecurityGroupIngressInput) (*AuthorizeClusterSecurityGroupIngressOutput, error) { + req, out := c.AuthorizeClusterSecurityGroupIngressRequest(input) + err := req.Send() + return out, err +} + +const opAuthorizeSnapshotAccess = "AuthorizeSnapshotAccess" + +// AuthorizeSnapshotAccessRequest generates a request for the AuthorizeSnapshotAccess operation. +func (c *Redshift) AuthorizeSnapshotAccessRequest(input *AuthorizeSnapshotAccessInput) (req *request.Request, output *AuthorizeSnapshotAccessOutput) { + op := &request.Operation{ + Name: opAuthorizeSnapshotAccess, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &AuthorizeSnapshotAccessInput{} + } + + req = c.newRequest(op, input, output) + output = &AuthorizeSnapshotAccessOutput{} + req.Data = output + return +} + +// Authorizes the specified AWS customer account to restore the specified snapshot. +// +// For more information about working with snapshots, go to Amazon Redshift +// Snapshots (http://docs.aws.amazon.com/redshift/latest/mgmt/working-with-snapshots.html) +// in the Amazon Redshift Cluster Management Guide. +func (c *Redshift) AuthorizeSnapshotAccess(input *AuthorizeSnapshotAccessInput) (*AuthorizeSnapshotAccessOutput, error) { + req, out := c.AuthorizeSnapshotAccessRequest(input) + err := req.Send() + return out, err +} + +const opCopyClusterSnapshot = "CopyClusterSnapshot" + +// CopyClusterSnapshotRequest generates a request for the CopyClusterSnapshot operation. 
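+//
+// A typical caller sends the returned request and then uses the output (a
+// sketch; svc and input are placeholders):
+//
+//	req, out := svc.CopyClusterSnapshotRequest(input)
+//	if err := req.Send(); err == nil {
+//		fmt.Println(out)
+//	}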
+func (c *Redshift) CopyClusterSnapshotRequest(input *CopyClusterSnapshotInput) (req *request.Request, output *CopyClusterSnapshotOutput) {
+	op := &request.Operation{
+		Name:       opCopyClusterSnapshot,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &CopyClusterSnapshotInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &CopyClusterSnapshotOutput{}
+	req.Data = output
+	return
+}
+
+// Copies the specified automated cluster snapshot to a new manual cluster snapshot.
+// The source must be an automated snapshot and it must be in the available
+// state.
+//
+// When you delete a cluster, Amazon Redshift deletes any automated snapshots
+// of the cluster. Also, when the retention period of the snapshot expires,
+// Amazon Redshift automatically deletes it. If you want to keep an automated
+// snapshot for a longer period, you can make a manual copy of the snapshot.
+// Manual snapshots are retained until you delete them.
+//
+// For more information about working with snapshots, go to Amazon Redshift
+// Snapshots (http://docs.aws.amazon.com/redshift/latest/mgmt/working-with-snapshots.html)
+// in the Amazon Redshift Cluster Management Guide.
+func (c *Redshift) CopyClusterSnapshot(input *CopyClusterSnapshotInput) (*CopyClusterSnapshotOutput, error) {
+	req, out := c.CopyClusterSnapshotRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+const opCreateCluster = "CreateCluster"
+
+// CreateClusterRequest generates a request for the CreateCluster operation.
+func (c *Redshift) CreateClusterRequest(input *CreateClusterInput) (req *request.Request, output *CreateClusterOutput) {
+	op := &request.Operation{
+		Name:       opCreateCluster,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &CreateClusterInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &CreateClusterOutput{}
+	req.Data = output
+	return
+}
+
+// Creates a new cluster. To create a cluster in a virtual private cloud (VPC),
+// you must provide a cluster subnet group name. If you don't provide a cluster
+// subnet group name or the cluster security group parameter, Amazon Redshift
+// creates a non-VPC cluster and associates the default cluster security group
+// with the cluster. For more information about managing clusters, go to Amazon
+// Redshift Clusters (http://docs.aws.amazon.com/redshift/latest/mgmt/working-with-clusters.html)
+// in the Amazon Redshift Cluster Management Guide.
+func (c *Redshift) CreateCluster(input *CreateClusterInput) (*CreateClusterOutput, error) {
+	req, out := c.CreateClusterRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+const opCreateClusterParameterGroup = "CreateClusterParameterGroup"
+
+// CreateClusterParameterGroupRequest generates a request for the CreateClusterParameterGroup operation.
+func (c *Redshift) CreateClusterParameterGroupRequest(input *CreateClusterParameterGroupInput) (req *request.Request, output *CreateClusterParameterGroupOutput) {
+	op := &request.Operation{
+		Name:       opCreateClusterParameterGroup,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &CreateClusterParameterGroupInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &CreateClusterParameterGroupOutput{}
+	req.Data = output
+	return
+}
+
+// Creates an Amazon Redshift parameter group.
+//
+// Creating parameter groups is independent of creating clusters. You can associate
+// a cluster with a parameter group when you create the cluster.
You can also +// associate an existing cluster with a parameter group after the cluster is +// created by using ModifyCluster. +// +// Parameters in the parameter group define specific behavior that applies +// to the databases you create on the cluster. For more information about parameters +// and parameter groups, go to Amazon Redshift Parameter Groups (http://docs.aws.amazon.com/redshift/latest/mgmt/working-with-parameter-groups.html) +// in the Amazon Redshift Cluster Management Guide. +func (c *Redshift) CreateClusterParameterGroup(input *CreateClusterParameterGroupInput) (*CreateClusterParameterGroupOutput, error) { + req, out := c.CreateClusterParameterGroupRequest(input) + err := req.Send() + return out, err +} + +const opCreateClusterSecurityGroup = "CreateClusterSecurityGroup" + +// CreateClusterSecurityGroupRequest generates a request for the CreateClusterSecurityGroup operation. +func (c *Redshift) CreateClusterSecurityGroupRequest(input *CreateClusterSecurityGroupInput) (req *request.Request, output *CreateClusterSecurityGroupOutput) { + op := &request.Operation{ + Name: opCreateClusterSecurityGroup, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateClusterSecurityGroupInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateClusterSecurityGroupOutput{} + req.Data = output + return +} + +// Creates a new Amazon Redshift security group. You use security groups to +// control access to non-VPC clusters. +// +// For information about managing security groups, go to Amazon Redshift Cluster +// Security Groups (http://docs.aws.amazon.com/redshift/latest/mgmt/working-with-security-groups.html) +// in the Amazon Redshift Cluster Management Guide. +func (c *Redshift) CreateClusterSecurityGroup(input *CreateClusterSecurityGroupInput) (*CreateClusterSecurityGroupOutput, error) { + req, out := c.CreateClusterSecurityGroupRequest(input) + err := req.Send() + return out, err +} + +const opCreateClusterSnapshot = "CreateClusterSnapshot" + +// CreateClusterSnapshotRequest generates a request for the CreateClusterSnapshot operation. +func (c *Redshift) CreateClusterSnapshotRequest(input *CreateClusterSnapshotInput) (req *request.Request, output *CreateClusterSnapshotOutput) { + op := &request.Operation{ + Name: opCreateClusterSnapshot, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateClusterSnapshotInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateClusterSnapshotOutput{} + req.Data = output + return +} + +// Creates a manual snapshot of the specified cluster. The cluster must be in +// the available state. +// +// For more information about working with snapshots, go to Amazon Redshift +// Snapshots (http://docs.aws.amazon.com/redshift/latest/mgmt/working-with-snapshots.html) +// in the Amazon Redshift Cluster Management Guide. +func (c *Redshift) CreateClusterSnapshot(input *CreateClusterSnapshotInput) (*CreateClusterSnapshotOutput, error) { + req, out := c.CreateClusterSnapshotRequest(input) + err := req.Send() + return out, err +} + +const opCreateClusterSubnetGroup = "CreateClusterSubnetGroup" + +// CreateClusterSubnetGroupRequest generates a request for the CreateClusterSubnetGroup operation. 
+func (c *Redshift) CreateClusterSubnetGroupRequest(input *CreateClusterSubnetGroupInput) (req *request.Request, output *CreateClusterSubnetGroupOutput) {
+	op := &request.Operation{
+		Name:       opCreateClusterSubnetGroup,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &CreateClusterSubnetGroupInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &CreateClusterSubnetGroupOutput{}
+	req.Data = output
+	return
+}
+
+// Creates a new Amazon Redshift subnet group. You must provide a list of one
+// or more subnets in your existing Amazon Virtual Private Cloud (Amazon VPC)
+// when creating an Amazon Redshift subnet group.
+//
+// For information about subnet groups, go to Amazon Redshift Cluster Subnet
+// Groups (http://docs.aws.amazon.com/redshift/latest/mgmt/working-with-cluster-subnet-groups.html)
+// in the Amazon Redshift Cluster Management Guide.
+func (c *Redshift) CreateClusterSubnetGroup(input *CreateClusterSubnetGroupInput) (*CreateClusterSubnetGroupOutput, error) {
+	req, out := c.CreateClusterSubnetGroupRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+const opCreateEventSubscription = "CreateEventSubscription"
+
+// CreateEventSubscriptionRequest generates a request for the CreateEventSubscription operation.
+func (c *Redshift) CreateEventSubscriptionRequest(input *CreateEventSubscriptionInput) (req *request.Request, output *CreateEventSubscriptionOutput) {
+	op := &request.Operation{
+		Name:       opCreateEventSubscription,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &CreateEventSubscriptionInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &CreateEventSubscriptionOutput{}
+	req.Data = output
+	return
+}
+
+// Creates an Amazon Redshift event notification subscription. This action requires
+// an ARN (Amazon Resource Name) of an Amazon SNS topic created by either the
+// Amazon Redshift console, the Amazon SNS console, or the Amazon SNS API. To
+// obtain an ARN with Amazon SNS, you must create a topic in Amazon SNS and
+// subscribe to the topic. The ARN is displayed in the SNS console.
+//
+// You can specify the source type, and lists of Amazon Redshift source IDs,
+// event categories, and event severities. Notifications will be sent for all
+// events that match those criteria. For example, you can specify source
+// type = cluster, source ID = my-cluster-1 and mycluster2, event categories
+// = Availability, Backup, and severity = ERROR. The subscription will only
+// send notifications for those ERROR events in the Availability and Backup
+// categories for the specified clusters.
+//
+// If you specify both the source type and source IDs, such as source type
+// = cluster and source identifier = my-cluster-1, notifications will be sent
+// for all the cluster events for my-cluster-1. If you specify a source type
+// but do not specify a source identifier, you will receive notice of the events
+// for the objects of that type in your AWS account. If you specify neither
+// the SourceType nor the SourceIdentifier, you will be notified of events generated
+// from all Amazon Redshift sources belonging to your AWS account. You must
+// specify a source type if you specify a source ID.
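+//
+// A minimal illustrative input matching the example above (all values are
+// placeholders, and the aws helper package is assumed to be imported):
+//
+//	input := &redshift.CreateEventSubscriptionInput{
+//		SubscriptionName: aws.String("my-subscription"),
+//		SnsTopicArn:      aws.String("arn:aws:sns:us-east-1:123456789012:my-topic"),
+//		SourceType:       aws.String("cluster"),
+//		SourceIds:        []*string{aws.String("my-cluster-1"), aws.String("mycluster2")},
+//		EventCategories:  []*string{aws.String("Availability"), aws.String("Backup")},
+//		Severity:         aws.String("ERROR"),
+//	}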
+func (c *Redshift) CreateEventSubscription(input *CreateEventSubscriptionInput) (*CreateEventSubscriptionOutput, error) { + req, out := c.CreateEventSubscriptionRequest(input) + err := req.Send() + return out, err +} + +const opCreateHsmClientCertificate = "CreateHsmClientCertificate" + +// CreateHsmClientCertificateRequest generates a request for the CreateHsmClientCertificate operation. +func (c *Redshift) CreateHsmClientCertificateRequest(input *CreateHsmClientCertificateInput) (req *request.Request, output *CreateHsmClientCertificateOutput) { + op := &request.Operation{ + Name: opCreateHsmClientCertificate, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateHsmClientCertificateInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateHsmClientCertificateOutput{} + req.Data = output + return +} + +// Creates an HSM client certificate that an Amazon Redshift cluster will use +// to connect to the client's HSM in order to store and retrieve the keys used +// to encrypt the cluster databases. +// +// The command returns a public key, which you must store in the HSM. In addition +// to creating the HSM certificate, you must create an Amazon Redshift HSM configuration +// that provides a cluster the information needed to store and use encryption +// keys in the HSM. For more information, go to Hardware Security Modules (http://docs.aws.amazon.com/redshift/latest/mgmt/working-with-HSM.html) +// in the Amazon Redshift Cluster Management Guide. +func (c *Redshift) CreateHsmClientCertificate(input *CreateHsmClientCertificateInput) (*CreateHsmClientCertificateOutput, error) { + req, out := c.CreateHsmClientCertificateRequest(input) + err := req.Send() + return out, err +} + +const opCreateHsmConfiguration = "CreateHsmConfiguration" + +// CreateHsmConfigurationRequest generates a request for the CreateHsmConfiguration operation. +func (c *Redshift) CreateHsmConfigurationRequest(input *CreateHsmConfigurationInput) (req *request.Request, output *CreateHsmConfigurationOutput) { + op := &request.Operation{ + Name: opCreateHsmConfiguration, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateHsmConfigurationInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateHsmConfigurationOutput{} + req.Data = output + return +} + +// Creates an HSM configuration that contains the information required by an +// Amazon Redshift cluster to store and use database encryption keys in a Hardware +// Security Module (HSM). After creating the HSM configuration, you can specify +// it as a parameter when creating a cluster. The cluster will then store its +// encryption keys in the HSM. +// +// In addition to creating an HSM configuration, you must also create an HSM +// client certificate. For more information, go to Hardware Security Modules +// (http://docs.aws.amazon.com/redshift/latest/mgmt/working-with-HSM.html) in +// the Amazon Redshift Cluster Management Guide. +func (c *Redshift) CreateHsmConfiguration(input *CreateHsmConfigurationInput) (*CreateHsmConfigurationOutput, error) { + req, out := c.CreateHsmConfigurationRequest(input) + err := req.Send() + return out, err +} + +const opCreateSnapshotCopyGrant = "CreateSnapshotCopyGrant" + +// CreateSnapshotCopyGrantRequest generates a request for the CreateSnapshotCopyGrant operation. 
+func (c *Redshift) CreateSnapshotCopyGrantRequest(input *CreateSnapshotCopyGrantInput) (req *request.Request, output *CreateSnapshotCopyGrantOutput) {
+    op := &request.Operation{
+        Name:       opCreateSnapshotCopyGrant,
+        HTTPMethod: "POST",
+        HTTPPath:   "/",
+    }
+
+    if input == nil {
+        input = &CreateSnapshotCopyGrantInput{}
+    }
+
+    req = c.newRequest(op, input, output)
+    output = &CreateSnapshotCopyGrantOutput{}
+    req.Data = output
+    return
+}
+
+// Creates a snapshot copy grant that permits Amazon Redshift to use a customer
+// master key (CMK) from AWS Key Management Service (AWS KMS) to encrypt copied
+// snapshots in a destination region.
+//
+// For more information about managing snapshot copy grants, go to Amazon
+// Redshift Database Encryption (http://docs.aws.amazon.com/redshift/latest/mgmt/working-with-db-encryption.html)
+// in the Amazon Redshift Cluster Management Guide.
+func (c *Redshift) CreateSnapshotCopyGrant(input *CreateSnapshotCopyGrantInput) (*CreateSnapshotCopyGrantOutput, error) {
+    req, out := c.CreateSnapshotCopyGrantRequest(input)
+    err := req.Send()
+    return out, err
+}
+
+const opCreateTags = "CreateTags"
+
+// CreateTagsRequest generates a request for the CreateTags operation.
+func (c *Redshift) CreateTagsRequest(input *CreateTagsInput) (req *request.Request, output *CreateTagsOutput) {
+    op := &request.Operation{
+        Name:       opCreateTags,
+        HTTPMethod: "POST",
+        HTTPPath:   "/",
+    }
+
+    if input == nil {
+        input = &CreateTagsInput{}
+    }
+
+    req = c.newRequest(op, input, output)
+    output = &CreateTagsOutput{}
+    req.Data = output
+    return
+}
+
+// Adds one or more tags to a specified resource.
+//
+// A resource can have up to 10 tags. If you try to create more than 10 tags
+// for a resource, you will receive an error and the attempt will fail.
+//
+// If you specify a key that already exists for the resource, the value for
+// that key will be updated with the new value.
+func (c *Redshift) CreateTags(input *CreateTagsInput) (*CreateTagsOutput, error) {
+    req, out := c.CreateTagsRequest(input)
+    err := req.Send()
+    return out, err
+}
+
+const opDeleteCluster = "DeleteCluster"
+
+// DeleteClusterRequest generates a request for the DeleteCluster operation.
+func (c *Redshift) DeleteClusterRequest(input *DeleteClusterInput) (req *request.Request, output *DeleteClusterOutput) {
+    op := &request.Operation{
+        Name:       opDeleteCluster,
+        HTTPMethod: "POST",
+        HTTPPath:   "/",
+    }
+
+    if input == nil {
+        input = &DeleteClusterInput{}
+    }
+
+    req = c.newRequest(op, input, output)
+    output = &DeleteClusterOutput{}
+    req.Data = output
+    return
+}
+
+// Deletes a previously provisioned cluster. A successful response from the
+// web service indicates that the request was received correctly. Use DescribeClusters
+// to monitor the status of the deletion. The delete operation cannot be canceled
+// or reverted once submitted. For more information about managing clusters,
+// go to Amazon Redshift Clusters (http://docs.aws.amazon.com/redshift/latest/mgmt/working-with-clusters.html)
+// in the Amazon Redshift Cluster Management Guide.
+//
+// If you want to shut down the cluster and retain it for future use, set
+// SkipFinalClusterSnapshot to false and specify a name for FinalClusterSnapshotIdentifier.
+// You can later restore this snapshot to resume using the cluster. If a final
+// cluster snapshot is requested, the status of the cluster will be "final-snapshot"
+// while the snapshot is being taken, and then "deleting" once Amazon Redshift
+// begins deleting the cluster.
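+//
+// For example (an illustrative sketch, not part of the generated API; svc is
+// assumed to be an initialized *Redshift client and the identifiers are
+// placeholders):
+//
+//    _, err := svc.DeleteCluster(&DeleteClusterInput{
+//        ClusterIdentifier:              aws.String("my-cluster-1"),
+//        SkipFinalClusterSnapshot:       aws.Bool(false),
+//        FinalClusterSnapshotIdentifier: aws.String("my-cluster-1-final-snapshot"),
+//    })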
+//
+// For more information about managing clusters, go to Amazon Redshift Clusters
+// (http://docs.aws.amazon.com/redshift/latest/mgmt/working-with-clusters.html)
+// in the Amazon Redshift Cluster Management Guide.
+func (c *Redshift) DeleteCluster(input *DeleteClusterInput) (*DeleteClusterOutput, error) {
+    req, out := c.DeleteClusterRequest(input)
+    err := req.Send()
+    return out, err
+}
+
+const opDeleteClusterParameterGroup = "DeleteClusterParameterGroup"
+
+// DeleteClusterParameterGroupRequest generates a request for the DeleteClusterParameterGroup operation.
+func (c *Redshift) DeleteClusterParameterGroupRequest(input *DeleteClusterParameterGroupInput) (req *request.Request, output *DeleteClusterParameterGroupOutput) {
+    op := &request.Operation{
+        Name:       opDeleteClusterParameterGroup,
+        HTTPMethod: "POST",
+        HTTPPath:   "/",
+    }
+
+    if input == nil {
+        input = &DeleteClusterParameterGroupInput{}
+    }
+
+    req = c.newRequest(op, input, output)
+    output = &DeleteClusterParameterGroupOutput{}
+    req.Data = output
+    return
+}
+
+// Deletes a specified Amazon Redshift parameter group. You cannot delete a
+// parameter group if it is associated with a cluster.
+func (c *Redshift) DeleteClusterParameterGroup(input *DeleteClusterParameterGroupInput) (*DeleteClusterParameterGroupOutput, error) {
+    req, out := c.DeleteClusterParameterGroupRequest(input)
+    err := req.Send()
+    return out, err
+}
+
+const opDeleteClusterSecurityGroup = "DeleteClusterSecurityGroup"
+
+// DeleteClusterSecurityGroupRequest generates a request for the DeleteClusterSecurityGroup operation.
+func (c *Redshift) DeleteClusterSecurityGroupRequest(input *DeleteClusterSecurityGroupInput) (req *request.Request, output *DeleteClusterSecurityGroupOutput) {
+    op := &request.Operation{
+        Name:       opDeleteClusterSecurityGroup,
+        HTTPMethod: "POST",
+        HTTPPath:   "/",
+    }
+
+    if input == nil {
+        input = &DeleteClusterSecurityGroupInput{}
+    }
+
+    req = c.newRequest(op, input, output)
+    output = &DeleteClusterSecurityGroupOutput{}
+    req.Data = output
+    return
+}
+
+// Deletes an Amazon Redshift security group.
+//
+// You cannot delete a security group that is associated with any clusters.
+// You cannot delete the default security group. For information about managing
+// security groups, go to Amazon Redshift Cluster Security Groups (http://docs.aws.amazon.com/redshift/latest/mgmt/working-with-security-groups.html)
+// in the Amazon Redshift Cluster Management Guide.
+func (c *Redshift) DeleteClusterSecurityGroup(input *DeleteClusterSecurityGroupInput) (*DeleteClusterSecurityGroupOutput, error) {
+    req, out := c.DeleteClusterSecurityGroupRequest(input)
+    err := req.Send()
+    return out, err
+}
+
+const opDeleteClusterSnapshot = "DeleteClusterSnapshot"
+
+// DeleteClusterSnapshotRequest generates a request for the DeleteClusterSnapshot operation.
+func (c *Redshift) DeleteClusterSnapshotRequest(input *DeleteClusterSnapshotInput) (req *request.Request, output *DeleteClusterSnapshotOutput) {
+    op := &request.Operation{
+        Name:       opDeleteClusterSnapshot,
+        HTTPMethod: "POST",
+        HTTPPath:   "/",
+    }
+
+    if input == nil {
+        input = &DeleteClusterSnapshotInput{}
+    }
+
+    req = c.newRequest(op, input, output)
+    output = &DeleteClusterSnapshotOutput{}
+    req.Data = output
+    return
+}
+
+// Deletes the specified manual snapshot. The snapshot must be in the available
+// state, with no other users authorized to access the snapshot.
+//
+// Unlike automated snapshots, manual snapshots are retained even after you
+// delete your cluster. Amazon Redshift does not delete your manual snapshots.
+// You must delete a manual snapshot explicitly to avoid being charged. If other
+// accounts are authorized to access the snapshot, you must revoke all of the
+// authorizations before you can delete the snapshot.
+func (c *Redshift) DeleteClusterSnapshot(input *DeleteClusterSnapshotInput) (*DeleteClusterSnapshotOutput, error) {
+    req, out := c.DeleteClusterSnapshotRequest(input)
+    err := req.Send()
+    return out, err
+}
+
+const opDeleteClusterSubnetGroup = "DeleteClusterSubnetGroup"
+
+// DeleteClusterSubnetGroupRequest generates a request for the DeleteClusterSubnetGroup operation.
+func (c *Redshift) DeleteClusterSubnetGroupRequest(input *DeleteClusterSubnetGroupInput) (req *request.Request, output *DeleteClusterSubnetGroupOutput) {
+    op := &request.Operation{
+        Name:       opDeleteClusterSubnetGroup,
+        HTTPMethod: "POST",
+        HTTPPath:   "/",
+    }
+
+    if input == nil {
+        input = &DeleteClusterSubnetGroupInput{}
+    }
+
+    req = c.newRequest(op, input, output)
+    output = &DeleteClusterSubnetGroupOutput{}
+    req.Data = output
+    return
+}
+
+// Deletes the specified cluster subnet group.
+func (c *Redshift) DeleteClusterSubnetGroup(input *DeleteClusterSubnetGroupInput) (*DeleteClusterSubnetGroupOutput, error) {
+    req, out := c.DeleteClusterSubnetGroupRequest(input)
+    err := req.Send()
+    return out, err
+}
+
+const opDeleteEventSubscription = "DeleteEventSubscription"
+
+// DeleteEventSubscriptionRequest generates a request for the DeleteEventSubscription operation.
+func (c *Redshift) DeleteEventSubscriptionRequest(input *DeleteEventSubscriptionInput) (req *request.Request, output *DeleteEventSubscriptionOutput) {
+    op := &request.Operation{
+        Name:       opDeleteEventSubscription,
+        HTTPMethod: "POST",
+        HTTPPath:   "/",
+    }
+
+    if input == nil {
+        input = &DeleteEventSubscriptionInput{}
+    }
+
+    req = c.newRequest(op, input, output)
+    output = &DeleteEventSubscriptionOutput{}
+    req.Data = output
+    return
+}
+
+// Deletes an Amazon Redshift event notification subscription.
+func (c *Redshift) DeleteEventSubscription(input *DeleteEventSubscriptionInput) (*DeleteEventSubscriptionOutput, error) {
+    req, out := c.DeleteEventSubscriptionRequest(input)
+    err := req.Send()
+    return out, err
+}
+
+const opDeleteHsmClientCertificate = "DeleteHsmClientCertificate"
+
+// DeleteHsmClientCertificateRequest generates a request for the DeleteHsmClientCertificate operation.
+func (c *Redshift) DeleteHsmClientCertificateRequest(input *DeleteHsmClientCertificateInput) (req *request.Request, output *DeleteHsmClientCertificateOutput) {
+    op := &request.Operation{
+        Name:       opDeleteHsmClientCertificate,
+        HTTPMethod: "POST",
+        HTTPPath:   "/",
+    }
+
+    if input == nil {
+        input = &DeleteHsmClientCertificateInput{}
+    }
+
+    req = c.newRequest(op, input, output)
+    output = &DeleteHsmClientCertificateOutput{}
+    req.Data = output
+    return
+}
+
+// Deletes the specified HSM client certificate.
+func (c *Redshift) DeleteHsmClientCertificate(input *DeleteHsmClientCertificateInput) (*DeleteHsmClientCertificateOutput, error) {
+    req, out := c.DeleteHsmClientCertificateRequest(input)
+    err := req.Send()
+    return out, err
+}
+
+const opDeleteHsmConfiguration = "DeleteHsmConfiguration"
+
+// DeleteHsmConfigurationRequest generates a request for the DeleteHsmConfiguration operation.
+func (c *Redshift) DeleteHsmConfigurationRequest(input *DeleteHsmConfigurationInput) (req *request.Request, output *DeleteHsmConfigurationOutput) { + op := &request.Operation{ + Name: opDeleteHsmConfiguration, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteHsmConfigurationInput{} + } + + req = c.newRequest(op, input, output) + output = &DeleteHsmConfigurationOutput{} + req.Data = output + return +} + +// Deletes the specified Amazon Redshift HSM configuration. +func (c *Redshift) DeleteHsmConfiguration(input *DeleteHsmConfigurationInput) (*DeleteHsmConfigurationOutput, error) { + req, out := c.DeleteHsmConfigurationRequest(input) + err := req.Send() + return out, err +} + +const opDeleteSnapshotCopyGrant = "DeleteSnapshotCopyGrant" + +// DeleteSnapshotCopyGrantRequest generates a request for the DeleteSnapshotCopyGrant operation. +func (c *Redshift) DeleteSnapshotCopyGrantRequest(input *DeleteSnapshotCopyGrantInput) (req *request.Request, output *DeleteSnapshotCopyGrantOutput) { + op := &request.Operation{ + Name: opDeleteSnapshotCopyGrant, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteSnapshotCopyGrantInput{} + } + + req = c.newRequest(op, input, output) + output = &DeleteSnapshotCopyGrantOutput{} + req.Data = output + return +} + +// Deletes the specified snapshot copy grant. +func (c *Redshift) DeleteSnapshotCopyGrant(input *DeleteSnapshotCopyGrantInput) (*DeleteSnapshotCopyGrantOutput, error) { + req, out := c.DeleteSnapshotCopyGrantRequest(input) + err := req.Send() + return out, err +} + +const opDeleteTags = "DeleteTags" + +// DeleteTagsRequest generates a request for the DeleteTags operation. +func (c *Redshift) DeleteTagsRequest(input *DeleteTagsInput) (req *request.Request, output *DeleteTagsOutput) { + op := &request.Operation{ + Name: opDeleteTags, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteTagsInput{} + } + + req = c.newRequest(op, input, output) + output = &DeleteTagsOutput{} + req.Data = output + return +} + +// Deletes a tag or tags from a resource. You must provide the ARN of the resource +// from which you want to delete the tag or tags. +func (c *Redshift) DeleteTags(input *DeleteTagsInput) (*DeleteTagsOutput, error) { + req, out := c.DeleteTagsRequest(input) + err := req.Send() + return out, err +} + +const opDescribeClusterParameterGroups = "DescribeClusterParameterGroups" + +// DescribeClusterParameterGroupsRequest generates a request for the DescribeClusterParameterGroups operation. +func (c *Redshift) DescribeClusterParameterGroupsRequest(input *DescribeClusterParameterGroupsInput) (req *request.Request, output *DescribeClusterParameterGroupsOutput) { + op := &request.Operation{ + Name: opDescribeClusterParameterGroups, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"Marker"}, + OutputTokens: []string{"Marker"}, + LimitToken: "MaxRecords", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeClusterParameterGroupsInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeClusterParameterGroupsOutput{} + req.Data = output + return +} + +// Returns a list of Amazon Redshift parameter groups, including parameter groups +// you created and the default parameter group. For each parameter group, the +// response includes the parameter group name, description, and parameter group +// family name. 
You can optionally specify a name to retrieve the description
+// of a specific parameter group.
+//
+// For more information about parameters and parameter groups, go to Amazon
+// Redshift Parameter Groups (http://docs.aws.amazon.com/redshift/latest/mgmt/working-with-parameter-groups.html)
+// in the Amazon Redshift Cluster Management Guide.
+//
+// If you specify both tag keys and tag values in the same request, Amazon
+// Redshift returns all parameter groups that match any combination of the specified
+// keys and values. For example, if you have owner and environment for tag keys,
+// and admin and test for tag values, all parameter groups that have any combination
+// of those values are returned.
+//
+// If both tag keys and values are omitted from the request, parameter groups
+// are returned regardless of whether they have tag keys or values associated
+// with them.
+func (c *Redshift) DescribeClusterParameterGroups(input *DescribeClusterParameterGroupsInput) (*DescribeClusterParameterGroupsOutput, error) {
+    req, out := c.DescribeClusterParameterGroupsRequest(input)
+    err := req.Send()
+    return out, err
+}
+
+func (c *Redshift) DescribeClusterParameterGroupsPages(input *DescribeClusterParameterGroupsInput, fn func(p *DescribeClusterParameterGroupsOutput, lastPage bool) (shouldContinue bool)) error {
+    page, _ := c.DescribeClusterParameterGroupsRequest(input)
+    page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator"))
+    return page.EachPage(func(p interface{}, lastPage bool) bool {
+        return fn(p.(*DescribeClusterParameterGroupsOutput), lastPage)
+    })
+}
+
+const opDescribeClusterParameters = "DescribeClusterParameters"
+
+// DescribeClusterParametersRequest generates a request for the DescribeClusterParameters operation.
+func (c *Redshift) DescribeClusterParametersRequest(input *DescribeClusterParametersInput) (req *request.Request, output *DescribeClusterParametersOutput) {
+    op := &request.Operation{
+        Name:       opDescribeClusterParameters,
+        HTTPMethod: "POST",
+        HTTPPath:   "/",
+        Paginator: &request.Paginator{
+            InputTokens:     []string{"Marker"},
+            OutputTokens:    []string{"Marker"},
+            LimitToken:      "MaxRecords",
+            TruncationToken: "",
+        },
+    }
+
+    if input == nil {
+        input = &DescribeClusterParametersInput{}
+    }
+
+    req = c.newRequest(op, input, output)
+    output = &DescribeClusterParametersOutput{}
+    req.Data = output
+    return
+}
+
+// Returns a detailed list of parameters contained within the specified Amazon
+// Redshift parameter group. For each parameter the response includes information
+// such as parameter name, description, data type, value, whether the parameter
+// value is modifiable, and so on.
+//
+// You can specify a source filter to retrieve parameters of only a specific
+// type. For example, to retrieve parameters that were modified by a user action
+// such as ModifyClusterParameterGroup, you can specify source equal to user.
+//
+// For more information about parameters and parameter groups, go to Amazon
+// Redshift Parameter Groups (http://docs.aws.amazon.com/redshift/latest/mgmt/working-with-parameter-groups.html)
+// in the Amazon Redshift Cluster Management Guide.
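+//
+// For example, retrieving only user-modified parameters might look like this
+// (an illustrative sketch, not part of the generated API; svc and the group
+// name are assumed values):
+//
+//    out, err := svc.DescribeClusterParameters(&DescribeClusterParametersInput{
+//        ParameterGroupName: aws.String("my-parameter-group"),
+//        Source:             aws.String("user"),
+//    })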
+func (c *Redshift) DescribeClusterParameters(input *DescribeClusterParametersInput) (*DescribeClusterParametersOutput, error) {
+    req, out := c.DescribeClusterParametersRequest(input)
+    err := req.Send()
+    return out, err
+}
+
+func (c *Redshift) DescribeClusterParametersPages(input *DescribeClusterParametersInput, fn func(p *DescribeClusterParametersOutput, lastPage bool) (shouldContinue bool)) error {
+    page, _ := c.DescribeClusterParametersRequest(input)
+    page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator"))
+    return page.EachPage(func(p interface{}, lastPage bool) bool {
+        return fn(p.(*DescribeClusterParametersOutput), lastPage)
+    })
+}
+
+const opDescribeClusterSecurityGroups = "DescribeClusterSecurityGroups"
+
+// DescribeClusterSecurityGroupsRequest generates a request for the DescribeClusterSecurityGroups operation.
+func (c *Redshift) DescribeClusterSecurityGroupsRequest(input *DescribeClusterSecurityGroupsInput) (req *request.Request, output *DescribeClusterSecurityGroupsOutput) {
+    op := &request.Operation{
+        Name:       opDescribeClusterSecurityGroups,
+        HTTPMethod: "POST",
+        HTTPPath:   "/",
+        Paginator: &request.Paginator{
+            InputTokens:     []string{"Marker"},
+            OutputTokens:    []string{"Marker"},
+            LimitToken:      "MaxRecords",
+            TruncationToken: "",
+        },
+    }
+
+    if input == nil {
+        input = &DescribeClusterSecurityGroupsInput{}
+    }
+
+    req = c.newRequest(op, input, output)
+    output = &DescribeClusterSecurityGroupsOutput{}
+    req.Data = output
+    return
+}
+
+// Returns information about Amazon Redshift security groups. If the name of
+// a security group is specified, the response will contain information about
+// only that security group.
+//
+// For information about managing security groups, go to Amazon Redshift Cluster
+// Security Groups (http://docs.aws.amazon.com/redshift/latest/mgmt/working-with-security-groups.html)
+// in the Amazon Redshift Cluster Management Guide.
+//
+// If you specify both tag keys and tag values in the same request, Amazon
+// Redshift returns all security groups that match any combination of the specified
+// keys and values. For example, if you have owner and environment for tag keys,
+// and admin and test for tag values, all security groups that have any combination
+// of those values are returned.
+//
+// If both tag keys and values are omitted from the request, security groups
+// are returned regardless of whether they have tag keys or values associated
+// with them.
+func (c *Redshift) DescribeClusterSecurityGroups(input *DescribeClusterSecurityGroupsInput) (*DescribeClusterSecurityGroupsOutput, error) {
+    req, out := c.DescribeClusterSecurityGroupsRequest(input)
+    err := req.Send()
+    return out, err
+}
+
+func (c *Redshift) DescribeClusterSecurityGroupsPages(input *DescribeClusterSecurityGroupsInput, fn func(p *DescribeClusterSecurityGroupsOutput, lastPage bool) (shouldContinue bool)) error {
+    page, _ := c.DescribeClusterSecurityGroupsRequest(input)
+    page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator"))
+    return page.EachPage(func(p interface{}, lastPage bool) bool {
+        return fn(p.(*DescribeClusterSecurityGroupsOutput), lastPage)
+    })
+}
+
+const opDescribeClusterSnapshots = "DescribeClusterSnapshots"
+
+// DescribeClusterSnapshotsRequest generates a request for the DescribeClusterSnapshots operation.
+func (c *Redshift) DescribeClusterSnapshotsRequest(input *DescribeClusterSnapshotsInput) (req *request.Request, output *DescribeClusterSnapshotsOutput) {
+    op := &request.Operation{
+        Name:       opDescribeClusterSnapshots,
+        HTTPMethod: "POST",
+        HTTPPath:   "/",
+        Paginator: &request.Paginator{
+            InputTokens:     []string{"Marker"},
+            OutputTokens:    []string{"Marker"},
+            LimitToken:      "MaxRecords",
+            TruncationToken: "",
+        },
+    }
+
+    if input == nil {
+        input = &DescribeClusterSnapshotsInput{}
+    }
+
+    req = c.newRequest(op, input, output)
+    output = &DescribeClusterSnapshotsOutput{}
+    req.Data = output
+    return
+}
+
+// Returns one or more snapshot objects, which contain metadata about your cluster
+// snapshots. By default, this operation returns information about all snapshots
+// of all clusters that are owned by your AWS customer account. No information
+// is returned for snapshots owned by inactive AWS customer accounts.
+//
+// If you specify both tag keys and tag values in the same request, Amazon
+// Redshift returns all snapshots that match any combination of the specified
+// keys and values. For example, if you have owner and environment for tag keys,
+// and admin and test for tag values, all snapshots that have any combination
+// of those values are returned. Only snapshots that you own are returned in
+// the response; shared snapshots are not returned with the tag key and tag
+// value request parameters.
+//
+// If both tag keys and values are omitted from the request, snapshots are
+// returned regardless of whether they have tag keys or values associated with
+// them.
+func (c *Redshift) DescribeClusterSnapshots(input *DescribeClusterSnapshotsInput) (*DescribeClusterSnapshotsOutput, error) {
+    req, out := c.DescribeClusterSnapshotsRequest(input)
+    err := req.Send()
+    return out, err
+}
+
+func (c *Redshift) DescribeClusterSnapshotsPages(input *DescribeClusterSnapshotsInput, fn func(p *DescribeClusterSnapshotsOutput, lastPage bool) (shouldContinue bool)) error {
+    page, _ := c.DescribeClusterSnapshotsRequest(input)
+    page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator"))
+    return page.EachPage(func(p interface{}, lastPage bool) bool {
+        return fn(p.(*DescribeClusterSnapshotsOutput), lastPage)
+    })
+}
+
+const opDescribeClusterSubnetGroups = "DescribeClusterSubnetGroups"
+
+// DescribeClusterSubnetGroupsRequest generates a request for the DescribeClusterSubnetGroups operation.
+func (c *Redshift) DescribeClusterSubnetGroupsRequest(input *DescribeClusterSubnetGroupsInput) (req *request.Request, output *DescribeClusterSubnetGroupsOutput) {
+    op := &request.Operation{
+        Name:       opDescribeClusterSubnetGroups,
+        HTTPMethod: "POST",
+        HTTPPath:   "/",
+        Paginator: &request.Paginator{
+            InputTokens:     []string{"Marker"},
+            OutputTokens:    []string{"Marker"},
+            LimitToken:      "MaxRecords",
+            TruncationToken: "",
+        },
+    }
+
+    if input == nil {
+        input = &DescribeClusterSubnetGroupsInput{}
+    }
+
+    req = c.newRequest(op, input, output)
+    output = &DescribeClusterSubnetGroupsOutput{}
+    req.Data = output
+    return
+}
+
+// Returns one or more cluster subnet group objects, which contain metadata
+// about your cluster subnet groups. By default, this operation returns information
+// about all cluster subnet groups that are defined in your AWS account.
+//
+// If you specify both tag keys and tag values in the same request, Amazon
+// Redshift returns all subnet groups that match any combination of the specified
+// keys and values. For example, if you have owner and environment for tag keys,
+// and admin and test for tag values, all subnet groups that have any combination
+// of those values are returned.
+//
+// If both tag keys and values are omitted from the request, subnet groups
+// are returned regardless of whether they have tag keys or values associated
+// with them.
+func (c *Redshift) DescribeClusterSubnetGroups(input *DescribeClusterSubnetGroupsInput) (*DescribeClusterSubnetGroupsOutput, error) {
+    req, out := c.DescribeClusterSubnetGroupsRequest(input)
+    err := req.Send()
+    return out, err
+}
+
+func (c *Redshift) DescribeClusterSubnetGroupsPages(input *DescribeClusterSubnetGroupsInput, fn func(p *DescribeClusterSubnetGroupsOutput, lastPage bool) (shouldContinue bool)) error {
+    page, _ := c.DescribeClusterSubnetGroupsRequest(input)
+    page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator"))
+    return page.EachPage(func(p interface{}, lastPage bool) bool {
+        return fn(p.(*DescribeClusterSubnetGroupsOutput), lastPage)
+    })
+}
+
+const opDescribeClusterVersions = "DescribeClusterVersions"
+
+// DescribeClusterVersionsRequest generates a request for the DescribeClusterVersions operation.
+func (c *Redshift) DescribeClusterVersionsRequest(input *DescribeClusterVersionsInput) (req *request.Request, output *DescribeClusterVersionsOutput) {
+    op := &request.Operation{
+        Name:       opDescribeClusterVersions,
+        HTTPMethod: "POST",
+        HTTPPath:   "/",
+        Paginator: &request.Paginator{
+            InputTokens:     []string{"Marker"},
+            OutputTokens:    []string{"Marker"},
+            LimitToken:      "MaxRecords",
+            TruncationToken: "",
+        },
+    }
+
+    if input == nil {
+        input = &DescribeClusterVersionsInput{}
+    }
+
+    req = c.newRequest(op, input, output)
+    output = &DescribeClusterVersionsOutput{}
+    req.Data = output
+    return
+}
+
+// Returns descriptions of the available Amazon Redshift cluster versions. You
+// can call this operation even before creating any clusters to learn more about
+// the Amazon Redshift versions. For more information about managing clusters,
+// go to Amazon Redshift Clusters (http://docs.aws.amazon.com/redshift/latest/mgmt/working-with-clusters.html)
+// in the Amazon Redshift Cluster Management Guide.
+func (c *Redshift) DescribeClusterVersions(input *DescribeClusterVersionsInput) (*DescribeClusterVersionsOutput, error) {
+    req, out := c.DescribeClusterVersionsRequest(input)
+    err := req.Send()
+    return out, err
+}
+
+func (c *Redshift) DescribeClusterVersionsPages(input *DescribeClusterVersionsInput, fn func(p *DescribeClusterVersionsOutput, lastPage bool) (shouldContinue bool)) error {
+    page, _ := c.DescribeClusterVersionsRequest(input)
+    page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator"))
+    return page.EachPage(func(p interface{}, lastPage bool) bool {
+        return fn(p.(*DescribeClusterVersionsOutput), lastPage)
+    })
+}
+
+const opDescribeClusters = "DescribeClusters"
+
+// DescribeClustersRequest generates a request for the DescribeClusters operation.
+func (c *Redshift) DescribeClustersRequest(input *DescribeClustersInput) (req *request.Request, output *DescribeClustersOutput) {
+    op := &request.Operation{
+        Name:       opDescribeClusters,
+        HTTPMethod: "POST",
+        HTTPPath:   "/",
+        Paginator: &request.Paginator{
+            InputTokens:     []string{"Marker"},
+            OutputTokens:    []string{"Marker"},
+            LimitToken:      "MaxRecords",
+            TruncationToken: "",
+        },
+    }
+
+    if input == nil {
+        input = &DescribeClustersInput{}
+    }
+
+    req = c.newRequest(op, input, output)
+    output = &DescribeClustersOutput{}
+    req.Data = output
+    return
+}
+
+// Returns properties of provisioned clusters including general cluster properties,
+// cluster database properties, maintenance and backup properties, and security
+// and access properties. This operation supports pagination. For more information
+// about managing clusters, go to Amazon Redshift Clusters (http://docs.aws.amazon.com/redshift/latest/mgmt/working-with-clusters.html)
+// in the Amazon Redshift Cluster Management Guide.
+//
+// If you specify both tag keys and tag values in the same request, Amazon
+// Redshift returns all clusters that match any combination of the specified
+// keys and values. For example, if you have owner and environment for tag keys,
+// and admin and test for tag values, all clusters that have any combination
+// of those values are returned.
+//
+// If both tag keys and values are omitted from the request, clusters are returned
+// regardless of whether they have tag keys or values associated with them.
+func (c *Redshift) DescribeClusters(input *DescribeClustersInput) (*DescribeClustersOutput, error) {
+    req, out := c.DescribeClustersRequest(input)
+    err := req.Send()
+    return out, err
+}
+
+func (c *Redshift) DescribeClustersPages(input *DescribeClustersInput, fn func(p *DescribeClustersOutput, lastPage bool) (shouldContinue bool)) error {
+    page, _ := c.DescribeClustersRequest(input)
+    page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator"))
+    return page.EachPage(func(p interface{}, lastPage bool) bool {
+        return fn(p.(*DescribeClustersOutput), lastPage)
+    })
+}
+
+const opDescribeDefaultClusterParameters = "DescribeDefaultClusterParameters"
+
+// DescribeDefaultClusterParametersRequest generates a request for the DescribeDefaultClusterParameters operation.
+func (c *Redshift) DescribeDefaultClusterParametersRequest(input *DescribeDefaultClusterParametersInput) (req *request.Request, output *DescribeDefaultClusterParametersOutput) {
+    op := &request.Operation{
+        Name:       opDescribeDefaultClusterParameters,
+        HTTPMethod: "POST",
+        HTTPPath:   "/",
+        Paginator: &request.Paginator{
+            InputTokens:     []string{"Marker"},
+            OutputTokens:    []string{"DefaultClusterParameters.Marker"},
+            LimitToken:      "MaxRecords",
+            TruncationToken: "",
+        },
+    }
+
+    if input == nil {
+        input = &DescribeDefaultClusterParametersInput{}
+    }
+
+    req = c.newRequest(op, input, output)
+    output = &DescribeDefaultClusterParametersOutput{}
+    req.Data = output
+    return
+}
+
+// Returns a list of parameter settings for the specified parameter group family.
+//
+// For more information about parameters and parameter groups, go to Amazon
+// Redshift Parameter Groups (http://docs.aws.amazon.com/redshift/latest/mgmt/working-with-parameter-groups.html)
+// in the Amazon Redshift Cluster Management Guide.
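+//
+// A minimal illustrative call (not part of the generated API; svc and the
+// family name are assumed values):
+//
+//    out, err := svc.DescribeDefaultClusterParameters(&DescribeDefaultClusterParametersInput{
+//        ParameterGroupFamily: aws.String("redshift-1.0"),
+//    })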
+func (c *Redshift) DescribeDefaultClusterParameters(input *DescribeDefaultClusterParametersInput) (*DescribeDefaultClusterParametersOutput, error) {
+    req, out := c.DescribeDefaultClusterParametersRequest(input)
+    err := req.Send()
+    return out, err
+}
+
+func (c *Redshift) DescribeDefaultClusterParametersPages(input *DescribeDefaultClusterParametersInput, fn func(p *DescribeDefaultClusterParametersOutput, lastPage bool) (shouldContinue bool)) error {
+    page, _ := c.DescribeDefaultClusterParametersRequest(input)
+    page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator"))
+    return page.EachPage(func(p interface{}, lastPage bool) bool {
+        return fn(p.(*DescribeDefaultClusterParametersOutput), lastPage)
+    })
+}
+
+const opDescribeEventCategories = "DescribeEventCategories"
+
+// DescribeEventCategoriesRequest generates a request for the DescribeEventCategories operation.
+func (c *Redshift) DescribeEventCategoriesRequest(input *DescribeEventCategoriesInput) (req *request.Request, output *DescribeEventCategoriesOutput) {
+    op := &request.Operation{
+        Name:       opDescribeEventCategories,
+        HTTPMethod: "POST",
+        HTTPPath:   "/",
+    }
+
+    if input == nil {
+        input = &DescribeEventCategoriesInput{}
+    }
+
+    req = c.newRequest(op, input, output)
+    output = &DescribeEventCategoriesOutput{}
+    req.Data = output
+    return
+}
+
+// Displays a list of event categories for all event source types, or for a
+// specified source type. For a list of the event categories and source types,
+// go to Amazon Redshift Event Notifications (http://docs.aws.amazon.com/redshift/latest/mgmt/working-with-event-notifications.html).
+func (c *Redshift) DescribeEventCategories(input *DescribeEventCategoriesInput) (*DescribeEventCategoriesOutput, error) {
+    req, out := c.DescribeEventCategoriesRequest(input)
+    err := req.Send()
+    return out, err
+}
+
+const opDescribeEventSubscriptions = "DescribeEventSubscriptions"
+
+// DescribeEventSubscriptionsRequest generates a request for the DescribeEventSubscriptions operation.
+func (c *Redshift) DescribeEventSubscriptionsRequest(input *DescribeEventSubscriptionsInput) (req *request.Request, output *DescribeEventSubscriptionsOutput) {
+    op := &request.Operation{
+        Name:       opDescribeEventSubscriptions,
+        HTTPMethod: "POST",
+        HTTPPath:   "/",
+        Paginator: &request.Paginator{
+            InputTokens:     []string{"Marker"},
+            OutputTokens:    []string{"Marker"},
+            LimitToken:      "MaxRecords",
+            TruncationToken: "",
+        },
+    }
+
+    if input == nil {
+        input = &DescribeEventSubscriptionsInput{}
+    }
+
+    req = c.newRequest(op, input, output)
+    output = &DescribeEventSubscriptionsOutput{}
+    req.Data = output
+    return
+}
+
+// Lists descriptions of all the Amazon Redshift event notification subscriptions
+// for a customer account. If you specify a subscription name, lists the description
+// for that subscription.
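+//
+// The paginated variant defined below can walk every page. As an illustrative
+// sketch only (not part of the generated API; svc is an assumed client and
+// EventSubscriptionsList is the output field name assumed from the shapes
+// defined later in this file):
+//
+//    count := 0
+//    err := svc.DescribeEventSubscriptionsPages(&DescribeEventSubscriptionsInput{},
+//        func(page *DescribeEventSubscriptionsOutput, lastPage bool) bool {
+//            count += len(page.EventSubscriptionsList)
+//            return true // keep paging until the marker is exhausted
+//        })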
+func (c *Redshift) DescribeEventSubscriptions(input *DescribeEventSubscriptionsInput) (*DescribeEventSubscriptionsOutput, error) {
+    req, out := c.DescribeEventSubscriptionsRequest(input)
+    err := req.Send()
+    return out, err
+}
+
+func (c *Redshift) DescribeEventSubscriptionsPages(input *DescribeEventSubscriptionsInput, fn func(p *DescribeEventSubscriptionsOutput, lastPage bool) (shouldContinue bool)) error {
+    page, _ := c.DescribeEventSubscriptionsRequest(input)
+    page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator"))
+    return page.EachPage(func(p interface{}, lastPage bool) bool {
+        return fn(p.(*DescribeEventSubscriptionsOutput), lastPage)
+    })
+}
+
+const opDescribeEvents = "DescribeEvents"
+
+// DescribeEventsRequest generates a request for the DescribeEvents operation.
+func (c *Redshift) DescribeEventsRequest(input *DescribeEventsInput) (req *request.Request, output *DescribeEventsOutput) {
+    op := &request.Operation{
+        Name:       opDescribeEvents,
+        HTTPMethod: "POST",
+        HTTPPath:   "/",
+        Paginator: &request.Paginator{
+            InputTokens:     []string{"Marker"},
+            OutputTokens:    []string{"Marker"},
+            LimitToken:      "MaxRecords",
+            TruncationToken: "",
+        },
+    }
+
+    if input == nil {
+        input = &DescribeEventsInput{}
+    }
+
+    req = c.newRequest(op, input, output)
+    output = &DescribeEventsOutput{}
+    req.Data = output
+    return
+}
+
+// Returns events related to clusters, security groups, snapshots, and parameter
+// groups for the past 14 days. Events specific to a particular cluster, security
+// group, snapshot, or parameter group can be obtained by providing the name
+// as a parameter. By default, events from the past hour are returned.
+func (c *Redshift) DescribeEvents(input *DescribeEventsInput) (*DescribeEventsOutput, error) {
+    req, out := c.DescribeEventsRequest(input)
+    err := req.Send()
+    return out, err
+}
+
+func (c *Redshift) DescribeEventsPages(input *DescribeEventsInput, fn func(p *DescribeEventsOutput, lastPage bool) (shouldContinue bool)) error {
+    page, _ := c.DescribeEventsRequest(input)
+    page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator"))
+    return page.EachPage(func(p interface{}, lastPage bool) bool {
+        return fn(p.(*DescribeEventsOutput), lastPage)
+    })
+}
+
+const opDescribeHsmClientCertificates = "DescribeHsmClientCertificates"
+
+// DescribeHsmClientCertificatesRequest generates a request for the DescribeHsmClientCertificates operation.
+func (c *Redshift) DescribeHsmClientCertificatesRequest(input *DescribeHsmClientCertificatesInput) (req *request.Request, output *DescribeHsmClientCertificatesOutput) {
+    op := &request.Operation{
+        Name:       opDescribeHsmClientCertificates,
+        HTTPMethod: "POST",
+        HTTPPath:   "/",
+        Paginator: &request.Paginator{
+            InputTokens:     []string{"Marker"},
+            OutputTokens:    []string{"Marker"},
+            LimitToken:      "MaxRecords",
+            TruncationToken: "",
+        },
+    }
+
+    if input == nil {
+        input = &DescribeHsmClientCertificatesInput{}
+    }
+
+    req = c.newRequest(op, input, output)
+    output = &DescribeHsmClientCertificatesOutput{}
+    req.Data = output
+    return
+}
+
+// Returns information about the specified HSM client certificate. If no certificate
+// ID is specified, returns information about all the HSM certificates owned
+// by your AWS customer account.
+//
+// If you specify both tag keys and tag values in the same request, Amazon
+// Redshift returns all HSM client certificates that match any combination of
+// the specified keys and values. For example, if you have owner and environment
+// for tag keys, and admin and test for tag values, all HSM client certificates
+// that have any combination of those values are returned.
+//
+// If both tag keys and values are omitted from the request, HSM client certificates
+// are returned regardless of whether they have tag keys or values associated
+// with them.
+func (c *Redshift) DescribeHsmClientCertificates(input *DescribeHsmClientCertificatesInput) (*DescribeHsmClientCertificatesOutput, error) {
+    req, out := c.DescribeHsmClientCertificatesRequest(input)
+    err := req.Send()
+    return out, err
+}
+
+func (c *Redshift) DescribeHsmClientCertificatesPages(input *DescribeHsmClientCertificatesInput, fn func(p *DescribeHsmClientCertificatesOutput, lastPage bool) (shouldContinue bool)) error {
+    page, _ := c.DescribeHsmClientCertificatesRequest(input)
+    page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator"))
+    return page.EachPage(func(p interface{}, lastPage bool) bool {
+        return fn(p.(*DescribeHsmClientCertificatesOutput), lastPage)
+    })
+}
+
+const opDescribeHsmConfigurations = "DescribeHsmConfigurations"
+
+// DescribeHsmConfigurationsRequest generates a request for the DescribeHsmConfigurations operation.
+func (c *Redshift) DescribeHsmConfigurationsRequest(input *DescribeHsmConfigurationsInput) (req *request.Request, output *DescribeHsmConfigurationsOutput) {
+    op := &request.Operation{
+        Name:       opDescribeHsmConfigurations,
+        HTTPMethod: "POST",
+        HTTPPath:   "/",
+        Paginator: &request.Paginator{
+            InputTokens:     []string{"Marker"},
+            OutputTokens:    []string{"Marker"},
+            LimitToken:      "MaxRecords",
+            TruncationToken: "",
+        },
+    }
+
+    if input == nil {
+        input = &DescribeHsmConfigurationsInput{}
+    }
+
+    req = c.newRequest(op, input, output)
+    output = &DescribeHsmConfigurationsOutput{}
+    req.Data = output
+    return
+}
+
+// Returns information about the specified Amazon Redshift HSM configuration.
+// If no configuration ID is specified, returns information about all the HSM
+// configurations owned by your AWS customer account.
+//
+// If you specify both tag keys and tag values in the same request, Amazon
+// Redshift returns all HSM configurations that match any combination of the
+// specified keys and values. For example, if you have owner and environment
+// for tag keys, and admin and test for tag values, all HSM configurations that
+// have any combination of those values are returned.
+//
+// If both tag keys and values are omitted from the request, HSM configurations
+// are returned regardless of whether they have tag keys or values associated
+// with them.
+func (c *Redshift) DescribeHsmConfigurations(input *DescribeHsmConfigurationsInput) (*DescribeHsmConfigurationsOutput, error) {
+    req, out := c.DescribeHsmConfigurationsRequest(input)
+    err := req.Send()
+    return out, err
+}
+
+func (c *Redshift) DescribeHsmConfigurationsPages(input *DescribeHsmConfigurationsInput, fn func(p *DescribeHsmConfigurationsOutput, lastPage bool) (shouldContinue bool)) error {
+    page, _ := c.DescribeHsmConfigurationsRequest(input)
+    page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator"))
+    return page.EachPage(func(p interface{}, lastPage bool) bool {
+        return fn(p.(*DescribeHsmConfigurationsOutput), lastPage)
+    })
+}
+
+const opDescribeLoggingStatus = "DescribeLoggingStatus"
+
+// DescribeLoggingStatusRequest generates a request for the DescribeLoggingStatus operation.
+func (c *Redshift) DescribeLoggingStatusRequest(input *DescribeLoggingStatusInput) (req *request.Request, output *LoggingStatus) {
+    op := &request.Operation{
+        Name:       opDescribeLoggingStatus,
+        HTTPMethod: "POST",
+        HTTPPath:   "/",
+    }
+
+    if input == nil {
+        input = &DescribeLoggingStatusInput{}
+    }
+
+    req = c.newRequest(op, input, output)
+    output = &LoggingStatus{}
+    req.Data = output
+    return
+}
+
+// Describes whether information, such as queries and connection attempts, is
+// being logged for the specified Amazon Redshift cluster.
+func (c *Redshift) DescribeLoggingStatus(input *DescribeLoggingStatusInput) (*LoggingStatus, error) {
+    req, out := c.DescribeLoggingStatusRequest(input)
+    err := req.Send()
+    return out, err
+}
+
+const opDescribeOrderableClusterOptions = "DescribeOrderableClusterOptions"
+
+// DescribeOrderableClusterOptionsRequest generates a request for the DescribeOrderableClusterOptions operation.
+func (c *Redshift) DescribeOrderableClusterOptionsRequest(input *DescribeOrderableClusterOptionsInput) (req *request.Request, output *DescribeOrderableClusterOptionsOutput) {
+    op := &request.Operation{
+        Name:       opDescribeOrderableClusterOptions,
+        HTTPMethod: "POST",
+        HTTPPath:   "/",
+        Paginator: &request.Paginator{
+            InputTokens:     []string{"Marker"},
+            OutputTokens:    []string{"Marker"},
+            LimitToken:      "MaxRecords",
+            TruncationToken: "",
+        },
+    }
+
+    if input == nil {
+        input = &DescribeOrderableClusterOptionsInput{}
+    }
+
+    req = c.newRequest(op, input, output)
+    output = &DescribeOrderableClusterOptionsOutput{}
+    req.Data = output
+    return
+}
+
+// Returns a list of orderable cluster options. Before you create a new cluster
+// you can use this operation to find what options are available, such as the
+// EC2 Availability Zones (AZ) in the specific AWS region that you can specify,
+// and the node types you can request. The node types differ by available storage,
+// memory, CPU, and price. Given the costs involved, you might want to obtain a
+// list of cluster options in the specific region and specify values when creating
+// a cluster. For more information about managing clusters, go to Amazon Redshift
+// Clusters (http://docs.aws.amazon.com/redshift/latest/mgmt/working-with-clusters.html)
+// in the Amazon Redshift Cluster Management Guide.
+func (c *Redshift) DescribeOrderableClusterOptions(input *DescribeOrderableClusterOptionsInput) (*DescribeOrderableClusterOptionsOutput, error) {
+    req, out := c.DescribeOrderableClusterOptionsRequest(input)
+    err := req.Send()
+    return out, err
+}
+
+func (c *Redshift) DescribeOrderableClusterOptionsPages(input *DescribeOrderableClusterOptionsInput, fn func(p *DescribeOrderableClusterOptionsOutput, lastPage bool) (shouldContinue bool)) error {
+    page, _ := c.DescribeOrderableClusterOptionsRequest(input)
+    page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator"))
+    return page.EachPage(func(p interface{}, lastPage bool) bool {
+        return fn(p.(*DescribeOrderableClusterOptionsOutput), lastPage)
+    })
+}
+
+const opDescribeReservedNodeOfferings = "DescribeReservedNodeOfferings"
+
+// DescribeReservedNodeOfferingsRequest generates a request for the DescribeReservedNodeOfferings operation.
+func (c *Redshift) DescribeReservedNodeOfferingsRequest(input *DescribeReservedNodeOfferingsInput) (req *request.Request, output *DescribeReservedNodeOfferingsOutput) {
+    op := &request.Operation{
+        Name:       opDescribeReservedNodeOfferings,
+        HTTPMethod: "POST",
+        HTTPPath:   "/",
+        Paginator: &request.Paginator{
+            InputTokens:     []string{"Marker"},
+            OutputTokens:    []string{"Marker"},
+            LimitToken:      "MaxRecords",
+            TruncationToken: "",
+        },
+    }
+
+    if input == nil {
+        input = &DescribeReservedNodeOfferingsInput{}
+    }
+
+    req = c.newRequest(op, input, output)
+    output = &DescribeReservedNodeOfferingsOutput{}
+    req.Data = output
+    return
+}
+
+// Returns a list of the available reserved node offerings by Amazon Redshift
+// with their descriptions, including the node type, the fixed and recurring
+// costs of reserving the node, and the duration the node will be reserved for
+// you. These descriptions help you determine which reserved node offering you
+// want to purchase. You then use the unique offering ID in your call to PurchaseReservedNodeOffering
+// to reserve one or more nodes for your Amazon Redshift cluster.
+//
+// For more information about reserved node offerings, go to Purchasing Reserved
+// Nodes (http://docs.aws.amazon.com/redshift/latest/mgmt/purchase-reserved-node-instance.html)
+// in the Amazon Redshift Cluster Management Guide.
+func (c *Redshift) DescribeReservedNodeOfferings(input *DescribeReservedNodeOfferingsInput) (*DescribeReservedNodeOfferingsOutput, error) {
+    req, out := c.DescribeReservedNodeOfferingsRequest(input)
+    err := req.Send()
+    return out, err
+}
+
+func (c *Redshift) DescribeReservedNodeOfferingsPages(input *DescribeReservedNodeOfferingsInput, fn func(p *DescribeReservedNodeOfferingsOutput, lastPage bool) (shouldContinue bool)) error {
+    page, _ := c.DescribeReservedNodeOfferingsRequest(input)
+    page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator"))
+    return page.EachPage(func(p interface{}, lastPage bool) bool {
+        return fn(p.(*DescribeReservedNodeOfferingsOutput), lastPage)
+    })
+}
+
+const opDescribeReservedNodes = "DescribeReservedNodes"
+
+// DescribeReservedNodesRequest generates a request for the DescribeReservedNodes operation.
+func (c *Redshift) DescribeReservedNodesRequest(input *DescribeReservedNodesInput) (req *request.Request, output *DescribeReservedNodesOutput) {
+    op := &request.Operation{
+        Name:       opDescribeReservedNodes,
+        HTTPMethod: "POST",
+        HTTPPath:   "/",
+        Paginator: &request.Paginator{
+            InputTokens:     []string{"Marker"},
+            OutputTokens:    []string{"Marker"},
+            LimitToken:      "MaxRecords",
+            TruncationToken: "",
+        },
+    }
+
+    if input == nil {
+        input = &DescribeReservedNodesInput{}
+    }
+
+    req = c.newRequest(op, input, output)
+    output = &DescribeReservedNodesOutput{}
+    req.Data = output
+    return
+}
+
+// Returns the descriptions of the reserved nodes.
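+//
+// As an illustrative sketch of the paginated variant defined below (not part
+// of the generated API; svc is an assumed client and ReservedNodes is the
+// output field name assumed from the shapes defined later in this file):
+//
+//    var nodes []*ReservedNode
+//    err := svc.DescribeReservedNodesPages(&DescribeReservedNodesInput{},
+//        func(page *DescribeReservedNodesOutput, lastPage bool) bool {
+//            nodes = append(nodes, page.ReservedNodes...)
+//            return true // collect every page
+//        })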
+func (c *Redshift) DescribeReservedNodes(input *DescribeReservedNodesInput) (*DescribeReservedNodesOutput, error) {
+    req, out := c.DescribeReservedNodesRequest(input)
+    err := req.Send()
+    return out, err
+}
+
+func (c *Redshift) DescribeReservedNodesPages(input *DescribeReservedNodesInput, fn func(p *DescribeReservedNodesOutput, lastPage bool) (shouldContinue bool)) error {
+    page, _ := c.DescribeReservedNodesRequest(input)
+    page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator"))
+    return page.EachPage(func(p interface{}, lastPage bool) bool {
+        return fn(p.(*DescribeReservedNodesOutput), lastPage)
+    })
+}
+
+const opDescribeResize = "DescribeResize"
+
+// DescribeResizeRequest generates a request for the DescribeResize operation.
+func (c *Redshift) DescribeResizeRequest(input *DescribeResizeInput) (req *request.Request, output *DescribeResizeOutput) {
+    op := &request.Operation{
+        Name:       opDescribeResize,
+        HTTPMethod: "POST",
+        HTTPPath:   "/",
+    }
+
+    if input == nil {
+        input = &DescribeResizeInput{}
+    }
+
+    req = c.newRequest(op, input, output)
+    output = &DescribeResizeOutput{}
+    req.Data = output
+    return
+}
+
+// Returns information about the last resize operation for the specified cluster.
+// If no resize operation has ever been initiated for the specified cluster,
+// an HTTP 404 error is returned. If a resize operation was initiated and completed,
+// the status of the resize remains as SUCCEEDED until the next resize.
+//
+// A resize operation can be requested using ModifyCluster and specifying
+// a different number or type of nodes for the cluster.
+func (c *Redshift) DescribeResize(input *DescribeResizeInput) (*DescribeResizeOutput, error) {
+    req, out := c.DescribeResizeRequest(input)
+    err := req.Send()
+    return out, err
+}
+
+const opDescribeSnapshotCopyGrants = "DescribeSnapshotCopyGrants"
+
+// DescribeSnapshotCopyGrantsRequest generates a request for the DescribeSnapshotCopyGrants operation.
+func (c *Redshift) DescribeSnapshotCopyGrantsRequest(input *DescribeSnapshotCopyGrantsInput) (req *request.Request, output *DescribeSnapshotCopyGrantsOutput) {
+    op := &request.Operation{
+        Name:       opDescribeSnapshotCopyGrants,
+        HTTPMethod: "POST",
+        HTTPPath:   "/",
+    }
+
+    if input == nil {
+        input = &DescribeSnapshotCopyGrantsInput{}
+    }
+
+    req = c.newRequest(op, input, output)
+    output = &DescribeSnapshotCopyGrantsOutput{}
+    req.Data = output
+    return
+}
+
+// Returns a list of snapshot copy grants owned by the AWS account in the destination
+// region.
+//
+// For more information about managing snapshot copy grants, go to Amazon
+// Redshift Database Encryption (http://docs.aws.amazon.com/redshift/latest/mgmt/working-with-db-encryption.html)
+// in the Amazon Redshift Cluster Management Guide.
+func (c *Redshift) DescribeSnapshotCopyGrants(input *DescribeSnapshotCopyGrantsInput) (*DescribeSnapshotCopyGrantsOutput, error) {
+    req, out := c.DescribeSnapshotCopyGrantsRequest(input)
+    err := req.Send()
+    return out, err
+}
+
+const opDescribeTags = "DescribeTags"
+
+// DescribeTagsRequest generates a request for the DescribeTags operation.
+func (c *Redshift) DescribeTagsRequest(input *DescribeTagsInput) (req *request.Request, output *DescribeTagsOutput) {
+    op := &request.Operation{
+        Name:       opDescribeTags,
+        HTTPMethod: "POST",
+        HTTPPath:   "/",
+    }
+
+    if input == nil {
+        input = &DescribeTagsInput{}
+    }
+
+    req = c.newRequest(op, input, output)
+    output = &DescribeTagsOutput{}
+    req.Data = output
+    return
+}
+
+// Returns a list of tags. You can return tags from a specific resource by specifying
+// an ARN, or you can return all tags for a given type of resource, such as
+// clusters, snapshots, and so on.
+//
+// The following are limitations for DescribeTags:
+//
+//    You cannot specify an ARN and a resource-type value together in the same
+//    request.
+//
+//    You cannot use the MaxRecords and Marker parameters together with the
+//    ARN parameter.
+//
+//    The MaxRecords parameter can range from 10 to 50 results to return in
+//    a request.
+//
+// If you specify both tag keys and tag values in the same request, Amazon
+// Redshift returns all resources that match any combination of the specified
+// keys and values. For example, if you have owner and environment for tag keys,
+// and admin and test for tag values, all resources that have any combination
+// of those values are returned.
+//
+// If both tag keys and values are omitted from the request, resources are
+// returned regardless of whether they have tag keys or values associated with
+// them.
+func (c *Redshift) DescribeTags(input *DescribeTagsInput) (*DescribeTagsOutput, error) {
+    req, out := c.DescribeTagsRequest(input)
+    err := req.Send()
+    return out, err
+}
+
+const opDisableLogging = "DisableLogging"
+
+// DisableLoggingRequest generates a request for the DisableLogging operation.
+func (c *Redshift) DisableLoggingRequest(input *DisableLoggingInput) (req *request.Request, output *LoggingStatus) {
+    op := &request.Operation{
+        Name:       opDisableLogging,
+        HTTPMethod: "POST",
+        HTTPPath:   "/",
+    }
+
+    if input == nil {
+        input = &DisableLoggingInput{}
+    }
+
+    req = c.newRequest(op, input, output)
+    output = &LoggingStatus{}
+    req.Data = output
+    return
+}
+
+// Stops logging information, such as queries and connection attempts, for the
+// specified Amazon Redshift cluster.
+func (c *Redshift) DisableLogging(input *DisableLoggingInput) (*LoggingStatus, error) {
+    req, out := c.DisableLoggingRequest(input)
+    err := req.Send()
+    return out, err
+}
+
+const opDisableSnapshotCopy = "DisableSnapshotCopy"
+
+// DisableSnapshotCopyRequest generates a request for the DisableSnapshotCopy operation.
+func (c *Redshift) DisableSnapshotCopyRequest(input *DisableSnapshotCopyInput) (req *request.Request, output *DisableSnapshotCopyOutput) {
+    op := &request.Operation{
+        Name:       opDisableSnapshotCopy,
+        HTTPMethod: "POST",
+        HTTPPath:   "/",
+    }
+
+    if input == nil {
+        input = &DisableSnapshotCopyInput{}
+    }
+
+    req = c.newRequest(op, input, output)
+    output = &DisableSnapshotCopyOutput{}
+    req.Data = output
+    return
+}
+
+// Disables the automatic copying of snapshots from one region to another region
+// for a specified cluster.
+//
+// If your cluster and its snapshots are encrypted using a customer master
+// key (CMK) from AWS KMS, use DeleteSnapshotCopyGrant to delete the grant that
+// grants Amazon Redshift permission to the CMK in the destination region.
+func (c *Redshift) DisableSnapshotCopy(input *DisableSnapshotCopyInput) (*DisableSnapshotCopyOutput, error) {
+    req, out := c.DisableSnapshotCopyRequest(input)
+    err := req.Send()
+    return out, err
+}
+
+const opEnableLogging = "EnableLogging"
+
+// EnableLoggingRequest generates a request for the EnableLogging operation.
+func (c *Redshift) EnableLoggingRequest(input *EnableLoggingInput) (req *request.Request, output *LoggingStatus) {
+    op := &request.Operation{
+        Name:       opEnableLogging,
+        HTTPMethod: "POST",
+        HTTPPath:   "/",
+    }
+
+    if input == nil {
+        input = &EnableLoggingInput{}
+    }
+
+    req = c.newRequest(op, input, output)
+    output = &LoggingStatus{}
+    req.Data = output
+    return
+}
+
+// Starts logging information, such as queries and connection attempts, for
+// the specified Amazon Redshift cluster.
+func (c *Redshift) EnableLogging(input *EnableLoggingInput) (*LoggingStatus, error) {
+    req, out := c.EnableLoggingRequest(input)
+    err := req.Send()
+    return out, err
+}
+
+const opEnableSnapshotCopy = "EnableSnapshotCopy"
+
+// EnableSnapshotCopyRequest generates a request for the EnableSnapshotCopy operation.
+func (c *Redshift) EnableSnapshotCopyRequest(input *EnableSnapshotCopyInput) (req *request.Request, output *EnableSnapshotCopyOutput) {
+    op := &request.Operation{
+        Name:       opEnableSnapshotCopy,
+        HTTPMethod: "POST",
+        HTTPPath:   "/",
+    }
+
+    if input == nil {
+        input = &EnableSnapshotCopyInput{}
+    }
+
+    req = c.newRequest(op, input, output)
+    output = &EnableSnapshotCopyOutput{}
+    req.Data = output
+    return
+}
+
+// Enables the automatic copy of snapshots from one region to another region
+// for a specified cluster.
+func (c *Redshift) EnableSnapshotCopy(input *EnableSnapshotCopyInput) (*EnableSnapshotCopyOutput, error) {
+    req, out := c.EnableSnapshotCopyRequest(input)
+    err := req.Send()
+    return out, err
+}
+
+const opModifyCluster = "ModifyCluster"
+
+// ModifyClusterRequest generates a request for the ModifyCluster operation.
+func (c *Redshift) ModifyClusterRequest(input *ModifyClusterInput) (req *request.Request, output *ModifyClusterOutput) {
+    op := &request.Operation{
+        Name:       opModifyCluster,
+        HTTPMethod: "POST",
+        HTTPPath:   "/",
+    }
+
+    if input == nil {
+        input = &ModifyClusterInput{}
+    }
+
+    req = c.newRequest(op, input, output)
+    output = &ModifyClusterOutput{}
+    req.Data = output
+    return
+}
+
+// Modifies the settings for a cluster. For example, you can add another security
+// or parameter group, update the preferred maintenance window, or change the
+// master user password. Resetting a cluster password or modifying the security
+// groups associated with a cluster does not require a reboot. However, modifying
+// a parameter group requires a reboot for parameters to take effect. For more
+// information about managing clusters, go to Amazon Redshift Clusters (http://docs.aws.amazon.com/redshift/latest/mgmt/working-with-clusters.html)
+// in the Amazon Redshift Cluster Management Guide.
+//
+// You can also change the node type and the number of nodes to scale up or down
+// the cluster. When resizing a cluster, you must specify both the number of
+// nodes and the node type even if one of the parameters does not change.
+func (c *Redshift) ModifyCluster(input *ModifyClusterInput) (*ModifyClusterOutput, error) {
+    req, out := c.ModifyClusterRequest(input)
+    err := req.Send()
+    return out, err
+}
+
+const opModifyClusterParameterGroup = "ModifyClusterParameterGroup"
+
+// ModifyClusterParameterGroupRequest generates a request for the ModifyClusterParameterGroup operation.
+func (c *Redshift) ModifyClusterParameterGroupRequest(input *ModifyClusterParameterGroupInput) (req *request.Request, output *ClusterParameterGroupNameMessage) { + op := &request.Operation{ + Name: opModifyClusterParameterGroup, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ModifyClusterParameterGroupInput{} + } + + req = c.newRequest(op, input, output) + output = &ClusterParameterGroupNameMessage{} + req.Data = output + return +} + +// Modifies the parameters of a parameter group. +// +// For more information about parameters and parameter groups, go to Amazon +// Redshift Parameter Groups (http://docs.aws.amazon.com/redshift/latest/mgmt/working-with-parameter-groups.html) +// in the Amazon Redshift Cluster Management Guide. +func (c *Redshift) ModifyClusterParameterGroup(input *ModifyClusterParameterGroupInput) (*ClusterParameterGroupNameMessage, error) { + req, out := c.ModifyClusterParameterGroupRequest(input) + err := req.Send() + return out, err +} + +const opModifyClusterSubnetGroup = "ModifyClusterSubnetGroup" + +// ModifyClusterSubnetGroupRequest generates a request for the ModifyClusterSubnetGroup operation. +func (c *Redshift) ModifyClusterSubnetGroupRequest(input *ModifyClusterSubnetGroupInput) (req *request.Request, output *ModifyClusterSubnetGroupOutput) { + op := &request.Operation{ + Name: opModifyClusterSubnetGroup, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ModifyClusterSubnetGroupInput{} + } + + req = c.newRequest(op, input, output) + output = &ModifyClusterSubnetGroupOutput{} + req.Data = output + return +} + +// Modifies a cluster subnet group to include the specified list of VPC subnets. +// The operation replaces the existing list of subnets with the new list of +// subnets. +func (c *Redshift) ModifyClusterSubnetGroup(input *ModifyClusterSubnetGroupInput) (*ModifyClusterSubnetGroupOutput, error) { + req, out := c.ModifyClusterSubnetGroupRequest(input) + err := req.Send() + return out, err +} + +const opModifyEventSubscription = "ModifyEventSubscription" + +// ModifyEventSubscriptionRequest generates a request for the ModifyEventSubscription operation. +func (c *Redshift) ModifyEventSubscriptionRequest(input *ModifyEventSubscriptionInput) (req *request.Request, output *ModifyEventSubscriptionOutput) { + op := &request.Operation{ + Name: opModifyEventSubscription, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ModifyEventSubscriptionInput{} + } + + req = c.newRequest(op, input, output) + output = &ModifyEventSubscriptionOutput{} + req.Data = output + return +} + +// Modifies an existing Amazon Redshift event notification subscription. +func (c *Redshift) ModifyEventSubscription(input *ModifyEventSubscriptionInput) (*ModifyEventSubscriptionOutput, error) { + req, out := c.ModifyEventSubscriptionRequest(input) + err := req.Send() + return out, err +} + +const opModifySnapshotCopyRetentionPeriod = "ModifySnapshotCopyRetentionPeriod" + +// ModifySnapshotCopyRetentionPeriodRequest generates a request for the ModifySnapshotCopyRetentionPeriod operation. 
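+//
+// Editorial sketch (not part of the vendored SDK), assuming an existing
+// *Redshift client (svc) and a hypothetical cluster identifier;
+// RetentionPeriod is the number of days to keep copied snapshots in the
+// destination region.
+//
+//	out, err := svc.ModifySnapshotCopyRetentionPeriod(&redshift.ModifySnapshotCopyRetentionPeriodInput{
+//		ClusterIdentifier: aws.String("examplecluster"), // hypothetical cluster
+//		RetentionPeriod:   aws.Int64(7),                 // retain copies for 7 days
+//	})
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	fmt.Println(out)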
+func (c *Redshift) ModifySnapshotCopyRetentionPeriodRequest(input *ModifySnapshotCopyRetentionPeriodInput) (req *request.Request, output *ModifySnapshotCopyRetentionPeriodOutput) { + op := &request.Operation{ + Name: opModifySnapshotCopyRetentionPeriod, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ModifySnapshotCopyRetentionPeriodInput{} + } + + req = c.newRequest(op, input, output) + output = &ModifySnapshotCopyRetentionPeriodOutput{} + req.Data = output + return +} + +// Modifies the number of days to retain automated snapshots in the destination +// region after they are copied from the source region. +func (c *Redshift) ModifySnapshotCopyRetentionPeriod(input *ModifySnapshotCopyRetentionPeriodInput) (*ModifySnapshotCopyRetentionPeriodOutput, error) { + req, out := c.ModifySnapshotCopyRetentionPeriodRequest(input) + err := req.Send() + return out, err +} + +const opPurchaseReservedNodeOffering = "PurchaseReservedNodeOffering" + +// PurchaseReservedNodeOfferingRequest generates a request for the PurchaseReservedNodeOffering operation. +func (c *Redshift) PurchaseReservedNodeOfferingRequest(input *PurchaseReservedNodeOfferingInput) (req *request.Request, output *PurchaseReservedNodeOfferingOutput) { + op := &request.Operation{ + Name: opPurchaseReservedNodeOffering, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &PurchaseReservedNodeOfferingInput{} + } + + req = c.newRequest(op, input, output) + output = &PurchaseReservedNodeOfferingOutput{} + req.Data = output + return +} + +// Allows you to purchase reserved nodes. Amazon Redshift offers a predefined +// set of reserved node offerings. You can purchase one or more of the offerings. +// You can call the DescribeReservedNodeOfferings API to obtain the available +// reserved node offerings. You can call this API by providing a specific reserved +// node offering and the number of nodes you want to reserve. +// +// For more information about reserved node offerings, go to Purchasing Reserved +// Nodes (http://docs.aws.amazon.com/redshift/latest/mgmt/purchase-reserved-node-instance.html) +// in the Amazon Redshift Cluster Management Guide. +func (c *Redshift) PurchaseReservedNodeOffering(input *PurchaseReservedNodeOfferingInput) (*PurchaseReservedNodeOfferingOutput, error) { + req, out := c.PurchaseReservedNodeOfferingRequest(input) + err := req.Send() + return out, err +} + +const opRebootCluster = "RebootCluster" + +// RebootClusterRequest generates a request for the RebootCluster operation. +func (c *Redshift) RebootClusterRequest(input *RebootClusterInput) (req *request.Request, output *RebootClusterOutput) { + op := &request.Operation{ + Name: opRebootCluster, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &RebootClusterInput{} + } + + req = c.newRequest(op, input, output) + output = &RebootClusterOutput{} + req.Data = output + return +} + +// Reboots a cluster. This action is taken as soon as possible. It results in +// a momentary outage to the cluster, during which the cluster status is set +// to rebooting. A cluster event is created when the reboot is completed. Any +// pending cluster modifications (see ModifyCluster) are applied at this reboot. 
+// For more information about managing clusters, go to Amazon Redshift Clusters +// (http://docs.aws.amazon.com/redshift/latest/mgmt/working-with-clusters.html) +// in the Amazon Redshift Cluster Management Guide +func (c *Redshift) RebootCluster(input *RebootClusterInput) (*RebootClusterOutput, error) { + req, out := c.RebootClusterRequest(input) + err := req.Send() + return out, err +} + +const opResetClusterParameterGroup = "ResetClusterParameterGroup" + +// ResetClusterParameterGroupRequest generates a request for the ResetClusterParameterGroup operation. +func (c *Redshift) ResetClusterParameterGroupRequest(input *ResetClusterParameterGroupInput) (req *request.Request, output *ClusterParameterGroupNameMessage) { + op := &request.Operation{ + Name: opResetClusterParameterGroup, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ResetClusterParameterGroupInput{} + } + + req = c.newRequest(op, input, output) + output = &ClusterParameterGroupNameMessage{} + req.Data = output + return +} + +// Sets one or more parameters of the specified parameter group to their default +// values and sets the source values of the parameters to "engine-default". +// To reset the entire parameter group specify the ResetAllParameters parameter. +// For parameter changes to take effect you must reboot any associated clusters. +func (c *Redshift) ResetClusterParameterGroup(input *ResetClusterParameterGroupInput) (*ClusterParameterGroupNameMessage, error) { + req, out := c.ResetClusterParameterGroupRequest(input) + err := req.Send() + return out, err +} + +const opRestoreFromClusterSnapshot = "RestoreFromClusterSnapshot" + +// RestoreFromClusterSnapshotRequest generates a request for the RestoreFromClusterSnapshot operation. +func (c *Redshift) RestoreFromClusterSnapshotRequest(input *RestoreFromClusterSnapshotInput) (req *request.Request, output *RestoreFromClusterSnapshotOutput) { + op := &request.Operation{ + Name: opRestoreFromClusterSnapshot, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &RestoreFromClusterSnapshotInput{} + } + + req = c.newRequest(op, input, output) + output = &RestoreFromClusterSnapshotOutput{} + req.Data = output + return +} + +// Creates a new cluster from a snapshot. By default, Amazon Redshift creates +// the resulting cluster with the same configuration as the original cluster +// from which the snapshot was created, except that the new cluster is created +// with the default cluster security and parameter groups. After Amazon Redshift +// creates the cluster, you can use the ModifyCluster API to associate a different +// security group and different parameter group with the restored cluster. If +// you are using a DS node type, you can also choose to change to another DS +// node type of the same size during restore. +// +// If you restore a cluster into a VPC, you must provide a cluster subnet +// group where you want the cluster restored. +// +// For more information about working with snapshots, go to Amazon Redshift +// Snapshots (http://docs.aws.amazon.com/redshift/latest/mgmt/working-with-snapshots.html) +// in the Amazon Redshift Cluster Management Guide. 
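+//
+// Editorial sketch (not part of the vendored SDK): restoring into a VPC,
+// where a cluster subnet group must be supplied as noted above. It assumes
+// an existing *Redshift client (svc); all identifiers are hypothetical.
+//
+//	out, err := svc.RestoreFromClusterSnapshot(&redshift.RestoreFromClusterSnapshotInput{
+//		ClusterIdentifier:      aws.String("restoredcluster"),    // hypothetical new cluster
+//		SnapshotIdentifier:     aws.String("my-snapshot-id"),     // hypothetical snapshot
+//		ClusterSubnetGroupName: aws.String("examplesubnetgroup"), // required for VPC restores
+//	})
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	fmt.Println(out) // the new cluster starts out in the "creating" state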
+func (c *Redshift) RestoreFromClusterSnapshot(input *RestoreFromClusterSnapshotInput) (*RestoreFromClusterSnapshotOutput, error) { + req, out := c.RestoreFromClusterSnapshotRequest(input) + err := req.Send() + return out, err +} + +const opRevokeClusterSecurityGroupIngress = "RevokeClusterSecurityGroupIngress" + +// RevokeClusterSecurityGroupIngressRequest generates a request for the RevokeClusterSecurityGroupIngress operation. +func (c *Redshift) RevokeClusterSecurityGroupIngressRequest(input *RevokeClusterSecurityGroupIngressInput) (req *request.Request, output *RevokeClusterSecurityGroupIngressOutput) { + op := &request.Operation{ + Name: opRevokeClusterSecurityGroupIngress, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &RevokeClusterSecurityGroupIngressInput{} + } + + req = c.newRequest(op, input, output) + output = &RevokeClusterSecurityGroupIngressOutput{} + req.Data = output + return +} + +// Revokes an ingress rule in an Amazon Redshift security group for a previously +// authorized IP range or Amazon EC2 security group. To add an ingress rule, +// see AuthorizeClusterSecurityGroupIngress. For information about managing +// security groups, go to Amazon Redshift Cluster Security Groups (http://docs.aws.amazon.com/redshift/latest/mgmt/working-with-security-groups.html) +// in the Amazon Redshift Cluster Management Guide. +func (c *Redshift) RevokeClusterSecurityGroupIngress(input *RevokeClusterSecurityGroupIngressInput) (*RevokeClusterSecurityGroupIngressOutput, error) { + req, out := c.RevokeClusterSecurityGroupIngressRequest(input) + err := req.Send() + return out, err +} + +const opRevokeSnapshotAccess = "RevokeSnapshotAccess" + +// RevokeSnapshotAccessRequest generates a request for the RevokeSnapshotAccess operation. +func (c *Redshift) RevokeSnapshotAccessRequest(input *RevokeSnapshotAccessInput) (req *request.Request, output *RevokeSnapshotAccessOutput) { + op := &request.Operation{ + Name: opRevokeSnapshotAccess, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &RevokeSnapshotAccessInput{} + } + + req = c.newRequest(op, input, output) + output = &RevokeSnapshotAccessOutput{} + req.Data = output + return +} + +// Removes the ability of the specified AWS customer account to restore the +// specified snapshot. If the account is currently restoring the snapshot, the +// restore will run to completion. +// +// For more information about working with snapshots, go to Amazon Redshift +// Snapshots (http://docs.aws.amazon.com/redshift/latest/mgmt/working-with-snapshots.html) +// in the Amazon Redshift Cluster Management Guide. +func (c *Redshift) RevokeSnapshotAccess(input *RevokeSnapshotAccessInput) (*RevokeSnapshotAccessOutput, error) { + req, out := c.RevokeSnapshotAccessRequest(input) + err := req.Send() + return out, err +} + +const opRotateEncryptionKey = "RotateEncryptionKey" + +// RotateEncryptionKeyRequest generates a request for the RotateEncryptionKey operation. +func (c *Redshift) RotateEncryptionKeyRequest(input *RotateEncryptionKeyInput) (req *request.Request, output *RotateEncryptionKeyOutput) { + op := &request.Operation{ + Name: opRotateEncryptionKey, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &RotateEncryptionKeyInput{} + } + + req = c.newRequest(op, input, output) + output = &RotateEncryptionKeyOutput{} + req.Data = output + return +} + +// Rotates the encryption keys for a cluster. 
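+//
+// Editorial sketch (not part of the vendored SDK), assuming an existing
+// *Redshift client (svc) and a hypothetical cluster identifier:
+//
+//	out, err := svc.RotateEncryptionKey(&redshift.RotateEncryptionKeyInput{
+//		ClusterIdentifier: aws.String("examplecluster"), // hypothetical cluster
+//	})
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	fmt.Println(out)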
+func (c *Redshift) RotateEncryptionKey(input *RotateEncryptionKeyInput) (*RotateEncryptionKeyOutput, error) {
+	req, out := c.RotateEncryptionKeyRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+// Describes an AWS customer account authorized to restore a snapshot.
+type AccountWithRestoreAccess struct {
+	_ struct{} `type:"structure"`
+
+	// The identifier of an AWS customer account authorized to restore a snapshot.
+	AccountId *string `type:"string"`
+}
+
+// String returns the string representation
+func (s AccountWithRestoreAccess) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s AccountWithRestoreAccess) GoString() string {
+	return s.String()
+}
+
+// The input for the AuthorizeClusterSecurityGroupIngress action.
+type AuthorizeClusterSecurityGroupIngressInput struct {
+	_ struct{} `type:"structure"`
+
+	// The IP range to be added to the Amazon Redshift security group.
+	CIDRIP *string `type:"string"`
+
+	// The name of the security group to which the ingress rule is added.
+	ClusterSecurityGroupName *string `type:"string" required:"true"`
+
+	// The EC2 security group to be added to the Amazon Redshift security group.
+	EC2SecurityGroupName *string `type:"string"`
+
+	// The AWS account number of the owner of the security group specified by the
+	// EC2SecurityGroupName parameter. The AWS Access Key ID is not an acceptable
+	// value.
+	//
+	// Example: 111122223333
+	EC2SecurityGroupOwnerId *string `type:"string"`
+}
+
+// String returns the string representation
+func (s AuthorizeClusterSecurityGroupIngressInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s AuthorizeClusterSecurityGroupIngressInput) GoString() string {
+	return s.String()
+}
+
+type AuthorizeClusterSecurityGroupIngressOutput struct {
+	_ struct{} `type:"structure"`
+
+	// Describes a security group.
+	ClusterSecurityGroup *ClusterSecurityGroup `type:"structure"`
+}
+
+// String returns the string representation
+func (s AuthorizeClusterSecurityGroupIngressOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s AuthorizeClusterSecurityGroupIngressOutput) GoString() string {
+	return s.String()
+}
+
+type AuthorizeSnapshotAccessInput struct {
+	_ struct{} `type:"structure"`
+
+	// The identifier of the AWS customer account authorized to restore the specified
+	// snapshot.
+	AccountWithRestoreAccess *string `type:"string" required:"true"`
+
+	// The identifier of the cluster the snapshot was created from. This parameter
+	// is required if your IAM user has a policy containing a snapshot resource
+	// element that specifies anything other than * for the cluster name.
+	SnapshotClusterIdentifier *string `type:"string"`
+
+	// The identifier of the snapshot the account is authorized to restore.
+	SnapshotIdentifier *string `type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s AuthorizeSnapshotAccessInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s AuthorizeSnapshotAccessInput) GoString() string {
+	return s.String()
+}
+
+type AuthorizeSnapshotAccessOutput struct {
+	_ struct{} `type:"structure"`
+
+	// Describes a snapshot.
+	Snapshot *Snapshot `type:"structure"`
+}
+
+// String returns the string representation
+func (s AuthorizeSnapshotAccessOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s AuthorizeSnapshotAccessOutput) GoString() string {
+	return s.String()
+}
+
+// Describes an availability zone.
+type AvailabilityZone struct {
+	_ struct{} `type:"structure"`
+
+	// The name of the availability zone.
+	Name *string `type:"string"`
+}
+
+// String returns the string representation
+func (s AvailabilityZone) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s AvailabilityZone) GoString() string {
+	return s.String()
+}
+
+// Describes a cluster.
+type Cluster struct {
+	_ struct{} `type:"structure"`
+
+	// If true, major version upgrades will be applied automatically to the cluster
+	// during the maintenance window.
+	AllowVersionUpgrade *bool `type:"boolean"`
+
+	// The number of days that automatic cluster snapshots are retained.
+	AutomatedSnapshotRetentionPeriod *int64 `type:"integer"`
+
+	// The name of the Availability Zone in which the cluster is located.
+	AvailabilityZone *string `type:"string"`
+
+	// The date and time that the cluster was created.
+	ClusterCreateTime *time.Time `type:"timestamp" timestampFormat:"iso8601"`
+
+	// The unique identifier of the cluster.
+	ClusterIdentifier *string `type:"string"`
+
+	// The nodes in a cluster.
+	ClusterNodes []*ClusterNode `type:"list"`
+
+	// The list of cluster parameter groups that are associated with this cluster.
+	// Each parameter group in the list is returned with its status.
+	ClusterParameterGroups []*ClusterParameterGroupStatus `locationNameList:"ClusterParameterGroup" type:"list"`
+
+	// The public key for the cluster.
+	ClusterPublicKey *string `type:"string"`
+
+	// The specific revision number of the database in the cluster.
+	ClusterRevisionNumber *string `type:"string"`
+
+	// A list of cluster security groups that are associated with the cluster. Each
+	// security group is represented by an element that contains ClusterSecurityGroup.Name
+	// and ClusterSecurityGroup.Status subelements.
+	//
+	// Cluster security groups are used when the cluster is not created in a VPC.
+	// Clusters that are created in a VPC use VPC security groups, which are listed
+	// by the VpcSecurityGroups parameter.
+	ClusterSecurityGroups []*ClusterSecurityGroupMembership `locationNameList:"ClusterSecurityGroup" type:"list"`
+
+	// Returns the destination region and retention period that are configured for
+	// cross-region snapshot copy.
+	ClusterSnapshotCopyStatus *ClusterSnapshotCopyStatus `type:"structure"`
+
+	// The current state of this cluster. Possible values include available, creating,
+	// deleting, rebooting, renaming, and resizing.
+	ClusterStatus *string `type:"string"`
+
+	// The name of the subnet group that is associated with the cluster. This parameter
+	// is valid only when the cluster is in a VPC.
+	ClusterSubnetGroupName *string `type:"string"`
+
+	// The version ID of the Amazon Redshift engine that is running on the cluster.
+	ClusterVersion *string `type:"string"`
+
+	// The name of the initial database that was created when the cluster was created.
+	// This same name is returned for the life of the cluster. If an initial database
+	// was not specified, a database named "dev" was created by default.
+	DBName *string `type:"string"`
+
+	// Describes the status of the elastic IP (EIP) address.
+ ElasticIpStatus *ElasticIpStatus `type:"structure"` + + // If true, data in the cluster is encrypted at rest. + Encrypted *bool `type:"boolean"` + + // The connection endpoint. + Endpoint *Endpoint `type:"structure"` + + // Reports whether the Amazon Redshift cluster has finished applying any HSM + // settings changes specified in a modify cluster command. + // + // Values: active, applying + HsmStatus *HsmStatus `type:"structure"` + + // The AWS Key Management Service (KMS) key ID of the encryption key used to + // encrypt data in the cluster. + KmsKeyId *string `type:"string"` + + // The master user name for the cluster. This name is used to connect to the + // database that is specified in DBName. + MasterUsername *string `type:"string"` + + // The status of a modify operation, if any, initiated for the cluster. + ModifyStatus *string `type:"string"` + + // The node type for the nodes in the cluster. + NodeType *string `type:"string"` + + // The number of compute nodes in the cluster. + NumberOfNodes *int64 `type:"integer"` + + // If present, changes to the cluster are pending. Specific pending changes + // are identified by subelements. + PendingModifiedValues *PendingModifiedValues `type:"structure"` + + // The weekly time range (in UTC) during which system maintenance can occur. + PreferredMaintenanceWindow *string `type:"string"` + + // If true, the cluster can be accessed from a public network. + PubliclyAccessible *bool `type:"boolean"` + + // Describes the status of a cluster restore action. Returns null if the cluster + // was not created by restoring a snapshot. + RestoreStatus *RestoreStatus `type:"structure"` + + // The list of tags for the cluster. + Tags []*Tag `locationNameList:"Tag" type:"list"` + + // The identifier of the VPC the cluster is in, if the cluster is in a VPC. + VpcId *string `type:"string"` + + // A list of Virtual Private Cloud (VPC) security groups that are associated + // with the cluster. This parameter is returned only if the cluster is in a + // VPC. + VpcSecurityGroups []*VpcSecurityGroupMembership `locationNameList:"VpcSecurityGroup" type:"list"` +} + +// String returns the string representation +func (s Cluster) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Cluster) GoString() string { + return s.String() +} + +// The identifier of a node in a cluster. +type ClusterNode struct { + _ struct{} `type:"structure"` + + // Whether the node is a leader node or a compute node. + NodeRole *string `type:"string"` + + // The private IP address of a node within a cluster. + PrivateIPAddress *string `type:"string"` + + // The public IP address of a node within a cluster. + PublicIPAddress *string `type:"string"` +} + +// String returns the string representation +func (s ClusterNode) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ClusterNode) GoString() string { + return s.String() +} + +// Describes a parameter group. +type ClusterParameterGroup struct { + _ struct{} `type:"structure"` + + // The description of the parameter group. + Description *string `type:"string"` + + // The name of the cluster parameter group family that this cluster parameter + // group is compatible with. + ParameterGroupFamily *string `type:"string"` + + // The name of the cluster parameter group. + ParameterGroupName *string `type:"string"` + + // The list of tags for the cluster parameter group. 
+	Tags []*Tag `locationNameList:"Tag" type:"list"`
+}
+
+// String returns the string representation
+func (s ClusterParameterGroup) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ClusterParameterGroup) GoString() string {
+	return s.String()
+}
+
+// Contains the output from the ModifyClusterParameterGroup and ResetClusterParameterGroup
+// actions and indicates the parameter group involved and the status of the
+// operation on the parameter group.
+type ClusterParameterGroupNameMessage struct {
+	_ struct{} `type:"structure"`
+
+	// The name of the cluster parameter group.
+	ParameterGroupName *string `type:"string"`
+
+	// The status of the parameter group. For example, if you made a change to a
+	// parameter group name-value pair, then the change could be pending a reboot
+	// of an associated cluster.
+	ParameterGroupStatus *string `type:"string"`
+}
+
+// String returns the string representation
+func (s ClusterParameterGroupNameMessage) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ClusterParameterGroupNameMessage) GoString() string {
+	return s.String()
+}
+
+// Describes the status of a parameter group.
+type ClusterParameterGroupStatus struct {
+	_ struct{} `type:"structure"`
+
+	// The list of parameter statuses.
+	//
+	// For more information about parameters and parameter groups, go to Amazon
+	// Redshift Parameter Groups (http://docs.aws.amazon.com/redshift/latest/mgmt/working-with-parameter-groups.html)
+	// in the Amazon Redshift Cluster Management Guide.
+	ClusterParameterStatusList []*ClusterParameterStatus `type:"list"`
+
+	// The status of parameter updates.
+	ParameterApplyStatus *string `type:"string"`
+
+	// The name of the cluster parameter group.
+	ParameterGroupName *string `type:"string"`
+}
+
+// String returns the string representation
+func (s ClusterParameterGroupStatus) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ClusterParameterGroupStatus) GoString() string {
+	return s.String()
+}
+
+// Describes the status of a single parameter within a parameter group.
+type ClusterParameterStatus struct {
+	_ struct{} `type:"structure"`
+
+	// The error that prevented the parameter from being applied to the database.
+	ParameterApplyErrorDescription *string `type:"string"`
+
+	// The status of the parameter that indicates whether the parameter is in sync
+	// with the database, waiting for a cluster reboot, or encountered an error
+	// when being applied.
+	//
+	// The following are possible statuses and descriptions.
+	//
+	//    in-sync: The parameter value is in sync with the database.
+	//
+	//    pending-reboot: The parameter value will be applied after the cluster reboots.
+	//
+	//    applying: The parameter value is being applied to the database.
+	//
+	//    invalid-parameter: Cannot apply the parameter value because it has an
+	//    invalid value or syntax.
+	//
+	//    apply-deferred: The parameter contains static property changes. The changes
+	//    are deferred until the cluster reboots.
+	//
+	//    apply-error: Cannot connect to the cluster. The parameter change will be
+	//    applied after the cluster reboots.
+	//
+	//    unknown-error: Cannot apply the parameter change right now. The change
+	//    will be applied after the cluster reboots.
+	ParameterApplyStatus *string `type:"string"`
+
+	// The name of the parameter.
+ ParameterName *string `type:"string"` +} + +// String returns the string representation +func (s ClusterParameterStatus) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ClusterParameterStatus) GoString() string { + return s.String() +} + +// Describes a security group. +type ClusterSecurityGroup struct { + _ struct{} `type:"structure"` + + // The name of the cluster security group to which the operation was applied. + ClusterSecurityGroupName *string `type:"string"` + + // A description of the security group. + Description *string `type:"string"` + + // A list of EC2 security groups that are permitted to access clusters associated + // with this cluster security group. + EC2SecurityGroups []*EC2SecurityGroup `locationNameList:"EC2SecurityGroup" type:"list"` + + // A list of IP ranges (CIDR blocks) that are permitted to access clusters associated + // with this cluster security group. + IPRanges []*IPRange `locationNameList:"IPRange" type:"list"` + + // The list of tags for the cluster security group. + Tags []*Tag `locationNameList:"Tag" type:"list"` +} + +// String returns the string representation +func (s ClusterSecurityGroup) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ClusterSecurityGroup) GoString() string { + return s.String() +} + +// Describes a security group. +type ClusterSecurityGroupMembership struct { + _ struct{} `type:"structure"` + + // The name of the cluster security group. + ClusterSecurityGroupName *string `type:"string"` + + // The status of the cluster security group. + Status *string `type:"string"` +} + +// String returns the string representation +func (s ClusterSecurityGroupMembership) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ClusterSecurityGroupMembership) GoString() string { + return s.String() +} + +// Returns the destination region and retention period that are configured for +// cross-region snapshot copy. +type ClusterSnapshotCopyStatus struct { + _ struct{} `type:"structure"` + + // The destination region that snapshots are automatically copied to when cross-region + // snapshot copy is enabled. + DestinationRegion *string `type:"string"` + + // The number of days that automated snapshots are retained in the destination + // region after they are copied from a source region. + RetentionPeriod *int64 `type:"long"` + + // The name of the snapshot copy grant. + SnapshotCopyGrantName *string `type:"string"` +} + +// String returns the string representation +func (s ClusterSnapshotCopyStatus) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ClusterSnapshotCopyStatus) GoString() string { + return s.String() +} + +// Describes a subnet group. +type ClusterSubnetGroup struct { + _ struct{} `type:"structure"` + + // The name of the cluster subnet group. + ClusterSubnetGroupName *string `type:"string"` + + // The description of the cluster subnet group. + Description *string `type:"string"` + + // The status of the cluster subnet group. Possible values are Complete, Incomplete + // and Invalid. + SubnetGroupStatus *string `type:"string"` + + // A list of the VPC Subnet elements. + Subnets []*Subnet `locationNameList:"Subnet" type:"list"` + + // The list of tags for the cluster subnet group. + Tags []*Tag `locationNameList:"Tag" type:"list"` + + // The VPC ID of the cluster subnet group. 
+ VpcId *string `type:"string"` +} + +// String returns the string representation +func (s ClusterSubnetGroup) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ClusterSubnetGroup) GoString() string { + return s.String() +} + +// Describes a cluster version, including the parameter group family and description +// of the version. +type ClusterVersion struct { + _ struct{} `type:"structure"` + + // The name of the cluster parameter group family for the cluster. + ClusterParameterGroupFamily *string `type:"string"` + + // The version number used by the cluster. + ClusterVersion *string `type:"string"` + + // The description of the cluster version. + Description *string `type:"string"` +} + +// String returns the string representation +func (s ClusterVersion) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ClusterVersion) GoString() string { + return s.String() +} + +type CopyClusterSnapshotInput struct { + _ struct{} `type:"structure"` + + // The identifier of the cluster the source snapshot was created from. This + // parameter is required if your IAM user has a policy containing a snapshot + // resource element that specifies anything other than * for the cluster name. + // + // Constraints: + // + // Must be the identifier for a valid cluster. + SourceSnapshotClusterIdentifier *string `type:"string"` + + // The identifier for the source snapshot. + // + // Constraints: + // + // Must be the identifier for a valid automated snapshot whose state is available. + SourceSnapshotIdentifier *string `type:"string" required:"true"` + + // The identifier given to the new manual snapshot. + // + // Constraints: + // + // Cannot be null, empty, or blank. Must contain from 1 to 255 alphanumeric + // characters or hyphens. First character must be a letter. Cannot end with + // a hyphen or contain two consecutive hyphens. Must be unique for the AWS account + // that is making the request. + TargetSnapshotIdentifier *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s CopyClusterSnapshotInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CopyClusterSnapshotInput) GoString() string { + return s.String() +} + +type CopyClusterSnapshotOutput struct { + _ struct{} `type:"structure"` + + // Describes a snapshot. + Snapshot *Snapshot `type:"structure"` +} + +// String returns the string representation +func (s CopyClusterSnapshotOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CopyClusterSnapshotOutput) GoString() string { + return s.String() +} + +type CreateClusterInput struct { + _ struct{} `type:"structure"` + + // If true, major version upgrades can be applied during the maintenance window + // to the Amazon Redshift engine that is running on the cluster. + // + // When a new major version of the Amazon Redshift engine is released, you + // can request that the service automatically apply upgrades during the maintenance + // window to the Amazon Redshift engine that is running on your cluster. + // + // Default: true + AllowVersionUpgrade *bool `type:"boolean"` + + // The number of days that automated snapshots are retained. If the value is + // 0, automated snapshots are disabled. Even if automated snapshots are disabled, + // you can still create manual snapshots when you want with CreateClusterSnapshot. 
+	//
+	// Default: 1
+	//
+	// Constraints: Must be a value from 0 to 35.
+	AutomatedSnapshotRetentionPeriod *int64 `type:"integer"`
+
+	// The EC2 Availability Zone (AZ) in which you want Amazon Redshift to provision
+	// the cluster. For example, if you have several EC2 instances running in a
+	// specific Availability Zone, then you might want the cluster to be provisioned
+	// in the same zone in order to decrease network latency.
+	//
+	// Default: A random, system-chosen Availability Zone in the region that is
+	// specified by the endpoint.
+	//
+	// Example: us-east-1d
+	//
+	// Constraint: The specified Availability Zone must be in the same region
+	// as the current endpoint.
+	AvailabilityZone *string `type:"string"`
+
+	// A unique identifier for the cluster. You use this identifier to refer to
+	// the cluster for any subsequent cluster operations such as deleting or modifying.
+	// The identifier also appears in the Amazon Redshift console.
+	//
+	// Constraints:
+	//
+	// Must contain from 1 to 63 alphanumeric characters or hyphens. Alphabetic
+	// characters must be lowercase. First character must be a letter. Cannot end
+	// with a hyphen or contain two consecutive hyphens. Must be unique for all
+	// clusters within an AWS account. Example: myexamplecluster
+	ClusterIdentifier *string `type:"string" required:"true"`
+
+	// The name of the parameter group to be associated with this cluster.
+	//
+	// Default: The default Amazon Redshift cluster parameter group. For information
+	// about the default parameter group, go to Working with Amazon Redshift Parameter
+	// Groups (http://docs.aws.amazon.com/redshift/latest/mgmt/working-with-parameter-groups.html)
+	//
+	// Constraints:
+	//
+	// Must be 1 to 255 alphanumeric characters or hyphens. First character must
+	// be a letter. Cannot end with a hyphen or contain two consecutive hyphens.
+	ClusterParameterGroupName *string `type:"string"`
+
+	// A list of security groups to be associated with this cluster.
+	//
+	// Default: The default cluster security group for Amazon Redshift.
+	ClusterSecurityGroups []*string `locationNameList:"ClusterSecurityGroupName" type:"list"`
+
+	// The name of a cluster subnet group to be associated with this cluster.
+	//
+	// If this parameter is not provided, the resulting cluster will be deployed
+	// outside of a virtual private cloud (VPC).
+	ClusterSubnetGroupName *string `type:"string"`
+
+	// The type of the cluster. When the cluster type is specified as single-node,
+	// the NumberOfNodes parameter is not required; when it is specified as multi-node,
+	// the NumberOfNodes parameter is required.
+	//
+	// Valid Values: multi-node | single-node
+	//
+	// Default: multi-node
+	ClusterType *string `type:"string"`
+
+	// The version of the Amazon Redshift engine software that you want to deploy
+	// on the cluster.
+	//
+	// The version selected runs on all the nodes in the cluster.
+	//
+	// Constraints: Only version 1.0 is currently available.
+	//
+	// Example: 1.0
+	ClusterVersion *string `type:"string"`
+
+	// The name of the first database to be created when the cluster is created.
+	//
+	// To create additional databases after the cluster is created, connect to
+	// the cluster with a SQL client and use SQL commands to create a database.
+	// For more information, go to Create a Database (http://docs.aws.amazon.com/redshift/latest/dg/t_creating_database.html)
+	// in the Amazon Redshift Database Developer Guide.
+	//
+	// Default: dev
+	//
+	// Constraints:
+	//
+	// Must contain 1 to 64 alphanumeric characters.
Must contain only lowercase + // letters. Cannot be a word that is reserved by the service. A list of reserved + // words can be found in Reserved Words (http://docs.aws.amazon.com/redshift/latest/dg/r_pg_keywords.html) + // in the Amazon Redshift Database Developer Guide. + DBName *string `type:"string"` + + // The Elastic IP (EIP) address for the cluster. + // + // Constraints: The cluster must be provisioned in EC2-VPC and publicly-accessible + // through an Internet gateway. For more information about provisioning clusters + // in EC2-VPC, go to Supported Platforms to Launch Your Cluster (http://docs.aws.amazon.com/redshift/latest/mgmt/working-with-clusters.html#cluster-platforms) + // in the Amazon Redshift Cluster Management Guide. + ElasticIp *string `type:"string"` + + // If true, the data in the cluster is encrypted at rest. + // + // Default: false + Encrypted *bool `type:"boolean"` + + // Specifies the name of the HSM client certificate the Amazon Redshift cluster + // uses to retrieve the data encryption keys stored in an HSM. + HsmClientCertificateIdentifier *string `type:"string"` + + // Specifies the name of the HSM configuration that contains the information + // the Amazon Redshift cluster can use to retrieve and store keys in an HSM. + HsmConfigurationIdentifier *string `type:"string"` + + // The AWS Key Management Service (KMS) key ID of the encryption key that you + // want to use to encrypt data in the cluster. + KmsKeyId *string `type:"string"` + + // The password associated with the master user account for the cluster that + // is being created. + // + // Constraints: + // + // Must be between 8 and 64 characters in length. Must contain at least one + // uppercase letter. Must contain at least one lowercase letter. Must contain + // one number. Can be any printable ASCII character (ASCII code 33 to 126) except + // ' (single quote), " (double quote), \, /, @, or space. + MasterUserPassword *string `type:"string" required:"true"` + + // The user name associated with the master user account for the cluster that + // is being created. + // + // Constraints: + // + // Must be 1 - 128 alphanumeric characters. First character must be a letter. + // Cannot be a reserved word. A list of reserved words can be found in Reserved + // Words (http://docs.aws.amazon.com/redshift/latest/dg/r_pg_keywords.html) + // in the Amazon Redshift Database Developer Guide. + MasterUsername *string `type:"string" required:"true"` + + // The node type to be provisioned for the cluster. For information about node + // types, go to Working with Clusters (http://docs.aws.amazon.com/redshift/latest/mgmt/working-with-clusters.html#how-many-nodes) + // in the Amazon Redshift Cluster Management Guide. + // + // Valid Values: ds1.xlarge | ds1.8xlarge | ds2.xlarge | ds2.8xlarge | dc1.large + // | dc1.8xlarge. + NodeType *string `type:"string" required:"true"` + + // The number of compute nodes in the cluster. This parameter is required when + // the ClusterType parameter is specified as multi-node. + // + // For information about determining how many nodes you need, go to Working + // with Clusters (http://docs.aws.amazon.com/redshift/latest/mgmt/working-with-clusters.html#how-many-nodes) + // in the Amazon Redshift Cluster Management Guide. + // + // If you don't specify this parameter, you get a single-node cluster. When + // requesting a multi-node cluster, you must specify the number of nodes that + // you want in the cluster. 
+	//
+	// Default: 1
+	//
+	// Constraints: Value must be at least 1 and no more than 100.
+	NumberOfNodes *int64 `type:"integer"`
+
+	// The port number on which the cluster accepts incoming connections.
+	//
+	// The cluster is accessible only via the JDBC and ODBC connection strings.
+	// Part of the connection string requires the port on which the cluster will
+	// listen for incoming connections.
+	//
+	// Default: 5439
+	//
+	// Valid Values: 1150-65535
+	Port *int64 `type:"integer"`
+
+	// The weekly time range (in UTC) during which automated cluster maintenance
+	// can occur.
+	//
+	// Format: ddd:hh24:mi-ddd:hh24:mi
+	//
+	// Default: A 30-minute window selected at random from an 8-hour block of
+	// time per region, occurring on a random day of the week. For more information
+	// about the time blocks for each region, see Maintenance Windows (http://docs.aws.amazon.com/redshift/latest/mgmt/working-with-clusters.html#rs-maintenance-windows)
+	// in the Amazon Redshift Cluster Management Guide.
+	//
+	// Valid Days: Mon | Tue | Wed | Thu | Fri | Sat | Sun
+	//
+	// Constraints: Minimum 30-minute window.
+	PreferredMaintenanceWindow *string `type:"string"`
+
+	// If true, the cluster can be accessed from a public network.
+	PubliclyAccessible *bool `type:"boolean"`
+
+	// A list of tag instances.
+	Tags []*Tag `locationNameList:"Tag" type:"list"`
+
+	// A list of Virtual Private Cloud (VPC) security groups to be associated with
+	// the cluster.
+	//
+	// Default: The default VPC security group is associated with the cluster.
+	VpcSecurityGroupIds []*string `locationNameList:"VpcSecurityGroupId" type:"list"`
+}
+
+// String returns the string representation
+func (s CreateClusterInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreateClusterInput) GoString() string {
+	return s.String()
+}
+
+type CreateClusterOutput struct {
+	_ struct{} `type:"structure"`
+
+	// Describes a cluster.
+	Cluster *Cluster `type:"structure"`
+}
+
+// String returns the string representation
+func (s CreateClusterOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreateClusterOutput) GoString() string {
+	return s.String()
+}
+
+type CreateClusterParameterGroupInput struct {
+	_ struct{} `type:"structure"`
+
+	// A description of the parameter group.
+	Description *string `type:"string" required:"true"`
+
+	// The Amazon Redshift engine version to which the cluster parameter group applies.
+	// The cluster engine version determines the set of parameters.
+	//
+	// To get a list of valid parameter group family names, you can call DescribeClusterParameterGroups.
+	// By default, Amazon Redshift returns a list of all the parameter groups that
+	// are owned by your AWS account, including the default parameter groups for
+	// each Amazon Redshift engine version. The parameter group family names associated
+	// with the default parameter groups provide you the valid values. For example,
+	// a valid family name is "redshift-1.0".
+	ParameterGroupFamily *string `type:"string" required:"true"`
+
+	// The name of the cluster parameter group.
+	//
+	// Constraints:
+	//
+	// Must be 1 to 255 alphanumeric characters or hyphens. First character must
+	// be a letter. Cannot end with a hyphen or contain two consecutive hyphens.
+	// Must be unique within your AWS account. This value is stored as a lower-case
+	// string.
+	ParameterGroupName *string `type:"string" required:"true"`
+
+	// A list of tag instances.
+	Tags []*Tag `locationNameList:"Tag" type:"list"`
+}
+
+// String returns the string representation
+func (s CreateClusterParameterGroupInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreateClusterParameterGroupInput) GoString() string {
+	return s.String()
+}
+
+type CreateClusterParameterGroupOutput struct {
+	_ struct{} `type:"structure"`
+
+	// Describes a parameter group.
+	ClusterParameterGroup *ClusterParameterGroup `type:"structure"`
+}
+
+// String returns the string representation
+func (s CreateClusterParameterGroupOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreateClusterParameterGroupOutput) GoString() string {
+	return s.String()
+}
+
+// The input for the CreateClusterSecurityGroup action.
+type CreateClusterSecurityGroupInput struct {
+	_ struct{} `type:"structure"`
+
+	// The name for the security group. Amazon Redshift stores the value as a lowercase
+	// string.
+	//
+	// Constraints:
+	//
+	// Must contain no more than 255 alphanumeric characters or hyphens. Must
+	// not be "Default". Must be unique for all security groups that are created
+	// by your AWS account. Example: examplesecuritygroup
+	ClusterSecurityGroupName *string `type:"string" required:"true"`
+
+	// A description for the security group.
+	Description *string `type:"string" required:"true"`
+
+	// A list of tag instances.
+	Tags []*Tag `locationNameList:"Tag" type:"list"`
+}
+
+// String returns the string representation
+func (s CreateClusterSecurityGroupInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreateClusterSecurityGroupInput) GoString() string {
+	return s.String()
+}
+
+type CreateClusterSecurityGroupOutput struct {
+	_ struct{} `type:"structure"`
+
+	// Describes a security group.
+	ClusterSecurityGroup *ClusterSecurityGroup `type:"structure"`
+}
+
+// String returns the string representation
+func (s CreateClusterSecurityGroupOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreateClusterSecurityGroupOutput) GoString() string {
+	return s.String()
+}
+
+type CreateClusterSnapshotInput struct {
+	_ struct{} `type:"structure"`
+
+	// The cluster identifier for which you want a snapshot.
+	ClusterIdentifier *string `type:"string" required:"true"`
+
+	// A unique identifier for the snapshot that you are requesting. This identifier
+	// must be unique for all snapshots within the AWS account.
+	//
+	// Constraints:
+	//
+	// Cannot be null, empty, or blank. Must contain from 1 to 255 alphanumeric
+	// characters or hyphens. First character must be a letter. Cannot end with a
+	// hyphen or contain two consecutive hyphens. Example: my-snapshot-id
+	SnapshotIdentifier *string `type:"string" required:"true"`
+
+	// A list of tag instances.
+	Tags []*Tag `locationNameList:"Tag" type:"list"`
+}
+
+// String returns the string representation
+func (s CreateClusterSnapshotInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreateClusterSnapshotInput) GoString() string {
+	return s.String()
+}
+
+type CreateClusterSnapshotOutput struct {
+	_ struct{} `type:"structure"`
+
+	// Describes a snapshot.
+	Snapshot *Snapshot `type:"structure"`
+}
+
+// String returns the string representation
+func (s CreateClusterSnapshotOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreateClusterSnapshotOutput) GoString() string {
+	return s.String()
+}
+
+type CreateClusterSubnetGroupInput struct {
+	_ struct{} `type:"structure"`
+
+	// The name for the subnet group. Amazon Redshift stores the value as a lowercase
+	// string.
+	//
+	// Constraints:
+	//
+	// Must contain no more than 255 alphanumeric characters or hyphens. Must
+	// not be "Default". Must be unique for all subnet groups that are created by
+	// your AWS account. Example: examplesubnetgroup
+	ClusterSubnetGroupName *string `type:"string" required:"true"`
+
+	// A description for the subnet group.
+	Description *string `type:"string" required:"true"`
+
+	// An array of VPC subnet IDs. A maximum of 20 subnets can be modified in a
+	// single request.
+	SubnetIds []*string `locationNameList:"SubnetIdentifier" type:"list" required:"true"`
+
+	// A list of tag instances.
+	Tags []*Tag `locationNameList:"Tag" type:"list"`
+}
+
+// String returns the string representation
+func (s CreateClusterSubnetGroupInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreateClusterSubnetGroupInput) GoString() string {
+	return s.String()
+}
+
+type CreateClusterSubnetGroupOutput struct {
+	_ struct{} `type:"structure"`
+
+	// Describes a subnet group.
+	ClusterSubnetGroup *ClusterSubnetGroup `type:"structure"`
+}
+
+// String returns the string representation
+func (s CreateClusterSubnetGroupOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreateClusterSubnetGroupOutput) GoString() string {
+	return s.String()
+}
+
+type CreateEventSubscriptionInput struct {
+	_ struct{} `type:"structure"`
+
+	// A Boolean value; set to true to activate the subscription, or set to false
+	// to create the subscription but not activate it.
+	Enabled *bool `type:"boolean"`
+
+	// Specifies the Amazon Redshift event categories to be published by the event
+	// notification subscription.
+	//
+	// Values: Configuration, Management, Monitoring, Security
+	EventCategories []*string `locationNameList:"EventCategory" type:"list"`
+
+	// Specifies the Amazon Redshift event severity to be published by the event
+	// notification subscription.
+	//
+	// Values: ERROR, INFO
+	Severity *string `type:"string"`
+
+	// The Amazon Resource Name (ARN) of the Amazon SNS topic used to transmit the
+	// event notifications. The ARN is created by Amazon SNS when you create a topic
+	// and subscribe to it.
+	SnsTopicArn *string `type:"string" required:"true"`
+
+	// A list of one or more identifiers of Amazon Redshift source objects. All
+	// of the objects must be of the same type as was specified in the source type
+	// parameter. The event subscription will return only events generated by the
+	// specified objects. If not specified, then events are returned for all objects
+	// within the source type specified.
+	//
+	// Example: my-cluster-1, my-cluster-2
+	//
+	// Example: my-snapshot-20131010
+	SourceIds []*string `locationNameList:"SourceId" type:"list"`
+
+	// The type of source that will be generating the events. For example, if you
+	// want to be notified of events generated by a cluster, you would set this
+	// parameter to cluster.
If this value is not specified, events are returned + // for all Amazon Redshift objects in your AWS account. You must specify a source + // type in order to specify source IDs. + // + // Valid values: cluster, cluster-parameter-group, cluster-security-group, + // and cluster-snapshot. + SourceType *string `type:"string"` + + // The name of the event subscription to be created. + // + // Constraints: + // + // Cannot be null, empty, or blank. Must contain from 1 to 255 alphanumeric + // characters or hyphens. First character must be a letter. Cannot end with + // a hyphen or contain two consecutive hyphens. + SubscriptionName *string `type:"string" required:"true"` + + // A list of tag instances. + Tags []*Tag `locationNameList:"Tag" type:"list"` +} + +// String returns the string representation +func (s CreateEventSubscriptionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateEventSubscriptionInput) GoString() string { + return s.String() +} + +type CreateEventSubscriptionOutput struct { + _ struct{} `type:"structure"` + + EventSubscription *EventSubscription `type:"structure"` +} + +// String returns the string representation +func (s CreateEventSubscriptionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateEventSubscriptionOutput) GoString() string { + return s.String() +} + +type CreateHsmClientCertificateInput struct { + _ struct{} `type:"structure"` + + // The identifier to be assigned to the new HSM client certificate that the + // cluster will use to connect to the HSM to use the database encryption keys. + HsmClientCertificateIdentifier *string `type:"string" required:"true"` + + // A list of tag instances. + Tags []*Tag `locationNameList:"Tag" type:"list"` +} + +// String returns the string representation +func (s CreateHsmClientCertificateInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateHsmClientCertificateInput) GoString() string { + return s.String() +} + +type CreateHsmClientCertificateOutput struct { + _ struct{} `type:"structure"` + + // Returns information about an HSM client certificate. The certificate is stored + // in a secure Hardware Storage Module (HSM), and used by the Amazon Redshift + // cluster to encrypt data files. + HsmClientCertificate *HsmClientCertificate `type:"structure"` +} + +// String returns the string representation +func (s CreateHsmClientCertificateOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateHsmClientCertificateOutput) GoString() string { + return s.String() +} + +type CreateHsmConfigurationInput struct { + _ struct{} `type:"structure"` + + // A text description of the HSM configuration to be created. + Description *string `type:"string" required:"true"` + + // The identifier to be assigned to the new Amazon Redshift HSM configuration. + HsmConfigurationIdentifier *string `type:"string" required:"true"` + + // The IP address that the Amazon Redshift cluster must use to access the HSM. + HsmIpAddress *string `type:"string" required:"true"` + + // The name of the partition in the HSM where the Amazon Redshift clusters will + // store their database encryption keys. + HsmPartitionName *string `type:"string" required:"true"` + + // The password required to access the HSM partition. 
+	HsmPartitionPassword *string `type:"string" required:"true"`
+
+	// The HSM's public certificate file. When using Cloud HSM, the file name is
+	// server.pem.
+	HsmServerPublicCertificate *string `type:"string" required:"true"`
+
+	// A list of tag instances.
+	Tags []*Tag `locationNameList:"Tag" type:"list"`
+}
+
+// String returns the string representation
+func (s CreateHsmConfigurationInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreateHsmConfigurationInput) GoString() string {
+	return s.String()
+}
+
+type CreateHsmConfigurationOutput struct {
+	_ struct{} `type:"structure"`
+
+	// Returns information about an HSM configuration, which is an object that describes
+	// to Amazon Redshift clusters the information they require to connect to an
+	// HSM where they can store database encryption keys.
+	HsmConfiguration *HsmConfiguration `type:"structure"`
+}
+
+// String returns the string representation
+func (s CreateHsmConfigurationOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreateHsmConfigurationOutput) GoString() string {
+	return s.String()
+}
+
+// The input for the CreateSnapshotCopyGrant action.
+type CreateSnapshotCopyGrantInput struct {
+	_ struct{} `type:"structure"`
+
+	// The unique identifier of the customer master key (CMK) to which to grant
+	// Amazon Redshift permission. If no key is specified, the default key is used.
+	KmsKeyId *string `type:"string"`
+
+	// The name of the snapshot copy grant. This name must be unique in the region
+	// for the AWS account.
+	//
+	// Constraints:
+	//
+	// Must contain from 1 to 63 alphanumeric characters or hyphens. Alphabetic
+	// characters must be lowercase. First character must be a letter. Cannot end
+	// with a hyphen or contain two consecutive hyphens. Must be unique for all
+	// clusters within an AWS account.
+	SnapshotCopyGrantName *string `type:"string" required:"true"`
+
+	// A list of tag instances.
+	Tags []*Tag `locationNameList:"Tag" type:"list"`
+}
+
+// String returns the string representation
+func (s CreateSnapshotCopyGrantInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreateSnapshotCopyGrantInput) GoString() string {
+	return s.String()
+}
+
+type CreateSnapshotCopyGrantOutput struct {
+	_ struct{} `type:"structure"`
+
+	// The snapshot copy grant that grants Amazon Redshift permission to encrypt
+	// copied snapshots with the specified customer master key (CMK) from AWS KMS
+	// in the destination region.
+	//
+	// For more information about managing snapshot copy grants, go to Amazon
+	// Redshift Database Encryption (http://docs.aws.amazon.com/redshift/latest/mgmt/working-with-db-encryption.html)
+	// in the Amazon Redshift Cluster Management Guide.
+	SnapshotCopyGrant *SnapshotCopyGrant `type:"structure"`
+}
+
+// String returns the string representation
+func (s CreateSnapshotCopyGrantOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreateSnapshotCopyGrantOutput) GoString() string {
+	return s.String()
+}
+
+// The input for the CreateTags action.
+type CreateTagsInput struct {
+	_ struct{} `type:"structure"`
+
+	// The Amazon Resource Name (ARN) to which you want to add the tag or tags.
+	// For example, arn:aws:redshift:us-east-1:123456789:cluster:t1.
+ ResourceName *string `type:"string" required:"true"` + + // One or more name/value pairs to add as tags to the specified resource. Each + // tag name is passed in with the parameter Key and the corresponding value + // is passed in with the parameter Value. The Key and Value parameters are separated + // by a comma (,). Separate multiple tags with a space. For example, --tags + // "Key"="owner","Value"="admin" "Key"="environment","Value"="test" "Key"="version","Value"="1.0". + Tags []*Tag `locationNameList:"Tag" type:"list" required:"true"` +} + +// String returns the string representation +func (s CreateTagsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateTagsInput) GoString() string { + return s.String() +} + +type CreateTagsOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s CreateTagsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateTagsOutput) GoString() string { + return s.String() +} + +// Describes the default cluster parameters for a parameter group family. +type DefaultClusterParameters struct { + _ struct{} `type:"structure"` + + // A value that indicates the starting point for the next set of response records + // in a subsequent request. If a value is returned in a response, you can retrieve + // the next set of records by providing this returned marker value in the Marker + // parameter and retrying the command. If the Marker field is empty, all response + // records have been retrieved for the request. + Marker *string `type:"string"` + + // The name of the cluster parameter group family to which the engine default + // parameters apply. + ParameterGroupFamily *string `type:"string"` + + // The list of cluster default parameters. + Parameters []*Parameter `locationNameList:"Parameter" type:"list"` +} + +// String returns the string representation +func (s DefaultClusterParameters) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DefaultClusterParameters) GoString() string { + return s.String() +} + +type DeleteClusterInput struct { + _ struct{} `type:"structure"` + + // The identifier of the cluster to be deleted. + // + // Constraints: + // + // Must contain lowercase characters. Must contain from 1 to 63 alphanumeric + // characters or hyphens. First character must be a letter. Cannot end with + // a hyphen or contain two consecutive hyphens. + ClusterIdentifier *string `type:"string" required:"true"` + + // The identifier of the final snapshot that is to be created immediately before + // deleting the cluster. If this parameter is provided, SkipFinalClusterSnapshot + // must be false. + // + // Constraints: + // + // Must be 1 to 255 alphanumeric characters. First character must be a letter. + // Cannot end with a hyphen or contain two consecutive hyphens. + FinalClusterSnapshotIdentifier *string `type:"string"` + + // Determines whether a final snapshot of the cluster is created before Amazon + // Redshift deletes the cluster. If true, a final cluster snapshot is not created. + // If false, a final cluster snapshot is created before the cluster is deleted. + // + // The FinalClusterSnapshotIdentifier parameter must be specified if SkipFinalClusterSnapshot + // is false. 
Default: false
+ SkipFinalClusterSnapshot *bool `type:"boolean"`
+}
+
+// String returns the string representation
+func (s DeleteClusterInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteClusterInput) GoString() string {
+ return s.String()
+}
+
+type DeleteClusterOutput struct {
+ _ struct{} `type:"structure"`
+
+ // Describes a cluster.
+ Cluster *Cluster `type:"structure"`
+}
+
+// String returns the string representation
+func (s DeleteClusterOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteClusterOutput) GoString() string {
+ return s.String()
+}
+
+type DeleteClusterParameterGroupInput struct {
+ _ struct{} `type:"structure"`
+
+ // The name of the parameter group to be deleted.
+ //
+ // Constraints:
+ //
+ // Must be the name of an existing cluster parameter group. Cannot delete
+ // a default cluster parameter group.
+ ParameterGroupName *string `type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s DeleteClusterParameterGroupInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteClusterParameterGroupInput) GoString() string {
+ return s.String()
+}
+
+type DeleteClusterParameterGroupOutput struct {
+ _ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s DeleteClusterParameterGroupOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteClusterParameterGroupOutput) GoString() string {
+ return s.String()
+}
+
+type DeleteClusterSecurityGroupInput struct {
+ _ struct{} `type:"structure"`
+
+ // The name of the cluster security group to be deleted.
+ ClusterSecurityGroupName *string `type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s DeleteClusterSecurityGroupInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteClusterSecurityGroupInput) GoString() string {
+ return s.String()
+}
+
+type DeleteClusterSecurityGroupOutput struct {
+ _ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s DeleteClusterSecurityGroupOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteClusterSecurityGroupOutput) GoString() string {
+ return s.String()
+}
+
+type DeleteClusterSnapshotInput struct {
+ _ struct{} `type:"structure"`
+
+ // The unique identifier of the cluster the snapshot was created from. This
+ // parameter is required if your IAM user has a policy containing a snapshot
+ // resource element that specifies anything other than * for the cluster name.
+ //
+ // Constraints: Must be the name of a valid cluster.
+ SnapshotClusterIdentifier *string `type:"string"`
+
+ // The unique identifier of the manual snapshot to be deleted.
+ //
+ // Constraints: Must be the name of an existing snapshot that is in the available
+ // state.
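+ //
+ // A usage sketch (not part of the generated API): it assumes an initialized
+ // *redshift.Redshift client named svc, and the snapshot identifier is
+ // hypothetical.
+ //
+ //	resp, err := svc.DeleteClusterSnapshot(&redshift.DeleteClusterSnapshotInput{
+ //		SnapshotIdentifier: aws.String("my-manual-snapshot"),
+ //	})
+ //	if err != nil {
+ //		// handle the error
+ //	}
+ //	// resp.Snapshot describes the deleted snapshot.
+ //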
+ SnapshotIdentifier *string `type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s DeleteClusterSnapshotInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteClusterSnapshotInput) GoString() string {
+ return s.String()
+}
+
+type DeleteClusterSnapshotOutput struct {
+ _ struct{} `type:"structure"`
+
+ // Describes a snapshot.
+ Snapshot *Snapshot `type:"structure"`
+}
+
+// String returns the string representation
+func (s DeleteClusterSnapshotOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteClusterSnapshotOutput) GoString() string {
+ return s.String()
+}
+
+type DeleteClusterSubnetGroupInput struct {
+ _ struct{} `type:"structure"`
+
+ // The name of the cluster subnet group to be deleted.
+ ClusterSubnetGroupName *string `type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s DeleteClusterSubnetGroupInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteClusterSubnetGroupInput) GoString() string {
+ return s.String()
+}
+
+type DeleteClusterSubnetGroupOutput struct {
+ _ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s DeleteClusterSubnetGroupOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteClusterSubnetGroupOutput) GoString() string {
+ return s.String()
+}
+
+type DeleteEventSubscriptionInput struct {
+ _ struct{} `type:"structure"`
+
+ // The name of the Amazon Redshift event notification subscription to be deleted.
+ SubscriptionName *string `type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s DeleteEventSubscriptionInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteEventSubscriptionInput) GoString() string {
+ return s.String()
+}
+
+type DeleteEventSubscriptionOutput struct {
+ _ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s DeleteEventSubscriptionOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteEventSubscriptionOutput) GoString() string {
+ return s.String()
+}
+
+type DeleteHsmClientCertificateInput struct {
+ _ struct{} `type:"structure"`
+
+ // The identifier of the HSM client certificate to be deleted.
+ HsmClientCertificateIdentifier *string `type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s DeleteHsmClientCertificateInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteHsmClientCertificateInput) GoString() string {
+ return s.String()
+}
+
+type DeleteHsmClientCertificateOutput struct {
+ _ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s DeleteHsmClientCertificateOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteHsmClientCertificateOutput) GoString() string {
+ return s.String()
+}
+
+type DeleteHsmConfigurationInput struct {
+ _ struct{} `type:"structure"`
+
+ // The identifier of the Amazon Redshift HSM configuration to be deleted.
+ HsmConfigurationIdentifier *string `type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s DeleteHsmConfigurationInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteHsmConfigurationInput) GoString() string {
+ return s.String()
+}
+
+type DeleteHsmConfigurationOutput struct {
+ _ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s DeleteHsmConfigurationOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteHsmConfigurationOutput) GoString() string {
+ return s.String()
+}
+
+// The input for the DeleteSnapshotCopyGrant action.
+type DeleteSnapshotCopyGrantInput struct {
+ _ struct{} `type:"structure"`
+
+ // The name of the snapshot copy grant to delete.
+ SnapshotCopyGrantName *string `type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s DeleteSnapshotCopyGrantInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteSnapshotCopyGrantInput) GoString() string {
+ return s.String()
+}
+
+type DeleteSnapshotCopyGrantOutput struct {
+ _ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s DeleteSnapshotCopyGrantOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteSnapshotCopyGrantOutput) GoString() string {
+ return s.String()
+}
+
+// Contains the input parameters for the DeleteTags action.
+type DeleteTagsInput struct {
+ _ struct{} `type:"structure"`
+
+ // The Amazon Resource Name (ARN) from which you want to remove the tag or tags.
+ // For example, arn:aws:redshift:us-east-1:123456789:cluster:t1.
+ ResourceName *string `type:"string" required:"true"`
+
+ // The tag key or keys that you want to delete.
+ TagKeys []*string `locationNameList:"TagKey" type:"list" required:"true"`
+}
+
+// String returns the string representation
+func (s DeleteTagsInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteTagsInput) GoString() string {
+ return s.String()
+}
+
+type DeleteTagsOutput struct {
+ _ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s DeleteTagsOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteTagsOutput) GoString() string {
+ return s.String()
+}
+
+type DescribeClusterParameterGroupsInput struct {
+ _ struct{} `type:"structure"`
+
+ // An optional parameter that specifies the starting point to return a set of
+ // response records. When the results of a DescribeClusterParameterGroups request
+ // exceed the value specified in MaxRecords, AWS returns a value in the Marker
+ // field of the response. You can retrieve the next set of response records
+ // by providing the returned marker value in the Marker parameter and retrying
+ // the request.
+ Marker *string `type:"string"`
+
+ // The maximum number of response records to return in each call. If the number
+ // of remaining response records exceeds the specified MaxRecords value, a value
+ // is returned in a marker field of the response. You can retrieve the next
+ // set of records by retrying the command with the returned marker value.
+ //
+ // Default: 100
+ //
+ // Constraints: minimum 20, maximum 100.
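+ //
+ // A pagination sketch for the Marker/MaxRecords scheme described above (not
+ // part of the generated API; assumes an initialized *redshift.Redshift client
+ // named svc, with real error handling elided):
+ //
+ //	params := &redshift.DescribeClusterParameterGroupsInput{}
+ //	for {
+ //		resp, err := svc.DescribeClusterParameterGroups(params)
+ //		if err != nil {
+ //			break // handle the error in real code
+ //		}
+ //		// ... use resp.ParameterGroups ...
+ //		if resp.Marker == nil {
+ //			break // all response records have been retrieved
+ //		}
+ //		params.Marker = resp.Marker // retry the request with the returned marker
+ //	}
+ //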
+ MaxRecords *int64 `type:"integer"` + + // The name of a specific parameter group for which to return details. By default, + // details about all parameter groups and the default parameter group are returned. + ParameterGroupName *string `type:"string"` + + // A tag key or keys for which you want to return all matching cluster parameter + // groups that are associated with the specified key or keys. For example, suppose + // that you have parameter groups that are tagged with keys called owner and + // environment. If you specify both of these tag keys in the request, Amazon + // Redshift returns a response with the parameter groups that have either or + // both of these tag keys associated with them. + TagKeys []*string `locationNameList:"TagKey" type:"list"` + + // A tag value or values for which you want to return all matching cluster parameter + // groups that are associated with the specified tag value or values. For example, + // suppose that you have parameter groups that are tagged with values called + // admin and test. If you specify both of these tag values in the request, Amazon + // Redshift returns a response with the parameter groups that have either or + // both of these tag values associated with them. + TagValues []*string `locationNameList:"TagValue" type:"list"` +} + +// String returns the string representation +func (s DescribeClusterParameterGroupsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeClusterParameterGroupsInput) GoString() string { + return s.String() +} + +// Contains the output from the DescribeClusterParameterGroups action. +type DescribeClusterParameterGroupsOutput struct { + _ struct{} `type:"structure"` + + // A value that indicates the starting point for the next set of response records + // in a subsequent request. If a value is returned in a response, you can retrieve + // the next set of records by providing this returned marker value in the Marker + // parameter and retrying the command. If the Marker field is empty, all response + // records have been retrieved for the request. + Marker *string `type:"string"` + + // A list of ClusterParameterGroup instances. Each instance describes one cluster + // parameter group. + ParameterGroups []*ClusterParameterGroup `locationNameList:"ClusterParameterGroup" type:"list"` +} + +// String returns the string representation +func (s DescribeClusterParameterGroupsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeClusterParameterGroupsOutput) GoString() string { + return s.String() +} + +type DescribeClusterParametersInput struct { + _ struct{} `type:"structure"` + + // An optional parameter that specifies the starting point to return a set of + // response records. When the results of a DescribeClusterParameters request + // exceed the value specified in MaxRecords, AWS returns a value in the Marker + // field of the response. You can retrieve the next set of response records + // by providing the returned marker value in the Marker parameter and retrying + // the request. + Marker *string `type:"string"` + + // The maximum number of response records to return in each call. If the number + // of remaining response records exceeds the specified MaxRecords value, a value + // is returned in a marker field of the response. You can retrieve the next + // set of records by retrying the command with the returned marker value. 
+ //
+ // Default: 100
+ //
+ // Constraints: minimum 20, maximum 100.
+ MaxRecords *int64 `type:"integer"`
+
+ // The name of a cluster parameter group for which to return details.
+ ParameterGroupName *string `type:"string" required:"true"`
+
+ // The parameter types to return. Specify user to show parameters that are different
+ // from the default. Similarly, specify engine-default to show parameters that
+ // are the same as the default parameter group.
+ //
+ // Default: All parameter types returned.
+ //
+ // Valid Values: user | engine-default
+ Source *string `type:"string"`
+}
+
+// String returns the string representation
+func (s DescribeClusterParametersInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeClusterParametersInput) GoString() string {
+ return s.String()
+}
+
+// Contains the output from the DescribeClusterParameters action.
+type DescribeClusterParametersOutput struct {
+ _ struct{} `type:"structure"`
+
+ // A value that indicates the starting point for the next set of response records
+ // in a subsequent request. If a value is returned in a response, you can retrieve
+ // the next set of records by providing this returned marker value in the Marker
+ // parameter and retrying the command. If the Marker field is empty, all response
+ // records have been retrieved for the request.
+ Marker *string `type:"string"`
+
+ // A list of Parameter instances. Each instance lists the parameters of one
+ // cluster parameter group.
+ Parameters []*Parameter `locationNameList:"Parameter" type:"list"`
+}
+
+// String returns the string representation
+func (s DescribeClusterParametersOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeClusterParametersOutput) GoString() string {
+ return s.String()
+}
+
+type DescribeClusterSecurityGroupsInput struct {
+ _ struct{} `type:"structure"`
+
+ // The name of a cluster security group for which you are requesting details.
+ // You can specify either the Marker parameter or a ClusterSecurityGroupName
+ // parameter, but not both.
+ //
+ // Example: securitygroup1
+ ClusterSecurityGroupName *string `type:"string"`
+
+ // An optional parameter that specifies the starting point to return a set of
+ // response records. When the results of a DescribeClusterSecurityGroups request
+ // exceed the value specified in MaxRecords, AWS returns a value in the Marker
+ // field of the response. You can retrieve the next set of response records
+ // by providing the returned marker value in the Marker parameter and retrying
+ // the request.
+ //
+ // Constraints: You can specify either the ClusterSecurityGroupName parameter
+ // or the Marker parameter, but not both.
+ Marker *string `type:"string"`
+
+ // The maximum number of response records to return in each call. If the number
+ // of remaining response records exceeds the specified MaxRecords value, a value
+ // is returned in a marker field of the response. You can retrieve the next
+ // set of records by retrying the command with the returned marker value.
+ //
+ // Default: 100
+ //
+ // Constraints: minimum 20, maximum 100.
+ MaxRecords *int64 `type:"integer"`
+
+ // A tag key or keys for which you want to return all matching cluster security
+ // groups that are associated with the specified key or keys. For example, suppose
+ // that you have security groups that are tagged with keys called owner and
+ // environment.
If you specify both of these tag keys in the request, Amazon + // Redshift returns a response with the security groups that have either or + // both of these tag keys associated with them. + TagKeys []*string `locationNameList:"TagKey" type:"list"` + + // A tag value or values for which you want to return all matching cluster security + // groups that are associated with the specified tag value or values. For example, + // suppose that you have security groups that are tagged with values called + // admin and test. If you specify both of these tag values in the request, Amazon + // Redshift returns a response with the security groups that have either or + // both of these tag values associated with them. + TagValues []*string `locationNameList:"TagValue" type:"list"` +} + +// String returns the string representation +func (s DescribeClusterSecurityGroupsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeClusterSecurityGroupsInput) GoString() string { + return s.String() +} + +// Contains the output from the DescribeClusterSecurityGroups action. +type DescribeClusterSecurityGroupsOutput struct { + _ struct{} `type:"structure"` + + // A list of ClusterSecurityGroup instances. + ClusterSecurityGroups []*ClusterSecurityGroup `locationNameList:"ClusterSecurityGroup" type:"list"` + + // A value that indicates the starting point for the next set of response records + // in a subsequent request. If a value is returned in a response, you can retrieve + // the next set of records by providing this returned marker value in the Marker + // parameter and retrying the command. If the Marker field is empty, all response + // records have been retrieved for the request. + Marker *string `type:"string"` +} + +// String returns the string representation +func (s DescribeClusterSecurityGroupsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeClusterSecurityGroupsOutput) GoString() string { + return s.String() +} + +type DescribeClusterSnapshotsInput struct { + _ struct{} `type:"structure"` + + // The identifier of the cluster for which information about snapshots is requested. + ClusterIdentifier *string `type:"string"` + + // A time value that requests only snapshots created at or before the specified + // time. The time value is specified in ISO 8601 format. For more information + // about ISO 8601, go to the ISO8601 Wikipedia page. (http://en.wikipedia.org/wiki/ISO_8601) + // + // Example: 2012-07-16T18:00:00Z + EndTime *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + // An optional parameter that specifies the starting point to return a set of + // response records. When the results of a DescribeClusterSnapshots request + // exceed the value specified in MaxRecords, AWS returns a value in the Marker + // field of the response. You can retrieve the next set of response records + // by providing the returned marker value in the Marker parameter and retrying + // the request. + Marker *string `type:"string"` + + // The maximum number of response records to return in each call. If the number + // of remaining response records exceeds the specified MaxRecords value, a value + // is returned in a marker field of the response. You can retrieve the next + // set of records by retrying the command with the returned marker value. + // + // Default: 100 + // + // Constraints: minimum 20, maximum 100. 
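+ //
+ // A usage sketch for this input (not part of the generated API; assumes an
+ // initialized client named svc, and the cluster identifier and time window
+ // are hypothetical):
+ //
+ //	resp, err := svc.DescribeClusterSnapshots(&redshift.DescribeClusterSnapshotsInput{
+ //		ClusterIdentifier: aws.String("examplecluster"),
+ //		StartTime:         aws.Time(time.Date(2012, 7, 16, 18, 0, 0, 0, time.UTC)),
+ //		SnapshotType:      aws.String("manual"),
+ //	})
+ //	if err != nil {
+ //		// handle the error
+ //	}
+ //	// resp.Snapshots lists the matching snapshots.
+ //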
+ MaxRecords *int64 `type:"integer"` + + // The AWS customer account used to create or copy the snapshot. Use this field + // to filter the results to snapshots owned by a particular account. To describe + // snapshots you own, either specify your AWS customer account, or do not specify + // the parameter. + OwnerAccount *string `type:"string"` + + // The snapshot identifier of the snapshot about which to return information. + SnapshotIdentifier *string `type:"string"` + + // The type of snapshots for which you are requesting information. By default, + // snapshots of all types are returned. + // + // Valid Values: automated | manual + SnapshotType *string `type:"string"` + + // A value that requests only snapshots created at or after the specified time. + // The time value is specified in ISO 8601 format. For more information about + // ISO 8601, go to the ISO8601 Wikipedia page. (http://en.wikipedia.org/wiki/ISO_8601) + // + // Example: 2012-07-16T18:00:00Z + StartTime *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + // A tag key or keys for which you want to return all matching cluster snapshots + // that are associated with the specified key or keys. For example, suppose + // that you have snapshots that are tagged with keys called owner and environment. + // If you specify both of these tag keys in the request, Amazon Redshift returns + // a response with the snapshots that have either or both of these tag keys + // associated with them. + TagKeys []*string `locationNameList:"TagKey" type:"list"` + + // A tag value or values for which you want to return all matching cluster snapshots + // that are associated with the specified tag value or values. For example, + // suppose that you have snapshots that are tagged with values called admin + // and test. If you specify both of these tag values in the request, Amazon + // Redshift returns a response with the snapshots that have either or both of + // these tag values associated with them. + TagValues []*string `locationNameList:"TagValue" type:"list"` +} + +// String returns the string representation +func (s DescribeClusterSnapshotsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeClusterSnapshotsInput) GoString() string { + return s.String() +} + +// Contains the output from the DescribeClusterSnapshots action. +type DescribeClusterSnapshotsOutput struct { + _ struct{} `type:"structure"` + + // A value that indicates the starting point for the next set of response records + // in a subsequent request. If a value is returned in a response, you can retrieve + // the next set of records by providing this returned marker value in the Marker + // parameter and retrying the command. If the Marker field is empty, all response + // records have been retrieved for the request. + Marker *string `type:"string"` + + // A list of Snapshot instances. + Snapshots []*Snapshot `locationNameList:"Snapshot" type:"list"` +} + +// String returns the string representation +func (s DescribeClusterSnapshotsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeClusterSnapshotsOutput) GoString() string { + return s.String() +} + +type DescribeClusterSubnetGroupsInput struct { + _ struct{} `type:"structure"` + + // The name of the cluster subnet group for which information is requested. 
+ ClusterSubnetGroupName *string `type:"string"` + + // An optional parameter that specifies the starting point to return a set of + // response records. When the results of a DescribeClusterSubnetGroups request + // exceed the value specified in MaxRecords, AWS returns a value in the Marker + // field of the response. You can retrieve the next set of response records + // by providing the returned marker value in the Marker parameter and retrying + // the request. + Marker *string `type:"string"` + + // The maximum number of response records to return in each call. If the number + // of remaining response records exceeds the specified MaxRecords value, a value + // is returned in a marker field of the response. You can retrieve the next + // set of records by retrying the command with the returned marker value. + // + // Default: 100 + // + // Constraints: minimum 20, maximum 100. + MaxRecords *int64 `type:"integer"` + + // A tag key or keys for which you want to return all matching cluster subnet + // groups that are associated with the specified key or keys. For example, suppose + // that you have subnet groups that are tagged with keys called owner and environment. + // If you specify both of these tag keys in the request, Amazon Redshift returns + // a response with the subnet groups that have either or both of these tag keys + // associated with them. + TagKeys []*string `locationNameList:"TagKey" type:"list"` + + // A tag value or values for which you want to return all matching cluster subnet + // groups that are associated with the specified tag value or values. For example, + // suppose that you have subnet groups that are tagged with values called admin + // and test. If you specify both of these tag values in the request, Amazon + // Redshift returns a response with the subnet groups that have either or both + // of these tag values associated with them. + TagValues []*string `locationNameList:"TagValue" type:"list"` +} + +// String returns the string representation +func (s DescribeClusterSubnetGroupsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeClusterSubnetGroupsInput) GoString() string { + return s.String() +} + +// Contains the output from the DescribeClusterSubnetGroups action. +type DescribeClusterSubnetGroupsOutput struct { + _ struct{} `type:"structure"` + + // A list of ClusterSubnetGroup instances. + ClusterSubnetGroups []*ClusterSubnetGroup `locationNameList:"ClusterSubnetGroup" type:"list"` + + // A value that indicates the starting point for the next set of response records + // in a subsequent request. If a value is returned in a response, you can retrieve + // the next set of records by providing this returned marker value in the Marker + // parameter and retrying the command. If the Marker field is empty, all response + // records have been retrieved for the request. + Marker *string `type:"string"` +} + +// String returns the string representation +func (s DescribeClusterSubnetGroupsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeClusterSubnetGroupsOutput) GoString() string { + return s.String() +} + +type DescribeClusterVersionsInput struct { + _ struct{} `type:"structure"` + + // The name of a specific cluster parameter group family to return details for. 
+ //
+ // Constraints:
+ //
+ // Must be 1 to 255 alphanumeric characters. First character must be a letter.
+ // Cannot end with a hyphen or contain two consecutive hyphens.
+ ClusterParameterGroupFamily *string `type:"string"`
+
+ // The specific cluster version to return.
+ //
+ // Example: 1.0
+ ClusterVersion *string `type:"string"`
+
+ // An optional parameter that specifies the starting point to return a set of
+ // response records. When the results of a DescribeClusterVersions request exceed
+ // the value specified in MaxRecords, AWS returns a value in the Marker field
+ // of the response. You can retrieve the next set of response records by providing
+ // the returned marker value in the Marker parameter and retrying the request.
+ Marker *string `type:"string"`
+
+ // The maximum number of response records to return in each call. If the number
+ // of remaining response records exceeds the specified MaxRecords value, a value
+ // is returned in a marker field of the response. You can retrieve the next
+ // set of records by retrying the command with the returned marker value.
+ //
+ // Default: 100
+ //
+ // Constraints: minimum 20, maximum 100.
+ MaxRecords *int64 `type:"integer"`
+}
+
+// String returns the string representation
+func (s DescribeClusterVersionsInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeClusterVersionsInput) GoString() string {
+ return s.String()
+}
+
+// Contains the output from the DescribeClusterVersions action.
+type DescribeClusterVersionsOutput struct {
+ _ struct{} `type:"structure"`
+
+ // A list of Version elements.
+ ClusterVersions []*ClusterVersion `locationNameList:"ClusterVersion" type:"list"`
+
+ // A value that indicates the starting point for the next set of response records
+ // in a subsequent request. If a value is returned in a response, you can retrieve
+ // the next set of records by providing this returned marker value in the Marker
+ // parameter and retrying the command. If the Marker field is empty, all response
+ // records have been retrieved for the request.
+ Marker *string `type:"string"`
+}
+
+// String returns the string representation
+func (s DescribeClusterVersionsOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeClusterVersionsOutput) GoString() string {
+ return s.String()
+}
+
+type DescribeClustersInput struct {
+ _ struct{} `type:"structure"`
+
+ // The unique identifier of a cluster whose properties you are requesting. This
+ // parameter is case-sensitive.
+ //
+ // The default is that all clusters defined for an account are returned.
+ ClusterIdentifier *string `type:"string"`
+
+ // An optional parameter that specifies the starting point to return a set of
+ // response records. When the results of a DescribeClusters request exceed the
+ // value specified in MaxRecords, AWS returns a value in the Marker field of
+ // the response. You can retrieve the next set of response records by providing
+ // the returned marker value in the Marker parameter and retrying the request.
+ //
+ // Constraints: You can specify either the ClusterIdentifier parameter or
+ // the Marker parameter, but not both.
+ Marker *string `type:"string"`
+
+ // The maximum number of response records to return in each call. If the number
+ // of remaining response records exceeds the specified MaxRecords value, a value
+ // is returned in a marker field of the response.
You can retrieve the next + // set of records by retrying the command with the returned marker value. + // + // Default: 100 + // + // Constraints: minimum 20, maximum 100. + MaxRecords *int64 `type:"integer"` + + // A tag key or keys for which you want to return all matching clusters that + // are associated with the specified key or keys. For example, suppose that + // you have clusters that are tagged with keys called owner and environment. + // If you specify both of these tag keys in the request, Amazon Redshift returns + // a response with the clusters that have either or both of these tag keys associated + // with them. + TagKeys []*string `locationNameList:"TagKey" type:"list"` + + // A tag value or values for which you want to return all matching clusters + // that are associated with the specified tag value or values. For example, + // suppose that you have clusters that are tagged with values called admin and + // test. If you specify both of these tag values in the request, Amazon Redshift + // returns a response with the clusters that have either or both of these tag + // values associated with them. + TagValues []*string `locationNameList:"TagValue" type:"list"` +} + +// String returns the string representation +func (s DescribeClustersInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeClustersInput) GoString() string { + return s.String() +} + +// Contains the output from the DescribeClusters action. +type DescribeClustersOutput struct { + _ struct{} `type:"structure"` + + // A list of Cluster objects, where each object describes one cluster. + Clusters []*Cluster `locationNameList:"Cluster" type:"list"` + + // A value that indicates the starting point for the next set of response records + // in a subsequent request. If a value is returned in a response, you can retrieve + // the next set of records by providing this returned marker value in the Marker + // parameter and retrying the command. If the Marker field is empty, all response + // records have been retrieved for the request. + Marker *string `type:"string"` +} + +// String returns the string representation +func (s DescribeClustersOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeClustersOutput) GoString() string { + return s.String() +} + +type DescribeDefaultClusterParametersInput struct { + _ struct{} `type:"structure"` + + // An optional parameter that specifies the starting point to return a set of + // response records. When the results of a DescribeDefaultClusterParameters + // request exceed the value specified in MaxRecords, AWS returns a value in + // the Marker field of the response. You can retrieve the next set of response + // records by providing the returned marker value in the Marker parameter and + // retrying the request. + Marker *string `type:"string"` + + // The maximum number of response records to return in each call. If the number + // of remaining response records exceeds the specified MaxRecords value, a value + // is returned in a marker field of the response. You can retrieve the next + // set of records by retrying the command with the returned marker value. + // + // Default: 100 + // + // Constraints: minimum 20, maximum 100. + MaxRecords *int64 `type:"integer"` + + // The name of the cluster parameter group family. 
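+ //
+ // A usage sketch (not part of the generated API; assumes an initialized client
+ // named svc, and "redshift-1.0" is an assumed family name):
+ //
+ //	resp, err := svc.DescribeDefaultClusterParameters(
+ //		&redshift.DescribeDefaultClusterParametersInput{
+ //			ParameterGroupFamily: aws.String("redshift-1.0"),
+ //		})
+ //	if err != nil {
+ //		// handle the error
+ //	}
+ //	// resp.DefaultClusterParameters.Parameters holds the engine defaults.
+ //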
+ ParameterGroupFamily *string `type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s DescribeDefaultClusterParametersInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeDefaultClusterParametersInput) GoString() string {
+ return s.String()
+}
+
+type DescribeDefaultClusterParametersOutput struct {
+ _ struct{} `type:"structure"`
+
+ // Describes the default cluster parameters for a parameter group family.
+ DefaultClusterParameters *DefaultClusterParameters `type:"structure"`
+}
+
+// String returns the string representation
+func (s DescribeDefaultClusterParametersOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeDefaultClusterParametersOutput) GoString() string {
+ return s.String()
+}
+
+type DescribeEventCategoriesInput struct {
+ _ struct{} `type:"structure"`
+
+ // The source type, such as cluster or parameter group, to which the described
+ // event categories apply.
+ //
+ // Valid values: cluster, snapshot, parameter group, and security group.
+ SourceType *string `type:"string"`
+}
+
+// String returns the string representation
+func (s DescribeEventCategoriesInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeEventCategoriesInput) GoString() string {
+ return s.String()
+}
+
+type DescribeEventCategoriesOutput struct {
+ _ struct{} `type:"structure"`
+
+ // A list of event category descriptions.
+ EventCategoriesMapList []*EventCategoriesMap `locationNameList:"EventCategoriesMap" type:"list"`
+}
+
+// String returns the string representation
+func (s DescribeEventCategoriesOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeEventCategoriesOutput) GoString() string {
+ return s.String()
+}
+
+type DescribeEventSubscriptionsInput struct {
+ _ struct{} `type:"structure"`
+
+ // An optional parameter that specifies the starting point to return a set of
+ // response records. When the results of a DescribeEventSubscriptions request
+ // exceed the value specified in MaxRecords, AWS returns a value in the Marker
+ // field of the response. You can retrieve the next set of response records
+ // by providing the returned marker value in the Marker parameter and retrying
+ // the request.
+ Marker *string `type:"string"`
+
+ // The maximum number of response records to return in each call. If the number
+ // of remaining response records exceeds the specified MaxRecords value, a value
+ // is returned in a marker field of the response. You can retrieve the next
+ // set of records by retrying the command with the returned marker value.
+ //
+ // Default: 100
+ //
+ // Constraints: minimum 20, maximum 100.
+ MaxRecords *int64 `type:"integer"`
+
+ // The name of the Amazon Redshift event notification subscription to be described.
+ SubscriptionName *string `type:"string"`
+}
+
+// String returns the string representation
+func (s DescribeEventSubscriptionsInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeEventSubscriptionsInput) GoString() string {
+ return s.String()
+}
+
+type DescribeEventSubscriptionsOutput struct {
+ _ struct{} `type:"structure"`
+
+ // A list of event subscriptions.
+ EventSubscriptionsList []*EventSubscription `locationNameList:"EventSubscription" type:"list"` + + // A value that indicates the starting point for the next set of response records + // in a subsequent request. If a value is returned in a response, you can retrieve + // the next set of records by providing this returned marker value in the Marker + // parameter and retrying the command. If the Marker field is empty, all response + // records have been retrieved for the request. + Marker *string `type:"string"` +} + +// String returns the string representation +func (s DescribeEventSubscriptionsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeEventSubscriptionsOutput) GoString() string { + return s.String() +} + +type DescribeEventsInput struct { + _ struct{} `type:"structure"` + + // The number of minutes prior to the time of the request for which to retrieve + // events. For example, if the request is sent at 18:00 and you specify a duration + // of 60, then only events which have occurred after 17:00 will be returned. + // + // Default: 60 + Duration *int64 `type:"integer"` + + // The end of the time interval for which to retrieve events, specified in ISO + // 8601 format. For more information about ISO 8601, go to the ISO8601 Wikipedia + // page. (http://en.wikipedia.org/wiki/ISO_8601) + // + // Example: 2009-07-08T18:00Z + EndTime *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + // An optional parameter that specifies the starting point to return a set of + // response records. When the results of a DescribeEvents request exceed the + // value specified in MaxRecords, AWS returns a value in the Marker field of + // the response. You can retrieve the next set of response records by providing + // the returned marker value in the Marker parameter and retrying the request. + Marker *string `type:"string"` + + // The maximum number of response records to return in each call. If the number + // of remaining response records exceeds the specified MaxRecords value, a value + // is returned in a marker field of the response. You can retrieve the next + // set of records by retrying the command with the returned marker value. + // + // Default: 100 + // + // Constraints: minimum 20, maximum 100. + MaxRecords *int64 `type:"integer"` + + // The identifier of the event source for which events will be returned. If + // this parameter is not specified, then all sources are included in the response. + // + // Constraints: + // + // If SourceIdentifier is supplied, SourceType must also be provided. + // + // Specify a cluster identifier when SourceType is cluster. Specify a cluster + // security group name when SourceType is cluster-security-group. Specify a + // cluster parameter group name when SourceType is cluster-parameter-group. + // Specify a cluster snapshot identifier when SourceType is cluster-snapshot. + SourceIdentifier *string `type:"string"` + + // The event source to retrieve events for. If no value is specified, all events + // are returned. + // + // Constraints: + // + // If SourceType is supplied, SourceIdentifier must also be provided. + // + // Specify cluster when SourceIdentifier is a cluster identifier. Specify + // cluster-security-group when SourceIdentifier is a cluster security group + // name. Specify cluster-parameter-group when SourceIdentifier is a cluster + // parameter group name. Specify cluster-snapshot when SourceIdentifier is a + // cluster snapshot identifier. 
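+ //
+ // A usage sketch of the SourceType/SourceIdentifier pairing described above
+ // (not part of the generated API; assumes an initialized client named svc,
+ // and the cluster identifier is hypothetical):
+ //
+ //	resp, err := svc.DescribeEvents(&redshift.DescribeEventsInput{
+ //		SourceType:       aws.String("cluster"),
+ //		SourceIdentifier: aws.String("examplecluster"),
+ //		Duration:         aws.Int64(60), // the default window, in minutes
+ //	})
+ //	if err != nil {
+ //		// handle the error
+ //	}
+ //	// resp.Events lists the matching events.
+ //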
+ SourceType *string `type:"string" enum:"SourceType"` + + // The beginning of the time interval to retrieve events for, specified in ISO + // 8601 format. For more information about ISO 8601, go to the ISO8601 Wikipedia + // page. (http://en.wikipedia.org/wiki/ISO_8601) + // + // Example: 2009-07-08T18:00Z + StartTime *time.Time `type:"timestamp" timestampFormat:"iso8601"` +} + +// String returns the string representation +func (s DescribeEventsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeEventsInput) GoString() string { + return s.String() +} + +// Contains the output from the DescribeEvents action. +type DescribeEventsOutput struct { + _ struct{} `type:"structure"` + + // A list of Event instances. + Events []*Event `locationNameList:"Event" type:"list"` + + // A value that indicates the starting point for the next set of response records + // in a subsequent request. If a value is returned in a response, you can retrieve + // the next set of records by providing this returned marker value in the Marker + // parameter and retrying the command. If the Marker field is empty, all response + // records have been retrieved for the request. + Marker *string `type:"string"` +} + +// String returns the string representation +func (s DescribeEventsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeEventsOutput) GoString() string { + return s.String() +} + +type DescribeHsmClientCertificatesInput struct { + _ struct{} `type:"structure"` + + // The identifier of a specific HSM client certificate for which you want information. + // If no identifier is specified, information is returned for all HSM client + // certificates owned by your AWS customer account. + HsmClientCertificateIdentifier *string `type:"string"` + + // An optional parameter that specifies the starting point to return a set of + // response records. When the results of a DescribeHsmClientCertificates request + // exceed the value specified in MaxRecords, AWS returns a value in the Marker + // field of the response. You can retrieve the next set of response records + // by providing the returned marker value in the Marker parameter and retrying + // the request. + Marker *string `type:"string"` + + // The maximum number of response records to return in each call. If the number + // of remaining response records exceeds the specified MaxRecords value, a value + // is returned in a marker field of the response. You can retrieve the next + // set of records by retrying the command with the returned marker value. + // + // Default: 100 + // + // Constraints: minimum 20, maximum 100. + MaxRecords *int64 `type:"integer"` + + // A tag key or keys for which you want to return all matching HSM client certificates + // that are associated with the specified key or keys. For example, suppose + // that you have HSM client certificates that are tagged with keys called owner + // and environment. If you specify both of these tag keys in the request, Amazon + // Redshift returns a response with the HSM client certificates that have either + // or both of these tag keys associated with them. + TagKeys []*string `locationNameList:"TagKey" type:"list"` + + // A tag value or values for which you want to return all matching HSM client + // certificates that are associated with the specified tag value or values. 
+ // For example, suppose that you have HSM client certificates that are tagged + // with values called admin and test. If you specify both of these tag values + // in the request, Amazon Redshift returns a response with the HSM client certificates + // that have either or both of these tag values associated with them. + TagValues []*string `locationNameList:"TagValue" type:"list"` +} + +// String returns the string representation +func (s DescribeHsmClientCertificatesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeHsmClientCertificatesInput) GoString() string { + return s.String() +} + +type DescribeHsmClientCertificatesOutput struct { + _ struct{} `type:"structure"` + + // A list of the identifiers for one or more HSM client certificates used by + // Amazon Redshift clusters to store and retrieve database encryption keys in + // an HSM. + HsmClientCertificates []*HsmClientCertificate `locationNameList:"HsmClientCertificate" type:"list"` + + // A value that indicates the starting point for the next set of response records + // in a subsequent request. If a value is returned in a response, you can retrieve + // the next set of records by providing this returned marker value in the Marker + // parameter and retrying the command. If the Marker field is empty, all response + // records have been retrieved for the request. + Marker *string `type:"string"` +} + +// String returns the string representation +func (s DescribeHsmClientCertificatesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeHsmClientCertificatesOutput) GoString() string { + return s.String() +} + +type DescribeHsmConfigurationsInput struct { + _ struct{} `type:"structure"` + + // The identifier of a specific Amazon Redshift HSM configuration to be described. + // If no identifier is specified, information is returned for all HSM configurations + // owned by your AWS customer account. + HsmConfigurationIdentifier *string `type:"string"` + + // An optional parameter that specifies the starting point to return a set of + // response records. When the results of a DescribeHsmConfigurations request + // exceed the value specified in MaxRecords, AWS returns a value in the Marker + // field of the response. You can retrieve the next set of response records + // by providing the returned marker value in the Marker parameter and retrying + // the request. + Marker *string `type:"string"` + + // The maximum number of response records to return in each call. If the number + // of remaining response records exceeds the specified MaxRecords value, a value + // is returned in a marker field of the response. You can retrieve the next + // set of records by retrying the command with the returned marker value. + // + // Default: 100 + // + // Constraints: minimum 20, maximum 100. + MaxRecords *int64 `type:"integer"` + + // A tag key or keys for which you want to return all matching HSM configurations + // that are associated with the specified key or keys. For example, suppose + // that you have HSM configurations that are tagged with keys called owner and + // environment. If you specify both of these tag keys in the request, Amazon + // Redshift returns a response with the HSM configurations that have either + // or both of these tag keys associated with them. 
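+ //
+ // A usage sketch of the tag-key filter (not part of the generated API; assumes
+ // an initialized client named svc, with the keys mirroring the owner/environment
+ // example above):
+ //
+ //	resp, err := svc.DescribeHsmConfigurations(&redshift.DescribeHsmConfigurationsInput{
+ //		TagKeys: []*string{aws.String("owner"), aws.String("environment")},
+ //	})
+ //	if err != nil {
+ //		// handle the error
+ //	}
+ //	// resp.HsmConfigurations lists the matching HSM configurations.
+ //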
+ TagKeys []*string `locationNameList:"TagKey" type:"list"`
+
+ // A tag value or values for which you want to return all matching HSM configurations
+ // that are associated with the specified tag value or values. For example,
+ // suppose that you have HSM configurations that are tagged with values called
+ // admin and test. If you specify both of these tag values in the request, Amazon
+ // Redshift returns a response with the HSM configurations that have either
+ // or both of these tag values associated with them.
+ TagValues []*string `locationNameList:"TagValue" type:"list"`
+}
+
+// String returns the string representation
+func (s DescribeHsmConfigurationsInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeHsmConfigurationsInput) GoString() string {
+ return s.String()
+}
+
+type DescribeHsmConfigurationsOutput struct {
+ _ struct{} `type:"structure"`
+
+ // A list of Amazon Redshift HSM configurations.
+ HsmConfigurations []*HsmConfiguration `locationNameList:"HsmConfiguration" type:"list"`
+
+ // A value that indicates the starting point for the next set of response records
+ // in a subsequent request. If a value is returned in a response, you can retrieve
+ // the next set of records by providing this returned marker value in the Marker
+ // parameter and retrying the command. If the Marker field is empty, all response
+ // records have been retrieved for the request.
+ Marker *string `type:"string"`
+}
+
+// String returns the string representation
+func (s DescribeHsmConfigurationsOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeHsmConfigurationsOutput) GoString() string {
+ return s.String()
+}
+
+type DescribeLoggingStatusInput struct {
+ _ struct{} `type:"structure"`
+
+ // The identifier of the cluster to get the logging status from.
+ //
+ // Example: examplecluster
+ ClusterIdentifier *string `type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s DescribeLoggingStatusInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeLoggingStatusInput) GoString() string {
+ return s.String()
+}
+
+type DescribeOrderableClusterOptionsInput struct {
+ _ struct{} `type:"structure"`
+
+ // The version filter value. Specify this parameter to show only the available
+ // offerings matching the specified version.
+ //
+ // Default: All versions.
+ //
+ // Constraints: Must be one of the versions returned by DescribeClusterVersions.
+ ClusterVersion *string `type:"string"`
+
+ // An optional parameter that specifies the starting point to return a set of
+ // response records. When the results of a DescribeOrderableClusterOptions request
+ // exceed the value specified in MaxRecords, AWS returns a value in the Marker
+ // field of the response. You can retrieve the next set of response records
+ // by providing the returned marker value in the Marker parameter and retrying
+ // the request.
+ Marker *string `type:"string"`
+
+ // The maximum number of response records to return in each call. If the number
+ // of remaining response records exceeds the specified MaxRecords value, a value
+ // is returned in a marker field of the response. You can retrieve the next
+ // set of records by retrying the command with the returned marker value.
+ //
+ // Default: 100
+ //
+ // Constraints: minimum 20, maximum 100.
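+ //
+ // A usage sketch of the version and node-type filters in this input (not part
+ // of the generated API; assumes an initialized client named svc, and "dw2.large"
+ // is a hypothetical node type):
+ //
+ //	resp, err := svc.DescribeOrderableClusterOptions(&redshift.DescribeOrderableClusterOptionsInput{
+ //		ClusterVersion: aws.String("1.0"),
+ //		NodeType:       aws.String("dw2.large"),
+ //	})
+ //	if err != nil {
+ //		// handle the error
+ //	}
+ //	// resp.OrderableClusterOptions lists the matching offerings.
+ //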
+ MaxRecords *int64 `type:"integer"` + + // The node type filter value. Specify this parameter to show only the available + // offerings matching the specified node type. + NodeType *string `type:"string"` +} + +// String returns the string representation +func (s DescribeOrderableClusterOptionsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeOrderableClusterOptionsInput) GoString() string { + return s.String() +} + +// Contains the output from the DescribeOrderableClusterOptions action. +type DescribeOrderableClusterOptionsOutput struct { + _ struct{} `type:"structure"` + + // A value that indicates the starting point for the next set of response records + // in a subsequent request. If a value is returned in a response, you can retrieve + // the next set of records by providing this returned marker value in the Marker + // parameter and retrying the command. If the Marker field is empty, all response + // records have been retrieved for the request. + Marker *string `type:"string"` + + // An OrderableClusterOption structure containing information about orderable + // options for the Cluster. + OrderableClusterOptions []*OrderableClusterOption `locationNameList:"OrderableClusterOption" type:"list"` +} + +// String returns the string representation +func (s DescribeOrderableClusterOptionsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeOrderableClusterOptionsOutput) GoString() string { + return s.String() +} + +type DescribeReservedNodeOfferingsInput struct { + _ struct{} `type:"structure"` + + // An optional parameter that specifies the starting point to return a set of + // response records. When the results of a DescribeReservedNodeOfferings request + // exceed the value specified in MaxRecords, AWS returns a value in the Marker + // field of the response. You can retrieve the next set of response records + // by providing the returned marker value in the Marker parameter and retrying + // the request. + Marker *string `type:"string"` + + // The maximum number of response records to return in each call. If the number + // of remaining response records exceeds the specified MaxRecords value, a value + // is returned in a marker field of the response. You can retrieve the next + // set of records by retrying the command with the returned marker value. + // + // Default: 100 + // + // Constraints: minimum 20, maximum 100. + MaxRecords *int64 `type:"integer"` + + // The unique identifier for the offering. + ReservedNodeOfferingId *string `type:"string"` +} + +// String returns the string representation +func (s DescribeReservedNodeOfferingsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeReservedNodeOfferingsInput) GoString() string { + return s.String() +} + +// Contains the output from the DescribeReservedNodeOfferings action. +type DescribeReservedNodeOfferingsOutput struct { + _ struct{} `type:"structure"` + + // A value that indicates the starting point for the next set of response records + // in a subsequent request. If a value is returned in a response, you can retrieve + // the next set of records by providing this returned marker value in the Marker + // parameter and retrying the command. If the Marker field is empty, all response + // records have been retrieved for the request. + Marker *string `type:"string"` + + // A list of reserved node offerings. 
+ ReservedNodeOfferings []*ReservedNodeOffering `locationNameList:"ReservedNodeOffering" type:"list"` +} + +// String returns the string representation +func (s DescribeReservedNodeOfferingsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeReservedNodeOfferingsOutput) GoString() string { + return s.String() +} + +type DescribeReservedNodesInput struct { + _ struct{} `type:"structure"` + + // An optional parameter that specifies the starting point to return a set of + // response records. When the results of a DescribeReservedNodes request exceed + // the value specified in MaxRecords, AWS returns a value in the Marker field + // of the response. You can retrieve the next set of response records by providing + // the returned marker value in the Marker parameter and retrying the request. + Marker *string `type:"string"` + + // The maximum number of response records to return in each call. If the number + // of remaining response records exceeds the specified MaxRecords value, a value + // is returned in a marker field of the response. You can retrieve the next + // set of records by retrying the command with the returned marker value. + // + // Default: 100 + // + // Constraints: minimum 20, maximum 100. + MaxRecords *int64 `type:"integer"` + + // Identifier for the node reservation. + ReservedNodeId *string `type:"string"` +} + +// String returns the string representation +func (s DescribeReservedNodesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeReservedNodesInput) GoString() string { + return s.String() +} + +// Contains the output from the DescribeReservedNodes action. +type DescribeReservedNodesOutput struct { + _ struct{} `type:"structure"` + + // A value that indicates the starting point for the next set of response records + // in a subsequent request. If a value is returned in a response, you can retrieve + // the next set of records by providing this returned marker value in the Marker + // parameter and retrying the command. If the Marker field is empty, all response + // records have been retrieved for the request. + Marker *string `type:"string"` + + // The list of reserved nodes. + ReservedNodes []*ReservedNode `locationNameList:"ReservedNode" type:"list"` +} + +// String returns the string representation +func (s DescribeReservedNodesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeReservedNodesOutput) GoString() string { + return s.String() +} + +type DescribeResizeInput struct { + _ struct{} `type:"structure"` + + // The unique identifier of a cluster whose resize progress you are requesting. + // This parameter is case-sensitive. + // + // By default, resize operations for all clusters defined for an AWS account + // are returned. + ClusterIdentifier *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DescribeResizeInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeResizeInput) GoString() string { + return s.String() +} + +// Describes the result of a cluster resize operation. +type DescribeResizeOutput struct { + _ struct{} `type:"structure"` + + // The average rate of the resize operation over the last few minutes, measured + // in megabytes per second. 
After the resize operation completes, this value
+ // shows the average rate of the entire resize operation.
+ AvgResizeRateInMegaBytesPerSecond *float64 `type:"double"`
+
+ // The number of seconds that have elapsed since the resize operation began.
+ // After the resize operation completes, this value shows the total actual time,
+ // in seconds, for the resize operation.
+ ElapsedTimeInSeconds *int64 `type:"long"`
+
+ // The estimated time remaining, in seconds, until the resize operation is complete.
+ // This value is calculated based on the average resize rate and the estimated
+ // amount of data remaining to be processed. Once the resize operation is complete,
+ // this value will be 0.
+ EstimatedTimeToCompletionInSeconds *int64 `type:"long"`
+
+ // The names of tables that have been completely imported.
+ //
+ // Valid Values: List of table names.
+ ImportTablesCompleted []*string `type:"list"`
+
+ // The names of tables that are currently being imported.
+ //
+ // Valid Values: List of table names.
+ ImportTablesInProgress []*string `type:"list"`
+
+ // The names of tables that have not yet been imported.
+ //
+ // Valid Values: List of table names.
+ ImportTablesNotStarted []*string `type:"list"`
+
+ // While the resize operation is in progress, this value shows the current amount
+ // of data, in megabytes, that has been processed so far. When the resize operation
+ // is complete, this value shows the total amount of data, in megabytes, on
+ // the cluster, which may be more or less than TotalResizeDataInMegaBytes (the
+ // estimated total amount of data before resize).
+ ProgressInMegaBytes *int64 `type:"long"`
+
+ // The status of the resize operation.
+ //
+ // Valid Values: NONE | IN_PROGRESS | FAILED | SUCCEEDED
+ Status *string `type:"string"`
+
+ // The cluster type after the resize operation is complete.
+ //
+ // Valid Values: multi-node | single-node
+ TargetClusterType *string `type:"string"`
+
+ // The node type that the cluster will have after the resize operation is complete.
+ TargetNodeType *string `type:"string"`
+
+ // The number of nodes that the cluster will have after the resize operation
+ // is complete.
+ TargetNumberOfNodes *int64 `type:"integer"`
+
+ // The estimated total amount of data, in megabytes, on the cluster before the
+ // resize operation began.
+ TotalResizeDataInMegaBytes *int64 `type:"long"`
+}
+
+// String returns the string representation
+func (s DescribeResizeOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeResizeOutput) GoString() string {
+ return s.String()
+}
+
+// The input for the DescribeSnapshotCopyGrants action.
+type DescribeSnapshotCopyGrantsInput struct {
+ _ struct{} `type:"structure"`
+
+ // An optional parameter that specifies the starting point to return a set of
+ // response records. When the results of a DescribeSnapshotCopyGrants request
+ // exceed the value specified in MaxRecords, AWS returns a value in the Marker
+ // field of the response. You can retrieve the next set of response records
+ // by providing the returned marker value in the Marker parameter and retrying
+ // the request.
+ //
+ // Constraints: You can specify either the SnapshotCopyGrantName parameter
+ // or the Marker parameter, but not both.
+ Marker *string `type:"string"`
+
+ // The maximum number of response records to return in each call. If the number
+ // of remaining response records exceeds the specified MaxRecords value, a value
+ // is returned in a marker field of the response. You can retrieve the next
+ // set of records by retrying the command with the returned marker value.
+ //
+ // Default: 100
+ //
+ // Constraints: minimum 20, maximum 100.
+ MaxRecords *int64 `type:"integer"`
+
+ // The name of the snapshot copy grant.
+ SnapshotCopyGrantName *string `type:"string"`
+
+ // A tag key or keys for which you want to return all matching resources that
+ // are associated with the specified key or keys. For example, suppose that
+ // you have resources tagged with keys called owner and environment. If you
+ // specify both of these tag keys in the request, Amazon Redshift returns a
+ // response with all resources that have either or both of these tag keys associated
+ // with them.
+ TagKeys []*string `locationNameList:"TagKey" type:"list"`
+
+ // A tag value or values for which you want to return all matching resources
+ // that are associated with the specified value or values. For example, suppose
+ // that you have resources tagged with values called admin and test. If you
+ // specify both of these tag values in the request, Amazon Redshift returns
+ // a response with all resources that have either or both of these tag values
+ // associated with them.
+ TagValues []*string `locationNameList:"TagValue" type:"list"`
+}
+
+// String returns the string representation
+func (s DescribeSnapshotCopyGrantsInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeSnapshotCopyGrantsInput) GoString() string {
+ return s.String()
+}
+
+// The result of the DescribeSnapshotCopyGrants action.
+type DescribeSnapshotCopyGrantsOutput struct {
+ _ struct{} `type:"structure"`
+
+ // An optional parameter that specifies the starting point to return a set of
+ // response records. When the results of a DescribeSnapshotCopyGrants request
+ // exceed the value specified in MaxRecords, AWS returns a value in the Marker
+ // field of the response. You can retrieve the next set of response records
+ // by providing the returned marker value in the Marker parameter and retrying
+ // the request.
+ //
+ // Constraints: You can specify either the SnapshotCopyGrantName parameter
+ // or the Marker parameter, but not both.
+ Marker *string `type:"string"`
+
+ // The list of snapshot copy grants.
+ SnapshotCopyGrants []*SnapshotCopyGrant `locationNameList:"SnapshotCopyGrant" type:"list"`
+}
+
+// String returns the string representation
+func (s DescribeSnapshotCopyGrantsOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeSnapshotCopyGrantsOutput) GoString() string {
+ return s.String()
+}
+
+// Contains the input for the DescribeTags action.
+type DescribeTagsInput struct {
+ _ struct{} `type:"structure"`
+
+ // A value that indicates the starting point for the next set of response records
+ // in a subsequent request. If a value is returned in a response, you can retrieve
+ // the next set of records by providing this returned marker value in the marker
+ // parameter and retrying the command. If the marker field is empty, all response
+ // records have been retrieved for the request.
+ Marker *string `type:"string"`
+
+ // The maximum number of response records to return in each call.
If the number + // of remaining response records exceeds the specified MaxRecords value, a value + // is returned in a marker field of the response. You can retrieve the next + // set of records by retrying the command with the returned marker value. + MaxRecords *int64 `type:"integer"` + + // The Amazon Resource Name (ARN) for which you want to describe the tag or + // tags. For example, arn:aws:redshift:us-east-1:123456789:cluster:t1. + ResourceName *string `type:"string"` + + // The type of resource with which you want to view tags. Valid resource types + // are: Cluster CIDR/IP EC2 security group Snapshot Cluster security group + // Subnet group HSM connection HSM certificate Parameter group Snapshot copy + // grant + // + // For more information about Amazon Redshift resource types and constructing + // ARNs, go to Constructing an Amazon Redshift Amazon Resource Name (ARN) (http://docs.aws.amazon.com/redshift/latest/mgmt/constructing-redshift-arn.html) + // in the Amazon Redshift Cluster Management Guide. + ResourceType *string `type:"string"` + + // A tag key or keys for which you want to return all matching resources that + // are associated with the specified key or keys. For example, suppose that + // you have resources tagged with keys called owner and environment. If you + // specify both of these tag keys in the request, Amazon Redshift returns a + // response with all resources that have either or both of these tag keys associated + // with them. + TagKeys []*string `locationNameList:"TagKey" type:"list"` + + // A tag value or values for which you want to return all matching resources + // that are associated with the specified value or values. For example, suppose + // that you have resources tagged with values called admin and test. If you + // specify both of these tag values in the request, Amazon Redshift returns + // a response with all resources that have either or both of these tag values + // associated with them. + TagValues []*string `locationNameList:"TagValue" type:"list"` +} + +// String returns the string representation +func (s DescribeTagsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeTagsInput) GoString() string { + return s.String() +} + +// Contains the output from the DescribeTags action. +type DescribeTagsOutput struct { + _ struct{} `type:"structure"` + + // A value that indicates the starting point for the next set of response records + // in a subsequent request. If a value is returned in a response, you can retrieve + // the next set of records by providing this returned marker value in the Marker + // parameter and retrying the command. If the Marker field is empty, all response + // records have been retrieved for the request. + Marker *string `type:"string"` + + // A list of tags with their associated resources. + TaggedResources []*TaggedResource `locationNameList:"TaggedResource" type:"list"` +} + +// String returns the string representation +func (s DescribeTagsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeTagsOutput) GoString() string { + return s.String() +} + +type DisableLoggingInput struct { + _ struct{} `type:"structure"` + + // The identifier of the cluster on which logging is to be stopped. 
+ //
+ // Example: examplecluster
+ ClusterIdentifier *string `type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s DisableLoggingInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DisableLoggingInput) GoString() string {
+ return s.String()
+}
+
+type DisableSnapshotCopyInput struct {
+ _ struct{} `type:"structure"`
+
+ // The unique identifier of the source cluster for which you want to disable
+ // copying of snapshots to a destination region.
+ //
+ // Constraints: Must be the valid name of an existing cluster that has cross-region
+ // snapshot copy enabled.
+ ClusterIdentifier *string `type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s DisableSnapshotCopyInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DisableSnapshotCopyInput) GoString() string {
+ return s.String()
+}
+
+type DisableSnapshotCopyOutput struct {
+ _ struct{} `type:"structure"`
+
+ // Describes a cluster.
+ Cluster *Cluster `type:"structure"`
+}
+
+// String returns the string representation
+func (s DisableSnapshotCopyOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DisableSnapshotCopyOutput) GoString() string {
+ return s.String()
+}
+
+// Describes an Amazon EC2 security group.
+type EC2SecurityGroup struct {
+ _ struct{} `type:"structure"`
+
+ // The name of the EC2 Security Group.
+ EC2SecurityGroupName *string `type:"string"`
+
+ // The AWS ID of the owner of the EC2 security group specified in the EC2SecurityGroupName
+ // field.
+ EC2SecurityGroupOwnerId *string `type:"string"`
+
+ // The status of the EC2 security group.
+ Status *string `type:"string"`
+
+ // The list of tags for the EC2 security group.
+ Tags []*Tag `locationNameList:"Tag" type:"list"`
+}
+
+// String returns the string representation
+func (s EC2SecurityGroup) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s EC2SecurityGroup) GoString() string {
+ return s.String()
+}
+
+// Describes the status of the elastic IP (EIP) address.
+type ElasticIpStatus struct {
+ _ struct{} `type:"structure"`
+
+ // The elastic IP (EIP) address for the cluster.
+ ElasticIp *string `type:"string"`
+
+ // Describes the status of the elastic IP (EIP) address.
+ Status *string `type:"string"`
+}
+
+// String returns the string representation
+func (s ElasticIpStatus) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ElasticIpStatus) GoString() string {
+ return s.String()
+}
+
+type EnableLoggingInput struct {
+ _ struct{} `type:"structure"`
+
+ // The name of an existing S3 bucket where the log files are to be stored.
+ //
+ // Constraints:
+ //
+ // Must be in the same region as the cluster The cluster must have read bucket
+ // and put object permissions
+ BucketName *string `type:"string" required:"true"`
+
+ // The identifier of the cluster on which logging is to be started.
+ //
+ // Example: examplecluster
+ ClusterIdentifier *string `type:"string" required:"true"`
+
+ // The prefix applied to the log file names.
+ //
+ // Constraints:
+ //
+ // Cannot exceed 512 characters Cannot contain spaces ( ), double quotes ("),
+ // single quotes ('), a backslash (\), or control characters.
The hexadecimal + // codes for invalid characters are: x00 to x20 x22 x27 x5c x7f or larger + S3KeyPrefix *string `type:"string"` +} + +// String returns the string representation +func (s EnableLoggingInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s EnableLoggingInput) GoString() string { + return s.String() +} + +type EnableSnapshotCopyInput struct { + _ struct{} `type:"structure"` + + // The unique identifier of the source cluster to copy snapshots from. + // + // Constraints: Must be the valid name of an existing cluster that does not + // already have cross-region snapshot copy enabled. + ClusterIdentifier *string `type:"string" required:"true"` + + // The destination region that you want to copy snapshots to. + // + // Constraints: Must be the name of a valid region. For more information, + // see Regions and Endpoints (http://docs.aws.amazon.com/general/latest/gr/rande.html#redshift_region) + // in the Amazon Web Services General Reference. + DestinationRegion *string `type:"string" required:"true"` + + // The number of days to retain automated snapshots in the destination region + // after they are copied from the source region. + // + // Default: 7. + // + // Constraints: Must be at least 1 and no more than 35. + RetentionPeriod *int64 `type:"integer"` + + // The name of the snapshot copy grant to use when snapshots of an AWS KMS-encrypted + // cluster are copied to the destination region. + SnapshotCopyGrantName *string `type:"string"` +} + +// String returns the string representation +func (s EnableSnapshotCopyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s EnableSnapshotCopyInput) GoString() string { + return s.String() +} + +type EnableSnapshotCopyOutput struct { + _ struct{} `type:"structure"` + + // Describes a cluster. + Cluster *Cluster `type:"structure"` +} + +// String returns the string representation +func (s EnableSnapshotCopyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s EnableSnapshotCopyOutput) GoString() string { + return s.String() +} + +// Describes a connection endpoint. +type Endpoint struct { + _ struct{} `type:"structure"` + + // The DNS address of the Cluster. + Address *string `type:"string"` + + // The port that the database engine is listening on. + Port *int64 `type:"integer"` +} + +// String returns the string representation +func (s Endpoint) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Endpoint) GoString() string { + return s.String() +} + +// Describes an event. +type Event struct { + _ struct{} `type:"structure"` + + // The date and time of the event. + Date *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + // A list of the event categories. + // + // Values: Configuration, Management, Monitoring, Security + EventCategories []*string `locationNameList:"EventCategory" type:"list"` + + // The identifier of the event. + EventId *string `type:"string"` + + // The text of this event. + Message *string `type:"string"` + + // The severity of the event. + // + // Values: ERROR, INFO + Severity *string `type:"string"` + + // The identifier for the source of the event. + SourceIdentifier *string `type:"string"` + + // The source type for this event. 
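+ //
+ // For illustration only (not part of the generated file): a sketch of
+ // inspecting returned events, assuming an initialized client
+ // svc := redshift.New(session.New()) and the DescribeEvents operation
+ // defined elsewhere in this package:
+ //
+ //    resp, err := svc.DescribeEvents(&redshift.DescribeEventsInput{})
+ //    if err == nil {
+ //        for _, e := range resp.Events {
+ //            if aws.StringValue(e.Severity) == "ERROR" {
+ //                fmt.Println(aws.StringValue(e.SourceIdentifier), aws.StringValue(e.Message))
+ //            }
+ //        }
+ //    }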
+ SourceType *string `type:"string" enum:"SourceType"`
+}
+
+// String returns the string representation
+func (s Event) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s Event) GoString() string {
+ return s.String()
+}
+
+type EventCategoriesMap struct {
+ _ struct{} `type:"structure"`
+
+ // The events in the event category.
+ Events []*EventInfoMap `locationNameList:"EventInfoMap" type:"list"`
+
+ // The Amazon Redshift source type, such as cluster or cluster-snapshot, that
+ // the returned categories belong to.
+ SourceType *string `type:"string"`
+}
+
+// String returns the string representation
+func (s EventCategoriesMap) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s EventCategoriesMap) GoString() string {
+ return s.String()
+}
+
+type EventInfoMap struct {
+ _ struct{} `type:"structure"`
+
+ // The category of an Amazon Redshift event.
+ EventCategories []*string `locationNameList:"EventCategory" type:"list"`
+
+ // The description of an Amazon Redshift event.
+ EventDescription *string `type:"string"`
+
+ // The identifier of an Amazon Redshift event.
+ EventId *string `type:"string"`
+
+ // The severity of the event.
+ //
+ // Values: ERROR, INFO
+ Severity *string `type:"string"`
+}
+
+// String returns the string representation
+func (s EventInfoMap) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s EventInfoMap) GoString() string {
+ return s.String()
+}
+
+type EventSubscription struct {
+ _ struct{} `type:"structure"`
+
+ // The name of the Amazon Redshift event notification subscription.
+ CustSubscriptionId *string `type:"string"`
+
+ // The AWS customer account associated with the Amazon Redshift event notification
+ // subscription.
+ CustomerAwsId *string `type:"string"`
+
+ // A Boolean value indicating whether the subscription is enabled. true indicates
+ // the subscription is enabled.
+ Enabled *bool `type:"boolean"`
+
+ // The list of Amazon Redshift event categories specified in the event notification
+ // subscription.
+ //
+ // Values: Configuration, Management, Monitoring, Security
+ EventCategoriesList []*string `locationNameList:"EventCategory" type:"list"`
+
+ // The event severity specified in the Amazon Redshift event notification subscription.
+ //
+ // Values: ERROR, INFO
+ Severity *string `type:"string"`
+
+ // The Amazon Resource Name (ARN) of the Amazon SNS topic used by the event
+ // notification subscription.
+ SnsTopicArn *string `type:"string"`
+
+ // A list of the sources that publish events to the Amazon Redshift event notification
+ // subscription.
+ SourceIdsList []*string `locationNameList:"SourceId" type:"list"`
+
+ // The source type of the events returned by the Amazon Redshift event notification,
+ // such as cluster or cluster-snapshot.
+ SourceType *string `type:"string"`
+
+ // The status of the Amazon Redshift event notification subscription.
+ //
+ // Constraints:
+ //
+ // Can be one of the following: active | no-permission | topic-not-exist The
+ // status "no-permission" indicates that Amazon Redshift no longer has permission
+ // to post to the Amazon SNS topic. The status "topic-not-exist" indicates that
+ // the topic was deleted after the subscription was created.
+ Status *string `type:"string"`
+
+ // The date and time the Amazon Redshift event notification subscription was
+ // created.
+ SubscriptionCreationTime *time.Time `type:"timestamp" timestampFormat:"iso8601"`
+
+ // The list of tags for the event subscription.
+ Tags []*Tag `locationNameList:"Tag" type:"list"`
+}
+
+// String returns the string representation
+func (s EventSubscription) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s EventSubscription) GoString() string {
+ return s.String()
+}
+
+// Returns information about an HSM client certificate. The certificate is stored
+// in a secure Hardware Security Module (HSM), and used by the Amazon Redshift
+// cluster to encrypt data files.
+type HsmClientCertificate struct {
+ _ struct{} `type:"structure"`
+
+ // The identifier of the HSM client certificate.
+ HsmClientCertificateIdentifier *string `type:"string"`
+
+ // The public key that the Amazon Redshift cluster will use to connect to the
+ // HSM. You must register the public key in the HSM.
+ HsmClientCertificatePublicKey *string `type:"string"`
+
+ // The list of tags for the HSM client certificate.
+ Tags []*Tag `locationNameList:"Tag" type:"list"`
+}
+
+// String returns the string representation
+func (s HsmClientCertificate) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s HsmClientCertificate) GoString() string {
+ return s.String()
+}
+
+// Returns information about an HSM configuration, which is an object that describes
+// to Amazon Redshift clusters the information they require to connect to an
+// HSM where they can store database encryption keys.
+type HsmConfiguration struct {
+ _ struct{} `type:"structure"`
+
+ // A text description of the HSM configuration.
+ Description *string `type:"string"`
+
+ // The name of the Amazon Redshift HSM configuration.
+ HsmConfigurationIdentifier *string `type:"string"`
+
+ // The IP address that the Amazon Redshift cluster must use to access the HSM.
+ HsmIpAddress *string `type:"string"`
+
+ // The name of the partition in the HSM where the Amazon Redshift clusters will
+ // store their database encryption keys.
+ HsmPartitionName *string `type:"string"`
+
+ // The list of tags for the HSM configuration.
+ Tags []*Tag `locationNameList:"Tag" type:"list"`
+}
+
+// String returns the string representation
+func (s HsmConfiguration) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s HsmConfiguration) GoString() string {
+ return s.String()
+}
+
+type HsmStatus struct {
+ _ struct{} `type:"structure"`
+
+ // Specifies the name of the HSM client certificate the Amazon Redshift cluster
+ // uses to retrieve the data encryption keys stored in an HSM.
+ HsmClientCertificateIdentifier *string `type:"string"`
+
+ // Specifies the name of the HSM configuration that contains the information
+ // the Amazon Redshift cluster can use to retrieve and store keys in an HSM.
+ HsmConfigurationIdentifier *string `type:"string"`
+
+ // Reports whether the Amazon Redshift cluster has finished applying any HSM
+ // settings changes specified in a modify cluster command.
+ //
+ // Values: active, applying
+ Status *string `type:"string"`
+}
+
+// String returns the string representation
+func (s HsmStatus) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s HsmStatus) GoString() string {
+ return s.String()
+}
+
+// Describes an IP range used in a security group.
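+//
+// For illustration only (not part of the generated file): an IP range that
+// appears in a cluster security group can be revoked by CIDR, assuming an
+// initialized client svc := redshift.New(session.New()):
+//
+//    _, err := svc.RevokeClusterSecurityGroupIngress(&redshift.RevokeClusterSecurityGroupIngressInput{
+//        ClusterSecurityGroupName: aws.String("mygroup"),      // hypothetical group name
+//        CIDRIP:                   aws.String("192.0.2.0/24"), // hypothetical range
+//    })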
+type IPRange struct { + _ struct{} `type:"structure"` + + // The IP range in Classless Inter-Domain Routing (CIDR) notation. + CIDRIP *string `type:"string"` + + // The status of the IP range, for example, "authorized". + Status *string `type:"string"` + + // The list of tags for the IP range. + Tags []*Tag `locationNameList:"Tag" type:"list"` +} + +// String returns the string representation +func (s IPRange) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s IPRange) GoString() string { + return s.String() +} + +// Describes the status of logging for a cluster. +type LoggingStatus struct { + _ struct{} `type:"structure"` + + // The name of the S3 bucket where the log files are stored. + BucketName *string `type:"string"` + + // The message indicating that logs failed to be delivered. + LastFailureMessage *string `type:"string"` + + // The last time when logs failed to be delivered. + LastFailureTime *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + // The last time when logs were delivered. + LastSuccessfulDeliveryTime *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + // true if logging is on, false if logging is off. + LoggingEnabled *bool `type:"boolean"` + + // The prefix applied to the log file names. + S3KeyPrefix *string `type:"string"` +} + +// String returns the string representation +func (s LoggingStatus) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s LoggingStatus) GoString() string { + return s.String() +} + +type ModifyClusterInput struct { + _ struct{} `type:"structure"` + + // If true, major version upgrades will be applied automatically to the cluster + // during the maintenance window. + // + // Default: false + AllowVersionUpgrade *bool `type:"boolean"` + + // The number of days that automated snapshots are retained. If the value is + // 0, automated snapshots are disabled. Even if automated snapshots are disabled, + // you can still create manual snapshots when you want with CreateClusterSnapshot. + // + // If you decrease the automated snapshot retention period from its current + // value, existing automated snapshots that fall outside of the new retention + // period will be immediately deleted. + // + // Default: Uses existing setting. + // + // Constraints: Must be a value from 0 to 35. + AutomatedSnapshotRetentionPeriod *int64 `type:"integer"` + + // The unique identifier of the cluster to be modified. + // + // Example: examplecluster + ClusterIdentifier *string `type:"string" required:"true"` + + // The name of the cluster parameter group to apply to this cluster. This change + // is applied only after the cluster is rebooted. To reboot a cluster use RebootCluster. + // + // Default: Uses existing setting. + // + // Constraints: The cluster parameter group must be in the same parameter group + // family that matches the cluster version. + ClusterParameterGroupName *string `type:"string"` + + // A list of cluster security groups to be authorized on this cluster. This + // change is asynchronously applied as soon as possible. + // + // Security groups currently associated with the cluster, and not in the list + // of groups to apply, will be revoked from the cluster. 
+ //
+ // Constraints:
+ //
+ // Must be 1 to 255 alphanumeric characters or hyphens First character must
+ // be a letter Cannot end with a hyphen or contain two consecutive hyphens
+ ClusterSecurityGroups []*string `locationNameList:"ClusterSecurityGroupName" type:"list"`
+
+ // The new cluster type.
+ //
+ // When you submit your cluster resize request, your existing cluster goes
+ // into a read-only mode. After Amazon Redshift provisions a new cluster based
+ // on your resize requirements, there will be an outage for a period while the
+ // old cluster is deleted and your connection is switched to the new cluster.
+ // You can use DescribeResize to track the progress of the resize request.
+ //
+ // Valid Values: multi-node | single-node
+ ClusterType *string `type:"string"`
+
+ // The new version number of the Amazon Redshift engine to upgrade to.
+ //
+ // For major version upgrades, if a non-default cluster parameter group is
+ // currently in use, a new cluster parameter group in the cluster parameter
+ // group family for the new version must be specified. The new cluster parameter
+ // group can be the default for that cluster parameter group family. For more
+ // information about parameters and parameter groups, go to Amazon Redshift
+ // Parameter Groups (http://docs.aws.amazon.com/redshift/latest/mgmt/working-with-parameter-groups.html)
+ // in the Amazon Redshift Cluster Management Guide.
+ //
+ // Example: 1.0
+ ClusterVersion *string `type:"string"`
+
+ // Specifies the name of the HSM client certificate the Amazon Redshift cluster
+ // uses to retrieve the data encryption keys stored in an HSM.
+ HsmClientCertificateIdentifier *string `type:"string"`
+
+ // Specifies the name of the HSM configuration that contains the information
+ // the Amazon Redshift cluster can use to retrieve and store keys in an HSM.
+ HsmConfigurationIdentifier *string `type:"string"`
+
+ // The new password for the cluster master user. This change is asynchronously
+ // applied as soon as possible. Between the time of the request and the completion
+ // of the request, the MasterUserPassword element exists in the PendingModifiedValues
+ // element of the operation response. Operations never return the password,
+ // so this operation provides a way to regain access to the master user account
+ // for a cluster if the password is lost.
+ //
+ // Default: Uses existing setting.
+ //
+ // Constraints:
+ //
+ // Must be between 8 and 64 characters in length. Must contain at least one
+ // uppercase letter. Must contain at least one lowercase letter. Must contain
+ // one number. Can be any printable ASCII character (ASCII code 33 to 126) except
+ // ' (single quote), " (double quote), \, /, @, or space.
+ MasterUserPassword *string `type:"string"`
+
+ // The new identifier for the cluster.
+ //
+ // Constraints:
+ //
+ // Must contain from 1 to 63 alphanumeric characters or hyphens. Alphabetic
+ // characters must be lowercase. First character must be a letter. Cannot end
+ // with a hyphen or contain two consecutive hyphens. Must be unique for all
+ // clusters within an AWS account. Example: examplecluster
+ NewClusterIdentifier *string `type:"string"`
+
+ // The new node type of the cluster. If you specify a new node type, you must
+ // also specify the number of nodes parameter.
+ //
+ // When you submit your request to resize a cluster, Amazon Redshift sets
+ // access permissions for the cluster to read-only.
After Amazon Redshift provisions + // a new cluster according to your resize requirements, there will be a temporary + // outage while the old cluster is deleted and your connection is switched to + // the new cluster. When the new connection is complete, the original access + // permissions for the cluster are restored. You can use DescribeResize to track + // the progress of the resize request. + // + // Valid Values: ds1.xlarge | ds1.8xlarge | ds2.xlarge | ds2.8xlarge | dc1.large + // | dc1.8xlarge. + NodeType *string `type:"string"` + + // The new number of nodes of the cluster. If you specify a new number of nodes, + // you must also specify the node type parameter. + // + // When you submit your request to resize a cluster, Amazon Redshift sets + // access permissions for the cluster to read-only. After Amazon Redshift provisions + // a new cluster according to your resize requirements, there will be a temporary + // outage while the old cluster is deleted and your connection is switched to + // the new cluster. When the new connection is complete, the original access + // permissions for the cluster are restored. You can use DescribeResize to track + // the progress of the resize request. + // + // Valid Values: Integer greater than 0. + NumberOfNodes *int64 `type:"integer"` + + // The weekly time range (in UTC) during which system maintenance can occur, + // if necessary. If system maintenance is necessary during the window, it may + // result in an outage. + // + // This maintenance window change is made immediately. If the new maintenance + // window indicates the current time, there must be at least 120 minutes between + // the current time and end of the window in order to ensure that pending changes + // are applied. + // + // Default: Uses existing setting. + // + // Format: ddd:hh24:mi-ddd:hh24:mi, for example wed:07:30-wed:08:00. + // + // Valid Days: Mon | Tue | Wed | Thu | Fri | Sat | Sun + // + // Constraints: Must be at least 30 minutes. + PreferredMaintenanceWindow *string `type:"string"` + + // A list of virtual private cloud (VPC) security groups to be associated with + // the cluster. + VpcSecurityGroupIds []*string `locationNameList:"VpcSecurityGroupId" type:"list"` +} + +// String returns the string representation +func (s ModifyClusterInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModifyClusterInput) GoString() string { + return s.String() +} + +type ModifyClusterOutput struct { + _ struct{} `type:"structure"` + + // Describes a cluster. + Cluster *Cluster `type:"structure"` +} + +// String returns the string representation +func (s ModifyClusterOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModifyClusterOutput) GoString() string { + return s.String() +} + +type ModifyClusterParameterGroupInput struct { + _ struct{} `type:"structure"` + + // The name of the parameter group to be modified. + ParameterGroupName *string `type:"string" required:"true"` + + // An array of parameters to be modified. A maximum of 20 parameters can be + // modified in a single request. + // + // For each parameter to be modified, you must supply at least the parameter + // name and parameter value; other name-value pairs of the parameter are optional. + // + // For the workload management (WLM) configuration, you must supply all the + // name-value pairs in the wlm_json_configuration parameter. 
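+ //
+ // For illustration only (not part of the generated file): a minimal sketch,
+ // assuming an initialized client svc := redshift.New(session.New()), that
+ // modifies a single parameter:
+ //
+ //    _, err := svc.ModifyClusterParameterGroup(&redshift.ModifyClusterParameterGroupInput{
+ //        ParameterGroupName: aws.String("my-parameter-group"), // hypothetical name
+ //        Parameters: []*redshift.Parameter{{
+ //            ParameterName:  aws.String("statement_timeout"),
+ //            ParameterValue: aws.String("60000"),
+ //        }},
+ //    })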
+ Parameters []*Parameter `locationNameList:"Parameter" type:"list" required:"true"` +} + +// String returns the string representation +func (s ModifyClusterParameterGroupInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModifyClusterParameterGroupInput) GoString() string { + return s.String() +} + +type ModifyClusterSubnetGroupInput struct { + _ struct{} `type:"structure"` + + // The name of the subnet group to be modified. + ClusterSubnetGroupName *string `type:"string" required:"true"` + + // A text description of the subnet group to be modified. + Description *string `type:"string"` + + // An array of VPC subnet IDs. A maximum of 20 subnets can be modified in a + // single request. + SubnetIds []*string `locationNameList:"SubnetIdentifier" type:"list" required:"true"` +} + +// String returns the string representation +func (s ModifyClusterSubnetGroupInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModifyClusterSubnetGroupInput) GoString() string { + return s.String() +} + +type ModifyClusterSubnetGroupOutput struct { + _ struct{} `type:"structure"` + + // Describes a subnet group. + ClusterSubnetGroup *ClusterSubnetGroup `type:"structure"` +} + +// String returns the string representation +func (s ModifyClusterSubnetGroupOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModifyClusterSubnetGroupOutput) GoString() string { + return s.String() +} + +type ModifyEventSubscriptionInput struct { + _ struct{} `type:"structure"` + + // A Boolean value indicating if the subscription is enabled. true indicates + // the subscription is enabled + Enabled *bool `type:"boolean"` + + // Specifies the Amazon Redshift event categories to be published by the event + // notification subscription. + // + // Values: Configuration, Management, Monitoring, Security + EventCategories []*string `locationNameList:"EventCategory" type:"list"` + + // Specifies the Amazon Redshift event severity to be published by the event + // notification subscription. + // + // Values: ERROR, INFO + Severity *string `type:"string"` + + // The Amazon Resource Name (ARN) of the SNS topic to be used by the event notification + // subscription. + SnsTopicArn *string `type:"string"` + + // A list of one or more identifiers of Amazon Redshift source objects. All + // of the objects must be of the same type as was specified in the source type + // parameter. The event subscription will return only events generated by the + // specified objects. If not specified, then events are returned for all objects + // within the source type specified. + // + // Example: my-cluster-1, my-cluster-2 + // + // Example: my-snapshot-20131010 + SourceIds []*string `locationNameList:"SourceId" type:"list"` + + // The type of source that will be generating the events. For example, if you + // want to be notified of events generated by a cluster, you would set this + // parameter to cluster. If this value is not specified, events are returned + // for all Amazon Redshift objects in your AWS account. You must specify a source + // type in order to specify source IDs. + // + // Valid values: cluster, cluster-parameter-group, cluster-security-group, + // and cluster-snapshot. + SourceType *string `type:"string"` + + // The name of the modified Amazon Redshift event notification subscription. 
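+ //
+ // For illustration only (not part of the generated file): a sketch, assuming
+ // an initialized client svc, that narrows an existing subscription to ERROR
+ // events from clusters:
+ //
+ //    _, err := svc.ModifyEventSubscription(&redshift.ModifyEventSubscriptionInput{
+ //        SubscriptionName: aws.String("my-subscription"), // hypothetical name
+ //        SourceType:       aws.String("cluster"),
+ //        Severity:         aws.String("ERROR"),
+ //    })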
+ SubscriptionName *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s ModifyEventSubscriptionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModifyEventSubscriptionInput) GoString() string { + return s.String() +} + +type ModifyEventSubscriptionOutput struct { + _ struct{} `type:"structure"` + + EventSubscription *EventSubscription `type:"structure"` +} + +// String returns the string representation +func (s ModifyEventSubscriptionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModifyEventSubscriptionOutput) GoString() string { + return s.String() +} + +type ModifySnapshotCopyRetentionPeriodInput struct { + _ struct{} `type:"structure"` + + // The unique identifier of the cluster for which you want to change the retention + // period for automated snapshots that are copied to a destination region. + // + // Constraints: Must be the valid name of an existing cluster that has cross-region + // snapshot copy enabled. + ClusterIdentifier *string `type:"string" required:"true"` + + // The number of days to retain automated snapshots in the destination region + // after they are copied from the source region. + // + // If you decrease the retention period for automated snapshots that are copied + // to a destination region, Amazon Redshift will delete any existing automated + // snapshots that were copied to the destination region and that fall outside + // of the new retention period. + // + // Constraints: Must be at least 1 and no more than 35. + RetentionPeriod *int64 `type:"integer" required:"true"` +} + +// String returns the string representation +func (s ModifySnapshotCopyRetentionPeriodInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModifySnapshotCopyRetentionPeriodInput) GoString() string { + return s.String() +} + +type ModifySnapshotCopyRetentionPeriodOutput struct { + _ struct{} `type:"structure"` + + // Describes a cluster. + Cluster *Cluster `type:"structure"` +} + +// String returns the string representation +func (s ModifySnapshotCopyRetentionPeriodOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModifySnapshotCopyRetentionPeriodOutput) GoString() string { + return s.String() +} + +// Describes an orderable cluster option. +type OrderableClusterOption struct { + _ struct{} `type:"structure"` + + // A list of availability zones for the orderable cluster. + AvailabilityZones []*AvailabilityZone `locationNameList:"AvailabilityZone" type:"list"` + + // The cluster type, for example multi-node. + ClusterType *string `type:"string"` + + // The version of the orderable cluster. + ClusterVersion *string `type:"string"` + + // The node type for the orderable cluster. + NodeType *string `type:"string"` +} + +// String returns the string representation +func (s OrderableClusterOption) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s OrderableClusterOption) GoString() string { + return s.String() +} + +// Describes a parameter in a cluster parameter group. +type Parameter struct { + _ struct{} `type:"structure"` + + // The valid range of values for the parameter. + AllowedValues *string `type:"string"` + + // Specifies how to apply the parameter. Supported value: static. 
+ ApplyType *string `type:"string" enum:"ParameterApplyType"`
+
+ // The data type of the parameter.
+ DataType *string `type:"string"`
+
+ // A description of the parameter.
+ Description *string `type:"string"`
+
+ // If true, the parameter can be modified. Some parameters have security or
+ // operational implications that prevent them from being changed.
+ IsModifiable *bool `type:"boolean"`
+
+ // The earliest engine version to which the parameter can apply.
+ MinimumEngineVersion *string `type:"string"`
+
+ // The name of the parameter.
+ ParameterName *string `type:"string"`
+
+ // The value of the parameter.
+ ParameterValue *string `type:"string"`
+
+ // The source of the parameter value, such as "engine-default" or "user".
+ Source *string `type:"string"`
+}
+
+// String returns the string representation
+func (s Parameter) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s Parameter) GoString() string {
+ return s.String()
+}
+
+// Describes cluster attributes that are in a pending state. A change to one
+// or more of the attributes was requested and is in progress or will be applied.
+type PendingModifiedValues struct {
+ _ struct{} `type:"structure"`
+
+ // The pending or in-progress change of the automated snapshot retention period.
+ AutomatedSnapshotRetentionPeriod *int64 `type:"integer"`
+
+ // The pending or in-progress change of the new identifier for the cluster.
+ ClusterIdentifier *string `type:"string"`
+
+ // The pending or in-progress change of the cluster type.
+ ClusterType *string `type:"string"`
+
+ // The pending or in-progress change of the service version.
+ ClusterVersion *string `type:"string"`
+
+ // The pending or in-progress change of the master user password for the cluster.
+ MasterUserPassword *string `type:"string"`
+
+ // The pending or in-progress change of the cluster's node type.
+ NodeType *string `type:"string"`
+
+ // The pending or in-progress change of the number of nodes in the cluster.
+ NumberOfNodes *int64 `type:"integer"`
+}
+
+// String returns the string representation
+func (s PendingModifiedValues) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s PendingModifiedValues) GoString() string {
+ return s.String()
+}
+
+type PurchaseReservedNodeOfferingInput struct {
+ _ struct{} `type:"structure"`
+
+ // The number of reserved nodes you want to purchase.
+ //
+ // Default: 1
+ NodeCount *int64 `type:"integer"`
+
+ // The unique identifier of the reserved node offering you want to purchase.
+ ReservedNodeOfferingId *string `type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s PurchaseReservedNodeOfferingInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s PurchaseReservedNodeOfferingInput) GoString() string {
+ return s.String()
+}
+
+type PurchaseReservedNodeOfferingOutput struct {
+ _ struct{} `type:"structure"`
+
+ // Describes a reserved node. You can call the DescribeReservedNodeOfferings
+ // API to obtain the available reserved node offerings.
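+ //
+ // For illustration only (not part of the generated file): a sketch of the
+ // browse-then-purchase flow, assuming an initialized client svc:
+ //
+ //    offerings, err := svc.DescribeReservedNodeOfferings(&redshift.DescribeReservedNodeOfferingsInput{})
+ //    if err == nil && len(offerings.ReservedNodeOfferings) > 0 {
+ //        _, err = svc.PurchaseReservedNodeOffering(&redshift.PurchaseReservedNodeOfferingInput{
+ //            ReservedNodeOfferingId: offerings.ReservedNodeOfferings[0].ReservedNodeOfferingId,
+ //            NodeCount:              aws.Int64(1),
+ //        })
+ //    }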
+ ReservedNode *ReservedNode `type:"structure"` +} + +// String returns the string representation +func (s PurchaseReservedNodeOfferingOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PurchaseReservedNodeOfferingOutput) GoString() string { + return s.String() +} + +type RebootClusterInput struct { + _ struct{} `type:"structure"` + + // The cluster identifier. + ClusterIdentifier *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s RebootClusterInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RebootClusterInput) GoString() string { + return s.String() +} + +type RebootClusterOutput struct { + _ struct{} `type:"structure"` + + // Describes a cluster. + Cluster *Cluster `type:"structure"` +} + +// String returns the string representation +func (s RebootClusterOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RebootClusterOutput) GoString() string { + return s.String() +} + +// Describes a recurring charge. +type RecurringCharge struct { + _ struct{} `type:"structure"` + + // The amount charged per the period of time specified by the recurring charge + // frequency. + RecurringChargeAmount *float64 `type:"double"` + + // The frequency at which the recurring charge amount is applied. + RecurringChargeFrequency *string `type:"string"` +} + +// String returns the string representation +func (s RecurringCharge) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RecurringCharge) GoString() string { + return s.String() +} + +// Describes a reserved node. You can call the DescribeReservedNodeOfferings +// API to obtain the available reserved node offerings. +type ReservedNode struct { + _ struct{} `type:"structure"` + + // The currency code for the reserved cluster. + CurrencyCode *string `type:"string"` + + // The duration of the node reservation in seconds. + Duration *int64 `type:"integer"` + + // The fixed cost Amazon Redshift charges you for this reserved node. + FixedPrice *float64 `type:"double"` + + // The number of reserved compute nodes. + NodeCount *int64 `type:"integer"` + + // The node type of the reserved node. + NodeType *string `type:"string"` + + // The anticipated utilization of the reserved node, as defined in the reserved + // node offering. + OfferingType *string `type:"string"` + + // The recurring charges for the reserved node. + RecurringCharges []*RecurringCharge `locationNameList:"RecurringCharge" type:"list"` + + // The unique identifier for the reservation. + ReservedNodeId *string `type:"string"` + + // The identifier for the reserved node offering. + ReservedNodeOfferingId *string `type:"string"` + + // The time the reservation started. You purchase a reserved node offering for + // a duration. This is the start time of that duration. + StartTime *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + // The state of the reserved compute node. + // + // Possible Values: + // + // pending-payment-This reserved node has recently been purchased, and the + // sale has been approved, but payment has not yet been confirmed. active-This + // reserved node is owned by the caller and is available for use. payment-failed-Payment + // failed for the purchase attempt. + State *string `type:"string"` + + // The hourly rate Amazon Redshift charges you for this reserved node. 
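+ //
+ // For illustration only (not part of the generated file): a rough estimate
+ // of the total cost of a reserved node r combines FixedPrice with UsagePrice
+ // accrued over every hour of Duration (recurring charges, if any, are extra):
+ //
+ //    hours := float64(aws.Int64Value(r.Duration)) / 3600
+ //    total := aws.Float64Value(r.FixedPrice) + aws.Float64Value(r.UsagePrice)*hours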
+ UsagePrice *float64 `type:"double"`
+}
+
+// String returns the string representation
+func (s ReservedNode) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ReservedNode) GoString() string {
+ return s.String()
+}
+
+// Describes a reserved node offering.
+type ReservedNodeOffering struct {
+ _ struct{} `type:"structure"`
+
+ // The currency code for the compute nodes offering.
+ CurrencyCode *string `type:"string"`
+
+ // The duration, in seconds, for which the offering will reserve the node.
+ Duration *int64 `type:"integer"`
+
+ // The upfront fixed charge you will pay to purchase the specific reserved node
+ // offering.
+ FixedPrice *float64 `type:"double"`
+
+ // The node type offered by the reserved node offering.
+ NodeType *string `type:"string"`
+
+ // The anticipated utilization of the reserved node, as defined in the reserved
+ // node offering.
+ OfferingType *string `type:"string"`
+
+ // The charge to your account regardless of whether you are creating any clusters
+ // using the node offering. Recurring charges are only in effect for heavy-utilization
+ // reserved nodes.
+ RecurringCharges []*RecurringCharge `locationNameList:"RecurringCharge" type:"list"`
+
+ // The offering identifier.
+ ReservedNodeOfferingId *string `type:"string"`
+
+ // The rate you are charged for each hour the cluster that is using the offering
+ // is running.
+ UsagePrice *float64 `type:"double"`
+}
+
+// String returns the string representation
+func (s ReservedNodeOffering) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ReservedNodeOffering) GoString() string {
+ return s.String()
+}
+
+type ResetClusterParameterGroupInput struct {
+ _ struct{} `type:"structure"`
+
+ // The name of the cluster parameter group to be reset.
+ ParameterGroupName *string `type:"string" required:"true"`
+
+ // An array of names of parameters to be reset. If the ResetAllParameters option
+ // is not used, then at least one parameter name must be supplied.
+ //
+ // Constraints: A maximum of 20 parameters can be reset in a single request.
+ Parameters []*Parameter `locationNameList:"Parameter" type:"list"`
+
+ // If true, all parameters in the specified parameter group will be reset to
+ // their default values.
+ //
+ // Default: true
+ ResetAllParameters *bool `type:"boolean"`
+}
+
+// String returns the string representation
+func (s ResetClusterParameterGroupInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ResetClusterParameterGroupInput) GoString() string {
+ return s.String()
+}
+
+type RestoreFromClusterSnapshotInput struct {
+ _ struct{} `type:"structure"`
+
+ // If true, major version upgrades can be applied during the maintenance window
+ // to the Amazon Redshift engine that is running on the cluster.
+ //
+ // Default: true
+ AllowVersionUpgrade *bool `type:"boolean"`
+
+ // The number of days that automated snapshots are retained. If the value is
+ // 0, automated snapshots are disabled. Even if automated snapshots are disabled,
+ // you can still create manual snapshots when you want with CreateClusterSnapshot.
+ //
+ // Default: The value selected for the cluster from which the snapshot was
+ // taken.
+ //
+ // Constraints: Must be a value from 0 to 35.
+ AutomatedSnapshotRetentionPeriod *int64 `type:"integer"`
+
+ // The Amazon EC2 Availability Zone in which to restore the cluster.
+ //
+ // Default: A random, system-chosen Availability Zone.
+ //
+ // Example: us-east-1a
+ AvailabilityZone *string `type:"string"`
+
+ // The identifier of the cluster that will be created from restoring the snapshot.
+ //
+ // Constraints:
+ //
+ // Must contain from 1 to 63 alphanumeric characters or hyphens. Alphabetic
+ // characters must be lowercase. First character must be a letter. Cannot end
+ // with a hyphen or contain two consecutive hyphens. Must be unique for all
+ // clusters within an AWS account.
+ ClusterIdentifier *string `type:"string" required:"true"`
+
+ // The name of the parameter group to be associated with this cluster.
+ //
+ // Default: The default Amazon Redshift cluster parameter group. For information
+ // about the default parameter group, go to Working with Amazon Redshift Parameter
+ // Groups (http://docs.aws.amazon.com/redshift/latest/mgmt/working-with-parameter-groups.html).
+ //
+ // Constraints:
+ //
+ // Must be 1 to 255 alphanumeric characters or hyphens. First character must
+ // be a letter. Cannot end with a hyphen or contain two consecutive hyphens.
+ ClusterParameterGroupName *string `type:"string"`
+
+ // A list of security groups to be associated with this cluster.
+ //
+ // Default: The default cluster security group for Amazon Redshift.
+ //
+ // Cluster security groups only apply to clusters outside of VPCs.
+ ClusterSecurityGroups []*string `locationNameList:"ClusterSecurityGroupName" type:"list"`
+
+ // The name of the subnet group where you want the cluster restored.
+ //
+ // A snapshot of a cluster in a VPC can be restored only in a VPC. Therefore,
+ // you must provide the name of the subnet group where you want the cluster
+ // restored.
+ ClusterSubnetGroupName *string `type:"string"`
+
+ // The elastic IP (EIP) address for the cluster.
+ ElasticIp *string `type:"string"`
+
+ // Specifies the name of the HSM client certificate the Amazon Redshift cluster
+ // uses to retrieve the data encryption keys stored in an HSM.
+ HsmClientCertificateIdentifier *string `type:"string"`
+
+ // Specifies the name of the HSM configuration that contains the information
+ // the Amazon Redshift cluster can use to retrieve and store keys in an HSM.
+ HsmConfigurationIdentifier *string `type:"string"`
+
+ // The AWS Key Management Service (KMS) key ID of the encryption key that you
+ // want to use to encrypt data in the cluster that you restore from a shared
+ // snapshot.
+ KmsKeyId *string `type:"string"`
+
+ // The node type that the restored cluster will be provisioned with.
+ //
+ // Default: The node type of the cluster from which the snapshot was taken.
+ // You can modify this if you are using any DS node type. In that case, you
+ // can choose to restore into another DS node type of the same size. For example,
+ // you can restore ds1.8xlarge into ds2.8xlarge, or ds2.xlarge into ds1.xlarge.
+ // If you have a DC instance type, you must restore into that same instance
+ // type and size. In other words, you can only restore a dc1.large instance
+ // type into another dc1.large instance type. For more information about node
+ // types, see About Clusters and Nodes (http://docs.aws.amazon.com/redshift/latest/mgmt/working-with-clusters.html#rs-about-clusters-and-nodes)
+ // in the Amazon Redshift Cluster Management Guide.
+ NodeType *string `type:"string"`
+
+ // The AWS customer account used to create or copy the snapshot. Required if
+ // you are restoring a snapshot you do not own, optional if you own the snapshot.
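+ //
+ // For illustration only (not part of the generated file): a minimal restore
+ // sketch, assuming an initialized client svc := redshift.New(session.New()):
+ //
+ //    _, err := svc.RestoreFromClusterSnapshot(&redshift.RestoreFromClusterSnapshotInput{
+ //        ClusterIdentifier:  aws.String("examplecluster-restored"), // hypothetical identifier
+ //        SnapshotIdentifier: aws.String("my-snapshot-id"),
+ //    })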
+ OwnerAccount *string `type:"string"` + + // The port number on which the cluster accepts connections. + // + // Default: The same port as the original cluster. + // + // Constraints: Must be between 1115 and 65535. + Port *int64 `type:"integer"` + + // The weekly time range (in UTC) during which automated cluster maintenance + // can occur. + // + // Format: ddd:hh24:mi-ddd:hh24:mi + // + // Default: The value selected for the cluster from which the snapshot was + // taken. For more information about the time blocks for each region, see Maintenance + // Windows (http://docs.aws.amazon.com/redshift/latest/mgmt/working-with-clusters.html#rs-maintenance-windows) + // in Amazon Redshift Cluster Management Guide. + // + // Valid Days: Mon | Tue | Wed | Thu | Fri | Sat | Sun + // + // Constraints: Minimum 30-minute window. + PreferredMaintenanceWindow *string `type:"string"` + + // If true, the cluster can be accessed from a public network. + PubliclyAccessible *bool `type:"boolean"` + + // The name of the cluster the source snapshot was created from. This parameter + // is required if your IAM user has a policy containing a snapshot resource + // element that specifies anything other than * for the cluster name. + SnapshotClusterIdentifier *string `type:"string"` + + // The name of the snapshot from which to create the new cluster. This parameter + // isn't case sensitive. + // + // Example: my-snapshot-id + SnapshotIdentifier *string `type:"string" required:"true"` + + // A list of Virtual Private Cloud (VPC) security groups to be associated with + // the cluster. + // + // Default: The default VPC security group is associated with the cluster. + // + // VPC security groups only apply to clusters in VPCs. + VpcSecurityGroupIds []*string `locationNameList:"VpcSecurityGroupId" type:"list"` +} + +// String returns the string representation +func (s RestoreFromClusterSnapshotInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RestoreFromClusterSnapshotInput) GoString() string { + return s.String() +} + +type RestoreFromClusterSnapshotOutput struct { + _ struct{} `type:"structure"` + + // Describes a cluster. + Cluster *Cluster `type:"structure"` +} + +// String returns the string representation +func (s RestoreFromClusterSnapshotOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RestoreFromClusterSnapshotOutput) GoString() string { + return s.String() +} + +// Describes the status of a cluster restore action. Returns null if the cluster +// was not created by restoring a snapshot. +type RestoreStatus struct { + _ struct{} `type:"structure"` + + // The number of megabytes per second being transferred from the backup storage. + // Returns the average rate for a completed backup. + CurrentRestoreRateInMegaBytesPerSecond *float64 `type:"double"` + + // The amount of time an in-progress restore has been running, or the amount + // of time it took a completed restore to finish. + ElapsedTimeInSeconds *int64 `type:"long"` + + // The estimate of the time remaining before the restore will complete. Returns + // 0 for a completed restore. + EstimatedTimeToCompletionInSeconds *int64 `type:"long"` + + // The number of megabytes that have been transferred from snapshot storage. + ProgressInMegaBytes *int64 `type:"long"` + + // The size of the set of snapshot data used to restore the cluster. + SnapshotSizeInMegaBytes *int64 `type:"long"` + + // The status of the restore action. 
Returns starting, restoring, completed, + // or failed. + Status *string `type:"string"` +} + +// String returns the string representation +func (s RestoreStatus) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RestoreStatus) GoString() string { + return s.String() +} + +// ??? +type RevokeClusterSecurityGroupIngressInput struct { + _ struct{} `type:"structure"` + + // The IP range for which to revoke access. This range must be a valid Classless + // Inter-Domain Routing (CIDR) block of IP addresses. If CIDRIP is specified, + // EC2SecurityGroupName and EC2SecurityGroupOwnerId cannot be provided. + CIDRIP *string `type:"string"` + + // The name of the security Group from which to revoke the ingress rule. + ClusterSecurityGroupName *string `type:"string" required:"true"` + + // The name of the EC2 Security Group whose access is to be revoked. If EC2SecurityGroupName + // is specified, EC2SecurityGroupOwnerId must also be provided and CIDRIP cannot + // be provided. + EC2SecurityGroupName *string `type:"string"` + + // The AWS account number of the owner of the security group specified in the + // EC2SecurityGroupName parameter. The AWS access key ID is not an acceptable + // value. If EC2SecurityGroupOwnerId is specified, EC2SecurityGroupName must + // also be provided. and CIDRIP cannot be provided. + // + // Example: 111122223333 + EC2SecurityGroupOwnerId *string `type:"string"` +} + +// String returns the string representation +func (s RevokeClusterSecurityGroupIngressInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RevokeClusterSecurityGroupIngressInput) GoString() string { + return s.String() +} + +type RevokeClusterSecurityGroupIngressOutput struct { + _ struct{} `type:"structure"` + + // Describes a security group. + ClusterSecurityGroup *ClusterSecurityGroup `type:"structure"` +} + +// String returns the string representation +func (s RevokeClusterSecurityGroupIngressOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RevokeClusterSecurityGroupIngressOutput) GoString() string { + return s.String() +} + +type RevokeSnapshotAccessInput struct { + _ struct{} `type:"structure"` + + // The identifier of the AWS customer account that can no longer restore the + // specified snapshot. + AccountWithRestoreAccess *string `type:"string" required:"true"` + + // The identifier of the cluster the snapshot was created from. This parameter + // is required if your IAM user has a policy containing a snapshot resource + // element that specifies anything other than * for the cluster name. + SnapshotClusterIdentifier *string `type:"string"` + + // The identifier of the snapshot that the account can no longer access. + SnapshotIdentifier *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s RevokeSnapshotAccessInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RevokeSnapshotAccessInput) GoString() string { + return s.String() +} + +type RevokeSnapshotAccessOutput struct { + _ struct{} `type:"structure"` + + // Describes a snapshot. 
+ Snapshot *Snapshot `type:"structure"` +} + +// String returns the string representation +func (s RevokeSnapshotAccessOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RevokeSnapshotAccessOutput) GoString() string { + return s.String() +} + +type RotateEncryptionKeyInput struct { + _ struct{} `type:"structure"` + + // The unique identifier of the cluster that you want to rotate the encryption + // keys for. + // + // Constraints: Must be the name of valid cluster that has encryption enabled. + ClusterIdentifier *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s RotateEncryptionKeyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RotateEncryptionKeyInput) GoString() string { + return s.String() +} + +type RotateEncryptionKeyOutput struct { + _ struct{} `type:"structure"` + + // Describes a cluster. + Cluster *Cluster `type:"structure"` +} + +// String returns the string representation +func (s RotateEncryptionKeyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RotateEncryptionKeyOutput) GoString() string { + return s.String() +} + +// Describes a snapshot. +type Snapshot struct { + _ struct{} `type:"structure"` + + // A list of the AWS customer accounts authorized to restore the snapshot. Returns + // null if no accounts are authorized. Visible only to the snapshot owner. + AccountsWithRestoreAccess []*AccountWithRestoreAccess `locationNameList:"AccountWithRestoreAccess" type:"list"` + + // The size of the incremental backup. + ActualIncrementalBackupSizeInMegaBytes *float64 `type:"double"` + + // The Availability Zone in which the cluster was created. + AvailabilityZone *string `type:"string"` + + // The number of megabytes that have been transferred to the snapshot backup. + BackupProgressInMegaBytes *float64 `type:"double"` + + // The time (UTC) when the cluster was originally created. + ClusterCreateTime *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + // The identifier of the cluster for which the snapshot was taken. + ClusterIdentifier *string `type:"string"` + + // The version ID of the Amazon Redshift engine that is running on the cluster. + ClusterVersion *string `type:"string"` + + // The number of megabytes per second being transferred to the snapshot backup. + // Returns 0 for a completed backup. + CurrentBackupRateInMegaBytesPerSecond *float64 `type:"double"` + + // The name of the database that was created when the cluster was created. + DBName *string `type:"string"` + + // The amount of time an in-progress snapshot backup has been running, or the + // amount of time it took a completed backup to finish. + ElapsedTimeInSeconds *int64 `type:"long"` + + // If true, the data in the snapshot is encrypted at rest. + Encrypted *bool `type:"boolean"` + + // A boolean that indicates whether the snapshot data is encrypted using the + // HSM keys of the source cluster. true indicates that the data is encrypted + // using HSM keys. + EncryptedWithHSM *bool `type:"boolean"` + + // The estimate of the time remaining before the snapshot backup will complete. + // Returns 0 for a completed backup. + EstimatedSecondsToCompletion *int64 `type:"long"` + + // The AWS Key Management Service (KMS) key ID of the encryption key that was + // used to encrypt data in the cluster from which the snapshot was taken. 
+ KmsKeyId *string `type:"string"` + + // The master user name for the cluster. + MasterUsername *string `type:"string"` + + // The node type of the nodes in the cluster. + NodeType *string `type:"string"` + + // The number of nodes in the cluster. + NumberOfNodes *int64 `type:"integer"` + + // For manual snapshots, the AWS customer account used to create or copy the + // snapshot. For automatic snapshots, the owner of the cluster. The owner can + // perform all snapshot actions, such as sharing a manual snapshot. + OwnerAccount *string `type:"string"` + + // The port that the cluster is listening on. + Port *int64 `type:"integer"` + + // The list of node types that this cluster snapshot is able to restore into. + RestorableNodeTypes []*string `locationNameList:"NodeType" type:"list"` + + // The time (UTC) when Amazon Redshift began the snapshot. A snapshot contains + // a copy of the cluster data as of this exact time. + SnapshotCreateTime *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + // The snapshot identifier that is provided in the request. + SnapshotIdentifier *string `type:"string"` + + // The snapshot type. Snapshots created using CreateClusterSnapshot and CopyClusterSnapshot + // will be of type "manual". + SnapshotType *string `type:"string"` + + // The source region from which the snapshot was copied. + SourceRegion *string `type:"string"` + + // The snapshot status. The value of the status depends on the API operation + // used. CreateClusterSnapshot and CopyClusterSnapshot returns status as "creating". + // DescribeClusterSnapshots returns status as "creating", "available", "final + // snapshot", or "failed". DeleteClusterSnapshot returns status as "deleted". + Status *string `type:"string"` + + // The list of tags for the cluster snapshot. + Tags []*Tag `locationNameList:"Tag" type:"list"` + + // The size of the complete set of backup data that would be used to restore + // the cluster. + TotalBackupSizeInMegaBytes *float64 `type:"double"` + + // The VPC identifier of the cluster if the snapshot is from a cluster in a + // VPC. Otherwise, this field is not in the output. + VpcId *string `type:"string"` +} + +// String returns the string representation +func (s Snapshot) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Snapshot) GoString() string { + return s.String() +} + +// The snapshot copy grant that grants Amazon Redshift permission to encrypt +// copied snapshots with the specified customer master key (CMK) from AWS KMS +// in the destination region. +// +// For more information about managing snapshot copy grants, go to Amazon +// Redshift Database Encryption (http://docs.aws.amazon.com/redshift/latest/mgmt/working-with-db-encryption.html) +// in the Amazon Redshift Cluster Management Guide. +type SnapshotCopyGrant struct { + _ struct{} `type:"structure"` + + // The unique identifier of the customer master key (CMK) in AWS KMS to which + // Amazon Redshift is granted permission. + KmsKeyId *string `type:"string"` + + // The name of the snapshot copy grant. + SnapshotCopyGrantName *string `type:"string"` + + // A list of tag instances. + Tags []*Tag `locationNameList:"Tag" type:"list"` +} + +// String returns the string representation +func (s SnapshotCopyGrant) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SnapshotCopyGrant) GoString() string { + return s.String() +} + +// Describes a subnet. 
+type Subnet struct {
+	_ struct{} `type:"structure"`
+
+	// Describes an availability zone.
+	SubnetAvailabilityZone *AvailabilityZone `type:"structure"`
+
+	// The identifier of the subnet.
+	SubnetIdentifier *string `type:"string"`
+
+	// The status of the subnet.
+	SubnetStatus *string `type:"string"`
+}
+
+// String returns the string representation
+func (s Subnet) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s Subnet) GoString() string {
+	return s.String()
+}
+
+// A tag consisting of a name/value pair for a resource.
+type Tag struct {
+	_ struct{} `type:"structure"`
+
+	// The key, or name, for the resource tag.
+	Key *string `type:"string"`
+
+	// The value for the resource tag.
+	Value *string `type:"string"`
+}
+
+// String returns the string representation
+func (s Tag) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s Tag) GoString() string {
+	return s.String()
+}
+
+// A tag and its associated resource.
+type TaggedResource struct {
+	_ struct{} `type:"structure"`
+
+	// The Amazon Resource Name (ARN) with which the tag is associated. For example,
+	// arn:aws:redshift:us-east-1:123456789:cluster:t1.
+	ResourceName *string `type:"string"`
+
+	// The type of resource with which the tag is associated. Valid resource types
+	// are: Cluster, CIDR/IP, EC2 security group, Snapshot, Cluster security group,
+	// Subnet group, HSM connection, HSM certificate, and Parameter group.
+	//
+	// For more information about Amazon Redshift resource types and constructing
+	// ARNs, go to Constructing an Amazon Redshift Amazon Resource Name (ARN) (http://docs.aws.amazon.com/redshift/latest/mgmt/constructing-redshift-arn.html)
+	// in the Amazon Redshift Cluster Management Guide.
+	ResourceType *string `type:"string"`
+
+	// The tag for the resource.
+	Tag *Tag `type:"structure"`
+}
+
+// String returns the string representation
+func (s TaggedResource) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s TaggedResource) GoString() string {
+	return s.String()
+}
+
+// Describes the members of a VPC security group.
+type VpcSecurityGroupMembership struct {
+	_ struct{} `type:"structure"`
+
+	// The status of the VPC security group.
+	Status *string `type:"string"`
+
+	// The identifier of the VPC security group.
+	VpcSecurityGroupId *string `type:"string"`
+}
+
+// String returns the string representation
+func (s VpcSecurityGroupMembership) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s VpcSecurityGroupMembership) GoString() string {
+	return s.String()
+}
+
+const (
+	// @enum ParameterApplyType
+	ParameterApplyTypeStatic = "static"
+	// @enum ParameterApplyType
+	ParameterApplyTypeDynamic = "dynamic"
+)
+
+const (
+	// @enum SourceType
+	SourceTypeCluster = "cluster"
+	// @enum SourceType
+	SourceTypeClusterParameterGroup = "cluster-parameter-group"
+	// @enum SourceType
+	SourceTypeClusterSecurityGroup = "cluster-security-group"
+	// @enum SourceType
+	SourceTypeClusterSnapshot = "cluster-snapshot"
+)
diff --git a/vendor/github.com/aws/aws-sdk-go/service/redshift/examples_test.go b/vendor/github.com/aws/aws-sdk-go/service/redshift/examples_test.go
new file mode 100644
index 000000000..21b65db68
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/redshift/examples_test.go
@@ -0,0 +1,1497 @@
+// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT.
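+//
+// The generated examples below all follow the same pattern; a minimal sketch
+// of that pattern, assuming credentials and region come from the environment
+// via session.New(), and using the hypothetical identifier "my-cluster" in
+// place of the "String" placeholders:
+//
+//	svc := redshift.New(session.New())
+//	resp, err := svc.DescribeClusters(&redshift.DescribeClustersInput{
+//		ClusterIdentifier: aws.String("my-cluster"),
+//	})
+//	if err != nil {
+//		fmt.Println(err.Error()) // cast err to awserr.Error for Code/Message
+//		return
+//	}
+//	fmt.Println(resp) // pretty-print the response data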
+ +package redshift_test + +import ( + "bytes" + "fmt" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/session" + "github.com/aws/aws-sdk-go/service/redshift" +) + +var _ time.Duration +var _ bytes.Buffer + +func ExampleRedshift_AuthorizeClusterSecurityGroupIngress() { + svc := redshift.New(session.New()) + + params := &redshift.AuthorizeClusterSecurityGroupIngressInput{ + ClusterSecurityGroupName: aws.String("String"), // Required + CIDRIP: aws.String("String"), + EC2SecurityGroupName: aws.String("String"), + EC2SecurityGroupOwnerId: aws.String("String"), + } + resp, err := svc.AuthorizeClusterSecurityGroupIngress(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRedshift_AuthorizeSnapshotAccess() { + svc := redshift.New(session.New()) + + params := &redshift.AuthorizeSnapshotAccessInput{ + AccountWithRestoreAccess: aws.String("String"), // Required + SnapshotIdentifier: aws.String("String"), // Required + SnapshotClusterIdentifier: aws.String("String"), + } + resp, err := svc.AuthorizeSnapshotAccess(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRedshift_CopyClusterSnapshot() { + svc := redshift.New(session.New()) + + params := &redshift.CopyClusterSnapshotInput{ + SourceSnapshotIdentifier: aws.String("String"), // Required + TargetSnapshotIdentifier: aws.String("String"), // Required + SourceSnapshotClusterIdentifier: aws.String("String"), + } + resp, err := svc.CopyClusterSnapshot(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRedshift_CreateCluster() { + svc := redshift.New(session.New()) + + params := &redshift.CreateClusterInput{ + ClusterIdentifier: aws.String("String"), // Required + MasterUserPassword: aws.String("String"), // Required + MasterUsername: aws.String("String"), // Required + NodeType: aws.String("String"), // Required + AllowVersionUpgrade: aws.Bool(true), + AutomatedSnapshotRetentionPeriod: aws.Int64(1), + AvailabilityZone: aws.String("String"), + ClusterParameterGroupName: aws.String("String"), + ClusterSecurityGroups: []*string{ + aws.String("String"), // Required + // More values... + }, + ClusterSubnetGroupName: aws.String("String"), + ClusterType: aws.String("String"), + ClusterVersion: aws.String("String"), + DBName: aws.String("String"), + ElasticIp: aws.String("String"), + Encrypted: aws.Bool(true), + HsmClientCertificateIdentifier: aws.String("String"), + HsmConfigurationIdentifier: aws.String("String"), + KmsKeyId: aws.String("String"), + NumberOfNodes: aws.Int64(1), + Port: aws.Int64(1), + PreferredMaintenanceWindow: aws.String("String"), + PubliclyAccessible: aws.Bool(true), + Tags: []*redshift.Tag{ + { // Required + Key: aws.String("String"), + Value: aws.String("String"), + }, + // More values... + }, + VpcSecurityGroupIds: []*string{ + aws.String("String"), // Required + // More values... + }, + } + resp, err := svc.CreateCluster(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. 
+ fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRedshift_CreateClusterParameterGroup() { + svc := redshift.New(session.New()) + + params := &redshift.CreateClusterParameterGroupInput{ + Description: aws.String("String"), // Required + ParameterGroupFamily: aws.String("String"), // Required + ParameterGroupName: aws.String("String"), // Required + Tags: []*redshift.Tag{ + { // Required + Key: aws.String("String"), + Value: aws.String("String"), + }, + // More values... + }, + } + resp, err := svc.CreateClusterParameterGroup(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRedshift_CreateClusterSecurityGroup() { + svc := redshift.New(session.New()) + + params := &redshift.CreateClusterSecurityGroupInput{ + ClusterSecurityGroupName: aws.String("String"), // Required + Description: aws.String("String"), // Required + Tags: []*redshift.Tag{ + { // Required + Key: aws.String("String"), + Value: aws.String("String"), + }, + // More values... + }, + } + resp, err := svc.CreateClusterSecurityGroup(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRedshift_CreateClusterSnapshot() { + svc := redshift.New(session.New()) + + params := &redshift.CreateClusterSnapshotInput{ + ClusterIdentifier: aws.String("String"), // Required + SnapshotIdentifier: aws.String("String"), // Required + Tags: []*redshift.Tag{ + { // Required + Key: aws.String("String"), + Value: aws.String("String"), + }, + // More values... + }, + } + resp, err := svc.CreateClusterSnapshot(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRedshift_CreateClusterSubnetGroup() { + svc := redshift.New(session.New()) + + params := &redshift.CreateClusterSubnetGroupInput{ + ClusterSubnetGroupName: aws.String("String"), // Required + Description: aws.String("String"), // Required + SubnetIds: []*string{ // Required + aws.String("String"), // Required + // More values... + }, + Tags: []*redshift.Tag{ + { // Required + Key: aws.String("String"), + Value: aws.String("String"), + }, + // More values... + }, + } + resp, err := svc.CreateClusterSubnetGroup(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRedshift_CreateEventSubscription() { + svc := redshift.New(session.New()) + + params := &redshift.CreateEventSubscriptionInput{ + SnsTopicArn: aws.String("String"), // Required + SubscriptionName: aws.String("String"), // Required + Enabled: aws.Bool(true), + EventCategories: []*string{ + aws.String("String"), // Required + // More values... + }, + Severity: aws.String("String"), + SourceIds: []*string{ + aws.String("String"), // Required + // More values... + }, + SourceType: aws.String("String"), + Tags: []*redshift.Tag{ + { // Required + Key: aws.String("String"), + Value: aws.String("String"), + }, + // More values... 
+ }, + } + resp, err := svc.CreateEventSubscription(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRedshift_CreateHsmClientCertificate() { + svc := redshift.New(session.New()) + + params := &redshift.CreateHsmClientCertificateInput{ + HsmClientCertificateIdentifier: aws.String("String"), // Required + Tags: []*redshift.Tag{ + { // Required + Key: aws.String("String"), + Value: aws.String("String"), + }, + // More values... + }, + } + resp, err := svc.CreateHsmClientCertificate(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRedshift_CreateHsmConfiguration() { + svc := redshift.New(session.New()) + + params := &redshift.CreateHsmConfigurationInput{ + Description: aws.String("String"), // Required + HsmConfigurationIdentifier: aws.String("String"), // Required + HsmIpAddress: aws.String("String"), // Required + HsmPartitionName: aws.String("String"), // Required + HsmPartitionPassword: aws.String("String"), // Required + HsmServerPublicCertificate: aws.String("String"), // Required + Tags: []*redshift.Tag{ + { // Required + Key: aws.String("String"), + Value: aws.String("String"), + }, + // More values... + }, + } + resp, err := svc.CreateHsmConfiguration(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRedshift_CreateSnapshotCopyGrant() { + svc := redshift.New(session.New()) + + params := &redshift.CreateSnapshotCopyGrantInput{ + SnapshotCopyGrantName: aws.String("String"), // Required + KmsKeyId: aws.String("String"), + Tags: []*redshift.Tag{ + { // Required + Key: aws.String("String"), + Value: aws.String("String"), + }, + // More values... + }, + } + resp, err := svc.CreateSnapshotCopyGrant(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRedshift_CreateTags() { + svc := redshift.New(session.New()) + + params := &redshift.CreateTagsInput{ + ResourceName: aws.String("String"), // Required + Tags: []*redshift.Tag{ // Required + { // Required + Key: aws.String("String"), + Value: aws.String("String"), + }, + // More values... + }, + } + resp, err := svc.CreateTags(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRedshift_DeleteCluster() { + svc := redshift.New(session.New()) + + params := &redshift.DeleteClusterInput{ + ClusterIdentifier: aws.String("String"), // Required + FinalClusterSnapshotIdentifier: aws.String("String"), + SkipFinalClusterSnapshot: aws.Bool(true), + } + resp, err := svc.DeleteCluster(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleRedshift_DeleteClusterParameterGroup() { + svc := redshift.New(session.New()) + + params := &redshift.DeleteClusterParameterGroupInput{ + ParameterGroupName: aws.String("String"), // Required + } + resp, err := svc.DeleteClusterParameterGroup(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRedshift_DeleteClusterSecurityGroup() { + svc := redshift.New(session.New()) + + params := &redshift.DeleteClusterSecurityGroupInput{ + ClusterSecurityGroupName: aws.String("String"), // Required + } + resp, err := svc.DeleteClusterSecurityGroup(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRedshift_DeleteClusterSnapshot() { + svc := redshift.New(session.New()) + + params := &redshift.DeleteClusterSnapshotInput{ + SnapshotIdentifier: aws.String("String"), // Required + SnapshotClusterIdentifier: aws.String("String"), + } + resp, err := svc.DeleteClusterSnapshot(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRedshift_DeleteClusterSubnetGroup() { + svc := redshift.New(session.New()) + + params := &redshift.DeleteClusterSubnetGroupInput{ + ClusterSubnetGroupName: aws.String("String"), // Required + } + resp, err := svc.DeleteClusterSubnetGroup(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRedshift_DeleteEventSubscription() { + svc := redshift.New(session.New()) + + params := &redshift.DeleteEventSubscriptionInput{ + SubscriptionName: aws.String("String"), // Required + } + resp, err := svc.DeleteEventSubscription(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRedshift_DeleteHsmClientCertificate() { + svc := redshift.New(session.New()) + + params := &redshift.DeleteHsmClientCertificateInput{ + HsmClientCertificateIdentifier: aws.String("String"), // Required + } + resp, err := svc.DeleteHsmClientCertificate(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRedshift_DeleteHsmConfiguration() { + svc := redshift.New(session.New()) + + params := &redshift.DeleteHsmConfigurationInput{ + HsmConfigurationIdentifier: aws.String("String"), // Required + } + resp, err := svc.DeleteHsmConfiguration(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleRedshift_DeleteSnapshotCopyGrant() { + svc := redshift.New(session.New()) + + params := &redshift.DeleteSnapshotCopyGrantInput{ + SnapshotCopyGrantName: aws.String("String"), // Required + } + resp, err := svc.DeleteSnapshotCopyGrant(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRedshift_DeleteTags() { + svc := redshift.New(session.New()) + + params := &redshift.DeleteTagsInput{ + ResourceName: aws.String("String"), // Required + TagKeys: []*string{ // Required + aws.String("String"), // Required + // More values... + }, + } + resp, err := svc.DeleteTags(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRedshift_DescribeClusterParameterGroups() { + svc := redshift.New(session.New()) + + params := &redshift.DescribeClusterParameterGroupsInput{ + Marker: aws.String("String"), + MaxRecords: aws.Int64(1), + ParameterGroupName: aws.String("String"), + TagKeys: []*string{ + aws.String("String"), // Required + // More values... + }, + TagValues: []*string{ + aws.String("String"), // Required + // More values... + }, + } + resp, err := svc.DescribeClusterParameterGroups(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRedshift_DescribeClusterParameters() { + svc := redshift.New(session.New()) + + params := &redshift.DescribeClusterParametersInput{ + ParameterGroupName: aws.String("String"), // Required + Marker: aws.String("String"), + MaxRecords: aws.Int64(1), + Source: aws.String("String"), + } + resp, err := svc.DescribeClusterParameters(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRedshift_DescribeClusterSecurityGroups() { + svc := redshift.New(session.New()) + + params := &redshift.DescribeClusterSecurityGroupsInput{ + ClusterSecurityGroupName: aws.String("String"), + Marker: aws.String("String"), + MaxRecords: aws.Int64(1), + TagKeys: []*string{ + aws.String("String"), // Required + // More values... + }, + TagValues: []*string{ + aws.String("String"), // Required + // More values... + }, + } + resp, err := svc.DescribeClusterSecurityGroups(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRedshift_DescribeClusterSnapshots() { + svc := redshift.New(session.New()) + + params := &redshift.DescribeClusterSnapshotsInput{ + ClusterIdentifier: aws.String("String"), + EndTime: aws.Time(time.Now()), + Marker: aws.String("String"), + MaxRecords: aws.Int64(1), + OwnerAccount: aws.String("String"), + SnapshotIdentifier: aws.String("String"), + SnapshotType: aws.String("String"), + StartTime: aws.Time(time.Now()), + TagKeys: []*string{ + aws.String("String"), // Required + // More values... 
+ }, + TagValues: []*string{ + aws.String("String"), // Required + // More values... + }, + } + resp, err := svc.DescribeClusterSnapshots(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRedshift_DescribeClusterSubnetGroups() { + svc := redshift.New(session.New()) + + params := &redshift.DescribeClusterSubnetGroupsInput{ + ClusterSubnetGroupName: aws.String("String"), + Marker: aws.String("String"), + MaxRecords: aws.Int64(1), + TagKeys: []*string{ + aws.String("String"), // Required + // More values... + }, + TagValues: []*string{ + aws.String("String"), // Required + // More values... + }, + } + resp, err := svc.DescribeClusterSubnetGroups(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRedshift_DescribeClusterVersions() { + svc := redshift.New(session.New()) + + params := &redshift.DescribeClusterVersionsInput{ + ClusterParameterGroupFamily: aws.String("String"), + ClusterVersion: aws.String("String"), + Marker: aws.String("String"), + MaxRecords: aws.Int64(1), + } + resp, err := svc.DescribeClusterVersions(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRedshift_DescribeClusters() { + svc := redshift.New(session.New()) + + params := &redshift.DescribeClustersInput{ + ClusterIdentifier: aws.String("String"), + Marker: aws.String("String"), + MaxRecords: aws.Int64(1), + TagKeys: []*string{ + aws.String("String"), // Required + // More values... + }, + TagValues: []*string{ + aws.String("String"), // Required + // More values... + }, + } + resp, err := svc.DescribeClusters(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRedshift_DescribeDefaultClusterParameters() { + svc := redshift.New(session.New()) + + params := &redshift.DescribeDefaultClusterParametersInput{ + ParameterGroupFamily: aws.String("String"), // Required + Marker: aws.String("String"), + MaxRecords: aws.Int64(1), + } + resp, err := svc.DescribeDefaultClusterParameters(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRedshift_DescribeEventCategories() { + svc := redshift.New(session.New()) + + params := &redshift.DescribeEventCategoriesInput{ + SourceType: aws.String("String"), + } + resp, err := svc.DescribeEventCategories(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleRedshift_DescribeEventSubscriptions() { + svc := redshift.New(session.New()) + + params := &redshift.DescribeEventSubscriptionsInput{ + Marker: aws.String("String"), + MaxRecords: aws.Int64(1), + SubscriptionName: aws.String("String"), + } + resp, err := svc.DescribeEventSubscriptions(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRedshift_DescribeEvents() { + svc := redshift.New(session.New()) + + params := &redshift.DescribeEventsInput{ + Duration: aws.Int64(1), + EndTime: aws.Time(time.Now()), + Marker: aws.String("String"), + MaxRecords: aws.Int64(1), + SourceIdentifier: aws.String("String"), + SourceType: aws.String("SourceType"), + StartTime: aws.Time(time.Now()), + } + resp, err := svc.DescribeEvents(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRedshift_DescribeHsmClientCertificates() { + svc := redshift.New(session.New()) + + params := &redshift.DescribeHsmClientCertificatesInput{ + HsmClientCertificateIdentifier: aws.String("String"), + Marker: aws.String("String"), + MaxRecords: aws.Int64(1), + TagKeys: []*string{ + aws.String("String"), // Required + // More values... + }, + TagValues: []*string{ + aws.String("String"), // Required + // More values... + }, + } + resp, err := svc.DescribeHsmClientCertificates(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRedshift_DescribeHsmConfigurations() { + svc := redshift.New(session.New()) + + params := &redshift.DescribeHsmConfigurationsInput{ + HsmConfigurationIdentifier: aws.String("String"), + Marker: aws.String("String"), + MaxRecords: aws.Int64(1), + TagKeys: []*string{ + aws.String("String"), // Required + // More values... + }, + TagValues: []*string{ + aws.String("String"), // Required + // More values... + }, + } + resp, err := svc.DescribeHsmConfigurations(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRedshift_DescribeLoggingStatus() { + svc := redshift.New(session.New()) + + params := &redshift.DescribeLoggingStatusInput{ + ClusterIdentifier: aws.String("String"), // Required + } + resp, err := svc.DescribeLoggingStatus(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRedshift_DescribeOrderableClusterOptions() { + svc := redshift.New(session.New()) + + params := &redshift.DescribeOrderableClusterOptionsInput{ + ClusterVersion: aws.String("String"), + Marker: aws.String("String"), + MaxRecords: aws.Int64(1), + NodeType: aws.String("String"), + } + resp, err := svc.DescribeOrderableClusterOptions(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. 
+ fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRedshift_DescribeReservedNodeOfferings() { + svc := redshift.New(session.New()) + + params := &redshift.DescribeReservedNodeOfferingsInput{ + Marker: aws.String("String"), + MaxRecords: aws.Int64(1), + ReservedNodeOfferingId: aws.String("String"), + } + resp, err := svc.DescribeReservedNodeOfferings(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRedshift_DescribeReservedNodes() { + svc := redshift.New(session.New()) + + params := &redshift.DescribeReservedNodesInput{ + Marker: aws.String("String"), + MaxRecords: aws.Int64(1), + ReservedNodeId: aws.String("String"), + } + resp, err := svc.DescribeReservedNodes(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRedshift_DescribeResize() { + svc := redshift.New(session.New()) + + params := &redshift.DescribeResizeInput{ + ClusterIdentifier: aws.String("String"), // Required + } + resp, err := svc.DescribeResize(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRedshift_DescribeSnapshotCopyGrants() { + svc := redshift.New(session.New()) + + params := &redshift.DescribeSnapshotCopyGrantsInput{ + Marker: aws.String("String"), + MaxRecords: aws.Int64(1), + SnapshotCopyGrantName: aws.String("String"), + TagKeys: []*string{ + aws.String("String"), // Required + // More values... + }, + TagValues: []*string{ + aws.String("String"), // Required + // More values... + }, + } + resp, err := svc.DescribeSnapshotCopyGrants(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRedshift_DescribeTags() { + svc := redshift.New(session.New()) + + params := &redshift.DescribeTagsInput{ + Marker: aws.String("String"), + MaxRecords: aws.Int64(1), + ResourceName: aws.String("String"), + ResourceType: aws.String("String"), + TagKeys: []*string{ + aws.String("String"), // Required + // More values... + }, + TagValues: []*string{ + aws.String("String"), // Required + // More values... + }, + } + resp, err := svc.DescribeTags(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRedshift_DisableLogging() { + svc := redshift.New(session.New()) + + params := &redshift.DisableLoggingInput{ + ClusterIdentifier: aws.String("String"), // Required + } + resp, err := svc.DisableLogging(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleRedshift_DisableSnapshotCopy() { + svc := redshift.New(session.New()) + + params := &redshift.DisableSnapshotCopyInput{ + ClusterIdentifier: aws.String("String"), // Required + } + resp, err := svc.DisableSnapshotCopy(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRedshift_EnableLogging() { + svc := redshift.New(session.New()) + + params := &redshift.EnableLoggingInput{ + BucketName: aws.String("String"), // Required + ClusterIdentifier: aws.String("String"), // Required + S3KeyPrefix: aws.String("String"), + } + resp, err := svc.EnableLogging(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRedshift_EnableSnapshotCopy() { + svc := redshift.New(session.New()) + + params := &redshift.EnableSnapshotCopyInput{ + ClusterIdentifier: aws.String("String"), // Required + DestinationRegion: aws.String("String"), // Required + RetentionPeriod: aws.Int64(1), + SnapshotCopyGrantName: aws.String("String"), + } + resp, err := svc.EnableSnapshotCopy(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRedshift_ModifyCluster() { + svc := redshift.New(session.New()) + + params := &redshift.ModifyClusterInput{ + ClusterIdentifier: aws.String("String"), // Required + AllowVersionUpgrade: aws.Bool(true), + AutomatedSnapshotRetentionPeriod: aws.Int64(1), + ClusterParameterGroupName: aws.String("String"), + ClusterSecurityGroups: []*string{ + aws.String("String"), // Required + // More values... + }, + ClusterType: aws.String("String"), + ClusterVersion: aws.String("String"), + HsmClientCertificateIdentifier: aws.String("String"), + HsmConfigurationIdentifier: aws.String("String"), + MasterUserPassword: aws.String("String"), + NewClusterIdentifier: aws.String("String"), + NodeType: aws.String("String"), + NumberOfNodes: aws.Int64(1), + PreferredMaintenanceWindow: aws.String("String"), + VpcSecurityGroupIds: []*string{ + aws.String("String"), // Required + // More values... + }, + } + resp, err := svc.ModifyCluster(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRedshift_ModifyClusterParameterGroup() { + svc := redshift.New(session.New()) + + params := &redshift.ModifyClusterParameterGroupInput{ + ParameterGroupName: aws.String("String"), // Required + Parameters: []*redshift.Parameter{ // Required + { // Required + AllowedValues: aws.String("String"), + ApplyType: aws.String("ParameterApplyType"), + DataType: aws.String("String"), + Description: aws.String("String"), + IsModifiable: aws.Bool(true), + MinimumEngineVersion: aws.String("String"), + ParameterName: aws.String("String"), + ParameterValue: aws.String("String"), + Source: aws.String("String"), + }, + // More values... 
+ }, + } + resp, err := svc.ModifyClusterParameterGroup(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRedshift_ModifyClusterSubnetGroup() { + svc := redshift.New(session.New()) + + params := &redshift.ModifyClusterSubnetGroupInput{ + ClusterSubnetGroupName: aws.String("String"), // Required + SubnetIds: []*string{ // Required + aws.String("String"), // Required + // More values... + }, + Description: aws.String("String"), + } + resp, err := svc.ModifyClusterSubnetGroup(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRedshift_ModifyEventSubscription() { + svc := redshift.New(session.New()) + + params := &redshift.ModifyEventSubscriptionInput{ + SubscriptionName: aws.String("String"), // Required + Enabled: aws.Bool(true), + EventCategories: []*string{ + aws.String("String"), // Required + // More values... + }, + Severity: aws.String("String"), + SnsTopicArn: aws.String("String"), + SourceIds: []*string{ + aws.String("String"), // Required + // More values... + }, + SourceType: aws.String("String"), + } + resp, err := svc.ModifyEventSubscription(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRedshift_ModifySnapshotCopyRetentionPeriod() { + svc := redshift.New(session.New()) + + params := &redshift.ModifySnapshotCopyRetentionPeriodInput{ + ClusterIdentifier: aws.String("String"), // Required + RetentionPeriod: aws.Int64(1), // Required + } + resp, err := svc.ModifySnapshotCopyRetentionPeriod(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRedshift_PurchaseReservedNodeOffering() { + svc := redshift.New(session.New()) + + params := &redshift.PurchaseReservedNodeOfferingInput{ + ReservedNodeOfferingId: aws.String("String"), // Required + NodeCount: aws.Int64(1), + } + resp, err := svc.PurchaseReservedNodeOffering(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRedshift_RebootCluster() { + svc := redshift.New(session.New()) + + params := &redshift.RebootClusterInput{ + ClusterIdentifier: aws.String("String"), // Required + } + resp, err := svc.RebootCluster(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleRedshift_ResetClusterParameterGroup() { + svc := redshift.New(session.New()) + + params := &redshift.ResetClusterParameterGroupInput{ + ParameterGroupName: aws.String("String"), // Required + Parameters: []*redshift.Parameter{ + { // Required + AllowedValues: aws.String("String"), + ApplyType: aws.String("ParameterApplyType"), + DataType: aws.String("String"), + Description: aws.String("String"), + IsModifiable: aws.Bool(true), + MinimumEngineVersion: aws.String("String"), + ParameterName: aws.String("String"), + ParameterValue: aws.String("String"), + Source: aws.String("String"), + }, + // More values... + }, + ResetAllParameters: aws.Bool(true), + } + resp, err := svc.ResetClusterParameterGroup(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRedshift_RestoreFromClusterSnapshot() { + svc := redshift.New(session.New()) + + params := &redshift.RestoreFromClusterSnapshotInput{ + ClusterIdentifier: aws.String("String"), // Required + SnapshotIdentifier: aws.String("String"), // Required + AllowVersionUpgrade: aws.Bool(true), + AutomatedSnapshotRetentionPeriod: aws.Int64(1), + AvailabilityZone: aws.String("String"), + ClusterParameterGroupName: aws.String("String"), + ClusterSecurityGroups: []*string{ + aws.String("String"), // Required + // More values... + }, + ClusterSubnetGroupName: aws.String("String"), + ElasticIp: aws.String("String"), + HsmClientCertificateIdentifier: aws.String("String"), + HsmConfigurationIdentifier: aws.String("String"), + KmsKeyId: aws.String("String"), + NodeType: aws.String("String"), + OwnerAccount: aws.String("String"), + Port: aws.Int64(1), + PreferredMaintenanceWindow: aws.String("String"), + PubliclyAccessible: aws.Bool(true), + SnapshotClusterIdentifier: aws.String("String"), + VpcSecurityGroupIds: []*string{ + aws.String("String"), // Required + // More values... + }, + } + resp, err := svc.RestoreFromClusterSnapshot(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRedshift_RevokeClusterSecurityGroupIngress() { + svc := redshift.New(session.New()) + + params := &redshift.RevokeClusterSecurityGroupIngressInput{ + ClusterSecurityGroupName: aws.String("String"), // Required + CIDRIP: aws.String("String"), + EC2SecurityGroupName: aws.String("String"), + EC2SecurityGroupOwnerId: aws.String("String"), + } + resp, err := svc.RevokeClusterSecurityGroupIngress(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRedshift_RevokeSnapshotAccess() { + svc := redshift.New(session.New()) + + params := &redshift.RevokeSnapshotAccessInput{ + AccountWithRestoreAccess: aws.String("String"), // Required + SnapshotIdentifier: aws.String("String"), // Required + SnapshotClusterIdentifier: aws.String("String"), + } + resp, err := svc.RevokeSnapshotAccess(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+	fmt.Println(resp)
+}
+
+func ExampleRedshift_RotateEncryptionKey() {
+	svc := redshift.New(session.New())
+
+	params := &redshift.RotateEncryptionKeyInput{
+		ClusterIdentifier: aws.String("String"), // Required
+	}
+	resp, err := svc.RotateEncryptionKey(params)
+
+	if err != nil {
+		// Print the error, cast err to awserr.Error to get the Code and
+		// Message from an error.
+		fmt.Println(err.Error())
+		return
+	}
+
+	// Pretty-print the response data.
+	fmt.Println(resp)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/service/redshift/redshiftiface/interface.go b/vendor/github.com/aws/aws-sdk-go/service/redshift/redshiftiface/interface.go
new file mode 100644
index 000000000..44a538869
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/redshift/redshiftiface/interface.go
@@ -0,0 +1,280 @@
+// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT.
+
+// Package redshiftiface provides an interface for the Amazon Redshift service.
+package redshiftiface
+
+import (
+	"github.com/aws/aws-sdk-go/aws/request"
+	"github.com/aws/aws-sdk-go/service/redshift"
+)
+
+// RedshiftAPI is the interface type for redshift.Redshift.
+type RedshiftAPI interface {
+	AuthorizeClusterSecurityGroupIngressRequest(*redshift.AuthorizeClusterSecurityGroupIngressInput) (*request.Request, *redshift.AuthorizeClusterSecurityGroupIngressOutput)
+
+	AuthorizeClusterSecurityGroupIngress(*redshift.AuthorizeClusterSecurityGroupIngressInput) (*redshift.AuthorizeClusterSecurityGroupIngressOutput, error)
+
+	AuthorizeSnapshotAccessRequest(*redshift.AuthorizeSnapshotAccessInput) (*request.Request, *redshift.AuthorizeSnapshotAccessOutput)
+
+	AuthorizeSnapshotAccess(*redshift.AuthorizeSnapshotAccessInput) (*redshift.AuthorizeSnapshotAccessOutput, error)
+
+	CopyClusterSnapshotRequest(*redshift.CopyClusterSnapshotInput) (*request.Request, *redshift.CopyClusterSnapshotOutput)
+
+	CopyClusterSnapshot(*redshift.CopyClusterSnapshotInput) (*redshift.CopyClusterSnapshotOutput, error)
+
+	CreateClusterRequest(*redshift.CreateClusterInput) (*request.Request, *redshift.CreateClusterOutput)
+
+	CreateCluster(*redshift.CreateClusterInput) (*redshift.CreateClusterOutput, error)
+
+	CreateClusterParameterGroupRequest(*redshift.CreateClusterParameterGroupInput) (*request.Request, *redshift.CreateClusterParameterGroupOutput)
+
+	CreateClusterParameterGroup(*redshift.CreateClusterParameterGroupInput) (*redshift.CreateClusterParameterGroupOutput, error)
+
+	CreateClusterSecurityGroupRequest(*redshift.CreateClusterSecurityGroupInput) (*request.Request, *redshift.CreateClusterSecurityGroupOutput)
+
+	CreateClusterSecurityGroup(*redshift.CreateClusterSecurityGroupInput) (*redshift.CreateClusterSecurityGroupOutput, error)
+
+	CreateClusterSnapshotRequest(*redshift.CreateClusterSnapshotInput) (*request.Request, *redshift.CreateClusterSnapshotOutput)
+
+	CreateClusterSnapshot(*redshift.CreateClusterSnapshotInput) (*redshift.CreateClusterSnapshotOutput, error)
+
+	CreateClusterSubnetGroupRequest(*redshift.CreateClusterSubnetGroupInput) (*request.Request, *redshift.CreateClusterSubnetGroupOutput)
+
+	CreateClusterSubnetGroup(*redshift.CreateClusterSubnetGroupInput) (*redshift.CreateClusterSubnetGroupOutput, error)
+
+	CreateEventSubscriptionRequest(*redshift.CreateEventSubscriptionInput) (*request.Request, *redshift.CreateEventSubscriptionOutput)
+
+	CreateEventSubscription(*redshift.CreateEventSubscriptionInput) (*redshift.CreateEventSubscriptionOutput, error)
+
+	CreateHsmClientCertificateRequest(*redshift.CreateHsmClientCertificateInput) 
(*request.Request, *redshift.CreateHsmClientCertificateOutput) + + CreateHsmClientCertificate(*redshift.CreateHsmClientCertificateInput) (*redshift.CreateHsmClientCertificateOutput, error) + + CreateHsmConfigurationRequest(*redshift.CreateHsmConfigurationInput) (*request.Request, *redshift.CreateHsmConfigurationOutput) + + CreateHsmConfiguration(*redshift.CreateHsmConfigurationInput) (*redshift.CreateHsmConfigurationOutput, error) + + CreateSnapshotCopyGrantRequest(*redshift.CreateSnapshotCopyGrantInput) (*request.Request, *redshift.CreateSnapshotCopyGrantOutput) + + CreateSnapshotCopyGrant(*redshift.CreateSnapshotCopyGrantInput) (*redshift.CreateSnapshotCopyGrantOutput, error) + + CreateTagsRequest(*redshift.CreateTagsInput) (*request.Request, *redshift.CreateTagsOutput) + + CreateTags(*redshift.CreateTagsInput) (*redshift.CreateTagsOutput, error) + + DeleteClusterRequest(*redshift.DeleteClusterInput) (*request.Request, *redshift.DeleteClusterOutput) + + DeleteCluster(*redshift.DeleteClusterInput) (*redshift.DeleteClusterOutput, error) + + DeleteClusterParameterGroupRequest(*redshift.DeleteClusterParameterGroupInput) (*request.Request, *redshift.DeleteClusterParameterGroupOutput) + + DeleteClusterParameterGroup(*redshift.DeleteClusterParameterGroupInput) (*redshift.DeleteClusterParameterGroupOutput, error) + + DeleteClusterSecurityGroupRequest(*redshift.DeleteClusterSecurityGroupInput) (*request.Request, *redshift.DeleteClusterSecurityGroupOutput) + + DeleteClusterSecurityGroup(*redshift.DeleteClusterSecurityGroupInput) (*redshift.DeleteClusterSecurityGroupOutput, error) + + DeleteClusterSnapshotRequest(*redshift.DeleteClusterSnapshotInput) (*request.Request, *redshift.DeleteClusterSnapshotOutput) + + DeleteClusterSnapshot(*redshift.DeleteClusterSnapshotInput) (*redshift.DeleteClusterSnapshotOutput, error) + + DeleteClusterSubnetGroupRequest(*redshift.DeleteClusterSubnetGroupInput) (*request.Request, *redshift.DeleteClusterSubnetGroupOutput) + + DeleteClusterSubnetGroup(*redshift.DeleteClusterSubnetGroupInput) (*redshift.DeleteClusterSubnetGroupOutput, error) + + DeleteEventSubscriptionRequest(*redshift.DeleteEventSubscriptionInput) (*request.Request, *redshift.DeleteEventSubscriptionOutput) + + DeleteEventSubscription(*redshift.DeleteEventSubscriptionInput) (*redshift.DeleteEventSubscriptionOutput, error) + + DeleteHsmClientCertificateRequest(*redshift.DeleteHsmClientCertificateInput) (*request.Request, *redshift.DeleteHsmClientCertificateOutput) + + DeleteHsmClientCertificate(*redshift.DeleteHsmClientCertificateInput) (*redshift.DeleteHsmClientCertificateOutput, error) + + DeleteHsmConfigurationRequest(*redshift.DeleteHsmConfigurationInput) (*request.Request, *redshift.DeleteHsmConfigurationOutput) + + DeleteHsmConfiguration(*redshift.DeleteHsmConfigurationInput) (*redshift.DeleteHsmConfigurationOutput, error) + + DeleteSnapshotCopyGrantRequest(*redshift.DeleteSnapshotCopyGrantInput) (*request.Request, *redshift.DeleteSnapshotCopyGrantOutput) + + DeleteSnapshotCopyGrant(*redshift.DeleteSnapshotCopyGrantInput) (*redshift.DeleteSnapshotCopyGrantOutput, error) + + DeleteTagsRequest(*redshift.DeleteTagsInput) (*request.Request, *redshift.DeleteTagsOutput) + + DeleteTags(*redshift.DeleteTagsInput) (*redshift.DeleteTagsOutput, error) + + DescribeClusterParameterGroupsRequest(*redshift.DescribeClusterParameterGroupsInput) (*request.Request, *redshift.DescribeClusterParameterGroupsOutput) + + DescribeClusterParameterGroups(*redshift.DescribeClusterParameterGroupsInput) 
(*redshift.DescribeClusterParameterGroupsOutput, error) + + DescribeClusterParameterGroupsPages(*redshift.DescribeClusterParameterGroupsInput, func(*redshift.DescribeClusterParameterGroupsOutput, bool) bool) error + + DescribeClusterParametersRequest(*redshift.DescribeClusterParametersInput) (*request.Request, *redshift.DescribeClusterParametersOutput) + + DescribeClusterParameters(*redshift.DescribeClusterParametersInput) (*redshift.DescribeClusterParametersOutput, error) + + DescribeClusterParametersPages(*redshift.DescribeClusterParametersInput, func(*redshift.DescribeClusterParametersOutput, bool) bool) error + + DescribeClusterSecurityGroupsRequest(*redshift.DescribeClusterSecurityGroupsInput) (*request.Request, *redshift.DescribeClusterSecurityGroupsOutput) + + DescribeClusterSecurityGroups(*redshift.DescribeClusterSecurityGroupsInput) (*redshift.DescribeClusterSecurityGroupsOutput, error) + + DescribeClusterSecurityGroupsPages(*redshift.DescribeClusterSecurityGroupsInput, func(*redshift.DescribeClusterSecurityGroupsOutput, bool) bool) error + + DescribeClusterSnapshotsRequest(*redshift.DescribeClusterSnapshotsInput) (*request.Request, *redshift.DescribeClusterSnapshotsOutput) + + DescribeClusterSnapshots(*redshift.DescribeClusterSnapshotsInput) (*redshift.DescribeClusterSnapshotsOutput, error) + + DescribeClusterSnapshotsPages(*redshift.DescribeClusterSnapshotsInput, func(*redshift.DescribeClusterSnapshotsOutput, bool) bool) error + + DescribeClusterSubnetGroupsRequest(*redshift.DescribeClusterSubnetGroupsInput) (*request.Request, *redshift.DescribeClusterSubnetGroupsOutput) + + DescribeClusterSubnetGroups(*redshift.DescribeClusterSubnetGroupsInput) (*redshift.DescribeClusterSubnetGroupsOutput, error) + + DescribeClusterSubnetGroupsPages(*redshift.DescribeClusterSubnetGroupsInput, func(*redshift.DescribeClusterSubnetGroupsOutput, bool) bool) error + + DescribeClusterVersionsRequest(*redshift.DescribeClusterVersionsInput) (*request.Request, *redshift.DescribeClusterVersionsOutput) + + DescribeClusterVersions(*redshift.DescribeClusterVersionsInput) (*redshift.DescribeClusterVersionsOutput, error) + + DescribeClusterVersionsPages(*redshift.DescribeClusterVersionsInput, func(*redshift.DescribeClusterVersionsOutput, bool) bool) error + + DescribeClustersRequest(*redshift.DescribeClustersInput) (*request.Request, *redshift.DescribeClustersOutput) + + DescribeClusters(*redshift.DescribeClustersInput) (*redshift.DescribeClustersOutput, error) + + DescribeClustersPages(*redshift.DescribeClustersInput, func(*redshift.DescribeClustersOutput, bool) bool) error + + DescribeDefaultClusterParametersRequest(*redshift.DescribeDefaultClusterParametersInput) (*request.Request, *redshift.DescribeDefaultClusterParametersOutput) + + DescribeDefaultClusterParameters(*redshift.DescribeDefaultClusterParametersInput) (*redshift.DescribeDefaultClusterParametersOutput, error) + + DescribeDefaultClusterParametersPages(*redshift.DescribeDefaultClusterParametersInput, func(*redshift.DescribeDefaultClusterParametersOutput, bool) bool) error + + DescribeEventCategoriesRequest(*redshift.DescribeEventCategoriesInput) (*request.Request, *redshift.DescribeEventCategoriesOutput) + + DescribeEventCategories(*redshift.DescribeEventCategoriesInput) (*redshift.DescribeEventCategoriesOutput, error) + + DescribeEventSubscriptionsRequest(*redshift.DescribeEventSubscriptionsInput) (*request.Request, *redshift.DescribeEventSubscriptionsOutput) + + DescribeEventSubscriptions(*redshift.DescribeEventSubscriptionsInput) 
(*redshift.DescribeEventSubscriptionsOutput, error) + + DescribeEventSubscriptionsPages(*redshift.DescribeEventSubscriptionsInput, func(*redshift.DescribeEventSubscriptionsOutput, bool) bool) error + + DescribeEventsRequest(*redshift.DescribeEventsInput) (*request.Request, *redshift.DescribeEventsOutput) + + DescribeEvents(*redshift.DescribeEventsInput) (*redshift.DescribeEventsOutput, error) + + DescribeEventsPages(*redshift.DescribeEventsInput, func(*redshift.DescribeEventsOutput, bool) bool) error + + DescribeHsmClientCertificatesRequest(*redshift.DescribeHsmClientCertificatesInput) (*request.Request, *redshift.DescribeHsmClientCertificatesOutput) + + DescribeHsmClientCertificates(*redshift.DescribeHsmClientCertificatesInput) (*redshift.DescribeHsmClientCertificatesOutput, error) + + DescribeHsmClientCertificatesPages(*redshift.DescribeHsmClientCertificatesInput, func(*redshift.DescribeHsmClientCertificatesOutput, bool) bool) error + + DescribeHsmConfigurationsRequest(*redshift.DescribeHsmConfigurationsInput) (*request.Request, *redshift.DescribeHsmConfigurationsOutput) + + DescribeHsmConfigurations(*redshift.DescribeHsmConfigurationsInput) (*redshift.DescribeHsmConfigurationsOutput, error) + + DescribeHsmConfigurationsPages(*redshift.DescribeHsmConfigurationsInput, func(*redshift.DescribeHsmConfigurationsOutput, bool) bool) error + + DescribeLoggingStatusRequest(*redshift.DescribeLoggingStatusInput) (*request.Request, *redshift.LoggingStatus) + + DescribeLoggingStatus(*redshift.DescribeLoggingStatusInput) (*redshift.LoggingStatus, error) + + DescribeOrderableClusterOptionsRequest(*redshift.DescribeOrderableClusterOptionsInput) (*request.Request, *redshift.DescribeOrderableClusterOptionsOutput) + + DescribeOrderableClusterOptions(*redshift.DescribeOrderableClusterOptionsInput) (*redshift.DescribeOrderableClusterOptionsOutput, error) + + DescribeOrderableClusterOptionsPages(*redshift.DescribeOrderableClusterOptionsInput, func(*redshift.DescribeOrderableClusterOptionsOutput, bool) bool) error + + DescribeReservedNodeOfferingsRequest(*redshift.DescribeReservedNodeOfferingsInput) (*request.Request, *redshift.DescribeReservedNodeOfferingsOutput) + + DescribeReservedNodeOfferings(*redshift.DescribeReservedNodeOfferingsInput) (*redshift.DescribeReservedNodeOfferingsOutput, error) + + DescribeReservedNodeOfferingsPages(*redshift.DescribeReservedNodeOfferingsInput, func(*redshift.DescribeReservedNodeOfferingsOutput, bool) bool) error + + DescribeReservedNodesRequest(*redshift.DescribeReservedNodesInput) (*request.Request, *redshift.DescribeReservedNodesOutput) + + DescribeReservedNodes(*redshift.DescribeReservedNodesInput) (*redshift.DescribeReservedNodesOutput, error) + + DescribeReservedNodesPages(*redshift.DescribeReservedNodesInput, func(*redshift.DescribeReservedNodesOutput, bool) bool) error + + DescribeResizeRequest(*redshift.DescribeResizeInput) (*request.Request, *redshift.DescribeResizeOutput) + + DescribeResize(*redshift.DescribeResizeInput) (*redshift.DescribeResizeOutput, error) + + DescribeSnapshotCopyGrantsRequest(*redshift.DescribeSnapshotCopyGrantsInput) (*request.Request, *redshift.DescribeSnapshotCopyGrantsOutput) + + DescribeSnapshotCopyGrants(*redshift.DescribeSnapshotCopyGrantsInput) (*redshift.DescribeSnapshotCopyGrantsOutput, error) + + DescribeTagsRequest(*redshift.DescribeTagsInput) (*request.Request, *redshift.DescribeTagsOutput) + + DescribeTags(*redshift.DescribeTagsInput) (*redshift.DescribeTagsOutput, error) + + DisableLoggingRequest(*redshift.DisableLoggingInput) 
(*request.Request, *redshift.LoggingStatus) + + DisableLogging(*redshift.DisableLoggingInput) (*redshift.LoggingStatus, error) + + DisableSnapshotCopyRequest(*redshift.DisableSnapshotCopyInput) (*request.Request, *redshift.DisableSnapshotCopyOutput) + + DisableSnapshotCopy(*redshift.DisableSnapshotCopyInput) (*redshift.DisableSnapshotCopyOutput, error) + + EnableLoggingRequest(*redshift.EnableLoggingInput) (*request.Request, *redshift.LoggingStatus) + + EnableLogging(*redshift.EnableLoggingInput) (*redshift.LoggingStatus, error) + + EnableSnapshotCopyRequest(*redshift.EnableSnapshotCopyInput) (*request.Request, *redshift.EnableSnapshotCopyOutput) + + EnableSnapshotCopy(*redshift.EnableSnapshotCopyInput) (*redshift.EnableSnapshotCopyOutput, error) + + ModifyClusterRequest(*redshift.ModifyClusterInput) (*request.Request, *redshift.ModifyClusterOutput) + + ModifyCluster(*redshift.ModifyClusterInput) (*redshift.ModifyClusterOutput, error) + + ModifyClusterParameterGroupRequest(*redshift.ModifyClusterParameterGroupInput) (*request.Request, *redshift.ClusterParameterGroupNameMessage) + + ModifyClusterParameterGroup(*redshift.ModifyClusterParameterGroupInput) (*redshift.ClusterParameterGroupNameMessage, error) + + ModifyClusterSubnetGroupRequest(*redshift.ModifyClusterSubnetGroupInput) (*request.Request, *redshift.ModifyClusterSubnetGroupOutput) + + ModifyClusterSubnetGroup(*redshift.ModifyClusterSubnetGroupInput) (*redshift.ModifyClusterSubnetGroupOutput, error) + + ModifyEventSubscriptionRequest(*redshift.ModifyEventSubscriptionInput) (*request.Request, *redshift.ModifyEventSubscriptionOutput) + + ModifyEventSubscription(*redshift.ModifyEventSubscriptionInput) (*redshift.ModifyEventSubscriptionOutput, error) + + ModifySnapshotCopyRetentionPeriodRequest(*redshift.ModifySnapshotCopyRetentionPeriodInput) (*request.Request, *redshift.ModifySnapshotCopyRetentionPeriodOutput) + + ModifySnapshotCopyRetentionPeriod(*redshift.ModifySnapshotCopyRetentionPeriodInput) (*redshift.ModifySnapshotCopyRetentionPeriodOutput, error) + + PurchaseReservedNodeOfferingRequest(*redshift.PurchaseReservedNodeOfferingInput) (*request.Request, *redshift.PurchaseReservedNodeOfferingOutput) + + PurchaseReservedNodeOffering(*redshift.PurchaseReservedNodeOfferingInput) (*redshift.PurchaseReservedNodeOfferingOutput, error) + + RebootClusterRequest(*redshift.RebootClusterInput) (*request.Request, *redshift.RebootClusterOutput) + + RebootCluster(*redshift.RebootClusterInput) (*redshift.RebootClusterOutput, error) + + ResetClusterParameterGroupRequest(*redshift.ResetClusterParameterGroupInput) (*request.Request, *redshift.ClusterParameterGroupNameMessage) + + ResetClusterParameterGroup(*redshift.ResetClusterParameterGroupInput) (*redshift.ClusterParameterGroupNameMessage, error) + + RestoreFromClusterSnapshotRequest(*redshift.RestoreFromClusterSnapshotInput) (*request.Request, *redshift.RestoreFromClusterSnapshotOutput) + + RestoreFromClusterSnapshot(*redshift.RestoreFromClusterSnapshotInput) (*redshift.RestoreFromClusterSnapshotOutput, error) + + RevokeClusterSecurityGroupIngressRequest(*redshift.RevokeClusterSecurityGroupIngressInput) (*request.Request, *redshift.RevokeClusterSecurityGroupIngressOutput) + + RevokeClusterSecurityGroupIngress(*redshift.RevokeClusterSecurityGroupIngressInput) (*redshift.RevokeClusterSecurityGroupIngressOutput, error) + + RevokeSnapshotAccessRequest(*redshift.RevokeSnapshotAccessInput) (*request.Request, *redshift.RevokeSnapshotAccessOutput) + + 
+	RevokeSnapshotAccess(*redshift.RevokeSnapshotAccessInput) (*redshift.RevokeSnapshotAccessOutput, error)
+
+	RotateEncryptionKeyRequest(*redshift.RotateEncryptionKeyInput) (*request.Request, *redshift.RotateEncryptionKeyOutput)
+
+	RotateEncryptionKey(*redshift.RotateEncryptionKeyInput) (*redshift.RotateEncryptionKeyOutput, error)
+}
+
+var _ RedshiftAPI = (*redshift.Redshift)(nil)
diff --git a/vendor/github.com/aws/aws-sdk-go/service/redshift/service.go b/vendor/github.com/aws/aws-sdk-go/service/redshift/service.go
new file mode 100644
index 000000000..6d1c8db11
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/redshift/service.go
@@ -0,0 +1,107 @@
+// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT.
+
+package redshift
+
+import (
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/aws/client"
+	"github.com/aws/aws-sdk-go/aws/client/metadata"
+	"github.com/aws/aws-sdk-go/aws/request"
+	"github.com/aws/aws-sdk-go/private/protocol/query"
+	"github.com/aws/aws-sdk-go/private/signer/v4"
+)
+
+// This is an interface reference for Amazon Redshift. It contains documentation
+// for one of the programming or command line interfaces you can use to manage
+// Amazon Redshift clusters. Note that Amazon Redshift is asynchronous, which
+// means that some interfaces may require techniques, such as polling or
+// asynchronous callback handlers, to determine when a command has been applied.
+// In this reference, the parameter descriptions indicate whether a change is
+// applied immediately, on the next instance reboot, or during the next maintenance
+// window. For a summary of the Amazon Redshift cluster management interfaces,
+// go to Using the Amazon Redshift Management Interfaces (http://docs.aws.amazon.com/redshift/latest/mgmt/using-aws-sdk.html).
+//
+// Amazon Redshift manages all the work of setting up, operating, and scaling
+// a data warehouse: provisioning capacity, monitoring and backing up the cluster,
+// and applying patches and upgrades to the Amazon Redshift engine. You can
+// focus on using your data to acquire new insights for your business and customers.
+//
+// If you are a first-time user of Amazon Redshift, we recommend that you begin
+// by reading the Amazon Redshift Getting Started Guide (http://docs.aws.amazon.com/redshift/latest/gsg/getting-started.html).
+//
+// If you are a database developer, the Amazon Redshift Database Developer
+// Guide (http://docs.aws.amazon.com/redshift/latest/dg/welcome.html) explains
+// how to design, build, query, and maintain the databases that make up your
+// data warehouse.
+//
+// The service client's operations are safe to use concurrently. It is not
+// safe to mutate any of the client's properties, though.
+type Redshift struct {
+	*client.Client
+}
+
+// Used for custom client initialization logic
+var initClient func(*client.Client)
+
+// Used for custom request initialization logic
+var initRequest func(*request.Request)
+
+// A ServiceName is the name of the service the client will make API calls to.
+const ServiceName = "redshift"
+
+// New creates a new instance of the Redshift client with a session.
+// If additional configuration is needed for the client instance, use the optional
+// aws.Config parameter to add your extra config.
+//
+// Example:
+//     // Create a Redshift client from just a session.
+// svc := redshift.New(mySession) +// +// // Create a Redshift client with additional configuration +// svc := redshift.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func New(p client.ConfigProvider, cfgs ...*aws.Config) *Redshift { + c := p.ClientConfig(ServiceName, cfgs...) + return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *Redshift { + svc := &Redshift{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: ServiceName, + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "2012-12-01", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Build.PushBack(query.Build) + svc.Handlers.Unmarshal.PushBack(query.Unmarshal) + svc.Handlers.UnmarshalMeta.PushBack(query.UnmarshalMeta) + svc.Handlers.UnmarshalError.PushBack(query.UnmarshalError) + + // Run custom client initialization if present + if initClient != nil { + initClient(svc.Client) + } + + return svc +} + +// newRequest creates a new request for a Redshift operation and runs any +// custom request initialization. +func (c *Redshift) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + // Run custom request initialization if present + if initRequest != nil { + initRequest(req) + } + + return req +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/redshift/waiters.go b/vendor/github.com/aws/aws-sdk-go/service/redshift/waiters.go new file mode 100644 index 000000000..7fe00bc4e --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/redshift/waiters.go @@ -0,0 +1,141 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. 
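+//
+// The waiters in this file poll DescribeClusters or DescribeClusterSnapshots
+// until one of the configured acceptors matches. A minimal usage sketch (the
+// cluster identifier below is hypothetical):
+//
+//     svc := redshift.New(session.New())
+//     err := svc.WaitUntilClusterAvailable(&redshift.DescribeClustersInput{
+//         ClusterIdentifier: aws.String("my-cluster"),
+//     })
+//     // err is non-nil if a failure acceptor matched or attempts ran out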
+
+package redshift
+
+import (
+	"github.com/aws/aws-sdk-go/private/waiter"
+)
+
+func (c *Redshift) WaitUntilClusterAvailable(input *DescribeClustersInput) error {
+	waiterCfg := waiter.Config{
+		Operation:   "DescribeClusters",
+		Delay:       60,
+		MaxAttempts: 30,
+		Acceptors: []waiter.WaitAcceptor{
+			{
+				State:    "success",
+				Matcher:  "pathAll",
+				Argument: "Clusters[].ClusterStatus",
+				Expected: "available",
+			},
+			{
+				State:    "failure",
+				Matcher:  "pathAny",
+				Argument: "Clusters[].ClusterStatus",
+				Expected: "deleting",
+			},
+			{
+				State:    "retry",
+				Matcher:  "error",
+				Argument: "",
+				Expected: "ClusterNotFound",
+			},
+		},
+	}
+
+	w := waiter.Waiter{
+		Client: c,
+		Input:  input,
+		Config: waiterCfg,
+	}
+	return w.Wait()
+}
+
+func (c *Redshift) WaitUntilClusterDeleted(input *DescribeClustersInput) error {
+	waiterCfg := waiter.Config{
+		Operation:   "DescribeClusters",
+		Delay:       60,
+		MaxAttempts: 30,
+		Acceptors: []waiter.WaitAcceptor{
+			{
+				State:    "success",
+				Matcher:  "error",
+				Argument: "",
+				Expected: "ClusterNotFound",
+			},
+			{
+				State:    "failure",
+				Matcher:  "pathAny",
+				Argument: "Clusters[].ClusterStatus",
+				Expected: "creating",
+			},
+			{
+				State:    "failure",
+				Matcher:  "pathAny",
+				Argument: "Clusters[].ClusterStatus",
+				Expected: "modifying",
+			},
+		},
+	}
+
+	w := waiter.Waiter{
+		Client: c,
+		Input:  input,
+		Config: waiterCfg,
+	}
+	return w.Wait()
+}
+
+func (c *Redshift) WaitUntilClusterRestored(input *DescribeClustersInput) error {
+	waiterCfg := waiter.Config{
+		Operation:   "DescribeClusters",
+		Delay:       60,
+		MaxAttempts: 30,
+		Acceptors: []waiter.WaitAcceptor{
+			{
+				State:    "success",
+				Matcher:  "pathAll",
+				Argument: "Clusters[].RestoreStatus.Status",
+				Expected: "completed",
+			},
+			{
+				State:    "failure",
+				Matcher:  "pathAny",
+				Argument: "Clusters[].ClusterStatus",
+				Expected: "deleting",
+			},
+		},
+	}
+
+	w := waiter.Waiter{
+		Client: c,
+		Input:  input,
+		Config: waiterCfg,
+	}
+	return w.Wait()
+}
+
+func (c *Redshift) WaitUntilSnapshotAvailable(input *DescribeClusterSnapshotsInput) error {
+	waiterCfg := waiter.Config{
+		Operation:   "DescribeClusterSnapshots",
+		Delay:       15,
+		MaxAttempts: 20,
+		Acceptors: []waiter.WaitAcceptor{
+			{
+				State:    "success",
+				Matcher:  "pathAll",
+				Argument: "Snapshots[].Status",
+				Expected: "available",
+			},
+			{
+				State:    "failure",
+				Matcher:  "pathAny",
+				Argument: "Snapshots[].Status",
+				Expected: "failed",
+			},
+			{
+				State:    "failure",
+				Matcher:  "pathAny",
+				Argument: "Snapshots[].Status",
+				Expected: "deleted",
+			},
+		},
+	}
+
+	w := waiter.Waiter{
+		Client: c,
+		Input:  input,
+		Config: waiterCfg,
+	}
+	return w.Wait()
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/service/route53/api.go b/vendor/github.com/aws/aws-sdk-go/service/route53/api.go
new file mode 100644
index 000000000..ee93d72ba
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/route53/api.go
@@ -0,0 +1,5646 @@
+// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT.
+
+// Package route53 provides a client for Amazon Route 53.
+package route53
+
+import (
+	"time"
+
+	"github.com/aws/aws-sdk-go/aws/awsutil"
+	"github.com/aws/aws-sdk-go/aws/request"
+)
+
+const opAssociateVPCWithHostedZone = "AssociateVPCWithHostedZone"
+
+// AssociateVPCWithHostedZoneRequest generates a request for the AssociateVPCWithHostedZone operation.
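+// Each operation in this package comes as a *Request/Send pair plus the
+// one-shot convenience method below it; nothing goes on the wire until Send
+// is called. A minimal sketch (svc is a *Route53 client and input is an
+// *AssociateVPCWithHostedZoneInput):
+//
+//     req, out := svc.AssociateVPCWithHostedZoneRequest(input)
+//     err := req.Send() // on success, out holds the unmarshaled response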
+func (c *Route53) AssociateVPCWithHostedZoneRequest(input *AssociateVPCWithHostedZoneInput) (req *request.Request, output *AssociateVPCWithHostedZoneOutput) {
+	op := &request.Operation{
+		Name:       opAssociateVPCWithHostedZone,
+		HTTPMethod: "POST",
+		HTTPPath:   "/2013-04-01/hostedzone/{Id}/associatevpc",
+	}
+
+	if input == nil {
+		input = &AssociateVPCWithHostedZoneInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &AssociateVPCWithHostedZoneOutput{}
+	req.Data = output
+	return
+}
+
+// This action associates a VPC with a hosted zone.
+//
+// To associate a VPC with a hosted zone, send a POST request to the 2013-04-01/hostedzone/hosted
+// zone ID/associatevpc resource. The request body must include an XML document
+// with an AssociateVPCWithHostedZoneRequest element. The response returns the
+// AssociateVPCWithHostedZoneResponse element that contains ChangeInfo for you
+// to track the progress of the AssociateVPCWithHostedZoneRequest you made.
+// See the GetChange operation for how to track the progress of your change.
+func (c *Route53) AssociateVPCWithHostedZone(input *AssociateVPCWithHostedZoneInput) (*AssociateVPCWithHostedZoneOutput, error) {
+	req, out := c.AssociateVPCWithHostedZoneRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+const opChangeResourceRecordSets = "ChangeResourceRecordSets"
+
+// ChangeResourceRecordSetsRequest generates a request for the ChangeResourceRecordSets operation.
+func (c *Route53) ChangeResourceRecordSetsRequest(input *ChangeResourceRecordSetsInput) (req *request.Request, output *ChangeResourceRecordSetsOutput) {
+	op := &request.Operation{
+		Name:       opChangeResourceRecordSets,
+		HTTPMethod: "POST",
+		HTTPPath:   "/2013-04-01/hostedzone/{Id}/rrset/",
+	}
+
+	if input == nil {
+		input = &ChangeResourceRecordSetsInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &ChangeResourceRecordSetsOutput{}
+	req.Data = output
+	return
+}
+
+// Use this action to create or change your authoritative DNS information. To
+// use this action, send a POST request to the 2013-04-01/hostedzone/hosted
+// zone ID/rrset resource. The request body must include an XML document with
+// a ChangeResourceRecordSetsRequest element.
+//
+// Changes are a list of change items and are considered transactional. For
+// more information on transactional changes, also known as change batches,
+// see POST ChangeResourceRecordSets (http://docs.aws.amazon.com/Route53/latest/APIReference/)
+// in the Amazon Route 53 API Reference.
+//
+// Due to the nature of transactional changes, you cannot delete the same resource
+// record set more than once in a single change batch. If you attempt to delete
+// the same resource record set more than once, Amazon Route 53 returns an
+// InvalidChangeBatch error. In response to a ChangeResourceRecordSets request,
+// your DNS data is changed on all Amazon Route 53 DNS servers. Initially, the
+// status of a change is PENDING. This means the change has not yet propagated
+// to all the authoritative Amazon Route 53 DNS servers. When the change has
+// propagated to all hosts, the change returns a status of INSYNC.
+//
+// Note the following limitations on a ChangeResourceRecordSets request:
+//
+//  - A request cannot contain more than 100 Change elements.
+//  - A request cannot contain more than 1000 ResourceRecord elements.
+//  - The sum of the number of characters (including spaces) in all Value
+//    elements in a request cannot exceed 32,000 characters.
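+//
+// A minimal sketch of upserting one A record, assuming a hosted zone with
+// the hypothetical ID Z1EXAMPLE already exists (all values are placeholders):
+//
+//     svc := route53.New(session.New())
+//     _, err := svc.ChangeResourceRecordSets(&route53.ChangeResourceRecordSetsInput{
+//         HostedZoneId: aws.String("Z1EXAMPLE"),
+//         ChangeBatch: &route53.ChangeBatch{
+//             Changes: []*route53.Change{{
+//                 Action: aws.String("UPSERT"),
+//                 ResourceRecordSet: &route53.ResourceRecordSet{
+//                     Name: aws.String("www.example.com."),
+//                     Type: aws.String("A"),
+//                     TTL:  aws.Int64(300),
+//                     ResourceRecords: []*route53.ResourceRecord{
+//                         {Value: aws.String("203.0.113.10")},
+//                     },
+//                 },
+//             }},
+//         },
+//     })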
+func (c *Route53) ChangeResourceRecordSets(input *ChangeResourceRecordSetsInput) (*ChangeResourceRecordSetsOutput, error) { + req, out := c.ChangeResourceRecordSetsRequest(input) + err := req.Send() + return out, err +} + +const opChangeTagsForResource = "ChangeTagsForResource" + +// ChangeTagsForResourceRequest generates a request for the ChangeTagsForResource operation. +func (c *Route53) ChangeTagsForResourceRequest(input *ChangeTagsForResourceInput) (req *request.Request, output *ChangeTagsForResourceOutput) { + op := &request.Operation{ + Name: opChangeTagsForResource, + HTTPMethod: "POST", + HTTPPath: "/2013-04-01/tags/{ResourceType}/{ResourceId}", + } + + if input == nil { + input = &ChangeTagsForResourceInput{} + } + + req = c.newRequest(op, input, output) + output = &ChangeTagsForResourceOutput{} + req.Data = output + return +} + +func (c *Route53) ChangeTagsForResource(input *ChangeTagsForResourceInput) (*ChangeTagsForResourceOutput, error) { + req, out := c.ChangeTagsForResourceRequest(input) + err := req.Send() + return out, err +} + +const opCreateHealthCheck = "CreateHealthCheck" + +// CreateHealthCheckRequest generates a request for the CreateHealthCheck operation. +func (c *Route53) CreateHealthCheckRequest(input *CreateHealthCheckInput) (req *request.Request, output *CreateHealthCheckOutput) { + op := &request.Operation{ + Name: opCreateHealthCheck, + HTTPMethod: "POST", + HTTPPath: "/2013-04-01/healthcheck", + } + + if input == nil { + input = &CreateHealthCheckInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateHealthCheckOutput{} + req.Data = output + return +} + +// This action creates a new health check. +// +// To create a new health check, send a POST request to the 2013-04-01/healthcheck +// resource. The request body must include an XML document with a CreateHealthCheckRequest +// element. The response returns the CreateHealthCheckResponse element that +// contains metadata about the health check. +func (c *Route53) CreateHealthCheck(input *CreateHealthCheckInput) (*CreateHealthCheckOutput, error) { + req, out := c.CreateHealthCheckRequest(input) + err := req.Send() + return out, err +} + +const opCreateHostedZone = "CreateHostedZone" + +// CreateHostedZoneRequest generates a request for the CreateHostedZone operation. +func (c *Route53) CreateHostedZoneRequest(input *CreateHostedZoneInput) (req *request.Request, output *CreateHostedZoneOutput) { + op := &request.Operation{ + Name: opCreateHostedZone, + HTTPMethod: "POST", + HTTPPath: "/2013-04-01/hostedzone", + } + + if input == nil { + input = &CreateHostedZoneInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateHostedZoneOutput{} + req.Data = output + return +} + +// This action creates a new hosted zone. +// +// To create a new hosted zone, send a POST request to the 2013-04-01/hostedzone +// resource. The request body must include an XML document with a CreateHostedZoneRequest +// element. The response returns the CreateHostedZoneResponse element that contains +// metadata about the hosted zone. +// +// Amazon Route 53 automatically creates a default SOA record and four NS records +// for the zone. The NS records in the hosted zone are the name servers you +// give your registrar to delegate your domain to. For more information about +// SOA and NS records, see NS and SOA Records that Amazon Route 53 Creates for +// a Hosted Zone (http://docs.aws.amazon.com/Route53/latest/DeveloperGuide/SOA-NSrecords.html) +// in the Amazon Route 53 Developer Guide. 
+//
+// When you create a zone, its initial status is PENDING. This means that it
+// is not yet available on all DNS servers. The status of the zone changes to
+// INSYNC when the NS and SOA records are available on all Amazon Route 53 DNS
+// servers.
+//
+// When you create a hosted zone using a reusable delegation set, you can
+// specify an optional DelegationSetId, and Amazon Route 53 assigns the four
+// NS records of that delegation set to the zone instead of allotting a new one.
+func (c *Route53) CreateHostedZone(input *CreateHostedZoneInput) (*CreateHostedZoneOutput, error) {
+	req, out := c.CreateHostedZoneRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+const opCreateReusableDelegationSet = "CreateReusableDelegationSet"
+
+// CreateReusableDelegationSetRequest generates a request for the CreateReusableDelegationSet operation.
+func (c *Route53) CreateReusableDelegationSetRequest(input *CreateReusableDelegationSetInput) (req *request.Request, output *CreateReusableDelegationSetOutput) {
+	op := &request.Operation{
+		Name:       opCreateReusableDelegationSet,
+		HTTPMethod: "POST",
+		HTTPPath:   "/2013-04-01/delegationset",
+	}
+
+	if input == nil {
+		input = &CreateReusableDelegationSetInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &CreateReusableDelegationSetOutput{}
+	req.Data = output
+	return
+}
+
+// This action creates a reusable delegation set.
+//
+// To create a new reusable delegation set, send a POST request to the 2013-04-01/delegationset
+// resource. The request body must include an XML document with a CreateReusableDelegationSetRequest
+// element. The response returns the CreateReusableDelegationSetResponse element
+// that contains metadata about the delegation set.
+//
+// If the optional parameter HostedZoneId is specified, it marks the delegation
+// set associated with that particular hosted zone as reusable.
+func (c *Route53) CreateReusableDelegationSet(input *CreateReusableDelegationSetInput) (*CreateReusableDelegationSetOutput, error) {
+	req, out := c.CreateReusableDelegationSetRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+const opCreateTrafficPolicy = "CreateTrafficPolicy"
+
+// CreateTrafficPolicyRequest generates a request for the CreateTrafficPolicy operation.
+func (c *Route53) CreateTrafficPolicyRequest(input *CreateTrafficPolicyInput) (req *request.Request, output *CreateTrafficPolicyOutput) {
+	op := &request.Operation{
+		Name:       opCreateTrafficPolicy,
+		HTTPMethod: "POST",
+		HTTPPath:   "/2013-04-01/trafficpolicy",
+	}
+
+	if input == nil {
+		input = &CreateTrafficPolicyInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &CreateTrafficPolicyOutput{}
+	req.Data = output
+	return
+}
+
+// Creates a traffic policy, which you use to create multiple DNS resource record
+// sets for one domain name (such as example.com) or one subdomain name (such
+// as www.example.com).
+//
+// To create a traffic policy, send a POST request to the 2013-04-01/trafficpolicy
+// resource. The request body must include an XML document with a CreateTrafficPolicyRequest
+// element. The response includes the CreateTrafficPolicyResponse element, which
+// contains information about the new traffic policy.
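+//
+// A minimal sketch (the policy name is hypothetical; the document is a
+// JSON-formatted traffic policy definition, elided here):
+//
+//     resp, err := svc.CreateTrafficPolicy(&route53.CreateTrafficPolicyInput{
+//         Name:     aws.String("example-policy"),
+//         Document: aws.String(`{"AWSPolicyFormatVersion":"2015-10-01", ...}`),
+//     })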
+func (c *Route53) CreateTrafficPolicy(input *CreateTrafficPolicyInput) (*CreateTrafficPolicyOutput, error) {
+	req, out := c.CreateTrafficPolicyRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+const opCreateTrafficPolicyInstance = "CreateTrafficPolicyInstance"
+
+// CreateTrafficPolicyInstanceRequest generates a request for the CreateTrafficPolicyInstance operation.
+func (c *Route53) CreateTrafficPolicyInstanceRequest(input *CreateTrafficPolicyInstanceInput) (req *request.Request, output *CreateTrafficPolicyInstanceOutput) {
+	op := &request.Operation{
+		Name:       opCreateTrafficPolicyInstance,
+		HTTPMethod: "POST",
+		HTTPPath:   "/2013-04-01/trafficpolicyinstance",
+	}
+
+	if input == nil {
+		input = &CreateTrafficPolicyInstanceInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &CreateTrafficPolicyInstanceOutput{}
+	req.Data = output
+	return
+}
+
+// Creates resource record sets in a specified hosted zone based on the settings
+// in a specified traffic policy version. In addition, CreateTrafficPolicyInstance
+// associates the resource record sets with a specified domain name (such as
+// example.com) or subdomain name (such as www.example.com). Amazon Route 53
+// responds to DNS queries for the domain or subdomain name by using the resource
+// record sets that CreateTrafficPolicyInstance created.
+//
+// To create a traffic policy instance, send a POST request to the 2013-04-01/trafficpolicyinstance
+// resource. The request body must include an XML document with a CreateTrafficPolicyInstanceRequest
+// element. The response returns the CreateTrafficPolicyInstanceResponse element,
+// which contains information about the traffic policy instance.
+func (c *Route53) CreateTrafficPolicyInstance(input *CreateTrafficPolicyInstanceInput) (*CreateTrafficPolicyInstanceOutput, error) {
+	req, out := c.CreateTrafficPolicyInstanceRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+const opCreateTrafficPolicyVersion = "CreateTrafficPolicyVersion"
+
+// CreateTrafficPolicyVersionRequest generates a request for the CreateTrafficPolicyVersion operation.
+func (c *Route53) CreateTrafficPolicyVersionRequest(input *CreateTrafficPolicyVersionInput) (req *request.Request, output *CreateTrafficPolicyVersionOutput) {
+	op := &request.Operation{
+		Name:       opCreateTrafficPolicyVersion,
+		HTTPMethod: "POST",
+		HTTPPath:   "/2013-04-01/trafficpolicy/{Id}",
+	}
+
+	if input == nil {
+		input = &CreateTrafficPolicyVersionInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &CreateTrafficPolicyVersionOutput{}
+	req.Data = output
+	return
+}
+
+// Creates a new version of an existing traffic policy. When you create a new
+// version of a traffic policy, you specify the ID of the traffic policy that
+// you want to update and a JSON-formatted document that describes the new version.
+//
+// You use traffic policies to create multiple DNS resource record sets for
+// one domain name (such as example.com) or one subdomain name (such as www.example.com).
+//
+// To create a new version, send a POST request to the 2013-04-01/trafficpolicy/
+// resource. The request body includes an XML document with a CreateTrafficPolicyVersionRequest
+// element. The response returns the CreateTrafficPolicyVersionResponse element,
+// which contains information about the new version of the traffic policy.
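+//
+// A minimal sketch, assuming an existing traffic policy with a hypothetical
+// ID; updatedPolicyJSON stands in for the new policy document, and the version
+// number is assigned by the service:
+//
+//     resp, err := svc.CreateTrafficPolicyVersion(&route53.CreateTrafficPolicyVersionInput{
+//         Id:       aws.String("tp-id-example"),
+//         Document: aws.String(updatedPolicyJSON),
+//     })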
+func (c *Route53) CreateTrafficPolicyVersion(input *CreateTrafficPolicyVersionInput) (*CreateTrafficPolicyVersionOutput, error) { + req, out := c.CreateTrafficPolicyVersionRequest(input) + err := req.Send() + return out, err +} + +const opDeleteHealthCheck = "DeleteHealthCheck" + +// DeleteHealthCheckRequest generates a request for the DeleteHealthCheck operation. +func (c *Route53) DeleteHealthCheckRequest(input *DeleteHealthCheckInput) (req *request.Request, output *DeleteHealthCheckOutput) { + op := &request.Operation{ + Name: opDeleteHealthCheck, + HTTPMethod: "DELETE", + HTTPPath: "/2013-04-01/healthcheck/{HealthCheckId}", + } + + if input == nil { + input = &DeleteHealthCheckInput{} + } + + req = c.newRequest(op, input, output) + output = &DeleteHealthCheckOutput{} + req.Data = output + return +} + +// This action deletes a health check. To delete a health check, send a DELETE +// request to the 2013-04-01/healthcheck/health check ID resource. +// +// You can delete a health check only if there are no resource record sets +// associated with this health check. If resource record sets are associated +// with this health check, you must disassociate them before you can delete +// your health check. If you try to delete a health check that is associated +// with resource record sets, Amazon Route 53 will deny your request with a +// HealthCheckInUse error. For information about disassociating the records +// from your health check, see ChangeResourceRecordSets. +func (c *Route53) DeleteHealthCheck(input *DeleteHealthCheckInput) (*DeleteHealthCheckOutput, error) { + req, out := c.DeleteHealthCheckRequest(input) + err := req.Send() + return out, err +} + +const opDeleteHostedZone = "DeleteHostedZone" + +// DeleteHostedZoneRequest generates a request for the DeleteHostedZone operation. +func (c *Route53) DeleteHostedZoneRequest(input *DeleteHostedZoneInput) (req *request.Request, output *DeleteHostedZoneOutput) { + op := &request.Operation{ + Name: opDeleteHostedZone, + HTTPMethod: "DELETE", + HTTPPath: "/2013-04-01/hostedzone/{Id}", + } + + if input == nil { + input = &DeleteHostedZoneInput{} + } + + req = c.newRequest(op, input, output) + output = &DeleteHostedZoneOutput{} + req.Data = output + return +} + +// This action deletes a hosted zone. To delete a hosted zone, send a DELETE +// request to the 2013-04-01/hostedzone/hosted zone ID resource. +// +// For more information about deleting a hosted zone, see Deleting a Hosted +// Zone (http://docs.aws.amazon.com/Route53/latest/DeveloperGuide/DeleteHostedZone.html) +// in the Amazon Route 53 Developer Guide. +// +// You can delete a hosted zone only if there are no resource record sets +// other than the default SOA record and NS resource record sets. If your hosted +// zone contains other resource record sets, you must delete them before you +// can delete your hosted zone. If you try to delete a hosted zone that contains +// other resource record sets, Amazon Route 53 will deny your request with a +// HostedZoneNotEmpty error. For information about deleting records from your +// hosted zone, see ChangeResourceRecordSets. +func (c *Route53) DeleteHostedZone(input *DeleteHostedZoneInput) (*DeleteHostedZoneOutput, error) { + req, out := c.DeleteHostedZoneRequest(input) + err := req.Send() + return out, err +} + +const opDeleteReusableDelegationSet = "DeleteReusableDelegationSet" + +// DeleteReusableDelegationSetRequest generates a request for the DeleteReusableDelegationSet operation. 
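+// Error codes such as HostedZoneNotEmpty (above) or DelegationSetInUse (below)
+// can be inspected by casting the returned error to awserr.Error, as the
+// generated examples elsewhere in this patch suggest; a minimal sketch
+// (assumes "github.com/aws/aws-sdk-go/aws/awserr" is imported):
+//
+//     if _, err := svc.DeleteReusableDelegationSet(input); err != nil {
+//         if aerr, ok := err.(awserr.Error); ok && aerr.Code() == "DelegationSetInUse" {
+//             // delete the associated hosted zones first, then retry
+//         }
+//     }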
+func (c *Route53) DeleteReusableDelegationSetRequest(input *DeleteReusableDelegationSetInput) (req *request.Request, output *DeleteReusableDelegationSetOutput) {
+	op := &request.Operation{
+		Name:       opDeleteReusableDelegationSet,
+		HTTPMethod: "DELETE",
+		HTTPPath:   "/2013-04-01/delegationset/{Id}",
+	}
+
+	if input == nil {
+		input = &DeleteReusableDelegationSetInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &DeleteReusableDelegationSetOutput{}
+	req.Data = output
+	return
+}
+
+// This action deletes a reusable delegation set. To delete a reusable delegation
+// set, send a DELETE request to the 2013-04-01/delegationset/delegation set
+// ID resource.
+//
+// You can delete a reusable delegation set only if there are no associated
+// hosted zones. If your reusable delegation set contains associated hosted
+// zones, you must delete them before you can delete your reusable delegation
+// set. If you try to delete a reusable delegation set that contains associated
+// hosted zones, Amazon Route 53 will deny your request with a DelegationSetInUse
+// error.
+func (c *Route53) DeleteReusableDelegationSet(input *DeleteReusableDelegationSetInput) (*DeleteReusableDelegationSetOutput, error) {
+	req, out := c.DeleteReusableDelegationSetRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+const opDeleteTrafficPolicy = "DeleteTrafficPolicy"
+
+// DeleteTrafficPolicyRequest generates a request for the DeleteTrafficPolicy operation.
+func (c *Route53) DeleteTrafficPolicyRequest(input *DeleteTrafficPolicyInput) (req *request.Request, output *DeleteTrafficPolicyOutput) {
+	op := &request.Operation{
+		Name:       opDeleteTrafficPolicy,
+		HTTPMethod: "DELETE",
+		HTTPPath:   "/2013-04-01/trafficpolicy/{Id}/{Version}",
+	}
+
+	if input == nil {
+		input = &DeleteTrafficPolicyInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &DeleteTrafficPolicyOutput{}
+	req.Data = output
+	return
+}
+
+// Deletes a traffic policy. To delete a traffic policy, send a DELETE request
+// to the 2013-04-01/trafficpolicy resource.
+func (c *Route53) DeleteTrafficPolicy(input *DeleteTrafficPolicyInput) (*DeleteTrafficPolicyOutput, error) {
+	req, out := c.DeleteTrafficPolicyRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+const opDeleteTrafficPolicyInstance = "DeleteTrafficPolicyInstance"
+
+// DeleteTrafficPolicyInstanceRequest generates a request for the DeleteTrafficPolicyInstance operation.
+func (c *Route53) DeleteTrafficPolicyInstanceRequest(input *DeleteTrafficPolicyInstanceInput) (req *request.Request, output *DeleteTrafficPolicyInstanceOutput) {
+	op := &request.Operation{
+		Name:       opDeleteTrafficPolicyInstance,
+		HTTPMethod: "DELETE",
+		HTTPPath:   "/2013-04-01/trafficpolicyinstance/{Id}",
+	}
+
+	if input == nil {
+		input = &DeleteTrafficPolicyInstanceInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &DeleteTrafficPolicyInstanceOutput{}
+	req.Data = output
+	return
+}
+
+// Deletes a traffic policy instance and all of the resource record sets that
+// Amazon Route 53 created when you created the instance.
+//
+// To delete a traffic policy instance, send a DELETE request to the 2013-04-01/trafficpolicyinstance/traffic
+// policy instance ID resource.
+//
+// When you delete a traffic policy instance, Amazon Route 53 also deletes
+// all of the resource record sets that were created when you created the traffic
+// policy instance.
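+//
+// A minimal sketch (the instance ID is hypothetical):
+//
+//     _, err := svc.DeleteTrafficPolicyInstance(&route53.DeleteTrafficPolicyInstanceInput{
+//         Id: aws.String("tpi-id-example"),
+//     })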
+func (c *Route53) DeleteTrafficPolicyInstance(input *DeleteTrafficPolicyInstanceInput) (*DeleteTrafficPolicyInstanceOutput, error) {
+	req, out := c.DeleteTrafficPolicyInstanceRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+const opDisassociateVPCFromHostedZone = "DisassociateVPCFromHostedZone"
+
+// DisassociateVPCFromHostedZoneRequest generates a request for the DisassociateVPCFromHostedZone operation.
+func (c *Route53) DisassociateVPCFromHostedZoneRequest(input *DisassociateVPCFromHostedZoneInput) (req *request.Request, output *DisassociateVPCFromHostedZoneOutput) {
+	op := &request.Operation{
+		Name:       opDisassociateVPCFromHostedZone,
+		HTTPMethod: "POST",
+		HTTPPath:   "/2013-04-01/hostedzone/{Id}/disassociatevpc",
+	}
+
+	if input == nil {
+		input = &DisassociateVPCFromHostedZoneInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &DisassociateVPCFromHostedZoneOutput{}
+	req.Data = output
+	return
+}
+
+// This action disassociates a VPC from a hosted zone.
+//
+// To disassociate a VPC from a hosted zone, send a POST request to the 2013-04-01/hostedzone/hosted
+// zone ID/disassociatevpc resource. The request body must include an XML document
+// with a DisassociateVPCFromHostedZoneRequest element. The response returns
+// the DisassociateVPCFromHostedZoneResponse element that contains ChangeInfo
+// for you to track the progress of the DisassociateVPCFromHostedZoneRequest
+// you made. See the GetChange operation for how to track the progress of your
+// change.
+func (c *Route53) DisassociateVPCFromHostedZone(input *DisassociateVPCFromHostedZoneInput) (*DisassociateVPCFromHostedZoneOutput, error) {
+	req, out := c.DisassociateVPCFromHostedZoneRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+const opGetChange = "GetChange"
+
+// GetChangeRequest generates a request for the GetChange operation.
+func (c *Route53) GetChangeRequest(input *GetChangeInput) (req *request.Request, output *GetChangeOutput) {
+	op := &request.Operation{
+		Name:       opGetChange,
+		HTTPMethod: "GET",
+		HTTPPath:   "/2013-04-01/change/{Id}",
+	}
+
+	if input == nil {
+		input = &GetChangeInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &GetChangeOutput{}
+	req.Data = output
+	return
+}
+
+// This action returns the current status of a change batch request. The status
+// is one of the following values:
+//
+// - PENDING indicates that the changes in this request have not replicated
+// to all Amazon Route 53 DNS servers. This is the initial status of all change
+// batch requests.
+//
+// - INSYNC indicates that the changes have replicated to all Amazon Route
+// 53 DNS servers.
+func (c *Route53) GetChange(input *GetChangeInput) (*GetChangeOutput, error) {
+	req, out := c.GetChangeRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+const opGetChangeDetails = "GetChangeDetails"
+
+// GetChangeDetailsRequest generates a request for the GetChangeDetails operation.
+func (c *Route53) GetChangeDetailsRequest(input *GetChangeDetailsInput) (req *request.Request, output *GetChangeDetailsOutput) {
+	op := &request.Operation{
+		Name:       opGetChangeDetails,
+		HTTPMethod: "GET",
+		HTTPPath:   "/2013-04-01/changedetails/{Id}",
+	}
+
+	if input == nil {
+		input = &GetChangeDetailsInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &GetChangeDetailsOutput{}
+	req.Data = output
+	return
+}
+
+// This action returns the status and changes of a change batch request.
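+//
+// Callers that need to block until a change batch is live typically poll
+// GetChange (above) until the status is INSYNC; a minimal sketch (the change
+// ID and sleep interval are hypothetical):
+//
+//     for {
+//         resp, err := svc.GetChange(&route53.GetChangeInput{Id: aws.String("C1EXAMPLE")})
+//         if err != nil || *resp.ChangeInfo.Status == "INSYNC" {
+//             break
+//         }
+//         time.Sleep(30 * time.Second)
+//     }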
+func (c *Route53) GetChangeDetails(input *GetChangeDetailsInput) (*GetChangeDetailsOutput, error) { + req, out := c.GetChangeDetailsRequest(input) + err := req.Send() + return out, err +} + +const opGetCheckerIpRanges = "GetCheckerIpRanges" + +// GetCheckerIpRangesRequest generates a request for the GetCheckerIpRanges operation. +func (c *Route53) GetCheckerIpRangesRequest(input *GetCheckerIpRangesInput) (req *request.Request, output *GetCheckerIpRangesOutput) { + op := &request.Operation{ + Name: opGetCheckerIpRanges, + HTTPMethod: "GET", + HTTPPath: "/2013-04-01/checkeripranges", + } + + if input == nil { + input = &GetCheckerIpRangesInput{} + } + + req = c.newRequest(op, input, output) + output = &GetCheckerIpRangesOutput{} + req.Data = output + return +} + +// To retrieve a list of the IP ranges used by Amazon Route 53 health checkers +// to check the health of your resources, send a GET request to the 2013-04-01/checkeripranges +// resource. You can use these IP addresses to configure router and firewall +// rules to allow health checkers to check the health of your resources. +func (c *Route53) GetCheckerIpRanges(input *GetCheckerIpRangesInput) (*GetCheckerIpRangesOutput, error) { + req, out := c.GetCheckerIpRangesRequest(input) + err := req.Send() + return out, err +} + +const opGetGeoLocation = "GetGeoLocation" + +// GetGeoLocationRequest generates a request for the GetGeoLocation operation. +func (c *Route53) GetGeoLocationRequest(input *GetGeoLocationInput) (req *request.Request, output *GetGeoLocationOutput) { + op := &request.Operation{ + Name: opGetGeoLocation, + HTTPMethod: "GET", + HTTPPath: "/2013-04-01/geolocation", + } + + if input == nil { + input = &GetGeoLocationInput{} + } + + req = c.newRequest(op, input, output) + output = &GetGeoLocationOutput{} + req.Data = output + return +} + +// To retrieve a single geo location, send a GET request to the 2013-04-01/geolocation +// resource with one of these options: continentcode | countrycode | countrycode +// and subdivisioncode. +func (c *Route53) GetGeoLocation(input *GetGeoLocationInput) (*GetGeoLocationOutput, error) { + req, out := c.GetGeoLocationRequest(input) + err := req.Send() + return out, err +} + +const opGetHealthCheck = "GetHealthCheck" + +// GetHealthCheckRequest generates a request for the GetHealthCheck operation. +func (c *Route53) GetHealthCheckRequest(input *GetHealthCheckInput) (req *request.Request, output *GetHealthCheckOutput) { + op := &request.Operation{ + Name: opGetHealthCheck, + HTTPMethod: "GET", + HTTPPath: "/2013-04-01/healthcheck/{HealthCheckId}", + } + + if input == nil { + input = &GetHealthCheckInput{} + } + + req = c.newRequest(op, input, output) + output = &GetHealthCheckOutput{} + req.Data = output + return +} + +// To retrieve the health check, send a GET request to the 2013-04-01/healthcheck/health +// check ID resource. +func (c *Route53) GetHealthCheck(input *GetHealthCheckInput) (*GetHealthCheckOutput, error) { + req, out := c.GetHealthCheckRequest(input) + err := req.Send() + return out, err +} + +const opGetHealthCheckCount = "GetHealthCheckCount" + +// GetHealthCheckCountRequest generates a request for the GetHealthCheckCount operation. 
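+// The operation takes no parameters beyond the empty input struct; a minimal
+// sketch:
+//
+//     resp, err := svc.GetHealthCheckCount(&route53.GetHealthCheckCountInput{})
+//     if err == nil {
+//         fmt.Println(*resp.HealthCheckCount)
+//     }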
+func (c *Route53) GetHealthCheckCountRequest(input *GetHealthCheckCountInput) (req *request.Request, output *GetHealthCheckCountOutput) { + op := &request.Operation{ + Name: opGetHealthCheckCount, + HTTPMethod: "GET", + HTTPPath: "/2013-04-01/healthcheckcount", + } + + if input == nil { + input = &GetHealthCheckCountInput{} + } + + req = c.newRequest(op, input, output) + output = &GetHealthCheckCountOutput{} + req.Data = output + return +} + +// To retrieve a count of all your health checks, send a GET request to the +// 2013-04-01/healthcheckcount resource. +func (c *Route53) GetHealthCheckCount(input *GetHealthCheckCountInput) (*GetHealthCheckCountOutput, error) { + req, out := c.GetHealthCheckCountRequest(input) + err := req.Send() + return out, err +} + +const opGetHealthCheckLastFailureReason = "GetHealthCheckLastFailureReason" + +// GetHealthCheckLastFailureReasonRequest generates a request for the GetHealthCheckLastFailureReason operation. +func (c *Route53) GetHealthCheckLastFailureReasonRequest(input *GetHealthCheckLastFailureReasonInput) (req *request.Request, output *GetHealthCheckLastFailureReasonOutput) { + op := &request.Operation{ + Name: opGetHealthCheckLastFailureReason, + HTTPMethod: "GET", + HTTPPath: "/2013-04-01/healthcheck/{HealthCheckId}/lastfailurereason", + } + + if input == nil { + input = &GetHealthCheckLastFailureReasonInput{} + } + + req = c.newRequest(op, input, output) + output = &GetHealthCheckLastFailureReasonOutput{} + req.Data = output + return +} + +// If you want to learn why a health check is currently failing or why it failed +// most recently (if at all), you can get the failure reason for the most recent +// failure. Send a GET request to the 2013-04-01/healthcheck/health check ID/lastfailurereason +// resource. +func (c *Route53) GetHealthCheckLastFailureReason(input *GetHealthCheckLastFailureReasonInput) (*GetHealthCheckLastFailureReasonOutput, error) { + req, out := c.GetHealthCheckLastFailureReasonRequest(input) + err := req.Send() + return out, err +} + +const opGetHealthCheckStatus = "GetHealthCheckStatus" + +// GetHealthCheckStatusRequest generates a request for the GetHealthCheckStatus operation. +func (c *Route53) GetHealthCheckStatusRequest(input *GetHealthCheckStatusInput) (req *request.Request, output *GetHealthCheckStatusOutput) { + op := &request.Operation{ + Name: opGetHealthCheckStatus, + HTTPMethod: "GET", + HTTPPath: "/2013-04-01/healthcheck/{HealthCheckId}/status", + } + + if input == nil { + input = &GetHealthCheckStatusInput{} + } + + req = c.newRequest(op, input, output) + output = &GetHealthCheckStatusOutput{} + req.Data = output + return +} + +// To retrieve the health check status, send a GET request to the 2013-04-01/healthcheck/health +// check ID/status resource. You can use this call to get a health check's current +// status. +func (c *Route53) GetHealthCheckStatus(input *GetHealthCheckStatusInput) (*GetHealthCheckStatusOutput, error) { + req, out := c.GetHealthCheckStatusRequest(input) + err := req.Send() + return out, err +} + +const opGetHostedZone = "GetHostedZone" + +// GetHostedZoneRequest generates a request for the GetHostedZone operation. 
+func (c *Route53) GetHostedZoneRequest(input *GetHostedZoneInput) (req *request.Request, output *GetHostedZoneOutput) { + op := &request.Operation{ + Name: opGetHostedZone, + HTTPMethod: "GET", + HTTPPath: "/2013-04-01/hostedzone/{Id}", + } + + if input == nil { + input = &GetHostedZoneInput{} + } + + req = c.newRequest(op, input, output) + output = &GetHostedZoneOutput{} + req.Data = output + return +} + +// To retrieve the delegation set for a hosted zone, send a GET request to the +// 2013-04-01/hostedzone/hosted zone ID resource. The delegation set is the +// four Amazon Route 53 name servers that were assigned to the hosted zone when +// you created it. +func (c *Route53) GetHostedZone(input *GetHostedZoneInput) (*GetHostedZoneOutput, error) { + req, out := c.GetHostedZoneRequest(input) + err := req.Send() + return out, err +} + +const opGetHostedZoneCount = "GetHostedZoneCount" + +// GetHostedZoneCountRequest generates a request for the GetHostedZoneCount operation. +func (c *Route53) GetHostedZoneCountRequest(input *GetHostedZoneCountInput) (req *request.Request, output *GetHostedZoneCountOutput) { + op := &request.Operation{ + Name: opGetHostedZoneCount, + HTTPMethod: "GET", + HTTPPath: "/2013-04-01/hostedzonecount", + } + + if input == nil { + input = &GetHostedZoneCountInput{} + } + + req = c.newRequest(op, input, output) + output = &GetHostedZoneCountOutput{} + req.Data = output + return +} + +// To retrieve a count of all your hosted zones, send a GET request to the 2013-04-01/hostedzonecount +// resource. +func (c *Route53) GetHostedZoneCount(input *GetHostedZoneCountInput) (*GetHostedZoneCountOutput, error) { + req, out := c.GetHostedZoneCountRequest(input) + err := req.Send() + return out, err +} + +const opGetReusableDelegationSet = "GetReusableDelegationSet" + +// GetReusableDelegationSetRequest generates a request for the GetReusableDelegationSet operation. +func (c *Route53) GetReusableDelegationSetRequest(input *GetReusableDelegationSetInput) (req *request.Request, output *GetReusableDelegationSetOutput) { + op := &request.Operation{ + Name: opGetReusableDelegationSet, + HTTPMethod: "GET", + HTTPPath: "/2013-04-01/delegationset/{Id}", + } + + if input == nil { + input = &GetReusableDelegationSetInput{} + } + + req = c.newRequest(op, input, output) + output = &GetReusableDelegationSetOutput{} + req.Data = output + return +} + +// To retrieve the reusable delegation set, send a GET request to the 2013-04-01/delegationset/delegation +// set ID resource. +func (c *Route53) GetReusableDelegationSet(input *GetReusableDelegationSetInput) (*GetReusableDelegationSetOutput, error) { + req, out := c.GetReusableDelegationSetRequest(input) + err := req.Send() + return out, err +} + +const opGetTrafficPolicy = "GetTrafficPolicy" + +// GetTrafficPolicyRequest generates a request for the GetTrafficPolicy operation. +func (c *Route53) GetTrafficPolicyRequest(input *GetTrafficPolicyInput) (req *request.Request, output *GetTrafficPolicyOutput) { + op := &request.Operation{ + Name: opGetTrafficPolicy, + HTTPMethod: "GET", + HTTPPath: "/2013-04-01/trafficpolicy/{Id}/{Version}", + } + + if input == nil { + input = &GetTrafficPolicyInput{} + } + + req = c.newRequest(op, input, output) + output = &GetTrafficPolicyOutput{} + req.Data = output + return +} + +// Gets information about a specific traffic policy version. To get the information, +// send a GET request to the 2013-04-01/trafficpolicy resource. 
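+//
+// A minimal sketch (ID and version are hypothetical; both are required):
+//
+//     resp, err := svc.GetTrafficPolicy(&route53.GetTrafficPolicyInput{
+//         Id:      aws.String("tp-id-example"),
+//         Version: aws.Int64(1),
+//     })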
+func (c *Route53) GetTrafficPolicy(input *GetTrafficPolicyInput) (*GetTrafficPolicyOutput, error) { + req, out := c.GetTrafficPolicyRequest(input) + err := req.Send() + return out, err +} + +const opGetTrafficPolicyInstance = "GetTrafficPolicyInstance" + +// GetTrafficPolicyInstanceRequest generates a request for the GetTrafficPolicyInstance operation. +func (c *Route53) GetTrafficPolicyInstanceRequest(input *GetTrafficPolicyInstanceInput) (req *request.Request, output *GetTrafficPolicyInstanceOutput) { + op := &request.Operation{ + Name: opGetTrafficPolicyInstance, + HTTPMethod: "GET", + HTTPPath: "/2013-04-01/trafficpolicyinstance/{Id}", + } + + if input == nil { + input = &GetTrafficPolicyInstanceInput{} + } + + req = c.newRequest(op, input, output) + output = &GetTrafficPolicyInstanceOutput{} + req.Data = output + return +} + +// Gets information about a specified traffic policy instance. +// +// To get information about the traffic policy instance, send a GET request +// to the 2013-04-01/trafficpolicyinstance resource. +// +// After you submit a CreateTrafficPolicyInstance or an UpdateTrafficPolicyInstance +// request, there's a brief delay while Amazon Route 53 creates the resource +// record sets that are specified in the traffic policy definition. For more +// information, see the State response element. +func (c *Route53) GetTrafficPolicyInstance(input *GetTrafficPolicyInstanceInput) (*GetTrafficPolicyInstanceOutput, error) { + req, out := c.GetTrafficPolicyInstanceRequest(input) + err := req.Send() + return out, err +} + +const opGetTrafficPolicyInstanceCount = "GetTrafficPolicyInstanceCount" + +// GetTrafficPolicyInstanceCountRequest generates a request for the GetTrafficPolicyInstanceCount operation. +func (c *Route53) GetTrafficPolicyInstanceCountRequest(input *GetTrafficPolicyInstanceCountInput) (req *request.Request, output *GetTrafficPolicyInstanceCountOutput) { + op := &request.Operation{ + Name: opGetTrafficPolicyInstanceCount, + HTTPMethod: "GET", + HTTPPath: "/2013-04-01/trafficpolicyinstancecount", + } + + if input == nil { + input = &GetTrafficPolicyInstanceCountInput{} + } + + req = c.newRequest(op, input, output) + output = &GetTrafficPolicyInstanceCountOutput{} + req.Data = output + return +} + +// Gets the number of traffic policy instances that are associated with the +// current AWS account. +// +// To get the number of traffic policy instances, send a GET request to the +// 2013-04-01/trafficpolicyinstancecount resource. +func (c *Route53) GetTrafficPolicyInstanceCount(input *GetTrafficPolicyInstanceCountInput) (*GetTrafficPolicyInstanceCountOutput, error) { + req, out := c.GetTrafficPolicyInstanceCountRequest(input) + err := req.Send() + return out, err +} + +const opListChangeBatchesByHostedZone = "ListChangeBatchesByHostedZone" + +// ListChangeBatchesByHostedZoneRequest generates a request for the ListChangeBatchesByHostedZone operation. +func (c *Route53) ListChangeBatchesByHostedZoneRequest(input *ListChangeBatchesByHostedZoneInput) (req *request.Request, output *ListChangeBatchesByHostedZoneOutput) { + op := &request.Operation{ + Name: opListChangeBatchesByHostedZone, + HTTPMethod: "GET", + HTTPPath: "/2013-04-01/hostedzone/{Id}/changes", + } + + if input == nil { + input = &ListChangeBatchesByHostedZoneInput{} + } + + req = c.newRequest(op, input, output) + output = &ListChangeBatchesByHostedZoneOutput{} + req.Data = output + return +} + +// This action gets the list of ChangeBatches in a given time period for a given +// hosted zone. 
+func (c *Route53) ListChangeBatchesByHostedZone(input *ListChangeBatchesByHostedZoneInput) (*ListChangeBatchesByHostedZoneOutput, error) { + req, out := c.ListChangeBatchesByHostedZoneRequest(input) + err := req.Send() + return out, err +} + +const opListChangeBatchesByRRSet = "ListChangeBatchesByRRSet" + +// ListChangeBatchesByRRSetRequest generates a request for the ListChangeBatchesByRRSet operation. +func (c *Route53) ListChangeBatchesByRRSetRequest(input *ListChangeBatchesByRRSetInput) (req *request.Request, output *ListChangeBatchesByRRSetOutput) { + op := &request.Operation{ + Name: opListChangeBatchesByRRSet, + HTTPMethod: "GET", + HTTPPath: "/2013-04-01/hostedzone/{Id}/rrsChanges", + } + + if input == nil { + input = &ListChangeBatchesByRRSetInput{} + } + + req = c.newRequest(op, input, output) + output = &ListChangeBatchesByRRSetOutput{} + req.Data = output + return +} + +// This action gets the list of ChangeBatches in a given time period for a given +// hosted zone and RRSet. +func (c *Route53) ListChangeBatchesByRRSet(input *ListChangeBatchesByRRSetInput) (*ListChangeBatchesByRRSetOutput, error) { + req, out := c.ListChangeBatchesByRRSetRequest(input) + err := req.Send() + return out, err +} + +const opListGeoLocations = "ListGeoLocations" + +// ListGeoLocationsRequest generates a request for the ListGeoLocations operation. +func (c *Route53) ListGeoLocationsRequest(input *ListGeoLocationsInput) (req *request.Request, output *ListGeoLocationsOutput) { + op := &request.Operation{ + Name: opListGeoLocations, + HTTPMethod: "GET", + HTTPPath: "/2013-04-01/geolocations", + } + + if input == nil { + input = &ListGeoLocationsInput{} + } + + req = c.newRequest(op, input, output) + output = &ListGeoLocationsOutput{} + req.Data = output + return +} + +// To retrieve a list of supported geo locations, send a GET request to the +// 2013-04-01/geolocations resource. The response to this request includes a +// GeoLocationDetailsList element with zero, one, or multiple GeoLocationDetails +// child elements. The list is sorted by country code, and then subdivision +// code, followed by continents at the end of the list. +// +// By default, the list of geo locations is displayed on a single page. You +// can control the length of the page that is displayed by using the MaxItems +// parameter. If the list is truncated, IsTruncated will be set to true and +// a combination of NextContinentCode, NextCountryCode, NextSubdivisionCode +// will be populated. You can pass these as parameters to StartContinentCode, +// StartCountryCode, StartSubdivisionCode to control the geo location that the +// list begins with. +func (c *Route53) ListGeoLocations(input *ListGeoLocationsInput) (*ListGeoLocationsOutput, error) { + req, out := c.ListGeoLocationsRequest(input) + err := req.Send() + return out, err +} + +const opListHealthChecks = "ListHealthChecks" + +// ListHealthChecksRequest generates a request for the ListHealthChecks operation. 
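+//
+// The two-step pattern that every concrete method in this file wraps can also
+// be driven directly; a sketch, given an initialized *Route53 client svc (a
+// nil input is replaced with an empty ListHealthChecksInput, as the body
+// below shows):
+//
+//	req, out := svc.ListHealthChecksRequest(nil)
+//	if err := req.Send(); err == nil {
+//		_ = out // populated once Send returns successfully
+//	}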
+func (c *Route53) ListHealthChecksRequest(input *ListHealthChecksInput) (req *request.Request, output *ListHealthChecksOutput) { + op := &request.Operation{ + Name: opListHealthChecks, + HTTPMethod: "GET", + HTTPPath: "/2013-04-01/healthcheck", + Paginator: &request.Paginator{ + InputTokens: []string{"Marker"}, + OutputTokens: []string{"NextMarker"}, + LimitToken: "MaxItems", + TruncationToken: "IsTruncated", + }, + } + + if input == nil { + input = &ListHealthChecksInput{} + } + + req = c.newRequest(op, input, output) + output = &ListHealthChecksOutput{} + req.Data = output + return +} + +// To retrieve a list of your health checks, send a GET request to the 2013-04-01/healthcheck +// resource. The response to this request includes a HealthChecks element with +// zero, one, or multiple HealthCheck child elements. By default, the list of +// health checks is displayed on a single page. You can control the length of +// the page that is displayed by using the MaxItems parameter. You can use the +// Marker parameter to control the health check that the list begins with. +// +// Amazon Route 53 returns a maximum of 100 items. If you set MaxItems to +// a value greater than 100, Amazon Route 53 returns only the first 100. +func (c *Route53) ListHealthChecks(input *ListHealthChecksInput) (*ListHealthChecksOutput, error) { + req, out := c.ListHealthChecksRequest(input) + err := req.Send() + return out, err +} + +func (c *Route53) ListHealthChecksPages(input *ListHealthChecksInput, fn func(p *ListHealthChecksOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.ListHealthChecksRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*ListHealthChecksOutput), lastPage) + }) +} + +const opListHostedZones = "ListHostedZones" + +// ListHostedZonesRequest generates a request for the ListHostedZones operation. +func (c *Route53) ListHostedZonesRequest(input *ListHostedZonesInput) (req *request.Request, output *ListHostedZonesOutput) { + op := &request.Operation{ + Name: opListHostedZones, + HTTPMethod: "GET", + HTTPPath: "/2013-04-01/hostedzone", + Paginator: &request.Paginator{ + InputTokens: []string{"Marker"}, + OutputTokens: []string{"NextMarker"}, + LimitToken: "MaxItems", + TruncationToken: "IsTruncated", + }, + } + + if input == nil { + input = &ListHostedZonesInput{} + } + + req = c.newRequest(op, input, output) + output = &ListHostedZonesOutput{} + req.Data = output + return +} + +// To retrieve a list of your hosted zones, send a GET request to the 2013-04-01/hostedzone +// resource. The response to this request includes a HostedZones element with +// zero, one, or multiple HostedZone child elements. By default, the list of +// hosted zones is displayed on a single page. You can control the length of +// the page that is displayed by using the MaxItems parameter. You can use the +// Marker parameter to control the hosted zone that the list begins with. +// +// Amazon Route 53 returns a maximum of 100 items. If you set MaxItems to +// a value greater than 100, Amazon Route 53 returns only the first 100. 
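+//
+// Rather than chasing Marker and NextMarker by hand, the ListHostedZonesPages
+// helper defined just below can walk every page; a sketch, given an
+// initialized *Route53 client svc and assuming the fmt and aws packages are
+// imported (the HostedZones field follows the HostedZones response element
+// described above):
+//
+//	err := svc.ListHostedZonesPages(&route53.ListHostedZonesInput{},
+//		func(p *route53.ListHostedZonesOutput, lastPage bool) bool {
+//			for _, zone := range p.HostedZones {
+//				fmt.Println(aws.StringValue(zone.Name)) // Name is an assumed HostedZone field
+//			}
+//			return true // keep paginating
+//		})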
+func (c *Route53) ListHostedZones(input *ListHostedZonesInput) (*ListHostedZonesOutput, error) { + req, out := c.ListHostedZonesRequest(input) + err := req.Send() + return out, err +} + +func (c *Route53) ListHostedZonesPages(input *ListHostedZonesInput, fn func(p *ListHostedZonesOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.ListHostedZonesRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*ListHostedZonesOutput), lastPage) + }) +} + +const opListHostedZonesByName = "ListHostedZonesByName" + +// ListHostedZonesByNameRequest generates a request for the ListHostedZonesByName operation. +func (c *Route53) ListHostedZonesByNameRequest(input *ListHostedZonesByNameInput) (req *request.Request, output *ListHostedZonesByNameOutput) { + op := &request.Operation{ + Name: opListHostedZonesByName, + HTTPMethod: "GET", + HTTPPath: "/2013-04-01/hostedzonesbyname", + } + + if input == nil { + input = &ListHostedZonesByNameInput{} + } + + req = c.newRequest(op, input, output) + output = &ListHostedZonesByNameOutput{} + req.Data = output + return +} + +// To retrieve a list of your hosted zones in lexicographic order, send a GET +// request to the 2013-04-01/hostedzonesbyname resource. The response to this +// request includes a HostedZones element with zero or more HostedZone child +// elements lexicographically ordered by DNS name. By default, the list of hosted +// zones is displayed on a single page. You can control the length of the page +// that is displayed by using the MaxItems parameter. You can use the DNSName +// and HostedZoneId parameters to control the hosted zone that the list begins +// with. +// +// Amazon Route 53 returns a maximum of 100 items. If you set MaxItems to +// a value greater than 100, Amazon Route 53 returns only the first 100. +func (c *Route53) ListHostedZonesByName(input *ListHostedZonesByNameInput) (*ListHostedZonesByNameOutput, error) { + req, out := c.ListHostedZonesByNameRequest(input) + err := req.Send() + return out, err +} + +const opListResourceRecordSets = "ListResourceRecordSets" + +// ListResourceRecordSetsRequest generates a request for the ListResourceRecordSets operation. +func (c *Route53) ListResourceRecordSetsRequest(input *ListResourceRecordSetsInput) (req *request.Request, output *ListResourceRecordSetsOutput) { + op := &request.Operation{ + Name: opListResourceRecordSets, + HTTPMethod: "GET", + HTTPPath: "/2013-04-01/hostedzone/{Id}/rrset", + Paginator: &request.Paginator{ + InputTokens: []string{"StartRecordName", "StartRecordType", "StartRecordIdentifier"}, + OutputTokens: []string{"NextRecordName", "NextRecordType", "NextRecordIdentifier"}, + LimitToken: "MaxItems", + TruncationToken: "IsTruncated", + }, + } + + if input == nil { + input = &ListResourceRecordSetsInput{} + } + + req = c.newRequest(op, input, output) + output = &ListResourceRecordSetsOutput{} + req.Data = output + return +} + +// Imagine all the resource record sets in a zone listed out in front of you. +// Imagine them sorted lexicographically first by DNS name (with the labels +// reversed, like "com.amazon.www" for example), and secondarily, lexicographically +// by record type. 
+// This operation retrieves at most MaxItems resource record
+// sets from this list, in order, starting at a position specified by the Name
+// and Type arguments:
+//
+// If both Name and Type are omitted, this means start the results at the
+// first RRSET in the HostedZone. If Name is specified but Type is omitted,
+// this means start the results at the first RRSET in the list whose name is
+// greater than or equal to Name. If both Name and Type are specified, this
+// means start the results at the first RRSET in the list whose name is greater
+// than or equal to Name and whose type is greater than or equal to Type. It
+// is an error to specify the Type but not the Name. Use ListResourceRecordSets
+// to retrieve a single known record set by specifying the record set's name
+// and type, and setting MaxItems = 1.
+//
+// To retrieve all the records in a HostedZone, first pause any processes making
+// calls to ChangeResourceRecordSets. Initially call ListResourceRecordSets
+// without a Name and Type to get the first page of record sets. For subsequent
+// calls, set Name and Type to the NextName and NextType values returned by
+// the previous response.
+//
+// In the presence of concurrent ChangeResourceRecordSets calls, there is no
+// consistency of results across calls to ListResourceRecordSets. The only way
+// to get a consistent multi-page snapshot of all RRSETs in a zone is to stop
+// making changes while pagination is in progress.
+//
+// However, the results from ListResourceRecordSets are consistent within a
+// page. If MakeChange calls are taking place concurrently, the result of each
+// one will either be completely visible in your results or not at all. You
+// will not see partial changes, or changes that do not ultimately succeed.
+// (This follows from the fact that MakeChange is atomic.)
+//
+// The results from ListResourceRecordSets are strongly consistent with ChangeResourceRecordSets.
+// To be precise, if a single process makes a call to ChangeResourceRecordSets
+// and receives a successful response, the effects of that change will be visible
+// in a subsequent call to ListResourceRecordSets by that process.
+func (c *Route53) ListResourceRecordSets(input *ListResourceRecordSetsInput) (*ListResourceRecordSetsOutput, error) {
+	req, out := c.ListResourceRecordSetsRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+func (c *Route53) ListResourceRecordSetsPages(input *ListResourceRecordSetsInput, fn func(p *ListResourceRecordSetsOutput, lastPage bool) (shouldContinue bool)) error {
+	page, _ := c.ListResourceRecordSetsRequest(input)
+	page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator"))
+	return page.EachPage(func(p interface{}, lastPage bool) bool {
+		return fn(p.(*ListResourceRecordSetsOutput), lastPage)
+	})
+}
+
+const opListReusableDelegationSets = "ListReusableDelegationSets"
+
+// ListReusableDelegationSetsRequest generates a request for the ListReusableDelegationSets operation.
+func (c *Route53) ListReusableDelegationSetsRequest(input *ListReusableDelegationSetsInput) (req *request.Request, output *ListReusableDelegationSetsOutput) { + op := &request.Operation{ + Name: opListReusableDelegationSets, + HTTPMethod: "GET", + HTTPPath: "/2013-04-01/delegationset", + } + + if input == nil { + input = &ListReusableDelegationSetsInput{} + } + + req = c.newRequest(op, input, output) + output = &ListReusableDelegationSetsOutput{} + req.Data = output + return +} + +// To retrieve a list of your reusable delegation sets, send a GET request to +// the 2013-04-01/delegationset resource. The response to this request includes +// a DelegationSets element with zero, one, or multiple DelegationSet child +// elements. By default, the list of delegation sets is displayed on a single +// page. You can control the length of the page that is displayed by using the +// MaxItems parameter. You can use the Marker parameter to control the delegation +// set that the list begins with. +// +// Amazon Route 53 returns a maximum of 100 items. If you set MaxItems to +// a value greater than 100, Amazon Route 53 returns only the first 100. +func (c *Route53) ListReusableDelegationSets(input *ListReusableDelegationSetsInput) (*ListReusableDelegationSetsOutput, error) { + req, out := c.ListReusableDelegationSetsRequest(input) + err := req.Send() + return out, err +} + +const opListTagsForResource = "ListTagsForResource" + +// ListTagsForResourceRequest generates a request for the ListTagsForResource operation. +func (c *Route53) ListTagsForResourceRequest(input *ListTagsForResourceInput) (req *request.Request, output *ListTagsForResourceOutput) { + op := &request.Operation{ + Name: opListTagsForResource, + HTTPMethod: "GET", + HTTPPath: "/2013-04-01/tags/{ResourceType}/{ResourceId}", + } + + if input == nil { + input = &ListTagsForResourceInput{} + } + + req = c.newRequest(op, input, output) + output = &ListTagsForResourceOutput{} + req.Data = output + return +} + +func (c *Route53) ListTagsForResource(input *ListTagsForResourceInput) (*ListTagsForResourceOutput, error) { + req, out := c.ListTagsForResourceRequest(input) + err := req.Send() + return out, err +} + +const opListTagsForResources = "ListTagsForResources" + +// ListTagsForResourcesRequest generates a request for the ListTagsForResources operation. +func (c *Route53) ListTagsForResourcesRequest(input *ListTagsForResourcesInput) (req *request.Request, output *ListTagsForResourcesOutput) { + op := &request.Operation{ + Name: opListTagsForResources, + HTTPMethod: "POST", + HTTPPath: "/2013-04-01/tags/{ResourceType}", + } + + if input == nil { + input = &ListTagsForResourcesInput{} + } + + req = c.newRequest(op, input, output) + output = &ListTagsForResourcesOutput{} + req.Data = output + return +} + +func (c *Route53) ListTagsForResources(input *ListTagsForResourcesInput) (*ListTagsForResourcesOutput, error) { + req, out := c.ListTagsForResourcesRequest(input) + err := req.Send() + return out, err +} + +const opListTrafficPolicies = "ListTrafficPolicies" + +// ListTrafficPoliciesRequest generates a request for the ListTrafficPolicies operation. 
+func (c *Route53) ListTrafficPoliciesRequest(input *ListTrafficPoliciesInput) (req *request.Request, output *ListTrafficPoliciesOutput) { + op := &request.Operation{ + Name: opListTrafficPolicies, + HTTPMethod: "GET", + HTTPPath: "/2013-04-01/trafficpolicies", + } + + if input == nil { + input = &ListTrafficPoliciesInput{} + } + + req = c.newRequest(op, input, output) + output = &ListTrafficPoliciesOutput{} + req.Data = output + return +} + +// Gets information about the latest version for every traffic policy that is +// associated with the current AWS account. To get the information, send a GET +// request to the 2013-04-01/trafficpolicy resource. +// +// Amazon Route 53 returns a maximum of 100 items in each response. If you +// have a lot of traffic policies, you can use the maxitems parameter to list +// them in groups of up to 100. +// +// The response includes three values that help you navigate from one group +// of maxitems traffic policies to the next: +// +// IsTruncated If the value of IsTruncated in the response is true, there +// are more traffic policies associated with the current AWS account. +// +// If IsTruncated is false, this response includes the last traffic policy +// that is associated with the current account. +// +// TrafficPolicyIdMarker If IsTruncated is true, TrafficPolicyIdMarker is the +// ID of the first traffic policy in the next group of MaxItems traffic policies. +// If you want to list more traffic policies, make another call to ListTrafficPolicies, +// and specify the value of the TrafficPolicyIdMarker element from the response +// in the TrafficPolicyIdMarker request parameter. +// +// If IsTruncated is false, the TrafficPolicyIdMarker element is omitted from +// the response. +// +// MaxItems The value that you specified for the MaxItems parameter in the +// request that produced the current response. +func (c *Route53) ListTrafficPolicies(input *ListTrafficPoliciesInput) (*ListTrafficPoliciesOutput, error) { + req, out := c.ListTrafficPoliciesRequest(input) + err := req.Send() + return out, err +} + +const opListTrafficPolicyInstances = "ListTrafficPolicyInstances" + +// ListTrafficPolicyInstancesRequest generates a request for the ListTrafficPolicyInstances operation. +func (c *Route53) ListTrafficPolicyInstancesRequest(input *ListTrafficPolicyInstancesInput) (req *request.Request, output *ListTrafficPolicyInstancesOutput) { + op := &request.Operation{ + Name: opListTrafficPolicyInstances, + HTTPMethod: "GET", + HTTPPath: "/2013-04-01/trafficpolicyinstances", + } + + if input == nil { + input = &ListTrafficPolicyInstancesInput{} + } + + req = c.newRequest(op, input, output) + output = &ListTrafficPolicyInstancesOutput{} + req.Data = output + return +} + +// Gets information about the traffic policy instances that you created by using +// the current AWS account. +// +// After you submit an UpdateTrafficPolicyInstance request, there's a brief +// delay while Amazon Route 53 creates the resource record sets that are specified +// in the traffic policy definition. For more information, see the State response +// element. To get information about the traffic policy instances that are associated +// with the current AWS account, send a GET request to the 2013-04-01/trafficpolicyinstance +// resource. +// +// Amazon Route 53 returns a maximum of 100 items in each response. If you +// have a lot of traffic policy instances, you can use the MaxItems parameter +// to list them in groups of up to 100. 
+// +// The response includes five values that help you navigate from one group +// of MaxItems traffic policy instances to the next: +// +// IsTruncated If the value of IsTruncated in the response is true, there +// are more traffic policy instances associated with the current AWS account. +// +// If IsTruncated is false, this response includes the last traffic policy +// instance that is associated with the current account. +// +// MaxItems The value that you specified for the MaxItems parameter in the +// request that produced the current response. +// +// HostedZoneIdMarker, TrafficPolicyInstanceNameMarker, and TrafficPolicyInstanceTypeMarker +// If IsTruncated is true, these three values in the response represent the +// first traffic policy instance in the next group of MaxItems traffic policy +// instances. To list more traffic policy instances, make another call to ListTrafficPolicyInstances, +// and specify these values in the corresponding request parameters. +// +// If IsTruncated is false, all three elements are omitted from the response. +func (c *Route53) ListTrafficPolicyInstances(input *ListTrafficPolicyInstancesInput) (*ListTrafficPolicyInstancesOutput, error) { + req, out := c.ListTrafficPolicyInstancesRequest(input) + err := req.Send() + return out, err +} + +const opListTrafficPolicyInstancesByHostedZone = "ListTrafficPolicyInstancesByHostedZone" + +// ListTrafficPolicyInstancesByHostedZoneRequest generates a request for the ListTrafficPolicyInstancesByHostedZone operation. +func (c *Route53) ListTrafficPolicyInstancesByHostedZoneRequest(input *ListTrafficPolicyInstancesByHostedZoneInput) (req *request.Request, output *ListTrafficPolicyInstancesByHostedZoneOutput) { + op := &request.Operation{ + Name: opListTrafficPolicyInstancesByHostedZone, + HTTPMethod: "GET", + HTTPPath: "/2013-04-01/trafficpolicyinstances/hostedzone", + } + + if input == nil { + input = &ListTrafficPolicyInstancesByHostedZoneInput{} + } + + req = c.newRequest(op, input, output) + output = &ListTrafficPolicyInstancesByHostedZoneOutput{} + req.Data = output + return +} + +// Gets information about the traffic policy instances that you created in a +// specified hosted zone. +// +// After you submit an UpdateTrafficPolicyInstance request, there's a brief +// delay while Amazon Route 53 creates the resource record sets that are specified +// in the traffic policy definition. For more information, see the State response +// element. To get information about the traffic policy instances that you created +// in a specified hosted zone, send a GET request to the 2013-04-01/trafficpolicyinstance +// resource and include the ID of the hosted zone. +// +// Amazon Route 53 returns a maximum of 100 items in each response. If you +// have a lot of traffic policy instances, you can use the MaxItems parameter +// to list them in groups of up to 100. +// +// The response includes four values that help you navigate from one group +// of MaxItems traffic policy instances to the next: +// +// IsTruncated If the value of IsTruncated in the response is true, there +// are more traffic policy instances associated with the current AWS account. +// +// If IsTruncated is false, this response includes the last traffic policy +// instance that is associated with the current account. +// +// MaxItems The value that you specified for the MaxItems parameter in the +// request that produced the current response. 
+//
+// TrafficPolicyInstanceNameMarker and TrafficPolicyInstanceTypeMarker If IsTruncated
+// is true, these two values in the response represent the first traffic policy
+// instance in the next group of MaxItems traffic policy instances. To list
+// more traffic policy instances, make another call to ListTrafficPolicyInstancesByHostedZone,
+// and specify these values in the corresponding request parameters.
+//
+// If IsTruncated is false, both elements are omitted from the response.
+func (c *Route53) ListTrafficPolicyInstancesByHostedZone(input *ListTrafficPolicyInstancesByHostedZoneInput) (*ListTrafficPolicyInstancesByHostedZoneOutput, error) {
+	req, out := c.ListTrafficPolicyInstancesByHostedZoneRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+const opListTrafficPolicyInstancesByPolicy = "ListTrafficPolicyInstancesByPolicy"
+
+// ListTrafficPolicyInstancesByPolicyRequest generates a request for the ListTrafficPolicyInstancesByPolicy operation.
+func (c *Route53) ListTrafficPolicyInstancesByPolicyRequest(input *ListTrafficPolicyInstancesByPolicyInput) (req *request.Request, output *ListTrafficPolicyInstancesByPolicyOutput) {
+	op := &request.Operation{
+		Name:       opListTrafficPolicyInstancesByPolicy,
+		HTTPMethod: "GET",
+		HTTPPath:   "/2013-04-01/trafficpolicyinstances/trafficpolicy",
+	}
+
+	if input == nil {
+		input = &ListTrafficPolicyInstancesByPolicyInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &ListTrafficPolicyInstancesByPolicyOutput{}
+	req.Data = output
+	return
+}
+
+// Gets information about the traffic policy instances that you created by using
+// a specified traffic policy version.
+//
+// After you submit a CreateTrafficPolicyInstance or an UpdateTrafficPolicyInstance
+// request, there's a brief delay while Amazon Route 53 creates the resource
+// record sets that are specified in the traffic policy definition. For more
+// information, see the State response element. To get information about the
+// traffic policy instances that you created by using a specified traffic policy
+// version, send a GET request to the 2013-04-01/trafficpolicyinstance resource
+// and include the ID and version of the traffic policy.
+//
+// Amazon Route 53 returns a maximum of 100 items in each response. If you
+// have a lot of traffic policy instances, you can use the MaxItems parameter
+// to list them in groups of up to 100.
+//
+// The response includes five values that help you navigate from one group
+// of MaxItems traffic policy instances to the next:
+//
+// IsTruncated If the value of IsTruncated in the response is true, there
+// are more traffic policy instances associated with the specified traffic policy.
+//
+// If IsTruncated is false, this response includes the last traffic policy
+// instance that is associated with the specified traffic policy.
+//
+// MaxItems The value that you specified for the MaxItems parameter in the
+// request that produced the current response.
+//
+// HostedZoneIdMarker, TrafficPolicyInstanceNameMarker, and TrafficPolicyInstanceTypeMarker
+// If IsTruncated is true, these values in the response represent the first
+// traffic policy instance in the next group of MaxItems traffic policy instances.
+// To list more traffic policy instances, make another call to ListTrafficPolicyInstancesByPolicy,
+// and specify these values in the corresponding request parameters.
+//
+// If IsTruncated is false, all three elements are omitted from the response.
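+//
+// A sketch of one call, given an initialized *Route53 client svc; the
+// TrafficPolicyId and TrafficPolicyVersion field names are assumptions that
+// mirror the CreateTrafficPolicyInstanceInput fields defined later in this
+// file, and the policy ID is hypothetical:
+//
+//	out, err := svc.ListTrafficPolicyInstancesByPolicy(
+//		&route53.ListTrafficPolicyInstancesByPolicyInput{
+//			TrafficPolicyId:      aws.String("example-policy-id"),
+//			TrafficPolicyVersion: aws.Int64(1),
+//		})
+//	if err == nil {
+//		_ = out
+//	}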
+func (c *Route53) ListTrafficPolicyInstancesByPolicy(input *ListTrafficPolicyInstancesByPolicyInput) (*ListTrafficPolicyInstancesByPolicyOutput, error) {
+	req, out := c.ListTrafficPolicyInstancesByPolicyRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+const opListTrafficPolicyVersions = "ListTrafficPolicyVersions"
+
+// ListTrafficPolicyVersionsRequest generates a request for the ListTrafficPolicyVersions operation.
+func (c *Route53) ListTrafficPolicyVersionsRequest(input *ListTrafficPolicyVersionsInput) (req *request.Request, output *ListTrafficPolicyVersionsOutput) {
+	op := &request.Operation{
+		Name:       opListTrafficPolicyVersions,
+		HTTPMethod: "GET",
+		HTTPPath:   "/2013-04-01/trafficpolicies/{Id}/versions",
+	}
+
+	if input == nil {
+		input = &ListTrafficPolicyVersionsInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &ListTrafficPolicyVersionsOutput{}
+	req.Data = output
+	return
+}
+
+// Gets information about all of the versions for a specified traffic policy.
+// ListTrafficPolicyVersions lists only versions that have not been deleted.
+//
+// Amazon Route 53 returns a maximum of 100 items in each response. If you
+// have a lot of traffic policies, you can use the maxitems parameter to list
+// them in groups of up to 100.
+//
+// The response includes three values that help you navigate from one group
+// of maxitems traffic policies to the next:
+//
+// IsTruncated If the value of IsTruncated in the response is true, there
+// are more traffic policy versions associated with the specified traffic policy.
+//
+// If IsTruncated is false, this response includes the last traffic policy
+// version that is associated with the specified traffic policy.
+//
+// TrafficPolicyVersionMarker The ID of the next traffic policy version that
+// is associated with the current AWS account. If you want to list more traffic
+// policies, make another call to ListTrafficPolicyVersions, and specify the
+// value of the TrafficPolicyVersionMarker element in the TrafficPolicyVersionMarker
+// request parameter.
+//
+// If IsTruncated is false, Amazon Route 53 omits the TrafficPolicyVersionMarker
+// element from the response.
+//
+// MaxItems The value that you specified for the MaxItems parameter in the
+// request that produced the current response.
+func (c *Route53) ListTrafficPolicyVersions(input *ListTrafficPolicyVersionsInput) (*ListTrafficPolicyVersionsOutput, error) {
+	req, out := c.ListTrafficPolicyVersionsRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+const opUpdateHealthCheck = "UpdateHealthCheck"
+
+// UpdateHealthCheckRequest generates a request for the UpdateHealthCheck operation.
+func (c *Route53) UpdateHealthCheckRequest(input *UpdateHealthCheckInput) (req *request.Request, output *UpdateHealthCheckOutput) {
+	op := &request.Operation{
+		Name:       opUpdateHealthCheck,
+		HTTPMethod: "POST",
+		HTTPPath:   "/2013-04-01/healthcheck/{HealthCheckId}",
+	}
+
+	if input == nil {
+		input = &UpdateHealthCheckInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &UpdateHealthCheckOutput{}
+	req.Data = output
+	return
+}
+
+// This action updates an existing health check.
+//
+// To update a health check, send a POST request to the 2013-04-01/healthcheck/health
+// check ID resource. The request body must include an XML document with an
+// UpdateHealthCheckRequest element. The response returns an UpdateHealthCheckResponse
+// element, which contains metadata about the health check.
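+//
+// A sketch, given an initialized *Route53 client svc; HealthCheckId maps to
+// the {HealthCheckId} path segment above, while FailureThreshold is an
+// assumed input field used purely for illustration:
+//
+//	out, err := svc.UpdateHealthCheck(&route53.UpdateHealthCheckInput{
+//		HealthCheckId:    aws.String("example-health-check-id"), // hypothetical ID
+//		FailureThreshold: aws.Int64(3),
+//	})
+//	if err == nil {
+//		_ = out.HealthCheck // assumed output field carrying the updated check
+//	}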
+func (c *Route53) UpdateHealthCheck(input *UpdateHealthCheckInput) (*UpdateHealthCheckOutput, error) {
+	req, out := c.UpdateHealthCheckRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+const opUpdateHostedZoneComment = "UpdateHostedZoneComment"
+
+// UpdateHostedZoneCommentRequest generates a request for the UpdateHostedZoneComment operation.
+func (c *Route53) UpdateHostedZoneCommentRequest(input *UpdateHostedZoneCommentInput) (req *request.Request, output *UpdateHostedZoneCommentOutput) {
+	op := &request.Operation{
+		Name:       opUpdateHostedZoneComment,
+		HTTPMethod: "POST",
+		HTTPPath:   "/2013-04-01/hostedzone/{Id}",
+	}
+
+	if input == nil {
+		input = &UpdateHostedZoneCommentInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &UpdateHostedZoneCommentOutput{}
+	req.Data = output
+	return
+}
+
+// To update the hosted zone comment, send a POST request to the 2013-04-01/hostedzone/hosted
+// zone ID resource. The request body must include an XML document with an UpdateHostedZoneCommentRequest
+// element. The response to this request includes the modified HostedZone element.
+//
+// The comment can have a maximum length of 256 characters.
+func (c *Route53) UpdateHostedZoneComment(input *UpdateHostedZoneCommentInput) (*UpdateHostedZoneCommentOutput, error) {
+	req, out := c.UpdateHostedZoneCommentRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+const opUpdateTrafficPolicyComment = "UpdateTrafficPolicyComment"
+
+// UpdateTrafficPolicyCommentRequest generates a request for the UpdateTrafficPolicyComment operation.
+func (c *Route53) UpdateTrafficPolicyCommentRequest(input *UpdateTrafficPolicyCommentInput) (req *request.Request, output *UpdateTrafficPolicyCommentOutput) {
+	op := &request.Operation{
+		Name:       opUpdateTrafficPolicyComment,
+		HTTPMethod: "POST",
+		HTTPPath:   "/2013-04-01/trafficpolicy/{Id}/{Version}",
+	}
+
+	if input == nil {
+		input = &UpdateTrafficPolicyCommentInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &UpdateTrafficPolicyCommentOutput{}
+	req.Data = output
+	return
+}
+
+// Updates the comment for a specified traffic policy version.
+//
+// To update the comment, send a POST request to the /2013-04-01/trafficpolicy/
+// resource.
+//
+// The request body must include an XML document with an UpdateTrafficPolicyCommentRequest
+// element.
+func (c *Route53) UpdateTrafficPolicyComment(input *UpdateTrafficPolicyCommentInput) (*UpdateTrafficPolicyCommentOutput, error) {
+	req, out := c.UpdateTrafficPolicyCommentRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+const opUpdateTrafficPolicyInstance = "UpdateTrafficPolicyInstance"
+
+// UpdateTrafficPolicyInstanceRequest generates a request for the UpdateTrafficPolicyInstance operation.
+func (c *Route53) UpdateTrafficPolicyInstanceRequest(input *UpdateTrafficPolicyInstanceInput) (req *request.Request, output *UpdateTrafficPolicyInstanceOutput) {
+	op := &request.Operation{
+		Name:       opUpdateTrafficPolicyInstance,
+		HTTPMethod: "POST",
+		HTTPPath:   "/2013-04-01/trafficpolicyinstance/{Id}",
+	}
+
+	if input == nil {
+		input = &UpdateTrafficPolicyInstanceInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &UpdateTrafficPolicyInstanceOutput{}
+	req.Data = output
+	return
+}
+
+// Updates the resource record sets in a specified hosted zone that were created
+// based on the settings in a specified traffic policy version.
+// +// The DNS type of the resource record sets that you're updating must match +// the DNS type in the JSON document that is associated with the traffic policy +// version that you're using to update the traffic policy instance. When you +// update a traffic policy instance, Amazon Route 53 continues to respond to +// DNS queries for the root resource record set name (such as example.com) while +// it replaces one group of resource record sets with another. Amazon Route +// 53 performs the following operations: +// +// Amazon Route 53 creates a new group of resource record sets based on the +// specified traffic policy. This is true regardless of how substantial the +// differences are between the existing resource record sets and the new resource +// record sets. When all of the new resource record sets have been created, +// Amazon Route 53 starts to respond to DNS queries for the root resource record +// set name (such as example.com) by using the new resource record sets. Amazon +// Route 53 deletes the old group of resource record sets that are associated +// with the root resource record set name. To update a traffic policy instance, +// send a POST request to the /2013-04-01/trafficpolicyinstance/traffic policy +// ID resource. The request body must include an XML document with an UpdateTrafficPolicyInstanceRequest +// element. +func (c *Route53) UpdateTrafficPolicyInstance(input *UpdateTrafficPolicyInstanceInput) (*UpdateTrafficPolicyInstanceOutput, error) { + req, out := c.UpdateTrafficPolicyInstanceRequest(input) + err := req.Send() + return out, err +} + +// Alias resource record sets only: Information about the CloudFront distribution, +// ELB load balancer, Amazon S3 bucket, or Amazon Route 53 resource record set +// to which you are routing traffic. +// +// If you're creating resource record sets for a private hosted zone, note +// the following: +// +// You can create alias resource record sets only for Amazon Route 53 resource +// record sets in the same private hosted zone. Creating alias resource record +// sets for CloudFront distributions, ELB load balancers, and Amazon S3 buckets +// is not supported. You can't create alias resource record sets for failover, +// geolocation, or latency resource record sets in a private hosted zone. For +// more information and an example, see Example: Creating Alias Resource Record +// Sets (http://docs.aws.amazon.com/Route53/latest/APIReference/CreateAliasRRSAPI.html) +// in the Amazon Route 53 API Reference. +type AliasTarget struct { + _ struct{} `type:"structure"` + + // Alias resource record sets only: The external DNS name associated with the + // AWS Resource. The value that you specify depends on where you want to route + // queries: + // + // A CloudFront distribution: Specify the domain name that CloudFront assigned + // when you created your distribution. Your CloudFront distribution must include + // an alternate domain name that matches the name of the resource record set. + // For example, if the name of the resource record set is acme.example.com, + // your CloudFront distribution must include acme.example.com as one of the + // alternate domain names. For more information, see Using Alternate Domain + // Names (CNAMEs) (http://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/CNAMEs.html) + // in the Amazon CloudFront Developer Guide. An ELB load balancer: Specify the + // DNS name associated with the load balancer. 
+	// You can get the DNS name by using
+	// the AWS Management Console, the ELB API, or the AWS CLI. Use the same method
+	// to get values for HostedZoneId and DNSName. If you get one value from the
+	// console and the other value from the API or the CLI, creating the resource
+	// record set will fail. An Amazon S3 bucket that is configured as a static
+	// website: Specify the domain name of the Amazon S3 website endpoint in which
+	// you created the bucket; for example, s3-website-us-east-1.amazonaws.com.
+	// For more information about valid values, see the table Amazon Simple Storage
+	// Service (S3) Website Endpoints (http://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region)
+	// in the Amazon Web Services General Reference. For more information about
+	// using Amazon S3 buckets for websites, see Hosting a Static Website on Amazon
+	// S3 (http://docs.aws.amazon.com/AmazonS3/latest/dev/WebsiteHosting.html) in
+	// the Amazon Simple Storage Service Developer Guide. For more information
+	// and an example, see Example: Creating Alias Resource Record Sets (http://docs.aws.amazon.com/Route53/latest/APIReference/CreateAliasRRSAPI.html)
+	// in the Amazon Route 53 API Reference.
+	DNSName *string `type:"string" required:"true"`
+
+	// Alias resource record sets only: If you set the value of EvaluateTargetHealth
+	// to true for the resource record set or sets in an alias, weighted alias,
+	// latency alias, or failover alias resource record set, and if you specify
+	// a value for HealthCheckId for every resource record set that is referenced
+	// by these alias resource record sets, the alias resource record sets inherit
+	// the health of the referenced resource record sets.
+	//
+	// In this configuration, when Amazon Route 53 receives a DNS query for an
+	// alias resource record set:
+	//
+	// Amazon Route 53 looks at the resource record sets that are referenced by
+	// the alias resource record sets to determine which health checks they're using.
+	// Amazon Route 53 checks the current status of each health check. (Amazon Route
+	// 53 periodically checks the health of the endpoint that is specified in a
+	// health check; it doesn't perform the health check when the DNS query arrives.)
+	// Based on the status of the health checks, Amazon Route 53 determines which
+	// resource record sets are healthy. Unhealthy resource record sets are immediately
+	// removed from consideration. In addition, if all of the resource record sets
+	// that are referenced by an alias resource record set are unhealthy, that alias
+	// resource record set also is immediately removed from consideration. Based
+	// on the configuration of the alias resource record sets (weighted alias or
+	// latency alias, for example) and the configuration of the resource record
+	// sets that they reference, Amazon Route 53 chooses a resource record set from
+	// the healthy resource record sets, and responds to the query. Note the following:
+	//
+	// You cannot set EvaluateTargetHealth to true when the alias target is a CloudFront
+	// distribution. If the AWS resource that you specify in AliasTarget is a resource
+	// record set or a group of resource record sets (for example, a group of weighted
+	// resource record sets), but it is not another alias resource record set, we
+	// recommend that you associate a health check with all of the resource record
+	// sets in the alias target. For more information, see What Happens When You
+	// Omit Health Checks?
+	// (http://docs.aws.amazon.com/Route53/latest/DeveloperGuide/dns-failover-complex-configs.html#dns-failover-complex-configs-hc-omitting)
+	// in the Amazon Route 53 Developer Guide. If you specify an ELB load balancer
+	// in AliasTarget, Elastic Load Balancing routes queries only to the healthy
+	// Amazon EC2 instances that are registered with the load balancer. If no Amazon
+	// EC2 instances are healthy or if the load balancer itself is unhealthy, and
+	// if EvaluateTargetHealth is true for the corresponding alias resource record
+	// set, Amazon Route 53 routes queries to other resources. When you create a
+	// load balancer, you configure settings for Elastic Load Balancing health checks;
+	// they're not Amazon Route 53 health checks, but they perform a similar function.
+	// Do not create Amazon Route 53 health checks for the Amazon EC2 instances
+	// that you register with an ELB load balancer. For more information, see How
+	// Health Checks Work in More Complex Amazon Route 53 Configurations (http://docs.aws.amazon.com/Route53/latest/DeveloperGuide/dns-failover-complex-configs.html)
+	// in the Amazon Route 53 Developer Guide. We recommend that you set EvaluateTargetHealth
+	// to true only when you have enough idle capacity to handle the failure of
+	// one or more endpoints.
+	//
+	// For more information and examples, see Amazon Route 53 Health Checks and
+	// DNS Failover (http://docs.aws.amazon.com/Route53/latest/DeveloperGuide/dns-failover.html)
+	// in the Amazon Route 53 Developer Guide.
+	EvaluateTargetHealth *bool `type:"boolean" required:"true"`
+
+	// Alias resource record sets only: The value you use depends on where you want
+	// to route queries:
+	//
+	// A CloudFront distribution: Specify Z2FDTNDATAQYW2. An ELB load balancer:
+	// Specify the value of the hosted zone ID for the load balancer. You can get
+	// the hosted zone ID by using the AWS Management Console, the ELB API, or the
+	// AWS CLI. Use the same method to get values for HostedZoneId and DNSName.
+	// If you get one value from the console and the other value from the API or
+	// the CLI, creating the resource record set will fail. An Amazon S3 bucket
+	// that is configured as a static website: Specify the hosted zone ID for the
+	// Amazon S3 website endpoint in which you created the bucket. For more information
+	// about valid values, see the table Amazon Simple Storage Service (S3) Website
+	// Endpoints (http://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region)
+	// in the Amazon Web Services General Reference. Another Amazon Route 53 resource
+	// record set in your hosted zone: Specify the hosted zone ID of your hosted
+	// zone. (An alias resource record set cannot reference a resource record set
+	// in a different hosted zone.) For more information and an example, see Example:
+	// Creating Alias Resource Record Sets (http://docs.aws.amazon.com/Route53/latest/APIReference/CreateAliasRRSAPI.html)
+	// in the Amazon Route 53 API Reference.
+	HostedZoneId *string `type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s AliasTarget) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s AliasTarget) GoString() string {
+	return s.String()
+}
+
+// A complex type that contains information about the request to associate a
+// VPC with a hosted zone.
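+//
+// A sketch of constructing this input, given the fields below; it assumes
+// the VPC type defined elsewhere in this file carries VPCId and VPCRegion
+// fields, and both values are hypothetical:
+//
+//	input := &route53.AssociateVPCWithHostedZoneInput{
+//		HostedZoneId: aws.String("ZEXAMPLE12345"),
+//		VPC: &route53.VPC{
+//			VPCId:     aws.String("vpc-12345678"),
+//			VPCRegion: aws.String("us-east-1"),
+//		},
+//	}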
+type AssociateVPCWithHostedZoneInput struct {
+	_ struct{} `locationName:"AssociateVPCWithHostedZoneRequest" type:"structure" xmlURI:"https://route53.amazonaws.com/doc/2013-04-01/"`
+
+	// Optional: Any comments you want to include about an AssociateVPCWithHostedZoneRequest.
+	Comment *string `type:"string"`
+
+	// The ID of the hosted zone you want to associate your VPC with.
+	//
+	// Note that you cannot associate a VPC with a hosted zone that doesn't have
+	// an existing VPC association.
+	HostedZoneId *string `location:"uri" locationName:"Id" type:"string" required:"true"`
+
+	// The VPC that you want your hosted zone to be associated with.
+	VPC *VPC `type:"structure" required:"true"`
+}
+
+// String returns the string representation
+func (s AssociateVPCWithHostedZoneInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s AssociateVPCWithHostedZoneInput) GoString() string {
+	return s.String()
+}
+
+// A complex type containing the response information for the request.
+type AssociateVPCWithHostedZoneOutput struct {
+	_ struct{} `type:"structure"`
+
+	// A complex type that contains the ID, the status, and the date and time of
+	// your AssociateVPCWithHostedZoneRequest.
+	ChangeInfo *ChangeInfo `type:"structure" required:"true"`
+}
+
+// String returns the string representation
+func (s AssociateVPCWithHostedZoneOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s AssociateVPCWithHostedZoneOutput) GoString() string {
+	return s.String()
+}
+
+// A complex type that contains the information for each change in a change
+// batch request.
+type Change struct {
+	_ struct{} `type:"structure"`
+
+	// The action to perform:
+	//
+	// CREATE: Creates a resource record set that has the specified values. DELETE:
+	// Deletes an existing resource record set that has the specified values for
+	// Name, Type, SetIdentifier (for latency, weighted, geolocation, and failover
+	// resource record sets), and TTL (except alias resource record sets, for which
+	// the TTL is determined by the AWS resource that you're routing DNS queries
+	// to). UPSERT: If a resource record set does not already exist, Amazon Route
+	// 53 creates it. If a resource record set does exist, Amazon Route 53 updates
+	// it with the values in the request. Amazon Route 53 can update an existing
+	// resource record set only when all of the following values match: Name, Type,
+	// and SetIdentifier (for weighted, latency, geolocation, and failover resource
+	// record sets).
+	Action *string `type:"string" required:"true" enum:"ChangeAction"`
+
+	// Information about the resource record set to create or delete.
+	ResourceRecordSet *ResourceRecordSet `type:"structure" required:"true"`
+}
+
+// String returns the string representation
+func (s Change) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s Change) GoString() string {
+	return s.String()
+}
+
+// A complex type that contains an optional comment and the changes that you
+// want to make with a change batch request.
+type ChangeBatch struct {
+	_ struct{} `type:"structure"`
+
+	// A complex type that contains one Change element for each resource record
+	// set that you want to create or delete.
+	Changes []*Change `locationNameList:"Change" min:"1" type:"list" required:"true"`
+
+	// Optional: Any comments you want to include about a change batch request.
+	Comment *string `type:"string"`
+}
+
+// String returns the string representation
+func (s ChangeBatch) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ChangeBatch) GoString() string {
+	return s.String()
+}
+
+// A complex type that lists the changes and information for a ChangeBatch.
+type ChangeBatchRecord struct {
+	_ struct{} `type:"structure"`
+
+	// A list of changes made in the ChangeBatch.
+	Changes []*Change `locationNameList:"Change" min:"1" type:"list"`
+
+	// Optional: Any comments you want to include about the change batch request.
+	Comment *string `type:"string"`
+
+	// The ID of the request. Use this ID to track when the change has completed
+	// across all Amazon Route 53 DNS servers.
+	Id *string `type:"string" required:"true"`
+
+	// The current state of the request. PENDING indicates that this request has
+	// not yet been applied to all Amazon Route 53 DNS servers.
+	//
+	// Valid Values: PENDING | INSYNC
+	Status *string `type:"string" required:"true" enum:"ChangeStatus"`
+
+	// The date and time the change was submitted, in the format YYYY-MM-DDThh:mm:ssZ,
+	// as specified in the ISO 8601 standard (for example, 2009-11-19T19:37:58Z).
+	// The Z after the time indicates that the time is listed in Coordinated Universal
+	// Time (UTC).
+	SubmittedAt *time.Time `type:"timestamp" timestampFormat:"iso8601"`
+
+	// The AWS account ID attached to the changes.
+	Submitter *string `type:"string"`
+}
+
+// String returns the string representation
+func (s ChangeBatchRecord) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ChangeBatchRecord) GoString() string {
+	return s.String()
+}
+
+// A complex type that describes change information about changes made to your
+// hosted zone.
+//
+// This element contains an ID that you use when performing a GetChange action
+// to get detailed information about the change.
+type ChangeInfo struct {
+	_ struct{} `type:"structure"`
+
+	// Optional: Any comments you want to include about the change batch request.
+	Comment *string `type:"string"`
+
+	// The ID of the request. Use this ID to track when the change has completed
+	// across all Amazon Route 53 DNS servers.
+	Id *string `type:"string" required:"true"`
+
+	// The current state of the request. PENDING indicates that this request has
+	// not yet been applied to all Amazon Route 53 DNS servers.
+	//
+	// Valid Values: PENDING | INSYNC
+	Status *string `type:"string" required:"true" enum:"ChangeStatus"`
+
+	// The date and time the change was submitted, in the format YYYY-MM-DDThh:mm:ssZ,
+	// as specified in the ISO 8601 standard (for example, 2009-11-19T19:37:58Z).
+	// The Z after the time indicates that the time is listed in Coordinated Universal
+	// Time (UTC).
+	SubmittedAt *time.Time `type:"timestamp" timestampFormat:"iso8601" required:"true"`
+}
+
+// String returns the string representation
+func (s ChangeInfo) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ChangeInfo) GoString() string {
+	return s.String()
+}
+
+// A complex type that contains a change batch.
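+//
+// A sketch of a single UPSERT batch using the Change and ChangeBatch types
+// above; the ResourceRecordSet field names (Name, Type, TTL, ResourceRecords)
+// and the ResourceRecord Value field are assumptions about shapes defined
+// elsewhere in this file, and the IDs and record values are hypothetical:
+//
+//	input := &route53.ChangeResourceRecordSetsInput{
+//		HostedZoneId: aws.String("ZEXAMPLE12345"),
+//		ChangeBatch: &route53.ChangeBatch{
+//			Comment: aws.String("example upsert"),
+//			Changes: []*route53.Change{{
+//				Action: aws.String("UPSERT"),
+//				ResourceRecordSet: &route53.ResourceRecordSet{
+//					Name: aws.String("www.example.com."),
+//					Type: aws.String("A"),
+//					TTL:  aws.Int64(300),
+//					ResourceRecords: []*route53.ResourceRecord{
+//						{Value: aws.String("192.0.2.44")},
+//					},
+//				},
+//			}},
+//		},
+//	}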
+type ChangeResourceRecordSetsInput struct {
+	_ struct{} `locationName:"ChangeResourceRecordSetsRequest" type:"structure" xmlURI:"https://route53.amazonaws.com/doc/2013-04-01/"`
+
+	// A complex type that contains an optional comment and the Changes element.
+	ChangeBatch *ChangeBatch `type:"structure" required:"true"`
+
+	// The ID of the hosted zone that contains the resource record sets that you
+	// want to change.
+	HostedZoneId *string `location:"uri" locationName:"Id" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s ChangeResourceRecordSetsInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ChangeResourceRecordSetsInput) GoString() string {
+	return s.String()
+}
+
+// A complex type containing the response for the request.
+type ChangeResourceRecordSetsOutput struct {
+	_ struct{} `type:"structure"`
+
+	// A complex type that contains information about changes made to your hosted
+	// zone.
+	//
+	// This element contains an ID that you use when performing a GetChange action
+	// to get detailed information about the change.
+	ChangeInfo *ChangeInfo `type:"structure" required:"true"`
+}
+
+// String returns the string representation
+func (s ChangeResourceRecordSetsOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ChangeResourceRecordSetsOutput) GoString() string {
+	return s.String()
+}
+
+// A complex type containing information about a request to add, change, or
+// delete the tags that are associated with a resource.
+type ChangeTagsForResourceInput struct {
+	_ struct{} `locationName:"ChangeTagsForResourceRequest" type:"structure" xmlURI:"https://route53.amazonaws.com/doc/2013-04-01/"`
+
+	// A complex type that contains a list of Tag elements. Each Tag element identifies
+	// a tag that you want to add or update for the specified resource.
+	AddTags []*Tag `locationNameList:"Tag" min:"1" type:"list"`
+
+	// A list of Tag keys that you want to remove from the specified resource.
+	RemoveTagKeys []*string `locationNameList:"Key" min:"1" type:"list"`
+
+	// The ID of the resource for which you want to add, change, or delete tags.
+	ResourceId *string `location:"uri" locationName:"ResourceId" type:"string" required:"true"`
+
+	// The type of the resource.
+	//
+	// - The resource type for health checks is healthcheck.
+	//
+	// - The resource type for hosted zones is hostedzone.
+	ResourceType *string `location:"uri" locationName:"ResourceType" type:"string" required:"true" enum:"TagResourceType"`
+}
+
+// String returns the string representation
+func (s ChangeTagsForResourceInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ChangeTagsForResourceInput) GoString() string {
+	return s.String()
+}
+
+// Empty response for the request.
+type ChangeTagsForResourceOutput struct {
+	_ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s ChangeTagsForResourceOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ChangeTagsForResourceOutput) GoString() string {
+	return s.String()
+}
+
+// A complex type that contains information about the request to create a health
+// check.
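+//
+// A sketch of constructing this input; CallerReference and HealthCheckConfig
+// are the fields below, while the HealthCheckConfig field names (Type,
+// FullyQualifiedDomainName, Port) are assumptions used for illustration:
+//
+//	input := &route53.CreateHealthCheckInput{
+//		CallerReference: aws.String("unique-reference-2016-01-29"),
+//		HealthCheckConfig: &route53.HealthCheckConfig{
+//			Type:                     aws.String("HTTP"),
+//			FullyQualifiedDomainName: aws.String("www.example.com"),
+//			Port:                     aws.Int64(80),
+//		},
+//	}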
+type CreateHealthCheckInput struct {
+	_ struct{} `locationName:"CreateHealthCheckRequest" type:"structure" xmlURI:"https://route53.amazonaws.com/doc/2013-04-01/"`
+
+	// A unique string that identifies the request and that allows failed CreateHealthCheck
+	// requests to be retried without the risk of executing the operation twice.
+	// You must use a unique CallerReference string every time you create a health
+	// check. CallerReference can be any unique string; you might choose to use
+	// a string that identifies your project.
+	//
+	// Valid characters are any Unicode code points that are legal in an XML 1.0
+	// document. The UTF-8 encoding of the value must be less than 128 bytes.
+	CallerReference *string `min:"1" type:"string" required:"true"`
+
+	// A complex type that contains health check configuration.
+	HealthCheckConfig *HealthCheckConfig `type:"structure" required:"true"`
+}
+
+// String returns the string representation
+func (s CreateHealthCheckInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreateHealthCheckInput) GoString() string {
+	return s.String()
+}
+
+// A complex type containing the response information for the new health check.
+type CreateHealthCheckOutput struct {
+	_ struct{} `type:"structure"`
+
+	// A complex type that contains identifying information about the health check.
+	HealthCheck *HealthCheck `type:"structure" required:"true"`
+
+	// The unique URL representing the new health check.
+	Location *string `location:"header" locationName:"Location" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s CreateHealthCheckOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreateHealthCheckOutput) GoString() string {
+	return s.String()
+}
+
+// A complex type that contains information about the request to create a hosted
+// zone.
+type CreateHostedZoneInput struct {
+	_ struct{} `locationName:"CreateHostedZoneRequest" type:"structure" xmlURI:"https://route53.amazonaws.com/doc/2013-04-01/"`
+
+	// A unique string that identifies the request and that allows failed CreateHostedZone
+	// requests to be retried without the risk of executing the operation twice.
+	// You must use a unique CallerReference string every time you create a hosted
+	// zone. CallerReference can be any unique string; you might choose to use a
+	// string that identifies your project, such as DNSMigration_01.
+	//
+	// Valid characters are any Unicode code points that are legal in an XML 1.0
+	// document. The UTF-8 encoding of the value must be less than 128 bytes.
+	CallerReference *string `min:"1" type:"string" required:"true"`
+
+	// The delegation set id of the reusable delegation set whose NS records you
+	// want to assign to the new hosted zone.
+	DelegationSetId *string `type:"string"`
+
+	// A complex type that contains an optional comment about your hosted zone.
+	HostedZoneConfig *HostedZoneConfig `type:"structure"`
+
+	// The name of the domain. This must be a fully-specified domain, for example,
+	// www.example.com. The trailing dot is optional; Amazon Route 53 assumes that
+	// the domain name is fully qualified. This means that Amazon Route 53 treats
+	// www.example.com (without a trailing dot) and www.example.com. (with a trailing
+	// dot) as identical.
+	//
+	// This is the name you have registered with your DNS registrar. You should
You should
+ // ask your registrar to change the authoritative name servers for your domain
+ // to the set of NameServers elements returned in DelegationSet.
+ Name *string `type:"string" required:"true"`
+
+ // The VPC that you want your hosted zone to be associated with. By providing
+ // this parameter, your newly created hosted zone cannot be resolved anywhere
+ // other than the given VPC.
+ VPC *VPC `type:"structure"`
+}
+
+// String returns the string representation
+func (s CreateHostedZoneInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreateHostedZoneInput) GoString() string {
+ return s.String()
+}
+
+// A complex type containing the response information for the new hosted zone.
+type CreateHostedZoneOutput struct {
+ _ struct{} `type:"structure"`
+
+ // A complex type that contains information about the request to create a hosted
+ // zone. This includes an ID that you use when you call the GetChange action
+ // to get the current status of the change request.
+ ChangeInfo *ChangeInfo `type:"structure" required:"true"`
+
+ // A complex type that contains name server information.
+ DelegationSet *DelegationSet `type:"structure" required:"true"`
+
+ // A complex type that contains identifying information about the hosted zone.
+ HostedZone *HostedZone `type:"structure" required:"true"`
+
+ // The unique URL representing the new hosted zone.
+ Location *string `location:"header" locationName:"Location" type:"string" required:"true"`
+
+ VPC *VPC `type:"structure"`
+}
+
+// String returns the string representation
+func (s CreateHostedZoneOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreateHostedZoneOutput) GoString() string {
+ return s.String()
+}
+
+type CreateReusableDelegationSetInput struct {
+ _ struct{} `locationName:"CreateReusableDelegationSetRequest" type:"structure" xmlURI:"https://route53.amazonaws.com/doc/2013-04-01/"`
+
+ // A unique string that identifies the request and that allows failed CreateReusableDelegationSet
+ // requests to be retried without the risk of executing the operation twice.
+ // You must use a unique CallerReference string every time you create a reusable
+ // delegation set. CallerReference can be any unique string; you might choose
+ // to use a string that identifies your project, such as DNSMigration_01.
+ //
+ // Valid characters are any Unicode code points that are legal in an XML 1.0
+ // document. The UTF-8 encoding of the value must be less than 128 bytes.
+ CallerReference *string `min:"1" type:"string" required:"true"`
+
+ // The ID of the hosted zone whose delegation set you want to mark as reusable.
+ // It is an optional parameter.
+ HostedZoneId *string `type:"string"`
+}
+
+// String returns the string representation
+func (s CreateReusableDelegationSetInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreateReusableDelegationSetInput) GoString() string {
+ return s.String()
+}
+
+type CreateReusableDelegationSetOutput struct {
+ _ struct{} `type:"structure"`
+
+ // A complex type that contains name server information.
+ DelegationSet *DelegationSet `type:"structure" required:"true"`
+
+ // The unique URL representing the new reusable delegation set.
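+ //
+ // A usage sketch for the operation that returns this value (illustrative
+ // only), assuming svc is an initialized *route53.Route53 client:
+ //
+ //	resp, err := svc.CreateReusableDelegationSet(&route53.CreateReusableDelegationSetInput{
+ //		CallerReference: aws.String("DNSMigration_01"),
+ //	})
+ //	// resp.DelegationSet.Id can then be passed as DelegationSetId when
+ //	// creating hosted zones that should share these NS records.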
+ Location *string `location:"header" locationName:"Location" type:"string" required:"true"` +} + +// String returns the string representation +func (s CreateReusableDelegationSetOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateReusableDelegationSetOutput) GoString() string { + return s.String() +} + +// A complex type that contains information about the traffic policy that you +// want to create. +type CreateTrafficPolicyInput struct { + _ struct{} `locationName:"CreateTrafficPolicyRequest" type:"structure" xmlURI:"https://route53.amazonaws.com/doc/2013-04-01/"` + + // Any comments that you want to include about the traffic policy. + Comment *string `type:"string"` + + // The definition of this traffic policy in JSON format. + Document *string `type:"string" required:"true"` + + // The name of the traffic policy. + Name *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s CreateTrafficPolicyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateTrafficPolicyInput) GoString() string { + return s.String() +} + +// A complex type that contains information about the resource record sets that +// you want to create based on a specified traffic policy. +type CreateTrafficPolicyInstanceInput struct { + _ struct{} `locationName:"CreateTrafficPolicyInstanceRequest" type:"structure" xmlURI:"https://route53.amazonaws.com/doc/2013-04-01/"` + + // The ID of the hosted zone in which you want Amazon Route 53 to create resource + // record sets by using the configuration in a traffic policy. + HostedZoneId *string `type:"string" required:"true"` + + // The domain name (such as example.com) or subdomain name (such as www.example.com) + // for which Amazon Route 53 responds to DNS queries by using the resource record + // sets that Amazon Route 53 creates for this traffic policy instance. + Name *string `type:"string" required:"true"` + + // The TTL that you want Amazon Route 53 to assign to all of the resource record + // sets that it creates in the specified hosted zone. + TTL *int64 `type:"long" required:"true"` + + // The ID of the traffic policy that you want to use to create resource record + // sets in the specified hosted zone. + TrafficPolicyId *string `type:"string" required:"true"` + + // The version of the traffic policy that you want to use to create resource + // record sets in the specified hosted zone. + TrafficPolicyVersion *int64 `min:"1" type:"integer" required:"true"` +} + +// String returns the string representation +func (s CreateTrafficPolicyInstanceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateTrafficPolicyInstanceInput) GoString() string { + return s.String() +} + +// A complex type that contains the response information for the CreateTrafficPolicyInstance +// request. +type CreateTrafficPolicyInstanceOutput struct { + _ struct{} `type:"structure"` + + // A unique URL that represents a new traffic policy instance. + Location *string `location:"header" locationName:"Location" type:"string" required:"true"` + + // A complex type that contains settings for the new traffic policy instance. 
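+ //
+ // A sketch of the corresponding request (illustrative only), assuming svc
+ // is an initialized *route53.Route53 client and the IDs are hypothetical:
+ //
+ //	resp, err := svc.CreateTrafficPolicyInstance(&route53.CreateTrafficPolicyInstanceInput{
+ //		HostedZoneId:         aws.String("Z3EXAMPLE"),
+ //		Name:                 aws.String("www.example.com"),
+ //		TTL:                  aws.Int64(60),
+ //		TrafficPolicyId:      aws.String("tp-example-id"),
+ //		TrafficPolicyVersion: aws.Int64(1),
+ //	})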
+ TrafficPolicyInstance *TrafficPolicyInstance `type:"structure" required:"true"` +} + +// String returns the string representation +func (s CreateTrafficPolicyInstanceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateTrafficPolicyInstanceOutput) GoString() string { + return s.String() +} + +// A complex type that contains the response information for the CreateTrafficPolicy +// request. +type CreateTrafficPolicyOutput struct { + _ struct{} `type:"structure"` + + Location *string `location:"header" locationName:"Location" type:"string" required:"true"` + + // A complex type that contains settings for the new traffic policy. + TrafficPolicy *TrafficPolicy `type:"structure" required:"true"` +} + +// String returns the string representation +func (s CreateTrafficPolicyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateTrafficPolicyOutput) GoString() string { + return s.String() +} + +// A complex type that contains information about the traffic policy for which +// you want to create a new version. +type CreateTrafficPolicyVersionInput struct { + _ struct{} `locationName:"CreateTrafficPolicyVersionRequest" type:"structure" xmlURI:"https://route53.amazonaws.com/doc/2013-04-01/"` + + // Any comments that you want to include about the new traffic policy version. + Comment *string `type:"string"` + + // The definition of a new traffic policy version, in JSON format. You must + // specify the full definition of the new traffic policy. You cannot specify + // just the differences between the new version and a previous version. + Document *string `type:"string" required:"true"` + + // The ID of the traffic policy for which you want to create a new version. + Id *string `location:"uri" locationName:"Id" type:"string" required:"true"` +} + +// String returns the string representation +func (s CreateTrafficPolicyVersionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateTrafficPolicyVersionInput) GoString() string { + return s.String() +} + +// A complex type that contains the response information for the CreateTrafficPolicyVersion +// request. +type CreateTrafficPolicyVersionOutput struct { + _ struct{} `type:"structure"` + + Location *string `location:"header" locationName:"Location" type:"string" required:"true"` + + // A complex type that contains settings for the new version of the traffic + // policy. + TrafficPolicy *TrafficPolicy `type:"structure" required:"true"` +} + +// String returns the string representation +func (s CreateTrafficPolicyVersionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateTrafficPolicyVersionOutput) GoString() string { + return s.String() +} + +// A complex type that contains name server information. +type DelegationSet struct { + _ struct{} `type:"structure"` + + CallerReference *string `min:"1" type:"string"` + + Id *string `type:"string"` + + // A complex type that contains the authoritative name servers for the hosted + // zone. Use the method provided by your domain registrar to add an NS record + // to your domain for each NameServer that is assigned to your hosted zone. 
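+ //
+ // For example (illustrative only), assuming ds is a *route53.DelegationSet
+ // returned by one of the operations above:
+ //
+ //	for _, ns := range ds.NameServers {
+ //		fmt.Println(aws.StringValue(ns)) // name servers to configure at the registrar
+ //	}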
+ NameServers []*string `locationNameList:"NameServer" min:"1" type:"list" required:"true"`
+}
+
+// String returns the string representation
+func (s DelegationSet) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DelegationSet) GoString() string {
+ return s.String()
+}
+
+// A complex type containing the request information for deleting a health check.
+type DeleteHealthCheckInput struct {
+ _ struct{} `type:"structure"`
+
+ // The ID of the health check to delete.
+ HealthCheckId *string `location:"uri" locationName:"HealthCheckId" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s DeleteHealthCheckInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteHealthCheckInput) GoString() string {
+ return s.String()
+}
+
+// Empty response for the request.
+type DeleteHealthCheckOutput struct {
+ _ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s DeleteHealthCheckOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteHealthCheckOutput) GoString() string {
+ return s.String()
+}
+
+// A complex type that contains information about the hosted zone that you want
+// to delete.
+type DeleteHostedZoneInput struct {
+ _ struct{} `type:"structure"`
+
+ // The ID of the hosted zone you want to delete.
+ Id *string `location:"uri" locationName:"Id" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s DeleteHostedZoneInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteHostedZoneInput) GoString() string {
+ return s.String()
+}
+
+// A complex type containing the response information for the request.
+type DeleteHostedZoneOutput struct {
+ _ struct{} `type:"structure"`
+
+ // A complex type that contains the ID, the status, and the date and time of
+ // your delete request.
+ ChangeInfo *ChangeInfo `type:"structure" required:"true"`
+}
+
+// String returns the string representation
+func (s DeleteHostedZoneOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteHostedZoneOutput) GoString() string {
+ return s.String()
+}
+
+// A complex type containing the information for the delete request.
+type DeleteReusableDelegationSetInput struct {
+ _ struct{} `type:"structure"`
+
+ // The ID of the reusable delegation set you want to delete.
+ Id *string `location:"uri" locationName:"Id" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s DeleteReusableDelegationSetInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteReusableDelegationSetInput) GoString() string {
+ return s.String()
+}
+
+// Empty response for the request.
+type DeleteReusableDelegationSetOutput struct {
+ _ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s DeleteReusableDelegationSetOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteReusableDelegationSetOutput) GoString() string {
+ return s.String()
+}
+
+// A request to delete a specified traffic policy version.
+type DeleteTrafficPolicyInput struct {
+ _ struct{} `type:"structure"`
+
+ // The ID of the traffic policy that you want to delete.
+ Id *string `location:"uri" locationName:"Id" type:"string" required:"true"`
+
+ // The version number of the traffic policy that you want to delete.
+ Version *int64 `location:"uri" locationName:"Version" min:"1" type:"integer" required:"true"`
+}
+
+// String returns the string representation
+func (s DeleteTrafficPolicyInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteTrafficPolicyInput) GoString() string {
+ return s.String()
+}
+
+// A complex type that contains information about the traffic policy instance
+// that you want to delete.
+type DeleteTrafficPolicyInstanceInput struct {
+ _ struct{} `type:"structure"`
+
+ // The ID of the traffic policy instance that you want to delete.
+ //
+ // When you delete a traffic policy instance, Amazon Route 53 also deletes
+ // all of the resource record sets that were created when you created the traffic
+ // policy instance.
+ Id *string `location:"uri" locationName:"Id" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s DeleteTrafficPolicyInstanceInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteTrafficPolicyInstanceInput) GoString() string {
+ return s.String()
+}
+
+// An empty element.
+type DeleteTrafficPolicyInstanceOutput struct {
+ _ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s DeleteTrafficPolicyInstanceOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteTrafficPolicyInstanceOutput) GoString() string {
+ return s.String()
+}
+
+// An empty element.
+type DeleteTrafficPolicyOutput struct {
+ _ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s DeleteTrafficPolicyOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteTrafficPolicyOutput) GoString() string {
+ return s.String()
+}
+
+// A complex type that contains information about the request to disassociate
+// a VPC from a hosted zone.
+type DisassociateVPCFromHostedZoneInput struct {
+ _ struct{} `locationName:"DisassociateVPCFromHostedZoneRequest" type:"structure" xmlURI:"https://route53.amazonaws.com/doc/2013-04-01/"`
+
+ // Optional: Any comments you want to include about a DisassociateVPCFromHostedZoneRequest.
+ Comment *string `type:"string"`
+
+ // The ID of the hosted zone you want to disassociate your VPC from.
+ //
+ // Note that you cannot disassociate the last VPC from a hosted zone.
+ HostedZoneId *string `location:"uri" locationName:"Id" type:"string" required:"true"`
+
+ // The VPC that you want your hosted zone to be disassociated from.
+ VPC *VPC `type:"structure" required:"true"`
+}
+
+// String returns the string representation
+func (s DisassociateVPCFromHostedZoneInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DisassociateVPCFromHostedZoneInput) GoString() string {
+ return s.String()
+}
+
+// A complex type containing the response information for the request.
+type DisassociateVPCFromHostedZoneOutput struct {
+ _ struct{} `type:"structure"`
+
+ // A complex type that contains the ID, the status, and the date and time of
+ // your DisassociateVPCFromHostedZoneRequest.
+ ChangeInfo *ChangeInfo `type:"structure" required:"true"` +} + +// String returns the string representation +func (s DisassociateVPCFromHostedZoneOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DisassociateVPCFromHostedZoneOutput) GoString() string { + return s.String() +} + +// A complex type that contains information about a geo location. +type GeoLocation struct { + _ struct{} `type:"structure"` + + // The code for a continent geo location. Note: only continent locations have + // a continent code. + // + // Valid values: AF | AN | AS | EU | OC | NA | SA + // + // Constraint: Specifying ContinentCode with either CountryCode or SubdivisionCode + // returns an InvalidInput error. + ContinentCode *string `min:"2" type:"string"` + + // The code for a country geo location. The default location uses '*' for the + // country code and will match all locations that are not matched by a geo location. + // + // The default geo location uses a * for the country code. All other country + // codes follow the ISO 3166 two-character code. + CountryCode *string `min:"1" type:"string"` + + // The code for a country's subdivision (e.g., a province of Canada). A subdivision + // code is only valid with the appropriate country code. + // + // Constraint: Specifying SubdivisionCode without CountryCode returns an InvalidInput + // error. + SubdivisionCode *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s GeoLocation) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GeoLocation) GoString() string { + return s.String() +} + +// A complex type that contains information about a GeoLocation. +type GeoLocationDetails struct { + _ struct{} `type:"structure"` + + // The code for a continent geo location. Note: only continent locations have + // a continent code. + ContinentCode *string `min:"2" type:"string"` + + // The name of the continent. This element is only present if ContinentCode + // is also present. + ContinentName *string `min:"1" type:"string"` + + // The code for a country geo location. The default location uses '*' for the + // country code and will match all locations that are not matched by a geo location. + // + // The default geo location uses a * for the country code. All other country + // codes follow the ISO 3166 two-character code. + CountryCode *string `min:"1" type:"string"` + + // The name of the country. This element is only present if CountryCode is also + // present. + CountryName *string `min:"1" type:"string"` + + // The code for a country's subdivision (e.g., a province of Canada). A subdivision + // code is only valid with the appropriate country code. + SubdivisionCode *string `min:"1" type:"string"` + + // The name of the subdivision. This element is only present if SubdivisionCode + // is also present. + SubdivisionName *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s GeoLocationDetails) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GeoLocationDetails) GoString() string { + return s.String() +} + +// The input for a GetChangeDetails request. +type GetChangeDetailsInput struct { + _ struct{} `type:"structure"` + + // The ID of the change batch request. The value that you specify here is the + // value that ChangeResourceRecordSets returned in the Id element when you submitted + // the request. 
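+ //
+ // A polling sketch with the related GetChange operation (illustrative only),
+ // assuming svc is an initialized *route53.Route53 client and changeID holds
+ // the ID described above:
+ //
+ //	resp, err := svc.GetChange(&route53.GetChangeInput{Id: aws.String(changeID)})
+ //	if err == nil && aws.StringValue(resp.ChangeInfo.Status) == "INSYNC" {
+ //		// the change has propagated to all Amazon Route 53 DNS servers
+ //	}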
+ Id *string `location:"uri" locationName:"Id" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s GetChangeDetailsInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetChangeDetailsInput) GoString() string {
+ return s.String()
+}
+
+// A complex type that contains the ChangeBatchRecord element.
+type GetChangeDetailsOutput struct {
+ _ struct{} `type:"structure"`
+
+ // A complex type that contains information about the specified change batch,
+ // including the change batch ID, the status of the change, and the contained
+ // changes.
+ ChangeBatchRecord *ChangeBatchRecord `type:"structure" required:"true"`
+}
+
+// String returns the string representation
+func (s GetChangeDetailsOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetChangeDetailsOutput) GoString() string {
+ return s.String()
+}
+
+// The input for a GetChange request.
+type GetChangeInput struct {
+ _ struct{} `type:"structure"`
+
+ // The ID of the change batch request. The value that you specify here is the
+ // value that ChangeResourceRecordSets returned in the Id element when you submitted
+ // the request.
+ Id *string `location:"uri" locationName:"Id" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s GetChangeInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetChangeInput) GoString() string {
+ return s.String()
+}
+
+// A complex type that contains the ChangeInfo element.
+type GetChangeOutput struct {
+ _ struct{} `type:"structure"`
+
+ // A complex type that contains information about the specified change batch,
+ // including the change batch ID, the status of the change, and the date and
+ // time of the request.
+ ChangeInfo *ChangeInfo `type:"structure" required:"true"`
+}
+
+// String returns the string representation
+func (s GetChangeOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetChangeOutput) GoString() string {
+ return s.String()
+}
+
+// Empty request.
+type GetCheckerIpRangesInput struct {
+ _ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s GetCheckerIpRangesInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetCheckerIpRangesInput) GoString() string {
+ return s.String()
+}
+
+// A complex type that contains the CheckerIpRanges element.
+type GetCheckerIpRangesOutput struct {
+ _ struct{} `type:"structure"`
+
+ // A complex type that contains a sorted list of IP ranges in CIDR format for
+ // Amazon Route 53 health checkers.
+ CheckerIpRanges []*string `type:"list" required:"true"`
+}
+
+// String returns the string representation
+func (s GetCheckerIpRangesOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetCheckerIpRangesOutput) GoString() string {
+ return s.String()
+}
+
+// A complex type that contains information about the request to get a geo location.
+type GetGeoLocationInput struct {
+ _ struct{} `type:"structure"`
+
+ // The code for a continent geo location. Note: only continent locations have
+ // a continent code.
+ // + // Valid values: AF | AN | AS | EU | OC | NA | SA + // + // Constraint: Specifying ContinentCode with either CountryCode or SubdivisionCode + // returns an InvalidInput error. + ContinentCode *string `location:"querystring" locationName:"continentcode" min:"2" type:"string"` + + // The code for a country geo location. The default location uses '*' for the + // country code and will match all locations that are not matched by a geo location. + // + // The default geo location uses a * for the country code. All other country + // codes follow the ISO 3166 two-character code. + CountryCode *string `location:"querystring" locationName:"countrycode" min:"1" type:"string"` + + // The code for a country's subdivision (e.g., a province of Canada). A subdivision + // code is only valid with the appropriate country code. + // + // Constraint: Specifying SubdivisionCode without CountryCode returns an InvalidInput + // error. + SubdivisionCode *string `location:"querystring" locationName:"subdivisioncode" min:"1" type:"string"` +} + +// String returns the string representation +func (s GetGeoLocationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetGeoLocationInput) GoString() string { + return s.String() +} + +// A complex type containing information about the specified geo location. +type GetGeoLocationOutput struct { + _ struct{} `type:"structure"` + + // A complex type that contains the information about the specified geo location. + GeoLocationDetails *GeoLocationDetails `type:"structure" required:"true"` +} + +// String returns the string representation +func (s GetGeoLocationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetGeoLocationOutput) GoString() string { + return s.String() +} + +// To retrieve a count of all your health checks, send a GET request to the +// 2013-04-01/healthcheckcount resource. +type GetHealthCheckCountInput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s GetHealthCheckCountInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetHealthCheckCountInput) GoString() string { + return s.String() +} + +// A complex type that contains the count of health checks associated with the +// current AWS account. +type GetHealthCheckCountOutput struct { + _ struct{} `type:"structure"` + + // The number of health checks associated with the current AWS account. + HealthCheckCount *int64 `type:"long" required:"true"` +} + +// String returns the string representation +func (s GetHealthCheckCountOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetHealthCheckCountOutput) GoString() string { + return s.String() +} + +// A complex type that contains information about the request to get a health +// check. +type GetHealthCheckInput struct { + _ struct{} `type:"structure"` + + // The ID of the health check to retrieve. 
+ HealthCheckId *string `location:"uri" locationName:"HealthCheckId" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetHealthCheckInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetHealthCheckInput) GoString() string { + return s.String() +} + +// A complex type that contains information about the request to get the most +// recent failure reason for a health check. +type GetHealthCheckLastFailureReasonInput struct { + _ struct{} `type:"structure"` + + // The ID of the health check for which you want to retrieve the reason for + // the most recent failure. + HealthCheckId *string `location:"uri" locationName:"HealthCheckId" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetHealthCheckLastFailureReasonInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetHealthCheckLastFailureReasonInput) GoString() string { + return s.String() +} + +// A complex type that contains information about the most recent failure for +// the specified health check. +type GetHealthCheckLastFailureReasonOutput struct { + _ struct{} `type:"structure"` + + // A list that contains one HealthCheckObservation element for each Amazon Route + // 53 health checker. + HealthCheckObservations []*HealthCheckObservation `locationNameList:"HealthCheckObservation" type:"list" required:"true"` +} + +// String returns the string representation +func (s GetHealthCheckLastFailureReasonOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetHealthCheckLastFailureReasonOutput) GoString() string { + return s.String() +} + +// A complex type containing information about the specified health check. +type GetHealthCheckOutput struct { + _ struct{} `type:"structure"` + + // A complex type that contains the information about the specified health check. + HealthCheck *HealthCheck `type:"structure" required:"true"` +} + +// String returns the string representation +func (s GetHealthCheckOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetHealthCheckOutput) GoString() string { + return s.String() +} + +// A complex type that contains information about the request to get health +// check status for a health check. +type GetHealthCheckStatusInput struct { + _ struct{} `type:"structure"` + + // If you want Amazon Route 53 to return this resource record set in response + // to a DNS query only when a health check is passing, include the HealthCheckId + // element and specify the ID of the applicable health check. + // + // Amazon Route 53 determines whether a resource record set is healthy by periodically + // sending a request to the endpoint that is specified in the health check. + // If that endpoint returns an HTTP status code of 2xx or 3xx, the endpoint + // is healthy. If the endpoint returns an HTTP status code of 400 or greater, + // or if the endpoint doesn't respond for a certain amount of time, Amazon Route + // 53 considers the endpoint unhealthy and also considers the resource record + // set unhealthy. + // + // The HealthCheckId element is only useful when Amazon Route 53 is choosing + // between two or more resource record sets to respond to a DNS query, and you + // want Amazon Route 53 to base the choice in part on the status of a health + // check. 
Configuring health checks only makes sense in the following configurations: + // + // You're checking the health of the resource record sets in a weighted, latency, + // geolocation, or failover resource record set, and you specify health check + // IDs for all of the resource record sets. If the health check for one resource + // record set specifies an endpoint that is not healthy, Amazon Route 53 stops + // responding to queries using the value for that resource record set. You set + // EvaluateTargetHealth to true for the resource record sets in an alias, weighted + // alias, latency alias, geolocation alias, or failover alias resource record + // set, and you specify health check IDs for all of the resource record sets + // that are referenced by the alias resource record sets. For more information + // about this configuration, see EvaluateTargetHealth. + // + // Amazon Route 53 doesn't check the health of the endpoint specified in the + // resource record set, for example, the endpoint specified by the IP address + // in the Value element. When you add a HealthCheckId element to a resource + // record set, Amazon Route 53 checks the health of the endpoint that you specified + // in the health check. + // + // For geolocation resource record sets, if an endpoint is unhealthy, Amazon + // Route 53 looks for a resource record set for the larger, associated geographic + // region. For example, suppose you have resource record sets for a state in + // the United States, for the United States, for North America, and for all + // locations. If the endpoint for the state resource record set is unhealthy, + // Amazon Route 53 checks the resource record sets for the United States, for + // North America, and for all locations (a resource record set for which the + // value of CountryCode is *), in that order, until it finds a resource record + // set for which the endpoint is healthy. + // + // If your health checks specify the endpoint only by domain name, we recommend + // that you create a separate health check for each endpoint. For example, create + // a health check for each HTTP server that is serving content for www.example.com. + // For the value of FullyQualifiedDomainName, specify the domain name of the + // server (such as us-east-1-www.example.com), not the name of the resource + // record sets (example.com). + // + // In this configuration, if you create a health check for which the value + // of FullyQualifiedDomainName matches the name of the resource record sets + // and then associate the health check with those resource record sets, health + // check results will be unpredictable. + HealthCheckId *string `location:"uri" locationName:"HealthCheckId" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetHealthCheckStatusInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetHealthCheckStatusInput) GoString() string { + return s.String() +} + +// A complex type that contains information about the status of the specified +// health check. +type GetHealthCheckStatusOutput struct { + _ struct{} `type:"structure"` + + // A list that contains one HealthCheckObservation element for each Amazon Route + // 53 health checker. 
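+ //
+ // A sketch of reading the observations (illustrative only), assuming resp
+ // is a *route53.GetHealthCheckStatusOutput:
+ //
+ //	for _, o := range resp.HealthCheckObservations {
+ //		fmt.Println(aws.StringValue(o.IPAddress), aws.StringValue(o.StatusReport.Status))
+ //	}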
+ HealthCheckObservations []*HealthCheckObservation `locationNameList:"HealthCheckObservation" type:"list" required:"true"`
+}
+
+// String returns the string representation
+func (s GetHealthCheckStatusOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetHealthCheckStatusOutput) GoString() string {
+ return s.String()
+}
+
+// To retrieve a count of all your hosted zones, send a GET request to the 2013-04-01/hostedzonecount
+// resource.
+type GetHostedZoneCountInput struct {
+ _ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s GetHostedZoneCountInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetHostedZoneCountInput) GoString() string {
+ return s.String()
+}
+
+// A complex type that contains the count of hosted zones associated with the
+// current AWS account.
+type GetHostedZoneCountOutput struct {
+ _ struct{} `type:"structure"`
+
+ // The number of hosted zones associated with the current AWS account.
+ HostedZoneCount *int64 `type:"long" required:"true"`
+}
+
+// String returns the string representation
+func (s GetHostedZoneCountOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetHostedZoneCountOutput) GoString() string {
+ return s.String()
+}
+
+// The input for a GetHostedZone request.
+type GetHostedZoneInput struct {
+ _ struct{} `type:"structure"`
+
+ // The ID of the hosted zone for which you want to get a list of the name servers
+ // in the delegation set.
+ Id *string `location:"uri" locationName:"Id" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s GetHostedZoneInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetHostedZoneInput) GoString() string {
+ return s.String()
+}
+
+// A complex type containing information about the specified hosted zone.
+type GetHostedZoneOutput struct {
+ _ struct{} `type:"structure"`
+
+ // A complex type that contains information about the name servers for the specified
+ // hosted zone.
+ DelegationSet *DelegationSet `type:"structure"`
+
+ // A complex type that contains the information about the specified hosted zone.
+ HostedZone *HostedZone `type:"structure" required:"true"`
+
+ // A complex type that contains information about VPCs associated with the specified
+ // hosted zone.
+ VPCs []*VPC `locationNameList:"VPC" min:"1" type:"list"`
+}
+
+// String returns the string representation
+func (s GetHostedZoneOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetHostedZoneOutput) GoString() string {
+ return s.String()
+}
+
+// The input for a GetReusableDelegationSet request.
+type GetReusableDelegationSetInput struct {
+ _ struct{} `type:"structure"`
+
+ // The ID of the reusable delegation set for which you want to get a list of
+ // the name servers.
+ Id *string `location:"uri" locationName:"Id" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s GetReusableDelegationSetInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetReusableDelegationSetInput) GoString() string {
+ return s.String()
+}
+
+// A complex type containing information about the specified reusable delegation
+// set.
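+//
+// A usage sketch (illustrative only), assuming svc is an initialized
+// *route53.Route53 client and the delegation set ID is hypothetical:
+//
+//	resp, err := svc.GetReusableDelegationSet(&route53.GetReusableDelegationSetInput{
+//		Id: aws.String("N1EXAMPLE"),
+//	})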
+type GetReusableDelegationSetOutput struct { + _ struct{} `type:"structure"` + + // A complex type that contains the information about the nameservers for the + // specified delegation set ID. + DelegationSet *DelegationSet `type:"structure" required:"true"` +} + +// String returns the string representation +func (s GetReusableDelegationSetOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetReusableDelegationSetOutput) GoString() string { + return s.String() +} + +// Gets information about a specific traffic policy version. To get the information, +// send a GET request to the 2013-04-01/trafficpolicy resource, and specify +// the ID and the version of the traffic policy. +type GetTrafficPolicyInput struct { + _ struct{} `type:"structure"` + + // The ID of the traffic policy that you want to get information about. + Id *string `location:"uri" locationName:"Id" type:"string" required:"true"` + + // The version number of the traffic policy that you want to get information + // about. + Version *int64 `location:"uri" locationName:"Version" min:"1" type:"integer" required:"true"` +} + +// String returns the string representation +func (s GetTrafficPolicyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetTrafficPolicyInput) GoString() string { + return s.String() +} + +// To retrieve a count of all your traffic policy instances, send a GET request +// to the 2013-04-01/trafficpolicyinstancecount resource. +type GetTrafficPolicyInstanceCountInput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s GetTrafficPolicyInstanceCountInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetTrafficPolicyInstanceCountInput) GoString() string { + return s.String() +} + +// A complex type that contains information about the number of traffic policy +// instances that are associated with the current AWS account. +type GetTrafficPolicyInstanceCountOutput struct { + _ struct{} `type:"structure"` + + // The number of traffic policy instances that are associated with the current + // AWS account. + TrafficPolicyInstanceCount *int64 `type:"integer" required:"true"` +} + +// String returns the string representation +func (s GetTrafficPolicyInstanceCountOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetTrafficPolicyInstanceCountOutput) GoString() string { + return s.String() +} + +// Gets information about a specified traffic policy instance. +// +// To get information about a traffic policy instance, send a GET request to +// the 2013-04-01/trafficpolicyinstance/Id resource. +type GetTrafficPolicyInstanceInput struct { + _ struct{} `type:"structure"` + + // The ID of the traffic policy instance that you want to get information about. + Id *string `location:"uri" locationName:"Id" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetTrafficPolicyInstanceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetTrafficPolicyInstanceInput) GoString() string { + return s.String() +} + +// A complex type that contains information about the resource record sets that +// Amazon Route 53 created based on a specified traffic policy. 
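+//
+// A usage sketch (illustrative only), assuming svc is an initialized
+// *route53.Route53 client and the instance ID is hypothetical:
+//
+//	resp, err := svc.GetTrafficPolicyInstance(&route53.GetTrafficPolicyInstanceInput{
+//		Id: aws.String("ti-example-id"),
+//	})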
+type GetTrafficPolicyInstanceOutput struct { + _ struct{} `type:"structure"` + + // A complex type that contains settings for the traffic policy instance. + TrafficPolicyInstance *TrafficPolicyInstance `type:"structure" required:"true"` +} + +// String returns the string representation +func (s GetTrafficPolicyInstanceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetTrafficPolicyInstanceOutput) GoString() string { + return s.String() +} + +// A complex type that contains the response information for the request. +type GetTrafficPolicyOutput struct { + _ struct{} `type:"structure"` + + // A complex type that contains settings for the specified traffic policy. + TrafficPolicy *TrafficPolicy `type:"structure" required:"true"` +} + +// String returns the string representation +func (s GetTrafficPolicyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetTrafficPolicyOutput) GoString() string { + return s.String() +} + +// A complex type that contains identifying information about the health check. +type HealthCheck struct { + _ struct{} `type:"structure"` + + // A unique string that identifies the request to create the health check. + CallerReference *string `min:"1" type:"string" required:"true"` + + // A complex type that contains the health check configuration. + HealthCheckConfig *HealthCheckConfig `type:"structure" required:"true"` + + // The version of the health check. You can optionally pass this value in a + // call to UpdateHealthCheck to prevent overwriting another change to the health + // check. + HealthCheckVersion *int64 `min:"1" type:"long" required:"true"` + + // The ID of the specified health check. + Id *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s HealthCheck) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s HealthCheck) GoString() string { + return s.String() +} + +// A complex type that contains the health check configuration. +type HealthCheckConfig struct { + _ struct{} `type:"structure"` + + // For a specified parent health check, a list of HealthCheckId values for the + // associated child health checks. + ChildHealthChecks []*string `locationNameList:"ChildHealthCheck" type:"list"` + + // The number of consecutive health checks that an endpoint must pass or fail + // for Amazon Route 53 to change the current status of the endpoint from unhealthy + // to healthy or vice versa. + // + // Valid values are integers between 1 and 10. For more information, see "How + // Amazon Route 53 Determines Whether an Endpoint Is Healthy" in the Amazon + // Route 53 Developer Guide. + FailureThreshold *int64 `min:"1" type:"integer"` + + // Fully qualified domain name of the instance to be health checked. + FullyQualifiedDomainName *string `type:"string"` + + // The minimum number of child health checks that must be healthy for Amazon + // Route 53 to consider the parent health check to be healthy. Valid values + // are integers between 0 and 256, inclusive. + HealthThreshold *int64 `type:"integer"` + + // IP Address of the instance being checked. + IPAddress *string `type:"string"` + + // A boolean value that indicates whether the status of health check should + // be inverted. For example, if a health check is healthy but Inverted is True, + // then Amazon Route 53 considers the health check to be unhealthy. 
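+ //
+ // A sketch of a complete string-match configuration using this and the
+ // surrounding fields (illustrative only; all values are hypothetical):
+ //
+ //	cfg := &route53.HealthCheckConfig{
+ //		Type:                     aws.String("HTTPS_STR_MATCH"),
+ //		FullyQualifiedDomainName: aws.String("www.example.com"),
+ //		Port:             aws.Int64(443),
+ //		ResourcePath:     aws.String("/health"),
+ //		SearchString:     aws.String("ok"),
+ //		FailureThreshold: aws.Int64(3),
+ //		RequestInterval:  aws.Int64(30),
+ //		Inverted:         aws.Bool(false),
+ //	}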
+ Inverted *bool `type:"boolean"`
+
+ // A Boolean value that indicates whether you want Amazon Route 53 to measure
+ // the latency between health checkers in multiple AWS regions and your endpoint
+ // and to display CloudWatch latency graphs in the Amazon Route 53 console.
+ MeasureLatency *bool `type:"boolean"`
+
+ // Port on which connection will be opened to the instance to health check.
+ // For HTTP and HTTP_STR_MATCH this defaults to 80 if the port is not specified.
+ // For HTTPS and HTTPS_STR_MATCH this defaults to 443 if the port is not specified.
+ Port *int64 `min:"1" type:"integer"`
+
+ // The number of seconds between the time that Amazon Route 53 gets a response
+ // from your endpoint and the time that it sends the next health-check request.
+ //
+ // Each Amazon Route 53 health checker makes requests at this interval. Valid
+ // values are 10 and 30. The default value is 30.
+ RequestInterval *int64 `min:"10" type:"integer"`
+
+ // Path to ping on the instance to check the health. Required for HTTP, HTTPS,
+ // HTTP_STR_MATCH, and HTTPS_STR_MATCH health checks. The HTTP request is issued
+ // to the instance on the given port and path.
+ ResourcePath *string `type:"string"`
+
+ // A string to search for in the body of a health check response. Required for
+ // HTTP_STR_MATCH and HTTPS_STR_MATCH health checks.
+ SearchString *string `type:"string"`
+
+ // The type of health check to be performed. Currently supported types are TCP,
+ // HTTP, HTTPS, HTTP_STR_MATCH, and HTTPS_STR_MATCH.
+ Type *string `type:"string" required:"true" enum:"HealthCheckType"`
+}
+
+// String returns the string representation
+func (s HealthCheckConfig) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s HealthCheckConfig) GoString() string {
+ return s.String()
+}
+
+// A complex type that contains the IP address of an Amazon Route 53 health checker
+// and the reason for the health check status.
+type HealthCheckObservation struct {
+ _ struct{} `type:"structure"`
+
+ // The IP address of the Amazon Route 53 health checker that performed the health
+ // check.
+ IPAddress *string `type:"string"`
+
+ // A complex type that contains information about the health check status for
+ // the current observation.
+ StatusReport *StatusReport `type:"structure"`
+}
+
+// String returns the string representation
+func (s HealthCheckObservation) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s HealthCheckObservation) GoString() string {
+ return s.String()
+}
+
+// A complex type that contains information about the specified hosted zone.
+type HostedZone struct {
+ _ struct{} `type:"structure"`
+
+ // A unique string that identifies the request to create the hosted zone.
+ CallerReference *string `min:"1" type:"string" required:"true"`
+
+ // A complex type that contains the Comment element.
+ Config *HostedZoneConfig `type:"structure"`
+
+ // The ID of the specified hosted zone.
+ Id *string `type:"string" required:"true"`
+
+ // The name of the domain. This must be a fully-specified domain, for example,
+ // www.example.com. The trailing dot is optional; Amazon Route 53 assumes that
+ // the domain name is fully qualified. This means that Amazon Route 53 treats
+ // www.example.com (without a trailing dot) and www.example.com. (with a trailing
+ // dot) as identical.
+ //
+ // This is the name you have registered with your DNS registrar.
You should
+ // ask your registrar to change the authoritative name servers for your domain
+ // to the set of NameServers elements returned in DelegationSet.
+ Name *string `type:"string" required:"true"`
+
+ // Total number of resource record sets in the hosted zone.
+ ResourceRecordSetCount *int64 `type:"long"`
+}
+
+// String returns the string representation
+func (s HostedZone) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s HostedZone) GoString() string {
+ return s.String()
+}
+
+// A complex type that contains an optional comment about your hosted zone.
+// If you don't want to specify a comment, you can omit the HostedZoneConfig
+// and Comment elements from the XML document.
+type HostedZoneConfig struct {
+ _ struct{} `type:"structure"`
+
+ // An optional comment about your hosted zone. If you don't want to specify
+ // a comment, you can omit the HostedZoneConfig and Comment elements from the
+ // XML document.
+ Comment *string `type:"string"`
+
+ PrivateZone *bool `type:"boolean"`
+}
+
+// String returns the string representation
+func (s HostedZoneConfig) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s HostedZoneConfig) GoString() string {
+ return s.String()
+}
+
+// The input for a ListChangeBatchesByHostedZone request.
+type ListChangeBatchesByHostedZoneInput struct {
+ _ struct{} `type:"structure"`
+
+ // The end of the time period you want to see changes for.
+ EndDate *string `location:"querystring" locationName:"endDate" type:"string" required:"true"`
+
+ // The ID of the hosted zone that you want to see changes for.
+ HostedZoneId *string `location:"uri" locationName:"Id" type:"string" required:"true"`
+
+ // The page marker.
+ Marker *string `location:"querystring" locationName:"marker" type:"string"`
+
+ // The maximum number of items on a page.
+ MaxItems *string `location:"querystring" locationName:"maxItems" type:"string"`
+
+ // The start of the time period you want to see changes for.
+ StartDate *string `location:"querystring" locationName:"startDate" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s ListChangeBatchesByHostedZoneInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListChangeBatchesByHostedZoneInput) GoString() string {
+ return s.String()
+}
+
+// The output for a ListChangeBatchesByHostedZone request.
+type ListChangeBatchesByHostedZoneOutput struct {
+ _ struct{} `type:"structure"`
+
+ // The change batches within the given hosted zone and time period.
+ ChangeBatchRecords []*ChangeBatchRecord `locationNameList:"ChangeBatchRecord" min:"1" type:"list" required:"true"`
+
+ // A flag that indicates if there are more change batches to list.
+ IsTruncated *bool `type:"boolean"`
+
+ // The page marker.
+ Marker *string `type:"string" required:"true"`
+
+ // The maximum number of items on a page.
+ MaxItems *string `type:"string" required:"true"`
+
+ // The next page marker.
+ NextMarker *string `type:"string"`
+}
+
+// String returns the string representation
+func (s ListChangeBatchesByHostedZoneOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListChangeBatchesByHostedZoneOutput) GoString() string {
+ return s.String()
+}
+
+// The input for a ListChangeBatchesByRRSet request.
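+//
+// A usage sketch (illustrative only), assuming svc is an initialized
+// *route53.Route53 client; the zone ID and date strings are hypothetical:
+//
+//	resp, err := svc.ListChangeBatchesByRRSet(&route53.ListChangeBatchesByRRSetInput{
+//		HostedZoneId: aws.String("Z3EXAMPLE"),
+//		Name:         aws.String("www.example.com."),
+//		Type:         aws.String("A"),
+//		StartDate:    aws.String("2016-01-01"),
+//		EndDate:      aws.String("2016-01-29"),
+//	})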
+type ListChangeBatchesByRRSetInput struct {
+ _ struct{} `type:"structure"`
+
+ // The end of the time period you want to see changes for.
+ EndDate *string `location:"querystring" locationName:"endDate" type:"string" required:"true"`
+
+ // The ID of the hosted zone that you want to see changes for.
+ HostedZoneId *string `location:"uri" locationName:"Id" type:"string" required:"true"`
+
+ // The page marker.
+ Marker *string `location:"querystring" locationName:"marker" type:"string"`
+
+ // The maximum number of items on a page.
+ MaxItems *string `location:"querystring" locationName:"maxItems" type:"string"`
+
+ // The name of the RRSet that you want to see changes for.
+ Name *string `location:"querystring" locationName:"rrSet_name" type:"string" required:"true"`
+
+ // The identifier of the RRSet that you want to see changes for.
+ SetIdentifier *string `location:"querystring" locationName:"identifier" min:"1" type:"string"`
+
+ // The start of the time period you want to see changes for.
+ StartDate *string `location:"querystring" locationName:"startDate" type:"string" required:"true"`
+
+ // The type of the RRSet that you want to see changes for.
+ Type *string `location:"querystring" locationName:"type" type:"string" required:"true" enum:"RRType"`
+}
+
+// String returns the string representation
+func (s ListChangeBatchesByRRSetInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListChangeBatchesByRRSetInput) GoString() string {
+ return s.String()
+}
+
+// The output for a ListChangeBatchesByRRSet request.
+type ListChangeBatchesByRRSetOutput struct {
+ _ struct{} `type:"structure"`
+
+ // The change batches within the given hosted zone and time period.
+ ChangeBatchRecords []*ChangeBatchRecord `locationNameList:"ChangeBatchRecord" min:"1" type:"list" required:"true"`
+
+ // A flag that indicates if there are more change batches to list.
+ IsTruncated *bool `type:"boolean"`
+
+ // The page marker.
+ Marker *string `type:"string" required:"true"`
+
+ // The maximum number of items on a page.
+ MaxItems *string `type:"string" required:"true"`
+
+ // The next page marker.
+ NextMarker *string `type:"string"`
+}
+
+// String returns the string representation
+func (s ListChangeBatchesByRRSetOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListChangeBatchesByRRSetOutput) GoString() string {
+ return s.String()
+}
+
+// The input for a ListGeoLocations request.
+type ListGeoLocationsInput struct {
+ _ struct{} `type:"structure"`
+
+ // The maximum number of geo locations you want in the response body.
+ MaxItems *string `location:"querystring" locationName:"maxitems" type:"string"`
+
+ // The first continent code in the lexicographic ordering of geo locations that
+ // you want the ListGeoLocations request to list. For non-continent geo locations,
+ // this should be null.
+ //
+ // Valid values: AF | AN | AS | EU | OC | NA | SA
+ //
+ // Constraint: Specifying ContinentCode with either CountryCode or SubdivisionCode
+ // returns an InvalidInput error.
+ StartContinentCode *string `location:"querystring" locationName:"startcontinentcode" min:"2" type:"string"`
+
+ // The first country code in the lexicographic ordering of geo locations that
+ // you want the ListGeoLocations request to list.
+ //
+ // The default geo location uses a * for the country code. All other country
+ // codes follow the ISO 3166 two-character code.
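+ //
+ // A pagination sketch that advances all three start cursors together
+ // (illustrative only), assuming svc is an initialized *route53.Route53 client:
+ //
+ //	in := &route53.ListGeoLocationsInput{}
+ //	for {
+ //		resp, err := svc.ListGeoLocations(in)
+ //		if err != nil || !aws.BoolValue(resp.IsTruncated) {
+ //			break
+ //		}
+ //		in.StartContinentCode = resp.NextContinentCode
+ //		in.StartCountryCode = resp.NextCountryCode
+ //		in.StartSubdivisionCode = resp.NextSubdivisionCode
+ //	}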
+ StartCountryCode *string `location:"querystring" locationName:"startcountrycode" min:"1" type:"string"` + + // The first subdivision code in the lexicographic ordering of geo locations + // that you want the ListGeoLocations request to list. + // + // Constraint: Specifying SubdivisionCode without CountryCode returns an InvalidInput + // error. + StartSubdivisionCode *string `location:"querystring" locationName:"startsubdivisioncode" min:"1" type:"string"` +} + +// String returns the string representation +func (s ListGeoLocationsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListGeoLocationsInput) GoString() string { + return s.String() +} + +// A complex type that contains information about the geo locations that are +// returned by the request and information about the response. +type ListGeoLocationsOutput struct { + _ struct{} `type:"structure"` + + // A complex type that contains information about the geo locations that are + // returned by the request. + GeoLocationDetailsList []*GeoLocationDetails `locationNameList:"GeoLocationDetails" type:"list" required:"true"` + + // A flag that indicates whether there are more geo locations to be listed. + // If your results were truncated, you can make a follow-up request for the + // next page of results by using the values included in the ListGeoLocationsResponse$NextContinentCode, + // ListGeoLocationsResponse$NextCountryCode and ListGeoLocationsResponse$NextSubdivisionCode + // elements. + // + // Valid Values: true | false + IsTruncated *bool `type:"boolean" required:"true"` + + // The maximum number of records you requested. The maximum value of MaxItems + // is 100. + MaxItems *string `type:"string" required:"true"` + + // If the results were truncated, the continent code of the next geo location + // in the list. This element is present only if ListGeoLocationsResponse$IsTruncated + // is true and the next geo location to list is a continent location. + NextContinentCode *string `min:"2" type:"string"` + + // If the results were truncated, the country code of the next geo location + // in the list. This element is present only if ListGeoLocationsResponse$IsTruncated + // is true and the next geo location to list is not a continent location. + NextCountryCode *string `min:"1" type:"string"` + + // If the results were truncated, the subdivision code of the next geo location + // in the list. This element is present only if ListGeoLocationsResponse$IsTruncated + // is true and the next geo location has a subdivision. + NextSubdivisionCode *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s ListGeoLocationsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListGeoLocationsOutput) GoString() string { + return s.String() +} + +// To retrieve a list of your health checks, send a GET request to the 2013-04-01/healthcheck +// resource. The response to this request includes a HealthChecks element with +// zero or more HealthCheck child elements. By default, the list of health checks +// is displayed on a single page. You can control the length of the page that +// is displayed by using the MaxItems parameter. You can use the Marker parameter +// to control the health check that the list begins with. +// +// Amazon Route 53 returns a maximum of 100 items. If you set MaxItems to +// a value greater than 100, Amazon Route 53 returns only the first 100. 
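+//
+// A Marker-based pagination sketch (illustrative only), assuming svc is an
+// initialized *route53.Route53 client:
+//
+//	in := &route53.ListHealthChecksInput{MaxItems: aws.String("100")}
+//	for {
+//		resp, err := svc.ListHealthChecks(in)
+//		if err != nil {
+//			break
+//		}
+//		// ... use resp.HealthChecks ...
+//		if !aws.BoolValue(resp.IsTruncated) {
+//			break
+//		}
+//		in.Marker = resp.NextMarker
+//	}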
+type ListHealthChecksInput struct {
+	_ struct{} `type:"structure"`
+
+	// If the request returned more than one page of results, submit another request
+	// and specify the value of NextMarker from the last response in the marker
+	// parameter to get the next page of results.
+	Marker *string `location:"querystring" locationName:"marker" type:"string"`
+
+	// Specify the maximum number of health checks to return per page of results.
+	MaxItems *string `location:"querystring" locationName:"maxitems" type:"string"`
+}
+
+// String returns the string representation
+func (s ListHealthChecksInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListHealthChecksInput) GoString() string {
+	return s.String()
+}
+
+// A complex type that contains the response for the request.
+type ListHealthChecksOutput struct {
+	_ struct{} `type:"structure"`
+
+	// A complex type that contains information about the health checks associated
+	// with the current AWS account.
+	HealthChecks []*HealthCheck `locationNameList:"HealthCheck" type:"list" required:"true"`
+
+	// A flag indicating whether there are more health checks to be listed. If your
+	// results were truncated, you can make a follow-up request for the next page
+	// of results by using the Marker element.
+	//
+	// Valid Values: true | false
+	IsTruncated *bool `type:"boolean" required:"true"`
+
+	// If the request returned more than one page of results, submit another request
+	// and specify the value of NextMarker from the last response in the marker
+	// parameter to get the next page of results.
+	Marker *string `type:"string" required:"true"`
+
+	// The maximum number of health checks to be included in the response body.
+	// If the number of health checks associated with this AWS account exceeds MaxItems,
+	// the value of ListHealthChecksResponse$IsTruncated in the response is true.
+	// Call ListHealthChecks again and specify the value of ListHealthChecksResponse$NextMarker
+	// in the ListHealthChecksRequest$Marker element to get the next page of results.
+	MaxItems *string `type:"string" required:"true"`
+
+	// Indicates where to continue listing health checks. If ListHealthChecksResponse$IsTruncated
+	// is true, make another request to ListHealthChecks and include the value of
+	// the NextMarker element in the Marker element to get the next page of results.
+	NextMarker *string `type:"string"`
+}
+
+// String returns the string representation
+func (s ListHealthChecksOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListHealthChecksOutput) GoString() string {
+	return s.String()
+}
+
+// To retrieve a list of your hosted zones in lexicographic order, send a GET
+// request to the 2013-04-01/hostedzonesbyname resource. The response to this
+// request includes a HostedZones element with zero or more HostedZone child
+// elements lexicographically ordered by DNS name. By default, the list of hosted
+// zones is displayed on a single page. You can control the length of the page
+// that is displayed by using the MaxItems parameter. You can use the DNSName
+// and HostedZoneId parameters to control the hosted zone that the list begins
+// with.
+//
+// For more information about listing hosted zones, see Listing the Hosted
+// Zones for an AWS Account (http://docs.aws.amazon.com/Route53/latest/DeveloperGuide/ListInfoOnHostedZone.html)
+// in the Amazon Route 53 Developer Guide.
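+//
+// A minimal sketch, assuming an initialized *route53.Route53 client named
+// svc (the domain name is illustrative):
+//
+//     out, err := svc.ListHostedZonesByName(&route53.ListHostedZonesByNameInput{
+//         DNSName:  aws.String("example.com."),
+//         MaxItems: aws.String("100"),
+//     })
+//     if err == nil && aws.BoolValue(out.IsTruncated) {
+//         // Pass out.NextDNSName and out.NextHostedZoneId in the next request.
+//     }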
+type ListHostedZonesByNameInput struct { + _ struct{} `type:"structure"` + + // The first name in the lexicographic ordering of domain names that you want + // the ListHostedZonesByNameRequest request to list. + // + // If the request returned more than one page of results, submit another request + // and specify the value of NextDNSName and NextHostedZoneId from the last response + // in the DNSName and HostedZoneId parameters to get the next page of results. + DNSName *string `location:"querystring" locationName:"dnsname" type:"string"` + + // If the request returned more than one page of results, submit another request + // and specify the value of NextDNSName and NextHostedZoneId from the last response + // in the DNSName and HostedZoneId parameters to get the next page of results. + HostedZoneId *string `location:"querystring" locationName:"hostedzoneid" type:"string"` + + // Specify the maximum number of hosted zones to return per page of results. + MaxItems *string `location:"querystring" locationName:"maxitems" type:"string"` +} + +// String returns the string representation +func (s ListHostedZonesByNameInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListHostedZonesByNameInput) GoString() string { + return s.String() +} + +// A complex type that contains the response for the request. +type ListHostedZonesByNameOutput struct { + _ struct{} `type:"structure"` + + // The DNSName value sent in the request. + DNSName *string `type:"string"` + + // The HostedZoneId value sent in the request. + HostedZoneId *string `type:"string"` + + // A complex type that contains information about the hosted zones associated + // with the current AWS account. + HostedZones []*HostedZone `locationNameList:"HostedZone" type:"list" required:"true"` + + // A flag indicating whether there are more hosted zones to be listed. If your + // results were truncated, you can make a follow-up request for the next page + // of results by using the NextDNSName and NextHostedZoneId elements. + // + // Valid Values: true | false + IsTruncated *bool `type:"boolean" required:"true"` + + // The maximum number of hosted zones to be included in the response body. If + // the number of hosted zones associated with this AWS account exceeds MaxItems, + // the value of ListHostedZonesByNameResponse$IsTruncated in the response is + // true. Call ListHostedZonesByName again and specify the value of ListHostedZonesByNameResponse$NextDNSName + // and ListHostedZonesByNameResponse$NextHostedZoneId elements respectively + // to get the next page of results. + MaxItems *string `type:"string" required:"true"` + + // If ListHostedZonesByNameResponse$IsTruncated is true, there are more hosted + // zones associated with the current AWS account. To get the next page of results, + // make another request to ListHostedZonesByName. Specify the value of ListHostedZonesByNameResponse$NextDNSName + // in the ListHostedZonesByNameRequest$DNSName element and ListHostedZonesByNameResponse$NextHostedZoneId + // in the ListHostedZonesByNameRequest$HostedZoneId element. + NextDNSName *string `type:"string"` + + // If ListHostedZonesByNameResponse$IsTruncated is true, there are more hosted + // zones associated with the current AWS account. To get the next page of results, + // make another request to ListHostedZonesByName. 
Specify the value of ListHostedZonesByNameResponse$NextDNSName + // in the ListHostedZonesByNameRequest$DNSName element and ListHostedZonesByNameResponse$NextHostedZoneId + // in the ListHostedZonesByNameRequest$HostedZoneId element. + NextHostedZoneId *string `type:"string"` +} + +// String returns the string representation +func (s ListHostedZonesByNameOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListHostedZonesByNameOutput) GoString() string { + return s.String() +} + +// To retrieve a list of your hosted zones, send a GET request to the 2013-04-01/hostedzone +// resource. The response to this request includes a HostedZones element with +// zero or more HostedZone child elements. By default, the list of hosted zones +// is displayed on a single page. You can control the length of the page that +// is displayed by using the MaxItems parameter. You can use the Marker parameter +// to control the hosted zone that the list begins with. For more information +// about listing hosted zones, see Listing the Hosted Zones for an AWS Account +// (http://docs.aws.amazon.com/Route53/latest/DeveloperGuide/ListInfoOnHostedZone.html) +// in the Amazon Route 53 Developer Guide. +// +// Amazon Route 53 returns a maximum of 100 items. If you set MaxItems to +// a value greater than 100, Amazon Route 53 returns only the first 100. +type ListHostedZonesInput struct { + _ struct{} `type:"structure"` + + DelegationSetId *string `location:"querystring" locationName:"delegationsetid" type:"string"` + + // If the request returned more than one page of results, submit another request + // and specify the value of NextMarker from the last response in the marker + // parameter to get the next page of results. + Marker *string `location:"querystring" locationName:"marker" type:"string"` + + // Specify the maximum number of hosted zones to return per page of results. + MaxItems *string `location:"querystring" locationName:"maxitems" type:"string"` +} + +// String returns the string representation +func (s ListHostedZonesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListHostedZonesInput) GoString() string { + return s.String() +} + +// A complex type that contains the response for the request. +type ListHostedZonesOutput struct { + _ struct{} `type:"structure"` + + // A complex type that contains information about the hosted zones associated + // with the current AWS account. + HostedZones []*HostedZone `locationNameList:"HostedZone" type:"list" required:"true"` + + // A flag indicating whether there are more hosted zones to be listed. If your + // results were truncated, you can make a follow-up request for the next page + // of results by using the Marker element. + // + // Valid Values: true | false + IsTruncated *bool `type:"boolean" required:"true"` + + // If the request returned more than one page of results, submit another request + // and specify the value of NextMarker from the last response in the marker + // parameter to get the next page of results. + Marker *string `type:"string" required:"true"` + + // The maximum number of hosted zones to be included in the response body. If + // the number of hosted zones associated with this AWS account exceeds MaxItems, + // the value of ListHostedZonesResponse$IsTruncated in the response is true. 
+ // Call ListHostedZones again and specify the value of ListHostedZonesResponse$NextMarker + // in the ListHostedZonesRequest$Marker element to get the next page of results. + MaxItems *string `type:"string" required:"true"` + + // Indicates where to continue listing hosted zones. If ListHostedZonesResponse$IsTruncated + // is true, make another request to ListHostedZones and include the value of + // the NextMarker element in the Marker element to get the next page of results. + NextMarker *string `type:"string"` +} + +// String returns the string representation +func (s ListHostedZonesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListHostedZonesOutput) GoString() string { + return s.String() +} + +// The input for a ListResourceRecordSets request. +type ListResourceRecordSetsInput struct { + _ struct{} `type:"structure"` + + // The ID of the hosted zone that contains the resource record sets that you + // want to get. + HostedZoneId *string `location:"uri" locationName:"Id" type:"string" required:"true"` + + // The maximum number of records you want in the response body. + MaxItems *string `location:"querystring" locationName:"maxitems" type:"string"` + + // Weighted resource record sets only: If results were truncated for a given + // DNS name and type, specify the value of ListResourceRecordSetsResponse$NextRecordIdentifier + // from the previous response to get the next resource record set that has the + // current DNS name and type. + StartRecordIdentifier *string `location:"querystring" locationName:"identifier" min:"1" type:"string"` + + // The first name in the lexicographic ordering of domain names that you want + // the ListResourceRecordSets request to list. + StartRecordName *string `location:"querystring" locationName:"name" type:"string"` + + // The DNS type at which to begin the listing of resource record sets. + // + // Valid values: A | AAAA | CNAME | MX | NS | PTR | SOA | SPF | SRV | TXT + // + // Values for Weighted Resource Record Sets: A | AAAA | CNAME | TXT + // + // Values for Regional Resource Record Sets: A | AAAA | CNAME | TXT + // + // Values for Alias Resource Record Sets: A | AAAA + // + // Constraint: Specifying type without specifying name returns an InvalidInput + // error. + StartRecordType *string `location:"querystring" locationName:"type" type:"string" enum:"RRType"` +} + +// String returns the string representation +func (s ListResourceRecordSetsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListResourceRecordSetsInput) GoString() string { + return s.String() +} + +// A complex type that contains information about the resource record sets that +// are returned by the request and information about the response. +type ListResourceRecordSetsOutput struct { + _ struct{} `type:"structure"` + + // A flag that indicates whether there are more resource record sets to be listed. + // If your results were truncated, you can make a follow-up request for the + // next page of results by using the ListResourceRecordSetsResponse$NextRecordName + // element. + // + // Valid Values: true | false + IsTruncated *bool `type:"boolean" required:"true"` + + // The maximum number of records you requested. The maximum value of MaxItems + // is 100. 
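+	//
+	// A sketch of the corresponding request, assuming an initialized
+	// *route53.Route53 client named svc (the zone ID is illustrative):
+	//
+	//     out, err := svc.ListResourceRecordSets(&route53.ListResourceRecordSetsInput{
+	//         HostedZoneId: aws.String("Z1EXAMPLE"),
+	//         MaxItems:     aws.String("100"),
+	//     })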
+ MaxItems *string `type:"string" required:"true"` + + // Weighted resource record sets only: If results were truncated for a given + // DNS name and type, the value of SetIdentifier for the next resource record + // set that has the current DNS name and type. + NextRecordIdentifier *string `min:"1" type:"string"` + + // If the results were truncated, the name of the next record in the list. This + // element is present only if ListResourceRecordSetsResponse$IsTruncated is + // true. + NextRecordName *string `type:"string"` + + // If the results were truncated, the type of the next record in the list. This + // element is present only if ListResourceRecordSetsResponse$IsTruncated is + // true. + NextRecordType *string `type:"string" enum:"RRType"` + + // A complex type that contains information about the resource record sets that + // are returned by the request. + ResourceRecordSets []*ResourceRecordSet `locationNameList:"ResourceRecordSet" type:"list" required:"true"` +} + +// String returns the string representation +func (s ListResourceRecordSetsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListResourceRecordSetsOutput) GoString() string { + return s.String() +} + +// To retrieve a list of your reusable delegation sets, send a GET request to +// the 2013-04-01/delegationset resource. The response to this request includes +// a DelegationSets element with zero or more DelegationSet child elements. +// By default, the list of reusable delegation sets is displayed on a single +// page. You can control the length of the page that is displayed by using the +// MaxItems parameter. You can use the Marker parameter to control the delegation +// set that the list begins with. +// +// Amazon Route 53 returns a maximum of 100 items. If you set MaxItems to +// a value greater than 100, Amazon Route 53 returns only the first 100. +type ListReusableDelegationSetsInput struct { + _ struct{} `type:"structure"` + + // If the request returned more than one page of results, submit another request + // and specify the value of NextMarker from the last response in the marker + // parameter to get the next page of results. + Marker *string `location:"querystring" locationName:"marker" type:"string"` + + // Specify the maximum number of reusable delegation sets to return per page + // of results. + MaxItems *string `location:"querystring" locationName:"maxitems" type:"string"` +} + +// String returns the string representation +func (s ListReusableDelegationSetsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListReusableDelegationSetsInput) GoString() string { + return s.String() +} + +// A complex type that contains the response for the request. +type ListReusableDelegationSetsOutput struct { + _ struct{} `type:"structure"` + + // A complex type that contains information about the reusable delegation sets + // associated with the current AWS account. + DelegationSets []*DelegationSet `locationNameList:"DelegationSet" type:"list" required:"true"` + + // A flag indicating whether there are more reusable delegation sets to be listed. + // If your results were truncated, you can make a follow-up request for the + // next page of results by using the Marker element. 
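+	//
+	// A follow-up request sketch, assuming an initialized *route53.Route53 client
+	// named svc and a previous, truncated response named prev:
+	//
+	//     out, err := svc.ListReusableDelegationSets(&route53.ListReusableDelegationSetsInput{
+	//         Marker: prev.NextMarker,
+	//     })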
+	//
+	// Valid Values: true | false
+	IsTruncated *bool `type:"boolean" required:"true"`
+
+	// If the request returned more than one page of results, submit another request
+	// and specify the value of NextMarker from the last response in the marker
+	// parameter to get the next page of results.
+	Marker *string `type:"string" required:"true"`
+
+	// The maximum number of reusable delegation sets to be included in the response
+	// body. If the number of reusable delegation sets associated with this AWS
+	// account exceeds MaxItems, the value of ListReusableDelegationSetsResponse$IsTruncated
+	// in the response is true. Call ListReusableDelegationSets again and specify
+	// the value of ListReusableDelegationSetsResponse$NextMarker in the ListReusableDelegationSetsRequest$Marker
+	// element to get the next page of results.
+	MaxItems *string `type:"string" required:"true"`
+
+	// Indicates where to continue listing reusable delegation sets. If ListReusableDelegationSetsResponse$IsTruncated
+	// is true, make another request to ListReusableDelegationSets and include the
+	// value of the NextMarker element in the Marker element to get the next page
+	// of results.
+	NextMarker *string `type:"string"`
+}
+
+// String returns the string representation
+func (s ListReusableDelegationSetsOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListReusableDelegationSetsOutput) GoString() string {
+	return s.String()
+}
+
+// A complex type containing information about a request for a list of the tags
+// that are associated with an individual resource.
+type ListTagsForResourceInput struct {
+	_ struct{} `type:"structure"`
+
+	// The ID of the resource for which you want to retrieve tags.
+	ResourceId *string `location:"uri" locationName:"ResourceId" type:"string" required:"true"`
+
+	// The type of the resource.
+	//
+	// - The resource type for health checks is healthcheck.
+	//
+	// - The resource type for hosted zones is hostedzone.
+	ResourceType *string `location:"uri" locationName:"ResourceType" type:"string" required:"true" enum:"TagResourceType"`
+}
+
+// String returns the string representation
+func (s ListTagsForResourceInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListTagsForResourceInput) GoString() string {
+	return s.String()
+}
+
+// A complex type containing tags for the specified resource.
+type ListTagsForResourceOutput struct {
+	_ struct{} `type:"structure"`
+
+	// A ResourceTagSet containing tags associated with the specified resource.
+	ResourceTagSet *ResourceTagSet `type:"structure" required:"true"`
+}
+
+// String returns the string representation
+func (s ListTagsForResourceOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListTagsForResourceOutput) GoString() string {
+	return s.String()
+}
+
+// A complex type containing information about a request for a list of the tags
+// that are associated with up to 10 specified resources.
+type ListTagsForResourcesInput struct {
+	_ struct{} `locationName:"ListTagsForResourcesRequest" type:"structure" xmlURI:"https://route53.amazonaws.com/doc/2013-04-01/"`
+
+	// A complex type that contains the ResourceId element for each resource for
+	// which you want to get a list of tags.
+	ResourceIds []*string `locationNameList:"ResourceId" min:"1" type:"list" required:"true"`
+
+	// The type of the resources.
+ // + // - The resource type for health checks is healthcheck. + // + // - The resource type for hosted zones is hostedzone. + ResourceType *string `location:"uri" locationName:"ResourceType" type:"string" required:"true" enum:"TagResourceType"` +} + +// String returns the string representation +func (s ListTagsForResourcesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListTagsForResourcesInput) GoString() string { + return s.String() +} + +// A complex type containing tags for the specified resources. +type ListTagsForResourcesOutput struct { + _ struct{} `type:"structure"` + + // A list of ResourceTagSets containing tags associated with the specified resources. + ResourceTagSets []*ResourceTagSet `locationNameList:"ResourceTagSet" type:"list" required:"true"` +} + +// String returns the string representation +func (s ListTagsForResourcesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListTagsForResourcesOutput) GoString() string { + return s.String() +} + +// A complex type that contains the information about the request to list the +// traffic policies that are associated with the current AWS account. +type ListTrafficPoliciesInput struct { + _ struct{} `type:"structure"` + + // The maximum number of traffic policies to be included in the response body + // for this request. If you have more than MaxItems traffic policies, the value + // of the IsTruncated element in the response is true, and the value of the + // TrafficPolicyIdMarker element is the ID of the first traffic policy in the + // next group of MaxItems traffic policies. + MaxItems *string `location:"querystring" locationName:"maxitems" type:"string"` + + // For your first request to ListTrafficPolicies, do not include the TrafficPolicyIdMarker + // parameter. + // + // If you have more traffic policies than the value of MaxItems, ListTrafficPolicies + // returns only the first MaxItems traffic policies. To get the next group of + // MaxItems policies, submit another request to ListTrafficPolicies. For the + // value of TrafficPolicyIdMarker, specify the value of the TrafficPolicyIdMarker + // element that was returned in the previous response. + // + // Policies are listed in the order in which they were created. + TrafficPolicyIdMarker *string `location:"querystring" locationName:"trafficpolicyid" type:"string"` +} + +// String returns the string representation +func (s ListTrafficPoliciesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListTrafficPoliciesInput) GoString() string { + return s.String() +} + +// A complex type that contains the response information for the request. +type ListTrafficPoliciesOutput struct { + _ struct{} `type:"structure"` + + // A flag that indicates whether there are more traffic policies to be listed. + // If the response was truncated, you can get the next group of MaxItems traffic + // policies by calling ListTrafficPolicies again and specifying the value of + // the TrafficPolicyIdMarker element in the TrafficPolicyIdMarker request parameter. + // + // Valid Values: true | false + IsTruncated *bool `type:"boolean" required:"true"` + + // The value that you specified for the MaxItems parameter in the call to ListTrafficPolicies + // that produced the current response. 
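+	//
+	// A paging sketch, assuming an initialized *route53.Route53 client named svc:
+	//
+	//     out, err := svc.ListTrafficPolicies(&route53.ListTrafficPoliciesInput{
+	//         MaxItems: aws.String("100"),
+	//     })
+	//     if err == nil && aws.BoolValue(out.IsTruncated) {
+	//         // Resend with TrafficPolicyIdMarker set to out.TrafficPolicyIdMarker.
+	//     }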
+ MaxItems *string `type:"string" required:"true"` + + // If the value of IsTruncated is true, TrafficPolicyIdMarker is the ID of the + // first traffic policy in the next group of MaxItems traffic policies. + TrafficPolicyIdMarker *string `type:"string" required:"true"` + + // A list that contains one TrafficPolicySummary element for each traffic policy + // that was created by the current AWS account. + TrafficPolicySummaries []*TrafficPolicySummary `locationNameList:"TrafficPolicySummary" type:"list" required:"true"` +} + +// String returns the string representation +func (s ListTrafficPoliciesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListTrafficPoliciesOutput) GoString() string { + return s.String() +} + +// A request for the traffic policy instances that you created in a specified +// hosted zone. +type ListTrafficPolicyInstancesByHostedZoneInput struct { + _ struct{} `type:"structure"` + + // The ID of the hosted zone for which you want to list traffic policy instances. + HostedZoneId *string `location:"querystring" locationName:"id" type:"string" required:"true"` + + // The maximum number of traffic policy instances to be included in the response + // body for this request. If you have more than MaxItems traffic policy instances, + // the value of the IsTruncated element in the response is true, and the values + // of HostedZoneIdMarker, TrafficPolicyInstanceNameMarker, and TrafficPolicyInstanceTypeMarker + // represent the first traffic policy instance in the next group of MaxItems + // traffic policy instances. + MaxItems *string `location:"querystring" locationName:"maxitems" type:"string"` + + // For the first request to ListTrafficPolicyInstancesByHostedZone, omit this + // value. + // + // If the value of IsTruncated in the previous response was true, TrafficPolicyInstanceNameMarker + // is the name of the first traffic policy instance in the next group of MaxItems + // traffic policy instances. + // + // If the value of IsTruncated in the previous response was false, there are + // no more traffic policy instances to get for this hosted zone. + // + // If the value of IsTruncated in the previous response was false, omit this + // value. + TrafficPolicyInstanceNameMarker *string `location:"querystring" locationName:"trafficpolicyinstancename" type:"string"` + + // For the first request to ListTrafficPolicyInstancesByHostedZone, omit this + // value. + // + // If the value of IsTruncated in the previous response was true, TrafficPolicyInstanceTypeMarker + // is the DNS type of the first traffic policy instance in the next group of + // MaxItems traffic policy instances. + // + // If the value of IsTruncated in the previous response was false, there are + // no more traffic policy instances to get for this hosted zone. + TrafficPolicyInstanceTypeMarker *string `location:"querystring" locationName:"trafficpolicyinstancetype" type:"string" enum:"RRType"` +} + +// String returns the string representation +func (s ListTrafficPolicyInstancesByHostedZoneInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListTrafficPolicyInstancesByHostedZoneInput) GoString() string { + return s.String() +} + +// A complex type that contains the response information for the request. +type ListTrafficPolicyInstancesByHostedZoneOutput struct { + _ struct{} `type:"structure"` + + // A flag that indicates whether there are more traffic policy instances to + // be listed. 
If the response was truncated, you can get the next group of MaxItems + // traffic policy instances by calling ListTrafficPolicyInstancesByHostedZone + // again and specifying the values of the HostedZoneIdMarker, TrafficPolicyInstanceNameMarker, + // and TrafficPolicyInstanceTypeMarker elements in the corresponding request + // parameters. + // + // Valid Values: true | false + IsTruncated *bool `type:"boolean" required:"true"` + + // The value that you specified for the MaxItems parameter in the call to ListTrafficPolicyInstancesByHostedZone + // that produced the current response. + MaxItems *string `type:"string" required:"true"` + + // If IsTruncated is true, TrafficPolicyInstanceNameMarker is the name of the + // first traffic policy instance in the next group of MaxItems traffic policy + // instances. + TrafficPolicyInstanceNameMarker *string `type:"string"` + + // If IsTruncated is true, TrafficPolicyInstanceTypeMarker is the DNS type of + // the resource record sets that are associated with the first traffic policy + // instance in the next group of MaxItems traffic policy instances. + TrafficPolicyInstanceTypeMarker *string `type:"string" enum:"RRType"` + + // A list that contains one TrafficPolicyInstance element for each traffic policy + // instance that matches the elements in the request. + TrafficPolicyInstances []*TrafficPolicyInstance `locationNameList:"TrafficPolicyInstance" type:"list" required:"true"` +} + +// String returns the string representation +func (s ListTrafficPolicyInstancesByHostedZoneOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListTrafficPolicyInstancesByHostedZoneOutput) GoString() string { + return s.String() +} + +// A complex type that contains the information about the request to list your +// traffic policy instances. +type ListTrafficPolicyInstancesByPolicyInput struct { + _ struct{} `type:"structure"` + + // For the first request to ListTrafficPolicyInstancesByPolicy, omit this value. + // + // If the value of IsTruncated in the previous response was true, HostedZoneIdMarker + // is the ID of the hosted zone for the first traffic policy instance in the + // next group of MaxItems traffic policy instances. + // + // If the value of IsTruncated in the previous response was false, there are + // no more traffic policy instances to get for this hosted zone. + // + // If the value of IsTruncated in the previous response was false, omit this + // value. + HostedZoneIdMarker *string `location:"querystring" locationName:"hostedzoneid" type:"string"` + + // The maximum number of traffic policy instances to be included in the response + // body for this request. If you have more than MaxItems traffic policy instances, + // the value of the IsTruncated element in the response is true, and the values + // of HostedZoneIdMarker, TrafficPolicyInstanceNameMarker, and TrafficPolicyInstanceTypeMarker + // represent the first traffic policy instance in the next group of MaxItems + // traffic policy instances. + MaxItems *string `location:"querystring" locationName:"maxitems" type:"string"` + + // The ID of the traffic policy for which you want to list traffic policy instances. + TrafficPolicyId *string `location:"querystring" locationName:"id" type:"string" required:"true"` + + // For the first request to ListTrafficPolicyInstancesByPolicy, omit this value. 
+ // + // If the value of IsTruncated in the previous response was true, TrafficPolicyInstanceNameMarker + // is the name of the first traffic policy instance in the next group of MaxItems + // traffic policy instances. + // + // If the value of IsTruncated in the previous response was false, there are + // no more traffic policy instances to get for this hosted zone. + // + // If the value of IsTruncated in the previous response was false, omit this + // value. + TrafficPolicyInstanceNameMarker *string `location:"querystring" locationName:"trafficpolicyinstancename" type:"string"` + + // For the first request to ListTrafficPolicyInstancesByPolicy, omit this value. + // + // If the value of IsTruncated in the previous response was true, TrafficPolicyInstanceTypeMarker + // is the DNS type of the first traffic policy instance in the next group of + // MaxItems traffic policy instances. + // + // If the value of IsTruncated in the previous response was false, there are + // no more traffic policy instances to get for this hosted zone. + TrafficPolicyInstanceTypeMarker *string `location:"querystring" locationName:"trafficpolicyinstancetype" type:"string" enum:"RRType"` + + // The version of the traffic policy for which you want to list traffic policy + // instances. The version must be associated with the traffic policy that is + // specified by TrafficPolicyId. + TrafficPolicyVersion *int64 `location:"querystring" locationName:"version" min:"1" type:"integer" required:"true"` +} + +// String returns the string representation +func (s ListTrafficPolicyInstancesByPolicyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListTrafficPolicyInstancesByPolicyInput) GoString() string { + return s.String() +} + +// A complex type that contains the response information for the request. +type ListTrafficPolicyInstancesByPolicyOutput struct { + _ struct{} `type:"structure"` + + // If IsTruncated is true, HostedZoneIdMarker is the ID of the hosted zone of + // the first traffic policy instance in the next group of MaxItems traffic policy + // instances. + HostedZoneIdMarker *string `type:"string"` + + // A flag that indicates whether there are more traffic policy instances to + // be listed. If the response was truncated, you can get the next group of MaxItems + // traffic policy instances by calling ListTrafficPolicyInstancesByPolicy again + // and specifying the values of the HostedZoneIdMarker, TrafficPolicyInstanceNameMarker, + // and TrafficPolicyInstanceTypeMarker elements in the corresponding request + // parameters. + // + // Valid Values: true | false + IsTruncated *bool `type:"boolean" required:"true"` + + // The value that you specified for the MaxItems parameter in the call to ListTrafficPolicyInstancesByPolicy + // that produced the current response. + MaxItems *string `type:"string" required:"true"` + + // If IsTruncated is true, TrafficPolicyInstanceNameMarker is the name of the + // first traffic policy instance in the next group of MaxItems traffic policy + // instances. + TrafficPolicyInstanceNameMarker *string `type:"string"` + + // If IsTruncated is true, TrafficPolicyInstanceTypeMarker is the DNS type of + // the resource record sets that are associated with the first traffic policy + // instance in the next group of MaxItems traffic policy instances. 
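+	//
+	// A request sketch, assuming an initialized *route53.Route53 client named
+	// svc (the policy ID and version are illustrative):
+	//
+	//     out, err := svc.ListTrafficPolicyInstancesByPolicy(
+	//         &route53.ListTrafficPolicyInstancesByPolicyInput{
+	//             TrafficPolicyId:      aws.String("tp-example-id"),
+	//             TrafficPolicyVersion: aws.Int64(1),
+	//         })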
+ TrafficPolicyInstanceTypeMarker *string `type:"string" enum:"RRType"` + + // A list that contains one TrafficPolicyInstance element for each traffic policy + // instance that matches the elements in the request. + TrafficPolicyInstances []*TrafficPolicyInstance `locationNameList:"TrafficPolicyInstance" type:"list" required:"true"` +} + +// String returns the string representation +func (s ListTrafficPolicyInstancesByPolicyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListTrafficPolicyInstancesByPolicyOutput) GoString() string { + return s.String() +} + +// A complex type that contains the information about the request to list your +// traffic policy instances. +type ListTrafficPolicyInstancesInput struct { + _ struct{} `type:"structure"` + + // For the first request to ListTrafficPolicyInstances, omit this value. + // + // If the value of IsTruncated in the previous response was true, you have + // more traffic policy instances. To get the next group of MaxItems traffic + // policy instances, submit another ListTrafficPolicyInstances request. For + // the value of HostedZoneIdMarker, specify the value of HostedZoneIdMarker + // from the previous response, which is the hosted zone ID of the first traffic + // policy instance in the next group of MaxItems traffic policy instances. + // + // If the value of IsTruncated in the previous response was false, there are + // no more traffic policy instances to get. + HostedZoneIdMarker *string `location:"querystring" locationName:"hostedzoneid" type:"string"` + + // The maximum number of traffic policy instances to be included in the response + // body for this request. If you have more than MaxItems traffic policy instances, + // the value of the IsTruncated element in the response is true, and the values + // of HostedZoneIdMarker, TrafficPolicyInstanceNameMarker, and TrafficPolicyInstanceTypeMarker + // represent the first traffic policy instance in the next group of MaxItems + // traffic policy instances. + MaxItems *string `location:"querystring" locationName:"maxitems" type:"string"` + + // For the first request to ListTrafficPolicyInstances, omit this value. + // + // If the value of IsTruncated in the previous response was true, TrafficPolicyInstanceNameMarker + // is the name of the first traffic policy instance in the next group of MaxItems + // traffic policy instances. + // + // If the value of IsTruncated in the previous response was false, there are + // no more traffic policy instances to get. + TrafficPolicyInstanceNameMarker *string `location:"querystring" locationName:"trafficpolicyinstancename" type:"string"` + + // For the first request to ListTrafficPolicyInstances, omit this value. + // + // If the value of IsTruncated in the previous response was true, TrafficPolicyInstanceTypeMarker + // is the DNS type of the first traffic policy instance in the next group of + // MaxItems traffic policy instances. + // + // If the value of IsTruncated in the previous response was false, there are + // no more traffic policy instances to get. 
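+	//
+	// A first-request sketch (all markers omitted), assuming an initialized
+	// *route53.Route53 client named svc:
+	//
+	//     out, err := svc.ListTrafficPolicyInstances(&route53.ListTrafficPolicyInstancesInput{
+	//         MaxItems: aws.String("100"),
+	//     })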
+ TrafficPolicyInstanceTypeMarker *string `location:"querystring" locationName:"trafficpolicyinstancetype" type:"string" enum:"RRType"` +} + +// String returns the string representation +func (s ListTrafficPolicyInstancesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListTrafficPolicyInstancesInput) GoString() string { + return s.String() +} + +// A complex type that contains the response information for the request. +type ListTrafficPolicyInstancesOutput struct { + _ struct{} `type:"structure"` + + // If IsTruncated is true, HostedZoneIdMarker is the ID of the hosted zone of + // the first traffic policy instance in the next group of MaxItems traffic policy + // instances. + HostedZoneIdMarker *string `type:"string"` + + // A flag that indicates whether there are more traffic policy instances to + // be listed. If the response was truncated, you can get the next group of MaxItems + // traffic policy instances by calling ListTrafficPolicyInstances again and + // specifying the values of the HostedZoneIdMarker, TrafficPolicyInstanceNameMarker, + // and TrafficPolicyInstanceTypeMarker elements in the corresponding request + // parameters. + // + // Valid Values: true | false + IsTruncated *bool `type:"boolean" required:"true"` + + // The value that you specified for the MaxItems parameter in the call to ListTrafficPolicyInstances + // that produced the current response. + MaxItems *string `type:"string" required:"true"` + + // If IsTruncated is true, TrafficPolicyInstanceNameMarker is the name of the + // first traffic policy instance in the next group of MaxItems traffic policy + // instances. + TrafficPolicyInstanceNameMarker *string `type:"string"` + + // If IsTruncated is true, TrafficPolicyInstanceTypeMarker is the DNS type of + // the resource record sets that are associated with the first traffic policy + // instance in the next group of MaxItems traffic policy instances. + TrafficPolicyInstanceTypeMarker *string `type:"string" enum:"RRType"` + + // A list that contains one TrafficPolicyInstance element for each traffic policy + // instance that matches the elements in the request. + TrafficPolicyInstances []*TrafficPolicyInstance `locationNameList:"TrafficPolicyInstance" type:"list" required:"true"` +} + +// String returns the string representation +func (s ListTrafficPolicyInstancesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListTrafficPolicyInstancesOutput) GoString() string { + return s.String() +} + +// A complex type that contains the information about the request to list your +// traffic policies. +type ListTrafficPolicyVersionsInput struct { + _ struct{} `type:"structure"` + + // Specify the value of Id of the traffic policy for which you want to list + // all versions. + Id *string `location:"uri" locationName:"Id" type:"string" required:"true"` + + // The maximum number of traffic policy versions that you want Amazon Route + // 53 to include in the response body for this request. If the specified traffic + // policy has more than MaxItems versions, the value of the IsTruncated element + // in the response is true, and the value of the TrafficPolicyVersionMarker + // element is the ID of the first version in the next group of MaxItems traffic + // policy versions. 
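+	//
+	// A request sketch, assuming an initialized *route53.Route53 client named
+	// svc (the policy ID is illustrative):
+	//
+	//     out, err := svc.ListTrafficPolicyVersions(&route53.ListTrafficPolicyVersionsInput{
+	//         Id:       aws.String("tp-example-id"),
+	//         MaxItems: aws.String("100"),
+	//     })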
+ MaxItems *string `location:"querystring" locationName:"maxitems" type:"string"` + + // For your first request to ListTrafficPolicyVersions, do not include the TrafficPolicyVersionMarker + // parameter. + // + // If you have more traffic policy versions than the value of MaxItems, ListTrafficPolicyVersions + // returns only the first group of MaxItems versions. To get the next group + // of MaxItems traffic policy versions, submit another request to ListTrafficPolicyVersions. + // For the value of TrafficPolicyVersionMarker, specify the value of the TrafficPolicyVersionMarker + // element that was returned in the previous response. + // + // Traffic policy versions are listed in sequential order. + TrafficPolicyVersionMarker *string `location:"querystring" locationName:"trafficpolicyversion" type:"string"` +} + +// String returns the string representation +func (s ListTrafficPolicyVersionsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListTrafficPolicyVersionsInput) GoString() string { + return s.String() +} + +// A complex type that contains the response information for the request. +type ListTrafficPolicyVersionsOutput struct { + _ struct{} `type:"structure"` + + // A flag that indicates whether there are more traffic policies to be listed. + // If the response was truncated, you can get the next group of maxitems traffic + // policies by calling ListTrafficPolicyVersions again and specifying the value + // of the NextMarker element in the marker parameter. + // + // Valid Values: true | false + IsTruncated *bool `type:"boolean" required:"true"` + + // The value that you specified for the maxitems parameter in the call to ListTrafficPolicyVersions + // that produced the current response. + MaxItems *string `type:"string" required:"true"` + + // A list that contains one TrafficPolicy element for each traffic policy version + // that is associated with the specified traffic policy. + TrafficPolicies []*TrafficPolicy `locationNameList:"TrafficPolicy" type:"list" required:"true"` + + // If IsTruncated is true, the value of TrafficPolicyVersionMarker identifies + // the first traffic policy in the next group of MaxItems traffic policies. + // Call ListTrafficPolicyVersions again and specify the value of TrafficPolicyVersionMarker + // in the TrafficPolicyVersionMarker request parameter. + // + // This element is present only if IsTruncated is true. + TrafficPolicyVersionMarker *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s ListTrafficPolicyVersionsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListTrafficPolicyVersionsOutput) GoString() string { + return s.String() +} + +// A complex type that contains the value of the Value element for the current +// resource record set. +type ResourceRecord struct { + _ struct{} `type:"structure"` + + // The current or new DNS record value, not to exceed 4,000 characters. In the + // case of a DELETE action, if the current value does not match the actual value, + // an error is returned. For descriptions about how to format Value for different + // record types, see Supported DNS Resource Record Types (http://docs.aws.amazon.com/Route53/latest/DeveloperGuide/ResourceRecordTypes.html) + // in the Amazon Route 53 Developer Guide. + // + // You can specify more than one value for all record types except CNAME and + // SOA. 
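+	//
+	// For example, a TXT record value might be built like this sketch (the value
+	// is illustrative):
+	//
+	//     rr := &route53.ResourceRecord{
+	//         Value: aws.String("\"v=spf1 -all\""),
+	//     }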
+ Value *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s ResourceRecord) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ResourceRecord) GoString() string { + return s.String() +} + +// A complex type that contains information about the current resource record +// set. +type ResourceRecordSet struct { + _ struct{} `type:"structure"` + + // Alias resource record sets only: Information about the AWS resource to which + // you are redirecting traffic. + AliasTarget *AliasTarget `type:"structure"` + + // Failover resource record sets only: To configure failover, you add the Failover + // element to two resource record sets. For one resource record set, you specify + // PRIMARY as the value for Failover; for the other resource record set, you + // specify SECONDARY. In addition, you include the HealthCheckId element and + // specify the health check that you want Amazon Route 53 to perform for each + // resource record set. + // + // You can create failover and failover alias resource record sets only in + // public hosted zones. Except where noted, the following failover behaviors + // assume that you have included the HealthCheckId element in both resource + // record sets: + // + // When the primary resource record set is healthy, Amazon Route 53 responds + // to DNS queries with the applicable value from the primary resource record + // set regardless of the health of the secondary resource record set. When the + // primary resource record set is unhealthy and the secondary resource record + // set is healthy, Amazon Route 53 responds to DNS queries with the applicable + // value from the secondary resource record set. When the secondary resource + // record set is unhealthy, Amazon Route 53 responds to DNS queries with the + // applicable value from the primary resource record set regardless of the health + // of the primary resource record set. If you omit the HealthCheckId element + // for the secondary resource record set, and if the primary resource record + // set is unhealthy, Amazon Route 53 always responds to DNS queries with the + // applicable value from the secondary resource record set. This is true regardless + // of the health of the associated endpoint. You cannot create non-failover + // resource record sets that have the same values for the Name and Type elements + // as failover resource record sets. + // + // For failover alias resource record sets, you must also include the EvaluateTargetHealth + // element and set the value to true. + // + // For more information about configuring failover for Amazon Route 53, see + // Amazon Route 53 Health Checks and DNS Failover (http://docs.aws.amazon.com/Route53/latest/DeveloperGuide/dns-failover.html) + // in the Amazon Route 53 Developer Guide. + // + // Valid values: PRIMARY | SECONDARY + Failover *string `type:"string" enum:"ResourceRecordSetFailover"` + + // Geo location resource record sets only: A complex type that lets you control + // how Amazon Route 53 responds to DNS queries based on the geographic origin + // of the query. For example, if you want all queries from Africa to be routed + // to a web server with an IP address of 192.0.2.111, create a resource record + // set with a Type of A and a ContinentCode of AF. + // + // You can create geolocation and geolocation alias resource record sets only + // in public hosted zones. 
If you create separate resource record sets for overlapping
+	// geographic regions (for example, one resource record set for a continent
+	// and one for a country on the same continent), priority goes to the smallest
+	// geographic region. This allows you to route most queries for a continent
+	// to one resource and to route queries for a country on that continent to a
+	// different resource.
+	//
+	// You cannot create two geolocation resource record sets that specify the
+	// same geographic location.
+	//
+	// The value * in the CountryCode element matches all geographic locations
+	// that aren't specified in other geolocation resource record sets that have
+	// the same values for the Name and Type elements.
+	//
+	// Geolocation works by mapping IP addresses to locations. However, some IP
+	// addresses aren't mapped to geographic locations, so even if you create geolocation
+	// resource record sets that cover all seven continents, Amazon Route 53 will
+	// receive some DNS queries from locations that it can't identify. We recommend
+	// that you create a resource record set for which the value of CountryCode
+	// is *, which handles both queries that come from locations for which you haven't
+	// created geolocation resource record sets and queries from IP addresses that
+	// aren't mapped to a location. If you don't create a * resource record set,
+	// Amazon Route 53 returns a "no answer" response for queries from those locations.
+	// You cannot create non-geolocation resource record sets that have the same
+	// values for the Name and Type elements as geolocation resource record sets.
+	GeoLocation *GeoLocation `type:"structure"`
+
+	// Health Check resource record sets only, not required for alias resource record
+	// sets: An identifier that is used to identify the health check associated
+	// with the resource record set.
+	HealthCheckId *string `type:"string"`
+
+	// The name of the domain you want to perform the action on.
+	//
+	// Enter a fully qualified domain name, for example, www.example.com. You can
+	// optionally include a trailing dot. If you omit the trailing dot, Amazon Route
+	// 53 still assumes that the domain name that you specify is fully qualified.
+	// This means that Amazon Route 53 treats www.example.com (without a trailing
+	// dot) and www.example.com. (with a trailing dot) as identical.
+	//
+	// For information about how to specify characters other than a-z, 0-9, and
+	// - (hyphen) and how to specify internationalized domain names, see DNS Domain
+	// Name Format (http://docs.aws.amazon.com/Route53/latest/DeveloperGuide/DomainNameFormat.html)
+	// in the Amazon Route 53 Developer Guide.
+	//
+	// You can use an asterisk (*) character in the name. DNS treats the * character
+	// either as a wildcard or as the * character (ASCII 42), depending on where
+	// it appears in the name. For more information, see Using an Asterisk (*) in
+	// the Names of Hosted Zones and Resource Record Sets (http://docs.aws.amazon.com/Route53/latest/DeveloperGuide/DomainNameFormat.html#domain-name-format-asterisk)
+	// in the Amazon Route 53 Developer Guide.
+	//
+	// You can't use the * wildcard for resource record sets that have a type
+	// of NS.
+	Name *string `type:"string" required:"true"`
+
+	// Latency-based resource record sets only: The Amazon EC2 region where the
+	// resource that is specified in this resource record set resides.
The resource + // typically is an AWS resource, such as an Amazon EC2 instance or an ELB load + // balancer, and is referred to by an IP address or a DNS domain name, depending + // on the record type. + // + // You can create latency and latency alias resource record sets only in public + // hosted zones. When Amazon Route 53 receives a DNS query for a domain name + // and type for which you have created latency resource record sets, Amazon + // Route 53 selects the latency resource record set that has the lowest latency + // between the end user and the associated Amazon EC2 region. Amazon Route 53 + // then returns the value that is associated with the selected resource record + // set. + // + // Note the following: + // + // You can only specify one ResourceRecord per latency resource record set. + // You can only create one latency resource record set for each Amazon EC2 region. + // You are not required to create latency resource record sets for all Amazon + // EC2 regions. Amazon Route 53 will choose the region with the best latency + // from among the regions for which you create latency resource record sets. + // You cannot create non-latency resource record sets that have the same values + // for the Name and Type elements as latency resource record sets. + Region *string `min:"1" type:"string" enum:"ResourceRecordSetRegion"` + + // A complex type that contains the resource records for the current resource + // record set. + ResourceRecords []*ResourceRecord `locationNameList:"ResourceRecord" min:"1" type:"list"` + + // Weighted, Latency, Geo, and Failover resource record sets only: An identifier + // that differentiates among multiple resource record sets that have the same + // combination of DNS name and type. The value of SetIdentifier must be unique + // for each resource record set that has the same combination of DNS name and + // type. + SetIdentifier *string `min:"1" type:"string"` + + // The cache time to live for the current resource record set. Note the following: + // + // If you're creating an alias resource record set, omit TTL. Amazon Route + // 53 uses the value of TTL for the alias target. If you're associating this + // resource record set with a health check (if you're adding a HealthCheckId + // element), we recommend that you specify a TTL of 60 seconds or less so clients + // respond quickly to changes in health status. All of the resource record sets + // in a group of weighted, latency, geolocation, or failover resource record + // sets must have the same value for TTL. If a group of weighted resource record + // sets includes one or more weighted alias resource record sets for which the + // alias target is an ELB load balancer, we recommend that you specify a TTL + // of 60 seconds for all of the non-alias weighted resource record sets that + // have the same name and type. Values other than 60 seconds (the TTL for load + // balancers) will change the effect of the values that you specify for Weight. + TTL *int64 `type:"long"` + + TrafficPolicyInstanceId *string `type:"string"` + + // The DNS record type. For information about different record types and how + // data is encoded for them, see Supported DNS Resource Record Types (http://docs.aws.amazon.com/Route53/latest/DeveloperGuide/ResourceRecordTypes.html) + // in the Amazon Route 53 Developer Guide. 
+	//
+	// Valid values for basic resource record sets: A | AAAA | CNAME | MX | NS
+	// | PTR | SOA | SPF | SRV | TXT
+	//
+	// Values for weighted, latency, geolocation, and failover resource record
+	// sets: A | AAAA | CNAME | MX | PTR | SPF | SRV | TXT. When creating a group
+	// of weighted, latency, geolocation, or failover resource record sets, specify
+	// the same value for all of the resource record sets in the group.
+	//
+	// SPF records were formerly used to verify the identity of the sender of email
+	// messages. However, we no longer recommend that you create resource record
+	// sets for which the value of Type is SPF. RFC 7208, Sender Policy Framework
+	// (SPF) for Authorizing Use of Domains in Email, Version 1, has been updated
+	// to say, "...[I]ts existence and mechanism defined in [RFC4408] have led to
+	// some interoperability issues. Accordingly, its use is no longer appropriate
+	// for SPF version 1; implementations are not to use it." In RFC 7208, see section
+	// 14.1, The SPF DNS Record Type (http://tools.ietf.org/html/rfc7208#section-14.1).
+	//
+	// Values for alias resource record sets:
+	//
+	// - CloudFront distributions: A
+	//
+	// - ELB load balancers: A | AAAA
+	//
+	// - Amazon S3 buckets: A
+	//
+	// - Another resource record set in this hosted zone: Specify the type of the
+	// resource record set for which you're creating the alias. Specify any value
+	// except NS or SOA.
+	Type *string `type:"string" required:"true" enum:"RRType"`
+
+	// Weighted resource record sets only: Among resource record sets that have
+	// the same combination of DNS name and type, a value that determines the proportion
+	// of DNS queries that Amazon Route 53 responds to using the current resource
+	// record set. Amazon Route 53 calculates the sum of the weights for the resource
+	// record sets that have the same combination of DNS name and type. Amazon Route
+	// 53 then responds to queries based on the ratio of a resource's weight to
+	// the total. Note the following:
+	//
+	// You must specify a value for the Weight element for every weighted resource
+	// record set. You can only specify one ResourceRecord per weighted resource
+	// record set. You cannot create latency, failover, or geolocation resource
+	// record sets that have the same values for the Name and Type elements as weighted
+	// resource record sets. You can create a maximum of 100 weighted resource record
+	// sets that have the same values for the Name and Type elements. For weighted
+	// (but not weighted alias) resource record sets, if you set Weight to 0 for
+	// a resource record set, Amazon Route 53 never responds to queries with the
+	// applicable value for that resource record set. However, if you set Weight
+	// to 0 for all resource record sets that have the same combination of DNS name
+	// and type, traffic is routed to all resources with equal probability.
+	//
+	// The effect of setting Weight to 0 is different when you associate health
+	// checks with weighted resource record sets. For more information, see Options
+	// for Configuring Amazon Route 53 Active-Active and Active-Passive Failover
+	// (http://docs.aws.amazon.com/Route53/latest/DeveloperGuide/dns-failover-configuring-options.html)
+	// in the Amazon Route 53 Developer Guide.
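+	//
+	// A weighted record set sketch (names and values are illustrative):
+	//
+	//     rrset := &route53.ResourceRecordSet{
+	//         Name:          aws.String("www.example.com."),
+	//         Type:          aws.String("A"),
+	//         SetIdentifier: aws.String("one"),
+	//         Weight:        aws.Int64(10),
+	//         TTL:           aws.Int64(60),
+	//         ResourceRecords: []*route53.ResourceRecord{
+	//             {Value: aws.String("192.0.2.44")},
+	//         },
+	//     }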
+ Weight *int64 `type:"long"` +} + +// String returns the string representation +func (s ResourceRecordSet) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ResourceRecordSet) GoString() string { + return s.String() +} + +// A complex type containing a resource and its associated tags. +type ResourceTagSet struct { + _ struct{} `type:"structure"` + + // The ID for the specified resource. + ResourceId *string `type:"string"` + + // The type of the resource. + // + // - The resource type for health checks is healthcheck. + // + // - The resource type for hosted zones is hostedzone. + ResourceType *string `type:"string" enum:"TagResourceType"` + + // The tags associated with the specified resource. + Tags []*Tag `locationNameList:"Tag" min:"1" type:"list"` +} + +// String returns the string representation +func (s ResourceTagSet) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ResourceTagSet) GoString() string { + return s.String() +} + +// A complex type that contains information about the health check status for +// the current observation. +type StatusReport struct { + _ struct{} `type:"structure"` + + // The date and time the health check status was observed, in the format YYYY-MM-DDThh:mm:ssZ, + // as specified in the ISO 8601 standard (for example, 2009-11-19T19:37:58Z). + // The Z after the time indicates that the time is listed in Coordinated Universal + // Time (UTC). + CheckedTime *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + // The observed health check status. + Status *string `type:"string"` +} + +// String returns the string representation +func (s StatusReport) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s StatusReport) GoString() string { + return s.String() +} + +// A single tag containing a key and value. +type Tag struct { + _ struct{} `type:"structure"` + + // The key for a Tag. + Key *string `type:"string"` + + // The value for a Tag. 
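+ //
+ // For illustration only (hypothetical values): a tag might be built as
+ // &Tag{Key: aws.String("Environment"), Value: aws.String("production")}.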
+ Value *string `type:"string"`
+}
+
+// String returns the string representation
+func (s Tag) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s Tag) GoString() string {
+ return s.String()
+}
+
+type TrafficPolicy struct {
+ _ struct{} `type:"structure"`
+
+ Comment *string `type:"string"`
+
+ Document *string `type:"string" required:"true"`
+
+ Id *string `type:"string" required:"true"`
+
+ Name *string `type:"string" required:"true"`
+
+ Type *string `type:"string" required:"true" enum:"RRType"`
+
+ Version *int64 `min:"1" type:"integer" required:"true"`
+}
+
+// String returns the string representation
+func (s TrafficPolicy) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s TrafficPolicy) GoString() string {
+ return s.String()
+}
+
+type TrafficPolicyInstance struct {
+ _ struct{} `type:"structure"`
+
+ HostedZoneId *string `type:"string" required:"true"`
+
+ Id *string `type:"string" required:"true"`
+
+ Message *string `type:"string" required:"true"`
+
+ Name *string `type:"string" required:"true"`
+
+ State *string `type:"string" required:"true"`
+
+ TTL *int64 `type:"long" required:"true"`
+
+ TrafficPolicyId *string `type:"string" required:"true"`
+
+ TrafficPolicyType *string `type:"string" required:"true" enum:"RRType"`
+
+ TrafficPolicyVersion *int64 `min:"1" type:"integer" required:"true"`
+}
+
+// String returns the string representation
+func (s TrafficPolicyInstance) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s TrafficPolicyInstance) GoString() string {
+ return s.String()
+}
+
+type TrafficPolicySummary struct {
+ _ struct{} `type:"structure"`
+
+ Id *string `type:"string" required:"true"`
+
+ LatestVersion *int64 `min:"1" type:"integer" required:"true"`
+
+ Name *string `type:"string" required:"true"`
+
+ TrafficPolicyCount *int64 `min:"1" type:"integer" required:"true"`
+
+ Type *string `type:"string" required:"true" enum:"RRType"`
+}
+
+// String returns the string representation
+func (s TrafficPolicySummary) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s TrafficPolicySummary) GoString() string {
+ return s.String()
+}
+
+// A complex type that contains information about the request to update a health
+// check.
+type UpdateHealthCheckInput struct {
+ _ struct{} `locationName:"UpdateHealthCheckRequest" type:"structure" xmlURI:"https://route53.amazonaws.com/doc/2013-04-01/"`
+
+ // For a specified parent health check, a list of HealthCheckId values for the
+ // associated child health checks.
+ //
+ // Specify this value only if you want to change it.
+ ChildHealthChecks []*string `locationNameList:"ChildHealthCheck" type:"list"`
+
+ // The number of consecutive health checks that an endpoint must pass or fail
+ // for Amazon Route 53 to change the current status of the endpoint from unhealthy
+ // to healthy or vice versa.
+ //
+ // Valid values are integers between 1 and 10. For more information, see "How
+ // Amazon Route 53 Determines Whether an Endpoint Is Healthy" in the Amazon
+ // Route 53 Developer Guide.
+ //
+ // Specify this value only if you want to change it.
+ FailureThreshold *int64 `min:"1" type:"integer"`
+
+ // Fully qualified domain name of the instance to be health checked.
+ //
+ // Specify this value only if you want to change it.
+ FullyQualifiedDomainName *string `type:"string"`
+
+ // The ID of the health check to update.
+ HealthCheckId *string `location:"uri" locationName:"HealthCheckId" type:"string" required:"true"`
+
+ // Optional. When you specify a health check version, Amazon Route 53 compares
+ // this value with the current value in the health check, which prevents you
+ // from updating the health check when the versions don't match. Using HealthCheckVersion
+ // lets you prevent overwriting another change to the health check.
+ HealthCheckVersion *int64 `min:"1" type:"long"`
+
+ // The minimum number of child health checks that must be healthy for Amazon
+ // Route 53 to consider the parent health check to be healthy. Valid values
+ // are integers between 0 and 256, inclusive.
+ //
+ // Specify this value only if you want to change it.
+ HealthThreshold *int64 `type:"integer"`
+
+ // The IP address of the resource that you want to check.
+ //
+ // Specify this value only if you want to change it.
+ IPAddress *string `type:"string"`
+
+ // A boolean value that indicates whether the status of the health check should
+ // be inverted. For example, if a health check is healthy but Inverted is True,
+ // then Amazon Route 53 considers the health check to be unhealthy.
+ //
+ // Specify this value only if you want to change it.
+ Inverted *bool `type:"boolean"`
+
+ // The port on which you want Amazon Route 53 to open a connection to perform
+ // health checks.
+ //
+ // Specify this value only if you want to change it.
+ Port *int64 `min:"1" type:"integer"`
+
+ // The path that you want Amazon Route 53 to request when performing health
+ // checks. The path can be any value for which your endpoint will return an
+ // HTTP status code of 2xx or 3xx when the endpoint is healthy, for example,
+ // the file /docs/route53-health-check.html.
+ //
+ // Specify this value only if you want to change it.
+ ResourcePath *string `type:"string"`
+
+ // If the value of Type is HTTP_STR_MATCH or HTTPS_STR_MATCH, the string that
+ // you want Amazon Route 53 to search for in the response body from the specified
+ // resource. If the string appears in the response body, Amazon Route 53 considers
+ // the resource healthy.
+ //
+ // Specify this value only if you want to change it.
+ SearchString *string `type:"string"`
+}
+
+// String returns the string representation
+func (s UpdateHealthCheckInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s UpdateHealthCheckInput) GoString() string {
+ return s.String()
+}
+
+type UpdateHealthCheckOutput struct {
+ _ struct{} `type:"structure"`
+
+ // A complex type that contains identifying information about the health check.
+ HealthCheck *HealthCheck `type:"structure" required:"true"`
+}
+
+// String returns the string representation
+func (s UpdateHealthCheckOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s UpdateHealthCheckOutput) GoString() string {
+ return s.String()
+}
+
+// A complex type that contains information about the request to update a hosted
+// zone comment.
+type UpdateHostedZoneCommentInput struct {
+ _ struct{} `locationName:"UpdateHostedZoneCommentRequest" type:"structure" xmlURI:"https://route53.amazonaws.com/doc/2013-04-01/"`
+
+ // A comment about your hosted zone.
+ Comment *string `type:"string"`
+
+ // The ID of the hosted zone you want to update.
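+ //
+ // For illustration only (hypothetical ID): the value can be given either as
+ // "Z1D633PJN98FT9" or with the "/hostedzone/" prefix that the API returns;
+ // the sanitizeURL Build handler in customizations.go removes the escaped
+ // prefix from the request path when one is present.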
+ Id *string `location:"uri" locationName:"Id" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s UpdateHostedZoneCommentInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s UpdateHostedZoneCommentInput) GoString() string {
+ return s.String()
+}
+
+// A complex type containing information about the specified hosted zone after
+// the update.
+type UpdateHostedZoneCommentOutput struct {
+ _ struct{} `type:"structure"`
+
+ // A complex type that contains information about the specified hosted zone.
+ HostedZone *HostedZone `type:"structure" required:"true"`
+}
+
+// String returns the string representation
+func (s UpdateHostedZoneCommentOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s UpdateHostedZoneCommentOutput) GoString() string {
+ return s.String()
+}
+
+// A complex type that contains information about the traffic policy for which
+// you want to update the comment.
+type UpdateTrafficPolicyCommentInput struct {
+ _ struct{} `locationName:"UpdateTrafficPolicyCommentRequest" type:"structure" xmlURI:"https://route53.amazonaws.com/doc/2013-04-01/"`
+
+ // The new comment for the specified traffic policy and version.
+ Comment *string `type:"string" required:"true"`
+
+ // The value of Id for the traffic policy for which you want to update the comment.
+ Id *string `location:"uri" locationName:"Id" type:"string" required:"true"`
+
+ // The value of Version for the traffic policy for which you want to update
+ // the comment.
+ Version *int64 `location:"uri" locationName:"Version" min:"1" type:"integer" required:"true"`
+}
+
+// String returns the string representation
+func (s UpdateTrafficPolicyCommentInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s UpdateTrafficPolicyCommentInput) GoString() string {
+ return s.String()
+}
+
+// A complex type that contains the response information for the traffic policy.
+type UpdateTrafficPolicyCommentOutput struct {
+ _ struct{} `type:"structure"`
+
+ // A complex type that contains settings for the specified traffic policy.
+ TrafficPolicy *TrafficPolicy `type:"structure" required:"true"`
+}
+
+// String returns the string representation
+func (s UpdateTrafficPolicyCommentOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s UpdateTrafficPolicyCommentOutput) GoString() string {
+ return s.String()
+}
+
+// A complex type that contains information about the resource record sets that
+// you want to update based on a specified traffic policy instance.
+type UpdateTrafficPolicyInstanceInput struct {
+ _ struct{} `locationName:"UpdateTrafficPolicyInstanceRequest" type:"structure" xmlURI:"https://route53.amazonaws.com/doc/2013-04-01/"`
+
+ // The ID of the traffic policy instance that you want to update.
+ Id *string `location:"uri" locationName:"Id" type:"string" required:"true"`
+
+ // The TTL that you want Amazon Route 53 to assign to all of the updated resource
+ // record sets.
+ TTL *int64 `type:"long" required:"true"`
+
+ // The ID of the traffic policy that you want Amazon Route 53 to use to update
+ // resource record sets for the specified traffic policy instance.
+ TrafficPolicyId *string `type:"string" required:"true"` + + // The version of the traffic policy that you want Amazon Route 53 to use to + // update resource record sets for the specified traffic policy instance. + TrafficPolicyVersion *int64 `min:"1" type:"integer" required:"true"` +} + +// String returns the string representation +func (s UpdateTrafficPolicyInstanceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateTrafficPolicyInstanceInput) GoString() string { + return s.String() +} + +// A complex type that contains information about the resource record sets that +// Amazon Route 53 created based on a specified traffic policy. +type UpdateTrafficPolicyInstanceOutput struct { + _ struct{} `type:"structure"` + + // A complex type that contains settings for the updated traffic policy instance. + TrafficPolicyInstance *TrafficPolicyInstance `type:"structure" required:"true"` +} + +// String returns the string representation +func (s UpdateTrafficPolicyInstanceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateTrafficPolicyInstanceOutput) GoString() string { + return s.String() +} + +type VPC struct { + _ struct{} `type:"structure"` + + // A VPC ID + VPCId *string `type:"string"` + + VPCRegion *string `min:"1" type:"string" enum:"VPCRegion"` +} + +// String returns the string representation +func (s VPC) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s VPC) GoString() string { + return s.String() +} + +const ( + // @enum ChangeAction + ChangeActionCreate = "CREATE" + // @enum ChangeAction + ChangeActionDelete = "DELETE" + // @enum ChangeAction + ChangeActionUpsert = "UPSERT" +) + +const ( + // @enum ChangeStatus + ChangeStatusPending = "PENDING" + // @enum ChangeStatus + ChangeStatusInsync = "INSYNC" +) + +const ( + // @enum HealthCheckType + HealthCheckTypeHttp = "HTTP" + // @enum HealthCheckType + HealthCheckTypeHttps = "HTTPS" + // @enum HealthCheckType + HealthCheckTypeHttpStrMatch = "HTTP_STR_MATCH" + // @enum HealthCheckType + HealthCheckTypeHttpsStrMatch = "HTTPS_STR_MATCH" + // @enum HealthCheckType + HealthCheckTypeTcp = "TCP" + // @enum HealthCheckType + HealthCheckTypeCalculated = "CALCULATED" +) + +const ( + // @enum RRType + RRTypeSoa = "SOA" + // @enum RRType + RRTypeA = "A" + // @enum RRType + RRTypeTxt = "TXT" + // @enum RRType + RRTypeNs = "NS" + // @enum RRType + RRTypeCname = "CNAME" + // @enum RRType + RRTypeMx = "MX" + // @enum RRType + RRTypePtr = "PTR" + // @enum RRType + RRTypeSrv = "SRV" + // @enum RRType + RRTypeSpf = "SPF" + // @enum RRType + RRTypeAaaa = "AAAA" +) + +const ( + // @enum ResourceRecordSetFailover + ResourceRecordSetFailoverPrimary = "PRIMARY" + // @enum ResourceRecordSetFailover + ResourceRecordSetFailoverSecondary = "SECONDARY" +) + +const ( + // @enum ResourceRecordSetRegion + ResourceRecordSetRegionUsEast1 = "us-east-1" + // @enum ResourceRecordSetRegion + ResourceRecordSetRegionUsWest1 = "us-west-1" + // @enum ResourceRecordSetRegion + ResourceRecordSetRegionUsWest2 = "us-west-2" + // @enum ResourceRecordSetRegion + ResourceRecordSetRegionEuWest1 = "eu-west-1" + // @enum ResourceRecordSetRegion + ResourceRecordSetRegionEuCentral1 = "eu-central-1" + // @enum ResourceRecordSetRegion + ResourceRecordSetRegionApSoutheast1 = "ap-southeast-1" + // @enum ResourceRecordSetRegion + ResourceRecordSetRegionApSoutheast2 = "ap-southeast-2" + // @enum 
ResourceRecordSetRegion
+ ResourceRecordSetRegionApNortheast1 = "ap-northeast-1"
+ // @enum ResourceRecordSetRegion
+ ResourceRecordSetRegionSaEast1 = "sa-east-1"
+ // @enum ResourceRecordSetRegion
+ ResourceRecordSetRegionCnNorth1 = "cn-north-1"
+)
+
+const (
+ // @enum TagResourceType
+ TagResourceTypeHealthcheck = "healthcheck"
+ // @enum TagResourceType
+ TagResourceTypeHostedzone = "hostedzone"
+)
+
+const (
+ // @enum VPCRegion
+ VPCRegionUsEast1 = "us-east-1"
+ // @enum VPCRegion
+ VPCRegionUsWest1 = "us-west-1"
+ // @enum VPCRegion
+ VPCRegionUsWest2 = "us-west-2"
+ // @enum VPCRegion
+ VPCRegionEuWest1 = "eu-west-1"
+ // @enum VPCRegion
+ VPCRegionEuCentral1 = "eu-central-1"
+ // @enum VPCRegion
+ VPCRegionApSoutheast1 = "ap-southeast-1"
+ // @enum VPCRegion
+ VPCRegionApSoutheast2 = "ap-southeast-2"
+ // @enum VPCRegion
+ VPCRegionApNortheast1 = "ap-northeast-1"
+ // @enum VPCRegion
+ VPCRegionSaEast1 = "sa-east-1"
+ // @enum VPCRegion
+ VPCRegionCnNorth1 = "cn-north-1"
+)
diff --git a/vendor/github.com/aws/aws-sdk-go/service/route53/customizations.go b/vendor/github.com/aws/aws-sdk-go/service/route53/customizations.go
new file mode 100644
index 000000000..c7529246d
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/route53/customizations.go
@@ -0,0 +1,21 @@
+package route53
+
+import (
+ "regexp"
+
+ "github.com/aws/aws-sdk-go/aws/client"
+ "github.com/aws/aws-sdk-go/aws/request"
+)
+
+func init() {
+ initClient = func(c *client.Client) {
+ c.Handlers.Build.PushBack(sanitizeURL)
+ }
+}
+
+// reSanitizeURL matches an escaped resource prefix, such as "/%2Fhostedzone%2F",
+// embedded in a built request path.
+var reSanitizeURL = regexp.MustCompile(`\/%2F\w+%2F`)
+
+// sanitizeURL collapses an escaped prefix like "%2Fhostedzone%2F" out of the
+// request path, so resource IDs returned by the API (which include a
+// "/hostedzone/"-style prefix) can be passed back to the client unchanged.
+func sanitizeURL(r *request.Request) {
+ r.HTTPRequest.URL.Opaque =
+ reSanitizeURL.ReplaceAllString(r.HTTPRequest.URL.Opaque, "/")
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/service/route53/customizations_test.go b/vendor/github.com/aws/aws-sdk-go/service/route53/customizations_test.go
new file mode 100644
index 000000000..518790a24
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/route53/customizations_test.go
@@ -0,0 +1,22 @@
+package route53_test
+
+import (
+ "testing"
+
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/awstesting"
+ "github.com/aws/aws-sdk-go/awstesting/unit"
+ "github.com/aws/aws-sdk-go/service/route53"
+)
+
+func TestBuildCorrectURI(t *testing.T) {
+ svc := route53.New(unit.Session)
+ svc.Handlers.Validate.Clear()
+ req, _ := svc.GetHostedZoneRequest(&route53.GetHostedZoneInput{
+ Id: aws.String("/hostedzone/ABCDEFG"),
+ })
+
+ req.Build()
+
+ awstesting.Match(t, `\/hostedzone\/ABCDEFG$`, req.HTTPRequest.URL.String())
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/service/route53/examples_test.go b/vendor/github.com/aws/aws-sdk-go/service/route53/examples_test.go
new file mode 100644
index 000000000..9d179f557
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/route53/examples_test.go
@@ -0,0 +1,1080 @@
+// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT.
+ +package route53_test + +import ( + "bytes" + "fmt" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/session" + "github.com/aws/aws-sdk-go/service/route53" +) + +var _ time.Duration +var _ bytes.Buffer + +func ExampleRoute53_AssociateVPCWithHostedZone() { + svc := route53.New(session.New()) + + params := &route53.AssociateVPCWithHostedZoneInput{ + HostedZoneId: aws.String("ResourceId"), // Required + VPC: &route53.VPC{ // Required + VPCId: aws.String("VPCId"), + VPCRegion: aws.String("VPCRegion"), + }, + Comment: aws.String("AssociateVPCComment"), + } + resp, err := svc.AssociateVPCWithHostedZone(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRoute53_ChangeResourceRecordSets() { + svc := route53.New(session.New()) + + params := &route53.ChangeResourceRecordSetsInput{ + ChangeBatch: &route53.ChangeBatch{ // Required + Changes: []*route53.Change{ // Required + { // Required + Action: aws.String("ChangeAction"), // Required + ResourceRecordSet: &route53.ResourceRecordSet{ // Required + Name: aws.String("DNSName"), // Required + Type: aws.String("RRType"), // Required + AliasTarget: &route53.AliasTarget{ + DNSName: aws.String("DNSName"), // Required + EvaluateTargetHealth: aws.Bool(true), // Required + HostedZoneId: aws.String("ResourceId"), // Required + }, + Failover: aws.String("ResourceRecordSetFailover"), + GeoLocation: &route53.GeoLocation{ + ContinentCode: aws.String("GeoLocationContinentCode"), + CountryCode: aws.String("GeoLocationCountryCode"), + SubdivisionCode: aws.String("GeoLocationSubdivisionCode"), + }, + HealthCheckId: aws.String("HealthCheckId"), + Region: aws.String("ResourceRecordSetRegion"), + ResourceRecords: []*route53.ResourceRecord{ + { // Required + Value: aws.String("RData"), // Required + }, + // More values... + }, + SetIdentifier: aws.String("ResourceRecordSetIdentifier"), + TTL: aws.Int64(1), + TrafficPolicyInstanceId: aws.String("TrafficPolicyInstanceId"), + Weight: aws.Int64(1), + }, + }, + // More values... + }, + Comment: aws.String("ResourceDescription"), + }, + HostedZoneId: aws.String("ResourceId"), // Required + } + resp, err := svc.ChangeResourceRecordSets(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRoute53_ChangeTagsForResource() { + svc := route53.New(session.New()) + + params := &route53.ChangeTagsForResourceInput{ + ResourceId: aws.String("TagResourceId"), // Required + ResourceType: aws.String("TagResourceType"), // Required + AddTags: []*route53.Tag{ + { // Required + Key: aws.String("TagKey"), + Value: aws.String("TagValue"), + }, + // More values... + }, + RemoveTagKeys: []*string{ + aws.String("TagKey"), // Required + // More values... + }, + } + resp, err := svc.ChangeTagsForResource(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleRoute53_CreateHealthCheck() { + svc := route53.New(session.New()) + + params := &route53.CreateHealthCheckInput{ + CallerReference: aws.String("HealthCheckNonce"), // Required + HealthCheckConfig: &route53.HealthCheckConfig{ // Required + Type: aws.String("HealthCheckType"), // Required + ChildHealthChecks: []*string{ + aws.String("HealthCheckId"), // Required + // More values... + }, + FailureThreshold: aws.Int64(1), + FullyQualifiedDomainName: aws.String("FullyQualifiedDomainName"), + HealthThreshold: aws.Int64(1), + IPAddress: aws.String("IPAddress"), + Inverted: aws.Bool(true), + MeasureLatency: aws.Bool(true), + Port: aws.Int64(1), + RequestInterval: aws.Int64(1), + ResourcePath: aws.String("ResourcePath"), + SearchString: aws.String("SearchString"), + }, + } + resp, err := svc.CreateHealthCheck(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRoute53_CreateHostedZone() { + svc := route53.New(session.New()) + + params := &route53.CreateHostedZoneInput{ + CallerReference: aws.String("Nonce"), // Required + Name: aws.String("DNSName"), // Required + DelegationSetId: aws.String("ResourceId"), + HostedZoneConfig: &route53.HostedZoneConfig{ + Comment: aws.String("ResourceDescription"), + PrivateZone: aws.Bool(true), + }, + VPC: &route53.VPC{ + VPCId: aws.String("VPCId"), + VPCRegion: aws.String("VPCRegion"), + }, + } + resp, err := svc.CreateHostedZone(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRoute53_CreateReusableDelegationSet() { + svc := route53.New(session.New()) + + params := &route53.CreateReusableDelegationSetInput{ + CallerReference: aws.String("Nonce"), // Required + HostedZoneId: aws.String("ResourceId"), + } + resp, err := svc.CreateReusableDelegationSet(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRoute53_CreateTrafficPolicy() { + svc := route53.New(session.New()) + + params := &route53.CreateTrafficPolicyInput{ + Document: aws.String("TrafficPolicyDocument"), // Required + Name: aws.String("TrafficPolicyName"), // Required + Comment: aws.String("TrafficPolicyComment"), + } + resp, err := svc.CreateTrafficPolicy(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRoute53_CreateTrafficPolicyInstance() { + svc := route53.New(session.New()) + + params := &route53.CreateTrafficPolicyInstanceInput{ + HostedZoneId: aws.String("ResourceId"), // Required + Name: aws.String("DNSName"), // Required + TTL: aws.Int64(1), // Required + TrafficPolicyId: aws.String("TrafficPolicyId"), // Required + TrafficPolicyVersion: aws.Int64(1), // Required + } + resp, err := svc.CreateTrafficPolicyInstance(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleRoute53_CreateTrafficPolicyVersion() { + svc := route53.New(session.New()) + + params := &route53.CreateTrafficPolicyVersionInput{ + Document: aws.String("TrafficPolicyDocument"), // Required + Id: aws.String("TrafficPolicyId"), // Required + Comment: aws.String("TrafficPolicyComment"), + } + resp, err := svc.CreateTrafficPolicyVersion(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRoute53_DeleteHealthCheck() { + svc := route53.New(session.New()) + + params := &route53.DeleteHealthCheckInput{ + HealthCheckId: aws.String("HealthCheckId"), // Required + } + resp, err := svc.DeleteHealthCheck(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRoute53_DeleteHostedZone() { + svc := route53.New(session.New()) + + params := &route53.DeleteHostedZoneInput{ + Id: aws.String("ResourceId"), // Required + } + resp, err := svc.DeleteHostedZone(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRoute53_DeleteReusableDelegationSet() { + svc := route53.New(session.New()) + + params := &route53.DeleteReusableDelegationSetInput{ + Id: aws.String("ResourceId"), // Required + } + resp, err := svc.DeleteReusableDelegationSet(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRoute53_DeleteTrafficPolicy() { + svc := route53.New(session.New()) + + params := &route53.DeleteTrafficPolicyInput{ + Id: aws.String("TrafficPolicyId"), // Required + Version: aws.Int64(1), // Required + } + resp, err := svc.DeleteTrafficPolicy(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRoute53_DeleteTrafficPolicyInstance() { + svc := route53.New(session.New()) + + params := &route53.DeleteTrafficPolicyInstanceInput{ + Id: aws.String("TrafficPolicyInstanceId"), // Required + } + resp, err := svc.DeleteTrafficPolicyInstance(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRoute53_DisassociateVPCFromHostedZone() { + svc := route53.New(session.New()) + + params := &route53.DisassociateVPCFromHostedZoneInput{ + HostedZoneId: aws.String("ResourceId"), // Required + VPC: &route53.VPC{ // Required + VPCId: aws.String("VPCId"), + VPCRegion: aws.String("VPCRegion"), + }, + Comment: aws.String("DisassociateVPCComment"), + } + resp, err := svc.DisassociateVPCFromHostedZone(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleRoute53_GetChange() { + svc := route53.New(session.New()) + + params := &route53.GetChangeInput{ + Id: aws.String("ResourceId"), // Required + } + resp, err := svc.GetChange(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRoute53_GetChangeDetails() { + svc := route53.New(session.New()) + + params := &route53.GetChangeDetailsInput{ + Id: aws.String("ResourceId"), // Required + } + resp, err := svc.GetChangeDetails(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRoute53_GetCheckerIpRanges() { + svc := route53.New(session.New()) + + var params *route53.GetCheckerIpRangesInput + resp, err := svc.GetCheckerIpRanges(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRoute53_GetGeoLocation() { + svc := route53.New(session.New()) + + params := &route53.GetGeoLocationInput{ + ContinentCode: aws.String("GeoLocationContinentCode"), + CountryCode: aws.String("GeoLocationCountryCode"), + SubdivisionCode: aws.String("GeoLocationSubdivisionCode"), + } + resp, err := svc.GetGeoLocation(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRoute53_GetHealthCheck() { + svc := route53.New(session.New()) + + params := &route53.GetHealthCheckInput{ + HealthCheckId: aws.String("HealthCheckId"), // Required + } + resp, err := svc.GetHealthCheck(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRoute53_GetHealthCheckCount() { + svc := route53.New(session.New()) + + var params *route53.GetHealthCheckCountInput + resp, err := svc.GetHealthCheckCount(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRoute53_GetHealthCheckLastFailureReason() { + svc := route53.New(session.New()) + + params := &route53.GetHealthCheckLastFailureReasonInput{ + HealthCheckId: aws.String("HealthCheckId"), // Required + } + resp, err := svc.GetHealthCheckLastFailureReason(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRoute53_GetHealthCheckStatus() { + svc := route53.New(session.New()) + + params := &route53.GetHealthCheckStatusInput{ + HealthCheckId: aws.String("HealthCheckId"), // Required + } + resp, err := svc.GetHealthCheckStatus(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. 
+ fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRoute53_GetHostedZone() { + svc := route53.New(session.New()) + + params := &route53.GetHostedZoneInput{ + Id: aws.String("ResourceId"), // Required + } + resp, err := svc.GetHostedZone(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRoute53_GetHostedZoneCount() { + svc := route53.New(session.New()) + + var params *route53.GetHostedZoneCountInput + resp, err := svc.GetHostedZoneCount(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRoute53_GetReusableDelegationSet() { + svc := route53.New(session.New()) + + params := &route53.GetReusableDelegationSetInput{ + Id: aws.String("ResourceId"), // Required + } + resp, err := svc.GetReusableDelegationSet(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRoute53_GetTrafficPolicy() { + svc := route53.New(session.New()) + + params := &route53.GetTrafficPolicyInput{ + Id: aws.String("TrafficPolicyId"), // Required + Version: aws.Int64(1), // Required + } + resp, err := svc.GetTrafficPolicy(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRoute53_GetTrafficPolicyInstance() { + svc := route53.New(session.New()) + + params := &route53.GetTrafficPolicyInstanceInput{ + Id: aws.String("TrafficPolicyInstanceId"), // Required + } + resp, err := svc.GetTrafficPolicyInstance(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRoute53_GetTrafficPolicyInstanceCount() { + svc := route53.New(session.New()) + + var params *route53.GetTrafficPolicyInstanceCountInput + resp, err := svc.GetTrafficPolicyInstanceCount(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRoute53_ListChangeBatchesByHostedZone() { + svc := route53.New(session.New()) + + params := &route53.ListChangeBatchesByHostedZoneInput{ + EndDate: aws.String("Date"), // Required + HostedZoneId: aws.String("ResourceId"), // Required + StartDate: aws.String("Date"), // Required + Marker: aws.String("PageMarker"), + MaxItems: aws.String("PageMaxItems"), + } + resp, err := svc.ListChangeBatchesByHostedZone(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleRoute53_ListChangeBatchesByRRSet() { + svc := route53.New(session.New()) + + params := &route53.ListChangeBatchesByRRSetInput{ + EndDate: aws.String("Date"), // Required + HostedZoneId: aws.String("ResourceId"), // Required + Name: aws.String("DNSName"), // Required + StartDate: aws.String("Date"), // Required + Type: aws.String("RRType"), // Required + Marker: aws.String("PageMarker"), + MaxItems: aws.String("PageMaxItems"), + SetIdentifier: aws.String("ResourceRecordSetIdentifier"), + } + resp, err := svc.ListChangeBatchesByRRSet(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRoute53_ListGeoLocations() { + svc := route53.New(session.New()) + + params := &route53.ListGeoLocationsInput{ + MaxItems: aws.String("PageMaxItems"), + StartContinentCode: aws.String("GeoLocationContinentCode"), + StartCountryCode: aws.String("GeoLocationCountryCode"), + StartSubdivisionCode: aws.String("GeoLocationSubdivisionCode"), + } + resp, err := svc.ListGeoLocations(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRoute53_ListHealthChecks() { + svc := route53.New(session.New()) + + params := &route53.ListHealthChecksInput{ + Marker: aws.String("PageMarker"), + MaxItems: aws.String("PageMaxItems"), + } + resp, err := svc.ListHealthChecks(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRoute53_ListHostedZones() { + svc := route53.New(session.New()) + + params := &route53.ListHostedZonesInput{ + DelegationSetId: aws.String("ResourceId"), + Marker: aws.String("PageMarker"), + MaxItems: aws.String("PageMaxItems"), + } + resp, err := svc.ListHostedZones(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRoute53_ListHostedZonesByName() { + svc := route53.New(session.New()) + + params := &route53.ListHostedZonesByNameInput{ + DNSName: aws.String("DNSName"), + HostedZoneId: aws.String("ResourceId"), + MaxItems: aws.String("PageMaxItems"), + } + resp, err := svc.ListHostedZonesByName(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRoute53_ListResourceRecordSets() { + svc := route53.New(session.New()) + + params := &route53.ListResourceRecordSetsInput{ + HostedZoneId: aws.String("ResourceId"), // Required + MaxItems: aws.String("PageMaxItems"), + StartRecordIdentifier: aws.String("ResourceRecordSetIdentifier"), + StartRecordName: aws.String("DNSName"), + StartRecordType: aws.String("RRType"), + } + resp, err := svc.ListResourceRecordSets(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleRoute53_ListReusableDelegationSets() { + svc := route53.New(session.New()) + + params := &route53.ListReusableDelegationSetsInput{ + Marker: aws.String("PageMarker"), + MaxItems: aws.String("PageMaxItems"), + } + resp, err := svc.ListReusableDelegationSets(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRoute53_ListTagsForResource() { + svc := route53.New(session.New()) + + params := &route53.ListTagsForResourceInput{ + ResourceId: aws.String("TagResourceId"), // Required + ResourceType: aws.String("TagResourceType"), // Required + } + resp, err := svc.ListTagsForResource(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRoute53_ListTagsForResources() { + svc := route53.New(session.New()) + + params := &route53.ListTagsForResourcesInput{ + ResourceIds: []*string{ // Required + aws.String("TagResourceId"), // Required + // More values... + }, + ResourceType: aws.String("TagResourceType"), // Required + } + resp, err := svc.ListTagsForResources(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRoute53_ListTrafficPolicies() { + svc := route53.New(session.New()) + + params := &route53.ListTrafficPoliciesInput{ + MaxItems: aws.String("PageMaxItems"), + TrafficPolicyIdMarker: aws.String("TrafficPolicyId"), + } + resp, err := svc.ListTrafficPolicies(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRoute53_ListTrafficPolicyInstances() { + svc := route53.New(session.New()) + + params := &route53.ListTrafficPolicyInstancesInput{ + HostedZoneIdMarker: aws.String("ResourceId"), + MaxItems: aws.String("PageMaxItems"), + TrafficPolicyInstanceNameMarker: aws.String("DNSName"), + TrafficPolicyInstanceTypeMarker: aws.String("RRType"), + } + resp, err := svc.ListTrafficPolicyInstances(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRoute53_ListTrafficPolicyInstancesByHostedZone() { + svc := route53.New(session.New()) + + params := &route53.ListTrafficPolicyInstancesByHostedZoneInput{ + HostedZoneId: aws.String("ResourceId"), // Required + MaxItems: aws.String("PageMaxItems"), + TrafficPolicyInstanceNameMarker: aws.String("DNSName"), + TrafficPolicyInstanceTypeMarker: aws.String("RRType"), + } + resp, err := svc.ListTrafficPolicyInstancesByHostedZone(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleRoute53_ListTrafficPolicyInstancesByPolicy() { + svc := route53.New(session.New()) + + params := &route53.ListTrafficPolicyInstancesByPolicyInput{ + TrafficPolicyId: aws.String("TrafficPolicyId"), // Required + TrafficPolicyVersion: aws.Int64(1), // Required + HostedZoneIdMarker: aws.String("ResourceId"), + MaxItems: aws.String("PageMaxItems"), + TrafficPolicyInstanceNameMarker: aws.String("DNSName"), + TrafficPolicyInstanceTypeMarker: aws.String("RRType"), + } + resp, err := svc.ListTrafficPolicyInstancesByPolicy(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRoute53_ListTrafficPolicyVersions() { + svc := route53.New(session.New()) + + params := &route53.ListTrafficPolicyVersionsInput{ + Id: aws.String("TrafficPolicyId"), // Required + MaxItems: aws.String("PageMaxItems"), + TrafficPolicyVersionMarker: aws.String("TrafficPolicyVersionMarker"), + } + resp, err := svc.ListTrafficPolicyVersions(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRoute53_UpdateHealthCheck() { + svc := route53.New(session.New()) + + params := &route53.UpdateHealthCheckInput{ + HealthCheckId: aws.String("HealthCheckId"), // Required + ChildHealthChecks: []*string{ + aws.String("HealthCheckId"), // Required + // More values... + }, + FailureThreshold: aws.Int64(1), + FullyQualifiedDomainName: aws.String("FullyQualifiedDomainName"), + HealthCheckVersion: aws.Int64(1), + HealthThreshold: aws.Int64(1), + IPAddress: aws.String("IPAddress"), + Inverted: aws.Bool(true), + Port: aws.Int64(1), + ResourcePath: aws.String("ResourcePath"), + SearchString: aws.String("SearchString"), + } + resp, err := svc.UpdateHealthCheck(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRoute53_UpdateHostedZoneComment() { + svc := route53.New(session.New()) + + params := &route53.UpdateHostedZoneCommentInput{ + Id: aws.String("ResourceId"), // Required + Comment: aws.String("ResourceDescription"), + } + resp, err := svc.UpdateHostedZoneComment(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRoute53_UpdateTrafficPolicyComment() { + svc := route53.New(session.New()) + + params := &route53.UpdateTrafficPolicyCommentInput{ + Comment: aws.String("TrafficPolicyComment"), // Required + Id: aws.String("TrafficPolicyId"), // Required + Version: aws.Int64(1), // Required + } + resp, err := svc.UpdateTrafficPolicyComment(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp)
+}
+
+func ExampleRoute53_UpdateTrafficPolicyInstance() {
+ svc := route53.New(session.New())
+
+ params := &route53.UpdateTrafficPolicyInstanceInput{
+ Id: aws.String("TrafficPolicyInstanceId"), // Required
+ TTL: aws.Int64(1), // Required
+ TrafficPolicyId: aws.String("TrafficPolicyId"), // Required
+ TrafficPolicyVersion: aws.Int64(1), // Required
+ }
+ resp, err := svc.UpdateTrafficPolicyInstance(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/service/route53/route53iface/interface.go b/vendor/github.com/aws/aws-sdk-go/service/route53/route53iface/interface.go
new file mode 100644
index 000000000..a2baa130e
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/route53/route53iface/interface.go
@@ -0,0 +1,212 @@
+// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT.
+
+// Package route53iface provides an interface for Amazon Route 53.
+package route53iface
+
+import (
+ "github.com/aws/aws-sdk-go/aws/request"
+ "github.com/aws/aws-sdk-go/service/route53"
+)
+
+// Route53API is the interface type for route53.Route53.
+type Route53API interface {
+ AssociateVPCWithHostedZoneRequest(*route53.AssociateVPCWithHostedZoneInput) (*request.Request, *route53.AssociateVPCWithHostedZoneOutput)
+
+ AssociateVPCWithHostedZone(*route53.AssociateVPCWithHostedZoneInput) (*route53.AssociateVPCWithHostedZoneOutput, error)
+
+ ChangeResourceRecordSetsRequest(*route53.ChangeResourceRecordSetsInput) (*request.Request, *route53.ChangeResourceRecordSetsOutput)
+
+ ChangeResourceRecordSets(*route53.ChangeResourceRecordSetsInput) (*route53.ChangeResourceRecordSetsOutput, error)
+
+ ChangeTagsForResourceRequest(*route53.ChangeTagsForResourceInput) (*request.Request, *route53.ChangeTagsForResourceOutput)
+
+ ChangeTagsForResource(*route53.ChangeTagsForResourceInput) (*route53.ChangeTagsForResourceOutput, error)
+
+ CreateHealthCheckRequest(*route53.CreateHealthCheckInput) (*request.Request, *route53.CreateHealthCheckOutput)
+
+ CreateHealthCheck(*route53.CreateHealthCheckInput) (*route53.CreateHealthCheckOutput, error)
+
+ CreateHostedZoneRequest(*route53.CreateHostedZoneInput) (*request.Request, *route53.CreateHostedZoneOutput)
+
+ CreateHostedZone(*route53.CreateHostedZoneInput) (*route53.CreateHostedZoneOutput, error)
+
+ CreateReusableDelegationSetRequest(*route53.CreateReusableDelegationSetInput) (*request.Request, *route53.CreateReusableDelegationSetOutput)
+
+ CreateReusableDelegationSet(*route53.CreateReusableDelegationSetInput) (*route53.CreateReusableDelegationSetOutput, error)
+
+ CreateTrafficPolicyRequest(*route53.CreateTrafficPolicyInput) (*request.Request, *route53.CreateTrafficPolicyOutput)
+
+ CreateTrafficPolicy(*route53.CreateTrafficPolicyInput) (*route53.CreateTrafficPolicyOutput, error)
+
+ CreateTrafficPolicyInstanceRequest(*route53.CreateTrafficPolicyInstanceInput) (*request.Request, *route53.CreateTrafficPolicyInstanceOutput)
+
+ CreateTrafficPolicyInstance(*route53.CreateTrafficPolicyInstanceInput) (*route53.CreateTrafficPolicyInstanceOutput, error)
+
+ CreateTrafficPolicyVersionRequest(*route53.CreateTrafficPolicyVersionInput) (*request.Request, *route53.CreateTrafficPolicyVersionOutput)
+
+ CreateTrafficPolicyVersion(*route53.CreateTrafficPolicyVersionInput) (*route53.CreateTrafficPolicyVersionOutput, error)
+
+
DeleteHealthCheckRequest(*route53.DeleteHealthCheckInput) (*request.Request, *route53.DeleteHealthCheckOutput) + + DeleteHealthCheck(*route53.DeleteHealthCheckInput) (*route53.DeleteHealthCheckOutput, error) + + DeleteHostedZoneRequest(*route53.DeleteHostedZoneInput) (*request.Request, *route53.DeleteHostedZoneOutput) + + DeleteHostedZone(*route53.DeleteHostedZoneInput) (*route53.DeleteHostedZoneOutput, error) + + DeleteReusableDelegationSetRequest(*route53.DeleteReusableDelegationSetInput) (*request.Request, *route53.DeleteReusableDelegationSetOutput) + + DeleteReusableDelegationSet(*route53.DeleteReusableDelegationSetInput) (*route53.DeleteReusableDelegationSetOutput, error) + + DeleteTrafficPolicyRequest(*route53.DeleteTrafficPolicyInput) (*request.Request, *route53.DeleteTrafficPolicyOutput) + + DeleteTrafficPolicy(*route53.DeleteTrafficPolicyInput) (*route53.DeleteTrafficPolicyOutput, error) + + DeleteTrafficPolicyInstanceRequest(*route53.DeleteTrafficPolicyInstanceInput) (*request.Request, *route53.DeleteTrafficPolicyInstanceOutput) + + DeleteTrafficPolicyInstance(*route53.DeleteTrafficPolicyInstanceInput) (*route53.DeleteTrafficPolicyInstanceOutput, error) + + DisassociateVPCFromHostedZoneRequest(*route53.DisassociateVPCFromHostedZoneInput) (*request.Request, *route53.DisassociateVPCFromHostedZoneOutput) + + DisassociateVPCFromHostedZone(*route53.DisassociateVPCFromHostedZoneInput) (*route53.DisassociateVPCFromHostedZoneOutput, error) + + GetChangeRequest(*route53.GetChangeInput) (*request.Request, *route53.GetChangeOutput) + + GetChange(*route53.GetChangeInput) (*route53.GetChangeOutput, error) + + GetChangeDetailsRequest(*route53.GetChangeDetailsInput) (*request.Request, *route53.GetChangeDetailsOutput) + + GetChangeDetails(*route53.GetChangeDetailsInput) (*route53.GetChangeDetailsOutput, error) + + GetCheckerIpRangesRequest(*route53.GetCheckerIpRangesInput) (*request.Request, *route53.GetCheckerIpRangesOutput) + + GetCheckerIpRanges(*route53.GetCheckerIpRangesInput) (*route53.GetCheckerIpRangesOutput, error) + + GetGeoLocationRequest(*route53.GetGeoLocationInput) (*request.Request, *route53.GetGeoLocationOutput) + + GetGeoLocation(*route53.GetGeoLocationInput) (*route53.GetGeoLocationOutput, error) + + GetHealthCheckRequest(*route53.GetHealthCheckInput) (*request.Request, *route53.GetHealthCheckOutput) + + GetHealthCheck(*route53.GetHealthCheckInput) (*route53.GetHealthCheckOutput, error) + + GetHealthCheckCountRequest(*route53.GetHealthCheckCountInput) (*request.Request, *route53.GetHealthCheckCountOutput) + + GetHealthCheckCount(*route53.GetHealthCheckCountInput) (*route53.GetHealthCheckCountOutput, error) + + GetHealthCheckLastFailureReasonRequest(*route53.GetHealthCheckLastFailureReasonInput) (*request.Request, *route53.GetHealthCheckLastFailureReasonOutput) + + GetHealthCheckLastFailureReason(*route53.GetHealthCheckLastFailureReasonInput) (*route53.GetHealthCheckLastFailureReasonOutput, error) + + GetHealthCheckStatusRequest(*route53.GetHealthCheckStatusInput) (*request.Request, *route53.GetHealthCheckStatusOutput) + + GetHealthCheckStatus(*route53.GetHealthCheckStatusInput) (*route53.GetHealthCheckStatusOutput, error) + + GetHostedZoneRequest(*route53.GetHostedZoneInput) (*request.Request, *route53.GetHostedZoneOutput) + + GetHostedZone(*route53.GetHostedZoneInput) (*route53.GetHostedZoneOutput, error) + + GetHostedZoneCountRequest(*route53.GetHostedZoneCountInput) (*request.Request, *route53.GetHostedZoneCountOutput) + + GetHostedZoneCount(*route53.GetHostedZoneCountInput) 
(*route53.GetHostedZoneCountOutput, error) + + GetReusableDelegationSetRequest(*route53.GetReusableDelegationSetInput) (*request.Request, *route53.GetReusableDelegationSetOutput) + + GetReusableDelegationSet(*route53.GetReusableDelegationSetInput) (*route53.GetReusableDelegationSetOutput, error) + + GetTrafficPolicyRequest(*route53.GetTrafficPolicyInput) (*request.Request, *route53.GetTrafficPolicyOutput) + + GetTrafficPolicy(*route53.GetTrafficPolicyInput) (*route53.GetTrafficPolicyOutput, error) + + GetTrafficPolicyInstanceRequest(*route53.GetTrafficPolicyInstanceInput) (*request.Request, *route53.GetTrafficPolicyInstanceOutput) + + GetTrafficPolicyInstance(*route53.GetTrafficPolicyInstanceInput) (*route53.GetTrafficPolicyInstanceOutput, error) + + GetTrafficPolicyInstanceCountRequest(*route53.GetTrafficPolicyInstanceCountInput) (*request.Request, *route53.GetTrafficPolicyInstanceCountOutput) + + GetTrafficPolicyInstanceCount(*route53.GetTrafficPolicyInstanceCountInput) (*route53.GetTrafficPolicyInstanceCountOutput, error) + + ListChangeBatchesByHostedZoneRequest(*route53.ListChangeBatchesByHostedZoneInput) (*request.Request, *route53.ListChangeBatchesByHostedZoneOutput) + + ListChangeBatchesByHostedZone(*route53.ListChangeBatchesByHostedZoneInput) (*route53.ListChangeBatchesByHostedZoneOutput, error) + + ListChangeBatchesByRRSetRequest(*route53.ListChangeBatchesByRRSetInput) (*request.Request, *route53.ListChangeBatchesByRRSetOutput) + + ListChangeBatchesByRRSet(*route53.ListChangeBatchesByRRSetInput) (*route53.ListChangeBatchesByRRSetOutput, error) + + ListGeoLocationsRequest(*route53.ListGeoLocationsInput) (*request.Request, *route53.ListGeoLocationsOutput) + + ListGeoLocations(*route53.ListGeoLocationsInput) (*route53.ListGeoLocationsOutput, error) + + ListHealthChecksRequest(*route53.ListHealthChecksInput) (*request.Request, *route53.ListHealthChecksOutput) + + ListHealthChecks(*route53.ListHealthChecksInput) (*route53.ListHealthChecksOutput, error) + + ListHealthChecksPages(*route53.ListHealthChecksInput, func(*route53.ListHealthChecksOutput, bool) bool) error + + ListHostedZonesRequest(*route53.ListHostedZonesInput) (*request.Request, *route53.ListHostedZonesOutput) + + ListHostedZones(*route53.ListHostedZonesInput) (*route53.ListHostedZonesOutput, error) + + ListHostedZonesPages(*route53.ListHostedZonesInput, func(*route53.ListHostedZonesOutput, bool) bool) error + + ListHostedZonesByNameRequest(*route53.ListHostedZonesByNameInput) (*request.Request, *route53.ListHostedZonesByNameOutput) + + ListHostedZonesByName(*route53.ListHostedZonesByNameInput) (*route53.ListHostedZonesByNameOutput, error) + + ListResourceRecordSetsRequest(*route53.ListResourceRecordSetsInput) (*request.Request, *route53.ListResourceRecordSetsOutput) + + ListResourceRecordSets(*route53.ListResourceRecordSetsInput) (*route53.ListResourceRecordSetsOutput, error) + + ListResourceRecordSetsPages(*route53.ListResourceRecordSetsInput, func(*route53.ListResourceRecordSetsOutput, bool) bool) error + + ListReusableDelegationSetsRequest(*route53.ListReusableDelegationSetsInput) (*request.Request, *route53.ListReusableDelegationSetsOutput) + + ListReusableDelegationSets(*route53.ListReusableDelegationSetsInput) (*route53.ListReusableDelegationSetsOutput, error) + + ListTagsForResourceRequest(*route53.ListTagsForResourceInput) (*request.Request, *route53.ListTagsForResourceOutput) + + ListTagsForResource(*route53.ListTagsForResourceInput) (*route53.ListTagsForResourceOutput, error) + + 
ListTagsForResourcesRequest(*route53.ListTagsForResourcesInput) (*request.Request, *route53.ListTagsForResourcesOutput) + + ListTagsForResources(*route53.ListTagsForResourcesInput) (*route53.ListTagsForResourcesOutput, error) + + ListTrafficPoliciesRequest(*route53.ListTrafficPoliciesInput) (*request.Request, *route53.ListTrafficPoliciesOutput) + + ListTrafficPolicies(*route53.ListTrafficPoliciesInput) (*route53.ListTrafficPoliciesOutput, error) + + ListTrafficPolicyInstancesRequest(*route53.ListTrafficPolicyInstancesInput) (*request.Request, *route53.ListTrafficPolicyInstancesOutput) + + ListTrafficPolicyInstances(*route53.ListTrafficPolicyInstancesInput) (*route53.ListTrafficPolicyInstancesOutput, error) + + ListTrafficPolicyInstancesByHostedZoneRequest(*route53.ListTrafficPolicyInstancesByHostedZoneInput) (*request.Request, *route53.ListTrafficPolicyInstancesByHostedZoneOutput) + + ListTrafficPolicyInstancesByHostedZone(*route53.ListTrafficPolicyInstancesByHostedZoneInput) (*route53.ListTrafficPolicyInstancesByHostedZoneOutput, error) + + ListTrafficPolicyInstancesByPolicyRequest(*route53.ListTrafficPolicyInstancesByPolicyInput) (*request.Request, *route53.ListTrafficPolicyInstancesByPolicyOutput) + + ListTrafficPolicyInstancesByPolicy(*route53.ListTrafficPolicyInstancesByPolicyInput) (*route53.ListTrafficPolicyInstancesByPolicyOutput, error) + + ListTrafficPolicyVersionsRequest(*route53.ListTrafficPolicyVersionsInput) (*request.Request, *route53.ListTrafficPolicyVersionsOutput) + + ListTrafficPolicyVersions(*route53.ListTrafficPolicyVersionsInput) (*route53.ListTrafficPolicyVersionsOutput, error) + + UpdateHealthCheckRequest(*route53.UpdateHealthCheckInput) (*request.Request, *route53.UpdateHealthCheckOutput) + + UpdateHealthCheck(*route53.UpdateHealthCheckInput) (*route53.UpdateHealthCheckOutput, error) + + UpdateHostedZoneCommentRequest(*route53.UpdateHostedZoneCommentInput) (*request.Request, *route53.UpdateHostedZoneCommentOutput) + + UpdateHostedZoneComment(*route53.UpdateHostedZoneCommentInput) (*route53.UpdateHostedZoneCommentOutput, error) + + UpdateTrafficPolicyCommentRequest(*route53.UpdateTrafficPolicyCommentInput) (*request.Request, *route53.UpdateTrafficPolicyCommentOutput) + + UpdateTrafficPolicyComment(*route53.UpdateTrafficPolicyCommentInput) (*route53.UpdateTrafficPolicyCommentOutput, error) + + UpdateTrafficPolicyInstanceRequest(*route53.UpdateTrafficPolicyInstanceInput) (*request.Request, *route53.UpdateTrafficPolicyInstanceOutput) + + UpdateTrafficPolicyInstance(*route53.UpdateTrafficPolicyInstanceInput) (*route53.UpdateTrafficPolicyInstanceOutput, error) +} + +var _ Route53API = (*route53.Route53)(nil) diff --git a/vendor/github.com/aws/aws-sdk-go/service/route53/service.go b/vendor/github.com/aws/aws-sdk-go/service/route53/service.go new file mode 100644 index 000000000..7711d53d1 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/route53/service.go @@ -0,0 +1,86 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +package route53 + +import ( + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/client" + "github.com/aws/aws-sdk-go/aws/client/metadata" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol/restxml" + "github.com/aws/aws-sdk-go/private/signer/v4" +) + +// Route53 is a client for Route 53. +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. 
+type Route53 struct { + *client.Client +} + +// Used for custom client initialization logic +var initClient func(*client.Client) + +// Used for custom request initialization logic +var initRequest func(*request.Request) + +// A ServiceName is the name of the service the client will make API calls to. +const ServiceName = "route53" + +// New creates a new instance of the Route53 client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a Route53 client from just a session. +// svc := route53.New(mySession) +// +// // Create a Route53 client with additional configuration +// svc := route53.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func New(p client.ConfigProvider, cfgs ...*aws.Config) *Route53 { + c := p.ClientConfig(ServiceName, cfgs...) + return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *Route53 { + svc := &Route53{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: ServiceName, + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "2013-04-01", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Build.PushBack(restxml.Build) + svc.Handlers.Unmarshal.PushBack(restxml.Unmarshal) + svc.Handlers.UnmarshalMeta.PushBack(restxml.UnmarshalMeta) + svc.Handlers.UnmarshalError.PushBack(restxml.UnmarshalError) + + // Run custom client initialization if present + if initClient != nil { + initClient(svc.Client) + } + + return svc +} + +// newRequest creates a new request for a Route53 operation and runs any +// custom request initialization. +func (c *Route53) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + // Run custom request initialization if present + if initRequest != nil { + initRequest(req) + } + + return req +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/api.go b/vendor/github.com/aws/aws-sdk-go/service/s3/api.go new file mode 100644 index 000000000..1d6312207 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/s3/api.go @@ -0,0 +1,6267 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +// Package s3 provides a client for Amazon Simple Storage Service. +package s3 + +import ( + "io" + "time" + + "github.com/aws/aws-sdk-go/aws/awsutil" + "github.com/aws/aws-sdk-go/aws/request" +) + +const opAbortMultipartUpload = "AbortMultipartUpload" + +// AbortMultipartUploadRequest generates a request for the AbortMultipartUpload operation. +func (c *S3) AbortMultipartUploadRequest(input *AbortMultipartUploadInput) (req *request.Request, output *AbortMultipartUploadOutput) { + op := &request.Operation{ + Name: opAbortMultipartUpload, + HTTPMethod: "DELETE", + HTTPPath: "/{Bucket}/{Key+}", + } + + if input == nil { + input = &AbortMultipartUploadInput{} + } + + req = c.newRequest(op, input, output) + output = &AbortMultipartUploadOutput{} + req.Data = output + return +} + +// Aborts a multipart upload. +// +// To verify that all parts have been removed, so you don't get charged for +// the part storage, you should call the List Parts operation and ensure the +// parts list is empty. 
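+// A hypothetical usage sketch, not part of the generated source (the bucket,
+// key, and upload ID values are assumptions; svc is assumed to be an *S3
+// client from s3.New):
+//
+//    _, err := svc.AbortMultipartUpload(&s3.AbortMultipartUploadInput{
+//        Bucket:   aws.String("my-bucket"),
+//        Key:      aws.String("my-key"),
+//        UploadId: aws.String("example-upload-id"),
+//    })
+//    if err != nil {
+//        log.Println("abort failed:", err) // parts may still be accruing charges
+//    }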
+func (c *S3) AbortMultipartUpload(input *AbortMultipartUploadInput) (*AbortMultipartUploadOutput, error) { + req, out := c.AbortMultipartUploadRequest(input) + err := req.Send() + return out, err +} + +const opCompleteMultipartUpload = "CompleteMultipartUpload" + +// CompleteMultipartUploadRequest generates a request for the CompleteMultipartUpload operation. +func (c *S3) CompleteMultipartUploadRequest(input *CompleteMultipartUploadInput) (req *request.Request, output *CompleteMultipartUploadOutput) { + op := &request.Operation{ + Name: opCompleteMultipartUpload, + HTTPMethod: "POST", + HTTPPath: "/{Bucket}/{Key+}", + } + + if input == nil { + input = &CompleteMultipartUploadInput{} + } + + req = c.newRequest(op, input, output) + output = &CompleteMultipartUploadOutput{} + req.Data = output + return +} + +// Completes a multipart upload by assembling previously uploaded parts. +func (c *S3) CompleteMultipartUpload(input *CompleteMultipartUploadInput) (*CompleteMultipartUploadOutput, error) { + req, out := c.CompleteMultipartUploadRequest(input) + err := req.Send() + return out, err +} + +const opCopyObject = "CopyObject" + +// CopyObjectRequest generates a request for the CopyObject operation. +func (c *S3) CopyObjectRequest(input *CopyObjectInput) (req *request.Request, output *CopyObjectOutput) { + op := &request.Operation{ + Name: opCopyObject, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}/{Key+}", + } + + if input == nil { + input = &CopyObjectInput{} + } + + req = c.newRequest(op, input, output) + output = &CopyObjectOutput{} + req.Data = output + return +} + +// Creates a copy of an object that is already stored in Amazon S3. +func (c *S3) CopyObject(input *CopyObjectInput) (*CopyObjectOutput, error) { + req, out := c.CopyObjectRequest(input) + err := req.Send() + return out, err +} + +const opCreateBucket = "CreateBucket" + +// CreateBucketRequest generates a request for the CreateBucket operation. +func (c *S3) CreateBucketRequest(input *CreateBucketInput) (req *request.Request, output *CreateBucketOutput) { + op := &request.Operation{ + Name: opCreateBucket, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}", + } + + if input == nil { + input = &CreateBucketInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateBucketOutput{} + req.Data = output + return +} + +// Creates a new bucket. +func (c *S3) CreateBucket(input *CreateBucketInput) (*CreateBucketOutput, error) { + req, out := c.CreateBucketRequest(input) + err := req.Send() + return out, err +} + +const opCreateMultipartUpload = "CreateMultipartUpload" + +// CreateMultipartUploadRequest generates a request for the CreateMultipartUpload operation. +func (c *S3) CreateMultipartUploadRequest(input *CreateMultipartUploadInput) (req *request.Request, output *CreateMultipartUploadOutput) { + op := &request.Operation{ + Name: opCreateMultipartUpload, + HTTPMethod: "POST", + HTTPPath: "/{Bucket}/{Key+}?uploads", + } + + if input == nil { + input = &CreateMultipartUploadInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateMultipartUploadOutput{} + req.Data = output + return +} + +// Initiates a multipart upload and returns an upload ID. +// +// Note: After you initiate multipart upload and upload one or more parts, you +// must either complete or abort multipart upload in order to stop getting charged +// for storage of the uploaded parts. Only after you either complete or abort +// multipart upload, Amazon S3 frees up the parts storage and stops charging +// you for the parts storage. 
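+// A minimal sketch of the lifecycle described above (names and values are
+// assumptions; error handling elided for brevity):
+//
+//    created, _ := svc.CreateMultipartUpload(&s3.CreateMultipartUploadInput{
+//        Bucket: aws.String("my-bucket"),
+//        Key:    aws.String("my-key"),
+//    })
+//    // ... call UploadPart with created.UploadId, collecting part ETags ...
+//    // then call CompleteMultipartUpload (or AbortMultipartUpload) with the
+//    // same UploadId so the uploaded parts stop accruing storage charges.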
+func (c *S3) CreateMultipartUpload(input *CreateMultipartUploadInput) (*CreateMultipartUploadOutput, error) { + req, out := c.CreateMultipartUploadRequest(input) + err := req.Send() + return out, err +} + +const opDeleteBucket = "DeleteBucket" + +// DeleteBucketRequest generates a request for the DeleteBucket operation. +func (c *S3) DeleteBucketRequest(input *DeleteBucketInput) (req *request.Request, output *DeleteBucketOutput) { + op := &request.Operation{ + Name: opDeleteBucket, + HTTPMethod: "DELETE", + HTTPPath: "/{Bucket}", + } + + if input == nil { + input = &DeleteBucketInput{} + } + + req = c.newRequest(op, input, output) + output = &DeleteBucketOutput{} + req.Data = output + return +} + +// Deletes the bucket. All objects (including all object versions and Delete +// Markers) in the bucket must be deleted before the bucket itself can be deleted. +func (c *S3) DeleteBucket(input *DeleteBucketInput) (*DeleteBucketOutput, error) { + req, out := c.DeleteBucketRequest(input) + err := req.Send() + return out, err +} + +const opDeleteBucketCors = "DeleteBucketCors" + +// DeleteBucketCorsRequest generates a request for the DeleteBucketCors operation. +func (c *S3) DeleteBucketCorsRequest(input *DeleteBucketCorsInput) (req *request.Request, output *DeleteBucketCorsOutput) { + op := &request.Operation{ + Name: opDeleteBucketCors, + HTTPMethod: "DELETE", + HTTPPath: "/{Bucket}?cors", + } + + if input == nil { + input = &DeleteBucketCorsInput{} + } + + req = c.newRequest(op, input, output) + output = &DeleteBucketCorsOutput{} + req.Data = output + return +} + +// Deletes the cors configuration information set for the bucket. +func (c *S3) DeleteBucketCors(input *DeleteBucketCorsInput) (*DeleteBucketCorsOutput, error) { + req, out := c.DeleteBucketCorsRequest(input) + err := req.Send() + return out, err +} + +const opDeleteBucketLifecycle = "DeleteBucketLifecycle" + +// DeleteBucketLifecycleRequest generates a request for the DeleteBucketLifecycle operation. +func (c *S3) DeleteBucketLifecycleRequest(input *DeleteBucketLifecycleInput) (req *request.Request, output *DeleteBucketLifecycleOutput) { + op := &request.Operation{ + Name: opDeleteBucketLifecycle, + HTTPMethod: "DELETE", + HTTPPath: "/{Bucket}?lifecycle", + } + + if input == nil { + input = &DeleteBucketLifecycleInput{} + } + + req = c.newRequest(op, input, output) + output = &DeleteBucketLifecycleOutput{} + req.Data = output + return +} + +// Deletes the lifecycle configuration from the bucket. +func (c *S3) DeleteBucketLifecycle(input *DeleteBucketLifecycleInput) (*DeleteBucketLifecycleOutput, error) { + req, out := c.DeleteBucketLifecycleRequest(input) + err := req.Send() + return out, err +} + +const opDeleteBucketPolicy = "DeleteBucketPolicy" + +// DeleteBucketPolicyRequest generates a request for the DeleteBucketPolicy operation. +func (c *S3) DeleteBucketPolicyRequest(input *DeleteBucketPolicyInput) (req *request.Request, output *DeleteBucketPolicyOutput) { + op := &request.Operation{ + Name: opDeleteBucketPolicy, + HTTPMethod: "DELETE", + HTTPPath: "/{Bucket}?policy", + } + + if input == nil { + input = &DeleteBucketPolicyInput{} + } + + req = c.newRequest(op, input, output) + output = &DeleteBucketPolicyOutput{} + req.Data = output + return +} + +// Deletes the policy from the bucket. 
+func (c *S3) DeleteBucketPolicy(input *DeleteBucketPolicyInput) (*DeleteBucketPolicyOutput, error) { + req, out := c.DeleteBucketPolicyRequest(input) + err := req.Send() + return out, err +} + +const opDeleteBucketReplication = "DeleteBucketReplication" + +// DeleteBucketReplicationRequest generates a request for the DeleteBucketReplication operation. +func (c *S3) DeleteBucketReplicationRequest(input *DeleteBucketReplicationInput) (req *request.Request, output *DeleteBucketReplicationOutput) { + op := &request.Operation{ + Name: opDeleteBucketReplication, + HTTPMethod: "DELETE", + HTTPPath: "/{Bucket}?replication", + } + + if input == nil { + input = &DeleteBucketReplicationInput{} + } + + req = c.newRequest(op, input, output) + output = &DeleteBucketReplicationOutput{} + req.Data = output + return +} + +func (c *S3) DeleteBucketReplication(input *DeleteBucketReplicationInput) (*DeleteBucketReplicationOutput, error) { + req, out := c.DeleteBucketReplicationRequest(input) + err := req.Send() + return out, err +} + +const opDeleteBucketTagging = "DeleteBucketTagging" + +// DeleteBucketTaggingRequest generates a request for the DeleteBucketTagging operation. +func (c *S3) DeleteBucketTaggingRequest(input *DeleteBucketTaggingInput) (req *request.Request, output *DeleteBucketTaggingOutput) { + op := &request.Operation{ + Name: opDeleteBucketTagging, + HTTPMethod: "DELETE", + HTTPPath: "/{Bucket}?tagging", + } + + if input == nil { + input = &DeleteBucketTaggingInput{} + } + + req = c.newRequest(op, input, output) + output = &DeleteBucketTaggingOutput{} + req.Data = output + return +} + +// Deletes the tags from the bucket. +func (c *S3) DeleteBucketTagging(input *DeleteBucketTaggingInput) (*DeleteBucketTaggingOutput, error) { + req, out := c.DeleteBucketTaggingRequest(input) + err := req.Send() + return out, err +} + +const opDeleteBucketWebsite = "DeleteBucketWebsite" + +// DeleteBucketWebsiteRequest generates a request for the DeleteBucketWebsite operation. +func (c *S3) DeleteBucketWebsiteRequest(input *DeleteBucketWebsiteInput) (req *request.Request, output *DeleteBucketWebsiteOutput) { + op := &request.Operation{ + Name: opDeleteBucketWebsite, + HTTPMethod: "DELETE", + HTTPPath: "/{Bucket}?website", + } + + if input == nil { + input = &DeleteBucketWebsiteInput{} + } + + req = c.newRequest(op, input, output) + output = &DeleteBucketWebsiteOutput{} + req.Data = output + return +} + +// This operation removes the website configuration from the bucket. +func (c *S3) DeleteBucketWebsite(input *DeleteBucketWebsiteInput) (*DeleteBucketWebsiteOutput, error) { + req, out := c.DeleteBucketWebsiteRequest(input) + err := req.Send() + return out, err +} + +const opDeleteObject = "DeleteObject" + +// DeleteObjectRequest generates a request for the DeleteObject operation. +func (c *S3) DeleteObjectRequest(input *DeleteObjectInput) (req *request.Request, output *DeleteObjectOutput) { + op := &request.Operation{ + Name: opDeleteObject, + HTTPMethod: "DELETE", + HTTPPath: "/{Bucket}/{Key+}", + } + + if input == nil { + input = &DeleteObjectInput{} + } + + req = c.newRequest(op, input, output) + output = &DeleteObjectOutput{} + req.Data = output + return +} + +// Removes the null version (if there is one) of an object and inserts a delete +// marker, which becomes the latest version of the object. If there isn't a +// null version, Amazon S3 does not remove any objects. 
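+// A hedged sketch (the bucket and key names are assumptions):
+//
+//    out, err := svc.DeleteObject(&s3.DeleteObjectInput{
+//        Bucket: aws.String("my-bucket"),
+//        Key:    aws.String("my-key"),
+//    })
+//    // On a versioned bucket, out.DeleteMarker reports whether a delete
+//    // marker was created rather than data being removed.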
+func (c *S3) DeleteObject(input *DeleteObjectInput) (*DeleteObjectOutput, error) { + req, out := c.DeleteObjectRequest(input) + err := req.Send() + return out, err +} + +const opDeleteObjects = "DeleteObjects" + +// DeleteObjectsRequest generates a request for the DeleteObjects operation. +func (c *S3) DeleteObjectsRequest(input *DeleteObjectsInput) (req *request.Request, output *DeleteObjectsOutput) { + op := &request.Operation{ + Name: opDeleteObjects, + HTTPMethod: "POST", + HTTPPath: "/{Bucket}?delete", + } + + if input == nil { + input = &DeleteObjectsInput{} + } + + req = c.newRequest(op, input, output) + output = &DeleteObjectsOutput{} + req.Data = output + return +} + +// This operation enables you to delete multiple objects from a bucket using +// a single HTTP request. You may specify up to 1000 keys. +func (c *S3) DeleteObjects(input *DeleteObjectsInput) (*DeleteObjectsOutput, error) { + req, out := c.DeleteObjectsRequest(input) + err := req.Send() + return out, err +} + +const opGetBucketAcl = "GetBucketAcl" + +// GetBucketAclRequest generates a request for the GetBucketAcl operation. +func (c *S3) GetBucketAclRequest(input *GetBucketAclInput) (req *request.Request, output *GetBucketAclOutput) { + op := &request.Operation{ + Name: opGetBucketAcl, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?acl", + } + + if input == nil { + input = &GetBucketAclInput{} + } + + req = c.newRequest(op, input, output) + output = &GetBucketAclOutput{} + req.Data = output + return +} + +// Gets the access control policy for the bucket. +func (c *S3) GetBucketAcl(input *GetBucketAclInput) (*GetBucketAclOutput, error) { + req, out := c.GetBucketAclRequest(input) + err := req.Send() + return out, err +} + +const opGetBucketCors = "GetBucketCors" + +// GetBucketCorsRequest generates a request for the GetBucketCors operation. +func (c *S3) GetBucketCorsRequest(input *GetBucketCorsInput) (req *request.Request, output *GetBucketCorsOutput) { + op := &request.Operation{ + Name: opGetBucketCors, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?cors", + } + + if input == nil { + input = &GetBucketCorsInput{} + } + + req = c.newRequest(op, input, output) + output = &GetBucketCorsOutput{} + req.Data = output + return +} + +// Returns the cors configuration for the bucket. +func (c *S3) GetBucketCors(input *GetBucketCorsInput) (*GetBucketCorsOutput, error) { + req, out := c.GetBucketCorsRequest(input) + err := req.Send() + return out, err +} + +const opGetBucketLifecycle = "GetBucketLifecycle" + +// GetBucketLifecycleRequest generates a request for the GetBucketLifecycle operation. +func (c *S3) GetBucketLifecycleRequest(input *GetBucketLifecycleInput) (req *request.Request, output *GetBucketLifecycleOutput) { + op := &request.Operation{ + Name: opGetBucketLifecycle, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?lifecycle", + } + + if input == nil { + input = &GetBucketLifecycleInput{} + } + + req = c.newRequest(op, input, output) + output = &GetBucketLifecycleOutput{} + req.Data = output + return +} + +// Deprecated, see the GetBucketLifecycleConfiguration operation. +func (c *S3) GetBucketLifecycle(input *GetBucketLifecycleInput) (*GetBucketLifecycleOutput, error) { + req, out := c.GetBucketLifecycleRequest(input) + err := req.Send() + return out, err +} + +const opGetBucketLifecycleConfiguration = "GetBucketLifecycleConfiguration" + +// GetBucketLifecycleConfigurationRequest generates a request for the GetBucketLifecycleConfiguration operation. 
+func (c *S3) GetBucketLifecycleConfigurationRequest(input *GetBucketLifecycleConfigurationInput) (req *request.Request, output *GetBucketLifecycleConfigurationOutput) { + op := &request.Operation{ + Name: opGetBucketLifecycleConfiguration, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?lifecycle", + } + + if input == nil { + input = &GetBucketLifecycleConfigurationInput{} + } + + req = c.newRequest(op, input, output) + output = &GetBucketLifecycleConfigurationOutput{} + req.Data = output + return +} + +// Returns the lifecycle configuration information set on the bucket. +func (c *S3) GetBucketLifecycleConfiguration(input *GetBucketLifecycleConfigurationInput) (*GetBucketLifecycleConfigurationOutput, error) { + req, out := c.GetBucketLifecycleConfigurationRequest(input) + err := req.Send() + return out, err +} + +const opGetBucketLocation = "GetBucketLocation" + +// GetBucketLocationRequest generates a request for the GetBucketLocation operation. +func (c *S3) GetBucketLocationRequest(input *GetBucketLocationInput) (req *request.Request, output *GetBucketLocationOutput) { + op := &request.Operation{ + Name: opGetBucketLocation, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?location", + } + + if input == nil { + input = &GetBucketLocationInput{} + } + + req = c.newRequest(op, input, output) + output = &GetBucketLocationOutput{} + req.Data = output + return +} + +// Returns the region the bucket resides in. +func (c *S3) GetBucketLocation(input *GetBucketLocationInput) (*GetBucketLocationOutput, error) { + req, out := c.GetBucketLocationRequest(input) + err := req.Send() + return out, err +} + +const opGetBucketLogging = "GetBucketLogging" + +// GetBucketLoggingRequest generates a request for the GetBucketLogging operation. +func (c *S3) GetBucketLoggingRequest(input *GetBucketLoggingInput) (req *request.Request, output *GetBucketLoggingOutput) { + op := &request.Operation{ + Name: opGetBucketLogging, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?logging", + } + + if input == nil { + input = &GetBucketLoggingInput{} + } + + req = c.newRequest(op, input, output) + output = &GetBucketLoggingOutput{} + req.Data = output + return +} + +// Returns the logging status of a bucket and the permissions users have to +// view and modify that status. To use GET, you must be the bucket owner. +func (c *S3) GetBucketLogging(input *GetBucketLoggingInput) (*GetBucketLoggingOutput, error) { + req, out := c.GetBucketLoggingRequest(input) + err := req.Send() + return out, err +} + +const opGetBucketNotification = "GetBucketNotification" + +// GetBucketNotificationRequest generates a request for the GetBucketNotification operation. +func (c *S3) GetBucketNotificationRequest(input *GetBucketNotificationConfigurationRequest) (req *request.Request, output *NotificationConfigurationDeprecated) { + op := &request.Operation{ + Name: opGetBucketNotification, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?notification", + } + + if input == nil { + input = &GetBucketNotificationConfigurationRequest{} + } + + req = c.newRequest(op, input, output) + output = &NotificationConfigurationDeprecated{} + req.Data = output + return +} + +// Deprecated, see the GetBucketNotificationConfiguration operation. 
+func (c *S3) GetBucketNotification(input *GetBucketNotificationConfigurationRequest) (*NotificationConfigurationDeprecated, error) { + req, out := c.GetBucketNotificationRequest(input) + err := req.Send() + return out, err +} + +const opGetBucketNotificationConfiguration = "GetBucketNotificationConfiguration" + +// GetBucketNotificationConfigurationRequest generates a request for the GetBucketNotificationConfiguration operation. +func (c *S3) GetBucketNotificationConfigurationRequest(input *GetBucketNotificationConfigurationRequest) (req *request.Request, output *NotificationConfiguration) { + op := &request.Operation{ + Name: opGetBucketNotificationConfiguration, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?notification", + } + + if input == nil { + input = &GetBucketNotificationConfigurationRequest{} + } + + req = c.newRequest(op, input, output) + output = &NotificationConfiguration{} + req.Data = output + return +} + +// Returns the notification configuration of a bucket. +func (c *S3) GetBucketNotificationConfiguration(input *GetBucketNotificationConfigurationRequest) (*NotificationConfiguration, error) { + req, out := c.GetBucketNotificationConfigurationRequest(input) + err := req.Send() + return out, err +} + +const opGetBucketPolicy = "GetBucketPolicy" + +// GetBucketPolicyRequest generates a request for the GetBucketPolicy operation. +func (c *S3) GetBucketPolicyRequest(input *GetBucketPolicyInput) (req *request.Request, output *GetBucketPolicyOutput) { + op := &request.Operation{ + Name: opGetBucketPolicy, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?policy", + } + + if input == nil { + input = &GetBucketPolicyInput{} + } + + req = c.newRequest(op, input, output) + output = &GetBucketPolicyOutput{} + req.Data = output + return +} + +// Returns the policy of a specified bucket. +func (c *S3) GetBucketPolicy(input *GetBucketPolicyInput) (*GetBucketPolicyOutput, error) { + req, out := c.GetBucketPolicyRequest(input) + err := req.Send() + return out, err +} + +const opGetBucketReplication = "GetBucketReplication" + +// GetBucketReplicationRequest generates a request for the GetBucketReplication operation. +func (c *S3) GetBucketReplicationRequest(input *GetBucketReplicationInput) (req *request.Request, output *GetBucketReplicationOutput) { + op := &request.Operation{ + Name: opGetBucketReplication, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?replication", + } + + if input == nil { + input = &GetBucketReplicationInput{} + } + + req = c.newRequest(op, input, output) + output = &GetBucketReplicationOutput{} + req.Data = output + return +} + +func (c *S3) GetBucketReplication(input *GetBucketReplicationInput) (*GetBucketReplicationOutput, error) { + req, out := c.GetBucketReplicationRequest(input) + err := req.Send() + return out, err +} + +const opGetBucketRequestPayment = "GetBucketRequestPayment" + +// GetBucketRequestPaymentRequest generates a request for the GetBucketRequestPayment operation. +func (c *S3) GetBucketRequestPaymentRequest(input *GetBucketRequestPaymentInput) (req *request.Request, output *GetBucketRequestPaymentOutput) { + op := &request.Operation{ + Name: opGetBucketRequestPayment, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?requestPayment", + } + + if input == nil { + input = &GetBucketRequestPaymentInput{} + } + + req = c.newRequest(op, input, output) + output = &GetBucketRequestPaymentOutput{} + req.Data = output + return +} + +// Returns the request payment configuration of a bucket. 
+func (c *S3) GetBucketRequestPayment(input *GetBucketRequestPaymentInput) (*GetBucketRequestPaymentOutput, error) { + req, out := c.GetBucketRequestPaymentRequest(input) + err := req.Send() + return out, err +} + +const opGetBucketTagging = "GetBucketTagging" + +// GetBucketTaggingRequest generates a request for the GetBucketTagging operation. +func (c *S3) GetBucketTaggingRequest(input *GetBucketTaggingInput) (req *request.Request, output *GetBucketTaggingOutput) { + op := &request.Operation{ + Name: opGetBucketTagging, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?tagging", + } + + if input == nil { + input = &GetBucketTaggingInput{} + } + + req = c.newRequest(op, input, output) + output = &GetBucketTaggingOutput{} + req.Data = output + return +} + +// Returns the tag set associated with the bucket. +func (c *S3) GetBucketTagging(input *GetBucketTaggingInput) (*GetBucketTaggingOutput, error) { + req, out := c.GetBucketTaggingRequest(input) + err := req.Send() + return out, err +} + +const opGetBucketVersioning = "GetBucketVersioning" + +// GetBucketVersioningRequest generates a request for the GetBucketVersioning operation. +func (c *S3) GetBucketVersioningRequest(input *GetBucketVersioningInput) (req *request.Request, output *GetBucketVersioningOutput) { + op := &request.Operation{ + Name: opGetBucketVersioning, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?versioning", + } + + if input == nil { + input = &GetBucketVersioningInput{} + } + + req = c.newRequest(op, input, output) + output = &GetBucketVersioningOutput{} + req.Data = output + return +} + +// Returns the versioning state of a bucket. +func (c *S3) GetBucketVersioning(input *GetBucketVersioningInput) (*GetBucketVersioningOutput, error) { + req, out := c.GetBucketVersioningRequest(input) + err := req.Send() + return out, err +} + +const opGetBucketWebsite = "GetBucketWebsite" + +// GetBucketWebsiteRequest generates a request for the GetBucketWebsite operation. +func (c *S3) GetBucketWebsiteRequest(input *GetBucketWebsiteInput) (req *request.Request, output *GetBucketWebsiteOutput) { + op := &request.Operation{ + Name: opGetBucketWebsite, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?website", + } + + if input == nil { + input = &GetBucketWebsiteInput{} + } + + req = c.newRequest(op, input, output) + output = &GetBucketWebsiteOutput{} + req.Data = output + return +} + +// Returns the website configuration for a bucket. +func (c *S3) GetBucketWebsite(input *GetBucketWebsiteInput) (*GetBucketWebsiteOutput, error) { + req, out := c.GetBucketWebsiteRequest(input) + err := req.Send() + return out, err +} + +const opGetObject = "GetObject" + +// GetObjectRequest generates a request for the GetObject operation. +func (c *S3) GetObjectRequest(input *GetObjectInput) (req *request.Request, output *GetObjectOutput) { + op := &request.Operation{ + Name: opGetObject, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}/{Key+}", + } + + if input == nil { + input = &GetObjectInput{} + } + + req = c.newRequest(op, input, output) + output = &GetObjectOutput{} + req.Data = output + return +} + +// Retrieves objects from Amazon S3. +func (c *S3) GetObject(input *GetObjectInput) (*GetObjectOutput, error) { + req, out := c.GetObjectRequest(input) + err := req.Send() + return out, err +} + +const opGetObjectAcl = "GetObjectAcl" + +// GetObjectAclRequest generates a request for the GetObjectAcl operation. 
+func (c *S3) GetObjectAclRequest(input *GetObjectAclInput) (req *request.Request, output *GetObjectAclOutput) { + op := &request.Operation{ + Name: opGetObjectAcl, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}/{Key+}?acl", + } + + if input == nil { + input = &GetObjectAclInput{} + } + + req = c.newRequest(op, input, output) + output = &GetObjectAclOutput{} + req.Data = output + return +} + +// Returns the access control list (ACL) of an object. +func (c *S3) GetObjectAcl(input *GetObjectAclInput) (*GetObjectAclOutput, error) { + req, out := c.GetObjectAclRequest(input) + err := req.Send() + return out, err +} + +const opGetObjectTorrent = "GetObjectTorrent" + +// GetObjectTorrentRequest generates a request for the GetObjectTorrent operation. +func (c *S3) GetObjectTorrentRequest(input *GetObjectTorrentInput) (req *request.Request, output *GetObjectTorrentOutput) { + op := &request.Operation{ + Name: opGetObjectTorrent, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}/{Key+}?torrent", + } + + if input == nil { + input = &GetObjectTorrentInput{} + } + + req = c.newRequest(op, input, output) + output = &GetObjectTorrentOutput{} + req.Data = output + return +} + +// Return torrent files from a bucket. +func (c *S3) GetObjectTorrent(input *GetObjectTorrentInput) (*GetObjectTorrentOutput, error) { + req, out := c.GetObjectTorrentRequest(input) + err := req.Send() + return out, err +} + +const opHeadBucket = "HeadBucket" + +// HeadBucketRequest generates a request for the HeadBucket operation. +func (c *S3) HeadBucketRequest(input *HeadBucketInput) (req *request.Request, output *HeadBucketOutput) { + op := &request.Operation{ + Name: opHeadBucket, + HTTPMethod: "HEAD", + HTTPPath: "/{Bucket}", + } + + if input == nil { + input = &HeadBucketInput{} + } + + req = c.newRequest(op, input, output) + output = &HeadBucketOutput{} + req.Data = output + return +} + +// This operation is useful to determine if a bucket exists and you have permission +// to access it. +func (c *S3) HeadBucket(input *HeadBucketInput) (*HeadBucketOutput, error) { + req, out := c.HeadBucketRequest(input) + err := req.Send() + return out, err +} + +const opHeadObject = "HeadObject" + +// HeadObjectRequest generates a request for the HeadObject operation. +func (c *S3) HeadObjectRequest(input *HeadObjectInput) (req *request.Request, output *HeadObjectOutput) { + op := &request.Operation{ + Name: opHeadObject, + HTTPMethod: "HEAD", + HTTPPath: "/{Bucket}/{Key+}", + } + + if input == nil { + input = &HeadObjectInput{} + } + + req = c.newRequest(op, input, output) + output = &HeadObjectOutput{} + req.Data = output + return +} + +// The HEAD operation retrieves metadata from an object without returning the +// object itself. This operation is useful if you're only interested in an object's +// metadata. To use HEAD, you must have READ access to the object. +func (c *S3) HeadObject(input *HeadObjectInput) (*HeadObjectOutput, error) { + req, out := c.HeadObjectRequest(input) + err := req.Send() + return out, err +} + +const opListBuckets = "ListBuckets" + +// ListBucketsRequest generates a request for the ListBuckets operation. 
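+// The returned request is not sent until its Send method is called, which
+// leaves room to customize it first. A sketch (assumes svc is an *S3 client):
+//
+//    req, out := svc.ListBucketsRequest(&s3.ListBucketsInput{})
+//    if err := req.Send(); err == nil { // out is populated once Send returns
+//        fmt.Println(len(out.Buckets), "buckets")
+//    }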
+func (c *S3) ListBucketsRequest(input *ListBucketsInput) (req *request.Request, output *ListBucketsOutput) { + op := &request.Operation{ + Name: opListBuckets, + HTTPMethod: "GET", + HTTPPath: "/", + } + + if input == nil { + input = &ListBucketsInput{} + } + + req = c.newRequest(op, input, output) + output = &ListBucketsOutput{} + req.Data = output + return +} + +// Returns a list of all buckets owned by the authenticated sender of the request. +func (c *S3) ListBuckets(input *ListBucketsInput) (*ListBucketsOutput, error) { + req, out := c.ListBucketsRequest(input) + err := req.Send() + return out, err +} + +const opListMultipartUploads = "ListMultipartUploads" + +// ListMultipartUploadsRequest generates a request for the ListMultipartUploads operation. +func (c *S3) ListMultipartUploadsRequest(input *ListMultipartUploadsInput) (req *request.Request, output *ListMultipartUploadsOutput) { + op := &request.Operation{ + Name: opListMultipartUploads, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?uploads", + Paginator: &request.Paginator{ + InputTokens: []string{"KeyMarker", "UploadIdMarker"}, + OutputTokens: []string{"NextKeyMarker", "NextUploadIdMarker"}, + LimitToken: "MaxUploads", + TruncationToken: "IsTruncated", + }, + } + + if input == nil { + input = &ListMultipartUploadsInput{} + } + + req = c.newRequest(op, input, output) + output = &ListMultipartUploadsOutput{} + req.Data = output + return +} + +// This operation lists in-progress multipart uploads. +func (c *S3) ListMultipartUploads(input *ListMultipartUploadsInput) (*ListMultipartUploadsOutput, error) { + req, out := c.ListMultipartUploadsRequest(input) + err := req.Send() + return out, err +} + +func (c *S3) ListMultipartUploadsPages(input *ListMultipartUploadsInput, fn func(p *ListMultipartUploadsOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.ListMultipartUploadsRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*ListMultipartUploadsOutput), lastPage) + }) +} + +const opListObjectVersions = "ListObjectVersions" + +// ListObjectVersionsRequest generates a request for the ListObjectVersions operation. +func (c *S3) ListObjectVersionsRequest(input *ListObjectVersionsInput) (req *request.Request, output *ListObjectVersionsOutput) { + op := &request.Operation{ + Name: opListObjectVersions, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?versions", + Paginator: &request.Paginator{ + InputTokens: []string{"KeyMarker", "VersionIdMarker"}, + OutputTokens: []string{"NextKeyMarker", "NextVersionIdMarker"}, + LimitToken: "MaxKeys", + TruncationToken: "IsTruncated", + }, + } + + if input == nil { + input = &ListObjectVersionsInput{} + } + + req = c.newRequest(op, input, output) + output = &ListObjectVersionsOutput{} + req.Data = output + return +} + +// Returns metadata about all of the versions of objects in a bucket. 
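+// Results are paginated; the ListObjectVersionsPages method defined below
+// visits every page. A sketch (the bucket name is an assumption):
+//
+//    err := svc.ListObjectVersionsPages(&s3.ListObjectVersionsInput{
+//        Bucket: aws.String("my-bucket"),
+//    }, func(page *s3.ListObjectVersionsOutput, lastPage bool) bool {
+//        fmt.Println(len(page.Versions), "versions on this page")
+//        return true // return false to stop paging early
+//    })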
+func (c *S3) ListObjectVersions(input *ListObjectVersionsInput) (*ListObjectVersionsOutput, error) { + req, out := c.ListObjectVersionsRequest(input) + err := req.Send() + return out, err +} + +func (c *S3) ListObjectVersionsPages(input *ListObjectVersionsInput, fn func(p *ListObjectVersionsOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.ListObjectVersionsRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*ListObjectVersionsOutput), lastPage) + }) +} + +const opListObjects = "ListObjects" + +// ListObjectsRequest generates a request for the ListObjects operation. +func (c *S3) ListObjectsRequest(input *ListObjectsInput) (req *request.Request, output *ListObjectsOutput) { + op := &request.Operation{ + Name: opListObjects, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}", + Paginator: &request.Paginator{ + InputTokens: []string{"Marker"}, + OutputTokens: []string{"NextMarker || Contents[-1].Key"}, + LimitToken: "MaxKeys", + TruncationToken: "IsTruncated", + }, + } + + if input == nil { + input = &ListObjectsInput{} + } + + req = c.newRequest(op, input, output) + output = &ListObjectsOutput{} + req.Data = output + return +} + +// Returns some or all (up to 1000) of the objects in a bucket. You can use +// the request parameters as selection criteria to return a subset of the objects +// in a bucket. +func (c *S3) ListObjects(input *ListObjectsInput) (*ListObjectsOutput, error) { + req, out := c.ListObjectsRequest(input) + err := req.Send() + return out, err +} + +func (c *S3) ListObjectsPages(input *ListObjectsInput, fn func(p *ListObjectsOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.ListObjectsRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*ListObjectsOutput), lastPage) + }) +} + +const opListParts = "ListParts" + +// ListPartsRequest generates a request for the ListParts operation. +func (c *S3) ListPartsRequest(input *ListPartsInput) (req *request.Request, output *ListPartsOutput) { + op := &request.Operation{ + Name: opListParts, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}/{Key+}", + Paginator: &request.Paginator{ + InputTokens: []string{"PartNumberMarker"}, + OutputTokens: []string{"NextPartNumberMarker"}, + LimitToken: "MaxParts", + TruncationToken: "IsTruncated", + }, + } + + if input == nil { + input = &ListPartsInput{} + } + + req = c.newRequest(op, input, output) + output = &ListPartsOutput{} + req.Data = output + return +} + +// Lists the parts that have been uploaded for a specific multipart upload. +func (c *S3) ListParts(input *ListPartsInput) (*ListPartsOutput, error) { + req, out := c.ListPartsRequest(input) + err := req.Send() + return out, err +} + +func (c *S3) ListPartsPages(input *ListPartsInput, fn func(p *ListPartsOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.ListPartsRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*ListPartsOutput), lastPage) + }) +} + +const opPutBucketAcl = "PutBucketAcl" + +// PutBucketAclRequest generates a request for the PutBucketAcl operation. 
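+// A sketch using the PutBucketAcl convenience wrapper defined below (the
+// bucket name and canned ACL value are assumptions):
+//
+//    _, err := svc.PutBucketAcl(&s3.PutBucketAclInput{
+//        Bucket: aws.String("my-bucket"),
+//        ACL:    aws.String("private"),
+//    })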
+func (c *S3) PutBucketAclRequest(input *PutBucketAclInput) (req *request.Request, output *PutBucketAclOutput) { + op := &request.Operation{ + Name: opPutBucketAcl, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}?acl", + } + + if input == nil { + input = &PutBucketAclInput{} + } + + req = c.newRequest(op, input, output) + output = &PutBucketAclOutput{} + req.Data = output + return +} + +// Sets the permissions on a bucket using access control lists (ACL). +func (c *S3) PutBucketAcl(input *PutBucketAclInput) (*PutBucketAclOutput, error) { + req, out := c.PutBucketAclRequest(input) + err := req.Send() + return out, err +} + +const opPutBucketCors = "PutBucketCors" + +// PutBucketCorsRequest generates a request for the PutBucketCors operation. +func (c *S3) PutBucketCorsRequest(input *PutBucketCorsInput) (req *request.Request, output *PutBucketCorsOutput) { + op := &request.Operation{ + Name: opPutBucketCors, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}?cors", + } + + if input == nil { + input = &PutBucketCorsInput{} + } + + req = c.newRequest(op, input, output) + output = &PutBucketCorsOutput{} + req.Data = output + return +} + +// Sets the cors configuration for a bucket. +func (c *S3) PutBucketCors(input *PutBucketCorsInput) (*PutBucketCorsOutput, error) { + req, out := c.PutBucketCorsRequest(input) + err := req.Send() + return out, err +} + +const opPutBucketLifecycle = "PutBucketLifecycle" + +// PutBucketLifecycleRequest generates a request for the PutBucketLifecycle operation. +func (c *S3) PutBucketLifecycleRequest(input *PutBucketLifecycleInput) (req *request.Request, output *PutBucketLifecycleOutput) { + op := &request.Operation{ + Name: opPutBucketLifecycle, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}?lifecycle", + } + + if input == nil { + input = &PutBucketLifecycleInput{} + } + + req = c.newRequest(op, input, output) + output = &PutBucketLifecycleOutput{} + req.Data = output + return +} + +// Deprecated, see the PutBucketLifecycleConfiguration operation. +func (c *S3) PutBucketLifecycle(input *PutBucketLifecycleInput) (*PutBucketLifecycleOutput, error) { + req, out := c.PutBucketLifecycleRequest(input) + err := req.Send() + return out, err +} + +const opPutBucketLifecycleConfiguration = "PutBucketLifecycleConfiguration" + +// PutBucketLifecycleConfigurationRequest generates a request for the PutBucketLifecycleConfiguration operation. +func (c *S3) PutBucketLifecycleConfigurationRequest(input *PutBucketLifecycleConfigurationInput) (req *request.Request, output *PutBucketLifecycleConfigurationOutput) { + op := &request.Operation{ + Name: opPutBucketLifecycleConfiguration, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}?lifecycle", + } + + if input == nil { + input = &PutBucketLifecycleConfigurationInput{} + } + + req = c.newRequest(op, input, output) + output = &PutBucketLifecycleConfigurationOutput{} + req.Data = output + return +} + +// Sets lifecycle configuration for your bucket. If a lifecycle configuration +// exists, it replaces it. +func (c *S3) PutBucketLifecycleConfiguration(input *PutBucketLifecycleConfigurationInput) (*PutBucketLifecycleConfigurationOutput, error) { + req, out := c.PutBucketLifecycleConfigurationRequest(input) + err := req.Send() + return out, err +} + +const opPutBucketLogging = "PutBucketLogging" + +// PutBucketLoggingRequest generates a request for the PutBucketLogging operation. 
+func (c *S3) PutBucketLoggingRequest(input *PutBucketLoggingInput) (req *request.Request, output *PutBucketLoggingOutput) { + op := &request.Operation{ + Name: opPutBucketLogging, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}?logging", + } + + if input == nil { + input = &PutBucketLoggingInput{} + } + + req = c.newRequest(op, input, output) + output = &PutBucketLoggingOutput{} + req.Data = output + return +} + +// Sets the logging parameters for a bucket and specifies permissions for who + // can view and modify those parameters. To set the logging status of + // a bucket, you must be the bucket owner. +func (c *S3) PutBucketLogging(input *PutBucketLoggingInput) (*PutBucketLoggingOutput, error) { + req, out := c.PutBucketLoggingRequest(input) + err := req.Send() + return out, err +} + +const opPutBucketNotification = "PutBucketNotification" + +// PutBucketNotificationRequest generates a request for the PutBucketNotification operation. +func (c *S3) PutBucketNotificationRequest(input *PutBucketNotificationInput) (req *request.Request, output *PutBucketNotificationOutput) { + op := &request.Operation{ + Name: opPutBucketNotification, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}?notification", + } + + if input == nil { + input = &PutBucketNotificationInput{} + } + + req = c.newRequest(op, input, output) + output = &PutBucketNotificationOutput{} + req.Data = output + return +} + +// Deprecated, see the PutBucketNotificationConfiguration operation. +func (c *S3) PutBucketNotification(input *PutBucketNotificationInput) (*PutBucketNotificationOutput, error) { + req, out := c.PutBucketNotificationRequest(input) + err := req.Send() + return out, err +} + +const opPutBucketNotificationConfiguration = "PutBucketNotificationConfiguration" + +// PutBucketNotificationConfigurationRequest generates a request for the PutBucketNotificationConfiguration operation. +func (c *S3) PutBucketNotificationConfigurationRequest(input *PutBucketNotificationConfigurationInput) (req *request.Request, output *PutBucketNotificationConfigurationOutput) { + op := &request.Operation{ + Name: opPutBucketNotificationConfiguration, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}?notification", + } + + if input == nil { + input = &PutBucketNotificationConfigurationInput{} + } + + req = c.newRequest(op, input, output) + output = &PutBucketNotificationConfigurationOutput{} + req.Data = output + return +} + +// Enables notifications of specified events for a bucket. +func (c *S3) PutBucketNotificationConfiguration(input *PutBucketNotificationConfigurationInput) (*PutBucketNotificationConfigurationOutput, error) { + req, out := c.PutBucketNotificationConfigurationRequest(input) + err := req.Send() + return out, err +} + +const opPutBucketPolicy = "PutBucketPolicy" + +// PutBucketPolicyRequest generates a request for the PutBucketPolicy operation. +func (c *S3) PutBucketPolicyRequest(input *PutBucketPolicyInput) (req *request.Request, output *PutBucketPolicyOutput) { + op := &request.Operation{ + Name: opPutBucketPolicy, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}?policy", + } + + if input == nil { + input = &PutBucketPolicyInput{} + } + + req = c.newRequest(op, input, output) + output = &PutBucketPolicyOutput{} + req.Data = output + return +} + +// Replaces a policy on a bucket. If the bucket already has a policy, the one + // in this request completely replaces it.
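+// A hedged sketch (the bucket name is an assumption, and policyJSON stands in
+// for a complete JSON policy document):
+//
+//    _, err := svc.PutBucketPolicy(&s3.PutBucketPolicyInput{
+//        Bucket: aws.String("my-bucket"),
+//        Policy: aws.String(policyJSON),
+//    })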
+func (c *S3) PutBucketPolicy(input *PutBucketPolicyInput) (*PutBucketPolicyOutput, error) { + req, out := c.PutBucketPolicyRequest(input) + err := req.Send() + return out, err +} + +const opPutBucketReplication = "PutBucketReplication" + +// PutBucketReplicationRequest generates a request for the PutBucketReplication operation. +func (c *S3) PutBucketReplicationRequest(input *PutBucketReplicationInput) (req *request.Request, output *PutBucketReplicationOutput) { + op := &request.Operation{ + Name: opPutBucketReplication, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}?replication", + } + + if input == nil { + input = &PutBucketReplicationInput{} + } + + req = c.newRequest(op, input, output) + output = &PutBucketReplicationOutput{} + req.Data = output + return +} + +// Creates a new replication configuration (or replaces an existing one, if +// present). +func (c *S3) PutBucketReplication(input *PutBucketReplicationInput) (*PutBucketReplicationOutput, error) { + req, out := c.PutBucketReplicationRequest(input) + err := req.Send() + return out, err +} + +const opPutBucketRequestPayment = "PutBucketRequestPayment" + +// PutBucketRequestPaymentRequest generates a request for the PutBucketRequestPayment operation. +func (c *S3) PutBucketRequestPaymentRequest(input *PutBucketRequestPaymentInput) (req *request.Request, output *PutBucketRequestPaymentOutput) { + op := &request.Operation{ + Name: opPutBucketRequestPayment, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}?requestPayment", + } + + if input == nil { + input = &PutBucketRequestPaymentInput{} + } + + req = c.newRequest(op, input, output) + output = &PutBucketRequestPaymentOutput{} + req.Data = output + return +} + +// Sets the request payment configuration for a bucket. By default, the bucket +// owner pays for downloads from the bucket. This configuration parameter enables +// the bucket owner (only) to specify that the person requesting the download +// will be charged for the download. Documentation on requester pays buckets +// can be found at http://docs.aws.amazon.com/AmazonS3/latest/dev/RequesterPaysBuckets.html +func (c *S3) PutBucketRequestPayment(input *PutBucketRequestPaymentInput) (*PutBucketRequestPaymentOutput, error) { + req, out := c.PutBucketRequestPaymentRequest(input) + err := req.Send() + return out, err +} + +const opPutBucketTagging = "PutBucketTagging" + +// PutBucketTaggingRequest generates a request for the PutBucketTagging operation. +func (c *S3) PutBucketTaggingRequest(input *PutBucketTaggingInput) (req *request.Request, output *PutBucketTaggingOutput) { + op := &request.Operation{ + Name: opPutBucketTagging, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}?tagging", + } + + if input == nil { + input = &PutBucketTaggingInput{} + } + + req = c.newRequest(op, input, output) + output = &PutBucketTaggingOutput{} + req.Data = output + return +} + +// Sets the tags for a bucket. +func (c *S3) PutBucketTagging(input *PutBucketTaggingInput) (*PutBucketTaggingOutput, error) { + req, out := c.PutBucketTaggingRequest(input) + err := req.Send() + return out, err +} + +const opPutBucketVersioning = "PutBucketVersioning" + +// PutBucketVersioningRequest generates a request for the PutBucketVersioning operation. 
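+// A sketch using the PutBucketVersioning wrapper defined below (the bucket
+// name is an assumption):
+//
+//    _, err := svc.PutBucketVersioning(&s3.PutBucketVersioningInput{
+//        Bucket: aws.String("my-bucket"),
+//        VersioningConfiguration: &s3.VersioningConfiguration{
+//            Status: aws.String("Enabled"),
+//        },
+//    })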
+func (c *S3) PutBucketVersioningRequest(input *PutBucketVersioningInput) (req *request.Request, output *PutBucketVersioningOutput) { + op := &request.Operation{ + Name: opPutBucketVersioning, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}?versioning", + } + + if input == nil { + input = &PutBucketVersioningInput{} + } + + req = c.newRequest(op, input, output) + output = &PutBucketVersioningOutput{} + req.Data = output + return +} + +// Sets the versioning state of an existing bucket. To set the versioning state, + // you must be the bucket owner. +func (c *S3) PutBucketVersioning(input *PutBucketVersioningInput) (*PutBucketVersioningOutput, error) { + req, out := c.PutBucketVersioningRequest(input) + err := req.Send() + return out, err +} + +const opPutBucketWebsite = "PutBucketWebsite" + +// PutBucketWebsiteRequest generates a request for the PutBucketWebsite operation. +func (c *S3) PutBucketWebsiteRequest(input *PutBucketWebsiteInput) (req *request.Request, output *PutBucketWebsiteOutput) { + op := &request.Operation{ + Name: opPutBucketWebsite, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}?website", + } + + if input == nil { + input = &PutBucketWebsiteInput{} + } + + req = c.newRequest(op, input, output) + output = &PutBucketWebsiteOutput{} + req.Data = output + return +} + +// Sets the website configuration for a bucket. +func (c *S3) PutBucketWebsite(input *PutBucketWebsiteInput) (*PutBucketWebsiteOutput, error) { + req, out := c.PutBucketWebsiteRequest(input) + err := req.Send() + return out, err +} + +const opPutObject = "PutObject" + +// PutObjectRequest generates a request for the PutObject operation. +func (c *S3) PutObjectRequest(input *PutObjectInput) (req *request.Request, output *PutObjectOutput) { + op := &request.Operation{ + Name: opPutObject, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}/{Key+}", + } + + if input == nil { + input = &PutObjectInput{} + } + + req = c.newRequest(op, input, output) + output = &PutObjectOutput{} + req.Data = output + return +} + +// Adds an object to a bucket. +func (c *S3) PutObject(input *PutObjectInput) (*PutObjectOutput, error) { + req, out := c.PutObjectRequest(input) + err := req.Send() + return out, err +} + +const opPutObjectAcl = "PutObjectAcl" + +// PutObjectAclRequest generates a request for the PutObjectAcl operation. +func (c *S3) PutObjectAclRequest(input *PutObjectAclInput) (req *request.Request, output *PutObjectAclOutput) { + op := &request.Operation{ + Name: opPutObjectAcl, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}/{Key+}?acl", + } + + if input == nil { + input = &PutObjectAclInput{} + } + + req = c.newRequest(op, input, output) + output = &PutObjectAclOutput{} + req.Data = output + return +} + +// Uses the acl subresource to set the access control list (ACL) permissions + // for an object that already exists in a bucket. +func (c *S3) PutObjectAcl(input *PutObjectAclInput) (*PutObjectAclOutput, error) { + req, out := c.PutObjectAclRequest(input) + err := req.Send() + return out, err +} + +const opRestoreObject = "RestoreObject" + +// RestoreObjectRequest generates a request for the RestoreObject operation.
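+// A sketch using the RestoreObject wrapper defined below (values are
+// assumptions; Days controls how long the restored copy remains available):
+//
+//    _, err := svc.RestoreObject(&s3.RestoreObjectInput{
+//        Bucket: aws.String("my-bucket"),
+//        Key:    aws.String("archived-key"),
+//        RestoreRequest: &s3.RestoreRequest{
+//            Days: aws.Int64(7),
+//        },
+//    })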
+func (c *S3) RestoreObjectRequest(input *RestoreObjectInput) (req *request.Request, output *RestoreObjectOutput) { + op := &request.Operation{ + Name: opRestoreObject, + HTTPMethod: "POST", + HTTPPath: "/{Bucket}/{Key+}?restore", + } + + if input == nil { + input = &RestoreObjectInput{} + } + + req = c.newRequest(op, input, output) + output = &RestoreObjectOutput{} + req.Data = output + return +} + +// Restores an archived copy of an object back into Amazon S3 +func (c *S3) RestoreObject(input *RestoreObjectInput) (*RestoreObjectOutput, error) { + req, out := c.RestoreObjectRequest(input) + err := req.Send() + return out, err +} + +const opUploadPart = "UploadPart" + +// UploadPartRequest generates a request for the UploadPart operation. +func (c *S3) UploadPartRequest(input *UploadPartInput) (req *request.Request, output *UploadPartOutput) { + op := &request.Operation{ + Name: opUploadPart, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}/{Key+}", + } + + if input == nil { + input = &UploadPartInput{} + } + + req = c.newRequest(op, input, output) + output = &UploadPartOutput{} + req.Data = output + return +} + +// Uploads a part in a multipart upload. +// +// Note: After you initiate multipart upload and upload one or more parts, you +// must either complete or abort multipart upload in order to stop getting charged +// for storage of the uploaded parts. Only after you either complete or abort +// multipart upload, Amazon S3 frees up the parts storage and stops charging +// you for the parts storage. +func (c *S3) UploadPart(input *UploadPartInput) (*UploadPartOutput, error) { + req, out := c.UploadPartRequest(input) + err := req.Send() + return out, err +} + +const opUploadPartCopy = "UploadPartCopy" + +// UploadPartCopyRequest generates a request for the UploadPartCopy operation. +func (c *S3) UploadPartCopyRequest(input *UploadPartCopyInput) (req *request.Request, output *UploadPartCopyOutput) { + op := &request.Operation{ + Name: opUploadPartCopy, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}/{Key+}", + } + + if input == nil { + input = &UploadPartCopyInput{} + } + + req = c.newRequest(op, input, output) + output = &UploadPartCopyOutput{} + req.Data = output + return +} + +// Uploads a part by copying data from an existing object as data source. +func (c *S3) UploadPartCopy(input *UploadPartCopyInput) (*UploadPartCopyOutput, error) { + req, out := c.UploadPartCopyRequest(input) + err := req.Send() + return out, err +} + +type AbortMultipartUploadInput struct { + _ struct{} `type:"structure"` + + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` + + // Confirms that the requester knows that she or he will be charged for the + // request. Bucket owners need not specify this parameter in their requests. 
+ // Documentation on downloading objects from requester pays buckets can be found + // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html + RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` + + UploadId *string `location:"querystring" locationName:"uploadId" type:"string" required:"true"` +} + +// String returns the string representation +func (s AbortMultipartUploadInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AbortMultipartUploadInput) GoString() string { + return s.String() +} + +type AbortMultipartUploadOutput struct { + _ struct{} `type:"structure"` + + // If present, indicates that the requester was successfully charged for the + // request. + RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` +} + +// String returns the string representation +func (s AbortMultipartUploadOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AbortMultipartUploadOutput) GoString() string { + return s.String() +} + +type AccessControlPolicy struct { + _ struct{} `type:"structure"` + + // A list of grants. + Grants []*Grant `locationName:"AccessControlList" locationNameList:"Grant" type:"list"` + + Owner *Owner `type:"structure"` +} + +// String returns the string representation +func (s AccessControlPolicy) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AccessControlPolicy) GoString() string { + return s.String() +} + +type Bucket struct { + _ struct{} `type:"structure"` + + // Date the bucket was created. + CreationDate *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + // The name of the bucket. + Name *string `type:"string"` +} + +// String returns the string representation +func (s Bucket) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Bucket) GoString() string { + return s.String() +} + +type BucketLifecycleConfiguration struct { + _ struct{} `type:"structure"` + + Rules []*LifecycleRule `locationName:"Rule" type:"list" flattened:"true" required:"true"` +} + +// String returns the string representation +func (s BucketLifecycleConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s BucketLifecycleConfiguration) GoString() string { + return s.String() +} + +type BucketLoggingStatus struct { + _ struct{} `type:"structure"` + + LoggingEnabled *LoggingEnabled `type:"structure"` +} + +// String returns the string representation +func (s BucketLoggingStatus) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s BucketLoggingStatus) GoString() string { + return s.String() +} + +type CORSConfiguration struct { + _ struct{} `type:"structure"` + + CORSRules []*CORSRule `locationName:"CORSRule" type:"list" flattened:"true" required:"true"` +} + +// String returns the string representation +func (s CORSConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CORSConfiguration) GoString() string { + return s.String() +} + +type CORSRule struct { + _ struct{} `type:"structure"` + + // Specifies which headers are allowed in a pre-flight OPTIONS request. 
+ AllowedHeaders []*string `locationName:"AllowedHeader" type:"list" flattened:"true"` + + // Identifies HTTP methods that the domain/origin specified in the rule is allowed + // to execute. + AllowedMethods []*string `locationName:"AllowedMethod" type:"list" flattened:"true" required:"true"` + + // One or more origins you want customers to be able to access the bucket from. + AllowedOrigins []*string `locationName:"AllowedOrigin" type:"list" flattened:"true" required:"true"` + + // One or more headers in the response that you want customers to be able to + // access from their applications (for example, from a JavaScript XMLHttpRequest + // object). + ExposeHeaders []*string `locationName:"ExposeHeader" type:"list" flattened:"true"` + + // The time in seconds that your browser is to cache the preflight response + // for the specified resource. + MaxAgeSeconds *int64 `type:"integer"` +} + +// String returns the string representation +func (s CORSRule) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CORSRule) GoString() string { + return s.String() +} + +type CloudFunctionConfiguration struct { + _ struct{} `type:"structure"` + + CloudFunction *string `type:"string"` + + // Bucket event for which to send notifications. + Event *string `type:"string" enum:"Event"` + + Events []*string `locationName:"Event" type:"list" flattened:"true"` + + // Optional unique identifier for configurations in a notification configuration. + // If you don't provide one, Amazon S3 will assign an ID. + Id *string `type:"string"` + + InvocationRole *string `type:"string"` +} + +// String returns the string representation +func (s CloudFunctionConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CloudFunctionConfiguration) GoString() string { + return s.String() +} + +type CommonPrefix struct { + _ struct{} `type:"structure"` + + Prefix *string `type:"string"` +} + +// String returns the string representation +func (s CommonPrefix) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CommonPrefix) GoString() string { + return s.String() +} + +type CompleteMultipartUploadInput struct { + _ struct{} `type:"structure" payload:"MultipartUpload"` + + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` + + MultipartUpload *CompletedMultipartUpload `locationName:"CompleteMultipartUpload" type:"structure"` + + // Confirms that the requester knows that she or he will be charged for the + // request. Bucket owners need not specify this parameter in their requests. 
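The CORSRule fields above translate directly into a bucket CORS document, which the PutBucketCors operation defined elsewhere in this file writes. A hedged sketch under the same assumptions as the earlier example (client construction, bucket name, and origin are illustrative):

	// allowBrowserGets lets pages served from https://example.com issue GETs
	// against the bucket and read the ETag response header.
	func allowBrowserGets(svc *s3.S3) error {
		_, err := svc.PutBucketCors(&s3.PutBucketCorsInput{
			Bucket: aws.String("example-bucket"), // hypothetical bucket
			CORSConfiguration: &s3.CORSConfiguration{
				CORSRules: []*s3.CORSRule{{
					AllowedMethods: []*string{aws.String("GET")},
					AllowedOrigins: []*string{aws.String("https://example.com")},
					ExposeHeaders:  []*string{aws.String("ETag")},
					MaxAgeSeconds:  aws.Int64(3000), // cache preflight responses for 50 minutes
				}},
			},
		})
		return err
	}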
+ // Documentation on downloading objects from requester pays buckets can be found + // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html + RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` + + UploadId *string `location:"querystring" locationName:"uploadId" type:"string" required:"true"` +} + +// String returns the string representation +func (s CompleteMultipartUploadInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CompleteMultipartUploadInput) GoString() string { + return s.String() +} + +type CompleteMultipartUploadOutput struct { + _ struct{} `type:"structure"` + + Bucket *string `type:"string"` + + // Entity tag of the object. + ETag *string `type:"string"` + + // If the object expiration is configured, this will contain the expiration + // date (expiry-date) and rule ID (rule-id). The value of rule-id is URL encoded. + Expiration *string `location:"header" locationName:"x-amz-expiration" type:"string"` + + Key *string `min:"1" type:"string"` + + Location *string `type:"string"` + + // If present, indicates that the requester was successfully charged for the + // request. + RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` + + // If present, specifies the ID of the AWS Key Management Service (KMS) master + // encryption key that was used for the object. + SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string"` + + // The Server-side encryption algorithm used when storing this object in S3 + // (e.g., AES256, aws:kms). + ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"` + + // Version of the object. + VersionId *string `location:"header" locationName:"x-amz-version-id" type:"string"` +} + +// String returns the string representation +func (s CompleteMultipartUploadOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CompleteMultipartUploadOutput) GoString() string { + return s.String() +} + +type CompletedMultipartUpload struct { + _ struct{} `type:"structure"` + + Parts []*CompletedPart `locationName:"Part" type:"list" flattened:"true"` +} + +// String returns the string representation +func (s CompletedMultipartUpload) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CompletedMultipartUpload) GoString() string { + return s.String() +} + +type CompletedPart struct { + _ struct{} `type:"structure"` + + // Entity tag returned when the part was uploaded. + ETag *string `type:"string"` + + // Part number that identifies the part. This is a positive integer between + // 1 and 10,000. + PartNumber *int64 `type:"integer"` +} + +// String returns the string representation +func (s CompletedPart) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CompletedPart) GoString() string { + return s.String() +} + +type Condition struct { + _ struct{} `type:"structure"` + + // The HTTP error code when the redirect is applied. In the event of an error, + // if the error code equals this value, then the specified redirect is applied. + // Required when parent element Condition is specified and sibling KeyPrefixEquals + // is not specified. 
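CompletedMultipartUpload and CompletedPart above are the glue between UploadPart and CompleteMultipartUpload: the caller must echo back each part's number and ETag. A sketch under the same assumptions as the earlier upload example (names are illustrative):

	// finishUpload assembles the part list in part-number order and
	// completes the multipart upload.
	func finishUpload(svc *s3.S3, uploadID string, etags []*string) (*s3.CompleteMultipartUploadOutput, error) {
		parts := make([]*s3.CompletedPart, len(etags))
		for i, etag := range etags {
			parts[i] = &s3.CompletedPart{
				ETag:       etag,
				PartNumber: aws.Int64(int64(i + 1)), // must match the numbers used in UploadPart
			}
		}
		return svc.CompleteMultipartUpload(&s3.CompleteMultipartUploadInput{
			Bucket:          aws.String("example-bucket"), // hypothetical bucket
			Key:             aws.String("big/object"),     // hypothetical key
			UploadId:        aws.String(uploadID),
			MultipartUpload: &s3.CompletedMultipartUpload{Parts: parts},
		})
	}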
If both are specified, then both must be true for the redirect + // to be applied. + HttpErrorCodeReturnedEquals *string `type:"string"` + + // The object key name prefix when the redirect is applied. For example, to + // redirect requests for ExamplePage.html, the key prefix will be ExamplePage.html. + // To redirect request for all pages with the prefix docs/, the key prefix will + // be /docs, which identifies all objects in the docs/ folder. Required when + // the parent element Condition is specified and sibling HttpErrorCodeReturnedEquals + // is not specified. If both conditions are specified, both must be true for + // the redirect to be applied. + KeyPrefixEquals *string `type:"string"` +} + +// String returns the string representation +func (s Condition) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Condition) GoString() string { + return s.String() +} + +type CopyObjectInput struct { + _ struct{} `type:"structure"` + + // The canned ACL to apply to the object. + ACL *string `location:"header" locationName:"x-amz-acl" type:"string" enum:"ObjectCannedACL"` + + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // Specifies caching behavior along the request/reply chain. + CacheControl *string `location:"header" locationName:"Cache-Control" type:"string"` + + // Specifies presentational information for the object. + ContentDisposition *string `location:"header" locationName:"Content-Disposition" type:"string"` + + // Specifies what content encodings have been applied to the object and thus + // what decoding mechanisms must be applied to obtain the media-type referenced + // by the Content-Type header field. + ContentEncoding *string `location:"header" locationName:"Content-Encoding" type:"string"` + + // The language the content is in. + ContentLanguage *string `location:"header" locationName:"Content-Language" type:"string"` + + // A standard MIME type describing the format of the object data. + ContentType *string `location:"header" locationName:"Content-Type" type:"string"` + + // The name of the source bucket and key name of the source object, separated + // by a slash (/). Must be URL-encoded. + CopySource *string `location:"header" locationName:"x-amz-copy-source" type:"string" required:"true"` + + // Copies the object if its entity tag (ETag) matches the specified tag. + CopySourceIfMatch *string `location:"header" locationName:"x-amz-copy-source-if-match" type:"string"` + + // Copies the object if it has been modified since the specified time. + CopySourceIfModifiedSince *time.Time `location:"header" locationName:"x-amz-copy-source-if-modified-since" type:"timestamp" timestampFormat:"rfc822"` + + // Copies the object if its entity tag (ETag) is different than the specified + // ETag. + CopySourceIfNoneMatch *string `location:"header" locationName:"x-amz-copy-source-if-none-match" type:"string"` + + // Copies the object if it hasn't been modified since the specified time. + CopySourceIfUnmodifiedSince *time.Time `location:"header" locationName:"x-amz-copy-source-if-unmodified-since" type:"timestamp" timestampFormat:"rfc822"` + + // Specifies the algorithm to use when decrypting the source object (e.g., AES256). + CopySourceSSECustomerAlgorithm *string `location:"header" locationName:"x-amz-copy-source-server-side-encryption-customer-algorithm" type:"string"` + + // Specifies the customer-provided encryption key for Amazon S3 to use to decrypt + // the source object. 
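Condition is one half of a website RoutingRule; the Redirect half is defined later in this file. As the comments above say, when both condition fields are set, both must match. A hedged sketch of a rule that rewrites missing keys under docs/ (all values are illustrative):

	// Rewrite 404s under docs/ to the documents/ prefix.
	rule := &s3.RoutingRule{
		Condition: &s3.Condition{
			KeyPrefixEquals:             aws.String("docs/"),
			HttpErrorCodeReturnedEquals: aws.String("404"), // both conditions must hold
		},
		Redirect: &s3.Redirect{
			ReplaceKeyPrefixWith: aws.String("documents/"), // docs/x -> documents/x
		},
	}

Such a rule would be carried in the RoutingRules list of a bucket website configuration.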
The encryption key provided in this header must be one + // that was used when the source object was created. + CopySourceSSECustomerKey *string `location:"header" locationName:"x-amz-copy-source-server-side-encryption-customer-key" type:"string"` + + // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. + // Amazon S3 uses this header for a message integrity check to ensure the encryption + // key was transmitted without error. + CopySourceSSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-copy-source-server-side-encryption-customer-key-MD5" type:"string"` + + // The date and time at which the object is no longer cacheable. + Expires *time.Time `location:"header" locationName:"Expires" type:"timestamp" timestampFormat:"rfc822"` + + // Gives the grantee READ, READ_ACP, and WRITE_ACP permissions on the object. + GrantFullControl *string `location:"header" locationName:"x-amz-grant-full-control" type:"string"` + + // Allows grantee to read the object data and its metadata. + GrantRead *string `location:"header" locationName:"x-amz-grant-read" type:"string"` + + // Allows grantee to read the object ACL. + GrantReadACP *string `location:"header" locationName:"x-amz-grant-read-acp" type:"string"` + + // Allows grantee to write the ACL for the applicable object. + GrantWriteACP *string `location:"header" locationName:"x-amz-grant-write-acp" type:"string"` + + Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` + + // A map of metadata to store with the object in S3. + Metadata map[string]*string `location:"headers" locationName:"x-amz-meta-" type:"map"` + + // Specifies whether the metadata is copied from the source object or replaced + // with metadata provided in the request. + MetadataDirective *string `location:"header" locationName:"x-amz-metadata-directive" type:"string" enum:"MetadataDirective"` + + // Confirms that the requester knows that she or he will be charged for the + // request. Bucket owners need not specify this parameter in their requests. + // Documentation on downloading objects from requester pays buckets can be found + // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html + RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` + + // Specifies the algorithm to use to when encrypting the object (e.g., AES256). + SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` + + // Specifies the customer-provided encryption key for Amazon S3 to use in encrypting + // data. This value is used to store the object and then it is discarded; Amazon + // does not store the encryption key. The key must be appropriate for use with + // the algorithm specified in the x-amz-server-side​-encryption​-customer-algorithm + // header. + SSECustomerKey *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string"` + + // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. + // Amazon S3 uses this header for a message integrity check to ensure the encryption + // key was transmitted without error. + SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` + + // Specifies the AWS KMS key ID to use for object encryption. All GET and PUT + // requests for an object protected by AWS KMS will fail if not made via SSL + // or using SigV4. 
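The *-key-MD5 headers above exist purely as an integrity check: S3 compares them against its own MD5 of the key it received. The header value is the base64 encoding of the RFC 1321 digest of the raw key. A small stdlib-only sketch of that computation (the SDK may populate these headers itself when a key is set, so this is only to pin down what the header carries):

	import (
		"crypto/md5"
		"encoding/base64"
	)

	// sseKeyMD5 returns the value carried by the
	// x-amz-*-server-side-encryption-customer-key-MD5 headers.
	func sseKeyMD5(rawKey []byte) string {
		sum := md5.Sum(rawKey) // RFC 1321 digest of the raw key bytes
		return base64.StdEncoding.EncodeToString(sum[:])
	}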
Documentation on configuring any of the officially supported + // AWS SDKs and CLI can be found at http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingAWSSDK.html#specify-signature-version + SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string"` + + // The Server-side encryption algorithm used when storing this object in S3 + // (e.g., AES256, aws:kms). + ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"` + + // The type of storage to use for the object. Defaults to 'STANDARD'. + StorageClass *string `location:"header" locationName:"x-amz-storage-class" type:"string" enum:"StorageClass"` + + // If the bucket is configured as a website, redirects requests for this object + // to another object in the same bucket or to an external URL. Amazon S3 stores + // the value of this header in the object metadata. + WebsiteRedirectLocation *string `location:"header" locationName:"x-amz-website-redirect-location" type:"string"` +} + +// String returns the string representation +func (s CopyObjectInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CopyObjectInput) GoString() string { + return s.String() +} + +type CopyObjectOutput struct { + _ struct{} `type:"structure" payload:"CopyObjectResult"` + + CopyObjectResult *CopyObjectResult `type:"structure"` + + CopySourceVersionId *string `location:"header" locationName:"x-amz-copy-source-version-id" type:"string"` + + // If the object expiration is configured, the response includes this header. + Expiration *string `location:"header" locationName:"x-amz-expiration" type:"string"` + + // If present, indicates that the requester was successfully charged for the + // request. + RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` + + // If server-side encryption with a customer-provided encryption key was requested, + // the response will include this header confirming the encryption algorithm + // used. + SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` + + // If server-side encryption with a customer-provided encryption key was requested, + // the response will include this header to provide round trip message integrity + // verification of the customer-provided encryption key. + SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` + + // If present, specifies the ID of the AWS Key Management Service (KMS) master + // encryption key that was used for the object. + SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string"` + + // The Server-side encryption algorithm used when storing this object in S3 + // (e.g., AES256, aws:kms). + ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"` + + // Version ID of the newly created copy. 
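CopySource above is the one field of CopyObjectInput with a non-obvious encoding: "bucket/key", URL-encoded. A sketch (bucket and key names are illustrative; net/url does the escaping):

	// copyReport server-side-copies an object between buckets.
	func copyReport(svc *s3.S3) (*s3.CopyObjectOutput, error) {
		return svc.CopyObject(&s3.CopyObjectInput{
			Bucket:     aws.String("dest-bucket"), // hypothetical destination
			Key:        aws.String("copied/report.csv"),
			CopySource: aws.String(url.QueryEscape("src-bucket/reports/2016.csv")), // "bucket/key", URL-encoded
		})
	}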
+ VersionId *string `location:"header" locationName:"x-amz-version-id" type:"string"` +} + +// String returns the string representation +func (s CopyObjectOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CopyObjectOutput) GoString() string { + return s.String() +} + +type CopyObjectResult struct { + _ struct{} `type:"structure"` + + ETag *string `type:"string"` + + LastModified *time.Time `type:"timestamp" timestampFormat:"iso8601"` +} + +// String returns the string representation +func (s CopyObjectResult) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CopyObjectResult) GoString() string { + return s.String() +} + +type CopyPartResult struct { + _ struct{} `type:"structure"` + + // Entity tag of the object. + ETag *string `type:"string"` + + // Date and time at which the object was uploaded. + LastModified *time.Time `type:"timestamp" timestampFormat:"iso8601"` +} + +// String returns the string representation +func (s CopyPartResult) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CopyPartResult) GoString() string { + return s.String() +} + +type CreateBucketConfiguration struct { + _ struct{} `type:"structure"` + + // Specifies the region where the bucket will be created. If you don't specify + // a region, the bucket will be created in US Standard. + LocationConstraint *string `type:"string" enum:"BucketLocationConstraint"` +} + +// String returns the string representation +func (s CreateBucketConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateBucketConfiguration) GoString() string { + return s.String() +} + +type CreateBucketInput struct { + _ struct{} `type:"structure" payload:"CreateBucketConfiguration"` + + // The canned ACL to apply to the bucket. + ACL *string `location:"header" locationName:"x-amz-acl" type:"string" enum:"BucketCannedACL"` + + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + CreateBucketConfiguration *CreateBucketConfiguration `locationName:"CreateBucketConfiguration" type:"structure"` + + // Allows grantee the read, write, read ACP, and write ACP permissions on the + // bucket. + GrantFullControl *string `location:"header" locationName:"x-amz-grant-full-control" type:"string"` + + // Allows grantee to list the objects in the bucket. + GrantRead *string `location:"header" locationName:"x-amz-grant-read" type:"string"` + + // Allows grantee to read the bucket ACL. + GrantReadACP *string `location:"header" locationName:"x-amz-grant-read-acp" type:"string"` + + // Allows grantee to create, overwrite, and delete any object in the bucket. + GrantWrite *string `location:"header" locationName:"x-amz-grant-write" type:"string"` + + // Allows grantee to write the ACL for the applicable bucket. 
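CreateBucketConfiguration above is how a region other than US Standard is selected at bucket creation time. A sketch (bucket name and region are illustrative):

	func createRegionalBucket(svc *s3.S3) error {
		_, err := svc.CreateBucket(&s3.CreateBucketInput{
			Bucket: aws.String("example-bucket"), // bucket names are globally unique
			CreateBucketConfiguration: &s3.CreateBucketConfiguration{
				LocationConstraint: aws.String("eu-west-1"), // omit to create in US Standard
			},
		})
		return err
	}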
+ GrantWriteACP *string `location:"header" locationName:"x-amz-grant-write-acp" type:"string"` +} + +// String returns the string representation +func (s CreateBucketInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateBucketInput) GoString() string { + return s.String() +} + +type CreateBucketOutput struct { + _ struct{} `type:"structure"` + + Location *string `location:"header" locationName:"Location" type:"string"` +} + +// String returns the string representation +func (s CreateBucketOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateBucketOutput) GoString() string { + return s.String() +} + +type CreateMultipartUploadInput struct { + _ struct{} `type:"structure"` + + // The canned ACL to apply to the object. + ACL *string `location:"header" locationName:"x-amz-acl" type:"string" enum:"ObjectCannedACL"` + + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // Specifies caching behavior along the request/reply chain. + CacheControl *string `location:"header" locationName:"Cache-Control" type:"string"` + + // Specifies presentational information for the object. + ContentDisposition *string `location:"header" locationName:"Content-Disposition" type:"string"` + + // Specifies what content encodings have been applied to the object and thus + // what decoding mechanisms must be applied to obtain the media-type referenced + // by the Content-Type header field. + ContentEncoding *string `location:"header" locationName:"Content-Encoding" type:"string"` + + // The language the content is in. + ContentLanguage *string `location:"header" locationName:"Content-Language" type:"string"` + + // A standard MIME type describing the format of the object data. + ContentType *string `location:"header" locationName:"Content-Type" type:"string"` + + // The date and time at which the object is no longer cacheable. + Expires *time.Time `location:"header" locationName:"Expires" type:"timestamp" timestampFormat:"rfc822"` + + // Gives the grantee READ, READ_ACP, and WRITE_ACP permissions on the object. + GrantFullControl *string `location:"header" locationName:"x-amz-grant-full-control" type:"string"` + + // Allows grantee to read the object data and its metadata. + GrantRead *string `location:"header" locationName:"x-amz-grant-read" type:"string"` + + // Allows grantee to read the object ACL. + GrantReadACP *string `location:"header" locationName:"x-amz-grant-read-acp" type:"string"` + + // Allows grantee to write the ACL for the applicable object. + GrantWriteACP *string `location:"header" locationName:"x-amz-grant-write-acp" type:"string"` + + Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` + + // A map of metadata to store with the object in S3. + Metadata map[string]*string `location:"headers" locationName:"x-amz-meta-" type:"map"` + + // Confirms that the requester knows that she or he will be charged for the + // request. Bucket owners need not specify this parameter in their requests. + // Documentation on downloading objects from requester pays buckets can be found + // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html + RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` + + // Specifies the algorithm to use to when encrypting the object (e.g., AES256). 
+ SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` + + // Specifies the customer-provided encryption key for Amazon S3 to use in encrypting + // data. This value is used to store the object and then it is discarded; Amazon + // does not store the encryption key. The key must be appropriate for use with + // the algorithm specified in the x-amz-server-side​-encryption​-customer-algorithm + // header. + SSECustomerKey *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string"` + + // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. + // Amazon S3 uses this header for a message integrity check to ensure the encryption + // key was transmitted without error. + SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` + + // Specifies the AWS KMS key ID to use for object encryption. All GET and PUT + // requests for an object protected by AWS KMS will fail if not made via SSL + // or using SigV4. Documentation on configuring any of the officially supported + // AWS SDKs and CLI can be found at http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingAWSSDK.html#specify-signature-version + SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string"` + + // The Server-side encryption algorithm used when storing this object in S3 + // (e.g., AES256, aws:kms). + ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"` + + // The type of storage to use for the object. Defaults to 'STANDARD'. + StorageClass *string `location:"header" locationName:"x-amz-storage-class" type:"string" enum:"StorageClass"` + + // If the bucket is configured as a website, redirects requests for this object + // to another object in the same bucket or to an external URL. Amazon S3 stores + // the value of this header in the object metadata. + WebsiteRedirectLocation *string `location:"header" locationName:"x-amz-website-redirect-location" type:"string"` +} + +// String returns the string representation +func (s CreateMultipartUploadInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateMultipartUploadInput) GoString() string { + return s.String() +} + +type CreateMultipartUploadOutput struct { + _ struct{} `type:"structure"` + + // Name of the bucket to which the multipart upload was initiated. + Bucket *string `locationName:"Bucket" type:"string"` + + // Object key for which the multipart upload was initiated. + Key *string `min:"1" type:"string"` + + // If present, indicates that the requester was successfully charged for the + // request. + RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` + + // If server-side encryption with a customer-provided encryption key was requested, + // the response will include this header confirming the encryption algorithm + // used. + SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` + + // If server-side encryption with a customer-provided encryption key was requested, + // the response will include this header to provide round trip message integrity + // verification of the customer-provided encryption key. 
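The encryption fields above are fixed when the multipart upload is initiated; the parts inherit them. A sketch that starts a KMS-encrypted upload (client, names, and the key ARN are illustrative; note the SigV4/SSL requirement in the SSEKMSKeyId comment above):

	// startEncryptedUpload initiates a multipart upload whose object will be
	// encrypted under a KMS master key, returning the upload ID.
	func startEncryptedUpload(svc *s3.S3) (*string, error) {
		out, err := svc.CreateMultipartUpload(&s3.CreateMultipartUploadInput{
			Bucket:               aws.String("example-bucket"),
			Key:                  aws.String("big/object"),
			ServerSideEncryption: aws.String("aws:kms"),
			SSEKMSKeyId:          aws.String("arn:aws:kms:us-east-1:111122223333:key/EXAMPLE"), // hypothetical key
		})
		if err != nil {
			return nil, err
		}
		return out.UploadId, nil // every subsequent UploadPart must quote this ID
	}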
+ SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` + + // If present, specifies the ID of the AWS Key Management Service (KMS) master + // encryption key that was used for the object. + SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string"` + + // The Server-side encryption algorithm used when storing this object in S3 + // (e.g., AES256, aws:kms). + ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"` + + // ID for the initiated multipart upload. + UploadId *string `type:"string"` +} + +// String returns the string representation +func (s CreateMultipartUploadOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateMultipartUploadOutput) GoString() string { + return s.String() +} + +type Delete struct { + _ struct{} `type:"structure"` + + Objects []*ObjectIdentifier `locationName:"Object" type:"list" flattened:"true" required:"true"` + + // Element to enable quiet mode for the request. When you add this element, + // you must set its value to true. + Quiet *bool `type:"boolean"` +} + +// String returns the string representation +func (s Delete) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Delete) GoString() string { + return s.String() +} + +type DeleteBucketCorsInput struct { + _ struct{} `type:"structure"` + + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteBucketCorsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteBucketCorsInput) GoString() string { + return s.String() +} + +type DeleteBucketCorsOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteBucketCorsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteBucketCorsOutput) GoString() string { + return s.String() +} + +type DeleteBucketInput struct { + _ struct{} `type:"structure"` + + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteBucketInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteBucketInput) GoString() string { + return s.String() +} + +type DeleteBucketLifecycleInput struct { + _ struct{} `type:"structure"` + + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteBucketLifecycleInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteBucketLifecycleInput) GoString() string { + return s.String() +} + +type DeleteBucketLifecycleOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteBucketLifecycleOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteBucketLifecycleOutput) GoString() string { + return s.String() +} + +type DeleteBucketOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s 
DeleteBucketOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteBucketOutput) GoString() string { + return s.String() +} + +type DeleteBucketPolicyInput struct { + _ struct{} `type:"structure"` + + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteBucketPolicyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteBucketPolicyInput) GoString() string { + return s.String() +} + +type DeleteBucketPolicyOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteBucketPolicyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteBucketPolicyOutput) GoString() string { + return s.String() +} + +type DeleteBucketReplicationInput struct { + _ struct{} `type:"structure"` + + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteBucketReplicationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteBucketReplicationInput) GoString() string { + return s.String() +} + +type DeleteBucketReplicationOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteBucketReplicationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteBucketReplicationOutput) GoString() string { + return s.String() +} + +type DeleteBucketTaggingInput struct { + _ struct{} `type:"structure"` + + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteBucketTaggingInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteBucketTaggingInput) GoString() string { + return s.String() +} + +type DeleteBucketTaggingOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteBucketTaggingOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteBucketTaggingOutput) GoString() string { + return s.String() +} + +type DeleteBucketWebsiteInput struct { + _ struct{} `type:"structure"` + + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteBucketWebsiteInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteBucketWebsiteInput) GoString() string { + return s.String() +} + +type DeleteBucketWebsiteOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteBucketWebsiteOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteBucketWebsiteOutput) GoString() string { + return s.String() +} + +type DeleteMarkerEntry struct { + _ struct{} `type:"structure"` + + // Specifies whether the object is (true) or is not (false) the latest version + // of an object. + IsLatest *bool `type:"boolean"` + + // The object key. 
+ Key *string `min:"1" type:"string"` + + // Date and time the object was last modified. + LastModified *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + Owner *Owner `type:"structure"` + + // Version ID of an object. + VersionId *string `type:"string"` +} + +// String returns the string representation +func (s DeleteMarkerEntry) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteMarkerEntry) GoString() string { + return s.String() +} + +type DeleteObjectInput struct { + _ struct{} `type:"structure"` + + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` + + // The concatenation of the authentication device's serial number, a space, + // and the value that is displayed on your authentication device. + MFA *string `location:"header" locationName:"x-amz-mfa" type:"string"` + + // Confirms that the requester knows that she or he will be charged for the + // request. Bucket owners need not specify this parameter in their requests. + // Documentation on downloading objects from requester pays buckets can be found + // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html + RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` + + // VersionId used to reference a specific version of the object. + VersionId *string `location:"querystring" locationName:"versionId" type:"string"` +} + +// String returns the string representation +func (s DeleteObjectInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteObjectInput) GoString() string { + return s.String() +} + +type DeleteObjectOutput struct { + _ struct{} `type:"structure"` + + // Specifies whether the versioned object that was permanently deleted was (true) + // or was not (false) a delete marker. + DeleteMarker *bool `location:"header" locationName:"x-amz-delete-marker" type:"boolean"` + + // If present, indicates that the requester was successfully charged for the + // request. + RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` + + // Returns the version ID of the delete marker created as a result of the DELETE + // operation. + VersionId *string `location:"header" locationName:"x-amz-version-id" type:"string"` +} + +// String returns the string representation +func (s DeleteObjectOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteObjectOutput) GoString() string { + return s.String() +} + +type DeleteObjectsInput struct { + _ struct{} `type:"structure" payload:"Delete"` + + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + Delete *Delete `locationName:"Delete" type:"structure" required:"true"` + + // The concatenation of the authentication device's serial number, a space, + // and the value that is displayed on your authentication device. + MFA *string `location:"header" locationName:"x-amz-mfa" type:"string"` + + // Confirms that the requester knows that she or he will be charged for the + // request. Bucket owners need not specify this parameter in their requests. 
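On a versioned bucket, the VersionId field above changes the meaning of a delete: without it S3 only adds a delete marker, with it the named version is removed permanently. A sketch (names are illustrative):

	// deleteVersion permanently removes one specific version of an object.
	func deleteVersion(svc *s3.S3, versionID string) (*s3.DeleteObjectOutput, error) {
		return svc.DeleteObject(&s3.DeleteObjectInput{
			Bucket:    aws.String("example-bucket"),
			Key:       aws.String("old/object"),
			VersionId: aws.String(versionID), // omit to create a delete marker instead
		})
	}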
+ // Documentation on downloading objects from requester pays buckets can be found + // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html + RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` +} + +// String returns the string representation +func (s DeleteObjectsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteObjectsInput) GoString() string { + return s.String() +} + +type DeleteObjectsOutput struct { + _ struct{} `type:"structure"` + + Deleted []*DeletedObject `type:"list" flattened:"true"` + + Errors []*Error `locationName:"Error" type:"list" flattened:"true"` + + // If present, indicates that the requester was successfully charged for the + // request. + RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` +} + +// String returns the string representation +func (s DeleteObjectsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteObjectsOutput) GoString() string { + return s.String() +} + +type DeletedObject struct { + _ struct{} `type:"structure"` + + DeleteMarker *bool `type:"boolean"` + + DeleteMarkerVersionId *string `type:"string"` + + Key *string `min:"1" type:"string"` + + VersionId *string `type:"string"` +} + +// String returns the string representation +func (s DeletedObject) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeletedObject) GoString() string { + return s.String() +} + +type Destination struct { + _ struct{} `type:"structure"` + + // Amazon resource name (ARN) of the bucket where you want Amazon S3 to store + // replicas of the object identified by the rule. + Bucket *string `type:"string" required:"true"` + + // The class of storage used to store the object. + StorageClass *string `type:"string" enum:"StorageClass"` +} + +// String returns the string representation +func (s Destination) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Destination) GoString() string { + return s.String() +} + +type Error struct { + _ struct{} `type:"structure"` + + Code *string `type:"string"` + + Key *string `min:"1" type:"string"` + + Message *string `type:"string"` + + VersionId *string `type:"string"` +} + +// String returns the string representation +func (s Error) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Error) GoString() string { + return s.String() +} + +type ErrorDocument struct { + _ struct{} `type:"structure"` + + // The object key name to use when a 4XX class error occurs. + Key *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s ErrorDocument) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ErrorDocument) GoString() string { + return s.String() +} + +// Container for key value pair that defines the criteria for the filter rule. +type FilterRule struct { + _ struct{} `type:"structure"` + + // Object key name prefix or suffix identifying one or more objects to which + // the filtering rule applies. Maximum prefix length can be up to 1,024 characters. + // Overlapping prefixes and suffixes are not supported. 
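DeleteObjectsOutput below reports per-key results, which is why batch deletes should inspect Errors even when the call itself succeeds; the Quiet flag above suppresses the per-key success entries. A sketch (client and bucket are illustrative):

	// deleteMany removes up to 1,000 keys in one round trip.
	func deleteMany(svc *s3.S3, keys []string) error {
		ids := make([]*s3.ObjectIdentifier, len(keys))
		for i, k := range keys {
			ids[i] = &s3.ObjectIdentifier{Key: aws.String(k)}
		}
		out, err := svc.DeleteObjects(&s3.DeleteObjectsInput{
			Bucket: aws.String("example-bucket"),
			Delete: &s3.Delete{
				Objects: ids,
				Quiet:   aws.Bool(true), // only failures come back
			},
		})
		if err != nil {
			return err
		}
		for _, e := range out.Errors {
			fmt.Printf("delete %s failed: %s\n", aws.StringValue(e.Key), aws.StringValue(e.Message))
		}
		return nil
	}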
For more information, + // go to Configuring Event Notifications (http://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html) + // in the Amazon Simple Storage Service Developer Guide. + Name *string `type:"string" enum:"FilterRuleName"` + + Value *string `type:"string"` +} + +// String returns the string representation +func (s FilterRule) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s FilterRule) GoString() string { + return s.String() +} + +type GetBucketAclInput struct { + _ struct{} `type:"structure"` + + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetBucketAclInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketAclInput) GoString() string { + return s.String() +} + +type GetBucketAclOutput struct { + _ struct{} `type:"structure"` + + // A list of grants. + Grants []*Grant `locationName:"AccessControlList" locationNameList:"Grant" type:"list"` + + Owner *Owner `type:"structure"` +} + +// String returns the string representation +func (s GetBucketAclOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketAclOutput) GoString() string { + return s.String() +} + +type GetBucketCorsInput struct { + _ struct{} `type:"structure"` + + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetBucketCorsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketCorsInput) GoString() string { + return s.String() +} + +type GetBucketCorsOutput struct { + _ struct{} `type:"structure"` + + CORSRules []*CORSRule `locationName:"CORSRule" type:"list" flattened:"true"` +} + +// String returns the string representation +func (s GetBucketCorsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketCorsOutput) GoString() string { + return s.String() +} + +type GetBucketLifecycleConfigurationInput struct { + _ struct{} `type:"structure"` + + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetBucketLifecycleConfigurationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketLifecycleConfigurationInput) GoString() string { + return s.String() +} + +type GetBucketLifecycleConfigurationOutput struct { + _ struct{} `type:"structure"` + + Rules []*LifecycleRule `locationName:"Rule" type:"list" flattened:"true"` +} + +// String returns the string representation +func (s GetBucketLifecycleConfigurationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketLifecycleConfigurationOutput) GoString() string { + return s.String() +} + +type GetBucketLifecycleInput struct { + _ struct{} `type:"structure"` + + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetBucketLifecycleInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketLifecycleInput) GoString() string { + return s.String() +} + +type 
GetBucketLifecycleOutput struct {
+	_ struct{} `type:"structure"`
+
+	Rules []*Rule `locationName:"Rule" type:"list" flattened:"true"`
+}
+
+// String returns the string representation
+func (s GetBucketLifecycleOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetBucketLifecycleOutput) GoString() string {
+	return s.String()
+}
+
+type GetBucketLocationInput struct {
+	_ struct{} `type:"structure"`
+
+	Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s GetBucketLocationInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetBucketLocationInput) GoString() string {
+	return s.String()
+}
+
+type GetBucketLocationOutput struct {
+	_ struct{} `type:"structure"`
+
+	LocationConstraint *string `type:"string" enum:"BucketLocationConstraint"`
+}
+
+// String returns the string representation
+func (s GetBucketLocationOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetBucketLocationOutput) GoString() string {
+	return s.String()
+}
+
+type GetBucketLoggingInput struct {
+	_ struct{} `type:"structure"`
+
+	Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s GetBucketLoggingInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetBucketLoggingInput) GoString() string {
+	return s.String()
+}
+
+type GetBucketLoggingOutput struct {
+	_ struct{} `type:"structure"`
+
+	LoggingEnabled *LoggingEnabled `type:"structure"`
+}
+
+// String returns the string representation
+func (s GetBucketLoggingOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetBucketLoggingOutput) GoString() string {
+	return s.String()
+}
+
+type GetBucketNotificationConfigurationRequest struct {
+	_ struct{} `type:"structure"`
+
+	// Name of the bucket to get the notification configuration for.
+	Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s GetBucketNotificationConfigurationRequest) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetBucketNotificationConfigurationRequest) GoString() string {
+	return s.String()
+}
+
+type GetBucketPolicyInput struct {
+	_ struct{} `type:"structure"`
+
+	Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s GetBucketPolicyInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetBucketPolicyInput) GoString() string {
+	return s.String()
+}
+
+type GetBucketPolicyOutput struct {
+	_ struct{} `type:"structure" payload:"Policy"`
+
+	// The bucket policy as a JSON document.
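One quirk worth noting with GetBucketLocationOutput above: buckets in US Standard report an empty LocationConstraint rather than a region name. A sketch that normalizes this (the us-east-1 mapping is an assumption based on S3's documented behavior; names are illustrative):

	func bucketRegion(svc *s3.S3) (string, error) {
		out, err := svc.GetBucketLocation(&s3.GetBucketLocationInput{
			Bucket: aws.String("example-bucket"), // hypothetical bucket
		})
		if err != nil {
			return "", err
		}
		if region := aws.StringValue(out.LocationConstraint); region != "" {
			return region, nil
		}
		return "us-east-1", nil // US Standard buckets return an empty constraint
	}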
+ Policy *string `type:"string"` +} + +// String returns the string representation +func (s GetBucketPolicyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketPolicyOutput) GoString() string { + return s.String() +} + +type GetBucketReplicationInput struct { + _ struct{} `type:"structure"` + + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetBucketReplicationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketReplicationInput) GoString() string { + return s.String() +} + +type GetBucketReplicationOutput struct { + _ struct{} `type:"structure" payload:"ReplicationConfiguration"` + + // Container for replication rules. You can add as many as 1,000 rules. Total + // replication configuration size can be up to 2 MB. + ReplicationConfiguration *ReplicationConfiguration `type:"structure"` +} + +// String returns the string representation +func (s GetBucketReplicationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketReplicationOutput) GoString() string { + return s.String() +} + +type GetBucketRequestPaymentInput struct { + _ struct{} `type:"structure"` + + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetBucketRequestPaymentInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketRequestPaymentInput) GoString() string { + return s.String() +} + +type GetBucketRequestPaymentOutput struct { + _ struct{} `type:"structure"` + + // Specifies who pays for the download and request fees. 
+ Payer *string `type:"string" enum:"Payer"` +} + +// String returns the string representation +func (s GetBucketRequestPaymentOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketRequestPaymentOutput) GoString() string { + return s.String() +} + +type GetBucketTaggingInput struct { + _ struct{} `type:"structure"` + + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetBucketTaggingInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketTaggingInput) GoString() string { + return s.String() +} + +type GetBucketTaggingOutput struct { + _ struct{} `type:"structure"` + + TagSet []*Tag `locationNameList:"Tag" type:"list" required:"true"` +} + +// String returns the string representation +func (s GetBucketTaggingOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketTaggingOutput) GoString() string { + return s.String() +} + +type GetBucketVersioningInput struct { + _ struct{} `type:"structure"` + + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetBucketVersioningInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketVersioningInput) GoString() string { + return s.String() +} + +type GetBucketVersioningOutput struct { + _ struct{} `type:"structure"` + + // Specifies whether MFA delete is enabled in the bucket versioning configuration. + // This element is only returned if the bucket has been configured with MFA + // delete. If the bucket has never been so configured, this element is not returned. + MFADelete *string `locationName:"MfaDelete" type:"string" enum:"MFADeleteStatus"` + + // The versioning state of the bucket. 
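Per the comments above, GetBucketVersioningOutput distinguishes a bucket that was never configured for versioning (no Status at all) from one where versioning is Enabled or Suspended. A sketch (names are illustrative):

	func versioningEnabled(svc *s3.S3) (bool, error) {
		out, err := svc.GetBucketVersioning(&s3.GetBucketVersioningInput{
			Bucket: aws.String("example-bucket"),
		})
		if err != nil {
			return false, err
		}
		// BucketVersioningStatus enum: "Enabled" or "Suspended"; unset means
		// versioning was never configured on this bucket.
		return aws.StringValue(out.Status) == "Enabled", nil
	}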
+ Status *string `type:"string" enum:"BucketVersioningStatus"` +} + +// String returns the string representation +func (s GetBucketVersioningOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketVersioningOutput) GoString() string { + return s.String() +} + +type GetBucketWebsiteInput struct { + _ struct{} `type:"structure"` + + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetBucketWebsiteInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketWebsiteInput) GoString() string { + return s.String() +} + +type GetBucketWebsiteOutput struct { + _ struct{} `type:"structure"` + + ErrorDocument *ErrorDocument `type:"structure"` + + IndexDocument *IndexDocument `type:"structure"` + + RedirectAllRequestsTo *RedirectAllRequestsTo `type:"structure"` + + RoutingRules []*RoutingRule `locationNameList:"RoutingRule" type:"list"` +} + +// String returns the string representation +func (s GetBucketWebsiteOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketWebsiteOutput) GoString() string { + return s.String() +} + +type GetObjectAclInput struct { + _ struct{} `type:"structure"` + + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` + + // Confirms that the requester knows that she or he will be charged for the + // request. Bucket owners need not specify this parameter in their requests. + // Documentation on downloading objects from requester pays buckets can be found + // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html + RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` + + // VersionId used to reference a specific version of the object. + VersionId *string `location:"querystring" locationName:"versionId" type:"string"` +} + +// String returns the string representation +func (s GetObjectAclInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetObjectAclInput) GoString() string { + return s.String() +} + +type GetObjectAclOutput struct { + _ struct{} `type:"structure"` + + // A list of grants. + Grants []*Grant `locationName:"AccessControlList" locationNameList:"Grant" type:"list"` + + Owner *Owner `type:"structure"` + + // If present, indicates that the requester was successfully charged for the + // request. + RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` +} + +// String returns the string representation +func (s GetObjectAclOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetObjectAclOutput) GoString() string { + return s.String() +} + +type GetObjectInput struct { + _ struct{} `type:"structure"` + + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // Return the object only if its entity tag (ETag) is the same as the one specified, + // otherwise return a 412 (precondition failed). 
+ IfMatch *string `location:"header" locationName:"If-Match" type:"string"` + + // Return the object only if it has been modified since the specified time, + // otherwise return a 304 (not modified). + IfModifiedSince *time.Time `location:"header" locationName:"If-Modified-Since" type:"timestamp" timestampFormat:"rfc822"` + + // Return the object only if its entity tag (ETag) is different from the one + // specified, otherwise return a 304 (not modified). + IfNoneMatch *string `location:"header" locationName:"If-None-Match" type:"string"` + + // Return the object only if it has not been modified since the specified time, + // otherwise return a 412 (precondition failed). + IfUnmodifiedSince *time.Time `location:"header" locationName:"If-Unmodified-Since" type:"timestamp" timestampFormat:"rfc822"` + + Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` + + // Downloads the specified range bytes of an object. For more information about + // the HTTP Range header, go to http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.35. + Range *string `location:"header" locationName:"Range" type:"string"` + + // Confirms that the requester knows that she or he will be charged for the + // request. Bucket owners need not specify this parameter in their requests. + // Documentation on downloading objects from requester pays buckets can be found + // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html + RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` + + // Sets the Cache-Control header of the response. + ResponseCacheControl *string `location:"querystring" locationName:"response-cache-control" type:"string"` + + // Sets the Content-Disposition header of the response + ResponseContentDisposition *string `location:"querystring" locationName:"response-content-disposition" type:"string"` + + // Sets the Content-Encoding header of the response. + ResponseContentEncoding *string `location:"querystring" locationName:"response-content-encoding" type:"string"` + + // Sets the Content-Language header of the response. + ResponseContentLanguage *string `location:"querystring" locationName:"response-content-language" type:"string"` + + // Sets the Content-Type header of the response. + ResponseContentType *string `location:"querystring" locationName:"response-content-type" type:"string"` + + // Sets the Expires header of the response. + ResponseExpires *time.Time `location:"querystring" locationName:"response-expires" type:"timestamp" timestampFormat:"iso8601"` + + // Specifies the algorithm to use to when encrypting the object (e.g., AES256). + SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` + + // Specifies the customer-provided encryption key for Amazon S3 to use in encrypting + // data. This value is used to store the object and then it is discarded; Amazon + // does not store the encryption key. The key must be appropriate for use with + // the algorithm specified in the x-amz-server-side​-encryption​-customer-algorithm + // header. + SSECustomerKey *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string"` + + // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. + // Amazon S3 uses this header for a message integrity check to ensure the encryption + // key was transmitted without error. 
+ SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` + + // VersionId used to reference a specific version of the object. + VersionId *string `location:"querystring" locationName:"versionId" type:"string"` +} + +// String returns the string representation +func (s GetObjectInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetObjectInput) GoString() string { + return s.String() +} + +type GetObjectOutput struct { + _ struct{} `type:"structure" payload:"Body"` + + AcceptRanges *string `location:"header" locationName:"accept-ranges" type:"string"` + + // Object data. + Body io.ReadCloser `type:"blob"` + + // Specifies caching behavior along the request/reply chain. + CacheControl *string `location:"header" locationName:"Cache-Control" type:"string"` + + // Specifies presentational information for the object. + ContentDisposition *string `location:"header" locationName:"Content-Disposition" type:"string"` + + // Specifies what content encodings have been applied to the object and thus + // what decoding mechanisms must be applied to obtain the media-type referenced + // by the Content-Type header field. + ContentEncoding *string `location:"header" locationName:"Content-Encoding" type:"string"` + + // The language the content is in. + ContentLanguage *string `location:"header" locationName:"Content-Language" type:"string"` + + // Size of the body in bytes. + ContentLength *int64 `location:"header" locationName:"Content-Length" type:"integer"` + + // The portion of the object returned in the response. + ContentRange *string `location:"header" locationName:"Content-Range" type:"string"` + + // A standard MIME type describing the format of the object data. + ContentType *string `location:"header" locationName:"Content-Type" type:"string"` + + // Specifies whether the object retrieved was (true) or was not (false) a Delete + // Marker. If false, this response header does not appear in the response. + DeleteMarker *bool `location:"header" locationName:"x-amz-delete-marker" type:"boolean"` + + // An ETag is an opaque identifier assigned by a web server to a specific version + // of a resource found at a URL + ETag *string `location:"header" locationName:"ETag" type:"string"` + + // If the object expiration is configured (see PUT Bucket lifecycle), the response + // includes this header. It includes the expiry-date and rule-id key value pairs + // providing object expiration information. The value of the rule-id is URL + // encoded. + Expiration *string `location:"header" locationName:"x-amz-expiration" type:"string"` + + // The date and time at which the object is no longer cacheable. + Expires *string `location:"header" locationName:"Expires" type:"string"` + + // Last modified date of the object + LastModified *time.Time `location:"header" locationName:"Last-Modified" type:"timestamp" timestampFormat:"rfc822"` + + // A map of metadata to store with the object in S3. + Metadata map[string]*string `location:"headers" locationName:"x-amz-meta-" type:"map"` + + // This is set to the number of metadata entries not returned in x-amz-meta + // headers. This can happen if you create metadata using an API like SOAP that + // supports more flexible metadata than the REST API. For example, using SOAP, + // you can create metadata whose values are not legal HTTP headers. 
+ MissingMeta *int64 `location:"header" locationName:"x-amz-missing-meta" type:"integer"` + + ReplicationStatus *string `location:"header" locationName:"x-amz-replication-status" type:"string" enum:"ReplicationStatus"` + + // If present, indicates that the requester was successfully charged for the + // request. + RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` + + // Provides information about object restoration operation and expiration time + // of the restored object copy. + Restore *string `location:"header" locationName:"x-amz-restore" type:"string"` + + // If server-side encryption with a customer-provided encryption key was requested, + // the response will include this header confirming the encryption algorithm + // used. + SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` + + // If server-side encryption with a customer-provided encryption key was requested, + // the response will include this header to provide round trip message integrity + // verification of the customer-provided encryption key. + SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` + + // If present, specifies the ID of the AWS Key Management Service (KMS) master + // encryption key that was used for the object. + SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string"` + + // The Server-side encryption algorithm used when storing this object in S3 + // (e.g., AES256, aws:kms). + ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"` + + StorageClass *string `location:"header" locationName:"x-amz-storage-class" type:"string" enum:"StorageClass"` + + // Version of the object. + VersionId *string `location:"header" locationName:"x-amz-version-id" type:"string"` + + // If the bucket is configured as a website, redirects requests for this object + // to another object in the same bucket or to an external URL. Amazon S3 stores + // the value of this header in the object metadata. + WebsiteRedirectLocation *string `location:"header" locationName:"x-amz-website-redirect-location" type:"string"` +} + +// String returns the string representation +func (s GetObjectOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetObjectOutput) GoString() string { + return s.String() +} + +type GetObjectTorrentInput struct { + _ struct{} `type:"structure"` + + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` + + // Confirms that the requester knows that she or he will be charged for the + // request. Bucket owners need not specify this parameter in their requests. 
+ // Documentation on downloading objects from requester pays buckets can be found + // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html + RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` +} + +// String returns the string representation +func (s GetObjectTorrentInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetObjectTorrentInput) GoString() string { + return s.String() +} + +type GetObjectTorrentOutput struct { + _ struct{} `type:"structure" payload:"Body"` + + Body io.ReadCloser `type:"blob"` + + // If present, indicates that the requester was successfully charged for the + // request. + RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` +} + +// String returns the string representation +func (s GetObjectTorrentOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetObjectTorrentOutput) GoString() string { + return s.String() +} + +type Grant struct { + _ struct{} `type:"structure"` + + Grantee *Grantee `type:"structure"` + + // Specifies the permission given to the grantee. + Permission *string `type:"string" enum:"Permission"` +} + +// String returns the string representation +func (s Grant) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Grant) GoString() string { + return s.String() +} + +type Grantee struct { + _ struct{} `type:"structure" xmlPrefix:"xsi" xmlURI:"http://www.w3.org/2001/XMLSchema-instance"` + + // Screen name of the grantee. + DisplayName *string `type:"string"` + + // Email address of the grantee. + EmailAddress *string `type:"string"` + + // The canonical user ID of the grantee. + ID *string `type:"string"` + + // Type of grantee + Type *string `locationName:"xsi:type" type:"string" xmlAttribute:"true" required:"true" enum:"Type"` + + // URI of the grantee group. + URI *string `type:"string"` +} + +// String returns the string representation +func (s Grantee) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Grantee) GoString() string { + return s.String() +} + +type HeadBucketInput struct { + _ struct{} `type:"structure"` + + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` +} + +// String returns the string representation +func (s HeadBucketInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s HeadBucketInput) GoString() string { + return s.String() +} + +type HeadBucketOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s HeadBucketOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s HeadBucketOutput) GoString() string { + return s.String() +} + +type HeadObjectInput struct { + _ struct{} `type:"structure"` + + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // Return the object only if its entity tag (ETag) is the same as the one specified, + // otherwise return a 412 (precondition failed). + IfMatch *string `location:"header" locationName:"If-Match" type:"string"` + + // Return the object only if it has been modified since the specified time, + // otherwise return a 304 (not modified). 
+ IfModifiedSince *time.Time `location:"header" locationName:"If-Modified-Since" type:"timestamp" timestampFormat:"rfc822"`
+
+ // Return the object only if its entity tag (ETag) is different from the one
+ // specified, otherwise return a 304 (not modified).
+ IfNoneMatch *string `location:"header" locationName:"If-None-Match" type:"string"`
+
+ // Return the object only if it has not been modified since the specified time,
+ // otherwise return a 412 (precondition failed).
+ IfUnmodifiedSince *time.Time `location:"header" locationName:"If-Unmodified-Since" type:"timestamp" timestampFormat:"rfc822"`
+
+ Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"`
+
+ // Downloads the specified range of bytes of an object. For more information
+ // about the HTTP Range header, go to http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.35.
+ Range *string `location:"header" locationName:"Range" type:"string"`
+
+ // Confirms that the requester knows that she or he will be charged for the
+ // request. Bucket owners need not specify this parameter in their requests.
+ // Documentation on downloading objects from requester pays buckets can be found
+ // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html
+ RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"`
+
+ // Specifies the algorithm to use when encrypting the object (e.g., AES256).
+ SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"`
+
+ // Specifies the customer-provided encryption key for Amazon S3 to use in encrypting
+ // data. This value is used to store the object and then it is discarded; Amazon
+ // does not store the encryption key. The key must be appropriate for use with
+ // the algorithm specified in the x-amz-server-side-encryption-customer-algorithm
+ // header.
+ SSECustomerKey *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string"`
+
+ // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321.
+ // Amazon S3 uses this header for a message integrity check to ensure the encryption
+ // key was transmitted without error.
+ SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"`
+
+ // VersionId used to reference a specific version of the object.
+ VersionId *string `location:"querystring" locationName:"versionId" type:"string"`
+}
+
+// String returns the string representation
+func (s HeadObjectInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s HeadObjectInput) GoString() string {
+ return s.String()
+}
+
+type HeadObjectOutput struct {
+ _ struct{} `type:"structure"`
+
+ AcceptRanges *string `location:"header" locationName:"accept-ranges" type:"string"`
+
+ // Specifies caching behavior along the request/reply chain.
+ CacheControl *string `location:"header" locationName:"Cache-Control" type:"string"`
+
+ // Specifies presentational information for the object.
+ ContentDisposition *string `location:"header" locationName:"Content-Disposition" type:"string"`
+
+ // Specifies what content encodings have been applied to the object and thus
+ // what decoding mechanisms must be applied to obtain the media-type referenced
+ // by the Content-Type header field.
+ ContentEncoding *string `location:"header" locationName:"Content-Encoding" type:"string"` + + // The language the content is in. + ContentLanguage *string `location:"header" locationName:"Content-Language" type:"string"` + + // Size of the body in bytes. + ContentLength *int64 `location:"header" locationName:"Content-Length" type:"integer"` + + // A standard MIME type describing the format of the object data. + ContentType *string `location:"header" locationName:"Content-Type" type:"string"` + + // Specifies whether the object retrieved was (true) or was not (false) a Delete + // Marker. If false, this response header does not appear in the response. + DeleteMarker *bool `location:"header" locationName:"x-amz-delete-marker" type:"boolean"` + + // An ETag is an opaque identifier assigned by a web server to a specific version + // of a resource found at a URL + ETag *string `location:"header" locationName:"ETag" type:"string"` + + // If the object expiration is configured (see PUT Bucket lifecycle), the response + // includes this header. It includes the expiry-date and rule-id key value pairs + // providing object expiration information. The value of the rule-id is URL + // encoded. + Expiration *string `location:"header" locationName:"x-amz-expiration" type:"string"` + + // The date and time at which the object is no longer cacheable. + Expires *string `location:"header" locationName:"Expires" type:"string"` + + // Last modified date of the object + LastModified *time.Time `location:"header" locationName:"Last-Modified" type:"timestamp" timestampFormat:"rfc822"` + + // A map of metadata to store with the object in S3. + Metadata map[string]*string `location:"headers" locationName:"x-amz-meta-" type:"map"` + + // This is set to the number of metadata entries not returned in x-amz-meta + // headers. This can happen if you create metadata using an API like SOAP that + // supports more flexible metadata than the REST API. For example, using SOAP, + // you can create metadata whose values are not legal HTTP headers. + MissingMeta *int64 `location:"header" locationName:"x-amz-missing-meta" type:"integer"` + + ReplicationStatus *string `location:"header" locationName:"x-amz-replication-status" type:"string" enum:"ReplicationStatus"` + + // If present, indicates that the requester was successfully charged for the + // request. + RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` + + // Provides information about object restoration operation and expiration time + // of the restored object copy. + Restore *string `location:"header" locationName:"x-amz-restore" type:"string"` + + // If server-side encryption with a customer-provided encryption key was requested, + // the response will include this header confirming the encryption algorithm + // used. + SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` + + // If server-side encryption with a customer-provided encryption key was requested, + // the response will include this header to provide round trip message integrity + // verification of the customer-provided encryption key. + SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` + + // If present, specifies the ID of the AWS Key Management Service (KMS) master + // encryption key that was used for the object. 
+ SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string"` + + // The Server-side encryption algorithm used when storing this object in S3 + // (e.g., AES256, aws:kms). + ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"` + + StorageClass *string `location:"header" locationName:"x-amz-storage-class" type:"string" enum:"StorageClass"` + + // Version of the object. + VersionId *string `location:"header" locationName:"x-amz-version-id" type:"string"` + + // If the bucket is configured as a website, redirects requests for this object + // to another object in the same bucket or to an external URL. Amazon S3 stores + // the value of this header in the object metadata. + WebsiteRedirectLocation *string `location:"header" locationName:"x-amz-website-redirect-location" type:"string"` +} + +// String returns the string representation +func (s HeadObjectOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s HeadObjectOutput) GoString() string { + return s.String() +} + +type IndexDocument struct { + _ struct{} `type:"structure"` + + // A suffix that is appended to a request that is for a directory on the website + // endpoint (e.g. if the suffix is index.html and you make a request to samplebucket/images/ + // the data that is returned will be for the object with the key name images/index.html) + // The suffix must not be empty and must not include a slash character. + Suffix *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s IndexDocument) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s IndexDocument) GoString() string { + return s.String() +} + +type Initiator struct { + _ struct{} `type:"structure"` + + // Name of the Principal. + DisplayName *string `type:"string"` + + // If the principal is an AWS account, it provides the Canonical User ID. If + // the principal is an IAM User, it provides a user ARN value. + ID *string `type:"string"` +} + +// String returns the string representation +func (s Initiator) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Initiator) GoString() string { + return s.String() +} + +// Container for object key name prefix and suffix filtering rules. +type KeyFilter struct { + _ struct{} `type:"structure"` + + // A list of containers for key value pair that defines the criteria for the + // filter rule. + FilterRules []*FilterRule `locationName:"FilterRule" type:"list" flattened:"true"` +} + +// String returns the string representation +func (s KeyFilter) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s KeyFilter) GoString() string { + return s.String() +} + +// Container for specifying the AWS Lambda notification configuration. +type LambdaFunctionConfiguration struct { + _ struct{} `type:"structure"` + + Events []*string `locationName:"Event" type:"list" flattened:"true" required:"true"` + + // Container for object key name filtering rules. For information about key + // name filtering, go to Configuring Event Notifications (http://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html) + // in the Amazon Simple Storage Service Developer Guide. 
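+ //
+ // (Editor's illustrative sketch, not part of the vendored source: a
+ // prefix/suffix filter is expressed as FilterRule name/value pairs, values
+ // hypothetical:
+ //
+ //   filter := &s3.NotificationConfigurationFilter{
+ //       Key: &s3.KeyFilter{
+ //           FilterRules: []*s3.FilterRule{
+ //               {Name: aws.String("prefix"), Value: aws.String("images/")},
+ //               {Name: aws.String("suffix"), Value: aws.String(".jpg")},
+ //           },
+ //       },
+ //   }
+ //
+ // using the KeyFilter container defined in this file.)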
+ Filter *NotificationConfigurationFilter `type:"structure"` + + // Optional unique identifier for configurations in a notification configuration. + // If you don't provide one, Amazon S3 will assign an ID. + Id *string `type:"string"` + + // Lambda cloud function ARN that Amazon S3 can invoke when it detects events + // of the specified type. + LambdaFunctionArn *string `locationName:"CloudFunction" type:"string" required:"true"` +} + +// String returns the string representation +func (s LambdaFunctionConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s LambdaFunctionConfiguration) GoString() string { + return s.String() +} + +type LifecycleConfiguration struct { + _ struct{} `type:"structure"` + + Rules []*Rule `locationName:"Rule" type:"list" flattened:"true" required:"true"` +} + +// String returns the string representation +func (s LifecycleConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s LifecycleConfiguration) GoString() string { + return s.String() +} + +type LifecycleExpiration struct { + _ struct{} `type:"structure"` + + // Indicates at what date the object is to be moved or deleted. Should be in + // GMT ISO 8601 Format. + Date *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + // Indicates the lifetime, in days, of the objects that are subject to the rule. + // The value must be a non-zero positive integer. + Days *int64 `type:"integer"` +} + +// String returns the string representation +func (s LifecycleExpiration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s LifecycleExpiration) GoString() string { + return s.String() +} + +type LifecycleRule struct { + _ struct{} `type:"structure"` + + Expiration *LifecycleExpiration `type:"structure"` + + // Unique identifier for the rule. The value cannot be longer than 255 characters. + ID *string `type:"string"` + + // Specifies when noncurrent object versions expire. Upon expiration, Amazon + // S3 permanently deletes the noncurrent object versions. You set this lifecycle + // configuration action on a bucket that has versioning enabled (or suspended) + // to request that Amazon S3 delete noncurrent object versions at a specific + // period in the object's lifetime. + NoncurrentVersionExpiration *NoncurrentVersionExpiration `type:"structure"` + + NoncurrentVersionTransitions []*NoncurrentVersionTransition `locationName:"NoncurrentVersionTransition" type:"list" flattened:"true"` + + // Prefix identifying one or more objects to which the rule applies. + Prefix *string `type:"string" required:"true"` + + // If 'Enabled', the rule is currently being applied. If 'Disabled', the rule + // is not currently being applied. 
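+ //
+ // (Editor's illustrative sketch, not part of the vendored source: a rule
+ // that expires objects under a prefix after 30 days, values hypothetical:
+ //
+ //   rule := &s3.LifecycleRule{
+ //       ID:         aws.String("expire-logs"),
+ //       Prefix:     aws.String("logs/"),
+ //       Status:     aws.String("Enabled"),
+ //       Expiration: &s3.LifecycleExpiration{Days: aws.Int64(30)},
+ //   }
+ //
+ // Days must be a non-zero positive integer, per LifecycleExpiration.)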
+ Status *string `type:"string" required:"true" enum:"ExpirationStatus"` + + Transitions []*Transition `locationName:"Transition" type:"list" flattened:"true"` +} + +// String returns the string representation +func (s LifecycleRule) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s LifecycleRule) GoString() string { + return s.String() +} + +type ListBucketsInput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s ListBucketsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListBucketsInput) GoString() string { + return s.String() +} + +type ListBucketsOutput struct { + _ struct{} `type:"structure"` + + Buckets []*Bucket `locationNameList:"Bucket" type:"list"` + + Owner *Owner `type:"structure"` +} + +// String returns the string representation +func (s ListBucketsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListBucketsOutput) GoString() string { + return s.String() +} + +type ListMultipartUploadsInput struct { + _ struct{} `type:"structure"` + + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // Character you use to group keys. + Delimiter *string `location:"querystring" locationName:"delimiter" type:"string"` + + // Requests Amazon S3 to encode the object keys in the response and specifies + // the encoding method to use. An object key may contain any Unicode character; + // however, XML 1.0 parser cannot parse some characters, such as characters + // with an ASCII value from 0 to 10. For characters that are not supported in + // XML 1.0, you can add this parameter to request that Amazon S3 encode the + // keys in the response. + EncodingType *string `location:"querystring" locationName:"encoding-type" type:"string" enum:"EncodingType"` + + // Together with upload-id-marker, this parameter specifies the multipart upload + // after which listing should begin. + KeyMarker *string `location:"querystring" locationName:"key-marker" type:"string"` + + // Sets the maximum number of multipart uploads, from 1 to 1,000, to return + // in the response body. 1,000 is the maximum number of uploads that can be + // returned in a response. + MaxUploads *int64 `location:"querystring" locationName:"max-uploads" type:"integer"` + + // Lists in-progress uploads only for those keys that begin with the specified + // prefix. + Prefix *string `location:"querystring" locationName:"prefix" type:"string"` + + // Together with key-marker, specifies the multipart upload after which listing + // should begin. If key-marker is not specified, the upload-id-marker parameter + // is ignored. + UploadIdMarker *string `location:"querystring" locationName:"upload-id-marker" type:"string"` +} + +// String returns the string representation +func (s ListMultipartUploadsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListMultipartUploadsInput) GoString() string { + return s.String() +} + +type ListMultipartUploadsOutput struct { + _ struct{} `type:"structure"` + + // Name of the bucket to which the multipart upload was initiated. + Bucket *string `type:"string"` + + CommonPrefixes []*CommonPrefix `type:"list" flattened:"true"` + + Delimiter *string `type:"string"` + + // Encoding type used by Amazon S3 to encode object keys in the response. 
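+ //
+ // (Editor's note, not part of the vendored source: when this is "url",
+ // object keys in the response are URL-encoded; a consumer can decode them
+ // with net/url, e.g.:
+ //
+ //   key, err := url.QueryUnescape(*upload.Key)
+ //
+ // where upload is one of the Uploads entries below.)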
+ EncodingType *string `type:"string" enum:"EncodingType"` + + // Indicates whether the returned list of multipart uploads is truncated. A + // value of true indicates that the list was truncated. The list can be truncated + // if the number of multipart uploads exceeds the limit allowed or specified + // by max uploads. + IsTruncated *bool `type:"boolean"` + + // The key at or after which the listing began. + KeyMarker *string `type:"string"` + + // Maximum number of multipart uploads that could have been included in the + // response. + MaxUploads *int64 `type:"integer"` + + // When a list is truncated, this element specifies the value that should be + // used for the key-marker request parameter in a subsequent request. + NextKeyMarker *string `type:"string"` + + // When a list is truncated, this element specifies the value that should be + // used for the upload-id-marker request parameter in a subsequent request. + NextUploadIdMarker *string `type:"string"` + + // When a prefix is provided in the request, this field contains the specified + // prefix. The result contains only keys starting with the specified prefix. + Prefix *string `type:"string"` + + // Upload ID after which listing began. + UploadIdMarker *string `type:"string"` + + Uploads []*MultipartUpload `locationName:"Upload" type:"list" flattened:"true"` +} + +// String returns the string representation +func (s ListMultipartUploadsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListMultipartUploadsOutput) GoString() string { + return s.String() +} + +type ListObjectVersionsInput struct { + _ struct{} `type:"structure"` + + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // A delimiter is a character you use to group keys. + Delimiter *string `location:"querystring" locationName:"delimiter" type:"string"` + + // Requests Amazon S3 to encode the object keys in the response and specifies + // the encoding method to use. An object key may contain any Unicode character; + // however, XML 1.0 parser cannot parse some characters, such as characters + // with an ASCII value from 0 to 10. For characters that are not supported in + // XML 1.0, you can add this parameter to request that Amazon S3 encode the + // keys in the response. + EncodingType *string `location:"querystring" locationName:"encoding-type" type:"string" enum:"EncodingType"` + + // Specifies the key to start with when listing objects in a bucket. + KeyMarker *string `location:"querystring" locationName:"key-marker" type:"string"` + + // Sets the maximum number of keys returned in the response. The response might + // contain fewer keys but will never contain more. + MaxKeys *int64 `location:"querystring" locationName:"max-keys" type:"integer"` + + // Limits the response to keys that begin with the specified prefix. + Prefix *string `location:"querystring" locationName:"prefix" type:"string"` + + // Specifies the object version you want to start listing from. 
+ VersionIdMarker *string `location:"querystring" locationName:"version-id-marker" type:"string"` +} + +// String returns the string representation +func (s ListObjectVersionsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListObjectVersionsInput) GoString() string { + return s.String() +} + +type ListObjectVersionsOutput struct { + _ struct{} `type:"structure"` + + CommonPrefixes []*CommonPrefix `type:"list" flattened:"true"` + + DeleteMarkers []*DeleteMarkerEntry `locationName:"DeleteMarker" type:"list" flattened:"true"` + + Delimiter *string `type:"string"` + + // Encoding type used by Amazon S3 to encode object keys in the response. + EncodingType *string `type:"string" enum:"EncodingType"` + + // A flag that indicates whether or not Amazon S3 returned all of the results + // that satisfied the search criteria. If your results were truncated, you can + // make a follow-up paginated request using the NextKeyMarker and NextVersionIdMarker + // response parameters as a starting place in another request to return the + // rest of the results. + IsTruncated *bool `type:"boolean"` + + // Marks the last Key returned in a truncated response. + KeyMarker *string `type:"string"` + + MaxKeys *int64 `type:"integer"` + + Name *string `type:"string"` + + // Use this value for the key marker request parameter in a subsequent request. + NextKeyMarker *string `type:"string"` + + // Use this value for the next version id marker parameter in a subsequent request. + NextVersionIdMarker *string `type:"string"` + + Prefix *string `type:"string"` + + VersionIdMarker *string `type:"string"` + + Versions []*ObjectVersion `locationName:"Version" type:"list" flattened:"true"` +} + +// String returns the string representation +func (s ListObjectVersionsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListObjectVersionsOutput) GoString() string { + return s.String() +} + +type ListObjectsInput struct { + _ struct{} `type:"structure"` + + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // A delimiter is a character you use to group keys. + Delimiter *string `location:"querystring" locationName:"delimiter" type:"string"` + + // Requests Amazon S3 to encode the object keys in the response and specifies + // the encoding method to use. An object key may contain any Unicode character; + // however, XML 1.0 parser cannot parse some characters, such as characters + // with an ASCII value from 0 to 10. For characters that are not supported in + // XML 1.0, you can add this parameter to request that Amazon S3 encode the + // keys in the response. + EncodingType *string `location:"querystring" locationName:"encoding-type" type:"string" enum:"EncodingType"` + + // Specifies the key to start with when listing objects in a bucket. + Marker *string `location:"querystring" locationName:"marker" type:"string"` + + // Sets the maximum number of keys returned in the response. The response might + // contain fewer keys but will never contain more. + MaxKeys *int64 `location:"querystring" locationName:"max-keys" type:"integer"` + + // Limits the response to keys that begin with the specified prefix. 
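+ //
+ // (Editor's illustrative sketch, not part of the vendored source: listing
+ // keys under a prefix with manual pagination, assuming an *s3.S3 client
+ // named svc and a hypothetical bucket:
+ //
+ //   in := &s3.ListObjectsInput{
+ //       Bucket: aws.String("my-bucket"),
+ //       Prefix: aws.String("logs/"),
+ //   }
+ //   for {
+ //       out, err := svc.ListObjects(in)
+ //       if err != nil {
+ //           break // handle the error in real code
+ //       }
+ //       // ... consume out.Contents ...
+ //       if out.IsTruncated == nil || !*out.IsTruncated {
+ //           break
+ //       }
+ //       in.Marker = out.NextMarker // see NextMarker's caveats below
+ //   }
+ //
+ // the SDK also generates a ListObjectsPages helper that does this loop.)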
+ Prefix *string `location:"querystring" locationName:"prefix" type:"string"`
+}
+
+// String returns the string representation
+func (s ListObjectsInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListObjectsInput) GoString() string {
+ return s.String()
+}
+
+type ListObjectsOutput struct {
+ _ struct{} `type:"structure"`
+
+ CommonPrefixes []*CommonPrefix `type:"list" flattened:"true"`
+
+ Contents []*Object `type:"list" flattened:"true"`
+
+ Delimiter *string `type:"string"`
+
+ // Encoding type used by Amazon S3 to encode object keys in the response.
+ EncodingType *string `type:"string" enum:"EncodingType"`
+
+ // A flag that indicates whether or not Amazon S3 returned all of the results
+ // that satisfied the search criteria.
+ IsTruncated *bool `type:"boolean"`
+
+ Marker *string `type:"string"`
+
+ MaxKeys *int64 `type:"integer"`
+
+ Name *string `type:"string"`
+
+ // When the response is truncated (the IsTruncated element value in the response
+ // is true), you can use the key name in this field as a marker in the subsequent
+ // request to get the next set of objects. Amazon S3 lists objects in alphabetical
+ // order. Note: This element is returned only if you have the delimiter request
+ // parameter specified. If the response does not include the NextMarker and it
+ // is truncated, you can use the value of the last Key in the response as the
+ // marker in the subsequent request to get the next set of object keys.
+ NextMarker *string `type:"string"`
+
+ Prefix *string `type:"string"`
+}
+
+// String returns the string representation
+func (s ListObjectsOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListObjectsOutput) GoString() string {
+ return s.String()
+}
+
+type ListPartsInput struct {
+ _ struct{} `type:"structure"`
+
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+ Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"`
+
+ // Sets the maximum number of parts to return.
+ MaxParts *int64 `location:"querystring" locationName:"max-parts" type:"integer"`
+
+ // Specifies the part after which listing should begin. Only parts with higher
+ // part numbers will be listed.
+ PartNumberMarker *int64 `location:"querystring" locationName:"part-number-marker" type:"integer"`
+
+ // Confirms that the requester knows that she or he will be charged for the
+ // request. Bucket owners need not specify this parameter in their requests.
+ // Documentation on downloading objects from requester pays buckets can be found
+ // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html
+ RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"`
+
+ // Upload ID identifying the multipart upload whose parts are being listed.
+ UploadId *string `location:"querystring" locationName:"uploadId" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s ListPartsInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListPartsInput) GoString() string {
+ return s.String()
+}
+
+type ListPartsOutput struct {
+ _ struct{} `type:"structure"`
+
+ // Name of the bucket to which the multipart upload was initiated.
+ Bucket *string `type:"string"`
+
+ // Identifies who initiated the multipart upload.
+ Initiator *Initiator `type:"structure"` + + // Indicates whether the returned list of parts is truncated. + IsTruncated *bool `type:"boolean"` + + // Object key for which the multipart upload was initiated. + Key *string `min:"1" type:"string"` + + // Maximum number of parts that were allowed in the response. + MaxParts *int64 `type:"integer"` + + // When a list is truncated, this element specifies the last part in the list, + // as well as the value to use for the part-number-marker request parameter + // in a subsequent request. + NextPartNumberMarker *int64 `type:"integer"` + + Owner *Owner `type:"structure"` + + // Part number after which listing begins. + PartNumberMarker *int64 `type:"integer"` + + Parts []*Part `locationName:"Part" type:"list" flattened:"true"` + + // If present, indicates that the requester was successfully charged for the + // request. + RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` + + // The class of storage used to store the object. + StorageClass *string `type:"string" enum:"StorageClass"` + + // Upload ID identifying the multipart upload whose parts are being listed. + UploadId *string `type:"string"` +} + +// String returns the string representation +func (s ListPartsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListPartsOutput) GoString() string { + return s.String() +} + +type LoggingEnabled struct { + _ struct{} `type:"structure"` + + // Specifies the bucket where you want Amazon S3 to store server access logs. + // You can have your logs delivered to any bucket that you own, including the + // same bucket that is being logged. You can also configure multiple buckets + // to deliver their logs to the same target bucket. In this case you should + // choose a different TargetPrefix for each source bucket so that the delivered + // log files can be distinguished by key. + TargetBucket *string `type:"string"` + + TargetGrants []*TargetGrant `locationNameList:"Grant" type:"list"` + + // This element lets you specify a prefix for the keys that the log files will + // be stored under. + TargetPrefix *string `type:"string"` +} + +// String returns the string representation +func (s LoggingEnabled) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s LoggingEnabled) GoString() string { + return s.String() +} + +type MultipartUpload struct { + _ struct{} `type:"structure"` + + // Date and time at which the multipart upload was initiated. + Initiated *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + // Identifies who initiated the multipart upload. + Initiator *Initiator `type:"structure"` + + // Key of the object for which the multipart upload was initiated. + Key *string `min:"1" type:"string"` + + Owner *Owner `type:"structure"` + + // The class of storage used to store the object. + StorageClass *string `type:"string" enum:"StorageClass"` + + // Upload ID that identifies the multipart upload. + UploadId *string `type:"string"` +} + +// String returns the string representation +func (s MultipartUpload) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s MultipartUpload) GoString() string { + return s.String() +} + +// Specifies when noncurrent object versions expire. Upon expiration, Amazon +// S3 permanently deletes the noncurrent object versions. 
You set this lifecycle +// configuration action on a bucket that has versioning enabled (or suspended) +// to request that Amazon S3 delete noncurrent object versions at a specific +// period in the object's lifetime. +type NoncurrentVersionExpiration struct { + _ struct{} `type:"structure"` + + // Specifies the number of days an object is noncurrent before Amazon S3 can + // perform the associated action. For information about the noncurrent days + // calculations, see How Amazon S3 Calculates When an Object Became Noncurrent + // (/AmazonS3/latest/dev/s3-access-control.html) in the Amazon Simple Storage + // Service Developer Guide. + NoncurrentDays *int64 `type:"integer"` +} + +// String returns the string representation +func (s NoncurrentVersionExpiration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s NoncurrentVersionExpiration) GoString() string { + return s.String() +} + +// Container for the transition rule that describes when noncurrent objects +// transition to the STANDARD_IA or GLACIER storage class. If your bucket is +// versioning-enabled (or versioning is suspended), you can set this action +// to request that Amazon S3 transition noncurrent object versions to the STANDARD_IA +// or GLACIER storage class at a specific period in the object's lifetime. +type NoncurrentVersionTransition struct { + _ struct{} `type:"structure"` + + // Specifies the number of days an object is noncurrent before Amazon S3 can + // perform the associated action. For information about the noncurrent days + // calculations, see How Amazon S3 Calculates When an Object Became Noncurrent + // (/AmazonS3/latest/dev/s3-access-control.html) in the Amazon Simple Storage + // Service Developer Guide. + NoncurrentDays *int64 `type:"integer"` + + // The class of storage used to store the object. + StorageClass *string `type:"string" enum:"TransitionStorageClass"` +} + +// String returns the string representation +func (s NoncurrentVersionTransition) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s NoncurrentVersionTransition) GoString() string { + return s.String() +} + +// Container for specifying the notification configuration of the bucket. If +// this element is empty, notifications are turned off on the bucket. 
+type NotificationConfiguration struct { + _ struct{} `type:"structure"` + + LambdaFunctionConfigurations []*LambdaFunctionConfiguration `locationName:"CloudFunctionConfiguration" type:"list" flattened:"true"` + + QueueConfigurations []*QueueConfiguration `locationName:"QueueConfiguration" type:"list" flattened:"true"` + + TopicConfigurations []*TopicConfiguration `locationName:"TopicConfiguration" type:"list" flattened:"true"` +} + +// String returns the string representation +func (s NotificationConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s NotificationConfiguration) GoString() string { + return s.String() +} + +type NotificationConfigurationDeprecated struct { + _ struct{} `type:"structure"` + + CloudFunctionConfiguration *CloudFunctionConfiguration `type:"structure"` + + QueueConfiguration *QueueConfigurationDeprecated `type:"structure"` + + TopicConfiguration *TopicConfigurationDeprecated `type:"structure"` +} + +// String returns the string representation +func (s NotificationConfigurationDeprecated) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s NotificationConfigurationDeprecated) GoString() string { + return s.String() +} + +// Container for object key name filtering rules. For information about key +// name filtering, go to Configuring Event Notifications (http://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html) +// in the Amazon Simple Storage Service Developer Guide. +type NotificationConfigurationFilter struct { + _ struct{} `type:"structure"` + + // Container for object key name prefix and suffix filtering rules. + Key *KeyFilter `locationName:"S3Key" type:"structure"` +} + +// String returns the string representation +func (s NotificationConfigurationFilter) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s NotificationConfigurationFilter) GoString() string { + return s.String() +} + +type Object struct { + _ struct{} `type:"structure"` + + ETag *string `type:"string"` + + Key *string `min:"1" type:"string"` + + LastModified *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + Owner *Owner `type:"structure"` + + Size *int64 `type:"integer"` + + // The class of storage used to store the object. + StorageClass *string `type:"string" enum:"ObjectStorageClass"` +} + +// String returns the string representation +func (s Object) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Object) GoString() string { + return s.String() +} + +type ObjectIdentifier struct { + _ struct{} `type:"structure"` + + // Key name of the object to delete. + Key *string `min:"1" type:"string" required:"true"` + + // VersionId for the specific version of the object to delete. + VersionId *string `type:"string"` +} + +// String returns the string representation +func (s ObjectIdentifier) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ObjectIdentifier) GoString() string { + return s.String() +} + +type ObjectVersion struct { + _ struct{} `type:"structure"` + + ETag *string `type:"string"` + + // Specifies whether the object is (true) or is not (false) the latest version + // of an object. + IsLatest *bool `type:"boolean"` + + // The object key. + Key *string `min:"1" type:"string"` + + // Date and time the object was last modified. 
+ LastModified *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + Owner *Owner `type:"structure"` + + // Size in bytes of the object. + Size *int64 `type:"integer"` + + // The class of storage used to store the object. + StorageClass *string `type:"string" enum:"ObjectVersionStorageClass"` + + // Version ID of an object. + VersionId *string `type:"string"` +} + +// String returns the string representation +func (s ObjectVersion) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ObjectVersion) GoString() string { + return s.String() +} + +type Owner struct { + _ struct{} `type:"structure"` + + DisplayName *string `type:"string"` + + ID *string `type:"string"` +} + +// String returns the string representation +func (s Owner) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Owner) GoString() string { + return s.String() +} + +type Part struct { + _ struct{} `type:"structure"` + + // Entity tag returned when the part was uploaded. + ETag *string `type:"string"` + + // Date and time at which the part was uploaded. + LastModified *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + // Part number identifying the part. This is a positive integer between 1 and + // 10,000. + PartNumber *int64 `type:"integer"` + + // Size of the uploaded part data. + Size *int64 `type:"integer"` +} + +// String returns the string representation +func (s Part) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Part) GoString() string { + return s.String() +} + +type PutBucketAclInput struct { + _ struct{} `type:"structure" payload:"AccessControlPolicy"` + + // The canned ACL to apply to the bucket. + ACL *string `location:"header" locationName:"x-amz-acl" type:"string" enum:"BucketCannedACL"` + + AccessControlPolicy *AccessControlPolicy `locationName:"AccessControlPolicy" type:"structure"` + + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // Allows grantee the read, write, read ACP, and write ACP permissions on the + // bucket. + GrantFullControl *string `location:"header" locationName:"x-amz-grant-full-control" type:"string"` + + // Allows grantee to list the objects in the bucket. + GrantRead *string `location:"header" locationName:"x-amz-grant-read" type:"string"` + + // Allows grantee to read the bucket ACL. + GrantReadACP *string `location:"header" locationName:"x-amz-grant-read-acp" type:"string"` + + // Allows grantee to create, overwrite, and delete any object in the bucket. + GrantWrite *string `location:"header" locationName:"x-amz-grant-write" type:"string"` + + // Allows grantee to write the ACL for the applicable bucket. 
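+ //
+ // (Editor's note, not part of the vendored source: each x-amz-grant-*
+ // header takes grantees in id="...", emailAddress="...", or uri="..."
+ // form, e.g. granting read to the standard all-users group:
+ //
+ //   GrantRead: aws.String(`uri="http://acs.amazonaws.com/groups/global/AllUsers"`),
+ //
+ // within a PutBucketAclInput literal.)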
+ GrantWriteACP *string `location:"header" locationName:"x-amz-grant-write-acp" type:"string"` +} + +// String returns the string representation +func (s PutBucketAclInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketAclInput) GoString() string { + return s.String() +} + +type PutBucketAclOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s PutBucketAclOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketAclOutput) GoString() string { + return s.String() +} + +type PutBucketCorsInput struct { + _ struct{} `type:"structure" payload:"CORSConfiguration"` + + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + CORSConfiguration *CORSConfiguration `locationName:"CORSConfiguration" type:"structure" required:"true"` +} + +// String returns the string representation +func (s PutBucketCorsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketCorsInput) GoString() string { + return s.String() +} + +type PutBucketCorsOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s PutBucketCorsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketCorsOutput) GoString() string { + return s.String() +} + +type PutBucketLifecycleConfigurationInput struct { + _ struct{} `type:"structure" payload:"LifecycleConfiguration"` + + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + LifecycleConfiguration *BucketLifecycleConfiguration `locationName:"LifecycleConfiguration" type:"structure"` +} + +// String returns the string representation +func (s PutBucketLifecycleConfigurationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketLifecycleConfigurationInput) GoString() string { + return s.String() +} + +type PutBucketLifecycleConfigurationOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s PutBucketLifecycleConfigurationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketLifecycleConfigurationOutput) GoString() string { + return s.String() +} + +type PutBucketLifecycleInput struct { + _ struct{} `type:"structure" payload:"LifecycleConfiguration"` + + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + LifecycleConfiguration *LifecycleConfiguration `locationName:"LifecycleConfiguration" type:"structure"` +} + +// String returns the string representation +func (s PutBucketLifecycleInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketLifecycleInput) GoString() string { + return s.String() +} + +type PutBucketLifecycleOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s PutBucketLifecycleOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketLifecycleOutput) GoString() string { + return s.String() +} + +type PutBucketLoggingInput struct { + _ struct{} `type:"structure" payload:"BucketLoggingStatus"` + + Bucket *string `location:"uri" 
locationName:"Bucket" type:"string" required:"true"` + + BucketLoggingStatus *BucketLoggingStatus `locationName:"BucketLoggingStatus" type:"structure" required:"true"` +} + +// String returns the string representation +func (s PutBucketLoggingInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketLoggingInput) GoString() string { + return s.String() +} + +type PutBucketLoggingOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s PutBucketLoggingOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketLoggingOutput) GoString() string { + return s.String() +} + +type PutBucketNotificationConfigurationInput struct { + _ struct{} `type:"structure" payload:"NotificationConfiguration"` + + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // Container for specifying the notification configuration of the bucket. If + // this element is empty, notifications are turned off on the bucket. + NotificationConfiguration *NotificationConfiguration `locationName:"NotificationConfiguration" type:"structure" required:"true"` +} + +// String returns the string representation +func (s PutBucketNotificationConfigurationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketNotificationConfigurationInput) GoString() string { + return s.String() +} + +type PutBucketNotificationConfigurationOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s PutBucketNotificationConfigurationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketNotificationConfigurationOutput) GoString() string { + return s.String() +} + +type PutBucketNotificationInput struct { + _ struct{} `type:"structure" payload:"NotificationConfiguration"` + + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + NotificationConfiguration *NotificationConfigurationDeprecated `locationName:"NotificationConfiguration" type:"structure" required:"true"` +} + +// String returns the string representation +func (s PutBucketNotificationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketNotificationInput) GoString() string { + return s.String() +} + +type PutBucketNotificationOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s PutBucketNotificationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketNotificationOutput) GoString() string { + return s.String() +} + +type PutBucketPolicyInput struct { + _ struct{} `type:"structure" payload:"Policy"` + + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The bucket policy as a JSON document. 
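+ //
+ // (Editor's illustrative sketch, not part of the vendored source: a
+ // minimal policy document, bucket name hypothetical:
+ //
+ //   policy := `{
+ //     "Version": "2012-10-17",
+ //     "Statement": [{
+ //       "Effect": "Allow",
+ //       "Principal": "*",
+ //       "Action": "s3:GetObject",
+ //       "Resource": "arn:aws:s3:::my-bucket/*"
+ //     }]
+ //   }`
+ //
+ // passed as Policy: aws.String(policy).)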
+ Policy *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s PutBucketPolicyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketPolicyInput) GoString() string { + return s.String() +} + +type PutBucketPolicyOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s PutBucketPolicyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketPolicyOutput) GoString() string { + return s.String() +} + +type PutBucketReplicationInput struct { + _ struct{} `type:"structure" payload:"ReplicationConfiguration"` + + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // Container for replication rules. You can add as many as 1,000 rules. Total + // replication configuration size can be up to 2 MB. + ReplicationConfiguration *ReplicationConfiguration `locationName:"ReplicationConfiguration" type:"structure" required:"true"` +} + +// String returns the string representation +func (s PutBucketReplicationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketReplicationInput) GoString() string { + return s.String() +} + +type PutBucketReplicationOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s PutBucketReplicationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketReplicationOutput) GoString() string { + return s.String() +} + +type PutBucketRequestPaymentInput struct { + _ struct{} `type:"structure" payload:"RequestPaymentConfiguration"` + + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + RequestPaymentConfiguration *RequestPaymentConfiguration `locationName:"RequestPaymentConfiguration" type:"structure" required:"true"` +} + +// String returns the string representation +func (s PutBucketRequestPaymentInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketRequestPaymentInput) GoString() string { + return s.String() +} + +type PutBucketRequestPaymentOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s PutBucketRequestPaymentOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketRequestPaymentOutput) GoString() string { + return s.String() +} + +type PutBucketTaggingInput struct { + _ struct{} `type:"structure" payload:"Tagging"` + + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + Tagging *Tagging `locationName:"Tagging" type:"structure" required:"true"` +} + +// String returns the string representation +func (s PutBucketTaggingInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketTaggingInput) GoString() string { + return s.String() +} + +type PutBucketTaggingOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s PutBucketTaggingOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketTaggingOutput) GoString() string { + return s.String() +} + +type PutBucketVersioningInput struct { + _ 
struct{} `type:"structure" payload:"VersioningConfiguration"` + + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The concatenation of the authentication device's serial number, a space, + // and the value that is displayed on your authentication device. + MFA *string `location:"header" locationName:"x-amz-mfa" type:"string"` + + VersioningConfiguration *VersioningConfiguration `locationName:"VersioningConfiguration" type:"structure" required:"true"` +} + +// String returns the string representation +func (s PutBucketVersioningInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketVersioningInput) GoString() string { + return s.String() +} + +type PutBucketVersioningOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s PutBucketVersioningOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketVersioningOutput) GoString() string { + return s.String() +} + +type PutBucketWebsiteInput struct { + _ struct{} `type:"structure" payload:"WebsiteConfiguration"` + + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + WebsiteConfiguration *WebsiteConfiguration `locationName:"WebsiteConfiguration" type:"structure" required:"true"` +} + +// String returns the string representation +func (s PutBucketWebsiteInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketWebsiteInput) GoString() string { + return s.String() +} + +type PutBucketWebsiteOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s PutBucketWebsiteOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketWebsiteOutput) GoString() string { + return s.String() +} + +type PutObjectAclInput struct { + _ struct{} `type:"structure" payload:"AccessControlPolicy"` + + // The canned ACL to apply to the object. + ACL *string `location:"header" locationName:"x-amz-acl" type:"string" enum:"ObjectCannedACL"` + + AccessControlPolicy *AccessControlPolicy `locationName:"AccessControlPolicy" type:"structure"` + + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // Allows grantee the read, write, read ACP, and write ACP permissions on the + // bucket. + GrantFullControl *string `location:"header" locationName:"x-amz-grant-full-control" type:"string"` + + // Allows grantee to list the objects in the bucket. + GrantRead *string `location:"header" locationName:"x-amz-grant-read" type:"string"` + + // Allows grantee to read the bucket ACL. + GrantReadACP *string `location:"header" locationName:"x-amz-grant-read-acp" type:"string"` + + // Allows grantee to create, overwrite, and delete any object in the bucket. + GrantWrite *string `location:"header" locationName:"x-amz-grant-write" type:"string"` + + // Allows grantee to write the ACL for the applicable bucket. + GrantWriteACP *string `location:"header" locationName:"x-amz-grant-write-acp" type:"string"` + + Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` + + // Confirms that the requester knows that she or he will be charged for the + // request. Bucket owners need not specify this parameter in their requests. 
+ // Documentation on downloading objects from requester pays buckets can be found + // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html + RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` +} + +// String returns the string representation +func (s PutObjectAclInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutObjectAclInput) GoString() string { + return s.String() +} + +type PutObjectAclOutput struct { + _ struct{} `type:"structure"` + + // If present, indicates that the requester was successfully charged for the + // request. + RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` +} + +// String returns the string representation +func (s PutObjectAclOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutObjectAclOutput) GoString() string { + return s.String() +} + +type PutObjectInput struct { + _ struct{} `type:"structure" payload:"Body"` + + // The canned ACL to apply to the object. + ACL *string `location:"header" locationName:"x-amz-acl" type:"string" enum:"ObjectCannedACL"` + + // Object data. + Body io.ReadSeeker `type:"blob"` + + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // Specifies caching behavior along the request/reply chain. + CacheControl *string `location:"header" locationName:"Cache-Control" type:"string"` + + // Specifies presentational information for the object. + ContentDisposition *string `location:"header" locationName:"Content-Disposition" type:"string"` + + // Specifies what content encodings have been applied to the object and thus + // what decoding mechanisms must be applied to obtain the media-type referenced + // by the Content-Type header field. + ContentEncoding *string `location:"header" locationName:"Content-Encoding" type:"string"` + + // The language the content is in. + ContentLanguage *string `location:"header" locationName:"Content-Language" type:"string"` + + // Size of the body in bytes. This parameter is useful when the size of the + // body cannot be determined automatically. + ContentLength *int64 `location:"header" locationName:"Content-Length" type:"integer"` + + // A standard MIME type describing the format of the object data. + ContentType *string `location:"header" locationName:"Content-Type" type:"string"` + + // The date and time at which the object is no longer cacheable. + Expires *time.Time `location:"header" locationName:"Expires" type:"timestamp" timestampFormat:"rfc822"` + + // Gives the grantee READ, READ_ACP, and WRITE_ACP permissions on the object. + GrantFullControl *string `location:"header" locationName:"x-amz-grant-full-control" type:"string"` + + // Allows grantee to read the object data and its metadata. + GrantRead *string `location:"header" locationName:"x-amz-grant-read" type:"string"` + + // Allows grantee to read the object ACL. + GrantReadACP *string `location:"header" locationName:"x-amz-grant-read-acp" type:"string"` + + // Allows grantee to write the ACL for the applicable object. + GrantWriteACP *string `location:"header" locationName:"x-amz-grant-write-acp" type:"string"` + + Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` + + // A map of metadata to store with the object in S3. 
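+	// Each entry is transmitted as an x-amz-meta-* header; for example (an
+	// illustrative sketch), a map entry with key "owner" and value "ops" is
+	// sent as the header x-amz-meta-owner: ops.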
+ Metadata map[string]*string `location:"headers" locationName:"x-amz-meta-" type:"map"` + + // Confirms that the requester knows that she or he will be charged for the + // request. Bucket owners need not specify this parameter in their requests. + // Documentation on downloading objects from requester pays buckets can be found + // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html + RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` + + // Specifies the algorithm to use when encrypting the object (e.g., AES256). + SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` + + // Specifies the customer-provided encryption key for Amazon S3 to use in encrypting + // data. This value is used to store the object and then it is discarded; Amazon + // does not store the encryption key. The key must be appropriate for use with + // the algorithm specified in the x-amz-server-side-encryption-customer-algorithm + // header. + SSECustomerKey *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string"` + + // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. + // Amazon S3 uses this header for a message integrity check to ensure the encryption + // key was transmitted without error. + SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` + + // Specifies the AWS KMS key ID to use for object encryption. All GET and PUT + // requests for an object protected by AWS KMS will fail if not made via SSL + // or using SigV4. Documentation on configuring any of the officially supported + // AWS SDKs and CLI can be found at http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingAWSSDK.html#specify-signature-version + SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string"` + + // The Server-side encryption algorithm used when storing this object in S3 + // (e.g., AES256, aws:kms). + ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"` + + // The type of storage to use for the object. Defaults to 'STANDARD'. + StorageClass *string `location:"header" locationName:"x-amz-storage-class" type:"string" enum:"StorageClass"` + + // If the bucket is configured as a website, redirects requests for this object + // to another object in the same bucket or to an external URL. Amazon S3 stores + // the value of this header in the object metadata. + WebsiteRedirectLocation *string `location:"header" locationName:"x-amz-website-redirect-location" type:"string"` +} + +// String returns the string representation +func (s PutObjectInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutObjectInput) GoString() string { + return s.String() +} + +type PutObjectOutput struct { + _ struct{} `type:"structure"` + + // Entity tag for the uploaded object. + ETag *string `location:"header" locationName:"ETag" type:"string"` + + // If the object expiration is configured, this will contain the expiration + // date (expiry-date) and rule ID (rule-id). The value of rule-id is URL encoded.
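+	// The header value typically takes a form such as the following
+	// (an illustrative sketch, not part of the upstream documentation):
+	//   x-amz-expiration: expiry-date="Fri, 23 Dec 2012 00:00:00 GMT", rule-id="rule-1"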
+ Expiration *string `location:"header" locationName:"x-amz-expiration" type:"string"` + + // If present, indicates that the requester was successfully charged for the + // request. + RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` + + // If server-side encryption with a customer-provided encryption key was requested, + // the response will include this header confirming the encryption algorithm + // used. + SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` + + // If server-side encryption with a customer-provided encryption key was requested, + // the response will include this header to provide round trip message integrity + // verification of the customer-provided encryption key. + SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` + + // If present, specifies the ID of the AWS Key Management Service (KMS) master + // encryption key that was used for the object. + SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string"` + + // The Server-side encryption algorithm used when storing this object in S3 + // (e.g., AES256, aws:kms). + ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"` + + // Version of the object. + VersionId *string `location:"header" locationName:"x-amz-version-id" type:"string"` +} + +// String returns the string representation +func (s PutObjectOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutObjectOutput) GoString() string { + return s.String() +} + +// Container for specifying a configuration when you want Amazon S3 to publish +// events to an Amazon Simple Queue Service (Amazon SQS) queue. +type QueueConfiguration struct { + _ struct{} `type:"structure"` + + Events []*string `locationName:"Event" type:"list" flattened:"true" required:"true"` + + // Container for object key name filtering rules. For information about key + // name filtering, go to Configuring Event Notifications (http://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html) + // in the Amazon Simple Storage Service Developer Guide. + Filter *NotificationConfigurationFilter `type:"structure"` + + // Optional unique identifier for configurations in a notification configuration. + // If you don't provide one, Amazon S3 will assign an ID. + Id *string `type:"string"` + + // Amazon SQS queue ARN to which Amazon S3 will publish a message when it detects + // events of the specified type. + QueueArn *string `locationName:"Queue" type:"string" required:"true"` +} + +// String returns the string representation +func (s QueueConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s QueueConfiguration) GoString() string { + return s.String() +} + +type QueueConfigurationDeprecated struct { + _ struct{} `type:"structure"` + + // Bucket event for which to send notifications. + Event *string `type:"string" enum:"Event"` + + Events []*string `locationName:"Event" type:"list" flattened:"true"` + + // Optional unique identifier for configurations in a notification configuration. + // If you don't provide one, Amazon S3 will assign an ID.
+ Id *string `type:"string"` + + Queue *string `type:"string"` +} + +// String returns the string representation +func (s QueueConfigurationDeprecated) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s QueueConfigurationDeprecated) GoString() string { + return s.String() +} + +type Redirect struct { + _ struct{} `type:"structure"` + + // The host name to use in the redirect request. + HostName *string `type:"string"` + + // The HTTP redirect code to use on the response. Not required if one of the + // siblings is present. + HttpRedirectCode *string `type:"string"` + + // Protocol to use (http, https) when redirecting requests. The default is the + // protocol that is used in the original request. + Protocol *string `type:"string" enum:"Protocol"` + + // The object key prefix to use in the redirect request. For example, to redirect + // requests for all pages with prefix docs/ (objects in the docs/ folder) to + // documents/, you can set a condition block with KeyPrefixEquals set to docs/ + // and in the Redirect set ReplaceKeyPrefixWith to /documents. Not required + // if one of the siblings is present. Can be present only if ReplaceKeyWith + // is not provided. + ReplaceKeyPrefixWith *string `type:"string"` + + // The specific object key to use in the redirect request. For example, redirect + // requests to error.html. Not required if one of the siblings is present. Can + // be present only if ReplaceKeyPrefixWith is not provided. + ReplaceKeyWith *string `type:"string"` +} + +// String returns the string representation +func (s Redirect) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Redirect) GoString() string { + return s.String() +} + +type RedirectAllRequestsTo struct { + _ struct{} `type:"structure"` + + // Name of the host where requests will be redirected. + HostName *string `type:"string" required:"true"` + + // Protocol to use (http, https) when redirecting requests. The default is the + // protocol that is used in the original request. + Protocol *string `type:"string" enum:"Protocol"` +} + +// String returns the string representation +func (s RedirectAllRequestsTo) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RedirectAllRequestsTo) GoString() string { + return s.String() +} + +// Container for replication rules. You can add as many as 1,000 rules. Total +// replication configuration size can be up to 2 MB. +type ReplicationConfiguration struct { + _ struct{} `type:"structure"` + + // Amazon Resource Name (ARN) of an IAM role for Amazon S3 to assume when replicating + // the objects. + Role *string `type:"string" required:"true"` + + // Container for information about a particular replication rule. Replication + // configuration must have at least one rule and can contain up to 1,000 rules. + Rules []*ReplicationRule `locationName:"Rule" type:"list" flattened:"true" required:"true"` +} + +// String returns the string representation +func (s ReplicationConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ReplicationConfiguration) GoString() string { + return s.String() +} + +type ReplicationRule struct { + _ struct{} `type:"structure"` + + Destination *Destination `type:"structure" required:"true"` + + // Unique identifier for the rule. The value cannot be longer than 255 characters.
+ ID *string `type:"string"` + + // Object keyname prefix identifying one or more objects to which the rule applies. + // Maximum prefix length can be up to 1,024 characters. Overlapping prefixes + // are not supported. + Prefix *string `type:"string" required:"true"` + + // The rule is ignored if status is not Enabled. + Status *string `type:"string" required:"true" enum:"ReplicationRuleStatus"` +} + +// String returns the string representation +func (s ReplicationRule) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ReplicationRule) GoString() string { + return s.String() +} + +type RequestPaymentConfiguration struct { + _ struct{} `type:"structure"` + + // Specifies who pays for the download and request fees. + Payer *string `type:"string" required:"true" enum:"Payer"` +} + +// String returns the string representation +func (s RequestPaymentConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RequestPaymentConfiguration) GoString() string { + return s.String() +} + +type RestoreObjectInput struct { + _ struct{} `type:"structure" payload:"RestoreRequest"` + + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` + + // Confirms that the requester knows that she or he will be charged for the + // request. Bucket owners need not specify this parameter in their requests. + // Documentation on downloading objects from requester pays buckets can be found + // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html + RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` + + RestoreRequest *RestoreRequest `locationName:"RestoreRequest" type:"structure"` + + VersionId *string `location:"querystring" locationName:"versionId" type:"string"` +} + +// String returns the string representation +func (s RestoreObjectInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RestoreObjectInput) GoString() string { + return s.String() +} + +type RestoreObjectOutput struct { + _ struct{} `type:"structure"` + + // If present, indicates that the requester was successfully charged for the + // request. + RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` +} + +// String returns the string representation +func (s RestoreObjectOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RestoreObjectOutput) GoString() string { + return s.String() +} + +type RestoreRequest struct { + _ struct{} `type:"structure"` + + // Lifetime of the active copy in days. + Days *int64 `type:"integer" required:"true"` +} + +// String returns the string representation +func (s RestoreRequest) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RestoreRequest) GoString() string { + return s.String() +} + +type RoutingRule struct { + _ struct{} `type:"structure"` + + // A container for describing a condition that must be met for the specified + // redirect to apply. For example, 1. If a request is for pages in the /docs folder, + // redirect to the /documents folder. 2.
If a request results in HTTP error 4xx, + // redirect the request to another host where you might process the error. + Condition *Condition `type:"structure"` + + // Container for redirect information. You can redirect requests to another + // host, to another page, or with another protocol. In the event of an error, + // you can specify a different error code to return. + Redirect *Redirect `type:"structure" required:"true"` +} + +// String returns the string representation +func (s RoutingRule) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RoutingRule) GoString() string { + return s.String() +} + +type Rule struct { + _ struct{} `type:"structure"` + + Expiration *LifecycleExpiration `type:"structure"` + + // Unique identifier for the rule. The value cannot be longer than 255 characters. + ID *string `type:"string"` + + // Specifies when noncurrent object versions expire. Upon expiration, Amazon + // S3 permanently deletes the noncurrent object versions. You set this lifecycle + // configuration action on a bucket that has versioning enabled (or suspended) + // to request that Amazon S3 delete noncurrent object versions at a specific + // period in the object's lifetime. + NoncurrentVersionExpiration *NoncurrentVersionExpiration `type:"structure"` + + // Container for the transition rule that describes when noncurrent objects + // transition to the STANDARD_IA or GLACIER storage class. If your bucket is + // versioning-enabled (or versioning is suspended), you can set this action + // to request that Amazon S3 transition noncurrent object versions to the STANDARD_IA + // or GLACIER storage class at a specific period in the object's lifetime. + NoncurrentVersionTransition *NoncurrentVersionTransition `type:"structure"` + + // Prefix identifying one or more objects to which the rule applies. + Prefix *string `type:"string" required:"true"` + + // If 'Enabled', the rule is currently being applied. If 'Disabled', the rule + // is not currently being applied. + Status *string `type:"string" required:"true" enum:"ExpirationStatus"` + + Transition *Transition `type:"structure"` +} + +// String returns the string representation +func (s Rule) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Rule) GoString() string { + return s.String() +} + +type Tag struct { + _ struct{} `type:"structure"` + + // Name of the tag. + Key *string `min:"1" type:"string" required:"true"` + + // Value of the tag. + Value *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s Tag) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Tag) GoString() string { + return s.String() +} + +type Tagging struct { + _ struct{} `type:"structure"` + + TagSet []*Tag `locationNameList:"Tag" type:"list" required:"true"` +} + +// String returns the string representation +func (s Tagging) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Tagging) GoString() string { + return s.String() +} + +type TargetGrant struct { + _ struct{} `type:"structure"` + + Grantee *Grantee `type:"structure"` + + // Logging permissions assigned to the Grantee for the bucket.
+ Permission *string `type:"string" enum:"BucketLogsPermission"` +} + +// String returns the string representation +func (s TargetGrant) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TargetGrant) GoString() string { + return s.String() +} + +// Container for specifying the configuration when you want Amazon S3 to publish +// events to an Amazon Simple Notification Service (Amazon SNS) topic. +type TopicConfiguration struct { + _ struct{} `type:"structure"` + + Events []*string `locationName:"Event" type:"list" flattened:"true" required:"true"` + + // Container for object key name filtering rules. For information about key + // name filtering, go to Configuring Event Notifications (http://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html) + // in the Amazon Simple Storage Service Developer Guide. + Filter *NotificationConfigurationFilter `type:"structure"` + + // Optional unique identifier for configurations in a notification configuration. + // If you don't provide one, Amazon S3 will assign an ID. + Id *string `type:"string"` + + // Amazon SNS topic ARN to which Amazon S3 will publish a message when it detects + // events of the specified type. + TopicArn *string `locationName:"Topic" type:"string" required:"true"` +} + +// String returns the string representation +func (s TopicConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TopicConfiguration) GoString() string { + return s.String() +} + +type TopicConfigurationDeprecated struct { + _ struct{} `type:"structure"` + + // Bucket event for which to send notifications. + Event *string `type:"string" enum:"Event"` + + Events []*string `locationName:"Event" type:"list" flattened:"true"` + + // Optional unique identifier for configurations in a notification configuration. + // If you don't provide one, Amazon S3 will assign an ID. + Id *string `type:"string"` + + // Amazon SNS topic to which Amazon S3 will publish a message to report the + // specified events for the bucket. + Topic *string `type:"string"` +} + +// String returns the string representation +func (s TopicConfigurationDeprecated) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TopicConfigurationDeprecated) GoString() string { + return s.String() +} + +type Transition struct { + _ struct{} `type:"structure"` + + // Indicates at what date the object is to be moved or deleted. Should be in + // GMT ISO 8601 format. + Date *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + // Indicates the lifetime, in days, of the objects that are subject to the rule. + // The value must be a non-zero positive integer. + Days *int64 `type:"integer"` + + // The class of storage used to store the object. + StorageClass *string `type:"string" enum:"TransitionStorageClass"` +} + +// String returns the string representation +func (s Transition) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Transition) GoString() string { + return s.String() +} + +type UploadPartCopyInput struct { + _ struct{} `type:"structure"` + + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The name of the source bucket and key name of the source object, separated + // by a slash (/). Must be URL-encoded.
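+	// For example (an illustrative sketch, with a hypothetical bucket and key):
+	//   x-amz-copy-source: sourcebucket/path%20to/source.txt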
+ CopySource *string `location:"header" locationName:"x-amz-copy-source" type:"string" required:"true"` + + // Copies the object if its entity tag (ETag) matches the specified tag. + CopySourceIfMatch *string `location:"header" locationName:"x-amz-copy-source-if-match" type:"string"` + + // Copies the object if it has been modified since the specified time. + CopySourceIfModifiedSince *time.Time `location:"header" locationName:"x-amz-copy-source-if-modified-since" type:"timestamp" timestampFormat:"rfc822"` + + // Copies the object if its entity tag (ETag) is different than the specified + // ETag. + CopySourceIfNoneMatch *string `location:"header" locationName:"x-amz-copy-source-if-none-match" type:"string"` + + // Copies the object if it hasn't been modified since the specified time. + CopySourceIfUnmodifiedSince *time.Time `location:"header" locationName:"x-amz-copy-source-if-unmodified-since" type:"timestamp" timestampFormat:"rfc822"` + + // The range of bytes to copy from the source object. The range value must use + // the form bytes=first-last, where the first and last are the zero-based byte + // offsets to copy. For example, bytes=0-9 indicates that you want to copy the + // first ten bytes of the source. You can copy a range only if the source object + // is greater than 5 GB. + CopySourceRange *string `location:"header" locationName:"x-amz-copy-source-range" type:"string"` + + // Specifies the algorithm to use when decrypting the source object (e.g., AES256). + CopySourceSSECustomerAlgorithm *string `location:"header" locationName:"x-amz-copy-source-server-side-encryption-customer-algorithm" type:"string"` + + // Specifies the customer-provided encryption key for Amazon S3 to use to decrypt + // the source object. The encryption key provided in this header must be one + // that was used when the source object was created. + CopySourceSSECustomerKey *string `location:"header" locationName:"x-amz-copy-source-server-side-encryption-customer-key" type:"string"` + + // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. + // Amazon S3 uses this header for a message integrity check to ensure the encryption + // key was transmitted without error. + CopySourceSSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-copy-source-server-side-encryption-customer-key-MD5" type:"string"` + + Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` + + // Part number of the part being copied. This is a positive integer between 1 and + // 10,000. + PartNumber *int64 `location:"querystring" locationName:"partNumber" type:"integer" required:"true"` + + // Confirms that the requester knows that she or he will be charged for the + // request. Bucket owners need not specify this parameter in their requests. + // Documentation on downloading objects from requester pays buckets can be found + // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html + RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` + + // Specifies the algorithm to use when encrypting the object (e.g., AES256). + SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` + + // Specifies the customer-provided encryption key for Amazon S3 to use in encrypting + // data. This value is used to store the object and then it is discarded; Amazon + // does not store the encryption key.
The key must be appropriate for use with + // the algorithm specified in the x-amz-server-side-encryption-customer-algorithm + // header. This must be the same encryption key specified in the initiate multipart + // upload request. + SSECustomerKey *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string"` + + // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. + // Amazon S3 uses this header for a message integrity check to ensure the encryption + // key was transmitted without error. + SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` + + // Upload ID identifying the multipart upload whose part is being copied. + UploadId *string `location:"querystring" locationName:"uploadId" type:"string" required:"true"` +} + +// String returns the string representation +func (s UploadPartCopyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UploadPartCopyInput) GoString() string { + return s.String() +} + +type UploadPartCopyOutput struct { + _ struct{} `type:"structure" payload:"CopyPartResult"` + + CopyPartResult *CopyPartResult `type:"structure"` + + // The version of the source object that was copied, if you have enabled versioning + // on the source bucket. + CopySourceVersionId *string `location:"header" locationName:"x-amz-copy-source-version-id" type:"string"` + + // If present, indicates that the requester was successfully charged for the + // request. + RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` + + // If server-side encryption with a customer-provided encryption key was requested, + // the response will include this header confirming the encryption algorithm + // used. + SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` + + // If server-side encryption with a customer-provided encryption key was requested, + // the response will include this header to provide round trip message integrity + // verification of the customer-provided encryption key. + SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` + + // If present, specifies the ID of the AWS Key Management Service (KMS) master + // encryption key that was used for the object. + SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string"` + + // The Server-side encryption algorithm used when storing this object in S3 + // (e.g., AES256, aws:kms). + ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"` +} + +// String returns the string representation +func (s UploadPartCopyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UploadPartCopyOutput) GoString() string { + return s.String() +} + +type UploadPartInput struct { + _ struct{} `type:"structure" payload:"Body"` + + Body io.ReadSeeker `type:"blob"` + + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // Size of the body in bytes. This parameter is useful when the size of the + // body cannot be determined automatically.
+ ContentLength *int64 `location:"header" locationName:"Content-Length" type:"integer"` + + Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` + + // Part number of the part being uploaded. This is a positive integer between 1 + // and 10,000. + PartNumber *int64 `location:"querystring" locationName:"partNumber" type:"integer" required:"true"` + + // Confirms that the requester knows that she or he will be charged for the + // request. Bucket owners need not specify this parameter in their requests. + // Documentation on downloading objects from requester pays buckets can be found + // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html + RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` + + // Specifies the algorithm to use when encrypting the object (e.g., AES256). + SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` + + // Specifies the customer-provided encryption key for Amazon S3 to use in encrypting + // data. This value is used to store the object and then it is discarded; Amazon + // does not store the encryption key. The key must be appropriate for use with + // the algorithm specified in the x-amz-server-side-encryption-customer-algorithm + // header. This must be the same encryption key specified in the initiate multipart + // upload request. + SSECustomerKey *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string"` + + // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. + // Amazon S3 uses this header for a message integrity check to ensure the encryption + // key was transmitted without error. + SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` + + // Upload ID identifying the multipart upload whose part is being uploaded. + UploadId *string `location:"querystring" locationName:"uploadId" type:"string" required:"true"` +} + +// String returns the string representation +func (s UploadPartInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UploadPartInput) GoString() string { + return s.String() +} + +type UploadPartOutput struct { + _ struct{} `type:"structure"` + + // Entity tag for the uploaded object. + ETag *string `location:"header" locationName:"ETag" type:"string"` + + // If present, indicates that the requester was successfully charged for the + // request. + RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` + + // If server-side encryption with a customer-provided encryption key was requested, + // the response will include this header confirming the encryption algorithm + // used. + SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` + + // If server-side encryption with a customer-provided encryption key was requested, + // the response will include this header to provide round trip message integrity + // verification of the customer-provided encryption key. + SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` + + // If present, specifies the ID of the AWS Key Management Service (KMS) master + // encryption key that was used for the object.
+ SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string"` + + // The Server-side encryption algorithm used when storing this object in S3 + // (e.g., AES256, aws:kms). + ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"` +} + +// String returns the string representation +func (s UploadPartOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UploadPartOutput) GoString() string { + return s.String() +} + +type VersioningConfiguration struct { + _ struct{} `type:"structure"` + + // Specifies whether MFA delete is enabled in the bucket versioning configuration. + // This element is only returned if the bucket has been configured with MFA + // delete. If the bucket has never been so configured, this element is not returned. + MFADelete *string `locationName:"MfaDelete" type:"string" enum:"MFADelete"` + + // The versioning state of the bucket. + Status *string `type:"string" enum:"BucketVersioningStatus"` +} + +// String returns the string representation +func (s VersioningConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s VersioningConfiguration) GoString() string { + return s.String() +} + +type WebsiteConfiguration struct { + _ struct{} `type:"structure"` + + ErrorDocument *ErrorDocument `type:"structure"` + + IndexDocument *IndexDocument `type:"structure"` + + RedirectAllRequestsTo *RedirectAllRequestsTo `type:"structure"` + + RoutingRules []*RoutingRule `locationNameList:"RoutingRule" type:"list"` +} + +// String returns the string representation +func (s WebsiteConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s WebsiteConfiguration) GoString() string { + return s.String() +} + +const ( + // @enum BucketCannedACL + BucketCannedACLPrivate = "private" + // @enum BucketCannedACL + BucketCannedACLPublicRead = "public-read" + // @enum BucketCannedACL + BucketCannedACLPublicReadWrite = "public-read-write" + // @enum BucketCannedACL + BucketCannedACLAuthenticatedRead = "authenticated-read" +) + +const ( + // @enum BucketLocationConstraint + BucketLocationConstraintEu = "EU" + // @enum BucketLocationConstraint + BucketLocationConstraintEuWest1 = "eu-west-1" + // @enum BucketLocationConstraint + BucketLocationConstraintUsWest1 = "us-west-1" + // @enum BucketLocationConstraint + BucketLocationConstraintUsWest2 = "us-west-2" + // @enum BucketLocationConstraint + BucketLocationConstraintApSoutheast1 = "ap-southeast-1" + // @enum BucketLocationConstraint + BucketLocationConstraintApSoutheast2 = "ap-southeast-2" + // @enum BucketLocationConstraint + BucketLocationConstraintApNortheast1 = "ap-northeast-1" + // @enum BucketLocationConstraint + BucketLocationConstraintSaEast1 = "sa-east-1" + // @enum BucketLocationConstraint + BucketLocationConstraintCnNorth1 = "cn-north-1" + // @enum BucketLocationConstraint + BucketLocationConstraintEuCentral1 = "eu-central-1" +) + +const ( + // @enum BucketLogsPermission + BucketLogsPermissionFullControl = "FULL_CONTROL" + // @enum BucketLogsPermission + BucketLogsPermissionRead = "READ" + // @enum BucketLogsPermission + BucketLogsPermissionWrite = "WRITE" +) + +const ( + // @enum BucketVersioningStatus + BucketVersioningStatusEnabled = "Enabled" + // @enum BucketVersioningStatus + BucketVersioningStatusSuspended = "Suspended" +) 
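+
+// As a hedged, illustrative sketch (not part of the upstream generated file):
+// the enum constants in this file pair with the request shapes defined above.
+// For example, enabling versioning on a hypothetical bucket could look like
+// the following, assuming the aws and session packages are imported as in the
+// generated examples further below:
+//
+//	svc := s3.New(session.New())
+//	_, err := svc.PutBucketVersioning(&s3.PutBucketVersioningInput{
+//		Bucket: aws.String("my-bucket"), // hypothetical bucket name
+//		VersioningConfiguration: &s3.VersioningConfiguration{
+//			Status: aws.String(s3.BucketVersioningStatusEnabled),
+//		},
+//	})
+//	if err != nil {
+//		fmt.Println(err.Error())
+//	}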
+ +// Requests Amazon S3 to encode the object keys in the response and specifies +// the encoding method to use. An object key may contain any Unicode character; +// however, an XML 1.0 parser cannot parse some characters, such as characters +// with an ASCII value from 0 to 10. For characters that are not supported in +// XML 1.0, you can add this parameter to request that Amazon S3 encode the +// keys in the response. +const ( + // @enum EncodingType + EncodingTypeUrl = "url" +) + +// Bucket event for which to send notifications. +const ( + // @enum Event + EventS3ReducedRedundancyLostObject = "s3:ReducedRedundancyLostObject" + // @enum Event + EventS3ObjectCreated = "s3:ObjectCreated:*" + // @enum Event + EventS3ObjectCreatedPut = "s3:ObjectCreated:Put" + // @enum Event + EventS3ObjectCreatedPost = "s3:ObjectCreated:Post" + // @enum Event + EventS3ObjectCreatedCopy = "s3:ObjectCreated:Copy" + // @enum Event + EventS3ObjectCreatedCompleteMultipartUpload = "s3:ObjectCreated:CompleteMultipartUpload" + // @enum Event + EventS3ObjectRemoved = "s3:ObjectRemoved:*" + // @enum Event + EventS3ObjectRemovedDelete = "s3:ObjectRemoved:Delete" + // @enum Event + EventS3ObjectRemovedDeleteMarkerCreated = "s3:ObjectRemoved:DeleteMarkerCreated" +) + +const ( + // @enum ExpirationStatus + ExpirationStatusEnabled = "Enabled" + // @enum ExpirationStatus + ExpirationStatusDisabled = "Disabled" +) + +const ( + // @enum FilterRuleName + FilterRuleNamePrefix = "prefix" + // @enum FilterRuleName + FilterRuleNameSuffix = "suffix" +) + +const ( + // @enum MFADelete + MFADeleteEnabled = "Enabled" + // @enum MFADelete + MFADeleteDisabled = "Disabled" +) + +const ( + // @enum MFADeleteStatus + MFADeleteStatusEnabled = "Enabled" + // @enum MFADeleteStatus + MFADeleteStatusDisabled = "Disabled" +) + +const ( + // @enum MetadataDirective + MetadataDirectiveCopy = "COPY" + // @enum MetadataDirective + MetadataDirectiveReplace = "REPLACE" +) + +const ( + // @enum ObjectCannedACL + ObjectCannedACLPrivate = "private" + // @enum ObjectCannedACL + ObjectCannedACLPublicRead = "public-read" + // @enum ObjectCannedACL + ObjectCannedACLPublicReadWrite = "public-read-write" + // @enum ObjectCannedACL + ObjectCannedACLAuthenticatedRead = "authenticated-read" + // @enum ObjectCannedACL + ObjectCannedACLAwsExecRead = "aws-exec-read" + // @enum ObjectCannedACL + ObjectCannedACLBucketOwnerRead = "bucket-owner-read" + // @enum ObjectCannedACL + ObjectCannedACLBucketOwnerFullControl = "bucket-owner-full-control" +) + +const ( + // @enum ObjectStorageClass + ObjectStorageClassStandard = "STANDARD" + // @enum ObjectStorageClass + ObjectStorageClassReducedRedundancy = "REDUCED_REDUNDANCY" + // @enum ObjectStorageClass + ObjectStorageClassGlacier = "GLACIER" +) + +const ( + // @enum ObjectVersionStorageClass + ObjectVersionStorageClassStandard = "STANDARD" +) + +const ( + // @enum Payer + PayerRequester = "Requester" + // @enum Payer + PayerBucketOwner = "BucketOwner" +) + +const ( + // @enum Permission + PermissionFullControl = "FULL_CONTROL" + // @enum Permission + PermissionWrite = "WRITE" + // @enum Permission + PermissionWriteAcp = "WRITE_ACP" + // @enum Permission + PermissionRead = "READ" + // @enum Permission + PermissionReadAcp = "READ_ACP" +) + +const ( + // @enum Protocol + ProtocolHttp = "http" + // @enum Protocol + ProtocolHttps = "https" +) + +const ( + // @enum ReplicationRuleStatus + ReplicationRuleStatusEnabled = "Enabled" + // @enum ReplicationRuleStatus + ReplicationRuleStatusDisabled = "Disabled" +) + +const ( + // @enum
ReplicationStatus + ReplicationStatusComplete = "COMPLETE" + // @enum ReplicationStatus + ReplicationStatusPending = "PENDING" + // @enum ReplicationStatus + ReplicationStatusFailed = "FAILED" + // @enum ReplicationStatus + ReplicationStatusReplica = "REPLICA" +) + +// If present, indicates that the requester was successfully charged for the +// request. +const ( + // @enum RequestCharged + RequestChargedRequester = "requester" +) + +// Confirms that the requester knows that she or he will be charged for the +// request. Bucket owners need not specify this parameter in their requests. +// Documentation on downloading objects from requester pays buckets can be found +// at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html +const ( + // @enum RequestPayer + RequestPayerRequester = "requester" +) + +const ( + // @enum ServerSideEncryption + ServerSideEncryptionAes256 = "AES256" + // @enum ServerSideEncryption + ServerSideEncryptionAwsKms = "aws:kms" +) + +const ( + // @enum StorageClass + StorageClassStandard = "STANDARD" + // @enum StorageClass + StorageClassReducedRedundancy = "REDUCED_REDUNDANCY" + // @enum StorageClass + StorageClassStandardIa = "STANDARD_IA" +) + +const ( + // @enum TransitionStorageClass + TransitionStorageClassGlacier = "GLACIER" + // @enum TransitionStorageClass + TransitionStorageClassStandardIa = "STANDARD_IA" +) + +const ( + // @enum Type + TypeCanonicalUser = "CanonicalUser" + // @enum Type + TypeAmazonCustomerByEmail = "AmazonCustomerByEmail" + // @enum Type + TypeGroup = "Group" +) diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/bucket_location.go b/vendor/github.com/aws/aws-sdk-go/service/s3/bucket_location.go new file mode 100644 index 000000000..c3a2702da --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/s3/bucket_location.go @@ -0,0 +1,43 @@ +package s3 + +import ( + "io/ioutil" + "regexp" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/awsutil" + "github.com/aws/aws-sdk-go/aws/request" +) + +var reBucketLocation = regexp.MustCompile(`>([^<>]+)<\/Location`) + +func buildGetBucketLocation(r *request.Request) { + if r.DataFilled() { + out := r.Data.(*GetBucketLocationOutput) + b, err := ioutil.ReadAll(r.HTTPResponse.Body) + if err != nil { + r.Error = awserr.New("SerializationError", "failed reading response body", err) + return + } + + match := reBucketLocation.FindSubmatch(b) + if len(match) > 1 { + loc := string(match[1]) + out.LocationConstraint = &loc + } + } +} + +func populateLocationConstraint(r *request.Request) { + if r.ParamsFilled() && aws.StringValue(r.Config.Region) != "us-east-1" { + in := r.Params.(*CreateBucketInput) + if in.CreateBucketConfiguration == nil { + r.Params = awsutil.CopyOf(r.Params) + in = r.Params.(*CreateBucketInput) + in.CreateBucketConfiguration = &CreateBucketConfiguration{ + LocationConstraint: r.Config.Region, + } + } + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/bucket_location_test.go b/vendor/github.com/aws/aws-sdk-go/service/s3/bucket_location_test.go new file mode 100644 index 000000000..8ef61b0e5 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/s3/bucket_location_test.go @@ -0,0 +1,78 @@ +package s3_test + +import ( + "bytes" + "io/ioutil" + "net/http" + "testing" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awsutil" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/awstesting/unit" + "github.com/aws/aws-sdk-go/service/s3" + 
"github.com/stretchr/testify/assert" +) + +var s3LocationTests = []struct { + body string + loc string +}{ + {``, ``}, + {`EU`, `EU`}, +} + +func TestGetBucketLocation(t *testing.T) { + for _, test := range s3LocationTests { + s := s3.New(unit.Session) + s.Handlers.Send.Clear() + s.Handlers.Send.PushBack(func(r *request.Request) { + reader := ioutil.NopCloser(bytes.NewReader([]byte(test.body))) + r.HTTPResponse = &http.Response{StatusCode: 200, Body: reader} + }) + + resp, err := s.GetBucketLocation(&s3.GetBucketLocationInput{Bucket: aws.String("bucket")}) + assert.NoError(t, err) + if test.loc == "" { + assert.Nil(t, resp.LocationConstraint) + } else { + assert.Equal(t, test.loc, *resp.LocationConstraint) + } + } +} + +func TestPopulateLocationConstraint(t *testing.T) { + s := s3.New(unit.Session) + in := &s3.CreateBucketInput{ + Bucket: aws.String("bucket"), + } + req, _ := s.CreateBucketRequest(in) + err := req.Build() + assert.NoError(t, err) + v, _ := awsutil.ValuesAtPath(req.Params, "CreateBucketConfiguration.LocationConstraint") + assert.Equal(t, "mock-region", *(v[0].(*string))) + assert.Nil(t, in.CreateBucketConfiguration) // don't modify original params +} + +func TestNoPopulateLocationConstraintIfProvided(t *testing.T) { + s := s3.New(unit.Session) + req, _ := s.CreateBucketRequest(&s3.CreateBucketInput{ + Bucket: aws.String("bucket"), + CreateBucketConfiguration: &s3.CreateBucketConfiguration{}, + }) + err := req.Build() + assert.NoError(t, err) + v, _ := awsutil.ValuesAtPath(req.Params, "CreateBucketConfiguration.LocationConstraint") + assert.Equal(t, 0, len(v)) +} + +func TestNoPopulateLocationConstraintIfClassic(t *testing.T) { + s := s3.New(unit.Session, &aws.Config{Region: aws.String("us-east-1")}) + req, _ := s.CreateBucketRequest(&s3.CreateBucketInput{ + Bucket: aws.String("bucket"), + }) + err := req.Build() + assert.NoError(t, err) + v, _ := awsutil.ValuesAtPath(req.Params, "CreateBucketConfiguration.LocationConstraint") + assert.Equal(t, 0, len(v)) +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/content_md5.go b/vendor/github.com/aws/aws-sdk-go/service/s3/content_md5.go new file mode 100644 index 000000000..9fc5df94d --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/s3/content_md5.go @@ -0,0 +1,36 @@ +package s3 + +import ( + "crypto/md5" + "encoding/base64" + "io" + + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/request" +) + +// contentMD5 computes and sets the HTTP Content-MD5 header for requests that +// require it. +func contentMD5(r *request.Request) { + h := md5.New() + + // hash the body. seek back to the first position after reading to reset + // the body for transmission. copy errors may be assumed to be from the + // body. + _, err := io.Copy(h, r.Body) + if err != nil { + r.Error = awserr.New("ContentMD5", "failed to read body", err) + return + } + _, err = r.Body.Seek(0, 0) + if err != nil { + r.Error = awserr.New("ContentMD5", "failed to seek body", err) + return + } + + // encode the md5 checksum in base64 and set the request header. 
+ sum := h.Sum(nil) + sum64 := make([]byte, base64.StdEncoding.EncodedLen(len(sum))) + base64.StdEncoding.Encode(sum64, sum) + r.HTTPRequest.Header.Set("Content-MD5", string(sum64)) +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/customizations.go b/vendor/github.com/aws/aws-sdk-go/service/s3/customizations.go new file mode 100644 index 000000000..c227f242c --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/s3/customizations.go @@ -0,0 +1,37 @@ +package s3 + +import ( + "github.com/aws/aws-sdk-go/aws/client" + "github.com/aws/aws-sdk-go/aws/request" +) + +func init() { + initClient = func(c *client.Client) { + // Support building custom host-style bucket endpoints + c.Handlers.Build.PushFront(updateHostWithBucket) + + // Require SSL when using SSE keys + c.Handlers.Validate.PushBack(validateSSERequiresSSL) + c.Handlers.Build.PushBack(computeSSEKeys) + + // S3 uses custom error unmarshaling logic + c.Handlers.UnmarshalError.Clear() + c.Handlers.UnmarshalError.PushBack(unmarshalError) + } + + initRequest = func(r *request.Request) { + switch r.Operation.Name { + case opPutBucketCors, opPutBucketLifecycle, opPutBucketPolicy, opPutBucketTagging, opDeleteObjects, opPutBucketLifecycleConfiguration: + // These S3 operations require Content-MD5 to be set + r.Handlers.Build.PushBack(contentMD5) + case opGetBucketLocation: + // GetBucketLocation has custom parsing logic + r.Handlers.Unmarshal.PushFront(buildGetBucketLocation) + case opCreateBucket: + // Auto-populate LocationConstraint with current region + r.Handlers.Validate.PushFront(populateLocationConstraint) + case opCopyObject, opUploadPartCopy, opCompleteMultipartUpload: + r.Handlers.Unmarshal.PushFront(copyMultipartStatusOKUnmarhsalError) + } + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/customizations_test.go b/vendor/github.com/aws/aws-sdk-go/service/s3/customizations_test.go new file mode 100644 index 000000000..20a62d7ac --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/s3/customizations_test.go @@ -0,0 +1,105 @@ +package s3_test + +import ( + "crypto/md5" + "encoding/base64" + "io/ioutil" + "testing" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/awstesting/unit" + "github.com/aws/aws-sdk-go/service/s3" + "github.com/stretchr/testify/assert" +) + +func assertMD5(t *testing.T, req *request.Request) { + err := req.Build() + assert.NoError(t, err) + + b, _ := ioutil.ReadAll(req.HTTPRequest.Body) + out := md5.Sum(b) + assert.NotEmpty(t, b) + assert.Equal(t, base64.StdEncoding.EncodeToString(out[:]), req.HTTPRequest.Header.Get("Content-MD5")) +} + +func TestMD5InPutBucketCors(t *testing.T) { + svc := s3.New(unit.Session) + req, _ := svc.PutBucketCorsRequest(&s3.PutBucketCorsInput{ + Bucket: aws.String("bucketname"), + CORSConfiguration: &s3.CORSConfiguration{ + CORSRules: []*s3.CORSRule{ + { + AllowedMethods: []*string{aws.String("GET")}, + AllowedOrigins: []*string{aws.String("*")}, + }, + }, + }, + }) + assertMD5(t, req) +} + +func TestMD5InPutBucketLifecycle(t *testing.T) { + svc := s3.New(unit.Session) + req, _ := svc.PutBucketLifecycleRequest(&s3.PutBucketLifecycleInput{ + Bucket: aws.String("bucketname"), + LifecycleConfiguration: &s3.LifecycleConfiguration{ + Rules: []*s3.Rule{ + { + ID: aws.String("ID"), + Prefix: aws.String("Prefix"), + Status: aws.String("Enabled"), + }, + }, + }, + }) + assertMD5(t, req) +} + +func TestMD5InPutBucketPolicy(t *testing.T) { + svc := s3.New(unit.Session) + req, _ := 
svc.PutBucketPolicyRequest(&s3.PutBucketPolicyInput{ + Bucket: aws.String("bucketname"), + Policy: aws.String("{}"), + }) + assertMD5(t, req) +} + +func TestMD5InPutBucketTagging(t *testing.T) { + svc := s3.New(unit.Session) + req, _ := svc.PutBucketTaggingRequest(&s3.PutBucketTaggingInput{ + Bucket: aws.String("bucketname"), + Tagging: &s3.Tagging{ + TagSet: []*s3.Tag{ + {Key: aws.String("KEY"), Value: aws.String("VALUE")}, + }, + }, + }) + assertMD5(t, req) +} + +func TestMD5InDeleteObjects(t *testing.T) { + svc := s3.New(unit.Session) + req, _ := svc.DeleteObjectsRequest(&s3.DeleteObjectsInput{ + Bucket: aws.String("bucketname"), + Delete: &s3.Delete{ + Objects: []*s3.ObjectIdentifier{ + {Key: aws.String("key")}, + }, + }, + }) + assertMD5(t, req) +} + +func TestMD5InPutBucketLifecycleConfiguration(t *testing.T) { + svc := s3.New(unit.Session) + req, _ := svc.PutBucketLifecycleConfigurationRequest(&s3.PutBucketLifecycleConfigurationInput{ + Bucket: aws.String("bucketname"), + LifecycleConfiguration: &s3.BucketLifecycleConfiguration{ + Rules: []*s3.LifecycleRule{ + {Prefix: aws.String("prefix"), Status: aws.String(s3.ExpirationStatusEnabled)}, + }, + }, + }) + assertMD5(t, req) +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/examples_test.go b/vendor/github.com/aws/aws-sdk-go/service/s3/examples_test.go new file mode 100644 index 000000000..59697e6b1 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/s3/examples_test.go @@ -0,0 +1,1599 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +package s3_test + +import ( + "bytes" + "fmt" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/session" + "github.com/aws/aws-sdk-go/service/s3" +) + +var _ time.Duration +var _ bytes.Buffer + +func ExampleS3_AbortMultipartUpload() { + svc := s3.New(session.New()) + + params := &s3.AbortMultipartUploadInput{ + Bucket: aws.String("BucketName"), // Required + Key: aws.String("ObjectKey"), // Required + UploadId: aws.String("MultipartUploadId"), // Required + RequestPayer: aws.String("RequestPayer"), + } + resp, err := svc.AbortMultipartUpload(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleS3_CompleteMultipartUpload() { + svc := s3.New(session.New()) + + params := &s3.CompleteMultipartUploadInput{ + Bucket: aws.String("BucketName"), // Required + Key: aws.String("ObjectKey"), // Required + UploadId: aws.String("MultipartUploadId"), // Required + MultipartUpload: &s3.CompletedMultipartUpload{ + Parts: []*s3.CompletedPart{ + { // Required + ETag: aws.String("ETag"), + PartNumber: aws.Int64(1), + }, + // More values... + }, + }, + RequestPayer: aws.String("RequestPayer"), + } + resp, err := svc.CompleteMultipartUpload(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleS3_CopyObject() { + svc := s3.New(session.New()) + + params := &s3.CopyObjectInput{ + Bucket: aws.String("BucketName"), // Required + CopySource: aws.String("CopySource"), // Required + Key: aws.String("ObjectKey"), // Required + ACL: aws.String("ObjectCannedACL"), + CacheControl: aws.String("CacheControl"), + ContentDisposition: aws.String("ContentDisposition"), + ContentEncoding: aws.String("ContentEncoding"), + ContentLanguage: aws.String("ContentLanguage"), + ContentType: aws.String("ContentType"), + CopySourceIfMatch: aws.String("CopySourceIfMatch"), + CopySourceIfModifiedSince: aws.Time(time.Now()), + CopySourceIfNoneMatch: aws.String("CopySourceIfNoneMatch"), + CopySourceIfUnmodifiedSince: aws.Time(time.Now()), + CopySourceSSECustomerAlgorithm: aws.String("CopySourceSSECustomerAlgorithm"), + CopySourceSSECustomerKey: aws.String("CopySourceSSECustomerKey"), + CopySourceSSECustomerKeyMD5: aws.String("CopySourceSSECustomerKeyMD5"), + Expires: aws.Time(time.Now()), + GrantFullControl: aws.String("GrantFullControl"), + GrantRead: aws.String("GrantRead"), + GrantReadACP: aws.String("GrantReadACP"), + GrantWriteACP: aws.String("GrantWriteACP"), + Metadata: map[string]*string{ + "Key": aws.String("MetadataValue"), // Required + // More values... + }, + MetadataDirective: aws.String("MetadataDirective"), + RequestPayer: aws.String("RequestPayer"), + SSECustomerAlgorithm: aws.String("SSECustomerAlgorithm"), + SSECustomerKey: aws.String("SSECustomerKey"), + SSECustomerKeyMD5: aws.String("SSECustomerKeyMD5"), + SSEKMSKeyId: aws.String("SSEKMSKeyId"), + ServerSideEncryption: aws.String("ServerSideEncryption"), + StorageClass: aws.String("StorageClass"), + WebsiteRedirectLocation: aws.String("WebsiteRedirectLocation"), + } + resp, err := svc.CopyObject(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleS3_CreateBucket() { + svc := s3.New(session.New()) + + params := &s3.CreateBucketInput{ + Bucket: aws.String("BucketName"), // Required + ACL: aws.String("BucketCannedACL"), + CreateBucketConfiguration: &s3.CreateBucketConfiguration{ + LocationConstraint: aws.String("BucketLocationConstraint"), + }, + GrantFullControl: aws.String("GrantFullControl"), + GrantRead: aws.String("GrantRead"), + GrantReadACP: aws.String("GrantReadACP"), + GrantWrite: aws.String("GrantWrite"), + GrantWriteACP: aws.String("GrantWriteACP"), + } + resp, err := svc.CreateBucket(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleS3_CreateMultipartUpload() { + svc := s3.New(session.New()) + + params := &s3.CreateMultipartUploadInput{ + Bucket: aws.String("BucketName"), // Required + Key: aws.String("ObjectKey"), // Required + ACL: aws.String("ObjectCannedACL"), + CacheControl: aws.String("CacheControl"), + ContentDisposition: aws.String("ContentDisposition"), + ContentEncoding: aws.String("ContentEncoding"), + ContentLanguage: aws.String("ContentLanguage"), + ContentType: aws.String("ContentType"), + Expires: aws.Time(time.Now()), + GrantFullControl: aws.String("GrantFullControl"), + GrantRead: aws.String("GrantRead"), + GrantReadACP: aws.String("GrantReadACP"), + GrantWriteACP: aws.String("GrantWriteACP"), + Metadata: map[string]*string{ + "Key": aws.String("MetadataValue"), // Required + // More values... + }, + RequestPayer: aws.String("RequestPayer"), + SSECustomerAlgorithm: aws.String("SSECustomerAlgorithm"), + SSECustomerKey: aws.String("SSECustomerKey"), + SSECustomerKeyMD5: aws.String("SSECustomerKeyMD5"), + SSEKMSKeyId: aws.String("SSEKMSKeyId"), + ServerSideEncryption: aws.String("ServerSideEncryption"), + StorageClass: aws.String("StorageClass"), + WebsiteRedirectLocation: aws.String("WebsiteRedirectLocation"), + } + resp, err := svc.CreateMultipartUpload(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleS3_DeleteBucket() { + svc := s3.New(session.New()) + + params := &s3.DeleteBucketInput{ + Bucket: aws.String("BucketName"), // Required + } + resp, err := svc.DeleteBucket(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleS3_DeleteBucketCors() { + svc := s3.New(session.New()) + + params := &s3.DeleteBucketCorsInput{ + Bucket: aws.String("BucketName"), // Required + } + resp, err := svc.DeleteBucketCors(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleS3_DeleteBucketLifecycle() { + svc := s3.New(session.New()) + + params := &s3.DeleteBucketLifecycleInput{ + Bucket: aws.String("BucketName"), // Required + } + resp, err := svc.DeleteBucketLifecycle(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleS3_DeleteBucketPolicy() { + svc := s3.New(session.New()) + + params := &s3.DeleteBucketPolicyInput{ + Bucket: aws.String("BucketName"), // Required + } + resp, err := svc.DeleteBucketPolicy(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleS3_DeleteBucketReplication() { + svc := s3.New(session.New()) + + params := &s3.DeleteBucketReplicationInput{ + Bucket: aws.String("BucketName"), // Required + } + resp, err := svc.DeleteBucketReplication(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleS3_DeleteBucketTagging() { + svc := s3.New(session.New()) + + params := &s3.DeleteBucketTaggingInput{ + Bucket: aws.String("BucketName"), // Required + } + resp, err := svc.DeleteBucketTagging(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleS3_DeleteBucketWebsite() { + svc := s3.New(session.New()) + + params := &s3.DeleteBucketWebsiteInput{ + Bucket: aws.String("BucketName"), // Required + } + resp, err := svc.DeleteBucketWebsite(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleS3_DeleteObject() { + svc := s3.New(session.New()) + + params := &s3.DeleteObjectInput{ + Bucket: aws.String("BucketName"), // Required + Key: aws.String("ObjectKey"), // Required + MFA: aws.String("MFA"), + RequestPayer: aws.String("RequestPayer"), + VersionId: aws.String("ObjectVersionId"), + } + resp, err := svc.DeleteObject(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleS3_DeleteObjects() { + svc := s3.New(session.New()) + + params := &s3.DeleteObjectsInput{ + Bucket: aws.String("BucketName"), // Required + Delete: &s3.Delete{ // Required + Objects: []*s3.ObjectIdentifier{ // Required + { // Required + Key: aws.String("ObjectKey"), // Required + VersionId: aws.String("ObjectVersionId"), + }, + // More values... + }, + Quiet: aws.Bool(true), + }, + MFA: aws.String("MFA"), + RequestPayer: aws.String("RequestPayer"), + } + resp, err := svc.DeleteObjects(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleS3_GetBucketAcl() { + svc := s3.New(session.New()) + + params := &s3.GetBucketAclInput{ + Bucket: aws.String("BucketName"), // Required + } + resp, err := svc.GetBucketAcl(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleS3_GetBucketCors() { + svc := s3.New(session.New()) + + params := &s3.GetBucketCorsInput{ + Bucket: aws.String("BucketName"), // Required + } + resp, err := svc.GetBucketCors(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleS3_GetBucketLifecycle() { + svc := s3.New(session.New()) + + params := &s3.GetBucketLifecycleInput{ + Bucket: aws.String("BucketName"), // Required + } + resp, err := svc.GetBucketLifecycle(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleS3_GetBucketLifecycleConfiguration() { + svc := s3.New(session.New()) + + params := &s3.GetBucketLifecycleConfigurationInput{ + Bucket: aws.String("BucketName"), // Required + } + resp, err := svc.GetBucketLifecycleConfiguration(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleS3_GetBucketLocation() { + svc := s3.New(session.New()) + + params := &s3.GetBucketLocationInput{ + Bucket: aws.String("BucketName"), // Required + } + resp, err := svc.GetBucketLocation(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleS3_GetBucketLogging() { + svc := s3.New(session.New()) + + params := &s3.GetBucketLoggingInput{ + Bucket: aws.String("BucketName"), // Required + } + resp, err := svc.GetBucketLogging(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleS3_GetBucketNotification() { + svc := s3.New(session.New()) + + params := &s3.GetBucketNotificationConfigurationRequest{ + Bucket: aws.String("BucketName"), // Required + } + resp, err := svc.GetBucketNotification(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleS3_GetBucketNotificationConfiguration() { + svc := s3.New(session.New()) + + params := &s3.GetBucketNotificationConfigurationRequest{ + Bucket: aws.String("BucketName"), // Required + } + resp, err := svc.GetBucketNotificationConfiguration(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleS3_GetBucketPolicy() { + svc := s3.New(session.New()) + + params := &s3.GetBucketPolicyInput{ + Bucket: aws.String("BucketName"), // Required + } + resp, err := svc.GetBucketPolicy(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleS3_GetBucketReplication() { + svc := s3.New(session.New()) + + params := &s3.GetBucketReplicationInput{ + Bucket: aws.String("BucketName"), // Required + } + resp, err := svc.GetBucketReplication(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleS3_GetBucketRequestPayment() { + svc := s3.New(session.New()) + + params := &s3.GetBucketRequestPaymentInput{ + Bucket: aws.String("BucketName"), // Required + } + resp, err := svc.GetBucketRequestPayment(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleS3_GetBucketTagging() { + svc := s3.New(session.New()) + + params := &s3.GetBucketTaggingInput{ + Bucket: aws.String("BucketName"), // Required + } + resp, err := svc.GetBucketTagging(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleS3_GetBucketVersioning() { + svc := s3.New(session.New()) + + params := &s3.GetBucketVersioningInput{ + Bucket: aws.String("BucketName"), // Required + } + resp, err := svc.GetBucketVersioning(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleS3_GetBucketWebsite() { + svc := s3.New(session.New()) + + params := &s3.GetBucketWebsiteInput{ + Bucket: aws.String("BucketName"), // Required + } + resp, err := svc.GetBucketWebsite(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleS3_GetObject() { + svc := s3.New(session.New()) + + params := &s3.GetObjectInput{ + Bucket: aws.String("BucketName"), // Required + Key: aws.String("ObjectKey"), // Required + IfMatch: aws.String("IfMatch"), + IfModifiedSince: aws.Time(time.Now()), + IfNoneMatch: aws.String("IfNoneMatch"), + IfUnmodifiedSince: aws.Time(time.Now()), + Range: aws.String("Range"), + RequestPayer: aws.String("RequestPayer"), + ResponseCacheControl: aws.String("ResponseCacheControl"), + ResponseContentDisposition: aws.String("ResponseContentDisposition"), + ResponseContentEncoding: aws.String("ResponseContentEncoding"), + ResponseContentLanguage: aws.String("ResponseContentLanguage"), + ResponseContentType: aws.String("ResponseContentType"), + ResponseExpires: aws.Time(time.Now()), + SSECustomerAlgorithm: aws.String("SSECustomerAlgorithm"), + SSECustomerKey: aws.String("SSECustomerKey"), + SSECustomerKeyMD5: aws.String("SSECustomerKeyMD5"), + VersionId: aws.String("ObjectVersionId"), + } + resp, err := svc.GetObject(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleS3_GetObjectAcl() { + svc := s3.New(session.New()) + + params := &s3.GetObjectAclInput{ + Bucket: aws.String("BucketName"), // Required + Key: aws.String("ObjectKey"), // Required + RequestPayer: aws.String("RequestPayer"), + VersionId: aws.String("ObjectVersionId"), + } + resp, err := svc.GetObjectAcl(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleS3_GetObjectTorrent() { + svc := s3.New(session.New()) + + params := &s3.GetObjectTorrentInput{ + Bucket: aws.String("BucketName"), // Required + Key: aws.String("ObjectKey"), // Required + RequestPayer: aws.String("RequestPayer"), + } + resp, err := svc.GetObjectTorrent(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleS3_HeadBucket() { + svc := s3.New(session.New()) + + params := &s3.HeadBucketInput{ + Bucket: aws.String("BucketName"), // Required + } + resp, err := svc.HeadBucket(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleS3_HeadObject() { + svc := s3.New(session.New()) + + params := &s3.HeadObjectInput{ + Bucket: aws.String("BucketName"), // Required + Key: aws.String("ObjectKey"), // Required + IfMatch: aws.String("IfMatch"), + IfModifiedSince: aws.Time(time.Now()), + IfNoneMatch: aws.String("IfNoneMatch"), + IfUnmodifiedSince: aws.Time(time.Now()), + Range: aws.String("Range"), + RequestPayer: aws.String("RequestPayer"), + SSECustomerAlgorithm: aws.String("SSECustomerAlgorithm"), + SSECustomerKey: aws.String("SSECustomerKey"), + SSECustomerKeyMD5: aws.String("SSECustomerKeyMD5"), + VersionId: aws.String("ObjectVersionId"), + } + resp, err := svc.HeadObject(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleS3_ListBuckets() { + svc := s3.New(session.New()) + + var params *s3.ListBucketsInput + resp, err := svc.ListBuckets(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleS3_ListMultipartUploads() { + svc := s3.New(session.New()) + + params := &s3.ListMultipartUploadsInput{ + Bucket: aws.String("BucketName"), // Required + Delimiter: aws.String("Delimiter"), + EncodingType: aws.String("EncodingType"), + KeyMarker: aws.String("KeyMarker"), + MaxUploads: aws.Int64(1), + Prefix: aws.String("Prefix"), + UploadIdMarker: aws.String("UploadIdMarker"), + } + resp, err := svc.ListMultipartUploads(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleS3_ListObjectVersions() { + svc := s3.New(session.New()) + + params := &s3.ListObjectVersionsInput{ + Bucket: aws.String("BucketName"), // Required + Delimiter: aws.String("Delimiter"), + EncodingType: aws.String("EncodingType"), + KeyMarker: aws.String("KeyMarker"), + MaxKeys: aws.Int64(1), + Prefix: aws.String("Prefix"), + VersionIdMarker: aws.String("VersionIdMarker"), + } + resp, err := svc.ListObjectVersions(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleS3_ListObjects() { + svc := s3.New(session.New()) + + params := &s3.ListObjectsInput{ + Bucket: aws.String("BucketName"), // Required + Delimiter: aws.String("Delimiter"), + EncodingType: aws.String("EncodingType"), + Marker: aws.String("Marker"), + MaxKeys: aws.Int64(1), + Prefix: aws.String("Prefix"), + } + resp, err := svc.ListObjects(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleS3_ListParts() { + svc := s3.New(session.New()) + + params := &s3.ListPartsInput{ + Bucket: aws.String("BucketName"), // Required + Key: aws.String("ObjectKey"), // Required + UploadId: aws.String("MultipartUploadId"), // Required + MaxParts: aws.Int64(1), + PartNumberMarker: aws.Int64(1), + RequestPayer: aws.String("RequestPayer"), + } + resp, err := svc.ListParts(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleS3_PutBucketAcl() { + svc := s3.New(session.New()) + + params := &s3.PutBucketAclInput{ + Bucket: aws.String("BucketName"), // Required + ACL: aws.String("BucketCannedACL"), + AccessControlPolicy: &s3.AccessControlPolicy{ + Grants: []*s3.Grant{ + { // Required + Grantee: &s3.Grantee{ + Type: aws.String("Type"), // Required + DisplayName: aws.String("DisplayName"), + EmailAddress: aws.String("EmailAddress"), + ID: aws.String("ID"), + URI: aws.String("URI"), + }, + Permission: aws.String("Permission"), + }, + // More values... + }, + Owner: &s3.Owner{ + DisplayName: aws.String("DisplayName"), + ID: aws.String("ID"), + }, + }, + GrantFullControl: aws.String("GrantFullControl"), + GrantRead: aws.String("GrantRead"), + GrantReadACP: aws.String("GrantReadACP"), + GrantWrite: aws.String("GrantWrite"), + GrantWriteACP: aws.String("GrantWriteACP"), + } + resp, err := svc.PutBucketAcl(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleS3_PutBucketCors() { + svc := s3.New(session.New()) + + params := &s3.PutBucketCorsInput{ + Bucket: aws.String("BucketName"), // Required + CORSConfiguration: &s3.CORSConfiguration{ // Required + CORSRules: []*s3.CORSRule{ // Required + { // Required + AllowedMethods: []*string{ // Required + aws.String("AllowedMethod"), // Required + // More values... + }, + AllowedOrigins: []*string{ // Required + aws.String("AllowedOrigin"), // Required + // More values... + }, + AllowedHeaders: []*string{ + aws.String("AllowedHeader"), // Required + // More values... + }, + ExposeHeaders: []*string{ + aws.String("ExposeHeader"), // Required + // More values... + }, + MaxAgeSeconds: aws.Int64(1), + }, + // More values... + }, + }, + } + resp, err := svc.PutBucketCors(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleS3_PutBucketLifecycle() { + svc := s3.New(session.New()) + + params := &s3.PutBucketLifecycleInput{ + Bucket: aws.String("BucketName"), // Required + LifecycleConfiguration: &s3.LifecycleConfiguration{ + Rules: []*s3.Rule{ // Required + { // Required + Prefix: aws.String("Prefix"), // Required + Status: aws.String("ExpirationStatus"), // Required + Expiration: &s3.LifecycleExpiration{ + Date: aws.Time(time.Now()), + Days: aws.Int64(1), + }, + ID: aws.String("ID"), + NoncurrentVersionExpiration: &s3.NoncurrentVersionExpiration{ + NoncurrentDays: aws.Int64(1), + }, + NoncurrentVersionTransition: &s3.NoncurrentVersionTransition{ + NoncurrentDays: aws.Int64(1), + StorageClass: aws.String("TransitionStorageClass"), + }, + Transition: &s3.Transition{ + Date: aws.Time(time.Now()), + Days: aws.Int64(1), + StorageClass: aws.String("TransitionStorageClass"), + }, + }, + // More values... + }, + }, + } + resp, err := svc.PutBucketLifecycle(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleS3_PutBucketLifecycleConfiguration() { + svc := s3.New(session.New()) + + params := &s3.PutBucketLifecycleConfigurationInput{ + Bucket: aws.String("BucketName"), // Required + LifecycleConfiguration: &s3.BucketLifecycleConfiguration{ + Rules: []*s3.LifecycleRule{ // Required + { // Required + Prefix: aws.String("Prefix"), // Required + Status: aws.String("ExpirationStatus"), // Required + Expiration: &s3.LifecycleExpiration{ + Date: aws.Time(time.Now()), + Days: aws.Int64(1), + }, + ID: aws.String("ID"), + NoncurrentVersionExpiration: &s3.NoncurrentVersionExpiration{ + NoncurrentDays: aws.Int64(1), + }, + NoncurrentVersionTransitions: []*s3.NoncurrentVersionTransition{ + { // Required + NoncurrentDays: aws.Int64(1), + StorageClass: aws.String("TransitionStorageClass"), + }, + // More values... + }, + Transitions: []*s3.Transition{ + { // Required + Date: aws.Time(time.Now()), + Days: aws.Int64(1), + StorageClass: aws.String("TransitionStorageClass"), + }, + // More values... + }, + }, + // More values... + }, + }, + } + resp, err := svc.PutBucketLifecycleConfiguration(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleS3_PutBucketLogging() { + svc := s3.New(session.New()) + + params := &s3.PutBucketLoggingInput{ + Bucket: aws.String("BucketName"), // Required + BucketLoggingStatus: &s3.BucketLoggingStatus{ // Required + LoggingEnabled: &s3.LoggingEnabled{ + TargetBucket: aws.String("TargetBucket"), + TargetGrants: []*s3.TargetGrant{ + { // Required + Grantee: &s3.Grantee{ + Type: aws.String("Type"), // Required + DisplayName: aws.String("DisplayName"), + EmailAddress: aws.String("EmailAddress"), + ID: aws.String("ID"), + URI: aws.String("URI"), + }, + Permission: aws.String("BucketLogsPermission"), + }, + // More values... + }, + TargetPrefix: aws.String("TargetPrefix"), + }, + }, + } + resp, err := svc.PutBucketLogging(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleS3_PutBucketNotification() { + svc := s3.New(session.New()) + + params := &s3.PutBucketNotificationInput{ + Bucket: aws.String("BucketName"), // Required + NotificationConfiguration: &s3.NotificationConfigurationDeprecated{ // Required + CloudFunctionConfiguration: &s3.CloudFunctionConfiguration{ + CloudFunction: aws.String("CloudFunction"), + Event: aws.String("Event"), + Events: []*string{ + aws.String("Event"), // Required + // More values... + }, + Id: aws.String("NotificationId"), + InvocationRole: aws.String("CloudFunctionInvocationRole"), + }, + QueueConfiguration: &s3.QueueConfigurationDeprecated{ + Event: aws.String("Event"), + Events: []*string{ + aws.String("Event"), // Required + // More values... + }, + Id: aws.String("NotificationId"), + Queue: aws.String("QueueArn"), + }, + TopicConfiguration: &s3.TopicConfigurationDeprecated{ + Event: aws.String("Event"), + Events: []*string{ + aws.String("Event"), // Required + // More values... + }, + Id: aws.String("NotificationId"), + Topic: aws.String("TopicArn"), + }, + }, + } + resp, err := svc.PutBucketNotification(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleS3_PutBucketNotificationConfiguration() { + svc := s3.New(session.New()) + + params := &s3.PutBucketNotificationConfigurationInput{ + Bucket: aws.String("BucketName"), // Required + NotificationConfiguration: &s3.NotificationConfiguration{ // Required + LambdaFunctionConfigurations: []*s3.LambdaFunctionConfiguration{ + { // Required + Events: []*string{ // Required + aws.String("Event"), // Required + // More values... + }, + LambdaFunctionArn: aws.String("LambdaFunctionArn"), // Required + Filter: &s3.NotificationConfigurationFilter{ + Key: &s3.KeyFilter{ + FilterRules: []*s3.FilterRule{ + { // Required + Name: aws.String("FilterRuleName"), + Value: aws.String("FilterRuleValue"), + }, + // More values... + }, + }, + }, + Id: aws.String("NotificationId"), + }, + // More values... + }, + QueueConfigurations: []*s3.QueueConfiguration{ + { // Required + Events: []*string{ // Required + aws.String("Event"), // Required + // More values... + }, + QueueArn: aws.String("QueueArn"), // Required + Filter: &s3.NotificationConfigurationFilter{ + Key: &s3.KeyFilter{ + FilterRules: []*s3.FilterRule{ + { // Required + Name: aws.String("FilterRuleName"), + Value: aws.String("FilterRuleValue"), + }, + // More values... + }, + }, + }, + Id: aws.String("NotificationId"), + }, + // More values... + }, + TopicConfigurations: []*s3.TopicConfiguration{ + { // Required + Events: []*string{ // Required + aws.String("Event"), // Required + // More values... + }, + TopicArn: aws.String("TopicArn"), // Required + Filter: &s3.NotificationConfigurationFilter{ + Key: &s3.KeyFilter{ + FilterRules: []*s3.FilterRule{ + { // Required + Name: aws.String("FilterRuleName"), + Value: aws.String("FilterRuleValue"), + }, + // More values... + }, + }, + }, + Id: aws.String("NotificationId"), + }, + // More values... + }, + }, + } + resp, err := svc.PutBucketNotificationConfiguration(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleS3_PutBucketPolicy() { + svc := s3.New(session.New()) + + params := &s3.PutBucketPolicyInput{ + Bucket: aws.String("BucketName"), // Required + Policy: aws.String("Policy"), // Required + } + resp, err := svc.PutBucketPolicy(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleS3_PutBucketReplication() { + svc := s3.New(session.New()) + + params := &s3.PutBucketReplicationInput{ + Bucket: aws.String("BucketName"), // Required + ReplicationConfiguration: &s3.ReplicationConfiguration{ // Required + Role: aws.String("Role"), // Required + Rules: []*s3.ReplicationRule{ // Required + { // Required + Destination: &s3.Destination{ // Required + Bucket: aws.String("BucketName"), // Required + StorageClass: aws.String("StorageClass"), + }, + Prefix: aws.String("Prefix"), // Required + Status: aws.String("ReplicationRuleStatus"), // Required + ID: aws.String("ID"), + }, + // More values... + }, + }, + } + resp, err := svc.PutBucketReplication(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleS3_PutBucketRequestPayment() { + svc := s3.New(session.New()) + + params := &s3.PutBucketRequestPaymentInput{ + Bucket: aws.String("BucketName"), // Required + RequestPaymentConfiguration: &s3.RequestPaymentConfiguration{ // Required + Payer: aws.String("Payer"), // Required + }, + } + resp, err := svc.PutBucketRequestPayment(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleS3_PutBucketTagging() { + svc := s3.New(session.New()) + + params := &s3.PutBucketTaggingInput{ + Bucket: aws.String("BucketName"), // Required + Tagging: &s3.Tagging{ // Required + TagSet: []*s3.Tag{ // Required + { // Required + Key: aws.String("ObjectKey"), // Required + Value: aws.String("Value"), // Required + }, + // More values... + }, + }, + } + resp, err := svc.PutBucketTagging(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleS3_PutBucketVersioning() { + svc := s3.New(session.New()) + + params := &s3.PutBucketVersioningInput{ + Bucket: aws.String("BucketName"), // Required + VersioningConfiguration: &s3.VersioningConfiguration{ // Required + MFADelete: aws.String("MFADelete"), + Status: aws.String("BucketVersioningStatus"), + }, + MFA: aws.String("MFA"), + } + resp, err := svc.PutBucketVersioning(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleS3_PutBucketWebsite() { + svc := s3.New(session.New()) + + params := &s3.PutBucketWebsiteInput{ + Bucket: aws.String("BucketName"), // Required + WebsiteConfiguration: &s3.WebsiteConfiguration{ // Required + ErrorDocument: &s3.ErrorDocument{ + Key: aws.String("ObjectKey"), // Required + }, + IndexDocument: &s3.IndexDocument{ + Suffix: aws.String("Suffix"), // Required + }, + RedirectAllRequestsTo: &s3.RedirectAllRequestsTo{ + HostName: aws.String("HostName"), // Required + Protocol: aws.String("Protocol"), + }, + RoutingRules: []*s3.RoutingRule{ + { // Required + Redirect: &s3.Redirect{ // Required + HostName: aws.String("HostName"), + HttpRedirectCode: aws.String("HttpRedirectCode"), + Protocol: aws.String("Protocol"), + ReplaceKeyPrefixWith: aws.String("ReplaceKeyPrefixWith"), + ReplaceKeyWith: aws.String("ReplaceKeyWith"), + }, + Condition: &s3.Condition{ + HttpErrorCodeReturnedEquals: aws.String("HttpErrorCodeReturnedEquals"), + KeyPrefixEquals: aws.String("KeyPrefixEquals"), + }, + }, + // More values... + }, + }, + } + resp, err := svc.PutBucketWebsite(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleS3_PutObject() { + svc := s3.New(session.New()) + + params := &s3.PutObjectInput{ + Bucket: aws.String("BucketName"), // Required + Key: aws.String("ObjectKey"), // Required + ACL: aws.String("ObjectCannedACL"), + Body: bytes.NewReader([]byte("PAYLOAD")), + CacheControl: aws.String("CacheControl"), + ContentDisposition: aws.String("ContentDisposition"), + ContentEncoding: aws.String("ContentEncoding"), + ContentLanguage: aws.String("ContentLanguage"), + ContentLength: aws.Int64(1), + ContentType: aws.String("ContentType"), + Expires: aws.Time(time.Now()), + GrantFullControl: aws.String("GrantFullControl"), + GrantRead: aws.String("GrantRead"), + GrantReadACP: aws.String("GrantReadACP"), + GrantWriteACP: aws.String("GrantWriteACP"), + Metadata: map[string]*string{ + "Key": aws.String("MetadataValue"), // Required + // More values... + }, + RequestPayer: aws.String("RequestPayer"), + SSECustomerAlgorithm: aws.String("SSECustomerAlgorithm"), + SSECustomerKey: aws.String("SSECustomerKey"), + SSECustomerKeyMD5: aws.String("SSECustomerKeyMD5"), + SSEKMSKeyId: aws.String("SSEKMSKeyId"), + ServerSideEncryption: aws.String("ServerSideEncryption"), + StorageClass: aws.String("StorageClass"), + WebsiteRedirectLocation: aws.String("WebsiteRedirectLocation"), + } + resp, err := svc.PutObject(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleS3_PutObjectAcl() { + svc := s3.New(session.New()) + + params := &s3.PutObjectAclInput{ + Bucket: aws.String("BucketName"), // Required + Key: aws.String("ObjectKey"), // Required + ACL: aws.String("ObjectCannedACL"), + AccessControlPolicy: &s3.AccessControlPolicy{ + Grants: []*s3.Grant{ + { // Required + Grantee: &s3.Grantee{ + Type: aws.String("Type"), // Required + DisplayName: aws.String("DisplayName"), + EmailAddress: aws.String("EmailAddress"), + ID: aws.String("ID"), + URI: aws.String("URI"), + }, + Permission: aws.String("Permission"), + }, + // More values... 
+ }, + Owner: &s3.Owner{ + DisplayName: aws.String("DisplayName"), + ID: aws.String("ID"), + }, + }, + GrantFullControl: aws.String("GrantFullControl"), + GrantRead: aws.String("GrantRead"), + GrantReadACP: aws.String("GrantReadACP"), + GrantWrite: aws.String("GrantWrite"), + GrantWriteACP: aws.String("GrantWriteACP"), + RequestPayer: aws.String("RequestPayer"), + } + resp, err := svc.PutObjectAcl(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleS3_RestoreObject() { + svc := s3.New(session.New()) + + params := &s3.RestoreObjectInput{ + Bucket: aws.String("BucketName"), // Required + Key: aws.String("ObjectKey"), // Required + RequestPayer: aws.String("RequestPayer"), + RestoreRequest: &s3.RestoreRequest{ + Days: aws.Int64(1), // Required + }, + VersionId: aws.String("ObjectVersionId"), + } + resp, err := svc.RestoreObject(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleS3_UploadPart() { + svc := s3.New(session.New()) + + params := &s3.UploadPartInput{ + Bucket: aws.String("BucketName"), // Required + Key: aws.String("ObjectKey"), // Required + PartNumber: aws.Int64(1), // Required + UploadId: aws.String("MultipartUploadId"), // Required + Body: bytes.NewReader([]byte("PAYLOAD")), + ContentLength: aws.Int64(1), + RequestPayer: aws.String("RequestPayer"), + SSECustomerAlgorithm: aws.String("SSECustomerAlgorithm"), + SSECustomerKey: aws.String("SSECustomerKey"), + SSECustomerKeyMD5: aws.String("SSECustomerKeyMD5"), + } + resp, err := svc.UploadPart(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleS3_UploadPartCopy() { + svc := s3.New(session.New()) + + params := &s3.UploadPartCopyInput{ + Bucket: aws.String("BucketName"), // Required + CopySource: aws.String("CopySource"), // Required + Key: aws.String("ObjectKey"), // Required + PartNumber: aws.Int64(1), // Required + UploadId: aws.String("MultipartUploadId"), // Required + CopySourceIfMatch: aws.String("CopySourceIfMatch"), + CopySourceIfModifiedSince: aws.Time(time.Now()), + CopySourceIfNoneMatch: aws.String("CopySourceIfNoneMatch"), + CopySourceIfUnmodifiedSince: aws.Time(time.Now()), + CopySourceRange: aws.String("CopySourceRange"), + CopySourceSSECustomerAlgorithm: aws.String("CopySourceSSECustomerAlgorithm"), + CopySourceSSECustomerKey: aws.String("CopySourceSSECustomerKey"), + CopySourceSSECustomerKeyMD5: aws.String("CopySourceSSECustomerKeyMD5"), + RequestPayer: aws.String("RequestPayer"), + SSECustomerAlgorithm: aws.String("SSECustomerAlgorithm"), + SSECustomerKey: aws.String("SSECustomerKey"), + SSECustomerKeyMD5: aws.String("SSECustomerKeyMD5"), + } + resp, err := svc.UploadPartCopy(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/host_style_bucket.go b/vendor/github.com/aws/aws-sdk-go/service/s3/host_style_bucket.go new file mode 100644 index 000000000..b7cf4f9cc --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/s3/host_style_bucket.go @@ -0,0 +1,60 @@ +package s3 + +import ( + "regexp" + "strings" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awsutil" + "github.com/aws/aws-sdk-go/aws/request" +) + +var reDomain = regexp.MustCompile(`^[a-z0-9][a-z0-9\.\-]{1,61}[a-z0-9]$`) +var reIPAddress = regexp.MustCompile(`^(\d+\.){3}\d+$`) + +// dnsCompatibleBucketName returns true if the bucket name is DNS compatible. +// Buckets created outside of the classic region MUST be DNS compatible. +func dnsCompatibleBucketName(bucket string) bool { + return reDomain.MatchString(bucket) && + !reIPAddress.MatchString(bucket) && + !strings.Contains(bucket, "..") +} + +// hostStyleBucketName returns true if the request should put the bucket in +// the host. This is false if S3ForcePathStyle is explicitly set or if the +// bucket is not DNS compatible. +func hostStyleBucketName(r *request.Request, bucket string) bool { + if aws.BoolValue(r.Config.S3ForcePathStyle) { + return false + } + + // Bucket might be DNS compatible but dots in the hostname will fail + // certificate validation, so do not use host-style. + if r.HTTPRequest.URL.Scheme == "https" && strings.Contains(bucket, ".") { + return false + } + + // GetBucketLocation should be able to be called from any region within + // a partition, and return the associated region of the bucket. + if r.Operation.Name == opGetBucketLocation { + return false + } + + // Use host-style if the bucket is DNS compatible + return dnsCompatibleBucketName(bucket) +} + +func updateHostWithBucket(r *request.Request) { + b, _ := awsutil.ValuesAtPath(r.Params, "Bucket") + if len(b) == 0 { + return + } + + if bucket := b[0].(*string); aws.StringValue(bucket) != "" && hostStyleBucketName(r, *bucket) { + r.HTTPRequest.URL.Host = *bucket + "." 
+ r.HTTPRequest.URL.Host + r.HTTPRequest.URL.Path = strings.Replace(r.HTTPRequest.URL.Path, "/{Bucket}", "", -1) + if r.HTTPRequest.URL.Path == "" { + r.HTTPRequest.URL.Path = "/" + } + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/host_style_bucket_test.go b/vendor/github.com/aws/aws-sdk-go/service/s3/host_style_bucket_test.go new file mode 100644 index 000000000..9c8e81c38 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/s3/host_style_bucket_test.go @@ -0,0 +1,75 @@ +package s3_test + +import ( + "net/url" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/awstesting/unit" + "github.com/aws/aws-sdk-go/service/s3" +) + +type s3BucketTest struct { + bucket string + url string +} + +var ( + sslTests = []s3BucketTest{ + {"abc", "https://abc.s3.mock-region.amazonaws.com/"}, + {"a$b$c", "https://s3.mock-region.amazonaws.com/a%24b%24c"}, + {"a.b.c", "https://s3.mock-region.amazonaws.com/a.b.c"}, + {"a..bc", "https://s3.mock-region.amazonaws.com/a..bc"}, + } + + nosslTests = []s3BucketTest{ + {"a.b.c", "http://a.b.c.s3.mock-region.amazonaws.com/"}, + {"a..bc", "http://s3.mock-region.amazonaws.com/a..bc"}, + } + + forcepathTests = []s3BucketTest{ + {"abc", "https://s3.mock-region.amazonaws.com/abc"}, + {"a$b$c", "https://s3.mock-region.amazonaws.com/a%24b%24c"}, + {"a.b.c", "https://s3.mock-region.amazonaws.com/a.b.c"}, + {"a..bc", "https://s3.mock-region.amazonaws.com/a..bc"}, + } +) + +func runTests(t *testing.T, svc *s3.S3, tests []s3BucketTest) { + for _, test := range tests { + req, _ := svc.ListObjectsRequest(&s3.ListObjectsInput{Bucket: &test.bucket}) + req.Build() + assert.Equal(t, test.url, req.HTTPRequest.URL.String()) + } +} + +func TestHostStyleBucketBuild(t *testing.T) { + s := s3.New(unit.Session) + runTests(t, s, sslTests) +} + +func TestHostStyleBucketBuildNoSSL(t *testing.T) { + s := s3.New(unit.Session, &aws.Config{DisableSSL: aws.Bool(true)}) + runTests(t, s, nosslTests) +} + +func TestPathStyleBucketBuild(t *testing.T) { + s := s3.New(unit.Session, &aws.Config{S3ForcePathStyle: aws.Bool(true)}) + runTests(t, s, forcepathTests) +} + +func TestHostStyleBucketGetBucketLocation(t *testing.T) { + s := s3.New(unit.Session) + req, _ := s.GetBucketLocationRequest(&s3.GetBucketLocationInput{ + Bucket: aws.String("bucket"), + }) + + req.Build() + require.NoError(t, req.Error) + u, _ := url.Parse(req.HTTPRequest.URL.String()) + assert.NotContains(t, u.Host, "bucket") + assert.Contains(t, u.Path, "bucket") +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/s3iface/interface.go b/vendor/github.com/aws/aws-sdk-go/service/s3/s3iface/interface.go new file mode 100644 index 000000000..9e66afb34 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/s3/s3iface/interface.go @@ -0,0 +1,246 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +// Package s3iface provides an interface for the Amazon Simple Storage Service. +package s3iface + +import ( + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/service/s3" +) + +// S3API is the interface type for s3.S3. 
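+//
+// Code written against this interface rather than the concrete *s3.S3 type
+// can substitute a stub or mock S3 client in unit tests; the compile-time
+// assertion at the bottom of this file ensures the real client always
+// satisfies the interface.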
+type S3API interface { + AbortMultipartUploadRequest(*s3.AbortMultipartUploadInput) (*request.Request, *s3.AbortMultipartUploadOutput) + + AbortMultipartUpload(*s3.AbortMultipartUploadInput) (*s3.AbortMultipartUploadOutput, error) + + CompleteMultipartUploadRequest(*s3.CompleteMultipartUploadInput) (*request.Request, *s3.CompleteMultipartUploadOutput) + + CompleteMultipartUpload(*s3.CompleteMultipartUploadInput) (*s3.CompleteMultipartUploadOutput, error) + + CopyObjectRequest(*s3.CopyObjectInput) (*request.Request, *s3.CopyObjectOutput) + + CopyObject(*s3.CopyObjectInput) (*s3.CopyObjectOutput, error) + + CreateBucketRequest(*s3.CreateBucketInput) (*request.Request, *s3.CreateBucketOutput) + + CreateBucket(*s3.CreateBucketInput) (*s3.CreateBucketOutput, error) + + CreateMultipartUploadRequest(*s3.CreateMultipartUploadInput) (*request.Request, *s3.CreateMultipartUploadOutput) + + CreateMultipartUpload(*s3.CreateMultipartUploadInput) (*s3.CreateMultipartUploadOutput, error) + + DeleteBucketRequest(*s3.DeleteBucketInput) (*request.Request, *s3.DeleteBucketOutput) + + DeleteBucket(*s3.DeleteBucketInput) (*s3.DeleteBucketOutput, error) + + DeleteBucketCorsRequest(*s3.DeleteBucketCorsInput) (*request.Request, *s3.DeleteBucketCorsOutput) + + DeleteBucketCors(*s3.DeleteBucketCorsInput) (*s3.DeleteBucketCorsOutput, error) + + DeleteBucketLifecycleRequest(*s3.DeleteBucketLifecycleInput) (*request.Request, *s3.DeleteBucketLifecycleOutput) + + DeleteBucketLifecycle(*s3.DeleteBucketLifecycleInput) (*s3.DeleteBucketLifecycleOutput, error) + + DeleteBucketPolicyRequest(*s3.DeleteBucketPolicyInput) (*request.Request, *s3.DeleteBucketPolicyOutput) + + DeleteBucketPolicy(*s3.DeleteBucketPolicyInput) (*s3.DeleteBucketPolicyOutput, error) + + DeleteBucketReplicationRequest(*s3.DeleteBucketReplicationInput) (*request.Request, *s3.DeleteBucketReplicationOutput) + + DeleteBucketReplication(*s3.DeleteBucketReplicationInput) (*s3.DeleteBucketReplicationOutput, error) + + DeleteBucketTaggingRequest(*s3.DeleteBucketTaggingInput) (*request.Request, *s3.DeleteBucketTaggingOutput) + + DeleteBucketTagging(*s3.DeleteBucketTaggingInput) (*s3.DeleteBucketTaggingOutput, error) + + DeleteBucketWebsiteRequest(*s3.DeleteBucketWebsiteInput) (*request.Request, *s3.DeleteBucketWebsiteOutput) + + DeleteBucketWebsite(*s3.DeleteBucketWebsiteInput) (*s3.DeleteBucketWebsiteOutput, error) + + DeleteObjectRequest(*s3.DeleteObjectInput) (*request.Request, *s3.DeleteObjectOutput) + + DeleteObject(*s3.DeleteObjectInput) (*s3.DeleteObjectOutput, error) + + DeleteObjectsRequest(*s3.DeleteObjectsInput) (*request.Request, *s3.DeleteObjectsOutput) + + DeleteObjects(*s3.DeleteObjectsInput) (*s3.DeleteObjectsOutput, error) + + GetBucketAclRequest(*s3.GetBucketAclInput) (*request.Request, *s3.GetBucketAclOutput) + + GetBucketAcl(*s3.GetBucketAclInput) (*s3.GetBucketAclOutput, error) + + GetBucketCorsRequest(*s3.GetBucketCorsInput) (*request.Request, *s3.GetBucketCorsOutput) + + GetBucketCors(*s3.GetBucketCorsInput) (*s3.GetBucketCorsOutput, error) + + GetBucketLifecycleRequest(*s3.GetBucketLifecycleInput) (*request.Request, *s3.GetBucketLifecycleOutput) + + GetBucketLifecycle(*s3.GetBucketLifecycleInput) (*s3.GetBucketLifecycleOutput, error) + + GetBucketLifecycleConfigurationRequest(*s3.GetBucketLifecycleConfigurationInput) (*request.Request, *s3.GetBucketLifecycleConfigurationOutput) + + GetBucketLifecycleConfiguration(*s3.GetBucketLifecycleConfigurationInput) (*s3.GetBucketLifecycleConfigurationOutput, error) + + 
GetBucketLocationRequest(*s3.GetBucketLocationInput) (*request.Request, *s3.GetBucketLocationOutput) + + GetBucketLocation(*s3.GetBucketLocationInput) (*s3.GetBucketLocationOutput, error) + + GetBucketLoggingRequest(*s3.GetBucketLoggingInput) (*request.Request, *s3.GetBucketLoggingOutput) + + GetBucketLogging(*s3.GetBucketLoggingInput) (*s3.GetBucketLoggingOutput, error) + + GetBucketNotificationRequest(*s3.GetBucketNotificationConfigurationRequest) (*request.Request, *s3.NotificationConfigurationDeprecated) + + GetBucketNotification(*s3.GetBucketNotificationConfigurationRequest) (*s3.NotificationConfigurationDeprecated, error) + + GetBucketNotificationConfigurationRequest(*s3.GetBucketNotificationConfigurationRequest) (*request.Request, *s3.NotificationConfiguration) + + GetBucketNotificationConfiguration(*s3.GetBucketNotificationConfigurationRequest) (*s3.NotificationConfiguration, error) + + GetBucketPolicyRequest(*s3.GetBucketPolicyInput) (*request.Request, *s3.GetBucketPolicyOutput) + + GetBucketPolicy(*s3.GetBucketPolicyInput) (*s3.GetBucketPolicyOutput, error) + + GetBucketReplicationRequest(*s3.GetBucketReplicationInput) (*request.Request, *s3.GetBucketReplicationOutput) + + GetBucketReplication(*s3.GetBucketReplicationInput) (*s3.GetBucketReplicationOutput, error) + + GetBucketRequestPaymentRequest(*s3.GetBucketRequestPaymentInput) (*request.Request, *s3.GetBucketRequestPaymentOutput) + + GetBucketRequestPayment(*s3.GetBucketRequestPaymentInput) (*s3.GetBucketRequestPaymentOutput, error) + + GetBucketTaggingRequest(*s3.GetBucketTaggingInput) (*request.Request, *s3.GetBucketTaggingOutput) + + GetBucketTagging(*s3.GetBucketTaggingInput) (*s3.GetBucketTaggingOutput, error) + + GetBucketVersioningRequest(*s3.GetBucketVersioningInput) (*request.Request, *s3.GetBucketVersioningOutput) + + GetBucketVersioning(*s3.GetBucketVersioningInput) (*s3.GetBucketVersioningOutput, error) + + GetBucketWebsiteRequest(*s3.GetBucketWebsiteInput) (*request.Request, *s3.GetBucketWebsiteOutput) + + GetBucketWebsite(*s3.GetBucketWebsiteInput) (*s3.GetBucketWebsiteOutput, error) + + GetObjectRequest(*s3.GetObjectInput) (*request.Request, *s3.GetObjectOutput) + + GetObject(*s3.GetObjectInput) (*s3.GetObjectOutput, error) + + GetObjectAclRequest(*s3.GetObjectAclInput) (*request.Request, *s3.GetObjectAclOutput) + + GetObjectAcl(*s3.GetObjectAclInput) (*s3.GetObjectAclOutput, error) + + GetObjectTorrentRequest(*s3.GetObjectTorrentInput) (*request.Request, *s3.GetObjectTorrentOutput) + + GetObjectTorrent(*s3.GetObjectTorrentInput) (*s3.GetObjectTorrentOutput, error) + + HeadBucketRequest(*s3.HeadBucketInput) (*request.Request, *s3.HeadBucketOutput) + + HeadBucket(*s3.HeadBucketInput) (*s3.HeadBucketOutput, error) + + HeadObjectRequest(*s3.HeadObjectInput) (*request.Request, *s3.HeadObjectOutput) + + HeadObject(*s3.HeadObjectInput) (*s3.HeadObjectOutput, error) + + ListBucketsRequest(*s3.ListBucketsInput) (*request.Request, *s3.ListBucketsOutput) + + ListBuckets(*s3.ListBucketsInput) (*s3.ListBucketsOutput, error) + + ListMultipartUploadsRequest(*s3.ListMultipartUploadsInput) (*request.Request, *s3.ListMultipartUploadsOutput) + + ListMultipartUploads(*s3.ListMultipartUploadsInput) (*s3.ListMultipartUploadsOutput, error) + + ListMultipartUploadsPages(*s3.ListMultipartUploadsInput, func(*s3.ListMultipartUploadsOutput, bool) bool) error + + ListObjectVersionsRequest(*s3.ListObjectVersionsInput) (*request.Request, *s3.ListObjectVersionsOutput) + + ListObjectVersions(*s3.ListObjectVersionsInput) 
(*s3.ListObjectVersionsOutput, error) + + ListObjectVersionsPages(*s3.ListObjectVersionsInput, func(*s3.ListObjectVersionsOutput, bool) bool) error + + ListObjectsRequest(*s3.ListObjectsInput) (*request.Request, *s3.ListObjectsOutput) + + ListObjects(*s3.ListObjectsInput) (*s3.ListObjectsOutput, error) + + ListObjectsPages(*s3.ListObjectsInput, func(*s3.ListObjectsOutput, bool) bool) error + + ListPartsRequest(*s3.ListPartsInput) (*request.Request, *s3.ListPartsOutput) + + ListParts(*s3.ListPartsInput) (*s3.ListPartsOutput, error) + + ListPartsPages(*s3.ListPartsInput, func(*s3.ListPartsOutput, bool) bool) error + + PutBucketAclRequest(*s3.PutBucketAclInput) (*request.Request, *s3.PutBucketAclOutput) + + PutBucketAcl(*s3.PutBucketAclInput) (*s3.PutBucketAclOutput, error) + + PutBucketCorsRequest(*s3.PutBucketCorsInput) (*request.Request, *s3.PutBucketCorsOutput) + + PutBucketCors(*s3.PutBucketCorsInput) (*s3.PutBucketCorsOutput, error) + + PutBucketLifecycleRequest(*s3.PutBucketLifecycleInput) (*request.Request, *s3.PutBucketLifecycleOutput) + + PutBucketLifecycle(*s3.PutBucketLifecycleInput) (*s3.PutBucketLifecycleOutput, error) + + PutBucketLifecycleConfigurationRequest(*s3.PutBucketLifecycleConfigurationInput) (*request.Request, *s3.PutBucketLifecycleConfigurationOutput) + + PutBucketLifecycleConfiguration(*s3.PutBucketLifecycleConfigurationInput) (*s3.PutBucketLifecycleConfigurationOutput, error) + + PutBucketLoggingRequest(*s3.PutBucketLoggingInput) (*request.Request, *s3.PutBucketLoggingOutput) + + PutBucketLogging(*s3.PutBucketLoggingInput) (*s3.PutBucketLoggingOutput, error) + + PutBucketNotificationRequest(*s3.PutBucketNotificationInput) (*request.Request, *s3.PutBucketNotificationOutput) + + PutBucketNotification(*s3.PutBucketNotificationInput) (*s3.PutBucketNotificationOutput, error) + + PutBucketNotificationConfigurationRequest(*s3.PutBucketNotificationConfigurationInput) (*request.Request, *s3.PutBucketNotificationConfigurationOutput) + + PutBucketNotificationConfiguration(*s3.PutBucketNotificationConfigurationInput) (*s3.PutBucketNotificationConfigurationOutput, error) + + PutBucketPolicyRequest(*s3.PutBucketPolicyInput) (*request.Request, *s3.PutBucketPolicyOutput) + + PutBucketPolicy(*s3.PutBucketPolicyInput) (*s3.PutBucketPolicyOutput, error) + + PutBucketReplicationRequest(*s3.PutBucketReplicationInput) (*request.Request, *s3.PutBucketReplicationOutput) + + PutBucketReplication(*s3.PutBucketReplicationInput) (*s3.PutBucketReplicationOutput, error) + + PutBucketRequestPaymentRequest(*s3.PutBucketRequestPaymentInput) (*request.Request, *s3.PutBucketRequestPaymentOutput) + + PutBucketRequestPayment(*s3.PutBucketRequestPaymentInput) (*s3.PutBucketRequestPaymentOutput, error) + + PutBucketTaggingRequest(*s3.PutBucketTaggingInput) (*request.Request, *s3.PutBucketTaggingOutput) + + PutBucketTagging(*s3.PutBucketTaggingInput) (*s3.PutBucketTaggingOutput, error) + + PutBucketVersioningRequest(*s3.PutBucketVersioningInput) (*request.Request, *s3.PutBucketVersioningOutput) + + PutBucketVersioning(*s3.PutBucketVersioningInput) (*s3.PutBucketVersioningOutput, error) + + PutBucketWebsiteRequest(*s3.PutBucketWebsiteInput) (*request.Request, *s3.PutBucketWebsiteOutput) + + PutBucketWebsite(*s3.PutBucketWebsiteInput) (*s3.PutBucketWebsiteOutput, error) + + PutObjectRequest(*s3.PutObjectInput) (*request.Request, *s3.PutObjectOutput) + + PutObject(*s3.PutObjectInput) (*s3.PutObjectOutput, error) + + PutObjectAclRequest(*s3.PutObjectAclInput) (*request.Request, *s3.PutObjectAclOutput) + + 
PutObjectAcl(*s3.PutObjectAclInput) (*s3.PutObjectAclOutput, error)
+
+	RestoreObjectRequest(*s3.RestoreObjectInput) (*request.Request, *s3.RestoreObjectOutput)
+
+	RestoreObject(*s3.RestoreObjectInput) (*s3.RestoreObjectOutput, error)
+
+	UploadPartRequest(*s3.UploadPartInput) (*request.Request, *s3.UploadPartOutput)
+
+	UploadPart(*s3.UploadPartInput) (*s3.UploadPartOutput, error)
+
+	UploadPartCopyRequest(*s3.UploadPartCopyInput) (*request.Request, *s3.UploadPartCopyOutput)
+
+	UploadPartCopy(*s3.UploadPartCopyInput) (*s3.UploadPartCopyOutput, error)
+}
+
+var _ S3API = (*s3.S3)(nil)
diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/doc.go b/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/doc.go
new file mode 100644
index 000000000..229c0d63b
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/doc.go
@@ -0,0 +1,3 @@
+// Package s3manager provides utilities to upload and download objects from
+// S3 concurrently. Helpful for when working with large objects.
+package s3manager
diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/download.go b/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/download.go
new file mode 100644
index 000000000..8d6d9310f
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/download.go
@@ -0,0 +1,354 @@
+package s3manager
+
+import (
+	"fmt"
+	"io"
+	"net/http"
+	"strconv"
+	"strings"
+	"sync"
+
+	"github.com/aws/aws-sdk-go/aws/awserr"
+	"github.com/aws/aws-sdk-go/aws/awsutil"
+	"github.com/aws/aws-sdk-go/aws/client"
+	"github.com/aws/aws-sdk-go/aws/request"
+	"github.com/aws/aws-sdk-go/service/s3"
+	"github.com/aws/aws-sdk-go/service/s3/s3iface"
+)
+
+// DefaultDownloadPartSize is the default range of bytes to get at a time when
+// using Download().
+const DefaultDownloadPartSize = 1024 * 1024 * 5
+
+// DefaultDownloadConcurrency is the default number of goroutines to spin up
+// when using Download().
+const DefaultDownloadConcurrency = 5
+
+// Downloader is the structure used to call Download(). It is safe to call
+// Download() on this structure for multiple objects and across concurrent
+// goroutines. Mutating the Downloader's properties is not safe to do
+// concurrently.
+type Downloader struct {
+	// The size (in bytes) of the byte ranges to request from S3 when
+	// downloading an object in chunks. If this value is set to zero, the
+	// DefaultDownloadPartSize value will be used.
+	PartSize int64
+
+	// The number of goroutines to spin up in parallel when downloading parts.
+	// If this is set to zero, the DefaultDownloadConcurrency value will be
+	// used.
+	Concurrency int
+
+	// An S3 client to use when performing downloads.
+	S3 s3iface.S3API
+}
+
+// NewDownloader creates a new Downloader instance to download objects from
+// S3 in concurrent chunks. Pass in additional functional options to customize
+// the downloader behavior. Requires a client.ConfigProvider in order to create
+// an S3 service client. The session.Session satisfies the client.ConfigProvider
+// interface.
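+//
+// Unless overridden via a functional option, the returned Downloader uses
+// DefaultDownloadPartSize (5 MB byte ranges) and DefaultDownloadConcurrency
+// (5 goroutines).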
+// NewDownloader creates a new Downloader instance to download objects from
+// S3 in concurrent chunks. Pass in additional functional options to customize
+// the downloader behavior. Requires a client.ConfigProvider in order to create
+// an S3 service client. The session.Session satisfies the client.ConfigProvider
+// interface.
+//
+// Example:
+//     // The session the S3 Downloader will use
+//     sess := session.New()
+//
+//     // Create a downloader with the session and default options
+//     downloader := s3manager.NewDownloader(sess)
+//
+//     // Create a downloader with the session and custom options
+//     downloader := s3manager.NewDownloader(sess, func(d *s3manager.Downloader) {
+//         d.PartSize = 64 * 1024 * 1024 // 64MB per part
+//     })
+func NewDownloader(c client.ConfigProvider, options ...func(*Downloader)) *Downloader {
+	d := &Downloader{
+		S3:          s3.New(c),
+		PartSize:    DefaultDownloadPartSize,
+		Concurrency: DefaultDownloadConcurrency,
+	}
+	for _, option := range options {
+		option(d)
+	}
+
+	return d
+}
+
+// NewDownloaderWithClient creates a new Downloader instance to download
+// objects from S3 in concurrent chunks. Pass in additional functional
+// options to customize the downloader behavior. Requires an S3 service client
+// to make S3 API calls.
+//
+// Example:
+//     // The S3 client the S3 Downloader will use
+//     s3Svc := s3.New(session.New())
+//
+//     // Create a downloader with the s3 client and default options
+//     downloader := s3manager.NewDownloaderWithClient(s3Svc)
+//
+//     // Create a downloader with the s3 client and custom options
+//     downloader := s3manager.NewDownloaderWithClient(s3Svc, func(d *s3manager.Downloader) {
+//         d.PartSize = 64 * 1024 * 1024 // 64MB per part
+//     })
+func NewDownloaderWithClient(svc s3iface.S3API, options ...func(*Downloader)) *Downloader {
+	d := &Downloader{
+		S3:          svc,
+		PartSize:    DefaultDownloadPartSize,
+		Concurrency: DefaultDownloadConcurrency,
+	}
+	for _, option := range options {
+		option(d)
+	}
+
+	return d
+}
+
+// Download downloads an object in S3 and writes the payload into w using
+// concurrent GET requests.
+//
+// Additional functional options can be provided to configure the individual
+// download. These options are copies of the Downloader instance Download is
+// called from. Modifying the options will not impact the original Downloader
+// instance.
+//
+// It is safe to call this method concurrently across goroutines.
+//
+// The w io.WriterAt can be satisfied by an os.File to do multipart concurrent
+// downloads, or by an in-memory []byte wrapper using aws.WriteAtBuffer.
+func (d Downloader) Download(w io.WriterAt, input *s3.GetObjectInput, options ...func(*Downloader)) (n int64, err error) {
+	impl := downloader{w: w, in: input, ctx: d}
+
+	for _, option := range options {
+		option(&impl.ctx)
+	}
+
+	return impl.download()
+}
+
+// downloader is the implementation structure used internally by Downloader.
+type downloader struct {
+	ctx Downloader
+
+	in *s3.GetObjectInput
+	w  io.WriterAt
+
+	wg sync.WaitGroup
+	m  sync.Mutex
+
+	pos        int64
+	totalBytes int64
+	written    int64
+	err        error
+}
+
+// init initializes the downloader with default options.
+func (d *downloader) init() {
+	d.totalBytes = -1
+
+	if d.ctx.Concurrency == 0 {
+		d.ctx.Concurrency = DefaultDownloadConcurrency
+	}
+
+	if d.ctx.PartSize == 0 {
+		d.ctx.PartSize = DefaultDownloadPartSize
+	}
+}
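Putting the constructors, functional options, and Download together, a typical use of the manager looks like the following editor-added sketch; the bucket, key, and sizes are illustrative placeholders, not values from this patch:

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
	"github.com/aws/aws-sdk-go/service/s3/s3manager"
)

func main() {
	// aws.WriteAtBuffer satisfies io.WriterAt, so concurrently downloaded
	// chunks can land in memory out of order.
	buf := &aws.WriteAtBuffer{}

	downloader := s3manager.NewDownloader(session.New(), func(d *s3manager.Downloader) {
		d.PartSize = 10 * 1024 * 1024 // 10MB ranged GETs
		d.Concurrency = 3             // three parallel workers
	})

	n, err := downloader.Download(buf, &s3.GetObjectInput{
		Bucket: aws.String("my-bucket"), // placeholder
		Key:    aws.String("my-key"),    // placeholder
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("downloaded %d bytes\n", n)
}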
+// download performs the implementation of the object download across ranged
+// GETs.
+func (d *downloader) download() (n int64, err error) {
+	d.init()
+
+	// Spin off first worker to check additional header information
+	d.getChunk()
+
+	if total := d.getTotalBytes(); total >= 0 {
+		// Spin up workers
+		ch := make(chan dlchunk, d.ctx.Concurrency)
+
+		for i := 0; i < d.ctx.Concurrency; i++ {
+			d.wg.Add(1)
+			go d.downloadPart(ch)
+		}
+
+		// Assign work
+		for d.getErr() == nil {
+			if d.pos >= total {
+				break // We're finished queueing chunks
+			}
+
+			// Queue the next range of bytes to read.
+			ch <- dlchunk{w: d.w, start: d.pos, size: d.ctx.PartSize}
+			d.pos += d.ctx.PartSize
+		}
+
+		// Wait for completion
+		close(ch)
+		d.wg.Wait()
+	} else {
+		// Total size is unknown: keep reading sequential chunks until an
+		// error occurs.
+		for d.err == nil {
+			d.getChunk()
+		}
+
+		// We expect a 416 error letting us know we are done downloading the
+		// total bytes. Since we do not know the content's length, this will
+		// keep grabbing chunks of data until the range of bytes specified in
+		// the request is out of range of the content. Once this happens, a
+		// 416 should occur.
+		e, ok := d.err.(awserr.RequestFailure)
+		if ok && e.StatusCode() == http.StatusRequestedRangeNotSatisfiable {
+			d.err = nil
+		}
+	}
+
+	// Return error
+	return d.written, d.err
+}
+
+// downloadPart is an individual goroutine worker reading from the ch channel
+// and performing a GetObject request on the data with a given byte range.
+//
+// If this is the first worker, this operation also resolves the total number
+// of bytes to be read so that the worker manager knows when it is finished.
+func (d *downloader) downloadPart(ch chan dlchunk) {
+	defer d.wg.Done()
+	for {
+		chunk, ok := <-ch
+		if !ok {
+			break
+		}
+		d.downloadChunk(chunk)
+	}
+}
+
+// getChunk grabs a chunk of data from the body.
+// Not thread safe. Should only be used when grabbing data on a single thread.
+func (d *downloader) getChunk() {
+	chunk := dlchunk{w: d.w, start: d.pos, size: d.ctx.PartSize}
+	d.pos += d.ctx.PartSize
+	d.downloadChunk(chunk)
+}
+
+// downloadChunk downloads the chunk from S3.
+func (d *downloader) downloadChunk(chunk dlchunk) {
+	if d.getErr() != nil {
+		return
+	}
+	// Get the next byte range of data
+	in := &s3.GetObjectInput{}
+	awsutil.Copy(in, d.in)
+	rng := fmt.Sprintf("bytes=%d-%d",
+		chunk.start, chunk.start+chunk.size-1)
+	in.Range = &rng
+
+	req, resp := d.ctx.S3.GetObjectRequest(in)
+	req.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("S3Manager"))
+	err := req.Send()
+
+	if err != nil {
+		d.setErr(err)
+	} else {
+		d.setTotalBytes(resp) // Set total if not yet set.
+
+		n, err := io.Copy(&chunk, resp.Body)
+		resp.Body.Close()
+
+		if err != nil {
+			d.setErr(err)
+		}
+		d.incrWritten(n)
+	}
+}
+
+// getTotalBytes is a thread-safe getter for retrieving the total byte status.
+func (d *downloader) getTotalBytes() int64 {
+	d.m.Lock()
+	defer d.m.Unlock()
+
+	return d.totalBytes
+}
+
+// setTotalBytes is a thread-safe setter for setting the total byte status.
+// It extracts the object's total size from the Content-Range header if the
+// file was chunked, or from Content-Length otherwise. Content-Length is used
+// when the response carries no Content-Range, which means the object was not
+// chunked because the full file fits within the PartSize directive.
+func (d *downloader) setTotalBytes(resp *s3.GetObjectOutput) {
+	d.m.Lock()
+	defer d.m.Unlock()
+
+	if d.totalBytes >= 0 {
+		return
+	}
+
+	if resp.ContentRange == nil {
+		// ContentRange is nil when the full file contents is provided and
+		// is not chunked.
Use ContentLength instead. + if resp.ContentLength != nil { + d.totalBytes = *resp.ContentLength + return + } + } else { + parts := strings.Split(*resp.ContentRange, "/") + + total := int64(-1) + var err error + // Checking for whether or not a numbered total exists + // If one does not exist, we will assume the total to be -1, undefined, + // and sequentially download each chunk until hitting a 416 error + totalStr := parts[len(parts)-1] + if totalStr != "*" { + total, err = strconv.ParseInt(totalStr, 10, 64) + if err != nil { + d.err = err + return + } + } + + d.totalBytes = total + } +} + +func (d *downloader) incrWritten(n int64) { + d.m.Lock() + defer d.m.Unlock() + + d.written += n +} + +// getErr is a thread-safe getter for the error object +func (d *downloader) getErr() error { + d.m.Lock() + defer d.m.Unlock() + + return d.err +} + +// setErr is a thread-safe setter for the error object +func (d *downloader) setErr(e error) { + d.m.Lock() + defer d.m.Unlock() + + d.err = e +} + +// dlchunk represents a single chunk of data to write by the worker routine. +// This structure also implements an io.SectionReader style interface for +// io.WriterAt, effectively making it an io.SectionWriter (which does not +// exist). +type dlchunk struct { + w io.WriterAt + start int64 + size int64 + cur int64 +} + +// Write wraps io.WriterAt for the dlchunk, writing from the dlchunk's start +// position to its end (or EOF). +func (c *dlchunk) Write(p []byte) (n int, err error) { + if c.cur >= c.size { + return 0, io.EOF + } + + n, err = c.w.WriteAt(p, c.start+c.cur) + c.cur += int64(n) + + return +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/download_test.go b/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/download_test.go new file mode 100644 index 000000000..af57e17fc --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/download_test.go @@ -0,0 +1,309 @@ +package s3manager_test + +import ( + "bytes" + "fmt" + "io/ioutil" + "net/http" + "regexp" + "strconv" + "sync" + "testing" + + "github.com/stretchr/testify/assert" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/awstesting/unit" + "github.com/aws/aws-sdk-go/service/s3" + "github.com/aws/aws-sdk-go/service/s3/s3manager" +) + +func dlLoggingSvc(data []byte) (*s3.S3, *[]string, *[]string) { + var m sync.Mutex + names := []string{} + ranges := []string{} + + svc := s3.New(unit.Session) + svc.Handlers.Send.Clear() + svc.Handlers.Send.PushBack(func(r *request.Request) { + m.Lock() + defer m.Unlock() + + names = append(names, r.Operation.Name) + ranges = append(ranges, *r.Params.(*s3.GetObjectInput).Range) + + rerng := regexp.MustCompile(`bytes=(\d+)-(\d+)`) + rng := rerng.FindStringSubmatch(r.HTTPRequest.Header.Get("Range")) + start, _ := strconv.ParseInt(rng[1], 10, 64) + fin, _ := strconv.ParseInt(rng[2], 10, 64) + fin++ + + if fin > int64(len(data)) { + fin = int64(len(data)) + } + + bodyBytes := data[start:fin] + r.HTTPResponse = &http.Response{ + StatusCode: 200, + Body: ioutil.NopCloser(bytes.NewReader(bodyBytes)), + Header: http.Header{}, + } + r.HTTPResponse.Header.Set("Content-Range", fmt.Sprintf("bytes %d-%d/%d", + start, fin-1, len(data))) + r.HTTPResponse.Header.Set("Content-Length", fmt.Sprintf("%d", len(bodyBytes))) + }) + + return svc, &names, &ranges +} + +func dlLoggingSvcNoChunk(data []byte) (*s3.S3, *[]string) { + var m sync.Mutex + names := []string{} + + svc := s3.New(unit.Session) + svc.Handlers.Send.Clear() + 
svc.Handlers.Send.PushBack(func(r *request.Request) { + m.Lock() + defer m.Unlock() + + names = append(names, r.Operation.Name) + + r.HTTPResponse = &http.Response{ + StatusCode: 200, + Body: ioutil.NopCloser(bytes.NewReader(data[:])), + Header: http.Header{}, + } + r.HTTPResponse.Header.Set("Content-Length", fmt.Sprintf("%d", len(data))) + }) + + return svc, &names +} + +func dlLoggingSvcNoContentRangeLength(data []byte, states []int) (*s3.S3, *[]string) { + var m sync.Mutex + names := []string{} + var index int = 0 + + svc := s3.New(unit.Session) + svc.Handlers.Send.Clear() + svc.Handlers.Send.PushBack(func(r *request.Request) { + m.Lock() + defer m.Unlock() + + names = append(names, r.Operation.Name) + + r.HTTPResponse = &http.Response{ + StatusCode: states[index], + Body: ioutil.NopCloser(bytes.NewReader(data[:])), + Header: http.Header{}, + } + index++ + }) + + return svc, &names +} + +func dlLoggingSvcContentRangeTotalAny(data []byte, states []int) (*s3.S3, *[]string) { + var m sync.Mutex + names := []string{} + ranges := []string{} + var index int = 0 + + svc := s3.New(unit.Session) + svc.Handlers.Send.Clear() + svc.Handlers.Send.PushBack(func(r *request.Request) { + m.Lock() + defer m.Unlock() + + names = append(names, r.Operation.Name) + ranges = append(ranges, *r.Params.(*s3.GetObjectInput).Range) + + rerng := regexp.MustCompile(`bytes=(\d+)-(\d+)`) + rng := rerng.FindStringSubmatch(r.HTTPRequest.Header.Get("Range")) + start, _ := strconv.ParseInt(rng[1], 10, 64) + fin, _ := strconv.ParseInt(rng[2], 10, 64) + fin++ + + if fin >= int64(len(data)) { + fin = int64(len(data)) + } + + // Setting start and finish to 0 because this state of 1 is suppose to + // be an error state of 416 + if index == len(states)-1 { + start = 0 + fin = 0 + } + + bodyBytes := data[start:fin] + + r.HTTPResponse = &http.Response{ + StatusCode: states[index], + Body: ioutil.NopCloser(bytes.NewReader(bodyBytes)), + Header: http.Header{}, + } + r.HTTPResponse.Header.Set("Content-Range", fmt.Sprintf("bytes %d-%d/*", + start, fin-1)) + index++ + }) + + return svc, &names +} + +func TestDownloadOrder(t *testing.T) { + s, names, ranges := dlLoggingSvc(buf12MB) + + d := s3manager.NewDownloaderWithClient(s, func(d *s3manager.Downloader) { + d.Concurrency = 1 + }) + w := &aws.WriteAtBuffer{} + n, err := d.Download(w, &s3.GetObjectInput{ + Bucket: aws.String("bucket"), + Key: aws.String("key"), + }) + + assert.Nil(t, err) + assert.Equal(t, int64(len(buf12MB)), n) + assert.Equal(t, []string{"GetObject", "GetObject", "GetObject"}, *names) + assert.Equal(t, []string{"bytes=0-5242879", "bytes=5242880-10485759", "bytes=10485760-15728639"}, *ranges) + + count := 0 + for _, b := range w.Bytes() { + count += int(b) + } + assert.Equal(t, 0, count) +} + +func TestDownloadZero(t *testing.T) { + s, names, ranges := dlLoggingSvc([]byte{}) + + d := s3manager.NewDownloaderWithClient(s) + w := &aws.WriteAtBuffer{} + n, err := d.Download(w, &s3.GetObjectInput{ + Bucket: aws.String("bucket"), + Key: aws.String("key"), + }) + + assert.Nil(t, err) + assert.Equal(t, int64(0), n) + assert.Equal(t, []string{"GetObject"}, *names) + assert.Equal(t, []string{"bytes=0-5242879"}, *ranges) +} + +func TestDownloadSetPartSize(t *testing.T) { + s, names, ranges := dlLoggingSvc([]byte{1, 2, 3}) + + d := s3manager.NewDownloaderWithClient(s, func(d *s3manager.Downloader) { + d.Concurrency = 1 + d.PartSize = 1 + }) + w := &aws.WriteAtBuffer{} + n, err := d.Download(w, &s3.GetObjectInput{ + Bucket: aws.String("bucket"), + Key: aws.String("key"), + }) + + 
assert.Nil(t, err) + assert.Equal(t, int64(3), n) + assert.Equal(t, []string{"GetObject", "GetObject", "GetObject"}, *names) + assert.Equal(t, []string{"bytes=0-0", "bytes=1-1", "bytes=2-2"}, *ranges) + assert.Equal(t, []byte{1, 2, 3}, w.Bytes()) +} + +func TestDownloadError(t *testing.T) { + s, names, _ := dlLoggingSvc([]byte{1, 2, 3}) + + num := 0 + s.Handlers.Send.PushBack(func(r *request.Request) { + num++ + if num > 1 { + r.HTTPResponse.StatusCode = 400 + r.HTTPResponse.Body = ioutil.NopCloser(bytes.NewReader([]byte{})) + } + }) + + d := s3manager.NewDownloaderWithClient(s, func(d *s3manager.Downloader) { + d.Concurrency = 1 + d.PartSize = 1 + }) + w := &aws.WriteAtBuffer{} + n, err := d.Download(w, &s3.GetObjectInput{ + Bucket: aws.String("bucket"), + Key: aws.String("key"), + }) + + assert.NotNil(t, err) + assert.Equal(t, int64(1), n) + assert.Equal(t, []string{"GetObject", "GetObject"}, *names) + assert.Equal(t, []byte{1}, w.Bytes()) +} + +func TestDownloadNonChunk(t *testing.T) { + s, names := dlLoggingSvcNoChunk(buf2MB) + + d := s3manager.NewDownloaderWithClient(s, func(d *s3manager.Downloader) { + d.Concurrency = 1 + }) + w := &aws.WriteAtBuffer{} + n, err := d.Download(w, &s3.GetObjectInput{ + Bucket: aws.String("bucket"), + Key: aws.String("key"), + }) + + assert.Nil(t, err) + assert.Equal(t, int64(len(buf2MB)), n) + assert.Equal(t, []string{"GetObject"}, *names) + + count := 0 + for _, b := range w.Bytes() { + count += int(b) + } + assert.Equal(t, 0, count) +} + +func TestDownloadNoContentRangeLength(t *testing.T) { + s, names := dlLoggingSvcNoContentRangeLength(buf2MB, []int{200, 416}) + + d := s3manager.NewDownloaderWithClient(s, func(d *s3manager.Downloader) { + d.Concurrency = 1 + }) + w := &aws.WriteAtBuffer{} + n, err := d.Download(w, &s3.GetObjectInput{ + Bucket: aws.String("bucket"), + Key: aws.String("key"), + }) + + assert.Nil(t, err) + assert.Equal(t, int64(len(buf2MB)), n) + assert.Equal(t, []string{"GetObject", "GetObject"}, *names) + + count := 0 + for _, b := range w.Bytes() { + count += int(b) + } + assert.Equal(t, 0, count) +} + +func TestDownloadContentRangeTotalAny(t *testing.T) { + s, names := dlLoggingSvcContentRangeTotalAny(buf2MB, []int{200, 416}) + + d := s3manager.NewDownloaderWithClient(s, func(d *s3manager.Downloader) { + d.Concurrency = 1 + }) + w := &aws.WriteAtBuffer{} + n, err := d.Download(w, &s3.GetObjectInput{ + Bucket: aws.String("bucket"), + Key: aws.String("key"), + }) + + assert.Nil(t, err) + assert.Equal(t, int64(len(buf2MB)), n) + assert.Equal(t, []string{"GetObject", "GetObject"}, *names) + + count := 0 + for _, b := range w.Bytes() { + count += int(b) + } + assert.Equal(t, 0, count) +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/s3manageriface/interface.go b/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/s3manageriface/interface.go new file mode 100644 index 000000000..b7d0a1256 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/s3manageriface/interface.go @@ -0,0 +1,23 @@ +// Package s3manageriface provides an interface for the s3manager package +package s3manageriface + +import ( + "io" + + "github.com/aws/aws-sdk-go/service/s3" + "github.com/aws/aws-sdk-go/service/s3/s3manager" +) + +// DownloaderAPI is the interface type for s3manager.Downloader. 
+type DownloaderAPI interface {
+	Download(io.WriterAt, *s3.GetObjectInput, ...func(*s3manager.Downloader)) (int64, error)
+}
+
+var _ DownloaderAPI = (*s3manager.Downloader)(nil)
+
+// UploaderAPI is the interface type for s3manager.Uploader.
+type UploaderAPI interface {
+	Upload(*s3manager.UploadInput, ...func(*s3manager.Uploader)) (*s3manager.UploadOutput, error)
+}
+
+var _ UploaderAPI = (*s3manager.Uploader)(nil)
diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/shared_test.go b/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/shared_test.go
new file mode 100644
index 000000000..b5b613143
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/shared_test.go
@@ -0,0 +1,4 @@
+package s3manager_test
+
+var buf12MB = make([]byte, 1024*1024*12)
+var buf2MB = make([]byte, 1024*1024*2)
diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/upload.go b/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/upload.go
new file mode 100644
index 000000000..44ee75a17
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/upload.go
@@ -0,0 +1,661 @@
+package s3manager
+
+import (
+	"bytes"
+	"fmt"
+	"io"
+	"sort"
+	"sync"
+	"time"
+
+	"github.com/aws/aws-sdk-go/aws/awserr"
+	"github.com/aws/aws-sdk-go/aws/awsutil"
+	"github.com/aws/aws-sdk-go/aws/client"
+	"github.com/aws/aws-sdk-go/aws/request"
+	"github.com/aws/aws-sdk-go/service/s3"
+	"github.com/aws/aws-sdk-go/service/s3/s3iface"
+)
+
+// MaxUploadParts is the maximum allowed number of parts in a multipart upload
+// on Amazon S3.
+const MaxUploadParts = 10000
+
+// MinUploadPartSize is the minimum allowed part size when uploading a part to
+// Amazon S3.
+const MinUploadPartSize int64 = 1024 * 1024 * 5
+
+// DefaultUploadPartSize is the default part size to buffer chunks of a
+// payload into.
+const DefaultUploadPartSize = MinUploadPartSize
+
+// DefaultUploadConcurrency is the default number of goroutines to spin up when
+// using Upload().
+const DefaultUploadConcurrency = 5
+
+// A MultiUploadFailure wraps a failed S3 multipart upload. An error returned
+// will satisfy this interface when a multipart upload failed to upload all
+// chunks to S3. In the case of a failure the UploadID is needed to operate on
+// the chunks, if any, which were uploaded.
+//
+// Example:
+//
+//     u := s3manager.NewUploader(opts)
+//     output, err := u.Upload(input)
+//     if err != nil {
+//         if multierr, ok := err.(MultiUploadFailure); ok {
+//             // Process error and its associated uploadID
+//             fmt.Println("Error:", multierr.Code(), multierr.Message(), multierr.UploadID())
+//         } else {
+//             // Process error generically
+//             fmt.Println("Error:", err.Error())
+//         }
+//     }
+//
+type MultiUploadFailure interface {
+	awserr.Error
+
+	// Returns the upload id for the S3 multipart upload that failed.
+	UploadID() string
+}
+
+// So that the Error interface type can be included as an anonymous field
+// in the multiUploadError struct and not conflict with the error.Error() method.
+type awsError awserr.Error
+
+// A multiUploadError wraps the upload ID of a failed s3 multipart upload.
+// Composed of BaseError for code, message, and original error.
+//
+// Should be used for an error that occurred failing an S3 multipart upload
+// when an upload ID is available. If an upload ID is not available, a more
+// generic error should be used instead.
+type multiUploadError struct {
+	awsError
+
+	// ID for multipart upload which failed.
+	uploadID string
+}
+
+// Error returns the string representation of the error.
+// +// See apierr.BaseError ErrorWithExtra for output format +// +// Satisfies the error interface. +func (m multiUploadError) Error() string { + extra := fmt.Sprintf("upload id: %s", m.uploadID) + return awserr.SprintError(m.Code(), m.Message(), extra, m.OrigErr()) +} + +// String returns the string representation of the error. +// Alias for Error to satisfy the stringer interface. +func (m multiUploadError) String() string { + return m.Error() +} + +// UploadID returns the id of the S3 upload which failed. +func (m multiUploadError) UploadID() string { + return m.uploadID +} + +// UploadInput contains all input for upload requests to Amazon S3. +type UploadInput struct { + // The canned ACL to apply to the object. + ACL *string `location:"header" locationName:"x-amz-acl" type:"string"` + + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // Specifies caching behavior along the request/reply chain. + CacheControl *string `location:"header" locationName:"Cache-Control" type:"string"` + + // Specifies presentational information for the object. + ContentDisposition *string `location:"header" locationName:"Content-Disposition" type:"string"` + + // Specifies what content encodings have been applied to the object and thus + // what decoding mechanisms must be applied to obtain the media-type referenced + // by the Content-Type header field. + ContentEncoding *string `location:"header" locationName:"Content-Encoding" type:"string"` + + // The language the content is in. + ContentLanguage *string `location:"header" locationName:"Content-Language" type:"string"` + + // A standard MIME type describing the format of the object data. + ContentType *string `location:"header" locationName:"Content-Type" type:"string"` + + // The date and time at which the object is no longer cacheable. + Expires *time.Time `location:"header" locationName:"Expires" type:"timestamp" timestampFormat:"rfc822"` + + // Gives the grantee READ, READ_ACP, and WRITE_ACP permissions on the object. + GrantFullControl *string `location:"header" locationName:"x-amz-grant-full-control" type:"string"` + + // Allows grantee to read the object data and its metadata. + GrantRead *string `location:"header" locationName:"x-amz-grant-read" type:"string"` + + // Allows grantee to read the object ACL. + GrantReadACP *string `location:"header" locationName:"x-amz-grant-read-acp" type:"string"` + + // Allows grantee to write the ACL for the applicable object. + GrantWriteACP *string `location:"header" locationName:"x-amz-grant-write-acp" type:"string"` + + Key *string `location:"uri" locationName:"Key" type:"string" required:"true"` + + // A map of metadata to store with the object in S3. + Metadata map[string]*string `location:"headers" locationName:"x-amz-meta-" type:"map"` + + // Confirms that the requester knows that she or he will be charged for the + // request. Bucket owners need not specify this parameter in their requests. + // Documentation on downloading objects from requester pays buckets can be found + // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html + RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string"` + + // Specifies the algorithm to use to when encrypting the object (e.g., AES256, + // aws:kms). 
+ SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` + + // Specifies the customer-provided encryption key for Amazon S3 to use in encrypting + // data. This value is used to store the object and then it is discarded; Amazon + // does not store the encryption key. The key must be appropriate for use with + // the algorithm specified in the x-amz-server-side​-encryption​-customer-algorithm + // header. + SSECustomerKey *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string"` + + // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. + // Amazon S3 uses this header for a message integrity check to ensure the encryption + // key was transmitted without error. + SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` + + // Specifies the AWS KMS key ID to use for object encryption. All GET and PUT + // requests for an object protected by AWS KMS will fail if not made via SSL + // or using SigV4. Documentation on configuring any of the officially supported + // AWS SDKs and CLI can be found at http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingAWSSDK.html#specify-signature-version + SSEKMSKeyID *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string"` + + // The Server-side encryption algorithm used when storing this object in S3 + // (e.g., AES256, aws:kms). + ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string"` + + // The type of storage to use for the object. Defaults to 'STANDARD'. + StorageClass *string `location:"header" locationName:"x-amz-storage-class" type:"string"` + + // If the bucket is configured as a website, redirects requests for this object + // to another object in the same bucket or to an external URL. Amazon S3 stores + // the value of this header in the object metadata. + WebsiteRedirectLocation *string `location:"header" locationName:"x-amz-website-redirect-location" type:"string"` + + // The readable body payload to send to S3. + Body io.Reader +} + +// UploadOutput represents a response from the Upload() call. +type UploadOutput struct { + // The URL where the object was uploaded to. + Location string + + // The version of the object that was uploaded. Will only be populated if + // the S3 Bucket is versioned. If the bucket is not versioned this field + // will not be set. + VersionID *string + + // The ID for a multipart upload to S3. In the case of an error the error + // can be cast to the MultiUploadFailure interface to extract the upload ID. + UploadID string +} + +// The Uploader structure that calls Upload(). It is safe to call Upload() +// on this structure for multiple objects and across concurrent goroutines. +// Mutating the Uploader's properties is not safe to be done concurrently. +type Uploader struct { + // The buffer size (in bytes) to use when buffering data into chunks and + // sending them as parts to S3. The minimum allowed part size is 5MB, and + // if this value is set to zero, the DefaultPartSize value will be used. + PartSize int64 + + // The number of goroutines to spin up in parallel when sending parts. + // If this is set to zero, the DefaultConcurrency value will be used. 
+	Concurrency int
+
+	// Setting this value to true will cause the SDK to avoid calling
+	// AbortMultipartUpload on a failure, leaving all successfully uploaded
+	// parts on S3 for manual recovery.
+	//
+	// Note that storing parts of an incomplete multipart upload counts towards
+	// space usage on S3 and will add additional costs if not cleaned up.
+	LeavePartsOnError bool
+
+	// MaxUploadParts is the max number of parts which will be uploaded to S3.
+	// Used to calculate the part size of the object to be uploaded. E.g., a
+	// 5GB file with MaxUploadParts set to 100 will be uploaded as 100 parts
+	// of 50MB each. The value is capped at the MaxUploadParts constant
+	// (10,000 parts).
+	MaxUploadParts int
+
+	// The client to use when uploading to S3.
+	S3 s3iface.S3API
+}
+
+// NewUploader creates a new Uploader instance to upload objects to S3. Pass in
+// additional functional options to customize the uploader's behavior. Requires a
+// client.ConfigProvider in order to create an S3 service client. The session.Session
+// satisfies the client.ConfigProvider interface.
+//
+// Example:
+//     // The session the S3 Uploader will use
+//     sess := session.New()
+//
+//     // Create an uploader with the session and default options
+//     uploader := s3manager.NewUploader(sess)
+//
+//     // Create an uploader with the session and custom options
+//     uploader := s3manager.NewUploader(sess, func(u *s3manager.Uploader) {
+//         u.PartSize = 64 * 1024 * 1024 // 64MB per part
+//     })
+func NewUploader(c client.ConfigProvider, options ...func(*Uploader)) *Uploader {
+	u := &Uploader{
+		S3:                s3.New(c),
+		PartSize:          DefaultUploadPartSize,
+		Concurrency:       DefaultUploadConcurrency,
+		LeavePartsOnError: false,
+		MaxUploadParts:    MaxUploadParts,
+	}
+
+	for _, option := range options {
+		option(u)
+	}
+
+	return u
+}
+
+// NewUploaderWithClient creates a new Uploader instance to upload objects to S3. Pass in
+// additional functional options to customize the uploader's behavior. Requires
+// an S3 service client to make S3 API calls.
+//
+// Example:
+//     // S3 service client the Upload manager will use.
+//     s3Svc := s3.New(session.New())
+//
+//     // Create an uploader with S3 client and default options
+//     uploader := s3manager.NewUploaderWithClient(s3Svc)
+//
+//     // Create an uploader with S3 client and custom options
+//     uploader := s3manager.NewUploaderWithClient(s3Svc, func(u *s3manager.Uploader) {
+//         u.PartSize = 64 * 1024 * 1024 // 64MB per part
+//     })
+func NewUploaderWithClient(svc s3iface.S3API, options ...func(*Uploader)) *Uploader {
+	u := &Uploader{
+		S3:                svc,
+		PartSize:          DefaultUploadPartSize,
+		Concurrency:       DefaultUploadConcurrency,
+		LeavePartsOnError: false,
+		MaxUploadParts:    MaxUploadParts,
+	}
+
+	for _, option := range options {
+		option(u)
+	}
+
+	return u
+}
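Before looking at Upload itself, here is an editor-added sketch of the error-handling path the MultiUploadFailure interface documented earlier enables; the bucket, key, and file path are placeholders:

package main

import (
	"fmt"
	"log"
	"os"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3/s3manager"
)

func main() {
	file, err := os.Open("large-file.bin") // placeholder path
	if err != nil {
		log.Fatal(err)
	}
	defer file.Close()

	uploader := s3manager.NewUploader(session.New())
	_, err = uploader.Upload(&s3manager.UploadInput{
		Bucket: aws.String("my-bucket"), // placeholder
		Key:    aws.String("my-key"),    // placeholder
		Body:   file,
	})
	if err != nil {
		// A failed multipart upload reports the upload ID so the
		// already-uploaded parts can be listed or aborted later.
		if merr, ok := err.(s3manager.MultiUploadFailure); ok {
			fmt.Println("upload failed:", merr.Code(), merr.Message(), "id:", merr.UploadID())
			return
		}
		fmt.Println("upload failed:", err)
	}
}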
+// Upload uploads an object to S3, intelligently buffering large files into
+// smaller chunks and sending them in parallel across multiple goroutines. You
+// can configure the buffer size and concurrency through the Uploader's parameters.
+//
+// Additional functional options can be provided to configure the individual
+// upload. These options are copies of the Uploader instance Upload is called from.
+// Modifying the options will not impact the original Uploader instance.
+//
+// It is safe to call this method concurrently across goroutines.
+//
+// Example:
+//     // Upload input parameters
+//     upParams := &s3manager.UploadInput{
+//         Bucket: &bucketName,
+//         Key:    &keyName,
+//         Body:   file,
+//     }
+//
+//     // Perform an upload.
+//     result, err := uploader.Upload(upParams)
+//
+//     // Perform upload with options different from those in the Uploader.
+//     result, err := uploader.Upload(upParams, func(u *s3manager.Uploader) {
+//         u.PartSize = 10 * 1024 * 1024 // 10MB part size
+//         u.LeavePartsOnError = true    // Don't delete the parts if the upload fails.
+//     })
+func (u Uploader) Upload(input *UploadInput, options ...func(*Uploader)) (*UploadOutput, error) {
+	i := uploader{in: input, ctx: u}
+
+	for _, option := range options {
+		option(&i.ctx)
+	}
+
+	return i.upload()
+}
+
+// internal structure to manage an upload to S3.
+type uploader struct {
+	ctx Uploader
+
+	in *UploadInput
+
+	readerPos int64 // current reader position
+	totalSize int64 // set to -1 if the size is not known
+}
+
+// internal logic for deciding whether to upload a single part or use a
+// multipart upload.
+func (u *uploader) upload() (*UploadOutput, error) {
+	u.init()
+
+	if u.ctx.PartSize < MinUploadPartSize {
+		msg := fmt.Sprintf("part size must be at least %d bytes", MinUploadPartSize)
+		return nil, awserr.New("ConfigError", msg, nil)
+	}
+
+	// Do one read to determine if we have more than one part
+	buf, err := u.nextReader()
+	if err == io.EOF || err == io.ErrUnexpectedEOF { // single part
+		return u.singlePart(buf)
+	} else if err != nil {
+		return nil, awserr.New("ReadRequestBody", "read upload data failed", err)
+	}
+
+	mu := multiuploader{uploader: u}
+	return mu.upload(buf)
+}
+
+// init will initialize all default options.
+func (u *uploader) init() {
+	if u.ctx.Concurrency == 0 {
+		u.ctx.Concurrency = DefaultUploadConcurrency
+	}
+	if u.ctx.PartSize == 0 {
+		u.ctx.PartSize = DefaultUploadPartSize
+	}
+
+	// Try to get the total size for some optimizations
+	u.initSize()
+}
+
+// initSize tries to detect the total stream size, setting u.totalSize. If
+// the size is not known, totalSize is set to -1.
+func (u *uploader) initSize() {
+	u.totalSize = -1
+
+	switch r := u.in.Body.(type) {
+	case io.Seeker:
+		pos, _ := r.Seek(0, 1) // remember the current offset
+		defer r.Seek(pos, 0)   // restore it when done
+
+		n, err := r.Seek(0, 2) // seek to the end to learn the total size
+		if err != nil {
+			return
+		}
+		u.totalSize = n
+
+		// Try to adjust partSize if it is too small and account for
+		// integer division truncation.
+		if u.totalSize/u.ctx.PartSize >= int64(u.ctx.MaxUploadParts) {
+			// Add one to the part size to account for remainders
+			// during the size calculation. e.g odd number of bytes.
+			u.ctx.PartSize = (u.totalSize / int64(u.ctx.MaxUploadParts)) + 1
+		}
+	}
+}
+
+// nextReader returns a seekable reader representing the next packet of data.
+// This operation increases the shared u.readerPos counter, but note that it
+// does not need to be wrapped in a mutex because nextReader is only called
+// from the main thread.
+func (u *uploader) nextReader() (io.ReadSeeker, error) {
+	switch r := u.in.Body.(type) {
+	case io.ReaderAt:
+		var err error
+
+		n := u.ctx.PartSize
+		if u.totalSize >= 0 {
+			bytesLeft := u.totalSize - u.readerPos
+
+			if bytesLeft == 0 {
+				err = io.EOF
+				n = bytesLeft
+			} else if bytesLeft <= u.ctx.PartSize {
+				err = io.ErrUnexpectedEOF
+				n = bytesLeft
+			}
+		}
+
+		buf := io.NewSectionReader(r, u.readerPos, n)
+		u.readerPos += n
+
+		return buf, err
+
+	default:
+		packet := make([]byte, u.ctx.PartSize)
+		n, err := io.ReadFull(u.in.Body, packet)
+		u.readerPos += int64(n)
+
+		return bytes.NewReader(packet[0:n]), err
+	}
+}
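nextReader above picks one of two chunking strategies: zero-copy windows over an io.ReaderAt, or buffered reads from a plain io.Reader. The following standalone, editor-added sketch mirrors both on standard library types; the sizes and sample data are arbitrary:

package main

import (
	"bytes"
	"fmt"
	"io"
	"io/ioutil"
	"strings"
)

const partSize = 4

func main() {
	// Seekable source: cheap windows via io.NewSectionReader, analogous
	// to the io.ReaderAt path above. Nothing is copied up front.
	ra := strings.NewReader("abcdefghij")
	for pos := int64(0); pos < int64(ra.Len()); pos += partSize {
		part, _ := ioutil.ReadAll(io.NewSectionReader(ra, pos, partSize))
		fmt.Printf("section part: %q\n", part)
	}

	// Non-seekable source: buffer each part into memory with io.ReadFull,
	// analogous to the fallback path for plain io.Reader bodies.
	r := bytes.NewBufferString("abcdefghij")
	for {
		packet := make([]byte, partSize)
		n, err := io.ReadFull(r, packet)
		if n > 0 {
			fmt.Printf("buffered part: %q\n", packet[:n])
		}
		if err != nil { // io.EOF or io.ErrUnexpectedEOF ends the stream
			break
		}
	}
}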
+// singlePart contains the upload logic for uploading a single chunk via
+// a regular PutObject request. Multipart requests require at least two
+// parts, or at least 5MB of data.
+func (u *uploader) singlePart(buf io.ReadSeeker) (*UploadOutput, error) {
+	params := &s3.PutObjectInput{}
+	awsutil.Copy(params, u.in)
+	params.Body = buf
+
+	req, out := u.ctx.S3.PutObjectRequest(params)
+	req.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("S3Manager"))
+	if err := req.Send(); err != nil {
+		return nil, err
+	}
+
+	url := req.HTTPRequest.URL.String()
+	return &UploadOutput{
+		Location:  url,
+		VersionID: out.VersionId,
+	}, nil
+}
+
+// internal structure to manage a specific multipart upload to S3.
+type multiuploader struct {
+	*uploader
+	wg       sync.WaitGroup
+	m        sync.Mutex
+	err      error
+	uploadID string
+	parts    completedParts
+}
+
+// chunk keeps track of a single chunk of data being sent to S3.
+type chunk struct {
+	buf io.ReadSeeker
+	num int64
+}
+
+// completedParts is a wrapper to make parts sortable by their part number,
+// since S3 requires this list to be sent in sorted order.
+type completedParts []*s3.CompletedPart
+
+func (a completedParts) Len() int           { return len(a) }
+func (a completedParts) Swap(i, j int)      { a[i], a[j] = a[j], a[i] }
+func (a completedParts) Less(i, j int) bool { return *a[i].PartNumber < *a[j].PartNumber }
+
+// upload will perform a multipart upload using the firstBuf buffer containing
+// the first chunk of data.
+func (u *multiuploader) upload(firstBuf io.ReadSeeker) (*UploadOutput, error) {
+	params := &s3.CreateMultipartUploadInput{}
+	awsutil.Copy(params, u.in)
+
+	// Create the multipart
+	req, resp := u.ctx.S3.CreateMultipartUploadRequest(params)
+	req.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("S3Manager"))
+	if err := req.Send(); err != nil {
+		return nil, err
+	}
+	u.uploadID = *resp.UploadId
+
+	// Create the workers
+	ch := make(chan chunk, u.ctx.Concurrency)
+	for i := 0; i < u.ctx.Concurrency; i++ {
+		u.wg.Add(1)
+		go u.readChunk(ch)
+	}
+
+	// Send part 1 to the workers
+	var num int64 = 1
+	ch <- chunk{buf: firstBuf, num: num}
+
+	// Read and queue the rest of the parts
+	for u.geterr() == nil {
+		// This upload has exceeded the maximum number of supported parts;
+		// error now.
+		if num > int64(u.ctx.MaxUploadParts) || num > int64(MaxUploadParts) {
+			var msg string
+			if num > int64(u.ctx.MaxUploadParts) {
+				msg = fmt.Sprintf("exceeded total allowed configured MaxUploadParts (%d). Adjust PartSize to fit in this limit",
+					u.ctx.MaxUploadParts)
+			} else {
+				msg = fmt.Sprintf("exceeded total allowed S3 limit MaxUploadParts (%d). Adjust PartSize to fit in this limit",
+					MaxUploadParts)
+			}
+			u.seterr(awserr.New("TotalPartsExceeded", msg, nil))
+			break
+		}
+		num++
+
+		buf, err := u.nextReader()
+		if err == io.EOF {
+			break
+		}
+
+		ch <- chunk{buf: buf, num: num}
+
+		if err != nil && err != io.ErrUnexpectedEOF {
+			u.seterr(awserr.New(
+				"ReadRequestBody",
+				"read multipart upload data failed",
+				err))
+			break
+		}
+	}
+
+	// Close the channel, wait for workers, and complete upload
+	close(ch)
+	u.wg.Wait()
+	complete := u.complete()
+
+	if err := u.geterr(); err != nil {
+		return nil, &multiUploadError{
+			awsError: awserr.New(
+				"MultipartUpload",
+				"upload multipart failed",
+				err),
+			uploadID: u.uploadID,
+		}
+	}
+	return &UploadOutput{
+		Location:  *complete.Location,
+		VersionID: complete.VersionId,
+		UploadID:  u.uploadID,
+	}, nil
+}
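Because workers finish in arbitrary order, the parts list accumulated during the upload arrives unsorted, and S3 rejects a CompleteMultipartUpload whose parts are out of order. An editor-added sketch of the sort the manager performs; the ETags and part numbers are made up, and byPartNumber mirrors the unexported completedParts wrapper above:

package main

import (
	"fmt"
	"sort"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/s3"
)

// byPartNumber mirrors the unexported completedParts sort wrapper.
type byPartNumber []*s3.CompletedPart

func (a byPartNumber) Len() int           { return len(a) }
func (a byPartNumber) Swap(i, j int)      { a[i], a[j] = a[j], a[i] }
func (a byPartNumber) Less(i, j int) bool { return *a[i].PartNumber < *a[j].PartNumber }

func main() {
	// Workers completed out of order: 3, 1, 2.
	parts := byPartNumber{
		{ETag: aws.String("etag3"), PartNumber: aws.Int64(3)},
		{ETag: aws.String("etag1"), PartNumber: aws.Int64(1)},
		{ETag: aws.String("etag2"), PartNumber: aws.Int64(2)},
	}

	sort.Sort(parts)

	for _, p := range parts {
		fmt.Println(*p.PartNumber, *p.ETag)
	}
	// CompleteMultipartUpload can now be sent with parts in order 1, 2, 3.
}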
+// readChunk runs in worker goroutines to pull chunks off the ch channel
+// and send() them as UploadPart requests.
+func (u *multiuploader) readChunk(ch chan chunk) {
+	defer u.wg.Done()
+	for {
+		data, ok := <-ch
+
+		if !ok {
+			break
+		}
+
+		if u.geterr() == nil {
+			if err := u.send(data); err != nil {
+				u.seterr(err)
+			}
+		}
+	}
+}
+
+// send performs an UploadPart request and keeps track of the completed
+// part information.
+func (u *multiuploader) send(c chunk) error {
+	req, resp := u.ctx.S3.UploadPartRequest(&s3.UploadPartInput{
+		Bucket:     u.in.Bucket,
+		Key:        u.in.Key,
+		Body:       c.buf,
+		UploadId:   &u.uploadID,
+		PartNumber: &c.num,
+	})
+	req.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("S3Manager"))
+	if err := req.Send(); err != nil {
+		return err
+	}
+
+	n := c.num
+	completed := &s3.CompletedPart{ETag: resp.ETag, PartNumber: &n}
+
+	u.m.Lock()
+	u.parts = append(u.parts, completed)
+	u.m.Unlock()
+
+	return nil
+}
+
+// geterr is a thread-safe getter for the error object
+func (u *multiuploader) geterr() error {
+	u.m.Lock()
+	defer u.m.Unlock()
+
+	return u.err
+}
+
+// seterr is a thread-safe setter for the error object
+func (u *multiuploader) seterr(e error) {
+	u.m.Lock()
+	defer u.m.Unlock()
+
+	u.err = e
+}
+
+// fail will abort the multipart upload unless LeavePartsOnError is set to true.
+func (u *multiuploader) fail() {
+	if u.ctx.LeavePartsOnError {
+		return
+	}
+
+	req, _ := u.ctx.S3.AbortMultipartUploadRequest(&s3.AbortMultipartUploadInput{
+		Bucket:   u.in.Bucket,
+		Key:      u.in.Key,
+		UploadId: &u.uploadID,
+	})
+	req.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("S3Manager"))
+	req.Send()
+}
+
+// complete successfully completes a multipart upload and returns the response.
+func (u *multiuploader) complete() *s3.CompleteMultipartUploadOutput {
+	if u.geterr() != nil {
+		u.fail()
+		return nil
+	}
+
+	// Parts must be sorted in PartNumber order.
+ sort.Sort(u.parts) + + req, resp := u.ctx.S3.CompleteMultipartUploadRequest(&s3.CompleteMultipartUploadInput{ + Bucket: u.in.Bucket, + Key: u.in.Key, + UploadId: &u.uploadID, + MultipartUpload: &s3.CompletedMultipartUpload{Parts: u.parts}, + }) + req.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("S3Manager")) + if err := req.Send(); err != nil { + u.seterr(err) + u.fail() + } + + return resp +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/upload_test.go b/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/upload_test.go new file mode 100644 index 000000000..189ecb657 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/upload_test.go @@ -0,0 +1,482 @@ +package s3manager_test + +import ( + "bytes" + "fmt" + "io" + "io/ioutil" + "net/http" + "net/http/httptest" + "reflect" + "sort" + "strings" + "sync" + "testing" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/awsutil" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/awstesting/unit" + "github.com/aws/aws-sdk-go/service/s3" + "github.com/aws/aws-sdk-go/service/s3/s3manager" + "github.com/stretchr/testify/assert" +) + +var emptyList = []string{} + +func val(i interface{}, s string) interface{} { + v, err := awsutil.ValuesAtPath(i, s) + if err != nil || len(v) == 0 { + return nil + } + if _, ok := v[0].(io.Reader); ok { + return v[0] + } + + if rv := reflect.ValueOf(v[0]); rv.Kind() == reflect.Ptr { + return rv.Elem().Interface() + } + + return v[0] +} + +func contains(src []string, s string) bool { + for _, v := range src { + if s == v { + return true + } + } + return false +} + +func loggingSvc(ignoreOps []string) (*s3.S3, *[]string, *[]interface{}) { + var m sync.Mutex + partNum := 0 + names := []string{} + params := []interface{}{} + svc := s3.New(unit.Session) + svc.Handlers.Unmarshal.Clear() + svc.Handlers.UnmarshalMeta.Clear() + svc.Handlers.UnmarshalError.Clear() + svc.Handlers.Send.Clear() + svc.Handlers.Send.PushBack(func(r *request.Request) { + m.Lock() + defer m.Unlock() + + if !contains(ignoreOps, r.Operation.Name) { + names = append(names, r.Operation.Name) + params = append(params, r.Params) + } + + r.HTTPResponse = &http.Response{ + StatusCode: 200, + Body: ioutil.NopCloser(bytes.NewReader([]byte{})), + } + + switch data := r.Data.(type) { + case *s3.CreateMultipartUploadOutput: + data.UploadId = aws.String("UPLOAD-ID") + case *s3.UploadPartOutput: + partNum++ + data.ETag = aws.String(fmt.Sprintf("ETAG%d", partNum)) + case *s3.CompleteMultipartUploadOutput: + data.Location = aws.String("https://location") + data.VersionId = aws.String("VERSION-ID") + case *s3.PutObjectOutput: + data.VersionId = aws.String("VERSION-ID") + } + }) + + return svc, &names, ¶ms +} + +func buflen(i interface{}) int { + r := i.(io.Reader) + b, _ := ioutil.ReadAll(r) + return len(b) +} + +func TestUploadOrderMulti(t *testing.T) { + s, ops, args := loggingSvc(emptyList) + u := s3manager.NewUploaderWithClient(s) + + resp, err := u.Upload(&s3manager.UploadInput{ + Bucket: aws.String("Bucket"), + Key: aws.String("Key"), + Body: bytes.NewReader(buf12MB), + ServerSideEncryption: aws.String("AES256"), + ContentType: aws.String("content/type"), + }) + + assert.NoError(t, err) + assert.Equal(t, []string{"CreateMultipartUpload", "UploadPart", "UploadPart", "UploadPart", "CompleteMultipartUpload"}, *ops) + assert.Equal(t, "https://location", resp.Location) + assert.Equal(t, "UPLOAD-ID", resp.UploadID) + 
assert.Equal(t, aws.String("VERSION-ID"), resp.VersionID) + + // Validate input values + + // UploadPart + assert.Equal(t, "UPLOAD-ID", val((*args)[1], "UploadId")) + assert.Equal(t, "UPLOAD-ID", val((*args)[2], "UploadId")) + assert.Equal(t, "UPLOAD-ID", val((*args)[3], "UploadId")) + + // CompleteMultipartUpload + assert.Equal(t, "UPLOAD-ID", val((*args)[4], "UploadId")) + assert.Equal(t, int64(1), val((*args)[4], "MultipartUpload.Parts[0].PartNumber")) + assert.Equal(t, int64(2), val((*args)[4], "MultipartUpload.Parts[1].PartNumber")) + assert.Equal(t, int64(3), val((*args)[4], "MultipartUpload.Parts[2].PartNumber")) + assert.Regexp(t, `^ETAG\d+$`, val((*args)[4], "MultipartUpload.Parts[0].ETag")) + assert.Regexp(t, `^ETAG\d+$`, val((*args)[4], "MultipartUpload.Parts[1].ETag")) + assert.Regexp(t, `^ETAG\d+$`, val((*args)[4], "MultipartUpload.Parts[2].ETag")) + + // Custom headers + assert.Equal(t, "AES256", val((*args)[0], "ServerSideEncryption")) + assert.Equal(t, "content/type", val((*args)[0], "ContentType")) +} + +func TestUploadOrderMultiDifferentPartSize(t *testing.T) { + s, ops, args := loggingSvc(emptyList) + mgr := s3manager.NewUploaderWithClient(s, func(u *s3manager.Uploader) { + u.PartSize = 1024 * 1024 * 7 + u.Concurrency = 1 + }) + _, err := mgr.Upload(&s3manager.UploadInput{ + Bucket: aws.String("Bucket"), + Key: aws.String("Key"), + Body: bytes.NewReader(buf12MB), + }) + + assert.NoError(t, err) + assert.Equal(t, []string{"CreateMultipartUpload", "UploadPart", "UploadPart", "CompleteMultipartUpload"}, *ops) + + // Part lengths + assert.Equal(t, 1024*1024*7, buflen(val((*args)[1], "Body"))) + assert.Equal(t, 1024*1024*5, buflen(val((*args)[2], "Body"))) +} + +func TestUploadIncreasePartSize(t *testing.T) { + s, ops, args := loggingSvc(emptyList) + mgr := s3manager.NewUploaderWithClient(s, func(u *s3manager.Uploader) { + u.Concurrency = 1 + u.MaxUploadParts = 2 + }) + _, err := mgr.Upload(&s3manager.UploadInput{ + Bucket: aws.String("Bucket"), + Key: aws.String("Key"), + Body: bytes.NewReader(buf12MB), + }) + + assert.NoError(t, err) + assert.Equal(t, int64(s3manager.DefaultDownloadPartSize), mgr.PartSize) + assert.Equal(t, []string{"CreateMultipartUpload", "UploadPart", "UploadPart", "CompleteMultipartUpload"}, *ops) + + // Part lengths + assert.Equal(t, (1024*1024*6)+1, buflen(val((*args)[1], "Body"))) + assert.Equal(t, (1024*1024*6)-1, buflen(val((*args)[2], "Body"))) +} + +func TestUploadFailIfPartSizeTooSmall(t *testing.T) { + mgr := s3manager.NewUploader(unit.Session, func(u *s3manager.Uploader) { + u.PartSize = 5 + }) + resp, err := mgr.Upload(&s3manager.UploadInput{ + Bucket: aws.String("Bucket"), + Key: aws.String("Key"), + Body: bytes.NewReader(buf12MB), + }) + + assert.Nil(t, resp) + assert.NotNil(t, err) + + aerr := err.(awserr.Error) + assert.Equal(t, "ConfigError", aerr.Code()) + assert.Contains(t, aerr.Message(), "part size must be at least") +} + +func TestUploadOrderSingle(t *testing.T) { + s, ops, args := loggingSvc(emptyList) + mgr := s3manager.NewUploaderWithClient(s) + resp, err := mgr.Upload(&s3manager.UploadInput{ + Bucket: aws.String("Bucket"), + Key: aws.String("Key"), + Body: bytes.NewReader(buf2MB), + ServerSideEncryption: aws.String("AES256"), + ContentType: aws.String("content/type"), + }) + + assert.NoError(t, err) + assert.Equal(t, []string{"PutObject"}, *ops) + assert.NotEqual(t, "", resp.Location) + assert.Equal(t, aws.String("VERSION-ID"), resp.VersionID) + assert.Equal(t, "", resp.UploadID) + assert.Equal(t, "AES256", val((*args)[0], 
"ServerSideEncryption")) + assert.Equal(t, "content/type", val((*args)[0], "ContentType")) +} + +func TestUploadOrderSingleFailure(t *testing.T) { + s, ops, _ := loggingSvc(emptyList) + s.Handlers.Send.PushBack(func(r *request.Request) { + r.HTTPResponse.StatusCode = 400 + }) + mgr := s3manager.NewUploaderWithClient(s) + resp, err := mgr.Upload(&s3manager.UploadInput{ + Bucket: aws.String("Bucket"), + Key: aws.String("Key"), + Body: bytes.NewReader(buf2MB), + }) + + assert.Error(t, err) + assert.Equal(t, []string{"PutObject"}, *ops) + assert.Nil(t, resp) +} + +func TestUploadOrderZero(t *testing.T) { + s, ops, args := loggingSvc(emptyList) + mgr := s3manager.NewUploaderWithClient(s) + resp, err := mgr.Upload(&s3manager.UploadInput{ + Bucket: aws.String("Bucket"), + Key: aws.String("Key"), + Body: bytes.NewReader(make([]byte, 0)), + }) + + assert.NoError(t, err) + assert.Equal(t, []string{"PutObject"}, *ops) + assert.NotEqual(t, "", resp.Location) + assert.Equal(t, "", resp.UploadID) + assert.Equal(t, 0, buflen(val((*args)[0], "Body"))) +} + +func TestUploadOrderMultiFailure(t *testing.T) { + s, ops, _ := loggingSvc(emptyList) + s.Handlers.Send.PushBack(func(r *request.Request) { + switch t := r.Data.(type) { + case *s3.UploadPartOutput: + if *t.ETag == "ETAG2" { + r.HTTPResponse.StatusCode = 400 + } + } + }) + + mgr := s3manager.NewUploaderWithClient(s, func(u *s3manager.Uploader) { + u.Concurrency = 1 + }) + _, err := mgr.Upload(&s3manager.UploadInput{ + Bucket: aws.String("Bucket"), + Key: aws.String("Key"), + Body: bytes.NewReader(buf12MB), + }) + + assert.Error(t, err) + assert.Equal(t, []string{"CreateMultipartUpload", "UploadPart", "UploadPart", "AbortMultipartUpload"}, *ops) +} + +func TestUploadOrderMultiFailureOnComplete(t *testing.T) { + s, ops, _ := loggingSvc(emptyList) + s.Handlers.Send.PushBack(func(r *request.Request) { + switch r.Data.(type) { + case *s3.CompleteMultipartUploadOutput: + r.HTTPResponse.StatusCode = 400 + } + }) + + mgr := s3manager.NewUploaderWithClient(s, func(u *s3manager.Uploader) { + u.Concurrency = 1 + }) + _, err := mgr.Upload(&s3manager.UploadInput{ + Bucket: aws.String("Bucket"), + Key: aws.String("Key"), + Body: bytes.NewReader(buf12MB), + }) + + assert.Error(t, err) + assert.Equal(t, []string{"CreateMultipartUpload", "UploadPart", "UploadPart", + "UploadPart", "CompleteMultipartUpload", "AbortMultipartUpload"}, *ops) +} + +func TestUploadOrderMultiFailureOnCreate(t *testing.T) { + s, ops, _ := loggingSvc(emptyList) + s.Handlers.Send.PushBack(func(r *request.Request) { + switch r.Data.(type) { + case *s3.CreateMultipartUploadOutput: + r.HTTPResponse.StatusCode = 400 + } + }) + + mgr := s3manager.NewUploaderWithClient(s) + _, err := mgr.Upload(&s3manager.UploadInput{ + Bucket: aws.String("Bucket"), + Key: aws.String("Key"), + Body: bytes.NewReader(make([]byte, 1024*1024*12)), + }) + + assert.Error(t, err) + assert.Equal(t, []string{"CreateMultipartUpload"}, *ops) +} + +func TestUploadOrderMultiFailureLeaveParts(t *testing.T) { + s, ops, _ := loggingSvc(emptyList) + s.Handlers.Send.PushBack(func(r *request.Request) { + switch data := r.Data.(type) { + case *s3.UploadPartOutput: + if *data.ETag == "ETAG2" { + r.HTTPResponse.StatusCode = 400 + } + } + }) + + mgr := s3manager.NewUploaderWithClient(s, func(u *s3manager.Uploader) { + u.Concurrency = 1 + u.LeavePartsOnError = true + }) + _, err := mgr.Upload(&s3manager.UploadInput{ + Bucket: aws.String("Bucket"), + Key: aws.String("Key"), + Body: bytes.NewReader(make([]byte, 1024*1024*12)), + }) + + 
assert.Error(t, err) + assert.Equal(t, []string{"CreateMultipartUpload", "UploadPart", "UploadPart"}, *ops) +} + +type failreader struct { + times int + failCount int +} + +func (f *failreader) Read(b []byte) (int, error) { + f.failCount++ + if f.failCount >= f.times { + return 0, fmt.Errorf("random failure") + } + return len(b), nil +} + +func TestUploadOrderReadFail1(t *testing.T) { + s, ops, _ := loggingSvc(emptyList) + mgr := s3manager.NewUploaderWithClient(s) + _, err := mgr.Upload(&s3manager.UploadInput{ + Bucket: aws.String("Bucket"), + Key: aws.String("Key"), + Body: &failreader{times: 1}, + }) + + assert.Equal(t, "ReadRequestBody", err.(awserr.Error).Code()) + assert.EqualError(t, err.(awserr.Error).OrigErr(), "random failure") + assert.Equal(t, []string{}, *ops) +} + +func TestUploadOrderReadFail2(t *testing.T) { + s, ops, _ := loggingSvc([]string{"UploadPart"}) + mgr := s3manager.NewUploaderWithClient(s, func(u *s3manager.Uploader) { + u.Concurrency = 1 + }) + _, err := mgr.Upload(&s3manager.UploadInput{ + Bucket: aws.String("Bucket"), + Key: aws.String("Key"), + Body: &failreader{times: 2}, + }) + + assert.Equal(t, "ReadRequestBody", err.(awserr.Error).Code()) + assert.EqualError(t, err.(awserr.Error).OrigErr(), "random failure") + assert.Equal(t, []string{"CreateMultipartUpload", "AbortMultipartUpload"}, *ops) +} + +type sizedReader struct { + size int + cur int +} + +func (s *sizedReader) Read(p []byte) (n int, err error) { + if s.cur >= s.size { + return 0, io.EOF + } + + n = len(p) + s.cur += len(p) + if s.cur > s.size { + n -= s.cur - s.size + } + + return +} + +func TestUploadOrderMultiBufferedReader(t *testing.T) { + s, ops, args := loggingSvc(emptyList) + mgr := s3manager.NewUploaderWithClient(s) + _, err := mgr.Upload(&s3manager.UploadInput{ + Bucket: aws.String("Bucket"), + Key: aws.String("Key"), + Body: &sizedReader{size: 1024 * 1024 * 12}, + }) + + assert.NoError(t, err) + assert.Equal(t, []string{"CreateMultipartUpload", "UploadPart", "UploadPart", "UploadPart", "CompleteMultipartUpload"}, *ops) + + // Part lengths + parts := []int{ + buflen(val((*args)[1], "Body")), + buflen(val((*args)[2], "Body")), + buflen(val((*args)[3], "Body")), + } + sort.Ints(parts) + assert.Equal(t, []int{1024 * 1024 * 2, 1024 * 1024 * 5, 1024 * 1024 * 5}, parts) +} + +func TestUploadOrderMultiBufferedReaderExceedTotalParts(t *testing.T) { + s, ops, _ := loggingSvc([]string{"UploadPart"}) + mgr := s3manager.NewUploaderWithClient(s, func(u *s3manager.Uploader) { + u.Concurrency = 1 + u.MaxUploadParts = 2 + }) + resp, err := mgr.Upload(&s3manager.UploadInput{ + Bucket: aws.String("Bucket"), + Key: aws.String("Key"), + Body: &sizedReader{size: 1024 * 1024 * 12}, + }) + + assert.Error(t, err) + assert.Nil(t, resp) + assert.Equal(t, []string{"CreateMultipartUpload", "AbortMultipartUpload"}, *ops) + + aerr := err.(awserr.Error) + assert.Equal(t, "TotalPartsExceeded", aerr.Code()) + assert.Contains(t, aerr.Message(), "configured MaxUploadParts (2)") +} + +func TestUploadOrderSingleBufferedReader(t *testing.T) { + s, ops, _ := loggingSvc(emptyList) + mgr := s3manager.NewUploaderWithClient(s) + resp, err := mgr.Upload(&s3manager.UploadInput{ + Bucket: aws.String("Bucket"), + Key: aws.String("Key"), + Body: &sizedReader{size: 1024 * 1024 * 2}, + }) + + assert.NoError(t, err) + assert.Equal(t, []string{"PutObject"}, *ops) + assert.NotEqual(t, "", resp.Location) + assert.Equal(t, "", resp.UploadID) +} + +func TestUploadZeroLenObject(t *testing.T) { + requestMade := false + server := 
httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+		requestMade = true
+		w.WriteHeader(http.StatusOK)
+	}))
+	mgr := s3manager.NewUploaderWithClient(s3.New(unit.Session, &aws.Config{
+		Endpoint: aws.String(server.URL),
+	}))
+	resp, err := mgr.Upload(&s3manager.UploadInput{
+		Bucket: aws.String("Bucket"),
+		Key:    aws.String("Key"),
+		Body:   strings.NewReader(""),
+	})
+
+	assert.NoError(t, err)
+	assert.True(t, requestMade)
+	assert.NotEqual(t, "", resp.Location)
+	assert.Equal(t, "", resp.UploadID)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/service.go b/vendor/github.com/aws/aws-sdk-go/service/s3/service.go
new file mode 100644
index 000000000..e80d95429
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/s3/service.go
@@ -0,0 +1,86 @@
+// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT.
+
+package s3
+
+import (
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/aws/client"
+	"github.com/aws/aws-sdk-go/aws/client/metadata"
+	"github.com/aws/aws-sdk-go/aws/request"
+	"github.com/aws/aws-sdk-go/private/protocol/restxml"
+	"github.com/aws/aws-sdk-go/private/signer/v4"
+)
+
+// S3 is a client for Amazon S3.
+// The service client's operations are safe to be used concurrently.
+// It is not safe to mutate any of the client's properties though.
+type S3 struct {
+	*client.Client
+}
+
+// Used for custom client initialization logic
+var initClient func(*client.Client)
+
+// Used for custom request initialization logic
+var initRequest func(*request.Request)
+
+// A ServiceName is the name of the service the client will make API calls to.
+const ServiceName = "s3"
+
+// New creates a new instance of the S3 client with a session.
+// If additional configuration is needed for the client instance use the optional
+// aws.Config parameter to add your extra config.
+//
+// Example:
+//     // Create an S3 client from just a session.
+//     svc := s3.New(mySession)
+//
+//     // Create an S3 client with additional configuration
+//     svc := s3.New(mySession, aws.NewConfig().WithRegion("us-west-2"))
+func New(p client.ConfigProvider, cfgs ...*aws.Config) *S3 {
+	c := p.ClientConfig(ServiceName, cfgs...)
+	return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion)
+}
+
+// newClient creates, initializes and returns a new service client instance.
+func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *S3 {
+	svc := &S3{
+		Client: client.New(
+			cfg,
+			metadata.ClientInfo{
+				ServiceName:   ServiceName,
+				SigningRegion: signingRegion,
+				Endpoint:      endpoint,
+				APIVersion:    "2006-03-01",
+			},
+			handlers,
+		),
+	}
+
+	// Handlers
+	svc.Handlers.Sign.PushBack(v4.Sign)
+	svc.Handlers.Build.PushBack(restxml.Build)
+	svc.Handlers.Unmarshal.PushBack(restxml.Unmarshal)
+	svc.Handlers.UnmarshalMeta.PushBack(restxml.UnmarshalMeta)
+	svc.Handlers.UnmarshalError.PushBack(restxml.UnmarshalError)
+
+	// Run custom client initialization if present
+	if initClient != nil {
+		initClient(svc.Client)
+	}
+
+	return svc
+}
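newClient wires the protocol handlers onto the client's handler lists, and user code can push additional handlers onto the same lists; that is exactly how the tests in this patch stub out the network. A short editor-added sketch (the region and logging behavior are illustrative):

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/request"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.New(), aws.NewConfig().WithRegion("us-west-2"))

	// Log every operation name just before the request is sent.
	svc.Handlers.Send.PushFront(func(r *request.Request) {
		fmt.Println("sending:", r.Operation.Name)
	})

	if _, err := svc.ListBuckets(&s3.ListBucketsInput{}); err != nil {
		log.Println("ListBuckets:", err) // will fail without credentials
	}
}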
+func (c *S3) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + // Run custom request initialization if present + if initRequest != nil { + initRequest(req) + } + + return req +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/sse.go b/vendor/github.com/aws/aws-sdk-go/service/s3/sse.go new file mode 100644 index 000000000..268ea2fb4 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/s3/sse.go @@ -0,0 +1,44 @@ +package s3 + +import ( + "crypto/md5" + "encoding/base64" + + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/awsutil" + "github.com/aws/aws-sdk-go/aws/request" +) + +var errSSERequiresSSL = awserr.New("ConfigError", "cannot send SSE keys over HTTP.", nil) + +func validateSSERequiresSSL(r *request.Request) { + if r.HTTPRequest.URL.Scheme != "https" { + p, _ := awsutil.ValuesAtPath(r.Params, "SSECustomerKey||CopySourceSSECustomerKey") + if len(p) > 0 { + r.Error = errSSERequiresSSL + } + } +} + +func computeSSEKeys(r *request.Request) { + headers := []string{ + "x-amz-server-side-encryption-customer-key", + "x-amz-copy-source-server-side-encryption-customer-key", + } + + for _, h := range headers { + md5h := h + "-md5" + if key := r.HTTPRequest.Header.Get(h); key != "" { + // Base64-encode the value + b64v := base64.StdEncoding.EncodeToString([]byte(key)) + r.HTTPRequest.Header.Set(h, b64v) + + // Add MD5 if it wasn't computed + if r.HTTPRequest.Header.Get(md5h) == "" { + sum := md5.Sum([]byte(key)) + b64sum := base64.StdEncoding.EncodeToString(sum[:]) + r.HTTPRequest.Header.Set(md5h, b64sum) + } + } + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/sse_test.go b/vendor/github.com/aws/aws-sdk-go/service/s3/sse_test.go new file mode 100644 index 000000000..5f1ca64bf --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/s3/sse_test.go @@ -0,0 +1,79 @@ +package s3_test + +import ( + "testing" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/awstesting/unit" + "github.com/aws/aws-sdk-go/service/s3" + "github.com/stretchr/testify/assert" +) + +func TestSSECustomerKeyOverHTTPError(t *testing.T) { + s := s3.New(unit.Session, &aws.Config{DisableSSL: aws.Bool(true)}) + req, _ := s.CopyObjectRequest(&s3.CopyObjectInput{ + Bucket: aws.String("bucket"), + CopySource: aws.String("bucket/source"), + Key: aws.String("dest"), + SSECustomerKey: aws.String("key"), + }) + err := req.Build() + + assert.Error(t, err) + assert.Equal(t, "ConfigError", err.(awserr.Error).Code()) + assert.Contains(t, err.(awserr.Error).Message(), "cannot send SSE keys over HTTP") +} + +func TestCopySourceSSECustomerKeyOverHTTPError(t *testing.T) { + s := s3.New(unit.Session, &aws.Config{DisableSSL: aws.Bool(true)}) + req, _ := s.CopyObjectRequest(&s3.CopyObjectInput{ + Bucket: aws.String("bucket"), + CopySource: aws.String("bucket/source"), + Key: aws.String("dest"), + CopySourceSSECustomerKey: aws.String("key"), + }) + err := req.Build() + + assert.Error(t, err) + assert.Equal(t, "ConfigError", err.(awserr.Error).Code()) + assert.Contains(t, err.(awserr.Error).Message(), "cannot send SSE keys over HTTP") +} + +func TestComputeSSEKeys(t *testing.T) { + s := s3.New(unit.Session) + req, _ := s.CopyObjectRequest(&s3.CopyObjectInput{ + Bucket: aws.String("bucket"), + CopySource: aws.String("bucket/source"), + Key: aws.String("dest"), + SSECustomerKey: aws.String("key"), + CopySourceSSECustomerKey: aws.String("key"), + }) + err := 
req.Build() + + assert.NoError(t, err) + assert.Equal(t, "a2V5", req.HTTPRequest.Header.Get("x-amz-server-side-encryption-customer-key")) + assert.Equal(t, "a2V5", req.HTTPRequest.Header.Get("x-amz-copy-source-server-side-encryption-customer-key")) + assert.Equal(t, "PG4LipwVIkqCKLmpjKFTHQ==", req.HTTPRequest.Header.Get("x-amz-server-side-encryption-customer-key-md5")) + assert.Equal(t, "PG4LipwVIkqCKLmpjKFTHQ==", req.HTTPRequest.Header.Get("x-amz-copy-source-server-side-encryption-customer-key-md5")) +} + +func TestComputeSSEKeysShortcircuit(t *testing.T) { + s := s3.New(unit.Session) + req, _ := s.CopyObjectRequest(&s3.CopyObjectInput{ + Bucket: aws.String("bucket"), + CopySource: aws.String("bucket/source"), + Key: aws.String("dest"), + SSECustomerKey: aws.String("key"), + CopySourceSSECustomerKey: aws.String("key"), + SSECustomerKeyMD5: aws.String("MD5"), + CopySourceSSECustomerKeyMD5: aws.String("MD5"), + }) + err := req.Build() + + assert.NoError(t, err) + assert.Equal(t, "a2V5", req.HTTPRequest.Header.Get("x-amz-server-side-encryption-customer-key")) + assert.Equal(t, "a2V5", req.HTTPRequest.Header.Get("x-amz-copy-source-server-side-encryption-customer-key")) + assert.Equal(t, "MD5", req.HTTPRequest.Header.Get("x-amz-server-side-encryption-customer-key-md5")) + assert.Equal(t, "MD5", req.HTTPRequest.Header.Get("x-amz-copy-source-server-side-encryption-customer-key-md5")) +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/statusok_error.go b/vendor/github.com/aws/aws-sdk-go/service/s3/statusok_error.go new file mode 100644 index 000000000..ce65fcdaf --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/s3/statusok_error.go @@ -0,0 +1,36 @@ +package s3 + +import ( + "bytes" + "io/ioutil" + "net/http" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/request" +) + +func copyMultipartStatusOKUnmarhsalError(r *request.Request) { + b, err := ioutil.ReadAll(r.HTTPResponse.Body) + if err != nil { + r.Error = awserr.New("SerializationError", "unable to read response body", err) + return + } + body := bytes.NewReader(b) + r.HTTPResponse.Body = aws.ReadSeekCloser(body) + defer r.HTTPResponse.Body.(aws.ReaderSeekerCloser).Seek(0, 0) + + if body.Len() == 0 { + // If there is no body don't attempt to parse the body. 
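+		// (A zero-length 200 response is genuine success, so leave r.Error nil;
+		// the deferred Seek above rewinds the buffered body for later handlers.)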
+		return
+	}
+
+	unmarshalError(r)
+	if err, ok := r.Error.(awserr.Error); ok && err != nil {
+		if err.Code() == "SerializationError" {
+			r.Error = nil
+			return
+		}
+		r.HTTPResponse.StatusCode = http.StatusServiceUnavailable
+	}
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/statusok_error_test.go b/vendor/github.com/aws/aws-sdk-go/service/s3/statusok_error_test.go
new file mode 100644
index 000000000..f508cd153
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/s3/statusok_error_test.go
@@ -0,0 +1,130 @@
+package s3_test
+
+import (
+	"fmt"
+	"net/http"
+	"net/http/httptest"
+	"testing"
+	"time"
+
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/aws/awserr"
+	"github.com/aws/aws-sdk-go/awstesting/unit"
+	"github.com/aws/aws-sdk-go/service/s3"
+)
+
+const errMsg = `<Error><Code>ErrorCode</Code><Message>message body</Message><RequestId>requestID</RequestId><HostId>hostID=</HostId></Error>`
+
+var lastModifiedTime = time.Date(2009, 11, 23, 0, 0, 0, 0, time.UTC)
+
+func TestCopyObjectNoError(t *testing.T) {
+	const successMsg = `
+<?xml version="1.0" encoding="UTF-8"?>
+<CopyObjectResult xmlns="http://s3.amazonaws.com/doc/2006-03-01/"><LastModified>2009-11-23T0:00:00Z</LastModified><ETag>"1da64c7f13d1e8dbeaea40b905fd586c"</ETag></CopyObjectResult>`
+
+	res, err := newCopyTestSvc(successMsg).CopyObject(&s3.CopyObjectInput{
+		Bucket:     aws.String("bucketname"),
+		CopySource: aws.String("bucketname/exists.txt"),
+		Key:        aws.String("destination.txt"),
+	})
+
+	require.NoError(t, err)
+
+	assert.Equal(t, fmt.Sprintf(`%q`, "1da64c7f13d1e8dbeaea40b905fd586c"), *res.CopyObjectResult.ETag)
+	assert.Equal(t, lastModifiedTime, *res.CopyObjectResult.LastModified)
+}
+
+func TestCopyObjectError(t *testing.T) {
+	_, err := newCopyTestSvc(errMsg).CopyObject(&s3.CopyObjectInput{
+		Bucket:     aws.String("bucketname"),
+		CopySource: aws.String("bucketname/doesnotexist.txt"),
+		Key:        aws.String("destination.txt"),
+	})
+
+	require.Error(t, err)
+	e := err.(awserr.Error)
+
+	assert.Equal(t, "ErrorCode", e.Code())
+	assert.Equal(t, "message body", e.Message())
+}
+
+func TestUploadPartCopySuccess(t *testing.T) {
+	const successMsg = `
+<?xml version="1.0" encoding="UTF-8"?>
+<CopyPartResult xmlns="http://s3.amazonaws.com/doc/2006-03-01/"><LastModified>2009-11-23T0:00:00Z</LastModified><ETag>"1da64c7f13d1e8dbeaea40b905fd586c"</ETag></CopyPartResult>`
+
+	res, err := newCopyTestSvc(successMsg).UploadPartCopy(&s3.UploadPartCopyInput{
+		Bucket:     aws.String("bucketname"),
+		CopySource: aws.String("bucketname/doesnotexist.txt"),
+		Key:        aws.String("destination.txt"),
+		PartNumber: aws.Int64(0),
+		UploadId:   aws.String("uploadID"),
+	})
+
+	require.NoError(t, err)
+
+	assert.Equal(t, fmt.Sprintf(`%q`, "1da64c7f13d1e8dbeaea40b905fd586c"), *res.CopyPartResult.ETag)
+	assert.Equal(t, lastModifiedTime, *res.CopyPartResult.LastModified)
+}
+
+func TestUploadPartCopyError(t *testing.T) {
+	_, err := newCopyTestSvc(errMsg).UploadPartCopy(&s3.UploadPartCopyInput{
+		Bucket:     aws.String("bucketname"),
+		CopySource: aws.String("bucketname/doesnotexist.txt"),
+		Key:        aws.String("destination.txt"),
+		PartNumber: aws.Int64(0),
+		UploadId:   aws.String("uploadID"),
+	})
+
+	require.Error(t, err)
+	e := err.(awserr.Error)
+
+	assert.Equal(t, "ErrorCode", e.Code())
+	assert.Equal(t, "message body", e.Message())
+}
+
+func TestCompleteMultipartUploadSuccess(t *testing.T) {
+	const successMsg = `
+<?xml version="1.0" encoding="UTF-8"?>
+<CompleteMultipartUploadResult xmlns="http://s3.amazonaws.com/doc/2006-03-01/"><Location>locationName</Location><Bucket>bucketName</Bucket><Key>keyName</Key><ETag>"etagVal"</ETag></CompleteMultipartUploadResult>`
+	res, err := newCopyTestSvc(successMsg).CompleteMultipartUpload(&s3.CompleteMultipartUploadInput{
+		Bucket:   aws.String("bucketname"),
+		Key:      aws.String("key"),
+		UploadId: aws.String("uploadID"),
+	})
+
+	require.NoError(t, err)
+
+	assert.Equal(t, `"etagVal"`, *res.ETag)
+	assert.Equal(t, "bucketName", *res.Bucket)
+	assert.Equal(t, "keyName", *res.Key)
+	assert.Equal(t, "locationName", *res.Location)
+}
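+
+// The tests above share one recipe that also works outside this file: stand
+// up an httptest server (see newCopyTestSvc below) that answers 200 OK with
+// an S3-style XML error body, then check that the error still surfaces. A
+// minimal hedged sketch (bucket and key names are made up):
+//
+//	svc := newCopyTestSvc(errMsg)
+//	_, err := svc.CopyObject(&s3.CopyObjectInput{
+//		Bucket:     aws.String("b"),
+//		CopySource: aws.String("b/src"),
+//		Key:        aws.String("dst"),
+//	})
+//	fmt.Println(err) // prints the ErrorCode despite the 200 status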
+ +func TestCompleteMultipartUploadError(t *testing.T) { + _, err := newCopyTestSvc(errMsg).CompleteMultipartUpload(&s3.CompleteMultipartUploadInput{ + Bucket: aws.String("bucketname"), + Key: aws.String("key"), + UploadId: aws.String("uploadID"), + }) + + require.Error(t, err) + e := err.(awserr.Error) + + assert.Equal(t, "ErrorCode", e.Code()) + assert.Equal(t, "message body", e.Message()) +} + +func newCopyTestSvc(errMsg string) *s3.S3 { + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + http.Error(w, errMsg, http.StatusOK) + })) + return s3.New(unit.Session, aws.NewConfig(). + WithEndpoint(server.URL). + WithDisableSSL(true). + WithMaxRetries(0). + WithS3ForcePathStyle(true)) +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/unmarshal_error.go b/vendor/github.com/aws/aws-sdk-go/service/s3/unmarshal_error.go new file mode 100644 index 000000000..cd6004825 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/s3/unmarshal_error.go @@ -0,0 +1,57 @@ +package s3 + +import ( + "encoding/xml" + "fmt" + "io" + "net/http" + "strings" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/request" +) + +type xmlErrorResponse struct { + XMLName xml.Name `xml:"Error"` + Code string `xml:"Code"` + Message string `xml:"Message"` +} + +func unmarshalError(r *request.Request) { + defer r.HTTPResponse.Body.Close() + + if r.HTTPResponse.StatusCode == http.StatusMovedPermanently { + r.Error = awserr.NewRequestFailure( + awserr.New("BucketRegionError", + fmt.Sprintf("incorrect region, the bucket is not in '%s' region", + aws.StringValue(r.Config.Region)), + nil), + r.HTTPResponse.StatusCode, + r.RequestID, + ) + return + } + + if r.HTTPResponse.ContentLength == 0 { + // No body, use status code to generate an awserr.Error + r.Error = awserr.NewRequestFailure( + awserr.New(strings.Replace(r.HTTPResponse.Status, " ", "", -1), r.HTTPResponse.Status, nil), + r.HTTPResponse.StatusCode, + r.RequestID, + ) + return + } + + resp := &xmlErrorResponse{} + err := xml.NewDecoder(r.HTTPResponse.Body).Decode(resp) + if err != nil && err != io.EOF { + r.Error = awserr.New("SerializationError", "failed to decode S3 XML error response", nil) + } else { + r.Error = awserr.NewRequestFailure( + awserr.New(resp.Code, resp.Message, nil), + r.HTTPResponse.StatusCode, + r.RequestID, + ) + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/unmarshal_error_test.go b/vendor/github.com/aws/aws-sdk-go/service/s3/unmarshal_error_test.go new file mode 100644 index 000000000..9e091398a --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/s3/unmarshal_error_test.go @@ -0,0 +1,165 @@ +package s3_test + +import ( + "io/ioutil" + "net/http" + "strings" + "testing" + + "github.com/stretchr/testify/assert" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/awstesting/unit" + "github.com/aws/aws-sdk-go/service/s3" +) + +type testErrorCase struct { + RespFn func() *http.Response + ReqID string + Code, Msg string +} + +var testUnmarshalCases = []testErrorCase{ + { + RespFn: func() *http.Response { + return &http.Response{ + StatusCode: 301, + Header: http.Header{"X-Amz-Request-Id": []string{"abc123"}}, + Body: ioutil.NopCloser(nil), + ContentLength: -1, + } + }, + ReqID: "abc123", + Code: "BucketRegionError", Msg: "incorrect region, the bucket is not in 'mock-region' region", + }, + { + RespFn: func() *http.Response 
{
+			return &http.Response{
+				StatusCode:    403,
+				Header:        http.Header{"X-Amz-Request-Id": []string{"abc123"}},
+				Body:          ioutil.NopCloser(nil),
+				ContentLength: 0,
+			}
+		},
+		ReqID: "abc123",
+		Code:  "Forbidden", Msg: "Forbidden",
+	},
+	{
+		RespFn: func() *http.Response {
+			return &http.Response{
+				StatusCode:    400,
+				Header:        http.Header{"X-Amz-Request-Id": []string{"abc123"}},
+				Body:          ioutil.NopCloser(nil),
+				ContentLength: 0,
+			}
+		},
+		ReqID: "abc123",
+		Code:  "BadRequest", Msg: "Bad Request",
+	},
+	{
+		RespFn: func() *http.Response {
+			return &http.Response{
+				StatusCode:    404,
+				Header:        http.Header{"X-Amz-Request-Id": []string{"abc123"}},
+				Body:          ioutil.NopCloser(nil),
+				ContentLength: 0,
+			}
+		},
+		ReqID: "abc123",
+		Code:  "NotFound", Msg: "Not Found",
+	},
+	{
+		RespFn: func() *http.Response {
+			body := `<Error><Code>SomeException</Code><Message>Exception message</Message></Error>`
+			return &http.Response{
+				StatusCode:    500,
+				Header:        http.Header{"X-Amz-Request-Id": []string{"abc123"}},
+				Body:          ioutil.NopCloser(strings.NewReader(body)),
+				ContentLength: int64(len(body)),
+			}
+		},
+		ReqID: "abc123",
+		Code:  "SomeException", Msg: "Exception message",
+	},
+}
+
+func TestUnmarshalError(t *testing.T) {
+	for _, c := range testUnmarshalCases {
+		s := s3.New(unit.Session)
+		s.Handlers.Send.Clear()
+		s.Handlers.Send.PushBack(func(r *request.Request) {
+			r.HTTPResponse = c.RespFn()
+			r.HTTPResponse.Status = http.StatusText(r.HTTPResponse.StatusCode)
+		})
+		_, err := s.PutBucketAcl(&s3.PutBucketAclInput{
+			Bucket: aws.String("bucket"), ACL: aws.String("public-read"),
+		})
+
+		assert.Error(t, err)
+		assert.Equal(t, c.Code, err.(awserr.Error).Code())
+		assert.Equal(t, c.Msg, err.(awserr.Error).Message())
+		assert.Equal(t, c.ReqID, err.(awserr.RequestFailure).RequestID())
+	}
+}
+
+const completeMultiResp = `
+163
+
+<?xml version="1.0" encoding="UTF-8"?>
+
+<CompleteMultipartUploadResult xmlns="http://s3.amazonaws.com/doc/2006-03-01/"><Location>https://bucket.s3-us-west-2.amazonaws.com/key</Location><Bucket>bucket</Bucket><Key>key</Key><ETag>"a7d414b9133d6483d9a1c4e04e856e3b-2"</ETag></CompleteMultipartUploadResult>
+0
+`
+
+func Test200NoErrorUnmarshalError(t *testing.T) {
+	s := s3.New(unit.Session)
+	s.Handlers.Send.Clear()
+	s.Handlers.Send.PushBack(func(r *request.Request) {
+		r.HTTPResponse = &http.Response{
+			StatusCode:    200,
+			Header:        http.Header{"X-Amz-Request-Id": []string{"abc123"}},
+			Body:          ioutil.NopCloser(strings.NewReader(completeMultiResp)),
+			ContentLength: -1,
+		}
+		r.HTTPResponse.Status = http.StatusText(r.HTTPResponse.StatusCode)
+	})
+	_, err := s.CompleteMultipartUpload(&s3.CompleteMultipartUploadInput{
+		Bucket: aws.String("bucket"), Key: aws.String("key"),
+		UploadId: aws.String("id"),
+		MultipartUpload: &s3.CompletedMultipartUpload{Parts: []*s3.CompletedPart{
+			{ETag: aws.String("etag"), PartNumber: aws.Int64(1)},
+		}},
+	})
+
+	assert.NoError(t, err)
+}
+
+const completeMultiErrResp = `<Error><Code>SomeException</Code><Message>Exception message</Message></Error>`
+
+func Test200WithErrorUnmarshalError(t *testing.T) {
+	s := s3.New(unit.Session)
+	s.Handlers.Send.Clear()
+	s.Handlers.Send.PushBack(func(r *request.Request) {
+		r.HTTPResponse = &http.Response{
+			StatusCode:    200,
+			Header:        http.Header{"X-Amz-Request-Id": []string{"abc123"}},
+			Body:          ioutil.NopCloser(strings.NewReader(completeMultiErrResp)),
+			ContentLength: -1,
+		}
+		r.HTTPResponse.Status = http.StatusText(r.HTTPResponse.StatusCode)
+	})
+	_, err := s.CompleteMultipartUpload(&s3.CompleteMultipartUploadInput{
+		Bucket: aws.String("bucket"), Key: aws.String("key"),
+		UploadId: aws.String("id"),
+		MultipartUpload: &s3.CompletedMultipartUpload{Parts: []*s3.CompletedPart{
+			{ETag: aws.String("etag"), PartNumber: aws.Int64(1)},
+		}},
+	})
+
+	assert.Error(t, err)
+
+	assert.Equal(t, "SomeException",
err.(awserr.Error).Code()) + assert.Equal(t, "Exception message", err.(awserr.Error).Message()) + assert.Equal(t, "abc123", err.(awserr.RequestFailure).RequestID()) +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/waiters.go b/vendor/github.com/aws/aws-sdk-go/service/s3/waiters.go new file mode 100644 index 000000000..4879fca8a --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/s3/waiters.go @@ -0,0 +1,117 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +package s3 + +import ( + "github.com/aws/aws-sdk-go/private/waiter" +) + +func (c *S3) WaitUntilBucketExists(input *HeadBucketInput) error { + waiterCfg := waiter.Config{ + Operation: "HeadBucket", + Delay: 5, + MaxAttempts: 20, + Acceptors: []waiter.WaitAcceptor{ + { + State: "success", + Matcher: "status", + Argument: "", + Expected: 200, + }, + { + State: "success", + Matcher: "status", + Argument: "", + Expected: 403, + }, + { + State: "retry", + Matcher: "status", + Argument: "", + Expected: 404, + }, + }, + } + + w := waiter.Waiter{ + Client: c, + Input: input, + Config: waiterCfg, + } + return w.Wait() +} + +func (c *S3) WaitUntilBucketNotExists(input *HeadBucketInput) error { + waiterCfg := waiter.Config{ + Operation: "HeadBucket", + Delay: 5, + MaxAttempts: 20, + Acceptors: []waiter.WaitAcceptor{ + { + State: "success", + Matcher: "status", + Argument: "", + Expected: 404, + }, + }, + } + + w := waiter.Waiter{ + Client: c, + Input: input, + Config: waiterCfg, + } + return w.Wait() +} + +func (c *S3) WaitUntilObjectExists(input *HeadObjectInput) error { + waiterCfg := waiter.Config{ + Operation: "HeadObject", + Delay: 5, + MaxAttempts: 20, + Acceptors: []waiter.WaitAcceptor{ + { + State: "success", + Matcher: "status", + Argument: "", + Expected: 200, + }, + { + State: "retry", + Matcher: "status", + Argument: "", + Expected: 404, + }, + }, + } + + w := waiter.Waiter{ + Client: c, + Input: input, + Config: waiterCfg, + } + return w.Wait() +} + +func (c *S3) WaitUntilObjectNotExists(input *HeadObjectInput) error { + waiterCfg := waiter.Config{ + Operation: "HeadObject", + Delay: 5, + MaxAttempts: 20, + Acceptors: []waiter.WaitAcceptor{ + { + State: "success", + Matcher: "status", + Argument: "", + Expected: 404, + }, + }, + } + + w := waiter.Waiter{ + Client: c, + Input: input, + Config: waiterCfg, + } + return w.Wait() +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/sns/api.go b/vendor/github.com/aws/aws-sdk-go/service/sns/api.go new file mode 100644 index 000000000..8b2c05831 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/sns/api.go @@ -0,0 +1,2076 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +// Package sns provides a client for Amazon Simple Notification Service. +package sns + +import ( + "github.com/aws/aws-sdk-go/aws/awsutil" + "github.com/aws/aws-sdk-go/aws/request" +) + +const opAddPermission = "AddPermission" + +// AddPermissionRequest generates a request for the AddPermission operation. +func (c *SNS) AddPermissionRequest(input *AddPermissionInput) (req *request.Request, output *AddPermissionOutput) { + op := &request.Operation{ + Name: opAddPermission, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &AddPermissionInput{} + } + + req = c.newRequest(op, input, output) + output = &AddPermissionOutput{} + req.Data = output + return +} + +// Adds a statement to a topic's access control policy, granting access for +// the specified AWS accounts to the specified actions. 
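+//
+// A hypothetical usage sketch (the ARN and account ID are made up; svc is an
+// *SNS client from New):
+//
+//	_, err := svc.AddPermission(&sns.AddPermissionInput{
+//		TopicArn:     aws.String("arn:aws:sns:us-west-2:123456789012:MyTopic"),
+//		Label:        aws.String("grant-publish"),
+//		AWSAccountId: []*string{aws.String("111122223333")},
+//		ActionName:   []*string{aws.String("Publish")},
+//	})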
+func (c *SNS) AddPermission(input *AddPermissionInput) (*AddPermissionOutput, error) { + req, out := c.AddPermissionRequest(input) + err := req.Send() + return out, err +} + +const opConfirmSubscription = "ConfirmSubscription" + +// ConfirmSubscriptionRequest generates a request for the ConfirmSubscription operation. +func (c *SNS) ConfirmSubscriptionRequest(input *ConfirmSubscriptionInput) (req *request.Request, output *ConfirmSubscriptionOutput) { + op := &request.Operation{ + Name: opConfirmSubscription, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ConfirmSubscriptionInput{} + } + + req = c.newRequest(op, input, output) + output = &ConfirmSubscriptionOutput{} + req.Data = output + return +} + +// Verifies an endpoint owner's intent to receive messages by validating the +// token sent to the endpoint by an earlier Subscribe action. If the token is +// valid, the action creates a new subscription and returns its Amazon Resource +// Name (ARN). This call requires an AWS signature only when the AuthenticateOnUnsubscribe +// flag is set to "true". +func (c *SNS) ConfirmSubscription(input *ConfirmSubscriptionInput) (*ConfirmSubscriptionOutput, error) { + req, out := c.ConfirmSubscriptionRequest(input) + err := req.Send() + return out, err +} + +const opCreatePlatformApplication = "CreatePlatformApplication" + +// CreatePlatformApplicationRequest generates a request for the CreatePlatformApplication operation. +func (c *SNS) CreatePlatformApplicationRequest(input *CreatePlatformApplicationInput) (req *request.Request, output *CreatePlatformApplicationOutput) { + op := &request.Operation{ + Name: opCreatePlatformApplication, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreatePlatformApplicationInput{} + } + + req = c.newRequest(op, input, output) + output = &CreatePlatformApplicationOutput{} + req.Data = output + return +} + +// Creates a platform application object for one of the supported push notification +// services, such as APNS and GCM, to which devices and mobile apps may register. +// You must specify PlatformPrincipal and PlatformCredential attributes when +// using the CreatePlatformApplication action. The PlatformPrincipal is received +// from the notification service. For APNS/APNS_SANDBOX, PlatformPrincipal is +// "SSL certificate". For GCM, PlatformPrincipal is not applicable. For ADM, +// PlatformPrincipal is "client id". The PlatformCredential is also received +// from the notification service. For APNS/APNS_SANDBOX, PlatformCredential +// is "private key". For GCM, PlatformCredential is "API key". For ADM, PlatformCredential +// is "client secret". The PlatformApplicationArn that is returned when using +// CreatePlatformApplication is then used as an attribute for the CreatePlatformEndpoint +// action. For more information, see Using Amazon SNS Mobile Push Notifications +// (http://docs.aws.amazon.com/sns/latest/dg/SNSMobilePush.html). +func (c *SNS) CreatePlatformApplication(input *CreatePlatformApplicationInput) (*CreatePlatformApplicationOutput, error) { + req, out := c.CreatePlatformApplicationRequest(input) + err := req.Send() + return out, err +} + +const opCreatePlatformEndpoint = "CreatePlatformEndpoint" + +// CreatePlatformEndpointRequest generates a request for the CreatePlatformEndpoint operation. 
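+//
+// Every *Request method in this file returns an unsent request plus a pointer
+// that will hold the output; a sketch of the common pattern:
+//
+//	req, out := svc.CreatePlatformEndpointRequest(input)
+//	err := req.Send() // out is populated once Send returns without error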
+func (c *SNS) CreatePlatformEndpointRequest(input *CreatePlatformEndpointInput) (req *request.Request, output *CreatePlatformEndpointOutput) { + op := &request.Operation{ + Name: opCreatePlatformEndpoint, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreatePlatformEndpointInput{} + } + + req = c.newRequest(op, input, output) + output = &CreatePlatformEndpointOutput{} + req.Data = output + return +} + +// Creates an endpoint for a device and mobile app on one of the supported push +// notification services, such as GCM and APNS. CreatePlatformEndpoint requires +// the PlatformApplicationArn that is returned from CreatePlatformApplication. +// The EndpointArn that is returned when using CreatePlatformEndpoint can then +// be used by the Publish action to send a message to a mobile app or by the +// Subscribe action for subscription to a topic. The CreatePlatformEndpoint +// action is idempotent, so if the requester already owns an endpoint with the +// same device token and attributes, that endpoint's ARN is returned without +// creating a new endpoint. For more information, see Using Amazon SNS Mobile +// Push Notifications (http://docs.aws.amazon.com/sns/latest/dg/SNSMobilePush.html). +// +// When using CreatePlatformEndpoint with Baidu, two attributes must be provided: +// ChannelId and UserId. The token field must also contain the ChannelId. For +// more information, see Creating an Amazon SNS Endpoint for Baidu (http://docs.aws.amazon.com/sns/latest/dg/SNSMobilePushBaiduEndpoint.html). +func (c *SNS) CreatePlatformEndpoint(input *CreatePlatformEndpointInput) (*CreatePlatformEndpointOutput, error) { + req, out := c.CreatePlatformEndpointRequest(input) + err := req.Send() + return out, err +} + +const opCreateTopic = "CreateTopic" + +// CreateTopicRequest generates a request for the CreateTopic operation. +func (c *SNS) CreateTopicRequest(input *CreateTopicInput) (req *request.Request, output *CreateTopicOutput) { + op := &request.Operation{ + Name: opCreateTopic, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateTopicInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateTopicOutput{} + req.Data = output + return +} + +// Creates a topic to which notifications can be published. Users can create +// at most 3000 topics. For more information, see http://aws.amazon.com/sns +// (http://aws.amazon.com/sns/). This action is idempotent, so if the requester +// already owns a topic with the specified name, that topic's ARN is returned +// without creating a new topic. +func (c *SNS) CreateTopic(input *CreateTopicInput) (*CreateTopicOutput, error) { + req, out := c.CreateTopicRequest(input) + err := req.Send() + return out, err +} + +const opDeleteEndpoint = "DeleteEndpoint" + +// DeleteEndpointRequest generates a request for the DeleteEndpoint operation. +func (c *SNS) DeleteEndpointRequest(input *DeleteEndpointInput) (req *request.Request, output *DeleteEndpointOutput) { + op := &request.Operation{ + Name: opDeleteEndpoint, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteEndpointInput{} + } + + req = c.newRequest(op, input, output) + output = &DeleteEndpointOutput{} + req.Data = output + return +} + +// Deletes the endpoint from Amazon SNS. This action is idempotent. For more +// information, see Using Amazon SNS Mobile Push Notifications (http://docs.aws.amazon.com/sns/latest/dg/SNSMobilePush.html). 
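+//
+// A hypothetical sketch (the endpoint ARN is made up):
+//
+//	_, err := svc.DeleteEndpoint(&sns.DeleteEndpointInput{
+//		EndpointArn: aws.String("arn:aws:sns:us-west-2:123456789012:endpoint/GCM/MyApp/0000-0000"),
+//	})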
+func (c *SNS) DeleteEndpoint(input *DeleteEndpointInput) (*DeleteEndpointOutput, error) { + req, out := c.DeleteEndpointRequest(input) + err := req.Send() + return out, err +} + +const opDeletePlatformApplication = "DeletePlatformApplication" + +// DeletePlatformApplicationRequest generates a request for the DeletePlatformApplication operation. +func (c *SNS) DeletePlatformApplicationRequest(input *DeletePlatformApplicationInput) (req *request.Request, output *DeletePlatformApplicationOutput) { + op := &request.Operation{ + Name: opDeletePlatformApplication, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeletePlatformApplicationInput{} + } + + req = c.newRequest(op, input, output) + output = &DeletePlatformApplicationOutput{} + req.Data = output + return +} + +// Deletes a platform application object for one of the supported push notification +// services, such as APNS and GCM. For more information, see Using Amazon SNS +// Mobile Push Notifications (http://docs.aws.amazon.com/sns/latest/dg/SNSMobilePush.html). +func (c *SNS) DeletePlatformApplication(input *DeletePlatformApplicationInput) (*DeletePlatformApplicationOutput, error) { + req, out := c.DeletePlatformApplicationRequest(input) + err := req.Send() + return out, err +} + +const opDeleteTopic = "DeleteTopic" + +// DeleteTopicRequest generates a request for the DeleteTopic operation. +func (c *SNS) DeleteTopicRequest(input *DeleteTopicInput) (req *request.Request, output *DeleteTopicOutput) { + op := &request.Operation{ + Name: opDeleteTopic, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteTopicInput{} + } + + req = c.newRequest(op, input, output) + output = &DeleteTopicOutput{} + req.Data = output + return +} + +// Deletes a topic and all its subscriptions. Deleting a topic might prevent +// some messages previously sent to the topic from being delivered to subscribers. +// This action is idempotent, so deleting a topic that does not exist does not +// result in an error. +func (c *SNS) DeleteTopic(input *DeleteTopicInput) (*DeleteTopicOutput, error) { + req, out := c.DeleteTopicRequest(input) + err := req.Send() + return out, err +} + +const opGetEndpointAttributes = "GetEndpointAttributes" + +// GetEndpointAttributesRequest generates a request for the GetEndpointAttributes operation. +func (c *SNS) GetEndpointAttributesRequest(input *GetEndpointAttributesInput) (req *request.Request, output *GetEndpointAttributesOutput) { + op := &request.Operation{ + Name: opGetEndpointAttributes, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetEndpointAttributesInput{} + } + + req = c.newRequest(op, input, output) + output = &GetEndpointAttributesOutput{} + req.Data = output + return +} + +// Retrieves the endpoint attributes for a device on one of the supported push +// notification services, such as GCM and APNS. For more information, see Using +// Amazon SNS Mobile Push Notifications (http://docs.aws.amazon.com/sns/latest/dg/SNSMobilePush.html). +func (c *SNS) GetEndpointAttributes(input *GetEndpointAttributesInput) (*GetEndpointAttributesOutput, error) { + req, out := c.GetEndpointAttributesRequest(input) + err := req.Send() + return out, err +} + +const opGetPlatformApplicationAttributes = "GetPlatformApplicationAttributes" + +// GetPlatformApplicationAttributesRequest generates a request for the GetPlatformApplicationAttributes operation. 
+func (c *SNS) GetPlatformApplicationAttributesRequest(input *GetPlatformApplicationAttributesInput) (req *request.Request, output *GetPlatformApplicationAttributesOutput) { + op := &request.Operation{ + Name: opGetPlatformApplicationAttributes, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetPlatformApplicationAttributesInput{} + } + + req = c.newRequest(op, input, output) + output = &GetPlatformApplicationAttributesOutput{} + req.Data = output + return +} + +// Retrieves the attributes of the platform application object for the supported +// push notification services, such as APNS and GCM. For more information, see +// Using Amazon SNS Mobile Push Notifications (http://docs.aws.amazon.com/sns/latest/dg/SNSMobilePush.html). +func (c *SNS) GetPlatformApplicationAttributes(input *GetPlatformApplicationAttributesInput) (*GetPlatformApplicationAttributesOutput, error) { + req, out := c.GetPlatformApplicationAttributesRequest(input) + err := req.Send() + return out, err +} + +const opGetSubscriptionAttributes = "GetSubscriptionAttributes" + +// GetSubscriptionAttributesRequest generates a request for the GetSubscriptionAttributes operation. +func (c *SNS) GetSubscriptionAttributesRequest(input *GetSubscriptionAttributesInput) (req *request.Request, output *GetSubscriptionAttributesOutput) { + op := &request.Operation{ + Name: opGetSubscriptionAttributes, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetSubscriptionAttributesInput{} + } + + req = c.newRequest(op, input, output) + output = &GetSubscriptionAttributesOutput{} + req.Data = output + return +} + +// Returns all of the properties of a subscription. +func (c *SNS) GetSubscriptionAttributes(input *GetSubscriptionAttributesInput) (*GetSubscriptionAttributesOutput, error) { + req, out := c.GetSubscriptionAttributesRequest(input) + err := req.Send() + return out, err +} + +const opGetTopicAttributes = "GetTopicAttributes" + +// GetTopicAttributesRequest generates a request for the GetTopicAttributes operation. +func (c *SNS) GetTopicAttributesRequest(input *GetTopicAttributesInput) (req *request.Request, output *GetTopicAttributesOutput) { + op := &request.Operation{ + Name: opGetTopicAttributes, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetTopicAttributesInput{} + } + + req = c.newRequest(op, input, output) + output = &GetTopicAttributesOutput{} + req.Data = output + return +} + +// Returns all of the properties of a topic. Topic properties returned might +// differ based on the authorization of the user. +func (c *SNS) GetTopicAttributes(input *GetTopicAttributesInput) (*GetTopicAttributesOutput, error) { + req, out := c.GetTopicAttributesRequest(input) + err := req.Send() + return out, err +} + +const opListEndpointsByPlatformApplication = "ListEndpointsByPlatformApplication" + +// ListEndpointsByPlatformApplicationRequest generates a request for the ListEndpointsByPlatformApplication operation. 
+func (c *SNS) ListEndpointsByPlatformApplicationRequest(input *ListEndpointsByPlatformApplicationInput) (req *request.Request, output *ListEndpointsByPlatformApplicationOutput) { + op := &request.Operation{ + Name: opListEndpointsByPlatformApplication, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListEndpointsByPlatformApplicationInput{} + } + + req = c.newRequest(op, input, output) + output = &ListEndpointsByPlatformApplicationOutput{} + req.Data = output + return +} + +// Lists the endpoints and endpoint attributes for devices in a supported push +// notification service, such as GCM and APNS. The results for ListEndpointsByPlatformApplication +// are paginated and return a limited list of endpoints, up to 100. If additional +// records are available after the first page results, then a NextToken string +// will be returned. To receive the next page, you call ListEndpointsByPlatformApplication +// again using the NextToken string received from the previous call. When there +// are no more records to return, NextToken will be null. For more information, +// see Using Amazon SNS Mobile Push Notifications (http://docs.aws.amazon.com/sns/latest/dg/SNSMobilePush.html). +func (c *SNS) ListEndpointsByPlatformApplication(input *ListEndpointsByPlatformApplicationInput) (*ListEndpointsByPlatformApplicationOutput, error) { + req, out := c.ListEndpointsByPlatformApplicationRequest(input) + err := req.Send() + return out, err +} + +func (c *SNS) ListEndpointsByPlatformApplicationPages(input *ListEndpointsByPlatformApplicationInput, fn func(p *ListEndpointsByPlatformApplicationOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.ListEndpointsByPlatformApplicationRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*ListEndpointsByPlatformApplicationOutput), lastPage) + }) +} + +const opListPlatformApplications = "ListPlatformApplications" + +// ListPlatformApplicationsRequest generates a request for the ListPlatformApplications operation. +func (c *SNS) ListPlatformApplicationsRequest(input *ListPlatformApplicationsInput) (req *request.Request, output *ListPlatformApplicationsOutput) { + op := &request.Operation{ + Name: opListPlatformApplications, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListPlatformApplicationsInput{} + } + + req = c.newRequest(op, input, output) + output = &ListPlatformApplicationsOutput{} + req.Data = output + return +} + +// Lists the platform application objects for the supported push notification +// services, such as APNS and GCM. The results for ListPlatformApplications +// are paginated and return a limited list of applications, up to 100. If additional +// records are available after the first page results, then a NextToken string +// will be returned. To receive the next page, you call ListPlatformApplications +// using the NextToken string received from the previous call. When there are +// no more records to return, NextToken will be null. 
For more information, +// see Using Amazon SNS Mobile Push Notifications (http://docs.aws.amazon.com/sns/latest/dg/SNSMobilePush.html). +func (c *SNS) ListPlatformApplications(input *ListPlatformApplicationsInput) (*ListPlatformApplicationsOutput, error) { + req, out := c.ListPlatformApplicationsRequest(input) + err := req.Send() + return out, err +} + +func (c *SNS) ListPlatformApplicationsPages(input *ListPlatformApplicationsInput, fn func(p *ListPlatformApplicationsOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.ListPlatformApplicationsRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*ListPlatformApplicationsOutput), lastPage) + }) +} + +const opListSubscriptions = "ListSubscriptions" + +// ListSubscriptionsRequest generates a request for the ListSubscriptions operation. +func (c *SNS) ListSubscriptionsRequest(input *ListSubscriptionsInput) (req *request.Request, output *ListSubscriptionsOutput) { + op := &request.Operation{ + Name: opListSubscriptions, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListSubscriptionsInput{} + } + + req = c.newRequest(op, input, output) + output = &ListSubscriptionsOutput{} + req.Data = output + return +} + +// Returns a list of the requester's subscriptions. Each call returns a limited +// list of subscriptions, up to 100. If there are more subscriptions, a NextToken +// is also returned. Use the NextToken parameter in a new ListSubscriptions +// call to get further results. +func (c *SNS) ListSubscriptions(input *ListSubscriptionsInput) (*ListSubscriptionsOutput, error) { + req, out := c.ListSubscriptionsRequest(input) + err := req.Send() + return out, err +} + +func (c *SNS) ListSubscriptionsPages(input *ListSubscriptionsInput, fn func(p *ListSubscriptionsOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.ListSubscriptionsRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*ListSubscriptionsOutput), lastPage) + }) +} + +const opListSubscriptionsByTopic = "ListSubscriptionsByTopic" + +// ListSubscriptionsByTopicRequest generates a request for the ListSubscriptionsByTopic operation. +func (c *SNS) ListSubscriptionsByTopicRequest(input *ListSubscriptionsByTopicInput) (req *request.Request, output *ListSubscriptionsByTopicOutput) { + op := &request.Operation{ + Name: opListSubscriptionsByTopic, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListSubscriptionsByTopicInput{} + } + + req = c.newRequest(op, input, output) + output = &ListSubscriptionsByTopicOutput{} + req.Data = output + return +} + +// Returns a list of the subscriptions to a specific topic. Each call returns +// a limited list of subscriptions, up to 100. If there are more subscriptions, +// a NextToken is also returned. Use the NextToken parameter in a new ListSubscriptionsByTopic +// call to get further results. 
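+//
+// A hedged sketch using the Pages helper defined just below (the topic ARN
+// is illustrative):
+//
+//	err := svc.ListSubscriptionsByTopicPages(
+//		&sns.ListSubscriptionsByTopicInput{
+//			TopicArn: aws.String("arn:aws:sns:us-west-2:123456789012:MyTopic"),
+//		},
+//		func(p *sns.ListSubscriptionsByTopicOutput, lastPage bool) bool {
+//			// inspect p.Subscriptions here
+//			return true // keep fetching until NextToken runs out
+//		})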
+func (c *SNS) ListSubscriptionsByTopic(input *ListSubscriptionsByTopicInput) (*ListSubscriptionsByTopicOutput, error) { + req, out := c.ListSubscriptionsByTopicRequest(input) + err := req.Send() + return out, err +} + +func (c *SNS) ListSubscriptionsByTopicPages(input *ListSubscriptionsByTopicInput, fn func(p *ListSubscriptionsByTopicOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.ListSubscriptionsByTopicRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*ListSubscriptionsByTopicOutput), lastPage) + }) +} + +const opListTopics = "ListTopics" + +// ListTopicsRequest generates a request for the ListTopics operation. +func (c *SNS) ListTopicsRequest(input *ListTopicsInput) (req *request.Request, output *ListTopicsOutput) { + op := &request.Operation{ + Name: opListTopics, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListTopicsInput{} + } + + req = c.newRequest(op, input, output) + output = &ListTopicsOutput{} + req.Data = output + return +} + +// Returns a list of the requester's topics. Each call returns a limited list +// of topics, up to 100. If there are more topics, a NextToken is also returned. +// Use the NextToken parameter in a new ListTopics call to get further results. +func (c *SNS) ListTopics(input *ListTopicsInput) (*ListTopicsOutput, error) { + req, out := c.ListTopicsRequest(input) + err := req.Send() + return out, err +} + +func (c *SNS) ListTopicsPages(input *ListTopicsInput, fn func(p *ListTopicsOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.ListTopicsRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*ListTopicsOutput), lastPage) + }) +} + +const opPublish = "Publish" + +// PublishRequest generates a request for the Publish operation. +func (c *SNS) PublishRequest(input *PublishInput) (req *request.Request, output *PublishOutput) { + op := &request.Operation{ + Name: opPublish, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &PublishInput{} + } + + req = c.newRequest(op, input, output) + output = &PublishOutput{} + req.Data = output + return +} + +// Sends a message to all of a topic's subscribed endpoints. When a messageId +// is returned, the message has been saved and Amazon SNS will attempt to deliver +// it to the topic's subscribers shortly. The format of the outgoing message +// to each subscribed endpoint depends on the notification protocol selected. +// +// To use the Publish action for sending a message to a mobile endpoint, such +// as an app on a Kindle device or mobile phone, you must specify the EndpointArn. +// The EndpointArn is returned when making a call with the CreatePlatformEndpoint +// action. The second example below shows a request and response for publishing +// to a mobile endpoint. +func (c *SNS) Publish(input *PublishInput) (*PublishOutput, error) { + req, out := c.PublishRequest(input) + err := req.Send() + return out, err +} + +const opRemovePermission = "RemovePermission" + +// RemovePermissionRequest generates a request for the RemovePermission operation. 
+func (c *SNS) RemovePermissionRequest(input *RemovePermissionInput) (req *request.Request, output *RemovePermissionOutput) { + op := &request.Operation{ + Name: opRemovePermission, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &RemovePermissionInput{} + } + + req = c.newRequest(op, input, output) + output = &RemovePermissionOutput{} + req.Data = output + return +} + +// Removes a statement from a topic's access control policy. +func (c *SNS) RemovePermission(input *RemovePermissionInput) (*RemovePermissionOutput, error) { + req, out := c.RemovePermissionRequest(input) + err := req.Send() + return out, err +} + +const opSetEndpointAttributes = "SetEndpointAttributes" + +// SetEndpointAttributesRequest generates a request for the SetEndpointAttributes operation. +func (c *SNS) SetEndpointAttributesRequest(input *SetEndpointAttributesInput) (req *request.Request, output *SetEndpointAttributesOutput) { + op := &request.Operation{ + Name: opSetEndpointAttributes, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &SetEndpointAttributesInput{} + } + + req = c.newRequest(op, input, output) + output = &SetEndpointAttributesOutput{} + req.Data = output + return +} + +// Sets the attributes for an endpoint for a device on one of the supported +// push notification services, such as GCM and APNS. For more information, see +// Using Amazon SNS Mobile Push Notifications (http://docs.aws.amazon.com/sns/latest/dg/SNSMobilePush.html). +func (c *SNS) SetEndpointAttributes(input *SetEndpointAttributesInput) (*SetEndpointAttributesOutput, error) { + req, out := c.SetEndpointAttributesRequest(input) + err := req.Send() + return out, err +} + +const opSetPlatformApplicationAttributes = "SetPlatformApplicationAttributes" + +// SetPlatformApplicationAttributesRequest generates a request for the SetPlatformApplicationAttributes operation. +func (c *SNS) SetPlatformApplicationAttributesRequest(input *SetPlatformApplicationAttributesInput) (req *request.Request, output *SetPlatformApplicationAttributesOutput) { + op := &request.Operation{ + Name: opSetPlatformApplicationAttributes, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &SetPlatformApplicationAttributesInput{} + } + + req = c.newRequest(op, input, output) + output = &SetPlatformApplicationAttributesOutput{} + req.Data = output + return +} + +// Sets the attributes of the platform application object for the supported +// push notification services, such as APNS and GCM. For more information, see +// Using Amazon SNS Mobile Push Notifications (http://docs.aws.amazon.com/sns/latest/dg/SNSMobilePush.html). +func (c *SNS) SetPlatformApplicationAttributes(input *SetPlatformApplicationAttributesInput) (*SetPlatformApplicationAttributesOutput, error) { + req, out := c.SetPlatformApplicationAttributesRequest(input) + err := req.Send() + return out, err +} + +const opSetSubscriptionAttributes = "SetSubscriptionAttributes" + +// SetSubscriptionAttributesRequest generates a request for the SetSubscriptionAttributes operation. 
+func (c *SNS) SetSubscriptionAttributesRequest(input *SetSubscriptionAttributesInput) (req *request.Request, output *SetSubscriptionAttributesOutput) {
+	op := &request.Operation{
+		Name:       opSetSubscriptionAttributes,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &SetSubscriptionAttributesInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &SetSubscriptionAttributesOutput{}
+	req.Data = output
+	return
+}
+
+// Allows a subscription owner to set an attribute of the subscription to a
+// new value.
+func (c *SNS) SetSubscriptionAttributes(input *SetSubscriptionAttributesInput) (*SetSubscriptionAttributesOutput, error) {
+	req, out := c.SetSubscriptionAttributesRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+const opSetTopicAttributes = "SetTopicAttributes"
+
+// SetTopicAttributesRequest generates a request for the SetTopicAttributes operation.
+func (c *SNS) SetTopicAttributesRequest(input *SetTopicAttributesInput) (req *request.Request, output *SetTopicAttributesOutput) {
+	op := &request.Operation{
+		Name:       opSetTopicAttributes,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &SetTopicAttributesInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &SetTopicAttributesOutput{}
+	req.Data = output
+	return
+}
+
+// Allows a topic owner to set an attribute of the topic to a new value.
+func (c *SNS) SetTopicAttributes(input *SetTopicAttributesInput) (*SetTopicAttributesOutput, error) {
+	req, out := c.SetTopicAttributesRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+const opSubscribe = "Subscribe"
+
+// SubscribeRequest generates a request for the Subscribe operation.
+func (c *SNS) SubscribeRequest(input *SubscribeInput) (req *request.Request, output *SubscribeOutput) {
+	op := &request.Operation{
+		Name:       opSubscribe,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &SubscribeInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &SubscribeOutput{}
+	req.Data = output
+	return
+}
+
+// Prepares to subscribe an endpoint by sending the endpoint a confirmation
+// message. To actually create a subscription, the endpoint owner must call
+// the ConfirmSubscription action with the token from the confirmation message.
+// Confirmation tokens are valid for three days.
+func (c *SNS) Subscribe(input *SubscribeInput) (*SubscribeOutput, error) {
+	req, out := c.SubscribeRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+const opUnsubscribe = "Unsubscribe"
+
+// UnsubscribeRequest generates a request for the Unsubscribe operation.
+func (c *SNS) UnsubscribeRequest(input *UnsubscribeInput) (req *request.Request, output *UnsubscribeOutput) {
+	op := &request.Operation{
+		Name:       opUnsubscribe,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &UnsubscribeInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &UnsubscribeOutput{}
+	req.Data = output
+	return
+}
+
+// Deletes a subscription. If the subscription requires authentication for deletion,
+// only the owner of the subscription or the topic's owner can unsubscribe,
+// and an AWS signature is required. If the Unsubscribe call does not require
+// authentication and the requester is not the subscription owner, a final cancellation
+// message is delivered to the endpoint, so that the endpoint owner can easily
+// resubscribe to the topic if the Unsubscribe request was unintended.
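+//
+// A hypothetical sketch (the subscription ARN is made up):
+//
+//	_, err := svc.Unsubscribe(&sns.UnsubscribeInput{
+//		SubscriptionArn: aws.String("arn:aws:sns:us-west-2:123456789012:MyTopic:0000-0000"),
+//	})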
+func (c *SNS) Unsubscribe(input *UnsubscribeInput) (*UnsubscribeOutput, error) { + req, out := c.UnsubscribeRequest(input) + err := req.Send() + return out, err +} + +type AddPermissionInput struct { + _ struct{} `type:"structure"` + + // The AWS account IDs of the users (principals) who will be given access to + // the specified actions. The users must have AWS accounts, but do not need + // to be signed up for this service. + AWSAccountId []*string `type:"list" required:"true"` + + // The action you want to allow for the specified principal(s). + // + // Valid values: any Amazon SNS action name. + ActionName []*string `type:"list" required:"true"` + + // A unique identifier for the new policy statement. + Label *string `type:"string" required:"true"` + + // The ARN of the topic whose access control policy you wish to modify. + TopicArn *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s AddPermissionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AddPermissionInput) GoString() string { + return s.String() +} + +type AddPermissionOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s AddPermissionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AddPermissionOutput) GoString() string { + return s.String() +} + +// Input for ConfirmSubscription action. +type ConfirmSubscriptionInput struct { + _ struct{} `type:"structure"` + + // Disallows unauthenticated unsubscribes of the subscription. If the value + // of this parameter is true and the request has an AWS signature, then only + // the topic owner and the subscription owner can unsubscribe the endpoint. + // The unsubscribe action requires AWS authentication. + AuthenticateOnUnsubscribe *string `type:"string"` + + // Short-lived token sent to an endpoint during the Subscribe action. + Token *string `type:"string" required:"true"` + + // The ARN of the topic for which you wish to confirm a subscription. + TopicArn *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s ConfirmSubscriptionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ConfirmSubscriptionInput) GoString() string { + return s.String() +} + +// Response for ConfirmSubscriptions action. +type ConfirmSubscriptionOutput struct { + _ struct{} `type:"structure"` + + // The ARN of the created subscription. + SubscriptionArn *string `type:"string"` +} + +// String returns the string representation +func (s ConfirmSubscriptionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ConfirmSubscriptionOutput) GoString() string { + return s.String() +} + +// Input for CreatePlatformApplication action. +type CreatePlatformApplicationInput struct { + _ struct{} `type:"structure"` + + // For a list of attributes, see SetPlatformApplicationAttributes (http://docs.aws.amazon.com/sns/latest/api/API_SetPlatformApplicationAttributes.html) + Attributes map[string]*string `type:"map" required:"true"` + + // Application names must be made up of only uppercase and lowercase ASCII letters, + // numbers, underscores, hyphens, and periods, and must be between 1 and 256 + // characters long. 
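+	// For example, "MyApp", "my-app_v2", and "com.example.MyApp" all satisfy
+	// these constraints (the names themselves are illustrative).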
+	Name *string `type:"string" required:"true"`
+
+	// The following platforms are supported: ADM (Amazon Device Messaging), APNS
+	// (Apple Push Notification Service), APNS_SANDBOX, and GCM (Google Cloud Messaging).
+	Platform *string `type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s CreatePlatformApplicationInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreatePlatformApplicationInput) GoString() string {
+	return s.String()
+}
+
+// Response from CreatePlatformApplication action.
+type CreatePlatformApplicationOutput struct {
+	_ struct{} `type:"structure"`
+
+	// PlatformApplicationArn is returned.
+	PlatformApplicationArn *string `type:"string"`
+}
+
+// String returns the string representation
+func (s CreatePlatformApplicationOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreatePlatformApplicationOutput) GoString() string {
+	return s.String()
+}
+
+// Input for CreatePlatformEndpoint action.
+type CreatePlatformEndpointInput struct {
+	_ struct{} `type:"structure"`
+
+	// For a list of attributes, see SetEndpointAttributes (http://docs.aws.amazon.com/sns/latest/api/API_SetEndpointAttributes.html).
+	Attributes map[string]*string `type:"map"`
+
+	// Arbitrary user data to associate with the endpoint. Amazon SNS does not use
+	// this data. The data must be in UTF-8 format and less than 2KB.
+	CustomUserData *string `type:"string"`
+
+	// PlatformApplicationArn returned from CreatePlatformApplication is used to
+	// create an endpoint.
+	PlatformApplicationArn *string `type:"string" required:"true"`
+
+	// Unique identifier created by the notification service for an app on a device.
+	// The specific name for Token will vary, depending on which notification service
+	// is being used. For example, when using APNS as the notification service,
+	// you need the device token. Alternatively, when using GCM or ADM, the device
+	// token equivalent is called the registration ID.
+	Token *string `type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s CreatePlatformEndpointInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreatePlatformEndpointInput) GoString() string {
+	return s.String()
+}
+
+// Response from CreateEndpoint action.
+type CreatePlatformEndpointOutput struct {
+	_ struct{} `type:"structure"`
+
+	// EndpointArn returned from CreateEndpoint action.
+	EndpointArn *string `type:"string"`
+}
+
+// String returns the string representation
+func (s CreatePlatformEndpointOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreatePlatformEndpointOutput) GoString() string {
+	return s.String()
+}
+
+// Input for CreateTopic action.
+type CreateTopicInput struct {
+	_ struct{} `type:"structure"`
+
+	// The name of the topic you want to create.
+	//
+	// Constraints: Topic names must be made up of only uppercase and lowercase
+	// ASCII letters, numbers, underscores, and hyphens, and must be between 1 and
+	// 256 characters long.
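+	// For example, "MyTopic-dev_2" is a valid topic name, while "my topic!"
+	// is not (spaces and other punctuation are rejected).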
+ Name *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s CreateTopicInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateTopicInput) GoString() string { + return s.String() +} + +// Response from CreateTopic action. +type CreateTopicOutput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) assigned to the created topic. + TopicArn *string `type:"string"` +} + +// String returns the string representation +func (s CreateTopicOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateTopicOutput) GoString() string { + return s.String() +} + +// Input for DeleteEndpoint action. +type DeleteEndpointInput struct { + _ struct{} `type:"structure"` + + // EndpointArn of endpoint to delete. + EndpointArn *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteEndpointInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteEndpointInput) GoString() string { + return s.String() +} + +type DeleteEndpointOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteEndpointOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteEndpointOutput) GoString() string { + return s.String() +} + +// Input for DeletePlatformApplication action. +type DeletePlatformApplicationInput struct { + _ struct{} `type:"structure"` + + // PlatformApplicationArn of platform application object to delete. + PlatformApplicationArn *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DeletePlatformApplicationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeletePlatformApplicationInput) GoString() string { + return s.String() +} + +type DeletePlatformApplicationOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeletePlatformApplicationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeletePlatformApplicationOutput) GoString() string { + return s.String() +} + +type DeleteTopicInput struct { + _ struct{} `type:"structure"` + + // The ARN of the topic you want to delete. + TopicArn *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteTopicInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteTopicInput) GoString() string { + return s.String() +} + +type DeleteTopicOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteTopicOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteTopicOutput) GoString() string { + return s.String() +} + +// Endpoint for mobile app and device. +type Endpoint struct { + _ struct{} `type:"structure"` + + // Attributes for endpoint. + Attributes map[string]*string `type:"map"` + + // EndpointArn for mobile app and device. 
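+	// A typical value has the shape (illustrative only):
+	// arn:aws:sns:us-west-2:123456789012:endpoint/GCM/MyApp/0000-0000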
+ EndpointArn *string `type:"string"` +} + +// String returns the string representation +func (s Endpoint) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Endpoint) GoString() string { + return s.String() +} + +// Input for GetEndpointAttributes action. +type GetEndpointAttributesInput struct { + _ struct{} `type:"structure"` + + // EndpointArn for GetEndpointAttributes input. + EndpointArn *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s GetEndpointAttributesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetEndpointAttributesInput) GoString() string { + return s.String() +} + +// Response from GetEndpointAttributes of the EndpointArn. +type GetEndpointAttributesOutput struct { + _ struct{} `type:"structure"` + + // Attributes include the following: + // + // CustomUserData -- arbitrary user data to associate with the endpoint. + // Amazon SNS does not use this data. The data must be in UTF-8 format and less + // than 2KB. Enabled -- flag that enables/disables delivery to the endpoint. + // Amazon SNS will set this to false when a notification service indicates to + // Amazon SNS that the endpoint is invalid. Users can set it back to true, typically + // after updating Token. Token -- device token, also referred to as a registration + // id, for an app and mobile device. This is returned from the notification + // service when an app and mobile device are registered with the notification + // service. + Attributes map[string]*string `type:"map"` +} + +// String returns the string representation +func (s GetEndpointAttributesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetEndpointAttributesOutput) GoString() string { + return s.String() +} + +// Input for GetPlatformApplicationAttributes action. +type GetPlatformApplicationAttributesInput struct { + _ struct{} `type:"structure"` + + // PlatformApplicationArn for GetPlatformApplicationAttributesInput. + PlatformApplicationArn *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s GetPlatformApplicationAttributesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetPlatformApplicationAttributesInput) GoString() string { + return s.String() +} + +// Response for GetPlatformApplicationAttributes action. +type GetPlatformApplicationAttributesOutput struct { + _ struct{} `type:"structure"` + + // Attributes include the following: + // + // EventEndpointCreated -- Topic ARN to which EndpointCreated event notifications + // should be sent. EventEndpointDeleted -- Topic ARN to which EndpointDeleted + // event notifications should be sent. EventEndpointUpdated -- Topic ARN to + // which EndpointUpdate event notifications should be sent. EventDeliveryFailure + // -- Topic ARN to which DeliveryFailure event notifications should be sent + // upon Direct Publish delivery failure (permanent) to one of the application's + // endpoints. + Attributes map[string]*string `type:"map"` +} + +// String returns the string representation +func (s GetPlatformApplicationAttributesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetPlatformApplicationAttributesOutput) GoString() string { + return s.String() +} + +// Input for GetSubscriptionAttributes. 
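The Enabled attribute described above is the flag SNS sets to false when the notification service reports an endpoint as invalid, and the documentation notes users can set it back to true after updating the token. A hedged sketch of that round trip; the ARN is a placeholder and error handling is minimal:

```go
package main

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/sns"
)

// reenableEndpoint turns delivery back on for an endpoint that SNS has
// disabled, per the Enabled attribute semantics documented above.
func reenableEndpoint(svc *sns.SNS, endpointArn string) error {
	out, err := svc.GetEndpointAttributes(&sns.GetEndpointAttributesInput{
		EndpointArn: aws.String(endpointArn),
	})
	if err != nil {
		return err
	}
	if aws.StringValue(out.Attributes["Enabled"]) == "false" {
		_, err = svc.SetEndpointAttributes(&sns.SetEndpointAttributesInput{
			EndpointArn: aws.String(endpointArn),
			Attributes:  map[string]*string{"Enabled": aws.String("true")},
		})
	}
	return err
}

func main() {
	svc := sns.New(session.New())
	_ = reenableEndpoint(svc, "arn:aws:sns:us-east-1:123456789012:endpoint/GCM/MyApp/abc") // hypothetical ARN
}
```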
+type GetSubscriptionAttributesInput struct { + _ struct{} `type:"structure"` + + // The ARN of the subscription whose properties you want to get. + SubscriptionArn *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s GetSubscriptionAttributesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetSubscriptionAttributesInput) GoString() string { + return s.String() +} + +// Response for GetSubscriptionAttributes action. +type GetSubscriptionAttributesOutput struct { + _ struct{} `type:"structure"` + + // A map of the subscription's attributes. Attributes in this map include the + // following: + // + // SubscriptionArn -- the subscription's ARN TopicArn -- the topic ARN that + // the subscription is associated with Owner -- the AWS account ID of the subscription's + // owner ConfirmationWasAuthenticated -- true if the subscription confirmation + // request was authenticated DeliveryPolicy -- the JSON serialization of the + // subscription's delivery policy EffectiveDeliveryPolicy -- the JSON serialization + // of the effective delivery policy that takes into account the topic delivery + // policy and account system defaults + Attributes map[string]*string `type:"map"` +} + +// String returns the string representation +func (s GetSubscriptionAttributesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetSubscriptionAttributesOutput) GoString() string { + return s.String() +} + +// Input for GetTopicAttributes action. +type GetTopicAttributesInput struct { + _ struct{} `type:"structure"` + + // The ARN of the topic whose properties you want to get. + TopicArn *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s GetTopicAttributesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetTopicAttributesInput) GoString() string { + return s.String() +} + +// Response for GetTopicAttributes action. +type GetTopicAttributesOutput struct { + _ struct{} `type:"structure"` + + // A map of the topic's attributes. Attributes in this map include the following: + // + // TopicArn -- the topic's ARN Owner -- the AWS account ID of the topic's + // owner Policy -- the JSON serialization of the topic's access control policy + // DisplayName -- the human-readable name used in the "From" field for notifications + // to email and email-json endpoints SubscriptionsPending -- the number of + // subscriptions pending confirmation on this topic SubscriptionsConfirmed + // -- the number of confirmed subscriptions on this topic SubscriptionsDeleted + // -- the number of deleted subscriptions on this topic DeliveryPolicy -- the + // JSON serialization of the topic's delivery policy EffectiveDeliveryPolicy + // -- the JSON serialization of the effective delivery policy that takes into + // account system defaults + Attributes map[string]*string `type:"map"` +} + +// String returns the string representation +func (s GetTopicAttributesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetTopicAttributesOutput) GoString() string { + return s.String() +} + +// Input for ListEndpointsByPlatformApplication action. 
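Every attribute in these maps comes back as a *string, so numeric attributes such as SubscriptionsConfirmed need parsing on the caller's side. A small illustrative sketch (the topic ARN is hypothetical):

```go
package main

import (
	"fmt"
	"strconv"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/sns"
)

func main() {
	svc := sns.New(session.New())

	out, err := svc.GetTopicAttributes(&sns.GetTopicAttributesInput{
		TopicArn: aws.String("arn:aws:sns:us-east-1:123456789012:MyTopic"), // hypothetical ARN
	})
	if err != nil {
		fmt.Println(err)
		return
	}
	// All attributes arrive as strings in one map; numeric ones are parsed here.
	n, _ := strconv.Atoi(aws.StringValue(out.Attributes["SubscriptionsConfirmed"]))
	fmt.Println("confirmed subscriptions:", n)
}
```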
+type ListEndpointsByPlatformApplicationInput struct { + _ struct{} `type:"structure"` + + // NextToken string is used when calling ListEndpointsByPlatformApplication + // action to retrieve additional records that are available after the first + // page results. + NextToken *string `type:"string"` + + // PlatformApplicationArn for ListEndpointsByPlatformApplicationInput action. + PlatformApplicationArn *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s ListEndpointsByPlatformApplicationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListEndpointsByPlatformApplicationInput) GoString() string { + return s.String() +} + +// Response for ListEndpointsByPlatformApplication action. +type ListEndpointsByPlatformApplicationOutput struct { + _ struct{} `type:"structure"` + + // Endpoints returned for ListEndpointsByPlatformApplication action. + Endpoints []*Endpoint `type:"list"` + + // NextToken string is returned when calling ListEndpointsByPlatformApplication + // action if additional records are available after the first page results. + NextToken *string `type:"string"` +} + +// String returns the string representation +func (s ListEndpointsByPlatformApplicationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListEndpointsByPlatformApplicationOutput) GoString() string { + return s.String() +} + +// Input for ListPlatformApplications action. +type ListPlatformApplicationsInput struct { + _ struct{} `type:"structure"` + + // NextToken string is used when calling ListPlatformApplications action to + // retrieve additional records that are available after the first page results. + NextToken *string `type:"string"` +} + +// String returns the string representation +func (s ListPlatformApplicationsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListPlatformApplicationsInput) GoString() string { + return s.String() +} + +// Response for ListPlatformApplications action. +type ListPlatformApplicationsOutput struct { + _ struct{} `type:"structure"` + + // NextToken string is returned when calling ListPlatformApplications action + // if additional records are available after the first page results. + NextToken *string `type:"string"` + + // Platform applications returned when calling ListPlatformApplications action. + PlatformApplications []*PlatformApplication `type:"list"` +} + +// String returns the string representation +func (s ListPlatformApplicationsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListPlatformApplicationsOutput) GoString() string { + return s.String() +} + +// Input for ListSubscriptionsByTopic action. +type ListSubscriptionsByTopicInput struct { + _ struct{} `type:"structure"` + + // Token returned by the previous ListSubscriptionsByTopic request. + NextToken *string `type:"string"` + + // The ARN of the topic for which you wish to find subscriptions. + TopicArn *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s ListSubscriptionsByTopicInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListSubscriptionsByTopicInput) GoString() string { + return s.String() +} + +// Response for ListSubscriptionsByTopic action. 
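The NextToken fields described above drive pagination: feed the token from one response into the next request until it comes back nil. A hedged sketch of the manual loop (the SDK also generates ...Pages helpers, as the snsiface listing later in this patch shows); the application ARN is a placeholder:

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/sns"
)

func main() {
	svc := sns.New(session.New())

	input := &sns.ListEndpointsByPlatformApplicationInput{
		PlatformApplicationArn: aws.String("arn:aws:sns:us-east-1:123456789012:app/GCM/MyApp"), // hypothetical ARN
	}
	for {
		out, err := svc.ListEndpointsByPlatformApplication(input)
		if err != nil {
			fmt.Println(err)
			return
		}
		for _, ep := range out.Endpoints {
			fmt.Println(aws.StringValue(ep.EndpointArn))
		}
		// A nil NextToken signals the last page of results.
		if out.NextToken == nil {
			break
		}
		input.NextToken = out.NextToken
	}
}
```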
+type ListSubscriptionsByTopicOutput struct { + _ struct{} `type:"structure"` + + // Token to pass along to the next ListSubscriptionsByTopic request. This element + // is returned if there are more subscriptions to retrieve. + NextToken *string `type:"string"` + + // A list of subscriptions. + Subscriptions []*Subscription `type:"list"` +} + +// String returns the string representation +func (s ListSubscriptionsByTopicOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListSubscriptionsByTopicOutput) GoString() string { + return s.String() +} + +// Input for ListSubscriptions action. +type ListSubscriptionsInput struct { + _ struct{} `type:"structure"` + + // Token returned by the previous ListSubscriptions request. + NextToken *string `type:"string"` +} + +// String returns the string representation +func (s ListSubscriptionsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListSubscriptionsInput) GoString() string { + return s.String() +} + +// Response for ListSubscriptions action +type ListSubscriptionsOutput struct { + _ struct{} `type:"structure"` + + // Token to pass along to the next ListSubscriptions request. This element is + // returned if there are more subscriptions to retrieve. + NextToken *string `type:"string"` + + // A list of subscriptions. + Subscriptions []*Subscription `type:"list"` +} + +// String returns the string representation +func (s ListSubscriptionsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListSubscriptionsOutput) GoString() string { + return s.String() +} + +type ListTopicsInput struct { + _ struct{} `type:"structure"` + + // Token returned by the previous ListTopics request. + NextToken *string `type:"string"` +} + +// String returns the string representation +func (s ListTopicsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListTopicsInput) GoString() string { + return s.String() +} + +// Response for ListTopics action. +type ListTopicsOutput struct { + _ struct{} `type:"structure"` + + // Token to pass along to the next ListTopics request. This element is returned + // if there are additional topics to retrieve. + NextToken *string `type:"string"` + + // A list of topic ARNs. + Topics []*Topic `type:"list"` +} + +// String returns the string representation +func (s ListTopicsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListTopicsOutput) GoString() string { + return s.String() +} + +// The user-specified message attribute value. For string data types, the value +// attribute has the same restrictions on the content as the message body. For +// more information, see Publish (http://docs.aws.amazon.com/sns/latest/api/API_Publish.html). +// +// Name, type, and value must not be empty or null. In addition, the message +// body should not be empty or null. All parts of the message attribute, including +// name, type, and value, are included in the message size restriction, which +// is currently 256 KB (262,144 bytes). For more information, see Using Amazon +// SNS Message Attributes (http://docs.aws.amazon.com/sns/latest/dg/SNSMessageAttributes.html). 
+type MessageAttributeValue struct { + _ struct{} `type:"structure"` + + // Binary type attributes can store any binary data, for example, compressed + // data, encrypted data, or images. + BinaryValue []byte `type:"blob"` + + // Amazon SNS supports the following logical data types: String, Number, and + // Binary. For more information, see Message Attribute Data Types (http://docs.aws.amazon.com/sns/latest/dg/SNSMessageAttributes.html#SNSMessageAttributes.DataTypes). + DataType *string `type:"string" required:"true"` + + // Strings are Unicode with UTF8 binary encoding. For a list of code values, + // see http://en.wikipedia.org/wiki/ASCII#ASCII_printable_characters (http://en.wikipedia.org/wiki/ASCII#ASCII_printable_characters). + StringValue *string `type:"string"` +} + +// String returns the string representation +func (s MessageAttributeValue) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s MessageAttributeValue) GoString() string { + return s.String() +} + +// Platform application object. +type PlatformApplication struct { + _ struct{} `type:"structure"` + + // Attributes for platform application object. + Attributes map[string]*string `type:"map"` + + // PlatformApplicationArn for platform application object. + PlatformApplicationArn *string `type:"string"` +} + +// String returns the string representation +func (s PlatformApplication) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PlatformApplication) GoString() string { + return s.String() +} + +// Input for Publish action. +type PublishInput struct { + _ struct{} `type:"structure"` + + // The message you want to send to the topic. + // + // If you want to send the same message to all transport protocols, include + // the text of the message as a String value. + // + // If you want to send different messages for each transport protocol, set + // the value of the MessageStructure parameter to json and use a JSON object + // for the Message parameter. See the Examples section for the format of the + // JSON object. + // + // Constraints: Messages must be UTF-8 encoded strings at most 256 KB in size + // (262144 bytes, not 262144 characters). + // + // JSON-specific constraints: Keys in the JSON object that correspond to supported + // transport protocols must have simple JSON string values. The values will + // be parsed (unescaped) before they are used in outgoing messages. Outbound + // notifications are JSON encoded (meaning that the characters will be reescaped + // for sending). Values have a minimum length of 0 (the empty string, "", is + // allowed). Values have a maximum length bounded by the overall message size + // (so, including multiple protocols may limit message sizes). Non-string values + // will cause the key to be ignored. Keys that do not correspond to supported + // transport protocols are ignored. Duplicate keys are not allowed. Failure + // to parse or validate any key or value in the message will cause the Publish + // call to return an error (no partial delivery). + Message *string `type:"string" required:"true"` + + // Message attributes for Publish action. + MessageAttributes map[string]*MessageAttributeValue `locationNameKey:"Name" locationNameValue:"Value" type:"map"` + + // Set MessageStructure to json if you want to send a different message for + // each protocol. 
For example, using one publish action, you can send a short + // message to your SMS subscribers and a longer message to your email subscribers. + // If you set MessageStructure to json, the value of the Message parameter must: + // + // be a syntactically valid JSON object; and contain at least a top-level + // JSON key of "default" with a value that is a string. You can define other + // top-level keys that define the message you want to send to a specific transport + // protocol (e.g., "http"). + // + // For information about sending different messages for each protocol using + // the AWS Management Console, go to Create Different Messages for Each Protocol + // (http://docs.aws.amazon.com/sns/latest/gsg/Publish.html#sns-message-formatting-by-protocol) + // in the Amazon Simple Notification Service Getting Started Guide. + // + // Valid value: json + MessageStructure *string `type:"string"` + + // Optional parameter to be used as the "Subject" line when the message is delivered + // to email endpoints. This field will also be included, if present, in the + // standard JSON messages delivered to other endpoints. + // + // Constraints: Subjects must be ASCII text that begins with a letter, number, + // or punctuation mark; must not include line breaks or control characters; + // and must be less than 100 characters long. + Subject *string `type:"string"` + + // Either TopicArn or EndpointArn, but not both. + TargetArn *string `type:"string"` + + // The topic you want to publish to. + TopicArn *string `type:"string"` +} + +// String returns the string representation +func (s PublishInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PublishInput) GoString() string { + return s.String() +} + +// Response for Publish action. +type PublishOutput struct { + _ struct{} `type:"structure"` + + // Unique identifier assigned to the published message. + // + // Length Constraint: Maximum 100 characters + MessageId *string `type:"string"` +} + +// String returns the string representation +func (s PublishOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PublishOutput) GoString() string { + return s.String() +} + +// Input for RemovePermission action. +type RemovePermissionInput struct { + _ struct{} `type:"structure"` + + // The unique label of the statement you want to remove. + Label *string `type:"string" required:"true"` + + // The ARN of the topic whose access control policy you wish to modify. + TopicArn *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s RemovePermissionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RemovePermissionInput) GoString() string { + return s.String() +} + +type RemovePermissionOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s RemovePermissionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RemovePermissionOutput) GoString() string { + return s.String() +} + +// Input for SetEndpointAttributes action. +type SetEndpointAttributesInput struct { + _ struct{} `type:"structure"` + + // A map of the endpoint attributes. Attributes in this map include the following: + // + // CustomUserData -- arbitrary user data to associate with the endpoint. + // Amazon SNS does not use this data. 
The data must be in UTF-8 format and less + // than 2KB. Enabled -- flag that enables/disables delivery to the endpoint. + // Amazon SNS will set this to false when a notification service indicates to + // Amazon SNS that the endpoint is invalid. Users can set it back to true, typically + // after updating Token. Token -- device token, also referred to as a registration + // id, for an app and mobile device. This is returned from the notification + // service when an app and mobile device are registered with the notification + // service. + Attributes map[string]*string `type:"map" required:"true"` + + // EndpointArn used for SetEndpointAttributes action. + EndpointArn *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s SetEndpointAttributesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SetEndpointAttributesInput) GoString() string { + return s.String() +} + +type SetEndpointAttributesOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s SetEndpointAttributesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SetEndpointAttributesOutput) GoString() string { + return s.String() +} + +// Input for SetPlatformApplicationAttributes action. +type SetPlatformApplicationAttributesInput struct { + _ struct{} `type:"structure"` + + // A map of the platform application attributes. Attributes in this map include + // the following: + // + // PlatformCredential -- The credential received from the notification service. + // For APNS/APNS_SANDBOX, PlatformCredential is "private key". For GCM, PlatformCredential + // is "API key". For ADM, PlatformCredential is "client secret". PlatformPrincipal + // -- The principal received from the notification service. For APNS/APNS_SANDBOX, + // PlatformPrincipal is "SSL certificate". For GCM, PlatformPrincipal is not + // applicable. For ADM, PlatformPrincipal is "client id". EventEndpointCreated + // -- Topic ARN to which EndpointCreated event notifications should be sent. + // EventEndpointDeleted -- Topic ARN to which EndpointDeleted event notifications + // should be sent. EventEndpointUpdated -- Topic ARN to which EndpointUpdate + // event notifications should be sent. EventDeliveryFailure -- Topic ARN to + // which DeliveryFailure event notifications should be sent upon Direct Publish + // delivery failure (permanent) to one of the application's endpoints. + Attributes map[string]*string `type:"map" required:"true"` + + // PlatformApplicationArn for SetPlatformApplicationAttributes action. + PlatformApplicationArn *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s SetPlatformApplicationAttributesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SetPlatformApplicationAttributesInput) GoString() string { + return s.String() +} + +type SetPlatformApplicationAttributesOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s SetPlatformApplicationAttributesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SetPlatformApplicationAttributesOutput) GoString() string { + return s.String() +} + +// Input for SetSubscriptionAttributes action. 
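Tying together the PublishInput fields documented earlier: when MessageStructure is "json", the Message must be a JSON object whose mandatory top-level "default" key carries the fallback text, with optional per-protocol overrides. A minimal sketch (the topic ARN is hypothetical):

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/sns"
)

func main() {
	svc := sns.New(session.New())

	// Per-protocol payloads; "default" is required when MessageStructure is "json".
	body, _ := json.Marshal(map[string]string{
		"default": "fallback message for any protocol",
		"sms":     "short message",
		"email":   "longer message for email subscribers",
	})

	_, err := svc.Publish(&sns.PublishInput{
		TopicArn:         aws.String("arn:aws:sns:us-east-1:123456789012:MyTopic"), // hypothetical ARN
		MessageStructure: aws.String("json"),
		Message:          aws.String(string(body)),
	})
	if err != nil {
		fmt.Println(err)
	}
}
```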
+type SetSubscriptionAttributesInput struct {
+ _ struct{} `type:"structure"`
+
+ // The name of the attribute you want to set. Only a subset of the subscription's
+ // attributes are mutable.
+ //
+ // Valid values: DeliveryPolicy | RawMessageDelivery
+ AttributeName *string `type:"string" required:"true"`
+
+ // The new value for the attribute in JSON format.
+ AttributeValue *string `type:"string"`
+
+ // The ARN of the subscription to modify.
+ SubscriptionArn *string `type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s SetSubscriptionAttributesInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s SetSubscriptionAttributesInput) GoString() string {
+ return s.String()
+}
+
+type SetSubscriptionAttributesOutput struct {
+ _ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s SetSubscriptionAttributesOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s SetSubscriptionAttributesOutput) GoString() string {
+ return s.String()
+}
+
+// Input for SetTopicAttributes action.
+type SetTopicAttributesInput struct {
+ _ struct{} `type:"structure"`
+
+ // The name of the attribute you want to set. Only a subset of the topic's attributes
+ // are mutable.
+ //
+ // Valid values: Policy | DisplayName | DeliveryPolicy
+ AttributeName *string `type:"string" required:"true"`
+
+ // The new value for the attribute.
+ AttributeValue *string `type:"string"`
+
+ // The ARN of the topic to modify.
+ TopicArn *string `type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s SetTopicAttributesInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s SetTopicAttributesInput) GoString() string {
+ return s.String()
+}
+
+type SetTopicAttributesOutput struct {
+ _ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s SetTopicAttributesOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s SetTopicAttributesOutput) GoString() string {
+ return s.String()
+}
+
+// Input for Subscribe action.
+type SubscribeInput struct {
+ _ struct{} `type:"structure"`
+
+ // The endpoint that you want to receive notifications. Endpoints vary by protocol:
+ //
+ // For the http protocol, the endpoint is a URL beginning with "http://"
+ // For the https protocol, the endpoint is a URL beginning with "https://" For
+ // the email protocol, the endpoint is an email address For the email-json protocol,
+ // the endpoint is an email address For the sms protocol, the endpoint is a
+ // phone number of an SMS-enabled device For the sqs protocol, the endpoint
+ // is the ARN of an Amazon SQS queue For the application protocol, the endpoint
+ // is the EndpointArn of a mobile app and device.
+ Endpoint *string `type:"string"`
+
+ // The protocol you want to use. Supported protocols include:
+ //
+ // http -- delivery of JSON-encoded message via HTTP POST https -- delivery
+ // of JSON-encoded message via HTTPS POST email -- delivery of message via
+ // SMTP email-json -- delivery of JSON-encoded message via SMTP sms -- delivery
+ // of message via SMS sqs -- delivery of JSON-encoded message to an Amazon
+ // SQS queue application -- delivery of JSON-encoded message to an EndpointArn
+ // for a mobile app and device.
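Given the protocol table above, subscribing an SQS queue means passing the queue's ARN as the endpoint. An illustrative sketch (both ARNs are placeholders; for protocols that require confirmation, such as email, the returned ARN may instead read "pending confirmation"):

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/sns"
)

func main() {
	svc := sns.New(session.New())

	// For the sqs protocol the endpoint is the queue's ARN, per the list above.
	out, err := svc.Subscribe(&sns.SubscribeInput{
		TopicArn: aws.String("arn:aws:sns:us-east-1:123456789012:MyTopic"), // hypothetical ARN
		Protocol: aws.String("sqs"),
		Endpoint: aws.String("arn:aws:sqs:us-east-1:123456789012:MyQueue"), // hypothetical ARN
	})
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println(aws.StringValue(out.SubscriptionArn))
}
```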
+ Protocol *string `type:"string" required:"true"` + + // The ARN of the topic you want to subscribe to. + TopicArn *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s SubscribeInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SubscribeInput) GoString() string { + return s.String() +} + +// Response for Subscribe action. +type SubscribeOutput struct { + _ struct{} `type:"structure"` + + // The ARN of the subscription, if the service was able to create a subscription + // immediately (without requiring endpoint owner confirmation). + SubscriptionArn *string `type:"string"` +} + +// String returns the string representation +func (s SubscribeOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SubscribeOutput) GoString() string { + return s.String() +} + +// A wrapper type for the attributes of an Amazon SNS subscription. +type Subscription struct { + _ struct{} `type:"structure"` + + // The subscription's endpoint (format depends on the protocol). + Endpoint *string `type:"string"` + + // The subscription's owner. + Owner *string `type:"string"` + + // The subscription's protocol. + Protocol *string `type:"string"` + + // The subscription's ARN. + SubscriptionArn *string `type:"string"` + + // The ARN of the subscription's topic. + TopicArn *string `type:"string"` +} + +// String returns the string representation +func (s Subscription) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Subscription) GoString() string { + return s.String() +} + +// A wrapper type for the topic's Amazon Resource Name (ARN). To retrieve a +// topic's attributes, use GetTopicAttributes. +type Topic struct { + _ struct{} `type:"structure"` + + // The topic's ARN. + TopicArn *string `type:"string"` +} + +// String returns the string representation +func (s Topic) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Topic) GoString() string { + return s.String() +} + +// Input for Unsubscribe action. +type UnsubscribeInput struct { + _ struct{} `type:"structure"` + + // The ARN of the subscription to be deleted. + SubscriptionArn *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s UnsubscribeInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UnsubscribeInput) GoString() string { + return s.String() +} + +type UnsubscribeOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s UnsubscribeOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UnsubscribeOutput) GoString() string { + return s.String() +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/sns/examples_test.go b/vendor/github.com/aws/aws-sdk-go/service/sns/examples_test.go new file mode 100644 index 000000000..4578ed6e7 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/sns/examples_test.go @@ -0,0 +1,542 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. 
+ +package sns_test + +import ( + "bytes" + "fmt" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/session" + "github.com/aws/aws-sdk-go/service/sns" +) + +var _ time.Duration +var _ bytes.Buffer + +func ExampleSNS_AddPermission() { + svc := sns.New(session.New()) + + params := &sns.AddPermissionInput{ + AWSAccountId: []*string{ // Required + aws.String("delegate"), // Required + // More values... + }, + ActionName: []*string{ // Required + aws.String("action"), // Required + // More values... + }, + Label: aws.String("label"), // Required + TopicArn: aws.String("topicARN"), // Required + } + resp, err := svc.AddPermission(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleSNS_ConfirmSubscription() { + svc := sns.New(session.New()) + + params := &sns.ConfirmSubscriptionInput{ + Token: aws.String("token"), // Required + TopicArn: aws.String("topicARN"), // Required + AuthenticateOnUnsubscribe: aws.String("authenticateOnUnsubscribe"), + } + resp, err := svc.ConfirmSubscription(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleSNS_CreatePlatformApplication() { + svc := sns.New(session.New()) + + params := &sns.CreatePlatformApplicationInput{ + Attributes: map[string]*string{ // Required + "Key": aws.String("String"), // Required + // More values... + }, + Name: aws.String("String"), // Required + Platform: aws.String("String"), // Required + } + resp, err := svc.CreatePlatformApplication(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleSNS_CreatePlatformEndpoint() { + svc := sns.New(session.New()) + + params := &sns.CreatePlatformEndpointInput{ + PlatformApplicationArn: aws.String("String"), // Required + Token: aws.String("String"), // Required + Attributes: map[string]*string{ + "Key": aws.String("String"), // Required + // More values... + }, + CustomUserData: aws.String("String"), + } + resp, err := svc.CreatePlatformEndpoint(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleSNS_CreateTopic() { + svc := sns.New(session.New()) + + params := &sns.CreateTopicInput{ + Name: aws.String("topicName"), // Required + } + resp, err := svc.CreateTopic(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleSNS_DeleteEndpoint() { + svc := sns.New(session.New()) + + params := &sns.DeleteEndpointInput{ + EndpointArn: aws.String("String"), // Required + } + resp, err := svc.DeleteEndpoint(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleSNS_DeletePlatformApplication() { + svc := sns.New(session.New()) + + params := &sns.DeletePlatformApplicationInput{ + PlatformApplicationArn: aws.String("String"), // Required + } + resp, err := svc.DeletePlatformApplication(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleSNS_DeleteTopic() { + svc := sns.New(session.New()) + + params := &sns.DeleteTopicInput{ + TopicArn: aws.String("topicARN"), // Required + } + resp, err := svc.DeleteTopic(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleSNS_GetEndpointAttributes() { + svc := sns.New(session.New()) + + params := &sns.GetEndpointAttributesInput{ + EndpointArn: aws.String("String"), // Required + } + resp, err := svc.GetEndpointAttributes(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleSNS_GetPlatformApplicationAttributes() { + svc := sns.New(session.New()) + + params := &sns.GetPlatformApplicationAttributesInput{ + PlatformApplicationArn: aws.String("String"), // Required + } + resp, err := svc.GetPlatformApplicationAttributes(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleSNS_GetSubscriptionAttributes() { + svc := sns.New(session.New()) + + params := &sns.GetSubscriptionAttributesInput{ + SubscriptionArn: aws.String("subscriptionARN"), // Required + } + resp, err := svc.GetSubscriptionAttributes(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleSNS_GetTopicAttributes() { + svc := sns.New(session.New()) + + params := &sns.GetTopicAttributesInput{ + TopicArn: aws.String("topicARN"), // Required + } + resp, err := svc.GetTopicAttributes(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleSNS_ListEndpointsByPlatformApplication() { + svc := sns.New(session.New()) + + params := &sns.ListEndpointsByPlatformApplicationInput{ + PlatformApplicationArn: aws.String("String"), // Required + NextToken: aws.String("String"), + } + resp, err := svc.ListEndpointsByPlatformApplication(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleSNS_ListPlatformApplications() { + svc := sns.New(session.New()) + + params := &sns.ListPlatformApplicationsInput{ + NextToken: aws.String("String"), + } + resp, err := svc.ListPlatformApplications(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleSNS_ListSubscriptions() { + svc := sns.New(session.New()) + + params := &sns.ListSubscriptionsInput{ + NextToken: aws.String("nextToken"), + } + resp, err := svc.ListSubscriptions(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleSNS_ListSubscriptionsByTopic() { + svc := sns.New(session.New()) + + params := &sns.ListSubscriptionsByTopicInput{ + TopicArn: aws.String("topicARN"), // Required + NextToken: aws.String("nextToken"), + } + resp, err := svc.ListSubscriptionsByTopic(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleSNS_ListTopics() { + svc := sns.New(session.New()) + + params := &sns.ListTopicsInput{ + NextToken: aws.String("nextToken"), + } + resp, err := svc.ListTopics(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleSNS_Publish() { + svc := sns.New(session.New()) + + params := &sns.PublishInput{ + Message: aws.String("message"), // Required + MessageAttributes: map[string]*sns.MessageAttributeValue{ + "Key": { // Required + DataType: aws.String("String"), // Required + BinaryValue: []byte("PAYLOAD"), + StringValue: aws.String("String"), + }, + // More values... + }, + MessageStructure: aws.String("messageStructure"), + Subject: aws.String("subject"), + TargetArn: aws.String("String"), + TopicArn: aws.String("topicARN"), + } + resp, err := svc.Publish(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleSNS_RemovePermission() { + svc := sns.New(session.New()) + + params := &sns.RemovePermissionInput{ + Label: aws.String("label"), // Required + TopicArn: aws.String("topicARN"), // Required + } + resp, err := svc.RemovePermission(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleSNS_SetEndpointAttributes() { + svc := sns.New(session.New()) + + params := &sns.SetEndpointAttributesInput{ + Attributes: map[string]*string{ // Required + "Key": aws.String("String"), // Required + // More values... + }, + EndpointArn: aws.String("String"), // Required + } + resp, err := svc.SetEndpointAttributes(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. 
+ fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleSNS_SetPlatformApplicationAttributes() { + svc := sns.New(session.New()) + + params := &sns.SetPlatformApplicationAttributesInput{ + Attributes: map[string]*string{ // Required + "Key": aws.String("String"), // Required + // More values... + }, + PlatformApplicationArn: aws.String("String"), // Required + } + resp, err := svc.SetPlatformApplicationAttributes(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleSNS_SetSubscriptionAttributes() { + svc := sns.New(session.New()) + + params := &sns.SetSubscriptionAttributesInput{ + AttributeName: aws.String("attributeName"), // Required + SubscriptionArn: aws.String("subscriptionARN"), // Required + AttributeValue: aws.String("attributeValue"), + } + resp, err := svc.SetSubscriptionAttributes(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleSNS_SetTopicAttributes() { + svc := sns.New(session.New()) + + params := &sns.SetTopicAttributesInput{ + AttributeName: aws.String("attributeName"), // Required + TopicArn: aws.String("topicARN"), // Required + AttributeValue: aws.String("attributeValue"), + } + resp, err := svc.SetTopicAttributes(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleSNS_Subscribe() { + svc := sns.New(session.New()) + + params := &sns.SubscribeInput{ + Protocol: aws.String("protocol"), // Required + TopicArn: aws.String("topicARN"), // Required + Endpoint: aws.String("endpoint"), + } + resp, err := svc.Subscribe(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleSNS_Unsubscribe() { + svc := sns.New(session.New()) + + params := &sns.UnsubscribeInput{ + SubscriptionArn: aws.String("subscriptionARN"), // Required + } + resp, err := svc.Unsubscribe(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/sns/service.go b/vendor/github.com/aws/aws-sdk-go/service/sns/service.go new file mode 100644 index 000000000..eb8121874 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/sns/service.go @@ -0,0 +1,98 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +package sns + +import ( + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/client" + "github.com/aws/aws-sdk-go/aws/client/metadata" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol/query" + "github.com/aws/aws-sdk-go/private/signer/v4" +) + +// Amazon Simple Notification Service (Amazon SNS) is a web service that enables +// you to build distributed web-enabled applications. 
Applications can use Amazon
+// SNS to easily push real-time notification messages to interested subscribers
+// over multiple delivery protocols. For more information about this product
+// see http://aws.amazon.com/sns (http://aws.amazon.com/sns/). For detailed
+// information about Amazon SNS features and their associated API calls, see
+// the Amazon SNS Developer Guide (http://docs.aws.amazon.com/sns/latest/dg/).
+//
+// We also provide SDKs that enable you to access Amazon SNS from your preferred
+// programming language. The SDKs contain functionality that automatically takes
+// care of tasks such as: cryptographically signing your service requests, retrying
+// requests, and handling error responses. For a list of available SDKs, go
+// to Tools for Amazon Web Services (http://aws.amazon.com/tools/).
+// The service client's operations are safe to be used concurrently.
+// It is not safe to mutate any of the client's properties though.
+type SNS struct {
+ *client.Client
+}
+
+// Used for custom client initialization logic
+var initClient func(*client.Client)
+
+// Used for custom request initialization logic
+var initRequest func(*request.Request)
+
+// A ServiceName is the name of the service the client will make API calls to.
+const ServiceName = "sns"
+
+// New creates a new instance of the SNS client with a session.
+// If additional configuration is needed for the client instance use the optional
+// aws.Config parameter to add your extra config.
+//
+// Example:
+// // Create a SNS client from just a session.
+// svc := sns.New(mySession)
+//
+// // Create a SNS client with additional configuration
+// svc := sns.New(mySession, aws.NewConfig().WithRegion("us-west-2"))
+func New(p client.ConfigProvider, cfgs ...*aws.Config) *SNS {
+ c := p.ClientConfig(ServiceName, cfgs...)
+ return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion)
+}
+
+// newClient creates, initializes and returns a new service client instance.
+func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *SNS {
+ svc := &SNS{
+ Client: client.New(
+ cfg,
+ metadata.ClientInfo{
+ ServiceName: ServiceName,
+ SigningRegion: signingRegion,
+ Endpoint: endpoint,
+ APIVersion: "2010-03-31",
+ },
+ handlers,
+ ),
+ }
+
+ // Handlers
+ svc.Handlers.Sign.PushBack(v4.Sign)
+ svc.Handlers.Build.PushBack(query.Build)
+ svc.Handlers.Unmarshal.PushBack(query.Unmarshal)
+ svc.Handlers.UnmarshalMeta.PushBack(query.UnmarshalMeta)
+ svc.Handlers.UnmarshalError.PushBack(query.UnmarshalError)
+
+ // Run custom client initialization if present
+ if initClient != nil {
+ initClient(svc.Client)
+ }
+
+ return svc
+}
+
+// newRequest creates a new request for a SNS operation and runs any
+// custom request initialization.
+func (c *SNS) newRequest(op *request.Operation, params, data interface{}) *request.Request {
+ req := c.NewRequest(op, params, data)
+
+ // Run custom request initialization if present
+ if initRequest != nil {
+ initRequest(req)
+ }
+
+ return req
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/service/sns/snsiface/interface.go b/vendor/github.com/aws/aws-sdk-go/service/sns/snsiface/interface.go
new file mode 100644
index 000000000..0df9e5024
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/sns/snsiface/interface.go
@@ -0,0 +1,124 @@
+// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT.
+
+// Package snsiface provides an interface for the Amazon Simple Notification Service.
+package snsiface + +import ( + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/service/sns" +) + +// SNSAPI is the interface type for sns.SNS. +type SNSAPI interface { + AddPermissionRequest(*sns.AddPermissionInput) (*request.Request, *sns.AddPermissionOutput) + + AddPermission(*sns.AddPermissionInput) (*sns.AddPermissionOutput, error) + + ConfirmSubscriptionRequest(*sns.ConfirmSubscriptionInput) (*request.Request, *sns.ConfirmSubscriptionOutput) + + ConfirmSubscription(*sns.ConfirmSubscriptionInput) (*sns.ConfirmSubscriptionOutput, error) + + CreatePlatformApplicationRequest(*sns.CreatePlatformApplicationInput) (*request.Request, *sns.CreatePlatformApplicationOutput) + + CreatePlatformApplication(*sns.CreatePlatformApplicationInput) (*sns.CreatePlatformApplicationOutput, error) + + CreatePlatformEndpointRequest(*sns.CreatePlatformEndpointInput) (*request.Request, *sns.CreatePlatformEndpointOutput) + + CreatePlatformEndpoint(*sns.CreatePlatformEndpointInput) (*sns.CreatePlatformEndpointOutput, error) + + CreateTopicRequest(*sns.CreateTopicInput) (*request.Request, *sns.CreateTopicOutput) + + CreateTopic(*sns.CreateTopicInput) (*sns.CreateTopicOutput, error) + + DeleteEndpointRequest(*sns.DeleteEndpointInput) (*request.Request, *sns.DeleteEndpointOutput) + + DeleteEndpoint(*sns.DeleteEndpointInput) (*sns.DeleteEndpointOutput, error) + + DeletePlatformApplicationRequest(*sns.DeletePlatformApplicationInput) (*request.Request, *sns.DeletePlatformApplicationOutput) + + DeletePlatformApplication(*sns.DeletePlatformApplicationInput) (*sns.DeletePlatformApplicationOutput, error) + + DeleteTopicRequest(*sns.DeleteTopicInput) (*request.Request, *sns.DeleteTopicOutput) + + DeleteTopic(*sns.DeleteTopicInput) (*sns.DeleteTopicOutput, error) + + GetEndpointAttributesRequest(*sns.GetEndpointAttributesInput) (*request.Request, *sns.GetEndpointAttributesOutput) + + GetEndpointAttributes(*sns.GetEndpointAttributesInput) (*sns.GetEndpointAttributesOutput, error) + + GetPlatformApplicationAttributesRequest(*sns.GetPlatformApplicationAttributesInput) (*request.Request, *sns.GetPlatformApplicationAttributesOutput) + + GetPlatformApplicationAttributes(*sns.GetPlatformApplicationAttributesInput) (*sns.GetPlatformApplicationAttributesOutput, error) + + GetSubscriptionAttributesRequest(*sns.GetSubscriptionAttributesInput) (*request.Request, *sns.GetSubscriptionAttributesOutput) + + GetSubscriptionAttributes(*sns.GetSubscriptionAttributesInput) (*sns.GetSubscriptionAttributesOutput, error) + + GetTopicAttributesRequest(*sns.GetTopicAttributesInput) (*request.Request, *sns.GetTopicAttributesOutput) + + GetTopicAttributes(*sns.GetTopicAttributesInput) (*sns.GetTopicAttributesOutput, error) + + ListEndpointsByPlatformApplicationRequest(*sns.ListEndpointsByPlatformApplicationInput) (*request.Request, *sns.ListEndpointsByPlatformApplicationOutput) + + ListEndpointsByPlatformApplication(*sns.ListEndpointsByPlatformApplicationInput) (*sns.ListEndpointsByPlatformApplicationOutput, error) + + ListEndpointsByPlatformApplicationPages(*sns.ListEndpointsByPlatformApplicationInput, func(*sns.ListEndpointsByPlatformApplicationOutput, bool) bool) error + + ListPlatformApplicationsRequest(*sns.ListPlatformApplicationsInput) (*request.Request, *sns.ListPlatformApplicationsOutput) + + ListPlatformApplications(*sns.ListPlatformApplicationsInput) (*sns.ListPlatformApplicationsOutput, error) + + ListPlatformApplicationsPages(*sns.ListPlatformApplicationsInput, func(*sns.ListPlatformApplicationsOutput, bool) bool) 
error + + ListSubscriptionsRequest(*sns.ListSubscriptionsInput) (*request.Request, *sns.ListSubscriptionsOutput) + + ListSubscriptions(*sns.ListSubscriptionsInput) (*sns.ListSubscriptionsOutput, error) + + ListSubscriptionsPages(*sns.ListSubscriptionsInput, func(*sns.ListSubscriptionsOutput, bool) bool) error + + ListSubscriptionsByTopicRequest(*sns.ListSubscriptionsByTopicInput) (*request.Request, *sns.ListSubscriptionsByTopicOutput) + + ListSubscriptionsByTopic(*sns.ListSubscriptionsByTopicInput) (*sns.ListSubscriptionsByTopicOutput, error) + + ListSubscriptionsByTopicPages(*sns.ListSubscriptionsByTopicInput, func(*sns.ListSubscriptionsByTopicOutput, bool) bool) error + + ListTopicsRequest(*sns.ListTopicsInput) (*request.Request, *sns.ListTopicsOutput) + + ListTopics(*sns.ListTopicsInput) (*sns.ListTopicsOutput, error) + + ListTopicsPages(*sns.ListTopicsInput, func(*sns.ListTopicsOutput, bool) bool) error + + PublishRequest(*sns.PublishInput) (*request.Request, *sns.PublishOutput) + + Publish(*sns.PublishInput) (*sns.PublishOutput, error) + + RemovePermissionRequest(*sns.RemovePermissionInput) (*request.Request, *sns.RemovePermissionOutput) + + RemovePermission(*sns.RemovePermissionInput) (*sns.RemovePermissionOutput, error) + + SetEndpointAttributesRequest(*sns.SetEndpointAttributesInput) (*request.Request, *sns.SetEndpointAttributesOutput) + + SetEndpointAttributes(*sns.SetEndpointAttributesInput) (*sns.SetEndpointAttributesOutput, error) + + SetPlatformApplicationAttributesRequest(*sns.SetPlatformApplicationAttributesInput) (*request.Request, *sns.SetPlatformApplicationAttributesOutput) + + SetPlatformApplicationAttributes(*sns.SetPlatformApplicationAttributesInput) (*sns.SetPlatformApplicationAttributesOutput, error) + + SetSubscriptionAttributesRequest(*sns.SetSubscriptionAttributesInput) (*request.Request, *sns.SetSubscriptionAttributesOutput) + + SetSubscriptionAttributes(*sns.SetSubscriptionAttributesInput) (*sns.SetSubscriptionAttributesOutput, error) + + SetTopicAttributesRequest(*sns.SetTopicAttributesInput) (*request.Request, *sns.SetTopicAttributesOutput) + + SetTopicAttributes(*sns.SetTopicAttributesInput) (*sns.SetTopicAttributesOutput, error) + + SubscribeRequest(*sns.SubscribeInput) (*request.Request, *sns.SubscribeOutput) + + Subscribe(*sns.SubscribeInput) (*sns.SubscribeOutput, error) + + UnsubscribeRequest(*sns.UnsubscribeInput) (*request.Request, *sns.UnsubscribeOutput) + + Unsubscribe(*sns.UnsubscribeInput) (*sns.UnsubscribeOutput, error) +} + +var _ SNSAPI = (*sns.SNS)(nil) diff --git a/vendor/github.com/aws/aws-sdk-go/service/sqs/api.go b/vendor/github.com/aws/aws-sdk-go/service/sqs/api.go new file mode 100644 index 000000000..eb9350545 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/sqs/api.go @@ -0,0 +1,1806 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +// Package sqs provides a client for Amazon Simple Queue Service. +package sqs + +import ( + "github.com/aws/aws-sdk-go/aws/awsutil" + "github.com/aws/aws-sdk-go/aws/request" +) + +const opAddPermission = "AddPermission" + +// AddPermissionRequest generates a request for the AddPermission operation. 
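One practical use of the SNSAPI interface defined above is substituting a stub for the real client in unit tests. A hedged sketch; stubSNS and notify are this editor's names, not SDK code:

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/sns"
	"github.com/aws/aws-sdk-go/service/sns/snsiface"
)

// stubSNS embeds the interface so only the methods a test exercises need real
// implementations; calling anything else panics on the nil embedded value.
type stubSNS struct {
	snsiface.SNSAPI
}

func (stubSNS) Publish(in *sns.PublishInput) (*sns.PublishOutput, error) {
	return &sns.PublishOutput{MessageId: aws.String("stub-message-id")}, nil
}

// notify is example application code written against the interface rather
// than the concrete *sns.SNS client, so tests can inject the stub.
func notify(api snsiface.SNSAPI, topicArn, msg string) (string, error) {
	out, err := api.Publish(&sns.PublishInput{
		TopicArn: aws.String(topicArn),
		Message:  aws.String(msg),
	})
	if err != nil {
		return "", err
	}
	return aws.StringValue(out.MessageId), nil
}

func main() {
	id, _ := notify(stubSNS{}, "arn:aws:sns:us-east-1:123456789012:MyTopic", "hello") // hypothetical ARN
	fmt.Println(id) // stub-message-id
}
```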
+func (c *SQS) AddPermissionRequest(input *AddPermissionInput) (req *request.Request, output *AddPermissionOutput) {
+ op := &request.Operation{
+ Name: opAddPermission,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &AddPermissionInput{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &AddPermissionOutput{}
+ req.Data = output
+ return
+}
+
+// Adds a permission to a queue for a specific principal (http://docs.aws.amazon.com/general/latest/gr/glos-chap.html#P).
+// This allows for sharing access to the queue.
+//
+// When you create a queue, you have full control access rights for the queue.
+// Only you (as owner of the queue) can grant or deny permissions to the queue.
+// For more information about these permissions, see Shared Queues (http://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/acp-overview.html)
+// in the Amazon SQS Developer Guide.
+//
+// AddPermission writes an Amazon SQS-generated policy. If you want to write
+// your own policy, use SetQueueAttributes to upload your policy. For more information
+// about writing your own policy, see Using The Access Policy Language (http://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/AccessPolicyLanguage.html)
+// in the Amazon SQS Developer Guide.
+//
+// Some API actions take lists of parameters. These lists are specified using
+// the param.n notation. Values of n are integers starting from 1. For example,
+// a parameter list with two elements looks like this: &Attribute.1=this
+//
+// &Attribute.2=that
+func (c *SQS) AddPermission(input *AddPermissionInput) (*AddPermissionOutput, error) {
+ req, out := c.AddPermissionRequest(input)
+ err := req.Send()
+ return out, err
+}
+
+const opChangeMessageVisibility = "ChangeMessageVisibility"
+
+// ChangeMessageVisibilityRequest generates a request for the ChangeMessageVisibility operation.
+func (c *SQS) ChangeMessageVisibilityRequest(input *ChangeMessageVisibilityInput) (req *request.Request, output *ChangeMessageVisibilityOutput) {
+ op := &request.Operation{
+ Name: opChangeMessageVisibility,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &ChangeMessageVisibilityInput{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &ChangeMessageVisibilityOutput{}
+ req.Data = output
+ return
+}
+
+// Changes the visibility timeout of a specified message in a queue to a new
+// value. The maximum allowed timeout value you can set is 12 hours.
+// This means you can't extend the timeout of a message in an existing queue
+// to more than a total visibility timeout of 12 hours. (For more information
+// about visibility timeout, see Visibility Timeout (http://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/AboutVT.html)
+// in the Amazon SQS Developer Guide.)
+//
+// For example, let's say you have a message and its default message visibility
+// timeout is 5 minutes. After 3 minutes, you call ChangeMessageVisibility with
+// a timeout of 10 minutes. At that time, the timeout for the message would
+// be extended by 10 minutes beyond the time of the ChangeMessageVisibility
+// call. This results in a total visibility timeout of 13 minutes. You can continue
+// to call ChangeMessageVisibility to extend the visibility timeout to a maximum
+// of 12 hours. If you try to extend beyond 12 hours, the request will be rejected.
+//
+// There is a 120,000 limit for the number of inflight messages per queue.
+// Messages are inflight after they have been received from the queue by a consuming +// component, but have not yet been deleted from the queue. If you reach the +// 120,000 limit, you will receive an OverLimit error message from Amazon SQS. +// To help avoid reaching the limit, you should delete the messages from the +// queue after they have been processed. You can also increase the number of +// queues you use to process the messages. +// +// If you attempt to set the VisibilityTimeout to an amount more than the maximum +// time left, Amazon SQS returns an error. It will not automatically recalculate +// and increase the timeout to the maximum time remaining. Unlike with a queue, +// when you change the visibility timeout for a specific message, that timeout +// value is applied immediately but is not saved in memory for that message. +// If you don't delete a message after it is received, the visibility timeout +// for the message the next time it is received reverts to the original timeout +// value, not the value you set with the ChangeMessageVisibility action. +func (c *SQS) ChangeMessageVisibility(input *ChangeMessageVisibilityInput) (*ChangeMessageVisibilityOutput, error) { + req, out := c.ChangeMessageVisibilityRequest(input) + err := req.Send() + return out, err +} + +const opChangeMessageVisibilityBatch = "ChangeMessageVisibilityBatch" + +// ChangeMessageVisibilityBatchRequest generates a request for the ChangeMessageVisibilityBatch operation. +func (c *SQS) ChangeMessageVisibilityBatchRequest(input *ChangeMessageVisibilityBatchInput) (req *request.Request, output *ChangeMessageVisibilityBatchOutput) { + op := &request.Operation{ + Name: opChangeMessageVisibilityBatch, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ChangeMessageVisibilityBatchInput{} + } + + req = c.newRequest(op, input, output) + output = &ChangeMessageVisibilityBatchOutput{} + req.Data = output + return +} + +// Changes the visibility timeout of multiple messages. This is a batch version +// of ChangeMessageVisibility. The result of the action on each message is reported +// individually in the response. You can send up to 10 ChangeMessageVisibility +// requests with each ChangeMessageVisibilityBatch action. +// +// Because the batch request can result in a combination of successful and +// unsuccessful actions, you should check for batch errors even when the call +// returns an HTTP status code of 200. Some API actions take lists of parameters. +// These lists are specified using the param.n notation. Values of n are integers +// starting from 1. For example, a parameter list with two elements looks like +// this: &Attribute.1=this +// +// &Attribute.2=that +func (c *SQS) ChangeMessageVisibilityBatch(input *ChangeMessageVisibilityBatchInput) (*ChangeMessageVisibilityBatchOutput, error) { + req, out := c.ChangeMessageVisibilityBatchRequest(input) + err := req.Send() + return out, err +} + +const opCreateQueue = "CreateQueue" + +// CreateQueueRequest generates a request for the CreateQueue operation. +func (c *SQS) CreateQueueRequest(input *CreateQueueInput) (req *request.Request, output *CreateQueueOutput) { + op := &request.Operation{ + Name: opCreateQueue, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateQueueInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateQueueOutput{} + req.Data = output + return +} + +// Creates a new queue, or returns the URL of an existing one. 
When you request +// CreateQueue, you provide a name for the queue. To successfully create a new +// queue, you must provide a name that is unique within the scope of your own +// queues. +// +// If you delete a queue, you must wait at least 60 seconds before creating +// a queue with the same name. +// +// You may pass one or more attributes in the request. If you do not provide +// a value for any attribute, the queue will have the default value for that +// attribute. Permitted attributes are the same that can be set using SetQueueAttributes. +// +// Use GetQueueUrl to get a queue's URL. GetQueueUrl requires only the QueueName +// parameter. +// +// If you provide the name of an existing queue, along with the exact names +// and values of all the queue's attributes, CreateQueue returns the queue URL +// for the existing queue. If the queue name, attribute names, or attribute +// values do not match an existing queue, CreateQueue returns an error. +// +// Some API actions take lists of parameters. These lists are specified using +// the param.n notation. Values of n are integers starting from 1. For example, +// a parameter list with two elements looks like this: &Attribute.1=this +// +// &Attribute.2=that +func (c *SQS) CreateQueue(input *CreateQueueInput) (*CreateQueueOutput, error) { + req, out := c.CreateQueueRequest(input) + err := req.Send() + return out, err +} + +const opDeleteMessage = "DeleteMessage" + +// DeleteMessageRequest generates a request for the DeleteMessage operation. +func (c *SQS) DeleteMessageRequest(input *DeleteMessageInput) (req *request.Request, output *DeleteMessageOutput) { + op := &request.Operation{ + Name: opDeleteMessage, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteMessageInput{} + } + + req = c.newRequest(op, input, output) + output = &DeleteMessageOutput{} + req.Data = output + return +} + +// Deletes the specified message from the specified queue. You specify the message +// by using the message's receipt handle and not the message ID you received +// when you sent the message. Even if the message is locked by another reader +// due to the visibility timeout setting, it is still deleted from the queue. +// If you leave a message in the queue for longer than the queue's configured +// retention period, Amazon SQS automatically deletes it. +// +// The receipt handle is associated with a specific instance of receiving +// the message. If you receive a message more than once, the receipt handle +// you get each time you receive the message is different. When you request +// DeleteMessage, if you don't provide the most recently received receipt handle +// for the message, the request will still succeed, but the message might not +// be deleted. +// +// It is possible you will receive a message even after you have deleted +// it. This might happen on rare occasions if one of the servers storing a copy +// of the message is unavailable when you request to delete the message. The +// copy remains on the server and might be returned to you again on a subsequent +// receive request. You should create your system to be idempotent so that receiving +// a particular message more than once is not a problem. +func (c *SQS) DeleteMessage(input *DeleteMessageInput) (*DeleteMessageOutput, error) { + req, out := c.DeleteMessageRequest(input) + err := req.Send() + return out, err +} + +const opDeleteMessageBatch = "DeleteMessageBatch" + +// DeleteMessageBatchRequest generates a request for the DeleteMessageBatch operation. 
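+//
+// A hedged sketch of the batch helper below (svc is an assumed *SQS client;
+// the queue URL and receipt handles are placeholders):
+//
+//	out, err := svc.DeleteMessageBatch(&DeleteMessageBatchInput{
+//		QueueUrl: aws.String(queueURL),
+//		Entries: []*DeleteMessageBatchRequestEntry{
+//			{Id: aws.String("msg-1"), ReceiptHandle: aws.String(handle1)},
+//			{Id: aws.String("msg-2"), ReceiptHandle: aws.String(handle2)},
+//		},
+//	})
+//	// Inspect out.Failed even when err is nil; batch entries can fail
+//	// individually.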
+func (c *SQS) DeleteMessageBatchRequest(input *DeleteMessageBatchInput) (req *request.Request, output *DeleteMessageBatchOutput) { + op := &request.Operation{ + Name: opDeleteMessageBatch, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteMessageBatchInput{} + } + + req = c.newRequest(op, input, output) + output = &DeleteMessageBatchOutput{} + req.Data = output + return +} + +// Deletes up to ten messages from the specified queue. This is a batch version +// of DeleteMessage. The result of the delete action on each message is reported +// individually in the response. +// +// Because the batch request can result in a combination of successful and +// unsuccessful actions, you should check for batch errors even when the call +// returns an HTTP status code of 200. +// +// Some API actions take lists of parameters. These lists are specified using +// the param.n notation. Values of n are integers starting from 1. For example, +// a parameter list with two elements looks like this: &Attribute.1=this +// +// &Attribute.2=that +func (c *SQS) DeleteMessageBatch(input *DeleteMessageBatchInput) (*DeleteMessageBatchOutput, error) { + req, out := c.DeleteMessageBatchRequest(input) + err := req.Send() + return out, err +} + +const opDeleteQueue = "DeleteQueue" + +// DeleteQueueRequest generates a request for the DeleteQueue operation. +func (c *SQS) DeleteQueueRequest(input *DeleteQueueInput) (req *request.Request, output *DeleteQueueOutput) { + op := &request.Operation{ + Name: opDeleteQueue, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteQueueInput{} + } + + req = c.newRequest(op, input, output) + output = &DeleteQueueOutput{} + req.Data = output + return +} + +// Deletes the queue specified by the queue URL, regardless of whether the queue +// is empty. If the specified queue does not exist, Amazon SQS returns a successful +// response. +// +// Use DeleteQueue with care; once you delete your queue, any messages in +// the queue are no longer available. +// +// When you delete a queue, the deletion process takes up to 60 seconds. +// Requests you send involving that queue during the 60 seconds might succeed. +// For example, a SendMessage request might succeed, but after the 60 seconds, +// the queue and that message you sent no longer exist. Also, when you delete +// a queue, you must wait at least 60 seconds before creating a queue with the +// same name. +// +// We reserve the right to delete queues that have had no activity for more +// than 30 days. For more information, see How Amazon SQS Queues Work (http://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/SQSConcepts.html) +// in the Amazon SQS Developer Guide. +func (c *SQS) DeleteQueue(input *DeleteQueueInput) (*DeleteQueueOutput, error) { + req, out := c.DeleteQueueRequest(input) + err := req.Send() + return out, err +} + +const opGetQueueAttributes = "GetQueueAttributes" + +// GetQueueAttributesRequest generates a request for the GetQueueAttributes operation. +func (c *SQS) GetQueueAttributesRequest(input *GetQueueAttributesInput) (req *request.Request, output *GetQueueAttributesOutput) { + op := &request.Operation{ + Name: opGetQueueAttributes, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetQueueAttributesInput{} + } + + req = c.newRequest(op, input, output) + output = &GetQueueAttributesOutput{} + req.Data = output + return +} + +// Gets attributes for the specified queue. 
The following attributes are supported:
+//
+//   All - returns all values.
+//
+//   ApproximateNumberOfMessages - returns the approximate number of visible
+//   messages in a queue. For more information, see Resources Required to Process
+//   Messages (http://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/ApproximateNumber.html)
+//   in the Amazon SQS Developer Guide.
+//
+//   ApproximateNumberOfMessagesNotVisible - returns the approximate number of
+//   messages that are not timed-out and not deleted. For more information, see
+//   Resources Required to Process Messages (http://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/ApproximateNumber.html)
+//   in the Amazon SQS Developer Guide.
+//
+//   VisibilityTimeout - returns the visibility timeout for the queue. For more
+//   information about visibility timeout, see Visibility Timeout (http://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/AboutVT.html)
+//   in the Amazon SQS Developer Guide.
+//
+//   CreatedTimestamp - returns the time when the queue was created (epoch time
+//   in seconds).
+//
+//   LastModifiedTimestamp - returns the time when the queue was last changed
+//   (epoch time in seconds).
+//
+//   Policy - returns the queue's policy.
+//
+//   MaximumMessageSize - returns the limit of how many bytes a message can
+//   contain before Amazon SQS rejects it.
+//
+//   MessageRetentionPeriod - returns the number of seconds Amazon SQS retains
+//   a message.
+//
+//   QueueArn - returns the queue's Amazon resource name (ARN).
+//
+//   ApproximateNumberOfMessagesDelayed - returns the approximate number of
+//   messages that are pending to be added to the queue.
+//
+//   DelaySeconds - returns the default delay on the queue in seconds.
+//
+//   ReceiveMessageWaitTimeSeconds - returns the time for which a ReceiveMessage
+//   call will wait for a message to arrive.
+//
+//   RedrivePolicy - returns the parameters for dead letter queue functionality
+//   of the source queue. For more information about RedrivePolicy and dead letter
+//   queues, see Using Amazon SQS Dead Letter Queues (http://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/SQSDeadLetterQueue.html)
+//   in the Amazon SQS Developer Guide.
+//
+// Going forward, new attributes might be added. If you are writing code that
+// calls this action, we recommend that you structure your code so that it can
+// handle new attributes gracefully. Some API actions take lists of parameters.
+// These lists are specified using the param.n notation. Values of n are integers
+// starting from 1. For example, a parameter list with two elements looks like
+// this: &Attribute.1=this
+//
+// &Attribute.2=that
+func (c *SQS) GetQueueAttributes(input *GetQueueAttributesInput) (*GetQueueAttributesOutput, error) {
+	req, out := c.GetQueueAttributesRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+const opGetQueueUrl = "GetQueueUrl"
+
+// GetQueueUrlRequest generates a request for the GetQueueUrl operation.
+func (c *SQS) GetQueueUrlRequest(input *GetQueueUrlInput) (req *request.Request, output *GetQueueUrlOutput) {
+	op := &request.Operation{
+		Name:       opGetQueueUrl,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &GetQueueUrlInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &GetQueueUrlOutput{}
+	req.Data = output
+	return
+}
+
+// Returns the URL of an existing queue. This action provides a simple way to
+// retrieve the URL of an Amazon SQS queue.
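+//
+// A minimal sketch (svc is an assumed *SQS client; the queue name is a
+// placeholder):
+//
+//	out, err := svc.GetQueueUrl(&GetQueueUrlInput{
+//		QueueName: aws.String("my-queue"),
+//	})
+//	// On success, *out.QueueUrl holds the queue URL.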
+// +// To access a queue that belongs to another AWS account, use the QueueOwnerAWSAccountId +// parameter to specify the account ID of the queue's owner. The queue's owner +// must grant you permission to access the queue. For more information about +// shared queue access, see AddPermission or go to Shared Queues (http://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/acp-overview.html) +// in the Amazon SQS Developer Guide. +func (c *SQS) GetQueueUrl(input *GetQueueUrlInput) (*GetQueueUrlOutput, error) { + req, out := c.GetQueueUrlRequest(input) + err := req.Send() + return out, err +} + +const opListDeadLetterSourceQueues = "ListDeadLetterSourceQueues" + +// ListDeadLetterSourceQueuesRequest generates a request for the ListDeadLetterSourceQueues operation. +func (c *SQS) ListDeadLetterSourceQueuesRequest(input *ListDeadLetterSourceQueuesInput) (req *request.Request, output *ListDeadLetterSourceQueuesOutput) { + op := &request.Operation{ + Name: opListDeadLetterSourceQueues, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ListDeadLetterSourceQueuesInput{} + } + + req = c.newRequest(op, input, output) + output = &ListDeadLetterSourceQueuesOutput{} + req.Data = output + return +} + +// Returns a list of your queues that have the RedrivePolicy queue attribute +// configured with a dead letter queue. +// +// For more information about using dead letter queues, see Using Amazon SQS +// Dead Letter Queues (http://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/SQSDeadLetterQueue.html). +func (c *SQS) ListDeadLetterSourceQueues(input *ListDeadLetterSourceQueuesInput) (*ListDeadLetterSourceQueuesOutput, error) { + req, out := c.ListDeadLetterSourceQueuesRequest(input) + err := req.Send() + return out, err +} + +const opListQueues = "ListQueues" + +// ListQueuesRequest generates a request for the ListQueues operation. +func (c *SQS) ListQueuesRequest(input *ListQueuesInput) (req *request.Request, output *ListQueuesOutput) { + op := &request.Operation{ + Name: opListQueues, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ListQueuesInput{} + } + + req = c.newRequest(op, input, output) + output = &ListQueuesOutput{} + req.Data = output + return +} + +// Returns a list of your queues. The maximum number of queues that can be returned +// is 1000. If you specify a value for the optional QueueNamePrefix parameter, +// only queues with a name beginning with the specified value are returned. +func (c *SQS) ListQueues(input *ListQueuesInput) (*ListQueuesOutput, error) { + req, out := c.ListQueuesRequest(input) + err := req.Send() + return out, err +} + +const opPurgeQueue = "PurgeQueue" + +// PurgeQueueRequest generates a request for the PurgeQueue operation. +func (c *SQS) PurgeQueueRequest(input *PurgeQueueInput) (req *request.Request, output *PurgeQueueOutput) { + op := &request.Operation{ + Name: opPurgeQueue, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &PurgeQueueInput{} + } + + req = c.newRequest(op, input, output) + output = &PurgeQueueOutput{} + req.Data = output + return +} + +// Deletes the messages in a queue specified by the queue URL. +// +// When you use the PurgeQueue API, the deleted messages in the queue cannot +// be retrieved. When you purge a queue, the message deletion process takes +// up to 60 seconds. All messages sent to the queue before calling PurgeQueue +// will be deleted; messages sent to the queue while it is being purged may +// be deleted. 
While the queue is being purged, messages sent to the queue before +// PurgeQueue was called may be received, but will be deleted within the next +// minute. +func (c *SQS) PurgeQueue(input *PurgeQueueInput) (*PurgeQueueOutput, error) { + req, out := c.PurgeQueueRequest(input) + err := req.Send() + return out, err +} + +const opReceiveMessage = "ReceiveMessage" + +// ReceiveMessageRequest generates a request for the ReceiveMessage operation. +func (c *SQS) ReceiveMessageRequest(input *ReceiveMessageInput) (req *request.Request, output *ReceiveMessageOutput) { + op := &request.Operation{ + Name: opReceiveMessage, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ReceiveMessageInput{} + } + + req = c.newRequest(op, input, output) + output = &ReceiveMessageOutput{} + req.Data = output + return +} + +// Retrieves one or more messages, with a maximum limit of 10 messages, from +// the specified queue. Long poll support is enabled by using the WaitTimeSeconds +// parameter. For more information, see Amazon SQS Long Poll (http://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-long-polling.html) +// in the Amazon SQS Developer Guide. +// +// Short poll is the default behavior where a weighted random set of machines +// is sampled on a ReceiveMessage call. This means only the messages on the +// sampled machines are returned. If the number of messages in the queue is +// small (less than 1000), it is likely you will get fewer messages than you +// requested per ReceiveMessage call. If the number of messages in the queue +// is extremely small, you might not receive any messages in a particular ReceiveMessage +// response; in which case you should repeat the request. +// +// For each message returned, the response includes the following: +// +// Message body +// +// MD5 digest of the message body. For information about MD5, go to http://www.faqs.org/rfcs/rfc1321.html +// (http://www.faqs.org/rfcs/rfc1321.html). +// +// Message ID you received when you sent the message to the queue. +// +// Receipt handle. +// +// Message attributes. +// +// MD5 digest of the message attributes. +// +// The receipt handle is the identifier you must provide when deleting the +// message. For more information, see Queue and Message Identifiers (http://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/ImportantIdentifiers.html) +// in the Amazon SQS Developer Guide. +// +// You can provide the VisibilityTimeout parameter in your request, which +// will be applied to the messages that Amazon SQS returns in the response. +// If you do not include the parameter, the overall visibility timeout for the +// queue is used for the returned messages. For more information, see Visibility +// Timeout (http://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/AboutVT.html) +// in the Amazon SQS Developer Guide. +// +// Going forward, new attributes might be added. If you are writing code +// that calls this action, we recommend that you structure your code so that +// it can handle new attributes gracefully. +func (c *SQS) ReceiveMessage(input *ReceiveMessageInput) (*ReceiveMessageOutput, error) { + req, out := c.ReceiveMessageRequest(input) + err := req.Send() + return out, err +} + +const opRemovePermission = "RemovePermission" + +// RemovePermissionRequest generates a request for the RemovePermission operation. 
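+//
+// A hedged sketch (svc is an assumed *SQS client; the queue URL is a
+// placeholder, and the label must match one previously set with AddPermission):
+//
+//	_, err := svc.RemovePermission(&RemovePermissionInput{
+//		QueueUrl: aws.String(queueURL),
+//		Label:    aws.String("AliceSendMessage"),
+//	})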
+func (c *SQS) RemovePermissionRequest(input *RemovePermissionInput) (req *request.Request, output *RemovePermissionOutput) {
+	op := &request.Operation{
+		Name:       opRemovePermission,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &RemovePermissionInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &RemovePermissionOutput{}
+	req.Data = output
+	return
+}
+
+// Revokes any permissions in the queue policy that match the specified Label
+// parameter. Only the owner of the queue can remove permissions.
+func (c *SQS) RemovePermission(input *RemovePermissionInput) (*RemovePermissionOutput, error) {
+	req, out := c.RemovePermissionRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+const opSendMessage = "SendMessage"
+
+// SendMessageRequest generates a request for the SendMessage operation.
+func (c *SQS) SendMessageRequest(input *SendMessageInput) (req *request.Request, output *SendMessageOutput) {
+	op := &request.Operation{
+		Name:       opSendMessage,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &SendMessageInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &SendMessageOutput{}
+	req.Data = output
+	return
+}
+
+// Delivers a message to the specified queue. With Amazon SQS, you can send
+// messages that are up to 256 KB (262,144 bytes) in size. To send large
+// payloads, you must use an AWS SDK that supports SigV4 signing. To verify
+// whether SigV4 is supported for an AWS SDK, check the SDK release notes.
+//
+// The following list shows the characters (in Unicode) allowed in your message,
+// according to the W3C XML specification. For more information, go to http://www.w3.org/TR/REC-xml/#charsets
+// (http://www.w3.org/TR/REC-xml/#charsets). If you send any characters not included
+// in the list, your request will be rejected.
+//
+// #x9 | #xA | #xD | [#x20 to #xD7FF] | [#xE000 to #xFFFD] | [#x10000 to #x10FFFF]
+func (c *SQS) SendMessage(input *SendMessageInput) (*SendMessageOutput, error) {
+	req, out := c.SendMessageRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+const opSendMessageBatch = "SendMessageBatch"
+
+// SendMessageBatchRequest generates a request for the SendMessageBatch operation.
+func (c *SQS) SendMessageBatchRequest(input *SendMessageBatchInput) (req *request.Request, output *SendMessageBatchOutput) {
+	op := &request.Operation{
+		Name:       opSendMessageBatch,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &SendMessageBatchInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &SendMessageBatchOutput{}
+	req.Data = output
+	return
+}
+
+// Delivers up to ten messages to the specified queue. This is a batch version
+// of SendMessage. The result of the send action on each message is reported
+// individually in the response. The maximum allowed individual message size
+// is 256 KB (262,144 bytes).
+//
+// The maximum total payload size (i.e., the sum of all of a batch's individual
+// message lengths) is also 256 KB (262,144 bytes).
+//
+// If the DelaySeconds parameter is not specified for an entry, the default
+// for the queue is used.
+//
+// The following list shows the characters (in Unicode) that are allowed in
+// your message, according to the W3C XML specification. For more information,
+// go to http://www.w3.org/TR/REC-xml/#charsets (http://www.w3.org/TR/REC-xml/#charsets).
+// If you send any characters that are not included in the list, your request
+// will be rejected. 
#x9 | #xA | #xD | [#x20 to #xD7FF] | [#xE000 to #xFFFD] +// | [#x10000 to #x10FFFF] +// +// Because the batch request can result in a combination of successful and +// unsuccessful actions, you should check for batch errors even when the call +// returns an HTTP status code of 200. Some API actions take lists of parameters. +// These lists are specified using the param.n notation. Values of n are integers +// starting from 1. For example, a parameter list with two elements looks like +// this: &Attribute.1=this +// +// &Attribute.2=that +func (c *SQS) SendMessageBatch(input *SendMessageBatchInput) (*SendMessageBatchOutput, error) { + req, out := c.SendMessageBatchRequest(input) + err := req.Send() + return out, err +} + +const opSetQueueAttributes = "SetQueueAttributes" + +// SetQueueAttributesRequest generates a request for the SetQueueAttributes operation. +func (c *SQS) SetQueueAttributesRequest(input *SetQueueAttributesInput) (req *request.Request, output *SetQueueAttributesOutput) { + op := &request.Operation{ + Name: opSetQueueAttributes, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &SetQueueAttributesInput{} + } + + req = c.newRequest(op, input, output) + output = &SetQueueAttributesOutput{} + req.Data = output + return +} + +// Sets the value of one or more queue attributes. When you change a queue's +// attributes, the change can take up to 60 seconds for most of the attributes +// to propagate throughout the SQS system. Changes made to the MessageRetentionPeriod +// attribute can take up to 15 minutes. +// +// Going forward, new attributes might be added. If you are writing code that +// calls this action, we recommend that you structure your code so that it can +// handle new attributes gracefully. +func (c *SQS) SetQueueAttributes(input *SetQueueAttributesInput) (*SetQueueAttributesOutput, error) { + req, out := c.SetQueueAttributesRequest(input) + err := req.Send() + return out, err +} + +type AddPermissionInput struct { + _ struct{} `type:"structure"` + + // The AWS account number of the principal (http://docs.aws.amazon.com/general/latest/gr/glos-chap.html#P) + // who will be given permission. The principal must have an AWS account, but + // does not need to be signed up for Amazon SQS. For information about locating + // the AWS account identification, see Your AWS Identifiers (http://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/AWSCredentials.html) + // in the Amazon SQS Developer Guide. + AWSAccountIds []*string `locationNameList:"AWSAccountId" type:"list" flattened:"true" required:"true"` + + // The action the client wants to allow for the specified principal. The following + // are valid values: * | SendMessage | ReceiveMessage | DeleteMessage | ChangeMessageVisibility + // | GetQueueAttributes | GetQueueUrl. For more information about these actions, + // see Understanding Permissions (http://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/acp-overview.html#PermissionTypes) + // in the Amazon SQS Developer Guide. + // + // Specifying SendMessage, DeleteMessage, or ChangeMessageVisibility for the + // ActionName.n also grants permissions for the corresponding batch versions + // of those actions: SendMessageBatch, DeleteMessageBatch, and ChangeMessageVisibilityBatch. + Actions []*string `locationNameList:"ActionName" type:"list" flattened:"true" required:"true"` + + // The unique identification of the permission you're setting (e.g., AliceSendMessage). 
+	// Constraints: Maximum 80 characters; alphanumeric characters, hyphens (-),
+	// and underscores (_) are allowed.
+	Label *string `type:"string" required:"true"`
+
+	// The URL of the Amazon SQS queue to take action on.
+	QueueUrl *string `type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s AddPermissionInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s AddPermissionInput) GoString() string {
+	return s.String()
+}
+
+type AddPermissionOutput struct {
+	_ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s AddPermissionOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s AddPermissionOutput) GoString() string {
+	return s.String()
+}
+
+// This is used in the responses of the batch API to give a detailed description
+// of the result of an action on each entry in the request.
+type BatchResultErrorEntry struct {
+	_ struct{} `type:"structure"`
+
+	// An error code representing why the action failed on this entry.
+	Code *string `type:"string" required:"true"`
+
+	// The id of an entry in a batch request.
+	Id *string `type:"string" required:"true"`
+
+	// A message explaining why the action failed on this entry.
+	Message *string `type:"string"`
+
+	// Whether the error happened due to the sender's fault.
+	SenderFault *bool `type:"boolean" required:"true"`
+}
+
+// String returns the string representation
+func (s BatchResultErrorEntry) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s BatchResultErrorEntry) GoString() string {
+	return s.String()
+}
+
+type ChangeMessageVisibilityBatchInput struct {
+	_ struct{} `type:"structure"`
+
+	// A list of receipt handles of the messages for which the visibility timeout
+	// must be changed.
+	Entries []*ChangeMessageVisibilityBatchRequestEntry `locationNameList:"ChangeMessageVisibilityBatchRequestEntry" type:"list" flattened:"true" required:"true"`
+
+	// The URL of the Amazon SQS queue to take action on.
+	QueueUrl *string `type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s ChangeMessageVisibilityBatchInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ChangeMessageVisibilityBatchInput) GoString() string {
+	return s.String()
+}
+
+// For each message in the batch, the response contains a ChangeMessageVisibilityBatchResultEntry
+// tag if the message succeeds or a BatchResultErrorEntry tag if the message
+// fails.
+type ChangeMessageVisibilityBatchOutput struct {
+	_ struct{} `type:"structure"`
+
+	// A list of BatchResultErrorEntry items.
+	Failed []*BatchResultErrorEntry `locationNameList:"BatchResultErrorEntry" type:"list" flattened:"true" required:"true"`
+
+	// A list of ChangeMessageVisibilityBatchResultEntry items.
+	Successful []*ChangeMessageVisibilityBatchResultEntry `locationNameList:"ChangeMessageVisibilityBatchResultEntry" type:"list" flattened:"true" required:"true"`
+}
+
+// String returns the string representation
+func (s ChangeMessageVisibilityBatchOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ChangeMessageVisibilityBatchOutput) GoString() string {
+	return s.String()
+}
+
+// Encloses a receipt handle and an entry id for each message in ChangeMessageVisibilityBatch.
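+//
+// A sketch of constructing one entry in Go (aws.String and aws.Int64 are the
+// SDK's pointer helpers; the values mirror the placeholder wire example below):
+//
+//	entry := &ChangeMessageVisibilityBatchRequestEntry{
+//		Id:                aws.String("change_visibility_msg_2"),
+//		ReceiptHandle:     aws.String("Your_Receipt_Handle"),
+//		VisibilityTimeout: aws.Int64(45),
+//	}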
+// +// All of the following parameters are list parameters that must be prefixed +// with ChangeMessageVisibilityBatchRequestEntry.n, where n is an integer value +// starting with 1. For example, a parameter list for this action might look +// like this: +// +// &ChangeMessageVisibilityBatchRequestEntry.1.Id=change_visibility_msg_2 +// +// &ChangeMessageVisibilityBatchRequestEntry.1.ReceiptHandle=Your_Receipt_Handle +// +// &ChangeMessageVisibilityBatchRequestEntry.1.VisibilityTimeout=45 +type ChangeMessageVisibilityBatchRequestEntry struct { + _ struct{} `type:"structure"` + + // An identifier for this particular receipt handle. This is used to communicate + // the result. Note that the Ids of a batch request need to be unique within + // the request. + Id *string `type:"string" required:"true"` + + // A receipt handle. + ReceiptHandle *string `type:"string" required:"true"` + + // The new value (in seconds) for the message's visibility timeout. + VisibilityTimeout *int64 `type:"integer"` +} + +// String returns the string representation +func (s ChangeMessageVisibilityBatchRequestEntry) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ChangeMessageVisibilityBatchRequestEntry) GoString() string { + return s.String() +} + +// Encloses the id of an entry in ChangeMessageVisibilityBatch. +type ChangeMessageVisibilityBatchResultEntry struct { + _ struct{} `type:"structure"` + + // Represents a message whose visibility timeout has been changed successfully. + Id *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s ChangeMessageVisibilityBatchResultEntry) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ChangeMessageVisibilityBatchResultEntry) GoString() string { + return s.String() +} + +type ChangeMessageVisibilityInput struct { + _ struct{} `type:"structure"` + + // The URL of the Amazon SQS queue to take action on. + QueueUrl *string `type:"string" required:"true"` + + // The receipt handle associated with the message whose visibility timeout should + // be changed. This parameter is returned by the ReceiveMessage action. + ReceiptHandle *string `type:"string" required:"true"` + + // The new value (in seconds - from 0 to 43200 - maximum 12 hours) for the message's + // visibility timeout. + VisibilityTimeout *int64 `type:"integer" required:"true"` +} + +// String returns the string representation +func (s ChangeMessageVisibilityInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ChangeMessageVisibilityInput) GoString() string { + return s.String() +} + +type ChangeMessageVisibilityOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s ChangeMessageVisibilityOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ChangeMessageVisibilityOutput) GoString() string { + return s.String() +} + +type CreateQueueInput struct { + _ struct{} `type:"structure"` + + // A map of attributes with their corresponding values. + // + // The following lists the names, descriptions, and values of the special request + // parameters the CreateQueue action uses: + // + // DelaySeconds - The time in seconds that the delivery of all messages + // in the queue will be delayed. An integer from 0 to 900 (15 minutes). The + // default for this attribute is 0 (zero). 
MaximumMessageSize - The limit of + // how many bytes a message can contain before Amazon SQS rejects it. An integer + // from 1024 bytes (1 KiB) up to 262144 bytes (256 KiB). The default for this + // attribute is 262144 (256 KiB). MessageRetentionPeriod - The number of seconds + // Amazon SQS retains a message. Integer representing seconds, from 60 (1 minute) + // to 1209600 (14 days). The default for this attribute is 345600 (4 days). + // Policy - The queue's policy. A valid AWS policy. For more information about + // policy structure, see Overview of AWS IAM Policies (http://docs.aws.amazon.com/IAM/latest/UserGuide/PoliciesOverview.html) + // in the Amazon IAM User Guide. ReceiveMessageWaitTimeSeconds - The time for + // which a ReceiveMessage call will wait for a message to arrive. An integer + // from 0 to 20 (seconds). The default for this attribute is 0. VisibilityTimeout + // - The visibility timeout for the queue. An integer from 0 to 43200 (12 hours). + // The default for this attribute is 30. For more information about visibility + // timeout, see Visibility Timeout (http://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/AboutVT.html) + // in the Amazon SQS Developer Guide. + Attributes map[string]*string `locationName:"Attribute" locationNameKey:"Name" locationNameValue:"Value" type:"map" flattened:"true"` + + // The name for the queue to be created. + QueueName *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s CreateQueueInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateQueueInput) GoString() string { + return s.String() +} + +// Returns the QueueUrl element of the created queue. +type CreateQueueOutput struct { + _ struct{} `type:"structure"` + + // The URL for the created Amazon SQS queue. + QueueUrl *string `type:"string"` +} + +// String returns the string representation +func (s CreateQueueOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateQueueOutput) GoString() string { + return s.String() +} + +type DeleteMessageBatchInput struct { + _ struct{} `type:"structure"` + + // A list of receipt handles for the messages to be deleted. + Entries []*DeleteMessageBatchRequestEntry `locationNameList:"DeleteMessageBatchRequestEntry" type:"list" flattened:"true" required:"true"` + + // The URL of the Amazon SQS queue to take action on. + QueueUrl *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteMessageBatchInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteMessageBatchInput) GoString() string { + return s.String() +} + +// For each message in the batch, the response contains a DeleteMessageBatchResultEntry +// tag if the message is deleted or a BatchResultErrorEntry tag if the message +// cannot be deleted. +type DeleteMessageBatchOutput struct { + _ struct{} `type:"structure"` + + // A list of BatchResultErrorEntry items. + Failed []*BatchResultErrorEntry `locationNameList:"BatchResultErrorEntry" type:"list" flattened:"true" required:"true"` + + // A list of DeleteMessageBatchResultEntry items. 
+	Successful []*DeleteMessageBatchResultEntry `locationNameList:"DeleteMessageBatchResultEntry" type:"list" flattened:"true" required:"true"`
+}
+
+// String returns the string representation
+func (s DeleteMessageBatchOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteMessageBatchOutput) GoString() string {
+	return s.String()
+}
+
+// Encloses a receipt handle and an identifier for it.
+type DeleteMessageBatchRequestEntry struct {
+	_ struct{} `type:"structure"`
+
+	// An identifier for this particular receipt handle. This is used to communicate
+	// the result. Note that the Ids of a batch request need to be unique within
+	// the request.
+	Id *string `type:"string" required:"true"`
+
+	// A receipt handle.
+	ReceiptHandle *string `type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s DeleteMessageBatchRequestEntry) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteMessageBatchRequestEntry) GoString() string {
+	return s.String()
+}
+
+// Encloses the id of an entry in DeleteMessageBatch.
+type DeleteMessageBatchResultEntry struct {
+	_ struct{} `type:"structure"`
+
+	// Represents a successfully deleted message.
+	Id *string `type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s DeleteMessageBatchResultEntry) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteMessageBatchResultEntry) GoString() string {
+	return s.String()
+}
+
+type DeleteMessageInput struct {
+	_ struct{} `type:"structure"`
+
+	// The URL of the Amazon SQS queue to take action on.
+	QueueUrl *string `type:"string" required:"true"`
+
+	// The receipt handle associated with the message to delete.
+	ReceiptHandle *string `type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s DeleteMessageInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteMessageInput) GoString() string {
+	return s.String()
+}
+
+type DeleteMessageOutput struct {
+	_ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s DeleteMessageOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteMessageOutput) GoString() string {
+	return s.String()
+}
+
+type DeleteQueueInput struct {
+	_ struct{} `type:"structure"`
+
+	// The URL of the Amazon SQS queue to take action on.
+	QueueUrl *string `type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s DeleteQueueInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteQueueInput) GoString() string {
+	return s.String()
+}
+
+type DeleteQueueOutput struct {
+	_ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s DeleteQueueOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteQueueOutput) GoString() string {
+	return s.String()
+}
+
+type GetQueueAttributesInput struct {
+	_ struct{} `type:"structure"`
+
+	// A list of attributes to retrieve information for.
+	AttributeNames []*string `locationNameList:"AttributeName" type:"list" flattened:"true"`
+
+	// The URL of the Amazon SQS queue to take action on.
+ QueueUrl *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s GetQueueAttributesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetQueueAttributesInput) GoString() string { + return s.String() +} + +// A list of returned queue attributes. +type GetQueueAttributesOutput struct { + _ struct{} `type:"structure"` + + // A map of attributes to the respective values. + Attributes map[string]*string `locationName:"Attribute" locationNameKey:"Name" locationNameValue:"Value" type:"map" flattened:"true"` +} + +// String returns the string representation +func (s GetQueueAttributesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetQueueAttributesOutput) GoString() string { + return s.String() +} + +type GetQueueUrlInput struct { + _ struct{} `type:"structure"` + + // The name of the queue whose URL must be fetched. Maximum 80 characters; alphanumeric + // characters, hyphens (-), and underscores (_) are allowed. + QueueName *string `type:"string" required:"true"` + + // The AWS account ID of the account that created the queue. + QueueOwnerAWSAccountId *string `type:"string"` +} + +// String returns the string representation +func (s GetQueueUrlInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetQueueUrlInput) GoString() string { + return s.String() +} + +// For more information, see Responses (http://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/UnderstandingResponses.html) +// in the Amazon SQS Developer Guide. +type GetQueueUrlOutput struct { + _ struct{} `type:"structure"` + + // The URL for the queue. + QueueUrl *string `type:"string"` +} + +// String returns the string representation +func (s GetQueueUrlOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetQueueUrlOutput) GoString() string { + return s.String() +} + +type ListDeadLetterSourceQueuesInput struct { + _ struct{} `type:"structure"` + + // The queue URL of a dead letter queue. + QueueUrl *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s ListDeadLetterSourceQueuesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListDeadLetterSourceQueuesInput) GoString() string { + return s.String() +} + +// A list of your dead letter source queues. +type ListDeadLetterSourceQueuesOutput struct { + _ struct{} `type:"structure"` + + // A list of source queue URLs that have the RedrivePolicy queue attribute configured + // with a dead letter queue. + QueueUrls []*string `locationName:"queueUrls" locationNameList:"QueueUrl" type:"list" flattened:"true" required:"true"` +} + +// String returns the string representation +func (s ListDeadLetterSourceQueuesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListDeadLetterSourceQueuesOutput) GoString() string { + return s.String() +} + +type ListQueuesInput struct { + _ struct{} `type:"structure"` + + // A string to use for filtering the list results. Only those queues whose name + // begins with the specified string are returned. 
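+	//
+	// A sketch (svc is an assumed *SQS client; the prefix is a placeholder):
+	//
+	//	out, err := svc.ListQueues(&ListQueuesInput{
+	//		QueueNamePrefix: aws.String("prod-"),
+	//	})
+	//	// On success, out.QueueUrls holds the matching queue URLs.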
+ QueueNamePrefix *string `type:"string"` +} + +// String returns the string representation +func (s ListQueuesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListQueuesInput) GoString() string { + return s.String() +} + +// A list of your queues. +type ListQueuesOutput struct { + _ struct{} `type:"structure"` + + // A list of queue URLs, up to 1000 entries. + QueueUrls []*string `locationNameList:"QueueUrl" type:"list" flattened:"true"` +} + +// String returns the string representation +func (s ListQueuesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListQueuesOutput) GoString() string { + return s.String() +} + +// An Amazon SQS message. +type Message struct { + _ struct{} `type:"structure"` + + // SenderId, SentTimestamp, ApproximateReceiveCount, and/or ApproximateFirstReceiveTimestamp. + // SentTimestamp and ApproximateFirstReceiveTimestamp are each returned as an + // integer representing the epoch time (http://en.wikipedia.org/wiki/Unix_time) + // in milliseconds. + Attributes map[string]*string `locationName:"Attribute" locationNameKey:"Name" locationNameValue:"Value" type:"map" flattened:"true"` + + // The message's contents (not URL-encoded). + Body *string `type:"string"` + + // An MD5 digest of the non-URL-encoded message body string. + MD5OfBody *string `type:"string"` + + // An MD5 digest of the non-URL-encoded message attribute string. This can be + // used to verify that Amazon SQS received the message correctly. Amazon SQS + // first URL decodes the message before creating the MD5 digest. For information + // about MD5, go to http://www.faqs.org/rfcs/rfc1321.html (http://www.faqs.org/rfcs/rfc1321.html). + MD5OfMessageAttributes *string `type:"string"` + + // Each message attribute consists of a Name, Type, and Value. For more information, + // see Message Attribute Items (http://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/SQSMessageAttributes.html#SQSMessageAttributesNTV). + MessageAttributes map[string]*MessageAttributeValue `locationName:"MessageAttribute" locationNameKey:"Name" locationNameValue:"Value" type:"map" flattened:"true"` + + // A unique identifier for the message. Message IDs are considered unique across + // all AWS accounts for an extended period of time. + MessageId *string `type:"string"` + + // An identifier associated with the act of receiving the message. A new receipt + // handle is returned every time you receive a message. When deleting a message, + // you provide the last received receipt handle to delete the message. + ReceiptHandle *string `type:"string"` +} + +// String returns the string representation +func (s Message) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Message) GoString() string { + return s.String() +} + +// The user-specified message attribute value. For string data types, the value +// attribute has the same restrictions on the content as the message body. For +// more information, see SendMessage (http://docs.aws.amazon.com/AWSSimpleQueueService/latest/APIReference/API_SendMessage.html). +// +// Name, type, and value must not be empty or null. In addition, the message +// body should not be empty or null. All parts of the message attribute, including +// name, type, and value, are included in the message size restriction, which +// is currently 256 KB (262,144 bytes). 
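+//
+// A sketch of a String-typed attribute (aws.String is the SDK's pointer
+// helper; the name and value are placeholders):
+//
+//	attrs := map[string]*MessageAttributeValue{
+//		"customer_id": {
+//			DataType:    aws.String("String"),
+//			StringValue: aws.String("12345"),
+//		},
+//	}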
+type MessageAttributeValue struct {
+	_ struct{} `type:"structure"`
+
+	// Not implemented. Reserved for future use.
+	BinaryListValues [][]byte `locationName:"BinaryListValue" locationNameList:"BinaryListValue" type:"list" flattened:"true"`
+
+	// Binary type attributes can store any binary data, for example, compressed
+	// data, encrypted data, or images.
+	BinaryValue []byte `type:"blob"`
+
+	// Amazon SQS supports the following logical data types: String, Number, and
+	// Binary. In addition, you can append your own custom labels. For more information,
+	// see Message Attribute Data Types (http://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/SQSMessageAttributes.html#SQSMessageAttributes.DataTypes).
+	DataType *string `type:"string" required:"true"`
+
+	// Not implemented. Reserved for future use.
+	StringListValues []*string `locationName:"StringListValue" locationNameList:"StringListValue" type:"list" flattened:"true"`
+
+	// Strings are Unicode with UTF-8 binary encoding. For a list of code values,
+	// see http://en.wikipedia.org/wiki/ASCII#ASCII_printable_characters (http://en.wikipedia.org/wiki/ASCII#ASCII_printable_characters).
+	StringValue *string `type:"string"`
+}
+
+// String returns the string representation
+func (s MessageAttributeValue) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s MessageAttributeValue) GoString() string {
+	return s.String()
+}
+
+type PurgeQueueInput struct {
+	_ struct{} `type:"structure"`
+
+	// The queue URL of the queue to delete the messages from when using the PurgeQueue
+	// API.
+	QueueUrl *string `type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s PurgeQueueInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s PurgeQueueInput) GoString() string {
+	return s.String()
+}
+
+type PurgeQueueOutput struct {
+	_ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s PurgeQueueOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s PurgeQueueOutput) GoString() string {
+	return s.String()
+}
+
+type ReceiveMessageInput struct {
+	_ struct{} `type:"structure"`
+
+	// A list of attributes that need to be returned along with each message.
+	//
+	// The following lists the names and descriptions of the attributes that can
+	// be returned:
+	//
+	//   All - returns all values.
+	//
+	//   ApproximateFirstReceiveTimestamp - returns the time when the message was
+	//   first received from the queue (epoch time in milliseconds).
+	//
+	//   ApproximateReceiveCount - returns the number of times a message has been
+	//   received from the queue but not deleted.
+	//
+	//   SenderId - returns the AWS account number (or the IP address, if anonymous
+	//   access is allowed) of the sender.
+	//
+	//   SentTimestamp - returns the time when the message was sent to the queue
+	//   (epoch time in milliseconds).
+	AttributeNames []*string `locationNameList:"AttributeName" type:"list" flattened:"true"`
+
+	// The maximum number of messages to return. Amazon SQS never returns more messages
+	// than this value but may return fewer. Values can be from 1 to 10. Default
+	// is 1.
+	//
+	// Not all of the messages are necessarily returned.
+	MaxNumberOfMessages *int64 `type:"integer"`
+
+	// The name of the message attribute, where N is the index. 
The message attribute + // name can contain the following characters: A-Z, a-z, 0-9, underscore (_), + // hyphen (-), and period (.). The name must not start or end with a period, + // and it should not have successive periods. The name is case sensitive and + // must be unique among all attribute names for the message. The name can be + // up to 256 characters long. The name cannot start with "AWS." or "Amazon." + // (or any variations in casing), because these prefixes are reserved for use + // by Amazon Web Services. + // + // When using ReceiveMessage, you can send a list of attribute names to receive, + // or you can return all of the attributes by specifying "All" or ".*" in your + // request. You can also use "foo.*" to return all message attributes starting + // with the "foo" prefix. + MessageAttributeNames []*string `locationNameList:"MessageAttributeName" type:"list" flattened:"true"` + + // The URL of the Amazon SQS queue to take action on. + QueueUrl *string `type:"string" required:"true"` + + // The duration (in seconds) that the received messages are hidden from subsequent + // retrieve requests after being retrieved by a ReceiveMessage request. + VisibilityTimeout *int64 `type:"integer"` + + // The duration (in seconds) for which the call will wait for a message to arrive + // in the queue before returning. If a message is available, the call will return + // sooner than WaitTimeSeconds. + WaitTimeSeconds *int64 `type:"integer"` +} + +// String returns the string representation +func (s ReceiveMessageInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ReceiveMessageInput) GoString() string { + return s.String() +} + +// A list of received messages. +type ReceiveMessageOutput struct { + _ struct{} `type:"structure"` + + // A list of messages. + Messages []*Message `locationNameList:"Message" type:"list" flattened:"true"` +} + +// String returns the string representation +func (s ReceiveMessageOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ReceiveMessageOutput) GoString() string { + return s.String() +} + +type RemovePermissionInput struct { + _ struct{} `type:"structure"` + + // The identification of the permission to remove. This is the label added with + // the AddPermission action. + Label *string `type:"string" required:"true"` + + // The URL of the Amazon SQS queue to take action on. + QueueUrl *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s RemovePermissionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RemovePermissionInput) GoString() string { + return s.String() +} + +type RemovePermissionOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s RemovePermissionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RemovePermissionOutput) GoString() string { + return s.String() +} + +type SendMessageBatchInput struct { + _ struct{} `type:"structure"` + + // A list of SendMessageBatchRequestEntry items. + Entries []*SendMessageBatchRequestEntry `locationNameList:"SendMessageBatchRequestEntry" type:"list" flattened:"true" required:"true"` + + // The URL of the Amazon SQS queue to take action on. 
+	QueueUrl *string `type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s SendMessageBatchInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s SendMessageBatchInput) GoString() string {
+	return s.String()
+}
+
+// For each message in the batch, the response contains a SendMessageBatchResultEntry
+// tag if the message succeeds or a BatchResultErrorEntry tag if the message
+// fails.
+type SendMessageBatchOutput struct {
+	_ struct{} `type:"structure"`
+
+	// A list of BatchResultErrorEntry items with the error detail about each message
+	// that could not be enqueued.
+	Failed []*BatchResultErrorEntry `locationNameList:"BatchResultErrorEntry" type:"list" flattened:"true" required:"true"`
+
+	// A list of SendMessageBatchResultEntry items.
+	Successful []*SendMessageBatchResultEntry `locationNameList:"SendMessageBatchResultEntry" type:"list" flattened:"true" required:"true"`
+}
+
+// String returns the string representation
+func (s SendMessageBatchOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s SendMessageBatchOutput) GoString() string {
+	return s.String()
+}
+
+// Contains the details of a single Amazon SQS message along with an Id.
+type SendMessageBatchRequestEntry struct {
+	_ struct{} `type:"structure"`
+
+	// The number of seconds for which the message has to be delayed.
+	DelaySeconds *int64 `type:"integer"`
+
+	// An identifier for the message in this batch. This is used to communicate
+	// the result. Note that the Ids of a batch request need to be unique within
+	// the request.
+	Id *string `type:"string" required:"true"`
+
+	// Each message attribute consists of a Name, Type, and Value. For more information,
+	// see Message Attribute Items (http://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/SQSMessageAttributes.html#SQSMessageAttributesNTV).
+	MessageAttributes map[string]*MessageAttributeValue `locationName:"MessageAttribute" locationNameKey:"Name" locationNameValue:"Value" type:"map" flattened:"true"`
+
+	// Body of the message.
+	MessageBody *string `type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s SendMessageBatchRequestEntry) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s SendMessageBatchRequestEntry) GoString() string {
+	return s.String()
+}
+
+// Encloses a message ID for a successfully enqueued message of a SendMessageBatch.
+type SendMessageBatchResultEntry struct {
+	_ struct{} `type:"structure"`
+
+	// An identifier for the message in this batch.
+	Id *string `type:"string" required:"true"`
+
+	// An MD5 digest of the non-URL-encoded message attribute string. This can be
+	// used to verify that Amazon SQS received the message batch correctly. Amazon
+	// SQS first URL decodes the message before creating the MD5 digest. For information
+	// about MD5, go to http://www.faqs.org/rfcs/rfc1321.html (http://www.faqs.org/rfcs/rfc1321.html).
+	MD5OfMessageAttributes *string `type:"string"`
+
+	// An MD5 digest of the non-URL-encoded message body string. This can be used
+	// to verify that Amazon SQS received the message correctly. Amazon SQS first
+	// URL decodes the message before creating the MD5 digest. For information about
+	// MD5, go to http://www.faqs.org/rfcs/rfc1321.html (http://www.faqs.org/rfcs/rfc1321.html).
+ MD5OfMessageBody *string `type:"string" required:"true"` + + // An identifier for the message. + MessageId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s SendMessageBatchResultEntry) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SendMessageBatchResultEntry) GoString() string { + return s.String() +} + +type SendMessageInput struct { + _ struct{} `type:"structure"` + + // The number of seconds (0 to 900 - 15 minutes) to delay a specific message. + // Messages with a positive DelaySeconds value become available for processing + // after the delay time is finished. If you don't specify a value, the default + // value for the queue applies. + DelaySeconds *int64 `type:"integer"` + + // Each message attribute consists of a Name, Type, and Value. For more information, + // see Message Attribute Items (http://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/SQSMessageAttributes.html#SQSMessageAttributesNTV). + MessageAttributes map[string]*MessageAttributeValue `locationName:"MessageAttribute" locationNameKey:"Name" locationNameValue:"Value" type:"map" flattened:"true"` + + // The message to send. String maximum 256 KB in size. For a list of allowed + // characters, see the preceding important note. + MessageBody *string `type:"string" required:"true"` + + // The URL of the Amazon SQS queue to take action on. + QueueUrl *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s SendMessageInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SendMessageInput) GoString() string { + return s.String() +} + +// The MD5OfMessageBody and MessageId elements. +type SendMessageOutput struct { + _ struct{} `type:"structure"` + + // An MD5 digest of the non-URL-encoded message attribute string. This can be + // used to verify that Amazon SQS received the message correctly. Amazon SQS + // first URL decodes the message before creating the MD5 digest. For information + // about MD5, go to http://www.faqs.org/rfcs/rfc1321.html (http://www.faqs.org/rfcs/rfc1321.html). + MD5OfMessageAttributes *string `type:"string"` + + // An MD5 digest of the non-URL-encoded message body string. This can be used + // to verify that Amazon SQS received the message correctly. Amazon SQS first + // URL decodes the message before creating the MD5 digest. For information about + // MD5, go to http://www.faqs.org/rfcs/rfc1321.html (http://www.faqs.org/rfcs/rfc1321.html). + MD5OfMessageBody *string `type:"string"` + + // An element containing the message ID of the message sent to the queue. For + // more information, see Queue and Message Identifiers (http://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/ImportantIdentifiers.html) + // in the Amazon SQS Developer Guide. + MessageId *string `type:"string"` +} + +// String returns the string representation +func (s SendMessageOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SendMessageOutput) GoString() string { + return s.String() +} + +type SetQueueAttributesInput struct { + _ struct{} `type:"structure"` + + // A map of attributes to set. 
+	//
+	// The following lists the names, descriptions, and values of the special request
+	// parameters the SetQueueAttributes action uses:
+	//
+	//   DelaySeconds - The time in seconds that the delivery of all messages
+	//   in the queue will be delayed. An integer from 0 to 900 (15 minutes). The
+	//   default for this attribute is 0 (zero).
+	//
+	//   MaximumMessageSize - The limit of how many bytes a message can contain
+	//   before Amazon SQS rejects it. An integer from 1024 bytes (1 KiB) up to
+	//   262144 bytes (256 KiB). The default for this attribute is 262144 (256 KiB).
+	//
+	//   MessageRetentionPeriod - The number of seconds Amazon SQS retains a message.
+	//   Integer representing seconds, from 60 (1 minute) to 1209600 (14 days).
+	//   The default for this attribute is 345600 (4 days).
+	//
+	//   Policy - The queue's policy. A valid AWS policy. For more information about
+	//   policy structure, see Overview of AWS IAM Policies (http://docs.aws.amazon.com/IAM/latest/UserGuide/PoliciesOverview.html)
+	//   in the Amazon IAM User Guide.
+	//
+	//   ReceiveMessageWaitTimeSeconds - The time for which a ReceiveMessage call
+	//   will wait for a message to arrive. An integer from 0 to 20 (seconds). The
+	//   default for this attribute is 0.
+	//
+	//   VisibilityTimeout - The visibility timeout for the queue. An integer from
+	//   0 to 43200 (12 hours). The default for this attribute is 30. For more
+	//   information about visibility timeout, see Visibility Timeout in the Amazon
+	//   SQS Developer Guide.
+	//
+	//   RedrivePolicy - The parameters for dead letter queue functionality of the
+	//   source queue. For more information about RedrivePolicy and dead letter
+	//   queues, see Using Amazon SQS Dead Letter Queues in the Amazon SQS Developer
+	//   Guide.
+	Attributes map[string]*string `locationName:"Attribute" locationNameKey:"Name" locationNameValue:"Value" type:"map" flattened:"true" required:"true"`
+
+	// The URL of the Amazon SQS queue to take action on.
+	QueueUrl *string `type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s SetQueueAttributesInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s SetQueueAttributesInput) GoString() string {
+	return s.String()
+}
+
+type SetQueueAttributesOutput struct {
+	_ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s SetQueueAttributesOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s SetQueueAttributesOutput) GoString() string {
+	return s.String()
+}
+
+const (
+	// @enum QueueAttributeName
+	QueueAttributeNamePolicy = "Policy"
+	// @enum QueueAttributeName
+	QueueAttributeNameVisibilityTimeout = "VisibilityTimeout"
+	// @enum QueueAttributeName
+	QueueAttributeNameMaximumMessageSize = "MaximumMessageSize"
+	// @enum QueueAttributeName
+	QueueAttributeNameMessageRetentionPeriod = "MessageRetentionPeriod"
+	// @enum QueueAttributeName
+	QueueAttributeNameApproximateNumberOfMessages = "ApproximateNumberOfMessages"
+	// @enum QueueAttributeName
+	QueueAttributeNameApproximateNumberOfMessagesNotVisible = "ApproximateNumberOfMessagesNotVisible"
+	// @enum QueueAttributeName
+	QueueAttributeNameCreatedTimestamp = "CreatedTimestamp"
+	// @enum QueueAttributeName
+	QueueAttributeNameLastModifiedTimestamp = "LastModifiedTimestamp"
+	// @enum QueueAttributeName
+	QueueAttributeNameQueueArn = "QueueArn"
+	// @enum QueueAttributeName
+	QueueAttributeNameApproximateNumberOfMessagesDelayed = "ApproximateNumberOfMessagesDelayed"
+	// @enum QueueAttributeName
+	QueueAttributeNameDelaySeconds = "DelaySeconds"
+	// @enum QueueAttributeName
+	QueueAttributeNameReceiveMessageWaitTimeSeconds = "ReceiveMessageWaitTimeSeconds"
+	// @enum QueueAttributeName
+	QueueAttributeNameRedrivePolicy = "RedrivePolicy"
+)
diff --git a/vendor/github.com/aws/aws-sdk-go/service/sqs/api_test.go b/vendor/github.com/aws/aws-sdk-go/service/sqs/api_test.go
new file mode 100644
index 000000000..3f5517e60
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/sqs/api_test.go
@@ -0,0 +1,31 @@
+// +build integration
+
+package sqs_test
+
+import (
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/aws/awserr"
+	"github.com/aws/aws-sdk-go/aws/session"
+	"github.com/aws/aws-sdk-go/service/sqs"
+)
+
+func TestFlattenedTraits(t *testing.T) {
+	s := sqs.New(session.New())
+	_, err := s.DeleteMessageBatch(&sqs.DeleteMessageBatchInput{
+		QueueUrl: aws.String("QUEUE"),
+		Entries: []*sqs.DeleteMessageBatchRequestEntry{
+			{
+				Id:            aws.String("TEST"),
+				ReceiptHandle: aws.String("RECEIPT"),
+			},
+		},
+	})
+
+	assert.Error(t, err)
+	assert.Equal(t, "InvalidAddress", err.(awserr.Error).Code())
+	assert.Equal(t, "The address QUEUE is not valid for this endpoint.", err.(awserr.Error).Message())
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/service/sqs/checksums.go b/vendor/github.com/aws/aws-sdk-go/service/sqs/checksums.go
new file mode 100644
index 000000000..ef0c3d014
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/sqs/checksums.go
@@ -0,0 +1,105 @@
+package sqs
+
+import (
+	"crypto/md5"
+	"encoding/hex"
+	"fmt"
+	"strings"
+
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/aws/awserr"
+	"github.com/aws/aws-sdk-go/aws/request"
+)
+
+var (
+	errChecksumMissingBody = fmt.Errorf("cannot compute checksum. missing body")
+	errChecksumMissingMD5  = fmt.Errorf("cannot verify checksum.
missing response MD5") +) + +func setupChecksumValidation(r *request.Request) { + if aws.BoolValue(r.Config.DisableComputeChecksums) { + return + } + + switch r.Operation.Name { + case opSendMessage: + r.Handlers.Unmarshal.PushBack(verifySendMessage) + case opSendMessageBatch: + r.Handlers.Unmarshal.PushBack(verifySendMessageBatch) + case opReceiveMessage: + r.Handlers.Unmarshal.PushBack(verifyReceiveMessage) + } +} + +func verifySendMessage(r *request.Request) { + if r.DataFilled() && r.ParamsFilled() { + in := r.Params.(*SendMessageInput) + out := r.Data.(*SendMessageOutput) + err := checksumsMatch(in.MessageBody, out.MD5OfMessageBody) + if err != nil { + setChecksumError(r, err.Error()) + } + } +} + +func verifySendMessageBatch(r *request.Request) { + if r.DataFilled() && r.ParamsFilled() { + entries := map[string]*SendMessageBatchResultEntry{} + ids := []string{} + + out := r.Data.(*SendMessageBatchOutput) + for _, entry := range out.Successful { + entries[*entry.Id] = entry + } + + in := r.Params.(*SendMessageBatchInput) + for _, entry := range in.Entries { + if e := entries[*entry.Id]; e != nil { + err := checksumsMatch(entry.MessageBody, e.MD5OfMessageBody) + if err != nil { + ids = append(ids, *e.MessageId) + } + } + } + if len(ids) > 0 { + setChecksumError(r, "invalid messages: %s", strings.Join(ids, ", ")) + } + } +} + +func verifyReceiveMessage(r *request.Request) { + if r.DataFilled() && r.ParamsFilled() { + ids := []string{} + out := r.Data.(*ReceiveMessageOutput) + for _, msg := range out.Messages { + err := checksumsMatch(msg.Body, msg.MD5OfBody) + if err != nil { + ids = append(ids, *msg.MessageId) + } + } + if len(ids) > 0 { + setChecksumError(r, "invalid messages: %s", strings.Join(ids, ", ")) + } + } +} + +func checksumsMatch(body, expectedMD5 *string) error { + if body == nil { + return errChecksumMissingBody + } else if expectedMD5 == nil { + return errChecksumMissingMD5 + } + + msum := md5.Sum([]byte(*body)) + sum := hex.EncodeToString(msum[:]) + if sum != *expectedMD5 { + return fmt.Errorf("expected MD5 checksum '%s', got '%s'", *expectedMD5, sum) + } + + return nil +} + +func setChecksumError(r *request.Request, format string, args ...interface{}) { + r.Retryable = aws.Bool(true) + r.Error = awserr.New("InvalidChecksum", fmt.Sprintf(format, args...), nil) +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/sqs/checksums_test.go b/vendor/github.com/aws/aws-sdk-go/service/sqs/checksums_test.go new file mode 100644 index 000000000..b6a2a0a2e --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/sqs/checksums_test.go @@ -0,0 +1,207 @@ +package sqs_test + +import ( + "bytes" + "io/ioutil" + "net/http" + "testing" + + "github.com/stretchr/testify/assert" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/awstesting/unit" + "github.com/aws/aws-sdk-go/service/sqs" +) + +var svc = func() *sqs.SQS { + s := sqs.New(unit.Session, &aws.Config{ + DisableParamValidation: aws.Bool(true), + }) + s.Handlers.Send.Clear() + return s +}() + +func TestSendMessageChecksum(t *testing.T) { + req, _ := svc.SendMessageRequest(&sqs.SendMessageInput{ + MessageBody: aws.String("test"), + }) + req.Handlers.Send.PushBack(func(r *request.Request) { + body := ioutil.NopCloser(bytes.NewReader([]byte(""))) + r.HTTPResponse = &http.Response{StatusCode: 200, Body: body} + r.Data = &sqs.SendMessageOutput{ + MD5OfMessageBody: aws.String("098f6bcd4621d373cade4e832627b4f6"), + MessageId: 
aws.String("12345"), + } + }) + err := req.Send() + assert.NoError(t, err) +} + +func TestSendMessageChecksumInvalid(t *testing.T) { + req, _ := svc.SendMessageRequest(&sqs.SendMessageInput{ + MessageBody: aws.String("test"), + }) + req.Handlers.Send.PushBack(func(r *request.Request) { + body := ioutil.NopCloser(bytes.NewReader([]byte(""))) + r.HTTPResponse = &http.Response{StatusCode: 200, Body: body} + r.Data = &sqs.SendMessageOutput{ + MD5OfMessageBody: aws.String("000"), + MessageId: aws.String("12345"), + } + }) + err := req.Send() + assert.Error(t, err) + + assert.Equal(t, "InvalidChecksum", err.(awserr.Error).Code()) + assert.Contains(t, err.(awserr.Error).Message(), "expected MD5 checksum '000', got '098f6bcd4621d373cade4e832627b4f6'") +} + +func TestSendMessageChecksumInvalidNoValidation(t *testing.T) { + s := sqs.New(unit.Session, &aws.Config{ + DisableParamValidation: aws.Bool(true), + DisableComputeChecksums: aws.Bool(true), + }) + s.Handlers.Send.Clear() + + req, _ := s.SendMessageRequest(&sqs.SendMessageInput{ + MessageBody: aws.String("test"), + }) + req.Handlers.Send.PushBack(func(r *request.Request) { + body := ioutil.NopCloser(bytes.NewReader([]byte(""))) + r.HTTPResponse = &http.Response{StatusCode: 200, Body: body} + r.Data = &sqs.SendMessageOutput{ + MD5OfMessageBody: aws.String("000"), + MessageId: aws.String("12345"), + } + }) + err := req.Send() + assert.NoError(t, err) +} + +func TestSendMessageChecksumNoInput(t *testing.T) { + req, _ := svc.SendMessageRequest(&sqs.SendMessageInput{}) + req.Handlers.Send.PushBack(func(r *request.Request) { + body := ioutil.NopCloser(bytes.NewReader([]byte(""))) + r.HTTPResponse = &http.Response{StatusCode: 200, Body: body} + r.Data = &sqs.SendMessageOutput{} + }) + err := req.Send() + assert.Error(t, err) + + assert.Equal(t, "InvalidChecksum", err.(awserr.Error).Code()) + assert.Contains(t, err.(awserr.Error).Message(), "cannot compute checksum. missing body") +} + +func TestSendMessageChecksumNoOutput(t *testing.T) { + req, _ := svc.SendMessageRequest(&sqs.SendMessageInput{ + MessageBody: aws.String("test"), + }) + req.Handlers.Send.PushBack(func(r *request.Request) { + body := ioutil.NopCloser(bytes.NewReader([]byte(""))) + r.HTTPResponse = &http.Response{StatusCode: 200, Body: body} + r.Data = &sqs.SendMessageOutput{} + }) + err := req.Send() + assert.Error(t, err) + + assert.Equal(t, "InvalidChecksum", err.(awserr.Error).Code()) + assert.Contains(t, err.(awserr.Error).Message(), "cannot verify checksum. 
missing response MD5") +} + +func TestRecieveMessageChecksum(t *testing.T) { + req, _ := svc.ReceiveMessageRequest(&sqs.ReceiveMessageInput{}) + req.Handlers.Send.PushBack(func(r *request.Request) { + md5 := "098f6bcd4621d373cade4e832627b4f6" + body := ioutil.NopCloser(bytes.NewReader([]byte(""))) + r.HTTPResponse = &http.Response{StatusCode: 200, Body: body} + r.Data = &sqs.ReceiveMessageOutput{ + Messages: []*sqs.Message{ + {Body: aws.String("test"), MD5OfBody: &md5}, + {Body: aws.String("test"), MD5OfBody: &md5}, + {Body: aws.String("test"), MD5OfBody: &md5}, + {Body: aws.String("test"), MD5OfBody: &md5}, + }, + } + }) + err := req.Send() + assert.NoError(t, err) +} + +func TestRecieveMessageChecksumInvalid(t *testing.T) { + req, _ := svc.ReceiveMessageRequest(&sqs.ReceiveMessageInput{}) + req.Handlers.Send.PushBack(func(r *request.Request) { + md5 := "098f6bcd4621d373cade4e832627b4f6" + body := ioutil.NopCloser(bytes.NewReader([]byte(""))) + r.HTTPResponse = &http.Response{StatusCode: 200, Body: body} + r.Data = &sqs.ReceiveMessageOutput{ + Messages: []*sqs.Message{ + {Body: aws.String("test"), MD5OfBody: &md5}, + {Body: aws.String("test"), MD5OfBody: aws.String("000"), MessageId: aws.String("123")}, + {Body: aws.String("test"), MD5OfBody: aws.String("000"), MessageId: aws.String("456")}, + {Body: aws.String("test"), MD5OfBody: &md5}, + }, + } + }) + err := req.Send() + assert.Error(t, err) + + assert.Equal(t, "InvalidChecksum", err.(awserr.Error).Code()) + assert.Contains(t, err.(awserr.Error).Message(), "invalid messages: 123, 456") +} + +func TestSendMessageBatchChecksum(t *testing.T) { + req, _ := svc.SendMessageBatchRequest(&sqs.SendMessageBatchInput{ + Entries: []*sqs.SendMessageBatchRequestEntry{ + {Id: aws.String("1"), MessageBody: aws.String("test")}, + {Id: aws.String("2"), MessageBody: aws.String("test")}, + {Id: aws.String("3"), MessageBody: aws.String("test")}, + {Id: aws.String("4"), MessageBody: aws.String("test")}, + }, + }) + req.Handlers.Send.PushBack(func(r *request.Request) { + md5 := "098f6bcd4621d373cade4e832627b4f6" + body := ioutil.NopCloser(bytes.NewReader([]byte(""))) + r.HTTPResponse = &http.Response{StatusCode: 200, Body: body} + r.Data = &sqs.SendMessageBatchOutput{ + Successful: []*sqs.SendMessageBatchResultEntry{ + {MD5OfMessageBody: &md5, MessageId: aws.String("123"), Id: aws.String("1")}, + {MD5OfMessageBody: &md5, MessageId: aws.String("456"), Id: aws.String("2")}, + {MD5OfMessageBody: &md5, MessageId: aws.String("789"), Id: aws.String("3")}, + {MD5OfMessageBody: &md5, MessageId: aws.String("012"), Id: aws.String("4")}, + }, + } + }) + err := req.Send() + assert.NoError(t, err) +} + +func TestSendMessageBatchChecksumInvalid(t *testing.T) { + req, _ := svc.SendMessageBatchRequest(&sqs.SendMessageBatchInput{ + Entries: []*sqs.SendMessageBatchRequestEntry{ + {Id: aws.String("1"), MessageBody: aws.String("test")}, + {Id: aws.String("2"), MessageBody: aws.String("test")}, + {Id: aws.String("3"), MessageBody: aws.String("test")}, + {Id: aws.String("4"), MessageBody: aws.String("test")}, + }, + }) + req.Handlers.Send.PushBack(func(r *request.Request) { + md5 := "098f6bcd4621d373cade4e832627b4f6" + body := ioutil.NopCloser(bytes.NewReader([]byte(""))) + r.HTTPResponse = &http.Response{StatusCode: 200, Body: body} + r.Data = &sqs.SendMessageBatchOutput{ + Successful: []*sqs.SendMessageBatchResultEntry{ + {MD5OfMessageBody: &md5, MessageId: aws.String("123"), Id: aws.String("1")}, + {MD5OfMessageBody: aws.String("000"), MessageId: aws.String("456"), Id: 
aws.String("2")}, + {MD5OfMessageBody: aws.String("000"), MessageId: aws.String("789"), Id: aws.String("3")}, + {MD5OfMessageBody: &md5, MessageId: aws.String("012"), Id: aws.String("4")}, + }, + } + }) + err := req.Send() + assert.Error(t, err) + + assert.Equal(t, "InvalidChecksum", err.(awserr.Error).Code()) + assert.Contains(t, err.(awserr.Error).Message(), "invalid messages: 456, 789") +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/sqs/customizations.go b/vendor/github.com/aws/aws-sdk-go/service/sqs/customizations.go new file mode 100644 index 000000000..7498363de --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/sqs/customizations.go @@ -0,0 +1,9 @@ +package sqs + +import "github.com/aws/aws-sdk-go/aws/request" + +func init() { + initRequest = func(r *request.Request) { + setupChecksumValidation(r) + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/sqs/examples_test.go b/vendor/github.com/aws/aws-sdk-go/service/sqs/examples_test.go new file mode 100644 index 000000000..2b8626773 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/sqs/examples_test.go @@ -0,0 +1,433 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +package sqs_test + +import ( + "bytes" + "fmt" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/session" + "github.com/aws/aws-sdk-go/service/sqs" +) + +var _ time.Duration +var _ bytes.Buffer + +func ExampleSQS_AddPermission() { + svc := sqs.New(session.New()) + + params := &sqs.AddPermissionInput{ + AWSAccountIds: []*string{ // Required + aws.String("String"), // Required + // More values... + }, + Actions: []*string{ // Required + aws.String("String"), // Required + // More values... + }, + Label: aws.String("String"), // Required + QueueUrl: aws.String("String"), // Required + } + resp, err := svc.AddPermission(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleSQS_ChangeMessageVisibility() { + svc := sqs.New(session.New()) + + params := &sqs.ChangeMessageVisibilityInput{ + QueueUrl: aws.String("String"), // Required + ReceiptHandle: aws.String("String"), // Required + VisibilityTimeout: aws.Int64(1), // Required + } + resp, err := svc.ChangeMessageVisibility(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleSQS_ChangeMessageVisibilityBatch() { + svc := sqs.New(session.New()) + + params := &sqs.ChangeMessageVisibilityBatchInput{ + Entries: []*sqs.ChangeMessageVisibilityBatchRequestEntry{ // Required + { // Required + Id: aws.String("String"), // Required + ReceiptHandle: aws.String("String"), // Required + VisibilityTimeout: aws.Int64(1), + }, + // More values... + }, + QueueUrl: aws.String("String"), // Required + } + resp, err := svc.ChangeMessageVisibilityBatch(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleSQS_CreateQueue() { + svc := sqs.New(session.New()) + + params := &sqs.CreateQueueInput{ + QueueName: aws.String("String"), // Required + Attributes: map[string]*string{ + "Key": aws.String("String"), // Required + // More values... + }, + } + resp, err := svc.CreateQueue(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleSQS_DeleteMessage() { + svc := sqs.New(session.New()) + + params := &sqs.DeleteMessageInput{ + QueueUrl: aws.String("String"), // Required + ReceiptHandle: aws.String("String"), // Required + } + resp, err := svc.DeleteMessage(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleSQS_DeleteMessageBatch() { + svc := sqs.New(session.New()) + + params := &sqs.DeleteMessageBatchInput{ + Entries: []*sqs.DeleteMessageBatchRequestEntry{ // Required + { // Required + Id: aws.String("String"), // Required + ReceiptHandle: aws.String("String"), // Required + }, + // More values... + }, + QueueUrl: aws.String("String"), // Required + } + resp, err := svc.DeleteMessageBatch(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleSQS_DeleteQueue() { + svc := sqs.New(session.New()) + + params := &sqs.DeleteQueueInput{ + QueueUrl: aws.String("String"), // Required + } + resp, err := svc.DeleteQueue(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleSQS_GetQueueAttributes() { + svc := sqs.New(session.New()) + + params := &sqs.GetQueueAttributesInput{ + QueueUrl: aws.String("String"), // Required + AttributeNames: []*string{ + aws.String("QueueAttributeName"), // Required + // More values... + }, + } + resp, err := svc.GetQueueAttributes(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleSQS_GetQueueUrl() { + svc := sqs.New(session.New()) + + params := &sqs.GetQueueUrlInput{ + QueueName: aws.String("String"), // Required + QueueOwnerAWSAccountId: aws.String("String"), + } + resp, err := svc.GetQueueUrl(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleSQS_ListDeadLetterSourceQueues() { + svc := sqs.New(session.New()) + + params := &sqs.ListDeadLetterSourceQueuesInput{ + QueueUrl: aws.String("String"), // Required + } + resp, err := svc.ListDeadLetterSourceQueues(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleSQS_ListQueues() { + svc := sqs.New(session.New()) + + params := &sqs.ListQueuesInput{ + QueueNamePrefix: aws.String("String"), + } + resp, err := svc.ListQueues(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleSQS_PurgeQueue() { + svc := sqs.New(session.New()) + + params := &sqs.PurgeQueueInput{ + QueueUrl: aws.String("String"), // Required + } + resp, err := svc.PurgeQueue(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleSQS_ReceiveMessage() { + svc := sqs.New(session.New()) + + params := &sqs.ReceiveMessageInput{ + QueueUrl: aws.String("String"), // Required + AttributeNames: []*string{ + aws.String("QueueAttributeName"), // Required + // More values... + }, + MaxNumberOfMessages: aws.Int64(1), + MessageAttributeNames: []*string{ + aws.String("MessageAttributeName"), // Required + // More values... + }, + VisibilityTimeout: aws.Int64(1), + WaitTimeSeconds: aws.Int64(1), + } + resp, err := svc.ReceiveMessage(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleSQS_RemovePermission() { + svc := sqs.New(session.New()) + + params := &sqs.RemovePermissionInput{ + Label: aws.String("String"), // Required + QueueUrl: aws.String("String"), // Required + } + resp, err := svc.RemovePermission(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleSQS_SendMessage() { + svc := sqs.New(session.New()) + + params := &sqs.SendMessageInput{ + MessageBody: aws.String("String"), // Required + QueueUrl: aws.String("String"), // Required + DelaySeconds: aws.Int64(1), + MessageAttributes: map[string]*sqs.MessageAttributeValue{ + "Key": { // Required + DataType: aws.String("String"), // Required + BinaryListValues: [][]byte{ + []byte("PAYLOAD"), // Required + // More values... + }, + BinaryValue: []byte("PAYLOAD"), + StringListValues: []*string{ + aws.String("String"), // Required + // More values... + }, + StringValue: aws.String("String"), + }, + // More values... + }, + } + resp, err := svc.SendMessage(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleSQS_SendMessageBatch() { + svc := sqs.New(session.New()) + + params := &sqs.SendMessageBatchInput{ + Entries: []*sqs.SendMessageBatchRequestEntry{ // Required + { // Required + Id: aws.String("String"), // Required + MessageBody: aws.String("String"), // Required + DelaySeconds: aws.Int64(1), + MessageAttributes: map[string]*sqs.MessageAttributeValue{ + "Key": { // Required + DataType: aws.String("String"), // Required + BinaryListValues: [][]byte{ + []byte("PAYLOAD"), // Required + // More values... 
+ }, + BinaryValue: []byte("PAYLOAD"), + StringListValues: []*string{ + aws.String("String"), // Required + // More values... + }, + StringValue: aws.String("String"), + }, + // More values... + }, + }, + // More values... + }, + QueueUrl: aws.String("String"), // Required + } + resp, err := svc.SendMessageBatch(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleSQS_SetQueueAttributes() { + svc := sqs.New(session.New()) + + params := &sqs.SetQueueAttributesInput{ + Attributes: map[string]*string{ // Required + "Key": aws.String("String"), // Required + // More values... + }, + QueueUrl: aws.String("String"), // Required + } + resp, err := svc.SetQueueAttributes(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/sqs/service.go b/vendor/github.com/aws/aws-sdk-go/service/sqs/service.go new file mode 100644 index 000000000..c6e820834 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/sqs/service.go @@ -0,0 +1,110 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +package sqs + +import ( + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/client" + "github.com/aws/aws-sdk-go/aws/client/metadata" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol/query" + "github.com/aws/aws-sdk-go/private/signer/v4" +) + +// Welcome to the Amazon Simple Queue Service API Reference. This section describes +// who should read this guide, how the guide is organized, and other resources +// related to the Amazon Simple Queue Service (Amazon SQS). +// +// Amazon SQS offers reliable and scalable hosted queues for storing messages +// as they travel between computers. By using Amazon SQS, you can move data +// between distributed components of your applications that perform different +// tasks without losing messages or requiring each component to be always available. +// +// Helpful Links: Current WSDL (2012-11-05) (http://queue.amazonaws.com/doc/2012-11-05/QueueService.wsdl) +// Making API Requests (http://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/MakingRequestsArticle.html) +// Amazon SQS product page (http://aws.amazon.com/sqs/) Using Amazon SQS Message +// Attributes (http://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/SQSMessageAttributes.html) +// Using Amazon SQS Dead Letter Queues (http://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/SQSDeadLetterQueue.html) +// Regions and Endpoints (http://docs.aws.amazon.com/general/latest/gr/rande.html#sqs_region) +// +// +// We also provide SDKs that enable you to access Amazon SQS from your preferred +// programming language. The SDKs contain functionality that automatically takes +// care of tasks such as: +// +// Cryptographically signing your service requests Retrying requests Handling +// error responses +// +// For a list of available SDKs, go to Tools for Amazon Web Services (http://aws.amazon.com/tools/). +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. 
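+//
+// A minimal send-and-receive sketch (the queue URL below is a placeholder,
+// and error handling is elided the same way as in the generated examples):
+//
+//	svc := sqs.New(session.New())
+//	qurl := aws.String("https://sqs.us-west-2.amazonaws.com/123456789012/myqueue")
+//	svc.SendMessage(&sqs.SendMessageInput{QueueUrl: qurl, MessageBody: aws.String("hello")})
+//	out, _ := svc.ReceiveMessage(&sqs.ReceiveMessageInput{
+//		QueueUrl:        qurl,
+//		WaitTimeSeconds: aws.Int64(20), // long poll for up to 20 seconds
+//	})
+//	for _, m := range out.Messages {
+//		fmt.Println(*m.Body)
+//	}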
+type SQS struct { + *client.Client +} + +// Used for custom client initialization logic +var initClient func(*client.Client) + +// Used for custom request initialization logic +var initRequest func(*request.Request) + +// A ServiceName is the name of the service the client will make API calls to. +const ServiceName = "sqs" + +// New creates a new instance of the SQS client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a SQS client from just a session. +// svc := sqs.New(mySession) +// +// // Create a SQS client with additional configuration +// svc := sqs.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func New(p client.ConfigProvider, cfgs ...*aws.Config) *SQS { + c := p.ClientConfig(ServiceName, cfgs...) + return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *SQS { + svc := &SQS{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: ServiceName, + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "2012-11-05", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Build.PushBack(query.Build) + svc.Handlers.Unmarshal.PushBack(query.Unmarshal) + svc.Handlers.UnmarshalMeta.PushBack(query.UnmarshalMeta) + svc.Handlers.UnmarshalError.PushBack(query.UnmarshalError) + + // Run custom client initialization if present + if initClient != nil { + initClient(svc.Client) + } + + return svc +} + +// newRequest creates a new request for a SQS operation and runs any +// custom request initialization. +func (c *SQS) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + // Run custom request initialization if present + if initRequest != nil { + initRequest(req) + } + + return req +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/sqs/sqsiface/interface.go b/vendor/github.com/aws/aws-sdk-go/service/sqs/sqsiface/interface.go new file mode 100644 index 000000000..55647fa17 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/sqs/sqsiface/interface.go @@ -0,0 +1,82 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +// Package sqsiface provides an interface for the Amazon Simple Queue Service. +package sqsiface + +import ( + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/service/sqs" +) + +// SQSAPI is the interface type for sqs.SQS. 
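+//
+// Code that depends on this interface rather than on *sqs.SQS can swap the
+// client out in tests. A minimal sketch (mockSQS is a hypothetical name, not
+// part of this package):
+//
+//	type mockSQS struct {
+//		sqsiface.SQSAPI // embedded; any unimplemented method panics if called
+//	}
+//
+//	func (m *mockSQS) SendMessage(in *sqs.SendMessageInput) (*sqs.SendMessageOutput, error) {
+//		return &sqs.SendMessageOutput{MessageId: aws.String("test-id")}, nil
+//	}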
+type SQSAPI interface { + AddPermissionRequest(*sqs.AddPermissionInput) (*request.Request, *sqs.AddPermissionOutput) + + AddPermission(*sqs.AddPermissionInput) (*sqs.AddPermissionOutput, error) + + ChangeMessageVisibilityRequest(*sqs.ChangeMessageVisibilityInput) (*request.Request, *sqs.ChangeMessageVisibilityOutput) + + ChangeMessageVisibility(*sqs.ChangeMessageVisibilityInput) (*sqs.ChangeMessageVisibilityOutput, error) + + ChangeMessageVisibilityBatchRequest(*sqs.ChangeMessageVisibilityBatchInput) (*request.Request, *sqs.ChangeMessageVisibilityBatchOutput) + + ChangeMessageVisibilityBatch(*sqs.ChangeMessageVisibilityBatchInput) (*sqs.ChangeMessageVisibilityBatchOutput, error) + + CreateQueueRequest(*sqs.CreateQueueInput) (*request.Request, *sqs.CreateQueueOutput) + + CreateQueue(*sqs.CreateQueueInput) (*sqs.CreateQueueOutput, error) + + DeleteMessageRequest(*sqs.DeleteMessageInput) (*request.Request, *sqs.DeleteMessageOutput) + + DeleteMessage(*sqs.DeleteMessageInput) (*sqs.DeleteMessageOutput, error) + + DeleteMessageBatchRequest(*sqs.DeleteMessageBatchInput) (*request.Request, *sqs.DeleteMessageBatchOutput) + + DeleteMessageBatch(*sqs.DeleteMessageBatchInput) (*sqs.DeleteMessageBatchOutput, error) + + DeleteQueueRequest(*sqs.DeleteQueueInput) (*request.Request, *sqs.DeleteQueueOutput) + + DeleteQueue(*sqs.DeleteQueueInput) (*sqs.DeleteQueueOutput, error) + + GetQueueAttributesRequest(*sqs.GetQueueAttributesInput) (*request.Request, *sqs.GetQueueAttributesOutput) + + GetQueueAttributes(*sqs.GetQueueAttributesInput) (*sqs.GetQueueAttributesOutput, error) + + GetQueueUrlRequest(*sqs.GetQueueUrlInput) (*request.Request, *sqs.GetQueueUrlOutput) + + GetQueueUrl(*sqs.GetQueueUrlInput) (*sqs.GetQueueUrlOutput, error) + + ListDeadLetterSourceQueuesRequest(*sqs.ListDeadLetterSourceQueuesInput) (*request.Request, *sqs.ListDeadLetterSourceQueuesOutput) + + ListDeadLetterSourceQueues(*sqs.ListDeadLetterSourceQueuesInput) (*sqs.ListDeadLetterSourceQueuesOutput, error) + + ListQueuesRequest(*sqs.ListQueuesInput) (*request.Request, *sqs.ListQueuesOutput) + + ListQueues(*sqs.ListQueuesInput) (*sqs.ListQueuesOutput, error) + + PurgeQueueRequest(*sqs.PurgeQueueInput) (*request.Request, *sqs.PurgeQueueOutput) + + PurgeQueue(*sqs.PurgeQueueInput) (*sqs.PurgeQueueOutput, error) + + ReceiveMessageRequest(*sqs.ReceiveMessageInput) (*request.Request, *sqs.ReceiveMessageOutput) + + ReceiveMessage(*sqs.ReceiveMessageInput) (*sqs.ReceiveMessageOutput, error) + + RemovePermissionRequest(*sqs.RemovePermissionInput) (*request.Request, *sqs.RemovePermissionOutput) + + RemovePermission(*sqs.RemovePermissionInput) (*sqs.RemovePermissionOutput, error) + + SendMessageRequest(*sqs.SendMessageInput) (*request.Request, *sqs.SendMessageOutput) + + SendMessage(*sqs.SendMessageInput) (*sqs.SendMessageOutput, error) + + SendMessageBatchRequest(*sqs.SendMessageBatchInput) (*request.Request, *sqs.SendMessageBatchOutput) + + SendMessageBatch(*sqs.SendMessageBatchInput) (*sqs.SendMessageBatchOutput, error) + + SetQueueAttributesRequest(*sqs.SetQueueAttributesInput) (*request.Request, *sqs.SetQueueAttributesOutput) + + SetQueueAttributes(*sqs.SetQueueAttributesInput) (*sqs.SetQueueAttributesOutput, error) +} + +var _ SQSAPI = (*sqs.SQS)(nil) diff --git a/vendor/github.com/bgentry/speakeasy/.gitignore b/vendor/github.com/bgentry/speakeasy/.gitignore new file mode 100644 index 000000000..9e1311461 --- /dev/null +++ b/vendor/github.com/bgentry/speakeasy/.gitignore @@ -0,0 +1,2 @@ +example/example +example/example.exe diff --git 
a/vendor/github.com/bgentry/speakeasy/LICENSE_WINDOWS b/vendor/github.com/bgentry/speakeasy/LICENSE_WINDOWS new file mode 100644 index 000000000..ff177f612 --- /dev/null +++ b/vendor/github.com/bgentry/speakeasy/LICENSE_WINDOWS @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + +2. Grant of Copyright License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + +3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + +4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + +5. Submission of Contributions. 
Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + +6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + +8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + +END OF TERMS AND CONDITIONS + +APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + +Copyright [2013] [the CloudFoundry Authors] + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. diff --git a/vendor/github.com/bgentry/speakeasy/Readme.md b/vendor/github.com/bgentry/speakeasy/Readme.md new file mode 100644 index 000000000..fceda7518 --- /dev/null +++ b/vendor/github.com/bgentry/speakeasy/Readme.md @@ -0,0 +1,30 @@ +# Speakeasy + +This package provides cross-platform Go (#golang) helpers for taking user input +from the terminal while not echoing the input back (similar to `getpasswd`). The +package uses syscalls to avoid any dependence on cgo, and is therefore +compatible with cross-compiling. + +[![GoDoc](https://godoc.org/github.com/bgentry/speakeasy?status.png)][godoc] + +## Unicode + +Multi-byte unicode characters work successfully on Mac OS X. On Windows, +however, this may be problematic (as is UTF in general on Windows). Other +platforms have not been tested. + +## License + +The code herein was not written by me, but was compiled from two separate open +source packages. Unix portions were imported from [gopass][gopass], while +Windows portions were imported from the [CloudFoundry Go CLI][cf-cli]'s +[Windows terminal helpers][cf-ui-windows]. + +The [license for the windows portion](./LICENSE_WINDOWS) has been copied exactly +from the source (though I attempted to fill in the correct owner in the +boilerplate copyright notice). + +[cf-cli]: https://github.com/cloudfoundry/cli "CloudFoundry Go CLI" +[cf-ui-windows]: https://github.com/cloudfoundry/cli/blob/master/src/cf/terminal/ui_windows.go "CloudFoundry Go CLI Windows input helpers" +[godoc]: https://godoc.org/github.com/bgentry/speakeasy "speakeasy on Godoc.org" +[gopass]: https://code.google.com/p/gopass "gopass" diff --git a/vendor/github.com/bgentry/speakeasy/example/main.go b/vendor/github.com/bgentry/speakeasy/example/main.go new file mode 100644 index 000000000..d71153a16 --- /dev/null +++ b/vendor/github.com/bgentry/speakeasy/example/main.go @@ -0,0 +1,18 @@ +package main + +import ( + "fmt" + "os" + + "github.com/bgentry/speakeasy" +) + +func main() { + password, err := speakeasy.Ask("Please enter a password: ") + if err != nil { + fmt.Println(err) + os.Exit(1) + } + fmt.Printf("Password result: %q\n", password) + fmt.Printf("Password len: %d\n", len(password)) +} diff --git a/vendor/github.com/bgentry/speakeasy/speakeasy.go b/vendor/github.com/bgentry/speakeasy/speakeasy.go new file mode 100644 index 000000000..d16233a1a --- /dev/null +++ b/vendor/github.com/bgentry/speakeasy/speakeasy.go @@ -0,0 +1,47 @@ +package speakeasy + +import ( + "fmt" + "io" + "os" + "strings" +) + +// Ask the user to enter a password with input hidden. prompt is a string to +// display before the user's input. Returns the provided password, or an error +// if the command failed. +func Ask(prompt string) (password string, err error) { + return FAsk(os.Stdout, prompt) +} + +// Same as the Ask function, except it is possible to specify the file to write +// the prompt to. +func FAsk(file *os.File, prompt string) (password string, err error) { + if prompt != "" { + fmt.Fprint(file, prompt) // Display the prompt. + } + password, err = getPassword() + + // Carriage return after the user input. 
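+	// (While echo is disabled the user's Enter keypress is not echoed back,
+	// so print a newline here to move subsequent output off the prompt line.)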
+ fmt.Fprintln(file, "") + return +} + +func readline() (value string, err error) { + var valb []byte + var n int + b := make([]byte, 1) + for { + // read one byte at a time so we don't accidentally read extra bytes + n, err = os.Stdin.Read(b) + if err != nil && err != io.EOF { + return "", err + } + if n == 0 || b[0] == '\n' { + break + } + valb = append(valb, b[0]) + } + + return strings.TrimSuffix(string(valb), "\r"), nil +} diff --git a/vendor/github.com/bgentry/speakeasy/speakeasy_unix.go b/vendor/github.com/bgentry/speakeasy/speakeasy_unix.go new file mode 100644 index 000000000..dca74bd99 --- /dev/null +++ b/vendor/github.com/bgentry/speakeasy/speakeasy_unix.go @@ -0,0 +1,93 @@ +// based on https://code.google.com/p/gopass +// Author: johnsiilver@gmail.com (John Doak) +// +// Original code is based on code by RogerV in the golang-nuts thread: +// https://groups.google.com/group/golang-nuts/browse_thread/thread/40cc41e9d9fc9247 + +// +build darwin freebsd linux netbsd openbsd solaris + +package speakeasy + +import ( + "fmt" + "os" + "os/signal" + "strings" + "syscall" +) + +const sttyArg0 = "/bin/stty" + +var ( + sttyArgvEOff = []string{"stty", "-echo"} + sttyArgvEOn = []string{"stty", "echo"} +) + +// getPassword gets input hidden from the terminal from a user. This is +// accomplished by turning off terminal echo, reading input from the user and +// finally turning on terminal echo. +func getPassword() (password string, err error) { + sig := make(chan os.Signal, 10) + brk := make(chan bool) + + // File descriptors for stdin, stdout, and stderr. + fd := []uintptr{os.Stdin.Fd(), os.Stdout.Fd(), os.Stderr.Fd()} + + // Setup notifications of termination signals to channel sig, create a process to + // watch for these signals so we can turn back on echo if need be. + signal.Notify(sig, syscall.SIGHUP, syscall.SIGINT, syscall.SIGKILL, syscall.SIGQUIT, + syscall.SIGTERM) + go catchSignal(fd, sig, brk) + + // Turn off the terminal echo. + pid, err := echoOff(fd) + if err != nil { + return "", err + } + + // Turn on the terminal echo and stop listening for signals. + defer signal.Stop(sig) + defer close(brk) + defer echoOn(fd) + + syscall.Wait4(pid, nil, 0, nil) + + line, err := readline() + if err == nil { + password = strings.TrimSpace(line) + } else { + err = fmt.Errorf("failed during password entry: %s", err) + } + + return password, err +} + +// echoOff turns off the terminal echo. +func echoOff(fd []uintptr) (int, error) { + pid, err := syscall.ForkExec(sttyArg0, sttyArgvEOff, &syscall.ProcAttr{Dir: "", Files: fd}) + if err != nil { + return 0, fmt.Errorf("failed turning off console echo for password entry:\n\t%s", err) + } + return pid, nil +} + +// echoOn turns back on the terminal echo. +func echoOn(fd []uintptr) { + // Turn on the terminal echo. + pid, e := syscall.ForkExec(sttyArg0, sttyArgvEOn, &syscall.ProcAttr{Dir: "", Files: fd}) + if e == nil { + syscall.Wait4(pid, nil, 0, nil) + } +} + +// catchSignal tries to catch SIGKILL, SIGQUIT and SIGINT so that we can turn +// terminal echo back on before the program ends. Otherwise the user is left +// with echo off on their terminal. 
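+// (Note: SIGKILL cannot actually be caught by a user process, so listing it
+// in signal.Notify above is effectively a no-op; the remaining signals are
+// handled as intended.)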
+func catchSignal(fd []uintptr, sig chan os.Signal, brk chan bool) { + select { + case <-sig: + echoOn(fd) + os.Exit(-1) + case <-brk: + } +} diff --git a/vendor/github.com/bgentry/speakeasy/speakeasy_windows.go b/vendor/github.com/bgentry/speakeasy/speakeasy_windows.go new file mode 100644 index 000000000..fc4177c44 --- /dev/null +++ b/vendor/github.com/bgentry/speakeasy/speakeasy_windows.go @@ -0,0 +1,43 @@ +// +build windows + +package speakeasy + +import ( + "os" + "syscall" +) + +// SetConsoleMode function can be used to change value of ENABLE_ECHO_INPUT: +// http://msdn.microsoft.com/en-us/library/windows/desktop/ms686033(v=vs.85).aspx +const ENABLE_ECHO_INPUT = 0x0004 + +func getPassword() (password string, err error) { + hStdin := syscall.Handle(os.Stdin.Fd()) + var oldMode uint32 + + err = syscall.GetConsoleMode(hStdin, &oldMode) + if err != nil { + return + } + + var newMode uint32 = (oldMode &^ ENABLE_ECHO_INPUT) + + err = setConsoleMode(hStdin, newMode) + defer setConsoleMode(hStdin, oldMode) + if err != nil { + return + } + + return readline() +} + +func setConsoleMode(console syscall.Handle, mode uint32) (err error) { + dll := syscall.MustLoadDLL("kernel32") + proc := dll.MustFindProc("SetConsoleMode") + r, _, err := proc.Call(uintptr(console), uintptr(mode)) + + if r == 0 { + return err + } + return nil +} diff --git a/vendor/github.com/coreos/etcd/Godeps/_workspace/src/github.com/ugorji/go/codec/0doc.go b/vendor/github.com/coreos/etcd/Godeps/_workspace/src/github.com/ugorji/go/codec/0doc.go new file mode 100644 index 000000000..caa7e0a3b --- /dev/null +++ b/vendor/github.com/coreos/etcd/Godeps/_workspace/src/github.com/ugorji/go/codec/0doc.go @@ -0,0 +1,193 @@ +// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved. +// Use of this source code is governed by a MIT license found in the LICENSE file. + +/* +High Performance, Feature-Rich Idiomatic Go codec/encoding library for +binc, msgpack, cbor, json. + +Supported Serialization formats are: + + - msgpack: https://github.com/msgpack/msgpack + - binc: http://github.com/ugorji/binc + - cbor: http://cbor.io http://tools.ietf.org/html/rfc7049 + - json: http://json.org http://tools.ietf.org/html/rfc7159 + - simple: + +To install: + + go get github.com/ugorji/go/codec + +This package understands the 'unsafe' tag, to allow using unsafe semantics: + + - When decoding into a struct, you need to read the field name as a string + so you can find the struct field it is mapped to. + Using `unsafe` will bypass the allocation and copying overhead of []byte->string conversion. + +To install using unsafe, pass the 'unsafe' tag: + + go get -tags=unsafe github.com/ugorji/go/codec + +For detailed usage information, read the primer at http://ugorji.net/blog/go-codec-primer . + +The idiomatic Go support is as seen in other encoding packages in +the standard library (ie json, xml, gob, etc). + +Rich Feature Set includes: + + - Simple but extremely powerful and feature-rich API + - Very High Performance. + Our extensive benchmarks show us outperforming Gob, Json, Bson, etc by 2-4X. + - Multiple conversions: + Package coerces types where appropriate + e.g. decode an int in the stream into a float, etc. 
+ - Corner Cases:
+ Overflows, nil maps/slices, nil values in streams are handled correctly
+ - Standard field renaming via tags
+ - Support for omitting empty fields during an encoding
+ - Encoding from any value and decoding into pointer to any value
+ (struct, slice, map, primitives, pointers, interface{}, etc)
+ - Extensions to support efficient encoding/decoding of any named types
+ - Support encoding.(Binary|Text)(M|Unm)arshaler interfaces
+ - Decoding without a schema (into an interface{}).
+ Includes Options to configure what specific map or slice type to use
+ when decoding an encoded list or map into a nil interface{}
+ - Encode a struct as an array, and decode struct from an array in the data stream
+ - Comprehensive support for anonymous fields
+ - Fast (no-reflection) encoding/decoding of common maps and slices
+ - Code-generation for faster performance.
+ - Support binary (e.g. messagepack, cbor) and text (e.g. json) formats
+ - Support indefinite-length formats to enable true streaming
+ (for formats which support it e.g. json, cbor)
+ - Support canonical encoding, where a value is ALWAYS encoded as the same sequence of bytes.
+ This mostly applies to maps, where iteration order is non-deterministic.
+ - NIL in data stream decoded as zero value
+ - Never silently skip data when decoding.
+ User decides whether to return an error or silently skip data when keys or indexes
+ in the data stream do not map to fields in the struct.
+ - Encode/Decode from/to chan types (for iterative streaming support)
+ - Drop-in replacement for encoding/json. `json:` key in struct tag supported.
+ - Provides an RPC Server and Client Codec for net/rpc communication protocol.
+ - Handle unique idiosyncrasies of codecs e.g.
+ - For messagepack, configure how ambiguities in handling raw bytes are resolved
+ - For messagepack, provide rpc server/client codec to support
+ msgpack-rpc protocol defined at:
+ https://github.com/msgpack-rpc/msgpack-rpc/blob/master/spec.md
+
+Extension Support
+
+Users can register a function to handle the encoding or decoding of
+their custom types.
+
+There are no restrictions on what the custom type can be. Some examples:
+
+ type BitSet []int
+ type BitSet64 uint64
+ type UUID string
+ type MyStructWithUnexportedFields struct { a int; b bool; c []int; }
+ type GifImage struct { ... }
+
+As an illustration, MyStructWithUnexportedFields would normally be
+encoded as an empty map because it has no exported fields, while UUID
+would be encoded as a string. However, with extension support, you can
+encode any of these however you like.
+
+RPC
+
+RPC Client and Server Codecs are implemented, so the codecs can be used
+with the standard net/rpc package.
+
+Usage
+
+The Handle is SAFE for concurrent READ, but NOT SAFE for concurrent modification.
+
+The Encoder and Decoder are NOT safe for concurrent use.
+
+Consequently, the usage model is basically:
+
+ - Create and initialize the Handle before any use.
+ Once created, DO NOT modify it.
+ - Multiple Encoders or Decoders can now use the Handle concurrently.
+ They only read information off the Handle (never write).
+ - However, each Encoder or Decoder MUST NOT be used concurrently
+ - To re-use an Encoder/Decoder, call Reset(...) on it first (see the sketch below).
+ This allows you to use state maintained on the Encoder/Decoder.
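+
+A minimal sketch of the re-use pattern from the last bullet above (hedged:
+it assumes the Encoder/Decoder expose ResetBytes variants taking the new
+destination or source slice, alongside Reset(io.Writer)/Reset(io.Reader)):
+
+ // reuse one Encoder and one Decoder across many messages,
+ // pointing them at fresh byte slices between uses
+ var mh codec.MsgpackHandle
+ var out []byte
+ enc := codec.NewEncoderBytes(&out, &mh)
+ dec := codec.NewDecoderBytes(nil, &mh)
+ for _, msg := range msgs {
+ out = out[:0]
+ enc.ResetBytes(&out) // assumed signature: ResetBytes(out *[]byte)
+ err = enc.Encode(msg)
+ var v interface{}
+ dec.ResetBytes(out) // assumed signature: ResetBytes(in []byte)
+ err = dec.Decode(&v)
+ }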
+ +Sample usage model: + + // create and configure Handle + var ( + bh codec.BincHandle + mh codec.MsgpackHandle + ch codec.CborHandle + ) + + mh.MapType = reflect.TypeOf(map[string]interface{}(nil)) + + // configure extensions + // e.g. for msgpack, define functions and enable Time support for tag 1 + // mh.SetExt(reflect.TypeOf(time.Time{}), 1, myExt) + + // create and use decoder/encoder + var ( + r io.Reader + w io.Writer + b []byte + h = &bh // or mh to use msgpack + ) + + dec = codec.NewDecoder(r, h) + dec = codec.NewDecoderBytes(b, h) + err = dec.Decode(&v) + + enc = codec.NewEncoder(w, h) + enc = codec.NewEncoderBytes(&b, h) + err = enc.Encode(v) + + //RPC Server + go func() { + for { + conn, err := listener.Accept() + rpcCodec := codec.GoRpc.ServerCodec(conn, h) + //OR rpcCodec := codec.MsgpackSpecRpc.ServerCodec(conn, h) + rpc.ServeCodec(rpcCodec) + } + }() + + //RPC Communication (client side) + conn, err = net.Dial("tcp", "localhost:5555") + rpcCodec := codec.GoRpc.ClientCodec(conn, h) + //OR rpcCodec := codec.MsgpackSpecRpc.ClientCodec(conn, h) + client := rpc.NewClientWithCodec(rpcCodec) + +*/ +package codec + +// Benefits of go-codec: +// +// - encoding/json always reads whole file into memory first. +// This makes it unsuitable for parsing very large files. +// - encoding/xml cannot parse into a map[string]interface{} +// I found this out on reading https://github.com/clbanning/mxj + +// TODO: +// +// - (En|De)coder should store an error when it occurs. +// Until reset, subsequent calls return that error that was stored. +// This means that free panics must go away. +// All errors must be raised through errorf method. +// - Decoding using a chan is good, but incurs concurrency costs. +// This is because there's no fast way to use a channel without it +// having to switch goroutines constantly. +// Callback pattern is still the best. Maybe cnsider supporting something like: +// type X struct { +// Name string +// Ys []Y +// Ys chan <- Y +// Ys func(interface{}) -> call this interface for each entry in there. +// } +// - Consider adding a isZeroer interface { isZero() bool } +// It is used within isEmpty, for omitEmpty support. +// - Consider making Handle used AS-IS within the encoding/decoding session. +// This means that we don't cache Handle information within the (En|De)coder, +// except we really need it at Reset(...) +// - Handle recursive types during encoding/decoding? diff --git a/vendor/github.com/coreos/etcd/Godeps/_workspace/src/github.com/ugorji/go/codec/README.md b/vendor/github.com/coreos/etcd/Godeps/_workspace/src/github.com/ugorji/go/codec/README.md new file mode 100644 index 000000000..a790a52bb --- /dev/null +++ b/vendor/github.com/coreos/etcd/Godeps/_workspace/src/github.com/ugorji/go/codec/README.md @@ -0,0 +1,148 @@ +# Codec + +High Performance, Feature-Rich Idiomatic Go codec/encoding library for +binc, msgpack, cbor, json. + +Supported Serialization formats are: + + - msgpack: https://github.com/msgpack/msgpack + - binc: http://github.com/ugorji/binc + - cbor: http://cbor.io http://tools.ietf.org/html/rfc7049 + - json: http://json.org http://tools.ietf.org/html/rfc7159 + - simple: + +To install: + + go get github.com/ugorji/go/codec + +This package understands the `unsafe` tag, to allow using unsafe semantics: + + - When decoding into a struct, you need to read the field name as a string + so you can find the struct field it is mapped to. + Using `unsafe` will bypass the allocation and copying overhead of `[]byte->string` conversion. 
+
+To use it, you must pass the `unsafe` tag during install:
+
+```
+go install -tags=unsafe github.com/ugorji/go/codec
+```
+
+Online documentation: http://godoc.org/github.com/ugorji/go/codec
+Detailed Usage/How-to Primer: http://ugorji.net/blog/go-codec-primer
+
+The idiomatic Go support is as seen in other encoding packages in
+the standard library (ie json, xml, gob, etc).
+
+Rich Feature Set includes:
+
+ - Simple but extremely powerful and feature-rich API
+ - Very High Performance.
+ Our extensive benchmarks show us outperforming Gob, Json, Bson, etc by 2-4X.
+ - Multiple conversions:
+ Package coerces types where appropriate
+ e.g. decode an int in the stream into a float, etc.
+ - Corner Cases:
+ Overflows, nil maps/slices, nil values in streams are handled correctly
+ - Standard field renaming via tags
+ - Support for omitting empty fields during an encoding
+ - Encoding from any value and decoding into pointer to any value
+ (struct, slice, map, primitives, pointers, interface{}, etc)
+ - Extensions to support efficient encoding/decoding of any named types
+ - Support encoding.(Binary|Text)(M|Unm)arshaler interfaces
+ - Decoding without a schema (into an interface{}).
+ Includes Options to configure what specific map or slice type to use
+ when decoding an encoded list or map into a nil interface{}
+ - Encode a struct as an array, and decode struct from an array in the data stream
+ - Comprehensive support for anonymous fields
+ - Fast (no-reflection) encoding/decoding of common maps and slices
+ - Code-generation for faster performance.
+ - Support binary (e.g. messagepack, cbor) and text (e.g. json) formats
+ - Support indefinite-length formats to enable true streaming
+ (for formats which support it e.g. json, cbor)
+ - Support canonical encoding, where a value is ALWAYS encoded as the same sequence of bytes.
+ This mostly applies to maps, where iteration order is non-deterministic.
+ - NIL in data stream decoded as zero value
+ - Never silently skip data when decoding.
+ User decides whether to return an error or silently skip data when keys or indexes
+ in the data stream do not map to fields in the struct.
+ - Encode/Decode from/to chan types (for iterative streaming support)
+ - Drop-in replacement for encoding/json. `json:` key in struct tag supported.
+ - Provides an RPC Server and Client Codec for net/rpc communication protocol.
+ - Handle unique idiosyncrasies of codecs e.g.
+ - For messagepack, configure how ambiguities in handling raw bytes are resolved
+ - For messagepack, provide rpc server/client codec to support
+ msgpack-rpc protocol defined at:
+ https://github.com/msgpack-rpc/msgpack-rpc/blob/master/spec.md
+
+## Extension Support
+
+Users can register a function to handle the encoding or decoding of
+their custom types.
+
+There are no restrictions on what the custom type can be. Some examples:
+
+ type BitSet []int
+ type BitSet64 uint64
+ type UUID string
+ type MyStructWithUnexportedFields struct { a int; b bool; c []int; }
+ type GifImage struct { ... }
+
+As an illustration, MyStructWithUnexportedFields would normally be
+encoded as an empty map because it has no exported fields, while UUID
+would be encoded as a string. However, with extension support, you can
+encode any of these however you like.
+
+## RPC
+
+RPC Client and Server Codecs are implemented, so the codecs can be used
+with the standard net/rpc package.
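+
+## Registering an Extension
+
+A minimal sketch of the extension support described above, using the
+interface-based registration exposed by the cbor and json handles in this
+package. The UUID example type, the uuidExt helper and tag 78 are
+illustrative assumptions, not part of the package:
+
+ // UUID is a custom type we want encoded as a tagged string.
+ type UUID string
+
+ // uuidExt implements the ConvertExt/UpdateExt pair expected by
+ // SetInterfaceExt: encode by converting to a string, decode by
+ // converting back.
+ type uuidExt struct{}
+
+ func (uuidExt) ConvertExt(v interface{}) interface{} {
+ return string(v.(UUID)) // a full version would handle *UUID too
+ }
+ func (uuidExt) UpdateExt(dest interface{}, v interface{}) {
+ *dest.(*UUID) = UUID(v.(string))
+ }
+
+ var ch codec.CborHandle
+ // tag 78 is an arbitrary example, not a registered CBOR tag
+ err = ch.SetInterfaceExt(reflect.TypeOf(UUID("")), 78, uuidExt{})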
+ +## Usage + +Typical usage model: + + // create and configure Handle + var ( + bh codec.BincHandle + mh codec.MsgpackHandle + ch codec.CborHandle + ) + + mh.MapType = reflect.TypeOf(map[string]interface{}(nil)) + + // configure extensions + // e.g. for msgpack, define functions and enable Time support for tag 1 + // mh.SetExt(reflect.TypeOf(time.Time{}), 1, myExt) + + // create and use decoder/encoder + var ( + r io.Reader + w io.Writer + b []byte + h = &bh // or mh to use msgpack + ) + + dec = codec.NewDecoder(r, h) + dec = codec.NewDecoderBytes(b, h) + err = dec.Decode(&v) + + enc = codec.NewEncoder(w, h) + enc = codec.NewEncoderBytes(&b, h) + err = enc.Encode(v) + + //RPC Server + go func() { + for { + conn, err := listener.Accept() + rpcCodec := codec.GoRpc.ServerCodec(conn, h) + //OR rpcCodec := codec.MsgpackSpecRpc.ServerCodec(conn, h) + rpc.ServeCodec(rpcCodec) + } + }() + + //RPC Communication (client side) + conn, err = net.Dial("tcp", "localhost:5555") + rpcCodec := codec.GoRpc.ClientCodec(conn, h) + //OR rpcCodec := codec.MsgpackSpecRpc.ClientCodec(conn, h) + client := rpc.NewClientWithCodec(rpcCodec) + diff --git a/vendor/github.com/coreos/etcd/Godeps/_workspace/src/github.com/ugorji/go/codec/binc.go b/vendor/github.com/coreos/etcd/Godeps/_workspace/src/github.com/ugorji/go/codec/binc.go new file mode 100644 index 000000000..c884d14dc --- /dev/null +++ b/vendor/github.com/coreos/etcd/Godeps/_workspace/src/github.com/ugorji/go/codec/binc.go @@ -0,0 +1,918 @@ +// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved. +// Use of this source code is governed by a MIT license found in the LICENSE file. + +package codec + +import ( + "math" + "reflect" + "time" +) + +const bincDoPrune = true // No longer needed. Needed before as C lib did not support pruning. 
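+
+// Pruning: when bincDoPrune is set, insignificant zero bytes are dropped
+// on the wire: EncodeFloat64 below strips trailing zero bytes from the
+// big-endian float bits (writing a one-byte significant-length prefix
+// instead), and encIntegerPrune strips redundant leading bytes from
+// multi-byte integers.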
+ +// vd as low 4 bits (there are 16 slots) +const ( + bincVdSpecial byte = iota + bincVdPosInt + bincVdNegInt + bincVdFloat + + bincVdString + bincVdByteArray + bincVdArray + bincVdMap + + bincVdTimestamp + bincVdSmallInt + bincVdUnicodeOther + bincVdSymbol + + bincVdDecimal + _ // open slot + _ // open slot + bincVdCustomExt = 0x0f +) + +const ( + bincSpNil byte = iota + bincSpFalse + bincSpTrue + bincSpNan + bincSpPosInf + bincSpNegInf + bincSpZeroFloat + bincSpZero + bincSpNegOne +) + +const ( + bincFlBin16 byte = iota + bincFlBin32 + _ // bincFlBin32e + bincFlBin64 + _ // bincFlBin64e + // others not currently supported +) + +type bincEncDriver struct { + e *Encoder + w encWriter + m map[string]uint16 // symbols + b [scratchByteArrayLen]byte + s uint16 // symbols sequencer + encNoSeparator +} + +func (e *bincEncDriver) IsBuiltinType(rt uintptr) bool { + return rt == timeTypId +} + +func (e *bincEncDriver) EncodeBuiltin(rt uintptr, v interface{}) { + if rt == timeTypId { + var bs []byte + switch x := v.(type) { + case time.Time: + bs = encodeTime(x) + case *time.Time: + bs = encodeTime(*x) + default: + e.e.errorf("binc error encoding builtin: expect time.Time, received %T", v) + } + e.w.writen1(bincVdTimestamp<<4 | uint8(len(bs))) + e.w.writeb(bs) + } +} + +func (e *bincEncDriver) EncodeNil() { + e.w.writen1(bincVdSpecial<<4 | bincSpNil) +} + +func (e *bincEncDriver) EncodeBool(b bool) { + if b { + e.w.writen1(bincVdSpecial<<4 | bincSpTrue) + } else { + e.w.writen1(bincVdSpecial<<4 | bincSpFalse) + } +} + +func (e *bincEncDriver) EncodeFloat32(f float32) { + if f == 0 { + e.w.writen1(bincVdSpecial<<4 | bincSpZeroFloat) + return + } + e.w.writen1(bincVdFloat<<4 | bincFlBin32) + bigenHelper{e.b[:4], e.w}.writeUint32(math.Float32bits(f)) +} + +func (e *bincEncDriver) EncodeFloat64(f float64) { + if f == 0 { + e.w.writen1(bincVdSpecial<<4 | bincSpZeroFloat) + return + } + bigen.PutUint64(e.b[:8], math.Float64bits(f)) + if bincDoPrune { + i := 7 + for ; i >= 0 && (e.b[i] == 0); i-- { + } + i++ + if i <= 6 { + e.w.writen1(bincVdFloat<<4 | 0x8 | bincFlBin64) + e.w.writen1(byte(i)) + e.w.writeb(e.b[:i]) + return + } + } + e.w.writen1(bincVdFloat<<4 | bincFlBin64) + e.w.writeb(e.b[:8]) +} + +func (e *bincEncDriver) encIntegerPrune(bd byte, pos bool, v uint64, lim uint8) { + if lim == 4 { + bigen.PutUint32(e.b[:lim], uint32(v)) + } else { + bigen.PutUint64(e.b[:lim], v) + } + if bincDoPrune { + i := pruneSignExt(e.b[:lim], pos) + e.w.writen1(bd | lim - 1 - byte(i)) + e.w.writeb(e.b[i:lim]) + } else { + e.w.writen1(bd | lim - 1) + e.w.writeb(e.b[:lim]) + } +} + +func (e *bincEncDriver) EncodeInt(v int64) { + const nbd byte = bincVdNegInt << 4 + if v >= 0 { + e.encUint(bincVdPosInt<<4, true, uint64(v)) + } else if v == -1 { + e.w.writen1(bincVdSpecial<<4 | bincSpNegOne) + } else { + e.encUint(bincVdNegInt<<4, false, uint64(-v)) + } +} + +func (e *bincEncDriver) EncodeUint(v uint64) { + e.encUint(bincVdPosInt<<4, true, v) +} + +func (e *bincEncDriver) encUint(bd byte, pos bool, v uint64) { + if v == 0 { + e.w.writen1(bincVdSpecial<<4 | bincSpZero) + } else if pos && v >= 1 && v <= 16 { + e.w.writen1(bincVdSmallInt<<4 | byte(v-1)) + } else if v <= math.MaxUint8 { + e.w.writen2(bd|0x0, byte(v)) + } else if v <= math.MaxUint16 { + e.w.writen1(bd | 0x01) + bigenHelper{e.b[:2], e.w}.writeUint16(uint16(v)) + } else if v <= math.MaxUint32 { + e.encIntegerPrune(bd, pos, v, 4) + } else { + e.encIntegerPrune(bd, pos, v, 8) + } +} + +func (e *bincEncDriver) EncodeExt(rv interface{}, xtag uint64, ext Ext, _ 
*Encoder) { + bs := ext.WriteExt(rv) + if bs == nil { + e.EncodeNil() + return + } + e.encodeExtPreamble(uint8(xtag), len(bs)) + e.w.writeb(bs) +} + +func (e *bincEncDriver) EncodeRawExt(re *RawExt, _ *Encoder) { + e.encodeExtPreamble(uint8(re.Tag), len(re.Data)) + e.w.writeb(re.Data) +} + +func (e *bincEncDriver) encodeExtPreamble(xtag byte, length int) { + e.encLen(bincVdCustomExt<<4, uint64(length)) + e.w.writen1(xtag) +} + +func (e *bincEncDriver) EncodeArrayStart(length int) { + e.encLen(bincVdArray<<4, uint64(length)) +} + +func (e *bincEncDriver) EncodeMapStart(length int) { + e.encLen(bincVdMap<<4, uint64(length)) +} + +func (e *bincEncDriver) EncodeString(c charEncoding, v string) { + l := uint64(len(v)) + e.encBytesLen(c, l) + if l > 0 { + e.w.writestr(v) + } +} + +func (e *bincEncDriver) EncodeSymbol(v string) { + // if WriteSymbolsNoRefs { + // e.encodeString(c_UTF8, v) + // return + // } + + //symbols only offer benefit when string length > 1. + //This is because strings with length 1 take only 2 bytes to store + //(bd with embedded length, and single byte for string val). + + l := len(v) + if l == 0 { + e.encBytesLen(c_UTF8, 0) + return + } else if l == 1 { + e.encBytesLen(c_UTF8, 1) + e.w.writen1(v[0]) + return + } + if e.m == nil { + e.m = make(map[string]uint16, 16) + } + ui, ok := e.m[v] + if ok { + if ui <= math.MaxUint8 { + e.w.writen2(bincVdSymbol<<4, byte(ui)) + } else { + e.w.writen1(bincVdSymbol<<4 | 0x8) + bigenHelper{e.b[:2], e.w}.writeUint16(ui) + } + } else { + e.s++ + ui = e.s + //ui = uint16(atomic.AddUint32(&e.s, 1)) + e.m[v] = ui + var lenprec uint8 + if l <= math.MaxUint8 { + // lenprec = 0 + } else if l <= math.MaxUint16 { + lenprec = 1 + } else if int64(l) <= math.MaxUint32 { + lenprec = 2 + } else { + lenprec = 3 + } + if ui <= math.MaxUint8 { + e.w.writen2(bincVdSymbol<<4|0x0|0x4|lenprec, byte(ui)) + } else { + e.w.writen1(bincVdSymbol<<4 | 0x8 | 0x4 | lenprec) + bigenHelper{e.b[:2], e.w}.writeUint16(ui) + } + if lenprec == 0 { + e.w.writen1(byte(l)) + } else if lenprec == 1 { + bigenHelper{e.b[:2], e.w}.writeUint16(uint16(l)) + } else if lenprec == 2 { + bigenHelper{e.b[:4], e.w}.writeUint32(uint32(l)) + } else { + bigenHelper{e.b[:8], e.w}.writeUint64(uint64(l)) + } + e.w.writestr(v) + } +} + +func (e *bincEncDriver) EncodeStringBytes(c charEncoding, v []byte) { + l := uint64(len(v)) + e.encBytesLen(c, l) + if l > 0 { + e.w.writeb(v) + } +} + +func (e *bincEncDriver) encBytesLen(c charEncoding, length uint64) { + //TODO: support bincUnicodeOther (for now, just use string or bytearray) + if c == c_RAW { + e.encLen(bincVdByteArray<<4, length) + } else { + e.encLen(bincVdString<<4, length) + } +} + +func (e *bincEncDriver) encLen(bd byte, l uint64) { + if l < 12 { + e.w.writen1(bd | uint8(l+4)) + } else { + e.encLenNumber(bd, l) + } +} + +func (e *bincEncDriver) encLenNumber(bd byte, v uint64) { + if v <= math.MaxUint8 { + e.w.writen2(bd, byte(v)) + } else if v <= math.MaxUint16 { + e.w.writen1(bd | 0x01) + bigenHelper{e.b[:2], e.w}.writeUint16(uint16(v)) + } else if v <= math.MaxUint32 { + e.w.writen1(bd | 0x02) + bigenHelper{e.b[:4], e.w}.writeUint32(uint32(v)) + } else { + e.w.writen1(bd | 0x03) + bigenHelper{e.b[:8], e.w}.writeUint64(uint64(v)) + } +} + +//------------------------------------ + +type bincDecSymbol struct { + s string + b []byte + i uint16 +} + +type bincDecDriver struct { + d *Decoder + h *BincHandle + r decReader + br bool // bytes reader + bdRead bool + bd byte + vd byte + vs byte + noStreamingCodec + decNoSeparator + b 
[scratchByteArrayLen]byte + + // linear searching on this slice is ok, + // because we typically expect < 32 symbols in each stream. + s []bincDecSymbol +} + +func (d *bincDecDriver) readNextBd() { + d.bd = d.r.readn1() + d.vd = d.bd >> 4 + d.vs = d.bd & 0x0f + d.bdRead = true +} + +func (d *bincDecDriver) ContainerType() (vt valueType) { + if d.vd == bincVdSpecial && d.vs == bincSpNil { + return valueTypeNil + } else if d.vd == bincVdByteArray { + return valueTypeBytes + } else if d.vd == bincVdString { + return valueTypeString + } else if d.vd == bincVdArray { + return valueTypeArray + } else if d.vd == bincVdMap { + return valueTypeMap + } else { + // d.d.errorf("isContainerType: unsupported parameter: %v", vt) + } + return valueTypeUnset +} + +func (d *bincDecDriver) TryDecodeAsNil() bool { + if !d.bdRead { + d.readNextBd() + } + if d.bd == bincVdSpecial<<4|bincSpNil { + d.bdRead = false + return true + } + return false +} + +func (d *bincDecDriver) IsBuiltinType(rt uintptr) bool { + return rt == timeTypId +} + +func (d *bincDecDriver) DecodeBuiltin(rt uintptr, v interface{}) { + if !d.bdRead { + d.readNextBd() + } + if rt == timeTypId { + if d.vd != bincVdTimestamp { + d.d.errorf("Invalid d.vd. Expecting 0x%x. Received: 0x%x", bincVdTimestamp, d.vd) + return + } + tt, err := decodeTime(d.r.readx(int(d.vs))) + if err != nil { + panic(err) + } + var vt *time.Time = v.(*time.Time) + *vt = tt + d.bdRead = false + } +} + +func (d *bincDecDriver) decFloatPre(vs, defaultLen byte) { + if vs&0x8 == 0 { + d.r.readb(d.b[0:defaultLen]) + } else { + l := d.r.readn1() + if l > 8 { + d.d.errorf("At most 8 bytes used to represent float. Received: %v bytes", l) + return + } + for i := l; i < 8; i++ { + d.b[i] = 0 + } + d.r.readb(d.b[0:l]) + } +} + +func (d *bincDecDriver) decFloat() (f float64) { + //if true { f = math.Float64frombits(bigen.Uint64(d.r.readx(8))); break; } + if x := d.vs & 0x7; x == bincFlBin32 { + d.decFloatPre(d.vs, 4) + f = float64(math.Float32frombits(bigen.Uint32(d.b[0:4]))) + } else if x == bincFlBin64 { + d.decFloatPre(d.vs, 8) + f = math.Float64frombits(bigen.Uint64(d.b[0:8])) + } else { + d.d.errorf("only float32 and float64 are supported. 
d.vd: 0x%x, d.vs: 0x%x", d.vd, d.vs) + return + } + return +} + +func (d *bincDecDriver) decUint() (v uint64) { + // need to inline the code (interface conversion and type assertion expensive) + switch d.vs { + case 0: + v = uint64(d.r.readn1()) + case 1: + d.r.readb(d.b[6:8]) + v = uint64(bigen.Uint16(d.b[6:8])) + case 2: + d.b[4] = 0 + d.r.readb(d.b[5:8]) + v = uint64(bigen.Uint32(d.b[4:8])) + case 3: + d.r.readb(d.b[4:8]) + v = uint64(bigen.Uint32(d.b[4:8])) + case 4, 5, 6: + lim := int(7 - d.vs) + d.r.readb(d.b[lim:8]) + for i := 0; i < lim; i++ { + d.b[i] = 0 + } + v = uint64(bigen.Uint64(d.b[:8])) + case 7: + d.r.readb(d.b[:8]) + v = uint64(bigen.Uint64(d.b[:8])) + default: + d.d.errorf("unsigned integers with greater than 64 bits of precision not supported") + return + } + return +} + +func (d *bincDecDriver) decCheckInteger() (ui uint64, neg bool) { + if !d.bdRead { + d.readNextBd() + } + vd, vs := d.vd, d.vs + if vd == bincVdPosInt { + ui = d.decUint() + } else if vd == bincVdNegInt { + ui = d.decUint() + neg = true + } else if vd == bincVdSmallInt { + ui = uint64(d.vs) + 1 + } else if vd == bincVdSpecial { + if vs == bincSpZero { + //i = 0 + } else if vs == bincSpNegOne { + neg = true + ui = 1 + } else { + d.d.errorf("numeric decode fails for special value: d.vs: 0x%x", d.vs) + return + } + } else { + d.d.errorf("number can only be decoded from uint or int values. d.bd: 0x%x, d.vd: 0x%x", d.bd, d.vd) + return + } + return +} + +func (d *bincDecDriver) DecodeInt(bitsize uint8) (i int64) { + ui, neg := d.decCheckInteger() + i, overflow := chkOvf.SignedInt(ui) + if overflow { + d.d.errorf("simple: overflow converting %v to signed integer", ui) + return + } + if neg { + i = -i + } + if chkOvf.Int(i, bitsize) { + d.d.errorf("binc: overflow integer: %v", i) + return + } + d.bdRead = false + return +} + +func (d *bincDecDriver) DecodeUint(bitsize uint8) (ui uint64) { + ui, neg := d.decCheckInteger() + if neg { + d.d.errorf("Assigning negative signed value to unsigned type") + return + } + if chkOvf.Uint(ui, bitsize) { + d.d.errorf("binc: overflow integer: %v", ui) + return + } + d.bdRead = false + return +} + +func (d *bincDecDriver) DecodeFloat(chkOverflow32 bool) (f float64) { + if !d.bdRead { + d.readNextBd() + } + vd, vs := d.vd, d.vs + if vd == bincVdSpecial { + d.bdRead = false + if vs == bincSpNan { + return math.NaN() + } else if vs == bincSpPosInf { + return math.Inf(1) + } else if vs == bincSpZeroFloat || vs == bincSpZero { + return + } else if vs == bincSpNegInf { + return math.Inf(-1) + } else { + d.d.errorf("Invalid d.vs decoding float where d.vd=bincVdSpecial: %v", d.vs) + return + } + } else if vd == bincVdFloat { + f = d.decFloat() + } else { + f = float64(d.DecodeInt(64)) + } + if chkOverflow32 && chkOvf.Float32(f) { + d.d.errorf("binc: float32 overflow: %v", f) + return + } + d.bdRead = false + return +} + +// bool can be decoded from bool only (single byte). +func (d *bincDecDriver) DecodeBool() (b bool) { + if !d.bdRead { + d.readNextBd() + } + if bd := d.bd; bd == (bincVdSpecial | bincSpFalse) { + // b = false + } else if bd == (bincVdSpecial | bincSpTrue) { + b = true + } else { + d.d.errorf("Invalid single-byte value for bool: %s: %x", msgBadDesc, d.bd) + return + } + d.bdRead = false + return +} + +func (d *bincDecDriver) ReadMapStart() (length int) { + if d.vd != bincVdMap { + d.d.errorf("Invalid d.vd for map. Expecting 0x%x. 
Got: 0x%x", bincVdMap, d.vd) + return + } + length = d.decLen() + d.bdRead = false + return +} + +func (d *bincDecDriver) ReadArrayStart() (length int) { + if d.vd != bincVdArray { + d.d.errorf("Invalid d.vd for array. Expecting 0x%x. Got: 0x%x", bincVdArray, d.vd) + return + } + length = d.decLen() + d.bdRead = false + return +} + +func (d *bincDecDriver) decLen() int { + if d.vs > 3 { + return int(d.vs - 4) + } + return int(d.decLenNumber()) +} + +func (d *bincDecDriver) decLenNumber() (v uint64) { + if x := d.vs; x == 0 { + v = uint64(d.r.readn1()) + } else if x == 1 { + d.r.readb(d.b[6:8]) + v = uint64(bigen.Uint16(d.b[6:8])) + } else if x == 2 { + d.r.readb(d.b[4:8]) + v = uint64(bigen.Uint32(d.b[4:8])) + } else { + d.r.readb(d.b[:8]) + v = bigen.Uint64(d.b[:8]) + } + return +} + +func (d *bincDecDriver) decStringAndBytes(bs []byte, withString, zerocopy bool) (bs2 []byte, s string) { + if !d.bdRead { + d.readNextBd() + } + if d.bd == bincVdSpecial<<4|bincSpNil { + d.bdRead = false + return + } + var slen int = -1 + // var ok bool + switch d.vd { + case bincVdString, bincVdByteArray: + slen = d.decLen() + if zerocopy { + if d.br { + bs2 = d.r.readx(slen) + } else if len(bs) == 0 { + bs2 = decByteSlice(d.r, slen, d.b[:]) + } else { + bs2 = decByteSlice(d.r, slen, bs) + } + } else { + bs2 = decByteSlice(d.r, slen, bs) + } + if withString { + s = string(bs2) + } + case bincVdSymbol: + // zerocopy doesn't apply for symbols, + // as the values must be stored in a table for later use. + // + //from vs: extract numSymbolBytes, containsStringVal, strLenPrecision, + //extract symbol + //if containsStringVal, read it and put in map + //else look in map for string value + var symbol uint16 + vs := d.vs + if vs&0x8 == 0 { + symbol = uint16(d.r.readn1()) + } else { + symbol = uint16(bigen.Uint16(d.r.readx(2))) + } + if d.s == nil { + d.s = make([]bincDecSymbol, 0, 16) + } + + if vs&0x4 == 0 { + for i := range d.s { + j := &d.s[i] + if j.i == symbol { + bs2 = j.b + if withString { + if j.s == "" && bs2 != nil { + j.s = string(bs2) + } + s = j.s + } + break + } + } + } else { + switch vs & 0x3 { + case 0: + slen = int(d.r.readn1()) + case 1: + slen = int(bigen.Uint16(d.r.readx(2))) + case 2: + slen = int(bigen.Uint32(d.r.readx(4))) + case 3: + slen = int(bigen.Uint64(d.r.readx(8))) + } + // since using symbols, do not store any part of + // the parameter bs in the map, as it might be a shared buffer. + // bs2 = decByteSlice(d.r, slen, bs) + bs2 = decByteSlice(d.r, slen, nil) + if withString { + s = string(bs2) + } + d.s = append(d.s, bincDecSymbol{i: symbol, s: s, b: bs2}) + } + default: + d.d.errorf("Invalid d.vd. Expecting string:0x%x, bytearray:0x%x or symbol: 0x%x. Got: 0x%x", + bincVdString, bincVdByteArray, bincVdSymbol, d.vd) + return + } + d.bdRead = false + return +} + +func (d *bincDecDriver) DecodeString() (s string) { + // DecodeBytes does not accomodate symbols, whose impl stores string version in map. + // Use decStringAndBytes directly. + // return string(d.DecodeBytes(d.b[:], true, true)) + _, s = d.decStringAndBytes(d.b[:], true, true) + return +} + +func (d *bincDecDriver) DecodeBytes(bs []byte, isstring, zerocopy bool) (bsOut []byte) { + if isstring { + bsOut, _ = d.decStringAndBytes(bs, false, zerocopy) + return + } + if !d.bdRead { + d.readNextBd() + } + if d.bd == bincVdSpecial<<4|bincSpNil { + d.bdRead = false + return nil + } + var clen int + if d.vd == bincVdString || d.vd == bincVdByteArray { + clen = d.decLen() + } else { + d.d.errorf("Invalid d.vd for bytes. 
Expecting string:0x%x or bytearray:0x%x. Got: 0x%x", + bincVdString, bincVdByteArray, d.vd) + return + } + d.bdRead = false + if zerocopy { + if d.br { + return d.r.readx(clen) + } else if len(bs) == 0 { + bs = d.b[:] + } + } + return decByteSlice(d.r, clen, bs) +} + +func (d *bincDecDriver) DecodeExt(rv interface{}, xtag uint64, ext Ext) (realxtag uint64) { + if xtag > 0xff { + d.d.errorf("decodeExt: tag must be <= 0xff; got: %v", xtag) + return + } + realxtag1, xbs := d.decodeExtV(ext != nil, uint8(xtag)) + realxtag = uint64(realxtag1) + if ext == nil { + re := rv.(*RawExt) + re.Tag = realxtag + re.Data = detachZeroCopyBytes(d.br, re.Data, xbs) + } else { + ext.ReadExt(rv, xbs) + } + return +} + +func (d *bincDecDriver) decodeExtV(verifyTag bool, tag byte) (xtag byte, xbs []byte) { + if !d.bdRead { + d.readNextBd() + } + if d.vd == bincVdCustomExt { + l := d.decLen() + xtag = d.r.readn1() + if verifyTag && xtag != tag { + d.d.errorf("Wrong extension tag. Got %b. Expecting: %v", xtag, tag) + return + } + xbs = d.r.readx(l) + } else if d.vd == bincVdByteArray { + xbs = d.DecodeBytes(nil, false, true) + } else { + d.d.errorf("Invalid d.vd for extensions (Expecting extensions or byte array). Got: 0x%x", d.vd) + return + } + d.bdRead = false + return +} + +func (d *bincDecDriver) DecodeNaked() { + if !d.bdRead { + d.readNextBd() + } + + n := &d.d.n + var decodeFurther bool + + switch d.vd { + case bincVdSpecial: + switch d.vs { + case bincSpNil: + n.v = valueTypeNil + case bincSpFalse: + n.v = valueTypeBool + n.b = false + case bincSpTrue: + n.v = valueTypeBool + n.b = true + case bincSpNan: + n.v = valueTypeFloat + n.f = math.NaN() + case bincSpPosInf: + n.v = valueTypeFloat + n.f = math.Inf(1) + case bincSpNegInf: + n.v = valueTypeFloat + n.f = math.Inf(-1) + case bincSpZeroFloat: + n.v = valueTypeFloat + n.f = float64(0) + case bincSpZero: + n.v = valueTypeUint + n.u = uint64(0) // int8(0) + case bincSpNegOne: + n.v = valueTypeInt + n.i = int64(-1) // int8(-1) + default: + d.d.errorf("decodeNaked: Unrecognized special value 0x%x", d.vs) + } + case bincVdSmallInt: + n.v = valueTypeUint + n.u = uint64(int8(d.vs)) + 1 // int8(d.vs) + 1 + case bincVdPosInt: + n.v = valueTypeUint + n.u = d.decUint() + case bincVdNegInt: + n.v = valueTypeInt + n.i = -(int64(d.decUint())) + case bincVdFloat: + n.v = valueTypeFloat + n.f = d.decFloat() + case bincVdSymbol: + n.v = valueTypeSymbol + n.s = d.DecodeString() + case bincVdString: + n.v = valueTypeString + n.s = d.DecodeString() + case bincVdByteArray: + n.v = valueTypeBytes + n.l = d.DecodeBytes(nil, false, false) + case bincVdTimestamp: + n.v = valueTypeTimestamp + tt, err := decodeTime(d.r.readx(int(d.vs))) + if err != nil { + panic(err) + } + n.t = tt + case bincVdCustomExt: + n.v = valueTypeExt + l := d.decLen() + n.u = uint64(d.r.readn1()) + n.l = d.r.readx(l) + case bincVdArray: + n.v = valueTypeArray + decodeFurther = true + case bincVdMap: + n.v = valueTypeMap + decodeFurther = true + default: + d.d.errorf("decodeNaked: Unrecognized d.vd: 0x%x", d.vd) + } + + if !decodeFurther { + d.bdRead = false + } + if n.v == valueTypeUint && d.h.SignedInteger { + n.v = valueTypeInt + n.i = int64(n.u) + } + return +} + +//------------------------------------ + +//BincHandle is a Handle for the Binc Schema-Free Encoding Format +//defined at https://github.com/ugorji/binc . +// +//BincHandle currently supports all Binc features with the following EXCEPTIONS: +// - only integers up to 64 bits of precision are supported. +// big integers are unsupported. 
+// - Only IEEE 754 binary32 and binary64 floats are supported (ie Go float32 and float64 types). +// extended precision and decimal IEEE 754 floats are unsupported. +// - Only UTF-8 strings supported. +// Unicode_Other Binc types (UTF16, UTF32) are currently unsupported. +// +//Note that these EXCEPTIONS are temporary and full support is possible and may happen soon. +type BincHandle struct { + BasicHandle + binaryEncodingType +} + +func (h *BincHandle) SetBytesExt(rt reflect.Type, tag uint64, ext BytesExt) (err error) { + return h.SetExt(rt, tag, &setExtWrapper{b: ext}) +} + +func (h *BincHandle) newEncDriver(e *Encoder) encDriver { + return &bincEncDriver{e: e, w: e.w} +} + +func (h *BincHandle) newDecDriver(d *Decoder) decDriver { + return &bincDecDriver{d: d, r: d.r, h: h, br: d.bytes} +} + +func (e *bincEncDriver) reset() { + e.w = e.e.w +} + +func (d *bincDecDriver) reset() { + d.r = d.d.r +} + +var _ decDriver = (*bincDecDriver)(nil) +var _ encDriver = (*bincEncDriver)(nil) diff --git a/vendor/github.com/coreos/etcd/Godeps/_workspace/src/github.com/ugorji/go/codec/cbor.go b/vendor/github.com/coreos/etcd/Godeps/_workspace/src/github.com/ugorji/go/codec/cbor.go new file mode 100644 index 000000000..0e5d32b2e --- /dev/null +++ b/vendor/github.com/coreos/etcd/Godeps/_workspace/src/github.com/ugorji/go/codec/cbor.go @@ -0,0 +1,584 @@ +// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved. +// Use of this source code is governed by a MIT license found in the LICENSE file. + +package codec + +import ( + "math" + "reflect" +) + +const ( + cborMajorUint byte = iota + cborMajorNegInt + cborMajorBytes + cborMajorText + cborMajorArray + cborMajorMap + cborMajorTag + cborMajorOther +) + +const ( + cborBdFalse byte = 0xf4 + iota + cborBdTrue + cborBdNil + cborBdUndefined + cborBdExt + cborBdFloat16 + cborBdFloat32 + cborBdFloat64 +) + +const ( + cborBdIndefiniteBytes byte = 0x5f + cborBdIndefiniteString = 0x7f + cborBdIndefiniteArray = 0x9f + cborBdIndefiniteMap = 0xbf + cborBdBreak = 0xff +) + +const ( + CborStreamBytes byte = 0x5f + CborStreamString = 0x7f + CborStreamArray = 0x9f + CborStreamMap = 0xbf + CborStreamBreak = 0xff +) + +const ( + cborBaseUint byte = 0x00 + cborBaseNegInt = 0x20 + cborBaseBytes = 0x40 + cborBaseString = 0x60 + cborBaseArray = 0x80 + cborBaseMap = 0xa0 + cborBaseTag = 0xc0 + cborBaseSimple = 0xe0 +) + +// ------------------- + +type cborEncDriver struct { + noBuiltInTypes + encNoSeparator + e *Encoder + w encWriter + h *CborHandle + x [8]byte +} + +func (e *cborEncDriver) EncodeNil() { + e.w.writen1(cborBdNil) +} + +func (e *cborEncDriver) EncodeBool(b bool) { + if b { + e.w.writen1(cborBdTrue) + } else { + e.w.writen1(cborBdFalse) + } +} + +func (e *cborEncDriver) EncodeFloat32(f float32) { + e.w.writen1(cborBdFloat32) + bigenHelper{e.x[:4], e.w}.writeUint32(math.Float32bits(f)) +} + +func (e *cborEncDriver) EncodeFloat64(f float64) { + e.w.writen1(cborBdFloat64) + bigenHelper{e.x[:8], e.w}.writeUint64(math.Float64bits(f)) +} + +func (e *cborEncDriver) encUint(v uint64, bd byte) { + if v <= 0x17 { + e.w.writen1(byte(v) + bd) + } else if v <= math.MaxUint8 { + e.w.writen2(bd+0x18, uint8(v)) + } else if v <= math.MaxUint16 { + e.w.writen1(bd + 0x19) + bigenHelper{e.x[:2], e.w}.writeUint16(uint16(v)) + } else if v <= math.MaxUint32 { + e.w.writen1(bd + 0x1a) + bigenHelper{e.x[:4], e.w}.writeUint32(uint32(v)) + } else { // if v <= math.MaxUint64 { + e.w.writen1(bd + 0x1b) + bigenHelper{e.x[:8], e.w}.writeUint64(v) + } +} + +func (e *cborEncDriver) EncodeInt(v 
int64) { + if v < 0 { + e.encUint(uint64(-1-v), cborBaseNegInt) + } else { + e.encUint(uint64(v), cborBaseUint) + } +} + +func (e *cborEncDriver) EncodeUint(v uint64) { + e.encUint(v, cborBaseUint) +} + +func (e *cborEncDriver) encLen(bd byte, length int) { + e.encUint(uint64(length), bd) +} + +func (e *cborEncDriver) EncodeExt(rv interface{}, xtag uint64, ext Ext, en *Encoder) { + e.encUint(uint64(xtag), cborBaseTag) + if v := ext.ConvertExt(rv); v == nil { + e.EncodeNil() + } else { + en.encode(v) + } +} + +func (e *cborEncDriver) EncodeRawExt(re *RawExt, en *Encoder) { + e.encUint(uint64(re.Tag), cborBaseTag) + if re.Data != nil { + en.encode(re.Data) + } else if re.Value == nil { + e.EncodeNil() + } else { + en.encode(re.Value) + } +} + +func (e *cborEncDriver) EncodeArrayStart(length int) { + e.encLen(cborBaseArray, length) +} + +func (e *cborEncDriver) EncodeMapStart(length int) { + e.encLen(cborBaseMap, length) +} + +func (e *cborEncDriver) EncodeString(c charEncoding, v string) { + e.encLen(cborBaseString, len(v)) + e.w.writestr(v) +} + +func (e *cborEncDriver) EncodeSymbol(v string) { + e.EncodeString(c_UTF8, v) +} + +func (e *cborEncDriver) EncodeStringBytes(c charEncoding, v []byte) { + if c == c_RAW { + e.encLen(cborBaseBytes, len(v)) + } else { + e.encLen(cborBaseString, len(v)) + } + e.w.writeb(v) +} + +// ---------------------- + +type cborDecDriver struct { + d *Decoder + h *CborHandle + r decReader + b [scratchByteArrayLen]byte + br bool // bytes reader + bdRead bool + bd byte + noBuiltInTypes + decNoSeparator +} + +func (d *cborDecDriver) readNextBd() { + d.bd = d.r.readn1() + d.bdRead = true +} + +func (d *cborDecDriver) ContainerType() (vt valueType) { + if d.bd == cborBdNil { + return valueTypeNil + } else if d.bd == cborBdIndefiniteBytes || (d.bd >= cborBaseBytes && d.bd < cborBaseString) { + return valueTypeBytes + } else if d.bd == cborBdIndefiniteString || (d.bd >= cborBaseString && d.bd < cborBaseArray) { + return valueTypeString + } else if d.bd == cborBdIndefiniteArray || (d.bd >= cborBaseArray && d.bd < cborBaseMap) { + return valueTypeArray + } else if d.bd == cborBdIndefiniteMap || (d.bd >= cborBaseMap && d.bd < cborBaseTag) { + return valueTypeMap + } else { + // d.d.errorf("isContainerType: unsupported parameter: %v", vt) + } + return valueTypeUnset +} + +func (d *cborDecDriver) TryDecodeAsNil() bool { + if !d.bdRead { + d.readNextBd() + } + // treat Nil and Undefined as nil values + if d.bd == cborBdNil || d.bd == cborBdUndefined { + d.bdRead = false + return true + } + return false +} + +func (d *cborDecDriver) CheckBreak() bool { + if !d.bdRead { + d.readNextBd() + } + if d.bd == cborBdBreak { + d.bdRead = false + return true + } + return false +} + +func (d *cborDecDriver) decUint() (ui uint64) { + v := d.bd & 0x1f + if v <= 0x17 { + ui = uint64(v) + } else { + if v == 0x18 { + ui = uint64(d.r.readn1()) + } else if v == 0x19 { + ui = uint64(bigen.Uint16(d.r.readx(2))) + } else if v == 0x1a { + ui = uint64(bigen.Uint32(d.r.readx(4))) + } else if v == 0x1b { + ui = uint64(bigen.Uint64(d.r.readx(8))) + } else { + d.d.errorf("decUint: Invalid descriptor: %v", d.bd) + return + } + } + return +} + +func (d *cborDecDriver) decCheckInteger() (neg bool) { + if !d.bdRead { + d.readNextBd() + } + major := d.bd >> 5 + if major == cborMajorUint { + } else if major == cborMajorNegInt { + neg = true + } else { + d.d.errorf("invalid major: %v (bd: %v)", major, d.bd) + return + } + return +} + +func (d *cborDecDriver) DecodeInt(bitsize uint8) (i int64) { + neg := 
d.decCheckInteger() + ui := d.decUint() + // check if this number can be converted to an int without overflow + var overflow bool + if neg { + if i, overflow = chkOvf.SignedInt(ui + 1); overflow { + d.d.errorf("cbor: overflow converting %v to signed integer", ui+1) + return + } + i = -i + } else { + if i, overflow = chkOvf.SignedInt(ui); overflow { + d.d.errorf("cbor: overflow converting %v to signed integer", ui) + return + } + } + if chkOvf.Int(i, bitsize) { + d.d.errorf("cbor: overflow integer: %v", i) + return + } + d.bdRead = false + return +} + +func (d *cborDecDriver) DecodeUint(bitsize uint8) (ui uint64) { + if d.decCheckInteger() { + d.d.errorf("Assigning negative signed value to unsigned type") + return + } + ui = d.decUint() + if chkOvf.Uint(ui, bitsize) { + d.d.errorf("cbor: overflow integer: %v", ui) + return + } + d.bdRead = false + return +} + +func (d *cborDecDriver) DecodeFloat(chkOverflow32 bool) (f float64) { + if !d.bdRead { + d.readNextBd() + } + if bd := d.bd; bd == cborBdFloat16 { + f = float64(math.Float32frombits(halfFloatToFloatBits(bigen.Uint16(d.r.readx(2))))) + } else if bd == cborBdFloat32 { + f = float64(math.Float32frombits(bigen.Uint32(d.r.readx(4)))) + } else if bd == cborBdFloat64 { + f = math.Float64frombits(bigen.Uint64(d.r.readx(8))) + } else if bd >= cborBaseUint && bd < cborBaseBytes { + f = float64(d.DecodeInt(64)) + } else { + d.d.errorf("Float only valid from float16/32/64: Invalid descriptor: %v", bd) + return + } + if chkOverflow32 && chkOvf.Float32(f) { + d.d.errorf("cbor: float32 overflow: %v", f) + return + } + d.bdRead = false + return +} + +// bool can be decoded from bool only (single byte). +func (d *cborDecDriver) DecodeBool() (b bool) { + if !d.bdRead { + d.readNextBd() + } + if bd := d.bd; bd == cborBdTrue { + b = true + } else if bd == cborBdFalse { + } else { + d.d.errorf("Invalid single-byte value for bool: %s: %x", msgBadDesc, d.bd) + return + } + d.bdRead = false + return +} + +func (d *cborDecDriver) ReadMapStart() (length int) { + d.bdRead = false + if d.bd == cborBdIndefiniteMap { + return -1 + } + return d.decLen() +} + +func (d *cborDecDriver) ReadArrayStart() (length int) { + d.bdRead = false + if d.bd == cborBdIndefiniteArray { + return -1 + } + return d.decLen() +} + +func (d *cborDecDriver) decLen() int { + return int(d.decUint()) +} + +func (d *cborDecDriver) decAppendIndefiniteBytes(bs []byte) []byte { + d.bdRead = false + for { + if d.CheckBreak() { + break + } + if major := d.bd >> 5; major != cborMajorBytes && major != cborMajorText { + d.d.errorf("cbor: expect bytes or string major type in indefinite string/bytes; got: %v, byte: %v", major, d.bd) + return nil + } + n := d.decLen() + oldLen := len(bs) + newLen := oldLen + n + if newLen > cap(bs) { + bs2 := make([]byte, newLen, 2*cap(bs)+n) + copy(bs2, bs) + bs = bs2 + } else { + bs = bs[:newLen] + } + d.r.readb(bs[oldLen:newLen]) + // bs = append(bs, d.r.readn()...) 
+ d.bdRead = false + } + d.bdRead = false + return bs +} + +func (d *cborDecDriver) DecodeBytes(bs []byte, isstring, zerocopy bool) (bsOut []byte) { + if !d.bdRead { + d.readNextBd() + } + if d.bd == cborBdNil || d.bd == cborBdUndefined { + d.bdRead = false + return nil + } + if d.bd == cborBdIndefiniteBytes || d.bd == cborBdIndefiniteString { + if bs == nil { + return d.decAppendIndefiniteBytes(nil) + } + return d.decAppendIndefiniteBytes(bs[:0]) + } + clen := d.decLen() + d.bdRead = false + if zerocopy { + if d.br { + return d.r.readx(clen) + } else if len(bs) == 0 { + bs = d.b[:] + } + } + return decByteSlice(d.r, clen, bs) +} + +func (d *cborDecDriver) DecodeString() (s string) { + return string(d.DecodeBytes(d.b[:], true, true)) +} + +func (d *cborDecDriver) DecodeExt(rv interface{}, xtag uint64, ext Ext) (realxtag uint64) { + if !d.bdRead { + d.readNextBd() + } + u := d.decUint() + d.bdRead = false + realxtag = u + if ext == nil { + re := rv.(*RawExt) + re.Tag = realxtag + d.d.decode(&re.Value) + } else if xtag != realxtag { + d.d.errorf("Wrong extension tag. Got %b. Expecting: %v", realxtag, xtag) + return + } else { + var v interface{} + d.d.decode(&v) + ext.UpdateExt(rv, v) + } + d.bdRead = false + return +} + +func (d *cborDecDriver) DecodeNaked() { + if !d.bdRead { + d.readNextBd() + } + + n := &d.d.n + var decodeFurther bool + + switch d.bd { + case cborBdNil: + n.v = valueTypeNil + case cborBdFalse: + n.v = valueTypeBool + n.b = false + case cborBdTrue: + n.v = valueTypeBool + n.b = true + case cborBdFloat16, cborBdFloat32: + n.v = valueTypeFloat + n.f = d.DecodeFloat(true) + case cborBdFloat64: + n.v = valueTypeFloat + n.f = d.DecodeFloat(false) + case cborBdIndefiniteBytes: + n.v = valueTypeBytes + n.l = d.DecodeBytes(nil, false, false) + case cborBdIndefiniteString: + n.v = valueTypeString + n.s = d.DecodeString() + case cborBdIndefiniteArray: + n.v = valueTypeArray + decodeFurther = true + case cborBdIndefiniteMap: + n.v = valueTypeMap + decodeFurther = true + default: + switch { + case d.bd >= cborBaseUint && d.bd < cborBaseNegInt: + if d.h.SignedInteger { + n.v = valueTypeInt + n.i = d.DecodeInt(64) + } else { + n.v = valueTypeUint + n.u = d.DecodeUint(64) + } + case d.bd >= cborBaseNegInt && d.bd < cborBaseBytes: + n.v = valueTypeInt + n.i = d.DecodeInt(64) + case d.bd >= cborBaseBytes && d.bd < cborBaseString: + n.v = valueTypeBytes + n.l = d.DecodeBytes(nil, false, false) + case d.bd >= cborBaseString && d.bd < cborBaseArray: + n.v = valueTypeString + n.s = d.DecodeString() + case d.bd >= cborBaseArray && d.bd < cborBaseMap: + n.v = valueTypeArray + decodeFurther = true + case d.bd >= cborBaseMap && d.bd < cborBaseTag: + n.v = valueTypeMap + decodeFurther = true + case d.bd >= cborBaseTag && d.bd < cborBaseSimple: + n.v = valueTypeExt + n.u = d.decUint() + n.l = nil + d.bdRead = false + // d.d.decode(&re.Value) // handled by decode itself. + // decodeFurther = true + default: + d.d.errorf("decodeNaked: Unrecognized d.bd: 0x%x", d.bd) + return + } + } + + if !decodeFurther { + d.bdRead = false + } + return +} + +// ------------------------- + +// CborHandle is a Handle for the CBOR encoding format, +// defined at http://tools.ietf.org/html/rfc7049 and documented further at http://cbor.io . +// +// CBOR is comprehensively supported, including support for: +// - indefinite-length arrays/maps/bytes/strings +// - (extension) tags in range 0..0xffff (0 .. 
65535) +// - half, single and double-precision floats +// - all numbers (1, 2, 4 and 8-byte signed and unsigned integers) +// - nil, true, false, ... +// - arrays and maps, bytes and text strings +// +// None of the optional extensions (with tags) defined in the spec are supported out-of-the-box. +// Users can implement them as needed (using SetExt), including spec-documented ones: +// - timestamp, BigNum, BigFloat, Decimals, Encoded Text (e.g. URL, regexp, base64, MIME Message), etc. +// +// To encode with indefinite lengths (streaming), users will use +// (Must)Encode methods of *Encoder, along with writing CborStreamXXX constants. +// +// For example, to encode "one-byte" as an indefinite length string: +// var buf bytes.Buffer +// e := NewEncoder(&buf, new(CborHandle)) +// buf.WriteByte(CborStreamString) +// e.MustEncode("one-") +// e.MustEncode("byte") +// buf.WriteByte(CborStreamBreak) +// encodedBytes := buf.Bytes() +// var vv interface{} +// NewDecoderBytes(buf.Bytes(), new(CborHandle)).MustDecode(&vv) +// // Now, vv contains the same string "one-byte" +// +type CborHandle struct { + binaryEncodingType + BasicHandle +} + +func (h *CborHandle) SetInterfaceExt(rt reflect.Type, tag uint64, ext InterfaceExt) (err error) { + return h.SetExt(rt, tag, &setExtWrapper{i: ext}) +} + +func (h *CborHandle) newEncDriver(e *Encoder) encDriver { + return &cborEncDriver{e: e, w: e.w, h: h} +} + +func (h *CborHandle) newDecDriver(d *Decoder) decDriver { + return &cborDecDriver{d: d, r: d.r, h: h, br: d.bytes} +} + +func (e *cborEncDriver) reset() { + e.w = e.e.w +} + +func (d *cborDecDriver) reset() { + d.r = d.d.r +} + +var _ decDriver = (*cborDecDriver)(nil) +var _ encDriver = (*cborEncDriver)(nil) diff --git a/vendor/github.com/coreos/etcd/Godeps/_workspace/src/github.com/ugorji/go/codec/cbor_test.go b/vendor/github.com/coreos/etcd/Godeps/_workspace/src/github.com/ugorji/go/codec/cbor_test.go new file mode 100644 index 000000000..205dffa7d --- /dev/null +++ b/vendor/github.com/coreos/etcd/Godeps/_workspace/src/github.com/ugorji/go/codec/cbor_test.go @@ -0,0 +1,205 @@ +// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved. +// Use of this source code is governed by a MIT license found in the LICENSE file. 
+ +package codec + +import ( + "bufio" + "bytes" + "encoding/hex" + "math" + "os" + "regexp" + "strings" + "testing" +) + +func TestCborIndefiniteLength(t *testing.T) { + oldMapType := testCborH.MapType + defer func() { + testCborH.MapType = oldMapType + }() + testCborH.MapType = testMapStrIntfTyp + // var ( + // M1 map[string][]byte + // M2 map[uint64]bool + // L1 []interface{} + // S1 []string + // B1 []byte + // ) + var v, vv interface{} + // define it (v), encode it using indefinite lengths, decode it (vv), compare v to vv + v = map[string]interface{}{ + "one-byte-key": []byte{1, 2, 3, 4, 5, 6}, + "two-string-key": "two-value", + "three-list-key": []interface{}{true, false, uint64(1), int64(-1)}, + } + var buf bytes.Buffer + // buf.Reset() + e := NewEncoder(&buf, testCborH) + buf.WriteByte(cborBdIndefiniteMap) + //---- + buf.WriteByte(cborBdIndefiniteString) + e.MustEncode("one-") + e.MustEncode("byte-") + e.MustEncode("key") + buf.WriteByte(cborBdBreak) + + buf.WriteByte(cborBdIndefiniteBytes) + e.MustEncode([]byte{1, 2, 3}) + e.MustEncode([]byte{4, 5, 6}) + buf.WriteByte(cborBdBreak) + + //---- + buf.WriteByte(cborBdIndefiniteString) + e.MustEncode("two-") + e.MustEncode("string-") + e.MustEncode("key") + buf.WriteByte(cborBdBreak) + + buf.WriteByte(cborBdIndefiniteString) + e.MustEncode([]byte("two-")) // encode as bytes, to check robustness of code + e.MustEncode([]byte("value")) + buf.WriteByte(cborBdBreak) + + //---- + buf.WriteByte(cborBdIndefiniteString) + e.MustEncode("three-") + e.MustEncode("list-") + e.MustEncode("key") + buf.WriteByte(cborBdBreak) + + buf.WriteByte(cborBdIndefiniteArray) + e.MustEncode(true) + e.MustEncode(false) + e.MustEncode(uint64(1)) + e.MustEncode(int64(-1)) + buf.WriteByte(cborBdBreak) + + buf.WriteByte(cborBdBreak) // close map + + NewDecoderBytes(buf.Bytes(), testCborH).MustDecode(&vv) + if err := deepEqual(v, vv); err != nil { + logT(t, "-------- Before and After marshal do not match: Error: %v", err) + logT(t, " ....... GOLDEN: (%T) %#v", v, v) + logT(t, " ....... 
DECODED: (%T) %#v", vv, vv) + failT(t) + } +} + +type testCborGolden struct { + Base64 string `codec:"cbor"` + Hex string `codec:"hex"` + Roundtrip bool `codec:"roundtrip"` + Decoded interface{} `codec:"decoded"` + Diagnostic string `codec:"diagnostic"` + Skip bool `codec:"skip"` +} + +// Some tests are skipped because they include numbers outside the range of int64/uint64 +func doTestCborGoldens(t *testing.T) { + oldMapType := testCborH.MapType + defer func() { + testCborH.MapType = oldMapType + }() + testCborH.MapType = testMapStrIntfTyp + // decode test-cbor-goldens.json into a list of []*testCborGolden + // for each one, + // - decode hex into []byte bs + // - decode bs into interface{} v + // - compare both using deepequal + // - for any miss, record it + var gs []*testCborGolden + f, err := os.Open("test-cbor-goldens.json") + if err != nil { + logT(t, "error opening test-cbor-goldens.json: %v", err) + failT(t) + } + defer f.Close() + jh := new(JsonHandle) + jh.MapType = testMapStrIntfTyp + // d := NewDecoder(f, jh) + d := NewDecoder(bufio.NewReader(f), jh) + // err = d.Decode(&gs) + d.MustDecode(&gs) + if err != nil { + logT(t, "error json decoding test-cbor-goldens.json: %v", err) + failT(t) + } + + tagregex := regexp.MustCompile(`[\d]+\(.+?\)`) + hexregex := regexp.MustCompile(`h'([0-9a-fA-F]*)'`) + for i, g := range gs { + // fmt.Printf("%v, skip: %v, isTag: %v, %s\n", i, g.Skip, tagregex.MatchString(g.Diagnostic), g.Diagnostic) + // skip tags or simple or those with prefix, as we can't verify them. + if g.Skip || strings.HasPrefix(g.Diagnostic, "simple(") || tagregex.MatchString(g.Diagnostic) { + // fmt.Printf("%v: skipped\n", i) + logT(t, "[%v] skipping because skip=true OR unsupported simple value or Tag Value", i) + continue + } + // println("++++++++++++", i, "g.Diagnostic", g.Diagnostic) + if hexregex.MatchString(g.Diagnostic) { + // println(i, "g.Diagnostic matched hex") + if s2 := g.Diagnostic[2 : len(g.Diagnostic)-1]; s2 == "" { + g.Decoded = zeroByteSlice + } else if bs2, err2 := hex.DecodeString(s2); err2 == nil { + g.Decoded = bs2 + } + // fmt.Printf("%v: hex: %v\n", i, g.Decoded) + } + bs, err := hex.DecodeString(g.Hex) + if err != nil { + logT(t, "[%v] error hex decoding %s [%v]: %v", i, g.Hex, err) + failT(t) + } + var v interface{} + NewDecoderBytes(bs, testCborH).MustDecode(&v) + if _, ok := v.(RawExt); ok { + continue + } + // check the diagnostics to compare + switch g.Diagnostic { + case "Infinity": + b := math.IsInf(v.(float64), 1) + testCborError(t, i, math.Inf(1), v, nil, &b) + case "-Infinity": + b := math.IsInf(v.(float64), -1) + testCborError(t, i, math.Inf(-1), v, nil, &b) + case "NaN": + // println(i, "checking NaN") + b := math.IsNaN(v.(float64)) + testCborError(t, i, math.NaN(), v, nil, &b) + case "undefined": + b := v == nil + testCborError(t, i, nil, v, nil, &b) + default: + v0 := g.Decoded + // testCborCoerceJsonNumber(reflect.ValueOf(&v0)) + testCborError(t, i, v0, v, deepEqual(v0, v), nil) + } + } +} + +func testCborError(t *testing.T, i int, v0, v1 interface{}, err error, equal *bool) { + if err == nil && equal == nil { + // fmt.Printf("%v testCborError passed (err and equal nil)\n", i) + return + } + if err != nil { + logT(t, "[%v] deepEqual error: %v", i, err) + logT(t, " ....... GOLDEN: (%T) %#v", v0, v0) + logT(t, " ....... DECODED: (%T) %#v", v1, v1) + failT(t) + } + if equal != nil && !*equal { + logT(t, "[%v] values not equal", i) + logT(t, " ....... GOLDEN: (%T) %#v", v0, v0) + logT(t, " ....... 
DECODED: (%T) %#v", v1, v1) + failT(t) + } + // fmt.Printf("%v testCborError passed (checks passed)\n", i) +} + +func TestCborGoldens(t *testing.T) { + doTestCborGoldens(t) +} diff --git a/vendor/github.com/coreos/etcd/Godeps/_workspace/src/github.com/ugorji/go/codec/codec_test.go b/vendor/github.com/coreos/etcd/Godeps/_workspace/src/github.com/ugorji/go/codec/codec_test.go new file mode 100644 index 000000000..ab14e2d01 --- /dev/null +++ b/vendor/github.com/coreos/etcd/Godeps/_workspace/src/github.com/ugorji/go/codec/codec_test.go @@ -0,0 +1,1185 @@ +// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved. +// Use of this source code is governed by a MIT license found in the LICENSE file. + +package codec + +// Test works by using a slice of interfaces. +// It can test for encoding/decoding into/from a nil interface{} +// or passing the object to encode/decode into. +// +// There are basically 2 main tests here. +// First test internally encodes and decodes things and verifies that +// the artifact was as expected. +// Second test will use python msgpack to create a bunch of golden files, +// read those files, and compare them to what it should be. It then +// writes those files back out and compares the byte streams. +// +// Taken together, the tests are pretty extensive. +// +// The following manual tests must be done: +// - TestCodecUnderlyingType +// - Set fastpathEnabled to false and run tests (to ensure that regular reflection works). +// We don't want to use a variable there so that code is ellided. + +import ( + "bytes" + "encoding/gob" + "flag" + "fmt" + "io/ioutil" + "math" + "math/rand" + "net" + "net/rpc" + "os" + "os/exec" + "path/filepath" + "reflect" + "runtime" + "strconv" + "sync/atomic" + "testing" + "time" +) + +func init() { + testInitFlags() + testPreInitFns = append(testPreInitFns, testInit) +} + +type testVerifyArg int + +const ( + testVerifyMapTypeSame testVerifyArg = iota + testVerifyMapTypeStrIntf + testVerifyMapTypeIntfIntf + // testVerifySliceIntf + testVerifyForPython +) + +const testSkipRPCTests = false + +var ( + testVerbose bool + testInitDebug bool + testUseIoEncDec bool + testStructToArray bool + testCanonical bool + testUseReset bool + testWriteNoSymbols bool + testSkipIntf bool + testInternStr bool + testUseMust bool + + skipVerifyVal interface{} = &(struct{}{}) + + testMapStrIntfTyp = reflect.TypeOf(map[string]interface{}(nil)) + + // For Go Time, do not use a descriptive timezone. + // It's unnecessary, and makes it harder to do a reflect.DeepEqual. + // The Offset already tells what the offset should be, if not on UTC and unknown zone name. 
+ timeLoc = time.FixedZone("", -8*60*60) // UTC-08:00 //time.UTC-8 + timeToCompare1 = time.Date(2012, 2, 2, 2, 2, 2, 2000, timeLoc).UTC() + timeToCompare2 = time.Date(1900, 2, 2, 2, 2, 2, 2000, timeLoc).UTC() + timeToCompare3 = time.Unix(0, 270).UTC() // use value that must be encoded as uint64 for nanoseconds (for cbor/msgpack comparison) + //timeToCompare4 = time.Time{}.UTC() // does not work well with simple cbor time encoding (overflow) + timeToCompare4 = time.Unix(-2013855848, 4223).UTC() + + table []interface{} // main items we encode + tableVerify []interface{} // we verify encoded things against this after decode + tableTestNilVerify []interface{} // for nil interface, use this to verify (rules are different) + tablePythonVerify []interface{} // for verifying for python, since Python sometimes + // will encode a float32 as float64, or large int as uint + testRpcInt = new(TestRpcInt) +) + +func testInitFlags() { + // delete(testDecOpts.ExtFuncs, timeTyp) + flag.BoolVar(&testVerbose, "tv", false, "Test Verbose") + flag.BoolVar(&testInitDebug, "tg", false, "Test Init Debug") + flag.BoolVar(&testUseIoEncDec, "ti", false, "Use IO Reader/Writer for Marshal/Unmarshal") + flag.BoolVar(&testStructToArray, "ts", false, "Set StructToArray option") + flag.BoolVar(&testWriteNoSymbols, "tn", false, "Set NoSymbols option") + flag.BoolVar(&testCanonical, "tc", false, "Set Canonical option") + flag.BoolVar(&testInternStr, "te", false, "Set InternStr option") + flag.BoolVar(&testSkipIntf, "tf", false, "Skip Interfaces") + flag.BoolVar(&testUseReset, "tr", false, "Use Reset") + flag.BoolVar(&testUseMust, "tm", true, "Use Must(En|De)code") +} + +func testByteBuf(in []byte) *bytes.Buffer { + return bytes.NewBuffer(in) +} + +type TestABC struct { + A, B, C string +} + +type TestRpcInt struct { + i int +} + +func (r *TestRpcInt) Update(n int, res *int) error { r.i = n; *res = r.i; return nil } +func (r *TestRpcInt) Square(ignore int, res *int) error { *res = r.i * r.i; return nil } +func (r *TestRpcInt) Mult(n int, res *int) error { *res = r.i * n; return nil } +func (r *TestRpcInt) EchoStruct(arg TestABC, res *string) error { + *res = fmt.Sprintf("%#v", arg) + return nil +} +func (r *TestRpcInt) Echo123(args []string, res *string) error { + *res = fmt.Sprintf("%#v", args) + return nil +} + +type testUnixNanoTimeExt struct { + // keep timestamp here, so that do not incur interface-conversion costs + ts int64 +} + +// func (x *testUnixNanoTimeExt) WriteExt(interface{}) []byte { panic("unsupported") } +// func (x *testUnixNanoTimeExt) ReadExt(interface{}, []byte) { panic("unsupported") } +func (x *testUnixNanoTimeExt) ConvertExt(v interface{}) interface{} { + switch v2 := v.(type) { + case time.Time: + x.ts = v2.UTC().UnixNano() + case *time.Time: + x.ts = v2.UTC().UnixNano() + default: + panic(fmt.Sprintf("unsupported format for time conversion: expecting time.Time; got %T", v)) + } + return &x.ts +} +func (x *testUnixNanoTimeExt) UpdateExt(dest interface{}, v interface{}) { + // fmt.Printf("testUnixNanoTimeExt.UpdateExt: v: %v\n", v) + tt := dest.(*time.Time) + switch v2 := v.(type) { + case int64: + *tt = time.Unix(0, v2).UTC() + case *int64: + *tt = time.Unix(0, *v2).UTC() + case uint64: + *tt = time.Unix(0, int64(v2)).UTC() + case *uint64: + *tt = time.Unix(0, int64(*v2)).UTC() + //case float64: + //case string: + default: + panic(fmt.Sprintf("unsupported format for time conversion: expecting int64/uint64; got %T", v)) + } + // fmt.Printf("testUnixNanoTimeExt.UpdateExt: v: %v, tt: %#v\n", v, tt) +} + 
+func testVerifyVal(v interface{}, arg testVerifyArg) (v2 interface{}) { + //for python msgpack, + // - all positive integers are unsigned 64-bit ints + // - all floats are float64 + switch iv := v.(type) { + case int8: + if iv >= 0 { + v2 = uint64(iv) + } else { + v2 = int64(iv) + } + case int16: + if iv >= 0 { + v2 = uint64(iv) + } else { + v2 = int64(iv) + } + case int32: + if iv >= 0 { + v2 = uint64(iv) + } else { + v2 = int64(iv) + } + case int64: + if iv >= 0 { + v2 = uint64(iv) + } else { + v2 = int64(iv) + } + case uint8: + v2 = uint64(iv) + case uint16: + v2 = uint64(iv) + case uint32: + v2 = uint64(iv) + case uint64: + v2 = uint64(iv) + case float32: + v2 = float64(iv) + case float64: + v2 = float64(iv) + case []interface{}: + m2 := make([]interface{}, len(iv)) + for j, vj := range iv { + m2[j] = testVerifyVal(vj, arg) + } + v2 = m2 + case map[string]bool: + switch arg { + case testVerifyMapTypeSame: + m2 := make(map[string]bool) + for kj, kv := range iv { + m2[kj] = kv + } + v2 = m2 + case testVerifyMapTypeStrIntf, testVerifyForPython: + m2 := make(map[string]interface{}) + for kj, kv := range iv { + m2[kj] = kv + } + v2 = m2 + case testVerifyMapTypeIntfIntf: + m2 := make(map[interface{}]interface{}) + for kj, kv := range iv { + m2[kj] = kv + } + v2 = m2 + } + case map[string]interface{}: + switch arg { + case testVerifyMapTypeSame: + m2 := make(map[string]interface{}) + for kj, kv := range iv { + m2[kj] = testVerifyVal(kv, arg) + } + v2 = m2 + case testVerifyMapTypeStrIntf, testVerifyForPython: + m2 := make(map[string]interface{}) + for kj, kv := range iv { + m2[kj] = testVerifyVal(kv, arg) + } + v2 = m2 + case testVerifyMapTypeIntfIntf: + m2 := make(map[interface{}]interface{}) + for kj, kv := range iv { + m2[kj] = testVerifyVal(kv, arg) + } + v2 = m2 + } + case map[interface{}]interface{}: + m2 := make(map[interface{}]interface{}) + for kj, kv := range iv { + m2[testVerifyVal(kj, arg)] = testVerifyVal(kv, arg) + } + v2 = m2 + case time.Time: + switch arg { + case testVerifyForPython: + if iv2 := iv.UnixNano(); iv2 >= 0 { + v2 = uint64(iv2) + } else { + v2 = int64(iv2) + } + default: + v2 = v + } + default: + v2 = v + } + return +} + +func testInit() { + gob.Register(new(TestStruc)) + if testInitDebug { + ts0 := newTestStruc(2, false, !testSkipIntf, false) + fmt.Printf("====> depth: %v, ts: %#v\n", 2, ts0) + } + + for _, v := range testHandles { + bh := v.getBasicHandle() + bh.InternString = testInternStr + bh.Canonical = testCanonical + bh.StructToArray = testStructToArray + // mostly doing this for binc + if testWriteNoSymbols { + bh.AsSymbols = AsSymbolNone + } else { + bh.AsSymbols = AsSymbolAll + } + } + + testMsgpackH.RawToString = true + + // testMsgpackH.AddExt(byteSliceTyp, 0, testMsgpackH.BinaryEncodeExt, testMsgpackH.BinaryDecodeExt) + // testMsgpackH.AddExt(timeTyp, 1, testMsgpackH.TimeEncodeExt, testMsgpackH.TimeDecodeExt) + timeEncExt := func(rv reflect.Value) (bs []byte, err error) { + defer panicToErr(&err) + bs = timeExt{}.WriteExt(rv.Interface()) + return + } + timeDecExt := func(rv reflect.Value, bs []byte) (err error) { + defer panicToErr(&err) + timeExt{}.ReadExt(rv.Interface(), bs) + return + } + + // add extensions for msgpack, simple for time.Time, so we can encode/decode same way. + // use different flavors of XXXExt calls, including deprecated ones. 
+ testSimpleH.AddExt(timeTyp, 1, timeEncExt, timeDecExt)
+ testMsgpackH.SetBytesExt(timeTyp, 1, timeExt{})
+ testCborH.SetInterfaceExt(timeTyp, 1, &testUnixNanoTimeExt{})
+ testJsonH.SetInterfaceExt(timeTyp, 1, &testUnixNanoTimeExt{})
+
+ primitives := []interface{}{
+ int8(-8),
+ int16(-1616),
+ int32(-32323232),
+ int64(-6464646464646464),
+ uint8(192),
+ uint16(1616),
+ uint32(32323232),
+ uint64(6464646464646464),
+ byte(192),
+ float32(-3232.0),
+ float64(-6464646464.0),
+ float32(3232.0),
+ float64(6464646464.0),
+ false,
+ true,
+ nil,
+ "someday",
+ "",
+ "bytestring",
+ timeToCompare1,
+ timeToCompare2,
+ timeToCompare3,
+ timeToCompare4,
+ }
+ mapsAndStrucs := []interface{}{
+ map[string]bool{
+ "true": true,
+ "false": false,
+ },
+ map[string]interface{}{
+ "true": "True",
+ "false": false,
+ "uint16(1616)": uint16(1616),
+ },
+ //add a complex combo map in here. (map has list which has map)
+ //note that after the first thing, everything else should be generic.
+ map[string]interface{}{
+ "list": []interface{}{
+ int16(1616),
+ int32(32323232),
+ true,
+ float32(-3232.0),
+ map[string]interface{}{
+ "TRUE": true,
+ "FALSE": false,
+ },
+ []interface{}{true, false},
+ },
+ "int32": int32(32323232),
+ "bool": true,
+ "LONG STRING": "123456789012345678901234567890123456789012345678901234567890",
+ "SHORT STRING": "1234567890",
+ },
+ map[interface{}]interface{}{
+ true: "true",
+ uint8(138): false,
+ "false": uint8(200),
+ },
+ newTestStruc(0, false, !testSkipIntf, false),
+ }
+
+ table = []interface{}{}
+ table = append(table, primitives...) //0-22 are primitives
+ table = append(table, primitives) //23 is a list of primitives
+ table = append(table, mapsAndStrucs...) //24-27 are maps. 28 is a *struct
+
+ tableVerify = make([]interface{}, len(table))
+ tableTestNilVerify = make([]interface{}, len(table))
+ tablePythonVerify = make([]interface{}, len(table))
+
+ lp := len(primitives)
+ av := tableVerify
+ for i, v := range table {
+ if i == lp+3 {
+ av[i] = skipVerifyVal
+ continue
+ }
+ //av[i] = testVerifyVal(v, testVerifyMapTypeSame)
+ switch v.(type) {
+ case []interface{}:
+ av[i] = testVerifyVal(v, testVerifyMapTypeSame)
+ case map[string]interface{}:
+ av[i] = testVerifyVal(v, testVerifyMapTypeSame)
+ case map[interface{}]interface{}:
+ av[i] = testVerifyVal(v, testVerifyMapTypeSame)
+ default:
+ av[i] = v
+ }
+ }
+
+ av = tableTestNilVerify
+ for i, v := range table {
+ if i > lp+3 {
+ av[i] = skipVerifyVal
+ continue
+ }
+ av[i] = testVerifyVal(v, testVerifyMapTypeStrIntf)
+ }
+
+ av = tablePythonVerify
+ for i, v := range table {
+ if i > lp+3 {
+ av[i] = skipVerifyVal
+ continue
+ }
+ av[i] = testVerifyVal(v, testVerifyForPython)
+ }
+
+ tablePythonVerify = tablePythonVerify[:24]
+}
+
+func testUnmarshal(v interface{}, data []byte, h Handle) (err error) {
+ return testCodecDecode(data, v, h)
+}
+
+func testMarshal(v interface{}, h Handle) (bs []byte, err error) {
+ return testCodecEncode(v, nil, testByteBuf, h)
+}
+
+func testMarshalErr(v interface{}, h Handle, t *testing.T, name string) (bs []byte, err error) {
+ if bs, err = testMarshal(v, h); err != nil {
+ logT(t, "Error encoding %s: %v, Err: %v", name, v, err)
+ t.FailNow()
+ }
+ return
+}
+
+func testUnmarshalErr(v interface{}, data []byte, h Handle, t *testing.T, name string) (err error) {
+ if err = testUnmarshal(v, data, h); err != nil {
+ logT(t, "Error Decoding into %s: %v, Err: %v", name, v, err)
+ t.FailNow()
+ }
+ return
+}
+
+// doTestCodecTableOne allows us to test different variations based on 
arguments passed.
+func doTestCodecTableOne(t *testing.T, testNil bool, h Handle,
+ vs []interface{}, vsVerify []interface{}) {
+ //if testNil, then just test for when a pointer to a nil interface{} is passed. It should work.
+ //The current setup allows us to test (at least manually) the nil interface or typed interface.
+ logT(t, "================ TestNil: %v ================\n", testNil)
+ for i, v0 := range vs {
+ logT(t, "..............................................")
+ logT(t, " Testing: #%d: %T, %#v\n", i, v0, v0)
+ b0, err := testMarshalErr(v0, h, t, "v0")
+ if err != nil {
+ continue
+ }
+ if h.isBinary() {
+ logT(t, " Encoded bytes: len: %v, %v\n", len(b0), b0)
+ } else {
+ logT(t, " Encoded string: len: %v, %v\n", len(string(b0)), string(b0))
+ // println("########### encoded string: " + string(b0))
+ }
+ var v1 interface{}
+
+ if testNil {
+ err = testUnmarshal(&v1, b0, h)
+ } else {
+ if v0 != nil {
+ v0rt := reflect.TypeOf(v0) // ptr
+ rv1 := reflect.New(v0rt)
+ err = testUnmarshal(rv1.Interface(), b0, h)
+ v1 = rv1.Elem().Interface()
+ // v1 = reflect.Indirect(reflect.ValueOf(v1)).Interface()
+ }
+ }
+
+ logT(t, " v1 returned: %T, %#v", v1, v1)
+ // if v1 != nil {
+ // logT(t, " v1 returned: %T, %#v", v1, v1)
+ // //we always indirect, because ptr to typed value may be passed (if not testNil)
+ // v1 = reflect.Indirect(reflect.ValueOf(v1)).Interface()
+ // }
+ if err != nil {
+ logT(t, "-------- Error: %v. Partial return: %v", err, v1)
+ failT(t)
+ continue
+ }
+ v0check := vsVerify[i]
+ if v0check == skipVerifyVal {
+ logT(t, " Nil Check skipped: Decoded: %T, %#v\n", v1, v1)
+ continue
+ }
+
+ if err = deepEqual(v0check, v1); err == nil {
+ logT(t, "++++++++ Before and After marshal matched\n")
+ } else {
+ // logT(t, "-------- Before and After marshal do not match: Error: %v"+
+ // " ====> GOLDEN: (%T) %#v, DECODED: (%T) %#v\n", err, v0check, v0check, v1, v1)
+ logT(t, "-------- Before and After marshal do not match: Error: %v", err)
+ logT(t, " ....... GOLDEN: (%T) %#v", v0check, v0check)
+ logT(t, " ....... DECODED: (%T) %#v", v1, v1)
+ failT(t)
+ }
+ }
+}
+
+func testCodecTableOne(t *testing.T, h Handle) {
+ testOnce.Do(testInitAll)
+ // func TestMsgpackAllExperimental(t *testing.T) {
+ // dopts := testDecOpts(nil, nil, false, true, true),
+
+ idxTime, numPrim, numMap := 19, 23, 4
+ //println("#################")
+ switch v := h.(type) {
+ case *MsgpackHandle:
+ var oldWriteExt, oldRawToString bool
+ oldWriteExt, v.WriteExt = v.WriteExt, true
+ oldRawToString, v.RawToString = v.RawToString, true
+ doTestCodecTableOne(t, false, h, table, tableVerify)
+ v.WriteExt, v.RawToString = oldWriteExt, oldRawToString
+ case *JsonHandle:
+ //skip []interface{} containing time.Time, as it encodes as a number, but cannot decode back to time.Time.
+ //As there is no real support for extension tags in json, this must be skipped. 
+ doTestCodecTableOne(t, false, h, table[:numPrim], tableVerify[:numPrim]) + doTestCodecTableOne(t, false, h, table[numPrim+1:], tableVerify[numPrim+1:]) + default: + doTestCodecTableOne(t, false, h, table, tableVerify) + } + // func TestMsgpackAll(t *testing.T) { + + // //skip []interface{} containing time.Time + // doTestCodecTableOne(t, false, h, table[:numPrim], tableVerify[:numPrim]) + // doTestCodecTableOne(t, false, h, table[numPrim+1:], tableVerify[numPrim+1:]) + // func TestMsgpackNilStringMap(t *testing.T) { + var oldMapType reflect.Type + v := h.getBasicHandle() + + oldMapType, v.MapType = v.MapType, testMapStrIntfTyp + + //skip time.Time, []interface{} containing time.Time, last map, and newStruc + doTestCodecTableOne(t, true, h, table[:idxTime], tableTestNilVerify[:idxTime]) + doTestCodecTableOne(t, true, h, table[numPrim+1:numPrim+numMap], tableTestNilVerify[numPrim+1:numPrim+numMap]) + + v.MapType = oldMapType + + // func TestMsgpackNilIntf(t *testing.T) { + + //do newTestStruc and last element of map + doTestCodecTableOne(t, true, h, table[numPrim+numMap:], tableTestNilVerify[numPrim+numMap:]) + //TODO? What is this one? + //doTestCodecTableOne(t, true, h, table[17:18], tableTestNilVerify[17:18]) +} + +func testCodecMiscOne(t *testing.T, h Handle) { + testOnce.Do(testInitAll) + b, err := testMarshalErr(32, h, t, "32") + // Cannot do this nil one, because faster type assertion decoding will panic + // var i *int32 + // if err = testUnmarshal(b, i, nil); err == nil { + // logT(t, "------- Expecting error because we cannot unmarshal to int32 nil ptr") + // t.FailNow() + // } + var i2 int32 = 0 + err = testUnmarshalErr(&i2, b, h, t, "int32-ptr") + if i2 != int32(32) { + logT(t, "------- didn't unmarshal to 32: Received: %d", i2) + t.FailNow() + } + + // func TestMsgpackDecodePtr(t *testing.T) { + ts := newTestStruc(0, false, !testSkipIntf, false) + b, err = testMarshalErr(ts, h, t, "pointer-to-struct") + if len(b) < 40 { + logT(t, "------- Size must be > 40. Size: %d", len(b)) + t.FailNow() + } + if h.isBinary() { + logT(t, "------- b: %v", b) + } else { + logT(t, "------- b: %s", b) + } + ts2 := new(TestStruc) + err = testUnmarshalErr(ts2, b, h, t, "pointer-to-struct") + if ts2.I64 != math.MaxInt64*2/3 { + logT(t, "------- Unmarshal wrong. Expect I64 = 64. Got: %v", ts2.I64) + t.FailNow() + } + + // func TestMsgpackIntfDecode(t *testing.T) { + m := map[string]int{"A": 2, "B": 3} + p := []interface{}{m} + bs, err := testMarshalErr(p, h, t, "p") + + m2 := map[string]int{} + p2 := []interface{}{m2} + err = testUnmarshalErr(&p2, bs, h, t, "&p2") + + if m2["A"] != 2 || m2["B"] != 3 { + logT(t, "m2 not as expected: expecting: %v, got: %v", m, m2) + t.FailNow() + } + // log("m: %v, m2: %v, p: %v, p2: %v", m, m2, p, p2) + checkEqualT(t, p, p2, "p=p2") + checkEqualT(t, m, m2, "m=m2") + if err = deepEqual(p, p2); err == nil { + logT(t, "p and p2 match") + } else { + logT(t, "Not Equal: %v. p: %v, p2: %v", err, p, p2) + t.FailNow() + } + if err = deepEqual(m, m2); err == nil { + logT(t, "m and m2 match") + } else { + logT(t, "Not Equal: %v. 
m: %v, m2: %v", err, m, m2) + t.FailNow() + } + + // func TestMsgpackDecodeStructSubset(t *testing.T) { + // test that we can decode a subset of the stream + mm := map[string]interface{}{"A": 5, "B": 99, "C": 333} + bs, err = testMarshalErr(mm, h, t, "mm") + type ttt struct { + A uint8 + C int32 + } + var t2 ttt + testUnmarshalErr(&t2, bs, h, t, "t2") + t3 := ttt{5, 333} + checkEqualT(t, t2, t3, "t2=t3") + + // println(">>>>>") + // test simple arrays, non-addressable arrays, slices + type tarr struct { + A int64 + B [3]int64 + C []byte + D [3]byte + } + var tarr0 = tarr{1, [3]int64{2, 3, 4}, []byte{4, 5, 6}, [3]byte{7, 8, 9}} + // test both pointer and non-pointer (value) + for _, tarr1 := range []interface{}{tarr0, &tarr0} { + bs, err = testMarshalErr(tarr1, h, t, "tarr1") + if err != nil { + logT(t, "Error marshalling: %v", err) + t.FailNow() + } + if _, ok := h.(*JsonHandle); ok { + logT(t, "Marshal as: %s", bs) + } + var tarr2 tarr + testUnmarshalErr(&tarr2, bs, h, t, "tarr2") + checkEqualT(t, tarr0, tarr2, "tarr0=tarr2") + // fmt.Printf(">>>> err: %v. tarr1: %v, tarr2: %v\n", err, tarr0, tarr2) + } + + // test byte array, even if empty (msgpack only) + if h == testMsgpackH { + type ystruct struct { + Anarray []byte + } + var ya = ystruct{} + testUnmarshalErr(&ya, []byte{0x91, 0x90}, h, t, "ya") + } +} + +func testCodecEmbeddedPointer(t *testing.T, h Handle) { + testOnce.Do(testInitAll) + type Z int + type A struct { + AnInt int + } + type B struct { + *Z + *A + MoreInt int + } + var z Z = 4 + x1 := &B{&z, &A{5}, 6} + bs, err := testMarshalErr(x1, h, t, "x1") + // fmt.Printf("buf: len(%v): %x\n", buf.Len(), buf.Bytes()) + var x2 = new(B) + err = testUnmarshalErr(x2, bs, h, t, "x2") + err = checkEqualT(t, x1, x2, "x1=x2") + _ = err +} + +func testCodecUnderlyingType(t *testing.T, h Handle) { + testOnce.Do(testInitAll) + // Manual Test. + // Run by hand, with accompanying print statements in fast-path.go + // to ensure that the fast functions are called. + type T1 map[string]string + v := T1{"1": "1s", "2": "2s"} + var bs []byte + var err error + NewEncoderBytes(&bs, h).MustEncode(v) + if err != nil { + logT(t, "Error during encode: %v", err) + failT(t) + } + var v2 T1 + NewDecoderBytes(bs, h).MustDecode(&v2) + if err != nil { + logT(t, "Error during decode: %v", err) + failT(t) + } +} + +func testCodecChan(t *testing.T, h Handle) { + // - send a slice []*int64 (sl1) into an chan (ch1) with cap > len(s1) + // - encode ch1 as a stream array + // - decode a chan (ch2), with cap > len(s1) from the stream array + // - receive from ch2 into slice sl2 + // - compare sl1 and sl2 + // - do this for codecs: json, cbor (covers all types) + sl1 := make([]*int64, 4) + for i := range sl1 { + var j int64 = int64(i) + sl1[i] = &j + } + ch1 := make(chan *int64, 4) + for _, j := range sl1 { + ch1 <- j + } + var bs []byte + NewEncoderBytes(&bs, h).MustEncode(ch1) + // if !h.isBinary() { + // fmt.Printf("before: len(ch1): %v, bs: %s\n", len(ch1), bs) + // } + // var ch2 chan *int64 // this will block if json, etc. 
+ ch2 := make(chan *int64, 8) + NewDecoderBytes(bs, h).MustDecode(&ch2) + // logT(t, "Len(ch2): %v", len(ch2)) + // fmt.Printf("after: len(ch2): %v, ch2: %v\n", len(ch2), ch2) + close(ch2) + var sl2 []*int64 + for j := range ch2 { + sl2 = append(sl2, j) + } + if err := deepEqual(sl1, sl2); err != nil { + logT(t, "Not Match: %v; len: %v, %v", err, len(sl1), len(sl2)) + failT(t) + } +} + +func testCodecRpcOne(t *testing.T, rr Rpc, h Handle, doRequest bool, exitSleepMs time.Duration, +) (port int) { + testOnce.Do(testInitAll) + if testSkipRPCTests { + return + } + // rpc needs EOF, which is sent via a panic, and so must be recovered. + if !recoverPanicToErr { + logT(t, "EXPECTED. set recoverPanicToErr=true, since rpc needs EOF") + t.FailNow() + } + srv := rpc.NewServer() + srv.Register(testRpcInt) + ln, err := net.Listen("tcp", "127.0.0.1:0") + // log("listener: %v", ln.Addr()) + checkErrT(t, err) + port = (ln.Addr().(*net.TCPAddr)).Port + // var opts *DecoderOptions + // opts := testDecOpts + // opts.MapType = mapStrIntfTyp + // opts.RawToString = false + serverExitChan := make(chan bool, 1) + var serverExitFlag uint64 = 0 + serverFn := func() { + for { + conn1, err1 := ln.Accept() + // if err1 != nil { + // //fmt.Printf("accept err1: %v\n", err1) + // continue + // } + if atomic.LoadUint64(&serverExitFlag) == 1 { + serverExitChan <- true + conn1.Close() + return // exit serverFn goroutine + } + if err1 == nil { + var sc rpc.ServerCodec = rr.ServerCodec(conn1, h) + srv.ServeCodec(sc) + } + } + } + + clientFn := func(cc rpc.ClientCodec) { + cl := rpc.NewClientWithCodec(cc) + defer cl.Close() + // defer func() { println("##### client closing"); cl.Close() }() + var up, sq, mult int + var rstr string + // log("Calling client") + checkErrT(t, cl.Call("TestRpcInt.Update", 5, &up)) + // log("Called TestRpcInt.Update") + checkEqualT(t, testRpcInt.i, 5, "testRpcInt.i=5") + checkEqualT(t, up, 5, "up=5") + checkErrT(t, cl.Call("TestRpcInt.Square", 1, &sq)) + checkEqualT(t, sq, 25, "sq=25") + checkErrT(t, cl.Call("TestRpcInt.Mult", 20, &mult)) + checkEqualT(t, mult, 100, "mult=100") + checkErrT(t, cl.Call("TestRpcInt.EchoStruct", TestABC{"Aa", "Bb", "Cc"}, &rstr)) + checkEqualT(t, rstr, fmt.Sprintf("%#v", TestABC{"Aa", "Bb", "Cc"}), "rstr=") + checkErrT(t, cl.Call("TestRpcInt.Echo123", []string{"A1", "B2", "C3"}, &rstr)) + checkEqualT(t, rstr, fmt.Sprintf("%#v", []string{"A1", "B2", "C3"}), "rstr=") + } + + connFn := func() (bs net.Conn) { + // log("calling f1") + bs, err2 := net.Dial(ln.Addr().Network(), ln.Addr().String()) + //fmt.Printf("f1. 
bs: %v, err2: %v\n", bs, err2) + checkErrT(t, err2) + return + } + + exitFn := func() { + atomic.StoreUint64(&serverExitFlag, 1) + bs := connFn() + <-serverExitChan + bs.Close() + // serverExitChan <- true + } + + go serverFn() + runtime.Gosched() + //time.Sleep(100 * time.Millisecond) + if exitSleepMs == 0 { + defer ln.Close() + defer exitFn() + } + if doRequest { + bs := connFn() + cc := rr.ClientCodec(bs, h) + clientFn(cc) + } + if exitSleepMs != 0 { + go func() { + defer ln.Close() + time.Sleep(exitSleepMs) + exitFn() + }() + } + return +} + +func doTestMapEncodeForCanonical(t *testing.T, name string, h Handle) { + v1 := map[string]interface{}{ + "a": 1, + "b": "hello", + "c": map[string]interface{}{ + "c/a": 1, + "c/b": "world", + "c/c": []int{1, 2, 3, 4}, + "c/d": map[string]interface{}{ + "c/d/a": "fdisajfoidsajfopdjsaopfjdsapofda", + "c/d/b": "fdsafjdposakfodpsakfopdsakfpodsakfpodksaopfkdsopafkdopsa", + "c/d/c": "poir02 ir30qif4p03qir0pogjfpoaerfgjp ofke[padfk[ewapf kdp[afep[aw", + "c/d/d": "fdsopafkd[sa f-32qor-=4qeof -afo-erfo r-eafo 4e- o r4-qwo ag", + "c/d/e": "kfep[a sfkr0[paf[a foe-[wq ewpfao-q ro3-q ro-4qof4-qor 3-e orfkropzjbvoisdb", + "c/d/f": "", + }, + "c/e": map[int]string{ + 1: "1", + 22: "22", + 333: "333", + 4444: "4444", + 55555: "55555", + }, + "c/f": map[string]int{ + "1": 1, + "22": 22, + "333": 333, + "4444": 4444, + "55555": 55555, + }, + }, + } + var v2 map[string]interface{} + var b1, b2 []byte + + // encode v1 into b1, decode b1 into v2, encode v2 into b2, compare b1 and b2 + + bh := h.getBasicHandle() + if !bh.Canonical { + bh.Canonical = true + defer func() { bh.Canonical = false }() + } + + e1 := NewEncoderBytes(&b1, h) + e1.MustEncode(v1) + d1 := NewDecoderBytes(b1, h) + d1.MustDecode(&v2) + e2 := NewEncoderBytes(&b2, h) + e2.MustEncode(v2) + if !bytes.Equal(b1, b2) { + logT(t, "Unequal bytes: %v VS %v", b1, b2) + t.FailNow() + } +} + +// Comprehensive testing that generates data encoded from python handle (cbor, msgpack), +// and validates that our code can read and write it out accordingly. +// We keep this unexported here, and put actual test in ext_dep_test.go. +// This way, it can be excluded by excluding file completely. +func doTestPythonGenStreams(t *testing.T, name string, h Handle) { + logT(t, "TestPythonGenStreams-%v", name) + tmpdir, err := ioutil.TempDir("", "golang-"+name+"-test") + if err != nil { + logT(t, "-------- Unable to create temp directory\n") + t.FailNow() + } + defer os.RemoveAll(tmpdir) + logT(t, "tmpdir: %v", tmpdir) + cmd := exec.Command("python", "test.py", "testdata", tmpdir) + //cmd.Stdin = strings.NewReader("some input") + //cmd.Stdout = &out + var cmdout []byte + if cmdout, err = cmd.CombinedOutput(); err != nil { + logT(t, "-------- Error running test.py testdata. Err: %v", err) + logT(t, " %v", string(cmdout)) + t.FailNow() + } + + bh := h.getBasicHandle() + + oldMapType := bh.MapType + for i, v := range tablePythonVerify { + // if v == uint64(0) && h == testMsgpackH { + // v = int64(0) + // } + bh.MapType = oldMapType + //load up the golden file based on number + //decode it + //compare to in-mem object + //encode it again + //compare to output stream + logT(t, "..............................................") + logT(t, " Testing: #%d: %T, %#v\n", i, v, v) + var bss []byte + bss, err = ioutil.ReadFile(filepath.Join(tmpdir, strconv.Itoa(i)+"."+name+".golden")) + if err != nil { + logT(t, "-------- Error reading golden file: %d. 
Err: %v", i, err) + failT(t) + continue + } + bh.MapType = testMapStrIntfTyp + + var v1 interface{} + if err = testUnmarshal(&v1, bss, h); err != nil { + logT(t, "-------- Error decoding stream: %d: Err: %v", i, err) + failT(t) + continue + } + if v == skipVerifyVal { + continue + } + //no need to indirect, because we pass a nil ptr, so we already have the value + //if v1 != nil { v1 = reflect.Indirect(reflect.ValueOf(v1)).Interface() } + if err = deepEqual(v, v1); err == nil { + logT(t, "++++++++ Objects match: %T, %v", v, v) + } else { + logT(t, "-------- Objects do not match: %v. Source: %T. Decoded: %T", err, v, v1) + logT(t, "-------- GOLDEN: %#v", v) + // logT(t, "-------- DECODED: %#v <====> %#v", v1, reflect.Indirect(reflect.ValueOf(v1)).Interface()) + logT(t, "-------- DECODED: %#v <====> %#v", v1, reflect.Indirect(reflect.ValueOf(v1)).Interface()) + failT(t) + } + bsb, err := testMarshal(v1, h) + if err != nil { + logT(t, "Error encoding to stream: %d: Err: %v", i, err) + failT(t) + continue + } + if err = deepEqual(bsb, bss); err == nil { + logT(t, "++++++++ Bytes match") + } else { + logT(t, "???????? Bytes do not match. %v.", err) + xs := "--------" + if reflect.ValueOf(v).Kind() == reflect.Map { + xs = " " + logT(t, "%s It's a map. Ok that they don't match (dependent on ordering).", xs) + } else { + logT(t, "%s It's not a map. They should match.", xs) + failT(t) + } + logT(t, "%s FROM_FILE: %4d] %v", xs, len(bss), bss) + logT(t, "%s ENCODED: %4d] %v", xs, len(bsb), bsb) + } + } + bh.MapType = oldMapType +} + +// To test MsgpackSpecRpc, we test 3 scenarios: +// - Go Client to Go RPC Service (contained within TestMsgpackRpcSpec) +// - Go client to Python RPC Service (contained within doTestMsgpackRpcSpecGoClientToPythonSvc) +// - Python Client to Go RPC Service (contained within doTestMsgpackRpcSpecPythonClientToGoSvc) +// +// This allows us test the different calling conventions +// - Go Service requires only one argument +// - Python Service allows multiple arguments + +func doTestMsgpackRpcSpecGoClientToPythonSvc(t *testing.T) { + if testSkipRPCTests { + return + } + // openPorts are between 6700 and 6800 + r := rand.New(rand.NewSource(time.Now().UnixNano())) + openPort := strconv.FormatInt(6700+r.Int63n(99), 10) + // openPort := "6792" + cmd := exec.Command("python", "test.py", "rpc-server", openPort, "4") + checkErrT(t, cmd.Start()) + bs, err2 := net.Dial("tcp", ":"+openPort) + for i := 0; i < 10 && err2 != nil; i++ { + time.Sleep(50 * time.Millisecond) // time for python rpc server to start + bs, err2 = net.Dial("tcp", ":"+openPort) + } + checkErrT(t, err2) + cc := MsgpackSpecRpc.ClientCodec(bs, testMsgpackH) + cl := rpc.NewClientWithCodec(cc) + defer cl.Close() + var rstr string + checkErrT(t, cl.Call("EchoStruct", TestABC{"Aa", "Bb", "Cc"}, &rstr)) + //checkEqualT(t, rstr, "{'A': 'Aa', 'B': 'Bb', 'C': 'Cc'}") + var mArgs MsgpackSpecRpcMultiArgs = []interface{}{"A1", "B2", "C3"} + checkErrT(t, cl.Call("Echo123", mArgs, &rstr)) + checkEqualT(t, rstr, "1:A1 2:B2 3:C3", "rstr=") + cmd.Process.Kill() +} + +func doTestMsgpackRpcSpecPythonClientToGoSvc(t *testing.T) { + if testSkipRPCTests { + return + } + port := testCodecRpcOne(t, MsgpackSpecRpc, testMsgpackH, false, 1*time.Second) + //time.Sleep(1000 * time.Millisecond) + cmd := exec.Command("python", "test.py", "rpc-client-go-service", strconv.Itoa(port)) + var cmdout []byte + var err error + if cmdout, err = cmd.CombinedOutput(); err != nil { + logT(t, "-------- Error running test.py rpc-client-go-service. 
Err: %v", err) + logT(t, " %v", string(cmdout)) + t.FailNow() + } + checkEqualT(t, string(cmdout), + fmt.Sprintf("%#v\n%#v\n", []string{"A1", "B2", "C3"}, TestABC{"Aa", "Bb", "Cc"}), "cmdout=") +} + +func TestBincCodecsTable(t *testing.T) { + testCodecTableOne(t, testBincH) +} + +func TestBincCodecsMisc(t *testing.T) { + testCodecMiscOne(t, testBincH) +} + +func TestBincCodecsEmbeddedPointer(t *testing.T) { + testCodecEmbeddedPointer(t, testBincH) +} + +func TestSimpleCodecsTable(t *testing.T) { + testCodecTableOne(t, testSimpleH) +} + +func TestSimpleCodecsMisc(t *testing.T) { + testCodecMiscOne(t, testSimpleH) +} + +func TestSimpleCodecsEmbeddedPointer(t *testing.T) { + testCodecEmbeddedPointer(t, testSimpleH) +} + +func TestMsgpackCodecsTable(t *testing.T) { + testCodecTableOne(t, testMsgpackH) +} + +func TestMsgpackCodecsMisc(t *testing.T) { + testCodecMiscOne(t, testMsgpackH) +} + +func TestMsgpackCodecsEmbeddedPointer(t *testing.T) { + testCodecEmbeddedPointer(t, testMsgpackH) +} + +func TestCborCodecsTable(t *testing.T) { + testCodecTableOne(t, testCborH) +} + +func TestCborCodecsMisc(t *testing.T) { + testCodecMiscOne(t, testCborH) +} + +func TestCborCodecsEmbeddedPointer(t *testing.T) { + testCodecEmbeddedPointer(t, testCborH) +} + +func TestCborMapEncodeForCanonical(t *testing.T) { + doTestMapEncodeForCanonical(t, "cbor", testCborH) +} + +func TestJsonCodecsTable(t *testing.T) { + testCodecTableOne(t, testJsonH) +} + +func TestJsonCodecsMisc(t *testing.T) { + testCodecMiscOne(t, testJsonH) +} + +func TestJsonCodecsEmbeddedPointer(t *testing.T) { + testCodecEmbeddedPointer(t, testJsonH) +} + +func TestJsonCodecChan(t *testing.T) { + testCodecChan(t, testJsonH) +} + +func TestCborCodecChan(t *testing.T) { + testCodecChan(t, testCborH) +} + +// ----- RPC ----- + +func TestBincRpcGo(t *testing.T) { + testCodecRpcOne(t, GoRpc, testBincH, true, 0) +} + +func TestSimpleRpcGo(t *testing.T) { + testCodecRpcOne(t, GoRpc, testSimpleH, true, 0) +} + +func TestMsgpackRpcGo(t *testing.T) { + testCodecRpcOne(t, GoRpc, testMsgpackH, true, 0) +} + +func TestCborRpcGo(t *testing.T) { + testCodecRpcOne(t, GoRpc, testCborH, true, 0) +} + +func TestJsonRpcGo(t *testing.T) { + testCodecRpcOne(t, GoRpc, testJsonH, true, 0) +} + +func TestMsgpackRpcSpec(t *testing.T) { + testCodecRpcOne(t, MsgpackSpecRpc, testMsgpackH, true, 0) +} + +func TestBincUnderlyingType(t *testing.T) { + testCodecUnderlyingType(t, testBincH) +} + +// TODO: +// Add Tests for: +// - decoding empty list/map in stream into a nil slice/map +// - binary(M|Unm)arsher support for time.Time (e.g. cbor encoding) +// - text(M|Unm)arshaler support for time.Time (e.g. json encoding) +// - non fast-path scenarios e.g. map[string]uint16, []customStruct. +// Expand cbor to include indefinite length stuff for this non-fast-path types. +// This may not be necessary, since we have the manual tests (fastpathEnabled=false) to test/validate with. +// - CodecSelfer +// Ensure it is called when (en|de)coding interface{} or reflect.Value (2 different codepaths). +// - interfaces: textMarshaler, binaryMarshaler, codecSelfer +// - struct tags: +// on anonymous fields, _struct (all fields), etc +// - codecgen of struct containing channels. 
+
+//
+// Cleanup tests:
+// - They are brittle in their handling of validation and skipping
diff --git a/vendor/github.com/coreos/etcd/Godeps/_workspace/src/github.com/ugorji/go/codec/codecgen/README.md b/vendor/github.com/coreos/etcd/Godeps/_workspace/src/github.com/ugorji/go/codec/codecgen/README.md
new file mode 100644
index 000000000..3ae8a056f
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/Godeps/_workspace/src/github.com/ugorji/go/codec/codecgen/README.md
@@ -0,0 +1,36 @@
+# codecgen tool
+
+Given a list of *.go files to parse and an output file, codecgen
+creates an output file which contains `codec.Selfer` implementations
+for the named types found in the files parsed.
+
+Using codecgen is very straightforward.
+
+**Download and install the tool**
+
+`go get -u github.com/ugorji/go/codec/codecgen`
+
+**Run the tool on your files**
+
+The command line format is:
+
+`codecgen [options] (-o outfile) (infile ...)`
+
+```sh
+% codecgen -?
+Usage of codecgen:
+ -c="github.com/ugorji/go/codec": codec path
+ -o="": out file
+ -r=".*": regex for type name to match
+ -rt="": tags for go run
+ -t="": build tag to put in file
+ -u=false: Use unsafe, e.g. to avoid unnecessary allocation on []byte->string
+ -x=false: keep temp file
+
+% codecgen -o values_codecgen.go values.go values2.go moretypedefs.go
+```
+
+Please see the [blog article](http://ugorji.net/blog/go-codecgen)
+for more information on how to use the tool.
+
diff --git a/vendor/github.com/coreos/etcd/Godeps/_workspace/src/github.com/ugorji/go/codec/codecgen/gen.go b/vendor/github.com/coreos/etcd/Godeps/_workspace/src/github.com/ugorji/go/codec/codecgen/gen.go
new file mode 100644
index 000000000..f370b4c79
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/Godeps/_workspace/src/github.com/ugorji/go/codec/codecgen/gen.go
@@ -0,0 +1,273 @@
+// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved.
+// Use of this source code is governed by a MIT license found in the LICENSE file.
+
+// codecgen generates codec.Selfer implementations for a set of types.
+package main
+
+import (
+ "bufio"
+ "bytes"
+ "errors"
+ "flag"
+ "fmt"
+ "go/ast"
+ "go/build"
+ "go/parser"
+ "go/token"
+ "math/rand"
+ "os"
+ "os/exec"
+ "path/filepath"
+ "regexp"
+ "strconv"
+ "text/template"
+ "time"
+)
+
+const genCodecPkg = "codec1978" // keep this in sync with codec.genCodecPkg
+
+const genFrunMainTmpl = `//+build ignore
+
+package main
+{{ if .Types }}import "{{ .ImportPath }}"{{ end }}
+func main() {
+ {{ $.PackageName }}.CodecGenTempWrite{{ .RandString }}()
+}
+`
+
+// const genFrunPkgTmpl = `//+build codecgen
+const genFrunPkgTmpl = `
+package {{ $.PackageName }}
+
+import (
+ {{ if not .CodecPkgFiles }}{{ .CodecPkgName }} "{{ .CodecImportPath }}"{{ end }}
+ "os"
+ "reflect"
+ "bytes"
+ "strings"
+ "go/format"
+)
+
+func CodecGenTempWrite{{ .RandString }}() {
+ fout, err := os.Create("{{ .OutFile }}")
+ if err != nil {
+ panic(err)
+ }
+ defer fout.Close()
+ var out bytes.Buffer
+
+ var typs []reflect.Type
+{{ range $index, $element := .Types }}
+ var t{{ $index }} {{ . }}
+ typs = append(typs, reflect.TypeOf(t{{ $index }}))
+{{ end }}
+ {{ if not .CodecPkgFiles }}{{ .CodecPkgName }}.{{ end }}Gen(&out, "{{ .BuildTag }}", "{{ .PackageName }}", "{{ .RandString }}", {{ .UseUnsafe }}, {{ if not .CodecPkgFiles }}{{ .CodecPkgName }}.{{ end }}NewTypeInfos(strings.Split("{{ .StructTags }}", ",")), typs...) 
+ bout, err := format.Source(out.Bytes()) + if err != nil { + fout.Write(out.Bytes()) + panic(err) + } + fout.Write(bout) +} + +` + +// Generate is given a list of *.go files to parse, and an output file (fout). +// +// It finds all types T in the files, and it creates 2 tmp files (frun). +// - main package file passed to 'go run' +// - package level file which calls *genRunner.Selfer to write Selfer impls for each T. +// We use a package level file so that it can reference unexported types in the package being worked on. +// Tool then executes: "go run __frun__" which creates fout. +// fout contains Codec(En|De)codeSelf implementations for every type T. +// +func Generate(outfile, buildTag, codecPkgPath string, uid int64, useUnsafe bool, goRunTag string, + st string, regexName *regexp.Regexp, deleteTempFile bool, infiles ...string) (err error) { + // For each file, grab AST, find each type, and write a call to it. + if len(infiles) == 0 { + return + } + if outfile == "" || codecPkgPath == "" { + err = errors.New("outfile and codec package path cannot be blank") + return + } + if uid < 0 { + uid = -uid + } + if uid == 0 { + rr := rand.New(rand.NewSource(time.Now().UnixNano())) + uid = 101 + rr.Int63n(9777) + } + // We have to parse dir for package, before opening the temp file for writing (else ImportDir fails). + // Also, ImportDir(...) must take an absolute path. + lastdir := filepath.Dir(outfile) + absdir, err := filepath.Abs(lastdir) + if err != nil { + return + } + pkg, err := build.Default.ImportDir(absdir, build.AllowBinary) + if err != nil { + return + } + type tmplT struct { + CodecPkgName string + CodecImportPath string + ImportPath string + OutFile string + PackageName string + RandString string + BuildTag string + StructTags string + Types []string + CodecPkgFiles bool + UseUnsafe bool + } + tv := tmplT{ + CodecPkgName: genCodecPkg, + OutFile: outfile, + CodecImportPath: codecPkgPath, + BuildTag: buildTag, + UseUnsafe: useUnsafe, + RandString: strconv.FormatInt(uid, 10), + StructTags: st, + } + tv.ImportPath = pkg.ImportPath + if tv.ImportPath == tv.CodecImportPath { + tv.CodecPkgFiles = true + tv.CodecPkgName = "codec" + } + astfiles := make([]*ast.File, len(infiles)) + for i, infile := range infiles { + if filepath.Dir(infile) != lastdir { + err = errors.New("in files must all be in same directory as outfile") + return + } + fset := token.NewFileSet() + astfiles[i], err = parser.ParseFile(fset, infile, nil, 0) + if err != nil { + return + } + if i == 0 { + tv.PackageName = astfiles[i].Name.Name + if tv.PackageName == "main" { + // codecgen cannot be run on types in the 'main' package. + // A temporary 'main' package must be created, and should reference the fully built + // package containing the types. + // Also, the temporary main package will conflict with the main package which already has a main method. 
+ err = errors.New("codecgen cannot be run on types in the 'main' package") + return + } + } + } + + for _, f := range astfiles { + for _, d := range f.Decls { + if gd, ok := d.(*ast.GenDecl); ok { + for _, dd := range gd.Specs { + if td, ok := dd.(*ast.TypeSpec); ok { + // if len(td.Name.Name) == 0 || td.Name.Name[0] > 'Z' || td.Name.Name[0] < 'A' { + if len(td.Name.Name) == 0 { + continue + } + + // only generate for: + // struct: StructType + // primitives (numbers, bool, string): Ident + // map: MapType + // slice, array: ArrayType + // chan: ChanType + // do not generate: + // FuncType, InterfaceType, StarExpr (ptr), etc + switch td.Type.(type) { + case *ast.StructType, *ast.Ident, *ast.MapType, *ast.ArrayType, *ast.ChanType: + if regexName.FindStringIndex(td.Name.Name) != nil { + tv.Types = append(tv.Types, td.Name.Name) + } + } + } + } + } + } + } + + if len(tv.Types) == 0 { + return + } + + // we cannot use ioutil.TempFile, because we cannot guarantee the file suffix (.go). + // Also, we cannot create file in temp directory, + // because go run will not work (as it needs to see the types here). + // Consequently, create the temp file in the current directory, and remove when done. + + // frun, err = ioutil.TempFile("", "codecgen-") + // frunName := filepath.Join(os.TempDir(), "codecgen-"+strconv.FormatInt(time.Now().UnixNano(), 10)+".go") + + frunMainName := "codecgen-main-" + tv.RandString + ".generated.go" + frunPkgName := "codecgen-pkg-" + tv.RandString + ".generated.go" + if deleteTempFile { + defer os.Remove(frunMainName) + defer os.Remove(frunPkgName) + } + // var frunMain, frunPkg *os.File + if _, err = gen1(frunMainName, genFrunMainTmpl, &tv); err != nil { + return + } + if _, err = gen1(frunPkgName, genFrunPkgTmpl, &tv); err != nil { + return + } + + // remove outfile, so "go run ..." will not think that types in outfile already exist. + os.Remove(outfile) + + // execute go run frun + cmd := exec.Command("go", "run", "-tags="+goRunTag, frunMainName) //, frunPkg.Name()) + var buf bytes.Buffer + cmd.Stdout = &buf + cmd.Stderr = &buf + if err = cmd.Run(); err != nil { + err = fmt.Errorf("error running 'go run %s': %v, console: %s", + frunMainName, err, buf.Bytes()) + return + } + os.Stdout.Write(buf.Bytes()) + return +} + +func gen1(frunName, tmplStr string, tv interface{}) (frun *os.File, err error) { + os.Remove(frunName) + if frun, err = os.Create(frunName); err != nil { + return + } + defer frun.Close() + + t := template.New("") + if t, err = t.Parse(tmplStr); err != nil { + return + } + bw := bufio.NewWriter(frun) + if err = t.Execute(bw, tv); err != nil { + return + } + if err = bw.Flush(); err != nil { + return + } + return +} + +func main() { + o := flag.String("o", "", "out file") + c := flag.String("c", genCodecPath, "codec path") + t := flag.String("t", "", "build tag to put in file") + r := flag.String("r", ".*", "regex for type name to match") + rt := flag.String("rt", "", "tags for go run") + st := flag.String("st", "codec,json", "struct tag keys to introspect") + x := flag.Bool("x", false, "keep temp file") + u := flag.Bool("u", false, "Use unsafe, e.g. 
to avoid unnecessary allocation on []byte->string") + d := flag.Int64("d", 0, "random identifier for use in generated code") + flag.Parse() + if err := Generate(*o, *t, *c, *d, *u, *rt, *st, + regexp.MustCompile(*r), !*x, flag.Args()...); err != nil { + fmt.Fprintf(os.Stderr, "codecgen error: %v\n", err) + os.Exit(1) + } +} diff --git a/vendor/github.com/coreos/etcd/Godeps/_workspace/src/github.com/ugorji/go/codec/codecgen/z.go b/vendor/github.com/coreos/etcd/Godeps/_workspace/src/github.com/ugorji/go/codec/codecgen/z.go new file mode 100644 index 000000000..e120a4eb9 --- /dev/null +++ b/vendor/github.com/coreos/etcd/Godeps/_workspace/src/github.com/ugorji/go/codec/codecgen/z.go @@ -0,0 +1,3 @@ +package main + +const genCodecPath = "github.com/ugorji/go/codec" diff --git a/vendor/github.com/coreos/etcd/Godeps/_workspace/src/github.com/ugorji/go/codec/codecgen_test.go b/vendor/github.com/coreos/etcd/Godeps/_workspace/src/github.com/ugorji/go/codec/codecgen_test.go new file mode 100644 index 000000000..a73497e91 --- /dev/null +++ b/vendor/github.com/coreos/etcd/Godeps/_workspace/src/github.com/ugorji/go/codec/codecgen_test.go @@ -0,0 +1,24 @@ +//+build x,codecgen + +package codec + +import ( + "fmt" + "testing" +) + +func _TestCodecgenJson1(t *testing.T) { + // This is just a simplistic test for codecgen. + // It is typically disabled. We only enable it for debugging purposes. + const callCodecgenDirect bool = true + v := newTestStruc(2, false, !testSkipIntf, false) + var bs []byte + e := NewEncoderBytes(&bs, testJsonH) + if callCodecgenDirect { + v.CodecEncodeSelf(e) + e.w.atEndOfEncode() + } else { + e.MustEncode(v) + } + fmt.Printf("%s\n", bs) +} diff --git a/vendor/github.com/coreos/etcd/Godeps/_workspace/src/github.com/ugorji/go/codec/decode.go b/vendor/github.com/coreos/etcd/Godeps/_workspace/src/github.com/ugorji/go/codec/decode.go new file mode 100644 index 000000000..b3b99f036 --- /dev/null +++ b/vendor/github.com/coreos/etcd/Godeps/_workspace/src/github.com/ugorji/go/codec/decode.go @@ -0,0 +1,2015 @@ +// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved. +// Use of this source code is governed by a MIT license found in the LICENSE file. + +package codec + +import ( + "encoding" + "errors" + "fmt" + "io" + "reflect" + "time" +) + +// Some tagging information for error messages. +const ( + msgBadDesc = "Unrecognized descriptor byte" + msgDecCannotExpandArr = "cannot expand go array from %v to stream length: %v" +) + +var ( + onlyMapOrArrayCanDecodeIntoStructErr = errors.New("only encoded map or array can be decoded into a struct") + cannotDecodeIntoNilErr = errors.New("cannot decode into nil") +) + +// decReader abstracts the reading source, allowing implementations that can +// read from an io.Reader or directly off a byte slice with zero-copying. +type decReader interface { + unreadn1() + + // readx will use the implementation scratch buffer if possible i.e. n < len(scratchbuf), OR + // just return a view of the []byte being decoded from. + // Ensure you call detachZeroCopyBytes later if this needs to be sent outside codec control. + readx(n int) []byte + readb([]byte) + readn1() uint8 + readn1eof() (v uint8, eof bool) + numread() int // number of bytes read + track() + stopTrack() []byte +} + +type decReaderByteScanner interface { + io.Reader + io.ByteScanner +} + +type decDriver interface { + // this will check if the next token is a break. + CheckBreak() bool + TryDecodeAsNil() bool + // vt is one of: Bytes, String, Nil, Slice or Map. Return unSet if not known. 
+ ContainerType() (vt valueType)
+ IsBuiltinType(rt uintptr) bool
+ DecodeBuiltin(rt uintptr, v interface{})
+
+ // DecodeNaked will decode primitives (number, bool, string, []byte) and RawExt.
+ // For maps and arrays, it will not do the decoding in-band, but will signal
+ // the decoder, so that is done later, by setting the decNaked.valueType field.
+ //
+ // Note: Numbers are decoded as int64, uint64, float64 only (no smaller sized number types).
+ // for extensions, DecodeNaked must read the tag and the []byte if it exists.
+ // if the []byte is not read, then kInterfaceNaked will treat it as a Handle
+ // that stores the subsequent value in-band, and complete reading the RawExt.
+ //
+ // extensions should also use readx to decode them, for efficiency.
+ // kInterface will extract the detached byte slice if it has to pass it outside its realm.
+ DecodeNaked()
+ DecodeInt(bitsize uint8) (i int64)
+ DecodeUint(bitsize uint8) (ui uint64)
+ DecodeFloat(chkOverflow32 bool) (f float64)
+ DecodeBool() (b bool)
+ // DecodeString can also decode symbols.
+ // It looks redundant as DecodeBytes is available.
+ // However, some codecs (e.g. binc) support symbols and can
+ // return a pre-stored string value, meaning that it can bypass
+ // the cost of []byte->string conversion.
+ DecodeString() (s string)
+
+ // DecodeBytes may be called directly, without going through reflection.
+ // Consequently, it must be designed to handle possible nil.
+ DecodeBytes(bs []byte, isstring, zerocopy bool) (bsOut []byte)
+
+ // decodeExt will decode into a *RawExt or into an extension.
+ DecodeExt(v interface{}, xtag uint64, ext Ext) (realxtag uint64)
+ // decodeExt(verifyTag bool, tag byte) (xtag byte, xbs []byte)
+ ReadMapStart() int
+ ReadArrayStart() int
+
+ reset()
+ uncacheRead()
+}
+
+type decNoSeparator struct{}
+
+func (_ decNoSeparator) ReadEnd() {}
+func (_ decNoSeparator) uncacheRead() {}
+
+type DecodeOptions struct {
+ // MapType specifies type to use during schema-less decoding of a map in the stream.
+ // If nil, we use map[interface{}]interface{}
+ MapType reflect.Type
+
+ // SliceType specifies type to use during schema-less decoding of an array in the stream.
+ // If nil, we use []interface{}
+ SliceType reflect.Type
+
+ // MaxInitLen defines the initial length that we "make" a collection (slice, chan or map) with.
+ // If 0 or negative, we default to a sensible value based on the size of an element in the collection.
+ //
+ // For example, when decoding, a stream may say that it has MAX_UINT elements.
+ // We should not automatically provision a slice of that length, to prevent an Out-Of-Memory crash.
+ // Instead, we provision up to MaxInitLen, fill that up, and start appending after that.
+ MaxInitLen int
+
+ // If ErrorIfNoField, return an error when decoding a map
+ // from a codec stream into a struct, and no matching struct field is found.
+ ErrorIfNoField bool
+
+ // If ErrorIfNoArrayExpand, return an error when decoding a slice/array that cannot be expanded.
+ // For example, the stream contains an array of 8 items, but you are decoding into a [4]T array,
+ // or you are decoding into a slice of length 4 which is non-addressable (and so cannot be set).
+ ErrorIfNoArrayExpand bool
+
+ // If SignedInteger, use int64 (not uint64) during schema-less decoding of unsigned values.
+ SignedInteger bool
+
+ // MapValueReset controls how we decode into a map value.
+ //
+ // By default, we MAY retrieve the mapping for a key, and then decode into that. 
+ // However, especially with big maps, that retrieval may be expensive and unnecessary + // if the stream already contains all that is necessary to recreate the value. + // + // If true, we will never retrieve the previous mapping, + // but rather decode into a new value and set that in the map. + // + // If false, we will retrieve the previous mapping if necessary e.g. + // the previous mapping is a pointer, or is a struct or array with pre-set state, + // or is an interface. + MapValueReset bool + + // InterfaceReset controls how we decode into an interface. + // + // By default, when we see a field that is an interface{...}, + // or a map with interface{...} value, we will attempt decoding into the + // "contained" value. + // + // However, this prevents us from reading a string into an interface{} + // that formerly contained a number. + // + // If true, we will decode into a new "blank" value, and set that in the interface. + // If false, we will decode into whatever is contained in the interface. + InterfaceReset bool + + // InternString controls interning of strings during decoding. + // + // Some handles, e.g. json, typically will read map keys as strings. + // If the set of keys are finite, it may help reduce allocation to + // look them up from a map (than to allocate them afresh). + // + // Note: Handles will be smart when using the intern functionality. + // So everything will not be interned. + InternString bool +} + +// ------------------------------------ + +// ioDecByteScanner implements Read(), ReadByte(...), UnreadByte(...) methods +// of io.Reader, io.ByteScanner. +type ioDecByteScanner struct { + r io.Reader + l byte // last byte + ls byte // last byte status. 0: init-canDoNothing, 1: canRead, 2: canUnread + b [1]byte // tiny buffer for reading single bytes +} + +func (z *ioDecByteScanner) Read(p []byte) (n int, err error) { + var firstByte bool + if z.ls == 1 { + z.ls = 2 + p[0] = z.l + if len(p) == 1 { + n = 1 + return + } + firstByte = true + p = p[1:] + } + n, err = z.r.Read(p) + if n > 0 { + if err == io.EOF && n == len(p) { + err = nil // read was successful, so postpone EOF (till next time) + } + z.l = p[n-1] + z.ls = 2 + } + if firstByte { + n++ + } + return +} + +func (z *ioDecByteScanner) ReadByte() (c byte, err error) { + n, err := z.Read(z.b[:]) + if n == 1 { + c = z.b[0] + if err == io.EOF { + err = nil // read was successful, so postpone EOF (till next time) + } + } + return +} + +func (z *ioDecByteScanner) UnreadByte() (err error) { + x := z.ls + if x == 0 { + err = errors.New("cannot unread - nothing has been read") + } else if x == 1 { + err = errors.New("cannot unread - last byte has not been read") + } else if x == 2 { + z.ls = 1 + } + return +} + +// ioDecReader is a decReader that reads off an io.Reader +type ioDecReader struct { + br decReaderByteScanner + // temp byte array re-used internally for efficiency during read. + // shares buffer with Decoder, so we keep size of struct within 8 words. + x *[scratchByteArrayLen]byte + bs ioDecByteScanner + n int // num read + tr []byte // tracking bytes read + trb bool +} + +func (z *ioDecReader) numread() int { + return z.n +} + +func (z *ioDecReader) readx(n int) (bs []byte) { + if n <= 0 { + return + } + if n < len(z.x) { + bs = z.x[:n] + } else { + bs = make([]byte, n) + } + if _, err := io.ReadAtLeast(z.br, bs, n); err != nil { + panic(err) + } + z.n += len(bs) + if z.trb { + z.tr = append(z.tr, bs...) 
+ } + return +} + +func (z *ioDecReader) readb(bs []byte) { + if len(bs) == 0 { + return + } + n, err := io.ReadAtLeast(z.br, bs, len(bs)) + z.n += n + if err != nil { + panic(err) + } + if z.trb { + z.tr = append(z.tr, bs...) + } +} + +func (z *ioDecReader) readn1() (b uint8) { + b, err := z.br.ReadByte() + if err != nil { + panic(err) + } + z.n++ + if z.trb { + z.tr = append(z.tr, b) + } + return b +} + +func (z *ioDecReader) readn1eof() (b uint8, eof bool) { + b, err := z.br.ReadByte() + if err == nil { + z.n++ + if z.trb { + z.tr = append(z.tr, b) + } + } else if err == io.EOF { + eof = true + } else { + panic(err) + } + return +} + +func (z *ioDecReader) unreadn1() { + err := z.br.UnreadByte() + if err != nil { + panic(err) + } + z.n-- + if z.trb { + if l := len(z.tr) - 1; l >= 0 { + z.tr = z.tr[:l] + } + } +} + +func (z *ioDecReader) track() { + if z.tr != nil { + z.tr = z.tr[:0] + } + z.trb = true +} + +func (z *ioDecReader) stopTrack() (bs []byte) { + z.trb = false + return z.tr +} + +// ------------------------------------ + +var bytesDecReaderCannotUnreadErr = errors.New("cannot unread last byte read") + +// bytesDecReader is a decReader that reads off a byte slice with zero copying +type bytesDecReader struct { + b []byte // data + c int // cursor + a int // available + t int // track start +} + +func (z *bytesDecReader) reset(in []byte) { + z.b = in + z.a = len(in) + z.c = 0 + z.t = 0 +} + +func (z *bytesDecReader) numread() int { + return z.c +} + +func (z *bytesDecReader) unreadn1() { + if z.c == 0 || len(z.b) == 0 { + panic(bytesDecReaderCannotUnreadErr) + } + z.c-- + z.a++ + return +} + +func (z *bytesDecReader) readx(n int) (bs []byte) { + // slicing from a non-constant start position is more expensive, + // as more computation is required to decipher the pointer start position. + // However, we do it only once, and it's better than reslicing both z.b and return value. 
+ + if n <= 0 { + } else if z.a == 0 { + panic(io.EOF) + } else if n > z.a { + panic(io.ErrUnexpectedEOF) + } else { + c0 := z.c + z.c = c0 + n + z.a = z.a - n + bs = z.b[c0:z.c] + } + return +} + +func (z *bytesDecReader) readn1() (v uint8) { + if z.a == 0 { + panic(io.EOF) + } + v = z.b[z.c] + z.c++ + z.a-- + return +} + +func (z *bytesDecReader) readn1eof() (v uint8, eof bool) { + if z.a == 0 { + eof = true + return + } + v = z.b[z.c] + z.c++ + z.a-- + return +} + +func (z *bytesDecReader) readb(bs []byte) { + copy(bs, z.readx(len(bs))) +} + +func (z *bytesDecReader) track() { + z.t = z.c +} + +func (z *bytesDecReader) stopTrack() (bs []byte) { + return z.b[z.t:z.c] +} + +// ------------------------------------ + +type decFnInfo struct { + d *Decoder + ti *typeInfo + xfFn Ext + xfTag uint64 + seq seqType +} + +// ---------------------------------------- + +type decFn struct { + i decFnInfo + f func(*decFnInfo, reflect.Value) +} + +func (f *decFnInfo) builtin(rv reflect.Value) { + f.d.d.DecodeBuiltin(f.ti.rtid, rv.Addr().Interface()) +} + +func (f *decFnInfo) rawExt(rv reflect.Value) { + f.d.d.DecodeExt(rv.Addr().Interface(), 0, nil) +} + +func (f *decFnInfo) ext(rv reflect.Value) { + f.d.d.DecodeExt(rv.Addr().Interface(), f.xfTag, f.xfFn) +} + +func (f *decFnInfo) getValueForUnmarshalInterface(rv reflect.Value, indir int8) (v interface{}) { + if indir == -1 { + v = rv.Addr().Interface() + } else if indir == 0 { + v = rv.Interface() + } else { + for j := int8(0); j < indir; j++ { + if rv.IsNil() { + rv.Set(reflect.New(rv.Type().Elem())) + } + rv = rv.Elem() + } + v = rv.Interface() + } + return +} + +func (f *decFnInfo) selferUnmarshal(rv reflect.Value) { + f.getValueForUnmarshalInterface(rv, f.ti.csIndir).(Selfer).CodecDecodeSelf(f.d) +} + +func (f *decFnInfo) binaryUnmarshal(rv reflect.Value) { + bm := f.getValueForUnmarshalInterface(rv, f.ti.bunmIndir).(encoding.BinaryUnmarshaler) + xbs := f.d.d.DecodeBytes(nil, false, true) + if fnerr := bm.UnmarshalBinary(xbs); fnerr != nil { + panic(fnerr) + } +} + +func (f *decFnInfo) textUnmarshal(rv reflect.Value) { + tm := f.getValueForUnmarshalInterface(rv, f.ti.tunmIndir).(encoding.TextUnmarshaler) + fnerr := tm.UnmarshalText(f.d.d.DecodeBytes(f.d.b[:], true, true)) + if fnerr != nil { + panic(fnerr) + } +} + +func (f *decFnInfo) jsonUnmarshal(rv reflect.Value) { + tm := f.getValueForUnmarshalInterface(rv, f.ti.junmIndir).(jsonUnmarshaler) + // bs := f.d.d.DecodeBytes(f.d.b[:], true, true) + // grab the bytes to be read, as UnmarshalJSON needs the full JSON so as to unmarshal it itself. 
+ fnerr := tm.UnmarshalJSON(f.d.nextValueBytes())
+ if fnerr != nil {
+ panic(fnerr)
+ }
+}
+
+func (f *decFnInfo) kErr(rv reflect.Value) {
+ f.d.errorf("no decoding function defined for kind %v", rv.Kind())
+}
+
+func (f *decFnInfo) kString(rv reflect.Value) {
+ rv.SetString(f.d.d.DecodeString())
+}
+
+func (f *decFnInfo) kBool(rv reflect.Value) {
+ rv.SetBool(f.d.d.DecodeBool())
+}
+
+func (f *decFnInfo) kInt(rv reflect.Value) {
+ rv.SetInt(f.d.d.DecodeInt(intBitsize))
+}
+
+func (f *decFnInfo) kInt64(rv reflect.Value) {
+ rv.SetInt(f.d.d.DecodeInt(64))
+}
+
+func (f *decFnInfo) kInt32(rv reflect.Value) {
+ rv.SetInt(f.d.d.DecodeInt(32))
+}
+
+func (f *decFnInfo) kInt8(rv reflect.Value) {
+ rv.SetInt(f.d.d.DecodeInt(8))
+}
+
+func (f *decFnInfo) kInt16(rv reflect.Value) {
+ rv.SetInt(f.d.d.DecodeInt(16))
+}
+
+func (f *decFnInfo) kFloat32(rv reflect.Value) {
+ rv.SetFloat(f.d.d.DecodeFloat(true))
+}
+
+func (f *decFnInfo) kFloat64(rv reflect.Value) {
+ rv.SetFloat(f.d.d.DecodeFloat(false))
+}
+
+func (f *decFnInfo) kUint8(rv reflect.Value) {
+ rv.SetUint(f.d.d.DecodeUint(8))
+}
+
+func (f *decFnInfo) kUint64(rv reflect.Value) {
+ rv.SetUint(f.d.d.DecodeUint(64))
+}
+
+func (f *decFnInfo) kUint(rv reflect.Value) {
+ rv.SetUint(f.d.d.DecodeUint(uintBitsize))
+}
+
+func (f *decFnInfo) kUintptr(rv reflect.Value) {
+ rv.SetUint(f.d.d.DecodeUint(uintBitsize))
+}
+
+func (f *decFnInfo) kUint32(rv reflect.Value) {
+ rv.SetUint(f.d.d.DecodeUint(32))
+}
+
+func (f *decFnInfo) kUint16(rv reflect.Value) {
+ rv.SetUint(f.d.d.DecodeUint(16))
+}
+
+// func (f *decFnInfo) kPtr(rv reflect.Value) {
+// debugf(">>>>>>> ??? decode kPtr called - shouldn't get called")
+// if rv.IsNil() {
+// rv.Set(reflect.New(rv.Type().Elem()))
+// }
+// f.d.decodeValue(rv.Elem())
+// }
+
+// var kIntfCtr uint64
+
+func (f *decFnInfo) kInterfaceNaked() (rvn reflect.Value) {
+ // nil interface:
+ // use some heuristics to decode it appropriately
+ // based on the detected next value in the stream.
+ d := f.d
+ d.d.DecodeNaked()
+ n := &d.n
+ if n.v == valueTypeNil {
+ return
+ }
+ // We cannot decode non-nil stream value into nil interface with methods (e.g. io.Reader). 
+ // if num := f.ti.rt.NumMethod(); num > 0 { + if f.ti.numMeth > 0 { + d.errorf("cannot decode non-nil codec value into nil %v (%v methods)", f.ti.rt, f.ti.numMeth) + return + } + // var useRvn bool + switch n.v { + case valueTypeMap: + // if d.h.MapType == nil || d.h.MapType == mapIntfIntfTyp { + // } else if d.h.MapType == mapStrIntfTyp { // for json performance + // } + if d.mtid == 0 || d.mtid == mapIntfIntfTypId { + l := len(n.ms) + n.ms = append(n.ms, nil) + d.decode(&n.ms[l]) + rvn = reflect.ValueOf(&n.ms[l]).Elem() + n.ms = n.ms[:l] + } else if d.mtid == mapStrIntfTypId { // for json performance + l := len(n.ns) + n.ns = append(n.ns, nil) + d.decode(&n.ns[l]) + rvn = reflect.ValueOf(&n.ns[l]).Elem() + n.ns = n.ns[:l] + } else { + rvn = reflect.New(d.h.MapType).Elem() + d.decodeValue(rvn, nil) + } + case valueTypeArray: + // if d.h.SliceType == nil || d.h.SliceType == intfSliceTyp { + if d.stid == 0 || d.stid == intfSliceTypId { + l := len(n.ss) + n.ss = append(n.ss, nil) + d.decode(&n.ss[l]) + rvn = reflect.ValueOf(&n.ss[l]).Elem() + n.ss = n.ss[:l] + } else { + rvn = reflect.New(d.h.SliceType).Elem() + d.decodeValue(rvn, nil) + } + case valueTypeExt: + var v interface{} + tag, bytes := n.u, n.l // calling decode below might taint the values + if bytes == nil { + l := len(n.is) + n.is = append(n.is, nil) + v2 := &n.is[l] + n.is = n.is[:l] + d.decode(v2) + v = *v2 + } + bfn := d.h.getExtForTag(tag) + if bfn == nil { + var re RawExt + re.Tag = tag + re.Data = detachZeroCopyBytes(d.bytes, nil, bytes) + rvn = reflect.ValueOf(re) + } else { + rvnA := reflect.New(bfn.rt) + rvn = rvnA.Elem() + if bytes != nil { + bfn.ext.ReadExt(rvnA.Interface(), bytes) + } else { + bfn.ext.UpdateExt(rvnA.Interface(), v) + } + } + case valueTypeNil: + // no-op + case valueTypeInt: + rvn = reflect.ValueOf(&n.i).Elem() + case valueTypeUint: + rvn = reflect.ValueOf(&n.u).Elem() + case valueTypeFloat: + rvn = reflect.ValueOf(&n.f).Elem() + case valueTypeBool: + rvn = reflect.ValueOf(&n.b).Elem() + case valueTypeString, valueTypeSymbol: + rvn = reflect.ValueOf(&n.s).Elem() + case valueTypeBytes: + rvn = reflect.ValueOf(&n.l).Elem() + case valueTypeTimestamp: + rvn = reflect.ValueOf(&n.t).Elem() + default: + panic(fmt.Errorf("kInterfaceNaked: unexpected valueType: %d", n.v)) + } + return +} + +func (f *decFnInfo) kInterface(rv reflect.Value) { + // debugf("\t===> kInterface") + + // Note: + // A consequence of how kInterface works, is that + // if an interface already contains something, we try + // to decode into what was there before. + // We do not replace with a generic value (as got from decodeNaked). + + var rvn reflect.Value + if rv.IsNil() { + rvn = f.kInterfaceNaked() + if rvn.IsValid() { + rv.Set(rvn) + } + } else if f.d.h.InterfaceReset { + rvn = f.kInterfaceNaked() + if rvn.IsValid() { + rv.Set(rvn) + } else { + // reset to zero value based on current type in there. + rv.Set(reflect.Zero(rv.Elem().Type())) + } + } else { + rvn = rv.Elem() + // Note: interface{} is settable, but underlying type may not be. + // Consequently, we have to set the reflect.Value directly. + // if underlying type is settable (e.g. ptr or interface), + // we just decode into it. + // Else we create a settable value, decode into it, and set on the interface. 
+ if rvn.CanSet() { + f.d.decodeValue(rvn, nil) + } else { + rvn2 := reflect.New(rvn.Type()).Elem() + rvn2.Set(rvn) + f.d.decodeValue(rvn2, nil) + rv.Set(rvn2) + } + } +} + +func (f *decFnInfo) kStruct(rv reflect.Value) { + fti := f.ti + d := f.d + dd := d.d + cr := d.cr + ctyp := dd.ContainerType() + if ctyp == valueTypeMap { + containerLen := dd.ReadMapStart() + if containerLen == 0 { + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return + } + tisfi := fti.sfi + hasLen := containerLen >= 0 + if hasLen { + for j := 0; j < containerLen; j++ { + // rvkencname := dd.DecodeString() + if cr != nil { + cr.sendContainerState(containerMapKey) + } + rvkencname := stringView(dd.DecodeBytes(f.d.b[:], true, true)) + // rvksi := ti.getForEncName(rvkencname) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + if k := fti.indexForEncName(rvkencname); k > -1 { + si := tisfi[k] + if dd.TryDecodeAsNil() { + si.setToZeroValue(rv) + } else { + d.decodeValue(si.field(rv, true), nil) + } + } else { + d.structFieldNotFound(-1, rvkencname) + } + } + } else { + for j := 0; !dd.CheckBreak(); j++ { + // rvkencname := dd.DecodeString() + if cr != nil { + cr.sendContainerState(containerMapKey) + } + rvkencname := stringView(dd.DecodeBytes(f.d.b[:], true, true)) + // rvksi := ti.getForEncName(rvkencname) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + if k := fti.indexForEncName(rvkencname); k > -1 { + si := tisfi[k] + if dd.TryDecodeAsNil() { + si.setToZeroValue(rv) + } else { + d.decodeValue(si.field(rv, true), nil) + } + } else { + d.structFieldNotFound(-1, rvkencname) + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + } else if ctyp == valueTypeArray { + containerLen := dd.ReadArrayStart() + if containerLen == 0 { + if cr != nil { + cr.sendContainerState(containerArrayEnd) + } + return + } + // Not much gain from doing it two ways for array. + // Arrays are not used as much for structs. + hasLen := containerLen >= 0 + for j, si := range fti.sfip { + if hasLen { + if j == containerLen { + break + } + } else if dd.CheckBreak() { + break + } + if cr != nil { + cr.sendContainerState(containerArrayElem) + } + if dd.TryDecodeAsNil() { + si.setToZeroValue(rv) + } else { + d.decodeValue(si.field(rv, true), nil) + } + } + if containerLen > len(fti.sfip) { + // read remaining values and throw away + for j := len(fti.sfip); j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerArrayElem) + } + d.structFieldNotFound(j, "") + } + } + if cr != nil { + cr.sendContainerState(containerArrayEnd) + } + } else { + f.d.error(onlyMapOrArrayCanDecodeIntoStructErr) + return + } +} + +func (f *decFnInfo) kSlice(rv reflect.Value) { + // A slice can be set from a map or array in stream. + // This way, the order can be kept (as order is lost with map). 
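+ // For example (an illustrative sketch): a stream map {"a": 1, "b": 2} decoded into
+ //   var kvs []interface{}
+ //   err := dec.Decode(&kvs)
+ // should yield kvs == []interface{}{"a", 1, "b", 2}: key/value pairs flattened,
+ // stream order kept (numbers arrive as int64/uint64/float64, per Decode's rules).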
+ ti := f.ti + d := f.d + dd := d.d + rtelem0 := ti.rt.Elem() + ctyp := dd.ContainerType() + if ctyp == valueTypeBytes || ctyp == valueTypeString { + // you can only decode bytes or string in the stream into a slice or array of bytes + if !(ti.rtid == uint8SliceTypId || rtelem0.Kind() == reflect.Uint8) { + f.d.errorf("bytes or string in the stream must be decoded into a slice or array of bytes, not %v", ti.rt) + } + if f.seq == seqTypeChan { + bs2 := dd.DecodeBytes(nil, false, true) + ch := rv.Interface().(chan<- byte) + for _, b := range bs2 { + ch <- b + } + } else { + rvbs := rv.Bytes() + bs2 := dd.DecodeBytes(rvbs, false, false) + if rvbs == nil && bs2 != nil || rvbs != nil && bs2 == nil || len(bs2) != len(rvbs) { + if rv.CanSet() { + rv.SetBytes(bs2) + } else { + copy(rvbs, bs2) + } + } + } + return + } + + // array := f.seq == seqTypeChan + + slh, containerLenS := d.decSliceHelperStart() // only expects valueType(Array|Map) + + // // an array can never return a nil slice. so no need to check f.array here. + if containerLenS == 0 { + if f.seq == seqTypeSlice { + if rv.IsNil() { + rv.Set(reflect.MakeSlice(ti.rt, 0, 0)) + } else { + rv.SetLen(0) + } + } else if f.seq == seqTypeChan { + if rv.IsNil() { + rv.Set(reflect.MakeChan(ti.rt, 0)) + } + } + slh.End() + return + } + + rtelem := rtelem0 + for rtelem.Kind() == reflect.Ptr { + rtelem = rtelem.Elem() + } + fn := d.getDecFn(rtelem, true, true) + + var rv0, rv9 reflect.Value + rv0 = rv + rvChanged := false + + // for j := 0; j < containerLenS; j++ { + var rvlen int + if containerLenS > 0 { // hasLen + if f.seq == seqTypeChan { + if rv.IsNil() { + rvlen, _ = decInferLen(containerLenS, f.d.h.MaxInitLen, int(rtelem0.Size())) + rv.Set(reflect.MakeChan(ti.rt, rvlen)) + } + // handle chan specially: + for j := 0; j < containerLenS; j++ { + rv9 = reflect.New(rtelem0).Elem() + slh.ElemContainerState(j) + d.decodeValue(rv9, fn) + rv.Send(rv9) + } + } else { // slice or array + var truncated bool // says len of sequence is not same as expected number of elements + numToRead := containerLenS // if truncated, reset numToRead + + rvcap := rv.Cap() + rvlen = rv.Len() + if containerLenS > rvcap { + if f.seq == seqTypeArray { + d.arrayCannotExpand(rvlen, containerLenS) + } else { + oldRvlenGtZero := rvlen > 0 + rvlen, truncated = decInferLen(containerLenS, f.d.h.MaxInitLen, int(rtelem0.Size())) + if truncated { + if rvlen <= rvcap { + rv.SetLen(rvlen) + } else { + rv = reflect.MakeSlice(ti.rt, rvlen, rvlen) + rvChanged = true + } + } else { + rv = reflect.MakeSlice(ti.rt, rvlen, rvlen) + rvChanged = true + } + if rvChanged && oldRvlenGtZero && !isImmutableKind(rtelem0.Kind()) { + reflect.Copy(rv, rv0) // only copy up to length NOT cap i.e. rv0.Slice(0, rvcap) + } + rvcap = rvlen + } + numToRead = rvlen + } else if containerLenS != rvlen { + if f.seq == seqTypeSlice { + rv.SetLen(containerLenS) + rvlen = containerLenS + } + } + j := 0 + // we read up to the numToRead + for ; j < numToRead; j++ { + slh.ElemContainerState(j) + d.decodeValue(rv.Index(j), fn) + } + + // if slice, expand and read up to containerLenS (or EOF) iff truncated + // if array, swallow all the rest. 
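+ // For example (an illustrative sketch): decoding the stream array [10, 20, 30] into
+ //   var a [2]int
+ // fills a == [2]int{10, 20} and swallows the 30
+ // (or errors first, if the handle sets ErrorIfNoArrayExpand).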
+ + if f.seq == seqTypeArray { + for ; j < containerLenS; j++ { + slh.ElemContainerState(j) + d.swallow() + } + } else if truncated { // slice was truncated, as chan NOT in this block + for ; j < containerLenS; j++ { + rv = expandSliceValue(rv, 1) + rv9 = rv.Index(j) + if resetSliceElemToZeroValue { + rv9.Set(reflect.Zero(rtelem0)) + } + slh.ElemContainerState(j) + d.decodeValue(rv9, fn) + } + } + } + } else { + rvlen = rv.Len() + j := 0 + for ; !dd.CheckBreak(); j++ { + if f.seq == seqTypeChan { + slh.ElemContainerState(j) + rv9 = reflect.New(rtelem0).Elem() + d.decodeValue(rv9, fn) + rv.Send(rv9) + } else { + // if indefinite, etc, then expand the slice if necessary + var decodeIntoBlank bool + if j >= rvlen { + if f.seq == seqTypeArray { + d.arrayCannotExpand(rvlen, j+1) + decodeIntoBlank = true + } else { // if f.seq == seqTypeSlice + // rv = reflect.Append(rv, reflect.Zero(rtelem0)) // uses append logic, plus varargs + rv = expandSliceValue(rv, 1) + rv9 = rv.Index(j) + // rv.Index(rv.Len() - 1).Set(reflect.Zero(rtelem0)) + if resetSliceElemToZeroValue { + rv9.Set(reflect.Zero(rtelem0)) + } + rvlen++ + rvChanged = true + } + } else { // slice or array + rv9 = rv.Index(j) + } + slh.ElemContainerState(j) + if decodeIntoBlank { + d.swallow() + } else { // seqTypeSlice + d.decodeValue(rv9, fn) + } + } + } + if f.seq == seqTypeSlice { + if j < rvlen { + rv.SetLen(j) + } else if j == 0 && rv.IsNil() { + rv = reflect.MakeSlice(ti.rt, 0, 0) + rvChanged = true + } + } + } + slh.End() + + if rvChanged { + rv0.Set(rv) + } +} + +func (f *decFnInfo) kArray(rv reflect.Value) { + // f.d.decodeValue(rv.Slice(0, rv.Len())) + f.kSlice(rv.Slice(0, rv.Len())) +} + +func (f *decFnInfo) kMap(rv reflect.Value) { + d := f.d + dd := d.d + containerLen := dd.ReadMapStart() + cr := d.cr + ti := f.ti + if rv.IsNil() { + rv.Set(reflect.MakeMap(ti.rt)) + } + + if containerLen == 0 { + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return + } + + ktype, vtype := ti.rt.Key(), ti.rt.Elem() + ktypeId := reflect.ValueOf(ktype).Pointer() + vtypeKind := vtype.Kind() + var keyFn, valFn *decFn + var xtyp reflect.Type + for xtyp = ktype; xtyp.Kind() == reflect.Ptr; xtyp = xtyp.Elem() { + } + keyFn = d.getDecFn(xtyp, true, true) + for xtyp = vtype; xtyp.Kind() == reflect.Ptr; xtyp = xtyp.Elem() { + } + valFn = d.getDecFn(xtyp, true, true) + var mapGet, mapSet bool + if !f.d.h.MapValueReset { + // if pointer, mapGet = true + // if interface, mapGet = true if !DecodeNakedAlways (else false) + // if builtin, mapGet = false + // else mapGet = true + if vtypeKind == reflect.Ptr { + mapGet = true + } else if vtypeKind == reflect.Interface { + if !f.d.h.InterfaceReset { + mapGet = true + } + } else if !isImmutableKind(vtypeKind) { + mapGet = true + } + } + + var rvk, rvv, rvz reflect.Value + + // for j := 0; j < containerLen; j++ { + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + rvk = reflect.New(ktype).Elem() + if cr != nil { + cr.sendContainerState(containerMapKey) + } + d.decodeValue(rvk, keyFn) + + // special case if a byte array. 
+ if ktypeId == intfTypId { + rvk = rvk.Elem() + if rvk.Type() == uint8SliceTyp { + rvk = reflect.ValueOf(d.string(rvk.Bytes())) + } + } + mapSet = true // set to false if u do a get, and its a pointer, and exists + if mapGet { + rvv = rv.MapIndex(rvk) + if rvv.IsValid() { + if vtypeKind == reflect.Ptr { + mapSet = false + } + } else { + if rvz.IsValid() { + rvz.Set(reflect.Zero(vtype)) + } else { + rvz = reflect.New(vtype).Elem() + } + rvv = rvz + } + } else { + if rvz.IsValid() { + rvz.Set(reflect.Zero(vtype)) + } else { + rvz = reflect.New(vtype).Elem() + } + rvv = rvz + } + if cr != nil { + cr.sendContainerState(containerMapValue) + } + d.decodeValue(rvv, valFn) + if mapSet { + rv.SetMapIndex(rvk, rvv) + } + } + } else { + for j := 0; !dd.CheckBreak(); j++ { + rvk = reflect.New(ktype).Elem() + if cr != nil { + cr.sendContainerState(containerMapKey) + } + d.decodeValue(rvk, keyFn) + + // special case if a byte array. + if ktypeId == intfTypId { + rvk = rvk.Elem() + if rvk.Type() == uint8SliceTyp { + rvk = reflect.ValueOf(d.string(rvk.Bytes())) + } + } + mapSet = true // set to false if u do a get, and its a pointer, and exists + if mapGet { + rvv = rv.MapIndex(rvk) + if rvv.IsValid() { + if vtypeKind == reflect.Ptr { + mapSet = false + } + } else { + if rvz.IsValid() { + rvz.Set(reflect.Zero(vtype)) + } else { + rvz = reflect.New(vtype).Elem() + } + rvv = rvz + } + } else { + if rvz.IsValid() { + rvz.Set(reflect.Zero(vtype)) + } else { + rvz = reflect.New(vtype).Elem() + } + rvv = rvz + } + if cr != nil { + cr.sendContainerState(containerMapValue) + } + d.decodeValue(rvv, valFn) + if mapSet { + rv.SetMapIndex(rvk, rvv) + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +type decRtidFn struct { + rtid uintptr + fn decFn +} + +// decNaked is used to keep track of the primitives decoded. +// Without it, we would have to decode each primitive and wrap it +// in an interface{}, causing an allocation. +// In this model, the primitives are decoded in a "pseudo-atomic" fashion, +// so we can rest assured that no other decoding happens while these +// primitives are being decoded. +// +// maps and arrays are not handled by this mechanism. +// However, RawExt is, and we accomodate for extensions that decode +// RawExt from DecodeNaked, but need to decode the value subsequently. +// kInterfaceNaked and swallow, which call DecodeNaked, handle this caveat. +// +// However, decNaked also keeps some arrays of default maps and slices +// used in DecodeNaked. This way, we can get a pointer to it +// without causing a new heap allocation. +// +// kInterfaceNaked will ensure that there is no allocation for the common +// uses. +type decNaked struct { + // r RawExt // used for RawExt, uint, []byte. + u uint64 + i int64 + f float64 + l []byte + s string + t time.Time + b bool + v valueType + + // stacks for reducing allocation + is []interface{} + ms []map[interface{}]interface{} + ns []map[string]interface{} + ss [][]interface{} + // rs []RawExt + + // keep arrays at the bottom? Chance is that they are not used much. + ia [4]interface{} + ma [4]map[interface{}]interface{} + na [4]map[string]interface{} + sa [4][]interface{} + // ra [2]RawExt +} + +func (n *decNaked) reset() { + if n.ss != nil { + n.ss = n.ss[:0] + } + if n.is != nil { + n.is = n.is[:0] + } + if n.ms != nil { + n.ms = n.ms[:0] + } + if n.ns != nil { + n.ns = n.ns[:0] + } +} + +// A Decoder reads and decodes an object from an input stream in the codec format. 
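+//
+// A typical round trip (an illustrative sketch; any Handle in this package works, e.g. MsgpackHandle):
+//   var h codec.MsgpackHandle
+//   var buf []byte
+//   err := codec.NewEncoderBytes(&buf, &h).Encode(map[string]int{"a": 1})
+//   var v interface{}
+//   err = codec.NewDecoderBytes(buf, &h).Decode(&v)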
+type Decoder struct { + // hopefully, reduce derefencing cost by laying the decReader inside the Decoder. + // Try to put things that go together to fit within a cache line (8 words). + + d decDriver + // NOTE: Decoder shouldn't call it's read methods, + // as the handler MAY need to do some coordination. + r decReader + // sa [initCollectionCap]decRtidFn + h *BasicHandle + hh Handle + + be bool // is binary encoding + bytes bool // is bytes reader + js bool // is json handle + + rb bytesDecReader + ri ioDecReader + cr containerStateRecv + + s []decRtidFn + f map[uintptr]*decFn + + // _ uintptr // for alignment purposes, so next one starts from a cache line + + // cache the mapTypeId and sliceTypeId for faster comparisons + mtid uintptr + stid uintptr + + n decNaked + b [scratchByteArrayLen]byte + is map[string]string // used for interning strings +} + +// NewDecoder returns a Decoder for decoding a stream of bytes from an io.Reader. +// +// For efficiency, Users are encouraged to pass in a memory buffered reader +// (eg bufio.Reader, bytes.Buffer). +func NewDecoder(r io.Reader, h Handle) *Decoder { + d := newDecoder(h) + d.Reset(r) + return d +} + +// NewDecoderBytes returns a Decoder which efficiently decodes directly +// from a byte slice with zero copying. +func NewDecoderBytes(in []byte, h Handle) *Decoder { + d := newDecoder(h) + d.ResetBytes(in) + return d +} + +func newDecoder(h Handle) *Decoder { + d := &Decoder{hh: h, h: h.getBasicHandle(), be: h.isBinary()} + n := &d.n + // n.rs = n.ra[:0] + n.ms = n.ma[:0] + n.is = n.ia[:0] + n.ns = n.na[:0] + n.ss = n.sa[:0] + _, d.js = h.(*JsonHandle) + if d.h.InternString { + d.is = make(map[string]string, 32) + } + d.d = h.newDecDriver(d) + d.cr, _ = d.d.(containerStateRecv) + // d.d = h.newDecDriver(decReaderT{true, &d.rb, &d.ri}) + return d +} + +func (d *Decoder) resetCommon() { + d.n.reset() + d.d.reset() + // reset all things which were cached from the Handle, + // but could be changed. + d.mtid, d.stid = 0, 0 + if d.h.MapType != nil { + d.mtid = reflect.ValueOf(d.h.MapType).Pointer() + } + if d.h.SliceType != nil { + d.stid = reflect.ValueOf(d.h.SliceType).Pointer() + } +} + +func (d *Decoder) Reset(r io.Reader) { + d.ri.x = &d.b + // d.s = d.sa[:0] + d.ri.bs.r = r + var ok bool + d.ri.br, ok = r.(decReaderByteScanner) + if !ok { + d.ri.br = &d.ri.bs + } + d.r = &d.ri + d.resetCommon() +} + +func (d *Decoder) ResetBytes(in []byte) { + // d.s = d.sa[:0] + d.rb.reset(in) + d.r = &d.rb + d.resetCommon() +} + +// func (d *Decoder) sendContainerState(c containerState) { +// if d.cr != nil { +// d.cr.sendContainerState(c) +// } +// } + +// Decode decodes the stream from reader and stores the result in the +// value pointed to by v. v cannot be a nil pointer. v can also be +// a reflect.Value of a pointer. +// +// Note that a pointer to a nil interface is not a nil pointer. +// If you do not know what type of stream it is, pass in a pointer to a nil interface. +// We will decode and store a value in that nil interface. +// +// Sample usages: +// // Decoding into a non-nil typed value +// var f float32 +// err = codec.NewDecoder(r, handle).Decode(&f) +// +// // Decoding into nil interface +// var v interface{} +// dec := codec.NewDecoder(r, handle) +// err = dec.Decode(&v) +// +// When decoding into a nil interface{}, we will decode into an appropriate value based +// on the contents of the stream: +// - Numbers are decoded as float64, int64 or uint64. 
+// - Other values are decoded appropriately depending on the type: +// bool, string, []byte, time.Time, etc +// - Extensions are decoded as RawExt (if no ext function registered for the tag) +// Configurations exist on the Handle to override defaults +// (e.g. for MapType, SliceType and how to decode raw bytes). +// +// When decoding into a non-nil interface{} value, the mode of encoding is based on the +// type of the value. When a value is seen: +// - If an extension is registered for it, call that extension function +// - If it implements BinaryUnmarshaler, call its UnmarshalBinary(data []byte) error +// - Else decode it based on its reflect.Kind +// +// There are some special rules when decoding into containers (slice/array/map/struct). +// Decode will typically use the stream contents to UPDATE the container. +// - A map can be decoded from a stream map, by updating matching keys. +// - A slice can be decoded from a stream array, +// by updating the first n elements, where n is length of the stream. +// - A slice can be decoded from a stream map, by decoding as if +// it contains a sequence of key-value pairs. +// - A struct can be decoded from a stream map, by updating matching fields. +// - A struct can be decoded from a stream array, +// by updating fields as they occur in the struct (by index). +// +// When decoding a stream map or array with length of 0 into a nil map or slice, +// we reset the destination map or slice to a zero-length value. +// +// However, when decoding a stream nil, we reset the destination container +// to its "zero" value (e.g. nil for slice/map, etc). +// +func (d *Decoder) Decode(v interface{}) (err error) { + defer panicToErr(&err) + d.decode(v) + return +} + +// this is not a smart swallow, as it allocates objects and does unnecessary work. +func (d *Decoder) swallowViaHammer() { + var blank interface{} + d.decodeValue(reflect.ValueOf(&blank).Elem(), nil) +} + +func (d *Decoder) swallow() { + // smarter decode that just swallows the content + dd := d.d + if dd.TryDecodeAsNil() { + return + } + cr := d.cr + switch dd.ContainerType() { + case valueTypeMap: + containerLen := dd.ReadMapStart() + clenGtEqualZero := containerLen >= 0 + for j := 0; ; j++ { + if clenGtEqualZero { + if j >= containerLen { + break + } + } else if dd.CheckBreak() { + break + } + if cr != nil { + cr.sendContainerState(containerMapKey) + } + d.swallow() + if cr != nil { + cr.sendContainerState(containerMapValue) + } + d.swallow() + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + case valueTypeArray: + containerLenS := dd.ReadArrayStart() + clenGtEqualZero := containerLenS >= 0 + for j := 0; ; j++ { + if clenGtEqualZero { + if j >= containerLenS { + break + } + } else if dd.CheckBreak() { + break + } + if cr != nil { + cr.sendContainerState(containerArrayElem) + } + d.swallow() + } + if cr != nil { + cr.sendContainerState(containerArrayEnd) + } + case valueTypeBytes: + dd.DecodeBytes(d.b[:], false, true) + case valueTypeString: + dd.DecodeBytes(d.b[:], true, true) + // dd.DecodeStringAsBytes(d.b[:]) + default: + // these are all primitives, which we can get from decodeNaked + // if RawExt using Value, complete the processing. + dd.DecodeNaked() + if n := &d.n; n.v == valueTypeExt && n.l == nil { + l := len(n.is) + n.is = append(n.is, nil) + v2 := &n.is[l] + n.is = n.is[:l] + d.decode(v2) + } + } +} + +// MustDecode is like Decode, but panics if unable to Decode. +// This provides insight to the code location that triggered the error. 
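+//
+// For example (an illustrative sketch):
+//   if err := dec.Decode(&v); err != nil { /* handle a malformed stream */ }
+//   dec.MustDecode(&v) // same work, but panics instead of returning an error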
+func (d *Decoder) MustDecode(v interface{}) { + d.decode(v) +} + +func (d *Decoder) decode(iv interface{}) { + // if ics, ok := iv.(Selfer); ok { + // ics.CodecDecodeSelf(d) + // return + // } + + if d.d.TryDecodeAsNil() { + switch v := iv.(type) { + case nil: + case *string: + *v = "" + case *bool: + *v = false + case *int: + *v = 0 + case *int8: + *v = 0 + case *int16: + *v = 0 + case *int32: + *v = 0 + case *int64: + *v = 0 + case *uint: + *v = 0 + case *uint8: + *v = 0 + case *uint16: + *v = 0 + case *uint32: + *v = 0 + case *uint64: + *v = 0 + case *float32: + *v = 0 + case *float64: + *v = 0 + case *[]uint8: + *v = nil + case reflect.Value: + if v.Kind() != reflect.Ptr || v.IsNil() { + d.errNotValidPtrValue(v) + } + // d.chkPtrValue(v) + v = v.Elem() + if v.IsValid() { + v.Set(reflect.Zero(v.Type())) + } + default: + rv := reflect.ValueOf(iv) + if rv.Kind() != reflect.Ptr || rv.IsNil() { + d.errNotValidPtrValue(rv) + } + // d.chkPtrValue(rv) + rv = rv.Elem() + if rv.IsValid() { + rv.Set(reflect.Zero(rv.Type())) + } + } + return + } + + switch v := iv.(type) { + case nil: + d.error(cannotDecodeIntoNilErr) + return + + case Selfer: + v.CodecDecodeSelf(d) + + case reflect.Value: + if v.Kind() != reflect.Ptr || v.IsNil() { + d.errNotValidPtrValue(v) + } + // d.chkPtrValue(v) + d.decodeValueNotNil(v.Elem(), nil) + + case *string: + + *v = d.d.DecodeString() + case *bool: + *v = d.d.DecodeBool() + case *int: + *v = int(d.d.DecodeInt(intBitsize)) + case *int8: + *v = int8(d.d.DecodeInt(8)) + case *int16: + *v = int16(d.d.DecodeInt(16)) + case *int32: + *v = int32(d.d.DecodeInt(32)) + case *int64: + *v = d.d.DecodeInt(64) + case *uint: + *v = uint(d.d.DecodeUint(uintBitsize)) + case *uint8: + *v = uint8(d.d.DecodeUint(8)) + case *uint16: + *v = uint16(d.d.DecodeUint(16)) + case *uint32: + *v = uint32(d.d.DecodeUint(32)) + case *uint64: + *v = d.d.DecodeUint(64) + case *float32: + *v = float32(d.d.DecodeFloat(true)) + case *float64: + *v = d.d.DecodeFloat(false) + case *[]uint8: + *v = d.d.DecodeBytes(*v, false, false) + + case *interface{}: + d.decodeValueNotNil(reflect.ValueOf(iv).Elem(), nil) + + default: + if !fastpathDecodeTypeSwitch(iv, d) { + d.decodeI(iv, true, false, false, false) + } + } +} + +func (d *Decoder) preDecodeValue(rv reflect.Value, tryNil bool) (rv2 reflect.Value, proceed bool) { + if tryNil && d.d.TryDecodeAsNil() { + // No need to check if a ptr, recursively, to determine + // whether to set value to nil. + // Just always set value to its zero type. + if rv.IsValid() { // rv.CanSet() // always settable, except it's invalid + rv.Set(reflect.Zero(rv.Type())) + } + return + } + + // If stream is not containing a nil value, then we can deref to the base + // non-pointer value, and decode into that. 
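+ // For example (an illustrative sketch): decoding into a **int allocates the
+ // intermediate pointer chain as needed:
+ //   var pp **int
+ //   err := dec.Decode(&pp) // stream value 7 => **pp == 7, with both pointers allocated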
+ for rv.Kind() == reflect.Ptr { + if rv.IsNil() { + rv.Set(reflect.New(rv.Type().Elem())) + } + rv = rv.Elem() + } + return rv, true +} + +func (d *Decoder) decodeI(iv interface{}, checkPtr, tryNil, checkFastpath, checkCodecSelfer bool) { + rv := reflect.ValueOf(iv) + if checkPtr { + if rv.Kind() != reflect.Ptr || rv.IsNil() { + d.errNotValidPtrValue(rv) + } + // d.chkPtrValue(rv) + } + rv, proceed := d.preDecodeValue(rv, tryNil) + if proceed { + fn := d.getDecFn(rv.Type(), checkFastpath, checkCodecSelfer) + fn.f(&fn.i, rv) + } +} + +func (d *Decoder) decodeValue(rv reflect.Value, fn *decFn) { + if rv, proceed := d.preDecodeValue(rv, true); proceed { + if fn == nil { + fn = d.getDecFn(rv.Type(), true, true) + } + fn.f(&fn.i, rv) + } +} + +func (d *Decoder) decodeValueNotNil(rv reflect.Value, fn *decFn) { + if rv, proceed := d.preDecodeValue(rv, false); proceed { + if fn == nil { + fn = d.getDecFn(rv.Type(), true, true) + } + fn.f(&fn.i, rv) + } +} + +func (d *Decoder) getDecFn(rt reflect.Type, checkFastpath, checkCodecSelfer bool) (fn *decFn) { + rtid := reflect.ValueOf(rt).Pointer() + + // retrieve or register a focus'ed function for this type + // to eliminate need to do the retrieval multiple times + + // if d.f == nil && d.s == nil { debugf("---->Creating new dec f map for type: %v\n", rt) } + var ok bool + if useMapForCodecCache { + fn, ok = d.f[rtid] + } else { + for i := range d.s { + v := &(d.s[i]) + if v.rtid == rtid { + fn, ok = &(v.fn), true + break + } + } + } + if ok { + return + } + + if useMapForCodecCache { + if d.f == nil { + d.f = make(map[uintptr]*decFn, initCollectionCap) + } + fn = new(decFn) + d.f[rtid] = fn + } else { + if d.s == nil { + d.s = make([]decRtidFn, 0, initCollectionCap) + } + d.s = append(d.s, decRtidFn{rtid: rtid}) + fn = &(d.s[len(d.s)-1]).fn + } + + // debugf("\tCreating new dec fn for type: %v\n", rt) + ti := d.h.getTypeInfo(rtid, rt) + fi := &(fn.i) + fi.d = d + fi.ti = ti + + // An extension can be registered for any type, regardless of the Kind + // (e.g. type BitSet int64, type MyStruct { / * unexported fields * / }, type X []int, etc. + // + // We can't check if it's an extension byte here first, because the user may have + // registered a pointer or non-pointer type, meaning we may have to recurse first + // before matching a mapped type, even though the extension byte is already detected. + // + // NOTE: if decoding into a nil interface{}, we return a non-nil + // value except even if the container registers a length of 0. 
+ if checkCodecSelfer && ti.cs { + fn.f = (*decFnInfo).selferUnmarshal + } else if rtid == rawExtTypId { + fn.f = (*decFnInfo).rawExt + } else if d.d.IsBuiltinType(rtid) { + fn.f = (*decFnInfo).builtin + } else if xfFn := d.h.getExt(rtid); xfFn != nil { + fi.xfTag, fi.xfFn = xfFn.tag, xfFn.ext + fn.f = (*decFnInfo).ext + } else if supportMarshalInterfaces && d.be && ti.bunm { + fn.f = (*decFnInfo).binaryUnmarshal + } else if supportMarshalInterfaces && !d.be && d.js && ti.junm { + //If JSON, we should check JSONUnmarshal before textUnmarshal + fn.f = (*decFnInfo).jsonUnmarshal + } else if supportMarshalInterfaces && !d.be && ti.tunm { + fn.f = (*decFnInfo).textUnmarshal + } else { + rk := rt.Kind() + if fastpathEnabled && checkFastpath && (rk == reflect.Map || rk == reflect.Slice) { + if rt.PkgPath() == "" { + if idx := fastpathAV.index(rtid); idx != -1 { + fn.f = fastpathAV[idx].decfn + } + } else { + // use mapping for underlying type if there + ok = false + var rtu reflect.Type + if rk == reflect.Map { + rtu = reflect.MapOf(rt.Key(), rt.Elem()) + } else { + rtu = reflect.SliceOf(rt.Elem()) + } + rtuid := reflect.ValueOf(rtu).Pointer() + if idx := fastpathAV.index(rtuid); idx != -1 { + xfnf := fastpathAV[idx].decfn + xrt := fastpathAV[idx].rt + fn.f = func(xf *decFnInfo, xrv reflect.Value) { + // xfnf(xf, xrv.Convert(xrt)) + xfnf(xf, xrv.Addr().Convert(reflect.PtrTo(xrt)).Elem()) + } + } + } + } + if fn.f == nil { + switch rk { + case reflect.String: + fn.f = (*decFnInfo).kString + case reflect.Bool: + fn.f = (*decFnInfo).kBool + case reflect.Int: + fn.f = (*decFnInfo).kInt + case reflect.Int64: + fn.f = (*decFnInfo).kInt64 + case reflect.Int32: + fn.f = (*decFnInfo).kInt32 + case reflect.Int8: + fn.f = (*decFnInfo).kInt8 + case reflect.Int16: + fn.f = (*decFnInfo).kInt16 + case reflect.Float32: + fn.f = (*decFnInfo).kFloat32 + case reflect.Float64: + fn.f = (*decFnInfo).kFloat64 + case reflect.Uint8: + fn.f = (*decFnInfo).kUint8 + case reflect.Uint64: + fn.f = (*decFnInfo).kUint64 + case reflect.Uint: + fn.f = (*decFnInfo).kUint + case reflect.Uint32: + fn.f = (*decFnInfo).kUint32 + case reflect.Uint16: + fn.f = (*decFnInfo).kUint16 + // case reflect.Ptr: + // fn.f = (*decFnInfo).kPtr + case reflect.Uintptr: + fn.f = (*decFnInfo).kUintptr + case reflect.Interface: + fn.f = (*decFnInfo).kInterface + case reflect.Struct: + fn.f = (*decFnInfo).kStruct + case reflect.Chan: + fi.seq = seqTypeChan + fn.f = (*decFnInfo).kSlice + case reflect.Slice: + fi.seq = seqTypeSlice + fn.f = (*decFnInfo).kSlice + case reflect.Array: + fi.seq = seqTypeArray + fn.f = (*decFnInfo).kArray + case reflect.Map: + fn.f = (*decFnInfo).kMap + default: + fn.f = (*decFnInfo).kErr + } + } + } + + return +} + +func (d *Decoder) structFieldNotFound(index int, rvkencname string) { + if d.h.ErrorIfNoField { + if index >= 0 { + d.errorf("no matching struct field found when decoding stream array at index %v", index) + return + } else if rvkencname != "" { + d.errorf("no matching struct field found when decoding stream map with key %s", rvkencname) + return + } + } + d.swallow() +} + +func (d *Decoder) arrayCannotExpand(sliceLen, streamLen int) { + if d.h.ErrorIfNoArrayExpand { + d.errorf("cannot expand array len during decode from %v to %v", sliceLen, streamLen) + } +} + +func (d *Decoder) chkPtrValue(rv reflect.Value) { + // We can only decode into a non-nil pointer + if rv.Kind() == reflect.Ptr && !rv.IsNil() { + return + } + d.errNotValidPtrValue(rv) +} + +func (d *Decoder) errNotValidPtrValue(rv reflect.Value) { + if 
!rv.IsValid() { + d.error(cannotDecodeIntoNilErr) + return + } + if !rv.CanInterface() { + d.errorf("cannot decode into a value without an interface: %v", rv) + return + } + rvi := rv.Interface() + d.errorf("cannot decode into non-pointer or nil pointer. Got: %v, %T, %v", rv.Kind(), rvi, rvi) +} + +func (d *Decoder) error(err error) { + panic(err) +} + +func (d *Decoder) errorf(format string, params ...interface{}) { + params2 := make([]interface{}, len(params)+1) + params2[0] = d.r.numread() + copy(params2[1:], params) + err := fmt.Errorf("[pos %d]: "+format, params2...) + panic(err) +} + +func (d *Decoder) string(v []byte) (s string) { + if d.is != nil { + s, ok := d.is[string(v)] // no allocation here. + if !ok { + s = string(v) + d.is[s] = s + } + return s + } + return string(v) // don't return stringView, as we need a real string here. +} + +func (d *Decoder) intern(s string) { + if d.is != nil { + d.is[s] = s + } +} + +func (d *Decoder) nextValueBytes() []byte { + d.d.uncacheRead() + d.r.track() + d.swallow() + return d.r.stopTrack() +} + +// -------------------------------------------------- + +// decSliceHelper assists when decoding into a slice, from a map or an array in the stream. +// A slice can be set from a map or array in stream. This supports the MapBySlice interface. +type decSliceHelper struct { + d *Decoder + // ct valueType + array bool +} + +func (d *Decoder) decSliceHelperStart() (x decSliceHelper, clen int) { + dd := d.d + ctyp := dd.ContainerType() + if ctyp == valueTypeArray { + x.array = true + clen = dd.ReadArrayStart() + } else if ctyp == valueTypeMap { + clen = dd.ReadMapStart() * 2 + } else { + d.errorf("only encoded map or array can be decoded into a slice (%d)", ctyp) + } + // x.ct = ctyp + x.d = d + return +} + +func (x decSliceHelper) End() { + cr := x.d.cr + if cr == nil { + return + } + if x.array { + cr.sendContainerState(containerArrayEnd) + } else { + cr.sendContainerState(containerMapEnd) + } +} + +func (x decSliceHelper) ElemContainerState(index int) { + cr := x.d.cr + if cr == nil { + return + } + if x.array { + cr.sendContainerState(containerArrayElem) + } else { + if index%2 == 0 { + cr.sendContainerState(containerMapKey) + } else { + cr.sendContainerState(containerMapValue) + } + } +} + +func decByteSlice(r decReader, clen int, bs []byte) (bsOut []byte) { + if clen == 0 { + return zeroByteSlice + } + if len(bs) == clen { + bsOut = bs + } else if cap(bs) >= clen { + bsOut = bs[:clen] + } else { + bsOut = make([]byte, clen) + } + r.readb(bsOut) + return +} + +func detachZeroCopyBytes(isBytesReader bool, dest []byte, in []byte) (out []byte) { + if xlen := len(in); xlen > 0 { + if isBytesReader || xlen <= scratchByteArrayLen { + if cap(dest) >= xlen { + out = dest[:xlen] + } else { + out = make([]byte, xlen) + } + copy(out, in) + return + } + } + return in +} + +// decInferLen will infer a sensible length, given the following: +// - clen: length wanted. +// - maxlen: max length to be returned. +// if <= 0, it is unset, and we infer it based on the unit size +// - unit: number of bytes for each element of the collection +func decInferLen(clen, maxlen, unit int) (rvlen int, truncated bool) { + // handle when maxlen is not set i.e. <= 0 + if clen <= 0 { + return + } + if maxlen <= 0 { + // no maxlen defined. Use maximum of 256K memory, with a floor of 4K items. 
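+ // e.g. unit == 8 (int64 elements) => maxlen == 256*1024/8 == 32768 items;
+ // unit >= 64 => maxlen == 4*1024 items (the 4K floor kicks in).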
+ // maxlen = 256 * 1024 / unit + // if maxlen < (4 * 1024) { + // maxlen = 4 * 1024 + // } + if unit < (256 / 4) { + maxlen = 256 * 1024 / unit + } else { + maxlen = 4 * 1024 + } + } + if clen > maxlen { + rvlen = maxlen + truncated = true + } else { + rvlen = clen + } + return + // if clen <= 0 { + // rvlen = 0 + // } else if maxlen > 0 && clen > maxlen { + // rvlen = maxlen + // truncated = true + // } else { + // rvlen = clen + // } + // return +} + +// // implement overall decReader wrapping both, for possible use inline: +// type decReaderT struct { +// bytes bool +// rb *bytesDecReader +// ri *ioDecReader +// } +// +// // implement *Decoder as a decReader. +// // Using decReaderT (defined just above) caused performance degradation +// // possibly because of constant copying the value, +// // and some value->interface conversion causing allocation. +// func (d *Decoder) unreadn1() { +// if d.bytes { +// d.rb.unreadn1() +// } else { +// d.ri.unreadn1() +// } +// } +// ... for other methods of decReader. +// Testing showed that performance improvement was negligible. diff --git a/vendor/github.com/coreos/etcd/Godeps/_workspace/src/github.com/ugorji/go/codec/encode.go b/vendor/github.com/coreos/etcd/Godeps/_workspace/src/github.com/ugorji/go/codec/encode.go new file mode 100644 index 000000000..99af6fa55 --- /dev/null +++ b/vendor/github.com/coreos/etcd/Godeps/_workspace/src/github.com/ugorji/go/codec/encode.go @@ -0,0 +1,1405 @@ +// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved. +// Use of this source code is governed by a MIT license found in the LICENSE file. + +package codec + +import ( + "encoding" + "fmt" + "io" + "reflect" + "sort" + "sync" +) + +const ( + defEncByteBufSize = 1 << 6 // 4:16, 6:64, 8:256, 10:1024 +) + +// AsSymbolFlag defines what should be encoded as symbols. +type AsSymbolFlag uint8 + +const ( + // AsSymbolDefault is default. + // Currently, this means only encode struct field names as symbols. + // The default is subject to change. + AsSymbolDefault AsSymbolFlag = iota + + // AsSymbolAll means encode anything which could be a symbol as a symbol. + AsSymbolAll = 0xfe + + // AsSymbolNone means do not encode anything as a symbol. + AsSymbolNone = 1 << iota + + // AsSymbolMapStringKeys means encode keys in map[string]XXX as symbols. + AsSymbolMapStringKeysFlag + + // AsSymbolStructFieldName means encode struct field names as symbols. + AsSymbolStructFieldNameFlag +) + +// encWriter abstracts writing to a byte array or to an io.Writer. 
+type encWriter interface {
+ writeb([]byte)
+ writestr(string)
+ writen1(byte)
+ writen2(byte, byte)
+ atEndOfEncode()
+}
+
+// encDriver abstracts the actual codec (binc vs msgpack, etc)
+type encDriver interface {
+ IsBuiltinType(rt uintptr) bool
+ EncodeBuiltin(rt uintptr, v interface{})
+ EncodeNil()
+ EncodeInt(i int64)
+ EncodeUint(i uint64)
+ EncodeBool(b bool)
+ EncodeFloat32(f float32)
+ EncodeFloat64(f float64)
+ // encodeExtPreamble(xtag byte, length int)
+ EncodeRawExt(re *RawExt, e *Encoder)
+ EncodeExt(v interface{}, xtag uint64, ext Ext, e *Encoder)
+ EncodeArrayStart(length int)
+ EncodeMapStart(length int)
+ EncodeString(c charEncoding, v string)
+ EncodeSymbol(v string)
+ EncodeStringBytes(c charEncoding, v []byte)
+ //TODO
+ //encBignum(f *big.Int)
+ //encStringRunes(c charEncoding, v []rune)
+
+ reset()
+}
+
+type encDriverAsis interface {
+ EncodeAsis(v []byte)
+}
+
+type encNoSeparator struct{}
+
+func (_ encNoSeparator) EncodeEnd() {}
+
+type ioEncWriterWriter interface {
+ WriteByte(c byte) error
+ WriteString(s string) (n int, err error)
+ Write(p []byte) (n int, err error)
+}
+
+type ioEncStringWriter interface {
+ WriteString(s string) (n int, err error)
+}
+
+type EncodeOptions struct {
+ // Encode a struct as an array, and not as a map
+ StructToArray bool
+
+ // Canonical representation means that encoding a value will always result in the same
+ // sequence of bytes.
+ //
+ // This only affects maps, as the iteration order for maps is random.
+ //
+ // The implementation MAY use the natural sort order for the map keys if possible:
+ //
+ // - If there is a natural sort order (i.e. for number, bool, string or []byte keys),
+ // then the map keys are first sorted in natural order and then written
+ // with corresponding map values to the stream.
+ // - If there is no natural sort order, then the map keys will first be
+ // encoded into []byte, and then sorted,
+ // before writing the sorted keys and the corresponding map values to the stream.
+ //
+ Canonical bool
+
+ // AsSymbols defines what should be encoded as symbols.
+ //
+ // Encoding as symbols can reduce the encoded size significantly.
+ //
+ // However, during encoding, each string to be encoded as a symbol must
+ // be checked to see if it has been seen before. Consequently, encoding time
+ // will increase if using symbols, because string comparisons have a clear cost.
+ // + // Sample values: + // AsSymbolNone + // AsSymbolAll + // AsSymbolMapStringKeys + // AsSymbolMapStringKeysFlag | AsSymbolStructFieldNameFlag + AsSymbols AsSymbolFlag +} + +// --------------------------------------------- + +type simpleIoEncWriterWriter struct { + w io.Writer + bw io.ByteWriter + sw ioEncStringWriter +} + +func (o *simpleIoEncWriterWriter) WriteByte(c byte) (err error) { + if o.bw != nil { + return o.bw.WriteByte(c) + } + _, err = o.w.Write([]byte{c}) + return +} + +func (o *simpleIoEncWriterWriter) WriteString(s string) (n int, err error) { + if o.sw != nil { + return o.sw.WriteString(s) + } + // return o.w.Write([]byte(s)) + return o.w.Write(bytesView(s)) +} + +func (o *simpleIoEncWriterWriter) Write(p []byte) (n int, err error) { + return o.w.Write(p) +} + +// ---------------------------------------- + +// ioEncWriter implements encWriter and can write to an io.Writer implementation +type ioEncWriter struct { + w ioEncWriterWriter + s simpleIoEncWriterWriter + // x [8]byte // temp byte array re-used internally for efficiency +} + +func (z *ioEncWriter) writeb(bs []byte) { + if len(bs) == 0 { + return + } + n, err := z.w.Write(bs) + if err != nil { + panic(err) + } + if n != len(bs) { + panic(fmt.Errorf("incorrect num bytes written. Expecting: %v, Wrote: %v", len(bs), n)) + } +} + +func (z *ioEncWriter) writestr(s string) { + n, err := z.w.WriteString(s) + if err != nil { + panic(err) + } + if n != len(s) { + panic(fmt.Errorf("incorrect num bytes written. Expecting: %v, Wrote: %v", len(s), n)) + } +} + +func (z *ioEncWriter) writen1(b byte) { + if err := z.w.WriteByte(b); err != nil { + panic(err) + } +} + +func (z *ioEncWriter) writen2(b1 byte, b2 byte) { + z.writen1(b1) + z.writen1(b2) +} + +func (z *ioEncWriter) atEndOfEncode() {} + +// ---------------------------------------- + +// bytesEncWriter implements encWriter and can write to an byte slice. +// It is used by Marshal function. +type bytesEncWriter struct { + b []byte + c int // cursor + out *[]byte // write out on atEndOfEncode +} + +func (z *bytesEncWriter) writeb(s []byte) { + if len(s) > 0 { + c := z.grow(len(s)) + copy(z.b[c:], s) + } +} + +func (z *bytesEncWriter) writestr(s string) { + if len(s) > 0 { + c := z.grow(len(s)) + copy(z.b[c:], s) + } +} + +func (z *bytesEncWriter) writen1(b1 byte) { + c := z.grow(1) + z.b[c] = b1 +} + +func (z *bytesEncWriter) writen2(b1 byte, b2 byte) { + c := z.grow(2) + z.b[c] = b1 + z.b[c+1] = b2 +} + +func (z *bytesEncWriter) atEndOfEncode() { + *(z.out) = z.b[:z.c] +} + +func (z *bytesEncWriter) grow(n int) (oldcursor int) { + oldcursor = z.c + z.c = oldcursor + n + if z.c > len(z.b) { + if z.c > cap(z.b) { + // appendslice logic (if cap < 1024, *2, else *1.25): more expensive. many copy calls. 
+ // bytes.Buffer model (2*cap + n): much better + // bs := make([]byte, 2*cap(z.b)+n) + bs := make([]byte, growCap(cap(z.b), 1, n)) + copy(bs, z.b[:oldcursor]) + z.b = bs + } else { + z.b = z.b[:cap(z.b)] + } + } + return +} + +// --------------------------------------------- + +type encFnInfo struct { + e *Encoder + ti *typeInfo + xfFn Ext + xfTag uint64 + seq seqType +} + +func (f *encFnInfo) builtin(rv reflect.Value) { + f.e.e.EncodeBuiltin(f.ti.rtid, rv.Interface()) +} + +func (f *encFnInfo) rawExt(rv reflect.Value) { + // rev := rv.Interface().(RawExt) + // f.e.e.EncodeRawExt(&rev, f.e) + var re *RawExt + if rv.CanAddr() { + re = rv.Addr().Interface().(*RawExt) + } else { + rev := rv.Interface().(RawExt) + re = &rev + } + f.e.e.EncodeRawExt(re, f.e) +} + +func (f *encFnInfo) ext(rv reflect.Value) { + // if this is a struct|array and it was addressable, then pass the address directly (not the value) + if k := rv.Kind(); (k == reflect.Struct || k == reflect.Array) && rv.CanAddr() { + rv = rv.Addr() + } + f.e.e.EncodeExt(rv.Interface(), f.xfTag, f.xfFn, f.e) +} + +func (f *encFnInfo) getValueForMarshalInterface(rv reflect.Value, indir int8) (v interface{}, proceed bool) { + if indir == 0 { + v = rv.Interface() + } else if indir == -1 { + // If a non-pointer was passed to Encode(), then that value is not addressable. + // Take addr if addresable, else copy value to an addressable value. + if rv.CanAddr() { + v = rv.Addr().Interface() + } else { + rv2 := reflect.New(rv.Type()) + rv2.Elem().Set(rv) + v = rv2.Interface() + // fmt.Printf("rv.Type: %v, rv2.Type: %v, v: %v\n", rv.Type(), rv2.Type(), v) + } + } else { + for j := int8(0); j < indir; j++ { + if rv.IsNil() { + f.e.e.EncodeNil() + return + } + rv = rv.Elem() + } + v = rv.Interface() + } + return v, true +} + +func (f *encFnInfo) selferMarshal(rv reflect.Value) { + if v, proceed := f.getValueForMarshalInterface(rv, f.ti.csIndir); proceed { + v.(Selfer).CodecEncodeSelf(f.e) + } +} + +func (f *encFnInfo) binaryMarshal(rv reflect.Value) { + if v, proceed := f.getValueForMarshalInterface(rv, f.ti.bmIndir); proceed { + bs, fnerr := v.(encoding.BinaryMarshaler).MarshalBinary() + f.e.marshal(bs, fnerr, false, c_RAW) + } +} + +func (f *encFnInfo) textMarshal(rv reflect.Value) { + if v, proceed := f.getValueForMarshalInterface(rv, f.ti.tmIndir); proceed { + // debugf(">>>> encoding.TextMarshaler: %T", rv.Interface()) + bs, fnerr := v.(encoding.TextMarshaler).MarshalText() + f.e.marshal(bs, fnerr, false, c_UTF8) + } +} + +func (f *encFnInfo) jsonMarshal(rv reflect.Value) { + if v, proceed := f.getValueForMarshalInterface(rv, f.ti.jmIndir); proceed { + bs, fnerr := v.(jsonMarshaler).MarshalJSON() + f.e.marshal(bs, fnerr, true, c_UTF8) + } +} + +func (f *encFnInfo) kBool(rv reflect.Value) { + f.e.e.EncodeBool(rv.Bool()) +} + +func (f *encFnInfo) kString(rv reflect.Value) { + f.e.e.EncodeString(c_UTF8, rv.String()) +} + +func (f *encFnInfo) kFloat64(rv reflect.Value) { + f.e.e.EncodeFloat64(rv.Float()) +} + +func (f *encFnInfo) kFloat32(rv reflect.Value) { + f.e.e.EncodeFloat32(float32(rv.Float())) +} + +func (f *encFnInfo) kInt(rv reflect.Value) { + f.e.e.EncodeInt(rv.Int()) +} + +func (f *encFnInfo) kUint(rv reflect.Value) { + f.e.e.EncodeUint(rv.Uint()) +} + +func (f *encFnInfo) kInvalid(rv reflect.Value) { + f.e.e.EncodeNil() +} + +func (f *encFnInfo) kErr(rv reflect.Value) { + f.e.errorf("unsupported kind %s, for %#v", rv.Kind(), rv) +} + +func (f *encFnInfo) kSlice(rv reflect.Value) { + ti := f.ti + // array may be non-addressable, so we 
have to manage with care + // (don't call rv.Bytes, rv.Slice, etc). + // E.g. type struct S{B [2]byte}; + // Encode(S{}) will bomb on "panic: slice of unaddressable array". + e := f.e + if f.seq != seqTypeArray { + if rv.IsNil() { + e.e.EncodeNil() + return + } + // If in this method, then there was no extension function defined. + // So it's okay to treat as []byte. + if ti.rtid == uint8SliceTypId { + e.e.EncodeStringBytes(c_RAW, rv.Bytes()) + return + } + } + cr := e.cr + rtelem := ti.rt.Elem() + l := rv.Len() + if ti.rtid == uint8SliceTypId || rtelem.Kind() == reflect.Uint8 { + switch f.seq { + case seqTypeArray: + // if l == 0 { e.e.encodeStringBytes(c_RAW, nil) } else + if rv.CanAddr() { + e.e.EncodeStringBytes(c_RAW, rv.Slice(0, l).Bytes()) + } else { + var bs []byte + if l <= cap(e.b) { + bs = e.b[:l] + } else { + bs = make([]byte, l) + } + reflect.Copy(reflect.ValueOf(bs), rv) + // TODO: Test that reflect.Copy works instead of manual one-by-one + // for i := 0; i < l; i++ { + // bs[i] = byte(rv.Index(i).Uint()) + // } + e.e.EncodeStringBytes(c_RAW, bs) + } + case seqTypeSlice: + e.e.EncodeStringBytes(c_RAW, rv.Bytes()) + case seqTypeChan: + bs := e.b[:0] + // do not use range, so that the number of elements encoded + // does not change, and encoding does not hang waiting on someone to close chan. + // for b := range rv.Interface().(<-chan byte) { + // bs = append(bs, b) + // } + ch := rv.Interface().(<-chan byte) + for i := 0; i < l; i++ { + bs = append(bs, <-ch) + } + e.e.EncodeStringBytes(c_RAW, bs) + } + return + } + + if ti.mbs { + if l%2 == 1 { + e.errorf("mapBySlice requires even slice length, but got %v", l) + return + } + e.e.EncodeMapStart(l / 2) + } else { + e.e.EncodeArrayStart(l) + } + + if l > 0 { + for rtelem.Kind() == reflect.Ptr { + rtelem = rtelem.Elem() + } + // if kind is reflect.Interface, do not pre-determine the + // encoding type, because preEncodeValue may break it down to + // a concrete type and kInterface will bomb. + var fn *encFn + if rtelem.Kind() != reflect.Interface { + rtelemid := reflect.ValueOf(rtelem).Pointer() + fn = e.getEncFn(rtelemid, rtelem, true, true) + } + // TODO: Consider perf implication of encoding odd index values as symbols if type is string + for j := 0; j < l; j++ { + if cr != nil { + if ti.mbs { + if l%2 == 0 { + cr.sendContainerState(containerMapKey) + } else { + cr.sendContainerState(containerMapValue) + } + } else { + cr.sendContainerState(containerArrayElem) + } + } + if f.seq == seqTypeChan { + if rv2, ok2 := rv.Recv(); ok2 { + e.encodeValue(rv2, fn) + } else { + e.encode(nil) // WE HAVE TO DO SOMETHING, so nil if nothing received. + } + } else { + e.encodeValue(rv.Index(j), fn) + } + } + } + + if cr != nil { + if ti.mbs { + cr.sendContainerState(containerMapEnd) + } else { + cr.sendContainerState(containerArrayEnd) + } + } +} + +func (f *encFnInfo) kStruct(rv reflect.Value) { + fti := f.ti + e := f.e + cr := e.cr + tisfi := fti.sfip + toMap := !(fti.toArray || e.h.StructToArray) + newlen := len(fti.sfi) + + // Use sync.Pool to reduce allocating slices unnecessarily. + // The cost of the occasional locking is less than the cost of new allocation. + pool, poolv, fkvs := encStructPoolGet(newlen) + + // if toMap, use the sorted array. 
If toArray, use unsorted array (to match sequence in struct) + if toMap { + tisfi = fti.sfi + } + newlen = 0 + var kv stringRv + for _, si := range tisfi { + kv.r = si.field(rv, false) + // if si.i != -1 { + // rvals[newlen] = rv.Field(int(si.i)) + // } else { + // rvals[newlen] = rv.FieldByIndex(si.is) + // } + if toMap { + if si.omitEmpty && isEmptyValue(kv.r) { + continue + } + kv.v = si.encName + } else { + // use the zero value. + // if a reference or struct, set to nil (so you do not output too much) + if si.omitEmpty && isEmptyValue(kv.r) { + switch kv.r.Kind() { + case reflect.Struct, reflect.Interface, reflect.Ptr, reflect.Array, + reflect.Map, reflect.Slice: + kv.r = reflect.Value{} //encode as nil + } + } + } + fkvs[newlen] = kv + newlen++ + } + + // debugf(">>>> kStruct: newlen: %v", newlen) + // sep := !e.be + ee := e.e //don't dereference everytime + + if toMap { + ee.EncodeMapStart(newlen) + // asSymbols := e.h.AsSymbols&AsSymbolStructFieldNameFlag != 0 + asSymbols := e.h.AsSymbols == AsSymbolDefault || e.h.AsSymbols&AsSymbolStructFieldNameFlag != 0 + for j := 0; j < newlen; j++ { + kv = fkvs[j] + if cr != nil { + cr.sendContainerState(containerMapKey) + } + if asSymbols { + ee.EncodeSymbol(kv.v) + } else { + ee.EncodeString(c_UTF8, kv.v) + } + if cr != nil { + cr.sendContainerState(containerMapValue) + } + e.encodeValue(kv.r, nil) + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + } else { + ee.EncodeArrayStart(newlen) + for j := 0; j < newlen; j++ { + kv = fkvs[j] + if cr != nil { + cr.sendContainerState(containerArrayElem) + } + e.encodeValue(kv.r, nil) + } + if cr != nil { + cr.sendContainerState(containerArrayEnd) + } + } + + // do not use defer. Instead, use explicit pool return at end of function. + // defer has a cost we are trying to avoid. + // If there is a panic and these slices are not returned, it is ok. + if pool != nil { + pool.Put(poolv) + } +} + +// func (f *encFnInfo) kPtr(rv reflect.Value) { +// debugf(">>>>>>> ??? encode kPtr called - shouldn't get called") +// if rv.IsNil() { +// f.e.e.encodeNil() +// return +// } +// f.e.encodeValue(rv.Elem()) +// } + +func (f *encFnInfo) kInterface(rv reflect.Value) { + if rv.IsNil() { + f.e.e.EncodeNil() + return + } + f.e.encodeValue(rv.Elem(), nil) +} + +func (f *encFnInfo) kMap(rv reflect.Value) { + ee := f.e.e + if rv.IsNil() { + ee.EncodeNil() + return + } + + l := rv.Len() + ee.EncodeMapStart(l) + e := f.e + cr := e.cr + if l == 0 { + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return + } + var asSymbols bool + // determine the underlying key and val encFn's for the map. + // This eliminates some work which is done for each loop iteration i.e. + // rv.Type(), ref.ValueOf(rt).Pointer(), then check map/list for fn. + // + // However, if kind is reflect.Interface, do not pre-determine the + // encoding type, because preEncodeValue may break it down to + // a concrete type and kInterface will bomb. 
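+ // For example (an illustrative sketch): for a map[string]int, valFn is resolved
+ // once right here; for a map[string]interface{}, valFn stays nil and each value's
+ // concrete type is resolved per entry inside encodeValue.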
+ var keyFn, valFn *encFn + ti := f.ti + rtkey := ti.rt.Key() + rtval := ti.rt.Elem() + rtkeyid := reflect.ValueOf(rtkey).Pointer() + // keyTypeIsString := f.ti.rt.Key().Kind() == reflect.String + var keyTypeIsString = rtkeyid == stringTypId + if keyTypeIsString { + asSymbols = e.h.AsSymbols&AsSymbolMapStringKeysFlag != 0 + } else { + for rtkey.Kind() == reflect.Ptr { + rtkey = rtkey.Elem() + } + if rtkey.Kind() != reflect.Interface { + rtkeyid = reflect.ValueOf(rtkey).Pointer() + keyFn = e.getEncFn(rtkeyid, rtkey, true, true) + } + } + for rtval.Kind() == reflect.Ptr { + rtval = rtval.Elem() + } + if rtval.Kind() != reflect.Interface { + rtvalid := reflect.ValueOf(rtval).Pointer() + valFn = e.getEncFn(rtvalid, rtval, true, true) + } + mks := rv.MapKeys() + // for j, lmks := 0, len(mks); j < lmks; j++ { + + if e.h.Canonical { + e.kMapCanonical(rtkeyid, rtkey, rv, mks, valFn, asSymbols) + } else { + for j := range mks { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + if keyTypeIsString { + if asSymbols { + ee.EncodeSymbol(mks[j].String()) + } else { + ee.EncodeString(c_UTF8, mks[j].String()) + } + } else { + e.encodeValue(mks[j], keyFn) + } + if cr != nil { + cr.sendContainerState(containerMapValue) + } + e.encodeValue(rv.MapIndex(mks[j]), valFn) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (e *Encoder) kMapCanonical(rtkeyid uintptr, rtkey reflect.Type, rv reflect.Value, mks []reflect.Value, valFn *encFn, asSymbols bool) { + ee := e.e + cr := e.cr + // we previously did out-of-band if an extension was registered. + // This is not necessary, as the natural kind is sufficient for ordering. + + if rtkeyid == uint8SliceTypId { + mksv := make([]bytesRv, len(mks)) + for i, k := range mks { + v := &mksv[i] + v.r = k + v.v = k.Bytes() + } + sort.Sort(bytesRvSlice(mksv)) + for i := range mksv { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeStringBytes(c_RAW, mksv[i].v) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + e.encodeValue(rv.MapIndex(mksv[i].r), valFn) + } + } else { + switch rtkey.Kind() { + case reflect.Bool: + mksv := make([]boolRv, len(mks)) + for i, k := range mks { + v := &mksv[i] + v.r = k + v.v = k.Bool() + } + sort.Sort(boolRvSlice(mksv)) + for i := range mksv { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeBool(mksv[i].v) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + e.encodeValue(rv.MapIndex(mksv[i].r), valFn) + } + case reflect.String: + mksv := make([]stringRv, len(mks)) + for i, k := range mks { + v := &mksv[i] + v.r = k + v.v = k.String() + } + sort.Sort(stringRvSlice(mksv)) + for i := range mksv { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + if asSymbols { + ee.EncodeSymbol(mksv[i].v) + } else { + ee.EncodeString(c_UTF8, mksv[i].v) + } + if cr != nil { + cr.sendContainerState(containerMapValue) + } + e.encodeValue(rv.MapIndex(mksv[i].r), valFn) + } + case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint, reflect.Uintptr: + mksv := make([]uintRv, len(mks)) + for i, k := range mks { + v := &mksv[i] + v.r = k + v.v = k.Uint() + } + sort.Sort(uintRvSlice(mksv)) + for i := range mksv { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(mksv[i].v) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + e.encodeValue(rv.MapIndex(mksv[i].r), valFn) + } + case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int: + mksv := make([]intRv, 
len(mks)) + for i, k := range mks { + v := &mksv[i] + v.r = k + v.v = k.Int() + } + sort.Sort(intRvSlice(mksv)) + for i := range mksv { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(mksv[i].v) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + e.encodeValue(rv.MapIndex(mksv[i].r), valFn) + } + case reflect.Float32: + mksv := make([]floatRv, len(mks)) + for i, k := range mks { + v := &mksv[i] + v.r = k + v.v = k.Float() + } + sort.Sort(floatRvSlice(mksv)) + for i := range mksv { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeFloat32(float32(mksv[i].v)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + e.encodeValue(rv.MapIndex(mksv[i].r), valFn) + } + case reflect.Float64: + mksv := make([]floatRv, len(mks)) + for i, k := range mks { + v := &mksv[i] + v.r = k + v.v = k.Float() + } + sort.Sort(floatRvSlice(mksv)) + for i := range mksv { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeFloat64(mksv[i].v) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + e.encodeValue(rv.MapIndex(mksv[i].r), valFn) + } + default: + // out-of-band + // first encode each key to a []byte first, then sort them, then record + var mksv []byte = make([]byte, 0, len(mks)*16) // temporary byte slice for the encoding + e2 := NewEncoderBytes(&mksv, e.hh) + mksbv := make([]bytesRv, len(mks)) + for i, k := range mks { + v := &mksbv[i] + l := len(mksv) + e2.MustEncode(k) + v.r = k + v.v = mksv[l:] + // fmt.Printf(">>>>> %s\n", mksv[l:]) + } + sort.Sort(bytesRvSlice(mksbv)) + for j := range mksbv { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + e.asis(mksbv[j].v) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + e.encodeValue(rv.MapIndex(mksbv[j].r), valFn) + } + } + } +} + +// -------------------------------------------------- + +// encFn encapsulates the captured variables and the encode function. +// This way, we only do some calculations one times, and pass to the +// code block that should be called (encapsulated in a function) +// instead of executing the checks every time. +type encFn struct { + i encFnInfo + f func(*encFnInfo, reflect.Value) +} + +// -------------------------------------------------- + +type encRtidFn struct { + rtid uintptr + fn encFn +} + +// An Encoder writes an object to an output stream in the codec format. +type Encoder struct { + // hopefully, reduce derefencing cost by laying the encWriter inside the Encoder + e encDriver + // NOTE: Encoder shouldn't call it's write methods, + // as the handler MAY need to do some coordination. + w encWriter + s []encRtidFn + be bool // is binary encoding + js bool // is json handle + + wi ioEncWriter + wb bytesEncWriter + + h *BasicHandle + hh Handle + + cr containerStateRecv + as encDriverAsis + + f map[uintptr]*encFn + b [scratchByteArrayLen]byte +} + +// NewEncoder returns an Encoder for encoding into an io.Writer. +// +// For efficiency, Users are encouraged to pass in a memory buffered writer +// (eg bufio.Writer, bytes.Buffer). +func NewEncoder(w io.Writer, h Handle) *Encoder { + e := newEncoder(h) + e.Reset(w) + return e +} + +// NewEncoderBytes returns an encoder for encoding directly and efficiently +// into a byte slice, using zero-copying to temporary slices. +// +// It will potentially replace the output byte slice pointed to. +// After encoding, the out parameter contains the encoded contents. 
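+//
+// For example (an illustrative sketch; handle is any configured Handle, e.g. &codec.JsonHandle{}):
+//   var buf []byte
+//   enc := codec.NewEncoderBytes(&buf, handle)
+//   if err := enc.Encode(v); err == nil {
+//       // buf now holds the encoded bytes
+//   }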
+func NewEncoderBytes(out *[]byte, h Handle) *Encoder {
+ e := newEncoder(h)
+ e.ResetBytes(out)
+ return e
+}
+
+func newEncoder(h Handle) *Encoder {
+ e := &Encoder{hh: h, h: h.getBasicHandle(), be: h.isBinary()}
+ _, e.js = h.(*JsonHandle)
+ e.e = h.newEncDriver(e)
+ e.as, _ = e.e.(encDriverAsis)
+ e.cr, _ = e.e.(containerStateRecv)
+ return e
+}
+
+// Reset the Encoder with a new output stream.
+//
+// This accommodates reusing the state of the Encoder,
+// where it has "cached" information about sub-engines.
+func (e *Encoder) Reset(w io.Writer) {
+ ww, ok := w.(ioEncWriterWriter)
+ if ok {
+ e.wi.w = ww
+ } else {
+ sww := &e.wi.s
+ sww.w = w
+ sww.bw, _ = w.(io.ByteWriter)
+ sww.sw, _ = w.(ioEncStringWriter)
+ e.wi.w = sww
+ //ww = bufio.NewWriterSize(w, defEncByteBufSize)
+ }
+ e.w = &e.wi
+ e.e.reset()
+}
+
+func (e *Encoder) ResetBytes(out *[]byte) {
+ in := *out
+ if in == nil {
+ in = make([]byte, defEncByteBufSize)
+ }
+ e.wb.b, e.wb.out, e.wb.c = in, out, 0
+ e.w = &e.wb
+ e.e.reset()
+}
+
+// func (e *Encoder) sendContainerState(c containerState) {
+// if e.cr != nil {
+// e.cr.sendContainerState(c)
+// }
+// }
+
+// Encode writes an object into a stream.
+//
+// Encoding can be configured via the struct tag for the fields.
+// The "codec" key in the struct field's tag value is the key name,
+// followed by an optional comma and options.
+// Note that the "json" key is used in the absence of the "codec" key.
+//
+// To set an option on all fields (e.g. omitempty on all fields), you
+// can create a field called _struct, and set flags on it.
+//
+// Struct values "usually" encode as maps. Each exported struct field is encoded unless:
+// - the field's tag is "-", OR
+// - the field is empty (empty or the zero value) and its tag specifies the "omitempty" option.
+//
+// When encoding as a map, the first string in the tag (before the comma)
+// is the map key string to use when encoding.
+//
+// However, struct values may encode as arrays. This happens when:
+// - StructToArray Encode option is set, OR
+// - the tag on the _struct field sets the "toarray" option
+//
+// Values with types that implement MapBySlice are encoded as stream maps.
+//
+// The empty values (for omitempty option) are false, 0, any nil pointer
+// or interface value, and any array, slice, map, or string of length zero.
+//
+// Anonymous fields are encoded inline except:
+// - the struct tag specifies a replacement name (first value)
+// - the field is of an interface type
+//
+// Examples:
+//
+// // NOTE: 'json:' can be used as the struct tag key, in place of 'codec:' below.
+// type MyStruct struct {
+// _struct bool `codec:",omitempty"` //set omitempty for every field
+// Field1 string `codec:"-"` //skip this field
+// Field2 int `codec:"myName"` //Use key "myName" in encode stream
+// Field3 int32 `codec:",omitempty"` //use key "Field3". Omit if empty.
+// Field4 bool `codec:"f4,omitempty"` //use key "f4". Omit if empty.
+// io.Reader //use key "Reader".
+// MyStruct `codec:"my1"` //use key "my1".
+// MyStruct //inline it
+// ...
+// }
+//
+// type MyStruct struct {
+// _struct bool `codec:",omitempty,toarray"` //set omitempty for every field
+// //and encode struct as an array
+// }
+//
+// The mode of encoding is based on the type of the value.
When a value is seen: +// - If a Selfer, call its CodecEncodeSelf method +// - If an extension is registered for it, call that extension function +// - If it implements encoding.(Binary|Text|JSON)Marshaler, call its Marshal(Binary|Text|JSON) method +// - Else encode it based on its reflect.Kind +// +// Note that struct field names and keys in map[string]XXX will be treated as symbols. +// Some formats support symbols (e.g. binc) and will properly encode the string +// only once in the stream, and use a tag to refer to it thereafter. +func (e *Encoder) Encode(v interface{}) (err error) { + defer panicToErr(&err) + e.encode(v) + e.w.atEndOfEncode() + return +} + +// MustEncode is like Encode, but panics if unable to Encode. +// This provides insight to the code location that triggered the error. +func (e *Encoder) MustEncode(v interface{}) { + e.encode(v) + e.w.atEndOfEncode() +} + +// comment out these (Must)Write methods. They were only put there to support cbor. +// However, users already have access to the streams, and can write directly. +// +// // Write allows users write to the Encoder stream directly. +// func (e *Encoder) Write(bs []byte) (err error) { +// defer panicToErr(&err) +// e.w.writeb(bs) +// return +// } +// // MustWrite is like write, but panics if unable to Write. +// func (e *Encoder) MustWrite(bs []byte) { +// e.w.writeb(bs) +// } + +func (e *Encoder) encode(iv interface{}) { + // if ics, ok := iv.(Selfer); ok { + // ics.CodecEncodeSelf(e) + // return + // } + + switch v := iv.(type) { + case nil: + e.e.EncodeNil() + case Selfer: + v.CodecEncodeSelf(e) + + case reflect.Value: + e.encodeValue(v, nil) + + case string: + e.e.EncodeString(c_UTF8, v) + case bool: + e.e.EncodeBool(v) + case int: + e.e.EncodeInt(int64(v)) + case int8: + e.e.EncodeInt(int64(v)) + case int16: + e.e.EncodeInt(int64(v)) + case int32: + e.e.EncodeInt(int64(v)) + case int64: + e.e.EncodeInt(v) + case uint: + e.e.EncodeUint(uint64(v)) + case uint8: + e.e.EncodeUint(uint64(v)) + case uint16: + e.e.EncodeUint(uint64(v)) + case uint32: + e.e.EncodeUint(uint64(v)) + case uint64: + e.e.EncodeUint(v) + case float32: + e.e.EncodeFloat32(v) + case float64: + e.e.EncodeFloat64(v) + + case []uint8: + e.e.EncodeStringBytes(c_RAW, v) + + case *string: + e.e.EncodeString(c_UTF8, *v) + case *bool: + e.e.EncodeBool(*v) + case *int: + e.e.EncodeInt(int64(*v)) + case *int8: + e.e.EncodeInt(int64(*v)) + case *int16: + e.e.EncodeInt(int64(*v)) + case *int32: + e.e.EncodeInt(int64(*v)) + case *int64: + e.e.EncodeInt(*v) + case *uint: + e.e.EncodeUint(uint64(*v)) + case *uint8: + e.e.EncodeUint(uint64(*v)) + case *uint16: + e.e.EncodeUint(uint64(*v)) + case *uint32: + e.e.EncodeUint(uint64(*v)) + case *uint64: + e.e.EncodeUint(*v) + case *float32: + e.e.EncodeFloat32(*v) + case *float64: + e.e.EncodeFloat64(*v) + + case *[]uint8: + e.e.EncodeStringBytes(c_RAW, *v) + + default: + const checkCodecSelfer1 = true // in case T is passed, where *T is a Selfer, still checkCodecSelfer + if !fastpathEncodeTypeSwitch(iv, e) { + e.encodeI(iv, false, checkCodecSelfer1) + } + } +} + +func (e *Encoder) encodeI(iv interface{}, checkFastpath, checkCodecSelfer bool) { + if rv, proceed := e.preEncodeValue(reflect.ValueOf(iv)); proceed { + rt := rv.Type() + rtid := reflect.ValueOf(rt).Pointer() + fn := e.getEncFn(rtid, rt, checkFastpath, checkCodecSelfer) + fn.f(&fn.i, rv) + } +} + +func (e *Encoder) preEncodeValue(rv reflect.Value) (rv2 reflect.Value, proceed bool) { + // use a goto statement instead of a recursive function for 
ptr/interface. +TOP: + switch rv.Kind() { + case reflect.Ptr, reflect.Interface: + if rv.IsNil() { + e.e.EncodeNil() + return + } + rv = rv.Elem() + goto TOP + case reflect.Slice, reflect.Map: + if rv.IsNil() { + e.e.EncodeNil() + return + } + case reflect.Invalid, reflect.Func: + e.e.EncodeNil() + return + } + + return rv, true +} + +func (e *Encoder) encodeValue(rv reflect.Value, fn *encFn) { + // if a valid fn is passed, it MUST BE for the dereferenced type of rv + if rv, proceed := e.preEncodeValue(rv); proceed { + if fn == nil { + rt := rv.Type() + rtid := reflect.ValueOf(rt).Pointer() + fn = e.getEncFn(rtid, rt, true, true) + } + fn.f(&fn.i, rv) + } +} + +func (e *Encoder) getEncFn(rtid uintptr, rt reflect.Type, checkFastpath, checkCodecSelfer bool) (fn *encFn) { + // rtid := reflect.ValueOf(rt).Pointer() + var ok bool + if useMapForCodecCache { + fn, ok = e.f[rtid] + } else { + for i := range e.s { + v := &(e.s[i]) + if v.rtid == rtid { + fn, ok = &(v.fn), true + break + } + } + } + if ok { + return + } + + if useMapForCodecCache { + if e.f == nil { + e.f = make(map[uintptr]*encFn, initCollectionCap) + } + fn = new(encFn) + e.f[rtid] = fn + } else { + if e.s == nil { + e.s = make([]encRtidFn, 0, initCollectionCap) + } + e.s = append(e.s, encRtidFn{rtid: rtid}) + fn = &(e.s[len(e.s)-1]).fn + } + + ti := e.h.getTypeInfo(rtid, rt) + fi := &(fn.i) + fi.e = e + fi.ti = ti + + if checkCodecSelfer && ti.cs { + fn.f = (*encFnInfo).selferMarshal + } else if rtid == rawExtTypId { + fn.f = (*encFnInfo).rawExt + } else if e.e.IsBuiltinType(rtid) { + fn.f = (*encFnInfo).builtin + } else if xfFn := e.h.getExt(rtid); xfFn != nil { + fi.xfTag, fi.xfFn = xfFn.tag, xfFn.ext + fn.f = (*encFnInfo).ext + } else if supportMarshalInterfaces && e.be && ti.bm { + fn.f = (*encFnInfo).binaryMarshal + } else if supportMarshalInterfaces && !e.be && e.js && ti.jm { + //If JSON, we should check JSONMarshal before textMarshal + fn.f = (*encFnInfo).jsonMarshal + } else if supportMarshalInterfaces && !e.be && ti.tm { + fn.f = (*encFnInfo).textMarshal + } else { + rk := rt.Kind() + if fastpathEnabled && checkFastpath && (rk == reflect.Map || rk == reflect.Slice) { + if rt.PkgPath() == "" { + if idx := fastpathAV.index(rtid); idx != -1 { + fn.f = fastpathAV[idx].encfn + } + } else { + ok = false + // use mapping for underlying type if there + var rtu reflect.Type + if rk == reflect.Map { + rtu = reflect.MapOf(rt.Key(), rt.Elem()) + } else { + rtu = reflect.SliceOf(rt.Elem()) + } + rtuid := reflect.ValueOf(rtu).Pointer() + if idx := fastpathAV.index(rtuid); idx != -1 { + xfnf := fastpathAV[idx].encfn + xrt := fastpathAV[idx].rt + fn.f = func(xf *encFnInfo, xrv reflect.Value) { + xfnf(xf, xrv.Convert(xrt)) + } + } + } + } + if fn.f == nil { + switch rk { + case reflect.Bool: + fn.f = (*encFnInfo).kBool + case reflect.String: + fn.f = (*encFnInfo).kString + case reflect.Float64: + fn.f = (*encFnInfo).kFloat64 + case reflect.Float32: + fn.f = (*encFnInfo).kFloat32 + case reflect.Int, reflect.Int8, reflect.Int64, reflect.Int32, reflect.Int16: + fn.f = (*encFnInfo).kInt + case reflect.Uint8, reflect.Uint64, reflect.Uint, reflect.Uint32, reflect.Uint16, reflect.Uintptr: + fn.f = (*encFnInfo).kUint + case reflect.Invalid: + fn.f = (*encFnInfo).kInvalid + case reflect.Chan: + fi.seq = seqTypeChan + fn.f = (*encFnInfo).kSlice + case reflect.Slice: + fi.seq = seqTypeSlice + fn.f = (*encFnInfo).kSlice + case reflect.Array: + fi.seq = seqTypeArray + fn.f = (*encFnInfo).kSlice + case reflect.Struct: + fn.f = (*encFnInfo).kStruct + 
+			// case reflect.Ptr:
+			// 	fn.f = (*encFnInfo).kPtr
+			case reflect.Interface:
+				fn.f = (*encFnInfo).kInterface
+			case reflect.Map:
+				fn.f = (*encFnInfo).kMap
+			default:
+				fn.f = (*encFnInfo).kErr
+			}
+		}
+	}
+
+	return
+}
+
+func (e *Encoder) marshal(bs []byte, fnerr error, asis bool, c charEncoding) {
+	if fnerr != nil {
+		panic(fnerr)
+	}
+	if bs == nil {
+		e.e.EncodeNil()
+	} else if asis {
+		e.asis(bs)
+	} else {
+		e.e.EncodeStringBytes(c, bs)
+	}
+}
+
+func (e *Encoder) asis(v []byte) {
+	if e.as == nil {
+		e.w.writeb(v)
+	} else {
+		e.as.EncodeAsis(v)
+	}
+}
+
+func (e *Encoder) errorf(format string, params ...interface{}) {
+	err := fmt.Errorf(format, params...)
+	panic(err)
+}
+
+// ----------------------------------------
+
+const encStructPoolLen = 5
+
+// encStructPool is an array of sync.Pool.
+// Each element of the array pools one of encStructPool(8|16|32|64|128).
+// It allows the re-use of slices up to 128 in length.
+// A noticeable performance cost of encoding structs was collecting
+// which values were empty and should be omitted.
+// We needed slices of reflect.Value and string to collect them.
+// This shared pool reduces the amount of unnecessary creation we do.
+// The cost is that of locking sometimes, but sync.Pool is efficient
+// enough to reduce thread contention.
+var encStructPool [encStructPoolLen]sync.Pool
+
+func init() {
+	encStructPool[0].New = func() interface{} { return new([8]stringRv) }
+	encStructPool[1].New = func() interface{} { return new([16]stringRv) }
+	encStructPool[2].New = func() interface{} { return new([32]stringRv) }
+	encStructPool[3].New = func() interface{} { return new([64]stringRv) }
+	encStructPool[4].New = func() interface{} { return new([128]stringRv) }
+}
+
+func encStructPoolGet(newlen int) (p *sync.Pool, v interface{}, s []stringRv) {
+	// if encStructPoolLen != 5 { // constant check, so removed at build time.
+	// 	panic(errors.New("encStructPoolLen must be equal to 5")) // defensive, in case it is changed
+	// }
+	// idxpool := newlen / 8
+
+	// if pool == nil {
+	// 	fkvs = make([]stringRv, newlen)
+	// } else {
+	// 	poolv = pool.Get()
+	// 	switch vv := poolv.(type) {
+	// 	case *[8]stringRv:
+	// 		fkvs = vv[:newlen]
+	// 	case *[16]stringRv:
+	// 		fkvs = vv[:newlen]
+	// 	case *[32]stringRv:
+	// 		fkvs = vv[:newlen]
+	// 	case *[64]stringRv:
+	// 		fkvs = vv[:newlen]
+	// 	case *[128]stringRv:
+	// 		fkvs = vv[:newlen]
+	// 	}
+	// }
+
+	if newlen <= 8 {
+		p = &encStructPool[0]
+		v = p.Get()
+		s = v.(*[8]stringRv)[:newlen]
+	} else if newlen <= 16 {
+		p = &encStructPool[1]
+		v = p.Get()
+		s = v.(*[16]stringRv)[:newlen]
+	} else if newlen <= 32 {
+		p = &encStructPool[2]
+		v = p.Get()
+		s = v.(*[32]stringRv)[:newlen]
+	} else if newlen <= 64 {
+		p = &encStructPool[3]
+		v = p.Get()
+		s = v.(*[64]stringRv)[:newlen]
+	} else if newlen <= 128 {
+		p = &encStructPool[4]
+		v = p.Get()
+		s = v.(*[128]stringRv)[:newlen]
+	} else {
+		s = make([]stringRv, newlen)
+	}
+	return
+}
+
+// ----------------------------------------
+
+// func encErr(format string, params ...interface{}) {
+// 	doPanic(msgTagEnc, format, params...)
+// }
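+
+// Illustrative note (editorial sketch, not upstream code): encStructPoolGet
+// above buckets a request into the smallest pooled array that fits. For example:
+//
+//	p, v, s := encStructPoolGet(10) // uses encStructPool[1]: v is a *[16]stringRv, len(s) == 10
+//	// ... collect the struct's field name/value pairs in s ...
+//	if p != nil {
+//		p.Put(v) // hand the backing array back to its pool when done
+//	}
+//
+// Requests longer than 128 fall through to a plain make([]stringRv, newlen)
+// and are not pooled (p and v are nil in that case).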
diff --git a/vendor/github.com/coreos/etcd/Godeps/_workspace/src/github.com/ugorji/go/codec/fast-path.generated.go b/vendor/github.com/coreos/etcd/Godeps/_workspace/src/github.com/ugorji/go/codec/fast-path.generated.go
new file mode 100644
index 000000000..d968a500f
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/Godeps/_workspace/src/github.com/ugorji/go/codec/fast-path.generated.go
@@ -0,0 +1,38900 @@
+// +build !notfastpath
+
+// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved.
+// Use of this source code is governed by a MIT license found in the LICENSE file.
+
+// ************************************************************
+// DO NOT EDIT.
+// THIS FILE IS AUTO-GENERATED from fast-path.go.tmpl
+// ************************************************************
+
+package codec
+
+// Fast path functions try to create a fast path encode or decode implementation
+// for common maps and slices.
+//
+// We define the functions and register them in this single file
+// so as not to pollute encode.go and decode.go, or create a dependency in there.
+// This file can be omitted without causing a build failure.
+//
+// The advantage of fast paths is:
+// - Many calls bypass reflection altogether
+//
+// Currently supported:
+// - slices of all builtin types,
+// - maps of all builtin types to string or interface value
+// - symmetrical maps of all builtin types (e.g. str-str, uint8-uint8)
+// This should provide adequate "typical" implementations.
+//
+// Note that fast path decode functions must handle values for which an address cannot be obtained.
+// For example:
+//	m2 := map[string]int{}
+//	p2 := []interface{}{m2}
+//	// decoding into p2 will bomb if the fast path functions do not treat m2 as unaddressable.
+//
+
+import (
+	"reflect"
+	"sort"
+)
+
+const fastpathCheckNilFalse = false // for reflect
+const fastpathCheckNilTrue = true   // for type switch
+
+type fastpathT struct{}
+
+var fastpathTV fastpathT
+
+type fastpathE struct {
+	rtid  uintptr
+	rt    reflect.Type
+	encfn func(*encFnInfo, reflect.Value)
+	decfn func(*decFnInfo, reflect.Value)
+}
+
+type fastpathA [271]fastpathE
+
+func (x *fastpathA) index(rtid uintptr) int {
+	// use binary search to grab the index (adapted from sort/search.go)
+	h, i, j := 0, 0, 271 // len(x)
+	for i < j {
+		h = i + (j-i)/2
+		if x[h].rtid < rtid {
+			i = h + 1
+		} else {
+			j = h
+		}
+	}
+	if i < 271 && x[i].rtid == rtid {
+		return i
+	}
+	return -1
+}
+
+type fastpathAslice []fastpathE
+
+func (x fastpathAslice) Len() int           { return len(x) }
+func (x fastpathAslice) Less(i, j int) bool { return x[i].rtid < x[j].rtid }
+func (x fastpathAslice) Swap(i, j int)      { x[i], x[j] = x[j], x[i] }
+
+var fastpathAV fastpathA
+
+// due to a possible initialization loop error, build fastpath in an init()
+func init() {
+	if !fastpathEnabled {
+		return
+	}
+	i := 0
+	fn := func(v interface{}, fe func(*encFnInfo, reflect.Value), fd func(*decFnInfo, reflect.Value)) (f fastpathE) {
+		xrt := reflect.TypeOf(v)
+		xptr := reflect.ValueOf(xrt).Pointer()
+		fastpathAV[i] = fastpathE{xptr, xrt, fe, fd}
+		i++
+		return
+	}
+
+	fn([]interface{}(nil), (*encFnInfo).fastpathEncSliceIntfR, (*decFnInfo).fastpathDecSliceIntfR)
+	fn([]string(nil), (*encFnInfo).fastpathEncSliceStringR, (*decFnInfo).fastpathDecSliceStringR)
+	fn([]float32(nil), (*encFnInfo).fastpathEncSliceFloat32R, (*decFnInfo).fastpathDecSliceFloat32R)
+	fn([]float64(nil), (*encFnInfo).fastpathEncSliceFloat64R, (*decFnInfo).fastpathDecSliceFloat64R)
+	fn([]uint(nil), (*encFnInfo).fastpathEncSliceUintR,
(*decFnInfo).fastpathDecSliceUintR) + fn([]uint16(nil), (*encFnInfo).fastpathEncSliceUint16R, (*decFnInfo).fastpathDecSliceUint16R) + fn([]uint32(nil), (*encFnInfo).fastpathEncSliceUint32R, (*decFnInfo).fastpathDecSliceUint32R) + fn([]uint64(nil), (*encFnInfo).fastpathEncSliceUint64R, (*decFnInfo).fastpathDecSliceUint64R) + fn([]uintptr(nil), (*encFnInfo).fastpathEncSliceUintptrR, (*decFnInfo).fastpathDecSliceUintptrR) + fn([]int(nil), (*encFnInfo).fastpathEncSliceIntR, (*decFnInfo).fastpathDecSliceIntR) + fn([]int8(nil), (*encFnInfo).fastpathEncSliceInt8R, (*decFnInfo).fastpathDecSliceInt8R) + fn([]int16(nil), (*encFnInfo).fastpathEncSliceInt16R, (*decFnInfo).fastpathDecSliceInt16R) + fn([]int32(nil), (*encFnInfo).fastpathEncSliceInt32R, (*decFnInfo).fastpathDecSliceInt32R) + fn([]int64(nil), (*encFnInfo).fastpathEncSliceInt64R, (*decFnInfo).fastpathDecSliceInt64R) + fn([]bool(nil), (*encFnInfo).fastpathEncSliceBoolR, (*decFnInfo).fastpathDecSliceBoolR) + + fn(map[interface{}]interface{}(nil), (*encFnInfo).fastpathEncMapIntfIntfR, (*decFnInfo).fastpathDecMapIntfIntfR) + fn(map[interface{}]string(nil), (*encFnInfo).fastpathEncMapIntfStringR, (*decFnInfo).fastpathDecMapIntfStringR) + fn(map[interface{}]uint(nil), (*encFnInfo).fastpathEncMapIntfUintR, (*decFnInfo).fastpathDecMapIntfUintR) + fn(map[interface{}]uint8(nil), (*encFnInfo).fastpathEncMapIntfUint8R, (*decFnInfo).fastpathDecMapIntfUint8R) + fn(map[interface{}]uint16(nil), (*encFnInfo).fastpathEncMapIntfUint16R, (*decFnInfo).fastpathDecMapIntfUint16R) + fn(map[interface{}]uint32(nil), (*encFnInfo).fastpathEncMapIntfUint32R, (*decFnInfo).fastpathDecMapIntfUint32R) + fn(map[interface{}]uint64(nil), (*encFnInfo).fastpathEncMapIntfUint64R, (*decFnInfo).fastpathDecMapIntfUint64R) + fn(map[interface{}]uintptr(nil), (*encFnInfo).fastpathEncMapIntfUintptrR, (*decFnInfo).fastpathDecMapIntfUintptrR) + fn(map[interface{}]int(nil), (*encFnInfo).fastpathEncMapIntfIntR, (*decFnInfo).fastpathDecMapIntfIntR) + fn(map[interface{}]int8(nil), (*encFnInfo).fastpathEncMapIntfInt8R, (*decFnInfo).fastpathDecMapIntfInt8R) + fn(map[interface{}]int16(nil), (*encFnInfo).fastpathEncMapIntfInt16R, (*decFnInfo).fastpathDecMapIntfInt16R) + fn(map[interface{}]int32(nil), (*encFnInfo).fastpathEncMapIntfInt32R, (*decFnInfo).fastpathDecMapIntfInt32R) + fn(map[interface{}]int64(nil), (*encFnInfo).fastpathEncMapIntfInt64R, (*decFnInfo).fastpathDecMapIntfInt64R) + fn(map[interface{}]float32(nil), (*encFnInfo).fastpathEncMapIntfFloat32R, (*decFnInfo).fastpathDecMapIntfFloat32R) + fn(map[interface{}]float64(nil), (*encFnInfo).fastpathEncMapIntfFloat64R, (*decFnInfo).fastpathDecMapIntfFloat64R) + fn(map[interface{}]bool(nil), (*encFnInfo).fastpathEncMapIntfBoolR, (*decFnInfo).fastpathDecMapIntfBoolR) + fn(map[string]interface{}(nil), (*encFnInfo).fastpathEncMapStringIntfR, (*decFnInfo).fastpathDecMapStringIntfR) + fn(map[string]string(nil), (*encFnInfo).fastpathEncMapStringStringR, (*decFnInfo).fastpathDecMapStringStringR) + fn(map[string]uint(nil), (*encFnInfo).fastpathEncMapStringUintR, (*decFnInfo).fastpathDecMapStringUintR) + fn(map[string]uint8(nil), (*encFnInfo).fastpathEncMapStringUint8R, (*decFnInfo).fastpathDecMapStringUint8R) + fn(map[string]uint16(nil), (*encFnInfo).fastpathEncMapStringUint16R, (*decFnInfo).fastpathDecMapStringUint16R) + fn(map[string]uint32(nil), (*encFnInfo).fastpathEncMapStringUint32R, (*decFnInfo).fastpathDecMapStringUint32R) + fn(map[string]uint64(nil), (*encFnInfo).fastpathEncMapStringUint64R, (*decFnInfo).fastpathDecMapStringUint64R) + 
fn(map[string]uintptr(nil), (*encFnInfo).fastpathEncMapStringUintptrR, (*decFnInfo).fastpathDecMapStringUintptrR) + fn(map[string]int(nil), (*encFnInfo).fastpathEncMapStringIntR, (*decFnInfo).fastpathDecMapStringIntR) + fn(map[string]int8(nil), (*encFnInfo).fastpathEncMapStringInt8R, (*decFnInfo).fastpathDecMapStringInt8R) + fn(map[string]int16(nil), (*encFnInfo).fastpathEncMapStringInt16R, (*decFnInfo).fastpathDecMapStringInt16R) + fn(map[string]int32(nil), (*encFnInfo).fastpathEncMapStringInt32R, (*decFnInfo).fastpathDecMapStringInt32R) + fn(map[string]int64(nil), (*encFnInfo).fastpathEncMapStringInt64R, (*decFnInfo).fastpathDecMapStringInt64R) + fn(map[string]float32(nil), (*encFnInfo).fastpathEncMapStringFloat32R, (*decFnInfo).fastpathDecMapStringFloat32R) + fn(map[string]float64(nil), (*encFnInfo).fastpathEncMapStringFloat64R, (*decFnInfo).fastpathDecMapStringFloat64R) + fn(map[string]bool(nil), (*encFnInfo).fastpathEncMapStringBoolR, (*decFnInfo).fastpathDecMapStringBoolR) + fn(map[float32]interface{}(nil), (*encFnInfo).fastpathEncMapFloat32IntfR, (*decFnInfo).fastpathDecMapFloat32IntfR) + fn(map[float32]string(nil), (*encFnInfo).fastpathEncMapFloat32StringR, (*decFnInfo).fastpathDecMapFloat32StringR) + fn(map[float32]uint(nil), (*encFnInfo).fastpathEncMapFloat32UintR, (*decFnInfo).fastpathDecMapFloat32UintR) + fn(map[float32]uint8(nil), (*encFnInfo).fastpathEncMapFloat32Uint8R, (*decFnInfo).fastpathDecMapFloat32Uint8R) + fn(map[float32]uint16(nil), (*encFnInfo).fastpathEncMapFloat32Uint16R, (*decFnInfo).fastpathDecMapFloat32Uint16R) + fn(map[float32]uint32(nil), (*encFnInfo).fastpathEncMapFloat32Uint32R, (*decFnInfo).fastpathDecMapFloat32Uint32R) + fn(map[float32]uint64(nil), (*encFnInfo).fastpathEncMapFloat32Uint64R, (*decFnInfo).fastpathDecMapFloat32Uint64R) + fn(map[float32]uintptr(nil), (*encFnInfo).fastpathEncMapFloat32UintptrR, (*decFnInfo).fastpathDecMapFloat32UintptrR) + fn(map[float32]int(nil), (*encFnInfo).fastpathEncMapFloat32IntR, (*decFnInfo).fastpathDecMapFloat32IntR) + fn(map[float32]int8(nil), (*encFnInfo).fastpathEncMapFloat32Int8R, (*decFnInfo).fastpathDecMapFloat32Int8R) + fn(map[float32]int16(nil), (*encFnInfo).fastpathEncMapFloat32Int16R, (*decFnInfo).fastpathDecMapFloat32Int16R) + fn(map[float32]int32(nil), (*encFnInfo).fastpathEncMapFloat32Int32R, (*decFnInfo).fastpathDecMapFloat32Int32R) + fn(map[float32]int64(nil), (*encFnInfo).fastpathEncMapFloat32Int64R, (*decFnInfo).fastpathDecMapFloat32Int64R) + fn(map[float32]float32(nil), (*encFnInfo).fastpathEncMapFloat32Float32R, (*decFnInfo).fastpathDecMapFloat32Float32R) + fn(map[float32]float64(nil), (*encFnInfo).fastpathEncMapFloat32Float64R, (*decFnInfo).fastpathDecMapFloat32Float64R) + fn(map[float32]bool(nil), (*encFnInfo).fastpathEncMapFloat32BoolR, (*decFnInfo).fastpathDecMapFloat32BoolR) + fn(map[float64]interface{}(nil), (*encFnInfo).fastpathEncMapFloat64IntfR, (*decFnInfo).fastpathDecMapFloat64IntfR) + fn(map[float64]string(nil), (*encFnInfo).fastpathEncMapFloat64StringR, (*decFnInfo).fastpathDecMapFloat64StringR) + fn(map[float64]uint(nil), (*encFnInfo).fastpathEncMapFloat64UintR, (*decFnInfo).fastpathDecMapFloat64UintR) + fn(map[float64]uint8(nil), (*encFnInfo).fastpathEncMapFloat64Uint8R, (*decFnInfo).fastpathDecMapFloat64Uint8R) + fn(map[float64]uint16(nil), (*encFnInfo).fastpathEncMapFloat64Uint16R, (*decFnInfo).fastpathDecMapFloat64Uint16R) + fn(map[float64]uint32(nil), (*encFnInfo).fastpathEncMapFloat64Uint32R, (*decFnInfo).fastpathDecMapFloat64Uint32R) + fn(map[float64]uint64(nil), 
(*encFnInfo).fastpathEncMapFloat64Uint64R, (*decFnInfo).fastpathDecMapFloat64Uint64R) + fn(map[float64]uintptr(nil), (*encFnInfo).fastpathEncMapFloat64UintptrR, (*decFnInfo).fastpathDecMapFloat64UintptrR) + fn(map[float64]int(nil), (*encFnInfo).fastpathEncMapFloat64IntR, (*decFnInfo).fastpathDecMapFloat64IntR) + fn(map[float64]int8(nil), (*encFnInfo).fastpathEncMapFloat64Int8R, (*decFnInfo).fastpathDecMapFloat64Int8R) + fn(map[float64]int16(nil), (*encFnInfo).fastpathEncMapFloat64Int16R, (*decFnInfo).fastpathDecMapFloat64Int16R) + fn(map[float64]int32(nil), (*encFnInfo).fastpathEncMapFloat64Int32R, (*decFnInfo).fastpathDecMapFloat64Int32R) + fn(map[float64]int64(nil), (*encFnInfo).fastpathEncMapFloat64Int64R, (*decFnInfo).fastpathDecMapFloat64Int64R) + fn(map[float64]float32(nil), (*encFnInfo).fastpathEncMapFloat64Float32R, (*decFnInfo).fastpathDecMapFloat64Float32R) + fn(map[float64]float64(nil), (*encFnInfo).fastpathEncMapFloat64Float64R, (*decFnInfo).fastpathDecMapFloat64Float64R) + fn(map[float64]bool(nil), (*encFnInfo).fastpathEncMapFloat64BoolR, (*decFnInfo).fastpathDecMapFloat64BoolR) + fn(map[uint]interface{}(nil), (*encFnInfo).fastpathEncMapUintIntfR, (*decFnInfo).fastpathDecMapUintIntfR) + fn(map[uint]string(nil), (*encFnInfo).fastpathEncMapUintStringR, (*decFnInfo).fastpathDecMapUintStringR) + fn(map[uint]uint(nil), (*encFnInfo).fastpathEncMapUintUintR, (*decFnInfo).fastpathDecMapUintUintR) + fn(map[uint]uint8(nil), (*encFnInfo).fastpathEncMapUintUint8R, (*decFnInfo).fastpathDecMapUintUint8R) + fn(map[uint]uint16(nil), (*encFnInfo).fastpathEncMapUintUint16R, (*decFnInfo).fastpathDecMapUintUint16R) + fn(map[uint]uint32(nil), (*encFnInfo).fastpathEncMapUintUint32R, (*decFnInfo).fastpathDecMapUintUint32R) + fn(map[uint]uint64(nil), (*encFnInfo).fastpathEncMapUintUint64R, (*decFnInfo).fastpathDecMapUintUint64R) + fn(map[uint]uintptr(nil), (*encFnInfo).fastpathEncMapUintUintptrR, (*decFnInfo).fastpathDecMapUintUintptrR) + fn(map[uint]int(nil), (*encFnInfo).fastpathEncMapUintIntR, (*decFnInfo).fastpathDecMapUintIntR) + fn(map[uint]int8(nil), (*encFnInfo).fastpathEncMapUintInt8R, (*decFnInfo).fastpathDecMapUintInt8R) + fn(map[uint]int16(nil), (*encFnInfo).fastpathEncMapUintInt16R, (*decFnInfo).fastpathDecMapUintInt16R) + fn(map[uint]int32(nil), (*encFnInfo).fastpathEncMapUintInt32R, (*decFnInfo).fastpathDecMapUintInt32R) + fn(map[uint]int64(nil), (*encFnInfo).fastpathEncMapUintInt64R, (*decFnInfo).fastpathDecMapUintInt64R) + fn(map[uint]float32(nil), (*encFnInfo).fastpathEncMapUintFloat32R, (*decFnInfo).fastpathDecMapUintFloat32R) + fn(map[uint]float64(nil), (*encFnInfo).fastpathEncMapUintFloat64R, (*decFnInfo).fastpathDecMapUintFloat64R) + fn(map[uint]bool(nil), (*encFnInfo).fastpathEncMapUintBoolR, (*decFnInfo).fastpathDecMapUintBoolR) + fn(map[uint8]interface{}(nil), (*encFnInfo).fastpathEncMapUint8IntfR, (*decFnInfo).fastpathDecMapUint8IntfR) + fn(map[uint8]string(nil), (*encFnInfo).fastpathEncMapUint8StringR, (*decFnInfo).fastpathDecMapUint8StringR) + fn(map[uint8]uint(nil), (*encFnInfo).fastpathEncMapUint8UintR, (*decFnInfo).fastpathDecMapUint8UintR) + fn(map[uint8]uint8(nil), (*encFnInfo).fastpathEncMapUint8Uint8R, (*decFnInfo).fastpathDecMapUint8Uint8R) + fn(map[uint8]uint16(nil), (*encFnInfo).fastpathEncMapUint8Uint16R, (*decFnInfo).fastpathDecMapUint8Uint16R) + fn(map[uint8]uint32(nil), (*encFnInfo).fastpathEncMapUint8Uint32R, (*decFnInfo).fastpathDecMapUint8Uint32R) + fn(map[uint8]uint64(nil), (*encFnInfo).fastpathEncMapUint8Uint64R, (*decFnInfo).fastpathDecMapUint8Uint64R) 
+ fn(map[uint8]uintptr(nil), (*encFnInfo).fastpathEncMapUint8UintptrR, (*decFnInfo).fastpathDecMapUint8UintptrR) + fn(map[uint8]int(nil), (*encFnInfo).fastpathEncMapUint8IntR, (*decFnInfo).fastpathDecMapUint8IntR) + fn(map[uint8]int8(nil), (*encFnInfo).fastpathEncMapUint8Int8R, (*decFnInfo).fastpathDecMapUint8Int8R) + fn(map[uint8]int16(nil), (*encFnInfo).fastpathEncMapUint8Int16R, (*decFnInfo).fastpathDecMapUint8Int16R) + fn(map[uint8]int32(nil), (*encFnInfo).fastpathEncMapUint8Int32R, (*decFnInfo).fastpathDecMapUint8Int32R) + fn(map[uint8]int64(nil), (*encFnInfo).fastpathEncMapUint8Int64R, (*decFnInfo).fastpathDecMapUint8Int64R) + fn(map[uint8]float32(nil), (*encFnInfo).fastpathEncMapUint8Float32R, (*decFnInfo).fastpathDecMapUint8Float32R) + fn(map[uint8]float64(nil), (*encFnInfo).fastpathEncMapUint8Float64R, (*decFnInfo).fastpathDecMapUint8Float64R) + fn(map[uint8]bool(nil), (*encFnInfo).fastpathEncMapUint8BoolR, (*decFnInfo).fastpathDecMapUint8BoolR) + fn(map[uint16]interface{}(nil), (*encFnInfo).fastpathEncMapUint16IntfR, (*decFnInfo).fastpathDecMapUint16IntfR) + fn(map[uint16]string(nil), (*encFnInfo).fastpathEncMapUint16StringR, (*decFnInfo).fastpathDecMapUint16StringR) + fn(map[uint16]uint(nil), (*encFnInfo).fastpathEncMapUint16UintR, (*decFnInfo).fastpathDecMapUint16UintR) + fn(map[uint16]uint8(nil), (*encFnInfo).fastpathEncMapUint16Uint8R, (*decFnInfo).fastpathDecMapUint16Uint8R) + fn(map[uint16]uint16(nil), (*encFnInfo).fastpathEncMapUint16Uint16R, (*decFnInfo).fastpathDecMapUint16Uint16R) + fn(map[uint16]uint32(nil), (*encFnInfo).fastpathEncMapUint16Uint32R, (*decFnInfo).fastpathDecMapUint16Uint32R) + fn(map[uint16]uint64(nil), (*encFnInfo).fastpathEncMapUint16Uint64R, (*decFnInfo).fastpathDecMapUint16Uint64R) + fn(map[uint16]uintptr(nil), (*encFnInfo).fastpathEncMapUint16UintptrR, (*decFnInfo).fastpathDecMapUint16UintptrR) + fn(map[uint16]int(nil), (*encFnInfo).fastpathEncMapUint16IntR, (*decFnInfo).fastpathDecMapUint16IntR) + fn(map[uint16]int8(nil), (*encFnInfo).fastpathEncMapUint16Int8R, (*decFnInfo).fastpathDecMapUint16Int8R) + fn(map[uint16]int16(nil), (*encFnInfo).fastpathEncMapUint16Int16R, (*decFnInfo).fastpathDecMapUint16Int16R) + fn(map[uint16]int32(nil), (*encFnInfo).fastpathEncMapUint16Int32R, (*decFnInfo).fastpathDecMapUint16Int32R) + fn(map[uint16]int64(nil), (*encFnInfo).fastpathEncMapUint16Int64R, (*decFnInfo).fastpathDecMapUint16Int64R) + fn(map[uint16]float32(nil), (*encFnInfo).fastpathEncMapUint16Float32R, (*decFnInfo).fastpathDecMapUint16Float32R) + fn(map[uint16]float64(nil), (*encFnInfo).fastpathEncMapUint16Float64R, (*decFnInfo).fastpathDecMapUint16Float64R) + fn(map[uint16]bool(nil), (*encFnInfo).fastpathEncMapUint16BoolR, (*decFnInfo).fastpathDecMapUint16BoolR) + fn(map[uint32]interface{}(nil), (*encFnInfo).fastpathEncMapUint32IntfR, (*decFnInfo).fastpathDecMapUint32IntfR) + fn(map[uint32]string(nil), (*encFnInfo).fastpathEncMapUint32StringR, (*decFnInfo).fastpathDecMapUint32StringR) + fn(map[uint32]uint(nil), (*encFnInfo).fastpathEncMapUint32UintR, (*decFnInfo).fastpathDecMapUint32UintR) + fn(map[uint32]uint8(nil), (*encFnInfo).fastpathEncMapUint32Uint8R, (*decFnInfo).fastpathDecMapUint32Uint8R) + fn(map[uint32]uint16(nil), (*encFnInfo).fastpathEncMapUint32Uint16R, (*decFnInfo).fastpathDecMapUint32Uint16R) + fn(map[uint32]uint32(nil), (*encFnInfo).fastpathEncMapUint32Uint32R, (*decFnInfo).fastpathDecMapUint32Uint32R) + fn(map[uint32]uint64(nil), (*encFnInfo).fastpathEncMapUint32Uint64R, (*decFnInfo).fastpathDecMapUint32Uint64R) + 
fn(map[uint32]uintptr(nil), (*encFnInfo).fastpathEncMapUint32UintptrR, (*decFnInfo).fastpathDecMapUint32UintptrR) + fn(map[uint32]int(nil), (*encFnInfo).fastpathEncMapUint32IntR, (*decFnInfo).fastpathDecMapUint32IntR) + fn(map[uint32]int8(nil), (*encFnInfo).fastpathEncMapUint32Int8R, (*decFnInfo).fastpathDecMapUint32Int8R) + fn(map[uint32]int16(nil), (*encFnInfo).fastpathEncMapUint32Int16R, (*decFnInfo).fastpathDecMapUint32Int16R) + fn(map[uint32]int32(nil), (*encFnInfo).fastpathEncMapUint32Int32R, (*decFnInfo).fastpathDecMapUint32Int32R) + fn(map[uint32]int64(nil), (*encFnInfo).fastpathEncMapUint32Int64R, (*decFnInfo).fastpathDecMapUint32Int64R) + fn(map[uint32]float32(nil), (*encFnInfo).fastpathEncMapUint32Float32R, (*decFnInfo).fastpathDecMapUint32Float32R) + fn(map[uint32]float64(nil), (*encFnInfo).fastpathEncMapUint32Float64R, (*decFnInfo).fastpathDecMapUint32Float64R) + fn(map[uint32]bool(nil), (*encFnInfo).fastpathEncMapUint32BoolR, (*decFnInfo).fastpathDecMapUint32BoolR) + fn(map[uint64]interface{}(nil), (*encFnInfo).fastpathEncMapUint64IntfR, (*decFnInfo).fastpathDecMapUint64IntfR) + fn(map[uint64]string(nil), (*encFnInfo).fastpathEncMapUint64StringR, (*decFnInfo).fastpathDecMapUint64StringR) + fn(map[uint64]uint(nil), (*encFnInfo).fastpathEncMapUint64UintR, (*decFnInfo).fastpathDecMapUint64UintR) + fn(map[uint64]uint8(nil), (*encFnInfo).fastpathEncMapUint64Uint8R, (*decFnInfo).fastpathDecMapUint64Uint8R) + fn(map[uint64]uint16(nil), (*encFnInfo).fastpathEncMapUint64Uint16R, (*decFnInfo).fastpathDecMapUint64Uint16R) + fn(map[uint64]uint32(nil), (*encFnInfo).fastpathEncMapUint64Uint32R, (*decFnInfo).fastpathDecMapUint64Uint32R) + fn(map[uint64]uint64(nil), (*encFnInfo).fastpathEncMapUint64Uint64R, (*decFnInfo).fastpathDecMapUint64Uint64R) + fn(map[uint64]uintptr(nil), (*encFnInfo).fastpathEncMapUint64UintptrR, (*decFnInfo).fastpathDecMapUint64UintptrR) + fn(map[uint64]int(nil), (*encFnInfo).fastpathEncMapUint64IntR, (*decFnInfo).fastpathDecMapUint64IntR) + fn(map[uint64]int8(nil), (*encFnInfo).fastpathEncMapUint64Int8R, (*decFnInfo).fastpathDecMapUint64Int8R) + fn(map[uint64]int16(nil), (*encFnInfo).fastpathEncMapUint64Int16R, (*decFnInfo).fastpathDecMapUint64Int16R) + fn(map[uint64]int32(nil), (*encFnInfo).fastpathEncMapUint64Int32R, (*decFnInfo).fastpathDecMapUint64Int32R) + fn(map[uint64]int64(nil), (*encFnInfo).fastpathEncMapUint64Int64R, (*decFnInfo).fastpathDecMapUint64Int64R) + fn(map[uint64]float32(nil), (*encFnInfo).fastpathEncMapUint64Float32R, (*decFnInfo).fastpathDecMapUint64Float32R) + fn(map[uint64]float64(nil), (*encFnInfo).fastpathEncMapUint64Float64R, (*decFnInfo).fastpathDecMapUint64Float64R) + fn(map[uint64]bool(nil), (*encFnInfo).fastpathEncMapUint64BoolR, (*decFnInfo).fastpathDecMapUint64BoolR) + fn(map[uintptr]interface{}(nil), (*encFnInfo).fastpathEncMapUintptrIntfR, (*decFnInfo).fastpathDecMapUintptrIntfR) + fn(map[uintptr]string(nil), (*encFnInfo).fastpathEncMapUintptrStringR, (*decFnInfo).fastpathDecMapUintptrStringR) + fn(map[uintptr]uint(nil), (*encFnInfo).fastpathEncMapUintptrUintR, (*decFnInfo).fastpathDecMapUintptrUintR) + fn(map[uintptr]uint8(nil), (*encFnInfo).fastpathEncMapUintptrUint8R, (*decFnInfo).fastpathDecMapUintptrUint8R) + fn(map[uintptr]uint16(nil), (*encFnInfo).fastpathEncMapUintptrUint16R, (*decFnInfo).fastpathDecMapUintptrUint16R) + fn(map[uintptr]uint32(nil), (*encFnInfo).fastpathEncMapUintptrUint32R, (*decFnInfo).fastpathDecMapUintptrUint32R) + fn(map[uintptr]uint64(nil), (*encFnInfo).fastpathEncMapUintptrUint64R, 
(*decFnInfo).fastpathDecMapUintptrUint64R) + fn(map[uintptr]uintptr(nil), (*encFnInfo).fastpathEncMapUintptrUintptrR, (*decFnInfo).fastpathDecMapUintptrUintptrR) + fn(map[uintptr]int(nil), (*encFnInfo).fastpathEncMapUintptrIntR, (*decFnInfo).fastpathDecMapUintptrIntR) + fn(map[uintptr]int8(nil), (*encFnInfo).fastpathEncMapUintptrInt8R, (*decFnInfo).fastpathDecMapUintptrInt8R) + fn(map[uintptr]int16(nil), (*encFnInfo).fastpathEncMapUintptrInt16R, (*decFnInfo).fastpathDecMapUintptrInt16R) + fn(map[uintptr]int32(nil), (*encFnInfo).fastpathEncMapUintptrInt32R, (*decFnInfo).fastpathDecMapUintptrInt32R) + fn(map[uintptr]int64(nil), (*encFnInfo).fastpathEncMapUintptrInt64R, (*decFnInfo).fastpathDecMapUintptrInt64R) + fn(map[uintptr]float32(nil), (*encFnInfo).fastpathEncMapUintptrFloat32R, (*decFnInfo).fastpathDecMapUintptrFloat32R) + fn(map[uintptr]float64(nil), (*encFnInfo).fastpathEncMapUintptrFloat64R, (*decFnInfo).fastpathDecMapUintptrFloat64R) + fn(map[uintptr]bool(nil), (*encFnInfo).fastpathEncMapUintptrBoolR, (*decFnInfo).fastpathDecMapUintptrBoolR) + fn(map[int]interface{}(nil), (*encFnInfo).fastpathEncMapIntIntfR, (*decFnInfo).fastpathDecMapIntIntfR) + fn(map[int]string(nil), (*encFnInfo).fastpathEncMapIntStringR, (*decFnInfo).fastpathDecMapIntStringR) + fn(map[int]uint(nil), (*encFnInfo).fastpathEncMapIntUintR, (*decFnInfo).fastpathDecMapIntUintR) + fn(map[int]uint8(nil), (*encFnInfo).fastpathEncMapIntUint8R, (*decFnInfo).fastpathDecMapIntUint8R) + fn(map[int]uint16(nil), (*encFnInfo).fastpathEncMapIntUint16R, (*decFnInfo).fastpathDecMapIntUint16R) + fn(map[int]uint32(nil), (*encFnInfo).fastpathEncMapIntUint32R, (*decFnInfo).fastpathDecMapIntUint32R) + fn(map[int]uint64(nil), (*encFnInfo).fastpathEncMapIntUint64R, (*decFnInfo).fastpathDecMapIntUint64R) + fn(map[int]uintptr(nil), (*encFnInfo).fastpathEncMapIntUintptrR, (*decFnInfo).fastpathDecMapIntUintptrR) + fn(map[int]int(nil), (*encFnInfo).fastpathEncMapIntIntR, (*decFnInfo).fastpathDecMapIntIntR) + fn(map[int]int8(nil), (*encFnInfo).fastpathEncMapIntInt8R, (*decFnInfo).fastpathDecMapIntInt8R) + fn(map[int]int16(nil), (*encFnInfo).fastpathEncMapIntInt16R, (*decFnInfo).fastpathDecMapIntInt16R) + fn(map[int]int32(nil), (*encFnInfo).fastpathEncMapIntInt32R, (*decFnInfo).fastpathDecMapIntInt32R) + fn(map[int]int64(nil), (*encFnInfo).fastpathEncMapIntInt64R, (*decFnInfo).fastpathDecMapIntInt64R) + fn(map[int]float32(nil), (*encFnInfo).fastpathEncMapIntFloat32R, (*decFnInfo).fastpathDecMapIntFloat32R) + fn(map[int]float64(nil), (*encFnInfo).fastpathEncMapIntFloat64R, (*decFnInfo).fastpathDecMapIntFloat64R) + fn(map[int]bool(nil), (*encFnInfo).fastpathEncMapIntBoolR, (*decFnInfo).fastpathDecMapIntBoolR) + fn(map[int8]interface{}(nil), (*encFnInfo).fastpathEncMapInt8IntfR, (*decFnInfo).fastpathDecMapInt8IntfR) + fn(map[int8]string(nil), (*encFnInfo).fastpathEncMapInt8StringR, (*decFnInfo).fastpathDecMapInt8StringR) + fn(map[int8]uint(nil), (*encFnInfo).fastpathEncMapInt8UintR, (*decFnInfo).fastpathDecMapInt8UintR) + fn(map[int8]uint8(nil), (*encFnInfo).fastpathEncMapInt8Uint8R, (*decFnInfo).fastpathDecMapInt8Uint8R) + fn(map[int8]uint16(nil), (*encFnInfo).fastpathEncMapInt8Uint16R, (*decFnInfo).fastpathDecMapInt8Uint16R) + fn(map[int8]uint32(nil), (*encFnInfo).fastpathEncMapInt8Uint32R, (*decFnInfo).fastpathDecMapInt8Uint32R) + fn(map[int8]uint64(nil), (*encFnInfo).fastpathEncMapInt8Uint64R, (*decFnInfo).fastpathDecMapInt8Uint64R) + fn(map[int8]uintptr(nil), (*encFnInfo).fastpathEncMapInt8UintptrR, (*decFnInfo).fastpathDecMapInt8UintptrR) + 
fn(map[int8]int(nil), (*encFnInfo).fastpathEncMapInt8IntR, (*decFnInfo).fastpathDecMapInt8IntR) + fn(map[int8]int8(nil), (*encFnInfo).fastpathEncMapInt8Int8R, (*decFnInfo).fastpathDecMapInt8Int8R) + fn(map[int8]int16(nil), (*encFnInfo).fastpathEncMapInt8Int16R, (*decFnInfo).fastpathDecMapInt8Int16R) + fn(map[int8]int32(nil), (*encFnInfo).fastpathEncMapInt8Int32R, (*decFnInfo).fastpathDecMapInt8Int32R) + fn(map[int8]int64(nil), (*encFnInfo).fastpathEncMapInt8Int64R, (*decFnInfo).fastpathDecMapInt8Int64R) + fn(map[int8]float32(nil), (*encFnInfo).fastpathEncMapInt8Float32R, (*decFnInfo).fastpathDecMapInt8Float32R) + fn(map[int8]float64(nil), (*encFnInfo).fastpathEncMapInt8Float64R, (*decFnInfo).fastpathDecMapInt8Float64R) + fn(map[int8]bool(nil), (*encFnInfo).fastpathEncMapInt8BoolR, (*decFnInfo).fastpathDecMapInt8BoolR) + fn(map[int16]interface{}(nil), (*encFnInfo).fastpathEncMapInt16IntfR, (*decFnInfo).fastpathDecMapInt16IntfR) + fn(map[int16]string(nil), (*encFnInfo).fastpathEncMapInt16StringR, (*decFnInfo).fastpathDecMapInt16StringR) + fn(map[int16]uint(nil), (*encFnInfo).fastpathEncMapInt16UintR, (*decFnInfo).fastpathDecMapInt16UintR) + fn(map[int16]uint8(nil), (*encFnInfo).fastpathEncMapInt16Uint8R, (*decFnInfo).fastpathDecMapInt16Uint8R) + fn(map[int16]uint16(nil), (*encFnInfo).fastpathEncMapInt16Uint16R, (*decFnInfo).fastpathDecMapInt16Uint16R) + fn(map[int16]uint32(nil), (*encFnInfo).fastpathEncMapInt16Uint32R, (*decFnInfo).fastpathDecMapInt16Uint32R) + fn(map[int16]uint64(nil), (*encFnInfo).fastpathEncMapInt16Uint64R, (*decFnInfo).fastpathDecMapInt16Uint64R) + fn(map[int16]uintptr(nil), (*encFnInfo).fastpathEncMapInt16UintptrR, (*decFnInfo).fastpathDecMapInt16UintptrR) + fn(map[int16]int(nil), (*encFnInfo).fastpathEncMapInt16IntR, (*decFnInfo).fastpathDecMapInt16IntR) + fn(map[int16]int8(nil), (*encFnInfo).fastpathEncMapInt16Int8R, (*decFnInfo).fastpathDecMapInt16Int8R) + fn(map[int16]int16(nil), (*encFnInfo).fastpathEncMapInt16Int16R, (*decFnInfo).fastpathDecMapInt16Int16R) + fn(map[int16]int32(nil), (*encFnInfo).fastpathEncMapInt16Int32R, (*decFnInfo).fastpathDecMapInt16Int32R) + fn(map[int16]int64(nil), (*encFnInfo).fastpathEncMapInt16Int64R, (*decFnInfo).fastpathDecMapInt16Int64R) + fn(map[int16]float32(nil), (*encFnInfo).fastpathEncMapInt16Float32R, (*decFnInfo).fastpathDecMapInt16Float32R) + fn(map[int16]float64(nil), (*encFnInfo).fastpathEncMapInt16Float64R, (*decFnInfo).fastpathDecMapInt16Float64R) + fn(map[int16]bool(nil), (*encFnInfo).fastpathEncMapInt16BoolR, (*decFnInfo).fastpathDecMapInt16BoolR) + fn(map[int32]interface{}(nil), (*encFnInfo).fastpathEncMapInt32IntfR, (*decFnInfo).fastpathDecMapInt32IntfR) + fn(map[int32]string(nil), (*encFnInfo).fastpathEncMapInt32StringR, (*decFnInfo).fastpathDecMapInt32StringR) + fn(map[int32]uint(nil), (*encFnInfo).fastpathEncMapInt32UintR, (*decFnInfo).fastpathDecMapInt32UintR) + fn(map[int32]uint8(nil), (*encFnInfo).fastpathEncMapInt32Uint8R, (*decFnInfo).fastpathDecMapInt32Uint8R) + fn(map[int32]uint16(nil), (*encFnInfo).fastpathEncMapInt32Uint16R, (*decFnInfo).fastpathDecMapInt32Uint16R) + fn(map[int32]uint32(nil), (*encFnInfo).fastpathEncMapInt32Uint32R, (*decFnInfo).fastpathDecMapInt32Uint32R) + fn(map[int32]uint64(nil), (*encFnInfo).fastpathEncMapInt32Uint64R, (*decFnInfo).fastpathDecMapInt32Uint64R) + fn(map[int32]uintptr(nil), (*encFnInfo).fastpathEncMapInt32UintptrR, (*decFnInfo).fastpathDecMapInt32UintptrR) + fn(map[int32]int(nil), (*encFnInfo).fastpathEncMapInt32IntR, (*decFnInfo).fastpathDecMapInt32IntR) + 
fn(map[int32]int8(nil), (*encFnInfo).fastpathEncMapInt32Int8R, (*decFnInfo).fastpathDecMapInt32Int8R) + fn(map[int32]int16(nil), (*encFnInfo).fastpathEncMapInt32Int16R, (*decFnInfo).fastpathDecMapInt32Int16R) + fn(map[int32]int32(nil), (*encFnInfo).fastpathEncMapInt32Int32R, (*decFnInfo).fastpathDecMapInt32Int32R) + fn(map[int32]int64(nil), (*encFnInfo).fastpathEncMapInt32Int64R, (*decFnInfo).fastpathDecMapInt32Int64R) + fn(map[int32]float32(nil), (*encFnInfo).fastpathEncMapInt32Float32R, (*decFnInfo).fastpathDecMapInt32Float32R) + fn(map[int32]float64(nil), (*encFnInfo).fastpathEncMapInt32Float64R, (*decFnInfo).fastpathDecMapInt32Float64R) + fn(map[int32]bool(nil), (*encFnInfo).fastpathEncMapInt32BoolR, (*decFnInfo).fastpathDecMapInt32BoolR) + fn(map[int64]interface{}(nil), (*encFnInfo).fastpathEncMapInt64IntfR, (*decFnInfo).fastpathDecMapInt64IntfR) + fn(map[int64]string(nil), (*encFnInfo).fastpathEncMapInt64StringR, (*decFnInfo).fastpathDecMapInt64StringR) + fn(map[int64]uint(nil), (*encFnInfo).fastpathEncMapInt64UintR, (*decFnInfo).fastpathDecMapInt64UintR) + fn(map[int64]uint8(nil), (*encFnInfo).fastpathEncMapInt64Uint8R, (*decFnInfo).fastpathDecMapInt64Uint8R) + fn(map[int64]uint16(nil), (*encFnInfo).fastpathEncMapInt64Uint16R, (*decFnInfo).fastpathDecMapInt64Uint16R) + fn(map[int64]uint32(nil), (*encFnInfo).fastpathEncMapInt64Uint32R, (*decFnInfo).fastpathDecMapInt64Uint32R) + fn(map[int64]uint64(nil), (*encFnInfo).fastpathEncMapInt64Uint64R, (*decFnInfo).fastpathDecMapInt64Uint64R) + fn(map[int64]uintptr(nil), (*encFnInfo).fastpathEncMapInt64UintptrR, (*decFnInfo).fastpathDecMapInt64UintptrR) + fn(map[int64]int(nil), (*encFnInfo).fastpathEncMapInt64IntR, (*decFnInfo).fastpathDecMapInt64IntR) + fn(map[int64]int8(nil), (*encFnInfo).fastpathEncMapInt64Int8R, (*decFnInfo).fastpathDecMapInt64Int8R) + fn(map[int64]int16(nil), (*encFnInfo).fastpathEncMapInt64Int16R, (*decFnInfo).fastpathDecMapInt64Int16R) + fn(map[int64]int32(nil), (*encFnInfo).fastpathEncMapInt64Int32R, (*decFnInfo).fastpathDecMapInt64Int32R) + fn(map[int64]int64(nil), (*encFnInfo).fastpathEncMapInt64Int64R, (*decFnInfo).fastpathDecMapInt64Int64R) + fn(map[int64]float32(nil), (*encFnInfo).fastpathEncMapInt64Float32R, (*decFnInfo).fastpathDecMapInt64Float32R) + fn(map[int64]float64(nil), (*encFnInfo).fastpathEncMapInt64Float64R, (*decFnInfo).fastpathDecMapInt64Float64R) + fn(map[int64]bool(nil), (*encFnInfo).fastpathEncMapInt64BoolR, (*decFnInfo).fastpathDecMapInt64BoolR) + fn(map[bool]interface{}(nil), (*encFnInfo).fastpathEncMapBoolIntfR, (*decFnInfo).fastpathDecMapBoolIntfR) + fn(map[bool]string(nil), (*encFnInfo).fastpathEncMapBoolStringR, (*decFnInfo).fastpathDecMapBoolStringR) + fn(map[bool]uint(nil), (*encFnInfo).fastpathEncMapBoolUintR, (*decFnInfo).fastpathDecMapBoolUintR) + fn(map[bool]uint8(nil), (*encFnInfo).fastpathEncMapBoolUint8R, (*decFnInfo).fastpathDecMapBoolUint8R) + fn(map[bool]uint16(nil), (*encFnInfo).fastpathEncMapBoolUint16R, (*decFnInfo).fastpathDecMapBoolUint16R) + fn(map[bool]uint32(nil), (*encFnInfo).fastpathEncMapBoolUint32R, (*decFnInfo).fastpathDecMapBoolUint32R) + fn(map[bool]uint64(nil), (*encFnInfo).fastpathEncMapBoolUint64R, (*decFnInfo).fastpathDecMapBoolUint64R) + fn(map[bool]uintptr(nil), (*encFnInfo).fastpathEncMapBoolUintptrR, (*decFnInfo).fastpathDecMapBoolUintptrR) + fn(map[bool]int(nil), (*encFnInfo).fastpathEncMapBoolIntR, (*decFnInfo).fastpathDecMapBoolIntR) + fn(map[bool]int8(nil), (*encFnInfo).fastpathEncMapBoolInt8R, (*decFnInfo).fastpathDecMapBoolInt8R) + 
fn(map[bool]int16(nil), (*encFnInfo).fastpathEncMapBoolInt16R, (*decFnInfo).fastpathDecMapBoolInt16R) + fn(map[bool]int32(nil), (*encFnInfo).fastpathEncMapBoolInt32R, (*decFnInfo).fastpathDecMapBoolInt32R) + fn(map[bool]int64(nil), (*encFnInfo).fastpathEncMapBoolInt64R, (*decFnInfo).fastpathDecMapBoolInt64R) + fn(map[bool]float32(nil), (*encFnInfo).fastpathEncMapBoolFloat32R, (*decFnInfo).fastpathDecMapBoolFloat32R) + fn(map[bool]float64(nil), (*encFnInfo).fastpathEncMapBoolFloat64R, (*decFnInfo).fastpathDecMapBoolFloat64R) + fn(map[bool]bool(nil), (*encFnInfo).fastpathEncMapBoolBoolR, (*decFnInfo).fastpathDecMapBoolBoolR) + + sort.Sort(fastpathAslice(fastpathAV[:])) +} + +// -- encode + +// -- -- fast path type switch +func fastpathEncodeTypeSwitch(iv interface{}, e *Encoder) bool { + if !fastpathEnabled { + return false + } + switch v := iv.(type) { + + case []interface{}: + fastpathTV.EncSliceIntfV(v, fastpathCheckNilTrue, e) + case *[]interface{}: + fastpathTV.EncSliceIntfV(*v, fastpathCheckNilTrue, e) + + case map[interface{}]interface{}: + fastpathTV.EncMapIntfIntfV(v, fastpathCheckNilTrue, e) + case *map[interface{}]interface{}: + fastpathTV.EncMapIntfIntfV(*v, fastpathCheckNilTrue, e) + + case map[interface{}]string: + fastpathTV.EncMapIntfStringV(v, fastpathCheckNilTrue, e) + case *map[interface{}]string: + fastpathTV.EncMapIntfStringV(*v, fastpathCheckNilTrue, e) + + case map[interface{}]uint: + fastpathTV.EncMapIntfUintV(v, fastpathCheckNilTrue, e) + case *map[interface{}]uint: + fastpathTV.EncMapIntfUintV(*v, fastpathCheckNilTrue, e) + + case map[interface{}]uint8: + fastpathTV.EncMapIntfUint8V(v, fastpathCheckNilTrue, e) + case *map[interface{}]uint8: + fastpathTV.EncMapIntfUint8V(*v, fastpathCheckNilTrue, e) + + case map[interface{}]uint16: + fastpathTV.EncMapIntfUint16V(v, fastpathCheckNilTrue, e) + case *map[interface{}]uint16: + fastpathTV.EncMapIntfUint16V(*v, fastpathCheckNilTrue, e) + + case map[interface{}]uint32: + fastpathTV.EncMapIntfUint32V(v, fastpathCheckNilTrue, e) + case *map[interface{}]uint32: + fastpathTV.EncMapIntfUint32V(*v, fastpathCheckNilTrue, e) + + case map[interface{}]uint64: + fastpathTV.EncMapIntfUint64V(v, fastpathCheckNilTrue, e) + case *map[interface{}]uint64: + fastpathTV.EncMapIntfUint64V(*v, fastpathCheckNilTrue, e) + + case map[interface{}]uintptr: + fastpathTV.EncMapIntfUintptrV(v, fastpathCheckNilTrue, e) + case *map[interface{}]uintptr: + fastpathTV.EncMapIntfUintptrV(*v, fastpathCheckNilTrue, e) + + case map[interface{}]int: + fastpathTV.EncMapIntfIntV(v, fastpathCheckNilTrue, e) + case *map[interface{}]int: + fastpathTV.EncMapIntfIntV(*v, fastpathCheckNilTrue, e) + + case map[interface{}]int8: + fastpathTV.EncMapIntfInt8V(v, fastpathCheckNilTrue, e) + case *map[interface{}]int8: + fastpathTV.EncMapIntfInt8V(*v, fastpathCheckNilTrue, e) + + case map[interface{}]int16: + fastpathTV.EncMapIntfInt16V(v, fastpathCheckNilTrue, e) + case *map[interface{}]int16: + fastpathTV.EncMapIntfInt16V(*v, fastpathCheckNilTrue, e) + + case map[interface{}]int32: + fastpathTV.EncMapIntfInt32V(v, fastpathCheckNilTrue, e) + case *map[interface{}]int32: + fastpathTV.EncMapIntfInt32V(*v, fastpathCheckNilTrue, e) + + case map[interface{}]int64: + fastpathTV.EncMapIntfInt64V(v, fastpathCheckNilTrue, e) + case *map[interface{}]int64: + fastpathTV.EncMapIntfInt64V(*v, fastpathCheckNilTrue, e) + + case map[interface{}]float32: + fastpathTV.EncMapIntfFloat32V(v, fastpathCheckNilTrue, e) + case *map[interface{}]float32: + fastpathTV.EncMapIntfFloat32V(*v, 
fastpathCheckNilTrue, e) + + case map[interface{}]float64: + fastpathTV.EncMapIntfFloat64V(v, fastpathCheckNilTrue, e) + case *map[interface{}]float64: + fastpathTV.EncMapIntfFloat64V(*v, fastpathCheckNilTrue, e) + + case map[interface{}]bool: + fastpathTV.EncMapIntfBoolV(v, fastpathCheckNilTrue, e) + case *map[interface{}]bool: + fastpathTV.EncMapIntfBoolV(*v, fastpathCheckNilTrue, e) + + case []string: + fastpathTV.EncSliceStringV(v, fastpathCheckNilTrue, e) + case *[]string: + fastpathTV.EncSliceStringV(*v, fastpathCheckNilTrue, e) + + case map[string]interface{}: + fastpathTV.EncMapStringIntfV(v, fastpathCheckNilTrue, e) + case *map[string]interface{}: + fastpathTV.EncMapStringIntfV(*v, fastpathCheckNilTrue, e) + + case map[string]string: + fastpathTV.EncMapStringStringV(v, fastpathCheckNilTrue, e) + case *map[string]string: + fastpathTV.EncMapStringStringV(*v, fastpathCheckNilTrue, e) + + case map[string]uint: + fastpathTV.EncMapStringUintV(v, fastpathCheckNilTrue, e) + case *map[string]uint: + fastpathTV.EncMapStringUintV(*v, fastpathCheckNilTrue, e) + + case map[string]uint8: + fastpathTV.EncMapStringUint8V(v, fastpathCheckNilTrue, e) + case *map[string]uint8: + fastpathTV.EncMapStringUint8V(*v, fastpathCheckNilTrue, e) + + case map[string]uint16: + fastpathTV.EncMapStringUint16V(v, fastpathCheckNilTrue, e) + case *map[string]uint16: + fastpathTV.EncMapStringUint16V(*v, fastpathCheckNilTrue, e) + + case map[string]uint32: + fastpathTV.EncMapStringUint32V(v, fastpathCheckNilTrue, e) + case *map[string]uint32: + fastpathTV.EncMapStringUint32V(*v, fastpathCheckNilTrue, e) + + case map[string]uint64: + fastpathTV.EncMapStringUint64V(v, fastpathCheckNilTrue, e) + case *map[string]uint64: + fastpathTV.EncMapStringUint64V(*v, fastpathCheckNilTrue, e) + + case map[string]uintptr: + fastpathTV.EncMapStringUintptrV(v, fastpathCheckNilTrue, e) + case *map[string]uintptr: + fastpathTV.EncMapStringUintptrV(*v, fastpathCheckNilTrue, e) + + case map[string]int: + fastpathTV.EncMapStringIntV(v, fastpathCheckNilTrue, e) + case *map[string]int: + fastpathTV.EncMapStringIntV(*v, fastpathCheckNilTrue, e) + + case map[string]int8: + fastpathTV.EncMapStringInt8V(v, fastpathCheckNilTrue, e) + case *map[string]int8: + fastpathTV.EncMapStringInt8V(*v, fastpathCheckNilTrue, e) + + case map[string]int16: + fastpathTV.EncMapStringInt16V(v, fastpathCheckNilTrue, e) + case *map[string]int16: + fastpathTV.EncMapStringInt16V(*v, fastpathCheckNilTrue, e) + + case map[string]int32: + fastpathTV.EncMapStringInt32V(v, fastpathCheckNilTrue, e) + case *map[string]int32: + fastpathTV.EncMapStringInt32V(*v, fastpathCheckNilTrue, e) + + case map[string]int64: + fastpathTV.EncMapStringInt64V(v, fastpathCheckNilTrue, e) + case *map[string]int64: + fastpathTV.EncMapStringInt64V(*v, fastpathCheckNilTrue, e) + + case map[string]float32: + fastpathTV.EncMapStringFloat32V(v, fastpathCheckNilTrue, e) + case *map[string]float32: + fastpathTV.EncMapStringFloat32V(*v, fastpathCheckNilTrue, e) + + case map[string]float64: + fastpathTV.EncMapStringFloat64V(v, fastpathCheckNilTrue, e) + case *map[string]float64: + fastpathTV.EncMapStringFloat64V(*v, fastpathCheckNilTrue, e) + + case map[string]bool: + fastpathTV.EncMapStringBoolV(v, fastpathCheckNilTrue, e) + case *map[string]bool: + fastpathTV.EncMapStringBoolV(*v, fastpathCheckNilTrue, e) + + case []float32: + fastpathTV.EncSliceFloat32V(v, fastpathCheckNilTrue, e) + case *[]float32: + fastpathTV.EncSliceFloat32V(*v, fastpathCheckNilTrue, e) + + case map[float32]interface{}: + 
fastpathTV.EncMapFloat32IntfV(v, fastpathCheckNilTrue, e) + case *map[float32]interface{}: + fastpathTV.EncMapFloat32IntfV(*v, fastpathCheckNilTrue, e) + + case map[float32]string: + fastpathTV.EncMapFloat32StringV(v, fastpathCheckNilTrue, e) + case *map[float32]string: + fastpathTV.EncMapFloat32StringV(*v, fastpathCheckNilTrue, e) + + case map[float32]uint: + fastpathTV.EncMapFloat32UintV(v, fastpathCheckNilTrue, e) + case *map[float32]uint: + fastpathTV.EncMapFloat32UintV(*v, fastpathCheckNilTrue, e) + + case map[float32]uint8: + fastpathTV.EncMapFloat32Uint8V(v, fastpathCheckNilTrue, e) + case *map[float32]uint8: + fastpathTV.EncMapFloat32Uint8V(*v, fastpathCheckNilTrue, e) + + case map[float32]uint16: + fastpathTV.EncMapFloat32Uint16V(v, fastpathCheckNilTrue, e) + case *map[float32]uint16: + fastpathTV.EncMapFloat32Uint16V(*v, fastpathCheckNilTrue, e) + + case map[float32]uint32: + fastpathTV.EncMapFloat32Uint32V(v, fastpathCheckNilTrue, e) + case *map[float32]uint32: + fastpathTV.EncMapFloat32Uint32V(*v, fastpathCheckNilTrue, e) + + case map[float32]uint64: + fastpathTV.EncMapFloat32Uint64V(v, fastpathCheckNilTrue, e) + case *map[float32]uint64: + fastpathTV.EncMapFloat32Uint64V(*v, fastpathCheckNilTrue, e) + + case map[float32]uintptr: + fastpathTV.EncMapFloat32UintptrV(v, fastpathCheckNilTrue, e) + case *map[float32]uintptr: + fastpathTV.EncMapFloat32UintptrV(*v, fastpathCheckNilTrue, e) + + case map[float32]int: + fastpathTV.EncMapFloat32IntV(v, fastpathCheckNilTrue, e) + case *map[float32]int: + fastpathTV.EncMapFloat32IntV(*v, fastpathCheckNilTrue, e) + + case map[float32]int8: + fastpathTV.EncMapFloat32Int8V(v, fastpathCheckNilTrue, e) + case *map[float32]int8: + fastpathTV.EncMapFloat32Int8V(*v, fastpathCheckNilTrue, e) + + case map[float32]int16: + fastpathTV.EncMapFloat32Int16V(v, fastpathCheckNilTrue, e) + case *map[float32]int16: + fastpathTV.EncMapFloat32Int16V(*v, fastpathCheckNilTrue, e) + + case map[float32]int32: + fastpathTV.EncMapFloat32Int32V(v, fastpathCheckNilTrue, e) + case *map[float32]int32: + fastpathTV.EncMapFloat32Int32V(*v, fastpathCheckNilTrue, e) + + case map[float32]int64: + fastpathTV.EncMapFloat32Int64V(v, fastpathCheckNilTrue, e) + case *map[float32]int64: + fastpathTV.EncMapFloat32Int64V(*v, fastpathCheckNilTrue, e) + + case map[float32]float32: + fastpathTV.EncMapFloat32Float32V(v, fastpathCheckNilTrue, e) + case *map[float32]float32: + fastpathTV.EncMapFloat32Float32V(*v, fastpathCheckNilTrue, e) + + case map[float32]float64: + fastpathTV.EncMapFloat32Float64V(v, fastpathCheckNilTrue, e) + case *map[float32]float64: + fastpathTV.EncMapFloat32Float64V(*v, fastpathCheckNilTrue, e) + + case map[float32]bool: + fastpathTV.EncMapFloat32BoolV(v, fastpathCheckNilTrue, e) + case *map[float32]bool: + fastpathTV.EncMapFloat32BoolV(*v, fastpathCheckNilTrue, e) + + case []float64: + fastpathTV.EncSliceFloat64V(v, fastpathCheckNilTrue, e) + case *[]float64: + fastpathTV.EncSliceFloat64V(*v, fastpathCheckNilTrue, e) + + case map[float64]interface{}: + fastpathTV.EncMapFloat64IntfV(v, fastpathCheckNilTrue, e) + case *map[float64]interface{}: + fastpathTV.EncMapFloat64IntfV(*v, fastpathCheckNilTrue, e) + + case map[float64]string: + fastpathTV.EncMapFloat64StringV(v, fastpathCheckNilTrue, e) + case *map[float64]string: + fastpathTV.EncMapFloat64StringV(*v, fastpathCheckNilTrue, e) + + case map[float64]uint: + fastpathTV.EncMapFloat64UintV(v, fastpathCheckNilTrue, e) + case *map[float64]uint: + fastpathTV.EncMapFloat64UintV(*v, fastpathCheckNilTrue, e) + + case 
map[float64]uint8:
+		fastpathTV.EncMapFloat64Uint8V(v, fastpathCheckNilTrue, e)
+	case *map[float64]uint8:
+		fastpathTV.EncMapFloat64Uint8V(*v, fastpathCheckNilTrue, e)
+
+	case map[float64]uint16:
+		fastpathTV.EncMapFloat64Uint16V(v, fastpathCheckNilTrue, e)
+	case *map[float64]uint16:
+		fastpathTV.EncMapFloat64Uint16V(*v, fastpathCheckNilTrue, e)
+
[~1,070 added lines elided: the generated type switch continues with the same value/pointer case-pair pattern for the remaining map[float64]V value types, the slice types []uint, []uint16, []uint32, []uint64, []uintptr, []int, []int8, []int16, []int32, []int64, and []bool, and every map[K]V combination for K in {uint, uint8, uint16, uint32, uint64, uintptr, int, int8, int16, int32, int64, bool} and V in {interface{}, string, uint, uint8, uint16, uint32, uint64, uintptr, int, int8, int16, int32, int64, float32, float64, bool}, each dispatching to the matching fastpathTV.EncMap*V or fastpathTV.EncSlice*V helper with fastpathCheckNilTrue.]
+	default:
+		_ = v // TODO: workaround https://github.com/golang/go/issues/12927 (remove after go 1.6 release)
+		return false
+	}
+	return true
+}
+
+func fastpathEncodeTypeSwitchSlice(iv interface{}, e *Encoder) bool {
+	if !fastpathEnabled {
+		return false
+	}
+	switch v := iv.(type) {
+
+	case []interface{}:
+		fastpathTV.EncSliceIntfV(v, fastpathCheckNilTrue, e)
+	case *[]interface{}:
+		fastpathTV.EncSliceIntfV(*v, fastpathCheckNilTrue, e)
+
+	case []string:
+		fastpathTV.EncSliceStringV(v, fastpathCheckNilTrue, e)
+	case *[]string:
+		fastpathTV.EncSliceStringV(*v, fastpathCheckNilTrue, e)
+
+	case []float32:
+		fastpathTV.EncSliceFloat32V(v, fastpathCheckNilTrue, e)
+	case *[]float32:
+		fastpathTV.EncSliceFloat32V(*v, fastpathCheckNilTrue, e)
+
+	case []float64:
+		fastpathTV.EncSliceFloat64V(v, fastpathCheckNilTrue, e)
+	case *[]float64:
+		fastpathTV.EncSliceFloat64V(*v, fastpathCheckNilTrue, e)
+
+	case []uint:
+		fastpathTV.EncSliceUintV(v, fastpathCheckNilTrue, e)
+	case *[]uint:
+		fastpathTV.EncSliceUintV(*v, fastpathCheckNilTrue, e)
+
+	case []uint16:
+		fastpathTV.EncSliceUint16V(v, fastpathCheckNilTrue, e)
+	case *[]uint16:
+		fastpathTV.EncSliceUint16V(*v, fastpathCheckNilTrue, e)
+
+	case []uint32:
+		fastpathTV.EncSliceUint32V(v, fastpathCheckNilTrue, e)
+	case *[]uint32:
+		fastpathTV.EncSliceUint32V(*v, fastpathCheckNilTrue, e)
+
+	case []uint64:
+		fastpathTV.EncSliceUint64V(v, fastpathCheckNilTrue, e)
+	case *[]uint64:
+		fastpathTV.EncSliceUint64V(*v, fastpathCheckNilTrue, e)
+
+	case []uintptr:
+		fastpathTV.EncSliceUintptrV(v, fastpathCheckNilTrue, e)
+	case *[]uintptr:
+		fastpathTV.EncSliceUintptrV(*v, fastpathCheckNilTrue, e)
+
+	case []int:
+		fastpathTV.EncSliceIntV(v, fastpathCheckNilTrue, e)
+	case *[]int:
+		fastpathTV.EncSliceIntV(*v, fastpathCheckNilTrue, e)
+
+	case []int8:
+		fastpathTV.EncSliceInt8V(v, fastpathCheckNilTrue, e)
+	case *[]int8:
+		fastpathTV.EncSliceInt8V(*v, fastpathCheckNilTrue, e)
+
+	case []int16:
+		fastpathTV.EncSliceInt16V(v, fastpathCheckNilTrue, e)
+	case *[]int16:
+		fastpathTV.EncSliceInt16V(*v, fastpathCheckNilTrue, e)
+
+	case []int32:
+		fastpathTV.EncSliceInt32V(v, fastpathCheckNilTrue, e)
+	case *[]int32:
+		fastpathTV.EncSliceInt32V(*v, fastpathCheckNilTrue, e)
+
+	case []int64:
+		fastpathTV.EncSliceInt64V(v, fastpathCheckNilTrue, e)
+	case *[]int64:
+		fastpathTV.EncSliceInt64V(*v, fastpathCheckNilTrue, e)
+
+	case []bool:
+		fastpathTV.EncSliceBoolV(v, fastpathCheckNilTrue, e)
+	case *[]bool:
+		fastpathTV.EncSliceBoolV(*v, fastpathCheckNilTrue, e)
+
+	default:
+		_ = v // TODO: workaround https://github.com/golang/go/issues/12927 (remove after go 1.6 release)
+		return false
+	}
+	return true
+}
+
+func fastpathEncodeTypeSwitchMap(iv interface{}, e *Encoder) bool {
+	if !fastpathEnabled {
+		return false
+	}
+	switch v := iv.(type) {
+
+	case map[interface{}]interface{}:
+		fastpathTV.EncMapIntfIntfV(v, fastpathCheckNilTrue, e)
+	case *map[interface{}]interface{}:
+		fastpathTV.EncMapIntfIntfV(*v, fastpathCheckNilTrue, e)
+
[~980 added lines elided: the same value/pointer case-pair pattern repeats for map[K]V with K in {interface{}, string, float32, float64, uint, uint8, uint16, uint32, uint64, uintptr, int, int8, int16} and V in {interface{}, string, uint, uint8, uint16, uint32, uint64, uintptr, int, int8, int16, int32, int64, float32, float64, bool}, each dispatching to the matching fastpathTV.EncMap*V helper with fastpathCheckNilTrue, resuming at:]
+	case map[int16]uint32:
+		fastpathTV.EncMapInt16Uint32V(v, fastpathCheckNilTrue, e)
+	case *map[int16]uint32:
+
fastpathTV.EncMapInt16Uint32V(*v, fastpathCheckNilTrue, e) + + case map[int16]uint64: + fastpathTV.EncMapInt16Uint64V(v, fastpathCheckNilTrue, e) + case *map[int16]uint64: + fastpathTV.EncMapInt16Uint64V(*v, fastpathCheckNilTrue, e) + + case map[int16]uintptr: + fastpathTV.EncMapInt16UintptrV(v, fastpathCheckNilTrue, e) + case *map[int16]uintptr: + fastpathTV.EncMapInt16UintptrV(*v, fastpathCheckNilTrue, e) + + case map[int16]int: + fastpathTV.EncMapInt16IntV(v, fastpathCheckNilTrue, e) + case *map[int16]int: + fastpathTV.EncMapInt16IntV(*v, fastpathCheckNilTrue, e) + + case map[int16]int8: + fastpathTV.EncMapInt16Int8V(v, fastpathCheckNilTrue, e) + case *map[int16]int8: + fastpathTV.EncMapInt16Int8V(*v, fastpathCheckNilTrue, e) + + case map[int16]int16: + fastpathTV.EncMapInt16Int16V(v, fastpathCheckNilTrue, e) + case *map[int16]int16: + fastpathTV.EncMapInt16Int16V(*v, fastpathCheckNilTrue, e) + + case map[int16]int32: + fastpathTV.EncMapInt16Int32V(v, fastpathCheckNilTrue, e) + case *map[int16]int32: + fastpathTV.EncMapInt16Int32V(*v, fastpathCheckNilTrue, e) + + case map[int16]int64: + fastpathTV.EncMapInt16Int64V(v, fastpathCheckNilTrue, e) + case *map[int16]int64: + fastpathTV.EncMapInt16Int64V(*v, fastpathCheckNilTrue, e) + + case map[int16]float32: + fastpathTV.EncMapInt16Float32V(v, fastpathCheckNilTrue, e) + case *map[int16]float32: + fastpathTV.EncMapInt16Float32V(*v, fastpathCheckNilTrue, e) + + case map[int16]float64: + fastpathTV.EncMapInt16Float64V(v, fastpathCheckNilTrue, e) + case *map[int16]float64: + fastpathTV.EncMapInt16Float64V(*v, fastpathCheckNilTrue, e) + + case map[int16]bool: + fastpathTV.EncMapInt16BoolV(v, fastpathCheckNilTrue, e) + case *map[int16]bool: + fastpathTV.EncMapInt16BoolV(*v, fastpathCheckNilTrue, e) + + case map[int32]interface{}: + fastpathTV.EncMapInt32IntfV(v, fastpathCheckNilTrue, e) + case *map[int32]interface{}: + fastpathTV.EncMapInt32IntfV(*v, fastpathCheckNilTrue, e) + + case map[int32]string: + fastpathTV.EncMapInt32StringV(v, fastpathCheckNilTrue, e) + case *map[int32]string: + fastpathTV.EncMapInt32StringV(*v, fastpathCheckNilTrue, e) + + case map[int32]uint: + fastpathTV.EncMapInt32UintV(v, fastpathCheckNilTrue, e) + case *map[int32]uint: + fastpathTV.EncMapInt32UintV(*v, fastpathCheckNilTrue, e) + + case map[int32]uint8: + fastpathTV.EncMapInt32Uint8V(v, fastpathCheckNilTrue, e) + case *map[int32]uint8: + fastpathTV.EncMapInt32Uint8V(*v, fastpathCheckNilTrue, e) + + case map[int32]uint16: + fastpathTV.EncMapInt32Uint16V(v, fastpathCheckNilTrue, e) + case *map[int32]uint16: + fastpathTV.EncMapInt32Uint16V(*v, fastpathCheckNilTrue, e) + + case map[int32]uint32: + fastpathTV.EncMapInt32Uint32V(v, fastpathCheckNilTrue, e) + case *map[int32]uint32: + fastpathTV.EncMapInt32Uint32V(*v, fastpathCheckNilTrue, e) + + case map[int32]uint64: + fastpathTV.EncMapInt32Uint64V(v, fastpathCheckNilTrue, e) + case *map[int32]uint64: + fastpathTV.EncMapInt32Uint64V(*v, fastpathCheckNilTrue, e) + + case map[int32]uintptr: + fastpathTV.EncMapInt32UintptrV(v, fastpathCheckNilTrue, e) + case *map[int32]uintptr: + fastpathTV.EncMapInt32UintptrV(*v, fastpathCheckNilTrue, e) + + case map[int32]int: + fastpathTV.EncMapInt32IntV(v, fastpathCheckNilTrue, e) + case *map[int32]int: + fastpathTV.EncMapInt32IntV(*v, fastpathCheckNilTrue, e) + + case map[int32]int8: + fastpathTV.EncMapInt32Int8V(v, fastpathCheckNilTrue, e) + case *map[int32]int8: + fastpathTV.EncMapInt32Int8V(*v, fastpathCheckNilTrue, e) + + case map[int32]int16: + fastpathTV.EncMapInt32Int16V(v, 
fastpathCheckNilTrue, e) + case *map[int32]int16: + fastpathTV.EncMapInt32Int16V(*v, fastpathCheckNilTrue, e) + + case map[int32]int32: + fastpathTV.EncMapInt32Int32V(v, fastpathCheckNilTrue, e) + case *map[int32]int32: + fastpathTV.EncMapInt32Int32V(*v, fastpathCheckNilTrue, e) + + case map[int32]int64: + fastpathTV.EncMapInt32Int64V(v, fastpathCheckNilTrue, e) + case *map[int32]int64: + fastpathTV.EncMapInt32Int64V(*v, fastpathCheckNilTrue, e) + + case map[int32]float32: + fastpathTV.EncMapInt32Float32V(v, fastpathCheckNilTrue, e) + case *map[int32]float32: + fastpathTV.EncMapInt32Float32V(*v, fastpathCheckNilTrue, e) + + case map[int32]float64: + fastpathTV.EncMapInt32Float64V(v, fastpathCheckNilTrue, e) + case *map[int32]float64: + fastpathTV.EncMapInt32Float64V(*v, fastpathCheckNilTrue, e) + + case map[int32]bool: + fastpathTV.EncMapInt32BoolV(v, fastpathCheckNilTrue, e) + case *map[int32]bool: + fastpathTV.EncMapInt32BoolV(*v, fastpathCheckNilTrue, e) + + case map[int64]interface{}: + fastpathTV.EncMapInt64IntfV(v, fastpathCheckNilTrue, e) + case *map[int64]interface{}: + fastpathTV.EncMapInt64IntfV(*v, fastpathCheckNilTrue, e) + + case map[int64]string: + fastpathTV.EncMapInt64StringV(v, fastpathCheckNilTrue, e) + case *map[int64]string: + fastpathTV.EncMapInt64StringV(*v, fastpathCheckNilTrue, e) + + case map[int64]uint: + fastpathTV.EncMapInt64UintV(v, fastpathCheckNilTrue, e) + case *map[int64]uint: + fastpathTV.EncMapInt64UintV(*v, fastpathCheckNilTrue, e) + + case map[int64]uint8: + fastpathTV.EncMapInt64Uint8V(v, fastpathCheckNilTrue, e) + case *map[int64]uint8: + fastpathTV.EncMapInt64Uint8V(*v, fastpathCheckNilTrue, e) + + case map[int64]uint16: + fastpathTV.EncMapInt64Uint16V(v, fastpathCheckNilTrue, e) + case *map[int64]uint16: + fastpathTV.EncMapInt64Uint16V(*v, fastpathCheckNilTrue, e) + + case map[int64]uint32: + fastpathTV.EncMapInt64Uint32V(v, fastpathCheckNilTrue, e) + case *map[int64]uint32: + fastpathTV.EncMapInt64Uint32V(*v, fastpathCheckNilTrue, e) + + case map[int64]uint64: + fastpathTV.EncMapInt64Uint64V(v, fastpathCheckNilTrue, e) + case *map[int64]uint64: + fastpathTV.EncMapInt64Uint64V(*v, fastpathCheckNilTrue, e) + + case map[int64]uintptr: + fastpathTV.EncMapInt64UintptrV(v, fastpathCheckNilTrue, e) + case *map[int64]uintptr: + fastpathTV.EncMapInt64UintptrV(*v, fastpathCheckNilTrue, e) + + case map[int64]int: + fastpathTV.EncMapInt64IntV(v, fastpathCheckNilTrue, e) + case *map[int64]int: + fastpathTV.EncMapInt64IntV(*v, fastpathCheckNilTrue, e) + + case map[int64]int8: + fastpathTV.EncMapInt64Int8V(v, fastpathCheckNilTrue, e) + case *map[int64]int8: + fastpathTV.EncMapInt64Int8V(*v, fastpathCheckNilTrue, e) + + case map[int64]int16: + fastpathTV.EncMapInt64Int16V(v, fastpathCheckNilTrue, e) + case *map[int64]int16: + fastpathTV.EncMapInt64Int16V(*v, fastpathCheckNilTrue, e) + + case map[int64]int32: + fastpathTV.EncMapInt64Int32V(v, fastpathCheckNilTrue, e) + case *map[int64]int32: + fastpathTV.EncMapInt64Int32V(*v, fastpathCheckNilTrue, e) + + case map[int64]int64: + fastpathTV.EncMapInt64Int64V(v, fastpathCheckNilTrue, e) + case *map[int64]int64: + fastpathTV.EncMapInt64Int64V(*v, fastpathCheckNilTrue, e) + + case map[int64]float32: + fastpathTV.EncMapInt64Float32V(v, fastpathCheckNilTrue, e) + case *map[int64]float32: + fastpathTV.EncMapInt64Float32V(*v, fastpathCheckNilTrue, e) + + case map[int64]float64: + fastpathTV.EncMapInt64Float64V(v, fastpathCheckNilTrue, e) + case *map[int64]float64: + fastpathTV.EncMapInt64Float64V(*v, fastpathCheckNilTrue, 
e) + + case map[int64]bool: + fastpathTV.EncMapInt64BoolV(v, fastpathCheckNilTrue, e) + case *map[int64]bool: + fastpathTV.EncMapInt64BoolV(*v, fastpathCheckNilTrue, e) + + case map[bool]interface{}: + fastpathTV.EncMapBoolIntfV(v, fastpathCheckNilTrue, e) + case *map[bool]interface{}: + fastpathTV.EncMapBoolIntfV(*v, fastpathCheckNilTrue, e) + + case map[bool]string: + fastpathTV.EncMapBoolStringV(v, fastpathCheckNilTrue, e) + case *map[bool]string: + fastpathTV.EncMapBoolStringV(*v, fastpathCheckNilTrue, e) + + case map[bool]uint: + fastpathTV.EncMapBoolUintV(v, fastpathCheckNilTrue, e) + case *map[bool]uint: + fastpathTV.EncMapBoolUintV(*v, fastpathCheckNilTrue, e) + + case map[bool]uint8: + fastpathTV.EncMapBoolUint8V(v, fastpathCheckNilTrue, e) + case *map[bool]uint8: + fastpathTV.EncMapBoolUint8V(*v, fastpathCheckNilTrue, e) + + case map[bool]uint16: + fastpathTV.EncMapBoolUint16V(v, fastpathCheckNilTrue, e) + case *map[bool]uint16: + fastpathTV.EncMapBoolUint16V(*v, fastpathCheckNilTrue, e) + + case map[bool]uint32: + fastpathTV.EncMapBoolUint32V(v, fastpathCheckNilTrue, e) + case *map[bool]uint32: + fastpathTV.EncMapBoolUint32V(*v, fastpathCheckNilTrue, e) + + case map[bool]uint64: + fastpathTV.EncMapBoolUint64V(v, fastpathCheckNilTrue, e) + case *map[bool]uint64: + fastpathTV.EncMapBoolUint64V(*v, fastpathCheckNilTrue, e) + + case map[bool]uintptr: + fastpathTV.EncMapBoolUintptrV(v, fastpathCheckNilTrue, e) + case *map[bool]uintptr: + fastpathTV.EncMapBoolUintptrV(*v, fastpathCheckNilTrue, e) + + case map[bool]int: + fastpathTV.EncMapBoolIntV(v, fastpathCheckNilTrue, e) + case *map[bool]int: + fastpathTV.EncMapBoolIntV(*v, fastpathCheckNilTrue, e) + + case map[bool]int8: + fastpathTV.EncMapBoolInt8V(v, fastpathCheckNilTrue, e) + case *map[bool]int8: + fastpathTV.EncMapBoolInt8V(*v, fastpathCheckNilTrue, e) + + case map[bool]int16: + fastpathTV.EncMapBoolInt16V(v, fastpathCheckNilTrue, e) + case *map[bool]int16: + fastpathTV.EncMapBoolInt16V(*v, fastpathCheckNilTrue, e) + + case map[bool]int32: + fastpathTV.EncMapBoolInt32V(v, fastpathCheckNilTrue, e) + case *map[bool]int32: + fastpathTV.EncMapBoolInt32V(*v, fastpathCheckNilTrue, e) + + case map[bool]int64: + fastpathTV.EncMapBoolInt64V(v, fastpathCheckNilTrue, e) + case *map[bool]int64: + fastpathTV.EncMapBoolInt64V(*v, fastpathCheckNilTrue, e) + + case map[bool]float32: + fastpathTV.EncMapBoolFloat32V(v, fastpathCheckNilTrue, e) + case *map[bool]float32: + fastpathTV.EncMapBoolFloat32V(*v, fastpathCheckNilTrue, e) + + case map[bool]float64: + fastpathTV.EncMapBoolFloat64V(v, fastpathCheckNilTrue, e) + case *map[bool]float64: + fastpathTV.EncMapBoolFloat64V(*v, fastpathCheckNilTrue, e) + + case map[bool]bool: + fastpathTV.EncMapBoolBoolV(v, fastpathCheckNilTrue, e) + case *map[bool]bool: + fastpathTV.EncMapBoolBoolV(*v, fastpathCheckNilTrue, e) + + default: + _ = v // TODO: workaround https://github.com/golang/go/issues/12927 (remove after go 1.6 release) + return false + } + return true +} + +// -- -- fast path functions + +func (f *encFnInfo) fastpathEncSliceIntfR(rv reflect.Value) { + fastpathTV.EncSliceIntfV(rv.Interface().([]interface{}), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncSliceIntfV(v []interface{}, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeArrayStart(len(v)) + for _, v2 := range v { + if cr != nil { + cr.sendContainerState(containerArrayElem) + } + e.encode(v2) + } + if cr != nil { + cr.sendContainerState(containerArrayEnd) + 
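+ // Every EncSliceXxxV in this section follows the same shape: an optional
+ // nil check (checkNil), EncodeArrayStart, one element encode per entry,
+ // and container-state callbacks (cr) emitted only for formats that need
+ // explicit delimiters between elements.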
} +} + +func (f *encFnInfo) fastpathEncSliceStringR(rv reflect.Value) { + fastpathTV.EncSliceStringV(rv.Interface().([]string), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncSliceStringV(v []string, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeArrayStart(len(v)) + for _, v2 := range v { + if cr != nil { + cr.sendContainerState(containerArrayElem) + } + ee.EncodeString(c_UTF8, v2) + } + if cr != nil { + cr.sendContainerState(containerArrayEnd) + } +} + +func (f *encFnInfo) fastpathEncSliceFloat32R(rv reflect.Value) { + fastpathTV.EncSliceFloat32V(rv.Interface().([]float32), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncSliceFloat32V(v []float32, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeArrayStart(len(v)) + for _, v2 := range v { + if cr != nil { + cr.sendContainerState(containerArrayElem) + } + ee.EncodeFloat32(v2) + } + if cr != nil { + cr.sendContainerState(containerArrayEnd) + } +} + +func (f *encFnInfo) fastpathEncSliceFloat64R(rv reflect.Value) { + fastpathTV.EncSliceFloat64V(rv.Interface().([]float64), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncSliceFloat64V(v []float64, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeArrayStart(len(v)) + for _, v2 := range v { + if cr != nil { + cr.sendContainerState(containerArrayElem) + } + ee.EncodeFloat64(v2) + } + if cr != nil { + cr.sendContainerState(containerArrayEnd) + } +} + +func (f *encFnInfo) fastpathEncSliceUintR(rv reflect.Value) { + fastpathTV.EncSliceUintV(rv.Interface().([]uint), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncSliceUintV(v []uint, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeArrayStart(len(v)) + for _, v2 := range v { + if cr != nil { + cr.sendContainerState(containerArrayElem) + } + ee.EncodeUint(uint64(v2)) + } + if cr != nil { + cr.sendContainerState(containerArrayEnd) + } +} + +func (f *encFnInfo) fastpathEncSliceUint16R(rv reflect.Value) { + fastpathTV.EncSliceUint16V(rv.Interface().([]uint16), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncSliceUint16V(v []uint16, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeArrayStart(len(v)) + for _, v2 := range v { + if cr != nil { + cr.sendContainerState(containerArrayElem) + } + ee.EncodeUint(uint64(v2)) + } + if cr != nil { + cr.sendContainerState(containerArrayEnd) + } +} + +func (f *encFnInfo) fastpathEncSliceUint32R(rv reflect.Value) { + fastpathTV.EncSliceUint32V(rv.Interface().([]uint32), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncSliceUint32V(v []uint32, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeArrayStart(len(v)) + for _, v2 := range v { + if cr != nil { + cr.sendContainerState(containerArrayElem) + } + ee.EncodeUint(uint64(v2)) + } + if cr != nil { + cr.sendContainerState(containerArrayEnd) + } +} + +func (f *encFnInfo) fastpathEncSliceUint64R(rv reflect.Value) { + fastpathTV.EncSliceUint64V(rv.Interface().([]uint64), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncSliceUint64V(v []uint64, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeArrayStart(len(v)) + for _, v2 := 
range v { + if cr != nil { + cr.sendContainerState(containerArrayElem) + } + ee.EncodeUint(uint64(v2)) + } + if cr != nil { + cr.sendContainerState(containerArrayEnd) + } +} + +func (f *encFnInfo) fastpathEncSliceUintptrR(rv reflect.Value) { + fastpathTV.EncSliceUintptrV(rv.Interface().([]uintptr), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncSliceUintptrV(v []uintptr, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeArrayStart(len(v)) + for _, v2 := range v { + if cr != nil { + cr.sendContainerState(containerArrayElem) + } + e.encode(v2) + } + if cr != nil { + cr.sendContainerState(containerArrayEnd) + } +} + +func (f *encFnInfo) fastpathEncSliceIntR(rv reflect.Value) { + fastpathTV.EncSliceIntV(rv.Interface().([]int), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncSliceIntV(v []int, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeArrayStart(len(v)) + for _, v2 := range v { + if cr != nil { + cr.sendContainerState(containerArrayElem) + } + ee.EncodeInt(int64(v2)) + } + if cr != nil { + cr.sendContainerState(containerArrayEnd) + } +} + +func (f *encFnInfo) fastpathEncSliceInt8R(rv reflect.Value) { + fastpathTV.EncSliceInt8V(rv.Interface().([]int8), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncSliceInt8V(v []int8, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeArrayStart(len(v)) + for _, v2 := range v { + if cr != nil { + cr.sendContainerState(containerArrayElem) + } + ee.EncodeInt(int64(v2)) + } + if cr != nil { + cr.sendContainerState(containerArrayEnd) + } +} + +func (f *encFnInfo) fastpathEncSliceInt16R(rv reflect.Value) { + fastpathTV.EncSliceInt16V(rv.Interface().([]int16), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncSliceInt16V(v []int16, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeArrayStart(len(v)) + for _, v2 := range v { + if cr != nil { + cr.sendContainerState(containerArrayElem) + } + ee.EncodeInt(int64(v2)) + } + if cr != nil { + cr.sendContainerState(containerArrayEnd) + } +} + +func (f *encFnInfo) fastpathEncSliceInt32R(rv reflect.Value) { + fastpathTV.EncSliceInt32V(rv.Interface().([]int32), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncSliceInt32V(v []int32, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeArrayStart(len(v)) + for _, v2 := range v { + if cr != nil { + cr.sendContainerState(containerArrayElem) + } + ee.EncodeInt(int64(v2)) + } + if cr != nil { + cr.sendContainerState(containerArrayEnd) + } +} + +func (f *encFnInfo) fastpathEncSliceInt64R(rv reflect.Value) { + fastpathTV.EncSliceInt64V(rv.Interface().([]int64), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncSliceInt64V(v []int64, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeArrayStart(len(v)) + for _, v2 := range v { + if cr != nil { + cr.sendContainerState(containerArrayElem) + } + ee.EncodeInt(int64(v2)) + } + if cr != nil { + cr.sendContainerState(containerArrayEnd) + } +} + +func (f *encFnInfo) fastpathEncSliceBoolR(rv reflect.Value) { + fastpathTV.EncSliceBoolV(rv.Interface().([]bool), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncSliceBoolV(v []bool, checkNil bool, e *Encoder) { + ee := e.e + cr := 
e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeArrayStart(len(v)) + for _, v2 := range v { + if cr != nil { + cr.sendContainerState(containerArrayElem) + } + ee.EncodeBool(v2) + } + if cr != nil { + cr.sendContainerState(containerArrayEnd) + } +} + +func (f *encFnInfo) fastpathEncMapIntfIntfR(rv reflect.Value) { + fastpathTV.EncMapIntfIntfV(rv.Interface().(map[interface{}]interface{}), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapIntfIntfV(v map[interface{}]interface{}, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + var mksv []byte = make([]byte, 0, len(v)*16) // temporary byte slice for the encoding + e2 := NewEncoderBytes(&mksv, e.hh) + v2 := make([]bytesI, len(v)) + var i, l int + var vp *bytesI + for k2, _ := range v { + l = len(mksv) + e2.MustEncode(k2) + vp = &v2[i] + vp.v = mksv[l:] + vp.i = k2 + i++ + } + sort.Sort(bytesISlice(v2)) + for j := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + e.asis(v2[j].v) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + e.encode(v[v2[j].i]) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + e.encode(k2) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + e.encode(v2) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapIntfStringR(rv reflect.Value) { + fastpathTV.EncMapIntfStringV(rv.Interface().(map[interface{}]string), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapIntfStringV(v map[interface{}]string, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + var mksv []byte = make([]byte, 0, len(v)*16) // temporary byte slice for the encoding + e2 := NewEncoderBytes(&mksv, e.hh) + v2 := make([]bytesI, len(v)) + var i, l int + var vp *bytesI + for k2, _ := range v { + l = len(mksv) + e2.MustEncode(k2) + vp = &v2[i] + vp.v = mksv[l:] + vp.i = k2 + i++ + } + sort.Sort(bytesISlice(v2)) + for j := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + e.asis(v2[j].v) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + e.encode(v[v2[j].i]) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + e.encode(k2) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeString(c_UTF8, v2) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapIntfUintR(rv reflect.Value) { + fastpathTV.EncMapIntfUintV(rv.Interface().(map[interface{}]uint), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapIntfUintV(v map[interface{}]uint, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + var mksv []byte = make([]byte, 0, len(v)*16) // temporary byte slice for the encoding + e2 := NewEncoderBytes(&mksv, e.hh) + v2 := make([]bytesI, len(v)) + var i, l int + var vp *bytesI + for k2, _ := range v { + l = len(mksv) + e2.MustEncode(k2) + vp = &v2[i] + vp.v = mksv[l:] + vp.i = k2 + i++ + } + sort.Sort(bytesISlice(v2)) + for j := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + e.asis(v2[j].v) + if cr != nil { + cr.sendContainerState(containerMapValue) + } 
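+ // Canonical mode for interface{}-keyed maps: each key is pre-encoded into
+ // the shared mksv buffer, the (encoded bytes, key) pairs are sorted by
+ // their byte representation, and the stored bytes are replayed verbatim
+ // via e.asis so no key is encoded twice.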
+ e.encode(v[v2[j].i]) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + e.encode(k2) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapIntfUint8R(rv reflect.Value) { + fastpathTV.EncMapIntfUint8V(rv.Interface().(map[interface{}]uint8), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapIntfUint8V(v map[interface{}]uint8, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + var mksv []byte = make([]byte, 0, len(v)*16) // temporary byte slice for the encoding + e2 := NewEncoderBytes(&mksv, e.hh) + v2 := make([]bytesI, len(v)) + var i, l int + var vp *bytesI + for k2, _ := range v { + l = len(mksv) + e2.MustEncode(k2) + vp = &v2[i] + vp.v = mksv[l:] + vp.i = k2 + i++ + } + sort.Sort(bytesISlice(v2)) + for j := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + e.asis(v2[j].v) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + e.encode(v[v2[j].i]) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + e.encode(k2) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapIntfUint16R(rv reflect.Value) { + fastpathTV.EncMapIntfUint16V(rv.Interface().(map[interface{}]uint16), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapIntfUint16V(v map[interface{}]uint16, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + var mksv []byte = make([]byte, 0, len(v)*16) // temporary byte slice for the encoding + e2 := NewEncoderBytes(&mksv, e.hh) + v2 := make([]bytesI, len(v)) + var i, l int + var vp *bytesI + for k2, _ := range v { + l = len(mksv) + e2.MustEncode(k2) + vp = &v2[i] + vp.v = mksv[l:] + vp.i = k2 + i++ + } + sort.Sort(bytesISlice(v2)) + for j := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + e.asis(v2[j].v) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + e.encode(v[v2[j].i]) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + e.encode(k2) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapIntfUint32R(rv reflect.Value) { + fastpathTV.EncMapIntfUint32V(rv.Interface().(map[interface{}]uint32), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapIntfUint32V(v map[interface{}]uint32, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + var mksv []byte = make([]byte, 0, len(v)*16) // temporary byte slice for the encoding + e2 := NewEncoderBytes(&mksv, e.hh) + v2 := make([]bytesI, len(v)) + var i, l int + var vp *bytesI + for k2, _ := range v { + l = len(mksv) + e2.MustEncode(k2) + vp = &v2[i] + vp.v = mksv[l:] + vp.i = k2 + i++ + } + sort.Sort(bytesISlice(v2)) + for j := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + e.asis(v2[j].v) + if cr != nil { + 
cr.sendContainerState(containerMapValue) + } + e.encode(v[v2[j].i]) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + e.encode(k2) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapIntfUint64R(rv reflect.Value) { + fastpathTV.EncMapIntfUint64V(rv.Interface().(map[interface{}]uint64), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapIntfUint64V(v map[interface{}]uint64, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + var mksv []byte = make([]byte, 0, len(v)*16) // temporary byte slice for the encoding + e2 := NewEncoderBytes(&mksv, e.hh) + v2 := make([]bytesI, len(v)) + var i, l int + var vp *bytesI + for k2, _ := range v { + l = len(mksv) + e2.MustEncode(k2) + vp = &v2[i] + vp.v = mksv[l:] + vp.i = k2 + i++ + } + sort.Sort(bytesISlice(v2)) + for j := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + e.asis(v2[j].v) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + e.encode(v[v2[j].i]) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + e.encode(k2) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapIntfUintptrR(rv reflect.Value) { + fastpathTV.EncMapIntfUintptrV(rv.Interface().(map[interface{}]uintptr), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapIntfUintptrV(v map[interface{}]uintptr, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + var mksv []byte = make([]byte, 0, len(v)*16) // temporary byte slice for the encoding + e2 := NewEncoderBytes(&mksv, e.hh) + v2 := make([]bytesI, len(v)) + var i, l int + var vp *bytesI + for k2, _ := range v { + l = len(mksv) + e2.MustEncode(k2) + vp = &v2[i] + vp.v = mksv[l:] + vp.i = k2 + i++ + } + sort.Sort(bytesISlice(v2)) + for j := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + e.asis(v2[j].v) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + e.encode(v[v2[j].i]) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + e.encode(k2) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + e.encode(v2) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapIntfIntR(rv reflect.Value) { + fastpathTV.EncMapIntfIntV(rv.Interface().(map[interface{}]int), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapIntfIntV(v map[interface{}]int, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + var mksv []byte = make([]byte, 0, len(v)*16) // temporary byte slice for the encoding + e2 := NewEncoderBytes(&mksv, e.hh) + v2 := make([]bytesI, len(v)) + var i, l int + var vp *bytesI + for k2, _ := range v { + l = len(mksv) + e2.MustEncode(k2) + vp = &v2[i] + vp.v = mksv[l:] + vp.i = k2 + i++ + } + sort.Sort(bytesISlice(v2)) + for j := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + 
e.asis(v2[j].v) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + e.encode(v[v2[j].i]) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + e.encode(k2) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapIntfInt8R(rv reflect.Value) { + fastpathTV.EncMapIntfInt8V(rv.Interface().(map[interface{}]int8), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapIntfInt8V(v map[interface{}]int8, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + var mksv []byte = make([]byte, 0, len(v)*16) // temporary byte slice for the encoding + e2 := NewEncoderBytes(&mksv, e.hh) + v2 := make([]bytesI, len(v)) + var i, l int + var vp *bytesI + for k2, _ := range v { + l = len(mksv) + e2.MustEncode(k2) + vp = &v2[i] + vp.v = mksv[l:] + vp.i = k2 + i++ + } + sort.Sort(bytesISlice(v2)) + for j := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + e.asis(v2[j].v) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + e.encode(v[v2[j].i]) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + e.encode(k2) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapIntfInt16R(rv reflect.Value) { + fastpathTV.EncMapIntfInt16V(rv.Interface().(map[interface{}]int16), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapIntfInt16V(v map[interface{}]int16, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + var mksv []byte = make([]byte, 0, len(v)*16) // temporary byte slice for the encoding + e2 := NewEncoderBytes(&mksv, e.hh) + v2 := make([]bytesI, len(v)) + var i, l int + var vp *bytesI + for k2, _ := range v { + l = len(mksv) + e2.MustEncode(k2) + vp = &v2[i] + vp.v = mksv[l:] + vp.i = k2 + i++ + } + sort.Sort(bytesISlice(v2)) + for j := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + e.asis(v2[j].v) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + e.encode(v[v2[j].i]) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + e.encode(k2) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapIntfInt32R(rv reflect.Value) { + fastpathTV.EncMapIntfInt32V(rv.Interface().(map[interface{}]int32), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapIntfInt32V(v map[interface{}]int32, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + var mksv []byte = make([]byte, 0, len(v)*16) // temporary byte slice for the encoding + e2 := NewEncoderBytes(&mksv, e.hh) + v2 := make([]bytesI, len(v)) + var i, l int + var vp *bytesI + for k2, _ := range v { + l = len(mksv) + e2.MustEncode(k2) + vp = &v2[i] + vp.v = mksv[l:] + vp.i = k2 + i++ + } + sort.Sort(bytesISlice(v2)) + for j := range v2 { + if cr != nil { + 
cr.sendContainerState(containerMapKey) + } + e.asis(v2[j].v) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + e.encode(v[v2[j].i]) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + e.encode(k2) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapIntfInt64R(rv reflect.Value) { + fastpathTV.EncMapIntfInt64V(rv.Interface().(map[interface{}]int64), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapIntfInt64V(v map[interface{}]int64, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + var mksv []byte = make([]byte, 0, len(v)*16) // temporary byte slice for the encoding + e2 := NewEncoderBytes(&mksv, e.hh) + v2 := make([]bytesI, len(v)) + var i, l int + var vp *bytesI + for k2, _ := range v { + l = len(mksv) + e2.MustEncode(k2) + vp = &v2[i] + vp.v = mksv[l:] + vp.i = k2 + i++ + } + sort.Sort(bytesISlice(v2)) + for j := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + e.asis(v2[j].v) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + e.encode(v[v2[j].i]) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + e.encode(k2) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapIntfFloat32R(rv reflect.Value) { + fastpathTV.EncMapIntfFloat32V(rv.Interface().(map[interface{}]float32), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapIntfFloat32V(v map[interface{}]float32, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + var mksv []byte = make([]byte, 0, len(v)*16) // temporary byte slice for the encoding + e2 := NewEncoderBytes(&mksv, e.hh) + v2 := make([]bytesI, len(v)) + var i, l int + var vp *bytesI + for k2, _ := range v { + l = len(mksv) + e2.MustEncode(k2) + vp = &v2[i] + vp.v = mksv[l:] + vp.i = k2 + i++ + } + sort.Sort(bytesISlice(v2)) + for j := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + e.asis(v2[j].v) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + e.encode(v[v2[j].i]) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + e.encode(k2) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeFloat32(v2) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapIntfFloat64R(rv reflect.Value) { + fastpathTV.EncMapIntfFloat64V(rv.Interface().(map[interface{}]float64), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapIntfFloat64V(v map[interface{}]float64, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + var mksv []byte = make([]byte, 0, len(v)*16) // temporary byte slice for the encoding + e2 := NewEncoderBytes(&mksv, e.hh) + v2 := make([]bytesI, len(v)) + var i, l int + var vp *bytesI + for k2, _ := range v { + l = len(mksv) + e2.MustEncode(k2) + vp = &v2[i] + vp.v = mksv[l:] + vp.i = k2 + i++ + } + 
sort.Sort(bytesISlice(v2)) + for j := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + e.asis(v2[j].v) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + e.encode(v[v2[j].i]) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + e.encode(k2) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeFloat64(v2) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapIntfBoolR(rv reflect.Value) { + fastpathTV.EncMapIntfBoolV(rv.Interface().(map[interface{}]bool), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapIntfBoolV(v map[interface{}]bool, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + var mksv []byte = make([]byte, 0, len(v)*16) // temporary byte slice for the encoding + e2 := NewEncoderBytes(&mksv, e.hh) + v2 := make([]bytesI, len(v)) + var i, l int + var vp *bytesI + for k2, _ := range v { + l = len(mksv) + e2.MustEncode(k2) + vp = &v2[i] + vp.v = mksv[l:] + vp.i = k2 + i++ + } + sort.Sort(bytesISlice(v2)) + for j := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + e.asis(v2[j].v) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + e.encode(v[v2[j].i]) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + e.encode(k2) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeBool(v2) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapStringIntfR(rv reflect.Value) { + fastpathTV.EncMapStringIntfV(rv.Interface().(map[string]interface{}), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapStringIntfV(v map[string]interface{}, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + asSymbols := e.h.AsSymbols&AsSymbolMapStringKeysFlag != 0 + if e.h.Canonical { + v2 := make([]string, len(v)) + var i int + for k, _ := range v { + v2[i] = string(k) + i++ + } + sort.Sort(stringSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + if asSymbols { + ee.EncodeSymbol(k2) + } else { + ee.EncodeString(c_UTF8, k2) + } + if cr != nil { + cr.sendContainerState(containerMapValue) + } + e.encode(v[string(k2)]) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + if asSymbols { + ee.EncodeSymbol(k2) + } else { + ee.EncodeString(c_UTF8, k2) + } + if cr != nil { + cr.sendContainerState(containerMapValue) + } + e.encode(v2) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapStringStringR(rv reflect.Value) { + fastpathTV.EncMapStringStringV(rv.Interface().(map[string]string), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapStringStringV(v map[string]string, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + asSymbols := e.h.AsSymbols&AsSymbolMapStringKeysFlag != 0 + if e.h.Canonical { + v2 := make([]string, len(v)) + var i int + for k, _ := range v { + v2[i] = string(k) + i++ + } + sort.Sort(stringSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + if asSymbols { + 
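+ // String-keyed maps add two options: AsSymbols, where keys go through
+ // EncodeSymbol for formats that support symbol tables, and Canonical mode,
+ // which sorts the keys as plain strings instead of sorting encoded bytes.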
ee.EncodeSymbol(k2) + } else { + ee.EncodeString(c_UTF8, k2) + } + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeString(c_UTF8, v[string(k2)]) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + if asSymbols { + ee.EncodeSymbol(k2) + } else { + ee.EncodeString(c_UTF8, k2) + } + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeString(c_UTF8, v2) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapStringUintR(rv reflect.Value) { + fastpathTV.EncMapStringUintV(rv.Interface().(map[string]uint), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapStringUintV(v map[string]uint, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + asSymbols := e.h.AsSymbols&AsSymbolMapStringKeysFlag != 0 + if e.h.Canonical { + v2 := make([]string, len(v)) + var i int + for k, _ := range v { + v2[i] = string(k) + i++ + } + sort.Sort(stringSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + if asSymbols { + ee.EncodeSymbol(k2) + } else { + ee.EncodeString(c_UTF8, k2) + } + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v[string(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + if asSymbols { + ee.EncodeSymbol(k2) + } else { + ee.EncodeString(c_UTF8, k2) + } + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapStringUint8R(rv reflect.Value) { + fastpathTV.EncMapStringUint8V(rv.Interface().(map[string]uint8), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapStringUint8V(v map[string]uint8, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + asSymbols := e.h.AsSymbols&AsSymbolMapStringKeysFlag != 0 + if e.h.Canonical { + v2 := make([]string, len(v)) + var i int + for k, _ := range v { + v2[i] = string(k) + i++ + } + sort.Sort(stringSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + if asSymbols { + ee.EncodeSymbol(k2) + } else { + ee.EncodeString(c_UTF8, k2) + } + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v[string(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + if asSymbols { + ee.EncodeSymbol(k2) + } else { + ee.EncodeString(c_UTF8, k2) + } + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapStringUint16R(rv reflect.Value) { + fastpathTV.EncMapStringUint16V(rv.Interface().(map[string]uint16), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapStringUint16V(v map[string]uint16, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + asSymbols := e.h.AsSymbols&AsSymbolMapStringKeysFlag != 0 + if e.h.Canonical { + v2 := make([]string, len(v)) + var i int + for k, _ := range v { + v2[i] = string(k) + i++ + } + sort.Sort(stringSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + 
cr.sendContainerState(containerMapKey) + } + if asSymbols { + ee.EncodeSymbol(k2) + } else { + ee.EncodeString(c_UTF8, k2) + } + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v[string(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + if asSymbols { + ee.EncodeSymbol(k2) + } else { + ee.EncodeString(c_UTF8, k2) + } + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapStringUint32R(rv reflect.Value) { + fastpathTV.EncMapStringUint32V(rv.Interface().(map[string]uint32), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapStringUint32V(v map[string]uint32, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + asSymbols := e.h.AsSymbols&AsSymbolMapStringKeysFlag != 0 + if e.h.Canonical { + v2 := make([]string, len(v)) + var i int + for k, _ := range v { + v2[i] = string(k) + i++ + } + sort.Sort(stringSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + if asSymbols { + ee.EncodeSymbol(k2) + } else { + ee.EncodeString(c_UTF8, k2) + } + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v[string(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + if asSymbols { + ee.EncodeSymbol(k2) + } else { + ee.EncodeString(c_UTF8, k2) + } + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapStringUint64R(rv reflect.Value) { + fastpathTV.EncMapStringUint64V(rv.Interface().(map[string]uint64), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapStringUint64V(v map[string]uint64, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + asSymbols := e.h.AsSymbols&AsSymbolMapStringKeysFlag != 0 + if e.h.Canonical { + v2 := make([]string, len(v)) + var i int + for k, _ := range v { + v2[i] = string(k) + i++ + } + sort.Sort(stringSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + if asSymbols { + ee.EncodeSymbol(k2) + } else { + ee.EncodeString(c_UTF8, k2) + } + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v[string(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + if asSymbols { + ee.EncodeSymbol(k2) + } else { + ee.EncodeString(c_UTF8, k2) + } + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapStringUintptrR(rv reflect.Value) { + fastpathTV.EncMapStringUintptrV(rv.Interface().(map[string]uintptr), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapStringUintptrV(v map[string]uintptr, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + asSymbols := e.h.AsSymbols&AsSymbolMapStringKeysFlag != 0 + if e.h.Canonical { + v2 := make([]string, len(v)) + var i int + for k, _ := range v { + v2[i] = string(k) + i++ + } + 
sort.Sort(stringSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + if asSymbols { + ee.EncodeSymbol(k2) + } else { + ee.EncodeString(c_UTF8, k2) + } + if cr != nil { + cr.sendContainerState(containerMapValue) + } + e.encode(v[string(k2)]) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + if asSymbols { + ee.EncodeSymbol(k2) + } else { + ee.EncodeString(c_UTF8, k2) + } + if cr != nil { + cr.sendContainerState(containerMapValue) + } + e.encode(v2) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapStringIntR(rv reflect.Value) { + fastpathTV.EncMapStringIntV(rv.Interface().(map[string]int), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapStringIntV(v map[string]int, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + asSymbols := e.h.AsSymbols&AsSymbolMapStringKeysFlag != 0 + if e.h.Canonical { + v2 := make([]string, len(v)) + var i int + for k, _ := range v { + v2[i] = string(k) + i++ + } + sort.Sort(stringSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + if asSymbols { + ee.EncodeSymbol(k2) + } else { + ee.EncodeString(c_UTF8, k2) + } + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v[string(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + if asSymbols { + ee.EncodeSymbol(k2) + } else { + ee.EncodeString(c_UTF8, k2) + } + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapStringInt8R(rv reflect.Value) { + fastpathTV.EncMapStringInt8V(rv.Interface().(map[string]int8), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapStringInt8V(v map[string]int8, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + asSymbols := e.h.AsSymbols&AsSymbolMapStringKeysFlag != 0 + if e.h.Canonical { + v2 := make([]string, len(v)) + var i int + for k, _ := range v { + v2[i] = string(k) + i++ + } + sort.Sort(stringSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + if asSymbols { + ee.EncodeSymbol(k2) + } else { + ee.EncodeString(c_UTF8, k2) + } + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v[string(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + if asSymbols { + ee.EncodeSymbol(k2) + } else { + ee.EncodeString(c_UTF8, k2) + } + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapStringInt16R(rv reflect.Value) { + fastpathTV.EncMapStringInt16V(rv.Interface().(map[string]int16), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapStringInt16V(v map[string]int16, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + asSymbols := e.h.AsSymbols&AsSymbolMapStringKeysFlag != 0 + if e.h.Canonical { + v2 := make([]string, len(v)) + var i int + for k, _ := range v { + v2[i] = string(k) + i++ + } + 
sort.Sort(stringSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + if asSymbols { + ee.EncodeSymbol(k2) + } else { + ee.EncodeString(c_UTF8, k2) + } + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v[string(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + if asSymbols { + ee.EncodeSymbol(k2) + } else { + ee.EncodeString(c_UTF8, k2) + } + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapStringInt32R(rv reflect.Value) { + fastpathTV.EncMapStringInt32V(rv.Interface().(map[string]int32), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapStringInt32V(v map[string]int32, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + asSymbols := e.h.AsSymbols&AsSymbolMapStringKeysFlag != 0 + if e.h.Canonical { + v2 := make([]string, len(v)) + var i int + for k, _ := range v { + v2[i] = string(k) + i++ + } + sort.Sort(stringSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + if asSymbols { + ee.EncodeSymbol(k2) + } else { + ee.EncodeString(c_UTF8, k2) + } + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v[string(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + if asSymbols { + ee.EncodeSymbol(k2) + } else { + ee.EncodeString(c_UTF8, k2) + } + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapStringInt64R(rv reflect.Value) { + fastpathTV.EncMapStringInt64V(rv.Interface().(map[string]int64), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapStringInt64V(v map[string]int64, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + asSymbols := e.h.AsSymbols&AsSymbolMapStringKeysFlag != 0 + if e.h.Canonical { + v2 := make([]string, len(v)) + var i int + for k, _ := range v { + v2[i] = string(k) + i++ + } + sort.Sort(stringSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + if asSymbols { + ee.EncodeSymbol(k2) + } else { + ee.EncodeString(c_UTF8, k2) + } + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v[string(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + if asSymbols { + ee.EncodeSymbol(k2) + } else { + ee.EncodeString(c_UTF8, k2) + } + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapStringFloat32R(rv reflect.Value) { + fastpathTV.EncMapStringFloat32V(rv.Interface().(map[string]float32), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapStringFloat32V(v map[string]float32, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + asSymbols := e.h.AsSymbols&AsSymbolMapStringKeysFlag != 0 + if e.h.Canonical { + v2 := make([]string, len(v)) + var i int + for k, 
_ := range v { + v2[i] = string(k) + i++ + } + sort.Sort(stringSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + if asSymbols { + ee.EncodeSymbol(k2) + } else { + ee.EncodeString(c_UTF8, k2) + } + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeFloat32(v[string(k2)]) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + if asSymbols { + ee.EncodeSymbol(k2) + } else { + ee.EncodeString(c_UTF8, k2) + } + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeFloat32(v2) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapStringFloat64R(rv reflect.Value) { + fastpathTV.EncMapStringFloat64V(rv.Interface().(map[string]float64), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapStringFloat64V(v map[string]float64, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + asSymbols := e.h.AsSymbols&AsSymbolMapStringKeysFlag != 0 + if e.h.Canonical { + v2 := make([]string, len(v)) + var i int + for k, _ := range v { + v2[i] = string(k) + i++ + } + sort.Sort(stringSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + if asSymbols { + ee.EncodeSymbol(k2) + } else { + ee.EncodeString(c_UTF8, k2) + } + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeFloat64(v[string(k2)]) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + if asSymbols { + ee.EncodeSymbol(k2) + } else { + ee.EncodeString(c_UTF8, k2) + } + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeFloat64(v2) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapStringBoolR(rv reflect.Value) { + fastpathTV.EncMapStringBoolV(rv.Interface().(map[string]bool), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapStringBoolV(v map[string]bool, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + asSymbols := e.h.AsSymbols&AsSymbolMapStringKeysFlag != 0 + if e.h.Canonical { + v2 := make([]string, len(v)) + var i int + for k, _ := range v { + v2[i] = string(k) + i++ + } + sort.Sort(stringSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + if asSymbols { + ee.EncodeSymbol(k2) + } else { + ee.EncodeString(c_UTF8, k2) + } + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeBool(v[string(k2)]) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + if asSymbols { + ee.EncodeSymbol(k2) + } else { + ee.EncodeString(c_UTF8, k2) + } + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeBool(v2) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapFloat32IntfR(rv reflect.Value) { + fastpathTV.EncMapFloat32IntfV(rv.Interface().(map[float32]interface{}), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapFloat32IntfV(v map[float32]interface{}, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]float64, len(v)) + var i int + for k, _ := range v { + v2[i] = 
float64(k) + i++ + } + sort.Sort(floatSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeFloat32(float32(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + e.encode(v[float32(k2)]) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeFloat32(k2) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + e.encode(v2) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapFloat32StringR(rv reflect.Value) { + fastpathTV.EncMapFloat32StringV(rv.Interface().(map[float32]string), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapFloat32StringV(v map[float32]string, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]float64, len(v)) + var i int + for k, _ := range v { + v2[i] = float64(k) + i++ + } + sort.Sort(floatSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeFloat32(float32(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeString(c_UTF8, v[float32(k2)]) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeFloat32(k2) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeString(c_UTF8, v2) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapFloat32UintR(rv reflect.Value) { + fastpathTV.EncMapFloat32UintV(rv.Interface().(map[float32]uint), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapFloat32UintV(v map[float32]uint, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]float64, len(v)) + var i int + for k, _ := range v { + v2[i] = float64(k) + i++ + } + sort.Sort(floatSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeFloat32(float32(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v[float32(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeFloat32(k2) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapFloat32Uint8R(rv reflect.Value) { + fastpathTV.EncMapFloat32Uint8V(rv.Interface().(map[float32]uint8), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapFloat32Uint8V(v map[float32]uint8, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]float64, len(v)) + var i int + for k, _ := range v { + v2[i] = float64(k) + i++ + } + sort.Sort(floatSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeFloat32(float32(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v[float32(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeFloat32(k2) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + 
ee.EncodeUint(uint64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapFloat32Uint16R(rv reflect.Value) { + fastpathTV.EncMapFloat32Uint16V(rv.Interface().(map[float32]uint16), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapFloat32Uint16V(v map[float32]uint16, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]float64, len(v)) + var i int + for k, _ := range v { + v2[i] = float64(k) + i++ + } + sort.Sort(floatSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeFloat32(float32(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v[float32(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeFloat32(k2) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapFloat32Uint32R(rv reflect.Value) { + fastpathTV.EncMapFloat32Uint32V(rv.Interface().(map[float32]uint32), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapFloat32Uint32V(v map[float32]uint32, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]float64, len(v)) + var i int + for k, _ := range v { + v2[i] = float64(k) + i++ + } + sort.Sort(floatSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeFloat32(float32(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v[float32(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeFloat32(k2) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapFloat32Uint64R(rv reflect.Value) { + fastpathTV.EncMapFloat32Uint64V(rv.Interface().(map[float32]uint64), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapFloat32Uint64V(v map[float32]uint64, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]float64, len(v)) + var i int + for k, _ := range v { + v2[i] = float64(k) + i++ + } + sort.Sort(floatSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeFloat32(float32(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v[float32(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeFloat32(k2) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapFloat32UintptrR(rv reflect.Value) { + fastpathTV.EncMapFloat32UintptrV(rv.Interface().(map[float32]uintptr), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapFloat32UintptrV(v map[float32]uintptr, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + 
ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]float64, len(v)) + var i int + for k, _ := range v { + v2[i] = float64(k) + i++ + } + sort.Sort(floatSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeFloat32(float32(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + e.encode(v[float32(k2)]) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeFloat32(k2) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + e.encode(v2) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapFloat32IntR(rv reflect.Value) { + fastpathTV.EncMapFloat32IntV(rv.Interface().(map[float32]int), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapFloat32IntV(v map[float32]int, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]float64, len(v)) + var i int + for k, _ := range v { + v2[i] = float64(k) + i++ + } + sort.Sort(floatSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeFloat32(float32(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v[float32(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeFloat32(k2) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapFloat32Int8R(rv reflect.Value) { + fastpathTV.EncMapFloat32Int8V(rv.Interface().(map[float32]int8), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapFloat32Int8V(v map[float32]int8, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]float64, len(v)) + var i int + for k, _ := range v { + v2[i] = float64(k) + i++ + } + sort.Sort(floatSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeFloat32(float32(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v[float32(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeFloat32(k2) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapFloat32Int16R(rv reflect.Value) { + fastpathTV.EncMapFloat32Int16V(rv.Interface().(map[float32]int16), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapFloat32Int16V(v map[float32]int16, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]float64, len(v)) + var i int + for k, _ := range v { + v2[i] = float64(k) + i++ + } + sort.Sort(floatSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeFloat32(float32(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v[float32(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + 
cr.sendContainerState(containerMapKey) + } + ee.EncodeFloat32(k2) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapFloat32Int32R(rv reflect.Value) { + fastpathTV.EncMapFloat32Int32V(rv.Interface().(map[float32]int32), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapFloat32Int32V(v map[float32]int32, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]float64, len(v)) + var i int + for k, _ := range v { + v2[i] = float64(k) + i++ + } + sort.Sort(floatSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeFloat32(float32(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v[float32(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeFloat32(k2) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapFloat32Int64R(rv reflect.Value) { + fastpathTV.EncMapFloat32Int64V(rv.Interface().(map[float32]int64), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapFloat32Int64V(v map[float32]int64, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]float64, len(v)) + var i int + for k, _ := range v { + v2[i] = float64(k) + i++ + } + sort.Sort(floatSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeFloat32(float32(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v[float32(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeFloat32(k2) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapFloat32Float32R(rv reflect.Value) { + fastpathTV.EncMapFloat32Float32V(rv.Interface().(map[float32]float32), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapFloat32Float32V(v map[float32]float32, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]float64, len(v)) + var i int + for k, _ := range v { + v2[i] = float64(k) + i++ + } + sort.Sort(floatSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeFloat32(float32(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeFloat32(v[float32(k2)]) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeFloat32(k2) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeFloat32(v2) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapFloat32Float64R(rv reflect.Value) { + fastpathTV.EncMapFloat32Float64V(rv.Interface().(map[float32]float64), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapFloat32Float64V(v 
map[float32]float64, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]float64, len(v)) + var i int + for k, _ := range v { + v2[i] = float64(k) + i++ + } + sort.Sort(floatSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeFloat32(float32(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeFloat64(v[float32(k2)]) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeFloat32(k2) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeFloat64(v2) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapFloat32BoolR(rv reflect.Value) { + fastpathTV.EncMapFloat32BoolV(rv.Interface().(map[float32]bool), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapFloat32BoolV(v map[float32]bool, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]float64, len(v)) + var i int + for k, _ := range v { + v2[i] = float64(k) + i++ + } + sort.Sort(floatSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeFloat32(float32(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeBool(v[float32(k2)]) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeFloat32(k2) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeBool(v2) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapFloat64IntfR(rv reflect.Value) { + fastpathTV.EncMapFloat64IntfV(rv.Interface().(map[float64]interface{}), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapFloat64IntfV(v map[float64]interface{}, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]float64, len(v)) + var i int + for k, _ := range v { + v2[i] = float64(k) + i++ + } + sort.Sort(floatSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeFloat64(float64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + e.encode(v[float64(k2)]) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeFloat64(k2) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + e.encode(v2) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapFloat64StringR(rv reflect.Value) { + fastpathTV.EncMapFloat64StringV(rv.Interface().(map[float64]string), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapFloat64StringV(v map[float64]string, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]float64, len(v)) + var i int + for k, _ := range v { + v2[i] = float64(k) + i++ + } + sort.Sort(floatSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeFloat64(float64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } 
+ ee.EncodeString(c_UTF8, v[float64(k2)]) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeFloat64(k2) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeString(c_UTF8, v2) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapFloat64UintR(rv reflect.Value) { + fastpathTV.EncMapFloat64UintV(rv.Interface().(map[float64]uint), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapFloat64UintV(v map[float64]uint, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]float64, len(v)) + var i int + for k, _ := range v { + v2[i] = float64(k) + i++ + } + sort.Sort(floatSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeFloat64(float64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v[float64(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeFloat64(k2) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapFloat64Uint8R(rv reflect.Value) { + fastpathTV.EncMapFloat64Uint8V(rv.Interface().(map[float64]uint8), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapFloat64Uint8V(v map[float64]uint8, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]float64, len(v)) + var i int + for k, _ := range v { + v2[i] = float64(k) + i++ + } + sort.Sort(floatSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeFloat64(float64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v[float64(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeFloat64(k2) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapFloat64Uint16R(rv reflect.Value) { + fastpathTV.EncMapFloat64Uint16V(rv.Interface().(map[float64]uint16), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapFloat64Uint16V(v map[float64]uint16, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]float64, len(v)) + var i int + for k, _ := range v { + v2[i] = float64(k) + i++ + } + sort.Sort(floatSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeFloat64(float64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v[float64(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeFloat64(k2) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapFloat64Uint32R(rv reflect.Value) { + 
fastpathTV.EncMapFloat64Uint32V(rv.Interface().(map[float64]uint32), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapFloat64Uint32V(v map[float64]uint32, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]float64, len(v)) + var i int + for k, _ := range v { + v2[i] = float64(k) + i++ + } + sort.Sort(floatSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeFloat64(float64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v[float64(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeFloat64(k2) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapFloat64Uint64R(rv reflect.Value) { + fastpathTV.EncMapFloat64Uint64V(rv.Interface().(map[float64]uint64), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapFloat64Uint64V(v map[float64]uint64, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]float64, len(v)) + var i int + for k, _ := range v { + v2[i] = float64(k) + i++ + } + sort.Sort(floatSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeFloat64(float64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v[float64(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeFloat64(k2) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapFloat64UintptrR(rv reflect.Value) { + fastpathTV.EncMapFloat64UintptrV(rv.Interface().(map[float64]uintptr), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapFloat64UintptrV(v map[float64]uintptr, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]float64, len(v)) + var i int + for k, _ := range v { + v2[i] = float64(k) + i++ + } + sort.Sort(floatSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeFloat64(float64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + e.encode(v[float64(k2)]) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeFloat64(k2) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + e.encode(v2) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapFloat64IntR(rv reflect.Value) { + fastpathTV.EncMapFloat64IntV(rv.Interface().(map[float64]int), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapFloat64IntV(v map[float64]int, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]float64, len(v)) + var i int + for k, _ := range v { + v2[i] = float64(k) + i++ + } + sort.Sort(floatSlice(v2)) + for _, k2 := 
range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeFloat64(float64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v[float64(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeFloat64(k2) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapFloat64Int8R(rv reflect.Value) { + fastpathTV.EncMapFloat64Int8V(rv.Interface().(map[float64]int8), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapFloat64Int8V(v map[float64]int8, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]float64, len(v)) + var i int + for k, _ := range v { + v2[i] = float64(k) + i++ + } + sort.Sort(floatSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeFloat64(float64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v[float64(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeFloat64(k2) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapFloat64Int16R(rv reflect.Value) { + fastpathTV.EncMapFloat64Int16V(rv.Interface().(map[float64]int16), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapFloat64Int16V(v map[float64]int16, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]float64, len(v)) + var i int + for k, _ := range v { + v2[i] = float64(k) + i++ + } + sort.Sort(floatSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeFloat64(float64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v[float64(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeFloat64(k2) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapFloat64Int32R(rv reflect.Value) { + fastpathTV.EncMapFloat64Int32V(rv.Interface().(map[float64]int32), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapFloat64Int32V(v map[float64]int32, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]float64, len(v)) + var i int + for k, _ := range v { + v2[i] = float64(k) + i++ + } + sort.Sort(floatSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeFloat64(float64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v[float64(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeFloat64(k2) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v2)) + } + } + if cr != nil { + 
cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapFloat64Int64R(rv reflect.Value) { + fastpathTV.EncMapFloat64Int64V(rv.Interface().(map[float64]int64), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapFloat64Int64V(v map[float64]int64, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]float64, len(v)) + var i int + for k, _ := range v { + v2[i] = float64(k) + i++ + } + sort.Sort(floatSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeFloat64(float64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v[float64(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeFloat64(k2) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapFloat64Float32R(rv reflect.Value) { + fastpathTV.EncMapFloat64Float32V(rv.Interface().(map[float64]float32), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapFloat64Float32V(v map[float64]float32, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]float64, len(v)) + var i int + for k, _ := range v { + v2[i] = float64(k) + i++ + } + sort.Sort(floatSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeFloat64(float64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeFloat32(v[float64(k2)]) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeFloat64(k2) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeFloat32(v2) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapFloat64Float64R(rv reflect.Value) { + fastpathTV.EncMapFloat64Float64V(rv.Interface().(map[float64]float64), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapFloat64Float64V(v map[float64]float64, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]float64, len(v)) + var i int + for k, _ := range v { + v2[i] = float64(k) + i++ + } + sort.Sort(floatSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeFloat64(float64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeFloat64(v[float64(k2)]) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeFloat64(k2) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeFloat64(v2) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapFloat64BoolR(rv reflect.Value) { + fastpathTV.EncMapFloat64BoolV(rv.Interface().(map[float64]bool), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapFloat64BoolV(v map[float64]bool, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := 
make([]float64, len(v)) + var i int + for k, _ := range v { + v2[i] = float64(k) + i++ + } + sort.Sort(floatSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeFloat64(float64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeBool(v[float64(k2)]) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeFloat64(k2) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeBool(v2) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapUintIntfR(rv reflect.Value) { + fastpathTV.EncMapUintIntfV(rv.Interface().(map[uint]interface{}), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapUintIntfV(v map[uint]interface{}, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(uint(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + e.encode(v[uint(k2)]) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + e.encode(v2) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapUintStringR(rv reflect.Value) { + fastpathTV.EncMapUintStringV(rv.Interface().(map[uint]string), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapUintStringV(v map[uint]string, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(uint(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeString(c_UTF8, v[uint(k2)]) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeString(c_UTF8, v2) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapUintUintR(rv reflect.Value) { + fastpathTV.EncMapUintUintV(rv.Interface().(map[uint]uint), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapUintUintV(v map[uint]uint, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(uint(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v[uint(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(k2)) + if cr != nil { + 
cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapUintUint8R(rv reflect.Value) { + fastpathTV.EncMapUintUint8V(rv.Interface().(map[uint]uint8), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapUintUint8V(v map[uint]uint8, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(uint(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v[uint(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapUintUint16R(rv reflect.Value) { + fastpathTV.EncMapUintUint16V(rv.Interface().(map[uint]uint16), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapUintUint16V(v map[uint]uint16, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(uint(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v[uint(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapUintUint32R(rv reflect.Value) { + fastpathTV.EncMapUintUint32V(rv.Interface().(map[uint]uint32), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapUintUint32V(v map[uint]uint32, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(uint(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v[uint(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapUintUint64R(rv reflect.Value) { + fastpathTV.EncMapUintUint64V(rv.Interface().(map[uint]uint64), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapUintUint64V(v map[uint]uint64, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + 
ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(uint(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v[uint(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapUintUintptrR(rv reflect.Value) { + fastpathTV.EncMapUintUintptrV(rv.Interface().(map[uint]uintptr), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapUintUintptrV(v map[uint]uintptr, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(uint(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + e.encode(v[uint(k2)]) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + e.encode(v2) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapUintIntR(rv reflect.Value) { + fastpathTV.EncMapUintIntV(rv.Interface().(map[uint]int), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapUintIntV(v map[uint]int, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(uint(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v[uint(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapUintInt8R(rv reflect.Value) { + fastpathTV.EncMapUintInt8V(rv.Interface().(map[uint]int8), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapUintInt8V(v map[uint]int8, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(uint(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v[uint(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(k2)) + 
if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapUintInt16R(rv reflect.Value) { + fastpathTV.EncMapUintInt16V(rv.Interface().(map[uint]int16), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapUintInt16V(v map[uint]int16, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(uint(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v[uint(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapUintInt32R(rv reflect.Value) { + fastpathTV.EncMapUintInt32V(rv.Interface().(map[uint]int32), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapUintInt32V(v map[uint]int32, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(uint(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v[uint(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapUintInt64R(rv reflect.Value) { + fastpathTV.EncMapUintInt64V(rv.Interface().(map[uint]int64), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapUintInt64V(v map[uint]int64, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(uint(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v[uint(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapUintFloat32R(rv reflect.Value) { + fastpathTV.EncMapUintFloat32V(rv.Interface().(map[uint]float32), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapUintFloat32V(v map[uint]float32, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + 
ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(uint(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeFloat32(v[uint(k2)]) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeFloat32(v2) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapUintFloat64R(rv reflect.Value) { + fastpathTV.EncMapUintFloat64V(rv.Interface().(map[uint]float64), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapUintFloat64V(v map[uint]float64, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(uint(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeFloat64(v[uint(k2)]) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeFloat64(v2) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapUintBoolR(rv reflect.Value) { + fastpathTV.EncMapUintBoolV(rv.Interface().(map[uint]bool), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapUintBoolV(v map[uint]bool, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(uint(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeBool(v[uint(k2)]) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeBool(v2) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapUint8IntfR(rv reflect.Value) { + fastpathTV.EncMapUint8IntfV(rv.Interface().(map[uint8]interface{}), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapUint8IntfV(v map[uint8]interface{}, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(uint8(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + e.encode(v[uint8(k2)]) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + 
ee.EncodeUint(uint64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + e.encode(v2) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapUint8StringR(rv reflect.Value) { + fastpathTV.EncMapUint8StringV(rv.Interface().(map[uint8]string), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapUint8StringV(v map[uint8]string, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(uint8(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeString(c_UTF8, v[uint8(k2)]) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeString(c_UTF8, v2) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapUint8UintR(rv reflect.Value) { + fastpathTV.EncMapUint8UintV(rv.Interface().(map[uint8]uint), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapUint8UintV(v map[uint8]uint, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(uint8(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v[uint8(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapUint8Uint8R(rv reflect.Value) { + fastpathTV.EncMapUint8Uint8V(rv.Interface().(map[uint8]uint8), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapUint8Uint8V(v map[uint8]uint8, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(uint8(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v[uint8(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapUint8Uint16R(rv reflect.Value) { + fastpathTV.EncMapUint8Uint16V(rv.Interface().(map[uint8]uint16), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapUint8Uint16V(v map[uint8]uint16, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if 
checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(uint8(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v[uint8(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapUint8Uint32R(rv reflect.Value) { + fastpathTV.EncMapUint8Uint32V(rv.Interface().(map[uint8]uint32), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapUint8Uint32V(v map[uint8]uint32, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(uint8(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v[uint8(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapUint8Uint64R(rv reflect.Value) { + fastpathTV.EncMapUint8Uint64V(rv.Interface().(map[uint8]uint64), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapUint8Uint64V(v map[uint8]uint64, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(uint8(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v[uint8(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapUint8UintptrR(rv reflect.Value) { + fastpathTV.EncMapUint8UintptrV(rv.Interface().(map[uint8]uintptr), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapUint8UintptrV(v map[uint8]uintptr, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(uint8(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + e.encode(v[uint8(k2)]) + } + } else 
{ + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + e.encode(v2) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapUint8IntR(rv reflect.Value) { + fastpathTV.EncMapUint8IntV(rv.Interface().(map[uint8]int), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapUint8IntV(v map[uint8]int, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(uint8(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v[uint8(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapUint8Int8R(rv reflect.Value) { + fastpathTV.EncMapUint8Int8V(rv.Interface().(map[uint8]int8), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapUint8Int8V(v map[uint8]int8, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(uint8(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v[uint8(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapUint8Int16R(rv reflect.Value) { + fastpathTV.EncMapUint8Int16V(rv.Interface().(map[uint8]int16), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapUint8Int16V(v map[uint8]int16, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(uint8(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v[uint8(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapUint8Int32R(rv reflect.Value) { + fastpathTV.EncMapUint8Int32V(rv.Interface().(map[uint8]int32), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapUint8Int32V(v map[uint8]int32, 
checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(uint8(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v[uint8(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapUint8Int64R(rv reflect.Value) { + fastpathTV.EncMapUint8Int64V(rv.Interface().(map[uint8]int64), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapUint8Int64V(v map[uint8]int64, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(uint8(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v[uint8(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapUint8Float32R(rv reflect.Value) { + fastpathTV.EncMapUint8Float32V(rv.Interface().(map[uint8]float32), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapUint8Float32V(v map[uint8]float32, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(uint8(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeFloat32(v[uint8(k2)]) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeFloat32(v2) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapUint8Float64R(rv reflect.Value) { + fastpathTV.EncMapUint8Float64V(rv.Interface().(map[uint8]float64), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapUint8Float64V(v map[uint8]float64, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(uint8(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) 
+ } + ee.EncodeFloat64(v[uint8(k2)]) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeFloat64(v2) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapUint8BoolR(rv reflect.Value) { + fastpathTV.EncMapUint8BoolV(rv.Interface().(map[uint8]bool), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapUint8BoolV(v map[uint8]bool, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(uint8(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeBool(v[uint8(k2)]) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeBool(v2) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapUint16IntfR(rv reflect.Value) { + fastpathTV.EncMapUint16IntfV(rv.Interface().(map[uint16]interface{}), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapUint16IntfV(v map[uint16]interface{}, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(uint16(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + e.encode(v[uint16(k2)]) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + e.encode(v2) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapUint16StringR(rv reflect.Value) { + fastpathTV.EncMapUint16StringV(rv.Interface().(map[uint16]string), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapUint16StringV(v map[uint16]string, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(uint16(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeString(c_UTF8, v[uint16(k2)]) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeString(c_UTF8, v2) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapUint16UintR(rv reflect.Value) { + fastpathTV.EncMapUint16UintV(rv.Interface().(map[uint16]uint), fastpathCheckNilFalse, 
f.e) +} +func (_ fastpathT) EncMapUint16UintV(v map[uint16]uint, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(uint16(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v[uint16(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapUint16Uint8R(rv reflect.Value) { + fastpathTV.EncMapUint16Uint8V(rv.Interface().(map[uint16]uint8), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapUint16Uint8V(v map[uint16]uint8, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(uint16(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v[uint16(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapUint16Uint16R(rv reflect.Value) { + fastpathTV.EncMapUint16Uint16V(rv.Interface().(map[uint16]uint16), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapUint16Uint16V(v map[uint16]uint16, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(uint16(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v[uint16(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapUint16Uint32R(rv reflect.Value) { + fastpathTV.EncMapUint16Uint32V(rv.Interface().(map[uint16]uint32), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapUint16Uint32V(v map[uint16]uint32, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + 
ee.EncodeUint(uint64(uint16(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v[uint16(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapUint16Uint64R(rv reflect.Value) { + fastpathTV.EncMapUint16Uint64V(rv.Interface().(map[uint16]uint64), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapUint16Uint64V(v map[uint16]uint64, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(uint16(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v[uint16(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapUint16UintptrR(rv reflect.Value) { + fastpathTV.EncMapUint16UintptrV(rv.Interface().(map[uint16]uintptr), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapUint16UintptrV(v map[uint16]uintptr, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(uint16(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + e.encode(v[uint16(k2)]) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + e.encode(v2) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapUint16IntR(rv reflect.Value) { + fastpathTV.EncMapUint16IntV(rv.Interface().(map[uint16]int), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapUint16IntV(v map[uint16]int, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(uint16(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v[uint16(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) 
fastpathEncMapUint16Int8R(rv reflect.Value) { + fastpathTV.EncMapUint16Int8V(rv.Interface().(map[uint16]int8), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapUint16Int8V(v map[uint16]int8, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(uint16(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v[uint16(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapUint16Int16R(rv reflect.Value) { + fastpathTV.EncMapUint16Int16V(rv.Interface().(map[uint16]int16), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapUint16Int16V(v map[uint16]int16, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(uint16(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v[uint16(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapUint16Int32R(rv reflect.Value) { + fastpathTV.EncMapUint16Int32V(rv.Interface().(map[uint16]int32), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapUint16Int32V(v map[uint16]int32, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(uint16(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v[uint16(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapUint16Int64R(rv reflect.Value) { + fastpathTV.EncMapUint16Int64V(rv.Interface().(map[uint16]int64), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapUint16Int64V(v map[uint16]int64, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + 
sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(uint16(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v[uint16(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapUint16Float32R(rv reflect.Value) { + fastpathTV.EncMapUint16Float32V(rv.Interface().(map[uint16]float32), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapUint16Float32V(v map[uint16]float32, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(uint16(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeFloat32(v[uint16(k2)]) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeFloat32(v2) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapUint16Float64R(rv reflect.Value) { + fastpathTV.EncMapUint16Float64V(rv.Interface().(map[uint16]float64), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapUint16Float64V(v map[uint16]float64, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(uint16(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeFloat64(v[uint16(k2)]) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeFloat64(v2) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapUint16BoolR(rv reflect.Value) { + fastpathTV.EncMapUint16BoolV(rv.Interface().(map[uint16]bool), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapUint16BoolV(v map[uint16]bool, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(uint16(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeBool(v[uint16(k2)]) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeBool(v2) 
+ } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapUint32IntfR(rv reflect.Value) { + fastpathTV.EncMapUint32IntfV(rv.Interface().(map[uint32]interface{}), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapUint32IntfV(v map[uint32]interface{}, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(uint32(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + e.encode(v[uint32(k2)]) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + e.encode(v2) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapUint32StringR(rv reflect.Value) { + fastpathTV.EncMapUint32StringV(rv.Interface().(map[uint32]string), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapUint32StringV(v map[uint32]string, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(uint32(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeString(c_UTF8, v[uint32(k2)]) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeString(c_UTF8, v2) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapUint32UintR(rv reflect.Value) { + fastpathTV.EncMapUint32UintV(rv.Interface().(map[uint32]uint), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapUint32UintV(v map[uint32]uint, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(uint32(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v[uint32(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapUint32Uint8R(rv reflect.Value) { + fastpathTV.EncMapUint32Uint8V(rv.Interface().(map[uint32]uint8), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapUint32Uint8V(v map[uint32]uint8, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + 
v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(uint32(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v[uint32(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapUint32Uint16R(rv reflect.Value) { + fastpathTV.EncMapUint32Uint16V(rv.Interface().(map[uint32]uint16), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapUint32Uint16V(v map[uint32]uint16, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(uint32(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v[uint32(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapUint32Uint32R(rv reflect.Value) { + fastpathTV.EncMapUint32Uint32V(rv.Interface().(map[uint32]uint32), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapUint32Uint32V(v map[uint32]uint32, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(uint32(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v[uint32(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapUint32Uint64R(rv reflect.Value) { + fastpathTV.EncMapUint32Uint64V(rv.Interface().(map[uint32]uint64), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapUint32Uint64V(v map[uint32]uint64, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(uint32(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v[uint32(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + 
cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapUint32UintptrR(rv reflect.Value) { + fastpathTV.EncMapUint32UintptrV(rv.Interface().(map[uint32]uintptr), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapUint32UintptrV(v map[uint32]uintptr, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(uint32(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + e.encode(v[uint32(k2)]) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + e.encode(v2) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapUint32IntR(rv reflect.Value) { + fastpathTV.EncMapUint32IntV(rv.Interface().(map[uint32]int), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapUint32IntV(v map[uint32]int, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(uint32(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v[uint32(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapUint32Int8R(rv reflect.Value) { + fastpathTV.EncMapUint32Int8V(rv.Interface().(map[uint32]int8), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapUint32Int8V(v map[uint32]int8, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(uint32(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v[uint32(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapUint32Int16R(rv reflect.Value) { + fastpathTV.EncMapUint32Int16V(rv.Interface().(map[uint32]int16), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapUint32Int16V(v map[uint32]int16, checkNil bool, e 
*Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(uint32(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v[uint32(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapUint32Int32R(rv reflect.Value) { + fastpathTV.EncMapUint32Int32V(rv.Interface().(map[uint32]int32), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapUint32Int32V(v map[uint32]int32, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(uint32(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v[uint32(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapUint32Int64R(rv reflect.Value) { + fastpathTV.EncMapUint32Int64V(rv.Interface().(map[uint32]int64), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapUint32Int64V(v map[uint32]int64, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(uint32(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v[uint32(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapUint32Float32R(rv reflect.Value) { + fastpathTV.EncMapUint32Float32V(rv.Interface().(map[uint32]float32), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapUint32Float32V(v map[uint32]float32, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(uint32(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) 
+ } + ee.EncodeFloat32(v[uint32(k2)]) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeFloat32(v2) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapUint32Float64R(rv reflect.Value) { + fastpathTV.EncMapUint32Float64V(rv.Interface().(map[uint32]float64), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapUint32Float64V(v map[uint32]float64, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(uint32(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeFloat64(v[uint32(k2)]) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeFloat64(v2) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapUint32BoolR(rv reflect.Value) { + fastpathTV.EncMapUint32BoolV(rv.Interface().(map[uint32]bool), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapUint32BoolV(v map[uint32]bool, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(uint32(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeBool(v[uint32(k2)]) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeBool(v2) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapUint64IntfR(rv reflect.Value) { + fastpathTV.EncMapUint64IntfV(rv.Interface().(map[uint64]interface{}), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapUint64IntfV(v map[uint64]interface{}, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(uint64(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + e.encode(v[uint64(k2)]) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + e.encode(v2) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapUint64StringR(rv reflect.Value) { + fastpathTV.EncMapUint64StringV(rv.Interface().(map[uint64]string), 
fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapUint64StringV(v map[uint64]string, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(uint64(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeString(c_UTF8, v[uint64(k2)]) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeString(c_UTF8, v2) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapUint64UintR(rv reflect.Value) { + fastpathTV.EncMapUint64UintV(rv.Interface().(map[uint64]uint), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapUint64UintV(v map[uint64]uint, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(uint64(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v[uint64(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapUint64Uint8R(rv reflect.Value) { + fastpathTV.EncMapUint64Uint8V(rv.Interface().(map[uint64]uint8), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapUint64Uint8V(v map[uint64]uint8, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(uint64(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v[uint64(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapUint64Uint16R(rv reflect.Value) { + fastpathTV.EncMapUint64Uint16V(rv.Interface().(map[uint64]uint16), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapUint64Uint16V(v map[uint64]uint16, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + 
cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(uint64(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v[uint64(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapUint64Uint32R(rv reflect.Value) { + fastpathTV.EncMapUint64Uint32V(rv.Interface().(map[uint64]uint32), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapUint64Uint32V(v map[uint64]uint32, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(uint64(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v[uint64(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapUint64Uint64R(rv reflect.Value) { + fastpathTV.EncMapUint64Uint64V(rv.Interface().(map[uint64]uint64), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapUint64Uint64V(v map[uint64]uint64, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(uint64(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v[uint64(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapUint64UintptrR(rv reflect.Value) { + fastpathTV.EncMapUint64UintptrV(rv.Interface().(map[uint64]uintptr), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapUint64UintptrV(v map[uint64]uintptr, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(uint64(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + e.encode(v[uint64(k2)]) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + e.encode(v2) + } + } + if cr != nil { + 
cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapUint64IntR(rv reflect.Value) { + fastpathTV.EncMapUint64IntV(rv.Interface().(map[uint64]int), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapUint64IntV(v map[uint64]int, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(uint64(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v[uint64(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapUint64Int8R(rv reflect.Value) { + fastpathTV.EncMapUint64Int8V(rv.Interface().(map[uint64]int8), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapUint64Int8V(v map[uint64]int8, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(uint64(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v[uint64(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapUint64Int16R(rv reflect.Value) { + fastpathTV.EncMapUint64Int16V(rv.Interface().(map[uint64]int16), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapUint64Int16V(v map[uint64]int16, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(uint64(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v[uint64(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapUint64Int32R(rv reflect.Value) { + fastpathTV.EncMapUint64Int32V(rv.Interface().(map[uint64]int32), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapUint64Int32V(v map[uint64]int32, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int 
+ for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(uint64(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v[uint64(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapUint64Int64R(rv reflect.Value) { + fastpathTV.EncMapUint64Int64V(rv.Interface().(map[uint64]int64), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapUint64Int64V(v map[uint64]int64, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(uint64(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v[uint64(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapUint64Float32R(rv reflect.Value) { + fastpathTV.EncMapUint64Float32V(rv.Interface().(map[uint64]float32), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapUint64Float32V(v map[uint64]float32, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(uint64(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeFloat32(v[uint64(k2)]) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeFloat32(v2) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapUint64Float64R(rv reflect.Value) { + fastpathTV.EncMapUint64Float64V(rv.Interface().(map[uint64]float64), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapUint64Float64V(v map[uint64]float64, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(uint64(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeFloat64(v[uint64(k2)]) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(k2)) + if cr != 
nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeFloat64(v2) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapUint64BoolR(rv reflect.Value) { + fastpathTV.EncMapUint64BoolV(rv.Interface().(map[uint64]bool), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapUint64BoolV(v map[uint64]bool, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(uint64(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeBool(v[uint64(k2)]) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeBool(v2) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapUintptrIntfR(rv reflect.Value) { + fastpathTV.EncMapUintptrIntfV(rv.Interface().(map[uintptr]interface{}), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapUintptrIntfV(v map[uintptr]interface{}, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + e.encode(uintptr(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + e.encode(v[uintptr(k2)]) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + e.encode(k2) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + e.encode(v2) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapUintptrStringR(rv reflect.Value) { + fastpathTV.EncMapUintptrStringV(rv.Interface().(map[uintptr]string), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapUintptrStringV(v map[uintptr]string, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + e.encode(uintptr(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeString(c_UTF8, v[uintptr(k2)]) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + e.encode(k2) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeString(c_UTF8, v2) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapUintptrUintR(rv reflect.Value) { + fastpathTV.EncMapUintptrUintV(rv.Interface().(map[uintptr]uint), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapUintptrUintV(v map[uintptr]uint, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) 
+ if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + e.encode(uintptr(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v[uintptr(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + e.encode(k2) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapUintptrUint8R(rv reflect.Value) { + fastpathTV.EncMapUintptrUint8V(rv.Interface().(map[uintptr]uint8), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapUintptrUint8V(v map[uintptr]uint8, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + e.encode(uintptr(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v[uintptr(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + e.encode(k2) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapUintptrUint16R(rv reflect.Value) { + fastpathTV.EncMapUintptrUint16V(rv.Interface().(map[uintptr]uint16), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapUintptrUint16V(v map[uintptr]uint16, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + e.encode(uintptr(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v[uintptr(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + e.encode(k2) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapUintptrUint32R(rv reflect.Value) { + fastpathTV.EncMapUintptrUint32V(rv.Interface().(map[uintptr]uint32), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapUintptrUint32V(v map[uintptr]uint32, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + e.encode(uintptr(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v[uintptr(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + e.encode(k2) + if cr != nil { 
+ cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapUintptrUint64R(rv reflect.Value) { + fastpathTV.EncMapUintptrUint64V(rv.Interface().(map[uintptr]uint64), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapUintptrUint64V(v map[uintptr]uint64, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + e.encode(uintptr(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v[uintptr(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + e.encode(k2) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapUintptrUintptrR(rv reflect.Value) { + fastpathTV.EncMapUintptrUintptrV(rv.Interface().(map[uintptr]uintptr), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapUintptrUintptrV(v map[uintptr]uintptr, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + e.encode(uintptr(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + e.encode(v[uintptr(k2)]) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + e.encode(k2) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + e.encode(v2) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapUintptrIntR(rv reflect.Value) { + fastpathTV.EncMapUintptrIntV(rv.Interface().(map[uintptr]int), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapUintptrIntV(v map[uintptr]int, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + e.encode(uintptr(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v[uintptr(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + e.encode(k2) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapUintptrInt8R(rv reflect.Value) { + fastpathTV.EncMapUintptrInt8V(rv.Interface().(map[uintptr]int8), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapUintptrInt8V(v map[uintptr]int8, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if 
e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + e.encode(uintptr(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v[uintptr(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + e.encode(k2) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapUintptrInt16R(rv reflect.Value) { + fastpathTV.EncMapUintptrInt16V(rv.Interface().(map[uintptr]int16), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapUintptrInt16V(v map[uintptr]int16, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + e.encode(uintptr(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v[uintptr(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + e.encode(k2) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapUintptrInt32R(rv reflect.Value) { + fastpathTV.EncMapUintptrInt32V(rv.Interface().(map[uintptr]int32), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapUintptrInt32V(v map[uintptr]int32, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + e.encode(uintptr(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v[uintptr(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + e.encode(k2) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapUintptrInt64R(rv reflect.Value) { + fastpathTV.EncMapUintptrInt64V(rv.Interface().(map[uintptr]int64), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapUintptrInt64V(v map[uintptr]int64, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + e.encode(uintptr(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v[uintptr(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + e.encode(k2) + if cr != nil { + 
cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapUintptrFloat32R(rv reflect.Value) { + fastpathTV.EncMapUintptrFloat32V(rv.Interface().(map[uintptr]float32), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapUintptrFloat32V(v map[uintptr]float32, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + e.encode(uintptr(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeFloat32(v[uintptr(k2)]) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + e.encode(k2) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeFloat32(v2) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapUintptrFloat64R(rv reflect.Value) { + fastpathTV.EncMapUintptrFloat64V(rv.Interface().(map[uintptr]float64), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapUintptrFloat64V(v map[uintptr]float64, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + e.encode(uintptr(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeFloat64(v[uintptr(k2)]) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + e.encode(k2) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeFloat64(v2) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapUintptrBoolR(rv reflect.Value) { + fastpathTV.EncMapUintptrBoolV(rv.Interface().(map[uintptr]bool), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapUintptrBoolV(v map[uintptr]bool, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + e.encode(uintptr(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeBool(v[uintptr(k2)]) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + e.encode(k2) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeBool(v2) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapIntIntfR(rv reflect.Value) { + fastpathTV.EncMapIntIntfV(rv.Interface().(map[int]interface{}), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapIntIntfV(v map[int]interface{}, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + 
v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(int(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + e.encode(v[int(k2)]) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + e.encode(v2) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapIntStringR(rv reflect.Value) { + fastpathTV.EncMapIntStringV(rv.Interface().(map[int]string), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapIntStringV(v map[int]string, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(int(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeString(c_UTF8, v[int(k2)]) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeString(c_UTF8, v2) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapIntUintR(rv reflect.Value) { + fastpathTV.EncMapIntUintV(rv.Interface().(map[int]uint), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapIntUintV(v map[int]uint, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(int(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v[int(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapIntUint8R(rv reflect.Value) { + fastpathTV.EncMapIntUint8V(rv.Interface().(map[int]uint8), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapIntUint8V(v map[int]uint8, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(int(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v[int(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + 
ee.EncodeUint(uint64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapIntUint16R(rv reflect.Value) { + fastpathTV.EncMapIntUint16V(rv.Interface().(map[int]uint16), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapIntUint16V(v map[int]uint16, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(int(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v[int(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapIntUint32R(rv reflect.Value) { + fastpathTV.EncMapIntUint32V(rv.Interface().(map[int]uint32), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapIntUint32V(v map[int]uint32, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(int(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v[int(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapIntUint64R(rv reflect.Value) { + fastpathTV.EncMapIntUint64V(rv.Interface().(map[int]uint64), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapIntUint64V(v map[int]uint64, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(int(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v[int(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapIntUintptrR(rv reflect.Value) { + fastpathTV.EncMapIntUintptrV(rv.Interface().(map[int]uintptr), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapIntUintptrV(v map[int]uintptr, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var 
i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(int(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + e.encode(v[int(k2)]) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + e.encode(v2) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapIntIntR(rv reflect.Value) { + fastpathTV.EncMapIntIntV(rv.Interface().(map[int]int), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapIntIntV(v map[int]int, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(int(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v[int(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapIntInt8R(rv reflect.Value) { + fastpathTV.EncMapIntInt8V(rv.Interface().(map[int]int8), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapIntInt8V(v map[int]int8, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(int(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v[int(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapIntInt16R(rv reflect.Value) { + fastpathTV.EncMapIntInt16V(rv.Interface().(map[int]int16), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapIntInt16V(v map[int]int16, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(int(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v[int(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v2)) + } + } + if cr != nil { + 
cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapIntInt32R(rv reflect.Value) { + fastpathTV.EncMapIntInt32V(rv.Interface().(map[int]int32), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapIntInt32V(v map[int]int32, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(int(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v[int(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapIntInt64R(rv reflect.Value) { + fastpathTV.EncMapIntInt64V(rv.Interface().(map[int]int64), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapIntInt64V(v map[int]int64, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(int(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v[int(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapIntFloat32R(rv reflect.Value) { + fastpathTV.EncMapIntFloat32V(rv.Interface().(map[int]float32), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapIntFloat32V(v map[int]float32, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(int(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeFloat32(v[int(k2)]) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeFloat32(v2) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapIntFloat64R(rv reflect.Value) { + fastpathTV.EncMapIntFloat64V(rv.Interface().(map[int]float64), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapIntFloat64V(v map[int]float64, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + 
sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(int(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeFloat64(v[int(k2)]) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeFloat64(v2) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapIntBoolR(rv reflect.Value) { + fastpathTV.EncMapIntBoolV(rv.Interface().(map[int]bool), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapIntBoolV(v map[int]bool, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(int(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeBool(v[int(k2)]) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeBool(v2) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapInt8IntfR(rv reflect.Value) { + fastpathTV.EncMapInt8IntfV(rv.Interface().(map[int8]interface{}), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapInt8IntfV(v map[int8]interface{}, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(int8(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + e.encode(v[int8(k2)]) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + e.encode(v2) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapInt8StringR(rv reflect.Value) { + fastpathTV.EncMapInt8StringV(rv.Interface().(map[int8]string), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapInt8StringV(v map[int8]string, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(int8(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeString(c_UTF8, v[int8(k2)]) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeString(c_UTF8, v2) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f 
*encFnInfo) fastpathEncMapInt8UintR(rv reflect.Value) { + fastpathTV.EncMapInt8UintV(rv.Interface().(map[int8]uint), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapInt8UintV(v map[int8]uint, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(int8(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v[int8(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapInt8Uint8R(rv reflect.Value) { + fastpathTV.EncMapInt8Uint8V(rv.Interface().(map[int8]uint8), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapInt8Uint8V(v map[int8]uint8, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(int8(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v[int8(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapInt8Uint16R(rv reflect.Value) { + fastpathTV.EncMapInt8Uint16V(rv.Interface().(map[int8]uint16), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapInt8Uint16V(v map[int8]uint16, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(int8(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v[int8(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapInt8Uint32R(rv reflect.Value) { + fastpathTV.EncMapInt8Uint32V(rv.Interface().(map[int8]uint32), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapInt8Uint32V(v map[int8]uint32, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := 
range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(int8(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v[int8(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapInt8Uint64R(rv reflect.Value) { + fastpathTV.EncMapInt8Uint64V(rv.Interface().(map[int8]uint64), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapInt8Uint64V(v map[int8]uint64, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(int8(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v[int8(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapInt8UintptrR(rv reflect.Value) { + fastpathTV.EncMapInt8UintptrV(rv.Interface().(map[int8]uintptr), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapInt8UintptrV(v map[int8]uintptr, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(int8(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + e.encode(v[int8(k2)]) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + e.encode(v2) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapInt8IntR(rv reflect.Value) { + fastpathTV.EncMapInt8IntV(rv.Interface().(map[int8]int), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapInt8IntV(v map[int8]int, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(int8(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v[int8(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) 
fastpathEncMapInt8Int8R(rv reflect.Value) { + fastpathTV.EncMapInt8Int8V(rv.Interface().(map[int8]int8), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapInt8Int8V(v map[int8]int8, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(int8(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v[int8(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapInt8Int16R(rv reflect.Value) { + fastpathTV.EncMapInt8Int16V(rv.Interface().(map[int8]int16), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapInt8Int16V(v map[int8]int16, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(int8(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v[int8(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapInt8Int32R(rv reflect.Value) { + fastpathTV.EncMapInt8Int32V(rv.Interface().(map[int8]int32), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapInt8Int32V(v map[int8]int32, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(int8(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v[int8(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapInt8Int64R(rv reflect.Value) { + fastpathTV.EncMapInt8Int64V(rv.Interface().(map[int8]int64), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapInt8Int64V(v map[int8]int64, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + 
cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(int8(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v[int8(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapInt8Float32R(rv reflect.Value) { + fastpathTV.EncMapInt8Float32V(rv.Interface().(map[int8]float32), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapInt8Float32V(v map[int8]float32, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(int8(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeFloat32(v[int8(k2)]) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeFloat32(v2) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapInt8Float64R(rv reflect.Value) { + fastpathTV.EncMapInt8Float64V(rv.Interface().(map[int8]float64), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapInt8Float64V(v map[int8]float64, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(int8(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeFloat64(v[int8(k2)]) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeFloat64(v2) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapInt8BoolR(rv reflect.Value) { + fastpathTV.EncMapInt8BoolV(rv.Interface().(map[int8]bool), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapInt8BoolV(v map[int8]bool, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(int8(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeBool(v[int8(k2)]) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeBool(v2) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapInt16IntfR(rv reflect.Value) { 
+ fastpathTV.EncMapInt16IntfV(rv.Interface().(map[int16]interface{}), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapInt16IntfV(v map[int16]interface{}, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(int16(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + e.encode(v[int16(k2)]) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + e.encode(v2) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapInt16StringR(rv reflect.Value) { + fastpathTV.EncMapInt16StringV(rv.Interface().(map[int16]string), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapInt16StringV(v map[int16]string, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(int16(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeString(c_UTF8, v[int16(k2)]) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeString(c_UTF8, v2) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapInt16UintR(rv reflect.Value) { + fastpathTV.EncMapInt16UintV(rv.Interface().(map[int16]uint), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapInt16UintV(v map[int16]uint, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(int16(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v[int16(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapInt16Uint8R(rv reflect.Value) { + fastpathTV.EncMapInt16Uint8V(rv.Interface().(map[int16]uint8), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapInt16Uint8V(v map[int16]uint8, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + 
cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(int16(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v[int16(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapInt16Uint16R(rv reflect.Value) { + fastpathTV.EncMapInt16Uint16V(rv.Interface().(map[int16]uint16), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapInt16Uint16V(v map[int16]uint16, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(int16(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v[int16(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapInt16Uint32R(rv reflect.Value) { + fastpathTV.EncMapInt16Uint32V(rv.Interface().(map[int16]uint32), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapInt16Uint32V(v map[int16]uint32, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(int16(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v[int16(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapInt16Uint64R(rv reflect.Value) { + fastpathTV.EncMapInt16Uint64V(rv.Interface().(map[int16]uint64), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapInt16Uint64V(v map[int16]uint64, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(int16(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v[int16(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + 
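// The generated EncMap*V fast paths above all follow one template: when
// e.h.Canonical is set, the map keys are copied into a scratch slice,
// sorted through a sort.Interface wrapper (uintSlice/intSlice), and the
// entries are emitted in that fixed order so equal maps always encode to
// identical bytes; otherwise the map is walked in Go's randomized range
// order. Below is a minimal, standalone sketch of that canonical branch
// using only the standard library; uint64Slice, encodeCanonical, and emit
// are illustrative names, not part of the vendored codec package.
package main

import (
	"fmt"
	"sort"
)

// uint64Slice mirrors the role of the codec package's uintSlice helper:
// a named slice type implementing sort.Interface for the key scratch slice.
type uint64Slice []uint64

func (s uint64Slice) Len() int           { return len(s) }
func (s uint64Slice) Less(i, j int) bool { return s[i] < s[j] }
func (s uint64Slice) Swap(i, j int)      { s[i], s[j] = s[j], s[i] }

// encodeCanonical visits map entries in ascending key order, using the
// same key-copy/sort/emit shape as the e.h.Canonical branch above.
func encodeCanonical(v map[uint64]bool, emit func(k uint64, val bool)) {
	ks := make(uint64Slice, 0, len(v))
	for k := range v {
		ks = append(ks, k)
	}
	sort.Sort(ks)
	for _, k := range ks {
		emit(k, v[k])
	}
}

func main() {
	m := map[uint64]bool{3: true, 1: false, 2: true}
	encodeCanonical(m, func(k uint64, val bool) {
		fmt.Printf("%d=%v ", k, val) // deterministic: 1=false 2=true 3=true
	})
	fmt.Println()
}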
+func (f *encFnInfo) fastpathEncMapInt16UintptrR(rv reflect.Value) { + fastpathTV.EncMapInt16UintptrV(rv.Interface().(map[int16]uintptr), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapInt16UintptrV(v map[int16]uintptr, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(int16(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + e.encode(v[int16(k2)]) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + e.encode(v2) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapInt16IntR(rv reflect.Value) { + fastpathTV.EncMapInt16IntV(rv.Interface().(map[int16]int), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapInt16IntV(v map[int16]int, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(int16(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v[int16(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapInt16Int8R(rv reflect.Value) { + fastpathTV.EncMapInt16Int8V(rv.Interface().(map[int16]int8), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapInt16Int8V(v map[int16]int8, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(int16(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v[int16(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapInt16Int16R(rv reflect.Value) { + fastpathTV.EncMapInt16Int16V(rv.Interface().(map[int16]int16), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapInt16Int16V(v map[int16]int16, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + 
if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(int16(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v[int16(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapInt16Int32R(rv reflect.Value) { + fastpathTV.EncMapInt16Int32V(rv.Interface().(map[int16]int32), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapInt16Int32V(v map[int16]int32, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(int16(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v[int16(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapInt16Int64R(rv reflect.Value) { + fastpathTV.EncMapInt16Int64V(rv.Interface().(map[int16]int64), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapInt16Int64V(v map[int16]int64, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(int16(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v[int16(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapInt16Float32R(rv reflect.Value) { + fastpathTV.EncMapInt16Float32V(rv.Interface().(map[int16]float32), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapInt16Float32V(v map[int16]float32, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(int16(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeFloat32(v[int16(k2)]) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeFloat32(v2) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f 
*encFnInfo) fastpathEncMapInt16Float64R(rv reflect.Value) { + fastpathTV.EncMapInt16Float64V(rv.Interface().(map[int16]float64), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapInt16Float64V(v map[int16]float64, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(int16(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeFloat64(v[int16(k2)]) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeFloat64(v2) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapInt16BoolR(rv reflect.Value) { + fastpathTV.EncMapInt16BoolV(rv.Interface().(map[int16]bool), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapInt16BoolV(v map[int16]bool, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(int16(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeBool(v[int16(k2)]) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeBool(v2) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapInt32IntfR(rv reflect.Value) { + fastpathTV.EncMapInt32IntfV(rv.Interface().(map[int32]interface{}), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapInt32IntfV(v map[int32]interface{}, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(int32(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + e.encode(v[int32(k2)]) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + e.encode(v2) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapInt32StringR(rv reflect.Value) { + fastpathTV.EncMapInt32StringV(rv.Interface().(map[int32]string), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapInt32StringV(v map[int32]string, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if 
cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(int32(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeString(c_UTF8, v[int32(k2)]) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeString(c_UTF8, v2) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapInt32UintR(rv reflect.Value) { + fastpathTV.EncMapInt32UintV(rv.Interface().(map[int32]uint), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapInt32UintV(v map[int32]uint, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(int32(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v[int32(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapInt32Uint8R(rv reflect.Value) { + fastpathTV.EncMapInt32Uint8V(rv.Interface().(map[int32]uint8), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapInt32Uint8V(v map[int32]uint8, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(int32(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v[int32(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapInt32Uint16R(rv reflect.Value) { + fastpathTV.EncMapInt32Uint16V(rv.Interface().(map[int32]uint16), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapInt32Uint16V(v map[int32]uint16, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(int32(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v[int32(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + 
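Not part of the vendored file; a minimal standalone sketch of the pattern the generated EncMap*V fast paths above keep repeating. When `e.h.Canonical` is set, each one copies the keys into a slice, widens them to a sortable type, sorts, and then emits key/value pairs in that fixed order so equal maps always encode to identical bytes. The names `encodeCanonical`, `emit`, and `int64Slice` below are hypothetical stand-ins for the generated body and its `intSlice` helper:

    package main

    import (
        "fmt"
        "sort"
    )

    // int64Slice mirrors the intSlice helper the generated code sorts with.
    type int64Slice []int64

    func (s int64Slice) Len() int           { return len(s) }
    func (s int64Slice) Less(i, j int) bool { return s[i] < s[j] }
    func (s int64Slice) Swap(i, j int)      { s[i], s[j] = s[j], s[i] }

    // encodeCanonical visits a map[int32]uint16 in deterministic key order,
    // mirroring the canonical branch above: keys are widened to int64,
    // sorted, then used to index back into the map with the real key type.
    func encodeCanonical(v map[int32]uint16, emit func(k int64, val uint64)) {
        keys := make(int64Slice, 0, len(v))
        for k := range v {
            keys = append(keys, int64(k))
        }
        sort.Sort(keys)
        for _, k := range keys {
            emit(k, uint64(v[int32(k)])) // convert back to the original key type
        }
    }

    func main() {
        m := map[int32]uint16{3: 30, 1: 10, 2: 20}
        encodeCanonical(m, func(k int64, val uint64) {
            fmt.Printf("%d=%d\n", k, val) // always 1, 2, 3: map iteration order no longer matters
        })
    }
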
+func (f *encFnInfo) fastpathEncMapInt32Uint32R(rv reflect.Value) { + fastpathTV.EncMapInt32Uint32V(rv.Interface().(map[int32]uint32), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapInt32Uint32V(v map[int32]uint32, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(int32(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v[int32(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapInt32Uint64R(rv reflect.Value) { + fastpathTV.EncMapInt32Uint64V(rv.Interface().(map[int32]uint64), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapInt32Uint64V(v map[int32]uint64, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(int32(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v[int32(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapInt32UintptrR(rv reflect.Value) { + fastpathTV.EncMapInt32UintptrV(rv.Interface().(map[int32]uintptr), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapInt32UintptrV(v map[int32]uintptr, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(int32(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + e.encode(v[int32(k2)]) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + e.encode(v2) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapInt32IntR(rv reflect.Value) { + fastpathTV.EncMapInt32IntV(rv.Interface().(map[int32]int), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapInt32IntV(v map[int32]int, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for 
_, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(int32(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v[int32(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapInt32Int8R(rv reflect.Value) { + fastpathTV.EncMapInt32Int8V(rv.Interface().(map[int32]int8), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapInt32Int8V(v map[int32]int8, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(int32(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v[int32(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapInt32Int16R(rv reflect.Value) { + fastpathTV.EncMapInt32Int16V(rv.Interface().(map[int32]int16), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapInt32Int16V(v map[int32]int16, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(int32(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v[int32(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapInt32Int32R(rv reflect.Value) { + fastpathTV.EncMapInt32Int32V(rv.Interface().(map[int32]int32), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapInt32Int32V(v map[int32]int32, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(int32(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v[int32(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + 
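Also not part of the vendored file: a sketch of why these near-identical functions exist at all. Each fastpathEncMap*R wrapper dispatches with a single concrete type assertion (`rv.Interface().(map[int32]int)` and so on) instead of walking the map through reflect.Value per element, and fastpathDecodeTypeSwitch later in this file does the same with a type switch, where the pointer cases write back via the v2/changed2 idiom. The dispatcher name `encodeAny` below is hypothetical, and the reflection fallback is elided:

    package main

    import "fmt"

    // encodeAny dispatches on the concrete type: known map/slice types take
    // a direct-iteration fast path; anything else would fall through to the
    // reflection-based encoder (elided here).
    func encodeAny(iv interface{}) {
        switch v := iv.(type) {
        case map[int32]int32:
            for k, val := range v { // direct iteration, no reflect.Value per element
                fmt.Printf("%d:%d ", k, val)
            }
            fmt.Println()
        case []string:
            for _, s := range v {
                fmt.Println(s)
            }
        default:
            fmt.Println("slow path: reflection-based encode")
        }
    }

    func main() {
        encodeAny(map[int32]int32{1: 2, 3: 4})
        encodeAny([]string{"a", "b"})
        encodeAny(3.14) // unhandled type: slow path
    }
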
+func (f *encFnInfo) fastpathEncMapInt32Int64R(rv reflect.Value) { + fastpathTV.EncMapInt32Int64V(rv.Interface().(map[int32]int64), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapInt32Int64V(v map[int32]int64, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(int32(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v[int32(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapInt32Float32R(rv reflect.Value) { + fastpathTV.EncMapInt32Float32V(rv.Interface().(map[int32]float32), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapInt32Float32V(v map[int32]float32, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(int32(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeFloat32(v[int32(k2)]) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeFloat32(v2) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapInt32Float64R(rv reflect.Value) { + fastpathTV.EncMapInt32Float64V(rv.Interface().(map[int32]float64), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapInt32Float64V(v map[int32]float64, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(int32(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeFloat64(v[int32(k2)]) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeFloat64(v2) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapInt32BoolR(rv reflect.Value) { + fastpathTV.EncMapInt32BoolV(rv.Interface().(map[int32]bool), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapInt32BoolV(v map[int32]bool, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + 
sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(int32(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeBool(v[int32(k2)]) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeBool(v2) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapInt64IntfR(rv reflect.Value) { + fastpathTV.EncMapInt64IntfV(rv.Interface().(map[int64]interface{}), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapInt64IntfV(v map[int64]interface{}, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(int64(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + e.encode(v[int64(k2)]) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + e.encode(v2) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapInt64StringR(rv reflect.Value) { + fastpathTV.EncMapInt64StringV(rv.Interface().(map[int64]string), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapInt64StringV(v map[int64]string, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(int64(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeString(c_UTF8, v[int64(k2)]) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeString(c_UTF8, v2) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapInt64UintR(rv reflect.Value) { + fastpathTV.EncMapInt64UintV(rv.Interface().(map[int64]uint), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapInt64UintV(v map[int64]uint, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(int64(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v[int64(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v2)) + } + } + if cr != nil { + 
cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapInt64Uint8R(rv reflect.Value) { + fastpathTV.EncMapInt64Uint8V(rv.Interface().(map[int64]uint8), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapInt64Uint8V(v map[int64]uint8, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(int64(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v[int64(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapInt64Uint16R(rv reflect.Value) { + fastpathTV.EncMapInt64Uint16V(rv.Interface().(map[int64]uint16), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapInt64Uint16V(v map[int64]uint16, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(int64(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v[int64(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapInt64Uint32R(rv reflect.Value) { + fastpathTV.EncMapInt64Uint32V(rv.Interface().(map[int64]uint32), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapInt64Uint32V(v map[int64]uint32, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(int64(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v[int64(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapInt64Uint64R(rv reflect.Value) { + fastpathTV.EncMapInt64Uint64V(rv.Interface().(map[int64]uint64), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapInt64Uint64V(v map[int64]uint64, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for 
k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(int64(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v[int64(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapInt64UintptrR(rv reflect.Value) { + fastpathTV.EncMapInt64UintptrV(rv.Interface().(map[int64]uintptr), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapInt64UintptrV(v map[int64]uintptr, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(int64(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + e.encode(v[int64(k2)]) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + e.encode(v2) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapInt64IntR(rv reflect.Value) { + fastpathTV.EncMapInt64IntV(rv.Interface().(map[int64]int), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapInt64IntV(v map[int64]int, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(int64(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v[int64(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapInt64Int8R(rv reflect.Value) { + fastpathTV.EncMapInt64Int8V(rv.Interface().(map[int64]int8), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapInt64Int8V(v map[int64]int8, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(int64(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v[int64(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v2)) + } + } + if 
cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapInt64Int16R(rv reflect.Value) { + fastpathTV.EncMapInt64Int16V(rv.Interface().(map[int64]int16), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapInt64Int16V(v map[int64]int16, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(int64(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v[int64(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapInt64Int32R(rv reflect.Value) { + fastpathTV.EncMapInt64Int32V(rv.Interface().(map[int64]int32), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapInt64Int32V(v map[int64]int32, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(int64(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v[int64(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapInt64Int64R(rv reflect.Value) { + fastpathTV.EncMapInt64Int64V(rv.Interface().(map[int64]int64), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapInt64Int64V(v map[int64]int64, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(int64(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v[int64(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapInt64Float32R(rv reflect.Value) { + fastpathTV.EncMapInt64Float32V(rv.Interface().(map[int64]float32), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapInt64Float32V(v map[int64]float32, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, 
_ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(int64(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeFloat32(v[int64(k2)]) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeFloat32(v2) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapInt64Float64R(rv reflect.Value) { + fastpathTV.EncMapInt64Float64V(rv.Interface().(map[int64]float64), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapInt64Float64V(v map[int64]float64, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(int64(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeFloat64(v[int64(k2)]) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeFloat64(v2) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapInt64BoolR(rv reflect.Value) { + fastpathTV.EncMapInt64BoolV(rv.Interface().(map[int64]bool), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapInt64BoolV(v map[int64]bool, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(int64(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeBool(v[int64(k2)]) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeBool(v2) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapBoolIntfR(rv reflect.Value) { + fastpathTV.EncMapBoolIntfV(rv.Interface().(map[bool]interface{}), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapBoolIntfV(v map[bool]interface{}, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]bool, len(v)) + var i int + for k, _ := range v { + v2[i] = bool(k) + i++ + } + sort.Sort(boolSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeBool(bool(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + e.encode(v[bool(k2)]) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeBool(k2) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + e.encode(v2) + } + } + if cr != nil { + 
cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapBoolStringR(rv reflect.Value) { + fastpathTV.EncMapBoolStringV(rv.Interface().(map[bool]string), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapBoolStringV(v map[bool]string, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]bool, len(v)) + var i int + for k, _ := range v { + v2[i] = bool(k) + i++ + } + sort.Sort(boolSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeBool(bool(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeString(c_UTF8, v[bool(k2)]) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeBool(k2) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeString(c_UTF8, v2) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapBoolUintR(rv reflect.Value) { + fastpathTV.EncMapBoolUintV(rv.Interface().(map[bool]uint), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapBoolUintV(v map[bool]uint, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]bool, len(v)) + var i int + for k, _ := range v { + v2[i] = bool(k) + i++ + } + sort.Sort(boolSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeBool(bool(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v[bool(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeBool(k2) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapBoolUint8R(rv reflect.Value) { + fastpathTV.EncMapBoolUint8V(rv.Interface().(map[bool]uint8), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapBoolUint8V(v map[bool]uint8, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]bool, len(v)) + var i int + for k, _ := range v { + v2[i] = bool(k) + i++ + } + sort.Sort(boolSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeBool(bool(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v[bool(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeBool(k2) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapBoolUint16R(rv reflect.Value) { + fastpathTV.EncMapBoolUint16V(rv.Interface().(map[bool]uint16), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapBoolUint16V(v map[bool]uint16, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]bool, len(v)) + var i int + for k, _ := range v { + v2[i] = bool(k) + i++ + } + 
sort.Sort(boolSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeBool(bool(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v[bool(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeBool(k2) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapBoolUint32R(rv reflect.Value) { + fastpathTV.EncMapBoolUint32V(rv.Interface().(map[bool]uint32), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapBoolUint32V(v map[bool]uint32, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]bool, len(v)) + var i int + for k, _ := range v { + v2[i] = bool(k) + i++ + } + sort.Sort(boolSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeBool(bool(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v[bool(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeBool(k2) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapBoolUint64R(rv reflect.Value) { + fastpathTV.EncMapBoolUint64V(rv.Interface().(map[bool]uint64), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapBoolUint64V(v map[bool]uint64, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]bool, len(v)) + var i int + for k, _ := range v { + v2[i] = bool(k) + i++ + } + sort.Sort(boolSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeBool(bool(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v[bool(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeBool(k2) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapBoolUintptrR(rv reflect.Value) { + fastpathTV.EncMapBoolUintptrV(rv.Interface().(map[bool]uintptr), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapBoolUintptrV(v map[bool]uintptr, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]bool, len(v)) + var i int + for k, _ := range v { + v2[i] = bool(k) + i++ + } + sort.Sort(boolSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeBool(bool(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + e.encode(v[bool(k2)]) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeBool(k2) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + e.encode(v2) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) 
fastpathEncMapBoolIntR(rv reflect.Value) { + fastpathTV.EncMapBoolIntV(rv.Interface().(map[bool]int), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapBoolIntV(v map[bool]int, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]bool, len(v)) + var i int + for k, _ := range v { + v2[i] = bool(k) + i++ + } + sort.Sort(boolSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeBool(bool(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v[bool(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeBool(k2) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapBoolInt8R(rv reflect.Value) { + fastpathTV.EncMapBoolInt8V(rv.Interface().(map[bool]int8), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapBoolInt8V(v map[bool]int8, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]bool, len(v)) + var i int + for k, _ := range v { + v2[i] = bool(k) + i++ + } + sort.Sort(boolSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeBool(bool(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v[bool(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeBool(k2) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapBoolInt16R(rv reflect.Value) { + fastpathTV.EncMapBoolInt16V(rv.Interface().(map[bool]int16), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapBoolInt16V(v map[bool]int16, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]bool, len(v)) + var i int + for k, _ := range v { + v2[i] = bool(k) + i++ + } + sort.Sort(boolSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeBool(bool(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v[bool(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeBool(k2) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapBoolInt32R(rv reflect.Value) { + fastpathTV.EncMapBoolInt32V(rv.Interface().(map[bool]int32), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapBoolInt32V(v map[bool]int32, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]bool, len(v)) + var i int + for k, _ := range v { + v2[i] = bool(k) + i++ + } + sort.Sort(boolSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + 
ee.EncodeBool(bool(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v[bool(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeBool(k2) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapBoolInt64R(rv reflect.Value) { + fastpathTV.EncMapBoolInt64V(rv.Interface().(map[bool]int64), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapBoolInt64V(v map[bool]int64, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]bool, len(v)) + var i int + for k, _ := range v { + v2[i] = bool(k) + i++ + } + sort.Sort(boolSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeBool(bool(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v[bool(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeBool(k2) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapBoolFloat32R(rv reflect.Value) { + fastpathTV.EncMapBoolFloat32V(rv.Interface().(map[bool]float32), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapBoolFloat32V(v map[bool]float32, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]bool, len(v)) + var i int + for k, _ := range v { + v2[i] = bool(k) + i++ + } + sort.Sort(boolSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeBool(bool(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeFloat32(v[bool(k2)]) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeBool(k2) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeFloat32(v2) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapBoolFloat64R(rv reflect.Value) { + fastpathTV.EncMapBoolFloat64V(rv.Interface().(map[bool]float64), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapBoolFloat64V(v map[bool]float64, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]bool, len(v)) + var i int + for k, _ := range v { + v2[i] = bool(k) + i++ + } + sort.Sort(boolSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeBool(bool(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeFloat64(v[bool(k2)]) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeBool(k2) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeFloat64(v2) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapBoolBoolR(rv reflect.Value) { + fastpathTV.EncMapBoolBoolV(rv.Interface().(map[bool]bool), 
fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapBoolBoolV(v map[bool]bool, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]bool, len(v)) + var i int + for k, _ := range v { + v2[i] = bool(k) + i++ + } + sort.Sort(boolSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeBool(bool(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeBool(v[bool(k2)]) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeBool(k2) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeBool(v2) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +// -- decode + +// -- -- fast path type switch +func fastpathDecodeTypeSwitch(iv interface{}, d *Decoder) bool { + if !fastpathEnabled { + return false + } + switch v := iv.(type) { + + case []interface{}: + fastpathTV.DecSliceIntfV(v, fastpathCheckNilFalse, false, d) + case *[]interface{}: + v2, changed2 := fastpathTV.DecSliceIntfV(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[interface{}]interface{}: + fastpathTV.DecMapIntfIntfV(v, fastpathCheckNilFalse, false, d) + case *map[interface{}]interface{}: + v2, changed2 := fastpathTV.DecMapIntfIntfV(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[interface{}]string: + fastpathTV.DecMapIntfStringV(v, fastpathCheckNilFalse, false, d) + case *map[interface{}]string: + v2, changed2 := fastpathTV.DecMapIntfStringV(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[interface{}]uint: + fastpathTV.DecMapIntfUintV(v, fastpathCheckNilFalse, false, d) + case *map[interface{}]uint: + v2, changed2 := fastpathTV.DecMapIntfUintV(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[interface{}]uint8: + fastpathTV.DecMapIntfUint8V(v, fastpathCheckNilFalse, false, d) + case *map[interface{}]uint8: + v2, changed2 := fastpathTV.DecMapIntfUint8V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[interface{}]uint16: + fastpathTV.DecMapIntfUint16V(v, fastpathCheckNilFalse, false, d) + case *map[interface{}]uint16: + v2, changed2 := fastpathTV.DecMapIntfUint16V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[interface{}]uint32: + fastpathTV.DecMapIntfUint32V(v, fastpathCheckNilFalse, false, d) + case *map[interface{}]uint32: + v2, changed2 := fastpathTV.DecMapIntfUint32V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[interface{}]uint64: + fastpathTV.DecMapIntfUint64V(v, fastpathCheckNilFalse, false, d) + case *map[interface{}]uint64: + v2, changed2 := fastpathTV.DecMapIntfUint64V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[interface{}]uintptr: + fastpathTV.DecMapIntfUintptrV(v, fastpathCheckNilFalse, false, d) + case *map[interface{}]uintptr: + v2, changed2 := fastpathTV.DecMapIntfUintptrV(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[interface{}]int: + fastpathTV.DecMapIntfIntV(v, fastpathCheckNilFalse, false, d) + case *map[interface{}]int: + v2, changed2 := fastpathTV.DecMapIntfIntV(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[interface{}]int8: + fastpathTV.DecMapIntfInt8V(v, fastpathCheckNilFalse, false, d) + case 
*map[interface{}]int8: + v2, changed2 := fastpathTV.DecMapIntfInt8V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[interface{}]int16: + fastpathTV.DecMapIntfInt16V(v, fastpathCheckNilFalse, false, d) + case *map[interface{}]int16: + v2, changed2 := fastpathTV.DecMapIntfInt16V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[interface{}]int32: + fastpathTV.DecMapIntfInt32V(v, fastpathCheckNilFalse, false, d) + case *map[interface{}]int32: + v2, changed2 := fastpathTV.DecMapIntfInt32V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[interface{}]int64: + fastpathTV.DecMapIntfInt64V(v, fastpathCheckNilFalse, false, d) + case *map[interface{}]int64: + v2, changed2 := fastpathTV.DecMapIntfInt64V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[interface{}]float32: + fastpathTV.DecMapIntfFloat32V(v, fastpathCheckNilFalse, false, d) + case *map[interface{}]float32: + v2, changed2 := fastpathTV.DecMapIntfFloat32V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[interface{}]float64: + fastpathTV.DecMapIntfFloat64V(v, fastpathCheckNilFalse, false, d) + case *map[interface{}]float64: + v2, changed2 := fastpathTV.DecMapIntfFloat64V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[interface{}]bool: + fastpathTV.DecMapIntfBoolV(v, fastpathCheckNilFalse, false, d) + case *map[interface{}]bool: + v2, changed2 := fastpathTV.DecMapIntfBoolV(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case []string: + fastpathTV.DecSliceStringV(v, fastpathCheckNilFalse, false, d) + case *[]string: + v2, changed2 := fastpathTV.DecSliceStringV(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[string]interface{}: + fastpathTV.DecMapStringIntfV(v, fastpathCheckNilFalse, false, d) + case *map[string]interface{}: + v2, changed2 := fastpathTV.DecMapStringIntfV(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[string]string: + fastpathTV.DecMapStringStringV(v, fastpathCheckNilFalse, false, d) + case *map[string]string: + v2, changed2 := fastpathTV.DecMapStringStringV(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[string]uint: + fastpathTV.DecMapStringUintV(v, fastpathCheckNilFalse, false, d) + case *map[string]uint: + v2, changed2 := fastpathTV.DecMapStringUintV(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[string]uint8: + fastpathTV.DecMapStringUint8V(v, fastpathCheckNilFalse, false, d) + case *map[string]uint8: + v2, changed2 := fastpathTV.DecMapStringUint8V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[string]uint16: + fastpathTV.DecMapStringUint16V(v, fastpathCheckNilFalse, false, d) + case *map[string]uint16: + v2, changed2 := fastpathTV.DecMapStringUint16V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[string]uint32: + fastpathTV.DecMapStringUint32V(v, fastpathCheckNilFalse, false, d) + case *map[string]uint32: + v2, changed2 := fastpathTV.DecMapStringUint32V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[string]uint64: + fastpathTV.DecMapStringUint64V(v, fastpathCheckNilFalse, false, d) + case *map[string]uint64: + v2, changed2 := fastpathTV.DecMapStringUint64V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[string]uintptr: + fastpathTV.DecMapStringUintptrV(v, 
fastpathCheckNilFalse, false, d) + case *map[string]uintptr: + v2, changed2 := fastpathTV.DecMapStringUintptrV(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[string]int: + fastpathTV.DecMapStringIntV(v, fastpathCheckNilFalse, false, d) + case *map[string]int: + v2, changed2 := fastpathTV.DecMapStringIntV(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[string]int8: + fastpathTV.DecMapStringInt8V(v, fastpathCheckNilFalse, false, d) + case *map[string]int8: + v2, changed2 := fastpathTV.DecMapStringInt8V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[string]int16: + fastpathTV.DecMapStringInt16V(v, fastpathCheckNilFalse, false, d) + case *map[string]int16: + v2, changed2 := fastpathTV.DecMapStringInt16V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[string]int32: + fastpathTV.DecMapStringInt32V(v, fastpathCheckNilFalse, false, d) + case *map[string]int32: + v2, changed2 := fastpathTV.DecMapStringInt32V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[string]int64: + fastpathTV.DecMapStringInt64V(v, fastpathCheckNilFalse, false, d) + case *map[string]int64: + v2, changed2 := fastpathTV.DecMapStringInt64V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[string]float32: + fastpathTV.DecMapStringFloat32V(v, fastpathCheckNilFalse, false, d) + case *map[string]float32: + v2, changed2 := fastpathTV.DecMapStringFloat32V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[string]float64: + fastpathTV.DecMapStringFloat64V(v, fastpathCheckNilFalse, false, d) + case *map[string]float64: + v2, changed2 := fastpathTV.DecMapStringFloat64V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[string]bool: + fastpathTV.DecMapStringBoolV(v, fastpathCheckNilFalse, false, d) + case *map[string]bool: + v2, changed2 := fastpathTV.DecMapStringBoolV(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case []float32: + fastpathTV.DecSliceFloat32V(v, fastpathCheckNilFalse, false, d) + case *[]float32: + v2, changed2 := fastpathTV.DecSliceFloat32V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[float32]interface{}: + fastpathTV.DecMapFloat32IntfV(v, fastpathCheckNilFalse, false, d) + case *map[float32]interface{}: + v2, changed2 := fastpathTV.DecMapFloat32IntfV(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[float32]string: + fastpathTV.DecMapFloat32StringV(v, fastpathCheckNilFalse, false, d) + case *map[float32]string: + v2, changed2 := fastpathTV.DecMapFloat32StringV(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[float32]uint: + fastpathTV.DecMapFloat32UintV(v, fastpathCheckNilFalse, false, d) + case *map[float32]uint: + v2, changed2 := fastpathTV.DecMapFloat32UintV(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[float32]uint8: + fastpathTV.DecMapFloat32Uint8V(v, fastpathCheckNilFalse, false, d) + case *map[float32]uint8: + v2, changed2 := fastpathTV.DecMapFloat32Uint8V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[float32]uint16: + fastpathTV.DecMapFloat32Uint16V(v, fastpathCheckNilFalse, false, d) + case *map[float32]uint16: + v2, changed2 := fastpathTV.DecMapFloat32Uint16V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[float32]uint32: + fastpathTV.DecMapFloat32Uint32V(v, 
fastpathCheckNilFalse, false, d) + case *map[float32]uint32: + v2, changed2 := fastpathTV.DecMapFloat32Uint32V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[float32]uint64: + fastpathTV.DecMapFloat32Uint64V(v, fastpathCheckNilFalse, false, d) + case *map[float32]uint64: + v2, changed2 := fastpathTV.DecMapFloat32Uint64V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[float32]uintptr: + fastpathTV.DecMapFloat32UintptrV(v, fastpathCheckNilFalse, false, d) + case *map[float32]uintptr: + v2, changed2 := fastpathTV.DecMapFloat32UintptrV(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[float32]int: + fastpathTV.DecMapFloat32IntV(v, fastpathCheckNilFalse, false, d) + case *map[float32]int: + v2, changed2 := fastpathTV.DecMapFloat32IntV(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[float32]int8: + fastpathTV.DecMapFloat32Int8V(v, fastpathCheckNilFalse, false, d) + case *map[float32]int8: + v2, changed2 := fastpathTV.DecMapFloat32Int8V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[float32]int16: + fastpathTV.DecMapFloat32Int16V(v, fastpathCheckNilFalse, false, d) + case *map[float32]int16: + v2, changed2 := fastpathTV.DecMapFloat32Int16V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[float32]int32: + fastpathTV.DecMapFloat32Int32V(v, fastpathCheckNilFalse, false, d) + case *map[float32]int32: + v2, changed2 := fastpathTV.DecMapFloat32Int32V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[float32]int64: + fastpathTV.DecMapFloat32Int64V(v, fastpathCheckNilFalse, false, d) + case *map[float32]int64: + v2, changed2 := fastpathTV.DecMapFloat32Int64V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[float32]float32: + fastpathTV.DecMapFloat32Float32V(v, fastpathCheckNilFalse, false, d) + case *map[float32]float32: + v2, changed2 := fastpathTV.DecMapFloat32Float32V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[float32]float64: + fastpathTV.DecMapFloat32Float64V(v, fastpathCheckNilFalse, false, d) + case *map[float32]float64: + v2, changed2 := fastpathTV.DecMapFloat32Float64V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[float32]bool: + fastpathTV.DecMapFloat32BoolV(v, fastpathCheckNilFalse, false, d) + case *map[float32]bool: + v2, changed2 := fastpathTV.DecMapFloat32BoolV(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case []float64: + fastpathTV.DecSliceFloat64V(v, fastpathCheckNilFalse, false, d) + case *[]float64: + v2, changed2 := fastpathTV.DecSliceFloat64V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[float64]interface{}: + fastpathTV.DecMapFloat64IntfV(v, fastpathCheckNilFalse, false, d) + case *map[float64]interface{}: + v2, changed2 := fastpathTV.DecMapFloat64IntfV(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[float64]string: + fastpathTV.DecMapFloat64StringV(v, fastpathCheckNilFalse, false, d) + case *map[float64]string: + v2, changed2 := fastpathTV.DecMapFloat64StringV(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[float64]uint: + fastpathTV.DecMapFloat64UintV(v, fastpathCheckNilFalse, false, d) + case *map[float64]uint: + v2, changed2 := fastpathTV.DecMapFloat64UintV(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[float64]uint8: + 
fastpathTV.DecMapFloat64Uint8V(v, fastpathCheckNilFalse, false, d) + case *map[float64]uint8: + v2, changed2 := fastpathTV.DecMapFloat64Uint8V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[float64]uint16: + fastpathTV.DecMapFloat64Uint16V(v, fastpathCheckNilFalse, false, d) + case *map[float64]uint16: + v2, changed2 := fastpathTV.DecMapFloat64Uint16V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[float64]uint32: + fastpathTV.DecMapFloat64Uint32V(v, fastpathCheckNilFalse, false, d) + case *map[float64]uint32: + v2, changed2 := fastpathTV.DecMapFloat64Uint32V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[float64]uint64: + fastpathTV.DecMapFloat64Uint64V(v, fastpathCheckNilFalse, false, d) + case *map[float64]uint64: + v2, changed2 := fastpathTV.DecMapFloat64Uint64V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[float64]uintptr: + fastpathTV.DecMapFloat64UintptrV(v, fastpathCheckNilFalse, false, d) + case *map[float64]uintptr: + v2, changed2 := fastpathTV.DecMapFloat64UintptrV(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[float64]int: + fastpathTV.DecMapFloat64IntV(v, fastpathCheckNilFalse, false, d) + case *map[float64]int: + v2, changed2 := fastpathTV.DecMapFloat64IntV(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[float64]int8: + fastpathTV.DecMapFloat64Int8V(v, fastpathCheckNilFalse, false, d) + case *map[float64]int8: + v2, changed2 := fastpathTV.DecMapFloat64Int8V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[float64]int16: + fastpathTV.DecMapFloat64Int16V(v, fastpathCheckNilFalse, false, d) + case *map[float64]int16: + v2, changed2 := fastpathTV.DecMapFloat64Int16V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[float64]int32: + fastpathTV.DecMapFloat64Int32V(v, fastpathCheckNilFalse, false, d) + case *map[float64]int32: + v2, changed2 := fastpathTV.DecMapFloat64Int32V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[float64]int64: + fastpathTV.DecMapFloat64Int64V(v, fastpathCheckNilFalse, false, d) + case *map[float64]int64: + v2, changed2 := fastpathTV.DecMapFloat64Int64V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[float64]float32: + fastpathTV.DecMapFloat64Float32V(v, fastpathCheckNilFalse, false, d) + case *map[float64]float32: + v2, changed2 := fastpathTV.DecMapFloat64Float32V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[float64]float64: + fastpathTV.DecMapFloat64Float64V(v, fastpathCheckNilFalse, false, d) + case *map[float64]float64: + v2, changed2 := fastpathTV.DecMapFloat64Float64V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[float64]bool: + fastpathTV.DecMapFloat64BoolV(v, fastpathCheckNilFalse, false, d) + case *map[float64]bool: + v2, changed2 := fastpathTV.DecMapFloat64BoolV(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case []uint: + fastpathTV.DecSliceUintV(v, fastpathCheckNilFalse, false, d) + case *[]uint: + v2, changed2 := fastpathTV.DecSliceUintV(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[uint]interface{}: + fastpathTV.DecMapUintIntfV(v, fastpathCheckNilFalse, false, d) + case *map[uint]interface{}: + v2, changed2 := fastpathTV.DecMapUintIntfV(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case 
map[uint]string: + fastpathTV.DecMapUintStringV(v, fastpathCheckNilFalse, false, d) + case *map[uint]string: + v2, changed2 := fastpathTV.DecMapUintStringV(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[uint]uint: + fastpathTV.DecMapUintUintV(v, fastpathCheckNilFalse, false, d) + case *map[uint]uint: + v2, changed2 := fastpathTV.DecMapUintUintV(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[uint]uint8: + fastpathTV.DecMapUintUint8V(v, fastpathCheckNilFalse, false, d) + case *map[uint]uint8: + v2, changed2 := fastpathTV.DecMapUintUint8V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[uint]uint16: + fastpathTV.DecMapUintUint16V(v, fastpathCheckNilFalse, false, d) + case *map[uint]uint16: + v2, changed2 := fastpathTV.DecMapUintUint16V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[uint]uint32: + fastpathTV.DecMapUintUint32V(v, fastpathCheckNilFalse, false, d) + case *map[uint]uint32: + v2, changed2 := fastpathTV.DecMapUintUint32V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[uint]uint64: + fastpathTV.DecMapUintUint64V(v, fastpathCheckNilFalse, false, d) + case *map[uint]uint64: + v2, changed2 := fastpathTV.DecMapUintUint64V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[uint]uintptr: + fastpathTV.DecMapUintUintptrV(v, fastpathCheckNilFalse, false, d) + case *map[uint]uintptr: + v2, changed2 := fastpathTV.DecMapUintUintptrV(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[uint]int: + fastpathTV.DecMapUintIntV(v, fastpathCheckNilFalse, false, d) + case *map[uint]int: + v2, changed2 := fastpathTV.DecMapUintIntV(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[uint]int8: + fastpathTV.DecMapUintInt8V(v, fastpathCheckNilFalse, false, d) + case *map[uint]int8: + v2, changed2 := fastpathTV.DecMapUintInt8V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[uint]int16: + fastpathTV.DecMapUintInt16V(v, fastpathCheckNilFalse, false, d) + case *map[uint]int16: + v2, changed2 := fastpathTV.DecMapUintInt16V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[uint]int32: + fastpathTV.DecMapUintInt32V(v, fastpathCheckNilFalse, false, d) + case *map[uint]int32: + v2, changed2 := fastpathTV.DecMapUintInt32V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[uint]int64: + fastpathTV.DecMapUintInt64V(v, fastpathCheckNilFalse, false, d) + case *map[uint]int64: + v2, changed2 := fastpathTV.DecMapUintInt64V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[uint]float32: + fastpathTV.DecMapUintFloat32V(v, fastpathCheckNilFalse, false, d) + case *map[uint]float32: + v2, changed2 := fastpathTV.DecMapUintFloat32V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[uint]float64: + fastpathTV.DecMapUintFloat64V(v, fastpathCheckNilFalse, false, d) + case *map[uint]float64: + v2, changed2 := fastpathTV.DecMapUintFloat64V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[uint]bool: + fastpathTV.DecMapUintBoolV(v, fastpathCheckNilFalse, false, d) + case *map[uint]bool: + v2, changed2 := fastpathTV.DecMapUintBoolV(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[uint8]interface{}: + fastpathTV.DecMapUint8IntfV(v, fastpathCheckNilFalse, false, d) + case *map[uint8]interface{}: + v2, 
changed2 := fastpathTV.DecMapUint8IntfV(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[uint8]string: + fastpathTV.DecMapUint8StringV(v, fastpathCheckNilFalse, false, d) + case *map[uint8]string: + v2, changed2 := fastpathTV.DecMapUint8StringV(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[uint8]uint: + fastpathTV.DecMapUint8UintV(v, fastpathCheckNilFalse, false, d) + case *map[uint8]uint: + v2, changed2 := fastpathTV.DecMapUint8UintV(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[uint8]uint8: + fastpathTV.DecMapUint8Uint8V(v, fastpathCheckNilFalse, false, d) + case *map[uint8]uint8: + v2, changed2 := fastpathTV.DecMapUint8Uint8V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[uint8]uint16: + fastpathTV.DecMapUint8Uint16V(v, fastpathCheckNilFalse, false, d) + case *map[uint8]uint16: + v2, changed2 := fastpathTV.DecMapUint8Uint16V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[uint8]uint32: + fastpathTV.DecMapUint8Uint32V(v, fastpathCheckNilFalse, false, d) + case *map[uint8]uint32: + v2, changed2 := fastpathTV.DecMapUint8Uint32V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[uint8]uint64: + fastpathTV.DecMapUint8Uint64V(v, fastpathCheckNilFalse, false, d) + case *map[uint8]uint64: + v2, changed2 := fastpathTV.DecMapUint8Uint64V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[uint8]uintptr: + fastpathTV.DecMapUint8UintptrV(v, fastpathCheckNilFalse, false, d) + case *map[uint8]uintptr: + v2, changed2 := fastpathTV.DecMapUint8UintptrV(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[uint8]int: + fastpathTV.DecMapUint8IntV(v, fastpathCheckNilFalse, false, d) + case *map[uint8]int: + v2, changed2 := fastpathTV.DecMapUint8IntV(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[uint8]int8: + fastpathTV.DecMapUint8Int8V(v, fastpathCheckNilFalse, false, d) + case *map[uint8]int8: + v2, changed2 := fastpathTV.DecMapUint8Int8V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[uint8]int16: + fastpathTV.DecMapUint8Int16V(v, fastpathCheckNilFalse, false, d) + case *map[uint8]int16: + v2, changed2 := fastpathTV.DecMapUint8Int16V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[uint8]int32: + fastpathTV.DecMapUint8Int32V(v, fastpathCheckNilFalse, false, d) + case *map[uint8]int32: + v2, changed2 := fastpathTV.DecMapUint8Int32V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[uint8]int64: + fastpathTV.DecMapUint8Int64V(v, fastpathCheckNilFalse, false, d) + case *map[uint8]int64: + v2, changed2 := fastpathTV.DecMapUint8Int64V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[uint8]float32: + fastpathTV.DecMapUint8Float32V(v, fastpathCheckNilFalse, false, d) + case *map[uint8]float32: + v2, changed2 := fastpathTV.DecMapUint8Float32V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[uint8]float64: + fastpathTV.DecMapUint8Float64V(v, fastpathCheckNilFalse, false, d) + case *map[uint8]float64: + v2, changed2 := fastpathTV.DecMapUint8Float64V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[uint8]bool: + fastpathTV.DecMapUint8BoolV(v, fastpathCheckNilFalse, false, d) + case *map[uint8]bool: + v2, changed2 := fastpathTV.DecMapUint8BoolV(*v, fastpathCheckNilFalse, 
true, d) + if changed2 { + *v = v2 + } + + case []uint16: + fastpathTV.DecSliceUint16V(v, fastpathCheckNilFalse, false, d) + case *[]uint16: + v2, changed2 := fastpathTV.DecSliceUint16V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[uint16]interface{}: + fastpathTV.DecMapUint16IntfV(v, fastpathCheckNilFalse, false, d) + case *map[uint16]interface{}: + v2, changed2 := fastpathTV.DecMapUint16IntfV(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[uint16]string: + fastpathTV.DecMapUint16StringV(v, fastpathCheckNilFalse, false, d) + case *map[uint16]string: + v2, changed2 := fastpathTV.DecMapUint16StringV(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[uint16]uint: + fastpathTV.DecMapUint16UintV(v, fastpathCheckNilFalse, false, d) + case *map[uint16]uint: + v2, changed2 := fastpathTV.DecMapUint16UintV(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[uint16]uint8: + fastpathTV.DecMapUint16Uint8V(v, fastpathCheckNilFalse, false, d) + case *map[uint16]uint8: + v2, changed2 := fastpathTV.DecMapUint16Uint8V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[uint16]uint16: + fastpathTV.DecMapUint16Uint16V(v, fastpathCheckNilFalse, false, d) + case *map[uint16]uint16: + v2, changed2 := fastpathTV.DecMapUint16Uint16V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[uint16]uint32: + fastpathTV.DecMapUint16Uint32V(v, fastpathCheckNilFalse, false, d) + case *map[uint16]uint32: + v2, changed2 := fastpathTV.DecMapUint16Uint32V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[uint16]uint64: + fastpathTV.DecMapUint16Uint64V(v, fastpathCheckNilFalse, false, d) + case *map[uint16]uint64: + v2, changed2 := fastpathTV.DecMapUint16Uint64V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[uint16]uintptr: + fastpathTV.DecMapUint16UintptrV(v, fastpathCheckNilFalse, false, d) + case *map[uint16]uintptr: + v2, changed2 := fastpathTV.DecMapUint16UintptrV(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[uint16]int: + fastpathTV.DecMapUint16IntV(v, fastpathCheckNilFalse, false, d) + case *map[uint16]int: + v2, changed2 := fastpathTV.DecMapUint16IntV(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[uint16]int8: + fastpathTV.DecMapUint16Int8V(v, fastpathCheckNilFalse, false, d) + case *map[uint16]int8: + v2, changed2 := fastpathTV.DecMapUint16Int8V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[uint16]int16: + fastpathTV.DecMapUint16Int16V(v, fastpathCheckNilFalse, false, d) + case *map[uint16]int16: + v2, changed2 := fastpathTV.DecMapUint16Int16V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[uint16]int32: + fastpathTV.DecMapUint16Int32V(v, fastpathCheckNilFalse, false, d) + case *map[uint16]int32: + v2, changed2 := fastpathTV.DecMapUint16Int32V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[uint16]int64: + fastpathTV.DecMapUint16Int64V(v, fastpathCheckNilFalse, false, d) + case *map[uint16]int64: + v2, changed2 := fastpathTV.DecMapUint16Int64V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[uint16]float32: + fastpathTV.DecMapUint16Float32V(v, fastpathCheckNilFalse, false, d) + case *map[uint16]float32: + v2, changed2 := fastpathTV.DecMapUint16Float32V(*v, fastpathCheckNilFalse, true, d) + if changed2 { 
+ *v = v2 + } + + case map[uint16]float64: + fastpathTV.DecMapUint16Float64V(v, fastpathCheckNilFalse, false, d) + case *map[uint16]float64: + v2, changed2 := fastpathTV.DecMapUint16Float64V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[uint16]bool: + fastpathTV.DecMapUint16BoolV(v, fastpathCheckNilFalse, false, d) + case *map[uint16]bool: + v2, changed2 := fastpathTV.DecMapUint16BoolV(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case []uint32: + fastpathTV.DecSliceUint32V(v, fastpathCheckNilFalse, false, d) + case *[]uint32: + v2, changed2 := fastpathTV.DecSliceUint32V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[uint32]interface{}: + fastpathTV.DecMapUint32IntfV(v, fastpathCheckNilFalse, false, d) + case *map[uint32]interface{}: + v2, changed2 := fastpathTV.DecMapUint32IntfV(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[uint32]string: + fastpathTV.DecMapUint32StringV(v, fastpathCheckNilFalse, false, d) + case *map[uint32]string: + v2, changed2 := fastpathTV.DecMapUint32StringV(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[uint32]uint: + fastpathTV.DecMapUint32UintV(v, fastpathCheckNilFalse, false, d) + case *map[uint32]uint: + v2, changed2 := fastpathTV.DecMapUint32UintV(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[uint32]uint8: + fastpathTV.DecMapUint32Uint8V(v, fastpathCheckNilFalse, false, d) + case *map[uint32]uint8: + v2, changed2 := fastpathTV.DecMapUint32Uint8V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[uint32]uint16: + fastpathTV.DecMapUint32Uint16V(v, fastpathCheckNilFalse, false, d) + case *map[uint32]uint16: + v2, changed2 := fastpathTV.DecMapUint32Uint16V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[uint32]uint32: + fastpathTV.DecMapUint32Uint32V(v, fastpathCheckNilFalse, false, d) + case *map[uint32]uint32: + v2, changed2 := fastpathTV.DecMapUint32Uint32V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[uint32]uint64: + fastpathTV.DecMapUint32Uint64V(v, fastpathCheckNilFalse, false, d) + case *map[uint32]uint64: + v2, changed2 := fastpathTV.DecMapUint32Uint64V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[uint32]uintptr: + fastpathTV.DecMapUint32UintptrV(v, fastpathCheckNilFalse, false, d) + case *map[uint32]uintptr: + v2, changed2 := fastpathTV.DecMapUint32UintptrV(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[uint32]int: + fastpathTV.DecMapUint32IntV(v, fastpathCheckNilFalse, false, d) + case *map[uint32]int: + v2, changed2 := fastpathTV.DecMapUint32IntV(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[uint32]int8: + fastpathTV.DecMapUint32Int8V(v, fastpathCheckNilFalse, false, d) + case *map[uint32]int8: + v2, changed2 := fastpathTV.DecMapUint32Int8V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[uint32]int16: + fastpathTV.DecMapUint32Int16V(v, fastpathCheckNilFalse, false, d) + case *map[uint32]int16: + v2, changed2 := fastpathTV.DecMapUint32Int16V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[uint32]int32: + fastpathTV.DecMapUint32Int32V(v, fastpathCheckNilFalse, false, d) + case *map[uint32]int32: + v2, changed2 := fastpathTV.DecMapUint32Int32V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case 
map[uint32]int64: + fastpathTV.DecMapUint32Int64V(v, fastpathCheckNilFalse, false, d) + case *map[uint32]int64: + v2, changed2 := fastpathTV.DecMapUint32Int64V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[uint32]float32: + fastpathTV.DecMapUint32Float32V(v, fastpathCheckNilFalse, false, d) + case *map[uint32]float32: + v2, changed2 := fastpathTV.DecMapUint32Float32V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[uint32]float64: + fastpathTV.DecMapUint32Float64V(v, fastpathCheckNilFalse, false, d) + case *map[uint32]float64: + v2, changed2 := fastpathTV.DecMapUint32Float64V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[uint32]bool: + fastpathTV.DecMapUint32BoolV(v, fastpathCheckNilFalse, false, d) + case *map[uint32]bool: + v2, changed2 := fastpathTV.DecMapUint32BoolV(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case []uint64: + fastpathTV.DecSliceUint64V(v, fastpathCheckNilFalse, false, d) + case *[]uint64: + v2, changed2 := fastpathTV.DecSliceUint64V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[uint64]interface{}: + fastpathTV.DecMapUint64IntfV(v, fastpathCheckNilFalse, false, d) + case *map[uint64]interface{}: + v2, changed2 := fastpathTV.DecMapUint64IntfV(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[uint64]string: + fastpathTV.DecMapUint64StringV(v, fastpathCheckNilFalse, false, d) + case *map[uint64]string: + v2, changed2 := fastpathTV.DecMapUint64StringV(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[uint64]uint: + fastpathTV.DecMapUint64UintV(v, fastpathCheckNilFalse, false, d) + case *map[uint64]uint: + v2, changed2 := fastpathTV.DecMapUint64UintV(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[uint64]uint8: + fastpathTV.DecMapUint64Uint8V(v, fastpathCheckNilFalse, false, d) + case *map[uint64]uint8: + v2, changed2 := fastpathTV.DecMapUint64Uint8V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[uint64]uint16: + fastpathTV.DecMapUint64Uint16V(v, fastpathCheckNilFalse, false, d) + case *map[uint64]uint16: + v2, changed2 := fastpathTV.DecMapUint64Uint16V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[uint64]uint32: + fastpathTV.DecMapUint64Uint32V(v, fastpathCheckNilFalse, false, d) + case *map[uint64]uint32: + v2, changed2 := fastpathTV.DecMapUint64Uint32V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[uint64]uint64: + fastpathTV.DecMapUint64Uint64V(v, fastpathCheckNilFalse, false, d) + case *map[uint64]uint64: + v2, changed2 := fastpathTV.DecMapUint64Uint64V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[uint64]uintptr: + fastpathTV.DecMapUint64UintptrV(v, fastpathCheckNilFalse, false, d) + case *map[uint64]uintptr: + v2, changed2 := fastpathTV.DecMapUint64UintptrV(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[uint64]int: + fastpathTV.DecMapUint64IntV(v, fastpathCheckNilFalse, false, d) + case *map[uint64]int: + v2, changed2 := fastpathTV.DecMapUint64IntV(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[uint64]int8: + fastpathTV.DecMapUint64Int8V(v, fastpathCheckNilFalse, false, d) + case *map[uint64]int8: + v2, changed2 := fastpathTV.DecMapUint64Int8V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[uint64]int16: + 
fastpathTV.DecMapUint64Int16V(v, fastpathCheckNilFalse, false, d) + case *map[uint64]int16: + v2, changed2 := fastpathTV.DecMapUint64Int16V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[uint64]int32: + fastpathTV.DecMapUint64Int32V(v, fastpathCheckNilFalse, false, d) + case *map[uint64]int32: + v2, changed2 := fastpathTV.DecMapUint64Int32V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[uint64]int64: + fastpathTV.DecMapUint64Int64V(v, fastpathCheckNilFalse, false, d) + case *map[uint64]int64: + v2, changed2 := fastpathTV.DecMapUint64Int64V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[uint64]float32: + fastpathTV.DecMapUint64Float32V(v, fastpathCheckNilFalse, false, d) + case *map[uint64]float32: + v2, changed2 := fastpathTV.DecMapUint64Float32V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[uint64]float64: + fastpathTV.DecMapUint64Float64V(v, fastpathCheckNilFalse, false, d) + case *map[uint64]float64: + v2, changed2 := fastpathTV.DecMapUint64Float64V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[uint64]bool: + fastpathTV.DecMapUint64BoolV(v, fastpathCheckNilFalse, false, d) + case *map[uint64]bool: + v2, changed2 := fastpathTV.DecMapUint64BoolV(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case []uintptr: + fastpathTV.DecSliceUintptrV(v, fastpathCheckNilFalse, false, d) + case *[]uintptr: + v2, changed2 := fastpathTV.DecSliceUintptrV(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[uintptr]interface{}: + fastpathTV.DecMapUintptrIntfV(v, fastpathCheckNilFalse, false, d) + case *map[uintptr]interface{}: + v2, changed2 := fastpathTV.DecMapUintptrIntfV(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[uintptr]string: + fastpathTV.DecMapUintptrStringV(v, fastpathCheckNilFalse, false, d) + case *map[uintptr]string: + v2, changed2 := fastpathTV.DecMapUintptrStringV(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[uintptr]uint: + fastpathTV.DecMapUintptrUintV(v, fastpathCheckNilFalse, false, d) + case *map[uintptr]uint: + v2, changed2 := fastpathTV.DecMapUintptrUintV(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[uintptr]uint8: + fastpathTV.DecMapUintptrUint8V(v, fastpathCheckNilFalse, false, d) + case *map[uintptr]uint8: + v2, changed2 := fastpathTV.DecMapUintptrUint8V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[uintptr]uint16: + fastpathTV.DecMapUintptrUint16V(v, fastpathCheckNilFalse, false, d) + case *map[uintptr]uint16: + v2, changed2 := fastpathTV.DecMapUintptrUint16V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[uintptr]uint32: + fastpathTV.DecMapUintptrUint32V(v, fastpathCheckNilFalse, false, d) + case *map[uintptr]uint32: + v2, changed2 := fastpathTV.DecMapUintptrUint32V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[uintptr]uint64: + fastpathTV.DecMapUintptrUint64V(v, fastpathCheckNilFalse, false, d) + case *map[uintptr]uint64: + v2, changed2 := fastpathTV.DecMapUintptrUint64V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[uintptr]uintptr: + fastpathTV.DecMapUintptrUintptrV(v, fastpathCheckNilFalse, false, d) + case *map[uintptr]uintptr: + v2, changed2 := fastpathTV.DecMapUintptrUintptrV(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + 
+ case map[uintptr]int: + fastpathTV.DecMapUintptrIntV(v, fastpathCheckNilFalse, false, d) + case *map[uintptr]int: + v2, changed2 := fastpathTV.DecMapUintptrIntV(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[uintptr]int8: + fastpathTV.DecMapUintptrInt8V(v, fastpathCheckNilFalse, false, d) + case *map[uintptr]int8: + v2, changed2 := fastpathTV.DecMapUintptrInt8V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[uintptr]int16: + fastpathTV.DecMapUintptrInt16V(v, fastpathCheckNilFalse, false, d) + case *map[uintptr]int16: + v2, changed2 := fastpathTV.DecMapUintptrInt16V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[uintptr]int32: + fastpathTV.DecMapUintptrInt32V(v, fastpathCheckNilFalse, false, d) + case *map[uintptr]int32: + v2, changed2 := fastpathTV.DecMapUintptrInt32V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[uintptr]int64: + fastpathTV.DecMapUintptrInt64V(v, fastpathCheckNilFalse, false, d) + case *map[uintptr]int64: + v2, changed2 := fastpathTV.DecMapUintptrInt64V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[uintptr]float32: + fastpathTV.DecMapUintptrFloat32V(v, fastpathCheckNilFalse, false, d) + case *map[uintptr]float32: + v2, changed2 := fastpathTV.DecMapUintptrFloat32V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[uintptr]float64: + fastpathTV.DecMapUintptrFloat64V(v, fastpathCheckNilFalse, false, d) + case *map[uintptr]float64: + v2, changed2 := fastpathTV.DecMapUintptrFloat64V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[uintptr]bool: + fastpathTV.DecMapUintptrBoolV(v, fastpathCheckNilFalse, false, d) + case *map[uintptr]bool: + v2, changed2 := fastpathTV.DecMapUintptrBoolV(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case []int: + fastpathTV.DecSliceIntV(v, fastpathCheckNilFalse, false, d) + case *[]int: + v2, changed2 := fastpathTV.DecSliceIntV(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[int]interface{}: + fastpathTV.DecMapIntIntfV(v, fastpathCheckNilFalse, false, d) + case *map[int]interface{}: + v2, changed2 := fastpathTV.DecMapIntIntfV(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[int]string: + fastpathTV.DecMapIntStringV(v, fastpathCheckNilFalse, false, d) + case *map[int]string: + v2, changed2 := fastpathTV.DecMapIntStringV(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[int]uint: + fastpathTV.DecMapIntUintV(v, fastpathCheckNilFalse, false, d) + case *map[int]uint: + v2, changed2 := fastpathTV.DecMapIntUintV(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[int]uint8: + fastpathTV.DecMapIntUint8V(v, fastpathCheckNilFalse, false, d) + case *map[int]uint8: + v2, changed2 := fastpathTV.DecMapIntUint8V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[int]uint16: + fastpathTV.DecMapIntUint16V(v, fastpathCheckNilFalse, false, d) + case *map[int]uint16: + v2, changed2 := fastpathTV.DecMapIntUint16V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[int]uint32: + fastpathTV.DecMapIntUint32V(v, fastpathCheckNilFalse, false, d) + case *map[int]uint32: + v2, changed2 := fastpathTV.DecMapIntUint32V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[int]uint64: + fastpathTV.DecMapIntUint64V(v, fastpathCheckNilFalse, false, 
d) + case *map[int]uint64: + v2, changed2 := fastpathTV.DecMapIntUint64V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[int]uintptr: + fastpathTV.DecMapIntUintptrV(v, fastpathCheckNilFalse, false, d) + case *map[int]uintptr: + v2, changed2 := fastpathTV.DecMapIntUintptrV(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[int]int: + fastpathTV.DecMapIntIntV(v, fastpathCheckNilFalse, false, d) + case *map[int]int: + v2, changed2 := fastpathTV.DecMapIntIntV(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[int]int8: + fastpathTV.DecMapIntInt8V(v, fastpathCheckNilFalse, false, d) + case *map[int]int8: + v2, changed2 := fastpathTV.DecMapIntInt8V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[int]int16: + fastpathTV.DecMapIntInt16V(v, fastpathCheckNilFalse, false, d) + case *map[int]int16: + v2, changed2 := fastpathTV.DecMapIntInt16V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[int]int32: + fastpathTV.DecMapIntInt32V(v, fastpathCheckNilFalse, false, d) + case *map[int]int32: + v2, changed2 := fastpathTV.DecMapIntInt32V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[int]int64: + fastpathTV.DecMapIntInt64V(v, fastpathCheckNilFalse, false, d) + case *map[int]int64: + v2, changed2 := fastpathTV.DecMapIntInt64V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[int]float32: + fastpathTV.DecMapIntFloat32V(v, fastpathCheckNilFalse, false, d) + case *map[int]float32: + v2, changed2 := fastpathTV.DecMapIntFloat32V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[int]float64: + fastpathTV.DecMapIntFloat64V(v, fastpathCheckNilFalse, false, d) + case *map[int]float64: + v2, changed2 := fastpathTV.DecMapIntFloat64V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[int]bool: + fastpathTV.DecMapIntBoolV(v, fastpathCheckNilFalse, false, d) + case *map[int]bool: + v2, changed2 := fastpathTV.DecMapIntBoolV(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case []int8: + fastpathTV.DecSliceInt8V(v, fastpathCheckNilFalse, false, d) + case *[]int8: + v2, changed2 := fastpathTV.DecSliceInt8V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[int8]interface{}: + fastpathTV.DecMapInt8IntfV(v, fastpathCheckNilFalse, false, d) + case *map[int8]interface{}: + v2, changed2 := fastpathTV.DecMapInt8IntfV(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[int8]string: + fastpathTV.DecMapInt8StringV(v, fastpathCheckNilFalse, false, d) + case *map[int8]string: + v2, changed2 := fastpathTV.DecMapInt8StringV(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[int8]uint: + fastpathTV.DecMapInt8UintV(v, fastpathCheckNilFalse, false, d) + case *map[int8]uint: + v2, changed2 := fastpathTV.DecMapInt8UintV(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[int8]uint8: + fastpathTV.DecMapInt8Uint8V(v, fastpathCheckNilFalse, false, d) + case *map[int8]uint8: + v2, changed2 := fastpathTV.DecMapInt8Uint8V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[int8]uint16: + fastpathTV.DecMapInt8Uint16V(v, fastpathCheckNilFalse, false, d) + case *map[int8]uint16: + v2, changed2 := fastpathTV.DecMapInt8Uint16V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[int8]uint32: + 
fastpathTV.DecMapInt8Uint32V(v, fastpathCheckNilFalse, false, d) + case *map[int8]uint32: + v2, changed2 := fastpathTV.DecMapInt8Uint32V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[int8]uint64: + fastpathTV.DecMapInt8Uint64V(v, fastpathCheckNilFalse, false, d) + case *map[int8]uint64: + v2, changed2 := fastpathTV.DecMapInt8Uint64V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[int8]uintptr: + fastpathTV.DecMapInt8UintptrV(v, fastpathCheckNilFalse, false, d) + case *map[int8]uintptr: + v2, changed2 := fastpathTV.DecMapInt8UintptrV(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[int8]int: + fastpathTV.DecMapInt8IntV(v, fastpathCheckNilFalse, false, d) + case *map[int8]int: + v2, changed2 := fastpathTV.DecMapInt8IntV(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[int8]int8: + fastpathTV.DecMapInt8Int8V(v, fastpathCheckNilFalse, false, d) + case *map[int8]int8: + v2, changed2 := fastpathTV.DecMapInt8Int8V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[int8]int16: + fastpathTV.DecMapInt8Int16V(v, fastpathCheckNilFalse, false, d) + case *map[int8]int16: + v2, changed2 := fastpathTV.DecMapInt8Int16V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[int8]int32: + fastpathTV.DecMapInt8Int32V(v, fastpathCheckNilFalse, false, d) + case *map[int8]int32: + v2, changed2 := fastpathTV.DecMapInt8Int32V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[int8]int64: + fastpathTV.DecMapInt8Int64V(v, fastpathCheckNilFalse, false, d) + case *map[int8]int64: + v2, changed2 := fastpathTV.DecMapInt8Int64V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[int8]float32: + fastpathTV.DecMapInt8Float32V(v, fastpathCheckNilFalse, false, d) + case *map[int8]float32: + v2, changed2 := fastpathTV.DecMapInt8Float32V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[int8]float64: + fastpathTV.DecMapInt8Float64V(v, fastpathCheckNilFalse, false, d) + case *map[int8]float64: + v2, changed2 := fastpathTV.DecMapInt8Float64V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[int8]bool: + fastpathTV.DecMapInt8BoolV(v, fastpathCheckNilFalse, false, d) + case *map[int8]bool: + v2, changed2 := fastpathTV.DecMapInt8BoolV(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case []int16: + fastpathTV.DecSliceInt16V(v, fastpathCheckNilFalse, false, d) + case *[]int16: + v2, changed2 := fastpathTV.DecSliceInt16V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[int16]interface{}: + fastpathTV.DecMapInt16IntfV(v, fastpathCheckNilFalse, false, d) + case *map[int16]interface{}: + v2, changed2 := fastpathTV.DecMapInt16IntfV(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[int16]string: + fastpathTV.DecMapInt16StringV(v, fastpathCheckNilFalse, false, d) + case *map[int16]string: + v2, changed2 := fastpathTV.DecMapInt16StringV(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[int16]uint: + fastpathTV.DecMapInt16UintV(v, fastpathCheckNilFalse, false, d) + case *map[int16]uint: + v2, changed2 := fastpathTV.DecMapInt16UintV(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[int16]uint8: + fastpathTV.DecMapInt16Uint8V(v, fastpathCheckNilFalse, false, d) + case *map[int16]uint8: + v2, changed2 := 
fastpathTV.DecMapInt16Uint8V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[int16]uint16: + fastpathTV.DecMapInt16Uint16V(v, fastpathCheckNilFalse, false, d) + case *map[int16]uint16: + v2, changed2 := fastpathTV.DecMapInt16Uint16V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[int16]uint32: + fastpathTV.DecMapInt16Uint32V(v, fastpathCheckNilFalse, false, d) + case *map[int16]uint32: + v2, changed2 := fastpathTV.DecMapInt16Uint32V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[int16]uint64: + fastpathTV.DecMapInt16Uint64V(v, fastpathCheckNilFalse, false, d) + case *map[int16]uint64: + v2, changed2 := fastpathTV.DecMapInt16Uint64V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[int16]uintptr: + fastpathTV.DecMapInt16UintptrV(v, fastpathCheckNilFalse, false, d) + case *map[int16]uintptr: + v2, changed2 := fastpathTV.DecMapInt16UintptrV(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[int16]int: + fastpathTV.DecMapInt16IntV(v, fastpathCheckNilFalse, false, d) + case *map[int16]int: + v2, changed2 := fastpathTV.DecMapInt16IntV(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[int16]int8: + fastpathTV.DecMapInt16Int8V(v, fastpathCheckNilFalse, false, d) + case *map[int16]int8: + v2, changed2 := fastpathTV.DecMapInt16Int8V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[int16]int16: + fastpathTV.DecMapInt16Int16V(v, fastpathCheckNilFalse, false, d) + case *map[int16]int16: + v2, changed2 := fastpathTV.DecMapInt16Int16V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[int16]int32: + fastpathTV.DecMapInt16Int32V(v, fastpathCheckNilFalse, false, d) + case *map[int16]int32: + v2, changed2 := fastpathTV.DecMapInt16Int32V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[int16]int64: + fastpathTV.DecMapInt16Int64V(v, fastpathCheckNilFalse, false, d) + case *map[int16]int64: + v2, changed2 := fastpathTV.DecMapInt16Int64V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[int16]float32: + fastpathTV.DecMapInt16Float32V(v, fastpathCheckNilFalse, false, d) + case *map[int16]float32: + v2, changed2 := fastpathTV.DecMapInt16Float32V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[int16]float64: + fastpathTV.DecMapInt16Float64V(v, fastpathCheckNilFalse, false, d) + case *map[int16]float64: + v2, changed2 := fastpathTV.DecMapInt16Float64V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[int16]bool: + fastpathTV.DecMapInt16BoolV(v, fastpathCheckNilFalse, false, d) + case *map[int16]bool: + v2, changed2 := fastpathTV.DecMapInt16BoolV(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case []int32: + fastpathTV.DecSliceInt32V(v, fastpathCheckNilFalse, false, d) + case *[]int32: + v2, changed2 := fastpathTV.DecSliceInt32V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[int32]interface{}: + fastpathTV.DecMapInt32IntfV(v, fastpathCheckNilFalse, false, d) + case *map[int32]interface{}: + v2, changed2 := fastpathTV.DecMapInt32IntfV(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[int32]string: + fastpathTV.DecMapInt32StringV(v, fastpathCheckNilFalse, false, d) + case *map[int32]string: + v2, changed2 := fastpathTV.DecMapInt32StringV(*v, fastpathCheckNilFalse, true, d) + if 
changed2 { + *v = v2 + } + + case map[int32]uint: + fastpathTV.DecMapInt32UintV(v, fastpathCheckNilFalse, false, d) + case *map[int32]uint: + v2, changed2 := fastpathTV.DecMapInt32UintV(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[int32]uint8: + fastpathTV.DecMapInt32Uint8V(v, fastpathCheckNilFalse, false, d) + case *map[int32]uint8: + v2, changed2 := fastpathTV.DecMapInt32Uint8V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[int32]uint16: + fastpathTV.DecMapInt32Uint16V(v, fastpathCheckNilFalse, false, d) + case *map[int32]uint16: + v2, changed2 := fastpathTV.DecMapInt32Uint16V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[int32]uint32: + fastpathTV.DecMapInt32Uint32V(v, fastpathCheckNilFalse, false, d) + case *map[int32]uint32: + v2, changed2 := fastpathTV.DecMapInt32Uint32V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[int32]uint64: + fastpathTV.DecMapInt32Uint64V(v, fastpathCheckNilFalse, false, d) + case *map[int32]uint64: + v2, changed2 := fastpathTV.DecMapInt32Uint64V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[int32]uintptr: + fastpathTV.DecMapInt32UintptrV(v, fastpathCheckNilFalse, false, d) + case *map[int32]uintptr: + v2, changed2 := fastpathTV.DecMapInt32UintptrV(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[int32]int: + fastpathTV.DecMapInt32IntV(v, fastpathCheckNilFalse, false, d) + case *map[int32]int: + v2, changed2 := fastpathTV.DecMapInt32IntV(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[int32]int8: + fastpathTV.DecMapInt32Int8V(v, fastpathCheckNilFalse, false, d) + case *map[int32]int8: + v2, changed2 := fastpathTV.DecMapInt32Int8V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[int32]int16: + fastpathTV.DecMapInt32Int16V(v, fastpathCheckNilFalse, false, d) + case *map[int32]int16: + v2, changed2 := fastpathTV.DecMapInt32Int16V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[int32]int32: + fastpathTV.DecMapInt32Int32V(v, fastpathCheckNilFalse, false, d) + case *map[int32]int32: + v2, changed2 := fastpathTV.DecMapInt32Int32V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[int32]int64: + fastpathTV.DecMapInt32Int64V(v, fastpathCheckNilFalse, false, d) + case *map[int32]int64: + v2, changed2 := fastpathTV.DecMapInt32Int64V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[int32]float32: + fastpathTV.DecMapInt32Float32V(v, fastpathCheckNilFalse, false, d) + case *map[int32]float32: + v2, changed2 := fastpathTV.DecMapInt32Float32V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[int32]float64: + fastpathTV.DecMapInt32Float64V(v, fastpathCheckNilFalse, false, d) + case *map[int32]float64: + v2, changed2 := fastpathTV.DecMapInt32Float64V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[int32]bool: + fastpathTV.DecMapInt32BoolV(v, fastpathCheckNilFalse, false, d) + case *map[int32]bool: + v2, changed2 := fastpathTV.DecMapInt32BoolV(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case []int64: + fastpathTV.DecSliceInt64V(v, fastpathCheckNilFalse, false, d) + case *[]int64: + v2, changed2 := fastpathTV.DecSliceInt64V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[int64]interface{}: + fastpathTV.DecMapInt64IntfV(v, 
fastpathCheckNilFalse, false, d) + case *map[int64]interface{}: + v2, changed2 := fastpathTV.DecMapInt64IntfV(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[int64]string: + fastpathTV.DecMapInt64StringV(v, fastpathCheckNilFalse, false, d) + case *map[int64]string: + v2, changed2 := fastpathTV.DecMapInt64StringV(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[int64]uint: + fastpathTV.DecMapInt64UintV(v, fastpathCheckNilFalse, false, d) + case *map[int64]uint: + v2, changed2 := fastpathTV.DecMapInt64UintV(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[int64]uint8: + fastpathTV.DecMapInt64Uint8V(v, fastpathCheckNilFalse, false, d) + case *map[int64]uint8: + v2, changed2 := fastpathTV.DecMapInt64Uint8V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[int64]uint16: + fastpathTV.DecMapInt64Uint16V(v, fastpathCheckNilFalse, false, d) + case *map[int64]uint16: + v2, changed2 := fastpathTV.DecMapInt64Uint16V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[int64]uint32: + fastpathTV.DecMapInt64Uint32V(v, fastpathCheckNilFalse, false, d) + case *map[int64]uint32: + v2, changed2 := fastpathTV.DecMapInt64Uint32V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[int64]uint64: + fastpathTV.DecMapInt64Uint64V(v, fastpathCheckNilFalse, false, d) + case *map[int64]uint64: + v2, changed2 := fastpathTV.DecMapInt64Uint64V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[int64]uintptr: + fastpathTV.DecMapInt64UintptrV(v, fastpathCheckNilFalse, false, d) + case *map[int64]uintptr: + v2, changed2 := fastpathTV.DecMapInt64UintptrV(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[int64]int: + fastpathTV.DecMapInt64IntV(v, fastpathCheckNilFalse, false, d) + case *map[int64]int: + v2, changed2 := fastpathTV.DecMapInt64IntV(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[int64]int8: + fastpathTV.DecMapInt64Int8V(v, fastpathCheckNilFalse, false, d) + case *map[int64]int8: + v2, changed2 := fastpathTV.DecMapInt64Int8V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[int64]int16: + fastpathTV.DecMapInt64Int16V(v, fastpathCheckNilFalse, false, d) + case *map[int64]int16: + v2, changed2 := fastpathTV.DecMapInt64Int16V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[int64]int32: + fastpathTV.DecMapInt64Int32V(v, fastpathCheckNilFalse, false, d) + case *map[int64]int32: + v2, changed2 := fastpathTV.DecMapInt64Int32V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[int64]int64: + fastpathTV.DecMapInt64Int64V(v, fastpathCheckNilFalse, false, d) + case *map[int64]int64: + v2, changed2 := fastpathTV.DecMapInt64Int64V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[int64]float32: + fastpathTV.DecMapInt64Float32V(v, fastpathCheckNilFalse, false, d) + case *map[int64]float32: + v2, changed2 := fastpathTV.DecMapInt64Float32V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[int64]float64: + fastpathTV.DecMapInt64Float64V(v, fastpathCheckNilFalse, false, d) + case *map[int64]float64: + v2, changed2 := fastpathTV.DecMapInt64Float64V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[int64]bool: + fastpathTV.DecMapInt64BoolV(v, fastpathCheckNilFalse, false, d) + case *map[int64]bool: + v2, 
changed2 := fastpathTV.DecMapInt64BoolV(*v, fastpathCheckNilFalse, true, d)
+		if changed2 {
+			*v = v2
+		}
+
+	case []bool:
+		fastpathTV.DecSliceBoolV(v, fastpathCheckNilFalse, false, d)
+	case *[]bool:
+		v2, changed2 := fastpathTV.DecSliceBoolV(*v, fastpathCheckNilFalse, true, d)
+		if changed2 {
+			*v = v2
+		}
+
+	case map[bool]interface{}:
+		fastpathTV.DecMapBoolIntfV(v, fastpathCheckNilFalse, false, d)
+	case *map[bool]interface{}:
+		v2, changed2 := fastpathTV.DecMapBoolIntfV(*v, fastpathCheckNilFalse, true, d)
+		if changed2 {
+			*v = v2
+		}
+
+	case map[bool]string:
+		fastpathTV.DecMapBoolStringV(v, fastpathCheckNilFalse, false, d)
+	case *map[bool]string:
+		v2, changed2 := fastpathTV.DecMapBoolStringV(*v, fastpathCheckNilFalse, true, d)
+		if changed2 {
+			*v = v2
+		}
+
+	case map[bool]uint:
+		fastpathTV.DecMapBoolUintV(v, fastpathCheckNilFalse, false, d)
+	case *map[bool]uint:
+		v2, changed2 := fastpathTV.DecMapBoolUintV(*v, fastpathCheckNilFalse, true, d)
+		if changed2 {
+			*v = v2
+		}
+
+	case map[bool]uint8:
+		fastpathTV.DecMapBoolUint8V(v, fastpathCheckNilFalse, false, d)
+	case *map[bool]uint8:
+		v2, changed2 := fastpathTV.DecMapBoolUint8V(*v, fastpathCheckNilFalse, true, d)
+		if changed2 {
+			*v = v2
+		}
+
+	case map[bool]uint16:
+		fastpathTV.DecMapBoolUint16V(v, fastpathCheckNilFalse, false, d)
+	case *map[bool]uint16:
+		v2, changed2 := fastpathTV.DecMapBoolUint16V(*v, fastpathCheckNilFalse, true, d)
+		if changed2 {
+			*v = v2
+		}
+
+	case map[bool]uint32:
+		fastpathTV.DecMapBoolUint32V(v, fastpathCheckNilFalse, false, d)
+	case *map[bool]uint32:
+		v2, changed2 := fastpathTV.DecMapBoolUint32V(*v, fastpathCheckNilFalse, true, d)
+		if changed2 {
+			*v = v2
+		}
+
+	case map[bool]uint64:
+		fastpathTV.DecMapBoolUint64V(v, fastpathCheckNilFalse, false, d)
+	case *map[bool]uint64:
+		v2, changed2 := fastpathTV.DecMapBoolUint64V(*v, fastpathCheckNilFalse, true, d)
+		if changed2 {
+			*v = v2
+		}
+
+	case map[bool]uintptr:
+		fastpathTV.DecMapBoolUintptrV(v, fastpathCheckNilFalse, false, d)
+	case *map[bool]uintptr:
+		v2, changed2 := fastpathTV.DecMapBoolUintptrV(*v, fastpathCheckNilFalse, true, d)
+		if changed2 {
+			*v = v2
+		}
+
+	case map[bool]int:
+		fastpathTV.DecMapBoolIntV(v, fastpathCheckNilFalse, false, d)
+	case *map[bool]int:
+		v2, changed2 := fastpathTV.DecMapBoolIntV(*v, fastpathCheckNilFalse, true, d)
+		if changed2 {
+			*v = v2
+		}
+
+	case map[bool]int8:
+		fastpathTV.DecMapBoolInt8V(v, fastpathCheckNilFalse, false, d)
+	case *map[bool]int8:
+		v2, changed2 := fastpathTV.DecMapBoolInt8V(*v, fastpathCheckNilFalse, true, d)
+		if changed2 {
+			*v = v2
+		}
+
+	case map[bool]int16:
+		fastpathTV.DecMapBoolInt16V(v, fastpathCheckNilFalse, false, d)
+	case *map[bool]int16:
+		v2, changed2 := fastpathTV.DecMapBoolInt16V(*v, fastpathCheckNilFalse, true, d)
+		if changed2 {
+			*v = v2
+		}
+
+	case map[bool]int32:
+		fastpathTV.DecMapBoolInt32V(v, fastpathCheckNilFalse, false, d)
+	case *map[bool]int32:
+		v2, changed2 := fastpathTV.DecMapBoolInt32V(*v, fastpathCheckNilFalse, true, d)
+		if changed2 {
+			*v = v2
+		}
+
+	case map[bool]int64:
+		fastpathTV.DecMapBoolInt64V(v, fastpathCheckNilFalse, false, d)
+	case *map[bool]int64:
+		v2, changed2 := fastpathTV.DecMapBoolInt64V(*v, fastpathCheckNilFalse, true, d)
+		if changed2 {
+			*v = v2
+		}
+
+	case map[bool]float32:
+		fastpathTV.DecMapBoolFloat32V(v, fastpathCheckNilFalse, false, d)
+	case *map[bool]float32:
+		v2, changed2 := fastpathTV.DecMapBoolFloat32V(*v, fastpathCheckNilFalse, true, d)
+		if changed2 {
+			*v = v2
+		}
+
+	case map[bool]float64:
+		fastpathTV.DecMapBoolFloat64V(v, fastpathCheckNilFalse, false, d)
+	case *map[bool]float64:
+		v2, changed2 := fastpathTV.DecMapBoolFloat64V(*v, fastpathCheckNilFalse, true, d)
+		if changed2 {
+			*v = v2
+		}
+
+	case map[bool]bool:
+		fastpathTV.DecMapBoolBoolV(v, fastpathCheckNilFalse, false, d)
+	case *map[bool]bool:
+		v2, changed2 := fastpathTV.DecMapBoolBoolV(*v, fastpathCheckNilFalse, true, d)
+		if changed2 {
+			*v = v2
+		}
+
+	default:
+		_ = v // TODO: workaround https://github.com/golang/go/issues/12927 (remove after go 1.6 release)
+		return false
+	}
+	return true
+}
+
+// -- -- fast path functions
+
+func (f *decFnInfo) fastpathDecSliceIntfR(rv reflect.Value) {
+	array := f.seq == seqTypeArray
+	if !array && rv.CanAddr() {
+		vp := rv.Addr().Interface().(*[]interface{})
+		v, changed := fastpathTV.DecSliceIntfV(*vp, fastpathCheckNilFalse, !array, f.d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		v := rv.Interface().([]interface{})
+		fastpathTV.DecSliceIntfV(v, fastpathCheckNilFalse, false, f.d)
+	}
+}
+
+func (f fastpathT) DecSliceIntfX(vp *[]interface{}, checkNil bool, d *Decoder) {
+	v, changed := f.DecSliceIntfV(*vp, checkNil, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecSliceIntfV(v []interface{}, checkNil bool, canChange bool, d *Decoder) (_ []interface{}, changed bool) {
+	dd := d.d
+
+	if checkNil && dd.TryDecodeAsNil() {
+		if v != nil {
+			changed = true
+		}
+		return nil, changed
+	}
+
+	slh, containerLenS := d.decSliceHelperStart()
+	if containerLenS == 0 {
+		if canChange {
+			if v == nil {
+				v = []interface{}{}
+			} else if len(v) != 0 {
+				v = v[:0]
+			}
+			changed = true
+		}
+		slh.End()
+		return
+	}
+
+	if containerLenS > 0 {
+		x2read := containerLenS
+		var xtrunc bool
+		if containerLenS > cap(v) {
+			if canChange {
+				var xlen int
+				xlen, xtrunc = decInferLen(containerLenS, d.h.MaxInitLen, 16)
+				if xtrunc {
+					if xlen <= cap(v) {
+						v = v[:xlen]
+					} else {
+						v = make([]interface{}, xlen)
+					}
+				} else {
+					v = make([]interface{}, xlen)
+				}
+				changed = true
+			} else {
+				d.arrayCannotExpand(len(v), containerLenS)
+			}
+			x2read = len(v)
+		} else if containerLenS != len(v) {
+			if canChange {
+				v = v[:containerLenS]
+				changed = true
+			}
+		}
+		j := 0
+		for ; j < x2read; j++ {
+			slh.ElemContainerState(j)
+			d.decode(&v[j])
+		}
+		if xtrunc {
+			for ; j < containerLenS; j++ {
+				v = append(v, nil)
+				slh.ElemContainerState(j)
+				d.decode(&v[j])
+			}
+		} else if !canChange {
+			for ; j < containerLenS; j++ {
+				slh.ElemContainerState(j)
+				d.swallow()
+			}
+		}
+	} else {
+		breakFound := dd.CheckBreak()
+		if breakFound {
+			if canChange {
+				if v == nil {
+					v = []interface{}{}
+				} else if len(v) != 0 {
+					v = v[:0]
+				}
+				changed = true
+			}
+			slh.End()
+			return
+		}
+		if cap(v) == 0 {
+			v = make([]interface{}, 1, 4)
+			changed = true
+		}
+		j := 0
+		for ; !breakFound; j++ {
+			if j >= len(v) {
+				if canChange {
+					v = append(v, nil)
+					changed = true
+				} else {
+					d.arrayCannotExpand(len(v), j+1)
+				}
+			}
+			slh.ElemContainerState(j)
+			if j < len(v) {
+				d.decode(&v[j])
+
+			} else {
+				d.swallow()
+			}
+			breakFound = dd.CheckBreak()
+		}
+		if canChange && j < len(v) {
+			v = v[:j]
+			changed = true
+		}
+	}
+	slh.End()
+	return v, changed
+}
+
+func (f *decFnInfo) fastpathDecSliceStringR(rv reflect.Value) {
+	array := f.seq == seqTypeArray
+	if !array && rv.CanAddr() {
+		vp := rv.Addr().Interface().(*[]string)
+		v, changed := fastpathTV.DecSliceStringV(*vp, fastpathCheckNilFalse, !array, f.d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		v := rv.Interface().([]string)
+		fastpathTV.DecSliceStringV(v, fastpathCheckNilFalse, false, f.d)
+	}
+}
+
+func (f fastpathT) DecSliceStringX(vp *[]string, checkNil bool, d *Decoder) {
+	v, changed := f.DecSliceStringV(*vp, checkNil, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecSliceStringV(v []string, checkNil bool, canChange bool, d *Decoder) (_ []string, changed bool) {
+	dd := d.d
+
+	if checkNil && dd.TryDecodeAsNil() {
+		if v != nil {
+			changed = true
+		}
+		return nil, changed
+	}
+
+	slh, containerLenS := d.decSliceHelperStart()
+	if containerLenS == 0 {
+		if canChange {
+			if v == nil {
+				v = []string{}
+			} else if len(v) != 0 {
+				v = v[:0]
+			}
+			changed = true
+		}
+		slh.End()
+		return
+	}
+
+	if containerLenS > 0 {
+		x2read := containerLenS
+		var xtrunc bool
+		if containerLenS > cap(v) {
+			if canChange {
+				var xlen int
+				xlen, xtrunc = decInferLen(containerLenS, d.h.MaxInitLen, 16)
+				if xtrunc {
+					if xlen <= cap(v) {
+						v = v[:xlen]
+					} else {
+						v = make([]string, xlen)
+					}
+				} else {
+					v = make([]string, xlen)
+				}
+				changed = true
+			} else {
+				d.arrayCannotExpand(len(v), containerLenS)
+			}
+			x2read = len(v)
+		} else if containerLenS != len(v) {
+			if canChange {
+				v = v[:containerLenS]
+				changed = true
+			}
+		}
+		j := 0
+		for ; j < x2read; j++ {
+			slh.ElemContainerState(j)
+			v[j] = dd.DecodeString()
+		}
+		if xtrunc {
+			for ; j < containerLenS; j++ {
+				v = append(v, "")
+				slh.ElemContainerState(j)
+				v[j] = dd.DecodeString()
+			}
+		} else if !canChange {
+			for ; j < containerLenS; j++ {
+				slh.ElemContainerState(j)
+				d.swallow()
+			}
+		}
+	} else {
+		breakFound := dd.CheckBreak()
+		if breakFound {
+			if canChange {
+				if v == nil {
+					v = []string{}
+				} else if len(v) != 0 {
+					v = v[:0]
+				}
+				changed = true
+			}
+			slh.End()
+			return
+		}
+		if cap(v) == 0 {
+			v = make([]string, 1, 4)
+			changed = true
+		}
+		j := 0
+		for ; !breakFound; j++ {
+			if j >= len(v) {
+				if canChange {
+					v = append(v, "")
+					changed = true
+				} else {
+					d.arrayCannotExpand(len(v), j+1)
+				}
+			}
+			slh.ElemContainerState(j)
+			if j < len(v) {
+				v[j] = dd.DecodeString()
+			} else {
+				d.swallow()
+			}
+			breakFound = dd.CheckBreak()
+		}
+		if canChange && j < len(v) {
+			v = v[:j]
+			changed = true
+		}
+	}
+	slh.End()
+	return v, changed
+}
+
+func (f *decFnInfo) fastpathDecSliceFloat32R(rv reflect.Value) {
+	array := f.seq == seqTypeArray
+	if !array && rv.CanAddr() {
+		vp := rv.Addr().Interface().(*[]float32)
+		v, changed := fastpathTV.DecSliceFloat32V(*vp, fastpathCheckNilFalse, !array, f.d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		v := rv.Interface().([]float32)
+		fastpathTV.DecSliceFloat32V(v, fastpathCheckNilFalse, false, f.d)
+	}
+}
+
+func (f fastpathT) DecSliceFloat32X(vp *[]float32, checkNil bool, d *Decoder) {
+	v, changed := f.DecSliceFloat32V(*vp, checkNil, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecSliceFloat32V(v []float32, checkNil bool, canChange bool, d *Decoder) (_ []float32, changed bool) {
+	dd := d.d
+
+	if checkNil && dd.TryDecodeAsNil() {
+		if v != nil {
+			changed = true
+		}
+		return nil, changed
+	}
+
+	slh, containerLenS := d.decSliceHelperStart()
+	if containerLenS == 0 {
+		if canChange {
+			if v == nil {
+				v = []float32{}
+			} else if len(v) != 0 {
+				v = v[:0]
+			}
+			changed = true
+		}
+		slh.End()
+		return
+	}
+
+	if containerLenS > 0 {
+		x2read := containerLenS
+		var xtrunc bool
+		if containerLenS > cap(v) {
+			if canChange {
+				var xlen int
+				xlen, xtrunc = decInferLen(containerLenS, d.h.MaxInitLen, 4)
+				if xtrunc {
+					if xlen <= cap(v) {
+						v = v[:xlen]
+					} else {
+						v = make([]float32, xlen)
+					}
+				} else {
+					v = make([]float32, xlen)
xlen) + } + changed = true + } else { + d.arrayCannotExpand(len(v), containerLenS) + } + x2read = len(v) + } else if containerLenS != len(v) { + if canChange { + v = v[:containerLenS] + changed = true + } + } + j := 0 + for ; j < x2read; j++ { + slh.ElemContainerState(j) + v[j] = float32(dd.DecodeFloat(true)) + } + if xtrunc { + for ; j < containerLenS; j++ { + v = append(v, 0) + slh.ElemContainerState(j) + v[j] = float32(dd.DecodeFloat(true)) + } + } else if !canChange { + for ; j < containerLenS; j++ { + slh.ElemContainerState(j) + d.swallow() + } + } + } else { + breakFound := dd.CheckBreak() + if breakFound { + if canChange { + if v == nil { + v = []float32{} + } else if len(v) != 0 { + v = v[:0] + } + changed = true + } + slh.End() + return + } + if cap(v) == 0 { + v = make([]float32, 1, 4) + changed = true + } + j := 0 + for ; !breakFound; j++ { + if j >= len(v) { + if canChange { + v = append(v, 0) + changed = true + } else { + d.arrayCannotExpand(len(v), j+1) + } + } + slh.ElemContainerState(j) + if j < len(v) { + v[j] = float32(dd.DecodeFloat(true)) + } else { + d.swallow() + } + breakFound = dd.CheckBreak() + } + if canChange && j < len(v) { + v = v[:j] + changed = true + } + } + slh.End() + return v, changed +} + +func (f *decFnInfo) fastpathDecSliceFloat64R(rv reflect.Value) { + array := f.seq == seqTypeArray + if !array && rv.CanAddr() { + vp := rv.Addr().Interface().(*[]float64) + v, changed := fastpathTV.DecSliceFloat64V(*vp, fastpathCheckNilFalse, !array, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().([]float64) + fastpathTV.DecSliceFloat64V(v, fastpathCheckNilFalse, false, f.d) + } +} + +func (f fastpathT) DecSliceFloat64X(vp *[]float64, checkNil bool, d *Decoder) { + v, changed := f.DecSliceFloat64V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecSliceFloat64V(v []float64, checkNil bool, canChange bool, d *Decoder) (_ []float64, changed bool) { + dd := d.d + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + slh, containerLenS := d.decSliceHelperStart() + if containerLenS == 0 { + if canChange { + if v == nil { + v = []float64{} + } else if len(v) != 0 { + v = v[:0] + } + changed = true + } + slh.End() + return + } + + if containerLenS > 0 { + x2read := containerLenS + var xtrunc bool + if containerLenS > cap(v) { + if canChange { + var xlen int + xlen, xtrunc = decInferLen(containerLenS, d.h.MaxInitLen, 8) + if xtrunc { + if xlen <= cap(v) { + v = v[:xlen] + } else { + v = make([]float64, xlen) + } + } else { + v = make([]float64, xlen) + } + changed = true + } else { + d.arrayCannotExpand(len(v), containerLenS) + } + x2read = len(v) + } else if containerLenS != len(v) { + if canChange { + v = v[:containerLenS] + changed = true + } + } + j := 0 + for ; j < x2read; j++ { + slh.ElemContainerState(j) + v[j] = dd.DecodeFloat(false) + } + if xtrunc { + for ; j < containerLenS; j++ { + v = append(v, 0) + slh.ElemContainerState(j) + v[j] = dd.DecodeFloat(false) + } + } else if !canChange { + for ; j < containerLenS; j++ { + slh.ElemContainerState(j) + d.swallow() + } + } + } else { + breakFound := dd.CheckBreak() + if breakFound { + if canChange { + if v == nil { + v = []float64{} + } else if len(v) != 0 { + v = v[:0] + } + changed = true + } + slh.End() + return + } + if cap(v) == 0 { + v = make([]float64, 1, 4) + changed = true + } + j := 0 + for ; !breakFound; j++ { + if j >= len(v) { + if canChange { + v = append(v, 0) + changed = true + } else { + 
d.arrayCannotExpand(len(v), j+1) + } + } + slh.ElemContainerState(j) + if j < len(v) { + v[j] = dd.DecodeFloat(false) + } else { + d.swallow() + } + breakFound = dd.CheckBreak() + } + if canChange && j < len(v) { + v = v[:j] + changed = true + } + } + slh.End() + return v, changed +} + +func (f *decFnInfo) fastpathDecSliceUintR(rv reflect.Value) { + array := f.seq == seqTypeArray + if !array && rv.CanAddr() { + vp := rv.Addr().Interface().(*[]uint) + v, changed := fastpathTV.DecSliceUintV(*vp, fastpathCheckNilFalse, !array, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().([]uint) + fastpathTV.DecSliceUintV(v, fastpathCheckNilFalse, false, f.d) + } +} + +func (f fastpathT) DecSliceUintX(vp *[]uint, checkNil bool, d *Decoder) { + v, changed := f.DecSliceUintV(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecSliceUintV(v []uint, checkNil bool, canChange bool, d *Decoder) (_ []uint, changed bool) { + dd := d.d + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + slh, containerLenS := d.decSliceHelperStart() + if containerLenS == 0 { + if canChange { + if v == nil { + v = []uint{} + } else if len(v) != 0 { + v = v[:0] + } + changed = true + } + slh.End() + return + } + + if containerLenS > 0 { + x2read := containerLenS + var xtrunc bool + if containerLenS > cap(v) { + if canChange { + var xlen int + xlen, xtrunc = decInferLen(containerLenS, d.h.MaxInitLen, 8) + if xtrunc { + if xlen <= cap(v) { + v = v[:xlen] + } else { + v = make([]uint, xlen) + } + } else { + v = make([]uint, xlen) + } + changed = true + } else { + d.arrayCannotExpand(len(v), containerLenS) + } + x2read = len(v) + } else if containerLenS != len(v) { + if canChange { + v = v[:containerLenS] + changed = true + } + } + j := 0 + for ; j < x2read; j++ { + slh.ElemContainerState(j) + v[j] = uint(dd.DecodeUint(uintBitsize)) + } + if xtrunc { + for ; j < containerLenS; j++ { + v = append(v, 0) + slh.ElemContainerState(j) + v[j] = uint(dd.DecodeUint(uintBitsize)) + } + } else if !canChange { + for ; j < containerLenS; j++ { + slh.ElemContainerState(j) + d.swallow() + } + } + } else { + breakFound := dd.CheckBreak() + if breakFound { + if canChange { + if v == nil { + v = []uint{} + } else if len(v) != 0 { + v = v[:0] + } + changed = true + } + slh.End() + return + } + if cap(v) == 0 { + v = make([]uint, 1, 4) + changed = true + } + j := 0 + for ; !breakFound; j++ { + if j >= len(v) { + if canChange { + v = append(v, 0) + changed = true + } else { + d.arrayCannotExpand(len(v), j+1) + } + } + slh.ElemContainerState(j) + if j < len(v) { + v[j] = uint(dd.DecodeUint(uintBitsize)) + } else { + d.swallow() + } + breakFound = dd.CheckBreak() + } + if canChange && j < len(v) { + v = v[:j] + changed = true + } + } + slh.End() + return v, changed +} + +func (f *decFnInfo) fastpathDecSliceUint16R(rv reflect.Value) { + array := f.seq == seqTypeArray + if !array && rv.CanAddr() { + vp := rv.Addr().Interface().(*[]uint16) + v, changed := fastpathTV.DecSliceUint16V(*vp, fastpathCheckNilFalse, !array, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().([]uint16) + fastpathTV.DecSliceUint16V(v, fastpathCheckNilFalse, false, f.d) + } +} + +func (f fastpathT) DecSliceUint16X(vp *[]uint16, checkNil bool, d *Decoder) { + v, changed := f.DecSliceUint16V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecSliceUint16V(v []uint16, checkNil bool, canChange bool, d *Decoder) (_ []uint16, changed bool) { + dd := d.d + 
+ if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + slh, containerLenS := d.decSliceHelperStart() + if containerLenS == 0 { + if canChange { + if v == nil { + v = []uint16{} + } else if len(v) != 0 { + v = v[:0] + } + changed = true + } + slh.End() + return + } + + if containerLenS > 0 { + x2read := containerLenS + var xtrunc bool + if containerLenS > cap(v) { + if canChange { + var xlen int + xlen, xtrunc = decInferLen(containerLenS, d.h.MaxInitLen, 2) + if xtrunc { + if xlen <= cap(v) { + v = v[:xlen] + } else { + v = make([]uint16, xlen) + } + } else { + v = make([]uint16, xlen) + } + changed = true + } else { + d.arrayCannotExpand(len(v), containerLenS) + } + x2read = len(v) + } else if containerLenS != len(v) { + if canChange { + v = v[:containerLenS] + changed = true + } + } + j := 0 + for ; j < x2read; j++ { + slh.ElemContainerState(j) + v[j] = uint16(dd.DecodeUint(16)) + } + if xtrunc { + for ; j < containerLenS; j++ { + v = append(v, 0) + slh.ElemContainerState(j) + v[j] = uint16(dd.DecodeUint(16)) + } + } else if !canChange { + for ; j < containerLenS; j++ { + slh.ElemContainerState(j) + d.swallow() + } + } + } else { + breakFound := dd.CheckBreak() + if breakFound { + if canChange { + if v == nil { + v = []uint16{} + } else if len(v) != 0 { + v = v[:0] + } + changed = true + } + slh.End() + return + } + if cap(v) == 0 { + v = make([]uint16, 1, 4) + changed = true + } + j := 0 + for ; !breakFound; j++ { + if j >= len(v) { + if canChange { + v = append(v, 0) + changed = true + } else { + d.arrayCannotExpand(len(v), j+1) + } + } + slh.ElemContainerState(j) + if j < len(v) { + v[j] = uint16(dd.DecodeUint(16)) + } else { + d.swallow() + } + breakFound = dd.CheckBreak() + } + if canChange && j < len(v) { + v = v[:j] + changed = true + } + } + slh.End() + return v, changed +} + +func (f *decFnInfo) fastpathDecSliceUint32R(rv reflect.Value) { + array := f.seq == seqTypeArray + if !array && rv.CanAddr() { + vp := rv.Addr().Interface().(*[]uint32) + v, changed := fastpathTV.DecSliceUint32V(*vp, fastpathCheckNilFalse, !array, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().([]uint32) + fastpathTV.DecSliceUint32V(v, fastpathCheckNilFalse, false, f.d) + } +} + +func (f fastpathT) DecSliceUint32X(vp *[]uint32, checkNil bool, d *Decoder) { + v, changed := f.DecSliceUint32V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecSliceUint32V(v []uint32, checkNil bool, canChange bool, d *Decoder) (_ []uint32, changed bool) { + dd := d.d + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + slh, containerLenS := d.decSliceHelperStart() + if containerLenS == 0 { + if canChange { + if v == nil { + v = []uint32{} + } else if len(v) != 0 { + v = v[:0] + } + changed = true + } + slh.End() + return + } + + if containerLenS > 0 { + x2read := containerLenS + var xtrunc bool + if containerLenS > cap(v) { + if canChange { + var xlen int + xlen, xtrunc = decInferLen(containerLenS, d.h.MaxInitLen, 4) + if xtrunc { + if xlen <= cap(v) { + v = v[:xlen] + } else { + v = make([]uint32, xlen) + } + } else { + v = make([]uint32, xlen) + } + changed = true + } else { + d.arrayCannotExpand(len(v), containerLenS) + } + x2read = len(v) + } else if containerLenS != len(v) { + if canChange { + v = v[:containerLenS] + changed = true + } + } + j := 0 + for ; j < x2read; j++ { + slh.ElemContainerState(j) + v[j] = uint32(dd.DecodeUint(32)) + } + if xtrunc { + for ; j < 
containerLenS; j++ { + v = append(v, 0) + slh.ElemContainerState(j) + v[j] = uint32(dd.DecodeUint(32)) + } + } else if !canChange { + for ; j < containerLenS; j++ { + slh.ElemContainerState(j) + d.swallow() + } + } + } else { + breakFound := dd.CheckBreak() + if breakFound { + if canChange { + if v == nil { + v = []uint32{} + } else if len(v) != 0 { + v = v[:0] + } + changed = true + } + slh.End() + return + } + if cap(v) == 0 { + v = make([]uint32, 1, 4) + changed = true + } + j := 0 + for ; !breakFound; j++ { + if j >= len(v) { + if canChange { + v = append(v, 0) + changed = true + } else { + d.arrayCannotExpand(len(v), j+1) + } + } + slh.ElemContainerState(j) + if j < len(v) { + v[j] = uint32(dd.DecodeUint(32)) + } else { + d.swallow() + } + breakFound = dd.CheckBreak() + } + if canChange && j < len(v) { + v = v[:j] + changed = true + } + } + slh.End() + return v, changed +} + +func (f *decFnInfo) fastpathDecSliceUint64R(rv reflect.Value) { + array := f.seq == seqTypeArray + if !array && rv.CanAddr() { + vp := rv.Addr().Interface().(*[]uint64) + v, changed := fastpathTV.DecSliceUint64V(*vp, fastpathCheckNilFalse, !array, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().([]uint64) + fastpathTV.DecSliceUint64V(v, fastpathCheckNilFalse, false, f.d) + } +} + +func (f fastpathT) DecSliceUint64X(vp *[]uint64, checkNil bool, d *Decoder) { + v, changed := f.DecSliceUint64V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecSliceUint64V(v []uint64, checkNil bool, canChange bool, d *Decoder) (_ []uint64, changed bool) { + dd := d.d + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + slh, containerLenS := d.decSliceHelperStart() + if containerLenS == 0 { + if canChange { + if v == nil { + v = []uint64{} + } else if len(v) != 0 { + v = v[:0] + } + changed = true + } + slh.End() + return + } + + if containerLenS > 0 { + x2read := containerLenS + var xtrunc bool + if containerLenS > cap(v) { + if canChange { + var xlen int + xlen, xtrunc = decInferLen(containerLenS, d.h.MaxInitLen, 8) + if xtrunc { + if xlen <= cap(v) { + v = v[:xlen] + } else { + v = make([]uint64, xlen) + } + } else { + v = make([]uint64, xlen) + } + changed = true + } else { + d.arrayCannotExpand(len(v), containerLenS) + } + x2read = len(v) + } else if containerLenS != len(v) { + if canChange { + v = v[:containerLenS] + changed = true + } + } + j := 0 + for ; j < x2read; j++ { + slh.ElemContainerState(j) + v[j] = dd.DecodeUint(64) + } + if xtrunc { + for ; j < containerLenS; j++ { + v = append(v, 0) + slh.ElemContainerState(j) + v[j] = dd.DecodeUint(64) + } + } else if !canChange { + for ; j < containerLenS; j++ { + slh.ElemContainerState(j) + d.swallow() + } + } + } else { + breakFound := dd.CheckBreak() + if breakFound { + if canChange { + if v == nil { + v = []uint64{} + } else if len(v) != 0 { + v = v[:0] + } + changed = true + } + slh.End() + return + } + if cap(v) == 0 { + v = make([]uint64, 1, 4) + changed = true + } + j := 0 + for ; !breakFound; j++ { + if j >= len(v) { + if canChange { + v = append(v, 0) + changed = true + } else { + d.arrayCannotExpand(len(v), j+1) + } + } + slh.ElemContainerState(j) + if j < len(v) { + v[j] = dd.DecodeUint(64) + } else { + d.swallow() + } + breakFound = dd.CheckBreak() + } + if canChange && j < len(v) { + v = v[:j] + changed = true + } + } + slh.End() + return v, changed +} + +func (f *decFnInfo) fastpathDecSliceUintptrR(rv reflect.Value) { + array := f.seq == seqTypeArray + if !array 
&& rv.CanAddr() { + vp := rv.Addr().Interface().(*[]uintptr) + v, changed := fastpathTV.DecSliceUintptrV(*vp, fastpathCheckNilFalse, !array, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().([]uintptr) + fastpathTV.DecSliceUintptrV(v, fastpathCheckNilFalse, false, f.d) + } +} + +func (f fastpathT) DecSliceUintptrX(vp *[]uintptr, checkNil bool, d *Decoder) { + v, changed := f.DecSliceUintptrV(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecSliceUintptrV(v []uintptr, checkNil bool, canChange bool, d *Decoder) (_ []uintptr, changed bool) { + dd := d.d + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + slh, containerLenS := d.decSliceHelperStart() + if containerLenS == 0 { + if canChange { + if v == nil { + v = []uintptr{} + } else if len(v) != 0 { + v = v[:0] + } + changed = true + } + slh.End() + return + } + + if containerLenS > 0 { + x2read := containerLenS + var xtrunc bool + if containerLenS > cap(v) { + if canChange { + var xlen int + xlen, xtrunc = decInferLen(containerLenS, d.h.MaxInitLen, 8) + if xtrunc { + if xlen <= cap(v) { + v = v[:xlen] + } else { + v = make([]uintptr, xlen) + } + } else { + v = make([]uintptr, xlen) + } + changed = true + } else { + d.arrayCannotExpand(len(v), containerLenS) + } + x2read = len(v) + } else if containerLenS != len(v) { + if canChange { + v = v[:containerLenS] + changed = true + } + } + j := 0 + for ; j < x2read; j++ { + slh.ElemContainerState(j) + v[j] = uintptr(dd.DecodeUint(uintBitsize)) + } + if xtrunc { + for ; j < containerLenS; j++ { + v = append(v, 0) + slh.ElemContainerState(j) + v[j] = uintptr(dd.DecodeUint(uintBitsize)) + } + } else if !canChange { + for ; j < containerLenS; j++ { + slh.ElemContainerState(j) + d.swallow() + } + } + } else { + breakFound := dd.CheckBreak() + if breakFound { + if canChange { + if v == nil { + v = []uintptr{} + } else if len(v) != 0 { + v = v[:0] + } + changed = true + } + slh.End() + return + } + if cap(v) == 0 { + v = make([]uintptr, 1, 4) + changed = true + } + j := 0 + for ; !breakFound; j++ { + if j >= len(v) { + if canChange { + v = append(v, 0) + changed = true + } else { + d.arrayCannotExpand(len(v), j+1) + } + } + slh.ElemContainerState(j) + if j < len(v) { + v[j] = uintptr(dd.DecodeUint(uintBitsize)) + } else { + d.swallow() + } + breakFound = dd.CheckBreak() + } + if canChange && j < len(v) { + v = v[:j] + changed = true + } + } + slh.End() + return v, changed +} + +func (f *decFnInfo) fastpathDecSliceIntR(rv reflect.Value) { + array := f.seq == seqTypeArray + if !array && rv.CanAddr() { + vp := rv.Addr().Interface().(*[]int) + v, changed := fastpathTV.DecSliceIntV(*vp, fastpathCheckNilFalse, !array, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().([]int) + fastpathTV.DecSliceIntV(v, fastpathCheckNilFalse, false, f.d) + } +} + +func (f fastpathT) DecSliceIntX(vp *[]int, checkNil bool, d *Decoder) { + v, changed := f.DecSliceIntV(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecSliceIntV(v []int, checkNil bool, canChange bool, d *Decoder) (_ []int, changed bool) { + dd := d.d + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + slh, containerLenS := d.decSliceHelperStart() + if containerLenS == 0 { + if canChange { + if v == nil { + v = []int{} + } else if len(v) != 0 { + v = v[:0] + } + changed = true + } + slh.End() + return + } + + if containerLenS > 0 { + x2read := containerLenS + var 
xtrunc bool + if containerLenS > cap(v) { + if canChange { + var xlen int + xlen, xtrunc = decInferLen(containerLenS, d.h.MaxInitLen, 8) + if xtrunc { + if xlen <= cap(v) { + v = v[:xlen] + } else { + v = make([]int, xlen) + } + } else { + v = make([]int, xlen) + } + changed = true + } else { + d.arrayCannotExpand(len(v), containerLenS) + } + x2read = len(v) + } else if containerLenS != len(v) { + if canChange { + v = v[:containerLenS] + changed = true + } + } + j := 0 + for ; j < x2read; j++ { + slh.ElemContainerState(j) + v[j] = int(dd.DecodeInt(intBitsize)) + } + if xtrunc { + for ; j < containerLenS; j++ { + v = append(v, 0) + slh.ElemContainerState(j) + v[j] = int(dd.DecodeInt(intBitsize)) + } + } else if !canChange { + for ; j < containerLenS; j++ { + slh.ElemContainerState(j) + d.swallow() + } + } + } else { + breakFound := dd.CheckBreak() + if breakFound { + if canChange { + if v == nil { + v = []int{} + } else if len(v) != 0 { + v = v[:0] + } + changed = true + } + slh.End() + return + } + if cap(v) == 0 { + v = make([]int, 1, 4) + changed = true + } + j := 0 + for ; !breakFound; j++ { + if j >= len(v) { + if canChange { + v = append(v, 0) + changed = true + } else { + d.arrayCannotExpand(len(v), j+1) + } + } + slh.ElemContainerState(j) + if j < len(v) { + v[j] = int(dd.DecodeInt(intBitsize)) + } else { + d.swallow() + } + breakFound = dd.CheckBreak() + } + if canChange && j < len(v) { + v = v[:j] + changed = true + } + } + slh.End() + return v, changed +} + +func (f *decFnInfo) fastpathDecSliceInt8R(rv reflect.Value) { + array := f.seq == seqTypeArray + if !array && rv.CanAddr() { + vp := rv.Addr().Interface().(*[]int8) + v, changed := fastpathTV.DecSliceInt8V(*vp, fastpathCheckNilFalse, !array, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().([]int8) + fastpathTV.DecSliceInt8V(v, fastpathCheckNilFalse, false, f.d) + } +} + +func (f fastpathT) DecSliceInt8X(vp *[]int8, checkNil bool, d *Decoder) { + v, changed := f.DecSliceInt8V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecSliceInt8V(v []int8, checkNil bool, canChange bool, d *Decoder) (_ []int8, changed bool) { + dd := d.d + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + slh, containerLenS := d.decSliceHelperStart() + if containerLenS == 0 { + if canChange { + if v == nil { + v = []int8{} + } else if len(v) != 0 { + v = v[:0] + } + changed = true + } + slh.End() + return + } + + if containerLenS > 0 { + x2read := containerLenS + var xtrunc bool + if containerLenS > cap(v) { + if canChange { + var xlen int + xlen, xtrunc = decInferLen(containerLenS, d.h.MaxInitLen, 1) + if xtrunc { + if xlen <= cap(v) { + v = v[:xlen] + } else { + v = make([]int8, xlen) + } + } else { + v = make([]int8, xlen) + } + changed = true + } else { + d.arrayCannotExpand(len(v), containerLenS) + } + x2read = len(v) + } else if containerLenS != len(v) { + if canChange { + v = v[:containerLenS] + changed = true + } + } + j := 0 + for ; j < x2read; j++ { + slh.ElemContainerState(j) + v[j] = int8(dd.DecodeInt(8)) + } + if xtrunc { + for ; j < containerLenS; j++ { + v = append(v, 0) + slh.ElemContainerState(j) + v[j] = int8(dd.DecodeInt(8)) + } + } else if !canChange { + for ; j < containerLenS; j++ { + slh.ElemContainerState(j) + d.swallow() + } + } + } else { + breakFound := dd.CheckBreak() + if breakFound { + if canChange { + if v == nil { + v = []int8{} + } else if len(v) != 0 { + v = v[:0] + } + changed = true + } + slh.End() + return + } + if 
cap(v) == 0 { + v = make([]int8, 1, 4) + changed = true + } + j := 0 + for ; !breakFound; j++ { + if j >= len(v) { + if canChange { + v = append(v, 0) + changed = true + } else { + d.arrayCannotExpand(len(v), j+1) + } + } + slh.ElemContainerState(j) + if j < len(v) { + v[j] = int8(dd.DecodeInt(8)) + } else { + d.swallow() + } + breakFound = dd.CheckBreak() + } + if canChange && j < len(v) { + v = v[:j] + changed = true + } + } + slh.End() + return v, changed +} + +func (f *decFnInfo) fastpathDecSliceInt16R(rv reflect.Value) { + array := f.seq == seqTypeArray + if !array && rv.CanAddr() { + vp := rv.Addr().Interface().(*[]int16) + v, changed := fastpathTV.DecSliceInt16V(*vp, fastpathCheckNilFalse, !array, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().([]int16) + fastpathTV.DecSliceInt16V(v, fastpathCheckNilFalse, false, f.d) + } +} + +func (f fastpathT) DecSliceInt16X(vp *[]int16, checkNil bool, d *Decoder) { + v, changed := f.DecSliceInt16V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecSliceInt16V(v []int16, checkNil bool, canChange bool, d *Decoder) (_ []int16, changed bool) { + dd := d.d + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + slh, containerLenS := d.decSliceHelperStart() + if containerLenS == 0 { + if canChange { + if v == nil { + v = []int16{} + } else if len(v) != 0 { + v = v[:0] + } + changed = true + } + slh.End() + return + } + + if containerLenS > 0 { + x2read := containerLenS + var xtrunc bool + if containerLenS > cap(v) { + if canChange { + var xlen int + xlen, xtrunc = decInferLen(containerLenS, d.h.MaxInitLen, 2) + if xtrunc { + if xlen <= cap(v) { + v = v[:xlen] + } else { + v = make([]int16, xlen) + } + } else { + v = make([]int16, xlen) + } + changed = true + } else { + d.arrayCannotExpand(len(v), containerLenS) + } + x2read = len(v) + } else if containerLenS != len(v) { + if canChange { + v = v[:containerLenS] + changed = true + } + } + j := 0 + for ; j < x2read; j++ { + slh.ElemContainerState(j) + v[j] = int16(dd.DecodeInt(16)) + } + if xtrunc { + for ; j < containerLenS; j++ { + v = append(v, 0) + slh.ElemContainerState(j) + v[j] = int16(dd.DecodeInt(16)) + } + } else if !canChange { + for ; j < containerLenS; j++ { + slh.ElemContainerState(j) + d.swallow() + } + } + } else { + breakFound := dd.CheckBreak() + if breakFound { + if canChange { + if v == nil { + v = []int16{} + } else if len(v) != 0 { + v = v[:0] + } + changed = true + } + slh.End() + return + } + if cap(v) == 0 { + v = make([]int16, 1, 4) + changed = true + } + j := 0 + for ; !breakFound; j++ { + if j >= len(v) { + if canChange { + v = append(v, 0) + changed = true + } else { + d.arrayCannotExpand(len(v), j+1) + } + } + slh.ElemContainerState(j) + if j < len(v) { + v[j] = int16(dd.DecodeInt(16)) + } else { + d.swallow() + } + breakFound = dd.CheckBreak() + } + if canChange && j < len(v) { + v = v[:j] + changed = true + } + } + slh.End() + return v, changed +} + +func (f *decFnInfo) fastpathDecSliceInt32R(rv reflect.Value) { + array := f.seq == seqTypeArray + if !array && rv.CanAddr() { + vp := rv.Addr().Interface().(*[]int32) + v, changed := fastpathTV.DecSliceInt32V(*vp, fastpathCheckNilFalse, !array, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().([]int32) + fastpathTV.DecSliceInt32V(v, fastpathCheckNilFalse, false, f.d) + } +} + +func (f fastpathT) DecSliceInt32X(vp *[]int32, checkNil bool, d *Decoder) { + v, changed := f.DecSliceInt32V(*vp, checkNil, true, d) + 
if changed { + *vp = v + } +} +func (_ fastpathT) DecSliceInt32V(v []int32, checkNil bool, canChange bool, d *Decoder) (_ []int32, changed bool) { + dd := d.d + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + slh, containerLenS := d.decSliceHelperStart() + if containerLenS == 0 { + if canChange { + if v == nil { + v = []int32{} + } else if len(v) != 0 { + v = v[:0] + } + changed = true + } + slh.End() + return + } + + if containerLenS > 0 { + x2read := containerLenS + var xtrunc bool + if containerLenS > cap(v) { + if canChange { + var xlen int + xlen, xtrunc = decInferLen(containerLenS, d.h.MaxInitLen, 4) + if xtrunc { + if xlen <= cap(v) { + v = v[:xlen] + } else { + v = make([]int32, xlen) + } + } else { + v = make([]int32, xlen) + } + changed = true + } else { + d.arrayCannotExpand(len(v), containerLenS) + } + x2read = len(v) + } else if containerLenS != len(v) { + if canChange { + v = v[:containerLenS] + changed = true + } + } + j := 0 + for ; j < x2read; j++ { + slh.ElemContainerState(j) + v[j] = int32(dd.DecodeInt(32)) + } + if xtrunc { + for ; j < containerLenS; j++ { + v = append(v, 0) + slh.ElemContainerState(j) + v[j] = int32(dd.DecodeInt(32)) + } + } else if !canChange { + for ; j < containerLenS; j++ { + slh.ElemContainerState(j) + d.swallow() + } + } + } else { + breakFound := dd.CheckBreak() + if breakFound { + if canChange { + if v == nil { + v = []int32{} + } else if len(v) != 0 { + v = v[:0] + } + changed = true + } + slh.End() + return + } + if cap(v) == 0 { + v = make([]int32, 1, 4) + changed = true + } + j := 0 + for ; !breakFound; j++ { + if j >= len(v) { + if canChange { + v = append(v, 0) + changed = true + } else { + d.arrayCannotExpand(len(v), j+1) + } + } + slh.ElemContainerState(j) + if j < len(v) { + v[j] = int32(dd.DecodeInt(32)) + } else { + d.swallow() + } + breakFound = dd.CheckBreak() + } + if canChange && j < len(v) { + v = v[:j] + changed = true + } + } + slh.End() + return v, changed +} + +func (f *decFnInfo) fastpathDecSliceInt64R(rv reflect.Value) { + array := f.seq == seqTypeArray + if !array && rv.CanAddr() { + vp := rv.Addr().Interface().(*[]int64) + v, changed := fastpathTV.DecSliceInt64V(*vp, fastpathCheckNilFalse, !array, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().([]int64) + fastpathTV.DecSliceInt64V(v, fastpathCheckNilFalse, false, f.d) + } +} + +func (f fastpathT) DecSliceInt64X(vp *[]int64, checkNil bool, d *Decoder) { + v, changed := f.DecSliceInt64V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecSliceInt64V(v []int64, checkNil bool, canChange bool, d *Decoder) (_ []int64, changed bool) { + dd := d.d + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + slh, containerLenS := d.decSliceHelperStart() + if containerLenS == 0 { + if canChange { + if v == nil { + v = []int64{} + } else if len(v) != 0 { + v = v[:0] + } + changed = true + } + slh.End() + return + } + + if containerLenS > 0 { + x2read := containerLenS + var xtrunc bool + if containerLenS > cap(v) { + if canChange { + var xlen int + xlen, xtrunc = decInferLen(containerLenS, d.h.MaxInitLen, 8) + if xtrunc { + if xlen <= cap(v) { + v = v[:xlen] + } else { + v = make([]int64, xlen) + } + } else { + v = make([]int64, xlen) + } + changed = true + } else { + d.arrayCannotExpand(len(v), containerLenS) + } + x2read = len(v) + } else if containerLenS != len(v) { + if canChange { + v = v[:containerLenS] + changed = true + } + } 
+ j := 0 + for ; j < x2read; j++ { + slh.ElemContainerState(j) + v[j] = dd.DecodeInt(64) + } + if xtrunc { + for ; j < containerLenS; j++ { + v = append(v, 0) + slh.ElemContainerState(j) + v[j] = dd.DecodeInt(64) + } + } else if !canChange { + for ; j < containerLenS; j++ { + slh.ElemContainerState(j) + d.swallow() + } + } + } else { + breakFound := dd.CheckBreak() + if breakFound { + if canChange { + if v == nil { + v = []int64{} + } else if len(v) != 0 { + v = v[:0] + } + changed = true + } + slh.End() + return + } + if cap(v) == 0 { + v = make([]int64, 1, 4) + changed = true + } + j := 0 + for ; !breakFound; j++ { + if j >= len(v) { + if canChange { + v = append(v, 0) + changed = true + } else { + d.arrayCannotExpand(len(v), j+1) + } + } + slh.ElemContainerState(j) + if j < len(v) { + v[j] = dd.DecodeInt(64) + } else { + d.swallow() + } + breakFound = dd.CheckBreak() + } + if canChange && j < len(v) { + v = v[:j] + changed = true + } + } + slh.End() + return v, changed +} + +func (f *decFnInfo) fastpathDecSliceBoolR(rv reflect.Value) { + array := f.seq == seqTypeArray + if !array && rv.CanAddr() { + vp := rv.Addr().Interface().(*[]bool) + v, changed := fastpathTV.DecSliceBoolV(*vp, fastpathCheckNilFalse, !array, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().([]bool) + fastpathTV.DecSliceBoolV(v, fastpathCheckNilFalse, false, f.d) + } +} + +func (f fastpathT) DecSliceBoolX(vp *[]bool, checkNil bool, d *Decoder) { + v, changed := f.DecSliceBoolV(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecSliceBoolV(v []bool, checkNil bool, canChange bool, d *Decoder) (_ []bool, changed bool) { + dd := d.d + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + slh, containerLenS := d.decSliceHelperStart() + if containerLenS == 0 { + if canChange { + if v == nil { + v = []bool{} + } else if len(v) != 0 { + v = v[:0] + } + changed = true + } + slh.End() + return + } + + if containerLenS > 0 { + x2read := containerLenS + var xtrunc bool + if containerLenS > cap(v) { + if canChange { + var xlen int + xlen, xtrunc = decInferLen(containerLenS, d.h.MaxInitLen, 1) + if xtrunc { + if xlen <= cap(v) { + v = v[:xlen] + } else { + v = make([]bool, xlen) + } + } else { + v = make([]bool, xlen) + } + changed = true + } else { + d.arrayCannotExpand(len(v), containerLenS) + } + x2read = len(v) + } else if containerLenS != len(v) { + if canChange { + v = v[:containerLenS] + changed = true + } + } + j := 0 + for ; j < x2read; j++ { + slh.ElemContainerState(j) + v[j] = dd.DecodeBool() + } + if xtrunc { + for ; j < containerLenS; j++ { + v = append(v, false) + slh.ElemContainerState(j) + v[j] = dd.DecodeBool() + } + } else if !canChange { + for ; j < containerLenS; j++ { + slh.ElemContainerState(j) + d.swallow() + } + } + } else { + breakFound := dd.CheckBreak() + if breakFound { + if canChange { + if v == nil { + v = []bool{} + } else if len(v) != 0 { + v = v[:0] + } + changed = true + } + slh.End() + return + } + if cap(v) == 0 { + v = make([]bool, 1, 4) + changed = true + } + j := 0 + for ; !breakFound; j++ { + if j >= len(v) { + if canChange { + v = append(v, false) + changed = true + } else { + d.arrayCannotExpand(len(v), j+1) + } + } + slh.ElemContainerState(j) + if j < len(v) { + v[j] = dd.DecodeBool() + } else { + d.swallow() + } + breakFound = dd.CheckBreak() + } + if canChange && j < len(v) { + v = v[:j] + changed = true + } + } + slh.End() + return v, changed +} + +func (f *decFnInfo) 
fastpathDecMapIntfIntfR(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[interface{}]interface{}) + v, changed := fastpathTV.DecMapIntfIntfV(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[interface{}]interface{}) + fastpathTV.DecMapIntfIntfV(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapIntfIntfX(vp *map[interface{}]interface{}, checkNil bool, d *Decoder) { + v, changed := f.DecMapIntfIntfV(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapIntfIntfV(v map[interface{}]interface{}, checkNil bool, canChange bool, + d *Decoder) (_ map[interface{}]interface{}, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 32) + v = make(map[interface{}]interface{}, xlen) + changed = true + } + mapGet := !d.h.MapValueReset && !d.h.InterfaceReset + var mk interface{} + var mv interface{} + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = nil + d.decode(&mk) + if bv, bok := mk.([]byte); bok { + mk = d.string(bv) + } + if cr != nil { + cr.sendContainerState(containerMapValue) + } + if mapGet { + mv = v[mk] + } else { + mv = nil + } + d.decode(&mv) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = nil + d.decode(&mk) + if bv, bok := mk.([]byte); bok { + mk = d.string(bv) + } + if cr != nil { + cr.sendContainerState(containerMapValue) + } + if mapGet { + mv = v[mk] + } else { + mv = nil + } + d.decode(&mv) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapIntfStringR(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[interface{}]string) + v, changed := fastpathTV.DecMapIntfStringV(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[interface{}]string) + fastpathTV.DecMapIntfStringV(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapIntfStringX(vp *map[interface{}]string, checkNil bool, d *Decoder) { + v, changed := f.DecMapIntfStringV(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapIntfStringV(v map[interface{}]string, checkNil bool, canChange bool, + d *Decoder) (_ map[interface{}]string, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 32) + v = make(map[interface{}]string, xlen) + changed = true + } + + var mk interface{} + var mv string + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = nil + d.decode(&mk) + if bv, bok := mk.([]byte); bok { + mk = d.string(bv) + } + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeString() + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk 
= nil + d.decode(&mk) + if bv, bok := mk.([]byte); bok { + mk = d.string(bv) + } + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeString() + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapIntfUintR(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[interface{}]uint) + v, changed := fastpathTV.DecMapIntfUintV(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[interface{}]uint) + fastpathTV.DecMapIntfUintV(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapIntfUintX(vp *map[interface{}]uint, checkNil bool, d *Decoder) { + v, changed := f.DecMapIntfUintV(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapIntfUintV(v map[interface{}]uint, checkNil bool, canChange bool, + d *Decoder) (_ map[interface{}]uint, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 24) + v = make(map[interface{}]uint, xlen) + changed = true + } + + var mk interface{} + var mv uint + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = nil + d.decode(&mk) + if bv, bok := mk.([]byte); bok { + mk = d.string(bv) + } + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint(dd.DecodeUint(uintBitsize)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = nil + d.decode(&mk) + if bv, bok := mk.([]byte); bok { + mk = d.string(bv) + } + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint(dd.DecodeUint(uintBitsize)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapIntfUint8R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[interface{}]uint8) + v, changed := fastpathTV.DecMapIntfUint8V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[interface{}]uint8) + fastpathTV.DecMapIntfUint8V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapIntfUint8X(vp *map[interface{}]uint8, checkNil bool, d *Decoder) { + v, changed := f.DecMapIntfUint8V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapIntfUint8V(v map[interface{}]uint8, checkNil bool, canChange bool, + d *Decoder) (_ map[interface{}]uint8, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 17) + v = make(map[interface{}]uint8, xlen) + changed = true + } + + var mk interface{} + var mv uint8 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = nil + d.decode(&mk) + if bv, bok := mk.([]byte); bok { + mk = d.string(bv) + } + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint8(dd.DecodeUint(8)) + if v != nil { + 
v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = nil + d.decode(&mk) + if bv, bok := mk.([]byte); bok { + mk = d.string(bv) + } + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint8(dd.DecodeUint(8)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapIntfUint16R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[interface{}]uint16) + v, changed := fastpathTV.DecMapIntfUint16V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[interface{}]uint16) + fastpathTV.DecMapIntfUint16V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapIntfUint16X(vp *map[interface{}]uint16, checkNil bool, d *Decoder) { + v, changed := f.DecMapIntfUint16V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapIntfUint16V(v map[interface{}]uint16, checkNil bool, canChange bool, + d *Decoder) (_ map[interface{}]uint16, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 18) + v = make(map[interface{}]uint16, xlen) + changed = true + } + + var mk interface{} + var mv uint16 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = nil + d.decode(&mk) + if bv, bok := mk.([]byte); bok { + mk = d.string(bv) + } + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint16(dd.DecodeUint(16)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = nil + d.decode(&mk) + if bv, bok := mk.([]byte); bok { + mk = d.string(bv) + } + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint16(dd.DecodeUint(16)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapIntfUint32R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[interface{}]uint32) + v, changed := fastpathTV.DecMapIntfUint32V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[interface{}]uint32) + fastpathTV.DecMapIntfUint32V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapIntfUint32X(vp *map[interface{}]uint32, checkNil bool, d *Decoder) { + v, changed := f.DecMapIntfUint32V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapIntfUint32V(v map[interface{}]uint32, checkNil bool, canChange bool, + d *Decoder) (_ map[interface{}]uint32, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 20) + v = make(map[interface{}]uint32, xlen) + changed = true + } + + var mk interface{} + var mv uint32 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = nil + 
d.decode(&mk) + if bv, bok := mk.([]byte); bok { + mk = d.string(bv) + } + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint32(dd.DecodeUint(32)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = nil + d.decode(&mk) + if bv, bok := mk.([]byte); bok { + mk = d.string(bv) + } + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint32(dd.DecodeUint(32)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapIntfUint64R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[interface{}]uint64) + v, changed := fastpathTV.DecMapIntfUint64V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[interface{}]uint64) + fastpathTV.DecMapIntfUint64V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapIntfUint64X(vp *map[interface{}]uint64, checkNil bool, d *Decoder) { + v, changed := f.DecMapIntfUint64V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapIntfUint64V(v map[interface{}]uint64, checkNil bool, canChange bool, + d *Decoder) (_ map[interface{}]uint64, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 24) + v = make(map[interface{}]uint64, xlen) + changed = true + } + + var mk interface{} + var mv uint64 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = nil + d.decode(&mk) + if bv, bok := mk.([]byte); bok { + mk = d.string(bv) + } + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeUint(64) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = nil + d.decode(&mk) + if bv, bok := mk.([]byte); bok { + mk = d.string(bv) + } + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeUint(64) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapIntfUintptrR(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[interface{}]uintptr) + v, changed := fastpathTV.DecMapIntfUintptrV(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[interface{}]uintptr) + fastpathTV.DecMapIntfUintptrV(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapIntfUintptrX(vp *map[interface{}]uintptr, checkNil bool, d *Decoder) { + v, changed := f.DecMapIntfUintptrV(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapIntfUintptrV(v map[interface{}]uintptr, checkNil bool, canChange bool, + d *Decoder) (_ map[interface{}]uintptr, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 24) + v = make(map[interface{}]uintptr, xlen) + changed = 
true + } + + var mk interface{} + var mv uintptr + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = nil + d.decode(&mk) + if bv, bok := mk.([]byte); bok { + mk = d.string(bv) + } + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uintptr(dd.DecodeUint(uintBitsize)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = nil + d.decode(&mk) + if bv, bok := mk.([]byte); bok { + mk = d.string(bv) + } + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uintptr(dd.DecodeUint(uintBitsize)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapIntfIntR(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[interface{}]int) + v, changed := fastpathTV.DecMapIntfIntV(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[interface{}]int) + fastpathTV.DecMapIntfIntV(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapIntfIntX(vp *map[interface{}]int, checkNil bool, d *Decoder) { + v, changed := f.DecMapIntfIntV(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapIntfIntV(v map[interface{}]int, checkNil bool, canChange bool, + d *Decoder) (_ map[interface{}]int, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 24) + v = make(map[interface{}]int, xlen) + changed = true + } + + var mk interface{} + var mv int + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = nil + d.decode(&mk) + if bv, bok := mk.([]byte); bok { + mk = d.string(bv) + } + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int(dd.DecodeInt(intBitsize)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = nil + d.decode(&mk) + if bv, bok := mk.([]byte); bok { + mk = d.string(bv) + } + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int(dd.DecodeInt(intBitsize)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapIntfInt8R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[interface{}]int8) + v, changed := fastpathTV.DecMapIntfInt8V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[interface{}]int8) + fastpathTV.DecMapIntfInt8V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapIntfInt8X(vp *map[interface{}]int8, checkNil bool, d *Decoder) { + v, changed := f.DecMapIntfInt8V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapIntfInt8V(v map[interface{}]int8, checkNil bool, canChange bool, + d *Decoder) (_ map[interface{}]int8, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := 
dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 17) + v = make(map[interface{}]int8, xlen) + changed = true + } + + var mk interface{} + var mv int8 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = nil + d.decode(&mk) + if bv, bok := mk.([]byte); bok { + mk = d.string(bv) + } + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int8(dd.DecodeInt(8)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = nil + d.decode(&mk) + if bv, bok := mk.([]byte); bok { + mk = d.string(bv) + } + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int8(dd.DecodeInt(8)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapIntfInt16R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[interface{}]int16) + v, changed := fastpathTV.DecMapIntfInt16V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[interface{}]int16) + fastpathTV.DecMapIntfInt16V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapIntfInt16X(vp *map[interface{}]int16, checkNil bool, d *Decoder) { + v, changed := f.DecMapIntfInt16V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapIntfInt16V(v map[interface{}]int16, checkNil bool, canChange bool, + d *Decoder) (_ map[interface{}]int16, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 18) + v = make(map[interface{}]int16, xlen) + changed = true + } + + var mk interface{} + var mv int16 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = nil + d.decode(&mk) + if bv, bok := mk.([]byte); bok { + mk = d.string(bv) + } + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int16(dd.DecodeInt(16)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = nil + d.decode(&mk) + if bv, bok := mk.([]byte); bok { + mk = d.string(bv) + } + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int16(dd.DecodeInt(16)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapIntfInt32R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[interface{}]int32) + v, changed := fastpathTV.DecMapIntfInt32V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[interface{}]int32) + fastpathTV.DecMapIntfInt32V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapIntfInt32X(vp *map[interface{}]int32, checkNil bool, d *Decoder) { + v, changed := f.DecMapIntfInt32V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapIntfInt32V(v map[interface{}]int32, checkNil bool, canChange bool, + d *Decoder) (_ map[interface{}]int32, changed bool) { + 
dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 20) + v = make(map[interface{}]int32, xlen) + changed = true + } + + var mk interface{} + var mv int32 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = nil + d.decode(&mk) + if bv, bok := mk.([]byte); bok { + mk = d.string(bv) + } + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int32(dd.DecodeInt(32)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = nil + d.decode(&mk) + if bv, bok := mk.([]byte); bok { + mk = d.string(bv) + } + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int32(dd.DecodeInt(32)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapIntfInt64R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[interface{}]int64) + v, changed := fastpathTV.DecMapIntfInt64V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[interface{}]int64) + fastpathTV.DecMapIntfInt64V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapIntfInt64X(vp *map[interface{}]int64, checkNil bool, d *Decoder) { + v, changed := f.DecMapIntfInt64V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapIntfInt64V(v map[interface{}]int64, checkNil bool, canChange bool, + d *Decoder) (_ map[interface{}]int64, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 24) + v = make(map[interface{}]int64, xlen) + changed = true + } + + var mk interface{} + var mv int64 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = nil + d.decode(&mk) + if bv, bok := mk.([]byte); bok { + mk = d.string(bv) + } + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeInt(64) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = nil + d.decode(&mk) + if bv, bok := mk.([]byte); bok { + mk = d.string(bv) + } + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeInt(64) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapIntfFloat32R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[interface{}]float32) + v, changed := fastpathTV.DecMapIntfFloat32V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[interface{}]float32) + fastpathTV.DecMapIntfFloat32V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapIntfFloat32X(vp *map[interface{}]float32, checkNil bool, d *Decoder) { + v, changed := f.DecMapIntfFloat32V(*vp, checkNil, true, d) + if changed { + *vp = v + 
} +} +func (_ fastpathT) DecMapIntfFloat32V(v map[interface{}]float32, checkNil bool, canChange bool, + d *Decoder) (_ map[interface{}]float32, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 20) + v = make(map[interface{}]float32, xlen) + changed = true + } + + var mk interface{} + var mv float32 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = nil + d.decode(&mk) + if bv, bok := mk.([]byte); bok { + mk = d.string(bv) + } + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = float32(dd.DecodeFloat(true)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = nil + d.decode(&mk) + if bv, bok := mk.([]byte); bok { + mk = d.string(bv) + } + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = float32(dd.DecodeFloat(true)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapIntfFloat64R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[interface{}]float64) + v, changed := fastpathTV.DecMapIntfFloat64V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[interface{}]float64) + fastpathTV.DecMapIntfFloat64V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapIntfFloat64X(vp *map[interface{}]float64, checkNil bool, d *Decoder) { + v, changed := f.DecMapIntfFloat64V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapIntfFloat64V(v map[interface{}]float64, checkNil bool, canChange bool, + d *Decoder) (_ map[interface{}]float64, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 24) + v = make(map[interface{}]float64, xlen) + changed = true + } + + var mk interface{} + var mv float64 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = nil + d.decode(&mk) + if bv, bok := mk.([]byte); bok { + mk = d.string(bv) + } + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeFloat(false) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = nil + d.decode(&mk) + if bv, bok := mk.([]byte); bok { + mk = d.string(bv) + } + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeFloat(false) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapIntfBoolR(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[interface{}]bool) + v, changed := fastpathTV.DecMapIntfBoolV(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[interface{}]bool) + fastpathTV.DecMapIntfBoolV(v, fastpathCheckNilFalse, 
false, f.d) + } +} +func (f fastpathT) DecMapIntfBoolX(vp *map[interface{}]bool, checkNil bool, d *Decoder) { + v, changed := f.DecMapIntfBoolV(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapIntfBoolV(v map[interface{}]bool, checkNil bool, canChange bool, + d *Decoder) (_ map[interface{}]bool, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 17) + v = make(map[interface{}]bool, xlen) + changed = true + } + + var mk interface{} + var mv bool + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = nil + d.decode(&mk) + if bv, bok := mk.([]byte); bok { + mk = d.string(bv) + } + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeBool() + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = nil + d.decode(&mk) + if bv, bok := mk.([]byte); bok { + mk = d.string(bv) + } + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeBool() + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapStringIntfR(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[string]interface{}) + v, changed := fastpathTV.DecMapStringIntfV(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[string]interface{}) + fastpathTV.DecMapStringIntfV(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapStringIntfX(vp *map[string]interface{}, checkNil bool, d *Decoder) { + v, changed := f.DecMapStringIntfV(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapStringIntfV(v map[string]interface{}, checkNil bool, canChange bool, + d *Decoder) (_ map[string]interface{}, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 32) + v = make(map[string]interface{}, xlen) + changed = true + } + mapGet := !d.h.MapValueReset && !d.h.InterfaceReset + var mk string + var mv interface{} + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeString() + if cr != nil { + cr.sendContainerState(containerMapValue) + } + if mapGet { + mv = v[mk] + } else { + mv = nil + } + d.decode(&mv) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeString() + if cr != nil { + cr.sendContainerState(containerMapValue) + } + if mapGet { + mv = v[mk] + } else { + mv = nil + } + d.decode(&mv) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapStringStringR(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[string]string) + v, changed := fastpathTV.DecMapStringStringV(*vp, fastpathCheckNilFalse, 
true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[string]string) + fastpathTV.DecMapStringStringV(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapStringStringX(vp *map[string]string, checkNil bool, d *Decoder) { + v, changed := f.DecMapStringStringV(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapStringStringV(v map[string]string, checkNil bool, canChange bool, + d *Decoder) (_ map[string]string, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 32) + v = make(map[string]string, xlen) + changed = true + } + + var mk string + var mv string + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeString() + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeString() + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeString() + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeString() + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapStringUintR(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[string]uint) + v, changed := fastpathTV.DecMapStringUintV(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[string]uint) + fastpathTV.DecMapStringUintV(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapStringUintX(vp *map[string]uint, checkNil bool, d *Decoder) { + v, changed := f.DecMapStringUintV(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapStringUintV(v map[string]uint, checkNil bool, canChange bool, + d *Decoder) (_ map[string]uint, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 24) + v = make(map[string]uint, xlen) + changed = true + } + + var mk string + var mv uint + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeString() + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint(dd.DecodeUint(uintBitsize)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeString() + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint(dd.DecodeUint(uintBitsize)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapStringUint8R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[string]uint8) + v, changed := fastpathTV.DecMapStringUint8V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[string]uint8) + fastpathTV.DecMapStringUint8V(v, 
fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapStringUint8X(vp *map[string]uint8, checkNil bool, d *Decoder) { + v, changed := f.DecMapStringUint8V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapStringUint8V(v map[string]uint8, checkNil bool, canChange bool, + d *Decoder) (_ map[string]uint8, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 17) + v = make(map[string]uint8, xlen) + changed = true + } + + var mk string + var mv uint8 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeString() + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint8(dd.DecodeUint(8)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeString() + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint8(dd.DecodeUint(8)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapStringUint16R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[string]uint16) + v, changed := fastpathTV.DecMapStringUint16V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[string]uint16) + fastpathTV.DecMapStringUint16V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapStringUint16X(vp *map[string]uint16, checkNil bool, d *Decoder) { + v, changed := f.DecMapStringUint16V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapStringUint16V(v map[string]uint16, checkNil bool, canChange bool, + d *Decoder) (_ map[string]uint16, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 18) + v = make(map[string]uint16, xlen) + changed = true + } + + var mk string + var mv uint16 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeString() + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint16(dd.DecodeUint(16)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeString() + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint16(dd.DecodeUint(16)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapStringUint32R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[string]uint32) + v, changed := fastpathTV.DecMapStringUint32V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[string]uint32) + fastpathTV.DecMapStringUint32V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapStringUint32X(vp *map[string]uint32, checkNil 
bool, d *Decoder) { + v, changed := f.DecMapStringUint32V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapStringUint32V(v map[string]uint32, checkNil bool, canChange bool, + d *Decoder) (_ map[string]uint32, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 20) + v = make(map[string]uint32, xlen) + changed = true + } + + var mk string + var mv uint32 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeString() + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint32(dd.DecodeUint(32)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeString() + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint32(dd.DecodeUint(32)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapStringUint64R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[string]uint64) + v, changed := fastpathTV.DecMapStringUint64V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[string]uint64) + fastpathTV.DecMapStringUint64V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapStringUint64X(vp *map[string]uint64, checkNil bool, d *Decoder) { + v, changed := f.DecMapStringUint64V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapStringUint64V(v map[string]uint64, checkNil bool, canChange bool, + d *Decoder) (_ map[string]uint64, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 24) + v = make(map[string]uint64, xlen) + changed = true + } + + var mk string + var mv uint64 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeString() + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeUint(64) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeString() + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeUint(64) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapStringUintptrR(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[string]uintptr) + v, changed := fastpathTV.DecMapStringUintptrV(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[string]uintptr) + fastpathTV.DecMapStringUintptrV(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapStringUintptrX(vp *map[string]uintptr, checkNil bool, d *Decoder) { + v, changed := f.DecMapStringUintptrV(*vp, checkNil, true, d) + if changed { + *vp = v + } 
+} +func (_ fastpathT) DecMapStringUintptrV(v map[string]uintptr, checkNil bool, canChange bool, + d *Decoder) (_ map[string]uintptr, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 24) + v = make(map[string]uintptr, xlen) + changed = true + } + + var mk string + var mv uintptr + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeString() + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uintptr(dd.DecodeUint(uintBitsize)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeString() + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uintptr(dd.DecodeUint(uintBitsize)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapStringIntR(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[string]int) + v, changed := fastpathTV.DecMapStringIntV(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[string]int) + fastpathTV.DecMapStringIntV(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapStringIntX(vp *map[string]int, checkNil bool, d *Decoder) { + v, changed := f.DecMapStringIntV(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapStringIntV(v map[string]int, checkNil bool, canChange bool, + d *Decoder) (_ map[string]int, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 24) + v = make(map[string]int, xlen) + changed = true + } + + var mk string + var mv int + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeString() + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int(dd.DecodeInt(intBitsize)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeString() + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int(dd.DecodeInt(intBitsize)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapStringInt8R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[string]int8) + v, changed := fastpathTV.DecMapStringInt8V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[string]int8) + fastpathTV.DecMapStringInt8V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapStringInt8X(vp *map[string]int8, checkNil bool, d *Decoder) { + v, changed := f.DecMapStringInt8V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapStringInt8V(v map[string]int8, checkNil bool, canChange bool, + d *Decoder) (_ map[string]int8, 
changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 17) + v = make(map[string]int8, xlen) + changed = true + } + + var mk string + var mv int8 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeString() + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int8(dd.DecodeInt(8)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeString() + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int8(dd.DecodeInt(8)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapStringInt16R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[string]int16) + v, changed := fastpathTV.DecMapStringInt16V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[string]int16) + fastpathTV.DecMapStringInt16V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapStringInt16X(vp *map[string]int16, checkNil bool, d *Decoder) { + v, changed := f.DecMapStringInt16V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapStringInt16V(v map[string]int16, checkNil bool, canChange bool, + d *Decoder) (_ map[string]int16, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 18) + v = make(map[string]int16, xlen) + changed = true + } + + var mk string + var mv int16 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeString() + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int16(dd.DecodeInt(16)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeString() + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int16(dd.DecodeInt(16)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapStringInt32R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[string]int32) + v, changed := fastpathTV.DecMapStringInt32V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[string]int32) + fastpathTV.DecMapStringInt32V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapStringInt32X(vp *map[string]int32, checkNil bool, d *Decoder) { + v, changed := f.DecMapStringInt32V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapStringInt32V(v map[string]int32, checkNil bool, canChange bool, + d *Decoder) (_ map[string]int32, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + 
} + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 20) + v = make(map[string]int32, xlen) + changed = true + } + + var mk string + var mv int32 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeString() + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int32(dd.DecodeInt(32)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeString() + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int32(dd.DecodeInt(32)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapStringInt64R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[string]int64) + v, changed := fastpathTV.DecMapStringInt64V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[string]int64) + fastpathTV.DecMapStringInt64V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapStringInt64X(vp *map[string]int64, checkNil bool, d *Decoder) { + v, changed := f.DecMapStringInt64V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapStringInt64V(v map[string]int64, checkNil bool, canChange bool, + d *Decoder) (_ map[string]int64, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 24) + v = make(map[string]int64, xlen) + changed = true + } + + var mk string + var mv int64 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeString() + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeInt(64) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeString() + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeInt(64) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapStringFloat32R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[string]float32) + v, changed := fastpathTV.DecMapStringFloat32V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[string]float32) + fastpathTV.DecMapStringFloat32V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapStringFloat32X(vp *map[string]float32, checkNil bool, d *Decoder) { + v, changed := f.DecMapStringFloat32V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapStringFloat32V(v map[string]float32, checkNil bool, canChange bool, + d *Decoder) (_ map[string]float32, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 20) 
+ v = make(map[string]float32, xlen) + changed = true + } + + var mk string + var mv float32 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeString() + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = float32(dd.DecodeFloat(true)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeString() + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = float32(dd.DecodeFloat(true)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapStringFloat64R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[string]float64) + v, changed := fastpathTV.DecMapStringFloat64V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[string]float64) + fastpathTV.DecMapStringFloat64V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapStringFloat64X(vp *map[string]float64, checkNil bool, d *Decoder) { + v, changed := f.DecMapStringFloat64V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapStringFloat64V(v map[string]float64, checkNil bool, canChange bool, + d *Decoder) (_ map[string]float64, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 24) + v = make(map[string]float64, xlen) + changed = true + } + + var mk string + var mv float64 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeString() + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeFloat(false) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeString() + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeFloat(false) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapStringBoolR(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[string]bool) + v, changed := fastpathTV.DecMapStringBoolV(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[string]bool) + fastpathTV.DecMapStringBoolV(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapStringBoolX(vp *map[string]bool, checkNil bool, d *Decoder) { + v, changed := f.DecMapStringBoolV(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapStringBoolV(v map[string]bool, checkNil bool, canChange bool, + d *Decoder) (_ map[string]bool, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 17) + v = make(map[string]bool, xlen) + changed = true + } + + var mk string + var mv bool + if containerLen > 0 
{ + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeString() + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeBool() + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeString() + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeBool() + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapFloat32IntfR(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[float32]interface{}) + v, changed := fastpathTV.DecMapFloat32IntfV(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[float32]interface{}) + fastpathTV.DecMapFloat32IntfV(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapFloat32IntfX(vp *map[float32]interface{}, checkNil bool, d *Decoder) { + v, changed := f.DecMapFloat32IntfV(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapFloat32IntfV(v map[float32]interface{}, checkNil bool, canChange bool, + d *Decoder) (_ map[float32]interface{}, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 20) + v = make(map[float32]interface{}, xlen) + changed = true + } + mapGet := !d.h.MapValueReset && !d.h.InterfaceReset + var mk float32 + var mv interface{} + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = float32(dd.DecodeFloat(true)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + if mapGet { + mv = v[mk] + } else { + mv = nil + } + d.decode(&mv) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = float32(dd.DecodeFloat(true)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + if mapGet { + mv = v[mk] + } else { + mv = nil + } + d.decode(&mv) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapFloat32StringR(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[float32]string) + v, changed := fastpathTV.DecMapFloat32StringV(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[float32]string) + fastpathTV.DecMapFloat32StringV(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapFloat32StringX(vp *map[float32]string, checkNil bool, d *Decoder) { + v, changed := f.DecMapFloat32StringV(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapFloat32StringV(v map[float32]string, checkNil bool, canChange bool, + d *Decoder) (_ map[float32]string, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 20) + v = make(map[float32]string, xlen) + 
changed = true + } + + var mk float32 + var mv string + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = float32(dd.DecodeFloat(true)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeString() + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = float32(dd.DecodeFloat(true)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeString() + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapFloat32UintR(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[float32]uint) + v, changed := fastpathTV.DecMapFloat32UintV(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[float32]uint) + fastpathTV.DecMapFloat32UintV(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapFloat32UintX(vp *map[float32]uint, checkNil bool, d *Decoder) { + v, changed := f.DecMapFloat32UintV(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapFloat32UintV(v map[float32]uint, checkNil bool, canChange bool, + d *Decoder) (_ map[float32]uint, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 12) + v = make(map[float32]uint, xlen) + changed = true + } + + var mk float32 + var mv uint + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = float32(dd.DecodeFloat(true)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint(dd.DecodeUint(uintBitsize)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = float32(dd.DecodeFloat(true)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint(dd.DecodeUint(uintBitsize)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapFloat32Uint8R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[float32]uint8) + v, changed := fastpathTV.DecMapFloat32Uint8V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[float32]uint8) + fastpathTV.DecMapFloat32Uint8V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapFloat32Uint8X(vp *map[float32]uint8, checkNil bool, d *Decoder) { + v, changed := f.DecMapFloat32Uint8V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapFloat32Uint8V(v map[float32]uint8, checkNil bool, canChange bool, + d *Decoder) (_ map[float32]uint8, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 5) + v = make(map[float32]uint8, xlen) + changed = true + } + + var mk float32 + var mv uint8 + if 
containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = float32(dd.DecodeFloat(true)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint8(dd.DecodeUint(8)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = float32(dd.DecodeFloat(true)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint8(dd.DecodeUint(8)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapFloat32Uint16R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[float32]uint16) + v, changed := fastpathTV.DecMapFloat32Uint16V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[float32]uint16) + fastpathTV.DecMapFloat32Uint16V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapFloat32Uint16X(vp *map[float32]uint16, checkNil bool, d *Decoder) { + v, changed := f.DecMapFloat32Uint16V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapFloat32Uint16V(v map[float32]uint16, checkNil bool, canChange bool, + d *Decoder) (_ map[float32]uint16, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 6) + v = make(map[float32]uint16, xlen) + changed = true + } + + var mk float32 + var mv uint16 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = float32(dd.DecodeFloat(true)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint16(dd.DecodeUint(16)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = float32(dd.DecodeFloat(true)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint16(dd.DecodeUint(16)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapFloat32Uint32R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[float32]uint32) + v, changed := fastpathTV.DecMapFloat32Uint32V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[float32]uint32) + fastpathTV.DecMapFloat32Uint32V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapFloat32Uint32X(vp *map[float32]uint32, checkNil bool, d *Decoder) { + v, changed := f.DecMapFloat32Uint32V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapFloat32Uint32V(v map[float32]uint32, checkNil bool, canChange bool, + d *Decoder) (_ map[float32]uint32, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 8) + v = make(map[float32]uint32, xlen) + changed = true + } + + var mk float32 + var mv uint32 + if containerLen > 0 { + for j := 0; j 
< containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = float32(dd.DecodeFloat(true)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint32(dd.DecodeUint(32)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = float32(dd.DecodeFloat(true)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint32(dd.DecodeUint(32)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapFloat32Uint64R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[float32]uint64) + v, changed := fastpathTV.DecMapFloat32Uint64V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[float32]uint64) + fastpathTV.DecMapFloat32Uint64V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapFloat32Uint64X(vp *map[float32]uint64, checkNil bool, d *Decoder) { + v, changed := f.DecMapFloat32Uint64V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapFloat32Uint64V(v map[float32]uint64, checkNil bool, canChange bool, + d *Decoder) (_ map[float32]uint64, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 12) + v = make(map[float32]uint64, xlen) + changed = true + } + + var mk float32 + var mv uint64 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = float32(dd.DecodeFloat(true)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeUint(64) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = float32(dd.DecodeFloat(true)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeUint(64) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapFloat32UintptrR(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[float32]uintptr) + v, changed := fastpathTV.DecMapFloat32UintptrV(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[float32]uintptr) + fastpathTV.DecMapFloat32UintptrV(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapFloat32UintptrX(vp *map[float32]uintptr, checkNil bool, d *Decoder) { + v, changed := f.DecMapFloat32UintptrV(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapFloat32UintptrV(v map[float32]uintptr, checkNil bool, canChange bool, + d *Decoder) (_ map[float32]uintptr, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 12) + v = make(map[float32]uintptr, xlen) + changed = true + } + + var mk float32 + var mv uintptr + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr 
!= nil { + cr.sendContainerState(containerMapKey) + } + mk = float32(dd.DecodeFloat(true)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uintptr(dd.DecodeUint(uintBitsize)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = float32(dd.DecodeFloat(true)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uintptr(dd.DecodeUint(uintBitsize)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapFloat32IntR(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[float32]int) + v, changed := fastpathTV.DecMapFloat32IntV(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[float32]int) + fastpathTV.DecMapFloat32IntV(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapFloat32IntX(vp *map[float32]int, checkNil bool, d *Decoder) { + v, changed := f.DecMapFloat32IntV(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapFloat32IntV(v map[float32]int, checkNil bool, canChange bool, + d *Decoder) (_ map[float32]int, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 12) + v = make(map[float32]int, xlen) + changed = true + } + + var mk float32 + var mv int + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = float32(dd.DecodeFloat(true)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int(dd.DecodeInt(intBitsize)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = float32(dd.DecodeFloat(true)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int(dd.DecodeInt(intBitsize)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapFloat32Int8R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[float32]int8) + v, changed := fastpathTV.DecMapFloat32Int8V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[float32]int8) + fastpathTV.DecMapFloat32Int8V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapFloat32Int8X(vp *map[float32]int8, checkNil bool, d *Decoder) { + v, changed := f.DecMapFloat32Int8V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapFloat32Int8V(v map[float32]int8, checkNil bool, canChange bool, + d *Decoder) (_ map[float32]int8, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 5) + v = make(map[float32]int8, xlen) + changed = true + } + + var mk float32 + var mv int8 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = 
float32(dd.DecodeFloat(true)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int8(dd.DecodeInt(8)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = float32(dd.DecodeFloat(true)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int8(dd.DecodeInt(8)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapFloat32Int16R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[float32]int16) + v, changed := fastpathTV.DecMapFloat32Int16V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[float32]int16) + fastpathTV.DecMapFloat32Int16V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapFloat32Int16X(vp *map[float32]int16, checkNil bool, d *Decoder) { + v, changed := f.DecMapFloat32Int16V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapFloat32Int16V(v map[float32]int16, checkNil bool, canChange bool, + d *Decoder) (_ map[float32]int16, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 6) + v = make(map[float32]int16, xlen) + changed = true + } + + var mk float32 + var mv int16 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = float32(dd.DecodeFloat(true)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int16(dd.DecodeInt(16)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = float32(dd.DecodeFloat(true)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int16(dd.DecodeInt(16)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapFloat32Int32R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[float32]int32) + v, changed := fastpathTV.DecMapFloat32Int32V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[float32]int32) + fastpathTV.DecMapFloat32Int32V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapFloat32Int32X(vp *map[float32]int32, checkNil bool, d *Decoder) { + v, changed := f.DecMapFloat32Int32V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapFloat32Int32V(v map[float32]int32, checkNil bool, canChange bool, + d *Decoder) (_ map[float32]int32, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 8) + v = make(map[float32]int32, xlen) + changed = true + } + + var mk float32 + var mv int32 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = float32(dd.DecodeFloat(true)) + if cr != nil { + 
cr.sendContainerState(containerMapValue) + } + mv = int32(dd.DecodeInt(32)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = float32(dd.DecodeFloat(true)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int32(dd.DecodeInt(32)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapFloat32Int64R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[float32]int64) + v, changed := fastpathTV.DecMapFloat32Int64V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[float32]int64) + fastpathTV.DecMapFloat32Int64V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapFloat32Int64X(vp *map[float32]int64, checkNil bool, d *Decoder) { + v, changed := f.DecMapFloat32Int64V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapFloat32Int64V(v map[float32]int64, checkNil bool, canChange bool, + d *Decoder) (_ map[float32]int64, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 12) + v = make(map[float32]int64, xlen) + changed = true + } + + var mk float32 + var mv int64 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = float32(dd.DecodeFloat(true)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeInt(64) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = float32(dd.DecodeFloat(true)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeInt(64) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapFloat32Float32R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[float32]float32) + v, changed := fastpathTV.DecMapFloat32Float32V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[float32]float32) + fastpathTV.DecMapFloat32Float32V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapFloat32Float32X(vp *map[float32]float32, checkNil bool, d *Decoder) { + v, changed := f.DecMapFloat32Float32V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapFloat32Float32V(v map[float32]float32, checkNil bool, canChange bool, + d *Decoder) (_ map[float32]float32, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 8) + v = make(map[float32]float32, xlen) + changed = true + } + + var mk float32 + var mv float32 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = float32(dd.DecodeFloat(true)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = 
float32(dd.DecodeFloat(true)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = float32(dd.DecodeFloat(true)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = float32(dd.DecodeFloat(true)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapFloat32Float64R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[float32]float64) + v, changed := fastpathTV.DecMapFloat32Float64V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[float32]float64) + fastpathTV.DecMapFloat32Float64V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapFloat32Float64X(vp *map[float32]float64, checkNil bool, d *Decoder) { + v, changed := f.DecMapFloat32Float64V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapFloat32Float64V(v map[float32]float64, checkNil bool, canChange bool, + d *Decoder) (_ map[float32]float64, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 12) + v = make(map[float32]float64, xlen) + changed = true + } + + var mk float32 + var mv float64 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = float32(dd.DecodeFloat(true)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeFloat(false) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = float32(dd.DecodeFloat(true)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeFloat(false) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapFloat32BoolR(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[float32]bool) + v, changed := fastpathTV.DecMapFloat32BoolV(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[float32]bool) + fastpathTV.DecMapFloat32BoolV(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapFloat32BoolX(vp *map[float32]bool, checkNil bool, d *Decoder) { + v, changed := f.DecMapFloat32BoolV(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapFloat32BoolV(v map[float32]bool, checkNil bool, canChange bool, + d *Decoder) (_ map[float32]bool, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 5) + v = make(map[float32]bool, xlen) + changed = true + } + + var mk float32 + var mv bool + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = float32(dd.DecodeFloat(true)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeBool() + if v != nil { + v[mk] = 
mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = float32(dd.DecodeFloat(true)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeBool() + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapFloat64IntfR(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[float64]interface{}) + v, changed := fastpathTV.DecMapFloat64IntfV(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[float64]interface{}) + fastpathTV.DecMapFloat64IntfV(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapFloat64IntfX(vp *map[float64]interface{}, checkNil bool, d *Decoder) { + v, changed := f.DecMapFloat64IntfV(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapFloat64IntfV(v map[float64]interface{}, checkNil bool, canChange bool, + d *Decoder) (_ map[float64]interface{}, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 24) + v = make(map[float64]interface{}, xlen) + changed = true + } + mapGet := !d.h.MapValueReset && !d.h.InterfaceReset + var mk float64 + var mv interface{} + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeFloat(false) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + if mapGet { + mv = v[mk] + } else { + mv = nil + } + d.decode(&mv) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeFloat(false) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + if mapGet { + mv = v[mk] + } else { + mv = nil + } + d.decode(&mv) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapFloat64StringR(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[float64]string) + v, changed := fastpathTV.DecMapFloat64StringV(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[float64]string) + fastpathTV.DecMapFloat64StringV(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapFloat64StringX(vp *map[float64]string, checkNil bool, d *Decoder) { + v, changed := f.DecMapFloat64StringV(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapFloat64StringV(v map[float64]string, checkNil bool, canChange bool, + d *Decoder) (_ map[float64]string, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 24) + v = make(map[float64]string, xlen) + changed = true + } + + var mk float64 + var mv string + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeFloat(false) + if cr != nil { + 
cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeString() + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeFloat(false) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeString() + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapFloat64UintR(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[float64]uint) + v, changed := fastpathTV.DecMapFloat64UintV(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[float64]uint) + fastpathTV.DecMapFloat64UintV(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapFloat64UintX(vp *map[float64]uint, checkNil bool, d *Decoder) { + v, changed := f.DecMapFloat64UintV(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapFloat64UintV(v map[float64]uint, checkNil bool, canChange bool, + d *Decoder) (_ map[float64]uint, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 16) + v = make(map[float64]uint, xlen) + changed = true + } + + var mk float64 + var mv uint + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeFloat(false) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint(dd.DecodeUint(uintBitsize)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeFloat(false) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint(dd.DecodeUint(uintBitsize)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapFloat64Uint8R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[float64]uint8) + v, changed := fastpathTV.DecMapFloat64Uint8V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[float64]uint8) + fastpathTV.DecMapFloat64Uint8V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapFloat64Uint8X(vp *map[float64]uint8, checkNil bool, d *Decoder) { + v, changed := f.DecMapFloat64Uint8V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapFloat64Uint8V(v map[float64]uint8, checkNil bool, canChange bool, + d *Decoder) (_ map[float64]uint8, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 9) + v = make(map[float64]uint8, xlen) + changed = true + } + + var mk float64 + var mv uint8 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeFloat(false) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint8(dd.DecodeUint(8)) + if v != nil { + v[mk] = 
mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeFloat(false) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint8(dd.DecodeUint(8)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapFloat64Uint16R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[float64]uint16) + v, changed := fastpathTV.DecMapFloat64Uint16V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[float64]uint16) + fastpathTV.DecMapFloat64Uint16V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapFloat64Uint16X(vp *map[float64]uint16, checkNil bool, d *Decoder) { + v, changed := f.DecMapFloat64Uint16V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapFloat64Uint16V(v map[float64]uint16, checkNil bool, canChange bool, + d *Decoder) (_ map[float64]uint16, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 10) + v = make(map[float64]uint16, xlen) + changed = true + } + + var mk float64 + var mv uint16 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeFloat(false) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint16(dd.DecodeUint(16)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeFloat(false) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint16(dd.DecodeUint(16)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapFloat64Uint32R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[float64]uint32) + v, changed := fastpathTV.DecMapFloat64Uint32V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[float64]uint32) + fastpathTV.DecMapFloat64Uint32V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapFloat64Uint32X(vp *map[float64]uint32, checkNil bool, d *Decoder) { + v, changed := f.DecMapFloat64Uint32V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapFloat64Uint32V(v map[float64]uint32, checkNil bool, canChange bool, + d *Decoder) (_ map[float64]uint32, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 12) + v = make(map[float64]uint32, xlen) + changed = true + } + + var mk float64 + var mv uint32 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeFloat(false) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint32(dd.DecodeUint(32)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; 
!dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeFloat(false) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint32(dd.DecodeUint(32)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapFloat64Uint64R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[float64]uint64) + v, changed := fastpathTV.DecMapFloat64Uint64V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[float64]uint64) + fastpathTV.DecMapFloat64Uint64V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapFloat64Uint64X(vp *map[float64]uint64, checkNil bool, d *Decoder) { + v, changed := f.DecMapFloat64Uint64V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapFloat64Uint64V(v map[float64]uint64, checkNil bool, canChange bool, + d *Decoder) (_ map[float64]uint64, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 16) + v = make(map[float64]uint64, xlen) + changed = true + } + + var mk float64 + var mv uint64 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeFloat(false) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeUint(64) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeFloat(false) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeUint(64) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapFloat64UintptrR(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[float64]uintptr) + v, changed := fastpathTV.DecMapFloat64UintptrV(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[float64]uintptr) + fastpathTV.DecMapFloat64UintptrV(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapFloat64UintptrX(vp *map[float64]uintptr, checkNil bool, d *Decoder) { + v, changed := f.DecMapFloat64UintptrV(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapFloat64UintptrV(v map[float64]uintptr, checkNil bool, canChange bool, + d *Decoder) (_ map[float64]uintptr, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 16) + v = make(map[float64]uintptr, xlen) + changed = true + } + + var mk float64 + var mv uintptr + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeFloat(false) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uintptr(dd.DecodeUint(uintBitsize)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + 
cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeFloat(false) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uintptr(dd.DecodeUint(uintBitsize)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapFloat64IntR(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[float64]int) + v, changed := fastpathTV.DecMapFloat64IntV(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[float64]int) + fastpathTV.DecMapFloat64IntV(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapFloat64IntX(vp *map[float64]int, checkNil bool, d *Decoder) { + v, changed := f.DecMapFloat64IntV(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapFloat64IntV(v map[float64]int, checkNil bool, canChange bool, + d *Decoder) (_ map[float64]int, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 16) + v = make(map[float64]int, xlen) + changed = true + } + + var mk float64 + var mv int + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeFloat(false) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int(dd.DecodeInt(intBitsize)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeFloat(false) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int(dd.DecodeInt(intBitsize)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapFloat64Int8R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[float64]int8) + v, changed := fastpathTV.DecMapFloat64Int8V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[float64]int8) + fastpathTV.DecMapFloat64Int8V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapFloat64Int8X(vp *map[float64]int8, checkNil bool, d *Decoder) { + v, changed := f.DecMapFloat64Int8V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapFloat64Int8V(v map[float64]int8, checkNil bool, canChange bool, + d *Decoder) (_ map[float64]int8, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 9) + v = make(map[float64]int8, xlen) + changed = true + } + + var mk float64 + var mv int8 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeFloat(false) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int8(dd.DecodeInt(8)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeFloat(false) + if cr != nil { + 
cr.sendContainerState(containerMapValue) + } + mv = int8(dd.DecodeInt(8)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapFloat64Int16R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[float64]int16) + v, changed := fastpathTV.DecMapFloat64Int16V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[float64]int16) + fastpathTV.DecMapFloat64Int16V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapFloat64Int16X(vp *map[float64]int16, checkNil bool, d *Decoder) { + v, changed := f.DecMapFloat64Int16V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapFloat64Int16V(v map[float64]int16, checkNil bool, canChange bool, + d *Decoder) (_ map[float64]int16, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 10) + v = make(map[float64]int16, xlen) + changed = true + } + + var mk float64 + var mv int16 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeFloat(false) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int16(dd.DecodeInt(16)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeFloat(false) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int16(dd.DecodeInt(16)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapFloat64Int32R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[float64]int32) + v, changed := fastpathTV.DecMapFloat64Int32V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[float64]int32) + fastpathTV.DecMapFloat64Int32V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapFloat64Int32X(vp *map[float64]int32, checkNil bool, d *Decoder) { + v, changed := f.DecMapFloat64Int32V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapFloat64Int32V(v map[float64]int32, checkNil bool, canChange bool, + d *Decoder) (_ map[float64]int32, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 12) + v = make(map[float64]int32, xlen) + changed = true + } + + var mk float64 + var mv int32 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeFloat(false) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int32(dd.DecodeInt(32)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeFloat(false) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int32(dd.DecodeInt(32)) + if v != nil { + 
v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapFloat64Int64R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[float64]int64) + v, changed := fastpathTV.DecMapFloat64Int64V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[float64]int64) + fastpathTV.DecMapFloat64Int64V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapFloat64Int64X(vp *map[float64]int64, checkNil bool, d *Decoder) { + v, changed := f.DecMapFloat64Int64V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapFloat64Int64V(v map[float64]int64, checkNil bool, canChange bool, + d *Decoder) (_ map[float64]int64, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 16) + v = make(map[float64]int64, xlen) + changed = true + } + + var mk float64 + var mv int64 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeFloat(false) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeInt(64) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeFloat(false) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeInt(64) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapFloat64Float32R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[float64]float32) + v, changed := fastpathTV.DecMapFloat64Float32V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[float64]float32) + fastpathTV.DecMapFloat64Float32V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapFloat64Float32X(vp *map[float64]float32, checkNil bool, d *Decoder) { + v, changed := f.DecMapFloat64Float32V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapFloat64Float32V(v map[float64]float32, checkNil bool, canChange bool, + d *Decoder) (_ map[float64]float32, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 12) + v = make(map[float64]float32, xlen) + changed = true + } + + var mk float64 + var mv float32 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeFloat(false) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = float32(dd.DecodeFloat(true)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeFloat(false) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = float32(dd.DecodeFloat(true)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + 
cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapFloat64Float64R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[float64]float64) + v, changed := fastpathTV.DecMapFloat64Float64V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[float64]float64) + fastpathTV.DecMapFloat64Float64V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapFloat64Float64X(vp *map[float64]float64, checkNil bool, d *Decoder) { + v, changed := f.DecMapFloat64Float64V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapFloat64Float64V(v map[float64]float64, checkNil bool, canChange bool, + d *Decoder) (_ map[float64]float64, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 16) + v = make(map[float64]float64, xlen) + changed = true + } + + var mk float64 + var mv float64 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeFloat(false) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeFloat(false) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeFloat(false) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeFloat(false) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapFloat64BoolR(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[float64]bool) + v, changed := fastpathTV.DecMapFloat64BoolV(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[float64]bool) + fastpathTV.DecMapFloat64BoolV(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapFloat64BoolX(vp *map[float64]bool, checkNil bool, d *Decoder) { + v, changed := f.DecMapFloat64BoolV(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapFloat64BoolV(v map[float64]bool, checkNil bool, canChange bool, + d *Decoder) (_ map[float64]bool, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 9) + v = make(map[float64]bool, xlen) + changed = true + } + + var mk float64 + var mv bool + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeFloat(false) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeBool() + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeFloat(false) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeBool() + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) 
fastpathDecMapUintIntfR(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[uint]interface{}) + v, changed := fastpathTV.DecMapUintIntfV(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[uint]interface{}) + fastpathTV.DecMapUintIntfV(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapUintIntfX(vp *map[uint]interface{}, checkNil bool, d *Decoder) { + v, changed := f.DecMapUintIntfV(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUintIntfV(v map[uint]interface{}, checkNil bool, canChange bool, + d *Decoder) (_ map[uint]interface{}, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 24) + v = make(map[uint]interface{}, xlen) + changed = true + } + mapGet := !d.h.MapValueReset && !d.h.InterfaceReset + var mk uint + var mv interface{} + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint(dd.DecodeUint(uintBitsize)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + if mapGet { + mv = v[mk] + } else { + mv = nil + } + d.decode(&mv) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint(dd.DecodeUint(uintBitsize)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + if mapGet { + mv = v[mk] + } else { + mv = nil + } + d.decode(&mv) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapUintStringR(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[uint]string) + v, changed := fastpathTV.DecMapUintStringV(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[uint]string) + fastpathTV.DecMapUintStringV(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapUintStringX(vp *map[uint]string, checkNil bool, d *Decoder) { + v, changed := f.DecMapUintStringV(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUintStringV(v map[uint]string, checkNil bool, canChange bool, + d *Decoder) (_ map[uint]string, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 24) + v = make(map[uint]string, xlen) + changed = true + } + + var mk uint + var mv string + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint(dd.DecodeUint(uintBitsize)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeString() + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint(dd.DecodeUint(uintBitsize)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeString() + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return 
v, changed +} + +func (f *decFnInfo) fastpathDecMapUintUintR(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[uint]uint) + v, changed := fastpathTV.DecMapUintUintV(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[uint]uint) + fastpathTV.DecMapUintUintV(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapUintUintX(vp *map[uint]uint, checkNil bool, d *Decoder) { + v, changed := f.DecMapUintUintV(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUintUintV(v map[uint]uint, checkNil bool, canChange bool, + d *Decoder) (_ map[uint]uint, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 16) + v = make(map[uint]uint, xlen) + changed = true + } + + var mk uint + var mv uint + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint(dd.DecodeUint(uintBitsize)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint(dd.DecodeUint(uintBitsize)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint(dd.DecodeUint(uintBitsize)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint(dd.DecodeUint(uintBitsize)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapUintUint8R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[uint]uint8) + v, changed := fastpathTV.DecMapUintUint8V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[uint]uint8) + fastpathTV.DecMapUintUint8V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapUintUint8X(vp *map[uint]uint8, checkNil bool, d *Decoder) { + v, changed := f.DecMapUintUint8V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUintUint8V(v map[uint]uint8, checkNil bool, canChange bool, + d *Decoder) (_ map[uint]uint8, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 9) + v = make(map[uint]uint8, xlen) + changed = true + } + + var mk uint + var mv uint8 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint(dd.DecodeUint(uintBitsize)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint8(dd.DecodeUint(8)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint(dd.DecodeUint(uintBitsize)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint8(dd.DecodeUint(8)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapUintUint16R(rv reflect.Value) { + if rv.CanAddr() { + vp := 
rv.Addr().Interface().(*map[uint]uint16) + v, changed := fastpathTV.DecMapUintUint16V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[uint]uint16) + fastpathTV.DecMapUintUint16V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapUintUint16X(vp *map[uint]uint16, checkNil bool, d *Decoder) { + v, changed := f.DecMapUintUint16V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUintUint16V(v map[uint]uint16, checkNil bool, canChange bool, + d *Decoder) (_ map[uint]uint16, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 10) + v = make(map[uint]uint16, xlen) + changed = true + } + + var mk uint + var mv uint16 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint(dd.DecodeUint(uintBitsize)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint16(dd.DecodeUint(16)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint(dd.DecodeUint(uintBitsize)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint16(dd.DecodeUint(16)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapUintUint32R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[uint]uint32) + v, changed := fastpathTV.DecMapUintUint32V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[uint]uint32) + fastpathTV.DecMapUintUint32V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapUintUint32X(vp *map[uint]uint32, checkNil bool, d *Decoder) { + v, changed := f.DecMapUintUint32V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUintUint32V(v map[uint]uint32, checkNil bool, canChange bool, + d *Decoder) (_ map[uint]uint32, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 12) + v = make(map[uint]uint32, xlen) + changed = true + } + + var mk uint + var mv uint32 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint(dd.DecodeUint(uintBitsize)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint32(dd.DecodeUint(32)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint(dd.DecodeUint(uintBitsize)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint32(dd.DecodeUint(32)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapUintUint64R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[uint]uint64) + v, changed := fastpathTV.DecMapUintUint64V(*vp, 
fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[uint]uint64) + fastpathTV.DecMapUintUint64V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapUintUint64X(vp *map[uint]uint64, checkNil bool, d *Decoder) { + v, changed := f.DecMapUintUint64V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUintUint64V(v map[uint]uint64, checkNil bool, canChange bool, + d *Decoder) (_ map[uint]uint64, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 16) + v = make(map[uint]uint64, xlen) + changed = true + } + + var mk uint + var mv uint64 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint(dd.DecodeUint(uintBitsize)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeUint(64) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint(dd.DecodeUint(uintBitsize)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeUint(64) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapUintUintptrR(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[uint]uintptr) + v, changed := fastpathTV.DecMapUintUintptrV(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[uint]uintptr) + fastpathTV.DecMapUintUintptrV(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapUintUintptrX(vp *map[uint]uintptr, checkNil bool, d *Decoder) { + v, changed := f.DecMapUintUintptrV(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUintUintptrV(v map[uint]uintptr, checkNil bool, canChange bool, + d *Decoder) (_ map[uint]uintptr, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 16) + v = make(map[uint]uintptr, xlen) + changed = true + } + + var mk uint + var mv uintptr + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint(dd.DecodeUint(uintBitsize)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uintptr(dd.DecodeUint(uintBitsize)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint(dd.DecodeUint(uintBitsize)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uintptr(dd.DecodeUint(uintBitsize)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapUintIntR(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[uint]int) + v, changed := fastpathTV.DecMapUintIntV(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := 
rv.Interface().(map[uint]int) + fastpathTV.DecMapUintIntV(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapUintIntX(vp *map[uint]int, checkNil bool, d *Decoder) { + v, changed := f.DecMapUintIntV(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUintIntV(v map[uint]int, checkNil bool, canChange bool, + d *Decoder) (_ map[uint]int, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 16) + v = make(map[uint]int, xlen) + changed = true + } + + var mk uint + var mv int + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint(dd.DecodeUint(uintBitsize)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int(dd.DecodeInt(intBitsize)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint(dd.DecodeUint(uintBitsize)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int(dd.DecodeInt(intBitsize)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapUintInt8R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[uint]int8) + v, changed := fastpathTV.DecMapUintInt8V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[uint]int8) + fastpathTV.DecMapUintInt8V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapUintInt8X(vp *map[uint]int8, checkNil bool, d *Decoder) { + v, changed := f.DecMapUintInt8V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUintInt8V(v map[uint]int8, checkNil bool, canChange bool, + d *Decoder) (_ map[uint]int8, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 9) + v = make(map[uint]int8, xlen) + changed = true + } + + var mk uint + var mv int8 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint(dd.DecodeUint(uintBitsize)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int8(dd.DecodeInt(8)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint(dd.DecodeUint(uintBitsize)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int8(dd.DecodeInt(8)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapUintInt16R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[uint]int16) + v, changed := fastpathTV.DecMapUintInt16V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[uint]int16) + fastpathTV.DecMapUintInt16V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapUintInt16X(vp 
*map[uint]int16, checkNil bool, d *Decoder) { + v, changed := f.DecMapUintInt16V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUintInt16V(v map[uint]int16, checkNil bool, canChange bool, + d *Decoder) (_ map[uint]int16, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 10) + v = make(map[uint]int16, xlen) + changed = true + } + + var mk uint + var mv int16 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint(dd.DecodeUint(uintBitsize)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int16(dd.DecodeInt(16)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint(dd.DecodeUint(uintBitsize)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int16(dd.DecodeInt(16)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapUintInt32R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[uint]int32) + v, changed := fastpathTV.DecMapUintInt32V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[uint]int32) + fastpathTV.DecMapUintInt32V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapUintInt32X(vp *map[uint]int32, checkNil bool, d *Decoder) { + v, changed := f.DecMapUintInt32V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUintInt32V(v map[uint]int32, checkNil bool, canChange bool, + d *Decoder) (_ map[uint]int32, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 12) + v = make(map[uint]int32, xlen) + changed = true + } + + var mk uint + var mv int32 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint(dd.DecodeUint(uintBitsize)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int32(dd.DecodeInt(32)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint(dd.DecodeUint(uintBitsize)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int32(dd.DecodeInt(32)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapUintInt64R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[uint]int64) + v, changed := fastpathTV.DecMapUintInt64V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[uint]int64) + fastpathTV.DecMapUintInt64V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapUintInt64X(vp *map[uint]int64, checkNil bool, d *Decoder) { + v, changed := f.DecMapUintInt64V(*vp, checkNil, true, d) + if changed { + *vp = 
v + } +} +func (_ fastpathT) DecMapUintInt64V(v map[uint]int64, checkNil bool, canChange bool, + d *Decoder) (_ map[uint]int64, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 16) + v = make(map[uint]int64, xlen) + changed = true + } + + var mk uint + var mv int64 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint(dd.DecodeUint(uintBitsize)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeInt(64) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint(dd.DecodeUint(uintBitsize)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeInt(64) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapUintFloat32R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[uint]float32) + v, changed := fastpathTV.DecMapUintFloat32V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[uint]float32) + fastpathTV.DecMapUintFloat32V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapUintFloat32X(vp *map[uint]float32, checkNil bool, d *Decoder) { + v, changed := f.DecMapUintFloat32V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUintFloat32V(v map[uint]float32, checkNil bool, canChange bool, + d *Decoder) (_ map[uint]float32, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 12) + v = make(map[uint]float32, xlen) + changed = true + } + + var mk uint + var mv float32 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint(dd.DecodeUint(uintBitsize)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = float32(dd.DecodeFloat(true)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint(dd.DecodeUint(uintBitsize)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = float32(dd.DecodeFloat(true)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapUintFloat64R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[uint]float64) + v, changed := fastpathTV.DecMapUintFloat64V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[uint]float64) + fastpathTV.DecMapUintFloat64V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapUintFloat64X(vp *map[uint]float64, checkNil bool, d *Decoder) { + v, changed := f.DecMapUintFloat64V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUintFloat64V(v map[uint]float64, checkNil bool, 
canChange bool, + d *Decoder) (_ map[uint]float64, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 16) + v = make(map[uint]float64, xlen) + changed = true + } + + var mk uint + var mv float64 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint(dd.DecodeUint(uintBitsize)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeFloat(false) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint(dd.DecodeUint(uintBitsize)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeFloat(false) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapUintBoolR(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[uint]bool) + v, changed := fastpathTV.DecMapUintBoolV(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[uint]bool) + fastpathTV.DecMapUintBoolV(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapUintBoolX(vp *map[uint]bool, checkNil bool, d *Decoder) { + v, changed := f.DecMapUintBoolV(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUintBoolV(v map[uint]bool, checkNil bool, canChange bool, + d *Decoder) (_ map[uint]bool, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 9) + v = make(map[uint]bool, xlen) + changed = true + } + + var mk uint + var mv bool + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint(dd.DecodeUint(uintBitsize)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeBool() + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint(dd.DecodeUint(uintBitsize)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeBool() + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapUint8IntfR(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[uint8]interface{}) + v, changed := fastpathTV.DecMapUint8IntfV(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[uint8]interface{}) + fastpathTV.DecMapUint8IntfV(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapUint8IntfX(vp *map[uint8]interface{}, checkNil bool, d *Decoder) { + v, changed := f.DecMapUint8IntfV(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint8IntfV(v map[uint8]interface{}, checkNil bool, canChange bool, + d *Decoder) (_ map[uint8]interface{}, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && 
dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 17) + v = make(map[uint8]interface{}, xlen) + changed = true + } + mapGet := !d.h.MapValueReset && !d.h.InterfaceReset + var mk uint8 + var mv interface{} + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint8(dd.DecodeUint(8)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + if mapGet { + mv = v[mk] + } else { + mv = nil + } + d.decode(&mv) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint8(dd.DecodeUint(8)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + if mapGet { + mv = v[mk] + } else { + mv = nil + } + d.decode(&mv) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapUint8StringR(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[uint8]string) + v, changed := fastpathTV.DecMapUint8StringV(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[uint8]string) + fastpathTV.DecMapUint8StringV(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapUint8StringX(vp *map[uint8]string, checkNil bool, d *Decoder) { + v, changed := f.DecMapUint8StringV(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint8StringV(v map[uint8]string, checkNil bool, canChange bool, + d *Decoder) (_ map[uint8]string, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 17) + v = make(map[uint8]string, xlen) + changed = true + } + + var mk uint8 + var mv string + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint8(dd.DecodeUint(8)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeString() + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint8(dd.DecodeUint(8)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeString() + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapUint8UintR(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[uint8]uint) + v, changed := fastpathTV.DecMapUint8UintV(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[uint8]uint) + fastpathTV.DecMapUint8UintV(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapUint8UintX(vp *map[uint8]uint, checkNil bool, d *Decoder) { + v, changed := f.DecMapUint8UintV(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint8UintV(v map[uint8]uint, checkNil bool, canChange bool, + d *Decoder) (_ map[uint8]uint, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && 
dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 9) + v = make(map[uint8]uint, xlen) + changed = true + } + + var mk uint8 + var mv uint + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint8(dd.DecodeUint(8)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint(dd.DecodeUint(uintBitsize)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint8(dd.DecodeUint(8)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint(dd.DecodeUint(uintBitsize)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapUint8Uint8R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[uint8]uint8) + v, changed := fastpathTV.DecMapUint8Uint8V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[uint8]uint8) + fastpathTV.DecMapUint8Uint8V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapUint8Uint8X(vp *map[uint8]uint8, checkNil bool, d *Decoder) { + v, changed := f.DecMapUint8Uint8V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint8Uint8V(v map[uint8]uint8, checkNil bool, canChange bool, + d *Decoder) (_ map[uint8]uint8, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 2) + v = make(map[uint8]uint8, xlen) + changed = true + } + + var mk uint8 + var mv uint8 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint8(dd.DecodeUint(8)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint8(dd.DecodeUint(8)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint8(dd.DecodeUint(8)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint8(dd.DecodeUint(8)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapUint8Uint16R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[uint8]uint16) + v, changed := fastpathTV.DecMapUint8Uint16V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[uint8]uint16) + fastpathTV.DecMapUint8Uint16V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapUint8Uint16X(vp *map[uint8]uint16, checkNil bool, d *Decoder) { + v, changed := f.DecMapUint8Uint16V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint8Uint16V(v map[uint8]uint16, checkNil bool, canChange bool, + d *Decoder) (_ map[uint8]uint16, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := 
dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 3) + v = make(map[uint8]uint16, xlen) + changed = true + } + + var mk uint8 + var mv uint16 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint8(dd.DecodeUint(8)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint16(dd.DecodeUint(16)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint8(dd.DecodeUint(8)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint16(dd.DecodeUint(16)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapUint8Uint32R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[uint8]uint32) + v, changed := fastpathTV.DecMapUint8Uint32V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[uint8]uint32) + fastpathTV.DecMapUint8Uint32V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapUint8Uint32X(vp *map[uint8]uint32, checkNil bool, d *Decoder) { + v, changed := f.DecMapUint8Uint32V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint8Uint32V(v map[uint8]uint32, checkNil bool, canChange bool, + d *Decoder) (_ map[uint8]uint32, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 5) + v = make(map[uint8]uint32, xlen) + changed = true + } + + var mk uint8 + var mv uint32 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint8(dd.DecodeUint(8)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint32(dd.DecodeUint(32)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint8(dd.DecodeUint(8)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint32(dd.DecodeUint(32)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapUint8Uint64R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[uint8]uint64) + v, changed := fastpathTV.DecMapUint8Uint64V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[uint8]uint64) + fastpathTV.DecMapUint8Uint64V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapUint8Uint64X(vp *map[uint8]uint64, checkNil bool, d *Decoder) { + v, changed := f.DecMapUint8Uint64V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint8Uint64V(v map[uint8]uint64, checkNil bool, canChange bool, + d *Decoder) (_ map[uint8]uint64, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 9) + 
v = make(map[uint8]uint64, xlen) + changed = true + } + + var mk uint8 + var mv uint64 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint8(dd.DecodeUint(8)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeUint(64) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint8(dd.DecodeUint(8)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeUint(64) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapUint8UintptrR(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[uint8]uintptr) + v, changed := fastpathTV.DecMapUint8UintptrV(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[uint8]uintptr) + fastpathTV.DecMapUint8UintptrV(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapUint8UintptrX(vp *map[uint8]uintptr, checkNil bool, d *Decoder) { + v, changed := f.DecMapUint8UintptrV(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint8UintptrV(v map[uint8]uintptr, checkNil bool, canChange bool, + d *Decoder) (_ map[uint8]uintptr, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 9) + v = make(map[uint8]uintptr, xlen) + changed = true + } + + var mk uint8 + var mv uintptr + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint8(dd.DecodeUint(8)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uintptr(dd.DecodeUint(uintBitsize)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint8(dd.DecodeUint(8)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uintptr(dd.DecodeUint(uintBitsize)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapUint8IntR(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[uint8]int) + v, changed := fastpathTV.DecMapUint8IntV(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[uint8]int) + fastpathTV.DecMapUint8IntV(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapUint8IntX(vp *map[uint8]int, checkNil bool, d *Decoder) { + v, changed := f.DecMapUint8IntV(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint8IntV(v map[uint8]int, checkNil bool, canChange bool, + d *Decoder) (_ map[uint8]int, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 9) + v = make(map[uint8]int, xlen) + changed = true + } + + var mk uint8 + var mv int + if containerLen > 0 { + for j := 0; j < 
containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint8(dd.DecodeUint(8)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int(dd.DecodeInt(intBitsize)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint8(dd.DecodeUint(8)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int(dd.DecodeInt(intBitsize)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapUint8Int8R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[uint8]int8) + v, changed := fastpathTV.DecMapUint8Int8V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[uint8]int8) + fastpathTV.DecMapUint8Int8V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapUint8Int8X(vp *map[uint8]int8, checkNil bool, d *Decoder) { + v, changed := f.DecMapUint8Int8V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint8Int8V(v map[uint8]int8, checkNil bool, canChange bool, + d *Decoder) (_ map[uint8]int8, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 2) + v = make(map[uint8]int8, xlen) + changed = true + } + + var mk uint8 + var mv int8 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint8(dd.DecodeUint(8)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int8(dd.DecodeInt(8)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint8(dd.DecodeUint(8)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int8(dd.DecodeInt(8)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapUint8Int16R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[uint8]int16) + v, changed := fastpathTV.DecMapUint8Int16V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[uint8]int16) + fastpathTV.DecMapUint8Int16V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapUint8Int16X(vp *map[uint8]int16, checkNil bool, d *Decoder) { + v, changed := f.DecMapUint8Int16V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint8Int16V(v map[uint8]int16, checkNil bool, canChange bool, + d *Decoder) (_ map[uint8]int16, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 3) + v = make(map[uint8]int16, xlen) + changed = true + } + + var mk uint8 + var mv int16 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint8(dd.DecodeUint(8)) + if cr != nil { + 
cr.sendContainerState(containerMapValue) + } + mv = int16(dd.DecodeInt(16)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint8(dd.DecodeUint(8)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int16(dd.DecodeInt(16)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapUint8Int32R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[uint8]int32) + v, changed := fastpathTV.DecMapUint8Int32V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[uint8]int32) + fastpathTV.DecMapUint8Int32V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapUint8Int32X(vp *map[uint8]int32, checkNil bool, d *Decoder) { + v, changed := f.DecMapUint8Int32V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint8Int32V(v map[uint8]int32, checkNil bool, canChange bool, + d *Decoder) (_ map[uint8]int32, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 5) + v = make(map[uint8]int32, xlen) + changed = true + } + + var mk uint8 + var mv int32 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint8(dd.DecodeUint(8)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int32(dd.DecodeInt(32)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint8(dd.DecodeUint(8)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int32(dd.DecodeInt(32)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapUint8Int64R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[uint8]int64) + v, changed := fastpathTV.DecMapUint8Int64V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[uint8]int64) + fastpathTV.DecMapUint8Int64V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapUint8Int64X(vp *map[uint8]int64, checkNil bool, d *Decoder) { + v, changed := f.DecMapUint8Int64V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint8Int64V(v map[uint8]int64, checkNil bool, canChange bool, + d *Decoder) (_ map[uint8]int64, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 9) + v = make(map[uint8]int64, xlen) + changed = true + } + + var mk uint8 + var mv int64 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint8(dd.DecodeUint(8)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeInt(64) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + 
for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint8(dd.DecodeUint(8)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeInt(64) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapUint8Float32R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[uint8]float32) + v, changed := fastpathTV.DecMapUint8Float32V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[uint8]float32) + fastpathTV.DecMapUint8Float32V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapUint8Float32X(vp *map[uint8]float32, checkNil bool, d *Decoder) { + v, changed := f.DecMapUint8Float32V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint8Float32V(v map[uint8]float32, checkNil bool, canChange bool, + d *Decoder) (_ map[uint8]float32, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 5) + v = make(map[uint8]float32, xlen) + changed = true + } + + var mk uint8 + var mv float32 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint8(dd.DecodeUint(8)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = float32(dd.DecodeFloat(true)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint8(dd.DecodeUint(8)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = float32(dd.DecodeFloat(true)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapUint8Float64R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[uint8]float64) + v, changed := fastpathTV.DecMapUint8Float64V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[uint8]float64) + fastpathTV.DecMapUint8Float64V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapUint8Float64X(vp *map[uint8]float64, checkNil bool, d *Decoder) { + v, changed := f.DecMapUint8Float64V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint8Float64V(v map[uint8]float64, checkNil bool, canChange bool, + d *Decoder) (_ map[uint8]float64, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 9) + v = make(map[uint8]float64, xlen) + changed = true + } + + var mk uint8 + var mv float64 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint8(dd.DecodeUint(8)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeFloat(false) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + 
cr.sendContainerState(containerMapKey) + } + mk = uint8(dd.DecodeUint(8)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeFloat(false) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapUint8BoolR(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[uint8]bool) + v, changed := fastpathTV.DecMapUint8BoolV(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[uint8]bool) + fastpathTV.DecMapUint8BoolV(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapUint8BoolX(vp *map[uint8]bool, checkNil bool, d *Decoder) { + v, changed := f.DecMapUint8BoolV(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint8BoolV(v map[uint8]bool, checkNil bool, canChange bool, + d *Decoder) (_ map[uint8]bool, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 2) + v = make(map[uint8]bool, xlen) + changed = true + } + + var mk uint8 + var mv bool + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint8(dd.DecodeUint(8)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeBool() + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint8(dd.DecodeUint(8)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeBool() + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapUint16IntfR(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[uint16]interface{}) + v, changed := fastpathTV.DecMapUint16IntfV(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[uint16]interface{}) + fastpathTV.DecMapUint16IntfV(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapUint16IntfX(vp *map[uint16]interface{}, checkNil bool, d *Decoder) { + v, changed := f.DecMapUint16IntfV(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint16IntfV(v map[uint16]interface{}, checkNil bool, canChange bool, + d *Decoder) (_ map[uint16]interface{}, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 18) + v = make(map[uint16]interface{}, xlen) + changed = true + } + mapGet := !d.h.MapValueReset && !d.h.InterfaceReset + var mk uint16 + var mv interface{} + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint16(dd.DecodeUint(16)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + if mapGet { + mv = v[mk] + } else { + mv = nil + } + d.decode(&mv) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + 
cr.sendContainerState(containerMapKey) + } + mk = uint16(dd.DecodeUint(16)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + if mapGet { + mv = v[mk] + } else { + mv = nil + } + d.decode(&mv) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapUint16StringR(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[uint16]string) + v, changed := fastpathTV.DecMapUint16StringV(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[uint16]string) + fastpathTV.DecMapUint16StringV(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapUint16StringX(vp *map[uint16]string, checkNil bool, d *Decoder) { + v, changed := f.DecMapUint16StringV(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint16StringV(v map[uint16]string, checkNil bool, canChange bool, + d *Decoder) (_ map[uint16]string, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 18) + v = make(map[uint16]string, xlen) + changed = true + } + + var mk uint16 + var mv string + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint16(dd.DecodeUint(16)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeString() + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint16(dd.DecodeUint(16)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeString() + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapUint16UintR(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[uint16]uint) + v, changed := fastpathTV.DecMapUint16UintV(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[uint16]uint) + fastpathTV.DecMapUint16UintV(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapUint16UintX(vp *map[uint16]uint, checkNil bool, d *Decoder) { + v, changed := f.DecMapUint16UintV(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint16UintV(v map[uint16]uint, checkNil bool, canChange bool, + d *Decoder) (_ map[uint16]uint, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 10) + v = make(map[uint16]uint, xlen) + changed = true + } + + var mk uint16 + var mv uint + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint16(dd.DecodeUint(16)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint(dd.DecodeUint(uintBitsize)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = 
uint16(dd.DecodeUint(16)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint(dd.DecodeUint(uintBitsize)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapUint16Uint8R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[uint16]uint8) + v, changed := fastpathTV.DecMapUint16Uint8V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[uint16]uint8) + fastpathTV.DecMapUint16Uint8V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapUint16Uint8X(vp *map[uint16]uint8, checkNil bool, d *Decoder) { + v, changed := f.DecMapUint16Uint8V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint16Uint8V(v map[uint16]uint8, checkNil bool, canChange bool, + d *Decoder) (_ map[uint16]uint8, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 3) + v = make(map[uint16]uint8, xlen) + changed = true + } + + var mk uint16 + var mv uint8 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint16(dd.DecodeUint(16)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint8(dd.DecodeUint(8)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint16(dd.DecodeUint(16)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint8(dd.DecodeUint(8)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapUint16Uint16R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[uint16]uint16) + v, changed := fastpathTV.DecMapUint16Uint16V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[uint16]uint16) + fastpathTV.DecMapUint16Uint16V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapUint16Uint16X(vp *map[uint16]uint16, checkNil bool, d *Decoder) { + v, changed := f.DecMapUint16Uint16V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint16Uint16V(v map[uint16]uint16, checkNil bool, canChange bool, + d *Decoder) (_ map[uint16]uint16, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 4) + v = make(map[uint16]uint16, xlen) + changed = true + } + + var mk uint16 + var mv uint16 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint16(dd.DecodeUint(16)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint16(dd.DecodeUint(16)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint16(dd.DecodeUint(16)) + if cr != nil { + 
cr.sendContainerState(containerMapValue) + } + mv = uint16(dd.DecodeUint(16)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapUint16Uint32R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[uint16]uint32) + v, changed := fastpathTV.DecMapUint16Uint32V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[uint16]uint32) + fastpathTV.DecMapUint16Uint32V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapUint16Uint32X(vp *map[uint16]uint32, checkNil bool, d *Decoder) { + v, changed := f.DecMapUint16Uint32V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint16Uint32V(v map[uint16]uint32, checkNil bool, canChange bool, + d *Decoder) (_ map[uint16]uint32, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 6) + v = make(map[uint16]uint32, xlen) + changed = true + } + + var mk uint16 + var mv uint32 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint16(dd.DecodeUint(16)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint32(dd.DecodeUint(32)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint16(dd.DecodeUint(16)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint32(dd.DecodeUint(32)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapUint16Uint64R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[uint16]uint64) + v, changed := fastpathTV.DecMapUint16Uint64V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[uint16]uint64) + fastpathTV.DecMapUint16Uint64V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapUint16Uint64X(vp *map[uint16]uint64, checkNil bool, d *Decoder) { + v, changed := f.DecMapUint16Uint64V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint16Uint64V(v map[uint16]uint64, checkNil bool, canChange bool, + d *Decoder) (_ map[uint16]uint64, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 10) + v = make(map[uint16]uint64, xlen) + changed = true + } + + var mk uint16 + var mv uint64 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint16(dd.DecodeUint(16)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeUint(64) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint16(dd.DecodeUint(16)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeUint(64) + if v != 
nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapUint16UintptrR(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[uint16]uintptr) + v, changed := fastpathTV.DecMapUint16UintptrV(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[uint16]uintptr) + fastpathTV.DecMapUint16UintptrV(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapUint16UintptrX(vp *map[uint16]uintptr, checkNil bool, d *Decoder) { + v, changed := f.DecMapUint16UintptrV(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint16UintptrV(v map[uint16]uintptr, checkNil bool, canChange bool, + d *Decoder) (_ map[uint16]uintptr, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 10) + v = make(map[uint16]uintptr, xlen) + changed = true + } + + var mk uint16 + var mv uintptr + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint16(dd.DecodeUint(16)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uintptr(dd.DecodeUint(uintBitsize)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint16(dd.DecodeUint(16)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uintptr(dd.DecodeUint(uintBitsize)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapUint16IntR(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[uint16]int) + v, changed := fastpathTV.DecMapUint16IntV(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[uint16]int) + fastpathTV.DecMapUint16IntV(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapUint16IntX(vp *map[uint16]int, checkNil bool, d *Decoder) { + v, changed := f.DecMapUint16IntV(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint16IntV(v map[uint16]int, checkNil bool, canChange bool, + d *Decoder) (_ map[uint16]int, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 10) + v = make(map[uint16]int, xlen) + changed = true + } + + var mk uint16 + var mv int + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint16(dd.DecodeUint(16)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int(dd.DecodeInt(intBitsize)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint16(dd.DecodeUint(16)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int(dd.DecodeInt(intBitsize)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + 
cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapUint16Int8R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[uint16]int8) + v, changed := fastpathTV.DecMapUint16Int8V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[uint16]int8) + fastpathTV.DecMapUint16Int8V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapUint16Int8X(vp *map[uint16]int8, checkNil bool, d *Decoder) { + v, changed := f.DecMapUint16Int8V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint16Int8V(v map[uint16]int8, checkNil bool, canChange bool, + d *Decoder) (_ map[uint16]int8, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 3) + v = make(map[uint16]int8, xlen) + changed = true + } + + var mk uint16 + var mv int8 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint16(dd.DecodeUint(16)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int8(dd.DecodeInt(8)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint16(dd.DecodeUint(16)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int8(dd.DecodeInt(8)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapUint16Int16R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[uint16]int16) + v, changed := fastpathTV.DecMapUint16Int16V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[uint16]int16) + fastpathTV.DecMapUint16Int16V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapUint16Int16X(vp *map[uint16]int16, checkNil bool, d *Decoder) { + v, changed := f.DecMapUint16Int16V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint16Int16V(v map[uint16]int16, checkNil bool, canChange bool, + d *Decoder) (_ map[uint16]int16, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 4) + v = make(map[uint16]int16, xlen) + changed = true + } + + var mk uint16 + var mv int16 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint16(dd.DecodeUint(16)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int16(dd.DecodeInt(16)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint16(dd.DecodeUint(16)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int16(dd.DecodeInt(16)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapUint16Int32R(rv 
reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[uint16]int32) + v, changed := fastpathTV.DecMapUint16Int32V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[uint16]int32) + fastpathTV.DecMapUint16Int32V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapUint16Int32X(vp *map[uint16]int32, checkNil bool, d *Decoder) { + v, changed := f.DecMapUint16Int32V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint16Int32V(v map[uint16]int32, checkNil bool, canChange bool, + d *Decoder) (_ map[uint16]int32, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 6) + v = make(map[uint16]int32, xlen) + changed = true + } + + var mk uint16 + var mv int32 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint16(dd.DecodeUint(16)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int32(dd.DecodeInt(32)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint16(dd.DecodeUint(16)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int32(dd.DecodeInt(32)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapUint16Int64R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[uint16]int64) + v, changed := fastpathTV.DecMapUint16Int64V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[uint16]int64) + fastpathTV.DecMapUint16Int64V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapUint16Int64X(vp *map[uint16]int64, checkNil bool, d *Decoder) { + v, changed := f.DecMapUint16Int64V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint16Int64V(v map[uint16]int64, checkNil bool, canChange bool, + d *Decoder) (_ map[uint16]int64, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 10) + v = make(map[uint16]int64, xlen) + changed = true + } + + var mk uint16 + var mv int64 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint16(dd.DecodeUint(16)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeInt(64) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint16(dd.DecodeUint(16)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeInt(64) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapUint16Float32R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[uint16]float32) + v, changed := 
fastpathTV.DecMapUint16Float32V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[uint16]float32) + fastpathTV.DecMapUint16Float32V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapUint16Float32X(vp *map[uint16]float32, checkNil bool, d *Decoder) { + v, changed := f.DecMapUint16Float32V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint16Float32V(v map[uint16]float32, checkNil bool, canChange bool, + d *Decoder) (_ map[uint16]float32, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 6) + v = make(map[uint16]float32, xlen) + changed = true + } + + var mk uint16 + var mv float32 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint16(dd.DecodeUint(16)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = float32(dd.DecodeFloat(true)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint16(dd.DecodeUint(16)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = float32(dd.DecodeFloat(true)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapUint16Float64R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[uint16]float64) + v, changed := fastpathTV.DecMapUint16Float64V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[uint16]float64) + fastpathTV.DecMapUint16Float64V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapUint16Float64X(vp *map[uint16]float64, checkNil bool, d *Decoder) { + v, changed := f.DecMapUint16Float64V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint16Float64V(v map[uint16]float64, checkNil bool, canChange bool, + d *Decoder) (_ map[uint16]float64, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 10) + v = make(map[uint16]float64, xlen) + changed = true + } + + var mk uint16 + var mv float64 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint16(dd.DecodeUint(16)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeFloat(false) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint16(dd.DecodeUint(16)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeFloat(false) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapUint16BoolR(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[uint16]bool) + v, changed := fastpathTV.DecMapUint16BoolV(*vp, 
fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[uint16]bool) + fastpathTV.DecMapUint16BoolV(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapUint16BoolX(vp *map[uint16]bool, checkNil bool, d *Decoder) { + v, changed := f.DecMapUint16BoolV(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint16BoolV(v map[uint16]bool, checkNil bool, canChange bool, + d *Decoder) (_ map[uint16]bool, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 3) + v = make(map[uint16]bool, xlen) + changed = true + } + + var mk uint16 + var mv bool + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint16(dd.DecodeUint(16)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeBool() + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint16(dd.DecodeUint(16)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeBool() + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapUint32IntfR(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[uint32]interface{}) + v, changed := fastpathTV.DecMapUint32IntfV(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[uint32]interface{}) + fastpathTV.DecMapUint32IntfV(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapUint32IntfX(vp *map[uint32]interface{}, checkNil bool, d *Decoder) { + v, changed := f.DecMapUint32IntfV(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint32IntfV(v map[uint32]interface{}, checkNil bool, canChange bool, + d *Decoder) (_ map[uint32]interface{}, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 20) + v = make(map[uint32]interface{}, xlen) + changed = true + } + mapGet := !d.h.MapValueReset && !d.h.InterfaceReset + var mk uint32 + var mv interface{} + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint32(dd.DecodeUint(32)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + if mapGet { + mv = v[mk] + } else { + mv = nil + } + d.decode(&mv) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint32(dd.DecodeUint(32)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + if mapGet { + mv = v[mk] + } else { + mv = nil + } + d.decode(&mv) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapUint32StringR(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[uint32]string) + v, changed := 
fastpathTV.DecMapUint32StringV(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[uint32]string) + fastpathTV.DecMapUint32StringV(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapUint32StringX(vp *map[uint32]string, checkNil bool, d *Decoder) { + v, changed := f.DecMapUint32StringV(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint32StringV(v map[uint32]string, checkNil bool, canChange bool, + d *Decoder) (_ map[uint32]string, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 20) + v = make(map[uint32]string, xlen) + changed = true + } + + var mk uint32 + var mv string + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint32(dd.DecodeUint(32)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeString() + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint32(dd.DecodeUint(32)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeString() + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapUint32UintR(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[uint32]uint) + v, changed := fastpathTV.DecMapUint32UintV(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[uint32]uint) + fastpathTV.DecMapUint32UintV(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapUint32UintX(vp *map[uint32]uint, checkNil bool, d *Decoder) { + v, changed := f.DecMapUint32UintV(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint32UintV(v map[uint32]uint, checkNil bool, canChange bool, + d *Decoder) (_ map[uint32]uint, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 12) + v = make(map[uint32]uint, xlen) + changed = true + } + + var mk uint32 + var mv uint + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint32(dd.DecodeUint(32)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint(dd.DecodeUint(uintBitsize)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint32(dd.DecodeUint(32)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint(dd.DecodeUint(uintBitsize)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapUint32Uint8R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[uint32]uint8) + v, changed := fastpathTV.DecMapUint32Uint8V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + 
} else { + v := rv.Interface().(map[uint32]uint8) + fastpathTV.DecMapUint32Uint8V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapUint32Uint8X(vp *map[uint32]uint8, checkNil bool, d *Decoder) { + v, changed := f.DecMapUint32Uint8V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint32Uint8V(v map[uint32]uint8, checkNil bool, canChange bool, + d *Decoder) (_ map[uint32]uint8, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 5) + v = make(map[uint32]uint8, xlen) + changed = true + } + + var mk uint32 + var mv uint8 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint32(dd.DecodeUint(32)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint8(dd.DecodeUint(8)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint32(dd.DecodeUint(32)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint8(dd.DecodeUint(8)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapUint32Uint16R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[uint32]uint16) + v, changed := fastpathTV.DecMapUint32Uint16V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[uint32]uint16) + fastpathTV.DecMapUint32Uint16V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapUint32Uint16X(vp *map[uint32]uint16, checkNil bool, d *Decoder) { + v, changed := f.DecMapUint32Uint16V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint32Uint16V(v map[uint32]uint16, checkNil bool, canChange bool, + d *Decoder) (_ map[uint32]uint16, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 6) + v = make(map[uint32]uint16, xlen) + changed = true + } + + var mk uint32 + var mv uint16 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint32(dd.DecodeUint(32)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint16(dd.DecodeUint(16)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint32(dd.DecodeUint(32)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint16(dd.DecodeUint(16)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapUint32Uint32R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[uint32]uint32) + v, changed := fastpathTV.DecMapUint32Uint32V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[uint32]uint32) + fastpathTV.DecMapUint32Uint32V(v, 
fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapUint32Uint32X(vp *map[uint32]uint32, checkNil bool, d *Decoder) { + v, changed := f.DecMapUint32Uint32V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint32Uint32V(v map[uint32]uint32, checkNil bool, canChange bool, + d *Decoder) (_ map[uint32]uint32, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 8) + v = make(map[uint32]uint32, xlen) + changed = true + } + + var mk uint32 + var mv uint32 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint32(dd.DecodeUint(32)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint32(dd.DecodeUint(32)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint32(dd.DecodeUint(32)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint32(dd.DecodeUint(32)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapUint32Uint64R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[uint32]uint64) + v, changed := fastpathTV.DecMapUint32Uint64V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[uint32]uint64) + fastpathTV.DecMapUint32Uint64V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapUint32Uint64X(vp *map[uint32]uint64, checkNil bool, d *Decoder) { + v, changed := f.DecMapUint32Uint64V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint32Uint64V(v map[uint32]uint64, checkNil bool, canChange bool, + d *Decoder) (_ map[uint32]uint64, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 12) + v = make(map[uint32]uint64, xlen) + changed = true + } + + var mk uint32 + var mv uint64 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint32(dd.DecodeUint(32)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeUint(64) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint32(dd.DecodeUint(32)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeUint(64) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapUint32UintptrR(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[uint32]uintptr) + v, changed := fastpathTV.DecMapUint32UintptrV(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[uint32]uintptr) + fastpathTV.DecMapUint32UintptrV(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) 
DecMapUint32UintptrX(vp *map[uint32]uintptr, checkNil bool, d *Decoder) { + v, changed := f.DecMapUint32UintptrV(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint32UintptrV(v map[uint32]uintptr, checkNil bool, canChange bool, + d *Decoder) (_ map[uint32]uintptr, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 12) + v = make(map[uint32]uintptr, xlen) + changed = true + } + + var mk uint32 + var mv uintptr + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint32(dd.DecodeUint(32)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uintptr(dd.DecodeUint(uintBitsize)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint32(dd.DecodeUint(32)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uintptr(dd.DecodeUint(uintBitsize)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapUint32IntR(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[uint32]int) + v, changed := fastpathTV.DecMapUint32IntV(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[uint32]int) + fastpathTV.DecMapUint32IntV(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapUint32IntX(vp *map[uint32]int, checkNil bool, d *Decoder) { + v, changed := f.DecMapUint32IntV(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint32IntV(v map[uint32]int, checkNil bool, canChange bool, + d *Decoder) (_ map[uint32]int, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 12) + v = make(map[uint32]int, xlen) + changed = true + } + + var mk uint32 + var mv int + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint32(dd.DecodeUint(32)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int(dd.DecodeInt(intBitsize)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint32(dd.DecodeUint(32)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int(dd.DecodeInt(intBitsize)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapUint32Int8R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[uint32]int8) + v, changed := fastpathTV.DecMapUint32Int8V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[uint32]int8) + fastpathTV.DecMapUint32Int8V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapUint32Int8X(vp *map[uint32]int8, checkNil bool, d *Decoder) { + v, changed := 
f.DecMapUint32Int8V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint32Int8V(v map[uint32]int8, checkNil bool, canChange bool, + d *Decoder) (_ map[uint32]int8, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 5) + v = make(map[uint32]int8, xlen) + changed = true + } + + var mk uint32 + var mv int8 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint32(dd.DecodeUint(32)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int8(dd.DecodeInt(8)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint32(dd.DecodeUint(32)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int8(dd.DecodeInt(8)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapUint32Int16R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[uint32]int16) + v, changed := fastpathTV.DecMapUint32Int16V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[uint32]int16) + fastpathTV.DecMapUint32Int16V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapUint32Int16X(vp *map[uint32]int16, checkNil bool, d *Decoder) { + v, changed := f.DecMapUint32Int16V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint32Int16V(v map[uint32]int16, checkNil bool, canChange bool, + d *Decoder) (_ map[uint32]int16, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 6) + v = make(map[uint32]int16, xlen) + changed = true + } + + var mk uint32 + var mv int16 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint32(dd.DecodeUint(32)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int16(dd.DecodeInt(16)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint32(dd.DecodeUint(32)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int16(dd.DecodeInt(16)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapUint32Int32R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[uint32]int32) + v, changed := fastpathTV.DecMapUint32Int32V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[uint32]int32) + fastpathTV.DecMapUint32Int32V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapUint32Int32X(vp *map[uint32]int32, checkNil bool, d *Decoder) { + v, changed := f.DecMapUint32Int32V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) 
DecMapUint32Int32V(v map[uint32]int32, checkNil bool, canChange bool, + d *Decoder) (_ map[uint32]int32, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 8) + v = make(map[uint32]int32, xlen) + changed = true + } + + var mk uint32 + var mv int32 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint32(dd.DecodeUint(32)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int32(dd.DecodeInt(32)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint32(dd.DecodeUint(32)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int32(dd.DecodeInt(32)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapUint32Int64R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[uint32]int64) + v, changed := fastpathTV.DecMapUint32Int64V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[uint32]int64) + fastpathTV.DecMapUint32Int64V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapUint32Int64X(vp *map[uint32]int64, checkNil bool, d *Decoder) { + v, changed := f.DecMapUint32Int64V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint32Int64V(v map[uint32]int64, checkNil bool, canChange bool, + d *Decoder) (_ map[uint32]int64, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 12) + v = make(map[uint32]int64, xlen) + changed = true + } + + var mk uint32 + var mv int64 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint32(dd.DecodeUint(32)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeInt(64) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint32(dd.DecodeUint(32)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeInt(64) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapUint32Float32R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[uint32]float32) + v, changed := fastpathTV.DecMapUint32Float32V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[uint32]float32) + fastpathTV.DecMapUint32Float32V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapUint32Float32X(vp *map[uint32]float32, checkNil bool, d *Decoder) { + v, changed := f.DecMapUint32Float32V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint32Float32V(v map[uint32]float32, checkNil bool, canChange bool, + d *Decoder) (_ 
map[uint32]float32, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 8) + v = make(map[uint32]float32, xlen) + changed = true + } + + var mk uint32 + var mv float32 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint32(dd.DecodeUint(32)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = float32(dd.DecodeFloat(true)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint32(dd.DecodeUint(32)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = float32(dd.DecodeFloat(true)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapUint32Float64R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[uint32]float64) + v, changed := fastpathTV.DecMapUint32Float64V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[uint32]float64) + fastpathTV.DecMapUint32Float64V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapUint32Float64X(vp *map[uint32]float64, checkNil bool, d *Decoder) { + v, changed := f.DecMapUint32Float64V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint32Float64V(v map[uint32]float64, checkNil bool, canChange bool, + d *Decoder) (_ map[uint32]float64, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 12) + v = make(map[uint32]float64, xlen) + changed = true + } + + var mk uint32 + var mv float64 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint32(dd.DecodeUint(32)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeFloat(false) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint32(dd.DecodeUint(32)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeFloat(false) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapUint32BoolR(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[uint32]bool) + v, changed := fastpathTV.DecMapUint32BoolV(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[uint32]bool) + fastpathTV.DecMapUint32BoolV(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapUint32BoolX(vp *map[uint32]bool, checkNil bool, d *Decoder) { + v, changed := f.DecMapUint32BoolV(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint32BoolV(v map[uint32]bool, checkNil bool, canChange bool, + d *Decoder) (_ map[uint32]bool, changed bool) { + dd := d.d + cr := d.cr + + if checkNil 
&& dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 5) + v = make(map[uint32]bool, xlen) + changed = true + } + + var mk uint32 + var mv bool + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint32(dd.DecodeUint(32)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeBool() + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint32(dd.DecodeUint(32)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeBool() + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapUint64IntfR(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[uint64]interface{}) + v, changed := fastpathTV.DecMapUint64IntfV(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[uint64]interface{}) + fastpathTV.DecMapUint64IntfV(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapUint64IntfX(vp *map[uint64]interface{}, checkNil bool, d *Decoder) { + v, changed := f.DecMapUint64IntfV(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint64IntfV(v map[uint64]interface{}, checkNil bool, canChange bool, + d *Decoder) (_ map[uint64]interface{}, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 24) + v = make(map[uint64]interface{}, xlen) + changed = true + } + mapGet := !d.h.MapValueReset && !d.h.InterfaceReset + var mk uint64 + var mv interface{} + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeUint(64) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + if mapGet { + mv = v[mk] + } else { + mv = nil + } + d.decode(&mv) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeUint(64) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + if mapGet { + mv = v[mk] + } else { + mv = nil + } + d.decode(&mv) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapUint64StringR(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[uint64]string) + v, changed := fastpathTV.DecMapUint64StringV(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[uint64]string) + fastpathTV.DecMapUint64StringV(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapUint64StringX(vp *map[uint64]string, checkNil bool, d *Decoder) { + v, changed := f.DecMapUint64StringV(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint64StringV(v map[uint64]string, checkNil bool, canChange bool, + d *Decoder) (_ map[uint64]string, changed bool) { + dd 
:= d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 24) + v = make(map[uint64]string, xlen) + changed = true + } + + var mk uint64 + var mv string + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeUint(64) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeString() + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeUint(64) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeString() + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapUint64UintR(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[uint64]uint) + v, changed := fastpathTV.DecMapUint64UintV(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[uint64]uint) + fastpathTV.DecMapUint64UintV(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapUint64UintX(vp *map[uint64]uint, checkNil bool, d *Decoder) { + v, changed := f.DecMapUint64UintV(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint64UintV(v map[uint64]uint, checkNil bool, canChange bool, + d *Decoder) (_ map[uint64]uint, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 16) + v = make(map[uint64]uint, xlen) + changed = true + } + + var mk uint64 + var mv uint + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeUint(64) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint(dd.DecodeUint(uintBitsize)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeUint(64) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint(dd.DecodeUint(uintBitsize)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapUint64Uint8R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[uint64]uint8) + v, changed := fastpathTV.DecMapUint64Uint8V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[uint64]uint8) + fastpathTV.DecMapUint64Uint8V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapUint64Uint8X(vp *map[uint64]uint8, checkNil bool, d *Decoder) { + v, changed := f.DecMapUint64Uint8V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint64Uint8V(v map[uint64]uint8, checkNil bool, canChange bool, + d *Decoder) (_ map[uint64]uint8, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen 
:= dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 9) + v = make(map[uint64]uint8, xlen) + changed = true + } + + var mk uint64 + var mv uint8 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeUint(64) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint8(dd.DecodeUint(8)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeUint(64) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint8(dd.DecodeUint(8)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapUint64Uint16R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[uint64]uint16) + v, changed := fastpathTV.DecMapUint64Uint16V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[uint64]uint16) + fastpathTV.DecMapUint64Uint16V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapUint64Uint16X(vp *map[uint64]uint16, checkNil bool, d *Decoder) { + v, changed := f.DecMapUint64Uint16V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint64Uint16V(v map[uint64]uint16, checkNil bool, canChange bool, + d *Decoder) (_ map[uint64]uint16, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 10) + v = make(map[uint64]uint16, xlen) + changed = true + } + + var mk uint64 + var mv uint16 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeUint(64) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint16(dd.DecodeUint(16)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeUint(64) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint16(dd.DecodeUint(16)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapUint64Uint32R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[uint64]uint32) + v, changed := fastpathTV.DecMapUint64Uint32V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[uint64]uint32) + fastpathTV.DecMapUint64Uint32V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapUint64Uint32X(vp *map[uint64]uint32, checkNil bool, d *Decoder) { + v, changed := f.DecMapUint64Uint32V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint64Uint32V(v map[uint64]uint32, checkNil bool, canChange bool, + d *Decoder) (_ map[uint64]uint32, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 12) 
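+ // note (editorial annotation): the trailing argument to decInferLen is a per-entry size
+ // hint in bytes (12 here, matching an 8-byte uint64 key plus a 4-byte uint32 value); when
+ // the stream declares a map length, decInferLen combines it with d.h.MaxInitLen to cap the
+ // initial allocation, so a hostile or corrupt length prefix cannot force a huge make().
+ // The same per-entry hint recurs in every generated fast path in this file.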
+ v = make(map[uint64]uint32, xlen) + changed = true + } + + var mk uint64 + var mv uint32 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeUint(64) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint32(dd.DecodeUint(32)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeUint(64) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint32(dd.DecodeUint(32)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapUint64Uint64R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[uint64]uint64) + v, changed := fastpathTV.DecMapUint64Uint64V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[uint64]uint64) + fastpathTV.DecMapUint64Uint64V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapUint64Uint64X(vp *map[uint64]uint64, checkNil bool, d *Decoder) { + v, changed := f.DecMapUint64Uint64V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint64Uint64V(v map[uint64]uint64, checkNil bool, canChange bool, + d *Decoder) (_ map[uint64]uint64, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 16) + v = make(map[uint64]uint64, xlen) + changed = true + } + + var mk uint64 + var mv uint64 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeUint(64) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeUint(64) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeUint(64) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeUint(64) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapUint64UintptrR(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[uint64]uintptr) + v, changed := fastpathTV.DecMapUint64UintptrV(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[uint64]uintptr) + fastpathTV.DecMapUint64UintptrV(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapUint64UintptrX(vp *map[uint64]uintptr, checkNil bool, d *Decoder) { + v, changed := f.DecMapUint64UintptrV(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint64UintptrV(v map[uint64]uintptr, checkNil bool, canChange bool, + d *Decoder) (_ map[uint64]uintptr, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 16) + v = make(map[uint64]uintptr, xlen) + changed = true + } + + var mk uint64 + var mv uintptr + if 
containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeUint(64) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uintptr(dd.DecodeUint(uintBitsize)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeUint(64) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uintptr(dd.DecodeUint(uintBitsize)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapUint64IntR(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[uint64]int) + v, changed := fastpathTV.DecMapUint64IntV(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[uint64]int) + fastpathTV.DecMapUint64IntV(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapUint64IntX(vp *map[uint64]int, checkNil bool, d *Decoder) { + v, changed := f.DecMapUint64IntV(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint64IntV(v map[uint64]int, checkNil bool, canChange bool, + d *Decoder) (_ map[uint64]int, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 16) + v = make(map[uint64]int, xlen) + changed = true + } + + var mk uint64 + var mv int + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeUint(64) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int(dd.DecodeInt(intBitsize)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeUint(64) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int(dd.DecodeInt(intBitsize)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapUint64Int8R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[uint64]int8) + v, changed := fastpathTV.DecMapUint64Int8V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[uint64]int8) + fastpathTV.DecMapUint64Int8V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapUint64Int8X(vp *map[uint64]int8, checkNil bool, d *Decoder) { + v, changed := f.DecMapUint64Int8V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint64Int8V(v map[uint64]int8, checkNil bool, canChange bool, + d *Decoder) (_ map[uint64]int8, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 9) + v = make(map[uint64]int8, xlen) + changed = true + } + + var mk uint64 + var mv int8 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = 
dd.DecodeUint(64) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int8(dd.DecodeInt(8)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeUint(64) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int8(dd.DecodeInt(8)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapUint64Int16R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[uint64]int16) + v, changed := fastpathTV.DecMapUint64Int16V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[uint64]int16) + fastpathTV.DecMapUint64Int16V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapUint64Int16X(vp *map[uint64]int16, checkNil bool, d *Decoder) { + v, changed := f.DecMapUint64Int16V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint64Int16V(v map[uint64]int16, checkNil bool, canChange bool, + d *Decoder) (_ map[uint64]int16, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 10) + v = make(map[uint64]int16, xlen) + changed = true + } + + var mk uint64 + var mv int16 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeUint(64) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int16(dd.DecodeInt(16)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeUint(64) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int16(dd.DecodeInt(16)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapUint64Int32R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[uint64]int32) + v, changed := fastpathTV.DecMapUint64Int32V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[uint64]int32) + fastpathTV.DecMapUint64Int32V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapUint64Int32X(vp *map[uint64]int32, checkNil bool, d *Decoder) { + v, changed := f.DecMapUint64Int32V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint64Int32V(v map[uint64]int32, checkNil bool, canChange bool, + d *Decoder) (_ map[uint64]int32, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 12) + v = make(map[uint64]int32, xlen) + changed = true + } + + var mk uint64 + var mv int32 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeUint(64) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int32(dd.DecodeInt(32)) + if v != nil { + v[mk] = 
mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeUint(64) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int32(dd.DecodeInt(32)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapUint64Int64R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[uint64]int64) + v, changed := fastpathTV.DecMapUint64Int64V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[uint64]int64) + fastpathTV.DecMapUint64Int64V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapUint64Int64X(vp *map[uint64]int64, checkNil bool, d *Decoder) { + v, changed := f.DecMapUint64Int64V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint64Int64V(v map[uint64]int64, checkNil bool, canChange bool, + d *Decoder) (_ map[uint64]int64, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 16) + v = make(map[uint64]int64, xlen) + changed = true + } + + var mk uint64 + var mv int64 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeUint(64) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeInt(64) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeUint(64) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeInt(64) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapUint64Float32R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[uint64]float32) + v, changed := fastpathTV.DecMapUint64Float32V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[uint64]float32) + fastpathTV.DecMapUint64Float32V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapUint64Float32X(vp *map[uint64]float32, checkNil bool, d *Decoder) { + v, changed := f.DecMapUint64Float32V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint64Float32V(v map[uint64]float32, checkNil bool, canChange bool, + d *Decoder) (_ map[uint64]float32, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 12) + v = make(map[uint64]float32, xlen) + changed = true + } + + var mk uint64 + var mv float32 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeUint(64) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = float32(dd.DecodeFloat(true)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + 
cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeUint(64) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = float32(dd.DecodeFloat(true)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapUint64Float64R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[uint64]float64) + v, changed := fastpathTV.DecMapUint64Float64V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[uint64]float64) + fastpathTV.DecMapUint64Float64V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapUint64Float64X(vp *map[uint64]float64, checkNil bool, d *Decoder) { + v, changed := f.DecMapUint64Float64V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint64Float64V(v map[uint64]float64, checkNil bool, canChange bool, + d *Decoder) (_ map[uint64]float64, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 16) + v = make(map[uint64]float64, xlen) + changed = true + } + + var mk uint64 + var mv float64 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeUint(64) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeFloat(false) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeUint(64) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeFloat(false) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapUint64BoolR(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[uint64]bool) + v, changed := fastpathTV.DecMapUint64BoolV(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[uint64]bool) + fastpathTV.DecMapUint64BoolV(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapUint64BoolX(vp *map[uint64]bool, checkNil bool, d *Decoder) { + v, changed := f.DecMapUint64BoolV(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint64BoolV(v map[uint64]bool, checkNil bool, canChange bool, + d *Decoder) (_ map[uint64]bool, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 9) + v = make(map[uint64]bool, xlen) + changed = true + } + + var mk uint64 + var mv bool + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeUint(64) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeBool() + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeUint(64) + if cr != nil { + cr.sendContainerState(containerMapValue) + 
} + mv = dd.DecodeBool() + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapUintptrIntfR(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[uintptr]interface{}) + v, changed := fastpathTV.DecMapUintptrIntfV(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[uintptr]interface{}) + fastpathTV.DecMapUintptrIntfV(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapUintptrIntfX(vp *map[uintptr]interface{}, checkNil bool, d *Decoder) { + v, changed := f.DecMapUintptrIntfV(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUintptrIntfV(v map[uintptr]interface{}, checkNil bool, canChange bool, + d *Decoder) (_ map[uintptr]interface{}, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 24) + v = make(map[uintptr]interface{}, xlen) + changed = true + } + mapGet := !d.h.MapValueReset && !d.h.InterfaceReset + var mk uintptr + var mv interface{} + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uintptr(dd.DecodeUint(uintBitsize)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + if mapGet { + mv = v[mk] + } else { + mv = nil + } + d.decode(&mv) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uintptr(dd.DecodeUint(uintBitsize)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + if mapGet { + mv = v[mk] + } else { + mv = nil + } + d.decode(&mv) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapUintptrStringR(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[uintptr]string) + v, changed := fastpathTV.DecMapUintptrStringV(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[uintptr]string) + fastpathTV.DecMapUintptrStringV(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapUintptrStringX(vp *map[uintptr]string, checkNil bool, d *Decoder) { + v, changed := f.DecMapUintptrStringV(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUintptrStringV(v map[uintptr]string, checkNil bool, canChange bool, + d *Decoder) (_ map[uintptr]string, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 24) + v = make(map[uintptr]string, xlen) + changed = true + } + + var mk uintptr + var mv string + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uintptr(dd.DecodeUint(uintBitsize)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeString() + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + 
cr.sendContainerState(containerMapKey) + } + mk = uintptr(dd.DecodeUint(uintBitsize)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeString() + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapUintptrUintR(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[uintptr]uint) + v, changed := fastpathTV.DecMapUintptrUintV(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[uintptr]uint) + fastpathTV.DecMapUintptrUintV(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapUintptrUintX(vp *map[uintptr]uint, checkNil bool, d *Decoder) { + v, changed := f.DecMapUintptrUintV(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUintptrUintV(v map[uintptr]uint, checkNil bool, canChange bool, + d *Decoder) (_ map[uintptr]uint, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 16) + v = make(map[uintptr]uint, xlen) + changed = true + } + + var mk uintptr + var mv uint + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uintptr(dd.DecodeUint(uintBitsize)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint(dd.DecodeUint(uintBitsize)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uintptr(dd.DecodeUint(uintBitsize)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint(dd.DecodeUint(uintBitsize)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapUintptrUint8R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[uintptr]uint8) + v, changed := fastpathTV.DecMapUintptrUint8V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[uintptr]uint8) + fastpathTV.DecMapUintptrUint8V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapUintptrUint8X(vp *map[uintptr]uint8, checkNil bool, d *Decoder) { + v, changed := f.DecMapUintptrUint8V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUintptrUint8V(v map[uintptr]uint8, checkNil bool, canChange bool, + d *Decoder) (_ map[uintptr]uint8, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 9) + v = make(map[uintptr]uint8, xlen) + changed = true + } + + var mk uintptr + var mv uint8 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uintptr(dd.DecodeUint(uintBitsize)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint8(dd.DecodeUint(8)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + 
} + mk = uintptr(dd.DecodeUint(uintBitsize)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint8(dd.DecodeUint(8)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapUintptrUint16R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[uintptr]uint16) + v, changed := fastpathTV.DecMapUintptrUint16V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[uintptr]uint16) + fastpathTV.DecMapUintptrUint16V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapUintptrUint16X(vp *map[uintptr]uint16, checkNil bool, d *Decoder) { + v, changed := f.DecMapUintptrUint16V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUintptrUint16V(v map[uintptr]uint16, checkNil bool, canChange bool, + d *Decoder) (_ map[uintptr]uint16, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 10) + v = make(map[uintptr]uint16, xlen) + changed = true + } + + var mk uintptr + var mv uint16 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uintptr(dd.DecodeUint(uintBitsize)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint16(dd.DecodeUint(16)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uintptr(dd.DecodeUint(uintBitsize)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint16(dd.DecodeUint(16)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapUintptrUint32R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[uintptr]uint32) + v, changed := fastpathTV.DecMapUintptrUint32V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[uintptr]uint32) + fastpathTV.DecMapUintptrUint32V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapUintptrUint32X(vp *map[uintptr]uint32, checkNil bool, d *Decoder) { + v, changed := f.DecMapUintptrUint32V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUintptrUint32V(v map[uintptr]uint32, checkNil bool, canChange bool, + d *Decoder) (_ map[uintptr]uint32, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 12) + v = make(map[uintptr]uint32, xlen) + changed = true + } + + var mk uintptr + var mv uint32 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uintptr(dd.DecodeUint(uintBitsize)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint32(dd.DecodeUint(32)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk 
= uintptr(dd.DecodeUint(uintBitsize)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint32(dd.DecodeUint(32)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapUintptrUint64R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[uintptr]uint64) + v, changed := fastpathTV.DecMapUintptrUint64V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[uintptr]uint64) + fastpathTV.DecMapUintptrUint64V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapUintptrUint64X(vp *map[uintptr]uint64, checkNil bool, d *Decoder) { + v, changed := f.DecMapUintptrUint64V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUintptrUint64V(v map[uintptr]uint64, checkNil bool, canChange bool, + d *Decoder) (_ map[uintptr]uint64, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 16) + v = make(map[uintptr]uint64, xlen) + changed = true + } + + var mk uintptr + var mv uint64 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uintptr(dd.DecodeUint(uintBitsize)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeUint(64) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uintptr(dd.DecodeUint(uintBitsize)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeUint(64) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapUintptrUintptrR(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[uintptr]uintptr) + v, changed := fastpathTV.DecMapUintptrUintptrV(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[uintptr]uintptr) + fastpathTV.DecMapUintptrUintptrV(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapUintptrUintptrX(vp *map[uintptr]uintptr, checkNil bool, d *Decoder) { + v, changed := f.DecMapUintptrUintptrV(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUintptrUintptrV(v map[uintptr]uintptr, checkNil bool, canChange bool, + d *Decoder) (_ map[uintptr]uintptr, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 16) + v = make(map[uintptr]uintptr, xlen) + changed = true + } + + var mk uintptr + var mv uintptr + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uintptr(dd.DecodeUint(uintBitsize)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uintptr(dd.DecodeUint(uintBitsize)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + 
mk = uintptr(dd.DecodeUint(uintBitsize)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uintptr(dd.DecodeUint(uintBitsize)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapUintptrIntR(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[uintptr]int) + v, changed := fastpathTV.DecMapUintptrIntV(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[uintptr]int) + fastpathTV.DecMapUintptrIntV(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapUintptrIntX(vp *map[uintptr]int, checkNil bool, d *Decoder) { + v, changed := f.DecMapUintptrIntV(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUintptrIntV(v map[uintptr]int, checkNil bool, canChange bool, + d *Decoder) (_ map[uintptr]int, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 16) + v = make(map[uintptr]int, xlen) + changed = true + } + + var mk uintptr + var mv int + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uintptr(dd.DecodeUint(uintBitsize)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int(dd.DecodeInt(intBitsize)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uintptr(dd.DecodeUint(uintBitsize)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int(dd.DecodeInt(intBitsize)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapUintptrInt8R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[uintptr]int8) + v, changed := fastpathTV.DecMapUintptrInt8V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[uintptr]int8) + fastpathTV.DecMapUintptrInt8V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapUintptrInt8X(vp *map[uintptr]int8, checkNil bool, d *Decoder) { + v, changed := f.DecMapUintptrInt8V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUintptrInt8V(v map[uintptr]int8, checkNil bool, canChange bool, + d *Decoder) (_ map[uintptr]int8, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 9) + v = make(map[uintptr]int8, xlen) + changed = true + } + + var mk uintptr + var mv int8 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uintptr(dd.DecodeUint(uintBitsize)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int8(dd.DecodeInt(8)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uintptr(dd.DecodeUint(uintBitsize)) + if cr != nil 
{ + cr.sendContainerState(containerMapValue) + } + mv = int8(dd.DecodeInt(8)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapUintptrInt16R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[uintptr]int16) + v, changed := fastpathTV.DecMapUintptrInt16V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[uintptr]int16) + fastpathTV.DecMapUintptrInt16V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapUintptrInt16X(vp *map[uintptr]int16, checkNil bool, d *Decoder) { + v, changed := f.DecMapUintptrInt16V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUintptrInt16V(v map[uintptr]int16, checkNil bool, canChange bool, + d *Decoder) (_ map[uintptr]int16, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 10) + v = make(map[uintptr]int16, xlen) + changed = true + } + + var mk uintptr + var mv int16 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uintptr(dd.DecodeUint(uintBitsize)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int16(dd.DecodeInt(16)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uintptr(dd.DecodeUint(uintBitsize)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int16(dd.DecodeInt(16)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapUintptrInt32R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[uintptr]int32) + v, changed := fastpathTV.DecMapUintptrInt32V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[uintptr]int32) + fastpathTV.DecMapUintptrInt32V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapUintptrInt32X(vp *map[uintptr]int32, checkNil bool, d *Decoder) { + v, changed := f.DecMapUintptrInt32V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUintptrInt32V(v map[uintptr]int32, checkNil bool, canChange bool, + d *Decoder) (_ map[uintptr]int32, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 12) + v = make(map[uintptr]int32, xlen) + changed = true + } + + var mk uintptr + var mv int32 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uintptr(dd.DecodeUint(uintBitsize)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int32(dd.DecodeInt(32)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uintptr(dd.DecodeUint(uintBitsize)) + if cr != nil { + 
cr.sendContainerState(containerMapValue) + } + mv = int32(dd.DecodeInt(32)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapUintptrInt64R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[uintptr]int64) + v, changed := fastpathTV.DecMapUintptrInt64V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[uintptr]int64) + fastpathTV.DecMapUintptrInt64V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapUintptrInt64X(vp *map[uintptr]int64, checkNil bool, d *Decoder) { + v, changed := f.DecMapUintptrInt64V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUintptrInt64V(v map[uintptr]int64, checkNil bool, canChange bool, + d *Decoder) (_ map[uintptr]int64, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 16) + v = make(map[uintptr]int64, xlen) + changed = true + } + + var mk uintptr + var mv int64 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uintptr(dd.DecodeUint(uintBitsize)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeInt(64) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uintptr(dd.DecodeUint(uintBitsize)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeInt(64) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapUintptrFloat32R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[uintptr]float32) + v, changed := fastpathTV.DecMapUintptrFloat32V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[uintptr]float32) + fastpathTV.DecMapUintptrFloat32V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapUintptrFloat32X(vp *map[uintptr]float32, checkNil bool, d *Decoder) { + v, changed := f.DecMapUintptrFloat32V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUintptrFloat32V(v map[uintptr]float32, checkNil bool, canChange bool, + d *Decoder) (_ map[uintptr]float32, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 12) + v = make(map[uintptr]float32, xlen) + changed = true + } + + var mk uintptr + var mv float32 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uintptr(dd.DecodeUint(uintBitsize)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = float32(dd.DecodeFloat(true)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uintptr(dd.DecodeUint(uintBitsize)) + if cr != nil { + 
cr.sendContainerState(containerMapValue) + } + mv = float32(dd.DecodeFloat(true)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapUintptrFloat64R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[uintptr]float64) + v, changed := fastpathTV.DecMapUintptrFloat64V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[uintptr]float64) + fastpathTV.DecMapUintptrFloat64V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapUintptrFloat64X(vp *map[uintptr]float64, checkNil bool, d *Decoder) { + v, changed := f.DecMapUintptrFloat64V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUintptrFloat64V(v map[uintptr]float64, checkNil bool, canChange bool, + d *Decoder) (_ map[uintptr]float64, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 16) + v = make(map[uintptr]float64, xlen) + changed = true + } + + var mk uintptr + var mv float64 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uintptr(dd.DecodeUint(uintBitsize)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeFloat(false) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uintptr(dd.DecodeUint(uintBitsize)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeFloat(false) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapUintptrBoolR(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[uintptr]bool) + v, changed := fastpathTV.DecMapUintptrBoolV(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[uintptr]bool) + fastpathTV.DecMapUintptrBoolV(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapUintptrBoolX(vp *map[uintptr]bool, checkNil bool, d *Decoder) { + v, changed := f.DecMapUintptrBoolV(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUintptrBoolV(v map[uintptr]bool, checkNil bool, canChange bool, + d *Decoder) (_ map[uintptr]bool, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 9) + v = make(map[uintptr]bool, xlen) + changed = true + } + + var mk uintptr + var mv bool + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uintptr(dd.DecodeUint(uintBitsize)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeBool() + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uintptr(dd.DecodeUint(uintBitsize)) + if cr != nil { + 
cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeBool() + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapIntIntfR(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[int]interface{}) + v, changed := fastpathTV.DecMapIntIntfV(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[int]interface{}) + fastpathTV.DecMapIntIntfV(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapIntIntfX(vp *map[int]interface{}, checkNil bool, d *Decoder) { + v, changed := f.DecMapIntIntfV(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapIntIntfV(v map[int]interface{}, checkNil bool, canChange bool, + d *Decoder) (_ map[int]interface{}, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 24) + v = make(map[int]interface{}, xlen) + changed = true + } + mapGet := !d.h.MapValueReset && !d.h.InterfaceReset + var mk int + var mv interface{} + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int(dd.DecodeInt(intBitsize)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + if mapGet { + mv = v[mk] + } else { + mv = nil + } + d.decode(&mv) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int(dd.DecodeInt(intBitsize)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + if mapGet { + mv = v[mk] + } else { + mv = nil + } + d.decode(&mv) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapIntStringR(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[int]string) + v, changed := fastpathTV.DecMapIntStringV(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[int]string) + fastpathTV.DecMapIntStringV(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapIntStringX(vp *map[int]string, checkNil bool, d *Decoder) { + v, changed := f.DecMapIntStringV(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapIntStringV(v map[int]string, checkNil bool, canChange bool, + d *Decoder) (_ map[int]string, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 24) + v = make(map[int]string, xlen) + changed = true + } + + var mk int + var mv string + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int(dd.DecodeInt(intBitsize)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeString() + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int(dd.DecodeInt(intBitsize)) + if cr != nil { + 
cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeString() + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapIntUintR(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[int]uint) + v, changed := fastpathTV.DecMapIntUintV(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[int]uint) + fastpathTV.DecMapIntUintV(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapIntUintX(vp *map[int]uint, checkNil bool, d *Decoder) { + v, changed := f.DecMapIntUintV(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapIntUintV(v map[int]uint, checkNil bool, canChange bool, + d *Decoder) (_ map[int]uint, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 16) + v = make(map[int]uint, xlen) + changed = true + } + + var mk int + var mv uint + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int(dd.DecodeInt(intBitsize)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint(dd.DecodeUint(uintBitsize)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int(dd.DecodeInt(intBitsize)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint(dd.DecodeUint(uintBitsize)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapIntUint8R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[int]uint8) + v, changed := fastpathTV.DecMapIntUint8V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[int]uint8) + fastpathTV.DecMapIntUint8V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapIntUint8X(vp *map[int]uint8, checkNil bool, d *Decoder) { + v, changed := f.DecMapIntUint8V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapIntUint8V(v map[int]uint8, checkNil bool, canChange bool, + d *Decoder) (_ map[int]uint8, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 9) + v = make(map[int]uint8, xlen) + changed = true + } + + var mk int + var mv uint8 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int(dd.DecodeInt(intBitsize)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint8(dd.DecodeUint(8)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int(dd.DecodeInt(intBitsize)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint8(dd.DecodeUint(8)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + 
cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapIntUint16R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[int]uint16) + v, changed := fastpathTV.DecMapIntUint16V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[int]uint16) + fastpathTV.DecMapIntUint16V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapIntUint16X(vp *map[int]uint16, checkNil bool, d *Decoder) { + v, changed := f.DecMapIntUint16V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapIntUint16V(v map[int]uint16, checkNil bool, canChange bool, + d *Decoder) (_ map[int]uint16, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 10) + v = make(map[int]uint16, xlen) + changed = true + } + + var mk int + var mv uint16 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int(dd.DecodeInt(intBitsize)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint16(dd.DecodeUint(16)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int(dd.DecodeInt(intBitsize)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint16(dd.DecodeUint(16)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapIntUint32R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[int]uint32) + v, changed := fastpathTV.DecMapIntUint32V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[int]uint32) + fastpathTV.DecMapIntUint32V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapIntUint32X(vp *map[int]uint32, checkNil bool, d *Decoder) { + v, changed := f.DecMapIntUint32V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapIntUint32V(v map[int]uint32, checkNil bool, canChange bool, + d *Decoder) (_ map[int]uint32, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 12) + v = make(map[int]uint32, xlen) + changed = true + } + + var mk int + var mv uint32 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int(dd.DecodeInt(intBitsize)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint32(dd.DecodeUint(32)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int(dd.DecodeInt(intBitsize)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint32(dd.DecodeUint(32)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapIntUint64R(rv reflect.Value) 
{ + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[int]uint64) + v, changed := fastpathTV.DecMapIntUint64V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[int]uint64) + fastpathTV.DecMapIntUint64V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapIntUint64X(vp *map[int]uint64, checkNil bool, d *Decoder) { + v, changed := f.DecMapIntUint64V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapIntUint64V(v map[int]uint64, checkNil bool, canChange bool, + d *Decoder) (_ map[int]uint64, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 16) + v = make(map[int]uint64, xlen) + changed = true + } + + var mk int + var mv uint64 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int(dd.DecodeInt(intBitsize)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeUint(64) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int(dd.DecodeInt(intBitsize)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeUint(64) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapIntUintptrR(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[int]uintptr) + v, changed := fastpathTV.DecMapIntUintptrV(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[int]uintptr) + fastpathTV.DecMapIntUintptrV(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapIntUintptrX(vp *map[int]uintptr, checkNil bool, d *Decoder) { + v, changed := f.DecMapIntUintptrV(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapIntUintptrV(v map[int]uintptr, checkNil bool, canChange bool, + d *Decoder) (_ map[int]uintptr, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 16) + v = make(map[int]uintptr, xlen) + changed = true + } + + var mk int + var mv uintptr + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int(dd.DecodeInt(intBitsize)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uintptr(dd.DecodeUint(uintBitsize)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int(dd.DecodeInt(intBitsize)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uintptr(dd.DecodeUint(uintBitsize)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapIntIntR(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[int]int) + v, changed := fastpathTV.DecMapIntIntV(*vp, 
fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[int]int) + fastpathTV.DecMapIntIntV(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapIntIntX(vp *map[int]int, checkNil bool, d *Decoder) { + v, changed := f.DecMapIntIntV(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapIntIntV(v map[int]int, checkNil bool, canChange bool, + d *Decoder) (_ map[int]int, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 16) + v = make(map[int]int, xlen) + changed = true + } + + var mk int + var mv int + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int(dd.DecodeInt(intBitsize)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int(dd.DecodeInt(intBitsize)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int(dd.DecodeInt(intBitsize)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int(dd.DecodeInt(intBitsize)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapIntInt8R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[int]int8) + v, changed := fastpathTV.DecMapIntInt8V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[int]int8) + fastpathTV.DecMapIntInt8V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapIntInt8X(vp *map[int]int8, checkNil bool, d *Decoder) { + v, changed := f.DecMapIntInt8V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapIntInt8V(v map[int]int8, checkNil bool, canChange bool, + d *Decoder) (_ map[int]int8, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 9) + v = make(map[int]int8, xlen) + changed = true + } + + var mk int + var mv int8 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int(dd.DecodeInt(intBitsize)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int8(dd.DecodeInt(8)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int(dd.DecodeInt(intBitsize)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int8(dd.DecodeInt(8)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapIntInt16R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[int]int16) + v, changed := fastpathTV.DecMapIntInt16V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[int]int16) + fastpathTV.DecMapIntInt16V(v, fastpathCheckNilFalse, false, f.d) + } +} +func 
(f fastpathT) DecMapIntInt16X(vp *map[int]int16, checkNil bool, d *Decoder) { + v, changed := f.DecMapIntInt16V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapIntInt16V(v map[int]int16, checkNil bool, canChange bool, + d *Decoder) (_ map[int]int16, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 10) + v = make(map[int]int16, xlen) + changed = true + } + + var mk int + var mv int16 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int(dd.DecodeInt(intBitsize)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int16(dd.DecodeInt(16)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int(dd.DecodeInt(intBitsize)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int16(dd.DecodeInt(16)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapIntInt32R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[int]int32) + v, changed := fastpathTV.DecMapIntInt32V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[int]int32) + fastpathTV.DecMapIntInt32V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapIntInt32X(vp *map[int]int32, checkNil bool, d *Decoder) { + v, changed := f.DecMapIntInt32V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapIntInt32V(v map[int]int32, checkNil bool, canChange bool, + d *Decoder) (_ map[int]int32, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 12) + v = make(map[int]int32, xlen) + changed = true + } + + var mk int + var mv int32 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int(dd.DecodeInt(intBitsize)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int32(dd.DecodeInt(32)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int(dd.DecodeInt(intBitsize)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int32(dd.DecodeInt(32)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapIntInt64R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[int]int64) + v, changed := fastpathTV.DecMapIntInt64V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[int]int64) + fastpathTV.DecMapIntInt64V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapIntInt64X(vp *map[int]int64, checkNil bool, d *Decoder) { + v, changed := f.DecMapIntInt64V(*vp, checkNil, true, d) + if changed { + *vp = v + } 
+} +func (_ fastpathT) DecMapIntInt64V(v map[int]int64, checkNil bool, canChange bool, + d *Decoder) (_ map[int]int64, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 16) + v = make(map[int]int64, xlen) + changed = true + } + + var mk int + var mv int64 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int(dd.DecodeInt(intBitsize)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeInt(64) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int(dd.DecodeInt(intBitsize)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeInt(64) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapIntFloat32R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[int]float32) + v, changed := fastpathTV.DecMapIntFloat32V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[int]float32) + fastpathTV.DecMapIntFloat32V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapIntFloat32X(vp *map[int]float32, checkNil bool, d *Decoder) { + v, changed := f.DecMapIntFloat32V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapIntFloat32V(v map[int]float32, checkNil bool, canChange bool, + d *Decoder) (_ map[int]float32, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 12) + v = make(map[int]float32, xlen) + changed = true + } + + var mk int + var mv float32 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int(dd.DecodeInt(intBitsize)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = float32(dd.DecodeFloat(true)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int(dd.DecodeInt(intBitsize)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = float32(dd.DecodeFloat(true)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapIntFloat64R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[int]float64) + v, changed := fastpathTV.DecMapIntFloat64V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[int]float64) + fastpathTV.DecMapIntFloat64V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapIntFloat64X(vp *map[int]float64, checkNil bool, d *Decoder) { + v, changed := f.DecMapIntFloat64V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapIntFloat64V(v map[int]float64, checkNil bool, canChange bool, + d *Decoder) (_ map[int]float64, 
changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 16) + v = make(map[int]float64, xlen) + changed = true + } + + var mk int + var mv float64 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int(dd.DecodeInt(intBitsize)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeFloat(false) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int(dd.DecodeInt(intBitsize)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeFloat(false) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapIntBoolR(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[int]bool) + v, changed := fastpathTV.DecMapIntBoolV(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[int]bool) + fastpathTV.DecMapIntBoolV(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapIntBoolX(vp *map[int]bool, checkNil bool, d *Decoder) { + v, changed := f.DecMapIntBoolV(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapIntBoolV(v map[int]bool, checkNil bool, canChange bool, + d *Decoder) (_ map[int]bool, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 9) + v = make(map[int]bool, xlen) + changed = true + } + + var mk int + var mv bool + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int(dd.DecodeInt(intBitsize)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeBool() + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int(dd.DecodeInt(intBitsize)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeBool() + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapInt8IntfR(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[int8]interface{}) + v, changed := fastpathTV.DecMapInt8IntfV(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[int8]interface{}) + fastpathTV.DecMapInt8IntfV(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapInt8IntfX(vp *map[int8]interface{}, checkNil bool, d *Decoder) { + v, changed := f.DecMapInt8IntfV(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt8IntfV(v map[int8]interface{}, checkNil bool, canChange bool, + d *Decoder) (_ map[int8]interface{}, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen 
:= dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 17) + v = make(map[int8]interface{}, xlen) + changed = true + } + mapGet := !d.h.MapValueReset && !d.h.InterfaceReset + var mk int8 + var mv interface{} + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int8(dd.DecodeInt(8)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + if mapGet { + mv = v[mk] + } else { + mv = nil + } + d.decode(&mv) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int8(dd.DecodeInt(8)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + if mapGet { + mv = v[mk] + } else { + mv = nil + } + d.decode(&mv) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapInt8StringR(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[int8]string) + v, changed := fastpathTV.DecMapInt8StringV(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[int8]string) + fastpathTV.DecMapInt8StringV(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapInt8StringX(vp *map[int8]string, checkNil bool, d *Decoder) { + v, changed := f.DecMapInt8StringV(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt8StringV(v map[int8]string, checkNil bool, canChange bool, + d *Decoder) (_ map[int8]string, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 17) + v = make(map[int8]string, xlen) + changed = true + } + + var mk int8 + var mv string + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int8(dd.DecodeInt(8)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeString() + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int8(dd.DecodeInt(8)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeString() + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapInt8UintR(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[int8]uint) + v, changed := fastpathTV.DecMapInt8UintV(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[int8]uint) + fastpathTV.DecMapInt8UintV(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapInt8UintX(vp *map[int8]uint, checkNil bool, d *Decoder) { + v, changed := f.DecMapInt8UintV(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt8UintV(v map[int8]uint, checkNil bool, canChange bool, + d *Decoder) (_ map[int8]uint, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v 
== nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 9) + v = make(map[int8]uint, xlen) + changed = true + } + + var mk int8 + var mv uint + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int8(dd.DecodeInt(8)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint(dd.DecodeUint(uintBitsize)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int8(dd.DecodeInt(8)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint(dd.DecodeUint(uintBitsize)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapInt8Uint8R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[int8]uint8) + v, changed := fastpathTV.DecMapInt8Uint8V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[int8]uint8) + fastpathTV.DecMapInt8Uint8V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapInt8Uint8X(vp *map[int8]uint8, checkNil bool, d *Decoder) { + v, changed := f.DecMapInt8Uint8V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt8Uint8V(v map[int8]uint8, checkNil bool, canChange bool, + d *Decoder) (_ map[int8]uint8, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 2) + v = make(map[int8]uint8, xlen) + changed = true + } + + var mk int8 + var mv uint8 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int8(dd.DecodeInt(8)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint8(dd.DecodeUint(8)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int8(dd.DecodeInt(8)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint8(dd.DecodeUint(8)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapInt8Uint16R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[int8]uint16) + v, changed := fastpathTV.DecMapInt8Uint16V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[int8]uint16) + fastpathTV.DecMapInt8Uint16V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapInt8Uint16X(vp *map[int8]uint16, checkNil bool, d *Decoder) { + v, changed := f.DecMapInt8Uint16V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt8Uint16V(v map[int8]uint16, checkNil bool, canChange bool, + d *Decoder) (_ map[int8]uint16, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 3) + v = make(map[int8]uint16, xlen) + changed = true + } + + var mk int8 + var mv 
uint16 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int8(dd.DecodeInt(8)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint16(dd.DecodeUint(16)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int8(dd.DecodeInt(8)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint16(dd.DecodeUint(16)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapInt8Uint32R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[int8]uint32) + v, changed := fastpathTV.DecMapInt8Uint32V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[int8]uint32) + fastpathTV.DecMapInt8Uint32V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapInt8Uint32X(vp *map[int8]uint32, checkNil bool, d *Decoder) { + v, changed := f.DecMapInt8Uint32V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt8Uint32V(v map[int8]uint32, checkNil bool, canChange bool, + d *Decoder) (_ map[int8]uint32, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 5) + v = make(map[int8]uint32, xlen) + changed = true + } + + var mk int8 + var mv uint32 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int8(dd.DecodeInt(8)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint32(dd.DecodeUint(32)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int8(dd.DecodeInt(8)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint32(dd.DecodeUint(32)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapInt8Uint64R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[int8]uint64) + v, changed := fastpathTV.DecMapInt8Uint64V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[int8]uint64) + fastpathTV.DecMapInt8Uint64V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapInt8Uint64X(vp *map[int8]uint64, checkNil bool, d *Decoder) { + v, changed := f.DecMapInt8Uint64V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt8Uint64V(v map[int8]uint64, checkNil bool, canChange bool, + d *Decoder) (_ map[int8]uint64, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 9) + v = make(map[int8]uint64, xlen) + changed = true + } + + var mk int8 + var mv uint64 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = 
int8(dd.DecodeInt(8)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeUint(64) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int8(dd.DecodeInt(8)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeUint(64) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapInt8UintptrR(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[int8]uintptr) + v, changed := fastpathTV.DecMapInt8UintptrV(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[int8]uintptr) + fastpathTV.DecMapInt8UintptrV(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapInt8UintptrX(vp *map[int8]uintptr, checkNil bool, d *Decoder) { + v, changed := f.DecMapInt8UintptrV(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt8UintptrV(v map[int8]uintptr, checkNil bool, canChange bool, + d *Decoder) (_ map[int8]uintptr, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 9) + v = make(map[int8]uintptr, xlen) + changed = true + } + + var mk int8 + var mv uintptr + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int8(dd.DecodeInt(8)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uintptr(dd.DecodeUint(uintBitsize)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int8(dd.DecodeInt(8)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uintptr(dd.DecodeUint(uintBitsize)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapInt8IntR(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[int8]int) + v, changed := fastpathTV.DecMapInt8IntV(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[int8]int) + fastpathTV.DecMapInt8IntV(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapInt8IntX(vp *map[int8]int, checkNil bool, d *Decoder) { + v, changed := f.DecMapInt8IntV(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt8IntV(v map[int8]int, checkNil bool, canChange bool, + d *Decoder) (_ map[int8]int, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 9) + v = make(map[int8]int, xlen) + changed = true + } + + var mk int8 + var mv int + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int8(dd.DecodeInt(8)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int(dd.DecodeInt(intBitsize)) + if v != nil { + v[mk] = mv + } + } + 
} else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int8(dd.DecodeInt(8)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int(dd.DecodeInt(intBitsize)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapInt8Int8R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[int8]int8) + v, changed := fastpathTV.DecMapInt8Int8V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[int8]int8) + fastpathTV.DecMapInt8Int8V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapInt8Int8X(vp *map[int8]int8, checkNil bool, d *Decoder) { + v, changed := f.DecMapInt8Int8V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt8Int8V(v map[int8]int8, checkNil bool, canChange bool, + d *Decoder) (_ map[int8]int8, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 2) + v = make(map[int8]int8, xlen) + changed = true + } + + var mk int8 + var mv int8 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int8(dd.DecodeInt(8)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int8(dd.DecodeInt(8)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int8(dd.DecodeInt(8)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int8(dd.DecodeInt(8)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapInt8Int16R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[int8]int16) + v, changed := fastpathTV.DecMapInt8Int16V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[int8]int16) + fastpathTV.DecMapInt8Int16V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapInt8Int16X(vp *map[int8]int16, checkNil bool, d *Decoder) { + v, changed := f.DecMapInt8Int16V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt8Int16V(v map[int8]int16, checkNil bool, canChange bool, + d *Decoder) (_ map[int8]int16, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 3) + v = make(map[int8]int16, xlen) + changed = true + } + + var mk int8 + var mv int16 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int8(dd.DecodeInt(8)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int16(dd.DecodeInt(16)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int8(dd.DecodeInt(8)) + if cr != nil { + 
cr.sendContainerState(containerMapValue) + } + mv = int16(dd.DecodeInt(16)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapInt8Int32R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[int8]int32) + v, changed := fastpathTV.DecMapInt8Int32V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[int8]int32) + fastpathTV.DecMapInt8Int32V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapInt8Int32X(vp *map[int8]int32, checkNil bool, d *Decoder) { + v, changed := f.DecMapInt8Int32V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt8Int32V(v map[int8]int32, checkNil bool, canChange bool, + d *Decoder) (_ map[int8]int32, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 5) + v = make(map[int8]int32, xlen) + changed = true + } + + var mk int8 + var mv int32 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int8(dd.DecodeInt(8)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int32(dd.DecodeInt(32)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int8(dd.DecodeInt(8)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int32(dd.DecodeInt(32)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapInt8Int64R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[int8]int64) + v, changed := fastpathTV.DecMapInt8Int64V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[int8]int64) + fastpathTV.DecMapInt8Int64V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapInt8Int64X(vp *map[int8]int64, checkNil bool, d *Decoder) { + v, changed := f.DecMapInt8Int64V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt8Int64V(v map[int8]int64, checkNil bool, canChange bool, + d *Decoder) (_ map[int8]int64, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 9) + v = make(map[int8]int64, xlen) + changed = true + } + + var mk int8 + var mv int64 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int8(dd.DecodeInt(8)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeInt(64) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int8(dd.DecodeInt(8)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeInt(64) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return 
v, changed +} + +func (f *decFnInfo) fastpathDecMapInt8Float32R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[int8]float32) + v, changed := fastpathTV.DecMapInt8Float32V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[int8]float32) + fastpathTV.DecMapInt8Float32V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapInt8Float32X(vp *map[int8]float32, checkNil bool, d *Decoder) { + v, changed := f.DecMapInt8Float32V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt8Float32V(v map[int8]float32, checkNil bool, canChange bool, + d *Decoder) (_ map[int8]float32, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 5) + v = make(map[int8]float32, xlen) + changed = true + } + + var mk int8 + var mv float32 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int8(dd.DecodeInt(8)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = float32(dd.DecodeFloat(true)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int8(dd.DecodeInt(8)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = float32(dd.DecodeFloat(true)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapInt8Float64R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[int8]float64) + v, changed := fastpathTV.DecMapInt8Float64V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[int8]float64) + fastpathTV.DecMapInt8Float64V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapInt8Float64X(vp *map[int8]float64, checkNil bool, d *Decoder) { + v, changed := f.DecMapInt8Float64V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt8Float64V(v map[int8]float64, checkNil bool, canChange bool, + d *Decoder) (_ map[int8]float64, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 9) + v = make(map[int8]float64, xlen) + changed = true + } + + var mk int8 + var mv float64 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int8(dd.DecodeInt(8)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeFloat(false) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int8(dd.DecodeInt(8)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeFloat(false) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapInt8BoolR(rv reflect.Value) { + if rv.CanAddr() { + vp := 
rv.Addr().Interface().(*map[int8]bool) + v, changed := fastpathTV.DecMapInt8BoolV(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[int8]bool) + fastpathTV.DecMapInt8BoolV(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapInt8BoolX(vp *map[int8]bool, checkNil bool, d *Decoder) { + v, changed := f.DecMapInt8BoolV(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt8BoolV(v map[int8]bool, checkNil bool, canChange bool, + d *Decoder) (_ map[int8]bool, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 2) + v = make(map[int8]bool, xlen) + changed = true + } + + var mk int8 + var mv bool + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int8(dd.DecodeInt(8)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeBool() + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int8(dd.DecodeInt(8)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeBool() + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapInt16IntfR(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[int16]interface{}) + v, changed := fastpathTV.DecMapInt16IntfV(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[int16]interface{}) + fastpathTV.DecMapInt16IntfV(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapInt16IntfX(vp *map[int16]interface{}, checkNil bool, d *Decoder) { + v, changed := f.DecMapInt16IntfV(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt16IntfV(v map[int16]interface{}, checkNil bool, canChange bool, + d *Decoder) (_ map[int16]interface{}, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 18) + v = make(map[int16]interface{}, xlen) + changed = true + } + mapGet := !d.h.MapValueReset && !d.h.InterfaceReset + var mk int16 + var mv interface{} + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int16(dd.DecodeInt(16)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + if mapGet { + mv = v[mk] + } else { + mv = nil + } + d.decode(&mv) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int16(dd.DecodeInt(16)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + if mapGet { + mv = v[mk] + } else { + mv = nil + } + d.decode(&mv) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapInt16StringR(rv reflect.Value) { + if rv.CanAddr() { + vp := 
rv.Addr().Interface().(*map[int16]string) + v, changed := fastpathTV.DecMapInt16StringV(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[int16]string) + fastpathTV.DecMapInt16StringV(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapInt16StringX(vp *map[int16]string, checkNil bool, d *Decoder) { + v, changed := f.DecMapInt16StringV(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt16StringV(v map[int16]string, checkNil bool, canChange bool, + d *Decoder) (_ map[int16]string, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 18) + v = make(map[int16]string, xlen) + changed = true + } + + var mk int16 + var mv string + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int16(dd.DecodeInt(16)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeString() + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int16(dd.DecodeInt(16)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeString() + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapInt16UintR(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[int16]uint) + v, changed := fastpathTV.DecMapInt16UintV(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[int16]uint) + fastpathTV.DecMapInt16UintV(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapInt16UintX(vp *map[int16]uint, checkNil bool, d *Decoder) { + v, changed := f.DecMapInt16UintV(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt16UintV(v map[int16]uint, checkNil bool, canChange bool, + d *Decoder) (_ map[int16]uint, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 10) + v = make(map[int16]uint, xlen) + changed = true + } + + var mk int16 + var mv uint + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int16(dd.DecodeInt(16)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint(dd.DecodeUint(uintBitsize)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int16(dd.DecodeInt(16)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint(dd.DecodeUint(uintBitsize)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapInt16Uint8R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[int16]uint8) + v, changed := fastpathTV.DecMapInt16Uint8V(*vp, fastpathCheckNilFalse, true, f.d) + if 
changed { + *vp = v + } + } else { + v := rv.Interface().(map[int16]uint8) + fastpathTV.DecMapInt16Uint8V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapInt16Uint8X(vp *map[int16]uint8, checkNil bool, d *Decoder) { + v, changed := f.DecMapInt16Uint8V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt16Uint8V(v map[int16]uint8, checkNil bool, canChange bool, + d *Decoder) (_ map[int16]uint8, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 3) + v = make(map[int16]uint8, xlen) + changed = true + } + + var mk int16 + var mv uint8 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int16(dd.DecodeInt(16)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint8(dd.DecodeUint(8)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int16(dd.DecodeInt(16)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint8(dd.DecodeUint(8)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapInt16Uint16R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[int16]uint16) + v, changed := fastpathTV.DecMapInt16Uint16V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[int16]uint16) + fastpathTV.DecMapInt16Uint16V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapInt16Uint16X(vp *map[int16]uint16, checkNil bool, d *Decoder) { + v, changed := f.DecMapInt16Uint16V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt16Uint16V(v map[int16]uint16, checkNil bool, canChange bool, + d *Decoder) (_ map[int16]uint16, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 4) + v = make(map[int16]uint16, xlen) + changed = true + } + + var mk int16 + var mv uint16 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int16(dd.DecodeInt(16)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint16(dd.DecodeUint(16)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int16(dd.DecodeInt(16)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint16(dd.DecodeUint(16)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapInt16Uint32R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[int16]uint32) + v, changed := fastpathTV.DecMapInt16Uint32V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[int16]uint32) + fastpathTV.DecMapInt16Uint32V(v, 
fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapInt16Uint32X(vp *map[int16]uint32, checkNil bool, d *Decoder) { + v, changed := f.DecMapInt16Uint32V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt16Uint32V(v map[int16]uint32, checkNil bool, canChange bool, + d *Decoder) (_ map[int16]uint32, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 6) + v = make(map[int16]uint32, xlen) + changed = true + } + + var mk int16 + var mv uint32 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int16(dd.DecodeInt(16)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint32(dd.DecodeUint(32)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int16(dd.DecodeInt(16)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint32(dd.DecodeUint(32)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapInt16Uint64R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[int16]uint64) + v, changed := fastpathTV.DecMapInt16Uint64V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[int16]uint64) + fastpathTV.DecMapInt16Uint64V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapInt16Uint64X(vp *map[int16]uint64, checkNil bool, d *Decoder) { + v, changed := f.DecMapInt16Uint64V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt16Uint64V(v map[int16]uint64, checkNil bool, canChange bool, + d *Decoder) (_ map[int16]uint64, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 10) + v = make(map[int16]uint64, xlen) + changed = true + } + + var mk int16 + var mv uint64 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int16(dd.DecodeInt(16)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeUint(64) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int16(dd.DecodeInt(16)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeUint(64) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapInt16UintptrR(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[int16]uintptr) + v, changed := fastpathTV.DecMapInt16UintptrV(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[int16]uintptr) + fastpathTV.DecMapInt16UintptrV(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapInt16UintptrX(vp *map[int16]uintptr, checkNil 
bool, d *Decoder) { + v, changed := f.DecMapInt16UintptrV(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt16UintptrV(v map[int16]uintptr, checkNil bool, canChange bool, + d *Decoder) (_ map[int16]uintptr, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 10) + v = make(map[int16]uintptr, xlen) + changed = true + } + + var mk int16 + var mv uintptr + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int16(dd.DecodeInt(16)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uintptr(dd.DecodeUint(uintBitsize)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int16(dd.DecodeInt(16)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uintptr(dd.DecodeUint(uintBitsize)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapInt16IntR(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[int16]int) + v, changed := fastpathTV.DecMapInt16IntV(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[int16]int) + fastpathTV.DecMapInt16IntV(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapInt16IntX(vp *map[int16]int, checkNil bool, d *Decoder) { + v, changed := f.DecMapInt16IntV(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt16IntV(v map[int16]int, checkNil bool, canChange bool, + d *Decoder) (_ map[int16]int, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 10) + v = make(map[int16]int, xlen) + changed = true + } + + var mk int16 + var mv int + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int16(dd.DecodeInt(16)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int(dd.DecodeInt(intBitsize)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int16(dd.DecodeInt(16)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int(dd.DecodeInt(intBitsize)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapInt16Int8R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[int16]int8) + v, changed := fastpathTV.DecMapInt16Int8V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[int16]int8) + fastpathTV.DecMapInt16Int8V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapInt16Int8X(vp *map[int16]int8, checkNil bool, d *Decoder) { + v, changed := f.DecMapInt16Int8V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ 
fastpathT) DecMapInt16Int8V(v map[int16]int8, checkNil bool, canChange bool, + d *Decoder) (_ map[int16]int8, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 3) + v = make(map[int16]int8, xlen) + changed = true + } + + var mk int16 + var mv int8 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int16(dd.DecodeInt(16)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int8(dd.DecodeInt(8)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int16(dd.DecodeInt(16)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int8(dd.DecodeInt(8)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapInt16Int16R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[int16]int16) + v, changed := fastpathTV.DecMapInt16Int16V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[int16]int16) + fastpathTV.DecMapInt16Int16V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapInt16Int16X(vp *map[int16]int16, checkNil bool, d *Decoder) { + v, changed := f.DecMapInt16Int16V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt16Int16V(v map[int16]int16, checkNil bool, canChange bool, + d *Decoder) (_ map[int16]int16, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 4) + v = make(map[int16]int16, xlen) + changed = true + } + + var mk int16 + var mv int16 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int16(dd.DecodeInt(16)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int16(dd.DecodeInt(16)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int16(dd.DecodeInt(16)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int16(dd.DecodeInt(16)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapInt16Int32R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[int16]int32) + v, changed := fastpathTV.DecMapInt16Int32V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[int16]int32) + fastpathTV.DecMapInt16Int32V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapInt16Int32X(vp *map[int16]int32, checkNil bool, d *Decoder) { + v, changed := f.DecMapInt16Int32V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt16Int32V(v map[int16]int32, checkNil bool, canChange bool, + d *Decoder) (_ map[int16]int32, changed bool) { + dd := d.d + cr := 
d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 6) + v = make(map[int16]int32, xlen) + changed = true + } + + var mk int16 + var mv int32 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int16(dd.DecodeInt(16)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int32(dd.DecodeInt(32)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int16(dd.DecodeInt(16)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int32(dd.DecodeInt(32)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapInt16Int64R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[int16]int64) + v, changed := fastpathTV.DecMapInt16Int64V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[int16]int64) + fastpathTV.DecMapInt16Int64V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapInt16Int64X(vp *map[int16]int64, checkNil bool, d *Decoder) { + v, changed := f.DecMapInt16Int64V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt16Int64V(v map[int16]int64, checkNil bool, canChange bool, + d *Decoder) (_ map[int16]int64, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 10) + v = make(map[int16]int64, xlen) + changed = true + } + + var mk int16 + var mv int64 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int16(dd.DecodeInt(16)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeInt(64) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int16(dd.DecodeInt(16)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeInt(64) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapInt16Float32R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[int16]float32) + v, changed := fastpathTV.DecMapInt16Float32V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[int16]float32) + fastpathTV.DecMapInt16Float32V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapInt16Float32X(vp *map[int16]float32, checkNil bool, d *Decoder) { + v, changed := f.DecMapInt16Float32V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt16Float32V(v map[int16]float32, checkNil bool, canChange bool, + d *Decoder) (_ map[int16]float32, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := 
dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 6) + v = make(map[int16]float32, xlen) + changed = true + } + + var mk int16 + var mv float32 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int16(dd.DecodeInt(16)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = float32(dd.DecodeFloat(true)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int16(dd.DecodeInt(16)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = float32(dd.DecodeFloat(true)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapInt16Float64R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[int16]float64) + v, changed := fastpathTV.DecMapInt16Float64V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[int16]float64) + fastpathTV.DecMapInt16Float64V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapInt16Float64X(vp *map[int16]float64, checkNil bool, d *Decoder) { + v, changed := f.DecMapInt16Float64V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt16Float64V(v map[int16]float64, checkNil bool, canChange bool, + d *Decoder) (_ map[int16]float64, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 10) + v = make(map[int16]float64, xlen) + changed = true + } + + var mk int16 + var mv float64 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int16(dd.DecodeInt(16)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeFloat(false) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int16(dd.DecodeInt(16)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeFloat(false) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapInt16BoolR(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[int16]bool) + v, changed := fastpathTV.DecMapInt16BoolV(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[int16]bool) + fastpathTV.DecMapInt16BoolV(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapInt16BoolX(vp *map[int16]bool, checkNil bool, d *Decoder) { + v, changed := f.DecMapInt16BoolV(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt16BoolV(v map[int16]bool, checkNil bool, canChange bool, + d *Decoder) (_ map[int16]bool, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 3) + v = 
make(map[int16]bool, xlen) + changed = true + } + + var mk int16 + var mv bool + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int16(dd.DecodeInt(16)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeBool() + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int16(dd.DecodeInt(16)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeBool() + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapInt32IntfR(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[int32]interface{}) + v, changed := fastpathTV.DecMapInt32IntfV(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[int32]interface{}) + fastpathTV.DecMapInt32IntfV(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapInt32IntfX(vp *map[int32]interface{}, checkNil bool, d *Decoder) { + v, changed := f.DecMapInt32IntfV(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt32IntfV(v map[int32]interface{}, checkNil bool, canChange bool, + d *Decoder) (_ map[int32]interface{}, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 20) + v = make(map[int32]interface{}, xlen) + changed = true + } + mapGet := !d.h.MapValueReset && !d.h.InterfaceReset + var mk int32 + var mv interface{} + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int32(dd.DecodeInt(32)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + if mapGet { + mv = v[mk] + } else { + mv = nil + } + d.decode(&mv) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int32(dd.DecodeInt(32)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + if mapGet { + mv = v[mk] + } else { + mv = nil + } + d.decode(&mv) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapInt32StringR(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[int32]string) + v, changed := fastpathTV.DecMapInt32StringV(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[int32]string) + fastpathTV.DecMapInt32StringV(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapInt32StringX(vp *map[int32]string, checkNil bool, d *Decoder) { + v, changed := f.DecMapInt32StringV(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt32StringV(v map[int32]string, checkNil bool, canChange bool, + d *Decoder) (_ map[int32]string, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, 
d.h.MaxInitLen, 20) + v = make(map[int32]string, xlen) + changed = true + } + + var mk int32 + var mv string + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int32(dd.DecodeInt(32)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeString() + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int32(dd.DecodeInt(32)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeString() + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapInt32UintR(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[int32]uint) + v, changed := fastpathTV.DecMapInt32UintV(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[int32]uint) + fastpathTV.DecMapInt32UintV(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapInt32UintX(vp *map[int32]uint, checkNil bool, d *Decoder) { + v, changed := f.DecMapInt32UintV(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt32UintV(v map[int32]uint, checkNil bool, canChange bool, + d *Decoder) (_ map[int32]uint, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 12) + v = make(map[int32]uint, xlen) + changed = true + } + + var mk int32 + var mv uint + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int32(dd.DecodeInt(32)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint(dd.DecodeUint(uintBitsize)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int32(dd.DecodeInt(32)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint(dd.DecodeUint(uintBitsize)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapInt32Uint8R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[int32]uint8) + v, changed := fastpathTV.DecMapInt32Uint8V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[int32]uint8) + fastpathTV.DecMapInt32Uint8V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapInt32Uint8X(vp *map[int32]uint8, checkNil bool, d *Decoder) { + v, changed := f.DecMapInt32Uint8V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt32Uint8V(v map[int32]uint8, checkNil bool, canChange bool, + d *Decoder) (_ map[int32]uint8, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 5) + v = make(map[int32]uint8, xlen) + changed = true + } + + var mk int32 + var mv uint8 + if containerLen > 0 { + for j := 0; 
j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int32(dd.DecodeInt(32)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint8(dd.DecodeUint(8)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int32(dd.DecodeInt(32)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint8(dd.DecodeUint(8)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapInt32Uint16R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[int32]uint16) + v, changed := fastpathTV.DecMapInt32Uint16V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[int32]uint16) + fastpathTV.DecMapInt32Uint16V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapInt32Uint16X(vp *map[int32]uint16, checkNil bool, d *Decoder) { + v, changed := f.DecMapInt32Uint16V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt32Uint16V(v map[int32]uint16, checkNil bool, canChange bool, + d *Decoder) (_ map[int32]uint16, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 6) + v = make(map[int32]uint16, xlen) + changed = true + } + + var mk int32 + var mv uint16 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int32(dd.DecodeInt(32)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint16(dd.DecodeUint(16)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int32(dd.DecodeInt(32)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint16(dd.DecodeUint(16)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapInt32Uint32R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[int32]uint32) + v, changed := fastpathTV.DecMapInt32Uint32V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[int32]uint32) + fastpathTV.DecMapInt32Uint32V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapInt32Uint32X(vp *map[int32]uint32, checkNil bool, d *Decoder) { + v, changed := f.DecMapInt32Uint32V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt32Uint32V(v map[int32]uint32, checkNil bool, canChange bool, + d *Decoder) (_ map[int32]uint32, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 8) + v = make(map[int32]uint32, xlen) + changed = true + } + + var mk int32 + var mv uint32 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = 
int32(dd.DecodeInt(32)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint32(dd.DecodeUint(32)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int32(dd.DecodeInt(32)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint32(dd.DecodeUint(32)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapInt32Uint64R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[int32]uint64) + v, changed := fastpathTV.DecMapInt32Uint64V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[int32]uint64) + fastpathTV.DecMapInt32Uint64V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapInt32Uint64X(vp *map[int32]uint64, checkNil bool, d *Decoder) { + v, changed := f.DecMapInt32Uint64V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt32Uint64V(v map[int32]uint64, checkNil bool, canChange bool, + d *Decoder) (_ map[int32]uint64, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 12) + v = make(map[int32]uint64, xlen) + changed = true + } + + var mk int32 + var mv uint64 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int32(dd.DecodeInt(32)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeUint(64) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int32(dd.DecodeInt(32)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeUint(64) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapInt32UintptrR(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[int32]uintptr) + v, changed := fastpathTV.DecMapInt32UintptrV(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[int32]uintptr) + fastpathTV.DecMapInt32UintptrV(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapInt32UintptrX(vp *map[int32]uintptr, checkNil bool, d *Decoder) { + v, changed := f.DecMapInt32UintptrV(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt32UintptrV(v map[int32]uintptr, checkNil bool, canChange bool, + d *Decoder) (_ map[int32]uintptr, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 12) + v = make(map[int32]uintptr, xlen) + changed = true + } + + var mk int32 + var mv uintptr + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int32(dd.DecodeInt(32)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = 
uintptr(dd.DecodeUint(uintBitsize)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int32(dd.DecodeInt(32)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uintptr(dd.DecodeUint(uintBitsize)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapInt32IntR(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[int32]int) + v, changed := fastpathTV.DecMapInt32IntV(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[int32]int) + fastpathTV.DecMapInt32IntV(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapInt32IntX(vp *map[int32]int, checkNil bool, d *Decoder) { + v, changed := f.DecMapInt32IntV(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt32IntV(v map[int32]int, checkNil bool, canChange bool, + d *Decoder) (_ map[int32]int, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 12) + v = make(map[int32]int, xlen) + changed = true + } + + var mk int32 + var mv int + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int32(dd.DecodeInt(32)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int(dd.DecodeInt(intBitsize)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int32(dd.DecodeInt(32)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int(dd.DecodeInt(intBitsize)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapInt32Int8R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[int32]int8) + v, changed := fastpathTV.DecMapInt32Int8V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[int32]int8) + fastpathTV.DecMapInt32Int8V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapInt32Int8X(vp *map[int32]int8, checkNil bool, d *Decoder) { + v, changed := f.DecMapInt32Int8V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt32Int8V(v map[int32]int8, checkNil bool, canChange bool, + d *Decoder) (_ map[int32]int8, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 5) + v = make(map[int32]int8, xlen) + changed = true + } + + var mk int32 + var mv int8 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int32(dd.DecodeInt(32)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int8(dd.DecodeInt(8)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil 
{ + cr.sendContainerState(containerMapKey) + } + mk = int32(dd.DecodeInt(32)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int8(dd.DecodeInt(8)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapInt32Int16R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[int32]int16) + v, changed := fastpathTV.DecMapInt32Int16V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[int32]int16) + fastpathTV.DecMapInt32Int16V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapInt32Int16X(vp *map[int32]int16, checkNil bool, d *Decoder) { + v, changed := f.DecMapInt32Int16V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt32Int16V(v map[int32]int16, checkNil bool, canChange bool, + d *Decoder) (_ map[int32]int16, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 6) + v = make(map[int32]int16, xlen) + changed = true + } + + var mk int32 + var mv int16 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int32(dd.DecodeInt(32)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int16(dd.DecodeInt(16)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int32(dd.DecodeInt(32)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int16(dd.DecodeInt(16)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapInt32Int32R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[int32]int32) + v, changed := fastpathTV.DecMapInt32Int32V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[int32]int32) + fastpathTV.DecMapInt32Int32V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapInt32Int32X(vp *map[int32]int32, checkNil bool, d *Decoder) { + v, changed := f.DecMapInt32Int32V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt32Int32V(v map[int32]int32, checkNil bool, canChange bool, + d *Decoder) (_ map[int32]int32, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 8) + v = make(map[int32]int32, xlen) + changed = true + } + + var mk int32 + var mv int32 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int32(dd.DecodeInt(32)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int32(dd.DecodeInt(32)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int32(dd.DecodeInt(32)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } 
+ mv = int32(dd.DecodeInt(32)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapInt32Int64R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[int32]int64) + v, changed := fastpathTV.DecMapInt32Int64V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[int32]int64) + fastpathTV.DecMapInt32Int64V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapInt32Int64X(vp *map[int32]int64, checkNil bool, d *Decoder) { + v, changed := f.DecMapInt32Int64V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt32Int64V(v map[int32]int64, checkNil bool, canChange bool, + d *Decoder) (_ map[int32]int64, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 12) + v = make(map[int32]int64, xlen) + changed = true + } + + var mk int32 + var mv int64 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int32(dd.DecodeInt(32)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeInt(64) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int32(dd.DecodeInt(32)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeInt(64) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapInt32Float32R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[int32]float32) + v, changed := fastpathTV.DecMapInt32Float32V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[int32]float32) + fastpathTV.DecMapInt32Float32V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapInt32Float32X(vp *map[int32]float32, checkNil bool, d *Decoder) { + v, changed := f.DecMapInt32Float32V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt32Float32V(v map[int32]float32, checkNil bool, canChange bool, + d *Decoder) (_ map[int32]float32, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 8) + v = make(map[int32]float32, xlen) + changed = true + } + + var mk int32 + var mv float32 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int32(dd.DecodeInt(32)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = float32(dd.DecodeFloat(true)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int32(dd.DecodeInt(32)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = float32(dd.DecodeFloat(true)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + 
cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapInt32Float64R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[int32]float64) + v, changed := fastpathTV.DecMapInt32Float64V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[int32]float64) + fastpathTV.DecMapInt32Float64V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapInt32Float64X(vp *map[int32]float64, checkNil bool, d *Decoder) { + v, changed := f.DecMapInt32Float64V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt32Float64V(v map[int32]float64, checkNil bool, canChange bool, + d *Decoder) (_ map[int32]float64, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 12) + v = make(map[int32]float64, xlen) + changed = true + } + + var mk int32 + var mv float64 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int32(dd.DecodeInt(32)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeFloat(false) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int32(dd.DecodeInt(32)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeFloat(false) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapInt32BoolR(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[int32]bool) + v, changed := fastpathTV.DecMapInt32BoolV(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[int32]bool) + fastpathTV.DecMapInt32BoolV(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapInt32BoolX(vp *map[int32]bool, checkNil bool, d *Decoder) { + v, changed := f.DecMapInt32BoolV(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt32BoolV(v map[int32]bool, checkNil bool, canChange bool, + d *Decoder) (_ map[int32]bool, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 5) + v = make(map[int32]bool, xlen) + changed = true + } + + var mk int32 + var mv bool + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int32(dd.DecodeInt(32)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeBool() + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int32(dd.DecodeInt(32)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeBool() + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapInt64IntfR(rv reflect.Value) { + if 
rv.CanAddr() { + vp := rv.Addr().Interface().(*map[int64]interface{}) + v, changed := fastpathTV.DecMapInt64IntfV(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[int64]interface{}) + fastpathTV.DecMapInt64IntfV(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapInt64IntfX(vp *map[int64]interface{}, checkNil bool, d *Decoder) { + v, changed := f.DecMapInt64IntfV(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt64IntfV(v map[int64]interface{}, checkNil bool, canChange bool, + d *Decoder) (_ map[int64]interface{}, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 24) + v = make(map[int64]interface{}, xlen) + changed = true + } + mapGet := !d.h.MapValueReset && !d.h.InterfaceReset + var mk int64 + var mv interface{} + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeInt(64) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + if mapGet { + mv = v[mk] + } else { + mv = nil + } + d.decode(&mv) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeInt(64) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + if mapGet { + mv = v[mk] + } else { + mv = nil + } + d.decode(&mv) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapInt64StringR(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[int64]string) + v, changed := fastpathTV.DecMapInt64StringV(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[int64]string) + fastpathTV.DecMapInt64StringV(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapInt64StringX(vp *map[int64]string, checkNil bool, d *Decoder) { + v, changed := f.DecMapInt64StringV(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt64StringV(v map[int64]string, checkNil bool, canChange bool, + d *Decoder) (_ map[int64]string, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 24) + v = make(map[int64]string, xlen) + changed = true + } + + var mk int64 + var mv string + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeInt(64) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeString() + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeInt(64) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeString() + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapInt64UintR(rv reflect.Value) { + if 
rv.CanAddr() { + vp := rv.Addr().Interface().(*map[int64]uint) + v, changed := fastpathTV.DecMapInt64UintV(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[int64]uint) + fastpathTV.DecMapInt64UintV(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapInt64UintX(vp *map[int64]uint, checkNil bool, d *Decoder) { + v, changed := f.DecMapInt64UintV(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt64UintV(v map[int64]uint, checkNil bool, canChange bool, + d *Decoder) (_ map[int64]uint, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 16) + v = make(map[int64]uint, xlen) + changed = true + } + + var mk int64 + var mv uint + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeInt(64) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint(dd.DecodeUint(uintBitsize)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeInt(64) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint(dd.DecodeUint(uintBitsize)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapInt64Uint8R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[int64]uint8) + v, changed := fastpathTV.DecMapInt64Uint8V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[int64]uint8) + fastpathTV.DecMapInt64Uint8V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapInt64Uint8X(vp *map[int64]uint8, checkNil bool, d *Decoder) { + v, changed := f.DecMapInt64Uint8V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt64Uint8V(v map[int64]uint8, checkNil bool, canChange bool, + d *Decoder) (_ map[int64]uint8, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 9) + v = make(map[int64]uint8, xlen) + changed = true + } + + var mk int64 + var mv uint8 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeInt(64) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint8(dd.DecodeUint(8)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeInt(64) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint8(dd.DecodeUint(8)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapInt64Uint16R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[int64]uint16) + v, changed := fastpathTV.DecMapInt64Uint16V(*vp, fastpathCheckNilFalse, true, f.d) + if 
changed { + *vp = v + } + } else { + v := rv.Interface().(map[int64]uint16) + fastpathTV.DecMapInt64Uint16V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapInt64Uint16X(vp *map[int64]uint16, checkNil bool, d *Decoder) { + v, changed := f.DecMapInt64Uint16V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt64Uint16V(v map[int64]uint16, checkNil bool, canChange bool, + d *Decoder) (_ map[int64]uint16, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 10) + v = make(map[int64]uint16, xlen) + changed = true + } + + var mk int64 + var mv uint16 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeInt(64) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint16(dd.DecodeUint(16)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeInt(64) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint16(dd.DecodeUint(16)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapInt64Uint32R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[int64]uint32) + v, changed := fastpathTV.DecMapInt64Uint32V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[int64]uint32) + fastpathTV.DecMapInt64Uint32V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapInt64Uint32X(vp *map[int64]uint32, checkNil bool, d *Decoder) { + v, changed := f.DecMapInt64Uint32V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt64Uint32V(v map[int64]uint32, checkNil bool, canChange bool, + d *Decoder) (_ map[int64]uint32, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 12) + v = make(map[int64]uint32, xlen) + changed = true + } + + var mk int64 + var mv uint32 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeInt(64) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint32(dd.DecodeUint(32)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeInt(64) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint32(dd.DecodeUint(32)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapInt64Uint64R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[int64]uint64) + v, changed := fastpathTV.DecMapInt64Uint64V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[int64]uint64) + fastpathTV.DecMapInt64Uint64V(v, 
fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapInt64Uint64X(vp *map[int64]uint64, checkNil bool, d *Decoder) { + v, changed := f.DecMapInt64Uint64V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt64Uint64V(v map[int64]uint64, checkNil bool, canChange bool, + d *Decoder) (_ map[int64]uint64, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 16) + v = make(map[int64]uint64, xlen) + changed = true + } + + var mk int64 + var mv uint64 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeInt(64) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeUint(64) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeInt(64) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeUint(64) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapInt64UintptrR(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[int64]uintptr) + v, changed := fastpathTV.DecMapInt64UintptrV(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[int64]uintptr) + fastpathTV.DecMapInt64UintptrV(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapInt64UintptrX(vp *map[int64]uintptr, checkNil bool, d *Decoder) { + v, changed := f.DecMapInt64UintptrV(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt64UintptrV(v map[int64]uintptr, checkNil bool, canChange bool, + d *Decoder) (_ map[int64]uintptr, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 16) + v = make(map[int64]uintptr, xlen) + changed = true + } + + var mk int64 + var mv uintptr + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeInt(64) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uintptr(dd.DecodeUint(uintBitsize)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeInt(64) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uintptr(dd.DecodeUint(uintBitsize)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapInt64IntR(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[int64]int) + v, changed := fastpathTV.DecMapInt64IntV(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[int64]int) + fastpathTV.DecMapInt64IntV(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapInt64IntX(vp *map[int64]int, checkNil bool, d *Decoder) { + v, 
changed := f.DecMapInt64IntV(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt64IntV(v map[int64]int, checkNil bool, canChange bool, + d *Decoder) (_ map[int64]int, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 16) + v = make(map[int64]int, xlen) + changed = true + } + + var mk int64 + var mv int + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeInt(64) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int(dd.DecodeInt(intBitsize)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeInt(64) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int(dd.DecodeInt(intBitsize)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapInt64Int8R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[int64]int8) + v, changed := fastpathTV.DecMapInt64Int8V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[int64]int8) + fastpathTV.DecMapInt64Int8V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapInt64Int8X(vp *map[int64]int8, checkNil bool, d *Decoder) { + v, changed := f.DecMapInt64Int8V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt64Int8V(v map[int64]int8, checkNil bool, canChange bool, + d *Decoder) (_ map[int64]int8, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 9) + v = make(map[int64]int8, xlen) + changed = true + } + + var mk int64 + var mv int8 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeInt(64) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int8(dd.DecodeInt(8)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeInt(64) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int8(dd.DecodeInt(8)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapInt64Int16R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[int64]int16) + v, changed := fastpathTV.DecMapInt64Int16V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[int64]int16) + fastpathTV.DecMapInt64Int16V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapInt64Int16X(vp *map[int64]int16, checkNil bool, d *Decoder) { + v, changed := f.DecMapInt64Int16V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt64Int16V(v map[int64]int16, checkNil bool, canChange bool, + d 
*Decoder) (_ map[int64]int16, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 10) + v = make(map[int64]int16, xlen) + changed = true + } + + var mk int64 + var mv int16 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeInt(64) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int16(dd.DecodeInt(16)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeInt(64) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int16(dd.DecodeInt(16)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapInt64Int32R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[int64]int32) + v, changed := fastpathTV.DecMapInt64Int32V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[int64]int32) + fastpathTV.DecMapInt64Int32V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapInt64Int32X(vp *map[int64]int32, checkNil bool, d *Decoder) { + v, changed := f.DecMapInt64Int32V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt64Int32V(v map[int64]int32, checkNil bool, canChange bool, + d *Decoder) (_ map[int64]int32, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 12) + v = make(map[int64]int32, xlen) + changed = true + } + + var mk int64 + var mv int32 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeInt(64) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int32(dd.DecodeInt(32)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeInt(64) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int32(dd.DecodeInt(32)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapInt64Int64R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[int64]int64) + v, changed := fastpathTV.DecMapInt64Int64V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[int64]int64) + fastpathTV.DecMapInt64Int64V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapInt64Int64X(vp *map[int64]int64, checkNil bool, d *Decoder) { + v, changed := f.DecMapInt64Int64V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt64Int64V(v map[int64]int64, checkNil bool, canChange bool, + d *Decoder) (_ map[int64]int64, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, 
changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 16) + v = make(map[int64]int64, xlen) + changed = true + } + + var mk int64 + var mv int64 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeInt(64) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeInt(64) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeInt(64) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeInt(64) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapInt64Float32R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[int64]float32) + v, changed := fastpathTV.DecMapInt64Float32V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[int64]float32) + fastpathTV.DecMapInt64Float32V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapInt64Float32X(vp *map[int64]float32, checkNil bool, d *Decoder) { + v, changed := f.DecMapInt64Float32V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt64Float32V(v map[int64]float32, checkNil bool, canChange bool, + d *Decoder) (_ map[int64]float32, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 12) + v = make(map[int64]float32, xlen) + changed = true + } + + var mk int64 + var mv float32 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeInt(64) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = float32(dd.DecodeFloat(true)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeInt(64) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = float32(dd.DecodeFloat(true)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapInt64Float64R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[int64]float64) + v, changed := fastpathTV.DecMapInt64Float64V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[int64]float64) + fastpathTV.DecMapInt64Float64V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapInt64Float64X(vp *map[int64]float64, checkNil bool, d *Decoder) { + v, changed := f.DecMapInt64Float64V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt64Float64V(v map[int64]float64, checkNil bool, canChange bool, + d *Decoder) (_ map[int64]float64, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, 
d.h.MaxInitLen, 16) + v = make(map[int64]float64, xlen) + changed = true + } + + var mk int64 + var mv float64 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeInt(64) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeFloat(false) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeInt(64) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeFloat(false) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapInt64BoolR(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[int64]bool) + v, changed := fastpathTV.DecMapInt64BoolV(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[int64]bool) + fastpathTV.DecMapInt64BoolV(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapInt64BoolX(vp *map[int64]bool, checkNil bool, d *Decoder) { + v, changed := f.DecMapInt64BoolV(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt64BoolV(v map[int64]bool, checkNil bool, canChange bool, + d *Decoder) (_ map[int64]bool, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 9) + v = make(map[int64]bool, xlen) + changed = true + } + + var mk int64 + var mv bool + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeInt(64) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeBool() + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeInt(64) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeBool() + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapBoolIntfR(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[bool]interface{}) + v, changed := fastpathTV.DecMapBoolIntfV(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[bool]interface{}) + fastpathTV.DecMapBoolIntfV(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapBoolIntfX(vp *map[bool]interface{}, checkNil bool, d *Decoder) { + v, changed := f.DecMapBoolIntfV(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapBoolIntfV(v map[bool]interface{}, checkNil bool, canChange bool, + d *Decoder) (_ map[bool]interface{}, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 17) + v = make(map[bool]interface{}, xlen) + changed = true + } + mapGet := !d.h.MapValueReset && !d.h.InterfaceReset + var mk bool + var mv interface{} + if 
containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeBool() + if cr != nil { + cr.sendContainerState(containerMapValue) + } + if mapGet { + mv = v[mk] + } else { + mv = nil + } + d.decode(&mv) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeBool() + if cr != nil { + cr.sendContainerState(containerMapValue) + } + if mapGet { + mv = v[mk] + } else { + mv = nil + } + d.decode(&mv) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapBoolStringR(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[bool]string) + v, changed := fastpathTV.DecMapBoolStringV(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[bool]string) + fastpathTV.DecMapBoolStringV(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapBoolStringX(vp *map[bool]string, checkNil bool, d *Decoder) { + v, changed := f.DecMapBoolStringV(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapBoolStringV(v map[bool]string, checkNil bool, canChange bool, + d *Decoder) (_ map[bool]string, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 17) + v = make(map[bool]string, xlen) + changed = true + } + + var mk bool + var mv string + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeBool() + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeString() + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeBool() + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeString() + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapBoolUintR(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[bool]uint) + v, changed := fastpathTV.DecMapBoolUintV(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[bool]uint) + fastpathTV.DecMapBoolUintV(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapBoolUintX(vp *map[bool]uint, checkNil bool, d *Decoder) { + v, changed := f.DecMapBoolUintV(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapBoolUintV(v map[bool]uint, checkNil bool, canChange bool, + d *Decoder) (_ map[bool]uint, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 9) + v = make(map[bool]uint, xlen) + changed = true + } + + var mk bool + var mv uint + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = 
dd.DecodeBool() + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint(dd.DecodeUint(uintBitsize)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeBool() + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint(dd.DecodeUint(uintBitsize)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapBoolUint8R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[bool]uint8) + v, changed := fastpathTV.DecMapBoolUint8V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[bool]uint8) + fastpathTV.DecMapBoolUint8V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapBoolUint8X(vp *map[bool]uint8, checkNil bool, d *Decoder) { + v, changed := f.DecMapBoolUint8V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapBoolUint8V(v map[bool]uint8, checkNil bool, canChange bool, + d *Decoder) (_ map[bool]uint8, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 2) + v = make(map[bool]uint8, xlen) + changed = true + } + + var mk bool + var mv uint8 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeBool() + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint8(dd.DecodeUint(8)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeBool() + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint8(dd.DecodeUint(8)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapBoolUint16R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[bool]uint16) + v, changed := fastpathTV.DecMapBoolUint16V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[bool]uint16) + fastpathTV.DecMapBoolUint16V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapBoolUint16X(vp *map[bool]uint16, checkNil bool, d *Decoder) { + v, changed := f.DecMapBoolUint16V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapBoolUint16V(v map[bool]uint16, checkNil bool, canChange bool, + d *Decoder) (_ map[bool]uint16, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 3) + v = make(map[bool]uint16, xlen) + changed = true + } + + var mk bool + var mv uint16 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeBool() + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint16(dd.DecodeUint(16)) + if v != nil { + v[mk] = mv + } + } + } else if 
containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeBool() + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint16(dd.DecodeUint(16)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapBoolUint32R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[bool]uint32) + v, changed := fastpathTV.DecMapBoolUint32V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[bool]uint32) + fastpathTV.DecMapBoolUint32V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapBoolUint32X(vp *map[bool]uint32, checkNil bool, d *Decoder) { + v, changed := f.DecMapBoolUint32V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapBoolUint32V(v map[bool]uint32, checkNil bool, canChange bool, + d *Decoder) (_ map[bool]uint32, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 5) + v = make(map[bool]uint32, xlen) + changed = true + } + + var mk bool + var mv uint32 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeBool() + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint32(dd.DecodeUint(32)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeBool() + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint32(dd.DecodeUint(32)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapBoolUint64R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[bool]uint64) + v, changed := fastpathTV.DecMapBoolUint64V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[bool]uint64) + fastpathTV.DecMapBoolUint64V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapBoolUint64X(vp *map[bool]uint64, checkNil bool, d *Decoder) { + v, changed := f.DecMapBoolUint64V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapBoolUint64V(v map[bool]uint64, checkNil bool, canChange bool, + d *Decoder) (_ map[bool]uint64, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 9) + v = make(map[bool]uint64, xlen) + changed = true + } + + var mk bool + var mv uint64 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeBool() + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeUint(64) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeBool() + if cr != nil { + 
cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeUint(64) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapBoolUintptrR(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[bool]uintptr) + v, changed := fastpathTV.DecMapBoolUintptrV(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[bool]uintptr) + fastpathTV.DecMapBoolUintptrV(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapBoolUintptrX(vp *map[bool]uintptr, checkNil bool, d *Decoder) { + v, changed := f.DecMapBoolUintptrV(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapBoolUintptrV(v map[bool]uintptr, checkNil bool, canChange bool, + d *Decoder) (_ map[bool]uintptr, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 9) + v = make(map[bool]uintptr, xlen) + changed = true + } + + var mk bool + var mv uintptr + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeBool() + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uintptr(dd.DecodeUint(uintBitsize)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeBool() + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uintptr(dd.DecodeUint(uintBitsize)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapBoolIntR(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[bool]int) + v, changed := fastpathTV.DecMapBoolIntV(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[bool]int) + fastpathTV.DecMapBoolIntV(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapBoolIntX(vp *map[bool]int, checkNil bool, d *Decoder) { + v, changed := f.DecMapBoolIntV(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapBoolIntV(v map[bool]int, checkNil bool, canChange bool, + d *Decoder) (_ map[bool]int, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 9) + v = make(map[bool]int, xlen) + changed = true + } + + var mk bool + var mv int + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeBool() + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int(dd.DecodeInt(intBitsize)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeBool() + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int(dd.DecodeInt(intBitsize)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + 
cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapBoolInt8R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[bool]int8) + v, changed := fastpathTV.DecMapBoolInt8V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[bool]int8) + fastpathTV.DecMapBoolInt8V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapBoolInt8X(vp *map[bool]int8, checkNil bool, d *Decoder) { + v, changed := f.DecMapBoolInt8V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapBoolInt8V(v map[bool]int8, checkNil bool, canChange bool, + d *Decoder) (_ map[bool]int8, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 2) + v = make(map[bool]int8, xlen) + changed = true + } + + var mk bool + var mv int8 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeBool() + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int8(dd.DecodeInt(8)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeBool() + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int8(dd.DecodeInt(8)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapBoolInt16R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[bool]int16) + v, changed := fastpathTV.DecMapBoolInt16V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[bool]int16) + fastpathTV.DecMapBoolInt16V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapBoolInt16X(vp *map[bool]int16, checkNil bool, d *Decoder) { + v, changed := f.DecMapBoolInt16V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapBoolInt16V(v map[bool]int16, checkNil bool, canChange bool, + d *Decoder) (_ map[bool]int16, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 3) + v = make(map[bool]int16, xlen) + changed = true + } + + var mk bool + var mv int16 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeBool() + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int16(dd.DecodeInt(16)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeBool() + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int16(dd.DecodeInt(16)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapBoolInt32R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[bool]int32) + v, changed 
:= fastpathTV.DecMapBoolInt32V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[bool]int32) + fastpathTV.DecMapBoolInt32V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapBoolInt32X(vp *map[bool]int32, checkNil bool, d *Decoder) { + v, changed := f.DecMapBoolInt32V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapBoolInt32V(v map[bool]int32, checkNil bool, canChange bool, + d *Decoder) (_ map[bool]int32, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 5) + v = make(map[bool]int32, xlen) + changed = true + } + + var mk bool + var mv int32 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeBool() + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int32(dd.DecodeInt(32)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeBool() + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int32(dd.DecodeInt(32)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapBoolInt64R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[bool]int64) + v, changed := fastpathTV.DecMapBoolInt64V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[bool]int64) + fastpathTV.DecMapBoolInt64V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapBoolInt64X(vp *map[bool]int64, checkNil bool, d *Decoder) { + v, changed := f.DecMapBoolInt64V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapBoolInt64V(v map[bool]int64, checkNil bool, canChange bool, + d *Decoder) (_ map[bool]int64, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 9) + v = make(map[bool]int64, xlen) + changed = true + } + + var mk bool + var mv int64 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeBool() + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeInt(64) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeBool() + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeInt(64) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapBoolFloat32R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[bool]float32) + v, changed := fastpathTV.DecMapBoolFloat32V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[bool]float32) + fastpathTV.DecMapBoolFloat32V(v, 
fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapBoolFloat32X(vp *map[bool]float32, checkNil bool, d *Decoder) { + v, changed := f.DecMapBoolFloat32V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapBoolFloat32V(v map[bool]float32, checkNil bool, canChange bool, + d *Decoder) (_ map[bool]float32, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 5) + v = make(map[bool]float32, xlen) + changed = true + } + + var mk bool + var mv float32 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeBool() + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = float32(dd.DecodeFloat(true)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeBool() + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = float32(dd.DecodeFloat(true)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapBoolFloat64R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[bool]float64) + v, changed := fastpathTV.DecMapBoolFloat64V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[bool]float64) + fastpathTV.DecMapBoolFloat64V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapBoolFloat64X(vp *map[bool]float64, checkNil bool, d *Decoder) { + v, changed := f.DecMapBoolFloat64V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapBoolFloat64V(v map[bool]float64, checkNil bool, canChange bool, + d *Decoder) (_ map[bool]float64, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 9) + v = make(map[bool]float64, xlen) + changed = true + } + + var mk bool + var mv float64 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeBool() + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeFloat(false) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeBool() + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeFloat(false) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapBoolBoolR(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[bool]bool) + v, changed := fastpathTV.DecMapBoolBoolV(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[bool]bool) + fastpathTV.DecMapBoolBoolV(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapBoolBoolX(vp *map[bool]bool, checkNil bool, d *Decoder) { + v, changed := 
f.DecMapBoolBoolV(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapBoolBoolV(v map[bool]bool, checkNil bool, canChange bool, + d *Decoder) (_ map[bool]bool, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 2) + v = make(map[bool]bool, xlen) + changed = true + } + + var mk bool + var mv bool + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeBool() + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeBool() + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeBool() + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeBool() + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} diff --git a/vendor/github.com/coreos/etcd/Godeps/_workspace/src/github.com/ugorji/go/codec/fast-path.go.tmpl b/vendor/github.com/coreos/etcd/Godeps/_workspace/src/github.com/ugorji/go/codec/fast-path.go.tmpl new file mode 100644 index 000000000..58cc6df4c --- /dev/null +++ b/vendor/github.com/coreos/etcd/Godeps/_workspace/src/github.com/ugorji/go/codec/fast-path.go.tmpl @@ -0,0 +1,511 @@ +// +build !notfastpath + +// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved. +// Use of this source code is governed by a MIT license found in the LICENSE file. + +// ************************************************************ +// DO NOT EDIT. +// THIS FILE IS AUTO-GENERATED from fast-path.go.tmpl +// ************************************************************ + +package codec + +// Fast path functions try to create a fast path encode or decode implementation +// for common maps and slices. +// +// We define the functions and register them in this single file +// so as not to pollute encode.go and decode.go, or create a dependency there. +// This file can be omitted without causing a build failure. +// +// The advantage of fast paths is: +// - Many calls bypass reflection altogether +// +// Currently supported: +// - slice of all builtin types, +// - map of all builtin types to string or interface value +// - symmetrical maps of all builtin types (e.g. str-str, uint8-uint8) +// This should provide adequate "typical" implementations. +// +// Note that fast track decode functions must handle values for which an address cannot be obtained. +// For example: +// m2 := map[string]int{} +// p2 := []interface{}{m2} +// // decoding into p2 will bomb if fast track functions do not treat it as unaddressable.
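+//
+// A minimal illustrative sketch, with `data` and `h` as assumed placeholders (`h`
+// being any configured Handle, e.g. a *MsgpackHandle): the generated
+// fastpathDecXxxR wrappers check rv.CanAddr() and, for an unaddressable map, call
+// the V function with canChange=false, so the reference-typed map is mutated in
+// place rather than replaced through a pointer.
+//
+//	var m map[string]int
+//	NewDecoderBytes(data, h).MustDecode(&m)  // addressable: fast path may allocate and assign m
+//
+//	p2 := []interface{}{map[string]int{}}
+//	NewDecoderBytes(data, h).MustDecode(&p2) // inner map is unaddressable: filled in place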
+// + +import ( + "reflect" + "sort" +) + +const fastpathCheckNilFalse = false // for reflect +const fastpathCheckNilTrue = true // for type switch + +type fastpathT struct {} + +var fastpathTV fastpathT + +type fastpathE struct { + rtid uintptr + rt reflect.Type + encfn func(*encFnInfo, reflect.Value) + decfn func(*decFnInfo, reflect.Value) +} + +type fastpathA [{{ .FastpathLen }}]fastpathE + +func (x *fastpathA) index(rtid uintptr) int { + // use binary search to grab the index (adapted from sort/search.go) + h, i, j := 0, 0, {{ .FastpathLen }} // len(x) + for i < j { + h = i + (j-i)/2 + if x[h].rtid < rtid { + i = h + 1 + } else { + j = h + } + } + if i < {{ .FastpathLen }} && x[i].rtid == rtid { + return i + } + return -1 +} + +type fastpathAslice []fastpathE + +func (x fastpathAslice) Len() int { return len(x) } +func (x fastpathAslice) Less(i, j int) bool { return x[i].rtid < x[j].rtid } +func (x fastpathAslice) Swap(i, j int) { x[i], x[j] = x[j], x[i] } + +var fastpathAV fastpathA + +// due to possible initialization loop error, make fastpath in an init() +func init() { + if !fastpathEnabled { + return + } + i := 0 + fn := func(v interface{}, fe func(*encFnInfo, reflect.Value), fd func(*decFnInfo, reflect.Value)) (f fastpathE) { + xrt := reflect.TypeOf(v) + xptr := reflect.ValueOf(xrt).Pointer() + fastpathAV[i] = fastpathE{xptr, xrt, fe, fd} + i++ + return + } + + {{range .Values}}{{if not .Primitive}}{{if not .MapKey }} + fn([]{{ .Elem }}(nil), (*encFnInfo).{{ .MethodNamePfx "fastpathEnc" false }}R, (*decFnInfo).{{ .MethodNamePfx "fastpathDec" false }}R){{end}}{{end}}{{end}} + + {{range .Values}}{{if not .Primitive}}{{if .MapKey }} + fn(map[{{ .MapKey }}]{{ .Elem }}(nil), (*encFnInfo).{{ .MethodNamePfx "fastpathEnc" false }}R, (*decFnInfo).{{ .MethodNamePfx "fastpathDec" false }}R){{end}}{{end}}{{end}} + + sort.Sort(fastpathAslice(fastpathAV[:])) +} + +// -- encode + +// -- -- fast path type switch +func fastpathEncodeTypeSwitch(iv interface{}, e *Encoder) bool { + if !fastpathEnabled { + return false + } + switch v := iv.(type) { +{{range .Values}}{{if not .Primitive}}{{if not .MapKey }} + case []{{ .Elem }}:{{else}} + case map[{{ .MapKey }}]{{ .Elem }}:{{end}} + fastpathTV.{{ .MethodNamePfx "Enc" false }}V(v, fastpathCheckNilTrue, e){{if not .MapKey }} + case *[]{{ .Elem }}:{{else}} + case *map[{{ .MapKey }}]{{ .Elem }}:{{end}} + fastpathTV.{{ .MethodNamePfx "Enc" false }}V(*v, fastpathCheckNilTrue, e) +{{end}}{{end}} + default: + _ = v // TODO: workaround https://github.com/golang/go/issues/12927 (remove after go 1.6 release) + return false + } + return true +} + +func fastpathEncodeTypeSwitchSlice(iv interface{}, e *Encoder) bool { + if !fastpathEnabled { + return false + } + switch v := iv.(type) { +{{range .Values}}{{if not .Primitive}}{{if not .MapKey }} + case []{{ .Elem }}: + fastpathTV.{{ .MethodNamePfx "Enc" false }}V(v, fastpathCheckNilTrue, e) + case *[]{{ .Elem }}: + fastpathTV.{{ .MethodNamePfx "Enc" false }}V(*v, fastpathCheckNilTrue, e) +{{end}}{{end}}{{end}} + default: + _ = v // TODO: workaround https://github.com/golang/go/issues/12927 (remove after go 1.6 release) + return false + } + return true +} + +func fastpathEncodeTypeSwitchMap(iv interface{}, e *Encoder) bool { + if !fastpathEnabled { + return false + } + switch v := iv.(type) { +{{range .Values}}{{if not .Primitive}}{{if .MapKey }} + case map[{{ .MapKey }}]{{ .Elem }}: + fastpathTV.{{ .MethodNamePfx "Enc" false }}V(v, fastpathCheckNilTrue, e) + case *map[{{ .MapKey }}]{{ .Elem }}: + fastpathTV.{{ 
.MethodNamePfx "Enc" false }}V(*v, fastpathCheckNilTrue, e) +{{end}}{{end}}{{end}} + default: + _ = v // TODO: workaround https://github.com/golang/go/issues/12927 (remove after go 1.6 release) + return false + } + return true +} + +// -- -- fast path functions +{{range .Values}}{{if not .Primitive}}{{if not .MapKey }} + +func (f *encFnInfo) {{ .MethodNamePfx "fastpathEnc" false }}R(rv reflect.Value) { + fastpathTV.{{ .MethodNamePfx "Enc" false }}V(rv.Interface().([]{{ .Elem }}), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) {{ .MethodNamePfx "Enc" false }}V(v []{{ .Elem }}, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeArrayStart(len(v)) + for _, v2 := range v { + if cr != nil { cr.sendContainerState(containerArrayElem) } + {{ encmd .Elem "v2"}} + } + if cr != nil { cr.sendContainerState(containerArrayEnd) }{{/* ee.EncodeEnd() */}} +} + +{{end}}{{end}}{{end}} + +{{range .Values}}{{if not .Primitive}}{{if .MapKey }} + +func (f *encFnInfo) {{ .MethodNamePfx "fastpathEnc" false }}R(rv reflect.Value) { + fastpathTV.{{ .MethodNamePfx "Enc" false }}V(rv.Interface().(map[{{ .MapKey }}]{{ .Elem }}), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) {{ .MethodNamePfx "Enc" false }}V(v map[{{ .MapKey }}]{{ .Elem }}, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + {{if eq .MapKey "string"}}asSymbols := e.h.AsSymbols&AsSymbolMapStringKeysFlag != 0 + {{end}}if e.h.Canonical { + {{if eq .MapKey "interface{}"}}{{/* out of band + */}}var mksv []byte = make([]byte, 0, len(v)*16) // temporary byte slice for the encoding + e2 := NewEncoderBytes(&mksv, e.hh) + v2 := make([]bytesI, len(v)) + var i, l int + var vp *bytesI {{/* put loop variables outside. 
seems currently needed for better perf */}} + for k2, _ := range v { + l = len(mksv) + e2.MustEncode(k2) + vp = &v2[i] + vp.v = mksv[l:] + vp.i = k2 + i++ + } + sort.Sort(bytesISlice(v2)) + for j := range v2 { + if cr != nil { cr.sendContainerState(containerMapKey) } + e.asis(v2[j].v) + if cr != nil { cr.sendContainerState(containerMapValue) } + e.encode(v[v2[j].i]) + } {{else}}{{ $x := sorttype .MapKey true}}v2 := make([]{{ $x }}, len(v)) + var i int + for k, _ := range v { + v2[i] = {{ $x }}(k) + i++ + } + sort.Sort({{ sorttype .MapKey false}}(v2)) + for _, k2 := range v2 { + if cr != nil { cr.sendContainerState(containerMapKey) } + {{if eq .MapKey "string"}}if asSymbols { + ee.EncodeSymbol(k2) + } else { + ee.EncodeString(c_UTF8, k2) + }{{else}}{{ $y := printf "%s(k2)" .MapKey }}{{ encmd .MapKey $y }}{{end}} + if cr != nil { cr.sendContainerState(containerMapValue) } + {{ $y := printf "v[%s(k2)]" .MapKey }}{{ encmd .Elem $y }} + } {{end}} + } else { + for k2, v2 := range v { + if cr != nil { cr.sendContainerState(containerMapKey) } + {{if eq .MapKey "string"}}if asSymbols { + ee.EncodeSymbol(k2) + } else { + ee.EncodeString(c_UTF8, k2) + }{{else}}{{ encmd .MapKey "k2"}}{{end}} + if cr != nil { cr.sendContainerState(containerMapValue) } + {{ encmd .Elem "v2"}} + } + } + if cr != nil { cr.sendContainerState(containerMapEnd) }{{/* ee.EncodeEnd() */}} +} + +{{end}}{{end}}{{end}} + +// -- decode + +// -- -- fast path type switch +func fastpathDecodeTypeSwitch(iv interface{}, d *Decoder) bool { + if !fastpathEnabled { + return false + } + switch v := iv.(type) { +{{range .Values}}{{if not .Primitive}}{{if not .MapKey }} + case []{{ .Elem }}:{{else}} + case map[{{ .MapKey }}]{{ .Elem }}:{{end}} + fastpathTV.{{ .MethodNamePfx "Dec" false }}V(v, fastpathCheckNilFalse, false, d){{if not .MapKey }} + case *[]{{ .Elem }}:{{else}} + case *map[{{ .MapKey }}]{{ .Elem }}:{{end}} + v2, changed2 := fastpathTV.{{ .MethodNamePfx "Dec" false }}V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } +{{end}}{{end}} + default: + _ = v // TODO: workaround https://github.com/golang/go/issues/12927 (remove after go 1.6 release) + return false + } + return true +} + +// -- -- fast path functions +{{range .Values}}{{if not .Primitive}}{{if not .MapKey }} +{{/* +Slices can change if they +- did not come from an array +- are addressable (from a ptr) +- are settable (e.g. 
contained in an interface{}) +*/}} +func (f *decFnInfo) {{ .MethodNamePfx "fastpathDec" false }}R(rv reflect.Value) { + array := f.seq == seqTypeArray + if !array && rv.CanAddr() { {{/* // CanSet => CanAddr + Exported */}} + vp := rv.Addr().Interface().(*[]{{ .Elem }}) + v, changed := fastpathTV.{{ .MethodNamePfx "Dec" false }}V(*vp, fastpathCheckNilFalse, !array, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().([]{{ .Elem }}) + fastpathTV.{{ .MethodNamePfx "Dec" false }}V(v, fastpathCheckNilFalse, false, f.d) + } +} + +func (f fastpathT) {{ .MethodNamePfx "Dec" false }}X(vp *[]{{ .Elem }}, checkNil bool, d *Decoder) { + v, changed := f.{{ .MethodNamePfx "Dec" false }}V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) {{ .MethodNamePfx "Dec" false }}V(v []{{ .Elem }}, checkNil bool, canChange bool, d *Decoder) (_ []{{ .Elem }}, changed bool) { + dd := d.d + {{/* // if dd.isContainerType(valueTypeNil) { dd.TryDecodeAsNil() */}} + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + slh, containerLenS := d.decSliceHelperStart() + if containerLenS == 0 { + if canChange { + if v == nil { + v = []{{ .Elem }}{} + } else if len(v) != 0 { + v = v[:0] + } + changed = true + } + slh.End() + return + } + + if containerLenS > 0 { + x2read := containerLenS + var xtrunc bool + if containerLenS > cap(v) { + if canChange { {{/* + // fast-path is for "basic" immutable types, so no need to copy them over + // s := make([]{{ .Elem }}, decInferLen(containerLenS, d.h.MaxInitLen)) + // copy(s, v[:cap(v)]) + // v = s */}} + var xlen int + xlen, xtrunc = decInferLen(containerLenS, d.h.MaxInitLen, {{ .Size }}) + if xtrunc { + if xlen <= cap(v) { + v = v[:xlen] + } else { + v = make([]{{ .Elem }}, xlen) + } + } else { + v = make([]{{ .Elem }}, xlen) + } + changed = true + } else { + d.arrayCannotExpand(len(v), containerLenS) + } + x2read = len(v) + } else if containerLenS != len(v) { + if canChange { + v = v[:containerLenS] + changed = true + } + } {{/* // all checks done. cannot go past len. */}} + j := 0 + for ; j < x2read; j++ { + slh.ElemContainerState(j) + {{ if eq .Elem "interface{}" }}d.decode(&v[j]){{ else }}v[j] = {{ decmd .Elem }}{{ end }} + } + if xtrunc { {{/* // means canChange=true, changed=true already. */}} + for ; j < containerLenS; j++ { + v = append(v, {{ zerocmd .Elem }}) + slh.ElemContainerState(j) + {{ if eq .Elem "interface{}" }}d.decode(&v[j]){{ else }}v[j] = {{ decmd .Elem }}{{ end }} + } + } else if !canChange { + for ; j < containerLenS; j++ { + slh.ElemContainerState(j) + d.swallow() + } + } + } else { + breakFound := dd.CheckBreak() {{/* check break first, so we can initialize v with a capacity of 4 if necessary */}} + if breakFound { + if canChange { + if v == nil { + v = []{{ .Elem }}{} + } else if len(v) != 0 { + v = v[:0] + } + changed = true + } + slh.End() + return + } + if cap(v) == 0 { + v = make([]{{ .Elem }}, 1, 4) + changed = true + } + j := 0 + for ; !breakFound; j++ { + if j >= len(v) { + if canChange { + v = append(v, {{ zerocmd .Elem }}) + changed = true + } else { + d.arrayCannotExpand(len(v), j+1) + } + } + slh.ElemContainerState(j) + if j < len(v) { {{/* // all checks done. cannot go past len. 
*/}} + {{ if eq .Elem "interface{}" }}d.decode(&v[j]) + {{ else }}v[j] = {{ decmd .Elem }}{{ end }} + } else { + d.swallow() + } + breakFound = dd.CheckBreak() + } + if canChange && j < len(v) { + v = v[:j] + changed = true + } + } + slh.End() + return v, changed +} + +{{end}}{{end}}{{end}} + + +{{range .Values}}{{if not .Primitive}}{{if .MapKey }} +{{/* +Maps can change if they are +- addressable (from a ptr) +- settable (e.g. contained in an interface{}) +*/}} +func (f *decFnInfo) {{ .MethodNamePfx "fastpathDec" false }}R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[{{ .MapKey }}]{{ .Elem }}) + v, changed := fastpathTV.{{ .MethodNamePfx "Dec" false }}V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[{{ .MapKey }}]{{ .Elem }}) + fastpathTV.{{ .MethodNamePfx "Dec" false }}V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) {{ .MethodNamePfx "Dec" false }}X(vp *map[{{ .MapKey }}]{{ .Elem }}, checkNil bool, d *Decoder) { + v, changed := f.{{ .MethodNamePfx "Dec" false }}V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) {{ .MethodNamePfx "Dec" false }}V(v map[{{ .MapKey }}]{{ .Elem }}, checkNil bool, canChange bool, + d *Decoder) (_ map[{{ .MapKey }}]{{ .Elem }}, changed bool) { + dd := d.d + cr := d.cr + {{/* // if dd.isContainerType(valueTypeNil) {dd.TryDecodeAsNil() */}} + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, {{ .Size }}) + v = make(map[{{ .MapKey }}]{{ .Elem }}, xlen) + changed = true + } + {{ if eq .Elem "interface{}" }}mapGet := !d.h.MapValueReset && !d.h.InterfaceReset{{end}} + var mk {{ .MapKey }} + var mv {{ .Elem }} + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { cr.sendContainerState(containerMapKey) } + {{ if eq .MapKey "interface{}" }}mk = nil + d.decode(&mk) + if bv, bok := mk.([]byte); bok { + mk = d.string(bv) {{/* // maps cannot have []byte as key. switch to string. */}} + }{{ else }}mk = {{ decmd .MapKey }}{{ end }} + if cr != nil { cr.sendContainerState(containerMapValue) } + {{ if eq .Elem "interface{}" }}if mapGet { mv = v[mk] } else { mv = nil } + d.decode(&mv){{ else }}mv = {{ decmd .Elem }}{{ end }} + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { cr.sendContainerState(containerMapKey) } + {{ if eq .MapKey "interface{}" }}mk = nil + d.decode(&mk) + if bv, bok := mk.([]byte); bok { + mk = d.string(bv) {{/* // maps cannot have []byte as key. switch to string. 
*/}} + }{{ else }}mk = {{ decmd .MapKey }}{{ end }} + if cr != nil { cr.sendContainerState(containerMapValue) } + {{ if eq .Elem "interface{}" }}if mapGet { mv = v[mk] } else { mv = nil } + d.decode(&mv){{ else }}mv = {{ decmd .Elem }}{{ end }} + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { cr.sendContainerState(containerMapEnd) } + return v, changed +} + +{{end}}{{end}}{{end}} diff --git a/vendor/github.com/coreos/etcd/Godeps/_workspace/src/github.com/ugorji/go/codec/fast-path.not.go b/vendor/github.com/coreos/etcd/Godeps/_workspace/src/github.com/ugorji/go/codec/fast-path.not.go new file mode 100644 index 000000000..d6f5f0c91 --- /dev/null +++ b/vendor/github.com/coreos/etcd/Godeps/_workspace/src/github.com/ugorji/go/codec/fast-path.not.go @@ -0,0 +1,32 @@ +// +build notfastpath + +package codec + +import "reflect" + +// The generated fast-path code is very large, and adds a few seconds to the build time. +// This causes test execution, execution of small tools which use codec, etc +// to take a long time. +// +// To mitigate, we now support the notfastpath tag. +// This tag disables fastpath during build, allowing for faster build, test execution, +// short-program runs, etc. + +func fastpathDecodeTypeSwitch(iv interface{}, d *Decoder) bool { return false } +func fastpathEncodeTypeSwitch(iv interface{}, e *Encoder) bool { return false } +func fastpathEncodeTypeSwitchSlice(iv interface{}, e *Encoder) bool { return false } +func fastpathEncodeTypeSwitchMap(iv interface{}, e *Encoder) bool { return false } + +type fastpathT struct{} +type fastpathE struct { + rtid uintptr + rt reflect.Type + encfn func(*encFnInfo, reflect.Value) + decfn func(*decFnInfo, reflect.Value) +} +type fastpathA [0]fastpathE + +func (x fastpathA) index(rtid uintptr) int { return -1 } + +var fastpathAV fastpathA +var fastpathTV fastpathT diff --git a/vendor/github.com/coreos/etcd/Godeps/_workspace/src/github.com/ugorji/go/codec/gen-dec-array.go.tmpl b/vendor/github.com/coreos/etcd/Godeps/_workspace/src/github.com/ugorji/go/codec/gen-dec-array.go.tmpl new file mode 100644 index 000000000..2caae5bfd --- /dev/null +++ b/vendor/github.com/coreos/etcd/Godeps/_workspace/src/github.com/ugorji/go/codec/gen-dec-array.go.tmpl @@ -0,0 +1,101 @@ +{{var "v"}} := {{if not isArray}}*{{end}}{{ .Varname }} +{{var "h"}}, {{var "l"}} := z.DecSliceHelperStart() {{/* // helper, containerLenS */}} +var {{var "c"}} bool {{/* // changed */}} +if {{var "l"}} == 0 { + {{if isSlice }}if {{var "v"}} == nil { + {{var "v"}} = []{{ .Typ }}{} + {{var "c"}} = true + } else if len({{var "v"}}) != 0 { + {{var "v"}} = {{var "v"}}[:0] + {{var "c"}} = true + } {{end}} {{if isChan }}if {{var "v"}} == nil { + {{var "v"}} = make({{ .CTyp }}, 0) + {{var "c"}} = true + } {{end}} +} else if {{var "l"}} > 0 { + {{if isChan }}if {{var "v"}} == nil { + {{var "rl"}}, _ = z.DecInferLen({{var "l"}}, z.DecBasicHandle().MaxInitLen, {{ .Size }}) + {{var "v"}} = make({{ .CTyp }}, {{var "rl"}}) + {{var "c"}} = true + } + for {{var "r"}} := 0; {{var "r"}} < {{var "l"}}; {{var "r"}}++ { + {{var "h"}}.ElemContainerState({{var "r"}}) + var {{var "t"}} {{ .Typ }} + {{ $x := printf "%st%s" .TempVar .Rand }}{{ decLineVar $x }} + {{var "v"}} <- {{var "t"}} + } + {{ else }} var {{var "rr"}}, {{var "rl"}} int {{/* // num2read, length of slice/array/chan */}} + var {{var "rt"}} bool {{/* truncated */}} + if {{var "l"}} > cap({{var "v"}}) { + {{if isArray }}z.DecArrayCannotExpand(len({{var "v"}}), {{var "l"}}) + {{ else }}{{if not .Immutable }} + {{var "rg"}} := 
len({{var "v"}}) > 0 + {{var "v2"}} := {{var "v"}} {{end}} + {{var "rl"}}, {{var "rt"}} = z.DecInferLen({{var "l"}}, z.DecBasicHandle().MaxInitLen, {{ .Size }}) + if {{var "rt"}} { + if {{var "rl"}} <= cap({{var "v"}}) { + {{var "v"}} = {{var "v"}}[:{{var "rl"}}] + } else { + {{var "v"}} = make([]{{ .Typ }}, {{var "rl"}}) + } + } else { + {{var "v"}} = make([]{{ .Typ }}, {{var "rl"}}) + } + {{var "c"}} = true + {{var "rr"}} = len({{var "v"}}) {{if not .Immutable }} + if {{var "rg"}} { copy({{var "v"}}, {{var "v2"}}) } {{end}} {{end}}{{/* end not Immutable, isArray */}} + } {{if isSlice }} else if {{var "l"}} != len({{var "v"}}) { + {{var "v"}} = {{var "v"}}[:{{var "l"}}] + {{var "c"}} = true + } {{end}} {{/* end isSlice:47 */}} + {{var "j"}} := 0 + for ; {{var "j"}} < {{var "rr"}} ; {{var "j"}}++ { + {{var "h"}}.ElemContainerState({{var "j"}}) + {{ $x := printf "%[1]vv%[2]v[%[1]vj%[2]v]" .TempVar .Rand }}{{ decLineVar $x }} + } + {{if isArray }}for ; {{var "j"}} < {{var "l"}} ; {{var "j"}}++ { + {{var "h"}}.ElemContainerState({{var "j"}}) + z.DecSwallow() + } + {{ else }}if {{var "rt"}} { + for ; {{var "j"}} < {{var "l"}} ; {{var "j"}}++ { + {{var "v"}} = append({{var "v"}}, {{ zero}}) + {{var "h"}}.ElemContainerState({{var "j"}}) + {{ $x := printf "%[1]vv%[2]v[%[1]vj%[2]v]" .TempVar .Rand }}{{ decLineVar $x }} + } + } {{end}} {{/* end isArray:56 */}} + {{end}} {{/* end isChan:16 */}} +} else { {{/* len < 0 */}} + {{var "j"}} := 0 + for ; !r.CheckBreak(); {{var "j"}}++ { + {{if isChan }} + {{var "h"}}.ElemContainerState({{var "j"}}) + var {{var "t"}} {{ .Typ }} + {{ $x := printf "%st%s" .TempVar .Rand }}{{ decLineVar $x }} + {{var "v"}} <- {{var "t"}} + {{ else }} + if {{var "j"}} >= len({{var "v"}}) { + {{if isArray }}z.DecArrayCannotExpand(len({{var "v"}}), {{var "j"}}+1) + {{ else }}{{var "v"}} = append({{var "v"}}, {{zero}})// var {{var "z"}} {{ .Typ }} + {{var "c"}} = true {{end}} + } + {{var "h"}}.ElemContainerState({{var "j"}}) + if {{var "j"}} < len({{var "v"}}) { + {{ $x := printf "%[1]vv%[2]v[%[1]vj%[2]v]" .TempVar .Rand }}{{ decLineVar $x }} + } else { + z.DecSwallow() + } + {{end}} + } + {{if isSlice }}if {{var "j"}} < len({{var "v"}}) { + {{var "v"}} = {{var "v"}}[:{{var "j"}}] + {{var "c"}} = true + } else if {{var "j"}} == 0 && {{var "v"}} == nil { + {{var "v"}} = []{{ .Typ }}{} + {{var "c"}} = true + }{{end}} +} +{{var "h"}}.End() +{{if not isArray }}if {{var "c"}} { + *{{ .Varname }} = {{var "v"}} +}{{end}} diff --git a/vendor/github.com/coreos/etcd/Godeps/_workspace/src/github.com/ugorji/go/codec/gen-dec-map.go.tmpl b/vendor/github.com/coreos/etcd/Godeps/_workspace/src/github.com/ugorji/go/codec/gen-dec-map.go.tmpl new file mode 100644 index 000000000..77400e0a1 --- /dev/null +++ b/vendor/github.com/coreos/etcd/Godeps/_workspace/src/github.com/ugorji/go/codec/gen-dec-map.go.tmpl @@ -0,0 +1,58 @@ +{{var "v"}} := *{{ .Varname }} +{{var "l"}} := r.ReadMapStart() +{{var "bh"}} := z.DecBasicHandle() +if {{var "v"}} == nil { + {{var "rl"}}, _ := z.DecInferLen({{var "l"}}, {{var "bh"}}.MaxInitLen, {{ .Size }}) + {{var "v"}} = make(map[{{ .KTyp }}]{{ .Typ }}, {{var "rl"}}) + *{{ .Varname }} = {{var "v"}} +} +var {{var "mk"}} {{ .KTyp }} +var {{var "mv"}} {{ .Typ }} +var {{var "mg"}} {{if decElemKindPtr}}, {{var "ms"}}, {{var "mok"}}{{end}} bool +if {{var "bh"}}.MapValueReset { + {{if decElemKindPtr}}{{var "mg"}} = true + {{else if decElemKindIntf}}if !{{var "bh"}}.InterfaceReset { {{var "mg"}} = true } + {{else if not decElemKindImmutable}}{{var "mg"}} = true + {{end}} } +if 
{{var "l"}} > 0 { +for {{var "j"}} := 0; {{var "j"}} < {{var "l"}}; {{var "j"}}++ { + z.DecSendContainerState(codecSelfer_containerMapKey{{ .Sfx }}) + {{ $x := printf "%vmk%v" .TempVar .Rand }}{{ decLineVarK $x }} +{{ if eq .KTyp "interface{}" }}{{/* // special case if a byte array. */}}if {{var "bv"}}, {{var "bok"}} := {{var "mk"}}.([]byte); {{var "bok"}} { + {{var "mk"}} = string({{var "bv"}}) + }{{ end }}{{if decElemKindPtr}} + {{var "ms"}} = true{{end}} + if {{var "mg"}} { + {{if decElemKindPtr}}{{var "mv"}}, {{var "mok"}} = {{var "v"}}[{{var "mk"}}] + if {{var "mok"}} { + {{var "ms"}} = false + } {{else}}{{var "mv"}} = {{var "v"}}[{{var "mk"}}] {{end}} + } {{if not decElemKindImmutable}}else { {{var "mv"}} = {{decElemZero}} }{{end}} + z.DecSendContainerState(codecSelfer_containerMapValue{{ .Sfx }}) + {{ $x := printf "%vmv%v" .TempVar .Rand }}{{ decLineVar $x }} + if {{if decElemKindPtr}} {{var "ms"}} && {{end}} {{var "v"}} != nil { + {{var "v"}}[{{var "mk"}}] = {{var "mv"}} + } +} +} else if {{var "l"}} < 0 { +for {{var "j"}} := 0; !r.CheckBreak(); {{var "j"}}++ { + z.DecSendContainerState(codecSelfer_containerMapKey{{ .Sfx }}) + {{ $x := printf "%vmk%v" .TempVar .Rand }}{{ decLineVarK $x }} +{{ if eq .KTyp "interface{}" }}{{/* // special case if a byte array. */}}if {{var "bv"}}, {{var "bok"}} := {{var "mk"}}.([]byte); {{var "bok"}} { + {{var "mk"}} = string({{var "bv"}}) + }{{ end }}{{if decElemKindPtr}} + {{var "ms"}} = true {{ end }} + if {{var "mg"}} { + {{if decElemKindPtr}}{{var "mv"}}, {{var "mok"}} = {{var "v"}}[{{var "mk"}}] + if {{var "mok"}} { + {{var "ms"}} = false + } {{else}}{{var "mv"}} = {{var "v"}}[{{var "mk"}}] {{end}} + } {{if not decElemKindImmutable}}else { {{var "mv"}} = {{decElemZero}} }{{end}} + z.DecSendContainerState(codecSelfer_containerMapValue{{ .Sfx }}) + {{ $x := printf "%vmv%v" .TempVar .Rand }}{{ decLineVar $x }} + if {{if decElemKindPtr}} {{var "ms"}} && {{end}} {{var "v"}} != nil { + {{var "v"}}[{{var "mk"}}] = {{var "mv"}} + } +} +} // else len==0: TODO: Should we clear map entries? +z.DecSendContainerState(codecSelfer_containerMapEnd{{ .Sfx }}) diff --git a/vendor/github.com/coreos/etcd/Godeps/_workspace/src/github.com/ugorji/go/codec/gen-helper.generated.go b/vendor/github.com/coreos/etcd/Godeps/_workspace/src/github.com/ugorji/go/codec/gen-helper.generated.go new file mode 100644 index 000000000..22bce776b --- /dev/null +++ b/vendor/github.com/coreos/etcd/Godeps/_workspace/src/github.com/ugorji/go/codec/gen-helper.generated.go @@ -0,0 +1,233 @@ +// //+build ignore + +// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved. +// Use of this source code is governed by a MIT license found in the LICENSE file. + +// ************************************************************ +// DO NOT EDIT. +// THIS FILE IS AUTO-GENERATED from gen-helper.go.tmpl +// ************************************************************ + +package codec + +import ( + "encoding" + "reflect" +) + +// This file is used to generate helper code for codecgen. +// The values here i.e. genHelper(En|De)coder are not to be used directly by +// library users. They WILL change continously and without notice. +// +// To help enforce this, we create an unexported type with exported members. +// The only way to get the type is via the one exported type that we control (somewhat). +// +// When static codecs are created for types, they will use this value +// to perform encoding or decoding of primitives or known slice or map types. 
+ +// GenHelperEncoder is exported so that it can be used externally by codecgen. +// Library users: DO NOT USE IT DIRECTLY. IT WILL CHANGE CONTINOUSLY WITHOUT NOTICE. +func GenHelperEncoder(e *Encoder) (genHelperEncoder, encDriver) { + return genHelperEncoder{e: e}, e.e +} + +// GenHelperDecoder is exported so that it can be used externally by codecgen. +// Library users: DO NOT USE IT DIRECTLY. IT WILL CHANGE CONTINOUSLY WITHOUT NOTICE. +func GenHelperDecoder(d *Decoder) (genHelperDecoder, decDriver) { + return genHelperDecoder{d: d}, d.d +} + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +type genHelperEncoder struct { + e *Encoder + F fastpathT +} + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +type genHelperDecoder struct { + d *Decoder + F fastpathT +} + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperEncoder) EncBasicHandle() *BasicHandle { + return f.e.h +} + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperEncoder) EncBinary() bool { + return f.e.be // f.e.hh.isBinaryEncoding() +} + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperEncoder) EncFallback(iv interface{}) { + // println(">>>>>>>>> EncFallback") + f.e.encodeI(iv, false, false) +} + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperEncoder) EncTextMarshal(iv encoding.TextMarshaler) { + bs, fnerr := iv.MarshalText() + f.e.marshal(bs, fnerr, false, c_UTF8) +} + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperEncoder) EncJSONMarshal(iv jsonMarshaler) { + bs, fnerr := iv.MarshalJSON() + f.e.marshal(bs, fnerr, true, c_UTF8) +} + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperEncoder) EncBinaryMarshal(iv encoding.BinaryMarshaler) { + bs, fnerr := iv.MarshalBinary() + f.e.marshal(bs, fnerr, false, c_RAW) +} + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperEncoder) TimeRtidIfBinc() uintptr { + if _, ok := f.e.hh.(*BincHandle); ok { + return timeTypId + } + return 0 +} + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperEncoder) IsJSONHandle() bool { + return f.e.js +} + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperEncoder) HasExtensions() bool { + return len(f.e.h.extHandle) != 0 +} + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperEncoder) EncExt(v interface{}) (r bool) { + rt := reflect.TypeOf(v) + if rt.Kind() == reflect.Ptr { + rt = rt.Elem() + } + rtid := reflect.ValueOf(rt).Pointer() + if xfFn := f.e.h.getExt(rtid); xfFn != nil { + f.e.e.EncodeExt(v, xfFn.tag, xfFn.ext, f.e) + return true + } + return false +} + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperEncoder) EncSendContainerState(c containerState) { + if f.e.cr != nil { + f.e.cr.sendContainerState(c) + } +} + +// ---------------- DECODER FOLLOWS ----------------- + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperDecoder) DecBasicHandle() *BasicHandle { + return f.d.h +} + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. 
*DO NOT USE* +func (f genHelperDecoder) DecBinary() bool { + return f.d.be // f.d.hh.isBinaryEncoding() +} + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperDecoder) DecSwallow() { + f.d.swallow() +} + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperDecoder) DecScratchBuffer() []byte { + return f.d.b[:] +} + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperDecoder) DecFallback(iv interface{}, chkPtr bool) { + // println(">>>>>>>>> DecFallback") + f.d.decodeI(iv, chkPtr, false, false, false) +} + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperDecoder) DecSliceHelperStart() (decSliceHelper, int) { + return f.d.decSliceHelperStart() +} + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperDecoder) DecStructFieldNotFound(index int, name string) { + f.d.structFieldNotFound(index, name) +} + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperDecoder) DecArrayCannotExpand(sliceLen, streamLen int) { + f.d.arrayCannotExpand(sliceLen, streamLen) +} + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperDecoder) DecTextUnmarshal(tm encoding.TextUnmarshaler) { + fnerr := tm.UnmarshalText(f.d.d.DecodeBytes(f.d.b[:], true, true)) + if fnerr != nil { + panic(fnerr) + } +} + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperDecoder) DecJSONUnmarshal(tm jsonUnmarshaler) { + // bs := f.dd.DecodeBytes(f.d.b[:], true, true) + // grab the bytes to be read, as UnmarshalJSON needs the full JSON so as to unmarshal it itself. + fnerr := tm.UnmarshalJSON(f.d.nextValueBytes()) + if fnerr != nil { + panic(fnerr) + } +} + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperDecoder) DecBinaryUnmarshal(bm encoding.BinaryUnmarshaler) { + fnerr := bm.UnmarshalBinary(f.d.d.DecodeBytes(nil, false, true)) + if fnerr != nil { + panic(fnerr) + } +} + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperDecoder) TimeRtidIfBinc() uintptr { + if _, ok := f.d.hh.(*BincHandle); ok { + return timeTypId + } + return 0 +} + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperDecoder) IsJSONHandle() bool { + return f.d.js +} + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperDecoder) HasExtensions() bool { + return len(f.d.h.extHandle) != 0 +} + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperDecoder) DecExt(v interface{}) (r bool) { + rt := reflect.TypeOf(v).Elem() + rtid := reflect.ValueOf(rt).Pointer() + if xfFn := f.d.h.getExt(rtid); xfFn != nil { + f.d.d.DecodeExt(v, xfFn.tag, xfFn.ext) + return true + } + return false +} + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperDecoder) DecInferLen(clen, maxlen, unit int) (rvlen int, truncated bool) { + return decInferLen(clen, maxlen, unit) +} + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. 
*DO NOT USE* +func (f genHelperDecoder) DecSendContainerState(c containerState) { + if f.d.cr != nil { + f.d.cr.sendContainerState(c) + } +} diff --git a/vendor/github.com/coreos/etcd/Godeps/_workspace/src/github.com/ugorji/go/codec/gen-helper.go.tmpl b/vendor/github.com/coreos/etcd/Godeps/_workspace/src/github.com/ugorji/go/codec/gen-helper.go.tmpl new file mode 100644 index 000000000..31958574f --- /dev/null +++ b/vendor/github.com/coreos/etcd/Godeps/_workspace/src/github.com/ugorji/go/codec/gen-helper.go.tmpl @@ -0,0 +1,364 @@ +// //+build ignore + +// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved. +// Use of this source code is governed by a MIT license found in the LICENSE file. + +// ************************************************************ +// DO NOT EDIT. +// THIS FILE IS AUTO-GENERATED from gen-helper.go.tmpl +// ************************************************************ + +package codec + +import ( + "encoding" + "reflect" +) + +// This file is used to generate helper code for codecgen. +// The values here i.e. genHelper(En|De)coder are not to be used directly by +// library users. They WILL change continously and without notice. +// +// To help enforce this, we create an unexported type with exported members. +// The only way to get the type is via the one exported type that we control (somewhat). +// +// When static codecs are created for types, they will use this value +// to perform encoding or decoding of primitives or known slice or map types. + +// GenHelperEncoder is exported so that it can be used externally by codecgen. +// Library users: DO NOT USE IT DIRECTLY. IT WILL CHANGE CONTINOUSLY WITHOUT NOTICE. +func GenHelperEncoder(e *Encoder) (genHelperEncoder, encDriver) { + return genHelperEncoder{e:e}, e.e +} + +// GenHelperDecoder is exported so that it can be used externally by codecgen. +// Library users: DO NOT USE IT DIRECTLY. IT WILL CHANGE CONTINOUSLY WITHOUT NOTICE. +func GenHelperDecoder(d *Decoder) (genHelperDecoder, decDriver) { + return genHelperDecoder{d:d}, d.d +} + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +type genHelperEncoder struct { + e *Encoder + F fastpathT +} + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +type genHelperDecoder struct { + d *Decoder + F fastpathT +} + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperEncoder) EncBasicHandle() *BasicHandle { + return f.e.h +} + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperEncoder) EncBinary() bool { + return f.e.be // f.e.hh.isBinaryEncoding() +} +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperEncoder) EncFallback(iv interface{}) { + // println(">>>>>>>>> EncFallback") + f.e.encodeI(iv, false, false) +} +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperEncoder) EncTextMarshal(iv encoding.TextMarshaler) { + bs, fnerr := iv.MarshalText() + f.e.marshal(bs, fnerr, false, c_UTF8) +} +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperEncoder) EncJSONMarshal(iv jsonMarshaler) { + bs, fnerr := iv.MarshalJSON() + f.e.marshal(bs, fnerr, true, c_UTF8) +} +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperEncoder) EncBinaryMarshal(iv encoding.BinaryMarshaler) { + bs, fnerr := iv.MarshalBinary() + f.e.marshal(bs, fnerr, false, c_RAW) +} +// FOR USE BY CODECGEN ONLY. 
IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperEncoder) TimeRtidIfBinc() uintptr { + if _, ok := f.e.hh.(*BincHandle); ok { + return timeTypId + } + return 0 +} +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperEncoder) IsJSONHandle() bool { + return f.e.js +} +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperEncoder) HasExtensions() bool { + return len(f.e.h.extHandle) != 0 +} +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperEncoder) EncExt(v interface{}) (r bool) { + rt := reflect.TypeOf(v) + if rt.Kind() == reflect.Ptr { + rt = rt.Elem() + } + rtid := reflect.ValueOf(rt).Pointer() + if xfFn := f.e.h.getExt(rtid); xfFn != nil { + f.e.e.EncodeExt(v, xfFn.tag, xfFn.ext, f.e) + return true + } + return false +} +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperEncoder) EncSendContainerState(c containerState) { + if f.e.cr != nil { + f.e.cr.sendContainerState(c) + } +} + +// ---------------- DECODER FOLLOWS ----------------- + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperDecoder) DecBasicHandle() *BasicHandle { + return f.d.h +} +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperDecoder) DecBinary() bool { + return f.d.be // f.d.hh.isBinaryEncoding() +} +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperDecoder) DecSwallow() { + f.d.swallow() +} +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperDecoder) DecScratchBuffer() []byte { + return f.d.b[:] +} +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperDecoder) DecFallback(iv interface{}, chkPtr bool) { + // println(">>>>>>>>> DecFallback") + f.d.decodeI(iv, chkPtr, false, false, false) +} +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperDecoder) DecSliceHelperStart() (decSliceHelper, int) { + return f.d.decSliceHelperStart() +} +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperDecoder) DecStructFieldNotFound(index int, name string) { + f.d.structFieldNotFound(index, name) +} +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperDecoder) DecArrayCannotExpand(sliceLen, streamLen int) { + f.d.arrayCannotExpand(sliceLen, streamLen) +} +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperDecoder) DecTextUnmarshal(tm encoding.TextUnmarshaler) { + fnerr := tm.UnmarshalText(f.d.d.DecodeBytes(f.d.b[:], true, true)) + if fnerr != nil { + panic(fnerr) + } +} +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperDecoder) DecJSONUnmarshal(tm jsonUnmarshaler) { + // bs := f.dd.DecodeBytes(f.d.b[:], true, true) + // grab the bytes to be read, as UnmarshalJSON needs the full JSON so as to unmarshal it itself. + fnerr := tm.UnmarshalJSON(f.d.nextValueBytes()) + if fnerr != nil { + panic(fnerr) + } +} +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperDecoder) DecBinaryUnmarshal(bm encoding.BinaryUnmarshaler) { + fnerr := bm.UnmarshalBinary(f.d.d.DecodeBytes(nil, false, true)) + if fnerr != nil { + panic(fnerr) + } +} +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. 
*DO NOT USE* +func (f genHelperDecoder) TimeRtidIfBinc() uintptr { + if _, ok := f.d.hh.(*BincHandle); ok { + return timeTypId + } + return 0 +} +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperDecoder) IsJSONHandle() bool { + return f.d.js +} +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperDecoder) HasExtensions() bool { + return len(f.d.h.extHandle) != 0 +} +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperDecoder) DecExt(v interface{}) (r bool) { + rt := reflect.TypeOf(v).Elem() + rtid := reflect.ValueOf(rt).Pointer() + if xfFn := f.d.h.getExt(rtid); xfFn != nil { + f.d.d.DecodeExt(v, xfFn.tag, xfFn.ext) + return true + } + return false +} +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperDecoder) DecInferLen(clen, maxlen, unit int) (rvlen int, truncated bool) { + return decInferLen(clen, maxlen, unit) +} +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperDecoder) DecSendContainerState(c containerState) { + if f.d.cr != nil { + f.d.cr.sendContainerState(c) + } +} + +{{/* + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperEncoder) EncDriver() encDriver { + return f.e.e +} +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperDecoder) DecDriver() decDriver { + return f.d.d +} + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperEncoder) EncNil() { + f.e.e.EncodeNil() +} +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperEncoder) EncBytes(v []byte) { + f.e.e.EncodeStringBytes(c_RAW, v) +} +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperEncoder) EncArrayStart(length int) { + f.e.e.EncodeArrayStart(length) +} +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperEncoder) EncArrayEnd() { + f.e.e.EncodeArrayEnd() +} +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperEncoder) EncArrayEntrySeparator() { + f.e.e.EncodeArrayEntrySeparator() +} +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperEncoder) EncMapStart(length int) { + f.e.e.EncodeMapStart(length) +} +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperEncoder) EncMapEnd() { + f.e.e.EncodeMapEnd() +} +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperEncoder) EncMapEntrySeparator() { + f.e.e.EncodeMapEntrySeparator() +} +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperEncoder) EncMapKVSeparator() { + f.e.e.EncodeMapKVSeparator() +} + +// --------- + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperDecoder) DecBytes(v *[]byte) { + *v = f.d.d.DecodeBytes(*v) +} +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperDecoder) DecTryNil() bool { + return f.d.d.TryDecodeAsNil() +} +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperDecoder) DecContainerIsNil() (b bool) { + return f.d.d.IsContainerType(valueTypeNil) +} +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. 
*DO NOT USE* +func (f genHelperDecoder) DecContainerIsMap() (b bool) { + return f.d.d.IsContainerType(valueTypeMap) +} +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperDecoder) DecContainerIsArray() (b bool) { + return f.d.d.IsContainerType(valueTypeArray) +} +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperDecoder) DecCheckBreak() bool { + return f.d.d.CheckBreak() +} +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperDecoder) DecMapStart() int { + return f.d.d.ReadMapStart() +} +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperDecoder) DecArrayStart() int { + return f.d.d.ReadArrayStart() +} +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperDecoder) DecMapEnd() { + f.d.d.ReadMapEnd() +} +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperDecoder) DecArrayEnd() { + f.d.d.ReadArrayEnd() +} +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperDecoder) DecArrayEntrySeparator() { + f.d.d.ReadArrayEntrySeparator() +} +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperDecoder) DecMapEntrySeparator() { + f.d.d.ReadMapEntrySeparator() +} +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperDecoder) DecMapKVSeparator() { + f.d.d.ReadMapKVSeparator() +} +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperDecoder) ReadStringAsBytes(bs []byte) []byte { + return f.d.d.DecodeStringAsBytes(bs) +} + + +// -- encode calls (primitives) +{{range .Values}}{{if .Primitive }}{{if ne .Primitive "interface{}" }} +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperEncoder) {{ .MethodNamePfx "Enc" true }}(v {{ .Primitive }}) { + ee := f.e.e + {{ encmd .Primitive "v" }} +} +{{ end }}{{ end }}{{ end }} + +// -- decode calls (primitives) +{{range .Values}}{{if .Primitive }}{{if ne .Primitive "interface{}" }} +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperDecoder) {{ .MethodNamePfx "Dec" true }}(vp *{{ .Primitive }}) { + dd := f.d.d + *vp = {{ decmd .Primitive }} +} +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperDecoder) {{ .MethodNamePfx "Read" true }}() (v {{ .Primitive }}) { + dd := f.d.d + v = {{ decmd .Primitive }} + return +} +{{ end }}{{ end }}{{ end }} + + +// -- encode calls (slices/maps) +{{range .Values}}{{if not .Primitive }}{{if .Slice }} +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperEncoder) {{ .MethodNamePfx "Enc" false }}(v []{{ .Elem }}) { {{ else }} +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperEncoder) {{ .MethodNamePfx "Enc" false }}(v map[{{ .MapKey }}]{{ .Elem }}) { {{end}} + f.F.{{ .MethodNamePfx "Enc" false }}V(v, false, f.e) +} +{{ end }}{{ end }} + +// -- decode calls (slices/maps) +{{range .Values}}{{if not .Primitive }} +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. 
*DO NOT USE* +{{if .Slice }}func (f genHelperDecoder) {{ .MethodNamePfx "Dec" false }}(vp *[]{{ .Elem }}) { +{{else}}func (f genHelperDecoder) {{ .MethodNamePfx "Dec" false }}(vp *map[{{ .MapKey }}]{{ .Elem }}) { {{end}} + v, changed := f.F.{{ .MethodNamePfx "Dec" false }}V(*vp, false, true, f.d) + if changed { + *vp = v + } +} +{{ end }}{{ end }} +*/}} diff --git a/vendor/github.com/coreos/etcd/Godeps/_workspace/src/github.com/ugorji/go/codec/gen.generated.go b/vendor/github.com/coreos/etcd/Godeps/_workspace/src/github.com/ugorji/go/codec/gen.generated.go new file mode 100644 index 000000000..fb6f4b809 --- /dev/null +++ b/vendor/github.com/coreos/etcd/Godeps/_workspace/src/github.com/ugorji/go/codec/gen.generated.go @@ -0,0 +1,172 @@ +// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved. +// Use of this source code is governed by a MIT license found in the LICENSE file. + +package codec + +// DO NOT EDIT. THIS FILE IS AUTO-GENERATED FROM gen-dec-(map|array).go.tmpl + +const genDecMapTmpl = ` +{{var "v"}} := *{{ .Varname }} +{{var "l"}} := r.ReadMapStart() +{{var "bh"}} := z.DecBasicHandle() +if {{var "v"}} == nil { + {{var "rl"}}, _ := z.DecInferLen({{var "l"}}, {{var "bh"}}.MaxInitLen, {{ .Size }}) + {{var "v"}} = make(map[{{ .KTyp }}]{{ .Typ }}, {{var "rl"}}) + *{{ .Varname }} = {{var "v"}} +} +var {{var "mk"}} {{ .KTyp }} +var {{var "mv"}} {{ .Typ }} +var {{var "mg"}} {{if decElemKindPtr}}, {{var "ms"}}, {{var "mok"}}{{end}} bool +if {{var "bh"}}.MapValueReset { + {{if decElemKindPtr}}{{var "mg"}} = true + {{else if decElemKindIntf}}if !{{var "bh"}}.InterfaceReset { {{var "mg"}} = true } + {{else if not decElemKindImmutable}}{{var "mg"}} = true + {{end}} } +if {{var "l"}} > 0 { +for {{var "j"}} := 0; {{var "j"}} < {{var "l"}}; {{var "j"}}++ { + z.DecSendContainerState(codecSelfer_containerMapKey{{ .Sfx }}) + {{ $x := printf "%vmk%v" .TempVar .Rand }}{{ decLineVarK $x }} +{{ if eq .KTyp "interface{}" }}{{/* // special case if a byte array. */}}if {{var "bv"}}, {{var "bok"}} := {{var "mk"}}.([]byte); {{var "bok"}} { + {{var "mk"}} = string({{var "bv"}}) + }{{ end }}{{if decElemKindPtr}} + {{var "ms"}} = true{{end}} + if {{var "mg"}} { + {{if decElemKindPtr}}{{var "mv"}}, {{var "mok"}} = {{var "v"}}[{{var "mk"}}] + if {{var "mok"}} { + {{var "ms"}} = false + } {{else}}{{var "mv"}} = {{var "v"}}[{{var "mk"}}] {{end}} + } {{if not decElemKindImmutable}}else { {{var "mv"}} = {{decElemZero}} }{{end}} + z.DecSendContainerState(codecSelfer_containerMapValue{{ .Sfx }}) + {{ $x := printf "%vmv%v" .TempVar .Rand }}{{ decLineVar $x }} + if {{if decElemKindPtr}} {{var "ms"}} && {{end}} {{var "v"}} != nil { + {{var "v"}}[{{var "mk"}}] = {{var "mv"}} + } +} +} else if {{var "l"}} < 0 { +for {{var "j"}} := 0; !r.CheckBreak(); {{var "j"}}++ { + z.DecSendContainerState(codecSelfer_containerMapKey{{ .Sfx }}) + {{ $x := printf "%vmk%v" .TempVar .Rand }}{{ decLineVarK $x }} +{{ if eq .KTyp "interface{}" }}{{/* // special case if a byte array. 
*/}}if {{var "bv"}}, {{var "bok"}} := {{var "mk"}}.([]byte); {{var "bok"}} { + {{var "mk"}} = string({{var "bv"}}) + }{{ end }}{{if decElemKindPtr}} + {{var "ms"}} = true {{ end }} + if {{var "mg"}} { + {{if decElemKindPtr}}{{var "mv"}}, {{var "mok"}} = {{var "v"}}[{{var "mk"}}] + if {{var "mok"}} { + {{var "ms"}} = false + } {{else}}{{var "mv"}} = {{var "v"}}[{{var "mk"}}] {{end}} + } {{if not decElemKindImmutable}}else { {{var "mv"}} = {{decElemZero}} }{{end}} + z.DecSendContainerState(codecSelfer_containerMapValue{{ .Sfx }}) + {{ $x := printf "%vmv%v" .TempVar .Rand }}{{ decLineVar $x }} + if {{if decElemKindPtr}} {{var "ms"}} && {{end}} {{var "v"}} != nil { + {{var "v"}}[{{var "mk"}}] = {{var "mv"}} + } +} +} // else len==0: TODO: Should we clear map entries? +z.DecSendContainerState(codecSelfer_containerMapEnd{{ .Sfx }}) +` + +const genDecListTmpl = ` +{{var "v"}} := {{if not isArray}}*{{end}}{{ .Varname }} +{{var "h"}}, {{var "l"}} := z.DecSliceHelperStart() {{/* // helper, containerLenS */}} +var {{var "c"}} bool {{/* // changed */}} +if {{var "l"}} == 0 { + {{if isSlice }}if {{var "v"}} == nil { + {{var "v"}} = []{{ .Typ }}{} + {{var "c"}} = true + } else if len({{var "v"}}) != 0 { + {{var "v"}} = {{var "v"}}[:0] + {{var "c"}} = true + } {{end}} {{if isChan }}if {{var "v"}} == nil { + {{var "v"}} = make({{ .CTyp }}, 0) + {{var "c"}} = true + } {{end}} +} else if {{var "l"}} > 0 { + {{if isChan }}if {{var "v"}} == nil { + {{var "rl"}}, _ = z.DecInferLen({{var "l"}}, z.DecBasicHandle().MaxInitLen, {{ .Size }}) + {{var "v"}} = make({{ .CTyp }}, {{var "rl"}}) + {{var "c"}} = true + } + for {{var "r"}} := 0; {{var "r"}} < {{var "l"}}; {{var "r"}}++ { + {{var "h"}}.ElemContainerState({{var "r"}}) + var {{var "t"}} {{ .Typ }} + {{ $x := printf "%st%s" .TempVar .Rand }}{{ decLineVar $x }} + {{var "v"}} <- {{var "t"}} + } + {{ else }} var {{var "rr"}}, {{var "rl"}} int {{/* // num2read, length of slice/array/chan */}} + var {{var "rt"}} bool {{/* truncated */}} + if {{var "l"}} > cap({{var "v"}}) { + {{if isArray }}z.DecArrayCannotExpand(len({{var "v"}}), {{var "l"}}) + {{ else }}{{if not .Immutable }} + {{var "rg"}} := len({{var "v"}}) > 0 + {{var "v2"}} := {{var "v"}} {{end}} + {{var "rl"}}, {{var "rt"}} = z.DecInferLen({{var "l"}}, z.DecBasicHandle().MaxInitLen, {{ .Size }}) + if {{var "rt"}} { + if {{var "rl"}} <= cap({{var "v"}}) { + {{var "v"}} = {{var "v"}}[:{{var "rl"}}] + } else { + {{var "v"}} = make([]{{ .Typ }}, {{var "rl"}}) + } + } else { + {{var "v"}} = make([]{{ .Typ }}, {{var "rl"}}) + } + {{var "c"}} = true + {{var "rr"}} = len({{var "v"}}) {{if not .Immutable }} + if {{var "rg"}} { copy({{var "v"}}, {{var "v2"}}) } {{end}} {{end}}{{/* end not Immutable, isArray */}} + } {{if isSlice }} else if {{var "l"}} != len({{var "v"}}) { + {{var "v"}} = {{var "v"}}[:{{var "l"}}] + {{var "c"}} = true + } {{end}} {{/* end isSlice:47 */}} + {{var "j"}} := 0 + for ; {{var "j"}} < {{var "rr"}} ; {{var "j"}}++ { + {{var "h"}}.ElemContainerState({{var "j"}}) + {{ $x := printf "%[1]vv%[2]v[%[1]vj%[2]v]" .TempVar .Rand }}{{ decLineVar $x }} + } + {{if isArray }}for ; {{var "j"}} < {{var "l"}} ; {{var "j"}}++ { + {{var "h"}}.ElemContainerState({{var "j"}}) + z.DecSwallow() + } + {{ else }}if {{var "rt"}} { + for ; {{var "j"}} < {{var "l"}} ; {{var "j"}}++ { + {{var "v"}} = append({{var "v"}}, {{ zero}}) + {{var "h"}}.ElemContainerState({{var "j"}}) + {{ $x := printf "%[1]vv%[2]v[%[1]vj%[2]v]" .TempVar .Rand }}{{ decLineVar $x }} + } + } {{end}} {{/* end isArray:56 */}} + {{end}} {{/* end 
isChan:16 */}} +} else { {{/* len < 0 */}} + {{var "j"}} := 0 + for ; !r.CheckBreak(); {{var "j"}}++ { + {{if isChan }} + {{var "h"}}.ElemContainerState({{var "j"}}) + var {{var "t"}} {{ .Typ }} + {{ $x := printf "%st%s" .TempVar .Rand }}{{ decLineVar $x }} + {{var "v"}} <- {{var "t"}} + {{ else }} + if {{var "j"}} >= len({{var "v"}}) { + {{if isArray }}z.DecArrayCannotExpand(len({{var "v"}}), {{var "j"}}+1) + {{ else }}{{var "v"}} = append({{var "v"}}, {{zero}})// var {{var "z"}} {{ .Typ }} + {{var "c"}} = true {{end}} + } + {{var "h"}}.ElemContainerState({{var "j"}}) + if {{var "j"}} < len({{var "v"}}) { + {{ $x := printf "%[1]vv%[2]v[%[1]vj%[2]v]" .TempVar .Rand }}{{ decLineVar $x }} + } else { + z.DecSwallow() + } + {{end}} + } + {{if isSlice }}if {{var "j"}} < len({{var "v"}}) { + {{var "v"}} = {{var "v"}}[:{{var "j"}}] + {{var "c"}} = true + } else if {{var "j"}} == 0 && {{var "v"}} == nil { + {{var "v"}} = []{{ .Typ }}{} + {{var "c"}} = true + }{{end}} +} +{{var "h"}}.End() +{{if not isArray }}if {{var "c"}} { + *{{ .Varname }} = {{var "v"}} +}{{end}} +` + diff --git a/vendor/github.com/coreos/etcd/Godeps/_workspace/src/github.com/ugorji/go/codec/gen.go b/vendor/github.com/coreos/etcd/Godeps/_workspace/src/github.com/ugorji/go/codec/gen.go new file mode 100644 index 000000000..a075e7c0d --- /dev/null +++ b/vendor/github.com/coreos/etcd/Godeps/_workspace/src/github.com/ugorji/go/codec/gen.go @@ -0,0 +1,1920 @@ +// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved. +// Use of this source code is governed by a MIT license found in the LICENSE file. + +package codec + +import ( + "bytes" + "encoding/base64" + "errors" + "fmt" + "go/format" + "io" + "io/ioutil" + "math/rand" + "os" + "reflect" + "regexp" + "sort" + "strconv" + "strings" + "sync" + "text/template" + "time" +) + +// --------------------------------------------------- +// codecgen supports the full cycle of reflection-based codec: +// - RawExt +// - Builtins +// - Extensions +// - (Binary|Text|JSON)(Unm|M)arshal +// - generic by-kind +// +// This means that, for dynamic things, we MUST use reflection to at least get the reflect.Type. +// In those areas, we try to only do reflection or interface-conversion when NECESSARY: +// - Extensions, only if Extensions are configured. +// +// However, codecgen doesn't support the following: +// - Canonical option. (codecgen IGNORES it currently) +// This is just because it has not been implemented. +// +// During encode/decode, Selfer takes precedence. +// A type implementing Selfer will know how to encode/decode itself statically. +// +// The following field types are supported: +// array: [n]T +// slice: []T +// map: map[K]V +// primitive: [u]int[n], float(32|64), bool, string +// struct +// +// --------------------------------------------------- +// Note that a Selfer cannot call (e|d).(En|De)code on itself, +// as this will cause a circular reference, as (En|De)code will call Selfer methods. +// Any type that implements Selfer must implement completely and not fallback to (En|De)code. +// +// In addition, code in this file manages the generation of fast-path implementations of +// encode/decode of slices/maps of primitive keys/values. +// +// Users MUST re-generate their implementations whenever the code shape changes. +// The generated code will panic if it was generated with a version older than the supporting library. +// --------------------------------------------------- +// +// codec framework is very feature rich. 
+// When encoding or decoding into an interface, it depends on the runtime type of the interface.
+// The type of the interface may be a named type, an extension, etc.
+// Consequently, we fall back to the runtime codec for encoding/decoding interfaces.
+// In addition, we fall back for any value which cannot be guaranteed at runtime.
+// This allows us to support ANY value, including named types, specifically those which
+// do not implement our interfaces (e.g. Selfer).
+//
+// This explains some slowness compared to other code generation codecs (e.g. msgp).
+// This reduction in speed is only seen when your code refers to interfaces,
+// e.g. type T struct { A interface{}; B []interface{}; C map[string]interface{} }
+//
+// codecgen will panic if the file was generated with an old version of the library in use.
+//
+// Note:
+// It was a conscious decision to have gen.go always explicitly call EncodeNil or TryDecodeAsNil.
+// This way, there isn't a function call overhead just to check whether we should enter a block of code.
+
+// GenVersion is the current version of codecgen.
+//
+// NOTE: Increment this value each time codecgen changes fundamentally.
+// Fundamental changes are:
+//   - helper methods change (signature change, new ones added, some removed, etc)
+//   - codecgen command line changes
+//
+// v1: Initial Version
+// v2:
+// v3: Changes for Kubernetes:
+//     changes in signature of some unpublished helper methods and codecgen cmdline arguments.
+// v4: Removed separator support from (en|de)cDriver, and refactored codec(gen)
+// v5: changes to support faster json decoding. Let encoder/decoder maintain state of collections.
+const GenVersion = 5
+
+const (
+	genCodecPkg        = "codec1978"
+	genTempVarPfx      = "yy"
+	genTopLevelVarName = "x"
+
+	// ignore canBeNil parameter, and always set to true.
+	// This is because nil can appear anywhere, so we should always check.
+	genAnythingCanBeNil = true
+
+	// if genUseOneFunctionForDecStructMap, make a single codecDecodeSelferFromMap function;
+	// else make codecDecodeSelferFromMap{LenPrefix,CheckBreak} so that conditionals
+	// are not executed a lot.
+	//
+	// From testing, it didn't make much difference in runtime, so keep as true (one function only).
+	genUseOneFunctionForDecStructMap = true
+)
+
+type genStructMapStyle uint8
+
+const (
+	genStructMapStyleConsolidated genStructMapStyle = iota
+	genStructMapStyleLenPrefix
+	genStructMapStyleCheckBreak
+)
+
+var (
+	genAllTypesSamePkgErr  = errors.New("All types must be in the same package")
+	genExpectArrayOrMapErr = errors.New("unexpected type. Expecting array/map/slice")
+	genBase64enc           = base64.NewEncoding("ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789__")
+	genQNameRegex          = regexp.MustCompile(`[A-Za-z_.]+`)
+)
+
+// genRunner holds some state used during a Gen run.
+type genRunner struct { + w io.Writer // output + c uint64 // counter used for generating varsfx + t []reflect.Type // list of types to run selfer on + + tc reflect.Type // currently running selfer on this type + te map[uintptr]bool // types for which the encoder has been created + td map[uintptr]bool // types for which the decoder has been created + cp string // codec import path + + im map[string]reflect.Type // imports to add + imn map[string]string // package names of imports to add + imc uint64 // counter for import numbers + + is map[reflect.Type]struct{} // types seen during import search + bp string // base PkgPath, for which we are generating for + + cpfx string // codec package prefix + unsafe bool // is unsafe to be used in generated code? + + tm map[reflect.Type]struct{} // types for which enc/dec must be generated + ts []reflect.Type // types for which enc/dec must be generated + + xs string // top level variable/constant suffix + hn string // fn helper type name + + ti *TypeInfos + // rr *rand.Rand // random generator for file-specific types +} + +// Gen will write a complete go file containing Selfer implementations for each +// type passed. All the types must be in the same package. +// +// Library users: *DO NOT USE IT DIRECTLY. IT WILL CHANGE CONTINOUSLY WITHOUT NOTICE.* +func Gen(w io.Writer, buildTags, pkgName, uid string, useUnsafe bool, ti *TypeInfos, typ ...reflect.Type) { + if len(typ) == 0 { + return + } + x := genRunner{ + unsafe: useUnsafe, + w: w, + t: typ, + te: make(map[uintptr]bool), + td: make(map[uintptr]bool), + im: make(map[string]reflect.Type), + imn: make(map[string]string), + is: make(map[reflect.Type]struct{}), + tm: make(map[reflect.Type]struct{}), + ts: []reflect.Type{}, + bp: genImportPath(typ[0]), + xs: uid, + ti: ti, + } + if x.ti == nil { + x.ti = defTypeInfos + } + if x.xs == "" { + rr := rand.New(rand.NewSource(time.Now().UnixNano())) + x.xs = strconv.FormatInt(rr.Int63n(9999), 10) + } + + // gather imports first: + x.cp = genImportPath(reflect.TypeOf(x)) + x.imn[x.cp] = genCodecPkg + for _, t := range typ { + // fmt.Printf("###########: PkgPath: '%v', Name: '%s'\n", genImportPath(t), t.Name()) + if genImportPath(t) != x.bp { + panic(genAllTypesSamePkgErr) + } + x.genRefPkgs(t) + } + if buildTags != "" { + x.line("//+build " + buildTags) + x.line("") + } + x.line(` + +// ************************************************************ +// DO NOT EDIT. +// THIS FILE IS AUTO-GENERATED BY codecgen. +// ************************************************************ + +`) + x.line("package " + pkgName) + x.line("") + x.line("import (") + if x.cp != x.bp { + x.cpfx = genCodecPkg + "." 
+ x.linef("%s \"%s\"", genCodecPkg, x.cp) + } + // use a sorted set of im keys, so that we can get consistent output + imKeys := make([]string, 0, len(x.im)) + for k, _ := range x.im { + imKeys = append(imKeys, k) + } + sort.Strings(imKeys) + for _, k := range imKeys { // for k, _ := range x.im { + x.linef("%s \"%s\"", x.imn[k], k) + } + // add required packages + for _, k := range [...]string{"reflect", "unsafe", "runtime", "fmt", "errors"} { + if _, ok := x.im[k]; !ok { + if k == "unsafe" && !x.unsafe { + continue + } + x.line("\"" + k + "\"") + } + } + x.line(")") + x.line("") + + x.line("const (") + x.linef("// ----- content types ----") + x.linef("codecSelferC_UTF8%s = %v", x.xs, int64(c_UTF8)) + x.linef("codecSelferC_RAW%s = %v", x.xs, int64(c_RAW)) + x.linef("// ----- value types used ----") + x.linef("codecSelferValueTypeArray%s = %v", x.xs, int64(valueTypeArray)) + x.linef("codecSelferValueTypeMap%s = %v", x.xs, int64(valueTypeMap)) + x.linef("// ----- containerStateValues ----") + x.linef("codecSelfer_containerMapKey%s = %v", x.xs, int64(containerMapKey)) + x.linef("codecSelfer_containerMapValue%s = %v", x.xs, int64(containerMapValue)) + x.linef("codecSelfer_containerMapEnd%s = %v", x.xs, int64(containerMapEnd)) + x.linef("codecSelfer_containerArrayElem%s = %v", x.xs, int64(containerArrayElem)) + x.linef("codecSelfer_containerArrayEnd%s = %v", x.xs, int64(containerArrayEnd)) + x.line(")") + x.line("var (") + x.line("codecSelferBitsize" + x.xs + " = uint8(reflect.TypeOf(uint(0)).Bits())") + x.line("codecSelferOnlyMapOrArrayEncodeToStructErr" + x.xs + " = errors.New(`only encoded map or array can be decoded into a struct`)") + x.line(")") + x.line("") + + if x.unsafe { + x.line("type codecSelferUnsafeString" + x.xs + " struct { Data uintptr; Len int}") + x.line("") + } + x.hn = "codecSelfer" + x.xs + x.line("type " + x.hn + " struct{}") + x.line("") + + x.line("func init() {") + x.linef("if %sGenVersion != %v {", x.cpfx, GenVersion) + x.line("_, file, _, _ := runtime.Caller(0)") + x.line(`err := fmt.Errorf("codecgen version mismatch: current: %v, need %v. Re-generate file: %v", `) + x.linef(`%v, %sGenVersion, file)`, GenVersion, x.cpfx) + x.line("panic(err)") + x.linef("}") + x.line("if false { // reference the types, but skip this branch at build/run time") + var n int + // for k, t := range x.im { + for _, k := range imKeys { + t := x.im[k] + x.linef("var v%v %s.%s", n, x.imn[k], t.Name()) + n++ + } + if x.unsafe { + x.linef("var v%v unsafe.Pointer", n) + n++ + } + if n > 0 { + x.out("_") + for i := 1; i < n; i++ { + x.out(", _") + } + x.out(" = v0") + for i := 1; i < n; i++ { + x.outf(", v%v", i) + } + } + x.line("} ") // close if false + x.line("}") // close init + x.line("") + + // generate rest of type info + for _, t := range typ { + x.tc = t + x.selfer(true) + x.selfer(false) + } + + for _, t := range x.ts { + rtid := reflect.ValueOf(t).Pointer() + // generate enc functions for all these slice/map types. + x.linef("func (x %s) enc%s(v %s%s, e *%sEncoder) {", x.hn, x.genMethodNameT(t), x.arr2str(t, "*"), x.genTypeName(t), x.cpfx) + x.genRequiredMethodVars(true) + switch t.Kind() { + case reflect.Array, reflect.Slice, reflect.Chan: + x.encListFallback("v", t) + case reflect.Map: + x.encMapFallback("v", t) + default: + panic(genExpectArrayOrMapErr) + } + x.line("}") + x.line("") + + // generate dec functions for all these slice/map types. 
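+		// (sketch of the emitted helper, names illustrative:
+		//	func (x codecSelfer1234) decSliceFoo(v *[]Foo, d *codec1978.Decoder) { ... })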
+ x.linef("func (x %s) dec%s(v *%s, d *%sDecoder) {", x.hn, x.genMethodNameT(t), x.genTypeName(t), x.cpfx) + x.genRequiredMethodVars(false) + switch t.Kind() { + case reflect.Array, reflect.Slice, reflect.Chan: + x.decListFallback("v", rtid, t) + case reflect.Map: + x.decMapFallback("v", rtid, t) + default: + panic(genExpectArrayOrMapErr) + } + x.line("}") + x.line("") + } + + x.line("") +} + +func (x *genRunner) checkForSelfer(t reflect.Type, varname string) bool { + // return varname != genTopLevelVarName && t != x.tc + // the only time we checkForSelfer is if we are not at the TOP of the generated code. + return varname != genTopLevelVarName +} + +func (x *genRunner) arr2str(t reflect.Type, s string) string { + if t.Kind() == reflect.Array { + return s + } + return "" +} + +func (x *genRunner) genRequiredMethodVars(encode bool) { + x.line("var h " + x.hn) + if encode { + x.line("z, r := " + x.cpfx + "GenHelperEncoder(e)") + } else { + x.line("z, r := " + x.cpfx + "GenHelperDecoder(d)") + } + x.line("_, _, _ = h, z, r") +} + +func (x *genRunner) genRefPkgs(t reflect.Type) { + if _, ok := x.is[t]; ok { + return + } + // fmt.Printf(">>>>>>: PkgPath: '%v', Name: '%s'\n", genImportPath(t), t.Name()) + x.is[t] = struct{}{} + tpkg, tname := genImportPath(t), t.Name() + if tpkg != "" && tpkg != x.bp && tpkg != x.cp && tname != "" && tname[0] >= 'A' && tname[0] <= 'Z' { + if _, ok := x.im[tpkg]; !ok { + x.im[tpkg] = t + if idx := strings.LastIndex(tpkg, "/"); idx < 0 { + x.imn[tpkg] = tpkg + } else { + x.imc++ + x.imn[tpkg] = "pkg" + strconv.FormatUint(x.imc, 10) + "_" + tpkg[idx+1:] + } + } + } + switch t.Kind() { + case reflect.Array, reflect.Slice, reflect.Ptr, reflect.Chan: + x.genRefPkgs(t.Elem()) + case reflect.Map: + x.genRefPkgs(t.Elem()) + x.genRefPkgs(t.Key()) + case reflect.Struct: + for i := 0; i < t.NumField(); i++ { + if fname := t.Field(i).Name; fname != "" && fname[0] >= 'A' && fname[0] <= 'Z' { + x.genRefPkgs(t.Field(i).Type) + } + } + } +} + +func (x *genRunner) line(s string) { + x.out(s) + if len(s) == 0 || s[len(s)-1] != '\n' { + x.out("\n") + } +} + +func (x *genRunner) varsfx() string { + x.c++ + return strconv.FormatUint(x.c, 10) +} + +func (x *genRunner) out(s string) { + if _, err := io.WriteString(x.w, s); err != nil { + panic(err) + } +} + +func (x *genRunner) linef(s string, params ...interface{}) { + x.line(fmt.Sprintf(s, params...)) +} + +func (x *genRunner) outf(s string, params ...interface{}) { + x.out(fmt.Sprintf(s, params...)) +} + +func (x *genRunner) genTypeName(t reflect.Type) (n string) { + // defer func() { fmt.Printf(">>>> ####: genTypeName: t: %v, name: '%s'\n", t, n) }() + + // if the type has a PkgPath, which doesn't match the current package, + // then include it. 
+	// We cannot depend on t.String(), because it includes the current package,
+	// or on t.PkgPath, because it includes the full import path.
+	//
+	var ptrPfx string
+	for t.Kind() == reflect.Ptr {
+		ptrPfx += "*"
+		t = t.Elem()
+	}
+	if tn := t.Name(); tn != "" {
+		return ptrPfx + x.genTypeNamePrim(t)
+	}
+	switch t.Kind() {
+	case reflect.Map:
+		return ptrPfx + "map[" + x.genTypeName(t.Key()) + "]" + x.genTypeName(t.Elem())
+	case reflect.Slice:
+		return ptrPfx + "[]" + x.genTypeName(t.Elem())
+	case reflect.Array:
+		return ptrPfx + "[" + strconv.FormatInt(int64(t.Len()), 10) + "]" + x.genTypeName(t.Elem())
+	case reflect.Chan:
+		return ptrPfx + t.ChanDir().String() + " " + x.genTypeName(t.Elem())
+	default:
+		if t == intfTyp {
+			return ptrPfx + "interface{}"
+		} else {
+			return ptrPfx + x.genTypeNamePrim(t)
+		}
+	}
+}
+
+func (x *genRunner) genTypeNamePrim(t reflect.Type) (n string) {
+	if t.Name() == "" {
+		return t.String()
+	} else if genImportPath(t) == "" || genImportPath(t) == genImportPath(x.tc) {
+		return t.Name()
+	} else {
+		return x.imn[genImportPath(t)] + "." + t.Name()
+		// return t.String() // best way to get the package name inclusive
+	}
+}
+
+func (x *genRunner) genZeroValueR(t reflect.Type) string {
+	// return the Go literal representing the zero value of type t.
+	switch t.Kind() {
+	case reflect.Ptr, reflect.Interface, reflect.Chan, reflect.Func,
+		reflect.Slice, reflect.Map, reflect.Invalid:
+		return "nil"
+	case reflect.Bool:
+		return "false"
+	case reflect.String:
+		return `""`
+	case reflect.Struct, reflect.Array:
+		return x.genTypeName(t) + "{}"
+	default: // all numbers
+		return "0"
+	}
+}
+
+func (x *genRunner) genMethodNameT(t reflect.Type) (s string) {
+	return genMethodNameT(t, x.tc)
+}
+
+func (x *genRunner) selfer(encode bool) {
+	t := x.tc
+	t0 := t
+	// always make decode use a pointer receiver,
+	// and structs always use a ptr receiver (encode|decode)
+	isptr := !encode || t.Kind() == reflect.Struct
+	fnSigPfx := "func (x "
+	if isptr {
+		fnSigPfx += "*"
+	}
+	fnSigPfx += x.genTypeName(t)
+
+	x.out(fnSigPfx)
+	if isptr {
+		t = reflect.PtrTo(t)
+	}
+	if encode {
+		x.line(") CodecEncodeSelf(e *" + x.cpfx + "Encoder) {")
+		x.genRequiredMethodVars(true)
+		// x.enc(genTopLevelVarName, t)
+		x.encVar(genTopLevelVarName, t)
+	} else {
+		x.line(") CodecDecodeSelf(d *" + x.cpfx + "Decoder) {")
+		x.genRequiredMethodVars(false)
+		// do not use decVar, as there is no need to check TryDecodeAsNil
+		// or way to elegantly handle that, and also setting it to a
+		// non-nil value doesn't affect the pointer passed.
+		// x.decVar(genTopLevelVarName, t, false)
+		x.dec(genTopLevelVarName, t0)
+	}
+	x.line("}")
+	x.line("")
+
+	if encode || t0.Kind() != reflect.Struct {
+		return
+	}
+
+	// write the codecDecodeSelfFromMap method(s)
+	if genUseOneFunctionForDecStructMap {
+		x.out(fnSigPfx)
+		x.line(") codecDecodeSelfFromMap(l int, d *" + x.cpfx + "Decoder) {")
+		x.genRequiredMethodVars(false)
+		x.decStructMap(genTopLevelVarName, "l", reflect.ValueOf(t0).Pointer(), t0, genStructMapStyleConsolidated)
+		x.line("}")
+		x.line("")
+	} else {
+		x.out(fnSigPfx)
+		x.line(") codecDecodeSelfFromMapLenPrefix(l int, d *" + x.cpfx + "Decoder) {")
+		x.genRequiredMethodVars(false)
+		x.decStructMap(genTopLevelVarName, "l", reflect.ValueOf(t0).Pointer(), t0, genStructMapStyleLenPrefix)
+		x.line("}")
+		x.line("")
+
+		x.out(fnSigPfx)
+		x.line(") codecDecodeSelfFromMapCheckBreak(l int, d *" + x.cpfx + "Decoder) {")
+		x.genRequiredMethodVars(false)
+		x.decStructMap(genTopLevelVarName, "l", reflect.ValueOf(t0).Pointer(), t0, genStructMapStyleCheckBreak)
+		x.line("}")
+		x.line("")
+	}
+
+	// write the codecDecodeSelfFromArray method
+	x.out(fnSigPfx)
+	x.line(") codecDecodeSelfFromArray(l int, d *" + x.cpfx + "Decoder) {")
+	x.genRequiredMethodVars(false)
+	x.decStructArray(genTopLevelVarName, "l", "return", reflect.ValueOf(t0).Pointer(), t0)
+	x.line("}")
+	x.line("")
+
+}
+
+// used for chan, array, slice, map
+func (x *genRunner) xtraSM(varname string, encode bool, t reflect.Type) {
+	if encode {
+		x.linef("h.enc%s((%s%s)(%s), e)", x.genMethodNameT(t), x.arr2str(t, "*"), x.genTypeName(t), varname)
+	} else {
+		x.linef("h.dec%s((*%s)(%s), d)", x.genMethodNameT(t), x.genTypeName(t), varname)
+	}
+	if _, ok := x.tm[t]; !ok {
+		x.tm[t] = struct{}{}
+		x.ts = append(x.ts, t)
+	}
+}
+
+// encVar will encode a variable.
+// The parameter, t, is the reflect.Type of the variable itself
+func (x *genRunner) encVar(varname string, t reflect.Type) {
+	// fmt.Printf(">>>>>> varname: %s, t: %v\n", varname, t)
+	var checkNil bool
+	switch t.Kind() {
+	case reflect.Ptr, reflect.Interface, reflect.Slice, reflect.Map, reflect.Chan:
+		checkNil = true
+	}
+	if checkNil {
+		x.linef("if %s == nil { r.EncodeNil() } else { ", varname)
+	}
+	switch t.Kind() {
+	case reflect.Ptr:
+		switch t.Elem().Kind() {
+		case reflect.Struct, reflect.Array:
+			x.enc(varname, genNonPtr(t))
+		default:
+			i := x.varsfx()
+			x.line(genTempVarPfx + i + " := *" + varname)
+			x.enc(genTempVarPfx+i, genNonPtr(t))
+		}
+	case reflect.Struct, reflect.Array:
+		i := x.varsfx()
+		x.line(genTempVarPfx + i + " := &" + varname)
+		x.enc(genTempVarPfx+i, t)
+	default:
+		x.enc(varname, t)
+	}
+
+	if checkNil {
+		x.line("}")
+	}
+
+}
+
+// enc will encode a variable (varname) of type T,
+// except when t is of kind reflect.Struct or reflect.Array, in which case varname is of type *T (to prevent copying)
+func (x *genRunner) enc(varname string, t reflect.Type) {
+	// varname here must be a pointer to a struct/array, or a value directly.
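+	// For illustration (editor's sketch): for a plain int field, enc("x.F", t)
+	// ends up emitting
+	//	r.EncodeInt(int64(x.F))
+	// while types implementing Selfer short-circuit to x.F.CodecEncodeSelf(e).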
+	rtid := reflect.ValueOf(t).Pointer()
+	// We call CodecEncodeSelf if one of the following holds:
+	// - the type already implements Selfer, call that
+	// - the type has a Selfer implementation just created, use that
+	// - the type is in the list of the ones we will generate for, but it is not currently being generated
+
+	tptr := reflect.PtrTo(t)
+	tk := t.Kind()
+	if x.checkForSelfer(t, varname) {
+		if t.Implements(selferTyp) || (tptr.Implements(selferTyp) && (tk == reflect.Array || tk == reflect.Struct)) {
+			x.line(varname + ".CodecEncodeSelf(e)")
+			return
+		}
+
+		if _, ok := x.te[rtid]; ok {
+			x.line(varname + ".CodecEncodeSelf(e)")
+			return
+		}
+	}
+
+	inlist := false
+	for _, t0 := range x.t {
+		if t == t0 {
+			inlist = true
+			if x.checkForSelfer(t, varname) {
+				x.line(varname + ".CodecEncodeSelf(e)")
+				return
+			}
+			break
+		}
+	}
+
+	var rtidAdded bool
+	if t == x.tc {
+		x.te[rtid] = true
+		rtidAdded = true
+	}
+
+	// check if
+	// - type is RawExt
+	// - the type implements (Text|JSON|Binary)(Unm|M)arshal
+	mi := x.varsfx()
+	x.linef("%sm%s := z.EncBinary()", genTempVarPfx, mi)
+	x.linef("_ = %sm%s", genTempVarPfx, mi)
+	x.line("if false {")           //start if block
+	defer func() { x.line("}") }() //end if block
+
+	if t == rawExtTyp {
+		x.linef("} else { r.EncodeRawExt(%v, e)", varname)
+		return
+	}
+	// HACK: Support for Builtins.
+	// Currently, only Binc supports builtins, and the only builtin type is time.Time.
+	// Have a method that returns the rtid for time.Time if Handle is Binc.
+	if t == timeTyp {
+		vrtid := genTempVarPfx + "m" + x.varsfx()
+		x.linef("} else if %s := z.TimeRtidIfBinc(); %s != 0 { ", vrtid, vrtid)
+		x.linef("r.EncodeBuiltin(%s, %s)", vrtid, varname)
+	}
+	// only check for extensions if the type is named, and has a packagePath.
+	if genImportPath(t) != "" && t.Name() != "" {
+		// first check if extensions are configured, before doing the interface conversion
+		x.linef("} else if z.HasExtensions() && z.EncExt(%s) {", varname)
+	}
+	if t.Implements(binaryMarshalerTyp) || tptr.Implements(binaryMarshalerTyp) {
+		x.linef("} else if %sm%s { z.EncBinaryMarshal(%v) ", genTempVarPfx, mi, varname)
+	}
+	if t.Implements(jsonMarshalerTyp) || tptr.Implements(jsonMarshalerTyp) {
+		x.linef("} else if !%sm%s && z.IsJSONHandle() { z.EncJSONMarshal(%v) ", genTempVarPfx, mi, varname)
+	} else if t.Implements(textMarshalerTyp) || tptr.Implements(textMarshalerTyp) {
+		x.linef("} else if !%sm%s { z.EncTextMarshal(%v) ", genTempVarPfx, mi, varname)
+	}
+
+	x.line("} else {")
+
+	switch t.Kind() {
+	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+		x.line("r.EncodeInt(int64(" + varname + "))")
+	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+		x.line("r.EncodeUint(uint64(" + varname + "))")
+	case reflect.Float32:
+		x.line("r.EncodeFloat32(float32(" + varname + "))")
+	case reflect.Float64:
+		x.line("r.EncodeFloat64(float64(" + varname + "))")
+	case reflect.Bool:
+		x.line("r.EncodeBool(bool(" + varname + "))")
+	case reflect.String:
+		x.line("r.EncodeString(codecSelferC_UTF8" + x.xs + ", string(" + varname + "))")
+	case reflect.Chan:
+		x.xtraSM(varname, true, t)
+		// x.encListFallback(varname, rtid, t)
+	case reflect.Array:
+		x.xtraSM(varname, true, t)
+	case reflect.Slice:
+		// if nil, call dedicated function
+		// if a []uint8, call dedicated function
+		// if a known fastpath slice, call dedicated function
+		// else write encode function in-line.
+		// - if elements are primitives or Selfers, call dedicated function on each member.
+		// - else call Encoder.encode(XXX) on it.
+		if rtid == uint8SliceTypId {
+			x.line("r.EncodeStringBytes(codecSelferC_RAW" + x.xs + ", []byte(" + varname + "))")
+		} else if fastpathAV.index(rtid) != -1 {
+			g := x.newGenV(t)
+			x.line("z.F." + g.MethodNamePfx("Enc", false) + "V(" + varname + ", false, e)")
+		} else {
+			x.xtraSM(varname, true, t)
+			// x.encListFallback(varname, rtid, t)
+		}
+	case reflect.Map:
+		// if nil, call dedicated function
+		// if a known fastpath map, call dedicated function
+		// else write encode function in-line.
+		// - if elements are primitives or Selfers, call dedicated function on each member.
+		// - else call Encoder.encode(XXX) on it.
+		// x.line("if " + varname + " == nil { \nr.EncodeNil()\n } else { ")
+		if fastpathAV.index(rtid) != -1 {
+			g := x.newGenV(t)
+			x.line("z.F." + g.MethodNamePfx("Enc", false) + "V(" + varname + ", false, e)")
+		} else {
+			x.xtraSM(varname, true, t)
+			// x.encMapFallback(varname, rtid, t)
+		}
+	case reflect.Struct:
+		if !inlist {
+			delete(x.te, rtid)
+			x.line("z.EncFallback(" + varname + ")")
+			break
+		}
+		x.encStruct(varname, rtid, t)
+	default:
+		if rtidAdded {
+			delete(x.te, rtid)
+		}
+		x.line("z.EncFallback(" + varname + ")")
+	}
+}
+
+func (x *genRunner) encZero(t reflect.Type) {
+	switch t.Kind() {
+	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+		x.line("r.EncodeInt(0)")
+	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+		x.line("r.EncodeUint(0)")
+	case reflect.Float32:
+		x.line("r.EncodeFloat32(0)")
+	case reflect.Float64:
+		x.line("r.EncodeFloat64(0)")
+	case reflect.Bool:
+		x.line("r.EncodeBool(false)")
+	case reflect.String:
+		x.line("r.EncodeString(codecSelferC_UTF8" + x.xs + `, "")`)
+	default:
+		x.line("r.EncodeNil()")
+	}
+}
+
+func (x *genRunner) encStruct(varname string, rtid uintptr, t reflect.Type) {
+	// Use knowledge from structfieldinfo (mbs, encodable fields; ignore omitempty).
+	// replicate code in kStruct i.e. for each field, deref type to non-pointer, and call x.enc on it
+
+	// if t == the type we are currently running selfer on, generate for all fields
+	ti := x.ti.get(rtid, t)
+	i := x.varsfx()
+	sepVarname := genTempVarPfx + "sep" + i
+	numfieldsvar := genTempVarPfx + "q" + i
+	ti2arrayvar := genTempVarPfx + "r" + i
+	struct2arrvar := genTempVarPfx + "2arr" + i
+
+	x.line(sepVarname + " := !z.EncBinary()")
+	x.linef("%s := z.EncBasicHandle().StructToArray", struct2arrvar)
+	tisfi := ti.sfip // always use sequence from file. decStruct expects same thing.
+	// due to omitEmpty, we need to calculate the
+	// number of non-empty things we write out first.
+	// This is required as we need to pre-determine the size of the container,
+	// to support length-prefixing.
+	x.linef("var %s [%v]bool", numfieldsvar, len(tisfi))
+	x.linef("_, _, _ = %s, %s, %s", sepVarname, numfieldsvar, struct2arrvar)
+	x.linef("const %s bool = %v", ti2arrayvar, ti.toArray)
+	nn := 0
+	for j, si := range tisfi {
+		if !si.omitEmpty {
+			nn++
+			continue
+		}
+		var t2 reflect.StructField
+		var omitline string
+		if si.i != -1 {
+			t2 = t.Field(int(si.i))
+		} else {
+			t2typ := t
+			varname3 := varname
+			for _, ix := range si.is {
+				for t2typ.Kind() == reflect.Ptr {
+					t2typ = t2typ.Elem()
+				}
+				t2 = t2typ.Field(ix)
+				t2typ = t2.Type
+				varname3 = varname3 + "." + t2.Name
+				if t2typ.Kind() == reflect.Ptr {
+					omitline += varname3 + " != nil && "
+				}
+			}
+		}
+		// never check omitEmpty on a struct type, as it may contain uncomparable map/slice/etc.
+		// also, for maps/slices/arrays, check if len != 0 (not if == zero value)
+		switch t2.Type.Kind() {
+		case reflect.Struct:
+			omitline += " true"
+		case reflect.Map, reflect.Slice, reflect.Array, reflect.Chan:
+			omitline += "len(" + varname + "." + t2.Name + ") != 0"
+		default:
+			omitline += varname + "." + t2.Name + " != " + x.genZeroValueR(t2.Type)
+		}
+		x.linef("%s[%v] = %s", numfieldsvar, j, omitline)
+	}
+	x.linef("var %snn%s int", genTempVarPfx, i)
+	x.linef("if %s || %s {", ti2arrayvar, struct2arrvar) // if ti.toArray {
+	x.line("r.EncodeArrayStart(" + strconv.FormatInt(int64(len(tisfi)), 10) + ")")
+	x.linef("} else {") // if not ti.toArray
+	x.linef("%snn%s = %v", genTempVarPfx, i, nn)
+	x.linef("for _, b := range %s { if b { %snn%s++ } }", numfieldsvar, genTempVarPfx, i)
+	x.linef("r.EncodeMapStart(%snn%s)", genTempVarPfx, i)
+	x.linef("%snn%s = %v", genTempVarPfx, i, 0)
+	// x.line("r.EncodeMapStart(" + strconv.FormatInt(int64(len(tisfi)), 10) + ")")
+	x.line("}") // close if not StructToArray
+
+	for j, si := range tisfi {
+		i := x.varsfx()
+		isNilVarName := genTempVarPfx + "n" + i
+		var labelUsed bool
+		var t2 reflect.StructField
+		if si.i != -1 {
+			t2 = t.Field(int(si.i))
+		} else {
+			t2typ := t
+			varname3 := varname
+			for _, ix := range si.is {
+				// fmt.Printf("%%%% %v, ix: %v\n", t2typ, ix)
+				for t2typ.Kind() == reflect.Ptr {
+					t2typ = t2typ.Elem()
+				}
+				t2 = t2typ.Field(ix)
+				t2typ = t2.Type
+				varname3 = varname3 + "." + t2.Name
+				if t2typ.Kind() == reflect.Ptr {
+					if !labelUsed {
+						x.line("var " + isNilVarName + " bool")
+					}
+					x.line("if " + varname3 + " == nil { " + isNilVarName + " = true ")
+					x.line("goto LABEL" + i)
+					x.line("}")
+					labelUsed = true
+					// "varname3 = new(" + x.genTypeName(t3.Elem()) + ") }")
+				}
+			}
+			// t2 = t.FieldByIndex(si.is)
+		}
+		if labelUsed {
+			x.line("LABEL" + i + ":")
+		}
+		// encode the field, as an array element or as a map key/value pair
+
+		x.linef("if %s || %s {", ti2arrayvar, struct2arrvar) // if ti.toArray
+		if labelUsed {
+			x.line("if " + isNilVarName + " { r.EncodeNil() } else { ")
+		}
+		x.linef("z.EncSendContainerState(codecSelfer_containerArrayElem%s)", x.xs)
+		if si.omitEmpty {
+			x.linef("if %s[%v] {", numfieldsvar, j)
+		}
+		x.encVar(varname+"."+t2.Name, t2.Type)
+		if si.omitEmpty {
+			x.linef("} else {")
+			x.encZero(t2.Type)
+			x.linef("}")
+		}
+		if labelUsed {
+			x.line("}")
+		}
+
+		x.linef("} else {") // if not ti.toArray
+
+		if si.omitEmpty {
+			x.linef("if %s[%v] {", numfieldsvar, j)
+		}
+		x.linef("z.EncSendContainerState(codecSelfer_containerMapKey%s)", x.xs)
+		x.line("r.EncodeString(codecSelferC_UTF8" + x.xs + ", string(\"" + si.encName + "\"))")
+		x.linef("z.EncSendContainerState(codecSelfer_containerMapValue%s)", x.xs)
+		if labelUsed {
+			x.line("if " + isNilVarName + " { r.EncodeNil() } else { ")
+			x.encVar(varname+"."+t2.Name, t2.Type)
+			x.line("}")
+		} else {
+			x.encVar(varname+"."+t2.Name, t2.Type)
+		}
+		if si.omitEmpty {
+			x.line("}")
+		}
+		x.linef("} ") // end if/else ti.toArray
+	}
+	x.linef("if %s || %s {", ti2arrayvar, struct2arrvar) // if ti.toArray {
+	x.linef("z.EncSendContainerState(codecSelfer_containerArrayEnd%s)", x.xs)
+	x.line("} else {")
+	x.linef("z.EncSendContainerState(codecSelfer_containerMapEnd%s)", x.xs)
+	x.line("}")
+
+}
+
+func (x *genRunner) encListFallback(varname string, t reflect.Type) {
+	i := x.varsfx()
+	g := genTempVarPfx
+	x.line("r.EncodeArrayStart(len(" + varname + "))")
+	if t.Kind() == reflect.Chan {
+		x.linef("for %si%s, %si2%s := 0, len(%s); %si%s < %si2%s; %si%s++ {", g, i, g, i, varname, g, i, g, i, g, i)
+		x.linef("z.EncSendContainerState(codecSelfer_containerArrayElem%s)", x.xs)
+		x.linef("%sv%s := <-%s", g, i, varname)
+	} else {
+		// x.linef("for %si%s, %sv%s := range %s {", genTempVarPfx, i, genTempVarPfx, i, varname)
+		x.linef("for _, %sv%s := range %s {", genTempVarPfx, i, varname)
+		x.linef("z.EncSendContainerState(codecSelfer_containerArrayElem%s)", x.xs)
+	}
+	x.encVar(genTempVarPfx+"v"+i, t.Elem())
+	x.line("}")
+	x.linef("z.EncSendContainerState(codecSelfer_containerArrayEnd%s)", x.xs)
+}
+
+func (x *genRunner) encMapFallback(varname string, t reflect.Type) {
+	// TODO: expand this to handle canonical.
+	i := x.varsfx()
+	x.line("r.EncodeMapStart(len(" + varname + "))")
+	x.linef("for %sk%s, %sv%s := range %s {", genTempVarPfx, i, genTempVarPfx, i, varname)
+	// x.line("for " + genTempVarPfx + "k" + i + ", " + genTempVarPfx + "v" + i + " := range " + varname + " {")
+	x.linef("z.EncSendContainerState(codecSelfer_containerMapKey%s)", x.xs)
+	x.encVar(genTempVarPfx+"k"+i, t.Key())
+	x.linef("z.EncSendContainerState(codecSelfer_containerMapValue%s)", x.xs)
+	x.encVar(genTempVarPfx+"v"+i, t.Elem())
+	x.line("}")
+	x.linef("z.EncSendContainerState(codecSelfer_containerMapEnd%s)", x.xs)
+}
+
+func (x *genRunner) decVar(varname string, t reflect.Type, canBeNil bool) {
+	// We only decode as nil if the value is nillable.
+	// This removes some of the wasted checks for TryDecodeAsNil.
+	// We need to think about this more, to see what happens if omitempty, etc.
+	// cause a nil value to be stored when something is expected.
+	// This could happen when decoding from a struct encoded as an array.
+	// For that, decVar should be called with canBeNil=true, to force true as its value.
+	i := x.varsfx()
+	if !canBeNil {
+		canBeNil = genAnythingCanBeNil || !genIsImmutable(t)
+	}
+	if canBeNil {
+		x.line("if r.TryDecodeAsNil() {")
+		if t.Kind() == reflect.Ptr {
+			x.line("if " + varname + " != nil { ")
+
+			// if varname is a field of a struct (has a dot in it),
+			// then just set it to nil
+			if strings.IndexByte(varname, '.') != -1 {
+				x.line(varname + " = nil")
+			} else {
+				x.line("*" + varname + " = " + x.genZeroValueR(t.Elem()))
+			}
+			x.line("}")
+		} else {
+			x.line(varname + " = " + x.genZeroValueR(t))
+		}
+		x.line("} else {")
+	} else {
+		x.line("// cannot be nil")
+	}
+	if t.Kind() != reflect.Ptr {
+		if x.decTryAssignPrimitive(varname, t) {
+			x.line(genTempVarPfx + "v" + i + " := &" + varname)
+			x.dec(genTempVarPfx+"v"+i, t)
+		}
+	} else {
+		x.linef("if %s == nil { %s = new(%s) }", varname, varname, x.genTypeName(t.Elem()))
+		// Ensure we set underlying ptr to a non-nil value (so we can deref to it later).
+		// There's a chance of a **T in here which is nil.
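+		// For a **T field, the emitted code looks roughly like (editor's sketch,
+		// names illustrative):
+		//	if x.F == nil { x.F = new(*T) }
+		//	if *x.F == nil { *x.F = new(T) }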
+ var ptrPfx string + for t = t.Elem(); t.Kind() == reflect.Ptr; t = t.Elem() { + ptrPfx += "*" + x.linef("if %s%s == nil { %s%s = new(%s)}", + ptrPfx, varname, ptrPfx, varname, x.genTypeName(t)) + } + // if varname has [ in it, then create temp variable for this ptr thingie + if strings.Index(varname, "[") >= 0 { + varname2 := genTempVarPfx + "w" + i + x.line(varname2 + " := " + varname) + varname = varname2 + } + + if ptrPfx == "" { + x.dec(varname, t) + } else { + x.line(genTempVarPfx + "z" + i + " := " + ptrPfx + varname) + x.dec(genTempVarPfx+"z"+i, t) + } + + } + + if canBeNil { + x.line("} ") + } +} + +func (x *genRunner) dec(varname string, t reflect.Type) { + // assumptions: + // - the varname is to a pointer already. No need to take address of it + // - t is always a baseType T (not a *T, etc). + rtid := reflect.ValueOf(t).Pointer() + tptr := reflect.PtrTo(t) + if x.checkForSelfer(t, varname) { + if t.Implements(selferTyp) || tptr.Implements(selferTyp) { + x.line(varname + ".CodecDecodeSelf(d)") + return + } + if _, ok := x.td[rtid]; ok { + x.line(varname + ".CodecDecodeSelf(d)") + return + } + } + + inlist := false + for _, t0 := range x.t { + if t == t0 { + inlist = true + if x.checkForSelfer(t, varname) { + x.line(varname + ".CodecDecodeSelf(d)") + return + } + break + } + } + + var rtidAdded bool + if t == x.tc { + x.td[rtid] = true + rtidAdded = true + } + + // check if + // - type is RawExt + // - the type implements (Text|JSON|Binary)(Unm|M)arshal + mi := x.varsfx() + x.linef("%sm%s := z.DecBinary()", genTempVarPfx, mi) + x.linef("_ = %sm%s", genTempVarPfx, mi) + x.line("if false {") //start if block + defer func() { x.line("}") }() //end if block + + if t == rawExtTyp { + x.linef("} else { r.DecodeExt(%v, 0, nil)", varname) + return + } + + // HACK: Support for Builtins. + // Currently, only Binc supports builtins, and the only builtin type is time.Time. + // Have a method that returns the rtid for time.Time if Handle is Binc. + if t == timeTyp { + vrtid := genTempVarPfx + "m" + x.varsfx() + x.linef("} else if %s := z.TimeRtidIfBinc(); %s != 0 { ", vrtid, vrtid) + x.linef("r.DecodeBuiltin(%s, %s)", vrtid, varname) + } + // only check for extensions if the type is named, and has a packagePath. 
+	if genImportPath(t) != "" && t.Name() != "" {
+		// first check if extensions are configured, before doing the interface conversion
+		x.linef("} else if z.HasExtensions() && z.DecExt(%s) {", varname)
+	}
+
+	if t.Implements(binaryUnmarshalerTyp) || tptr.Implements(binaryUnmarshalerTyp) {
+		x.linef("} else if %sm%s { z.DecBinaryUnmarshal(%v) ", genTempVarPfx, mi, varname)
+	}
+	if t.Implements(jsonUnmarshalerTyp) || tptr.Implements(jsonUnmarshalerTyp) {
+		x.linef("} else if !%sm%s && z.IsJSONHandle() { z.DecJSONUnmarshal(%v)", genTempVarPfx, mi, varname)
+	} else if t.Implements(textUnmarshalerTyp) || tptr.Implements(textUnmarshalerTyp) {
+		x.linef("} else if !%sm%s { z.DecTextUnmarshal(%v)", genTempVarPfx, mi, varname)
+	}
+
+	x.line("} else {")
+
+	// Since these are pointers, we cannot share, and have to use them one by one
+	switch t.Kind() {
+	case reflect.Int:
+		x.line("*((*int)(" + varname + ")) = int(r.DecodeInt(codecSelferBitsize" + x.xs + "))")
+		// x.line("z.DecInt((*int)(" + varname + "))")
+	case reflect.Int8:
+		x.line("*((*int8)(" + varname + ")) = int8(r.DecodeInt(8))")
+		// x.line("z.DecInt8((*int8)(" + varname + "))")
+	case reflect.Int16:
+		x.line("*((*int16)(" + varname + ")) = int16(r.DecodeInt(16))")
+		// x.line("z.DecInt16((*int16)(" + varname + "))")
+	case reflect.Int32:
+		x.line("*((*int32)(" + varname + ")) = int32(r.DecodeInt(32))")
+		// x.line("z.DecInt32((*int32)(" + varname + "))")
+	case reflect.Int64:
+		x.line("*((*int64)(" + varname + ")) = int64(r.DecodeInt(64))")
+		// x.line("z.DecInt64((*int64)(" + varname + "))")
+
+	case reflect.Uint:
+		x.line("*((*uint)(" + varname + ")) = uint(r.DecodeUint(codecSelferBitsize" + x.xs + "))")
+		// x.line("z.DecUint((*uint)(" + varname + "))")
+	case reflect.Uint8:
+		x.line("*((*uint8)(" + varname + ")) = uint8(r.DecodeUint(8))")
+		// x.line("z.DecUint8((*uint8)(" + varname + "))")
+	case reflect.Uint16:
+		x.line("*((*uint16)(" + varname + ")) = uint16(r.DecodeUint(16))")
+		// x.line("z.DecUint16((*uint16)(" + varname + "))")
+	case reflect.Uint32:
+		x.line("*((*uint32)(" + varname + ")) = uint32(r.DecodeUint(32))")
+		// x.line("z.DecUint32((*uint32)(" + varname + "))")
+	case reflect.Uint64:
+		x.line("*((*uint64)(" + varname + ")) = uint64(r.DecodeUint(64))")
+		// x.line("z.DecUint64((*uint64)(" + varname + "))")
+	case reflect.Uintptr:
+		x.line("*((*uintptr)(" + varname + ")) = uintptr(r.DecodeUint(codecSelferBitsize" + x.xs + "))")
+
+	case reflect.Float32:
+		x.line("*((*float32)(" + varname + ")) = float32(r.DecodeFloat(true))")
+		// x.line("z.DecFloat32((*float32)(" + varname + "))")
+	case reflect.Float64:
+		x.line("*((*float64)(" + varname + ")) = float64(r.DecodeFloat(false))")
+		// x.line("z.DecFloat64((*float64)(" + varname + "))")
+
+	case reflect.Bool:
+		x.line("*((*bool)(" + varname + ")) = r.DecodeBool()")
+		// x.line("z.DecBool((*bool)(" + varname + "))")
+	case reflect.String:
+		x.line("*((*string)(" + varname + ")) = r.DecodeString()")
+		// x.line("z.DecString((*string)(" + varname + "))")
+	case reflect.Array, reflect.Chan:
+		x.xtraSM(varname, false, t)
+		// x.decListFallback(varname, rtid, true, t)
+	case reflect.Slice:
+		// if a []uint8, call dedicated function
+		// if a known fastpath slice, call dedicated function
+		// else write decode function in-line.
+		// - if elements are primitives or Selfers, call dedicated function on each member.
+		// - else call Decoder.decode(XXX) on it.
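+		// e.g. (sketch) a []int32 field takes the fastpath branch below and emits
+		//	z.F.DecSliceInt32X(yyv1, false, d)
+		// where yyv1 is the generated temp var holding a pointer to the slice.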
+		if rtid == uint8SliceTypId {
+			x.line("*" + varname + " = r.DecodeBytes(*(*[]byte)(" + varname + "), false, false)")
+		} else if fastpathAV.index(rtid) != -1 {
+			g := x.newGenV(t)
+			x.line("z.F." + g.MethodNamePfx("Dec", false) + "X(" + varname + ", false, d)")
+		} else {
+			x.xtraSM(varname, false, t)
+			// x.decListFallback(varname, rtid, false, t)
+		}
+	case reflect.Map:
+		// if a known fastpath map, call dedicated function
+		// else write decode function in-line.
+		// - if elements are primitives or Selfers, call dedicated function on each member.
+		// - else call Decoder.decode(XXX) on it.
+		if fastpathAV.index(rtid) != -1 {
+			g := x.newGenV(t)
+			x.line("z.F." + g.MethodNamePfx("Dec", false) + "X(" + varname + ", false, d)")
+		} else {
+			x.xtraSM(varname, false, t)
+			// x.decMapFallback(varname, rtid, t)
+		}
+	case reflect.Struct:
+		if inlist {
+			x.decStruct(varname, rtid, t)
+		} else {
+			// delete(x.td, rtid)
+			x.line("z.DecFallback(" + varname + ", false)")
+		}
+	default:
+		if rtidAdded {
+			delete(x.te, rtid)
+		}
+		x.line("z.DecFallback(" + varname + ", true)")
+	}
+}
+
+func (x *genRunner) decTryAssignPrimitive(varname string, t reflect.Type) (tryAsPtr bool) {
+	// We have to use the actual type name when doing a direct assignment.
+	// We don't have the luxury of casting the pointer to the underlying type.
+	//
+	// Consequently, in the situation of a
+	//     type Message int32
+	//     var x Message
+	//     var i int32 = 32
+	//     x = i                // this will bomb
+	//     x = Message(i)       // this will work
+	//     *((*int32)(&x)) = i  // this will work
+	//
+	// Consequently, we replace:
+	//     case reflect.Uint32: x.line(varname + " = uint32(r.DecodeUint(32))")
+	// with:
+	//     case reflect.Uint32: x.line(varname + " = " + genTypeNamePrim(t, x.tc) + "(r.DecodeUint(32))")
+
+	xfn := func(t reflect.Type) string {
+		return x.genTypeNamePrim(t)
+	}
+	switch t.Kind() {
+	case reflect.Int:
+		x.linef("%s = %s(r.DecodeInt(codecSelferBitsize%s))", varname, xfn(t), x.xs)
+	case reflect.Int8:
+		x.linef("%s = %s(r.DecodeInt(8))", varname, xfn(t))
+	case reflect.Int16:
+		x.linef("%s = %s(r.DecodeInt(16))", varname, xfn(t))
+	case reflect.Int32:
+		x.linef("%s = %s(r.DecodeInt(32))", varname, xfn(t))
+	case reflect.Int64:
+		x.linef("%s = %s(r.DecodeInt(64))", varname, xfn(t))
+
+	case reflect.Uint:
+		x.linef("%s = %s(r.DecodeUint(codecSelferBitsize%s))", varname, xfn(t), x.xs)
+	case reflect.Uint8:
+		x.linef("%s = %s(r.DecodeUint(8))", varname, xfn(t))
+	case reflect.Uint16:
+		x.linef("%s = %s(r.DecodeUint(16))", varname, xfn(t))
+	case reflect.Uint32:
+		x.linef("%s = %s(r.DecodeUint(32))", varname, xfn(t))
+	case reflect.Uint64:
+		x.linef("%s = %s(r.DecodeUint(64))", varname, xfn(t))
+	case reflect.Uintptr:
+		x.linef("%s = %s(r.DecodeUint(codecSelferBitsize%s))", varname, xfn(t), x.xs)
+
+	case reflect.Float32:
+		x.linef("%s = %s(r.DecodeFloat(true))", varname, xfn(t))
+	case reflect.Float64:
+		x.linef("%s = %s(r.DecodeFloat(false))", varname, xfn(t))
+
+	case reflect.Bool:
+		x.linef("%s = %s(r.DecodeBool())", varname, xfn(t))
+	case reflect.String:
+		x.linef("%s = %s(r.DecodeString())", varname, xfn(t))
+	default:
+		tryAsPtr = true
+	}
+	return
+}
+
+func (x *genRunner) decListFallback(varname string, rtid uintptr, t reflect.Type) {
+	type tstruc struct {
+		TempVar   string
+		Rand      string
+		Varname   string
+		CTyp      string
+		Typ       string
+		Immutable bool
+		Size      int
+	}
+	telem := t.Elem()
+	ts := tstruc{genTempVarPfx, x.varsfx(), varname, x.genTypeName(t), x.genTypeName(telem), genIsImmutable(telem), int(telem.Size())}
+
+	funcs := make(template.FuncMap)
+
+	funcs["decLineVar"] = func(varname string) string {
+		x.decVar(varname, telem, false)
+		return ""
+	}
+	funcs["decLine"] = func(pfx string) string {
+		x.decVar(ts.TempVar+pfx+ts.Rand, reflect.PtrTo(telem), false)
+		return ""
+	}
+	funcs["var"] = func(s string) string {
+		return ts.TempVar + s + ts.Rand
+	}
+	funcs["zero"] = func() string {
+		return x.genZeroValueR(telem)
+	}
+	funcs["isArray"] = func() bool {
+		return t.Kind() == reflect.Array
+	}
+	funcs["isSlice"] = func() bool {
+		return t.Kind() == reflect.Slice
+	}
+	funcs["isChan"] = func() bool {
+		return t.Kind() == reflect.Chan
+	}
+	tm, err := template.New("").Funcs(funcs).Parse(genDecListTmpl)
+	if err != nil {
+		panic(err)
+	}
+	if err = tm.Execute(x.w, &ts); err != nil {
+		panic(err)
+	}
+}
+
+func (x *genRunner) decMapFallback(varname string, rtid uintptr, t reflect.Type) {
+	type tstruc struct {
+		TempVar string
+		Sfx     string
+		Rand    string
+		Varname string
+		KTyp    string
+		Typ     string
+		Size    int
+	}
+	telem := t.Elem()
+	tkey := t.Key()
+	ts := tstruc{
+		genTempVarPfx, x.xs, x.varsfx(), varname, x.genTypeName(tkey),
+		x.genTypeName(telem), int(telem.Size() + tkey.Size()),
+	}
+
+	funcs := make(template.FuncMap)
+	funcs["decElemZero"] = func() string {
+		return x.genZeroValueR(telem)
+	}
+	funcs["decElemKindImmutable"] = func() bool {
+		return genIsImmutable(telem)
+	}
+	funcs["decElemKindPtr"] = func() bool {
+		return telem.Kind() == reflect.Ptr
+	}
+	funcs["decElemKindIntf"] = func() bool {
+		return telem.Kind() == reflect.Interface
+	}
+	funcs["decLineVarK"] = func(varname string) string {
+		x.decVar(varname, tkey, false)
+		return ""
+	}
+	funcs["decLineVar"] = func(varname string) string {
+		x.decVar(varname, telem, false)
+		return ""
+	}
+	funcs["decLineK"] = func(pfx string) string {
+		x.decVar(ts.TempVar+pfx+ts.Rand, reflect.PtrTo(tkey), false)
+		return ""
+	}
+	funcs["decLine"] = func(pfx string) string {
+		x.decVar(ts.TempVar+pfx+ts.Rand, reflect.PtrTo(telem), false)
+		return ""
+	}
+	funcs["var"] = func(s string) string {
+		return ts.TempVar + s + ts.Rand
+	}
+
+	tm, err := template.New("").Funcs(funcs).Parse(genDecMapTmpl)
+	if err != nil {
+		panic(err)
+	}
+	if err = tm.Execute(x.w, &ts); err != nil {
+		panic(err)
+	}
+}
+
+func (x *genRunner) decStructMapSwitch(kName string, varname string, rtid uintptr, t reflect.Type) {
+	ti := x.ti.get(rtid, t)
+	tisfi := ti.sfip // always use sequence from file. decStruct expects same thing.
+	x.line("switch (" + kName + ") {")
+	for _, si := range tisfi {
+		x.line("case \"" + si.encName + "\":")
+		var t2 reflect.StructField
+		if si.i != -1 {
+			t2 = t.Field(int(si.i))
+		} else {
+			// we must accommodate anonymous fields, where the embedded field is a nil pointer in the value.
+			// t2 = t.FieldByIndex(si.is)
+			t2typ := t
+			varname3 := varname
+			for _, ix := range si.is {
+				for t2typ.Kind() == reflect.Ptr {
+					t2typ = t2typ.Elem()
+				}
+				t2 = t2typ.Field(ix)
+				t2typ = t2.Type
+				varname3 = varname3 + "." + t2.Name
+				if t2typ.Kind() == reflect.Ptr {
+					x.linef("if %s == nil { %s = new(%s) }", varname3, varname3, x.genTypeName(t2typ.Elem()))
+				}
+			}
+		}
+		x.decVar(varname+"."+t2.Name, t2.Type, false)
+	}
+	x.line("default:")
+	// pass the slice here, so that the string will not escape, and maybe save allocation
+	x.line("z.DecStructFieldNotFound(-1, " + kName + ")")
+	x.line("} // end switch " + kName)
+}
+
+func (x *genRunner) decStructMap(varname, lenvarname string, rtid uintptr, t reflect.Type, style genStructMapStyle) {
+	tpfx := genTempVarPfx
+	i := x.varsfx()
+	kName := tpfx + "s" + i
+
+	// We thought to use ReadStringAsBytes, as go compiler might optimize the copy out.
+	// However, using that was more expensive, as it seems that the switch expression
+	// is evaluated each time.
+	//
+	// We could depend on decodeString using a temporary/shared buffer internally.
+	// However, this model of creating a byte array, and using explicitly is faster,
+	// and allows optional use of unsafe []byte->string conversion without alloc.
+
+	// Also, ensure that the slice array doesn't escape.
+	// That will help escape analysis prevent allocation when it gets better.
+
+	// x.line("var " + kName + "Arr = [32]byte{} // default string to decode into")
+	// x.line("var " + kName + "Slc = " + kName + "Arr[:] // default slice to decode into")
+	// use the scratch buffer to avoid allocation (most field names are < 32).
+
+	x.line("var " + kName + "Slc = z.DecScratchBuffer() // default slice to decode into")
+
+	x.line("_ = " + kName + "Slc")
+	switch style {
+	case genStructMapStyleLenPrefix:
+		x.linef("for %sj%s := 0; %sj%s < %s; %sj%s++ {", tpfx, i, tpfx, i, lenvarname, tpfx, i)
+	case genStructMapStyleCheckBreak:
+		x.linef("for %sj%s := 0; !r.CheckBreak(); %sj%s++ {", tpfx, i, tpfx, i)
+	default: // 0, otherwise.
+		x.linef("var %shl%s bool = %s >= 0", tpfx, i, lenvarname) // has length
+		x.linef("for %sj%s := 0; ; %sj%s++ {", tpfx, i, tpfx, i)
+		x.linef("if %shl%s { if %sj%s >= %s { break }", tpfx, i, tpfx, i, lenvarname)
+		x.line("} else { if r.CheckBreak() { break }; }")
+	}
+	x.linef("z.DecSendContainerState(codecSelfer_containerMapKey%s)", x.xs)
+	x.line(kName + "Slc = r.DecodeBytes(" + kName + "Slc, true, true)")
+	// let string be scoped to this loop alone, so it doesn't escape.
+	if x.unsafe {
+		x.line(kName + "SlcHdr := codecSelferUnsafeString" + x.xs + "{uintptr(unsafe.Pointer(&" +
+			kName + "Slc[0])), len(" + kName + "Slc)}")
+		x.line(kName + " := *(*string)(unsafe.Pointer(&" + kName + "SlcHdr))")
+	} else {
+		x.line(kName + " := string(" + kName + "Slc)")
+	}
+	x.linef("z.DecSendContainerState(codecSelfer_containerMapValue%s)", x.xs)
+	x.decStructMapSwitch(kName, varname, rtid, t)
+
+	x.line("} // end for " + tpfx + "j" + i)
+	x.linef("z.DecSendContainerState(codecSelfer_containerMapEnd%s)", x.xs)
+}
+
+func (x *genRunner) decStructArray(varname, lenvarname, breakString string, rtid uintptr, t reflect.Type) {
+	tpfx := genTempVarPfx
+	i := x.varsfx()
+	ti := x.ti.get(rtid, t)
+	tisfi := ti.sfip // always use sequence from file. decStruct expects same thing.
+	x.linef("var %sj%s int", tpfx, i)
+	x.linef("var %sb%s bool", tpfx, i)                        // break
+	x.linef("var %shl%s bool = %s >= 0", tpfx, i, lenvarname) // has length
+	for _, si := range tisfi {
+		var t2 reflect.StructField
+		if si.i != -1 {
+			t2 = t.Field(int(si.i))
+		} else {
+			// we must accommodate anonymous fields, where the embedded field is a nil pointer in the value.
+ // t2 = t.FieldByIndex(si.is) + t2typ := t + varname3 := varname + for _, ix := range si.is { + for t2typ.Kind() == reflect.Ptr { + t2typ = t2typ.Elem() + } + t2 = t2typ.Field(ix) + t2typ = t2.Type + varname3 = varname3 + "." + t2.Name + if t2typ.Kind() == reflect.Ptr { + x.linef("if %s == nil { %s = new(%s) }", varname3, varname3, x.genTypeName(t2typ.Elem())) + } + } + } + + x.linef("%sj%s++; if %shl%s { %sb%s = %sj%s > %s } else { %sb%s = r.CheckBreak() }", + tpfx, i, tpfx, i, tpfx, i, + tpfx, i, lenvarname, tpfx, i) + x.linef("if %sb%s { z.DecSendContainerState(codecSelfer_containerArrayEnd%s); %s }", + tpfx, i, x.xs, breakString) + x.linef("z.DecSendContainerState(codecSelfer_containerArrayElem%s)", x.xs) + x.decVar(varname+"."+t2.Name, t2.Type, true) + } + // read remaining values and throw away. + x.line("for {") + x.linef("%sj%s++; if %shl%s { %sb%s = %sj%s > %s } else { %sb%s = r.CheckBreak() }", + tpfx, i, tpfx, i, tpfx, i, + tpfx, i, lenvarname, tpfx, i) + x.linef("if %sb%s { break }", tpfx, i) + x.linef("z.DecSendContainerState(codecSelfer_containerArrayElem%s)", x.xs) + x.linef(`z.DecStructFieldNotFound(%sj%s - 1, "")`, tpfx, i) + x.line("}") + x.linef("z.DecSendContainerState(codecSelfer_containerArrayEnd%s)", x.xs) +} + +func (x *genRunner) decStruct(varname string, rtid uintptr, t reflect.Type) { + // if container is map + i := x.varsfx() + x.linef("%sct%s := r.ContainerType()", genTempVarPfx, i) + x.linef("if %sct%s == codecSelferValueTypeMap%s {", genTempVarPfx, i, x.xs) + x.line(genTempVarPfx + "l" + i + " := r.ReadMapStart()") + x.linef("if %sl%s == 0 {", genTempVarPfx, i) + x.linef("z.DecSendContainerState(codecSelfer_containerMapEnd%s)", x.xs) + if genUseOneFunctionForDecStructMap { + x.line("} else { ") + x.linef("x.codecDecodeSelfFromMap(%sl%s, d)", genTempVarPfx, i) + } else { + x.line("} else if " + genTempVarPfx + "l" + i + " > 0 { ") + x.line("x.codecDecodeSelfFromMapLenPrefix(" + genTempVarPfx + "l" + i + ", d)") + x.line("} else {") + x.line("x.codecDecodeSelfFromMapCheckBreak(" + genTempVarPfx + "l" + i + ", d)") + } + x.line("}") + + // else if container is array + x.linef("} else if %sct%s == codecSelferValueTypeArray%s {", genTempVarPfx, i, x.xs) + x.line(genTempVarPfx + "l" + i + " := r.ReadArrayStart()") + x.linef("if %sl%s == 0 {", genTempVarPfx, i) + x.linef("z.DecSendContainerState(codecSelfer_containerArrayEnd%s)", x.xs) + x.line("} else { ") + x.linef("x.codecDecodeSelfFromArray(%sl%s, d)", genTempVarPfx, i) + x.line("}") + // else panic + x.line("} else { ") + x.line("panic(codecSelferOnlyMapOrArrayEncodeToStructErr" + x.xs + ")") + x.line("} ") +} + +// -------- + +type genV struct { + // genV is either a primitive (Primitive != "") or a map (MapKey != "") or a slice + MapKey string + Elem string + Primitive string + Size int +} + +func (x *genRunner) newGenV(t reflect.Type) (v genV) { + switch t.Kind() { + case reflect.Slice, reflect.Array: + te := t.Elem() + v.Elem = x.genTypeName(te) + v.Size = int(te.Size()) + case reflect.Map: + te, tk := t.Elem(), t.Key() + v.Elem = x.genTypeName(te) + v.MapKey = x.genTypeName(tk) + v.Size = int(te.Size() + tk.Size()) + default: + panic("unexpected type for newGenV. Requires map or slice type") + } + return +} + +func (x *genV) MethodNamePfx(prefix string, prim bool) string { + var name []byte + if prefix != "" { + name = append(name, prefix...) + } + if prim { + name = append(name, genTitleCaseName(x.Primitive)...) + } else { + if x.MapKey == "" { + name = append(name, "Slice"...) 
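+			// e.g. genV{Elem: "int32"} with prefix "Dec" yields "DecSliceInt32"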
+		} else {
+			name = append(name, "Map"...)
+			name = append(name, genTitleCaseName(x.MapKey)...)
+		}
+		name = append(name, genTitleCaseName(x.Elem)...)
+	}
+	return string(name)
+
+}
+
+var genCheckVendor = os.Getenv("GO15VENDOREXPERIMENT") == "1"
+
+// genImportPath returns the import path of a non-predeclared named type, or an empty string otherwise.
+//
+// This handles the misbehaviour that occurs when 1.5-style vendoring is enabled,
+// where PkgPath returns the full path, including the vendoring prefix that should have been stripped.
+// We strip it here.
+func genImportPath(t reflect.Type) (s string) {
+	s = t.PkgPath()
+	if genCheckVendor {
+		// HACK: Misbehaviour occurs in go 1.5. May have to re-visit this later.
+		// if s contains /vendor/ OR startsWith vendor/, then return everything after it.
+		const vendorStart = "vendor/"
+		const vendorInline = "/vendor/"
+		if i := strings.LastIndex(s, vendorInline); i >= 0 {
+			s = s[i+len(vendorInline):]
+		} else if strings.HasPrefix(s, vendorStart) {
+			s = s[len(vendorStart):]
+		}
+	}
+	return
+}
+
+func genNonPtr(t reflect.Type) reflect.Type {
+	for t.Kind() == reflect.Ptr {
+		t = t.Elem()
+	}
+	return t
+}
+
+func genTitleCaseName(s string) string {
+	switch s {
+	case "interface{}":
+		return "Intf"
+	default:
+		return strings.ToUpper(s[0:1]) + s[1:]
+	}
+}
+
+func genMethodNameT(t reflect.Type, tRef reflect.Type) (n string) {
+	var ptrPfx string
+	for t.Kind() == reflect.Ptr {
+		ptrPfx += "Ptrto"
+		t = t.Elem()
+	}
+	tstr := t.String()
+	if tn := t.Name(); tn != "" {
+		if tRef != nil && genImportPath(t) == genImportPath(tRef) {
+			return ptrPfx + tn
+		} else {
+			if genQNameRegex.MatchString(tstr) {
+				return ptrPfx + strings.Replace(tstr, ".", "_", 1000)
+			} else {
+				return ptrPfx + genCustomTypeName(tstr)
+			}
+		}
+	}
+	switch t.Kind() {
+	case reflect.Map:
+		return ptrPfx + "Map" + genMethodNameT(t.Key(), tRef) + genMethodNameT(t.Elem(), tRef)
+	case reflect.Slice:
+		return ptrPfx + "Slice" + genMethodNameT(t.Elem(), tRef)
+	case reflect.Array:
+		return ptrPfx + "Array" + strconv.FormatInt(int64(t.Len()), 10) + genMethodNameT(t.Elem(), tRef)
+	case reflect.Chan:
+		var cx string
+		switch t.ChanDir() {
+		case reflect.SendDir:
+			cx = "ChanSend"
+		case reflect.RecvDir:
+			cx = "ChanRecv"
+		default:
+			cx = "Chan"
+		}
+		return ptrPfx + cx + genMethodNameT(t.Elem(), tRef)
+	default:
+		if t == intfTyp {
+			return ptrPfx + "Interface"
+		} else {
+			if tRef != nil && genImportPath(t) == genImportPath(tRef) {
+				if t.Name() != "" {
+					return ptrPfx + t.Name()
+				} else {
+					return ptrPfx + genCustomTypeName(tstr)
+				}
+			} else {
+				// best way to get the package name inclusive
+				// return ptrPfx + strings.Replace(tstr, ".", "_", 1000)
+				// return ptrPfx + genBase64enc.EncodeToString([]byte(tstr))
+				if t.Name() != "" && genQNameRegex.MatchString(tstr) {
+					return ptrPfx + strings.Replace(tstr, ".", "_", 1000)
+				} else {
+					return ptrPfx + genCustomTypeName(tstr)
+				}
+			}
+		}
+	}
+}
+
+// genCustomTypeName base64-encodes the t.String() value in such a way
+// that it can be used within a function name.
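+// The custom alphabet maps '+' and '/' to '_', and the trailing '=' padding
+// is stripped below, so the result is usable in a Go identifier.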
+func genCustomTypeName(tstr string) string { + len2 := genBase64enc.EncodedLen(len(tstr)) + bufx := make([]byte, len2) + genBase64enc.Encode(bufx, []byte(tstr)) + for i := len2 - 1; i >= 0; i-- { + if bufx[i] == '=' { + len2-- + } else { + break + } + } + return string(bufx[:len2]) +} + +func genIsImmutable(t reflect.Type) (v bool) { + return isImmutableKind(t.Kind()) +} + +type genInternal struct { + Values []genV + Unsafe bool +} + +func (x genInternal) FastpathLen() (l int) { + for _, v := range x.Values { + if v.Primitive == "" { + l++ + } + } + return +} + +func genInternalZeroValue(s string) string { + switch s { + case "interface{}": + return "nil" + case "bool": + return "false" + case "string": + return `""` + default: + return "0" + } +} + +func genInternalEncCommandAsString(s string, vname string) string { + switch s { + case "uint", "uint8", "uint16", "uint32", "uint64": + return "ee.EncodeUint(uint64(" + vname + "))" + case "int", "int8", "int16", "int32", "int64": + return "ee.EncodeInt(int64(" + vname + "))" + case "string": + return "ee.EncodeString(c_UTF8, " + vname + ")" + case "float32": + return "ee.EncodeFloat32(" + vname + ")" + case "float64": + return "ee.EncodeFloat64(" + vname + ")" + case "bool": + return "ee.EncodeBool(" + vname + ")" + case "symbol": + return "ee.EncodeSymbol(" + vname + ")" + default: + return "e.encode(" + vname + ")" + } +} + +func genInternalDecCommandAsString(s string) string { + switch s { + case "uint": + return "uint(dd.DecodeUint(uintBitsize))" + case "uint8": + return "uint8(dd.DecodeUint(8))" + case "uint16": + return "uint16(dd.DecodeUint(16))" + case "uint32": + return "uint32(dd.DecodeUint(32))" + case "uint64": + return "dd.DecodeUint(64)" + case "uintptr": + return "uintptr(dd.DecodeUint(uintBitsize))" + case "int": + return "int(dd.DecodeInt(intBitsize))" + case "int8": + return "int8(dd.DecodeInt(8))" + case "int16": + return "int16(dd.DecodeInt(16))" + case "int32": + return "int32(dd.DecodeInt(32))" + case "int64": + return "dd.DecodeInt(64)" + + case "string": + return "dd.DecodeString()" + case "float32": + return "float32(dd.DecodeFloat(true))" + case "float64": + return "dd.DecodeFloat(false)" + case "bool": + return "dd.DecodeBool()" + default: + panic(errors.New("gen internal: unknown type for decode: " + s)) + } +} + +func genInternalSortType(s string, elem bool) string { + for _, v := range [...]string{"int", "uint", "float", "bool", "string"} { + if strings.HasPrefix(s, v) { + if elem { + if v == "int" || v == "uint" || v == "float" { + return v + "64" + } else { + return v + } + } + return v + "Slice" + } + } + panic("sorttype: unexpected type: " + s) +} + +// var genInternalMu sync.Mutex +var genInternalV genInternal +var genInternalTmplFuncs template.FuncMap +var genInternalOnce sync.Once + +func genInternalInit() { + types := [...]string{ + "interface{}", + "string", + "float32", + "float64", + "uint", + "uint8", + "uint16", + "uint32", + "uint64", + "uintptr", + "int", + "int8", + "int16", + "int32", + "int64", + "bool", + } + // keep as slice, so it is in specific iteration order. 
+	// Initial order was uint64, string, interface{}, int, int64
+	mapvaltypes := [...]string{
+		"interface{}",
+		"string",
+		"uint",
+		"uint8",
+		"uint16",
+		"uint32",
+		"uint64",
+		"uintptr",
+		"int",
+		"int8",
+		"int16",
+		"int32",
+		"int64",
+		"float32",
+		"float64",
+		"bool",
+	}
+	wordSizeBytes := int(intBitsize) / 8
+
+	mapvaltypes2 := map[string]int{
+		"interface{}": 2 * wordSizeBytes,
+		"string":      2 * wordSizeBytes,
+		"uint":        1 * wordSizeBytes,
+		"uint8":       1,
+		"uint16":      2,
+		"uint32":      4,
+		"uint64":      8,
+		"uintptr":     1 * wordSizeBytes,
+		"int":         1 * wordSizeBytes,
+		"int8":        1,
+		"int16":       2,
+		"int32":       4,
+		"int64":       8,
+		"float32":     4,
+		"float64":     8,
+		"bool":        1,
+	}
+	var gt genInternal
+
+	// For each slice or map type, there must be a (symmetrical) Encode and Decode fast-path function
+	for _, s := range types {
+		gt.Values = append(gt.Values, genV{Primitive: s, Size: mapvaltypes2[s]})
+		if s != "uint8" { // do not generate fast path for slice of bytes. Treat specially already.
+			gt.Values = append(gt.Values, genV{Elem: s, Size: mapvaltypes2[s]})
+		}
+		if _, ok := mapvaltypes2[s]; !ok {
+			gt.Values = append(gt.Values, genV{MapKey: s, Elem: s, Size: 2 * mapvaltypes2[s]})
+		}
+		for _, ms := range mapvaltypes {
+			gt.Values = append(gt.Values, genV{MapKey: s, Elem: ms, Size: mapvaltypes2[s] + mapvaltypes2[ms]})
+		}
+	}
+
+	funcs := make(template.FuncMap)
+	// funcs["haspfx"] = strings.HasPrefix
+	funcs["encmd"] = genInternalEncCommandAsString
+	funcs["decmd"] = genInternalDecCommandAsString
+	funcs["zerocmd"] = genInternalZeroValue
+	funcs["hasprefix"] = strings.HasPrefix
+	funcs["sorttype"] = genInternalSortType
+
+	genInternalV = gt
+	genInternalTmplFuncs = funcs
+}
+
+// genInternalGoFile is used to generate source files from templates.
+// It is run by the program author alone.
+// Unfortunately, it has to be exported so that it can be called from a command line tool.
+// *** DO NOT USE ***
+func genInternalGoFile(r io.Reader, w io.Writer, safe bool) (err error) {
+	genInternalOnce.Do(genInternalInit)
+
+	gt := genInternalV
+	gt.Unsafe = !safe
+
+	t := template.New("").Funcs(genInternalTmplFuncs)
+
+	tmplstr, err := ioutil.ReadAll(r)
+	if err != nil {
+		return
+	}
+
+	if t, err = t.Parse(string(tmplstr)); err != nil {
+		return
+	}
+
+	var out bytes.Buffer
+	err = t.Execute(&out, gt)
+	if err != nil {
+		return
+	}
+
+	bout, err := format.Source(out.Bytes())
+	if err != nil {
+		w.Write(out.Bytes()) // write out if error, so we can still see.
+		// w.Write(bout) // write out if error, as much as possible, so we can still see.
+		return
+	}
+	w.Write(bout)
+	return
+}
diff --git a/vendor/github.com/coreos/etcd/Godeps/_workspace/src/github.com/ugorji/go/codec/helper.go b/vendor/github.com/coreos/etcd/Godeps/_workspace/src/github.com/ugorji/go/codec/helper.go
new file mode 100644
index 000000000..560014ae3
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/Godeps/_workspace/src/github.com/ugorji/go/codec/helper.go
@@ -0,0 +1,1129 @@
+// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved.
+// Use of this source code is governed by a MIT license found in the LICENSE file.
+
+package codec
+
+// Contains code shared by both encode and decode.
+
+// Some shared ideas around encoding/decoding
+// ------------------------------------------
+//
+// If an interface{} is passed, we first do a type assertion to see if it is
+// a primitive type or a map/slice of primitive types, and use a fastpath to handle it.
+// +// If we start with a reflect.Value, we are already in reflect.Value land and +// will try to grab the function for the underlying Type and directly call that function. +// This is more performant than calling reflect.Value.Interface(). +// +// This still helps us bypass many layers of reflection, and give best performance. +// +// Containers +// ------------ +// Containers in the stream are either associative arrays (key-value pairs) or +// regular arrays (indexed by incrementing integers). +// +// Some streams support indefinite-length containers, and use a breaking +// byte-sequence to denote that the container has come to an end. +// +// Some streams also are text-based, and use explicit separators to denote the +// end/beginning of different values. +// +// During encode, we use a high-level condition to determine how to iterate through +// the container. That decision is based on whether the container is text-based (with +// separators) or binary (without separators). If binary, we do not even call the +// encoding of separators. +// +// During decode, we use a different high-level condition to determine how to iterate +// through the containers. That decision is based on whether the stream contained +// a length prefix, or if it used explicit breaks. If length-prefixed, we assume that +// it has to be binary, and we do not even try to read separators. +// +// The only codec that may suffer (slightly) is cbor, and only when decoding indefinite-length. +// It may suffer because we treat it like a text-based codec, and read separators. +// However, this read is a no-op and the cost is insignificant. +// +// Philosophy +// ------------ +// On decode, this codec will update containers appropriately: +// - If struct, update fields from stream into fields of struct. +// If field in stream not found in struct, handle appropriately (based on option). +// If a struct field has no corresponding value in the stream, leave it AS IS. +// If nil in stream, set value to nil/zero value. +// - If map, update map from stream. +// If the stream value is NIL, set the map to nil. +// - if slice, try to update up to length of array in stream. +// if container len is less than stream array length, +// and container cannot be expanded, handled (based on option). +// This means you can decode 4-element stream array into 1-element array. +// +// ------------------------------------ +// On encode, user can specify omitEmpty. This means that the value will be omitted +// if the zero value. The problem may occur during decode, where omitted values do not affect +// the value being decoded into. This means that if decoding into a struct with an +// int field with current value=5, and the field is omitted in the stream, then after +// decoding, the value will still be 5 (not 0). +// omitEmpty only works if you guarantee that you always decode into zero-values. +// +// ------------------------------------ +// We could have truncated a map to remove keys not available in the stream, +// or set values in the struct which are not in the stream to their zero values. +// We decided against it because there is no efficient way to do it. +// We may introduce it as an option later. +// However, that will require enabling it for both runtime and code generation modes. +// +// To support truncate, we need to do 2 passes over the container: +// map +// - first collect all keys (e.g. 
in k1) +// - for each key in stream, mark k1 that the key should not be removed +// - after updating map, do second pass and call delete for all keys in k1 which are not marked +// struct: +// - for each field, track the *typeInfo s1 +// - iterate through all s1, and for each one not marked, set value to zero +// - this involves checking the possible anonymous fields which are nil ptrs. +// too much work. +// +// ------------------------------------------ +// Error Handling is done within the library using panic. +// +// This way, the code doesn't have to keep checking if an error has happened, +// and we don't have to keep sending the error value along with each call +// or storing it in the En|Decoder and checking it constantly along the way. +// +// The disadvantage is that small functions which use panics cannot be inlined. +// The code accounts for that by only using panics behind an interface; +// since interface calls cannot be inlined, this is irrelevant. +// +// We considered storing the error is En|Decoder. +// - once it has its err field set, it cannot be used again. +// - panicing will be optional, controlled by const flag. +// - code should always check error first and return early. +// We eventually decided against it as it makes the code clumsier to always +// check for these error conditions. + +import ( + "bytes" + "encoding" + "encoding/binary" + "errors" + "fmt" + "math" + "reflect" + "sort" + "strings" + "sync" + "time" +) + +const ( + scratchByteArrayLen = 32 + initCollectionCap = 32 // 32 is defensive. 16 is preferred. + + // Support encoding.(Binary|Text)(Unm|M)arshaler. + // This constant flag will enable or disable it. + supportMarshalInterfaces = true + + // Each Encoder or Decoder uses a cache of functions based on conditionals, + // so that the conditionals are not run every time. + // + // Either a map or a slice is used to keep track of the functions. + // The map is more natural, but has a higher cost than a slice/array. + // This flag (useMapForCodecCache) controls which is used. + // + // From benchmarks, slices with linear search perform better with < 32 entries. + // We have typically seen a high threshold of about 24 entries. + useMapForCodecCache = false + + // for debugging, set this to false, to catch panic traces. + // Note that this will always cause rpc tests to fail, since they need io.EOF sent via panic. + recoverPanicToErr = true + + // Fast path functions try to create a fast path encode or decode implementation + // for common maps and slices, by by-passing reflection altogether. + fastpathEnabled = true + + // if checkStructForEmptyValue, check structs fields to see if an empty value. + // This could be an expensive call, so possibly disable it. + checkStructForEmptyValue = false + + // if derefForIsEmptyValue, deref pointers and interfaces when checking isEmptyValue + derefForIsEmptyValue = false + + // if resetSliceElemToZeroValue, then on decoding a slice, reset the element to a zero value first. + // Only concern is that, if the slice already contained some garbage, we will decode into that garbage. + // The chances of this are slim, so leave this "optimization". + // TODO: should this be true, to ensure that we always decode into a "zero" "empty" value? 
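+	// Concretely: if the slice already holds a non-zero element (e.g. a map
+	// with old keys), decoding into it may merge with, rather than replace,
+	// that old content; resetting to the zero value first would avoid this,
+	// at the cost of an extra write per element.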
+ resetSliceElemToZeroValue bool = false +) + +var oneByteArr = [1]byte{0} +var zeroByteSlice = oneByteArr[:0:0] + +type charEncoding uint8 + +const ( + c_RAW charEncoding = iota + c_UTF8 + c_UTF16LE + c_UTF16BE + c_UTF32LE + c_UTF32BE +) + +// valueType is the stream type +type valueType uint8 + +const ( + valueTypeUnset valueType = iota + valueTypeNil + valueTypeInt + valueTypeUint + valueTypeFloat + valueTypeBool + valueTypeString + valueTypeSymbol + valueTypeBytes + valueTypeMap + valueTypeArray + valueTypeTimestamp + valueTypeExt + + // valueTypeInvalid = 0xff +) + +type seqType uint8 + +const ( + _ seqType = iota + seqTypeArray + seqTypeSlice + seqTypeChan +) + +// note that containerMapStart and containerArraySend are not sent. +// This is because the ReadXXXStart and EncodeXXXStart already does these. +type containerState uint8 + +const ( + _ containerState = iota + + containerMapStart // slot left open, since Driver method already covers it + containerMapKey + containerMapValue + containerMapEnd + containerArrayStart // slot left open, since Driver methods already cover it + containerArrayElem + containerArrayEnd +) + +type containerStateRecv interface { + sendContainerState(containerState) +} + +// mirror json.Marshaler and json.Unmarshaler here, +// so we don't import the encoding/json package +type jsonMarshaler interface { + MarshalJSON() ([]byte, error) +} +type jsonUnmarshaler interface { + UnmarshalJSON([]byte) error +} + +var ( + bigen = binary.BigEndian + structInfoFieldName = "_struct" + + mapStrIntfTyp = reflect.TypeOf(map[string]interface{}(nil)) + mapIntfIntfTyp = reflect.TypeOf(map[interface{}]interface{}(nil)) + intfSliceTyp = reflect.TypeOf([]interface{}(nil)) + intfTyp = intfSliceTyp.Elem() + + stringTyp = reflect.TypeOf("") + timeTyp = reflect.TypeOf(time.Time{}) + rawExtTyp = reflect.TypeOf(RawExt{}) + uint8SliceTyp = reflect.TypeOf([]uint8(nil)) + + mapBySliceTyp = reflect.TypeOf((*MapBySlice)(nil)).Elem() + + binaryMarshalerTyp = reflect.TypeOf((*encoding.BinaryMarshaler)(nil)).Elem() + binaryUnmarshalerTyp = reflect.TypeOf((*encoding.BinaryUnmarshaler)(nil)).Elem() + + textMarshalerTyp = reflect.TypeOf((*encoding.TextMarshaler)(nil)).Elem() + textUnmarshalerTyp = reflect.TypeOf((*encoding.TextUnmarshaler)(nil)).Elem() + + jsonMarshalerTyp = reflect.TypeOf((*jsonMarshaler)(nil)).Elem() + jsonUnmarshalerTyp = reflect.TypeOf((*jsonUnmarshaler)(nil)).Elem() + + selferTyp = reflect.TypeOf((*Selfer)(nil)).Elem() + + uint8SliceTypId = reflect.ValueOf(uint8SliceTyp).Pointer() + rawExtTypId = reflect.ValueOf(rawExtTyp).Pointer() + intfTypId = reflect.ValueOf(intfTyp).Pointer() + timeTypId = reflect.ValueOf(timeTyp).Pointer() + stringTypId = reflect.ValueOf(stringTyp).Pointer() + + mapStrIntfTypId = reflect.ValueOf(mapStrIntfTyp).Pointer() + mapIntfIntfTypId = reflect.ValueOf(mapIntfIntfTyp).Pointer() + intfSliceTypId = reflect.ValueOf(intfSliceTyp).Pointer() + // mapBySliceTypId = reflect.ValueOf(mapBySliceTyp).Pointer() + + intBitsize uint8 = uint8(reflect.TypeOf(int(0)).Bits()) + uintBitsize uint8 = uint8(reflect.TypeOf(uint(0)).Bits()) + + bsAll0x00 = []byte{0, 0, 0, 0, 0, 0, 0, 0} + bsAll0xff = []byte{0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff} + + chkOvf checkOverflow + + noFieldNameToStructFieldInfoErr = errors.New("no field name passed to parseStructFieldInfo") +) + +var defTypeInfos = NewTypeInfos([]string{"codec", "json"}) + +// Selfer defines methods by which a value can encode or decode itself. 
+// +// Any type which implements Selfer will be able to encode or decode itself. +// Consequently, during (en|de)code, this takes precedence over +// (text|binary)(M|Unm)arshal or extension support. +type Selfer interface { + CodecEncodeSelf(*Encoder) + CodecDecodeSelf(*Decoder) +} + +// MapBySlice represents a slice which should be encoded as a map in the stream. +// The slice contains a sequence of key-value pairs. +// This affords storing a map in a specific sequence in the stream. +// +// The support of MapBySlice affords the following: +// - A slice type which implements MapBySlice will be encoded as a map +// - A slice can be decoded from a map in the stream +type MapBySlice interface { + MapBySlice() +} + +// WARNING: DO NOT USE DIRECTLY. EXPORTED FOR GODOC BENEFIT. WILL BE REMOVED. +// +// BasicHandle encapsulates the common options and extension functions. +type BasicHandle struct { + // TypeInfos is used to get the type info for any type. + // + // If not configured, the default TypeInfos is used, which uses struct tag keys: codec, json + TypeInfos *TypeInfos + + extHandle + EncodeOptions + DecodeOptions +} + +func (x *BasicHandle) getBasicHandle() *BasicHandle { + return x +} + +func (x *BasicHandle) getTypeInfo(rtid uintptr, rt reflect.Type) (pti *typeInfo) { + if x.TypeInfos != nil { + return x.TypeInfos.get(rtid, rt) + } + return defTypeInfos.get(rtid, rt) +} + +// Handle is the interface for a specific encoding format. +// +// Typically, a Handle is pre-configured before first time use, +// and not modified while in use. Such a pre-configured Handle +// is safe for concurrent access. +type Handle interface { + getBasicHandle() *BasicHandle + newEncDriver(w *Encoder) encDriver + newDecDriver(r *Decoder) decDriver + isBinary() bool +} + +// RawExt represents raw unprocessed extension data. +// Some codecs will decode extension data as a *RawExt if there is no registered extension for the tag. +// +// Only one of Data or Value is nil. If Data is nil, then the content of the RawExt is in the Value. +type RawExt struct { + Tag uint64 + // Data is the []byte which represents the raw ext. If Data is nil, ext is exposed in Value. + // Data is used by codecs (e.g. binc, msgpack, simple) which do custom serialization of the types + Data []byte + // Value represents the extension, if Data is nil. + // Value is used by codecs (e.g. cbor) which use the format to do custom serialization of the types. + Value interface{} +} + +// BytesExt handles custom (de)serialization of types to/from []byte. +// It is used by codecs (e.g. binc, msgpack, simple) which do custom serialization of the types. +type BytesExt interface { + // WriteExt converts a value to a []byte. + // + // Note: v *may* be a pointer to the extension type, if the extension type was a struct or array. + WriteExt(v interface{}) []byte + + // ReadExt updates a value from a []byte. + ReadExt(dst interface{}, src []byte) +} + +// InterfaceExt handles custom (de)serialization of types to/from another interface{} value. +// The Encoder or Decoder will then handle the further (de)serialization of that known type. +// +// It is used by codecs (e.g. cbor, json) which use the format to do custom serialization of the types. +type InterfaceExt interface { + // ConvertExt converts a value into a simpler interface for easy encoding e.g. convert time.Time to int64. + // + // Note: v *may* be a pointer to the extension type, if the extension type was a struct or array. 
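+	//
+	// An illustrative (non-upstream) ConvertExt for a time.Time extension:
+	//
+	//	func (timeExt) ConvertExt(v interface{}) interface{} {
+	//		return v.(*time.Time).UnixNano()
+	//	}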
+	ConvertExt(v interface{}) interface{}
+
+	// UpdateExt updates a value from a simpler interface for easy decoding e.g. convert int64 to time.Time.
+	UpdateExt(dst interface{}, src interface{})
+}
+
+// Ext handles custom (de)serialization of custom types / extensions.
+type Ext interface {
+	BytesExt
+	InterfaceExt
+}
+
+// addExtWrapper is a wrapper implementation to support the former exported AddExt method.
+type addExtWrapper struct {
+	encFn func(reflect.Value) ([]byte, error)
+	decFn func(reflect.Value, []byte) error
+}
+
+func (x addExtWrapper) WriteExt(v interface{}) []byte {
+	bs, err := x.encFn(reflect.ValueOf(v))
+	if err != nil {
+		panic(err)
+	}
+	return bs
+}
+
+func (x addExtWrapper) ReadExt(v interface{}, bs []byte) {
+	if err := x.decFn(reflect.ValueOf(v), bs); err != nil {
+		panic(err)
+	}
+}
+
+func (x addExtWrapper) ConvertExt(v interface{}) interface{} {
+	return x.WriteExt(v)
+}
+
+func (x addExtWrapper) UpdateExt(dest interface{}, v interface{}) {
+	x.ReadExt(dest, v.([]byte))
+}
+
+type setExtWrapper struct {
+	b BytesExt
+	i InterfaceExt
+}
+
+func (x *setExtWrapper) WriteExt(v interface{}) []byte {
+	if x.b == nil {
+		panic("BytesExt.WriteExt is not supported")
+	}
+	return x.b.WriteExt(v)
+}
+
+func (x *setExtWrapper) ReadExt(v interface{}, bs []byte) {
+	if x.b == nil {
+		panic("BytesExt.ReadExt is not supported")
+	}
+	x.b.ReadExt(v, bs)
+}
+
+func (x *setExtWrapper) ConvertExt(v interface{}) interface{} {
+	if x.i == nil {
+		panic("InterfaceExt.ConvertExt is not supported")
+	}
+	return x.i.ConvertExt(v)
+}
+
+func (x *setExtWrapper) UpdateExt(dest interface{}, v interface{}) {
+	if x.i == nil {
+		panic("InterfaceExt.UpdateExt is not supported")
+	}
+	x.i.UpdateExt(dest, v)
+}
+
+// type errorString string
+// func (x errorString) Error() string { return string(x) }
+
+type binaryEncodingType struct{}
+
+func (_ binaryEncodingType) isBinary() bool { return true }
+
+type textEncodingType struct{}
+
+func (_ textEncodingType) isBinary() bool { return false }
+
+// noBuiltInTypes is embedded into many types which do not support builtins
+// e.g. msgpack, simple, cbor.
+type noBuiltInTypes struct{}
+
+func (_ noBuiltInTypes) IsBuiltinType(rt uintptr) bool { return false }
+func (_ noBuiltInTypes) EncodeBuiltin(rt uintptr, v interface{}) {}
+func (_ noBuiltInTypes) DecodeBuiltin(rt uintptr, v interface{}) {}
+
+type noStreamingCodec struct{}
+
+func (_ noStreamingCodec) CheckBreak() bool { return false }
+
+// bigenHelper.
+// Users must already slice the x completely, because we will not reslice.
+type bigenHelper struct {
+	x []byte // must be correctly sliced to appropriate len. slicing is a cost.
+	w encWriter
+}
+
+func (z bigenHelper) writeUint16(v uint16) {
+	bigen.PutUint16(z.x, v)
+	z.w.writeb(z.x)
+}
+
+func (z bigenHelper) writeUint32(v uint32) {
+	bigen.PutUint32(z.x, v)
+	z.w.writeb(z.x)
+}
+
+func (z bigenHelper) writeUint64(v uint64) {
+	bigen.PutUint64(z.x, v)
+	z.w.writeb(z.x)
+}
+
+type extTypeTagFn struct {
+	rtid uintptr
+	rt   reflect.Type
+	tag  uint64
+	ext  Ext
+}
+
+type extHandle []extTypeTagFn
+
+// DEPRECATED: Use SetBytesExt or SetInterfaceExt on the Handle instead.
+//
+// AddExt registers an encode and decode function for a reflect.Type.
+// AddExt internally calls SetExt.
+// To deregister an Ext, call AddExt with nil encfn and/or nil decfn.
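+//
+// Illustrative usage (a sketch only; myType and the function bodies are
+// placeholders, not part of this package):
+//
+//	var h MsgpackHandle
+//	h.AddExt(reflect.TypeOf(myType{}), 1,
+//		func(rv reflect.Value) ([]byte, error) { return encodeMyType(rv) },
+//		func(rv reflect.Value, bs []byte) error { return decodeMyType(rv, bs) })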
+func (o *extHandle) AddExt( + rt reflect.Type, tag byte, + encfn func(reflect.Value) ([]byte, error), decfn func(reflect.Value, []byte) error, +) (err error) { + if encfn == nil || decfn == nil { + return o.SetExt(rt, uint64(tag), nil) + } + return o.SetExt(rt, uint64(tag), addExtWrapper{encfn, decfn}) +} + +// DEPRECATED: Use SetBytesExt or SetInterfaceExt on the Handle instead. +// +// Note that the type must be a named type, and specifically not +// a pointer or Interface. An error is returned if that is not honored. +// +// To Deregister an ext, call SetExt with nil Ext +func (o *extHandle) SetExt(rt reflect.Type, tag uint64, ext Ext) (err error) { + // o is a pointer, because we may need to initialize it + if rt.PkgPath() == "" || rt.Kind() == reflect.Interface { + err = fmt.Errorf("codec.Handle.AddExt: Takes named type, especially not a pointer or interface: %T", + reflect.Zero(rt).Interface()) + return + } + + rtid := reflect.ValueOf(rt).Pointer() + for _, v := range *o { + if v.rtid == rtid { + v.tag, v.ext = tag, ext + return + } + } + + if *o == nil { + *o = make([]extTypeTagFn, 0, 4) + } + *o = append(*o, extTypeTagFn{rtid, rt, tag, ext}) + return +} + +func (o extHandle) getExt(rtid uintptr) *extTypeTagFn { + var v *extTypeTagFn + for i := range o { + v = &o[i] + if v.rtid == rtid { + return v + } + } + return nil +} + +func (o extHandle) getExtForTag(tag uint64) *extTypeTagFn { + var v *extTypeTagFn + for i := range o { + v = &o[i] + if v.tag == tag { + return v + } + } + return nil +} + +type structFieldInfo struct { + encName string // encode name + + // only one of 'i' or 'is' can be set. If 'i' is -1, then 'is' has been set. + + is []int // (recursive/embedded) field index in struct + i int16 // field index in struct + omitEmpty bool + toArray bool // if field is _struct, is the toArray set? +} + +// func (si *structFieldInfo) isZero() bool { +// return si.encName == "" && len(si.is) == 0 && si.i == 0 && !si.omitEmpty && !si.toArray +// } + +// rv returns the field of the struct. 
+// If anonymous, it returns an Invalid +func (si *structFieldInfo) field(v reflect.Value, update bool) (rv2 reflect.Value) { + if si.i != -1 { + v = v.Field(int(si.i)) + return v + } + // replicate FieldByIndex + for _, x := range si.is { + for v.Kind() == reflect.Ptr { + if v.IsNil() { + if !update { + return + } + v.Set(reflect.New(v.Type().Elem())) + } + v = v.Elem() + } + v = v.Field(x) + } + return v +} + +func (si *structFieldInfo) setToZeroValue(v reflect.Value) { + if si.i != -1 { + v = v.Field(int(si.i)) + v.Set(reflect.Zero(v.Type())) + // v.Set(reflect.New(v.Type()).Elem()) + // v.Set(reflect.New(v.Type())) + } else { + // replicate FieldByIndex + for _, x := range si.is { + for v.Kind() == reflect.Ptr { + if v.IsNil() { + return + } + v = v.Elem() + } + v = v.Field(x) + } + v.Set(reflect.Zero(v.Type())) + } +} + +func parseStructFieldInfo(fname string, stag string) *structFieldInfo { + // if fname == "" { + // panic(noFieldNameToStructFieldInfoErr) + // } + si := structFieldInfo{ + encName: fname, + } + + if stag != "" { + for i, s := range strings.Split(stag, ",") { + if i == 0 { + if s != "" { + si.encName = s + } + } else { + if s == "omitempty" { + si.omitEmpty = true + } else if s == "toarray" { + si.toArray = true + } + } + } + } + // si.encNameBs = []byte(si.encName) + return &si +} + +type sfiSortedByEncName []*structFieldInfo + +func (p sfiSortedByEncName) Len() int { + return len(p) +} + +func (p sfiSortedByEncName) Less(i, j int) bool { + return p[i].encName < p[j].encName +} + +func (p sfiSortedByEncName) Swap(i, j int) { + p[i], p[j] = p[j], p[i] +} + +// typeInfo keeps information about each type referenced in the encode/decode sequence. +// +// During an encode/decode sequence, we work as below: +// - If base is a built in type, en/decode base value +// - If base is registered as an extension, en/decode base value +// - If type is binary(M/Unm)arshaler, call Binary(M/Unm)arshal method +// - If type is text(M/Unm)arshaler, call Text(M/Unm)arshal method +// - Else decode appropriately based on the reflect.Kind +type typeInfo struct { + sfi []*structFieldInfo // sorted. Used when enc/dec struct to map. + sfip []*structFieldInfo // unsorted. Used when enc/dec struct to array. + + rt reflect.Type + rtid uintptr + + numMeth uint16 // number of methods + + // baseId gives pointer to the base reflect.Type, after deferencing + // the pointers. E.g. base type of ***time.Time is time.Time. 
+ base reflect.Type + baseId uintptr + baseIndir int8 // number of indirections to get to base + + mbs bool // base type (T or *T) is a MapBySlice + + bm bool // base type (T or *T) is a binaryMarshaler + bunm bool // base type (T or *T) is a binaryUnmarshaler + bmIndir int8 // number of indirections to get to binaryMarshaler type + bunmIndir int8 // number of indirections to get to binaryUnmarshaler type + + tm bool // base type (T or *T) is a textMarshaler + tunm bool // base type (T or *T) is a textUnmarshaler + tmIndir int8 // number of indirections to get to textMarshaler type + tunmIndir int8 // number of indirections to get to textUnmarshaler type + + jm bool // base type (T or *T) is a jsonMarshaler + junm bool // base type (T or *T) is a jsonUnmarshaler + jmIndir int8 // number of indirections to get to jsonMarshaler type + junmIndir int8 // number of indirections to get to jsonUnmarshaler type + + cs bool // base type (T or *T) is a Selfer + csIndir int8 // number of indirections to get to Selfer type + + toArray bool // whether this (struct) type should be encoded as an array +} + +func (ti *typeInfo) indexForEncName(name string) int { + //tisfi := ti.sfi + const binarySearchThreshold = 16 + if sfilen := len(ti.sfi); sfilen < binarySearchThreshold { + // linear search. faster than binary search in my testing up to 16-field structs. + for i, si := range ti.sfi { + if si.encName == name { + return i + } + } + } else { + // binary search. adapted from sort/search.go. + h, i, j := 0, 0, sfilen + for i < j { + h = i + (j-i)/2 + if ti.sfi[h].encName < name { + i = h + 1 + } else { + j = h + } + } + if i < sfilen && ti.sfi[i].encName == name { + return i + } + } + return -1 +} + +// TypeInfos caches typeInfo for each type on first inspection. +// +// It is configured with a set of tag keys, which are used to get +// configuration for the type. +type TypeInfos struct { + infos map[uintptr]*typeInfo + mu sync.RWMutex + tags []string +} + +// NewTypeInfos creates a TypeInfos given a set of struct tags keys. +// +// This allows users customize the struct tag keys which contain configuration +// of their types. +func NewTypeInfos(tags []string) *TypeInfos { + return &TypeInfos{tags: tags, infos: make(map[uintptr]*typeInfo, 64)} +} + +func (x *TypeInfos) structTag(t reflect.StructTag) (s string) { + // check for tags: codec, json, in that order. + // this allows seamless support for many configured structs. + for _, x := range x.tags { + s = t.Get(x) + if s != "" { + return s + } + } + return +} + +func (x *TypeInfos) get(rtid uintptr, rt reflect.Type) (pti *typeInfo) { + var ok bool + x.mu.RLock() + pti, ok = x.infos[rtid] + x.mu.RUnlock() + if ok { + return + } + + // do not hold lock while computing this. + // it may lead to duplication, but that's ok. 
+ ti := typeInfo{rt: rt, rtid: rtid} + ti.numMeth = uint16(rt.NumMethod()) + + var indir int8 + if ok, indir = implementsIntf(rt, binaryMarshalerTyp); ok { + ti.bm, ti.bmIndir = true, indir + } + if ok, indir = implementsIntf(rt, binaryUnmarshalerTyp); ok { + ti.bunm, ti.bunmIndir = true, indir + } + if ok, indir = implementsIntf(rt, textMarshalerTyp); ok { + ti.tm, ti.tmIndir = true, indir + } + if ok, indir = implementsIntf(rt, textUnmarshalerTyp); ok { + ti.tunm, ti.tunmIndir = true, indir + } + if ok, indir = implementsIntf(rt, jsonMarshalerTyp); ok { + ti.jm, ti.jmIndir = true, indir + } + if ok, indir = implementsIntf(rt, jsonUnmarshalerTyp); ok { + ti.junm, ti.junmIndir = true, indir + } + if ok, indir = implementsIntf(rt, selferTyp); ok { + ti.cs, ti.csIndir = true, indir + } + if ok, _ = implementsIntf(rt, mapBySliceTyp); ok { + ti.mbs = true + } + + pt := rt + var ptIndir int8 + // for ; pt.Kind() == reflect.Ptr; pt, ptIndir = pt.Elem(), ptIndir+1 { } + for pt.Kind() == reflect.Ptr { + pt = pt.Elem() + ptIndir++ + } + if ptIndir == 0 { + ti.base = rt + ti.baseId = rtid + } else { + ti.base = pt + ti.baseId = reflect.ValueOf(pt).Pointer() + ti.baseIndir = ptIndir + } + + if rt.Kind() == reflect.Struct { + var siInfo *structFieldInfo + if f, ok := rt.FieldByName(structInfoFieldName); ok { + siInfo = parseStructFieldInfo(structInfoFieldName, x.structTag(f.Tag)) + ti.toArray = siInfo.toArray + } + sfip := make([]*structFieldInfo, 0, rt.NumField()) + x.rget(rt, nil, make(map[string]bool, 16), &sfip, siInfo) + + ti.sfip = make([]*structFieldInfo, len(sfip)) + ti.sfi = make([]*structFieldInfo, len(sfip)) + copy(ti.sfip, sfip) + sort.Sort(sfiSortedByEncName(sfip)) + copy(ti.sfi, sfip) + } + // sfi = sfip + + x.mu.Lock() + if pti, ok = x.infos[rtid]; !ok { + pti = &ti + x.infos[rtid] = pti + } + x.mu.Unlock() + return +} + +func (x *TypeInfos) rget(rt reflect.Type, indexstack []int, fnameToHastag map[string]bool, + sfi *[]*structFieldInfo, siInfo *structFieldInfo, +) { + for j := 0; j < rt.NumField(); j++ { + f := rt.Field(j) + fkind := f.Type.Kind() + // skip if a func type, or is unexported, or structTag value == "-" + if fkind == reflect.Func { + continue + } + // if r1, _ := utf8.DecodeRuneInString(f.Name); r1 == utf8.RuneError || !unicode.IsUpper(r1) { + if f.PkgPath != "" && !f.Anonymous { // unexported, not embedded + continue + } + stag := x.structTag(f.Tag) + if stag == "-" { + continue + } + var si *structFieldInfo + // if anonymous and no struct tag (or it's blank), and a struct (or pointer to struct), inline it. + if f.Anonymous && fkind != reflect.Interface { + doInline := stag == "" + if !doInline { + si = parseStructFieldInfo("", stag) + doInline = si.encName == "" + // doInline = si.isZero() + } + if doInline { + ft := f.Type + for ft.Kind() == reflect.Ptr { + ft = ft.Elem() + } + if ft.Kind() == reflect.Struct { + indexstack2 := make([]int, len(indexstack)+1, len(indexstack)+4) + copy(indexstack2, indexstack) + indexstack2[len(indexstack)] = j + // indexstack2 := append(append(make([]int, 0, len(indexstack)+4), indexstack...), j) + x.rget(ft, indexstack2, fnameToHastag, sfi, siInfo) + continue + } + } + } + + // after the anonymous dance: if an unexported field, skip + if f.PkgPath != "" { // unexported + continue + } + + // do not let fields with same name in embedded structs override field at higher level. 
+ // this must be done after anonymous check, to allow anonymous field + // still include their child fields + if _, ok := fnameToHastag[f.Name]; ok { + continue + } + if f.Name == "" { + panic(noFieldNameToStructFieldInfoErr) + } + if si == nil { + si = parseStructFieldInfo(f.Name, stag) + } else if si.encName == "" { + si.encName = f.Name + } + // si.ikind = int(f.Type.Kind()) + if len(indexstack) == 0 { + si.i = int16(j) + } else { + si.i = -1 + si.is = append(append(make([]int, 0, len(indexstack)+4), indexstack...), j) + } + + if siInfo != nil { + if siInfo.omitEmpty { + si.omitEmpty = true + } + } + *sfi = append(*sfi, si) + fnameToHastag[f.Name] = stag != "" + } +} + +func panicToErr(err *error) { + if recoverPanicToErr { + if x := recover(); x != nil { + //debug.PrintStack() + panicValToErr(x, err) + } + } +} + +// func doPanic(tag string, format string, params ...interface{}) { +// params2 := make([]interface{}, len(params)+1) +// params2[0] = tag +// copy(params2[1:], params) +// panic(fmt.Errorf("%s: "+format, params2...)) +// } + +func isImmutableKind(k reflect.Kind) (v bool) { + return false || + k == reflect.Int || + k == reflect.Int8 || + k == reflect.Int16 || + k == reflect.Int32 || + k == reflect.Int64 || + k == reflect.Uint || + k == reflect.Uint8 || + k == reflect.Uint16 || + k == reflect.Uint32 || + k == reflect.Uint64 || + k == reflect.Uintptr || + k == reflect.Float32 || + k == reflect.Float64 || + k == reflect.Bool || + k == reflect.String +} + +// these functions must be inlinable, and not call anybody +type checkOverflow struct{} + +func (_ checkOverflow) Float32(f float64) (overflow bool) { + if f < 0 { + f = -f + } + return math.MaxFloat32 < f && f <= math.MaxFloat64 +} + +func (_ checkOverflow) Uint(v uint64, bitsize uint8) (overflow bool) { + if bitsize == 0 || bitsize >= 64 || v == 0 { + return + } + if trunc := (v << (64 - bitsize)) >> (64 - bitsize); v != trunc { + overflow = true + } + return +} + +func (_ checkOverflow) Int(v int64, bitsize uint8) (overflow bool) { + if bitsize == 0 || bitsize >= 64 || v == 0 { + return + } + if trunc := (v << (64 - bitsize)) >> (64 - bitsize); v != trunc { + overflow = true + } + return +} + +func (_ checkOverflow) SignedInt(v uint64) (i int64, overflow bool) { + //e.g. 
-127 to 128 for int8 + pos := (v >> 63) == 0 + ui2 := v & 0x7fffffffffffffff + if pos { + if ui2 > math.MaxInt64 { + overflow = true + return + } + } else { + if ui2 > math.MaxInt64-1 { + overflow = true + return + } + } + i = int64(v) + return +} + +// ------------------ SORT ----------------- + +func isNaN(f float64) bool { return f != f } + +// ----------------------- + +type intSlice []int64 +type uintSlice []uint64 +type floatSlice []float64 +type boolSlice []bool +type stringSlice []string +type bytesSlice [][]byte + +func (p intSlice) Len() int { return len(p) } +func (p intSlice) Less(i, j int) bool { return p[i] < p[j] } +func (p intSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } + +func (p uintSlice) Len() int { return len(p) } +func (p uintSlice) Less(i, j int) bool { return p[i] < p[j] } +func (p uintSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } + +func (p floatSlice) Len() int { return len(p) } +func (p floatSlice) Less(i, j int) bool { + return p[i] < p[j] || isNaN(p[i]) && !isNaN(p[j]) +} +func (p floatSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } + +func (p stringSlice) Len() int { return len(p) } +func (p stringSlice) Less(i, j int) bool { return p[i] < p[j] } +func (p stringSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } + +func (p bytesSlice) Len() int { return len(p) } +func (p bytesSlice) Less(i, j int) bool { return bytes.Compare(p[i], p[j]) == -1 } +func (p bytesSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } + +func (p boolSlice) Len() int { return len(p) } +func (p boolSlice) Less(i, j int) bool { return !p[i] && p[j] } +func (p boolSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } + +// --------------------- + +type intRv struct { + v int64 + r reflect.Value +} +type intRvSlice []intRv +type uintRv struct { + v uint64 + r reflect.Value +} +type uintRvSlice []uintRv +type floatRv struct { + v float64 + r reflect.Value +} +type floatRvSlice []floatRv +type boolRv struct { + v bool + r reflect.Value +} +type boolRvSlice []boolRv +type stringRv struct { + v string + r reflect.Value +} +type stringRvSlice []stringRv +type bytesRv struct { + v []byte + r reflect.Value +} +type bytesRvSlice []bytesRv + +func (p intRvSlice) Len() int { return len(p) } +func (p intRvSlice) Less(i, j int) bool { return p[i].v < p[j].v } +func (p intRvSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } + +func (p uintRvSlice) Len() int { return len(p) } +func (p uintRvSlice) Less(i, j int) bool { return p[i].v < p[j].v } +func (p uintRvSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } + +func (p floatRvSlice) Len() int { return len(p) } +func (p floatRvSlice) Less(i, j int) bool { + return p[i].v < p[j].v || isNaN(p[i].v) && !isNaN(p[j].v) +} +func (p floatRvSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } + +func (p stringRvSlice) Len() int { return len(p) } +func (p stringRvSlice) Less(i, j int) bool { return p[i].v < p[j].v } +func (p stringRvSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } + +func (p bytesRvSlice) Len() int { return len(p) } +func (p bytesRvSlice) Less(i, j int) bool { return bytes.Compare(p[i].v, p[j].v) == -1 } +func (p bytesRvSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } + +func (p boolRvSlice) Len() int { return len(p) } +func (p boolRvSlice) Less(i, j int) bool { return !p[i].v && p[j].v } +func (p boolRvSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } + +// ----------------- + +type bytesI struct { + v []byte + i interface{} +} + +type bytesISlice []bytesI + +func (p bytesISlice) Len() int { return len(p) } +func (p bytesISlice) Less(i, j int) 
bool { return bytes.Compare(p[i].v, p[j].v) == -1 } +func (p bytesISlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } diff --git a/vendor/github.com/coreos/etcd/Godeps/_workspace/src/github.com/ugorji/go/codec/helper_internal.go b/vendor/github.com/coreos/etcd/Godeps/_workspace/src/github.com/ugorji/go/codec/helper_internal.go new file mode 100644 index 000000000..dea981fbb --- /dev/null +++ b/vendor/github.com/coreos/etcd/Godeps/_workspace/src/github.com/ugorji/go/codec/helper_internal.go @@ -0,0 +1,242 @@ +// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved. +// Use of this source code is governed by a MIT license found in the LICENSE file. + +package codec + +// All non-std package dependencies live in this file, +// so porting to different environment is easy (just update functions). + +import ( + "errors" + "fmt" + "math" + "reflect" +) + +func panicValToErr(panicVal interface{}, err *error) { + if panicVal == nil { + return + } + // case nil + switch xerr := panicVal.(type) { + case error: + *err = xerr + case string: + *err = errors.New(xerr) + default: + *err = fmt.Errorf("%v", panicVal) + } + return +} + +func hIsEmptyValue(v reflect.Value, deref, checkStruct bool) bool { + switch v.Kind() { + case reflect.Invalid: + return true + case reflect.Array, reflect.Map, reflect.Slice, reflect.String: + return v.Len() == 0 + case reflect.Bool: + return !v.Bool() + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return v.Int() == 0 + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return v.Uint() == 0 + case reflect.Float32, reflect.Float64: + return v.Float() == 0 + case reflect.Interface, reflect.Ptr: + if deref { + if v.IsNil() { + return true + } + return hIsEmptyValue(v.Elem(), deref, checkStruct) + } else { + return v.IsNil() + } + case reflect.Struct: + if !checkStruct { + return false + } + // return true if all fields are empty. else return false. + // we cannot use equality check, because some fields may be maps/slices/etc + // and consequently the structs are not comparable. + // return v.Interface() == reflect.Zero(v.Type()).Interface() + for i, n := 0, v.NumField(); i < n; i++ { + if !hIsEmptyValue(v.Field(i), deref, checkStruct) { + return false + } + } + return true + } + return false +} + +func isEmptyValue(v reflect.Value) bool { + return hIsEmptyValue(v, derefForIsEmptyValue, checkStructForEmptyValue) +} + +func pruneSignExt(v []byte, pos bool) (n int) { + if len(v) < 2 { + } else if pos && v[0] == 0 { + for ; v[n] == 0 && n+1 < len(v) && (v[n+1]&(1<<7) == 0); n++ { + } + } else if !pos && v[0] == 0xff { + for ; v[n] == 0xff && n+1 < len(v) && (v[n+1]&(1<<7) != 0); n++ { + } + } + return +} + +func implementsIntf(typ, iTyp reflect.Type) (success bool, indir int8) { + if typ == nil { + return + } + rt := typ + // The type might be a pointer and we need to keep + // dereferencing to the base type until we find an implementation. + for { + if rt.Implements(iTyp) { + return true, indir + } + if p := rt; p.Kind() == reflect.Ptr { + indir++ + if indir >= math.MaxInt8 { // insane number of indirections + return false, 0 + } + rt = p.Elem() + continue + } + break + } + // No luck yet, but if this is a base type (non-pointer), the pointer might satisfy. + if typ.Kind() != reflect.Ptr { + // Not a pointer, but does the pointer work? + if reflect.PtrTo(typ).Implements(iTyp) { + return true, -1 + } + } + return false, 0 +} + +// validate that this function is correct ... 
+// culled from OGRE (Object-Oriented Graphics Rendering Engine) +// function: halfToFloatI (http://stderr.org/doc/ogre-doc/api/OgreBitwise_8h-source.html) +func halfFloatToFloatBits(yy uint16) (d uint32) { + y := uint32(yy) + s := (y >> 15) & 0x01 + e := (y >> 10) & 0x1f + m := y & 0x03ff + + if e == 0 { + if m == 0 { // plu or minus 0 + return s << 31 + } else { // Denormalized number -- renormalize it + for (m & 0x00000400) == 0 { + m <<= 1 + e -= 1 + } + e += 1 + const zz uint32 = 0x0400 + m &= ^zz + } + } else if e == 31 { + if m == 0 { // Inf + return (s << 31) | 0x7f800000 + } else { // NaN + return (s << 31) | 0x7f800000 | (m << 13) + } + } + e = e + (127 - 15) + m = m << 13 + return (s << 31) | (e << 23) | m +} + +// GrowCap will return a new capacity for a slice, given the following: +// - oldCap: current capacity +// - unit: in-memory size of an element +// - num: number of elements to add +func growCap(oldCap, unit, num int) (newCap int) { + // appendslice logic (if cap < 1024, *2, else *1.25): + // leads to many copy calls, especially when copying bytes. + // bytes.Buffer model (2*cap + n): much better for bytes. + // smarter way is to take the byte-size of the appended element(type) into account + + // maintain 3 thresholds: + // t1: if cap <= t1, newcap = 2x + // t2: if cap <= t2, newcap = 1.75x + // t3: if cap <= t3, newcap = 1.5x + // else newcap = 1.25x + // + // t1, t2, t3 >= 1024 always. + // i.e. if unit size >= 16, then always do 2x or 1.25x (ie t1, t2, t3 are all same) + // + // With this, appending for bytes increase by: + // 100% up to 4K + // 75% up to 8K + // 50% up to 16K + // 25% beyond that + + // unit can be 0 e.g. for struct{}{}; handle that appropriately + var t1, t2, t3 int // thresholds + if unit <= 1 { + t1, t2, t3 = 4*1024, 8*1024, 16*1024 + } else if unit < 16 { + t3 = 16 / unit * 1024 + t1 = t3 * 1 / 4 + t2 = t3 * 2 / 4 + } else { + t1, t2, t3 = 1024, 1024, 1024 + } + + var x int // temporary variable + + // x is multiplier here: one of 5, 6, 7 or 8; incr of 25%, 50%, 75% or 100% respectively + if oldCap <= t1 { // [0,t1] + x = 8 + } else if oldCap > t3 { // (t3,infinity] + x = 5 + } else if oldCap <= t2 { // (t1,t2] + x = 7 + } else { // (t2,t3] + x = 6 + } + newCap = x * oldCap / 4 + + if num > 0 { + newCap += num + } + + // ensure newCap is a multiple of 64 (if it is > 64) or 16. + if newCap > 64 { + if x = newCap % 64; x != 0 { + x = newCap / 64 + newCap = 64 * (x + 1) + } + } else { + if x = newCap % 16; x != 0 { + x = newCap / 16 + newCap = 16 * (x + 1) + } + } + return +} + +func expandSliceValue(s reflect.Value, num int) reflect.Value { + if num <= 0 { + return s + } + l0 := s.Len() + l1 := l0 + num // new slice length + if l1 < l0 { + panic("ExpandSlice: slice overflow") + } + c0 := s.Cap() + if l1 <= c0 { + return s.Slice(0, l1) + } + st := s.Type() + c1 := growCap(c0, int(st.Elem().Size()), num) + s2 := reflect.MakeSlice(st, l1, c1) + // println("expandslicevalue: cap-old: ", c0, ", cap-new: ", c1, ", len-new: ", l1) + reflect.Copy(s2, s) + return s2 +} diff --git a/vendor/github.com/coreos/etcd/Godeps/_workspace/src/github.com/ugorji/go/codec/helper_not_unsafe.go b/vendor/github.com/coreos/etcd/Godeps/_workspace/src/github.com/ugorji/go/codec/helper_not_unsafe.go new file mode 100644 index 000000000..7c2ffc0fd --- /dev/null +++ b/vendor/github.com/coreos/etcd/Godeps/_workspace/src/github.com/ugorji/go/codec/helper_not_unsafe.go @@ -0,0 +1,20 @@ +//+build !unsafe + +// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved. 
+// Use of this source code is governed by a MIT license found in the LICENSE file. + +package codec + +// stringView returns a view of the []byte as a string. +// In unsafe mode, it doesn't incur allocation and copying caused by conversion. +// In regular safe mode, it is an allocation and copy. +func stringView(v []byte) string { + return string(v) +} + +// bytesView returns a view of the string as a []byte. +// In unsafe mode, it doesn't incur allocation and copying caused by conversion. +// In regular safe mode, it is an allocation and copy. +func bytesView(v string) []byte { + return []byte(v) +} diff --git a/vendor/github.com/coreos/etcd/Godeps/_workspace/src/github.com/ugorji/go/codec/helper_test.go b/vendor/github.com/coreos/etcd/Godeps/_workspace/src/github.com/ugorji/go/codec/helper_test.go new file mode 100644 index 000000000..e1dea52f4 --- /dev/null +++ b/vendor/github.com/coreos/etcd/Godeps/_workspace/src/github.com/ugorji/go/codec/helper_test.go @@ -0,0 +1,242 @@ +// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved. +// Use of this source code is governed by a MIT license found in the LICENSE file. + +package codec + +// All non-std package dependencies related to testing live in this file, +// so porting to different environment is easy (just update functions). +// +// This file sets up the variables used, including testInitFns. +// Each file should add initialization that should be performed +// after flags are parsed. +// +// init is a multi-step process: +// - setup vars (handled by init functions in each file) +// - parse flags +// - setup derived vars (handled by pre-init registered functions - registered in init function) +// - post init (handled by post-init registered functions - registered in init function) +// This way, no one has to manage carefully control the initialization +// using file names, etc. +// +// Tests which require external dependencies need the -tag=x parameter. +// They should be run as: +// go test -tags=x -run=. +// Benchmarks should also take this parameter, to include the sereal, xdr, etc. +// To run against codecgen, etc, make sure you pass extra parameters. +// Example usage: +// go test "-tags=x codecgen unsafe" -bench=. +// +// To fully test everything: +// go test -tags=x -benchtime=100ms -tv -bg -bi -brw -bu -v -run=. -bench=. + +// Handling flags +// codec_test.go will define a set of global flags for testing, including: +// - Use Reset +// - Use IO reader/writer (vs direct bytes) +// - Set Canonical +// - Set InternStrings +// - Use Symbols +// +// This way, we can test them all by running same set of tests with a different +// set of flags. +// +// Following this, all the benchmarks will utilize flags set by codec_test.go +// and will not redefine these "global" flags. 
+ +import ( + "bytes" + "errors" + "flag" + "fmt" + "reflect" + "sync" + "testing" +) + +type testHED struct { + H Handle + E *Encoder + D *Decoder +} + +var ( + testNoopH = NoopHandle(8) + testMsgpackH = &MsgpackHandle{} + testBincH = &BincHandle{} + testSimpleH = &SimpleHandle{} + testCborH = &CborHandle{} + testJsonH = &JsonHandle{} + + testHandles []Handle + testPreInitFns []func() + testPostInitFns []func() + + testOnce sync.Once + + testHEDs []testHED +) + +func init() { + testHEDs = make([]testHED, 0, 32) + testHandles = append(testHandles, + testNoopH, testMsgpackH, testBincH, testSimpleH, + testCborH, testJsonH) +} + +func testHEDGet(h Handle) *testHED { + for i := range testHEDs { + v := &testHEDs[i] + if v.H == h { + return v + } + } + testHEDs = append(testHEDs, testHED{h, NewEncoder(nil, h), NewDecoder(nil, h)}) + return &testHEDs[len(testHEDs)-1] +} + +func testInitAll() { + flag.Parse() + for _, f := range testPreInitFns { + f() + } + for _, f := range testPostInitFns { + f() + } +} + +func testCodecEncode(ts interface{}, bsIn []byte, + fn func([]byte) *bytes.Buffer, h Handle) (bs []byte, err error) { + // bs = make([]byte, 0, approxSize) + var e *Encoder + var buf *bytes.Buffer + if testUseReset { + e = testHEDGet(h).E + } else { + e = NewEncoder(nil, h) + } + if testUseIoEncDec { + buf = fn(bsIn) + e.Reset(buf) + } else { + bs = bsIn + e.ResetBytes(&bs) + } + if testUseMust { + e.MustEncode(ts) + } else { + err = e.Encode(ts) + } + if testUseIoEncDec { + bs = buf.Bytes() + } + return +} + +func testCodecDecode(bs []byte, ts interface{}, h Handle) (err error) { + var d *Decoder + var buf *bytes.Reader + if testUseReset { + d = testHEDGet(h).D + } else { + d = NewDecoder(nil, h) + } + if testUseIoEncDec { + buf = bytes.NewReader(bs) + d.Reset(buf) + } else { + d.ResetBytes(bs) + } + if testUseMust { + d.MustDecode(ts) + } else { + err = d.Decode(ts) + } + return +} + +// ----- functions below are used only by tests (not benchmarks) + +const ( + testLogToT = true + failNowOnFail = true +) + +func checkErrT(t *testing.T, err error) { + if err != nil { + logT(t, err.Error()) + failT(t) + } +} + +func checkEqualT(t *testing.T, v1 interface{}, v2 interface{}, desc string) (err error) { + if err = deepEqual(v1, v2); err != nil { + logT(t, "Not Equal: %s: %v. v1: %v, v2: %v", desc, err, v1, v2) + failT(t) + } + return +} + +func failT(t *testing.T) { + if failNowOnFail { + t.FailNow() + } else { + t.Fail() + } +} + +// --- these functions are used by both benchmarks and tests + +func deepEqual(v1, v2 interface{}) (err error) { + if !reflect.DeepEqual(v1, v2) { + err = errors.New("Not Match") + } + return +} + +func logT(x interface{}, format string, args ...interface{}) { + if t, ok := x.(*testing.T); ok && t != nil && testLogToT { + if testVerbose { + t.Logf(format, args...) + } + } else if b, ok := x.(*testing.B); ok && b != nil && testLogToT { + b.Logf(format, args...) + } else { + if len(format) == 0 || format[len(format)-1] != '\n' { + format = format + "\n" + } + fmt.Printf(format, args...) 
+ } +} + +func approxDataSize(rv reflect.Value) (sum int) { + switch rk := rv.Kind(); rk { + case reflect.Invalid: + case reflect.Ptr, reflect.Interface: + sum += int(rv.Type().Size()) + sum += approxDataSize(rv.Elem()) + case reflect.Slice: + sum += int(rv.Type().Size()) + for j := 0; j < rv.Len(); j++ { + sum += approxDataSize(rv.Index(j)) + } + case reflect.String: + sum += int(rv.Type().Size()) + sum += rv.Len() + case reflect.Map: + sum += int(rv.Type().Size()) + for _, mk := range rv.MapKeys() { + sum += approxDataSize(mk) + sum += approxDataSize(rv.MapIndex(mk)) + } + case reflect.Struct: + //struct size already includes the full data size. + //sum += int(rv.Type().Size()) + for j := 0; j < rv.NumField(); j++ { + sum += approxDataSize(rv.Field(j)) + } + default: + //pure value types + sum += int(rv.Type().Size()) + } + return +} diff --git a/vendor/github.com/coreos/etcd/Godeps/_workspace/src/github.com/ugorji/go/codec/helper_unsafe.go b/vendor/github.com/coreos/etcd/Godeps/_workspace/src/github.com/ugorji/go/codec/helper_unsafe.go new file mode 100644 index 000000000..373b2b102 --- /dev/null +++ b/vendor/github.com/coreos/etcd/Godeps/_workspace/src/github.com/ugorji/go/codec/helper_unsafe.go @@ -0,0 +1,45 @@ +//+build unsafe + +// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved. +// Use of this source code is governed by a MIT license found in the LICENSE file. + +package codec + +import ( + "unsafe" +) + +// This file has unsafe variants of some helper methods. + +type unsafeString struct { + Data uintptr + Len int +} + +type unsafeBytes struct { + Data uintptr + Len int + Cap int +} + +// stringView returns a view of the []byte as a string. +// In unsafe mode, it doesn't incur allocation and copying caused by conversion. +// In regular safe mode, it is an allocation and copy. +func stringView(v []byte) string { + if len(v) == 0 { + return "" + } + x := unsafeString{uintptr(unsafe.Pointer(&v[0])), len(v)} + return *(*string)(unsafe.Pointer(&x)) +} + +// bytesView returns a view of the string as a []byte. +// In unsafe mode, it doesn't incur allocation and copying caused by conversion. +// In regular safe mode, it is an allocation and copy. +func bytesView(v string) []byte { + if len(v) == 0 { + return zeroByteSlice + } + x := unsafeBytes{uintptr(unsafe.Pointer(&v)), len(v), len(v)} + return *(*[]byte)(unsafe.Pointer(&x)) +} diff --git a/vendor/github.com/coreos/etcd/Godeps/_workspace/src/github.com/ugorji/go/codec/json.go b/vendor/github.com/coreos/etcd/Godeps/_workspace/src/github.com/ugorji/go/codec/json.go new file mode 100644 index 000000000..a18a5f706 --- /dev/null +++ b/vendor/github.com/coreos/etcd/Godeps/_workspace/src/github.com/ugorji/go/codec/json.go @@ -0,0 +1,1072 @@ +// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved. +// Use of this source code is governed by a MIT license found in the LICENSE file. + +package codec + +// By default, this json support uses base64 encoding for bytes, because you cannot +// store and read any arbitrary string in json (only unicode). +// However, the user can configre how to encode/decode bytes. +// +// This library specifically supports UTF-8 for encoding and decoding only. +// +// Note that the library will happily encode/decode things which are not valid +// json e.g. a map[int64]string. We do it for consistency. With valid json, +// we will encode and decode appropriately. +// Users can specify their map type if necessary to force it. 
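+//
+// For example (illustrative user code, not part of this file), forcing
+// non-string map keys on decode:
+//
+//	var m map[int64]string
+//	err := NewDecoderBytes(data, &JsonHandle{}).Decode(&m)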
+// +// Note: +// - we cannot use strconv.Quote and strconv.Unquote because json quotes/unquotes differently. +// We implement it here. +// - Also, strconv.ParseXXX for floats and integers +// - only works on strings resulting in unnecessary allocation and []byte-string conversion. +// - it does a lot of redundant checks, because json numbers are simpler that what it supports. +// - We parse numbers (floats and integers) directly here. +// We only delegate parsing floats if it is a hairy float which could cause a loss of precision. +// In that case, we delegate to strconv.ParseFloat. +// +// Note: +// - encode does not beautify. There is no whitespace when encoding. +// - rpc calls which take single integer arguments or write single numeric arguments will need care. + +// Top-level methods of json(End|Dec)Driver (which are implementations of (en|de)cDriver +// MUST not call one-another. + +import ( + "bytes" + "encoding/base64" + "fmt" + "reflect" + "strconv" + "unicode/utf16" + "unicode/utf8" +) + +//-------------------------------- + +var jsonLiterals = [...]byte{'t', 'r', 'u', 'e', 'f', 'a', 'l', 's', 'e', 'n', 'u', 'l', 'l'} + +var jsonFloat64Pow10 = [...]float64{ + 1e0, 1e1, 1e2, 1e3, 1e4, 1e5, 1e6, 1e7, 1e8, 1e9, + 1e10, 1e11, 1e12, 1e13, 1e14, 1e15, 1e16, 1e17, 1e18, 1e19, + 1e20, 1e21, 1e22, +} + +var jsonUint64Pow10 = [...]uint64{ + 1e0, 1e1, 1e2, 1e3, 1e4, 1e5, 1e6, 1e7, 1e8, 1e9, + 1e10, 1e11, 1e12, 1e13, 1e14, 1e15, 1e16, 1e17, 1e18, 1e19, +} + +const ( + // jsonUnreadAfterDecNum controls whether we unread after decoding a number. + // + // instead of unreading, just update d.tok (iff it's not a whitespace char) + // However, doing this means that we may HOLD onto some data which belongs to another stream. + // Thus, it is safest to unread the data when done. + // keep behind a constant flag for now. + jsonUnreadAfterDecNum = true + + // If !jsonValidateSymbols, decoding will be faster, by skipping some checks: + // - If we see first character of null, false or true, + // do not validate subsequent characters. + // - e.g. if we see a n, assume null and skip next 3 characters, + // and do not validate they are ull. + // P.S. Do not expect a significant decoding boost from this. + jsonValidateSymbols = true + + // if jsonTruncateMantissa, truncate mantissa if trailing 0's. + // This is important because it could allow some floats to be decoded without + // deferring to strconv.ParseFloat. 
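+	// e.g. 123000 is then held as mantissa=123, exponent=3, which floatVal
+	// can reconstruct exactly without deferring to strconv.ParseFloat.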
+ jsonTruncateMantissa = true + + // if mantissa >= jsonNumUintCutoff before multiplying by 10, this is an overflow + jsonNumUintCutoff = (1<<64-1)/uint64(10) + 1 // cutoff64(base) + + // if mantissa >= jsonNumUintMaxVal, this is an overflow + jsonNumUintMaxVal = 1<= slen { + e.bs = e.bs[:slen] + } else { + e.bs = make([]byte, slen) + } + base64.StdEncoding.Encode(e.bs, v) + e.w.writen1('"') + e.w.writeb(e.bs) + e.w.writen1('"') + } else { + // e.EncodeString(c, string(v)) + e.quoteStr(stringView(v)) + } +} + +func (e *jsonEncDriver) EncodeAsis(v []byte) { + e.w.writeb(v) +} + +func (e *jsonEncDriver) quoteStr(s string) { + // adapted from std pkg encoding/json + const hex = "0123456789abcdef" + w := e.w + w.writen1('"') + start := 0 + for i := 0; i < len(s); { + if b := s[i]; b < utf8.RuneSelf { + if 0x20 <= b && b != '\\' && b != '"' && b != '<' && b != '>' && b != '&' { + i++ + continue + } + if start < i { + w.writestr(s[start:i]) + } + switch b { + case '\\', '"': + w.writen2('\\', b) + case '\n': + w.writen2('\\', 'n') + case '\r': + w.writen2('\\', 'r') + case '\b': + w.writen2('\\', 'b') + case '\f': + w.writen2('\\', 'f') + case '\t': + w.writen2('\\', 't') + default: + // encode all bytes < 0x20 (except \r, \n). + // also encode < > & to prevent security holes when served to some browsers. + w.writestr(`\u00`) + w.writen2(hex[b>>4], hex[b&0xF]) + } + i++ + start = i + continue + } + c, size := utf8.DecodeRuneInString(s[i:]) + if c == utf8.RuneError && size == 1 { + if start < i { + w.writestr(s[start:i]) + } + w.writestr(`\ufffd`) + i += size + start = i + continue + } + // U+2028 is LINE SEPARATOR. U+2029 is PARAGRAPH SEPARATOR. + // Both technically valid JSON, but bomb on JSONP, so fix here. + if c == '\u2028' || c == '\u2029' { + if start < i { + w.writestr(s[start:i]) + } + w.writestr(`\u202`) + w.writen1(hex[c&0xF]) + i += size + start = i + continue + } + i += size + } + if start < len(s) { + w.writestr(s[start:]) + } + w.writen1('"') +} + +//-------------------------------- + +type jsonNum struct { + // bytes []byte // may have [+-.eE0-9] + mantissa uint64 // where mantissa ends, and maybe dot begins. + exponent int16 // exponent value. + manOverflow bool + neg bool // started with -. No initial sign in the bytes above. + dot bool // has dot + explicitExponent bool // explicit exponent +} + +func (x *jsonNum) reset() { + x.manOverflow = false + x.neg = false + x.dot = false + x.explicitExponent = false + x.mantissa = 0 + x.exponent = 0 +} + +// uintExp is called only if exponent > 0. +func (x *jsonNum) uintExp() (n uint64, overflow bool) { + n = x.mantissa + e := x.exponent + if e >= int16(len(jsonUint64Pow10)) { + overflow = true + return + } + n *= jsonUint64Pow10[e] + if n < x.mantissa || n > jsonNumUintMaxVal { + overflow = true + return + } + return + // for i := int16(0); i < e; i++ { + // if n >= jsonNumUintCutoff { + // overflow = true + // return + // } + // n *= 10 + // } + // return +} + +// these constants are only used withn floatVal. +// They are brought out, so that floatVal can be inlined. +const ( + jsonUint64MantissaBits = 52 + jsonMaxExponent = int16(len(jsonFloat64Pow10)) - 1 +) + +func (x *jsonNum) floatVal() (f float64, parseUsingStrConv bool) { + // We do not want to lose precision. + // Consequently, we will delegate to strconv.ParseFloat if any of the following happen: + // - There are more digits than in math.MaxUint64: 18446744073709551615 (20 digits) + // We expect up to 99.... 
(19 digits) + // - The mantissa cannot fit into a 52 bits of uint64 + // - The exponent is beyond our scope ie beyong 22. + parseUsingStrConv = x.manOverflow || + x.exponent > jsonMaxExponent || + (x.exponent < 0 && -(x.exponent) > jsonMaxExponent) || + x.mantissa>>jsonUint64MantissaBits != 0 + + if parseUsingStrConv { + return + } + + // all good. so handle parse here. + f = float64(x.mantissa) + // fmt.Printf(".Float: uint64 value: %v, float: %v\n", m, f) + if x.neg { + f = -f + } + if x.exponent > 0 { + f *= jsonFloat64Pow10[x.exponent] + } else if x.exponent < 0 { + f /= jsonFloat64Pow10[-x.exponent] + } + return +} + +type jsonDecDriver struct { + noBuiltInTypes + d *Decoder + h *JsonHandle + r decReader + + c containerState + // tok is used to store the token read right after skipWhiteSpace. + tok uint8 + + bstr [8]byte // scratch used for string \UXXX parsing + b [64]byte // scratch, used for parsing strings or numbers + b2 [64]byte // scratch, used only for decodeBytes (after base64) + bs []byte // scratch. Initialized from b. Used for parsing strings or numbers. + + se setExtWrapper + + n jsonNum +} + +func jsonIsWS(b byte) bool { + return b == ' ' || b == '\t' || b == '\r' || b == '\n' +} + +// // This will skip whitespace characters and return the next byte to read. +// // The next byte determines what the value will be one of. +// func (d *jsonDecDriver) skipWhitespace() { +// // fast-path: do not enter loop. Just check first (in case no whitespace). +// b := d.r.readn1() +// if jsonIsWS(b) { +// r := d.r +// for b = r.readn1(); jsonIsWS(b); b = r.readn1() { +// } +// } +// d.tok = b +// } + +func (d *jsonDecDriver) uncacheRead() { + if d.tok != 0 { + d.r.unreadn1() + d.tok = 0 + } +} + +func (d *jsonDecDriver) sendContainerState(c containerState) { + if d.tok == 0 { + var b byte + r := d.r + for b = r.readn1(); jsonIsWS(b); b = r.readn1() { + } + d.tok = b + } + var xc uint8 // char expected + if c == containerMapKey { + if d.c != containerMapStart { + xc = ',' + } + } else if c == containerMapValue { + xc = ':' + } else if c == containerMapEnd { + xc = '}' + } else if c == containerArrayElem { + if d.c != containerArrayStart { + xc = ',' + } + } else if c == containerArrayEnd { + xc = ']' + } + if xc != 0 { + if d.tok != xc { + d.d.errorf("json: expect char '%c' but got char '%c'", xc, d.tok) + } + d.tok = 0 + } + d.c = c +} + +func (d *jsonDecDriver) CheckBreak() bool { + if d.tok == 0 { + var b byte + r := d.r + for b = r.readn1(); jsonIsWS(b); b = r.readn1() { + } + d.tok = b + } + if d.tok == '}' || d.tok == ']' { + // d.tok = 0 // only checking, not consuming + return true + } + return false +} + +func (d *jsonDecDriver) readStrIdx(fromIdx, toIdx uint8) { + bs := d.r.readx(int(toIdx - fromIdx)) + d.tok = 0 + if jsonValidateSymbols { + if !bytes.Equal(bs, jsonLiterals[fromIdx:toIdx]) { + d.d.errorf("json: expecting %s: got %s", jsonLiterals[fromIdx:toIdx], bs) + return + } + } +} + +func (d *jsonDecDriver) TryDecodeAsNil() bool { + if d.tok == 0 { + var b byte + r := d.r + for b = r.readn1(); jsonIsWS(b); b = r.readn1() { + } + d.tok = b + } + if d.tok == 'n' { + d.readStrIdx(10, 13) // ull + return true + } + return false +} + +func (d *jsonDecDriver) DecodeBool() bool { + if d.tok == 0 { + var b byte + r := d.r + for b = r.readn1(); jsonIsWS(b); b = r.readn1() { + } + d.tok = b + } + if d.tok == 'f' { + d.readStrIdx(5, 9) // alse + return false + } + if d.tok == 't' { + d.readStrIdx(1, 4) // rue + return true + } + d.d.errorf("json: decode bool: got first char %c", 
d.tok) + return false // "unreachable" +} + +func (d *jsonDecDriver) ReadMapStart() int { + if d.tok == 0 { + var b byte + r := d.r + for b = r.readn1(); jsonIsWS(b); b = r.readn1() { + } + d.tok = b + } + if d.tok != '{' { + d.d.errorf("json: expect char '%c' but got char '%c'", '{', d.tok) + } + d.tok = 0 + d.c = containerMapStart + return -1 +} + +func (d *jsonDecDriver) ReadArrayStart() int { + if d.tok == 0 { + var b byte + r := d.r + for b = r.readn1(); jsonIsWS(b); b = r.readn1() { + } + d.tok = b + } + if d.tok != '[' { + d.d.errorf("json: expect char '%c' but got char '%c'", '[', d.tok) + } + d.tok = 0 + d.c = containerArrayStart + return -1 +} + +func (d *jsonDecDriver) ContainerType() (vt valueType) { + // check container type by checking the first char + if d.tok == 0 { + var b byte + r := d.r + for b = r.readn1(); jsonIsWS(b); b = r.readn1() { + } + d.tok = b + } + if b := d.tok; b == '{' { + return valueTypeMap + } else if b == '[' { + return valueTypeArray + } else if b == 'n' { + return valueTypeNil + } else if b == '"' { + return valueTypeString + } + return valueTypeUnset + // d.d.errorf("isContainerType: unsupported parameter: %v", vt) + // return false // "unreachable" +} + +func (d *jsonDecDriver) decNum(storeBytes bool) { + // If it is has a . or an e|E, decode as a float; else decode as an int. + if d.tok == 0 { + var b byte + r := d.r + for b = r.readn1(); jsonIsWS(b); b = r.readn1() { + } + d.tok = b + } + b := d.tok + if !(b == '+' || b == '-' || b == '.' || (b >= '0' && b <= '9')) { + d.d.errorf("json: decNum: got first char '%c'", b) + return + } + d.tok = 0 + + const cutoff = (1<<64-1)/uint64(10) + 1 // cutoff64(base) + const jsonNumUintMaxVal = 1<= jsonNumUintCutoff { + n.manOverflow = true + break + } + v := uint64(b - '0') + n.mantissa *= 10 + if v != 0 { + n1 := n.mantissa + v + if n1 < n.mantissa || n1 > jsonNumUintMaxVal { + n.manOverflow = true // n+v overflows + break + } + n.mantissa = n1 + } + case 6: + state = 7 + fallthrough + case 7: + if !(b == '0' && e == 0) { + e = e*10 + int16(b-'0') + } + default: + break LOOP + } + default: + break LOOP + } + if storeBytes { + d.bs = append(d.bs, b) + } + b, eof = r.readn1eof() + } + + if jsonTruncateMantissa && n.mantissa != 0 { + for n.mantissa%10 == 0 { + n.mantissa /= 10 + n.exponent++ + } + } + + if e != 0 { + if eNeg { + n.exponent -= e + } else { + n.exponent += e + } + } + + // d.n = n + + if !eof { + if jsonUnreadAfterDecNum { + r.unreadn1() + } else { + if !jsonIsWS(b) { + d.tok = b + } + } + } + // fmt.Printf("1: n: bytes: %s, neg: %v, dot: %v, exponent: %v, mantissaEndIndex: %v\n", + // n.bytes, n.neg, n.dot, n.exponent, n.mantissaEndIndex) + return +} + +func (d *jsonDecDriver) DecodeInt(bitsize uint8) (i int64) { + d.decNum(false) + n := &d.n + if n.manOverflow { + d.d.errorf("json: overflow integer after: %v", n.mantissa) + return + } + var u uint64 + if n.exponent == 0 { + u = n.mantissa + } else if n.exponent < 0 { + d.d.errorf("json: fractional integer") + return + } else if n.exponent > 0 { + var overflow bool + if u, overflow = n.uintExp(); overflow { + d.d.errorf("json: overflow integer") + return + } + } + i = int64(u) + if n.neg { + i = -i + } + if chkOvf.Int(i, bitsize) { + d.d.errorf("json: overflow %v bits: %s", bitsize, d.bs) + return + } + // fmt.Printf("DecodeInt: %v\n", i) + return +} + +// floatVal MUST only be called after a decNum, as d.bs now contains the bytes of the number +func (d *jsonDecDriver) floatVal() (f float64) { + f, useStrConv := d.n.floatVal() + if useStrConv { 
+ var err error + if f, err = strconv.ParseFloat(stringView(d.bs), 64); err != nil { + panic(fmt.Errorf("parse float: %s, %v", d.bs, err)) + } + if d.n.neg { + f = -f + } + } + return +} + +func (d *jsonDecDriver) DecodeUint(bitsize uint8) (u uint64) { + d.decNum(false) + n := &d.n + if n.neg { + d.d.errorf("json: unsigned integer cannot be negative") + return + } + if n.manOverflow { + d.d.errorf("json: overflow integer after: %v", n.mantissa) + return + } + if n.exponent == 0 { + u = n.mantissa + } else if n.exponent < 0 { + d.d.errorf("json: fractional integer") + return + } else if n.exponent > 0 { + var overflow bool + if u, overflow = n.uintExp(); overflow { + d.d.errorf("json: overflow integer") + return + } + } + if chkOvf.Uint(u, bitsize) { + d.d.errorf("json: overflow %v bits: %s", bitsize, d.bs) + return + } + // fmt.Printf("DecodeUint: %v\n", u) + return +} + +func (d *jsonDecDriver) DecodeFloat(chkOverflow32 bool) (f float64) { + d.decNum(true) + f = d.floatVal() + if chkOverflow32 && chkOvf.Float32(f) { + d.d.errorf("json: overflow float32: %v, %s", f, d.bs) + return + } + return +} + +func (d *jsonDecDriver) DecodeExt(rv interface{}, xtag uint64, ext Ext) (realxtag uint64) { + if ext == nil { + re := rv.(*RawExt) + re.Tag = xtag + d.d.decode(&re.Value) + } else { + var v interface{} + d.d.decode(&v) + ext.UpdateExt(rv, v) + } + return +} + +func (d *jsonDecDriver) DecodeBytes(bs []byte, isstring, zerocopy bool) (bsOut []byte) { + // if decoding into raw bytes, and the RawBytesExt is configured, use it to decode. + if !isstring && d.se.i != nil { + bsOut = bs + d.DecodeExt(&bsOut, 0, &d.se) + return + } + d.appendStringAsBytes() + // if isstring, then just return the bytes, even if it is using the scratch buffer. + // the bytes will be converted to a string as needed. + if isstring { + return d.bs + } + bs0 := d.bs + slen := base64.StdEncoding.DecodedLen(len(bs0)) + if slen <= cap(bs) { + bsOut = bs[:slen] + } else if zerocopy && slen <= cap(d.b2) { + bsOut = d.b2[:slen] + } else { + bsOut = make([]byte, slen) + } + slen2, err := base64.StdEncoding.Decode(bsOut, bs0) + if err != nil { + d.d.errorf("json: error decoding base64 binary '%s': %v", bs0, err) + return nil + } + if slen != slen2 { + bsOut = bsOut[:slen2] + } + return +} + +func (d *jsonDecDriver) DecodeString() (s string) { + d.appendStringAsBytes() + // if x := d.s.sc; x != nil && x.so && x.st == '}' { // map key + if d.c == containerMapKey { + return d.d.string(d.bs) + } + return string(d.bs) +} + +func (d *jsonDecDriver) appendStringAsBytes() { + if d.tok == 0 { + var b byte + r := d.r + for b = r.readn1(); jsonIsWS(b); b = r.readn1() { + } + d.tok = b + } + if d.tok != '"' { + d.d.errorf("json: expect char '%c' but got char '%c'", '"', d.tok) + } + d.tok = 0 + + v := d.bs[:0] + var c uint8 + r := d.r + for { + c = r.readn1() + if c == '"' { + break + } else if c == '\\' { + c = r.readn1() + switch c { + case '"', '\\', '/', '\'': + v = append(v, c) + case 'b': + v = append(v, '\b') + case 'f': + v = append(v, '\f') + case 'n': + v = append(v, '\n') + case 'r': + v = append(v, '\r') + case 't': + v = append(v, '\t') + case 'u': + rr := d.jsonU4(false) + // fmt.Printf("$$$$$$$$$: is surrogate: %v\n", utf16.IsSurrogate(rr)) + if utf16.IsSurrogate(rr) { + rr = utf16.DecodeRune(rr, d.jsonU4(true)) + } + w2 := utf8.EncodeRune(d.bstr[:], rr) + v = append(v, d.bstr[:w2]...) 
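The 'u' branch of the escape handling just above is the subtle one: a \uXXXX escape may be only half of a UTF-16 surrogate pair, so jsonU4 is called a second time and the halves are merged before being re-encoded as UTF-8. A hedged, standalone illustration using only the standard library (this is not the driver's own code path):

package main

import (
	"fmt"
	"unicode/utf16"
	"unicode/utf8"
)

func main() {
	// JSON "\uD834\uDD1E" encodes U+1D11E (musical symbol G clef)
	// as a UTF-16 surrogate pair; a decoder must merge the halves.
	hi, lo := rune(0xD834), rune(0xDD1E)
	r := hi
	if utf16.IsSurrogate(r) {
		r = utf16.DecodeRune(hi, lo)
	}
	var buf [4]byte
	n := utf8.EncodeRune(buf[:], r)
	fmt.Printf("%#U -> % x\n", r, buf[:n]) // U+1D11E -> f0 9d 84 9e
}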
+ default: + d.d.errorf("json: unsupported escaped value: %c", c) + } + } else { + v = append(v, c) + } + } + d.bs = v +} + +func (d *jsonDecDriver) jsonU4(checkSlashU bool) rune { + r := d.r + if checkSlashU && !(r.readn1() == '\\' && r.readn1() == 'u') { + d.d.errorf(`json: unquoteStr: invalid unicode sequence. Expecting \u`) + return 0 + } + // u, _ := strconv.ParseUint(string(d.bstr[:4]), 16, 64) + var u uint32 + for i := 0; i < 4; i++ { + v := r.readn1() + if '0' <= v && v <= '9' { + v = v - '0' + } else if 'a' <= v && v <= 'z' { + v = v - 'a' + 10 + } else if 'A' <= v && v <= 'Z' { + v = v - 'A' + 10 + } else { + d.d.errorf(`json: unquoteStr: invalid hex char in \u unicode sequence: %q`, v) + return 0 + } + u = u*16 + uint32(v) + } + return rune(u) +} + +func (d *jsonDecDriver) DecodeNaked() { + z := &d.d.n + // var decodeFurther bool + + if d.tok == 0 { + var b byte + r := d.r + for b = r.readn1(); jsonIsWS(b); b = r.readn1() { + } + d.tok = b + } + switch d.tok { + case 'n': + d.readStrIdx(10, 13) // ull + z.v = valueTypeNil + case 'f': + d.readStrIdx(5, 9) // alse + z.v = valueTypeBool + z.b = false + case 't': + d.readStrIdx(1, 4) // rue + z.v = valueTypeBool + z.b = true + case '{': + z.v = valueTypeMap + // d.tok = 0 // don't consume. kInterfaceNaked will call ReadMapStart + // decodeFurther = true + case '[': + z.v = valueTypeArray + // d.tok = 0 // don't consume. kInterfaceNaked will call ReadArrayStart + // decodeFurther = true + case '"': + z.v = valueTypeString + z.s = d.DecodeString() + default: // number + d.decNum(true) + n := &d.n + // if the string had a any of [.eE], then decode as float. + switch { + case n.explicitExponent, n.dot, n.exponent < 0, n.manOverflow: + z.v = valueTypeFloat + z.f = d.floatVal() + case n.exponent == 0: + u := n.mantissa + switch { + case n.neg: + z.v = valueTypeInt + z.i = -int64(u) + case d.h.SignedInteger: + z.v = valueTypeInt + z.i = int64(u) + default: + z.v = valueTypeUint + z.u = u + } + default: + u, overflow := n.uintExp() + switch { + case overflow: + z.v = valueTypeFloat + z.f = d.floatVal() + case n.neg: + z.v = valueTypeInt + z.i = -int64(u) + case d.h.SignedInteger: + z.v = valueTypeInt + z.i = int64(u) + default: + z.v = valueTypeUint + z.u = u + } + } + // fmt.Printf("DecodeNaked: Number: %T, %v\n", v, v) + } + // if decodeFurther { + // d.s.sc.retryRead() + // } + return +} + +//---------------------- + +// JsonHandle is a handle for JSON encoding format. +// +// Json is comprehensively supported: +// - decodes numbers into interface{} as int, uint or float64 +// - configurable way to encode/decode []byte . +// by default, encodes and decodes []byte using base64 Std Encoding +// - UTF-8 support for encoding and decoding +// +// It has better performance than the json library in the standard library, +// by leveraging the performance improvements of the codec library and +// minimizing allocations. +// +// In addition, it doesn't read more bytes than necessary during a decode, which allows +// reading multiple values from a stream containing json and non-json content. +// For example, a user can read a json value, then a cbor value, then a msgpack value, +// all from the same stream in sequence. +type JsonHandle struct { + textEncodingType + BasicHandle + // RawBytesExt, if configured, is used to encode and decode raw bytes in a custom way. + // If not configured, raw bytes are encoded to/from base64 text. 
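Stepping back from the handle definition for a moment: the default branch of DecodeNaked above encodes the numeric rule from its comment — a dot, an explicit exponent, a negative effective exponent, or mantissa overflow yields float64; everything else yields int64 or uint64 depending on sign and the handle's SignedInteger flag. A quick probe of that behaviour, assuming the package is imported as github.com/ugorji/go/codec (the vendored path in this tree is longer):

package main

import (
	"fmt"

	"github.com/ugorji/go/codec" // assumed import path; vendored path differs
)

func main() {
	var h codec.JsonHandle
	for _, s := range []string{"7", "-7", "7.5", "7e2"} {
		var v interface{}
		if err := codec.NewDecoderBytes([]byte(s), &h).Decode(&v); err != nil {
			panic(err)
		}
		fmt.Printf("%q -> %T(%v)\n", s, v, v)
	}
	// Expected, per the classification above:
	// "7" -> uint64(7), "-7" -> int64(-7),
	// "7.5" -> float64(7.5), "7e2" -> float64(700)
}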
+ RawBytesExt InterfaceExt +} + +func (h *JsonHandle) SetInterfaceExt(rt reflect.Type, tag uint64, ext InterfaceExt) (err error) { + return h.SetExt(rt, tag, &setExtWrapper{i: ext}) +} + +func (h *JsonHandle) newEncDriver(e *Encoder) encDriver { + hd := jsonEncDriver{e: e, w: e.w, h: h} + hd.bs = hd.b[:0] + hd.se.i = h.RawBytesExt + return &hd +} + +func (h *JsonHandle) newDecDriver(d *Decoder) decDriver { + // d := jsonDecDriver{r: r.(*bytesDecReader), h: h} + hd := jsonDecDriver{d: d, r: d.r, h: h} + hd.bs = hd.b[:0] + hd.se.i = h.RawBytesExt + return &hd +} + +func (e *jsonEncDriver) reset() { + e.w = e.e.w +} + +func (d *jsonDecDriver) reset() { + d.r = d.d.r +} + +var jsonEncodeTerminate = []byte{' '} + +func (h *JsonHandle) rpcEncodeTerminate() []byte { + return jsonEncodeTerminate +} + +var _ decDriver = (*jsonDecDriver)(nil) +var _ encDriver = (*jsonEncDriver)(nil) diff --git a/vendor/github.com/coreos/etcd/Godeps/_workspace/src/github.com/ugorji/go/codec/msgpack.go b/vendor/github.com/coreos/etcd/Godeps/_workspace/src/github.com/ugorji/go/codec/msgpack.go new file mode 100644 index 000000000..5eb4c9636 --- /dev/null +++ b/vendor/github.com/coreos/etcd/Godeps/_workspace/src/github.com/ugorji/go/codec/msgpack.go @@ -0,0 +1,844 @@ +// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved. +// Use of this source code is governed by a MIT license found in the LICENSE file. + +/* +MSGPACK + +Msgpack-c implementation powers the c, c++, python, ruby, etc libraries. +We need to maintain compatibility with it and how it encodes integer values +without caring about the type. + +For compatibility with behaviour of msgpack-c reference implementation: + - Go intX (>0) and uintX + IS ENCODED AS + msgpack +ve fixnum, unsigned + - Go intX (<0) + IS ENCODED AS + msgpack -ve fixnum, signed + +*/ +package codec + +import ( + "fmt" + "io" + "math" + "net/rpc" + "reflect" +) + +const ( + mpPosFixNumMin byte = 0x00 + mpPosFixNumMax = 0x7f + mpFixMapMin = 0x80 + mpFixMapMax = 0x8f + mpFixArrayMin = 0x90 + mpFixArrayMax = 0x9f + mpFixStrMin = 0xa0 + mpFixStrMax = 0xbf + mpNil = 0xc0 + _ = 0xc1 + mpFalse = 0xc2 + mpTrue = 0xc3 + mpFloat = 0xca + mpDouble = 0xcb + mpUint8 = 0xcc + mpUint16 = 0xcd + mpUint32 = 0xce + mpUint64 = 0xcf + mpInt8 = 0xd0 + mpInt16 = 0xd1 + mpInt32 = 0xd2 + mpInt64 = 0xd3 + + // extensions below + mpBin8 = 0xc4 + mpBin16 = 0xc5 + mpBin32 = 0xc6 + mpExt8 = 0xc7 + mpExt16 = 0xc8 + mpExt32 = 0xc9 + mpFixExt1 = 0xd4 + mpFixExt2 = 0xd5 + mpFixExt4 = 0xd6 + mpFixExt8 = 0xd7 + mpFixExt16 = 0xd8 + + mpStr8 = 0xd9 // new + mpStr16 = 0xda + mpStr32 = 0xdb + + mpArray16 = 0xdc + mpArray32 = 0xdd + + mpMap16 = 0xde + mpMap32 = 0xdf + + mpNegFixNumMin = 0xe0 + mpNegFixNumMax = 0xff +) + +// MsgpackSpecRpcMultiArgs is a special type which signifies to the MsgpackSpecRpcCodec +// that the backend RPC service takes multiple arguments, which have been arranged +// in sequence in the slice. +// +// The Codec then passes it AS-IS to the rpc service (without wrapping it in an +// array of 1 element). +type MsgpackSpecRpcMultiArgs []interface{} + +// A MsgpackContainer type specifies the different types of msgpackContainers. 
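Before the container plumbing, a quick illustration of the compatibility rules from the header comment: in EncodeInt/EncodeUint (defined just below), small non-negative values collapse to a single positive-fixnum byte, values in [-32, -1] to a single negative-fixnum byte, and everything else gets a typed descriptor plus a big-endian payload. A hedged sketch, again assuming the canonical import path:

package main

import (
	"fmt"

	"github.com/ugorji/go/codec" // assumed import path; vendored path differs
)

func main() {
	var h codec.MsgpackHandle
	for _, v := range []int64{1, -1, 200, -200} {
		var b []byte
		if err := codec.NewEncoderBytes(&b, &h).Encode(v); err != nil {
			panic(err)
		}
		fmt.Printf("%5d -> % x\n", v, b)
	}
	// Expected: 1 -> 01 (pos fixnum), -1 -> ff (neg fixnum),
	// 200 -> cc c8 (uint8), -200 -> d1 ff 38 (int16).
}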
+type msgpackContainerType struct { + fixCutoff int + bFixMin, b8, b16, b32 byte + hasFixMin, has8, has8Always bool +} + +var ( + msgpackContainerStr = msgpackContainerType{32, mpFixStrMin, mpStr8, mpStr16, mpStr32, true, true, false} + msgpackContainerBin = msgpackContainerType{0, 0, mpBin8, mpBin16, mpBin32, false, true, true} + msgpackContainerList = msgpackContainerType{16, mpFixArrayMin, 0, mpArray16, mpArray32, true, false, false} + msgpackContainerMap = msgpackContainerType{16, mpFixMapMin, 0, mpMap16, mpMap32, true, false, false} +) + +//--------------------------------------------- + +type msgpackEncDriver struct { + noBuiltInTypes + encNoSeparator + e *Encoder + w encWriter + h *MsgpackHandle + x [8]byte +} + +func (e *msgpackEncDriver) EncodeNil() { + e.w.writen1(mpNil) +} + +func (e *msgpackEncDriver) EncodeInt(i int64) { + if i >= 0 { + e.EncodeUint(uint64(i)) + } else if i >= -32 { + e.w.writen1(byte(i)) + } else if i >= math.MinInt8 { + e.w.writen2(mpInt8, byte(i)) + } else if i >= math.MinInt16 { + e.w.writen1(mpInt16) + bigenHelper{e.x[:2], e.w}.writeUint16(uint16(i)) + } else if i >= math.MinInt32 { + e.w.writen1(mpInt32) + bigenHelper{e.x[:4], e.w}.writeUint32(uint32(i)) + } else { + e.w.writen1(mpInt64) + bigenHelper{e.x[:8], e.w}.writeUint64(uint64(i)) + } +} + +func (e *msgpackEncDriver) EncodeUint(i uint64) { + if i <= math.MaxInt8 { + e.w.writen1(byte(i)) + } else if i <= math.MaxUint8 { + e.w.writen2(mpUint8, byte(i)) + } else if i <= math.MaxUint16 { + e.w.writen1(mpUint16) + bigenHelper{e.x[:2], e.w}.writeUint16(uint16(i)) + } else if i <= math.MaxUint32 { + e.w.writen1(mpUint32) + bigenHelper{e.x[:4], e.w}.writeUint32(uint32(i)) + } else { + e.w.writen1(mpUint64) + bigenHelper{e.x[:8], e.w}.writeUint64(uint64(i)) + } +} + +func (e *msgpackEncDriver) EncodeBool(b bool) { + if b { + e.w.writen1(mpTrue) + } else { + e.w.writen1(mpFalse) + } +} + +func (e *msgpackEncDriver) EncodeFloat32(f float32) { + e.w.writen1(mpFloat) + bigenHelper{e.x[:4], e.w}.writeUint32(math.Float32bits(f)) +} + +func (e *msgpackEncDriver) EncodeFloat64(f float64) { + e.w.writen1(mpDouble) + bigenHelper{e.x[:8], e.w}.writeUint64(math.Float64bits(f)) +} + +func (e *msgpackEncDriver) EncodeExt(v interface{}, xtag uint64, ext Ext, _ *Encoder) { + bs := ext.WriteExt(v) + if bs == nil { + e.EncodeNil() + return + } + if e.h.WriteExt { + e.encodeExtPreamble(uint8(xtag), len(bs)) + e.w.writeb(bs) + } else { + e.EncodeStringBytes(c_RAW, bs) + } +} + +func (e *msgpackEncDriver) EncodeRawExt(re *RawExt, _ *Encoder) { + e.encodeExtPreamble(uint8(re.Tag), len(re.Data)) + e.w.writeb(re.Data) +} + +func (e *msgpackEncDriver) encodeExtPreamble(xtag byte, l int) { + if l == 1 { + e.w.writen2(mpFixExt1, xtag) + } else if l == 2 { + e.w.writen2(mpFixExt2, xtag) + } else if l == 4 { + e.w.writen2(mpFixExt4, xtag) + } else if l == 8 { + e.w.writen2(mpFixExt8, xtag) + } else if l == 16 { + e.w.writen2(mpFixExt16, xtag) + } else if l < 256 { + e.w.writen2(mpExt8, byte(l)) + e.w.writen1(xtag) + } else if l < 65536 { + e.w.writen1(mpExt16) + bigenHelper{e.x[:2], e.w}.writeUint16(uint16(l)) + e.w.writen1(xtag) + } else { + e.w.writen1(mpExt32) + bigenHelper{e.x[:4], e.w}.writeUint32(uint32(l)) + e.w.writen1(xtag) + } +} + +func (e *msgpackEncDriver) EncodeArrayStart(length int) { + e.writeContainerLen(msgpackContainerList, length) +} + +func (e *msgpackEncDriver) EncodeMapStart(length int) { + e.writeContainerLen(msgpackContainerMap, length) +} + +func (e *msgpackEncDriver) EncodeString(c charEncoding, s string) { 
+ if c == c_RAW && e.h.WriteExt { + e.writeContainerLen(msgpackContainerBin, len(s)) + } else { + e.writeContainerLen(msgpackContainerStr, len(s)) + } + if len(s) > 0 { + e.w.writestr(s) + } +} + +func (e *msgpackEncDriver) EncodeSymbol(v string) { + e.EncodeString(c_UTF8, v) +} + +func (e *msgpackEncDriver) EncodeStringBytes(c charEncoding, bs []byte) { + if c == c_RAW && e.h.WriteExt { + e.writeContainerLen(msgpackContainerBin, len(bs)) + } else { + e.writeContainerLen(msgpackContainerStr, len(bs)) + } + if len(bs) > 0 { + e.w.writeb(bs) + } +} + +func (e *msgpackEncDriver) writeContainerLen(ct msgpackContainerType, l int) { + if ct.hasFixMin && l < ct.fixCutoff { + e.w.writen1(ct.bFixMin | byte(l)) + } else if ct.has8 && l < 256 && (ct.has8Always || e.h.WriteExt) { + e.w.writen2(ct.b8, uint8(l)) + } else if l < 65536 { + e.w.writen1(ct.b16) + bigenHelper{e.x[:2], e.w}.writeUint16(uint16(l)) + } else { + e.w.writen1(ct.b32) + bigenHelper{e.x[:4], e.w}.writeUint32(uint32(l)) + } +} + +//--------------------------------------------- + +type msgpackDecDriver struct { + d *Decoder + r decReader // *Decoder decReader decReaderT + h *MsgpackHandle + b [scratchByteArrayLen]byte + bd byte + bdRead bool + br bool // bytes reader + noBuiltInTypes + noStreamingCodec + decNoSeparator +} + +// Note: This returns either a primitive (int, bool, etc) for non-containers, +// or a containerType, or a specific type denoting nil or extension. +// It is called when a nil interface{} is passed, leaving it up to the DecDriver +// to introspect the stream and decide how best to decode. +// It deciphers the value by looking at the stream first. +func (d *msgpackDecDriver) DecodeNaked() { + if !d.bdRead { + d.readNextBd() + } + bd := d.bd + n := &d.d.n + var decodeFurther bool + + switch bd { + case mpNil: + n.v = valueTypeNil + d.bdRead = false + case mpFalse: + n.v = valueTypeBool + n.b = false + case mpTrue: + n.v = valueTypeBool + n.b = true + + case mpFloat: + n.v = valueTypeFloat + n.f = float64(math.Float32frombits(bigen.Uint32(d.r.readx(4)))) + case mpDouble: + n.v = valueTypeFloat + n.f = math.Float64frombits(bigen.Uint64(d.r.readx(8))) + + case mpUint8: + n.v = valueTypeUint + n.u = uint64(d.r.readn1()) + case mpUint16: + n.v = valueTypeUint + n.u = uint64(bigen.Uint16(d.r.readx(2))) + case mpUint32: + n.v = valueTypeUint + n.u = uint64(bigen.Uint32(d.r.readx(4))) + case mpUint64: + n.v = valueTypeUint + n.u = uint64(bigen.Uint64(d.r.readx(8))) + + case mpInt8: + n.v = valueTypeInt + n.i = int64(int8(d.r.readn1())) + case mpInt16: + n.v = valueTypeInt + n.i = int64(int16(bigen.Uint16(d.r.readx(2)))) + case mpInt32: + n.v = valueTypeInt + n.i = int64(int32(bigen.Uint32(d.r.readx(4)))) + case mpInt64: + n.v = valueTypeInt + n.i = int64(int64(bigen.Uint64(d.r.readx(8)))) + + default: + switch { + case bd >= mpPosFixNumMin && bd <= mpPosFixNumMax: + // positive fixnum (always signed) + n.v = valueTypeInt + n.i = int64(int8(bd)) + case bd >= mpNegFixNumMin && bd <= mpNegFixNumMax: + // negative fixnum + n.v = valueTypeInt + n.i = int64(int8(bd)) + case bd == mpStr8, bd == mpStr16, bd == mpStr32, bd >= mpFixStrMin && bd <= mpFixStrMax: + if d.h.RawToString { + n.v = valueTypeString + n.s = d.DecodeString() + } else { + n.v = valueTypeBytes + n.l = d.DecodeBytes(nil, false, false) + } + case bd == mpBin8, bd == mpBin16, bd == mpBin32: + n.v = valueTypeBytes + n.l = d.DecodeBytes(nil, false, false) + case bd == mpArray16, bd == mpArray32, bd >= mpFixArrayMin && bd <= mpFixArrayMax: + n.v = valueTypeArray + 
decodeFurther = true + case bd == mpMap16, bd == mpMap32, bd >= mpFixMapMin && bd <= mpFixMapMax: + n.v = valueTypeMap + decodeFurther = true + case bd >= mpFixExt1 && bd <= mpFixExt16, bd >= mpExt8 && bd <= mpExt32: + n.v = valueTypeExt + clen := d.readExtLen() + n.u = uint64(d.r.readn1()) + n.l = d.r.readx(clen) + default: + d.d.errorf("Nil-Deciphered DecodeValue: %s: hex: %x, dec: %d", msgBadDesc, bd, bd) + } + } + if !decodeFurther { + d.bdRead = false + } + if n.v == valueTypeUint && d.h.SignedInteger { + n.v = valueTypeInt + n.i = int64(n.v) + } + return +} + +// int can be decoded from msgpack type: intXXX or uintXXX +func (d *msgpackDecDriver) DecodeInt(bitsize uint8) (i int64) { + if !d.bdRead { + d.readNextBd() + } + switch d.bd { + case mpUint8: + i = int64(uint64(d.r.readn1())) + case mpUint16: + i = int64(uint64(bigen.Uint16(d.r.readx(2)))) + case mpUint32: + i = int64(uint64(bigen.Uint32(d.r.readx(4)))) + case mpUint64: + i = int64(bigen.Uint64(d.r.readx(8))) + case mpInt8: + i = int64(int8(d.r.readn1())) + case mpInt16: + i = int64(int16(bigen.Uint16(d.r.readx(2)))) + case mpInt32: + i = int64(int32(bigen.Uint32(d.r.readx(4)))) + case mpInt64: + i = int64(bigen.Uint64(d.r.readx(8))) + default: + switch { + case d.bd >= mpPosFixNumMin && d.bd <= mpPosFixNumMax: + i = int64(int8(d.bd)) + case d.bd >= mpNegFixNumMin && d.bd <= mpNegFixNumMax: + i = int64(int8(d.bd)) + default: + d.d.errorf("Unhandled single-byte unsigned integer value: %s: %x", msgBadDesc, d.bd) + return + } + } + // check overflow (logic adapted from std pkg reflect/value.go OverflowUint() + if bitsize > 0 { + if trunc := (i << (64 - bitsize)) >> (64 - bitsize); i != trunc { + d.d.errorf("Overflow int value: %v", i) + return + } + } + d.bdRead = false + return +} + +// uint can be decoded from msgpack type: intXXX or uintXXX +func (d *msgpackDecDriver) DecodeUint(bitsize uint8) (ui uint64) { + if !d.bdRead { + d.readNextBd() + } + switch d.bd { + case mpUint8: + ui = uint64(d.r.readn1()) + case mpUint16: + ui = uint64(bigen.Uint16(d.r.readx(2))) + case mpUint32: + ui = uint64(bigen.Uint32(d.r.readx(4))) + case mpUint64: + ui = bigen.Uint64(d.r.readx(8)) + case mpInt8: + if i := int64(int8(d.r.readn1())); i >= 0 { + ui = uint64(i) + } else { + d.d.errorf("Assigning negative signed value: %v, to unsigned type", i) + return + } + case mpInt16: + if i := int64(int16(bigen.Uint16(d.r.readx(2)))); i >= 0 { + ui = uint64(i) + } else { + d.d.errorf("Assigning negative signed value: %v, to unsigned type", i) + return + } + case mpInt32: + if i := int64(int32(bigen.Uint32(d.r.readx(4)))); i >= 0 { + ui = uint64(i) + } else { + d.d.errorf("Assigning negative signed value: %v, to unsigned type", i) + return + } + case mpInt64: + if i := int64(bigen.Uint64(d.r.readx(8))); i >= 0 { + ui = uint64(i) + } else { + d.d.errorf("Assigning negative signed value: %v, to unsigned type", i) + return + } + default: + switch { + case d.bd >= mpPosFixNumMin && d.bd <= mpPosFixNumMax: + ui = uint64(d.bd) + case d.bd >= mpNegFixNumMin && d.bd <= mpNegFixNumMax: + d.d.errorf("Assigning negative signed value: %v, to unsigned type", int(d.bd)) + return + default: + d.d.errorf("Unhandled single-byte unsigned integer value: %s: %x", msgBadDesc, d.bd) + return + } + } + // check overflow (logic adapted from std pkg reflect/value.go OverflowUint() + if bitsize > 0 { + if trunc := (ui << (64 - bitsize)) >> (64 - bitsize); ui != trunc { + d.d.errorf("Overflow uint value: %v", ui) + return + } + } + d.bdRead = false + return +} + +// float can 
either be decoded from msgpack type: float, double or intX +func (d *msgpackDecDriver) DecodeFloat(chkOverflow32 bool) (f float64) { + if !d.bdRead { + d.readNextBd() + } + if d.bd == mpFloat { + f = float64(math.Float32frombits(bigen.Uint32(d.r.readx(4)))) + } else if d.bd == mpDouble { + f = math.Float64frombits(bigen.Uint64(d.r.readx(8))) + } else { + f = float64(d.DecodeInt(0)) + } + if chkOverflow32 && chkOvf.Float32(f) { + d.d.errorf("msgpack: float32 overflow: %v", f) + return + } + d.bdRead = false + return +} + +// bool can be decoded from bool, fixnum 0 or 1. +func (d *msgpackDecDriver) DecodeBool() (b bool) { + if !d.bdRead { + d.readNextBd() + } + if d.bd == mpFalse || d.bd == 0 { + // b = false + } else if d.bd == mpTrue || d.bd == 1 { + b = true + } else { + d.d.errorf("Invalid single-byte value for bool: %s: %x", msgBadDesc, d.bd) + return + } + d.bdRead = false + return +} + +func (d *msgpackDecDriver) DecodeBytes(bs []byte, isstring, zerocopy bool) (bsOut []byte) { + if !d.bdRead { + d.readNextBd() + } + var clen int + // ignore isstring. Expect that the bytes may be found from msgpackContainerStr or msgpackContainerBin + if bd := d.bd; bd == mpBin8 || bd == mpBin16 || bd == mpBin32 { + clen = d.readContainerLen(msgpackContainerBin) + } else { + clen = d.readContainerLen(msgpackContainerStr) + } + // println("DecodeBytes: clen: ", clen) + d.bdRead = false + // bytes may be nil, so handle it. if nil, clen=-1. + if clen < 0 { + return nil + } + if zerocopy { + if d.br { + return d.r.readx(clen) + } else if len(bs) == 0 { + bs = d.b[:] + } + } + return decByteSlice(d.r, clen, bs) +} + +func (d *msgpackDecDriver) DecodeString() (s string) { + return string(d.DecodeBytes(d.b[:], true, true)) +} + +func (d *msgpackDecDriver) readNextBd() { + d.bd = d.r.readn1() + d.bdRead = true +} + +func (d *msgpackDecDriver) ContainerType() (vt valueType) { + bd := d.bd + if bd == mpNil { + return valueTypeNil + } else if bd == mpBin8 || bd == mpBin16 || bd == mpBin32 || + (!d.h.RawToString && + (bd == mpStr8 || bd == mpStr16 || bd == mpStr32 || (bd >= mpFixStrMin && bd <= mpFixStrMax))) { + return valueTypeBytes + } else if d.h.RawToString && + (bd == mpStr8 || bd == mpStr16 || bd == mpStr32 || (bd >= mpFixStrMin && bd <= mpFixStrMax)) { + return valueTypeString + } else if bd == mpArray16 || bd == mpArray32 || (bd >= mpFixArrayMin && bd <= mpFixArrayMax) { + return valueTypeArray + } else if bd == mpMap16 || bd == mpMap32 || (bd >= mpFixMapMin && bd <= mpFixMapMax) { + return valueTypeMap + } else { + // d.d.errorf("isContainerType: unsupported parameter: %v", vt) + } + return valueTypeUnset +} + +func (d *msgpackDecDriver) TryDecodeAsNil() (v bool) { + if !d.bdRead { + d.readNextBd() + } + if d.bd == mpNil { + d.bdRead = false + v = true + } + return +} + +func (d *msgpackDecDriver) readContainerLen(ct msgpackContainerType) (clen int) { + bd := d.bd + if bd == mpNil { + clen = -1 // to represent nil + } else if bd == ct.b8 { + clen = int(d.r.readn1()) + } else if bd == ct.b16 { + clen = int(bigen.Uint16(d.r.readx(2))) + } else if bd == ct.b32 { + clen = int(bigen.Uint32(d.r.readx(4))) + } else if (ct.bFixMin & bd) == ct.bFixMin { + clen = int(ct.bFixMin ^ bd) + } else { + d.d.errorf("readContainerLen: %s: hex: %x, decimal: %d", msgBadDesc, bd, bd) + return + } + d.bdRead = false + return +} + +func (d *msgpackDecDriver) ReadMapStart() int { + return d.readContainerLen(msgpackContainerMap) +} + +func (d *msgpackDecDriver) ReadArrayStart() int { + return 
d.readContainerLen(msgpackContainerList) +} + +func (d *msgpackDecDriver) readExtLen() (clen int) { + switch d.bd { + case mpNil: + clen = -1 // to represent nil + case mpFixExt1: + clen = 1 + case mpFixExt2: + clen = 2 + case mpFixExt4: + clen = 4 + case mpFixExt8: + clen = 8 + case mpFixExt16: + clen = 16 + case mpExt8: + clen = int(d.r.readn1()) + case mpExt16: + clen = int(bigen.Uint16(d.r.readx(2))) + case mpExt32: + clen = int(bigen.Uint32(d.r.readx(4))) + default: + d.d.errorf("decoding ext bytes: found unexpected byte: %x", d.bd) + return + } + return +} + +func (d *msgpackDecDriver) DecodeExt(rv interface{}, xtag uint64, ext Ext) (realxtag uint64) { + if xtag > 0xff { + d.d.errorf("decodeExt: tag must be <= 0xff; got: %v", xtag) + return + } + realxtag1, xbs := d.decodeExtV(ext != nil, uint8(xtag)) + realxtag = uint64(realxtag1) + if ext == nil { + re := rv.(*RawExt) + re.Tag = realxtag + re.Data = detachZeroCopyBytes(d.br, re.Data, xbs) + } else { + ext.ReadExt(rv, xbs) + } + return +} + +func (d *msgpackDecDriver) decodeExtV(verifyTag bool, tag byte) (xtag byte, xbs []byte) { + if !d.bdRead { + d.readNextBd() + } + xbd := d.bd + if xbd == mpBin8 || xbd == mpBin16 || xbd == mpBin32 { + xbs = d.DecodeBytes(nil, false, true) + } else if xbd == mpStr8 || xbd == mpStr16 || xbd == mpStr32 || + (xbd >= mpFixStrMin && xbd <= mpFixStrMax) { + xbs = d.DecodeBytes(nil, true, true) + } else { + clen := d.readExtLen() + xtag = d.r.readn1() + if verifyTag && xtag != tag { + d.d.errorf("Wrong extension tag. Got %b. Expecting: %v", xtag, tag) + return + } + xbs = d.r.readx(clen) + } + d.bdRead = false + return +} + +//-------------------------------------------------- + +//MsgpackHandle is a Handle for the Msgpack Schema-Free Encoding Format. +type MsgpackHandle struct { + BasicHandle + + // RawToString controls how raw bytes are decoded into a nil interface{}. + RawToString bool + + // WriteExt flag supports encoding configured extensions with extension tags. + // It also controls whether other elements of the new spec are encoded (ie Str8). + // + // With WriteExt=false, configured extensions are serialized as raw bytes + // and Str8 is not encoded. + // + // A stream can still be decoded into a typed value, provided an appropriate value + // is provided, but the type cannot be inferred from the stream. If no appropriate + // type is provided (e.g. decoding into a nil interface{}), you get back + // a []byte or string based on the setting of RawToString. + WriteExt bool + binaryEncodingType +} + +func (h *MsgpackHandle) SetBytesExt(rt reflect.Type, tag uint64, ext BytesExt) (err error) { + return h.SetExt(rt, tag, &setExtWrapper{b: ext}) +} + +func (h *MsgpackHandle) newEncDriver(e *Encoder) encDriver { + return &msgpackEncDriver{e: e, w: e.w, h: h} +} + +func (h *MsgpackHandle) newDecDriver(d *Decoder) decDriver { + return &msgpackDecDriver{d: d, r: d.r, h: h, br: d.bytes} +} + +func (e *msgpackEncDriver) reset() { + e.w = e.e.w +} + +func (d *msgpackDecDriver) reset() { + d.r = d.d.r +} + +//-------------------------------------------------- + +type msgpackSpecRpcCodec struct { + rpcCodec +} + +// /////////////// Spec RPC Codec /////////////////// +func (c *msgpackSpecRpcCodec) WriteRequest(r *rpc.Request, body interface{}) error { + // WriteRequest can write to both a Go service, and other services that do + // not abide by the 1 argument rule of a Go service. 
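In wire terms, the request that WriteRequest builds (its body continues below) is the msgpack-RPC frame [msgtype=0, seq, method, params] encoded as one four-element array, which is why parseCustomHeader, further down, insists the first byte is 0x94 — a fixarray of four items. Encoding an equivalent slice by hand shows the framing; the method name and arguments here are purely illustrative:

package main

import (
	"fmt"

	"github.com/ugorji/go/codec" // assumed import path; vendored path differs
)

func main() {
	var h codec.MsgpackHandle
	frame := []interface{}{0, uint32(7), "Service.Method", []interface{}{"arg1", "arg2"}}
	var b []byte
	if err := codec.NewEncoderBytes(&b, &h).Encode(frame); err != nil {
		panic(err)
	}
	fmt.Printf("% x\n", b) // first byte 94: the four-item fixarray descriptor
}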
+ // We discriminate based on if the body is a MsgpackSpecRpcMultiArgs + var bodyArr []interface{} + if m, ok := body.(MsgpackSpecRpcMultiArgs); ok { + bodyArr = ([]interface{})(m) + } else { + bodyArr = []interface{}{body} + } + r2 := []interface{}{0, uint32(r.Seq), r.ServiceMethod, bodyArr} + return c.write(r2, nil, false, true) +} + +func (c *msgpackSpecRpcCodec) WriteResponse(r *rpc.Response, body interface{}) error { + var moe interface{} + if r.Error != "" { + moe = r.Error + } + if moe != nil && body != nil { + body = nil + } + r2 := []interface{}{1, uint32(r.Seq), moe, body} + return c.write(r2, nil, false, true) +} + +func (c *msgpackSpecRpcCodec) ReadResponseHeader(r *rpc.Response) error { + return c.parseCustomHeader(1, &r.Seq, &r.Error) +} + +func (c *msgpackSpecRpcCodec) ReadRequestHeader(r *rpc.Request) error { + return c.parseCustomHeader(0, &r.Seq, &r.ServiceMethod) +} + +func (c *msgpackSpecRpcCodec) ReadRequestBody(body interface{}) error { + if body == nil { // read and discard + return c.read(nil) + } + bodyArr := []interface{}{body} + return c.read(&bodyArr) +} + +func (c *msgpackSpecRpcCodec) parseCustomHeader(expectTypeByte byte, msgid *uint64, methodOrError *string) (err error) { + + if c.isClosed() { + return io.EOF + } + + // We read the response header by hand + // so that the body can be decoded on its own from the stream at a later time. + + const fia byte = 0x94 //four item array descriptor value + // Not sure why the panic of EOF is swallowed above. + // if bs1 := c.dec.r.readn1(); bs1 != fia { + // err = fmt.Errorf("Unexpected value for array descriptor: Expecting %v. Received %v", fia, bs1) + // return + // } + var b byte + b, err = c.br.ReadByte() + if err != nil { + return + } + if b != fia { + err = fmt.Errorf("Unexpected value for array descriptor: Expecting %v. Received %v", fia, b) + return + } + + if err = c.read(&b); err != nil { + return + } + if b != expectTypeByte { + err = fmt.Errorf("Unexpected byte descriptor in header. Expecting %v. Received %v", expectTypeByte, b) + return + } + if err = c.read(msgid); err != nil { + return + } + if err = c.read(methodOrError); err != nil { + return + } + return +} + +//-------------------------------------------------- + +// msgpackSpecRpc is the implementation of Rpc that uses custom communication protocol +// as defined in the msgpack spec at https://github.com/msgpack-rpc/msgpack-rpc/blob/master/spec.md +type msgpackSpecRpc struct{} + +// MsgpackSpecRpc implements Rpc using the communication protocol defined in +// the msgpack spec at https://github.com/msgpack-rpc/msgpack-rpc/blob/master/spec.md . +// Its methods (ServerCodec and ClientCodec) return values that implement RpcCodecBuffered. +var MsgpackSpecRpc msgpackSpecRpc + +func (x msgpackSpecRpc) ServerCodec(conn io.ReadWriteCloser, h Handle) rpc.ServerCodec { + return &msgpackSpecRpcCodec{newRPCCodec(conn, h)} +} + +func (x msgpackSpecRpc) ClientCodec(conn io.ReadWriteCloser, h Handle) rpc.ClientCodec { + return &msgpackSpecRpcCodec{newRPCCodec(conn, h)} +} + +var _ decDriver = (*msgpackDecDriver)(nil) +var _ encDriver = (*msgpackEncDriver)(nil) diff --git a/vendor/github.com/coreos/etcd/Godeps/_workspace/src/github.com/ugorji/go/codec/noop.go b/vendor/github.com/coreos/etcd/Godeps/_workspace/src/github.com/ugorji/go/codec/noop.go new file mode 100644 index 000000000..cfee3d084 --- /dev/null +++ b/vendor/github.com/coreos/etcd/Godeps/_workspace/src/github.com/ugorji/go/codec/noop.go @@ -0,0 +1,213 @@ +// Copyright (c) 2012-2015 Ugorji Nwoke. 
All rights reserved. +// Use of this source code is governed by a MIT license found in the LICENSE file. + +package codec + +import ( + "math/rand" + "time" +) + +// NoopHandle returns a no-op handle. It basically does nothing. +// It is only useful for benchmarking, as it gives an idea of the +// overhead from the codec framework. +// +// LIBRARY USERS: *** DO NOT USE *** +func NoopHandle(slen int) *noopHandle { + h := noopHandle{} + h.rand = rand.New(rand.NewSource(time.Now().UnixNano())) + h.B = make([][]byte, slen) + h.S = make([]string, slen) + for i := 0; i < len(h.S); i++ { + b := make([]byte, i+1) + for j := 0; j < len(b); j++ { + b[j] = 'a' + byte(i) + } + h.B[i] = b + h.S[i] = string(b) + } + return &h +} + +// noopHandle does nothing. +// It is used to simulate the overhead of the codec framework. +type noopHandle struct { + BasicHandle + binaryEncodingType + noopDrv // noopDrv is unexported here, so we can get a copy of it when needed. +} + +type noopDrv struct { + d *Decoder + e *Encoder + i int + S []string + B [][]byte + mks []bool // stack. if map (true), else if array (false) + mk bool // top of stack. what container are we on? map or array? + ct valueType // last response for IsContainerType. + cb int // counter for ContainerType + rand *rand.Rand +} + +func (h *noopDrv) r(v int) int { return h.rand.Intn(v) } +func (h *noopDrv) m(v int) int { h.i++; return h.i % v } + +func (h *noopDrv) newEncDriver(e *Encoder) encDriver { h.e = e; return h } +func (h *noopDrv) newDecDriver(d *Decoder) decDriver { h.d = d; return h } + +func (h *noopDrv) reset() {} +func (h *noopDrv) uncacheRead() {} + +// --- encDriver + +// stack functions (for map and array) +func (h *noopDrv) start(b bool) { + // println("start", len(h.mks)+1) + h.mks = append(h.mks, b) + h.mk = b +} +func (h *noopDrv) end() { + // println("end: ", len(h.mks)-1) + h.mks = h.mks[:len(h.mks)-1] + if len(h.mks) > 0 { + h.mk = h.mks[len(h.mks)-1] + } else { + h.mk = false + } +} + +func (h *noopDrv) EncodeBuiltin(rt uintptr, v interface{}) {} +func (h *noopDrv) EncodeNil() {} +func (h *noopDrv) EncodeInt(i int64) {} +func (h *noopDrv) EncodeUint(i uint64) {} +func (h *noopDrv) EncodeBool(b bool) {} +func (h *noopDrv) EncodeFloat32(f float32) {} +func (h *noopDrv) EncodeFloat64(f float64) {} +func (h *noopDrv) EncodeRawExt(re *RawExt, e *Encoder) {} +func (h *noopDrv) EncodeArrayStart(length int) { h.start(true) } +func (h *noopDrv) EncodeMapStart(length int) { h.start(false) } +func (h *noopDrv) EncodeEnd() { h.end() } + +func (h *noopDrv) EncodeString(c charEncoding, v string) {} +func (h *noopDrv) EncodeSymbol(v string) {} +func (h *noopDrv) EncodeStringBytes(c charEncoding, v []byte) {} + +func (h *noopDrv) EncodeExt(rv interface{}, xtag uint64, ext Ext, e *Encoder) {} + +// ---- decDriver +func (h *noopDrv) initReadNext() {} +func (h *noopDrv) CheckBreak() bool { return false } +func (h *noopDrv) IsBuiltinType(rt uintptr) bool { return false } +func (h *noopDrv) DecodeBuiltin(rt uintptr, v interface{}) {} +func (h *noopDrv) DecodeInt(bitsize uint8) (i int64) { return int64(h.m(15)) } +func (h *noopDrv) DecodeUint(bitsize uint8) (ui uint64) { return uint64(h.m(35)) } +func (h *noopDrv) DecodeFloat(chkOverflow32 bool) (f float64) { return float64(h.m(95)) } +func (h *noopDrv) DecodeBool() (b bool) { return h.m(2) == 0 } +func (h *noopDrv) DecodeString() (s string) { return h.S[h.m(8)] } + +// func (h *noopDrv) DecodeStringAsBytes(bs []byte) []byte { return h.DecodeBytes(bs) } + +func (h *noopDrv) DecodeBytes(bs []byte, 
isstring, zerocopy bool) []byte { return h.B[h.m(len(h.B))] } + +func (h *noopDrv) ReadEnd() { h.end() } + +// toggle map/slice +func (h *noopDrv) ReadMapStart() int { h.start(true); return h.m(10) } +func (h *noopDrv) ReadArrayStart() int { h.start(false); return h.m(10) } + +func (h *noopDrv) ContainerType() (vt valueType) { + // return h.m(2) == 0 + // handle kStruct, which will bomb is it calls this and doesn't get back a map or array. + // consequently, if the return value is not map or array, reset it to one of them based on h.m(7) % 2 + // for kstruct: at least one out of every 2 times, return one of valueTypeMap or Array (else kstruct bombs) + // however, every 10th time it is called, we just return something else. + var vals = [...]valueType{valueTypeArray, valueTypeMap} + // ------------ TAKE ------------ + // if h.cb%2 == 0 { + // if h.ct == valueTypeMap || h.ct == valueTypeArray { + // } else { + // h.ct = vals[h.m(2)] + // } + // } else if h.cb%5 == 0 { + // h.ct = valueType(h.m(8)) + // } else { + // h.ct = vals[h.m(2)] + // } + // ------------ TAKE ------------ + // if h.cb%16 == 0 { + // h.ct = valueType(h.cb % 8) + // } else { + // h.ct = vals[h.cb%2] + // } + h.ct = vals[h.cb%2] + h.cb++ + return h.ct + + // if h.ct == valueTypeNil || h.ct == valueTypeString || h.ct == valueTypeBytes { + // return h.ct + // } + // return valueTypeUnset + // TODO: may need to tweak this so it works. + // if h.ct == valueTypeMap && vt == valueTypeArray || h.ct == valueTypeArray && vt == valueTypeMap { + // h.cb = !h.cb + // h.ct = vt + // return h.cb + // } + // // go in a loop and check it. + // h.ct = vt + // h.cb = h.m(7) == 0 + // return h.cb +} +func (h *noopDrv) TryDecodeAsNil() bool { + if h.mk { + return false + } else { + return h.m(8) == 0 + } +} +func (h *noopDrv) DecodeExt(rv interface{}, xtag uint64, ext Ext) uint64 { + return 0 +} + +func (h *noopDrv) DecodeNaked() { + // use h.r (random) not h.m() because h.m() could cause the same value to be given. 
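As noted at the top of this file, NoopHandle exists purely so benchmarks can measure the codec framework's own overhead; the randomized values DecodeNaked produces below just keep the decode paths exercised. A benchmark built on it might look like the following sketch (hedged: NoopHandle is explicitly not for production use, and the import path is assumed):

package codec_test

import (
	"testing"

	"github.com/ugorji/go/codec" // assumed import path; vendored path differs
)

// BenchmarkFrameworkOverhead measures the cost of the encode machinery
// itself: the noop driver writes nothing, so all time is framework.
func BenchmarkFrameworkOverhead(b *testing.B) {
	h := codec.NoopHandle(8)
	v := map[string]int{"a": 1, "b": 2}
	var out []byte
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		out = out[:0]
		if err := codec.NewEncoderBytes(&out, h).Encode(v); err != nil {
			b.Fatal(err)
		}
	}
}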
+ var sk int + if h.mk { + // if mapkey, do not support values of nil OR bytes, array, map or rawext + sk = h.r(7) + 1 + } else { + sk = h.r(12) + } + n := &h.d.n + switch sk { + case 0: + n.v = valueTypeNil + case 1: + n.v, n.b = valueTypeBool, false + case 2: + n.v, n.b = valueTypeBool, true + case 3: + n.v, n.i = valueTypeInt, h.DecodeInt(64) + case 4: + n.v, n.u = valueTypeUint, h.DecodeUint(64) + case 5: + n.v, n.f = valueTypeFloat, h.DecodeFloat(true) + case 6: + n.v, n.f = valueTypeFloat, h.DecodeFloat(false) + case 7: + n.v, n.s = valueTypeString, h.DecodeString() + case 8: + n.v, n.l = valueTypeBytes, h.B[h.m(len(h.B))] + case 9: + n.v = valueTypeArray + case 10: + n.v = valueTypeMap + default: + n.v = valueTypeExt + n.u = h.DecodeUint(64) + n.l = h.B[h.m(len(h.B))] + } + h.ct = n.v + return +} diff --git a/vendor/github.com/coreos/etcd/Godeps/_workspace/src/github.com/ugorji/go/codec/prebuild.go b/vendor/github.com/coreos/etcd/Godeps/_workspace/src/github.com/ugorji/go/codec/prebuild.go new file mode 100644 index 000000000..2353263e8 --- /dev/null +++ b/vendor/github.com/coreos/etcd/Godeps/_workspace/src/github.com/ugorji/go/codec/prebuild.go @@ -0,0 +1,3 @@ +package codec + +//go:generate bash prebuild.sh diff --git a/vendor/github.com/coreos/etcd/Godeps/_workspace/src/github.com/ugorji/go/codec/prebuild.sh b/vendor/github.com/coreos/etcd/Godeps/_workspace/src/github.com/ugorji/go/codec/prebuild.sh new file mode 100644 index 000000000..98f442487 --- /dev/null +++ b/vendor/github.com/coreos/etcd/Godeps/_workspace/src/github.com/ugorji/go/codec/prebuild.sh @@ -0,0 +1,199 @@ +#!/bin/bash + +# _needgen is a helper function to tell if we need to generate files for msgp, codecgen. +_needgen() { + local a="$1" + zneedgen=0 + if [[ ! -e "$a" ]] + then + zneedgen=1 + echo 1 + return 0 + fi + for i in `ls -1 *.go.tmpl gen.go values_test.go` + do + if [[ "$a" -ot "$i" ]] + then + zneedgen=1 + echo 1 + return 0 + fi + done + echo 0 +} + +# _build generates fast-path.go and gen-helper.go. +# +# It is needed because there is some dependency between the generated code +# and the other classes. Consequently, we have to totally remove the +# generated files and put stubs in place, before calling "go run" again +# to recreate them. +_build() { + if ! 
[[ "${zforce}" == "1" || + "1" == $( _needgen "fast-path.generated.go" ) || + "1" == $( _needgen "gen-helper.generated.go" ) || + "1" == $( _needgen "gen.generated.go" ) || + 1 == 0 ]] + then + return 0 + fi + + # echo "Running prebuild" + if [ "${zbak}" == "1" ] + then + # echo "Backing up old generated files" + _zts=`date '+%m%d%Y_%H%M%S'` + _gg=".generated.go" + [ -e "gen-helper${_gg}" ] && mv gen-helper${_gg} gen-helper${_gg}__${_zts}.bak + [ -e "fast-path${_gg}" ] && mv fast-path${_gg} fast-path${_gg}__${_zts}.bak + # [ -e "safe${_gg}" ] && mv safe${_gg} safe${_gg}__${_zts}.bak + # [ -e "unsafe${_gg}" ] && mv unsafe${_gg} unsafe${_gg}__${_zts}.bak + else + rm -f fast-path.generated.go gen.generated.go gen-helper.generated.go \ + *safe.generated.go *_generated_test.go *.generated_ffjson_expose.go + fi + + cat > gen.generated.go <> gen.generated.go < gen-dec-map.go.tmpl + + cat >> gen.generated.go <> gen.generated.go < gen-dec-array.go.tmpl + + cat >> gen.generated.go < gen-from-tmpl.codec.generated.go < gen-from-tmpl.generated.go < math.MaxInt64 { + // d.d.errorf("decIntAny: Integer out of range for signed int64: %v", ui) + // return + // } + return +} + +func (d *simpleDecDriver) DecodeInt(bitsize uint8) (i int64) { + ui, neg := d.decCheckInteger() + i, overflow := chkOvf.SignedInt(ui) + if overflow { + d.d.errorf("simple: overflow converting %v to signed integer", ui) + return + } + if neg { + i = -i + } + if chkOvf.Int(i, bitsize) { + d.d.errorf("simple: overflow integer: %v", i) + return + } + d.bdRead = false + return +} + +func (d *simpleDecDriver) DecodeUint(bitsize uint8) (ui uint64) { + ui, neg := d.decCheckInteger() + if neg { + d.d.errorf("Assigning negative signed value to unsigned type") + return + } + if chkOvf.Uint(ui, bitsize) { + d.d.errorf("simple: overflow integer: %v", ui) + return + } + d.bdRead = false + return +} + +func (d *simpleDecDriver) DecodeFloat(chkOverflow32 bool) (f float64) { + if !d.bdRead { + d.readNextBd() + } + if d.bd == simpleVdFloat32 { + f = float64(math.Float32frombits(bigen.Uint32(d.r.readx(4)))) + } else if d.bd == simpleVdFloat64 { + f = math.Float64frombits(bigen.Uint64(d.r.readx(8))) + } else { + if d.bd >= simpleVdPosInt && d.bd <= simpleVdNegInt+3 { + f = float64(d.DecodeInt(64)) + } else { + d.d.errorf("Float only valid from float32/64: Invalid descriptor: %v", d.bd) + return + } + } + if chkOverflow32 && chkOvf.Float32(f) { + d.d.errorf("msgpack: float32 overflow: %v", f) + return + } + d.bdRead = false + return +} + +// bool can be decoded from bool only (single byte). 
+func (d *simpleDecDriver) DecodeBool() (b bool) { + if !d.bdRead { + d.readNextBd() + } + if d.bd == simpleVdTrue { + b = true + } else if d.bd == simpleVdFalse { + } else { + d.d.errorf("Invalid single-byte value for bool: %s: %x", msgBadDesc, d.bd) + return + } + d.bdRead = false + return +} + +func (d *simpleDecDriver) ReadMapStart() (length int) { + d.bdRead = false + return d.decLen() +} + +func (d *simpleDecDriver) ReadArrayStart() (length int) { + d.bdRead = false + return d.decLen() +} + +func (d *simpleDecDriver) decLen() int { + switch d.bd % 8 { + case 0: + return 0 + case 1: + return int(d.r.readn1()) + case 2: + return int(bigen.Uint16(d.r.readx(2))) + case 3: + ui := uint64(bigen.Uint32(d.r.readx(4))) + if chkOvf.Uint(ui, intBitsize) { + d.d.errorf("simple: overflow integer: %v", ui) + return 0 + } + return int(ui) + case 4: + ui := bigen.Uint64(d.r.readx(8)) + if chkOvf.Uint(ui, intBitsize) { + d.d.errorf("simple: overflow integer: %v", ui) + return 0 + } + return int(ui) + } + d.d.errorf("decLen: Cannot read length: bd%8 must be in range 0..4. Got: %d", d.bd%8) + return -1 +} + +func (d *simpleDecDriver) DecodeString() (s string) { + return string(d.DecodeBytes(d.b[:], true, true)) +} + +func (d *simpleDecDriver) DecodeBytes(bs []byte, isstring, zerocopy bool) (bsOut []byte) { + if !d.bdRead { + d.readNextBd() + } + if d.bd == simpleVdNil { + d.bdRead = false + return + } + clen := d.decLen() + d.bdRead = false + if zerocopy { + if d.br { + return d.r.readx(clen) + } else if len(bs) == 0 { + bs = d.b[:] + } + } + return decByteSlice(d.r, clen, bs) +} + +func (d *simpleDecDriver) DecodeExt(rv interface{}, xtag uint64, ext Ext) (realxtag uint64) { + if xtag > 0xff { + d.d.errorf("decodeExt: tag must be <= 0xff; got: %v", xtag) + return + } + realxtag1, xbs := d.decodeExtV(ext != nil, uint8(xtag)) + realxtag = uint64(realxtag1) + if ext == nil { + re := rv.(*RawExt) + re.Tag = realxtag + re.Data = detachZeroCopyBytes(d.br, re.Data, xbs) + } else { + ext.ReadExt(rv, xbs) + } + return +} + +func (d *simpleDecDriver) decodeExtV(verifyTag bool, tag byte) (xtag byte, xbs []byte) { + if !d.bdRead { + d.readNextBd() + } + switch d.bd { + case simpleVdExt, simpleVdExt + 1, simpleVdExt + 2, simpleVdExt + 3, simpleVdExt + 4: + l := d.decLen() + xtag = d.r.readn1() + if verifyTag && xtag != tag { + d.d.errorf("Wrong extension tag. Got %b. Expecting: %v", xtag, tag) + return + } + xbs = d.r.readx(l) + case simpleVdByteArray, simpleVdByteArray + 1, simpleVdByteArray + 2, simpleVdByteArray + 3, simpleVdByteArray + 4: + xbs = d.DecodeBytes(nil, false, true) + default: + d.d.errorf("Invalid d.bd for extensions (Expecting extensions or byte array). 
Got: 0x%x", d.bd) + return + } + d.bdRead = false + return +} + +func (d *simpleDecDriver) DecodeNaked() { + if !d.bdRead { + d.readNextBd() + } + + n := &d.d.n + var decodeFurther bool + + switch d.bd { + case simpleVdNil: + n.v = valueTypeNil + case simpleVdFalse: + n.v = valueTypeBool + n.b = false + case simpleVdTrue: + n.v = valueTypeBool + n.b = true + case simpleVdPosInt, simpleVdPosInt + 1, simpleVdPosInt + 2, simpleVdPosInt + 3: + if d.h.SignedInteger { + n.v = valueTypeInt + n.i = d.DecodeInt(64) + } else { + n.v = valueTypeUint + n.u = d.DecodeUint(64) + } + case simpleVdNegInt, simpleVdNegInt + 1, simpleVdNegInt + 2, simpleVdNegInt + 3: + n.v = valueTypeInt + n.i = d.DecodeInt(64) + case simpleVdFloat32: + n.v = valueTypeFloat + n.f = d.DecodeFloat(true) + case simpleVdFloat64: + n.v = valueTypeFloat + n.f = d.DecodeFloat(false) + case simpleVdString, simpleVdString + 1, simpleVdString + 2, simpleVdString + 3, simpleVdString + 4: + n.v = valueTypeString + n.s = d.DecodeString() + case simpleVdByteArray, simpleVdByteArray + 1, simpleVdByteArray + 2, simpleVdByteArray + 3, simpleVdByteArray + 4: + n.v = valueTypeBytes + n.l = d.DecodeBytes(nil, false, false) + case simpleVdExt, simpleVdExt + 1, simpleVdExt + 2, simpleVdExt + 3, simpleVdExt + 4: + n.v = valueTypeExt + l := d.decLen() + n.u = uint64(d.r.readn1()) + n.l = d.r.readx(l) + case simpleVdArray, simpleVdArray + 1, simpleVdArray + 2, simpleVdArray + 3, simpleVdArray + 4: + n.v = valueTypeArray + decodeFurther = true + case simpleVdMap, simpleVdMap + 1, simpleVdMap + 2, simpleVdMap + 3, simpleVdMap + 4: + n.v = valueTypeMap + decodeFurther = true + default: + d.d.errorf("decodeNaked: Unrecognized d.bd: 0x%x", d.bd) + } + + if !decodeFurther { + d.bdRead = false + } + return +} + +//------------------------------------ + +// SimpleHandle is a Handle for a very simple encoding format. +// +// simple is a simplistic codec similar to binc, but not as compact. +// - Encoding of a value is always preceeded by the descriptor byte (bd) +// - True, false, nil are encoded fully in 1 byte (the descriptor) +// - Integers (intXXX, uintXXX) are encoded in 1, 2, 4 or 8 bytes (plus a descriptor byte). +// There are positive (uintXXX and intXXX >= 0) and negative (intXXX < 0) integers. +// - Floats are encoded in 4 or 8 bytes (plus a descriptor byte) +// - Lenght of containers (strings, bytes, array, map, extensions) +// are encoded in 0, 1, 2, 4 or 8 bytes. +// Zero-length containers have no length encoded. +// For others, the number of bytes is given by pow(2, bd%3) +// - maps are encoded as [bd] [length] [[key][value]]... +// - arrays are encoded as [bd] [length] [value]... +// - extensions are encoded as [bd] [length] [tag] [byte]... +// - strings/bytearrays are encoded as [bd] [length] [byte]... +// +// The full spec will be published soon. 
+type SimpleHandle struct { + BasicHandle + binaryEncodingType +} + +func (h *SimpleHandle) SetBytesExt(rt reflect.Type, tag uint64, ext BytesExt) (err error) { + return h.SetExt(rt, tag, &setExtWrapper{b: ext}) +} + +func (h *SimpleHandle) newEncDriver(e *Encoder) encDriver { + return &simpleEncDriver{e: e, w: e.w, h: h} +} + +func (h *SimpleHandle) newDecDriver(d *Decoder) decDriver { + return &simpleDecDriver{d: d, r: d.r, h: h, br: d.bytes} +} + +func (e *simpleEncDriver) reset() { + e.w = e.e.w +} + +func (d *simpleDecDriver) reset() { + d.r = d.d.r +} + +var _ decDriver = (*simpleDecDriver)(nil) +var _ encDriver = (*simpleEncDriver)(nil) diff --git a/vendor/github.com/coreos/etcd/Godeps/_workspace/src/github.com/ugorji/go/codec/test-cbor-goldens.json b/vendor/github.com/coreos/etcd/Godeps/_workspace/src/github.com/ugorji/go/codec/test-cbor-goldens.json new file mode 100644 index 000000000..902858671 --- /dev/null +++ b/vendor/github.com/coreos/etcd/Godeps/_workspace/src/github.com/ugorji/go/codec/test-cbor-goldens.json @@ -0,0 +1,639 @@ +[ + { + "cbor": "AA==", + "hex": "00", + "roundtrip": true, + "decoded": 0 + }, + { + "cbor": "AQ==", + "hex": "01", + "roundtrip": true, + "decoded": 1 + }, + { + "cbor": "Cg==", + "hex": "0a", + "roundtrip": true, + "decoded": 10 + }, + { + "cbor": "Fw==", + "hex": "17", + "roundtrip": true, + "decoded": 23 + }, + { + "cbor": "GBg=", + "hex": "1818", + "roundtrip": true, + "decoded": 24 + }, + { + "cbor": "GBk=", + "hex": "1819", + "roundtrip": true, + "decoded": 25 + }, + { + "cbor": "GGQ=", + "hex": "1864", + "roundtrip": true, + "decoded": 100 + }, + { + "cbor": "GQPo", + "hex": "1903e8", + "roundtrip": true, + "decoded": 1000 + }, + { + "cbor": "GgAPQkA=", + "hex": "1a000f4240", + "roundtrip": true, + "decoded": 1000000 + }, + { + "cbor": "GwAAAOjUpRAA", + "hex": "1b000000e8d4a51000", + "roundtrip": true, + "decoded": 1000000000000 + }, + { + "cbor": "G///////////", + "hex": "1bffffffffffffffff", + "roundtrip": true, + "decoded": 18446744073709551615 + }, + { + "cbor": "wkkBAAAAAAAAAAA=", + "hex": "c249010000000000000000", + "roundtrip": true, + "decoded": 18446744073709551616 + }, + { + "cbor": "O///////////", + "hex": "3bffffffffffffffff", + "roundtrip": true, + "decoded": -18446744073709551616, + "skip": true + }, + { + "cbor": "w0kBAAAAAAAAAAA=", + "hex": "c349010000000000000000", + "roundtrip": true, + "decoded": -18446744073709551617 + }, + { + "cbor": "IA==", + "hex": "20", + "roundtrip": true, + "decoded": -1 + }, + { + "cbor": "KQ==", + "hex": "29", + "roundtrip": true, + "decoded": -10 + }, + { + "cbor": "OGM=", + "hex": "3863", + "roundtrip": true, + "decoded": -100 + }, + { + "cbor": "OQPn", + "hex": "3903e7", + "roundtrip": true, + "decoded": -1000 + }, + { + "cbor": "+QAA", + "hex": "f90000", + "roundtrip": true, + "decoded": 0.0 + }, + { + "cbor": "+YAA", + "hex": "f98000", + "roundtrip": true, + "decoded": -0.0 + }, + { + "cbor": "+TwA", + "hex": "f93c00", + "roundtrip": true, + "decoded": 1.0 + }, + { + "cbor": "+z/xmZmZmZma", + "hex": "fb3ff199999999999a", + "roundtrip": true, + "decoded": 1.1 + }, + { + "cbor": "+T4A", + "hex": "f93e00", + "roundtrip": true, + "decoded": 1.5 + }, + { + "cbor": "+Xv/", + "hex": "f97bff", + "roundtrip": true, + "decoded": 65504.0 + }, + { + "cbor": "+kfDUAA=", + "hex": "fa47c35000", + "roundtrip": true, + "decoded": 100000.0 + }, + { + "cbor": "+n9///8=", + "hex": "fa7f7fffff", + "roundtrip": true, + "decoded": 3.4028234663852886e+38 + }, + { + "cbor": "+3435DyIAHWc", + "hex": 
"fb7e37e43c8800759c", + "roundtrip": true, + "decoded": 1.0e+300 + }, + { + "cbor": "+QAB", + "hex": "f90001", + "roundtrip": true, + "decoded": 5.960464477539063e-08 + }, + { + "cbor": "+QQA", + "hex": "f90400", + "roundtrip": true, + "decoded": 6.103515625e-05 + }, + { + "cbor": "+cQA", + "hex": "f9c400", + "roundtrip": true, + "decoded": -4.0 + }, + { + "cbor": "+8AQZmZmZmZm", + "hex": "fbc010666666666666", + "roundtrip": true, + "decoded": -4.1 + }, + { + "cbor": "+XwA", + "hex": "f97c00", + "roundtrip": true, + "diagnostic": "Infinity" + }, + { + "cbor": "+X4A", + "hex": "f97e00", + "roundtrip": true, + "diagnostic": "NaN" + }, + { + "cbor": "+fwA", + "hex": "f9fc00", + "roundtrip": true, + "diagnostic": "-Infinity" + }, + { + "cbor": "+n+AAAA=", + "hex": "fa7f800000", + "roundtrip": false, + "diagnostic": "Infinity" + }, + { + "cbor": "+n/AAAA=", + "hex": "fa7fc00000", + "roundtrip": false, + "diagnostic": "NaN" + }, + { + "cbor": "+v+AAAA=", + "hex": "faff800000", + "roundtrip": false, + "diagnostic": "-Infinity" + }, + { + "cbor": "+3/wAAAAAAAA", + "hex": "fb7ff0000000000000", + "roundtrip": false, + "diagnostic": "Infinity" + }, + { + "cbor": "+3/4AAAAAAAA", + "hex": "fb7ff8000000000000", + "roundtrip": false, + "diagnostic": "NaN" + }, + { + "cbor": "+//wAAAAAAAA", + "hex": "fbfff0000000000000", + "roundtrip": false, + "diagnostic": "-Infinity" + }, + { + "cbor": "9A==", + "hex": "f4", + "roundtrip": true, + "decoded": false + }, + { + "cbor": "9Q==", + "hex": "f5", + "roundtrip": true, + "decoded": true + }, + { + "cbor": "9g==", + "hex": "f6", + "roundtrip": true, + "decoded": null + }, + { + "cbor": "9w==", + "hex": "f7", + "roundtrip": true, + "diagnostic": "undefined" + }, + { + "cbor": "8A==", + "hex": "f0", + "roundtrip": true, + "diagnostic": "simple(16)" + }, + { + "cbor": "+Bg=", + "hex": "f818", + "roundtrip": true, + "diagnostic": "simple(24)" + }, + { + "cbor": "+P8=", + "hex": "f8ff", + "roundtrip": true, + "diagnostic": "simple(255)" + }, + { + "cbor": "wHQyMDEzLTAzLTIxVDIwOjA0OjAwWg==", + "hex": "c074323031332d30332d32315432303a30343a30305a", + "roundtrip": true, + "diagnostic": "0(\"2013-03-21T20:04:00Z\")" + }, + { + "cbor": "wRpRS2ew", + "hex": "c11a514b67b0", + "roundtrip": true, + "diagnostic": "1(1363896240)" + }, + { + "cbor": "wftB1FLZ7CAAAA==", + "hex": "c1fb41d452d9ec200000", + "roundtrip": true, + "diagnostic": "1(1363896240.5)" + }, + { + "cbor": "10QBAgME", + "hex": "d74401020304", + "roundtrip": true, + "diagnostic": "23(h'01020304')" + }, + { + "cbor": "2BhFZElFVEY=", + "hex": "d818456449455446", + "roundtrip": true, + "diagnostic": "24(h'6449455446')" + }, + { + "cbor": "2CB2aHR0cDovL3d3dy5leGFtcGxlLmNvbQ==", + "hex": "d82076687474703a2f2f7777772e6578616d706c652e636f6d", + "roundtrip": true, + "diagnostic": "32(\"http://www.example.com\")" + }, + { + "cbor": "QA==", + "hex": "40", + "roundtrip": true, + "diagnostic": "h''" + }, + { + "cbor": "RAECAwQ=", + "hex": "4401020304", + "roundtrip": true, + "diagnostic": "h'01020304'" + }, + { + "cbor": "YA==", + "hex": "60", + "roundtrip": true, + "decoded": "" + }, + { + "cbor": "YWE=", + "hex": "6161", + "roundtrip": true, + "decoded": "a" + }, + { + "cbor": "ZElFVEY=", + "hex": "6449455446", + "roundtrip": true, + "decoded": "IETF" + }, + { + "cbor": "YiJc", + "hex": "62225c", + "roundtrip": true, + "decoded": "\"\\" + }, + { + "cbor": "YsO8", + "hex": "62c3bc", + "roundtrip": true, + "decoded": "ü" + }, + { + "cbor": "Y+awtA==", + "hex": "63e6b0b4", + "roundtrip": true, + "decoded": "æ°´" + }, + { + 
"cbor": "ZPCQhZE=", + "hex": "64f0908591", + "roundtrip": true, + "decoded": "ð…‘" + }, + { + "cbor": "gA==", + "hex": "80", + "roundtrip": true, + "decoded": [ + + ] + }, + { + "cbor": "gwECAw==", + "hex": "83010203", + "roundtrip": true, + "decoded": [ + 1, + 2, + 3 + ] + }, + { + "cbor": "gwGCAgOCBAU=", + "hex": "8301820203820405", + "roundtrip": true, + "decoded": [ + 1, + [ + 2, + 3 + ], + [ + 4, + 5 + ] + ] + }, + { + "cbor": "mBkBAgMEBQYHCAkKCwwNDg8QERITFBUWFxgYGBk=", + "hex": "98190102030405060708090a0b0c0d0e0f101112131415161718181819", + "roundtrip": true, + "decoded": [ + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9, + 10, + 11, + 12, + 13, + 14, + 15, + 16, + 17, + 18, + 19, + 20, + 21, + 22, + 23, + 24, + 25 + ] + }, + { + "cbor": "oA==", + "hex": "a0", + "roundtrip": true, + "decoded": { + } + }, + { + "cbor": "ogECAwQ=", + "hex": "a201020304", + "roundtrip": true, + "skip": true, + "diagnostic": "{1: 2, 3: 4}" + }, + { + "cbor": "omFhAWFiggID", + "hex": "a26161016162820203", + "roundtrip": true, + "decoded": { + "a": 1, + "b": [ + 2, + 3 + ] + } + }, + { + "cbor": "gmFhoWFiYWM=", + "hex": "826161a161626163", + "roundtrip": true, + "decoded": [ + "a", + { + "b": "c" + } + ] + }, + { + "cbor": "pWFhYUFhYmFCYWNhQ2FkYURhZWFF", + "hex": "a56161614161626142616361436164614461656145", + "roundtrip": true, + "decoded": { + "a": "A", + "b": "B", + "c": "C", + "d": "D", + "e": "E" + } + }, + { + "cbor": "X0IBAkMDBAX/", + "hex": "5f42010243030405ff", + "roundtrip": false, + "skip": true, + "diagnostic": "(_ h'0102', h'030405')" + }, + { + "cbor": "f2VzdHJlYWRtaW5n/w==", + "hex": "7f657374726561646d696e67ff", + "roundtrip": false, + "decoded": "streaming" + }, + { + "cbor": "n/8=", + "hex": "9fff", + "roundtrip": false, + "decoded": [ + + ] + }, + { + "cbor": "nwGCAgOfBAX//w==", + "hex": "9f018202039f0405ffff", + "roundtrip": false, + "decoded": [ + 1, + [ + 2, + 3 + ], + [ + 4, + 5 + ] + ] + }, + { + "cbor": "nwGCAgOCBAX/", + "hex": "9f01820203820405ff", + "roundtrip": false, + "decoded": [ + 1, + [ + 2, + 3 + ], + [ + 4, + 5 + ] + ] + }, + { + "cbor": "gwGCAgOfBAX/", + "hex": "83018202039f0405ff", + "roundtrip": false, + "decoded": [ + 1, + [ + 2, + 3 + ], + [ + 4, + 5 + ] + ] + }, + { + "cbor": "gwGfAgP/ggQF", + "hex": "83019f0203ff820405", + "roundtrip": false, + "decoded": [ + 1, + [ + 2, + 3 + ], + [ + 4, + 5 + ] + ] + }, + { + "cbor": "nwECAwQFBgcICQoLDA0ODxAREhMUFRYXGBgYGf8=", + "hex": "9f0102030405060708090a0b0c0d0e0f101112131415161718181819ff", + "roundtrip": false, + "decoded": [ + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9, + 10, + 11, + 12, + 13, + 14, + 15, + 16, + 17, + 18, + 19, + 20, + 21, + 22, + 23, + 24, + 25 + ] + }, + { + "cbor": "v2FhAWFinwID//8=", + "hex": "bf61610161629f0203ffff", + "roundtrip": false, + "decoded": { + "a": 1, + "b": [ + 2, + 3 + ] + } + }, + { + "cbor": "gmFhv2FiYWP/", + "hex": "826161bf61626163ff", + "roundtrip": false, + "decoded": [ + "a", + { + "b": "c" + } + ] + }, + { + "cbor": "v2NGdW71Y0FtdCH/", + "hex": "bf6346756ef563416d7421ff", + "roundtrip": false, + "decoded": { + "Fun": true, + "Amt": -2 + } + } +] diff --git a/vendor/github.com/coreos/etcd/Godeps/_workspace/src/github.com/ugorji/go/codec/test.py b/vendor/github.com/coreos/etcd/Godeps/_workspace/src/github.com/ugorji/go/codec/test.py new file mode 100644 index 000000000..dfe3b0c9a --- /dev/null +++ b/vendor/github.com/coreos/etcd/Godeps/_workspace/src/github.com/ugorji/go/codec/test.py @@ -0,0 +1,120 @@ +#!/usr/bin/env python + +# This will create golden files in a directory passed to 
it. +# A Test calls this internally to create the golden files +# So it can process them (so we don't have to checkin the files). + +# Ensure msgpack-python and cbor are installed first, using: +# sudo apt-get install python-dev +# sudo apt-get install python-pip +# pip install --user msgpack-python msgpack-rpc-python cbor + +import cbor, msgpack, msgpackrpc, sys, os, threading + +def get_test_data_list(): + # get list with all primitive types, and a combo type + l0 = [ + -8, + -1616, + -32323232, + -6464646464646464, + 192, + 1616, + 32323232, + 6464646464646464, + 192, + -3232.0, + -6464646464.0, + 3232.0, + 6464646464.0, + False, + True, + None, + u"someday", + u"", + u"bytestring", + 1328176922000002000, + -2206187877999998000, + 270, + -2013855847999995777, + #-6795364578871345152, + ] + l1 = [ + { "true": True, + "false": False }, + { "true": "True", + "false": False, + "uint16(1616)": 1616 }, + { "list": [1616, 32323232, True, -3232.0, {"TRUE":True, "FALSE":False}, [True, False] ], + "int32":32323232, "bool": True, + "LONG STRING": "123456789012345678901234567890123456789012345678901234567890", + "SHORT STRING": "1234567890" }, + { True: "true", 8: False, "false": 0 } + ] + + l = [] + l.extend(l0) + l.append(l0) + l.extend(l1) + return l + +def build_test_data(destdir): + l = get_test_data_list() + for i in range(len(l)): + # packer = msgpack.Packer() + serialized = msgpack.dumps(l[i]) + f = open(os.path.join(destdir, str(i) + '.msgpack.golden'), 'wb') + f.write(serialized) + f.close() + serialized = cbor.dumps(l[i]) + f = open(os.path.join(destdir, str(i) + '.cbor.golden'), 'wb') + f.write(serialized) + f.close() + +def doRpcServer(port, stopTimeSec): + class EchoHandler(object): + def Echo123(self, msg1, msg2, msg3): + return ("1:%s 2:%s 3:%s" % (msg1, msg2, msg3)) + def EchoStruct(self, msg): + return ("%s" % msg) + + addr = msgpackrpc.Address('localhost', port) + server = msgpackrpc.Server(EchoHandler()) + server.listen(addr) + # run thread to stop it after stopTimeSec seconds if > 0 + if stopTimeSec > 0: + def myStopRpcServer(): + server.stop() + t = threading.Timer(stopTimeSec, myStopRpcServer) + t.start() + server.start() + +def doRpcClientToPythonSvc(port): + address = msgpackrpc.Address('localhost', port) + client = msgpackrpc.Client(address, unpack_encoding='utf-8') + print client.call("Echo123", "A1", "B2", "C3") + print client.call("EchoStruct", {"A" :"Aa", "B":"Bb", "C":"Cc"}) + +def doRpcClientToGoSvc(port): + # print ">>>> port: ", port, " <<<<<" + address = msgpackrpc.Address('localhost', port) + client = msgpackrpc.Client(address, unpack_encoding='utf-8') + print client.call("TestRpcInt.Echo123", ["A1", "B2", "C3"]) + print client.call("TestRpcInt.EchoStruct", {"A" :"Aa", "B":"Bb", "C":"Cc"}) + +def doMain(args): + if len(args) == 2 and args[0] == "testdata": + build_test_data(args[1]) + elif len(args) == 3 and args[0] == "rpc-server": + doRpcServer(int(args[1]), int(args[2])) + elif len(args) == 2 and args[0] == "rpc-client-python-service": + doRpcClientToPythonSvc(int(args[1])) + elif len(args) == 2 and args[0] == "rpc-client-go-service": + doRpcClientToGoSvc(int(args[1])) + else: + print("Usage: test.py " + + "[testdata|rpc-server|rpc-client-python-service|rpc-client-go-service] ...") + +if __name__ == "__main__": + doMain(sys.argv[1:]) + diff --git a/vendor/github.com/coreos/etcd/Godeps/_workspace/src/github.com/ugorji/go/codec/tests.sh b/vendor/github.com/coreos/etcd/Godeps/_workspace/src/github.com/ugorji/go/codec/tests.sh new file mode 100644 index 
000000000..b1602ea7e --- /dev/null +++ b/vendor/github.com/coreos/etcd/Godeps/_workspace/src/github.com/ugorji/go/codec/tests.sh @@ -0,0 +1,74 @@ +#!/bin/bash + +# Run all the different permutations of all the tests. +# This helps ensure that nothing gets broken. + +_run() { + # 1. VARIATIONS: regular (t), canonical (c), IO R/W (i), + # binc-nosymbols (n), struct2array (s), intern string (e), + # 2. MODE: reflection (r), external (x), codecgen (g), unsafe (u), notfastpath (f) + # 3. OPTIONS: verbose (v), reset (z), must (m), + # + # Use combinations of mode to get exactly what you want, + # and then pass the variations you need. + + ztags="" + zargs="" + local OPTIND + OPTIND=1 + while getopts "xurtcinsvgzmef" flag + do + case "x$flag" in + 'xr') ;; + 'xf') ztags="$ztags notfastpath" ;; + 'xg') ztags="$ztags codecgen" ;; + 'xx') ztags="$ztags x" ;; + 'xu') ztags="$ztags unsafe" ;; + 'xv') zargs="$zargs -tv" ;; + 'xz') zargs="$zargs -tr" ;; + 'xm') zargs="$zargs -tm" ;; + *) ;; + esac + done + # shift $((OPTIND-1)) + printf '............. TAGS: %s .............\n' "$ztags" + # echo ">>>>>>> TAGS: $ztags" + + OPTIND=1 + while getopts "xurtcinsvgzmef" flag + do + case "x$flag" in + 'xt') printf ">>>>>>> REGULAR : "; go test "-tags=$ztags" $zargs ; sleep 2 ;; + 'xc') printf ">>>>>>> CANONICAL : "; go test "-tags=$ztags" $zargs -tc; sleep 2 ;; + 'xi') printf ">>>>>>> I/O : "; go test "-tags=$ztags" $zargs -ti; sleep 2 ;; + 'xn') printf ">>>>>>> NO_SYMBOLS : "; go test "-tags=$ztags" $zargs -tn; sleep 2 ;; + 'xs') printf ">>>>>>> TO_ARRAY : "; go test "-tags=$ztags" $zargs -ts; sleep 2 ;; + 'xe') printf ">>>>>>> INTERN : "; go test "-tags=$ztags" $zargs -te; sleep 2 ;; + *) ;; + esac + done + shift $((OPTIND-1)) + + OPTIND=1 +} + +# echo ">>>>>>> RUNNING VARIATIONS OF TESTS" +if [[ "x$@" = "x" ]]; then + # All: r, x, g, gu + _run "-rtcinsm" # regular + _run "-rtcinsmz" # regular with reset + _run "-rtcinsmf" # regular with no fastpath (notfastpath) + _run "-xtcinsm" # external + _run "-gxtcinsm" # codecgen: requires external + _run "-gxutcinsm" # codecgen + unsafe +elif [[ "x$@" = "x-Z" ]]; then + # Regular + _run "-rtcinsm" # regular + _run "-rtcinsmz" # regular with reset +elif [[ "x$@" = "x-F" ]]; then + # regular with notfastpath + _run "-rtcinsmf" # regular + _run "-rtcinsmzf" # regular with reset +else + _run "$@" +fi diff --git a/vendor/github.com/coreos/etcd/Godeps/_workspace/src/github.com/ugorji/go/codec/time.go b/vendor/github.com/coreos/etcd/Godeps/_workspace/src/github.com/ugorji/go/codec/time.go new file mode 100644 index 000000000..fc4c63e1d --- /dev/null +++ b/vendor/github.com/coreos/etcd/Godeps/_workspace/src/github.com/ugorji/go/codec/time.go @@ -0,0 +1,222 @@ +// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved. +// Use of this source code is governed by a MIT license found in the LICENSE file. 
+ +package codec + +import ( + "fmt" + "time" +) + +var ( + timeDigits = [...]byte{'0', '1', '2', '3', '4', '5', '6', '7', '8', '9'} +) + +type timeExt struct{} + +func (x timeExt) WriteExt(v interface{}) (bs []byte) { + switch v2 := v.(type) { + case time.Time: + bs = encodeTime(v2) + case *time.Time: + bs = encodeTime(*v2) + default: + panic(fmt.Errorf("unsupported format for time conversion: expecting time.Time; got %T", v2)) + } + return +} +func (x timeExt) ReadExt(v interface{}, bs []byte) { + tt, err := decodeTime(bs) + if err != nil { + panic(err) + } + *(v.(*time.Time)) = tt +} + +func (x timeExt) ConvertExt(v interface{}) interface{} { + return x.WriteExt(v) +} +func (x timeExt) UpdateExt(v interface{}, src interface{}) { + x.ReadExt(v, src.([]byte)) +} + +// encodeTime encodes a time.Time as a []byte, including +// information on the instant in time and UTC offset. +// +// Format Description +// +// A timestamp is composed of 3 components: +// +// - secs: signed integer representing seconds since unix epoch +// - nsecs: unsigned integer representing fractional seconds as a +// nanosecond offset within secs, in the range 0 <= nsecs < 1e9 +// - tz: signed integer representing timezone offset in minutes east of UTC, +// and a dst (daylight saving time) flag +// +// When encoding a timestamp, the first byte is the descriptor, which +// defines which components are encoded and how many bytes are used to +// encode the secs and nsecs components. *If secs/nsecs is 0 or tz is UTC, it +// is not encoded in the byte array explicitly*. +// +// The descriptor's 8 bits are of the form `A B C DDD EE`: +// A: Is secs component encoded? 1 = true +// B: Is nsecs component encoded? 1 = true +// C: Is tz component encoded? 1 = true +// DDD: Number of extra bytes for secs (range 0-7). +// If A = 1, secs encoded in DDD+1 bytes. +// If A = 0, secs is not encoded, and is assumed to be 0. +// If A = 1, then we need at least 1 byte to encode secs. +// DDD says the number of extra bytes beyond that 1. +// E.g. if DDD=0, then secs is represented in 1 byte. +// if DDD=2, then secs is represented in 3 bytes. +// EE: Number of extra bytes for nsecs (range 0-3). +// If B = 1, nsecs encoded in EE+1 bytes (similar to secs/DDD above) +// +// Following the descriptor byte, subsequent bytes are: +// +// secs component encoded in `DDD + 1` bytes (if A == 1) +// nsecs component encoded in `EE + 1` bytes (if B == 1) +// tz component encoded in 2 bytes (if C == 1) +// +// secs and nsecs components are integers encoded in a BigEndian +// two's-complement encoding format. +// +// tz component is encoded as 2 bytes (16 bits). Most-significant bit 15 to +// least-significant bit 0 are described below: +// +// Timezone offset has a range of -12:00 to +14:00 (ie -720 to +840 minutes). +// Bit 15 = have_dst: set to 1 if we set the dst flag. +// Bit 14 = dst_on: set to 1 if dst is in effect at the time, or 0 if not. +// Bits 13..0 = timezone offset in minutes. It is a signed integer in Big Endian format.
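+// +// For illustration (a hand-worked example derived from the layout above, not output captured from running this code): t = time.Unix(1, 0).UTC() has only a secs component, so A=1, B=0, C=0, and secs fits in one byte (DDD=0); the descriptor is 0b100_000_00 = 0x80 and the full encoding is the two bytes [0x80, 0x01]. Similarly, t = time.Unix(0, 90).UTC() encodes as [0x40, 0x5a]: descriptor 0x40 (only nsecs present, EE=0), followed by the single nsecs byte 0x5a (90).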
+// +func encodeTime(t time.Time) []byte { + //t := rv.Interface().(time.Time) + tsecs, tnsecs := t.Unix(), t.Nanosecond() + var ( + bd byte + btmp [8]byte + bs [16]byte + i int = 1 + ) + l := t.Location() + if l == time.UTC { + l = nil + } + if tsecs != 0 { + bd = bd | 0x80 + bigen.PutUint64(btmp[:], uint64(tsecs)) + f := pruneSignExt(btmp[:], tsecs >= 0) + bd = bd | (byte(7-f) << 2) + copy(bs[i:], btmp[f:]) + i = i + (8 - f) + } + if tnsecs != 0 { + bd = bd | 0x40 + bigen.PutUint32(btmp[:4], uint32(tnsecs)) + f := pruneSignExt(btmp[:4], true) + bd = bd | byte(3-f) + copy(bs[i:], btmp[f:4]) + i = i + (4 - f) + } + if l != nil { + bd = bd | 0x20 + // Note that Go Libs do not give access to dst flag. + _, zoneOffset := t.Zone() + //zoneName, zoneOffset := t.Zone() + zoneOffset /= 60 + z := uint16(zoneOffset) + bigen.PutUint16(btmp[:2], z) + // clear dst flags + bs[i] = btmp[0] & 0x3f + bs[i+1] = btmp[1] + i = i + 2 + } + bs[0] = bd + return bs[0:i] +} + +// decodeTime decodes a []byte into a time.Time. +func decodeTime(bs []byte) (tt time.Time, err error) { + bd := bs[0] + var ( + tsec int64 + tnsec uint32 + tz uint16 + i byte = 1 + i2 byte + n byte + ) + if bd&(1<<7) != 0 { + var btmp [8]byte + n = ((bd >> 2) & 0x7) + 1 + i2 = i + n + copy(btmp[8-n:], bs[i:i2]) + //if first bit of bs[i] is set, then fill btmp[0..8-n] with 0xff (ie sign extend it) + if bs[i]&(1<<7) != 0 { + copy(btmp[0:8-n], bsAll0xff) + //for j,k := byte(0), 8-n; j < k; j++ { btmp[j] = 0xff } + } + i = i2 + tsec = int64(bigen.Uint64(btmp[:])) + } + if bd&(1<<6) != 0 { + var btmp [4]byte + n = (bd & 0x3) + 1 + i2 = i + n + copy(btmp[4-n:], bs[i:i2]) + i = i2 + tnsec = bigen.Uint32(btmp[:]) + } + if bd&(1<<5) == 0 { + tt = time.Unix(tsec, int64(tnsec)).UTC() + return + } + // In stdlib time.Parse, when a date is parsed without a zone name, it uses "" as zone name. + // However, we need the name here, so it can be shown when the time is printed. + // Zone name is in form: UTC-08:00. + // Note that Go Libs do not give access to dst flag, so we ignore dst bits + + i2 = i + 2 + tz = bigen.Uint16(bs[i:i2]) + i = i2 + // sign extend sign bit into top 2 MSB (which were dst bits): + if tz&(1<<13) == 0 { // positive + tz = tz & 0x3fff //clear 2 MSBs: dst bits + } else { // negative + tz = tz | 0xc000 //set 2 MSBs: dst bits + //tzname[3] = '-' (TODO: verify. this works here) + } + tzint := int16(tz) + if tzint == 0 { + tt = time.Unix(tsec, int64(tnsec)).UTC() + } else { + // For Go Time, do not use a descriptive timezone. + // It's unnecessary, and makes it harder to do a reflect.DeepEqual. + // The Offset already tells what the offset should be, if not on UTC and unknown zone name. + // var zoneName = timeLocUTCName(tzint) + tt = time.Unix(tsec, int64(tnsec)).In(time.FixedZone("", int(tzint)*60)) + } + return +} + +func timeLocUTCName(tzint int16) string { + if tzint == 0 { + return "UTC" + } + var tzname = []byte("UTC+00:00") + //tzname := fmt.Sprintf("UTC%s%02d:%02d", tzsign, tz/60, tz%60) //perf issue using Sprintf. inline below. + //tzhr, tzmin := tz/60, tz%60 //faster if u convert to int first + var tzhr, tzmin int16 + if tzint < 0 { + tzname[3] = '-' // (TODO: verify.
this works here) + tzhr, tzmin = -tzint/60, (-tzint)%60 + } else { + tzhr, tzmin = tzint/60, tzint%60 + } + tzname[4] = timeDigits[tzhr/10] + tzname[5] = timeDigits[tzhr%10] + tzname[7] = timeDigits[tzmin/10] + tzname[8] = timeDigits[tzmin%10] + return string(tzname) + //return time.FixedZone(string(tzname), int(tzint)*60) +} diff --git a/vendor/github.com/coreos/etcd/Godeps/_workspace/src/github.com/ugorji/go/codec/values_test.go b/vendor/github.com/coreos/etcd/Godeps/_workspace/src/github.com/ugorji/go/codec/values_test.go new file mode 100644 index 000000000..4ec28e131 --- /dev/null +++ b/vendor/github.com/coreos/etcd/Godeps/_workspace/src/github.com/ugorji/go/codec/values_test.go @@ -0,0 +1,203 @@ +// // +build testing + +// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved. +// Use of this source code is governed by a MIT license found in the LICENSE file. + +package codec + +// This file contains values used by tests and benchmarks. +// JSON/BSON do not like maps with keys that are not strings, +// so we only use maps with string keys here. + +import ( + "math" + "time" +) + +var testStrucTime = time.Date(2012, 2, 2, 2, 2, 2, 2000, time.UTC).UTC() + +type AnonInTestStruc struct { + AS string + AI64 int64 + AI16 int16 + AUi64 uint64 + ASslice []string + AI64slice []int64 + AF64slice []float64 + // AMI32U32 map[int32]uint32 + // AMU32F64 map[uint32]float64 // json/bson do not like it + AMSU16 map[string]uint16 +} + +type AnonInTestStrucIntf struct { + Islice []interface{} + Ms map[string]interface{} + Nintf interface{} //don't set this, so we can test for nil + T time.Time +} + +type TestStruc struct { + _struct struct{} `codec:",omitempty"` //set omitempty for every field + + S string + I64 int64 + I16 int16 + Ui64 uint64 + Ui8 uint8 + B bool + By uint8 // byte: msgp doesn't like byte + + Sslice []string + I64slice []int64 + I16slice []int16 + Ui64slice []uint64 + Ui8slice []uint8 + Bslice []bool + Byslice []byte + + Iptrslice []*int64 + + // TODO: test these separately, specifically for reflection and codecgen. + // Unfortunately, ffjson doesn't support these. Its compilation even fails. + // Ui64array [4]uint64 + // Ui64slicearray [][4]uint64 + + AnonInTestStruc + + //M map[interface{}]interface{} `json:"-",bson:"-"` + Msi64 map[string]int64 + + // make this a ptr, so that it could be set or not. + // for comparison (e.g. with msgp), give it a struct tag (so it is not inlined), + // make this one omitempty (so it is included if nil). 
+ *AnonInTestStrucIntf `codec:",omitempty"` + + Nmap map[string]bool //don't set this, so we can test for nil + Nslice []byte //don't set this, so we can test for nil + Nint64 *int64 //don't set this, so we can test for nil + Mtsptr map[string]*TestStruc + Mts map[string]TestStruc + Its []*TestStruc + Nteststruc *TestStruc +} + +// small struct for testing that codecgen works for unexported types +type tLowerFirstLetter struct { + I int + u uint64 + S string + b []byte +} + +func newTestStruc(depth int, bench bool, useInterface, useStringKeyOnly bool) (ts *TestStruc) { + var i64a, i64b, i64c, i64d int64 = 64, 6464, 646464, 64646464 + + ts = &TestStruc{ + S: "some string", + I64: math.MaxInt64 * 2 / 3, // 64, + I16: 1616, + Ui64: uint64(int64(math.MaxInt64 * 2 / 3)), // 64, //don't use MaxUint64, as bson can't write it + Ui8: 160, + B: true, + By: 5, + + Sslice: []string{"one", "two", "three"}, + I64slice: []int64{1111, 2222, 3333}, + I16slice: []int16{44, 55, 66}, + Ui64slice: []uint64{12121212, 34343434, 56565656}, + Ui8slice: []uint8{210, 211, 212}, + Bslice: []bool{true, false, true, false}, + Byslice: []byte{13, 14, 15}, + + Msi64: map[string]int64{ + "one": 1, + "two": 2, + }, + AnonInTestStruc: AnonInTestStruc{ + // There's more leeway in altering this. + AS: "A-String", + AI64: -64646464, + AI16: 1616, + AUi64: 64646464, + // (U+1D11E)G-clef character may be represented in json as "\uD834\uDD1E". + // single reverse solidus character may be represented in json as "\u005C". + // include these in ASslice below. + ASslice: []string{"Aone", "Atwo", "Athree", + "Afour.reverse_solidus.\u005c", "Afive.Gclef.\U0001d11E"}, + AI64slice: []int64{1, -22, 333, -4444, 55555, -666666}, + AMSU16: map[string]uint16{"1": 1, "22": 2, "333": 3, "4444": 4}, + AF64slice: []float64{11.11e-11, 22.22E+22, 33.33E-33, 44.44e+44, 555.55E-6, 666.66E6}, + }, + } + if useInterface { + ts.AnonInTestStrucIntf = &AnonInTestStrucIntf{ + Islice: []interface{}{"true", true, "no", false, uint64(288), float64(0.4)}, + Ms: map[string]interface{}{ + "true": "true", + "int64(9)": false, + }, + T: testStrucTime, + } + } + + //For benchmarks, some things will not work. 
+ if !bench { + //json and bson require string keys in maps + //ts.M = map[interface{}]interface{}{ + // true: "true", + // int8(9): false, + //} + //gob cannot encode nil in element in array (encodeArray: nil element) + ts.Iptrslice = []*int64{nil, &i64a, nil, &i64b, nil, &i64c, nil, &i64d, nil} + // ts.Iptrslice = nil + } + if !useStringKeyOnly { + // ts.AnonInTestStruc.AMU32F64 = map[uint32]float64{1: 1, 2: 2, 3: 3} // Json/Bson barf + } + if depth > 0 { + depth-- + if ts.Mtsptr == nil { + ts.Mtsptr = make(map[string]*TestStruc) + } + if ts.Mts == nil { + ts.Mts = make(map[string]TestStruc) + } + ts.Mtsptr["0"] = newTestStruc(depth, bench, useInterface, useStringKeyOnly) + ts.Mts["0"] = *(ts.Mtsptr["0"]) + ts.Its = append(ts.Its, ts.Mtsptr["0"]) + } + return +} + +// Some other types + +type Sstring string +type Bbool bool +type Sstructsmall struct { + A int +} + +type Sstructbig struct { + A int + B bool + c string + // Sval Sstruct + Ssmallptr *Sstructsmall + Ssmall *Sstructsmall + Sptr *Sstructbig +} + +type SstructbigMapBySlice struct { + _struct struct{} `codec:",toarray"` + A int + B bool + c string + // Sval Sstruct + Ssmallptr *Sstructsmall + Ssmall *Sstructsmall + Sptr *Sstructbig +} + +type Sinterface interface { + Noop() +} diff --git a/vendor/github.com/coreos/etcd/Godeps/_workspace/src/golang.org/x/net/context/context.go b/vendor/github.com/coreos/etcd/Godeps/_workspace/src/golang.org/x/net/context/context.go new file mode 100644 index 000000000..11bd8d34e --- /dev/null +++ b/vendor/github.com/coreos/etcd/Godeps/_workspace/src/golang.org/x/net/context/context.go @@ -0,0 +1,447 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package context defines the Context type, which carries deadlines, +// cancelation signals, and other request-scoped values across API boundaries +// and between processes. +// +// Incoming requests to a server should create a Context, and outgoing calls to +// servers should accept a Context. The chain of function calls between must +// propagate the Context, optionally replacing it with a modified copy created +// using WithDeadline, WithTimeout, WithCancel, or WithValue. +// +// Programs that use Contexts should follow these rules to keep interfaces +// consistent across packages and enable static analysis tools to check context +// propagation: +// +// Do not store Contexts inside a struct type; instead, pass a Context +// explicitly to each function that needs it. The Context should be the first +// parameter, typically named ctx: +// +// func DoSomething(ctx context.Context, arg Arg) error { +// // ... use ctx ... +// } +// +// Do not pass a nil Context, even if a function permits it. Pass context.TODO +// if you are unsure about which Context to use. +// +// Use context Values only for request-scoped data that transits processes and +// APIs, not for passing optional parameters to functions. +// +// The same Context may be passed to functions running in different goroutines; +// Contexts are safe for simultaneous use by multiple goroutines. +// +// See http://blog.golang.org/context for example code for a server that uses +// Contexts. +package context + +import ( + "errors" + "fmt" + "sync" + "time" +) + +// A Context carries a deadline, a cancelation signal, and other values across +// API boundaries. +// +// Context's methods may be called by multiple goroutines simultaneously. 
+type Context interface { + // Deadline returns the time when work done on behalf of this context + // should be canceled. Deadline returns ok==false when no deadline is + // set. Successive calls to Deadline return the same results. + Deadline() (deadline time.Time, ok bool) + + // Done returns a channel that's closed when work done on behalf of this + // context should be canceled. Done may return nil if this context can + // never be canceled. Successive calls to Done return the same value. + // + // WithCancel arranges for Done to be closed when cancel is called; + // WithDeadline arranges for Done to be closed when the deadline + // expires; WithTimeout arranges for Done to be closed when the timeout + // elapses. + // + // Done is provided for use in select statements: + // + // // Stream generates values with DoSomething and sends them to out + // // until DoSomething returns an error or ctx.Done is closed. + // func Stream(ctx context.Context, out chan<- Value) error { + // for { + // v, err := DoSomething(ctx) + // if err != nil { + // return err + // } + // select { + // case <-ctx.Done(): + // return ctx.Err() + // case out <- v: + // } + // } + // } + // + // See http://blog.golang.org/pipelines for more examples of how to use + // a Done channel for cancelation. + Done() <-chan struct{} + + // Err returns a non-nil error value after Done is closed. Err returns + // Canceled if the context was canceled or DeadlineExceeded if the + // context's deadline passed. No other values for Err are defined. + // After Done is closed, successive calls to Err return the same value. + Err() error + + // Value returns the value associated with this context for key, or nil + // if no value is associated with key. Successive calls to Value with + // the same key return the same result. + // + // Use context values only for request-scoped data that transits + // processes and API boundaries, not for passing optional parameters to + // functions. + // + // A key identifies a specific value in a Context. Functions that wish + // to store values in Context typically allocate a key in a global + // variable then use that key as the argument to context.WithValue and + // Context.Value. A key can be any type that supports equality; + // packages should define keys as an unexported type to avoid + // collisions. + // + // Packages that define a Context key should provide type-safe accessors + // for the values stored using that key: + // + // // Package user defines a User type that's stored in Contexts. + // package user + // + // import "golang.org/x/net/context" + // + // // User is the type of value stored in the Contexts. + // type User struct {...} + // + // // key is an unexported type for keys defined in this package. + // // This prevents collisions with keys defined in other packages. + // type key int + // + // // userKey is the key for user.User values in Contexts. It is + // // unexported; clients use user.NewContext and user.FromContext + // // instead of using this key directly. + // var userKey key = 0 + // + // // NewContext returns a new Context that carries value u. + // func NewContext(ctx context.Context, u *User) context.Context { + // return context.WithValue(ctx, userKey, u) + // } + // + // // FromContext returns the User value stored in ctx, if any.
+ // func FromContext(ctx context.Context) (*User, bool) { + // u, ok := ctx.Value(userKey).(*User) + // return u, ok + // } + Value(key interface{}) interface{} +} + +// Canceled is the error returned by Context.Err when the context is canceled. +var Canceled = errors.New("context canceled") + +// DeadlineExceeded is the error returned by Context.Err when the context's +// deadline passes. +var DeadlineExceeded = errors.New("context deadline exceeded") + +// An emptyCtx is never canceled, has no values, and has no deadline. It is not +// struct{}, since vars of this type must have distinct addresses. +type emptyCtx int + +func (*emptyCtx) Deadline() (deadline time.Time, ok bool) { + return +} + +func (*emptyCtx) Done() <-chan struct{} { + return nil +} + +func (*emptyCtx) Err() error { + return nil +} + +func (*emptyCtx) Value(key interface{}) interface{} { + return nil +} + +func (e *emptyCtx) String() string { + switch e { + case background: + return "context.Background" + case todo: + return "context.TODO" + } + return "unknown empty Context" +} + +var ( + background = new(emptyCtx) + todo = new(emptyCtx) +) + +// Background returns a non-nil, empty Context. It is never canceled, has no +// values, and has no deadline. It is typically used by the main function, +// initialization, and tests, and as the top-level Context for incoming +// requests. +func Background() Context { + return background +} + +// TODO returns a non-nil, empty Context. Code should use context.TODO when +// it's unclear which Context to use or it is not yet available (because the +// surrounding function has not yet been extended to accept a Context +// parameter). TODO is recognized by static analysis tools that determine +// whether Contexts are propagated correctly in a program. +func TODO() Context { + return todo +} + +// A CancelFunc tells an operation to abandon its work. +// A CancelFunc does not wait for the work to stop. +// After the first call, subsequent calls to a CancelFunc do nothing. +type CancelFunc func() + +// WithCancel returns a copy of parent with a new Done channel. The returned +// context's Done channel is closed when the returned cancel function is called +// or when the parent context's Done channel is closed, whichever happens first. +// +// Canceling this context releases resources associated with it, so code should +// call cancel as soon as the operations running in this Context complete. +func WithCancel(parent Context) (ctx Context, cancel CancelFunc) { + c := newCancelCtx(parent) + propagateCancel(parent, &c) + return &c, func() { c.cancel(true, Canceled) } +} + +// newCancelCtx returns an initialized cancelCtx. +func newCancelCtx(parent Context) cancelCtx { + return cancelCtx{ + Context: parent, + done: make(chan struct{}), + } +} + +// propagateCancel arranges for child to be canceled when parent is. +func propagateCancel(parent Context, child canceler) { + if parent.Done() == nil { + return // parent is never canceled + } + if p, ok := parentCancelCtx(parent); ok { + p.mu.Lock() + if p.err != nil { + // parent has already been canceled + child.cancel(false, p.err) + } else { + if p.children == nil { + p.children = make(map[canceler]bool) + } + p.children[child] = true + } + p.mu.Unlock() + } else { + go func() { + select { + case <-parent.Done(): + child.cancel(false, parent.Err()) + case <-child.Done(): + } + }() + } +} + +// parentCancelCtx follows a chain of parent references until it finds a +// *cancelCtx. 
This function understands how each of the concrete types in this +// package represents its parent. +func parentCancelCtx(parent Context) (*cancelCtx, bool) { + for { + switch c := parent.(type) { + case *cancelCtx: + return c, true + case *timerCtx: + return &c.cancelCtx, true + case *valueCtx: + parent = c.Context + default: + return nil, false + } + } +} + +// removeChild removes a context from its parent. +func removeChild(parent Context, child canceler) { + p, ok := parentCancelCtx(parent) + if !ok { + return + } + p.mu.Lock() + if p.children != nil { + delete(p.children, child) + } + p.mu.Unlock() +} + +// A canceler is a context type that can be canceled directly. The +// implementations are *cancelCtx and *timerCtx. +type canceler interface { + cancel(removeFromParent bool, err error) + Done() <-chan struct{} +} + +// A cancelCtx can be canceled. When canceled, it also cancels any children +// that implement canceler. +type cancelCtx struct { + Context + + done chan struct{} // closed by the first cancel call. + + mu sync.Mutex + children map[canceler]bool // set to nil by the first cancel call + err error // set to non-nil by the first cancel call +} + +func (c *cancelCtx) Done() <-chan struct{} { + return c.done +} + +func (c *cancelCtx) Err() error { + c.mu.Lock() + defer c.mu.Unlock() + return c.err +} + +func (c *cancelCtx) String() string { + return fmt.Sprintf("%v.WithCancel", c.Context) +} + +// cancel closes c.done, cancels each of c's children, and, if +// removeFromParent is true, removes c from its parent's children. +func (c *cancelCtx) cancel(removeFromParent bool, err error) { + if err == nil { + panic("context: internal error: missing cancel error") + } + c.mu.Lock() + if c.err != nil { + c.mu.Unlock() + return // already canceled + } + c.err = err + close(c.done) + for child := range c.children { + // NOTE: acquiring the child's lock while holding parent's lock. + child.cancel(false, err) + } + c.children = nil + c.mu.Unlock() + + if removeFromParent { + removeChild(c.Context, c) + } +} + +// WithDeadline returns a copy of the parent context with the deadline adjusted +// to be no later than d. If the parent's deadline is already earlier than d, +// WithDeadline(parent, d) is semantically equivalent to parent. The returned +// context's Done channel is closed when the deadline expires, when the returned +// cancel function is called, or when the parent context's Done channel is +// closed, whichever happens first. +// +// Canceling this context releases resources associated with it, so code should +// call cancel as soon as the operations running in this Context complete. +func WithDeadline(parent Context, deadline time.Time) (Context, CancelFunc) { + if cur, ok := parent.Deadline(); ok && cur.Before(deadline) { + // The current deadline is already sooner than the new one. + return WithCancel(parent) + } + c := &timerCtx{ + cancelCtx: newCancelCtx(parent), + deadline: deadline, + } + propagateCancel(parent, c) + d := deadline.Sub(time.Now()) + if d <= 0 { + c.cancel(true, DeadlineExceeded) // deadline has already passed + return c, func() { c.cancel(true, Canceled) } + } + c.mu.Lock() + defer c.mu.Unlock() + if c.err == nil { + c.timer = time.AfterFunc(d, func() { + c.cancel(true, DeadlineExceeded) + }) + } + return c, func() { c.cancel(true, Canceled) } +} + +// A timerCtx carries a timer and a deadline. It embeds a cancelCtx to +// implement Done and Err. It implements cancel by stopping its timer then +// delegating to cancelCtx.cancel. 
+type timerCtx struct { + cancelCtx + timer *time.Timer // Under cancelCtx.mu. + + deadline time.Time +} + +func (c *timerCtx) Deadline() (deadline time.Time, ok bool) { + return c.deadline, true +} + +func (c *timerCtx) String() string { + return fmt.Sprintf("%v.WithDeadline(%s [%s])", c.cancelCtx.Context, c.deadline, c.deadline.Sub(time.Now())) +} + +func (c *timerCtx) cancel(removeFromParent bool, err error) { + c.cancelCtx.cancel(false, err) + if removeFromParent { + // Remove this timerCtx from its parent cancelCtx's children. + removeChild(c.cancelCtx.Context, c) + } + c.mu.Lock() + if c.timer != nil { + c.timer.Stop() + c.timer = nil + } + c.mu.Unlock() +} + +// WithTimeout returns WithDeadline(parent, time.Now().Add(timeout)). +// +// Canceling this context releases resources associated with it, so code should +// call cancel as soon as the operations running in this Context complete: +// +// func slowOperationWithTimeout(ctx context.Context) (Result, error) { +// ctx, cancel := context.WithTimeout(ctx, 100*time.Millisecond) +// defer cancel() // releases resources if slowOperation completes before timeout elapses +// return slowOperation(ctx) +// } +func WithTimeout(parent Context, timeout time.Duration) (Context, CancelFunc) { + return WithDeadline(parent, time.Now().Add(timeout)) +} + +// WithValue returns a copy of parent in which the value associated with key is +// val. +// +// Use context Values only for request-scoped data that transits processes and +// APIs, not for passing optional parameters to functions. +func WithValue(parent Context, key interface{}, val interface{}) Context { + return &valueCtx{parent, key, val} +} + +// A valueCtx carries a key-value pair. It implements Value for that key and +// delegates all other calls to the embedded Context. +type valueCtx struct { + Context + key, val interface{} +} + +func (c *valueCtx) String() string { + return fmt.Sprintf("%v.WithValue(%#v, %#v)", c.Context, c.key, c.val) +} + +func (c *valueCtx) Value(key interface{}) interface{} { + if c.key == key { + return c.val + } + return c.Context.Value(key) +} diff --git a/vendor/github.com/coreos/etcd/Godeps/_workspace/src/golang.org/x/net/context/context_test.go b/vendor/github.com/coreos/etcd/Godeps/_workspace/src/golang.org/x/net/context/context_test.go new file mode 100644 index 000000000..05345fc5e --- /dev/null +++ b/vendor/github.com/coreos/etcd/Godeps/_workspace/src/golang.org/x/net/context/context_test.go @@ -0,0 +1,575 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package context + +import ( + "fmt" + "math/rand" + "runtime" + "strings" + "sync" + "testing" + "time" +) + +// otherContext is a Context that's not one of the types defined in context.go. +// This lets us test code paths that differ based on the underlying type of the +// Context. 
+type otherContext struct { + Context +} + +func TestBackground(t *testing.T) { + c := Background() + if c == nil { + t.Fatalf("Background returned nil") + } + select { + case x := <-c.Done(): + t.Errorf("<-c.Done() == %v want nothing (it should block)", x) + default: + } + if got, want := fmt.Sprint(c), "context.Background"; got != want { + t.Errorf("Background().String() = %q want %q", got, want) + } +} + +func TestTODO(t *testing.T) { + c := TODO() + if c == nil { + t.Fatalf("TODO returned nil") + } + select { + case x := <-c.Done(): + t.Errorf("<-c.Done() == %v want nothing (it should block)", x) + default: + } + if got, want := fmt.Sprint(c), "context.TODO"; got != want { + t.Errorf("TODO().String() = %q want %q", got, want) + } +} + +func TestWithCancel(t *testing.T) { + c1, cancel := WithCancel(Background()) + + if got, want := fmt.Sprint(c1), "context.Background.WithCancel"; got != want { + t.Errorf("c1.String() = %q want %q", got, want) + } + + o := otherContext{c1} + c2, _ := WithCancel(o) + contexts := []Context{c1, o, c2} + + for i, c := range contexts { + if d := c.Done(); d == nil { + t.Errorf("c[%d].Done() == %v want non-nil", i, d) + } + if e := c.Err(); e != nil { + t.Errorf("c[%d].Err() == %v want nil", i, e) + } + + select { + case x := <-c.Done(): + t.Errorf("<-c.Done() == %v want nothing (it should block)", x) + default: + } + } + + cancel() + time.Sleep(100 * time.Millisecond) // let cancelation propagate + + for i, c := range contexts { + select { + case <-c.Done(): + default: + t.Errorf("<-c[%d].Done() blocked, but shouldn't have", i) + } + if e := c.Err(); e != Canceled { + t.Errorf("c[%d].Err() == %v want %v", i, e, Canceled) + } + } +} + +func TestParentFinishesChild(t *testing.T) { + // Context tree: + // parent -> cancelChild + // parent -> valueChild -> timerChild + parent, cancel := WithCancel(Background()) + cancelChild, stop := WithCancel(parent) + defer stop() + valueChild := WithValue(parent, "key", "value") + timerChild, stop := WithTimeout(valueChild, 10000*time.Hour) + defer stop() + + select { + case x := <-parent.Done(): + t.Errorf("<-parent.Done() == %v want nothing (it should block)", x) + case x := <-cancelChild.Done(): + t.Errorf("<-cancelChild.Done() == %v want nothing (it should block)", x) + case x := <-timerChild.Done(): + t.Errorf("<-timerChild.Done() == %v want nothing (it should block)", x) + case x := <-valueChild.Done(): + t.Errorf("<-valueChild.Done() == %v want nothing (it should block)", x) + default: + } + + // The parent's children should contain the two cancelable children. + pc := parent.(*cancelCtx) + cc := cancelChild.(*cancelCtx) + tc := timerChild.(*timerCtx) + pc.mu.Lock() + if len(pc.children) != 2 || !pc.children[cc] || !pc.children[tc] { + t.Errorf("bad linkage: pc.children = %v, want %v and %v", + pc.children, cc, tc) + } + pc.mu.Unlock() + + if p, ok := parentCancelCtx(cc.Context); !ok || p != pc { + t.Errorf("bad linkage: parentCancelCtx(cancelChild.Context) = %v, %v want %v, true", p, ok, pc) + } + if p, ok := parentCancelCtx(tc.Context); !ok || p != pc { + t.Errorf("bad linkage: parentCancelCtx(timerChild.Context) = %v, %v want %v, true", p, ok, pc) + } + + cancel() + + pc.mu.Lock() + if len(pc.children) != 0 { + t.Errorf("pc.cancel didn't clear pc.children = %v", pc.children) + } + pc.mu.Unlock() + + // parent and children should all be finished. 
+ check := func(ctx Context, name string) { + select { + case <-ctx.Done(): + default: + t.Errorf("<-%s.Done() blocked, but shouldn't have", name) + } + if e := ctx.Err(); e != Canceled { + t.Errorf("%s.Err() == %v want %v", name, e, Canceled) + } + } + check(parent, "parent") + check(cancelChild, "cancelChild") + check(valueChild, "valueChild") + check(timerChild, "timerChild") + + // WithCancel should return a canceled context on a canceled parent. + precanceledChild := WithValue(parent, "key", "value") + select { + case <-precanceledChild.Done(): + default: + t.Errorf("<-precanceledChild.Done() blocked, but shouldn't have") + } + if e := precanceledChild.Err(); e != Canceled { + t.Errorf("precanceledChild.Err() == %v want %v", e, Canceled) + } +} + +func TestChildFinishesFirst(t *testing.T) { + cancelable, stop := WithCancel(Background()) + defer stop() + for _, parent := range []Context{Background(), cancelable} { + child, cancel := WithCancel(parent) + + select { + case x := <-parent.Done(): + t.Errorf("<-parent.Done() == %v want nothing (it should block)", x) + case x := <-child.Done(): + t.Errorf("<-child.Done() == %v want nothing (it should block)", x) + default: + } + + cc := child.(*cancelCtx) + pc, pcok := parent.(*cancelCtx) // pcok == false when parent == Background() + if p, ok := parentCancelCtx(cc.Context); ok != pcok || (ok && pc != p) { + t.Errorf("bad linkage: parentCancelCtx(cc.Context) = %v, %v want %v, %v", p, ok, pc, pcok) + } + + if pcok { + pc.mu.Lock() + if len(pc.children) != 1 || !pc.children[cc] { + t.Errorf("bad linkage: pc.children = %v, cc = %v", pc.children, cc) + } + pc.mu.Unlock() + } + + cancel() + + if pcok { + pc.mu.Lock() + if len(pc.children) != 0 { + t.Errorf("child's cancel didn't remove self from pc.children = %v", pc.children) + } + pc.mu.Unlock() + } + + // child should be finished. + select { + case <-child.Done(): + default: + t.Errorf("<-child.Done() blocked, but shouldn't have") + } + if e := child.Err(); e != Canceled { + t.Errorf("child.Err() == %v want %v", e, Canceled) + } + + // parent should not be finished. 
+ select { + case x := <-parent.Done(): + t.Errorf("<-parent.Done() == %v want nothing (it should block)", x) + default: + } + if e := parent.Err(); e != nil { + t.Errorf("parent.Err() == %v want nil", e) + } + } +} + +func testDeadline(c Context, wait time.Duration, t *testing.T) { + select { + case <-time.After(wait): + t.Fatalf("context should have timed out") + case <-c.Done(): + } + if e := c.Err(); e != DeadlineExceeded { + t.Errorf("c.Err() == %v want %v", e, DeadlineExceeded) + } +} + +func TestDeadline(t *testing.T) { + c, _ := WithDeadline(Background(), time.Now().Add(100*time.Millisecond)) + if got, prefix := fmt.Sprint(c), "context.Background.WithDeadline("; !strings.HasPrefix(got, prefix) { + t.Errorf("c.String() = %q want prefix %q", got, prefix) + } + testDeadline(c, 200*time.Millisecond, t) + + c, _ = WithDeadline(Background(), time.Now().Add(100*time.Millisecond)) + o := otherContext{c} + testDeadline(o, 200*time.Millisecond, t) + + c, _ = WithDeadline(Background(), time.Now().Add(100*time.Millisecond)) + o = otherContext{c} + c, _ = WithDeadline(o, time.Now().Add(300*time.Millisecond)) + testDeadline(c, 200*time.Millisecond, t) +} + +func TestTimeout(t *testing.T) { + c, _ := WithTimeout(Background(), 100*time.Millisecond) + if got, prefix := fmt.Sprint(c), "context.Background.WithDeadline("; !strings.HasPrefix(got, prefix) { + t.Errorf("c.String() = %q want prefix %q", got, prefix) + } + testDeadline(c, 200*time.Millisecond, t) + + c, _ = WithTimeout(Background(), 100*time.Millisecond) + o := otherContext{c} + testDeadline(o, 200*time.Millisecond, t) + + c, _ = WithTimeout(Background(), 100*time.Millisecond) + o = otherContext{c} + c, _ = WithTimeout(o, 300*time.Millisecond) + testDeadline(c, 200*time.Millisecond, t) +} + +func TestCanceledTimeout(t *testing.T) { + c, _ := WithTimeout(Background(), 200*time.Millisecond) + o := otherContext{c} + c, cancel := WithTimeout(o, 400*time.Millisecond) + cancel() + time.Sleep(100 * time.Millisecond) // let cancelation propagate + select { + case <-c.Done(): + default: + t.Errorf("<-c.Done() blocked, but shouldn't have") + } + if e := c.Err(); e != Canceled { + t.Errorf("c.Err() == %v want %v", e, Canceled) + } +} + +type key1 int +type key2 int + +var k1 = key1(1) +var k2 = key2(1) // same int as k1, different type +var k3 = key2(3) // same type as k2, different int + +func TestValues(t *testing.T) { + check := func(c Context, nm, v1, v2, v3 string) { + if v, ok := c.Value(k1).(string); ok == (len(v1) == 0) || v != v1 { + t.Errorf(`%s.Value(k1).(string) = %q, %t want %q, %t`, nm, v, ok, v1, len(v1) != 0) + } + if v, ok := c.Value(k2).(string); ok == (len(v2) == 0) || v != v2 { + t.Errorf(`%s.Value(k2).(string) = %q, %t want %q, %t`, nm, v, ok, v2, len(v2) != 0) + } + if v, ok := c.Value(k3).(string); ok == (len(v3) == 0) || v != v3 { + t.Errorf(`%s.Value(k3).(string) = %q, %t want %q, %t`, nm, v, ok, v3, len(v3) != 0) + } + } + + c0 := Background() + check(c0, "c0", "", "", "") + + c1 := WithValue(Background(), k1, "c1k1") + check(c1, "c1", "c1k1", "", "") + + if got, want := fmt.Sprint(c1), `context.Background.WithValue(1, "c1k1")`; got != want { + t.Errorf("c.String() = %q want %q", got, want) + } + + c2 := WithValue(c1, k2, "c2k2") + check(c2, "c2", "c1k1", "c2k2", "") + + c3 := WithValue(c2, k3, "c3k3") + check(c3, "c2", "c1k1", "c2k2", "c3k3") + + c4 := WithValue(c3, k1, nil) + check(c4, "c4", "", "c2k2", "c3k3") + + o0 := otherContext{Background()} + check(o0, "o0", "", "", "") + + o1 := otherContext{WithValue(Background(), 
k1, "c1k1")} + check(o1, "o1", "c1k1", "", "") + + o2 := WithValue(o1, k2, "o2k2") + check(o2, "o2", "c1k1", "o2k2", "") + + o3 := otherContext{c4} + check(o3, "o3", "", "c2k2", "c3k3") + + o4 := WithValue(o3, k3, nil) + check(o4, "o4", "", "c2k2", "") +} + +func TestAllocs(t *testing.T) { + bg := Background() + for _, test := range []struct { + desc string + f func() + limit float64 + gccgoLimit float64 + }{ + { + desc: "Background()", + f: func() { Background() }, + limit: 0, + gccgoLimit: 0, + }, + { + desc: fmt.Sprintf("WithValue(bg, %v, nil)", k1), + f: func() { + c := WithValue(bg, k1, nil) + c.Value(k1) + }, + limit: 3, + gccgoLimit: 3, + }, + { + desc: "WithTimeout(bg, 15*time.Millisecond)", + f: func() { + c, _ := WithTimeout(bg, 15*time.Millisecond) + <-c.Done() + }, + limit: 8, + gccgoLimit: 15, + }, + { + desc: "WithCancel(bg)", + f: func() { + c, cancel := WithCancel(bg) + cancel() + <-c.Done() + }, + limit: 5, + gccgoLimit: 8, + }, + { + desc: "WithTimeout(bg, 100*time.Millisecond)", + f: func() { + c, cancel := WithTimeout(bg, 100*time.Millisecond) + cancel() + <-c.Done() + }, + limit: 8, + gccgoLimit: 25, + }, + } { + limit := test.limit + if runtime.Compiler == "gccgo" { + // gccgo does not yet do escape analysis. + // TOOD(iant): Remove this when gccgo does do escape analysis. + limit = test.gccgoLimit + } + if n := testing.AllocsPerRun(100, test.f); n > limit { + t.Errorf("%s allocs = %f want %d", test.desc, n, int(limit)) + } + } +} + +func TestSimultaneousCancels(t *testing.T) { + root, cancel := WithCancel(Background()) + m := map[Context]CancelFunc{root: cancel} + q := []Context{root} + // Create a tree of contexts. + for len(q) != 0 && len(m) < 100 { + parent := q[0] + q = q[1:] + for i := 0; i < 4; i++ { + ctx, cancel := WithCancel(parent) + m[ctx] = cancel + q = append(q, ctx) + } + } + // Start all the cancels in a random order. + var wg sync.WaitGroup + wg.Add(len(m)) + for _, cancel := range m { + go func(cancel CancelFunc) { + cancel() + wg.Done() + }(cancel) + } + // Wait on all the contexts in a random order. + for ctx := range m { + select { + case <-ctx.Done(): + case <-time.After(1 * time.Second): + buf := make([]byte, 10<<10) + n := runtime.Stack(buf, true) + t.Fatalf("timed out waiting for <-ctx.Done(); stacks:\n%s", buf[:n]) + } + } + // Wait for all the cancel functions to return. + done := make(chan struct{}) + go func() { + wg.Wait() + close(done) + }() + select { + case <-done: + case <-time.After(1 * time.Second): + buf := make([]byte, 10<<10) + n := runtime.Stack(buf, true) + t.Fatalf("timed out waiting for cancel functions; stacks:\n%s", buf[:n]) + } +} + +func TestInterlockedCancels(t *testing.T) { + parent, cancelParent := WithCancel(Background()) + child, cancelChild := WithCancel(parent) + go func() { + parent.Done() + cancelChild() + }() + cancelParent() + select { + case <-child.Done(): + case <-time.After(1 * time.Second): + buf := make([]byte, 10<<10) + n := runtime.Stack(buf, true) + t.Fatalf("timed out waiting for child.Done(); stacks:\n%s", buf[:n]) + } +} + +func TestLayersCancel(t *testing.T) { + testLayers(t, time.Now().UnixNano(), false) +} + +func TestLayersTimeout(t *testing.T) { + testLayers(t, time.Now().UnixNano(), true) +} + +func testLayers(t *testing.T, seed int64, testTimeout bool) { + rand.Seed(seed) + errorf := func(format string, a ...interface{}) { + t.Errorf(fmt.Sprintf("seed=%d: %s", seed, format), a...) 
+ } + const ( + timeout = 200 * time.Millisecond + minLayers = 30 + ) + type value int + var ( + vals []*value + cancels []CancelFunc + numTimers int + ctx = Background() + ) + for i := 0; i < minLayers || numTimers == 0 || len(cancels) == 0 || len(vals) == 0; i++ { + switch rand.Intn(3) { + case 0: + v := new(value) + ctx = WithValue(ctx, v, v) + vals = append(vals, v) + case 1: + var cancel CancelFunc + ctx, cancel = WithCancel(ctx) + cancels = append(cancels, cancel) + case 2: + var cancel CancelFunc + ctx, cancel = WithTimeout(ctx, timeout) + cancels = append(cancels, cancel) + numTimers++ + } + } + checkValues := func(when string) { + for _, key := range vals { + if val := ctx.Value(key).(*value); key != val { + errorf("%s: ctx.Value(%p) = %p want %p", when, key, val, key) + } + } + } + select { + case <-ctx.Done(): + errorf("ctx should not be canceled yet") + default: + } + if s, prefix := fmt.Sprint(ctx), "context.Background."; !strings.HasPrefix(s, prefix) { + t.Errorf("ctx.String() = %q want prefix %q", s, prefix) + } + t.Log(ctx) + checkValues("before cancel") + if testTimeout { + select { + case <-ctx.Done(): + case <-time.After(timeout + 100*time.Millisecond): + errorf("ctx should have timed out") + } + checkValues("after timeout") + } else { + cancel := cancels[rand.Intn(len(cancels))] + cancel() + select { + case <-ctx.Done(): + default: + errorf("ctx should be canceled") + } + checkValues("after cancel") + } +} + +func TestCancelRemoves(t *testing.T) { + checkChildren := func(when string, ctx Context, want int) { + if got := len(ctx.(*cancelCtx).children); got != want { + t.Errorf("%s: context has %d children, want %d", when, got, want) + } + } + + ctx, _ := WithCancel(Background()) + checkChildren("after creation", ctx, 0) + _, cancel := WithCancel(ctx) + checkChildren("with WithCancel child ", ctx, 1) + cancel() + checkChildren("after cancelling WithCancel child", ctx, 0) + + ctx, _ = WithCancel(Background()) + checkChildren("after creation", ctx, 0) + _, cancel = WithTimeout(ctx, 60*time.Minute) + checkChildren("with WithTimeout child ", ctx, 1) + cancel() + checkChildren("after cancelling WithTimeout child", ctx, 0) +} diff --git a/vendor/github.com/coreos/etcd/Godeps/_workspace/src/golang.org/x/net/context/ctxhttp/cancelreq.go b/vendor/github.com/coreos/etcd/Godeps/_workspace/src/golang.org/x/net/context/ctxhttp/cancelreq.go new file mode 100644 index 000000000..e3170e333 --- /dev/null +++ b/vendor/github.com/coreos/etcd/Godeps/_workspace/src/golang.org/x/net/context/ctxhttp/cancelreq.go @@ -0,0 +1,19 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build go1.5 + +package ctxhttp + +import "net/http" + +func canceler(client *http.Client, req *http.Request) func() { + // TODO(djd): Respect any existing value of req.Cancel. + ch := make(chan struct{}) + req.Cancel = ch + + return func() { + close(ch) + } +} diff --git a/vendor/github.com/coreos/etcd/Godeps/_workspace/src/golang.org/x/net/context/ctxhttp/cancelreq_go14.go b/vendor/github.com/coreos/etcd/Godeps/_workspace/src/golang.org/x/net/context/ctxhttp/cancelreq_go14.go new file mode 100644 index 000000000..56bcbadb8 --- /dev/null +++ b/vendor/github.com/coreos/etcd/Godeps/_workspace/src/golang.org/x/net/context/ctxhttp/cancelreq_go14.go @@ -0,0 +1,23 @@ +// Copyright 2015 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !go1.5 + +package ctxhttp + +import "net/http" + +type requestCanceler interface { + CancelRequest(*http.Request) +} + +func canceler(client *http.Client, req *http.Request) func() { + rc, ok := client.Transport.(requestCanceler) + if !ok { + return func() {} + } + return func() { + rc.CancelRequest(req) + } +} diff --git a/vendor/github.com/coreos/etcd/Godeps/_workspace/src/golang.org/x/net/context/ctxhttp/ctxhttp.go b/vendor/github.com/coreos/etcd/Godeps/_workspace/src/golang.org/x/net/context/ctxhttp/ctxhttp.go new file mode 100644 index 000000000..8089ef0af --- /dev/null +++ b/vendor/github.com/coreos/etcd/Godeps/_workspace/src/golang.org/x/net/context/ctxhttp/ctxhttp.go @@ -0,0 +1,140 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package ctxhttp provides helper functions for performing context-aware HTTP requests. +package ctxhttp + +import ( + "io" + "net/http" + "net/url" + "strings" + + "github.com/coreos/etcd/Godeps/_workspace/src/golang.org/x/net/context" +) + +func nop() {} + +var ( + testHookContextDoneBeforeHeaders = nop + testHookDoReturned = nop + testHookDidBodyClose = nop +) + +// Do sends an HTTP request with the provided http.Client and returns an HTTP response. +// If the client is nil, http.DefaultClient is used. +// If the context is canceled or times out, ctx.Err() will be returned. +func Do(ctx context.Context, client *http.Client, req *http.Request) (*http.Response, error) { + if client == nil { + client = http.DefaultClient + } + + // Request cancelation changed in Go 1.5, see cancelreq.go and cancelreq_go14.go. + cancel := canceler(client, req) + + type responseAndError struct { + resp *http.Response + err error + } + result := make(chan responseAndError, 1) + + go func() { + resp, err := client.Do(req) + testHookDoReturned() + result <- responseAndError{resp, err} + }() + + var resp *http.Response + + select { + case <-ctx.Done(): + testHookContextDoneBeforeHeaders() + cancel() + // Clean up after the goroutine calling client.Do: + go func() { + if r := <-result; r.resp != nil { + testHookDidBodyClose() + r.resp.Body.Close() + } + }() + return nil, ctx.Err() + case r := <-result: + var err error + resp, err = r.resp, r.err + if err != nil { + return resp, err + } + } + + c := make(chan struct{}) + go func() { + select { + case <-ctx.Done(): + cancel() + case <-c: + // The response's Body is closed. + } + }() + resp.Body = ¬ifyingReader{resp.Body, c} + + return resp, nil +} + +// Get issues a GET request via the Do function. +func Get(ctx context.Context, client *http.Client, url string) (*http.Response, error) { + req, err := http.NewRequest("GET", url, nil) + if err != nil { + return nil, err + } + return Do(ctx, client, req) +} + +// Head issues a HEAD request via the Do function. +func Head(ctx context.Context, client *http.Client, url string) (*http.Response, error) { + req, err := http.NewRequest("HEAD", url, nil) + if err != nil { + return nil, err + } + return Do(ctx, client, req) +} + +// Post issues a POST request via the Do function. 
+func Post(ctx context.Context, client *http.Client, url string, bodyType string, body io.Reader) (*http.Response, error) { + req, err := http.NewRequest("POST", url, body) + if err != nil { + return nil, err + } + req.Header.Set("Content-Type", bodyType) + return Do(ctx, client, req) +} + +// PostForm issues a POST request via the Do function. +func PostForm(ctx context.Context, client *http.Client, url string, data url.Values) (*http.Response, error) { + return Post(ctx, client, url, "application/x-www-form-urlencoded", strings.NewReader(data.Encode())) +} + +// notifyingReader is an io.ReadCloser that closes the notify channel after +// Close is called or a Read fails on the underlying ReadCloser. +type notifyingReader struct { + io.ReadCloser + notify chan<- struct{} +} + +func (r *notifyingReader) Read(p []byte) (int, error) { + n, err := r.ReadCloser.Read(p) + if err != nil && r.notify != nil { + close(r.notify) + r.notify = nil + } + return n, err +} + +func (r *notifyingReader) Close() error { + err := r.ReadCloser.Close() + if r.notify != nil { + close(r.notify) + r.notify = nil + } + return err +} diff --git a/vendor/github.com/coreos/etcd/Godeps/_workspace/src/golang.org/x/net/context/ctxhttp/ctxhttp_test.go b/vendor/github.com/coreos/etcd/Godeps/_workspace/src/golang.org/x/net/context/ctxhttp/ctxhttp_test.go new file mode 100644 index 000000000..8846f6b6e --- /dev/null +++ b/vendor/github.com/coreos/etcd/Godeps/_workspace/src/golang.org/x/net/context/ctxhttp/ctxhttp_test.go @@ -0,0 +1,176 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !plan9 + +package ctxhttp + +import ( + "io/ioutil" + "net" + "net/http" + "net/http/httptest" + "sync" + "testing" + "time" + + "github.com/coreos/etcd/Godeps/_workspace/src/golang.org/x/net/context" +) + +const ( + requestDuration = 100 * time.Millisecond + requestBody = "ok" +) + +func TestNoTimeout(t *testing.T) { + ctx := context.Background() + resp, err := doRequest(ctx) + + if resp == nil || err != nil { + t.Fatalf("error received from client: %v %v", err, resp) + } +} + +func TestCancel(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + go func() { + time.Sleep(requestDuration / 2) + cancel() + }() + + resp, err := doRequest(ctx) + + if resp != nil || err == nil { + t.Fatalf("expected error, didn't get one. resp: %v", resp) + } + if err != ctx.Err() { + t.Fatalf("expected error from context but got: %v", err) + } +} + +func TestCancelAfterRequest(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + + resp, err := doRequest(ctx) + + // Cancel before reading the body. + // Request.Body should still be readable after the context is canceled. + cancel() + + b, err := ioutil.ReadAll(resp.Body) + if err != nil || string(b) != requestBody { + t.Fatalf("could not read body: %q %v", b, err) + } +} + +func TestCancelAfterHangingRequest(t *testing.T) { + handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusOK) + w.(http.Flusher).Flush() + <-w.(http.CloseNotifier).CloseNotify() + }) + + serv := httptest.NewServer(handler) + defer serv.Close() + + ctx, cancel := context.WithCancel(context.Background()) + resp, err := Get(ctx, nil, serv.URL) + if err != nil { + t.Fatalf("unexpected error in Get: %v", err) + } + + // Cancel before reading the body.
+ // Reading Request.Body should fail, since the request was + // canceled before anything was written. + cancel() + + done := make(chan struct{}) + + go func() { + b, err := ioutil.ReadAll(resp.Body) + if len(b) != 0 || err == nil { + t.Errorf(`Read got (%q, %v); want ("", error)`, b, err) + } + close(done) + }() + + select { + case <-time.After(1 * time.Second): + t.Errorf("Test timed out") + case <-done: + } +} + +func doRequest(ctx context.Context) (*http.Response, error) { + var okHandler = http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + time.Sleep(requestDuration) + w.Write([]byte(requestBody)) + }) + + serv := httptest.NewServer(okHandler) + defer serv.Close() + + return Get(ctx, nil, serv.URL) +} + +// golang.org/issue/14065 +func TestClosesResponseBodyOnCancel(t *testing.T) { + defer func() { testHookContextDoneBeforeHeaders = nop }() + defer func() { testHookDoReturned = nop }() + defer func() { testHookDidBodyClose = nop }() + + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {})) + defer ts.Close() + + ctx, cancel := context.WithCancel(context.Background()) + + // closed when Do enters select case <-ctx.Done() + enteredDonePath := make(chan struct{}) + + testHookContextDoneBeforeHeaders = func() { + close(enteredDonePath) + } + + testHookDoReturned = func() { + // We now have the result (the Flush'd headers) at least, + // so we can cancel the request. + cancel() + + // But block the client.Do goroutine from sending + // until Do enters into the <-ctx.Done() path, since + // otherwise if both channels are readable, select + // picks a random one. + <-enteredDonePath + } + + sawBodyClose := make(chan struct{}) + testHookDidBodyClose = func() { close(sawBodyClose) } + + tr := &http.Transport{} + defer tr.CloseIdleConnections() + c := &http.Client{Transport: tr} + req, _ := http.NewRequest("GET", ts.URL, nil) + _, doErr := Do(ctx, c, req) + + select { + case <-sawBodyClose: + case <-time.After(5 * time.Second): + t.Fatal("timeout waiting for body to close") + } + + if doErr != ctx.Err() { + t.Errorf("Do error = %v; want %v", doErr, ctx.Err()) + } +} + +type noteCloseConn struct { + net.Conn + onceClose sync.Once + closefn func() +} + +func (c *noteCloseConn) Close() error { + c.onceClose.Do(c.closefn) + return c.Conn.Close() +} diff --git a/vendor/github.com/coreos/etcd/Godeps/_workspace/src/golang.org/x/net/context/withtimeout_test.go b/vendor/github.com/coreos/etcd/Godeps/_workspace/src/golang.org/x/net/context/withtimeout_test.go new file mode 100644 index 000000000..04adc00f7 --- /dev/null +++ b/vendor/github.com/coreos/etcd/Godeps/_workspace/src/golang.org/x/net/context/withtimeout_test.go @@ -0,0 +1,26 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package context_test + +import ( + "fmt" + "time" + + "github.com/coreos/etcd/Godeps/_workspace/src/golang.org/x/net/context" +) + +func ExampleWithTimeout() { + // Pass a context with a timeout to tell a blocking function that it + // should abandon its work after the timeout elapses. 
+	ctx, cancel := context.WithTimeout(context.Background(), 100*time.Millisecond)
+	defer cancel()
+	select {
+	case <-time.After(200 * time.Millisecond):
+		fmt.Println("overslept")
+	case <-ctx.Done():
+		fmt.Println(ctx.Err()) // prints "context deadline exceeded"
+	}
+	// Output:
+	// context deadline exceeded
+}
diff --git a/vendor/github.com/coreos/etcd/client/README.md b/vendor/github.com/coreos/etcd/client/README.md
new file mode 100644
index 000000000..e9e4be468
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/client/README.md
@@ -0,0 +1,110 @@
+# etcd/client
+
+etcd/client is the Go client library for etcd.
+
+[![GoDoc](https://godoc.org/github.com/coreos/etcd/client?status.png)](https://godoc.org/github.com/coreos/etcd/client)
+
+## Install
+
+```bash
+go get github.com/coreos/etcd/client
+```
+
+## Usage
+
+```go
+package main
+
+import (
+	"log"
+	"time"
+
+	"github.com/coreos/etcd/Godeps/_workspace/src/golang.org/x/net/context"
+	"github.com/coreos/etcd/client"
+)
+
+func main() {
+	cfg := client.Config{
+		Endpoints: []string{"http://127.0.0.1:2379"},
+		Transport: client.DefaultTransport,
+		// set timeout per request to fail fast when the target endpoint is unavailable
+		HeaderTimeoutPerRequest: time.Second,
+	}
+	c, err := client.New(cfg)
+	if err != nil {
+		log.Fatal(err)
+	}
+	kapi := client.NewKeysAPI(c)
+	// set "/foo" key with "bar" value
+	log.Print("Setting '/foo' key with 'bar' value")
+	resp, err := kapi.Set(context.Background(), "/foo", "bar", nil)
+	if err != nil {
+		log.Fatal(err)
+	} else {
+		// print common key info
+		log.Printf("Set is done. Metadata is %q\n", resp)
+	}
+	// get "/foo" key's value
+	log.Print("Getting '/foo' key value")
+	resp, err = kapi.Get(context.Background(), "/foo", nil)
+	if err != nil {
+		log.Fatal(err)
+	} else {
+		// print common key info
+		log.Printf("Get is done. Metadata is %q\n", resp)
+		// print value
+		log.Printf("%q key has %q value\n", resp.Node.Key, resp.Node.Value)
+	}
+}
+```
+
+## Error Handling
+
+etcd/client may return three types of errors.
+
+- context error
+
+Each API call takes a `context` as its first parameter. A context can be canceled or have an attached deadline. If the context is canceled or reaches its deadline, the corresponding context error will be returned no matter what internal errors the API call has already encountered.
+
+- cluster error
+
+Each API call tries the cluster endpoints one by one until it successfully gets a response. If a request to an endpoint fails, due to exceeding the per-request timeout or to connection issues, the error is added to a list of errors. If all possible endpoints fail, a cluster error that includes all of the encountered errors will be returned.
+
+- response error
+
+If the response received from the cluster is invalid, a plain string error will be returned. For example, it might be an invalid JSON error.
+
+Here is example code for handling client errors:
+
+```go
+cfg := client.Config{Endpoints: []string{"http://etcd1:2379","http://etcd2:2379","http://etcd3:2379"}}
+c, err := client.New(cfg)
+if err != nil {
+	log.Fatal(err)
+}
+
+kapi := client.NewKeysAPI(c)
+resp, err := kapi.Set(ctx, "test", "bar", nil)
+if err != nil {
+	if err == context.Canceled {
+		// ctx was canceled by another routine
+	} else if err == context.DeadlineExceeded {
+		// ctx had a deadline attached and the deadline was exceeded
+	} else if cerr, ok := err.(*client.ClusterError); ok {
+		// process (cerr.Errors)
+	} else {
+		// bad cluster endpoints, which are not etcd servers
+	}
+}
```
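+
+The `ctx` in the sample above is whatever context the caller supplies. A
+minimal sketch (illustrative; `kapi` as constructed above) uses a per-call
+timeout so each request is bounded:
+
+```go
+ctx, cancel := context.WithTimeout(context.Background(), client.DefaultRequestTimeout)
+resp, err := kapi.Set(ctx, "test", "bar", nil)
+cancel()
+```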
+
+
+## Caveat
+
+1. etcd/client prefers to use the same endpoint as long as that endpoint continues to work well. This saves socket resources and improves efficiency on both the client and server side. This preference does not affect the consistency of the data the client consumes, because data replicated to each etcd member has already passed through the consensus process.
+
+2. etcd/client does round-robin rotation on the other available endpoints if the preferred endpoint isn't functioning properly. For example, if the member that etcd/client is connected to is hard-killed, etcd/client will fail on the first attempt with the killed member and succeed on the second attempt with another member. If it fails to talk to all available endpoints, it will return all of the errors encountered.
+
+3. By default, etcd/client cannot currently handle the case in which the remote server is SIGSTOPed. The TCP keepalive mechanism doesn't help here, because the operating system of the stopped server may still answer TCP keep-alive probes. Over time we'd like to improve this functionality, but solving this issue isn't high priority because a real-life case in which a server is stopped, but the connection is kept alive, hasn't been brought to our attention.
+
+4. etcd/client cannot detect whether the member in use is healthy when doing read requests. If the member is isolated from the cluster, etcd/client may retrieve outdated data. As a workaround, users can monitor the experimental /health endpoint for member health information. We are improving this in [#3265](https://github.com/coreos/etcd/issues/3265).
diff --git a/vendor/github.com/coreos/etcd/client/auth_role.go b/vendor/github.com/coreos/etcd/client/auth_role.go
new file mode 100644
index 000000000..f410edbe0
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/client/auth_role.go
@@ -0,0 +1,233 @@
+// Copyright 2015 CoreOS, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package client
+
+import (
+	"bytes"
+	"encoding/json"
+	"net/http"
+	"net/url"
+
+	"github.com/coreos/etcd/Godeps/_workspace/src/golang.org/x/net/context"
+)
+
+type Role struct {
+	Role        string       `json:"role"`
+	Permissions Permissions  `json:"permissions"`
+	Grant       *Permissions `json:"grant,omitempty"`
+	Revoke      *Permissions `json:"revoke,omitempty"`
+}
+
+type Permissions struct {
+	KV rwPermission `json:"kv"`
+}
+
+type rwPermission struct {
+	Read  []string `json:"read"`
+	Write []string `json:"write"`
+}
+
+type PermissionType int
+
+const (
+	ReadPermission PermissionType = iota
+	WritePermission
+	ReadWritePermission
+)
+
+// NewAuthRoleAPI constructs a new AuthRoleAPI that uses HTTP to
+// interact with etcd's role creation and modification features.
+func NewAuthRoleAPI(c Client) AuthRoleAPI {
+	return &httpAuthRoleAPI{
+		client: c,
+	}
+}
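+
+// An illustrative AuthRoleAPI session (hypothetical role name and key
+// prefix; error handling elided):
+//
+//	roles := client.NewAuthRoleAPI(c)
+//	roles.AddRole(ctx, "fleet")
+//	roles.GrantRoleKV(ctx, "fleet", []string{"/fleet/*"}, client.ReadWritePermission)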
+
+type AuthRoleAPI interface {
+	// Add a role.
+	AddRole(ctx context.Context, role string) error
+
+	// Remove a role.
+	RemoveRole(ctx context.Context, role string) error
+
+	// Get role details.
+	GetRole(ctx context.Context, role string) (*Role, error)
+
+	// Grant a role some permission prefixes for the KV store.
+	GrantRoleKV(ctx context.Context, role string, prefixes []string, permType PermissionType) (*Role, error)
+
+	// Revoke some permission prefixes for a role on the KV store.
+	RevokeRoleKV(ctx context.Context, role string, prefixes []string, permType PermissionType) (*Role, error)
+
+	// List roles.
+	ListRoles(ctx context.Context) ([]string, error)
+}
+
+type httpAuthRoleAPI struct {
+	client httpClient
+}
+
+type authRoleAPIAction struct {
+	verb string
+	name string
+	role *Role
+}
+
+type authRoleAPIList struct{}
+
+func (list *authRoleAPIList) HTTPRequest(ep url.URL) *http.Request {
+	u := v2AuthURL(ep, "roles", "")
+	req, _ := http.NewRequest("GET", u.String(), nil)
+	req.Header.Set("Content-Type", "application/json")
+	return req
+}
+
+func (l *authRoleAPIAction) HTTPRequest(ep url.URL) *http.Request {
+	u := v2AuthURL(ep, "roles", l.name)
+	if l.role == nil {
+		req, _ := http.NewRequest(l.verb, u.String(), nil)
+		return req
+	}
+	b, err := json.Marshal(l.role)
+	if err != nil {
+		panic(err)
+	}
+	body := bytes.NewReader(b)
+	req, _ := http.NewRequest(l.verb, u.String(), body)
+	req.Header.Set("Content-Type", "application/json")
+	return req
+}
+
+func (r *httpAuthRoleAPI) ListRoles(ctx context.Context) ([]string, error) {
+	resp, body, err := r.client.Do(ctx, &authRoleAPIList{})
+	if err != nil {
+		return nil, err
+	}
+	if err = assertStatusCode(resp.StatusCode, http.StatusOK); err != nil {
+		return nil, err
+	}
+	var roleList struct {
+		Roles []string `json:"roles"`
+	}
+	if err = json.Unmarshal(body, &roleList); err != nil {
+		return nil, err
+	}
+	return roleList.Roles, nil
+}
+
+func (r *httpAuthRoleAPI) AddRole(ctx context.Context, rolename string) error {
+	role := &Role{
+		Role: rolename,
+	}
+	return r.addRemoveRole(ctx, &authRoleAPIAction{
+		verb: "PUT",
+		name: rolename,
+		role: role,
+	})
+}
+
+func (r *httpAuthRoleAPI) RemoveRole(ctx context.Context, rolename string) error {
+	return r.addRemoveRole(ctx, &authRoleAPIAction{
+		verb: "DELETE",
+		name: rolename,
+	})
+}
+
+func (r *httpAuthRoleAPI) addRemoveRole(ctx context.Context, req *authRoleAPIAction) error {
+	resp, body, err := r.client.Do(ctx, req)
+	if err != nil {
+		return err
+	}
+	if err := assertStatusCode(resp.StatusCode, http.StatusOK, http.StatusCreated); err != nil {
+		var sec authError
+		err := json.Unmarshal(body, &sec)
+		if err != nil {
+			return err
+		}
+		return sec
+	}
+	return nil
+}
+
+func (r *httpAuthRoleAPI) GetRole(ctx context.Context, rolename string) (*Role, error) {
+	return r.modRole(ctx, &authRoleAPIAction{
+		verb: "GET",
+		name: rolename,
+	})
+}
+
+func buildRWPermission(prefixes []string, permType PermissionType) rwPermission {
+	var out rwPermission
+	switch permType {
+	case ReadPermission:
+		out.Read = prefixes
+	case WritePermission:
+		out.Write = prefixes
+	case ReadWritePermission:
+		out.Read = prefixes
+		out.Write = prefixes
+	}
+	return out
+}
+
+func (r *httpAuthRoleAPI) GrantRoleKV(ctx context.Context, rolename string, prefixes []string, permType PermissionType) (*Role, error) {
+	rwp := buildRWPermission(prefixes, permType)
+	role := &Role{
+		Role: rolename,
+		Grant: &Permissions{
+			KV: rwp,
+		},
+	}
+	return r.modRole(ctx, &authRoleAPIAction{
+		verb: "PUT",
+		name: rolename,
+		role: role,
+	})
+}
+
+func (r *httpAuthRoleAPI) RevokeRoleKV(ctx context.Context, rolename string, prefixes []string, permType PermissionType) (*Role, error) {
+	rwp := buildRWPermission(prefixes, permType)
+	role := &Role{
+		Role: rolename,
+		Revoke: &Permissions{
+			KV: rwp,
+		},
+	}
+	return r.modRole(ctx, &authRoleAPIAction{
+		verb: "PUT",
+		name: rolename,
+		role: role,
+	})
+}
+
+func (r *httpAuthRoleAPI) modRole(ctx context.Context, req *authRoleAPIAction) (*Role, error) {
+	resp, body, err := r.client.Do(ctx, req)
+	if err != nil {
+		return nil, err
+	}
+	if err = assertStatusCode(resp.StatusCode, http.StatusOK); err != nil {
+		var sec authError
+		err = json.Unmarshal(body, &sec)
+		if err != nil {
+			return nil, err
+		}
+		return nil, sec
+	}
+	var role Role
+	if err = json.Unmarshal(body, &role); err != nil {
+		return nil, err
+	}
+	return &role, nil
+}
diff --git a/vendor/github.com/coreos/etcd/client/auth_user.go b/vendor/github.com/coreos/etcd/client/auth_user.go
new file mode 100644
index 000000000..8c04baa34
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/client/auth_user.go
@@ -0,0 +1,295 @@
+// Copyright 2015 CoreOS, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package client
+
+import (
+	"bytes"
+	"encoding/json"
+	"net/http"
+	"net/url"
+	"path"
+
+	"github.com/coreos/etcd/Godeps/_workspace/src/golang.org/x/net/context"
+)
+
+var (
+	defaultV2AuthPrefix = "/v2/auth"
+)
+
+type User struct {
+	User     string   `json:"user"`
+	Password string   `json:"password,omitempty"`
+	Roles    []string `json:"roles"`
+	Grant    []string `json:"grant,omitempty"`
+	Revoke   []string `json:"revoke,omitempty"`
+}
+
+func v2AuthURL(ep url.URL, action string, name string) *url.URL {
+	if name != "" {
+		ep.Path = path.Join(ep.Path, defaultV2AuthPrefix, action, name)
+		return &ep
+	}
+	ep.Path = path.Join(ep.Path, defaultV2AuthPrefix, action)
+	return &ep
+}
+
+// NewAuthAPI constructs a new AuthAPI that uses HTTP to
+// interact with etcd's general auth features.
+func NewAuthAPI(c Client) AuthAPI {
+	return &httpAuthAPI{
+		client: c,
+	}
+}
+
+type AuthAPI interface {
+	// Enable auth.
+	Enable(ctx context.Context) error
+
+	// Disable auth.
+ Disable(ctx context.Context) error +} + +type httpAuthAPI struct { + client httpClient +} + +func (s *httpAuthAPI) Enable(ctx context.Context) error { + return s.enableDisable(ctx, &authAPIAction{"PUT"}) +} + +func (s *httpAuthAPI) Disable(ctx context.Context) error { + return s.enableDisable(ctx, &authAPIAction{"DELETE"}) +} + +func (s *httpAuthAPI) enableDisable(ctx context.Context, req httpAction) error { + resp, body, err := s.client.Do(ctx, req) + if err != nil { + return err + } + if err = assertStatusCode(resp.StatusCode, http.StatusOK, http.StatusCreated); err != nil { + var sec authError + err = json.Unmarshal(body, &sec) + if err != nil { + return err + } + return sec + } + return nil +} + +type authAPIAction struct { + verb string +} + +func (l *authAPIAction) HTTPRequest(ep url.URL) *http.Request { + u := v2AuthURL(ep, "enable", "") + req, _ := http.NewRequest(l.verb, u.String(), nil) + return req +} + +type authError struct { + Message string `json:"message"` + Code int `json:"-"` +} + +func (e authError) Error() string { + return e.Message +} + +// NewAuthUserAPI constructs a new AuthUserAPI that uses HTTP to +// interact with etcd's user creation and modification features. +func NewAuthUserAPI(c Client) AuthUserAPI { + return &httpAuthUserAPI{ + client: c, + } +} + +type AuthUserAPI interface { + // Add a user. + AddUser(ctx context.Context, username string, password string) error + + // Remove a user. + RemoveUser(ctx context.Context, username string) error + + // Get user details. + GetUser(ctx context.Context, username string) (*User, error) + + // Grant a user some permission roles. + GrantUser(ctx context.Context, username string, roles []string) (*User, error) + + // Revoke some permission roles from a user. + RevokeUser(ctx context.Context, username string, roles []string) (*User, error) + + // Change the user's password. + ChangePassword(ctx context.Context, username string, password string) (*User, error) + + // List users. 
+ ListUsers(ctx context.Context) ([]string, error) +} + +type httpAuthUserAPI struct { + client httpClient +} + +type authUserAPIAction struct { + verb string + username string + user *User +} + +type authUserAPIList struct{} + +func (list *authUserAPIList) HTTPRequest(ep url.URL) *http.Request { + u := v2AuthURL(ep, "users", "") + req, _ := http.NewRequest("GET", u.String(), nil) + req.Header.Set("Content-Type", "application/json") + return req +} + +func (l *authUserAPIAction) HTTPRequest(ep url.URL) *http.Request { + u := v2AuthURL(ep, "users", l.username) + if l.user == nil { + req, _ := http.NewRequest(l.verb, u.String(), nil) + return req + } + b, err := json.Marshal(l.user) + if err != nil { + panic(err) + } + body := bytes.NewReader(b) + req, _ := http.NewRequest(l.verb, u.String(), body) + req.Header.Set("Content-Type", "application/json") + return req +} + +func (u *httpAuthUserAPI) ListUsers(ctx context.Context) ([]string, error) { + resp, body, err := u.client.Do(ctx, &authUserAPIList{}) + if err != nil { + return nil, err + } + if err = assertStatusCode(resp.StatusCode, http.StatusOK); err != nil { + var sec authError + err = json.Unmarshal(body, &sec) + if err != nil { + return nil, err + } + return nil, sec + } + var userList struct { + Users []string `json:"users"` + } + if err = json.Unmarshal(body, &userList); err != nil { + return nil, err + } + return userList.Users, nil +} + +func (u *httpAuthUserAPI) AddUser(ctx context.Context, username string, password string) error { + user := &User{ + User: username, + Password: password, + } + return u.addRemoveUser(ctx, &authUserAPIAction{ + verb: "PUT", + username: username, + user: user, + }) +} + +func (u *httpAuthUserAPI) RemoveUser(ctx context.Context, username string) error { + return u.addRemoveUser(ctx, &authUserAPIAction{ + verb: "DELETE", + username: username, + }) +} + +func (u *httpAuthUserAPI) addRemoveUser(ctx context.Context, req *authUserAPIAction) error { + resp, body, err := u.client.Do(ctx, req) + if err != nil { + return err + } + if err = assertStatusCode(resp.StatusCode, http.StatusOK, http.StatusCreated); err != nil { + var sec authError + err = json.Unmarshal(body, &sec) + if err != nil { + return err + } + return sec + } + return nil +} + +func (u *httpAuthUserAPI) GetUser(ctx context.Context, username string) (*User, error) { + return u.modUser(ctx, &authUserAPIAction{ + verb: "GET", + username: username, + }) +} + +func (u *httpAuthUserAPI) GrantUser(ctx context.Context, username string, roles []string) (*User, error) { + user := &User{ + User: username, + Grant: roles, + } + return u.modUser(ctx, &authUserAPIAction{ + verb: "PUT", + username: username, + user: user, + }) +} + +func (u *httpAuthUserAPI) RevokeUser(ctx context.Context, username string, roles []string) (*User, error) { + user := &User{ + User: username, + Revoke: roles, + } + return u.modUser(ctx, &authUserAPIAction{ + verb: "PUT", + username: username, + user: user, + }) +} + +func (u *httpAuthUserAPI) ChangePassword(ctx context.Context, username string, password string) (*User, error) { + user := &User{ + User: username, + Password: password, + } + return u.modUser(ctx, &authUserAPIAction{ + verb: "PUT", + username: username, + user: user, + }) +} + +func (u *httpAuthUserAPI) modUser(ctx context.Context, req *authUserAPIAction) (*User, error) { + resp, body, err := u.client.Do(ctx, req) + if err != nil { + return nil, err + } + if err = assertStatusCode(resp.StatusCode, http.StatusOK); err != nil { + var sec authError + err = 
json.Unmarshal(body, &sec) + if err != nil { + return nil, err + } + return nil, sec + } + var user User + if err = json.Unmarshal(body, &user); err != nil { + return nil, err + } + return &user, nil +} diff --git a/vendor/github.com/coreos/etcd/client/cancelreq.go b/vendor/github.com/coreos/etcd/client/cancelreq.go new file mode 100644 index 000000000..fefdb40e4 --- /dev/null +++ b/vendor/github.com/coreos/etcd/client/cancelreq.go @@ -0,0 +1,20 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// borrowed from golang/net/context/ctxhttp/cancelreq.go + +// +build go1.5 + +package client + +import "net/http" + +func requestCanceler(tr CancelableTransport, req *http.Request) func() { + ch := make(chan struct{}) + req.Cancel = ch + + return func() { + close(ch) + } +} diff --git a/vendor/github.com/coreos/etcd/client/cancelreq_go14.go b/vendor/github.com/coreos/etcd/client/cancelreq_go14.go new file mode 100644 index 000000000..2bed38a41 --- /dev/null +++ b/vendor/github.com/coreos/etcd/client/cancelreq_go14.go @@ -0,0 +1,17 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// borrowed from golang/net/context/ctxhttp/cancelreq_go14.go + +// +build !go1.5 + +package client + +import "net/http" + +func requestCanceler(tr CancelableTransport, req *http.Request) func() { + return func() { + tr.CancelRequest(req) + } +} diff --git a/vendor/github.com/coreos/etcd/client/client.go b/vendor/github.com/coreos/etcd/client/client.go new file mode 100644 index 000000000..9d7735cdf --- /dev/null +++ b/vendor/github.com/coreos/etcd/client/client.go @@ -0,0 +1,596 @@ +// Copyright 2015 CoreOS, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package client + +import ( + "errors" + "fmt" + "io/ioutil" + "math/rand" + "net" + "net/http" + "net/url" + "reflect" + "sort" + "strconv" + "sync" + "time" + + "github.com/coreos/etcd/Godeps/_workspace/src/golang.org/x/net/context" +) + +var ( + ErrNoEndpoints = errors.New("client: no endpoints available") + ErrTooManyRedirects = errors.New("client: too many redirects") + ErrClusterUnavailable = errors.New("client: etcd cluster is unavailable or misconfigured") + ErrNoLeaderEndpoint = errors.New("client: no leader endpoint available") + errTooManyRedirectChecks = errors.New("client: too many redirect checks") +) + +var DefaultRequestTimeout = 5 * time.Second + +var DefaultTransport CancelableTransport = &http.Transport{ + Proxy: http.ProxyFromEnvironment, + Dial: (&net.Dialer{ + Timeout: 30 * time.Second, + KeepAlive: 30 * time.Second, + }).Dial, + TLSHandshakeTimeout: 10 * time.Second, +} + +type EndpointSelectionMode int + +const ( + // EndpointSelectionRandom is the default value of the 'SelectionMode'. 
+	// As the name implies, the client object will pick a node from the members
+	// of the cluster in a random fashion. If the cluster has three members, A, B,
+	// and C, the client may pick any one of the three as its request
+	// destination.
+	EndpointSelectionRandom EndpointSelectionMode = iota
+
+	// If 'SelectionMode' is set to 'EndpointSelectionPrioritizeLeader',
+	// requests are sent directly to the cluster leader. This reduces
+	// forwarding roundtrips compared to making requests to etcd followers
+	// who then forward them to the cluster leader. In the event of a leader
+	// failure, however, clients configured this way cannot prioritize among
+	// the remaining etcd followers. Therefore, when a client sets 'SelectionMode'
+	// to 'EndpointSelectionPrioritizeLeader', it must use 'client.AutoSync()' to
+	// maintain its knowledge of the current cluster state.
+	EndpointSelectionPrioritizeLeader
+)
+
+type Config struct {
+	// Endpoints defines a set of URLs (schemes, hosts and ports only)
+	// that can be used to communicate with a logical etcd cluster. For
+	// example, a three-node cluster could be provided like so:
+	//
+	// 	Endpoints: []string{
+	//		"http://node1.example.com:2379",
+	//		"http://node2.example.com:2379",
+	//		"http://node3.example.com:2379",
+	//	}
+	//
+	// If multiple endpoints are provided, the Client will attempt to
+	// use them all in the event that one or more of them are unusable.
+	//
+	// If Client.Sync is ever called, the Client may cache an alternate
+	// set of endpoints to continue operation.
+	Endpoints []string
+
+	// Transport is used by the Client to drive HTTP requests. If not
+	// provided, DefaultTransport will be used.
+	Transport CancelableTransport
+
+	// CheckRedirect specifies the policy for handling HTTP redirects.
+	// If CheckRedirect is not nil, the Client calls it before
+	// following an HTTP redirect. The sole argument is the number of
+	// requests that have already been made. If CheckRedirect returns
+	// an error, Client.Do will not make any further requests and will
+	// return the error to the caller.
+	//
+	// If CheckRedirect is nil, the Client uses its default policy,
+	// which is to stop after 10 consecutive requests.
+	CheckRedirect CheckRedirectFunc
+
+	// Username specifies the user credential to add as an authorization header.
+	Username string
+
+	// Password is the password for the specified user to add as an authorization header
+	// to the request.
+	Password string
+
+	// HeaderTimeoutPerRequest specifies the time limit to wait for a response
+	// header in a single request made by the Client. The timeout includes
+	// connection time, any redirects, and header wait time.
+	//
+	// For a non-watch GET request, the server returns the response body immediately.
+	// For a PUT/POST/DELETE request, the server will attempt to commit the request
+	// before responding, which is expected to take `100ms + 2 * RTT`.
+	// For a watch request, the server returns the header immediately to notify the
+	// Client that the watch has started. But if the server is behind some kind of
+	// proxy, the response header may be cached at the proxy, and the Client cannot
+	// rely on this behavior.
+	//
+	// Note that a wait request ignores this timeout.
+	//
+	// One API call may send multiple requests to different etcd servers until it
+	// succeeds. Use the context of the API call to specify an overall timeout.
+	//
+	// A HeaderTimeoutPerRequest of zero means no timeout.
+	HeaderTimeoutPerRequest time.Duration
+
+	// SelectionMode is an EndpointSelectionMode enum that specifies the
+	// policy for choosing the etcd cluster node to which requests are sent.
+	SelectionMode EndpointSelectionMode
+}
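+
+// An illustrative Config (endpoint and timeout are placeholders):
+//
+//	cfg := client.Config{
+//		Endpoints:               []string{"http://127.0.0.1:2379"},
+//		Transport:               client.DefaultTransport,
+//		HeaderTimeoutPerRequest: time.Second,
+//		SelectionMode:           client.EndpointSelectionRandom,
+//	}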
+
+func (cfg *Config) transport() CancelableTransport {
+	if cfg.Transport == nil {
+		return DefaultTransport
+	}
+	return cfg.Transport
+}
+
+func (cfg *Config) checkRedirect() CheckRedirectFunc {
+	if cfg.CheckRedirect == nil {
+		return DefaultCheckRedirect
+	}
+	return cfg.CheckRedirect
+}
+
+// CancelableTransport mimics net/http.Transport, but requires that
+// the object also support request cancellation.
+type CancelableTransport interface {
+	http.RoundTripper
+	CancelRequest(req *http.Request)
+}
+
+type CheckRedirectFunc func(via int) error
+
+// DefaultCheckRedirect follows up to 10 redirects, but no more.
+var DefaultCheckRedirect CheckRedirectFunc = func(via int) error {
+	if via > 10 {
+		return ErrTooManyRedirects
+	}
+	return nil
+}
+
+type Client interface {
+	// Sync updates the internal cache of the etcd cluster's membership.
+	Sync(context.Context) error
+
+	// AutoSync periodically calls Sync() at the given interval.
+	// The recommended sync interval is 10 seconds to 1 minute, which does
+	// not put too much overhead on the server and lets the client catch up
+	// with cluster changes in a timely manner.
+	//
+	// Example usage:
+	//
+	//	for {
+	//		err := client.AutoSync(ctx, 10*time.Second)
+	//		if err == context.DeadlineExceeded || err == context.Canceled {
+	//			break
+	//		}
+	//		log.Print(err)
+	//	}
+	AutoSync(context.Context, time.Duration) error
+
+	// Endpoints returns a copy of the current set of API endpoints used
+	// by Client to resolve HTTP requests. If Sync has ever been called,
+	// this may differ from the initial Endpoints provided in the Config.
+	Endpoints() []string
+
+	// SetEndpoints sets the set of API endpoints used by Client to resolve
+	// HTTP requests. If the given endpoints are not valid, an error will be
+	// returned.
+	SetEndpoints(eps []string) error
+
+	httpClient
+}
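+
+// A typical setup (illustrative; cfg and ctx are supplied by the caller)
+// keeps AutoSync running for the life of the client:
+//
+//	c, err := client.New(cfg)
+//	if err != nil {
+//		// handle error
+//	}
+//	go c.AutoSync(ctx, 10*time.Second)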
+
+func New(cfg Config) (Client, error) {
+	c := &httpClusterClient{
+		clientFactory: newHTTPClientFactory(cfg.transport(), cfg.checkRedirect(), cfg.HeaderTimeoutPerRequest),
+		rand:          rand.New(rand.NewSource(int64(time.Now().Nanosecond()))),
+		selectionMode: cfg.SelectionMode,
+	}
+	if cfg.Username != "" {
+		c.credentials = &credentials{
+			username: cfg.Username,
+			password: cfg.Password,
+		}
+	}
+	if err := c.SetEndpoints(cfg.Endpoints); err != nil {
+		return nil, err
+	}
+	return c, nil
+}
+
+type httpClient interface {
+	Do(context.Context, httpAction) (*http.Response, []byte, error)
+}
+
+func newHTTPClientFactory(tr CancelableTransport, cr CheckRedirectFunc, headerTimeout time.Duration) httpClientFactory {
+	return func(ep url.URL) httpClient {
+		return &redirectFollowingHTTPClient{
+			checkRedirect: cr,
+			client: &simpleHTTPClient{
+				transport:     tr,
+				endpoint:      ep,
+				headerTimeout: headerTimeout,
+			},
+		}
+	}
+}
+
+type credentials struct {
+	username string
+	password string
+}
+
+type httpClientFactory func(url.URL) httpClient
+
+type httpAction interface {
+	HTTPRequest(url.URL) *http.Request
+}
+
+type httpClusterClient struct {
+	clientFactory httpClientFactory
+	endpoints     []url.URL
+	pinned        int
+	credentials   *credentials
+	sync.RWMutex
+	rand          *rand.Rand
+	selectionMode EndpointSelectionMode
+}
+
+func (c *httpClusterClient) getLeaderEndpoint() (string, error) {
+	mAPI := NewMembersAPI(c)
+	leader, err := mAPI.Leader(context.Background())
+	if err != nil {
+		return "", err
+	}
+
+	return leader.ClientURLs[0], nil // TODO: how to handle multiple client URLs?
+}
+
+func (c *httpClusterClient) SetEndpoints(eps []string) error {
+	if len(eps) == 0 {
+		return ErrNoEndpoints
+	}
+
+	neps := make([]url.URL, len(eps))
+	for i, ep := range eps {
+		u, err := url.Parse(ep)
+		if err != nil {
+			return err
+		}
+		neps[i] = *u
+	}
+
+	switch c.selectionMode {
+	case EndpointSelectionRandom:
+		c.endpoints = shuffleEndpoints(c.rand, neps)
+		c.pinned = 0
+	case EndpointSelectionPrioritizeLeader:
+		c.endpoints = neps
+		lep, err := c.getLeaderEndpoint()
+		if err != nil {
+			return ErrNoLeaderEndpoint
+		}
+
+		for i := range c.endpoints {
+			if c.endpoints[i].String() == lep {
+				c.pinned = i
+				break
+			}
+		}
+		// If the endpoints don't include the leader endpoint, just keep
+		// c.pinned = 0. Forwarding between follower and leader would be
+		// required, but it works.
+ default: + return errors.New(fmt.Sprintf("invalid endpoint selection mode: %d", c.selectionMode)) + } + + return nil +} + +func (c *httpClusterClient) Do(ctx context.Context, act httpAction) (*http.Response, []byte, error) { + action := act + c.RLock() + leps := len(c.endpoints) + eps := make([]url.URL, leps) + n := copy(eps, c.endpoints) + pinned := c.pinned + + if c.credentials != nil { + action = &authedAction{ + act: act, + credentials: *c.credentials, + } + } + c.RUnlock() + + if leps == 0 { + return nil, nil, ErrNoEndpoints + } + + if leps != n { + return nil, nil, errors.New("unable to pick endpoint: copy failed") + } + + var resp *http.Response + var body []byte + var err error + cerr := &ClusterError{} + + for i := pinned; i < leps+pinned; i++ { + k := i % leps + hc := c.clientFactory(eps[k]) + resp, body, err = hc.Do(ctx, action) + if err != nil { + cerr.Errors = append(cerr.Errors, err) + // mask previous errors with context error, which is controlled by user + if err == context.Canceled || err == context.DeadlineExceeded { + return nil, nil, err + } + continue + } + if resp.StatusCode/100 == 5 { + switch resp.StatusCode { + case http.StatusInternalServerError, http.StatusServiceUnavailable: + // TODO: make sure this is a no leader response + cerr.Errors = append(cerr.Errors, fmt.Errorf("client: etcd member %s has no leader", eps[k].String())) + default: + cerr.Errors = append(cerr.Errors, fmt.Errorf("client: etcd member %s returns server error [%s]", eps[k].String(), http.StatusText(resp.StatusCode))) + } + continue + } + if k != pinned { + c.Lock() + c.pinned = k + c.Unlock() + } + return resp, body, nil + } + + return nil, nil, cerr +} + +func (c *httpClusterClient) Endpoints() []string { + c.RLock() + defer c.RUnlock() + + eps := make([]string, len(c.endpoints)) + for i, ep := range c.endpoints { + eps[i] = ep.String() + } + + return eps +} + +func (c *httpClusterClient) Sync(ctx context.Context) error { + mAPI := NewMembersAPI(c) + ms, err := mAPI.List(ctx) + if err != nil { + return err + } + + c.Lock() + defer c.Unlock() + + eps := make([]string, 0) + for _, m := range ms { + eps = append(eps, m.ClientURLs...) 
+ } + sort.Sort(sort.StringSlice(eps)) + + ceps := make([]string, len(c.endpoints)) + for i, cep := range c.endpoints { + ceps[i] = cep.String() + } + sort.Sort(sort.StringSlice(ceps)) + // fast path if no change happens + // this helps client to pin the endpoint when no cluster change + if reflect.DeepEqual(eps, ceps) { + return nil + } + + return c.SetEndpoints(eps) +} + +func (c *httpClusterClient) AutoSync(ctx context.Context, interval time.Duration) error { + ticker := time.NewTicker(interval) + defer ticker.Stop() + for { + err := c.Sync(ctx) + if err != nil { + return err + } + select { + case <-ctx.Done(): + return ctx.Err() + case <-ticker.C: + } + } +} + +type roundTripResponse struct { + resp *http.Response + err error +} + +type simpleHTTPClient struct { + transport CancelableTransport + endpoint url.URL + headerTimeout time.Duration +} + +func (c *simpleHTTPClient) Do(ctx context.Context, act httpAction) (*http.Response, []byte, error) { + req := act.HTTPRequest(c.endpoint) + + if err := printcURL(req); err != nil { + return nil, nil, err + } + + isWait := false + if req != nil && req.URL != nil { + ws := req.URL.Query().Get("wait") + if len(ws) != 0 { + var err error + isWait, err = strconv.ParseBool(ws) + if err != nil { + return nil, nil, fmt.Errorf("wrong wait value %s (%v for %+v)", ws, err, req) + } + } + } + + var hctx context.Context + var hcancel context.CancelFunc + if !isWait && c.headerTimeout > 0 { + hctx, hcancel = context.WithTimeout(ctx, c.headerTimeout) + } else { + hctx, hcancel = context.WithCancel(ctx) + } + defer hcancel() + + reqcancel := requestCanceler(c.transport, req) + + rtchan := make(chan roundTripResponse, 1) + go func() { + resp, err := c.transport.RoundTrip(req) + rtchan <- roundTripResponse{resp: resp, err: err} + close(rtchan) + }() + + var resp *http.Response + var err error + + select { + case rtresp := <-rtchan: + resp, err = rtresp.resp, rtresp.err + case <-hctx.Done(): + // cancel and wait for request to actually exit before continuing + reqcancel() + rtresp := <-rtchan + resp = rtresp.resp + switch { + case ctx.Err() != nil: + err = ctx.Err() + case hctx.Err() != nil: + err = fmt.Errorf("client: endpoint %s exceeded header timeout", c.endpoint.String()) + default: + panic("failed to get error from context") + } + } + + // always check for resp nil-ness to deal with possible + // race conditions between channels above + defer func() { + if resp != nil { + resp.Body.Close() + } + }() + + if err != nil { + return nil, nil, err + } + + var body []byte + done := make(chan struct{}) + go func() { + body, err = ioutil.ReadAll(resp.Body) + done <- struct{}{} + }() + + select { + case <-ctx.Done(): + resp.Body.Close() + <-done + return nil, nil, ctx.Err() + case <-done: + } + + return resp, body, err +} + +type authedAction struct { + act httpAction + credentials credentials +} + +func (a *authedAction) HTTPRequest(url url.URL) *http.Request { + r := a.act.HTTPRequest(url) + r.SetBasicAuth(a.credentials.username, a.credentials.password) + return r +} + +type redirectFollowingHTTPClient struct { + client httpClient + checkRedirect CheckRedirectFunc +} + +func (r *redirectFollowingHTTPClient) Do(ctx context.Context, act httpAction) (*http.Response, []byte, error) { + next := act + for i := 0; i < 100; i++ { + if i > 0 { + if err := r.checkRedirect(i); err != nil { + return nil, nil, err + } + } + resp, body, err := r.client.Do(ctx, next) + if err != nil { + return nil, nil, err + } + if resp.StatusCode/100 == 3 { + hdr := 
resp.Header.Get("Location") + if hdr == "" { + return nil, nil, fmt.Errorf("Location header not set") + } + loc, err := url.Parse(hdr) + if err != nil { + return nil, nil, fmt.Errorf("Location header not valid URL: %s", hdr) + } + next = &redirectedHTTPAction{ + action: act, + location: *loc, + } + continue + } + return resp, body, nil + } + + return nil, nil, errTooManyRedirectChecks +} + +type redirectedHTTPAction struct { + action httpAction + location url.URL +} + +func (r *redirectedHTTPAction) HTTPRequest(ep url.URL) *http.Request { + orig := r.action.HTTPRequest(ep) + orig.URL = &r.location + return orig +} + +func shuffleEndpoints(r *rand.Rand, eps []url.URL) []url.URL { + p := r.Perm(len(eps)) + neps := make([]url.URL, len(eps)) + for i, k := range p { + neps[i] = eps[k] + } + return neps +} diff --git a/vendor/github.com/coreos/etcd/client/client_test.go b/vendor/github.com/coreos/etcd/client/client_test.go new file mode 100644 index 000000000..74437d690 --- /dev/null +++ b/vendor/github.com/coreos/etcd/client/client_test.go @@ -0,0 +1,896 @@ +// Copyright 2015 CoreOS, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package client + +import ( + "errors" + "io" + "io/ioutil" + "math/rand" + "net/http" + "net/url" + "reflect" + "sort" + "strings" + "testing" + "time" + + "github.com/coreos/etcd/Godeps/_workspace/src/golang.org/x/net/context" + "github.com/coreos/etcd/pkg/testutil" +) + +type actionAssertingHTTPClient struct { + t *testing.T + num int + act httpAction + + resp http.Response + body []byte + err error +} + +func (a *actionAssertingHTTPClient) Do(_ context.Context, act httpAction) (*http.Response, []byte, error) { + if !reflect.DeepEqual(a.act, act) { + a.t.Errorf("#%d: unexpected httpAction: want=%#v got=%#v", a.num, a.act, act) + } + + return &a.resp, a.body, a.err +} + +type staticHTTPClient struct { + resp http.Response + body []byte + err error +} + +func (s *staticHTTPClient) Do(context.Context, httpAction) (*http.Response, []byte, error) { + return &s.resp, s.body, s.err +} + +type staticHTTPAction struct { + request http.Request +} + +func (s *staticHTTPAction) HTTPRequest(url.URL) *http.Request { + return &s.request +} + +type staticHTTPResponse struct { + resp http.Response + body []byte + err error +} + +type multiStaticHTTPClient struct { + responses []staticHTTPResponse + cur int +} + +func (s *multiStaticHTTPClient) Do(context.Context, httpAction) (*http.Response, []byte, error) { + r := s.responses[s.cur] + s.cur++ + return &r.resp, r.body, r.err +} + +func newStaticHTTPClientFactory(responses []staticHTTPResponse) httpClientFactory { + var cur int + return func(url.URL) httpClient { + r := responses[cur] + cur++ + return &staticHTTPClient{resp: r.resp, body: r.body, err: r.err} + } +} + +type fakeTransport struct { + respchan chan *http.Response + errchan chan error + startCancel chan struct{} + finishCancel chan struct{} +} + +func newFakeTransport() *fakeTransport { + return &fakeTransport{ + respchan: make(chan *http.Response, 1), + 
errchan:      make(chan error, 1),
+		startCancel:  make(chan struct{}, 1),
+		finishCancel: make(chan struct{}, 1),
+	}
+}
+
+// RoundTrip serves a queued response or error, or simulates an in-flight
+// request being canceled via the startCancel/finishCancel channels.
+// (Reconstructed here to match the tests below, which otherwise have no
+// reader for finishCancel; the exact upstream wording may differ.)
+func (t *fakeTransport) RoundTrip(*http.Request) (*http.Response, error) {
+	select {
+	case resp := <-t.respchan:
+		return resp, nil
+	case err := <-t.errchan:
+		return nil, err
+	case <-t.startCancel:
+		select {
+		// simulates the request finishing before the cancellation takes effect
+		case resp := <-t.respchan:
+			return resp, nil
+		// wait on finishCancel to simulate CancelRequest taking some time
+		case <-t.finishCancel:
+			return nil, errors.New("cancelled")
+		}
+	}
+}
+
+func (t *fakeTransport) CancelRequest(*http.Request) {
+	t.startCancel <- struct{}{}
+}
+
+type fakeAction struct{}
+
+func (a *fakeAction) HTTPRequest(url.URL) *http.Request {
+	return &http.Request{}
+}
+
+func TestSimpleHTTPClientDoSuccess(t *testing.T) {
+	tr := newFakeTransport()
+	c := &simpleHTTPClient{transport: tr}
+
+	tr.respchan <- &http.Response{
+		StatusCode: http.StatusTeapot,
+		Body:       ioutil.NopCloser(strings.NewReader("foo")),
+	}
+
+	resp, body, err := c.Do(context.Background(), &fakeAction{})
+	if err != nil {
+		t.Fatalf("incorrect error value: want=nil got=%v", err)
+	}
+
+	wantCode := http.StatusTeapot
+	if wantCode != resp.StatusCode {
+		t.Fatalf("invalid response code: want=%d got=%d", wantCode, resp.StatusCode)
+	}
+
+	wantBody := []byte("foo")
+	if !reflect.DeepEqual(wantBody, body) {
+		t.Fatalf("invalid response body: want=%q got=%q", wantBody, body)
+	}
+}
+
+func TestSimpleHTTPClientDoError(t *testing.T) {
+	tr := newFakeTransport()
+	c := &simpleHTTPClient{transport: tr}
+
+	tr.errchan <- errors.New("fixture")
+
+	_, _, err := c.Do(context.Background(), &fakeAction{})
+	if err == nil {
+		t.Fatalf("expected non-nil error, got nil")
+	}
+}
+
+func TestSimpleHTTPClientDoCancelContext(t *testing.T) {
+	tr := newFakeTransport()
+	c := &simpleHTTPClient{transport: tr}
+
+	tr.startCancel <- struct{}{}
+	tr.finishCancel <- struct{}{}
+
+	_, _, err := c.Do(context.Background(), &fakeAction{})
+	if err == nil {
+		t.Fatalf("expected non-nil error, got nil")
+	}
+}
+
+type checkableReadCloser struct {
+	io.ReadCloser
+	closed bool
+}
+
+func (c *checkableReadCloser) Close() error {
+	if !c.closed {
+		c.closed = true
+		return c.ReadCloser.Close()
+	}
+	return nil
+}
+
+func TestSimpleHTTPClientDoCancelContextResponseBodyClosed(t *testing.T) {
+	tr := newFakeTransport()
+	c := &simpleHTTPClient{transport: tr}
+
+	// create an already-cancelled context
+	ctx, cancel := context.WithCancel(context.Background())
+	cancel()
+
+	body := &checkableReadCloser{ReadCloser: ioutil.NopCloser(strings.NewReader("foo"))}
+	go func() {
+		// wait until simpleHTTPClient knows the context is already canceled
+		// and calls CancelRequest
+		testutil.WaitSchedule()
+
+		// the response is returned before the cancellation takes effect
+		tr.respchan <- &http.Response{Body: body}
+	}()
+
+	_, _, err := c.Do(ctx, &fakeAction{})
+	if err == nil {
+		t.Fatalf("expected non-nil error, got nil")
+	}
+
+	if !body.closed {
+		t.Fatalf("expected closed body")
+	}
+}
+
+type blockingBody struct {
+	c chan struct{}
+}
+
+func (bb *blockingBody) Read(p []byte) (n int, err error) {
+	<-bb.c
+	return 0, errors.New("closed")
+}
+
+func (bb *blockingBody) Close() error {
+	close(bb.c)
+	return nil
+}
+
+func TestSimpleHTTPClientDoCancelContextResponseBodyClosedWithBlockingBody(t *testing.T) {
+	tr := newFakeTransport()
+	c := &simpleHTTPClient{transport: tr}
+
+	ctx, cancel := context.WithCancel(context.Background())
+	body := &checkableReadCloser{ReadCloser: &blockingBody{c: make(chan struct{})}}
+	go func() {
+		tr.respchan <- &http.Response{Body: body}
+		time.Sleep(2 * time.Millisecond)
+		// cancel after the body is received
+		cancel()
+	}()
+
+	_, _, err := c.Do(ctx, &fakeAction{})
+	if err != context.Canceled {
+		t.Fatalf("expected %+v, got %+v", context.Canceled, err)
+	}
+
+	if !body.closed {
+		t.Fatalf("expected closed body")
+	}
+}
+
+func TestSimpleHTTPClientDoCancelContextWaitForRoundTrip(t *testing.T) {
+	tr 
:= newFakeTransport() + c := &simpleHTTPClient{transport: tr} + + donechan := make(chan struct{}) + ctx, cancel := context.WithCancel(context.Background()) + go func() { + c.Do(ctx, &fakeAction{}) + close(donechan) + }() + + // This should call CancelRequest and begin the cancellation process + cancel() + + select { + case <-donechan: + t.Fatalf("simpleHTTPClient.Do should not have exited yet") + default: + } + + tr.finishCancel <- struct{}{} + + select { + case <-donechan: + //expected behavior + return + case <-time.After(time.Second): + t.Fatalf("simpleHTTPClient.Do did not exit within 1s") + } +} + +func TestSimpleHTTPClientDoHeaderTimeout(t *testing.T) { + tr := newFakeTransport() + tr.finishCancel <- struct{}{} + c := &simpleHTTPClient{transport: tr, headerTimeout: time.Millisecond} + + errc := make(chan error) + go func() { + _, _, err := c.Do(context.Background(), &fakeAction{}) + errc <- err + }() + + select { + case err := <-errc: + if err == nil { + t.Fatalf("expected non-nil error, got nil") + } + case <-time.After(time.Second): + t.Fatalf("unexpected timeout when waitting for the test to finish") + } +} + +func TestHTTPClusterClientDo(t *testing.T) { + fakeErr := errors.New("fake!") + fakeURL := url.URL{} + tests := []struct { + client *httpClusterClient + wantCode int + wantErr error + wantPinned int + }{ + // first good response short-circuits Do + { + client: &httpClusterClient{ + endpoints: []url.URL{fakeURL, fakeURL}, + clientFactory: newStaticHTTPClientFactory( + []staticHTTPResponse{ + {resp: http.Response{StatusCode: http.StatusTeapot}}, + {err: fakeErr}, + }, + ), + rand: rand.New(rand.NewSource(0)), + }, + wantCode: http.StatusTeapot, + }, + + // fall through to good endpoint if err is arbitrary + { + client: &httpClusterClient{ + endpoints: []url.URL{fakeURL, fakeURL}, + clientFactory: newStaticHTTPClientFactory( + []staticHTTPResponse{ + {err: fakeErr}, + {resp: http.Response{StatusCode: http.StatusTeapot}}, + }, + ), + rand: rand.New(rand.NewSource(0)), + }, + wantCode: http.StatusTeapot, + wantPinned: 1, + }, + + // context.Canceled short-circuits Do + { + client: &httpClusterClient{ + endpoints: []url.URL{fakeURL, fakeURL}, + clientFactory: newStaticHTTPClientFactory( + []staticHTTPResponse{ + {err: context.Canceled}, + {resp: http.Response{StatusCode: http.StatusTeapot}}, + }, + ), + rand: rand.New(rand.NewSource(0)), + }, + wantErr: context.Canceled, + }, + + // return err if there are no endpoints + { + client: &httpClusterClient{ + endpoints: []url.URL{}, + clientFactory: newHTTPClientFactory(nil, nil, 0), + rand: rand.New(rand.NewSource(0)), + }, + wantErr: ErrNoEndpoints, + }, + + // return err if all endpoints return arbitrary errors + { + client: &httpClusterClient{ + endpoints: []url.URL{fakeURL, fakeURL}, + clientFactory: newStaticHTTPClientFactory( + []staticHTTPResponse{ + {err: fakeErr}, + {err: fakeErr}, + }, + ), + rand: rand.New(rand.NewSource(0)), + }, + wantErr: &ClusterError{Errors: []error{fakeErr, fakeErr}}, + }, + + // 500-level errors cause Do to fallthrough to next endpoint + { + client: &httpClusterClient{ + endpoints: []url.URL{fakeURL, fakeURL}, + clientFactory: newStaticHTTPClientFactory( + []staticHTTPResponse{ + {resp: http.Response{StatusCode: http.StatusBadGateway}}, + {resp: http.Response{StatusCode: http.StatusTeapot}}, + }, + ), + rand: rand.New(rand.NewSource(0)), + }, + wantCode: http.StatusTeapot, + wantPinned: 1, + }, + } + + for i, tt := range tests { + resp, _, err := tt.client.Do(context.Background(), nil) + if 
!reflect.DeepEqual(tt.wantErr, err) { + t.Errorf("#%d: got err=%v, want=%v", i, err, tt.wantErr) + continue + } + + if resp == nil { + if tt.wantCode != 0 { + t.Errorf("#%d: resp is nil, want=%d", i, tt.wantCode) + } + continue + } + + if resp.StatusCode != tt.wantCode { + t.Errorf("#%d: resp code=%d, want=%d", i, resp.StatusCode, tt.wantCode) + continue + } + + if tt.client.pinned != tt.wantPinned { + t.Errorf("#%d: pinned=%d, want=%d", i, tt.client.pinned, tt.wantPinned) + } + } +} + +func TestHTTPClusterClientDoDeadlineExceedContext(t *testing.T) { + fakeURL := url.URL{} + tr := newFakeTransport() + tr.finishCancel <- struct{}{} + c := &httpClusterClient{ + clientFactory: newHTTPClientFactory(tr, DefaultCheckRedirect, 0), + endpoints: []url.URL{fakeURL}, + } + + errc := make(chan error) + go func() { + ctx, cancel := context.WithTimeout(context.Background(), time.Millisecond) + defer cancel() + _, _, err := c.Do(ctx, &fakeAction{}) + errc <- err + }() + + select { + case err := <-errc: + if err != context.DeadlineExceeded { + t.Errorf("err = %+v, want %+v", err, context.DeadlineExceeded) + } + case <-time.After(time.Second): + t.Fatalf("unexpected timeout when waitting for request to deadline exceed") + } +} + +func TestRedirectedHTTPAction(t *testing.T) { + act := &redirectedHTTPAction{ + action: &staticHTTPAction{ + request: http.Request{ + Method: "DELETE", + URL: &url.URL{ + Scheme: "https", + Host: "foo.example.com", + Path: "/ping", + }, + }, + }, + location: url.URL{ + Scheme: "https", + Host: "bar.example.com", + Path: "/pong", + }, + } + + want := &http.Request{ + Method: "DELETE", + URL: &url.URL{ + Scheme: "https", + Host: "bar.example.com", + Path: "/pong", + }, + } + got := act.HTTPRequest(url.URL{Scheme: "http", Host: "baz.example.com", Path: "/pang"}) + + if !reflect.DeepEqual(want, got) { + t.Fatalf("HTTPRequest is %#v, want %#v", want, got) + } +} + +func TestRedirectFollowingHTTPClient(t *testing.T) { + tests := []struct { + checkRedirect CheckRedirectFunc + client httpClient + wantCode int + wantErr error + }{ + // errors bubbled up + { + checkRedirect: func(int) error { return ErrTooManyRedirects }, + client: &multiStaticHTTPClient{ + responses: []staticHTTPResponse{ + { + err: errors.New("fail!"), + }, + }, + }, + wantErr: errors.New("fail!"), + }, + + // no need to follow redirect if none given + { + checkRedirect: func(int) error { return ErrTooManyRedirects }, + client: &multiStaticHTTPClient{ + responses: []staticHTTPResponse{ + { + resp: http.Response{ + StatusCode: http.StatusTeapot, + }, + }, + }, + }, + wantCode: http.StatusTeapot, + }, + + // redirects if less than max + { + checkRedirect: func(via int) error { + if via >= 2 { + return ErrTooManyRedirects + } + return nil + }, + client: &multiStaticHTTPClient{ + responses: []staticHTTPResponse{ + { + resp: http.Response{ + StatusCode: http.StatusTemporaryRedirect, + Header: http.Header{"Location": []string{"http://example.com"}}, + }, + }, + { + resp: http.Response{ + StatusCode: http.StatusTeapot, + }, + }, + }, + }, + wantCode: http.StatusTeapot, + }, + + // succeed after reaching max redirects + { + checkRedirect: func(via int) error { + if via >= 3 { + return ErrTooManyRedirects + } + return nil + }, + client: &multiStaticHTTPClient{ + responses: []staticHTTPResponse{ + { + resp: http.Response{ + StatusCode: http.StatusTemporaryRedirect, + Header: http.Header{"Location": []string{"http://example.com"}}, + }, + }, + { + resp: http.Response{ + StatusCode: http.StatusTemporaryRedirect, + Header: 
http.Header{"Location": []string{"http://example.com"}}, + }, + }, + { + resp: http.Response{ + StatusCode: http.StatusTeapot, + }, + }, + }, + }, + wantCode: http.StatusTeapot, + }, + + // fail if too many redirects + { + checkRedirect: func(via int) error { + if via >= 2 { + return ErrTooManyRedirects + } + return nil + }, + client: &multiStaticHTTPClient{ + responses: []staticHTTPResponse{ + { + resp: http.Response{ + StatusCode: http.StatusTemporaryRedirect, + Header: http.Header{"Location": []string{"http://example.com"}}, + }, + }, + { + resp: http.Response{ + StatusCode: http.StatusTemporaryRedirect, + Header: http.Header{"Location": []string{"http://example.com"}}, + }, + }, + { + resp: http.Response{ + StatusCode: http.StatusTeapot, + }, + }, + }, + }, + wantErr: ErrTooManyRedirects, + }, + + // fail if Location header not set + { + checkRedirect: func(int) error { return ErrTooManyRedirects }, + client: &multiStaticHTTPClient{ + responses: []staticHTTPResponse{ + { + resp: http.Response{ + StatusCode: http.StatusTemporaryRedirect, + }, + }, + }, + }, + wantErr: errors.New("Location header not set"), + }, + + // fail if Location header is invalid + { + checkRedirect: func(int) error { return ErrTooManyRedirects }, + client: &multiStaticHTTPClient{ + responses: []staticHTTPResponse{ + { + resp: http.Response{ + StatusCode: http.StatusTemporaryRedirect, + Header: http.Header{"Location": []string{":"}}, + }, + }, + }, + }, + wantErr: errors.New("Location header not valid URL: :"), + }, + + // fail if redirects checked way too many times + { + checkRedirect: func(int) error { return nil }, + client: &staticHTTPClient{ + resp: http.Response{ + StatusCode: http.StatusTemporaryRedirect, + Header: http.Header{"Location": []string{"http://example.com"}}, + }, + }, + wantErr: errTooManyRedirectChecks, + }, + } + + for i, tt := range tests { + client := &redirectFollowingHTTPClient{client: tt.client, checkRedirect: tt.checkRedirect} + resp, _, err := client.Do(context.Background(), nil) + if !reflect.DeepEqual(tt.wantErr, err) { + t.Errorf("#%d: got err=%v, want=%v", i, err, tt.wantErr) + continue + } + + if resp == nil { + if tt.wantCode != 0 { + t.Errorf("#%d: resp is nil, want=%d", i, tt.wantCode) + } + continue + } + + if resp.StatusCode != tt.wantCode { + t.Errorf("#%d: resp code=%d, want=%d", i, resp.StatusCode, tt.wantCode) + continue + } + } +} + +func TestDefaultCheckRedirect(t *testing.T) { + tests := []struct { + num int + err error + }{ + {0, nil}, + {5, nil}, + {10, nil}, + {11, ErrTooManyRedirects}, + {29, ErrTooManyRedirects}, + } + + for i, tt := range tests { + err := DefaultCheckRedirect(tt.num) + if !reflect.DeepEqual(tt.err, err) { + t.Errorf("#%d: want=%#v got=%#v", i, tt.err, err) + } + } +} + +func TestHTTPClusterClientSync(t *testing.T) { + cf := newStaticHTTPClientFactory([]staticHTTPResponse{ + { + resp: http.Response{StatusCode: http.StatusOK, Header: http.Header{"Content-Type": []string{"application/json"}}}, + body: []byte(`{"members":[{"id":"2745e2525fce8fe","peerURLs":["http://127.0.0.1:7003"],"name":"node3","clientURLs":["http://127.0.0.1:4003"]},{"id":"42134f434382925","peerURLs":["http://127.0.0.1:2380","http://127.0.0.1:7001"],"name":"node1","clientURLs":["http://127.0.0.1:2379","http://127.0.0.1:4001"]},{"id":"94088180e21eb87b","peerURLs":["http://127.0.0.1:7002"],"name":"node2","clientURLs":["http://127.0.0.1:4002"]}]}`), + }, + }) + + hc := &httpClusterClient{ + clientFactory: cf, + rand: rand.New(rand.NewSource(0)), + } + err := 
hc.SetEndpoints([]string{"http://127.0.0.1:2379"}) + if err != nil { + t.Fatalf("unexpected error during setup: %#v", err) + } + + want := []string{"http://127.0.0.1:2379"} + got := hc.Endpoints() + if !reflect.DeepEqual(want, got) { + t.Fatalf("incorrect endpoints: want=%#v got=%#v", want, got) + } + + err = hc.Sync(context.Background()) + if err != nil { + t.Fatalf("unexpected error during Sync: %#v", err) + } + + want = []string{"http://127.0.0.1:2379", "http://127.0.0.1:4001", "http://127.0.0.1:4002", "http://127.0.0.1:4003"} + got = hc.Endpoints() + sort.Sort(sort.StringSlice(got)) + if !reflect.DeepEqual(want, got) { + t.Fatalf("incorrect endpoints post-Sync: want=%#v got=%#v", want, got) + } + + err = hc.SetEndpoints([]string{"http://127.0.0.1:4009"}) + if err != nil { + t.Fatalf("unexpected error during reset: %#v", err) + } + + want = []string{"http://127.0.0.1:4009"} + got = hc.Endpoints() + if !reflect.DeepEqual(want, got) { + t.Fatalf("incorrect endpoints post-reset: want=%#v got=%#v", want, got) + } +} + +func TestHTTPClusterClientSyncFail(t *testing.T) { + cf := newStaticHTTPClientFactory([]staticHTTPResponse{ + {err: errors.New("fail!")}, + }) + + hc := &httpClusterClient{ + clientFactory: cf, + rand: rand.New(rand.NewSource(0)), + } + err := hc.SetEndpoints([]string{"http://127.0.0.1:2379"}) + if err != nil { + t.Fatalf("unexpected error during setup: %#v", err) + } + + want := []string{"http://127.0.0.1:2379"} + got := hc.Endpoints() + if !reflect.DeepEqual(want, got) { + t.Fatalf("incorrect endpoints: want=%#v got=%#v", want, got) + } + + err = hc.Sync(context.Background()) + if err == nil { + t.Fatalf("got nil error during Sync") + } + + got = hc.Endpoints() + if !reflect.DeepEqual(want, got) { + t.Fatalf("incorrect endpoints after failed Sync: want=%#v got=%#v", want, got) + } +} + +func TestHTTPClusterClientAutoSyncCancelContext(t *testing.T) { + cf := newStaticHTTPClientFactory([]staticHTTPResponse{ + { + resp: http.Response{StatusCode: http.StatusOK, Header: http.Header{"Content-Type": []string{"application/json"}}}, + body: []byte(`{"members":[{"id":"2745e2525fce8fe","peerURLs":["http://127.0.0.1:7003"],"name":"node3","clientURLs":["http://127.0.0.1:4003"]},{"id":"42134f434382925","peerURLs":["http://127.0.0.1:2380","http://127.0.0.1:7001"],"name":"node1","clientURLs":["http://127.0.0.1:2379","http://127.0.0.1:4001"]},{"id":"94088180e21eb87b","peerURLs":["http://127.0.0.1:7002"],"name":"node2","clientURLs":["http://127.0.0.1:4002"]}]}`), + }, + }) + + hc := &httpClusterClient{ + clientFactory: cf, + rand: rand.New(rand.NewSource(0)), + } + err := hc.SetEndpoints([]string{"http://127.0.0.1:2379"}) + if err != nil { + t.Fatalf("unexpected error during setup: %#v", err) + } + ctx, cancel := context.WithCancel(context.Background()) + cancel() + + err = hc.AutoSync(ctx, time.Hour) + if err != context.Canceled { + t.Fatalf("incorrect error value: want=%v got=%v", context.Canceled, err) + } +} + +func TestHTTPClusterClientAutoSyncFail(t *testing.T) { + cf := newStaticHTTPClientFactory([]staticHTTPResponse{ + {err: errors.New("fail!")}, + }) + + hc := &httpClusterClient{ + clientFactory: cf, + rand: rand.New(rand.NewSource(0)), + } + err := hc.SetEndpoints([]string{"http://127.0.0.1:2379"}) + if err != nil { + t.Fatalf("unexpected error during setup: %#v", err) + } + + err = hc.AutoSync(context.Background(), time.Hour) + if err.Error() != ErrClusterUnavailable.Error() { + t.Fatalf("incorrect error value: want=%v got=%v", ErrClusterUnavailable, err) + } +} + +// 
TestHTTPClusterClientSyncPinEndpoint tests that Sync() pins the endpoint when +// it gets exactly the same member list as before. +func TestHTTPClusterClientSyncPinEndpoint(t *testing.T) { + cf := newStaticHTTPClientFactory([]staticHTTPResponse{ + { + resp: http.Response{StatusCode: http.StatusOK, Header: http.Header{"Content-Type": []string{"application/json"}}}, + body: []byte(`{"members":[{"id":"2745e2525fce8fe","peerURLs":["http://127.0.0.1:7003"],"name":"node3","clientURLs":["http://127.0.0.1:4003"]},{"id":"42134f434382925","peerURLs":["http://127.0.0.1:2380","http://127.0.0.1:7001"],"name":"node1","clientURLs":["http://127.0.0.1:2379","http://127.0.0.1:4001"]},{"id":"94088180e21eb87b","peerURLs":["http://127.0.0.1:7002"],"name":"node2","clientURLs":["http://127.0.0.1:4002"]}]}`), + }, + { + resp: http.Response{StatusCode: http.StatusOK, Header: http.Header{"Content-Type": []string{"application/json"}}}, + body: []byte(`{"members":[{"id":"2745e2525fce8fe","peerURLs":["http://127.0.0.1:7003"],"name":"node3","clientURLs":["http://127.0.0.1:4003"]},{"id":"42134f434382925","peerURLs":["http://127.0.0.1:2380","http://127.0.0.1:7001"],"name":"node1","clientURLs":["http://127.0.0.1:2379","http://127.0.0.1:4001"]},{"id":"94088180e21eb87b","peerURLs":["http://127.0.0.1:7002"],"name":"node2","clientURLs":["http://127.0.0.1:4002"]}]}`), + }, + { + resp: http.Response{StatusCode: http.StatusOK, Header: http.Header{"Content-Type": []string{"application/json"}}}, + body: []byte(`{"members":[{"id":"2745e2525fce8fe","peerURLs":["http://127.0.0.1:7003"],"name":"node3","clientURLs":["http://127.0.0.1:4003"]},{"id":"42134f434382925","peerURLs":["http://127.0.0.1:2380","http://127.0.0.1:7001"],"name":"node1","clientURLs":["http://127.0.0.1:2379","http://127.0.0.1:4001"]},{"id":"94088180e21eb87b","peerURLs":["http://127.0.0.1:7002"],"name":"node2","clientURLs":["http://127.0.0.1:4002"]}]}`), + }, + }) + + hc := &httpClusterClient{ + clientFactory: cf, + rand: rand.New(rand.NewSource(0)), + } + err := hc.SetEndpoints([]string{"http://127.0.0.1:4003", "http://127.0.0.1:2379", "http://127.0.0.1:4001", "http://127.0.0.1:4002"}) + if err != nil { + t.Fatalf("unexpected error during setup: %#v", err) + } + pinnedEndpoint := hc.endpoints[hc.pinned] + + for i := 0; i < 3; i++ { + err = hc.Sync(context.Background()) + if err != nil { + t.Fatalf("#%d: unexpected error during Sync: %#v", i, err) + } + + if g := hc.endpoints[hc.pinned]; g != pinnedEndpoint { + t.Errorf("#%d: pinned endpoint = %s, want %s", i, g, pinnedEndpoint) + } + } +} + +func TestHTTPClusterClientResetFail(t *testing.T) { + tests := [][]string{ + // need at least one endpoint + {}, + + // URLs must be valid + {":"}, + } + + for i, tt := range tests { + hc := &httpClusterClient{rand: rand.New(rand.NewSource(0))} + err := hc.SetEndpoints(tt) + if err == nil { + t.Errorf("#%d: expected non-nil error", i) + } + } +} + +func TestHTTPClusterClientResetPinRandom(t *testing.T) { + round := 2000 + pinNum := 0 + for i := 0; i < round; i++ { + hc := &httpClusterClient{rand: rand.New(rand.NewSource(int64(i)))} + err := hc.SetEndpoints([]string{"http://127.0.0.1:4001", "http://127.0.0.1:4002", "http://127.0.0.1:4003"}) + if err != nil { + t.Fatalf("#%d: reset error (%v)", i, err) + } + if hc.endpoints[hc.pinned].String() == "http://127.0.0.1:4001" { + pinNum++ + } + } + + min := 1.0/3.0 - 0.05 + max := 1.0/3.0 + 0.05 + if ratio := float64(pinNum) / float64(round); ratio > max || ratio < min { + t.Errorf("pinned ratio = %v, want [%v, %v]", ratio, min, max) + }
+} diff --git a/vendor/github.com/coreos/etcd/client/cluster_error.go b/vendor/github.com/coreos/etcd/client/cluster_error.go new file mode 100644 index 000000000..957ed4624 --- /dev/null +++ b/vendor/github.com/coreos/etcd/client/cluster_error.go @@ -0,0 +1,33 @@ +// Copyright 2015 CoreOS, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package client + +import "fmt" + +type ClusterError struct { + Errors []error +} + +func (ce *ClusterError) Error() string { + return ErrClusterUnavailable.Error() +} + +func (ce *ClusterError) Detail() string { + s := "" + for i, e := range ce.Errors { + s += fmt.Sprintf("error #%d: %s\n", i, e) + } + return s +} diff --git a/vendor/github.com/coreos/etcd/client/curl.go b/vendor/github.com/coreos/etcd/client/curl.go new file mode 100644 index 000000000..5a5a69a94 --- /dev/null +++ b/vendor/github.com/coreos/etcd/client/curl.go @@ -0,0 +1,70 @@ +// Copyright 2015 CoreOS, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package client + +import ( + "bytes" + "fmt" + "io/ioutil" + "net/http" + "os" +) + +var ( + cURLDebug = false +) + +func EnablecURLDebug() { + cURLDebug = true +} + +func DisablecURLDebug() { + cURLDebug = false +} + +// printcURL prints the cURL equivalent request to stderr. +// It returns an error if the body of the request cannot +// be read. +// The caller MUST cancel the request if there is an error. +func printcURL(req *http.Request) error { + if !cURLDebug { + return nil + } + var ( + command string + b []byte + err error + ) + + if req.URL != nil { + command = fmt.Sprintf("curl -X %s %s", req.Method, req.URL.String()) + } + + if req.Body != nil { + b, err = ioutil.ReadAll(req.Body) + if err != nil { + return err + } + command += fmt.Sprintf(" -d %q", string(b)) + } + + fmt.Fprintf(os.Stderr, "cURL Command: %s\n", command) + + // reset body + body := bytes.NewBuffer(b) + req.Body = ioutil.NopCloser(body) + + return nil +} diff --git a/vendor/github.com/coreos/etcd/client/discover.go b/vendor/github.com/coreos/etcd/client/discover.go new file mode 100644 index 000000000..ae88659f4 --- /dev/null +++ b/vendor/github.com/coreos/etcd/client/discover.go @@ -0,0 +1,21 @@ +// Copyright 2015 CoreOS, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package client + +// Discoverer is an interface that wraps the Discover method. +type Discoverer interface { + // Discover looks up the etcd servers for the domain. + Discover(domain string) ([]string, error) +} diff --git a/vendor/github.com/coreos/etcd/client/doc.go b/vendor/github.com/coreos/etcd/client/doc.go new file mode 100644 index 000000000..70111cace --- /dev/null +++ b/vendor/github.com/coreos/etcd/client/doc.go @@ -0,0 +1,71 @@ +// Copyright 2015 CoreOS, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +/* +Package client provides bindings for the etcd APIs. + +Create a Config and exchange it for a Client: + + import ( + "net/http" + + "github.com/coreos/etcd/client" + "golang.org/x/net/context" + ) + + cfg := client.Config{ + Endpoints: []string{"http://127.0.0.1:2379"}, + Transport: DefaultTransport, + } + + c, err := client.New(cfg) + if err != nil { + // handle error + } + +Create a KeysAPI using the Client, then use it to interact with etcd: + + kAPI := client.NewKeysAPI(c) + + // create a new key /foo with the value "bar" + _, err = kAPI.Create(context.Background(), "/foo", "bar") + if err != nil { + // handle error + } + + // delete the newly created key only if the value is still "bar" + _, err = kAPI.Delete(context.Background(), "/foo", &DeleteOptions{PrevValue: "bar"}) + if err != nil { + // handle error + } + +Use a custom context to set timeouts on your operations: + + import "time" + + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + defer cancel() + + // set a new key, ignoring its previous state + _, err := kAPI.Set(ctx, "/ping", "pong", nil) + if err != nil { + if err == context.DeadlineExceeded { + // request took longer than 5s + } else { + // handle error + } + } + +*/ +package client diff --git a/vendor/github.com/coreos/etcd/client/fake_transport_go14_test.go b/vendor/github.com/coreos/etcd/client/fake_transport_go14_test.go new file mode 100644 index 000000000..4a99a7d37 --- /dev/null +++ b/vendor/github.com/coreos/etcd/client/fake_transport_go14_test.go @@ -0,0 +1,41 @@ +// Copyright 2015 CoreOS, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and +// limitations under the License. + +// +build !go1.5 + +package client + +import ( + "errors" + "net/http" +) + +func (t *fakeTransport) RoundTrip(req *http.Request) (*http.Response, error) { + select { + case resp := <-t.respchan: + return resp, nil + case err := <-t.errchan: + return nil, err + case <-t.startCancel: + select { + // this simulates that the request finishes before the cancel takes effect + case resp := <-t.respchan: + return resp, nil + // wait on finishCancel to simulate taking some amount of + // time while calling CancelRequest + case <-t.finishCancel: + return nil, errors.New("cancelled") + } + } +} diff --git a/vendor/github.com/coreos/etcd/client/fake_transport_test.go b/vendor/github.com/coreos/etcd/client/fake_transport_test.go new file mode 100644 index 000000000..06761e266 --- /dev/null +++ b/vendor/github.com/coreos/etcd/client/fake_transport_test.go @@ -0,0 +1,42 @@ +// Copyright 2015 CoreOS, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build go1.5 + +package client + +import ( + "errors" + "net/http" +) + +func (t *fakeTransport) RoundTrip(req *http.Request) (*http.Response, error) { + select { + case resp := <-t.respchan: + return resp, nil + case err := <-t.errchan: + return nil, err + case <-t.startCancel: + case <-req.Cancel: + } + select { + // this simulates that the request finishes before the cancel takes effect + case resp := <-t.respchan: + return resp, nil + // wait on finishCancel to simulate taking some amount of + // time while calling CancelRequest + case <-t.finishCancel: + return nil, errors.New("cancelled") + } +} diff --git a/vendor/github.com/coreos/etcd/client/keys.generated.go b/vendor/github.com/coreos/etcd/client/keys.generated.go new file mode 100644 index 000000000..feac0d1d8 --- /dev/null +++ b/vendor/github.com/coreos/etcd/client/keys.generated.go @@ -0,0 +1,1000 @@ +// ************************************************************ +// DO NOT EDIT. +// THIS FILE IS AUTO-GENERATED BY codecgen.
+// ************************************************************ + +package client + +import ( + "errors" + "fmt" + codec1978 "github.com/coreos/etcd/Godeps/_workspace/src/github.com/ugorji/go/codec" + "reflect" + "runtime" + time "time" +) + +const ( + // ----- content types ---- + codecSelferC_UTF81819 = 1 + codecSelferC_RAW1819 = 0 + // ----- value types used ---- + codecSelferValueTypeArray1819 = 10 + codecSelferValueTypeMap1819 = 9 + // ----- containerStateValues ---- + codecSelfer_containerMapKey1819 = 2 + codecSelfer_containerMapValue1819 = 3 + codecSelfer_containerMapEnd1819 = 4 + codecSelfer_containerArrayElem1819 = 6 + codecSelfer_containerArrayEnd1819 = 7 +) + +var ( + codecSelferBitsize1819 = uint8(reflect.TypeOf(uint(0)).Bits()) + codecSelferOnlyMapOrArrayEncodeToStructErr1819 = errors.New(`only encoded map or array can be decoded into a struct`) +) + +type codecSelfer1819 struct{} + +func init() { + if codec1978.GenVersion != 5 { + _, file, _, _ := runtime.Caller(0) + err := fmt.Errorf("codecgen version mismatch: current: %v, need %v. Re-generate file: %v", + 5, codec1978.GenVersion, file) + panic(err) + } + if false { // reference the types, but skip this branch at build/run time + var v0 time.Time + _ = v0 + } +} + +func (x *Response) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1819 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [3]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(3) + } else { + yynn2 = 3 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1819) + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81819, string(x.Action)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1819) + r.EncodeString(codecSelferC_UTF81819, string("action")) + z.EncSendContainerState(codecSelfer_containerMapValue1819) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81819, string(x.Action)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1819) + if x.Node == nil { + r.EncodeNil() + } else { + x.Node.CodecEncodeSelf(e) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1819) + r.EncodeString(codecSelferC_UTF81819, string("node")) + z.EncSendContainerState(codecSelfer_containerMapValue1819) + if x.Node == nil { + r.EncodeNil() + } else { + x.Node.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1819) + if x.PrevNode == nil { + r.EncodeNil() + } else { + x.PrevNode.CodecEncodeSelf(e) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1819) + r.EncodeString(codecSelferC_UTF81819, string("prevNode")) + z.EncSendContainerState(codecSelfer_containerMapValue1819) + if x.PrevNode == nil { + r.EncodeNil() + } else { + x.PrevNode.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1819) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1819) + } + } + } +} + +func (x *Response) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1819 + z, r := 
codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym8 := z.DecBinary() + _ = yym8 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct9 := r.ContainerType() + if yyct9 == codecSelferValueTypeMap1819 { + yyl9 := r.ReadMapStart() + if yyl9 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1819) + } else { + x.codecDecodeSelfFromMap(yyl9, d) + } + } else if yyct9 == codecSelferValueTypeArray1819 { + yyl9 := r.ReadArrayStart() + if yyl9 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1819) + } else { + x.codecDecodeSelfFromArray(yyl9, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1819) + } + } +} + +func (x *Response) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1819 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys10Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys10Slc + var yyhl10 bool = l >= 0 + for yyj10 := 0; ; yyj10++ { + if yyhl10 { + if yyj10 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1819) + yys10Slc = r.DecodeBytes(yys10Slc, true, true) + yys10 := string(yys10Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1819) + switch yys10 { + case "action": + if r.TryDecodeAsNil() { + x.Action = "" + } else { + x.Action = string(r.DecodeString()) + } + case "node": + if r.TryDecodeAsNil() { + if x.Node != nil { + x.Node = nil + } + } else { + if x.Node == nil { + x.Node = new(Node) + } + x.Node.CodecDecodeSelf(d) + } + case "prevNode": + if r.TryDecodeAsNil() { + if x.PrevNode != nil { + x.PrevNode = nil + } + } else { + if x.PrevNode == nil { + x.PrevNode = new(Node) + } + x.PrevNode.CodecDecodeSelf(d) + } + default: + z.DecStructFieldNotFound(-1, yys10) + } // end switch yys10 + } // end for yyj10 + z.DecSendContainerState(codecSelfer_containerMapEnd1819) +} + +func (x *Response) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1819 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj14 int + var yyb14 bool + var yyhl14 bool = l >= 0 + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l + } else { + yyb14 = r.CheckBreak() + } + if yyb14 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1819) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1819) + if r.TryDecodeAsNil() { + x.Action = "" + } else { + x.Action = string(r.DecodeString()) + } + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l + } else { + yyb14 = r.CheckBreak() + } + if yyb14 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1819) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1819) + if r.TryDecodeAsNil() { + if x.Node != nil { + x.Node = nil + } + } else { + if x.Node == nil { + x.Node = new(Node) + } + x.Node.CodecDecodeSelf(d) + } + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l + } else { + yyb14 = r.CheckBreak() + } + if yyb14 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1819) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1819) + if r.TryDecodeAsNil() { + if x.PrevNode != nil { + x.PrevNode = nil + } + } else { + if x.PrevNode == nil { + x.PrevNode = new(Node) + } + x.PrevNode.CodecDecodeSelf(d) + } + for { + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l + } else { + yyb14 = r.CheckBreak() + } + if yyb14 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1819) + z.DecStructFieldNotFound(yyj14-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1819) +} + 
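The generated encoders and decoders above look impenetrable, but they are mechanical: for each struct declared in keys.go, codecgen emits both a map-keyed and an array-indexed decode path so responses can be decoded without reflection. As a rough mental model only (the throwaway types and the payload below are illustrative, not part of the vendored files), the map-keyed path for Response behaves like this standard-library sketch:

    package main

    import (
    	"encoding/json"
    	"fmt"
    )

    // Cut-down mirrors of the Node and Response fields the decoder fills in.
    type node struct {
    	Key           string `json:"key"`
    	Value         string `json:"value"`
    	CreatedIndex  uint64 `json:"createdIndex"`
    	ModifiedIndex uint64 `json:"modifiedIndex"`
    }

    type response struct {
    	Action   string `json:"action"`
    	Node     *node  `json:"node"`
    	PrevNode *node  `json:"prevNode"`
    }

    func main() {
    	// Illustrative payload in the shape the etcd v2 keys API returns.
    	body := []byte(`{"action":"get","node":{"key":"/foo","value":"bar","createdIndex":7,"modifiedIndex":7}}`)

    	var r response
    	if err := json.Unmarshal(body, &r); err != nil {
    		panic(err)
    	}
    	fmt.Println(r.Action, r.Node.Key, r.Node.Value) // get /foo bar
    }

The generated form trades that brevity for decode speed, which matters because unmarshalSuccessfulKeysResponse in keys.go (further down in this patch) routes every successful keys-API response through it; keys_bench_test.go benchmarks exactly that path.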
+func (x *Node) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1819 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym18 := z.EncBinary() + _ = yym18 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep19 := !z.EncBinary() + yy2arr19 := z.EncBasicHandle().StructToArray + var yyq19 [8]bool + _, _, _ = yysep19, yyq19, yy2arr19 + const yyr19 bool = false + yyq19[1] = x.Dir != false + yyq19[6] = x.Expiration != nil + yyq19[7] = x.TTL != 0 + var yynn19 int + if yyr19 || yy2arr19 { + r.EncodeArrayStart(8) + } else { + yynn19 = 5 + for _, b := range yyq19 { + if b { + yynn19++ + } + } + r.EncodeMapStart(yynn19) + yynn19 = 0 + } + if yyr19 || yy2arr19 { + z.EncSendContainerState(codecSelfer_containerArrayElem1819) + yym21 := z.EncBinary() + _ = yym21 + if false { + } else { + r.EncodeString(codecSelferC_UTF81819, string(x.Key)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1819) + r.EncodeString(codecSelferC_UTF81819, string("key")) + z.EncSendContainerState(codecSelfer_containerMapValue1819) + yym22 := z.EncBinary() + _ = yym22 + if false { + } else { + r.EncodeString(codecSelferC_UTF81819, string(x.Key)) + } + } + if yyr19 || yy2arr19 { + z.EncSendContainerState(codecSelfer_containerArrayElem1819) + if yyq19[1] { + yym24 := z.EncBinary() + _ = yym24 + if false { + } else { + r.EncodeBool(bool(x.Dir)) + } + } else { + r.EncodeBool(false) + } + } else { + if yyq19[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1819) + r.EncodeString(codecSelferC_UTF81819, string("dir")) + z.EncSendContainerState(codecSelfer_containerMapValue1819) + yym25 := z.EncBinary() + _ = yym25 + if false { + } else { + r.EncodeBool(bool(x.Dir)) + } + } + } + if yyr19 || yy2arr19 { + z.EncSendContainerState(codecSelfer_containerArrayElem1819) + yym27 := z.EncBinary() + _ = yym27 + if false { + } else { + r.EncodeString(codecSelferC_UTF81819, string(x.Value)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1819) + r.EncodeString(codecSelferC_UTF81819, string("value")) + z.EncSendContainerState(codecSelfer_containerMapValue1819) + yym28 := z.EncBinary() + _ = yym28 + if false { + } else { + r.EncodeString(codecSelferC_UTF81819, string(x.Value)) + } + } + if yyr19 || yy2arr19 { + z.EncSendContainerState(codecSelfer_containerArrayElem1819) + if x.Nodes == nil { + r.EncodeNil() + } else { + x.Nodes.CodecEncodeSelf(e) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1819) + r.EncodeString(codecSelferC_UTF81819, string("nodes")) + z.EncSendContainerState(codecSelfer_containerMapValue1819) + if x.Nodes == nil { + r.EncodeNil() + } else { + x.Nodes.CodecEncodeSelf(e) + } + } + if yyr19 || yy2arr19 { + z.EncSendContainerState(codecSelfer_containerArrayElem1819) + yym31 := z.EncBinary() + _ = yym31 + if false { + } else { + r.EncodeUint(uint64(x.CreatedIndex)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1819) + r.EncodeString(codecSelferC_UTF81819, string("createdIndex")) + z.EncSendContainerState(codecSelfer_containerMapValue1819) + yym32 := z.EncBinary() + _ = yym32 + if false { + } else { + r.EncodeUint(uint64(x.CreatedIndex)) + } + } + if yyr19 || yy2arr19 { + z.EncSendContainerState(codecSelfer_containerArrayElem1819) + yym34 := z.EncBinary() + _ = yym34 + if false { + } else { + r.EncodeUint(uint64(x.ModifiedIndex)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1819) + r.EncodeString(codecSelferC_UTF81819, 
string("modifiedIndex")) + z.EncSendContainerState(codecSelfer_containerMapValue1819) + yym35 := z.EncBinary() + _ = yym35 + if false { + } else { + r.EncodeUint(uint64(x.ModifiedIndex)) + } + } + if yyr19 || yy2arr19 { + z.EncSendContainerState(codecSelfer_containerArrayElem1819) + if yyq19[6] { + if x.Expiration == nil { + r.EncodeNil() + } else { + yym37 := z.EncBinary() + _ = yym37 + if false { + } else if yym38 := z.TimeRtidIfBinc(); yym38 != 0 { + r.EncodeBuiltin(yym38, x.Expiration) + } else if z.HasExtensions() && z.EncExt(x.Expiration) { + } else if yym37 { + z.EncBinaryMarshal(x.Expiration) + } else if !yym37 && z.IsJSONHandle() { + z.EncJSONMarshal(x.Expiration) + } else { + z.EncFallback(x.Expiration) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq19[6] { + z.EncSendContainerState(codecSelfer_containerMapKey1819) + r.EncodeString(codecSelferC_UTF81819, string("expiration")) + z.EncSendContainerState(codecSelfer_containerMapValue1819) + if x.Expiration == nil { + r.EncodeNil() + } else { + yym39 := z.EncBinary() + _ = yym39 + if false { + } else if yym40 := z.TimeRtidIfBinc(); yym40 != 0 { + r.EncodeBuiltin(yym40, x.Expiration) + } else if z.HasExtensions() && z.EncExt(x.Expiration) { + } else if yym39 { + z.EncBinaryMarshal(x.Expiration) + } else if !yym39 && z.IsJSONHandle() { + z.EncJSONMarshal(x.Expiration) + } else { + z.EncFallback(x.Expiration) + } + } + } + } + if yyr19 || yy2arr19 { + z.EncSendContainerState(codecSelfer_containerArrayElem1819) + if yyq19[7] { + yym42 := z.EncBinary() + _ = yym42 + if false { + } else { + r.EncodeInt(int64(x.TTL)) + } + } else { + r.EncodeInt(0) + } + } else { + if yyq19[7] { + z.EncSendContainerState(codecSelfer_containerMapKey1819) + r.EncodeString(codecSelferC_UTF81819, string("ttl")) + z.EncSendContainerState(codecSelfer_containerMapValue1819) + yym43 := z.EncBinary() + _ = yym43 + if false { + } else { + r.EncodeInt(int64(x.TTL)) + } + } + } + if yyr19 || yy2arr19 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1819) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1819) + } + } + } +} + +func (x *Node) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1819 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym44 := z.DecBinary() + _ = yym44 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct45 := r.ContainerType() + if yyct45 == codecSelferValueTypeMap1819 { + yyl45 := r.ReadMapStart() + if yyl45 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1819) + } else { + x.codecDecodeSelfFromMap(yyl45, d) + } + } else if yyct45 == codecSelferValueTypeArray1819 { + yyl45 := r.ReadArrayStart() + if yyl45 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1819) + } else { + x.codecDecodeSelfFromArray(yyl45, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1819) + } + } +} + +func (x *Node) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1819 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys46Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys46Slc + var yyhl46 bool = l >= 0 + for yyj46 := 0; ; yyj46++ { + if yyhl46 { + if yyj46 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1819) + yys46Slc = r.DecodeBytes(yys46Slc, true, true) + yys46 := string(yys46Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1819) + switch yys46 { + case "key": + if r.TryDecodeAsNil() { + x.Key = "" + } 
else { + x.Key = string(r.DecodeString()) + } + case "dir": + if r.TryDecodeAsNil() { + x.Dir = false + } else { + x.Dir = bool(r.DecodeBool()) + } + case "value": + if r.TryDecodeAsNil() { + x.Value = "" + } else { + x.Value = string(r.DecodeString()) + } + case "nodes": + if r.TryDecodeAsNil() { + x.Nodes = nil + } else { + yyv50 := &x.Nodes + yyv50.CodecDecodeSelf(d) + } + case "createdIndex": + if r.TryDecodeAsNil() { + x.CreatedIndex = 0 + } else { + x.CreatedIndex = uint64(r.DecodeUint(64)) + } + case "modifiedIndex": + if r.TryDecodeAsNil() { + x.ModifiedIndex = 0 + } else { + x.ModifiedIndex = uint64(r.DecodeUint(64)) + } + case "expiration": + if r.TryDecodeAsNil() { + if x.Expiration != nil { + x.Expiration = nil + } + } else { + if x.Expiration == nil { + x.Expiration = new(time.Time) + } + yym54 := z.DecBinary() + _ = yym54 + if false { + } else if yym55 := z.TimeRtidIfBinc(); yym55 != 0 { + r.DecodeBuiltin(yym55, x.Expiration) + } else if z.HasExtensions() && z.DecExt(x.Expiration) { + } else if yym54 { + z.DecBinaryUnmarshal(x.Expiration) + } else if !yym54 && z.IsJSONHandle() { + z.DecJSONUnmarshal(x.Expiration) + } else { + z.DecFallback(x.Expiration, false) + } + } + case "ttl": + if r.TryDecodeAsNil() { + x.TTL = 0 + } else { + x.TTL = int64(r.DecodeInt(64)) + } + default: + z.DecStructFieldNotFound(-1, yys46) + } // end switch yys46 + } // end for yyj46 + z.DecSendContainerState(codecSelfer_containerMapEnd1819) +} + +func (x *Node) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1819 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj57 int + var yyb57 bool + var yyhl57 bool = l >= 0 + yyj57++ + if yyhl57 { + yyb57 = yyj57 > l + } else { + yyb57 = r.CheckBreak() + } + if yyb57 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1819) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1819) + if r.TryDecodeAsNil() { + x.Key = "" + } else { + x.Key = string(r.DecodeString()) + } + yyj57++ + if yyhl57 { + yyb57 = yyj57 > l + } else { + yyb57 = r.CheckBreak() + } + if yyb57 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1819) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1819) + if r.TryDecodeAsNil() { + x.Dir = false + } else { + x.Dir = bool(r.DecodeBool()) + } + yyj57++ + if yyhl57 { + yyb57 = yyj57 > l + } else { + yyb57 = r.CheckBreak() + } + if yyb57 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1819) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1819) + if r.TryDecodeAsNil() { + x.Value = "" + } else { + x.Value = string(r.DecodeString()) + } + yyj57++ + if yyhl57 { + yyb57 = yyj57 > l + } else { + yyb57 = r.CheckBreak() + } + if yyb57 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1819) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1819) + if r.TryDecodeAsNil() { + x.Nodes = nil + } else { + yyv61 := &x.Nodes + yyv61.CodecDecodeSelf(d) + } + yyj57++ + if yyhl57 { + yyb57 = yyj57 > l + } else { + yyb57 = r.CheckBreak() + } + if yyb57 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1819) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1819) + if r.TryDecodeAsNil() { + x.CreatedIndex = 0 + } else { + x.CreatedIndex = uint64(r.DecodeUint(64)) + } + yyj57++ + if yyhl57 { + yyb57 = yyj57 > l + } else { + yyb57 = r.CheckBreak() + } + if yyb57 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1819) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1819) + 
if r.TryDecodeAsNil() { + x.ModifiedIndex = 0 + } else { + x.ModifiedIndex = uint64(r.DecodeUint(64)) + } + yyj57++ + if yyhl57 { + yyb57 = yyj57 > l + } else { + yyb57 = r.CheckBreak() + } + if yyb57 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1819) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1819) + if r.TryDecodeAsNil() { + if x.Expiration != nil { + x.Expiration = nil + } + } else { + if x.Expiration == nil { + x.Expiration = new(time.Time) + } + yym65 := z.DecBinary() + _ = yym65 + if false { + } else if yym66 := z.TimeRtidIfBinc(); yym66 != 0 { + r.DecodeBuiltin(yym66, x.Expiration) + } else if z.HasExtensions() && z.DecExt(x.Expiration) { + } else if yym65 { + z.DecBinaryUnmarshal(x.Expiration) + } else if !yym65 && z.IsJSONHandle() { + z.DecJSONUnmarshal(x.Expiration) + } else { + z.DecFallback(x.Expiration, false) + } + } + yyj57++ + if yyhl57 { + yyb57 = yyj57 > l + } else { + yyb57 = r.CheckBreak() + } + if yyb57 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1819) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1819) + if r.TryDecodeAsNil() { + x.TTL = 0 + } else { + x.TTL = int64(r.DecodeInt(64)) + } + for { + yyj57++ + if yyhl57 { + yyb57 = yyj57 > l + } else { + yyb57 = r.CheckBreak() + } + if yyb57 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1819) + z.DecStructFieldNotFound(yyj57-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1819) +} + +func (x Nodes) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1819 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym68 := z.EncBinary() + _ = yym68 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + h.encNodes((Nodes)(x), e) + } + } +} + +func (x *Nodes) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1819 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym69 := z.DecBinary() + _ = yym69 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + h.decNodes((*Nodes)(x), d) + } +} + +func (x codecSelfer1819) encNodes(v Nodes, e *codec1978.Encoder) { + var h codecSelfer1819 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv70 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem1819) + if yyv70 == nil { + r.EncodeNil() + } else { + yyv70.CodecEncodeSelf(e) + } + } + z.EncSendContainerState(codecSelfer_containerArrayEnd1819) +} + +func (x codecSelfer1819) decNodes(v *Nodes, d *codec1978.Decoder) { + var h codecSelfer1819 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv71 := *v + yyh71, yyl71 := z.DecSliceHelperStart() + var yyc71 bool + if yyl71 == 0 { + if yyv71 == nil { + yyv71 = []*Node{} + yyc71 = true + } else if len(yyv71) != 0 { + yyv71 = yyv71[:0] + yyc71 = true + } + } else if yyl71 > 0 { + var yyrr71, yyrl71 int + var yyrt71 bool + if yyl71 > cap(yyv71) { + + yyrg71 := len(yyv71) > 0 + yyv271 := yyv71 + yyrl71, yyrt71 = z.DecInferLen(yyl71, z.DecBasicHandle().MaxInitLen, 8) + if yyrt71 { + if yyrl71 <= cap(yyv71) { + yyv71 = yyv71[:yyrl71] + } else { + yyv71 = make([]*Node, yyrl71) + } + } else { + yyv71 = make([]*Node, yyrl71) + } + yyc71 = true + yyrr71 = len(yyv71) + if yyrg71 { + copy(yyv71, yyv271) + } + } else if yyl71 != len(yyv71) { + yyv71 = yyv71[:yyl71] + yyc71 = true + } + yyj71 := 0 + for ; yyj71 < yyrr71; yyj71++ { + yyh71.ElemContainerState(yyj71) + if r.TryDecodeAsNil() { + if yyv71[yyj71] != 
nil { + *yyv71[yyj71] = Node{} + } + } else { + if yyv71[yyj71] == nil { + yyv71[yyj71] = new(Node) + } + yyw72 := yyv71[yyj71] + yyw72.CodecDecodeSelf(d) + } + + } + if yyrt71 { + for ; yyj71 < yyl71; yyj71++ { + yyv71 = append(yyv71, nil) + yyh71.ElemContainerState(yyj71) + if r.TryDecodeAsNil() { + if yyv71[yyj71] != nil { + *yyv71[yyj71] = Node{} + } + } else { + if yyv71[yyj71] == nil { + yyv71[yyj71] = new(Node) + } + yyw73 := yyv71[yyj71] + yyw73.CodecDecodeSelf(d) + } + + } + } + + } else { + yyj71 := 0 + for ; !r.CheckBreak(); yyj71++ { + + if yyj71 >= len(yyv71) { + yyv71 = append(yyv71, nil) // var yyz71 *Node + yyc71 = true + } + yyh71.ElemContainerState(yyj71) + if yyj71 < len(yyv71) { + if r.TryDecodeAsNil() { + if yyv71[yyj71] != nil { + *yyv71[yyj71] = Node{} + } + } else { + if yyv71[yyj71] == nil { + yyv71[yyj71] = new(Node) + } + yyw74 := yyv71[yyj71] + yyw74.CodecDecodeSelf(d) + } + + } else { + z.DecSwallow() + } + + } + if yyj71 < len(yyv71) { + yyv71 = yyv71[:yyj71] + yyc71 = true + } else if yyj71 == 0 && yyv71 == nil { + yyv71 = []*Node{} + yyc71 = true + } + } + yyh71.End() + if yyc71 { + *v = yyv71 + } +} diff --git a/vendor/github.com/coreos/etcd/client/keys.go b/vendor/github.com/coreos/etcd/client/keys.go new file mode 100644 index 000000000..97e275008 --- /dev/null +++ b/vendor/github.com/coreos/etcd/client/keys.go @@ -0,0 +1,651 @@ +// Copyright 2015 CoreOS, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package client + +//go:generate codecgen -d 1819 -r "Node|Response|Nodes" -o keys.generated.go keys.go + +import ( + "encoding/json" + "errors" + "fmt" + "net/http" + "net/url" + "strconv" + "strings" + "time" + + "github.com/coreos/etcd/Godeps/_workspace/src/github.com/ugorji/go/codec" + "github.com/coreos/etcd/Godeps/_workspace/src/golang.org/x/net/context" + "github.com/coreos/etcd/pkg/pathutil" +) + +const ( + ErrorCodeKeyNotFound = 100 + ErrorCodeTestFailed = 101 + ErrorCodeNotFile = 102 + ErrorCodeNotDir = 104 + ErrorCodeNodeExist = 105 + ErrorCodeRootROnly = 107 + ErrorCodeDirNotEmpty = 108 + ErrorCodeUnauthorized = 110 + + ErrorCodePrevValueRequired = 201 + ErrorCodeTTLNaN = 202 + ErrorCodeIndexNaN = 203 + ErrorCodeInvalidField = 209 + ErrorCodeInvalidForm = 210 + + ErrorCodeRaftInternal = 300 + ErrorCodeLeaderElect = 301 + + ErrorCodeWatcherCleared = 400 + ErrorCodeEventIndexCleared = 401 +) + +type Error struct { + Code int `json:"errorCode"` + Message string `json:"message"` + Cause string `json:"cause"` + Index uint64 `json:"index"` +} + +func (e Error) Error() string { + return fmt.Sprintf("%v: %v (%v) [%v]", e.Code, e.Message, e.Cause, e.Index) +} + +var ( + ErrInvalidJSON = errors.New("client: response is invalid json. The endpoint is probably not valid etcd cluster endpoint.") + ErrEmptyBody = errors.New("client: response body is empty") +) + +// PrevExistType is used to define an existence condition when setting +// or deleting Nodes. 
+type PrevExistType string + +const ( + PrevIgnore = PrevExistType("") + PrevExist = PrevExistType("true") + PrevNoExist = PrevExistType("false") +) + +var ( + defaultV2KeysPrefix = "/v2/keys" +) + +// NewKeysAPI builds a KeysAPI that interacts with etcd's key-value +// API over HTTP. +func NewKeysAPI(c Client) KeysAPI { + return NewKeysAPIWithPrefix(c, defaultV2KeysPrefix) +} + +// NewKeysAPIWithPrefix acts like NewKeysAPI, but allows the caller +// to provide a custom base URL path. This should only be used in +// very rare cases. +func NewKeysAPIWithPrefix(c Client, p string) KeysAPI { + return &httpKeysAPI{ + client: c, + prefix: p, + } +} + +type KeysAPI interface { + // Get retrieves a set of Nodes from etcd + Get(ctx context.Context, key string, opts *GetOptions) (*Response, error) + + // Set assigns a new value to a Node identified by a given key. The caller + // may define a set of conditions in the SetOptions. If SetOptions.Dir=true + // then value is ignored. + Set(ctx context.Context, key, value string, opts *SetOptions) (*Response, error) + + // Delete removes a Node identified by the given key, optionally destroying + // all of its children as well. The caller may define a set of required + // conditions in a DeleteOptions object. + Delete(ctx context.Context, key string, opts *DeleteOptions) (*Response, error) + + // Create is an alias for Set w/ PrevExist=false + Create(ctx context.Context, key, value string) (*Response, error) + + // CreateInOrder is used to atomically create in-order keys within the given directory. + CreateInOrder(ctx context.Context, dir, value string, opts *CreateInOrderOptions) (*Response, error) + + // Update is an alias for Set w/ PrevExist=true + Update(ctx context.Context, key, value string) (*Response, error) + + // Watcher builds a new Watcher targeted at a specific Node identified + // by the given key. The Watcher may be configured at creation time + // through a WatcherOptions object. The returned Watcher is designed + // to emit events that happen to a Node, and optionally to its children. + Watcher(key string, opts *WatcherOptions) Watcher +} + +type WatcherOptions struct { + // AfterIndex defines the index after which the Watcher should + // start emitting events. For example, if a value of 5 is + // provided, the first event will have an index >= 6. + // + // Setting AfterIndex to 0 (default) means that the Watcher + // should start watching for events starting at the current + // index, whatever that may be. + AfterIndex uint64 + + // Recursive specifies whether or not the Watcher should emit + // events that occur in children of the given keyspace. If set + // to false (default), events will be limited to those that + // occur for the exact key. + Recursive bool +} + +type CreateInOrderOptions struct { + // TTL defines a period of time after which the Node should + // expire and no longer exist. Values <= 0 are ignored. Given + // that the zero-value is ignored, TTL cannot be used to set + // a TTL of 0. + TTL time.Duration +} + +type SetOptions struct { + // PrevValue specifies what the current value of the Node must + // be in order for the Set operation to succeed. + // + // Leaving this field empty means that the caller wishes to + // ignore the current value of the Node. This cannot be used + // to compare the Node's current value to an empty string.
+ // + // PrevValue is ignored if Dir=true + PrevValue string + + // PrevIndex indicates what the current ModifiedIndex of the + // Node must be in order for the Set operation to succeed. + // + // If PrevIndex is set to 0 (default), no comparison is made. + PrevIndex uint64 + + // PrevExist specifies whether the Node must currently exist + // (PrevExist) or not (PrevNoExist). If the caller does not + // care about existence, set PrevExist to PrevIgnore, or simply + // leave it unset. + PrevExist PrevExistType + + // TTL defines a period of time after which the Node should + // expire and no longer exist. Values <= 0 are ignored. Given + // that the zero-value is ignored, TTL cannot be used to set + // a TTL of 0. + TTL time.Duration + + // Dir specifies whether or not this Node should be created as a directory. + Dir bool +} + +type GetOptions struct { + // Recursive defines whether or not all children of the Node + // should be returned. + Recursive bool + + // Sort instructs the server whether or not to sort the Nodes. + // If true, the Nodes are sorted alphabetically by key in + // ascending order (A to z). If false (default), the Nodes will + // not be sorted and the ordering used should not be considered + // predictable. + Sort bool + + // Quorum specifies whether it gets the latest committed value that + // has been applied in a quorum of members, which ensures external + // consistency (or linearizability). + Quorum bool +} + +type DeleteOptions struct { + // PrevValue specifies what the current value of the Node must + // be in order for the Delete operation to succeed. + // + // Leaving this field empty means that the caller wishes to + // ignore the current value of the Node. This cannot be used + // to compare the Node's current value to an empty string. + PrevValue string + + // PrevIndex indicates what the current ModifiedIndex of the + // Node must be in order for the Delete operation to succeed. + // + // If PrevIndex is set to 0 (default), no comparison is made. + PrevIndex uint64 + + // Recursive defines whether or not all children of the Node + // should be deleted. If set to true, all children of the Node + // identified by the given key will be deleted. If left unset + // or explicitly set to false, only a single Node will be + // deleted. + Recursive bool + + // Dir specifies whether or not this Node should be removed as a directory. + Dir bool +} + +type Watcher interface { + // Next blocks until an etcd event occurs, then returns a Response + // representing that event. The behavior of Next depends on the + // WatcherOptions used to construct the Watcher. Next is designed to + // be called repeatedly, each time blocking until a subsequent event + // is available. + // + // If the provided context is cancelled, Next will return a non-nil + // error. Any other failures encountered while waiting for the next + // event (connection issues, deserialization failures, etc.) will + // also result in a non-nil error. + Next(context.Context) (*Response, error) +} + +type Response struct { + // Action is the name of the operation that occurred. Possible values + // include get, set, delete, update, create, compareAndSwap, + // compareAndDelete and expire. + Action string `json:"action"` + + // Node represents the state of the relevant etcd Node. + Node *Node `json:"node"` + + // PrevNode represents the previous state of the Node. PrevNode is non-nil + // only if the Node existed before the action occurred and the action + // caused a change to the Node.
+ PrevNode *Node `json:"prevNode"` + + // Index holds the cluster-level index at the time the Response was generated. + // This index is not tied to the Node(s) contained in this Response. + Index uint64 `json:"-"` +} + +type Node struct { + // Key represents the unique location of this Node (e.g. "/foo/bar"). + Key string `json:"key"` + + // Dir reports whether node describes a directory. + Dir bool `json:"dir,omitempty"` + + // Value is the current data stored on this Node. If this Node + // is a directory, Value will be empty. + Value string `json:"value"` + + // Nodes holds the children of this Node, only if this Node is a directory. + // This slice will be arbitrarily deep (children, grandchildren, great- + // grandchildren, etc.) if a recursive Get or Watch request were made. + Nodes Nodes `json:"nodes"` + + // CreatedIndex is the etcd index at which this Node was created. + CreatedIndex uint64 `json:"createdIndex"` + + // ModifiedIndex is the etcd index at which this Node was last modified. + ModifiedIndex uint64 `json:"modifiedIndex"` + + // Expiration is the server side expiration time of the key. + Expiration *time.Time `json:"expiration,omitempty"` + + // TTL is the time to live of the key in seconds. + TTL int64 `json:"ttl,omitempty"` +} + +func (n *Node) String() string { + return fmt.Sprintf("{Key: %s, CreatedIndex: %d, ModifiedIndex: %d, TTL: %d}", n.Key, n.CreatedIndex, n.ModifiedIndex, n.TTL) +} + +// TTLDuration returns the Node's TTL as a time.Duration object +func (n *Node) TTLDuration() time.Duration { + return time.Duration(n.TTL) * time.Second +} + +type Nodes []*Node + +// interfaces for sorting +func (ns Nodes) Len() int { return len(ns) } +func (ns Nodes) Less(i, j int) bool { return ns[i].Key < ns[j].Key } +func (ns Nodes) Swap(i, j int) { ns[i], ns[j] = ns[j], ns[i] } + +type httpKeysAPI struct { + client httpClient + prefix string +} + +func (k *httpKeysAPI) Set(ctx context.Context, key, val string, opts *SetOptions) (*Response, error) { + act := &setAction{ + Prefix: k.prefix, + Key: key, + Value: val, + } + + if opts != nil { + act.PrevValue = opts.PrevValue + act.PrevIndex = opts.PrevIndex + act.PrevExist = opts.PrevExist + act.TTL = opts.TTL + act.Dir = opts.Dir + } + + resp, body, err := k.client.Do(ctx, act) + if err != nil { + return nil, err + } + + return unmarshalHTTPResponse(resp.StatusCode, resp.Header, body) +} + +func (k *httpKeysAPI) Create(ctx context.Context, key, val string) (*Response, error) { + return k.Set(ctx, key, val, &SetOptions{PrevExist: PrevNoExist}) +} + +func (k *httpKeysAPI) CreateInOrder(ctx context.Context, dir, val string, opts *CreateInOrderOptions) (*Response, error) { + act := &createInOrderAction{ + Prefix: k.prefix, + Dir: dir, + Value: val, + } + + if opts != nil { + act.TTL = opts.TTL + } + + resp, body, err := k.client.Do(ctx, act) + if err != nil { + return nil, err + } + + return unmarshalHTTPResponse(resp.StatusCode, resp.Header, body) +} + +func (k *httpKeysAPI) Update(ctx context.Context, key, val string) (*Response, error) { + return k.Set(ctx, key, val, &SetOptions{PrevExist: PrevExist}) +} + +func (k *httpKeysAPI) Delete(ctx context.Context, key string, opts *DeleteOptions) (*Response, error) { + act := &deleteAction{ + Prefix: k.prefix, + Key: key, + } + + if opts != nil { + act.PrevValue = opts.PrevValue + act.PrevIndex = opts.PrevIndex + act.Dir = opts.Dir + act.Recursive = opts.Recursive + } + + resp, body, err := k.client.Do(ctx, act) + if err != nil { + return nil, err + } + + return
unmarshalHTTPResponse(resp.StatusCode, resp.Header, body) +} + +func (k *httpKeysAPI) Get(ctx context.Context, key string, opts *GetOptions) (*Response, error) { + act := &getAction{ + Prefix: k.prefix, + Key: key, + } + + if opts != nil { + act.Recursive = opts.Recursive + act.Sorted = opts.Sort + act.Quorum = opts.Quorum + } + + resp, body, err := k.client.Do(ctx, act) + if err != nil { + return nil, err + } + + return unmarshalHTTPResponse(resp.StatusCode, resp.Header, body) +} + +func (k *httpKeysAPI) Watcher(key string, opts *WatcherOptions) Watcher { + act := waitAction{ + Prefix: k.prefix, + Key: key, + } + + if opts != nil { + act.Recursive = opts.Recursive + if opts.AfterIndex > 0 { + act.WaitIndex = opts.AfterIndex + 1 + } + } + + return &httpWatcher{ + client: k.client, + nextWait: act, + } +} + +type httpWatcher struct { + client httpClient + nextWait waitAction +} + +func (hw *httpWatcher) Next(ctx context.Context) (*Response, error) { + for { + httpresp, body, err := hw.client.Do(ctx, &hw.nextWait) + if err != nil { + return nil, err + } + + resp, err := unmarshalHTTPResponse(httpresp.StatusCode, httpresp.Header, body) + if err != nil { + if err == ErrEmptyBody { + continue + } + return nil, err + } + + hw.nextWait.WaitIndex = resp.Node.ModifiedIndex + 1 + return resp, nil + } +} + +// v2KeysURL forms a URL representing the location of a key. +// The endpoint argument represents the base URL of an etcd +// server. The prefix is the path needed to route from the +// provided endpoint's path to the root of the keys API +// (typically "/v2/keys"). +func v2KeysURL(ep url.URL, prefix, key string) *url.URL { + // We concatenate all parts together manually. We cannot use + // path.Join because it does not preserve a trailing slash. + // We call CanonicalURLPath to further clean up the path.
+ if prefix != "" && prefix[0] != '/' { + prefix = "/" + prefix + } + if key != "" && key[0] != '/' { + key = "/" + key + } + ep.Path = pathutil.CanonicalURLPath(ep.Path + prefix + key) + return &ep +} + +type getAction struct { + Prefix string + Key string + Recursive bool + Sorted bool + Quorum bool +} + +func (g *getAction) HTTPRequest(ep url.URL) *http.Request { + u := v2KeysURL(ep, g.Prefix, g.Key) + + params := u.Query() + params.Set("recursive", strconv.FormatBool(g.Recursive)) + params.Set("sorted", strconv.FormatBool(g.Sorted)) + params.Set("quorum", strconv.FormatBool(g.Quorum)) + u.RawQuery = params.Encode() + + req, _ := http.NewRequest("GET", u.String(), nil) + return req +} + +type waitAction struct { + Prefix string + Key string + WaitIndex uint64 + Recursive bool +} + +func (w *waitAction) HTTPRequest(ep url.URL) *http.Request { + u := v2KeysURL(ep, w.Prefix, w.Key) + + params := u.Query() + params.Set("wait", "true") + params.Set("waitIndex", strconv.FormatUint(w.WaitIndex, 10)) + params.Set("recursive", strconv.FormatBool(w.Recursive)) + u.RawQuery = params.Encode() + + req, _ := http.NewRequest("GET", u.String(), nil) + return req +} + +type setAction struct { + Prefix string + Key string + Value string + PrevValue string + PrevIndex uint64 + PrevExist PrevExistType + TTL time.Duration + Dir bool +} + +func (a *setAction) HTTPRequest(ep url.URL) *http.Request { + u := v2KeysURL(ep, a.Prefix, a.Key) + + params := u.Query() + form := url.Values{} + + // we're either creating a directory or setting a key + if a.Dir { + params.Set("dir", strconv.FormatBool(a.Dir)) + } else { + // These options are only valid for setting a key + if a.PrevValue != "" { + params.Set("prevValue", a.PrevValue) + } + form.Add("value", a.Value) + } + + // Options which apply to both setting a key and creating a dir + if a.PrevIndex != 0 { + params.Set("prevIndex", strconv.FormatUint(a.PrevIndex, 10)) + } + if a.PrevExist != PrevIgnore { + params.Set("prevExist", string(a.PrevExist)) + } + if a.TTL > 0 { + form.Add("ttl", strconv.FormatUint(uint64(a.TTL.Seconds()), 10)) + } + + u.RawQuery = params.Encode() + body := strings.NewReader(form.Encode()) + + req, _ := http.NewRequest("PUT", u.String(), body) + req.Header.Set("Content-Type", "application/x-www-form-urlencoded") + + return req +} + +type deleteAction struct { + Prefix string + Key string + PrevValue string + PrevIndex uint64 + Dir bool + Recursive bool +} + +func (a *deleteAction) HTTPRequest(ep url.URL) *http.Request { + u := v2KeysURL(ep, a.Prefix, a.Key) + + params := u.Query() + if a.PrevValue != "" { + params.Set("prevValue", a.PrevValue) + } + if a.PrevIndex != 0 { + params.Set("prevIndex", strconv.FormatUint(a.PrevIndex, 10)) + } + if a.Dir { + params.Set("dir", "true") + } + if a.Recursive { + params.Set("recursive", "true") + } + u.RawQuery = params.Encode() + + req, _ := http.NewRequest("DELETE", u.String(), nil) + req.Header.Set("Content-Type", "application/x-www-form-urlencoded") + + return req +} + +type createInOrderAction struct { + Prefix string + Dir string + Value string + TTL time.Duration +} + +func (a *createInOrderAction) HTTPRequest(ep url.URL) *http.Request { + u := v2KeysURL(ep, a.Prefix, a.Dir) + + form := url.Values{} + form.Add("value", a.Value) + if a.TTL > 0 { + form.Add("ttl", strconv.FormatUint(uint64(a.TTL.Seconds()), 10)) + } + body := strings.NewReader(form.Encode()) + + req, _ := http.NewRequest("POST", u.String(), body) + req.Header.Set("Content-Type", "application/x-www-form-urlencoded") + return req +} 
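The action types above are the entire HTTP surface of the v2 keys API: each KeysAPI method fills one in and the underlying httpClient renders it into a request via HTTPRequest. As a minimal caller-side sketch of the compare-and-swap behavior those parameters enable (the endpoint, the /demo key, and its values are illustrative, and a reachable local etcd is assumed):

    package main

    import (
    	"log"
    	"time"

    	"github.com/coreos/etcd/client"
    	"golang.org/x/net/context"
    )

    func main() {
    	c, err := client.New(client.Config{
    		Endpoints: []string{"http://127.0.0.1:2379"}, // illustrative endpoint
    		Transport: client.DefaultTransport,
    	})
    	if err != nil {
    		log.Fatal(err)
    	}
    	kapi := client.NewKeysAPI(c)

    	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
    	defer cancel()

    	// Unconditional set, then a compare-and-swap that succeeds only if
    	// the value is still what we just wrote.
    	if _, err = kapi.Set(ctx, "/demo", "v1", nil); err != nil {
    		log.Fatal(err)
    	}
    	if _, err = kapi.Set(ctx, "/demo", "v2", &client.SetOptions{PrevValue: "v1"}); err != nil {
    		log.Fatal(err)
    	}
    }

On the wire, the second Set becomes a PUT to /v2/keys/demo?prevValue=v1 with value=v2 in the form body, which is exactly the translation setAction.HTTPRequest performs above.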
+ +func unmarshalHTTPResponse(code int, header http.Header, body []byte) (res *Response, err error) { + switch code { + case http.StatusOK, http.StatusCreated: + if len(body) == 0 { + return nil, ErrEmptyBody + } + res, err = unmarshalSuccessfulKeysResponse(header, body) + default: + err = unmarshalFailedKeysResponse(body) + } + + return +} + +func unmarshalSuccessfulKeysResponse(header http.Header, body []byte) (*Response, error) { + var res Response + err := codec.NewDecoderBytes(body, new(codec.JsonHandle)).Decode(&res) + if err != nil { + return nil, ErrInvalidJSON + } + if header.Get("X-Etcd-Index") != "" { + res.Index, err = strconv.ParseUint(header.Get("X-Etcd-Index"), 10, 64) + if err != nil { + return nil, err + } + } + return &res, nil +} + +func unmarshalFailedKeysResponse(body []byte) error { + var etcdErr Error + if err := json.Unmarshal(body, &etcdErr); err != nil { + return ErrInvalidJSON + } + return etcdErr +} diff --git a/vendor/github.com/coreos/etcd/client/keys_bench_test.go b/vendor/github.com/coreos/etcd/client/keys_bench_test.go new file mode 100644 index 000000000..5b6541592 --- /dev/null +++ b/vendor/github.com/coreos/etcd/client/keys_bench_test.go @@ -0,0 +1,87 @@ +// Copyright 2015 CoreOS, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package client + +import ( + "encoding/json" + "net/http" + "reflect" + "strings" + "testing" +) + +func createTestNode(size int) *Node { + return &Node{ + Key: strings.Repeat("a", 30), + Value: strings.Repeat("a", size), + CreatedIndex: 123456, + ModifiedIndex: 123456, + TTL: 123456789, + } +} + +func createTestNodeWithChildren(children, size int) *Node { + node := createTestNode(size) + for i := 0; i < children; i++ { + node.Nodes = append(node.Nodes, createTestNode(size)) + } + return node +} + +func createTestResponse(children, size int) *Response { + return &Response{ + Action: "aaaaa", + Node: createTestNodeWithChildren(children, size), + PrevNode: nil, + } +} + +func benchmarkResponseUnmarshalling(b *testing.B, children, size int) { + header := http.Header{} + header.Add("X-Etcd-Index", "123456") + response := createTestResponse(children, size) + body, err := json.Marshal(response) + if err != nil { + b.Fatal(err) + } + + b.ResetTimer() + newResponse := new(Response) + for i := 0; i < b.N; i++ { + if newResponse, err = unmarshalSuccessfulKeysResponse(header, body); err != nil { + b.Errorf("error unmarshaling response (%v)", err) + } + + } + if !reflect.DeepEqual(response.Node, newResponse.Node) { + b.Errorf("Unexpected difference in a parsed response: \n%+v\n%+v", response, newResponse) + } +} + +func BenchmarkSmallResponseUnmarshal(b *testing.B) { + benchmarkResponseUnmarshalling(b, 30, 20) +} + +func BenchmarkManySmallResponseUnmarshal(b *testing.B) { + benchmarkResponseUnmarshalling(b, 3000, 20) +} + +func BenchmarkMediumResponseUnmarshal(b *testing.B) { + benchmarkResponseUnmarshalling(b, 300, 200) +} + +func BenchmarkLargeResponseUnmarshal(b *testing.B) { + benchmarkResponseUnmarshalling(b, 3000, 2000) +} diff --git a/vendor/github.com/coreos/etcd/client/keys_test.go b/vendor/github.com/coreos/etcd/client/keys_test.go new file mode 100644 index 000000000..cafe6bea4 --- /dev/null +++ b/vendor/github.com/coreos/etcd/client/keys_test.go @@ -0,0 +1,1407 @@ +// Copyright 2015 CoreOS, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package client + +import ( + "errors" + "fmt" + "io/ioutil" + "net/http" + "net/url" + "reflect" + "testing" + "time" + + "github.com/coreos/etcd/Godeps/_workspace/src/golang.org/x/net/context" +) + +func TestV2KeysURLHelper(t *testing.T) { + tests := []struct { + endpoint url.URL + prefix string + key string + want url.URL + }{ + // key is empty, no problem + { + endpoint: url.URL{Scheme: "http", Host: "example.com", Path: "/v2/keys"}, + prefix: "", + key: "", + want: url.URL{Scheme: "http", Host: "example.com", Path: "/v2/keys"}, + }, + + // key is joined to path + { + endpoint: url.URL{Scheme: "http", Host: "example.com", Path: "/v2/keys"}, + prefix: "", + key: "/foo/bar", + want: url.URL{Scheme: "http", Host: "example.com", Path: "/v2/keys/foo/bar"}, + }, + + // key is joined to path when path is empty + { + endpoint: url.URL{Scheme: "http", Host: "example.com", Path: ""}, + prefix: "", + key: "/foo/bar", + want: url.URL{Scheme: "http", Host: "example.com", Path: "/foo/bar"}, + }, + + // Host field carries through with port + { + endpoint: url.URL{Scheme: "http", Host: "example.com:8080", Path: "/v2/keys"}, + prefix: "", + key: "", + want: url.URL{Scheme: "http", Host: "example.com:8080", Path: "/v2/keys"}, + }, + + // Scheme carries through + { + endpoint: url.URL{Scheme: "https", Host: "example.com", Path: "/v2/keys"}, + prefix: "", + key: "", + want: url.URL{Scheme: "https", Host: "example.com", Path: "/v2/keys"}, + }, + // Prefix is applied + { + endpoint: url.URL{Scheme: "https", Host: "example.com", Path: "/foo"}, + prefix: "/bar", + key: "/baz", + want: url.URL{Scheme: "https", Host: "example.com", Path: "/foo/bar/baz"}, + }, + // Prefix is joined to path + { + endpoint: url.URL{Scheme: "https", Host: "example.com", Path: "/foo"}, + prefix: "/bar", + key: "", + want: url.URL{Scheme: "https", Host: "example.com", Path: "/foo/bar"}, + }, + // Keep trailing slash + { + endpoint: url.URL{Scheme: "https", Host: "example.com", Path: "/foo"}, + prefix: "/bar", + key: "/baz/", + want: url.URL{Scheme: "https", Host: "example.com", Path: "/foo/bar/baz/"}, + }, + } + + for i, tt := range tests { + got := v2KeysURL(tt.endpoint, tt.prefix, tt.key) + if tt.want != *got { + t.Errorf("#%d: want=%#v, got=%#v", i, tt.want, *got) + } + } +} + +func TestGetAction(t *testing.T) { + ep := url.URL{Scheme: "http", Host: "example.com", Path: "/v2/keys"} + baseWantURL := &url.URL{ + Scheme: "http", + Host: "example.com", + Path: "/v2/keys/foo/bar", + } + wantHeader := http.Header{} + + tests := []struct { + recursive bool + sorted bool + quorum bool + wantQuery string + }{ + { + recursive: false, + sorted: false, + quorum: false, + wantQuery: "quorum=false&recursive=false&sorted=false", + }, + { + recursive: true, + sorted: false, + quorum: false, + wantQuery: "quorum=false&recursive=true&sorted=false", + }, + { + recursive: false, + sorted: true, + quorum: false, + wantQuery: "quorum=false&recursive=false&sorted=true", + }, + { + recursive: true, + sorted: true, + quorum: false, + wantQuery: "quorum=false&recursive=true&sorted=true", + }, + { + recursive: false, + sorted: false, + quorum: true, + wantQuery: "quorum=true&recursive=false&sorted=false", + }, + } + + for i, tt := range tests { + f := getAction{ + Key: "/foo/bar", + Recursive: tt.recursive, + Sorted: tt.sorted, + Quorum: tt.quorum, + } + got := *f.HTTPRequest(ep) + + wantURL := baseWantURL + wantURL.RawQuery = tt.wantQuery + + err := assertRequest(got, "GET", wantURL, wantHeader, nil) + if err != nil { + t.Errorf("#%d: %v", i, err) + } + 
} +} + +func TestWaitAction(t *testing.T) { + ep := url.URL{Scheme: "http", Host: "example.com", Path: "/v2/keys"} + baseWantURL := &url.URL{ + Scheme: "http", + Host: "example.com", + Path: "/v2/keys/foo/bar", + } + wantHeader := http.Header{} + + tests := []struct { + waitIndex uint64 + recursive bool + wantQuery string + }{ + { + recursive: false, + waitIndex: uint64(0), + wantQuery: "recursive=false&wait=true&waitIndex=0", + }, + { + recursive: false, + waitIndex: uint64(12), + wantQuery: "recursive=false&wait=true&waitIndex=12", + }, + { + recursive: true, + waitIndex: uint64(12), + wantQuery: "recursive=true&wait=true&waitIndex=12", + }, + } + + for i, tt := range tests { + f := waitAction{ + Key: "/foo/bar", + WaitIndex: tt.waitIndex, + Recursive: tt.recursive, + } + got := *f.HTTPRequest(ep) + + wantURL := baseWantURL + wantURL.RawQuery = tt.wantQuery + + err := assertRequest(got, "GET", wantURL, wantHeader, nil) + if err != nil { + t.Errorf("#%d: unexpected error: %#v", i, err) + } + } +} + +func TestSetAction(t *testing.T) { + wantHeader := http.Header(map[string][]string{ + "Content-Type": {"application/x-www-form-urlencoded"}, + }) + + tests := []struct { + act setAction + wantURL string + wantBody string + }{ + // default prefix + { + act: setAction{ + Prefix: defaultV2KeysPrefix, + Key: "foo", + }, + wantURL: "http://example.com/v2/keys/foo", + wantBody: "value=", + }, + + // non-default prefix + { + act: setAction{ + Prefix: "/pfx", + Key: "foo", + }, + wantURL: "http://example.com/pfx/foo", + wantBody: "value=", + }, + + // no prefix + { + act: setAction{ + Key: "foo", + }, + wantURL: "http://example.com/foo", + wantBody: "value=", + }, + + // Key with path separators + { + act: setAction{ + Prefix: defaultV2KeysPrefix, + Key: "foo/bar/baz", + }, + wantURL: "http://example.com/v2/keys/foo/bar/baz", + wantBody: "value=", + }, + + // Key with leading slash, Prefix with trailing slash + { + act: setAction{ + Prefix: "/foo/", + Key: "/bar", + }, + wantURL: "http://example.com/foo/bar", + wantBody: "value=", + }, + + // Key with trailing slash + { + act: setAction{ + Key: "/foo/", + }, + wantURL: "http://example.com/foo/", + wantBody: "value=", + }, + + // Value is set + { + act: setAction{ + Key: "foo", + Value: "baz", + }, + wantURL: "http://example.com/foo", + wantBody: "value=baz", + }, + + // PrevExist set, but still ignored + { + act: setAction{ + Key: "foo", + PrevExist: PrevIgnore, + }, + wantURL: "http://example.com/foo", + wantBody: "value=", + }, + + // PrevExist set to true + { + act: setAction{ + Key: "foo", + PrevExist: PrevExist, + }, + wantURL: "http://example.com/foo?prevExist=true", + wantBody: "value=", + }, + + // PrevExist set to false + { + act: setAction{ + Key: "foo", + PrevExist: PrevNoExist, + }, + wantURL: "http://example.com/foo?prevExist=false", + wantBody: "value=", + }, + + // PrevValue is urlencoded + { + act: setAction{ + Key: "foo", + PrevValue: "bar baz", + }, + wantURL: "http://example.com/foo?prevValue=bar+baz", + wantBody: "value=", + }, + + // PrevIndex is set + { + act: setAction{ + Key: "foo", + PrevIndex: uint64(12), + }, + wantURL: "http://example.com/foo?prevIndex=12", + wantBody: "value=", + }, + + // TTL is set + { + act: setAction{ + Key: "foo", + TTL: 3 * time.Minute, + }, + wantURL: "http://example.com/foo", + wantBody: "ttl=180&value=", + }, + // Dir is set + { + act: setAction{ + Key: "foo", + Dir: true, + }, + wantURL: "http://example.com/foo?dir=true", + wantBody: "", + }, + // Dir is set with a value + { + act: setAction{ + 
Key: "foo", + Value: "bar", + Dir: true, + }, + wantURL: "http://example.com/foo?dir=true", + wantBody: "", + }, + // Dir is set with PrevExist set to true + { + act: setAction{ + Key: "foo", + PrevExist: PrevExist, + Dir: true, + }, + wantURL: "http://example.com/foo?dir=true&prevExist=true", + wantBody: "", + }, + // Dir is set with PrevValue + { + act: setAction{ + Key: "foo", + PrevValue: "bar", + Dir: true, + }, + wantURL: "http://example.com/foo?dir=true", + wantBody: "", + }, + } + + for i, tt := range tests { + u, err := url.Parse(tt.wantURL) + if err != nil { + t.Errorf("#%d: unable to use wantURL fixture: %v", i, err) + } + + got := tt.act.HTTPRequest(url.URL{Scheme: "http", Host: "example.com"}) + if err := assertRequest(*got, "PUT", u, wantHeader, []byte(tt.wantBody)); err != nil { + t.Errorf("#%d: %v", i, err) + } + } +} + +func TestCreateInOrderAction(t *testing.T) { + wantHeader := http.Header(map[string][]string{ + "Content-Type": {"application/x-www-form-urlencoded"}, + }) + + tests := []struct { + act createInOrderAction + wantURL string + wantBody string + }{ + // default prefix + { + act: createInOrderAction{ + Prefix: defaultV2KeysPrefix, + Dir: "foo", + }, + wantURL: "http://example.com/v2/keys/foo", + wantBody: "value=", + }, + + // non-default prefix + { + act: createInOrderAction{ + Prefix: "/pfx", + Dir: "foo", + }, + wantURL: "http://example.com/pfx/foo", + wantBody: "value=", + }, + + // no prefix + { + act: createInOrderAction{ + Dir: "foo", + }, + wantURL: "http://example.com/foo", + wantBody: "value=", + }, + + // Key with path separators + { + act: createInOrderAction{ + Prefix: defaultV2KeysPrefix, + Dir: "foo/bar/baz", + }, + wantURL: "http://example.com/v2/keys/foo/bar/baz", + wantBody: "value=", + }, + + // Key with leading slash, Prefix with trailing slash + { + act: createInOrderAction{ + Prefix: "/foo/", + Dir: "/bar", + }, + wantURL: "http://example.com/foo/bar", + wantBody: "value=", + }, + + // Key with trailing slash + { + act: createInOrderAction{ + Dir: "/foo/", + }, + wantURL: "http://example.com/foo/", + wantBody: "value=", + }, + + // Value is set + { + act: createInOrderAction{ + Dir: "foo", + Value: "baz", + }, + wantURL: "http://example.com/foo", + wantBody: "value=baz", + }, + // TTL is set + { + act: createInOrderAction{ + Dir: "foo", + TTL: 3 * time.Minute, + }, + wantURL: "http://example.com/foo", + wantBody: "ttl=180&value=", + }, + } + + for i, tt := range tests { + u, err := url.Parse(tt.wantURL) + if err != nil { + t.Errorf("#%d: unable to use wantURL fixture: %v", i, err) + } + + got := tt.act.HTTPRequest(url.URL{Scheme: "http", Host: "example.com"}) + if err := assertRequest(*got, "POST", u, wantHeader, []byte(tt.wantBody)); err != nil { + t.Errorf("#%d: %v", i, err) + } + } +} + +func TestDeleteAction(t *testing.T) { + wantHeader := http.Header(map[string][]string{ + "Content-Type": {"application/x-www-form-urlencoded"}, + }) + + tests := []struct { + act deleteAction + wantURL string + }{ + // default prefix + { + act: deleteAction{ + Prefix: defaultV2KeysPrefix, + Key: "foo", + }, + wantURL: "http://example.com/v2/keys/foo", + }, + + // non-default prefix + { + act: deleteAction{ + Prefix: "/pfx", + Key: "foo", + }, + wantURL: "http://example.com/pfx/foo", + }, + + // no prefix + { + act: deleteAction{ + Key: "foo", + }, + wantURL: "http://example.com/foo", + }, + + // Key with path separators + { + act: deleteAction{ + Prefix: defaultV2KeysPrefix, + Key: "foo/bar/baz", + }, + wantURL: "http://example.com/v2/keys/foo/bar/baz", 
+ }, + + // Key with leading slash, Prefix with trailing slash + { + act: deleteAction{ + Prefix: "/foo/", + Key: "/bar", + }, + wantURL: "http://example.com/foo/bar", + }, + + // Key with trailing slash + { + act: deleteAction{ + Key: "/foo/", + }, + wantURL: "http://example.com/foo/", + }, + + // Recursive set to true + { + act: deleteAction{ + Key: "foo", + Recursive: true, + }, + wantURL: "http://example.com/foo?recursive=true", + }, + + // PrevValue is urlencoded + { + act: deleteAction{ + Key: "foo", + PrevValue: "bar baz", + }, + wantURL: "http://example.com/foo?prevValue=bar+baz", + }, + + // PrevIndex is set + { + act: deleteAction{ + Key: "foo", + PrevIndex: uint64(12), + }, + wantURL: "http://example.com/foo?prevIndex=12", + }, + } + + for i, tt := range tests { + u, err := url.Parse(tt.wantURL) + if err != nil { + t.Errorf("#%d: unable to use wantURL fixture: %v", i, err) + } + + got := tt.act.HTTPRequest(url.URL{Scheme: "http", Host: "example.com"}) + if err := assertRequest(*got, "DELETE", u, wantHeader, nil); err != nil { + t.Errorf("#%d: %v", i, err) + } + } +} + +func assertRequest(got http.Request, wantMethod string, wantURL *url.URL, wantHeader http.Header, wantBody []byte) error { + if wantMethod != got.Method { + return fmt.Errorf("want.Method=%#v got.Method=%#v", wantMethod, got.Method) + } + + if !reflect.DeepEqual(wantURL, got.URL) { + return fmt.Errorf("want.URL=%#v got.URL=%#v", wantURL, got.URL) + } + + if !reflect.DeepEqual(wantHeader, got.Header) { + return fmt.Errorf("want.Header=%#v got.Header=%#v", wantHeader, got.Header) + } + + if got.Body == nil { + if wantBody != nil { + return fmt.Errorf("want.Body=%v got.Body=%v", wantBody, got.Body) + } + } else { + if wantBody == nil { + return fmt.Errorf("want.Body=%v got.Body=%s", wantBody, got.Body) + } else { + gotBytes, err := ioutil.ReadAll(got.Body) + if err != nil { + return err + } + + if !reflect.DeepEqual(wantBody, gotBytes) { + return fmt.Errorf("want.Body=%s got.Body=%s", wantBody, gotBytes) + } + } + } + + return nil +} + +func TestUnmarshalSuccessfulResponse(t *testing.T) { + var expiration time.Time + expiration.UnmarshalText([]byte("2015-04-07T04:40:23.044979686Z")) + + tests := []struct { + hdr string + body string + wantRes *Response + wantErr bool + }{ + // Neither PrevNode or Node + { + hdr: "1", + body: `{"action":"delete"}`, + wantRes: &Response{Action: "delete", Index: 1}, + wantErr: false, + }, + + // PrevNode + { + hdr: "15", + body: `{"action":"delete", "prevNode": {"key": "/foo", "value": "bar", "modifiedIndex": 12, "createdIndex": 10}}`, + wantRes: &Response{ + Action: "delete", + Index: 15, + Node: nil, + PrevNode: &Node{ + Key: "/foo", + Value: "bar", + ModifiedIndex: 12, + CreatedIndex: 10, + }, + }, + wantErr: false, + }, + + // Node + { + hdr: "15", + body: `{"action":"get", "node": {"key": "/foo", "value": "bar", "modifiedIndex": 12, "createdIndex": 10, "ttl": 10, "expiration": "2015-04-07T04:40:23.044979686Z"}}`, + wantRes: &Response{ + Action: "get", + Index: 15, + Node: &Node{ + Key: "/foo", + Value: "bar", + ModifiedIndex: 12, + CreatedIndex: 10, + TTL: 10, + Expiration: &expiration, + }, + PrevNode: nil, + }, + wantErr: false, + }, + + // Node Dir + { + hdr: "15", + body: `{"action":"get", "node": {"key": "/foo", "dir": true, "modifiedIndex": 12, "createdIndex": 10}}`, + wantRes: &Response{ + Action: "get", + Index: 15, + Node: &Node{ + Key: "/foo", + Dir: true, + ModifiedIndex: 12, + CreatedIndex: 10, + }, + PrevNode: nil, + }, + wantErr: false, + }, + + // PrevNode and Node + 
{ + hdr: "15", + body: `{"action":"update", "prevNode": {"key": "/foo", "value": "baz", "modifiedIndex": 10, "createdIndex": 10}, "node": {"key": "/foo", "value": "bar", "modifiedIndex": 12, "createdIndex": 10}}`, + wantRes: &Response{ + Action: "update", + Index: 15, + PrevNode: &Node{ + Key: "/foo", + Value: "baz", + ModifiedIndex: 10, + CreatedIndex: 10, + }, + Node: &Node{ + Key: "/foo", + Value: "bar", + ModifiedIndex: 12, + CreatedIndex: 10, + }, + }, + wantErr: false, + }, + + // Garbage in body + { + hdr: "", + body: `garbage`, + wantRes: nil, + wantErr: true, + }, + + // non-integer index + { + hdr: "poo", + body: `{}`, + wantRes: nil, + wantErr: true, + }, + } + + for i, tt := range tests { + h := make(http.Header) + h.Add("X-Etcd-Index", tt.hdr) + res, err := unmarshalSuccessfulKeysResponse(h, []byte(tt.body)) + if tt.wantErr != (err != nil) { + t.Errorf("#%d: wantErr=%t, err=%v", i, tt.wantErr, err) + } + + if (res == nil) != (tt.wantRes == nil) { + t.Errorf("#%d: received res=%#v, but expected res=%#v", i, res, tt.wantRes) + continue + } else if tt.wantRes == nil { + // expected and successfully got nil response + continue + } + + if res.Action != tt.wantRes.Action { + t.Errorf("#%d: Action=%s, expected %s", i, res.Action, tt.wantRes.Action) + } + if res.Index != tt.wantRes.Index { + t.Errorf("#%d: Index=%d, expected %d", i, res.Index, tt.wantRes.Index) + } + if !reflect.DeepEqual(res.Node, tt.wantRes.Node) { + t.Errorf("#%d: Node=%v, expected %v", i, res.Node, tt.wantRes.Node) + } + } +} + +func TestUnmarshalFailedKeysResponse(t *testing.T) { + body := []byte(`{"errorCode":100,"message":"Key not found","cause":"/foo","index":18}`) + + wantErr := Error{ + Code: 100, + Message: "Key not found", + Cause: "/foo", + Index: uint64(18), + } + + gotErr := unmarshalFailedKeysResponse(body) + if !reflect.DeepEqual(wantErr, gotErr) { + t.Errorf("unexpected error: want=%#v got=%#v", wantErr, gotErr) + } +} + +func TestUnmarshalFailedKeysResponseBadJSON(t *testing.T) { + err := unmarshalFailedKeysResponse([]byte(`{"er`)) + if err == nil { + t.Errorf("got nil error") + } else if _, ok := err.(Error); ok { + t.Errorf("error is of incorrect type *Error: %#v", err) + } +} + +func TestHTTPWatcherNextWaitAction(t *testing.T) { + initAction := waitAction{ + Prefix: "/pants", + Key: "/foo/bar", + Recursive: true, + WaitIndex: 19, + } + + client := &actionAssertingHTTPClient{ + t: t, + act: &initAction, + resp: http.Response{ + StatusCode: http.StatusOK, + Header: http.Header{"X-Etcd-Index": []string{"42"}}, + }, + body: []byte(`{"action":"update","node":{"key":"/pants/foo/bar/baz","value":"snarf","modifiedIndex":21,"createdIndex":19},"prevNode":{"key":"/pants/foo/bar/baz","value":"snazz","modifiedIndex":20,"createdIndex":19}}`), + } + + wantResponse := &Response{ + Action: "update", + Node: &Node{Key: "/pants/foo/bar/baz", Value: "snarf", CreatedIndex: uint64(19), ModifiedIndex: uint64(21)}, + PrevNode: &Node{Key: "/pants/foo/bar/baz", Value: "snazz", CreatedIndex: uint64(19), ModifiedIndex: uint64(20)}, + Index: uint64(42), + } + + wantNextWait := waitAction{ + Prefix: "/pants", + Key: "/foo/bar", + Recursive: true, + WaitIndex: 22, + } + + watcher := &httpWatcher{ + client: client, + nextWait: initAction, + } + + resp, err := watcher.Next(context.Background()) + if err != nil { + t.Errorf("non-nil error: %#v", err) + } + + if !reflect.DeepEqual(wantResponse, resp) { + t.Errorf("received incorrect Response: want=%#v got=%#v", wantResponse, resp) + } + + if !reflect.DeepEqual(wantNextWait, 
watcher.nextWait) { + t.Errorf("nextWait incorrect: want=%#v got=%#v", wantNextWait, watcher.nextWait) + } +} + +func TestHTTPWatcherNextFail(t *testing.T) { + tests := []httpClient{ + // generic HTTP client failure + &staticHTTPClient{ + err: errors.New("fail!"), + }, + + // unusable status code + &staticHTTPClient{ + resp: http.Response{ + StatusCode: http.StatusTeapot, + }, + }, + + // etcd Error response + &staticHTTPClient{ + resp: http.Response{ + StatusCode: http.StatusNotFound, + }, + body: []byte(`{"errorCode":100,"message":"Key not found","cause":"/foo","index":18}`), + }, + } + + for i, tt := range tests { + act := waitAction{ + Prefix: "/pants", + Key: "/foo/bar", + Recursive: true, + WaitIndex: 19, + } + + watcher := &httpWatcher{ + client: tt, + nextWait: act, + } + + resp, err := watcher.Next(context.Background()) + if err == nil { + t.Errorf("#%d: expected non-nil error", i) + } + if resp != nil { + t.Errorf("#%d: expected nil Response, got %#v", i, resp) + } + if !reflect.DeepEqual(act, watcher.nextWait) { + t.Errorf("#%d: nextWait changed: want=%#v got=%#v", i, act, watcher.nextWait) + } + } +} + +func TestHTTPKeysAPIWatcherAction(t *testing.T) { + tests := []struct { + key string + opts *WatcherOptions + want waitAction + }{ + { + key: "/foo", + opts: nil, + want: waitAction{ + Key: "/foo", + Recursive: false, + WaitIndex: 0, + }, + }, + + { + key: "/foo", + opts: &WatcherOptions{ + Recursive: false, + AfterIndex: 0, + }, + want: waitAction{ + Key: "/foo", + Recursive: false, + WaitIndex: 0, + }, + }, + + { + key: "/foo", + opts: &WatcherOptions{ + Recursive: true, + AfterIndex: 0, + }, + want: waitAction{ + Key: "/foo", + Recursive: true, + WaitIndex: 0, + }, + }, + + { + key: "/foo", + opts: &WatcherOptions{ + Recursive: false, + AfterIndex: 19, + }, + want: waitAction{ + Key: "/foo", + Recursive: false, + WaitIndex: 20, + }, + }, + } + + for i, tt := range tests { + kAPI := &httpKeysAPI{ + client: &staticHTTPClient{err: errors.New("fail!")}, + } + + want := &httpWatcher{ + client: &staticHTTPClient{err: errors.New("fail!")}, + nextWait: tt.want, + } + + got := kAPI.Watcher(tt.key, tt.opts) + if !reflect.DeepEqual(want, got) { + t.Errorf("#%d: incorrect watcher: want=%#v got=%#v", i, want, got) + } + } +} + +func TestHTTPKeysAPISetAction(t *testing.T) { + tests := []struct { + key string + value string + opts *SetOptions + wantAction httpAction + }{ + // nil SetOptions + { + key: "/foo", + value: "bar", + opts: nil, + wantAction: &setAction{ + Key: "/foo", + Value: "bar", + PrevValue: "", + PrevIndex: 0, + PrevExist: PrevIgnore, + TTL: 0, + }, + }, + // empty SetOptions + { + key: "/foo", + value: "bar", + opts: &SetOptions{}, + wantAction: &setAction{ + Key: "/foo", + Value: "bar", + PrevValue: "", + PrevIndex: 0, + PrevExist: PrevIgnore, + TTL: 0, + }, + }, + // populated SetOptions + { + key: "/foo", + value: "bar", + opts: &SetOptions{ + PrevValue: "baz", + PrevIndex: 13, + PrevExist: PrevExist, + TTL: time.Minute, + Dir: true, + }, + wantAction: &setAction{ + Key: "/foo", + Value: "bar", + PrevValue: "baz", + PrevIndex: 13, + PrevExist: PrevExist, + TTL: time.Minute, + Dir: true, + }, + }, + } + + for i, tt := range tests { + client := &actionAssertingHTTPClient{t: t, num: i, act: tt.wantAction} + kAPI := httpKeysAPI{client: client} + kAPI.Set(context.Background(), tt.key, tt.value, tt.opts) + } +} + +func TestHTTPKeysAPISetError(t *testing.T) { + tests := []httpClient{ + // generic HTTP client failure + &staticHTTPClient{ + err: errors.New("fail!"), + }, + + // 
unusable status code + &staticHTTPClient{ + resp: http.Response{ + StatusCode: http.StatusTeapot, + }, + }, + + // etcd Error response + &staticHTTPClient{ + resp: http.Response{ + StatusCode: http.StatusInternalServerError, + }, + body: []byte(`{"errorCode":300,"message":"Raft internal error","cause":"/foo","index":18}`), + }, + } + + for i, tt := range tests { + kAPI := httpKeysAPI{client: tt} + resp, err := kAPI.Set(context.Background(), "/foo", "bar", nil) + if err == nil { + t.Errorf("#%d: received nil error", i) + } + if resp != nil { + t.Errorf("#%d: received non-nil Response: %#v", i, resp) + } + } +} + +func TestHTTPKeysAPISetResponse(t *testing.T) { + client := &staticHTTPClient{ + resp: http.Response{ + StatusCode: http.StatusOK, + Header: http.Header{"X-Etcd-Index": []string{"21"}}, + }, + body: []byte(`{"action":"set","node":{"key":"/pants/foo/bar/baz","value":"snarf","modifiedIndex":21,"createdIndex":21},"prevNode":{"key":"/pants/foo/bar/baz","value":"snazz","modifiedIndex":20,"createdIndex":19}}`), + } + + wantResponse := &Response{ + Action: "set", + Node: &Node{Key: "/pants/foo/bar/baz", Value: "snarf", CreatedIndex: uint64(21), ModifiedIndex: uint64(21)}, + PrevNode: &Node{Key: "/pants/foo/bar/baz", Value: "snazz", CreatedIndex: uint64(19), ModifiedIndex: uint64(20)}, + Index: uint64(21), + } + + kAPI := &httpKeysAPI{client: client, prefix: "/pants"} + resp, err := kAPI.Set(context.Background(), "/foo/bar/baz", "snarf", nil) + if err != nil { + t.Errorf("non-nil error: %#v", err) + } + if !reflect.DeepEqual(wantResponse, resp) { + t.Errorf("incorrect Response: want=%#v got=%#v", wantResponse, resp) + } +} + +func TestHTTPKeysAPIGetAction(t *testing.T) { + tests := []struct { + key string + opts *GetOptions + wantAction httpAction + }{ + // nil GetOptions + { + key: "/foo", + opts: nil, + wantAction: &getAction{ + Key: "/foo", + Sorted: false, + Recursive: false, + }, + }, + // empty GetOptions + { + key: "/foo", + opts: &GetOptions{}, + wantAction: &getAction{ + Key: "/foo", + Sorted: false, + Recursive: false, + }, + }, + // populated GetOptions + { + key: "/foo", + opts: &GetOptions{ + Sort: true, + Recursive: true, + Quorum: true, + }, + wantAction: &getAction{ + Key: "/foo", + Sorted: true, + Recursive: true, + Quorum: true, + }, + }, + } + + for i, tt := range tests { + client := &actionAssertingHTTPClient{t: t, num: i, act: tt.wantAction} + kAPI := httpKeysAPI{client: client} + kAPI.Get(context.Background(), tt.key, tt.opts) + } +} + +func TestHTTPKeysAPIGetError(t *testing.T) { + tests := []httpClient{ + // generic HTTP client failure + &staticHTTPClient{ + err: errors.New("fail!"), + }, + + // unusable status code + &staticHTTPClient{ + resp: http.Response{ + StatusCode: http.StatusTeapot, + }, + }, + + // etcd Error response + &staticHTTPClient{ + resp: http.Response{ + StatusCode: http.StatusInternalServerError, + }, + body: []byte(`{"errorCode":300,"message":"Raft internal error","cause":"/foo","index":18}`), + }, + } + + for i, tt := range tests { + kAPI := httpKeysAPI{client: tt} + resp, err := kAPI.Get(context.Background(), "/foo", nil) + if err == nil { + t.Errorf("#%d: received nil error", i) + } + if resp != nil { + t.Errorf("#%d: received non-nil Response: %#v", i, resp) + } + } +} + +func TestHTTPKeysAPIGetResponse(t *testing.T) { + client := &staticHTTPClient{ + resp: http.Response{ + StatusCode: http.StatusOK, + Header: http.Header{"X-Etcd-Index": []string{"42"}}, + }, + body: 
[]byte(`{"action":"get","node":{"key":"/pants/foo/bar","modifiedIndex":25,"createdIndex":19,"nodes":[{"key":"/pants/foo/bar/baz","value":"snarf","createdIndex":21,"modifiedIndex":25}]}}`), + } + + wantResponse := &Response{ + Action: "get", + Node: &Node{ + Key: "/pants/foo/bar", + Nodes: []*Node{ + {Key: "/pants/foo/bar/baz", Value: "snarf", CreatedIndex: 21, ModifiedIndex: 25}, + }, + CreatedIndex: uint64(19), + ModifiedIndex: uint64(25), + }, + Index: uint64(42), + } + + kAPI := &httpKeysAPI{client: client, prefix: "/pants"} + resp, err := kAPI.Get(context.Background(), "/foo/bar", &GetOptions{Recursive: true}) + if err != nil { + t.Errorf("non-nil error: %#v", err) + } + if !reflect.DeepEqual(wantResponse, resp) { + t.Errorf("incorrect Response: want=%#v got=%#v", wantResponse, resp) + } +} + +func TestHTTPKeysAPIDeleteAction(t *testing.T) { + tests := []struct { + key string + value string + opts *DeleteOptions + wantAction httpAction + }{ + // nil DeleteOptions + { + key: "/foo", + opts: nil, + wantAction: &deleteAction{ + Key: "/foo", + PrevValue: "", + PrevIndex: 0, + Recursive: false, + }, + }, + // empty DeleteOptions + { + key: "/foo", + opts: &DeleteOptions{}, + wantAction: &deleteAction{ + Key: "/foo", + PrevValue: "", + PrevIndex: 0, + Recursive: false, + }, + }, + // populated DeleteOptions + { + key: "/foo", + opts: &DeleteOptions{ + PrevValue: "baz", + PrevIndex: 13, + Recursive: true, + }, + wantAction: &deleteAction{ + Key: "/foo", + PrevValue: "baz", + PrevIndex: 13, + Recursive: true, + }, + }, + } + + for i, tt := range tests { + client := &actionAssertingHTTPClient{t: t, num: i, act: tt.wantAction} + kAPI := httpKeysAPI{client: client} + kAPI.Delete(context.Background(), tt.key, tt.opts) + } +} + +func TestHTTPKeysAPIDeleteError(t *testing.T) { + tests := []httpClient{ + // generic HTTP client failure + &staticHTTPClient{ + err: errors.New("fail!"), + }, + + // unusable status code + &staticHTTPClient{ + resp: http.Response{ + StatusCode: http.StatusTeapot, + }, + }, + + // etcd Error response + &staticHTTPClient{ + resp: http.Response{ + StatusCode: http.StatusInternalServerError, + }, + body: []byte(`{"errorCode":300,"message":"Raft internal error","cause":"/foo","index":18}`), + }, + } + + for i, tt := range tests { + kAPI := httpKeysAPI{client: tt} + resp, err := kAPI.Delete(context.Background(), "/foo", nil) + if err == nil { + t.Errorf("#%d: received nil error", i) + } + if resp != nil { + t.Errorf("#%d: received non-nil Response: %#v", i, resp) + } + } +} + +func TestHTTPKeysAPIDeleteResponse(t *testing.T) { + client := &staticHTTPClient{ + resp: http.Response{ + StatusCode: http.StatusOK, + Header: http.Header{"X-Etcd-Index": []string{"22"}}, + }, + body: []byte(`{"action":"delete","node":{"key":"/pants/foo/bar/baz","value":"snarf","modifiedIndex":22,"createdIndex":19},"prevNode":{"key":"/pants/foo/bar/baz","value":"snazz","modifiedIndex":20,"createdIndex":19}}`), + } + + wantResponse := &Response{ + Action: "delete", + Node: &Node{Key: "/pants/foo/bar/baz", Value: "snarf", CreatedIndex: uint64(19), ModifiedIndex: uint64(22)}, + PrevNode: &Node{Key: "/pants/foo/bar/baz", Value: "snazz", CreatedIndex: uint64(19), ModifiedIndex: uint64(20)}, + Index: uint64(22), + } + + kAPI := &httpKeysAPI{client: client, prefix: "/pants"} + resp, err := kAPI.Delete(context.Background(), "/foo/bar/baz", nil) + if err != nil { + t.Errorf("non-nil error: %#v", err) + } + if !reflect.DeepEqual(wantResponse, resp) { + t.Errorf("incorrect Response: want=%#v got=%#v", wantResponse, 
resp)
+	}
+}
+
+func TestHTTPKeysAPICreateAction(t *testing.T) {
+	act := &setAction{
+		Key:       "/foo",
+		Value:     "bar",
+		PrevExist: PrevNoExist,
+		PrevIndex: 0,
+		PrevValue: "",
+		TTL:       0,
+	}
+
+	kAPI := httpKeysAPI{client: &actionAssertingHTTPClient{t: t, act: act}}
+	kAPI.Create(context.Background(), "/foo", "bar")
+}
+
+func TestHTTPKeysAPICreateInOrderAction(t *testing.T) {
+	act := &createInOrderAction{
+		Dir:   "/foo",
+		Value: "bar",
+		TTL:   0,
+	}
+	kAPI := httpKeysAPI{client: &actionAssertingHTTPClient{t: t, act: act}}
+	kAPI.CreateInOrder(context.Background(), "/foo", "bar", nil)
+}
+
+func TestHTTPKeysAPIUpdateAction(t *testing.T) {
+	act := &setAction{
+		Key:       "/foo",
+		Value:     "bar",
+		PrevExist: PrevExist,
+		PrevIndex: 0,
+		PrevValue: "",
+		TTL:       0,
+	}
+
+	kAPI := httpKeysAPI{client: &actionAssertingHTTPClient{t: t, act: act}}
+	kAPI.Update(context.Background(), "/foo", "bar")
+}
+
+func TestNodeTTLDuration(t *testing.T) {
+	tests := []struct {
+		node *Node
+		want time.Duration
+	}{
+		{
+			node: &Node{TTL: 0},
+			want: 0,
+		},
+		{
+			node: &Node{TTL: 97},
+			want: 97 * time.Second,
+		},
+	}
+
+	for i, tt := range tests {
+		got := tt.node.TTLDuration()
+		if tt.want != got {
+			t.Errorf("#%d: incorrect duration: want=%v got=%v", i, tt.want, got)
+		}
+	}
+}
diff --git a/vendor/github.com/coreos/etcd/client/members.go b/vendor/github.com/coreos/etcd/client/members.go
new file mode 100644
index 000000000..71b01b27f
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/client/members.go
@@ -0,0 +1,304 @@
+// Copyright 2015 CoreOS, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package client
+
+import (
+	"bytes"
+	"encoding/json"
+	"fmt"
+	"net/http"
+	"net/url"
+	"path"
+
+	"github.com/coreos/etcd/Godeps/_workspace/src/golang.org/x/net/context"
+
+	"github.com/coreos/etcd/pkg/types"
+)
+
+var (
+	defaultV2MembersPrefix = "/v2/members"
+	defaultLeaderSuffix    = "/leader"
+)
+
+type Member struct {
+	// ID is the unique identifier of this Member.
+	ID string `json:"id"`
+
+	// Name is a human-readable, non-unique identifier of this Member.
+	Name string `json:"name"`
+
+	// PeerURLs represents the HTTP(S) endpoints this Member uses to
+	// participate in etcd's consensus protocol.
+	PeerURLs []string `json:"peerURLs"`
+
+	// ClientURLs represents the HTTP(S) endpoints on which this Member
+	// serves its client-facing APIs.
+ ClientURLs []string `json:"clientURLs"` +} + +type memberCollection []Member + +func (c *memberCollection) UnmarshalJSON(data []byte) error { + d := struct { + Members []Member + }{} + + if err := json.Unmarshal(data, &d); err != nil { + return err + } + + if d.Members == nil { + *c = make([]Member, 0) + return nil + } + + *c = d.Members + return nil +} + +type memberCreateOrUpdateRequest struct { + PeerURLs types.URLs +} + +func (m *memberCreateOrUpdateRequest) MarshalJSON() ([]byte, error) { + s := struct { + PeerURLs []string `json:"peerURLs"` + }{ + PeerURLs: make([]string, len(m.PeerURLs)), + } + + for i, u := range m.PeerURLs { + s.PeerURLs[i] = u.String() + } + + return json.Marshal(&s) +} + +// NewMembersAPI constructs a new MembersAPI that uses HTTP to +// interact with etcd's membership API. +func NewMembersAPI(c Client) MembersAPI { + return &httpMembersAPI{ + client: c, + } +} + +type MembersAPI interface { + // List enumerates the current cluster membership. + List(ctx context.Context) ([]Member, error) + + // Add instructs etcd to accept a new Member into the cluster. + Add(ctx context.Context, peerURL string) (*Member, error) + + // Remove demotes an existing Member out of the cluster. + Remove(ctx context.Context, mID string) error + + // Update instructs etcd to update an existing Member in the cluster. + Update(ctx context.Context, mID string, peerURLs []string) error + + // Leader gets current leader of the cluster + Leader(ctx context.Context) (*Member, error) +} + +type httpMembersAPI struct { + client httpClient +} + +func (m *httpMembersAPI) List(ctx context.Context) ([]Member, error) { + req := &membersAPIActionList{} + resp, body, err := m.client.Do(ctx, req) + if err != nil { + return nil, err + } + + if err := assertStatusCode(resp.StatusCode, http.StatusOK); err != nil { + return nil, err + } + + var mCollection memberCollection + if err := json.Unmarshal(body, &mCollection); err != nil { + return nil, err + } + + return []Member(mCollection), nil +} + +func (m *httpMembersAPI) Add(ctx context.Context, peerURL string) (*Member, error) { + urls, err := types.NewURLs([]string{peerURL}) + if err != nil { + return nil, err + } + + req := &membersAPIActionAdd{peerURLs: urls} + resp, body, err := m.client.Do(ctx, req) + if err != nil { + return nil, err + } + + if err := assertStatusCode(resp.StatusCode, http.StatusCreated, http.StatusConflict); err != nil { + return nil, err + } + + if resp.StatusCode != http.StatusCreated { + var merr membersError + if err := json.Unmarshal(body, &merr); err != nil { + return nil, err + } + return nil, merr + } + + var memb Member + if err := json.Unmarshal(body, &memb); err != nil { + return nil, err + } + + return &memb, nil +} + +func (m *httpMembersAPI) Update(ctx context.Context, memberID string, peerURLs []string) error { + urls, err := types.NewURLs(peerURLs) + if err != nil { + return err + } + + req := &membersAPIActionUpdate{peerURLs: urls, memberID: memberID} + resp, body, err := m.client.Do(ctx, req) + if err != nil { + return err + } + + if err := assertStatusCode(resp.StatusCode, http.StatusNoContent, http.StatusNotFound, http.StatusConflict); err != nil { + return err + } + + if resp.StatusCode != http.StatusNoContent { + var merr membersError + if err := json.Unmarshal(body, &merr); err != nil { + return err + } + return merr + } + + return nil +} + +func (m *httpMembersAPI) Remove(ctx context.Context, memberID string) error { + req := &membersAPIActionRemove{memberID: memberID} + resp, _, err := m.client.Do(ctx, 
req)
+	if err != nil {
+		return err
+	}
+
+	return assertStatusCode(resp.StatusCode, http.StatusNoContent, http.StatusGone)
+}
+
+func (m *httpMembersAPI) Leader(ctx context.Context) (*Member, error) {
+	req := &membersAPIActionLeader{}
+	resp, body, err := m.client.Do(ctx, req)
+	if err != nil {
+		return nil, err
+	}
+
+	if err := assertStatusCode(resp.StatusCode, http.StatusOK); err != nil {
+		return nil, err
+	}
+
+	var leader Member
+	if err := json.Unmarshal(body, &leader); err != nil {
+		return nil, err
+	}
+
+	return &leader, nil
+}
+
+type membersAPIActionList struct{}
+
+func (l *membersAPIActionList) HTTPRequest(ep url.URL) *http.Request {
+	u := v2MembersURL(ep)
+	req, _ := http.NewRequest("GET", u.String(), nil)
+	return req
+}
+
+type membersAPIActionRemove struct {
+	memberID string
+}
+
+func (d *membersAPIActionRemove) HTTPRequest(ep url.URL) *http.Request {
+	u := v2MembersURL(ep)
+	u.Path = path.Join(u.Path, d.memberID)
+	req, _ := http.NewRequest("DELETE", u.String(), nil)
+	return req
+}
+
+type membersAPIActionAdd struct {
+	peerURLs types.URLs
+}
+
+func (a *membersAPIActionAdd) HTTPRequest(ep url.URL) *http.Request {
+	u := v2MembersURL(ep)
+	m := memberCreateOrUpdateRequest{PeerURLs: a.peerURLs}
+	b, _ := json.Marshal(&m)
+	req, _ := http.NewRequest("POST", u.String(), bytes.NewReader(b))
+	req.Header.Set("Content-Type", "application/json")
+	return req
+}
+
+type membersAPIActionUpdate struct {
+	memberID string
+	peerURLs types.URLs
+}
+
+func (a *membersAPIActionUpdate) HTTPRequest(ep url.URL) *http.Request {
+	u := v2MembersURL(ep)
+	m := memberCreateOrUpdateRequest{PeerURLs: a.peerURLs}
+	u.Path = path.Join(u.Path, a.memberID)
+	b, _ := json.Marshal(&m)
+	req, _ := http.NewRequest("PUT", u.String(), bytes.NewReader(b))
+	req.Header.Set("Content-Type", "application/json")
+	return req
+}
+
+func assertStatusCode(got int, want ...int) (err error) {
+	for _, w := range want {
+		if w == got {
+			return nil
+		}
+	}
+	return fmt.Errorf("unexpected status code %d", got)
+}
+
+type membersAPIActionLeader struct{}
+
+func (l *membersAPIActionLeader) HTTPRequest(ep url.URL) *http.Request {
+	u := v2MembersURL(ep)
+	u.Path = path.Join(u.Path, defaultLeaderSuffix)
+	req, _ := http.NewRequest("GET", u.String(), nil)
+	return req
+}
+
+// v2MembersURL adds the necessary path to the provided endpoint
+// to route requests to the default v2 members API.
+func v2MembersURL(ep url.URL) *url.URL {
+	ep.Path = path.Join(ep.Path, defaultV2MembersPrefix)
+	return &ep
+}
+
+type membersError struct {
+	Message string `json:"message"`
+	Code    int    `json:"-"`
+}
+
+func (e membersError) Error() string {
+	return e.Message
+}
diff --git a/vendor/github.com/coreos/etcd/client/members_test.go b/vendor/github.com/coreos/etcd/client/members_test.go
new file mode 100644
index 000000000..494e2dc56
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/client/members_test.go
@@ -0,0 +1,599 @@
+// Copyright 2015 CoreOS, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
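Editor's note: the MembersAPI defined above is a thin wrapper over etcd's /v2/members endpoints. A hedged usage sketch follows; it assumes the Config/New constructors from this package's client.go (vendored elsewhere in this patch) and an etcd listening on 127.0.0.1:2379, and a consumer would import golang.org/x/net/context directly rather than the Godeps rewrite path used inside the vendored files:

    package main

    import (
        "fmt"
        "log"

        etcd "github.com/coreos/etcd/client"
        "golang.org/x/net/context"
    )

    func main() {
        c, err := etcd.New(etcd.Config{Endpoints: []string{"http://127.0.0.1:2379"}})
        if err != nil {
            log.Fatal(err)
        }
        mAPI := etcd.NewMembersAPI(c)

        // List hits GET /v2/members and decodes the memberCollection wrapper.
        members, err := mAPI.List(context.Background())
        if err != nil {
            log.Fatal(err)
        }
        for _, m := range members {
            fmt.Printf("%s %s peers=%v clients=%v\n", m.ID, m.Name, m.PeerURLs, m.ClientURLs)
        }
    }
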
+ +package client + +import ( + "encoding/json" + "errors" + "net/http" + "net/url" + "reflect" + "testing" + + "github.com/coreos/etcd/Godeps/_workspace/src/golang.org/x/net/context" + + "github.com/coreos/etcd/pkg/types" +) + +func TestMembersAPIActionList(t *testing.T) { + ep := url.URL{Scheme: "http", Host: "example.com"} + act := &membersAPIActionList{} + + wantURL := &url.URL{ + Scheme: "http", + Host: "example.com", + Path: "/v2/members", + } + + got := *act.HTTPRequest(ep) + err := assertRequest(got, "GET", wantURL, http.Header{}, nil) + if err != nil { + t.Error(err.Error()) + } +} + +func TestMembersAPIActionAdd(t *testing.T) { + ep := url.URL{Scheme: "http", Host: "example.com"} + act := &membersAPIActionAdd{ + peerURLs: types.URLs([]url.URL{ + {Scheme: "https", Host: "127.0.0.1:8081"}, + {Scheme: "http", Host: "127.0.0.1:8080"}, + }), + } + + wantURL := &url.URL{ + Scheme: "http", + Host: "example.com", + Path: "/v2/members", + } + wantHeader := http.Header{ + "Content-Type": []string{"application/json"}, + } + wantBody := []byte(`{"peerURLs":["https://127.0.0.1:8081","http://127.0.0.1:8080"]}`) + + got := *act.HTTPRequest(ep) + err := assertRequest(got, "POST", wantURL, wantHeader, wantBody) + if err != nil { + t.Error(err.Error()) + } +} + +func TestMembersAPIActionUpdate(t *testing.T) { + ep := url.URL{Scheme: "http", Host: "example.com"} + act := &membersAPIActionUpdate{ + memberID: "0xabcd", + peerURLs: types.URLs([]url.URL{ + {Scheme: "https", Host: "127.0.0.1:8081"}, + {Scheme: "http", Host: "127.0.0.1:8080"}, + }), + } + + wantURL := &url.URL{ + Scheme: "http", + Host: "example.com", + Path: "/v2/members/0xabcd", + } + wantHeader := http.Header{ + "Content-Type": []string{"application/json"}, + } + wantBody := []byte(`{"peerURLs":["https://127.0.0.1:8081","http://127.0.0.1:8080"]}`) + + got := *act.HTTPRequest(ep) + err := assertRequest(got, "PUT", wantURL, wantHeader, wantBody) + if err != nil { + t.Error(err.Error()) + } +} + +func TestMembersAPIActionRemove(t *testing.T) { + ep := url.URL{Scheme: "http", Host: "example.com"} + act := &membersAPIActionRemove{memberID: "XXX"} + + wantURL := &url.URL{ + Scheme: "http", + Host: "example.com", + Path: "/v2/members/XXX", + } + + got := *act.HTTPRequest(ep) + err := assertRequest(got, "DELETE", wantURL, http.Header{}, nil) + if err != nil { + t.Error(err.Error()) + } +} + +func TestMembersAPIActionLeader(t *testing.T) { + ep := url.URL{Scheme: "http", Host: "example.com"} + act := &membersAPIActionLeader{} + + wantURL := &url.URL{ + Scheme: "http", + Host: "example.com", + Path: "/v2/members/leader", + } + + got := *act.HTTPRequest(ep) + err := assertRequest(got, "GET", wantURL, http.Header{}, nil) + if err != nil { + t.Error(err.Error()) + } +} + +func TestAssertStatusCode(t *testing.T) { + if err := assertStatusCode(404, 400); err == nil { + t.Errorf("assertStatusCode failed to detect conflict in 400 vs 404") + } + + if err := assertStatusCode(404, 400, 404); err != nil { + t.Errorf("assertStatusCode found conflict in (404,400) vs 400: %v", err) + } +} + +func TestV2MembersURL(t *testing.T) { + got := v2MembersURL(url.URL{ + Scheme: "http", + Host: "foo.example.com:4002", + Path: "/pants", + }) + want := &url.URL{ + Scheme: "http", + Host: "foo.example.com:4002", + Path: "/pants/v2/members", + } + + if !reflect.DeepEqual(want, got) { + t.Fatalf("v2MembersURL got %#v, want %#v", got, want) + } +} + +func TestMemberUnmarshal(t *testing.T) { + tests := []struct { + body []byte + wantMember Member + wantError bool + }{ + // 
no URLs, just check ID & Name + { + body: []byte(`{"id": "c", "name": "dungarees"}`), + wantMember: Member{ID: "c", Name: "dungarees", PeerURLs: nil, ClientURLs: nil}, + }, + + // both client and peer URLs + { + body: []byte(`{"peerURLs": ["http://127.0.0.1:2379"], "clientURLs": ["http://127.0.0.1:2379"]}`), + wantMember: Member{ + PeerURLs: []string{ + "http://127.0.0.1:2379", + }, + ClientURLs: []string{ + "http://127.0.0.1:2379", + }, + }, + }, + + // multiple peer URLs + { + body: []byte(`{"peerURLs": ["http://127.0.0.1:2379", "https://example.com"]}`), + wantMember: Member{ + PeerURLs: []string{ + "http://127.0.0.1:2379", + "https://example.com", + }, + ClientURLs: nil, + }, + }, + + // multiple client URLs + { + body: []byte(`{"clientURLs": ["http://127.0.0.1:2379", "https://example.com"]}`), + wantMember: Member{ + PeerURLs: nil, + ClientURLs: []string{ + "http://127.0.0.1:2379", + "https://example.com", + }, + }, + }, + + // invalid JSON + { + body: []byte(`{"peerU`), + wantError: true, + }, + } + + for i, tt := range tests { + got := Member{} + err := json.Unmarshal(tt.body, &got) + if tt.wantError != (err != nil) { + t.Errorf("#%d: want error %t, got %v", i, tt.wantError, err) + continue + } + + if !reflect.DeepEqual(tt.wantMember, got) { + t.Errorf("#%d: incorrect output: want=%#v, got=%#v", i, tt.wantMember, got) + } + } +} + +func TestMemberCollectionUnmarshalFail(t *testing.T) { + mc := &memberCollection{} + if err := mc.UnmarshalJSON([]byte(`{`)); err == nil { + t.Errorf("got nil error") + } +} + +func TestMemberCollectionUnmarshal(t *testing.T) { + tests := []struct { + body []byte + want memberCollection + }{ + { + body: []byte(`{}`), + want: memberCollection([]Member{}), + }, + { + body: []byte(`{"members":[]}`), + want: memberCollection([]Member{}), + }, + { + body: []byte(`{"members":[{"id":"2745e2525fce8fe","peerURLs":["http://127.0.0.1:7003"],"name":"node3","clientURLs":["http://127.0.0.1:4003"]},{"id":"42134f434382925","peerURLs":["http://127.0.0.1:2380","http://127.0.0.1:7001"],"name":"node1","clientURLs":["http://127.0.0.1:2379","http://127.0.0.1:4001"]},{"id":"94088180e21eb87b","peerURLs":["http://127.0.0.1:7002"],"name":"node2","clientURLs":["http://127.0.0.1:4002"]}]}`), + want: memberCollection( + []Member{ + { + ID: "2745e2525fce8fe", + Name: "node3", + PeerURLs: []string{ + "http://127.0.0.1:7003", + }, + ClientURLs: []string{ + "http://127.0.0.1:4003", + }, + }, + { + ID: "42134f434382925", + Name: "node1", + PeerURLs: []string{ + "http://127.0.0.1:2380", + "http://127.0.0.1:7001", + }, + ClientURLs: []string{ + "http://127.0.0.1:2379", + "http://127.0.0.1:4001", + }, + }, + { + ID: "94088180e21eb87b", + Name: "node2", + PeerURLs: []string{ + "http://127.0.0.1:7002", + }, + ClientURLs: []string{ + "http://127.0.0.1:4002", + }, + }, + }, + ), + }, + } + + for i, tt := range tests { + var got memberCollection + err := json.Unmarshal(tt.body, &got) + if err != nil { + t.Errorf("#%d: unexpected error: %v", i, err) + continue + } + + if !reflect.DeepEqual(tt.want, got) { + t.Errorf("#%d: incorrect output: want=%#v, got=%#v", i, tt.want, got) + } + } +} + +func TestMemberCreateRequestMarshal(t *testing.T) { + req := memberCreateOrUpdateRequest{ + PeerURLs: types.URLs([]url.URL{ + {Scheme: "http", Host: "127.0.0.1:8081"}, + {Scheme: "https", Host: "127.0.0.1:8080"}, + }), + } + want := []byte(`{"peerURLs":["http://127.0.0.1:8081","https://127.0.0.1:8080"]}`) + + got, err := json.Marshal(&req) + if err != nil { + t.Fatalf("Marshal returned unexpected err=%v", err) + 
} + + if !reflect.DeepEqual(want, got) { + t.Fatalf("Failed to marshal memberCreateRequest: want=%s, got=%s", want, got) + } +} + +func TestHTTPMembersAPIAddSuccess(t *testing.T) { + wantAction := &membersAPIActionAdd{ + peerURLs: types.URLs([]url.URL{ + {Scheme: "http", Host: "127.0.0.1:7002"}, + }), + } + + mAPI := &httpMembersAPI{ + client: &actionAssertingHTTPClient{ + t: t, + act: wantAction, + resp: http.Response{ + StatusCode: http.StatusCreated, + }, + body: []byte(`{"id":"94088180e21eb87b","peerURLs":["http://127.0.0.1:7002"]}`), + }, + } + + wantResponseMember := &Member{ + ID: "94088180e21eb87b", + PeerURLs: []string{"http://127.0.0.1:7002"}, + } + + m, err := mAPI.Add(context.Background(), "http://127.0.0.1:7002") + if err != nil { + t.Errorf("got non-nil err: %#v", err) + } + if !reflect.DeepEqual(wantResponseMember, m) { + t.Errorf("incorrect Member: want=%#v got=%#v", wantResponseMember, m) + } +} + +func TestHTTPMembersAPIAddError(t *testing.T) { + okPeer := "http://example.com:2379" + tests := []struct { + peerURL string + client httpClient + + // if wantErr == nil, assert that the returned error is non-nil + // if wantErr != nil, assert that the returned error matches + wantErr error + }{ + // malformed peer URL + { + peerURL: ":", + }, + + // generic httpClient failure + { + peerURL: okPeer, + client: &staticHTTPClient{err: errors.New("fail!")}, + }, + + // unrecognized HTTP status code + { + peerURL: okPeer, + client: &staticHTTPClient{ + resp: http.Response{StatusCode: http.StatusTeapot}, + }, + }, + + // unmarshal body into membersError on StatusConflict + { + peerURL: okPeer, + client: &staticHTTPClient{ + resp: http.Response{ + StatusCode: http.StatusConflict, + }, + body: []byte(`{"message":"fail!"}`), + }, + wantErr: membersError{Message: "fail!"}, + }, + + // fail to unmarshal body on StatusConflict + { + peerURL: okPeer, + client: &staticHTTPClient{ + resp: http.Response{ + StatusCode: http.StatusConflict, + }, + body: []byte(`{"`), + }, + }, + + // fail to unmarshal body on StatusCreated + { + peerURL: okPeer, + client: &staticHTTPClient{ + resp: http.Response{ + StatusCode: http.StatusCreated, + }, + body: []byte(`{"id":"XX`), + }, + }, + } + + for i, tt := range tests { + mAPI := &httpMembersAPI{client: tt.client} + m, err := mAPI.Add(context.Background(), tt.peerURL) + if err == nil { + t.Errorf("#%d: got nil err", i) + } + if tt.wantErr != nil && !reflect.DeepEqual(tt.wantErr, err) { + t.Errorf("#%d: incorrect error: want=%#v got=%#v", i, tt.wantErr, err) + } + if m != nil { + t.Errorf("#%d: got non-nil Member", i) + } + } +} + +func TestHTTPMembersAPIRemoveSuccess(t *testing.T) { + wantAction := &membersAPIActionRemove{ + memberID: "94088180e21eb87b", + } + + mAPI := &httpMembersAPI{ + client: &actionAssertingHTTPClient{ + t: t, + act: wantAction, + resp: http.Response{ + StatusCode: http.StatusNoContent, + }, + }, + } + + if err := mAPI.Remove(context.Background(), "94088180e21eb87b"); err != nil { + t.Errorf("got non-nil err: %#v", err) + } +} + +func TestHTTPMembersAPIRemoveFail(t *testing.T) { + tests := []httpClient{ + // generic error + &staticHTTPClient{ + err: errors.New("fail!"), + }, + + // unexpected HTTP status code + &staticHTTPClient{ + resp: http.Response{ + StatusCode: http.StatusInternalServerError, + }, + }, + } + + for i, tt := range tests { + mAPI := &httpMembersAPI{client: tt} + if err := mAPI.Remove(context.Background(), "94088180e21eb87b"); err == nil { + t.Errorf("#%d: got nil err", i) + } + } +} + +func 
TestHTTPMembersAPIListSuccess(t *testing.T) { + wantAction := &membersAPIActionList{} + mAPI := &httpMembersAPI{ + client: &actionAssertingHTTPClient{ + t: t, + act: wantAction, + resp: http.Response{ + StatusCode: http.StatusOK, + }, + body: []byte(`{"members":[{"id":"94088180e21eb87b","name":"node2","peerURLs":["http://127.0.0.1:7002"],"clientURLs":["http://127.0.0.1:4002"]}]}`), + }, + } + + wantResponseMembers := []Member{ + { + ID: "94088180e21eb87b", + Name: "node2", + PeerURLs: []string{"http://127.0.0.1:7002"}, + ClientURLs: []string{"http://127.0.0.1:4002"}, + }, + } + + m, err := mAPI.List(context.Background()) + if err != nil { + t.Errorf("got non-nil err: %#v", err) + } + if !reflect.DeepEqual(wantResponseMembers, m) { + t.Errorf("incorrect Members: want=%#v got=%#v", wantResponseMembers, m) + } +} + +func TestHTTPMembersAPIListError(t *testing.T) { + tests := []httpClient{ + // generic httpClient failure + &staticHTTPClient{err: errors.New("fail!")}, + + // unrecognized HTTP status code + &staticHTTPClient{ + resp: http.Response{StatusCode: http.StatusTeapot}, + }, + + // fail to unmarshal body on StatusOK + &staticHTTPClient{ + resp: http.Response{ + StatusCode: http.StatusOK, + }, + body: []byte(`[{"id":"XX`), + }, + } + + for i, tt := range tests { + mAPI := &httpMembersAPI{client: tt} + ms, err := mAPI.List(context.Background()) + if err == nil { + t.Errorf("#%d: got nil err", i) + } + if ms != nil { + t.Errorf("#%d: got non-nil Member slice", i) + } + } +} + +func TestHTTPMembersAPILeaderSuccess(t *testing.T) { + wantAction := &membersAPIActionLeader{} + mAPI := &httpMembersAPI{ + client: &actionAssertingHTTPClient{ + t: t, + act: wantAction, + resp: http.Response{ + StatusCode: http.StatusOK, + }, + body: []byte(`{"id":"94088180e21eb87b","name":"node2","peerURLs":["http://127.0.0.1:7002"],"clientURLs":["http://127.0.0.1:4002"]}`), + }, + } + + wantResponseMember := &Member{ + ID: "94088180e21eb87b", + Name: "node2", + PeerURLs: []string{"http://127.0.0.1:7002"}, + ClientURLs: []string{"http://127.0.0.1:4002"}, + } + + m, err := mAPI.Leader(context.Background()) + if err != nil { + t.Errorf("err = %v, want %v", err, nil) + } + if !reflect.DeepEqual(wantResponseMember, m) { + t.Errorf("incorrect member: member = %v, want %v", wantResponseMember, m) + } +} + +func TestHTTPMembersAPILeaderError(t *testing.T) { + tests := []httpClient{ + // generic httpClient failure + &staticHTTPClient{err: errors.New("fail!")}, + + // unrecognized HTTP status code + &staticHTTPClient{ + resp: http.Response{StatusCode: http.StatusTeapot}, + }, + + // fail to unmarshal body on StatusOK + &staticHTTPClient{ + resp: http.Response{ + StatusCode: http.StatusOK, + }, + body: []byte(`[{"id":"XX`), + }, + } + + for i, tt := range tests { + mAPI := &httpMembersAPI{client: tt} + m, err := mAPI.Leader(context.Background()) + if err == nil { + t.Errorf("#%d: err = nil, want not nil", i) + } + if m != nil { + t.Errorf("member slice = %v, want nil", m) + } + } +} diff --git a/vendor/github.com/coreos/etcd/client/srv.go b/vendor/github.com/coreos/etcd/client/srv.go new file mode 100644 index 000000000..bc48eb2cf --- /dev/null +++ b/vendor/github.com/coreos/etcd/client/srv.go @@ -0,0 +1,65 @@ +// Copyright 2015 CoreOS, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package client
+
+import (
+	"fmt"
+	"net"
+	"net/url"
+)
+
+var (
+	// indirection for testing
+	lookupSRV = net.LookupSRV
+)
+
+type srvDiscover struct{}
+
+// NewSRVDiscover constructs a new Discoverer that uses the stdlib to look up SRV records.
+func NewSRVDiscover() Discoverer {
+	return &srvDiscover{}
+}
+
+// Discover looks up the etcd servers for the domain.
+func (d *srvDiscover) Discover(domain string) ([]string, error) {
+	var urls []*url.URL
+
+	updateURLs := func(service, scheme string) error {
+		_, addrs, err := lookupSRV(service, "tcp", domain)
+		if err != nil {
+			return err
+		}
+		for _, srv := range addrs {
+			urls = append(urls, &url.URL{
+				Scheme: scheme,
+				Host:   net.JoinHostPort(srv.Target, fmt.Sprintf("%d", srv.Port)),
+			})
+		}
+		return nil
+	}
+
+	errHTTPS := updateURLs("etcd-client-ssl", "https")
+	errHTTP := updateURLs("etcd-client", "http")
+
+	if errHTTPS != nil && errHTTP != nil {
+		return nil, fmt.Errorf("dns lookup errors: %s and %s", errHTTPS, errHTTP)
+	}
+
+	endpoints := make([]string, len(urls))
+	for i := range urls {
+		endpoints[i] = urls[i].String()
+	}
+	return endpoints, nil
+}
diff --git a/vendor/github.com/coreos/etcd/client/srv_test.go b/vendor/github.com/coreos/etcd/client/srv_test.go
new file mode 100644
index 000000000..52d4a08e3
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/client/srv_test.go
@@ -0,0 +1,102 @@
+// Copyright 2015 CoreOS, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
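Editor's note: Discover above resolves the "etcd-client-ssl" and "etcd-client" SRV services over TCP (net.LookupSRV supplies the _service._proto decoration, so the records live at _etcd-client-ssl._tcp.<domain> and _etcd-client._tcp.<domain>), mapping SSL records to https endpoints and plain records to http; it only errors when both lookups fail. A sketch, assuming such records actually exist for example.com:

    package main

    import (
        "fmt"
        "log"

        etcd "github.com/coreos/etcd/client"
    )

    func main() {
        d := etcd.NewSRVDiscover()
        // Queries _etcd-client-ssl._tcp.example.com, then _etcd-client._tcp.example.com.
        endpoints, err := d.Discover("example.com")
        if err != nil {
            log.Fatal(err) // both SRV lookups failed
        }
        fmt.Println(endpoints) // e.g. [https://a.example.com:2480 http://b.example.com:7001]
    }
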
+ +package client + +import ( + "errors" + "net" + "reflect" + "testing" +) + +func TestSRVDiscover(t *testing.T) { + defer func() { lookupSRV = net.LookupSRV }() + + tests := []struct { + withSSL []*net.SRV + withoutSSL []*net.SRV + expected []string + }{ + { + []*net.SRV{}, + []*net.SRV{}, + []string{}, + }, + { + []*net.SRV{ + {Target: "10.0.0.1", Port: 2480}, + {Target: "10.0.0.2", Port: 2480}, + {Target: "10.0.0.3", Port: 2480}, + }, + []*net.SRV{}, + []string{"https://10.0.0.1:2480", "https://10.0.0.2:2480", "https://10.0.0.3:2480"}, + }, + { + []*net.SRV{ + {Target: "10.0.0.1", Port: 2480}, + {Target: "10.0.0.2", Port: 2480}, + {Target: "10.0.0.3", Port: 2480}, + }, + []*net.SRV{ + {Target: "10.0.0.1", Port: 7001}, + }, + []string{"https://10.0.0.1:2480", "https://10.0.0.2:2480", "https://10.0.0.3:2480", "http://10.0.0.1:7001"}, + }, + { + []*net.SRV{ + {Target: "10.0.0.1", Port: 2480}, + {Target: "10.0.0.2", Port: 2480}, + {Target: "10.0.0.3", Port: 2480}, + }, + []*net.SRV{ + {Target: "10.0.0.1", Port: 7001}, + }, + []string{"https://10.0.0.1:2480", "https://10.0.0.2:2480", "https://10.0.0.3:2480", "http://10.0.0.1:7001"}, + }, + { + []*net.SRV{ + {Target: "a.example.com", Port: 2480}, + {Target: "b.example.com", Port: 2480}, + {Target: "c.example.com", Port: 2480}, + }, + []*net.SRV{}, + []string{"https://a.example.com:2480", "https://b.example.com:2480", "https://c.example.com:2480"}, + }, + } + + for i, tt := range tests { + lookupSRV = func(service string, proto string, domain string) (string, []*net.SRV, error) { + if service == "etcd-client-ssl" { + return "", tt.withSSL, nil + } + if service == "etcd-client" { + return "", tt.withoutSSL, nil + } + return "", nil, errors.New("Unknown service in mock") + } + + d := NewSRVDiscover() + + endpoints, err := d.Discover("example.com") + if err != nil { + t.Fatalf("%d: err: %#v", i, err) + } + + if !reflect.DeepEqual(endpoints, tt.expected) { + t.Errorf("#%d: endpoints = %v, want %v", i, endpoints, tt.expected) + } + + } +} diff --git a/vendor/github.com/coreos/etcd/client/util.go b/vendor/github.com/coreos/etcd/client/util.go new file mode 100644 index 000000000..fc0800b3d --- /dev/null +++ b/vendor/github.com/coreos/etcd/client/util.go @@ -0,0 +1,23 @@ +// Copyright 2016 CoreOS, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package client + +// IsKeyNotFound returns true if the error code is ErrorCodeKeyNotFound. +func IsKeyNotFound(err error) bool { + if cErr, ok := err.(Error); ok { + return cErr.Code == ErrorCodeKeyNotFound + } + return false +} diff --git a/vendor/github.com/coreos/etcd/pkg/pathutil/path.go b/vendor/github.com/coreos/etcd/pkg/pathutil/path.go new file mode 100644 index 000000000..f26254ba9 --- /dev/null +++ b/vendor/github.com/coreos/etcd/pkg/pathutil/path.go @@ -0,0 +1,31 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
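Editor's note: IsKeyNotFound above is the idiomatic way for consumers to distinguish "key absent" from a genuine transport or server failure. A hedged sketch against this package's KeysAPI (defined in the vendored keys.go), again assuming a local etcd and the client.Config/New constructors:

    package main

    import (
        "fmt"
        "log"

        etcd "github.com/coreos/etcd/client"
        "golang.org/x/net/context"
    )

    func main() {
        c, err := etcd.New(etcd.Config{Endpoints: []string{"http://127.0.0.1:2379"}})
        if err != nil {
            log.Fatal(err)
        }
        kapi := etcd.NewKeysAPI(c)

        resp, err := kapi.Get(context.Background(), "/maybe/missing", nil)
        switch {
        case etcd.IsKeyNotFound(err):
            fmt.Println("key absent; fall back to a default") // not a hard failure
        case err != nil:
            log.Fatal(err) // network, quorum, or server error
        default:
            fmt.Println("value:", resp.Node.Value)
        }
    }
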
+ +// Package pathutil implements utility functions for handling slash-separated +// paths. +package pathutil + +import "path" + +// CanonicalURLPath returns the canonical url path for p, which follows the rules: +// 1. the path always starts with "/" +// 2. replace multiple slashes with a single slash +// 3. replace each '.' '..' path name element with equivalent one +// 4. keep the trailing slash +// The function is borrowed from stdlib http.cleanPath in server.go. +func CanonicalURLPath(p string) string { + if p == "" { + return "/" + } + if p[0] != '/' { + p = "/" + p + } + np := path.Clean(p) + // path.Clean removes trailing slash except for root, + // put the trailing slash back if necessary. + if p[len(p)-1] == '/' && np != "/" { + np += "/" + } + return np +} diff --git a/vendor/github.com/coreos/etcd/pkg/pathutil/path_test.go b/vendor/github.com/coreos/etcd/pkg/pathutil/path_test.go new file mode 100644 index 000000000..6d3d803cf --- /dev/null +++ b/vendor/github.com/coreos/etcd/pkg/pathutil/path_test.go @@ -0,0 +1,38 @@ +// Copyright 2015 CoreOS, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package pathutil + +import "testing" + +func TestCanonicalURLPath(t *testing.T) { + tests := []struct { + p string + wp string + }{ + {"/a", "/a"}, + {"", "/"}, + {"a", "/a"}, + {"//a", "/a"}, + {"/a/.", "/a"}, + {"/a/..", "/"}, + {"/a/", "/a/"}, + {"/a//", "/a/"}, + } + for i, tt := range tests { + if g := CanonicalURLPath(tt.p); g != tt.wp { + t.Errorf("#%d: canonical path = %s, want %s", i, g, tt.wp) + } + } +} diff --git a/vendor/github.com/coreos/etcd/pkg/types/doc.go b/vendor/github.com/coreos/etcd/pkg/types/doc.go new file mode 100644 index 000000000..04b4c38d1 --- /dev/null +++ b/vendor/github.com/coreos/etcd/pkg/types/doc.go @@ -0,0 +1,17 @@ +// Copyright 2015 CoreOS, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package types declares various data types and implements type-checking +// functions. +package types diff --git a/vendor/github.com/coreos/etcd/pkg/types/id.go b/vendor/github.com/coreos/etcd/pkg/types/id.go new file mode 100644 index 000000000..88cb9e634 --- /dev/null +++ b/vendor/github.com/coreos/etcd/pkg/types/id.go @@ -0,0 +1,41 @@ +// Copyright 2015 CoreOS, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package types + +import ( + "strconv" +) + +// ID represents a generic identifier which is canonically +// stored as a uint64 but is typically represented as a +// base-16 string for input/output +type ID uint64 + +func (i ID) String() string { + return strconv.FormatUint(uint64(i), 16) +} + +// IDFromString attempts to create an ID from a base-16 string. +func IDFromString(s string) (ID, error) { + i, err := strconv.ParseUint(s, 16, 64) + return ID(i), err +} + +// IDSlice implements the sort interface +type IDSlice []ID + +func (p IDSlice) Len() int { return len(p) } +func (p IDSlice) Less(i, j int) bool { return uint64(p[i]) < uint64(p[j]) } +func (p IDSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } diff --git a/vendor/github.com/coreos/etcd/pkg/types/id_test.go b/vendor/github.com/coreos/etcd/pkg/types/id_test.go new file mode 100644 index 000000000..97d168f58 --- /dev/null +++ b/vendor/github.com/coreos/etcd/pkg/types/id_test.go @@ -0,0 +1,95 @@ +// Copyright 2015 CoreOS, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
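+
+// A quick sketch of the base-16 round trip exercised by the tests below:
+//
+//	ID(12).String()   // "c"
+//	IDFromString("c") // ID(12), nil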
+ +package types + +import ( + "reflect" + "sort" + "testing" +) + +func TestIDString(t *testing.T) { + tests := []struct { + input ID + want string + }{ + { + input: 12, + want: "c", + }, + { + input: 4918257920282737594, + want: "444129853c343bba", + }, + } + + for i, tt := range tests { + got := tt.input.String() + if tt.want != got { + t.Errorf("#%d: ID.String failure: want=%v, got=%v", i, tt.want, got) + } + } +} + +func TestIDFromString(t *testing.T) { + tests := []struct { + input string + want ID + }{ + { + input: "17", + want: 23, + }, + { + input: "612840dae127353", + want: 437557308098245459, + }, + } + + for i, tt := range tests { + got, err := IDFromString(tt.input) + if err != nil { + t.Errorf("#%d: IDFromString failure: err=%v", i, err) + continue + } + if tt.want != got { + t.Errorf("#%d: IDFromString failure: want=%v, got=%v", i, tt.want, got) + } + } +} + +func TestIDFromStringFail(t *testing.T) { + tests := []string{ + "", + "XXX", + "612840dae127353612840dae127353", + } + + for i, tt := range tests { + _, err := IDFromString(tt) + if err == nil { + t.Fatalf("#%d: IDFromString expected error, but err=nil", i) + } + } +} + +func TestIDSlice(t *testing.T) { + g := []ID{10, 500, 5, 1, 100, 25} + w := []ID{1, 5, 10, 25, 100, 500} + sort.Sort(IDSlice(g)) + if !reflect.DeepEqual(g, w) { + t.Errorf("slice after sort = %#v, want %#v", g, w) + } +} diff --git a/vendor/github.com/coreos/etcd/pkg/types/set.go b/vendor/github.com/coreos/etcd/pkg/types/set.go new file mode 100644 index 000000000..bb997174c --- /dev/null +++ b/vendor/github.com/coreos/etcd/pkg/types/set.go @@ -0,0 +1,178 @@ +// Copyright 2015 CoreOS, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package types + +import ( + "reflect" + "sort" + "sync" +) + +type Set interface { + Add(string) + Remove(string) + Contains(string) bool + Equals(Set) bool + Length() int + Values() []string + Copy() Set + Sub(Set) Set +} + +func NewUnsafeSet(values ...string) *unsafeSet { + set := &unsafeSet{make(map[string]struct{})} + for _, v := range values { + set.Add(v) + } + return set +} + +func NewThreadsafeSet(values ...string) *tsafeSet { + us := NewUnsafeSet(values...) 
+ return &tsafeSet{us, sync.RWMutex{}} +} + +type unsafeSet struct { + d map[string]struct{} +} + +// Add adds a new value to the set (no-op if the value is already present) +func (us *unsafeSet) Add(value string) { + us.d[value] = struct{}{} +} + +// Remove removes the given value from the set +func (us *unsafeSet) Remove(value string) { + delete(us.d, value) +} + +// Contains returns whether the set contains the given value +func (us *unsafeSet) Contains(value string) (exists bool) { + _, exists = us.d[value] + return +} + +// ContainsAll returns whether the set contains all given values +func (us *unsafeSet) ContainsAll(values []string) bool { + for _, s := range values { + if !us.Contains(s) { + return false + } + } + return true +} + +// Equals returns whether the contents of two sets are identical +func (us *unsafeSet) Equals(other Set) bool { + v1 := sort.StringSlice(us.Values()) + v2 := sort.StringSlice(other.Values()) + v1.Sort() + v2.Sort() + return reflect.DeepEqual(v1, v2) +} + +// Length returns the number of elements in the set +func (us *unsafeSet) Length() int { + return len(us.d) +} + +// Values returns the values of the Set in an unspecified order. +func (us *unsafeSet) Values() (values []string) { + values = make([]string, 0) + for val := range us.d { + values = append(values, val) + } + return +} + +// Copy creates a new Set containing the values of the first +func (us *unsafeSet) Copy() Set { + cp := NewUnsafeSet() + for val := range us.d { + cp.Add(val) + } + + return cp +} + +// Sub removes all elements in other from the set +func (us *unsafeSet) Sub(other Set) Set { + oValues := other.Values() + result := us.Copy().(*unsafeSet) + + for _, val := range oValues { + if _, ok := result.d[val]; !ok { + continue + } + delete(result.d, val) + } + + return result +} + +type tsafeSet struct { + us *unsafeSet + m sync.RWMutex +} + +func (ts *tsafeSet) Add(value string) { + ts.m.Lock() + defer ts.m.Unlock() + ts.us.Add(value) +} + +func (ts *tsafeSet) Remove(value string) { + ts.m.Lock() + defer ts.m.Unlock() + ts.us.Remove(value) +} + +func (ts *tsafeSet) Contains(value string) (exists bool) { + ts.m.RLock() + defer ts.m.RUnlock() + return ts.us.Contains(value) +} + +func (ts *tsafeSet) Equals(other Set) bool { + ts.m.RLock() + defer ts.m.RUnlock() + return ts.us.Equals(other) +} + +func (ts *tsafeSet) Length() int { + ts.m.RLock() + defer ts.m.RUnlock() + return ts.us.Length() +} + +func (ts *tsafeSet) Values() (values []string) { + ts.m.RLock() + defer ts.m.RUnlock() + return ts.us.Values() +} + +func (ts *tsafeSet) Copy() Set { + ts.m.RLock() + defer ts.m.RUnlock() + usResult := ts.us.Copy().(*unsafeSet) + return &tsafeSet{usResult, sync.RWMutex{}} +} + +func (ts *tsafeSet) Sub(other Set) Set { + ts.m.RLock() + defer ts.m.RUnlock() + usResult := ts.us.Sub(other).(*unsafeSet) + return &tsafeSet{usResult, sync.RWMutex{}} +} diff --git a/vendor/github.com/coreos/etcd/pkg/types/set_test.go b/vendor/github.com/coreos/etcd/pkg/types/set_test.go new file mode 100644 index 000000000..ff1ecc68d --- /dev/null +++ b/vendor/github.com/coreos/etcd/pkg/types/set_test.go @@ -0,0 +1,186 @@ +// Copyright 2015 CoreOS, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package types
+
+import (
+	"reflect"
+	"sort"
+	"testing"
+)
+
+func TestUnsafeSet(t *testing.T) {
+	driveSetTests(t, NewUnsafeSet())
+}
+
+func TestThreadsafeSet(t *testing.T) {
+	driveSetTests(t, NewThreadsafeSet())
+}
+
+// Check that two slices' contents are equal; order is irrelevant
+func equal(a, b []string) bool {
+	as := sort.StringSlice(a)
+	bs := sort.StringSlice(b)
+	as.Sort()
+	bs.Sort()
+	return reflect.DeepEqual(as, bs)
+}
+
+func driveSetTests(t *testing.T, s Set) {
+	// Verify operations on an empty set
+	eValues := []string{}
+	values := s.Values()
+	if !reflect.DeepEqual(values, eValues) {
+		t.Fatalf("Expect values=%v got %v", eValues, values)
+	}
+	if l := s.Length(); l != 0 {
+		t.Fatalf("Expected length=0, got %d", l)
+	}
+	for _, v := range []string{"foo", "bar", "baz"} {
+		if s.Contains(v) {
+			t.Fatalf("Expect s.Contains(%q) to be false, got true", v)
+		}
+	}
+
+	// Add three items, ensure they show up
+	s.Add("foo")
+	s.Add("bar")
+	s.Add("baz")
+
+	eValues = []string{"foo", "bar", "baz"}
+	values = s.Values()
+	if !equal(values, eValues) {
+		t.Fatalf("Expect values=%v got %v", eValues, values)
+	}
+
+	for _, v := range eValues {
+		if !s.Contains(v) {
+			t.Fatalf("Expect s.Contains(%q) to be true, got false", v)
+		}
+	}
+
+	if l := s.Length(); l != 3 {
+		t.Fatalf("Expected length=3, got %d", l)
+	}
+
+	// Add the same item a second time, ensuring it is not duplicated
+	s.Add("foo")
+
+	values = s.Values()
+	if !equal(values, eValues) {
+		t.Fatalf("Expect values=%v got %v", eValues, values)
+	}
+	if l := s.Length(); l != 3 {
+		t.Fatalf("Expected length=3, got %d", l)
+	}
+
+	// Remove all items, ensure they are gone
+	s.Remove("foo")
+	s.Remove("bar")
+	s.Remove("baz")
+
+	eValues = []string{}
+	values = s.Values()
+	if !equal(values, eValues) {
+		t.Fatalf("Expect values=%v got %v", eValues, values)
+	}
+
+	if l := s.Length(); l != 0 {
+		t.Fatalf("Expected length=0, got %d", l)
+	}
+
+	// Create new copies of the set, and ensure they are unlinked from the
+	// original Set by making modifications
+	s.Add("foo")
+	s.Add("bar")
+	cp1 := s.Copy()
+	cp2 := s.Copy()
+	s.Remove("foo")
+	cp3 := s.Copy()
+	cp1.Add("baz")
+
+	for i, tt := range []struct {
+		want []string
+		got  []string
+	}{
+		{[]string{"bar"}, s.Values()},
+		{[]string{"foo", "bar", "baz"}, cp1.Values()},
+		{[]string{"foo", "bar"}, cp2.Values()},
+		{[]string{"bar"}, cp3.Values()},
+	} {
+		if !equal(tt.want, tt.got) {
+			t.Fatalf("case %d: expect values=%v got %v", i, tt.want, tt.got)
+		}
+	}
+
+	for i, tt := range []struct {
+		want bool
+		got  bool
+	}{
+		{true, s.Equals(cp3)},
+		{true, cp3.Equals(s)},
+		{false, s.Equals(cp2)},
+		{false, s.Equals(cp1)},
+		{false, cp1.Equals(s)},
+		{false, cp2.Equals(s)},
+		{false, cp2.Equals(cp1)},
+	} {
+		if tt.got != tt.want {
+			t.Fatalf("case %d: want %t, got %t", i, tt.want, tt.got)
+
+		}
+	}
+
+	// Subtract values from a Set, ensuring a new Set is created and
+	// the original Sets are unmodified
+	sub1 := cp1.Sub(s)
+	sub2 := cp2.Sub(cp1)
+
+	for i, tt := range []struct {
+		want []string
+		got  []string
+	}{
+		{[]string{"foo", "bar", "baz"}, cp1.Values()},
+		
{[]string{"foo", "bar"}, cp2.Values()}, + {[]string{"bar"}, s.Values()}, + {[]string{"foo", "baz"}, sub1.Values()}, + {[]string{}, sub2.Values()}, + } { + if !equal(tt.want, tt.got) { + t.Fatalf("case %d: expect values=%v got %v", i, tt.want, tt.got) + } + } +} + +func TestUnsafeSetContainsAll(t *testing.T) { + vals := []string{"foo", "bar", "baz"} + s := NewUnsafeSet(vals...) + + tests := []struct { + strs []string + wcontain bool + }{ + {[]string{}, true}, + {vals[:1], true}, + {vals[:2], true}, + {vals, true}, + {[]string{"cuz"}, false}, + {[]string{vals[0], "cuz"}, false}, + } + for i, tt := range tests { + if g := s.ContainsAll(tt.strs); g != tt.wcontain { + t.Errorf("#%d: ok = %v, want %v", i, g, tt.wcontain) + } + } +} diff --git a/vendor/github.com/coreos/etcd/pkg/types/slice.go b/vendor/github.com/coreos/etcd/pkg/types/slice.go new file mode 100644 index 000000000..0327950f7 --- /dev/null +++ b/vendor/github.com/coreos/etcd/pkg/types/slice.go @@ -0,0 +1,22 @@ +// Copyright 2015 CoreOS, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package types + +// Uint64Slice implements sort interface +type Uint64Slice []uint64 + +func (p Uint64Slice) Len() int { return len(p) } +func (p Uint64Slice) Less(i, j int) bool { return p[i] < p[j] } +func (p Uint64Slice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } diff --git a/vendor/github.com/coreos/etcd/pkg/types/slice_test.go b/vendor/github.com/coreos/etcd/pkg/types/slice_test.go new file mode 100644 index 000000000..95e37e04d --- /dev/null +++ b/vendor/github.com/coreos/etcd/pkg/types/slice_test.go @@ -0,0 +1,30 @@ +// Copyright 2015 CoreOS, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package types + +import ( + "reflect" + "sort" + "testing" +) + +func TestUint64Slice(t *testing.T) { + g := Uint64Slice{10, 500, 5, 1, 100, 25} + w := Uint64Slice{1, 5, 10, 25, 100, 500} + sort.Sort(g) + if !reflect.DeepEqual(g, w) { + t.Errorf("slice after sort = %#v, want %#v", g, w) + } +} diff --git a/vendor/github.com/coreos/etcd/pkg/types/urls.go b/vendor/github.com/coreos/etcd/pkg/types/urls.go new file mode 100644 index 000000000..ce2483ffa --- /dev/null +++ b/vendor/github.com/coreos/etcd/pkg/types/urls.go @@ -0,0 +1,74 @@ +// Copyright 2015 CoreOS, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package types + +import ( + "errors" + "fmt" + "net" + "net/url" + "sort" + "strings" +) + +type URLs []url.URL + +func NewURLs(strs []string) (URLs, error) { + all := make([]url.URL, len(strs)) + if len(all) == 0 { + return nil, errors.New("no valid URLs given") + } + for i, in := range strs { + in = strings.TrimSpace(in) + u, err := url.Parse(in) + if err != nil { + return nil, err + } + if u.Scheme != "http" && u.Scheme != "https" { + return nil, fmt.Errorf("URL scheme must be http or https: %s", in) + } + if _, _, err := net.SplitHostPort(u.Host); err != nil { + return nil, fmt.Errorf(`URL address does not have the form "host:port": %s`, in) + } + if u.Path != "" { + return nil, fmt.Errorf("URL must not contain a path: %s", in) + } + all[i] = *u + } + us := URLs(all) + us.Sort() + + return us, nil +} + +func (us URLs) String() string { + return strings.Join(us.StringSlice(), ",") +} + +func (us *URLs) Sort() { + sort.Sort(us) +} +func (us URLs) Len() int { return len(us) } +func (us URLs) Less(i, j int) bool { return us[i].String() < us[j].String() } +func (us URLs) Swap(i, j int) { us[i], us[j] = us[j], us[i] } + +func (us URLs) StringSlice() []string { + out := make([]string, len(us)) + for i := range us { + out[i] = us[i].String() + } + + return out +} diff --git a/vendor/github.com/coreos/etcd/pkg/types/urls_test.go b/vendor/github.com/coreos/etcd/pkg/types/urls_test.go new file mode 100644 index 000000000..ffa2cf007 --- /dev/null +++ b/vendor/github.com/coreos/etcd/pkg/types/urls_test.go @@ -0,0 +1,169 @@ +// Copyright 2015 CoreOS, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
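+
+// A sketch of what NewURLs accepts: input is trimmed, must parse as an
+// http or https URL of the form host:port with no path, and the result
+// comes back sorted:
+//
+//	us, _ := NewURLs([]string{" http://10.0.0.2:2379 ", "http://10.0.0.1:2379"})
+//	us.StringSlice() // ["http://10.0.0.1:2379" "http://10.0.0.2:2379"]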
+ +package types + +import ( + "reflect" + "testing" + + "github.com/coreos/etcd/pkg/testutil" +) + +func TestNewURLs(t *testing.T) { + tests := []struct { + strs []string + wurls URLs + }{ + { + []string{"http://127.0.0.1:2379"}, + testutil.MustNewURLs(t, []string{"http://127.0.0.1:2379"}), + }, + // it can trim space + { + []string{" http://127.0.0.1:2379 "}, + testutil.MustNewURLs(t, []string{"http://127.0.0.1:2379"}), + }, + // it does sort + { + []string{ + "http://127.0.0.2:2379", + "http://127.0.0.1:2379", + }, + testutil.MustNewURLs(t, []string{ + "http://127.0.0.1:2379", + "http://127.0.0.2:2379", + }), + }, + } + for i, tt := range tests { + urls, _ := NewURLs(tt.strs) + if !reflect.DeepEqual(urls, tt.wurls) { + t.Errorf("#%d: urls = %+v, want %+v", i, urls, tt.wurls) + } + } +} + +func TestURLsString(t *testing.T) { + tests := []struct { + us URLs + wstr string + }{ + { + URLs{}, + "", + }, + { + testutil.MustNewURLs(t, []string{"http://127.0.0.1:2379"}), + "http://127.0.0.1:2379", + }, + { + testutil.MustNewURLs(t, []string{ + "http://127.0.0.1:2379", + "http://127.0.0.2:2379", + }), + "http://127.0.0.1:2379,http://127.0.0.2:2379", + }, + { + testutil.MustNewURLs(t, []string{ + "http://127.0.0.2:2379", + "http://127.0.0.1:2379", + }), + "http://127.0.0.2:2379,http://127.0.0.1:2379", + }, + } + for i, tt := range tests { + g := tt.us.String() + if g != tt.wstr { + t.Errorf("#%d: string = %s, want %s", i, g, tt.wstr) + } + } +} + +func TestURLsSort(t *testing.T) { + g := testutil.MustNewURLs(t, []string{ + "http://127.0.0.4:2379", + "http://127.0.0.2:2379", + "http://127.0.0.1:2379", + "http://127.0.0.3:2379", + }) + w := testutil.MustNewURLs(t, []string{ + "http://127.0.0.1:2379", + "http://127.0.0.2:2379", + "http://127.0.0.3:2379", + "http://127.0.0.4:2379", + }) + gurls := URLs(g) + gurls.Sort() + if !reflect.DeepEqual(g, w) { + t.Errorf("URLs after sort = %#v, want %#v", g, w) + } +} + +func TestURLsStringSlice(t *testing.T) { + tests := []struct { + us URLs + wstr []string + }{ + { + URLs{}, + []string{}, + }, + { + testutil.MustNewURLs(t, []string{"http://127.0.0.1:2379"}), + []string{"http://127.0.0.1:2379"}, + }, + { + testutil.MustNewURLs(t, []string{ + "http://127.0.0.1:2379", + "http://127.0.0.2:2379", + }), + []string{"http://127.0.0.1:2379", "http://127.0.0.2:2379"}, + }, + { + testutil.MustNewURLs(t, []string{ + "http://127.0.0.2:2379", + "http://127.0.0.1:2379", + }), + []string{"http://127.0.0.2:2379", "http://127.0.0.1:2379"}, + }, + } + for i, tt := range tests { + g := tt.us.StringSlice() + if !reflect.DeepEqual(g, tt.wstr) { + t.Errorf("#%d: string slice = %+v, want %+v", i, g, tt.wstr) + } + } +} + +func TestNewURLsFail(t *testing.T) { + tests := [][]string{ + // no urls given + {}, + // missing protocol scheme + {"://127.0.0.1:2379"}, + // unsupported scheme + {"mailto://127.0.0.1:2379"}, + // not conform to host:port + {"http://127.0.0.1"}, + // contain a path + {"http://127.0.0.1:2379/path"}, + } + for i, tt := range tests { + _, err := NewURLs(tt) + if err == nil { + t.Errorf("#%d: err = nil, but error", i) + } + } +} diff --git a/vendor/github.com/coreos/etcd/pkg/types/urlsmap.go b/vendor/github.com/coreos/etcd/pkg/types/urlsmap.go new file mode 100644 index 000000000..f7d5c1c90 --- /dev/null +++ b/vendor/github.com/coreos/etcd/pkg/types/urlsmap.go @@ -0,0 +1,91 @@ +// Copyright 2015 CoreOS, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package types
+
+import (
+	"fmt"
+	"sort"
+	"strings"
+)
+
+type URLsMap map[string]URLs
+
+// NewURLsMap returns a URLsMap instantiated from the given string,
+// which consists of discovery-formatted name-to-URL pairs, like:
+// mach0=http://1.1.1.1:2380,mach0=http://2.2.2.2:2380,mach1=http://3.3.3.3:2380,mach2=http://4.4.4.4:2380
+func NewURLsMap(s string) (URLsMap, error) {
+	m := parse(s)
+
+	cl := URLsMap{}
+	for name, urls := range m {
+		us, err := NewURLs(urls)
+		if err != nil {
+			return nil, err
+		}
+		cl[name] = us
+	}
+	return cl, nil
+}
+
+// String returns the map as discovery-formatted name-to-URL pairs, sorted by name.
+func (c URLsMap) String() string {
+	pairs := make([]string, 0)
+	for name, urls := range c {
+		for _, url := range urls {
+			pairs = append(pairs, fmt.Sprintf("%s=%s", name, url.String()))
+		}
+	}
+	sort.Strings(pairs)
+	return strings.Join(pairs, ",")
+}
+
+// URLs returns a list of all URLs.
+// The returned list is sorted in ascending lexicographical order.
+func (c URLsMap) URLs() []string {
+	urls := make([]string, 0)
+	for _, us := range c {
+		for _, u := range us {
+			urls = append(urls, u.String())
+		}
+	}
+	sort.Strings(urls)
+	return urls
+}
+
+func (c URLsMap) Len() int {
+	return len(c)
+}
+
+// parse parses the given string and returns a map listing the values specified for each key.
+func parse(s string) map[string][]string {
+	m := make(map[string][]string)
+	for s != "" {
+		key := s
+		if i := strings.IndexAny(key, ","); i >= 0 {
+			key, s = key[:i], key[i+1:]
+		} else {
+			s = ""
+		}
+		if key == "" {
+			continue
+		}
+		value := ""
+		if i := strings.Index(key, "="); i >= 0 {
+			key, value = key[:i], key[i+1:]
+		}
+		m[key] = append(m[key], value)
+	}
+	return m
+}
diff --git a/vendor/github.com/coreos/etcd/pkg/types/urlsmap_go15_test.go b/vendor/github.com/coreos/etcd/pkg/types/urlsmap_go15_test.go
new file mode 100644
index 000000000..1955bb9cb
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/pkg/types/urlsmap_go15_test.go
@@ -0,0 +1,40 @@
+// Copyright 2015 CoreOS, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// +build go1.5
+
+package types
+
+import (
+	"reflect"
+	"testing"
+
+	"github.com/coreos/etcd/pkg/testutil"
+)
+
+// This is only tested in Go1.5+ because Go1.4 doesn't support literal IPv6 address with zone in
+// URI (https://github.com/golang/go/issues/6530).
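+//
+// For reference, a sketch of the non-IPv6 case: repeated names accumulate
+// URLs, which NewURLs then sorts:
+//
+//	m, _ := NewURLsMap("mem1=http://10.0.0.1:2380,mem1=http://10.0.0.2:2380")
+//	m["mem1"].StringSlice() // ["http://10.0.0.1:2380" "http://10.0.0.2:2380"]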
+func TestNewURLsMapIPV6(t *testing.T) { + c, err := NewURLsMap("mem1=http://[2001:db8::1]:2380,mem1=http://[fe80::6e40:8ff:feb1:58e4%25en0]:2380,mem2=http://[fe80::92e2:baff:fe7c:3224%25ext0]:2380") + if err != nil { + t.Fatalf("unexpected parse error: %v", err) + } + wc := URLsMap(map[string]URLs{ + "mem1": testutil.MustNewURLs(t, []string{"http://[2001:db8::1]:2380", "http://[fe80::6e40:8ff:feb1:58e4%25en0]:2380"}), + "mem2": testutil.MustNewURLs(t, []string{"http://[fe80::92e2:baff:fe7c:3224%25ext0]:2380"}), + }) + if !reflect.DeepEqual(c, wc) { + t.Errorf("cluster = %#v, want %#v", c, wc) + } +} diff --git a/vendor/github.com/coreos/etcd/pkg/types/urlsmap_test.go b/vendor/github.com/coreos/etcd/pkg/types/urlsmap_test.go new file mode 100644 index 000000000..a01b5efa9 --- /dev/null +++ b/vendor/github.com/coreos/etcd/pkg/types/urlsmap_test.go @@ -0,0 +1,99 @@ +// Copyright 2015 CoreOS, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package types + +import ( + "reflect" + "testing" + + "github.com/coreos/etcd/pkg/testutil" +) + +func TestParseInitialCluster(t *testing.T) { + c, err := NewURLsMap("mem1=http://10.0.0.1:2379,mem1=http://128.193.4.20:2379,mem2=http://10.0.0.2:2379,default=http://127.0.0.1:2379") + if err != nil { + t.Fatalf("unexpected parse error: %v", err) + } + wc := URLsMap(map[string]URLs{ + "mem1": testutil.MustNewURLs(t, []string{"http://10.0.0.1:2379", "http://128.193.4.20:2379"}), + "mem2": testutil.MustNewURLs(t, []string{"http://10.0.0.2:2379"}), + "default": testutil.MustNewURLs(t, []string{"http://127.0.0.1:2379"}), + }) + if !reflect.DeepEqual(c, wc) { + t.Errorf("cluster = %+v, want %+v", c, wc) + } +} + +func TestParseInitialClusterBad(t *testing.T) { + tests := []string{ + // invalid URL + "%^", + // no URL defined for member + "mem1=,mem2=http://128.193.4.20:2379,mem3=http://10.0.0.2:2379", + "mem1,mem2=http://128.193.4.20:2379,mem3=http://10.0.0.2:2379", + // bad URL for member + "default=http://localhost/", + } + for i, tt := range tests { + if _, err := NewURLsMap(tt); err == nil { + t.Errorf("#%d: unexpected successful parse, want err", i) + } + } +} + +func TestNameURLPairsString(t *testing.T) { + cls := URLsMap(map[string]URLs{ + "abc": testutil.MustNewURLs(t, []string{"http://1.1.1.1:1111", "http://0.0.0.0:0000"}), + "def": testutil.MustNewURLs(t, []string{"http://2.2.2.2:2222"}), + "ghi": testutil.MustNewURLs(t, []string{"http://3.3.3.3:1234", "http://127.0.0.1:2380"}), + // no PeerURLs = not included + "four": testutil.MustNewURLs(t, []string{}), + "five": testutil.MustNewURLs(t, nil), + }) + w := "abc=http://0.0.0.0:0000,abc=http://1.1.1.1:1111,def=http://2.2.2.2:2222,ghi=http://127.0.0.1:2380,ghi=http://3.3.3.3:1234" + if g := cls.String(); g != w { + t.Fatalf("NameURLPairs.String():\ngot %#v\nwant %#v", g, w) + } +} + +func TestParse(t *testing.T) { + tests := []struct { + s string + wm map[string][]string + }{ + { + "", + map[string][]string{}, + }, + { + "a=b", + map[string][]string{"a": {"b"}}, + }, + { + "a=b,a=c", + 
map[string][]string{"a": {"b", "c"}},
+		},
+		{
+			"a=b,a1=c",
+			map[string][]string{"a": {"b"}, "a1": {"c"}},
+		},
+	}
+	for i, tt := range tests {
+		m := parse(tt.s)
+		if !reflect.DeepEqual(m, tt.wm) {
+			t.Errorf("#%d: m = %+v, want %+v", i, m, tt.wm)
+		}
+	}
+}
diff --git a/vendor/github.com/cyberdelia/heroku-go/v3/heroku.go b/vendor/github.com/cyberdelia/heroku-go/v3/heroku.go
new file mode 100644
index 000000000..dbfcb955b
--- /dev/null
+++ b/vendor/github.com/cyberdelia/heroku-go/v3/heroku.go
@@ -0,0 +1,1734 @@
+// Generated service client for the heroku API.
+//
+// To be able to interact with this API, you have to
+// create a new service:
+//
+//     s := heroku.NewService(nil)
+//
+// The Service struct has all the methods you need
+// to interact with the heroku API.
+//
+package heroku
+
+import (
+	"bytes"
+	"encoding/json"
+	"fmt"
+	"io"
+	"net/http"
+	"reflect"
+	"runtime"
+	"time"
+)
+
+const (
+	Version          = "v3"
+	DefaultAPIURL    = "https://api.heroku.com"
+	DefaultUserAgent = "heroku/" + Version + " (" + runtime.GOOS + "; " + runtime.GOARCH + ")"
+)
+
+// Service represents your API.
+type Service struct {
+	client *http.Client
+}
+
+// NewService creates a Service using the given http.Client; if none is
+// provided, it uses http.DefaultClient.
+func NewService(c *http.Client) *Service {
+	if c == nil {
+		c = http.DefaultClient
+	}
+	return &Service{
+		client: c,
+	}
+}
+
+// NewRequest generates an HTTP request, but does not perform the request.
+func (s *Service) NewRequest(method, path string, body interface{}) (*http.Request, error) {
+	var ctype string
+	var rbody io.Reader
+	switch t := body.(type) {
+	case nil:
+	case string:
+		rbody = bytes.NewBufferString(t)
+	case io.Reader:
+		rbody = t
+	default:
+		v := reflect.ValueOf(body)
+		if !v.IsValid() {
+			break
+		}
+		if v.Type().Kind() == reflect.Ptr {
+			v = reflect.Indirect(v)
+			if !v.IsValid() {
+				break
+			}
+		}
+		j, err := json.Marshal(body)
+		if err != nil {
+			return nil, err
+		}
+		rbody = bytes.NewReader(j)
+		ctype = "application/json"
+	}
+	req, err := http.NewRequest(method, DefaultAPIURL+path, rbody)
+	if err != nil {
+		return nil, err
+	}
+	req.Header.Set("Accept", "application/json")
+	req.Header.Set("User-Agent", DefaultUserAgent)
+	if ctype != "" {
+		req.Header.Set("Content-Type", ctype)
+	}
+	return req, nil
+}
+
+// Do sends a request and decodes the response into v.
+func (s *Service) Do(v interface{}, method, path string, body interface{}, lr *ListRange) error {
+	req, err := s.NewRequest(method, path, body)
+	if err != nil {
+		return err
+	}
+	if lr != nil {
+		lr.SetHeader(req)
+	}
+	resp, err := s.client.Do(req)
+	if err != nil {
+		return err
+	}
+	defer resp.Body.Close()
+	switch t := v.(type) {
+	case nil:
+	case io.Writer:
+		_, err = io.Copy(t, resp.Body)
+	default:
+		err = json.NewDecoder(resp.Body).Decode(v)
+	}
+	return err
+}
+
+// Get sends a GET request and decodes the response into v.
+func (s *Service) Get(v interface{}, path string, lr *ListRange) error {
+	return s.Do(v, "GET", path, nil, lr)
+}
+
+// Patch sends a PATCH request and decodes the response into v.
+func (s *Service) Patch(v interface{}, path string, body interface{}) error {
+	return s.Do(v, "PATCH", path, body, nil)
+}
+
+// Post sends a POST request and decodes the response into v.
+func (s *Service) Post(v interface{}, path string, body interface{}) error {
+	return s.Do(v, "POST", path, body, nil)
+}
+
+// Put sends a PUT request and decodes the response into v.
+func (s *Service) Put(v interface{}, path string, body interface{}) error {
+	return s.Do(v, "PUT", path, body, nil)
+}
+
+// Delete sends a DELETE request.
+func (s *Service) Delete(path string) error {
+	return s.Do(nil, "DELETE", path, nil, nil)
+}
+
+// ListRange describes a range.
+type ListRange struct {
+	Field      string
+	Max        int
+	Descending bool
+	FirstID    string
+	LastID     string
+}
+
+// SetHeader sets headers on the given Request.
+func (lr *ListRange) SetHeader(req *http.Request) {
+	var hdrval string
+	if lr.Field != "" {
+		hdrval += lr.Field + " "
+	}
+	hdrval += lr.FirstID + ".." + lr.LastID
+	if lr.Max != 0 {
+		hdrval += fmt.Sprintf("; max=%d", lr.Max)
+	}
+	if lr.Descending {
+		hdrval += ", order=desc"
+	}
+	req.Header.Set("Range", hdrval)
+	return
+}
+
+// Bool allocates a new bool value and returns a pointer to it.
+func Bool(v bool) *bool {
+	p := new(bool)
+	*p = v
+	return p
+}
+
+// Int allocates a new int value and returns a pointer to it.
+func Int(v int) *int {
+	p := new(int)
+	*p = v
+	return p
+}
+
+// Float64 allocates a new float64 value and returns a pointer to it.
+func Float64(v float64) *float64 {
+	p := new(float64)
+	*p = v
+	return p
+}
+
+// String allocates a new string value and returns a pointer to it.
+func String(v string) *string {
+	p := new(string)
+	*p = v
+	return p
+}
+
+// An account represents an individual signed up to use the Heroku
+// platform.
+type Account struct {
+	AllowTracking bool      `json:"allow_tracking"` // whether to allow third party web activity tracking
+	Beta          bool      `json:"beta"`           // whether allowed to utilize beta Heroku features
+	CreatedAt     time.Time `json:"created_at"`     // when account was created
+	Email         string    `json:"email"`          // unique email address of account
+	ID            string    `json:"id"`             // unique identifier of an account
+	LastLogin     time.Time `json:"last_login"`     // when account last authorized with Heroku
+	Name          *string   `json:"name"`           // full name of the account owner
+	UpdatedAt     time.Time `json:"updated_at"`     // when account was updated
+	Verified      bool      `json:"verified"`       // whether account has been verified with billing information
+}
+
+// Info for account.
+func (s *Service) AccountInfo() (*Account, error) {
+	var account Account
+	return &account, s.Get(&account, fmt.Sprintf("/account"), nil)
+}
+
+type AccountUpdateOpts struct {
+	AllowTracking *bool   `json:"allow_tracking,omitempty"` // whether to allow third party web activity tracking
+	Beta          *bool   `json:"beta,omitempty"`           // whether allowed to utilize beta Heroku features
+	Name          *string `json:"name,omitempty"`           // full name of the account owner
+	Password      string  `json:"password"`                 // current password on the account
+}
+
+// Update account.
+func (s *Service) AccountUpdate(o struct {
+	AllowTracking *bool   `json:"allow_tracking,omitempty"` // whether to allow third party web activity tracking
+	Beta          *bool   `json:"beta,omitempty"`           // whether allowed to utilize beta Heroku features
+	Name          *string `json:"name,omitempty"`           // full name of the account owner
+	Password      string  `json:"password"`                 // current password on the account
+}) (*Account, error) {
+	var account Account
+	return &account, s.Patch(&account, fmt.Sprintf("/account"), o)
+}
+
+type AccountChangeEmailOpts struct {
+	Email    string `json:"email"`    // unique email address of account
+	Password string `json:"password"` // current password on the account
+}
+
+// Change Email for account.
+func (s *Service) AccountChangeEmail(o struct { + Email string `json:"email"` // unique email address of account + Password string `json:"password"` // current password on the account +}) (*Account, error) { + var account Account + return &account, s.Patch(&account, fmt.Sprintf("/account"), o) +} + +type AccountChangePasswordOpts struct { + NewPassword string `json:"new_password"` // the new password for the account when changing the password + Password string `json:"password"` // current password on the account +} + +// Change Password for account. +func (s *Service) AccountChangePassword(o struct { + NewPassword string `json:"new_password"` // the new password for the account when changing the password + Password string `json:"password"` // current password on the account +}) (*Account, error) { + var account Account + return &account, s.Patch(&account, fmt.Sprintf("/account"), o) +} + +// An account feature represents a Heroku labs capability that can be +// enabled or disabled for an account on Heroku. +type AccountFeature struct { + CreatedAt time.Time `json:"created_at"` // when account feature was created + Description string `json:"description"` // description of account feature + DocURL string `json:"doc_url"` // documentation URL of account feature + Enabled bool `json:"enabled"` // whether or not account feature has been enabled + ID string `json:"id"` // unique identifier of account feature + Name string `json:"name"` // unique name of account feature + State string `json:"state"` // state of account feature + UpdatedAt time.Time `json:"updated_at"` // when account feature was updated +} + +// Info for an existing account feature. +func (s *Service) AccountFeatureInfo(accountFeatureIdentity string) (*AccountFeature, error) { + var accountFeature AccountFeature + return &accountFeature, s.Get(&accountFeature, fmt.Sprintf("/account/features/%v", accountFeatureIdentity), nil) +} + +// List existing account features. +func (s *Service) AccountFeatureList(lr *ListRange) ([]*AccountFeature, error) { + var accountFeatureList []*AccountFeature + return accountFeatureList, s.Get(&accountFeatureList, fmt.Sprintf("/account/features"), lr) +} + +type AccountFeatureUpdateOpts struct { + Enabled bool `json:"enabled"` // whether or not account feature has been enabled +} + +// Update an existing account feature. +func (s *Service) AccountFeatureUpdate(accountFeatureIdentity string, o struct { + Enabled bool `json:"enabled"` // whether or not account feature has been enabled +}) (*AccountFeature, error) { + var accountFeature AccountFeature + return &accountFeature, s.Patch(&accountFeature, fmt.Sprintf("/account/features/%v", accountFeatureIdentity), o) +} + +// Add-ons represent add-ons that have been provisioned for an app. 
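+//
+// The generated methods take their options as anonymous structs; a caller
+// can pass a literal of the same shape, e.g. (a sketch, with a hypothetical
+// app and plan name):
+//
+//	addon, err := s.AddonCreate("example-app", struct {
+//		Config *map[string]string `json:"config,omitempty"`
+//		Plan   string             `json:"plan"`
+//	}{Plan: "heroku-postgresql:dev"})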
+type Addon struct { + AddonService struct { + ID string `json:"id"` // unique identifier of this addon-service + Name string `json:"name"` // unique name of this addon-service + } `json:"addon_service"` // identity of add-on service + ConfigVars []string `json:"config_vars"` // config vars associated with this application + CreatedAt time.Time `json:"created_at"` // when add-on was updated + ID string `json:"id"` // unique identifier of add-on + Name string `json:"name"` // name of the add-on unique within its app + Plan struct { + ID string `json:"id"` // unique identifier of this plan + Name string `json:"name"` // unique name of this plan + } `json:"plan"` // identity of add-on plan + ProviderID string `json:"provider_id"` // id of this add-on with its provider + UpdatedAt time.Time `json:"updated_at"` // when add-on was updated +} +type AddonCreateOpts struct { + Config *map[string]string `json:"config,omitempty"` // custom add-on provisioning options + Plan string `json:"plan"` // unique identifier of this plan +} + +// Create a new add-on. +func (s *Service) AddonCreate(appIdentity string, o struct { + Config *map[string]string `json:"config,omitempty"` // custom add-on provisioning options + Plan string `json:"plan"` // unique identifier of this plan +}) (*Addon, error) { + var addon Addon + return &addon, s.Post(&addon, fmt.Sprintf("/apps/%v/addons", appIdentity), o) +} + +// Delete an existing add-on. +func (s *Service) AddonDelete(appIdentity string, addonIdentity string) error { + return s.Delete(fmt.Sprintf("/apps/%v/addons/%v", appIdentity, addonIdentity)) +} + +// Info for an existing add-on. +func (s *Service) AddonInfo(appIdentity string, addonIdentity string) (*Addon, error) { + var addon Addon + return &addon, s.Get(&addon, fmt.Sprintf("/apps/%v/addons/%v", appIdentity, addonIdentity), nil) +} + +// List existing add-ons. +func (s *Service) AddonList(appIdentity string, lr *ListRange) ([]*Addon, error) { + var addonList []*Addon + return addonList, s.Get(&addonList, fmt.Sprintf("/apps/%v/addons", appIdentity), lr) +} + +type AddonUpdateOpts struct { + Plan string `json:"plan"` // unique identifier of this plan +} + +// Change add-on plan. Some add-ons may not support changing plans. In +// that case, an error will be returned. +func (s *Service) AddonUpdate(appIdentity string, addonIdentity string, o struct { + Plan string `json:"plan"` // unique identifier of this plan +}) (*Addon, error) { + var addon Addon + return &addon, s.Patch(&addon, fmt.Sprintf("/apps/%v/addons/%v", appIdentity, addonIdentity), o) +} + +// Add-on services represent add-ons that may be provisioned for apps. +// Endpoints under add-on services can be accessed without +// authentication. +type AddonService struct { + CreatedAt time.Time `json:"created_at"` // when addon-service was created + ID string `json:"id"` // unique identifier of this addon-service + Name string `json:"name"` // unique name of this addon-service + UpdatedAt time.Time `json:"updated_at"` // when addon-service was updated +} + +// Info for existing addon-service. +func (s *Service) AddonServiceInfo(addonServiceIdentity string) (*AddonService, error) { + var addonService AddonService + return &addonService, s.Get(&addonService, fmt.Sprintf("/addon-services/%v", addonServiceIdentity), nil) +} + +// List existing addon-services. 
+func (s *Service) AddonServiceList(lr *ListRange) ([]*AddonService, error) { + var addonServiceList []*AddonService + return addonServiceList, s.Get(&addonServiceList, fmt.Sprintf("/addon-services"), lr) +} + +// An app represents the program that you would like to deploy and run +// on Heroku. +type App struct { + ArchivedAt *time.Time `json:"archived_at"` // when app was archived + BuildpackProvidedDescription *string `json:"buildpack_provided_description"` // description from buildpack of app + CreatedAt time.Time `json:"created_at"` // when app was created + GitURL string `json:"git_url"` // git repo URL of app + ID string `json:"id"` // unique identifier of app + Maintenance bool `json:"maintenance"` // maintenance status of app + Name string `json:"name"` // unique name of app + Owner struct { + Email string `json:"email"` // unique email address of account + ID string `json:"id"` // unique identifier of an account + } `json:"owner"` // identity of app owner + Region struct { + ID string `json:"id"` // unique identifier of region + Name string `json:"name"` // unique name of region + } `json:"region"` // identity of app region + ReleasedAt *time.Time `json:"released_at"` // when app was released + RepoSize *int `json:"repo_size"` // git repo size in bytes of app + SlugSize *int `json:"slug_size"` // slug size in bytes of app + Stack struct { + ID string `json:"id"` // unique identifier of stack + Name string `json:"name"` // unique name of stack + } `json:"stack"` // identity of app stack + UpdatedAt time.Time `json:"updated_at"` // when app was updated + WebURL string `json:"web_url"` // web URL of app +} +type AppCreateOpts struct { + Name *string `json:"name,omitempty"` // unique name of app + Region *string `json:"region,omitempty"` // unique identifier of region + Stack *string `json:"stack,omitempty"` // unique name of stack +} + +// Create a new app. +func (s *Service) AppCreate(o struct { + Name *string `json:"name,omitempty"` // unique name of app + Region *string `json:"region,omitempty"` // unique identifier of region + Stack *string `json:"stack,omitempty"` // unique name of stack +}) (*App, error) { + var app App + return &app, s.Post(&app, fmt.Sprintf("/apps"), o) +} + +// Delete an existing app. +func (s *Service) AppDelete(appIdentity string) error { + return s.Delete(fmt.Sprintf("/apps/%v", appIdentity)) +} + +// Info for existing app. +func (s *Service) AppInfo(appIdentity string) (*App, error) { + var app App + return &app, s.Get(&app, fmt.Sprintf("/apps/%v", appIdentity), nil) +} + +// List existing apps. +func (s *Service) AppList(lr *ListRange) ([]*App, error) { + var appList []*App + return appList, s.Get(&appList, fmt.Sprintf("/apps"), lr) +} + +type AppUpdateOpts struct { + Maintenance *bool `json:"maintenance,omitempty"` // maintenance status of app + Name *string `json:"name,omitempty"` // unique name of app +} + +// Update an existing app. +func (s *Service) AppUpdate(appIdentity string, o struct { + Maintenance *bool `json:"maintenance,omitempty"` // maintenance status of app + Name *string `json:"name,omitempty"` // unique name of app +}) (*App, error) { + var app App + return &app, s.Patch(&app, fmt.Sprintf("/apps/%v", appIdentity), o) +} + +// An app feature represents a Heroku labs capability that can be +// enabled or disabled for an app on Heroku. 
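+//
+// For example, toggling a feature on (a sketch; app and feature names are
+// hypothetical):
+//
+//	feature, err := s.AppFeatureUpdate("example-app", "log-runtime-metrics", struct {
+//		Enabled bool `json:"enabled"`
+//	}{Enabled: true})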
+type AppFeature struct { + CreatedAt time.Time `json:"created_at"` // when app feature was created + Description string `json:"description"` // description of app feature + DocURL string `json:"doc_url"` // documentation URL of app feature + Enabled bool `json:"enabled"` // whether or not app feature has been enabled + ID string `json:"id"` // unique identifier of app feature + Name string `json:"name"` // unique name of app feature + State string `json:"state"` // state of app feature + UpdatedAt time.Time `json:"updated_at"` // when app feature was updated +} + +// Info for an existing app feature. +func (s *Service) AppFeatureInfo(appIdentity string, appFeatureIdentity string) (*AppFeature, error) { + var appFeature AppFeature + return &appFeature, s.Get(&appFeature, fmt.Sprintf("/apps/%v/features/%v", appIdentity, appFeatureIdentity), nil) +} + +// List existing app features. +func (s *Service) AppFeatureList(appIdentity string, lr *ListRange) ([]*AppFeature, error) { + var appFeatureList []*AppFeature + return appFeatureList, s.Get(&appFeatureList, fmt.Sprintf("/apps/%v/features", appIdentity), lr) +} + +type AppFeatureUpdateOpts struct { + Enabled bool `json:"enabled"` // whether or not app feature has been enabled +} + +// Update an existing app feature. +func (s *Service) AppFeatureUpdate(appIdentity string, appFeatureIdentity string, o struct { + Enabled bool `json:"enabled"` // whether or not app feature has been enabled +}) (*AppFeature, error) { + var appFeature AppFeature + return &appFeature, s.Patch(&appFeature, fmt.Sprintf("/apps/%v/features/%v", appIdentity, appFeatureIdentity), o) +} + +// An app setup represents an app on Heroku that is setup using an +// environment, addons, and scripts described in an app.json manifest +// file. +type AppSetup struct { + App struct { + ID string `json:"id"` // unique identifier of app + Name string `json:"name"` // unique name of app + } `json:"app"` // identity of app + Build struct { + ID string `json:"id"` // unique identifier of build + Status string `json:"status"` // status of build + } `json:"build"` // identity and status of build + CreatedAt time.Time `json:"created_at"` // when app setup was created + FailureMessage *string `json:"failure_message"` // reason that app setup has failed + ID string `json:"id"` // unique identifier of app setup + ManifestErrors []string `json:"manifest_errors"` // errors associated with invalid app.json manifest file + Postdeploy *struct { + ExitCode int `json:"exit_code"` // The exit code of the postdeploy script + Output string `json:"output"` // output of the postdeploy script + } `json:"postdeploy"` // result of postdeploy script + ResolvedSuccessURL *string `json:"resolved_success_url"` // fully qualified success url + Status string `json:"status"` // the overall status of app setup + UpdatedAt time.Time `json:"updated_at"` // when app setup was updated +} +type AppSetupCreateOpts struct { + App *struct { + Locked *bool `json:"locked,omitempty"` // are other organization members forbidden from joining this app. + Name *string `json:"name,omitempty"` // unique name of app + Organization *string `json:"organization,omitempty"` // unique name of organization + Personal *bool `json:"personal,omitempty"` // force creation of the app in the user account even if a default org + // is set. 
+ Region *string `json:"region,omitempty"` // unique name of region + Stack *string `json:"stack,omitempty"` // unique name of stack + } `json:"app,omitempty"` // optional parameters for created app + Overrides *struct { + Env *map[string]string `json:"env,omitempty"` // overrides of the env specified in the app.json manifest file + } `json:"overrides,omitempty"` // overrides of keys in the app.json manifest file + SourceBlob struct { + URL *string `json:"url,omitempty"` // URL of gzipped tarball of source code containing app.json manifest + // file + } `json:"source_blob"` // gzipped tarball of source code containing app.json manifest file +} + +// Create a new app setup from a gzipped tar archive containing an +// app.json manifest file. +func (s *Service) AppSetupCreate(o struct { + App *struct { + Locked *bool `json:"locked,omitempty"` // are other organization members forbidden from joining this app. + Name *string `json:"name,omitempty"` // unique name of app + Organization *string `json:"organization,omitempty"` // unique name of organization + Personal *bool `json:"personal,omitempty"` // force creation of the app in the user account even if a default org + // is set. + Region *string `json:"region,omitempty"` // unique name of region + Stack *string `json:"stack,omitempty"` // unique name of stack + } `json:"app,omitempty"` // optional parameters for created app + Overrides *struct { + Env *map[string]string `json:"env,omitempty"` // overrides of the env specified in the app.json manifest file + } `json:"overrides,omitempty"` // overrides of keys in the app.json manifest file + SourceBlob struct { + URL *string `json:"url,omitempty"` // URL of gzipped tarball of source code containing app.json manifest + // file + } `json:"source_blob"` // gzipped tarball of source code containing app.json manifest file +}) (*AppSetup, error) { + var appSetup AppSetup + return &appSetup, s.Post(&appSetup, fmt.Sprintf("/app-setups"), o) +} + +// Get the status of an app setup. +func (s *Service) AppSetupInfo(appSetupIdentity string) (*AppSetup, error) { + var appSetup AppSetup + return &appSetup, s.Get(&appSetup, fmt.Sprintf("/app-setups/%v", appSetupIdentity), nil) +} + +// An app transfer represents a two party interaction for transferring +// ownership of an app. +type AppTransfer struct { + App struct { + ID string `json:"id"` // unique identifier of app + Name string `json:"name"` // unique name of app + } `json:"app"` // app involved in the transfer + CreatedAt time.Time `json:"created_at"` // when app transfer was created + ID string `json:"id"` // unique identifier of app transfer + Owner struct { + Email string `json:"email"` // unique email address of account + ID string `json:"id"` // unique identifier of an account + } `json:"owner"` // identity of the owner of the transfer + Recipient struct { + Email string `json:"email"` // unique email address of account + ID string `json:"id"` // unique identifier of an account + } `json:"recipient"` // identity of the recipient of the transfer + State string `json:"state"` // the current state of an app transfer + UpdatedAt time.Time `json:"updated_at"` // when app transfer was updated +} +type AppTransferCreateOpts struct { + App string `json:"app"` // unique identifier of app + Recipient string `json:"recipient"` // unique email address of account +} + +// Create a new app transfer. 
+func (s *Service) AppTransferCreate(o struct { + App string `json:"app"` // unique identifier of app + Recipient string `json:"recipient"` // unique email address of account +}) (*AppTransfer, error) { + var appTransfer AppTransfer + return &appTransfer, s.Post(&appTransfer, fmt.Sprintf("/account/app-transfers"), o) +} + +// Delete an existing app transfer +func (s *Service) AppTransferDelete(appTransferIdentity string) error { + return s.Delete(fmt.Sprintf("/account/app-transfers/%v", appTransferIdentity)) +} + +// Info for existing app transfer. +func (s *Service) AppTransferInfo(appTransferIdentity string) (*AppTransfer, error) { + var appTransfer AppTransfer + return &appTransfer, s.Get(&appTransfer, fmt.Sprintf("/account/app-transfers/%v", appTransferIdentity), nil) +} + +// List existing apps transfers. +func (s *Service) AppTransferList(lr *ListRange) ([]*AppTransfer, error) { + var appTransferList []*AppTransfer + return appTransferList, s.Get(&appTransferList, fmt.Sprintf("/account/app-transfers"), lr) +} + +type AppTransferUpdateOpts struct { + State string `json:"state"` // the current state of an app transfer +} + +// Update an existing app transfer. +func (s *Service) AppTransferUpdate(appTransferIdentity string, o struct { + State string `json:"state"` // the current state of an app transfer +}) (*AppTransfer, error) { + var appTransfer AppTransfer + return &appTransfer, s.Patch(&appTransfer, fmt.Sprintf("/account/app-transfers/%v", appTransferIdentity), o) +} + +// A build represents the process of transforming a code tarball into a +// slug +type Build struct { + CreatedAt time.Time `json:"created_at"` // when build was created + ID string `json:"id"` // unique identifier of build + Slug *struct { + ID string `json:"id"` // unique identifier of slug + } `json:"slug"` // slug created by this build + SourceBlob struct { + URL string `json:"url"` // URL where gzipped tar archive of source code for build was + // downloaded. + Version *string `json:"version"` // Version of the gzipped tarball. + } `json:"source_blob"` // location of gzipped tarball of source code used to create build + Status string `json:"status"` // status of build + UpdatedAt time.Time `json:"updated_at"` // when build was updated + User struct { + Email string `json:"email"` // unique email address of account + ID string `json:"id"` // unique identifier of an account + } `json:"user"` // user that started the build +} +type BuildCreateOpts struct { + SourceBlob struct { + URL *string `json:"url,omitempty"` // URL where gzipped tar archive of source code for build was + // downloaded. + Version *string `json:"version,omitempty"` // Version of the gzipped tarball. + } `json:"source_blob"` // location of gzipped tarball of source code used to create build +} + +// Create a new build. +func (s *Service) BuildCreate(appIdentity string, o struct { + SourceBlob struct { + URL *string `json:"url,omitempty"` // URL where gzipped tar archive of source code for build was + // downloaded. + Version *string `json:"version,omitempty"` // Version of the gzipped tarball. + } `json:"source_blob"` // location of gzipped tarball of source code used to create build +}) (*Build, error) { + var build Build + return &build, s.Post(&build, fmt.Sprintf("/apps/%v/builds", appIdentity), o) +} + +// Info for existing build. 
+func (s *Service) BuildInfo(appIdentity string, buildIdentity string) (*Build, error) { + var build Build + return &build, s.Get(&build, fmt.Sprintf("/apps/%v/builds/%v", appIdentity, buildIdentity), nil) +} + +// List existing builds. +func (s *Service) BuildList(appIdentity string, lr *ListRange) ([]*Build, error) { + var buildList []*Build + return buildList, s.Get(&buildList, fmt.Sprintf("/apps/%v/builds", appIdentity), lr) +} + +// A build result contains the output from a build. +type BuildResult struct { + Build struct { + ID string `json:"id"` // unique identifier of build + Status string `json:"status"` // status of build + } `json:"build"` // identity of build + ExitCode float64 `json:"exit_code"` // status from the build + Lines []struct { + Line string `json:"line"` // A line of output from the build. + Stream string `json:"stream"` // The output stream where the line was sent. + } `json:"lines"` // A list of all the lines of a build's output. +} + +// Info for existing result. +func (s *Service) BuildResultInfo(appIdentity string, buildIdentity string) (*BuildResult, error) { + var buildResult BuildResult + return &buildResult, s.Get(&buildResult, fmt.Sprintf("/apps/%v/builds/%v/result", appIdentity, buildIdentity), nil) +} + +// A collaborator represents an account that has been given access to an +// app on Heroku. +type Collaborator struct { + CreatedAt time.Time `json:"created_at"` // when collaborator was created + ID string `json:"id"` // unique identifier of collaborator + UpdatedAt time.Time `json:"updated_at"` // when collaborator was updated + User struct { + Email string `json:"email"` // unique email address of account + ID string `json:"id"` // unique identifier of an account + } `json:"user"` // identity of collaborated account +} +type CollaboratorCreateOpts struct { + Silent *bool `json:"silent,omitempty"` // whether to suppress email invitation when creating collaborator + User string `json:"user"` // unique email address of account +} + +// Create a new collaborator. +func (s *Service) CollaboratorCreate(appIdentity string, o struct { + Silent *bool `json:"silent,omitempty"` // whether to suppress email invitation when creating collaborator + User string `json:"user"` // unique email address of account +}) (*Collaborator, error) { + var collaborator Collaborator + return &collaborator, s.Post(&collaborator, fmt.Sprintf("/apps/%v/collaborators", appIdentity), o) +} + +// Delete an existing collaborator. +func (s *Service) CollaboratorDelete(appIdentity string, collaboratorIdentity string) error { + return s.Delete(fmt.Sprintf("/apps/%v/collaborators/%v", appIdentity, collaboratorIdentity)) +} + +// Info for existing collaborator. +func (s *Service) CollaboratorInfo(appIdentity string, collaboratorIdentity string) (*Collaborator, error) { + var collaborator Collaborator + return &collaborator, s.Get(&collaborator, fmt.Sprintf("/apps/%v/collaborators/%v", appIdentity, collaboratorIdentity), nil) +} + +// List existing collaborators. +func (s *Service) CollaboratorList(appIdentity string, lr *ListRange) ([]*Collaborator, error) { + var collaboratorList []*Collaborator + return collaboratorList, s.Get(&collaboratorList, fmt.Sprintf("/apps/%v/collaborators", appIdentity), lr) +} + +// Config Vars allow you to manage the configuration information +// provided to an app on Heroku. +type ConfigVar map[string]string + +// Get config-vars for app. 
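+// An illustrative sketch (assumed Service value s, placeholder names) of
+// reading config vars and of the null-to-unset behaviour documented on
+// ConfigVarUpdate below:
+//
+//	vars, err := s.ConfigVarInfo("example-app") // current values
+//	if err == nil {
+//		v := "production"
+//		vars, err = s.ConfigVarUpdate("example-app", map[string]*string{
+//			"RAILS_ENV": &v,  // set or replace a var
+//			"OLD_KEY":   nil, // nil marshals to JSON null, which unsets the var
+//		})
+//	}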
+func (s *Service) ConfigVarInfo(appIdentity string) (map[string]string, error) { + var configVar ConfigVar + return configVar, s.Get(&configVar, fmt.Sprintf("/apps/%v/config-vars", appIdentity), nil) +} + +type ConfigVarUpdateOpts map[string]*string + +// Update config-vars for app. You can update existing config-vars by +// setting them again, and remove by setting it to `NULL`. +func (s *Service) ConfigVarUpdate(appIdentity string, o map[string]*string) (map[string]string, error) { + var configVar ConfigVar + return configVar, s.Patch(&configVar, fmt.Sprintf("/apps/%v/config-vars", appIdentity), o) +} + +// A credit represents value that will be used up before further charges +// are assigned to an account. +type Credit struct { + Amount float64 `json:"amount"` // total value of credit in cents + Balance float64 `json:"balance"` // remaining value of credit in cents + CreatedAt time.Time `json:"created_at"` // when credit was created + ExpiresAt time.Time `json:"expires_at"` // when credit will expire + ID string `json:"id"` // unique identifier of credit + Title string `json:"title"` // a name for credit + UpdatedAt time.Time `json:"updated_at"` // when credit was updated +} + +// Info for existing credit. +func (s *Service) CreditInfo(creditIdentity string) (*Credit, error) { + var credit Credit + return &credit, s.Get(&credit, fmt.Sprintf("/account/credits/%v", creditIdentity), nil) +} + +// List existing credits. +func (s *Service) CreditList(lr *ListRange) ([]*Credit, error) { + var creditList []*Credit + return creditList, s.Get(&creditList, fmt.Sprintf("/account/credits"), lr) +} + +// Domains define what web routes should be routed to an app on Heroku. +type Domain struct { + CreatedAt time.Time `json:"created_at"` // when domain was created + Hostname string `json:"hostname"` // full hostname + ID string `json:"id"` // unique identifier of this domain + UpdatedAt time.Time `json:"updated_at"` // when domain was updated +} +type DomainCreateOpts struct { + Hostname string `json:"hostname"` // full hostname +} + +// Create a new domain. +func (s *Service) DomainCreate(appIdentity string, o struct { + Hostname string `json:"hostname"` // full hostname +}) (*Domain, error) { + var domain Domain + return &domain, s.Post(&domain, fmt.Sprintf("/apps/%v/domains", appIdentity), o) +} + +// Delete an existing domain +func (s *Service) DomainDelete(appIdentity string, domainIdentity string) error { + return s.Delete(fmt.Sprintf("/apps/%v/domains/%v", appIdentity, domainIdentity)) +} + +// Info for existing domain. +func (s *Service) DomainInfo(appIdentity string, domainIdentity string) (*Domain, error) { + var domain Domain + return &domain, s.Get(&domain, fmt.Sprintf("/apps/%v/domains/%v", appIdentity, domainIdentity), nil) +} + +// List existing domains. +func (s *Service) DomainList(appIdentity string, lr *ListRange) ([]*Domain, error) { + var domainList []*Domain + return domainList, s.Get(&domainList, fmt.Sprintf("/apps/%v/domains", appIdentity), lr) +} + +// Dynos encapsulate running processes of an app on Heroku. 
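+// For example (a sketch with an assumed Service s and a placeholder
+// app), a one-off attached dyno can be started with DynoCreate, defined
+// below:
+//
+//	attach := true
+//	dyno, err := s.DynoCreate("example-app", DynoCreateOpts{
+//		Command: "bash",
+//		Attach:  &attach,
+//	})
+//	// dyno.AttachURL, when non-nil, is the URL to stream the session from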
+type Dyno struct { + AttachURL *string `json:"attach_url"` // a URL to stream output from for attached processes or null for + // non-attached processes + Command string `json:"command"` // command used to start this process + CreatedAt time.Time `json:"created_at"` // when dyno was created + ID string `json:"id"` // unique identifier of this dyno + Name string `json:"name"` // the name of this process on this dyno + Release struct { + ID string `json:"id"` // unique identifier of release + Version int `json:"version"` // unique version assigned to the release + } `json:"release"` // app release of the dyno + Size string `json:"size"` // dyno size (default: "1X") + State string `json:"state"` // current status of process (either: crashed, down, idle, starting, or + // up) + Type string `json:"type"` // type of process + UpdatedAt time.Time `json:"updated_at"` // when process last changed state +} +type DynoCreateOpts struct { + Attach *bool `json:"attach,omitempty"` // whether to stream output or not + Command string `json:"command"` // command used to start this process + Env *map[string]string `json:"env,omitempty"` // custom environment to add to the dyno config vars + Size *string `json:"size,omitempty"` // dyno size (default: "1X") +} + +// Create a new dyno. +func (s *Service) DynoCreate(appIdentity string, o struct { + Attach *bool `json:"attach,omitempty"` // whether to stream output or not + Command string `json:"command"` // command used to start this process + Env *map[string]string `json:"env,omitempty"` // custom environment to add to the dyno config vars + Size *string `json:"size,omitempty"` // dyno size (default: "1X") +}) (*Dyno, error) { + var dyno Dyno + return &dyno, s.Post(&dyno, fmt.Sprintf("/apps/%v/dynos", appIdentity), o) +} + +// Restart dyno. +func (s *Service) DynoRestart(appIdentity string, dynoIdentity string) error { + return s.Delete(fmt.Sprintf("/apps/%v/dynos/%v", appIdentity, dynoIdentity)) +} + +// Restart all dynos +func (s *Service) DynoRestartAll(appIdentity string) error { + return s.Delete(fmt.Sprintf("/apps/%v/dynos", appIdentity)) +} + +// Info for existing dyno. +func (s *Service) DynoInfo(appIdentity string, dynoIdentity string) (*Dyno, error) { + var dyno Dyno + return &dyno, s.Get(&dyno, fmt.Sprintf("/apps/%v/dynos/%v", appIdentity, dynoIdentity), nil) +} + +// List existing dynos. +func (s *Service) DynoList(appIdentity string, lr *ListRange) ([]*Dyno, error) { + var dynoList []*Dyno + return dynoList, s.Get(&dynoList, fmt.Sprintf("/apps/%v/dynos", appIdentity), lr) +} + +// The formation of processes that should be maintained for an app. +// Update the formation to scale processes or change dyno sizes. +// Available process type names and commands are defined by the +// `process_types` attribute for the [slug](#slug) currently released on +// an app. 
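+// A hedged scaling example (assumed Service s; "web" and the values are
+// placeholders) using FormationUpdate, defined below:
+//
+//	qty := 2
+//	size := "2X"
+//	f, err := s.FormationUpdate("example-app", "web", FormationUpdateOpts{
+//		Quantity: &qty,
+//		Size:     &size,
+//	})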
+type Formation struct { + Command string `json:"command"` // command to use to launch this process + CreatedAt time.Time `json:"created_at"` // when process type was created + ID string `json:"id"` // unique identifier of this process type + Quantity int `json:"quantity"` // number of processes to maintain + Size string `json:"size"` // dyno size (default: "1X") + Type string `json:"type"` // type of process to maintain + UpdatedAt time.Time `json:"updated_at"` // when dyno type was updated +} + +// Info for a process type +func (s *Service) FormationInfo(appIdentity string, formationIdentity string) (*Formation, error) { + var formation Formation + return &formation, s.Get(&formation, fmt.Sprintf("/apps/%v/formation/%v", appIdentity, formationIdentity), nil) +} + +// List process type formation +func (s *Service) FormationList(appIdentity string, lr *ListRange) ([]*Formation, error) { + var formationList []*Formation + return formationList, s.Get(&formationList, fmt.Sprintf("/apps/%v/formation", appIdentity), lr) +} + +type FormationBatchUpdateOpts struct { + Updates []struct { + Process string `json:"process"` // unique identifier of this process type + Quantity *int `json:"quantity,omitempty"` // number of processes to maintain + Size *string `json:"size,omitempty"` // dyno size (default: "1X") + } `json:"updates"` // Array with formation updates. Each element must have "process", the + // id or name of the process type to be updated, and can optionally + // update its "quantity" or "size". +} + +// Batch update process types +func (s *Service) FormationBatchUpdate(appIdentity string, o struct { + Updates []struct { + Process string `json:"process"` // unique identifier of this process type + Quantity *int `json:"quantity,omitempty"` // number of processes to maintain + Size *string `json:"size,omitempty"` // dyno size (default: "1X") + } `json:"updates"` // Array with formation updates. Each element must have "process", the + // id or name of the process type to be updated, and can optionally + // update its "quantity" or "size". +}) (*Formation, error) { + var formation Formation + return &formation, s.Patch(&formation, fmt.Sprintf("/apps/%v/formation", appIdentity), o) +} + +type FormationUpdateOpts struct { + Quantity *int `json:"quantity,omitempty"` // number of processes to maintain + Size *string `json:"size,omitempty"` // dyno size (default: "1X") +} + +// Update process type +func (s *Service) FormationUpdate(appIdentity string, formationIdentity string, o struct { + Quantity *int `json:"quantity,omitempty"` // number of processes to maintain + Size *string `json:"size,omitempty"` // dyno size (default: "1X") +}) (*Formation, error) { + var formation Formation + return &formation, s.Patch(&formation, fmt.Sprintf("/apps/%v/formation/%v", appIdentity, formationIdentity), o) +} + +// Keys represent public SSH keys associated with an account and are +// used to authorize accounts as they are performing git operations. +type Key struct { + Comment string `json:"comment"` // comment on the key + CreatedAt time.Time `json:"created_at"` // when key was created + Email string `json:"email"` // deprecated. 
Please refer to 'comment' instead + Fingerprint string `json:"fingerprint"` // a unique identifying string based on contents + ID string `json:"id"` // unique identifier of this key + PublicKey string `json:"public_key"` // full public_key as uploaded + UpdatedAt time.Time `json:"updated_at"` // when key was updated +} +type KeyCreateOpts struct { + PublicKey string `json:"public_key"` // full public_key as uploaded +} + +// Create a new key. +func (s *Service) KeyCreate(o struct { + PublicKey string `json:"public_key"` // full public_key as uploaded +}) (*Key, error) { + var key Key + return &key, s.Post(&key, fmt.Sprintf("/account/keys"), o) +} + +// Delete an existing key +func (s *Service) KeyDelete(keyIdentity string) error { + return s.Delete(fmt.Sprintf("/account/keys/%v", keyIdentity)) +} + +// Info for existing key. +func (s *Service) KeyInfo(keyIdentity string) (*Key, error) { + var key Key + return &key, s.Get(&key, fmt.Sprintf("/account/keys/%v", keyIdentity), nil) +} + +// List existing keys. +func (s *Service) KeyList(lr *ListRange) ([]*Key, error) { + var keyList []*Key + return keyList, s.Get(&keyList, fmt.Sprintf("/account/keys"), lr) +} + +// [Log +// drains](https://devcenter.heroku.com/articles/logging#syslog-drains) +// provide a way to forward your Heroku logs to an external syslog +// server for long-term archiving. This external service must be +// configured to receive syslog packets from Heroku, whereupon its URL +// can be added to an app using this API. Some addons will add a log +// drain when they are provisioned to an app. These drains can only be +// removed by removing the add-on. +type LogDrain struct { + Addon *struct { + ID string `json:"id"` // unique identifier of add-on + } `json:"addon"` // addon that created the drain + CreatedAt time.Time `json:"created_at"` // when log drain was created + ID string `json:"id"` // unique identifier of this log drain + Token string `json:"token"` // token associated with the log drain + UpdatedAt time.Time `json:"updated_at"` // when log drain was updated + URL string `json:"url"` // url associated with the log drain +} +type LogDrainCreateOpts struct { + URL string `json:"url"` // url associated with the log drain +} + +// Create a new log drain. +func (s *Service) LogDrainCreate(appIdentity string, o struct { + URL string `json:"url"` // url associated with the log drain +}) (*LogDrain, error) { + var logDrain LogDrain + return &logDrain, s.Post(&logDrain, fmt.Sprintf("/apps/%v/log-drains", appIdentity), o) +} + +// Delete an existing log drain. Log drains added by add-ons can only be +// removed by removing the add-on. +func (s *Service) LogDrainDelete(appIdentity string, logDrainIdentity string) error { + return s.Delete(fmt.Sprintf("/apps/%v/log-drains/%v", appIdentity, logDrainIdentity)) +} + +// Info for existing log drain. +func (s *Service) LogDrainInfo(appIdentity string, logDrainIdentity string) (*LogDrain, error) { + var logDrain LogDrain + return &logDrain, s.Get(&logDrain, fmt.Sprintf("/apps/%v/log-drains/%v", appIdentity, logDrainIdentity), nil) +} + +// List existing log drains. +func (s *Service) LogDrainList(appIdentity string, lr *ListRange) ([]*LogDrain, error) { + var logDrainList []*LogDrain + return logDrainList, s.Get(&logDrainList, fmt.Sprintf("/apps/%v/log-drains", appIdentity), lr) +} + +// A log session is a reference to the http based log stream for an app. 
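+// As an illustrative sketch (assumed Service s and placeholder app; the
+// plain HTTP GET on LogplexURL is an assumption about how the stream is
+// consumed):
+//
+//	tail := true
+//	sess, err := s.LogSessionCreate("example-app", LogSessionCreateOpts{Tail: &tail})
+//	if err == nil {
+//		resp, _ := http.Get(sess.LogplexURL) // chunked stream of log lines
+//		defer resp.Body.Close()
+//	}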
+type LogSession struct { + CreatedAt time.Time `json:"created_at"` // when log connection was created + ID string `json:"id"` // unique identifier of this log session + LogplexURL string `json:"logplex_url"` // URL for log streaming session + UpdatedAt time.Time `json:"updated_at"` // when log session was updated +} +type LogSessionCreateOpts struct { + Dyno *string `json:"dyno,omitempty"` // dyno to limit results to + Lines *int `json:"lines,omitempty"` // number of log lines to stream at once + Source *string `json:"source,omitempty"` // log source to limit results to + Tail *bool `json:"tail,omitempty"` // whether to stream ongoing logs +} + +// Create a new log session. +func (s *Service) LogSessionCreate(appIdentity string, o struct { + Dyno *string `json:"dyno,omitempty"` // dyno to limit results to + Lines *int `json:"lines,omitempty"` // number of log lines to stream at once + Source *string `json:"source,omitempty"` // log source to limit results to + Tail *bool `json:"tail,omitempty"` // whether to stream ongoing logs +}) (*LogSession, error) { + var logSession LogSession + return &logSession, s.Post(&logSession, fmt.Sprintf("/apps/%v/log-sessions", appIdentity), o) +} + +// OAuth authorizations represent clients that a Heroku user has +// authorized to automate, customize or extend their usage of the +// platform. For more information please refer to the [Heroku OAuth +// documentation](https://devcenter.heroku.com/articles/oauth) +type OAuthAuthorization struct { + AccessToken *struct { + ExpiresIn *int `json:"expires_in"` // seconds until OAuth token expires; may be `null` for tokens with + // indefinite lifetime + ID string `json:"id"` // unique identifier of OAuth token + Token string `json:"token"` // contents of the token to be used for authorization + } `json:"access_token"` // access token for this authorization + Client *struct { + ID string `json:"id"` // unique identifier of this OAuth client + Name string `json:"name"` // OAuth client name + RedirectURI string `json:"redirect_uri"` // endpoint for redirection after authorization with OAuth client + } `json:"client"` // identifier of the client that obtained this authorization, if any + CreatedAt time.Time `json:"created_at"` // when OAuth authorization was created + Grant *struct { + Code string `json:"code"` // grant code received from OAuth web application authorization + ExpiresIn int `json:"expires_in"` // seconds until OAuth grant expires + ID string `json:"id"` // unique identifier of OAuth grant + } `json:"grant"` // this authorization's grant + ID string `json:"id"` // unique identifier of OAuth authorization + RefreshToken *struct { + ExpiresIn *int `json:"expires_in"` // seconds until OAuth token expires; may be `null` for tokens with + // indefinite lifetime + ID string `json:"id"` // unique identifier of OAuth token + Token string `json:"token"` // contents of the token to be used for authorization + } `json:"refresh_token"` // refresh token for this authorization + Scope []string `json:"scope"` // The scope of access OAuth authorization allows + UpdatedAt time.Time `json:"updated_at"` // when OAuth authorization was updated +} +type OAuthAuthorizationCreateOpts struct { + Client *string `json:"client,omitempty"` // unique identifier of this OAuth client + Description *string `json:"description,omitempty"` // human-friendly description of this OAuth authorization + ExpiresIn *int `json:"expires_in,omitempty"` // seconds until OAuth token expires; may be `null` for tokens with + // indefinite lifetime + 
Scope []string `json:"scope"` // The scope of access OAuth authorization allows +} + +// Create a new OAuth authorization. +func (s *Service) OAuthAuthorizationCreate(o struct { + Client *string `json:"client,omitempty"` // unique identifier of this OAuth client + Description *string `json:"description,omitempty"` // human-friendly description of this OAuth authorization + ExpiresIn *int `json:"expires_in,omitempty"` // seconds until OAuth token expires; may be `null` for tokens with + // indefinite lifetime + Scope []string `json:"scope"` // The scope of access OAuth authorization allows +}) (*OAuthAuthorization, error) { + var oauthAuthorization OAuthAuthorization + return &oauthAuthorization, s.Post(&oauthAuthorization, fmt.Sprintf("/oauth/authorizations"), o) +} + +// Delete OAuth authorization. +func (s *Service) OAuthAuthorizationDelete(oauthAuthorizationIdentity string) error { + return s.Delete(fmt.Sprintf("/oauth/authorizations/%v", oauthAuthorizationIdentity)) +} + +// Info for an OAuth authorization. +func (s *Service) OAuthAuthorizationInfo(oauthAuthorizationIdentity string) (*OAuthAuthorization, error) { + var oauthAuthorization OAuthAuthorization + return &oauthAuthorization, s.Get(&oauthAuthorization, fmt.Sprintf("/oauth/authorizations/%v", oauthAuthorizationIdentity), nil) +} + +// List OAuth authorizations. +func (s *Service) OAuthAuthorizationList(lr *ListRange) ([]*OAuthAuthorization, error) { + var oauthAuthorizationList []*OAuthAuthorization + return oauthAuthorizationList, s.Get(&oauthAuthorizationList, fmt.Sprintf("/oauth/authorizations"), lr) +} + +// OAuth clients are applications that Heroku users can authorize to +// automate, customize or extend their usage of the platform. For more +// information please refer to the [Heroku OAuth +// documentation](https://devcenter.heroku.com/articles/oauth). +type OAuthClient struct { + CreatedAt time.Time `json:"created_at"` // when OAuth client was created + ID string `json:"id"` // unique identifier of this OAuth client + IgnoresDelinquent *bool `json:"ignores_delinquent"` // whether the client is still operable given a delinquent account + Name string `json:"name"` // OAuth client name + RedirectURI string `json:"redirect_uri"` // endpoint for redirection after authorization with OAuth client + Secret string `json:"secret"` // secret used to obtain OAuth authorizations under this client + UpdatedAt time.Time `json:"updated_at"` // when OAuth client was updated +} +type OAuthClientCreateOpts struct { + Name string `json:"name"` // OAuth client name + RedirectURI string `json:"redirect_uri"` // endpoint for redirection after authorization with OAuth client +} + +// Create a new OAuth client. +func (s *Service) OAuthClientCreate(o struct { + Name string `json:"name"` // OAuth client name + RedirectURI string `json:"redirect_uri"` // endpoint for redirection after authorization with OAuth client +}) (*OAuthClient, error) { + var oauthClient OAuthClient + return &oauthClient, s.Post(&oauthClient, fmt.Sprintf("/oauth/clients"), o) +} + +// Delete OAuth client. 
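+// A minimal lifecycle sketch (assumed Service s, placeholder name and
+// redirect URI) pairing OAuthClientCreate above with the delete call
+// below:
+//
+//	client, err := s.OAuthClientCreate(OAuthClientCreateOpts{
+//		Name:        "example-client",
+//		RedirectURI: "https://example.com/oauth/callback",
+//	})
+//	if err == nil {
+//		err = s.OAuthClientDelete(client.ID)
+//	}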
+func (s *Service) OAuthClientDelete(oauthClientIdentity string) error { + return s.Delete(fmt.Sprintf("/oauth/clients/%v", oauthClientIdentity)) +} + +// Info for an OAuth client +func (s *Service) OAuthClientInfo(oauthClientIdentity string) (*OAuthClient, error) { + var oauthClient OAuthClient + return &oauthClient, s.Get(&oauthClient, fmt.Sprintf("/oauth/clients/%v", oauthClientIdentity), nil) +} + +// List OAuth clients +func (s *Service) OAuthClientList(lr *ListRange) ([]*OAuthClient, error) { + var oauthClientList []*OAuthClient + return oauthClientList, s.Get(&oauthClientList, fmt.Sprintf("/oauth/clients"), lr) +} + +type OAuthClientUpdateOpts struct { + Name *string `json:"name,omitempty"` // OAuth client name + RedirectURI *string `json:"redirect_uri,omitempty"` // endpoint for redirection after authorization with OAuth client +} + +// Update OAuth client +func (s *Service) OAuthClientUpdate(oauthClientIdentity string, o struct { + Name *string `json:"name,omitempty"` // OAuth client name + RedirectURI *string `json:"redirect_uri,omitempty"` // endpoint for redirection after authorization with OAuth client +}) (*OAuthClient, error) { + var oauthClient OAuthClient + return &oauthClient, s.Patch(&oauthClient, fmt.Sprintf("/oauth/clients/%v", oauthClientIdentity), o) +} + +// OAuth grants are used to obtain authorizations on behalf of a user. +// For more information please refer to the [Heroku OAuth +// documentation](https://devcenter.heroku.com/articles/oauth) +type OAuthGrant struct{} + +// OAuth tokens provide access for authorized clients to act on behalf +// of a Heroku user to automate, customize or extend their usage of the +// platform. For more information please refer to the [Heroku OAuth +// documentation](https://devcenter.heroku.com/articles/oauth) +type OAuthToken struct { + AccessToken struct { + ExpiresIn *int `json:"expires_in"` // seconds until OAuth token expires; may be `null` for tokens with + // indefinite lifetime + ID string `json:"id"` // unique identifier of OAuth token + Token string `json:"token"` // contents of the token to be used for authorization + } `json:"access_token"` // current access token + Authorization struct { + ID string `json:"id"` // unique identifier of OAuth authorization + } `json:"authorization"` // authorization for this set of tokens + Client *struct { + Secret string `json:"secret"` // secret used to obtain OAuth authorizations under this client + } `json:"client"` // OAuth client secret used to obtain token + CreatedAt time.Time `json:"created_at"` // when OAuth token was created + Grant struct { + Code string `json:"code"` // grant code received from OAuth web application authorization + Type string `json:"type"` // type of grant requested, one of `authorization_code` or + // `refresh_token` + } `json:"grant"` // grant used on the underlying authorization + ID string `json:"id"` // unique identifier of OAuth token + RefreshToken struct { + ExpiresIn *int `json:"expires_in"` // seconds until OAuth token expires; may be `null` for tokens with + // indefinite lifetime + ID string `json:"id"` // unique identifier of OAuth token + Token string `json:"token"` // contents of the token to be used for authorization + } `json:"refresh_token"` // refresh token for this authorization + Session struct { + ID string `json:"id"` // unique identifier of OAuth token + } `json:"session"` // OAuth session using this token + UpdatedAt time.Time `json:"updated_at"` // when OAuth token was updated + User struct { + ID string `json:"id"` // unique 
identifier of an account + } `json:"user"` // Reference to the user associated with this token +} +type OAuthTokenCreateOpts struct { + Client struct { + Secret *string `json:"secret,omitempty"` // secret used to obtain OAuth authorizations under this client + } `json:"client"` + Grant struct { + Code *string `json:"code,omitempty"` // grant code received from OAuth web application authorization + Type *string `json:"type,omitempty"` // type of grant requested, one of `authorization_code` or + // `refresh_token` + } `json:"grant"` + RefreshToken struct { + Token *string `json:"token,omitempty"` // contents of the token to be used for authorization + } `json:"refresh_token"` +} + +// Create a new OAuth token. +func (s *Service) OAuthTokenCreate(o struct { + Client struct { + Secret *string `json:"secret,omitempty"` // secret used to obtain OAuth authorizations under this client + } `json:"client"` + Grant struct { + Code *string `json:"code,omitempty"` // grant code received from OAuth web application authorization + Type *string `json:"type,omitempty"` // type of grant requested, one of `authorization_code` or + // `refresh_token` + } `json:"grant"` + RefreshToken struct { + Token *string `json:"token,omitempty"` // contents of the token to be used for authorization + } `json:"refresh_token"` +}) (*OAuthToken, error) { + var oauthToken OAuthToken + return &oauthToken, s.Post(&oauthToken, fmt.Sprintf("/oauth/tokens"), o) +} + +// Organizations allow you to manage access to a shared group of +// applications across your development team. +type Organization struct { + CreditCardCollections bool `json:"credit_card_collections"` // whether charges incurred by the org are paid by credit card. + Default bool `json:"default"` // whether to use this organization when none is specified + Name string `json:"name"` // unique name of organization + ProvisionedLicenses bool `json:"provisioned_licenses"` // whether the org is provisioned licenses by salesforce. + Role string `json:"role"` // role in the organization +} + +// List organizations in which you are a member. +func (s *Service) OrganizationList(lr *ListRange) ([]*Organization, error) { + var organizationList []*Organization + return organizationList, s.Get(&organizationList, fmt.Sprintf("/organizations"), lr) +} + +type OrganizationUpdateOpts struct { + Default *bool `json:"default,omitempty"` // whether to use this organization when none is specified +} + +// Set or unset the organization as your default organization. +func (s *Service) OrganizationUpdate(organizationIdentity string, o struct { + Default *bool `json:"default,omitempty"` // whether to use this organization when none is specified +}) (*Organization, error) { + var organization Organization + return &organization, s.Patch(&organization, fmt.Sprintf("/organizations/%v", organizationIdentity), o) +} + +// An organization app encapsulates the organization specific +// functionality of Heroku apps. +type OrganizationApp struct { + ArchivedAt *time.Time `json:"archived_at"` // when app was archived + BuildpackProvidedDescription *string `json:"buildpack_provided_description"` // description from buildpack of app + CreatedAt time.Time `json:"created_at"` // when app was created + GitURL string `json:"git_url"` // git repo URL of app + ID string `json:"id"` // unique identifier of app + Joined bool `json:"joined"` // is the current member a collaborator on this app. + Locked bool `json:"locked"` // are other organization members forbidden from joining this app. 
+ Maintenance bool `json:"maintenance"` // maintenance status of app + Name string `json:"name"` // unique name of app + Organization *struct { + Name string `json:"name"` // unique name of organization + } `json:"organization"` // organization that owns this app + Owner *struct { + Email string `json:"email"` // unique email address of account + ID string `json:"id"` // unique identifier of an account + } `json:"owner"` // identity of app owner + Region struct { + ID string `json:"id"` // unique identifier of region + Name string `json:"name"` // unique name of region + } `json:"region"` // identity of app region + ReleasedAt *time.Time `json:"released_at"` // when app was released + RepoSize *int `json:"repo_size"` // git repo size in bytes of app + SlugSize *int `json:"slug_size"` // slug size in bytes of app + Stack struct { + ID string `json:"id"` // unique identifier of stack + Name string `json:"name"` // unique name of stack + } `json:"stack"` // identity of app stack + UpdatedAt time.Time `json:"updated_at"` // when app was updated + WebURL string `json:"web_url"` // web URL of app +} +type OrganizationAppCreateOpts struct { + Locked *bool `json:"locked,omitempty"` // are other organization members forbidden from joining this app. + Name *string `json:"name,omitempty"` // unique name of app + Organization *string `json:"organization,omitempty"` // unique name of organization + Personal *bool `json:"personal,omitempty"` // force creation of the app in the user account even if a default org + // is set. + Region *string `json:"region,omitempty"` // unique name of region + Stack *string `json:"stack,omitempty"` // unique name of stack +} + +// Create a new app in the specified organization, in the default +// organization if unspecified, or in personal account, if default +// organization is not set. +func (s *Service) OrganizationAppCreate(o struct { + Locked *bool `json:"locked,omitempty"` // are other organization members forbidden from joining this app. + Name *string `json:"name,omitempty"` // unique name of app + Organization *string `json:"organization,omitempty"` // unique name of organization + Personal *bool `json:"personal,omitempty"` // force creation of the app in the user account even if a default org + // is set. + Region *string `json:"region,omitempty"` // unique name of region + Stack *string `json:"stack,omitempty"` // unique name of stack +}) (*OrganizationApp, error) { + var organizationApp OrganizationApp + return &organizationApp, s.Post(&organizationApp, fmt.Sprintf("/organizations/apps"), o) +} + +// List apps in the default organization, or in personal account, if +// default organization is not set. +func (s *Service) OrganizationAppList(lr *ListRange) ([]*OrganizationApp, error) { + var organizationAppList []*OrganizationApp + return organizationAppList, s.Get(&organizationAppList, fmt.Sprintf("/organizations/apps"), lr) +} + +// List organization apps. +func (s *Service) OrganizationAppListForOrganization(organizationIdentity string, lr *ListRange) ([]*OrganizationApp, error) { + var organizationAppList []*OrganizationApp + return organizationAppList, s.Get(&organizationAppList, fmt.Sprintf("/organizations/%v/apps", organizationIdentity), lr) +} + +// Info for an organization app. 
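+// For illustration (assumed Service s; the organization name is a
+// placeholder), an app created via OrganizationAppCreate above can be
+// fetched back here:
+//
+//	org := "example-org"
+//	created, err := s.OrganizationAppCreate(OrganizationAppCreateOpts{Organization: &org})
+//	if err == nil {
+//		app, _ := s.OrganizationAppInfo(created.Name)
+//		fmt.Println(app.Locked)
+//	}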
+func (s *Service) OrganizationAppInfo(organizationAppIdentity string) (*OrganizationApp, error) { + var organizationApp OrganizationApp + return &organizationApp, s.Get(&organizationApp, fmt.Sprintf("/organizations/apps/%v", organizationAppIdentity), nil) +} + +type OrganizationAppUpdateLockedOpts struct { + Locked bool `json:"locked"` // are other organization members forbidden from joining this app. +} + +// Lock or unlock an organization app. +func (s *Service) OrganizationAppUpdateLocked(organizationAppIdentity string, o struct { + Locked bool `json:"locked"` // are other organization members forbidden from joining this app. +}) (*OrganizationApp, error) { + var organizationApp OrganizationApp + return &organizationApp, s.Patch(&organizationApp, fmt.Sprintf("/organizations/apps/%v", organizationAppIdentity), o) +} + +type OrganizationAppTransferToAccountOpts struct { + Owner string `json:"owner"` // unique email address of account +} + +// Transfer an existing organization app to another Heroku account. +func (s *Service) OrganizationAppTransferToAccount(organizationAppIdentity string, o struct { + Owner string `json:"owner"` // unique email address of account +}) (*OrganizationApp, error) { + var organizationApp OrganizationApp + return &organizationApp, s.Patch(&organizationApp, fmt.Sprintf("/organizations/apps/%v", organizationAppIdentity), o) +} + +type OrganizationAppTransferToOrganizationOpts struct { + Owner string `json:"owner"` // unique name of organization +} + +// Transfer an existing organization app to another organization. +func (s *Service) OrganizationAppTransferToOrganization(organizationAppIdentity string, o struct { + Owner string `json:"owner"` // unique name of organization +}) (*OrganizationApp, error) { + var organizationApp OrganizationApp + return &organizationApp, s.Patch(&organizationApp, fmt.Sprintf("/organizations/apps/%v", organizationAppIdentity), o) +} + +// An organization collaborator represents an account that has been +// given access to an organization app on Heroku. +type OrganizationAppCollaborator struct { + CreatedAt time.Time `json:"created_at"` // when collaborator was created + ID string `json:"id"` // unique identifier of collaborator + Role string `json:"role"` // role in the organization + UpdatedAt time.Time `json:"updated_at"` // when collaborator was updated + User struct { + Email string `json:"email"` // unique email address of account + ID string `json:"id"` // unique identifier of an account + } `json:"user"` // identity of collaborated account +} +type OrganizationAppCollaboratorCreateOpts struct { + Silent *bool `json:"silent,omitempty"` // whether to suppress email invitation when creating collaborator + User string `json:"user"` // unique email address of account +} + +// Create a new collaborator on an organization app. Use this endpoint +// instead of the `/apps/{app_id_or_name}/collaborator` endpoint when +// you want the collaborator to be granted [privileges] +// (https://devcenter.heroku.com/articles/org-users-access#roles) +// according to their role in the organization. 
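+// A hedged sketch (assumed Service s, placeholder app and email) of the
+// silent-invitation option:
+//
+//	silent := true
+//	collab, err := s.OrganizationAppCollaboratorCreate("example-app", OrganizationAppCollaboratorCreateOpts{
+//		Silent: &silent,
+//		User:   "user@example.com",
+//	})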
+func (s *Service) OrganizationAppCollaboratorCreate(appIdentity string, o struct { + Silent *bool `json:"silent,omitempty"` // whether to suppress email invitation when creating collaborator + User string `json:"user"` // unique email address of account +}) (*OrganizationAppCollaborator, error) { + var organizationAppCollaborator OrganizationAppCollaborator + return &organizationAppCollaborator, s.Post(&organizationAppCollaborator, fmt.Sprintf("/organizations/apps/%v/collaborators", appIdentity), o) +} + +// Delete an existing collaborator from an organization app. +func (s *Service) OrganizationAppCollaboratorDelete(organizationAppIdentity string, organizationAppCollaboratorIdentity string) error { + return s.Delete(fmt.Sprintf("/organizations/apps/%v/collaborators/%v", organizationAppIdentity, organizationAppCollaboratorIdentity)) +} + +// Info for a collaborator on an organization app. +func (s *Service) OrganizationAppCollaboratorInfo(organizationAppIdentity string, organizationAppCollaboratorIdentity string) (*OrganizationAppCollaborator, error) { + var organizationAppCollaborator OrganizationAppCollaborator + return &organizationAppCollaborator, s.Get(&organizationAppCollaborator, fmt.Sprintf("/organizations/apps/%v/collaborators/%v", organizationAppIdentity, organizationAppCollaboratorIdentity), nil) +} + +// List collaborators on an organization app. +func (s *Service) OrganizationAppCollaboratorList(organizationAppIdentity string, lr *ListRange) ([]*OrganizationAppCollaborator, error) { + var organizationAppCollaboratorList []*OrganizationAppCollaborator + return organizationAppCollaboratorList, s.Get(&organizationAppCollaboratorList, fmt.Sprintf("/organizations/apps/%v/collaborators", organizationAppIdentity), lr) +} + +// An organization member is an individual with access to an +// organization. +type OrganizationMember struct { + CreatedAt time.Time `json:"created_at"` // when organization-member was created + Email string `json:"email"` // email address of the organization member + Role string `json:"role"` // role in the organization + UpdatedAt time.Time `json:"updated_at"` // when organization-member was updated +} +type OrganizationMemberCreateOrUpdateOpts struct { + Email string `json:"email"` // email address of the organization member + Role string `json:"role"` // role in the organization +} + +// Create a new organization member, or update their role. +func (s *Service) OrganizationMemberCreateOrUpdate(organizationIdentity string, o struct { + Email string `json:"email"` // email address of the organization member + Role string `json:"role"` // role in the organization +}) (*OrganizationMember, error) { + var organizationMember OrganizationMember + return &organizationMember, s.Put(&organizationMember, fmt.Sprintf("/organizations/%v/members", organizationIdentity), o) +} + +// Remove a member from the organization. +func (s *Service) OrganizationMemberDelete(organizationIdentity string, organizationMemberIdentity string) error { + return s.Delete(fmt.Sprintf("/organizations/%v/members/%v", organizationIdentity, organizationMemberIdentity)) +} + +// List members of the organization. 
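+// For example (assumed Service s and placeholder organization; passing a
+// nil ListRange for the default page is an assumption):
+//
+//	members, err := s.OrganizationMemberList("example-org", nil)
+//	for _, m := range members {
+//		fmt.Println(m.Email, m.Role)
+//	}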
+func (s *Service) OrganizationMemberList(organizationIdentity string, lr *ListRange) ([]*OrganizationMember, error) { + var organizationMemberList []*OrganizationMember + return organizationMemberList, s.Get(&organizationMemberList, fmt.Sprintf("/organizations/%v/members", organizationIdentity), lr) +} + +// Plans represent different configurations of add-ons that may be added +// to apps. Endpoints under add-on services can be accessed without +// authentication. +type Plan struct { + CreatedAt time.Time `json:"created_at"` // when plan was created + Default bool `json:"default"` // whether this plan is the default for its addon service + Description string `json:"description"` // description of plan + ID string `json:"id"` // unique identifier of this plan + Name string `json:"name"` // unique name of this plan + Price struct { + Cents int `json:"cents"` // price in cents per unit of plan + Unit string `json:"unit"` // unit of price for plan + } `json:"price"` // price + State string `json:"state"` // release status for plan + UpdatedAt time.Time `json:"updated_at"` // when plan was updated +} + +// Info for existing plan. +func (s *Service) PlanInfo(addonServiceIdentity string, planIdentity string) (*Plan, error) { + var plan Plan + return &plan, s.Get(&plan, fmt.Sprintf("/addon-services/%v/plans/%v", addonServiceIdentity, planIdentity), nil) +} + +// List existing plans. +func (s *Service) PlanList(addonServiceIdentity string, lr *ListRange) ([]*Plan, error) { + var planList []*Plan + return planList, s.Get(&planList, fmt.Sprintf("/addon-services/%v/plans", addonServiceIdentity), lr) +} + +// Rate Limit represents the number of request tokens each account +// holds. Requests to this endpoint do not count towards the rate limit. +type RateLimit struct { + Remaining int `json:"remaining"` // allowed requests remaining in current interval +} + +// Info for rate limits. +func (s *Service) RateLimitInfo() (*RateLimit, error) { + var rateLimit RateLimit + return &rateLimit, s.Get(&rateLimit, fmt.Sprintf("/account/rate-limits"), nil) +} + +// A region represents a geographic location in which your application +// may run. +type Region struct { + CreatedAt time.Time `json:"created_at"` // when region was created + Description string `json:"description"` // description of region + ID string `json:"id"` // unique identifier of region + Name string `json:"name"` // unique name of region + UpdatedAt time.Time `json:"updated_at"` // when region was updated +} + +// Info for existing region. +func (s *Service) RegionInfo(regionIdentity string) (*Region, error) { + var region Region + return &region, s.Get(&region, fmt.Sprintf("/regions/%v", regionIdentity), nil) +} + +// List existing regions. +func (s *Service) RegionList(lr *ListRange) ([]*Region, error) { + var regionList []*Region + return regionList, s.Get(&regionList, fmt.Sprintf("/regions"), lr) +} + +// A release represents a combination of code, config vars and add-ons +// for an app on Heroku. 
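+// As an illustrative sketch (assumed Service s; the app name and release
+// identifier are placeholders), ReleaseRollback below creates a new
+// release that reverts to an earlier one:
+//
+//	rel, err := s.ReleaseRollback("example-app", ReleaseRollbackOpts{
+//		Release: "01234567-89ab-cdef-0123-456789abcdef",
+//	})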
+type Release struct { + CreatedAt time.Time `json:"created_at"` // when release was created + Description string `json:"description"` // description of changes in this release + ID string `json:"id"` // unique identifier of release + Slug *struct { + ID string `json:"id"` // unique identifier of slug + } `json:"slug"` // slug running in this release + UpdatedAt time.Time `json:"updated_at"` // when release was updated + User struct { + Email string `json:"email"` // unique email address of account + ID string `json:"id"` // unique identifier of an account + } `json:"user"` // user that created the release + Version int `json:"version"` // unique version assigned to the release +} + +// Info for existing release. +func (s *Service) ReleaseInfo(appIdentity string, releaseIdentity string) (*Release, error) { + var release Release + return &release, s.Get(&release, fmt.Sprintf("/apps/%v/releases/%v", appIdentity, releaseIdentity), nil) +} + +// List existing releases. +func (s *Service) ReleaseList(appIdentity string, lr *ListRange) ([]*Release, error) { + var releaseList []*Release + return releaseList, s.Get(&releaseList, fmt.Sprintf("/apps/%v/releases", appIdentity), lr) +} + +type ReleaseCreateOpts struct { + Description *string `json:"description,omitempty"` // description of changes in this release + Slug string `json:"slug"` // unique identifier of slug +} + +// Create new release. The API cannot be used to create releases on +// Bamboo apps. +func (s *Service) ReleaseCreate(appIdentity string, o struct { + Description *string `json:"description,omitempty"` // description of changes in this release + Slug string `json:"slug"` // unique identifier of slug +}) (*Release, error) { + var release Release + return &release, s.Post(&release, fmt.Sprintf("/apps/%v/releases", appIdentity), o) +} + +type ReleaseRollbackOpts struct { + Release string `json:"release"` // unique identifier of release +} + +// Rollback to an existing release. +func (s *Service) ReleaseRollback(appIdentity string, o struct { + Release string `json:"release"` // unique identifier of release +}) (*Release, error) { + var release Release + return &release, s.Post(&release, fmt.Sprintf("/apps/%v/releases", appIdentity), o) +} + +// A slug is a snapshot of your application code that is ready to run on +// the platform. +type Slug struct { + Blob struct { + Method string `json:"method"` // method to be used to interact with the slug blob + URL string `json:"url"` // URL to interact with the slug blob + } `json:"blob"` // pointer to the url where clients can fetch or store the actual + // release binary + BuildpackProvidedDescription *string `json:"buildpack_provided_description"` // description from buildpack of slug + Commit *string `json:"commit"` // identification of the code with your version control system (eg: SHA + // of the git HEAD) + CreatedAt time.Time `json:"created_at"` // when slug was created + ID string `json:"id"` // unique identifier of slug + ProcessTypes map[string]string `json:"process_types"` // hash mapping process type names to their respective command + Size *int `json:"size"` // size of slug, in bytes + UpdatedAt time.Time `json:"updated_at"` // when slug was updated +} + +// Info for existing slug. 
+func (s *Service) SlugInfo(appIdentity string, slugIdentity string) (*Slug, error) { + var slug Slug + return &slug, s.Get(&slug, fmt.Sprintf("/apps/%v/slugs/%v", appIdentity, slugIdentity), nil) +} + +type SlugCreateOpts struct { + BuildpackProvidedDescription *string `json:"buildpack_provided_description,omitempty"` // description from buildpack of slug + Commit *string `json:"commit,omitempty"` // identification of the code with your version control system (eg: SHA + // of the git HEAD) + ProcessTypes map[string]string `json:"process_types"` // hash mapping process type names to their respective command +} + +// Create a new slug. For more information please refer to [Deploying +// Slugs using the Platform +// API](https://devcenter.heroku.com/articles/platform-api-deploying-slug +// s?preview=1). +func (s *Service) SlugCreate(appIdentity string, o struct { + BuildpackProvidedDescription *string `json:"buildpack_provided_description,omitempty"` // description from buildpack of slug + Commit *string `json:"commit,omitempty"` // identification of the code with your version control system (eg: SHA + // of the git HEAD) + ProcessTypes map[string]string `json:"process_types"` // hash mapping process type names to their respective command +}) (*Slug, error) { + var slug Slug + return &slug, s.Post(&slug, fmt.Sprintf("/apps/%v/slugs", appIdentity), o) +} + +// [SSL Endpoint](https://devcenter.heroku.com/articles/ssl-endpoint) is +// a public address serving custom SSL cert for HTTPS traffic to a +// Heroku app. Note that an app must have the `ssl:endpoint` addon +// installed before it can provision an SSL Endpoint using these APIs. +type SSLEndpoint struct { + CertificateChain string `json:"certificate_chain"` // raw contents of the public certificate chain (eg: .crt or .pem file) + CName string `json:"cname"` // canonical name record, the address to point a domain at + CreatedAt time.Time `json:"created_at"` // when endpoint was created + ID string `json:"id"` // unique identifier of this SSL endpoint + Name string `json:"name"` // unique name for SSL endpoint + UpdatedAt time.Time `json:"updated_at"` // when endpoint was updated +} +type SSLEndpointCreateOpts struct { + CertificateChain string `json:"certificate_chain"` // raw contents of the public certificate chain (eg: .crt or .pem file) + Preprocess *bool `json:"preprocess,omitempty"` // allow Heroku to modify an uploaded public certificate chain if deemed + // advantageous by adding missing intermediaries, stripping unnecessary + // ones, etc. + PrivateKey string `json:"private_key"` // contents of the private key (eg .key file) +} + +// Create a new SSL endpoint. +func (s *Service) SSLEndpointCreate(appIdentity string, o struct { + CertificateChain string `json:"certificate_chain"` // raw contents of the public certificate chain (eg: .crt or .pem file) + Preprocess *bool `json:"preprocess,omitempty"` // allow Heroku to modify an uploaded public certificate chain if deemed + // advantageous by adding missing intermediaries, stripping unnecessary + // ones, etc. + PrivateKey string `json:"private_key"` // contents of the private key (eg .key file) +}) (*SSLEndpoint, error) { + var sslEndpoint SSLEndpoint + return &sslEndpoint, s.Post(&sslEndpoint, fmt.Sprintf("/apps/%v/ssl-endpoints", appIdentity), o) +} + +// Delete existing SSL endpoint. 
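+// A minimal sketch (assumed Service s; the file names and app are
+// placeholders) pairing SSLEndpointCreate above with the delete call
+// below:
+//
+//	chain, _ := ioutil.ReadFile("server.crt")
+//	key, _ := ioutil.ReadFile("server.key")
+//	ep, err := s.SSLEndpointCreate("example-app", SSLEndpointCreateOpts{
+//		CertificateChain: string(chain),
+//		PrivateKey:       string(key),
+//	})
+//	if err == nil {
+//		_ = s.SSLEndpointDelete("example-app", ep.ID)
+//	}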
+func (s *Service) SSLEndpointDelete(appIdentity string, sslEndpointIdentity string) error { + return s.Delete(fmt.Sprintf("/apps/%v/ssl-endpoints/%v", appIdentity, sslEndpointIdentity)) +} + +// Info for existing SSL endpoint. +func (s *Service) SSLEndpointInfo(appIdentity string, sslEndpointIdentity string) (*SSLEndpoint, error) { + var sslEndpoint SSLEndpoint + return &sslEndpoint, s.Get(&sslEndpoint, fmt.Sprintf("/apps/%v/ssl-endpoints/%v", appIdentity, sslEndpointIdentity), nil) +} + +// List existing SSL endpoints. +func (s *Service) SSLEndpointList(appIdentity string, lr *ListRange) ([]*SSLEndpoint, error) { + var sslEndpointList []*SSLEndpoint + return sslEndpointList, s.Get(&sslEndpointList, fmt.Sprintf("/apps/%v/ssl-endpoints", appIdentity), lr) +} + +type SSLEndpointUpdateOpts struct { + CertificateChain *string `json:"certificate_chain,omitempty"` // raw contents of the public certificate chain (eg: .crt or .pem file) + Preprocess *bool `json:"preprocess,omitempty"` // allow Heroku to modify an uploaded public certificate chain if deemed + // advantageous by adding missing intermediaries, stripping unnecessary + // ones, etc. + PrivateKey *string `json:"private_key,omitempty"` // contents of the private key (eg .key file) + Rollback *bool `json:"rollback,omitempty"` // indicates that a rollback should be performed +} + +// Update an existing SSL endpoint. +func (s *Service) SSLEndpointUpdate(appIdentity string, sslEndpointIdentity string, o struct { + CertificateChain *string `json:"certificate_chain,omitempty"` // raw contents of the public certificate chain (eg: .crt or .pem file) + Preprocess *bool `json:"preprocess,omitempty"` // allow Heroku to modify an uploaded public certificate chain if deemed + // advantageous by adding missing intermediaries, stripping unnecessary + // ones, etc. + PrivateKey *string `json:"private_key,omitempty"` // contents of the private key (eg .key file) + Rollback *bool `json:"rollback,omitempty"` // indicates that a rollback should be performed +}) (*SSLEndpoint, error) { + var sslEndpoint SSLEndpoint + return &sslEndpoint, s.Patch(&sslEndpoint, fmt.Sprintf("/apps/%v/ssl-endpoints/%v", appIdentity, sslEndpointIdentity), o) +} + +// Stacks are the different application execution environments available +// in the Heroku platform. +type Stack struct { + CreatedAt time.Time `json:"created_at"` // when stack was introduced + ID string `json:"id"` // unique identifier of stack + Name string `json:"name"` // unique name of stack + State string `json:"state"` // availability of this stack: beta, deprecated or public + UpdatedAt time.Time `json:"updated_at"` // when stack was last modified +} + +// Stack info. +func (s *Service) StackInfo(stackIdentity string) (*Stack, error) { + var stack Stack + return &stack, s.Get(&stack, fmt.Sprintf("/stacks/%v", stackIdentity), nil) +} + +// List available stacks. 
+func (s *Service) StackList(lr *ListRange) ([]*Stack, error) { + var stackList []*Stack + return stackList, s.Get(&stackList, fmt.Sprintf("/stacks"), lr) +} + diff --git a/vendor/github.com/cyberdelia/heroku-go/v3/schema.json b/vendor/github.com/cyberdelia/heroku-go/v3/schema.json new file mode 100644 index 000000000..40fac7895 --- /dev/null +++ b/vendor/github.com/cyberdelia/heroku-go/v3/schema.json @@ -0,0 +1,5864 @@ +{ + "$schema": "http://interagent.github.io/interagent-hyper-schema", + "definitions": { + "account-feature": { + "description": "An account feature represents a Heroku labs capability that can be enabled or disabled for an account on Heroku.", + "$schema": "http://json-schema.org/draft-04/hyper-schema", + "stability": "production", + "strictProperties": true, + "title": "Heroku Platform API - Account Feature", + "type": [ + "object" + ], + "definitions": { + "created_at": { + "description": "when account feature was created", + "example": "2012-01-01T12:00:00Z", + "format": "date-time", + "readOnly": true, + "type": [ + "string" + ] + }, + "description": { + "description": "description of account feature", + "example": "Causes account to example.", + "readOnly": true, + "type": [ + "string" + ] + }, + "doc_url": { + "description": "documentation URL of account feature", + "example": "http://devcenter.heroku.com/articles/example", + "readOnly": true, + "type": [ + "string" + ] + }, + "enabled": { + "description": "whether or not account feature has been enabled", + "example": true, + "readOnly": false, + "type": [ + "boolean" + ] + }, + "id": { + "description": "unique identifier of account feature", + "example": "01234567-89ab-cdef-0123-456789abcdef", + "format": "uuid", + "readOnly": true, + "type": [ + "string" + ] + }, + "identity": { + "anyOf": [ + { + "$ref": "#/definitions/account-feature/definitions/id" + }, + { + "$ref": "#/definitions/account-feature/definitions/name" + } + ] + }, + "name": { + "description": "unique name of account feature", + "example": "name", + "readOnly": true, + "type": [ + "string" + ] + }, + "state": { + "description": "state of account feature", + "example": "public", + "readOnly": true, + "type": [ + "string" + ] + }, + "updated_at": { + "description": "when account feature was updated", + "example": "2012-01-01T12:00:00Z", + "format": "date-time", + "readOnly": true, + "type": [ + "string" + ] + } + }, + "links": [ + { + "description": "Info for an existing account feature.", + "href": "/account/features/{(%23%2Fdefinitions%2Faccount-feature%2Fdefinitions%2Fidentity)}", + "method": "GET", + "rel": "self", + "targetSchema": { + "$ref": "#/definitions/account-feature" + }, + "title": "Info" + }, + { + "description": "List existing account features.", + "href": "/account/features", + "method": "GET", + "rel": "instances", + "targetSchema": { + "items": { + "$ref": "#/definitions/account-feature" + }, + "type": [ + "array" + ] + }, + "title": "List" + }, + { + "description": "Update an existing account feature.", + "href": "/account/features/{(%23%2Fdefinitions%2Faccount-feature%2Fdefinitions%2Fidentity)}", + "method": "PATCH", + "rel": "update", + "schema": { + "properties": { + "enabled": { + "$ref": "#/definitions/account-feature/definitions/enabled" + } + }, + "required": [ + "enabled" + ], + "type": [ + "object" + ] + }, + "targetSchema": { + "$ref": "#/definitions/account-feature" + }, + "title": "Update" + } + ], + "properties": { + "created_at": { + "$ref": "#/definitions/account-feature/definitions/created_at" + }, + 
"description": { + "$ref": "#/definitions/account-feature/definitions/description" + }, + "doc_url": { + "$ref": "#/definitions/account-feature/definitions/doc_url" + }, + "enabled": { + "$ref": "#/definitions/account-feature/definitions/enabled" + }, + "id": { + "$ref": "#/definitions/account-feature/definitions/id" + }, + "name": { + "$ref": "#/definitions/account-feature/definitions/name" + }, + "state": { + "$ref": "#/definitions/account-feature/definitions/state" + }, + "updated_at": { + "$ref": "#/definitions/account-feature/definitions/updated_at" + } + } + }, + "account": { + "description": "An account represents an individual signed up to use the Heroku platform.", + "$schema": "http://json-schema.org/draft-04/hyper-schema", + "stability": "production", + "strictProperties": true, + "title": "Heroku Platform API - Account", + "type": [ + "object" + ], + "definitions": { + "allow_tracking": { + "default": true, + "description": "whether to allow third party web activity tracking", + "example": true, + "readOnly": false, + "type": [ + "boolean" + ] + }, + "beta": { + "default": false, + "description": "whether allowed to utilize beta Heroku features", + "example": false, + "readOnly": false, + "type": [ + "boolean" + ] + }, + "created_at": { + "description": "when account was created", + "example": "2012-01-01T12:00:00Z", + "format": "date-time", + "readOnly": true, + "type": [ + "string" + ] + }, + "email": { + "description": "unique email address of account", + "example": "username@example.com", + "format": "email", + "readOnly": false, + "type": [ + "string" + ] + }, + "id": { + "description": "unique identifier of an account", + "example": "01234567-89ab-cdef-0123-456789abcdef", + "format": "uuid", + "readOnly": true, + "type": [ + "string" + ] + }, + "identity": { + "anyOf": [ + { + "$ref": "#/definitions/account/definitions/email" + }, + { + "$ref": "#/definitions/account/definitions/id" + } + ] + }, + "last_login": { + "description": "when account last authorized with Heroku", + "example": "2012-01-01T12:00:00Z", + "format": "date-time", + "readOnly": true, + "type": [ + "string" + ] + }, + "name": { + "description": "full name of the account owner", + "example": "Tina Edmonds", + "readOnly": false, + "type": [ + "string", + "null" + ] + }, + "new_password": { + "description": "the new password for the account when changing the password", + "example": "newpassword", + "readOnly": true, + "type": [ + "string" + ] + }, + "password": { + "description": "current password on the account", + "example": "currentpassword", + "readOnly": true, + "type": [ + "string" + ] + }, + "updated_at": { + "description": "when account was updated", + "example": "2012-01-01T12:00:00Z", + "format": "date-time", + "readOnly": true, + "type": [ + "string" + ] + }, + "verified": { + "default": false, + "description": "whether account has been verified with billing information", + "example": false, + "readOnly": true, + "type": [ + "boolean" + ] + } + }, + "links": [ + { + "description": "Info for account.", + "href": "/account", + "method": "GET", + "rel": "self", + "targetSchema": { + "$ref": "#/definitions/account" + }, + "title": "Info" + }, + { + "description": "Update account.", + "href": "/account", + "method": "PATCH", + "rel": "update", + "schema": { + "properties": { + "allow_tracking": { + "$ref": "#/definitions/account/definitions/allow_tracking" + }, + "beta": { + "$ref": "#/definitions/account/definitions/beta" + }, + "name": { + "$ref": "#/definitions/account/definitions/name" + }, + 
"password": { + "$ref": "#/definitions/account/definitions/password" + } + }, + "required": [ + "password" + ], + "type": [ + "object" + ] + }, + "targetSchema": { + "$ref": "#/definitions/account" + }, + "title": "Update" + }, + { + "description": "Change Email for account.", + "href": "/account", + "method": "PATCH", + "rel": "update", + "schema": { + "properties": { + "email": { + "$ref": "#/definitions/account/definitions/email" + }, + "password": { + "$ref": "#/definitions/account/definitions/password" + } + }, + "required": [ + "password", + "email" + ], + "type": [ + "object" + ] + }, + "title": "Change Email" + }, + { + "description": "Change Password for account.", + "href": "/account", + "method": "PATCH", + "rel": "update", + "schema": { + "properties": { + "new_password": { + "$ref": "#/definitions/account/definitions/new_password" + }, + "password": { + "$ref": "#/definitions/account/definitions/password" + } + }, + "required": [ + "new_password", + "password" + ], + "type": [ + "object" + ] + }, + "targetSchema": { + "$ref": "#/definitions/account" + }, + "title": "Change Password" + } + ], + "properties": { + "allow_tracking": { + "$ref": "#/definitions/account/definitions/allow_tracking" + }, + "beta": { + "$ref": "#/definitions/account/definitions/beta" + }, + "created_at": { + "$ref": "#/definitions/account/definitions/created_at" + }, + "email": { + "$ref": "#/definitions/account/definitions/email" + }, + "id": { + "$ref": "#/definitions/account/definitions/id" + }, + "last_login": { + "$ref": "#/definitions/account/definitions/last_login" + }, + "name": { + "$ref": "#/definitions/account/definitions/name" + }, + "updated_at": { + "$ref": "#/definitions/account/definitions/updated_at" + }, + "verified": { + "$ref": "#/definitions/account/definitions/verified" + } + } + }, + "addon-service": { + "description": "Add-on services represent add-ons that may be provisioned for apps. 
Endpoints under add-on services can be accessed without authentication.", + "$schema": "http://json-schema.org/draft-04/hyper-schema", + "stability": "production", + "strictProperties": true, + "title": "Heroku Platform API - Add-on Service", + "type": [ + "object" + ], + "definitions": { + "created_at": { + "description": "when addon-service was created", + "example": "2012-01-01T12:00:00Z", + "format": "date-time", + "readOnly": true, + "type": [ + "string" + ] + }, + "id": { + "description": "unique identifier of this addon-service", + "example": "01234567-89ab-cdef-0123-456789abcdef", + "format": "uuid", + "readOnly": true, + "type": [ + "string" + ] + }, + "identity": { + "anyOf": [ + { + "$ref": "#/definitions/addon-service/definitions/id" + }, + { + "$ref": "#/definitions/addon-service/definitions/name" + } + ] + }, + "name": { + "description": "unique name of this addon-service", + "example": "heroku-postgresql", + "readOnly": true, + "type": [ + "string" + ] + }, + "updated_at": { + "description": "when addon-service was updated", + "example": "2012-01-01T12:00:00Z", + "format": "date-time", + "readOnly": true, + "type": [ + "string" + ] + } + }, + "links": [ + { + "description": "Info for existing addon-service.", + "href": "/addon-services/{(%23%2Fdefinitions%2Faddon-service%2Fdefinitions%2Fidentity)}", + "method": "GET", + "rel": "self", + "targetSchema": { + "$ref": "#/definitions/addon-service" + }, + "title": "Info" + }, + { + "description": "List existing addon-services.", + "href": "/addon-services", + "method": "GET", + "rel": "instances", + "targetSchema": { + "items": { + "$ref": "#/definitions/addon-service" + }, + "type": [ + "array" + ] + }, + "title": "List" + } + ], + "properties": { + "created_at": { + "$ref": "#/definitions/addon-service/definitions/created_at" + }, + "id": { + "$ref": "#/definitions/addon-service/definitions/id" + }, + "name": { + "$ref": "#/definitions/addon-service/definitions/name" + }, + "updated_at": { + "$ref": "#/definitions/addon-service/definitions/updated_at" + } + } + }, + "addon": { + "description": "Add-ons represent add-ons that have been provisioned for an app.", + "$schema": "http://json-schema.org/draft-04/hyper-schema", + "stability": "production", + "strictProperties": true, + "title": "Heroku Platform API - Add-on", + "type": [ + "object" + ], + "definitions": { + "config_vars": { + "description": "config vars associated with this application", + "example": [ + "FOO", + "BAZ" + ], + "items": { + "type": [ + "string" + ] + }, + "readOnly": true, + "type": [ + "array" + ] + }, + "created_at": { + "description": "when add-on was created", + "example": "2012-01-01T12:00:00Z", + "format": "date-time", + "readOnly": true, + "type": [ + "string" + ] + }, + "id": { + "description": "unique identifier of add-on", + "example": "01234567-89ab-cdef-0123-456789abcdef", + "format": "uuid", + "readOnly": true, + "type": [ + "string" + ] + }, + "identity": { + "anyOf": [ + { + "$ref": "#/definitions/addon/definitions/id" + }, + { + "$ref": "#/definitions/addon/definitions/name" + } + ] + }, + "name": { + "description": "name of the add-on unique within its app", + "example": "heroku-postgresql-teal", + "pattern": "^[a-z][a-z0-9-]+$", + "readOnly": true, + "type": [ + "string" + ] + }, + "provider_id": { + "description": "id of this add-on with its provider", + "example": "app123@heroku.com", + "readOnly": true, + "type": [ + "string" + ] + }, + "updated_at": { + "description": "when add-on was updated", + "example": "2012-01-01T12:00:00Z", +
"format": "date-time", + "readOnly": true, + "type": [ + "string" + ] + } + }, + "links": [ + { + "description": "Create a new add-on.", + "href": "/apps/{(%23%2Fdefinitions%2Fapp%2Fdefinitions%2Fidentity)}/addons", + "method": "POST", + "rel": "create", + "schema": { + "properties": { + "config": { + "additionalProperties": false, + "description": "custom add-on provisioning options", + "example": { + "db-version": "1.2.3" + }, + "patternProperties": { + "^\\w+$": { + "type": [ + "string" + ] + } + }, + "type": [ + "object" + ] + }, + "plan": { + "$ref": "#/definitions/plan/definitions/identity" + } + }, + "required": [ + "plan" + ], + "type": [ + "object" + ] + }, + "targetSchema": { + "$ref": "#/definitions/addon" + }, + "title": "Create" + }, + { + "description": "Delete an existing add-on.", + "href": "/apps/{(%23%2Fdefinitions%2Fapp%2Fdefinitions%2Fidentity)}/addons/{(%23%2Fdefinitions%2Faddon%2Fdefinitions%2Fidentity)}", + "method": "DELETE", + "rel": "destroy", + "targetSchema": { + "$ref": "#/definitions/addon" + }, + "title": "Delete" + }, + { + "description": "Info for an existing add-on.", + "href": "/apps/{(%23%2Fdefinitions%2Fapp%2Fdefinitions%2Fidentity)}/addons/{(%23%2Fdefinitions%2Faddon%2Fdefinitions%2Fidentity)}", + "method": "GET", + "rel": "self", + "targetSchema": { + "$ref": "#/definitions/addon" + }, + "title": "Info" + }, + { + "description": "List existing add-ons.", + "href": "/apps/{(%23%2Fdefinitions%2Fapp%2Fdefinitions%2Fidentity)}/addons", + "method": "GET", + "rel": "instances", + "targetSchema": { + "items": { + "$ref": "#/definitions/addon" + }, + "type": [ + "array" + ] + }, + "title": "List" + }, + { + "description": "Change add-on plan. Some add-ons may not support changing plans. In that case, an error will be returned.", + "href": "/apps/{(%23%2Fdefinitions%2Fapp%2Fdefinitions%2Fidentity)}/addons/{(%23%2Fdefinitions%2Faddon%2Fdefinitions%2Fidentity)}", + "method": "PATCH", + "rel": "update", + "schema": { + "properties": { + "plan": { + "$ref": "#/definitions/plan/definitions/identity" + } + }, + "required": [ + "plan" + ], + "type": [ + "object" + ] + }, + "title": "Update" + } + ], + "properties": { + "addon_service": { + "description": "identity of add-on service", + "properties": { + "id": { + "$ref": "#/definitions/addon-service/definitions/id" + }, + "name": { + "$ref": "#/definitions/addon-service/definitions/name" + } + }, + "strictProperties": true, + "type": [ + "object" + ] + }, + "config_vars": { + "$ref": "#/definitions/addon/definitions/config_vars" + }, + "created_at": { + "$ref": "#/definitions/addon/definitions/created_at" + }, + "id": { + "$ref": "#/definitions/addon/definitions/id" + }, + "name": { + "$ref": "#/definitions/addon/definitions/name" + }, + "plan": { + "description": "identity of add-on plan", + "properties": { + "id": { + "$ref": "#/definitions/plan/definitions/id" + }, + "name": { + "$ref": "#/definitions/plan/definitions/name" + } + }, + "strictProperties": true, + "type": [ + "object" + ] + }, + "provider_id": { + "$ref": "#/definitions/addon/definitions/provider_id" + }, + "updated_at": { + "$ref": "#/definitions/addon/definitions/updated_at" + } + } + }, + "app-feature": { + "description": "An app feature represents a Heroku labs capability that can be enabled or disabled for an app on Heroku.", + "$schema": "http://json-schema.org/draft-04/hyper-schema", + "stability": "production", + "strictProperties": true, + "title": "Heroku Platform API - App Feature", + "type": [ + "object" + ], + "definitions": { + 
"created_at": { + "description": "when app feature was created", + "example": "2012-01-01T12:00:00Z", + "format": "date-time", + "readOnly": true, + "type": [ + "string" + ] + }, + "description": { + "description": "description of app feature", + "example": "Causes app to example.", + "readOnly": true, + "type": [ + "string" + ] + }, + "doc_url": { + "description": "documentation URL of app feature", + "example": "http://devcenter.heroku.com/articles/example", + "readOnly": true, + "type": [ + "string" + ] + }, + "enabled": { + "description": "whether or not app feature has been enabled", + "example": true, + "readOnly": false, + "type": [ + "boolean" + ] + }, + "id": { + "description": "unique identifier of app feature", + "example": "01234567-89ab-cdef-0123-456789abcdef", + "format": "uuid", + "readOnly": true, + "type": [ + "string" + ] + }, + "identity": { + "anyOf": [ + { + "$ref": "#/definitions/app-feature/definitions/id" + }, + { + "$ref": "#/definitions/app-feature/definitions/name" + } + ] + }, + "name": { + "description": "unique name of app feature", + "example": "name", + "readOnly": true, + "type": [ + "string" + ] + }, + "state": { + "description": "state of app feature", + "example": "public", + "readOnly": true, + "type": [ + "string" + ] + }, + "updated_at": { + "description": "when app feature was updated", + "example": "2012-01-01T12:00:00Z", + "format": "date-time", + "readOnly": true, + "type": [ + "string" + ] + } + }, + "links": [ + { + "description": "Info for an existing app feature.", + "href": "/apps/{(%23%2Fdefinitions%2Fapp%2Fdefinitions%2Fidentity)}/features/{(%23%2Fdefinitions%2Fapp-feature%2Fdefinitions%2Fidentity)}", + "method": "GET", + "rel": "self", + "targetSchema": { + "$ref": "#/definitions/app-feature" + }, + "title": "Info" + }, + { + "description": "List existing app features.", + "href": "/apps/{(%23%2Fdefinitions%2Fapp%2Fdefinitions%2Fidentity)}/features", + "method": "GET", + "rel": "instances", + "targetSchema": { + "items": { + "$ref": "#/definitions/app-feature" + }, + "type": [ + "array" + ] + }, + "title": "List" + }, + { + "description": "Update an existing app feature.", + "href": "/apps/{(%23%2Fdefinitions%2Fapp%2Fdefinitions%2Fidentity)}/features/{(%23%2Fdefinitions%2Fapp-feature%2Fdefinitions%2Fidentity)}", + "method": "PATCH", + "rel": "update", + "schema": { + "properties": { + "enabled": { + "$ref": "#/definitions/app-feature/definitions/enabled" + } + }, + "required": [ + "enabled" + ], + "type": [ + "object" + ] + }, + "targetSchema": { + "$ref": "#/definitions/app-feature" + }, + "title": "Update" + } + ], + "properties": { + "created_at": { + "$ref": "#/definitions/app-feature/definitions/created_at" + }, + "description": { + "$ref": "#/definitions/app-feature/definitions/description" + }, + "doc_url": { + "$ref": "#/definitions/app-feature/definitions/doc_url" + }, + "enabled": { + "$ref": "#/definitions/app-feature/definitions/enabled" + }, + "id": { + "$ref": "#/definitions/app-feature/definitions/id" + }, + "name": { + "$ref": "#/definitions/app-feature/definitions/name" + }, + "state": { + "$ref": "#/definitions/app-feature/definitions/state" + }, + "updated_at": { + "$ref": "#/definitions/app-feature/definitions/updated_at" + } + } + }, + "app-setup": { + "description": "An app setup represents an app on Heroku that is setup using an environment, addons, and scripts described in an app.json manifest file.", + "$schema": "http://json-schema.org/draft-04/hyper-schema", + "stability": "production", + "strictProperties": 
true, + "title": "Heroku Setup API - App Setup", + "type": [ + "object" + ], + "definitions": { + "id": { + "description": "unique identifier of app setup", + "example": "01234567-89ab-cdef-0123-456789abcdef", + "readOnly": true, + "type": [ + "string" + ], + "format": "uuid" + }, + "identity": { + "anyOf": [ + { + "$ref": "#/definitions/app-setup/definitions/id" + } + ] + }, + "created_at": { + "description": "when app setup was created", + "example": "2012-01-01T12:00:00Z", + "readOnly": true, + "type": [ + "string" + ], + "format": "date-time" + }, + "updated_at": { + "description": "when app setup was updated", + "example": "2012-01-01T12:00:00Z", + "readOnly": true, + "type": [ + "string" + ], + "format": "date-time" + }, + "status": { + "description": "the overall status of app setup", + "example": "succeeded", + "enum": [ + "failed", + "pending", + "succeeded" + ], + "readOnly": true, + "type": [ + "string" + ] + }, + "resolved_success_url": { + "description": "fully qualified success url", + "example": "http://example.herokuapp.com/welcome", + "readOnly": true, + "type": [ + "string", + "null" + ] + }, + "failure_message": { + "description": "reason that app setup has failed", + "example": "invalid app.json", + "readOnly": true, + "type": [ + "string", + "null" + ] + }, + "manifest_errors": { + "description": "errors associated with invalid app.json manifest file", + "example": [ + "config var FOO is required" + ], + "readOnly": true, + "items": { + "type": [ + "string" + ] + }, + "type": [ + "array" + ] + }, + "postdeploy": { + "description": "result of postdeploy script", + "type": [ + "object", + "null" + ], + "properties": { + "output": { + "description": "output of the postdeploy script", + "example": "assets precompiled", + "readOnly": true, + "type": [ + "string" + ] + }, + "exit_code": { + "description": "The exit code of the postdeploy script", + "example": 1, + "readOnly": true, + "type": [ + "integer" + ] + } + }, + "readOnly": true + } + }, + "properties": { + "id": { + "$ref": "#/definitions/app-setup/definitions/id" + }, + "created_at": { + "$ref": "#/definitions/app-setup/definitions/created_at" + }, + "updated_at": { + "$ref": "#/definitions/app-setup/definitions/updated_at" + }, + "status": { + "$ref": "#/definitions/app-setup/definitions/status" + }, + "failure_message": { + "$ref": "#/definitions/app-setup/definitions/failure_message" + }, + "app": { + "description": "identity of app", + "strictProperties": true, + "type": [ + "object" + ], + "properties": { + "id": { + "$ref": "#/definitions/app/definitions/id" + }, + "name": { + "$ref": "#/definitions/app/definitions/name" + } + } + }, + "build": { + "description": "identity and status of build", + "strictProperties": true, + "type": [ + "object" + ], + "properties": { + "id": { + "$ref": "#/definitions/build/definitions/id" + }, + "status": { + "$ref": "#/definitions/build/definitions/status" + } + } + }, + "manifest_errors": { + "$ref": "#/definitions/app-setup/definitions/manifest_errors" + }, + "postdeploy": { + "$ref": "#/definitions/app-setup/definitions/postdeploy" + }, + "resolved_success_url": { + "$ref": "#/definitions/app-setup/definitions/resolved_success_url" + } + }, + "links": [ + { + "description": "Create a new app setup from a gzipped tar archive containing an app.json manifest file.", + "title": "Create", + "rel": "create", + "method": "POST", + "href": "/app-setups", + "schema": { + "required": [ + "source_blob" + ], + "type": [ + "object" + ], + "properties": { + "app": { + "description": 
"optional parameters for created app", + "properties": { + "locked": { + "$ref": "#/definitions/organization-app/definitions/locked" + }, + "name": { + "$ref": "#/definitions/app/definitions/name" + }, + "organization": { + "$ref": "#/definitions/organization/definitions/name" + }, + "personal": { + "$ref": "#/definitions/organization-app/definitions/personal" + }, + "region": { + "$ref": "#/definitions/region/definitions/name" + }, + "stack": { + "$ref": "#/definitions/stack/definitions/name" + } + }, + "type": [ + "object" + ] + }, + "source_blob": { + "description": "gzipped tarball of source code containing app.json manifest file", + "example": "https://example.com/source.tgz?token=xyz", + "properties": { + "url": { + "description": "URL of gzipped tarball of source code containing app.json manifest file", + "example": "https://example.com/source.tgz?token=xyz", + "readOnly": true, + "type": [ + "string" + ] + } + }, + "type": [ + "object" + ] + }, + "overrides": { + "description": "overrides of keys in the app.json manifest file", + "example": { + "env": { + "FOO": "bar", + "BAZ": "qux" + } + }, + "properties": { + "env": { + "description": "overrides of the env specified in the app.json manifest file", + "example": { + "FOO": "bar", + "BAZ": "qux" + }, + "readOnly": true, + "additionalProperties": false, + "patternProperties": { + "^\\w+$": { + "type": [ + "string" + ] + } + }, + "type": [ + "object" + ] + } + }, + "type": [ + "object" + ] + } + } + }, + "targetSchema": { + "$ref": "#/definitions/app-setup" + } + }, + { + "description": "Get the status of an app setup.", + "title": "Info", + "rel": "self", + "method": "GET", + "href": "/app-setups/{(%23%2Fdefinitions%2Fapp-setup%2Fdefinitions%2Fidentity)}", + "targetSchema": { + "$ref": "#/definitions/app-setup" + } + } + ] + }, + "app-transfer": { + "description": "An app transfer represents a two party interaction for transferring ownership of an app.", + "$schema": "http://json-schema.org/draft-04/hyper-schema", + "stability": "production", + "strictProperties": true, + "title": "Heroku Platform API - App Transfer", + "type": [ + "object" + ], + "definitions": { + "created_at": { + "description": "when app transfer was created", + "example": "2012-01-01T12:00:00Z", + "format": "date-time", + "readOnly": true, + "type": [ + "string" + ] + }, + "id": { + "description": "unique identifier of app transfer", + "example": "01234567-89ab-cdef-0123-456789abcdef", + "format": "uuid", + "readOnly": true, + "type": [ + "string" + ] + }, + "identity": { + "anyOf": [ + { + "$ref": "#/definitions/app-transfer/definitions/id" + }, + { + "$ref": "#/definitions/app/definitions/name" + } + ] + }, + "state": { + "description": "the current state of an app transfer", + "enum": [ + "pending", + "accepted", + "declined" + ], + "example": "pending", + "readOnly": true, + "type": [ + "string" + ] + }, + "updated_at": { + "description": "when app transfer was updated", + "example": "2012-01-01T12:00:00Z", + "format": "date-time", + "readOnly": true, + "type": [ + "string" + ] + } + }, + "links": [ + { + "description": "Create a new app transfer.", + "href": "/account/app-transfers", + "method": "POST", + "rel": "create", + "schema": { + "properties": { + "app": { + "$ref": "#/definitions/app/definitions/identity" + }, + "recipient": { + "$ref": "#/definitions/account/definitions/identity" + } + }, + "required": [ + "app", + "recipient" + ], + "type": [ + "object" + ] + }, + "targetSchema": { + "$ref": "#/definitions/app-transfer" + }, + "title": "Create" 
+ }, + { + "description": "Delete an existing app transfer", + "href": "/account/app-transfers/{(%23%2Fdefinitions%2Fapp-transfer%2Fdefinitions%2Fidentity)}", + "method": "DELETE", + "rel": "destroy", + "targetSchema": { + "$ref": "#/definitions/app-transfer" + }, + "title": "Delete" + }, + { + "description": "Info for existing app transfer.", + "href": "/account/app-transfers/{(%23%2Fdefinitions%2Fapp-transfer%2Fdefinitions%2Fidentity)}", + "method": "GET", + "rel": "self", + "targetSchema": { + "$ref": "#/definitions/app-transfer" + }, + "title": "Info" + }, + { + "description": "List existing apps transfers.", + "href": "/account/app-transfers", + "method": "GET", + "rel": "instances", + "targetSchema": { + "items": { + "$ref": "#/definitions/app-transfer" + }, + "type": [ + "array" + ] + }, + "title": "List" + }, + { + "description": "Update an existing app transfer.", + "href": "/account/app-transfers/{(%23%2Fdefinitions%2Fapp-transfer%2Fdefinitions%2Fidentity)}", + "method": "PATCH", + "rel": "update", + "schema": { + "properties": { + "state": { + "$ref": "#/definitions/app-transfer/definitions/state" + } + }, + "required": [ + "state" + ], + "type": [ + "object" + ] + }, + "targetSchema": { + "$ref": "#/definitions/app-transfer" + }, + "title": "Update" + } + ], + "properties": { + "app": { + "description": "app involved in the transfer", + "properties": { + "name": { + "$ref": "#/definitions/app/definitions/name" + }, + "id": { + "$ref": "#/definitions/app/definitions/id" + } + }, + "type": [ + "object" + ] + }, + "created_at": { + "$ref": "#/definitions/app-transfer/definitions/created_at" + }, + "id": { + "$ref": "#/definitions/app-transfer/definitions/id" + }, + "owner": { + "description": "identity of the owner of the transfer", + "properties": { + "email": { + "$ref": "#/definitions/account/definitions/email" + }, + "id": { + "$ref": "#/definitions/account/definitions/id" + } + }, + "strictProperties": true, + "type": [ + "object" + ] + }, + "recipient": { + "description": "identity of the recipient of the transfer", + "properties": { + "email": { + "$ref": "#/definitions/account/definitions/email" + }, + "id": { + "$ref": "#/definitions/account/definitions/id" + } + }, + "strictProperties": true, + "type": [ + "object" + ] + }, + "state": { + "$ref": "#/definitions/app-transfer/definitions/state" + }, + "updated_at": { + "$ref": "#/definitions/app-transfer/definitions/updated_at" + } + } + }, + "app": { + "description": "An app represents the program that you would like to deploy and run on Heroku.", + "$schema": "http://json-schema.org/draft-04/hyper-schema", + "stability": "production", + "strictProperties": true, + "title": "Heroku Platform API - App", + "type": [ + "object" + ], + "definitions": { + "archived_at": { + "description": "when app was archived", + "example": "2012-01-01T12:00:00Z", + "format": "date-time", + "readOnly": true, + "type": [ + "null", + "string" + ] + }, + "buildpack_provided_description": { + "description": "description from buildpack of app", + "example": "Ruby/Rack", + "readOnly": true, + "type": [ + "null", + "string" + ] + }, + "created_at": { + "description": "when app was created", + "example": "2012-01-01T12:00:00Z", + "format": "date-time", + "readOnly": true, + "type": [ + "string" + ] + }, + "git_url": { + "description": "git repo URL of app", + "example": "git@heroku.com:example.git", + "pattern": "^git@heroku\\.com:[a-z][a-z0-9-]{3,30}\\.git$", + "readOnly": true, + "type": [ + "string" + ] + }, + "id": { + "description": "unique 
identifier of app", + "example": "01234567-89ab-cdef-0123-456789abcdef", + "format": "uuid", + "readOnly": true, + "type": [ + "string" + ] + }, + "identity": { + "anyOf": [ + { + "$ref": "#/definitions/app/definitions/id" + }, + { + "$ref": "#/definitions/app/definitions/name" + } + ] + }, + "maintenance": { + "default": false, + "description": "maintenance status of app", + "example": false, + "readOnly": false, + "type": [ + "boolean" + ] + }, + "name": { + "description": "unique name of app", + "example": "example", + "pattern": "^[a-z][a-z0-9-]{3,30}$", + "readOnly": false, + "type": [ + "string" + ] + }, + "released_at": { + "default": null, + "description": "when app was released", + "example": "2012-01-01T12:00:00Z", + "format": "date-time", + "readOnly": true, + "type": [ + "null", + "string" + ] + }, + "repo_size": { + "default": null, + "description": "git repo size in bytes of app", + "example": 0, + "readOnly": true, + "type": [ + "integer", + "null" + ] + }, + "slug_size": { + "default": null, + "description": "slug size in bytes of app", + "example": 0, + "readOnly": true, + "type": [ + "integer", + "null" + ] + }, + "updated_at": { + "description": "when app was updated", + "example": "2012-01-01T12:00:00Z", + "format": "date-time", + "readOnly": true, + "type": [ + "string" + ] + }, + "web_url": { + "description": "web URL of app", + "example": "http://example.herokuapp.com/", + "format": "uri", + "pattern": "^http://[a-z][a-z0-9-]{3,30}\\.herokuapp\\.com/$", + "readOnly": true, + "type": [ + "string" + ] + } + }, + "links": [ + { + "description": "Create a new app.", + "href": "/apps", + "method": "POST", + "rel": "create", + "schema": { + "properties": { + "name": { + "$ref": "#/definitions/app/definitions/name" + }, + "region": { + "$ref": "#/definitions/region/definitions/identity" + }, + "stack": { + "$ref": "#/definitions/stack/definitions/identity" + } + }, + "type": [ + "object" + ] + }, + "targetSchema": { + "$ref": "#/definitions/app" + }, + "title": "Create" + }, + { + "description": "Delete an existing app.", + "href": "/apps/{(%23%2Fdefinitions%2Fapp%2Fdefinitions%2Fidentity)}", + "method": "DELETE", + "rel": "destroy", + "targetSchema": { + "$ref": "#/definitions/app" + }, + "title": "Delete" + }, + { + "description": "Info for existing app.", + "href": "/apps/{(%23%2Fdefinitions%2Fapp%2Fdefinitions%2Fidentity)}", + "method": "GET", + "rel": "self", + "targetSchema": { + "$ref": "#/definitions/app" + }, + "title": "Info" + }, + { + "description": "List existing apps.", + "href": "/apps", + "method": "GET", + "ranges": [ + "id", + "name", + "updated_at" + ], + "rel": "instances", + "targetSchema": { + "items": { + "$ref": "#/definitions/app" + }, + "type": [ + "array" + ] + }, + "title": "List" + }, + { + "description": "Update an existing app.", + "href": "/apps/{(%23%2Fdefinitions%2Fapp%2Fdefinitions%2Fidentity)}", + "method": "PATCH", + "rel": "update", + "schema": { + "properties": { + "maintenance": { + "$ref": "#/definitions/app/definitions/maintenance" + }, + "name": { + "$ref": "#/definitions/app/definitions/name" + } + }, + "type": [ + "object" + ] + }, + "targetSchema": { + "$ref": "#/definitions/app" + }, + "title": "Update" + } + ], + "properties": { + "archived_at": { + "$ref": "#/definitions/app/definitions/archived_at" + }, + "buildpack_provided_description": { + "$ref": "#/definitions/app/definitions/buildpack_provided_description" + }, + "created_at": { + "$ref": "#/definitions/app/definitions/created_at" + }, + "git_url": { + "$ref": 
"#/definitions/app/definitions/git_url" + }, + "id": { + "$ref": "#/definitions/app/definitions/id" + }, + "maintenance": { + "$ref": "#/definitions/app/definitions/maintenance" + }, + "name": { + "$ref": "#/definitions/app/definitions/name" + }, + "owner": { + "description": "identity of app owner", + "properties": { + "email": { + "$ref": "#/definitions/account/definitions/email" + }, + "id": { + "$ref": "#/definitions/account/definitions/id" + } + }, + "strictProperties": true, + "type": [ + "object" + ] + }, + "region": { + "description": "identity of app region", + "properties": { + "id": { + "$ref": "#/definitions/region/definitions/id" + }, + "name": { + "$ref": "#/definitions/region/definitions/name" + } + }, + "strictProperties": true, + "type": [ + "object" + ] + }, + "released_at": { + "$ref": "#/definitions/app/definitions/released_at" + }, + "repo_size": { + "$ref": "#/definitions/app/definitions/repo_size" + }, + "slug_size": { + "$ref": "#/definitions/app/definitions/slug_size" + }, + "stack": { + "description": "identity of app stack", + "properties": { + "id": { + "$ref": "#/definitions/stack/definitions/id" + }, + "name": { + "$ref": "#/definitions/stack/definitions/name" + } + }, + "strictProperties": true, + "type": [ + "object" + ] + }, + "updated_at": { + "$ref": "#/definitions/app/definitions/updated_at" + }, + "web_url": { + "$ref": "#/definitions/app/definitions/web_url" + } + } + }, + "build-result": { + "$schema": "http://json-schema.org/draft-04/hyper-schema", + "description": "A build result contains the output from a build.", + "title": "Heroku Build API - Build Result", + "stability": "production", + "strictProperties": true, + "type": [ + "object" + ], + "definitions": { + "identity": { + }, + "exit_code": { + "description": "status from the build", + "example": 0, + "readOnly": true, + "type": [ + "number" + ] + }, + "line": { + "description": "a single line of output to STDOUT or STDERR from the build.", + "strictProperties": true, + "type": [ + "object" + ], + "example": { + "stream": "STDOUT", + "line": "-----> Ruby app detected\n" + }, + "readOnly": true, + "definitions": { + "stream": { + "type": [ + "string" + ], + "enum": [ + "STDOUT", + "STDERR" + ], + "description": "The output stream where the line was sent.", + "example": "STDOUT", + "readOnly": true + }, + "line": { + "type": [ + "string" + ], + "example": "-----> Ruby app detected\n", + "readOnly": true, + "description": "A line of output from the build." 
+ } + }, + "properties": { + "stream": { + "$ref": "#/definitions/build-result/definitions/line/definitions/stream" + }, + "line": { + "$ref": "#/definitions/build-result/definitions/line/definitions/line" + } + } + } + }, + "links": [ + { + "description": "Info for existing result.", + "href": "/apps/{(%23%2Fdefinitions%2Fapp%2Fdefinitions%2Fidentity)}/builds/{(%23%2Fdefinitions%2Fbuild%2Fdefinitions%2Fidentity)}/result", + "method": "GET", + "rel": "self", + "targetSchema": { + "$ref": "#/definitions/build-result" + }, + "title": "Info" + } + ], + "properties": { + "build": { + "description": "identity of build", + "properties": { + "id": { + "$ref": "#/definitions/build/definitions/id" + }, + "status": { + "$ref": "#/definitions/build/definitions/status" + } + }, + "type": [ + "object" + ] + }, + "exit_code": { + "$ref": "#/definitions/build-result/definitions/exit_code" + }, + "lines": { + "type": [ + "array" + ], + "items": { + "$ref": "#/definitions/build-result/definitions/line" + }, + "description": "A list of all the lines of a build's output.", + "example": [ + { + "line": "-----> Ruby app detected\n", + "stream": "STDOUT" + } + ] + } + } + }, + "build": { + "$schema": "http://json-schema.org/draft-04/hyper-schema", + "description": "A build represents the process of transforming a code tarball into a slug", + "title": "Heroku Build API - Build", + "stability": "production", + "strictProperties": true, + "type": [ + "object" + ], + "definitions": { + "created_at": { + "description": "when build was created", + "example": "2012-01-01T12:00:00Z", + "format": "date-time", + "readOnly": true, + "type": [ + "string" + ] + }, + "id": { + "description": "unique identifier of build", + "example": "01234567-89ab-cdef-0123-456789abcdef", + "format": "uuid", + "readOnly": true, + "type": [ + "string" + ] + }, + "identity": { + "anyOf": [ + { + "$ref": "#/definitions/build/definitions/id" + } + ] + }, + "source_blob": { + "description": "location of gzipped tarball of source code used to create build", + "properties": { + "url": { + "description": "URL where gzipped tar archive of source code for build was downloaded.", + "example": "https://example.com/source.tgz?token=xyz", + "readOnly": true, + "type": [ + "string" + ] + }, + "version": { + "description": "Version of the gzipped tarball.", + "example": "v1.3.0", + "readOnly": true, + "type": [ + "string", + "null" + ] + } + }, + "strictProperties": true, + "type": [ + "object" + ] + }, + "status": { + "description": "status of build", + "enum": [ + "failed", + "pending", + "succeeded" + ], + "example": "succeeded", + "readOnly": true, + "type": [ + "string" + ] + }, + "updated_at": { + "description": "when build was updated", + "example": "2012-01-01T12:00:00Z", + "format": "date-time", + "readOnly": true, + "type": [ + "string" + ] + } + }, + "links": [ + { + "description": "Create a new build.", + "href": "/apps/{(%23%2Fdefinitions%2Fapp%2Fdefinitions%2Fidentity)}/builds", + "method": "POST", + "rel": "create", + "schema": { + "type": [ + "object" + ], + "properties": { + "source_blob": { + "$ref": "#/definitions/build/definitions/source_blob" + } + }, + "required": [ + "source_blob" + ] + }, + "targetSchema": { + "$ref": "#/definitions/build" + }, + "title": "Create" + }, + { + "description": "Info for existing build.", + "href": "/apps/{(%23%2Fdefinitions%2Fapp%2Fdefinitions%2Fidentity)}/builds/{(%23%2Fdefinitions%2Fbuild%2Fdefinitions%2Fidentity)}", + "method": "GET", + "rel": "self", + "targetSchema": { + "$ref": 
"#/definitions/build" + }, + "title": "Info" + }, + { + "description": "List existing build.", + "href": "/apps/{(%23%2Fdefinitions%2Fapp%2Fdefinitions%2Fidentity)}/builds", + "method": "GET", + "ranges": [ + "id", + "started_at" + ], + "rel": "instances", + "targetSchema": { + "items": { + "$ref": "#/definitions/build" + }, + "type": [ + "array" + ] + }, + "title": "List" + } + ], + "properties": { + "created_at": { + "$ref": "#/definitions/build/definitions/created_at" + }, + "id": { + "$ref": "#/definitions/build/definitions/id" + }, + "source_blob": { + "$ref": "#/definitions/build/definitions/source_blob" + }, + "slug": { + "description": "slug created by this build", + "properties": { + "id": { + "$ref": "#/definitions/slug/definitions/id" + } + }, + "strictProperties": true, + "type": [ + "object", + "null" + ] + }, + "status": { + "$ref": "#/definitions/build/definitions/status" + }, + "updated_at": { + "$ref": "#/definitions/build/definitions/updated_at" + }, + "user": { + "description": "user that started the build", + "properties": { + "id": { + "$ref": "#/definitions/account/definitions/id" + }, + "email": { + "$ref": "#/definitions/account/definitions/email" + } + }, + "strictProperties": true, + "type": [ + "object" + ] + } + } + }, + "collaborator": { + "description": "A collaborator represents an account that has been given access to an app on Heroku.", + "$schema": "http://json-schema.org/draft-04/hyper-schema", + "stability": "production", + "strictProperties": true, + "title": "Heroku Platform API - Collaborator", + "type": [ + "object" + ], + "definitions": { + "created_at": { + "description": "when collaborator was created", + "example": "2012-01-01T12:00:00Z", + "format": "date-time", + "readOnly": true, + "type": [ + "string" + ] + }, + "email": { + "description": "invited email address of collaborator", + "example": "collaborator@example.com", + "format": "email", + "readOnly": false, + "type": [ + "string" + ] + }, + "id": { + "description": "unique identifier of collaborator", + "example": "01234567-89ab-cdef-0123-456789abcdef", + "format": "uuid", + "readOnly": true, + "type": [ + "string" + ] + }, + "identity": { + "anyOf": [ + { + "$ref": "#/definitions/collaborator/definitions/email" + }, + { + "$ref": "#/definitions/collaborator/definitions/id" + } + ] + }, + "silent": { + "default": false, + "description": "whether to suppress email invitation when creating collaborator", + "example": false, + "readOnly": false, + "type": [ + "boolean" + ] + }, + "updated_at": { + "description": "when collaborator was updated", + "example": "2012-01-01T12:00:00Z", + "format": "date-time", + "readOnly": true, + "type": [ + "string" + ] + } + }, + "links": [ + { + "description": "Create a new collaborator.", + "href": "/apps/{(%23%2Fdefinitions%2Fapp%2Fdefinitions%2Fidentity)}/collaborators", + "method": "POST", + "rel": "create", + "schema": { + "properties": { + "silent": { + "$ref": "#/definitions/collaborator/definitions/silent" + }, + "user": { + "$ref": "#/definitions/account/definitions/identity" + } + }, + "required": [ + "user" + ], + "type": [ + "object" + ] + }, + "targetSchema": { + "$ref": "#/definitions/collaborator" + }, + "title": "Create" + }, + { + "description": "Delete an existing collaborator.", + "href": "/apps/{(%23%2Fdefinitions%2Fapp%2Fdefinitions%2Fidentity)}/collaborators/{(%23%2Fdefinitions%2Fcollaborator%2Fdefinitions%2Fidentity)}", + "method": "DELETE", + "rel": "destroy", + "targetSchema": { + "$ref": "#/definitions/collaborator" + }, + "title": 
"Delete" + }, + { + "description": "Info for existing collaborator.", + "href": "/apps/{(%23%2Fdefinitions%2Fapp%2Fdefinitions%2Fidentity)}/collaborators/{(%23%2Fdefinitions%2Fcollaborator%2Fdefinitions%2Fidentity)}", + "method": "GET", + "rel": "self", + "targetSchema": { + "$ref": "#/definitions/collaborator" + }, + "title": "Info" + }, + { + "description": "List existing collaborators.", + "href": "/apps/{(%23%2Fdefinitions%2Fapp%2Fdefinitions%2Fidentity)}/collaborators", + "method": "GET", + "rel": "instances", + "targetSchema": { + "items": { + "$ref": "#/definitions/collaborator" + }, + "type": [ + "array" + ] + }, + "title": "List" + } + ], + "properties": { + "created_at": { + "$ref": "#/definitions/collaborator/definitions/created_at" + }, + "id": { + "$ref": "#/definitions/collaborator/definitions/id" + }, + "updated_at": { + "$ref": "#/definitions/collaborator/definitions/updated_at" + }, + "user": { + "description": "identity of collaborated account", + "properties": { + "email": { + "$ref": "#/definitions/account/definitions/email" + }, + "id": { + "$ref": "#/definitions/account/definitions/id" + } + }, + "strictProperties": true, + "type": [ + "object" + ] + } + } + }, + "config-var": { + "description": "Config Vars allow you to manage the configuration information provided to an app on Heroku.", + "$schema": "http://json-schema.org/draft-04/hyper-schema", + "stability": "production", + "strictProperties": true, + "title": "Heroku Platform API - Config Vars", + "type": [ + "object" + ], + "definitions": { + "config_vars": { + "additionalProperties": false, + "description": "hash of config vars", + "example": { + "FOO": "bar", + "BAZ": "qux" + }, + "patternProperties": { + "^\\w+$": { + "type": [ + "string", + "null" + ] + } + }, + "type": [ + "object" + ] + } + }, + "links": [ + { + "description": "Get config-vars for app.", + "href": "/apps/{(%23%2Fdefinitions%2Fapp%2Fdefinitions%2Fidentity)}/config-vars", + "method": "GET", + "rel": "self", + "targetSchema": { + "$ref": "#/definitions/config-var/definitions/config_vars" + }, + "title": "Info" + }, + { + "description": "Update config-vars for app. 
You can update existing config-vars by setting them again, and remove them by setting them to `NULL`.", + "href": "/apps/{(%23%2Fdefinitions%2Fapp%2Fdefinitions%2Fidentity)}/config-vars", + "method": "PATCH", + "rel": "update", + "schema": { + "additionalProperties": false, + "description": "hash of config changes – update values or delete by setting it to NULL", + "example": { + "FOO": "bar", + "BAZ": "qux" + }, + "patternProperties": { + "^\\w+$": { + "type": [ + "string", + "null" + ] + } + }, + "type": [ + "object" + ] + }, + "targetSchema": { + "$ref": "#/definitions/config-var/definitions/config_vars" + }, + "title": "Update" + } + ], + "example": { + "FOO": "bar", + "BAZ": "qux" + }, + "patternProperties": { + "^\\w+$": { + "type": [ + "string" + ] + } + } + }, + "credit": { + "$schema": "http://json-schema.org/draft-04/hyper-schema", + "description": "A credit represents value that will be used up before further charges are assigned to an account.", + "stability": "development", + "strictProperties": true, + "title": "Heroku Platform API - Credit", + "type": [ + "object" + ], + "definitions": { + "amount": { + "description": "total value of credit in cents", + "example": 10000, + "type": [ + "number" + ] + }, + "balance": { + "description": "remaining value of credit in cents", + "example": 5000, + "type": [ + "number" + ] + }, + "created_at": { + "description": "when credit was created", + "example": "2012-01-01T12:00:00Z", + "format": "date-time", + "type": [ + "string" + ] + }, + "expires_at": { + "description": "when credit will expire", + "example": "2012-01-01T12:00:00Z", + "format": "date-time", + "type": [ + "string" + ] + }, + "id": { + "description": "unique identifier of credit", + "example": "01234567-89ab-cdef-0123-456789abcdef", + "format": "uuid", + "type": [ + "string" + ] + }, + "identity": { + "$ref": "#/definitions/credit/definitions/id" + }, + "title": { + "description": "a name for credit", + "example": "gift card", + "type": [ + "string" + ] + }, + "updated_at": { + "description": "when credit was updated", + "example": "2012-01-01T12:00:00Z", + "format": "date-time", + "type": [ + "string" + ] + } + }, + "links": [ + { + "description": "Info for existing credit.", + "href": "/account/credits/{(%23%2Fdefinitions%2Fcredit%2Fdefinitions%2Fidentity)}", + "method": "GET", + "rel": "self", + "targetSchema": { + "$ref": "#/definitions/credit" + }, + "title": "Info" + }, + { + "description": "List existing credits.", + "href": "/account/credits", + "method": "GET", + "rel": "instances", + "targetSchema": { + "items": { + "$ref": "#/definitions/credit" + }, + "type": [ + "array" + ] + }, + "title": "List" + } + ], + "properties": { + "amount": { + "$ref": "#/definitions/credit/definitions/amount" + }, + "balance": { + "$ref": "#/definitions/credit/definitions/balance" + }, + "created_at": { + "$ref": "#/definitions/credit/definitions/created_at" + }, + "expires_at": { + "$ref": "#/definitions/credit/definitions/expires_at" + }, + "id": { + "$ref": "#/definitions/credit/definitions/id" + }, + "title": { + "$ref": "#/definitions/credit/definitions/title" + }, + "updated_at": { + "$ref": "#/definitions/credit/definitions/updated_at" + } + } + }, + "domain": { + "description": "Domains define what web routes should be routed to an app on Heroku.", + "$schema": "http://json-schema.org/draft-04/hyper-schema", + "stability": "production", + "strictProperties": true, + "title": "Heroku Platform API - Domain", + "type": [ + "object" + ], + "definitions": { + "created_at": {
"description": "when domain was created", + "example": "2012-01-01T12:00:00Z", + "format": "date-time", + "readOnly": true, + "type": [ + "string" + ] + }, + "hostname": { + "description": "full hostname", + "example": "subdomain.example.com", + "readOnly": true, + "type": [ + "string" + ] + }, + "id": { + "description": "unique identifier of this domain", + "example": "01234567-89ab-cdef-0123-456789abcdef", + "format": "uuid", + "readOnly": true, + "type": [ + "string" + ] + }, + "identity": { + "anyOf": [ + { + "$ref": "#/definitions/domain/definitions/id" + }, + { + "$ref": "#/definitions/domain/definitions/hostname" + } + ] + }, + "updated_at": { + "description": "when domain was updated", + "example": "2012-01-01T12:00:00Z", + "format": "date-time", + "readOnly": true, + "type": [ + "string" + ] + } + }, + "links": [ + { + "description": "Create a new domain.", + "href": "/apps/{(%23%2Fdefinitions%2Fapp%2Fdefinitions%2Fidentity)}/domains", + "method": "POST", + "rel": "create", + "schema": { + "properties": { + "hostname": { + "$ref": "#/definitions/domain/definitions/hostname" + } + }, + "required": [ + "hostname" + ], + "type": [ + "object" + ] + }, + "targetSchema": { + "$ref": "#/definitions/domain" + }, + "title": "Create" + }, + { + "description": "Delete an existing domain", + "href": "/apps/{(%23%2Fdefinitions%2Fapp%2Fdefinitions%2Fidentity)}/domains/{(%23%2Fdefinitions%2Fdomain%2Fdefinitions%2Fidentity)}", + "method": "DELETE", + "rel": "destroy", + "targetSchema": { + "$ref": "#/definitions/domain" + }, + "title": "Delete" + }, + { + "description": "Info for existing domain.", + "href": "/apps/{(%23%2Fdefinitions%2Fapp%2Fdefinitions%2Fidentity)}/domains/{(%23%2Fdefinitions%2Fdomain%2Fdefinitions%2Fidentity)}", + "method": "GET", + "rel": "self", + "targetSchema": { + "$ref": "#/definitions/domain" + }, + "title": "Info" + }, + { + "description": "List existing domains.", + "href": "/apps/{(%23%2Fdefinitions%2Fapp%2Fdefinitions%2Fidentity)}/domains", + "method": "GET", + "rel": "instances", + "targetSchema": { + "items": { + "$ref": "#/definitions/domain" + }, + "type": [ + "array" + ] + }, + "title": "List" + } + ], + "properties": { + "created_at": { + "$ref": "#/definitions/domain/definitions/created_at" + }, + "hostname": { + "$ref": "#/definitions/domain/definitions/hostname" + }, + "id": { + "$ref": "#/definitions/domain/definitions/id" + }, + "updated_at": { + "$ref": "#/definitions/domain/definitions/updated_at" + } + } + }, + "dyno": { + "description": "Dynos encapsulate running processes of an app on Heroku.", + "$schema": "http://json-schema.org/draft-04/hyper-schema", + "stability": "production", + "strictProperties": true, + "title": "Heroku Platform API - Dyno", + "type": [ + "object" + ], + "definitions": { + "attach": { + "description": "whether to stream output or not", + "example": true, + "readOnly": false, + "type": [ + "boolean" + ] + }, + "attach_url": { + "description": "a URL to stream output from for attached processes or null for non-attached processes", + "example": "rendezvous://rendezvous.runtime.heroku.com:5000/{rendezvous-id}", + "readOnly": true, + "type": [ + "string", + "null" + ] + }, + "command": { + "description": "command used to start this process", + "example": "bash", + "readOnly": false, + "type": [ + "string" + ] + }, + "created_at": { + "description": "when dyno was created", + "example": "2012-01-01T12:00:00Z", + "format": "date-time", + "readOnly": true, + "type": [ + "string" + ] + }, + "env": { + "additionalProperties": false, + 
"description": "custom environment to add to the dyno config vars", + "example": { + "COLUMNS": "80", + "LINES": "24" + }, + "patternProperties": { + "^\\w+$": { + "type": [ + "string" + ] + } + }, + "readOnly": false, + "strictProperties": true, + "type": [ + "object" + ] + }, + "id": { + "description": "unique identifier of this dyno", + "example": "01234567-89ab-cdef-0123-456789abcdef", + "format": "uuid", + "readOnly": true, + "type": [ + "string" + ] + }, + "identity": { + "anyOf": [ + { + "$ref": "#/definitions/dyno/definitions/id" + }, + { + "$ref": "#/definitions/dyno/definitions/name" + } + ] + }, + "name": { + "description": "the name of this process on this dyno", + "example": "run.1", + "readOnly": true, + "type": [ + "string" + ] + }, + "size": { + "description": "dyno size (default: \"1X\")", + "example": "1X", + "readOnly": false, + "type": [ + "string" + ] + }, + "state": { + "description": "current status of process (either: crashed, down, idle, starting, or up)", + "example": "up", + "readOnly": true, + "type": [ + "string" + ] + }, + "type": { + "description": "type of process", + "example": "run", + "readOnly": true, + "type": [ + "string" + ] + }, + "updated_at": { + "description": "when process last changed state", + "example": "2012-01-01T12:00:00Z", + "format": "date-time", + "readOnly": true, + "type": [ + "string" + ] + } + }, + "links": [ + { + "description": "Create a new dyno.", + "href": "/apps/{(%23%2Fdefinitions%2Fapp%2Fdefinitions%2Fidentity)}/dynos", + "method": "POST", + "rel": "create", + "schema": { + "properties": { + "attach": { + "$ref": "#/definitions/dyno/definitions/attach" + }, + "command": { + "$ref": "#/definitions/dyno/definitions/command" + }, + "env": { + "$ref": "#/definitions/dyno/definitions/env" + }, + "size": { + "$ref": "#/definitions/dyno/definitions/size" + } + }, + "required": [ + "command" + ], + "type": [ + "object" + ] + }, + "targetSchema": { + "$ref": "#/definitions/dyno" + }, + "title": "Create" + }, + { + "description": "Restart dyno.", + "href": "/apps/{(%23%2Fdefinitions%2Fapp%2Fdefinitions%2Fidentity)}/dynos/{(%23%2Fdefinitions%2Fdyno%2Fdefinitions%2Fidentity)}", + "method": "DELETE", + "rel": "empty", + "targetSchema": { + "additionalPoperties": false, + "type": [ + "object" + ] + }, + "title": "Restart" + }, + { + "description": "Restart all dynos", + "href": "/apps/{(%23%2Fdefinitions%2Fapp%2Fdefinitions%2Fidentity)}/dynos", + "method": "DELETE", + "rel": "empty", + "targetSchema": { + "additionalPoperties": false, + "type": [ + "object" + ] + }, + "title": "Restart all" + }, + { + "description": "Info for existing dyno.", + "href": "/apps/{(%23%2Fdefinitions%2Fapp%2Fdefinitions%2Fidentity)}/dynos/{(%23%2Fdefinitions%2Fdyno%2Fdefinitions%2Fidentity)}", + "method": "GET", + "rel": "self", + "targetSchema": { + "$ref": "#/definitions/dyno" + }, + "title": "Info" + }, + { + "description": "List existing dynos.", + "href": "/apps/{(%23%2Fdefinitions%2Fapp%2Fdefinitions%2Fidentity)}/dynos", + "method": "GET", + "rel": "instances", + "targetSchema": { + "items": { + "$ref": "#/definitions/dyno" + }, + "type": [ + "array" + ] + }, + "title": "List" + } + ], + "properties": { + "attach_url": { + "$ref": "#/definitions/dyno/definitions/attach_url" + }, + "command": { + "$ref": "#/definitions/dyno/definitions/command" + }, + "created_at": { + "$ref": "#/definitions/dyno/definitions/created_at" + }, + "id": { + "$ref": "#/definitions/dyno/definitions/id" + }, + "name": { + "$ref": "#/definitions/dyno/definitions/name" + }, + 
"release": { + "description": "app release of the dyno", + "properties": { + "id": { + "$ref": "#/definitions/release/definitions/id" + }, + "version": { + "$ref": "#/definitions/release/definitions/version" + } + }, + "strictProperties": true, + "type": [ + "object" + ] + }, + "size": { + "$ref": "#/definitions/dyno/definitions/size" + }, + "state": { + "$ref": "#/definitions/dyno/definitions/state" + }, + "type": { + "$ref": "#/definitions/dyno/definitions/type" + }, + "updated_at": { + "$ref": "#/definitions/dyno/definitions/updated_at" + } + } + }, + "formation": { + "description": "The formation of processes that should be maintained for an app. Update the formation to scale processes or change dyno sizes. Available process type names and commands are defined by the `process_types` attribute for the [slug](#slug) currently released on an app.", + "$schema": "http://json-schema.org/draft-04/hyper-schema", + "stability": "production", + "strictProperties": true, + "title": "Heroku Platform API - Formation", + "type": [ + "object" + ], + "definitions": { + "command": { + "description": "command to use to launch this process", + "example": "bundle exec rails server -p $PORT", + "readOnly": false, + "type": [ + "string" + ] + }, + "created_at": { + "description": "when process type was created", + "example": "2012-01-01T12:00:00Z", + "format": "date-time", + "readOnly": true, + "type": [ + "string" + ] + }, + "id": { + "description": "unique identifier of this process type", + "example": "01234567-89ab-cdef-0123-456789abcdef", + "format": "uuid", + "readOnly": true, + "type": [ + "string" + ] + }, + "identity": { + "anyOf": [ + { + "$ref": "#/definitions/formation/definitions/id" + }, + { + "$ref": "#/definitions/formation/definitions/type" + } + ] + }, + "quantity": { + "description": "number of processes to maintain", + "example": 1, + "readOnly": false, + "type": [ + "integer" + ] + }, + "size": { + "description": "dyno size (default: \"1X\")", + "example": "1X", + "readOnly": false, + "type": [ + "string" + ] + }, + "type": { + "description": "type of process to maintain", + "example": "web", + "readOnly": true, + "type": [ + "string" + ] + }, + "updated_at": { + "description": "when dyno type was updated", + "example": "2012-01-01T12:00:00Z", + "format": "date-time", + "readOnly": true, + "type": [ + "string" + ] + }, + "update": { + "additionalProperties": false, + "description": "Properties to update a process type", + "properties": { + "process": { + "$ref": "#/definitions/formation/definitions/identity" + }, + "quantity": { + "$ref": "#/definitions/formation/definitions/quantity" + }, + "size": { + "$ref": "#/definitions/formation/definitions/size" + } + }, + "readOnly": false, + "required": [ + "process" + ], + "type": [ + "object" + ] + } + }, + "links": [ + { + "description": "Info for a process type", + "href": "/apps/{(%23%2Fdefinitions%2Fapp%2Fdefinitions%2Fidentity)}/formation/{(%23%2Fdefinitions%2Fformation%2Fdefinitions%2Fidentity)}", + "method": "GET", + "rel": "self", + "targetSchema": { + "$ref": "#/definitions/formation" + }, + "title": "Info" + }, + { + "description": "List process type formation", + "href": "/apps/{(%23%2Fdefinitions%2Fapp%2Fdefinitions%2Fidentity)}/formation", + "method": "GET", + "rel": "instances", + "targetSchema": { + "items": { + "$ref": "#/definitions/formation" + }, + "type": [ + "array" + ] + }, + "title": "List" + }, + { + "description": "Batch update process types", + "href": 
"/apps/{(%23%2Fdefinitions%2Fapp%2Fdefinitions%2Fidentity)}/formation", + "method": "PATCH", + "rel": "update", + "schema": { + "properties": { + "updates": { + "type": [ + "array" + ], + "items": { + "$ref": "#/definitions/formation/definitions/update" + }, + "description": "Array with formation updates. Each element must have \"process\", the id or name of the process type to be updated, and can optionally update its \"quantity\" or \"size\".", + "example": [ + { + "process": "web", + "quantity": 1, + "size": "2X" + } + ] + } + }, + "required": [ + "updates" + ], + "type": [ + "object" + ] + }, + "targetSchema": { + "items": { + "$ref": "#/definitions/formation" + }, + "type": [ + "array" + ] + }, + "title": "Batch update" + }, + { + "description": "Update process type", + "href": "/apps/{(%23%2Fdefinitions%2Fapp%2Fdefinitions%2Fidentity)}/formation/{(%23%2Fdefinitions%2Fformation%2Fdefinitions%2Fidentity)}", + "method": "PATCH", + "rel": "update", + "schema": { + "properties": { + "quantity": { + "$ref": "#/definitions/formation/definitions/quantity" + }, + "size": { + "$ref": "#/definitions/formation/definitions/size" + } + }, + "type": [ + "object" + ] + }, + "targetSchema": { + "$ref": "#/definitions/formation" + }, + "title": "Update", + "type": [ + "object" + ] + } + ], + "properties": { + "command": { + "$ref": "#/definitions/formation/definitions/command" + }, + "created_at": { + "$ref": "#/definitions/formation/definitions/created_at" + }, + "id": { + "$ref": "#/definitions/formation/definitions/id" + }, + "quantity": { + "$ref": "#/definitions/formation/definitions/quantity" + }, + "size": { + "$ref": "#/definitions/formation/definitions/size" + }, + "type": { + "$ref": "#/definitions/formation/definitions/type" + }, + "updated_at": { + "$ref": "#/definitions/formation/definitions/updated_at" + } + } + }, + "key": { + "description": "Keys represent public SSH keys associated with an account and are used to authorize accounts as they are performing git operations.", + "$schema": "http://json-schema.org/draft-04/hyper-schema", + "stability": "production", + "strictProperties": true, + "title": "Heroku Platform API - Key", + "type": [ + "object" + ], + "definitions": { + "comment": { + "description": "comment on the key", + "example": "username@host", + "readOnly": true, + "type": [ + "string" + ] + }, + "created_at": { + "description": "when key was created", + "example": "2012-01-01T12:00:00Z", + "format": "date-time", + "readOnly": true, + "type": [ + "string" + ] + }, + "email": { + "deprecated": true, + "description": "deprecated. 
Please refer to 'comment' instead", + "example": "username@host", + "readOnly": true, + "type": [ + "string" + ] + }, + "fingerprint": { + "description": "a unique identifying string based on contents", + "example": "17:63:a4:ba:24:d3:7f:af:17:c8:94:82:7e:80:56:bf", + "readOnly": true, + "type": [ + "string" + ] + }, + "id": { + "description": "unique identifier of this key", + "example": "01234567-89ab-cdef-0123-456789abcdef", + "format": "uuid", + "readOnly": true, + "type": [ + "string" + ] + }, + "identity": { + "anyOf": [ + { + "$ref": "#/definitions/key/definitions/id" + }, + { + "$ref": "#/definitions/key/definitions/fingerprint" + } + ] + }, + "public_key": { + "description": "full public_key as uploaded", + "example": "ssh-rsa AAAAB3NzaC1ycVc/../839Uv username@example.com", + "readOnly": true, + "type": [ + "string" + ] + }, + "updated_at": { + "description": "when key was updated", + "example": "2012-01-01T12:00:00Z", + "format": "date-time", + "readOnly": true, + "type": [ + "string" + ] + } + }, + "links": [ + { + "description": "Create a new key.", + "href": "/account/keys", + "method": "POST", + "rel": "create", + "schema": { + "properties": { + "public_key": { + "$ref": "#/definitions/key/definitions/public_key" + } + }, + "required": [ + "public_key" + ], + "type": [ + "object" + ] + }, + "targetSchema": { + "$ref": "#/definitions/key" + }, + "title": "Create" + }, + { + "description": "Delete an existing key", + "href": "/account/keys/{(%23%2Fdefinitions%2Fkey%2Fdefinitions%2Fidentity)}", + "method": "DELETE", + "rel": "destroy", + "targetSchema": { + "$ref": "#/definitions/key" + }, + "title": "Delete" + }, + { + "description": "Info for existing key.", + "href": "/account/keys/{(%23%2Fdefinitions%2Fkey%2Fdefinitions%2Fidentity)}", + "method": "GET", + "rel": "self", + "targetSchema": { + "items": { + "$ref": "#/definitions/key" + }, + "type": [ + "array" + ] + }, + "title": "Info" + }, + { + "description": "List existing keys.", + "href": "/account/keys", + "method": "GET", + "rel": "instances", + "title": "List" + } + ], + "properties": { + "comment": { + "$ref": "#/definitions/key/definitions/comment" + }, + "created_at": { + "$ref": "#/definitions/key/definitions/created_at" + }, + "email": { + "$ref": "#/definitions/key/definitions/email" + }, + "fingerprint": { + "$ref": "#/definitions/key/definitions/fingerprint" + }, + "id": { + "$ref": "#/definitions/key/definitions/id" + }, + "public_key": { + "$ref": "#/definitions/key/definitions/public_key" + }, + "updated_at": { + "$ref": "#/definitions/key/definitions/updated_at" + } + } + }, + "log-drain": { + "description": "[Log drains](https://devcenter.heroku.com/articles/logging#syslog-drains) provide a way to forward your Heroku logs to an external syslog server for long-term archiving. This external service must be configured to receive syslog packets from Heroku, whereupon its URL can be added to an app using this API. Some addons will add a log drain when they are provisioned to an app. 
These drains can only be removed by removing the add-on.", + "$schema": "http://json-schema.org/draft-04/hyper-schema", + "stability": "production", + "strictProperties": true, + "title": "Heroku Platform API - Log Drain", + "type": [ + "object" + ], + "definitions": { + "addon": { + "description": "addon that created the drain", + "example": "example", + "properties": { + "id": { + "$ref": "#/definitions/addon/definitions/id" + } + }, + "readOnly": true, + "type": [ + "object", + "null" + ] + }, + "created_at": { + "description": "when log drain was created", + "example": "2012-01-01T12:00:00Z", + "format": "date-time", + "readOnly": true, + "type": [ + "string" + ] + }, + "id": { + "description": "unique identifier of this log drain", + "example": "01234567-89ab-cdef-0123-456789abcdef", + "format": "uuid", + "readOnly": true, + "type": [ + "string" + ] + }, + "identity": { + "anyOf": [ + { + "$ref": "#/definitions/log-drain/definitions/id" + }, + { + "$ref": "#/definitions/log-drain/definitions/url" + } + ] + }, + "token": { + "description": "token associated with the log drain", + "example": "d.01234567-89ab-cdef-0123-456789abcdef", + "readOnly": true, + "type": [ + "string" + ] + }, + "updated_at": { + "description": "when log drain was updated", + "example": "2012-01-01T12:00:00Z", + "format": "date-time", + "readOnly": true, + "type": [ + "string" + ] + }, + "url": { + "description": "url associated with the log drain", + "example": "https://example.com/drain", + "readOnly": true, + "type": [ + "string" + ] + } + }, + "links": [ + { + "description": "Create a new log drain.", + "href": "/apps/{(%23%2Fdefinitions%2Fapp%2Fdefinitions%2Fidentity)}/log-drains", + "method": "POST", + "rel": "create", + "schema": { + "properties": { + "url": { + "$ref": "#/definitions/log-drain/definitions/url" + } + }, + "required": [ + "url" + ], + "type": [ + "object" + ] + }, + "targetSchema": { + "$ref": "#/definitions/log-drain" + }, + "title": "Create" + }, + { + "description": "Delete an existing log drain. 
Log drains added by add-ons can only be removed by removing the add-on.", + "href": "/apps/{(%23%2Fdefinitions%2Fapp%2Fdefinitions%2Fidentity)}/log-drains/{(%23%2Fdefinitions%2Flog-drain%2Fdefinitions%2Fidentity)}", + "method": "DELETE", + "rel": "destroy", + "targetSchema": { + "$ref": "#/definitions/log-drain" + }, + "title": "Delete" + }, + { + "description": "Info for existing log drain.", + "href": "/apps/{(%23%2Fdefinitions%2Fapp%2Fdefinitions%2Fidentity)}/log-drains/{(%23%2Fdefinitions%2Flog-drain%2Fdefinitions%2Fidentity)}", + "method": "GET", + "rel": "self", + "targetSchema": { + "$ref": "#/definitions/log-drain" + }, + "title": "Info" + }, + { + "description": "List existing log drains.", + "href": "/apps/{(%23%2Fdefinitions%2Fapp%2Fdefinitions%2Fidentity)}/log-drains", + "method": "GET", + "rel": "instances", + "targetSchema": { + "items": { + "$ref": "#/definitions/log-drain" + }, + "type": [ + "array" + ] + }, + "title": "List" + } + ], + "properties": { + "addon": { + "$ref": "#/definitions/log-drain/definitions/addon" + }, + "created_at": { + "$ref": "#/definitions/log-drain/definitions/created_at" + }, + "id": { + "$ref": "#/definitions/log-drain/definitions/id" + }, + "token": { + "$ref": "#/definitions/log-drain/definitions/token" + }, + "updated_at": { + "$ref": "#/definitions/log-drain/definitions/updated_at" + }, + "url": { + "$ref": "#/definitions/log-drain/definitions/url" + } + } + }, + "log-session": { + "description": "A log session is a reference to the http based log stream for an app.", + "$schema": "http://json-schema.org/draft-04/hyper-schema", + "stability": "production", + "strictProperties": true, + "title": "Heroku Platform API - Log Session", + "type": [ + "object" + ], + "definitions": { + "created_at": { + "description": "when log connection was created", + "example": "2012-01-01T12:00:00Z", + "format": "date-time", + "readOnly": true, + "type": [ + "string" + ] + }, + "dyno": { + "description": "dyno to limit results to", + "example": "web.1", + "readOnly": false, + "type": [ + "string" + ] + }, + "id": { + "description": "unique identifier of this log session", + "example": "01234567-89ab-cdef-0123-456789abcdef", + "format": "uuid", + "readOnly": true, + "type": [ + "string" + ] + }, + "identity": { + "anyOf": [ + { + "$ref": "#/definitions/log-session/definitions/id" + } + ] + }, + "lines": { + "description": "number of log lines to stream at once", + "example": 10, + "readOnly": false, + "type": [ + "integer" + ] + }, + "logplex_url": { + "description": "URL for log streaming session", + "example": "https://logplex.heroku.com/sessions/01234567-89ab-cdef-0123-456789abcdef?srv=1325419200", + "readOnly": true, + "type": [ + "string" + ] + }, + "source": { + "description": "log source to limit results to", + "example": "app", + "readOnly": false, + "type": [ + "string" + ] + }, + "tail": { + "description": "whether to stream ongoing logs", + "example": true, + "readOnly": false, + "type": [ + "boolean" + ] + }, + "updated_at": { + "description": "when log session was updated", + "example": "2012-01-01T12:00:00Z", + "format": "date-time", + "readOnly": true, + "type": [ + "string" + ] + } + }, + "links": [ + { + "description": "Create a new log session.", + "href": "/apps/{(%23%2Fdefinitions%2Fapp%2Fdefinitions%2Fidentity)}/log-sessions", + "method": "POST", + "rel": "create", + "schema": { + "properties": { + "dyno": { + "$ref": "#/definitions/log-session/definitions/dyno" + }, + "lines": { + "$ref": "#/definitions/log-session/definitions/lines" + }, 
+ "source": { + "$ref": "#/definitions/log-session/definitions/source" + }, + "tail": { + "$ref": "#/definitions/log-session/definitions/tail" + } + }, + "type": [ + "object" + ] + }, + "targetSchema": { + "$ref": "#/definitions/log-session" + }, + "title": "Create" + } + ], + "properties": { + "created_at": { + "$ref": "#/definitions/log-session/definitions/created_at" + }, + "id": { + "$ref": "#/definitions/log-session/definitions/id" + }, + "logplex_url": { + "$ref": "#/definitions/log-session/definitions/logplex_url" + }, + "updated_at": { + "$ref": "#/definitions/log-session/definitions/updated_at" + } + } + }, + "oauth-authorization": { + "description": "OAuth authorizations represent clients that a Heroku user has authorized to automate, customize or extend their usage of the platform. For more information please refer to the [Heroku OAuth documentation](https://devcenter.heroku.com/articles/oauth)", + "$schema": "http://json-schema.org/draft-04/hyper-schema", + "stability": "production", + "strictProperties": true, + "title": "Heroku Platform API - OAuth Authorization", + "type": [ + "object" + ], + "definitions": { + "created_at": { + "description": "when OAuth authorization was created", + "example": "2012-01-01T12:00:00Z", + "format": "date-time", + "readOnly": true, + "type": [ + "string" + ] + }, + "description": { + "description": "human-friendly description of this OAuth authorization", + "example": "sample authorization", + "readOnly": true, + "type": [ + "string" + ] + }, + "id": { + "description": "unique identifier of OAuth authorization", + "example": "01234567-89ab-cdef-0123-456789abcdef", + "format": "uuid", + "readOnly": true, + "type": [ + "string" + ] + }, + "identity": { + "anyOf": [ + { + "$ref": "#/definitions/oauth-authorization/definitions/id" + } + ] + }, + "scope": { + "description": "The scope of access OAuth authorization allows", + "example": [ + "global" + ], + "readOnly": true, + "type": [ + "array" + ], + "items": { + "type": [ + "string" + ] + } + }, + "updated_at": { + "description": "when OAuth authorization was updated", + "example": "2012-01-01T12:00:00Z", + "format": "date-time", + "readOnly": true, + "type": [ + "string" + ] + } + }, + "links": [ + { + "description": "Create a new OAuth authorization.", + "href": "/oauth/authorizations", + "method": "POST", + "rel": "create", + "schema": { + "properties": { + "client": { + "$ref": "#/definitions/oauth-client/definitions/identity" + }, + "description": { + "$ref": "#/definitions/oauth-authorization/definitions/description" + }, + "expires_in": { + "$ref": "#/definitions/oauth-token/definitions/expires_in" + }, + "scope": { + "$ref": "#/definitions/oauth-authorization/definitions/scope" + } + }, + "required": [ + "scope" + ], + "type": [ + "object" + ] + }, + "targetSchema": { + "$ref": "#/definitions/oauth-authorization" + }, + "title": "Create" + }, + { + "description": "Delete OAuth authorization.", + "href": "/oauth/authorizations/{(%23%2Fdefinitions%2Foauth-authorization%2Fdefinitions%2Fidentity)}", + "method": "DELETE", + "rel": "destroy", + "targetSchema": { + "$ref": "#/definitions/oauth-authorization" + }, + "title": "Delete" + }, + { + "description": "Info for an OAuth authorization.", + "href": "/oauth/authorizations/{(%23%2Fdefinitions%2Foauth-authorization%2Fdefinitions%2Fidentity)}", + "method": "GET", + "rel": "self", + "targetSchema": { + "$ref": "#/definitions/oauth-authorization" + }, + "title": "Info" + }, + { + "description": "List OAuth authorizations.", + "href": 
"/oauth/authorizations", + "method": "GET", + "rel": "instances", + "targetSchema": { + "items": { + "$ref": "#/definitions/oauth-authorization" + }, + "type": [ + "array" + ] + }, + "title": "List" + } + ], + "properties": { + "access_token": { + "description": "access token for this authorization", + "properties": { + "expires_in": { + "$ref": "#/definitions/oauth-token/definitions/expires_in" + }, + "id": { + "$ref": "#/definitions/oauth-token/definitions/id" + }, + "token": { + "$ref": "#/definitions/oauth-token/definitions/token" + } + }, + "type": [ + "null", + "object" + ] + }, + "client": { + "description": "identifier of the client that obtained this authorization, if any", + "properties": { + "id": { + "$ref": "#/definitions/oauth-client/definitions/id" + }, + "name": { + "$ref": "#/definitions/oauth-client/definitions/name" + }, + "redirect_uri": { + "$ref": "#/definitions/oauth-client/definitions/redirect_uri" + } + }, + "type": [ + "null", + "object" + ] + }, + "created_at": { + "$ref": "#/definitions/oauth-authorization/definitions/created_at" + }, + "grant": { + "description": "this authorization's grant", + "properties": { + "code": { + "$ref": "#/definitions/oauth-grant/definitions/code" + }, + "expires_in": { + "$ref": "#/definitions/oauth-grant/definitions/expires_in" + }, + "id": { + "$ref": "#/definitions/oauth-grant/definitions/id" + } + }, + "strictProperties": true, + "type": [ + "null", + "object" + ] + }, + "id": { + "$ref": "#/definitions/oauth-authorization/definitions/id" + }, + "refresh_token": { + "description": "refresh token for this authorization", + "properties": { + "expires_in": { + "$ref": "#/definitions/oauth-token/definitions/expires_in" + }, + "id": { + "$ref": "#/definitions/oauth-token/definitions/id" + }, + "token": { + "$ref": "#/definitions/oauth-token/definitions/token" + } + }, + "strictProperties": true, + "type": [ + "null", + "object" + ] + }, + "scope": { + "$ref": "#/definitions/oauth-authorization/definitions/scope" + }, + "updated_at": { + "$ref": "#/definitions/oauth-authorization/definitions/updated_at" + } + } + }, + "oauth-client": { + "description": "OAuth clients are applications that Heroku users can authorize to automate, customize or extend their usage of the platform. 
For more information please refer to the [Heroku OAuth documentation](https://devcenter.heroku.com/articles/oauth).", + "$schema": "http://json-schema.org/draft-04/hyper-schema", + "stability": "production", + "strictProperties": true, + "title": "Heroku Platform API - OAuth Client", + "type": [ + "object" + ], + "definitions": { + "created_at": { + "description": "when OAuth client was created", + "example": "2012-01-01T12:00:00Z", + "format": "date-time", + "readOnly": true, + "type": [ + "string" + ] + }, + "id": { + "description": "unique identifier of this OAuth client", + "example": "01234567-89ab-cdef-0123-456789abcdef", + "format": "uuid", + "readOnly": true, + "type": [ + "string" + ] + }, + "identity": { + "anyOf": [ + { + "$ref": "#/definitions/oauth-client/definitions/id" + } + ] + }, + "ignores_delinquent": { + "description": "whether the client is still operable given a delinquent account", + "example": false, + "readOnly": true, + "type": [ + "boolean", + "null" + ] + }, + "name": { + "description": "OAuth client name", + "example": "example", + "readOnly": true, + "type": [ + "string" + ] + }, + "redirect_uri": { + "description": "endpoint for redirection after authorization with OAuth client", + "example": "https://example.com/auth/heroku/callback", + "readOnly": true, + "type": [ + "string" + ] + }, + "secret": { + "description": "secret used to obtain OAuth authorizations under this client", + "example": "01234567-89ab-cdef-0123-456789abcdef", + "readOnly": true, + "type": [ + "string" + ] + }, + "updated_at": { + "description": "when OAuth client was updated", + "example": "2012-01-01T12:00:00Z", + "format": "date-time", + "readOnly": true, + "type": [ + "string" + ] + } + }, + "links": [ + { + "description": "Create a new OAuth client.", + "href": "/oauth/clients", + "method": "POST", + "rel": "create", + "schema": { + "properties": { + "name": { + "$ref": "#/definitions/oauth-client/definitions/name" + }, + "redirect_uri": { + "$ref": "#/definitions/oauth-client/definitions/redirect_uri" + } + }, + "required": [ + "name", + "redirect_uri" + ], + "type": [ + "object" + ] + }, + "targetSchema": { + "$ref": "#/definitions/oauth-client" + }, + "title": "Create" + }, + { + "description": "Delete OAuth client.", + "href": "/oauth/clients/{(%23%2Fdefinitions%2Foauth-client%2Fdefinitions%2Fidentity)}", + "method": "DELETE", + "rel": "destroy", + "targetSchema": { + "$ref": "#/definitions/oauth-client" + }, + "title": "Delete" + }, + { + "description": "Info for an OAuth client", + "href": "/oauth/clients/{(%23%2Fdefinitions%2Foauth-client%2Fdefinitions%2Fidentity)}", + "method": "GET", + "rel": "self", + "title": "Info" + }, + { + "description": "List OAuth clients", + "href": "/oauth/clients", + "method": "GET", + "rel": "instances", + "targetSchema": { + "items": { + "$ref": "#/definitions/oauth-client" + }, + "type": [ + "array" + ] + }, + "title": "List" + }, + { + "description": "Update OAuth client", + "href": "/oauth/clients/{(%23%2Fdefinitions%2Foauth-client%2Fdefinitions%2Fidentity)}", + "method": "PATCH", + "rel": "update", + "schema": { + "properties": { + "name": { + "$ref": "#/definitions/oauth-client/definitions/name" + }, + "redirect_uri": { + "$ref": "#/definitions/oauth-client/definitions/redirect_uri" + } + }, + "type": [ + "object" + ] + }, + "targetSchema": { + "$ref": "#/definitions/oauth-client" + }, + "title": "Update" + } + ], + "properties": { + "created_at": { + "$ref": "#/definitions/oauth-client/definitions/created_at" + }, + "id": { + "$ref": 
"#/definitions/oauth-client/definitions/id" + }, + "ignores_delinquent": { + "$ref": "#/definitions/oauth-client/definitions/ignores_delinquent" + }, + "name": { + "$ref": "#/definitions/oauth-client/definitions/name" + }, + "redirect_uri": { + "$ref": "#/definitions/oauth-client/definitions/redirect_uri" + }, + "secret": { + "$ref": "#/definitions/oauth-client/definitions/secret" + }, + "updated_at": { + "$ref": "#/definitions/oauth-client/definitions/updated_at" + } + } + }, + "oauth-grant": { + "description": "OAuth grants are used to obtain authorizations on behalf of a user. For more information please refer to the [Heroku OAuth documentation](https://devcenter.heroku.com/articles/oauth)", + "$schema": "http://json-schema.org/draft-04/hyper-schema", + "stability": "production", + "strictProperties": true, + "title": "Heroku Platform API - OAuth Grant", + "type": [ + "object" + ], + "definitions": { + "code": { + "description": "grant code received from OAuth web application authorization", + "example": "01234567-89ab-cdef-0123-456789abcdef", + "readOnly": true, + "type": [ + "string" + ] + }, + "expires_in": { + "description": "seconds until OAuth grant expires", + "example": 2592000, + "readOnly": true, + "type": [ + "integer" + ] + }, + "id": { + "description": "unique identifier of OAuth grant", + "example": "01234567-89ab-cdef-0123-456789abcdef", + "format": "uuid", + "readOnly": true, + "type": [ + "string" + ] + }, + "identity": { + "anyOf": [ + { + "$ref": "#/definitions/oauth-grant/definitions/id" + } + ] + }, + "type": { + "description": "type of grant requested, one of `authorization_code` or `refresh_token`", + "example": "authorization_code", + "readOnly": false, + "type": [ + "string" + ] + } + }, + "links": [ + ], + "properties": { + } + }, + "oauth-token": { + "description": "OAuth tokens provide access for authorized clients to act on behalf of a Heroku user to automate, customize or extend their usage of the platform. 
For more information please refer to the [Heroku OAuth documentation](https://devcenter.heroku.com/articles/oauth)", + "$schema": "http://json-schema.org/draft-04/hyper-schema", + "stability": "production", + "strictProperties": true, + "title": "Heroku Platform API - OAuth Token", + "type": [ + "object" + ], + "definitions": { + "created_at": { + "description": "when OAuth token was created", + "example": "2012-01-01T12:00:00Z", + "format": "date-time", + "readOnly": true, + "type": [ + "string" + ] + }, + "expires_in": { + "description": "seconds until OAuth token expires; may be `null` for tokens with indefinite lifetime", + "example": 2592000, + "readOnly": true, + "type": [ + "null", + "integer" + ] + }, + "id": { + "description": "unique identifier of OAuth token", + "example": "01234567-89ab-cdef-0123-456789abcdef", + "format": "uuid", + "readOnly": true, + "type": [ + "string" + ] + }, + "identity": { + "anyOf": [ + { + "$ref": "#/definitions/oauth-token/definitions/id" + } + ] + }, + "token": { + "description": "contents of the token to be used for authorization", + "example": "01234567-89ab-cdef-0123-456789abcdef", + "readOnly": true, + "type": [ + "string" + ] + }, + "updated_at": { + "description": "when OAuth token was updated", + "example": "2012-01-01T12:00:00Z", + "format": "date-time", + "readOnly": true, + "type": [ + "string" + ] + } + }, + "links": [ + { + "description": "Create a new OAuth token.", + "href": "/oauth/tokens", + "method": "POST", + "rel": "create", + "schema": { + "properties": { + "client": { + "type": [ + "object" + ], + "properties": { + "secret": { + "$ref": "#/definitions/oauth-client/definitions/secret" + } + } + }, + "grant": { + "type": [ + "object" + ], + "properties": { + "code": { + "$ref": "#/definitions/oauth-grant/definitions/code" + }, + "type": { + "$ref": "#/definitions/oauth-grant/definitions/type" + } + } + }, + "refresh_token": { + "type": [ + "object" + ], + "properties": { + "token": { + "$ref": "#/definitions/oauth-token/definitions/token" + } + } + } + }, + "required": [ + "grant", + "client", + "refresh_token" + ], + "type": [ + "object" + ] + }, + "targetSchema": { + "$ref": "#/definitions/oauth-token" + }, + "title": "Create" + } + ], + "properties": { + "access_token": { + "description": "current access token", + "properties": { + "expires_in": { + "$ref": "#/definitions/oauth-token/definitions/expires_in" + }, + "id": { + "$ref": "#/definitions/oauth-token/definitions/id" + }, + "token": { + "$ref": "#/definitions/oauth-token/definitions/token" + } + }, + "strictProperties": true, + "type": [ + "object" + ] + }, + "authorization": { + "description": "authorization for this set of tokens", + "properties": { + "id": { + "$ref": "#/definitions/oauth-authorization/definitions/id" + } + }, + "strictProperties": true, + "type": [ + "object" + ] + }, + "client": { + "description": "OAuth client secret used to obtain token", + "properties": { + "secret": { + "$ref": "#/definitions/oauth-client/definitions/secret" + } + }, + "strictProperties": true, + "type": [ + "null", + "object" + ] + }, + "created_at": { + "$ref": "#/definitions/oauth-token/definitions/created_at" + }, + "grant": { + "description": "grant used on the underlying authorization", + "properties": { + "code": { + "$ref": "#/definitions/oauth-grant/definitions/code" + }, + "type": { + "$ref": "#/definitions/oauth-grant/definitions/type" + } + }, + "strictProperties": true, + "type": [ + "object" + ] + }, + "id": { + "$ref": "#/definitions/oauth-token/definitions/id" + 
}, + "refresh_token": { + "description": "refresh token for this authorization", + "properties": { + "expires_in": { + "$ref": "#/definitions/oauth-token/definitions/expires_in" + }, + "id": { + "$ref": "#/definitions/oauth-token/definitions/id" + }, + "token": { + "$ref": "#/definitions/oauth-token/definitions/token" + } + }, + "strictProperties": true, + "type": [ + "object" + ] + }, + "session": { + "description": "OAuth session using this token", + "properties": { + "id": { + "$ref": "#/definitions/oauth-token/definitions/id" + } + }, + "strictProperties": true, + "type": [ + "object" + ] + }, + "updated_at": { + "$ref": "#/definitions/oauth-token/definitions/updated_at" + }, + "user": { + "description": "Reference to the user associated with this token", + "properties": { + "id": { + "$ref": "#/definitions/account/definitions/id" + } + }, + "strictProperties": true, + "type": [ + "object" + ] + } + } + }, + "organization-app-collaborator": { + "description": "An organization collaborator represents an account that has been given access to an organization app on Heroku.", + "$schema": "http://json-schema.org/draft-04/hyper-schema", + "stability": "prototype", + "strictProperties": true, + "title": "Heroku Platform API - Organization App Collaborator", + "type": [ + "object" + ], + "definitions": { + "identity": { + "anyOf": [ + { + "$ref": "#/definitions/collaborator/definitions/email" + } + ] + } + }, + "links": [ + { + "description": "Create a new collaborator on an organization app. Use this endpoint instead of the `/apps/{app_id_or_name}/collaborator` endpoint when you want the collaborator to be granted [privileges] (https://devcenter.heroku.com/articles/org-users-access#roles) according to their role in the organization.", + "href": "/organizations/apps/{(%23%2Fdefinitions%2Fapp%2Fdefinitions%2Fidentity)}/collaborators", + "method": "POST", + "rel": "create", + "schema": { + "properties": { + "silent": { + "$ref": "#/definitions/collaborator/definitions/silent" + }, + "user": { + "$ref": "#/definitions/account/definitions/identity" + } + }, + "required": [ + "user" + ], + "type": [ + "object" + ] + }, + "targetSchema": { + "$ref": "#/definitions/collaborator" + }, + "title": "Create" + }, + { + "description": "Delete an existing collaborator from an organization app.", + "href": "/organizations/apps/{(%23%2Fdefinitions%2Forganization-app%2Fdefinitions%2Fidentity)}/collaborators/{(%23%2Fdefinitions%2Forganization-app-collaborator%2Fdefinitions%2Fidentity)}", + "method": "DELETE", + "rel": "destroy", + "targetSchema": { + "$ref": "#/definitions/collaborator" + }, + "title": "Delete" + }, + { + "description": "Info for a collaborator on an organization app.", + "href": "/organizations/apps/{(%23%2Fdefinitions%2Forganization-app%2Fdefinitions%2Fidentity)}/collaborators/{(%23%2Fdefinitions%2Forganization-app-collaborator%2Fdefinitions%2Fidentity)}", + "method": "GET", + "rel": "self", + "targetSchema": { + "$ref": "#/definitions/collaborator" + }, + "title": "Info" + }, + { + "description": "List collaborators on an organization app.", + "href": "/organizations/apps/{(%23%2Fdefinitions%2Forganization-app%2Fdefinitions%2Fidentity)}/collaborators", + "method": "GET", + "rel": "instances", + "targetSchema": { + "items": { + "$ref": "#/definitions/collaborator" + }, + "type": [ + "array" + ] + }, + "title": "List" + } + ], + "properties": { + "created_at": { + "$ref": "#/definitions/collaborator/definitions/created_at" + }, + "id": { + "$ref": "#/definitions/collaborator/definitions/id" + 
}, + "role": { + "$ref": "#/definitions/organization/definitions/role" + }, + "updated_at": { + "$ref": "#/definitions/collaborator/definitions/updated_at" + }, + "user": { + "description": "identity of collaborated account", + "properties": { + "email": { + "$ref": "#/definitions/account/definitions/email" + }, + "id": { + "$ref": "#/definitions/account/definitions/id" + } + }, + "strictProperties": true, + "type": [ + "object" + ] + } + } + }, + "organization-app": { + "$schema": "http://json-schema.org/draft-04/hyper-schema", + "description": "An organization app encapsulates the organization specific functionality of Heroku apps.", + "stability": "prototype", + "title": "Heroku Platform API - Organization App", + "type": [ + "object" + ], + "definitions": { + "locked": { + "default": false, + "description": "are other organization members forbidden from joining this app.", + "example": false, + "type": [ + "boolean" + ] + }, + "identity": { + "anyOf": [ + { + "$ref": "#/definitions/app/definitions/name" + } + ] + }, + "joined": { + "default": false, + "description": "is the current member a collaborator on this app.", + "example": false, + "type": [ + "boolean" + ] + }, + "personal": { + "default": false, + "description": "force creation of the app in the user account even if a default org is set.", + "example": false, + "type": [ + "boolean" + ] + } + }, + "links": [ + { + "description": "Create a new app in the specified organization, in the default organization if unspecified, or in personal account, if default organization is not set.", + "href": "/organizations/apps", + "method": "POST", + "rel": "create", + "schema": { + "properties": { + "locked": { + "$ref": "#/definitions/organization-app/definitions/locked" + }, + "name": { + "$ref": "#/definitions/app/definitions/name" + }, + "organization": { + "$ref": "#/definitions/organization/definitions/name" + }, + "personal": { + "$ref": "#/definitions/organization-app/definitions/personal" + }, + "region": { + "$ref": "#/definitions/region/definitions/name" + }, + "stack": { + "$ref": "#/definitions/stack/definitions/name" + } + }, + "type": [ + "object" + ] + }, + "title": "Create" + }, + { + "description": "List apps in the default organization, or in personal account, if default organization is not set.", + "href": "/organizations/apps", + "method": "GET", + "rel": "instances", + "targetSchema": { + "items": { + "$ref": "#/definitions/organization-app" + }, + "type": [ + "array" + ] + }, + "title": "List" + }, + { + "description": "List organization apps.", + "href": "/organizations/{(%23%2Fdefinitions%2Forganization%2Fdefinitions%2Fidentity)}/apps", + "method": "GET", + "rel": "instances", + "targetSchema": { + "items": { + "$ref": "#/definitions/organization-app" + }, + "type": [ + "array" + ] + }, + "title": "List For Organization" + }, + { + "description": "Info for an organization app.", + "href": "/organizations/apps/{(%23%2Fdefinitions%2Forganization-app%2Fdefinitions%2Fidentity)}", + "method": "GET", + "rel": "self", + "title": "Info" + }, + { + "description": "Lock or unlock an organization app.", + "href": "/organizations/apps/{(%23%2Fdefinitions%2Forganization-app%2Fdefinitions%2Fidentity)}", + "method": "PATCH", + "rel": "update", + "schema": { + "properties": { + "locked": { + "$ref": "#/definitions/organization-app/definitions/locked" + } + }, + "required": [ + "locked" + ], + "type": [ + "object" + ] + }, + "targetSchema": { + "$ref": "#/definitions/organization-app" + }, + "title": "Update Locked" + }, + { + 
"description": "Transfer an existing organization app to another Heroku account.", + "href": "/organizations/apps/{(%23%2Fdefinitions%2Forganization-app%2Fdefinitions%2Fidentity)}", + "method": "PATCH", + "rel": "update", + "schema": { + "properties": { + "owner": { + "$ref": "#/definitions/account/definitions/identity" + } + }, + "required": [ + "owner" + ], + "type": [ + "object" + ] + }, + "title": "Transfer to Account" + }, + { + "description": "Transfer an existing organization app to another organization.", + "href": "/organizations/apps/{(%23%2Fdefinitions%2Forganization-app%2Fdefinitions%2Fidentity)}", + "method": "PATCH", + "rel": "update", + "schema": { + "properties": { + "owner": { + "$ref": "#/definitions/organization/definitions/name" + } + }, + "required": [ + "owner" + ], + "type": [ + "object" + ] + }, + "targetSchema": { + "$ref": "#/definitions/organization-app" + }, + "title": "Transfer to Organization" + } + ], + "properties": { + "archived_at": { + "$ref": "#/definitions/app/definitions/archived_at" + }, + "buildpack_provided_description": { + "$ref": "#/definitions/app/definitions/buildpack_provided_description" + }, + "created_at": { + "$ref": "#/definitions/app/definitions/created_at" + }, + "git_url": { + "$ref": "#/definitions/app/definitions/git_url" + }, + "id": { + "$ref": "#/definitions/app/definitions/id" + }, + "joined": { + "$ref": "#/definitions/organization-app/definitions/joined" + }, + "locked": { + "$ref": "#/definitions/organization-app/definitions/locked" + }, + "maintenance": { + "$ref": "#/definitions/app/definitions/maintenance" + }, + "name": { + "$ref": "#/definitions/app/definitions/name" + }, + "organization": { + "description": "organization that owns this app", + "properties": { + "name": { + "$ref": "#/definitions/organization/definitions/name" + } + }, + "type": [ + "null", + "object" + ] + }, + "owner": { + "description": "identity of app owner", + "properties": { + "email": { + "$ref": "#/definitions/account/definitions/email" + }, + "id": { + "$ref": "#/definitions/account/definitions/id" + } + }, + "type": [ + "null", + "object" + ] + }, + "region": { + "description": "identity of app region", + "properties": { + "id": { + "$ref": "#/definitions/region/definitions/id" + }, + "name": { + "$ref": "#/definitions/region/definitions/name" + } + }, + "type": [ + "object" + ] + }, + "released_at": { + "$ref": "#/definitions/app/definitions/released_at" + }, + "repo_size": { + "$ref": "#/definitions/app/definitions/repo_size" + }, + "slug_size": { + "$ref": "#/definitions/app/definitions/slug_size" + }, + "stack": { + "description": "identity of app stack", + "properties": { + "id": { + "$ref": "#/definitions/stack/definitions/id" + }, + "name": { + "$ref": "#/definitions/stack/definitions/name" + } + }, + "type": [ + "object" + ] + }, + "updated_at": { + "$ref": "#/definitions/app/definitions/updated_at" + }, + "web_url": { + "$ref": "#/definitions/app/definitions/web_url" + } + } + }, + "organization-member": { + "$schema": "http://json-schema.org/draft-04/hyper-schema", + "description": "An organization member is an individual with access to an organization.", + "stability": "prototype", + "strictProperties": true, + "title": "Heroku Platform API - Organization Member", + "type": [ + "object" + ], + "definitions": { + "created_at": { + "description": "when organization-member was created", + "example": "2012-01-01T12:00:00Z", + "format": "date-time", + "readOnly": true, + "type": [ + "string" + ] + }, + "email": { + "description": "email 
address of the organization member", + "example": "someone@example.org", + "readOnly": true, + "type": [ + "string" + ] + }, + "identity": { + "anyOf": [ + { + "$ref": "#/definitions/organization-member/definitions/email" + } + ] + }, + "updated_at": { + "description": "when organization-member was updated", + "example": "2012-01-01T12:00:00Z", + "format": "date-time", + "readOnly": true, + "type": [ + "string" + ] + } + }, + "links": [ + { + "description": "Create a new organization member, or update their role.", + "href": "/organizations/{(%23%2Fdefinitions%2Forganization%2Fdefinitions%2Fidentity)}/members", + "method": "PUT", + "rel": "create", + "schema": { + "properties": { + "email": { + "$ref": "#/definitions/organization-member/definitions/email" + }, + "role": { + "$ref": "#/definitions/organization/definitions/role" + } + }, + "required": [ + "email", + "role" + ], + "type": [ + "object" + ] + }, + "targetSchema": { + "$ref": "#/definitions/organization-member" + }, + "title": "Create or Update" + }, + { + "description": "Remove a member from the organization.", + "href": "/organizations/{(%23%2Fdefinitions%2Forganization%2Fdefinitions%2Fidentity)}/members/{(%23%2Fdefinitions%2Forganization-member%2Fdefinitions%2Fidentity)}", + "method": "DELETE", + "rel": "destroy", + "targetSchema": { + "$ref": "#/definitions/organization-member" + }, + "title": "Delete" + }, + { + "description": "List members of the organization.", + "href": "/organizations/{(%23%2Fdefinitions%2Forganization%2Fdefinitions%2Fidentity)}/members", + "method": "GET", + "rel": "instances", + "targetSchema": { + "items": { + "$ref": "#/definitions/organization-member" + }, + "type": [ + "array" + ] + }, + "title": "List" + } + ], + "properties": { + "created_at": { + "$ref": "#/definitions/organization-member/definitions/created_at" + }, + "email": { + "$ref": "#/definitions/organization-member/definitions/email" + }, + "role": { + "$ref": "#/definitions/organization/definitions/role" + }, + "updated_at": { + "$ref": "#/definitions/organization-member/definitions/updated_at" + } + } + }, + "organization": { + "$schema": "http://json-schema.org/draft-04/hyper-schema", + "description": "Organizations allow you to manage access to a shared group of applications across your development team.", + "stability": "prototype", + "strictProperties": true, + "title": "Heroku Platform API - Organization", + "type": [ + "object" + ], + "definitions": { + "credit_card_collections": { + "description": "whether charges incurred by the org are paid by credit card.", + "example": "true", + "readOnly": true, + "type": [ + "boolean" + ] + }, + "default": { + "description": "whether to use this organization when none is specified", + "example": "true", + "readOnly": false, + "type": [ + "boolean" + ] + }, + "identity": { + "anyOf": [ + { + "$ref": "#/definitions/organization/definitions/name" + } + ] + }, + "name": { + "description": "unique name of organization", + "example": "example", + "readOnly": true, + "type": [ + "string" + ] + }, + "provisioned_licenses": { + "description": "whether the org is provisioned licenses by salesforce.", + "example": "true", + "readOnly": true, + "type": [ + "boolean" + ] + }, + "role": { + "description": "role in the organization", + "enum": [ + "admin", + "member", + "collaborator" + ], + "example": "admin", + "readOnly": true, + "type": [ + "string" + ] + } + }, + "links": [ + { + "description": "List organizations in which you are a member.", + "href": "/organizations", + "method": "GET", + "rel": 
"instances", + "targetSchema": { + "items": { + "$ref": "#/definitions/organization" + }, + "type": [ + "array" + ] + }, + "title": "List" + }, + { + "description": "Set or unset the organization as your default organization.", + "href": "/organizations/{(%23%2Fdefinitions%2Forganization%2Fdefinitions%2Fidentity)}", + "method": "PATCH", + "rel": "update", + "schema": { + "properties": { + "default": { + "$ref": "#/definitions/organization/definitions/default" + } + }, + "type": [ + "object" + ] + }, + "targetSchema": { + "$ref": "#/definitions/organization" + }, + "title": "Update" + } + ], + "properties": { + "credit_card_collections": { + "$ref": "#/definitions/organization/definitions/credit_card_collections" + }, + "default": { + "$ref": "#/definitions/organization/definitions/default" + }, + "name": { + "$ref": "#/definitions/organization/definitions/name" + }, + "provisioned_licenses": { + "$ref": "#/definitions/organization/definitions/provisioned_licenses" + }, + "role": { + "$ref": "#/definitions/organization/definitions/role" + } + } + }, + "plan": { + "description": "Plans represent different configurations of add-ons that may be added to apps. Endpoints under add-on services can be accessed without authentication.", + "$schema": "http://json-schema.org/draft-04/hyper-schema", + "stability": "production", + "strictProperties": true, + "title": "Heroku Platform API - Plan", + "type": [ + "object" + ], + "definitions": { + "created_at": { + "description": "when plan was created", + "example": "2012-01-01T12:00:00Z", + "format": "date-time", + "readOnly": true, + "type": [ + "string" + ] + }, + "default": { + "description": "whether this plan is the default for its addon service", + "example": false, + "readOnly": true, + "type": [ + "boolean" + ] + }, + "description": { + "description": "description of plan", + "example": "Heroku Postgres Dev", + "readOnly": true, + "type": [ + "string" + ] + }, + "id": { + "description": "unique identifier of this plan", + "example": "01234567-89ab-cdef-0123-456789abcdef", + "format": "uuid", + "readOnly": true, + "type": [ + "string" + ] + }, + "identity": { + "anyOf": [ + { + "$ref": "#/definitions/plan/definitions/id" + }, + { + "$ref": "#/definitions/plan/definitions/name" + } + ] + }, + "name": { + "description": "unique name of this plan", + "example": "heroku-postgresql:dev", + "readOnly": true, + "type": [ + "string" + ] + }, + "cents": { + "description": "price in cents per unit of plan", + "example": 0, + "readOnly": true, + "type": [ + "integer" + ] + }, + "unit": { + "description": "unit of price for plan", + "example": "month", + "readOnly": true, + "type": [ + "string" + ] + }, + "state": { + "description": "release status for plan", + "example": "public", + "readOnly": true, + "type": [ + "string" + ] + }, + "updated_at": { + "description": "when plan was updated", + "example": "2012-01-01T12:00:00Z", + "format": "date-time", + "readOnly": true, + "type": [ + "string" + ] + } + }, + "links": [ + { + "description": "Info for existing plan.", + "href": "/addon-services/{(%23%2Fdefinitions%2Faddon-service%2Fdefinitions%2Fidentity)}/plans/{(%23%2Fdefinitions%2Fplan%2Fdefinitions%2Fidentity)}", + "method": "GET", + "rel": "self", + "targetSchema": { + "$ref": "#/definitions/plan" + }, + "title": "Info" + }, + { + "description": "List existing plans.", + "href": "/addon-services/{(%23%2Fdefinitions%2Faddon-service%2Fdefinitions%2Fidentity)}/plans", + "method": "GET", + "rel": "instances", + "targetSchema": { + "items": { + "$ref": 
"#/definitions/plan" + }, + "type": [ + "array" + ] + }, + "title": "List" + } + ], + "properties": { + "created_at": { + "$ref": "#/definitions/plan/definitions/created_at" + }, + "default": { + "$ref": "#/definitions/plan/definitions/default" + }, + "description": { + "$ref": "#/definitions/plan/definitions/description" + }, + "id": { + "$ref": "#/definitions/plan/definitions/id" + }, + "name": { + "$ref": "#/definitions/plan/definitions/name" + }, + "price": { + "description": "price", + "properties": { + "cents": { + "$ref": "#/definitions/plan/definitions/cents" + }, + "unit": { + "$ref": "#/definitions/plan/definitions/unit" + } + }, + "strictProperties": true, + "type": [ + "object" + ] + }, + "state": { + "$ref": "#/definitions/plan/definitions/state" + }, + "updated_at": { + "$ref": "#/definitions/plan/definitions/updated_at" + } + } + }, + "rate-limit": { + "description": "Rate Limit represents the number of request tokens each account holds. Requests to this endpoint do not count towards the rate limit.", + "$schema": "http://json-schema.org/draft-04/hyper-schema", + "stability": "production", + "strictProperties": true, + "title": "Heroku Platform API - Rate Limit", + "type": [ + "object" + ], + "definitions": { + "identity": { + }, + "remaining": { + "description": "allowed requests remaining in current interval", + "example": 2399, + "readOnly": true, + "type": [ + "integer" + ] + } + }, + "links": [ + { + "description": "Info for rate limits.", + "href": "/account/rate-limits", + "method": "GET", + "rel": "self", + "targetSchema": { + "$ref": "#/definitions/rate-limit" + }, + "title": "Info" + } + ], + "properties": { + "remaining": { + "$ref": "#/definitions/rate-limit/definitions/remaining" + } + } + }, + "region": { + "description": "A region represents a geographic location in which your application may run.", + "$schema": "http://json-schema.org/draft-04/hyper-schema", + "stability": "production", + "strictProperties": true, + "title": "Heroku Platform API - Region", + "type": [ + "object" + ], + "definitions": { + "created_at": { + "description": "when region was created", + "example": "2012-01-01T12:00:00Z", + "format": "date-time", + "readOnly": true, + "type": [ + "string" + ] + }, + "description": { + "description": "description of region", + "example": "United States", + "readOnly": true, + "type": [ + "string" + ] + }, + "id": { + "description": "unique identifier of region", + "example": "01234567-89ab-cdef-0123-456789abcdef", + "format": "uuid", + "readOnly": true, + "type": [ + "string" + ] + }, + "identity": { + "anyOf": [ + { + "$ref": "#/definitions/region/definitions/id" + }, + { + "$ref": "#/definitions/region/definitions/name" + } + ] + }, + "name": { + "description": "unique name of region", + "example": "us", + "readOnly": true, + "type": [ + "string" + ] + }, + "updated_at": { + "description": "when region was updated", + "example": "2012-01-01T12:00:00Z", + "format": "date-time", + "readOnly": true, + "type": [ + "string" + ] + } + }, + "links": [ + { + "description": "Info for existing region.", + "href": "/regions/{(%23%2Fdefinitions%2Fregion%2Fdefinitions%2Fidentity)}", + "method": "GET", + "rel": "self", + "targetSchema": { + "$ref": "#/definitions/region" + }, + "title": "Info" + }, + { + "description": "List existing regions.", + "href": "/regions", + "method": "GET", + "rel": "instances", + "targetSchema": { + "items": { + "$ref": "#/definitions/region" + }, + "type": [ + "array" + ] + }, + "title": "List" + } + ], + "properties": { + 
"created_at": { + "$ref": "#/definitions/region/definitions/created_at" + }, + "description": { + "$ref": "#/definitions/region/definitions/description" + }, + "id": { + "$ref": "#/definitions/region/definitions/id" + }, + "name": { + "$ref": "#/definitions/region/definitions/name" + }, + "updated_at": { + "$ref": "#/definitions/region/definitions/updated_at" + } + } + }, + "release": { + "description": "A release represents a combination of code, config vars and add-ons for an app on Heroku.", + "$schema": "http://json-schema.org/draft-04/hyper-schema", + "stability": "production", + "strictProperties": true, + "title": "Heroku Platform API - Release", + "type": [ + "object" + ], + "definitions": { + "created_at": { + "description": "when release was created", + "example": "2012-01-01T12:00:00Z", + "format": "date-time", + "readOnly": true, + "type": [ + "string" + ] + }, + "description": { + "description": "description of changes in this release", + "example": "Added new feature", + "readOnly": true, + "type": [ + "string" + ] + }, + "id": { + "description": "unique identifier of release", + "example": "01234567-89ab-cdef-0123-456789abcdef", + "format": "uuid", + "readOnly": true, + "type": [ + "string" + ] + }, + "identity": { + "anyOf": [ + { + "$ref": "#/definitions/release/definitions/id" + }, + { + "$ref": "#/definitions/release/definitions/version" + } + ] + }, + "updated_at": { + "description": "when release was updated", + "example": "2012-01-01T12:00:00Z", + "format": "date-time", + "readOnly": true, + "type": [ + "string" + ] + }, + "version": { + "description": "unique version assigned to the release", + "example": 11, + "readOnly": true, + "type": [ + "integer" + ] + } + }, + "links": [ + { + "description": "Info for existing release.", + "href": "/apps/{(%23%2Fdefinitions%2Fapp%2Fdefinitions%2Fidentity)}/releases/{(%23%2Fdefinitions%2Frelease%2Fdefinitions%2Fidentity)}", + "method": "GET", + "rel": "self", + "targetSchema": { + "$ref": "#/definitions/release" + }, + "title": "Info" + }, + { + "description": "List existing releases.", + "href": "/apps/{(%23%2Fdefinitions%2Fapp%2Fdefinitions%2Fidentity)}/releases", + "method": "GET", + "rel": "instances", + "targetSchema": { + "items": { + "$ref": "#/definitions/release" + }, + "type": [ + "array" + ] + }, + "title": "List" + }, + { + "description": "Create new release. 
The API cannot be used to create releases on Bamboo apps.", + "href": "/apps/{(%23%2Fdefinitions%2Fapp%2Fdefinitions%2Fidentity)}/releases", + "method": "POST", + "rel": "create", + "schema": { + "properties": { + "description": { + "$ref": "#/definitions/release/definitions/description" + }, + "slug": { + "$ref": "#/definitions/slug/definitions/identity" + } + }, + "required": [ + "slug" + ], + "type": [ + "object" + ] + }, + "targetSchema": { + "$ref": "#/definitions/release" + }, + "title": "Create" + }, + { + "description": "Rollback to an existing release.", + "href": "/apps/{(%23%2Fdefinitions%2Fapp%2Fdefinitions%2Fidentity)}/releases", + "method": "POST", + "rel": "create", + "schema": { + "properties": { + "release": { + "$ref": "#/definitions/release/definitions/id" + } + }, + "required": [ + "release" + ], + "type": [ + "object" + ] + }, + "targetSchema": { + "$ref": "#/definitions/release" + }, + "title": "Rollback" + } + ], + "properties": { + "created_at": { + "$ref": "#/definitions/release/definitions/created_at" + }, + "description": { + "$ref": "#/definitions/release/definitions/description" + }, + "id": { + "$ref": "#/definitions/release/definitions/id" + }, + "updated_at": { + "$ref": "#/definitions/release/definitions/updated_at" + }, + "slug": { + "description": "slug running in this release", + "properties": { + "id": { + "$ref": "#/definitions/slug/definitions/id" + } + }, + "strictProperties": true, + "type": [ + "object", + "null" + ] + }, + "user": { + "description": "user that created the release", + "properties": { + "id": { + "$ref": "#/definitions/account/definitions/id" + }, + "email": { + "$ref": "#/definitions/account/definitions/email" + } + }, + "strictProperties": true, + "type": [ + "object" + ] + }, + "version": { + "$ref": "#/definitions/release/definitions/version" + } + } + }, + "slug": { + "description": "A slug is a snapshot of your application code that is ready to run on the platform.", + "$schema": "http://json-schema.org/draft-04/hyper-schema", + "stability": "production", + "strictProperties": true, + "title": "Heroku Platform API - Slug", + "type": [ + "object" + ], + "definitions": { + "buildpack_provided_description": { + "description": "description from buildpack of slug", + "example": "Ruby/Rack", + "readOnly": false, + "type": [ + "null", + "string" + ] + }, + "commit": { + "description": "identification of the code with your version control system (eg: SHA of the git HEAD)", + "example": "60883d9e8947a57e04dc9124f25df004866a2051", + "readOnly": false, + "type": [ + "null", + "string" + ] + }, + "created_at": { + "description": "when slug was created", + "example": "2012-01-01T12:00:00Z", + "format": "date-time", + "readOnly": true, + "type": [ + "string" + ] + }, + "id": { + "description": "unique identifier of slug", + "example": "01234567-89ab-cdef-0123-456789abcdef", + "format": "uuid", + "readOnly": true, + "type": [ + "string" + ] + }, + "identity": { + "anyOf": [ + { + "$ref": "#/definitions/slug/definitions/id" + } + ] + }, + "method": { + "description": "method to be used to interact with the slug blob", + "example": "GET", + "readOnly": true, + "type": [ + "string" + ] + }, + "process_types": { + "additionalProperties": false, + "description": "hash mapping process type names to their respective command", + "example": { + "web": "./bin/web -p $PORT" + }, + "patternProperties": { + "^\\w+$": { + "type": [ + "string" + ] + } + }, + "readOnly": false, + "type": [ + "object" + ] + }, + "size": { + "default": null, + "description": 
"size of slug, in bytes", + "example": 2048, + "readOnly": true, + "type": [ + "integer", + "null" + ] + }, + "updated_at": { + "description": "when slug was updated", + "example": "2012-01-01T12:00:00Z", + "format": "date-time", + "readOnly": true, + "type": [ + "string" + ] + }, + "url": { + "description": "URL to interact with the slug blob", + "example": "https://api.heroku.com/slugs/1234.tgz", + "readOnly": true, + "type": [ + "string" + ] + } + }, + "links": [ + { + "description": "Info for existing slug.", + "href": "/apps/{(%23%2Fdefinitions%2Fapp%2Fdefinitions%2Fidentity)}/slugs/{(%23%2Fdefinitions%2Fslug%2Fdefinitions%2Fidentity)}", + "method": "GET", + "rel": "self", + "targetSchema": { + "$ref": "#/definitions/slug" + }, + "title": "Info" + }, + { + "description": "Create a new slug. For more information please refer to [Deploying Slugs using the Platform API](https://devcenter.heroku.com/articles/platform-api-deploying-slugs?preview=1).", + "href": "/apps/{(%23%2Fdefinitions%2Fapp%2Fdefinitions%2Fidentity)}/slugs", + "method": "POST", + "rel": "create", + "schema": { + "properties": { + "buildpack_provided_description": { + "$ref": "#/definitions/slug/definitions/buildpack_provided_description" + }, + "commit": { + "$ref": "#/definitions/slug/definitions/commit" + }, + "process_types": { + "$ref": "#/definitions/slug/definitions/process_types" + } + }, + "required": [ + "process_types" + ], + "type": [ + "object" + ] + }, + "targetSchema": { + "$ref": "#/definitions/slug" + }, + "title": "Create" + } + ], + "properties": { + "blob": { + "description": "pointer to the url where clients can fetch or store the actual release binary", + "properties": { + "method": { + "$ref": "#/definitions/slug/definitions/method" + }, + "url": { + "$ref": "#/definitions/slug/definitions/url" + } + }, + "strictProperties": true, + "type": [ + "object" + ] + }, + "buildpack_provided_description": { + "$ref": "#/definitions/slug/definitions/buildpack_provided_description" + }, + "commit": { + "$ref": "#/definitions/slug/definitions/commit" + }, + "created_at": { + "$ref": "#/definitions/slug/definitions/created_at" + }, + "id": { + "$ref": "#/definitions/slug/definitions/id" + }, + "process_types": { + "$ref": "#/definitions/slug/definitions/process_types" + }, + "size": { + "$ref": "#/definitions/slug/definitions/size" + }, + "updated_at": { + "$ref": "#/definitions/slug/definitions/updated_at" + } + } + }, + "ssl-endpoint": { + "description": "[SSL Endpoint](https://devcenter.heroku.com/articles/ssl-endpoint) is a public address serving custom SSL cert for HTTPS traffic to a Heroku app. 
Note that an app must have the `ssl:endpoint` addon installed before it can provision an SSL Endpoint using these APIs.", + "$schema": "http://json-schema.org/draft-04/hyper-schema", + "title": "Heroku Platform API - SSL Endpoint", + "stability": "production", + "strictProperties": true, + "type": [ + "object" + ], + "definitions": { + "certificate_chain": { + "description": "raw contents of the public certificate chain (eg: .crt or .pem file)", + "example": "-----BEGIN CERTIFICATE----- ...", + "readOnly": false, + "type": [ + "string" + ] + }, + "cname": { + "description": "canonical name record, the address to point a domain at", + "example": "example.herokussl.com", + "readOnly": false, + "type": [ + "string" + ] + }, + "created_at": { + "description": "when endpoint was created", + "example": "2012-01-01T12:00:00Z", + "format": "date-time", + "readOnly": true, + "type": [ + "string" + ] + }, + "id": { + "description": "unique identifier of this SSL endpoint", + "example": "01234567-89ab-cdef-0123-456789abcdef", + "format": "uuid", + "readOnly": true, + "type": [ + "string" + ] + }, + "identity": { + "anyOf": [ + { + "$ref": "#/definitions/ssl-endpoint/definitions/id" + }, + { + "$ref": "#/definitions/ssl-endpoint/definitions/name" + } + ] + }, + "name": { + "description": "unique name for SSL endpoint", + "example": "example", + "pattern": "^[a-z][a-z0-9-]{3,30}$", + "readOnly": true, + "type": [ + "string" + ] + }, + "preprocess": { + "default": true, + "description": "allow Heroku to modify an uploaded public certificate chain if deemed advantageous by adding missing intermediaries, stripping unnecessary ones, etc.", + "example": true, + "readOnly": false, + "type": [ + "boolean" + ] + }, + "private_key": { + "description": "contents of the private key (eg .key file)", + "example": "-----BEGIN RSA PRIVATE KEY----- ...", + "readOnly": false, + "type": [ + "string" + ] + }, + "rollback": { + "default": false, + "description": "indicates that a rollback should be performed", + "example": false, + "readOnly": false, + "type": [ + "boolean" + ] + }, + "updated_at": { + "description": "when endpoint was updated", + "example": "2012-01-01T12:00:00Z", + "format": "date-time", + "readOnly": true, + "type": [ + "string" + ] + } + }, + "links": [ + { + "description": "Create a new SSL endpoint.", + "href": "/apps/{(%23%2Fdefinitions%2Fapp%2Fdefinitions%2Fidentity)}/ssl-endpoints", + "method": "POST", + "rel": "create", + "schema": { + "properties": { + "certificate_chain": { + "$ref": "#/definitions/ssl-endpoint/definitions/certificate_chain" + }, + "preprocess": { + "$ref": "#/definitions/ssl-endpoint/definitions/preprocess" + }, + "private_key": { + "$ref": "#/definitions/ssl-endpoint/definitions/private_key" + } + }, + "required": [ + "certificate_chain", + "private_key" + ], + "type": [ + "object" + ] + }, + "targetSchema": { + "$ref": "#/definitions/ssl-endpoint" + }, + "title": "Create" + }, + { + "description": "Delete existing SSL endpoint.", + "href": "/apps/{(%23%2Fdefinitions%2Fapp%2Fdefinitions%2Fidentity)}/ssl-endpoints/{(%23%2Fdefinitions%2Fssl-endpoint%2Fdefinitions%2Fidentity)}", + "method": "DELETE", + "rel": "destroy", + "targetSchema": { + "$ref": "#/definitions/ssl-endpoint" + }, + "title": "Delete" + }, + { + "description": "Info for existing SSL endpoint.", + "href": "/apps/{(%23%2Fdefinitions%2Fapp%2Fdefinitions%2Fidentity)}/ssl-endpoints/{(%23%2Fdefinitions%2Fssl-endpoint%2Fdefinitions%2Fidentity)}", + "method": "GET", + "rel": "self", + "targetSchema": { + "$ref": 
"#/definitions/ssl-endpoint" + }, + "title": "Info" + }, + { + "description": "List existing SSL endpoints.", + "href": "/apps/{(%23%2Fdefinitions%2Fapp%2Fdefinitions%2Fidentity)}/ssl-endpoints", + "method": "GET", + "rel": "instances", + "targetSchema": { + "items": { + "$ref": "#/definitions/ssl-endpoint" + }, + "type": [ + "array" + ] + }, + "title": "List" + }, + { + "description": "Update an existing SSL endpoint.", + "href": "/apps/{(%23%2Fdefinitions%2Fapp%2Fdefinitions%2Fidentity)}/ssl-endpoints/{(%23%2Fdefinitions%2Fssl-endpoint%2Fdefinitions%2Fidentity)}", + "method": "PATCH", + "rel": "update", + "schema": { + "properties": { + "certificate_chain": { + "$ref": "#/definitions/ssl-endpoint/definitions/certificate_chain" + }, + "preprocess": { + "$ref": "#/definitions/ssl-endpoint/definitions/preprocess" + }, + "private_key": { + "$ref": "#/definitions/ssl-endpoint/definitions/private_key" + }, + "rollback": { + "$ref": "#/definitions/ssl-endpoint/definitions/rollback" + } + }, + "type": [ + "object" + ] + }, + "targetSchema": { + "$ref": "#/definitions/ssl-endpoint" + }, + "title": "Update" + } + ], + "properties": { + "certificate_chain": { + "$ref": "#/definitions/ssl-endpoint/definitions/certificate_chain" + }, + "cname": { + "$ref": "#/definitions/ssl-endpoint/definitions/cname" + }, + "created_at": { + "$ref": "#/definitions/ssl-endpoint/definitions/created_at" + }, + "id": { + "$ref": "#/definitions/ssl-endpoint/definitions/id" + }, + "name": { + "$ref": "#/definitions/ssl-endpoint/definitions/name" + }, + "updated_at": { + "$ref": "#/definitions/ssl-endpoint/definitions/updated_at" + } + } + }, + "stack": { + "description": "Stacks are the different application execution environments available in the Heroku platform.", + "$schema": "http://json-schema.org/draft-04/hyper-schema", + "stability": "production", + "strictProperties": true, + "title": "Heroku Platform API - Stack", + "type": [ + "object" + ], + "definitions": { + "created_at": { + "description": "when stack was introduced", + "example": "2012-01-01T12:00:00Z", + "format": "date-time", + "readOnly": true, + "type": [ + "string" + ] + }, + "id": { + "description": "unique identifier of stack", + "example": "01234567-89ab-cdef-0123-456789abcdef", + "format": "uuid", + "readOnly": true, + "type": [ + "string" + ] + }, + "identity": { + "anyOf": [ + { + "$ref": "#/definitions/stack/definitions/name" + }, + { + "$ref": "#/definitions/stack/definitions/id" + } + ] + }, + "name": { + "description": "unique name of stack", + "example": "cedar", + "readOnly": true, + "type": [ + "string" + ] + }, + "state": { + "description": "availability of this stack: beta, deprecated or public", + "example": "public", + "readOnly": true, + "type": [ + "string" + ] + }, + "updated_at": { + "description": "when stack was last modified", + "example": "2012-01-01T12:00:00Z", + "format": "date-time", + "readOnly": true, + "type": [ + "string" + ] + } + }, + "links": [ + { + "description": "Stack info.", + "href": "/stacks/{(%23%2Fdefinitions%2Fstack%2Fdefinitions%2Fidentity)}", + "method": "GET", + "rel": "self", + "targetSchema": { + "$ref": "#/definitions/stack" + }, + "title": "Info" + }, + { + "description": "List available stacks.", + "href": "/stacks", + "method": "GET", + "rel": "instances", + "targetSchema": { + "items": { + "$ref": "#/definitions/stack" + }, + "type": [ + "array" + ] + }, + "title": "List" + } + ], + "properties": { + "created_at": { + "$ref": "#/definitions/stack/definitions/created_at" + }, + "id": { + "$ref": 
"#/definitions/stack/definitions/id" + }, + "name": { + "$ref": "#/definitions/stack/definitions/name" + }, + "state": { + "$ref": "#/definitions/stack/definitions/state" + }, + "updated_at": { + "$ref": "#/definitions/stack/definitions/updated_at" + } + } + } + }, + "properties": { + "account-feature": { + "$ref": "#/definitions/account-feature" + }, + "account": { + "$ref": "#/definitions/account" + }, + "addon-service": { + "$ref": "#/definitions/addon-service" + }, + "addon": { + "$ref": "#/definitions/addon" + }, + "app-feature": { + "$ref": "#/definitions/app-feature" + }, + "app-setup": { + "$ref": "#/definitions/app-setup" + }, + "app-transfer": { + "$ref": "#/definitions/app-transfer" + }, + "app": { + "$ref": "#/definitions/app" + }, + "build-result": { + "$ref": "#/definitions/build-result" + }, + "build": { + "$ref": "#/definitions/build" + }, + "collaborator": { + "$ref": "#/definitions/collaborator" + }, + "config-var": { + "$ref": "#/definitions/config-var" + }, + "credit": { + "$ref": "#/definitions/credit" + }, + "domain": { + "$ref": "#/definitions/domain" + }, + "dyno": { + "$ref": "#/definitions/dyno" + }, + "formation": { + "$ref": "#/definitions/formation" + }, + "key": { + "$ref": "#/definitions/key" + }, + "log-drain": { + "$ref": "#/definitions/log-drain" + }, + "log-session": { + "$ref": "#/definitions/log-session" + }, + "oauth-authorization": { + "$ref": "#/definitions/oauth-authorization" + }, + "oauth-client": { + "$ref": "#/definitions/oauth-client" + }, + "oauth-grant": { + "$ref": "#/definitions/oauth-grant" + }, + "oauth-token": { + "$ref": "#/definitions/oauth-token" + }, + "organization-app-collaborator": { + "$ref": "#/definitions/organization-app-collaborator" + }, + "organization-app": { + "$ref": "#/definitions/organization-app" + }, + "organization-member": { + "$ref": "#/definitions/organization-member" + }, + "organization": { + "$ref": "#/definitions/organization" + }, + "plan": { + "$ref": "#/definitions/plan" + }, + "rate-limit": { + "$ref": "#/definitions/rate-limit" + }, + "region": { + "$ref": "#/definitions/region" + }, + "release": { + "$ref": "#/definitions/release" + }, + "slug": { + "$ref": "#/definitions/slug" + }, + "ssl-endpoint": { + "$ref": "#/definitions/ssl-endpoint" + }, + "stack": { + "$ref": "#/definitions/stack" + } + }, + "type": [ + "object" + ], + "description": "The platform API empowers developers to automate, extend and combine Heroku with other services.", + "id": "http://api.heroku.com/schema#", + "links": [ + { + "href": "https://api.heroku.com", + "rel": "self" + }, + { + "href": "/schema", + "method": "GET", + "rel": "self", + "targetSchema": { + "additionalProperties": true + } + } + ], + "title": "Heroku Platform API" +} diff --git a/vendor/github.com/cyberdelia/heroku-go/v3/transport.go b/vendor/github.com/cyberdelia/heroku-go/v3/transport.go new file mode 100644 index 000000000..f1f53c322 --- /dev/null +++ b/vendor/github.com/cyberdelia/heroku-go/v3/transport.go @@ -0,0 +1,152 @@ +package heroku + +import ( + "encoding/json" + "errors" + "fmt" + "log" + "net/http" + "net/http/httputil" + "os" + "strings" + + "github.com/pborman/uuid" +) + +var DefaultTransport = &Transport{} + +var DefaultClient = &http.Client{ + Transport: DefaultTransport, +} + +type Transport struct { + // Username is the HTTP basic auth username for API calls made by this Client. + Username string + + // Password is the HTTP basic auth password for API calls made by this Client. 
+ Password string + + // UserAgent to be provided in API requests. Set to DefaultUserAgent if not + // specified. + UserAgent string + + // Debug mode can be used to dump the full request and response to stderr. + Debug bool + + // AdditionalHeaders are extra headers to add to each HTTP request sent by + // this Client. + AdditionalHeaders http.Header + + // Transport is the HTTP transport to use when making requests. + // It will default to http.DefaultTransport if nil. + Transport http.RoundTripper +} + +// Forward CancelRequest to underlying Transport +func (t *Transport) CancelRequest(req *http.Request) { + type canceler interface { + CancelRequest(*http.Request) + } + tr, ok := t.Transport.(canceler) + if !ok { + log.Printf("heroku: Client Transport of type %T doesn't support CancelRequest; Timeout not supported\n", t.Transport) + return + } + tr.CancelRequest(req) +} + +func (t *Transport) RoundTrip(req *http.Request) (*http.Response, error) { + if t.Transport == nil { + t.Transport = http.DefaultTransport + } + + // Making a copy of the Request so that + // we don't modify the Request we were given. + req = cloneRequest(req) + + if t.UserAgent != "" { + req.Header.Set("User-Agent", t.UserAgent) + } + + req.Header.Set("Accept", "application/vnd.heroku+json; version=3") + req.Header.Set("Request-Id", uuid.New()) + req.SetBasicAuth(t.Username, t.Password) + for k, v := range t.AdditionalHeaders { + req.Header[k] = v + } + + if t.Debug { + dump, err := httputil.DumpRequestOut(req, true) + if err != nil { + log.Println(err) + } else { + os.Stderr.Write(dump) + os.Stderr.Write([]byte{'\n', '\n'}) + } + } + + resp, err := t.Transport.RoundTrip(req) + if err != nil { + if resp != nil { + resp.Body.Close() + } + return nil, err + } + + if t.Debug { + dump, err := httputil.DumpResponse(resp, true) + if err != nil { + log.Println(err) + } else { + os.Stderr.Write(dump) + os.Stderr.Write([]byte{'\n'}) + } + } + + if err = checkResponse(resp); err != nil { + if resp != nil { + resp.Body.Close() + } + return nil, err + } + + return resp, nil +} + +type Error struct { + error + ID string + URL string +} + +func checkResponse(resp *http.Response) error { + if resp.StatusCode/100 != 2 { // 200, 201, 202, etc + var e struct { + Message string + ID string + URL string `json:"url"` + } + err := json.NewDecoder(resp.Body).Decode(&e) + if err != nil { + return fmt.Errorf("encountered an error: %s", resp.Status) + } + return Error{error: errors.New(e.Message), ID: e.ID, URL: e.URL} + } + if msg := resp.Header.Get("X-Heroku-Warning"); msg != "" { + log.Println(strings.TrimSpace(msg)) + } + return nil +} + +// cloneRequest returns a clone of the provided *http.Request.
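+// RoundTrip must not modify the request it is given (per the
+// http.RoundTripper contract), so it calls cloneRequest before setting any
+// auth, Accept, or Request-Id headers: the struct is copied shallowly and a
+// fresh Header map is populated from the original.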
+func cloneRequest(req *http.Request) *http.Request { + // shallow copy of the struct + clone := new(http.Request) + *clone = *req + // deep copy of the Header + clone.Header = make(http.Header) + for k, s := range req.Header { + clone.Header[k] = s + } + return clone +} diff --git a/vendor/github.com/digitalocean/godo/.travis.yml b/vendor/github.com/digitalocean/godo/.travis.yml new file mode 100644 index 000000000..245a2f517 --- /dev/null +++ b/vendor/github.com/digitalocean/godo/.travis.yml @@ -0,0 +1,6 @@ +language: go + +go: + - 1.3 + - 1.4 + - tip diff --git a/vendor/github.com/digitalocean/godo/CONTRIBUTING.md b/vendor/github.com/digitalocean/godo/CONTRIBUTING.md new file mode 100644 index 000000000..f27200a7a --- /dev/null +++ b/vendor/github.com/digitalocean/godo/CONTRIBUTING.md @@ -0,0 +1,23 @@ +# Contributing + +If you submit a pull request, please keep the following guidelines in mind: + +1. Code should be `go fmt` compliant. +2. Types, structs and funcs should be documented. +3. Tests pass. + +## Getting set up + +Assuming your `$GOPATH` is set up according to your desires, run: + +```sh +go get github.com/digitalocean/godo +``` + +## Running tests + +When working on code in this repository, tests can be run via: + +```sh +go test . +``` diff --git a/vendor/github.com/digitalocean/godo/LICENSE.txt b/vendor/github.com/digitalocean/godo/LICENSE.txt new file mode 100644 index 000000000..43c5d2eee --- /dev/null +++ b/vendor/github.com/digitalocean/godo/LICENSE.txt @@ -0,0 +1,55 @@ +Copyright (c) 2014-2016 The godo AUTHORS. All rights reserved. + +MIT License + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + +====================== +Portions of the client are based on code at: +https://github.com/google/go-github/ + +Copyright (c) 2013 The go-github AUTHORS. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. 
+ +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + diff --git a/vendor/github.com/digitalocean/godo/README.md b/vendor/github.com/digitalocean/godo/README.md new file mode 100644 index 000000000..03d0c0768 --- /dev/null +++ b/vendor/github.com/digitalocean/godo/README.md @@ -0,0 +1,136 @@ +[![Build Status](https://travis-ci.org/digitalocean/godo.svg)](https://travis-ci.org/digitalocean/godo) + +# Godo + +Godo is a Go client library for accessing the DigitalOcean V2 API. + +You can view the client API docs here: [http://godoc.org/github.com/digitalocean/godo](http://godoc.org/github.com/digitalocean/godo) + +You can view DigitalOcean API docs here: [https://developers.digitalocean.com/documentation/v2/](https://developers.digitalocean.com/documentation/v2/) + + +## Usage + +```go +import "github.com/digitalocean/godo" +``` + +Create a new DigitalOcean client, then use the exposed services to +access different parts of the DigitalOcean API. + +### Authentication + +Currently, Personal Access Token (PAT) is the only method of +authenticating with the API. You can manage your tokens +at the DigitalOcean Control Panel [Applications Page](https://cloud.digitalocean.com/settings/applications). + +You can then use your token to create a new client: + +```go +import "golang.org/x/oauth2" + +pat := "mytoken" +type TokenSource struct { + AccessToken string +} + +func (t *TokenSource) Token() (*oauth2.Token, error) { + token := &oauth2.Token{ + AccessToken: t.AccessToken, + } + return token, nil +} + +tokenSource := &TokenSource{ + AccessToken: pat, +} +oauthClient := oauth2.NewClient(oauth2.NoContext, tokenSource) +client := godo.NewClient(oauthClient) +``` + +## Examples + + +To create a new Droplet: + +```go +dropletName := "super-cool-droplet" + +createRequest := &godo.DropletCreateRequest{ + Name: dropletName, + Region: "nyc3", + Size: "512mb", + Image: godo.DropletCreateImage{ + Slug: "ubuntu-14-04-x64", + }, +} + +newDroplet, _, err := client.Droplets.Create(createRequest) + +if err != nil { + fmt.Printf("Something bad happened: %s\n\n", err) + return err +} +``` + +### Pagination + +If a list of items is paginated by the API, you must request pages individually. For example, to fetch all Droplets: + +```go +func DropletList(client *godo.Client) ([]godo.Droplet, error) { + // create a list to hold our droplets + list := []godo.Droplet{} + + // create options. 
initially, these will be blank + opt := &godo.ListOptions{} + for { + droplets, resp, err := client.Droplets.List(opt) + if err != nil { + return nil, err + } + + // append the current page's droplets to our list + for _, d := range droplets { + list = append(list, d) + } + + // if we are at the last page, break out of the for loop + if resp.Links == nil || resp.Links.IsLastPage() { + break + } + + page, err := resp.Links.CurrentPage() + if err != nil { + return nil, err + } + + // set the page we want for the next request + opt.Page = page + 1 + } + + return list, nil +} +``` + +## Versioning + +Each version of the client is tagged and the version is updated accordingly. + +Since Go does not have built-in versioning, a package management tool is +recommended - a good one that works with git tags is +[gopkg.in](http://labix.org/gopkg.in). + +To see the list of past versions, run `git tag`. + + +## Documentation + +For a comprehensive list of examples, check out the [API documentation](https://developers.digitalocean.com/documentation/v2/). + +For details on all the functionality in this library, see the [GoDoc](http://godoc.org/github.com/digitalocean/godo) documentation. + + +## Contributing + +We love pull requests! Please see the [contribution guidelines](CONTRIBUTING.md). diff --git a/vendor/github.com/digitalocean/godo/account.go b/vendor/github.com/digitalocean/godo/account.go new file mode 100644 index 000000000..3c2a1c3af --- /dev/null +++ b/vendor/github.com/digitalocean/godo/account.go @@ -0,0 +1,53 @@ +package godo + +// AccountService is an interface for interfacing with the Account +// endpoints of the DigitalOcean API +// See: https://developers.digitalocean.com/documentation/v2/#account +type AccountService interface { + Get() (*Account, *Response, error) +} + +// AccountServiceOp handles communication with the Account related methods of +// the DigitalOcean API.
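+//
+// A minimal usage sketch (assuming an authenticated *godo.Client named
+// client; error handling elided):
+//
+//	acct, _, err := client.Account.Get()
+//	if err == nil {
+//		fmt.Println(acct.Email, acct.DropletLimit)
+//	}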
+type AccountServiceOp struct { + client *Client +} + +var _ AccountService = &AccountServiceOp{} + +// Account represents a DigitalOcean Account +type Account struct { + DropletLimit int `json:"droplet_limit,omitempty"` + FloatingIPLimit int `json:"floating_ip_limit,omitempty"` + Email string `json:"email,omitempty"` + UUID string `json:"uuid,omitempty"` + EmailVerified bool `json:"email_verified,omitempty"` + Status string `json:"status,omitempty"` + StatusMessage string `json:"status_message,omitempty"` +} + +type accountRoot struct { + Account *Account `json:"account"` +} + +func (r Account) String() string { + return Stringify(r) +} + +// Get DigitalOcean account info +func (s *AccountServiceOp) Get() (*Account, *Response, error) { + path := "v2/account" + + req, err := s.client.NewRequest("GET", path, nil) + if err != nil { + return nil, nil, err + } + + root := new(accountRoot) + resp, err := s.client.Do(req, root) + if err != nil { + return nil, resp, err + } + + return root.Account, resp, err +} diff --git a/vendor/github.com/digitalocean/godo/account_test.go b/vendor/github.com/digitalocean/godo/account_test.go new file mode 100644 index 000000000..53e86ac1b --- /dev/null +++ b/vendor/github.com/digitalocean/godo/account_test.go @@ -0,0 +1,59 @@ +package godo + +import ( + "fmt" + "net/http" + "reflect" + "testing" +) + +func TestAccountGet(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/v2/account", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "GET") + + response := ` + { "account": { + "droplet_limit": 25, + "floating_ip_limit": 25, + "email": "sammy@digitalocean.com", + "uuid": "b6fr89dbf6d9156cace5f3c78dc9851d957381ef", + "email_verified": true + } + }` + + fmt.Fprint(w, response) + }) + + acct, _, err := client.Account.Get() + if err != nil { + t.Errorf("Account.Get returned error: %v", err) + } + + expected := &Account{DropletLimit: 25, FloatingIPLimit: 25, Email: "sammy@digitalocean.com", + UUID: "b6fr89dbf6d9156cace5f3c78dc9851d957381ef", EmailVerified: true} + if !reflect.DeepEqual(acct, expected) { + t.Errorf("Account.Get returned %+v, expected %+v", acct, expected) + } +} + +func TestAccountString(t *testing.T) { + acct := &Account{ + DropletLimit: 25, + FloatingIPLimit: 25, + Email: "sammy@digitalocean.com", + UUID: "b6fr89dbf6d9156cace5f3c78dc9851d957381ef", + EmailVerified: true, + Status: "active", + StatusMessage: "message", + } + + stringified := acct.String() + expected := `godo.Account{DropletLimit:25, FloatingIPLimit:25, Email:"sammy@digitalocean.com", UUID:"b6fr89dbf6d9156cace5f3c78dc9851d957381ef", EmailVerified:true, Status:"active", StatusMessage:"message"}` + if expected != stringified { + t.Errorf("Account.String returned %+v, expected %+v", stringified, expected) + } + +} diff --git a/vendor/github.com/digitalocean/godo/action.go b/vendor/github.com/digitalocean/godo/action.go new file mode 100644 index 000000000..e19493b92 --- /dev/null +++ b/vendor/github.com/digitalocean/godo/action.go @@ -0,0 +1,100 @@ +package godo + +import "fmt" + +const ( + actionsBasePath = "v2/actions" + + // ActionInProgress is an in progress action status + ActionInProgress = "in-progress" + + // ActionCompleted is a completed action status + ActionCompleted = "completed" +) + +// ActionsService handles communication with action related methods of the +// DigitalOcean API: https://developers.digitalocean.com/documentation/v2#actions +type ActionsService interface { + List(*ListOptions) ([]Action, *Response, error) + Get(int) (*Action,
*Response, error) +} + +// ActionsServiceOp handles communication with the action related methods of the +// DigitalOcean API. +type ActionsServiceOp struct { + client *Client +} + +var _ ActionsService = &ActionsServiceOp{} + +type actionsRoot struct { + Actions []Action `json:"actions"` + Links *Links `json:"links"` +} + +type actionRoot struct { + Event Action `json:"action"` +} + +// Action represents a DigitalOcean Action +type Action struct { + ID int `json:"id"` + Status string `json:"status"` + Type string `json:"type"` + StartedAt *Timestamp `json:"started_at"` + CompletedAt *Timestamp `json:"completed_at"` + ResourceID int `json:"resource_id"` + ResourceType string `json:"resource_type"` + Region *Region `json:"region,omitempty"` + RegionSlug string `json:"region_slug,omitempty"` +} + +// List all actions +func (s *ActionsServiceOp) List(opt *ListOptions) ([]Action, *Response, error) { + path := actionsBasePath + path, err := addOptions(path, opt) + if err != nil { + return nil, nil, err + } + + req, err := s.client.NewRequest("GET", path, nil) + if err != nil { + return nil, nil, err + } + + root := new(actionsRoot) + resp, err := s.client.Do(req, root) + if err != nil { + return nil, resp, err + } + if l := root.Links; l != nil { + resp.Links = l + } + + return root.Actions, resp, err +} + +// Get an action by ID. +func (s *ActionsServiceOp) Get(id int) (*Action, *Response, error) { + if id < 1 { + return nil, nil, NewArgError("id", "cannot be less than 1") + } + + path := fmt.Sprintf("%s/%d", actionsBasePath, id) + req, err := s.client.NewRequest("GET", path, nil) + if err != nil { + return nil, nil, err + } + + root := new(actionRoot) + resp, err := s.client.Do(req, root) + if err != nil { + return nil, resp, err + } + + return &root.Event, resp, err +} + +func (a Action) String() string { + return Stringify(a) +} diff --git a/vendor/github.com/digitalocean/godo/action_test.go b/vendor/github.com/digitalocean/godo/action_test.go new file mode 100644 index 000000000..9c63da03e --- /dev/null +++ b/vendor/github.com/digitalocean/godo/action_test.go @@ -0,0 +1,136 @@ +package godo + +import ( + "fmt" + "net/http" + "reflect" + "testing" + "time" +) + +func TestAction_List(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/v2/actions", func(w http.ResponseWriter, r *http.Request) { + fmt.Fprint(w, `{"actions": [{"id":1},{"id":2}]}`) + testMethod(t, r, "GET") + }) + + actions, _, err := client.Actions.List(nil) + if err != nil { + t.Fatalf("unexpected error: %s", err) + } + + expected := []Action{{ID: 1}, {ID: 2}} + if len(actions) != len(expected) || actions[0].ID != expected[0].ID || actions[1].ID != expected[1].ID { + t.Fatalf("unexpected response") + } +} + +func TestAction_ListActionMultiplePages(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/v2/actions", func(w http.ResponseWriter, r *http.Request) { + fmt.Fprint(w, `{"actions": [{"id":1},{"id":2}], "links":{"pages":{"next":"http://example.com/v2/droplets/?page=2"}}}`) + testMethod(t, r, "GET") + }) + + _, resp, err := client.Actions.List(nil) + if err != nil { + t.Fatal(err) + } + + checkCurrentPage(t, resp, 1) +} + +func TestAction_RetrievePageByNumber(t *testing.T) { + setup() + defer teardown() + + jBlob := ` + { + "actions": [{"id":1},{"id":2}], + "links":{ + "pages":{ + "next":"http://example.com/v2/actions/?page=3", + "prev":"http://example.com/v2/actions/?page=1", + "last":"http://example.com/v2/actions/?page=3", + "first":"http://example.com/v2/actions/?page=1" + } + } + }` + + 
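+	// The canned links above describe page 2 of 3 (prev is page 1, next is
+	// page 3), so checkCurrentPage below should resolve the current page to 2.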
mux.HandleFunc("/v2/actions", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "GET") + fmt.Fprint(w, jBlob) + }) + + opt := &ListOptions{Page: 2} + _, resp, err := client.Actions.List(opt) + if err != nil { + t.Fatal(err) + } + + checkCurrentPage(t, resp, 2) +} + +func TestAction_Get(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/v2/actions/12345", func(w http.ResponseWriter, r *http.Request) { + fmt.Fprint(w, `{"action": {"id":12345,"region":{"name":"name","slug":"slug","available":true,"sizes":["512mb"],"features":["virtio"]},"region_slug":"slug"}}`) + testMethod(t, r, "GET") + }) + + action, _, err := client.Actions.Get(12345) + if err != nil { + t.Fatalf("unexpected error: %s", err) + } + + if action.ID != 12345 { + t.Fatalf("unexpected response") + } + + region := &Region{ + Name: "name", + Slug: "slug", + Available: true, + Sizes: []string{"512mb"}, + Features: []string{"virtio"}, + } + if !reflect.DeepEqual(action.Region, region) { + t.Fatalf("unexpected response, invalid region") + } + + if action.RegionSlug != "slug" { + t.Fatalf("unexpected response, invalid region slug") + } +} + +func TestAction_String(t *testing.T) { + pt, err := time.Parse(time.RFC3339, "2014-05-08T20:36:47Z") + if err != nil { + t.Fatalf("unexpected error: %s", err) + } + + startedAt := &Timestamp{ + Time: pt, + } + action := &Action{ + ID: 1, + Status: "in-progress", + Type: "transfer", + StartedAt: startedAt, + } + + stringified := action.String() + expected := `godo.Action{ID:1, Status:"in-progress", Type:"transfer", ` + + `StartedAt:godo.Timestamp{2014-05-08 20:36:47 +0000 UTC}, ` + + `ResourceID:0, ResourceType:"", RegionSlug:""}` + if expected != stringified { + t.Errorf("Action.Stringify returned %+v, expected %+v", stringified, expected) + } +} diff --git a/vendor/github.com/digitalocean/godo/doc.go b/vendor/github.com/digitalocean/godo/doc.go new file mode 100644 index 000000000..e660f794a --- /dev/null +++ b/vendor/github.com/digitalocean/godo/doc.go @@ -0,0 +1,2 @@ +// Package godo is the DigitalOcean API v2 client for Go +package godo diff --git a/vendor/github.com/digitalocean/godo/domains.go b/vendor/github.com/digitalocean/godo/domains.go new file mode 100644 index 000000000..31de17868 --- /dev/null +++ b/vendor/github.com/digitalocean/godo/domains.go @@ -0,0 +1,323 @@ +package godo + +import "fmt" + +const domainsBasePath = "v2/domains" + +// DomainsService is an interface for managing DNS with the DigitalOcean API. +// See: https://developers.digitalocean.com/documentation/v2#domains and +// https://developers.digitalocean.com/documentation/v2#domain-records +type DomainsService interface { + List(*ListOptions) ([]Domain, *Response, error) + Get(string) (*Domain, *Response, error) + Create(*DomainCreateRequest) (*Domain, *Response, error) + Delete(string) (*Response, error) + + Records(string, *ListOptions) ([]DomainRecord, *Response, error) + Record(string, int) (*DomainRecord, *Response, error) + DeleteRecord(string, int) (*Response, error) + EditRecord(string, int, *DomainRecordEditRequest) (*DomainRecord, *Response, error) + CreateRecord(string, *DomainRecordEditRequest) (*DomainRecord, *Response, error) +} + +// DomainsServiceOp handles communication with the domain related methods of the +// DigitalOcean API.
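+//
+// A small sketch of the create flow (assuming an authenticated *godo.Client
+// named client; the name and address are illustrative):
+//
+//	domain, _, err := client.Domains.Create(&DomainCreateRequest{
+//		Name:      "example.com",
+//		IPAddress: "203.0.113.10",
+//	})
+//	if err == nil {
+//		fmt.Println(domain.Name)
+//	}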
+type DomainsServiceOp struct { + client *Client +} + +var _ DomainsService = &DomainsServiceOp{} + +// Domain represents a DigitalOcean domain +type Domain struct { + Name string `json:"name"` + TTL int `json:"ttl"` + ZoneFile string `json:"zone_file"` +} + +// domainRoot represents a response from the DigitalOcean API +type domainRoot struct { + Domain *Domain `json:"domain"` +} + +type domainsRoot struct { + Domains []Domain `json:"domains"` + Links *Links `json:"links"` +} + +// DomainCreateRequest represents a request to create a domain. +type DomainCreateRequest struct { + Name string `json:"name"` + IPAddress string `json:"ip_address"` +} + +// domainRecordRoot is the root of an individual Domain Record response +type domainRecordRoot struct { + DomainRecord *DomainRecord `json:"domain_record"` +} + +// domainRecordsRoot is the root of a group of Domain Record responses +type domainRecordsRoot struct { + DomainRecords []DomainRecord `json:"domain_records"` + Links *Links `json:"links"` +} + +// DomainRecord represents a DigitalOcean DomainRecord +type DomainRecord struct { + ID int `json:"id,float64,omitempty"` + Type string `json:"type,omitempty"` + Name string `json:"name,omitempty"` + Data string `json:"data,omitempty"` + Priority int `json:"priority,omitempty"` + Port int `json:"port,omitempty"` + Weight int `json:"weight,omitempty"` +} + +// DomainRecordEditRequest represents a request to update a domain record. +type DomainRecordEditRequest struct { + Type string `json:"type,omitempty"` + Name string `json:"name,omitempty"` + Data string `json:"data,omitempty"` + Priority int `json:"priority,omitempty"` + Port int `json:"port,omitempty"` + Weight int `json:"weight,omitempty"` +} + +func (d Domain) String() string { + return Stringify(d) +} + +// List all domains. +func (s DomainsServiceOp) List(opt *ListOptions) ([]Domain, *Response, error) { + path := domainsBasePath + path, err := addOptions(path, opt) + if err != nil { + return nil, nil, err + } + + req, err := s.client.NewRequest("GET", path, nil) + if err != nil { + return nil, nil, err + } + + root := new(domainsRoot) + resp, err := s.client.Do(req, root) + if err != nil { + return nil, resp, err + } + if l := root.Links; l != nil { + resp.Links = l + } + + return root.Domains, resp, err +} + +// Get individual domain. It requires a non-empty domain name.
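+// For example (same assumed client as above):
+//
+//	domain, _, err := client.Domains.Get("example.com")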
+func (s *DomainsServiceOp) Get(name string) (*Domain, *Response, error) { + if len(name) < 1 { + return nil, nil, NewArgError("name", "cannot be an empty string") + } + + path := fmt.Sprintf("%s/%s", domainsBasePath, name) + + req, err := s.client.NewRequest("GET", path, nil) + if err != nil { + return nil, nil, err + } + + root := new(domainRoot) + resp, err := s.client.Do(req, root) + if err != nil { + return nil, resp, err + } + + return root.Domain, resp, err +} + +// Create a new domain +func (s *DomainsServiceOp) Create(createRequest *DomainCreateRequest) (*Domain, *Response, error) { + if createRequest == nil { + return nil, nil, NewArgError("createRequest", "cannot be nil") + } + + path := domainsBasePath + + req, err := s.client.NewRequest("POST", path, createRequest) + if err != nil { + return nil, nil, err + } + + root := new(domainRoot) + resp, err := s.client.Do(req, root) + if err != nil { + return nil, resp, err + } + return root.Domain, resp, err +} + +// Delete domain +func (s *DomainsServiceOp) Delete(name string) (*Response, error) { + if len(name) < 1 { + return nil, NewArgError("name", "cannot be an empty string") + } + + path := fmt.Sprintf("%s/%s", domainsBasePath, name) + + req, err := s.client.NewRequest("DELETE", path, nil) + if err != nil { + return nil, err + } + + resp, err := s.client.Do(req, nil) + + return resp, err +} + +// Converts a DomainRecord to a string. +func (d DomainRecord) String() string { + return Stringify(d) +} + +// Converts a DomainRecordEditRequest to a string. +func (d DomainRecordEditRequest) String() string { + return Stringify(d) +} + +// Records returns a slice of DomainRecords for a domain +func (s *DomainsServiceOp) Records(domain string, opt *ListOptions) ([]DomainRecord, *Response, error) { + if len(domain) < 1 { + return nil, nil, NewArgError("domain", "cannot be an empty string") + } + + path := fmt.Sprintf("%s/%s/records", domainsBasePath, domain) + path, err := addOptions(path, opt) + if err != nil { + return nil, nil, err + } + + req, err := s.client.NewRequest("GET", path, nil) + if err != nil { + return nil, nil, err + } + + root := new(domainRecordsRoot) + resp, err := s.client.Do(req, root) + if err != nil { + return nil, resp, err + } + if l := root.Links; l != nil { + resp.Links = l + } + + return root.DomainRecords, resp, err +} + +// Record returns a single domain record by its id +func (s *DomainsServiceOp) Record(domain string, id int) (*DomainRecord, *Response, error) { + if len(domain) < 1 { + return nil, nil, NewArgError("domain", "cannot be an empty string") + } + + if id < 1 { + return nil, nil, NewArgError("id", "cannot be less than 1") + } + + path := fmt.Sprintf("%s/%s/records/%d", domainsBasePath, domain, id) + + req, err := s.client.NewRequest("GET", path, nil) + if err != nil { + return nil, nil, err + } + + record := new(domainRecordRoot) + resp, err := s.client.Do(req, record) + if err != nil { + return nil, resp, err + } + + return record.DomainRecord, resp, err +} + +// DeleteRecord deletes a record from a domain identified by id +func (s *DomainsServiceOp) DeleteRecord(domain string, id int) (*Response, error) { + if len(domain) < 1 { + return nil, NewArgError("domain", "cannot be an empty string") + } + + if id < 1 { + return nil, NewArgError("id", "cannot be less than 1") + } + + path := fmt.Sprintf("%s/%s/records/%d", domainsBasePath, domain, id) + + req, err := s.client.NewRequest("DELETE", path, nil) + if err != nil { + return nil, err + } + + resp, err := s.client.Do(req, nil) + + return resp, err
+} + +// EditRecord edits a record using a DomainRecordEditRequest +func (s *DomainsServiceOp) EditRecord( + domain string, + id int, + editRequest *DomainRecordEditRequest, +) (*DomainRecord, *Response, error) { + if len(domain) < 1 { + return nil, nil, NewArgError("domain", "cannot be an empty string") + } + + if id < 1 { + return nil, nil, NewArgError("id", "cannot be less than 1") + } + + if editRequest == nil { + return nil, nil, NewArgError("editRequest", "cannot be nil") + } + + path := fmt.Sprintf("%s/%s/records/%d", domainsBasePath, domain, id) + + req, err := s.client.NewRequest("PUT", path, editRequest) + if err != nil { + return nil, nil, err + } + + d := new(DomainRecord) + resp, err := s.client.Do(req, d) + if err != nil { + return nil, resp, err + } + + return d, resp, err +} + +// CreateRecord creates a record using a DomainRecordEditRequest +func (s *DomainsServiceOp) CreateRecord( + domain string, + createRequest *DomainRecordEditRequest) (*DomainRecord, *Response, error) { + if len(domain) < 1 { + return nil, nil, NewArgError("domain", "cannot be an empty string") + } + + if createRequest == nil { + return nil, nil, NewArgError("createRequest", "cannot be nil") + } + + path := fmt.Sprintf("%s/%s/records", domainsBasePath, domain) + req, err := s.client.NewRequest("POST", path, createRequest) + + if err != nil { + return nil, nil, err + } + + d := new(domainRecordRoot) + resp, err := s.client.Do(req, d) + if err != nil { + return nil, resp, err + } + + return d.DomainRecord, resp, err +} diff --git a/vendor/github.com/digitalocean/godo/domains_test.go b/vendor/github.com/digitalocean/godo/domains_test.go new file mode 100644 index 000000000..6dbd72478 --- /dev/null +++ b/vendor/github.com/digitalocean/godo/domains_test.go @@ -0,0 +1,340 @@ +package godo + +import ( + "encoding/json" + "fmt" + "net/http" + "reflect" + "testing" +) + +func TestDomains_ListDomains(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/v2/domains", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "GET") + fmt.Fprint(w, `{"domains": [{"name":"foo.com"},{"name":"bar.com"}]}`) + }) + + domains, _, err := client.Domains.List(nil) + if err != nil { + t.Errorf("Domains.List returned error: %v", err) + } + + expected := []Domain{{Name: "foo.com"}, {Name: "bar.com"}} + if !reflect.DeepEqual(domains, expected) { + t.Errorf("Domains.List returned %+v, expected %+v", domains, expected) + } +} + +func TestDomains_ListDomainsMultiplePages(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/v2/domains", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "GET") + fmt.Fprint(w, `{"domains": [{"id":1},{"id":2}], "links":{"pages":{"next":"http://example.com/v2/domains/?page=2"}}}`) + }) + + _, resp, err := client.Domains.List(nil) + if err != nil { + t.Fatal(err) + } + + checkCurrentPage(t, resp, 1) +} + +func TestDomains_RetrievePageByNumber(t *testing.T) { + setup() + defer teardown() + + jBlob := ` + { + "domains": [{"id":1},{"id":2}], + "links":{ + "pages":{ + "next":"http://example.com/v2/domains/?page=3", + "prev":"http://example.com/v2/domains/?page=1", + "last":"http://example.com/v2/domains/?page=3", + "first":"http://example.com/v2/domains/?page=1" + } + } + }` + + mux.HandleFunc("/v2/domains", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "GET") + fmt.Fprint(w, jBlob) + }) + + opt := &ListOptions{Page: 2} + _, resp, err := client.Domains.List(opt) + if err != nil { + t.Fatal(err) + } + + checkCurrentPage(t, resp, 2) +} + +func
TestDomains_GetDomain(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/v2/domains/example.com", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "GET") + fmt.Fprint(w, `{"domain":{"name":"example.com"}}`) + }) + + domains, _, err := client.Domains.Get("example.com") + if err != nil { + t.Errorf("domain.Get returned error: %v", err) + } + + expected := &Domain{Name: "example.com"} + if !reflect.DeepEqual(domains, expected) { + t.Errorf("domains.Get returned %+v, expected %+v", domains, expected) + } +} + +func TestDomains_Create(t *testing.T) { + setup() + defer teardown() + + createRequest := &DomainCreateRequest{ + Name: "example.com", + IPAddress: "127.0.0.1", + } + + mux.HandleFunc("/v2/domains", func(w http.ResponseWriter, r *http.Request) { + v := new(DomainCreateRequest) + err := json.NewDecoder(r.Body).Decode(v) + if err != nil { + t.Fatal(err) + } + + testMethod(t, r, "POST") + if !reflect.DeepEqual(v, createRequest) { + t.Errorf("Request body = %+v, expected %+v", v, createRequest) + } + + fmt.Fprint(w, `{"domain":{"name":"example.com"}}`) + }) + + domain, _, err := client.Domains.Create(createRequest) + if err != nil { + t.Errorf("Domains.Create returned error: %v", err) + } + + expected := &Domain{Name: "example.com"} + if !reflect.DeepEqual(domain, expected) { + t.Errorf("Domains.Create returned %+v, expected %+v", domain, expected) + } +} + +func TestDomains_Destroy(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/v2/domains/example.com", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "DELETE") + }) + + _, err := client.Domains.Delete("example.com") + if err != nil { + t.Errorf("Domains.Delete returned error: %v", err) + } +} + +func TestDomains_AllRecordsForDomainName(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/v2/domains/example.com/records", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "GET") + fmt.Fprint(w, `{"domain_records":[{"id":1},{"id":2}]}`) + }) + + records, _, err := client.Domains.Records("example.com", nil) + if err != nil { + t.Errorf("Domains.List returned error: %v", err) + } + + expected := []DomainRecord{{ID: 1}, {ID: 2}} + if !reflect.DeepEqual(records, expected) { + t.Errorf("Domains.List returned %+v, expected %+v", records, expected) + } +} + +func TestDomains_AllRecordsForDomainName_PerPage(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/v2/domains/example.com/records", func(w http.ResponseWriter, r *http.Request) { + perPage := r.URL.Query().Get("per_page") + if perPage != "2" { + t.Fatalf("expected '2', got '%s'", perPage) + } + + fmt.Fprint(w, `{"domain_records":[{"id":1},{"id":2}]}`) + }) + + dro := &ListOptions{PerPage: 2} + records, _, err := client.Domains.Records("example.com", dro) + if err != nil { + t.Errorf("Domains.List returned error: %v", err) + } + + expected := []DomainRecord{{ID: 1}, {ID: 2}} + if !reflect.DeepEqual(records, expected) { + t.Errorf("Domains.List returned %+v, expected %+v", records, expected) + } +} + +func TestDomains_GetRecordforDomainName(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/v2/domains/example.com/records/1", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "GET") + fmt.Fprint(w, `{"domain_record":{"id":1}}`) + }) + + record, _, err := client.Domains.Record("example.com", 1) + if err != nil { + t.Errorf("Domains.GetRecord returned error: %v", err) + } + + expected := &DomainRecord{ID: 1} + if !reflect.DeepEqual(record, expected) { + 
t.Errorf("Domains.GetRecord returned %+v, expected %+v", record, expected) + } +} + +func TestDomains_DeleteRecordForDomainName(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/v2/domains/example.com/records/1", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "DELETE") + }) + + _, err := client.Domains.DeleteRecord("example.com", 1) + if err != nil { + t.Errorf("Domains.RecordDelete returned error: %v", err) + } +} + +func TestDomains_CreateRecordForDomainName(t *testing.T) { + setup() + defer teardown() + + createRequest := &DomainRecordEditRequest{ + Type: "CNAME", + Name: "example", + Data: "@", + Priority: 10, + Port: 10, + Weight: 10, + } + + mux.HandleFunc("/v2/domains/example.com/records", + func(w http.ResponseWriter, r *http.Request) { + v := new(DomainRecordEditRequest) + err := json.NewDecoder(r.Body).Decode(v) + + if err != nil { + t.Fatalf("decode json: %v", err) + } + + testMethod(t, r, "POST") + if !reflect.DeepEqual(v, createRequest) { + t.Errorf("Request body = %+v, expected %+v", v, createRequest) + } + + fmt.Fprintf(w, `{"domain_record": {"id":1}}`) + }) + + record, _, err := client.Domains.CreateRecord("example.com", createRequest) + if err != nil { + t.Errorf("Domains.CreateRecord returned error: %v", err) + } + + expected := &DomainRecord{ID: 1} + if !reflect.DeepEqual(record, expected) { + t.Errorf("Domains.CreateRecord returned %+v, expected %+v", record, expected) + } +} + +func TestDomains_EditRecordForDomainName(t *testing.T) { + setup() + defer teardown() + + editRequest := &DomainRecordEditRequest{ + Type: "CNAME", + Name: "example", + Data: "@", + Priority: 10, + Port: 10, + Weight: 10, + } + + mux.HandleFunc("/v2/domains/example.com/records/1", func(w http.ResponseWriter, r *http.Request) { + v := new(DomainRecordEditRequest) + err := json.NewDecoder(r.Body).Decode(v) + if err != nil { + t.Fatalf("decode json: %v", err) + } + + testMethod(t, r, "PUT") + if !reflect.DeepEqual(v, editRequest) { + t.Errorf("Request body = %+v, expected %+v", v, editRequest) + } + + fmt.Fprintf(w, `{"id":1}`) + }) + + record, _, err := client.Domains.EditRecord("example.com", 1, editRequest) + if err != nil { + t.Errorf("Domains.EditRecord returned error: %v", err) + } + + expected := &DomainRecord{ID: 1} + if !reflect.DeepEqual(record, expected) { + t.Errorf("Domains.EditRecord returned %+v, expected %+v", record, expected) + } +} + +func TestDomainRecord_String(t *testing.T) { + record := &DomainRecord{ + ID: 1, + Type: "CNAME", + Name: "example", + Data: "@", + Priority: 10, + Port: 10, + Weight: 10, + } + + stringified := record.String() + expected := `godo.DomainRecord{ID:1, Type:"CNAME", Name:"example", Data:"@", Priority:10, Port:10, Weight:10}` + if expected != stringified { + t.Errorf("DomainRecord.String returned %+v, expected %+v", stringified, expected) + } +} + +func TestDomainRecordEditRequest_String(t *testing.T) { + record := &DomainRecordEditRequest{ + Type: "CNAME", + Name: "example", + Data: "@", + Priority: 10, + Port: 10, + Weight: 10, + } + + stringified := record.String() + expected := `godo.DomainRecordEditRequest{Type:"CNAME", Name:"example", Data:"@", Priority:10, Port:10, Weight:10}` + if expected != stringified { + t.Errorf("DomainRecordEditRequest.String returned %+v, expected %+v", stringified, expected) + } +} diff --git a/vendor/github.com/digitalocean/godo/droplet_actions.go b/vendor/github.com/digitalocean/godo/droplet_actions.go new file mode 100644 index 000000000..7012aee7f --- /dev/null +++ 
b/vendor/github.com/digitalocean/godo/droplet_actions.go @@ -0,0 +1,238 @@ +package godo + +import ( + "fmt" + "net/url" +) + +// ActionRequest represents a DigitalOcean Action Request +type ActionRequest map[string]interface{} + +// DropletActionsService is an interface for interfacing with the droplet actions +// endpoints of the DigitalOcean API +// See: https://developers.digitalocean.com/documentation/v2#droplet-actions
type DropletActionsService interface { + Shutdown(int) (*Action, *Response, error) + PowerOff(int) (*Action, *Response, error) + PowerOn(int) (*Action, *Response, error) + PowerCycle(int) (*Action, *Response, error) + Reboot(int) (*Action, *Response, error) + Restore(int, int) (*Action, *Response, error) + Resize(int, string, bool) (*Action, *Response, error) + Rename(int, string) (*Action, *Response, error) + Snapshot(int, string) (*Action, *Response, error) + EnableBackups(int) (*Action, *Response, error) + DisableBackups(int) (*Action, *Response, error) + PasswordReset(int) (*Action, *Response, error) + RebuildByImageID(int, int) (*Action, *Response, error) + RebuildByImageSlug(int, string) (*Action, *Response, error) + ChangeKernel(int, int) (*Action, *Response, error) + EnableIPv6(int) (*Action, *Response, error) + EnablePrivateNetworking(int) (*Action, *Response, error) + Upgrade(int) (*Action, *Response, error) + Get(int, int) (*Action, *Response, error) + GetByURI(string) (*Action, *Response, error) +} + +// DropletActionsServiceOp handles communication with the droplet action related +// methods of the DigitalOcean API. +type DropletActionsServiceOp struct { + client *Client +} + +var _ DropletActionsService = &DropletActionsServiceOp{} + +// Shutdown a Droplet +func (s *DropletActionsServiceOp) Shutdown(id int) (*Action, *Response, error) { + request := &ActionRequest{"type": "shutdown"} + return s.doAction(id, request) +} + +// PowerOff a Droplet +func (s *DropletActionsServiceOp) PowerOff(id int) (*Action, *Response, error) { + request := &ActionRequest{"type": "power_off"} + return s.doAction(id, request) +} + +// PowerOn a Droplet +func (s *DropletActionsServiceOp) PowerOn(id int) (*Action, *Response, error) { + request := &ActionRequest{"type": "power_on"} + return s.doAction(id, request) +} + +// PowerCycle a Droplet +func (s *DropletActionsServiceOp) PowerCycle(id int) (*Action, *Response, error) { + request := &ActionRequest{"type": "power_cycle"} + return s.doAction(id, request) +} + +// Reboot a Droplet +func (s *DropletActionsServiceOp) Reboot(id int) (*Action, *Response, error) { + request := &ActionRequest{"type": "reboot"} + return s.doAction(id, request) +} + +// Restore an image to a Droplet +func (s *DropletActionsServiceOp) Restore(id, imageID int) (*Action, *Response, error) { + requestType := "restore" + request := &ActionRequest{ + "type": requestType, + "image": float64(imageID), + } + return s.doAction(id, request) +} + +// Resize a Droplet +func (s *DropletActionsServiceOp) Resize(id int, sizeSlug string, resizeDisk bool) (*Action, *Response, error) { + requestType := "resize" + request := &ActionRequest{ + "type": requestType, + "size": sizeSlug, + "disk": resizeDisk, + } + return s.doAction(id, request) +} + +// Rename a Droplet +func (s *DropletActionsServiceOp) Rename(id int, name string) (*Action, *Response, error) { + requestType := "rename" + request := &ActionRequest{ + "type": requestType, + "name": name, + } + return s.doAction(id, request) +} + +// Snapshot a Droplet.
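+// The returned Action is asynchronous; callers typically poll it with
+// client.Actions.Get(action.ID) until Status is ActionCompleted. A sketch
+// (dropletID and the snapshot name are illustrative):
+//
+//	action, _, err := client.DropletActions.Snapshot(dropletID, "nightly")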
+func (s *DropletActionsServiceOp) Snapshot(id int, name string) (*Action, *Response, error) { + requestType := "snapshot" + request := &ActionRequest{ + "type": requestType, + "name": name, + } + return s.doAction(id, request) +} + +// EnableBackups enables backups for a droplet. +func (s *DropletActionsServiceOp) EnableBackups(id int) (*Action, *Response, error) { + request := &ActionRequest{"type": "enable_backups"} + return s.doAction(id, request) +} + +// DisableBackups disables backups for a droplet. +func (s *DropletActionsServiceOp) DisableBackups(id int) (*Action, *Response, error) { + request := &ActionRequest{"type": "disable_backups"} + return s.doAction(id, request) +} + +// PasswordReset resets the password for a droplet. +func (s *DropletActionsServiceOp) PasswordReset(id int) (*Action, *Response, error) { + request := &ActionRequest{"type": "password_reset"} + return s.doAction(id, request) +} + +// RebuildByImageID rebuilds a droplet from an image with a given id. +func (s *DropletActionsServiceOp) RebuildByImageID(id, imageID int) (*Action, *Response, error) { + request := &ActionRequest{"type": "rebuild", "image": imageID} + return s.doAction(id, request) +} + +// RebuildByImageSlug rebuilds a droplet from an image with a given slug. +func (s *DropletActionsServiceOp) RebuildByImageSlug(id int, slug string) (*Action, *Response, error) { + request := &ActionRequest{"type": "rebuild", "image": slug} + return s.doAction(id, request) +} + +// ChangeKernel changes the kernel for a droplet. +func (s *DropletActionsServiceOp) ChangeKernel(id, kernelID int) (*Action, *Response, error) { + request := &ActionRequest{"type": "change_kernel", "kernel": kernelID} + return s.doAction(id, request) +} + +// EnableIPv6 enables IPv6 for a droplet. +func (s *DropletActionsServiceOp) EnableIPv6(id int) (*Action, *Response, error) { + request := &ActionRequest{"type": "enable_ipv6"} + return s.doAction(id, request) +} + +// EnablePrivateNetworking enables private networking for a droplet. +func (s *DropletActionsServiceOp) EnablePrivateNetworking(id int) (*Action, *Response, error) { + request := &ActionRequest{"type": "enable_private_networking"} + return s.doAction(id, request) +} + +// Upgrade a droplet. +func (s *DropletActionsServiceOp) Upgrade(id int) (*Action, *Response, error) { + request := &ActionRequest{"type": "upgrade"} + return s.doAction(id, request) +} + +func (s *DropletActionsServiceOp) doAction(id int, request *ActionRequest) (*Action, *Response, error) { + if id < 1 { + return nil, nil, NewArgError("id", "cannot be less than 1") + } + + if request == nil { + return nil, nil, NewArgError("request", "request can't be nil") + } + + path := dropletActionPath(id) + + req, err := s.client.NewRequest("POST", path, request) + if err != nil { + return nil, nil, err + } + + root := new(actionRoot) + resp, err := s.client.Do(req, root) + if err != nil { + return nil, resp, err + } + + return &root.Event, resp, err +} + +// Get an action for a particular droplet by id. +func (s *DropletActionsServiceOp) Get(dropletID, actionID int) (*Action, *Response, error) { + if dropletID < 1 { + return nil, nil, NewArgError("dropletID", "cannot be less than 1") + } + + if actionID < 1 { + return nil, nil, NewArgError("actionID", "cannot be less than 1") + } + + path := fmt.Sprintf("%s/%d", dropletActionPath(dropletID), actionID) + return s.get(path) +} + +// GetByURI gets an action for a particular droplet by URI.
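+// The URI usually comes from a link returned by a previous call (for
+// example an action's HREF in a response's Links; the variable below is an
+// assumption):
+//
+//	action, _, err := client.DropletActions.GetByURI(linkAction.HREF)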
+func (s *DropletActionsServiceOp) GetByURI(rawurl string) (*Action, *Response, error) { + u, err := url.Parse(rawurl) + if err != nil { + return nil, nil, err + } + + return s.get(u.Path) + +} + +func (s *DropletActionsServiceOp) get(path string) (*Action, *Response, error) { + req, err := s.client.NewRequest("GET", path, nil) + if err != nil { + return nil, nil, err + } + + root := new(actionRoot) + resp, err := s.client.Do(req, root) + if err != nil { + return nil, resp, err + } + + return &root.Event, resp, err + +} + +func dropletActionPath(dropletID int) string { + return fmt.Sprintf("v2/droplets/%d/actions", dropletID) +} diff --git a/vendor/github.com/digitalocean/godo/droplet_actions_test.go b/vendor/github.com/digitalocean/godo/droplet_actions_test.go new file mode 100644 index 000000000..fa063dba5 --- /dev/null +++ b/vendor/github.com/digitalocean/godo/droplet_actions_test.go @@ -0,0 +1,666 @@ +package godo + +import ( + "encoding/json" + "fmt" + "net/http" + "reflect" + "testing" +) + +func TestDropletActions_Shutdown(t *testing.T) { + setup() + defer teardown() + + request := &ActionRequest{ + "type": "shutdown", + } + + mux.HandleFunc("/v2/droplets/1/actions", func(w http.ResponseWriter, r *http.Request) { + v := new(ActionRequest) + err := json.NewDecoder(r.Body).Decode(v) + if err != nil { + t.Fatalf("decode json: %v", err) + } + + testMethod(t, r, "POST") + if !reflect.DeepEqual(v, request) { + t.Errorf("Request body = %+v, expected %+v", v, request) + } + + fmt.Fprintf(w, `{"action":{"status":"in-progress"}}`) + }) + + action, _, err := client.DropletActions.Shutdown(1) + if err != nil { + t.Errorf("DropletActions.Shutdown returned error: %v", err) + } + + expected := &Action{Status: "in-progress"} + if !reflect.DeepEqual(action, expected) { + t.Errorf("DropletActions.Shutdown returned %+v, expected %+v", action, expected) + } +} + +func TestDropletAction_PowerOff(t *testing.T) { + setup() + defer teardown() + + request := &ActionRequest{ + "type": "power_off", + } + + mux.HandleFunc("/v2/droplets/1/actions", func(w http.ResponseWriter, r *http.Request) { + v := new(ActionRequest) + err := json.NewDecoder(r.Body).Decode(v) + if err != nil { + t.Fatalf("decode json: %v", err) + } + + testMethod(t, r, "POST") + if !reflect.DeepEqual(v, request) { + t.Errorf("Request body = %+v, expected %+v", v, request) + } + + fmt.Fprintf(w, `{"action":{"status":"in-progress"}}`) + }) + + action, _, err := client.DropletActions.PowerOff(1) + if err != nil { + t.Errorf("DropletActions.PowerOff returned error: %v", err) + } + + expected := &Action{Status: "in-progress"} + if !reflect.DeepEqual(action, expected) { + t.Errorf("DropletActions.Poweroff returned %+v, expected %+v", action, expected) + } +} + +func TestDropletAction_PowerOn(t *testing.T) { + setup() + defer teardown() + + request := &ActionRequest{ + "type": "power_on", + } + + mux.HandleFunc("/v2/droplets/1/actions", func(w http.ResponseWriter, r *http.Request) { + v := new(ActionRequest) + err := json.NewDecoder(r.Body).Decode(v) + if err != nil { + t.Fatalf("decode json: %v", err) + } + + testMethod(t, r, "POST") + if !reflect.DeepEqual(v, request) { + t.Errorf("Request body = %+v, expected %+v", v, request) + } + + fmt.Fprintf(w, `{"action":{"status":"in-progress"}}`) + }) + + action, _, err := client.DropletActions.PowerOn(1) + if err != nil { + t.Errorf("DropletActions.PowerOn returned error: %v", err) + } + + expected := &Action{Status: "in-progress"} + if !reflect.DeepEqual(action, expected) { + 
t.Errorf("DropletActions.PowerOn returned %+v, expected %+v", action, expected) + } +} + +func TestDropletAction_Reboot(t *testing.T) { + setup() + defer teardown() + + request := &ActionRequest{ + "type": "reboot", + } + + mux.HandleFunc("/v2/droplets/1/actions", func(w http.ResponseWriter, r *http.Request) { + v := new(ActionRequest) + err := json.NewDecoder(r.Body).Decode(v) + if err != nil { + t.Fatalf("decode json: %v", err) + } + + testMethod(t, r, "POST") + if !reflect.DeepEqual(v, request) { + t.Errorf("Request body = %+v, expected %+v", v, request) + } + + fmt.Fprintf(w, `{"action":{"status":"in-progress"}}`) + + }) + + action, _, err := client.DropletActions.Reboot(1) + if err != nil { + t.Errorf("DropletActions.Reboot returned error: %v", err) + } + + expected := &Action{Status: "in-progress"} + if !reflect.DeepEqual(action, expected) { + t.Errorf("DropletActions.Reboot returned %+v, expected %+v", action, expected) + } +} + +func TestDropletAction_Restore(t *testing.T) { + setup() + defer teardown() + + request := &ActionRequest{ + "type": "restore", + "image": float64(1), + } + + mux.HandleFunc("/v2/droplets/1/actions", func(w http.ResponseWriter, r *http.Request) { + v := new(ActionRequest) + err := json.NewDecoder(r.Body).Decode(v) + if err != nil { + t.Fatalf("decode json: %v", err) + } + + testMethod(t, r, "POST") + + if !reflect.DeepEqual(v, request) { + t.Errorf("Request body = %+v, expected %+v", v, request) + } + + fmt.Fprintf(w, `{"action":{"status":"in-progress"}}`) + + }) + + action, _, err := client.DropletActions.Restore(1, 1) + if err != nil { + t.Errorf("DropletActions.Restore returned error: %v", err) + } + + expected := &Action{Status: "in-progress"} + if !reflect.DeepEqual(action, expected) { + t.Errorf("DropletActions.Restore returned %+v, expected %+v", action, expected) + } +} + +func TestDropletAction_Resize(t *testing.T) { + setup() + defer teardown() + + request := &ActionRequest{ + "type": "resize", + "size": "1024mb", + "disk": true, + } + + mux.HandleFunc("/v2/droplets/1/actions", func(w http.ResponseWriter, r *http.Request) { + v := new(ActionRequest) + err := json.NewDecoder(r.Body).Decode(v) + if err != nil { + t.Fatalf("decode json: %v", err) + } + + testMethod(t, r, "POST") + + if !reflect.DeepEqual(v, request) { + t.Errorf("Request body = %+v, expected %+v", v, request) + } + + fmt.Fprintf(w, `{"action":{"status":"in-progress"}}`) + + }) + + action, _, err := client.DropletActions.Resize(1, "1024mb", true) + if err != nil { + t.Errorf("DropletActions.Resize returned error: %v", err) + } + + expected := &Action{Status: "in-progress"} + if !reflect.DeepEqual(action, expected) { + t.Errorf("DropletActions.Resize returned %+v, expected %+v", action, expected) + } +} + +func TestDropletAction_Rename(t *testing.T) { + setup() + defer teardown() + + request := &ActionRequest{ + "type": "rename", + "name": "Droplet-Name", + } + + mux.HandleFunc("/v2/droplets/1/actions", func(w http.ResponseWriter, r *http.Request) { + v := new(ActionRequest) + err := json.NewDecoder(r.Body).Decode(v) + if err != nil { + t.Fatalf("decode json: %v", err) + } + + testMethod(t, r, "POST") + + if !reflect.DeepEqual(v, request) { + t.Errorf("Request body = %+v, expected %+v", v, request) + } + + fmt.Fprintf(w, `{"action":{"status":"in-progress"}}`) + }) + + action, _, err := client.DropletActions.Rename(1, "Droplet-Name") + if err != nil { + t.Errorf("DropletActions.Rename returned error: %v", err) + } + + expected := &Action{Status: "in-progress"} + if 
!reflect.DeepEqual(action, expected) { + t.Errorf("DropletActions.Rename returned %+v, expected %+v", action, expected) + } +} + +func TestDropletAction_PowerCycle(t *testing.T) { + setup() + defer teardown() + + request := &ActionRequest{ + "type": "power_cycle", + } + + mux.HandleFunc("/v2/droplets/1/actions", func(w http.ResponseWriter, r *http.Request) { + v := new(ActionRequest) + err := json.NewDecoder(r.Body).Decode(v) + if err != nil { + t.Fatalf("decode json: %v", err) + } + + testMethod(t, r, "POST") + if !reflect.DeepEqual(v, request) { + t.Errorf("Request body = %+v, expected %+v", v, request) + } + + fmt.Fprintf(w, `{"action":{"status":"in-progress"}}`) + + }) + + action, _, err := client.DropletActions.PowerCycle(1) + if err != nil { + t.Errorf("DropletActions.PowerCycle returned error: %v", err) + } + + expected := &Action{Status: "in-progress"} + if !reflect.DeepEqual(action, expected) { + t.Errorf("DropletActions.PowerCycle returned %+v, expected %+v", action, expected) + } +} + +func TestDropletAction_Snapshot(t *testing.T) { + setup() + defer teardown() + + request := &ActionRequest{ + "type": "snapshot", + "name": "Image-Name", + } + + mux.HandleFunc("/v2/droplets/1/actions", func(w http.ResponseWriter, r *http.Request) { + v := new(ActionRequest) + err := json.NewDecoder(r.Body).Decode(v) + if err != nil { + t.Fatalf("decode json: %v", err) + } + + testMethod(t, r, "POST") + + if !reflect.DeepEqual(v, request) { + t.Errorf("Request body = %+v, expected %+v", v, request) + } + + fmt.Fprintf(w, `{"action":{"status":"in-progress"}}`) + }) + + action, _, err := client.DropletActions.Snapshot(1, "Image-Name") + if err != nil { + t.Errorf("DropletActions.Snapshot returned error: %v", err) + } + + expected := &Action{Status: "in-progress"} + if !reflect.DeepEqual(action, expected) { + t.Errorf("DropletActions.Snapshot returned %+v, expected %+v", action, expected) + } +} + +func TestDropletAction_EnableBackups(t *testing.T) { + setup() + defer teardown() + + request := &ActionRequest{ + "type": "enable_backups", + } + + mux.HandleFunc("/v2/droplets/1/actions", func(w http.ResponseWriter, r *http.Request) { + v := new(ActionRequest) + err := json.NewDecoder(r.Body).Decode(v) + if err != nil { + t.Fatalf("decode json: %v", err) + } + + testMethod(t, r, "POST") + + if !reflect.DeepEqual(v, request) { + t.Errorf("Request body = %+v, expected %+v", v, request) + } + + fmt.Fprintf(w, `{"action":{"status":"in-progress"}}`) + }) + + action, _, err := client.DropletActions.EnableBackups(1) + if err != nil { + t.Errorf("DropletActions.EnableBackups returned error: %v", err) + } + + expected := &Action{Status: "in-progress"} + if !reflect.DeepEqual(action, expected) { + t.Errorf("DropletActions.EnableBackups returned %+v, expected %+v", action, expected) + } +} + +func TestDropletAction_DisableBackups(t *testing.T) { + setup() + defer teardown() + + request := &ActionRequest{ + "type": "disable_backups", + } + + mux.HandleFunc("/v2/droplets/1/actions", func(w http.ResponseWriter, r *http.Request) { + v := new(ActionRequest) + err := json.NewDecoder(r.Body).Decode(v) + if err != nil { + t.Fatalf("decode json: %v", err) + } + + testMethod(t, r, "POST") + + if !reflect.DeepEqual(v, request) { + t.Errorf("Request body = %+v, expected %+v", v, request) + } + + fmt.Fprintf(w, `{"action":{"status":"in-progress"}}`) + }) + + action, _, err := client.DropletActions.DisableBackups(1) + if err != nil { + t.Errorf("DropletActions.DisableBackups returned error: %v", err) + } + + expected := 
&Action{Status: "in-progress"} + if !reflect.DeepEqual(action, expected) { + t.Errorf("DropletActions.DisableBackups returned %+v, expected %+v", action, expected) + } +} + +func TestDropletAction_PasswordReset(t *testing.T) { + setup() + defer teardown() + + request := &ActionRequest{ + "type": "password_reset", + } + + mux.HandleFunc("/v2/droplets/1/actions", func(w http.ResponseWriter, r *http.Request) { + v := new(ActionRequest) + err := json.NewDecoder(r.Body).Decode(v) + if err != nil { + t.Fatalf("decode json: %v", err) + } + + testMethod(t, r, "POST") + + if !reflect.DeepEqual(v, request) { + t.Errorf("Request body = %+v, expected %+v", v, request) + } + + fmt.Fprintf(w, `{"action":{"status":"in-progress"}}`) + }) + + action, _, err := client.DropletActions.PasswordReset(1) + if err != nil { + t.Errorf("DropletActions.PasswordReset returned error: %v", err) + } + + expected := &Action{Status: "in-progress"} + if !reflect.DeepEqual(action, expected) { + t.Errorf("DropletActions.PasswordReset returned %+v, expected %+v", action, expected) + } +} + +func TestDropletAction_RebuildByImageID(t *testing.T) { + setup() + defer teardown() + + request := &ActionRequest{ + "type": "rebuild", + "image": float64(2), + } + + mux.HandleFunc("/v2/droplets/1/actions", func(w http.ResponseWriter, r *http.Request) { + v := new(ActionRequest) + err := json.NewDecoder(r.Body).Decode(v) + if err != nil { + t.Fatalf("decode json: %v", err) + } + + testMethod(t, r, "POST") + + if !reflect.DeepEqual(v, request) { + t.Errorf("Request body = \n%#v, expected \n%#v", v, request) + } + + fmt.Fprintf(w, `{"action":{"status":"in-progress"}}`) + }) + + action, _, err := client.DropletActions.RebuildByImageID(1, 2) + if err != nil { + t.Errorf("DropletActions.RebuildByImageID returned error: %v", err) + } + + expected := &Action{Status: "in-progress"} + if !reflect.DeepEqual(action, expected) { + t.Errorf("DropletActions.RebuildByImageID returned %+v, expected %+v", action, expected) + } +} + +func TestDropletAction_RebuildByImageSlug(t *testing.T) { + setup() + defer teardown() + + request := &ActionRequest{ + "type": "rebuild", + "image": "Image-Name", + } + + mux.HandleFunc("/v2/droplets/1/actions", func(w http.ResponseWriter, r *http.Request) { + v := new(ActionRequest) + err := json.NewDecoder(r.Body).Decode(v) + if err != nil { + t.Fatalf("decode json: %v", err) + } + + testMethod(t, r, "POST") + + if !reflect.DeepEqual(v, request) { + t.Errorf("Request body = %+v, expected %+v", v, request) + } + + fmt.Fprintf(w, `{"action":{"status":"in-progress"}}`) + }) + + action, _, err := client.DropletActions.RebuildByImageSlug(1, "Image-Name") + if err != nil { + t.Errorf("DropletActions.RebuildByImageSlug returned error: %v", err) + } + + expected := &Action{Status: "in-progress"} + if !reflect.DeepEqual(action, expected) { + t.Errorf("DropletActions.RebuildByImageSlug returned %+v, expected %+v", action, expected) + } +} + +func TestDropletAction_ChangeKernel(t *testing.T) { + setup() + defer teardown() + + request := &ActionRequest{ + "type": "change_kernel", + "kernel": float64(2), + } + + mux.HandleFunc("/v2/droplets/1/actions", func(w http.ResponseWriter, r *http.Request) { + v := new(ActionRequest) + err := json.NewDecoder(r.Body).Decode(v) + if err != nil { + t.Fatalf("decode json: %v", err) + } + + testMethod(t, r, "POST") + + if !reflect.DeepEqual(v, request) { + t.Errorf("Request body = %+v, expected %+v", v, request) + } + + fmt.Fprintf(w, `{"action":{"status":"in-progress"}}`) + }) + + action, _, err := 
client.DropletActions.ChangeKernel(1, 2) + if err != nil { + t.Errorf("DropletActions.ChangeKernel returned error: %v", err) + } + + expected := &Action{Status: "in-progress"} + if !reflect.DeepEqual(action, expected) { + t.Errorf("DropletActions.ChangeKernel returned %+v, expected %+v", action, expected) + } +} + +func TestDropletAction_EnableIPv6(t *testing.T) { + setup() + defer teardown() + + request := &ActionRequest{ + "type": "enable_ipv6", + } + + mux.HandleFunc("/v2/droplets/1/actions", func(w http.ResponseWriter, r *http.Request) { + v := new(ActionRequest) + err := json.NewDecoder(r.Body).Decode(v) + if err != nil { + t.Fatalf("decode json: %v", err) + } + + testMethod(t, r, "POST") + + if !reflect.DeepEqual(v, request) { + t.Errorf("Request body = %+v, expected %+v", v, request) + } + + fmt.Fprintf(w, `{"action":{"status":"in-progress"}}`) + }) + + action, _, err := client.DropletActions.EnableIPv6(1) + if err != nil { + t.Errorf("DropletActions.EnableIPv6 returned error: %v", err) + } + + expected := &Action{Status: "in-progress"} + if !reflect.DeepEqual(action, expected) { + t.Errorf("DropletActions.EnableIPv6 returned %+v, expected %+v", action, expected) + } +} + +func TestDropletAction_EnablePrivateNetworking(t *testing.T) { + setup() + defer teardown() + + request := &ActionRequest{ + "type": "enable_private_networking", + } + + mux.HandleFunc("/v2/droplets/1/actions", func(w http.ResponseWriter, r *http.Request) { + v := new(ActionRequest) + err := json.NewDecoder(r.Body).Decode(v) + if err != nil { + t.Fatalf("decode json: %v", err) + } + + testMethod(t, r, "POST") + + if !reflect.DeepEqual(v, request) { + t.Errorf("Request body = %+v, expected %+v", v, request) + } + + fmt.Fprintf(w, `{"action":{"status":"in-progress"}}`) + }) + + action, _, err := client.DropletActions.EnablePrivateNetworking(1) + if err != nil { + t.Errorf("DropletActions.EnablePrivateNetworking returned error: %v", err) + } + + expected := &Action{Status: "in-progress"} + if !reflect.DeepEqual(action, expected) { + t.Errorf("DropletActions.EnablePrivateNetworking returned %+v, expected %+v", action, expected) + } +} + +func TestDropletAction_Upgrade(t *testing.T) { + setup() + defer teardown() + + request := &ActionRequest{ + "type": "upgrade", + } + + mux.HandleFunc("/v2/droplets/1/actions", func(w http.ResponseWriter, r *http.Request) { + v := new(ActionRequest) + err := json.NewDecoder(r.Body).Decode(v) + if err != nil { + t.Fatalf("decode json: %v", err) + } + + testMethod(t, r, "POST") + + if !reflect.DeepEqual(v, request) { + t.Errorf("Request body = %+v, expected %+v", v, request) + } + + fmt.Fprintf(w, `{"action":{"status":"in-progress"}}`) + }) + + action, _, err := client.DropletActions.Upgrade(1) + if err != nil { + t.Errorf("DropletActions.Upgrade returned error: %v", err) + } + + expected := &Action{Status: "in-progress"} + if !reflect.DeepEqual(action, expected) { + t.Errorf("DropletActions.Upgrade returned %+v, expected %+v", action, expected) + } +} + +func TestDropletActions_Get(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/v2/droplets/123/actions/456", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "GET") + fmt.Fprintf(w, `{"action":{"status":"in-progress"}}`) + }) + + action, _, err := client.DropletActions.Get(123, 456) + if err != nil { + t.Errorf("DropletActions.Get returned error: %v", err) + } + + expected := &Action{Status: "in-progress"} + if !reflect.DeepEqual(action, expected) { + t.Errorf("DropletActions.Get returned %+v, expected 
%+v", action, expected) + } +} diff --git a/vendor/github.com/digitalocean/godo/droplets.go b/vendor/github.com/digitalocean/godo/droplets.go new file mode 100644 index 000000000..978d41e39 --- /dev/null +++ b/vendor/github.com/digitalocean/godo/droplets.go @@ -0,0 +1,447 @@ +package godo + +import ( + "encoding/json" + "fmt" +) + +const dropletBasePath = "v2/droplets" + +// DropletsService is an interface for interfacing with the droplet +// endpoints of the DigitalOcean API +// See: https://developers.digitalocean.com/documentation/v2#droplets +type DropletsService interface { + List(*ListOptions) ([]Droplet, *Response, error) + Get(int) (*Droplet, *Response, error) + Create(*DropletCreateRequest) (*Droplet, *Response, error) + CreateMultiple(*DropletMultiCreateRequest) ([]Droplet, *Response, error) + Delete(int) (*Response, error) + Kernels(int, *ListOptions) ([]Kernel, *Response, error) + Snapshots(int, *ListOptions) ([]Image, *Response, error) + Backups(int, *ListOptions) ([]Image, *Response, error) + Actions(int, *ListOptions) ([]Action, *Response, error) + Neighbors(int) ([]Droplet, *Response, error) +} + +// DropletsServiceOp handles communication with the droplet related methods of the +// DigitalOcean API. +type DropletsServiceOp struct { + client *Client +} + +var _ DropletsService = &DropletsServiceOp{} + +// Droplet represents a DigitalOcean Droplet +type Droplet struct { + ID int `json:"id,float64,omitempty"` + Name string `json:"name,omitempty"` + Memory int `json:"memory,omitempty"` + Vcpus int `json:"vcpus,omitempty"` + Disk int `json:"disk,omitempty"` + Region *Region `json:"region,omitempty"` + Image *Image `json:"image,omitempty"` + Size *Size `json:"size,omitempty"` + SizeSlug string `json:"size_slug,omitempty"` + BackupIDs []int `json:"backup_ids,omitempty"` + SnapshotIDs []int `json:"snapshot_ids,omitempty"` + Locked bool `json:"locked,bool,omitempty"` + Status string `json:"status,omitempty"` + Networks *Networks `json:"networks,omitempty"` + ActionIDs []int `json:"action_ids,omitempty"` + Created string `json:"created_at,omitempty"` + Kernel *Kernel `json:"kernel, omitempty"` +} + +// Kernel object +type Kernel struct { + ID int `json:"id,float64,omitempty"` + Name string `json:"name,omitempty"` + Version string `json:"version,omitempty"` +} + +// Convert Droplet to a string +func (d Droplet) String() string { + return Stringify(d) +} + +// DropletRoot represents a Droplet root +type dropletRoot struct { + Droplet *Droplet `json:"droplet"` + Links *Links `json:"links,omitempty"` +} + +type dropletsRoot struct { + Droplets []Droplet `json:"droplets"` + Links *Links `json:"links"` +} + +type kernelsRoot struct { + Kernels []Kernel `json:"kernels,omitempty"` + Links *Links `json:"links"` +} + +type snapshotsRoot struct { + Snapshots []Image `json:"snapshots,omitempty"` + Links *Links `json:"links"` +} + +type backupsRoot struct { + Backups []Image `json:"backups,omitempty"` + Links *Links `json:"links"` +} + +// DropletCreateImage identifies an image for the create request. It prefers slug over ID. +type DropletCreateImage struct { + ID int + Slug string +} + +// MarshalJSON returns either the slug or id of the image. It returns the id +// if the slug is empty. +func (d DropletCreateImage) MarshalJSON() ([]byte, error) { + if d.Slug != "" { + return json.Marshal(d.Slug) + } + + return json.Marshal(d.ID) +} + +// DropletCreateSSHKey identifies a SSH Key for the create request. It prefers fingerprint over ID. 
+type DropletCreateSSHKey struct { + ID int + Fingerprint string +} + +// MarshalJSON returns either the fingerprint or id of the ssh key. It returns +// the id if the fingerprint is empty. +func (d DropletCreateSSHKey) MarshalJSON() ([]byte, error) { + if d.Fingerprint != "" { + return json.Marshal(d.Fingerprint) + } + + return json.Marshal(d.ID) +} + +// DropletCreateRequest represents a request to create a droplet. +type DropletCreateRequest struct { + Name string `json:"name"` + Region string `json:"region"` + Size string `json:"size"` + Image DropletCreateImage `json:"image"` + SSHKeys []DropletCreateSSHKey `json:"ssh_keys"` + Backups bool `json:"backups"` + IPv6 bool `json:"ipv6"` + PrivateNetworking bool `json:"private_networking"` + UserData string `json:"user_data,omitempty"` +} + +// DropletMultiCreateRequest represents a request to create multiple droplets. +type DropletMultiCreateRequest struct { + Names []string `json:"names"` + Region string `json:"region"` + Size string `json:"size"` + Image DropletCreateImage `json:"image"` + SSHKeys []DropletCreateSSHKey `json:"ssh_keys"` + Backups bool `json:"backups"` + IPv6 bool `json:"ipv6"` + PrivateNetworking bool `json:"private_networking"` + UserData string `json:"user_data,omitempty"` +} + +func (d DropletCreateRequest) String() string { + return Stringify(d) +} + +func (d DropletMultiCreateRequest) String() string { + return Stringify(d) +} + +// Networks represents the droplet's networks +type Networks struct { + V4 []NetworkV4 `json:"v4,omitempty"` + V6 []NetworkV6 `json:"v6,omitempty"` +} + +// NetworkV4 represents a DigitalOcean IPv4 Network +type NetworkV4 struct { + IPAddress string `json:"ip_address,omitempty"` + Netmask string `json:"netmask,omitempty"` + Gateway string `json:"gateway,omitempty"` + Type string `json:"type,omitempty"` +} + +func (n NetworkV4) String() string { + return Stringify(n) +} + +// NetworkV6 represents a DigitalOcean IPv6 network.
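+// Unlike NetworkV4, whose netmask is a dotted-quad string, the netmask here +// is an integer prefix length (64, for example).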
+type NetworkV6 struct { + IPAddress string `json:"ip_address,omitempty"` + Netmask int `json:"netmask,omitempty"` + Gateway string `json:"gateway,omitempty"` + Type string `json:"type,omitempty"` +} + +func (n NetworkV6) String() string { + return Stringify(n) +} + +// List all droplets +func (s *DropletsServiceOp) List(opt *ListOptions) ([]Droplet, *Response, error) { + path := dropletBasePath + path, err := addOptions(path, opt) + if err != nil { + return nil, nil, err + } + + req, err := s.client.NewRequest("GET", path, nil) + if err != nil { + return nil, nil, err + } + + root := new(dropletsRoot) + resp, err := s.client.Do(req, root) + if err != nil { + return nil, resp, err + } + if l := root.Links; l != nil { + resp.Links = l + } + + return root.Droplets, resp, err +} + +// Get individual droplet +func (s *DropletsServiceOp) Get(dropletID int) (*Droplet, *Response, error) { + if dropletID < 1 { + return nil, nil, NewArgError("dropletID", "cannot be less than 1") + } + + path := fmt.Sprintf("%s/%d", dropletBasePath, dropletID) + + req, err := s.client.NewRequest("GET", path, nil) + if err != nil { + return nil, nil, err + } + + root := new(dropletRoot) + resp, err := s.client.Do(req, root) + if err != nil { + return nil, resp, err + } + + return root.Droplet, resp, err +} + +// Create droplet +func (s *DropletsServiceOp) Create(createRequest *DropletCreateRequest) (*Droplet, *Response, error) { + if createRequest == nil { + return nil, nil, NewArgError("createRequest", "cannot be nil") + } + + path := dropletBasePath + + req, err := s.client.NewRequest("POST", path, createRequest) + if err != nil { + return nil, nil, err + } + + root := new(dropletRoot) + resp, err := s.client.Do(req, root) + if err != nil { + return nil, resp, err + } + if l := root.Links; l != nil { + resp.Links = l + } + + return root.Droplet, resp, err +} + +// Create multiple droplets +func (s *DropletsServiceOp) CreateMultiple(createRequest *DropletMultiCreateRequest) ([]Droplet, *Response, error) { + if createRequest == nil { + return nil, nil, NewArgError("createRequest", "cannot be nil") + } + + path := dropletBasePath + + req, err := s.client.NewRequest("POST", path, createRequest) + if err != nil { + return nil, nil, err + } + + root := new(dropletsRoot) + resp, err := s.client.Do(req, root) + if err != nil { + return nil, resp, err + } + if l := root.Links; l != nil { + resp.Links = l + } + + return root.Droplets, resp, err +} + +// Delete droplet +func (s *DropletsServiceOp) Delete(dropletID int) (*Response, error) { + if dropletID < 1 { + return nil, NewArgError("dropletID", "cannot be less than 1") + } + + path := fmt.Sprintf("%s/%d", dropletBasePath, dropletID) + + req, err := s.client.NewRequest("DELETE", path, nil) + if err != nil { + return nil, err + } + + resp, err := s.client.Do(req, nil) + + return resp, err +} + +// Kernels lists kernels available for a droplet.
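+// +// A minimal paging sketch (assuming a configured *Client named c and a +// hypothetical dropletID): +// +// opt := &ListOptions{Page: 1, PerPage: 50} +// kernels, _, err := c.Droplets.Kernels(dropletID, opt)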
+func (s *DropletsServiceOp) Kernels(dropletID int, opt *ListOptions) ([]Kernel, *Response, error) { + if dropletID < 1 { + return nil, nil, NewArgError("dropletID", "cannot be less than 1") + } + + path := fmt.Sprintf("%s/%d/kernels", dropletBasePath, dropletID) + path, err := addOptions(path, opt) + if err != nil { + return nil, nil, err + } + + req, err := s.client.NewRequest("GET", path, nil) + if err != nil { + return nil, nil, err + } + + root := new(kernelsRoot) + resp, err := s.client.Do(req, root) + if err != nil { + return nil, resp, err + } + if l := root.Links; l != nil { + resp.Links = l + } + + return root.Kernels, resp, err +} + +// Actions lists the actions for a droplet. +func (s *DropletsServiceOp) Actions(dropletID int, opt *ListOptions) ([]Action, *Response, error) { + if dropletID < 1 { + return nil, nil, NewArgError("dropletID", "cannot be less than 1") + } + + path := fmt.Sprintf("%s/%d/actions", dropletBasePath, dropletID) + path, err := addOptions(path, opt) + if err != nil { + return nil, nil, err + } + + req, err := s.client.NewRequest("GET", path, nil) + if err != nil { + return nil, nil, err + } + + root := new(actionsRoot) + resp, err := s.client.Do(req, root) + if err != nil { + return nil, resp, err + } + if l := root.Links; l != nil { + resp.Links = l + } + + return root.Actions, resp, err +} + +// Backups lists the backups for a droplet. +func (s *DropletsServiceOp) Backups(dropletID int, opt *ListOptions) ([]Image, *Response, error) { + if dropletID < 1 { + return nil, nil, NewArgError("dropletID", "cannot be less than 1") + } + + path := fmt.Sprintf("%s/%d/backups", dropletBasePath, dropletID) + path, err := addOptions(path, opt) + if err != nil { + return nil, nil, err + } + + req, err := s.client.NewRequest("GET", path, nil) + if err != nil { + return nil, nil, err + } + + root := new(backupsRoot) + resp, err := s.client.Do(req, root) + if err != nil { + return nil, resp, err + } + if l := root.Links; l != nil { + resp.Links = l + } + + return root.Backups, resp, err +} + +// Snapshots lists the snapshots available for a droplet. +func (s *DropletsServiceOp) Snapshots(dropletID int, opt *ListOptions) ([]Image, *Response, error) { + if dropletID < 1 { + return nil, nil, NewArgError("dropletID", "cannot be less than 1") + } + + path := fmt.Sprintf("%s/%d/snapshots", dropletBasePath, dropletID) + path, err := addOptions(path, opt) + if err != nil { + return nil, nil, err + } + + req, err := s.client.NewRequest("GET", path, nil) + if err != nil { + return nil, nil, err + } + + root := new(snapshotsRoot) + resp, err := s.client.Do(req, root) + if err != nil { + return nil, resp, err + } + if l := root.Links; l != nil { + resp.Links = l + } + + return root.Snapshots, resp, err +} + +// Neighbors lists the neighbors for a droplet.
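+// Note that it takes no ListOptions. A minimal sketch (assuming a configured +// *Client named c and a hypothetical dropletID): +// +// neighbors, _, err := c.Droplets.Neighbors(dropletID)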
+func (s *DropletsServiceOp) Neighbors(dropletID int) ([]Droplet, *Response, error) { + if dropletID < 1 { + return nil, nil, NewArgError("dropletID", "cannot be less than 1") + } + + path := fmt.Sprintf("%s/%d/neighbors", dropletBasePath, dropletID) + + req, err := s.client.NewRequest("GET", path, nil) + if err != nil { + return nil, nil, err + } + + root := new(dropletsRoot) + resp, err := s.client.Do(req, root) + if err != nil { + return nil, resp, err + } + + return root.Droplets, resp, err +} + +func (s *DropletsServiceOp) dropletActionStatus(uri string) (string, error) { + action, _, err := s.client.DropletActions.GetByURI(uri) + + if err != nil { + return "", err + } + + return action.Status, nil +} diff --git a/vendor/github.com/digitalocean/godo/droplets_test.go b/vendor/github.com/digitalocean/godo/droplets_test.go new file mode 100644 index 000000000..b7504992f --- /dev/null +++ b/vendor/github.com/digitalocean/godo/droplets_test.go @@ -0,0 +1,428 @@ +package godo + +import ( + "encoding/json" + "fmt" + "net/http" + "reflect" + "testing" +) + +func TestDroplets_ListDroplets(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/v2/droplets", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "GET") + fmt.Fprint(w, `{"droplets": [{"id":1},{"id":2}]}`) + }) + + droplets, _, err := client.Droplets.List(nil) + if err != nil { + t.Errorf("Droplets.List returned error: %v", err) + } + + expected := []Droplet{{ID: 1}, {ID: 2}} + if !reflect.DeepEqual(droplets, expected) { + t.Errorf("Droplets.List returned %+v, expected %+v", droplets, expected) + } +} + +func TestDroplets_ListDropletsMultiplePages(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/v2/droplets", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "GET") + + dr := dropletsRoot{ + Droplets: []Droplet{ + {ID: 1}, + {ID: 2}, + }, + Links: &Links{ + Pages: &Pages{Next: "http://example.com/v2/droplets/?page=2"}, + }, + } + + b, err := json.Marshal(dr) + if err != nil { + t.Fatal(err) + } + + fmt.Fprint(w, string(b)) + }) + + _, resp, err := client.Droplets.List(nil) + if err != nil { + t.Fatal(err) + } + + checkCurrentPage(t, resp, 1) +} + +func TestDroplets_RetrievePageByNumber(t *testing.T) { + setup() + defer teardown() + + jBlob := ` + { + "droplets": [{"id":1},{"id":2}], + "links":{ + "pages":{ + "next":"http://example.com/v2/droplets/?page=3", + "prev":"http://example.com/v2/droplets/?page=1", + "last":"http://example.com/v2/droplets/?page=3", + "first":"http://example.com/v2/droplets/?page=1" + } + } + }` + + mux.HandleFunc("/v2/droplets", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "GET") + fmt.Fprint(w, jBlob) + }) + + opt := &ListOptions{Page: 2} + _, resp, err := client.Droplets.List(opt) + if err != nil { + t.Fatal(err) + } + + checkCurrentPage(t, resp, 2) +} + +func TestDroplets_GetDroplet(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/v2/droplets/12345", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "GET") + fmt.Fprint(w, `{"droplet":{"id":12345}}`) + }) + + droplets, _, err := client.Droplets.Get(12345) + if err != nil { + t.Errorf("Droplet.Get returned error: %v", err) + } + + expected := &Droplet{ID: 12345} + if !reflect.DeepEqual(droplets, expected) { + t.Errorf("Droplets.Get returned %+v, expected %+v", droplets, expected) + } +} + +func TestDroplets_Create(t *testing.T) { + setup() + defer teardown() + + createRequest := &DropletCreateRequest{ + Name: "name", + Region: "region", + Size: 
"size", + Image: DropletCreateImage{ + ID: 1, + }, + } + + mux.HandleFunc("/v2/droplets", func(w http.ResponseWriter, r *http.Request) { + expected := map[string]interface{}{ + "name": "name", + "region": "region", + "size": "size", + "image": float64(1), + "ssh_keys": nil, + "backups": false, + "ipv6": false, + "private_networking": false, + } + + var v map[string]interface{} + err := json.NewDecoder(r.Body).Decode(&v) + if err != nil { + t.Fatalf("decode json: %v", err) + } + + if !reflect.DeepEqual(v, expected) { + t.Errorf("Request body = %#v, expected %#v", v, expected) + } + + fmt.Fprintf(w, `{"droplet":{"id":1}, "links":{"actions": [{"id": 1, "href": "http://example.com", "rel": "create"}]}}`) + }) + + droplet, resp, err := client.Droplets.Create(createRequest) + if err != nil { + t.Errorf("Droplets.Create returned error: %v", err) + } + + if id := droplet.ID; id != 1 { + t.Errorf("expected id '%d', received '%d'", 1, id) + } + + if a := resp.Links.Actions[0]; a.ID != 1 { + t.Errorf("expected action id '%d', received '%d'", 1, a.ID) + } +} + +func TestDroplets_CreateMultiple(t *testing.T) { + setup() + defer teardown() + + createRequest := &DropletMultiCreateRequest{ + Names: []string{"name1", "name2"}, + Region: "region", + Size: "size", + Image: DropletCreateImage{ + ID: 1, + }, + } + + mux.HandleFunc("/v2/droplets", func(w http.ResponseWriter, r *http.Request) { + expected := map[string]interface{}{ + "names": []interface {}{"name1", "name2"}, + "region": "region", + "size": "size", + "image": float64(1), + "ssh_keys": nil, + "backups": false, + "ipv6": false, + "private_networking": false, + } + + var v map[string]interface{} + err := json.NewDecoder(r.Body).Decode(&v) + if err != nil { + t.Fatalf("decode json: %v", err) + } + + if !reflect.DeepEqual(v, expected) { + t.Errorf("Request body = %#v, expected %#v", v, expected) + } + + fmt.Fprintf(w, `{"droplets":[{"id":1},{"id":2}], "links":{"actions": [{"id": 1, "href": "http://example.com", "rel": "multiple_create"}]}}`) + }) + + droplets, resp, err := client.Droplets.CreateMultiple(createRequest) + if err != nil { + t.Errorf("Droplets.CreateMultiple returned error: %v", err) + } + + if id := droplets[0].ID; id != 1 { + t.Errorf("expected id '%d', received '%d'", 1, id) + } + + if id := droplets[1].ID; id != 2 { + t.Errorf("expected id '%d', received '%d'", 1, id) + } + + if a := resp.Links.Actions[0]; a.ID != 1 { + t.Errorf("expected action id '%d', received '%d'", 1, a.ID) + } +} + +func TestDroplets_Destroy(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/v2/droplets/12345", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "DELETE") + }) + + _, err := client.Droplets.Delete(12345) + if err != nil { + t.Errorf("Droplet.Delete returned error: %v", err) + } +} + +func TestDroplets_Kernels(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/v2/droplets/12345/kernels", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "GET") + fmt.Fprint(w, `{"kernels": [{"id":1},{"id":2}]}`) + }) + + opt := &ListOptions{Page: 2} + kernels, _, err := client.Droplets.Kernels(12345, opt) + if err != nil { + t.Errorf("Droplets.Kernels returned error: %v", err) + } + + expected := []Kernel{{ID: 1}, {ID: 2}} + if !reflect.DeepEqual(kernels, expected) { + t.Errorf("Droplets.Kernels returned %+v, expected %+v", kernels, expected) + } +} + +func TestDroplets_Snapshots(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/v2/droplets/12345/snapshots", func(w http.ResponseWriter, r 
*http.Request) { + testMethod(t, r, "GET") + fmt.Fprint(w, `{"snapshots": [{"id":1},{"id":2}]}`) + }) + + opt := &ListOptions{Page: 2} + snapshots, _, err := client.Droplets.Snapshots(12345, opt) + if err != nil { + t.Errorf("Droplets.Snapshots returned error: %v", err) + } + + expected := []Image{{ID: 1}, {ID: 2}} + if !reflect.DeepEqual(snapshots, expected) { + t.Errorf("Droplets.Snapshots returned %+v, expected %+v", snapshots, expected) + } +} + +func TestDroplets_Backups(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/v2/droplets/12345/backups", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "GET") + fmt.Fprint(w, `{"backups": [{"id":1},{"id":2}]}`) + }) + + opt := &ListOptions{Page: 2} + backups, _, err := client.Droplets.Backups(12345, opt) + if err != nil { + t.Errorf("Droplets.Backups returned error: %v", err) + } + + expected := []Image{{ID: 1}, {ID: 2}} + if !reflect.DeepEqual(backups, expected) { + t.Errorf("Droplets.Backups returned %+v, expected %+v", backups, expected) + } +} + +func TestDroplets_Actions(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/v2/droplets/12345/actions", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "GET") + fmt.Fprint(w, `{"actions": [{"id":1},{"id":2}]}`) + }) + + opt := &ListOptions{Page: 2} + actions, _, err := client.Droplets.Actions(12345, opt) + if err != nil { + t.Errorf("Droplets.Actions returned error: %v", err) + } + + expected := []Action{{ID: 1}, {ID: 2}} + if !reflect.DeepEqual(actions, expected) { + t.Errorf("Droplets.Actions returned %+v, expected %+v", actions, expected) + } +} + +func TestDroplets_Neighbors(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/v2/droplets/12345/neighbors", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "GET") + fmt.Fprint(w, `{"droplets": [{"id":1},{"id":2}]}`) + }) + + neighbors, _, err := client.Droplets.Neighbors(12345) + if err != nil { + t.Errorf("Droplets.Neighbors returned error: %v", err) + } + + expected := []Droplet{{ID: 1}, {ID: 2}} + if !reflect.DeepEqual(neighbors, expected) { + t.Errorf("Droplets.Neighbors returned %+v, expected %+v", neighbors, expected) + } +} + +func TestNetworkV4_String(t *testing.T) { + network := &NetworkV4{ + IPAddress: "192.168.1.2", + Netmask: "255.255.255.0", + Gateway: "192.168.1.1", + } + + stringified := network.String() + expected := `godo.NetworkV4{IPAddress:"192.168.1.2", Netmask:"255.255.255.0", Gateway:"192.168.1.1", Type:""}` + if expected != stringified { + t.Errorf("NetworkV4.String returned %+v, expected %+v", stringified, expected) + } + +} + +func TestNetworkV6_String(t *testing.T) { + network := &NetworkV6{ + IPAddress: "2604:A880:0800:0010:0000:0000:02DD:4001", + Netmask: 64, + Gateway: "2604:A880:0800:0010:0000:0000:0000:0001", + } + stringified := network.String() + expected := `godo.NetworkV6{IPAddress:"2604:A880:0800:0010:0000:0000:02DD:4001", Netmask:64, Gateway:"2604:A880:0800:0010:0000:0000:0000:0001", Type:""}` + if expected != stringified { + t.Errorf("NetworkV6.String returned %+v, expected %+v", stringified, expected) + } +} + +func TestDroplet_String(t *testing.T) { + + region := &Region{ + Slug: "region", + Name: "Region", + Sizes: []string{"1", "2"}, + Available: true, + } + + image := &Image{ + ID: 1, + Name: "Image", + Type: "snapshot", + Distribution: "Ubuntu", + Slug: "image", + Public: true, + Regions: []string{"one", "two"}, + MinDiskSize: 20, + Created: "2013-11-27T09:24:55Z", + } + + size := &Size{ + Slug: "size", + 
PriceMonthly: 123, + PriceHourly: 456, + Regions: []string{"1", "2"}, + } + network := &NetworkV4{ + IPAddress: "192.168.1.2", + Netmask: "255.255.255.0", + Gateway: "192.168.1.1", + } + networks := &Networks{ + V4: []NetworkV4{*network}, + } + + droplet := &Droplet{ + ID: 1, + Name: "droplet", + Memory: 123, + Vcpus: 456, + Disk: 789, + Region: region, + Image: image, + Size: size, + BackupIDs: []int{1}, + SnapshotIDs: []int{1}, + ActionIDs: []int{1}, + Locked: false, + Status: "active", + Networks: networks, + SizeSlug: "1gb", + } + + stringified := droplet.String() + expected := `godo.Droplet{ID:1, Name:"droplet", Memory:123, Vcpus:456, Disk:789, Region:godo.Region{Slug:"region", Name:"Region", Sizes:["1" "2"], Available:true}, Image:godo.Image{ID:1, Name:"Image", Type:"snapshot", Distribution:"Ubuntu", Slug:"image", Public:true, Regions:["one" "two"], MinDiskSize:20, Created:"2013-11-27T09:24:55Z"}, Size:godo.Size{Slug:"size", Memory:0, Vcpus:0, Disk:0, PriceMonthly:123, PriceHourly:456, Regions:["1" "2"], Available:false, Transfer:0}, SizeSlug:"1gb", BackupIDs:[1], SnapshotIDs:[1], Locked:false, Status:"active", Networks:godo.Networks{V4:[godo.NetworkV4{IPAddress:"192.168.1.2", Netmask:"255.255.255.0", Gateway:"192.168.1.1", Type:""}]}, ActionIDs:[1], Created:""}` + if expected != stringified { + t.Errorf("Droplet.String returned %+v, expected %+v", stringified, expected) + } +} diff --git a/vendor/github.com/digitalocean/godo/errors.go b/vendor/github.com/digitalocean/godo/errors.go new file mode 100644 index 000000000..a65ebd76b --- /dev/null +++ b/vendor/github.com/digitalocean/godo/errors.go @@ -0,0 +1,24 @@ +package godo + +import "fmt" + +// ArgError is an error that represents an error with an input to godo. It +// identifies the argument and the cause (if possible). +type ArgError struct { + arg string + reason string +} + +var _ error = &ArgError{} + +// NewArgError creates an ArgError. +func NewArgError(arg, reason string) *ArgError { + return &ArgError{ + arg: arg, + reason: reason, + } +} + +func (e *ArgError) Error() string { + return fmt.Sprintf("%s is invalid because %s", e.arg, e.reason) +} diff --git a/vendor/github.com/digitalocean/godo/errors_test.go b/vendor/github.com/digitalocean/godo/errors_test.go new file mode 100644 index 000000000..71bf1edc3 --- /dev/null +++ b/vendor/github.com/digitalocean/godo/errors_test.go @@ -0,0 +1,11 @@ +package godo + +import "testing" + +func TestArgError(t *testing.T) { + expected := "foo is invalid because bar" + err := NewArgError("foo", "bar") + if got := err.Error(); got != expected { + t.Errorf("ArgError().Error() = %q; expected %q", got, expected) + } +} diff --git a/vendor/github.com/digitalocean/godo/floating_ips.go b/vendor/github.com/digitalocean/godo/floating_ips.go new file mode 100644 index 000000000..bad04b96c --- /dev/null +++ b/vendor/github.com/digitalocean/godo/floating_ips.go @@ -0,0 +1,131 @@ +package godo + +import "fmt" + +const floatingBasePath = "v2/floating_ips" + +// FloatingIPsService is an interface for interfacing with the floating IPs +// endpoints of the Digital Ocean API. +// See: https://developers.digitalocean.com/documentation/v2#floating-ips +type FloatingIPsService interface { + List(*ListOptions) ([]FloatingIP, *Response, error) + Get(string) (*FloatingIP, *Response, error) + Create(*FloatingIPCreateRequest) (*FloatingIP, *Response, error) + Delete(string) (*Response, error) +} + +// FloatingIPsServiceOp handles communication with the floating IPs related methods of the +// DigitalOcean API.
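+// +// A hedged sketch of reserving an IP (assuming a configured *Client named c; +// the values are hypothetical, and DropletID may be omitted to leave the IP +// unassigned): +// +// req := &FloatingIPCreateRequest{Region: "nyc3", DropletID: 42} +// ip, _, err := c.FloatingIPs.Create(req)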
+type FloatingIPsServiceOp struct { + client *Client +} + +var _ FloatingIPsService = &FloatingIPsServiceOp{} + +// FloatingIP represents a Digital Ocean floating IP. +type FloatingIP struct { + Region *Region `json:"region"` + Droplet *Droplet `json:"droplet"` + IP string `json:"ip"` +} + +func (f FloatingIP) String() string { + return Stringify(f) +} + +type floatingIPsRoot struct { + FloatingIPs []FloatingIP `json:"floating_ips"` + Links *Links `json:"links"` +} + +type floatingIPRoot struct { + FloatingIP *FloatingIP `json:"floating_ip"` + Links *Links `json:"links,omitempty"` +} + +// FloatingIPCreateRequest represents a request to create a floating IP. +// If DropletID is not empty, the floating IP will be assigned to the +// droplet. +type FloatingIPCreateRequest struct { + Region string `json:"region"` + DropletID int `json:"droplet_id,omitempty"` +} + +// List all floating IPs. +func (f *FloatingIPsServiceOp) List(opt *ListOptions) ([]FloatingIP, *Response, error) { + path := floatingBasePath + path, err := addOptions(path, opt) + if err != nil { + return nil, nil, err + } + + req, err := f.client.NewRequest("GET", path, nil) + if err != nil { + return nil, nil, err + } + + root := new(floatingIPsRoot) + resp, err := f.client.Do(req, root) + if err != nil { + return nil, resp, err + } + if l := root.Links; l != nil { + resp.Links = l + } + + return root.FloatingIPs, resp, err +} + +// Get an individual floating IP. +func (f *FloatingIPsServiceOp) Get(ip string) (*FloatingIP, *Response, error) { + path := fmt.Sprintf("%s/%s", floatingBasePath, ip) + + req, err := f.client.NewRequest("GET", path, nil) + if err != nil { + return nil, nil, err + } + + root := new(floatingIPRoot) + resp, err := f.client.Do(req, root) + if err != nil { + return nil, resp, err + } + + return root.FloatingIP, resp, err +} + +// Create a floating IP. If the DropletID field of the request is not empty, +// the floating IP will also be assigned to the droplet. +func (f *FloatingIPsServiceOp) Create(createRequest *FloatingIPCreateRequest) (*FloatingIP, *Response, error) { + path := floatingBasePath + + req, err := f.client.NewRequest("POST", path, createRequest) + if err != nil { + return nil, nil, err + } + + root := new(floatingIPRoot) + resp, err := f.client.Do(req, root) + if err != nil { + return nil, resp, err + } + if l := root.Links; l != nil { + resp.Links = l + } + + return root.FloatingIP, resp, err +} + +// Delete a floating IP. +func (f *FloatingIPsServiceOp) Delete(ip string) (*Response, error) { + path := fmt.Sprintf("%s/%s", floatingBasePath, ip) + + req, err := f.client.NewRequest("DELETE", path, nil) + if err != nil { + return nil, err + } + + resp, err := f.client.Do(req, nil) + + return resp, err +} diff --git a/vendor/github.com/digitalocean/godo/floating_ips_actions.go b/vendor/github.com/digitalocean/godo/floating_ips_actions.go new file mode 100644 index 000000000..f435384eb --- /dev/null +++ b/vendor/github.com/digitalocean/godo/floating_ips_actions.go @@ -0,0 +1,105 @@ +package godo + +import "fmt" + +// FloatingIPActionsService is an interface for interfacing with the +// floating IPs actions endpoints of the Digital Ocean API. 
+// See: https://developers.digitalocean.com/documentation/v2#floating-ips-action +type FloatingIPActionsService interface { + Assign(ip string, dropletID int) (*Action, *Response, error) + Unassign(ip string) (*Action, *Response, error) + Get(ip string, actionID int) (*Action, *Response, error) + List(ip string, opt *ListOptions) ([]Action, *Response, error) +} + +// FloatingIPActionsServiceOp handles communication with the floating IPs +// action related methods of the DigitalOcean API. +type FloatingIPActionsServiceOp struct { + client *Client +} + +// Assign a floating IP to a droplet. +func (s *FloatingIPActionsServiceOp) Assign(ip string, dropletID int) (*Action, *Response, error) { + request := &ActionRequest{ + "type": "assign", + "droplet_id": dropletID, + } + return s.doAction(ip, request) +} + +// Unassign a floating IP from the droplet it is currently assigned to. +func (s *FloatingIPActionsServiceOp) Unassign(ip string) (*Action, *Response, error) { + request := &ActionRequest{"type": "unassign"} + return s.doAction(ip, request) +} + +// Get an action for a particular floating IP by id. +func (s *FloatingIPActionsServiceOp) Get(ip string, actionID int) (*Action, *Response, error) { + path := fmt.Sprintf("%s/%d", floatingIPActionPath(ip), actionID) + return s.get(path) +} + +// List the actions for a particular floating IP. +func (s *FloatingIPActionsServiceOp) List(ip string, opt *ListOptions) ([]Action, *Response, error) { + path := floatingIPActionPath(ip) + path, err := addOptions(path, opt) + if err != nil { + return nil, nil, err + } + + return s.list(path) +} + +func (s *FloatingIPActionsServiceOp) doAction(ip string, request *ActionRequest) (*Action, *Response, error) { + path := floatingIPActionPath(ip) + + req, err := s.client.NewRequest("POST", path, request) + if err != nil { + return nil, nil, err + } + + root := new(actionRoot) + resp, err := s.client.Do(req, root) + if err != nil { + return nil, resp, err + } + + return &root.Event, resp, err +} + +func (s *FloatingIPActionsServiceOp) get(path string) (*Action, *Response, error) { + req, err := s.client.NewRequest("GET", path, nil) + if err != nil { + return nil, nil, err + } + + root := new(actionRoot) + resp, err := s.client.Do(req, root) + if err != nil { + return nil, resp, err + } + + return &root.Event, resp, err +} + +func (s *FloatingIPActionsServiceOp) list(path string) ([]Action, *Response, error) { + req, err := s.client.NewRequest("GET", path, nil) + if err != nil { + return nil, nil, err + } + + root := new(actionsRoot) + resp, err := s.client.Do(req, root) + if err != nil { + return nil, resp, err + } + if l := root.Links; l != nil { + resp.Links = l + } + + return root.Actions, resp, err +} + +func floatingIPActionPath(ip string) string { + return fmt.Sprintf("%s/%s/actions", floatingBasePath, ip) +} diff --git a/vendor/github.com/digitalocean/godo/floating_ips_actions_test.go b/vendor/github.com/digitalocean/godo/floating_ips_actions_test.go new file mode 100644 index 000000000..3e1077dde --- /dev/null +++ b/vendor/github.com/digitalocean/godo/floating_ips_actions_test.go @@ -0,0 +1,167 @@ +package godo + +import ( + "encoding/json" + "fmt" + "net/http" + "reflect" + "testing" +) + +func TestFloatingIPsActions_Assign(t *testing.T) { + setup() + defer teardown() + dropletID := 12345 + assignRequest := &ActionRequest{ + "droplet_id": float64(dropletID), // encoding/json decodes numbers as floats + "type": "assign", + } + + mux.HandleFunc("/v2/floating_ips/192.168.0.1/actions", func(w 
http.ResponseWriter, r *http.Request) { + v := new(ActionRequest) + err := json.NewDecoder(r.Body).Decode(v) + if err != nil { + t.Fatalf("decode json: %v", err) + } + + testMethod(t, r, "POST") + if !reflect.DeepEqual(v, assignRequest) { + t.Errorf("Request body = %#v, expected %#v", v, assignRequest) + } + + fmt.Fprintf(w, `{"action":{"status":"in-progress"}}`) + + }) + + assign, _, err := client.FloatingIPActions.Assign("192.168.0.1", 12345) + if err != nil { + t.Errorf("FloatingIPsActions.Assign returned error: %v", err) + } + + expected := &Action{Status: "in-progress"} + if !reflect.DeepEqual(assign, expected) { + t.Errorf("FloatingIPsActions.Assign returned %+v, expected %+v", assign, expected) + } +} + +func TestFloatingIPsActions_Unassign(t *testing.T) { + setup() + defer teardown() + + unassignRequest := &ActionRequest{ + "type": "unassign", + } + + mux.HandleFunc("/v2/floating_ips/192.168.0.1/actions", func(w http.ResponseWriter, r *http.Request) { + v := new(ActionRequest) + err := json.NewDecoder(r.Body).Decode(v) + if err != nil { + t.Fatalf("decode json: %v", err) + } + + testMethod(t, r, "POST") + if !reflect.DeepEqual(v, unassignRequest) { + t.Errorf("Request body = %+v, expected %+v", v, unassignRequest) + } + + fmt.Fprintf(w, `{"action":{"status":"in-progress"}}`) + }) + + action, _, err := client.FloatingIPActions.Unassign("192.168.0.1") + if err != nil { + t.Errorf("FloatingIPsActions.Unassign returned error: %v", err) + } + + expected := &Action{Status: "in-progress"} + if !reflect.DeepEqual(action, expected) { + t.Errorf("FloatingIPsActions.Unassign returned %+v, expected %+v", action, expected) + } +} + +func TestFloatingIPsActions_Get(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/v2/floating_ips/192.168.0.1/actions/456", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "GET") + fmt.Fprintf(w, `{"action":{"status":"in-progress"}}`) + }) + + action, _, err := client.FloatingIPActions.Get("192.168.0.1", 456) + if err != nil { + t.Errorf("FloatingIPsActions.Get returned error: %v", err) + } + + expected := &Action{Status: "in-progress"} + if !reflect.DeepEqual(action, expected) { + t.Errorf("FloatingIPsActions.Get returned %+v, expected %+v", action, expected) + } +} + +func TestFloatingIPsActions_List(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/v2/floating_ips/192.168.0.1/actions", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "GET") + fmt.Fprintf(w, `{"actions":[{"status":"in-progress"}]}`) + }) + + actions, _, err := client.FloatingIPActions.List("192.168.0.1", nil) + if err != nil { + t.Errorf("FloatingIPsActions.List returned error: %v", err) + } + + expected := []Action{{Status: "in-progress"}} + if !reflect.DeepEqual(actions, expected) { + t.Errorf("FloatingIPsActions.List returned %+v, expected %+v", actions, expected) + } +} + +func TestFloatingIPsActions_ListMultiplePages(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/v2/floating_ips/192.168.0.1/actions", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "GET") + fmt.Fprint(w, `{"actions":[{"status":"in-progress"}], "links":{"pages":{"next":"http://example.com/v2/floating_ips/192.168.0.1/actions?page=2"}}}`) + }) + + _, resp, err := client.FloatingIPActions.List("192.168.0.1", nil) + if err != nil { + t.Errorf("FloatingIPsActions.List returned error: %v", err) + } + + checkCurrentPage(t, resp, 1) +} + +func TestFloatingIPsActions_ListPageByNumber(t *testing.T) { + setup() + defer teardown() + + jBlob
:= ` + { + "actions":[{"status":"in-progress"}], + "links":{ + "pages":{ + "next":"http://example.com/v2/regions/?page=3", + "prev":"http://example.com/v2/regions/?page=1", + "last":"http://example.com/v2/regions/?page=3", + "first":"http://example.com/v2/regions/?page=1" + } + } + }` + + mux.HandleFunc("/v2/floating_ips/192.168.0.1/actions", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "GET") + fmt.Fprint(w, jBlob) + }) + + opt := &ListOptions{Page: 2} + _, resp, err := client.FloatingIPActions.List("192.168.0.1", opt) + if err != nil { + t.Errorf("FloatingIPsActions.List returned error: %v", err) + } + + checkCurrentPage(t, resp, 2) +} diff --git a/vendor/github.com/digitalocean/godo/floating_ips_test.go b/vendor/github.com/digitalocean/godo/floating_ips_test.go new file mode 100644 index 000000000..9bd2eae11 --- /dev/null +++ b/vendor/github.com/digitalocean/godo/floating_ips_test.go @@ -0,0 +1,149 @@ +package godo + +import ( + "encoding/json" + "fmt" + "net/http" + "reflect" + "testing" +) + +func TestFloatingIPs_ListFloatingIPs(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/v2/floating_ips", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "GET") + fmt.Fprint(w, `{"floating_ips": [{"region":{"slug":"nyc3"},"droplet":{"id":1},"ip":"192.168.0.1"},{"region":{"slug":"nyc3"},"droplet":{"id":2},"ip":"192.168.0.2"}]}`) + }) + + floatingIPs, _, err := client.FloatingIPs.List(nil) + if err != nil { + t.Errorf("FloatingIPs.List returned error: %v", err) + } + + expected := []FloatingIP{ + {Region: &Region{Slug: "nyc3"}, Droplet: &Droplet{ID: 1}, IP: "192.168.0.1"}, + {Region: &Region{Slug: "nyc3"}, Droplet: &Droplet{ID: 2}, IP: "192.168.0.2"}, + } + if !reflect.DeepEqual(floatingIPs, expected) { + t.Errorf("FloatingIPs.List returned %+v, expected %+v", floatingIPs, expected) + } +} + +func TestFloatingIPs_ListFloatingIPsMultiplePages(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/v2/floating_ips", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "GET") + fmt.Fprint(w, `{"floating_ips": [{"region":{"slug":"nyc3"},"droplet":{"id":1},"ip":"192.168.0.1"},{"region":{"slug":"nyc3"},"droplet":{"id":2},"ip":"192.168.0.2"}], "links":{"pages":{"next":"http://example.com/v2/floating_ips/?page=2"}}}`) + }) + + _, resp, err := client.FloatingIPs.List(nil) + if err != nil { + t.Fatal(err) + } + + checkCurrentPage(t, resp, 1) +} + +func TestFloatingIPs_RetrievePageByNumber(t *testing.T) { + setup() + defer teardown() + + jBlob := ` + { + "floating_ips": [{"region":{"slug":"nyc3"},"droplet":{"id":1},"ip":"192.168.0.1"},{"region":{"slug":"nyc3"},"droplet":{"id":2},"ip":"192.168.0.2"}], + "links":{ + "pages":{ + "next":"http://example.com/v2/floating_ips/?page=3", + "prev":"http://example.com/v2/floating_ips/?page=1", + "last":"http://example.com/v2/floating_ips/?page=3", + "first":"http://example.com/v2/floating_ips/?page=1" + } + } + }` + + mux.HandleFunc("/v2/floating_ips", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "GET") + fmt.Fprint(w, jBlob) + }) + + opt := &ListOptions{Page: 2} + _, resp, err := client.FloatingIPs.List(opt) + if err != nil { + t.Fatal(err) + } + + checkCurrentPage(t, resp, 2) +} + +func TestFloatingIPs_Get(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/v2/floating_ips/192.168.0.1", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "GET") + fmt.Fprint(w, `{"floating_ip":{"region":{"slug":"nyc3"},"droplet":{"id":1},"ip":"192.168.0.1"}}`) + 
}) + + floatingIP, _, err := client.FloatingIPs.Get("192.168.0.1") + if err != nil { + t.Errorf("FloatingIPs.Get returned error: %v", err) + } + + expected := &FloatingIP{Region: &Region{Slug: "nyc3"}, Droplet: &Droplet{ID: 1}, IP: "192.168.0.1"} + if !reflect.DeepEqual(floatingIP, expected) { + t.Errorf("FloatingIPs.Get returned %+v, expected %+v", floatingIP, expected) + } +} + +func TestFloatingIPs_Create(t *testing.T) { + setup() + defer teardown() + + createRequest := &FloatingIPCreateRequest{ + Region: "nyc3", + DropletID: 1, + } + + mux.HandleFunc("/v2/floating_ips", func(w http.ResponseWriter, r *http.Request) { + v := new(FloatingIPCreateRequest) + err := json.NewDecoder(r.Body).Decode(v) + if err != nil { + t.Fatal(err) + } + + testMethod(t, r, "POST") + if !reflect.DeepEqual(v, createRequest) { + t.Errorf("Request body = %+v, expected %+v", v, createRequest) + } + + fmt.Fprint(w, `{"floating_ip":{"region":{"slug":"nyc3"},"droplet":{"id":1},"ip":"192.168.0.1"}}`) + }) + + floatingIP, _, err := client.FloatingIPs.Create(createRequest) + if err != nil { + t.Errorf("FloatingIPs.Create returned error: %v", err) + } + + expected := &FloatingIP{Region: &Region{Slug: "nyc3"}, Droplet: &Droplet{ID: 1}, IP: "192.168.0.1"} + if !reflect.DeepEqual(floatingIP, expected) { + t.Errorf("FloatingIPs.Create returned %+v, expected %+v", floatingIP, expected) + } +} + +func TestFloatingIPs_Destroy(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/v2/floating_ips/192.168.0.1", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "DELETE") + }) + + _, err := client.FloatingIPs.Delete("192.168.0.1") + if err != nil { + t.Errorf("FloatingIPs.Delete returned error: %v", err) + } +} diff --git a/vendor/github.com/digitalocean/godo/godo.go b/vendor/github.com/digitalocean/godo/godo.go new file mode 100644 index 000000000..f57b3bca6 --- /dev/null +++ b/vendor/github.com/digitalocean/godo/godo.go @@ -0,0 +1,341 @@ +package godo + +import ( + "bytes" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "net/http" + "net/url" + "reflect" + "strconv" + "time" + + "github.com/google/go-querystring/query" + headerLink "github.com/tent/http-link-go" +) + +const ( + libraryVersion = "0.1.0" + defaultBaseURL = "https://api.digitalocean.com/" + userAgent = "godo/" + libraryVersion + mediaType = "application/json" + + headerRateLimit = "X-RateLimit-Limit" + headerRateRemaining = "X-RateLimit-Remaining" + headerRateReset = "X-RateLimit-Reset" +) + +// Client manages communication with DigitalOcean V2 API. +type Client struct { + // HTTP client used to communicate with the DO API. + client *http.Client + + // Base URL for API requests. + BaseURL *url.URL + + // User agent for client + UserAgent string + + // Rate contains the current rate limit for the client as determined by the most recent + // API call.
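+ // It is refreshed from the X-RateLimit-* response headers each time Do + // completes a request.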
+ Rate Rate + + // Services used for communicating with the API + Account AccountService + Actions ActionsService + Domains DomainsService + Droplets DropletsService + DropletActions DropletActionsService + Images ImagesService + ImageActions ImageActionsService + Keys KeysService + Regions RegionsService + Sizes SizesService + FloatingIPs FloatingIPsService + FloatingIPActions FloatingIPActionsService + + // Optional function called after every successful request made to the DO APIs + onRequestCompleted RequestCompletionCallback +} + +// RequestCompletionCallback defines the type of the request callback function +type RequestCompletionCallback func(*http.Request, *http.Response) + +// ListOptions specifies the optional parameters to various List methods that +// support pagination. +type ListOptions struct { + // For paginated result sets, page of results to retrieve. + Page int `url:"page,omitempty"` + + // For paginated result sets, the number of results to include per page. + PerPage int `url:"per_page,omitempty"` +} + +// Response is a DigitalOcean response. This wraps the standard http.Response returned from DigitalOcean. +type Response struct { + *http.Response + + // Links that were returned with the response. These are parsed from the + // response body and not the header. + Links *Links + + // Monitoring URI + Monitor string + + Rate +} + +// An ErrorResponse reports the error caused by an API request +type ErrorResponse struct { + // HTTP response that caused this error + Response *http.Response + + // Error message + Message string +} + +// Rate contains the rate limit for the current client. +type Rate struct { + // The number of requests per hour the client is currently limited to. + Limit int `json:"limit"` + + // The number of remaining requests the client can make this hour. + Remaining int `json:"remaining"` + + // The time at which the current rate limit will reset. + Reset Timestamp `json:"reset"` +} + +func addOptions(s string, opt interface{}) (string, error) { + v := reflect.ValueOf(opt) + + if v.Kind() == reflect.Ptr && v.IsNil() { + return s, nil + } + + origURL, err := url.Parse(s) + if err != nil { + return s, err + } + + origValues := origURL.Query() + + newValues, err := query.Values(opt) + if err != nil { + return s, err + } + + for k, v := range newValues { + origValues[k] = v + } + + origURL.RawQuery = origValues.Encode() + return origURL.String(), nil +} + +// NewClient returns a new DigitalOcean API client. +func NewClient(httpClient *http.Client) *Client { + if httpClient == nil { + httpClient = http.DefaultClient + } + + baseURL, _ := url.Parse(defaultBaseURL) + + c := &Client{client: httpClient, BaseURL: baseURL, UserAgent: userAgent} + c.Account = &AccountServiceOp{client: c} + c.Actions = &ActionsServiceOp{client: c} + c.Domains = &DomainsServiceOp{client: c} + c.Droplets = &DropletsServiceOp{client: c} + c.DropletActions = &DropletActionsServiceOp{client: c} + c.Images = &ImagesServiceOp{client: c} + c.ImageActions = &ImageActionsServiceOp{client: c} + c.Keys = &KeysServiceOp{client: c} + c.Regions = &RegionsServiceOp{client: c} + c.Sizes = &SizesServiceOp{client: c} + c.FloatingIPs = &FloatingIPsServiceOp{client: c} + c.FloatingIPActions = &FloatingIPActionsServiceOp{client: c} + + return c +} + +// NewRequest creates an API request. A relative URL can be provided in urlStr, which will be resolved to the +// BaseURL of the Client. Relative URLs should always be specified without a preceding slash.
If specified, the // value pointed to by body is JSON encoded and included as the request body. +func (c *Client) NewRequest(method, urlStr string, body interface{}) (*http.Request, error) { + rel, err := url.Parse(urlStr) + if err != nil { + return nil, err + } + + u := c.BaseURL.ResolveReference(rel) + + buf := new(bytes.Buffer) + if body != nil { + err := json.NewEncoder(buf).Encode(body) + if err != nil { + return nil, err + } + } + + req, err := http.NewRequest(method, u.String(), buf) + if err != nil { + return nil, err + } + + req.Header.Add("Content-Type", mediaType) + req.Header.Add("Accept", mediaType) + req.Header.Add("User-Agent", userAgent) + return req, nil +} + +// OnRequestCompleted sets the DO API request completion callback +func (c *Client) OnRequestCompleted(rc RequestCompletionCallback) { + c.onRequestCompleted = rc +} + +// newResponse creates a new Response for the provided http.Response +func newResponse(r *http.Response) *Response { + response := Response{Response: r} + response.populateRate() + + return &response +} + +func (r *Response) links() (map[string]headerLink.Link, error) { + if linkText, ok := r.Response.Header["Link"]; ok { + links, err := headerLink.Parse(linkText[0]) + + if err != nil { + return nil, err + } + + linkMap := map[string]headerLink.Link{} + for _, link := range links { + linkMap[link.Rel] = link + } + + return linkMap, nil + } + + return map[string]headerLink.Link{}, nil +} + +// populateRate parses the rate related headers and populates the response Rate. +func (r *Response) populateRate() { + if limit := r.Header.Get(headerRateLimit); limit != "" { + r.Rate.Limit, _ = strconv.Atoi(limit) + } + if remaining := r.Header.Get(headerRateRemaining); remaining != "" { + r.Rate.Remaining, _ = strconv.Atoi(remaining) + } + if reset := r.Header.Get(headerRateReset); reset != "" { + if v, _ := strconv.ParseInt(reset, 10, 64); v != 0 { + r.Rate.Reset = Timestamp{time.Unix(v, 0)} + } + } +} + +// Do sends an API request and returns the API response. The API response is JSON decoded and stored in the value +// pointed to by v, or returned as an error if an API error has occurred. If v implements the io.Writer interface, +// the raw response will be written to v, without attempting to decode it. +func (c *Client) Do(req *http.Request, v interface{}) (*Response, error) { + resp, err := c.client.Do(req) + if err != nil { + return nil, err + } + if c.onRequestCompleted != nil { + c.onRequestCompleted(req, resp) + } + + defer func() { + if rerr := resp.Body.Close(); err == nil { + err = rerr + } + }() + + response := newResponse(resp) + c.Rate = response.Rate + + err = CheckResponse(resp) + if err != nil { + return response, err + } + + if v != nil { + if w, ok := v.(io.Writer); ok { + _, err := io.Copy(w, resp.Body) + if err != nil { + return nil, err + } + } else { + err := json.NewDecoder(resp.Body).Decode(v) + if err != nil { + return nil, err + } + } + } + + return response, err +} + +func (r *ErrorResponse) Error() string { + return fmt.Sprintf("%v %v: %d %v", + r.Response.Request.Method, r.Response.Request.URL, r.Response.StatusCode, r.Message) +} + +// CheckResponse checks the API response for errors, and returns them if present. A response is considered an +// error if it has a status code outside the 200 range. API error responses are expected to have either no response +// body, or a JSON response body that maps to ErrorResponse. A body that fails to decode as JSON causes the +// decoding error to be returned instead.
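+// +// Do already calls CheckResponse on every response, so calling it directly is +// only needed for an *http.Response obtained outside this client, e.g.: +// +// if err := CheckResponse(resp); err != nil { /* handle API error */ }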
+func CheckResponse(r *http.Response) error { + if c := r.StatusCode; c >= 200 && c <= 299 { + return nil + } + + errorResponse := &ErrorResponse{Response: r} + data, err := ioutil.ReadAll(r.Body) + if err == nil && len(data) > 0 { + err := json.Unmarshal(data, errorResponse) + if err != nil { + return err + } + } + + return errorResponse +} + +func (r Rate) String() string { + return Stringify(r) +} + +// String is a helper routine that allocates a new string value +// to store v and returns a pointer to it. +func String(v string) *string { + p := new(string) + *p = v + return p +} + +// Int is a helper routine that allocates a new int value +// to store v and returns a pointer to it. +func Int(v int) *int { + p := new(int) + *p = v + return p +} + +// Bool is a helper routine that allocates a new bool value +// to store v and returns a pointer to it. +func Bool(v bool) *bool { + p := new(bool) + *p = v + return p +} + +// StreamToString converts a reader to a string +func StreamToString(stream io.Reader) string { + buf := new(bytes.Buffer) + _, _ = buf.ReadFrom(stream) + return buf.String() +} diff --git a/vendor/github.com/digitalocean/godo/godo_test.go b/vendor/github.com/digitalocean/godo/godo_test.go new file mode 100644 index 000000000..5da13f9e0 --- /dev/null +++ b/vendor/github.com/digitalocean/godo/godo_test.go @@ -0,0 +1,455 @@ +package godo + +import ( + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "net/http/httptest" + "net/http/httputil" + "net/url" + "reflect" + "strings" + "testing" + "time" +) + +var ( + mux *http.ServeMux + + client *Client + + server *httptest.Server +) + +func setup() { + mux = http.NewServeMux() + server = httptest.NewServer(mux) + + client = NewClient(nil) + url, _ := url.Parse(server.URL) + client.BaseURL = url +} + +func teardown() { + server.Close() +} + +func testMethod(t *testing.T, r *http.Request, expected string) { + if expected != r.Method { + t.Errorf("Request method = %v, expected %v", r.Method, expected) + } +} + +type values map[string]string + +func testFormValues(t *testing.T, r *http.Request, values values) { + expected := url.Values{} + for k, v := range values { + expected.Add(k, v) + } + + err := r.ParseForm() + if err != nil { + t.Fatalf("parseForm(): %v", err) + } + + if !reflect.DeepEqual(expected, r.Form) { + t.Errorf("Request parameters = %v, expected %v", r.Form, expected) + } +} + +func testURLParseError(t *testing.T, err error) { + if err == nil { + t.Errorf("Expected error to be returned") + } + if err, ok := err.(*url.Error); !ok || err.Op != "parse" { + t.Errorf("Expected URL parse error, got %+v", err) + } +} + +func TestNewClient(t *testing.T) { + c := NewClient(nil) + if c.BaseURL.String() != defaultBaseURL { + t.Errorf("NewClient BaseURL = %v, expected %v", c.BaseURL.String(), defaultBaseURL) + } + + if c.UserAgent != userAgent { + t.Errorf("NewClient UserAgent = %v, expected %v", c.UserAgent, userAgent) + } +} + +func TestNewRequest(t *testing.T) { + c := NewClient(nil) + + inURL, outURL := "/foo", defaultBaseURL+"foo" + inBody, outBody := &DropletCreateRequest{Name: "l"}, + `{"name":"l","region":"","size":"","image":0,`+ + `"ssh_keys":null,"backups":false,"ipv6":false,`+ + `"private_networking":false}`+"\n" + req, _ := c.NewRequest("GET", inURL, inBody) + + // test relative URL was expanded + if req.URL.String() != outURL { + t.Errorf("NewRequest(%v) URL = %v, expected %v", inURL, req.URL, outURL) + } + + // test body was JSON encoded + body, _ :=
ioutil.ReadAll(req.Body) + if string(body) != outBody { + t.Errorf("NewRequest(%v)Body = %v, expected %v", inBody, string(body), outBody) + } + + // test default user-agent is attached to the request + userAgent := req.Header.Get("User-Agent") + if c.UserAgent != userAgent { + t.Errorf("NewRequest() User-Agent = %v, expected %v", userAgent, c.UserAgent) + } +} + +func TestNewRequest_withUserData(t *testing.T) { + c := NewClient(nil) + + inURL, outURL := "/foo", defaultBaseURL+"foo" + inBody, outBody := &DropletCreateRequest{Name: "l", UserData: "u"}, + `{"name":"l","region":"","size":"","image":0,`+ + `"ssh_keys":null,"backups":false,"ipv6":false,`+ + `"private_networking":false,"user_data":"u"}`+"\n" + req, _ := c.NewRequest("GET", inURL, inBody) + + // test relative URL was expanded + if req.URL.String() != outURL { + t.Errorf("NewRequest(%v) URL = %v, expected %v", inURL, req.URL, outURL) + } + + // test body was JSON encoded + body, _ := ioutil.ReadAll(req.Body) + if string(body) != outBody { + t.Errorf("NewRequest(%v)Body = %v, expected %v", inBody, string(body), outBody) + } + + // test default user-agent is attached to the request + userAgent := req.Header.Get("User-Agent") + if c.UserAgent != userAgent { + t.Errorf("NewRequest() User-Agent = %v, expected %v", userAgent, c.UserAgent) + } +} + +func TestNewRequest_invalidJSON(t *testing.T) { + c := NewClient(nil) + + type T struct { + A map[int]interface{} + } + _, err := c.NewRequest("GET", "/", &T{}) + + if err == nil { + t.Error("Expected error to be returned.") + } + if err, ok := err.(*json.UnsupportedTypeError); !ok { + t.Errorf("Expected a JSON error; got %#v.", err) + } +} + +func TestNewRequest_badURL(t *testing.T) { + c := NewClient(nil) + _, err := c.NewRequest("GET", ":", nil) + testURLParseError(t, err) +} + +func TestDo(t *testing.T) { + setup() + defer teardown() + + type foo struct { + A string + } + + mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) { + if m := "GET"; m != r.Method { + t.Errorf("Request method = %v, expected %v", r.Method, m) + } + fmt.Fprint(w, `{"A":"a"}`) + }) + + req, _ := client.NewRequest("GET", "/", nil) + body := new(foo) + _, err := client.Do(req, body) + if err != nil { + t.Fatalf("Do(): %v", err) + } + + expected := &foo{"a"} + if !reflect.DeepEqual(body, expected) { + t.Errorf("Response body = %v, expected %v", body, expected) + } +} + +func TestDo_httpError(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) { + http.Error(w, "Bad Request", 400) + }) + + req, _ := client.NewRequest("GET", "/", nil) + _, err := client.Do(req, nil) + + if err == nil { + t.Error("Expected HTTP 400 error.") + } +} + +// Test handling of an error caused by the internal http client's Do() +// function. 
+func TestDo_redirectLoop(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) { + http.Redirect(w, r, "/", http.StatusFound) + }) + + req, _ := client.NewRequest("GET", "/", nil) + _, err := client.Do(req, nil) + + if err == nil { + t.Error("Expected error to be returned.") + } + if err, ok := err.(*url.Error); !ok { + t.Errorf("Expected a URL error; got %#v.", err) + } +} + +func TestCheckResponse(t *testing.T) { + res := &http.Response{ + Request: &http.Request{}, + StatusCode: http.StatusBadRequest, + Body: ioutil.NopCloser(strings.NewReader(`{"message":"m", + "errors": [{"resource": "r", "field": "f", "code": "c"}]}`)), + } + err := CheckResponse(res).(*ErrorResponse) + + if err == nil { + t.Fatalf("Expected error response.") + } + + expected := &ErrorResponse{ + Response: res, + Message: "m", + } + if !reflect.DeepEqual(err, expected) { + t.Errorf("Error = %#v, expected %#v", err, expected) + } +} + +// ensure that we properly handle API errors that do not contain a response +// body +func TestCheckResponse_noBody(t *testing.T) { + res := &http.Response{ + Request: &http.Request{}, + StatusCode: http.StatusBadRequest, + Body: ioutil.NopCloser(strings.NewReader("")), + } + err := CheckResponse(res).(*ErrorResponse) + + if err == nil { + t.Errorf("Expected error response.") + } + + expected := &ErrorResponse{ + Response: res, + } + if !reflect.DeepEqual(err, expected) { + t.Errorf("Error = %#v, expected %#v", err, expected) + } +} + +func TestErrorResponse_Error(t *testing.T) { + res := &http.Response{Request: &http.Request{}} + err := ErrorResponse{Message: "m", Response: res} + if err.Error() == "" { + t.Errorf("Expected non-empty ErrorResponse.Error()") + } +} + +func TestDo_rateLimit(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) { + w.Header().Add(headerRateLimit, "60") + w.Header().Add(headerRateRemaining, "59") + w.Header().Add(headerRateReset, "1372700873") + }) + + var expected int + + if expected = 0; client.Rate.Limit != expected { + t.Errorf("Client rate limit = %v, expected %v", client.Rate.Limit, expected) + } + if expected = 0; client.Rate.Remaining != expected { + t.Errorf("Client rate remaining = %v, got %v", client.Rate.Remaining, expected) + } + if !client.Rate.Reset.IsZero() { + t.Errorf("Client rate reset not initialized to zero value") + } + + req, _ := client.NewRequest("GET", "/", nil) + _, err := client.Do(req, nil) + if err != nil { + t.Fatalf("Do(): %v", err) + } + + if expected = 60; client.Rate.Limit != expected { + t.Errorf("Client rate limit = %v, expected %v", client.Rate.Limit, expected) + } + if expected = 59; client.Rate.Remaining != expected { + t.Errorf("Client rate remaining = %v, expected %v", client.Rate.Remaining, expected) + } + reset := time.Date(2013, 7, 1, 17, 47, 53, 0, time.UTC) + if client.Rate.Reset.UTC() != reset { + t.Errorf("Client rate reset = %v, expected %v", client.Rate.Reset, reset) + } +} + +func TestDo_rateLimit_errorResponse(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) { + w.Header().Add(headerRateLimit, "60") + w.Header().Add(headerRateRemaining, "59") + w.Header().Add(headerRateReset, "1372700873") + http.Error(w, `{"message":"bad request"}`, 400) + }) + + var expected int + + req, _ := client.NewRequest("GET", "/", nil) + _, _ = client.Do(req, nil) + + if expected = 60; client.Rate.Limit != expected { + t.Errorf("Client rate limit 
= %v, expected %v", client.Rate.Limit, expected) + } + if expected = 59; client.Rate.Remaining != expected { + t.Errorf("Client rate remaining = %v, expected %v", client.Rate.Remaining, expected) + } + reset := time.Date(2013, 7, 1, 17, 47, 53, 0, time.UTC) + if client.Rate.Reset.UTC() != reset { + t.Errorf("Client rate reset = %v, expected %v", client.Rate.Reset, reset) + } +} + +func checkCurrentPage(t *testing.T, resp *Response, expectedPage int) { + links := resp.Links + p, err := links.CurrentPage() + if err != nil { + t.Fatal(err) + } + + if p != expectedPage { + t.Fatalf("expected current page to be '%d', was '%d'", expectedPage, p) + } +} + +func TestDo_completion_callback(t *testing.T) { + setup() + defer teardown() + + type foo struct { + A string + } + + mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) { + if m := "GET"; m != r.Method { + t.Errorf("Request method = %v, expected %v", r.Method, m) + } + fmt.Fprint(w, `{"A":"a"}`) + }) + + req, _ := client.NewRequest("GET", "/", nil) + body := new(foo) + var completedReq *http.Request + var completedResp string + client.OnRequestCompleted(func(req *http.Request, resp *http.Response) { + completedReq = req + b, err := httputil.DumpResponse(resp, true) + if err != nil { + t.Errorf("Failed to dump response: %s", err) + } + completedResp = string(b) + }) + _, err := client.Do(req, body) + if err != nil { + t.Fatalf("Do(): %v", err) + } + if !reflect.DeepEqual(req, completedReq) { + t.Errorf("Completed request = %v, expected %v", completedReq, req) + } + expected := `{"A":"a"}` + if !strings.Contains(completedResp, expected) { + t.Errorf("expected response to contain %v, Response = %v", expected, completedResp) + } +} + +func TestAddOptions(t *testing.T) { + cases := []struct { + name string + path string + expected string + opts *ListOptions + isErr bool + }{ + { + name: "add options", + path: "/action", + expected: "/action?page=1", + opts: &ListOptions{Page: 1}, + isErr: false, + }, + { + name: "add options with existing parameters", + path: "/action?scope=all", + expected: "/action?page=1&scope=all", + opts: &ListOptions{Page: 1}, + isErr: false, + }, + } + + for _, c := range cases { + got, err := addOptions(c.path, c.opts) + if c.isErr && err == nil { + t.Errorf("%q expected error but none was encountered", c.name) + continue + } + + if !c.isErr && err != nil { + t.Errorf("%q unexpected error: %v", c.name, err) + continue + } + + gotURL, err := url.Parse(got) + if err != nil { + t.Errorf("%q unable to parse returned URL", c.name) + continue + } + + expectedURL, err := url.Parse(c.expected) + if err != nil { + t.Errorf("%q unable to parse expected URL", c.name) + continue + } + + if g, e := gotURL.Path, expectedURL.Path; g != e { + t.Errorf("%q path = %q; expected %q", c.name, g, e) + continue + } + + if g, e := gotURL.Query(), expectedURL.Query(); !reflect.DeepEqual(g, e) { + t.Errorf("%q query = %#v; expected %#v", c.name, g, e) + continue + } + } +} diff --git a/vendor/github.com/digitalocean/godo/image_actions.go b/vendor/github.com/digitalocean/godo/image_actions.go new file mode 100644 index 000000000..2d0538567 --- /dev/null +++ b/vendor/github.com/digitalocean/godo/image_actions.go @@ -0,0 +1,71 @@ +package godo + +import "fmt" + +// ImageActionsService is an interface for interfacing with the image actions +// endpoints of the DigitalOcean API +// See: https://developers.digitalocean.com/documentation/v2#image-actions +type ImageActionsService interface { + Get(int, int) (*Action, *Response, error) + 
Transfer(int, *ActionRequest) (*Action, *Response, error)
+}
+
+// ImageActionsServiceOp handles communication with the image action related methods of the
+// DigitalOcean API.
+type ImageActionsServiceOp struct {
+	client *Client
+}
+
+var _ ImageActionsService = &ImageActionsServiceOp{}
+
+// Transfer an image
+func (i *ImageActionsServiceOp) Transfer(imageID int, transferRequest *ActionRequest) (*Action, *Response, error) {
+	if imageID < 1 {
+		return nil, nil, NewArgError("imageID", "cannot be less than 1")
+	}
+
+	if transferRequest == nil {
+		return nil, nil, NewArgError("transferRequest", "cannot be nil")
+	}
+
+	path := fmt.Sprintf("v2/images/%d/actions", imageID)
+
+	req, err := i.client.NewRequest("POST", path, transferRequest)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	root := new(actionRoot)
+	resp, err := i.client.Do(req, root)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return &root.Event, resp, err
+}
+
+// Get an action for a particular image by id.
+func (i *ImageActionsServiceOp) Get(imageID, actionID int) (*Action, *Response, error) {
+	if imageID < 1 {
+		return nil, nil, NewArgError("imageID", "cannot be less than 1")
+	}
+
+	if actionID < 1 {
+		return nil, nil, NewArgError("actionID", "cannot be less than 1")
+	}
+
+	path := fmt.Sprintf("v2/images/%d/actions/%d", imageID, actionID)
+
+	req, err := i.client.NewRequest("GET", path, nil)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	root := new(actionRoot)
+	resp, err := i.client.Do(req, root)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return &root.Event, resp, err
+}
diff --git a/vendor/github.com/digitalocean/godo/image_actions_test.go b/vendor/github.com/digitalocean/godo/image_actions_test.go
new file mode 100644
index 000000000..5d3cf67c0
--- /dev/null
+++ b/vendor/github.com/digitalocean/godo/image_actions_test.go
@@ -0,0 +1,62 @@
+package godo
+
+import (
+	"encoding/json"
+	"fmt"
+	"net/http"
+	"reflect"
+	"testing"
+)
+
+func TestImageActions_Transfer(t *testing.T) {
+	setup()
+	defer teardown()
+
+	transferRequest := &ActionRequest{}
+
+	mux.HandleFunc("/v2/images/12345/actions", func(w http.ResponseWriter, r *http.Request) {
+		v := new(ActionRequest)
+		err := json.NewDecoder(r.Body).Decode(v)
+		if err != nil {
+			t.Fatalf("decode json: %v", err)
+		}
+
+		testMethod(t, r, "POST")
+		if !reflect.DeepEqual(v, transferRequest) {
+			t.Errorf("Request body = %+v, expected %+v", v, transferRequest)
+		}
+
+		fmt.Fprintf(w, `{"action":{"status":"in-progress"}}`)
+
+	})
+
+	transfer, _, err := client.ImageActions.Transfer(12345, transferRequest)
+	if err != nil {
+		t.Errorf("ImageActions.Transfer returned error: %v", err)
+	}
+
+	expected := &Action{Status: "in-progress"}
+	if !reflect.DeepEqual(transfer, expected) {
+		t.Errorf("ImageActions.Transfer returned %+v, expected %+v", transfer, expected)
+	}
+}
+
+func TestImageActions_Get(t *testing.T) {
+	setup()
+	defer teardown()
+
+	mux.HandleFunc("/v2/images/123/actions/456", func(w http.ResponseWriter, r *http.Request) {
+		testMethod(t, r, "GET")
+		fmt.Fprintf(w, `{"action":{"status":"in-progress"}}`)
+	})
+
+	action, _, err := client.ImageActions.Get(123, 456)
+	if err != nil {
+		t.Errorf("ImageActions.Get returned error: %v", err)
+	}
+
+	expected := &Action{Status: "in-progress"}
+	if !reflect.DeepEqual(action, expected) {
+		t.Errorf("ImageActions.Get returned %+v, expected %+v", action, expected)
+	}
+}
diff --git a/vendor/github.com/digitalocean/godo/images.go b/vendor/github.com/digitalocean/godo/images.go
new file mode 100644
index
000000000..96894f886 --- /dev/null +++ b/vendor/github.com/digitalocean/godo/images.go @@ -0,0 +1,194 @@ +package godo + +import "fmt" + +const imageBasePath = "v2/images" + +// ImagesService is an interface for interfacing with the images +// endpoints of the DigitalOcean API +// See: https://developers.digitalocean.com/documentation/v2#images +type ImagesService interface { + List(*ListOptions) ([]Image, *Response, error) + ListDistribution(opt *ListOptions) ([]Image, *Response, error) + ListApplication(opt *ListOptions) ([]Image, *Response, error) + ListUser(opt *ListOptions) ([]Image, *Response, error) + GetByID(int) (*Image, *Response, error) + GetBySlug(string) (*Image, *Response, error) + Update(int, *ImageUpdateRequest) (*Image, *Response, error) + Delete(int) (*Response, error) +} + +// ImagesServiceOp handles communication with the image related methods of the +// DigitalOcean API. +type ImagesServiceOp struct { + client *Client +} + +var _ ImagesService = &ImagesServiceOp{} + +// Image represents a DigitalOcean Image +type Image struct { + ID int `json:"id,float64,omitempty"` + Name string `json:"name,omitempty"` + Type string `json:"type,omitempty"` + Distribution string `json:"distribution,omitempty"` + Slug string `json:"slug,omitempty"` + Public bool `json:"public,omitempty"` + Regions []string `json:"regions,omitempty"` + MinDiskSize int `json:"min_disk_size,omitempty"` + Created string `json:"created_at,omitempty"` +} + +// ImageUpdateRequest represents a request to update an image. +type ImageUpdateRequest struct { + Name string `json:"name"` +} + +type imageRoot struct { + Image Image +} + +type imagesRoot struct { + Images []Image + Links *Links `json:"links"` +} + +type listImageOptions struct { + Private bool `url:"private,omitempty"` + Type string `url:"type,omitempty"` +} + +func (i Image) String() string { + return Stringify(i) +} + +// List lists all the images available. +func (s *ImagesServiceOp) List(opt *ListOptions) ([]Image, *Response, error) { + return s.list(opt, nil) +} + +// ListDistribution lists all the distribution images. +func (s *ImagesServiceOp) ListDistribution(opt *ListOptions) ([]Image, *Response, error) { + listOpt := listImageOptions{Type: "distribution"} + return s.list(opt, &listOpt) +} + +// ListApplication lists all the application images. +func (s *ImagesServiceOp) ListApplication(opt *ListOptions) ([]Image, *Response, error) { + listOpt := listImageOptions{Type: "application"} + return s.list(opt, &listOpt) +} + +// ListUser lists all the user images. +func (s *ImagesServiceOp) ListUser(opt *ListOptions) ([]Image, *Response, error) { + listOpt := listImageOptions{Private: true} + return s.list(opt, &listOpt) +} + +// GetByID retrieves an image by id. +func (s *ImagesServiceOp) GetByID(imageID int) (*Image, *Response, error) { + if imageID < 1 { + return nil, nil, NewArgError("imageID", "cannot be less than 1") + } + + return s.get(interface{}(imageID)) +} + +// GetBySlug retrieves an image by slug. +func (s *ImagesServiceOp) GetBySlug(slug string) (*Image, *Response, error) { + if len(slug) < 1 { + return nil, nil, NewArgError("slug", "cannot be blank") + } + + return s.get(interface{}(slug)) +} + +// Update an image name. 
+func (s *ImagesServiceOp) Update(imageID int, updateRequest *ImageUpdateRequest) (*Image, *Response, error) { + if imageID < 1 { + return nil, nil, NewArgError("imageID", "cannot be less than 1") + } + + if updateRequest == nil { + return nil, nil, NewArgError("updateRequest", "cannot be nil") + } + + path := fmt.Sprintf("%s/%d", imageBasePath, imageID) + req, err := s.client.NewRequest("PUT", path, updateRequest) + if err != nil { + return nil, nil, err + } + + root := new(imageRoot) + resp, err := s.client.Do(req, root) + if err != nil { + return nil, resp, err + } + + return &root.Image, resp, err +} + +// Delete an image. +func (s *ImagesServiceOp) Delete(imageID int) (*Response, error) { + if imageID < 1 { + return nil, NewArgError("imageID", "cannot be less than 1") + } + + path := fmt.Sprintf("%s/%d", imageBasePath, imageID) + + req, err := s.client.NewRequest("DELETE", path, nil) + if err != nil { + return nil, err + } + + resp, err := s.client.Do(req, nil) + + return resp, err +} + +// Helper method for getting an individual image +func (s *ImagesServiceOp) get(ID interface{}) (*Image, *Response, error) { + path := fmt.Sprintf("%s/%v", imageBasePath, ID) + + req, err := s.client.NewRequest("GET", path, nil) + if err != nil { + return nil, nil, err + } + + root := new(imageRoot) + resp, err := s.client.Do(req, root) + if err != nil { + return nil, resp, err + } + + return &root.Image, resp, err +} + +// Helper method for listing images +func (s *ImagesServiceOp) list(opt *ListOptions, listOpt *listImageOptions) ([]Image, *Response, error) { + path := imageBasePath + path, err := addOptions(path, opt) + if err != nil { + return nil, nil, err + } + path, err = addOptions(path, listOpt) + if err != nil { + return nil, nil, err + } + + req, err := s.client.NewRequest("GET", path, nil) + if err != nil { + return nil, nil, err + } + + root := new(imagesRoot) + resp, err := s.client.Do(req, root) + if err != nil { + return nil, resp, err + } + if l := root.Links; l != nil { + resp.Links = l + } + + return root.Images, resp, err +} diff --git a/vendor/github.com/digitalocean/godo/images_test.go b/vendor/github.com/digitalocean/godo/images_test.go new file mode 100644 index 000000000..3fd4d1a80 --- /dev/null +++ b/vendor/github.com/digitalocean/godo/images_test.go @@ -0,0 +1,262 @@ +package godo + +import ( + "encoding/json" + "fmt" + "net/http" + "reflect" + "testing" +) + +func TestImages_List(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/v2/images", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "GET") + fmt.Fprint(w, `{"images":[{"id":1},{"id":2}]}`) + }) + + images, _, err := client.Images.List(nil) + if err != nil { + t.Errorf("Images.List returned error: %v", err) + } + + expected := []Image{{ID: 1}, {ID: 2}} + if !reflect.DeepEqual(images, expected) { + t.Errorf("Images.List returned %+v, expected %+v", images, expected) + } +} + +func TestImages_ListDistribution(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/v2/images", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "GET") + expected := "distribution" + actual := r.URL.Query().Get("type") + if actual != expected { + t.Errorf("'type' query = %v, expected %v", actual, expected) + } + fmt.Fprint(w, `{"images":[{"id":1},{"id":2}]}`) + }) + + images, _, err := client.Images.ListDistribution(nil) + if err != nil { + t.Errorf("Images.ListDistribution returned error: %v", err) + } + + expected := []Image{{ID: 1}, {ID: 2}} + if !reflect.DeepEqual(images, expected) 
{ + t.Errorf("Images.ListDistribution returned %+v, expected %+v", images, expected) + } +} + +func TestImages_ListApplication(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/v2/images", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "GET") + expected := "application" + actual := r.URL.Query().Get("type") + if actual != expected { + t.Errorf("'type' query = %v, expected %v", actual, expected) + } + fmt.Fprint(w, `{"images":[{"id":1},{"id":2}]}`) + }) + + images, _, err := client.Images.ListApplication(nil) + if err != nil { + t.Errorf("Images.ListApplication returned error: %v", err) + } + + expected := []Image{{ID: 1}, {ID: 2}} + if !reflect.DeepEqual(images, expected) { + t.Errorf("Images.ListApplication returned %+v, expected %+v", images, expected) + } +} + +func TestImages_ListUser(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/v2/images", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "GET") + expected := "true" + actual := r.URL.Query().Get("private") + if actual != expected { + t.Errorf("'private' query = %v, expected %v", actual, expected) + } + + fmt.Fprint(w, `{"images":[{"id":1},{"id":2}]}`) + }) + + images, _, err := client.Images.ListUser(nil) + if err != nil { + t.Errorf("Images.ListUser returned error: %v", err) + } + + expected := []Image{{ID: 1}, {ID: 2}} + if !reflect.DeepEqual(images, expected) { + t.Errorf("Images.ListUser returned %+v, expected %+v", images, expected) + } +} + +func TestImages_ListImagesMultiplePages(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/v2/images", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "GET") + fmt.Fprint(w, `{"images": [{"id":1},{"id":2}], "links":{"pages":{"next":"http://example.com/v2/images/?page=2"}}}`) + }) + + _, resp, err := client.Images.List(&ListOptions{Page: 2}) + if err != nil { + t.Fatal(err) + } + checkCurrentPage(t, resp, 1) +} + +func TestImages_RetrievePageByNumber(t *testing.T) { + setup() + defer teardown() + + jBlob := ` + { + "images": [{"id":1},{"id":2}], + "links":{ + "pages":{ + "next":"http://example.com/v2/images/?page=3", + "prev":"http://example.com/v2/images/?page=1", + "last":"http://example.com/v2/images/?page=3", + "first":"http://example.com/v2/images/?page=1" + } + } + }` + + mux.HandleFunc("/v2/images", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "GET") + fmt.Fprint(w, jBlob) + }) + + opt := &ListOptions{Page: 2} + _, resp, err := client.Images.List(opt) + if err != nil { + t.Fatal(err) + } + + checkCurrentPage(t, resp, 2) +} + +func TestImages_GetImageByID(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/v2/images/12345", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "GET") + fmt.Fprint(w, `{"image":{"id":12345}}`) + }) + + images, _, err := client.Images.GetByID(12345) + if err != nil { + t.Errorf("Image.GetByID returned error: %v", err) + } + + expected := &Image{ID: 12345} + if !reflect.DeepEqual(images, expected) { + t.Errorf("Images.GetByID returned %+v, expected %+v", images, expected) + } +} + +func TestImages_GetImageBySlug(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/v2/images/ubuntu", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "GET") + fmt.Fprint(w, `{"image":{"id":12345}}`) + }) + + images, _, err := client.Images.GetBySlug("ubuntu") + if err != nil { + t.Errorf("Image.GetBySlug returned error: %v", err) + } + + expected := &Image{ID: 12345} + if !reflect.DeepEqual(images, 
expected) { + t.Errorf("Images.Get returned %+v, expected %+v", images, expected) + } +} + +func TestImages_Update(t *testing.T) { + setup() + defer teardown() + + updateRequest := &ImageUpdateRequest{ + Name: "name", + } + + mux.HandleFunc("/v2/images/12345", func(w http.ResponseWriter, r *http.Request) { + expected := map[string]interface{}{ + "name": "name", + } + + var v map[string]interface{} + err := json.NewDecoder(r.Body).Decode(&v) + if err != nil { + t.Fatalf("decode json: %v", err) + } + + if !reflect.DeepEqual(v, expected) { + t.Errorf("Request body = %#v, expected %#v", v, expected) + } + + fmt.Fprintf(w, `{"image":{"id":1}}`) + }) + + image, _, err := client.Images.Update(12345, updateRequest) + if err != nil { + t.Errorf("Images.Update returned error: %v", err) + } else { + if id := image.ID; id != 1 { + t.Errorf("expected id '%d', received '%d'", 1, id) + } + } +} + +func TestImages_Destroy(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/v2/images/12345", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "DELETE") + }) + + _, err := client.Images.Delete(12345) + if err != nil { + t.Errorf("Image.Delete returned error: %v", err) + } +} + +func TestImage_String(t *testing.T) { + image := &Image{ + ID: 1, + Name: "Image", + Type: "snapshot", + Distribution: "Ubuntu", + Slug: "image", + Public: true, + Regions: []string{"one", "two"}, + MinDiskSize: 20, + Created: "2013-11-27T09:24:55Z", + } + + stringified := image.String() + expected := `godo.Image{ID:1, Name:"Image", Type:"snapshot", Distribution:"Ubuntu", Slug:"image", Public:true, Regions:["one" "two"], MinDiskSize:20, Created:"2013-11-27T09:24:55Z"}` + if expected != stringified { + t.Errorf("Image.String returned %+v, expected %+v", stringified, expected) + } +} diff --git a/vendor/github.com/digitalocean/godo/keys.go b/vendor/github.com/digitalocean/godo/keys.go new file mode 100644 index 000000000..8dc50f8a0 --- /dev/null +++ b/vendor/github.com/digitalocean/godo/keys.go @@ -0,0 +1,222 @@ +package godo + +import "fmt" + +const keysBasePath = "v2/account/keys" + +// KeysService is an interface for interfacing with the keys +// endpoints of the DigitalOcean API +// See: https://developers.digitalocean.com/documentation/v2#keys +type KeysService interface { + List(*ListOptions) ([]Key, *Response, error) + GetByID(int) (*Key, *Response, error) + GetByFingerprint(string) (*Key, *Response, error) + Create(*KeyCreateRequest) (*Key, *Response, error) + UpdateByID(int, *KeyUpdateRequest) (*Key, *Response, error) + UpdateByFingerprint(string, *KeyUpdateRequest) (*Key, *Response, error) + DeleteByID(int) (*Response, error) + DeleteByFingerprint(string) (*Response, error) +} + +// KeysServiceOp handles communication with key related method of the +// DigitalOcean API. +type KeysServiceOp struct { + client *Client +} + +var _ KeysService = &KeysServiceOp{} + +// Key represents a DigitalOcean Key. +type Key struct { + ID int `json:"id,float64,omitempty"` + Name string `json:"name,omitempty"` + Fingerprint string `json:"fingerprint,omitempty"` + PublicKey string `json:"public_key,omitempty"` +} + +// KeyUpdateRequest represents a request to update a DigitalOcean key. +type KeyUpdateRequest struct { + Name string `json:"name"` +} + +type keysRoot struct { + SSHKeys []Key `json:"ssh_keys"` + Links *Links `json:"links"` +} + +type keyRoot struct { + SSHKey Key `json:"ssh_key"` +} + +func (s Key) String() string { + return Stringify(s) +} + +// KeyCreateRequest represents a request to create a new key. 
+type KeyCreateRequest struct {
+	Name      string `json:"name"`
+	PublicKey string `json:"public_key"`
+}
+
+// List all keys
+func (s *KeysServiceOp) List(opt *ListOptions) ([]Key, *Response, error) {
+	path := keysBasePath
+	path, err := addOptions(path, opt)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	req, err := s.client.NewRequest("GET", path, nil)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	root := new(keysRoot)
+	resp, err := s.client.Do(req, root)
+	if err != nil {
+		return nil, resp, err
+	}
+	if l := root.Links; l != nil {
+		resp.Links = l
+	}
+
+	return root.SSHKeys, resp, err
+}
+
+// get retrieves a single key from the given path
+func (s *KeysServiceOp) get(path string) (*Key, *Response, error) {
+	req, err := s.client.NewRequest("GET", path, nil)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	root := new(keyRoot)
+	resp, err := s.client.Do(req, root)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return &root.SSHKey, resp, err
+}
+
+// GetByID gets a Key by id
+func (s *KeysServiceOp) GetByID(keyID int) (*Key, *Response, error) {
+	if keyID < 1 {
+		return nil, nil, NewArgError("keyID", "cannot be less than 1")
+	}
+
+	path := fmt.Sprintf("%s/%d", keysBasePath, keyID)
+	return s.get(path)
+}
+
+// GetByFingerprint gets a Key by fingerprint
+func (s *KeysServiceOp) GetByFingerprint(fingerprint string) (*Key, *Response, error) {
+	if len(fingerprint) < 1 {
+		return nil, nil, NewArgError("fingerprint", "cannot be empty")
+	}
+
+	path := fmt.Sprintf("%s/%s", keysBasePath, fingerprint)
+	return s.get(path)
+}
+
+// Create a key using a KeyCreateRequest
+func (s *KeysServiceOp) Create(createRequest *KeyCreateRequest) (*Key, *Response, error) {
+	if createRequest == nil {
+		return nil, nil, NewArgError("createRequest", "cannot be nil")
+	}
+
+	req, err := s.client.NewRequest("POST", keysBasePath, createRequest)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	root := new(keyRoot)
+	resp, err := s.client.Do(req, root)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return &root.SSHKey, resp, err
+}
+
+// UpdateByID updates a key name by ID.
+func (s *KeysServiceOp) UpdateByID(keyID int, updateRequest *KeyUpdateRequest) (*Key, *Response, error) {
+	if keyID < 1 {
+		return nil, nil, NewArgError("keyID", "cannot be less than 1")
+	}
+
+	if updateRequest == nil {
+		return nil, nil, NewArgError("updateRequest", "cannot be nil")
+	}
+
+	path := fmt.Sprintf("%s/%d", keysBasePath, keyID)
+	req, err := s.client.NewRequest("PUT", path, updateRequest)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	root := new(keyRoot)
+	resp, err := s.client.Do(req, root)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return &root.SSHKey, resp, err
+}
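Taken together, these helpers give the usual create-then-inspect flow. A minimal sketch, assuming a static OAuth token (the token and key material are placeholders):

    package main

    import (
        "fmt"
        "log"

        "github.com/digitalocean/godo"
        "golang.org/x/oauth2"
    )

    func main() {
        ts := oauth2.StaticTokenSource(&oauth2.Token{AccessToken: "mytoken"}) // placeholder
        client := godo.NewClient(oauth2.NewClient(oauth2.NoContext, ts))

        // Create validates its arguments locally (NewArgError) before any HTTP call.
        key, _, err := client.Keys.Create(&godo.KeyCreateRequest{
            Name:      "example",
            PublicKey: "ssh-rsa AAAA... placeholder",
        })
        if err != nil {
            log.Fatal(err)
        }

        // The key can then be looked up either by numeric ID or by fingerprint.
        fmt.Println(key.ID, key.Fingerprint)
    }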
+
+// UpdateByFingerprint updates a key name by fingerprint.
+func (s *KeysServiceOp) UpdateByFingerprint(fingerprint string, updateRequest *KeyUpdateRequest) (*Key, *Response, error) {
+	if len(fingerprint) < 1 {
+		return nil, nil, NewArgError("fingerprint", "cannot be empty")
+	}
+
+	if updateRequest == nil {
+		return nil, nil, NewArgError("updateRequest", "cannot be nil")
+	}
+
+	path := fmt.Sprintf("%s/%s", keysBasePath, fingerprint)
+	req, err := s.client.NewRequest("PUT", path, updateRequest)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	root := new(keyRoot)
+	resp, err := s.client.Do(req, root)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return &root.SSHKey, resp, err
+}
+
+// delete removes the key at the given path
+func (s *KeysServiceOp) delete(path string) (*Response, error) {
+	req, err := s.client.NewRequest("DELETE", path, nil)
+	if err != nil {
+		return nil, err
+	}
+
+	resp, err := s.client.Do(req, nil)
+
+	return resp, err
+}
+
+// DeleteByID deletes a key by its id
+func (s *KeysServiceOp) DeleteByID(keyID int) (*Response, error) {
+	if keyID < 1 {
+		return nil, NewArgError("keyID", "cannot be less than 1")
+	}
+
+	path := fmt.Sprintf("%s/%d", keysBasePath, keyID)
+	return s.delete(path)
+}
+
+// DeleteByFingerprint deletes a key by its fingerprint
+func (s *KeysServiceOp) DeleteByFingerprint(fingerprint string) (*Response, error) {
+	if len(fingerprint) < 1 {
+		return nil, NewArgError("fingerprint", "cannot be empty")
+	}
+
+	path := fmt.Sprintf("%s/%s", keysBasePath, fingerprint)
+	return s.delete(path)
+}
diff --git a/vendor/github.com/digitalocean/godo/keys_test.go b/vendor/github.com/digitalocean/godo/keys_test.go
new file mode 100644
index 000000000..b209f61f5
--- /dev/null
+++ b/vendor/github.com/digitalocean/godo/keys_test.go
@@ -0,0 +1,265 @@
+package godo
+
+import (
+	"encoding/json"
+	"fmt"
+	"net/http"
+	"reflect"
+	"testing"
+)
+
+func TestKeys_List(t *testing.T) {
+	setup()
+	defer teardown()
+
+	mux.HandleFunc("/v2/account/keys", func(w http.ResponseWriter, r *http.Request) {
+		testMethod(t, r, "GET")
+		fmt.Fprint(w, `{"ssh_keys":[{"id":1},{"id":2}]}`)
+	})
+
+	keys, _, err := client.Keys.List(nil)
+	if err != nil {
+		t.Errorf("Keys.List returned error: %v", err)
+	}
+
+	expected := []Key{{ID: 1}, {ID: 2}}
+	if !reflect.DeepEqual(keys, expected) {
+		t.Errorf("Keys.List returned %+v, expected %+v", keys, expected)
+	}
+}
+
+func TestKeys_ListKeysMultiplePages(t *testing.T) {
+	setup()
+	defer teardown()
+
+	mux.HandleFunc("/v2/account/keys", func(w http.ResponseWriter, r *http.Request) {
+		testMethod(t, r, "GET")
+		fmt.Fprint(w, `{"ssh_keys": [{"id":1},{"id":2}], "links":{"pages":{"next":"http://example.com/v2/account/keys/?page=2"}}}`)
+	})
+
+	_, resp, err := client.Keys.List(nil)
+	if err != nil {
+		t.Fatal(err)
+	}
+	checkCurrentPage(t, resp, 1)
+}
+
+func TestKeys_RetrievePageByNumber(t *testing.T) {
+	setup()
+	defer teardown()
+
+	jBlob := `
+	{
+		"keys": [{"id":1},{"id":2}],
+		"links":{
+			"pages":{
+				"next":"http://example.com/v2/account/keys/?page=3",
+				"prev":"http://example.com/v2/account/keys/?page=1",
+				"last":"http://example.com/v2/account/keys/?page=3",
+				"first":"http://example.com/v2/account/keys/?page=1"
+			}
+		}
+	}`
+
+	mux.HandleFunc("/v2/account/keys", func(w http.ResponseWriter, r *http.Request) {
+		testMethod(t, r, "GET")
+		fmt.Fprint(w, jBlob)
+	})
+
+	opt := &ListOptions{Page: 2}
+	_, resp, err := client.Keys.List(opt)
+	if err != nil {
+		t.Fatal(err)
+	}
+	checkCurrentPage(t, resp, 2)
+}
+
+func TestKeys_GetByID(t *testing.T) {
+	setup()
+	defer teardown()
+
mux.HandleFunc("/v2/account/keys/12345", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "GET") + fmt.Fprint(w, `{"ssh_key": {"id":12345}}`) + }) + + keys, _, err := client.Keys.GetByID(12345) + if err != nil { + t.Errorf("Keys.GetByID returned error: %v", err) + } + + expected := &Key{ID: 12345} + if !reflect.DeepEqual(keys, expected) { + t.Errorf("Keys.GetByID returned %+v, expected %+v", keys, expected) + } +} + +func TestKeys_GetByFingerprint(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/v2/account/keys/aa:bb:cc", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "GET") + fmt.Fprint(w, `{"ssh_key": {"fingerprint":"aa:bb:cc"}}`) + }) + + keys, _, err := client.Keys.GetByFingerprint("aa:bb:cc") + if err != nil { + t.Errorf("Keys.GetByFingerprint returned error: %v", err) + } + + expected := &Key{Fingerprint: "aa:bb:cc"} + if !reflect.DeepEqual(keys, expected) { + t.Errorf("Keys.GetByFingerprint returned %+v, expected %+v", keys, expected) + } +} + +func TestKeys_Create(t *testing.T) { + setup() + defer teardown() + + createRequest := &KeyCreateRequest{ + Name: "name", + PublicKey: "ssh-rsa longtextandstuff", + } + + mux.HandleFunc("/v2/account/keys", func(w http.ResponseWriter, r *http.Request) { + v := new(KeyCreateRequest) + err := json.NewDecoder(r.Body).Decode(v) + if err != nil { + t.Fatalf("decode json: %v", err) + } + + testMethod(t, r, "POST") + if !reflect.DeepEqual(v, createRequest) { + t.Errorf("Request body = %+v, expected %+v", v, createRequest) + } + + fmt.Fprintf(w, `{"ssh_key":{"id":1}}`) + }) + + key, _, err := client.Keys.Create(createRequest) + if err != nil { + t.Errorf("Keys.Create returned error: %v", err) + } + + expected := &Key{ID: 1} + if !reflect.DeepEqual(key, expected) { + t.Errorf("Keys.Create returned %+v, expected %+v", key, expected) + } +} + +func TestKeys_UpdateByID(t *testing.T) { + setup() + defer teardown() + + updateRequest := &KeyUpdateRequest{ + Name: "name", + } + + mux.HandleFunc("/v2/account/keys/12345", func(w http.ResponseWriter, r *http.Request) { + expected := map[string]interface{}{ + "name": "name", + } + + var v map[string]interface{} + err := json.NewDecoder(r.Body).Decode(&v) + if err != nil { + t.Fatalf("decode json: %v", err) + } + + if !reflect.DeepEqual(v, expected) { + t.Errorf("Request body = %#v, expected %#v", v, expected) + } + + fmt.Fprintf(w, `{"ssh_key":{"id":1}}`) + }) + + key, _, err := client.Keys.UpdateByID(12345, updateRequest) + if err != nil { + t.Errorf("Keys.Update returned error: %v", err) + } else { + if id := key.ID; id != 1 { + t.Errorf("expected id '%d', received '%d'", 1, id) + } + } +} + +func TestKeys_UpdateByFingerprint(t *testing.T) { + setup() + defer teardown() + + updateRequest := &KeyUpdateRequest{ + Name: "name", + } + + mux.HandleFunc("/v2/account/keys/3b:16:bf:e4:8b:00:8b:b8:59:8c:a9:d3:f0:19:45:fa", func(w http.ResponseWriter, r *http.Request) { + expected := map[string]interface{}{ + "name": "name", + } + + var v map[string]interface{} + err := json.NewDecoder(r.Body).Decode(&v) + if err != nil { + t.Fatalf("decode json: %v", err) + } + + if !reflect.DeepEqual(v, expected) { + t.Errorf("Request body = %#v, expected %#v", v, expected) + } + + fmt.Fprintf(w, `{"ssh_key":{"id":1}}`) + }) + + key, _, err := client.Keys.UpdateByFingerprint("3b:16:bf:e4:8b:00:8b:b8:59:8c:a9:d3:f0:19:45:fa", updateRequest) + if err != nil { + t.Errorf("Keys.Update returned error: %v", err) + } else { + if id := key.ID; id != 1 { + t.Errorf("expected id '%d', received 
'%d'", 1, id) + } + } +} + +func TestKeys_DestroyByID(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/v2/account/keys/12345", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "DELETE") + }) + + _, err := client.Keys.DeleteByID(12345) + if err != nil { + t.Errorf("Keys.Delete returned error: %v", err) + } +} + +func TestKeys_DestroyByFingerprint(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/v2/account/keys/aa:bb:cc", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "DELETE") + }) + + _, err := client.Keys.DeleteByFingerprint("aa:bb:cc") + if err != nil { + t.Errorf("Keys.Delete returned error: %v", err) + } +} + +func TestKey_String(t *testing.T) { + key := &Key{ + ID: 123, + Name: "Key", + Fingerprint: "fingerprint", + PublicKey: "public key", + } + + stringified := key.String() + expected := `godo.Key{ID:123, Name:"Key", Fingerprint:"fingerprint", PublicKey:"public key"}` + if expected != stringified { + t.Errorf("Key.String returned %+v, expected %+v", stringified, expected) + } +} diff --git a/vendor/github.com/digitalocean/godo/links.go b/vendor/github.com/digitalocean/godo/links.go new file mode 100644 index 000000000..463510523 --- /dev/null +++ b/vendor/github.com/digitalocean/godo/links.go @@ -0,0 +1,86 @@ +package godo + +import ( + "net/url" + "strconv" +) + +// Links manages links that are returned along with a List +type Links struct { + Pages *Pages `json:"pages,omitempty"` + Actions []LinkAction `json:"actions,omitempty"` +} + +// Pages are pages specified in Links +type Pages struct { + First string `json:"first,omitempty"` + Prev string `json:"prev,omitempty"` + Last string `json:"last,omitempty"` + Next string `json:"next,omitempty"` +} + +// LinkAction is a pointer to an action +type LinkAction struct { + ID int `json:"id,omitempty"` + Rel string `json:"rel,omitempty"` + HREF string `json:"href,omitempty"` +} + +// CurrentPage is current page of the list +func (l *Links) CurrentPage() (int, error) { + return l.Pages.current() +} + +func (p *Pages) current() (int, error) { + switch { + case p == nil: + return 1, nil + case p.Prev == "" && p.Next != "": + return 1, nil + case p.Prev != "": + prevPage, err := pageForURL(p.Prev) + if err != nil { + return 0, err + } + + return prevPage + 1, nil + } + + return 0, nil +} + +// IsLastPage returns true if the current page is the last +func (l *Links) IsLastPage() bool { + if l.Pages == nil { + return true + } + return l.Pages.isLast() +} + +func (p *Pages) isLast() bool { + if p.Last == "" { + return true + } + + return false +} + +func pageForURL(urlText string) (int, error) { + u, err := url.ParseRequestURI(urlText) + if err != nil { + return 0, err + } + + pageStr := u.Query().Get("page") + page, err := strconv.Atoi(pageStr) + if err != nil { + return 0, err + } + + return page, nil +} + +// Get a link action by id. 
+func (la *LinkAction) Get(client *Client) (*Action, *Response, error) { + return client.Actions.Get(la.ID) +} diff --git a/vendor/github.com/digitalocean/godo/links_test.go b/vendor/github.com/digitalocean/godo/links_test.go new file mode 100644 index 000000000..02c9f34d2 --- /dev/null +++ b/vendor/github.com/digitalocean/godo/links_test.go @@ -0,0 +1,176 @@ +package godo + +import ( + "encoding/json" + "testing" +) + +var ( + firstPageLinksJSONBlob = []byte(`{ + "links": { + "pages": { + "last": "https://api.digitalocean.com/v2/droplets/?page=3", + "next": "https://api.digitalocean.com/v2/droplets/?page=2" + } + } + }`) + otherPageLinksJSONBlob = []byte(`{ + "links": { + "pages": { + "first": "https://api.digitalocean.com/v2/droplets/?page=1", + "prev": "https://api.digitalocean.com/v2/droplets/?page=1", + "last": "https://api.digitalocean.com/v2/droplets/?page=3", + "next": "https://api.digitalocean.com/v2/droplets/?page=3" + } + } + }`) + lastPageLinksJSONBlob = []byte(`{ + "links": { + "pages": { + "first": "https://api.digitalocean.com/v2/droplets/?page=1", + "prev": "https://api.digitalocean.com/v2/droplets/?page=2" + } + } + }`) + + missingLinksJSONBlob = []byte(`{ }`) +) + +type godoList struct { + Links Links `json:"links"` +} + +func loadLinksJSON(t *testing.T, j []byte) Links { + var list godoList + err := json.Unmarshal(j, &list) + if err != nil { + t.Fatal(err) + } + + return list.Links +} + +func TestLinks_ParseFirst(t *testing.T) { + links := loadLinksJSON(t, firstPageLinksJSONBlob) + _, err := links.CurrentPage() + if err != nil { + t.Fatal(err) + } + + r := &Response{Links: &links} + checkCurrentPage(t, r, 1) + + if links.IsLastPage() { + t.Fatalf("shouldn't be last page") + } +} + +func TestLinks_ParseMiddle(t *testing.T) { + links := loadLinksJSON(t, otherPageLinksJSONBlob) + _, err := links.CurrentPage() + if err != nil { + t.Fatal(err) + } + + r := &Response{Links: &links} + checkCurrentPage(t, r, 2) + + if links.IsLastPage() { + t.Fatalf("shouldn't be last page") + } +} + +func TestLinks_ParseLast(t *testing.T) { + links := loadLinksJSON(t, lastPageLinksJSONBlob) + _, err := links.CurrentPage() + if err != nil { + t.Fatal(err) + } + + r := &Response{Links: &links} + checkCurrentPage(t, r, 3) + if !links.IsLastPage() { + t.Fatalf("expected last page") + } +} + +func TestLinks_ParseMissing(t *testing.T) { + links := loadLinksJSON(t, missingLinksJSONBlob) + _, err := links.CurrentPage() + if err != nil { + t.Fatal(err) + } + + r := &Response{Links: &links} + checkCurrentPage(t, r, 1) +} + +func TestLinks_ParseURL(t *testing.T) { + type linkTest struct { + name, url string + expected int + } + + linkTests := []linkTest{ + { + name: "prev", + url: "https://api.digitalocean.com/v2/droplets/?page=1", + expected: 1, + }, + { + name: "last", + url: "https://api.digitalocean.com/v2/droplets/?page=5", + expected: 5, + }, + { + name: "nexta", + url: "https://api.digitalocean.com/v2/droplets/?page=2", + expected: 2, + }, + } + + for _, lT := range linkTests { + p, err := pageForURL(lT.url) + if err != nil { + t.Fatal(err) + } + + if p != lT.expected { + t.Errorf("expected page for '%s' to be '%d', was '%d'", + lT.url, lT.expected, p) + } + } + +} + +func TestLinks_ParseEmptyString(t *testing.T) { + type linkTest struct { + name, url string + expected int + } + + linkTests := []linkTest{ + { + name: "none", + url: "http://example.com", + expected: 0, + }, + { + name: "bad", + url: "no url", + expected: 0, + }, + { + name: "empty", + url: "", + expected: 0, + }, + } + + for _, lT 
:= range linkTests { + _, err := pageForURL(lT.url) + if err == nil { + t.Fatalf("expected error for test '%s', but received none", lT.name) + } + } +} diff --git a/vendor/github.com/digitalocean/godo/regions.go b/vendor/github.com/digitalocean/godo/regions.go new file mode 100644 index 000000000..e44e8bcd6 --- /dev/null +++ b/vendor/github.com/digitalocean/godo/regions.go @@ -0,0 +1,63 @@ +package godo + +// RegionsService is an interface for interfacing with the regions +// endpoints of the DigitalOcean API +// See: https://developers.digitalocean.com/documentation/v2#regions +type RegionsService interface { + List(*ListOptions) ([]Region, *Response, error) +} + +// RegionsServiceOp handles communication with the region related methods of the +// DigitalOcean API. +type RegionsServiceOp struct { + client *Client +} + +var _ RegionsService = &RegionsServiceOp{} + +// Region represents a DigitalOcean Region +type Region struct { + Slug string `json:"slug,omitempty"` + Name string `json:"name,omitempty"` + Sizes []string `json:"sizes,omitempty"` + Available bool `json:"available,omitempty"` + Features []string `json:"features,omitempty"` +} + +type regionsRoot struct { + Regions []Region + Links *Links `json:"links"` +} + +type regionRoot struct { + Region Region +} + +func (r Region) String() string { + return Stringify(r) +} + +// List all regions +func (s *RegionsServiceOp) List(opt *ListOptions) ([]Region, *Response, error) { + path := "v2/regions" + path, err := addOptions(path, opt) + if err != nil { + return nil, nil, err + } + + req, err := s.client.NewRequest("GET", path, nil) + if err != nil { + return nil, nil, err + } + + root := new(regionsRoot) + resp, err := s.client.Do(req, root) + if err != nil { + return nil, resp, err + } + if l := root.Links; l != nil { + resp.Links = l + } + + return root.Regions, resp, err +} diff --git a/vendor/github.com/digitalocean/godo/regions_test.go b/vendor/github.com/digitalocean/godo/regions_test.go new file mode 100644 index 000000000..3c725763f --- /dev/null +++ b/vendor/github.com/digitalocean/godo/regions_test.go @@ -0,0 +1,91 @@ +package godo + +import ( + "fmt" + "net/http" + "reflect" + "testing" +) + +func TestRegions_List(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/v2/regions", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "GET") + fmt.Fprint(w, `{"regions":[{"slug":"1"},{"slug":"2"}]}`) + }) + + regions, _, err := client.Regions.List(nil) + if err != nil { + t.Errorf("Regions.List returned error: %v", err) + } + + expected := []Region{{Slug: "1"}, {Slug: "2"}} + if !reflect.DeepEqual(regions, expected) { + t.Errorf("Regions.List returned %+v, expected %+v", regions, expected) + } +} + +func TestRegions_ListRegionsMultiplePages(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/v2/regions", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "GET") + fmt.Fprint(w, `{"regions": [{"id":1},{"id":2}], "links":{"pages":{"next":"http://example.com/v2/regions/?page=2"}}}`) + }) + + _, resp, err := client.Regions.List(nil) + if err != nil { + t.Fatal(err) + } + + checkCurrentPage(t, resp, 1) +} + +func TestRegions_RetrievePageByNumber(t *testing.T) { + setup() + defer teardown() + + jBlob := ` + { + "regions": [{"id":1},{"id":2}], + "links":{ + "pages":{ + "next":"http://example.com/v2/regions/?page=3", + "prev":"http://example.com/v2/regions/?page=1", + "last":"http://example.com/v2/regions/?page=3", + "first":"http://example.com/v2/regions/?page=1" + } + } + }` + + 
mux.HandleFunc("/v2/regions", func(w http.ResponseWriter, r *http.Request) {
+		testMethod(t, r, "GET")
+		fmt.Fprint(w, jBlob)
+	})
+
+	opt := &ListOptions{Page: 2}
+	_, resp, err := client.Regions.List(opt)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	checkCurrentPage(t, resp, 2)
+}
+
+func TestRegion_String(t *testing.T) {
+	region := &Region{
+		Slug:      "region",
+		Name:      "Region",
+		Sizes:     []string{"1", "2"},
+		Available: true,
+	}
+
+	stringified := region.String()
+	expected := `godo.Region{Slug:"region", Name:"Region", Sizes:["1" "2"], Available:true}`
+	if expected != stringified {
+		t.Errorf("Region.String returned %+v, expected %+v", stringified, expected)
+	}
+}
diff --git a/vendor/github.com/digitalocean/godo/sizes.go b/vendor/github.com/digitalocean/godo/sizes.go
new file mode 100644
index 000000000..7f454e7eb
--- /dev/null
+++ b/vendor/github.com/digitalocean/godo/sizes.go
@@ -0,0 +1,63 @@
+package godo
+
+// SizesService is an interface for interfacing with the size
+// endpoints of the DigitalOcean API
+// See: https://developers.digitalocean.com/documentation/v2#sizes
+type SizesService interface {
+	List(*ListOptions) ([]Size, *Response, error)
+}
+
+// SizesServiceOp handles communication with the size related methods of the
+// DigitalOcean API.
+type SizesServiceOp struct {
+	client *Client
+}
+
+var _ SizesService = &SizesServiceOp{}
+
+// Size represents a DigitalOcean Size
+type Size struct {
+	Slug         string   `json:"slug,omitempty"`
+	Memory       int      `json:"memory,omitempty"`
+	Vcpus        int      `json:"vcpus,omitempty"`
+	Disk         int      `json:"disk,omitempty"`
+	PriceMonthly float64  `json:"price_monthly,omitempty"`
+	PriceHourly  float64  `json:"price_hourly,omitempty"`
+	Regions      []string `json:"regions,omitempty"`
+	Available    bool     `json:"available,omitempty"`
+	Transfer     float64  `json:"transfer,omitempty"`
+}
+
+func (s Size) String() string {
+	return Stringify(s)
+}
+
+type sizesRoot struct {
+	Sizes []Size
+	Links *Links `json:"links"`
+}
+
+// List all sizes
+func (s *SizesServiceOp) List(opt *ListOptions) ([]Size, *Response, error) {
+	path := "v2/sizes"
+	path, err := addOptions(path, opt)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	req, err := s.client.NewRequest("GET", path, nil)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	root := new(sizesRoot)
+	resp, err := s.client.Do(req, root)
+	if err != nil {
+		return nil, resp, err
+	}
+	if l := root.Links; l != nil {
+		resp.Links = l
+	}
+
+	return root.Sizes, resp, err
+}
diff --git a/vendor/github.com/digitalocean/godo/sizes_test.go b/vendor/github.com/digitalocean/godo/sizes_test.go
new file mode 100644
index 000000000..390ddf4db
--- /dev/null
+++ b/vendor/github.com/digitalocean/godo/sizes_test.go
@@ -0,0 +1,96 @@
+package godo
+
+import (
+	"fmt"
+	"net/http"
+	"reflect"
+	"testing"
+)
+
+func TestSizes_List(t *testing.T) {
+	setup()
+	defer teardown()
+
+	mux.HandleFunc("/v2/sizes", func(w http.ResponseWriter, r *http.Request) {
+		testMethod(t, r, "GET")
+		fmt.Fprint(w, `{"sizes":[{"slug":"1"},{"slug":"2"}]}`)
+	})
+
+	sizes, _, err := client.Sizes.List(nil)
+	if err != nil {
+		t.Errorf("Sizes.List returned error: %v", err)
+	}
+
+	expected := []Size{{Slug: "1"}, {Slug: "2"}}
+	if !reflect.DeepEqual(sizes, expected) {
+		t.Errorf("Sizes.List returned %+v, expected %+v", sizes, expected)
+	}
+}
+
+func TestSizes_ListSizesMultiplePages(t *testing.T) {
+	setup()
+	defer teardown()
+
+	mux.HandleFunc("/v2/sizes", func(w http.ResponseWriter, r *http.Request) {
+		testMethod(t, r, "GET")
+		fmt.Fprint(w, `{"sizes": [{"id":1},{"id":2}], "links":{"pages":{"next":"http://example.com/v2/sizes/?page=2"}}}`)
+	})
+
+	_, resp, err := client.Sizes.List(nil)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	checkCurrentPage(t, resp, 1)
+}
+
+func TestSizes_RetrievePageByNumber(t *testing.T) {
+	setup()
+	defer teardown()
+
+	jBlob := `
+	{
+		"sizes": [{"id":1},{"id":2}],
+		"links":{
+			"pages":{
+				"next":"http://example.com/v2/sizes/?page=3",
+				"prev":"http://example.com/v2/sizes/?page=1",
+				"last":"http://example.com/v2/sizes/?page=3",
+				"first":"http://example.com/v2/sizes/?page=1"
+			}
+		}
+	}`
+
+	mux.HandleFunc("/v2/sizes", func(w http.ResponseWriter, r *http.Request) {
+		testMethod(t, r, "GET")
+		fmt.Fprint(w, jBlob)
+	})
+
+	opt := &ListOptions{Page: 2}
+	_, resp, err := client.Sizes.List(opt)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	checkCurrentPage(t, resp, 2)
+}
+
+func TestSize_String(t *testing.T) {
+	size := &Size{
+		Slug:         "slize",
+		Memory:       123,
+		Vcpus:        456,
+		Disk:         789,
+		PriceMonthly: 123,
+		PriceHourly:  456,
+		Regions:      []string{"1", "2"},
+		Available:    true,
+		Transfer:     789,
+	}
+
+	stringified := size.String()
+	expected := `godo.Size{Slug:"slize", Memory:123, Vcpus:456, Disk:789, PriceMonthly:123, PriceHourly:456, Regions:["1" "2"], Available:true, Transfer:789}`
+	if expected != stringified {
+		t.Errorf("Size.String returned %+v, expected %+v", stringified, expected)
+	}
+}
diff --git a/vendor/github.com/digitalocean/godo/strings.go b/vendor/github.com/digitalocean/godo/strings.go
new file mode 100644
index 000000000..bbdbc9241
--- /dev/null
+++ b/vendor/github.com/digitalocean/godo/strings.go
@@ -0,0 +1,83 @@
+package godo
+
+import (
+	"bytes"
+	"fmt"
+	"reflect"
+)
+
+var timestampType = reflect.TypeOf(Timestamp{})
+
+// Stringify attempts to create a string representation of DigitalOcean types
+func Stringify(message interface{}) string {
+	var buf bytes.Buffer
+	v := reflect.ValueOf(message)
+	stringifyValue(&buf, v)
+	return buf.String()
+}
+
+// stringifyValue was graciously cargoculted from the goprotobuf library
+func stringifyValue(w *bytes.Buffer, val reflect.Value) {
+	if val.Kind() == reflect.Ptr && val.IsNil() {
+		_, _ = w.Write([]byte("<nil>"))
+		return
+	}
+
+	v := reflect.Indirect(val)
+
+	switch v.Kind() {
+	case reflect.String:
+		fmt.Fprintf(w, `"%s"`, v)
+	case reflect.Slice:
+		_, _ = w.Write([]byte{'['})
+		for i := 0; i < v.Len(); i++ {
+			if i > 0 {
+				_, _ = w.Write([]byte{' '})
+			}
+
+			stringifyValue(w, v.Index(i))
+		}
+
+		_, _ = w.Write([]byte{']'})
+		return
+	case reflect.Struct:
+		if v.Type().Name() != "" {
+			_, _ = w.Write([]byte(v.Type().String()))
+		}
+
+		// special handling of Timestamp values
+		if v.Type() == timestampType {
+			fmt.Fprintf(w, "{%s}", v.Interface())
+			return
+		}
+
+		_, _ = w.Write([]byte{'{'})
+
+		var sep bool
+		for i := 0; i < v.NumField(); i++ {
+			fv := v.Field(i)
+			if fv.Kind() == reflect.Ptr && fv.IsNil() {
+				continue
+			}
+			if fv.Kind() == reflect.Slice && fv.IsNil() {
+				continue
+			}
+
+			if sep {
+				_, _ = w.Write([]byte(", "))
+			} else {
+				sep = true
+			}
+
+			_, _ = w.Write([]byte(v.Type().Field(i).Name))
+			_, _ = w.Write([]byte{':'})
+			stringifyValue(w, fv)
+		}
+
+		_, _ = w.Write([]byte{'}'})
+	default:
+		if v.CanInterface() {
+			fmt.Fprint(w, v.Interface())
+		}
+	}
+}
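Stringify is what backs the String methods on the godo API types. A small sketch of the output shape it produces (nil pointers and nil slices are skipped, strings are quoted, and struct output is prefixed with the type name):

    package main

    import (
        "fmt"

        "github.com/digitalocean/godo"
    )

    func main() {
        k := godo.Key{ID: 123, Name: "Key"}
        // Empty strings are not skipped, only nil pointers and nil slices,
        // so this prints: godo.Key{ID:123, Name:"Key", Fingerprint:"", PublicKey:""}
        fmt.Println(godo.Stringify(k))
    }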
t.Errorf("%s: %+v != %+v", tc.desc, got, data) + } + } +} + +type WrappedTimestamp struct { + A int + Time Timestamp +} + +func TestWrappedTimstamp_Marshal(t *testing.T) { + testCases := []struct { + desc string + data WrappedTimestamp + want string + wantErr bool + equal bool + }{ + {"Reference", WrappedTimestamp{0, Timestamp{referenceTime}}, fmt.Sprintf(`{"A":0,"Time":%s}`, referenceTimeStr), false, true}, + {"Empty", WrappedTimestamp{}, fmt.Sprintf(`{"A":0,"Time":%s}`, emptyTimeStr), false, true}, + {"Mismatch", WrappedTimestamp{}, fmt.Sprintf(`{"A":0,"Time":%s}`, referenceTimeStr), false, false}, + } + for _, tc := range testCases { + out, err := json.Marshal(tc.data) + if gotErr := err != nil; gotErr != tc.wantErr { + t.Errorf("%s: gotErr=%v, wantErr=%v, err=%v", tc.desc, gotErr, tc.wantErr, err) + } + got := string(out) + equal := got == tc.want + if equal != tc.equal { + t.Errorf("%s: got=%s, want=%s, equal=%v, want=%v", tc.desc, got, tc.want, equal, tc.equal) + } + } +} + +func TestWrappedTimestamp_Unmarshal(t *testing.T) { + testCases := []struct { + desc string + data string + want WrappedTimestamp + wantErr bool + equal bool + }{ + {"Reference", referenceTimeStr, WrappedTimestamp{0, Timestamp{referenceTime}}, false, true}, + {"ReferenceUnix", referenceUnixTimeStr, WrappedTimestamp{0, Timestamp{referenceTime}}, false, true}, + {"Empty", emptyTimeStr, WrappedTimestamp{0, Timestamp{}}, false, true}, + {"UnixStart", `0`, WrappedTimestamp{0, Timestamp{unixOrigin}}, false, true}, + {"Mismatch", referenceTimeStr, WrappedTimestamp{0, Timestamp{}}, false, false}, + {"MismatchUnix", `0`, WrappedTimestamp{0, Timestamp{}}, false, false}, + {"Invalid", `"asdf"`, WrappedTimestamp{0, Timestamp{referenceTime}}, true, false}, + } + for _, tc := range testCases { + var got Timestamp + err := json.Unmarshal([]byte(tc.data), &got) + if gotErr := err != nil; gotErr != tc.wantErr { + t.Errorf("%s: gotErr=%v, wantErr=%v, err=%v", tc.desc, gotErr, tc.wantErr, err) + continue + } + equal := got.Time.Equal(tc.want.Time.Time) + if equal != tc.equal { + t.Errorf("%s: got=%#v, want=%#v, equal=%v, want=%v", tc.desc, got, tc.want, equal, tc.equal) + } + } +} + +func TestWrappedTimestamp_MarshalReflexivity(t *testing.T) { + testCases := []struct { + desc string + data WrappedTimestamp + }{ + {"Reference", WrappedTimestamp{0, Timestamp{referenceTime}}}, + {"Empty", WrappedTimestamp{0, Timestamp{}}}, + } + for _, tc := range testCases { + bytes, err := json.Marshal(tc.data) + if err != nil { + t.Errorf("%s: Marshal err=%v", tc.desc, err) + } + var got WrappedTimestamp + err = json.Unmarshal(bytes, &got) + if !got.Time.Equal(tc.data.Time) { + t.Errorf("%s: %+v != %+v", tc.desc, got, tc.data) + } + } +} diff --git a/vendor/github.com/digitalocean/godo/util/droplet.go b/vendor/github.com/digitalocean/godo/util/droplet.go new file mode 100644 index 000000000..4a016bc68 --- /dev/null +++ b/vendor/github.com/digitalocean/godo/util/droplet.go @@ -0,0 +1,47 @@ +package util + +import ( + "fmt" + "time" + + "github.com/digitalocean/godo" +) + +const ( + // activeFailure is the amount of times we can fail before deciding + // the check for active is a total failure. This can help account + // for servers randomly not answering. 
+ activeFailure = 3 +) + +// WaitForActive waits for a droplet to become active +func WaitForActive(client *godo.Client, monitorURI string) error { + if len(monitorURI) == 0 { + return fmt.Errorf("create had no monitor uri") + } + + completed := false + failCount := 0 + for !completed { + action, _, err := client.DropletActions.GetByURI(monitorURI) + + if err != nil { + if failCount <= activeFailure { + failCount++ + continue + } + return err + } + + switch action.Status { + case godo.ActionInProgress: + time.Sleep(5 * time.Second) + case godo.ActionCompleted: + completed = true + default: + return fmt.Errorf("unknown status: [%s]", action.Status) + } + } + + return nil +} diff --git a/vendor/github.com/digitalocean/godo/util/droplet_test.go b/vendor/github.com/digitalocean/godo/util/droplet_test.go new file mode 100644 index 000000000..e0a82ad96 --- /dev/null +++ b/vendor/github.com/digitalocean/godo/util/droplet_test.go @@ -0,0 +1,26 @@ +package util + +import ( + "golang.org/x/oauth2" + + "github.com/digitalocean/godo" +) + +func ExampleWaitForActive() { + // build client + pat := "mytoken" + token := &oauth2.Token{AccessToken: pat} + t := oauth2.StaticTokenSource(token) + + oauthClient := oauth2.NewClient(oauth2.NoContext, t) + client := godo.NewClient(oauthClient) + + // create your droplet and retrieve the create action uri + uri := "https://api.digitalocean.com/v2/actions/xxxxxxxx" + + // block until until the action is complete + err := WaitForActive(client, uri) + if err != nil { + panic(err) + } +} diff --git a/vendor/github.com/dylanmei/iso8601/LICENSE b/vendor/github.com/dylanmei/iso8601/LICENSE new file mode 100644 index 000000000..dcabcdc70 --- /dev/null +++ b/vendor/github.com/dylanmei/iso8601/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2015 Dylan Meissner + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/dylanmei/iso8601/README.md b/vendor/github.com/dylanmei/iso8601/README.md new file mode 100644 index 000000000..c93b3cf77 --- /dev/null +++ b/vendor/github.com/dylanmei/iso8601/README.md @@ -0,0 +1,9 @@ + +iso 8601 parser and formatter +============================= + +An [ISO8601](https://en.wikipedia.org/wiki/ISO_8601) Go utility. 
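A minimal usage sketch of the Duration helpers vendored in duration.go below, preceding the status notes; the import path is assumed to be the package's upstream path, and note that month designators are rejected with ErrNoMonth:

```go
package main

import (
	"fmt"
	"time"

	"github.com/dylanmei/iso8601"
)

func main() {
	// Parse an ISO 8601 duration into a time.Duration.
	d, err := iso8601.ParseDuration("P2DT3H4M5S")
	if err != nil {
		panic(err)
	}
	fmt.Println(d) // 51h4m5s

	// Format a time.Duration back into an ISO 8601 string.
	fmt.Println(iso8601.FormatDuration(90 * time.Minute)) // PT1H30M
}
```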
+
+- *Time* is not yet implemented
+- *Duration* is mostly implemented
+
diff --git a/vendor/github.com/dylanmei/iso8601/duration.go b/vendor/github.com/dylanmei/iso8601/duration.go
new file mode 100644
index 000000000..d5cab17dc
--- /dev/null
+++ b/vendor/github.com/dylanmei/iso8601/duration.go
@@ -0,0 +1,96 @@
+package iso8601
+
+import (
+	"errors"
+	"fmt"
+	"regexp"
+	"strconv"
+	"time"
+)
+
+var (
+	// ErrBadFormat is returned when parsing fails
+	ErrBadFormat = errors.New("bad format string")
+
+	// ErrNoMonth is raised when a month is in the format string
+	ErrNoMonth = errors.New("no months allowed")
+
+	full = regexp.MustCompile(`P((?P<year>\d+)Y)?((?P<month>\d+)M)?((?P<day>\d+)D)?(T((?P<hour>\d+)H)?((?P<minute>\d+)M)?((?P<second>\d+)S)?)?`)
+	week = regexp.MustCompile(`P((?P<week>\d+)W)`)
+)
+
+// adapted from https://github.com/BrianHicks/finch/duration
+func ParseDuration(value string) (time.Duration, error) {
+	var match []string
+	var regex *regexp.Regexp
+
+	if week.MatchString(value) {
+		match = week.FindStringSubmatch(value)
+		regex = week
+	} else if full.MatchString(value) {
+		match = full.FindStringSubmatch(value)
+		regex = full
+	} else {
+		return time.Duration(0), ErrBadFormat
+	}
+
+	d := time.Duration(0)
+	day := time.Hour * 24
+	week := day * 7
+	year := day * 365
+
+	for i, name := range regex.SubexpNames() {
+		part := match[i]
+		if i == 0 || name == "" || part == "" {
+			continue
+		}
+
+		value, err := strconv.Atoi(part)
+		if err != nil {
+			return time.Duration(0), err
+		}
+		switch name {
+		case "year":
+			d += year * time.Duration(value)
+		case "month":
+			return time.Duration(0), ErrNoMonth
+		case "week":
+			d += week * time.Duration(value)
+		case "day":
+			d += day * time.Duration(value)
+		case "hour":
+			d += time.Hour * time.Duration(value)
+		case "minute":
+			d += time.Minute * time.Duration(value)
+		case "second":
+			d += time.Second * time.Duration(value)
+		}
+	}
+
+	return d, nil
+}
+
+func FormatDuration(duration time.Duration) string {
+	// we're not doing negative durations
+	if duration.Seconds() <= 0 {
+		return "PT0S"
+	}
+
+	hours := int(duration.Hours())
+	minutes := int(duration.Minutes()) - (hours * 60)
+	seconds := int(duration.Seconds()) - (hours*3600 + minutes*60)
+
+	// we're not doing Y,M,W
+	s := "PT"
+	if hours > 0 {
+		s = fmt.Sprintf("%s%dH", s, hours)
+	}
+	if minutes > 0 {
+		s = fmt.Sprintf("%s%dM", s, minutes)
+	}
+	if seconds > 0 {
+		s = fmt.Sprintf("%s%dS", s, seconds)
+	}
+
+	return s
+}
diff --git a/vendor/github.com/dylanmei/iso8601/duration_test.go b/vendor/github.com/dylanmei/iso8601/duration_test.go
new file mode 100644
index 000000000..5a2b2527f
--- /dev/null
+++ b/vendor/github.com/dylanmei/iso8601/duration_test.go
@@ -0,0 +1,81 @@
+package iso8601
+
+import (
+	"testing"
+	"time"
+)
+
+func Test_parse_duration(t *testing.T) {
+	var dur time.Duration
+	var err error
+
+	// test with bad format
+	_, err = ParseDuration("asdf")
+	if err != ErrBadFormat {
+		t.Fatalf("Expected an ErrBadFormat")
+	}
+
+	// test with month
+	_, err = ParseDuration("P1M")
+	if err != ErrNoMonth {
+		t.Fatalf("Expected an ErrNoMonth")
+	}
+
+	// test with good full string
+	exp, _ := time.ParseDuration("51h4m5s")
+	dur, err = ParseDuration("P2DT3H4M5S")
+	if err != nil {
+		t.Fatalf("Did not expect err: %v", err)
+	}
+	if dur.Hours() != exp.Hours() {
+		t.Errorf("Expected %v hours, not %v", exp.Hours(), dur.Hours())
+	}
+	if dur.Minutes() != exp.Minutes() {
+		t.Errorf("Expected %v minutes, not %v", exp.Minutes(), dur.Minutes())
+	}
+	if dur.Seconds() != exp.Seconds() {
+		t.Errorf("Expected %v seconds, not %v", exp.Seconds(), dur.Seconds())
+	}
+	if dur.Nanoseconds() != exp.Nanoseconds() {
+		t.Errorf("Expected %v nanoseconds, not %v", exp.Nanoseconds(), dur.Nanoseconds())
+	}
+
+	// test with good week string
+	dur, err = ParseDuration("P1W")
+	if err != nil {
+		t.Fatalf("Did not expect err: %v", err)
+	}
+	if dur.Hours() != 24*7 {
+		t.Errorf("Expected 168 hours, not %v", dur.Hours())
+	}
+}
+
+func Test_format_duration(t *testing.T) {
+	// Test complex duration with hours, minutes, seconds
+	d := time.Duration(3701) * time.Second
+	s := FormatDuration(d)
+	if s != "PT1H1M41S" {
+		t.Fatalf("bad ISO 8601 duration string: %s", s)
+	}
+
+	// Test only minutes duration
+	d = time.Duration(20) * time.Minute
+	s = FormatDuration(d)
+	if s != "PT20M" {
+		t.Fatalf("bad ISO 8601 duration string for 20M: %s", s)
+	}
+
+	// Test only seconds
+	d = time.Duration(1) * time.Second
+	s = FormatDuration(d)
+	if s != "PT1S" {
+		t.Fatalf("bad ISO 8601 duration string for 1S: %s", s)
+	}
+
+	// Test negative duration (unsupported)
+	d = time.Duration(-1) * time.Second
+	s = FormatDuration(d)
+	if s != "PT0S" {
+		t.Fatalf("bad ISO 8601 duration string for negative: %s", s)
+	}
+}
diff --git a/vendor/github.com/dylanmei/winrmtest/LICENSE b/vendor/github.com/dylanmei/winrmtest/LICENSE
new file mode 100644
index 000000000..aac5c68e7
--- /dev/null
+++ b/vendor/github.com/dylanmei/winrmtest/LICENSE
@@ -0,0 +1,22 @@
+Copyright (c) 2014-2015 Dylan Meissner
+
+MIT License
+
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sublicense, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice shall be
+included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/vendor/github.com/dylanmei/winrmtest/README.md b/vendor/github.com/dylanmei/winrmtest/README.md
new file mode 100644
index 000000000..19c19609d
--- /dev/null
+++ b/vendor/github.com/dylanmei/winrmtest/README.md
@@ -0,0 +1,48 @@
+
+# winrmtest
+
+An in-progress testing package to complement the [masterzen/winrm](https://github.com/masterzen/winrm) Go-based winrm library.
+
+My primary use-case for this is for [dylanmei/packer-communicator-winrm](https://github.com/dylanmei/packer-communicator-winrm), a [Packer](http://packer.io) communicator plugin for interacting with machines using Windows Remote Management.
+
+## Example Use
+
+A fictitious "Windows tools" package.
+ +``` + +package wintools + +import ( + "io" + "testing" + "github.com/dylanmei/winrmtest" +) + +func Test_empty_temp_directory(t *testing.T) { + r := winrmtest.NewRemote() + defer r.Close() + + r.CommandFunc(wimrmtest.MatchText("dir C:\Temp"), func(out, err io.Writer) int { + out.Write([]byte(` Volume in drive C is Windows 2012 R2 + Volume Serial Number is XXXX-XXXX + + Directory of C:\ + +File Not Found`)) + return 0 + }) + + lister := NewDirectoryLister(r.Host, r.Port) + list, _ := lister.TempDirectory() + + if count := len(list.Dirs()); count != 0 { + t.Errorf("Expected 0 directories but found %d.\n", count) + } + + if count := len(list.Files()); count != 0 { + t.Errorf("Expected 0 files but found %d.\n", count) + } +} +``` + diff --git a/vendor/github.com/dylanmei/winrmtest/remote.go b/vendor/github.com/dylanmei/winrmtest/remote.go new file mode 100644 index 000000000..ecc083f79 --- /dev/null +++ b/vendor/github.com/dylanmei/winrmtest/remote.go @@ -0,0 +1,79 @@ +package winrmtest + +import ( + "io" + "net/http" + "net/http/httptest" + "net/url" + "regexp" + "strconv" + "strings" +) + +// Remote respresents a WinRM server +type Remote struct { + Host string + Port int + server *httptest.Server + service *wsman +} + +// NewRemote returns a new initialized Remote +func NewRemote() *Remote { + mux := http.NewServeMux() + srv := httptest.NewServer(mux) + + host, port, _ := splitAddr(srv.URL) + remote := Remote{ + Host: host, + Port: port, + server: srv, + service: &wsman{}, + } + + mux.Handle("/wsman", remote.service) + return &remote +} + +// Close closes the WinRM server +func (r *Remote) Close() { + r.server.Close() +} + +// MatcherFunc respresents a function used to match WinRM commands +type MatcherFunc func(candidate string) bool + +// MatchText return a new MatcherFunc based on text matching +func MatchText(text string) MatcherFunc { + return func(candidate string) bool { + return text == candidate + } +} + +// MatchPattern return a new MatcherFunc based on pattern matching +func MatchPattern(pattern string) MatcherFunc { + r := regexp.MustCompile(pattern) + return func(candidate string) bool { + return r.MatchString(candidate) + } +} + +// CommandFunc respresents a function used to mock WinRM commands +type CommandFunc func(out, err io.Writer) (exitCode int) + +// CommandFunc adds a WinRM command mock function to the WinRM server +func (r *Remote) CommandFunc(m MatcherFunc, f CommandFunc) { + r.service.HandleCommand(m, f) +} + +func splitAddr(addr string) (host string, port int, err error) { + u, err := url.Parse(addr) + if err != nil { + return + } + + split := strings.Split(u.Host, ":") + host = split[0] + port, err = strconv.Atoi(split[1]) + return +} diff --git a/vendor/github.com/dylanmei/winrmtest/wsman.go b/vendor/github.com/dylanmei/winrmtest/wsman.go new file mode 100644 index 000000000..c6d1c247b --- /dev/null +++ b/vendor/github.com/dylanmei/winrmtest/wsman.go @@ -0,0 +1,170 @@ +package winrmtest + +import ( + "bytes" + "encoding/base64" + "fmt" + "net/http" + "strconv" + "strings" + + "github.com/masterzen/winrm/soap" + "github.com/masterzen/xmlpath" + "github.com/satori/go.uuid" +) + +type wsman struct { + commands []*command + identitySeed int +} + +type command struct { + id string + matcher MatcherFunc + handler CommandFunc +} + +func (w *wsman) HandleCommand(m MatcherFunc, f CommandFunc) string { + id := uuid.NewV4().String() + w.commands = append(w.commands, &command{ + id: id, + matcher: m, + handler: f, + }) + + return id +} + +func (w *wsman) CommandByText(cmd 
string) *command { + for _, c := range w.commands { + if c.matcher(cmd) { + return c + } + } + return nil +} + +func (w *wsman) CommandByID(id string) *command { + for _, c := range w.commands { + if c.id == id { + return c + } + } + return nil +} + +func (w *wsman) ServeHTTP(rw http.ResponseWriter, r *http.Request) { + rw.Header().Add("Content-Type", "application/soap+xml") + + defer r.Body.Close() + env, err := xmlpath.Parse(r.Body) + + if err != nil { + return + } + + action := readAction(env) + switch { + case strings.HasSuffix(action, "transfer/Create"): + // create a new shell + + rw.Write([]byte(` + + 123 + `)) + + case strings.HasSuffix(action, "shell/Command"): + // execute on behalf of the client + text := readCommand(env) + cmd := w.CommandByText(text) + + if cmd == nil { + fmt.Printf("I don't know this command: Command=%s\n", text) + rw.WriteHeader(http.StatusInternalServerError) + return + } + + rw.Write([]byte(fmt.Sprintf(` + + %s + `, cmd.id))) + + case strings.HasSuffix(action, "shell/Receive"): + // client ready to receive the results + + id := readCommandIDFromDesiredStream(env) + cmd := w.CommandByID(id) + + if cmd == nil { + fmt.Printf("I don't know this command: CommandId=%s\n", id) + rw.WriteHeader(http.StatusInternalServerError) + return + } + + stdout := new(bytes.Buffer) + stderr := new(bytes.Buffer) + result := cmd.handler(stdout, stderr) + content := base64.StdEncoding.EncodeToString(stdout.Bytes()) + + rw.Write([]byte(fmt.Sprintf(` + + + %s + + + + %d + + + `, id, content, id, id, result))) + + case strings.HasSuffix(action, "shell/Signal"): + // end of the shell command + rw.WriteHeader(http.StatusOK) + case strings.HasSuffix(action, "transfer/Delete"): + // end of the session + rw.WriteHeader(http.StatusOK) + default: + fmt.Printf("I don't know this action: %s\n", action) + rw.WriteHeader(http.StatusInternalServerError) + } +} + +func readAction(env *xmlpath.Node) string { + xpath, err := xmlpath.CompileWithNamespace( + "//a:Action", soap.GetAllNamespaces()) + + if err != nil { + return "" + } + + action, _ := xpath.String(env) + return action +} + +func readCommand(env *xmlpath.Node) string { + xpath, err := xmlpath.CompileWithNamespace( + "//rsp:Command", soap.GetAllNamespaces()) + + if err != nil { + return "" + } + + command, _ := xpath.String(env) + if unquoted, err := strconv.Unquote(command); err == nil { + return unquoted + } + return command +} + +func readCommandIDFromDesiredStream(env *xmlpath.Node) string { + xpath, err := xmlpath.CompileWithNamespace( + "//rsp:DesiredStream/@CommandId", soap.GetAllNamespaces()) + + if err != nil { + return "" + } + + id, _ := xpath.String(env) + return id +} diff --git a/vendor/github.com/dylanmei/winrmtest/wsman_test.go b/vendor/github.com/dylanmei/winrmtest/wsman_test.go new file mode 100644 index 000000000..e9e4999e8 --- /dev/null +++ b/vendor/github.com/dylanmei/winrmtest/wsman_test.go @@ -0,0 +1,220 @@ +package winrmtest + +import ( + "fmt" + "io" + "net/http" + "net/http/httptest" + "strings" + "testing" + + "github.com/masterzen/winrm/soap" + "github.com/masterzen/xmlpath" + "github.com/satori/go.uuid" +) + +func Test_creating_a_shell(t *testing.T) { + w := &wsman{} + + res := httptest.NewRecorder() + req, _ := http.NewRequest("POST", "", strings.NewReader(` + + + http://schemas.xmlsoap.org/ws/2004/09/transfer/Create + + + + stdin + stdout stderr + + + `)) + + w.ServeHTTP(res, req) + if res.Code != http.StatusOK { + t.Errorf("Expected 200 OK but was %d.\n", res.Code) + } + + if contentType := 
res.HeaderMap.Get("Content-Type"); contentType != "application/soap+xml" { + t.Errorf("Expected ContentType application/soap+xml was %s.\n", contentType) + } + + env, err := xmlpath.Parse(res.Body) + if err != nil { + t.Error("Couldn't compile the SOAP response.") + } + + xpath, _ := xmlpath.CompileWithNamespace( + "//rsp:ShellId", soap.GetAllNamespaces()) + + if _, found := xpath.String(env); !found { + t.Error("Expected a Shell identifier.") + } +} + +func Test_executing_a_command(t *testing.T) { + w := &wsman{} + id := w.HandleCommand(MatchText("echo tacos"), func(out, err io.Writer) int { + return 0 + }) + + res := httptest.NewRecorder() + req, _ := http.NewRequest("POST", "", strings.NewReader(` + + + http://schemas.xmlsoap.org/ws/2004/09/shell/Command + + + "echo tacos" + + `)) + + w.ServeHTTP(res, req) + if res.Code != http.StatusOK { + t.Errorf("Expected 200 OK but was %d.\n", res.Code) + } + + env, err := xmlpath.Parse(res.Body) + if err != nil { + t.Error("Couldn't compile the SOAP response.") + } + + xpath, _ := xmlpath.CompileWithNamespace( + "//rsp:CommandId", soap.GetAllNamespaces()) + + result, _ := xpath.String(env) + if result != id { + t.Errorf("Expected CommandId=%s but was \"%s\"", id, result) + } +} + +func Test_executing_a_regex_command(t *testing.T) { + w := &wsman{} + id := w.HandleCommand(MatchPattern(`echo .* >> C:\file.cmd`), func(out, err io.Writer) int { + return 0 + }) + + res := httptest.NewRecorder() + req, _ := http.NewRequest("POST", "", strings.NewReader(fmt.Sprintf(` + + + http://schemas.xmlsoap.org/ws/2004/09/shell/Command + + + "echo %d >> C:\file.cmd" + + `, uuid.NewV4().String()))) + + w.ServeHTTP(res, req) + if res.Code != http.StatusOK { + t.Errorf("Expected 200 OK but was %d.\n", res.Code) + } + + env, err := xmlpath.Parse(res.Body) + if err != nil { + t.Error("Couldn't compile the SOAP response.") + } + + xpath, _ := xmlpath.CompileWithNamespace( + "//rsp:CommandId", soap.GetAllNamespaces()) + + result, _ := xpath.String(env) + if result != id { + t.Errorf("Expected CommandId=%s but was \"%s\"", id, result) + } +} + +func Test_receiving_command_results(t *testing.T) { + w := &wsman{} + id := w.HandleCommand(MatchText("echo tacos"), func(out, err io.Writer) int { + out.Write([]byte("tacos")) + return 0 + }) + + res := httptest.NewRecorder() + req, _ := http.NewRequest("POST", "", strings.NewReader(fmt.Sprintf(` + + + http://schemas.microsoft.com/wbem/wsman/1/windows/shell/Receive + + + stdout stderr + + `, id))) + + w.ServeHTTP(res, req) + if res.Code != http.StatusOK { + t.Errorf("Expected 200 OK but was %d.\n", res.Code) + } + + env, err := xmlpath.Parse(res.Body) + if err != nil { + t.Error("Couldn't compile the SOAP response.") + } + + xpath, _ := xmlpath.CompileWithNamespace("//rsp:ReceiveResponse", soap.GetAllNamespaces()) + iter := xpath.Iter(env) + if !iter.Next() { + t.Error("Expected a ReceiveResponse element.") + } + + xresp := iter.Node() + xpath, _ = xmlpath.CompileWithNamespace( + fmt.Sprintf("rsp:Stream[@CommandId='%s']", id), soap.GetAllNamespaces()) + iter = xpath.Iter(xresp) + + if !iter.Next() || !nodeHasAttribute(iter.Node(), "Name", "stdout") || iter.Node().String() != "dGFjb3M=" { + t.Error("Expected an stdout Stream with the text \"dGFjb3M=\".") + } + + if !iter.Next() || !nodeHasAttribute(iter.Node(), "Name", "stdout") || !nodeHasAttribute(iter.Node(), "End", "true") { + t.Error("Expected an stdout Stream with an \"End\" attribute.") + } + + if !iter.Next() || !nodeHasAttribute(iter.Node(), "Name", "stderr") || 
!nodeHasAttribute(iter.Node(), "End", "true") { + t.Error("Expected an stderr Stream with an \"End\" attribute.") + } + + xpath, _ = xmlpath.CompileWithNamespace( + "//rsp:CommandState[@State='http://schemas.microsoft.com/wbem/wsman/1/windows/shell/CommandState/Done']", + soap.GetAllNamespaces()) + + if _, found := xpath.String(env); !found { + t.Error("Expected CommandState=\"Done\"") + } + + xpath, _ = xmlpath.CompileWithNamespace("//rsp:CommandState/rsp:ExitCode", soap.GetAllNamespaces()) + if code, _ := xpath.String(env); code != "0" { + t.Errorf("Expected ExitCode=0 but found \"%s\"\n", code) + } +} + +func Test_deleting_a_shell(t *testing.T) { + w := &wsman{} + + res := httptest.NewRecorder() + req, _ := http.NewRequest("POST", "", strings.NewReader(` + + + http://schemas.xmlsoap.org/ws/2004/09/transfer/Delete + + `)) + + w.ServeHTTP(res, req) + if res.Code != http.StatusOK { + t.Errorf("Expected 200 OK but was %d.\n", res.Code) + } + + if res.Body.Len() != 0 { + t.Errorf("Expected body to be empty but was \"%v\".", res.Body) + } +} + +func nodeHasAttribute(n *xmlpath.Node, name, value string) bool { + xpath := xmlpath.MustCompile("attribute::" + name) + if result, found := xpath.String(n); found { + return result == value + } + + return false +} diff --git a/vendor/github.com/fsouza/go-dockerclient/.gitignore b/vendor/github.com/fsouza/go-dockerclient/.gitignore new file mode 100644 index 000000000..5f6b48eae --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/.gitignore @@ -0,0 +1,2 @@ +# temporary symlink for testing +testing/data/symlink diff --git a/vendor/github.com/fsouza/go-dockerclient/.travis.yml b/vendor/github.com/fsouza/go-dockerclient/.travis.yml new file mode 100644 index 000000000..aee4e902a --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/.travis.yml @@ -0,0 +1,22 @@ +language: go +sudo: required +go: + - 1.3.3 + - 1.4.2 + - 1.5.3 + - 1.6rc1 + - tip +env: + - GOARCH=amd64 DOCKER_VERSION=1.7.1 + - GOARCH=386 DOCKER_VERSION=1.7.1 + - GOARCH=amd64 DOCKER_VERSION=1.8.3 + - GOARCH=386 DOCKER_VERSION=1.8.3 + - GOARCH=amd64 DOCKER_VERSION=1.9.1 + - GOARCH=386 DOCKER_VERSION=1.9.1 +install: + - make prepare_docker +script: + - make test + - DOCKER_HOST=tcp://127.0.0.1:2375 make integration +services: + - docker diff --git a/vendor/github.com/fsouza/go-dockerclient/AUTHORS b/vendor/github.com/fsouza/go-dockerclient/AUTHORS new file mode 100644 index 000000000..0c42ae344 --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/AUTHORS @@ -0,0 +1,125 @@ +# This is the official list of go-dockerclient authors for copyright purposes. + +Abhishek Chanda +Adam Bell-Hanssen +Adrien Kohlbecker +Aldrin Leal +Andreas Jaekle +Andrews Medina +Andrey Sibiryov +Andy Goldstein +Antonio Murdaca +Artem Sidorenko +Ben Marini +Ben McCann +Ben Parees +Benno van den Berg +Brendan Fosberry +Brian Lalor +Brian P. Hamachek +Brian Palmer +Bryan Boreham +Burke Libbey +Carlos Diaz-Padron +Cesar Wong +Cezar Sa Espinola +Cheah Chu Yeow +cheneydeng +Chris Bednarski +CMGS +Colin Hebert +Craig Jellick +Dan Williams +Daniel, Dao Quang Minh +Daniel Garcia +Daniel Hiltgen +Darren Shepherd +Dave Choi +David Huie +Dawn Chen +Dinesh Subhraveti +Drew Wells +Ed +Elias G. 
Schneevoigt +Erez Horev +Eric Anderson +Ewout Prangsma +Fabio Rehm +Fatih Arslan +Flavia Missi +Francisco Souza +Grégoire Delattre +Guillermo Ãlvarez Fernández +Harry Zhang +He Simei +Ivan Mikushin +James Bardin +James Nugent +Jari Kolehmainen +Jason Wilder +Jawher Moussa +Jean-Baptiste Dalido +Jeff Mitchell +Jeffrey Hulten +Jen Andre +Jérôme Laurens +Johan Euphrosine +John Hughes +Kamil Domanski +Karan Misra +Ken Herner +Kim, Hirokuni +Kyle Allan +Liron Levin +Lior Yankovich +Liu Peng +Lorenz Leutgeb +Lucas Clemente +Lucas Weiblen +Lyon Hill +Mantas Matelis +Martin Sweeney +Máximo Cuadros Ortiz +Michael Schmatz +Michal Fojtik +Mike Dillon +Mrunal Patel +Nguyen Sy Thanh Son +Nick Ethier +Omeid Matten +Orivej Desh +Paul Bellamy +Paul Morie +Paul Weil +Peter Edge +Peter Jihoon Kim +Phil Lu +Philippe LafoucrieÌ€re +Rafe Colton +Rob Miller +Robert Williamson +Salvador Gironès +Sam Rijs +Sami Wagiaalla +Samuel Karp +Silas Sewell +Simon Eskildsen +Simon Menke +Skolos +Soulou +Sridhar Ratnakumar +Summer Mousa +Sunjin Lee +Tarsis Azevedo +Tim Schindler +Timothy St. Clair +Tobi Knaup +Tom Wilkie +Tonic +ttyh061 +Victor Marmol +Vincenzo Prignano +Wiliam Souza +Ye Yin +Yu, Zou +Yuriy Bogdanov diff --git a/vendor/github.com/fsouza/go-dockerclient/DOCKER-LICENSE b/vendor/github.com/fsouza/go-dockerclient/DOCKER-LICENSE new file mode 100644 index 000000000..706634474 --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/DOCKER-LICENSE @@ -0,0 +1,6 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + +You can find the Docker license at the following link: +https://raw.githubusercontent.com/docker/docker/master/LICENSE diff --git a/vendor/github.com/fsouza/go-dockerclient/LICENSE b/vendor/github.com/fsouza/go-dockerclient/LICENSE new file mode 100644 index 000000000..4e11de100 --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/LICENSE @@ -0,0 +1,22 @@ +Copyright (c) 2015, go-dockerclient authors +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, +this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright notice, +this list of conditions and the following disclaimer in the documentation +and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
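The matcher helpers in winrmtest's remote.go above are plain predicates over the command string, so exact-text and regex matching can be mixed freely when registering mocks. A small sketch, assuming the import path from this vendor tree:

```go
package main

import (
	"fmt"

	"github.com/dylanmei/winrmtest"
)

func main() {
	// An exact matcher and a looser regex matcher for the same command family.
	exact := winrmtest.MatchText("ipconfig /all")
	loose := winrmtest.MatchPattern(`^ipconfig(\s+/\w+)?$`)

	fmt.Println(exact("ipconfig /all")) // true
	fmt.Println(exact("ipconfig"))      // false
	fmt.Println(loose("ipconfig"))      // true
	fmt.Println(loose("ipconfig /all")) // true
}
```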
diff --git a/vendor/github.com/fsouza/go-dockerclient/Makefile b/vendor/github.com/fsouza/go-dockerclient/Makefile new file mode 100644 index 000000000..205d8f3c2 --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/Makefile @@ -0,0 +1,60 @@ +.PHONY: \ + all \ + vendor \ + lint \ + vet \ + fmt \ + fmtcheck \ + pretest \ + test \ + integration \ + cov \ + clean + +SRCS = $(shell git ls-files '*.go' | grep -v '^external/') +PKGS = ./. ./testing + +all: test + +vendor: + @ go get -v github.com/mjibson/party + party -d external -c -u + +lint: + @ go get -v github.com/golang/lint/golint + $(foreach file,$(SRCS),golint $(file) || exit;) + +vet: + @-go get -v golang.org/x/tools/cmd/vet + $(foreach pkg,$(PKGS),go vet $(pkg);) + +fmt: + gofmt -w $(SRCS) + +fmtcheck: + $(foreach file,$(SRCS),gofmt -d $(file);) + +prepare_docker: + sudo stop docker + sudo rm -rf /var/lib/docker + sudo rm -f `which docker` + sudo apt-key adv --keyserver hkp://p80.pool.sks-keyservers.net:80 --recv-keys 58118E89F3A912897C070ADBF76221572C52609D + echo "deb https://apt.dockerproject.org/repo ubuntu-trusty main" | sudo tee /etc/apt/sources.list.d/docker.list + sudo apt-get update + sudo apt-get install docker-engine=$(DOCKER_VERSION)-0~$(shell lsb_release -cs) -y --force-yes + +pretest: lint vet fmtcheck + +test: pretest + $(foreach pkg,$(PKGS),go test $(pkg) || exit;) + +integration: + go test -tags docker_integration -run TestIntegration -v + +cov: + @ go get -v github.com/axw/gocov/gocov + @ go get golang.org/x/tools/cmd/cover + gocov test | gocov report + +clean: + $(foreach pkg,$(PKGS),go clean $(pkg) || exit;) diff --git a/vendor/github.com/fsouza/go-dockerclient/README.markdown b/vendor/github.com/fsouza/go-dockerclient/README.markdown new file mode 100644 index 000000000..b75a7e920 --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/README.markdown @@ -0,0 +1,105 @@ +# go-dockerclient + +[![Travis](https://img.shields.io/travis/fsouza/go-dockerclient.svg?style=flat-square)](https://travis-ci.org/fsouza/go-dockerclient) +[![GoDoc](https://img.shields.io/badge/api-Godoc-blue.svg?style=flat-square)](https://godoc.org/github.com/fsouza/go-dockerclient) + +This package presents a client for the Docker remote API. It also provides +support for the extensions in the [Swarm API](https://docs.docker.com/swarm/api/swarm-api/). + +This package also provides support for docker's network API, which is a simple +passthrough to the libnetwork remote API. Note that docker's network API is +only available in docker 1.8 and above, and only enabled in docker if +DOCKER_EXPERIMENTAL is defined during the docker build process. + +For more details, check the [remote API documentation](http://docs.docker.com/engine/reference/api/docker_remote_api/). + +## Vendoring + +If you are having issues with Go 1.5 and have `GO15VENDOREXPERIMENT` set with an application that has go-dockerclient vendored, +please update your vendoring of go-dockerclient :) We recently moved the `vendor` directory to `external` so that go-dockerclient +is compatible with this configuration. See [338](https://github.com/fsouza/go-dockerclient/issues/338) and [339](https://github.com/fsouza/go-dockerclient/pull/339) +for details. 
+ +## Example + +```go +package main + +import ( + "fmt" + + "github.com/fsouza/go-dockerclient" +) + +func main() { + endpoint := "unix:///var/run/docker.sock" + client, _ := docker.NewClient(endpoint) + imgs, _ := client.ListImages(docker.ListImagesOptions{All: false}) + for _, img := range imgs { + fmt.Println("ID: ", img.ID) + fmt.Println("RepoTags: ", img.RepoTags) + fmt.Println("Created: ", img.Created) + fmt.Println("Size: ", img.Size) + fmt.Println("VirtualSize: ", img.VirtualSize) + fmt.Println("ParentId: ", img.ParentID) + } +} +``` + +## Using with TLS + +In order to instantiate the client for a TLS-enabled daemon, you should use NewTLSClient, passing the endpoint and path for key and certificates as parameters. + +```go +package main + +import ( + "fmt" + + "github.com/fsouza/go-dockerclient" +) + +func main() { + endpoint := "tcp://[ip]:[port]" + path := os.Getenv("DOCKER_CERT_PATH") + ca := fmt.Sprintf("%s/ca.pem", path) + cert := fmt.Sprintf("%s/cert.pem", path) + key := fmt.Sprintf("%s/key.pem", path) + client, _ := docker.NewTLSClient(endpoint, cert, key, ca) + // use client +} +``` + +If using [docker-machine](https://docs.docker.com/machine/), or another application that exports environment variables +`DOCKER_HOST, DOCKER_TLS_VERIFY, DOCKER_CERT_PATH`, you can use NewClientFromEnv. + + +```go +package main + +import ( + "fmt" + + "github.com/fsouza/go-dockerclient" +) + +func main() { + client, _ := docker.NewClientFromEnv() + // use client +} +``` + +See the documentation for more details. + +## Developing + +All development commands can be seen in the [Makefile](Makefile). + +Commited code must pass: + +* [golint](https://github.com/golang/lint) +* [go vet](https://godoc.org/golang.org/x/tools/cmd/vet) +* [gofmt](https://golang.org/cmd/gofmt) +* [go test](https://golang.org/cmd/go/#hdr-Test_packages) + +Running `make test` will check all of these. If your editor does not automatically call gofmt, `make fmt` will format all go files in this repository. diff --git a/vendor/github.com/fsouza/go-dockerclient/auth.go b/vendor/github.com/fsouza/go-dockerclient/auth.go new file mode 100644 index 000000000..775c70c0b --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/auth.go @@ -0,0 +1,136 @@ +// Copyright 2015 go-dockerclient authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package docker + +import ( + "bytes" + "encoding/base64" + "encoding/json" + "errors" + "fmt" + "io" + "os" + "path" + "strings" +) + +// ErrCannotParseDockercfg is the error returned by NewAuthConfigurations when the dockercfg cannot be parsed. +var ErrCannotParseDockercfg = errors.New("Failed to read authentication from dockercfg") + +// AuthConfiguration represents authentication options to use in the PushImage +// method. It represents the authentication in the Docker index server. +type AuthConfiguration struct { + Username string `json:"username,omitempty"` + Password string `json:"password,omitempty"` + Email string `json:"email,omitempty"` + ServerAddress string `json:"serveraddress,omitempty"` +} + +// AuthConfigurations represents authentication options to use for the +// PushImage method accommodating the new X-Registry-Config header +type AuthConfigurations struct { + Configs map[string]AuthConfiguration `json:"configs"` +} + +// AuthConfigurations119 is used to serialize a set of AuthConfigurations +// for Docker API >= 1.19. 
+type AuthConfigurations119 map[string]AuthConfiguration + +// dockerConfig represents a registry authentation configuration from the +// .dockercfg file. +type dockerConfig struct { + Auth string `json:"auth"` + Email string `json:"email"` +} + +// NewAuthConfigurationsFromDockerCfg returns AuthConfigurations from the +// ~/.dockercfg file. +func NewAuthConfigurationsFromDockerCfg() (*AuthConfigurations, error) { + var r io.Reader + var err error + p := path.Join(os.Getenv("HOME"), ".docker", "config.json") + r, err = os.Open(p) + if err != nil { + p := path.Join(os.Getenv("HOME"), ".dockercfg") + r, err = os.Open(p) + if err != nil { + return nil, err + } + } + return NewAuthConfigurations(r) +} + +// NewAuthConfigurations returns AuthConfigurations from a JSON encoded string in the +// same format as the .dockercfg file. +func NewAuthConfigurations(r io.Reader) (*AuthConfigurations, error) { + var auth *AuthConfigurations + confs, err := parseDockerConfig(r) + if err != nil { + return nil, err + } + auth, err = authConfigs(confs) + if err != nil { + return nil, err + } + return auth, nil +} + +func parseDockerConfig(r io.Reader) (map[string]dockerConfig, error) { + buf := new(bytes.Buffer) + buf.ReadFrom(r) + byteData := buf.Bytes() + + var confsWrapper map[string]map[string]dockerConfig + if err := json.Unmarshal(byteData, &confsWrapper); err == nil { + if confs, ok := confsWrapper["auths"]; ok { + return confs, nil + } + } + + var confs map[string]dockerConfig + if err := json.Unmarshal(byteData, &confs); err != nil { + return nil, err + } + return confs, nil +} + +// authConfigs converts a dockerConfigs map to a AuthConfigurations object. +func authConfigs(confs map[string]dockerConfig) (*AuthConfigurations, error) { + c := &AuthConfigurations{ + Configs: make(map[string]AuthConfiguration), + } + for reg, conf := range confs { + data, err := base64.StdEncoding.DecodeString(conf.Auth) + if err != nil { + return nil, err + } + userpass := strings.SplitN(string(data), ":", 2) + if len(userpass) != 2 { + return nil, ErrCannotParseDockercfg + } + c.Configs[reg] = AuthConfiguration{ + Email: conf.Email, + Username: userpass[0], + Password: userpass[1], + ServerAddress: reg, + } + } + return c, nil +} + +// AuthCheck validates the given credentials. It returns nil if successful. +// +// See https://goo.gl/m2SleN for more details. +func (c *Client) AuthCheck(conf *AuthConfiguration) error { + if conf == nil { + return fmt.Errorf("conf is nil") + } + resp, err := c.do("POST", "/auth", doOptions{data: conf}) + if err != nil { + return err + } + resp.Body.Close() + return nil +} diff --git a/vendor/github.com/fsouza/go-dockerclient/auth_test.go b/vendor/github.com/fsouza/go-dockerclient/auth_test.go new file mode 100644 index 000000000..e53b17601 --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/auth_test.go @@ -0,0 +1,91 @@ +// Copyright 2015 go-dockerclient authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
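parseDockerConfig above accepts both on-disk layouts: the newer ~/.docker/config.json, where entries are wrapped in an "auths" object, and the older bare ~/.dockercfg registry map. A sketch feeding both forms through NewAuthConfigurations; the base64 payload is an assumed example encoding "user:pass":

```go
package main

import (
	"fmt"
	"strings"

	"github.com/fsouza/go-dockerclient"
)

func main() {
	// "user:pass" base64-encoded, as stored in the "auth" field.
	const entry = `{"auth":"dXNlcjpwYXNz","email":"user@example.com"}`

	// Newer config.json layout, wrapped in "auths".
	wrapped := `{"auths":{"docker.io":` + entry + `}}`
	// Older .dockercfg layout, a bare registry map.
	bare := `{"docker.io":` + entry + `}`

	for _, conf := range []string{wrapped, bare} {
		ac, err := docker.NewAuthConfigurations(strings.NewReader(conf))
		if err != nil {
			panic(err)
		}
		c := ac.Configs["docker.io"]
		fmt.Println(c.Username, c.Password, c.ServerAddress) // user pass docker.io
	}
}
```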
+ +package docker + +import ( + "encoding/base64" + "fmt" + "net/http" + "strings" + "testing" +) + +func TestAuthLegacyConfig(t *testing.T) { + auth := base64.StdEncoding.EncodeToString([]byte("user:pa:ss")) + read := strings.NewReader(fmt.Sprintf(`{"docker.io":{"auth":"%s","email":"user@example.com"}}`, auth)) + ac, err := NewAuthConfigurations(read) + if err != nil { + t.Error(err) + } + c, ok := ac.Configs["docker.io"] + if !ok { + t.Error("NewAuthConfigurations: Expected Configs to contain docker.io") + } + if got, want := c.Email, "user@example.com"; got != want { + t.Errorf(`AuthConfigurations.Configs["docker.io"].Email: wrong result. Want %q. Got %q`, want, got) + } + if got, want := c.Username, "user"; got != want { + t.Errorf(`AuthConfigurations.Configs["docker.io"].Username: wrong result. Want %q. Got %q`, want, got) + } + if got, want := c.Password, "pa:ss"; got != want { + t.Errorf(`AuthConfigurations.Configs["docker.io"].Password: wrong result. Want %q. Got %q`, want, got) + } + if got, want := c.ServerAddress, "docker.io"; got != want { + t.Errorf(`AuthConfigurations.Configs["docker.io"].ServerAddress: wrong result. Want %q. Got %q`, want, got) + } +} + +func TestAuthBadConfig(t *testing.T) { + auth := base64.StdEncoding.EncodeToString([]byte("userpass")) + read := strings.NewReader(fmt.Sprintf(`{"docker.io":{"auth":"%s","email":"user@example.com"}}`, auth)) + ac, err := NewAuthConfigurations(read) + if err != ErrCannotParseDockercfg { + t.Errorf("Incorrect error returned %v\n", err) + } + if ac != nil { + t.Errorf("Invalid auth configuration returned, should be nil %v\n", ac) + } +} + +func TestAuthConfig(t *testing.T) { + auth := base64.StdEncoding.EncodeToString([]byte("user:pass")) + read := strings.NewReader(fmt.Sprintf(`{"auths":{"docker.io":{"auth":"%s","email":"user@example.com"}}}`, auth)) + ac, err := NewAuthConfigurations(read) + if err != nil { + t.Error(err) + } + c, ok := ac.Configs["docker.io"] + if !ok { + t.Error("NewAuthConfigurations: Expected Configs to contain docker.io") + } + if got, want := c.Email, "user@example.com"; got != want { + t.Errorf(`AuthConfigurations.Configs["docker.io"].Email: wrong result. Want %q. Got %q`, want, got) + } + if got, want := c.Username, "user"; got != want { + t.Errorf(`AuthConfigurations.Configs["docker.io"].Username: wrong result. Want %q. Got %q`, want, got) + } + if got, want := c.Password, "pass"; got != want { + t.Errorf(`AuthConfigurations.Configs["docker.io"].Password: wrong result. Want %q. Got %q`, want, got) + } + if got, want := c.ServerAddress, "docker.io"; got != want { + t.Errorf(`AuthConfigurations.Configs["docker.io"].ServerAddress: wrong result. Want %q. 
Got %q`, want, got) + } +} + +func TestAuthCheck(t *testing.T) { + fakeRT := &FakeRoundTripper{status: http.StatusOK} + client := newTestClient(fakeRT) + if err := client.AuthCheck(nil); err == nil { + t.Fatalf("expected error on nil auth config") + } + // test good auth + if err := client.AuthCheck(&AuthConfiguration{}); err != nil { + t.Fatal(err) + } + *fakeRT = FakeRoundTripper{status: http.StatusUnauthorized} + if err := client.AuthCheck(&AuthConfiguration{}); err == nil { + t.Fatal("expected failure from unauthorized auth") + } +} diff --git a/vendor/github.com/fsouza/go-dockerclient/build_test.go b/vendor/github.com/fsouza/go-dockerclient/build_test.go new file mode 100644 index 000000000..c9640f205 --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/build_test.go @@ -0,0 +1,154 @@ +package docker + +import ( + "bytes" + "io" + "io/ioutil" + "net/http" + "os" + "reflect" + "testing" + + "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive" +) + +func TestBuildImageMultipleContextsError(t *testing.T) { + fakeRT := &FakeRoundTripper{message: "", status: http.StatusOK} + client := newTestClient(fakeRT) + var buf bytes.Buffer + opts := BuildImageOptions{ + Name: "testImage", + NoCache: true, + SuppressOutput: true, + RmTmpContainer: true, + ForceRmTmpContainer: true, + InputStream: &buf, + OutputStream: &buf, + ContextDir: "testing/data", + } + err := client.BuildImage(opts) + if err != ErrMultipleContexts { + t.Errorf("BuildImage: providing both InputStream and ContextDir should produce an error") + } +} + +func TestBuildImageContextDirDockerignoreParsing(t *testing.T) { + fakeRT := &FakeRoundTripper{message: "", status: http.StatusOK} + client := newTestClient(fakeRT) + + if err := os.Symlink("doesnotexist", "testing/data/symlink"); err != nil { + t.Errorf("error creating symlink on demand: %s", err) + } + defer func() { + if err := os.Remove("testing/data/symlink"); err != nil { + t.Errorf("error removing symlink on demand: %s", err) + } + }() + + var buf bytes.Buffer + opts := BuildImageOptions{ + Name: "testImage", + NoCache: true, + SuppressOutput: true, + RmTmpContainer: true, + ForceRmTmpContainer: true, + OutputStream: &buf, + ContextDir: "testing/data", + } + err := client.BuildImage(opts) + if err != nil { + t.Fatal(err) + } + reqBody := fakeRT.requests[0].Body + tmpdir, err := unpackBodyTarball(reqBody) + if err != nil { + t.Fatal(err) + } + + defer func() { + if err := os.RemoveAll(tmpdir); err != nil { + t.Fatal(err) + } + }() + + files, err := ioutil.ReadDir(tmpdir) + if err != nil { + t.Fatal(err) + } + + foundFiles := []string{} + for _, file := range files { + foundFiles = append(foundFiles, file.Name()) + } + + expectedFiles := []string{ + ".dockerignore", + "Dockerfile", + "barfile", + "ca.pem", + "cert.pem", + "key.pem", + "server.pem", + "serverkey.pem", + "symlink", + } + + if !reflect.DeepEqual(expectedFiles, foundFiles) { + t.Errorf( + "BuildImage: incorrect files sent in tarball to docker server\nexpected %+v, found %+v", + expectedFiles, foundFiles, + ) + } +} + +func TestBuildImageSendXRegistryConfig(t *testing.T) { + fakeRT := &FakeRoundTripper{message: "", status: http.StatusOK} + client := newTestClient(fakeRT) + var buf bytes.Buffer + opts := BuildImageOptions{ + Name: "testImage", + NoCache: true, + SuppressOutput: true, + RmTmpContainer: true, + ForceRmTmpContainer: true, + OutputStream: &buf, + ContextDir: "testing/data", + AuthConfigs: AuthConfigurations{ + Configs: map[string]AuthConfiguration{ + "quay.io": { + 
Username: "foo", + Password: "bar", + Email: "baz", + ServerAddress: "quay.io", + }, + }, + }, + } + + encodedConfig := "eyJjb25maWdzIjp7InF1YXkuaW8iOnsidXNlcm5hbWUiOiJmb28iLCJwYXNzd29yZCI6ImJhciIsImVtYWlsIjoiYmF6Iiwic2VydmVyYWRkcmVzcyI6InF1YXkuaW8ifX19Cg==" + + if err := client.BuildImage(opts); err != nil { + t.Fatal(err) + } + + xRegistryConfig := fakeRT.requests[0].Header["X-Registry-Config"][0] + if xRegistryConfig != encodedConfig { + t.Errorf( + "BuildImage: X-Registry-Config not set currectly: expected %q, got %q", + encodedConfig, + xRegistryConfig, + ) + } +} + +func unpackBodyTarball(req io.ReadCloser) (tmpdir string, err error) { + tmpdir, err = ioutil.TempDir("", "go-dockerclient-test") + if err != nil { + return + } + err = archive.Untar(req, tmpdir, &archive.TarOptions{ + Compression: archive.Uncompressed, + NoLchown: true, + }) + return +} diff --git a/vendor/github.com/fsouza/go-dockerclient/change.go b/vendor/github.com/fsouza/go-dockerclient/change.go new file mode 100644 index 000000000..d133594d4 --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/change.go @@ -0,0 +1,43 @@ +// Copyright 2014 go-dockerclient authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package docker + +import "fmt" + +// ChangeType is a type for constants indicating the type of change +// in a container +type ChangeType int + +const ( + // ChangeModify is the ChangeType for container modifications + ChangeModify ChangeType = iota + + // ChangeAdd is the ChangeType for additions to a container + ChangeAdd + + // ChangeDelete is the ChangeType for deletions from a container + ChangeDelete +) + +// Change represents a change in a container. +// +// See https://goo.gl/9GsTIF for more details. +type Change struct { + Path string + Kind ChangeType +} + +func (change *Change) String() string { + var kind string + switch change.Kind { + case ChangeModify: + kind = "C" + case ChangeAdd: + kind = "A" + case ChangeDelete: + kind = "D" + } + return fmt.Sprintf("%s %s", kind, change.Path) +} diff --git a/vendor/github.com/fsouza/go-dockerclient/change_test.go b/vendor/github.com/fsouza/go-dockerclient/change_test.go new file mode 100644 index 000000000..9418b183c --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/change_test.go @@ -0,0 +1,24 @@ +// Copyright 2014 go-dockerclient authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package docker + +import "testing" + +func TestChangeString(t *testing.T) { + var tests = []struct { + change Change + expected string + }{ + {Change{"/etc/passwd", ChangeModify}, "C /etc/passwd"}, + {Change{"/etc/passwd", ChangeAdd}, "A /etc/passwd"}, + {Change{"/etc/passwd", ChangeDelete}, "D /etc/passwd"}, + {Change{"/etc/passwd", 33}, " /etc/passwd"}, + } + for _, tt := range tests { + if got := tt.change.String(); got != tt.expected { + t.Errorf("Change.String(): want %q. Got %q.", tt.expected, got) + } + } +} diff --git a/vendor/github.com/fsouza/go-dockerclient/client.go b/vendor/github.com/fsouza/go-dockerclient/client.go new file mode 100644 index 000000000..cf9d61678 --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/client.go @@ -0,0 +1,928 @@ +// Copyright 2015 go-dockerclient authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// Package docker provides a client for the Docker remote API. +// +// See https://goo.gl/G3plxW for more details on the remote API. +package docker + +import ( + "bufio" + "bytes" + "crypto/tls" + "crypto/x509" + "encoding/json" + "errors" + "fmt" + "io" + "io/ioutil" + "net" + "net/http" + "net/http/httputil" + "net/url" + "os" + "path/filepath" + "reflect" + "runtime" + "strconv" + "strings" + "time" + + "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts" + "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/homedir" + "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/stdcopy" + "github.com/fsouza/go-dockerclient/external/github.com/hashicorp/go-cleanhttp" +) + +const userAgent = "go-dockerclient" + +var ( + // ErrInvalidEndpoint is returned when the endpoint is not a valid HTTP URL. + ErrInvalidEndpoint = errors.New("invalid endpoint") + + // ErrConnectionRefused is returned when the client cannot connect to the given endpoint. + ErrConnectionRefused = errors.New("cannot connect to Docker endpoint") + + apiVersion112, _ = NewAPIVersion("1.12") + + apiVersion119, _ = NewAPIVersion("1.19") +) + +// APIVersion is an internal representation of a version of the Remote API. +type APIVersion []int + +// NewAPIVersion returns an instance of APIVersion for the given string. +// +// The given string must be in the form .., where , +// and are integer numbers. +func NewAPIVersion(input string) (APIVersion, error) { + if !strings.Contains(input, ".") { + return nil, fmt.Errorf("Unable to parse version %q", input) + } + raw := strings.Split(input, "-") + arr := strings.Split(raw[0], ".") + ret := make(APIVersion, len(arr)) + var err error + for i, val := range arr { + ret[i], err = strconv.Atoi(val) + if err != nil { + return nil, fmt.Errorf("Unable to parse version %q: %q is not an integer", input, val) + } + } + return ret, nil +} + +func (version APIVersion) String() string { + var str string + for i, val := range version { + str += strconv.Itoa(val) + if i < len(version)-1 { + str += "." + } + } + return str +} + +// LessThan is a function for comparing APIVersion structs +func (version APIVersion) LessThan(other APIVersion) bool { + return version.compare(other) < 0 +} + +// LessThanOrEqualTo is a function for comparing APIVersion structs +func (version APIVersion) LessThanOrEqualTo(other APIVersion) bool { + return version.compare(other) <= 0 +} + +// GreaterThan is a function for comparing APIVersion structs +func (version APIVersion) GreaterThan(other APIVersion) bool { + return version.compare(other) > 0 +} + +// GreaterThanOrEqualTo is a function for comparing APIVersion structs +func (version APIVersion) GreaterThanOrEqualTo(other APIVersion) bool { + return version.compare(other) >= 0 +} + +func (version APIVersion) compare(other APIVersion) int { + for i, v := range version { + if i <= len(other)-1 { + otherVersion := other[i] + + if v < otherVersion { + return -1 + } else if v > otherVersion { + return 1 + } + } + } + if len(version) > len(other) { + return 1 + } + if len(version) < len(other) { + return -1 + } + return 0 +} + +// Client is the basic type of this package. It provides methods for +// interaction with the API. 
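The compare method above walks the shared version components numerically before falling back to length, so "1.12" sorts above "1.9" and "1.9" sorts below "1.9.1". A short sketch of the exported comparison helpers:

```go
package main

import (
	"fmt"

	"github.com/fsouza/go-dockerclient"
)

func main() {
	v19, _ := docker.NewAPIVersion("1.9")
	v191, _ := docker.NewAPIVersion("1.9.1")
	v112, _ := docker.NewAPIVersion("1.12")

	fmt.Println(v19.LessThan(v191))         // true: equal prefix, fewer components
	fmt.Println(v112.GreaterThan(v19))      // true: 12 > 9 numerically, not lexically
	fmt.Println(v19.LessThanOrEqualTo(v19)) // true
}
```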
+type Client struct { + SkipServerVersionCheck bool + HTTPClient *http.Client + TLSConfig *tls.Config + Dialer *net.Dialer + + endpoint string + endpointURL *url.URL + eventMonitor *eventMonitoringState + requestedAPIVersion APIVersion + serverAPIVersion APIVersion + expectedAPIVersion APIVersion + unixHTTPClient *http.Client +} + +// NewClient returns a Client instance ready for communication with the given +// server endpoint. It will use the latest remote API version available in the +// server. +func NewClient(endpoint string) (*Client, error) { + client, err := NewVersionedClient(endpoint, "") + if err != nil { + return nil, err + } + client.SkipServerVersionCheck = true + return client, nil +} + +// NewTLSClient returns a Client instance ready for TLS communications with the givens +// server endpoint, key and certificates . It will use the latest remote API version +// available in the server. +func NewTLSClient(endpoint string, cert, key, ca string) (*Client, error) { + client, err := NewVersionedTLSClient(endpoint, cert, key, ca, "") + if err != nil { + return nil, err + } + client.SkipServerVersionCheck = true + return client, nil +} + +// NewTLSClientFromBytes returns a Client instance ready for TLS communications with the givens +// server endpoint, key and certificates (passed inline to the function as opposed to being +// read from a local file). It will use the latest remote API version available in the server. +func NewTLSClientFromBytes(endpoint string, certPEMBlock, keyPEMBlock, caPEMCert []byte) (*Client, error) { + client, err := NewVersionedTLSClientFromBytes(endpoint, certPEMBlock, keyPEMBlock, caPEMCert, "") + if err != nil { + return nil, err + } + client.SkipServerVersionCheck = true + return client, nil +} + +// NewVersionedClient returns a Client instance ready for communication with +// the given server endpoint, using a specific remote API version. +func NewVersionedClient(endpoint string, apiVersionString string) (*Client, error) { + u, err := parseEndpoint(endpoint, false) + if err != nil { + return nil, err + } + var requestedAPIVersion APIVersion + if strings.Contains(apiVersionString, ".") { + requestedAPIVersion, err = NewAPIVersion(apiVersionString) + if err != nil { + return nil, err + } + } + return &Client{ + HTTPClient: cleanhttp.DefaultClient(), + Dialer: &net.Dialer{}, + endpoint: endpoint, + endpointURL: u, + eventMonitor: new(eventMonitoringState), + requestedAPIVersion: requestedAPIVersion, + }, nil +} + +// NewVersionnedTLSClient has been DEPRECATED, please use NewVersionedTLSClient. +func NewVersionnedTLSClient(endpoint string, cert, key, ca, apiVersionString string) (*Client, error) { + return NewVersionedTLSClient(endpoint, cert, key, ca, apiVersionString) +} + +// NewVersionedTLSClient returns a Client instance ready for TLS communications with the givens +// server endpoint, key and certificates, using a specific remote API version. 
+func NewVersionedTLSClient(endpoint string, cert, key, ca, apiVersionString string) (*Client, error) { + certPEMBlock, err := ioutil.ReadFile(cert) + if err != nil { + return nil, err + } + keyPEMBlock, err := ioutil.ReadFile(key) + if err != nil { + return nil, err + } + caPEMCert, err := ioutil.ReadFile(ca) + if err != nil { + return nil, err + } + return NewVersionedTLSClientFromBytes(endpoint, certPEMBlock, keyPEMBlock, caPEMCert, apiVersionString) +} + +// NewClientFromEnv returns a Client instance ready for communication created from +// Docker's default logic for the environment variables DOCKER_HOST, DOCKER_TLS_VERIFY, and DOCKER_CERT_PATH. +// +// See https://github.com/docker/docker/blob/1f963af697e8df3a78217f6fdbf67b8123a7db94/docker/docker.go#L68. +// See https://github.com/docker/compose/blob/81707ef1ad94403789166d2fe042c8a718a4c748/compose/cli/docker_client.py#L7. +func NewClientFromEnv() (*Client, error) { + client, err := NewVersionedClientFromEnv("") + if err != nil { + return nil, err + } + client.SkipServerVersionCheck = true + return client, nil +} + +// NewVersionedClientFromEnv returns a Client instance ready for TLS communications created from +// Docker's default logic for the environment variables DOCKER_HOST, DOCKER_TLS_VERIFY, and DOCKER_CERT_PATH, +// and using a specific remote API version. +// +// See https://github.com/docker/docker/blob/1f963af697e8df3a78217f6fdbf67b8123a7db94/docker/docker.go#L68. +// See https://github.com/docker/compose/blob/81707ef1ad94403789166d2fe042c8a718a4c748/compose/cli/docker_client.py#L7. +func NewVersionedClientFromEnv(apiVersionString string) (*Client, error) { + dockerEnv, err := getDockerEnv() + if err != nil { + return nil, err + } + dockerHost := dockerEnv.dockerHost + if dockerEnv.dockerTLSVerify { + parts := strings.SplitN(dockerEnv.dockerHost, "://", 2) + if len(parts) != 2 { + return nil, fmt.Errorf("could not split %s into two parts by ://", dockerHost) + } + cert := filepath.Join(dockerEnv.dockerCertPath, "cert.pem") + key := filepath.Join(dockerEnv.dockerCertPath, "key.pem") + ca := filepath.Join(dockerEnv.dockerCertPath, "ca.pem") + return NewVersionedTLSClient(dockerEnv.dockerHost, cert, key, ca, apiVersionString) + } + return NewVersionedClient(dockerEnv.dockerHost, apiVersionString) +} + +// NewVersionedTLSClientFromBytes returns a Client instance ready for TLS communications with the givens +// server endpoint, key and certificates (passed inline to the function as opposed to being +// read from a local file), using a specific remote API version. 
+
+// NewVersionedTLSClientFromBytes returns a Client instance ready for TLS communications with the given
+// server endpoint, key and certificates (passed inline to the function as opposed to being
+// read from a local file), using a specific remote API version.
+func NewVersionedTLSClientFromBytes(endpoint string, certPEMBlock, keyPEMBlock, caPEMCert []byte, apiVersionString string) (*Client, error) {
+ u, err := parseEndpoint(endpoint, true)
+ if err != nil {
+ return nil, err
+ }
+ var requestedAPIVersion APIVersion
+ if strings.Contains(apiVersionString, ".") {
+ requestedAPIVersion, err = NewAPIVersion(apiVersionString)
+ if err != nil {
+ return nil, err
+ }
+ }
+ if certPEMBlock == nil || keyPEMBlock == nil {
+ return nil, errors.New("Both cert and key are required")
+ }
+ tlsCert, err := tls.X509KeyPair(certPEMBlock, keyPEMBlock)
+ if err != nil {
+ return nil, err
+ }
+ tlsConfig := &tls.Config{Certificates: []tls.Certificate{tlsCert}}
+ if caPEMCert == nil {
+ tlsConfig.InsecureSkipVerify = true
+ } else {
+ caPool := x509.NewCertPool()
+ if !caPool.AppendCertsFromPEM(caPEMCert) {
+ return nil, errors.New("Could not add RootCA pem")
+ }
+ tlsConfig.RootCAs = caPool
+ }
+ tr := cleanhttp.DefaultTransport()
+ tr.TLSClientConfig = tlsConfig
+ if err != nil {
+ return nil, err
+ }
+ return &Client{
+ HTTPClient: &http.Client{Transport: tr},
+ TLSConfig: tlsConfig,
+ Dialer: &net.Dialer{},
+ endpoint: endpoint,
+ endpointURL: u,
+ eventMonitor: new(eventMonitoringState),
+ requestedAPIVersion: requestedAPIVersion,
+ }, nil
+}
+
+func (c *Client) checkAPIVersion() error {
+ serverAPIVersionString, err := c.getServerAPIVersionString()
+ if err != nil {
+ return err
+ }
+ c.serverAPIVersion, err = NewAPIVersion(serverAPIVersionString)
+ if err != nil {
+ return err
+ }
+ if c.requestedAPIVersion == nil {
+ c.expectedAPIVersion = c.serverAPIVersion
+ } else {
+ c.expectedAPIVersion = c.requestedAPIVersion
+ }
+ return nil
+}
+
+// Endpoint returns the current endpoint. It's useful for getting the endpoint
+// when using functions that get this data from the environment (like
+// NewClientFromEnv).
+func (c *Client) Endpoint() string {
+ return c.endpoint
+}
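A short sketch of Endpoint; it assumes a client built by NewClientFromEnv, so the reported value depends on the caller's environment:

    package main

    import (
    	"log"

    	docker "github.com/fsouza/go-dockerclient"
    )

    func main() {
    	client, err := docker.NewClientFromEnv()
    	if err != nil {
    		log.Fatal(err)
    	}
    	// Endpoint reports whichever endpoint NewClientFromEnv resolved.
    	log.Printf("docker endpoint: %s", client.Endpoint())
    }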
+
+// Ping pings the docker server
+//
+// See https://goo.gl/kQCfJj for more details.
+func (c *Client) Ping() error {
+ path := "/_ping"
+ resp, err := c.do("GET", path, doOptions{})
+ if err != nil {
+ return err
+ }
+ if resp.StatusCode != http.StatusOK {
+ return newError(resp)
+ }
+ resp.Body.Close()
+ return nil
+}
+
+func (c *Client) getServerAPIVersionString() (version string, err error) {
+ resp, err := c.do("GET", "/version", doOptions{})
+ if err != nil {
+ return "", err
+ }
+ defer resp.Body.Close()
+ if resp.StatusCode != http.StatusOK {
+ return "", fmt.Errorf("Received unexpected status %d while trying to retrieve the server version", resp.StatusCode)
+ }
+ var versionResponse map[string]interface{}
+ if err := json.NewDecoder(resp.Body).Decode(&versionResponse); err != nil {
+ return "", err
+ }
+ if version, ok := (versionResponse["ApiVersion"]).(string); ok {
+ return version, nil
+ }
+ return "", nil
+}
+
+type doOptions struct {
+ data interface{}
+ forceJSON bool
+ headers map[string]string
+}
+
+func (c *Client) do(method, path string, doOptions doOptions) (*http.Response, error) {
+ var params io.Reader
+ if doOptions.data != nil || doOptions.forceJSON {
+ buf, err := json.Marshal(doOptions.data)
+ if err != nil {
+ return nil, err
+ }
+ params = bytes.NewBuffer(buf)
+ }
+ if path != "/version" && !c.SkipServerVersionCheck && c.expectedAPIVersion == nil {
+ err := c.checkAPIVersion()
+ if err != nil {
+ return nil, err
+ }
+ }
+ httpClient := c.HTTPClient
+ protocol := c.endpointURL.Scheme
+ var u string
+ if protocol == "unix" {
+ httpClient = c.unixClient()
+ u = c.getFakeUnixURL(path)
+ } else {
+ u = c.getURL(path)
+ }
+ req, err := http.NewRequest(method, u, params)
+ if err != nil {
+ return nil, err
+ }
+ req.Header.Set("User-Agent", userAgent)
+ if doOptions.data != nil {
+ req.Header.Set("Content-Type", "application/json")
+ } else if method == "POST" {
+ req.Header.Set("Content-Type", "plain/text")
+ }
+
+ for k, v := range doOptions.headers {
+ req.Header.Set(k, v)
+ }
+ resp, err := httpClient.Do(req)
+ if err != nil {
+ if strings.Contains(err.Error(), "connection refused") {
+ return nil, ErrConnectionRefused
+ }
+ return nil, err
+ }
+ if resp.StatusCode < 200 || resp.StatusCode >= 400 {
+ return nil, newError(resp)
+ }
+ return resp, nil
+}
+
+type streamOptions struct {
+ setRawTerminal bool
+ rawJSONStream bool
+ useJSONDecoder bool
+ headers map[string]string
+ in io.Reader
+ stdout io.Writer
+ stderr io.Writer
+ // timeout is the initial connection timeout
+ timeout time.Duration
+}
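Failures surfaced by do (any status outside 200-399) come back as *Error, so callers can branch on the HTTP status; a hedged caller-side sketch, with the endpoint illustrative:

    package main

    import (
    	"log"

    	docker "github.com/fsouza/go-dockerclient"
    )

    func main() {
    	client, err := docker.NewClient("unix:///var/run/docker.sock")
    	if err != nil {
    		log.Fatal(err)
    	}
    	if err := client.Ping(); err != nil {
    		// Non-2xx/3xx responses are returned as *docker.Error with the
    		// HTTP status and raw body preserved.
    		if apiErr, ok := err.(*docker.Error); ok {
    			log.Fatalf("API error %d: %s", apiErr.Status, apiErr.Message)
    		}
    		log.Fatal(err)
    	}
    }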
+
+func (c *Client) stream(method, path string, streamOptions streamOptions) error {
+ if (method == "POST" || method == "PUT") && streamOptions.in == nil {
+ streamOptions.in = bytes.NewReader(nil)
+ }
+ if path != "/version" && !c.SkipServerVersionCheck && c.expectedAPIVersion == nil {
+ err := c.checkAPIVersion()
+ if err != nil {
+ return err
+ }
+ }
+ req, err := http.NewRequest(method, c.getURL(path), streamOptions.in)
+ if err != nil {
+ return err
+ }
+ req.Header.Set("User-Agent", userAgent)
+ if method == "POST" {
+ req.Header.Set("Content-Type", "plain/text")
+ }
+ for key, val := range streamOptions.headers {
+ req.Header.Set(key, val)
+ }
+ var resp *http.Response
+ protocol := c.endpointURL.Scheme
+ address := c.endpointURL.Path
+ if streamOptions.stdout == nil {
+ streamOptions.stdout = ioutil.Discard
+ }
+ if streamOptions.stderr == nil {
+ streamOptions.stderr = ioutil.Discard
+ }
+ if protocol == "unix" {
+ dial, err := c.Dialer.Dial(protocol, address)
+ if err != nil {
+ return err
+ }
+ defer dial.Close()
+ breader := bufio.NewReader(dial)
+ err = req.Write(dial)
+ if err != nil {
+ return err
+ }
+
+ // ReadResponse may hang if server does not reply
+ if streamOptions.timeout > 0 {
+ dial.SetDeadline(time.Now().Add(streamOptions.timeout))
+ }
+
+ if resp, err = http.ReadResponse(breader, req); err != nil {
+ // Cancel timeout for future I/O operations
+ if streamOptions.timeout > 0 {
+ dial.SetDeadline(time.Time{})
+ }
+ if strings.Contains(err.Error(), "connection refused") {
+ return ErrConnectionRefused
+ }
+ return err
+ }
+ } else {
+ if resp, err = c.HTTPClient.Do(req); err != nil {
+ if strings.Contains(err.Error(), "connection refused") {
+ return ErrConnectionRefused
+ }
+ return err
+ }
+ }
+ defer resp.Body.Close()
+ if resp.StatusCode < 200 || resp.StatusCode >= 400 {
+ return newError(resp)
+ }
+ if streamOptions.useJSONDecoder || resp.Header.Get("Content-Type") == "application/json" {
+ // if we want to get raw json stream, just copy it back to output
+ // without decoding it
+ if streamOptions.rawJSONStream {
+ _, err = io.Copy(streamOptions.stdout, resp.Body)
+ return err
+ }
+ dec := json.NewDecoder(resp.Body)
+ for {
+ var m jsonMessage
+ if err := dec.Decode(&m); err == io.EOF {
+ break
+ } else if err != nil {
+ return err
+ }
+ if m.Stream != "" {
+ fmt.Fprint(streamOptions.stdout, m.Stream)
+ } else if m.Progress != "" {
+ fmt.Fprintf(streamOptions.stdout, "%s %s\r", m.Status, m.Progress)
+ } else if m.Error != "" {
+ return errors.New(m.Error)
+ }
+ if m.Status != "" {
+ fmt.Fprintln(streamOptions.stdout, m.Status)
+ }
+ }
+ } else {
+ if streamOptions.setRawTerminal {
+ _, err = io.Copy(streamOptions.stdout, resp.Body)
+ } else {
+ _, err = stdcopy.StdCopy(streamOptions.stdout, streamOptions.stderr, resp.Body)
+ }
+ return err
+ }
+ return nil
+}
+
+type hijackOptions struct {
+ success chan struct{}
+ setRawTerminal bool
+ in io.Reader
+ stdout io.Writer
+ stderr io.Writer
+ data interface{}
+}
+
+type CloseWaiter interface {
+ io.Closer
+ Wait() error
+}
+
+type waiterFunc func() error
+
+func (w waiterFunc) Wait() error { return w() }
+
+type closerFunc func() error
+
+func (c closerFunc) Close() error { return c() }
+
+func (c *Client) hijack(method, path string, hijackOptions hijackOptions) (CloseWaiter, error) {
+ if path != "/version" && !c.SkipServerVersionCheck && c.expectedAPIVersion == nil {
+ err := c.checkAPIVersion()
+ if err != nil {
+ return nil, err
+ }
+ }
+ var params io.Reader
+ if hijackOptions.data != nil {
+ buf, err := json.Marshal(hijackOptions.data)
+ if err != nil {
+ return nil, err
+ }
+ params = bytes.NewBuffer(buf)
+ }
+ req, err := http.NewRequest(method, c.getURL(path), params)
+ if err != nil {
+ return nil, err
+ }
+ req.Header.Set("Content-Type", "plain/text")
+ req.Header.Set("Connection", "Upgrade")
+ req.Header.Set("Upgrade", "tcp")
+ protocol := c.endpointURL.Scheme
+ address := c.endpointURL.Path
+ if protocol != "unix" {
+ protocol = "tcp"
+ address = c.endpointURL.Host
+ }
+ var dial net.Conn
+ if c.TLSConfig != nil && protocol != "unix" {
+ dial, err = tlsDialWithDialer(c.Dialer, protocol, address, c.TLSConfig)
+ if err != nil {
+ return nil, err
+ }
+ } else {
+ dial, err = c.Dialer.Dial(protocol, address)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ errs := make(chan error)
+ quit := make(chan struct{})
+ go func() {
+ clientconn := httputil.NewClientConn(dial, nil)
+ defer clientconn.Close()
+ clientconn.Do(req)
+ if hijackOptions.success != nil {
+ hijackOptions.success <- struct{}{}
+ <-hijackOptions.success
+ }
+ rwc, br :=
clientconn.Hijack() + defer rwc.Close() + + errChanOut := make(chan error, 1) + errChanIn := make(chan error, 1) + if hijackOptions.stdout == nil && hijackOptions.stderr == nil { + close(errChanOut) + } else { + // Only copy if hijackOptions.stdout and/or hijackOptions.stderr is actually set. + // Otherwise, if the only stream you care about is stdin, your attach session + // will "hang" until the container terminates, even though you're not reading + // stdout/stderr + if hijackOptions.stdout == nil { + hijackOptions.stdout = ioutil.Discard + } + if hijackOptions.stderr == nil { + hijackOptions.stderr = ioutil.Discard + } + + go func() { + defer func() { + if hijackOptions.in != nil { + if closer, ok := hijackOptions.in.(io.Closer); ok { + closer.Close() + } + errChanIn <- nil + } + }() + + var err error + if hijackOptions.setRawTerminal { + _, err = io.Copy(hijackOptions.stdout, br) + } else { + _, err = stdcopy.StdCopy(hijackOptions.stdout, hijackOptions.stderr, br) + } + errChanOut <- err + }() + } + + go func() { + var err error + if hijackOptions.in != nil { + _, err = io.Copy(rwc, hijackOptions.in) + } + errChanIn <- err + rwc.(interface { + CloseWrite() error + }).CloseWrite() + }() + + var errIn error + select { + case errIn = <-errChanIn: + case <-quit: + return + } + + var errOut error + select { + case errOut = <-errChanOut: + case <-quit: + return + } + + if errIn != nil { + errs <- errIn + } else { + errs <- errOut + } + }() + + return struct { + closerFunc + waiterFunc + }{ + closerFunc(func() error { close(quit); return nil }), + waiterFunc(func() error { return <-errs }), + }, nil +} + +func (c *Client) getURL(path string) string { + urlStr := strings.TrimRight(c.endpointURL.String(), "/") + if c.endpointURL.Scheme == "unix" { + urlStr = "" + } + if c.requestedAPIVersion != nil { + return fmt.Sprintf("%s/v%s%s", urlStr, c.requestedAPIVersion, path) + } + return fmt.Sprintf("%s%s", urlStr, path) +} + +// getFakeUnixURL returns the URL needed to make an HTTP request over a UNIX +// domain socket to the given path. +func (c *Client) getFakeUnixURL(path string) string { + u := *c.endpointURL // Copy. + + // Override URL so that net/http will not complain. + u.Scheme = "http" + u.Host = "unix.sock" // Doesn't matter what this is - it's not used. 
+ u.Path = ""
+ urlStr := strings.TrimRight(u.String(), "/")
+ if c.requestedAPIVersion != nil {
+ return fmt.Sprintf("%s/v%s%s", urlStr, c.requestedAPIVersion, path)
+ }
+ return fmt.Sprintf("%s%s", urlStr, path)
+}
+
+func (c *Client) unixClient() *http.Client {
+ if c.unixHTTPClient != nil {
+ return c.unixHTTPClient
+ }
+ socketPath := c.endpointURL.Path
+ tr := &http.Transport{
+ Dial: func(network, addr string) (net.Conn, error) {
+ return c.Dialer.Dial("unix", socketPath)
+ },
+ }
+ cleanhttp.SetTransportFinalizer(tr)
+ c.unixHTTPClient = &http.Client{Transport: tr}
+ return c.unixHTTPClient
+}
+
+type jsonMessage struct {
+ Status string `json:"status,omitempty"`
+ Progress string `json:"progress,omitempty"`
+ Error string `json:"error,omitempty"`
+ Stream string `json:"stream,omitempty"`
+}
+
+func queryString(opts interface{}) string {
+ if opts == nil {
+ return ""
+ }
+ value := reflect.ValueOf(opts)
+ if value.Kind() == reflect.Ptr {
+ value = value.Elem()
+ }
+ if value.Kind() != reflect.Struct {
+ return ""
+ }
+ items := url.Values(map[string][]string{})
+ for i := 0; i < value.NumField(); i++ {
+ field := value.Type().Field(i)
+ if field.PkgPath != "" {
+ continue
+ }
+ key := field.Tag.Get("qs")
+ if key == "" {
+ key = strings.ToLower(field.Name)
+ } else if key == "-" {
+ continue
+ }
+ addQueryStringValue(items, key, value.Field(i))
+ }
+ return items.Encode()
+}
+
+func addQueryStringValue(items url.Values, key string, v reflect.Value) {
+ switch v.Kind() {
+ case reflect.Bool:
+ if v.Bool() {
+ items.Add(key, "1")
+ }
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ if v.Int() > 0 {
+ items.Add(key, strconv.FormatInt(v.Int(), 10))
+ }
+ case reflect.Float32, reflect.Float64:
+ if v.Float() > 0 {
+ items.Add(key, strconv.FormatFloat(v.Float(), 'f', -1, 64))
+ }
+ case reflect.String:
+ if v.String() != "" {
+ items.Add(key, v.String())
+ }
+ case reflect.Ptr:
+ if !v.IsNil() {
+ if b, err := json.Marshal(v.Interface()); err == nil {
+ items.Add(key, string(b))
+ }
+ }
+ case reflect.Map:
+ if len(v.MapKeys()) > 0 {
+ if b, err := json.Marshal(v.Interface()); err == nil {
+ items.Add(key, string(b))
+ }
+ }
+ case reflect.Array, reflect.Slice:
+ vLen := v.Len()
+ if vLen > 0 {
+ for i := 0; i < vLen; i++ {
+ addQueryStringValue(items, key, v.Index(i))
+ }
+ }
+ }
+}
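To make the qs-tag rules above concrete, a small in-package sketch; the type and values are hypothetical:

    // Hypothetical options type, shown only to illustrate queryString.
    type listSketch struct {
    	All    bool   `qs:"all"`
    	Limit  int    `qs:"limit"`
    	Before string // untagged: the key defaults to the lowercased field name
    	Skip   string `qs:"-"` // a "-" tag excludes the field
    }

    // queryString(listSketch{All: true, Limit: 5, Before: "abc123", Skip: "x"})
    // returns "all=1&before=abc123&limit=5": url.Values.Encode sorts keys,
    // true bools encode as "1", and zero numbers or empty strings are omitted.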
+
+// Error represents failures in the API.
+type Error struct {
+ Status int
+ Message string
+}
+
+func newError(resp *http.Response) *Error {
+ defer resp.Body.Close()
+ data, err := ioutil.ReadAll(resp.Body)
+ if err != nil {
+ return &Error{Status: resp.StatusCode, Message: fmt.Sprintf("cannot read body, err: %v", err)}
+ }
+ return &Error{Status: resp.StatusCode, Message: string(data)}
+}
+
+func (e *Error) Error() string {
+ return fmt.Sprintf("API error (%d): %s", e.Status, e.Message)
+}
+
+func parseEndpoint(endpoint string, tls bool) (*url.URL, error) {
+ if endpoint != "" && !strings.Contains(endpoint, "://") {
+ endpoint = "tcp://" + endpoint
+ }
+ u, err := url.Parse(endpoint)
+ if err != nil {
+ return nil, ErrInvalidEndpoint
+ }
+ if tls {
+ u.Scheme = "https"
+ }
+ switch u.Scheme {
+ case "unix":
+ return u, nil
+ case "http", "https", "tcp":
+ _, port, err := net.SplitHostPort(u.Host)
+ if err != nil {
+ if e, ok := err.(*net.AddrError); ok {
+ if e.Err == "missing port in address" {
+ return u, nil
+ }
+ }
+ return nil, ErrInvalidEndpoint
+ }
+ number, err := strconv.ParseInt(port, 10, 64)
+ if err == nil && number > 0 && number < 65536 {
+ if u.Scheme == "tcp" {
+ if tls {
+ u.Scheme = "https"
+ } else {
+ u.Scheme = "http"
+ }
+ }
+ return u, nil
+ }
+ return nil, ErrInvalidEndpoint
+ default:
+ return nil, ErrInvalidEndpoint
+ }
+}
+
+type dockerEnv struct {
+ dockerHost string
+ dockerTLSVerify bool
+ dockerCertPath string
+}
+
+func getDockerEnv() (*dockerEnv, error) {
+ dockerHost := os.Getenv("DOCKER_HOST")
+ var err error
+ if dockerHost == "" {
+ dockerHost, err = DefaultDockerHost()
+ if err != nil {
+ return nil, err
+ }
+ }
+ dockerTLSVerify := os.Getenv("DOCKER_TLS_VERIFY") != ""
+ var dockerCertPath string
+ if dockerTLSVerify {
+ dockerCertPath = os.Getenv("DOCKER_CERT_PATH")
+ if dockerCertPath == "" {
+ home := homedir.Get()
+ if home == "" {
+ return nil, errors.New("environment variable HOME must be set if DOCKER_CERT_PATH is not set")
+ }
+ dockerCertPath = filepath.Join(home, ".docker")
+ dockerCertPath, err = filepath.Abs(dockerCertPath)
+ if err != nil {
+ return nil, err
+ }
+ }
+ }
+ return &dockerEnv{
+ dockerHost: dockerHost,
+ dockerTLSVerify: dockerTLSVerify,
+ dockerCertPath: dockerCertPath,
+ }, nil
+}
+
+// DefaultDockerHost returns the default docker socket for the current OS
+func DefaultDockerHost() (string, error) {
+ var defaultHost string
+ if runtime.GOOS == "windows" {
+ // If we do not have a host, default to TCP socket on Windows
+ defaultHost = fmt.Sprintf("tcp://%s:%d", opts.DefaultHTTPHost, opts.DefaultHTTPPort)
+ } else {
+ // If we do not have a host, default to unix socket
+ defaultHost = fmt.Sprintf("unix://%s", opts.DefaultUnixSocket)
+ }
+ return opts.ValidateHost(defaultHost)
+}
diff --git a/vendor/github.com/fsouza/go-dockerclient/client_test.go b/vendor/github.com/fsouza/go-dockerclient/client_test.go
new file mode 100644
index 000000000..d6ae570ba
--- /dev/null
+++ b/vendor/github.com/fsouza/go-dockerclient/client_test.go
@@ -0,0 +1,502 @@
+// Copyright 2015 go-dockerclient authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+ +package docker + +import ( + "bytes" + "fmt" + "io/ioutil" + "net" + "net/http" + "net/url" + "os" + "path/filepath" + "reflect" + "strconv" + "strings" + "testing" + "time" + + "github.com/fsouza/go-dockerclient/external/github.com/hashicorp/go-cleanhttp" +) + +func TestNewAPIClient(t *testing.T) { + endpoint := "http://localhost:4243" + client, err := NewClient(endpoint) + if err != nil { + t.Fatal(err) + } + if client.endpoint != endpoint { + t.Errorf("Expected endpoint %s. Got %s.", endpoint, client.endpoint) + } + // test unix socket endpoints + endpoint = "unix:///var/run/docker.sock" + client, err = NewClient(endpoint) + if err != nil { + t.Fatal(err) + } + if client.endpoint != endpoint { + t.Errorf("Expected endpoint %s. Got %s.", endpoint, client.endpoint) + } + if !client.SkipServerVersionCheck { + t.Error("Expected SkipServerVersionCheck to be true, got false") + } + if client.requestedAPIVersion != nil { + t.Errorf("Expected requestedAPIVersion to be nil, got %#v.", client.requestedAPIVersion) + } +} + +func newTLSClient(endpoint string) (*Client, error) { + return NewTLSClient(endpoint, + "testing/data/cert.pem", + "testing/data/key.pem", + "testing/data/ca.pem") +} + +func TestNewTSLAPIClient(t *testing.T) { + endpoint := "https://localhost:4243" + client, err := newTLSClient(endpoint) + if err != nil { + t.Fatal(err) + } + if client.endpoint != endpoint { + t.Errorf("Expected endpoint %s. Got %s.", endpoint, client.endpoint) + } + if !client.SkipServerVersionCheck { + t.Error("Expected SkipServerVersionCheck to be true, got false") + } + if client.requestedAPIVersion != nil { + t.Errorf("Expected requestedAPIVersion to be nil, got %#v.", client.requestedAPIVersion) + } +} + +func TestNewVersionedClient(t *testing.T) { + endpoint := "http://localhost:4243" + client, err := NewVersionedClient(endpoint, "1.12") + if err != nil { + t.Fatal(err) + } + if client.endpoint != endpoint { + t.Errorf("Expected endpoint %s. Got %s.", endpoint, client.endpoint) + } + if reqVersion := client.requestedAPIVersion.String(); reqVersion != "1.12" { + t.Errorf("Wrong requestAPIVersion. Want %q. Got %q.", "1.12", reqVersion) + } + if client.SkipServerVersionCheck { + t.Error("Expected SkipServerVersionCheck to be false, got true") + } +} + +func TestNewVersionedClientFromEnv(t *testing.T) { + endpoint := "tcp://localhost:2376" + endpointURL := "http://localhost:2376" + os.Setenv("DOCKER_HOST", endpoint) + os.Setenv("DOCKER_TLS_VERIFY", "") + client, err := NewVersionedClientFromEnv("1.12") + if err != nil { + t.Fatal(err) + } + if client.endpoint != endpoint { + t.Errorf("Expected endpoint %s. Got %s.", endpoint, client.endpoint) + } + if client.endpointURL.String() != endpointURL { + t.Errorf("Expected endpointURL %s. Got %s.", endpoint, client.endpoint) + } + if reqVersion := client.requestedAPIVersion.String(); reqVersion != "1.12" { + t.Errorf("Wrong requestAPIVersion. Want %q. Got %q.", "1.12", reqVersion) + } + if client.SkipServerVersionCheck { + t.Error("Expected SkipServerVersionCheck to be false, got true") + } +} + +func TestNewVersionedClientFromEnvTLS(t *testing.T) { + endpoint := "tcp://localhost:2376" + endpointURL := "https://localhost:2376" + base, _ := os.Getwd() + os.Setenv("DOCKER_CERT_PATH", filepath.Join(base, "/testing/data/")) + os.Setenv("DOCKER_HOST", endpoint) + os.Setenv("DOCKER_TLS_VERIFY", "1") + client, err := NewVersionedClientFromEnv("1.12") + if err != nil { + t.Fatal(err) + } + if client.endpoint != endpoint { + t.Errorf("Expected endpoint %s. 
Got %s.", endpoint, client.endpoint) + } + if client.endpointURL.String() != endpointURL { + t.Errorf("Expected endpointURL %s. Got %s.", endpoint, client.endpoint) + } + if reqVersion := client.requestedAPIVersion.String(); reqVersion != "1.12" { + t.Errorf("Wrong requestAPIVersion. Want %q. Got %q.", "1.12", reqVersion) + } + if client.SkipServerVersionCheck { + t.Error("Expected SkipServerVersionCheck to be false, got true") + } +} + +func TestNewTLSVersionedClient(t *testing.T) { + certPath := "testing/data/cert.pem" + keyPath := "testing/data/key.pem" + caPath := "testing/data/ca.pem" + endpoint := "https://localhost:4243" + client, err := NewVersionedTLSClient(endpoint, certPath, keyPath, caPath, "1.14") + if err != nil { + t.Fatal(err) + } + if client.endpoint != endpoint { + t.Errorf("Expected endpoint %s. Got %s.", endpoint, client.endpoint) + } + if reqVersion := client.requestedAPIVersion.String(); reqVersion != "1.14" { + t.Errorf("Wrong requestAPIVersion. Want %q. Got %q.", "1.14", reqVersion) + } + if client.SkipServerVersionCheck { + t.Error("Expected SkipServerVersionCheck to be false, got true") + } +} + +func TestNewTLSVersionedClientInvalidCA(t *testing.T) { + certPath := "testing/data/cert.pem" + keyPath := "testing/data/key.pem" + caPath := "testing/data/key.pem" + endpoint := "https://localhost:4243" + _, err := NewVersionedTLSClient(endpoint, certPath, keyPath, caPath, "1.14") + if err == nil { + t.Errorf("Expected invalid ca at %s", caPath) + } +} + +func TestNewClientInvalidEndpoint(t *testing.T) { + cases := []string{ + "htp://localhost:3243", "http://localhost:a", + "", "http://localhost:8080:8383", "http://localhost:65536", + "https://localhost:-20", + } + for _, c := range cases { + client, err := NewClient(c) + if client != nil { + t.Errorf("Want client for invalid endpoint, got %#v.", client) + } + if !reflect.DeepEqual(err, ErrInvalidEndpoint) { + t.Errorf("NewClient(%q): Got invalid error for invalid endpoint. Want %#v. Got %#v.", c, ErrInvalidEndpoint, err) + } + } +} + +func TestNewClientNoSchemeEndpoint(t *testing.T) { + cases := []string{"localhost", "localhost:8080"} + for _, c := range cases { + client, err := NewClient(c) + if client == nil { + t.Errorf("Want client for scheme-less endpoint, got ") + } + if err != nil { + t.Errorf("Got unexpected error scheme-less endpoint: %q", err) + } + } +} + +func TestNewTLSClient(t *testing.T) { + var tests = []struct { + endpoint string + expected string + }{ + {"tcp://localhost:2376", "https"}, + {"tcp://localhost:2375", "https"}, + {"tcp://localhost:4000", "https"}, + {"http://localhost:4000", "https"}, + } + for _, tt := range tests { + client, err := newTLSClient(tt.endpoint) + if err != nil { + t.Error(err) + } + got := client.endpointURL.Scheme + if got != tt.expected { + t.Errorf("endpointURL.Scheme: Got %s. Want %s.", got, tt.expected) + } + } +} + +func TestEndpoint(t *testing.T) { + client, err := NewVersionedClient("http://localhost:4243", "1.12") + if err != nil { + t.Fatal(err) + } + if endpoint := client.Endpoint(); endpoint != client.endpoint { + t.Errorf("Client.Endpoint(): want %q. 
Got %q", client.endpoint, endpoint) + } +} + +func TestGetURL(t *testing.T) { + var tests = []struct { + endpoint string + path string + expected string + }{ + {"http://localhost:4243/", "/", "http://localhost:4243/"}, + {"http://localhost:4243", "/", "http://localhost:4243/"}, + {"http://localhost:4243", "/containers/ps", "http://localhost:4243/containers/ps"}, + {"tcp://localhost:4243", "/containers/ps", "http://localhost:4243/containers/ps"}, + {"http://localhost:4243/////", "/", "http://localhost:4243/"}, + {"unix:///var/run/docker.socket", "/containers", "/containers"}, + } + for _, tt := range tests { + client, _ := NewClient(tt.endpoint) + client.endpoint = tt.endpoint + client.SkipServerVersionCheck = true + got := client.getURL(tt.path) + if got != tt.expected { + t.Errorf("getURL(%q): Got %s. Want %s.", tt.path, got, tt.expected) + } + } +} + +func TestGetFakeUnixURL(t *testing.T) { + var tests = []struct { + endpoint string + path string + expected string + }{ + {"unix://var/run/docker.sock", "/", "http://unix.sock/"}, + {"unix://var/run/docker.socket", "/", "http://unix.sock/"}, + {"unix://var/run/docker.sock", "/containers/ps", "http://unix.sock/containers/ps"}, + } + for _, tt := range tests { + client, _ := NewClient(tt.endpoint) + client.endpoint = tt.endpoint + client.SkipServerVersionCheck = true + got := client.getFakeUnixURL(tt.path) + if got != tt.expected { + t.Errorf("getURL(%q): Got %s. Want %s.", tt.path, got, tt.expected) + } + } +} + +func TestError(t *testing.T) { + fakeBody := ioutil.NopCloser(bytes.NewBufferString("bad parameter")) + resp := &http.Response{ + StatusCode: 400, + Body: fakeBody, + } + err := newError(resp) + expected := Error{Status: 400, Message: "bad parameter"} + if !reflect.DeepEqual(expected, *err) { + t.Errorf("Wrong error type. Want %#v. Got %#v.", expected, *err) + } + message := "API error (400): bad parameter" + if err.Error() != message { + t.Errorf("Wrong error message. Want %q. Got %q.", message, err.Error()) + } +} + +func TestQueryString(t *testing.T) { + v := float32(2.4) + f32QueryString := fmt.Sprintf("w=%s&x=10&y=10.35", strconv.FormatFloat(float64(v), 'f', -1, 64)) + jsonPerson := url.QueryEscape(`{"Name":"gopher","age":4}`) + var tests = []struct { + input interface{} + want string + }{ + {&ListContainersOptions{All: true}, "all=1"}, + {ListContainersOptions{All: true}, "all=1"}, + {ListContainersOptions{Before: "something"}, "before=something"}, + {ListContainersOptions{Before: "something", Since: "other"}, "before=something&since=other"}, + {ListContainersOptions{Filters: map[string][]string{"status": {"paused", "running"}}}, "filters=%7B%22status%22%3A%5B%22paused%22%2C%22running%22%5D%7D"}, + {dumb{X: 10, Y: 10.35000}, "x=10&y=10.35"}, + {dumb{W: v, X: 10, Y: 10.35000}, f32QueryString}, + {dumb{X: 10, Y: 10.35000, Z: 10}, "x=10&y=10.35&zee=10"}, + {dumb{v: 4, X: 10, Y: 10.35000}, "x=10&y=10.35"}, + {dumb{T: 10, Y: 10.35000}, "y=10.35"}, + {dumb{Person: &person{Name: "gopher", Age: 4}}, "p=" + jsonPerson}, + {nil, ""}, + {10, ""}, + {"not_a_struct", ""}, + } + for _, tt := range tests { + got := queryString(tt.input) + if got != tt.want { + t.Errorf("queryString(%v). Want %q. 
Got %q.", tt.input, tt.want, got) + } + } +} + +func TestAPIVersions(t *testing.T) { + var tests = []struct { + a string + b string + expectedALessThanB bool + expectedALessThanOrEqualToB bool + expectedAGreaterThanB bool + expectedAGreaterThanOrEqualToB bool + }{ + {"1.11", "1.11", false, true, false, true}, + {"1.10", "1.11", true, true, false, false}, + {"1.11", "1.10", false, false, true, true}, + + {"1.11-ubuntu0", "1.11", false, true, false, true}, + {"1.10", "1.11-el7", true, true, false, false}, + + {"1.9", "1.11", true, true, false, false}, + {"1.11", "1.9", false, false, true, true}, + + {"1.1.1", "1.1", false, false, true, true}, + {"1.1", "1.1.1", true, true, false, false}, + + {"2.1", "1.1.1", false, false, true, true}, + {"2.1", "1.3.1", false, false, true, true}, + {"1.1.1", "2.1", true, true, false, false}, + {"1.3.1", "2.1", true, true, false, false}, + } + + for _, tt := range tests { + a, _ := NewAPIVersion(tt.a) + b, _ := NewAPIVersion(tt.b) + + if tt.expectedALessThanB && !a.LessThan(b) { + t.Errorf("Expected %#v < %#v", a, b) + } + if tt.expectedALessThanOrEqualToB && !a.LessThanOrEqualTo(b) { + t.Errorf("Expected %#v <= %#v", a, b) + } + if tt.expectedAGreaterThanB && !a.GreaterThan(b) { + t.Errorf("Expected %#v > %#v", a, b) + } + if tt.expectedAGreaterThanOrEqualToB && !a.GreaterThanOrEqualTo(b) { + t.Errorf("Expected %#v >= %#v", a, b) + } + } +} + +func TestPing(t *testing.T) { + fakeRT := &FakeRoundTripper{message: "", status: http.StatusOK} + client := newTestClient(fakeRT) + err := client.Ping() + if err != nil { + t.Fatal(err) + } +} + +func TestPingFailing(t *testing.T) { + fakeRT := &FakeRoundTripper{message: "", status: http.StatusInternalServerError} + client := newTestClient(fakeRT) + err := client.Ping() + if err == nil { + t.Fatal("Expected non nil error, got nil") + } + expectedErrMsg := "API error (500): " + if err.Error() != expectedErrMsg { + t.Fatalf("Expected error to be %q, got: %q", expectedErrMsg, err.Error()) + } +} + +func TestPingFailingWrongStatus(t *testing.T) { + fakeRT := &FakeRoundTripper{message: "", status: http.StatusAccepted} + client := newTestClient(fakeRT) + err := client.Ping() + if err == nil { + t.Fatal("Expected non nil error, got nil") + } + expectedErrMsg := "API error (202): " + if err.Error() != expectedErrMsg { + t.Fatalf("Expected error to be %q, got: %q", expectedErrMsg, err.Error()) + } +} + +func TestPingErrorWithUnixSocket(t *testing.T) { + go func() { + li, err := net.Listen("unix", "/tmp/echo.sock") + if err != nil { + t.Fatal(err) + } + defer li.Close() + if err != nil { + t.Fatalf("Expected to get listener, but failed: %#v", err) + } + + fd, err := li.Accept() + if err != nil { + t.Fatalf("Expected to accept connection, but failed: %#v", err) + } + + buf := make([]byte, 512) + nr, err := fd.Read(buf) + + // Create invalid response message to trigger error. 
+ data := buf[0:nr] + for i := 0; i < 10; i++ { + data[i] = 63 + } + + _, err = fd.Write(data) + if err != nil { + t.Fatalf("Expected to write to socket, but failed: %#v", err) + } + + return + }() + + // Wait for unix socket to listen + time.Sleep(10 * time.Millisecond) + + endpoint := "unix:///tmp/echo.sock" + u, _ := parseEndpoint(endpoint, false) + client := Client{ + HTTPClient: cleanhttp.DefaultClient(), + Dialer: &net.Dialer{}, + endpoint: endpoint, + endpointURL: u, + SkipServerVersionCheck: true, + } + + err := client.Ping() + if err == nil { + t.Fatal("Expected non nil error, got nil") + } +} + +type FakeRoundTripper struct { + message string + status int + header map[string]string + requests []*http.Request +} + +func (rt *FakeRoundTripper) RoundTrip(r *http.Request) (*http.Response, error) { + body := strings.NewReader(rt.message) + rt.requests = append(rt.requests, r) + res := &http.Response{ + StatusCode: rt.status, + Body: ioutil.NopCloser(body), + Header: make(http.Header), + } + for k, v := range rt.header { + res.Header.Set(k, v) + } + return res, nil +} + +func (rt *FakeRoundTripper) Reset() { + rt.requests = nil +} + +type person struct { + Name string + Age int `json:"age"` +} + +type dumb struct { + T int `qs:"-"` + v int + W float32 + X int + Y float64 + Z int `qs:"zee"` + Person *person `qs:"p"` +} + +type fakeEndpointURL struct { + Scheme string +} diff --git a/vendor/github.com/fsouza/go-dockerclient/container.go b/vendor/github.com/fsouza/go-dockerclient/container.go new file mode 100644 index 000000000..317814b90 --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/container.go @@ -0,0 +1,1180 @@ +// Copyright 2015 go-dockerclient authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package docker + +import ( + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + "time" +) + +// ErrContainerAlreadyExists is the error returned by CreateContainer when the +// container already exists. +var ErrContainerAlreadyExists = errors.New("container already exists") + +// ListContainersOptions specify parameters to the ListContainers function. +// +// See https://goo.gl/47a6tO for more details. +type ListContainersOptions struct { + All bool + Size bool + Limit int + Since string + Before string + Filters map[string][]string +} + +// APIPort is a type that represents a port mapping returned by the Docker API +type APIPort struct { + PrivatePort int64 `json:"PrivatePort,omitempty" yaml:"PrivatePort,omitempty"` + PublicPort int64 `json:"PublicPort,omitempty" yaml:"PublicPort,omitempty"` + Type string `json:"Type,omitempty" yaml:"Type,omitempty"` + IP string `json:"IP,omitempty" yaml:"IP,omitempty"` +} + +// APIContainers represents each container in the list returned by +// ListContainers. 
+type APIContainers struct {
+ ID string `json:"Id" yaml:"Id"`
+ Image string `json:"Image,omitempty" yaml:"Image,omitempty"`
+ Command string `json:"Command,omitempty" yaml:"Command,omitempty"`
+ Created int64 `json:"Created,omitempty" yaml:"Created,omitempty"`
+ Status string `json:"Status,omitempty" yaml:"Status,omitempty"`
+ Ports []APIPort `json:"Ports,omitempty" yaml:"Ports,omitempty"`
+ SizeRw int64 `json:"SizeRw,omitempty" yaml:"SizeRw,omitempty"`
+ SizeRootFs int64 `json:"SizeRootFs,omitempty" yaml:"SizeRootFs,omitempty"`
+ Names []string `json:"Names,omitempty" yaml:"Names,omitempty"`
+ Labels map[string]string `json:"Labels,omitempty" yaml:"Labels,omitempty"`
+}
+
+// ListContainers returns a slice of containers matching the given criteria.
+//
+// See https://goo.gl/47a6tO for more details.
+func (c *Client) ListContainers(opts ListContainersOptions) ([]APIContainers, error) {
+ path := "/containers/json?" + queryString(opts)
+ resp, err := c.do("GET", path, doOptions{})
+ if err != nil {
+ return nil, err
+ }
+ defer resp.Body.Close()
+ var containers []APIContainers
+ if err := json.NewDecoder(resp.Body).Decode(&containers); err != nil {
+ return nil, err
+ }
+ return containers, nil
+}
+
+// Port represents the port number and the protocol, in the form
+// <number>/<protocol>. For example: 80/tcp.
+type Port string
+
+// Port returns the number of the port.
+func (p Port) Port() string {
+ return strings.Split(string(p), "/")[0]
+}
+
+// Proto returns the name of the protocol.
+func (p Port) Proto() string {
+ parts := strings.Split(string(p), "/")
+ if len(parts) == 1 {
+ return "tcp"
+ }
+ return parts[1]
+}
+
+// State represents the state of a container.
+type State struct {
+ Running bool `json:"Running,omitempty" yaml:"Running,omitempty"`
+ Paused bool `json:"Paused,omitempty" yaml:"Paused,omitempty"`
+ Restarting bool `json:"Restarting,omitempty" yaml:"Restarting,omitempty"`
+ OOMKilled bool `json:"OOMKilled,omitempty" yaml:"OOMKilled,omitempty"`
+ Pid int `json:"Pid,omitempty" yaml:"Pid,omitempty"`
+ ExitCode int `json:"ExitCode,omitempty" yaml:"ExitCode,omitempty"`
+ Error string `json:"Error,omitempty" yaml:"Error,omitempty"`
+ StartedAt time.Time `json:"StartedAt,omitempty" yaml:"StartedAt,omitempty"`
+ FinishedAt time.Time `json:"FinishedAt,omitempty" yaml:"FinishedAt,omitempty"`
+}
+
+// String returns the string representation of a state.
+func (s *State) String() string {
+ if s.Running {
+ if s.Paused {
+ return "paused"
+ }
+ return fmt.Sprintf("Up %s", time.Now().UTC().Sub(s.StartedAt))
+ }
+ return fmt.Sprintf("Exit %d", s.ExitCode)
+}
+
+// PortBinding represents the host/container port mapping as returned in the
+// `docker inspect` json
+type PortBinding struct {
+ HostIP string `json:"HostIP,omitempty" yaml:"HostIP,omitempty"`
+ HostPort string `json:"HostPort,omitempty" yaml:"HostPort,omitempty"`
+}
+
+// PortMapping represents a deprecated field in the `docker inspect` output,
+// and its value as found in NetworkSettings should always be nil
+type PortMapping map[string]string
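A quick sketch of the Port helpers, with illustrative values:

    // Port("80/tcp").Port()  returns "80"
    // Port("80/tcp").Proto() returns "tcp"
    // Port("53").Proto()     returns "tcp" (no slash, so the protocol defaults to tcp)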
+
+// ContainerNetwork represents the networking settings of a container per network.
+type ContainerNetwork struct {
+ MacAddress string `json:"MacAddress,omitempty" yaml:"MacAddress,omitempty"`
+ GlobalIPv6PrefixLen int `json:"GlobalIPv6PrefixLen,omitempty" yaml:"GlobalIPv6PrefixLen,omitempty"`
+ GlobalIPv6Address string `json:"GlobalIPv6Address,omitempty" yaml:"GlobalIPv6Address,omitempty"`
+ IPv6Gateway string `json:"IPv6Gateway,omitempty" yaml:"IPv6Gateway,omitempty"`
+ IPPrefixLen int `json:"IPPrefixLen,omitempty" yaml:"IPPrefixLen,omitempty"`
+ IPAddress string `json:"IPAddress,omitempty" yaml:"IPAddress,omitempty"`
+ Gateway string `json:"Gateway,omitempty" yaml:"Gateway,omitempty"`
+ EndpointID string `json:"EndpointID,omitempty" yaml:"EndpointID,omitempty"`
+}
+
+// NetworkSettings contains network-related information about a container
+type NetworkSettings struct {
+ Networks map[string]ContainerNetwork `json:"Networks,omitempty" yaml:"Networks,omitempty"`
+ IPAddress string `json:"IPAddress,omitempty" yaml:"IPAddress,omitempty"`
+ IPPrefixLen int `json:"IPPrefixLen,omitempty" yaml:"IPPrefixLen,omitempty"`
+ MacAddress string `json:"MacAddress,omitempty" yaml:"MacAddress,omitempty"`
+ Gateway string `json:"Gateway,omitempty" yaml:"Gateway,omitempty"`
+ Bridge string `json:"Bridge,omitempty" yaml:"Bridge,omitempty"`
+ PortMapping map[string]PortMapping `json:"PortMapping,omitempty" yaml:"PortMapping,omitempty"`
+ Ports map[Port][]PortBinding `json:"Ports,omitempty" yaml:"Ports,omitempty"`
+ NetworkID string `json:"NetworkID,omitempty" yaml:"NetworkID,omitempty"`
+ EndpointID string `json:"EndpointID,omitempty" yaml:"EndpointID,omitempty"`
+ SandboxKey string `json:"SandboxKey,omitempty" yaml:"SandboxKey,omitempty"`
+ GlobalIPv6Address string `json:"GlobalIPv6Address,omitempty" yaml:"GlobalIPv6Address,omitempty"`
+ GlobalIPv6PrefixLen int `json:"GlobalIPv6PrefixLen,omitempty" yaml:"GlobalIPv6PrefixLen,omitempty"`
+ IPv6Gateway string `json:"IPv6Gateway,omitempty" yaml:"IPv6Gateway,omitempty"`
+ LinkLocalIPv6Address string `json:"LinkLocalIPv6Address,omitempty" yaml:"LinkLocalIPv6Address,omitempty"`
+ LinkLocalIPv6PrefixLen int `json:"LinkLocalIPv6PrefixLen,omitempty" yaml:"LinkLocalIPv6PrefixLen,omitempty"`
+ SecondaryIPAddresses []string `json:"SecondaryIPAddresses,omitempty" yaml:"SecondaryIPAddresses,omitempty"`
+ SecondaryIPv6Addresses []string `json:"SecondaryIPv6Addresses,omitempty" yaml:"SecondaryIPv6Addresses,omitempty"`
+}
+
+// PortMappingAPI translates the port mappings as contained in NetworkSettings
+// into the format in which they would appear when returned by the API
+func (settings *NetworkSettings) PortMappingAPI() []APIPort {
+ var mapping []APIPort
+ for port, bindings := range settings.Ports {
+ p, _ := parsePort(port.Port())
+ if len(bindings) == 0 {
+ mapping = append(mapping, APIPort{
+ PrivatePort: int64(p),
+ Type: port.Proto(),
+ })
+ continue
+ }
+ for _, binding := range bindings {
+ p, _ := parsePort(port.Port())
+ h, _ := parsePort(binding.HostPort)
+ mapping = append(mapping, APIPort{
+ PrivatePort: int64(p),
+ PublicPort: int64(h),
+ Type: port.Proto(),
+ IP: binding.HostIP,
+ })
+ }
+ }
+ return mapping
+}
+
+func parsePort(rawPort string) (int, error) {
+ port, err := strconv.ParseUint(rawPort, 10, 16)
+ if err != nil {
+ return 0, err
+ }
+ return int(port), nil
+}
+
+// Config is the list of configuration options used when creating a container.
+// Config does not contain the options that are specific to starting a container on a
+// given host. 
Those are contained in HostConfig +type Config struct { + Hostname string `json:"Hostname,omitempty" yaml:"Hostname,omitempty"` + Domainname string `json:"Domainname,omitempty" yaml:"Domainname,omitempty"` + User string `json:"User,omitempty" yaml:"User,omitempty"` + Memory int64 `json:"Memory,omitempty" yaml:"Memory,omitempty"` + MemorySwap int64 `json:"MemorySwap,omitempty" yaml:"MemorySwap,omitempty"` + MemoryReservation int64 `json:"MemoryReservation,omitempty" yaml:"MemoryReservation,omitempty"` + KernelMemory int64 `json:"KernelMemory,omitempty" yaml:"KernelMemory,omitempty"` + CPUShares int64 `json:"CpuShares,omitempty" yaml:"CpuShares,omitempty"` + CPUSet string `json:"Cpuset,omitempty" yaml:"Cpuset,omitempty"` + AttachStdin bool `json:"AttachStdin,omitempty" yaml:"AttachStdin,omitempty"` + AttachStdout bool `json:"AttachStdout,omitempty" yaml:"AttachStdout,omitempty"` + AttachStderr bool `json:"AttachStderr,omitempty" yaml:"AttachStderr,omitempty"` + PortSpecs []string `json:"PortSpecs,omitempty" yaml:"PortSpecs,omitempty"` + ExposedPorts map[Port]struct{} `json:"ExposedPorts,omitempty" yaml:"ExposedPorts,omitempty"` + StopSignal string `json:"StopSignal,omitempty" yaml:"StopSignal,omitempty"` + Tty bool `json:"Tty,omitempty" yaml:"Tty,omitempty"` + OpenStdin bool `json:"OpenStdin,omitempty" yaml:"OpenStdin,omitempty"` + StdinOnce bool `json:"StdinOnce,omitempty" yaml:"StdinOnce,omitempty"` + Env []string `json:"Env,omitempty" yaml:"Env,omitempty"` + Cmd []string `json:"Cmd" yaml:"Cmd"` + DNS []string `json:"Dns,omitempty" yaml:"Dns,omitempty"` // For Docker API v1.9 and below only + Image string `json:"Image,omitempty" yaml:"Image,omitempty"` + Volumes map[string]struct{} `json:"Volumes,omitempty" yaml:"Volumes,omitempty"` + VolumeDriver string `json:"VolumeDriver,omitempty" yaml:"VolumeDriver,omitempty"` + VolumesFrom string `json:"VolumesFrom,omitempty" yaml:"VolumesFrom,omitempty"` + WorkingDir string `json:"WorkingDir,omitempty" yaml:"WorkingDir,omitempty"` + MacAddress string `json:"MacAddress,omitempty" yaml:"MacAddress,omitempty"` + Entrypoint []string `json:"Entrypoint" yaml:"Entrypoint"` + NetworkDisabled bool `json:"NetworkDisabled,omitempty" yaml:"NetworkDisabled,omitempty"` + SecurityOpts []string `json:"SecurityOpts,omitempty" yaml:"SecurityOpts,omitempty"` + OnBuild []string `json:"OnBuild,omitempty" yaml:"OnBuild,omitempty"` + Mounts []Mount `json:"Mounts,omitempty" yaml:"Mounts,omitempty"` + Labels map[string]string `json:"Labels,omitempty" yaml:"Labels,omitempty"` +} + +// Mount represents a mount point in the container. +// +// It has been added in the version 1.20 of the Docker API, available since +// Docker 1.8. +type Mount struct { + Name string + Source string + Destination string + Driver string + Mode string + RW bool +} + +// LogConfig defines the log driver type and the configuration for it. +type LogConfig struct { + Type string `json:"Type,omitempty" yaml:"Type,omitempty"` + Config map[string]string `json:"Config,omitempty" yaml:"Config,omitempty"` +} + +// ULimit defines system-wide resource limitations +// This can help a lot in system administration, e.g. when a user starts too many processes and therefore makes the system unresponsive for other users. 
+type ULimit struct {
+ Name string `json:"Name,omitempty" yaml:"Name,omitempty"`
+ Soft int64 `json:"Soft,omitempty" yaml:"Soft,omitempty"`
+ Hard int64 `json:"Hard,omitempty" yaml:"Hard,omitempty"`
+}
+
+// SwarmNode contains information about which Swarm node the container is on
+type SwarmNode struct {
+ ID string `json:"ID,omitempty" yaml:"ID,omitempty"`
+ IP string `json:"IP,omitempty" yaml:"IP,omitempty"`
+ Addr string `json:"Addr,omitempty" yaml:"Addr,omitempty"`
+ Name string `json:"Name,omitempty" yaml:"Name,omitempty"`
+ CPUs int64 `json:"CPUs,omitempty" yaml:"CPUs,omitempty"`
+ Memory int64 `json:"Memory,omitempty" yaml:"Memory,omitempty"`
+ Labels map[string]string `json:"Labels,omitempty" yaml:"Labels,omitempty"`
+}
+
+// Container is the type encompassing everything about a container - its config,
+// hostconfig, etc.
+type Container struct {
+ ID string `json:"Id" yaml:"Id"`
+
+ Created time.Time `json:"Created,omitempty" yaml:"Created,omitempty"`
+
+ Path string `json:"Path,omitempty" yaml:"Path,omitempty"`
+ Args []string `json:"Args,omitempty" yaml:"Args,omitempty"`
+
+ Config *Config `json:"Config,omitempty" yaml:"Config,omitempty"`
+ State State `json:"State,omitempty" yaml:"State,omitempty"`
+ Image string `json:"Image,omitempty" yaml:"Image,omitempty"`
+
+ Node *SwarmNode `json:"Node,omitempty" yaml:"Node,omitempty"`
+
+ NetworkSettings *NetworkSettings `json:"NetworkSettings,omitempty" yaml:"NetworkSettings,omitempty"`
+
+ SysInitPath string `json:"SysInitPath,omitempty" yaml:"SysInitPath,omitempty"`
+ ResolvConfPath string `json:"ResolvConfPath,omitempty" yaml:"ResolvConfPath,omitempty"`
+ HostnamePath string `json:"HostnamePath,omitempty" yaml:"HostnamePath,omitempty"`
+ HostsPath string `json:"HostsPath,omitempty" yaml:"HostsPath,omitempty"`
+ LogPath string `json:"LogPath,omitempty" yaml:"LogPath,omitempty"`
+ Name string `json:"Name,omitempty" yaml:"Name,omitempty"`
+ Driver string `json:"Driver,omitempty" yaml:"Driver,omitempty"`
+ Mounts []Mount `json:"Mounts,omitempty" yaml:"Mounts,omitempty"`
+
+ Volumes map[string]string `json:"Volumes,omitempty" yaml:"Volumes,omitempty"`
+ VolumesRW map[string]bool `json:"VolumesRW,omitempty" yaml:"VolumesRW,omitempty"`
+ HostConfig *HostConfig `json:"HostConfig,omitempty" yaml:"HostConfig,omitempty"`
+ ExecIDs []string `json:"ExecIDs,omitempty" yaml:"ExecIDs,omitempty"`
+
+ RestartCount int `json:"RestartCount,omitempty" yaml:"RestartCount,omitempty"`
+
+ AppArmorProfile string `json:"AppArmorProfile,omitempty" yaml:"AppArmorProfile,omitempty"`
+}
+
+// RenameContainerOptions specify parameters to the RenameContainer function.
+//
+// See https://goo.gl/laSOIy for more details.
+type RenameContainerOptions struct {
+ // ID of container to rename
+ ID string `qs:"-"`
+
+ // New name
+ Name string `json:"name,omitempty" yaml:"name,omitempty"`
+}
+
+// RenameContainer updates an existing container's name
+//
+// See https://goo.gl/laSOIy for more details.
+func (c *Client) RenameContainer(opts RenameContainerOptions) error {
+ resp, err := c.do("POST", fmt.Sprintf("/containers/"+opts.ID+"/rename?%s", queryString(opts)), doOptions{})
+ if err != nil {
+ return err
+ }
+ resp.Body.Close()
+ return nil
+}
+
+// InspectContainer returns information about a container by its ID.
+//
+// See https://goo.gl/RdIq0b for more details. 
+func (c *Client) InspectContainer(id string) (*Container, error) { + path := "/containers/" + id + "/json" + resp, err := c.do("GET", path, doOptions{}) + if err != nil { + if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound { + return nil, &NoSuchContainer{ID: id} + } + return nil, err + } + defer resp.Body.Close() + var container Container + if err := json.NewDecoder(resp.Body).Decode(&container); err != nil { + return nil, err + } + return &container, nil +} + +// ContainerChanges returns changes in the filesystem of the given container. +// +// See https://goo.gl/9GsTIF for more details. +func (c *Client) ContainerChanges(id string) ([]Change, error) { + path := "/containers/" + id + "/changes" + resp, err := c.do("GET", path, doOptions{}) + if err != nil { + if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound { + return nil, &NoSuchContainer{ID: id} + } + return nil, err + } + defer resp.Body.Close() + var changes []Change + if err := json.NewDecoder(resp.Body).Decode(&changes); err != nil { + return nil, err + } + return changes, nil +} + +// CreateContainerOptions specify parameters to the CreateContainer function. +// +// See https://goo.gl/WxQzrr for more details. +type CreateContainerOptions struct { + Name string + Config *Config `qs:"-"` + HostConfig *HostConfig `qs:"-"` +} + +// CreateContainer creates a new container, returning the container instance, +// or an error in case of failure. +// +// See https://goo.gl/WxQzrr for more details. +func (c *Client) CreateContainer(opts CreateContainerOptions) (*Container, error) { + path := "/containers/create?" + queryString(opts) + resp, err := c.do( + "POST", + path, + doOptions{ + data: struct { + *Config + HostConfig *HostConfig `json:"HostConfig,omitempty" yaml:"HostConfig,omitempty"` + }{ + opts.Config, + opts.HostConfig, + }, + }, + ) + + if e, ok := err.(*Error); ok { + if e.Status == http.StatusNotFound { + return nil, ErrNoSuchImage + } + if e.Status == http.StatusConflict { + return nil, ErrContainerAlreadyExists + } + } + + if err != nil { + return nil, err + } + defer resp.Body.Close() + var container Container + if err := json.NewDecoder(resp.Body).Decode(&container); err != nil { + return nil, err + } + + container.Name = opts.Name + + return &container, nil +} + +// KeyValuePair is a type for generic key/value pairs as used in the Lxc +// configuration +type KeyValuePair struct { + Key string `json:"Key,omitempty" yaml:"Key,omitempty"` + Value string `json:"Value,omitempty" yaml:"Value,omitempty"` +} + +// RestartPolicy represents the policy for automatically restarting a container. +// +// Possible values are: +// +// - always: the docker daemon will always restart the container +// - on-failure: the docker daemon will restart the container on failures, at +// most MaximumRetryCount times +// - no: the docker daemon will not restart the container automatically +type RestartPolicy struct { + Name string `json:"Name,omitempty" yaml:"Name,omitempty"` + MaximumRetryCount int `json:"MaximumRetryCount,omitempty" yaml:"MaximumRetryCount,omitempty"` +} + +// AlwaysRestart returns a restart policy that tells the Docker daemon to +// always restart the container. +func AlwaysRestart() RestartPolicy { + return RestartPolicy{Name: "always"} +} + +// RestartOnFailure returns a restart policy that tells the Docker daemon to +// restart the container on failures, trying at most maxRetry times. 
+func RestartOnFailure(maxRetry int) RestartPolicy { + return RestartPolicy{Name: "on-failure", MaximumRetryCount: maxRetry} +} + +// NeverRestart returns a restart policy that tells the Docker daemon to never +// restart the container on failures. +func NeverRestart() RestartPolicy { + return RestartPolicy{Name: "no"} +} + +// Device represents a device mapping between the Docker host and the +// container. +type Device struct { + PathOnHost string `json:"PathOnHost,omitempty" yaml:"PathOnHost,omitempty"` + PathInContainer string `json:"PathInContainer,omitempty" yaml:"PathInContainer,omitempty"` + CgroupPermissions string `json:"CgroupPermissions,omitempty" yaml:"CgroupPermissions,omitempty"` +} + +// HostConfig contains the container options related to starting a container on +// a given host +type HostConfig struct { + Binds []string `json:"Binds,omitempty" yaml:"Binds,omitempty"` + CapAdd []string `json:"CapAdd,omitempty" yaml:"CapAdd,omitempty"` + CapDrop []string `json:"CapDrop,omitempty" yaml:"CapDrop,omitempty"` + GroupAdd []string `json:"GroupAdd,omitempty" yaml:"GroupAdd,omitempty"` + ContainerIDFile string `json:"ContainerIDFile,omitempty" yaml:"ContainerIDFile,omitempty"` + LxcConf []KeyValuePair `json:"LxcConf,omitempty" yaml:"LxcConf,omitempty"` + Privileged bool `json:"Privileged,omitempty" yaml:"Privileged,omitempty"` + PortBindings map[Port][]PortBinding `json:"PortBindings,omitempty" yaml:"PortBindings,omitempty"` + Links []string `json:"Links,omitempty" yaml:"Links,omitempty"` + PublishAllPorts bool `json:"PublishAllPorts,omitempty" yaml:"PublishAllPorts,omitempty"` + DNS []string `json:"Dns,omitempty" yaml:"Dns,omitempty"` // For Docker API v1.10 and above only + DNSOptions []string `json:"DnsOptions,omitempty" yaml:"DnsOptions,omitempty"` + DNSSearch []string `json:"DnsSearch,omitempty" yaml:"DnsSearch,omitempty"` + ExtraHosts []string `json:"ExtraHosts,omitempty" yaml:"ExtraHosts,omitempty"` + VolumesFrom []string `json:"VolumesFrom,omitempty" yaml:"VolumesFrom,omitempty"` + NetworkMode string `json:"NetworkMode,omitempty" yaml:"NetworkMode,omitempty"` + IpcMode string `json:"IpcMode,omitempty" yaml:"IpcMode,omitempty"` + PidMode string `json:"PidMode,omitempty" yaml:"PidMode,omitempty"` + UTSMode string `json:"UTSMode,omitempty" yaml:"UTSMode,omitempty"` + RestartPolicy RestartPolicy `json:"RestartPolicy,omitempty" yaml:"RestartPolicy,omitempty"` + Devices []Device `json:"Devices,omitempty" yaml:"Devices,omitempty"` + LogConfig LogConfig `json:"LogConfig,omitempty" yaml:"LogConfig,omitempty"` + ReadonlyRootfs bool `json:"ReadonlyRootfs,omitempty" yaml:"ReadonlyRootfs,omitempty"` + SecurityOpt []string `json:"SecurityOpt,omitempty" yaml:"SecurityOpt,omitempty"` + CgroupParent string `json:"CgroupParent,omitempty" yaml:"CgroupParent,omitempty"` + Memory int64 `json:"Memory,omitempty" yaml:"Memory,omitempty"` + MemorySwap int64 `json:"MemorySwap,omitempty" yaml:"MemorySwap,omitempty"` + MemorySwappiness int64 `json:"MemorySwappiness,omitempty" yaml:"MemorySwappiness,omitempty"` + OOMKillDisable bool `json:"OomKillDisable,omitempty" yaml:"OomKillDisable"` + CPUShares int64 `json:"CpuShares,omitempty" yaml:"CpuShares,omitempty"` + CPUSet string `json:"Cpuset,omitempty" yaml:"Cpuset,omitempty"` + CPUSetCPUs string `json:"CpusetCpus,omitempty" yaml:"CpusetCpus,omitempty"` + CPUSetMEMs string `json:"CpusetMems,omitempty" yaml:"CpusetMems,omitempty"` + CPUQuota int64 `json:"CpuQuota,omitempty" yaml:"CpuQuota,omitempty"` + CPUPeriod int64 `json:"CpuPeriod,omitempty" 
yaml:"CpuPeriod,omitempty"` + BlkioWeight int64 `json:"BlkioWeight,omitempty" yaml:"BlkioWeight"` + Ulimits []ULimit `json:"Ulimits,omitempty" yaml:"Ulimits,omitempty"` + VolumeDriver string `json:"VolumeDriver,omitempty" yaml:"VolumeDriver,omitempty"` + OomScoreAdj int `json:"OomScoreAdj,omitempty" yaml:"OomScoreAdj,omitempty"` +} + +// StartContainer starts a container, returning an error in case of failure. +// +// See https://goo.gl/MrBAJv for more details. +func (c *Client) StartContainer(id string, hostConfig *HostConfig) error { + path := "/containers/" + id + "/start" + resp, err := c.do("POST", path, doOptions{data: hostConfig, forceJSON: true}) + if err != nil { + if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound { + return &NoSuchContainer{ID: id, Err: err} + } + return err + } + if resp.StatusCode == http.StatusNotModified { + return &ContainerAlreadyRunning{ID: id} + } + resp.Body.Close() + return nil +} + +// StopContainer stops a container, killing it after the given timeout (in +// seconds). +// +// See https://goo.gl/USqsFt for more details. +func (c *Client) StopContainer(id string, timeout uint) error { + path := fmt.Sprintf("/containers/%s/stop?t=%d", id, timeout) + resp, err := c.do("POST", path, doOptions{}) + if err != nil { + if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound { + return &NoSuchContainer{ID: id} + } + return err + } + if resp.StatusCode == http.StatusNotModified { + return &ContainerNotRunning{ID: id} + } + resp.Body.Close() + return nil +} + +// RestartContainer stops a container, killing it after the given timeout (in +// seconds), during the stop process. +// +// See https://goo.gl/QzsDnz for more details. +func (c *Client) RestartContainer(id string, timeout uint) error { + path := fmt.Sprintf("/containers/%s/restart?t=%d", id, timeout) + resp, err := c.do("POST", path, doOptions{}) + if err != nil { + if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound { + return &NoSuchContainer{ID: id} + } + return err + } + resp.Body.Close() + return nil +} + +// PauseContainer pauses the given container. +// +// See https://goo.gl/OF7W9X for more details. +func (c *Client) PauseContainer(id string) error { + path := fmt.Sprintf("/containers/%s/pause", id) + resp, err := c.do("POST", path, doOptions{}) + if err != nil { + if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound { + return &NoSuchContainer{ID: id} + } + return err + } + resp.Body.Close() + return nil +} + +// UnpauseContainer unpauses the given container. +// +// See https://goo.gl/7dwyPA for more details. +func (c *Client) UnpauseContainer(id string) error { + path := fmt.Sprintf("/containers/%s/unpause", id) + resp, err := c.do("POST", path, doOptions{}) + if err != nil { + if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound { + return &NoSuchContainer{ID: id} + } + return err + } + resp.Body.Close() + return nil +} + +// TopResult represents the list of processes running in a container, as +// returned by /containers//top. +// +// See https://goo.gl/Rb46aY for more details. +type TopResult struct { + Titles []string + Processes [][]string +} + +// TopContainer returns processes running inside a container +// +// See https://goo.gl/Rb46aY for more details. 
+func (c *Client) TopContainer(id string, psArgs string) (TopResult, error) {
+ var args string
+ var result TopResult
+ if psArgs != "" {
+ args = fmt.Sprintf("?ps_args=%s", psArgs)
+ }
+ path := fmt.Sprintf("/containers/%s/top%s", id, args)
+ resp, err := c.do("GET", path, doOptions{})
+ if err != nil {
+ if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound {
+ return result, &NoSuchContainer{ID: id}
+ }
+ return result, err
+ }
+ defer resp.Body.Close()
+ if err := json.NewDecoder(resp.Body).Decode(&result); err != nil {
+ return result, err
+ }
+ return result, nil
+}
+
+// Stats represents container statistics, returned by /containers/<id>/stats.
+//
+// See https://goo.gl/GNmLHb for more details.
+type Stats struct {
+ Read time.Time `json:"read,omitempty" yaml:"read,omitempty"`
+ Network NetworkStats `json:"network,omitempty" yaml:"network,omitempty"`
+ Networks map[string]NetworkStats `json:"networks,omitempty" yaml:"networks,omitempty"`
+ MemoryStats struct {
+ Stats struct {
+ TotalPgmafault uint64 `json:"total_pgmafault,omitempty" yaml:"total_pgmafault,omitempty"`
+ Cache uint64 `json:"cache,omitempty" yaml:"cache,omitempty"`
+ MappedFile uint64 `json:"mapped_file,omitempty" yaml:"mapped_file,omitempty"`
+ TotalInactiveFile uint64 `json:"total_inactive_file,omitempty" yaml:"total_inactive_file,omitempty"`
+ Pgpgout uint64 `json:"pgpgout,omitempty" yaml:"pgpgout,omitempty"`
+ Rss uint64 `json:"rss,omitempty" yaml:"rss,omitempty"`
+ TotalMappedFile uint64 `json:"total_mapped_file,omitempty" yaml:"total_mapped_file,omitempty"`
+ Writeback uint64 `json:"writeback,omitempty" yaml:"writeback,omitempty"`
+ Unevictable uint64 `json:"unevictable,omitempty" yaml:"unevictable,omitempty"`
+ Pgpgin uint64 `json:"pgpgin,omitempty" yaml:"pgpgin,omitempty"`
+ TotalUnevictable uint64 `json:"total_unevictable,omitempty" yaml:"total_unevictable,omitempty"`
+ Pgmajfault uint64 `json:"pgmajfault,omitempty" yaml:"pgmajfault,omitempty"`
+ TotalRss uint64 `json:"total_rss,omitempty" yaml:"total_rss,omitempty"`
+ TotalRssHuge uint64 `json:"total_rss_huge,omitempty" yaml:"total_rss_huge,omitempty"`
+ TotalWriteback uint64 `json:"total_writeback,omitempty" yaml:"total_writeback,omitempty"`
+ TotalInactiveAnon uint64 `json:"total_inactive_anon,omitempty" yaml:"total_inactive_anon,omitempty"`
+ RssHuge uint64 `json:"rss_huge,omitempty" yaml:"rss_huge,omitempty"`
+ HierarchicalMemoryLimit uint64 `json:"hierarchical_memory_limit,omitempty" yaml:"hierarchical_memory_limit,omitempty"`
+ TotalPgfault uint64 `json:"total_pgfault,omitempty" yaml:"total_pgfault,omitempty"`
+ TotalActiveFile uint64 `json:"total_active_file,omitempty" yaml:"total_active_file,omitempty"`
+ ActiveAnon uint64 `json:"active_anon,omitempty" yaml:"active_anon,omitempty"`
+ TotalActiveAnon uint64 `json:"total_active_anon,omitempty" yaml:"total_active_anon,omitempty"`
+ TotalPgpgout uint64 `json:"total_pgpgout,omitempty" yaml:"total_pgpgout,omitempty"`
+ TotalCache uint64 `json:"total_cache,omitempty" yaml:"total_cache,omitempty"`
+ InactiveAnon uint64 `json:"inactive_anon,omitempty" yaml:"inactive_anon,omitempty"`
+ ActiveFile uint64 `json:"active_file,omitempty" yaml:"active_file,omitempty"`
+ Pgfault uint64 `json:"pgfault,omitempty" yaml:"pgfault,omitempty"`
+ InactiveFile uint64 `json:"inactive_file,omitempty" yaml:"inactive_file,omitempty"`
+ TotalPgpgin uint64 `json:"total_pgpgin,omitempty" yaml:"total_pgpgin,omitempty"`
+ HierarchicalMemswLimit uint64 `json:"hierarchical_memsw_limit,omitempty" 
yaml:"hierarchical_memsw_limit,omitempty"` + Swap uint64 `json:"swap,omitempty" yaml:"swap,omitempty"` + } `json:"stats,omitempty" yaml:"stats,omitempty"` + MaxUsage uint64 `json:"max_usage,omitempty" yaml:"max_usage,omitempty"` + Usage uint64 `json:"usage,omitempty" yaml:"usage,omitempty"` + Failcnt uint64 `json:"failcnt,omitempty" yaml:"failcnt,omitempty"` + Limit uint64 `json:"limit,omitempty" yaml:"limit,omitempty"` + } `json:"memory_stats,omitempty" yaml:"memory_stats,omitempty"` + BlkioStats struct { + IOServiceBytesRecursive []BlkioStatsEntry `json:"io_service_bytes_recursive,omitempty" yaml:"io_service_bytes_recursive,omitempty"` + IOServicedRecursive []BlkioStatsEntry `json:"io_serviced_recursive,omitempty" yaml:"io_serviced_recursive,omitempty"` + IOQueueRecursive []BlkioStatsEntry `json:"io_queue_recursive,omitempty" yaml:"io_queue_recursive,omitempty"` + IOServiceTimeRecursive []BlkioStatsEntry `json:"io_service_time_recursive,omitempty" yaml:"io_service_time_recursive,omitempty"` + IOWaitTimeRecursive []BlkioStatsEntry `json:"io_wait_time_recursive,omitempty" yaml:"io_wait_time_recursive,omitempty"` + IOMergedRecursive []BlkioStatsEntry `json:"io_merged_recursive,omitempty" yaml:"io_merged_recursive,omitempty"` + IOTimeRecursive []BlkioStatsEntry `json:"io_time_recursive,omitempty" yaml:"io_time_recursive,omitempty"` + SectorsRecursive []BlkioStatsEntry `json:"sectors_recursive,omitempty" yaml:"sectors_recursive,omitempty"` + } `json:"blkio_stats,omitempty" yaml:"blkio_stats,omitempty"` + CPUStats CPUStats `json:"cpu_stats,omitempty" yaml:"cpu_stats,omitempty"` + PreCPUStats CPUStats `json:"precpu_stats,omitempty"` +} + +// NetworkStats is a stats entry for network stats +type NetworkStats struct { + RxDropped uint64 `json:"rx_dropped,omitempty" yaml:"rx_dropped,omitempty"` + RxBytes uint64 `json:"rx_bytes,omitempty" yaml:"rx_bytes,omitempty"` + RxErrors uint64 `json:"rx_errors,omitempty" yaml:"rx_errors,omitempty"` + TxPackets uint64 `json:"tx_packets,omitempty" yaml:"tx_packets,omitempty"` + TxDropped uint64 `json:"tx_dropped,omitempty" yaml:"tx_dropped,omitempty"` + RxPackets uint64 `json:"rx_packets,omitempty" yaml:"rx_packets,omitempty"` + TxErrors uint64 `json:"tx_errors,omitempty" yaml:"tx_errors,omitempty"` + TxBytes uint64 `json:"tx_bytes,omitempty" yaml:"tx_bytes,omitempty"` +} + +// CPUStats is a stats entry for cpu stats +type CPUStats struct { + CPUUsage struct { + PercpuUsage []uint64 `json:"percpu_usage,omitempty" yaml:"percpu_usage,omitempty"` + UsageInUsermode uint64 `json:"usage_in_usermode,omitempty" yaml:"usage_in_usermode,omitempty"` + TotalUsage uint64 `json:"total_usage,omitempty" yaml:"total_usage,omitempty"` + UsageInKernelmode uint64 `json:"usage_in_kernelmode,omitempty" yaml:"usage_in_kernelmode,omitempty"` + } `json:"cpu_usage,omitempty" yaml:"cpu_usage,omitempty"` + SystemCPUUsage uint64 `json:"system_cpu_usage,omitempty" yaml:"system_cpu_usage,omitempty"` + ThrottlingData struct { + Periods uint64 `json:"periods,omitempty"` + ThrottledPeriods uint64 `json:"throttled_periods,omitempty"` + ThrottledTime uint64 `json:"throttled_time,omitempty"` + } `json:"throttling_data,omitempty" yaml:"throttling_data,omitempty"` +} + +// BlkioStatsEntry is a stats entry for blkio_stats +type BlkioStatsEntry struct { + Major uint64 `json:"major,omitempty" yaml:"major,omitempty"` + Minor uint64 `json:"minor,omitempty" yaml:"minor,omitempty"` + Op string `json:"op,omitempty" yaml:"op,omitempty"` + Value uint64 `json:"value,omitempty" yaml:"value,omitempty"` +} + +// 
StatsOptions specifies parameters to the Stats function.
+//
+// See https://goo.gl/GNmLHb for more details.
+type StatsOptions struct {
+	ID     string
+	Stats  chan<- *Stats
+	Stream bool
+	// A flag that enables stopping the stats operation
+	Done <-chan bool
+	// Initial connection timeout
+	Timeout time.Duration
+}
+
+// Stats sends container statistics for the given container to the given channel.
+//
+// This function is blocking, similar to a streaming call for logs, and should be run
+// on a separate goroutine from the caller. Note that this function will block until
+// the given container is removed, not just exited. When finished, this function
+// will close the given channel. Alternatively, the function can be stopped by
+// signaling on the Done channel.
+//
+// See https://goo.gl/GNmLHb for more details.
+func (c *Client) Stats(opts StatsOptions) (retErr error) {
+	errC := make(chan error, 1)
+	readCloser, writeCloser := io.Pipe()
+
+	defer func() {
+		close(opts.Stats)
+
+		select {
+		case err := <-errC:
+			if err != nil && retErr == nil {
+				retErr = err
+			}
+		default:
+			// No errors
+		}
+
+		if err := readCloser.Close(); err != nil && retErr == nil {
+			retErr = err
+		}
+	}()
+
+	go func() {
+		err := c.stream("GET", fmt.Sprintf("/containers/%s/stats?stream=%v", opts.ID, opts.Stream), streamOptions{
+			rawJSONStream:  true,
+			useJSONDecoder: true,
+			stdout:         writeCloser,
+			timeout:        opts.Timeout,
+		})
+		if err != nil {
+			dockerError, ok := err.(*Error)
+			if ok {
+				if dockerError.Status == http.StatusNotFound {
+					err = &NoSuchContainer{ID: opts.ID}
+				}
+			}
+		}
+		if closeErr := writeCloser.Close(); closeErr != nil && err == nil {
+			err = closeErr
+		}
+		errC <- err
+		close(errC)
+	}()
+
+	quit := make(chan struct{})
+	defer close(quit)
+	go func() {
+		// block here waiting for the signal to stop the function
+		select {
+		case <-opts.Done:
+			readCloser.Close()
+		case <-quit:
+			return
+		}
+	}()
+
+	decoder := json.NewDecoder(readCloser)
+	stats := new(Stats)
+	for err := decoder.Decode(stats); err != io.EOF; err = decoder.Decode(stats) {
+		if err != nil {
+			return err
+		}
+		opts.Stats <- stats
+		stats = new(Stats)
+	}
+	return nil
+}
+
+// KillContainerOptions represents the set of options that can be used in a
+// call to KillContainer.
+//
+// See https://goo.gl/hkS9i8 for more details.
+type KillContainerOptions struct {
+	// The ID of the container.
+	ID string `qs:"-"`
+
+	// The signal to send to the container. When omitted, Docker server
+	// will assume SIGKILL.
+	Signal Signal
+}
+
+// KillContainer sends a signal to a container, returning an error in case of
+// failure.
+//
+// See https://goo.gl/hkS9i8 for more details.
+func (c *Client) KillContainer(opts KillContainerOptions) error {
+	path := "/containers/" + opts.ID + "/kill" + "?" + queryString(opts)
+	resp, err := c.do("POST", path, doOptions{})
+	if err != nil {
+		if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound {
+			return &NoSuchContainer{ID: opts.ID}
+		}
+		return err
+	}
+	resp.Body.Close()
+	return nil
+}
+
+// RemoveContainerOptions encapsulates options to remove a container.
+//
+// See https://goo.gl/RQyX62 for more details.
+type RemoveContainerOptions struct {
+	// The ID of the container.
+	ID string `qs:"-"`
+
+	// A flag that indicates whether Docker should remove the volumes
+	// associated with the container.
+	RemoveVolumes bool `qs:"v"`
+
+	// A flag that indicates whether Docker should remove the container
+	// even if it is currently running.
+	Force bool
+}
+
+// RemoveContainer removes a container, returning an error in case of failure.
+//
+// See https://goo.gl/RQyX62 for more details.
+func (c *Client) RemoveContainer(opts RemoveContainerOptions) error {
+	path := "/containers/" + opts.ID + "?" + queryString(opts)
+	resp, err := c.do("DELETE", path, doOptions{})
+	if err != nil {
+		if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound {
+			return &NoSuchContainer{ID: opts.ID}
+		}
+		return err
+	}
+	resp.Body.Close()
+	return nil
+}
+
+// UploadToContainerOptions is the set of options that can be used when
+// uploading an archive into a container.
+//
+// See https://goo.gl/Ss97HW for more details.
+type UploadToContainerOptions struct {
+	InputStream          io.Reader `json:"-" qs:"-"`
+	Path                 string    `qs:"path"`
+	NoOverwriteDirNonDir bool      `qs:"noOverwriteDirNonDir"`
+}
+
+// UploadToContainer uploads a tar archive to be extracted to a path in the
+// filesystem of the container.
+//
+// See https://goo.gl/Ss97HW for more details.
+func (c *Client) UploadToContainer(id string, opts UploadToContainerOptions) error {
+	url := fmt.Sprintf("/containers/%s/archive?", id) + queryString(opts)
+
+	return c.stream("PUT", url, streamOptions{
+		in: opts.InputStream,
+	})
+}
+
+// DownloadFromContainerOptions is the set of options that can be used when
+// downloading resources from a container.
+//
+// See https://goo.gl/KnZJDX for more details.
+type DownloadFromContainerOptions struct {
+	OutputStream io.Writer `json:"-" qs:"-"`
+	Path         string    `qs:"path"`
+}
+
+// DownloadFromContainer downloads a tar archive of files or folders in a container.
+//
+// See https://goo.gl/KnZJDX for more details.
+func (c *Client) DownloadFromContainer(id string, opts DownloadFromContainerOptions) error {
+	url := fmt.Sprintf("/containers/%s/archive?", id) + queryString(opts)
+
+	return c.stream("GET", url, streamOptions{
+		setRawTerminal: true,
+		stdout:         opts.OutputStream,
+	})
+}
+
+// CopyFromContainerOptions has been DEPRECATED; please use DownloadFromContainerOptions along with DownloadFromContainer.
+//
+// See https://goo.gl/R2jevW for more details.
+type CopyFromContainerOptions struct {
+	OutputStream io.Writer `json:"-"`
+	Container    string    `json:"-"`
+	Resource     string
+}
+
+// CopyFromContainer has been DEPRECATED; please use DownloadFromContainerOptions along with DownloadFromContainer.
+//
+// See https://goo.gl/R2jevW for more details.
+func (c *Client) CopyFromContainer(opts CopyFromContainerOptions) error {
+	if opts.Container == "" {
+		return &NoSuchContainer{ID: opts.Container}
+	}
+	url := fmt.Sprintf("/containers/%s/copy", opts.Container)
+	resp, err := c.do("POST", url, doOptions{data: opts})
+	if err != nil {
+		if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound {
+			return &NoSuchContainer{ID: opts.Container}
+		}
+		return err
+	}
+	defer resp.Body.Close()
+	_, err = io.Copy(opts.OutputStream, resp.Body)
+	return err
+}
+
+// WaitContainer blocks until the given container stops, returning the exit
+// code of the container command.
+//
+// See https://goo.gl/Gc1rge for more details.
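+// Example (an illustrative sketch, not part of the upstream docs; client and
+// the container ID are assumed to exist):
+//
+//    exitCode, err := client.WaitContainer("8dfafdbc3a40")
+//    if err != nil {
+//        // handle the error
+//    }
+//    fmt.Printf("container exited with code %d\n", exitCode)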
+func (c *Client) WaitContainer(id string) (int, error) { + resp, err := c.do("POST", "/containers/"+id+"/wait", doOptions{}) + if err != nil { + if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound { + return 0, &NoSuchContainer{ID: id} + } + return 0, err + } + defer resp.Body.Close() + var r struct{ StatusCode int } + if err := json.NewDecoder(resp.Body).Decode(&r); err != nil { + return 0, err + } + return r.StatusCode, nil +} + +// CommitContainerOptions aggregates parameters to the CommitContainer method. +// +// See https://goo.gl/mqfoCw for more details. +type CommitContainerOptions struct { + Container string + Repository string `qs:"repo"` + Tag string + Message string `qs:"comment"` + Author string + Run *Config `qs:"-"` +} + +// CommitContainer creates a new image from a container's changes. +// +// See https://goo.gl/mqfoCw for more details. +func (c *Client) CommitContainer(opts CommitContainerOptions) (*Image, error) { + path := "/commit?" + queryString(opts) + resp, err := c.do("POST", path, doOptions{data: opts.Run}) + if err != nil { + if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound { + return nil, &NoSuchContainer{ID: opts.Container} + } + return nil, err + } + defer resp.Body.Close() + var image Image + if err := json.NewDecoder(resp.Body).Decode(&image); err != nil { + return nil, err + } + return &image, nil +} + +// AttachToContainerOptions is the set of options that can be used when +// attaching to a container. +// +// See https://goo.gl/NKpkFk for more details. +type AttachToContainerOptions struct { + Container string `qs:"-"` + InputStream io.Reader `qs:"-"` + OutputStream io.Writer `qs:"-"` + ErrorStream io.Writer `qs:"-"` + + // Get container logs, sending it to OutputStream. + Logs bool + + // Stream the response? + Stream bool + + // Attach to stdin, and use InputStream. + Stdin bool + + // Attach to stdout, and use OutputStream. + Stdout bool + + // Attach to stderr, and use ErrorStream. + Stderr bool + + // If set, after a successful connect, a sentinel will be sent and then the + // client will block on receive before continuing. + // + // It must be an unbuffered channel. Using a buffered channel can lead + // to unexpected behavior. + Success chan struct{} + + // Use raw terminal? Usually true when the container contains a TTY. + RawTerminal bool `qs:"-"` +} + +// AttachToContainer attaches to a container, using the given options. +// +// See https://goo.gl/NKpkFk for more details. +func (c *Client) AttachToContainer(opts AttachToContainerOptions) error { + cw, err := c.AttachToContainerNonBlocking(opts) + if err != nil { + return err + } + return cw.Wait() +} + +// AttachToContainerNonBlocking attaches to a container, using the given options. +// This function does not block. +// +// See https://goo.gl/NKpkFk for more details. +func (c *Client) AttachToContainerNonBlocking(opts AttachToContainerOptions) (CloseWaiter, error) { + if opts.Container == "" { + return nil, &NoSuchContainer{ID: opts.Container} + } + path := "/containers/" + opts.Container + "/attach?" + queryString(opts) + return c.hijack("POST", path, hijackOptions{ + success: opts.Success, + setRawTerminal: opts.RawTerminal, + in: opts.InputStream, + stdout: opts.OutputStream, + stderr: opts.ErrorStream, + }) +} + +// LogsOptions represents the set of options used when getting logs from a +// container. +// +// See https://goo.gl/yl8PGm for more details. 
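+// Example (an illustrative sketch, not part of the upstream docs; assumes an
+// already-configured *Client named client, streaming the last ten log lines
+// into an in-memory buffer):
+//
+//    var buf bytes.Buffer
+//    err := client.Logs(LogsOptions{
+//        Container:    "8dfafdbc3a40",
+//        OutputStream: &buf,
+//        Stdout:       true,
+//        Stderr:       true,
+//        Tail:         "10",
+//    })
+//    if err != nil {
+//        // handle the error
+//    }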
+type LogsOptions struct {
+	Container    string    `qs:"-"`
+	OutputStream io.Writer `qs:"-"`
+	ErrorStream  io.Writer `qs:"-"`
+	Follow       bool
+	Stdout       bool
+	Stderr       bool
+	Since        int64
+	Timestamps   bool
+	Tail         string
+
+	// Use raw terminal? Usually true when the container contains a TTY.
+	RawTerminal bool `qs:"-"`
+}
+
+// Logs gets stdout and stderr logs from the specified container.
+//
+// See https://goo.gl/yl8PGm for more details.
+func (c *Client) Logs(opts LogsOptions) error {
+	if opts.Container == "" {
+		return &NoSuchContainer{ID: opts.Container}
+	}
+	if opts.Tail == "" {
+		opts.Tail = "all"
+	}
+	path := "/containers/" + opts.Container + "/logs?" + queryString(opts)
+	return c.stream("GET", path, streamOptions{
+		setRawTerminal: opts.RawTerminal,
+		stdout:         opts.OutputStream,
+		stderr:         opts.ErrorStream,
+	})
+}
+
+// ResizeContainerTTY resizes the terminal to the given height and width.
+//
+// See https://goo.gl/xERhCc for more details.
+func (c *Client) ResizeContainerTTY(id string, height, width int) error {
+	params := make(url.Values)
+	params.Set("h", strconv.Itoa(height))
+	params.Set("w", strconv.Itoa(width))
+	resp, err := c.do("POST", "/containers/"+id+"/resize?"+params.Encode(), doOptions{})
+	if err != nil {
+		return err
+	}
+	resp.Body.Close()
+	return nil
+}
+
+// ExportContainerOptions is the set of parameters to the ExportContainer
+// method.
+//
+// See https://goo.gl/dOkTyk for more details.
+type ExportContainerOptions struct {
+	ID           string
+	OutputStream io.Writer
+}
+
+// ExportContainer exports the contents of the container with the given ID as
+// a tar archive, writing it to OutputStream.
+//
+// See https://goo.gl/dOkTyk for more details.
+func (c *Client) ExportContainer(opts ExportContainerOptions) error {
+	if opts.ID == "" {
+		return &NoSuchContainer{ID: opts.ID}
+	}
+	url := fmt.Sprintf("/containers/%s/export", opts.ID)
+	return c.stream("GET", url, streamOptions{
+		setRawTerminal: true,
+		stdout:         opts.OutputStream,
+	})
+}
+
+// NoSuchContainer is the error returned when a given container does not exist.
+type NoSuchContainer struct {
+	ID  string
+	Err error
+}
+
+func (err *NoSuchContainer) Error() string {
+	if err.Err != nil {
+		return err.Err.Error()
+	}
+	return "No such container: " + err.ID
+}
+
+// ContainerAlreadyRunning is the error returned when a given container is
+// already running.
+type ContainerAlreadyRunning struct {
+	ID string
+}
+
+func (err *ContainerAlreadyRunning) Error() string {
+	return "Container already running: " + err.ID
+}
+
+// ContainerNotRunning is the error returned when a given container is not
+// running.
+type ContainerNotRunning struct {
+	ID string
+}
+
+func (err *ContainerNotRunning) Error() string {
+	return "Container not running: " + err.ID
+}
diff --git a/vendor/github.com/fsouza/go-dockerclient/container_test.go b/vendor/github.com/fsouza/go-dockerclient/container_test.go
new file mode 100644
index 000000000..1b05b27bb
--- /dev/null
+++ b/vendor/github.com/fsouza/go-dockerclient/container_test.go
@@ -0,0 +1,2263 @@
+// Copyright 2015 go-dockerclient authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+ +package docker + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io/ioutil" + "net" + "net/http" + "net/http/httptest" + "net/url" + "os" + "reflect" + "regexp" + "runtime" + "strconv" + "strings" + "testing" + "time" + + "github.com/fsouza/go-dockerclient/external/github.com/hashicorp/go-cleanhttp" +) + +func TestStateString(t *testing.T) { + started := time.Now().Add(-3 * time.Hour) + var tests = []struct { + input State + expected string + }{ + {State{Running: true, Paused: true}, "^paused$"}, + {State{Running: true, StartedAt: started}, "^Up 3h.*$"}, + {State{Running: false, ExitCode: 7}, "^Exit 7$"}, + } + for _, tt := range tests { + re := regexp.MustCompile(tt.expected) + if got := tt.input.String(); !re.MatchString(got) { + t.Errorf("State.String(): wrong result. Want %q. Got %q.", tt.expected, got) + } + } +} + +func TestListContainers(t *testing.T) { + jsonContainers := `[ + { + "Id": "8dfafdbc3a40", + "Image": "base:latest", + "Command": "echo 1", + "Created": 1367854155, + "Ports":[{"PrivatePort": 2222, "PublicPort": 3333, "Type": "tcp"}], + "Status": "Exit 0" + }, + { + "Id": "9cd87474be90", + "Image": "base:latest", + "Command": "echo 222222", + "Created": 1367854155, + "Ports":[{"PrivatePort": 2222, "PublicPort": 3333, "Type": "tcp"}], + "Status": "Exit 0" + }, + { + "Id": "3176a2479c92", + "Image": "base:latest", + "Command": "echo 3333333333333333", + "Created": 1367854154, + "Ports":[{"PrivatePort": 2221, "PublicPort": 3331, "Type": "tcp"}], + "Status": "Exit 0" + }, + { + "Id": "4cb07b47f9fb", + "Image": "base:latest", + "Command": "echo 444444444444444444444444444444444", + "Ports":[{"PrivatePort": 2223, "PublicPort": 3332, "Type": "tcp"}], + "Created": 1367854152, + "Status": "Exit 0" + } +]` + var expected []APIContainers + err := json.Unmarshal([]byte(jsonContainers), &expected) + if err != nil { + t.Fatal(err) + } + client := newTestClient(&FakeRoundTripper{message: jsonContainers, status: http.StatusOK}) + containers, err := client.ListContainers(ListContainersOptions{}) + if err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(containers, expected) { + t.Errorf("ListContainers: Expected %#v. 
Got %#v.", expected, containers) + } +} + +func TestListContainersParams(t *testing.T) { + var tests = []struct { + input ListContainersOptions + params map[string][]string + }{ + {ListContainersOptions{}, map[string][]string{}}, + {ListContainersOptions{All: true}, map[string][]string{"all": {"1"}}}, + {ListContainersOptions{All: true, Limit: 10}, map[string][]string{"all": {"1"}, "limit": {"10"}}}, + { + ListContainersOptions{All: true, Limit: 10, Since: "adf9983", Before: "abdeef"}, + map[string][]string{"all": {"1"}, "limit": {"10"}, "since": {"adf9983"}, "before": {"abdeef"}}, + }, + { + ListContainersOptions{Filters: map[string][]string{"status": {"paused", "running"}}}, + map[string][]string{"filters": {"{\"status\":[\"paused\",\"running\"]}"}}, + }, + { + ListContainersOptions{All: true, Filters: map[string][]string{"exited": {"0"}, "status": {"exited"}}}, + map[string][]string{"all": {"1"}, "filters": {"{\"exited\":[\"0\"],\"status\":[\"exited\"]}"}}, + }, + } + fakeRT := &FakeRoundTripper{message: "[]", status: http.StatusOK} + client := newTestClient(fakeRT) + u, _ := url.Parse(client.getURL("/containers/json")) + for _, tt := range tests { + if _, err := client.ListContainers(tt.input); err != nil { + t.Error(err) + } + got := map[string][]string(fakeRT.requests[0].URL.Query()) + if !reflect.DeepEqual(got, tt.params) { + t.Errorf("Expected %#v, got %#v.", tt.params, got) + } + if path := fakeRT.requests[0].URL.Path; path != u.Path { + t.Errorf("Wrong path on request. Want %q. Got %q.", u.Path, path) + } + if meth := fakeRT.requests[0].Method; meth != "GET" { + t.Errorf("Wrong HTTP method. Want GET. Got %s.", meth) + } + fakeRT.Reset() + } +} + +func TestListContainersFailure(t *testing.T) { + var tests = []struct { + status int + message string + }{ + {400, "bad parameter"}, + {500, "internal server error"}, + } + for _, tt := range tests { + client := newTestClient(&FakeRoundTripper{message: tt.message, status: tt.status}) + expected := Error{Status: tt.status, Message: tt.message} + containers, err := client.ListContainers(ListContainersOptions{}) + if !reflect.DeepEqual(expected, *err.(*Error)) { + t.Errorf("Wrong error in ListContainers. Want %#v. Got %#v.", expected, err) + } + if len(containers) > 0 { + t.Errorf("ListContainers failure. Expected empty list. 
Got %#v.", containers) + } + } +} + +func TestInspectContainer(t *testing.T) { + jsonContainer := `{ + "Id": "4fa6e0f0c6786287e131c3852c58a2e01cc697a68231826813597e4994f1d6e2", + "AppArmorProfile": "Profile", + "Created": "2013-05-07T14:51:42.087658+02:00", + "Path": "date", + "Args": [], + "Config": { + "Hostname": "4fa6e0f0c678", + "User": "", + "Memory": 17179869184, + "MemorySwap": 34359738368, + "AttachStdin": false, + "AttachStdout": true, + "AttachStderr": true, + "PortSpecs": null, + "Tty": false, + "OpenStdin": false, + "StdinOnce": false, + "Env": null, + "Cmd": [ + "date" + ], + "Image": "base", + "Volumes": {}, + "VolumesFrom": "", + "SecurityOpt": [ + "label:user:USER" + ], + "Ulimits": [ + { "Name": "nofile", "Soft": 1024, "Hard": 2048 } + ] + }, + "State": { + "Running": false, + "Pid": 0, + "ExitCode": 0, + "StartedAt": "2013-05-07T14:51:42.087658+02:00", + "Ghost": false + }, + "Node": { + "ID": "4I4E:QR4I:Z733:QEZK:5X44:Q4T7:W2DD:JRDY:KB2O:PODO:Z5SR:XRB6", + "IP": "192.168.99.105", + "Addra": "192.168.99.105:2376", + "Name": "node-01", + "Cpus": 4, + "Memory": 1048436736, + "Labels": { + "executiondriver": "native-0.2", + "kernelversion": "3.18.5-tinycore64", + "operatingsystem": "Boot2Docker 1.5.0 (TCL 5.4); master : a66bce5 - Tue Feb 10 23:31:27 UTC 2015", + "provider": "virtualbox", + "storagedriver": "aufs" + } + }, + "Image": "b750fe79269d2ec9a3c593ef05b4332b1d1a02a62b4accb2c21d589ff2f5f2dc", + "NetworkSettings": { + "IpAddress": "", + "IpPrefixLen": 0, + "Gateway": "", + "Bridge": "", + "PortMapping": null + }, + "SysInitPath": "/home/kitty/go/src/github.com/dotcloud/docker/bin/docker", + "ResolvConfPath": "/etc/resolv.conf", + "Volumes": {}, + "HostConfig": { + "Binds": null, + "ContainerIDFile": "", + "LxcConf": [], + "Privileged": false, + "PortBindings": { + "80/tcp": [ + { + "HostIp": "0.0.0.0", + "HostPort": "49153" + } + ] + }, + "Links": null, + "PublishAllPorts": false, + "CgroupParent": "/mesos", + "Memory": 17179869184, + "MemorySwap": 34359738368, + "GroupAdd": ["fake", "12345"], + "OomScoreAdj": 642 + } +}` + var expected Container + err := json.Unmarshal([]byte(jsonContainer), &expected) + if err != nil { + t.Fatal(err) + } + fakeRT := &FakeRoundTripper{message: jsonContainer, status: http.StatusOK} + client := newTestClient(fakeRT) + id := "4fa6e0f0c678" + container, err := client.InspectContainer(id) + if err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(*container, expected) { + t.Errorf("InspectContainer(%q): Expected %#v. Got %#v.", id, expected, container) + } + expectedURL, _ := url.Parse(client.getURL("/containers/4fa6e0f0c678/json")) + if gotPath := fakeRT.requests[0].URL.Path; gotPath != expectedURL.Path { + t.Errorf("InspectContainer(%q): Wrong path in request. Want %q. 
Got %q.", id, expectedURL.Path, gotPath) + } +} + +func TestInspectContainerNetwork(t *testing.T) { + jsonContainer := `{ + "Id": "81e1bbe20b5508349e1c804eb08b7b6ca8366751dbea9f578b3ea0773fa66c1c", + "Created": "2015-11-12T14:54:04.791485659Z", + "Path": "consul-template", + "Args": [ + "-config=/tmp/haproxy.json", + "-consul=192.168.99.120:8500" + ], + "State": { + "Status": "running", + "Running": true, + "Paused": false, + "Restarting": false, + "OOMKilled": false, + "Dead": false, + "Pid": 3196, + "ExitCode": 0, + "Error": "", + "StartedAt": "2015-11-12T14:54:05.026747471Z", + "FinishedAt": "0001-01-01T00:00:00Z" + }, + "Image": "4921c5917fc117df3dec32f4c1976635dc6c56ccd3336fe1db3477f950e78bf7", + "ResolvConfPath": "/mnt/sda1/var/lib/docker/containers/81e1bbe20b5508349e1c804eb08b7b6ca8366751dbea9f578b3ea0773fa66c1c/resolv.conf", + "HostnamePath": "/mnt/sda1/var/lib/docker/containers/81e1bbe20b5508349e1c804eb08b7b6ca8366751dbea9f578b3ea0773fa66c1c/hostname", + "HostsPath": "/mnt/sda1/var/lib/docker/containers/81e1bbe20b5508349e1c804eb08b7b6ca8366751dbea9f578b3ea0773fa66c1c/hosts", + "LogPath": "/mnt/sda1/var/lib/docker/containers/81e1bbe20b5508349e1c804eb08b7b6ca8366751dbea9f578b3ea0773fa66c1c/81e1bbe20b5508349e1c804eb08b7b6ca8366751dbea9f578b3ea0773fa66c1c-json.log", + "Node": { + "ID": "AUIB:LFOT:3LSF:SCFS:OYDQ:NLXD:JZNE:4INI:3DRC:ZFBB:GWCY:DWJK", + "IP": "192.168.99.121", + "Addr": "192.168.99.121:2376", + "Name": "swl-demo1", + "Cpus": 1, + "Memory": 2099945472, + "Labels": { + "executiondriver": "native-0.2", + "kernelversion": "4.1.12-boot2docker", + "operatingsystem": "Boot2Docker 1.9.0 (TCL 6.4); master : 16e4a2a - Tue Nov 3 19:49:22 UTC 2015", + "provider": "virtualbox", + "storagedriver": "aufs" + } + }, + "Name": "/docker-proxy.swl-demo1", + "RestartCount": 0, + "Driver": "aufs", + "ExecDriver": "native-0.2", + "MountLabel": "", + "ProcessLabel": "", + "AppArmorProfile": "", + "ExecIDs": null, + "HostConfig": { + "Binds": null, + "ContainerIDFile": "", + "LxcConf": [], + "Memory": 0, + "MemoryReservation": 0, + "MemorySwap": 0, + "KernelMemory": 0, + "CpuShares": 0, + "CpuPeriod": 0, + "CpusetCpus": "", + "CpusetMems": "", + "CpuQuota": 0, + "BlkioWeight": 0, + "OomKillDisable": false, + "MemorySwappiness": -1, + "Privileged": false, + "PortBindings": { + "443/tcp": [ + { + "HostIp": "", + "HostPort": "443" + } + ] + }, + "Links": null, + "PublishAllPorts": false, + "Dns": null, + "DnsOptions": null, + "DnsSearch": null, + "ExtraHosts": null, + "VolumesFrom": null, + "Devices": [], + "NetworkMode": "swl-net", + "IpcMode": "", + "PidMode": "", + "UTSMode": "", + "CapAdd": null, + "CapDrop": null, + "GroupAdd": null, + "RestartPolicy": { + "Name": "no", + "MaximumRetryCount": 0 + }, + "SecurityOpt": null, + "ReadonlyRootfs": false, + "Ulimits": null, + "LogConfig": { + "Type": "json-file", + "Config": {} + }, + "CgroupParent": "", + "ConsoleSize": [ + 0, + 0 + ], + "VolumeDriver": "" + }, + "GraphDriver": { + "Name": "aufs", + "Data": null + }, + "Mounts": [], + "Config": { + "Hostname": "81e1bbe20b55", + "Domainname": "", + "User": "", + "AttachStdin": false, + "AttachStdout": false, + "AttachStderr": false, + "ExposedPorts": { + "443/tcp": {} + }, + "Tty": false, + "OpenStdin": false, + "StdinOnce": false, + "Env": [ + "DOMAIN=local.auto", + "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin", + "CONSUL_TEMPLATE_VERSION=0.11.1" + ], + "Cmd": [ + "-consul=192.168.99.120:8500" + ], + "Image": "docker-proxy:latest", + "Volumes": null, + "WorkingDir": "", + 
"Entrypoint": [ + "consul-template", + "-config=/tmp/haproxy.json" + ], + "OnBuild": null, + "Labels": {}, + "StopSignal": "SIGTERM" + }, + "NetworkSettings": { + "Bridge": "", + "SandboxID": "c6b903dc5c1a96113a22dbc44709e30194079bd2d262eea1eb4f38d85821f6e1", + "HairpinMode": false, + "LinkLocalIPv6Address": "", + "LinkLocalIPv6PrefixLen": 0, + "Ports": { + "443/tcp": [ + { + "HostIp": "192.168.99.121", + "HostPort": "443" + } + ] + }, + "SandboxKey": "/var/run/docker/netns/c6b903dc5c1a", + "SecondaryIPAddresses": null, + "SecondaryIPv6Addresses": null, + "EndpointID": "", + "Gateway": "", + "GlobalIPv6Address": "", + "GlobalIPv6PrefixLen": 0, + "IPAddress": "", + "IPPrefixLen": 0, + "IPv6Gateway": "", + "MacAddress": "", + "Networks": { + "swl-net": { + "EndpointID": "683e3092275782a53c3b0968cc7e3a10f23264022ded9cb20490902f96fc5981", + "Gateway": "", + "IPAddress": "10.0.0.3", + "IPPrefixLen": 24, + "IPv6Gateway": "", + "GlobalIPv6Address": "", + "GlobalIPv6PrefixLen": 0, + "MacAddress": "02:42:0a:00:00:03" + } + } + } +}` + + fakeRT := &FakeRoundTripper{message: jsonContainer, status: http.StatusOK} + client := newTestClient(fakeRT) + id := "81e1bbe20b55" + exp := "10.0.0.3" + + container, err := client.InspectContainer(id) + if err != nil { + t.Fatal(err) + } + + s := reflect.Indirect(reflect.ValueOf(container.NetworkSettings)) + networks := s.FieldByName("Networks") + if networks.IsValid() { + var ip string + for _, net := range networks.MapKeys() { + if net.Interface().(string) == container.HostConfig.NetworkMode { + ip = networks.MapIndex(net).FieldByName("IPAddress").Interface().(string) + t.Logf("%s %v", net, ip) + } + } + if ip != exp { + t.Errorf("InspectContainerNetworks(%q): Expected %#v. Got %#v.", id, exp, ip) + } + } else { + t.Errorf("InspectContainerNetworks(%q): No method Networks for NetworkSettings", id) + } + +} + +func TestInspectContainerNegativeSwap(t *testing.T) { + jsonContainer := `{ + "Id": "4fa6e0f0c6786287e131c3852c58a2e01cc697a68231826813597e4994f1d6e2", + "Created": "2013-05-07T14:51:42.087658+02:00", + "Path": "date", + "Args": [], + "Config": { + "Hostname": "4fa6e0f0c678", + "User": "", + "Memory": 17179869184, + "MemorySwap": -1, + "AttachStdin": false, + "AttachStdout": true, + "AttachStderr": true, + "PortSpecs": null, + "Tty": false, + "OpenStdin": false, + "StdinOnce": false, + "Env": null, + "Cmd": [ + "date" + ], + "Image": "base", + "Volumes": {}, + "VolumesFrom": "" + }, + "State": { + "Running": false, + "Pid": 0, + "ExitCode": 0, + "StartedAt": "2013-05-07T14:51:42.087658+02:00", + "Ghost": false + }, + "Image": "b750fe79269d2ec9a3c593ef05b4332b1d1a02a62b4accb2c21d589ff2f5f2dc", + "NetworkSettings": { + "IpAddress": "", + "IpPrefixLen": 0, + "Gateway": "", + "Bridge": "", + "PortMapping": null + }, + "SysInitPath": "/home/kitty/go/src/github.com/dotcloud/docker/bin/docker", + "ResolvConfPath": "/etc/resolv.conf", + "Volumes": {}, + "HostConfig": { + "Binds": null, + "ContainerIDFile": "", + "LxcConf": [], + "Privileged": false, + "PortBindings": { + "80/tcp": [ + { + "HostIp": "0.0.0.0", + "HostPort": "49153" + } + ] + }, + "Links": null, + "PublishAllPorts": false + } +}` + var expected Container + err := json.Unmarshal([]byte(jsonContainer), &expected) + if err != nil { + t.Fatal(err) + } + fakeRT := &FakeRoundTripper{message: jsonContainer, status: http.StatusOK} + client := newTestClient(fakeRT) + id := "4fa6e0f0c678" + container, err := client.InspectContainer(id) + if err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(*container, 
expected) {
+		t.Errorf("InspectContainer(%q): Expected %#v. Got %#v.", id, expected, container)
+	}
+	expectedURL, _ := url.Parse(client.getURL("/containers/4fa6e0f0c678/json"))
+	if gotPath := fakeRT.requests[0].URL.Path; gotPath != expectedURL.Path {
+		t.Errorf("InspectContainer(%q): Wrong path in request. Want %q. Got %q.", id, expectedURL.Path, gotPath)
+	}
+}
+
+func TestInspectContainerFailure(t *testing.T) {
+	client := newTestClient(&FakeRoundTripper{message: "server error", status: 500})
+	expected := Error{Status: 500, Message: "server error"}
+	container, err := client.InspectContainer("abe033")
+	if container != nil {
+		t.Errorf("InspectContainer: Expected <nil> container, got %#v", container)
+	}
+	if !reflect.DeepEqual(expected, *err.(*Error)) {
+		t.Errorf("InspectContainer: Wrong error information. Want %#v. Got %#v.", expected, err)
+	}
+}
+
+func TestInspectContainerNotFound(t *testing.T) {
+	client := newTestClient(&FakeRoundTripper{message: "no such container", status: 404})
+	container, err := client.InspectContainer("abe033")
+	if container != nil {
+		t.Errorf("InspectContainer: Expected <nil> container, got %#v", container)
+	}
+	expected := &NoSuchContainer{ID: "abe033"}
+	if !reflect.DeepEqual(err, expected) {
+		t.Errorf("InspectContainer: Wrong error information. Want %#v. Got %#v.", expected, err)
+	}
+}
+
+func TestContainerChanges(t *testing.T) {
+	jsonChanges := `[
+     {
+             "Path":"/dev",
+             "Kind":0
+     },
+     {
+             "Path":"/dev/kmsg",
+             "Kind":1
+     },
+     {
+             "Path":"/test",
+             "Kind":1
+     }
+]`
+	var expected []Change
+	err := json.Unmarshal([]byte(jsonChanges), &expected)
+	if err != nil {
+		t.Fatal(err)
+	}
+	fakeRT := &FakeRoundTripper{message: jsonChanges, status: http.StatusOK}
+	client := newTestClient(fakeRT)
+	id := "4fa6e0f0c678"
+	changes, err := client.ContainerChanges(id)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if !reflect.DeepEqual(changes, expected) {
+		t.Errorf("ContainerChanges(%q): Expected %#v. Got %#v.", id, expected, changes)
+	}
+	expectedURL, _ := url.Parse(client.getURL("/containers/4fa6e0f0c678/changes"))
+	if gotPath := fakeRT.requests[0].URL.Path; gotPath != expectedURL.Path {
+		t.Errorf("ContainerChanges(%q): Wrong path in request. Want %q. Got %q.", id, expectedURL.Path, gotPath)
+	}
+}
+
+func TestContainerChangesFailure(t *testing.T) {
+	client := newTestClient(&FakeRoundTripper{message: "server error", status: 500})
+	expected := Error{Status: 500, Message: "server error"}
+	changes, err := client.ContainerChanges("abe033")
+	if changes != nil {
+		t.Errorf("ContainerChanges: Expected <nil> changes, got %#v", changes)
+	}
+	if !reflect.DeepEqual(expected, *err.(*Error)) {
+		t.Errorf("ContainerChanges: Wrong error information. Want %#v. Got %#v.", expected, err)
+	}
+}
+
+func TestContainerChangesNotFound(t *testing.T) {
+	client := newTestClient(&FakeRoundTripper{message: "no such container", status: 404})
+	changes, err := client.ContainerChanges("abe033")
+	if changes != nil {
+		t.Errorf("ContainerChanges: Expected <nil> changes, got %#v", changes)
+	}
+	expected := &NoSuchContainer{ID: "abe033"}
+	if !reflect.DeepEqual(err, expected) {
+		t.Errorf("ContainerChanges: Wrong error information. Want %#v. Got %#v.", expected, err)
+	}
+}
+
+func TestCreateContainer(t *testing.T) {
+	jsonContainer := `{
+             "Id": "4fa6e0f0c6786287e131c3852c58a2e01cc697a68231826813597e4994f1d6e2",
+	     "Warnings": []
+}`
+	var expected Container
+	err := json.Unmarshal([]byte(jsonContainer), &expected)
+	if err != nil {
+		t.Fatal(err)
+	}
+	fakeRT := &FakeRoundTripper{message: jsonContainer, status: http.StatusOK}
+	client := newTestClient(fakeRT)
+	config := Config{AttachStdout: true, AttachStdin: true}
+	opts := CreateContainerOptions{Name: "TestCreateContainer", Config: &config}
+	container, err := client.CreateContainer(opts)
+	if err != nil {
+		t.Fatal(err)
+	}
+	id := "4fa6e0f0c6786287e131c3852c58a2e01cc697a68231826813597e4994f1d6e2"
+	if container.ID != id {
+		t.Errorf("CreateContainer: wrong ID. Want %q. Got %q.", id, container.ID)
+	}
+	req := fakeRT.requests[0]
+	if req.Method != "POST" {
+		t.Errorf("CreateContainer: wrong HTTP method. Want %q. Got %q.", "POST", req.Method)
+	}
+	expectedURL, _ := url.Parse(client.getURL("/containers/create"))
+	if gotPath := req.URL.Path; gotPath != expectedURL.Path {
+		t.Errorf("CreateContainer: Wrong path in request. Want %q. Got %q.", expectedURL.Path, gotPath)
+	}
+	var gotBody Config
+	err = json.NewDecoder(req.Body).Decode(&gotBody)
+	if err != nil {
+		t.Fatal(err)
+	}
+}
+
+func TestCreateContainerImageNotFound(t *testing.T) {
+	client := newTestClient(&FakeRoundTripper{message: "No such image", status: http.StatusNotFound})
+	config := Config{AttachStdout: true, AttachStdin: true}
+	container, err := client.CreateContainer(CreateContainerOptions{Config: &config})
+	if container != nil {
+		t.Errorf("CreateContainer: expected <nil> container, got %#v.", container)
+	}
+	if !reflect.DeepEqual(err, ErrNoSuchImage) {
+		t.Errorf("CreateContainer: Wrong error type. Want %#v. Got %#v.", ErrNoSuchImage, err)
+	}
+}
+
+func TestCreateContainerDuplicateName(t *testing.T) {
+	client := newTestClient(&FakeRoundTripper{message: "No such image", status: http.StatusConflict})
+	config := Config{AttachStdout: true, AttachStdin: true}
+	container, err := client.CreateContainer(CreateContainerOptions{Config: &config})
+	if container != nil {
+		t.Errorf("CreateContainer: expected <nil> container, got %#v.", container)
+	}
+	if err != ErrContainerAlreadyExists {
+		t.Errorf("CreateContainer: Wrong error type. Want %#v. Got %#v.", ErrContainerAlreadyExists, err)
+	}
+}
+
+func TestCreateContainerWithHostConfig(t *testing.T) {
+	fakeRT := &FakeRoundTripper{message: "{}", status: http.StatusOK}
+	client := newTestClient(fakeRT)
+	config := Config{}
+	hostConfig := HostConfig{PublishAllPorts: true}
+	opts := CreateContainerOptions{Name: "TestCreateContainerWithHostConfig", Config: &config, HostConfig: &hostConfig}
+	_, err := client.CreateContainer(opts)
+	if err != nil {
+		t.Fatal(err)
+	}
+	req := fakeRT.requests[0]
+	var gotBody map[string]interface{}
+	err = json.NewDecoder(req.Body).Decode(&gotBody)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if _, ok := gotBody["HostConfig"]; !ok {
+		t.Errorf("CreateContainer: wrong body. HostConfig was not serialized")
+	}
+}
+
+func TestStartContainer(t *testing.T) {
+	fakeRT := &FakeRoundTripper{message: "", status: http.StatusOK}
+	client := newTestClient(fakeRT)
+	id := "4fa6e0f0c6786287e131c3852c58a2e01cc697a68231826813597e4994f1d6e2"
+	err := client.StartContainer(id, &HostConfig{})
+	if err != nil {
+		t.Fatal(err)
+	}
+	req := fakeRT.requests[0]
+	if req.Method != "POST" {
+		t.Errorf("StartContainer(%q): wrong HTTP method. Want %q.
Got %q.", id, "POST", req.Method) + } + expectedURL, _ := url.Parse(client.getURL("/containers/" + id + "/start")) + if gotPath := req.URL.Path; gotPath != expectedURL.Path { + t.Errorf("StartContainer(%q): Wrong path in request. Want %q. Got %q.", id, expectedURL.Path, gotPath) + } + expectedContentType := "application/json" + if contentType := req.Header.Get("Content-Type"); contentType != expectedContentType { + t.Errorf("StartContainer(%q): Wrong content-type in request. Want %q. Got %q.", id, expectedContentType, contentType) + } +} + +func TestStartContainerNilHostConfig(t *testing.T) { + fakeRT := &FakeRoundTripper{message: "", status: http.StatusOK} + client := newTestClient(fakeRT) + id := "4fa6e0f0c6786287e131c3852c58a2e01cc697a68231826813597e4994f1d6e2" + err := client.StartContainer(id, nil) + if err != nil { + t.Fatal(err) + } + req := fakeRT.requests[0] + if req.Method != "POST" { + t.Errorf("StartContainer(%q): wrong HTTP method. Want %q. Got %q.", id, "POST", req.Method) + } + expectedURL, _ := url.Parse(client.getURL("/containers/" + id + "/start")) + if gotPath := req.URL.Path; gotPath != expectedURL.Path { + t.Errorf("StartContainer(%q): Wrong path in request. Want %q. Got %q.", id, expectedURL.Path, gotPath) + } + expectedContentType := "application/json" + if contentType := req.Header.Get("Content-Type"); contentType != expectedContentType { + t.Errorf("StartContainer(%q): Wrong content-type in request. Want %q. Got %q.", id, expectedContentType, contentType) + } + var buf [4]byte + req.Body.Read(buf[:]) + if string(buf[:]) != "null" { + t.Errorf("Startcontainer(%q): Wrong body. Want null. Got %s", id, buf[:]) + } +} + +func TestStartContainerNotFound(t *testing.T) { + client := newTestClient(&FakeRoundTripper{message: "no such container", status: http.StatusNotFound}) + err := client.StartContainer("a2344", &HostConfig{}) + expected := &NoSuchContainer{ID: "a2344", Err: err.(*NoSuchContainer).Err} + if !reflect.DeepEqual(err, expected) { + t.Errorf("StartContainer: Wrong error returned. Want %#v. Got %#v.", expected, err) + } +} + +func TestStartContainerAlreadyRunning(t *testing.T) { + client := newTestClient(&FakeRoundTripper{message: "container already running", status: http.StatusNotModified}) + err := client.StartContainer("a2334", &HostConfig{}) + expected := &ContainerAlreadyRunning{ID: "a2334"} + if !reflect.DeepEqual(err, expected) { + t.Errorf("StartContainer: Wrong error returned. Want %#v. Got %#v.", expected, err) + } +} + +func TestStopContainer(t *testing.T) { + fakeRT := &FakeRoundTripper{message: "", status: http.StatusNoContent} + client := newTestClient(fakeRT) + id := "4fa6e0f0c6786287e131c3852c58a2e01cc697a68231826813597e4994f1d6e2" + err := client.StopContainer(id, 10) + if err != nil { + t.Fatal(err) + } + req := fakeRT.requests[0] + if req.Method != "POST" { + t.Errorf("StopContainer(%q, 10): wrong HTTP method. Want %q. Got %q.", id, "POST", req.Method) + } + expectedURL, _ := url.Parse(client.getURL("/containers/" + id + "/stop")) + if gotPath := req.URL.Path; gotPath != expectedURL.Path { + t.Errorf("StopContainer(%q, 10): Wrong path in request. Want %q. Got %q.", id, expectedURL.Path, gotPath) + } +} + +func TestStopContainerNotFound(t *testing.T) { + client := newTestClient(&FakeRoundTripper{message: "no such container", status: http.StatusNotFound}) + err := client.StopContainer("a2334", 10) + expected := &NoSuchContainer{ID: "a2334"} + if !reflect.DeepEqual(err, expected) { + t.Errorf("StopContainer: Wrong error returned. Want %#v. 
Got %#v.", expected, err) + } +} + +func TestStopContainerNotRunning(t *testing.T) { + client := newTestClient(&FakeRoundTripper{message: "container not running", status: http.StatusNotModified}) + err := client.StopContainer("a2334", 10) + expected := &ContainerNotRunning{ID: "a2334"} + if !reflect.DeepEqual(err, expected) { + t.Errorf("StopContainer: Wrong error returned. Want %#v. Got %#v.", expected, err) + } +} + +func TestRestartContainer(t *testing.T) { + fakeRT := &FakeRoundTripper{message: "", status: http.StatusNoContent} + client := newTestClient(fakeRT) + id := "4fa6e0f0c6786287e131c3852c58a2e01cc697a68231826813597e4994f1d6e2" + err := client.RestartContainer(id, 10) + if err != nil { + t.Fatal(err) + } + req := fakeRT.requests[0] + if req.Method != "POST" { + t.Errorf("RestartContainer(%q, 10): wrong HTTP method. Want %q. Got %q.", id, "POST", req.Method) + } + expectedURL, _ := url.Parse(client.getURL("/containers/" + id + "/restart")) + if gotPath := req.URL.Path; gotPath != expectedURL.Path { + t.Errorf("RestartContainer(%q, 10): Wrong path in request. Want %q. Got %q.", id, expectedURL.Path, gotPath) + } +} + +func TestRestartContainerNotFound(t *testing.T) { + client := newTestClient(&FakeRoundTripper{message: "no such container", status: http.StatusNotFound}) + err := client.RestartContainer("a2334", 10) + expected := &NoSuchContainer{ID: "a2334"} + if !reflect.DeepEqual(err, expected) { + t.Errorf("RestartContainer: Wrong error returned. Want %#v. Got %#v.", expected, err) + } +} + +func TestPauseContainer(t *testing.T) { + fakeRT := &FakeRoundTripper{message: "", status: http.StatusNoContent} + client := newTestClient(fakeRT) + id := "4fa6e0f0c6786287e131c3852c58a2e01cc697a68231826813597e4994f1d6e2" + err := client.PauseContainer(id) + if err != nil { + t.Fatal(err) + } + req := fakeRT.requests[0] + if req.Method != "POST" { + t.Errorf("PauseContainer(%q): wrong HTTP method. Want %q. Got %q.", id, "POST", req.Method) + } + expectedURL, _ := url.Parse(client.getURL("/containers/" + id + "/pause")) + if gotPath := req.URL.Path; gotPath != expectedURL.Path { + t.Errorf("PauseContainer(%q): Wrong path in request. Want %q. Got %q.", id, expectedURL.Path, gotPath) + } +} + +func TestPauseContainerNotFound(t *testing.T) { + client := newTestClient(&FakeRoundTripper{message: "no such container", status: http.StatusNotFound}) + err := client.PauseContainer("a2334") + expected := &NoSuchContainer{ID: "a2334"} + if !reflect.DeepEqual(err, expected) { + t.Errorf("PauseContainer: Wrong error returned. Want %#v. Got %#v.", expected, err) + } +} + +func TestUnpauseContainer(t *testing.T) { + fakeRT := &FakeRoundTripper{message: "", status: http.StatusNoContent} + client := newTestClient(fakeRT) + id := "4fa6e0f0c6786287e131c3852c58a2e01cc697a68231826813597e4994f1d6e2" + err := client.UnpauseContainer(id) + if err != nil { + t.Fatal(err) + } + req := fakeRT.requests[0] + if req.Method != "POST" { + t.Errorf("PauseContainer(%q): wrong HTTP method. Want %q. Got %q.", id, "POST", req.Method) + } + expectedURL, _ := url.Parse(client.getURL("/containers/" + id + "/unpause")) + if gotPath := req.URL.Path; gotPath != expectedURL.Path { + t.Errorf("PauseContainer(%q): Wrong path in request. Want %q. 
Got %q.", id, expectedURL.Path, gotPath) + } +} + +func TestUnpauseContainerNotFound(t *testing.T) { + client := newTestClient(&FakeRoundTripper{message: "no such container", status: http.StatusNotFound}) + err := client.UnpauseContainer("a2334") + expected := &NoSuchContainer{ID: "a2334"} + if !reflect.DeepEqual(err, expected) { + t.Errorf("PauseContainer: Wrong error returned. Want %#v. Got %#v.", expected, err) + } +} + +func TestKillContainer(t *testing.T) { + fakeRT := &FakeRoundTripper{message: "", status: http.StatusNoContent} + client := newTestClient(fakeRT) + id := "4fa6e0f0c6786287e131c3852c58a2e01cc697a68231826813597e4994f1d6e2" + err := client.KillContainer(KillContainerOptions{ID: id}) + if err != nil { + t.Fatal(err) + } + req := fakeRT.requests[0] + if req.Method != "POST" { + t.Errorf("KillContainer(%q): wrong HTTP method. Want %q. Got %q.", id, "POST", req.Method) + } + expectedURL, _ := url.Parse(client.getURL("/containers/" + id + "/kill")) + if gotPath := req.URL.Path; gotPath != expectedURL.Path { + t.Errorf("KillContainer(%q): Wrong path in request. Want %q. Got %q.", id, expectedURL.Path, gotPath) + } +} + +func TestKillContainerSignal(t *testing.T) { + fakeRT := &FakeRoundTripper{message: "", status: http.StatusNoContent} + client := newTestClient(fakeRT) + id := "4fa6e0f0c6786287e131c3852c58a2e01cc697a68231826813597e4994f1d6e2" + err := client.KillContainer(KillContainerOptions{ID: id, Signal: SIGTERM}) + if err != nil { + t.Fatal(err) + } + req := fakeRT.requests[0] + if req.Method != "POST" { + t.Errorf("KillContainer(%q): wrong HTTP method. Want %q. Got %q.", id, "POST", req.Method) + } + if signal := req.URL.Query().Get("signal"); signal != "15" { + t.Errorf("KillContainer(%q): Wrong query string in request. Want %q. Got %q.", id, "15", signal) + } +} + +func TestKillContainerNotFound(t *testing.T) { + client := newTestClient(&FakeRoundTripper{message: "no such container", status: http.StatusNotFound}) + err := client.KillContainer(KillContainerOptions{ID: "a2334"}) + expected := &NoSuchContainer{ID: "a2334"} + if !reflect.DeepEqual(err, expected) { + t.Errorf("KillContainer: Wrong error returned. Want %#v. Got %#v.", expected, err) + } +} + +func TestRemoveContainer(t *testing.T) { + fakeRT := &FakeRoundTripper{message: "", status: http.StatusOK} + client := newTestClient(fakeRT) + id := "4fa6e0f0c6786287e131c3852c58a2e01cc697a68231826813597e4994f1d6e2" + opts := RemoveContainerOptions{ID: id} + err := client.RemoveContainer(opts) + if err != nil { + t.Fatal(err) + } + req := fakeRT.requests[0] + if req.Method != "DELETE" { + t.Errorf("RemoveContainer(%q): wrong HTTP method. Want %q. Got %q.", id, "DELETE", req.Method) + } + expectedURL, _ := url.Parse(client.getURL("/containers/" + id)) + if gotPath := req.URL.Path; gotPath != expectedURL.Path { + t.Errorf("RemoveContainer(%q): Wrong path in request. Want %q. Got %q.", id, expectedURL.Path, gotPath) + } +} + +func TestRemoveContainerRemoveVolumes(t *testing.T) { + fakeRT := &FakeRoundTripper{message: "", status: http.StatusOK} + client := newTestClient(fakeRT) + id := "4fa6e0f0c6786287e131c3852c58a2e01cc697a68231826813597e4994f1d6e2" + opts := RemoveContainerOptions{ID: id, RemoveVolumes: true} + err := client.RemoveContainer(opts) + if err != nil { + t.Fatal(err) + } + req := fakeRT.requests[0] + params := map[string][]string(req.URL.Query()) + expected := map[string][]string{"v": {"1"}} + if !reflect.DeepEqual(params, expected) { + t.Errorf("RemoveContainer(%q): wrong parameters. Want %#v. 
Got %#v.", id, expected, params) + } +} + +func TestRemoveContainerNotFound(t *testing.T) { + client := newTestClient(&FakeRoundTripper{message: "no such container", status: http.StatusNotFound}) + err := client.RemoveContainer(RemoveContainerOptions{ID: "a2334"}) + expected := &NoSuchContainer{ID: "a2334"} + if !reflect.DeepEqual(err, expected) { + t.Errorf("RemoveContainer: Wrong error returned. Want %#v. Got %#v.", expected, err) + } +} + +func TestResizeContainerTTY(t *testing.T) { + fakeRT := &FakeRoundTripper{message: "", status: http.StatusOK} + client := newTestClient(fakeRT) + id := "4fa6e0f0c6786287e131c3852c58a2e01cc697a68231826813597e4994f1d6e2" + err := client.ResizeContainerTTY(id, 40, 80) + if err != nil { + t.Fatal(err) + } + req := fakeRT.requests[0] + if req.Method != "POST" { + t.Errorf("ResizeContainerTTY(%q): wrong HTTP method. Want %q. Got %q.", id, "POST", req.Method) + } + expectedURL, _ := url.Parse(client.getURL("/containers/" + id + "/resize")) + if gotPath := req.URL.Path; gotPath != expectedURL.Path { + t.Errorf("ResizeContainerTTY(%q): Wrong path in request. Want %q. Got %q.", id, expectedURL.Path, gotPath) + } + got := map[string][]string(req.URL.Query()) + expectedParams := map[string][]string{ + "w": {"80"}, + "h": {"40"}, + } + if !reflect.DeepEqual(got, expectedParams) { + t.Errorf("Expected %#v, got %#v.", expectedParams, got) + } +} + +func TestWaitContainer(t *testing.T) { + fakeRT := &FakeRoundTripper{message: `{"StatusCode": 56}`, status: http.StatusOK} + client := newTestClient(fakeRT) + id := "4fa6e0f0c6786287e131c3852c58a2e01cc697a68231826813597e4994f1d6e2" + status, err := client.WaitContainer(id) + if err != nil { + t.Fatal(err) + } + if status != 56 { + t.Errorf("WaitContainer(%q): wrong return. Want 56. Got %d.", id, status) + } + req := fakeRT.requests[0] + if req.Method != "POST" { + t.Errorf("WaitContainer(%q): wrong HTTP method. Want %q. Got %q.", id, "POST", req.Method) + } + expectedURL, _ := url.Parse(client.getURL("/containers/" + id + "/wait")) + if gotPath := req.URL.Path; gotPath != expectedURL.Path { + t.Errorf("WaitContainer(%q): Wrong path in request. Want %q. Got %q.", id, expectedURL.Path, gotPath) + } +} + +func TestWaitContainerNotFound(t *testing.T) { + client := newTestClient(&FakeRoundTripper{message: "no such container", status: http.StatusNotFound}) + _, err := client.WaitContainer("a2334") + expected := &NoSuchContainer{ID: "a2334"} + if !reflect.DeepEqual(err, expected) { + t.Errorf("WaitContainer: Wrong error returned. Want %#v. Got %#v.", expected, err) + } +} + +func TestCommitContainer(t *testing.T) { + response := `{"Id":"596069db4bf5"}` + client := newTestClient(&FakeRoundTripper{message: response, status: http.StatusOK}) + id := "596069db4bf5" + image, err := client.CommitContainer(CommitContainerOptions{}) + if err != nil { + t.Fatal(err) + } + if image.ID != id { + t.Errorf("CommitContainer: Wrong image id. Want %q. 
Got %q.", id, image.ID) + } +} + +func TestCommitContainerParams(t *testing.T) { + cfg := Config{Memory: 67108864} + json, _ := json.Marshal(&cfg) + var tests = []struct { + input CommitContainerOptions + params map[string][]string + body []byte + }{ + {CommitContainerOptions{}, map[string][]string{}, nil}, + {CommitContainerOptions{Container: "44c004db4b17"}, map[string][]string{"container": {"44c004db4b17"}}, nil}, + { + CommitContainerOptions{Container: "44c004db4b17", Repository: "tsuru/python", Message: "something"}, + map[string][]string{"container": {"44c004db4b17"}, "repo": {"tsuru/python"}, "comment": {"something"}}, + nil, + }, + { + CommitContainerOptions{Container: "44c004db4b17", Run: &cfg}, + map[string][]string{"container": {"44c004db4b17"}}, + json, + }, + } + fakeRT := &FakeRoundTripper{message: "{}", status: http.StatusOK} + client := newTestClient(fakeRT) + u, _ := url.Parse(client.getURL("/commit")) + for _, tt := range tests { + if _, err := client.CommitContainer(tt.input); err != nil { + t.Error(err) + } + got := map[string][]string(fakeRT.requests[0].URL.Query()) + if !reflect.DeepEqual(got, tt.params) { + t.Errorf("Expected %#v, got %#v.", tt.params, got) + } + if path := fakeRT.requests[0].URL.Path; path != u.Path { + t.Errorf("Wrong path on request. Want %q. Got %q.", u.Path, path) + } + if meth := fakeRT.requests[0].Method; meth != "POST" { + t.Errorf("Wrong HTTP method. Want POST. Got %s.", meth) + } + if tt.body != nil { + if requestBody, err := ioutil.ReadAll(fakeRT.requests[0].Body); err == nil { + if bytes.Compare(requestBody, tt.body) != 0 { + t.Errorf("Expected body %#v, got %#v", tt.body, requestBody) + } + } else { + t.Errorf("Error reading request body: %#v", err) + } + } + fakeRT.Reset() + } +} + +func TestCommitContainerFailure(t *testing.T) { + client := newTestClient(&FakeRoundTripper{message: "no such container", status: http.StatusInternalServerError}) + _, err := client.CommitContainer(CommitContainerOptions{}) + if err == nil { + t.Error("Expected non-nil error, got .") + } +} + +func TestCommitContainerNotFound(t *testing.T) { + client := newTestClient(&FakeRoundTripper{message: "no such container", status: http.StatusNotFound}) + _, err := client.CommitContainer(CommitContainerOptions{}) + expected := &NoSuchContainer{ID: ""} + if !reflect.DeepEqual(err, expected) { + t.Errorf("CommitContainer: Wrong error returned. Want %#v. Got %#v.", expected, err) + } +} + +func TestAttachToContainerLogs(t *testing.T) { + var req http.Request + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Write([]byte{1, 0, 0, 0, 0, 0, 0, 19}) + w.Write([]byte("something happened!")) + req = *r + })) + defer server.Close() + client, _ := NewClient(server.URL) + client.SkipServerVersionCheck = true + var buf bytes.Buffer + opts := AttachToContainerOptions{ + Container: "a123456", + OutputStream: &buf, + Stdout: true, + Stderr: true, + Logs: true, + } + err := client.AttachToContainer(opts) + if err != nil { + t.Fatal(err) + } + expected := "something happened!" + if buf.String() != expected { + t.Errorf("AttachToContainer for logs: wrong output. Want %q. Got %q.", expected, buf.String()) + } + if req.Method != "POST" { + t.Errorf("AttachToContainer: wrong HTTP method. Want POST. Got %s.", req.Method) + } + u, _ := url.Parse(client.getURL("/containers/a123456/attach")) + if req.URL.Path != u.Path { + t.Errorf("AttachToContainer for logs: wrong HTTP path. Want %q. 
Got %q.", u.Path, req.URL.Path) + } + expectedQs := map[string][]string{ + "logs": {"1"}, + "stdout": {"1"}, + "stderr": {"1"}, + } + got := map[string][]string(req.URL.Query()) + if !reflect.DeepEqual(got, expectedQs) { + t.Errorf("AttachToContainer: wrong query string. Want %#v. Got %#v.", expectedQs, got) + } +} + +func TestAttachToContainer(t *testing.T) { + var reader = strings.NewReader("send value") + var req http.Request + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Write([]byte{1, 0, 0, 0, 0, 0, 0, 5}) + w.Write([]byte("hello")) + req = *r + })) + defer server.Close() + client, _ := NewClient(server.URL) + client.SkipServerVersionCheck = true + var stdout, stderr bytes.Buffer + opts := AttachToContainerOptions{ + Container: "a123456", + OutputStream: &stdout, + ErrorStream: &stderr, + InputStream: reader, + Stdin: true, + Stdout: true, + Stderr: true, + Stream: true, + RawTerminal: true, + } + err := client.AttachToContainer(opts) + if err != nil { + t.Fatal(err) + } + expected := map[string][]string{ + "stdin": {"1"}, + "stdout": {"1"}, + "stderr": {"1"}, + "stream": {"1"}, + } + got := map[string][]string(req.URL.Query()) + if !reflect.DeepEqual(got, expected) { + t.Errorf("AttachToContainer: wrong query string. Want %#v. Got %#v.", expected, got) + } +} + +func TestAttachToContainerSentinel(t *testing.T) { + var reader = strings.NewReader("send value") + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Write([]byte{1, 0, 0, 0, 0, 0, 0, 5}) + w.Write([]byte("hello")) + })) + defer server.Close() + client, _ := NewClient(server.URL) + client.SkipServerVersionCheck = true + var stdout, stderr bytes.Buffer + success := make(chan struct{}) + opts := AttachToContainerOptions{ + Container: "a123456", + OutputStream: &stdout, + ErrorStream: &stderr, + InputStream: reader, + Stdin: true, + Stdout: true, + Stderr: true, + Stream: true, + RawTerminal: true, + Success: success, + } + go func() { + if err := client.AttachToContainer(opts); err != nil { + t.Error(err) + } + }() + success <- <-success +} + +func TestAttachToContainerNilStdout(t *testing.T) { + var reader = strings.NewReader("send value") + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Write([]byte{1, 0, 0, 0, 0, 0, 0, 5}) + w.Write([]byte("hello")) + })) + defer server.Close() + client, _ := NewClient(server.URL) + client.SkipServerVersionCheck = true + var stderr bytes.Buffer + opts := AttachToContainerOptions{ + Container: "a123456", + OutputStream: nil, + ErrorStream: &stderr, + InputStream: reader, + Stdin: true, + Stdout: true, + Stderr: true, + Stream: true, + RawTerminal: true, + } + err := client.AttachToContainer(opts) + if err != nil { + t.Fatal(err) + } +} + +func TestAttachToContainerNilStderr(t *testing.T) { + var reader = strings.NewReader("send value") + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Write([]byte{1, 0, 0, 0, 0, 0, 0, 5}) + w.Write([]byte("hello")) + })) + defer server.Close() + client, _ := NewClient(server.URL) + client.SkipServerVersionCheck = true + var stdout bytes.Buffer + opts := AttachToContainerOptions{ + Container: "a123456", + OutputStream: &stdout, + InputStream: reader, + Stdin: true, + Stdout: true, + Stderr: true, + Stream: true, + RawTerminal: true, + } + err := client.AttachToContainer(opts) + if err != nil { + t.Fatal(err) + } +} + +func TestAttachToContainerStdinOnly(t 
*testing.T) { + var reader = strings.NewReader("send value") + serverFinished := make(chan struct{}) + clientFinished := make(chan struct{}) + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusOK) + hj, ok := w.(http.Hijacker) + if !ok { + t.Fatal("cannot hijack server connection") + } + conn, _, err := hj.Hijack() + if err != nil { + t.Fatal(err) + } + // wait for client to indicate it's finished + <-clientFinished + // inform test that the server has finished + close(serverFinished) + conn.Close() + })) + defer server.Close() + client, _ := NewClient(server.URL) + client.SkipServerVersionCheck = true + success := make(chan struct{}) + opts := AttachToContainerOptions{ + Container: "a123456", + InputStream: reader, + Stdin: true, + Stdout: false, + Stderr: false, + Stream: true, + RawTerminal: false, + Success: success, + } + go func() { + if err := client.AttachToContainer(opts); err != nil { + t.Error(err) + } + // client's attach session is over + close(clientFinished) + }() + success <- <-success + // wait for server to finish handling attach + <-serverFinished +} + +func TestAttachToContainerRawTerminalFalse(t *testing.T) { + input := strings.NewReader("send value") + var req http.Request + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + req = *r + w.WriteHeader(http.StatusOK) + hj, ok := w.(http.Hijacker) + if !ok { + t.Fatal("cannot hijack server connection") + } + conn, _, err := hj.Hijack() + if err != nil { + t.Fatal(err) + } + conn.Write([]byte{1, 0, 0, 0, 0, 0, 0, 5}) + conn.Write([]byte("hello")) + conn.Write([]byte{2, 0, 0, 0, 0, 0, 0, 6}) + conn.Write([]byte("hello!")) + conn.Close() + })) + defer server.Close() + client, _ := NewClient(server.URL) + client.SkipServerVersionCheck = true + var stdout, stderr bytes.Buffer + opts := AttachToContainerOptions{ + Container: "a123456", + OutputStream: &stdout, + ErrorStream: &stderr, + InputStream: input, + Stdin: true, + Stdout: true, + Stderr: true, + Stream: true, + RawTerminal: false, + } + client.AttachToContainer(opts) + expected := map[string][]string{ + "stdin": {"1"}, + "stdout": {"1"}, + "stderr": {"1"}, + "stream": {"1"}, + } + got := map[string][]string(req.URL.Query()) + if !reflect.DeepEqual(got, expected) { + t.Errorf("AttachToContainer: wrong query string. Want %#v. Got %#v.", expected, got) + } + if stdout.String() != "hello" { + t.Errorf("AttachToContainer: wrong content written to stdout. Want %q. Got %q.", "hello", stdout.String()) + } + if stderr.String() != "hello!" { + t.Errorf("AttachToContainer: wrong content written to stderr. Want %q. Got %q.", "hello!", stderr.String()) + } +} + +func TestAttachToContainerWithoutContainer(t *testing.T) { + var client Client + err := client.AttachToContainer(AttachToContainerOptions{}) + expected := &NoSuchContainer{ID: ""} + if !reflect.DeepEqual(err, expected) { + t.Errorf("AttachToContainer: wrong error. Want %#v. 
Got %#v.", expected, err) + } +} + +func TestLogs(t *testing.T) { + var req http.Request + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + prefix := []byte{1, 0, 0, 0, 0, 0, 0, 19} + w.Write(prefix) + w.Write([]byte("something happened!")) + req = *r + })) + defer server.Close() + client, _ := NewClient(server.URL) + client.SkipServerVersionCheck = true + var buf bytes.Buffer + opts := LogsOptions{ + Container: "a123456", + OutputStream: &buf, + Follow: true, + Stdout: true, + Stderr: true, + Timestamps: true, + } + err := client.Logs(opts) + if err != nil { + t.Fatal(err) + } + expected := "something happened!" + if buf.String() != expected { + t.Errorf("Logs: wrong output. Want %q. Got %q.", expected, buf.String()) + } + if req.Method != "GET" { + t.Errorf("Logs: wrong HTTP method. Want GET. Got %s.", req.Method) + } + u, _ := url.Parse(client.getURL("/containers/a123456/logs")) + if req.URL.Path != u.Path { + t.Errorf("AttachToContainer for logs: wrong HTTP path. Want %q. Got %q.", u.Path, req.URL.Path) + } + expectedQs := map[string][]string{ + "follow": {"1"}, + "stdout": {"1"}, + "stderr": {"1"}, + "timestamps": {"1"}, + "tail": {"all"}, + } + got := map[string][]string(req.URL.Query()) + if !reflect.DeepEqual(got, expectedQs) { + t.Errorf("Logs: wrong query string. Want %#v. Got %#v.", expectedQs, got) + } +} + +func TestLogsNilStdoutDoesntFail(t *testing.T) { + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + prefix := []byte{1, 0, 0, 0, 0, 0, 0, 19} + w.Write(prefix) + w.Write([]byte("something happened!")) + })) + defer server.Close() + client, _ := NewClient(server.URL) + client.SkipServerVersionCheck = true + opts := LogsOptions{ + Container: "a123456", + Follow: true, + Stdout: true, + Stderr: true, + Timestamps: true, + } + err := client.Logs(opts) + if err != nil { + t.Fatal(err) + } +} + +func TestLogsNilStderrDoesntFail(t *testing.T) { + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + prefix := []byte{2, 0, 0, 0, 0, 0, 0, 19} + w.Write(prefix) + w.Write([]byte("something happened!")) + })) + defer server.Close() + client, _ := NewClient(server.URL) + client.SkipServerVersionCheck = true + opts := LogsOptions{ + Container: "a123456", + Follow: true, + Stdout: true, + Stderr: true, + Timestamps: true, + } + err := client.Logs(opts) + if err != nil { + t.Fatal(err) + } +} + +func TestLogsSpecifyingTail(t *testing.T) { + var req http.Request + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + prefix := []byte{1, 0, 0, 0, 0, 0, 0, 19} + w.Write(prefix) + w.Write([]byte("something happened!")) + req = *r + })) + defer server.Close() + client, _ := NewClient(server.URL) + client.SkipServerVersionCheck = true + var buf bytes.Buffer + opts := LogsOptions{ + Container: "a123456", + OutputStream: &buf, + Follow: true, + Stdout: true, + Stderr: true, + Timestamps: true, + Tail: "100", + } + err := client.Logs(opts) + if err != nil { + t.Fatal(err) + } + expected := "something happened!" + if buf.String() != expected { + t.Errorf("Logs: wrong output. Want %q. Got %q.", expected, buf.String()) + } + if req.Method != "GET" { + t.Errorf("Logs: wrong HTTP method. Want GET. Got %s.", req.Method) + } + u, _ := url.Parse(client.getURL("/containers/a123456/logs")) + if req.URL.Path != u.Path { + t.Errorf("AttachToContainer for logs: wrong HTTP path. Want %q. 
Got %q.", u.Path, req.URL.Path) + } + expectedQs := map[string][]string{ + "follow": {"1"}, + "stdout": {"1"}, + "stderr": {"1"}, + "timestamps": {"1"}, + "tail": {"100"}, + } + got := map[string][]string(req.URL.Query()) + if !reflect.DeepEqual(got, expectedQs) { + t.Errorf("Logs: wrong query string. Want %#v. Got %#v.", expectedQs, got) + } +} + +func TestLogsRawTerminal(t *testing.T) { + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Write([]byte("something happened!")) + })) + defer server.Close() + client, _ := NewClient(server.URL) + client.SkipServerVersionCheck = true + var buf bytes.Buffer + opts := LogsOptions{ + Container: "a123456", + OutputStream: &buf, + Follow: true, + RawTerminal: true, + Stdout: true, + Stderr: true, + Timestamps: true, + Tail: "100", + } + err := client.Logs(opts) + if err != nil { + t.Fatal(err) + } + expected := "something happened!" + if buf.String() != expected { + t.Errorf("Logs: wrong output. Want %q. Got %q.", expected, buf.String()) + } +} + +func TestLogsNoContainer(t *testing.T) { + var client Client + err := client.Logs(LogsOptions{}) + expected := &NoSuchContainer{ID: ""} + if !reflect.DeepEqual(err, expected) { + t.Errorf("AttachToContainer: wrong error. Want %#v. Got %#v.", expected, err) + } +} + +func TestNoSuchContainerError(t *testing.T) { + var err = &NoSuchContainer{ID: "i345"} + expected := "No such container: i345" + if got := err.Error(); got != expected { + t.Errorf("NoSuchContainer: wrong message. Want %q. Got %q.", expected, got) + } +} + +func TestNoSuchContainerErrorMessage(t *testing.T) { + var err = &NoSuchContainer{ID: "i345", Err: errors.New("some advanced error info")} + expected := "some advanced error info" + if got := err.Error(); got != expected { + t.Errorf("NoSuchContainer: wrong message. Want %q. Got %q.", expected, got) + } +} + +func TestExportContainer(t *testing.T) { + content := "exported container tar content" + out := stdoutMock{bytes.NewBufferString(content)} + client := newTestClient(&FakeRoundTripper{status: http.StatusOK}) + opts := ExportContainerOptions{ID: "4fa6e0f0c678", OutputStream: out} + err := client.ExportContainer(opts) + if err != nil { + t.Errorf("ExportContainer: caugh error %#v while exporting container, expected nil", err.Error()) + } + if out.String() != content { + t.Errorf("ExportContainer: wrong stdout. Want %#v. Got %#v.", content, out.String()) + } +} + +func TestExportContainerViaUnixSocket(t *testing.T) { + if runtime.GOOS != "darwin" { + t.Skip(fmt.Sprintf("skipping test on %s", runtime.GOOS)) + } + content := "exported container tar content" + var buf []byte + out := bytes.NewBuffer(buf) + tempSocket := tempfile("export_socket") + defer os.Remove(tempSocket) + endpoint := "unix://" + tempSocket + u, _ := parseEndpoint(endpoint, false) + client := Client{ + HTTPClient: cleanhttp.DefaultClient(), + Dialer: &net.Dialer{}, + endpoint: endpoint, + endpointURL: u, + SkipServerVersionCheck: true, + } + listening := make(chan string) + done := make(chan int) + go runStreamConnServer(t, "unix", tempSocket, listening, done) + <-listening // wait for server to start + opts := ExportContainerOptions{ID: "4fa6e0f0c678", OutputStream: out} + err := client.ExportContainer(opts) + <-done // make sure server stopped + if err != nil { + t.Errorf("ExportContainer: caugh error %#v while exporting container, expected nil", err.Error()) + } + if out.String() != content { + t.Errorf("ExportContainer: wrong stdout. Want %#v. 
Got %#v.", content, out.String()) + } +} + +func runStreamConnServer(t *testing.T, network, laddr string, listening chan<- string, done chan<- int) { + defer close(done) + l, err := net.Listen(network, laddr) + if err != nil { + t.Errorf("Listen(%q, %q) failed: %v", network, laddr, err) + listening <- "" + return + } + defer l.Close() + listening <- l.Addr().String() + c, err := l.Accept() + if err != nil { + t.Logf("Accept failed: %v", err) + return + } + c.Write([]byte("HTTP/1.1 200 OK\n\nexported container tar content")) + c.Close() +} + +func tempfile(filename string) string { + return os.TempDir() + "/" + filename + "." + strconv.Itoa(os.Getpid()) +} + +func TestExportContainerNoId(t *testing.T) { + client := Client{} + out := stdoutMock{bytes.NewBufferString("")} + err := client.ExportContainer(ExportContainerOptions{OutputStream: out}) + e, ok := err.(*NoSuchContainer) + if !ok { + t.Errorf("ExportContainer: wrong error. Want NoSuchContainer. Got %#v.", e) + } + if e.ID != "" { + t.Errorf("ExportContainer: wrong ID. Want %q. Got %q", "", e.ID) + } +} + +func TestUploadToContainer(t *testing.T) { + content := "File content" + in := stdinMock{bytes.NewBufferString(content)} + fakeRT := &FakeRoundTripper{status: http.StatusOK} + client := newTestClient(fakeRT) + opts := UploadToContainerOptions{ + Path: "abc", + InputStream: in, + } + err := client.UploadToContainer("a123456", opts) + if err != nil { + t.Errorf("UploadToContainer: caught error %#v while uploading archive to container, expected nil", err) + } + + req := fakeRT.requests[0] + + if req.Method != "PUT" { + t.Errorf("UploadToContainer{Path:abc}: Wrong HTTP method. Want PUT. Got %s", req.Method) + } + + if pathParam := req.URL.Query().Get("path"); pathParam != "abc" { + t.Errorf("ListImages({Path:abc}): Wrong parameter. Want path=abc. Got path=%s", pathParam) + } + +} + +func TestDownloadFromContainer(t *testing.T) { + filecontent := "File content" + client := newTestClient(&FakeRoundTripper{message: filecontent, status: http.StatusOK}) + + var out bytes.Buffer + opts := DownloadFromContainerOptions{ + OutputStream: &out, + } + err := client.DownloadFromContainer("a123456", opts) + if err != nil { + t.Errorf("DownloadFromContainer: caught error %#v while downloading from container, expected nil", err.Error()) + } + if out.String() != filecontent { + t.Errorf("DownloadFromContainer: wrong stdout. Want %#v. Got %#v.", filecontent, out.String()) + } +} + +func TestCopyFromContainer(t *testing.T) { + content := "File content" + out := stdoutMock{bytes.NewBufferString(content)} + client := newTestClient(&FakeRoundTripper{status: http.StatusOK}) + opts := CopyFromContainerOptions{ + Container: "a123456", + OutputStream: &out, + } + err := client.CopyFromContainer(opts) + if err != nil { + t.Errorf("CopyFromContainer: caught error %#v while copying from container, expected nil", err.Error()) + } + if out.String() != content { + t.Errorf("CopyFromContainer: wrong stdout. Want %#v. Got %#v.", content, out.String()) + } +} + +func TestCopyFromContainerEmptyContainer(t *testing.T) { + client := newTestClient(&FakeRoundTripper{status: http.StatusOK}) + err := client.CopyFromContainer(CopyFromContainerOptions{}) + _, ok := err.(*NoSuchContainer) + if !ok { + t.Errorf("CopyFromContainer: invalid error returned. 
Want NoSuchContainer, got %#v.", err) + } +} + +func TestPassingNameOptToCreateContainerReturnsItInContainer(t *testing.T) { + jsonContainer := `{ + "Id": "4fa6e0f0c6786287e131c3852c58a2e01cc697a68231826813597e4994f1d6e2", + "Warnings": [] +}` + fakeRT := &FakeRoundTripper{message: jsonContainer, status: http.StatusOK} + client := newTestClient(fakeRT) + config := Config{AttachStdout: true, AttachStdin: true} + opts := CreateContainerOptions{Name: "TestCreateContainer", Config: &config} + container, err := client.CreateContainer(opts) + if err != nil { + t.Fatal(err) + } + if container.Name != "TestCreateContainer" { + t.Errorf("Container name expected to be TestCreateContainer, was %s", container.Name) + } +} + +func TestAlwaysRestart(t *testing.T) { + policy := AlwaysRestart() + if policy.Name != "always" { + t.Errorf("AlwaysRestart(): wrong policy name. Want %q. Got %q", "always", policy.Name) + } + if policy.MaximumRetryCount != 0 { + t.Errorf("AlwaysRestart(): wrong MaximumRetryCount. Want 0. Got %d", policy.MaximumRetryCount) + } +} + +func TestRestartOnFailure(t *testing.T) { + const retry = 5 + policy := RestartOnFailure(retry) + if policy.Name != "on-failure" { + t.Errorf("RestartOnFailure(%d): wrong policy name. Want %q. Got %q", retry, "on-failure", policy.Name) + } + if policy.MaximumRetryCount != retry { + t.Errorf("RestartOnFailure(%d): wrong MaximumRetryCount. Want %d. Got %d", retry, retry, policy.MaximumRetryCount) + } +} + +func TestNeverRestart(t *testing.T) { + policy := NeverRestart() + if policy.Name != "no" { + t.Errorf("NeverRestart(): wrong policy name. Want %q. Got %q", "no", policy.Name) + } + if policy.MaximumRetryCount != 0 { + t.Errorf("NeverRestart(): wrong MaximumRetryCount. Want 0. Got %d", policy.MaximumRetryCount) + } +} + +func TestTopContainer(t *testing.T) { + jsonTop := `{ + "Processes": [ + [ + "ubuntu", + "3087", + "815", + "0", + "01:44", + "?", + "00:00:00", + "cmd1" + ], + [ + "root", + "3158", + "3087", + "0", + "01:44", + "?", + "00:00:01", + "cmd2" + ] + ], + "Titles": [ + "UID", + "PID", + "PPID", + "C", + "STIME", + "TTY", + "TIME", + "CMD" + ] +}` + var expected TopResult + err := json.Unmarshal([]byte(jsonTop), &expected) + if err != nil { + t.Fatal(err) + } + id := "4fa6e0f0" + fakeRT := &FakeRoundTripper{message: jsonTop, status: http.StatusOK} + client := newTestClient(fakeRT) + processes, err := client.TopContainer(id, "") + if err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(processes, expected) { + t.Errorf("TopContainer: Expected %#v. Got %#v.", expected, processes) + } + if len(processes.Processes) != 2 || len(processes.Processes[0]) != 8 || + processes.Processes[0][7] != "cmd1" { + t.Errorf("TopContainer: Expected process list to include cmd1. Got %#v.", processes) + } + expectedURI := "/containers/" + id + "/top" + if !strings.HasSuffix(fakeRT.requests[0].URL.String(), expectedURI) { + t.Errorf("TopContainer: Expected URI to have %q. Got %q.", expectedURI, fakeRT.requests[0].URL.String()) + } +} + +func TestTopContainerNotFound(t *testing.T) { + client := newTestClient(&FakeRoundTripper{message: "no such container", status: http.StatusNotFound}) + _, err := client.TopContainer("abef348", "") + expected := &NoSuchContainer{ID: "abef348"} + if !reflect.DeepEqual(err, expected) { + t.Errorf("TopContainer: Wrong error returned. Want %#v. 
Got %#v.", expected, err) + } +} + +func TestTopContainerWithPsArgs(t *testing.T) { + fakeRT := &FakeRoundTripper{message: "no such container", status: http.StatusNotFound} + client := newTestClient(fakeRT) + expectedErr := &NoSuchContainer{ID: "abef348"} + if _, err := client.TopContainer("abef348", "aux"); !reflect.DeepEqual(expectedErr, err) { + t.Errorf("TopContainer: Expected %v. Got %v.", expectedErr, err) + } + expectedURI := "/containers/abef348/top?ps_args=aux" + if !strings.HasSuffix(fakeRT.requests[0].URL.String(), expectedURI) { + t.Errorf("TopContainer: Expected URI to have %q. Got %q.", expectedURI, fakeRT.requests[0].URL.String()) + } +} + +func TestStatsTimeout(t *testing.T) { + l, err := net.Listen("unix", "/tmp/docker_test.sock") + if err != nil { + t.Fatal(err) + } + received := false + defer l.Close() + go func() { + l.Accept() + received = true + time.Sleep(time.Second) + }() + client, _ := NewClient("unix:///tmp/docker_test.sock") + client.SkipServerVersionCheck = true + errC := make(chan error, 1) + statsC := make(chan *Stats) + done := make(chan bool) + go func() { + errC <- client.Stats(StatsOptions{"c", statsC, true, done, time.Millisecond * 100}) + close(errC) + }() + err = <-errC + e, ok := err.(net.Error) + if !ok || !e.Timeout() { + t.Error("Failed to receive timeout exception") + } + if !received { + t.Fatal("Failed to receive message") + } +} + +func TestStats(t *testing.T) { + jsonStats1 := `{ + "read" : "2015-01-08T22:57:31.547920715Z", + "network" : { + "rx_dropped" : 0, + "rx_bytes" : 648, + "rx_errors" : 0, + "tx_packets" : 8, + "tx_dropped" : 0, + "rx_packets" : 8, + "tx_errors" : 0, + "tx_bytes" : 648 + }, + "networks" : { + "eth0":{ + "rx_dropped" : 0, + "rx_bytes" : 648, + "rx_errors" : 0, + "tx_packets" : 8, + "tx_dropped" : 0, + "rx_packets" : 8, + "tx_errors" : 0, + "tx_bytes" : 648 + } + }, + "memory_stats" : { + "stats" : { + "total_pgmajfault" : 0, + "cache" : 0, + "mapped_file" : 0, + "total_inactive_file" : 0, + "pgpgout" : 414, + "rss" : 6537216, + "total_mapped_file" : 0, + "writeback" : 0, + "unevictable" : 0, + "pgpgin" : 477, + "total_unevictable" : 0, + "pgmajfault" : 0, + "total_rss" : 6537216, + "total_rss_huge" : 6291456, + "total_writeback" : 0, + "total_inactive_anon" : 0, + "rss_huge" : 6291456, + "hierarchical_memory_limit": 189204833, + "total_pgfault" : 964, + "total_active_file" : 0, + "active_anon" : 6537216, + "total_active_anon" : 6537216, + "total_pgpgout" : 414, + "total_cache" : 0, + "inactive_anon" : 0, + "active_file" : 0, + "pgfault" : 964, + "inactive_file" : 0, + "total_pgpgin" : 477, + "swap" : 47312896, + "hierarchical_memsw_limit" : 1610612736 + }, + "max_usage" : 6651904, + "usage" : 6537216, + "failcnt" : 0, + "limit" : 67108864 + }, + "blkio_stats": { + "io_service_bytes_recursive": [ + { + "major": 8, + "minor": 0, + "op": "Read", + "value": 428795731968 + }, + { + "major": 8, + "minor": 0, + "op": "Write", + "value": 388177920 + } + ], + "io_serviced_recursive": [ + { + "major": 8, + "minor": 0, + "op": "Read", + "value": 25994442 + }, + { + "major": 8, + "minor": 0, + "op": "Write", + "value": 1734 + } + ], + "io_queue_recursive": [], + "io_service_time_recursive": [], + "io_wait_time_recursive": [], + "io_merged_recursive": [], + "io_time_recursive": [], + "sectors_recursive": [] + }, + "cpu_stats" : { + "cpu_usage" : { + "percpu_usage" : [ + 16970827, + 1839451, + 7107380, + 10571290 + ], + "usage_in_usermode" : 10000000, + "total_usage" : 36488948, + "usage_in_kernelmode" : 20000000 + }, + 
"system_cpu_usage" : 20091722000000000 + }, + "precpu_stats" : { + "cpu_usage" : { + "percpu_usage" : [ + 16970827, + 1839451, + 7107380, + 10571290 + ], + "usage_in_usermode" : 10000000, + "total_usage" : 36488948, + "usage_in_kernelmode" : 20000000 + }, + "system_cpu_usage" : 20091722000000000 + } + }` + // 1 second later, cache is 100 + jsonStats2 := `{ + "read" : "2015-01-08T22:57:32.547920715Z", + "networks" : { + "eth0":{ + "rx_dropped" : 0, + "rx_bytes" : 648, + "rx_errors" : 0, + "tx_packets" : 8, + "tx_dropped" : 0, + "rx_packets" : 8, + "tx_errors" : 0, + "tx_bytes" : 648 + } + }, + "memory_stats" : { + "stats" : { + "total_pgmajfault" : 0, + "cache" : 100, + "mapped_file" : 0, + "total_inactive_file" : 0, + "pgpgout" : 414, + "rss" : 6537216, + "total_mapped_file" : 0, + "writeback" : 0, + "unevictable" : 0, + "pgpgin" : 477, + "total_unevictable" : 0, + "pgmajfault" : 0, + "total_rss" : 6537216, + "total_rss_huge" : 6291456, + "total_writeback" : 0, + "total_inactive_anon" : 0, + "rss_huge" : 6291456, + "total_pgfault" : 964, + "total_active_file" : 0, + "active_anon" : 6537216, + "total_active_anon" : 6537216, + "total_pgpgout" : 414, + "total_cache" : 0, + "inactive_anon" : 0, + "active_file" : 0, + "pgfault" : 964, + "inactive_file" : 0, + "total_pgpgin" : 477, + "swap" : 47312896, + "hierarchical_memsw_limit" : 1610612736 + }, + "max_usage" : 6651904, + "usage" : 6537216, + "failcnt" : 0, + "limit" : 67108864 + }, + "blkio_stats": { + "io_service_bytes_recursive": [ + { + "major": 8, + "minor": 0, + "op": "Read", + "value": 428795731968 + }, + { + "major": 8, + "minor": 0, + "op": "Write", + "value": 388177920 + } + ], + "io_serviced_recursive": [ + { + "major": 8, + "minor": 0, + "op": "Read", + "value": 25994442 + }, + { + "major": 8, + "minor": 0, + "op": "Write", + "value": 1734 + } + ], + "io_queue_recursive": [], + "io_service_time_recursive": [], + "io_wait_time_recursive": [], + "io_merged_recursive": [], + "io_time_recursive": [], + "sectors_recursive": [] + }, + "cpu_stats" : { + "cpu_usage" : { + "percpu_usage" : [ + 16970827, + 1839451, + 7107380, + 10571290 + ], + "usage_in_usermode" : 10000000, + "total_usage" : 36488948, + "usage_in_kernelmode" : 20000000 + }, + "system_cpu_usage" : 20091722000000000 + }, + "precpu_stats" : { + "cpu_usage" : { + "percpu_usage" : [ + 16970827, + 1839451, + 7107380, + 10571290 + ], + "usage_in_usermode" : 10000000, + "total_usage" : 36488948, + "usage_in_kernelmode" : 20000000 + }, + "system_cpu_usage" : 20091722000000000 + } + }` + var expected1 Stats + var expected2 Stats + err := json.Unmarshal([]byte(jsonStats1), &expected1) + if err != nil { + t.Fatal(err) + } + err = json.Unmarshal([]byte(jsonStats2), &expected2) + if err != nil { + t.Fatal(err) + } + id := "4fa6e0f0" + + var req http.Request + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/json") + w.Write([]byte(jsonStats1)) + w.Write([]byte(jsonStats2)) + req = *r + })) + defer server.Close() + client, _ := NewClient(server.URL) + client.SkipServerVersionCheck = true + errC := make(chan error, 1) + statsC := make(chan *Stats) + done := make(chan bool) + go func() { + errC <- client.Stats(StatsOptions{id, statsC, true, done, 0}) + close(errC) + }() + var resultStats []*Stats + for { + stats, ok := <-statsC + if !ok { + break + } + resultStats = append(resultStats, stats) + } + err = <-errC + if err != nil { + t.Fatal(err) + } + if len(resultStats) != 2 { + t.Fatalf("Stats: 
Expected 2 results. Got %d.", len(resultStats)) + } + if !reflect.DeepEqual(resultStats[0], &expected1) { + t.Errorf("Stats: Expected:\n%+v\nGot:\n%+v", expected1, resultStats[0]) + } + if !reflect.DeepEqual(resultStats[1], &expected2) { + t.Errorf("Stats: Expected:\n%+v\nGot:\n%+v", expected2, resultStats[1]) + } + if req.Method != "GET" { + t.Errorf("Stats: wrong HTTP method. Want GET. Got %s.", req.Method) + } + u, _ := url.Parse(client.getURL("/containers/" + id + "/stats")) + if req.URL.Path != u.Path { + t.Errorf("Stats: wrong HTTP path. Want %q. Got %q.", u.Path, req.URL.Path) + } +} + +func TestStatsContainerNotFound(t *testing.T) { + client := newTestClient(&FakeRoundTripper{message: "no such container", status: http.StatusNotFound}) + statsC := make(chan *Stats) + done := make(chan bool) + err := client.Stats(StatsOptions{"abef348", statsC, true, done, 0}) + expected := &NoSuchContainer{ID: "abef348"} + if !reflect.DeepEqual(err, expected) { + t.Errorf("Stats: Wrong error returned. Want %#v. Got %#v.", expected, err) + } +} + +func TestRenameContainer(t *testing.T) { + fakeRT := &FakeRoundTripper{message: "", status: http.StatusOK} + client := newTestClient(fakeRT) + opts := RenameContainerOptions{ID: "something_old", Name: "something_new"} + err := client.RenameContainer(opts) + if err != nil { + t.Fatal(err) + } + req := fakeRT.requests[0] + if req.Method != "POST" { + t.Errorf("RenameContainer: wrong HTTP method. Want %q. Got %q.", "POST", req.Method) + } + expectedURL, _ := url.Parse(client.getURL("/containers/something_old/rename?name=something_new")) + if gotPath := req.URL.Path; gotPath != expectedURL.Path { + t.Errorf("RenameContainer: Wrong path in request. Want %q. Got %q.", expectedURL.Path, gotPath) + } + expectedValues := expectedURL.Query()["name"] + actualValues := req.URL.Query()["name"] + if len(actualValues) != 1 || expectedValues[0] != actualValues[0] { + t.Errorf("RenameContainer: Wrong params in request. Want %q. Got %q.", expectedValues, actualValues) + } +} diff --git a/vendor/github.com/fsouza/go-dockerclient/env.go b/vendor/github.com/fsouza/go-dockerclient/env.go new file mode 100644 index 000000000..c54b0b0e8 --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/env.go @@ -0,0 +1,168 @@ +// Copyright 2014 Docker authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the DOCKER-LICENSE file. + +package docker + +import ( + "encoding/json" + "fmt" + "io" + "strconv" + "strings" +) + +// Env represents a list of key-pair represented in the form KEY=VALUE. +type Env []string + +// Get returns the string value of the given key. +func (env *Env) Get(key string) (value string) { + return env.Map()[key] +} + +// Exists checks whether the given key is defined in the internal Env +// representation. +func (env *Env) Exists(key string) bool { + _, exists := env.Map()[key] + return exists +} + +// GetBool returns a boolean representation of the given key. The key is false +// whenever its value if 0, no, false, none or an empty string. Any other value +// will be interpreted as true. +func (env *Env) GetBool(key string) (value bool) { + s := strings.ToLower(strings.Trim(env.Get(key), " \t")) + if s == "" || s == "0" || s == "no" || s == "false" || s == "none" { + return false + } + return true +} + +// SetBool defines a boolean value to the given key. 
+func (env *Env) SetBool(key string, value bool) { + if value { + env.Set(key, "1") + } else { + env.Set(key, "0") + } +} + +// GetInt returns the value of the provided key, converted to int. +// +// It the value cannot be represented as an integer, it returns -1. +func (env *Env) GetInt(key string) int { + return int(env.GetInt64(key)) +} + +// SetInt defines an integer value to the given key. +func (env *Env) SetInt(key string, value int) { + env.Set(key, strconv.Itoa(value)) +} + +// GetInt64 returns the value of the provided key, converted to int64. +// +// It the value cannot be represented as an integer, it returns -1. +func (env *Env) GetInt64(key string) int64 { + s := strings.Trim(env.Get(key), " \t") + val, err := strconv.ParseInt(s, 10, 64) + if err != nil { + return -1 + } + return val +} + +// SetInt64 defines an integer (64-bit wide) value to the given key. +func (env *Env) SetInt64(key string, value int64) { + env.Set(key, strconv.FormatInt(value, 10)) +} + +// GetJSON unmarshals the value of the provided key in the provided iface. +// +// iface is a value that can be provided to the json.Unmarshal function. +func (env *Env) GetJSON(key string, iface interface{}) error { + sval := env.Get(key) + if sval == "" { + return nil + } + return json.Unmarshal([]byte(sval), iface) +} + +// SetJSON marshals the given value to JSON format and stores it using the +// provided key. +func (env *Env) SetJSON(key string, value interface{}) error { + sval, err := json.Marshal(value) + if err != nil { + return err + } + env.Set(key, string(sval)) + return nil +} + +// GetList returns a list of strings matching the provided key. It handles the +// list as a JSON representation of a list of strings. +// +// If the given key matches to a single string, it will return a list +// containing only the value that matches the key. +func (env *Env) GetList(key string) []string { + sval := env.Get(key) + if sval == "" { + return nil + } + var l []string + if err := json.Unmarshal([]byte(sval), &l); err != nil { + l = append(l, sval) + } + return l +} + +// SetList stores the given list in the provided key, after serializing it to +// JSON format. +func (env *Env) SetList(key string, value []string) error { + return env.SetJSON(key, value) +} + +// Set defines the value of a key to the given string. +func (env *Env) Set(key, value string) { + *env = append(*env, key+"="+value) +} + +// Decode decodes `src` as a json dictionary, and adds each decoded key-value +// pair to the environment. +// +// If `src` cannot be decoded as a json dictionary, an error is returned. +func (env *Env) Decode(src io.Reader) error { + m := make(map[string]interface{}) + if err := json.NewDecoder(src).Decode(&m); err != nil { + return err + } + for k, v := range m { + env.SetAuto(k, v) + } + return nil +} + +// SetAuto will try to define the Set* method to call based on the given value. +func (env *Env) SetAuto(key string, value interface{}) { + if fval, ok := value.(float64); ok { + env.SetInt64(key, int64(fval)) + } else if sval, ok := value.(string); ok { + env.Set(key, sval) + } else if val, err := json.Marshal(value); err == nil { + env.Set(key, string(val)) + } else { + env.Set(key, fmt.Sprintf("%v", value)) + } +} + +// Map returns the map representation of the env. 
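Taken together, these setters and getters give Env a typed view over plain KEY=VALUE strings. A minimal sketch of how that surface composes, using made-up keys and values (see Map below for the map form):

	package main

	import (
		"fmt"

		"github.com/fsouza/go-dockerclient"
	)

	func main() {
		var env docker.Env
		env.Set("REGION", "us-east-1")   // stored as "REGION=us-east-1"
		env.SetBool("DEBUG", true)       // stored as "DEBUG=1"
		env.SetInt64("MEMORY", 67108864) // stored as "MEMORY=67108864"

		fmt.Println(env.Get("REGION"))      // us-east-1
		fmt.Println(env.GetBool("DEBUG"))   // true
		fmt.Println(env.GetInt64("MEMORY")) // 67108864
		fmt.Println(env.GetInt("MISSING"))  // -1: absent keys fail integer parsing
	}
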
+func (env *Env) Map() map[string]string { + if len(*env) == 0 { + return nil + } + m := make(map[string]string) + for _, kv := range *env { + parts := strings.SplitN(kv, "=", 2) + m[parts[0]] = parts[1] + } + return m +} diff --git a/vendor/github.com/fsouza/go-dockerclient/env_test.go b/vendor/github.com/fsouza/go-dockerclient/env_test.go new file mode 100644 index 000000000..df5169d06 --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/env_test.go @@ -0,0 +1,351 @@ +// Copyright 2014 go-dockerclient authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the DOCKER-LICENSE file. + +package docker + +import ( + "bytes" + "errors" + "reflect" + "sort" + "testing" +) + +func TestGet(t *testing.T) { + var tests = []struct { + input []string + query string + expected string + }{ + {[]string{"PATH=/usr/bin:/bin", "PYTHONPATH=/usr/local"}, "PATH", "/usr/bin:/bin"}, + {[]string{"PATH=/usr/bin:/bin", "PYTHONPATH=/usr/local"}, "PYTHONPATH", "/usr/local"}, + {[]string{"PATH=/usr/bin:/bin", "PYTHONPATH=/usr/local"}, "PYTHONPATHI", ""}, + {[]string{"WAT="}, "WAT", ""}, + } + for _, tt := range tests { + env := Env(tt.input) + got := env.Get(tt.query) + if got != tt.expected { + t.Errorf("Env.Get(%q): wrong result. Want %q. Got %q", tt.query, tt.expected, got) + } + } +} + +func TestExists(t *testing.T) { + var tests = []struct { + input []string + query string + expected bool + }{ + {[]string{"WAT=", "PYTHONPATH=/usr/local"}, "WAT", true}, + {[]string{"PATH=/usr/bin:/bin", "PYTHONPATH=/usr/local"}, "PYTHONPATH", true}, + {[]string{"PATH=/usr/bin:/bin", "PYTHONPATH=/usr/local"}, "PYTHONPATHI", false}, + } + for _, tt := range tests { + env := Env(tt.input) + got := env.Exists(tt.query) + if got != tt.expected { + t.Errorf("Env.Exists(%q): wrong result. Want %v. Got %v", tt.query, tt.expected, got) + } + } +} + +func TestGetBool(t *testing.T) { + var tests = []struct { + input string + expected bool + }{ + {"EMTPY_VAR", false}, {"ZERO_VAR", false}, {"NO_VAR", false}, + {"FALSE_VAR", false}, {"NONE_VAR", false}, {"TRUE_VAR", true}, + {"WAT", true}, {"PATH", true}, {"ONE_VAR", true}, {"NO_VAR_TAB", false}, + } + env := Env([]string{ + "EMPTY_VAR=", "ZERO_VAR=0", "NO_VAR=no", "FALSE_VAR=false", + "NONE_VAR=none", "TRUE_VAR=true", "WAT=wat", "PATH=/usr/bin:/bin", + "ONE_VAR=1", "NO_VAR_TAB=0 \t\t\t", + }) + for _, tt := range tests { + got := env.GetBool(tt.input) + if got != tt.expected { + t.Errorf("Env.GetBool(%q): wrong result. Want %v. Got %v.", tt.input, tt.expected, got) + } + } +} + +func TestSetBool(t *testing.T) { + var tests = []struct { + input bool + expected string + }{ + {true, "1"}, {false, "0"}, + } + for _, tt := range tests { + var env Env + env.SetBool("SOME", tt.input) + if got := env.Get("SOME"); got != tt.expected { + t.Errorf("Env.SetBool(%v): wrong result. Want %q. Got %q", tt.input, tt.expected, got) + } + } +} + +func TestGetInt(t *testing.T) { + var tests = []struct { + input string + expected int + }{ + {"NEGATIVE_INTEGER", -10}, {"NON_INTEGER", -1}, {"ONE", 1}, {"TWO", 2}, + } + env := Env([]string{"NEGATIVE_INTEGER=-10", "NON_INTEGER=wat", "ONE=1", "TWO=2"}) + for _, tt := range tests { + got := env.GetInt(tt.input) + if got != tt.expected { + t.Errorf("Env.GetInt(%q): wrong result. Want %d. 
Got %d", tt.input, tt.expected, got) + } + } +} + +func TestSetInt(t *testing.T) { + var tests = []struct { + input int + expected string + }{ + {10, "10"}, {13, "13"}, {7, "7"}, {33, "33"}, + {0, "0"}, {-34, "-34"}, + } + for _, tt := range tests { + var env Env + env.SetInt("SOME", tt.input) + if got := env.Get("SOME"); got != tt.expected { + t.Errorf("Env.SetBool(%d): wrong result. Want %q. Got %q", tt.input, tt.expected, got) + } + } +} + +func TestGetInt64(t *testing.T) { + var tests = []struct { + input string + expected int64 + }{ + {"NEGATIVE_INTEGER", -10}, {"NON_INTEGER", -1}, {"ONE", 1}, {"TWO", 2}, + } + env := Env([]string{"NEGATIVE_INTEGER=-10", "NON_INTEGER=wat", "ONE=1", "TWO=2"}) + for _, tt := range tests { + got := env.GetInt64(tt.input) + if got != tt.expected { + t.Errorf("Env.GetInt64(%q): wrong result. Want %d. Got %d", tt.input, tt.expected, got) + } + } +} + +func TestSetInt64(t *testing.T) { + var tests = []struct { + input int64 + expected string + }{ + {10, "10"}, {13, "13"}, {7, "7"}, {33, "33"}, + {0, "0"}, {-34, "-34"}, + } + for _, tt := range tests { + var env Env + env.SetInt64("SOME", tt.input) + if got := env.Get("SOME"); got != tt.expected { + t.Errorf("Env.SetBool(%d): wrong result. Want %q. Got %q", tt.input, tt.expected, got) + } + } +} + +func TestGetJSON(t *testing.T) { + var p struct { + Name string `json:"name"` + Age int `json:"age"` + } + var env Env + env.Set("person", `{"name":"Gopher","age":5}`) + err := env.GetJSON("person", &p) + if err != nil { + t.Error(err) + } + if p.Name != "Gopher" { + t.Errorf("Env.GetJSON(%q): wrong name. Want %q. Got %q", "person", "Gopher", p.Name) + } + if p.Age != 5 { + t.Errorf("Env.GetJSON(%q): wrong age. Want %d. Got %d", "person", 5, p.Age) + } +} + +func TestGetJSONAbsent(t *testing.T) { + var l []string + var env Env + err := env.GetJSON("person", &l) + if err != nil { + t.Error(err) + } + if l != nil { + t.Errorf("Env.GetJSON(): get unexpected list %v", l) + } +} + +func TestGetJSONFailure(t *testing.T) { + var p []string + var env Env + env.Set("list-person", `{"name":"Gopher","age":5}`) + err := env.GetJSON("list-person", &p) + if err == nil { + t.Errorf("Env.GetJSON(%q): got unexpected error.", "list-person") + } +} + +func TestSetJSON(t *testing.T) { + var p1 = struct { + Name string `json:"name"` + Age int `json:"age"` + }{Name: "Gopher", Age: 5} + var env Env + err := env.SetJSON("person", p1) + if err != nil { + t.Error(err) + } + var p2 struct { + Name string `json:"name"` + Age int `json:"age"` + } + err = env.GetJSON("person", &p2) + if err != nil { + t.Error(err) + } + if !reflect.DeepEqual(p1, p2) { + t.Errorf("Env.SetJSON(%q): wrong result. Want %v. Got %v", "person", p1, p2) + } +} + +func TestSetJSONFailure(t *testing.T) { + var env Env + err := env.SetJSON("person", unmarshable{}) + if err == nil { + t.Error("Env.SetJSON(): got unexpected error") + } + if env.Exists("person") { + t.Errorf("Env.SetJSON(): should not define the key %q, but did", "person") + } +} + +func TestGetList(t *testing.T) { + var tests = []struct { + input string + expected []string + }{ + {"WAT=wat", []string{"wat"}}, + {`WAT=["wat","wet","wit","wot","wut"]`, []string{"wat", "wet", "wit", "wot", "wut"}}, + {"WAT=", nil}, + } + for _, tt := range tests { + env := Env([]string{tt.input}) + got := env.GetList("WAT") + if !reflect.DeepEqual(got, tt.expected) { + t.Errorf("Env.GetList(%q): wrong result. Want %v. 
Got %v", "WAT", tt.expected, got) + } + } +} + +func TestSetList(t *testing.T) { + list := []string{"a", "b", "c"} + var env Env + if err := env.SetList("SOME", list); err != nil { + t.Error(err) + } + if got := env.GetList("SOME"); !reflect.DeepEqual(got, list) { + t.Errorf("Env.SetList(%v): wrong result. Got %v", list, got) + } +} + +func TestSet(t *testing.T) { + var env Env + env.Set("PATH", "/home/bin:/bin") + env.Set("SOMETHING", "/usr/bin") + env.Set("PATH", "/bin") + if expected, got := "/usr/bin", env.Get("SOMETHING"); got != expected { + t.Errorf("Env.Set(%q): wrong result. Want %q. Got %q", expected, expected, got) + } + if expected, got := "/bin", env.Get("PATH"); got != expected { + t.Errorf("Env.Set(%q): wrong result. Want %q. Got %q", expected, expected, got) + } +} + +func TestDecode(t *testing.T) { + var tests = []struct { + input string + expectedOut []string + expectedErr string + }{ + { + `{"PATH":"/usr/bin:/bin","containers":54,"wat":["123","345"]}`, + []string{"PATH=/usr/bin:/bin", "containers=54", `wat=["123","345"]`}, + "", + }, + {"}}", nil, "invalid character '}' looking for beginning of value"}, + {`{}`, nil, ""}, + } + for _, tt := range tests { + var env Env + err := env.Decode(bytes.NewBufferString(tt.input)) + if tt.expectedErr == "" { + if err != nil { + t.Error(err) + } + } else if tt.expectedErr != err.Error() { + t.Errorf("Env.Decode(): invalid error. Want %q. Got %q.", tt.expectedErr, err) + } + got := []string(env) + sort.Strings(got) + sort.Strings(tt.expectedOut) + if !reflect.DeepEqual(got, tt.expectedOut) { + t.Errorf("Env.Decode(): wrong result. Want %v. Got %v.", tt.expectedOut, got) + } + } +} + +func TestSetAuto(t *testing.T) { + buf := bytes.NewBufferString("oi") + var tests = []struct { + input interface{} + expected string + }{ + {10, "10"}, + {10.3, "10"}, + {"oi", "oi"}, + {buf, "{}"}, + {unmarshable{}, "{}"}, + } + for _, tt := range tests { + var env Env + env.SetAuto("SOME", tt.input) + if got := env.Get("SOME"); got != tt.expected { + t.Errorf("Env.SetAuto(%v): wrong result. Want %q. Got %q", tt.input, tt.expected, got) + } + } +} + +func TestMap(t *testing.T) { + var tests = []struct { + input []string + expected map[string]string + }{ + {[]string{"PATH=/usr/bin:/bin", "PYTHONPATH=/usr/local"}, map[string]string{"PATH": "/usr/bin:/bin", "PYTHONPATH": "/usr/local"}}, + {nil, nil}, + } + for _, tt := range tests { + env := Env(tt.input) + got := env.Map() + if !reflect.DeepEqual(got, tt.expected) { + t.Errorf("Env.Map(): wrong result. Want %v. Got %v", tt.expected, got) + } + } +} + +type unmarshable struct { +} + +func (unmarshable) MarshalJSON() ([]byte, error) { + return nil, errors.New("cannot marshal") +} diff --git a/vendor/github.com/fsouza/go-dockerclient/event.go b/vendor/github.com/fsouza/go-dockerclient/event.go new file mode 100644 index 000000000..eaffddb82 --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/event.go @@ -0,0 +1,304 @@ +// Copyright 2015 go-dockerclient authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package docker + +import ( + "encoding/json" + "errors" + "fmt" + "io" + "math" + "net" + "net/http" + "net/http/httputil" + "sync" + "sync/atomic" + "time" +) + +// APIEvents represents an event returned by the API. 
+type APIEvents struct { + Status string `json:"Status,omitempty" yaml:"Status,omitempty"` + ID string `json:"ID,omitempty" yaml:"ID,omitempty"` + From string `json:"From,omitempty" yaml:"From,omitempty"` + Time int64 `json:"Time,omitempty" yaml:"Time,omitempty"` +} + +type eventMonitoringState struct { + sync.RWMutex + sync.WaitGroup + enabled bool + lastSeen *int64 + C chan *APIEvents + errC chan error + listeners []chan<- *APIEvents +} + +const ( + maxMonitorConnRetries = 5 + retryInitialWaitTime = 10. +) + +var ( + // ErrNoListeners is the error returned when no listeners are available + // to receive an event. + ErrNoListeners = errors.New("no listeners present to receive event") + + // ErrListenerAlreadyExists is the error returned when the listerner already + // exists. + ErrListenerAlreadyExists = errors.New("listener already exists for docker events") + + // EOFEvent is sent when the event listener receives an EOF error. + EOFEvent = &APIEvents{ + Status: "EOF", + } +) + +// AddEventListener adds a new listener to container events in the Docker API. +// +// The parameter is a channel through which events will be sent. +func (c *Client) AddEventListener(listener chan<- *APIEvents) error { + var err error + if !c.eventMonitor.isEnabled() { + err = c.eventMonitor.enableEventMonitoring(c) + if err != nil { + return err + } + } + err = c.eventMonitor.addListener(listener) + if err != nil { + return err + } + return nil +} + +// RemoveEventListener removes a listener from the monitor. +func (c *Client) RemoveEventListener(listener chan *APIEvents) error { + err := c.eventMonitor.removeListener(listener) + if err != nil { + return err + } + if len(c.eventMonitor.listeners) == 0 { + c.eventMonitor.disableEventMonitoring() + } + return nil +} + +func (eventState *eventMonitoringState) addListener(listener chan<- *APIEvents) error { + eventState.Lock() + defer eventState.Unlock() + if listenerExists(listener, &eventState.listeners) { + return ErrListenerAlreadyExists + } + eventState.Add(1) + eventState.listeners = append(eventState.listeners, listener) + return nil +} + +func (eventState *eventMonitoringState) removeListener(listener chan<- *APIEvents) error { + eventState.Lock() + defer eventState.Unlock() + if listenerExists(listener, &eventState.listeners) { + var newListeners []chan<- *APIEvents + for _, l := range eventState.listeners { + if l != listener { + newListeners = append(newListeners, l) + } + } + eventState.listeners = newListeners + eventState.Add(-1) + } + return nil +} + +func (eventState *eventMonitoringState) closeListeners() { + for _, l := range eventState.listeners { + close(l) + eventState.Add(-1) + } + eventState.listeners = nil +} + +func listenerExists(a chan<- *APIEvents, list *[]chan<- *APIEvents) bool { + for _, b := range *list { + if b == a { + return true + } + } + return false +} + +func (eventState *eventMonitoringState) enableEventMonitoring(c *Client) error { + eventState.Lock() + defer eventState.Unlock() + if !eventState.enabled { + eventState.enabled = true + var lastSeenDefault = int64(0) + eventState.lastSeen = &lastSeenDefault + eventState.C = make(chan *APIEvents, 100) + eventState.errC = make(chan error, 1) + go eventState.monitorEvents(c) + } + return nil +} + +func (eventState *eventMonitoringState) disableEventMonitoring() error { + eventState.Lock() + defer eventState.Unlock() + + eventState.closeListeners() + + eventState.Wait() + + if eventState.enabled { + eventState.enabled = false + close(eventState.C) + close(eventState.errC) + } + 
return nil +} + +func (eventState *eventMonitoringState) monitorEvents(c *Client) { + var err error + for eventState.noListeners() { + time.Sleep(10 * time.Millisecond) + } + if err = eventState.connectWithRetry(c); err != nil { + // terminate if connect failed + eventState.disableEventMonitoring() + return + } + for eventState.isEnabled() { + timeout := time.After(100 * time.Millisecond) + select { + case ev, ok := <-eventState.C: + if !ok { + return + } + if ev == EOFEvent { + eventState.disableEventMonitoring() + return + } + eventState.updateLastSeen(ev) + go eventState.sendEvent(ev) + case err = <-eventState.errC: + if err == ErrNoListeners { + eventState.disableEventMonitoring() + return + } else if err != nil { + defer func() { go eventState.monitorEvents(c) }() + return + } + case <-timeout: + continue + } + } +} + +func (eventState *eventMonitoringState) connectWithRetry(c *Client) error { + var retries int + var err error + for err = c.eventHijack(atomic.LoadInt64(eventState.lastSeen), eventState.C, eventState.errC); err != nil && retries < maxMonitorConnRetries; retries++ { + waitTime := int64(retryInitialWaitTime * math.Pow(2, float64(retries))) + time.Sleep(time.Duration(waitTime) * time.Millisecond) + err = c.eventHijack(atomic.LoadInt64(eventState.lastSeen), eventState.C, eventState.errC) + } + return err +} + +func (eventState *eventMonitoringState) noListeners() bool { + eventState.RLock() + defer eventState.RUnlock() + return len(eventState.listeners) == 0 +} + +func (eventState *eventMonitoringState) isEnabled() bool { + eventState.RLock() + defer eventState.RUnlock() + return eventState.enabled +} + +func (eventState *eventMonitoringState) sendEvent(event *APIEvents) { + eventState.RLock() + defer eventState.RUnlock() + eventState.Add(1) + defer eventState.Done() + if eventState.enabled { + if len(eventState.listeners) == 0 { + eventState.errC <- ErrNoListeners + return + } + + for _, listener := range eventState.listeners { + listener <- event + } + } +} + +func (eventState *eventMonitoringState) updateLastSeen(e *APIEvents) { + eventState.Lock() + defer eventState.Unlock() + if atomic.LoadInt64(eventState.lastSeen) < e.Time { + atomic.StoreInt64(eventState.lastSeen, e.Time) + } +} + +func (c *Client) eventHijack(startTime int64, eventChan chan *APIEvents, errChan chan error) error { + uri := "/events" + if startTime != 0 { + uri += fmt.Sprintf("?since=%d", startTime) + } + protocol := c.endpointURL.Scheme + address := c.endpointURL.Path + if protocol != "unix" { + protocol = "tcp" + address = c.endpointURL.Host + } + var dial net.Conn + var err error + if c.TLSConfig == nil { + dial, err = c.Dialer.Dial(protocol, address) + } else { + dial, err = tlsDialWithDialer(c.Dialer, protocol, address, c.TLSConfig) + } + if err != nil { + return err + } + conn := httputil.NewClientConn(dial, nil) + req, err := http.NewRequest("GET", uri, nil) + if err != nil { + return err + } + res, err := conn.Do(req) + if err != nil { + return err + } + go func(res *http.Response, conn *httputil.ClientConn) { + defer conn.Close() + defer res.Body.Close() + decoder := json.NewDecoder(res.Body) + for { + var event APIEvents + if err = decoder.Decode(&event); err != nil { + if err == io.EOF || err == io.ErrUnexpectedEOF { + if c.eventMonitor.isEnabled() { + // Signal that we're exiting. 
+ eventChan <- EOFEvent + } + break + } + errChan <- err + } + if event.Time == 0 { + continue + } + if !c.eventMonitor.isEnabled() { + return + } + eventChan <- &event + } + }(res, conn) + return nil +} diff --git a/vendor/github.com/fsouza/go-dockerclient/event_test.go b/vendor/github.com/fsouza/go-dockerclient/event_test.go new file mode 100644 index 000000000..a308538cc --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/event_test.go @@ -0,0 +1,132 @@ +// Copyright 2014 go-dockerclient authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package docker + +import ( + "bufio" + "crypto/tls" + "crypto/x509" + "fmt" + "io/ioutil" + "net/http" + "net/http/httptest" + "strings" + "testing" + "time" +) + +func TestEventListeners(t *testing.T) { + testEventListeners("TestEventListeners", t, httptest.NewServer, NewClient) +} + +func TestTLSEventListeners(t *testing.T) { + testEventListeners("TestTLSEventListeners", t, func(handler http.Handler) *httptest.Server { + server := httptest.NewUnstartedServer(handler) + + cert, err := tls.LoadX509KeyPair("testing/data/server.pem", "testing/data/serverkey.pem") + if err != nil { + t.Fatalf("Error loading server key pair: %s", err) + } + + caCert, err := ioutil.ReadFile("testing/data/ca.pem") + if err != nil { + t.Fatalf("Error loading ca certificate: %s", err) + } + caPool := x509.NewCertPool() + if !caPool.AppendCertsFromPEM(caCert) { + t.Fatalf("Could not add ca certificate") + } + + server.TLS = &tls.Config{ + Certificates: []tls.Certificate{cert}, + RootCAs: caPool, + } + server.StartTLS() + return server + }, func(url string) (*Client, error) { + return NewTLSClient(url, "testing/data/cert.pem", "testing/data/key.pem", "testing/data/ca.pem") + }) +} + +func testEventListeners(testName string, t *testing.T, buildServer func(http.Handler) *httptest.Server, buildClient func(string) (*Client, error)) { + response := `{"status":"create","id":"dfdf82bd3881","from":"base:latest","time":1374067924} +{"status":"start","id":"dfdf82bd3881","from":"base:latest","time":1374067924} +{"status":"stop","id":"dfdf82bd3881","from":"base:latest","time":1374067966} +{"status":"destroy","id":"dfdf82bd3881","from":"base:latest","time":1374067970} +` + + server := buildServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + rsc := bufio.NewScanner(strings.NewReader(response)) + for rsc.Scan() { + w.Write([]byte(rsc.Text())) + w.(http.Flusher).Flush() + time.Sleep(10 * time.Millisecond) + } + })) + defer server.Close() + + client, err := buildClient(server.URL) + if err != nil { + t.Errorf("Failed to create client: %s", err) + } + client.SkipServerVersionCheck = true + + listener := make(chan *APIEvents, 10) + defer func() { + time.Sleep(10 * time.Millisecond) + if err := client.RemoveEventListener(listener); err != nil { + t.Error(err) + } + }() + + err = client.AddEventListener(listener) + if err != nil { + t.Errorf("Failed to add event listener: %s", err) + } + + timeout := time.After(1 * time.Second) + var count int + + for { + select { + case msg := <-listener: + t.Logf("Received: %v", *msg) + count++ + err = checkEvent(count, msg) + if err != nil { + t.Fatalf("Check event failed: %s", err) + } + if count == 4 { + return + } + case <-timeout: + t.Fatalf("%s timed out waiting on events", testName) + } + } +} + +func checkEvent(index int, event *APIEvents) error { + if event.ID != "dfdf82bd3881" { + return fmt.Errorf("event ID did not match. 
Expected dfdf82bd3881 got %s", event.ID) + } + if event.From != "base:latest" { + return fmt.Errorf("event from did not match. Expected base:latest got %s", event.From) + } + var status string + switch index { + case 1: + status = "create" + case 2: + status = "start" + case 3: + status = "stop" + case 4: + status = "destroy" + } + if event.Status != status { + return fmt.Errorf("event status did not match. Expected %s got %s", status, event.Status) + } + return nil +} diff --git a/vendor/github.com/fsouza/go-dockerclient/example_test.go b/vendor/github.com/fsouza/go-dockerclient/example_test.go new file mode 100644 index 000000000..8c2c719e6 --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/example_test.go @@ -0,0 +1,168 @@ +// Copyright 2014 go-dockerclient authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package docker_test + +import ( + "archive/tar" + "bytes" + "fmt" + "io" + "log" + "time" + + "github.com/fsouza/go-dockerclient" +) + +func ExampleClient_AttachToContainer() { + client, err := docker.NewClient("http://localhost:4243") + if err != nil { + log.Fatal(err) + } + client.SkipServerVersionCheck = true + // Reading logs from container a84849 and sending them to buf. + var buf bytes.Buffer + err = client.AttachToContainer(docker.AttachToContainerOptions{ + Container: "a84849", + OutputStream: &buf, + Logs: true, + Stdout: true, + Stderr: true, + }) + if err != nil { + log.Fatal(err) + } + log.Println(buf.String()) + buf.Reset() + err = client.AttachToContainer(docker.AttachToContainerOptions{ + Container: "a84849", + OutputStream: &buf, + Stdout: true, + Stream: true, + }) + if err != nil { + log.Fatal(err) + } + log.Println(buf.String()) +} + +func ExampleClient_CopyFromContainer() { + client, err := docker.NewClient("http://localhost:4243") + if err != nil { + log.Fatal(err) + } + cid := "a84849" + var buf bytes.Buffer + filename := "/tmp/output.txt" + err = client.CopyFromContainer(docker.CopyFromContainerOptions{ + Container: cid, + Resource: filename, + OutputStream: &buf, + }) + if err != nil { + log.Fatalf("Error while copying from %s: %s\n", cid, err) + } + content := new(bytes.Buffer) + r := bytes.NewReader(buf.Bytes()) + tr := tar.NewReader(r) + tr.Next() + if err != nil && err != io.EOF { + log.Fatal(err) + } + if _, err := io.Copy(content, tr); err != nil { + log.Fatal(err) + } + log.Println(buf.String()) +} + +func ExampleClient_BuildImage() { + client, err := docker.NewClient("http://localhost:4243") + if err != nil { + log.Fatal(err) + } + + t := time.Now() + inputbuf, outputbuf := bytes.NewBuffer(nil), bytes.NewBuffer(nil) + tr := tar.NewWriter(inputbuf) + tr.WriteHeader(&tar.Header{Name: "Dockerfile", Size: 10, ModTime: t, AccessTime: t, ChangeTime: t}) + tr.Write([]byte("FROM base\n")) + tr.Close() + opts := docker.BuildImageOptions{ + Name: "test", + InputStream: inputbuf, + OutputStream: outputbuf, + } + if err := client.BuildImage(opts); err != nil { + log.Fatal(err) + } +} + +func ExampleClient_ListenEvents() { + client, err := docker.NewClient("http://localhost:4243") + if err != nil { + log.Fatal(err) + } + + listener := make(chan *docker.APIEvents) + err = client.AddEventListener(listener) + if err != nil { + log.Fatal(err) + } + + defer func() { + + err = client.RemoveEventListener(listener) + if err != nil { + log.Fatal(err) + } + + }() + + timeout := time.After(1 * time.Second) + + for { + select { + case msg := <-listener: + log.Println(msg) + case 
<-timeout: + break + } + } + +} + +func ExampleEnv_Map() { + e := docker.Env([]string{"A=1", "B=2", "C=3"}) + envs := e.Map() + for k, v := range envs { + fmt.Printf("%s=%q\n", k, v) + } +} + +func ExampleEnv_SetJSON() { + type Person struct { + Name string + Age int + } + p := Person{Name: "Gopher", Age: 4} + var e docker.Env + err := e.SetJSON("person", p) + if err != nil { + log.Fatal(err) + } +} + +func ExampleEnv_GetJSON() { + type Person struct { + Name string + Age int + } + p := Person{Name: "Gopher", Age: 4} + var e docker.Env + e.Set("person", `{"name":"Gopher","age":4}`) + err := e.GetJSON("person", &p) + if err != nil { + log.Fatal(err) + } +} diff --git a/vendor/github.com/fsouza/go-dockerclient/exec.go b/vendor/github.com/fsouza/go-dockerclient/exec.go new file mode 100644 index 000000000..1a16da9d6 --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/exec.go @@ -0,0 +1,202 @@ +// Copyright 2015 go-dockerclient authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package docker + +import ( + "encoding/json" + "fmt" + "io" + "net/http" + "net/url" + "strconv" +) + +// Exec is the type representing a `docker exec` instance and containing the +// instance ID +type Exec struct { + ID string `json:"Id,omitempty" yaml:"Id,omitempty"` +} + +// CreateExecOptions specify parameters to the CreateExecContainer function. +// +// See https://goo.gl/1KSIb7 for more details +type CreateExecOptions struct { + AttachStdin bool `json:"AttachStdin,omitempty" yaml:"AttachStdin,omitempty"` + AttachStdout bool `json:"AttachStdout,omitempty" yaml:"AttachStdout,omitempty"` + AttachStderr bool `json:"AttachStderr,omitempty" yaml:"AttachStderr,omitempty"` + Tty bool `json:"Tty,omitempty" yaml:"Tty,omitempty"` + Cmd []string `json:"Cmd,omitempty" yaml:"Cmd,omitempty"` + Container string `json:"Container,omitempty" yaml:"Container,omitempty"` + User string `json:"User,omitempty" yaml:"User,omitempty"` +} + +// CreateExec sets up an exec instance in a running container `id`, returning the exec +// instance, or an error in case of failure. +// +// See https://goo.gl/1KSIb7 for more details +func (c *Client) CreateExec(opts CreateExecOptions) (*Exec, error) { + path := fmt.Sprintf("/containers/%s/exec", opts.Container) + resp, err := c.do("POST", path, doOptions{data: opts}) + if err != nil { + if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound { + return nil, &NoSuchContainer{ID: opts.Container} + } + return nil, err + } + defer resp.Body.Close() + var exec Exec + if err := json.NewDecoder(resp.Body).Decode(&exec); err != nil { + return nil, err + } + + return &exec, nil +} + +// StartExecOptions specify parameters to the StartExecContainer function. +// +// See https://goo.gl/iQCnto for more details +type StartExecOptions struct { + Detach bool `json:"Detach,omitempty" yaml:"Detach,omitempty"` + + Tty bool `json:"Tty,omitempty" yaml:"Tty,omitempty"` + + InputStream io.Reader `qs:"-"` + OutputStream io.Writer `qs:"-"` + ErrorStream io.Writer `qs:"-"` + + // Use raw terminal? Usually true when the container contains a TTY. + RawTerminal bool `qs:"-"` + + // If set, after a successful connect, a sentinel will be sent and then the + // client will block on receive before continuing. + // + // It must be an unbuffered channel. Using a buffered channel can lead + // to unexpected behavior. + Success chan struct{} `json:"-"` +} + +// StartExec starts a previously set up exec instance id. 
+
+// StartExec starts a previously set up exec instance id. If opts.Detach is
+// true, it returns after starting the exec command. Otherwise, it sets up an
+// interactive session with the exec command.
+//
+// See https://goo.gl/iQCnto for more details
+func (c *Client) StartExec(id string, opts StartExecOptions) error {
+	cw, err := c.StartExecNonBlocking(id, opts)
+	if err != nil {
+		return err
+	}
+	if cw != nil {
+		return cw.Wait()
+	}
+	return nil
+}
+
+// StartExecNonBlocking starts a previously set up exec instance id. If opts.Detach is
+// true, it returns after starting the exec command. Otherwise, it sets up an
+// interactive session with the exec command.
+//
+// See https://goo.gl/iQCnto for more details
+func (c *Client) StartExecNonBlocking(id string, opts StartExecOptions) (CloseWaiter, error) {
+	if id == "" {
+		return nil, &NoSuchExec{ID: id}
+	}
+
+	path := fmt.Sprintf("/exec/%s/start", id)
+
+	if opts.Detach {
+		resp, err := c.do("POST", path, doOptions{data: opts})
+		if err != nil {
+			if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound {
+				return nil, &NoSuchExec{ID: id}
+			}
+			return nil, err
+		}
+		defer resp.Body.Close()
+		return nil, nil
+	}
+
+	return c.hijack("POST", path, hijackOptions{
+		success:        opts.Success,
+		setRawTerminal: opts.RawTerminal,
+		in:             opts.InputStream,
+		stdout:         opts.OutputStream,
+		stderr:         opts.ErrorStream,
+		data:           opts,
+	})
+}
+
+// ResizeExecTTY resizes the tty session used by the exec command id. This API
+// is valid only if Tty was specified as part of creating and starting the exec
+// command.
+//
+// See https://goo.gl/e1JpsA for more details
+func (c *Client) ResizeExecTTY(id string, height, width int) error {
+	params := make(url.Values)
+	params.Set("h", strconv.Itoa(height))
+	params.Set("w", strconv.Itoa(width))
+
+	path := fmt.Sprintf("/exec/%s/resize?%s", id, params.Encode())
+	resp, err := c.do("POST", path, doOptions{})
+	if err != nil {
+		return err
+	}
+	resp.Body.Close()
+	return nil
+}
+
+// ExecProcessConfig is a type describing the command associated with an Exec
+// instance. It's used in the ExecInspect type.
+type ExecProcessConfig struct {
+	Privileged bool     `json:"privileged,omitempty" yaml:"privileged,omitempty"`
+	User       string   `json:"user,omitempty" yaml:"user,omitempty"`
+	Tty        bool     `json:"tty,omitempty" yaml:"tty,omitempty"`
+	EntryPoint string   `json:"entrypoint,omitempty" yaml:"entrypoint,omitempty"`
+	Arguments  []string `json:"arguments,omitempty" yaml:"arguments,omitempty"`
+}
+
+// ExecInspect is a type with details about an exec instance, including the
+// exit code if the command has finished running. It's returned by an API
+// call to /exec/(id)/json.
+//
+// See https://goo.gl/gPtX9R for more details
+type ExecInspect struct {
+	ID            string            `json:"ID,omitempty" yaml:"ID,omitempty"`
+	Running       bool              `json:"Running,omitempty" yaml:"Running,omitempty"`
+	ExitCode      int               `json:"ExitCode,omitempty" yaml:"ExitCode,omitempty"`
+	OpenStdin     bool              `json:"OpenStdin,omitempty" yaml:"OpenStdin,omitempty"`
+	OpenStderr    bool              `json:"OpenStderr,omitempty" yaml:"OpenStderr,omitempty"`
+	OpenStdout    bool              `json:"OpenStdout,omitempty" yaml:"OpenStdout,omitempty"`
+	ProcessConfig ExecProcessConfig `json:"ProcessConfig,omitempty" yaml:"ProcessConfig,omitempty"`
+	Container     Container         `json:"Container,omitempty" yaml:"Container,omitempty"`
+}
+
+// InspectExec returns low-level information about the exec command id.
+// +// See https://goo.gl/gPtX9R for more details +func (c *Client) InspectExec(id string) (*ExecInspect, error) { + path := fmt.Sprintf("/exec/%s/json", id) + resp, err := c.do("GET", path, doOptions{}) + if err != nil { + if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound { + return nil, &NoSuchExec{ID: id} + } + return nil, err + } + defer resp.Body.Close() + var exec ExecInspect + if err := json.NewDecoder(resp.Body).Decode(&exec); err != nil { + return nil, err + } + return &exec, nil +} + +// NoSuchExec is the error returned when a given exec instance does not exist. +type NoSuchExec struct { + ID string +} + +func (err *NoSuchExec) Error() string { + return "No such exec instance: " + err.ID +} diff --git a/vendor/github.com/fsouza/go-dockerclient/exec_test.go b/vendor/github.com/fsouza/go-dockerclient/exec_test.go new file mode 100644 index 000000000..2dc8d2100 --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/exec_test.go @@ -0,0 +1,262 @@ +// Copyright 2015 go-dockerclient authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package docker + +import ( + "bytes" + "encoding/json" + "net/http" + "net/http/httptest" + "net/url" + "reflect" + "strings" + "testing" +) + +func TestExecCreate(t *testing.T) { + jsonContainer := `{"Id": "4fa6e0f0c6786287e131c3852c58a2e01cc697a68231826813597e4994f1d6e2"}` + var expected struct{ ID string } + err := json.Unmarshal([]byte(jsonContainer), &expected) + if err != nil { + t.Fatal(err) + } + fakeRT := &FakeRoundTripper{message: jsonContainer, status: http.StatusOK} + client := newTestClient(fakeRT) + config := CreateExecOptions{ + Container: "test", + AttachStdin: true, + AttachStdout: true, + AttachStderr: false, + Tty: false, + Cmd: []string{"touch", "/tmp/file"}, + User: "a-user", + } + execObj, err := client.CreateExec(config) + if err != nil { + t.Fatal(err) + } + expectedID := "4fa6e0f0c6786287e131c3852c58a2e01cc697a68231826813597e4994f1d6e2" + if execObj.ID != expectedID { + t.Errorf("ExecCreate: wrong ID. Want %q. Got %q.", expectedID, execObj.ID) + } + req := fakeRT.requests[0] + if req.Method != "POST" { + t.Errorf("ExecCreate: wrong HTTP method. Want %q. Got %q.", "POST", req.Method) + } + expectedURL, _ := url.Parse(client.getURL("/containers/test/exec")) + if gotPath := req.URL.Path; gotPath != expectedURL.Path { + t.Errorf("ExecCreate: Wrong path in request. Want %q. Got %q.", expectedURL.Path, gotPath) + } + var gotBody struct{ ID string } + err = json.NewDecoder(req.Body).Decode(&gotBody) + if err != nil { + t.Fatal(err) + } +} + +func TestExecStartDetached(t *testing.T) { + execID := "4fa6e0f0c6786287e131c3852c58a2e01cc697a68231826813597e4994f1d6e2" + fakeRT := &FakeRoundTripper{status: http.StatusOK} + client := newTestClient(fakeRT) + config := StartExecOptions{ + Detach: true, + } + err := client.StartExec(execID, config) + if err != nil { + t.Fatal(err) + } + req := fakeRT.requests[0] + if req.Method != "POST" { + t.Errorf("ExecStart: wrong HTTP method. Want %q. Got %q.", "POST", req.Method) + } + expectedURL, _ := url.Parse(client.getURL("/exec/" + execID + "/start")) + if gotPath := req.URL.Path; gotPath != expectedURL.Path { + t.Errorf("ExecCreate: Wrong path in request. Want %q. 
Got %q.", expectedURL.Path, gotPath) + } + t.Log(req.Body) + var gotBody struct{ Detach bool } + err = json.NewDecoder(req.Body).Decode(&gotBody) + if err != nil { + t.Fatal(err) + } + if !gotBody.Detach { + t.Fatal("Expected Detach in StartExecOptions to be true") + } +} + +func TestExecStartAndAttach(t *testing.T) { + var reader = strings.NewReader("send value") + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Write([]byte{1, 0, 0, 0, 0, 0, 0, 5}) + w.Write([]byte("hello")) + })) + defer server.Close() + client, _ := NewClient(server.URL) + client.SkipServerVersionCheck = true + var stdout, stderr bytes.Buffer + success := make(chan struct{}) + execID := "4fa6e0f0c6786287e131c3852c58a2e01cc697a68231826813597e4994f1d6e2" + opts := StartExecOptions{ + OutputStream: &stdout, + ErrorStream: &stderr, + InputStream: reader, + RawTerminal: true, + Success: success, + } + go func() { + if err := client.StartExec(execID, opts); err != nil { + t.Error(err) + } + }() + <-success +} + +func TestExecResize(t *testing.T) { + execID := "4fa6e0f0c6786287e131c3852c58a2e01cc697a68231826813597e4994f1d6e2" + fakeRT := &FakeRoundTripper{status: http.StatusOK} + client := newTestClient(fakeRT) + err := client.ResizeExecTTY(execID, 10, 20) + if err != nil { + t.Fatal(err) + } + req := fakeRT.requests[0] + if req.Method != "POST" { + t.Errorf("ExecStart: wrong HTTP method. Want %q. Got %q.", "POST", req.Method) + } + expectedURL, _ := url.Parse(client.getURL("/exec/" + execID + "/resize?h=10&w=20")) + if gotPath := req.URL.RequestURI(); gotPath != expectedURL.RequestURI() { + t.Errorf("ExecCreate: Wrong path in request. Want %q. Got %q.", expectedURL.Path, gotPath) + } +} + +func TestExecInspect(t *testing.T) { + jsonExec := `{ + "ID": "32adfeeec34250f9530ce1dafd40c6233832315e065ea6b362d745e2f63cde0e", + "Running": true, + "ExitCode": 0, + "ProcessConfig": { + "privileged": false, + "user": "", + "tty": true, + "entrypoint": "bash", + "arguments": [] + }, + "OpenStdin": true, + "OpenStderr": true, + "OpenStdout": true, + "Container": { + "State": { + "Running": true, + "Paused": false, + "Restarting": false, + "OOMKilled": false, + "Pid": 29392, + "ExitCode": 0, + "Error": "", + "StartedAt": "2015-01-21T17:08:59.634662178Z", + "FinishedAt": "0001-01-01T00:00:00Z" + }, + "ID": "922cd0568714763dc725b24b7c9801016b2a3de68e2a1dc989bf5abf07740521", + "Created": "2015-01-21T17:08:59.46407212Z", + "Path": "/bin/bash", + "Args": [ + "-lc", + "tsuru_unit_agent http://192.168.50.4:8080 689b30e0ab3adce374346de2e72512138e0e8b75 gtest /var/lib/tsuru/start && tail -f /dev/null" + ], + "Config": { + "Hostname": "922cd0568714", + "Domainname": "", + "User": "ubuntu", + "Memory": 0, + "MemorySwap": 0, + "CpuShares": 100, + "Cpuset": "", + "AttachStdin": false, + "AttachStdout": false, + "AttachStderr": false, + "PortSpecs": null, + "ExposedPorts": { + "8888/tcp": {} + }, + "Tty": false, + "OpenStdin": false, + "StdinOnce": false, + "Env": [ + "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" + ], + "Cmd": [ + "/bin/bash", + "-lc", + "tsuru_unit_agent http://192.168.50.4:8080 689b30e0ab3adce374346de2e72512138e0e8b75 gtest /var/lib/tsuru/start && tail -f /dev/null" + ], + "Image": "tsuru/app-gtest", + "Volumes": null, + "WorkingDir": "", + "Entrypoint": null, + "NetworkDisabled": false, + "MacAddress": "", + "OnBuild": null + }, + "Image": "a88060b8b54fde0f7168c86742d0ce83b80f3f10925d85c98fdad9ed00bef544", + "NetworkSettings": { + "IPAddress": "172.17.0.8", + 
"IPPrefixLen": 16, + "MacAddress": "02:42:ac:11:00:08", + "LinkLocalIPv6Address": "fe80::42:acff:fe11:8", + "LinkLocalIPv6PrefixLen": 64, + "GlobalIPv6Address": "", + "GlobalIPv6PrefixLen": 0, + "Gateway": "172.17.42.1", + "IPv6Gateway": "", + "Bridge": "docker0", + "PortMapping": null, + "Ports": { + "8888/tcp": [ + { + "HostIp": "0.0.0.0", + "HostPort": "49156" + } + ] + } + }, + "ResolvConfPath": "/var/lib/docker/containers/922cd0568714763dc725b24b7c9801016b2a3de68e2a1dc989bf5abf07740521/resolv.conf", + "HostnamePath": "/var/lib/docker/containers/922cd0568714763dc725b24b7c9801016b2a3de68e2a1dc989bf5abf07740521/hostname", + "HostsPath": "/var/lib/docker/containers/922cd0568714763dc725b24b7c9801016b2a3de68e2a1dc989bf5abf07740521/hosts", + "Name": "/c7e43b72288ee9d0270a", + "Driver": "aufs", + "ExecDriver": "native-0.2", + "MountLabel": "", + "ProcessLabel": "", + "AppArmorProfile": "", + "RestartCount": 0, + "UpdateDns": false, + "Volumes": {}, + "VolumesRW": {} + } + }` + var expected ExecInspect + err := json.Unmarshal([]byte(jsonExec), &expected) + if err != nil { + t.Fatal(err) + } + fakeRT := &FakeRoundTripper{message: jsonExec, status: http.StatusOK} + client := newTestClient(fakeRT) + expectedID := "32adfeeec34250f9530ce1dafd40c6233832315e065ea6b362d745e2f63cde0e" + execObj, err := client.InspectExec(expectedID) + if err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(*execObj, expected) { + t.Errorf("ExecInspect: Expected %#v. Got %#v.", expected, *execObj) + } + req := fakeRT.requests[0] + if req.Method != "GET" { + t.Errorf("ExecInspect: wrong HTTP method. Want %q. Got %q.", "GET", req.Method) + } + expectedURL, _ := url.Parse(client.getURL("/exec/" + expectedID + "/json")) + if gotPath := fakeRT.requests[0].URL.Path; gotPath != expectedURL.Path { + t.Errorf("ExecInspect: Wrong path in request. Want %q. 
Got %q.", expectedURL.Path, gotPath) + } +} diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/CHANGELOG.md b/vendor/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/CHANGELOG.md new file mode 100644 index 000000000..ecc843272 --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/CHANGELOG.md @@ -0,0 +1,55 @@ +# 0.9.0 (Unreleased) + +* logrus/text_formatter: don't emit empty msg +* logrus/hooks/airbrake: move out of main repository +* logrus/hooks/sentry: move out of main repository +* logrus/hooks/papertrail: move out of main repository +* logrus/hooks/bugsnag: move out of main repository + +# 0.8.7 + +* logrus/core: fix possible race (#216) +* logrus/doc: small typo fixes and doc improvements + + +# 0.8.6 + +* hooks/raven: allow passing an initialized client + +# 0.8.5 + +* logrus/core: revert #208 + +# 0.8.4 + +* formatter/text: fix data race (#218) + +# 0.8.3 + +* logrus/core: fix entry log level (#208) +* logrus/core: improve performance of text formatter by 40% +* logrus/core: expose `LevelHooks` type +* logrus/core: add support for DragonflyBSD and NetBSD +* formatter/text: print structs more verbosely + +# 0.8.2 + +* logrus: fix more Fatal family functions + +# 0.8.1 + +* logrus: fix not exiting on `Fatalf` and `Fatalln` + +# 0.8.0 + +* logrus: defaults to stderr instead of stdout +* hooks/sentry: add special field for `*http.Request` +* formatter/text: ignore Windows for colors + +# 0.7.3 + +* formatter/\*: allow configuration of timestamp layout + +# 0.7.2 + +* formatter/text: Add configuration option for time format (#158) diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/LICENSE b/vendor/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/LICENSE new file mode 100644 index 000000000..f090cb42f --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2014 Simon Eskildsen + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. 
diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/README.md b/vendor/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/README.md
new file mode 100644
index 000000000..55d3a8d5f
--- /dev/null
+++ b/vendor/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/README.md
@@ -0,0 +1,365 @@
+# Logrus :walrus: [![Build Status](https://travis-ci.org/Sirupsen/logrus.svg?branch=master)](https://travis-ci.org/Sirupsen/logrus) [![godoc reference](https://godoc.org/github.com/Sirupsen/logrus?status.png)][godoc]
+
+Logrus is a structured logger for Go (golang), completely API compatible with
+the standard library logger. [Godoc][godoc]. **Please note the Logrus API is not
+yet stable (pre 1.0). Logrus itself is completely stable and has been used in
+many large deployments. The core API is unlikely to change much but please
+version control your Logrus to make sure you aren't fetching latest `master` on
+every build.**
+
+Nicely color-coded in development (when a TTY is attached, otherwise just
+plain text):
+
+![Colored](http://i.imgur.com/PY7qMwd.png)
+
+With `log.Formatter = new(logrus.JSONFormatter)`, for easy parsing by logstash
+or Splunk:
+
+```json
+{"animal":"walrus","level":"info","msg":"A group of walrus emerges from the
+ocean","size":10,"time":"2014-03-10 19:57:38.562264131 -0400 EDT"}
+
+{"level":"warning","msg":"The group's number increased tremendously!",
+"number":122,"omg":true,"time":"2014-03-10 19:57:38.562471297 -0400 EDT"}
+
+{"animal":"walrus","level":"info","msg":"A giant walrus appears!",
+"size":10,"time":"2014-03-10 19:57:38.562500591 -0400 EDT"}
+
+{"animal":"walrus","level":"info","msg":"Tremendously sized cow enters the ocean.",
+"size":9,"time":"2014-03-10 19:57:38.562527896 -0400 EDT"}
+
+{"level":"fatal","msg":"The ice breaks!","number":100,"omg":true,
+"time":"2014-03-10 19:57:38.562543128 -0400 EDT"}
+```
+
+With the default `log.Formatter = &log.TextFormatter{}` when a TTY is not
+attached, the output is compatible with the
+[logfmt](http://godoc.org/github.com/kr/logfmt) format:
+
+```text
+time="2015-03-26T01:27:38-04:00" level=debug msg="Started observing beach" animal=walrus number=8
+time="2015-03-26T01:27:38-04:00" level=info msg="A group of walrus emerges from the ocean" animal=walrus size=10
+time="2015-03-26T01:27:38-04:00" level=warning msg="The group's number increased tremendously!" number=122 omg=true
+time="2015-03-26T01:27:38-04:00" level=debug msg="Temperature changes" temperature=-4
+time="2015-03-26T01:27:38-04:00" level=panic msg="It's over 9000!" animal=orca size=9009
+time="2015-03-26T01:27:38-04:00" level=fatal msg="The ice breaks!" err=&{0x2082280c0 map[animal:orca size:9009] 2015-03-26 01:27:38.441574009 -0400 EDT panic It's over 9000!} number=100 omg=true
+exit status 1
+```
+
+#### Example
+
+The simplest way to use Logrus is simply the package-level exported logger:
+
+```go
+package main
+
+import (
+  log "github.com/Sirupsen/logrus"
+)
+
+func main() {
+  log.WithFields(log.Fields{
+    "animal": "walrus",
+  }).Info("A walrus appears")
+}
+```
+
+Note that it's completely API-compatible with the stdlib logger, so you can
+replace your `log` imports everywhere with `log "github.com/Sirupsen/logrus"`
+and you'll now have the flexibility of Logrus. You can customize it all you
+want:
+
+```go
+package main
+
+import (
+  "os"
+  log "github.com/Sirupsen/logrus"
+)
+
+func init() {
+  // Log as JSON instead of the default ASCII formatter.
+  log.SetFormatter(&log.JSONFormatter{})
+
+  // Output to stderr instead of stdout, could also be a file.
+  log.SetOutput(os.Stderr)
+
+  // Only log the warning severity or above.
+  log.SetLevel(log.WarnLevel)
+}
+
+func main() {
+  log.WithFields(log.Fields{
+    "animal": "walrus",
+    "size":   10,
+  }).Info("A group of walrus emerges from the ocean")
+
+  log.WithFields(log.Fields{
+    "omg":    true,
+    "number": 122,
+  }).Warn("The group's number increased tremendously!")
+
+  log.WithFields(log.Fields{
+    "omg":    true,
+    "number": 100,
+  }).Fatal("The ice breaks!")
+
+  // A common pattern is to re-use fields between logging statements by re-using
+  // the logrus.Entry returned from WithFields()
+  contextLogger := log.WithFields(log.Fields{
+    "common": "this is a common field",
+    "other":  "I also should be logged always",
+  })
+
+  contextLogger.Info("I'll be logged with common and other field")
+  contextLogger.Info("Me too")
+}
+```
+
+For more advanced usage such as logging to multiple locations from the same
+application, you can also create an instance of the `logrus` Logger:
+
+```go
+package main
+
+import (
+  "os"
+
+  "github.com/Sirupsen/logrus"
+)
+
+// Create a new instance of the logger. You can have any number of instances.
+var log = logrus.New()
+
+func main() {
+  // The API for setting attributes is a little different than the package level
+  // exported logger. See Godoc.
+  log.Out = os.Stderr
+
+  log.WithFields(logrus.Fields{
+    "animal": "walrus",
+    "size":   10,
+  }).Info("A group of walrus emerges from the ocean")
+}
+```
+
+#### Fields
+
+Logrus encourages careful, structured logging through logging fields instead of
+long, unparseable error messages. For example, instead of: `log.Fatalf("Failed
+to send event %s to topic %s with key %d")`, you should log the much more
+discoverable:
+
+```go
+log.WithFields(log.Fields{
+  "event": event,
+  "topic": topic,
+  "key":   key,
+}).Fatal("Failed to send event")
+```
+
+We've found this API forces you to think about logging in a way that produces
+much more useful logging messages. We've been in countless situations where just
+a single added field to a log statement that was already there would've saved us
+hours. The `WithFields` call is optional.
+
+In general, with Logrus, using any of the `printf`-family functions should be
+seen as a hint that you should add a field; however, you can still use the
+`printf`-family functions with Logrus.
+
+#### Hooks
+
+You can add hooks for logging levels. For example, to send errors to an exception
+tracking service on `Error`, `Fatal` and `Panic`, info to StatsD or log to
+multiple places simultaneously, e.g. syslog.
+
+Logrus comes with [built-in hooks](hooks/). Add those, or your custom hook, in
+`init`:
+
+```go
+import (
+  log "github.com/Sirupsen/logrus"
+  "gopkg.in/gemnasium/logrus-airbrake-hook.v2" // the package is named "airbrake"
+  logrus_syslog "github.com/Sirupsen/logrus/hooks/syslog"
+  "log/syslog"
+)
+
+func init() {
+
+  // Use the Airbrake hook to report errors that have Error severity or above to
+  // an exception tracker. You can create custom hooks, see the Hooks section.
+  log.AddHook(airbrake.NewHook(123, "xyz", "production"))
+
+  hook, err := logrus_syslog.NewSyslogHook("udp", "localhost:514", syslog.LOG_INFO, "")
+  if err != nil {
+    log.Error("Unable to connect to local syslog daemon")
+  } else {
+    log.AddHook(hook)
+  }
+}
+```
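A custom hook is just a type implementing the `Hook` interface (`Levels()` and `Fire()`, defined in hooks.go later in this patch). A minimal sketch, with a hypothetical hook type, might look like this:

	// ErrorCounterHook counts entries logged at Error level and above.
	// It is a hypothetical example, not part of the upstream README.
	type ErrorCounterHook struct {
		Count int
	}

	// Levels restricts the hook to Error, Fatal and Panic entries.
	func (h *ErrorCounterHook) Levels() []logrus.Level {
		return []logrus.Level{logrus.ErrorLevel, logrus.FatalLevel, logrus.PanicLevel}
	}

	// Fire runs for each matching entry; entry.Message and entry.Data are available.
	func (h *ErrorCounterHook) Fire(entry *logrus.Entry) error {
		h.Count++
		return nil
	}

It would be registered like any built-in hook, e.g. `log.AddHook(&ErrorCounterHook{})`.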
+Note: the syslog hook also supports connecting to a local syslog socket (e.g.
+"/dev/log", "/var/run/syslog", or "/var/run/log"). For details, see the
+[syslog hook README](hooks/syslog/README.md).
+
+| Hook | Description |
+| ----- | ----------- |
+| [Airbrake](https://github.com/gemnasium/logrus-airbrake-hook) | Send errors to the Airbrake API V3. Uses the official [`gobrake`](https://github.com/airbrake/gobrake) behind the scenes. |
+| [Airbrake "legacy"](https://github.com/gemnasium/logrus-airbrake-legacy-hook) | Send errors to an exception tracking service compatible with the Airbrake API V2. Uses [`airbrake-go`](https://github.com/tobi/airbrake-go) behind the scenes. |
+| [Papertrail](https://github.com/polds/logrus-papertrail-hook) | Send errors to the [Papertrail](https://papertrailapp.com) hosted logging service via UDP. |
+| [Syslog](https://github.com/Sirupsen/logrus/blob/master/hooks/syslog/syslog.go) | Send errors to remote syslog server. Uses standard library `log/syslog` behind the scenes. |
+| [Bugsnag](https://github.com/Shopify/logrus-bugsnag/blob/master/bugsnag.go) | Send errors to the Bugsnag exception tracking service. |
+| [Sentry](https://github.com/evalphobia/logrus_sentry) | Send errors to the Sentry error logging and aggregation service. |
+| [Hiprus](https://github.com/nubo/hiprus) | Send errors to a channel in hipchat. |
+| [Logrusly](https://github.com/sebest/logrusly) | Send logs to [Loggly](https://www.loggly.com/) |
+| [Slackrus](https://github.com/johntdyer/slackrus) | Hook for Slack chat. |
+| [Journalhook](https://github.com/wercker/journalhook) | Hook for logging to `systemd-journald` |
+| [Graylog](https://github.com/gemnasium/logrus-graylog-hook) | Hook for logging to [Graylog](http://graylog2.org/) |
+| [Raygun](https://github.com/squirkle/logrus-raygun-hook) | Hook for logging to [Raygun.io](http://raygun.io/) |
+| [LFShook](https://github.com/rifflock/lfshook) | Hook for logging to the local filesystem |
+| [Honeybadger](https://github.com/agonzalezro/logrus_honeybadger) | Hook for sending exceptions to Honeybadger |
+| [Mail](https://github.com/zbindenren/logrus_mail) | Hook for sending exceptions via mail |
+| [Rollrus](https://github.com/heroku/rollrus) | Hook for sending errors to rollbar |
+| [Fluentd](https://github.com/evalphobia/logrus_fluent) | Hook for logging to fluentd |
+| [Mongodb](https://github.com/weekface/mgorus) | Hook for logging to mongodb |
+| [InfluxDB](https://github.com/Abramovic/logrus_influxdb) | Hook for logging to influxdb |
+| [Octokit](https://github.com/dorajistyle/logrus-octokit-hook) | Hook for logging to github via octokit |
+| [DeferPanic](https://github.com/deferpanic/dp-logrus) | Hook for logging to DeferPanic |
+
+#### Level logging
+
+Logrus has six logging levels: Debug, Info, Warning, Error, Fatal and Panic.
+
+```go
+log.Debug("Useful debugging information.")
+log.Info("Something noteworthy happened!")
+log.Warn("You should probably take a look at this.")
+log.Error("Something failed but I'm not quitting.")
+// Calls os.Exit(1) after logging
+log.Fatal("Bye.")
+// Calls panic() after logging
+log.Panic("I'm bailing.")
+```
+
+You can set the logging level on a `Logger`, then it will only log entries with
+that severity or anything above it:
+
+```go
+// Will log anything that is info or above (warn, error, fatal, panic). Default.
+log.SetLevel(log.InfoLevel)
+```
+
+It may be useful to set `log.Level = logrus.DebugLevel` in a debug or verbose
+environment if your application has that.
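One pattern this enables is wiring the level to configuration. A sketch, assuming a hypothetical `LOG_LEVEL` environment variable and the `os` import (`ParseLevel` is defined in logrus.go later in this patch):

	// Derive the logging level from the environment, defaulting to Info.
	if lvl, err := logrus.ParseLevel(os.Getenv("LOG_LEVEL")); err == nil {
		logrus.SetLevel(lvl)
	} else {
		logrus.SetLevel(logrus.InfoLevel)
	}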
+
+#### Entries
+
+Besides the fields added with `WithField` or `WithFields`, some fields are
+automatically added to all logging events:
+
+1. `time`. The timestamp when the entry was created.
+2. `msg`. The logging message passed to `{Info,Warn,Error,Fatal,Panic}` after
+   the `WithFields` call. E.g. `Failed to send event.`
+3. `level`. The logging level. E.g. `info`.
+
+#### Environments
+
+Logrus has no notion of environment.
+
+If you wish for hooks and formatters to only be used in specific environments,
+you should handle that yourself. For example, if your application has a global
+variable `Environment`, which is a string representation of the environment,
+you could do:
+
+```go
+import (
+  log "github.com/Sirupsen/logrus"
+)
+
+func init() {
+  // do something here to set environment depending on an environment variable
+  // or command-line flag
+  if Environment == "production" {
+    log.SetFormatter(&log.JSONFormatter{})
+  } else {
+    // The TextFormatter is default, you don't actually have to do this.
+    log.SetFormatter(&log.TextFormatter{})
+  }
+}
+```
+
+This configuration is how `logrus` was intended to be used, but JSON in
+production is mostly only useful if you do log aggregation with tools like
+Splunk or Logstash.
+
+#### Formatters
+
+The built-in logging formatters are:
+
+* `logrus.TextFormatter`. Logs the event in colors if stdout is a tty, otherwise
+  without colors.
+  * *Note:* to force colored output when there is no TTY, set the `ForceColors`
+    field to `true`. To force no colored output even if there is a TTY, set the
+    `DisableColors` field to `true`.
+* `logrus.JSONFormatter`. Logs fields as JSON.
+* `logrus/formatters/logstash.LogstashFormatter`. Logs fields as [Logstash](http://logstash.net) Events.
+
+    ```go
+    logrus.SetFormatter(&logstash.LogstashFormatter{Type: "application_name"})
+    ```
+
+Third party logging formatters:
+
+* [`prefixed`](https://github.com/x-cray/logrus-prefixed-formatter). Displays log entry source along with alternative layout.
+* [`zalgo`](https://github.com/aybabtme/logzalgo). Invoking the P͉̫o̳̼̊w̖͈̰͎e̬͔̭͂r͚̼̹̲ ̫͓͉̳͈ÅÌ Í•Í–ÌšfÌÍÌ  ͕̲̞͖͑Z̖̫̤̫ͪa͉̬͈̗lÍ–ÍŽg̳̥o̰̥̅!̣͔̲̻͊̄ ̙̘̦̹̦.
+
+You can define your formatter by implementing the `Formatter` interface,
+requiring a `Format` method. `Format` takes an `*Entry`. `entry.Data` is a
+`Fields` type (`map[string]interface{}`) with all your fields as well as the
+default ones (see Entries section above):
+
+```go
+type MyJSONFormatter struct {
+}
+
+func (f *MyJSONFormatter) Format(entry *Entry) ([]byte, error) {
+  // Note this doesn't include Time, Level and Message which are available on
+  // the Entry. Consult `godoc` on information about those fields or read the
+  // source of the official loggers.
+  serialized, err := json.Marshal(entry.Data)
+  if err != nil {
+    return nil, fmt.Errorf("Failed to marshal fields to JSON, %v", err)
+  }
+  return append(serialized, '\n'), nil
+}
+
+// Register it with: log.SetFormatter(new(MyJSONFormatter))
+```
+
+#### Logger as an `io.Writer`
+
+Logrus can be transformed into an `io.Writer`. That writer is the end of an `io.Pipe` and it is your responsibility to close it.
+
+```go
+w := logger.Writer()
+defer w.Close()
+
+srv := http.Server{
+    // create a stdlib log.Logger that writes to
+    // logrus.Logger.
+    ErrorLog: log.New(w, "", 0),
+}
+```
+
+Each line written to that writer will be printed the usual way, using formatters
+and hooks. The level for those entries is `info`.
+
+#### Rotation
+
+Log rotation is not provided with Logrus.
+Log rotation should be done by an external program (like `logrotate(8)`) that
+can compress and delete old log entries. It should not be a feature of the
+application-level logger.
+
+#### Tools
+
+| Tool | Description |
+| ---- | ----------- |
+|[Logrus Mate](https://github.com/gogap/logrus_mate)|Logrus Mate is a tool for managing Logrus loggers: it lets you initialize a logger's level, hooks, and formatter from a config file, so the logger can be generated with a different configuration for each environment.|
+
+[godoc]: https://godoc.org/github.com/Sirupsen/logrus
diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/doc.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/doc.go
new file mode 100644
index 000000000..dddd5f877
--- /dev/null
+++ b/vendor/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/doc.go
@@ -0,0 +1,26 @@
+/*
+Package logrus is a structured logger for Go, completely API compatible with the standard library logger.
+
+
+The simplest way to use Logrus is simply the package-level exported logger:
+
+  package main
+
+  import (
+    log "github.com/Sirupsen/logrus"
+  )
+
+  func main() {
+    log.WithFields(log.Fields{
+      "animal": "walrus",
+      "number": 1,
+      "size":   10,
+    }).Info("A walrus appears")
+  }
+
+Output:
+  time="2015-09-07T08:48:33Z" level=info msg="A walrus appears" animal=walrus number=1 size=10
+
+For a full guide visit https://github.com/Sirupsen/logrus
+*/
+package logrus
diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/entry.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/entry.go
new file mode 100644
index 000000000..9ae900bc5
--- /dev/null
+++ b/vendor/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/entry.go
@@ -0,0 +1,264 @@
+package logrus
+
+import (
+	"bytes"
+	"fmt"
+	"io"
+	"os"
+	"time"
+)
+
+// Defines the key when adding errors using WithError.
+var ErrorKey = "error"
+
+// An entry is the final or intermediate Logrus logging entry. It contains all
+// the fields passed with WithField{,s}. It's finally logged when Debug, Info,
+// Warn, Error, Fatal or Panic is called on it. These objects can be reused and
+// passed around as much as you wish to avoid field duplication.
+type Entry struct {
+	Logger *Logger
+
+	// Contains all the fields set by the user.
+	Data Fields
+
+	// Time at which the log entry was created
+	Time time.Time
+
+	// Level the log entry was logged at: Debug, Info, Warn, Error, Fatal or Panic
+	Level Level
+
+	// Message passed to Debug, Info, Warn, Error, Fatal or Panic
+	Message string
+}
+
+func NewEntry(logger *Logger) *Entry {
+	return &Entry{
+		Logger: logger,
+		// Default is three fields, give a little extra room
+		Data: make(Fields, 5),
+	}
+}
+
+// Returns a reader for the entry, which is a proxy to the formatter.
+func (entry *Entry) Reader() (*bytes.Buffer, error) {
+	serialized, err := entry.Logger.Formatter.Format(entry)
+	return bytes.NewBuffer(serialized), err
+}
+
+// Returns the string representation from the reader and ultimately the
+// formatter.
+func (entry *Entry) String() (string, error) {
+	reader, err := entry.Reader()
+	if err != nil {
+		return "", err
+	}
+
+	return reader.String(), err
+}
+
+// Add an error as a single field (using the key defined in ErrorKey) to the Entry.
+func (entry *Entry) WithError(err error) *Entry {
+	return entry.WithField(ErrorKey, err)
+}
+
+// Add a single field to the Entry.
+func (entry *Entry) WithField(key string, value interface{}) *Entry { + return entry.WithFields(Fields{key: value}) +} + +// Add a map of fields to the Entry. +func (entry *Entry) WithFields(fields Fields) *Entry { + data := Fields{} + for k, v := range entry.Data { + data[k] = v + } + for k, v := range fields { + data[k] = v + } + return &Entry{Logger: entry.Logger, Data: data} +} + +// This function is not declared with a pointer value because otherwise +// race conditions will occur when using multiple goroutines +func (entry Entry) log(level Level, msg string) { + entry.Time = time.Now() + entry.Level = level + entry.Message = msg + + if err := entry.Logger.Hooks.Fire(level, &entry); err != nil { + entry.Logger.mu.Lock() + fmt.Fprintf(os.Stderr, "Failed to fire hook: %v\n", err) + entry.Logger.mu.Unlock() + } + + reader, err := entry.Reader() + if err != nil { + entry.Logger.mu.Lock() + fmt.Fprintf(os.Stderr, "Failed to obtain reader, %v\n", err) + entry.Logger.mu.Unlock() + } + + entry.Logger.mu.Lock() + defer entry.Logger.mu.Unlock() + + _, err = io.Copy(entry.Logger.Out, reader) + if err != nil { + fmt.Fprintf(os.Stderr, "Failed to write to log, %v\n", err) + } + + // To avoid Entry#log() returning a value that only would make sense for + // panic() to use in Entry#Panic(), we avoid the allocation by checking + // directly here. + if level <= PanicLevel { + panic(&entry) + } +} + +func (entry *Entry) Debug(args ...interface{}) { + if entry.Logger.Level >= DebugLevel { + entry.log(DebugLevel, fmt.Sprint(args...)) + } +} + +func (entry *Entry) Print(args ...interface{}) { + entry.Info(args...) +} + +func (entry *Entry) Info(args ...interface{}) { + if entry.Logger.Level >= InfoLevel { + entry.log(InfoLevel, fmt.Sprint(args...)) + } +} + +func (entry *Entry) Warn(args ...interface{}) { + if entry.Logger.Level >= WarnLevel { + entry.log(WarnLevel, fmt.Sprint(args...)) + } +} + +func (entry *Entry) Warning(args ...interface{}) { + entry.Warn(args...) +} + +func (entry *Entry) Error(args ...interface{}) { + if entry.Logger.Level >= ErrorLevel { + entry.log(ErrorLevel, fmt.Sprint(args...)) + } +} + +func (entry *Entry) Fatal(args ...interface{}) { + if entry.Logger.Level >= FatalLevel { + entry.log(FatalLevel, fmt.Sprint(args...)) + } + os.Exit(1) +} + +func (entry *Entry) Panic(args ...interface{}) { + if entry.Logger.Level >= PanicLevel { + entry.log(PanicLevel, fmt.Sprint(args...)) + } + panic(fmt.Sprint(args...)) +} + +// Entry Printf family functions + +func (entry *Entry) Debugf(format string, args ...interface{}) { + if entry.Logger.Level >= DebugLevel { + entry.Debug(fmt.Sprintf(format, args...)) + } +} + +func (entry *Entry) Infof(format string, args ...interface{}) { + if entry.Logger.Level >= InfoLevel { + entry.Info(fmt.Sprintf(format, args...)) + } +} + +func (entry *Entry) Printf(format string, args ...interface{}) { + entry.Infof(format, args...) +} + +func (entry *Entry) Warnf(format string, args ...interface{}) { + if entry.Logger.Level >= WarnLevel { + entry.Warn(fmt.Sprintf(format, args...)) + } +} + +func (entry *Entry) Warningf(format string, args ...interface{}) { + entry.Warnf(format, args...) 
+} + +func (entry *Entry) Errorf(format string, args ...interface{}) { + if entry.Logger.Level >= ErrorLevel { + entry.Error(fmt.Sprintf(format, args...)) + } +} + +func (entry *Entry) Fatalf(format string, args ...interface{}) { + if entry.Logger.Level >= FatalLevel { + entry.Fatal(fmt.Sprintf(format, args...)) + } + os.Exit(1) +} + +func (entry *Entry) Panicf(format string, args ...interface{}) { + if entry.Logger.Level >= PanicLevel { + entry.Panic(fmt.Sprintf(format, args...)) + } +} + +// Entry Println family functions + +func (entry *Entry) Debugln(args ...interface{}) { + if entry.Logger.Level >= DebugLevel { + entry.Debug(entry.sprintlnn(args...)) + } +} + +func (entry *Entry) Infoln(args ...interface{}) { + if entry.Logger.Level >= InfoLevel { + entry.Info(entry.sprintlnn(args...)) + } +} + +func (entry *Entry) Println(args ...interface{}) { + entry.Infoln(args...) +} + +func (entry *Entry) Warnln(args ...interface{}) { + if entry.Logger.Level >= WarnLevel { + entry.Warn(entry.sprintlnn(args...)) + } +} + +func (entry *Entry) Warningln(args ...interface{}) { + entry.Warnln(args...) +} + +func (entry *Entry) Errorln(args ...interface{}) { + if entry.Logger.Level >= ErrorLevel { + entry.Error(entry.sprintlnn(args...)) + } +} + +func (entry *Entry) Fatalln(args ...interface{}) { + if entry.Logger.Level >= FatalLevel { + entry.Fatal(entry.sprintlnn(args...)) + } + os.Exit(1) +} + +func (entry *Entry) Panicln(args ...interface{}) { + if entry.Logger.Level >= PanicLevel { + entry.Panic(entry.sprintlnn(args...)) + } +} + +// Sprintlnn => Sprint no newline. This is to get the behavior of how +// fmt.Sprintln where spaces are always added between operands, regardless of +// their type. Instead of vendoring the Sprintln implementation to spare a +// string allocation, we do the simplest thing. +func (entry *Entry) sprintlnn(args ...interface{}) string { + msg := fmt.Sprintln(args...) 
+ return msg[:len(msg)-1] +} diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/entry_test.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/entry_test.go new file mode 100644 index 000000000..2b1cc4e1d --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/entry_test.go @@ -0,0 +1,77 @@ +package logrus + +import ( + "bytes" + "fmt" + "testing" + + "github.com/fsouza/go-dockerclient/external/github.com/stretchr/testify/assert" +) + +func TestEntryWithError(t *testing.T) { + + assert := assert.New(t) + + defer func() { + ErrorKey = "error" + }() + + err := fmt.Errorf("kaboom at layer %d", 4711) + + assert.Equal(err, WithError(err).Data["error"]) + + logger := New() + logger.Out = &bytes.Buffer{} + entry := NewEntry(logger) + + assert.Equal(err, entry.WithError(err).Data["error"]) + + ErrorKey = "err" + + assert.Equal(err, entry.WithError(err).Data["err"]) + +} + +func TestEntryPanicln(t *testing.T) { + errBoom := fmt.Errorf("boom time") + + defer func() { + p := recover() + assert.NotNil(t, p) + + switch pVal := p.(type) { + case *Entry: + assert.Equal(t, "kaboom", pVal.Message) + assert.Equal(t, errBoom, pVal.Data["err"]) + default: + t.Fatalf("want type *Entry, got %T: %#v", pVal, pVal) + } + }() + + logger := New() + logger.Out = &bytes.Buffer{} + entry := NewEntry(logger) + entry.WithField("err", errBoom).Panicln("kaboom") +} + +func TestEntryPanicf(t *testing.T) { + errBoom := fmt.Errorf("boom again") + + defer func() { + p := recover() + assert.NotNil(t, p) + + switch pVal := p.(type) { + case *Entry: + assert.Equal(t, "kaboom true", pVal.Message) + assert.Equal(t, errBoom, pVal.Data["err"]) + default: + t.Fatalf("want type *Entry, got %T: %#v", pVal, pVal) + } + }() + + logger := New() + logger.Out = &bytes.Buffer{} + entry := NewEntry(logger) + entry.WithField("err", errBoom).Panicf("kaboom %v", true) +} diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/exported.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/exported.go new file mode 100644 index 000000000..9a0120ac1 --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/exported.go @@ -0,0 +1,193 @@ +package logrus + +import ( + "io" +) + +var ( + // std is the name of the standard logger in stdlib `log` + std = New() +) + +func StandardLogger() *Logger { + return std +} + +// SetOutput sets the standard logger output. +func SetOutput(out io.Writer) { + std.mu.Lock() + defer std.mu.Unlock() + std.Out = out +} + +// SetFormatter sets the standard logger formatter. +func SetFormatter(formatter Formatter) { + std.mu.Lock() + defer std.mu.Unlock() + std.Formatter = formatter +} + +// SetLevel sets the standard logger level. +func SetLevel(level Level) { + std.mu.Lock() + defer std.mu.Unlock() + std.Level = level +} + +// GetLevel returns the standard logger level. +func GetLevel() Level { + std.mu.Lock() + defer std.mu.Unlock() + return std.Level +} + +// AddHook adds a hook to the standard logger hooks. +func AddHook(hook Hook) { + std.mu.Lock() + defer std.mu.Unlock() + std.Hooks.Add(hook) +} + +// WithError creates an entry from the standard logger and adds an error to it, using the value defined in ErrorKey as key. +func WithError(err error) *Entry { + return std.WithField(ErrorKey, err) +} + +// WithField creates an entry from the standard logger and adds a field to +// it. 
If you want multiple fields, use `WithFields`. +// +// Note that it doesn't log until you call Debug, Print, Info, Warn, Fatal +// or Panic on the Entry it returns. +func WithField(key string, value interface{}) *Entry { + return std.WithField(key, value) +} + +// WithFields creates an entry from the standard logger and adds multiple +// fields to it. This is simply a helper for `WithField`, invoking it +// once for each field. +// +// Note that it doesn't log until you call Debug, Print, Info, Warn, Fatal +// or Panic on the Entry it returns. +func WithFields(fields Fields) *Entry { + return std.WithFields(fields) +} + +// Debug logs a message at level Debug on the standard logger. +func Debug(args ...interface{}) { + std.Debug(args...) +} + +// Print logs a message at level Info on the standard logger. +func Print(args ...interface{}) { + std.Print(args...) +} + +// Info logs a message at level Info on the standard logger. +func Info(args ...interface{}) { + std.Info(args...) +} + +// Warn logs a message at level Warn on the standard logger. +func Warn(args ...interface{}) { + std.Warn(args...) +} + +// Warning logs a message at level Warn on the standard logger. +func Warning(args ...interface{}) { + std.Warning(args...) +} + +// Error logs a message at level Error on the standard logger. +func Error(args ...interface{}) { + std.Error(args...) +} + +// Panic logs a message at level Panic on the standard logger. +func Panic(args ...interface{}) { + std.Panic(args...) +} + +// Fatal logs a message at level Fatal on the standard logger. +func Fatal(args ...interface{}) { + std.Fatal(args...) +} + +// Debugf logs a message at level Debug on the standard logger. +func Debugf(format string, args ...interface{}) { + std.Debugf(format, args...) +} + +// Printf logs a message at level Info on the standard logger. +func Printf(format string, args ...interface{}) { + std.Printf(format, args...) +} + +// Infof logs a message at level Info on the standard logger. +func Infof(format string, args ...interface{}) { + std.Infof(format, args...) +} + +// Warnf logs a message at level Warn on the standard logger. +func Warnf(format string, args ...interface{}) { + std.Warnf(format, args...) +} + +// Warningf logs a message at level Warn on the standard logger. +func Warningf(format string, args ...interface{}) { + std.Warningf(format, args...) +} + +// Errorf logs a message at level Error on the standard logger. +func Errorf(format string, args ...interface{}) { + std.Errorf(format, args...) +} + +// Panicf logs a message at level Panic on the standard logger. +func Panicf(format string, args ...interface{}) { + std.Panicf(format, args...) +} + +// Fatalf logs a message at level Fatal on the standard logger. +func Fatalf(format string, args ...interface{}) { + std.Fatalf(format, args...) +} + +// Debugln logs a message at level Debug on the standard logger. +func Debugln(args ...interface{}) { + std.Debugln(args...) +} + +// Println logs a message at level Info on the standard logger. +func Println(args ...interface{}) { + std.Println(args...) +} + +// Infoln logs a message at level Info on the standard logger. +func Infoln(args ...interface{}) { + std.Infoln(args...) +} + +// Warnln logs a message at level Warn on the standard logger. +func Warnln(args ...interface{}) { + std.Warnln(args...) +} + +// Warningln logs a message at level Warn on the standard logger. +func Warningln(args ...interface{}) { + std.Warningln(args...) +} + +// Errorln logs a message at level Error on the standard logger. 
+func Errorln(args ...interface{}) {
+	std.Errorln(args...)
+}
+
+// Panicln logs a message at level Panic on the standard logger.
+func Panicln(args ...interface{}) {
+	std.Panicln(args...)
+}
+
+// Fatalln logs a message at level Fatal on the standard logger.
+func Fatalln(args ...interface{}) {
+	std.Fatalln(args...)
+}
diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/formatter.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/formatter.go
new file mode 100644
index 000000000..104d689f1
--- /dev/null
+++ b/vendor/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/formatter.go
@@ -0,0 +1,48 @@
+package logrus
+
+import "time"
+
+const DefaultTimestampFormat = time.RFC3339
+
+// The Formatter interface is used to implement a custom Formatter. It takes an
+// `Entry`. It exposes all the fields, including the default ones:
+//
+// * `entry.Data["msg"]`. The message passed from Info, Warn, Error ..
+// * `entry.Data["time"]`. The timestamp.
+// * `entry.Data["level"]`. The level the entry was logged at.
+//
+// Any additional fields added with `WithField` or `WithFields` are also in
+// `entry.Data`. Format is expected to return an array of bytes which are then
+// logged to `logger.Out`.
+type Formatter interface {
+	Format(*Entry) ([]byte, error)
+}
+
+// This is to not silently overwrite `time`, `msg` and `level` fields when
+// dumping them. If this code wasn't there, doing:
+//
+//  logrus.WithField("level", 1).Info("hello")
+//
+// would just silently drop the user-provided level. Instead, with this code
+// it'll be logged as:
+//
+//  {"level": "info", "fields.level": 1, "msg": "hello", "time": "..."}
+//
+// It's not exported because it's still using Data in an opinionated way. It's to
+// avoid code duplication between the two default formatters.
+func prefixFieldClashes(data Fields) { + _, ok := data["time"] + if ok { + data["fields.time"] = data["time"] + } + + _, ok = data["msg"] + if ok { + data["fields.msg"] = data["msg"] + } + + _, ok = data["level"] + if ok { + data["fields.level"] = data["level"] + } +} diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/formatter_bench_test.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/formatter_bench_test.go new file mode 100644 index 000000000..c6d290c77 --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/formatter_bench_test.go @@ -0,0 +1,98 @@ +package logrus + +import ( + "fmt" + "testing" + "time" +) + +// smallFields is a small size data set for benchmarking +var smallFields = Fields{ + "foo": "bar", + "baz": "qux", + "one": "two", + "three": "four", +} + +// largeFields is a large size data set for benchmarking +var largeFields = Fields{ + "foo": "bar", + "baz": "qux", + "one": "two", + "three": "four", + "five": "six", + "seven": "eight", + "nine": "ten", + "eleven": "twelve", + "thirteen": "fourteen", + "fifteen": "sixteen", + "seventeen": "eighteen", + "nineteen": "twenty", + "a": "b", + "c": "d", + "e": "f", + "g": "h", + "i": "j", + "k": "l", + "m": "n", + "o": "p", + "q": "r", + "s": "t", + "u": "v", + "w": "x", + "y": "z", + "this": "will", + "make": "thirty", + "entries": "yeah", +} + +var errorFields = Fields{ + "foo": fmt.Errorf("bar"), + "baz": fmt.Errorf("qux"), +} + +func BenchmarkErrorTextFormatter(b *testing.B) { + doBenchmark(b, &TextFormatter{DisableColors: true}, errorFields) +} + +func BenchmarkSmallTextFormatter(b *testing.B) { + doBenchmark(b, &TextFormatter{DisableColors: true}, smallFields) +} + +func BenchmarkLargeTextFormatter(b *testing.B) { + doBenchmark(b, &TextFormatter{DisableColors: true}, largeFields) +} + +func BenchmarkSmallColoredTextFormatter(b *testing.B) { + doBenchmark(b, &TextFormatter{ForceColors: true}, smallFields) +} + +func BenchmarkLargeColoredTextFormatter(b *testing.B) { + doBenchmark(b, &TextFormatter{ForceColors: true}, largeFields) +} + +func BenchmarkSmallJSONFormatter(b *testing.B) { + doBenchmark(b, &JSONFormatter{}, smallFields) +} + +func BenchmarkLargeJSONFormatter(b *testing.B) { + doBenchmark(b, &JSONFormatter{}, largeFields) +} + +func doBenchmark(b *testing.B, formatter Formatter, fields Fields) { + entry := &Entry{ + Time: time.Time{}, + Level: InfoLevel, + Message: "message", + Data: fields, + } + var d []byte + var err error + for i := 0; i < b.N; i++ { + d, err = formatter.Format(entry) + if err != nil { + b.Fatal(err) + } + b.SetBytes(int64(len(d))) + } +} diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/hook_test.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/hook_test.go new file mode 100644 index 000000000..938b97495 --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/hook_test.go @@ -0,0 +1,122 @@ +package logrus + +import ( + "testing" + + "github.com/fsouza/go-dockerclient/external/github.com/stretchr/testify/assert" +) + +type TestHook struct { + Fired bool +} + +func (hook *TestHook) Fire(entry *Entry) error { + hook.Fired = true + return nil +} + +func (hook *TestHook) Levels() []Level { + return []Level{ + DebugLevel, + InfoLevel, + WarnLevel, + ErrorLevel, + FatalLevel, + PanicLevel, + } +} + +func TestHookFires(t *testing.T) { + hook := new(TestHook) + + 
LogAndAssertJSON(t, func(log *Logger) { + log.Hooks.Add(hook) + assert.Equal(t, hook.Fired, false) + + log.Print("test") + }, func(fields Fields) { + assert.Equal(t, hook.Fired, true) + }) +} + +type ModifyHook struct { +} + +func (hook *ModifyHook) Fire(entry *Entry) error { + entry.Data["wow"] = "whale" + return nil +} + +func (hook *ModifyHook) Levels() []Level { + return []Level{ + DebugLevel, + InfoLevel, + WarnLevel, + ErrorLevel, + FatalLevel, + PanicLevel, + } +} + +func TestHookCanModifyEntry(t *testing.T) { + hook := new(ModifyHook) + + LogAndAssertJSON(t, func(log *Logger) { + log.Hooks.Add(hook) + log.WithField("wow", "elephant").Print("test") + }, func(fields Fields) { + assert.Equal(t, fields["wow"], "whale") + }) +} + +func TestCanFireMultipleHooks(t *testing.T) { + hook1 := new(ModifyHook) + hook2 := new(TestHook) + + LogAndAssertJSON(t, func(log *Logger) { + log.Hooks.Add(hook1) + log.Hooks.Add(hook2) + + log.WithField("wow", "elephant").Print("test") + }, func(fields Fields) { + assert.Equal(t, fields["wow"], "whale") + assert.Equal(t, hook2.Fired, true) + }) +} + +type ErrorHook struct { + Fired bool +} + +func (hook *ErrorHook) Fire(entry *Entry) error { + hook.Fired = true + return nil +} + +func (hook *ErrorHook) Levels() []Level { + return []Level{ + ErrorLevel, + } +} + +func TestErrorHookShouldntFireOnInfo(t *testing.T) { + hook := new(ErrorHook) + + LogAndAssertJSON(t, func(log *Logger) { + log.Hooks.Add(hook) + log.Info("test") + }, func(fields Fields) { + assert.Equal(t, hook.Fired, false) + }) +} + +func TestErrorHookShouldFireOnError(t *testing.T) { + hook := new(ErrorHook) + + LogAndAssertJSON(t, func(log *Logger) { + log.Hooks.Add(hook) + log.Error("test") + }, func(fields Fields) { + assert.Equal(t, hook.Fired, true) + }) +} diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/hooks.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/hooks.go new file mode 100644 index 000000000..3f151cdc3 --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/hooks.go @@ -0,0 +1,34 @@ +package logrus + +// A hook to be fired when logging on the logging levels returned from +// `Levels()` on your implementation of the interface. Note that this is not +// fired in a goroutine or a channel with workers, you should handle such +// functionality yourself if your call is non-blocking and you don't wish for +// the logging calls for levels returned from `Levels()` to block. +type Hook interface { + Levels() []Level + Fire(*Entry) error +} + +// Internal type for storing the hooks on a logger instance. +type LevelHooks map[Level][]Hook + +// Add a hook to an instance of logger. This is called with +// `log.Hooks.Add(new(MyHook))` where `MyHook` implements the `Hook` interface. +func (hooks LevelHooks) Add(hook Hook) { + for _, level := range hook.Levels() { + hooks[level] = append(hooks[level], hook) + } +} + +// Fire all the hooks for the passed level. Used by `entry.log` to fire +// appropriate hooks for a log entry. 
+func (hooks LevelHooks) Fire(level Level, entry *Entry) error { + for _, hook := range hooks[level] { + if err := hook.Fire(entry); err != nil { + return err + } + } + + return nil +} diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/json_formatter.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/json_formatter.go new file mode 100644 index 000000000..2ad6dc5cf --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/json_formatter.go @@ -0,0 +1,41 @@ +package logrus + +import ( + "encoding/json" + "fmt" +) + +type JSONFormatter struct { + // TimestampFormat sets the format used for marshaling timestamps. + TimestampFormat string +} + +func (f *JSONFormatter) Format(entry *Entry) ([]byte, error) { + data := make(Fields, len(entry.Data)+3) + for k, v := range entry.Data { + switch v := v.(type) { + case error: + // Otherwise errors are ignored by `encoding/json` + // https://github.com/Sirupsen/logrus/issues/137 + data[k] = v.Error() + default: + data[k] = v + } + } + prefixFieldClashes(data) + + timestampFormat := f.TimestampFormat + if timestampFormat == "" { + timestampFormat = DefaultTimestampFormat + } + + data["time"] = entry.Time.Format(timestampFormat) + data["msg"] = entry.Message + data["level"] = entry.Level.String() + + serialized, err := json.Marshal(data) + if err != nil { + return nil, fmt.Errorf("Failed to marshal fields to JSON, %v", err) + } + return append(serialized, '\n'), nil +} diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/json_formatter_test.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/json_formatter_test.go new file mode 100644 index 000000000..1d7087325 --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/json_formatter_test.go @@ -0,0 +1,120 @@ +package logrus + +import ( + "encoding/json" + "errors" + + "testing" +) + +func TestErrorNotLost(t *testing.T) { + formatter := &JSONFormatter{} + + b, err := formatter.Format(WithField("error", errors.New("wild walrus"))) + if err != nil { + t.Fatal("Unable to format entry: ", err) + } + + entry := make(map[string]interface{}) + err = json.Unmarshal(b, &entry) + if err != nil { + t.Fatal("Unable to unmarshal formatted entry: ", err) + } + + if entry["error"] != "wild walrus" { + t.Fatal("Error field not set") + } +} + +func TestErrorNotLostOnFieldNotNamedError(t *testing.T) { + formatter := &JSONFormatter{} + + b, err := formatter.Format(WithField("omg", errors.New("wild walrus"))) + if err != nil { + t.Fatal("Unable to format entry: ", err) + } + + entry := make(map[string]interface{}) + err = json.Unmarshal(b, &entry) + if err != nil { + t.Fatal("Unable to unmarshal formatted entry: ", err) + } + + if entry["omg"] != "wild walrus" { + t.Fatal("Error field not set") + } +} + +func TestFieldClashWithTime(t *testing.T) { + formatter := &JSONFormatter{} + + b, err := formatter.Format(WithField("time", "right now!")) + if err != nil { + t.Fatal("Unable to format entry: ", err) + } + + entry := make(map[string]interface{}) + err = json.Unmarshal(b, &entry) + if err != nil { + t.Fatal("Unable to unmarshal formatted entry: ", err) + } + + if entry["fields.time"] != "right now!" 
{
+		t.Fatal("fields.time not set to original time field")
+	}
+
+	if entry["time"] != "0001-01-01T00:00:00Z" {
+		t.Fatal("time field not set to current time, was: ", entry["time"])
+	}
+}
+
+func TestFieldClashWithMsg(t *testing.T) {
+	formatter := &JSONFormatter{}
+
+	b, err := formatter.Format(WithField("msg", "something"))
+	if err != nil {
+		t.Fatal("Unable to format entry: ", err)
+	}
+
+	entry := make(map[string]interface{})
+	err = json.Unmarshal(b, &entry)
+	if err != nil {
+		t.Fatal("Unable to unmarshal formatted entry: ", err)
+	}
+
+	if entry["fields.msg"] != "something" {
+		t.Fatal("fields.msg not set to original msg field")
+	}
+}
+
+func TestFieldClashWithLevel(t *testing.T) {
+	formatter := &JSONFormatter{}
+
+	b, err := formatter.Format(WithField("level", "something"))
+	if err != nil {
+		t.Fatal("Unable to format entry: ", err)
+	}
+
+	entry := make(map[string]interface{})
+	err = json.Unmarshal(b, &entry)
+	if err != nil {
+		t.Fatal("Unable to unmarshal formatted entry: ", err)
+	}
+
+	if entry["fields.level"] != "something" {
+		t.Fatal("fields.level not set to original level field")
+	}
+}
+
+func TestJSONEntryEndsWithNewline(t *testing.T) {
+	formatter := &JSONFormatter{}
+
+	b, err := formatter.Format(WithField("level", "something"))
+	if err != nil {
+		t.Fatal("Unable to format entry: ", err)
+	}
+
+	if b[len(b)-1] != '\n' {
+		t.Fatal("Expected JSON log entry to end with a newline")
+	}
+}
diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/logger.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/logger.go
new file mode 100644
index 000000000..2fdb23176
--- /dev/null
+++ b/vendor/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/logger.go
@@ -0,0 +1,212 @@
+package logrus
+
+import (
+	"io"
+	"os"
+	"sync"
+)
+
+type Logger struct {
+	// The logs are `io.Copy`'d to this in a mutex. It's common to set this to a
+	// file, or leave it as the default, which is `os.Stderr`. You can also set
+	// this to something more adventurous, such as logging to Kafka.
+	Out io.Writer
+	// Hooks for the logger instance. These allow firing events based on logging
+	// levels and log entries. For example, to send errors to an error tracking
+	// service, log to StatsD or dump the core on fatal errors.
+	Hooks LevelHooks
+	// All log entries pass through the formatter before being logged to Out. The
+	// included formatters are `TextFormatter` and `JSONFormatter`, of which
+	// TextFormatter is the default. In development (when a TTY is attached) it
+	// logs with colors, but to a file it wouldn't. You can easily implement your
+	// own that implements the `Formatter` interface, see the `README` or included
+	// formatters for examples.
+	Formatter Formatter
+	// The logging level the logger should log at. This is typically (and defaults
+	// to) `logrus.Info`, which allows Info(), Warn(), Error() and Fatal() to be
+	// logged. `logrus.Debug` is useful in development.
+	Level Level
+	// Used to sync writing to the log.
+	mu sync.Mutex
+}
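The doc comment on New below shows the bare struct literal; as a slightly fuller sketch of what these fields allow, here are two independent loggers writing to different destinations (the file name is hypothetical, and the fragment assumes the `os` import):

	// Sketch: human-readable text on stderr plus machine-readable JSON in a file.
	textLog := logrus.New() // TextFormatter to os.Stderr at InfoLevel by default

	f, err := os.OpenFile("app.log", os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0644)
	if err != nil {
		textLog.Fatal(err)
	}
	jsonLog := &logrus.Logger{
		Out:       f,
		Formatter: new(logrus.JSONFormatter),
		Hooks:     make(logrus.LevelHooks),
		Level:     logrus.DebugLevel,
	}
	jsonLog.WithField("event", "startup").Debug("logger initialized")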
+func New() *Logger { + return &Logger{ + Out: os.Stderr, + Formatter: new(TextFormatter), + Hooks: make(LevelHooks), + Level: InfoLevel, + } +} + +// Adds a field to the log entry, note that you it doesn't log until you call +// Debug, Print, Info, Warn, Fatal or Panic. It only creates a log entry. +// If you want multiple fields, use `WithFields`. +func (logger *Logger) WithField(key string, value interface{}) *Entry { + return NewEntry(logger).WithField(key, value) +} + +// Adds a struct of fields to the log entry. All it does is call `WithField` for +// each `Field`. +func (logger *Logger) WithFields(fields Fields) *Entry { + return NewEntry(logger).WithFields(fields) +} + +// Add an error as single field to the log entry. All it does is call +// `WithError` for the given `error`. +func (logger *Logger) WithError(err error) *Entry { + return NewEntry(logger).WithError(err) +} + +func (logger *Logger) Debugf(format string, args ...interface{}) { + if logger.Level >= DebugLevel { + NewEntry(logger).Debugf(format, args...) + } +} + +func (logger *Logger) Infof(format string, args ...interface{}) { + if logger.Level >= InfoLevel { + NewEntry(logger).Infof(format, args...) + } +} + +func (logger *Logger) Printf(format string, args ...interface{}) { + NewEntry(logger).Printf(format, args...) +} + +func (logger *Logger) Warnf(format string, args ...interface{}) { + if logger.Level >= WarnLevel { + NewEntry(logger).Warnf(format, args...) + } +} + +func (logger *Logger) Warningf(format string, args ...interface{}) { + if logger.Level >= WarnLevel { + NewEntry(logger).Warnf(format, args...) + } +} + +func (logger *Logger) Errorf(format string, args ...interface{}) { + if logger.Level >= ErrorLevel { + NewEntry(logger).Errorf(format, args...) + } +} + +func (logger *Logger) Fatalf(format string, args ...interface{}) { + if logger.Level >= FatalLevel { + NewEntry(logger).Fatalf(format, args...) + } + os.Exit(1) +} + +func (logger *Logger) Panicf(format string, args ...interface{}) { + if logger.Level >= PanicLevel { + NewEntry(logger).Panicf(format, args...) + } +} + +func (logger *Logger) Debug(args ...interface{}) { + if logger.Level >= DebugLevel { + NewEntry(logger).Debug(args...) + } +} + +func (logger *Logger) Info(args ...interface{}) { + if logger.Level >= InfoLevel { + NewEntry(logger).Info(args...) + } +} + +func (logger *Logger) Print(args ...interface{}) { + NewEntry(logger).Info(args...) +} + +func (logger *Logger) Warn(args ...interface{}) { + if logger.Level >= WarnLevel { + NewEntry(logger).Warn(args...) + } +} + +func (logger *Logger) Warning(args ...interface{}) { + if logger.Level >= WarnLevel { + NewEntry(logger).Warn(args...) + } +} + +func (logger *Logger) Error(args ...interface{}) { + if logger.Level >= ErrorLevel { + NewEntry(logger).Error(args...) + } +} + +func (logger *Logger) Fatal(args ...interface{}) { + if logger.Level >= FatalLevel { + NewEntry(logger).Fatal(args...) + } + os.Exit(1) +} + +func (logger *Logger) Panic(args ...interface{}) { + if logger.Level >= PanicLevel { + NewEntry(logger).Panic(args...) + } +} + +func (logger *Logger) Debugln(args ...interface{}) { + if logger.Level >= DebugLevel { + NewEntry(logger).Debugln(args...) + } +} + +func (logger *Logger) Infoln(args ...interface{}) { + if logger.Level >= InfoLevel { + NewEntry(logger).Infoln(args...) + } +} + +func (logger *Logger) Println(args ...interface{}) { + NewEntry(logger).Println(args...) 
+} + +func (logger *Logger) Warnln(args ...interface{}) { + if logger.Level >= WarnLevel { + NewEntry(logger).Warnln(args...) + } +} + +func (logger *Logger) Warningln(args ...interface{}) { + if logger.Level >= WarnLevel { + NewEntry(logger).Warnln(args...) + } +} + +func (logger *Logger) Errorln(args ...interface{}) { + if logger.Level >= ErrorLevel { + NewEntry(logger).Errorln(args...) + } +} + +func (logger *Logger) Fatalln(args ...interface{}) { + if logger.Level >= FatalLevel { + NewEntry(logger).Fatalln(args...) + } + os.Exit(1) +} + +func (logger *Logger) Panicln(args ...interface{}) { + if logger.Level >= PanicLevel { + NewEntry(logger).Panicln(args...) + } +} diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/logrus.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/logrus.go new file mode 100644 index 000000000..0c09fbc26 --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/logrus.go @@ -0,0 +1,98 @@ +package logrus + +import ( + "fmt" + "log" +) + +// Fields type, used to pass to `WithFields`. +type Fields map[string]interface{} + +// Level type +type Level uint8 + +// Convert the Level to a string. E.g. PanicLevel becomes "panic". +func (level Level) String() string { + switch level { + case DebugLevel: + return "debug" + case InfoLevel: + return "info" + case WarnLevel: + return "warning" + case ErrorLevel: + return "error" + case FatalLevel: + return "fatal" + case PanicLevel: + return "panic" + } + + return "unknown" +} + +// ParseLevel takes a string level and returns the Logrus log level constant. +func ParseLevel(lvl string) (Level, error) { + switch lvl { + case "panic": + return PanicLevel, nil + case "fatal": + return FatalLevel, nil + case "error": + return ErrorLevel, nil + case "warn", "warning": + return WarnLevel, nil + case "info": + return InfoLevel, nil + case "debug": + return DebugLevel, nil + } + + var l Level + return l, fmt.Errorf("not a valid logrus Level: %q", lvl) +} + +// These are the different logging levels. You can set the logging level to log +// on your instance of logger, obtained with `logrus.New()`. +const ( + // PanicLevel level, highest level of severity. Logs and then calls panic with the + // message passed to Debug, Info, ... + PanicLevel Level = iota + // FatalLevel level. Logs and then calls `os.Exit(1)`. It will exit even if the + // logging level is set to Panic. + FatalLevel + // ErrorLevel level. Logs. Used for errors that should definitely be noted. + // Commonly used for hooks to send errors to an error tracking service. + ErrorLevel + // WarnLevel level. Non-critical entries that deserve eyes. + WarnLevel + // InfoLevel level. General operational entries about what's going on inside the + // application. + InfoLevel + // DebugLevel level. Usually only enabled when debugging. Very verbose logging. + DebugLevel +) + +// Won't compile if StdLogger can't be realized by a log.Logger +var ( + _ StdLogger = &log.Logger{} + _ StdLogger = &Entry{} + _ StdLogger = &Logger{} +) + +// StdLogger is what your logrus-enabled library should take, that way +// it'll accept a stdlib logger and a logrus logger. There's no standard +// interface, this is the closest we get, unfortunately. 
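+//
+// For illustration, a hypothetical library entry point that works with
+// either a *log.Logger or a *logrus.Logger:
+//
+//    func Describe(l StdLogger) {
+//        l.Println("accepts the stdlib logger and logrus alike")
+//    }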
+type StdLogger interface { + Print(...interface{}) + Printf(string, ...interface{}) + Println(...interface{}) + + Fatal(...interface{}) + Fatalf(string, ...interface{}) + Fatalln(...interface{}) + + Panic(...interface{}) + Panicf(string, ...interface{}) + Panicln(...interface{}) +} diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/logrus_test.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/logrus_test.go new file mode 100644 index 000000000..e8719b090 --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/logrus_test.go @@ -0,0 +1,301 @@ +package logrus + +import ( + "bytes" + "encoding/json" + "strconv" + "strings" + "sync" + "testing" + + "github.com/fsouza/go-dockerclient/external/github.com/stretchr/testify/assert" +) + +func LogAndAssertJSON(t *testing.T, log func(*Logger), assertions func(fields Fields)) { + var buffer bytes.Buffer + var fields Fields + + logger := New() + logger.Out = &buffer + logger.Formatter = new(JSONFormatter) + + log(logger) + + err := json.Unmarshal(buffer.Bytes(), &fields) + assert.Nil(t, err) + + assertions(fields) +} + +func LogAndAssertText(t *testing.T, log func(*Logger), assertions func(fields map[string]string)) { + var buffer bytes.Buffer + + logger := New() + logger.Out = &buffer + logger.Formatter = &TextFormatter{ + DisableColors: true, + } + + log(logger) + + fields := make(map[string]string) + for _, kv := range strings.Split(buffer.String(), " ") { + if !strings.Contains(kv, "=") { + continue + } + kvArr := strings.Split(kv, "=") + key := strings.TrimSpace(kvArr[0]) + val := kvArr[1] + if kvArr[1][0] == '"' { + var err error + val, err = strconv.Unquote(val) + assert.NoError(t, err) + } + fields[key] = val + } + assertions(fields) +} + +func TestPrint(t *testing.T) { + LogAndAssertJSON(t, func(log *Logger) { + log.Print("test") + }, func(fields Fields) { + assert.Equal(t, fields["msg"], "test") + assert.Equal(t, fields["level"], "info") + }) +} + +func TestInfo(t *testing.T) { + LogAndAssertJSON(t, func(log *Logger) { + log.Info("test") + }, func(fields Fields) { + assert.Equal(t, fields["msg"], "test") + assert.Equal(t, fields["level"], "info") + }) +} + +func TestWarn(t *testing.T) { + LogAndAssertJSON(t, func(log *Logger) { + log.Warn("test") + }, func(fields Fields) { + assert.Equal(t, fields["msg"], "test") + assert.Equal(t, fields["level"], "warning") + }) +} + +func TestInfolnShouldAddSpacesBetweenStrings(t *testing.T) { + LogAndAssertJSON(t, func(log *Logger) { + log.Infoln("test", "test") + }, func(fields Fields) { + assert.Equal(t, fields["msg"], "test test") + }) +} + +func TestInfolnShouldAddSpacesBetweenStringAndNonstring(t *testing.T) { + LogAndAssertJSON(t, func(log *Logger) { + log.Infoln("test", 10) + }, func(fields Fields) { + assert.Equal(t, fields["msg"], "test 10") + }) +} + +func TestInfolnShouldAddSpacesBetweenTwoNonStrings(t *testing.T) { + LogAndAssertJSON(t, func(log *Logger) { + log.Infoln(10, 10) + }, func(fields Fields) { + assert.Equal(t, fields["msg"], "10 10") + }) +} + +func TestInfoShouldAddSpacesBetweenTwoNonStrings(t *testing.T) { + LogAndAssertJSON(t, func(log *Logger) { + log.Infoln(10, 10) + }, func(fields Fields) { + assert.Equal(t, fields["msg"], "10 10") + }) +} + +func TestInfoShouldNotAddSpacesBetweenStringAndNonstring(t *testing.T) { + LogAndAssertJSON(t, func(log *Logger) { + log.Info("test", 10) + }, func(fields Fields) { + assert.Equal(t, fields["msg"], "test10") + }) +} + +func 
TestInfoShouldNotAddSpacesBetweenStrings(t *testing.T) { + LogAndAssertJSON(t, func(log *Logger) { + log.Info("test", "test") + }, func(fields Fields) { + assert.Equal(t, fields["msg"], "testtest") + }) +} + +func TestWithFieldsShouldAllowAssignments(t *testing.T) { + var buffer bytes.Buffer + var fields Fields + + logger := New() + logger.Out = &buffer + logger.Formatter = new(JSONFormatter) + + localLog := logger.WithFields(Fields{ + "key1": "value1", + }) + + localLog.WithField("key2", "value2").Info("test") + err := json.Unmarshal(buffer.Bytes(), &fields) + assert.Nil(t, err) + + assert.Equal(t, "value2", fields["key2"]) + assert.Equal(t, "value1", fields["key1"]) + + buffer = bytes.Buffer{} + fields = Fields{} + localLog.Info("test") + err = json.Unmarshal(buffer.Bytes(), &fields) + assert.Nil(t, err) + + _, ok := fields["key2"] + assert.Equal(t, false, ok) + assert.Equal(t, "value1", fields["key1"]) +} + +func TestUserSuppliedFieldDoesNotOverwriteDefaults(t *testing.T) { + LogAndAssertJSON(t, func(log *Logger) { + log.WithField("msg", "hello").Info("test") + }, func(fields Fields) { + assert.Equal(t, fields["msg"], "test") + }) +} + +func TestUserSuppliedMsgFieldHasPrefix(t *testing.T) { + LogAndAssertJSON(t, func(log *Logger) { + log.WithField("msg", "hello").Info("test") + }, func(fields Fields) { + assert.Equal(t, fields["msg"], "test") + assert.Equal(t, fields["fields.msg"], "hello") + }) +} + +func TestUserSuppliedTimeFieldHasPrefix(t *testing.T) { + LogAndAssertJSON(t, func(log *Logger) { + log.WithField("time", "hello").Info("test") + }, func(fields Fields) { + assert.Equal(t, fields["fields.time"], "hello") + }) +} + +func TestUserSuppliedLevelFieldHasPrefix(t *testing.T) { + LogAndAssertJSON(t, func(log *Logger) { + log.WithField("level", 1).Info("test") + }, func(fields Fields) { + assert.Equal(t, fields["level"], "info") + assert.Equal(t, fields["fields.level"], 1.0) // JSON has floats only + }) +} + +func TestDefaultFieldsAreNotPrefixed(t *testing.T) { + LogAndAssertText(t, func(log *Logger) { + ll := log.WithField("herp", "derp") + ll.Info("hello") + ll.Info("bye") + }, func(fields map[string]string) { + for _, fieldName := range []string{"fields.level", "fields.time", "fields.msg"} { + if _, ok := fields[fieldName]; ok { + t.Fatalf("should not have prefixed %q: %v", fieldName, fields) + } + } + }) +} + +func TestDoubleLoggingDoesntPrefixPreviousFields(t *testing.T) { + + var buffer bytes.Buffer + var fields Fields + + logger := New() + logger.Out = &buffer + logger.Formatter = new(JSONFormatter) + + llog := logger.WithField("context", "eating raw fish") + + llog.Info("looks delicious") + + err := json.Unmarshal(buffer.Bytes(), &fields) + assert.NoError(t, err, "should have decoded first message") + assert.Equal(t, len(fields), 4, "should only have msg/time/level/context fields") + assert.Equal(t, fields["msg"], "looks delicious") + assert.Equal(t, fields["context"], "eating raw fish") + + buffer.Reset() + + llog.Warn("omg it is!") + + err = json.Unmarshal(buffer.Bytes(), &fields) + assert.NoError(t, err, "should have decoded second message") + assert.Equal(t, len(fields), 4, "should only have msg/time/level/context fields") + assert.Equal(t, fields["msg"], "omg it is!") + assert.Equal(t, fields["context"], "eating raw fish") + assert.Nil(t, fields["fields.msg"], "should not have prefixed previous `msg` entry") + +} + +func TestConvertLevelToString(t *testing.T) { + assert.Equal(t, "debug", DebugLevel.String()) + assert.Equal(t, "info", InfoLevel.String()) + 
assert.Equal(t, "warning", WarnLevel.String()) + assert.Equal(t, "error", ErrorLevel.String()) + assert.Equal(t, "fatal", FatalLevel.String()) + assert.Equal(t, "panic", PanicLevel.String()) +} + +func TestParseLevel(t *testing.T) { + l, err := ParseLevel("panic") + assert.Nil(t, err) + assert.Equal(t, PanicLevel, l) + + l, err = ParseLevel("fatal") + assert.Nil(t, err) + assert.Equal(t, FatalLevel, l) + + l, err = ParseLevel("error") + assert.Nil(t, err) + assert.Equal(t, ErrorLevel, l) + + l, err = ParseLevel("warn") + assert.Nil(t, err) + assert.Equal(t, WarnLevel, l) + + l, err = ParseLevel("warning") + assert.Nil(t, err) + assert.Equal(t, WarnLevel, l) + + l, err = ParseLevel("info") + assert.Nil(t, err) + assert.Equal(t, InfoLevel, l) + + l, err = ParseLevel("debug") + assert.Nil(t, err) + assert.Equal(t, DebugLevel, l) + + l, err = ParseLevel("invalid") + assert.Equal(t, "not a valid logrus Level: \"invalid\"", err.Error()) +} + +func TestGetSetLevelRace(t *testing.T) { + wg := sync.WaitGroup{} + for i := 0; i < 100; i++ { + wg.Add(1) + go func(i int) { + defer wg.Done() + if i%2 == 0 { + SetLevel(InfoLevel) + } else { + GetLevel() + } + }(i) + + } + wg.Wait() +} diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/terminal_bsd.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/terminal_bsd.go new file mode 100644 index 000000000..71f8d67a5 --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/terminal_bsd.go @@ -0,0 +1,9 @@ +// +build darwin freebsd openbsd netbsd dragonfly + +package logrus + +import "syscall" + +const ioctlReadTermios = syscall.TIOCGETA + +type Termios syscall.Termios diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/terminal_linux.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/terminal_linux.go new file mode 100644 index 000000000..a2c0b40db --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/terminal_linux.go @@ -0,0 +1,12 @@ +// Based on ssh/terminal: +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package logrus + +import "syscall" + +const ioctlReadTermios = syscall.TCGETS + +type Termios syscall.Termios diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/terminal_notwindows.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/terminal_notwindows.go new file mode 100644 index 000000000..b343b3a37 --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/terminal_notwindows.go @@ -0,0 +1,21 @@ +// Based on ssh/terminal: +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build linux darwin freebsd openbsd netbsd dragonfly + +package logrus + +import ( + "syscall" + "unsafe" +) + +// IsTerminal returns true if stderr's file descriptor is a terminal. 
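+// It works by issuing the platform's read-termios ioctl on stderr (TCGETS on
+// Linux, TIOCGETA on the BSDs, per the build-tagged ioctlReadTermios constants
+// in the terminal_* files); the syscall succeeds only when the descriptor is
+// attached to a tty.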
+func IsTerminal() bool { + fd := syscall.Stderr + var termios Termios + _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), ioctlReadTermios, uintptr(unsafe.Pointer(&termios)), 0, 0, 0) + return err == 0 +} diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/terminal_solaris.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/terminal_solaris.go new file mode 100644 index 000000000..743df457f --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/terminal_solaris.go @@ -0,0 +1,15 @@ +// +build solaris + +package logrus + +import ( + "os" + + "github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix" +) + +// IsTerminal returns true if the given file descriptor is a terminal. +func IsTerminal() bool { + _, err := unix.IoctlGetTermios(int(os.Stdout.Fd()), unix.TCGETA) + return err == nil +} diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/terminal_windows.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/terminal_windows.go new file mode 100644 index 000000000..0146845d1 --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/terminal_windows.go @@ -0,0 +1,27 @@ +// Based on ssh/terminal: +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build windows + +package logrus + +import ( + "syscall" + "unsafe" +) + +var kernel32 = syscall.NewLazyDLL("kernel32.dll") + +var ( + procGetConsoleMode = kernel32.NewProc("GetConsoleMode") +) + +// IsTerminal returns true if stderr's file descriptor is a terminal. +func IsTerminal() bool { + fd := syscall.Stderr + var st uint32 + r, _, e := syscall.Syscall(procGetConsoleMode.Addr(), 2, uintptr(fd), uintptr(unsafe.Pointer(&st)), 0) + return r != 0 && e == 0 +} diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/text_formatter.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/text_formatter.go new file mode 100644 index 000000000..06ef20233 --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/text_formatter.go @@ -0,0 +1,161 @@ +package logrus + +import ( + "bytes" + "fmt" + "runtime" + "sort" + "strings" + "time" +) + +const ( + nocolor = 0 + red = 31 + green = 32 + yellow = 33 + blue = 34 + gray = 37 +) + +var ( + baseTimestamp time.Time + isTerminal bool +) + +func init() { + baseTimestamp = time.Now() + isTerminal = IsTerminal() +} + +func miniTS() int { + return int(time.Since(baseTimestamp) / time.Second) +} + +type TextFormatter struct { + // Set to true to bypass checking for a TTY before outputting colors. + ForceColors bool + + // Force disabling colors. + DisableColors bool + + // Disable timestamp logging. useful when output is redirected to logging + // system that already adds timestamps. + DisableTimestamp bool + + // Enable logging the full timestamp when a TTY is attached instead of just + // the time passed since beginning of execution. + FullTimestamp bool + + // TimestampFormat to use for display when a full timestamp is printed + TimestampFormat string + + // The fields are sorted by default for a consistent output. For applications + // that log extremely frequently and don't use the JSON formatter this may not + // be desired. 
+ DisableSorting bool +} + +func (f *TextFormatter) Format(entry *Entry) ([]byte, error) { + var keys []string = make([]string, 0, len(entry.Data)) + for k := range entry.Data { + keys = append(keys, k) + } + + if !f.DisableSorting { + sort.Strings(keys) + } + + b := &bytes.Buffer{} + + prefixFieldClashes(entry.Data) + + isColorTerminal := isTerminal && (runtime.GOOS != "windows") + isColored := (f.ForceColors || isColorTerminal) && !f.DisableColors + + timestampFormat := f.TimestampFormat + if timestampFormat == "" { + timestampFormat = DefaultTimestampFormat + } + if isColored { + f.printColored(b, entry, keys, timestampFormat) + } else { + if !f.DisableTimestamp { + f.appendKeyValue(b, "time", entry.Time.Format(timestampFormat)) + } + f.appendKeyValue(b, "level", entry.Level.String()) + if entry.Message != "" { + f.appendKeyValue(b, "msg", entry.Message) + } + for _, key := range keys { + f.appendKeyValue(b, key, entry.Data[key]) + } + } + + b.WriteByte('\n') + return b.Bytes(), nil +} + +func (f *TextFormatter) printColored(b *bytes.Buffer, entry *Entry, keys []string, timestampFormat string) { + var levelColor int + switch entry.Level { + case DebugLevel: + levelColor = gray + case WarnLevel: + levelColor = yellow + case ErrorLevel, FatalLevel, PanicLevel: + levelColor = red + default: + levelColor = blue + } + + levelText := strings.ToUpper(entry.Level.String())[0:4] + + if !f.FullTimestamp { + fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%04d] %-44s ", levelColor, levelText, miniTS(), entry.Message) + } else { + fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%s] %-44s ", levelColor, levelText, entry.Time.Format(timestampFormat), entry.Message) + } + for _, k := range keys { + v := entry.Data[k] + fmt.Fprintf(b, " \x1b[%dm%s\x1b[0m=%+v", levelColor, k, v) + } +} + +func needsQuoting(text string) bool { + for _, ch := range text { + if !((ch >= 'a' && ch <= 'z') || + (ch >= 'A' && ch <= 'Z') || + (ch >= '0' && ch <= '9') || + ch == '-' || ch == '.') { + return false + } + } + return true +} + +func (f *TextFormatter) appendKeyValue(b *bytes.Buffer, key string, value interface{}) { + + b.WriteString(key) + b.WriteByte('=') + + switch value := value.(type) { + case string: + if needsQuoting(value) { + b.WriteString(value) + } else { + fmt.Fprintf(b, "%q", value) + } + case error: + errmsg := value.Error() + if needsQuoting(errmsg) { + b.WriteString(errmsg) + } else { + fmt.Fprintf(b, "%q", value) + } + default: + fmt.Fprint(b, value) + } + + b.WriteByte(' ') +} diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/text_formatter_test.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/text_formatter_test.go new file mode 100644 index 000000000..e25a44f67 --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/text_formatter_test.go @@ -0,0 +1,61 @@ +package logrus + +import ( + "bytes" + "errors" + "testing" + "time" +) + +func TestQuoting(t *testing.T) { + tf := &TextFormatter{DisableColors: true} + + checkQuoting := func(q bool, value interface{}) { + b, _ := tf.Format(WithField("test", value)) + idx := bytes.Index(b, ([]byte)("test=")) + cont := bytes.Contains(b[idx+5:], []byte{'"'}) + if cont != q { + if q { + t.Errorf("quoting expected for: %#v", value) + } else { + t.Errorf("quoting not expected for: %#v", value) + } + } + } + + checkQuoting(false, "abcd") + checkQuoting(false, "v1.0") + checkQuoting(false, "1234567890") + checkQuoting(true, "/foobar") + checkQuoting(true, "x y") + 
checkQuoting(true, "x,y") + checkQuoting(false, errors.New("invalid")) + checkQuoting(true, errors.New("invalid argument")) +} + +func TestTimestampFormat(t *testing.T) { + checkTimeStr := func(format string) { + customFormatter := &TextFormatter{DisableColors: true, TimestampFormat: format} + customStr, _ := customFormatter.Format(WithField("test", "test")) + timeStart := bytes.Index(customStr, ([]byte)("time=")) + timeEnd := bytes.Index(customStr, ([]byte)("level=")) + timeStr := customStr[timeStart+5 : timeEnd-1] + if timeStr[0] == '"' && timeStr[len(timeStr)-1] == '"' { + timeStr = timeStr[1 : len(timeStr)-1] + } + if format == "" { + format = time.RFC3339 + } + _, e := time.Parse(format, (string)(timeStr)) + if e != nil { + t.Errorf("time string \"%s\" did not match provided time format \"%s\": %s", timeStr, format, e) + } + } + + checkTimeStr("2006-01-02T15:04:05.000000000Z07:00") + checkTimeStr("Mon Jan _2 15:04:05 2006") + checkTimeStr("") +} + +// TODO add tests for sorting etc., this requires a parser for the text +// formatter output. diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/writer.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/writer.go new file mode 100644 index 000000000..1e30b1c75 --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/writer.go @@ -0,0 +1,31 @@ +package logrus + +import ( + "bufio" + "io" + "runtime" +) + +func (logger *Logger) Writer() *io.PipeWriter { + reader, writer := io.Pipe() + + go logger.writerScanner(reader) + runtime.SetFinalizer(writer, writerFinalizer) + + return writer +} + +func (logger *Logger) writerScanner(reader *io.PipeReader) { + scanner := bufio.NewScanner(reader) + for scanner.Scan() { + logger.Print(scanner.Text()) + } + if err := scanner.Err(); err != nil { + logger.Errorf("Error while reading from Writer: %s", err) + } + reader.Close() +} + +func writerFinalizer(writer *io.PipeWriter) { + writer.Close() +} diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts/envfile.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts/envfile.go new file mode 100644 index 000000000..ba8b4f201 --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts/envfile.go @@ -0,0 +1,67 @@ +package opts + +import ( + "bufio" + "fmt" + "os" + "strings" +) + +// ParseEnvFile reads a file with environment variables enumerated by lines +// +// ``Environment variable names used by the utilities in the Shell and +// Utilities volume of IEEE Std 1003.1-2001 consist solely of uppercase +// letters, digits, and the '_' (underscore) from the characters defined in +// Portable Character Set and do not begin with a digit. *But*, other +// characters may be permitted by an implementation; applications shall +// tolerate the presence of such names.'' +// -- http://pubs.opengroup.org/onlinepubs/009695399/basedefs/xbd_chap08.html +// +// As of #16585, it's up to application inside docker to validate or not +// environment variables, that's why we just strip leading whitespace and +// nothing more. 
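+//
+// Behaviour sketch: given a hypothetical file containing the three lines
+// "FOO=bar", "# a comment" and "HOME", the call
+//
+//    lines, err := ParseEnvFile("app.env")
+//
+// returns []string{"FOO=bar", "HOME=" + os.Getenv("HOME")} and a nil err.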
+func ParseEnvFile(filename string) ([]string, error) { + fh, err := os.Open(filename) + if err != nil { + return []string{}, err + } + defer fh.Close() + + lines := []string{} + scanner := bufio.NewScanner(fh) + for scanner.Scan() { + // trim the line from all leading whitespace first + line := strings.TrimLeft(scanner.Text(), whiteSpaces) + // line is not empty, and not starting with '#' + if len(line) > 0 && !strings.HasPrefix(line, "#") { + data := strings.SplitN(line, "=", 2) + + // trim the front of a variable, but nothing else + variable := strings.TrimLeft(data[0], whiteSpaces) + if strings.ContainsAny(variable, whiteSpaces) { + return []string{}, ErrBadEnvVariable{fmt.Sprintf("variable '%s' has white spaces", variable)} + } + + if len(data) > 1 { + + // pass the value through, no trimming + lines = append(lines, fmt.Sprintf("%s=%s", variable, data[1])) + } else { + // if only a pass-through variable is given, clean it up. + lines = append(lines, fmt.Sprintf("%s=%s", strings.TrimSpace(line), os.Getenv(line))) + } + } + } + return lines, scanner.Err() +} + +var whiteSpaces = " \t" + +// ErrBadEnvVariable typed error for bad environment variable +type ErrBadEnvVariable struct { + msg string +} + +func (e ErrBadEnvVariable) Error() string { + return fmt.Sprintf("poorly formatted environment: %s", e.msg) +} diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts/envfile_test.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts/envfile_test.go new file mode 100644 index 000000000..a2e2200fa --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts/envfile_test.go @@ -0,0 +1,142 @@ +package opts + +import ( + "bufio" + "fmt" + "io/ioutil" + "os" + "reflect" + "strings" + "testing" +) + +func tmpFileWithContent(content string, t *testing.T) string { + tmpFile, err := ioutil.TempFile("", "envfile-test") + if err != nil { + t.Fatal(err) + } + defer tmpFile.Close() + + tmpFile.WriteString(content) + return tmpFile.Name() +} + +// Test ParseEnvFile for a file with a few well formatted lines +func TestParseEnvFileGoodFile(t *testing.T) { + content := `foo=bar + baz=quux +# comment + +_foobar=foobaz +with.dots=working +and_underscore=working too +` + // Adding a newline + a line with pure whitespace. + // This is being done like this instead of the block above + // because it's common for editors to trim trailing whitespace + // from lines, which becomes annoying since that's the + // exact thing we need to test. 
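+	// The programmatic append below is exactly that pure-whitespace case.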
+ content += "\n \t " + tmpFile := tmpFileWithContent(content, t) + defer os.Remove(tmpFile) + + lines, err := ParseEnvFile(tmpFile) + if err != nil { + t.Fatal(err) + } + + expectedLines := []string{ + "foo=bar", + "baz=quux", + "_foobar=foobaz", + "with.dots=working", + "and_underscore=working too", + } + + if !reflect.DeepEqual(lines, expectedLines) { + t.Fatal("lines not equal to expected_lines") + } +} + +// Test ParseEnvFile for an empty file +func TestParseEnvFileEmptyFile(t *testing.T) { + tmpFile := tmpFileWithContent("", t) + defer os.Remove(tmpFile) + + lines, err := ParseEnvFile(tmpFile) + if err != nil { + t.Fatal(err) + } + + if len(lines) != 0 { + t.Fatal("lines not empty; expected empty") + } +} + +// Test ParseEnvFile for a non existent file +func TestParseEnvFileNonExistentFile(t *testing.T) { + _, err := ParseEnvFile("foo_bar_baz") + if err == nil { + t.Fatal("ParseEnvFile succeeded; expected failure") + } + if _, ok := err.(*os.PathError); !ok { + t.Fatalf("Expected a PathError, got [%v]", err) + } +} + +// Test ParseEnvFile for a badly formatted file +func TestParseEnvFileBadlyFormattedFile(t *testing.T) { + content := `foo=bar + f =quux +` + + tmpFile := tmpFileWithContent(content, t) + defer os.Remove(tmpFile) + + _, err := ParseEnvFile(tmpFile) + if err == nil { + t.Fatalf("Expected a ErrBadEnvVariable, got nothing") + } + if _, ok := err.(ErrBadEnvVariable); !ok { + t.Fatalf("Expected a ErrBadEnvVariable, got [%v]", err) + } + expectedMessage := "poorly formatted environment: variable 'f ' has white spaces" + if err.Error() != expectedMessage { + t.Fatalf("Expected [%v], got [%v]", expectedMessage, err.Error()) + } +} + +// Test ParseEnvFile for a file with a line exceeding bufio.MaxScanTokenSize +func TestParseEnvFileLineTooLongFile(t *testing.T) { + content := strings.Repeat("a", bufio.MaxScanTokenSize+42) + content = fmt.Sprint("foo=", content) + + tmpFile := tmpFileWithContent(content, t) + defer os.Remove(tmpFile) + + _, err := ParseEnvFile(tmpFile) + if err == nil { + t.Fatal("ParseEnvFile succeeded; expected failure") + } +} + +// ParseEnvFile with a random file, pass through +func TestParseEnvFileRandomFile(t *testing.T) { + content := `first line +another invalid line` + tmpFile := tmpFileWithContent(content, t) + defer os.Remove(tmpFile) + + _, err := ParseEnvFile(tmpFile) + + if err == nil { + t.Fatalf("Expected a ErrBadEnvVariable, got nothing") + } + if _, ok := err.(ErrBadEnvVariable); !ok { + t.Fatalf("Expected a ErrBadEnvvariable, got [%v]", err) + } + expectedMessage := "poorly formatted environment: variable 'first line' has white spaces" + if err.Error() != expectedMessage { + t.Fatalf("Expected [%v], got [%v]", expectedMessage, err.Error()) + } +} diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts/hosts.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts/hosts.go new file mode 100644 index 000000000..d1b698541 --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts/hosts.go @@ -0,0 +1,146 @@ +package opts + +import ( + "fmt" + "net" + "net/url" + "runtime" + "strconv" + "strings" +) + +var ( + // DefaultHTTPPort Default HTTP Port used if only the protocol is provided to -H flag e.g. docker daemon -H tcp:// + // TODO Windows. DefaultHTTPPort is only used on Windows if a -H parameter + // is not supplied. A better longer term solution would be to use a named + // pipe as the default on the Windows daemon. 
+ // These are the IANA registered port numbers for use with Docker + // see http://www.iana.org/assignments/service-names-port-numbers/service-names-port-numbers.xhtml?search=docker + DefaultHTTPPort = 2375 // Default HTTP Port + // DefaultTLSHTTPPort Default HTTP Port used when TLS enabled + DefaultTLSHTTPPort = 2376 // Default TLS encrypted HTTP Port + // DefaultUnixSocket Path for the unix socket. + // Docker daemon by default always listens on the default unix socket + DefaultUnixSocket = "/var/run/docker.sock" + // DefaultTCPHost constant defines the default host string used by docker on Windows + DefaultTCPHost = fmt.Sprintf("tcp://%s:%d", DefaultHTTPHost, DefaultHTTPPort) + // DefaultTLSHost constant defines the default host string used by docker for TLS sockets + DefaultTLSHost = fmt.Sprintf("tcp://%s:%d", DefaultHTTPHost, DefaultTLSHTTPPort) +) + +// ValidateHost validates that the specified string is a valid host and returns it. +func ValidateHost(val string) (string, error) { + _, err := parseDockerDaemonHost(DefaultTCPHost, DefaultTLSHost, DefaultUnixSocket, "", val) + if err != nil { + return val, err + } + // Note: unlike most flag validators, we don't return the mutated value here + // we need to know what the user entered later (using ParseHost) to adjust for tls + return val, nil +} + +// ParseHost and set defaults for a Daemon host string +func ParseHost(defaultHost, val string) (string, error) { + host, err := parseDockerDaemonHost(DefaultTCPHost, DefaultTLSHost, DefaultUnixSocket, defaultHost, val) + if err != nil { + return val, err + } + return host, nil +} + +// parseDockerDaemonHost parses the specified address and returns an address that will be used as the host. +// Depending of the address specified, will use the defaultTCPAddr or defaultUnixAddr +// defaultUnixAddr must be a absolute file path (no `unix://` prefix) +// defaultTCPAddr must be the full `tcp://host:port` form +func parseDockerDaemonHost(defaultTCPAddr, defaultTLSHost, defaultUnixAddr, defaultAddr, addr string) (string, error) { + addr = strings.TrimSpace(addr) + if addr == "" { + if defaultAddr == defaultTLSHost { + return defaultTLSHost, nil + } + if runtime.GOOS != "windows" { + return fmt.Sprintf("unix://%s", defaultUnixAddr), nil + } + return defaultTCPAddr, nil + } + addrParts := strings.Split(addr, "://") + if len(addrParts) == 1 { + addrParts = []string{"tcp", addrParts[0]} + } + + switch addrParts[0] { + case "tcp": + return parseTCPAddr(addrParts[1], defaultTCPAddr) + case "unix": + return parseUnixAddr(addrParts[1], defaultUnixAddr) + case "fd": + return addr, nil + default: + return "", fmt.Errorf("Invalid bind address format: %s", addr) + } +} + +// parseUnixAddr parses and validates that the specified address is a valid UNIX +// socket address. It returns a formatted UNIX socket address, either using the +// address parsed from addr, or the contents of defaultAddr if addr is a blank +// string. +func parseUnixAddr(addr string, defaultAddr string) (string, error) { + addr = strings.TrimPrefix(addr, "unix://") + if strings.Contains(addr, "://") { + return "", fmt.Errorf("Invalid proto, expected unix: %s", addr) + } + if addr == "" { + addr = defaultAddr + } + return fmt.Sprintf("unix://%s", addr), nil +} + +// parseTCPAddr parses and validates that the specified address is a valid TCP +// address. It returns a formatted TCP address, either using the address parsed +// from tryAddr, or the contents of defaultAddr if tryAddr is a blank string. 
+// tryAddr is expected to have already been Trim()'d +// defaultAddr must be in the full `tcp://host:port` form +func parseTCPAddr(tryAddr string, defaultAddr string) (string, error) { + if tryAddr == "" || tryAddr == "tcp://" { + return defaultAddr, nil + } + addr := strings.TrimPrefix(tryAddr, "tcp://") + if strings.Contains(addr, "://") || addr == "" { + return "", fmt.Errorf("Invalid proto, expected tcp: %s", tryAddr) + } + + defaultAddr = strings.TrimPrefix(defaultAddr, "tcp://") + defaultHost, defaultPort, err := net.SplitHostPort(defaultAddr) + if err != nil { + return "", err + } + // url.Parse fails for trailing colon on IPv6 brackets on Go 1.5, but + // not 1.4. See https://github.com/golang/go/issues/12200 and + // https://github.com/golang/go/issues/6530. + if strings.HasSuffix(addr, "]:") { + addr += defaultPort + } + + u, err := url.Parse("tcp://" + addr) + if err != nil { + return "", err + } + + host, port, err := net.SplitHostPort(u.Host) + if err != nil { + return "", fmt.Errorf("Invalid bind address format: %s", tryAddr) + } + + if host == "" { + host = defaultHost + } + if port == "" { + port = defaultPort + } + p, err := strconv.Atoi(port) + if err != nil && p == 0 { + return "", fmt.Errorf("Invalid bind address format: %s", tryAddr) + } + + return fmt.Sprintf("tcp://%s%s", net.JoinHostPort(host, port), u.Path), nil +} diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts/hosts_test.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts/hosts_test.go new file mode 100644 index 000000000..e497e2865 --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts/hosts_test.go @@ -0,0 +1,164 @@ +package opts + +import ( + "runtime" + "testing" +) + +func TestParseHost(t *testing.T) { + invalid := map[string]string{ + "anything": "Invalid bind address format: anything", + "something with spaces": "Invalid bind address format: something with spaces", + "://": "Invalid bind address format: ://", + "unknown://": "Invalid bind address format: unknown://", + "tcp://:port": "Invalid bind address format: :port", + "tcp://invalid": "Invalid bind address format: invalid", + "tcp://invalid:port": "Invalid bind address format: invalid:port", + } + const defaultHTTPHost = "tcp://127.0.0.1:2375" + var defaultHOST = "unix:///var/run/docker.sock" + + if runtime.GOOS == "windows" { + defaultHOST = defaultHTTPHost + } + valid := map[string]string{ + "": defaultHOST, + "fd://": "fd://", + "fd://something": "fd://something", + "tcp://host:": "tcp://host:2375", + "tcp://": "tcp://localhost:2375", + "tcp://:2375": "tcp://localhost:2375", // default ip address + "tcp://:2376": "tcp://localhost:2376", // default ip address + "tcp://0.0.0.0:8080": "tcp://0.0.0.0:8080", + "tcp://192.168.0.0:12000": "tcp://192.168.0.0:12000", + "tcp://192.168:8080": "tcp://192.168:8080", + "tcp://0.0.0.0:1234567890": "tcp://0.0.0.0:1234567890", // yeah it's valid :P + "tcp://docker.com:2375": "tcp://docker.com:2375", + "unix://": "unix:///var/run/docker.sock", // default unix:// value + "unix://path/to/socket": "unix://path/to/socket", + } + + for value, errorMessage := range invalid { + if _, err := ParseHost(defaultHTTPHost, value); err == nil || err.Error() != errorMessage { + t.Fatalf("Expected an error for %v with [%v], got [%v]", value, errorMessage, err) + } + } + for value, expected := range valid { + if actual, err := ParseHost(defaultHTTPHost, value); err != nil || actual != expected { + 
t.Fatalf("Expected for %v [%v], got [%v, %v]", value, expected, actual, err) + } + } +} + +func TestParseDockerDaemonHost(t *testing.T) { + var ( + defaultHTTPHost = "tcp://localhost:2375" + defaultHTTPSHost = "tcp://localhost:2376" + defaultUnix = "/var/run/docker.sock" + defaultHOST = "unix:///var/run/docker.sock" + ) + if runtime.GOOS == "windows" { + defaultHOST = defaultHTTPHost + } + invalids := map[string]string{ + "0.0.0.0": "Invalid bind address format: 0.0.0.0", + "tcp:a.b.c.d": "Invalid bind address format: tcp:a.b.c.d", + "tcp:a.b.c.d/path": "Invalid bind address format: tcp:a.b.c.d/path", + "udp://127.0.0.1": "Invalid bind address format: udp://127.0.0.1", + "udp://127.0.0.1:2375": "Invalid bind address format: udp://127.0.0.1:2375", + "tcp://unix:///run/docker.sock": "Invalid bind address format: unix", + "tcp": "Invalid bind address format: tcp", + "unix": "Invalid bind address format: unix", + "fd": "Invalid bind address format: fd", + } + valids := map[string]string{ + "0.0.0.1:": "tcp://0.0.0.1:2375", + "0.0.0.1:5555": "tcp://0.0.0.1:5555", + "0.0.0.1:5555/path": "tcp://0.0.0.1:5555/path", + "[::1]:": "tcp://[::1]:2375", + "[::1]:5555/path": "tcp://[::1]:5555/path", + "[0:0:0:0:0:0:0:1]:": "tcp://[0:0:0:0:0:0:0:1]:2375", + "[0:0:0:0:0:0:0:1]:5555/path": "tcp://[0:0:0:0:0:0:0:1]:5555/path", + ":6666": "tcp://localhost:6666", + ":6666/path": "tcp://localhost:6666/path", + "": defaultHOST, + " ": defaultHOST, + " ": defaultHOST, + "tcp://": defaultHTTPHost, + "tcp://:7777": "tcp://localhost:7777", + "tcp://:7777/path": "tcp://localhost:7777/path", + " tcp://:7777/path ": "tcp://localhost:7777/path", + "unix:///run/docker.sock": "unix:///run/docker.sock", + "unix://": "unix:///var/run/docker.sock", + "fd://": "fd://", + "fd://something": "fd://something", + "localhost:": "tcp://localhost:2375", + "localhost:5555": "tcp://localhost:5555", + "localhost:5555/path": "tcp://localhost:5555/path", + } + for invalidAddr, expectedError := range invalids { + if addr, err := parseDockerDaemonHost(defaultHTTPHost, defaultHTTPSHost, defaultUnix, "", invalidAddr); err == nil || err.Error() != expectedError { + t.Errorf("tcp %v address expected error %v return, got %s and addr %v", invalidAddr, expectedError, err, addr) + } + } + for validAddr, expectedAddr := range valids { + if addr, err := parseDockerDaemonHost(defaultHTTPHost, defaultHTTPSHost, defaultUnix, "", validAddr); err != nil || addr != expectedAddr { + t.Errorf("%v -> expected %v, got (%v) addr (%v)", validAddr, expectedAddr, err, addr) + } + } +} + +func TestParseTCP(t *testing.T) { + var ( + defaultHTTPHost = "tcp://127.0.0.1:2376" + ) + invalids := map[string]string{ + "0.0.0.0": "Invalid bind address format: 0.0.0.0", + "tcp:a.b.c.d": "Invalid bind address format: tcp:a.b.c.d", + "tcp:a.b.c.d/path": "Invalid bind address format: tcp:a.b.c.d/path", + "udp://127.0.0.1": "Invalid proto, expected tcp: udp://127.0.0.1", + "udp://127.0.0.1:2375": "Invalid proto, expected tcp: udp://127.0.0.1:2375", + } + valids := map[string]string{ + "": defaultHTTPHost, + "tcp://": defaultHTTPHost, + "0.0.0.1:": "tcp://0.0.0.1:2376", + "0.0.0.1:5555": "tcp://0.0.0.1:5555", + "0.0.0.1:5555/path": "tcp://0.0.0.1:5555/path", + ":6666": "tcp://127.0.0.1:6666", + ":6666/path": "tcp://127.0.0.1:6666/path", + "tcp://:7777": "tcp://127.0.0.1:7777", + "tcp://:7777/path": "tcp://127.0.0.1:7777/path", + "[::1]:": "tcp://[::1]:2376", + "[::1]:5555": "tcp://[::1]:5555", + "[::1]:5555/path": "tcp://[::1]:5555/path", + "[0:0:0:0:0:0:0:1]:": 
"tcp://[0:0:0:0:0:0:0:1]:2376", + "[0:0:0:0:0:0:0:1]:5555": "tcp://[0:0:0:0:0:0:0:1]:5555", + "[0:0:0:0:0:0:0:1]:5555/path": "tcp://[0:0:0:0:0:0:0:1]:5555/path", + "localhost:": "tcp://localhost:2376", + "localhost:5555": "tcp://localhost:5555", + "localhost:5555/path": "tcp://localhost:5555/path", + } + for invalidAddr, expectedError := range invalids { + if addr, err := parseTCPAddr(invalidAddr, defaultHTTPHost); err == nil || err.Error() != expectedError { + t.Errorf("tcp %v address expected error %v return, got %s and addr %v", invalidAddr, expectedError, err, addr) + } + } + for validAddr, expectedAddr := range valids { + if addr, err := parseTCPAddr(validAddr, defaultHTTPHost); err != nil || addr != expectedAddr { + t.Errorf("%v -> expected %v, got %v and addr %v", validAddr, expectedAddr, err, addr) + } + } +} + +func TestParseInvalidUnixAddrInvalid(t *testing.T) { + if _, err := parseUnixAddr("tcp://127.0.0.1", "unix:///var/run/docker.sock"); err == nil || err.Error() != "Invalid proto, expected unix: tcp://127.0.0.1" { + t.Fatalf("Expected an error, got %v", err) + } + if _, err := parseUnixAddr("unix://tcp://127.0.0.1", "/var/run/docker.sock"); err == nil || err.Error() != "Invalid proto, expected unix: tcp://127.0.0.1" { + t.Fatalf("Expected an error, got %v", err) + } + if v, err := parseUnixAddr("", "/var/run/docker.sock"); err != nil || v != "unix:///var/run/docker.sock" { + t.Fatalf("Expected an %v, got %v", v, "unix:///var/run/docker.sock") + } +} diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts/hosts_unix.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts/hosts_unix.go new file mode 100644 index 000000000..611407a9d --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts/hosts_unix.go @@ -0,0 +1,8 @@ +// +build !windows + +package opts + +import "fmt" + +// DefaultHost constant defines the default host string used by docker on other hosts than Windows +var DefaultHost = fmt.Sprintf("unix://%s", DefaultUnixSocket) diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts/hosts_windows.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts/hosts_windows.go new file mode 100644 index 000000000..ec52e9a70 --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts/hosts_windows.go @@ -0,0 +1,6 @@ +// +build windows + +package opts + +// DefaultHost constant defines the default host string used by docker on Windows +var DefaultHost = DefaultTCPHost diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts/ip.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts/ip.go new file mode 100644 index 000000000..c7b0dc994 --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts/ip.go @@ -0,0 +1,42 @@ +package opts + +import ( + "fmt" + "net" +) + +// IPOpt holds an IP. It is used to store values from CLI flags. +type IPOpt struct { + *net.IP +} + +// NewIPOpt creates a new IPOpt from a reference net.IP and a +// string representation of an IP. If the string is not a valid +// IP it will fallback to the specified reference. +func NewIPOpt(ref *net.IP, defaultVal string) *IPOpt { + o := &IPOpt{ + IP: ref, + } + o.Set(defaultVal) + return o +} + +// Set sets an IPv4 or IPv6 address from a given string. 
If the given +// string is not parseable as an IP address it returns an error. +func (o *IPOpt) Set(val string) error { + ip := net.ParseIP(val) + if ip == nil { + return fmt.Errorf("%s is not an ip address", val) + } + *o.IP = ip + return nil +} + +// String returns the IP address stored in the IPOpt. If stored IP is a +// nil pointer, it returns an empty string. +func (o *IPOpt) String() string { + if *o.IP == nil { + return "" + } + return o.IP.String() +} diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts/ip_test.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts/ip_test.go new file mode 100644 index 000000000..1027d84a0 --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts/ip_test.go @@ -0,0 +1,54 @@ +package opts + +import ( + "net" + "testing" +) + +func TestIpOptString(t *testing.T) { + addresses := []string{"", "0.0.0.0"} + var ip net.IP + + for _, address := range addresses { + stringAddress := NewIPOpt(&ip, address).String() + if stringAddress != address { + t.Fatalf("IpOpt string should be `%s`, not `%s`", address, stringAddress) + } + } +} + +func TestNewIpOptInvalidDefaultVal(t *testing.T) { + ip := net.IPv4(127, 0, 0, 1) + defaultVal := "Not an ip" + + ipOpt := NewIPOpt(&ip, defaultVal) + + expected := "127.0.0.1" + if ipOpt.String() != expected { + t.Fatalf("Expected [%v], got [%v]", expected, ipOpt.String()) + } +} + +func TestNewIpOptValidDefaultVal(t *testing.T) { + ip := net.IPv4(127, 0, 0, 1) + defaultVal := "192.168.1.1" + + ipOpt := NewIPOpt(&ip, defaultVal) + + expected := "192.168.1.1" + if ipOpt.String() != expected { + t.Fatalf("Expected [%v], got [%v]", expected, ipOpt.String()) + } +} + +func TestIpOptSetInvalidVal(t *testing.T) { + ip := net.IPv4(127, 0, 0, 1) + ipOpt := &IPOpt{IP: &ip} + + invalidIP := "invalid ip" + expectedError := "invalid ip is not an ip address" + err := ipOpt.Set(invalidIP) + if err == nil || err.Error() != expectedError { + t.Fatalf("Expected an Error with [%v], got [%v]", expectedError, err.Error()) + } +} diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts/opts.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts/opts.go new file mode 100644 index 000000000..b244f5a3a --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts/opts.go @@ -0,0 +1,252 @@ +package opts + +import ( + "fmt" + "net" + "os" + "regexp" + "strings" +) + +var ( + alphaRegexp = regexp.MustCompile(`[a-zA-Z]`) + domainRegexp = regexp.MustCompile(`^(:?(:?[a-zA-Z0-9]|(:?[a-zA-Z0-9][a-zA-Z0-9\-]*[a-zA-Z0-9]))(:?\.(:?[a-zA-Z0-9]|(:?[a-zA-Z0-9][a-zA-Z0-9\-]*[a-zA-Z0-9])))*)\.?\s*$`) +) + +// ListOpts holds a list of values and a validation function. +type ListOpts struct { + values *[]string + validator ValidatorFctType +} + +// NewListOpts creates a new ListOpts with the specified validator. +func NewListOpts(validator ValidatorFctType) ListOpts { + var values []string + return *NewListOptsRef(&values, validator) +} + +// NewListOptsRef creates a new ListOpts with the specified values and validator. +func NewListOptsRef(values *[]string, validator ValidatorFctType) *ListOpts { + return &ListOpts{ + values: values, + validator: validator, + } +} + +func (opts *ListOpts) String() string { + return fmt.Sprintf("%v", []string((*opts.values))) +} + +// Set validates if needed the input value and add it to the +// internal slice. 
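+//
+// Sketch with a validator attached (values illustrative):
+//
+//    opts := NewListOpts(ValidateIPAddress)
+//    _ = opts.Set("127.0.0.1")    // accepted; stored in normalized form
+//    err := opts.Set("not-an-ip") // rejected with the validator's error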
+func (opts *ListOpts) Set(value string) error { + if opts.validator != nil { + v, err := opts.validator(value) + if err != nil { + return err + } + value = v + } + (*opts.values) = append((*opts.values), value) + return nil +} + +// Delete removes the specified element from the slice. +func (opts *ListOpts) Delete(key string) { + for i, k := range *opts.values { + if k == key { + (*opts.values) = append((*opts.values)[:i], (*opts.values)[i+1:]...) + return + } + } +} + +// GetMap returns the content of values in a map in order to avoid +// duplicates. +func (opts *ListOpts) GetMap() map[string]struct{} { + ret := make(map[string]struct{}) + for _, k := range *opts.values { + ret[k] = struct{}{} + } + return ret +} + +// GetAll returns the values of slice. +func (opts *ListOpts) GetAll() []string { + return (*opts.values) +} + +// GetAllOrEmpty returns the values of the slice +// or an empty slice when there are no values. +func (opts *ListOpts) GetAllOrEmpty() []string { + v := *opts.values + if v == nil { + return make([]string, 0) + } + return v +} + +// Get checks the existence of the specified key. +func (opts *ListOpts) Get(key string) bool { + for _, k := range *opts.values { + if k == key { + return true + } + } + return false +} + +// Len returns the amount of element in the slice. +func (opts *ListOpts) Len() int { + return len((*opts.values)) +} + +//MapOpts holds a map of values and a validation function. +type MapOpts struct { + values map[string]string + validator ValidatorFctType +} + +// Set validates if needed the input value and add it to the +// internal map, by splitting on '='. +func (opts *MapOpts) Set(value string) error { + if opts.validator != nil { + v, err := opts.validator(value) + if err != nil { + return err + } + value = v + } + vals := strings.SplitN(value, "=", 2) + if len(vals) == 1 { + (opts.values)[vals[0]] = "" + } else { + (opts.values)[vals[0]] = vals[1] + } + return nil +} + +// GetAll returns the values of MapOpts as a map. +func (opts *MapOpts) GetAll() map[string]string { + return opts.values +} + +func (opts *MapOpts) String() string { + return fmt.Sprintf("%v", map[string]string((opts.values))) +} + +// NewMapOpts creates a new MapOpts with the specified map of values and a validator. +func NewMapOpts(values map[string]string, validator ValidatorFctType) *MapOpts { + if values == nil { + values = make(map[string]string) + } + return &MapOpts{ + values: values, + validator: validator, + } +} + +// ValidatorFctType defines a validator function that returns a validated string and/or an error. +type ValidatorFctType func(val string) (string, error) + +// ValidatorFctListType defines a validator function that returns a validated list of string and/or an error +type ValidatorFctListType func(val string) ([]string, error) + +// ValidateAttach validates that the specified string is a valid attach option. +func ValidateAttach(val string) (string, error) { + s := strings.ToLower(val) + for _, str := range []string{"stdin", "stdout", "stderr"} { + if s == str { + return s, nil + } + } + return val, fmt.Errorf("valid streams are STDIN, STDOUT and STDERR") +} + +// ValidateEnv validates an environment variable and returns it. +// If no value is specified, it returns the current value using os.Getenv. +// +// As on ParseEnvFile and related to #16585, environment variable names +// are not validate what so ever, it's up to application inside docker +// to validate them or not. 
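+//
+// Behaviour sketch (environment contents hypothetical):
+//
+//    ValidateEnv("FOO=bar") // "FOO=bar", returned as-is
+//    ValidateEnv("HOME")    // "HOME=" + os.Getenv("HOME") when HOME is set
+//    ValidateEnv("NOPE")    // "NOPE" when NOPE is unset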
+func ValidateEnv(val string) (string, error) { + arr := strings.Split(val, "=") + if len(arr) > 1 { + return val, nil + } + if !doesEnvExist(val) { + return val, nil + } + return fmt.Sprintf("%s=%s", val, os.Getenv(val)), nil +} + +// ValidateIPAddress validates an Ip address. +func ValidateIPAddress(val string) (string, error) { + var ip = net.ParseIP(strings.TrimSpace(val)) + if ip != nil { + return ip.String(), nil + } + return "", fmt.Errorf("%s is not an ip address", val) +} + +// ValidateMACAddress validates a MAC address. +func ValidateMACAddress(val string) (string, error) { + _, err := net.ParseMAC(strings.TrimSpace(val)) + if err != nil { + return "", err + } + return val, nil +} + +// ValidateDNSSearch validates domain for resolvconf search configuration. +// A zero length domain is represented by a dot (.). +func ValidateDNSSearch(val string) (string, error) { + if val = strings.Trim(val, " "); val == "." { + return val, nil + } + return validateDomain(val) +} + +func validateDomain(val string) (string, error) { + if alphaRegexp.FindString(val) == "" { + return "", fmt.Errorf("%s is not a valid domain", val) + } + ns := domainRegexp.FindSubmatch([]byte(val)) + if len(ns) > 0 && len(ns[1]) < 255 { + return string(ns[1]), nil + } + return "", fmt.Errorf("%s is not a valid domain", val) +} + +// ValidateExtraHost validates that the specified string is a valid extrahost and returns it. +// ExtraHost are in the form of name:ip where the ip has to be a valid ip (ipv4 or ipv6). +func ValidateExtraHost(val string) (string, error) { + // allow for IPv6 addresses in extra hosts by only splitting on first ":" + arr := strings.SplitN(val, ":", 2) + if len(arr) != 2 || len(arr[0]) == 0 { + return "", fmt.Errorf("bad format for add-host: %q", val) + } + if _, err := ValidateIPAddress(arr[1]); err != nil { + return "", fmt.Errorf("invalid IP address in add-host: %q", arr[1]) + } + return val, nil +} + +// ValidateLabel validates that the specified string is a valid label, and returns it. +// Labels are in the form on key=value. 
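+//
+// For example:
+//
+//    ValidateLabel("com.example.k=v") // ok; additional '=' characters are allowed
+//    ValidateLabel("no-separator")    // error: bad attribute format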
+func ValidateLabel(val string) (string, error) { + if strings.Count(val, "=") < 1 { + return "", fmt.Errorf("bad attribute format: %s", val) + } + return val, nil +} + +func doesEnvExist(name string) bool { + for _, entry := range os.Environ() { + parts := strings.SplitN(entry, "=", 2) + if parts[0] == name { + return true + } + } + return false +} diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts/opts_test.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts/opts_test.go new file mode 100644 index 000000000..e2af1c11d --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts/opts_test.go @@ -0,0 +1,301 @@ +package opts + +import ( + "fmt" + "os" + "strings" + "testing" +) + +func TestValidateIPAddress(t *testing.T) { + if ret, err := ValidateIPAddress(`1.2.3.4`); err != nil || ret == "" { + t.Fatalf("ValidateIPAddress(`1.2.3.4`) got %s %s", ret, err) + } + + if ret, err := ValidateIPAddress(`127.0.0.1`); err != nil || ret == "" { + t.Fatalf("ValidateIPAddress(`127.0.0.1`) got %s %s", ret, err) + } + + if ret, err := ValidateIPAddress(`::1`); err != nil || ret == "" { + t.Fatalf("ValidateIPAddress(`::1`) got %s %s", ret, err) + } + + if ret, err := ValidateIPAddress(`127`); err == nil || ret != "" { + t.Fatalf("ValidateIPAddress(`127`) got %s %s", ret, err) + } + + if ret, err := ValidateIPAddress(`random invalid string`); err == nil || ret != "" { + t.Fatalf("ValidateIPAddress(`random invalid string`) got %s %s", ret, err) + } + +} + +func TestMapOpts(t *testing.T) { + tmpMap := make(map[string]string) + o := NewMapOpts(tmpMap, logOptsValidator) + o.Set("max-size=1") + if o.String() != "map[max-size:1]" { + t.Errorf("%s != [map[max-size:1]", o.String()) + } + + o.Set("max-file=2") + if len(tmpMap) != 2 { + t.Errorf("map length %d != 2", len(tmpMap)) + } + + if tmpMap["max-file"] != "2" { + t.Errorf("max-file = %s != 2", tmpMap["max-file"]) + } + + if tmpMap["max-size"] != "1" { + t.Errorf("max-size = %s != 1", tmpMap["max-size"]) + } + if o.Set("dummy-val=3") == nil { + t.Errorf("validator is not being called") + } +} + +func TestValidateMACAddress(t *testing.T) { + if _, err := ValidateMACAddress(`92:d0:c6:0a:29:33`); err != nil { + t.Fatalf("ValidateMACAddress(`92:d0:c6:0a:29:33`) got %s", err) + } + + if _, err := ValidateMACAddress(`92:d0:c6:0a:33`); err == nil { + t.Fatalf("ValidateMACAddress(`92:d0:c6:0a:33`) succeeded; expected failure on invalid MAC") + } + + if _, err := ValidateMACAddress(`random invalid string`); err == nil { + t.Fatalf("ValidateMACAddress(`random invalid string`) succeeded; expected failure on invalid MAC") + } +} + +func TestListOptsWithoutValidator(t *testing.T) { + o := NewListOpts(nil) + o.Set("foo") + if o.String() != "[foo]" { + t.Errorf("%s != [foo]", o.String()) + } + o.Set("bar") + if o.Len() != 2 { + t.Errorf("%d != 2", o.Len()) + } + o.Set("bar") + if o.Len() != 3 { + t.Errorf("%d != 3", o.Len()) + } + if !o.Get("bar") { + t.Error("o.Get(\"bar\") == false") + } + if o.Get("baz") { + t.Error("o.Get(\"baz\") == true") + } + o.Delete("foo") + if o.String() != "[bar bar]" { + t.Errorf("%s != [bar bar]", o.String()) + } + listOpts := o.GetAll() + if len(listOpts) != 2 || listOpts[0] != "bar" || listOpts[1] != "bar" { + t.Errorf("Expected [[bar bar]], got [%v]", listOpts) + } + mapListOpts := o.GetMap() + if len(mapListOpts) != 1 { + t.Errorf("Expected [map[bar:{}]], got [%v]", mapListOpts) + } + +} + +func TestListOptsWithValidator(t 
*testing.T) { + // Re-using logOptsvalidator (used by MapOpts) + o := NewListOpts(logOptsValidator) + o.Set("foo") + if o.String() != "[]" { + t.Errorf("%s != []", o.String()) + } + o.Set("foo=bar") + if o.String() != "[]" { + t.Errorf("%s != []", o.String()) + } + o.Set("max-file=2") + if o.Len() != 1 { + t.Errorf("%d != 1", o.Len()) + } + if !o.Get("max-file=2") { + t.Error("o.Get(\"max-file=2\") == false") + } + if o.Get("baz") { + t.Error("o.Get(\"baz\") == true") + } + o.Delete("max-file=2") + if o.String() != "[]" { + t.Errorf("%s != []", o.String()) + } +} + +func TestValidateDNSSearch(t *testing.T) { + valid := []string{ + `.`, + `a`, + `a.`, + `1.foo`, + `17.foo`, + `foo.bar`, + `foo.bar.baz`, + `foo.bar.`, + `foo.bar.baz`, + `foo1.bar2`, + `foo1.bar2.baz`, + `1foo.2bar.`, + `1foo.2bar.baz`, + `foo-1.bar-2`, + `foo-1.bar-2.baz`, + `foo-1.bar-2.`, + `foo-1.bar-2.baz`, + `1-foo.2-bar`, + `1-foo.2-bar.baz`, + `1-foo.2-bar.`, + `1-foo.2-bar.baz`, + } + + invalid := []string{ + ``, + ` `, + ` `, + `17`, + `17.`, + `.17`, + `17-.`, + `17-.foo`, + `.foo`, + `foo-.bar`, + `-foo.bar`, + `foo.bar-`, + `foo.bar-.baz`, + `foo.-bar`, + `foo.-bar.baz`, + `foo.bar.baz.this.should.fail.on.long.name.beause.it.is.longer.thanisshouldbethis.should.fail.on.long.name.beause.it.is.longer.thanisshouldbethis.should.fail.on.long.name.beause.it.is.longer.thanisshouldbethis.should.fail.on.long.name.beause.it.is.longer.thanisshouldbe`, + } + + for _, domain := range valid { + if ret, err := ValidateDNSSearch(domain); err != nil || ret == "" { + t.Fatalf("ValidateDNSSearch(`"+domain+"`) got %s %s", ret, err) + } + } + + for _, domain := range invalid { + if ret, err := ValidateDNSSearch(domain); err == nil || ret != "" { + t.Fatalf("ValidateDNSSearch(`"+domain+"`) got %s %s", ret, err) + } + } +} + +func TestValidateExtraHosts(t *testing.T) { + valid := []string{ + `myhost:192.168.0.1`, + `thathost:10.0.2.1`, + `anipv6host:2003:ab34:e::1`, + `ipv6local:::1`, + } + + invalid := map[string]string{ + `myhost:192.notanipaddress.1`: `invalid IP`, + `thathost-nosemicolon10.0.0.1`: `bad format`, + `anipv6host:::::1`: `invalid IP`, + `ipv6local:::0::`: `invalid IP`, + } + + for _, extrahost := range valid { + if _, err := ValidateExtraHost(extrahost); err != nil { + t.Fatalf("ValidateExtraHost(`"+extrahost+"`) should succeed: error %v", err) + } + } + + for extraHost, expectedError := range invalid { + if _, err := ValidateExtraHost(extraHost); err == nil { + t.Fatalf("ValidateExtraHost(`%q`) should have failed validation", extraHost) + } else { + if !strings.Contains(err.Error(), expectedError) { + t.Fatalf("ValidateExtraHost(`%q`) error should contain %q", extraHost, expectedError) + } + } + } +} + +func TestValidateAttach(t *testing.T) { + valid := []string{ + "stdin", + "stdout", + "stderr", + "STDIN", + "STDOUT", + "STDERR", + } + if _, err := ValidateAttach("invalid"); err == nil { + t.Fatalf("Expected error with [valid streams are STDIN, STDOUT and STDERR], got nothing") + } + + for _, attach := range valid { + value, err := ValidateAttach(attach) + if err != nil { + t.Fatal(err) + } + if value != strings.ToLower(attach) { + t.Fatalf("Expected [%v], got [%v]", attach, value) + } + } +} + +func TestValidateEnv(t *testing.T) { + valids := map[string]string{ + "a": "a", + "something": "something", + "_=a": "_=a", + "env1=value1": "env1=value1", + "_env1=value1": "_env1=value1", + "env2=value2=value3": "env2=value2=value3", + "env3=abc!qwe": "env3=abc!qwe", + "env_4=value 4": "env_4=value 4", + "PATH": 
fmt.Sprintf("PATH=%v", os.Getenv("PATH")), + "PATH=something": "PATH=something", + "asd!qwe": "asd!qwe", + "1asd": "1asd", + "123": "123", + "some space": "some space", + " some space before": " some space before", + "some space after ": "some space after ", + } + for value, expected := range valids { + actual, err := ValidateEnv(value) + if err != nil { + t.Fatal(err) + } + if actual != expected { + t.Fatalf("Expected [%v], got [%v]", expected, actual) + } + } +} + +func TestValidateLabel(t *testing.T) { + if _, err := ValidateLabel("label"); err == nil || err.Error() != "bad attribute format: label" { + t.Fatalf("Expected an error [bad attribute format: label], go %v", err) + } + if actual, err := ValidateLabel("key1=value1"); err != nil || actual != "key1=value1" { + t.Fatalf("Expected [key1=value1], got [%v,%v]", actual, err) + } + // Validate it's working with more than one = + if actual, err := ValidateLabel("key1=value1=value2"); err != nil { + t.Fatalf("Expected [key1=value1=value2], got [%v,%v]", actual, err) + } + // Validate it's working with one more + if actual, err := ValidateLabel("key1=value1=value2=value3"); err != nil { + t.Fatalf("Expected [key1=value1=value2=value2], got [%v,%v]", actual, err) + } +} + +func logOptsValidator(val string) (string, error) { + allowedKeys := map[string]string{"max-size": "1", "max-file": "2"} + vals := strings.Split(val, "=") + if allowedKeys[vals[0]] != "" { + return val, nil + } + return "", fmt.Errorf("invalid key %s", vals[0]) +} diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts/opts_unix.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts/opts_unix.go new file mode 100644 index 000000000..f1ce844a8 --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts/opts_unix.go @@ -0,0 +1,6 @@ +// +build !windows + +package opts + +// DefaultHTTPHost Default HTTP Host used if only port is provided to -H flag e.g. docker daemon -H tcp://:8080 +const DefaultHTTPHost = "localhost" diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts/opts_windows.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts/opts_windows.go new file mode 100644 index 000000000..2a9e2be74 --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts/opts_windows.go @@ -0,0 +1,56 @@ +package opts + +// TODO Windows. Identify bug in GOLang 1.5.1 and/or Windows Server 2016 TP4. +// @jhowardmsft, @swernli. +// +// On Windows, this mitigates a problem with the default options of running +// a docker client against a local docker daemon on TP4. +// +// What was found that if the default host is "localhost", even if the client +// (and daemon as this is local) is not physically on a network, and the DNS +// cache is flushed (ipconfig /flushdns), then the client will pause for +// exactly one second when connecting to the daemon for calls. For example +// using docker run windowsservercore cmd, the CLI will send a create followed +// by an attach. You see the delay between the attach finishing and the attach +// being seen by the daemon. +// +// Here's some daemon debug logs with additional debug spew put in. The +// AfterWriteJSON log is the very last thing the daemon does as part of the +// create call. The POST /attach is the second CLI call. Notice the second +// time gap. 
+// +// time="2015-11-06T13:38:37.259627400-08:00" level=debug msg="After createRootfs" +// time="2015-11-06T13:38:37.263626300-08:00" level=debug msg="After setHostConfig" +// time="2015-11-06T13:38:37.267631200-08:00" level=debug msg="before createContainerPl...." +// time="2015-11-06T13:38:37.271629500-08:00" level=debug msg=ToDiskLocking.... +// time="2015-11-06T13:38:37.275643200-08:00" level=debug msg="loggin event...." +// time="2015-11-06T13:38:37.277627600-08:00" level=debug msg="logged event...." +// time="2015-11-06T13:38:37.279631800-08:00" level=debug msg="In defer func" +// time="2015-11-06T13:38:37.282628100-08:00" level=debug msg="After daemon.create" +// time="2015-11-06T13:38:37.286651700-08:00" level=debug msg="return 2" +// time="2015-11-06T13:38:37.289629500-08:00" level=debug msg="Returned from daemon.ContainerCreate" +// time="2015-11-06T13:38:37.311629100-08:00" level=debug msg="After WriteJSON" +// ... 1 second gap here.... +// time="2015-11-06T13:38:38.317866200-08:00" level=debug msg="Calling POST /v1.22/containers/984758282b842f779e805664b2c95d563adc9a979c8a3973e68c807843ee4757/attach" +// time="2015-11-06T13:38:38.326882500-08:00" level=info msg="POST /v1.22/containers/984758282b842f779e805664b2c95d563adc9a979c8a3973e68c807843ee4757/attach?stderr=1&stdin=1&stdout=1&stream=1" +// +// We suspect this is either a bug introduced in GOLang 1.5.1, or that a change +// in GOLang 1.5.1 (from 1.4.3) is exposing a bug in Windows TP4. In theory, +// the Windows networking stack is supposed to resolve "localhost" internally, +// without hitting DNS, or even reading the hosts file (which is why localhost +// is commented out in the hosts file on Windows). +// +// We have validated that working around this using the actual IPv4 localhost +// address does not cause the delay. +// +// This does not occur with the docker client built with 1.4.3 on the same +// Windows TP4 build, regardless of whether the daemon is built using 1.5.1 +// or 1.4.3. It does not occur on Linux. We also verified we see the same thing +// on a cross-compiled Windows binary (from Linux). +// +// Final note: This is a mitigation, not a 'real' fix. It is still susceptible +// to the delay in TP4 if a user were to do 'docker run -H=tcp://localhost:2375...' +// explicitly. + +// DefaultHTTPHost Default HTTP Host used if only port is provided to -H flag e.g. docker daemon -H tcp://:8080 +const DefaultHTTPHost = "127.0.0.1" diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/README.md b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/README.md new file mode 100644 index 000000000..7307d9694 --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/README.md @@ -0,0 +1 @@ +This code provides helper functions for dealing with archive files. 
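+ +As a rough usage sketch (hypothetical paths; error handling shortened), the package can round-trip a directory through a gzipped tar stream using the Tar and Untar functions defined here: + + func roundTrip() error { + rdr, err := archive.Tar("/tmp/src", archive.Gzip) // tar + gzip the directory + if err != nil { + return err + } + defer rdr.Close() + return archive.Untar(rdr, "/tmp/dst", nil) // unpack it elsewhere + }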
diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/archive.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/archive.go new file mode 100644 index 000000000..ce84347d3 --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/archive.go @@ -0,0 +1,1049 @@ +package archive + +import ( + "archive/tar" + "bufio" + "bytes" + "compress/bzip2" + "compress/gzip" + "errors" + "fmt" + "io" + "io/ioutil" + "os" + "os/exec" + "path/filepath" + "runtime" + "strings" + "syscall" + + "github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus" + "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/fileutils" + "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/idtools" + "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils" + "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/pools" + "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/promise" + "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system" +) + +type ( + // Archive is an io.ReadCloser, combining the Read and Close interfaces. + Archive io.ReadCloser + // Reader is a type of io.Reader. + Reader io.Reader + // Compression represents whether, and how, an archive is compressed. + Compression int + // TarChownOptions wraps the chown options UID and GID. + TarChownOptions struct { + UID, GID int + } + // TarOptions wraps the tar options. + TarOptions struct { + IncludeFiles []string + ExcludePatterns []string + Compression Compression + NoLchown bool + UIDMaps []idtools.IDMap + GIDMaps []idtools.IDMap + ChownOpts *TarChownOptions + IncludeSourceDir bool + // When unpacking, specifies whether overwriting a directory with a + // non-directory is allowed and vice versa. + NoOverwriteDirNonDir bool + // For each include when creating an archive, the included name will be + // replaced with the matching name from this map. + RebaseNames map[string]string + } + + // Archiver allows the reuse of most utility functions of this package + // with a pluggable Untar function. Also, to facilitate the passing of + // specific id mappings for untar, an archiver can be created with maps + // which will then be passed to Untar operations. + Archiver struct { + Untar func(io.Reader, string, *TarOptions) error + UIDMaps []idtools.IDMap + GIDMaps []idtools.IDMap + } + + // breakoutError is used to differentiate errors related to breaking out. + // When testing archive breakout in the unit tests, this error is expected + // in order for the test to pass. + breakoutError error +) + +var ( + // ErrNotImplemented is the error returned when a function is not implemented. + ErrNotImplemented = errors.New("Function not implemented") + defaultArchiver = &Archiver{Untar: Untar, UIDMaps: nil, GIDMaps: nil} +) + +const ( + // HeaderSize is the size in bytes of a tar header + HeaderSize = 512 +) + +const ( + // Uncompressed represents an uncompressed archive. + Uncompressed Compression = iota + // Bzip2 is the bzip2 compression algorithm. + Bzip2 + // Gzip is the gzip compression algorithm. + Gzip + // Xz is the xz compression algorithm. + Xz +) + +// IsArchive checks for the magic bytes of a tar or any supported compression +// algorithm.
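+// For example (illustrative), a buffer that begins with the three bytes +// 0x42 0x5A 0x68 ("BZh") is reported as an archive because it matches the +// bzip2 magic number checked by DetectCompression below.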
+func IsArchive(header []byte) bool { + compression := DetectCompression(header) + if compression != Uncompressed { + return true + } + r := tar.NewReader(bytes.NewBuffer(header)) + _, err := r.Next() + return err == nil +} + +// IsArchivePath checks if the (possibly compressed) file at the given path +// starts with a tar file header. +func IsArchivePath(path string) bool { + file, err := os.Open(path) + if err != nil { + return false + } + defer file.Close() + rdr, err := DecompressStream(file) + if err != nil { + return false + } + r := tar.NewReader(rdr) + _, err = r.Next() + return err == nil +} + +// DetectCompression detects the compression algorithm of the source. +func DetectCompression(source []byte) Compression { + for compression, m := range map[Compression][]byte{ + Bzip2: {0x42, 0x5A, 0x68}, + Gzip: {0x1F, 0x8B, 0x08}, + Xz: {0xFD, 0x37, 0x7A, 0x58, 0x5A, 0x00}, + } { + if len(source) < len(m) { + logrus.Debugf("Len too short") + continue + } + if bytes.Compare(m, source[:len(m)]) == 0 { + return compression + } + } + return Uncompressed +} + +func xzDecompress(archive io.Reader) (io.ReadCloser, <-chan struct{}, error) { + args := []string{"xz", "-d", "-c", "-q"} + + return cmdStream(exec.Command(args[0], args[1:]...), archive) +} + +// DecompressStream decompresses the archive and returns a ReadCloser with the decompressed archive. +func DecompressStream(archive io.Reader) (io.ReadCloser, error) { + p := pools.BufioReader32KPool + buf := p.Get(archive) + bs, err := buf.Peek(10) + if err != nil && err != io.EOF { + // Note: we'll ignore any io.EOF error because there are some odd + // cases where the layer.tar file will be empty (zero bytes) and + // that results in an io.EOF from the Peek() call. So, in those + // cases we'll just treat it as a non-compressed stream and + // that means just create an empty layer. + // See Issue 18170 + return nil, err + } + + compression := DetectCompression(bs) + switch compression { + case Uncompressed: + readBufWrapper := p.NewReadCloserWrapper(buf, buf) + return readBufWrapper, nil + case Gzip: + gzReader, err := gzip.NewReader(buf) + if err != nil { + return nil, err + } + readBufWrapper := p.NewReadCloserWrapper(buf, gzReader) + return readBufWrapper, nil + case Bzip2: + bz2Reader := bzip2.NewReader(buf) + readBufWrapper := p.NewReadCloserWrapper(buf, bz2Reader) + return readBufWrapper, nil + case Xz: + xzReader, chdone, err := xzDecompress(buf) + if err != nil { + return nil, err + } + readBufWrapper := p.NewReadCloserWrapper(buf, xzReader) + return ioutils.NewReadCloserWrapper(readBufWrapper, func() error { + <-chdone + return readBufWrapper.Close() + }), nil + default: + return nil, fmt.Errorf("Unsupported compression format %s", (&compression).Extension()) + } +} + +// CompressStream returns a WriteCloser that compresses data written to it +// into dest with the specified compression algorithm.
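+// Note that, as implemented below, only Uncompressed and Gzip are supported +// when writing; Bzip2 and Xz fall through to the unsupported-format error.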
+func CompressStream(dest io.WriteCloser, compression Compression) (io.WriteCloser, error) { + p := pools.BufioWriter32KPool + buf := p.Get(dest) + switch compression { + case Uncompressed: + writeBufWrapper := p.NewWriteCloserWrapper(buf, buf) + return writeBufWrapper, nil + case Gzip: + gzWriter := gzip.NewWriter(dest) + writeBufWrapper := p.NewWriteCloserWrapper(buf, gzWriter) + return writeBufWrapper, nil + case Bzip2, Xz: + // archive/bzip2 does not support writing, and there is no xz support at all + // However, this is not a problem as docker only currently generates gzipped tars + return nil, fmt.Errorf("Unsupported compression format %s", (&compression).Extension()) + default: + return nil, fmt.Errorf("Unsupported compression format %s", (&compression).Extension()) + } +} + +// Extension returns the extension of a file that uses the specified compression algorithm. +func (compression *Compression) Extension() string { + switch *compression { + case Uncompressed: + return "tar" + case Bzip2: + return "tar.bz2" + case Gzip: + return "tar.gz" + case Xz: + return "tar.xz" + } + return "" +} + +type tarAppender struct { + TarWriter *tar.Writer + Buffer *bufio.Writer + + // for hardlink mapping + SeenFiles map[uint64]string + UIDMaps []idtools.IDMap + GIDMaps []idtools.IDMap +} + +// canonicalTarName provides a platform-independent and consistent POSIX-style +// path for files and directories to be archived regardless of the platform. +func canonicalTarName(name string, isDir bool) (string, error) { + name, err := CanonicalTarNameForPath(name) + if err != nil { + return "", err + } + + // suffix with '/' for directories + if isDir && !strings.HasSuffix(name, "/") { + name += "/" + } + return name, nil +} + +func (ta *tarAppender) addTarFile(path, name string) error { + fi, err := os.Lstat(path) + if err != nil { + return err + } + + link := "" + if fi.Mode()&os.ModeSymlink != 0 { + if link, err = os.Readlink(path); err != nil { + return err + } + } + + hdr, err := tar.FileInfoHeader(fi, link) + if err != nil { + return err + } + hdr.Mode = int64(chmodTarEntry(os.FileMode(hdr.Mode))) + + name, err = canonicalTarName(name, fi.IsDir()) + if err != nil { + return fmt.Errorf("tar: cannot canonicalize path: %v", err) + } + hdr.Name = name + + inode, err := setHeaderForSpecialDevice(hdr, ta, name, fi.Sys()) + if err != nil { + return err + } + + // if it's not a directory and has more than 1 link, + // it's hardlinked, so set the type flag accordingly + if !fi.IsDir() && hasHardlinks(fi) { + // a link should have a name that it links to + // and that linked name should be first in the tar archive + if oldpath, ok := ta.SeenFiles[inode]; ok { + hdr.Typeflag = tar.TypeLink + hdr.Linkname = oldpath + hdr.Size = 0 // This must be here for the writer math to add up! + } else { + ta.SeenFiles[inode] = name + } + } + + capability, _ := system.Lgetxattr(path, "security.capability") + if capability != nil { + hdr.Xattrs = make(map[string]string) + hdr.Xattrs["security.capability"] = string(capability) + } + + // handle re-mapping container ID mappings back to host ID mappings before + // writing tar headers/files.
We skip whiteout files because they were written + // by the kernel and already have proper ownership relative to the host + if !strings.HasPrefix(filepath.Base(hdr.Name), WhiteoutPrefix) && (ta.UIDMaps != nil || ta.GIDMaps != nil) { + uid, gid, err := getFileUIDGID(fi.Sys()) + if err != nil { + return err + } + xUID, err := idtools.ToContainer(uid, ta.UIDMaps) + if err != nil { + return err + } + xGID, err := idtools.ToContainer(gid, ta.GIDMaps) + if err != nil { + return err + } + hdr.Uid = xUID + hdr.Gid = xGID + } + + if err := ta.TarWriter.WriteHeader(hdr); err != nil { + return err + } + + if hdr.Typeflag == tar.TypeReg { + file, err := os.Open(path) + if err != nil { + return err + } + + ta.Buffer.Reset(ta.TarWriter) + defer ta.Buffer.Reset(nil) + _, err = io.Copy(ta.Buffer, file) + file.Close() + if err != nil { + return err + } + err = ta.Buffer.Flush() + if err != nil { + return err + } + } + + return nil +} + +func createTarFile(path, extractDir string, hdr *tar.Header, reader io.Reader, Lchown bool, chownOpts *TarChownOptions) error { + // hdr.Mode is in linux format, which we can use for syscalls, + // but for os.Foo() calls we need the mode converted to os.FileMode, + // so use hdrInfo.Mode() (they differ for e.g. setuid bits) + hdrInfo := hdr.FileInfo() + + switch hdr.Typeflag { + case tar.TypeDir: + // Create directory unless it exists as a directory already. + // In that case we just want to merge the two. + if fi, err := os.Lstat(path); !(err == nil && fi.IsDir()) { + if err := os.Mkdir(path, hdrInfo.Mode()); err != nil { + return err + } + } + + case tar.TypeReg, tar.TypeRegA: + // Source is regular file + file, err := os.OpenFile(path, os.O_CREATE|os.O_WRONLY, hdrInfo.Mode()) + if err != nil { + return err + } + if _, err := io.Copy(file, reader); err != nil { + file.Close() + return err + } + file.Close() + + case tar.TypeBlock, tar.TypeChar, tar.TypeFifo: + // Handle this in an OS-specific way + if err := handleTarTypeBlockCharFifo(hdr, path); err != nil { + return err + } + + case tar.TypeLink: + targetPath := filepath.Join(extractDir, hdr.Linkname) + // check for hardlink breakout + if !strings.HasPrefix(targetPath, extractDir) { + return breakoutError(fmt.Errorf("invalid hardlink %q -> %q", targetPath, hdr.Linkname)) + } + if err := os.Link(targetPath, path); err != nil { + return err + } + + case tar.TypeSymlink: + // path -> hdr.Linkname = targetPath + // e.g. /extractDir/path/to/symlink -> ../2/file = /extractDir/path/2/file + targetPath := filepath.Join(filepath.Dir(path), hdr.Linkname) + + // the reason we don't need to check symlinks in the path (with FollowSymlinkInScope) is because + // that symlink would first have to be created, which would be caught earlier, at this very check: + if !strings.HasPrefix(targetPath, extractDir) { + return breakoutError(fmt.Errorf("invalid symlink %q -> %q", path, hdr.Linkname)) + } + if err := os.Symlink(hdr.Linkname, path); err != nil { + return err + } + + case tar.TypeXGlobalHeader: + logrus.Debugf("PAX Global Extended Headers found and ignored") + return nil + + default: + return fmt.Errorf("Unhandled tar header type %d", hdr.Typeflag) + } + + // Lchown is not supported on Windows.
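+ // If no explicit chown options were supplied, fall back to the uid/gid + // recorded in the tar header itself.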
+ if Lchown && runtime.GOOS != "windows" { + if chownOpts == nil { + chownOpts = &TarChownOptions{UID: hdr.Uid, GID: hdr.Gid} + } + if err := os.Lchown(path, chownOpts.UID, chownOpts.GID); err != nil { + return err + } + } + + for key, value := range hdr.Xattrs { + if err := system.Lsetxattr(path, key, []byte(value), 0); err != nil { + return err + } + } + + // There is no LChmod, so ignore mode for symlink. Also, this + // must happen after chown, as that can modify the file mode + if err := handleLChmod(hdr, path, hdrInfo); err != nil { + return err + } + + aTime := hdr.AccessTime + if aTime.Before(hdr.ModTime) { + // Last access time should never be before last modified time. + aTime = hdr.ModTime + } + + // system.Chtimes doesn't support a NOFOLLOW flag atm + if hdr.Typeflag == tar.TypeLink { + if fi, err := os.Lstat(hdr.Linkname); err == nil && (fi.Mode()&os.ModeSymlink == 0) { + if err := system.Chtimes(path, aTime, hdr.ModTime); err != nil { + return err + } + } + } else if hdr.Typeflag != tar.TypeSymlink { + if err := system.Chtimes(path, aTime, hdr.ModTime); err != nil { + return err + } + } else { + ts := []syscall.Timespec{timeToTimespec(aTime), timeToTimespec(hdr.ModTime)} + if err := system.LUtimesNano(path, ts); err != nil && err != system.ErrNotSupportedPlatform { + return err + } + } + return nil +} + +// Tar creates an archive from the directory at `path`, and returns it as a +// stream of bytes. +func Tar(path string, compression Compression) (io.ReadCloser, error) { + return TarWithOptions(path, &TarOptions{Compression: compression}) +} + +// TarWithOptions creates an archive from the directory at `path`, only including files whose relative +// paths are included in `options.IncludeFiles` (if non-nil) or not in `options.ExcludePatterns`. +func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error) { + + // Fix the source path to work with long path names. This is a no-op + // on platforms other than Windows. + srcPath = fixVolumePathPrefix(srcPath) + + patterns, patDirs, exceptions, err := fileutils.CleanPatterns(options.ExcludePatterns) + + if err != nil { + return nil, err + } + + pipeReader, pipeWriter := io.Pipe() + + compressWriter, err := CompressStream(pipeWriter, options.Compression) + if err != nil { + return nil, err + } + + go func() { + ta := &tarAppender{ + TarWriter: tar.NewWriter(compressWriter), + Buffer: pools.BufioWriter32KPool.Get(nil), + SeenFiles: make(map[uint64]string), + UIDMaps: options.UIDMaps, + GIDMaps: options.GIDMaps, + } + + defer func() { + // Make sure to check the error on Close. + if err := ta.TarWriter.Close(); err != nil { + logrus.Debugf("Can't close tar writer: %s", err) + } + if err := compressWriter.Close(); err != nil { + logrus.Debugf("Can't close compress writer: %s", err) + } + if err := pipeWriter.Close(); err != nil { + logrus.Debugf("Can't close pipe writer: %s", err) + } + }() + + // this buffer is needed for the duration of this piped stream + defer pools.BufioWriter32KPool.Put(ta.Buffer) + + // In general we log errors here but ignore them because + // during e.g. a diff operation the container can continue + // mutating the filesystem and we can see transient errors + // from this + + stat, err := os.Lstat(srcPath) + if err != nil { + return + } + + if !stat.IsDir() { + // We can't later join a non-dir with any includes because the + // 'walk' will error if "file/." is stat-ed and "file" is not a + // directory. So, we must split the source path and use the + // basename as the include. 
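+ // For example (illustrative paths): srcPath "/tmp/foo" (a regular file) + // becomes srcPath "/tmp" with IncludeFiles set to ["foo"].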
+ if len(options.IncludeFiles) > 0 { + logrus.Warn("Tar: Can't archive a file with includes") + } + + dir, base := SplitPathDirEntry(srcPath) + srcPath = dir + options.IncludeFiles = []string{base} + } + + if len(options.IncludeFiles) == 0 { + options.IncludeFiles = []string{"."} + } + + seen := make(map[string]bool) + + for _, include := range options.IncludeFiles { + rebaseName := options.RebaseNames[include] + + walkRoot := getWalkRoot(srcPath, include) + filepath.Walk(walkRoot, func(filePath string, f os.FileInfo, err error) error { + if err != nil { + logrus.Debugf("Tar: Can't stat file %s to tar: %s", srcPath, err) + return nil + } + + relFilePath, err := filepath.Rel(srcPath, filePath) + if err != nil || (!options.IncludeSourceDir && relFilePath == "." && f.IsDir()) { + // Error getting relative path OR we are looking + // at the source directory path. Skip in both situations. + return nil + } + + if options.IncludeSourceDir && include == "." && relFilePath != "." { + relFilePath = strings.Join([]string{".", relFilePath}, string(filepath.Separator)) + } + + skip := false + + // If "include" is an exact match for the current file + // then even if there's an "excludePatterns" pattern that + // matches it, don't skip it. IOW, assume an explicit 'include' + // is asking for that file no matter what - which is true + // for some files, like .dockerignore and Dockerfile (sometimes) + if include != relFilePath { + skip, err = fileutils.OptimizedMatches(relFilePath, patterns, patDirs) + if err != nil { + logrus.Debugf("Error matching %s: %v", relFilePath, err) + return err + } + } + + if skip { + if !exceptions && f.IsDir() { + return filepath.SkipDir + } + return nil + } + + if seen[relFilePath] { + return nil + } + seen[relFilePath] = true + + // Rename the base resource. + if rebaseName != "" { + var replacement string + if rebaseName != string(filepath.Separator) { + // Special case the root directory to replace with an + // empty string instead so that we don't end up with + // double slashes in the paths. + replacement = rebaseName + } + + relFilePath = strings.Replace(relFilePath, include, replacement, 1) + } + + if err := ta.addTarFile(filePath, relFilePath); err != nil { + logrus.Debugf("Can't add file %s to tar: %s", filePath, err) + } + return nil + }) + } + }() + + return pipeReader, nil +} + +// Unpack unpacks the decompressedArchive to dest with options. +func Unpack(decompressedArchive io.Reader, dest string, options *TarOptions) error { + tr := tar.NewReader(decompressedArchive) + trBuf := pools.BufioReader32KPool.Get(nil) + defer pools.BufioReader32KPool.Put(trBuf) + + var dirs []*tar.Header + remappedRootUID, remappedRootGID, err := idtools.GetRootUIDGID(options.UIDMaps, options.GIDMaps) + if err != nil { + return err + } + + // Iterate through the files in the archive. +loop: + for { + hdr, err := tr.Next() + if err == io.EOF { + // end of tar archive + break + } + if err != nil { + return err + } + + // Normalize name, for safety and for a simple is-root check + // This keeps "../" as-is, but normalizes "/../" to "/". Or Windows: + // This keeps "..\" as-is, but normalizes "\..\" to "\". + hdr.Name = filepath.Clean(hdr.Name) + + for _, exclude := range options.ExcludePatterns { + if strings.HasPrefix(hdr.Name, exclude) { + continue loop + } + } + + // After calling filepath.Clean(hdr.Name) above, hdr.Name will now be in + // the filepath format for the OS on which the daemon is running. Hence + // the check for a slash-suffix MUST be done in an OS-agnostic way. 
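+ // A trailing separator (e.g. "dir/" on Linux, `dir\` on Windows) marks the + // entry itself as a directory; only entries without one need their parent + // directory created first.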
+ if !strings.HasSuffix(hdr.Name, string(os.PathSeparator)) { + // Not the root directory, ensure that the parent directory exists + parent := filepath.Dir(hdr.Name) + parentPath := filepath.Join(dest, parent) + if _, err := os.Lstat(parentPath); err != nil && os.IsNotExist(err) { + err = system.MkdirAll(parentPath, 0777) + if err != nil { + return err + } + } + } + + path := filepath.Join(dest, hdr.Name) + rel, err := filepath.Rel(dest, path) + if err != nil { + return err + } + if strings.HasPrefix(rel, ".."+string(os.PathSeparator)) { + return breakoutError(fmt.Errorf("%q is outside of %q", hdr.Name, dest)) + } + + // If path exists we almost always just want to remove and replace it. + // The only exception is when it is a directory *and* the file from + // the layer is also a directory. Then we want to merge them (i.e. + // just apply the metadata from the layer). + if fi, err := os.Lstat(path); err == nil { + if options.NoOverwriteDirNonDir && fi.IsDir() && hdr.Typeflag != tar.TypeDir { + // If NoOverwriteDirNonDir is true then we cannot replace + // an existing directory with a non-directory from the archive. + return fmt.Errorf("cannot overwrite directory %q with non-directory %q", path, dest) + } + + if options.NoOverwriteDirNonDir && !fi.IsDir() && hdr.Typeflag == tar.TypeDir { + // If NoOverwriteDirNonDir is true then we cannot replace + // an existing non-directory with a directory from the archive. + return fmt.Errorf("cannot overwrite non-directory %q with directory %q", path, dest) + } + + if fi.IsDir() && hdr.Name == "." { + continue + } + + if !(fi.IsDir() && hdr.Typeflag == tar.TypeDir) { + if err := os.RemoveAll(path); err != nil { + return err + } + } + } + trBuf.Reset(tr) + + // if the options contain a uid & gid maps, convert header uid/gid + // entries using the maps such that lchown sets the proper mapped + // uid/gid after writing the file. We only perform this mapping if + // the file isn't already owned by the remapped root UID or GID, as + // that specific uid/gid has no mapping from container -> host, and + // those files already have the proper ownership for inside the + // container. + if hdr.Uid != remappedRootUID { + xUID, err := idtools.ToHost(hdr.Uid, options.UIDMaps) + if err != nil { + return err + } + hdr.Uid = xUID + } + if hdr.Gid != remappedRootGID { + xGID, err := idtools.ToHost(hdr.Gid, options.GIDMaps) + if err != nil { + return err + } + hdr.Gid = xGID + } + + if err := createTarFile(path, dest, hdr, trBuf, !options.NoLchown, options.ChownOpts); err != nil { + return err + } + + // Directory mtimes must be handled at the end to avoid further + // file creation in them from modifying the directory mtime + if hdr.Typeflag == tar.TypeDir { + dirs = append(dirs, hdr) + } + } + + for _, hdr := range dirs { + path := filepath.Join(dest, hdr.Name) + + if err := system.Chtimes(path, hdr.AccessTime, hdr.ModTime); err != nil { + return err + } + } + return nil +} + +// Untar reads a stream of bytes from `archive`, parses it as a tar archive, +// and unpacks it into the directory at `dest`. +// The archive may be compressed with one of the following algorithms: +// identity (uncompressed), gzip, bzip2, xz. +// FIXME: specify behavior when target path exists vs. doesn't exist. +func Untar(tarArchive io.Reader, dest string, options *TarOptions) error { + return untarHandler(tarArchive, dest, options, true) +} + +// UntarUncompressed reads a stream of bytes from `archive`, parses it as a tar archive, +// and unpacks it into the directory at `dest`.
+// The archive must be an uncompressed stream. +func UntarUncompressed(tarArchive io.Reader, dest string, options *TarOptions) error { + return untarHandler(tarArchive, dest, options, false) +} + +// Handler for teasing out the automatic decompression +func untarHandler(tarArchive io.Reader, dest string, options *TarOptions, decompress bool) error { + if tarArchive == nil { + return fmt.Errorf("Empty archive") + } + dest = filepath.Clean(dest) + if options == nil { + options = &TarOptions{} + } + if options.ExcludePatterns == nil { + options.ExcludePatterns = []string{} + } + + r := tarArchive + if decompress { + decompressedArchive, err := DecompressStream(tarArchive) + if err != nil { + return err + } + defer decompressedArchive.Close() + r = decompressedArchive + } + + return Unpack(r, dest, options) +} + +// TarUntar is a convenience function which calls Tar and Untar, with the output of one piped into the other. +// If either Tar or Untar fails, TarUntar aborts and returns the error. +func (archiver *Archiver) TarUntar(src, dst string) error { + logrus.Debugf("TarUntar(%s %s)", src, dst) + archive, err := TarWithOptions(src, &TarOptions{Compression: Uncompressed}) + if err != nil { + return err + } + defer archive.Close() + + var options *TarOptions + if archiver.UIDMaps != nil || archiver.GIDMaps != nil { + options = &TarOptions{ + UIDMaps: archiver.UIDMaps, + GIDMaps: archiver.GIDMaps, + } + } + return archiver.Untar(archive, dst, options) +} + +// TarUntar is a convenience function which calls Tar and Untar, with the output of one piped into the other. +// If either Tar or Untar fails, TarUntar aborts and returns the error. +func TarUntar(src, dst string) error { + return defaultArchiver.TarUntar(src, dst) +} + +// UntarPath untar a file from path to a destination, src is the source tar file path. +func (archiver *Archiver) UntarPath(src, dst string) error { + archive, err := os.Open(src) + if err != nil { + return err + } + defer archive.Close() + var options *TarOptions + if archiver.UIDMaps != nil || archiver.GIDMaps != nil { + options = &TarOptions{ + UIDMaps: archiver.UIDMaps, + GIDMaps: archiver.GIDMaps, + } + } + return archiver.Untar(archive, dst, options) +} + +// UntarPath is a convenience function which looks for an archive +// at filesystem path `src`, and unpacks it at `dst`. +func UntarPath(src, dst string) error { + return defaultArchiver.UntarPath(src, dst) +} + +// CopyWithTar creates a tar archive of filesystem path `src`, and +// unpacks it at filesystem path `dst`. +// The archive is streamed directly with fixed buffering and no +// intermediary disk IO. +func (archiver *Archiver) CopyWithTar(src, dst string) error { + srcSt, err := os.Stat(src) + if err != nil { + return err + } + if !srcSt.IsDir() { + return archiver.CopyFileWithTar(src, dst) + } + // Create dst, copy src's content into it + logrus.Debugf("Creating dest directory: %s", dst) + if err := system.MkdirAll(dst, 0755); err != nil { + return err + } + logrus.Debugf("Calling TarUntar(%s, %s)", src, dst) + return archiver.TarUntar(src, dst) +} + +// CopyWithTar creates a tar archive of filesystem path `src`, and +// unpacks it at filesystem path `dst`. +// The archive is streamed directly with fixed buffering and no +// intermediary disk IO. +func CopyWithTar(src, dst string) error { + return defaultArchiver.CopyWithTar(src, dst) +} + +// CopyFileWithTar emulates the behavior of the 'cp' command-line +// for a single file. 
It copies a regular file from path `src` to +// path `dst`, and preserves all its metadata. +func (archiver *Archiver) CopyFileWithTar(src, dst string) (err error) { + logrus.Debugf("CopyFileWithTar(%s, %s)", src, dst) + srcSt, err := os.Stat(src) + if err != nil { + return err + } + + if srcSt.IsDir() { + return fmt.Errorf("Can't copy a directory") + } + + // Clean up the trailing slash. This must be done in an + // operating-system-specific manner. + if dst[len(dst)-1] == os.PathSeparator { + dst = filepath.Join(dst, filepath.Base(src)) + } + // Create the holding directory if necessary + if err := system.MkdirAll(filepath.Dir(dst), 0700); err != nil { + return err + } + + r, w := io.Pipe() + errC := promise.Go(func() error { + defer w.Close() + + srcF, err := os.Open(src) + if err != nil { + return err + } + defer srcF.Close() + + hdr, err := tar.FileInfoHeader(srcSt, "") + if err != nil { + return err + } + hdr.Name = filepath.Base(dst) + hdr.Mode = int64(chmodTarEntry(os.FileMode(hdr.Mode))) + + remappedRootUID, remappedRootGID, err := idtools.GetRootUIDGID(archiver.UIDMaps, archiver.GIDMaps) + if err != nil { + return err + } + + // only perform mapping if the file being copied isn't already owned by the + // uid or gid of the remapped root in the container + if remappedRootUID != hdr.Uid { + xUID, err := idtools.ToHost(hdr.Uid, archiver.UIDMaps) + if err != nil { + return err + } + hdr.Uid = xUID + } + if remappedRootGID != hdr.Gid { + xGID, err := idtools.ToHost(hdr.Gid, archiver.GIDMaps) + if err != nil { + return err + } + hdr.Gid = xGID + } + + tw := tar.NewWriter(w) + defer tw.Close() + if err := tw.WriteHeader(hdr); err != nil { + return err + } + if _, err := io.Copy(tw, srcF); err != nil { + return err + } + return nil + }) + defer func() { + if er := <-errC; er != nil { + err = er + } + }() + + err = archiver.Untar(r, filepath.Dir(dst), nil) + if err != nil { + r.CloseWithError(err) + } + return err +} + +// CopyFileWithTar emulates the behavior of the 'cp' command-line +// for a single file. It copies a regular file from path `src` to +// path `dst`, and preserves all its metadata. +// +// Destination handling is done in an operating-system-specific manner, +// depending on where the daemon is running. If `dst` ends with a trailing slash +// the final destination path will be `dst/base(src)` (Linux) or +// `dst\base(src)` (Windows). +func CopyFileWithTar(src, dst string) (err error) { + return defaultArchiver.CopyFileWithTar(src, dst) +} + +// cmdStream executes a command, and returns its stdout as a stream. +// If the command fails to run or doesn't complete successfully, an error +// will be returned, including anything written on stderr. +func cmdStream(cmd *exec.Cmd, input io.Reader) (io.ReadCloser, <-chan struct{}, error) { + chdone := make(chan struct{}) + cmd.Stdin = input + pipeR, pipeW := io.Pipe() + cmd.Stdout = pipeW + var errBuf bytes.Buffer + cmd.Stderr = &errBuf + + // Run the command and return the pipe + if err := cmd.Start(); err != nil { + return nil, nil, err + } + + // Copy stdout to the returned pipe + go func() { + if err := cmd.Wait(); err != nil { + pipeW.CloseWithError(fmt.Errorf("%s: %s", err, errBuf.String())) + } else { + pipeW.Close() + } + close(chdone) + }() + + return pipeR, chdone, nil +} + +// NewTempArchive reads the content of src into a temporary file, and returns the contents +// of that file as an archive. The archive can only be read once - as soon as reading completes, +// the file will be deleted.
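+// A typical use (an assumption, not shown in this package) is buffering a +// stream to disk so that its total size can be reported before it is streamed +// back out; the Size field of TempArchive below is precomputed for that purpose.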
+func NewTempArchive(src Archive, dir string) (*TempArchive, error) { + f, err := ioutil.TempFile(dir, "") + if err != nil { + return nil, err + } + if _, err := io.Copy(f, src); err != nil { + return nil, err + } + if _, err := f.Seek(0, 0); err != nil { + return nil, err + } + st, err := f.Stat() + if err != nil { + return nil, err + } + size := st.Size() + return &TempArchive{File: f, Size: size}, nil +} + +// TempArchive is a temporary archive. The archive can only be read once - as soon as reading completes, +// the file will be deleted. +type TempArchive struct { + *os.File + Size int64 // Pre-computed from Stat().Size() as a convenience + read int64 + closed bool +} + +// Close closes the underlying file if it's still open, or does a no-op +// to allow callers to try to close the TempArchive multiple times safely. +func (archive *TempArchive) Close() error { + if archive.closed { + return nil + } + + archive.closed = true + + return archive.File.Close() +} + +func (archive *TempArchive) Read(data []byte) (int, error) { + n, err := archive.File.Read(data) + archive.read += int64(n) + if err != nil || archive.read == archive.Size { + archive.Close() + os.Remove(archive.File.Name()) + } + return n, err +} diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/archive_test.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/archive_test.go new file mode 100644 index 000000000..94deff3f4 --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/archive_test.go @@ -0,0 +1,1248 @@ +package archive + +import ( + "archive/tar" + "bytes" + "fmt" + "io" + "io/ioutil" + "os" + "os/exec" + "path" + "path/filepath" + "strings" + "syscall" + "testing" + "time" + + "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system" +) + +func TestIsArchiveNilHeader(t *testing.T) { + out := IsArchive(nil) + if out { + t.Fatalf("isArchive should return false as nil is not a valid archive header") + } +} + +func TestIsArchiveInvalidHeader(t *testing.T) { + header := []byte{0x00, 0x01, 0x02} + out := IsArchive(header) + if out { + t.Fatalf("isArchive should return false as %s is not a valid archive header", header) + } +} + +func TestIsArchiveBzip2(t *testing.T) { + header := []byte{0x42, 0x5A, 0x68} + out := IsArchive(header) + if !out { + t.Fatalf("isArchive should return true as %s is a bz2 header", header) + } +} + +func TestIsArchive7zip(t *testing.T) { + header := []byte{0x50, 0x4b, 0x03, 0x04} + out := IsArchive(header) + if out { + t.Fatalf("isArchive should return false as %s is a 7z header and it is not supported", header) + } +} + +func TestIsArchivePathDir(t *testing.T) { + cmd := exec.Command("/bin/sh", "-c", "mkdir -p /tmp/archivedir") + output, err := cmd.CombinedOutput() + if err != nil { + t.Fatalf("Fail to create an archive file for test : %s.", output) + } + if IsArchivePath("/tmp/archivedir") { + t.Fatalf("Incorrectly recognised directory as an archive") + } +} + +func TestIsArchivePathInvalidFile(t *testing.T) { + cmd := exec.Command("/bin/sh", "-c", "dd if=/dev/zero bs=1K count=1 of=/tmp/archive && gzip --stdout /tmp/archive > /tmp/archive.gz") + output, err := cmd.CombinedOutput() + if err != nil { + t.Fatalf("Fail to create an archive file for test : %s.", output) + } + if IsArchivePath("/tmp/archive") { + t.Fatalf("Incorrectly recognised invalid tar path as archive") + } + if IsArchivePath("/tmp/archive.gz") { + t.Fatalf("Incorrectly 
recognised invalid compressed tar path as archive") + } +} + +func TestIsArchivePathTar(t *testing.T) { + cmd := exec.Command("/bin/sh", "-c", "touch /tmp/archivedata && tar -cf /tmp/archive /tmp/archivedata && gzip --stdout /tmp/archive > /tmp/archive.gz") + output, err := cmd.CombinedOutput() + if err != nil { + t.Fatalf("Fail to create an archive file for test : %s.", output) + } + if !IsArchivePath("/tmp/archive") { + t.Fatalf("Did not recognise valid tar path as archive") + } + if !IsArchivePath("/tmp/archive.gz") { + t.Fatalf("Did not recognise valid compressed tar path as archive") + } +} + +func TestDecompressStreamGzip(t *testing.T) { + cmd := exec.Command("/bin/sh", "-c", "touch /tmp/archive && gzip -f /tmp/archive") + output, err := cmd.CombinedOutput() + if err != nil { + t.Fatalf("Fail to create an archive file for test : %s.", output) + } + archive, err := os.Open("/tmp/archive.gz") + _, err = DecompressStream(archive) + if err != nil { + t.Fatalf("Failed to decompress a gzip file.") + } +} + +func TestDecompressStreamBzip2(t *testing.T) { + cmd := exec.Command("/bin/sh", "-c", "touch /tmp/archive && bzip2 -f /tmp/archive") + output, err := cmd.CombinedOutput() + if err != nil { + t.Fatalf("Fail to create an archive file for test : %s.", output) + } + archive, err := os.Open("/tmp/archive.bz2") + _, err = DecompressStream(archive) + if err != nil { + t.Fatalf("Failed to decompress a bzip2 file.") + } +} + +func TestDecompressStreamXz(t *testing.T) { + cmd := exec.Command("/bin/sh", "-c", "touch /tmp/archive && xz -f /tmp/archive") + output, err := cmd.CombinedOutput() + if err != nil { + t.Fatalf("Fail to create an archive file for test : %s.", output) + } + archive, err := os.Open("/tmp/archive.xz") + _, err = DecompressStream(archive) + if err != nil { + t.Fatalf("Failed to decompress an xz file.") + } +} + +func TestCompressStreamXzUnsupported(t *testing.T) { + dest, err := os.Create("/tmp/dest") + if err != nil { + t.Fatalf("Fail to create the destination file") + } + _, err = CompressStream(dest, Xz) + if err == nil { + t.Fatalf("Should fail as xz is unsupported for compression format.") + } +} + +func TestCompressStreamBzip2Unsupported(t *testing.T) { + dest, err := os.Create("/tmp/dest") + if err != nil { + t.Fatalf("Fail to create the destination file") + } + _, err = CompressStream(dest, Bzip2) + if err == nil { + t.Fatalf("Should fail as bzip2 is unsupported for compression format.") + } +} + +func TestCompressStreamInvalid(t *testing.T) { + dest, err := os.Create("/tmp/dest") + if err != nil { + t.Fatalf("Fail to create the destination file") + } + _, err = CompressStream(dest, -1) + if err == nil { + t.Fatalf("Should fail as -1 is an invalid compression format.") + } +} + +func TestExtensionInvalid(t *testing.T) { + compression := Compression(-1) + output := compression.Extension() + if output != "" { + t.Fatalf("The extension of an invalid compression should be an empty string.") + } +} + +func TestExtensionUncompressed(t *testing.T) { + compression := Uncompressed + output := compression.Extension() + if output != "tar" { + t.Fatalf("The extension of an uncompressed archive should be 'tar'.") + } +} +func TestExtensionBzip2(t *testing.T) { + compression := Bzip2 + output := compression.Extension() + if output != "tar.bz2" { + t.Fatalf("The extension of a bzip2 archive should be 'tar.bz2'") + } +} +func TestExtensionGzip(t *testing.T) { + compression := Gzip + output := compression.Extension() + if output != "tar.gz" { + t.Fatalf("The extension of a gzip archive should be 'tar.gz'") + } +} +func TestExtensionXz(t *testing.T) { + compression := Xz + output := compression.Extension() + if output != "tar.xz" { + t.Fatalf("The extension of an xz archive should be 'tar.xz'") + } +} + +func TestCmdStreamLargeStderr(t *testing.T) { + cmd := exec.Command("/bin/sh", "-c", "dd if=/dev/zero bs=1k count=1000 of=/dev/stderr; echo hello") + out, _, err := cmdStream(cmd, nil) + if err != nil { + t.Fatalf("Failed to start command: %s", err) + } + errCh := make(chan error) + go func() { + _, err := io.Copy(ioutil.Discard, out) + errCh <- err + }() + select { + case err := <-errCh: + if err != nil { + t.Fatalf("Command should not have failed (err=%.100s...)", err) + } + case <-time.After(5 * time.Second): + t.Fatalf("Command did not complete in 5 seconds; probable deadlock") + } +} + +func TestCmdStreamBad(t *testing.T) { + badCmd := exec.Command("/bin/sh", "-c", "echo hello; echo >&2 error couldn\\'t reverse the phase pulser; exit 1") + out, _, err := cmdStream(badCmd, nil) + if err != nil { + t.Fatalf("Failed to start command: %s", err) + } + if output, err := ioutil.ReadAll(out); err == nil { + t.Fatalf("Command should have failed") + } else if err.Error() != "exit status 1: error couldn't reverse the phase pulser\n" { + t.Fatalf("Wrong error value (%s)", err) + } else if s := string(output); s != "hello\n" { + t.Fatalf("Command output should be '%s', not '%s'", "hello\\n", output) + } +} + +func TestCmdStreamGood(t *testing.T) { + cmd := exec.Command("/bin/sh", "-c", "echo hello; exit 0") + out, _, err := cmdStream(cmd, nil) + if err != nil { + t.Fatal(err) + } + if output, err := ioutil.ReadAll(out); err != nil { + t.Fatalf("Command should not have failed (err=%s)", err) + } else if s := string(output); s != "hello\n" { + t.Fatalf("Command output should be '%s', not '%s'", "hello\\n", output) + } +} + +func TestUntarPathWithInvalidDest(t *testing.T) { + tempFolder, err := ioutil.TempDir("", "docker-archive-test") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tempFolder) + invalidDestFolder := path.Join(tempFolder, "invalidDest") + // Create a src file + srcFile := path.Join(tempFolder, "src") + tarFile := path.Join(tempFolder, "src.tar") + os.Create(srcFile) + os.Create(invalidDestFolder) // being a file (not dir) should cause an error + cmd := exec.Command("/bin/sh", "-c", "tar cf "+tarFile+" "+srcFile) + _, err = cmd.CombinedOutput() + if err != nil { + t.Fatal(err) + } + + err = UntarPath(tarFile, invalidDestFolder) + if err == nil { + t.Fatalf("UntarPath with invalid destination path should throw an error.") + } +} + +func TestUntarPathWithInvalidSrc(t *testing.T) { + dest, err := ioutil.TempDir("", "docker-archive-test") + if err != nil { + t.Fatalf("Fail to create the destination file") + } + defer os.RemoveAll(dest) + err = UntarPath("/invalid/path", dest) + if err == nil { + t.Fatalf("UntarPath with invalid src path should throw an error.") + } +} + +func TestUntarPath(t *testing.T) { + tmpFolder, err := ioutil.TempDir("", "docker-archive-test") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpFolder) + srcFile := path.Join(tmpFolder, "src") + tarFile := path.Join(tmpFolder, "src.tar") + os.Create(path.Join(tmpFolder, "src")) + cmd := exec.Command("/bin/sh", "-c", "tar cf "+tarFile+" "+srcFile) + _, err = cmd.CombinedOutput() + if err != nil { + t.Fatal(err) + } + destFolder := path.Join(tmpFolder, "dest") + err = os.MkdirAll(destFolder, 0740) + if err != nil { + t.Fatalf("Fail to create the destination file") + } + err =
UntarPath(tarFile, destFolder) + if err != nil { + t.Fatalf("UntarPath shouldn't throw an error, %s.", err) + } + expectedFile := path.Join(destFolder, srcFile) + _, err = os.Stat(expectedFile) + if err != nil { + t.Fatalf("Destination folder should contain the source file but did not.") + } +} + +// Do the same test as above but with the destination as file, it should fail +func TestUntarPathWithDestinationFile(t *testing.T) { + tmpFolder, err := ioutil.TempDir("", "docker-archive-test") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpFolder) + srcFile := path.Join(tmpFolder, "src") + tarFile := path.Join(tmpFolder, "src.tar") + os.Create(path.Join(tmpFolder, "src")) + cmd := exec.Command("/bin/sh", "-c", "tar cf "+tarFile+" "+srcFile) + _, err = cmd.CombinedOutput() + if err != nil { + t.Fatal(err) + } + destFile := path.Join(tmpFolder, "dest") + _, err = os.Create(destFile) + if err != nil { + t.Fatalf("Fail to create the destination file") + } + err = UntarPath(tarFile, destFile) + if err == nil { + t.Fatalf("UntarPath should throw an error if the destination if a file") + } +} + +// Do the same test as above but with the destination folder already exists +// and the destination file is a directory +// It's working, see https://github.com/docker/docker/issues/10040 +func TestUntarPathWithDestinationSrcFileAsFolder(t *testing.T) { + tmpFolder, err := ioutil.TempDir("", "docker-archive-test") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpFolder) + srcFile := path.Join(tmpFolder, "src") + tarFile := path.Join(tmpFolder, "src.tar") + os.Create(srcFile) + cmd := exec.Command("/bin/sh", "-c", "tar cf "+tarFile+" "+srcFile) + _, err = cmd.CombinedOutput() + if err != nil { + t.Fatal(err) + } + destFolder := path.Join(tmpFolder, "dest") + err = os.MkdirAll(destFolder, 0740) + if err != nil { + t.Fatalf("Fail to create the destination folder") + } + // Let's create a folder that will has the same path as the extracted file (from tar) + destSrcFileAsFolder := path.Join(destFolder, srcFile) + err = os.MkdirAll(destSrcFileAsFolder, 0740) + if err != nil { + t.Fatal(err) + } + err = UntarPath(tarFile, destFolder) + if err != nil { + t.Fatalf("UntarPath should throw not throw an error if the extracted file already exists and is a folder") + } +} + +func TestCopyWithTarInvalidSrc(t *testing.T) { + tempFolder, err := ioutil.TempDir("", "docker-archive-test") + if err != nil { + t.Fatal(nil) + } + destFolder := path.Join(tempFolder, "dest") + invalidSrc := path.Join(tempFolder, "doesnotexists") + err = os.MkdirAll(destFolder, 0740) + if err != nil { + t.Fatal(err) + } + err = CopyWithTar(invalidSrc, destFolder) + if err == nil { + t.Fatalf("archiver.CopyWithTar with invalid src path should throw an error.") + } +} + +func TestCopyWithTarInexistentDestWillCreateIt(t *testing.T) { + tempFolder, err := ioutil.TempDir("", "docker-archive-test") + if err != nil { + t.Fatal(nil) + } + srcFolder := path.Join(tempFolder, "src") + inexistentDestFolder := path.Join(tempFolder, "doesnotexists") + err = os.MkdirAll(srcFolder, 0740) + if err != nil { + t.Fatal(err) + } + err = CopyWithTar(srcFolder, inexistentDestFolder) + if err != nil { + t.Fatalf("CopyWithTar with an inexistent folder shouldn't fail.") + } + _, err = os.Stat(inexistentDestFolder) + if err != nil { + t.Fatalf("CopyWithTar with an inexistent folder should create it.") + } +} + +// Test CopyWithTar with a file as src +func TestCopyWithTarSrcFile(t *testing.T) { + folder, err := ioutil.TempDir("", "docker-archive-test") + 
if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(folder) + dest := path.Join(folder, "dest") + srcFolder := path.Join(folder, "src") + src := path.Join(folder, path.Join("src", "src")) + err = os.MkdirAll(srcFolder, 0740) + if err != nil { + t.Fatal(err) + } + err = os.MkdirAll(dest, 0740) + if err != nil { + t.Fatal(err) + } + ioutil.WriteFile(src, []byte("content"), 0777) + err = CopyWithTar(src, dest) + if err != nil { + t.Fatalf("archiver.CopyWithTar shouldn't throw an error, %s.", err) + } + _, err = os.Stat(dest) + // FIXME Check the content + if err != nil { + t.Fatalf("Destination file should be the same as the source.") + } +} + +// Test CopyWithTar with a folder as src +func TestCopyWithTarSrcFolder(t *testing.T) { + folder, err := ioutil.TempDir("", "docker-archive-test") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(folder) + dest := path.Join(folder, "dest") + src := path.Join(folder, path.Join("src", "folder")) + err = os.MkdirAll(src, 0740) + if err != nil { + t.Fatal(err) + } + err = os.MkdirAll(dest, 0740) + if err != nil { + t.Fatal(err) + } + ioutil.WriteFile(path.Join(src, "file"), []byte("content"), 0777) + err = CopyWithTar(src, dest) + if err != nil { + t.Fatalf("archiver.CopyWithTar shouldn't throw an error, %s.", err) + } + _, err = os.Stat(dest) + // FIXME Check the content (the file inside) + if err != nil { + t.Fatalf("Destination folder should contain the source file but did not.") + } +} + +func TestCopyFileWithTarInvalidSrc(t *testing.T) { + tempFolder, err := ioutil.TempDir("", "docker-archive-test") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tempFolder) + destFolder := path.Join(tempFolder, "dest") + err = os.MkdirAll(destFolder, 0740) + if err != nil { + t.Fatal(err) + } + invalidFile := path.Join(tempFolder, "doesnotexists") + err = CopyFileWithTar(invalidFile, destFolder) + if err == nil { + t.Fatalf("archiver.CopyWithTar with invalid src path should throw an error.") + } +} + +func TestCopyFileWithTarInexistentDestWillCreateIt(t *testing.T) { + tempFolder, err := ioutil.TempDir("", "docker-archive-test") + if err != nil { + t.Fatal(nil) + } + defer os.RemoveAll(tempFolder) + srcFile := path.Join(tempFolder, "src") + inexistentDestFolder := path.Join(tempFolder, "doesnotexists") + _, err = os.Create(srcFile) + if err != nil { + t.Fatal(err) + } + err = CopyFileWithTar(srcFile, inexistentDestFolder) + if err != nil { + t.Fatalf("CopyWithTar with an inexistent folder shouldn't fail.") + } + _, err = os.Stat(inexistentDestFolder) + if err != nil { + t.Fatalf("CopyWithTar with an inexistent folder should create it.") + } + // FIXME Test the src file and content +} + +func TestCopyFileWithTarSrcFolder(t *testing.T) { + folder, err := ioutil.TempDir("", "docker-archive-copyfilewithtar-test") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(folder) + dest := path.Join(folder, "dest") + src := path.Join(folder, "srcfolder") + err = os.MkdirAll(src, 0740) + if err != nil { + t.Fatal(err) + } + err = os.MkdirAll(dest, 0740) + if err != nil { + t.Fatal(err) + } + err = CopyFileWithTar(src, dest) + if err == nil { + t.Fatalf("CopyFileWithTar should throw an error with a folder.") + } +} + +func TestCopyFileWithTarSrcFile(t *testing.T) { + folder, err := ioutil.TempDir("", "docker-archive-test") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(folder) + dest := path.Join(folder, "dest") + srcFolder := path.Join(folder, "src") + src := path.Join(folder, path.Join("src", "src")) + err = os.MkdirAll(srcFolder, 0740) + 
+func TestCopyFileWithTarSrcFile(t *testing.T) {
+	folder, err := ioutil.TempDir("", "docker-archive-test")
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer os.RemoveAll(folder)
+	dest := path.Join(folder, "dest")
+	srcFolder := path.Join(folder, "src")
+	src := path.Join(folder, path.Join("src", "src"))
+	err = os.MkdirAll(srcFolder, 0740)
+	if err != nil {
+		t.Fatal(err)
+	}
+	err = os.MkdirAll(dest, 0740)
+	if err != nil {
+		t.Fatal(err)
+	}
+	ioutil.WriteFile(src, []byte("content"), 0777)
+	err = CopyFileWithTar(src, dest+"/")
+	if err != nil {
+		t.Fatalf("archiver.CopyFileWithTar shouldn't throw an error, %s.", err)
+	}
+	_, err = os.Stat(dest)
+	if err != nil {
+		t.Fatalf("Destination folder should contain the source file but did not.")
+	}
+}
+
+func TestTarFiles(t *testing.T) {
+	// try without hardlinks
+	if err := checkNoChanges(1000, false); err != nil {
+		t.Fatal(err)
+	}
+	// try with hardlinks
+	if err := checkNoChanges(1000, true); err != nil {
+		t.Fatal(err)
+	}
+}
+
+func checkNoChanges(fileNum int, hardlinks bool) error {
+	srcDir, err := ioutil.TempDir("", "docker-test-srcDir")
+	if err != nil {
+		return err
+	}
+	defer os.RemoveAll(srcDir)
+
+	destDir, err := ioutil.TempDir("", "docker-test-destDir")
+	if err != nil {
+		return err
+	}
+	defer os.RemoveAll(destDir)
+
+	_, err = prepareUntarSourceDirectory(fileNum, srcDir, hardlinks)
+	if err != nil {
+		return err
+	}
+
+	err = TarUntar(srcDir, destDir)
+	if err != nil {
+		return err
+	}
+
+	changes, err := ChangesDirs(destDir, srcDir)
+	if err != nil {
+		return err
+	}
+	if len(changes) > 0 {
+		return fmt.Errorf("with %d files and %v hardlinks: expected 0 changes, got %d", fileNum, hardlinks, len(changes))
+	}
+	return nil
+}
+
+func tarUntar(t *testing.T, origin string, options *TarOptions) ([]Change, error) {
+	archive, err := TarWithOptions(origin, options)
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer archive.Close()
+
+	buf := make([]byte, 10)
+	if _, err := archive.Read(buf); err != nil {
+		return nil, err
+	}
+	wrap := io.MultiReader(bytes.NewReader(buf), archive)
+
+	detectedCompression := DetectCompression(buf)
+	compression := options.Compression
+	if detectedCompression.Extension() != compression.Extension() {
+		return nil, fmt.Errorf("Wrong compression detected. Expected %s, found %s", compression.Extension(), detectedCompression.Extension())
+	}
+
+	tmp, err := ioutil.TempDir("", "docker-test-untar")
+	if err != nil {
+		return nil, err
+	}
+	defer os.RemoveAll(tmp)
+	if err := Untar(wrap, tmp, nil); err != nil {
+		return nil, err
+	}
+	if _, err := os.Stat(tmp); err != nil {
+		return nil, err
+	}
+
+	return ChangesDirs(origin, tmp)
+}
+
+func TestTarUntar(t *testing.T) {
+	origin, err := ioutil.TempDir("", "docker-test-untar-origin")
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer os.RemoveAll(origin)
+	if err := ioutil.WriteFile(path.Join(origin, "1"), []byte("hello world"), 0700); err != nil {
+		t.Fatal(err)
+	}
+	if err := ioutil.WriteFile(path.Join(origin, "2"), []byte("welcome!"), 0700); err != nil {
+		t.Fatal(err)
+	}
+	if err := ioutil.WriteFile(path.Join(origin, "3"), []byte("will be ignored"), 0700); err != nil {
+		t.Fatal(err)
+	}
+
+	for _, c := range []Compression{
+		Uncompressed,
+		Gzip,
+	} {
+		changes, err := tarUntar(t, origin, &TarOptions{
+			Compression:     c,
+			ExcludePatterns: []string{"3"},
+		})
+
+		if err != nil {
+			t.Fatalf("Error tar/untar for compression %s: %s", c.Extension(), err)
+		}
+
+		if len(changes) != 1 || changes[0].Path != "/3" {
+			t.Fatalf("Unexpected differences after tarUntar: %v", changes)
+		}
+	}
+}
+
+func TestTarUntarWithXattr(t *testing.T) {
+	origin, err := ioutil.TempDir("", "docker-test-untar-origin")
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer os.RemoveAll(origin)
+	if err := ioutil.WriteFile(path.Join(origin, "1"), []byte("hello world"), 0700); err != nil {
+		t.Fatal(err)
+	}
+	if err := ioutil.WriteFile(path.Join(origin, "2"), []byte("welcome!"), 0700); err != nil {
+		t.Fatal(err)
+	}
+	if err := ioutil.WriteFile(path.Join(origin, "3"), []byte("will be ignored"), 0700); err != nil {
+		t.Fatal(err)
+	}
+	if err := system.Lsetxattr(path.Join(origin, "2"), "security.capability", []byte{0x00}, 0); err != nil {
+		t.Fatal(err)
+	}
+
+	for _, c := range []Compression{
+		Uncompressed,
+		Gzip,
+	} {
+		changes, err := tarUntar(t, origin, &TarOptions{
+			Compression:     c,
+			ExcludePatterns: []string{"3"},
+		})
+
+		if err != nil {
+			t.Fatalf("Error tar/untar for compression %s: %s", c.Extension(), err)
+		}
+
+		if len(changes) != 1 || changes[0].Path != "/3" {
+			t.Fatalf("Unexpected differences after tarUntar: %v", changes)
+		}
+		capability, _ := system.Lgetxattr(path.Join(origin, "2"), "security.capability")
+		if capability == nil || capability[0] != 0x00 {
+			t.Fatalf("Untar should have kept the 'security.capability' xattr.")
+		}
+	}
+}
+
+func TestTarWithOptions(t *testing.T) {
+	origin, err := ioutil.TempDir("", "docker-test-untar-origin")
+	if err != nil {
+		t.Fatal(err)
+	}
+	if _, err := ioutil.TempDir(origin, "folder"); err != nil {
+		t.Fatal(err)
+	}
+	defer os.RemoveAll(origin)
+	if err := ioutil.WriteFile(path.Join(origin, "1"), []byte("hello world"), 0700); err != nil {
+		t.Fatal(err)
+	}
+	if err := ioutil.WriteFile(path.Join(origin, "2"), []byte("welcome!"), 0700); err != nil {
+		t.Fatal(err)
+	}
+
+	cases := []struct {
+		opts       *TarOptions
+		numChanges int
+	}{
+		{&TarOptions{IncludeFiles: []string{"1"}}, 2},
+		{&TarOptions{ExcludePatterns: []string{"2"}}, 1},
+		{&TarOptions{ExcludePatterns: []string{"1", "folder*"}}, 2},
+		{&TarOptions{IncludeFiles: []string{"1", "1"}}, 2},
+		{&TarOptions{IncludeFiles: []string{"1"}, RebaseNames: map[string]string{"1": "test"}}, 4},
+	}
+	for _, testCase := range cases {
+		changes, err := tarUntar(t, origin, testCase.opts)
+		if err != nil {
+			t.Fatalf("Error 
tar/untar when testing inclusion/exclusion: %s", err) + } + if len(changes) != testCase.numChanges { + t.Errorf("Expected %d changes, got %d for %+v:", + testCase.numChanges, len(changes), testCase.opts) + } + } +} + +// Some tar archives such as http://haproxy.1wt.eu/download/1.5/src/devel/haproxy-1.5-dev21.tar.gz +// use PAX Global Extended Headers. +// Failing prevents the archives from being uncompressed during ADD +func TestTypeXGlobalHeaderDoesNotFail(t *testing.T) { + hdr := tar.Header{Typeflag: tar.TypeXGlobalHeader} + tmpDir, err := ioutil.TempDir("", "docker-test-archive-pax-test") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpDir) + err = createTarFile(filepath.Join(tmpDir, "pax_global_header"), tmpDir, &hdr, nil, true, nil) + if err != nil { + t.Fatal(err) + } +} + +// Some tar have both GNU specific (huge uid) and Ustar specific (long name) things. +// Not supposed to happen (should use PAX instead of Ustar for long name) but it does and it should still work. +func TestUntarUstarGnuConflict(t *testing.T) { + f, err := os.Open("testdata/broken.tar") + if err != nil { + t.Fatal(err) + } + found := false + tr := tar.NewReader(f) + // Iterate through the files in the archive. + for { + hdr, err := tr.Next() + if err == io.EOF { + // end of tar archive + break + } + if err != nil { + t.Fatal(err) + } + if hdr.Name == "root/.cpanm/work/1395823785.24209/Plack-1.0030/blib/man3/Plack::Middleware::LighttpdScriptNameFix.3pm" { + found = true + break + } + } + if !found { + t.Fatalf("%s not found in the archive", "root/.cpanm/work/1395823785.24209/Plack-1.0030/blib/man3/Plack::Middleware::LighttpdScriptNameFix.3pm") + } +} + +func TestTarWithBlockCharFifo(t *testing.T) { + origin, err := ioutil.TempDir("", "docker-test-tar-hardlink") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(origin) + if err := ioutil.WriteFile(path.Join(origin, "1"), []byte("hello world"), 0700); err != nil { + t.Fatal(err) + } + if err := system.Mknod(path.Join(origin, "2"), syscall.S_IFBLK, int(system.Mkdev(int64(12), int64(5)))); err != nil { + t.Fatal(err) + } + if err := system.Mknod(path.Join(origin, "3"), syscall.S_IFCHR, int(system.Mkdev(int64(12), int64(5)))); err != nil { + t.Fatal(err) + } + if err := system.Mknod(path.Join(origin, "4"), syscall.S_IFIFO, int(system.Mkdev(int64(12), int64(5)))); err != nil { + t.Fatal(err) + } + + dest, err := ioutil.TempDir("", "docker-test-tar-hardlink-dest") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(dest) + + // we'll do this in two steps to separate failure + fh, err := Tar(origin, Uncompressed) + if err != nil { + t.Fatal(err) + } + + // ensure we can read the whole thing with no error, before writing back out + buf, err := ioutil.ReadAll(fh) + if err != nil { + t.Fatal(err) + } + + bRdr := bytes.NewReader(buf) + err = Untar(bRdr, dest, &TarOptions{Compression: Uncompressed}) + if err != nil { + t.Fatal(err) + } + + changes, err := ChangesDirs(origin, dest) + if err != nil { + t.Fatal(err) + } + if len(changes) > 0 { + t.Fatalf("Tar with special device (block, char, fifo) should keep them (recreate them when untar) : %v", changes) + } +} + +func TestTarWithHardLink(t *testing.T) { + origin, err := ioutil.TempDir("", "docker-test-tar-hardlink") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(origin) + if err := ioutil.WriteFile(path.Join(origin, "1"), []byte("hello world"), 0700); err != nil { + t.Fatal(err) + } + if err := os.Link(path.Join(origin, "1"), path.Join(origin, "2")); err != nil { + t.Fatal(err) + } + + 
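+	// A tar stream stores the second hardlink as a tar.TypeLink entry whose
+	// Linkname points at the first entry's path, so after the Tar/Untar
+	// round trip below "1" and "2" should resolve to a single inode.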
var i1, i2 uint64
+	if i1, err = getNlink(path.Join(origin, "1")); err != nil {
+		t.Fatal(err)
+	}
+	// sanity check that we can hardlink
+	if i1 != 2 {
+		t.Skipf("skipping since hardlinks don't work here; expected 2 links, got %d", i1)
+	}
+
+	dest, err := ioutil.TempDir("", "docker-test-tar-hardlink-dest")
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer os.RemoveAll(dest)
+
+	// we'll do this in two steps to separate failure
+	fh, err := Tar(origin, Uncompressed)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// ensure we can read the whole thing with no error, before writing back out
+	buf, err := ioutil.ReadAll(fh)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	bRdr := bytes.NewReader(buf)
+	err = Untar(bRdr, dest, &TarOptions{Compression: Uncompressed})
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if i1, err = getInode(path.Join(dest, "1")); err != nil {
+		t.Fatal(err)
+	}
+	if i2, err = getInode(path.Join(dest, "2")); err != nil {
+		t.Fatal(err)
+	}
+
+	if i1 != i2 {
+		t.Errorf("expected matching inodes, but got %d and %d", i1, i2)
+	}
+}
+
+func getNlink(path string) (uint64, error) {
+	stat, err := os.Stat(path)
+	if err != nil {
+		return 0, err
+	}
+	statT, ok := stat.Sys().(*syscall.Stat_t)
+	if !ok {
+		return 0, fmt.Errorf("expected type *syscall.Stat_t, got %T", stat.Sys())
+	}
+	// We need this conversion on ARM64
+	return uint64(statT.Nlink), nil
+}
+
+func getInode(path string) (uint64, error) {
+	stat, err := os.Stat(path)
+	if err != nil {
+		return 0, err
+	}
+	statT, ok := stat.Sys().(*syscall.Stat_t)
+	if !ok {
+		return 0, fmt.Errorf("expected type *syscall.Stat_t, got %T", stat.Sys())
+	}
+	return statT.Ino, nil
+}
+
+func prepareUntarSourceDirectory(numberOfFiles int, targetPath string, makeLinks bool) (int, error) {
+	fileData := []byte("fooo")
+	for n := 0; n < numberOfFiles; n++ {
+		fileName := fmt.Sprintf("file-%d", n)
+		if err := ioutil.WriteFile(path.Join(targetPath, fileName), fileData, 0700); err != nil {
+			return 0, err
+		}
+		if makeLinks {
+			if err := os.Link(path.Join(targetPath, fileName), path.Join(targetPath, fileName+"-link")); err != nil {
+				return 0, err
+			}
+		}
+	}
+	totalSize := numberOfFiles * len(fileData)
+	return totalSize, nil
+}
+
+func BenchmarkTarUntar(b *testing.B) {
+	origin, err := ioutil.TempDir("", "docker-test-untar-origin")
+	if err != nil {
+		b.Fatal(err)
+	}
+	tempDir, err := ioutil.TempDir("", "docker-test-untar-destination")
+	if err != nil {
+		b.Fatal(err)
+	}
+	target := path.Join(tempDir, "dest")
+	n, err := prepareUntarSourceDirectory(100, origin, false)
+	if err != nil {
+		b.Fatal(err)
+	}
+	defer os.RemoveAll(origin)
+	defer os.RemoveAll(tempDir)
+
+	b.ResetTimer()
+	b.SetBytes(int64(n))
+	for n := 0; n < b.N; n++ {
+		err := TarUntar(origin, target)
+		if err != nil {
+			b.Fatal(err)
+		}
+		os.RemoveAll(target)
+	}
+}
+
+func BenchmarkTarUntarWithLinks(b *testing.B) {
+	origin, err := ioutil.TempDir("", "docker-test-untar-origin")
+	if err != nil {
+		b.Fatal(err)
+	}
+	tempDir, err := ioutil.TempDir("", "docker-test-untar-destination")
+	if err != nil {
+		b.Fatal(err)
+	}
+	target := path.Join(tempDir, "dest")
+	n, err := prepareUntarSourceDirectory(100, origin, true)
+	if err != nil {
+		b.Fatal(err)
+	}
+	defer os.RemoveAll(origin)
+	defer os.RemoveAll(tempDir)
+
+	b.ResetTimer()
+	b.SetBytes(int64(n))
+	for n := 0; n < b.N; n++ {
+		err := TarUntar(origin, target)
+		if err != nil {
+			b.Fatal(err)
+		}
+		os.RemoveAll(target)
+	}
+}
+
+func TestUntarInvalidFilenames(t *testing.T) {
+	for i, headers := range [][]*tar.Header{
+		{
+			{
+				Name: 
"../victim/dotdot", + Typeflag: tar.TypeReg, + Mode: 0644, + }, + }, + { + { + // Note the leading slash + Name: "/../victim/slash-dotdot", + Typeflag: tar.TypeReg, + Mode: 0644, + }, + }, + } { + if err := testBreakout("untar", "docker-TestUntarInvalidFilenames", headers); err != nil { + t.Fatalf("i=%d. %v", i, err) + } + } +} + +func TestUntarHardlinkToSymlink(t *testing.T) { + for i, headers := range [][]*tar.Header{ + { + { + Name: "symlink1", + Typeflag: tar.TypeSymlink, + Linkname: "regfile", + Mode: 0644, + }, + { + Name: "symlink2", + Typeflag: tar.TypeLink, + Linkname: "symlink1", + Mode: 0644, + }, + { + Name: "regfile", + Typeflag: tar.TypeReg, + Mode: 0644, + }, + }, + } { + if err := testBreakout("untar", "docker-TestUntarHardlinkToSymlink", headers); err != nil { + t.Fatalf("i=%d. %v", i, err) + } + } +} + +func TestUntarInvalidHardlink(t *testing.T) { + for i, headers := range [][]*tar.Header{ + { // try reading victim/hello (../) + { + Name: "dotdot", + Typeflag: tar.TypeLink, + Linkname: "../victim/hello", + Mode: 0644, + }, + }, + { // try reading victim/hello (/../) + { + Name: "slash-dotdot", + Typeflag: tar.TypeLink, + // Note the leading slash + Linkname: "/../victim/hello", + Mode: 0644, + }, + }, + { // try writing victim/file + { + Name: "loophole-victim", + Typeflag: tar.TypeLink, + Linkname: "../victim", + Mode: 0755, + }, + { + Name: "loophole-victim/file", + Typeflag: tar.TypeReg, + Mode: 0644, + }, + }, + { // try reading victim/hello (hardlink, symlink) + { + Name: "loophole-victim", + Typeflag: tar.TypeLink, + Linkname: "../victim", + Mode: 0755, + }, + { + Name: "symlink", + Typeflag: tar.TypeSymlink, + Linkname: "loophole-victim/hello", + Mode: 0644, + }, + }, + { // Try reading victim/hello (hardlink, hardlink) + { + Name: "loophole-victim", + Typeflag: tar.TypeLink, + Linkname: "../victim", + Mode: 0755, + }, + { + Name: "hardlink", + Typeflag: tar.TypeLink, + Linkname: "loophole-victim/hello", + Mode: 0644, + }, + }, + { // Try removing victim directory (hardlink) + { + Name: "loophole-victim", + Typeflag: tar.TypeLink, + Linkname: "../victim", + Mode: 0755, + }, + { + Name: "loophole-victim", + Typeflag: tar.TypeReg, + Mode: 0644, + }, + }, + } { + if err := testBreakout("untar", "docker-TestUntarInvalidHardlink", headers); err != nil { + t.Fatalf("i=%d. 
%v", i, err) + } + } +} + +func TestUntarInvalidSymlink(t *testing.T) { + for i, headers := range [][]*tar.Header{ + { // try reading victim/hello (../) + { + Name: "dotdot", + Typeflag: tar.TypeSymlink, + Linkname: "../victim/hello", + Mode: 0644, + }, + }, + { // try reading victim/hello (/../) + { + Name: "slash-dotdot", + Typeflag: tar.TypeSymlink, + // Note the leading slash + Linkname: "/../victim/hello", + Mode: 0644, + }, + }, + { // try writing victim/file + { + Name: "loophole-victim", + Typeflag: tar.TypeSymlink, + Linkname: "../victim", + Mode: 0755, + }, + { + Name: "loophole-victim/file", + Typeflag: tar.TypeReg, + Mode: 0644, + }, + }, + { // try reading victim/hello (symlink, symlink) + { + Name: "loophole-victim", + Typeflag: tar.TypeSymlink, + Linkname: "../victim", + Mode: 0755, + }, + { + Name: "symlink", + Typeflag: tar.TypeSymlink, + Linkname: "loophole-victim/hello", + Mode: 0644, + }, + }, + { // try reading victim/hello (symlink, hardlink) + { + Name: "loophole-victim", + Typeflag: tar.TypeSymlink, + Linkname: "../victim", + Mode: 0755, + }, + { + Name: "hardlink", + Typeflag: tar.TypeLink, + Linkname: "loophole-victim/hello", + Mode: 0644, + }, + }, + { // try removing victim directory (symlink) + { + Name: "loophole-victim", + Typeflag: tar.TypeSymlink, + Linkname: "../victim", + Mode: 0755, + }, + { + Name: "loophole-victim", + Typeflag: tar.TypeReg, + Mode: 0644, + }, + }, + { // try writing to victim/newdir/newfile with a symlink in the path + { + // this header needs to be before the next one, or else there is an error + Name: "dir/loophole", + Typeflag: tar.TypeSymlink, + Linkname: "../../victim", + Mode: 0755, + }, + { + Name: "dir/loophole/newdir/newfile", + Typeflag: tar.TypeReg, + Mode: 0644, + }, + }, + } { + if err := testBreakout("untar", "docker-TestUntarInvalidSymlink", headers); err != nil { + t.Fatalf("i=%d. %v", i, err) + } + } +} + +func TestTempArchiveCloseMultipleTimes(t *testing.T) { + reader := ioutil.NopCloser(strings.NewReader("hello")) + tempArchive, err := NewTempArchive(reader, "") + buf := make([]byte, 10) + n, err := tempArchive.Read(buf) + if n != 5 { + t.Fatalf("Expected to read 5 bytes. Read %d instead", n) + } + for i := 0; i < 3; i++ { + if err = tempArchive.Close(); err != nil { + t.Fatalf("i=%d. Unexpected error closing temp archive: %v", i, err) + } + } +} diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/archive_unix.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/archive_unix.go new file mode 100644 index 000000000..86c688825 --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/archive_unix.go @@ -0,0 +1,112 @@ +// +build !windows + +package archive + +import ( + "archive/tar" + "errors" + "os" + "path/filepath" + "syscall" + + "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system" +) + +// fixVolumePathPrefix does platform specific processing to ensure that if +// the path being passed in is not in a volume path format, convert it to one. +func fixVolumePathPrefix(srcPath string) string { + return srcPath +} + +// getWalkRoot calculates the root path when performing a TarWithOptions. +// We use a separate function as this is platform specific. On Linux, we +// can't use filepath.Join(srcPath,include) because this will clean away +// a trailing "." or "/" which may be important. 
+func getWalkRoot(srcPath string, include string) string { + return srcPath + string(filepath.Separator) + include +} + +// CanonicalTarNameForPath returns platform-specific filepath +// to canonical posix-style path for tar archival. p is relative +// path. +func CanonicalTarNameForPath(p string) (string, error) { + return p, nil // already unix-style +} + +// chmodTarEntry is used to adjust the file permissions used in tar header based +// on the platform the archival is done. + +func chmodTarEntry(perm os.FileMode) os.FileMode { + return perm // noop for unix as golang APIs provide perm bits correctly +} + +func setHeaderForSpecialDevice(hdr *tar.Header, ta *tarAppender, name string, stat interface{}) (inode uint64, err error) { + s, ok := stat.(*syscall.Stat_t) + + if !ok { + err = errors.New("cannot convert stat value to syscall.Stat_t") + return + } + + inode = uint64(s.Ino) + + // Currently go does not fill in the major/minors + if s.Mode&syscall.S_IFBLK != 0 || + s.Mode&syscall.S_IFCHR != 0 { + hdr.Devmajor = int64(major(uint64(s.Rdev))) + hdr.Devminor = int64(minor(uint64(s.Rdev))) + } + + return +} + +func getFileUIDGID(stat interface{}) (int, int, error) { + s, ok := stat.(*syscall.Stat_t) + + if !ok { + return -1, -1, errors.New("cannot convert stat value to syscall.Stat_t") + } + return int(s.Uid), int(s.Gid), nil +} + +func major(device uint64) uint64 { + return (device >> 8) & 0xfff +} + +func minor(device uint64) uint64 { + return (device & 0xff) | ((device >> 12) & 0xfff00) +} + +// handleTarTypeBlockCharFifo is an OS-specific helper function used by +// createTarFile to handle the following types of header: Block; Char; Fifo +func handleTarTypeBlockCharFifo(hdr *tar.Header, path string) error { + mode := uint32(hdr.Mode & 07777) + switch hdr.Typeflag { + case tar.TypeBlock: + mode |= syscall.S_IFBLK + case tar.TypeChar: + mode |= syscall.S_IFCHR + case tar.TypeFifo: + mode |= syscall.S_IFIFO + } + + if err := system.Mknod(path, mode, int(system.Mkdev(hdr.Devmajor, hdr.Devminor))); err != nil { + return err + } + return nil +} + +func handleLChmod(hdr *tar.Header, path string, hdrInfo os.FileInfo) error { + if hdr.Typeflag == tar.TypeLink { + if fi, err := os.Lstat(hdr.Linkname); err == nil && (fi.Mode()&os.ModeSymlink == 0) { + if err := os.Chmod(path, hdrInfo.Mode()); err != nil { + return err + } + } + } else if hdr.Typeflag != tar.TypeSymlink { + if err := os.Chmod(path, hdrInfo.Mode()); err != nil { + return err + } + } + return nil +} diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/archive_unix_test.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/archive_unix_test.go new file mode 100644 index 000000000..18f45c480 --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/archive_unix_test.go @@ -0,0 +1,60 @@ +// +build !windows + +package archive + +import ( + "os" + "testing" +) + +func TestCanonicalTarNameForPath(t *testing.T) { + cases := []struct{ in, expected string }{ + {"foo", "foo"}, + {"foo/bar", "foo/bar"}, + {"foo/dir/", "foo/dir/"}, + } + for _, v := range cases { + if out, err := CanonicalTarNameForPath(v.in); err != nil { + t.Fatalf("cannot get canonical name for path: %s: %v", v.in, err) + } else if out != v.expected { + t.Fatalf("wrong canonical tar name. 
expected:%s got:%s", v.expected, out) + } + } +} + +func TestCanonicalTarName(t *testing.T) { + cases := []struct { + in string + isDir bool + expected string + }{ + {"foo", false, "foo"}, + {"foo", true, "foo/"}, + {"foo/bar", false, "foo/bar"}, + {"foo/bar", true, "foo/bar/"}, + } + for _, v := range cases { + if out, err := canonicalTarName(v.in, v.isDir); err != nil { + t.Fatalf("cannot get canonical name for path: %s: %v", v.in, err) + } else if out != v.expected { + t.Fatalf("wrong canonical tar name. expected:%s got:%s", v.expected, out) + } + } +} + +func TestChmodTarEntry(t *testing.T) { + cases := []struct { + in, expected os.FileMode + }{ + {0000, 0000}, + {0777, 0777}, + {0644, 0644}, + {0755, 0755}, + {0444, 0444}, + } + for _, v := range cases { + if out := chmodTarEntry(v.in); out != v.expected { + t.Fatalf("wrong chmod. expected:%v got:%v", v.expected, out) + } + } +} diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/archive_windows.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/archive_windows.go new file mode 100644 index 000000000..23d60aa41 --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/archive_windows.go @@ -0,0 +1,70 @@ +// +build windows + +package archive + +import ( + "archive/tar" + "fmt" + "os" + "path/filepath" + "strings" + + "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/longpath" +) + +// fixVolumePathPrefix does platform specific processing to ensure that if +// the path being passed in is not in a volume path format, convert it to one. +func fixVolumePathPrefix(srcPath string) string { + return longpath.AddPrefix(srcPath) +} + +// getWalkRoot calculates the root path when performing a TarWithOptions. +// We use a separate function as this is platform specific. +func getWalkRoot(srcPath string, include string) string { + return filepath.Join(srcPath, include) +} + +// CanonicalTarNameForPath returns platform-specific filepath +// to canonical posix-style path for tar archival. p is relative +// path. +func CanonicalTarNameForPath(p string) (string, error) { + // windows: convert windows style relative path with backslashes + // into forward slashes. Since windows does not allow '/' or '\' + // in file names, it is mostly safe to replace however we must + // check just in case + if strings.Contains(p, "/") { + return "", fmt.Errorf("Windows path contains forward slash: %s", p) + } + return strings.Replace(p, string(os.PathSeparator), "/", -1), nil + +} + +// chmodTarEntry is used to adjust the file permissions used in tar header based +// on the platform the archival is done. +func chmodTarEntry(perm os.FileMode) os.FileMode { + perm &= 0755 + // Add the x bit: make everything +x from windows + perm |= 0111 + + return perm +} + +func setHeaderForSpecialDevice(hdr *tar.Header, ta *tarAppender, name string, stat interface{}) (inode uint64, err error) { + // do nothing. 
no notion of Rdev, Inode, Nlink in stat on Windows + return +} + +// handleTarTypeBlockCharFifo is an OS-specific helper function used by +// createTarFile to handle the following types of header: Block; Char; Fifo +func handleTarTypeBlockCharFifo(hdr *tar.Header, path string) error { + return nil +} + +func handleLChmod(hdr *tar.Header, path string, hdrInfo os.FileInfo) error { + return nil +} + +func getFileUIDGID(stat interface{}) (int, int, error) { + // no notion of file ownership mapping yet on Windows + return 0, 0, nil +} diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/archive_windows_test.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/archive_windows_test.go new file mode 100644 index 000000000..b7abc4022 --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/archive_windows_test.go @@ -0,0 +1,87 @@ +// +build windows + +package archive + +import ( + "io/ioutil" + "os" + "path/filepath" + "testing" +) + +func TestCopyFileWithInvalidDest(t *testing.T) { + folder, err := ioutil.TempDir("", "docker-archive-test") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(folder) + dest := "c:dest" + srcFolder := filepath.Join(folder, "src") + src := filepath.Join(folder, "src", "src") + err = os.MkdirAll(srcFolder, 0740) + if err != nil { + t.Fatal(err) + } + ioutil.WriteFile(src, []byte("content"), 0777) + err = CopyWithTar(src, dest) + if err == nil { + t.Fatalf("archiver.CopyWithTar should throw an error on invalid dest.") + } +} + +func TestCanonicalTarNameForPath(t *testing.T) { + cases := []struct { + in, expected string + shouldFail bool + }{ + {"foo", "foo", false}, + {"foo/bar", "___", true}, // unix-styled windows path must fail + {`foo\bar`, "foo/bar", false}, + } + for _, v := range cases { + if out, err := CanonicalTarNameForPath(v.in); err != nil && !v.shouldFail { + t.Fatalf("cannot get canonical name for path: %s: %v", v.in, err) + } else if v.shouldFail && err == nil { + t.Fatalf("canonical path call should have failed with error. in=%s out=%s", v.in, out) + } else if !v.shouldFail && out != v.expected { + t.Fatalf("wrong canonical tar name. expected:%s got:%s", v.expected, out) + } + } +} + +func TestCanonicalTarName(t *testing.T) { + cases := []struct { + in string + isDir bool + expected string + }{ + {"foo", false, "foo"}, + {"foo", true, "foo/"}, + {`foo\bar`, false, "foo/bar"}, + {`foo\bar`, true, "foo/bar/"}, + } + for _, v := range cases { + if out, err := canonicalTarName(v.in, v.isDir); err != nil { + t.Fatalf("cannot get canonical name for path: %s: %v", v.in, err) + } else if out != v.expected { + t.Fatalf("wrong canonical tar name. expected:%s got:%s", v.expected, out) + } + } +} + +func TestChmodTarEntry(t *testing.T) { + cases := []struct { + in, expected os.FileMode + }{ + {0000, 0111}, + {0777, 0755}, + {0644, 0755}, + {0755, 0755}, + {0444, 0555}, + } + for _, v := range cases { + if out := chmodTarEntry(v.in); out != v.expected { + t.Fatalf("wrong chmod. 
expected:%v got:%v", v.expected, out) + } + } +} diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/changes.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/changes.go new file mode 100644 index 000000000..a2a1dc36e --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/changes.go @@ -0,0 +1,416 @@ +package archive + +import ( + "archive/tar" + "bytes" + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" + "sort" + "strings" + "syscall" + "time" + + "github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus" + "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/idtools" + "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/pools" + "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system" +) + +// ChangeType represents the change type. +type ChangeType int + +const ( + // ChangeModify represents the modify operation. + ChangeModify = iota + // ChangeAdd represents the add operation. + ChangeAdd + // ChangeDelete represents the delete operation. + ChangeDelete +) + +func (c ChangeType) String() string { + switch c { + case ChangeModify: + return "C" + case ChangeAdd: + return "A" + case ChangeDelete: + return "D" + } + return "" +} + +// Change represents a change, it wraps the change type and path. +// It describes changes of the files in the path respect to the +// parent layers. The change could be modify, add, delete. +// This is used for layer diff. +type Change struct { + Path string + Kind ChangeType +} + +func (change *Change) String() string { + return fmt.Sprintf("%s %s", change.Kind, change.Path) +} + +// for sort.Sort +type changesByPath []Change + +func (c changesByPath) Less(i, j int) bool { return c[i].Path < c[j].Path } +func (c changesByPath) Len() int { return len(c) } +func (c changesByPath) Swap(i, j int) { c[j], c[i] = c[i], c[j] } + +// Gnu tar and the go tar writer don't have sub-second mtime +// precision, which is problematic when we apply changes via tar +// files, we handle this by comparing for exact times, *or* same +// second count and either a or b having exactly 0 nanoseconds +func sameFsTime(a, b time.Time) bool { + return a == b || + (a.Unix() == b.Unix() && + (a.Nanosecond() == 0 || b.Nanosecond() == 0)) +} + +func sameFsTimeSpec(a, b syscall.Timespec) bool { + return a.Sec == b.Sec && + (a.Nsec == b.Nsec || a.Nsec == 0 || b.Nsec == 0) +} + +// Changes walks the path rw and determines changes for the files in the path, +// with respect to the parent layers +func Changes(layers []string, rw string) ([]Change, error) { + var ( + changes []Change + changedDirs = make(map[string]struct{}) + ) + + err := filepath.Walk(rw, func(path string, f os.FileInfo, err error) error { + if err != nil { + return err + } + + // Rebase path + path, err = filepath.Rel(rw, path) + if err != nil { + return err + } + + // As this runs on the daemon side, file paths are OS specific. 
+ path = filepath.Join(string(os.PathSeparator), path) + + // Skip root + if path == string(os.PathSeparator) { + return nil + } + + // Skip AUFS metadata + if matched, err := filepath.Match(string(os.PathSeparator)+WhiteoutMetaPrefix+"*", path); err != nil || matched { + return err + } + + change := Change{ + Path: path, + } + + // Find out what kind of modification happened + file := filepath.Base(path) + // If there is a whiteout, then the file was removed + if strings.HasPrefix(file, WhiteoutPrefix) { + originalFile := file[len(WhiteoutPrefix):] + change.Path = filepath.Join(filepath.Dir(path), originalFile) + change.Kind = ChangeDelete + } else { + // Otherwise, the file was added + change.Kind = ChangeAdd + + // ...Unless it already existed in a top layer, in which case, it's a modification + for _, layer := range layers { + stat, err := os.Stat(filepath.Join(layer, path)) + if err != nil && !os.IsNotExist(err) { + return err + } + if err == nil { + // The file existed in the top layer, so that's a modification + + // However, if it's a directory, maybe it wasn't actually modified. + // If you modify /foo/bar/baz, then /foo will be part of the changed files only because it's the parent of bar + if stat.IsDir() && f.IsDir() { + if f.Size() == stat.Size() && f.Mode() == stat.Mode() && sameFsTime(f.ModTime(), stat.ModTime()) { + // Both directories are the same, don't record the change + return nil + } + } + change.Kind = ChangeModify + break + } + } + } + + // If /foo/bar/file.txt is modified, then /foo/bar must be part of the changed files. + // This block is here to ensure the change is recorded even if the + // modify time, mode and size of the parent directory in the rw and ro layers are all equal. + // Check https://github.com/docker/docker/pull/13590 for details. + if f.IsDir() { + changedDirs[path] = struct{}{} + } + if change.Kind == ChangeAdd || change.Kind == ChangeDelete { + parent := filepath.Dir(path) + if _, ok := changedDirs[parent]; !ok && parent != "/" { + changes = append(changes, Change{Path: parent, Kind: ChangeModify}) + changedDirs[parent] = struct{}{} + } + } + + // Record change + changes = append(changes, change) + return nil + }) + if err != nil && !os.IsNotExist(err) { + return nil, err + } + return changes, nil +} + +// FileInfo describes the information of a file. +type FileInfo struct { + parent *FileInfo + name string + stat *system.StatT + children map[string]*FileInfo + capability []byte + added bool +} + +// LookUp looks up the file information of a file. +func (info *FileInfo) LookUp(path string) *FileInfo { + // As this runs on the daemon side, file paths are OS specific. + parent := info + if path == string(os.PathSeparator) { + return info + } + + pathElements := strings.Split(path, string(os.PathSeparator)) + for _, elem := range pathElements { + if elem != "" { + child := parent.children[elem] + if child == nil { + return nil + } + parent = child + } + } + return parent +} + +func (info *FileInfo) path() string { + if info.parent == nil { + // As this runs on the daemon side, file paths are OS specific. 
+ return string(os.PathSeparator) + } + return filepath.Join(info.parent.path(), info.name) +} + +func (info *FileInfo) addChanges(oldInfo *FileInfo, changes *[]Change) { + + sizeAtEntry := len(*changes) + + if oldInfo == nil { + // add + change := Change{ + Path: info.path(), + Kind: ChangeAdd, + } + *changes = append(*changes, change) + info.added = true + } + + // We make a copy so we can modify it to detect additions + // also, we only recurse on the old dir if the new info is a directory + // otherwise any previous delete/change is considered recursive + oldChildren := make(map[string]*FileInfo) + if oldInfo != nil && info.isDir() { + for k, v := range oldInfo.children { + oldChildren[k] = v + } + } + + for name, newChild := range info.children { + oldChild, _ := oldChildren[name] + if oldChild != nil { + // change? + oldStat := oldChild.stat + newStat := newChild.stat + // Note: We can't compare inode or ctime or blocksize here, because these change + // when copying a file into a container. However, that is not generally a problem + // because any content change will change mtime, and any status change should + // be visible when actually comparing the stat fields. The only time this + // breaks down is if some code intentionally hides a change by setting + // back mtime + if statDifferent(oldStat, newStat) || + bytes.Compare(oldChild.capability, newChild.capability) != 0 { + change := Change{ + Path: newChild.path(), + Kind: ChangeModify, + } + *changes = append(*changes, change) + newChild.added = true + } + + // Remove from copy so we can detect deletions + delete(oldChildren, name) + } + + newChild.addChanges(oldChild, changes) + } + for _, oldChild := range oldChildren { + // delete + change := Change{ + Path: oldChild.path(), + Kind: ChangeDelete, + } + *changes = append(*changes, change) + } + + // If there were changes inside this directory, we need to add it, even if the directory + // itself wasn't changed. This is needed to properly save and restore filesystem permissions. + // As this runs on the daemon side, file paths are OS specific. + if len(*changes) > sizeAtEntry && info.isDir() && !info.added && info.path() != string(os.PathSeparator) { + change := Change{ + Path: info.path(), + Kind: ChangeModify, + } + // Let's insert the directory entry before the recently added entries located inside this dir + *changes = append(*changes, change) // just to resize the slice, will be overwritten + copy((*changes)[sizeAtEntry+1:], (*changes)[sizeAtEntry:]) + (*changes)[sizeAtEntry] = change + } + +} + +// Changes add changes to file information. +func (info *FileInfo) Changes(oldInfo *FileInfo) []Change { + var changes []Change + + info.addChanges(oldInfo, &changes) + + return changes +} + +func newRootFileInfo() *FileInfo { + // As this runs on the daemon side, file paths are OS specific. + root := &FileInfo{ + name: string(os.PathSeparator), + children: make(map[string]*FileInfo), + } + return root +} + +// ChangesDirs compares two directories and generates an array of Change objects describing the changes. +// If oldDir is "", then all files in newDir will be Add-Changes. 
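+// A typical pairing (a sketch with hypothetical paths) streams the resulting
+// diff as a tar via ExportChanges, defined below:
+//
+//	changes, err := ChangesDirs("/layers/new", "/layers/old")
+//	if err == nil {
+//		layerTar, _ := ExportChanges("/layers/new", changes, nil, nil)
+//		defer layerTar.Close()
+//	}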
+func ChangesDirs(newDir, oldDir string) ([]Change, error) { + var ( + oldRoot, newRoot *FileInfo + ) + if oldDir == "" { + emptyDir, err := ioutil.TempDir("", "empty") + if err != nil { + return nil, err + } + defer os.Remove(emptyDir) + oldDir = emptyDir + } + oldRoot, newRoot, err := collectFileInfoForChanges(oldDir, newDir) + if err != nil { + return nil, err + } + + return newRoot.Changes(oldRoot), nil +} + +// ChangesSize calculates the size in bytes of the provided changes, based on newDir. +func ChangesSize(newDir string, changes []Change) int64 { + var ( + size int64 + sf = make(map[uint64]struct{}) + ) + for _, change := range changes { + if change.Kind == ChangeModify || change.Kind == ChangeAdd { + file := filepath.Join(newDir, change.Path) + fileInfo, err := os.Lstat(file) + if err != nil { + logrus.Errorf("Can not stat %q: %s", file, err) + continue + } + + if fileInfo != nil && !fileInfo.IsDir() { + if hasHardlinks(fileInfo) { + inode := getIno(fileInfo) + if _, ok := sf[inode]; !ok { + size += fileInfo.Size() + sf[inode] = struct{}{} + } + } else { + size += fileInfo.Size() + } + } + } + } + return size +} + +// ExportChanges produces an Archive from the provided changes, relative to dir. +func ExportChanges(dir string, changes []Change, uidMaps, gidMaps []idtools.IDMap) (Archive, error) { + reader, writer := io.Pipe() + go func() { + ta := &tarAppender{ + TarWriter: tar.NewWriter(writer), + Buffer: pools.BufioWriter32KPool.Get(nil), + SeenFiles: make(map[uint64]string), + UIDMaps: uidMaps, + GIDMaps: gidMaps, + } + // this buffer is needed for the duration of this piped stream + defer pools.BufioWriter32KPool.Put(ta.Buffer) + + sort.Sort(changesByPath(changes)) + + // In general we log errors here but ignore them because + // during e.g. a diff operation the container can continue + // mutating the filesystem and we can see transient errors + // from this + for _, change := range changes { + if change.Kind == ChangeDelete { + whiteOutDir := filepath.Dir(change.Path) + whiteOutBase := filepath.Base(change.Path) + whiteOut := filepath.Join(whiteOutDir, WhiteoutPrefix+whiteOutBase) + timestamp := time.Now() + hdr := &tar.Header{ + Name: whiteOut[1:], + Size: 0, + ModTime: timestamp, + AccessTime: timestamp, + ChangeTime: timestamp, + } + if err := ta.TarWriter.WriteHeader(hdr); err != nil { + logrus.Debugf("Can't write whiteout header: %s", err) + } + } else { + path := filepath.Join(dir, change.Path) + if err := ta.addTarFile(path, change.Path[1:]); err != nil { + logrus.Debugf("Can't add file %s to tar: %s", path, err) + } + } + } + + // Make sure to check the error on Close. + if err := ta.TarWriter.Close(); err != nil { + logrus.Debugf("Can't close layer: %s", err) + } + if err := writer.Close(); err != nil { + logrus.Debugf("failed close Changes writer: %s", err) + } + }() + return reader, nil +} diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/changes_linux.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/changes_linux.go new file mode 100644 index 000000000..378cc09c8 --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/changes_linux.go @@ -0,0 +1,285 @@ +package archive + +import ( + "bytes" + "fmt" + "os" + "path/filepath" + "sort" + "syscall" + "unsafe" + + "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system" +) + +// walker is used to implement collectFileInfoForChanges on linux. 
Where this +// method in general returns the entire contents of two directory trees, we +// optimize some FS calls out on linux. In particular, we take advantage of the +// fact that getdents(2) returns the inode of each file in the directory being +// walked, which, when walking two trees in parallel to generate a list of +// changes, can be used to prune subtrees without ever having to lstat(2) them +// directly. Eliminating stat calls in this way can save up to seconds on large +// images. +type walker struct { + dir1 string + dir2 string + root1 *FileInfo + root2 *FileInfo +} + +// collectFileInfoForChanges returns a complete representation of the trees +// rooted at dir1 and dir2, with one important exception: any subtree or +// leaf where the inode and device numbers are an exact match between dir1 +// and dir2 will be pruned from the results. This method is *only* to be used +// to generating a list of changes between the two directories, as it does not +// reflect the full contents. +func collectFileInfoForChanges(dir1, dir2 string) (*FileInfo, *FileInfo, error) { + w := &walker{ + dir1: dir1, + dir2: dir2, + root1: newRootFileInfo(), + root2: newRootFileInfo(), + } + + i1, err := os.Lstat(w.dir1) + if err != nil { + return nil, nil, err + } + i2, err := os.Lstat(w.dir2) + if err != nil { + return nil, nil, err + } + + if err := w.walk("/", i1, i2); err != nil { + return nil, nil, err + } + + return w.root1, w.root2, nil +} + +// Given a FileInfo, its path info, and a reference to the root of the tree +// being constructed, register this file with the tree. +func walkchunk(path string, fi os.FileInfo, dir string, root *FileInfo) error { + if fi == nil { + return nil + } + parent := root.LookUp(filepath.Dir(path)) + if parent == nil { + return fmt.Errorf("collectFileInfoForChanges: Unexpectedly no parent for %s", path) + } + info := &FileInfo{ + name: filepath.Base(path), + children: make(map[string]*FileInfo), + parent: parent, + } + cpath := filepath.Join(dir, path) + stat, err := system.FromStatT(fi.Sys().(*syscall.Stat_t)) + if err != nil { + return err + } + info.stat = stat + info.capability, _ = system.Lgetxattr(cpath, "security.capability") // lgetxattr(2): fs access + parent.children[info.name] = info + return nil +} + +// Walk a subtree rooted at the same path in both trees being iterated. For +// example, /docker/overlay/1234/a/b/c/d and /docker/overlay/8888/a/b/c/d +func (w *walker) walk(path string, i1, i2 os.FileInfo) (err error) { + // Register these nodes with the return trees, unless we're still at the + // (already-created) roots: + if path != "/" { + if err := walkchunk(path, i1, w.dir1, w.root1); err != nil { + return err + } + if err := walkchunk(path, i2, w.dir2, w.root2); err != nil { + return err + } + } + + is1Dir := i1 != nil && i1.IsDir() + is2Dir := i2 != nil && i2.IsDir() + + sameDevice := false + if i1 != nil && i2 != nil { + si1 := i1.Sys().(*syscall.Stat_t) + si2 := i2.Sys().(*syscall.Stat_t) + if si1.Dev == si2.Dev { + sameDevice = true + } + } + + // If these files are both non-existent, or leaves (non-dirs), we are done. 
+ if !is1Dir && !is2Dir { + return nil + } + + // Fetch the names of all the files contained in both directories being walked: + var names1, names2 []nameIno + if is1Dir { + names1, err = readdirnames(filepath.Join(w.dir1, path)) // getdents(2): fs access + if err != nil { + return err + } + } + if is2Dir { + names2, err = readdirnames(filepath.Join(w.dir2, path)) // getdents(2): fs access + if err != nil { + return err + } + } + + // We have lists of the files contained in both parallel directories, sorted + // in the same order. Walk them in parallel, generating a unique merged list + // of all items present in either or both directories. + var names []string + ix1 := 0 + ix2 := 0 + + for { + if ix1 >= len(names1) { + break + } + if ix2 >= len(names2) { + break + } + + ni1 := names1[ix1] + ni2 := names2[ix2] + + switch bytes.Compare([]byte(ni1.name), []byte(ni2.name)) { + case -1: // ni1 < ni2 -- advance ni1 + // we will not encounter ni1 in names2 + names = append(names, ni1.name) + ix1++ + case 0: // ni1 == ni2 + if ni1.ino != ni2.ino || !sameDevice { + names = append(names, ni1.name) + } + ix1++ + ix2++ + case 1: // ni1 > ni2 -- advance ni2 + // we will not encounter ni2 in names1 + names = append(names, ni2.name) + ix2++ + } + } + for ix1 < len(names1) { + names = append(names, names1[ix1].name) + ix1++ + } + for ix2 < len(names2) { + names = append(names, names2[ix2].name) + ix2++ + } + + // For each of the names present in either or both of the directories being + // iterated, stat the name under each root, and recurse the pair of them: + for _, name := range names { + fname := filepath.Join(path, name) + var cInfo1, cInfo2 os.FileInfo + if is1Dir { + cInfo1, err = os.Lstat(filepath.Join(w.dir1, fname)) // lstat(2): fs access + if err != nil && !os.IsNotExist(err) { + return err + } + } + if is2Dir { + cInfo2, err = os.Lstat(filepath.Join(w.dir2, fname)) // lstat(2): fs access + if err != nil && !os.IsNotExist(err) { + return err + } + } + if err = w.walk(fname, cInfo1, cInfo2); err != nil { + return err + } + } + return nil +} + +// {name,inode} pairs used to support the early-pruning logic of the walker type +type nameIno struct { + name string + ino uint64 +} + +type nameInoSlice []nameIno + +func (s nameInoSlice) Len() int { return len(s) } +func (s nameInoSlice) Swap(i, j int) { s[i], s[j] = s[j], s[i] } +func (s nameInoSlice) Less(i, j int) bool { return s[i].name < s[j].name } + +// readdirnames is a hacked-apart version of the Go stdlib code, exposing inode +// numbers further up the stack when reading directory contents. Unlike +// os.Readdirnames, which returns a list of filenames, this function returns a +// list of {filename,inode} pairs. +func readdirnames(dirname string) (names []nameIno, err error) { + var ( + size = 100 + buf = make([]byte, 4096) + nbuf int + bufp int + nb int + ) + + f, err := os.Open(dirname) + if err != nil { + return nil, err + } + defer f.Close() + + names = make([]nameIno, 0, size) // Empty with room to grow. 
+ for { + // Refill the buffer if necessary + if bufp >= nbuf { + bufp = 0 + nbuf, err = syscall.ReadDirent(int(f.Fd()), buf) // getdents on linux + if nbuf < 0 { + nbuf = 0 + } + if err != nil { + return nil, os.NewSyscallError("readdirent", err) + } + if nbuf <= 0 { + break // EOF + } + } + + // Drain the buffer + nb, names = parseDirent(buf[bufp:nbuf], names) + bufp += nb + } + + sl := nameInoSlice(names) + sort.Sort(sl) + return sl, nil +} + +// parseDirent is a minor modification of syscall.ParseDirent (linux version) +// which returns {name,inode} pairs instead of just names. +func parseDirent(buf []byte, names []nameIno) (consumed int, newnames []nameIno) { + origlen := len(buf) + for len(buf) > 0 { + dirent := (*syscall.Dirent)(unsafe.Pointer(&buf[0])) + buf = buf[dirent.Reclen:] + if dirent.Ino == 0 { // File absent in directory. + continue + } + bytes := (*[10000]byte)(unsafe.Pointer(&dirent.Name[0])) + var name = string(bytes[0:clen(bytes[:])]) + if name == "." || name == ".." { // Useless names + continue + } + names = append(names, nameIno{name, dirent.Ino}) + } + return origlen - len(buf), names +} + +func clen(n []byte) int { + for i := 0; i < len(n); i++ { + if n[i] == 0 { + return i + } + } + return len(n) +} diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/changes_other.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/changes_other.go new file mode 100644 index 000000000..35832f087 --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/changes_other.go @@ -0,0 +1,97 @@ +// +build !linux + +package archive + +import ( + "fmt" + "os" + "path/filepath" + "runtime" + "strings" + + "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system" +) + +func collectFileInfoForChanges(oldDir, newDir string) (*FileInfo, *FileInfo, error) { + var ( + oldRoot, newRoot *FileInfo + err1, err2 error + errs = make(chan error, 2) + ) + go func() { + oldRoot, err1 = collectFileInfo(oldDir) + errs <- err1 + }() + go func() { + newRoot, err2 = collectFileInfo(newDir) + errs <- err2 + }() + + // block until both routines have returned + for i := 0; i < 2; i++ { + if err := <-errs; err != nil { + return nil, nil, err + } + } + + return oldRoot, newRoot, nil +} + +func collectFileInfo(sourceDir string) (*FileInfo, error) { + root := newRootFileInfo() + + err := filepath.Walk(sourceDir, func(path string, f os.FileInfo, err error) error { + if err != nil { + return err + } + + // Rebase path + relPath, err := filepath.Rel(sourceDir, path) + if err != nil { + return err + } + + // As this runs on the daemon side, file paths are OS specific. + relPath = filepath.Join(string(os.PathSeparator), relPath) + + // See https://github.com/golang/go/issues/9168 - bug in filepath.Join. + // Temporary workaround. If the returned path starts with two backslashes, + // trim it down to a single backslash. Only relevant on Windows. 
+ if runtime.GOOS == "windows" { + if strings.HasPrefix(relPath, `\\`) { + relPath = relPath[1:] + } + } + + if relPath == string(os.PathSeparator) { + return nil + } + + parent := root.LookUp(filepath.Dir(relPath)) + if parent == nil { + return fmt.Errorf("collectFileInfo: Unexpectedly no parent for %s", relPath) + } + + info := &FileInfo{ + name: filepath.Base(relPath), + children: make(map[string]*FileInfo), + parent: parent, + } + + s, err := system.Lstat(path) + if err != nil { + return err + } + info.stat = s + + info.capability, _ = system.Lgetxattr(path, "security.capability") + + parent.children[info.name] = info + + return nil + }) + if err != nil { + return nil, err + } + return root, nil +} diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/changes_posix_test.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/changes_posix_test.go new file mode 100644 index 000000000..5a3282b5a --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/changes_posix_test.go @@ -0,0 +1,127 @@ +package archive + +import ( + "archive/tar" + "fmt" + "io" + "io/ioutil" + "os" + "path" + "sort" + "testing" +) + +func TestHardLinkOrder(t *testing.T) { + names := []string{"file1.txt", "file2.txt", "file3.txt"} + msg := []byte("Hey y'all") + + // Create dir + src, err := ioutil.TempDir("", "docker-hardlink-test-src-") + if err != nil { + t.Fatal(err) + } + //defer os.RemoveAll(src) + for _, name := range names { + func() { + fh, err := os.Create(path.Join(src, name)) + if err != nil { + t.Fatal(err) + } + defer fh.Close() + if _, err = fh.Write(msg); err != nil { + t.Fatal(err) + } + }() + } + // Create dest, with changes that includes hardlinks + dest, err := ioutil.TempDir("", "docker-hardlink-test-dest-") + if err != nil { + t.Fatal(err) + } + os.RemoveAll(dest) // we just want the name, at first + if err := copyDir(src, dest); err != nil { + t.Fatal(err) + } + defer os.RemoveAll(dest) + for _, name := range names { + for i := 0; i < 5; i++ { + if err := os.Link(path.Join(dest, name), path.Join(dest, fmt.Sprintf("%s.link%d", name, i))); err != nil { + t.Fatal(err) + } + } + } + + // get changes + changes, err := ChangesDirs(dest, src) + if err != nil { + t.Fatal(err) + } + + // sort + sort.Sort(changesByPath(changes)) + + // ExportChanges + ar, err := ExportChanges(dest, changes, nil, nil) + if err != nil { + t.Fatal(err) + } + hdrs, err := walkHeaders(ar) + if err != nil { + t.Fatal(err) + } + + // reverse sort + sort.Sort(sort.Reverse(changesByPath(changes))) + // ExportChanges + arRev, err := ExportChanges(dest, changes, nil, nil) + if err != nil { + t.Fatal(err) + } + hdrsRev, err := walkHeaders(arRev) + if err != nil { + t.Fatal(err) + } + + // line up the two sets + sort.Sort(tarHeaders(hdrs)) + sort.Sort(tarHeaders(hdrsRev)) + + // compare Size and LinkName + for i := range hdrs { + if hdrs[i].Name != hdrsRev[i].Name { + t.Errorf("headers - expected name %q; but got %q", hdrs[i].Name, hdrsRev[i].Name) + } + if hdrs[i].Size != hdrsRev[i].Size { + t.Errorf("headers - %q expected size %d; but got %d", hdrs[i].Name, hdrs[i].Size, hdrsRev[i].Size) + } + if hdrs[i].Typeflag != hdrsRev[i].Typeflag { + t.Errorf("headers - %q expected type %d; but got %d", hdrs[i].Name, hdrs[i].Typeflag, hdrsRev[i].Typeflag) + } + if hdrs[i].Linkname != hdrsRev[i].Linkname { + t.Errorf("headers - %q expected linkname %q; but got %q", hdrs[i].Name, hdrs[i].Linkname, 
hdrsRev[i].Linkname)
+		}
+	}
+
+}
+
+type tarHeaders []tar.Header
+
+func (th tarHeaders) Len() int           { return len(th) }
+func (th tarHeaders) Swap(i, j int)      { th[j], th[i] = th[i], th[j] }
+func (th tarHeaders) Less(i, j int) bool { return th[i].Name < th[j].Name }
+
+func walkHeaders(r io.Reader) ([]tar.Header, error) {
+	t := tar.NewReader(r)
+	headers := []tar.Header{}
+	for {
+		hdr, err := t.Next()
+		if err != nil {
+			if err == io.EOF {
+				break
+			}
+			return headers, err
+		}
+		headers = append(headers, *hdr)
+	}
+	return headers, nil
+}
diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/changes_test.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/changes_test.go
new file mode 100644
index 000000000..f4316ce21
--- /dev/null
+++ b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/changes_test.go
@@ -0,0 +1,527 @@
+package archive
+
+import (
+	"io/ioutil"
+	"os"
+	"os/exec"
+	"path"
+	"sort"
+	"testing"
+	"time"
+)
+
+func max(x, y int) int {
+	if x >= y {
+		return x
+	}
+	return y
+}
+
+func copyDir(src, dst string) error {
+	cmd := exec.Command("cp", "-a", src, dst)
+	if err := cmd.Run(); err != nil {
+		return err
+	}
+	return nil
+}
+
+type FileType uint32
+
+const (
+	Regular FileType = iota
+	Dir
+	Symlink
+)
+
+type FileData struct {
+	filetype    FileType
+	path        string
+	contents    string
+	permissions os.FileMode
+}
+
+func createSampleDir(t *testing.T, root string) {
+	files := []FileData{
+		{Regular, "file1", "file1\n", 0600},
+		{Regular, "file2", "file2\n", 0666},
+		{Regular, "file3", "file3\n", 0404},
+		{Regular, "file4", "file4\n", 0600},
+		{Regular, "file5", "file5\n", 0600},
+		{Regular, "file6", "file6\n", 0600},
+		{Regular, "file7", "file7\n", 0600},
+		{Dir, "dir1", "", 0740},
+		{Regular, "dir1/file1-1", "file1-1\n", 01444},
+		{Regular, "dir1/file1-2", "file1-2\n", 0666},
+		{Dir, "dir2", "", 0700},
+		{Regular, "dir2/file2-1", "file2-1\n", 0666},
+		{Regular, "dir2/file2-2", "file2-2\n", 0666},
+		{Dir, "dir3", "", 0700},
+		{Regular, "dir3/file3-1", "file3-1\n", 0666},
+		{Regular, "dir3/file3-2", "file3-2\n", 0666},
+		{Dir, "dir4", "", 0700},
+		{Regular, "dir4/file3-1", "file4-1\n", 0666},
+		{Regular, "dir4/file3-2", "file4-2\n", 0666},
+		{Symlink, "symlink1", "target1", 0666},
+		{Symlink, "symlink2", "target2", 0666},
+		{Symlink, "symlink3", root + "/file1", 0666},
+		{Symlink, "symlink4", root + "/symlink3", 0666},
+		{Symlink, "dirSymlink", root + "/dir1", 0740},
+	}
+
+	now := time.Now()
+	for _, info := range files {
+		p := path.Join(root, info.path)
+		if info.filetype == Dir {
+			if err := os.MkdirAll(p, info.permissions); err != nil {
+				t.Fatal(err)
+			}
+		} else if info.filetype == Regular {
+			if err := ioutil.WriteFile(p, []byte(info.contents), info.permissions); err != nil {
+				t.Fatal(err)
+			}
+		} else if info.filetype == Symlink {
+			if err := os.Symlink(info.contents, p); err != nil {
+				t.Fatal(err)
+			}
+		}
+
+		if info.filetype != Symlink {
+			// Set a consistent ctime, atime for all files and dirs
+			if err := os.Chtimes(p, now, now); err != nil {
+				t.Fatal(err)
+			}
+		}
+	}
+}
+
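+// The Changes tests below rely on the AUFS whiteout convention: deleting a
+// file in an upper layer leaves a marker file named WhiteoutPrefix + name
+// (".wh.file1-2" for file1-2), which Changes reports as a ChangeDelete for
+// the original path.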
+ t.Fatalf("String() of a change with ChangeAdd Kind should have been %s but was %s", "A change", toString) + } + deleteChange := Change{"change", ChangeDelete} + toString = deleteChange.String() + if toString != "D change" { + t.Fatalf("String() of a change with ChangeDelete Kind should have been %s but was %s", "D change", toString) + } +} + +func TestChangesWithNoChanges(t *testing.T) { + rwLayer, err := ioutil.TempDir("", "docker-changes-test") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(rwLayer) + layer, err := ioutil.TempDir("", "docker-changes-test-layer") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(layer) + createSampleDir(t, layer) + changes, err := Changes([]string{layer}, rwLayer) + if err != nil { + t.Fatal(err) + } + if len(changes) != 0 { + t.Fatalf("Changes with no difference should have detect no changes, but detected %d", len(changes)) + } +} + +func TestChangesWithChanges(t *testing.T) { + // Mock the readonly layer + layer, err := ioutil.TempDir("", "docker-changes-test-layer") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(layer) + createSampleDir(t, layer) + os.MkdirAll(path.Join(layer, "dir1/subfolder"), 0740) + + // Mock the RW layer + rwLayer, err := ioutil.TempDir("", "docker-changes-test") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(rwLayer) + + // Create a folder in RW layer + dir1 := path.Join(rwLayer, "dir1") + os.MkdirAll(dir1, 0740) + deletedFile := path.Join(dir1, ".wh.file1-2") + ioutil.WriteFile(deletedFile, []byte{}, 0600) + modifiedFile := path.Join(dir1, "file1-1") + ioutil.WriteFile(modifiedFile, []byte{0x00}, 01444) + // Let's add a subfolder for a newFile + subfolder := path.Join(dir1, "subfolder") + os.MkdirAll(subfolder, 0740) + newFile := path.Join(subfolder, "newFile") + ioutil.WriteFile(newFile, []byte{}, 0740) + + changes, err := Changes([]string{layer}, rwLayer) + if err != nil { + t.Fatal(err) + } + + expectedChanges := []Change{ + {"/dir1", ChangeModify}, + {"/dir1/file1-1", ChangeModify}, + {"/dir1/file1-2", ChangeDelete}, + {"/dir1/subfolder", ChangeModify}, + {"/dir1/subfolder/newFile", ChangeAdd}, + } + checkChanges(expectedChanges, changes, t) +} + +// See https://github.com/docker/docker/pull/13590 +func TestChangesWithChangesGH13590(t *testing.T) { + baseLayer, err := ioutil.TempDir("", "docker-changes-test.") + defer os.RemoveAll(baseLayer) + + dir3 := path.Join(baseLayer, "dir1/dir2/dir3") + os.MkdirAll(dir3, 07400) + + file := path.Join(dir3, "file.txt") + ioutil.WriteFile(file, []byte("hello"), 0666) + + layer, err := ioutil.TempDir("", "docker-changes-test2.") + defer os.RemoveAll(layer) + + // Test creating a new file + if err := copyDir(baseLayer+"/dir1", layer+"/"); err != nil { + t.Fatalf("Cmd failed: %q", err) + } + + os.Remove(path.Join(layer, "dir1/dir2/dir3/file.txt")) + file = path.Join(layer, "dir1/dir2/dir3/file1.txt") + ioutil.WriteFile(file, []byte("bye"), 0666) + + changes, err := Changes([]string{baseLayer}, layer) + if err != nil { + t.Fatal(err) + } + + expectedChanges := []Change{ + {"/dir1/dir2/dir3", ChangeModify}, + {"/dir1/dir2/dir3/file1.txt", ChangeAdd}, + } + checkChanges(expectedChanges, changes, t) + + // Now test changing a file + layer, err = ioutil.TempDir("", "docker-changes-test3.") + defer os.RemoveAll(layer) + + if err := copyDir(baseLayer+"/dir1", layer+"/"); err != nil { + t.Fatalf("Cmd failed: %q", err) + } + + file = path.Join(layer, "dir1/dir2/dir3/file.txt") + ioutil.WriteFile(file, []byte("bye"), 0666) + + changes, err = 
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	expectedChanges = []Change{
+		{"/dir1/dir2/dir3/file.txt", ChangeModify},
+	}
+	checkChanges(expectedChanges, changes, t)
+}
+
+// Create a directory, copy it, and make sure we report no changes between the two
+func TestChangesDirsEmpty(t *testing.T) {
+	src, err := ioutil.TempDir("", "docker-changes-test")
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer os.RemoveAll(src)
+	createSampleDir(t, src)
+	dst := src + "-copy"
+	if err := copyDir(src, dst); err != nil {
+		t.Fatal(err)
+	}
+	defer os.RemoveAll(dst)
+	changes, err := ChangesDirs(dst, src)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if len(changes) != 0 {
+		t.Fatalf("Reported changes for identical dirs: %v", changes)
+	}
+	os.RemoveAll(src)
+	os.RemoveAll(dst)
+}
+
+func mutateSampleDir(t *testing.T, root string) {
+	// Remove a regular file
+	if err := os.RemoveAll(path.Join(root, "file1")); err != nil {
+		t.Fatal(err)
+	}
+
+	// Remove a directory
+	if err := os.RemoveAll(path.Join(root, "dir1")); err != nil {
+		t.Fatal(err)
+	}
+
+	// Remove a symlink
+	if err := os.RemoveAll(path.Join(root, "symlink1")); err != nil {
+		t.Fatal(err)
+	}
+
+	// Rewrite a file
+	if err := ioutil.WriteFile(path.Join(root, "file2"), []byte("fileNN\n"), 0777); err != nil {
+		t.Fatal(err)
+	}
+
+	// Replace a file
+	if err := os.RemoveAll(path.Join(root, "file3")); err != nil {
+		t.Fatal(err)
+	}
+	if err := ioutil.WriteFile(path.Join(root, "file3"), []byte("fileMM\n"), 0404); err != nil {
+		t.Fatal(err)
+	}
+
+	// Touch file
+	if err := os.Chtimes(path.Join(root, "file4"), time.Now().Add(time.Second), time.Now().Add(time.Second)); err != nil {
+		t.Fatal(err)
+	}
+
+	// Replace file with dir
+	if err := os.RemoveAll(path.Join(root, "file5")); err != nil {
+		t.Fatal(err)
+	}
+	if err := os.MkdirAll(path.Join(root, "file5"), 0666); err != nil {
+		t.Fatal(err)
+	}
+
+	// Create new file
+	if err := ioutil.WriteFile(path.Join(root, "filenew"), []byte("filenew\n"), 0777); err != nil {
+		t.Fatal(err)
+	}
+
+	// Create new dir
+	if err := os.MkdirAll(path.Join(root, "dirnew"), 0766); err != nil {
+		t.Fatal(err)
+	}
+
+	// Create a new symlink
+	if err := os.Symlink("targetnew", path.Join(root, "symlinknew")); err != nil {
+		t.Fatal(err)
+	}
+
+	// Change a symlink
+	if err := os.RemoveAll(path.Join(root, "symlink2")); err != nil {
+		t.Fatal(err)
+	}
+	if err := os.Symlink("target2change", path.Join(root, "symlink2")); err != nil {
+		t.Fatal(err)
+	}
+
+	// Replace dir with file
+	if err := os.RemoveAll(path.Join(root, "dir2")); err != nil {
+		t.Fatal(err)
+	}
+	if err := ioutil.WriteFile(path.Join(root, "dir2"), []byte("dir2\n"), 0777); err != nil {
+		t.Fatal(err)
+	}
+
+	// Touch dir
+	if err := os.Chtimes(path.Join(root, "dir3"), time.Now().Add(time.Second), time.Now().Add(time.Second)); err != nil {
+		t.Fatal(err)
+	}
+}
+
+func TestChangesDirsMutated(t *testing.T) {
+	src, err := ioutil.TempDir("", "docker-changes-test")
+	if err != nil {
+		t.Fatal(err)
+	}
+	createSampleDir(t, src)
+	dst := src + "-copy"
+	if err := copyDir(src, dst); err != nil {
+		t.Fatal(err)
+	}
+	defer os.RemoveAll(src)
+	defer os.RemoveAll(dst)
+
+	mutateSampleDir(t, dst)
+
+	changes, err := ChangesDirs(dst, src)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	sort.Sort(changesByPath(changes))
+
+	expectedChanges := []Change{
+		{"/dir1", ChangeDelete},
+		{"/dir2", ChangeModify},
+		{"/dirnew", ChangeAdd},
+		{"/file1", ChangeDelete},
+		{"/file2", ChangeModify},
+		{"/file3", ChangeModify},
+		{"/file4", ChangeModify},
{"/file5", ChangeModify}, + {"/filenew", ChangeAdd}, + {"/symlink1", ChangeDelete}, + {"/symlink2", ChangeModify}, + {"/symlinknew", ChangeAdd}, + } + + for i := 0; i < max(len(changes), len(expectedChanges)); i++ { + if i >= len(expectedChanges) { + t.Fatalf("unexpected change %s\n", changes[i].String()) + } + if i >= len(changes) { + t.Fatalf("no change for expected change %s\n", expectedChanges[i].String()) + } + if changes[i].Path == expectedChanges[i].Path { + if changes[i] != expectedChanges[i] { + t.Fatalf("Wrong change for %s, expected %s, got %s\n", changes[i].Path, changes[i].String(), expectedChanges[i].String()) + } + } else if changes[i].Path < expectedChanges[i].Path { + t.Fatalf("unexpected change %s\n", changes[i].String()) + } else { + t.Fatalf("no change for expected change %s != %s\n", expectedChanges[i].String(), changes[i].String()) + } + } +} + +func TestApplyLayer(t *testing.T) { + src, err := ioutil.TempDir("", "docker-changes-test") + if err != nil { + t.Fatal(err) + } + createSampleDir(t, src) + defer os.RemoveAll(src) + dst := src + "-copy" + if err := copyDir(src, dst); err != nil { + t.Fatal(err) + } + mutateSampleDir(t, dst) + defer os.RemoveAll(dst) + + changes, err := ChangesDirs(dst, src) + if err != nil { + t.Fatal(err) + } + + layer, err := ExportChanges(dst, changes, nil, nil) + if err != nil { + t.Fatal(err) + } + + layerCopy, err := NewTempArchive(layer, "") + if err != nil { + t.Fatal(err) + } + + if _, err := ApplyLayer(src, layerCopy); err != nil { + t.Fatal(err) + } + + changes2, err := ChangesDirs(src, dst) + if err != nil { + t.Fatal(err) + } + + if len(changes2) != 0 { + t.Fatalf("Unexpected differences after reapplying mutation: %v", changes2) + } +} + +func TestChangesSizeWithHardlinks(t *testing.T) { + srcDir, err := ioutil.TempDir("", "docker-test-srcDir") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(srcDir) + + destDir, err := ioutil.TempDir("", "docker-test-destDir") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(destDir) + + creationSize, err := prepareUntarSourceDirectory(100, destDir, true) + if err != nil { + t.Fatal(err) + } + + changes, err := ChangesDirs(destDir, srcDir) + if err != nil { + t.Fatal(err) + } + + got := ChangesSize(destDir, changes) + if got != int64(creationSize) { + t.Errorf("Expected %d bytes of changes, got %d", creationSize, got) + } +} + +func TestChangesSizeWithNoChanges(t *testing.T) { + size := ChangesSize("/tmp", nil) + if size != 0 { + t.Fatalf("ChangesSizes with no changes should be 0, was %d", size) + } +} + +func TestChangesSizeWithOnlyDeleteChanges(t *testing.T) { + changes := []Change{ + {Path: "deletedPath", Kind: ChangeDelete}, + } + size := ChangesSize("/tmp", changes) + if size != 0 { + t.Fatalf("ChangesSizes with only delete changes should be 0, was %d", size) + } +} + +func TestChangesSize(t *testing.T) { + parentPath, err := ioutil.TempDir("", "docker-changes-test") + defer os.RemoveAll(parentPath) + addition := path.Join(parentPath, "addition") + if err := ioutil.WriteFile(addition, []byte{0x01, 0x01, 0x01}, 0744); err != nil { + t.Fatal(err) + } + modification := path.Join(parentPath, "modification") + if err = ioutil.WriteFile(modification, []byte{0x01, 0x01, 0x01}, 0744); err != nil { + t.Fatal(err) + } + changes := []Change{ + {Path: "addition", Kind: ChangeAdd}, + {Path: "modification", Kind: ChangeModify}, + } + size := ChangesSize(parentPath, changes) + if size != 6 { + t.Fatalf("Expected 6 bytes of changes, got %d", size) + } +} + +func 
+func checkChanges(expectedChanges, changes []Change, t *testing.T) {
+	sort.Sort(changesByPath(expectedChanges))
+	sort.Sort(changesByPath(changes))
+	for i := 0; i < max(len(changes), len(expectedChanges)); i++ {
+		if i >= len(expectedChanges) {
+			t.Fatalf("unexpected change %s\n", changes[i].String())
+		}
+		if i >= len(changes) {
+			t.Fatalf("no change for expected change %s\n", expectedChanges[i].String())
+		}
+		if changes[i].Path == expectedChanges[i].Path {
+			if changes[i] != expectedChanges[i] {
+				t.Fatalf("Wrong change for %s, expected %s, got %s\n", changes[i].Path, expectedChanges[i].String(), changes[i].String())
+			}
+		} else if changes[i].Path < expectedChanges[i].Path {
+			t.Fatalf("unexpected change %s\n", changes[i].String())
+		} else {
+			t.Fatalf("no change for expected change %s != %s\n", expectedChanges[i].String(), changes[i].String())
+		}
+	}
+}
diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/changes_unix.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/changes_unix.go
new file mode 100644
index 000000000..6646b4dfd
--- /dev/null
+++ b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/changes_unix.go
@@ -0,0 +1,35 @@
+// +build !windows
+
+package archive
+
+import (
+	"os"
+	"syscall"
+
+	"github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system"
+)
+
+func statDifferent(oldStat *system.StatT, newStat *system.StatT) bool {
+	if oldStat.Mode() != newStat.Mode() ||
+		oldStat.UID() != newStat.UID() ||
+		oldStat.GID() != newStat.GID() ||
+		oldStat.Rdev() != newStat.Rdev() ||
+		// Don't look at size for dirs; it's not a good measure of change
+		(oldStat.Mode()&syscall.S_IFDIR != syscall.S_IFDIR &&
+			(!sameFsTimeSpec(oldStat.Mtim(), newStat.Mtim()) || (oldStat.Size() != newStat.Size()))) {
+		return true
+	}
+	return false
+}
+
+func (info *FileInfo) isDir() bool {
+	return info.parent == nil || info.stat.Mode()&syscall.S_IFDIR != 0
+}
+
+func getIno(fi os.FileInfo) uint64 {
+	return uint64(fi.Sys().(*syscall.Stat_t).Ino)
+}
+
+func hasHardlinks(fi os.FileInfo) bool {
+	return fi.Sys().(*syscall.Stat_t).Nlink > 1
+}
diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/changes_windows.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/changes_windows.go
new file mode 100644
index 000000000..2d8708d0a
--- /dev/null
+++ b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/changes_windows.go
@@ -0,0 +1,30 @@
+package archive
+
+import (
+	"os"
+
+	"github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system"
+)
+
+func statDifferent(oldStat *system.StatT, newStat *system.StatT) bool {
+
+	// Don't look at size for dirs; it's not a good measure of change
+	if oldStat.ModTime() != newStat.ModTime() ||
+		oldStat.Mode() != newStat.Mode() ||
+		oldStat.Size() != newStat.Size() && !oldStat.IsDir() {
+		return true
+	}
+	return false
+}
+
+func (info *FileInfo) isDir() bool {
+	return info.parent == nil || info.stat.IsDir()
+}
+
+func getIno(fi os.FileInfo) (inode uint64) {
+	return
+}
+
+func hasHardlinks(fi os.FileInfo) bool {
+	return false
+}
diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/copy.go
b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/copy.go new file mode 100644 index 000000000..e95091264 --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/copy.go @@ -0,0 +1,458 @@ +package archive + +import ( + "archive/tar" + "errors" + "io" + "io/ioutil" + "os" + "path/filepath" + "strings" + + "github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus" + "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system" +) + +// Errors used or returned by this file. +var ( + ErrNotDirectory = errors.New("not a directory") + ErrDirNotExists = errors.New("no such directory") + ErrCannotCopyDir = errors.New("cannot copy directory") + ErrInvalidCopySource = errors.New("invalid copy source content") +) + +// PreserveTrailingDotOrSeparator returns the given cleaned path (after +// processing using any utility functions from the path or filepath stdlib +// packages) and appends a trailing `/.` or `/` if its corresponding original +// path (from before being processed by utility functions from the path or +// filepath stdlib packages) ends with a trailing `/.` or `/`. If the cleaned +// path already ends in a `.` path segment, then another is not added. If the +// clean path already ends in a path separator, then another is not added. +func PreserveTrailingDotOrSeparator(cleanedPath, originalPath string) string { + // Ensure paths are in platform semantics + cleanedPath = normalizePath(cleanedPath) + originalPath = normalizePath(originalPath) + + if !specifiesCurrentDir(cleanedPath) && specifiesCurrentDir(originalPath) { + if !hasTrailingPathSeparator(cleanedPath) { + // Add a separator if it doesn't already end with one (a cleaned + // path would only end in a separator if it is the root). + cleanedPath += string(filepath.Separator) + } + cleanedPath += "." + } + + if !hasTrailingPathSeparator(cleanedPath) && hasTrailingPathSeparator(originalPath) { + cleanedPath += string(filepath.Separator) + } + + return cleanedPath +} + +// assertsDirectory returns whether the given path is +// asserted to be a directory, i.e., the path ends with +// a trailing '/' or `/.`, assuming a path separator of `/`. +func assertsDirectory(path string) bool { + return hasTrailingPathSeparator(path) || specifiesCurrentDir(path) +} + +// hasTrailingPathSeparator returns whether the given +// path ends with the system's path separator character. +func hasTrailingPathSeparator(path string) bool { + return len(path) > 0 && os.IsPathSeparator(path[len(path)-1]) +} + +// specifiesCurrentDir returns whether the given path specifies +// a "current directory", i.e., the last path segment is `.`. +func specifiesCurrentDir(path string) bool { + return filepath.Base(path) == "." +} + +// SplitPathDirEntry splits the given path between its directory name and its +// basename by first cleaning the path but preserves a trailing "." if the +// original path specified the current directory. +func SplitPathDirEntry(path string) (dir, base string) { + cleanedPath := filepath.Clean(normalizePath(path)) + + if specifiesCurrentDir(path) { + cleanedPath += string(filepath.Separator) + "." + } + + return filepath.Dir(cleanedPath), filepath.Base(cleanedPath) +} + +// TarResource archives the resource described by the given CopyInfo to a Tar +// archive. A non-nil error is returned if sourcePath does not exist or is +// asserted to be a directory but exists as another type of file. 
+// +// This function acts as a convenient wrapper around TarWithOptions, which +// requires a directory as the source path. TarResource accepts either a +// directory or a file path and correctly sets the Tar options. +func TarResource(sourceInfo CopyInfo) (content Archive, err error) { + return TarResourceRebase(sourceInfo.Path, sourceInfo.RebaseName) +} + +// TarResourceRebase is like TarResource but renames the first path element of +// items in the resulting tar archive to match the given rebaseName if not "". +func TarResourceRebase(sourcePath, rebaseName string) (content Archive, err error) { + sourcePath = normalizePath(sourcePath) + if _, err = os.Lstat(sourcePath); err != nil { + // Catches the case where the source does not exist or is not a + // directory if asserted to be a directory, as this also causes an + // error. + return + } + + // Separate the source path between it's directory and + // the entry in that directory which we are archiving. + sourceDir, sourceBase := SplitPathDirEntry(sourcePath) + + filter := []string{sourceBase} + + logrus.Debugf("copying %q from %q", sourceBase, sourceDir) + + return TarWithOptions(sourceDir, &TarOptions{ + Compression: Uncompressed, + IncludeFiles: filter, + IncludeSourceDir: true, + RebaseNames: map[string]string{ + sourceBase: rebaseName, + }, + }) +} + +// CopyInfo holds basic info about the source +// or destination path of a copy operation. +type CopyInfo struct { + Path string + Exists bool + IsDir bool + RebaseName string +} + +// CopyInfoSourcePath stats the given path to create a CopyInfo +// struct representing that resource for the source of an archive copy +// operation. The given path should be an absolute local path. A source path +// has all symlinks evaluated that appear before the last path separator ("/" +// on Unix). As it is to be a copy source, the path must exist. +func CopyInfoSourcePath(path string, followLink bool) (CopyInfo, error) { + // normalize the file path and then evaluate the symbol link + // we will use the target file instead of the symbol link if + // followLink is set + path = normalizePath(path) + + resolvedPath, rebaseName, err := ResolveHostSourcePath(path, followLink) + if err != nil { + return CopyInfo{}, err + } + + stat, err := os.Lstat(resolvedPath) + if err != nil { + return CopyInfo{}, err + } + + return CopyInfo{ + Path: resolvedPath, + Exists: true, + IsDir: stat.IsDir(), + RebaseName: rebaseName, + }, nil +} + +// CopyInfoDestinationPath stats the given path to create a CopyInfo +// struct representing that resource for the destination of an archive copy +// operation. The given path should be an absolute local path. +func CopyInfoDestinationPath(path string) (info CopyInfo, err error) { + maxSymlinkIter := 10 // filepath.EvalSymlinks uses 255, but 10 already seems like a lot. + path = normalizePath(path) + originalPath := path + + stat, err := os.Lstat(path) + + if err == nil && stat.Mode()&os.ModeSymlink == 0 { + // The path exists and is not a symlink. + return CopyInfo{ + Path: path, + Exists: true, + IsDir: stat.IsDir(), + }, nil + } + + // While the path is a symlink. + for n := 0; err == nil && stat.Mode()&os.ModeSymlink != 0; n++ { + if n > maxSymlinkIter { + // Don't follow symlinks more than this arbitrary number of times. + return CopyInfo{}, errors.New("too many symlinks in " + originalPath) + } + + // The path is a symbolic link. We need to evaluate it so that the + // destination of the copy operation is the link target and not the + // link itself. 
This is notably different than CopyInfoSourcePath which + // only evaluates symlinks before the last appearing path separator. + // Also note that it is okay if the last path element is a broken + // symlink as the copy operation should create the target. + var linkTarget string + + linkTarget, err = os.Readlink(path) + if err != nil { + return CopyInfo{}, err + } + + if !system.IsAbs(linkTarget) { + // Join with the parent directory. + dstParent, _ := SplitPathDirEntry(path) + linkTarget = filepath.Join(dstParent, linkTarget) + } + + path = linkTarget + stat, err = os.Lstat(path) + } + + if err != nil { + // It's okay if the destination path doesn't exist. We can still + // continue the copy operation if the parent directory exists. + if !os.IsNotExist(err) { + return CopyInfo{}, err + } + + // Ensure destination parent dir exists. + dstParent, _ := SplitPathDirEntry(path) + + parentDirStat, err := os.Lstat(dstParent) + if err != nil { + return CopyInfo{}, err + } + if !parentDirStat.IsDir() { + return CopyInfo{}, ErrNotDirectory + } + + return CopyInfo{Path: path}, nil + } + + // The path exists after resolving symlinks. + return CopyInfo{ + Path: path, + Exists: true, + IsDir: stat.IsDir(), + }, nil +} + +// PrepareArchiveCopy prepares the given srcContent archive, which should +// contain the archived resource described by srcInfo, to the destination +// described by dstInfo. Returns the possibly modified content archive along +// with the path to the destination directory which it should be extracted to. +func PrepareArchiveCopy(srcContent Reader, srcInfo, dstInfo CopyInfo) (dstDir string, content Archive, err error) { + // Ensure in platform semantics + srcInfo.Path = normalizePath(srcInfo.Path) + dstInfo.Path = normalizePath(dstInfo.Path) + + // Separate the destination path between its directory and base + // components in case the source archive contents need to be rebased. + dstDir, dstBase := SplitPathDirEntry(dstInfo.Path) + _, srcBase := SplitPathDirEntry(srcInfo.Path) + + switch { + case dstInfo.Exists && dstInfo.IsDir: + // The destination exists as a directory. No alteration + // to srcContent is needed as its contents can be + // simply extracted to the destination directory. + return dstInfo.Path, ioutil.NopCloser(srcContent), nil + case dstInfo.Exists && srcInfo.IsDir: + // The destination exists as some type of file and the source + // content is a directory. This is an error condition since + // you cannot copy a directory to an existing file location. + return "", nil, ErrCannotCopyDir + case dstInfo.Exists: + // The destination exists as some type of file and the source content + // is also a file. The source content entry will have to be renamed to + // have a basename which matches the destination path's basename. + if len(srcInfo.RebaseName) != 0 { + srcBase = srcInfo.RebaseName + } + return dstDir, RebaseArchiveEntries(srcContent, srcBase, dstBase), nil + case srcInfo.IsDir: + // The destination does not exist and the source content is an archive + // of a directory. The archive should be extracted to the parent of + // the destination path instead, and when it is, the directory that is + // created as a result should take the name of the destination path. + // The source content entries will have to be renamed to have a + // basename which matches the destination path's basename. 
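+		// (Illustrative example, not from the original comment: copying the
+		// directory "/tmp/src/dir1" to the non-existent path "/tmp/dst/newdir"
+		// extracts into "/tmp/dst", with entry names rebased from "dir1/..."
+		// to "newdir/...".)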
+		if len(srcInfo.RebaseName) != 0 {
+			srcBase = srcInfo.RebaseName
+		}
+		return dstDir, RebaseArchiveEntries(srcContent, srcBase, dstBase), nil
+	case assertsDirectory(dstInfo.Path):
+		// The destination does not exist and is asserted to be created as a
+		// directory, but the source content is not a directory. This is an
+		// error condition since you cannot create a directory from a file
+		// source.
+		return "", nil, ErrDirNotExists
+	default:
+		// The last remaining case is when the destination does not exist, is
+		// not asserted to be a directory, and the source content is not an
+		// archive of a directory. In this case, the destination file will need
+		// to be created when the archive is extracted and the source content
+		// entry will have to be renamed to have a basename which matches the
+		// destination path's basename.
+		if len(srcInfo.RebaseName) != 0 {
+			srcBase = srcInfo.RebaseName
+		}
+		return dstDir, RebaseArchiveEntries(srcContent, srcBase, dstBase), nil
+	}
+
+}
+
+// RebaseArchiveEntries rewrites the given srcContent archive replacing
+// an occurrence of oldBase with newBase at the beginning of entry names.
+func RebaseArchiveEntries(srcContent Reader, oldBase, newBase string) Archive {
+	if oldBase == string(os.PathSeparator) {
+		// If oldBase specifies the root directory, use an empty string as
+		// oldBase instead so that newBase doesn't replace the path separator
+		// that all paths will start with.
+		oldBase = ""
+	}
+
+	rebased, w := io.Pipe()
+
+	go func() {
+		srcTar := tar.NewReader(srcContent)
+		rebasedTar := tar.NewWriter(w)
+
+		for {
+			hdr, err := srcTar.Next()
+			if err == io.EOF {
+				// Signals end of archive.
+				rebasedTar.Close()
+				w.Close()
+				return
+			}
+			if err != nil {
+				w.CloseWithError(err)
+				return
+			}
+
+			hdr.Name = strings.Replace(hdr.Name, oldBase, newBase, 1)
+
+			if err = rebasedTar.WriteHeader(hdr); err != nil {
+				w.CloseWithError(err)
+				return
+			}
+
+			if _, err = io.Copy(rebasedTar, srcTar); err != nil {
+				w.CloseWithError(err)
+				return
+			}
+		}
+	}()
+
+	return rebased
+}
+
+// CopyResource performs an archive copy from the given source path to the
+// given destination path. The source path MUST exist and the destination
+// path's parent directory must exist.
+func CopyResource(srcPath, dstPath string, followLink bool) error {
+	var (
+		srcInfo CopyInfo
+		err     error
+	)
+
+	// Ensure in platform semantics
+	srcPath = normalizePath(srcPath)
+	dstPath = normalizePath(dstPath)
+
+	// Clean the source and destination paths.
+	srcPath = PreserveTrailingDotOrSeparator(filepath.Clean(srcPath), srcPath)
+	dstPath = PreserveTrailingDotOrSeparator(filepath.Clean(dstPath), dstPath)
+
+	if srcInfo, err = CopyInfoSourcePath(srcPath, followLink); err != nil {
+		return err
+	}
+
+	content, err := TarResource(srcInfo)
+	if err != nil {
+		return err
+	}
+	defer content.Close()
+
+	return CopyTo(content, srcInfo, dstPath)
+}
+
+// CopyTo handles extracting the given content whose
+// entries should be sourced from srcInfo to dstPath.
+func CopyTo(content Reader, srcInfo CopyInfo, dstPath string) error {
+	// The destination path need not exist, but CopyInfoDestinationPath will
+	// ensure that at least the parent directory exists.
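+	// The overall flow below: resolve dstInfo, let PrepareArchiveCopy choose
+	// the extraction directory and rebase entry names as needed, then Untar
+	// the rebased archive into that directory with NoLchown set and without
+	// overwriting a directory with a non-directory (or vice versa).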
+	dstInfo, err := CopyInfoDestinationPath(normalizePath(dstPath))
+	if err != nil {
+		return err
+	}
+
+	dstDir, copyArchive, err := PrepareArchiveCopy(content, srcInfo, dstInfo)
+	if err != nil {
+		return err
+	}
+	defer copyArchive.Close()
+
+	options := &TarOptions{
+		NoLchown:             true,
+		NoOverwriteDirNonDir: true,
+	}
+
+	return Untar(copyArchive, dstDir, options)
+}
+
+// ResolveHostSourcePath decides which real path needs to be copied, depending
+// on whether symbolic links should be followed. If followLink is true,
+// resolvedPath is the target of any symbolic link in the path; otherwise only
+// symlinks in the parent directory are resolved and the base name is kept.
+func ResolveHostSourcePath(path string, followLink bool) (resolvedPath, rebaseName string, err error) {
+	if followLink {
+		resolvedPath, err = filepath.EvalSymlinks(path)
+		if err != nil {
+			return
+		}
+
+		resolvedPath, rebaseName = GetRebaseName(path, resolvedPath)
+	} else {
+		dirPath, basePath := filepath.Split(path)
+
+		// if not following the symlink, still resolve symlinks in the parent dir
+		var resolvedDirPath string
+		resolvedDirPath, err = filepath.EvalSymlinks(dirPath)
+		if err != nil {
+			return
+		}
+		// resolvedDirPath will have been cleaned (no trailing path separators) so
+		// we can manually join it with the base path element.
+		resolvedPath = resolvedDirPath + string(filepath.Separator) + basePath
+		if hasTrailingPathSeparator(path) && filepath.Base(path) != filepath.Base(resolvedPath) {
+			rebaseName = filepath.Base(path)
+		}
+	}
+	return resolvedPath, rebaseName, nil
+}
+
+// GetRebaseName normalizes and compares path and resolvedPath, returning the
+// completed resolved path and the rebased file name.
+func GetRebaseName(path, resolvedPath string) (string, string) {
+	// resolvedPath will have been cleaned (no trailing path separator or "."
+	// segment), so restore those from the original path before comparing.
+	var rebaseName string
+	if specifiesCurrentDir(path) && !specifiesCurrentDir(resolvedPath) {
+		resolvedPath += string(filepath.Separator) + "."
+	}
+
+	if hasTrailingPathSeparator(path) && !hasTrailingPathSeparator(resolvedPath) {
+		resolvedPath += string(filepath.Separator)
+	}
+
+	if filepath.Base(path) != filepath.Base(resolvedPath) {
+		// In the case where the path had a trailing separator and a symlink
+		// evaluation has changed the last path component, we will need to
+		// rebase the name in the archive that is being copied to match the
+		// originally requested name.
+ rebaseName = filepath.Base(path) + } + return resolvedPath, rebaseName +} diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/copy_test.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/copy_test.go new file mode 100644 index 000000000..f1dc23824 --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/copy_test.go @@ -0,0 +1,974 @@ +package archive + +import ( + "bytes" + "crypto/sha256" + "encoding/hex" + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" + "strings" + "testing" +) + +func removeAllPaths(paths ...string) { + for _, path := range paths { + os.RemoveAll(path) + } +} + +func getTestTempDirs(t *testing.T) (tmpDirA, tmpDirB string) { + var err error + + if tmpDirA, err = ioutil.TempDir("", "archive-copy-test"); err != nil { + t.Fatal(err) + } + + if tmpDirB, err = ioutil.TempDir("", "archive-copy-test"); err != nil { + t.Fatal(err) + } + + return +} + +func isNotDir(err error) bool { + return strings.Contains(err.Error(), "not a directory") +} + +func joinTrailingSep(pathElements ...string) string { + joined := filepath.Join(pathElements...) + + return fmt.Sprintf("%s%c", joined, filepath.Separator) +} + +func fileContentsEqual(t *testing.T, filenameA, filenameB string) (err error) { + t.Logf("checking for equal file contents: %q and %q\n", filenameA, filenameB) + + fileA, err := os.Open(filenameA) + if err != nil { + return + } + defer fileA.Close() + + fileB, err := os.Open(filenameB) + if err != nil { + return + } + defer fileB.Close() + + hasher := sha256.New() + + if _, err = io.Copy(hasher, fileA); err != nil { + return + } + + hashA := hasher.Sum(nil) + hasher.Reset() + + if _, err = io.Copy(hasher, fileB); err != nil { + return + } + + hashB := hasher.Sum(nil) + + if !bytes.Equal(hashA, hashB) { + err = fmt.Errorf("file content hashes not equal - expected %s, got %s", hex.EncodeToString(hashA), hex.EncodeToString(hashB)) + } + + return +} + +func dirContentsEqual(t *testing.T, newDir, oldDir string) (err error) { + t.Logf("checking for equal directory contents: %q and %q\n", newDir, oldDir) + + var changes []Change + + if changes, err = ChangesDirs(newDir, oldDir); err != nil { + return + } + + if len(changes) != 0 { + err = fmt.Errorf("expected no changes between directories, but got: %v", changes) + } + + return +} + +func logDirContents(t *testing.T, dirPath string) { + logWalkedPaths := filepath.WalkFunc(func(path string, info os.FileInfo, err error) error { + if err != nil { + t.Errorf("stat error for path %q: %s", path, err) + return nil + } + + if info.IsDir() { + path = joinTrailingSep(path) + } + + t.Logf("\t%s", path) + + return nil + }) + + t.Logf("logging directory contents: %q", dirPath) + + if err := filepath.Walk(dirPath, logWalkedPaths); err != nil { + t.Fatal(err) + } +} + +func testCopyHelper(t *testing.T, srcPath, dstPath string) (err error) { + t.Logf("copying from %q to %q (not follow symbol link)", srcPath, dstPath) + + return CopyResource(srcPath, dstPath, false) +} + +func testCopyHelperFSym(t *testing.T, srcPath, dstPath string) (err error) { + t.Logf("copying from %q to %q (follow symbol link)", srcPath, dstPath) + + return CopyResource(srcPath, dstPath, true) +} + +// Basic assumptions about SRC and DST: +// 1. SRC must exist. +// 2. If SRC ends with a trailing separator, it must be a directory. +// 3. DST parent directory must exist. +// 4. 
If DST exists as a file, it must not end with a trailing separator. + +// First get these easy error cases out of the way. + +// Test for error when SRC does not exist. +func TestCopyErrSrcNotExists(t *testing.T) { + tmpDirA, tmpDirB := getTestTempDirs(t) + defer removeAllPaths(tmpDirA, tmpDirB) + + if _, err := CopyInfoSourcePath(filepath.Join(tmpDirA, "file1"), false); !os.IsNotExist(err) { + t.Fatalf("expected IsNotExist error, but got %T: %s", err, err) + } +} + +// Test for error when SRC ends in a trailing +// path separator but it exists as a file. +func TestCopyErrSrcNotDir(t *testing.T) { + tmpDirA, tmpDirB := getTestTempDirs(t) + defer removeAllPaths(tmpDirA, tmpDirB) + + // Load A with some sample files and directories. + createSampleDir(t, tmpDirA) + + if _, err := CopyInfoSourcePath(joinTrailingSep(tmpDirA, "file1"), false); !isNotDir(err) { + t.Fatalf("expected IsNotDir error, but got %T: %s", err, err) + } +} + +// Test for error when SRC is a valid file or directory, +// but the DST parent directory does not exist. +func TestCopyErrDstParentNotExists(t *testing.T) { + tmpDirA, tmpDirB := getTestTempDirs(t) + defer removeAllPaths(tmpDirA, tmpDirB) + + // Load A with some sample files and directories. + createSampleDir(t, tmpDirA) + + srcInfo := CopyInfo{Path: filepath.Join(tmpDirA, "file1"), Exists: true, IsDir: false} + + // Try with a file source. + content, err := TarResource(srcInfo) + if err != nil { + t.Fatalf("unexpected error %T: %s", err, err) + } + defer content.Close() + + // Copy to a file whose parent does not exist. + if err = CopyTo(content, srcInfo, filepath.Join(tmpDirB, "fakeParentDir", "file1")); err == nil { + t.Fatal("expected IsNotExist error, but got nil instead") + } + + if !os.IsNotExist(err) { + t.Fatalf("expected IsNotExist error, but got %T: %s", err, err) + } + + // Try with a directory source. + srcInfo = CopyInfo{Path: filepath.Join(tmpDirA, "dir1"), Exists: true, IsDir: true} + + content, err = TarResource(srcInfo) + if err != nil { + t.Fatalf("unexpected error %T: %s", err, err) + } + defer content.Close() + + // Copy to a directory whose parent does not exist. + if err = CopyTo(content, srcInfo, joinTrailingSep(tmpDirB, "fakeParentDir", "fakeDstDir")); err == nil { + t.Fatal("expected IsNotExist error, but got nil instead") + } + + if !os.IsNotExist(err) { + t.Fatalf("expected IsNotExist error, but got %T: %s", err, err) + } +} + +// Test for error when DST ends in a trailing +// path separator but exists as a file. +func TestCopyErrDstNotDir(t *testing.T) { + tmpDirA, tmpDirB := getTestTempDirs(t) + defer removeAllPaths(tmpDirA, tmpDirB) + + // Load A and B with some sample files and directories. + createSampleDir(t, tmpDirA) + createSampleDir(t, tmpDirB) + + // Try with a file source. + srcInfo := CopyInfo{Path: filepath.Join(tmpDirA, "file1"), Exists: true, IsDir: false} + + content, err := TarResource(srcInfo) + if err != nil { + t.Fatalf("unexpected error %T: %s", err, err) + } + defer content.Close() + + if err = CopyTo(content, srcInfo, joinTrailingSep(tmpDirB, "file1")); err == nil { + t.Fatal("expected IsNotDir error, but got nil instead") + } + + if !isNotDir(err) { + t.Fatalf("expected IsNotDir error, but got %T: %s", err, err) + } + + // Try with a directory source. 
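+	// With a trailing separator, Lstat of "file1/" fails with ENOTDIR, which
+	// CopyInfoDestinationPath propagates unchanged; hence the isNotDir
+	// substring check rather than a sentinel error comparison.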
+ srcInfo = CopyInfo{Path: filepath.Join(tmpDirA, "dir1"), Exists: true, IsDir: true} + + content, err = TarResource(srcInfo) + if err != nil { + t.Fatalf("unexpected error %T: %s", err, err) + } + defer content.Close() + + if err = CopyTo(content, srcInfo, joinTrailingSep(tmpDirB, "file1")); err == nil { + t.Fatal("expected IsNotDir error, but got nil instead") + } + + if !isNotDir(err) { + t.Fatalf("expected IsNotDir error, but got %T: %s", err, err) + } +} + +// Possibilities are reduced to the remaining 10 cases: +// +// case | srcIsDir | onlyDirContents | dstExists | dstIsDir | dstTrSep | action +// =================================================================================================== +// A | no | - | no | - | no | create file +// B | no | - | no | - | yes | error +// C | no | - | yes | no | - | overwrite file +// D | no | - | yes | yes | - | create file in dst dir +// E | yes | no | no | - | - | create dir, copy contents +// F | yes | no | yes | no | - | error +// G | yes | no | yes | yes | - | copy dir and contents +// H | yes | yes | no | - | - | create dir, copy contents +// I | yes | yes | yes | no | - | error +// J | yes | yes | yes | yes | - | copy dir contents +// + +// A. SRC specifies a file and DST (no trailing path separator) doesn't +// exist. This should create a file with the name DST and copy the +// contents of the source file into it. +func TestCopyCaseA(t *testing.T) { + tmpDirA, tmpDirB := getTestTempDirs(t) + defer removeAllPaths(tmpDirA, tmpDirB) + + // Load A with some sample files and directories. + createSampleDir(t, tmpDirA) + + srcPath := filepath.Join(tmpDirA, "file1") + dstPath := filepath.Join(tmpDirB, "itWorks.txt") + + var err error + + if err = testCopyHelper(t, srcPath, dstPath); err != nil { + t.Fatalf("unexpected error %T: %s", err, err) + } + + if err = fileContentsEqual(t, srcPath, dstPath); err != nil { + t.Fatal(err) + } + os.Remove(dstPath) + + symlinkPath := filepath.Join(tmpDirA, "symlink3") + symlinkPath1 := filepath.Join(tmpDirA, "symlink4") + linkTarget := filepath.Join(tmpDirA, "file1") + + if err = testCopyHelperFSym(t, symlinkPath, dstPath); err != nil { + t.Fatalf("unexpected error %T: %s", err, err) + } + + if err = fileContentsEqual(t, linkTarget, dstPath); err != nil { + t.Fatal(err) + } + os.Remove(dstPath) + if err = testCopyHelperFSym(t, symlinkPath1, dstPath); err != nil { + t.Fatalf("unexpected error %T: %s", err, err) + } + + if err = fileContentsEqual(t, linkTarget, dstPath); err != nil { + t.Fatal(err) + } +} + +// B. SRC specifies a file and DST (with trailing path separator) doesn't +// exist. This should cause an error because the copy operation cannot +// create a directory when copying a single file. +func TestCopyCaseB(t *testing.T) { + tmpDirA, tmpDirB := getTestTempDirs(t) + defer removeAllPaths(tmpDirA, tmpDirB) + + // Load A with some sample files and directories. 
+ createSampleDir(t, tmpDirA) + + srcPath := filepath.Join(tmpDirA, "file1") + dstDir := joinTrailingSep(tmpDirB, "testDir") + + var err error + + if err = testCopyHelper(t, srcPath, dstDir); err == nil { + t.Fatal("expected ErrDirNotExists error, but got nil instead") + } + + if err != ErrDirNotExists { + t.Fatalf("expected ErrDirNotExists error, but got %T: %s", err, err) + } + + symlinkPath := filepath.Join(tmpDirA, "symlink3") + + if err = testCopyHelperFSym(t, symlinkPath, dstDir); err == nil { + t.Fatal("expected ErrDirNotExists error, but got nil instead") + } + if err != ErrDirNotExists { + t.Fatalf("expected ErrDirNotExists error, but got %T: %s", err, err) + } + +} + +// C. SRC specifies a file and DST exists as a file. This should overwrite +// the file at DST with the contents of the source file. +func TestCopyCaseC(t *testing.T) { + tmpDirA, tmpDirB := getTestTempDirs(t) + defer removeAllPaths(tmpDirA, tmpDirB) + + // Load A and B with some sample files and directories. + createSampleDir(t, tmpDirA) + createSampleDir(t, tmpDirB) + + srcPath := filepath.Join(tmpDirA, "file1") + dstPath := filepath.Join(tmpDirB, "file2") + + var err error + + // Ensure they start out different. + if err = fileContentsEqual(t, srcPath, dstPath); err == nil { + t.Fatal("expected different file contents") + } + + if err = testCopyHelper(t, srcPath, dstPath); err != nil { + t.Fatalf("unexpected error %T: %s", err, err) + } + + if err = fileContentsEqual(t, srcPath, dstPath); err != nil { + t.Fatal(err) + } +} + +// C. Symbol link following version: +// SRC specifies a file and DST exists as a file. This should overwrite +// the file at DST with the contents of the source file. +func TestCopyCaseCFSym(t *testing.T) { + tmpDirA, tmpDirB := getTestTempDirs(t) + defer removeAllPaths(tmpDirA, tmpDirB) + + // Load A and B with some sample files and directories. + createSampleDir(t, tmpDirA) + createSampleDir(t, tmpDirB) + + symlinkPathBad := filepath.Join(tmpDirA, "symlink1") + symlinkPath := filepath.Join(tmpDirA, "symlink3") + linkTarget := filepath.Join(tmpDirA, "file1") + dstPath := filepath.Join(tmpDirB, "file2") + + var err error + + // first to test broken link + if err = testCopyHelperFSym(t, symlinkPathBad, dstPath); err == nil { + t.Fatalf("unexpected error %T: %s", err, err) + } + + // test symbol link -> symbol link -> target + // Ensure they start out different. + if err = fileContentsEqual(t, linkTarget, dstPath); err == nil { + t.Fatal("expected different file contents") + } + + if err = testCopyHelperFSym(t, symlinkPath, dstPath); err != nil { + t.Fatalf("unexpected error %T: %s", err, err) + } + + if err = fileContentsEqual(t, linkTarget, dstPath); err != nil { + t.Fatal(err) + } +} + +// D. SRC specifies a file and DST exists as a directory. This should place +// a copy of the source file inside it using the basename from SRC. Ensure +// this works whether DST has a trailing path separator or not. +func TestCopyCaseD(t *testing.T) { + tmpDirA, tmpDirB := getTestTempDirs(t) + defer removeAllPaths(tmpDirA, tmpDirB) + + // Load A and B with some sample files and directories. + createSampleDir(t, tmpDirA) + createSampleDir(t, tmpDirB) + + srcPath := filepath.Join(tmpDirA, "file1") + dstDir := filepath.Join(tmpDirB, "dir1") + dstPath := filepath.Join(dstDir, "file1") + + var err error + + // Ensure that dstPath doesn't exist. 
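+	// (A pre-existing dstPath would make the content check below vacuous,
+	// since the copy is expected to create dir1/file1 itself.)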
+ if _, err = os.Stat(dstPath); !os.IsNotExist(err) { + t.Fatalf("did not expect dstPath %q to exist", dstPath) + } + + if err = testCopyHelper(t, srcPath, dstDir); err != nil { + t.Fatalf("unexpected error %T: %s", err, err) + } + + if err = fileContentsEqual(t, srcPath, dstPath); err != nil { + t.Fatal(err) + } + + // Now try again but using a trailing path separator for dstDir. + + if err = os.RemoveAll(dstDir); err != nil { + t.Fatalf("unable to remove dstDir: %s", err) + } + + if err = os.MkdirAll(dstDir, os.FileMode(0755)); err != nil { + t.Fatalf("unable to make dstDir: %s", err) + } + + dstDir = joinTrailingSep(tmpDirB, "dir1") + + if err = testCopyHelper(t, srcPath, dstDir); err != nil { + t.Fatalf("unexpected error %T: %s", err, err) + } + + if err = fileContentsEqual(t, srcPath, dstPath); err != nil { + t.Fatal(err) + } +} + +// D. Symbol link following version: +// SRC specifies a file and DST exists as a directory. This should place +// a copy of the source file inside it using the basename from SRC. Ensure +// this works whether DST has a trailing path separator or not. +func TestCopyCaseDFSym(t *testing.T) { + tmpDirA, tmpDirB := getTestTempDirs(t) + defer removeAllPaths(tmpDirA, tmpDirB) + + // Load A and B with some sample files and directories. + createSampleDir(t, tmpDirA) + createSampleDir(t, tmpDirB) + + srcPath := filepath.Join(tmpDirA, "symlink4") + linkTarget := filepath.Join(tmpDirA, "file1") + dstDir := filepath.Join(tmpDirB, "dir1") + dstPath := filepath.Join(dstDir, "symlink4") + + var err error + + // Ensure that dstPath doesn't exist. + if _, err = os.Stat(dstPath); !os.IsNotExist(err) { + t.Fatalf("did not expect dstPath %q to exist", dstPath) + } + + if err = testCopyHelperFSym(t, srcPath, dstDir); err != nil { + t.Fatalf("unexpected error %T: %s", err, err) + } + + if err = fileContentsEqual(t, linkTarget, dstPath); err != nil { + t.Fatal(err) + } + + // Now try again but using a trailing path separator for dstDir. + + if err = os.RemoveAll(dstDir); err != nil { + t.Fatalf("unable to remove dstDir: %s", err) + } + + if err = os.MkdirAll(dstDir, os.FileMode(0755)); err != nil { + t.Fatalf("unable to make dstDir: %s", err) + } + + dstDir = joinTrailingSep(tmpDirB, "dir1") + + if err = testCopyHelperFSym(t, srcPath, dstDir); err != nil { + t.Fatalf("unexpected error %T: %s", err, err) + } + + if err = fileContentsEqual(t, linkTarget, dstPath); err != nil { + t.Fatal(err) + } +} + +// E. SRC specifies a directory and DST does not exist. This should create a +// directory at DST and copy the contents of the SRC directory into the DST +// directory. Ensure this works whether DST has a trailing path separator or +// not. +func TestCopyCaseE(t *testing.T) { + tmpDirA, tmpDirB := getTestTempDirs(t) + defer removeAllPaths(tmpDirA, tmpDirB) + + // Load A with some sample files and directories. + createSampleDir(t, tmpDirA) + + srcDir := filepath.Join(tmpDirA, "dir1") + dstDir := filepath.Join(tmpDirB, "testDir") + + var err error + + if err = testCopyHelper(t, srcDir, dstDir); err != nil { + t.Fatalf("unexpected error %T: %s", err, err) + } + + if err = dirContentsEqual(t, dstDir, srcDir); err != nil { + t.Log("dir contents not equal") + logDirContents(t, tmpDirA) + logDirContents(t, tmpDirB) + t.Fatal(err) + } + + // Now try again but using a trailing path separator for dstDir. 
+ + if err = os.RemoveAll(dstDir); err != nil { + t.Fatalf("unable to remove dstDir: %s", err) + } + + dstDir = joinTrailingSep(tmpDirB, "testDir") + + if err = testCopyHelper(t, srcDir, dstDir); err != nil { + t.Fatalf("unexpected error %T: %s", err, err) + } + + if err = dirContentsEqual(t, dstDir, srcDir); err != nil { + t.Fatal(err) + } +} + +// E. Symbol link following version: +// SRC specifies a directory and DST does not exist. This should create a +// directory at DST and copy the contents of the SRC directory into the DST +// directory. Ensure this works whether DST has a trailing path separator or +// not. +func TestCopyCaseEFSym(t *testing.T) { + tmpDirA, tmpDirB := getTestTempDirs(t) + defer removeAllPaths(tmpDirA, tmpDirB) + + // Load A with some sample files and directories. + createSampleDir(t, tmpDirA) + + srcDir := filepath.Join(tmpDirA, "dirSymlink") + linkTarget := filepath.Join(tmpDirA, "dir1") + dstDir := filepath.Join(tmpDirB, "testDir") + + var err error + + if err = testCopyHelperFSym(t, srcDir, dstDir); err != nil { + t.Fatalf("unexpected error %T: %s", err, err) + } + + if err = dirContentsEqual(t, dstDir, linkTarget); err != nil { + t.Log("dir contents not equal") + logDirContents(t, tmpDirA) + logDirContents(t, tmpDirB) + t.Fatal(err) + } + + // Now try again but using a trailing path separator for dstDir. + + if err = os.RemoveAll(dstDir); err != nil { + t.Fatalf("unable to remove dstDir: %s", err) + } + + dstDir = joinTrailingSep(tmpDirB, "testDir") + + if err = testCopyHelperFSym(t, srcDir, dstDir); err != nil { + t.Fatalf("unexpected error %T: %s", err, err) + } + + if err = dirContentsEqual(t, dstDir, linkTarget); err != nil { + t.Fatal(err) + } +} + +// F. SRC specifies a directory and DST exists as a file. This should cause an +// error as it is not possible to overwrite a file with a directory. +func TestCopyCaseF(t *testing.T) { + tmpDirA, tmpDirB := getTestTempDirs(t) + defer removeAllPaths(tmpDirA, tmpDirB) + + // Load A and B with some sample files and directories. + createSampleDir(t, tmpDirA) + createSampleDir(t, tmpDirB) + + srcDir := filepath.Join(tmpDirA, "dir1") + symSrcDir := filepath.Join(tmpDirA, "dirSymlink") + dstFile := filepath.Join(tmpDirB, "file1") + + var err error + + if err = testCopyHelper(t, srcDir, dstFile); err == nil { + t.Fatal("expected ErrCannotCopyDir error, but got nil instead") + } + + if err != ErrCannotCopyDir { + t.Fatalf("expected ErrCannotCopyDir error, but got %T: %s", err, err) + } + + // now test with symbol link + if err = testCopyHelperFSym(t, symSrcDir, dstFile); err == nil { + t.Fatal("expected ErrCannotCopyDir error, but got nil instead") + } + + if err != ErrCannotCopyDir { + t.Fatalf("expected ErrCannotCopyDir error, but got %T: %s", err, err) + } +} + +// G. SRC specifies a directory and DST exists as a directory. This should copy +// the SRC directory and all its contents to the DST directory. Ensure this +// works whether DST has a trailing path separator or not. +func TestCopyCaseG(t *testing.T) { + tmpDirA, tmpDirB := getTestTempDirs(t) + defer removeAllPaths(tmpDirA, tmpDirB) + + // Load A and B with some sample files and directories. 
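+	// Per case G, dir1 is copied *into* the existing dir2, so the copied
+	// tree is compared at dir2/dir1 rather than at dir2 itself.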
+ createSampleDir(t, tmpDirA) + createSampleDir(t, tmpDirB) + + srcDir := filepath.Join(tmpDirA, "dir1") + dstDir := filepath.Join(tmpDirB, "dir2") + resultDir := filepath.Join(dstDir, "dir1") + + var err error + + if err = testCopyHelper(t, srcDir, dstDir); err != nil { + t.Fatalf("unexpected error %T: %s", err, err) + } + + if err = dirContentsEqual(t, resultDir, srcDir); err != nil { + t.Fatal(err) + } + + // Now try again but using a trailing path separator for dstDir. + + if err = os.RemoveAll(dstDir); err != nil { + t.Fatalf("unable to remove dstDir: %s", err) + } + + if err = os.MkdirAll(dstDir, os.FileMode(0755)); err != nil { + t.Fatalf("unable to make dstDir: %s", err) + } + + dstDir = joinTrailingSep(tmpDirB, "dir2") + + if err = testCopyHelper(t, srcDir, dstDir); err != nil { + t.Fatalf("unexpected error %T: %s", err, err) + } + + if err = dirContentsEqual(t, resultDir, srcDir); err != nil { + t.Fatal(err) + } +} + +// G. Symbol link version: +// SRC specifies a directory and DST exists as a directory. This should copy +// the SRC directory and all its contents to the DST directory. Ensure this +// works whether DST has a trailing path separator or not. +func TestCopyCaseGFSym(t *testing.T) { + tmpDirA, tmpDirB := getTestTempDirs(t) + defer removeAllPaths(tmpDirA, tmpDirB) + + // Load A and B with some sample files and directories. + createSampleDir(t, tmpDirA) + createSampleDir(t, tmpDirB) + + srcDir := filepath.Join(tmpDirA, "dirSymlink") + linkTarget := filepath.Join(tmpDirA, "dir1") + dstDir := filepath.Join(tmpDirB, "dir2") + resultDir := filepath.Join(dstDir, "dirSymlink") + + var err error + + if err = testCopyHelperFSym(t, srcDir, dstDir); err != nil { + t.Fatalf("unexpected error %T: %s", err, err) + } + + if err = dirContentsEqual(t, resultDir, linkTarget); err != nil { + t.Fatal(err) + } + + // Now try again but using a trailing path separator for dstDir. + + if err = os.RemoveAll(dstDir); err != nil { + t.Fatalf("unable to remove dstDir: %s", err) + } + + if err = os.MkdirAll(dstDir, os.FileMode(0755)); err != nil { + t.Fatalf("unable to make dstDir: %s", err) + } + + dstDir = joinTrailingSep(tmpDirB, "dir2") + + if err = testCopyHelperFSym(t, srcDir, dstDir); err != nil { + t.Fatalf("unexpected error %T: %s", err, err) + } + + if err = dirContentsEqual(t, resultDir, linkTarget); err != nil { + t.Fatal(err) + } +} + +// H. SRC specifies a directory's contents only and DST does not exist. This +// should create a directory at DST and copy the contents of the SRC +// directory (but not the directory itself) into the DST directory. Ensure +// this works whether DST has a trailing path separator or not. +func TestCopyCaseH(t *testing.T) { + tmpDirA, tmpDirB := getTestTempDirs(t) + defer removeAllPaths(tmpDirA, tmpDirB) + + // Load A with some sample files and directories. + createSampleDir(t, tmpDirA) + + srcDir := joinTrailingSep(tmpDirA, "dir1") + "." + dstDir := filepath.Join(tmpDirB, "testDir") + + var err error + + if err = testCopyHelper(t, srcDir, dstDir); err != nil { + t.Fatalf("unexpected error %T: %s", err, err) + } + + if err = dirContentsEqual(t, dstDir, srcDir); err != nil { + t.Log("dir contents not equal") + logDirContents(t, tmpDirA) + logDirContents(t, tmpDirB) + t.Fatal(err) + } + + // Now try again but using a trailing path separator for dstDir. 
+ + if err = os.RemoveAll(dstDir); err != nil { + t.Fatalf("unable to remove dstDir: %s", err) + } + + dstDir = joinTrailingSep(tmpDirB, "testDir") + + if err = testCopyHelper(t, srcDir, dstDir); err != nil { + t.Fatalf("unexpected error %T: %s", err, err) + } + + if err = dirContentsEqual(t, dstDir, srcDir); err != nil { + t.Log("dir contents not equal") + logDirContents(t, tmpDirA) + logDirContents(t, tmpDirB) + t.Fatal(err) + } +} + +// H. Symbol link following version: +// SRC specifies a directory's contents only and DST does not exist. This +// should create a directory at DST and copy the contents of the SRC +// directory (but not the directory itself) into the DST directory. Ensure +// this works whether DST has a trailing path separator or not. +func TestCopyCaseHFSym(t *testing.T) { + tmpDirA, tmpDirB := getTestTempDirs(t) + defer removeAllPaths(tmpDirA, tmpDirB) + + // Load A with some sample files and directories. + createSampleDir(t, tmpDirA) + + srcDir := joinTrailingSep(tmpDirA, "dirSymlink") + "." + linkTarget := filepath.Join(tmpDirA, "dir1") + dstDir := filepath.Join(tmpDirB, "testDir") + + var err error + + if err = testCopyHelperFSym(t, srcDir, dstDir); err != nil { + t.Fatalf("unexpected error %T: %s", err, err) + } + + if err = dirContentsEqual(t, dstDir, linkTarget); err != nil { + t.Log("dir contents not equal") + logDirContents(t, tmpDirA) + logDirContents(t, tmpDirB) + t.Fatal(err) + } + + // Now try again but using a trailing path separator for dstDir. + + if err = os.RemoveAll(dstDir); err != nil { + t.Fatalf("unable to remove dstDir: %s", err) + } + + dstDir = joinTrailingSep(tmpDirB, "testDir") + + if err = testCopyHelperFSym(t, srcDir, dstDir); err != nil { + t.Fatalf("unexpected error %T: %s", err, err) + } + + if err = dirContentsEqual(t, dstDir, linkTarget); err != nil { + t.Log("dir contents not equal") + logDirContents(t, tmpDirA) + logDirContents(t, tmpDirB) + t.Fatal(err) + } +} + +// I. SRC specifies a directory's contents only and DST exists as a file. This +// should cause an error as it is not possible to overwrite a file with a +// directory. +func TestCopyCaseI(t *testing.T) { + tmpDirA, tmpDirB := getTestTempDirs(t) + defer removeAllPaths(tmpDirA, tmpDirB) + + // Load A and B with some sample files and directories. + createSampleDir(t, tmpDirA) + createSampleDir(t, tmpDirB) + + srcDir := joinTrailingSep(tmpDirA, "dir1") + "." + symSrcDir := filepath.Join(tmpDirB, "dirSymlink") + dstFile := filepath.Join(tmpDirB, "file1") + + var err error + + if err = testCopyHelper(t, srcDir, dstFile); err == nil { + t.Fatal("expected ErrCannotCopyDir error, but got nil instead") + } + + if err != ErrCannotCopyDir { + t.Fatalf("expected ErrCannotCopyDir error, but got %T: %s", err, err) + } + + // now try with symbol link of dir + if err = testCopyHelperFSym(t, symSrcDir, dstFile); err == nil { + t.Fatal("expected ErrCannotCopyDir error, but got nil instead") + } + + if err != ErrCannotCopyDir { + t.Fatalf("expected ErrCannotCopyDir error, but got %T: %s", err, err) + } +} + +// J. SRC specifies a directory's contents only and DST exists as a directory. +// This should copy the contents of the SRC directory (but not the directory +// itself) into the DST directory. Ensure this works whether DST has a +// trailing path separator or not. +func TestCopyCaseJ(t *testing.T) { + tmpDirA, tmpDirB := getTestTempDirs(t) + defer removeAllPaths(tmpDirA, tmpDirB) + + // Load A and B with some sample files and directories. 
+ createSampleDir(t, tmpDirA) + createSampleDir(t, tmpDirB) + + srcDir := joinTrailingSep(tmpDirA, "dir1") + "." + dstDir := filepath.Join(tmpDirB, "dir5") + + var err error + + // first to create an empty dir + if err = os.MkdirAll(dstDir, os.FileMode(0755)); err != nil { + t.Fatalf("unable to make dstDir: %s", err) + } + + if err = testCopyHelper(t, srcDir, dstDir); err != nil { + t.Fatalf("unexpected error %T: %s", err, err) + } + + if err = dirContentsEqual(t, dstDir, srcDir); err != nil { + t.Fatal(err) + } + + // Now try again but using a trailing path separator for dstDir. + + if err = os.RemoveAll(dstDir); err != nil { + t.Fatalf("unable to remove dstDir: %s", err) + } + + if err = os.MkdirAll(dstDir, os.FileMode(0755)); err != nil { + t.Fatalf("unable to make dstDir: %s", err) + } + + dstDir = joinTrailingSep(tmpDirB, "dir5") + + if err = testCopyHelper(t, srcDir, dstDir); err != nil { + t.Fatalf("unexpected error %T: %s", err, err) + } + + if err = dirContentsEqual(t, dstDir, srcDir); err != nil { + t.Fatal(err) + } +} + +// J. Symbol link following version: +// SRC specifies a directory's contents only and DST exists as a directory. +// This should copy the contents of the SRC directory (but not the directory +// itself) into the DST directory. Ensure this works whether DST has a +// trailing path separator or not. +func TestCopyCaseJFSym(t *testing.T) { + tmpDirA, tmpDirB := getTestTempDirs(t) + defer removeAllPaths(tmpDirA, tmpDirB) + + // Load A and B with some sample files and directories. + createSampleDir(t, tmpDirA) + createSampleDir(t, tmpDirB) + + srcDir := joinTrailingSep(tmpDirA, "dirSymlink") + "." + linkTarget := filepath.Join(tmpDirA, "dir1") + dstDir := filepath.Join(tmpDirB, "dir5") + + var err error + + // first to create an empty dir + if err = os.MkdirAll(dstDir, os.FileMode(0755)); err != nil { + t.Fatalf("unable to make dstDir: %s", err) + } + + if err = testCopyHelperFSym(t, srcDir, dstDir); err != nil { + t.Fatalf("unexpected error %T: %s", err, err) + } + + if err = dirContentsEqual(t, dstDir, linkTarget); err != nil { + t.Fatal(err) + } + + // Now try again but using a trailing path separator for dstDir. 
+
+	if err = os.RemoveAll(dstDir); err != nil {
+		t.Fatalf("unable to remove dstDir: %s", err)
+	}
+
+	if err = os.MkdirAll(dstDir, os.FileMode(0755)); err != nil {
+		t.Fatalf("unable to make dstDir: %s", err)
+	}
+
+	dstDir = joinTrailingSep(tmpDirB, "dir5")
+
+	if err = testCopyHelperFSym(t, srcDir, dstDir); err != nil {
+		t.Fatalf("unexpected error %T: %s", err, err)
+	}
+
+	if err = dirContentsEqual(t, dstDir, linkTarget); err != nil {
+		t.Fatal(err)
+	}
+}
diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/copy_unix.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/copy_unix.go
new file mode 100644
index 000000000..e305b5e4a
--- /dev/null
+++ b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/copy_unix.go
@@ -0,0 +1,11 @@
+// +build !windows
+
+package archive
+
+import (
+	"path/filepath"
+)
+
+func normalizePath(path string) string {
+	return filepath.ToSlash(path)
+}
diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/copy_windows.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/copy_windows.go
new file mode 100644
index 000000000..2b775b45c
--- /dev/null
+++ b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/copy_windows.go
@@ -0,0 +1,9 @@
+package archive
+
+import (
+	"path/filepath"
+)
+
+func normalizePath(path string) string {
+	return filepath.FromSlash(path)
+}
diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/diff.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/diff.go
new file mode 100644
index 000000000..887dd54cc
--- /dev/null
+++ b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/diff.go
@@ -0,0 +1,283 @@
+package archive
+
+import (
+	"archive/tar"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"os"
+	"path/filepath"
+	"runtime"
+	"strings"
+
+	"github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus"
+	"github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/idtools"
+	"github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/pools"
+	"github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system"
+)
+
+// UnpackLayer unpacks `layer` into `dest`. The stream `layer` can be
+// compressed or uncompressed.
+// Returns the size in bytes of the contents of the layer.
+func UnpackLayer(dest string, layer Reader, options *TarOptions) (size int64, err error) {
+	tr := tar.NewReader(layer)
+	trBuf := pools.BufioReader32KPool.Get(tr)
+	defer pools.BufioReader32KPool.Put(trBuf)
+
+	var dirs []*tar.Header
+	unpackedPaths := make(map[string]struct{})
+
+	if options == nil {
+		options = &TarOptions{}
+	}
+	if options.ExcludePatterns == nil {
+		options.ExcludePatterns = []string{}
+	}
+	remappedRootUID, remappedRootGID, err := idtools.GetRootUIDGID(options.UIDMaps, options.GIDMaps)
+	if err != nil {
+		return 0, err
+	}
+
+	aufsTempdir := ""
+	aufsHardlinks := make(map[string]*tar.Header)
+
+	// Iterate through the files in the archive.
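+	// Each pass handles one tar entry: the name is normalized, AUFS
+	// whiteout metadata is intercepted, and everything else is extracted
+	// under dest with the uid/gid remapping applied below.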
+ for { + hdr, err := tr.Next() + if err == io.EOF { + // end of tar archive + break + } + if err != nil { + return 0, err + } + + size += hdr.Size + + // Normalize name, for safety and for a simple is-root check + hdr.Name = filepath.Clean(hdr.Name) + + // Windows does not support filenames with colons in them. Ignore + // these files. This is not a problem though (although it might + // appear that it is). Let's suppose a client is running docker pull. + // The daemon it points to is Windows. Would it make sense for the + // client to be doing a docker pull Ubuntu for example (which has files + // with colons in the name under /usr/share/man/man3)? No, absolutely + // not as it would really only make sense that they were pulling a + // Windows image. However, for development, it is necessary to be able + // to pull Linux images which are in the repository. + // + // TODO Windows. Once the registry is aware of what images are Windows- + // specific or Linux-specific, this warning should be changed to an error + // to cater for the situation where someone does manage to upload a Linux + // image but have it tagged as Windows inadvertently. + if runtime.GOOS == "windows" { + if strings.Contains(hdr.Name, ":") { + logrus.Warnf("Windows: Ignoring %s (is this a Linux image?)", hdr.Name) + continue + } + } + + // Note as these operations are platform specific, so must the slash be. + if !strings.HasSuffix(hdr.Name, string(os.PathSeparator)) { + // Not the root directory, ensure that the parent directory exists. + // This happened in some tests where an image had a tarfile without any + // parent directories. + parent := filepath.Dir(hdr.Name) + parentPath := filepath.Join(dest, parent) + + if _, err := os.Lstat(parentPath); err != nil && os.IsNotExist(err) { + err = system.MkdirAll(parentPath, 0600) + if err != nil { + return 0, err + } + } + } + + // Skip AUFS metadata dirs + if strings.HasPrefix(hdr.Name, WhiteoutMetaPrefix) { + // Regular files inside /.wh..wh.plnk can be used as hardlink targets + // We don't want this directory, but we need the files in them so that + // such hardlinks can be resolved. + if strings.HasPrefix(hdr.Name, WhiteoutLinkDir) && hdr.Typeflag == tar.TypeReg { + basename := filepath.Base(hdr.Name) + aufsHardlinks[basename] = hdr + if aufsTempdir == "" { + if aufsTempdir, err = ioutil.TempDir("", "dockerplnk"); err != nil { + return 0, err + } + defer os.RemoveAll(aufsTempdir) + } + if err := createTarFile(filepath.Join(aufsTempdir, basename), dest, hdr, tr, true, nil); err != nil { + return 0, err + } + } + + if hdr.Name != WhiteoutOpaqueDir { + continue + } + } + path := filepath.Join(dest, hdr.Name) + rel, err := filepath.Rel(dest, path) + if err != nil { + return 0, err + } + + // Note as these operations are platform specific, so must the slash be. 
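+		// A relative path that still begins with "../" after filepath.Rel
+		// means the entry would escape dest (an archive breakout attempt).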
+		if strings.HasPrefix(rel, ".."+string(os.PathSeparator)) {
+			return 0, breakoutError(fmt.Errorf("%q is outside of %q", hdr.Name, dest))
+		}
+		base := filepath.Base(path)
+
+		if strings.HasPrefix(base, WhiteoutPrefix) {
+			dir := filepath.Dir(path)
+			if base == WhiteoutOpaqueDir {
+				_, err := os.Lstat(dir)
+				if err != nil {
+					return 0, err
+				}
+				err = filepath.Walk(dir, func(path string, info os.FileInfo, err error) error {
+					if err != nil {
+						if os.IsNotExist(err) {
+							err = nil // parent was deleted
+						}
+						return err
+					}
+					if path == dir {
+						return nil
+					}
+					if _, exists := unpackedPaths[path]; !exists {
+						err := os.RemoveAll(path)
+						return err
+					}
+					return nil
+				})
+				if err != nil {
+					return 0, err
+				}
+			} else {
+				originalBase := base[len(WhiteoutPrefix):]
+				originalPath := filepath.Join(dir, originalBase)
+				if err := os.RemoveAll(originalPath); err != nil {
+					return 0, err
+				}
+			}
+		} else {
+			// If path exists we almost always just want to remove and replace it.
+			// The only exception is when it is a directory *and* the file from
+			// the layer is also a directory. Then we want to merge them (i.e.
+			// just apply the metadata from the layer).
+			if fi, err := os.Lstat(path); err == nil {
+				if !(fi.IsDir() && hdr.Typeflag == tar.TypeDir) {
+					if err := os.RemoveAll(path); err != nil {
+						return 0, err
+					}
+				}
+			}
+
+			trBuf.Reset(tr)
+			srcData := io.Reader(trBuf)
+			srcHdr := hdr
+
+			// Hard links into /.wh..wh.plnk don't work, as we don't extract that directory, so
+			// we manually retarget these into the temporary files we extracted them into
+			if hdr.Typeflag == tar.TypeLink && strings.HasPrefix(filepath.Clean(hdr.Linkname), WhiteoutLinkDir) {
+				linkBasename := filepath.Base(hdr.Linkname)
+				srcHdr = aufsHardlinks[linkBasename]
+				if srcHdr == nil {
+					return 0, fmt.Errorf("Invalid aufs hardlink")
+				}
+				tmpFile, err := os.Open(filepath.Join(aufsTempdir, linkBasename))
+				if err != nil {
+					return 0, err
+				}
+				defer tmpFile.Close()
+				srcData = tmpFile
+			}
+
+			// If the options contain uid & gid maps, convert the header's uid/gid
+			// entries using the maps such that lchown sets the proper mapped
+			// uid/gid after writing the file. We only perform this mapping if
+			// the file isn't already owned by the remapped root UID or GID, as
+			// that specific uid/gid has no mapping from container -> host, and
+			// those files already have the proper ownership for inside the
+			// container.
+			if srcHdr.Uid != remappedRootUID {
+				xUID, err := idtools.ToHost(srcHdr.Uid, options.UIDMaps)
+				if err != nil {
+					return 0, err
+				}
+				srcHdr.Uid = xUID
+			}
+			if srcHdr.Gid != remappedRootGID {
+				xGID, err := idtools.ToHost(srcHdr.Gid, options.GIDMaps)
+				if err != nil {
+					return 0, err
+				}
+				srcHdr.Gid = xGID
+			}
+			if err := createTarFile(path, dest, srcHdr, srcData, true, nil); err != nil {
+				return 0, err
+			}
+
+			// Directory mtimes must be handled at the end to avoid further
+			// file creation in them from modifying the directory mtime
+			if hdr.Typeflag == tar.TypeDir {
+				dirs = append(dirs, hdr)
+			}
+			unpackedPaths[path] = struct{}{}
+		}
+	}
+
+	for _, hdr := range dirs {
+		path := filepath.Join(dest, hdr.Name)
+		if err := system.Chtimes(path, hdr.AccessTime, hdr.ModTime); err != nil {
+			return 0, err
+		}
+	}
+
+	return size, nil
+}
+
+// ApplyLayer parses a diff in the standard layer format from `layer`,
+// and applies it to the directory `dest`. The stream `layer` can be
+// compressed or uncompressed.
+// Returns the size in bytes of the contents of the layer.
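+// ApplyLayer is equivalent to calling applyLayerHandler with decompression
+// enabled and default TarOptions.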
+func ApplyLayer(dest string, layer Reader) (int64, error) { + return applyLayerHandler(dest, layer, &TarOptions{}, true) +} + +// ApplyUncompressedLayer parses a diff in the standard layer format from +// `layer`, and applies it to the directory `dest`. The stream `layer` +// can only be uncompressed. +// Returns the size in bytes of the contents of the layer. +func ApplyUncompressedLayer(dest string, layer Reader, options *TarOptions) (int64, error) { + return applyLayerHandler(dest, layer, options, false) +} + +// do the bulk load of ApplyLayer, but allow for not calling DecompressStream +func applyLayerHandler(dest string, layer Reader, options *TarOptions, decompress bool) (int64, error) { + dest = filepath.Clean(dest) + + // We need to be able to set any perms + oldmask, err := system.Umask(0) + if err != nil { + return 0, err + } + defer system.Umask(oldmask) // ignore err, ErrNotSupportedPlatform + + if decompress { + layer, err = DecompressStream(layer) + if err != nil { + return 0, err + } + } + return UnpackLayer(dest, layer, options) +} diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/diff_test.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/diff_test.go new file mode 100644 index 000000000..4388d69c7 --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/diff_test.go @@ -0,0 +1,370 @@ +package archive + +import ( + "archive/tar" + "io" + "io/ioutil" + "os" + "path/filepath" + "reflect" + "testing" + + "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils" +) + +func TestApplyLayerInvalidFilenames(t *testing.T) { + for i, headers := range [][]*tar.Header{ + { + { + Name: "../victim/dotdot", + Typeflag: tar.TypeReg, + Mode: 0644, + }, + }, + { + { + // Note the leading slash + Name: "/../victim/slash-dotdot", + Typeflag: tar.TypeReg, + Mode: 0644, + }, + }, + } { + if err := testBreakout("applylayer", "docker-TestApplyLayerInvalidFilenames", headers); err != nil { + t.Fatalf("i=%d. 
%v", i, err) + } + } +} + +func TestApplyLayerInvalidHardlink(t *testing.T) { + for i, headers := range [][]*tar.Header{ + { // try reading victim/hello (../) + { + Name: "dotdot", + Typeflag: tar.TypeLink, + Linkname: "../victim/hello", + Mode: 0644, + }, + }, + { // try reading victim/hello (/../) + { + Name: "slash-dotdot", + Typeflag: tar.TypeLink, + // Note the leading slash + Linkname: "/../victim/hello", + Mode: 0644, + }, + }, + { // try writing victim/file + { + Name: "loophole-victim", + Typeflag: tar.TypeLink, + Linkname: "../victim", + Mode: 0755, + }, + { + Name: "loophole-victim/file", + Typeflag: tar.TypeReg, + Mode: 0644, + }, + }, + { // try reading victim/hello (hardlink, symlink) + { + Name: "loophole-victim", + Typeflag: tar.TypeLink, + Linkname: "../victim", + Mode: 0755, + }, + { + Name: "symlink", + Typeflag: tar.TypeSymlink, + Linkname: "loophole-victim/hello", + Mode: 0644, + }, + }, + { // Try reading victim/hello (hardlink, hardlink) + { + Name: "loophole-victim", + Typeflag: tar.TypeLink, + Linkname: "../victim", + Mode: 0755, + }, + { + Name: "hardlink", + Typeflag: tar.TypeLink, + Linkname: "loophole-victim/hello", + Mode: 0644, + }, + }, + { // Try removing victim directory (hardlink) + { + Name: "loophole-victim", + Typeflag: tar.TypeLink, + Linkname: "../victim", + Mode: 0755, + }, + { + Name: "loophole-victim", + Typeflag: tar.TypeReg, + Mode: 0644, + }, + }, + } { + if err := testBreakout("applylayer", "docker-TestApplyLayerInvalidHardlink", headers); err != nil { + t.Fatalf("i=%d. %v", i, err) + } + } +} + +func TestApplyLayerInvalidSymlink(t *testing.T) { + for i, headers := range [][]*tar.Header{ + { // try reading victim/hello (../) + { + Name: "dotdot", + Typeflag: tar.TypeSymlink, + Linkname: "../victim/hello", + Mode: 0644, + }, + }, + { // try reading victim/hello (/../) + { + Name: "slash-dotdot", + Typeflag: tar.TypeSymlink, + // Note the leading slash + Linkname: "/../victim/hello", + Mode: 0644, + }, + }, + { // try writing victim/file + { + Name: "loophole-victim", + Typeflag: tar.TypeSymlink, + Linkname: "../victim", + Mode: 0755, + }, + { + Name: "loophole-victim/file", + Typeflag: tar.TypeReg, + Mode: 0644, + }, + }, + { // try reading victim/hello (symlink, symlink) + { + Name: "loophole-victim", + Typeflag: tar.TypeSymlink, + Linkname: "../victim", + Mode: 0755, + }, + { + Name: "symlink", + Typeflag: tar.TypeSymlink, + Linkname: "loophole-victim/hello", + Mode: 0644, + }, + }, + { // try reading victim/hello (symlink, hardlink) + { + Name: "loophole-victim", + Typeflag: tar.TypeSymlink, + Linkname: "../victim", + Mode: 0755, + }, + { + Name: "hardlink", + Typeflag: tar.TypeLink, + Linkname: "loophole-victim/hello", + Mode: 0644, + }, + }, + { // try removing victim directory (symlink) + { + Name: "loophole-victim", + Typeflag: tar.TypeSymlink, + Linkname: "../victim", + Mode: 0755, + }, + { + Name: "loophole-victim", + Typeflag: tar.TypeReg, + Mode: 0644, + }, + }, + } { + if err := testBreakout("applylayer", "docker-TestApplyLayerInvalidSymlink", headers); err != nil { + t.Fatalf("i=%d. 
%v", i, err) + } + } +} + +func TestApplyLayerWhiteouts(t *testing.T) { + wd, err := ioutil.TempDir("", "graphdriver-test-whiteouts") + if err != nil { + return + } + defer os.RemoveAll(wd) + + base := []string{ + ".baz", + "bar/", + "bar/bax", + "bar/bay/", + "baz", + "foo/", + "foo/.abc", + "foo/.bcd/", + "foo/.bcd/a", + "foo/cde/", + "foo/cde/def", + "foo/cde/efg", + "foo/fgh", + "foobar", + } + + type tcase struct { + change, expected []string + } + + tcases := []tcase{ + { + base, + base, + }, + { + []string{ + ".bay", + ".wh.baz", + "foo/", + "foo/.bce", + "foo/.wh..wh..opq", + "foo/cde/", + "foo/cde/efg", + }, + []string{ + ".bay", + ".baz", + "bar/", + "bar/bax", + "bar/bay/", + "foo/", + "foo/.bce", + "foo/cde/", + "foo/cde/efg", + "foobar", + }, + }, + { + []string{ + ".bay", + ".wh..baz", + ".wh.foobar", + "foo/", + "foo/.abc", + "foo/.wh.cde", + "bar/", + }, + []string{ + ".bay", + "bar/", + "bar/bax", + "bar/bay/", + "foo/", + "foo/.abc", + "foo/.bce", + }, + }, + { + []string{ + ".abc", + ".wh..wh..opq", + "foobar", + }, + []string{ + ".abc", + "foobar", + }, + }, + } + + for i, tc := range tcases { + l, err := makeTestLayer(tc.change) + if err != nil { + t.Fatal(err) + } + + _, err = UnpackLayer(wd, l, nil) + if err != nil { + t.Fatal(err) + } + err = l.Close() + if err != nil { + t.Fatal(err) + } + + paths, err := readDirContents(wd) + if err != nil { + t.Fatal(err) + } + + if !reflect.DeepEqual(tc.expected, paths) { + t.Fatalf("invalid files for layer %d: expected %q, got %q", i, tc.expected, paths) + } + } + +} + +func makeTestLayer(paths []string) (rc io.ReadCloser, err error) { + tmpDir, err := ioutil.TempDir("", "graphdriver-test-mklayer") + if err != nil { + return + } + defer func() { + if err != nil { + os.RemoveAll(tmpDir) + } + }() + for _, p := range paths { + if p[len(p)-1] == filepath.Separator { + if err = os.MkdirAll(filepath.Join(tmpDir, p), 0700); err != nil { + return + } + } else { + if err = ioutil.WriteFile(filepath.Join(tmpDir, p), nil, 0600); err != nil { + return + } + } + } + archive, err := Tar(tmpDir, Uncompressed) + if err != nil { + return + } + return ioutils.NewReadCloserWrapper(archive, func() error { + err := archive.Close() + os.RemoveAll(tmpDir) + return err + }), nil +} + +func readDirContents(root string) ([]string, error) { + var files []string + err := filepath.Walk(root, func(path string, info os.FileInfo, err error) error { + if err != nil { + return err + } + if path == root { + return nil + } + rel, err := filepath.Rel(root, path) + if err != nil { + return err + } + if info.IsDir() { + rel = rel + "/" + } + files = append(files, rel) + return nil + }) + if err != nil { + return nil, err + } + return files, nil +} diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/example_changes.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/example_changes.go new file mode 100644 index 000000000..a5e08e4ee --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/example_changes.go @@ -0,0 +1,97 @@ +// +build ignore + +// Simple tool to create an archive stream from an old and new directory +// +// By default it will stream the comparison of two temporary directories with junk files +package main + +import ( + "flag" + "fmt" + "io" + "io/ioutil" + "os" + "path" + + "github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus" + 
"github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive" +) + +var ( + flDebug = flag.Bool("D", false, "debugging output") + flNewDir = flag.String("newdir", "", "") + flOldDir = flag.String("olddir", "", "") + log = logrus.New() +) + +func main() { + flag.Usage = func() { + fmt.Println("Produce a tar from comparing two directory paths. By default a demo tar is created of around 200 files (including hardlinks)") + fmt.Printf("%s [OPTIONS]\n", os.Args[0]) + flag.PrintDefaults() + } + flag.Parse() + log.Out = os.Stderr + if (len(os.Getenv("DEBUG")) > 0) || *flDebug { + logrus.SetLevel(logrus.DebugLevel) + } + var newDir, oldDir string + + if len(*flNewDir) == 0 { + var err error + newDir, err = ioutil.TempDir("", "docker-test-newDir") + if err != nil { + log.Fatal(err) + } + defer os.RemoveAll(newDir) + if _, err := prepareUntarSourceDirectory(100, newDir, true); err != nil { + log.Fatal(err) + } + } else { + newDir = *flNewDir + } + + if len(*flOldDir) == 0 { + oldDir, err := ioutil.TempDir("", "docker-test-oldDir") + if err != nil { + log.Fatal(err) + } + defer os.RemoveAll(oldDir) + } else { + oldDir = *flOldDir + } + + changes, err := archive.ChangesDirs(newDir, oldDir) + if err != nil { + log.Fatal(err) + } + + a, err := archive.ExportChanges(newDir, changes) + if err != nil { + log.Fatal(err) + } + defer a.Close() + + i, err := io.Copy(os.Stdout, a) + if err != nil && err != io.EOF { + log.Fatal(err) + } + fmt.Fprintf(os.Stderr, "wrote archive of %d bytes", i) +} + +func prepareUntarSourceDirectory(numberOfFiles int, targetPath string, makeLinks bool) (int, error) { + fileData := []byte("fooo") + for n := 0; n < numberOfFiles; n++ { + fileName := fmt.Sprintf("file-%d", n) + if err := ioutil.WriteFile(path.Join(targetPath, fileName), fileData, 0700); err != nil { + return 0, err + } + if makeLinks { + if err := os.Link(path.Join(targetPath, fileName), path.Join(targetPath, fileName+"-link")); err != nil { + return 0, err + } + } + } + totalSize := numberOfFiles * len(fileData) + return totalSize, nil +} diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/time_linux.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/time_linux.go new file mode 100644 index 000000000..3448569b1 --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/time_linux.go @@ -0,0 +1,16 @@ +package archive + +import ( + "syscall" + "time" +) + +func timeToTimespec(time time.Time) (ts syscall.Timespec) { + if time.IsZero() { + // Return UTIME_OMIT special value + ts.Sec = 0 + ts.Nsec = ((1 << 30) - 2) + return + } + return syscall.NsecToTimespec(time.UnixNano()) +} diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/time_unsupported.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/time_unsupported.go new file mode 100644 index 000000000..e85aac054 --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/time_unsupported.go @@ -0,0 +1,16 @@ +// +build !linux + +package archive + +import ( + "syscall" + "time" +) + +func timeToTimespec(time time.Time) (ts syscall.Timespec) { + nsec := int64(0) + if !time.IsZero() { + nsec = time.UnixNano() + } + return syscall.NsecToTimespec(nsec) +} diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/utils_test.go 
b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/utils_test.go new file mode 100644 index 000000000..98719032f --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/utils_test.go @@ -0,0 +1,166 @@ +package archive + +import ( + "archive/tar" + "bytes" + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" + "time" +) + +var testUntarFns = map[string]func(string, io.Reader) error{ + "untar": func(dest string, r io.Reader) error { + return Untar(r, dest, nil) + }, + "applylayer": func(dest string, r io.Reader) error { + _, err := ApplyLayer(dest, Reader(r)) + return err + }, +} + +// testBreakout is a helper function that, within the provided `tmpdir` directory, +// creates a `victim` folder with a generated `hello` file in it. +// `untar` extracts to a directory named `dest`, the tar file created from `headers`. +// +// Here are the tested scenarios: +// - removed `victim` folder (write) +// - removed files from `victim` folder (write) +// - new files in `victim` folder (write) +// - modified files in `victim` folder (write) +// - file in `dest` with same content as `victim/hello` (read) +// +// When using testBreakout make sure you cover one of the scenarios listed above. +func testBreakout(untarFn string, tmpdir string, headers []*tar.Header) error { + tmpdir, err := ioutil.TempDir("", tmpdir) + if err != nil { + return err + } + defer os.RemoveAll(tmpdir) + + dest := filepath.Join(tmpdir, "dest") + if err := os.Mkdir(dest, 0755); err != nil { + return err + } + + victim := filepath.Join(tmpdir, "victim") + if err := os.Mkdir(victim, 0755); err != nil { + return err + } + hello := filepath.Join(victim, "hello") + helloData, err := time.Now().MarshalText() + if err != nil { + return err + } + if err := ioutil.WriteFile(hello, helloData, 0644); err != nil { + return err + } + helloStat, err := os.Stat(hello) + if err != nil { + return err + } + + reader, writer := io.Pipe() + go func() { + t := tar.NewWriter(writer) + for _, hdr := range headers { + t.WriteHeader(hdr) + } + t.Close() + }() + + untar := testUntarFns[untarFn] + if untar == nil { + return fmt.Errorf("could not find untar function %q in testUntarFns", untarFn) + } + if err := untar(dest, reader); err != nil { + if _, ok := err.(breakoutError); !ok { + // If untar returns an error unrelated to an archive breakout, + // then consider this an unexpected error and abort. + return err + } + // Here, untar detected the breakout. + // Let's move on verifying that indeed there was no breakout. + fmt.Printf("breakoutError: %v\n", err) + } + + // Check victim folder + f, err := os.Open(victim) + if err != nil { + // codepath taken if victim folder was removed + return fmt.Errorf("archive breakout: error reading %q: %v", victim, err) + } + defer f.Close() + + // Check contents of victim folder + // + // We are only interested in getting 2 files from the victim folder, because if all is well + // we expect only one result, the `hello` file. If there is a second result, it cannot + // hold the same name `hello` and we assume that a new file got created in the victim folder. + // That is enough to detect an archive breakout. 
+ names, err := f.Readdirnames(2) + if err != nil { + // codepath taken if victim is not a folder + return fmt.Errorf("archive breakout: error reading directory content of %q: %v", victim, err) + } + for _, name := range names { + if name != "hello" { + // codepath taken if new file was created in victim folder + return fmt.Errorf("archive breakout: new file %q", name) + } + } + + // Check victim/hello + f, err = os.Open(hello) + if err != nil { + // codepath taken if read permissions were removed + return fmt.Errorf("archive breakout: could not lstat %q: %v", hello, err) + } + defer f.Close() + b, err := ioutil.ReadAll(f) + if err != nil { + return err + } + fi, err := f.Stat() + if err != nil { + return err + } + if helloStat.IsDir() != fi.IsDir() || + // TODO: cannot check for fi.ModTime() change + helloStat.Mode() != fi.Mode() || + helloStat.Size() != fi.Size() || + !bytes.Equal(helloData, b) { + // codepath taken if hello has been modified + return fmt.Errorf("archive breakout: file %q has been modified. Contents: expected=%q, got=%q. FileInfo: expected=%#v, got=%#v", hello, helloData, b, helloStat, fi) + } + + // Check that nothing in dest/ has the same content as victim/hello. + // Since victim/hello was generated with time.Now(), it is safe to assume + // that any file whose content matches exactly victim/hello, managed somehow + // to access victim/hello. + return filepath.Walk(dest, func(path string, info os.FileInfo, err error) error { + if info.IsDir() { + if err != nil { + // skip directory if error + return filepath.SkipDir + } + // enter directory + return nil + } + if err != nil { + // skip file if error + return nil + } + b, err := ioutil.ReadFile(path) + if err != nil { + // Houston, we have a problem. Aborting (space)walk. + return err + } + if bytes.Equal(helloData, b) { + return fmt.Errorf("archive breakout: file %q has been accessed via %q", hello, path) + } + return nil + }) +} diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/whiteouts.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/whiteouts.go new file mode 100644 index 000000000..d20478a10 --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/whiteouts.go @@ -0,0 +1,23 @@ +package archive + +// Whiteouts are files with a special meaning for the layered filesystem. +// Docker uses AUFS whiteout files inside exported archives. In other +// filesystems these files are generated/handled on tar creation/extraction. + +// WhiteoutPrefix prefix means file is a whiteout. If this is followed by a +// filename this means that file has been removed from the base layer. +const WhiteoutPrefix = ".wh." + +// WhiteoutMetaPrefix prefix means whiteout has a special meaning and is not +// for removing an actual file. Normally these files are excluded from exported +// archives. +const WhiteoutMetaPrefix = WhiteoutPrefix + WhiteoutPrefix + +// WhiteoutLinkDir is a directory AUFS uses for storing hardlink links to other +// layers. Normally these should not go into exported archives and all changed +// hardlinks should be copied to the top layer. +const WhiteoutLinkDir = WhiteoutMetaPrefix + "plnk" + +// WhiteoutOpaqueDir file means directory has been made opaque - meaning +// readdir calls to this directory do not follow to lower layers. 
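+// For example, an entry named "dir/.wh..wh..opq" in a layer hides everything
+// that lower layers placed under "dir/".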
+const WhiteoutOpaqueDir = WhiteoutMetaPrefix + ".opq"
diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/wrap.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/wrap.go
new file mode 100644
index 000000000..dfb335c0b
--- /dev/null
+++ b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/wrap.go
@@ -0,0 +1,59 @@
+package archive
+
+import (
+	"archive/tar"
+	"bytes"
+	"io/ioutil"
+)
+
+// Generate generates a new archive from the content provided
+// as input.
+//
+// `files` is a sequence of path/content pairs. A new file is
+// added to the archive for each pair.
+// If the last pair is incomplete, the file is created with
+// empty content. For example:
+//
+// Generate("foo.txt", "hello world", "emptyfile")
+//
+// The above call will return an archive with 2 files:
+// * ./foo.txt with content "hello world"
+// * ./emptyfile with empty content
+//
+// FIXME: stream content instead of buffering
+// FIXME: specify permissions and other archive metadata
+func Generate(input ...string) (Archive, error) {
+	files := parseStringPairs(input...)
+	buf := new(bytes.Buffer)
+	tw := tar.NewWriter(buf)
+	for _, file := range files {
+		name, content := file[0], file[1]
+		hdr := &tar.Header{
+			Name: name,
+			Size: int64(len(content)),
+		}
+		if err := tw.WriteHeader(hdr); err != nil {
+			return nil, err
+		}
+		if _, err := tw.Write([]byte(content)); err != nil {
+			return nil, err
+		}
+	}
+	if err := tw.Close(); err != nil {
+		return nil, err
+	}
+	return ioutil.NopCloser(buf), nil
+}
+
+func parseStringPairs(input ...string) (output [][2]string) {
+	output = make([][2]string, 0, len(input)/2+1)
+	for i := 0; i < len(input); i += 2 {
+		var pair [2]string
+		pair[0] = input[i]
+		if i+1 < len(input) {
+			pair[1] = input[i+1]
+		}
+		output = append(output, pair)
+	}
+	return
+}
diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/wrap_test.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/wrap_test.go
new file mode 100644
index 000000000..46ab36697
--- /dev/null
+++ b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/wrap_test.go
@@ -0,0 +1,98 @@
+package archive
+
+import (
+	"archive/tar"
+	"bytes"
+	"io"
+	"testing"
+)
+
+func TestGenerateEmptyFile(t *testing.T) {
+	archive, err := Generate("emptyFile")
+	if err != nil {
+		t.Fatal(err)
+	}
+	if archive == nil {
+		t.Fatal("The generated archive should not be nil.")
+	}
+
+	expectedFiles := [][]string{
+		{"emptyFile", ""},
+	}
+
+	tr := tar.NewReader(archive)
+	actualFiles := make([][]string, 0, 10)
+	i := 0
+	for {
+		hdr, err := tr.Next()
+		if err == io.EOF {
+			break
+		}
+		if err != nil {
+			t.Fatal(err)
+		}
+		buf := new(bytes.Buffer)
+		buf.ReadFrom(tr)
+		content := buf.String()
+		actualFiles = append(actualFiles, []string{hdr.Name, content})
+		i++
+	}
+	if len(actualFiles) != len(expectedFiles) {
+		t.Fatalf("Number of expected files %d, got %d.", len(expectedFiles), len(actualFiles))
+	}
+	for i := 0; i < len(expectedFiles); i++ {
+		actual := actualFiles[i]
+		expected := expectedFiles[i]
+		if actual[0] != expected[0] {
+			t.Fatalf("Expected name '%s', Actual name '%s'", expected[0], actual[0])
+		}
+		if actual[1] != expected[1] {
+			t.Fatalf("Expected content '%s', Actual content '%s'", expected[1], actual[1])
+		}
+	}
+}
+
+func TestGenerateWithContent(t *testing.T) {
+	archive, err := Generate("file", "content")
+	if err != nil {
+		t.Fatal(err)
+	}
+	if archive == nil {
+		t.Fatal("The generated archive should not be nil.")
+	}
+
+	expectedFiles := [][]string{
+		{"file", "content"},
+	}
+
+	tr := tar.NewReader(archive)
+	actualFiles := make([][]string, 0, 10)
+	i := 0
+	for {
+		hdr, err := tr.Next()
+		if err == io.EOF {
+			break
+		}
+		if err != nil {
+			t.Fatal(err)
+		}
+		buf := new(bytes.Buffer)
+		buf.ReadFrom(tr)
+		content := buf.String()
+		actualFiles = append(actualFiles, []string{hdr.Name, content})
+		i++
+	}
+	if len(actualFiles) != len(expectedFiles) {
+		t.Fatalf("Number of expected files %d, got %d.", len(expectedFiles), len(actualFiles))
+	}
+	for i := 0; i < len(expectedFiles); i++ {
+		actual := actualFiles[i]
+		expected := expectedFiles[i]
+		if actual[0] != expected[0] {
+			t.Fatalf("Expected name '%s', Actual name '%s'", expected[0], actual[0])
+		}
+		if actual[1] != expected[1] {
+			t.Fatalf("Expected content '%s', Actual content '%s'", expected[1], actual[1])
+		}
+	}
+}
diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/fileutils/fileutils.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/fileutils/fileutils.go
new file mode 100644
index 000000000..a15cf4bc5
--- /dev/null
+++ b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/fileutils/fileutils.go
@@ -0,0 +1,283 @@
+package fileutils
+
+import (
+	"errors"
+	"fmt"
+	"io"
+	"os"
+	"path/filepath"
+	"regexp"
+	"strings"
+	"text/scanner"
+
+	"github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus"
+)
+
+// exclusion returns true if the specified pattern is an exclusion
+func exclusion(pattern string) bool {
+	return pattern[0] == '!'
+}
+
+// empty returns true if the specified pattern is empty
+func empty(pattern string) bool {
+	return pattern == ""
+}
+
+// CleanPatterns takes a slice of patterns and returns a new
+// slice of patterns cleaned with filepath.Clean, stripped
+// of any empty patterns, and lets the caller know whether the
+// slice contains any exception patterns (prefixed with !).
+func CleanPatterns(patterns []string) ([]string, [][]string, bool, error) {
+	// Loop over exclusion patterns and:
+	// 1. Clean them up.
+	// 2. Indicate whether we are dealing with any exception rules.
+	// 3. Error if we see a single exclusion marker on its own (!).
+	cleanedPatterns := []string{}
+	patternDirs := [][]string{}
+	exceptions := false
+	for _, pattern := range patterns {
+		// Eliminate leading and trailing whitespace.
+		pattern = strings.TrimSpace(pattern)
+		if empty(pattern) {
+			continue
+		}
+		if exclusion(pattern) {
+			if len(pattern) == 1 {
+				return nil, nil, false, errors.New("Illegal exclusion pattern: !")
+			}
+			exceptions = true
+		}
+		pattern = filepath.Clean(pattern)
+		cleanedPatterns = append(cleanedPatterns, pattern)
+		if exclusion(pattern) {
+			pattern = pattern[1:]
+		}
+		patternDirs = append(patternDirs, strings.Split(pattern, "/"))
+	}
+
+	return cleanedPatterns, patternDirs, exceptions, nil
+}
+
+// Matches returns true if file matches any of the patterns
+// and isn't excluded by any of the subsequent patterns.
+func Matches(file string, patterns []string) (bool, error) {
+	file = filepath.Clean(file)
+
+	if file == "." {
+		// Don't let them exclude everything, kind of silly.
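+		// e.g. Matches(".", []string{"*"}) reports false rather than
+		// letting a pattern exclude the entire source tree.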
+ return false, nil + } + + patterns, patDirs, _, err := CleanPatterns(patterns) + if err != nil { + return false, err + } + + return OptimizedMatches(file, patterns, patDirs) +} + +// OptimizedMatches is basically the same as fileutils.Matches() but optimized for archive.go. +// It will assume that the inputs have been preprocessed and therefore the function +// doesn't need to do as much error checking and clean-up. This was done to avoid +// repeating these steps on each file being checked during the archive process. +// The more generic fileutils.Matches() can't make these assumptions. +func OptimizedMatches(file string, patterns []string, patDirs [][]string) (bool, error) { + matched := false + parentPath := filepath.Dir(file) + parentPathDirs := strings.Split(parentPath, "/") + + for i, pattern := range patterns { + negative := false + + if exclusion(pattern) { + negative = true + pattern = pattern[1:] + } + + match, err := regexpMatch(pattern, file) + if err != nil { + return false, fmt.Errorf("Error in pattern (%s): %s", pattern, err) + } + + if !match && parentPath != "." { + // Check to see if the pattern matches one of our parent dirs. + if len(patDirs[i]) <= len(parentPathDirs) { + match, _ = regexpMatch(strings.Join(patDirs[i], "/"), + strings.Join(parentPathDirs[:len(patDirs[i])], "/")) + } + } + + if match { + matched = !negative + } + } + + if matched { + logrus.Debugf("Skipping excluded path: %s", file) + } + + return matched, nil +} + +// regexpMatch tries to match the logic of filepath.Match but +// does so using regexp logic. We do this so that we can expand the +// wildcard set to include other things, like "**" to mean any number +// of directories. This means that we should be backwards compatible +// with filepath.Match(). We'll end up supporting more stuff, due to +// the fact that we're using regexp, but that's ok - it does no harm. +func regexpMatch(pattern, path string) (bool, error) { + regStr := "^" + + // Do some syntax checking on the pattern. + // filepath's Match() has some really weird rules that are inconsistent + // so instead of trying to dup their logic, just call Match() for its + // error state and if there is an error in the pattern return it. + // If this becomes an issue we can remove this since its really only + // needed in the error (syntax) case - which isn't really critical. + if _, err := filepath.Match(pattern, path); err != nil { + return false, err + } + + // Go through the pattern and convert it to a regexp. + // We use a scanner so we can support utf-8 chars. + var scan scanner.Scanner + scan.Init(strings.NewReader(pattern)) + + sl := string(os.PathSeparator) + escSL := sl + if sl == `\` { + escSL += `\` + } + + for scan.Peek() != scanner.EOF { + ch := scan.Next() + + if ch == '*' { + if scan.Peek() == '*' { + // is some flavor of "**" + scan.Next() + + if scan.Peek() == scanner.EOF { + // is "**EOF" - to align with .gitignore just accept all + regStr += ".*" + } else { + // is "**" + regStr += "((.*" + escSL + ")|([^" + escSL + "]*))" + } + + // Treat **/ as ** so eat the "/" + if string(scan.Peek()) == sl { + scan.Next() + } + } else { + // is "*" so map it to anything but "/" + regStr += "[^" + escSL + "]*" + } + } else if ch == '?' { + // "?" is any char except "/" + regStr += "[^" + escSL + "]" + } else if strings.Index(".$", string(ch)) != -1 { + // Escape some regexp special chars that have no meaning + // in golang's filepath.Match + regStr += `\` + string(ch) + } else if ch == '\\' { + // escape next char. 
Note that a trailing \ in the pattern
+			// will be left alone (but needs to be escaped)
+			if sl == `\` {
+				// On windows map "\" to "\\", meaning an escaped backslash,
+				// and then just continue because filepath.Match on
+				// Windows doesn't allow escaping at all
+				regStr += escSL
+				continue
+			}
+			if scan.Peek() != scanner.EOF {
+				regStr += `\` + string(scan.Next())
+			} else {
+				regStr += `\`
+			}
+		} else {
+			regStr += string(ch)
+		}
+	}
+
+	regStr += "$"
+
+	res, err := regexp.MatchString(regStr, path)
+
+	// Map regexp's error to filepath's so no one knows we're not using filepath
+	if err != nil {
+		err = filepath.ErrBadPattern
+	}
+
+	return res, err
+}
+
+// CopyFile copies from src to dst until either EOF is reached
+// on src or an error occurs. It verifies that src exists and removes
+// dst if it exists.
+func CopyFile(src, dst string) (int64, error) {
+	cleanSrc := filepath.Clean(src)
+	cleanDst := filepath.Clean(dst)
+	if cleanSrc == cleanDst {
+		return 0, nil
+	}
+	sf, err := os.Open(cleanSrc)
+	if err != nil {
+		return 0, err
+	}
+	defer sf.Close()
+	if err := os.Remove(cleanDst); err != nil && !os.IsNotExist(err) {
+		return 0, err
+	}
+	df, err := os.Create(cleanDst)
+	if err != nil {
+		return 0, err
+	}
+	defer df.Close()
+	return io.Copy(df, sf)
+}
+
+// ReadSymlinkedDirectory returns the target directory of a symlink.
+// The target of the symbolic link must not be a file.
+func ReadSymlinkedDirectory(path string) (string, error) {
+	var realPath string
+	var err error
+	if realPath, err = filepath.Abs(path); err != nil {
+		return "", fmt.Errorf("unable to get absolute path for %s: %s", path, err)
+	}
+	if realPath, err = filepath.EvalSymlinks(realPath); err != nil {
+		return "", fmt.Errorf("failed to canonicalise path for %s: %s", path, err)
+	}
+	realPathInfo, err := os.Stat(realPath)
+	if err != nil {
+		return "", fmt.Errorf("failed to stat target '%s' of '%s': %s", realPath, path, err)
+	}
+	if !realPathInfo.Mode().IsDir() {
+		return "", fmt.Errorf("canonical path points to a file '%s'", realPath)
+	}
+	return realPath, nil
+}
+
+// CreateIfNotExists creates a file or a directory only if it does not already exist.
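+// When isDir is false, any missing parent directories are created as well,
+// followed by an empty file at path.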
+func CreateIfNotExists(path string, isDir bool) error {
+	if _, err := os.Stat(path); err != nil {
+		if os.IsNotExist(err) {
+			if isDir {
+				return os.MkdirAll(path, 0755)
+			}
+			if err := os.MkdirAll(filepath.Dir(path), 0755); err != nil {
+				return err
+			}
+			f, err := os.OpenFile(path, os.O_CREATE, 0755)
+			if err != nil {
+				return err
+			}
+			f.Close()
+		}
+	}
+	return nil
+}
diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/fileutils/fileutils_test.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/fileutils/fileutils_test.go
new file mode 100644
index 000000000..2d584c667
--- /dev/null
+++ b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/fileutils/fileutils_test.go
@@ -0,0 +1,577 @@
+package fileutils
+
+import (
+	"io/ioutil"
+	"os"
+	"path"
+	"path/filepath"
+	"runtime"
+	"strings"
+	"testing"
+)
+
+// CopyFile with invalid src
+func TestCopyFileWithInvalidSrc(t *testing.T) {
+	tempFolder, err := ioutil.TempDir("", "docker-fileutils-test")
+	defer os.RemoveAll(tempFolder)
+	if err != nil {
+		t.Fatal(err)
+	}
+	bytes, err := CopyFile("/invalid/file/path", path.Join(tempFolder, "dest"))
+	if err == nil {
+		t.Fatal("Should have failed to copy an invalid src file")
+	}
+	if bytes != 0 {
+		t.Fatal("Should have written 0 bytes")
+	}
+
+}
+
+// CopyFile with invalid dest
+func TestCopyFileWithInvalidDest(t *testing.T) {
+	tempFolder, err := ioutil.TempDir("", "docker-fileutils-test")
+	defer os.RemoveAll(tempFolder)
+	if err != nil {
+		t.Fatal(err)
+	}
+	src := path.Join(tempFolder, "file")
+	err = ioutil.WriteFile(src, []byte("content"), 0740)
+	if err != nil {
+		t.Fatal(err)
+	}
+	bytes, err := CopyFile(src, path.Join(tempFolder, "/invalid/dest/path"))
+	if err == nil {
+		t.Fatal("Should have failed to copy to an invalid dest path")
+	}
+	if bytes != 0 {
+		t.Fatal("Should have written 0 bytes")
+	}
+
+}
+
+// CopyFile with same src and dest
+func TestCopyFileWithSameSrcAndDest(t *testing.T) {
+	tempFolder, err := ioutil.TempDir("", "docker-fileutils-test")
+	defer os.RemoveAll(tempFolder)
+	if err != nil {
+		t.Fatal(err)
+	}
+	file := path.Join(tempFolder, "file")
+	err = ioutil.WriteFile(file, []byte("content"), 0740)
+	if err != nil {
+		t.Fatal(err)
+	}
+	bytes, err := CopyFile(file, file)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if bytes != 0 {
+		t.Fatal("Should have written 0 bytes as it is the same file.")
+	}
+}
+
+// CopyFile with same src and dest but path is different and not clean
+func TestCopyFileWithSameSrcAndDestWithPathNameDifferent(t *testing.T) {
+	tempFolder, err := ioutil.TempDir("", "docker-fileutils-test")
+	defer os.RemoveAll(tempFolder)
+	if err != nil {
+		t.Fatal(err)
+	}
+	testFolder := path.Join(tempFolder, "test")
+	err = os.MkdirAll(testFolder, 0740)
+	if err != nil {
+		t.Fatal(err)
+	}
+	file := path.Join(testFolder, "file")
+	sameFile := testFolder + "/../test/file"
+	err = ioutil.WriteFile(file, []byte("content"), 0740)
+	if err != nil {
+		t.Fatal(err)
+	}
+	bytes, err := CopyFile(file, sameFile)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if bytes != 0 {
+		t.Fatal("Should have written 0 bytes as it is the same file.")
+	}
+}
+
+func TestCopyFile(t *testing.T) {
+	tempFolder, err := ioutil.TempDir("", "docker-fileutils-test")
+	defer os.RemoveAll(tempFolder)
+	if err != nil {
+		t.Fatal(err)
+	}
+	src := path.Join(tempFolder, "src")
+	dest := path.Join(tempFolder, "dest")
+	ioutil.WriteFile(src, []byte("content"), 0777)
+	ioutil.WriteFile(dest, []byte("destContent"), 0777)
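+	// Errors from these setup writes are ignored; a failed write would
+	// surface as a size or content mismatch below.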
+ bytes, err := CopyFile(src, dest) + if err != nil { + t.Fatal(err) + } + if bytes != 7 { + t.Fatalf("Should have written %d bytes but wrote %d", 7, bytes) + } + actual, err := ioutil.ReadFile(dest) + if err != nil { + t.Fatal(err) + } + if string(actual) != "content" { + t.Fatalf("Dest content was '%s', expected '%s'", string(actual), "content") + } +} + +// Reading a symlink to a directory must return the directory +func TestReadSymlinkedDirectoryExistingDirectory(t *testing.T) { + var err error + if err = os.Mkdir("/tmp/testReadSymlinkToExistingDirectory", 0777); err != nil { + t.Errorf("failed to create directory: %s", err) + } + + if err = os.Symlink("/tmp/testReadSymlinkToExistingDirectory", "/tmp/dirLinkTest"); err != nil { + t.Errorf("failed to create symlink: %s", err) + } + + var path string + if path, err = ReadSymlinkedDirectory("/tmp/dirLinkTest"); err != nil { + t.Fatalf("failed to read symlink to directory: %s", err) + } + + if path != "/tmp/testReadSymlinkToExistingDirectory" { + t.Fatalf("symlink returned unexpected directory: %s", path) + } + + if err = os.Remove("/tmp/testReadSymlinkToExistingDirectory"); err != nil { + t.Errorf("failed to remove temporary directory: %s", err) + } + + if err = os.Remove("/tmp/dirLinkTest"); err != nil { + t.Errorf("failed to remove symlink: %s", err) + } +} + +// Reading a non-existing symlink must fail +func TestReadSymlinkedDirectoryNonExistingSymlink(t *testing.T) { + var path string + var err error + if path, err = ReadSymlinkedDirectory("/tmp/test/foo/Non/ExistingPath"); err == nil { + t.Fatalf("error expected for non-existing symlink") + } + + if path != "" { + t.Fatalf("expected empty path, but '%s' was returned", path) + } +} + +// Reading a symlink to a file must fail +func TestReadSymlinkedDirectoryToFile(t *testing.T) { + var err error + var file *os.File + + if file, err = os.Create("/tmp/testReadSymlinkToFile"); err != nil { + t.Fatalf("failed to create file: %s", err) + } + + file.Close() + + if err = os.Symlink("/tmp/testReadSymlinkToFile", "/tmp/fileLinkTest"); err != nil { + t.Errorf("failed to create symlink: %s", err) + } + + var path string + if path, err = ReadSymlinkedDirectory("/tmp/fileLinkTest"); err == nil { + t.Fatalf("ReadSymlinkedDirectory on a symlink to a file should've failed") + } + + if path != "" { + t.Fatalf("path should've been empty: %s", path) + } + + if err = os.Remove("/tmp/testReadSymlinkToFile"); err != nil { + t.Errorf("failed to remove file: %s", err) + } + + if err = os.Remove("/tmp/fileLinkTest"); err != nil { + t.Errorf("failed to remove symlink: %s", err) + } +} + +func TestWildcardMatches(t *testing.T) { + match, _ := Matches("fileutils.go", []string{"*"}) + if match != true { + t.Errorf("failed to get a wildcard match, got %v", match) + } +} + +// A simple pattern match should return true. +func TestPatternMatches(t *testing.T) { + match, _ := Matches("fileutils.go", []string{"*.go"}) + if match != true { + t.Errorf("failed to get a match, got %v", match) + } +} + +// An exclusion followed by an inclusion should return true. +func TestExclusionPatternMatchesPatternBefore(t *testing.T) { + match, _ := Matches("fileutils.go", []string{"!fileutils.go", "*.go"}) + if match != true { + t.Errorf("failed to get true match on exclusion pattern, got %v", match) + } +} + +// A folder pattern followed by an exception should return false. 
+func TestPatternMatchesFolderExclusions(t *testing.T) { + match, _ := Matches("docs/README.md", []string{"docs", "!docs/README.md"}) + if match != false { + t.Errorf("failed to get a false match on exclusion pattern, got %v", match) + } +} + +// A folder pattern followed by an exception should return false. +func TestPatternMatchesFolderWithSlashExclusions(t *testing.T) { + match, _ := Matches("docs/README.md", []string{"docs/", "!docs/README.md"}) + if match != false { + t.Errorf("failed to get a false match on exclusion pattern, got %v", match) + } +} + +// A folder pattern followed by an exception should return false. +func TestPatternMatchesFolderWildcardExclusions(t *testing.T) { + match, _ := Matches("docs/README.md", []string{"docs/*", "!docs/README.md"}) + if match != false { + t.Errorf("failed to get a false match on exclusion pattern, got %v", match) + } +} + +// A pattern followed by an exclusion should return false. +func TestExclusionPatternMatchesPatternAfter(t *testing.T) { + match, _ := Matches("fileutils.go", []string{"*.go", "!fileutils.go"}) + if match != false { + t.Errorf("failed to get false match on exclusion pattern, got %v", match) + } +} + +// A filename evaluating to . should return false. +func TestExclusionPatternMatchesWholeDirectory(t *testing.T) { + match, _ := Matches(".", []string{"*.go"}) + if match != false { + t.Errorf("failed to get false match on ., got %v", match) + } +} + +// A single ! pattern should return an error. +func TestSingleExclamationError(t *testing.T) { + _, err := Matches("fileutils.go", []string{"!"}) + if err == nil { + t.Errorf("failed to get an error for a single exclamation point, got %v", err) + } +} + +// A string preceded with a ! should return true from Exclusion. +func TestExclusion(t *testing.T) { + exclusion := exclusion("!") + if !exclusion { + t.Errorf("failed to get true for a single !, got %v", exclusion) + } +} + +// Matches with no patterns +func TestMatchesWithNoPatterns(t *testing.T) { + matches, err := Matches("/any/path/there", []string{}) + if err != nil { + t.Fatal(err) + } + if matches { + t.Fatalf("Should not have match anything") + } +} + +// Matches with malformed patterns +func TestMatchesWithMalformedPatterns(t *testing.T) { + matches, err := Matches("/any/path/there", []string{"["}) + if err == nil { + t.Fatal("Should have failed because of a malformed syntax in the pattern") + } + if matches { + t.Fatalf("Should not have match anything") + } +} + +// Test lots of variants of patterns & strings +func TestMatches(t *testing.T) { + tests := []struct { + pattern string + text string + pass bool + }{ + {"**", "file", true}, + {"**", "file/", true}, + {"**/", "file", true}, // weird one + {"**/", "file/", true}, + {"**", "/", true}, + {"**/", "/", true}, + {"**", "dir/file", true}, + {"**/", "dir/file", false}, + {"**", "dir/file/", true}, + {"**/", "dir/file/", true}, + {"**/**", "dir/file", true}, + {"**/**", "dir/file/", true}, + {"dir/**", "dir/file", true}, + {"dir/**", "dir/file/", true}, + {"dir/**", "dir/dir2/file", true}, + {"dir/**", "dir/dir2/file/", true}, + {"**/dir2/*", "dir/dir2/file", true}, + {"**/dir2/*", "dir/dir2/file/", false}, + {"**/dir2/**", "dir/dir2/dir3/file", true}, + {"**/dir2/**", "dir/dir2/dir3/file/", true}, + {"**file", "file", true}, + {"**file", "dir/file", true}, + {"**/file", "dir/file", true}, + {"**file", "dir/dir/file", true}, + {"**/file", "dir/dir/file", true}, + {"**/file*", "dir/dir/file", true}, + {"**/file*", "dir/dir/file.txt", true}, + {"**/file*txt", 
"dir/dir/file.txt", true}, + {"**/file*.txt", "dir/dir/file.txt", true}, + {"**/file*.txt*", "dir/dir/file.txt", true}, + {"**/**/*.txt", "dir/dir/file.txt", true}, + {"**/**/*.txt2", "dir/dir/file.txt", false}, + {"**/*.txt", "file.txt", true}, + {"**/**/*.txt", "file.txt", true}, + {"a**/*.txt", "a/file.txt", true}, + {"a**/*.txt", "a/dir/file.txt", true}, + {"a**/*.txt", "a/dir/dir/file.txt", true}, + {"a/*.txt", "a/dir/file.txt", false}, + {"a/*.txt", "a/file.txt", true}, + {"a/*.txt**", "a/file.txt", true}, + {"a[b-d]e", "ae", false}, + {"a[b-d]e", "ace", true}, + {"a[b-d]e", "aae", false}, + {"a[^b-d]e", "aze", true}, + {".*", ".foo", true}, + {".*", "foo", false}, + {"abc.def", "abcdef", false}, + {"abc.def", "abc.def", true}, + {"abc.def", "abcZdef", false}, + {"abc?def", "abcZdef", true}, + {"abc?def", "abcdef", false}, + {"a\\*b", "a*b", true}, + {"a\\", "a", false}, + {"a\\", "a\\", false}, + {"a\\\\", "a\\", true}, + {"**/foo/bar", "foo/bar", true}, + {"**/foo/bar", "dir/foo/bar", true}, + {"**/foo/bar", "dir/dir2/foo/bar", true}, + {"abc/**", "abc", false}, + {"abc/**", "abc/def", true}, + {"abc/**", "abc/def/ghi", true}, + } + + for _, test := range tests { + res, _ := regexpMatch(test.pattern, test.text) + if res != test.pass { + t.Fatalf("Failed: %v - res:%v", test, res) + } + } +} + +// An empty string should return true from Empty. +func TestEmpty(t *testing.T) { + empty := empty("") + if !empty { + t.Errorf("failed to get true for an empty string, got %v", empty) + } +} + +func TestCleanPatterns(t *testing.T) { + cleaned, _, _, _ := CleanPatterns([]string{"docs", "config"}) + if len(cleaned) != 2 { + t.Errorf("expected 2 element slice, got %v", len(cleaned)) + } +} + +func TestCleanPatternsStripEmptyPatterns(t *testing.T) { + cleaned, _, _, _ := CleanPatterns([]string{"docs", "config", ""}) + if len(cleaned) != 2 { + t.Errorf("expected 2 element slice, got %v", len(cleaned)) + } +} + +func TestCleanPatternsExceptionFlag(t *testing.T) { + _, _, exceptions, _ := CleanPatterns([]string{"docs", "!docs/README.md"}) + if !exceptions { + t.Errorf("expected exceptions to be true, got %v", exceptions) + } +} + +func TestCleanPatternsLeadingSpaceTrimmed(t *testing.T) { + _, _, exceptions, _ := CleanPatterns([]string{"docs", " !docs/README.md"}) + if !exceptions { + t.Errorf("expected exceptions to be true, got %v", exceptions) + } +} + +func TestCleanPatternsTrailingSpaceTrimmed(t *testing.T) { + _, _, exceptions, _ := CleanPatterns([]string{"docs", "!docs/README.md "}) + if !exceptions { + t.Errorf("expected exceptions to be true, got %v", exceptions) + } +} + +func TestCleanPatternsErrorSingleException(t *testing.T) { + _, _, _, err := CleanPatterns([]string{"!"}) + if err == nil { + t.Errorf("expected error on single exclamation point, got %v", err) + } +} + +func TestCleanPatternsFolderSplit(t *testing.T) { + _, dirs, _, _ := CleanPatterns([]string{"docs/config/CONFIG.md"}) + if dirs[0][0] != "docs" { + t.Errorf("expected first element in dirs slice to be docs, got %v", dirs[0][1]) + } + if dirs[0][1] != "config" { + t.Errorf("expected first element in dirs slice to be config, got %v", dirs[0][1]) + } +} + +func TestCreateIfNotExistsDir(t *testing.T) { + tempFolder, err := ioutil.TempDir("", "docker-fileutils-test") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tempFolder) + + folderToCreate := filepath.Join(tempFolder, "tocreate") + + if err := CreateIfNotExists(folderToCreate, true); err != nil { + t.Fatal(err) + } + fileinfo, err := os.Stat(folderToCreate) + if 
err != nil { + t.Fatalf("Should have create a folder, got %v", err) + } + + if !fileinfo.IsDir() { + t.Fatalf("Should have been a dir, seems it's not") + } +} + +func TestCreateIfNotExistsFile(t *testing.T) { + tempFolder, err := ioutil.TempDir("", "docker-fileutils-test") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tempFolder) + + fileToCreate := filepath.Join(tempFolder, "file/to/create") + + if err := CreateIfNotExists(fileToCreate, false); err != nil { + t.Fatal(err) + } + fileinfo, err := os.Stat(fileToCreate) + if err != nil { + t.Fatalf("Should have create a file, got %v", err) + } + + if fileinfo.IsDir() { + t.Fatalf("Should have been a file, seems it's not") + } +} + +// These matchTests are stolen from go's filepath Match tests. +type matchTest struct { + pattern, s string + match bool + err error +} + +var matchTests = []matchTest{ + {"abc", "abc", true, nil}, + {"*", "abc", true, nil}, + {"*c", "abc", true, nil}, + {"a*", "a", true, nil}, + {"a*", "abc", true, nil}, + {"a*", "ab/c", false, nil}, + {"a*/b", "abc/b", true, nil}, + {"a*/b", "a/c/b", false, nil}, + {"a*b*c*d*e*/f", "axbxcxdxe/f", true, nil}, + {"a*b*c*d*e*/f", "axbxcxdxexxx/f", true, nil}, + {"a*b*c*d*e*/f", "axbxcxdxe/xxx/f", false, nil}, + {"a*b*c*d*e*/f", "axbxcxdxexxx/fff", false, nil}, + {"a*b?c*x", "abxbbxdbxebxczzx", true, nil}, + {"a*b?c*x", "abxbbxdbxebxczzy", false, nil}, + {"ab[c]", "abc", true, nil}, + {"ab[b-d]", "abc", true, nil}, + {"ab[e-g]", "abc", false, nil}, + {"ab[^c]", "abc", false, nil}, + {"ab[^b-d]", "abc", false, nil}, + {"ab[^e-g]", "abc", true, nil}, + {"a\\*b", "a*b", true, nil}, + {"a\\*b", "ab", false, nil}, + {"a?b", "a☺b", true, nil}, + {"a[^a]b", "a☺b", true, nil}, + {"a???b", "a☺b", false, nil}, + {"a[^a][^a][^a]b", "a☺b", false, nil}, + {"[a-ζ]*", "α", true, nil}, + {"*[a-ζ]", "A", false, nil}, + {"a?b", "a/b", false, nil}, + {"a*b", "a/b", false, nil}, + {"[\\]a]", "]", true, nil}, + {"[\\-]", "-", true, nil}, + {"[x\\-]", "x", true, nil}, + {"[x\\-]", "-", true, nil}, + {"[x\\-]", "z", false, nil}, + {"[\\-x]", "x", true, nil}, + {"[\\-x]", "-", true, nil}, + {"[\\-x]", "a", false, nil}, + {"[]a]", "]", false, filepath.ErrBadPattern}, + {"[-]", "-", false, filepath.ErrBadPattern}, + {"[x-]", "x", false, filepath.ErrBadPattern}, + {"[x-]", "-", false, filepath.ErrBadPattern}, + {"[x-]", "z", false, filepath.ErrBadPattern}, + {"[-x]", "x", false, filepath.ErrBadPattern}, + {"[-x]", "-", false, filepath.ErrBadPattern}, + {"[-x]", "a", false, filepath.ErrBadPattern}, + {"\\", "a", false, filepath.ErrBadPattern}, + {"[a-b-c]", "a", false, filepath.ErrBadPattern}, + {"[", "a", false, filepath.ErrBadPattern}, + {"[^", "a", false, filepath.ErrBadPattern}, + {"[^bc", "a", false, filepath.ErrBadPattern}, + {"a[", "a", false, filepath.ErrBadPattern}, // was nil but IMO its wrong + {"a[", "ab", false, filepath.ErrBadPattern}, + {"*x", "xxx", true, nil}, +} + +func errp(e error) string { + if e == nil { + return "" + } + return e.Error() +} + +// TestMatch test's our version of filepath.Match, called regexpMatch. +func TestMatch(t *testing.T) { + for _, tt := range matchTests { + pattern := tt.pattern + s := tt.s + if runtime.GOOS == "windows" { + if strings.Index(pattern, "\\") >= 0 { + // no escape allowed on windows. 
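+				// (filepath.Match treats `\` as the path separator
+				// on Windows, so it cannot act as an escape there)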
+ continue + } + pattern = filepath.Clean(pattern) + s = filepath.Clean(s) + } + ok, err := regexpMatch(pattern, s) + if ok != tt.match || err != tt.err { + t.Fatalf("Match(%#q, %#q) = %v, %q want %v, %q", pattern, s, ok, errp(err), tt.match, errp(tt.err)) + } + } +} diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/fileutils/fileutils_unix.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/fileutils/fileutils_unix.go new file mode 100644 index 000000000..7e00802c1 --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/fileutils/fileutils_unix.go @@ -0,0 +1,22 @@ +// +build linux freebsd + +package fileutils + +import ( + "fmt" + "io/ioutil" + "os" + + "github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus" +) + +// GetTotalUsedFds Returns the number of used File Descriptors by +// reading it via /proc filesystem. +func GetTotalUsedFds() int { + if fds, err := ioutil.ReadDir(fmt.Sprintf("/proc/%d/fd", os.Getpid())); err != nil { + logrus.Errorf("Error opening /proc/%d/fd: %s", os.Getpid(), err) + } else { + return len(fds) + } + return -1 +} diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/fileutils/fileutils_windows.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/fileutils/fileutils_windows.go new file mode 100644 index 000000000..5ec21cace --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/fileutils/fileutils_windows.go @@ -0,0 +1,7 @@ +package fileutils + +// GetTotalUsedFds Returns the number of used File Descriptors. Not supported +// on Windows. +func GetTotalUsedFds() int { + return -1 +} diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/homedir/homedir.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/homedir/homedir.go new file mode 100644 index 000000000..dcae17882 --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/homedir/homedir.go @@ -0,0 +1,39 @@ +package homedir + +import ( + "os" + "runtime" + + "github.com/fsouza/go-dockerclient/external/github.com/opencontainers/runc/libcontainer/user" +) + +// Key returns the env var name for the user's home dir based on +// the platform being run on +func Key() string { + if runtime.GOOS == "windows" { + return "USERPROFILE" + } + return "HOME" +} + +// Get returns the home directory of the current user with the help of +// environment variables depending on the target operating system. +// Returned path should be used with "path/filepath" to form new paths. +func Get() string { + home := os.Getenv(Key()) + if home == "" && runtime.GOOS != "windows" { + if u, err := user.CurrentUser(); err == nil { + return u.Home + } + } + return home +} + +// GetShortcutString returns the string that is shortcut to user's home directory +// in the native shell of the platform running on. 
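A minimal sketch of how a caller might use the homedir helpers above, assuming only the vendored import path; the config-file name is made up for illustration:

package main

import (
	"fmt"
	"path/filepath"

	"github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/homedir"
)

func main() {
	// Get falls back to a passwd lookup on non-Windows when $HOME is unset.
	home := homedir.Get()
	// Per the doc comment above, combine the result with path/filepath.
	fmt.Println(filepath.Join(home, ".docker", "config.json"))
}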
+func GetShortcutString() string { + if runtime.GOOS == "windows" { + return "%USERPROFILE%" // be careful while using in format functions + } + return "~" +} diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/homedir/homedir_test.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/homedir/homedir_test.go new file mode 100644 index 000000000..7a95cb2bd --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/homedir/homedir_test.go @@ -0,0 +1,24 @@ +package homedir + +import ( + "path/filepath" + "testing" +) + +func TestGet(t *testing.T) { + home := Get() + if home == "" { + t.Fatal("returned home directory is empty") + } + + if !filepath.IsAbs(home) { + t.Fatalf("returned path is not absolute: %s", home) + } +} + +func TestGetShortcutString(t *testing.T) { + shortcut := GetShortcutString() + if shortcut == "" { + t.Fatal("returned shortcut string is empty") + } +} diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/idtools/idtools.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/idtools/idtools.go new file mode 100644 index 000000000..a1301ee97 --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/idtools/idtools.go @@ -0,0 +1,195 @@ +package idtools + +import ( + "bufio" + "fmt" + "os" + "sort" + "strconv" + "strings" +) + +// IDMap contains a single entry for user namespace range remapping. An array +// of IDMap entries represents the structure that will be provided to the Linux +// kernel for creating a user namespace. +type IDMap struct { + ContainerID int `json:"container_id"` + HostID int `json:"host_id"` + Size int `json:"size"` +} + +type subIDRange struct { + Start int + Length int +} + +type ranges []subIDRange + +func (e ranges) Len() int { return len(e) } +func (e ranges) Swap(i, j int) { e[i], e[j] = e[j], e[i] } +func (e ranges) Less(i, j int) bool { return e[i].Start < e[j].Start } + +const ( + subuidFileName string = "/etc/subuid" + subgidFileName string = "/etc/subgid" +) + +// MkdirAllAs creates a directory (include any along the path) and then modifies +// ownership to the requested uid/gid. If the directory already exists, this +// function will still change ownership to the requested uid/gid pair. +func MkdirAllAs(path string, mode os.FileMode, ownerUID, ownerGID int) error { + return mkdirAs(path, mode, ownerUID, ownerGID, true, true) +} + +// MkdirAllNewAs creates a directory (include any along the path) and then modifies +// ownership ONLY of newly created directories to the requested uid/gid. If the +// directories along the path exist, no change of ownership will be performed +func MkdirAllNewAs(path string, mode os.FileMode, ownerUID, ownerGID int) error { + return mkdirAs(path, mode, ownerUID, ownerGID, true, false) +} + +// MkdirAs creates a directory and then modifies ownership to the requested uid/gid. +// If the directory already exists, this function still changes ownership +func MkdirAs(path string, mode os.FileMode, ownerUID, ownerGID int) error { + return mkdirAs(path, mode, ownerUID, ownerGID, false, true) +} + +// GetRootUIDGID retrieves the remapped root uid/gid pair from the set of maps. 
+// If the maps are empty, then the root uid/gid will default to "real" 0/0 +func GetRootUIDGID(uidMap, gidMap []IDMap) (int, int, error) { + var uid, gid int + + if uidMap != nil { + xUID, err := ToHost(0, uidMap) + if err != nil { + return -1, -1, err + } + uid = xUID + } + if gidMap != nil { + xGID, err := ToHost(0, gidMap) + if err != nil { + return -1, -1, err + } + gid = xGID + } + return uid, gid, nil +} + +// ToContainer takes an id mapping, and uses it to translate a +// host ID to the remapped ID. If no map is provided, then the translation +// assumes a 1-to-1 mapping and returns the passed in id +func ToContainer(hostID int, idMap []IDMap) (int, error) { + if idMap == nil { + return hostID, nil + } + for _, m := range idMap { + if (hostID >= m.HostID) && (hostID <= (m.HostID + m.Size - 1)) { + contID := m.ContainerID + (hostID - m.HostID) + return contID, nil + } + } + return -1, fmt.Errorf("Host ID %d cannot be mapped to a container ID", hostID) +} + +// ToHost takes an id mapping and a remapped ID, and translates the +// ID to the mapped host ID. If no map is provided, then the translation +// assumes a 1-to-1 mapping and returns the passed in id # +func ToHost(contID int, idMap []IDMap) (int, error) { + if idMap == nil { + return contID, nil + } + for _, m := range idMap { + if (contID >= m.ContainerID) && (contID <= (m.ContainerID + m.Size - 1)) { + hostID := m.HostID + (contID - m.ContainerID) + return hostID, nil + } + } + return -1, fmt.Errorf("Container ID %d cannot be mapped to a host ID", contID) +} + +// CreateIDMappings takes a requested user and group name and +// using the data from /etc/sub{uid,gid} ranges, creates the +// proper uid and gid remapping ranges for that user/group pair +func CreateIDMappings(username, groupname string) ([]IDMap, []IDMap, error) { + subuidRanges, err := parseSubuid(username) + if err != nil { + return nil, nil, err + } + subgidRanges, err := parseSubgid(groupname) + if err != nil { + return nil, nil, err + } + if len(subuidRanges) == 0 { + return nil, nil, fmt.Errorf("No subuid ranges found for user %q", username) + } + if len(subgidRanges) == 0 { + return nil, nil, fmt.Errorf("No subgid ranges found for group %q", groupname) + } + + return createIDMap(subuidRanges), createIDMap(subgidRanges), nil +} + +func createIDMap(subidRanges ranges) []IDMap { + idMap := []IDMap{} + + // sort the ranges by lowest ID first + sort.Sort(subidRanges) + containerID := 0 + for _, idrange := range subidRanges { + idMap = append(idMap, IDMap{ + ContainerID: containerID, + HostID: idrange.Start, + Size: idrange.Length, + }) + containerID = containerID + idrange.Length + } + return idMap +} + +func parseSubuid(username string) (ranges, error) { + return parseSubidFile(subuidFileName, username) +} + +func parseSubgid(username string) (ranges, error) { + return parseSubidFile(subgidFileName, username) +} + +func parseSubidFile(path, username string) (ranges, error) { + var rangeList ranges + + subidFile, err := os.Open(path) + if err != nil { + return rangeList, err + } + defer subidFile.Close() + + s := bufio.NewScanner(subidFile) + for s.Scan() { + if err := s.Err(); err != nil { + return rangeList, err + } + + text := strings.TrimSpace(s.Text()) + if text == "" { + continue + } + parts := strings.Split(text, ":") + if len(parts) != 3 { + return rangeList, fmt.Errorf("Cannot parse subuid/gid information: Format not correct for %s file", path) + } + if parts[0] == username { + // return the first entry for a user; ignores potential for multiple ranges per 
user + startid, err := strconv.Atoi(parts[1]) + if err != nil { + return rangeList, fmt.Errorf("String to int conversion failed during subuid/gid parsing of %s: %v", path, err) + } + length, err := strconv.Atoi(parts[2]) + if err != nil { + return rangeList, fmt.Errorf("String to int conversion failed during subuid/gid parsing of %s: %v", path, err) + } + rangeList = append(rangeList, subIDRange{startid, length}) + } + } + return rangeList, nil +} diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/idtools/idtools_unix.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/idtools/idtools_unix.go new file mode 100644 index 000000000..0444307d2 --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/idtools/idtools_unix.go @@ -0,0 +1,60 @@ +// +build !windows + +package idtools + +import ( + "os" + "path/filepath" + + "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system" +) + +func mkdirAs(path string, mode os.FileMode, ownerUID, ownerGID int, mkAll, chownExisting bool) error { + // make an array containing the original path asked for, plus (for mkAll == true) + // all path components leading up to the complete path that don't exist before we MkdirAll + // so that we can chown all of them properly at the end. If chownExisting is false, we won't + // chown the full directory path if it exists + var paths []string + if _, err := os.Stat(path); err != nil && os.IsNotExist(err) { + paths = []string{path} + } else if err == nil && chownExisting { + if err := os.Chown(path, ownerUID, ownerGID); err != nil { + return err + } + // short-circuit--we were called with an existing directory and chown was requested + return nil + } else if err == nil { + // nothing to do; directory path fully exists already and chown was NOT requested + return nil + } + + if mkAll { + // walk back to "/" looking for directories which do not exist + // and add them to the paths array for chown after creation + dirPath := path + for { + dirPath = filepath.Dir(dirPath) + if dirPath == "/" { + break + } + if _, err := os.Stat(dirPath); err != nil && os.IsNotExist(err) { + paths = append(paths, dirPath) + } + } + if err := system.MkdirAll(path, mode); err != nil && !os.IsExist(err) { + return err + } + } else { + if err := os.Mkdir(path, mode); err != nil && !os.IsExist(err) { + return err + } + } + // even if it existed, we will chown the requested path + any subpaths that + // didn't exist when we called MkdirAll + for _, pathComponent := range paths { + if err := os.Chown(pathComponent, ownerUID, ownerGID); err != nil { + return err + } + } + return nil +} diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/idtools/idtools_unix_test.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/idtools/idtools_unix_test.go new file mode 100644 index 000000000..55b338c96 --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/idtools/idtools_unix_test.go @@ -0,0 +1,243 @@ +// +build !windows + +package idtools + +import ( + "fmt" + "io/ioutil" + "os" + "path/filepath" + "syscall" + "testing" +) + +type node struct { + uid int + gid int +} + +func TestMkdirAllAs(t *testing.T) { + dirName, err := ioutil.TempDir("", "mkdirall") + if err != nil { + t.Fatalf("Couldn't create temp dir: %v", err) + } + defer os.RemoveAll(dirName) + + testTree := map[string]node{ + "usr": {0, 0}, + "usr/bin": 
{0, 0}, + "lib": {33, 33}, + "lib/x86_64": {45, 45}, + "lib/x86_64/share": {1, 1}, + } + + if err := buildTree(dirName, testTree); err != nil { + t.Fatal(err) + } + + // test adding a directory to a pre-existing dir; only the new dir is owned by the uid/gid + if err := MkdirAllAs(filepath.Join(dirName, "usr", "share"), 0755, 99, 99); err != nil { + t.Fatal(err) + } + testTree["usr/share"] = node{99, 99} + verifyTree, err := readTree(dirName, "") + if err != nil { + t.Fatal(err) + } + if err := compareTrees(testTree, verifyTree); err != nil { + t.Fatal(err) + } + + // test 2-deep new directories--both should be owned by the uid/gid pair + if err := MkdirAllAs(filepath.Join(dirName, "lib", "some", "other"), 0755, 101, 101); err != nil { + t.Fatal(err) + } + testTree["lib/some"] = node{101, 101} + testTree["lib/some/other"] = node{101, 101} + verifyTree, err = readTree(dirName, "") + if err != nil { + t.Fatal(err) + } + if err := compareTrees(testTree, verifyTree); err != nil { + t.Fatal(err) + } + + // test a directory that already exists; should be chowned, but nothing else + if err := MkdirAllAs(filepath.Join(dirName, "usr"), 0755, 102, 102); err != nil { + t.Fatal(err) + } + testTree["usr"] = node{102, 102} + verifyTree, err = readTree(dirName, "") + if err != nil { + t.Fatal(err) + } + if err := compareTrees(testTree, verifyTree); err != nil { + t.Fatal(err) + } +} + +func TestMkdirAllNewAs(t *testing.T) { + + dirName, err := ioutil.TempDir("", "mkdirnew") + if err != nil { + t.Fatalf("Couldn't create temp dir: %v", err) + } + defer os.RemoveAll(dirName) + + testTree := map[string]node{ + "usr": {0, 0}, + "usr/bin": {0, 0}, + "lib": {33, 33}, + "lib/x86_64": {45, 45}, + "lib/x86_64/share": {1, 1}, + } + + if err := buildTree(dirName, testTree); err != nil { + t.Fatal(err) + } + + // test adding a directory to a pre-existing dir; only the new dir is owned by the uid/gid + if err := MkdirAllNewAs(filepath.Join(dirName, "usr", "share"), 0755, 99, 99); err != nil { + t.Fatal(err) + } + testTree["usr/share"] = node{99, 99} + verifyTree, err := readTree(dirName, "") + if err != nil { + t.Fatal(err) + } + if err := compareTrees(testTree, verifyTree); err != nil { + t.Fatal(err) + } + + // test 2-deep new directories--both should be owned by the uid/gid pair + if err := MkdirAllNewAs(filepath.Join(dirName, "lib", "some", "other"), 0755, 101, 101); err != nil { + t.Fatal(err) + } + testTree["lib/some"] = node{101, 101} + testTree["lib/some/other"] = node{101, 101} + verifyTree, err = readTree(dirName, "") + if err != nil { + t.Fatal(err) + } + if err := compareTrees(testTree, verifyTree); err != nil { + t.Fatal(err) + } + + // test a directory that already exists; should NOT be chowned + if err := MkdirAllNewAs(filepath.Join(dirName, "usr"), 0755, 102, 102); err != nil { + t.Fatal(err) + } + verifyTree, err = readTree(dirName, "") + if err != nil { + t.Fatal(err) + } + if err := compareTrees(testTree, verifyTree); err != nil { + t.Fatal(err) + } +} + +func TestMkdirAs(t *testing.T) { + + dirName, err := ioutil.TempDir("", "mkdir") + if err != nil { + t.Fatalf("Couldn't create temp dir: %v", err) + } + defer os.RemoveAll(dirName) + + testTree := map[string]node{ + "usr": {0, 0}, + } + if err := buildTree(dirName, testTree); err != nil { + t.Fatal(err) + } + + // test a directory that already exists; should just chown to the requested uid/gid + if err := MkdirAs(filepath.Join(dirName, "usr"), 0755, 99, 99); err != nil { + t.Fatal(err) + } + testTree["usr"] = node{99, 99} + verifyTree, err := 
readTree(dirName, "") + if err != nil { + t.Fatal(err) + } + if err := compareTrees(testTree, verifyTree); err != nil { + t.Fatal(err) + } + + // create a subdir under a dir which doesn't exist--should fail + if err := MkdirAs(filepath.Join(dirName, "usr", "bin", "subdir"), 0755, 102, 102); err == nil { + t.Fatalf("Trying to create a directory with Mkdir where the parent doesn't exist should have failed") + } + + // create a subdir under an existing dir; should only change the ownership of the new subdir + if err := MkdirAs(filepath.Join(dirName, "usr", "bin"), 0755, 102, 102); err != nil { + t.Fatal(err) + } + testTree["usr/bin"] = node{102, 102} + verifyTree, err = readTree(dirName, "") + if err != nil { + t.Fatal(err) + } + if err := compareTrees(testTree, verifyTree); err != nil { + t.Fatal(err) + } +} + +func buildTree(base string, tree map[string]node) error { + for path, node := range tree { + fullPath := filepath.Join(base, path) + if err := os.MkdirAll(fullPath, 0755); err != nil { + return fmt.Errorf("Couldn't create path: %s; error: %v", fullPath, err) + } + if err := os.Chown(fullPath, node.uid, node.gid); err != nil { + return fmt.Errorf("Couldn't chown path: %s; error: %v", fullPath, err) + } + } + return nil +} + +func readTree(base, root string) (map[string]node, error) { + tree := make(map[string]node) + + dirInfos, err := ioutil.ReadDir(base) + if err != nil { + return nil, fmt.Errorf("Couldn't read directory entries for %q: %v", base, err) + } + + for _, info := range dirInfos { + s := &syscall.Stat_t{} + if err := syscall.Stat(filepath.Join(base, info.Name()), s); err != nil { + return nil, fmt.Errorf("Can't stat file %q: %v", filepath.Join(base, info.Name()), err) + } + tree[filepath.Join(root, info.Name())] = node{int(s.Uid), int(s.Gid)} + if info.IsDir() { + // read the subdirectory + subtree, err := readTree(filepath.Join(base, info.Name()), filepath.Join(root, info.Name())) + if err != nil { + return nil, err + } + for path, nodeinfo := range subtree { + tree[path] = nodeinfo + } + } + } + return tree, nil +} + +func compareTrees(left, right map[string]node) error { + if len(left) != len(right) { + return fmt.Errorf("Trees aren't the same size") + } + for path, nodeLeft := range left { + if nodeRight, ok := right[path]; ok { + if nodeRight.uid != nodeLeft.uid || nodeRight.gid != nodeLeft.gid { + // mismatch + return fmt.Errorf("mismatched ownership for %q: expected: %d:%d, got: %d:%d", path, + nodeLeft.uid, nodeLeft.gid, nodeRight.uid, nodeRight.gid) + } + continue + } + return fmt.Errorf("right tree didn't contain path %q", path) + } + return nil +} diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/idtools/idtools_windows.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/idtools/idtools_windows.go new file mode 100644 index 000000000..d5ec992db --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/idtools/idtools_windows.go @@ -0,0 +1,18 @@ +// +build windows + +package idtools + +import ( + "os" + + "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system" +) + +// Platforms such as Windows do not support the UID/GID concept. So make this +// just a wrapper around system.MkdirAll. 
+func mkdirAs(path string, mode os.FileMode, ownerUID, ownerGID int, mkAll, chownExisting bool) error { + if err := system.MkdirAll(path, mode); err != nil && !os.IsExist(err) { + return err + } + return nil +} diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/idtools/usergroupadd_linux.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/idtools/usergroupadd_linux.go new file mode 100644 index 000000000..c1eedff10 --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/idtools/usergroupadd_linux.go @@ -0,0 +1,155 @@ +package idtools + +import ( + "fmt" + "os/exec" + "path/filepath" + "strings" + "syscall" +) + +// add a user and/or group to Linux /etc/passwd, /etc/group using standard +// Linux distribution commands: +// adduser --uid --shell /bin/login --no-create-home --disabled-login --ingroup +// useradd -M -u -s /bin/nologin -N -g +// addgroup --gid +// groupadd -g + +const baseUID int = 10000 +const baseGID int = 10000 +const idMAX int = 65534 + +var ( + userCommand string + groupCommand string + + cmdTemplates = map[string]string{ + "adduser": "--uid %d --shell /bin/false --no-create-home --disabled-login --ingroup %s %s", + "useradd": "-M -u %d -s /bin/false -N -g %s %s", + "addgroup": "--gid %d %s", + "groupadd": "-g %d %s", + } +) + +func init() { + // set up which commands are used for adding users/groups dependent on distro + if _, err := resolveBinary("adduser"); err == nil { + userCommand = "adduser" + } else if _, err := resolveBinary("useradd"); err == nil { + userCommand = "useradd" + } + if _, err := resolveBinary("addgroup"); err == nil { + groupCommand = "addgroup" + } else if _, err := resolveBinary("groupadd"); err == nil { + groupCommand = "groupadd" + } +} + +func resolveBinary(binname string) (string, error) { + binaryPath, err := exec.LookPath(binname) + if err != nil { + return "", err + } + resolvedPath, err := filepath.EvalSymlinks(binaryPath) + if err != nil { + return "", err + } + //only return no error if the final resolved binary basename + //matches what was searched for + if filepath.Base(resolvedPath) == binname { + return resolvedPath, nil + } + return "", fmt.Errorf("Binary %q does not resolve to a binary of that name in $PATH (%q)", binname, resolvedPath) +} + +// AddNamespaceRangesUser takes a name and finds an unused uid, gid pair +// and calls the appropriate helper function to add the group and then +// the user to the group in /etc/group and /etc/passwd respectively. +// This new user's /etc/sub{uid,gid} ranges will be used for user namespace +// mapping ranges in containers. 
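The sub-ID ranges parsed above are what ToContainer and ToHost translate against; a small sketch of that arithmetic, using a single made-up range:

package main

import (
	"fmt"

	"github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/idtools"
)

func main() {
	// One remap range: container IDs 0-65535 map onto host IDs 100000-165535.
	m := []idtools.IDMap{{ContainerID: 0, HostID: 100000, Size: 65536}}

	host, _ := idtools.ToHost(0, m)           // 100000: container root on the host
	cont, _ := idtools.ToContainer(100007, m) // 7
	fmt.Println(host, cont)

	// IDs outside every range produce an error rather than an identity mapping.
	if _, err := idtools.ToHost(70000, m); err != nil {
		fmt.Println(err)
	}
}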
+func AddNamespaceRangesUser(name string) (int, int, error) { + // Find unused uid, gid pair + uid, err := findUnusedUID(baseUID) + if err != nil { + return -1, -1, fmt.Errorf("Unable to find unused UID: %v", err) + } + gid, err := findUnusedGID(baseGID) + if err != nil { + return -1, -1, fmt.Errorf("Unable to find unused GID: %v", err) + } + + // First add the group that we will use + if err := addGroup(name, gid); err != nil { + return -1, -1, fmt.Errorf("Error adding group %q: %v", name, err) + } + // Add the user as a member of the group + if err := addUser(name, uid, name); err != nil { + return -1, -1, fmt.Errorf("Error adding user %q: %v", name, err) + } + return uid, gid, nil +} + +func addUser(userName string, uid int, groupName string) error { + + if userCommand == "" { + return fmt.Errorf("Cannot add user; no useradd/adduser binary found") + } + args := fmt.Sprintf(cmdTemplates[userCommand], uid, groupName, userName) + return execAddCmd(userCommand, args) +} + +func addGroup(groupName string, gid int) error { + + if groupCommand == "" { + return fmt.Errorf("Cannot add group; no groupadd/addgroup binary found") + } + args := fmt.Sprintf(cmdTemplates[groupCommand], gid, groupName) + // only error out if the error isn't that the group already exists + // if the group exists then our needs are already met + if err := execAddCmd(groupCommand, args); err != nil && !strings.Contains(err.Error(), "already exists") { + return err + } + return nil +} + +func execAddCmd(cmd, args string) error { + execCmd := exec.Command(cmd, strings.Split(args, " ")...) + out, err := execCmd.CombinedOutput() + if err != nil { + return fmt.Errorf("Failed to add user/group with error: %v; output: %q", err, string(out)) + } + return nil +} + +func findUnusedUID(startUID int) (int, error) { + return findUnused("passwd", startUID) +} + +func findUnusedGID(startGID int) (int, error) { + return findUnused("group", startGID) +} + +func findUnused(file string, id int) (int, error) { + for { + cmdStr := fmt.Sprintf("cat /etc/%s | cut -d: -f3 | grep '^%d$'", file, id) + cmd := exec.Command("sh", "-c", cmdStr) + if err := cmd.Run(); err != nil { + // if a non-zero return code occurs, then we know the ID was not found + // and is usable + if exiterr, ok := err.(*exec.ExitError); ok { + // The program has exited with an exit code != 0 + if status, ok := exiterr.Sys().(syscall.WaitStatus); ok { + if status.ExitStatus() == 1 { + //no match, we can use this ID + return id, nil + } + } + } + return -1, fmt.Errorf("Error looking in /etc/%s for unused ID: %v", file, err) + } + id++ + if id > idMAX { + return -1, fmt.Errorf("Maximum id in %q reached with finding unused numeric ID", file) + } + } +} diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/idtools/usergroupadd_unsupported.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/idtools/usergroupadd_unsupported.go new file mode 100644 index 000000000..d98b354cb --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/idtools/usergroupadd_unsupported.go @@ -0,0 +1,12 @@ +// +build !linux + +package idtools + +import "fmt" + +// AddNamespaceRangesUser takes a name and finds an unused uid, gid pair +// and calls the appropriate helper function to add the group and then +// the user to the group in /etc/group and /etc/passwd respectively. 
+func AddNamespaceRangesUser(name string) (int, int, error) { + return -1, -1, fmt.Errorf("No support for adding users or groups on this OS") +} diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils/bytespipe.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils/bytespipe.go new file mode 100644 index 000000000..e263c284f --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils/bytespipe.go @@ -0,0 +1,152 @@ +package ioutils + +import ( + "errors" + "io" + "sync" +) + +// maxCap is the highest capacity to use in byte slices that buffer data. +const maxCap = 1e6 + +// blockThreshold is the minimum number of bytes in the buffer which will cause +// a write to BytesPipe to block when allocating a new slice. +const blockThreshold = 1e6 + +// ErrClosed is returned when Write is called on a closed BytesPipe. +var ErrClosed = errors.New("write to closed BytesPipe") + +// BytesPipe is io.ReadWriteCloser which works similarly to pipe(queue). +// All written data may be read at most once. Also, BytesPipe allocates +// and releases new byte slices to adjust to current needs, so the buffer +// won't be overgrown after peak loads. +type BytesPipe struct { + mu sync.Mutex + wait *sync.Cond + buf [][]byte // slice of byte-slices of buffered data + lastRead int // index in the first slice to a read point + bufLen int // length of data buffered over the slices + closeErr error // error to return from next Read. set to nil if not closed. +} + +// NewBytesPipe creates new BytesPipe, initialized by specified slice. +// If buf is nil, then it will be initialized with slice which cap is 64. +// buf will be adjusted in a way that len(buf) == 0, cap(buf) == cap(buf). +func NewBytesPipe(buf []byte) *BytesPipe { + if cap(buf) == 0 { + buf = make([]byte, 0, 64) + } + bp := &BytesPipe{ + buf: [][]byte{buf[:0]}, + } + bp.wait = sync.NewCond(&bp.mu) + return bp +} + +// Write writes p to BytesPipe. +// It can allocate new []byte slices in a process of writing. +func (bp *BytesPipe) Write(p []byte) (int, error) { + bp.mu.Lock() + defer bp.mu.Unlock() + written := 0 + for { + if bp.closeErr != nil { + return written, ErrClosed + } + // write data to the last buffer + b := bp.buf[len(bp.buf)-1] + // copy data to the current empty allocated area + n := copy(b[len(b):cap(b)], p) + // increment buffered data length + bp.bufLen += n + // include written data in last buffer + bp.buf[len(bp.buf)-1] = b[:len(b)+n] + + written += n + + // if there was enough room to write all then break + if len(p) == n { + break + } + + // more data: write to the next slice + p = p[n:] + + // block if too much data is still in the buffer + for bp.bufLen >= blockThreshold { + bp.wait.Wait() + } + + // allocate slice that has twice the size of the last unless maximum reached + nextCap := 2 * cap(bp.buf[len(bp.buf)-1]) + if nextCap > maxCap { + nextCap = maxCap + } + // add new byte slice to the buffers slice and continue writing + bp.buf = append(bp.buf, make([]byte, 0, nextCap)) + } + bp.wait.Broadcast() + return written, nil +} + +// CloseWithError causes further reads from a BytesPipe to return immediately. +func (bp *BytesPipe) CloseWithError(err error) error { + bp.mu.Lock() + if err != nil { + bp.closeErr = err + } else { + bp.closeErr = io.EOF + } + bp.wait.Broadcast() + bp.mu.Unlock() + return nil +} + +// Close causes further reads from a BytesPipe to return immediately. 
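A sketch of the BytesPipe contract described above: one goroutine writes, another drains, and Close hands io.EOF to the reader once the buffer is empty. Only the vendored import path is assumed:

package main

import (
	"fmt"

	"github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils"
)

func main() {
	bp := ioutils.NewBytesPipe(nil)

	go func() {
		bp.Write([]byte("hello "))
		bp.Write([]byte("world"))
		bp.Close() // readers drain the buffered data, then see io.EOF
	}()

	buf := make([]byte, 4)
	for {
		n, err := bp.Read(buf)
		if n > 0 {
			fmt.Printf("%q ", buf[:n]) // each byte is read at most once
		}
		if err != nil {
			break // io.EOF after Close
		}
	}
}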
+func (bp *BytesPipe) Close() error { + return bp.CloseWithError(nil) +} + +func (bp *BytesPipe) len() int { + return bp.bufLen - bp.lastRead +} + +// Read reads bytes from BytesPipe. +// Data can be read only once. +func (bp *BytesPipe) Read(p []byte) (n int, err error) { + bp.mu.Lock() + defer bp.mu.Unlock() + if bp.len() == 0 { + if bp.closeErr != nil { + return 0, bp.closeErr + } + bp.wait.Wait() + if bp.len() == 0 && bp.closeErr != nil { + return 0, bp.closeErr + } + } + for { + read := copy(p, bp.buf[0][bp.lastRead:]) + n += read + bp.lastRead += read + if bp.len() == 0 { + // we have read everything. reset to the beginning. + bp.lastRead = 0 + bp.bufLen -= len(bp.buf[0]) + bp.buf[0] = bp.buf[0][:0] + break + } + // break if everything was read + if len(p) == read { + break + } + // more buffered data and more asked. read from next slice. + p = p[read:] + bp.lastRead = 0 + bp.bufLen -= len(bp.buf[0]) + bp.buf[0] = nil // throw away old slice + bp.buf = bp.buf[1:] // switch to next + } + bp.wait.Broadcast() + return +} diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils/bytespipe_test.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils/bytespipe_test.go new file mode 100644 index 000000000..b051139ad --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils/bytespipe_test.go @@ -0,0 +1,158 @@ +package ioutils + +import ( + "crypto/sha1" + "encoding/hex" + "math/rand" + "testing" + "time" +) + +func TestBytesPipeRead(t *testing.T) { + buf := NewBytesPipe(nil) + buf.Write([]byte("12")) + buf.Write([]byte("34")) + buf.Write([]byte("56")) + buf.Write([]byte("78")) + buf.Write([]byte("90")) + rd := make([]byte, 4) + n, err := buf.Read(rd) + if err != nil { + t.Fatal(err) + } + if n != 4 { + t.Fatalf("Wrong number of bytes read: %d, should be %d", n, 4) + } + if string(rd) != "1234" { + t.Fatalf("Read %s, but must be %s", rd, "1234") + } + n, err = buf.Read(rd) + if err != nil { + t.Fatal(err) + } + if n != 4 { + t.Fatalf("Wrong number of bytes read: %d, should be %d", n, 4) + } + if string(rd) != "5678" { + t.Fatalf("Read %s, but must be %s", rd, "5678") + } + n, err = buf.Read(rd) + if err != nil { + t.Fatal(err) + } + if n != 2 { + t.Fatalf("Wrong number of bytes read: %d, should be %d", n, 2) + } + if string(rd[:n]) != "90" { + t.Fatalf("Read %s, but must be %s", rd, "90") + } +} + +func TestBytesPipeWrite(t *testing.T) { + buf := NewBytesPipe(nil) + buf.Write([]byte("12")) + buf.Write([]byte("34")) + buf.Write([]byte("56")) + buf.Write([]byte("78")) + buf.Write([]byte("90")) + if string(buf.buf[0]) != "1234567890" { + t.Fatalf("Buffer %s, must be %s", buf.buf, "1234567890") + } +} + +// Write and read in different speeds/chunk sizes and check valid data is read.
+func TestBytesPipeWriteRandomChunks(t *testing.T) { + cases := []struct{ iterations, writesPerLoop, readsPerLoop int }{ + {100, 10, 1}, + {1000, 10, 5}, + {1000, 100, 0}, + {1000, 5, 6}, + {10000, 50, 25}, + } + + testMessage := []byte("this is a random string for testing") + // random slice sizes to read and write + writeChunks := []int{25, 35, 15, 20} + readChunks := []int{5, 45, 20, 25} + + for _, c := range cases { + // first pass: write directly to hash + hash := sha1.New() + for i := 0; i < c.iterations*c.writesPerLoop; i++ { + if _, err := hash.Write(testMessage[:writeChunks[i%len(writeChunks)]]); err != nil { + t.Fatal(err) + } + } + expected := hex.EncodeToString(hash.Sum(nil)) + + // write/read through buffer + buf := NewBytesPipe(nil) + hash.Reset() + + done := make(chan struct{}) + + go func() { + // random delay before read starts + <-time.After(time.Duration(rand.Intn(10)) * time.Millisecond) + for i := 0; ; i++ { + p := make([]byte, readChunks[(c.iterations*c.readsPerLoop+i)%len(readChunks)]) + n, _ := buf.Read(p) + if n == 0 { + break + } + hash.Write(p[:n]) + } + + close(done) + }() + + for i := 0; i < c.iterations; i++ { + for w := 0; w < c.writesPerLoop; w++ { + buf.Write(testMessage[:writeChunks[(i*c.writesPerLoop+w)%len(writeChunks)]]) + } + } + buf.Close() + <-done + + actual := hex.EncodeToString(hash.Sum(nil)) + + if expected != actual { + t.Fatalf("BytesPipe returned invalid data. Expected checksum %v, got %v", expected, actual) + } + + } +} + +func BenchmarkBytesPipeWrite(b *testing.B) { + for i := 0; i < b.N; i++ { + readBuf := make([]byte, 1024) + buf := NewBytesPipe(nil) + go func() { + var err error + for err == nil { + _, err = buf.Read(readBuf) + } + }() + for j := 0; j < 1000; j++ { + buf.Write([]byte("pretty short line, because why not?")) + } + buf.Close() + } +} + +func BenchmarkBytesPipeRead(b *testing.B) { + rd := make([]byte, 512) + for i := 0; i < b.N; i++ { + b.StopTimer() + buf := NewBytesPipe(nil) + for j := 0; j < 500; j++ { + buf.Write(make([]byte, 1024)) + } + b.StartTimer() + for j := 0; j < 1000; j++ { + if n, _ := buf.Read(rd); n != 512 { + b.Fatalf("Wrong number of bytes: %d", n) + } + } + } +} diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils/fmt.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils/fmt.go new file mode 100644 index 000000000..0b04b0ba3 --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils/fmt.go @@ -0,0 +1,22 @@ +package ioutils + +import ( + "fmt" + "io" +) + +// FprintfIfNotEmpty prints the string value if it's not empty +func FprintfIfNotEmpty(w io.Writer, format, value string) (int, error) { + if value != "" { + return fmt.Fprintf(w, format, value) + } + return 0, nil +} + +// FprintfIfTrue prints the boolean value if it's true +func FprintfIfTrue(w io.Writer, format string, ok bool) (int, error) { + if ok { + return fmt.Fprintf(w, format, ok) + } + return 0, nil +} diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils/fmt_test.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils/fmt_test.go new file mode 100644 index 000000000..896886329 --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils/fmt_test.go @@ -0,0 +1,17 @@ +package ioutils + +import "testing" + +func TestFprintfIfNotEmpty(t *testing.T) { + wc := NewWriteCounter(&NopWriter{}) + n, 
_ := FprintfIfNotEmpty(wc, "foo%s", "") + + if wc.Count != 0 || n != 0 { + t.Errorf("Wrong count: %v vs. %v vs. 0", wc.Count, n) + } + + n, _ = FprintfIfNotEmpty(wc, "foo%s", "bar") + if wc.Count != 6 || n != 6 { + t.Errorf("Wrong count: %v vs. %v vs. 6", wc.Count, n) + } +} diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils/multireader.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils/multireader.go new file mode 100644 index 000000000..0d2d76b47 --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils/multireader.go @@ -0,0 +1,226 @@ +package ioutils + +import ( + "bytes" + "fmt" + "io" + "os" +) + +type pos struct { + idx int + offset int64 +} + +type multiReadSeeker struct { + readers []io.ReadSeeker + pos *pos + posIdx map[io.ReadSeeker]int +} + +func (r *multiReadSeeker) Seek(offset int64, whence int) (int64, error) { + var tmpOffset int64 + switch whence { + case os.SEEK_SET: + for i, rdr := range r.readers { + // get size of the current reader + s, err := rdr.Seek(0, os.SEEK_END) + if err != nil { + return -1, err + } + + if offset > tmpOffset+s { + if i == len(r.readers)-1 { + rdrOffset := s + (offset - tmpOffset) + if _, err := rdr.Seek(rdrOffset, os.SEEK_SET); err != nil { + return -1, err + } + r.pos = &pos{i, rdrOffset} + return offset, nil + } + + tmpOffset += s + continue + } + + rdrOffset := offset - tmpOffset + idx := i + + rdr.Seek(rdrOffset, os.SEEK_SET) + // make sure all following readers are at 0 + for _, rdr := range r.readers[i+1:] { + rdr.Seek(0, os.SEEK_SET) + } + + if rdrOffset == s && i != len(r.readers)-1 { + idx++ + rdrOffset = 0 + } + r.pos = &pos{idx, rdrOffset} + return offset, nil + } + case os.SEEK_END: + for _, rdr := range r.readers { + s, err := rdr.Seek(0, os.SEEK_END) + if err != nil { + return -1, err + } + tmpOffset += s + } + r.Seek(tmpOffset+offset, os.SEEK_SET) + return tmpOffset + offset, nil + case os.SEEK_CUR: + if r.pos == nil { + return r.Seek(offset, os.SEEK_SET) + } + // Just return the current offset + if offset == 0 { + return r.getCurOffset() + } + + curOffset, err := r.getCurOffset() + if err != nil { + return -1, err + } + rdr, rdrOffset, err := r.getReaderForOffset(curOffset + offset) + if err != nil { + return -1, err + } + + r.pos = &pos{r.posIdx[rdr], rdrOffset} + return curOffset + offset, nil + default: + return -1, fmt.Errorf("Invalid whence: %d", whence) + } + + return -1, fmt.Errorf("Error seeking for whence: %d, offset: %d", whence, offset) +} + +func (r *multiReadSeeker) getReaderForOffset(offset int64) (io.ReadSeeker, int64, error) { + var rdr io.ReadSeeker + var rdrOffset int64 + + for i, rdr := range r.readers { + offsetTo, err := r.getOffsetToReader(rdr) + if err != nil { + return nil, -1, err + } + if offsetTo > offset { + rdr = r.readers[i-1] + rdrOffset = offsetTo - offset + break + } + + if rdr == r.readers[len(r.readers)-1] { + rdrOffset = offsetTo + offset + break + } + } + + return rdr, rdrOffset, nil +} + +func (r *multiReadSeeker) getCurOffset() (int64, error) { + var totalSize int64 + for _, rdr := range r.readers[:r.pos.idx+1] { + if r.posIdx[rdr] == r.pos.idx { + totalSize += r.pos.offset + break + } + + size, err := getReadSeekerSize(rdr) + if err != nil { + return -1, fmt.Errorf("error getting seeker size: %v", err) + } + totalSize += size + } + return totalSize, nil +} + +func (r *multiReadSeeker) getOffsetToReader(rdr io.ReadSeeker) (int64, error) { + var offset 
int64 + for _, r := range r.readers { + if r == rdr { + break + } + + size, err := getReadSeekerSize(rdr) + if err != nil { + return -1, err + } + offset += size + } + return offset, nil +} + +func (r *multiReadSeeker) Read(b []byte) (int, error) { + if r.pos == nil { + r.pos = &pos{0, 0} + } + + bCap := int64(cap(b)) + buf := bytes.NewBuffer(nil) + var rdr io.ReadSeeker + + for _, rdr = range r.readers[r.pos.idx:] { + readBytes, err := io.CopyN(buf, rdr, bCap) + if err != nil && err != io.EOF { + return -1, err + } + bCap -= readBytes + + if bCap == 0 { + break + } + } + + rdrPos, err := rdr.Seek(0, os.SEEK_CUR) + if err != nil { + return -1, err + } + r.pos = &pos{r.posIdx[rdr], rdrPos} + return buf.Read(b) +} + +func getReadSeekerSize(rdr io.ReadSeeker) (int64, error) { + // save the current position + pos, err := rdr.Seek(0, os.SEEK_CUR) + if err != nil { + return -1, err + } + + // get the size + size, err := rdr.Seek(0, os.SEEK_END) + if err != nil { + return -1, err + } + + // reset the position + if _, err := rdr.Seek(pos, os.SEEK_SET); err != nil { + return -1, err + } + return size, nil +} + +// MultiReadSeeker returns a ReadSeeker that's the logical concatenation of the provided +// input readseekers. After calling this method the initial position is set to the +// beginning of the first ReadSeeker. At the end of a ReadSeeker, Read always advances +// to the beginning of the next ReadSeeker and returns EOF at the end of the last ReadSeeker. +// Seek can be used over the sum of lengths of all readseekers. +// +// When a MultiReadSeeker is used, no Read and Seek operations should be made on +// its ReadSeeker components. Also, users should make no assumption on the state +// of individual readseekers while the MultiReadSeeker is used. +func MultiReadSeeker(readers ...io.ReadSeeker) io.ReadSeeker { + if len(readers) == 1 { + return readers[0] + } + idx := make(map[io.ReadSeeker]int) + for i, rdr := range readers { + idx[rdr] = i + } + return &multiReadSeeker{ + readers: readers, + posIdx: idx, + } +} diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils/multireader_test.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils/multireader_test.go new file mode 100644 index 000000000..de495b56d --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils/multireader_test.go @@ -0,0 +1,149 @@ +package ioutils + +import ( + "bytes" + "fmt" + "io" + "io/ioutil" + "os" + "strings" + "testing" +) + +func TestMultiReadSeekerReadAll(t *testing.T) { + str := "hello world" + s1 := strings.NewReader(str + " 1") + s2 := strings.NewReader(str + " 2") + s3 := strings.NewReader(str + " 3") + mr := MultiReadSeeker(s1, s2, s3) + + expectedSize := int64(s1.Len() + s2.Len() + s3.Len()) + + b, err := ioutil.ReadAll(mr) + if err != nil { + t.Fatal(err) + } + + expected := "hello world 1hello world 2hello world 3" + if string(b) != expected { + t.Fatalf("ReadAll failed, got: %q, expected %q", string(b), expected) + } + + size, err := mr.Seek(0, os.SEEK_END) + if err != nil { + t.Fatal(err) + } + if size != expectedSize { + t.Fatalf("reader size does not match, got %d, expected %d", size, expectedSize) + } + + // Reset the position and read again + pos, err := mr.Seek(0, os.SEEK_SET) + if err != nil { + t.Fatal(err) + } + if pos != 0 { + t.Fatalf("expected position to be set to 0, got %d", pos) + } + + b, err = ioutil.ReadAll(mr) + if err != nil { + t.Fatal(err) + } + + if 
string(b) != expected { + t.Fatalf("ReadAll failed, got: %q, expected %q", string(b), expected) + } +} + +func TestMultiReadSeekerReadEach(t *testing.T) { + str := "hello world" + s1 := strings.NewReader(str + " 1") + s2 := strings.NewReader(str + " 2") + s3 := strings.NewReader(str + " 3") + mr := MultiReadSeeker(s1, s2, s3) + + var totalBytes int64 + for i, s := range []*strings.Reader{s1, s2, s3} { + sLen := int64(s.Len()) + buf := make([]byte, s.Len()) + expected := []byte(fmt.Sprintf("%s %d", str, i+1)) + + if _, err := mr.Read(buf); err != nil && err != io.EOF { + t.Fatal(err) + } + + if !bytes.Equal(buf, expected) { + t.Fatalf("expected %q to be %q", string(buf), string(expected)) + } + + pos, err := mr.Seek(0, os.SEEK_CUR) + if err != nil { + t.Fatalf("iteration: %d, error: %v", i+1, err) + } + + // check that the total bytes read is the current position of the seeker + totalBytes += sLen + if pos != totalBytes { + t.Fatalf("expected current position to be: %d, got: %d, iteration: %d", totalBytes, pos, i+1) + } + + // This tests not only that SEEK_SET and SEEK_CUR give the same values, but that the next iteration is in the expected position as well + newPos, err := mr.Seek(pos, os.SEEK_SET) + if err != nil { + t.Fatal(err) + } + if newPos != pos { + t.Fatalf("expected to get same position when calling SEEK_SET with value from SEEK_CUR, cur: %d, set: %d", pos, newPos) + } + } +} + +func TestMultiReadSeekerReadSpanningChunks(t *testing.T) { + str := "hello world" + s1 := strings.NewReader(str + " 1") + s2 := strings.NewReader(str + " 2") + s3 := strings.NewReader(str + " 3") + mr := MultiReadSeeker(s1, s2, s3) + + buf := make([]byte, s1.Len()+3) + _, err := mr.Read(buf) + if err != nil { + t.Fatal(err) + } + + // expected is the contents of s1 + 3 bytes from s2, ie, the `hel` at the end of this string + expected := "hello world 1hel" + if string(buf) != expected { + t.Fatalf("expected %s to be %s", string(buf), expected) + } +} + +func TestMultiReadSeekerNegativeSeek(t *testing.T) { + str := "hello world" + s1 := strings.NewReader(str + " 1") + s2 := strings.NewReader(str + " 2") + s3 := strings.NewReader(str + " 3") + mr := MultiReadSeeker(s1, s2, s3) + + s1Len := s1.Len() + s2Len := s2.Len() + s3Len := s3.Len() + + s, err := mr.Seek(int64(-1*s3.Len()), os.SEEK_END) + if err != nil { + t.Fatal(err) + } + if s != int64(s1Len+s2Len) { + t.Fatalf("expected %d to be %d", s, s1.Len()+s2.Len()) + } + + buf := make([]byte, s3Len) + if _, err := mr.Read(buf); err != nil && err != io.EOF { + t.Fatal(err) + } + expected := fmt.Sprintf("%s %d", str, 3) + if string(buf) != fmt.Sprintf("%s %d", str, 3) { + t.Fatalf("expected %q to be %q", string(buf), expected) + } +} diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils/readers.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils/readers.go new file mode 100644 index 000000000..a891955ac --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils/readers.go @@ -0,0 +1,154 @@ +package ioutils + +import ( + "crypto/sha256" + "encoding/hex" + "io" + + "github.com/fsouza/go-dockerclient/external/golang.org/x/net/context" +) + +type readCloserWrapper struct { + io.Reader + closer func() error +} + +func (r *readCloserWrapper) Close() error { + return r.closer() +} + +// NewReadCloserWrapper returns a new io.ReadCloser. 
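Returning to MultiReadSeeker from multireader.go above: a sketch of seeking across the logical concatenation of readers; only the vendored import path is assumed:

package main

import (
	"fmt"
	"io/ioutil"
	"os"
	"strings"

	"github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils"
)

func main() {
	mr := ioutils.MultiReadSeeker(
		strings.NewReader("hello "),
		strings.NewReader("world"),
	)

	// Offset 6 lands exactly on the start of the second reader.
	mr.Seek(6, os.SEEK_SET)
	rest, _ := ioutil.ReadAll(mr)
	fmt.Printf("%s\n", rest) // world
}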
+func NewReadCloserWrapper(r io.Reader, closer func() error) io.ReadCloser { + return &readCloserWrapper{ + Reader: r, + closer: closer, + } +} + +type readerErrWrapper struct { + reader io.Reader + closer func() +} + +func (r *readerErrWrapper) Read(p []byte) (int, error) { + n, err := r.reader.Read(p) + if err != nil { + r.closer() + } + return n, err +} + +// NewReaderErrWrapper returns a new io.Reader. +func NewReaderErrWrapper(r io.Reader, closer func()) io.Reader { + return &readerErrWrapper{ + reader: r, + closer: closer, + } +} + +// HashData returns the sha256 sum of src. +func HashData(src io.Reader) (string, error) { + h := sha256.New() + if _, err := io.Copy(h, src); err != nil { + return "", err + } + return "sha256:" + hex.EncodeToString(h.Sum(nil)), nil +} + +// OnEOFReader wraps an io.ReadCloser and a function; +// the function runs at end of file or when the file is closed. +type OnEOFReader struct { + Rc io.ReadCloser + Fn func() +} + +func (r *OnEOFReader) Read(p []byte) (n int, err error) { + n, err = r.Rc.Read(p) + if err == io.EOF { + r.runFunc() + } + return +} + +// Close closes the file and runs the function. +func (r *OnEOFReader) Close() error { + err := r.Rc.Close() + r.runFunc() + return err +} + +func (r *OnEOFReader) runFunc() { + if fn := r.Fn; fn != nil { + fn() + r.Fn = nil + } +} + +// cancelReadCloser wraps an io.ReadCloser with a context for cancelling read +// operations. +type cancelReadCloser struct { + cancel func() + pR *io.PipeReader // Stream to read from + pW *io.PipeWriter +} + +// NewCancelReadCloser creates a wrapper that closes the ReadCloser when the +// context is cancelled. The returned io.ReadCloser must be closed when it is +// no longer needed. +func NewCancelReadCloser(ctx context.Context, in io.ReadCloser) io.ReadCloser { + pR, pW := io.Pipe() + + // Create a context used to signal when the pipe is closed + doneCtx, cancel := context.WithCancel(context.Background()) + + p := &cancelReadCloser{ + cancel: cancel, + pR: pR, + pW: pW, + } + + go func() { + _, err := io.Copy(pW, in) + select { + case <-ctx.Done(): + // If the context was closed, p.closeWithError + // was already called. Calling it again would + // change the error that Read returns. + default: + p.closeWithError(err) + } + in.Close() + }() + go func() { + for { + select { + case <-ctx.Done(): + p.closeWithError(ctx.Err()) + case <-doneCtx.Done(): + return + } + } + }() + + return p +} + +// Read wraps the Read method of the pipe that provides data from the wrapped +// ReadCloser. +func (p *cancelReadCloser) Read(buf []byte) (n int, err error) { + return p.pR.Read(buf) +} + +// closeWithError closes the wrapper and its underlying reader. It will +// cause future calls to Read to return err. +func (p *cancelReadCloser) closeWithError(err error) { + p.pW.CloseWithError(err) + p.cancel() +} + +// Close closes the wrapper and its underlying reader. It will cause +// future calls to Read to return io.EOF.
+func (p *cancelReadCloser) Close() error { + p.closeWithError(io.EOF) + return nil +} diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils/readers_test.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils/readers_test.go new file mode 100644 index 000000000..8f9e03c31 --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils/readers_test.go @@ -0,0 +1,94 @@ +package ioutils + +import ( + "fmt" + "io/ioutil" + "strings" + "testing" + "time" + + "github.com/fsouza/go-dockerclient/external/golang.org/x/net/context" +) + +// Implement io.Reader +type errorReader struct{} + +func (r *errorReader) Read(p []byte) (int, error) { + return 0, fmt.Errorf("Error reader always fail.") +} + +func TestReadCloserWrapperClose(t *testing.T) { + reader := strings.NewReader("A string reader") + wrapper := NewReadCloserWrapper(reader, func() error { + return fmt.Errorf("This will be called when closing") + }) + err := wrapper.Close() + if err == nil || !strings.Contains(err.Error(), "This will be called when closing") { + t.Fatalf("readCloserWrapper should have call the anonymous func and thus, fail.") + } +} + +func TestReaderErrWrapperReadOnError(t *testing.T) { + called := false + reader := &errorReader{} + wrapper := NewReaderErrWrapper(reader, func() { + called = true + }) + _, err := wrapper.Read([]byte{}) + if err == nil || !strings.Contains(err.Error(), "Error reader always fail.") { + t.Fatalf("readErrWrapper should returned an error") + } + if !called { + t.Fatalf("readErrWrapper should have call the anonymous function on failure") + } +} + +func TestReaderErrWrapperRead(t *testing.T) { + reader := strings.NewReader("a string reader.") + wrapper := NewReaderErrWrapper(reader, func() { + t.Fatalf("readErrWrapper should not have called the anonymous function") + }) + // Read 20 byte (should be ok with the string above) + num, err := wrapper.Read(make([]byte, 20)) + if err != nil { + t.Fatal(err) + } + if num != 16 { + t.Fatalf("readerErrWrapper should have read 16 byte, but read %d", num) + } +} + +func TestHashData(t *testing.T) { + reader := strings.NewReader("hash-me") + actual, err := HashData(reader) + if err != nil { + t.Fatal(err) + } + expected := "sha256:4d11186aed035cc624d553e10db358492c84a7cd6b9670d92123c144930450aa" + if actual != expected { + t.Fatalf("Expecting %s, got %s", expected, actual) + } +} + +type perpetualReader struct{} + +func (p *perpetualReader) Read(buf []byte) (n int, err error) { + for i := 0; i != len(buf); i++ { + buf[i] = 'a' + } + return len(buf), nil +} + +func TestCancelReadCloser(t *testing.T) { + ctx, _ := context.WithTimeout(context.Background(), 100*time.Millisecond) + cancelReadCloser := NewCancelReadCloser(ctx, ioutil.NopCloser(&perpetualReader{})) + for { + var buf [128]byte + _, err := cancelReadCloser.Read(buf[:]) + if err == context.DeadlineExceeded { + break + } else if err != nil { + t.Fatalf("got unexpected error: %v", err) + } + } +} diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils/scheduler.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils/scheduler.go new file mode 100644 index 000000000..3c88f29e3 --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils/scheduler.go @@ -0,0 +1,6 @@ +// +build !gccgo + +package ioutils + +func callSchedulerIfNecessary() { +} diff --git 
a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils/scheduler_gccgo.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils/scheduler_gccgo.go new file mode 100644 index 000000000..c11d02b94 --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils/scheduler_gccgo.go @@ -0,0 +1,13 @@ +// +build gccgo + +package ioutils + +import ( + "runtime" +) + +func callSchedulerIfNecessary() { + //allow or force Go scheduler to switch context, without explicitly + //forcing this will make it hang when using gccgo implementation + runtime.Gosched() +} diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils/temp_unix.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils/temp_unix.go new file mode 100644 index 000000000..1539ad21b --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils/temp_unix.go @@ -0,0 +1,10 @@ +// +build !windows + +package ioutils + +import "io/ioutil" + +// TempDir on Unix systems is equivalent to ioutil.TempDir. +func TempDir(dir, prefix string) (string, error) { + return ioutil.TempDir(dir, prefix) +} diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils/temp_windows.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils/temp_windows.go new file mode 100644 index 000000000..72c0bc597 --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils/temp_windows.go @@ -0,0 +1,18 @@ +// +build windows + +package ioutils + +import ( + "io/ioutil" + + "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/longpath" +) + +// TempDir is the equivalent of ioutil.TempDir, except that the result is in Windows longpath format. +func TempDir(dir, prefix string) (string, error) { + tempDir, err := ioutil.TempDir(dir, prefix) + if err != nil { + return "", err + } + return longpath.AddPrefix(tempDir), nil +} diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils/writeflusher.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils/writeflusher.go new file mode 100644 index 000000000..2b35a2666 --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils/writeflusher.go @@ -0,0 +1,92 @@ +package ioutils + +import ( + "errors" + "io" + "net/http" + "sync" +) + +// WriteFlusher wraps the Write and Flush operation ensuring that every write +// is a flush. In addition, the Close method can be called to intercept +// Read/Write calls if the targets lifecycle has already ended. +type WriteFlusher struct { + mu sync.Mutex + w io.Writer + flusher http.Flusher + flushed bool + closed error + + // TODO(stevvooe): Use channel for closed instead, remove mutex. Using a + // channel will allow one to properly order the operations. +} + +var errWriteFlusherClosed = errors.New("writeflusher: closed") + +func (wf *WriteFlusher) Write(b []byte) (n int, err error) { + wf.mu.Lock() + defer wf.mu.Unlock() + if wf.closed != nil { + return 0, wf.closed + } + + n, err = wf.w.Write(b) + wf.flush() // every write is a flush. + return n, err +} + +// Flush the stream immediately. 
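A sketch of the intended WriteFlusher usage, assuming a plain net/http handler (the endpoint and timings are made up); http.ResponseWriter implements http.Flusher, so every write below reaches the client immediately:

package main

import (
	"fmt"
	"net/http"
	"time"

	"github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils"
)

func progress(w http.ResponseWriter, r *http.Request) {
	wf := ioutils.NewWriteFlusher(w) // falls back to a NopFlusher for plain writers
	defer wf.Close()
	for i := 1; i <= 3; i++ {
		fmt.Fprintf(wf, "step %d\n", i) // flushed as soon as it is written
		time.Sleep(100 * time.Millisecond)
	}
}

func main() {
	http.HandleFunc("/progress", progress)
	http.ListenAndServe(":8080", nil)
}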
+func (wf *WriteFlusher) Flush() { + wf.mu.Lock() + defer wf.mu.Unlock() + + wf.flush() +} + +// flush the stream immediately without taking a lock. Used internally. +func (wf *WriteFlusher) flush() { + if wf.closed != nil { + return + } + + wf.flushed = true + wf.flusher.Flush() +} + +// Flushed returns the state of flushed. +// If it's flushed, return true, or else it return false. +func (wf *WriteFlusher) Flushed() bool { + // BUG(stevvooe): Remove this method. Its use is inherently racy. Seems to + // be used to detect whether or a response code has been issued or not. + // Another hook should be used instead. + wf.mu.Lock() + defer wf.mu.Unlock() + + return wf.flushed +} + +// Close closes the write flusher, disallowing any further writes to the +// target. After the flusher is closed, all calls to write or flush will +// result in an error. +func (wf *WriteFlusher) Close() error { + wf.mu.Lock() + defer wf.mu.Unlock() + + if wf.closed != nil { + return wf.closed + } + + wf.closed = errWriteFlusherClosed + return nil +} + +// NewWriteFlusher returns a new WriteFlusher. +func NewWriteFlusher(w io.Writer) *WriteFlusher { + var flusher http.Flusher + if f, ok := w.(http.Flusher); ok { + flusher = f + } else { + flusher = &NopFlusher{} + } + return &WriteFlusher{w: w, flusher: flusher} +} diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils/writers.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils/writers.go new file mode 100644 index 000000000..ccc7f9c23 --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils/writers.go @@ -0,0 +1,66 @@ +package ioutils + +import "io" + +// NopWriter represents a type which write operation is nop. +type NopWriter struct{} + +func (*NopWriter) Write(buf []byte) (int, error) { + return len(buf), nil +} + +type nopWriteCloser struct { + io.Writer +} + +func (w *nopWriteCloser) Close() error { return nil } + +// NopWriteCloser returns a nopWriteCloser. +func NopWriteCloser(w io.Writer) io.WriteCloser { + return &nopWriteCloser{w} +} + +// NopFlusher represents a type which flush operation is nop. +type NopFlusher struct{} + +// Flush is a nop operation. +func (f *NopFlusher) Flush() {} + +type writeCloserWrapper struct { + io.Writer + closer func() error +} + +func (r *writeCloserWrapper) Close() error { + return r.closer() +} + +// NewWriteCloserWrapper returns a new io.WriteCloser. +func NewWriteCloserWrapper(r io.Writer, closer func() error) io.WriteCloser { + return &writeCloserWrapper{ + Writer: r, + closer: closer, + } +} + +// WriteCounter wraps a concrete io.Writer and hold a count of the number +// of bytes written to the writer during a "session". +// This can be convenient when write return is masked +// (e.g., json.Encoder.Encode()) +type WriteCounter struct { + Count int64 + Writer io.Writer +} + +// NewWriteCounter returns a new WriteCounter. 
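// A hedged usage sketch; json.Encoder is only an illustration of a writer
// whose byte count is masked, as the comment above describes:
//
//	wc := NewWriteCounter(ioutil.Discard)
//	json.NewEncoder(wc).Encode(map[string]string{"key": "value"})
//	fmt.Printf("wrote %d bytes\n", wc.Count) // bytes Encode actually wrote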
+func NewWriteCounter(w io.Writer) *WriteCounter { + return &WriteCounter{ + Writer: w, + } +} + +func (wc *WriteCounter) Write(p []byte) (count int, err error) { + count, err = wc.Writer.Write(p) + wc.Count += int64(count) + return +} diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils/writers_test.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils/writers_test.go new file mode 100644 index 000000000..564b1cd4f --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils/writers_test.go @@ -0,0 +1,65 @@ +package ioutils + +import ( + "bytes" + "strings" + "testing" +) + +func TestWriteCloserWrapperClose(t *testing.T) { + called := false + writer := bytes.NewBuffer([]byte{}) + wrapper := NewWriteCloserWrapper(writer, func() error { + called = true + return nil + }) + if err := wrapper.Close(); err != nil { + t.Fatal(err) + } + if !called { + t.Fatalf("writeCloserWrapper should have call the anonymous function.") + } +} + +func TestNopWriteCloser(t *testing.T) { + writer := bytes.NewBuffer([]byte{}) + wrapper := NopWriteCloser(writer) + if err := wrapper.Close(); err != nil { + t.Fatal("NopWriteCloser always return nil on Close.") + } + +} + +func TestNopWriter(t *testing.T) { + nw := &NopWriter{} + l, err := nw.Write([]byte{'c'}) + if err != nil { + t.Fatal(err) + } + if l != 1 { + t.Fatalf("Expected 1 got %d", l) + } +} + +func TestWriteCounter(t *testing.T) { + dummy1 := "This is a dummy string." + dummy2 := "This is another dummy string." + totalLength := int64(len(dummy1) + len(dummy2)) + + reader1 := strings.NewReader(dummy1) + reader2 := strings.NewReader(dummy2) + + var buffer bytes.Buffer + wc := NewWriteCounter(&buffer) + + reader1.WriteTo(wc) + reader2.WriteTo(wc) + + if wc.Count != totalLength { + t.Errorf("Wrong count: %d vs. %d", wc.Count, totalLength) + } + + if buffer.String() != dummy1+dummy2 { + t.Error("Wrong message written") + } +} diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/longpath/longpath.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/longpath/longpath.go new file mode 100644 index 000000000..9b15bfff4 --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/longpath/longpath.go @@ -0,0 +1,26 @@ +// longpath introduces some constants and helper functions for handling long paths +// in Windows, which are expected to be prepended with `\\?\` and followed by either +// a drive letter, a UNC server\share, or a volume identifier. + +package longpath + +import ( + "strings" +) + +// Prefix is the longpath prefix for Windows file paths. +const Prefix = `\\?\` + +// AddPrefix will add the Windows long path prefix to the path provided if +// it does not already have it. +func AddPrefix(path string) string { + if !strings.HasPrefix(path, Prefix) { + if strings.HasPrefix(path, `\\`) { + // This is a UNC path, so we need to add 'UNC' to the path as well. 
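// A worked example of the rewrite on the next line (illustrative path):
// path[1:] keeps one of the two leading backslashes, so
//
//	`\\server\share\path` becomes Prefix + `UNC` + `\server\share\path`,
//	i.e. `\\?\UNC\server\share\path`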
+ path = Prefix + `UNC` + path[1:] + } else { + path = Prefix + path + } + } + return path +} diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/longpath/longpath_test.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/longpath/longpath_test.go new file mode 100644 index 000000000..01865eff0 --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/longpath/longpath_test.go @@ -0,0 +1,22 @@ +package longpath + +import ( + "strings" + "testing" +) + +func TestStandardLongPath(t *testing.T) { + c := `C:\simple\path` + longC := AddPrefix(c) + if !strings.EqualFold(longC, `\\?\C:\simple\path`) { + t.Errorf("Wrong long path returned. Original = %s ; Long = %s", c, longC) + } +} + +func TestUNCLongPath(t *testing.T) { + c := `\\server\share\path` + longC := AddPrefix(c) + if !strings.EqualFold(longC, `\\?\UNC\server\share\path`) { + t.Errorf("Wrong UNC long path returned. Original = %s ; Long = %s", c, longC) + } +} diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/pools/pools.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/pools/pools.go new file mode 100644 index 000000000..515fb4d05 --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/pools/pools.go @@ -0,0 +1,119 @@ +// Package pools provides a collection of pools which provide various +// data types with buffers. These can be used to lower the number of +// memory allocations and reuse buffers. +// +// New pools should be added to this package to allow them to be +// shared across packages. +// +// Utility functions which operate on pools should be added to this +// package to allow them to be reused. +package pools + +import ( + "bufio" + "io" + "sync" + + "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils" +) + +var ( + // BufioReader32KPool is a pool which returns bufio.Reader with a 32K buffer. + BufioReader32KPool *BufioReaderPool + // BufioWriter32KPool is a pool which returns bufio.Writer with a 32K buffer. + BufioWriter32KPool *BufioWriterPool +) + +const buffer32K = 32 * 1024 + +// BufioReaderPool is a bufio reader that uses sync.Pool. +type BufioReaderPool struct { + pool sync.Pool +} + +func init() { + BufioReader32KPool = newBufioReaderPoolWithSize(buffer32K) + BufioWriter32KPool = newBufioWriterPoolWithSize(buffer32K) +} + +// newBufioReaderPoolWithSize is unexported because new pools should be +// added here to be shared where required. +func newBufioReaderPoolWithSize(size int) *BufioReaderPool { + pool := sync.Pool{ + New: func() interface{} { return bufio.NewReaderSize(nil, size) }, + } + return &BufioReaderPool{pool: pool} +} + +// Get returns a bufio.Reader which reads from r. The buffer size is that of the pool. +func (bufPool *BufioReaderPool) Get(r io.Reader) *bufio.Reader { + buf := bufPool.pool.Get().(*bufio.Reader) + buf.Reset(r) + return buf +} + +// Put puts the bufio.Reader back into the pool. +func (bufPool *BufioReaderPool) Put(b *bufio.Reader) { + b.Reset(nil) + bufPool.pool.Put(b) +} + +// Copy is a convenience wrapper which uses a buffer to avoid allocation in io.Copy. 
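// A hedged usage sketch, using only identifiers defined in this package:
//
//	var dst bytes.Buffer
//	n, err := Copy(&dst, strings.NewReader("payload")) // n == 7, err == nil
//
// The 32K bufio.Reader is taken from the pool and returned afterwards, so
// repeated copies avoid a fresh 32K allocation each time.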
+func Copy(dst io.Writer, src io.Reader) (written int64, err error) { + buf := BufioReader32KPool.Get(src) + written, err = io.Copy(dst, buf) + BufioReader32KPool.Put(buf) + return +} + +// NewReadCloserWrapper returns a wrapper which puts the bufio.Reader back +// into the pool and closes the reader if it's an io.ReadCloser. +func (bufPool *BufioReaderPool) NewReadCloserWrapper(buf *bufio.Reader, r io.Reader) io.ReadCloser { + return ioutils.NewReadCloserWrapper(r, func() error { + if readCloser, ok := r.(io.ReadCloser); ok { + readCloser.Close() + } + bufPool.Put(buf) + return nil + }) +} + +// BufioWriterPool is a bufio writer that uses sync.Pool. +type BufioWriterPool struct { + pool sync.Pool +} + +// newBufioWriterPoolWithSize is unexported because new pools should be +// added here to be shared where required. +func newBufioWriterPoolWithSize(size int) *BufioWriterPool { + pool := sync.Pool{ + New: func() interface{} { return bufio.NewWriterSize(nil, size) }, + } + return &BufioWriterPool{pool: pool} +} + +// Get returns a bufio.Writer which writes to w. The buffer size is that of the pool. +func (bufPool *BufioWriterPool) Get(w io.Writer) *bufio.Writer { + buf := bufPool.pool.Get().(*bufio.Writer) + buf.Reset(w) + return buf +} + +// Put puts the bufio.Writer back into the pool. +func (bufPool *BufioWriterPool) Put(b *bufio.Writer) { + b.Reset(nil) + bufPool.pool.Put(b) +} + +// NewWriteCloserWrapper returns a wrapper which puts the bufio.Writer back +// into the pool and closes the writer if it's an io.Writecloser. +func (bufPool *BufioWriterPool) NewWriteCloserWrapper(buf *bufio.Writer, w io.Writer) io.WriteCloser { + return ioutils.NewWriteCloserWrapper(w, func() error { + buf.Flush() + if writeCloser, ok := w.(io.WriteCloser); ok { + writeCloser.Close() + } + bufPool.Put(buf) + return nil + }) +} diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/pools/pools_test.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/pools/pools_test.go new file mode 100644 index 000000000..78689800b --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/pools/pools_test.go @@ -0,0 +1,162 @@ +package pools + +import ( + "bufio" + "bytes" + "io" + "strings" + "testing" +) + +func TestBufioReaderPoolGetWithNoReaderShouldCreateOne(t *testing.T) { + reader := BufioReader32KPool.Get(nil) + if reader == nil { + t.Fatalf("BufioReaderPool should have create a bufio.Reader but did not.") + } +} + +func TestBufioReaderPoolPutAndGet(t *testing.T) { + sr := bufio.NewReader(strings.NewReader("foobar")) + reader := BufioReader32KPool.Get(sr) + if reader == nil { + t.Fatalf("BufioReaderPool should not return a nil reader.") + } + // verify the first 3 byte + buf1 := make([]byte, 3) + _, err := reader.Read(buf1) + if err != nil { + t.Fatal(err) + } + if actual := string(buf1); actual != "foo" { + t.Fatalf("The first letter should have been 'foo' but was %v", actual) + } + BufioReader32KPool.Put(reader) + // Try to read the next 3 bytes + _, err = sr.Read(make([]byte, 3)) + if err == nil || err != io.EOF { + t.Fatalf("The buffer should have been empty, issue an EOF error.") + } +} + +type simpleReaderCloser struct { + io.Reader + closed bool +} + +func (r *simpleReaderCloser) Close() error { + r.closed = true + return nil +} + +func TestNewReadCloserWrapperWithAReadCloser(t *testing.T) { + br := bufio.NewReader(strings.NewReader("")) + sr := &simpleReaderCloser{ + Reader: 
strings.NewReader("foobar"), + closed: false, + } + reader := BufioReader32KPool.NewReadCloserWrapper(br, sr) + if reader == nil { + t.Fatalf("NewReadCloserWrapper should not return a nil reader.") + } + // Verify the content of reader + buf := make([]byte, 3) + _, err := reader.Read(buf) + if err != nil { + t.Fatal(err) + } + if actual := string(buf); actual != "foo" { + t.Fatalf("The first 3 letter should have been 'foo' but were %v", actual) + } + reader.Close() + // Read 3 more bytes "bar" + _, err = reader.Read(buf) + if err != nil { + t.Fatal(err) + } + if actual := string(buf); actual != "bar" { + t.Fatalf("The first 3 letter should have been 'bar' but were %v", actual) + } + if !sr.closed { + t.Fatalf("The ReaderCloser should have been closed, it is not.") + } +} + +func TestBufioWriterPoolGetWithNoReaderShouldCreateOne(t *testing.T) { + writer := BufioWriter32KPool.Get(nil) + if writer == nil { + t.Fatalf("BufioWriterPool should have create a bufio.Writer but did not.") + } +} + +func TestBufioWriterPoolPutAndGet(t *testing.T) { + buf := new(bytes.Buffer) + bw := bufio.NewWriter(buf) + writer := BufioWriter32KPool.Get(bw) + if writer == nil { + t.Fatalf("BufioReaderPool should not return a nil writer.") + } + written, err := writer.Write([]byte("foobar")) + if err != nil { + t.Fatal(err) + } + if written != 6 { + t.Fatalf("Should have written 6 bytes, but wrote %v bytes", written) + } + // Make sure we Flush all the way ? + writer.Flush() + bw.Flush() + if len(buf.Bytes()) != 6 { + t.Fatalf("The buffer should contain 6 bytes ('foobar') but contains %v ('%v')", buf.Bytes(), string(buf.Bytes())) + } + // Reset the buffer + buf.Reset() + BufioWriter32KPool.Put(writer) + // Try to write something + written, err = writer.Write([]byte("barfoo")) + if err != nil { + t.Fatal(err) + } + // If we now try to flush it, it should panic (the writer is nil) + // recover it + defer func() { + if r := recover(); r == nil { + t.Fatal("Trying to flush the writter should have 'paniced', did not.") + } + }() + writer.Flush() +} + +type simpleWriterCloser struct { + io.Writer + closed bool +} + +func (r *simpleWriterCloser) Close() error { + r.closed = true + return nil +} + +func TestNewWriteCloserWrapperWithAWriteCloser(t *testing.T) { + buf := new(bytes.Buffer) + bw := bufio.NewWriter(buf) + sw := &simpleWriterCloser{ + Writer: new(bytes.Buffer), + closed: false, + } + bw.Flush() + writer := BufioWriter32KPool.NewWriteCloserWrapper(bw, sw) + if writer == nil { + t.Fatalf("BufioReaderPool should not return a nil writer.") + } + written, err := writer.Write([]byte("foobar")) + if err != nil { + t.Fatal(err) + } + if written != 6 { + t.Fatalf("Should have written 6 bytes, but wrote %v bytes", written) + } + writer.Close() + if !sw.closed { + t.Fatalf("The ReaderCloser should have been closed, it is not.") + } +} diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/promise/promise.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/promise/promise.go new file mode 100644 index 000000000..dd52b9082 --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/promise/promise.go @@ -0,0 +1,11 @@ +package promise + +// Go is a basic promise implementation: it wraps calls a function in a goroutine, +// and returns a channel which will later return the function's return value. 
+func Go(f func() error) chan error { + ch := make(chan error, 1) + go func() { + ch <- f() + }() + return ch +} diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/stdcopy/stdcopy.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/stdcopy/stdcopy.go new file mode 100644 index 000000000..b2c60046a --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/stdcopy/stdcopy.go @@ -0,0 +1,175 @@ +package stdcopy + +import ( + "encoding/binary" + "errors" + "io" + + "github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus" +) + +const ( + stdWriterPrefixLen = 8 + stdWriterFdIndex = 0 + stdWriterSizeIndex = 4 + + startingBufLen = 32*1024 + stdWriterPrefixLen + 1 +) + +// StdType prefixes type and length to standard stream. +type StdType [stdWriterPrefixLen]byte + +var ( + // Stdin represents standard input stream type. + Stdin = StdType{0: 0} + // Stdout represents standard output stream type. + Stdout = StdType{0: 1} + // Stderr represents standard error steam type. + Stderr = StdType{0: 2} +) + +// StdWriter is wrapper of io.Writer with extra customized info. +type StdWriter struct { + io.Writer + prefix StdType + sizeBuf []byte +} + +func (w *StdWriter) Write(buf []byte) (n int, err error) { + var n1, n2 int + if w == nil || w.Writer == nil { + return 0, errors.New("Writer not instantiated") + } + binary.BigEndian.PutUint32(w.prefix[4:], uint32(len(buf))) + n1, err = w.Writer.Write(w.prefix[:]) + if err != nil { + n = n1 - stdWriterPrefixLen + } else { + n2, err = w.Writer.Write(buf) + n = n1 + n2 - stdWriterPrefixLen + } + if n < 0 { + n = 0 + } + return +} + +// NewStdWriter instantiates a new Writer. +// Everything written to it will be encapsulated using a custom format, +// and written to the underlying `w` stream. +// This allows multiple write streams (e.g. stdout and stderr) to be muxed into a single connection. +// `t` indicates the id of the stream to encapsulate. +// It can be stdcopy.Stdin, stdcopy.Stdout, stdcopy.Stderr. +func NewStdWriter(w io.Writer, t StdType) *StdWriter { + return &StdWriter{ + Writer: w, + prefix: t, + sizeBuf: make([]byte, 4), + } +} + +var errInvalidStdHeader = errors.New("Unrecognized input header") + +// StdCopy is a modified version of io.Copy. +// +// StdCopy will demultiplex `src`, assuming that it contains two streams, +// previously multiplexed together using a StdWriter instance. +// As it reads from `src`, StdCopy will write to `dstout` and `dsterr`. +// +// StdCopy will read until it hits EOF on `src`. It will then return a nil error. +// In other words: if `err` is non nil, it indicates a real underlying error. +// +// `written` will hold the total number of bytes written to `dstout` and `dsterr`. 
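// A hedged round-trip sketch built only from this package's API:
//
//	var muxed bytes.Buffer
//	NewStdWriter(&muxed, Stdout).Write([]byte("to stdout"))
//	NewStdWriter(&muxed, Stderr).Write([]byte("to stderr"))
//	var outBuf, errBuf bytes.Buffer
//	written, err := StdCopy(&outBuf, &errBuf, &muxed)
//	// outBuf holds "to stdout", errBuf holds "to stderr", err == nil, and
//	// written == 18: payload bytes only, the two 8-byte headers are not counted.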
+func StdCopy(dstout, dsterr io.Writer, src io.Reader) (written int64, err error) { + var ( + buf = make([]byte, startingBufLen) + bufLen = len(buf) + nr, nw int + er, ew error + out io.Writer + frameSize int + ) + + for { + // Make sure we have at least a full header + for nr < stdWriterPrefixLen { + var nr2 int + nr2, er = src.Read(buf[nr:]) + nr += nr2 + if er == io.EOF { + if nr < stdWriterPrefixLen { + logrus.Debugf("Corrupted prefix: %v", buf[:nr]) + return written, nil + } + break + } + if er != nil { + logrus.Debugf("Error reading header: %s", er) + return 0, er + } + } + + // Check the first byte to know where to write + switch buf[stdWriterFdIndex] { + case 0: + fallthrough + case 1: + // Write on stdout + out = dstout + case 2: + // Write on stderr + out = dsterr + default: + logrus.Debugf("Error selecting output fd: (%d)", buf[stdWriterFdIndex]) + return 0, errInvalidStdHeader + } + + // Retrieve the size of the frame + frameSize = int(binary.BigEndian.Uint32(buf[stdWriterSizeIndex : stdWriterSizeIndex+4])) + logrus.Debugf("framesize: %d", frameSize) + + // Check if the buffer is big enough to read the frame. + // Extend it if necessary. + if frameSize+stdWriterPrefixLen > bufLen { + logrus.Debugf("Extending buffer cap by %d (was %d)", frameSize+stdWriterPrefixLen-bufLen+1, len(buf)) + buf = append(buf, make([]byte, frameSize+stdWriterPrefixLen-bufLen+1)...) + bufLen = len(buf) + } + + // While the amount of bytes read is less than the size of the frame + header, we keep reading + for nr < frameSize+stdWriterPrefixLen { + var nr2 int + nr2, er = src.Read(buf[nr:]) + nr += nr2 + if er == io.EOF { + if nr < frameSize+stdWriterPrefixLen { + logrus.Debugf("Corrupted frame: %v", buf[stdWriterPrefixLen:nr]) + return written, nil + } + break + } + if er != nil { + logrus.Debugf("Error reading frame: %s", er) + return 0, er + } + } + + // Write the retrieved frame (without header) + nw, ew = out.Write(buf[stdWriterPrefixLen : frameSize+stdWriterPrefixLen]) + if ew != nil { + logrus.Debugf("Error writing frame: %s", ew) + return 0, ew + } + // If the frame has not been fully written: error + if nw != frameSize { + logrus.Debugf("Error Short Write: (%d on %d)", nw, frameSize) + return 0, io.ErrShortWrite + } + written += int64(nw) + + // Move the rest of the buffer to the beginning + copy(buf, buf[frameSize+stdWriterPrefixLen:]) + // Move the index + nr -= frameSize + stdWriterPrefixLen + } +} diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/stdcopy/stdcopy_test.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/stdcopy/stdcopy_test.go new file mode 100644 index 000000000..88d88d41e --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/stdcopy/stdcopy_test.go @@ -0,0 +1,261 @@ +package stdcopy + +import ( + "bytes" + "errors" + "io" + "io/ioutil" + "strings" + "testing" +) + +func TestNewStdWriter(t *testing.T) { + writer := NewStdWriter(ioutil.Discard, Stdout) + if writer == nil { + t.Fatalf("NewStdWriter with an invalid StdType should not return nil.") + } +} + +func TestWriteWithUnitializedStdWriter(t *testing.T) { + writer := StdWriter{ + Writer: nil, + prefix: Stdout, + sizeBuf: make([]byte, 4), + } + n, err := writer.Write([]byte("Something here")) + if n != 0 || err == nil { + t.Fatalf("Should fail when given an uncomplete or uninitialized StdWriter") + } +} + +func TestWriteWithNilBytes(t *testing.T) { + writer := NewStdWriter(ioutil.Discard, Stdout) + n, err 
:= writer.Write(nil) + if err != nil { + t.Fatalf("Shouldn't have fail when given no data") + } + if n > 0 { + t.Fatalf("Write should have written 0 byte, but has written %d", n) + } +} + +func TestWrite(t *testing.T) { + writer := NewStdWriter(ioutil.Discard, Stdout) + data := []byte("Test StdWrite.Write") + n, err := writer.Write(data) + if err != nil { + t.Fatalf("Error while writing with StdWrite") + } + if n != len(data) { + t.Fatalf("Write should have written %d byte but wrote %d.", len(data), n) + } +} + +type errWriter struct { + n int + err error +} + +func (f *errWriter) Write(buf []byte) (int, error) { + return f.n, f.err +} + +func TestWriteWithWriterError(t *testing.T) { + expectedError := errors.New("expected") + expectedReturnedBytes := 10 + writer := NewStdWriter(&errWriter{ + n: stdWriterPrefixLen + expectedReturnedBytes, + err: expectedError}, Stdout) + data := []byte("This won't get written, sigh") + n, err := writer.Write(data) + if err != expectedError { + t.Fatalf("Didn't get expected error.") + } + if n != expectedReturnedBytes { + t.Fatalf("Didn't get expected writen bytes %d, got %d.", + expectedReturnedBytes, n) + } +} + +func TestWriteDoesNotReturnNegativeWrittenBytes(t *testing.T) { + writer := NewStdWriter(&errWriter{n: -1}, Stdout) + data := []byte("This won't get written, sigh") + actual, _ := writer.Write(data) + if actual != 0 { + t.Fatalf("Expected returned written bytes equal to 0, got %d", actual) + } +} + +func getSrcBuffer(stdOutBytes, stdErrBytes []byte) (buffer *bytes.Buffer, err error) { + buffer = new(bytes.Buffer) + dstOut := NewStdWriter(buffer, Stdout) + _, err = dstOut.Write(stdOutBytes) + if err != nil { + return + } + dstErr := NewStdWriter(buffer, Stderr) + _, err = dstErr.Write(stdErrBytes) + return +} + +func TestStdCopyWriteAndRead(t *testing.T) { + stdOutBytes := []byte(strings.Repeat("o", startingBufLen)) + stdErrBytes := []byte(strings.Repeat("e", startingBufLen)) + buffer, err := getSrcBuffer(stdOutBytes, stdErrBytes) + if err != nil { + t.Fatal(err) + } + written, err := StdCopy(ioutil.Discard, ioutil.Discard, buffer) + if err != nil { + t.Fatal(err) + } + expectedTotalWritten := len(stdOutBytes) + len(stdErrBytes) + if written != int64(expectedTotalWritten) { + t.Fatalf("Expected to have total of %d bytes written, got %d", expectedTotalWritten, written) + } +} + +type customReader struct { + n int + err error + totalCalls int + correctCalls int + src *bytes.Buffer +} + +func (f *customReader) Read(buf []byte) (int, error) { + f.totalCalls++ + if f.totalCalls <= f.correctCalls { + return f.src.Read(buf) + } + return f.n, f.err +} + +func TestStdCopyReturnsErrorReadingHeader(t *testing.T) { + expectedError := errors.New("error") + reader := &customReader{ + err: expectedError} + written, err := StdCopy(ioutil.Discard, ioutil.Discard, reader) + if written != 0 { + t.Fatalf("Expected 0 bytes read, got %d", written) + } + if err != expectedError { + t.Fatalf("Didn't get expected error") + } +} + +func TestStdCopyReturnsErrorReadingFrame(t *testing.T) { + expectedError := errors.New("error") + stdOutBytes := []byte(strings.Repeat("o", startingBufLen)) + stdErrBytes := []byte(strings.Repeat("e", startingBufLen)) + buffer, err := getSrcBuffer(stdOutBytes, stdErrBytes) + if err != nil { + t.Fatal(err) + } + reader := &customReader{ + correctCalls: 1, + n: stdWriterPrefixLen + 1, + err: expectedError, + src: buffer} + written, err := StdCopy(ioutil.Discard, ioutil.Discard, reader) + if written != 0 { + t.Fatalf("Expected 0 bytes read, got 
%d", written) + } + if err != expectedError { + t.Fatalf("Didn't get expected error") + } +} + +func TestStdCopyDetectsCorruptedFrame(t *testing.T) { + stdOutBytes := []byte(strings.Repeat("o", startingBufLen)) + stdErrBytes := []byte(strings.Repeat("e", startingBufLen)) + buffer, err := getSrcBuffer(stdOutBytes, stdErrBytes) + if err != nil { + t.Fatal(err) + } + reader := &customReader{ + correctCalls: 1, + n: stdWriterPrefixLen + 1, + err: io.EOF, + src: buffer} + written, err := StdCopy(ioutil.Discard, ioutil.Discard, reader) + if written != startingBufLen { + t.Fatalf("Expected 0 bytes read, got %d", written) + } + if err != nil { + t.Fatal("Didn't get nil error") + } +} + +func TestStdCopyWithInvalidInputHeader(t *testing.T) { + dstOut := NewStdWriter(ioutil.Discard, Stdout) + dstErr := NewStdWriter(ioutil.Discard, Stderr) + src := strings.NewReader("Invalid input") + _, err := StdCopy(dstOut, dstErr, src) + if err == nil { + t.Fatal("StdCopy with invalid input header should fail.") + } +} + +func TestStdCopyWithCorruptedPrefix(t *testing.T) { + data := []byte{0x01, 0x02, 0x03} + src := bytes.NewReader(data) + written, err := StdCopy(nil, nil, src) + if err != nil { + t.Fatalf("StdCopy should not return an error with corrupted prefix.") + } + if written != 0 { + t.Fatalf("StdCopy should have written 0, but has written %d", written) + } +} + +func TestStdCopyReturnsWriteErrors(t *testing.T) { + stdOutBytes := []byte(strings.Repeat("o", startingBufLen)) + stdErrBytes := []byte(strings.Repeat("e", startingBufLen)) + buffer, err := getSrcBuffer(stdOutBytes, stdErrBytes) + if err != nil { + t.Fatal(err) + } + expectedError := errors.New("expected") + + dstOut := &errWriter{err: expectedError} + + written, err := StdCopy(dstOut, ioutil.Discard, buffer) + if written != 0 { + t.Fatalf("StdCopy should have written 0, but has written %d", written) + } + if err != expectedError { + t.Fatalf("Didn't get expected error, got %v", err) + } +} + +func TestStdCopyDetectsNotFullyWrittenFrames(t *testing.T) { + stdOutBytes := []byte(strings.Repeat("o", startingBufLen)) + stdErrBytes := []byte(strings.Repeat("e", startingBufLen)) + buffer, err := getSrcBuffer(stdOutBytes, stdErrBytes) + if err != nil { + t.Fatal(err) + } + dstOut := &errWriter{n: startingBufLen - 10} + + written, err := StdCopy(dstOut, ioutil.Discard, buffer) + if written != 0 { + t.Fatalf("StdCopy should have return 0 written bytes, but returned %d", written) + } + if err != io.ErrShortWrite { + t.Fatalf("Didn't get expected io.ErrShortWrite error") + } +} + +func BenchmarkWrite(b *testing.B) { + w := NewStdWriter(ioutil.Discard, Stdout) + data := []byte("Test line for testing stdwriter performance\n") + data = bytes.Repeat(data, 100) + b.SetBytes(int64(len(data))) + b.ResetTimer() + for i := 0; i < b.N; i++ { + if _, err := w.Write(data); err != nil { + b.Fatal(err) + } + } +} diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/chtimes.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/chtimes.go new file mode 100644 index 000000000..acf3f566f --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/chtimes.go @@ -0,0 +1,47 @@ +package system + +import ( + "os" + "syscall" + "time" + "unsafe" +) + +var ( + maxTime time.Time +) + +func init() { + if unsafe.Sizeof(syscall.Timespec{}.Nsec) == 8 { + // This is a 64 bit timespec + // os.Chtimes limits time to the following + maxTime = time.Unix(0, 1<<63-1) 
+ } else { + // This is a 32 bit timespec + maxTime = time.Unix(1<<31-1, 0) + } +} + +// Chtimes changes the access time and modified time of a file at the given path +func Chtimes(name string, atime time.Time, mtime time.Time) error { + unixMinTime := time.Unix(0, 0) + unixMaxTime := maxTime + + // If the modified time is prior to the Unix Epoch, or after the + // end of Unix Time, os.Chtimes has undefined behavior + // default to Unix Epoch in this case, just in case + + if atime.Before(unixMinTime) || atime.After(unixMaxTime) { + atime = unixMinTime + } + + if mtime.Before(unixMinTime) || mtime.After(unixMaxTime) { + mtime = unixMinTime + } + + if err := os.Chtimes(name, atime, mtime); err != nil { + return err + } + + return nil +} diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/chtimes_test.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/chtimes_test.go new file mode 100644 index 000000000..5c87df32a --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/chtimes_test.go @@ -0,0 +1,94 @@ +package system + +import ( + "io/ioutil" + "os" + "path/filepath" + "testing" + "time" +) + +// prepareTempFile creates a temporary file in a temporary directory. +func prepareTempFile(t *testing.T) (string, string) { + dir, err := ioutil.TempDir("", "docker-system-test") + if err != nil { + t.Fatal(err) + } + + file := filepath.Join(dir, "exist") + if err := ioutil.WriteFile(file, []byte("hello"), 0644); err != nil { + t.Fatal(err) + } + return file, dir +} + +// TestChtimes tests Chtimes on a tempfile. Test only mTime, because aTime is OS dependent +func TestChtimes(t *testing.T) { + file, dir := prepareTempFile(t) + defer os.RemoveAll(dir) + + beforeUnixEpochTime := time.Unix(0, 0).Add(-100 * time.Second) + unixEpochTime := time.Unix(0, 0) + afterUnixEpochTime := time.Unix(100, 0) + unixMaxTime := maxTime + + // Test both aTime and mTime set to Unix Epoch + Chtimes(file, unixEpochTime, unixEpochTime) + + f, err := os.Stat(file) + if err != nil { + t.Fatal(err) + } + + if f.ModTime() != unixEpochTime { + t.Fatalf("Expected: %s, got: %s", unixEpochTime, f.ModTime()) + } + + // Test aTime before Unix Epoch and mTime set to Unix Epoch + Chtimes(file, beforeUnixEpochTime, unixEpochTime) + + f, err = os.Stat(file) + if err != nil { + t.Fatal(err) + } + + if f.ModTime() != unixEpochTime { + t.Fatalf("Expected: %s, got: %s", unixEpochTime, f.ModTime()) + } + + // Test aTime set to Unix Epoch and mTime before Unix Epoch + Chtimes(file, unixEpochTime, beforeUnixEpochTime) + + f, err = os.Stat(file) + if err != nil { + t.Fatal(err) + } + + if f.ModTime() != unixEpochTime { + t.Fatalf("Expected: %s, got: %s", unixEpochTime, f.ModTime()) + } + + // Test both aTime and mTime set to after Unix Epoch (valid time) + Chtimes(file, afterUnixEpochTime, afterUnixEpochTime) + + f, err = os.Stat(file) + if err != nil { + t.Fatal(err) + } + + if f.ModTime() != afterUnixEpochTime { + t.Fatalf("Expected: %s, got: %s", afterUnixEpochTime, f.ModTime()) + } + + // Test both aTime and mTime set to Unix max time + Chtimes(file, unixMaxTime, unixMaxTime) + + f, err = os.Stat(file) + if err != nil { + t.Fatal(err) + } + + if f.ModTime().Truncate(time.Second) != unixMaxTime.Truncate(time.Second) { + t.Fatalf("Expected: %s, got: %s", unixMaxTime.Truncate(time.Second), f.ModTime().Truncate(time.Second)) + } +} diff --git 
a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/chtimes_unix_test.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/chtimes_unix_test.go new file mode 100644 index 000000000..fcd594023 --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/chtimes_unix_test.go @@ -0,0 +1,91 @@ +// +build linux freebsd + +package system + +import ( + "os" + "syscall" + "testing" + "time" +) + +// TestChtimes tests Chtimes access time on a tempfile on Linux +func TestChtimesLinux(t *testing.T) { + file, dir := prepareTempFile(t) + defer os.RemoveAll(dir) + + beforeUnixEpochTime := time.Unix(0, 0).Add(-100 * time.Second) + unixEpochTime := time.Unix(0, 0) + afterUnixEpochTime := time.Unix(100, 0) + unixMaxTime := maxTime + + // Test both aTime and mTime set to Unix Epoch + Chtimes(file, unixEpochTime, unixEpochTime) + + f, err := os.Stat(file) + if err != nil { + t.Fatal(err) + } + + stat := f.Sys().(*syscall.Stat_t) + aTime := time.Unix(int64(stat.Atim.Sec), int64(stat.Atim.Nsec)) + if aTime != unixEpochTime { + t.Fatalf("Expected: %s, got: %s", unixEpochTime, aTime) + } + + // Test aTime before Unix Epoch and mTime set to Unix Epoch + Chtimes(file, beforeUnixEpochTime, unixEpochTime) + + f, err = os.Stat(file) + if err != nil { + t.Fatal(err) + } + + stat = f.Sys().(*syscall.Stat_t) + aTime = time.Unix(int64(stat.Atim.Sec), int64(stat.Atim.Nsec)) + if aTime != unixEpochTime { + t.Fatalf("Expected: %s, got: %s", unixEpochTime, aTime) + } + + // Test aTime set to Unix Epoch and mTime before Unix Epoch + Chtimes(file, unixEpochTime, beforeUnixEpochTime) + + f, err = os.Stat(file) + if err != nil { + t.Fatal(err) + } + + stat = f.Sys().(*syscall.Stat_t) + aTime = time.Unix(int64(stat.Atim.Sec), int64(stat.Atim.Nsec)) + if aTime != unixEpochTime { + t.Fatalf("Expected: %s, got: %s", unixEpochTime, aTime) + } + + // Test both aTime and mTime set to after Unix Epoch (valid time) + Chtimes(file, afterUnixEpochTime, afterUnixEpochTime) + + f, err = os.Stat(file) + if err != nil { + t.Fatal(err) + } + + stat = f.Sys().(*syscall.Stat_t) + aTime = time.Unix(int64(stat.Atim.Sec), int64(stat.Atim.Nsec)) + if aTime != afterUnixEpochTime { + t.Fatalf("Expected: %s, got: %s", afterUnixEpochTime, aTime) + } + + // Test both aTime and mTime set to Unix max time + Chtimes(file, unixMaxTime, unixMaxTime) + + f, err = os.Stat(file) + if err != nil { + t.Fatal(err) + } + + stat = f.Sys().(*syscall.Stat_t) + aTime = time.Unix(int64(stat.Atim.Sec), int64(stat.Atim.Nsec)) + if aTime.Truncate(time.Second) != unixMaxTime.Truncate(time.Second) { + t.Fatalf("Expected: %s, got: %s", unixMaxTime.Truncate(time.Second), aTime.Truncate(time.Second)) + } +} diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/chtimes_windows_test.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/chtimes_windows_test.go new file mode 100644 index 000000000..be57558e1 --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/chtimes_windows_test.go @@ -0,0 +1,86 @@ +// +build windows + +package system + +import ( + "os" + "syscall" + "testing" + "time" +) + +// TestChtimes tests Chtimes access time on a tempfile on Windows +func TestChtimesWindows(t *testing.T) { + file, dir := prepareTempFile(t) + defer os.RemoveAll(dir) + + beforeUnixEpochTime := time.Unix(0, 0).Add(-100 * time.Second) + 
unixEpochTime := time.Unix(0, 0) + afterUnixEpochTime := time.Unix(100, 0) + unixMaxTime := maxTime + + // Test both aTime and mTime set to Unix Epoch + Chtimes(file, unixEpochTime, unixEpochTime) + + f, err := os.Stat(file) + if err != nil { + t.Fatal(err) + } + + aTime := time.Unix(0, f.Sys().(*syscall.Win32FileAttributeData).LastAccessTime.Nanoseconds()) + if aTime != unixEpochTime { + t.Fatalf("Expected: %s, got: %s", unixEpochTime, aTime) + } + + // Test aTime before Unix Epoch and mTime set to Unix Epoch + Chtimes(file, beforeUnixEpochTime, unixEpochTime) + + f, err = os.Stat(file) + if err != nil { + t.Fatal(err) + } + + aTime = time.Unix(0, f.Sys().(*syscall.Win32FileAttributeData).LastAccessTime.Nanoseconds()) + if aTime != unixEpochTime { + t.Fatalf("Expected: %s, got: %s", unixEpochTime, aTime) + } + + // Test aTime set to Unix Epoch and mTime before Unix Epoch + Chtimes(file, unixEpochTime, beforeUnixEpochTime) + + f, err = os.Stat(file) + if err != nil { + t.Fatal(err) + } + + aTime = time.Unix(0, f.Sys().(*syscall.Win32FileAttributeData).LastAccessTime.Nanoseconds()) + if aTime != unixEpochTime { + t.Fatalf("Expected: %s, got: %s", unixEpochTime, aTime) + } + + // Test both aTime and mTime set to after Unix Epoch (valid time) + Chtimes(file, afterUnixEpochTime, afterUnixEpochTime) + + f, err = os.Stat(file) + if err != nil { + t.Fatal(err) + } + + aTime = time.Unix(0, f.Sys().(*syscall.Win32FileAttributeData).LastAccessTime.Nanoseconds()) + if aTime != afterUnixEpochTime { + t.Fatalf("Expected: %s, got: %s", afterUnixEpochTime, aTime) + } + + // Test both aTime and mTime set to Unix max time + Chtimes(file, unixMaxTime, unixMaxTime) + + f, err = os.Stat(file) + if err != nil { + t.Fatal(err) + } + + aTime = time.Unix(0, f.Sys().(*syscall.Win32FileAttributeData).LastAccessTime.Nanoseconds()) + if aTime.Truncate(time.Second) != unixMaxTime.Truncate(time.Second) { + t.Fatalf("Expected: %s, got: %s", unixMaxTime.Truncate(time.Second), aTime.Truncate(time.Second)) + } +} diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/errors.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/errors.go new file mode 100644 index 000000000..288318985 --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/errors.go @@ -0,0 +1,10 @@ +package system + +import ( + "errors" +) + +var ( + // ErrNotSupportedPlatform means the platform is not supported. + ErrNotSupportedPlatform = errors.New("platform and architecture is not supported") +) diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/events_windows.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/events_windows.go new file mode 100644 index 000000000..04e2de787 --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/events_windows.go @@ -0,0 +1,83 @@ +package system + +// This file implements syscalls for Win32 events which are not implemented +// in golang. + +import ( + "syscall" + "unsafe" +) + +var ( + procCreateEvent = modkernel32.NewProc("CreateEventW") + procOpenEvent = modkernel32.NewProc("OpenEventW") + procSetEvent = modkernel32.NewProc("SetEvent") + procResetEvent = modkernel32.NewProc("ResetEvent") + procPulseEvent = modkernel32.NewProc("PulseEvent") +) + +// CreateEvent implements win32 CreateEventW func in golang. It will create an event object. 
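// A hedged usage sketch; the event name is illustrative and CloseHandle
// comes from the standard syscall package:
//
//	h, err := CreateEvent(nil, false, false, "docker-example-event") // auto-reset, initially non-signaled
//	if err == nil {
//		SetEvent(h)            // signal the event
//		syscall.CloseHandle(h) // release the handle when done
//	}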
+func CreateEvent(eventAttributes *syscall.SecurityAttributes, manualReset bool, initialState bool, name string) (handle syscall.Handle, err error) { + namep, _ := syscall.UTF16PtrFromString(name) + var _p1 uint32 + if manualReset { + _p1 = 1 + } + var _p2 uint32 + if initialState { + _p2 = 1 + } + r0, _, e1 := procCreateEvent.Call(uintptr(unsafe.Pointer(eventAttributes)), uintptr(_p1), uintptr(_p2), uintptr(unsafe.Pointer(namep))) + use(unsafe.Pointer(namep)) + handle = syscall.Handle(r0) + if handle == syscall.InvalidHandle { + err = e1 + } + return +} + +// OpenEvent implements win32 OpenEventW func in golang. It opens an event object. +func OpenEvent(desiredAccess uint32, inheritHandle bool, name string) (handle syscall.Handle, err error) { + namep, _ := syscall.UTF16PtrFromString(name) + var _p1 uint32 + if inheritHandle { + _p1 = 1 + } + r0, _, e1 := procOpenEvent.Call(uintptr(desiredAccess), uintptr(_p1), uintptr(unsafe.Pointer(namep))) + use(unsafe.Pointer(namep)) + handle = syscall.Handle(r0) + if handle == syscall.InvalidHandle { + err = e1 + } + return +} + +// SetEvent implements win32 SetEvent func in golang. +func SetEvent(handle syscall.Handle) (err error) { + return setResetPulse(handle, procSetEvent) +} + +// ResetEvent implements win32 ResetEvent func in golang. +func ResetEvent(handle syscall.Handle) (err error) { + return setResetPulse(handle, procResetEvent) +} + +// PulseEvent implements win32 PulseEvent func in golang. +func PulseEvent(handle syscall.Handle) (err error) { + return setResetPulse(handle, procPulseEvent) +} + +func setResetPulse(handle syscall.Handle, proc *syscall.LazyProc) (err error) { + r0, _, _ := proc.Call(uintptr(handle)) + if r0 != 0 { + err = syscall.Errno(r0) + } + return +} + +var temp unsafe.Pointer + +// use ensures a variable is kept alive without the GC freeing while still needed +func use(p unsafe.Pointer) { + temp = p +} diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/filesys.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/filesys.go new file mode 100644 index 000000000..c14feb849 --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/filesys.go @@ -0,0 +1,19 @@ +// +build !windows + +package system + +import ( + "os" + "path/filepath" +) + +// MkdirAll creates a directory named path along with any necessary parents, +// with permission specified by attribute perm for all dir created. +func MkdirAll(path string, perm os.FileMode) error { + return os.MkdirAll(path, perm) +} + +// IsAbs is a platform-specific wrapper for filepath.IsAbs. +func IsAbs(path string) bool { + return filepath.IsAbs(path) +} diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/filesys_windows.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/filesys_windows.go new file mode 100644 index 000000000..16823d551 --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/filesys_windows.go @@ -0,0 +1,82 @@ +// +build windows + +package system + +import ( + "os" + "path/filepath" + "regexp" + "strings" + "syscall" +) + +// MkdirAll implementation that is volume path aware for Windows. 
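// A hedged sketch of the special case handled first; the volume GUID is
// made up for illustration:
//
//	MkdirAll(`\\?\Volume{2b2d4b36-80cf-11e5-b2d2-806e6f6e6963}`, 0755) // nil, touches nothing: the volume already exists
//	MkdirAll(`C:\some\nested\dir`, 0755)                               // falls through to the os.MkdirAll logic below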
+func MkdirAll(path string, perm os.FileMode) error { + if re := regexp.MustCompile(`^\\\\\?\\Volume{[a-z0-9-]+}$`); re.MatchString(path) { + return nil + } + + // The rest of this method is copied from os.MkdirAll and should be kept + // as-is to ensure compatibility. + + // Fast path: if we can tell whether path is a directory or file, stop with success or error. + dir, err := os.Stat(path) + if err == nil { + if dir.IsDir() { + return nil + } + return &os.PathError{ + Op: "mkdir", + Path: path, + Err: syscall.ENOTDIR, + } + } + + // Slow path: make sure parent exists and then call Mkdir for path. + i := len(path) + for i > 0 && os.IsPathSeparator(path[i-1]) { // Skip trailing path separator. + i-- + } + + j := i + for j > 0 && !os.IsPathSeparator(path[j-1]) { // Scan backward over element. + j-- + } + + if j > 1 { + // Create parent + err = MkdirAll(path[0:j-1], perm) + if err != nil { + return err + } + } + + // Parent now exists; invoke Mkdir and use its result. + err = os.Mkdir(path, perm) + if err != nil { + // Handle arguments like "foo/." by + // double-checking that directory doesn't exist. + dir, err1 := os.Lstat(path) + if err1 == nil && dir.IsDir() { + return nil + } + return err + } + return nil +} + +// IsAbs is a platform-specific wrapper for filepath.IsAbs. On Windows, +// golang filepath.IsAbs does not consider a path \windows\system32 as absolute +// as it doesn't start with a drive-letter/colon combination. However, in +// docker we need to verify things such as WORKDIR /windows/system32 in +// a Dockerfile (which gets translated to \windows\system32 when being processed +// by the daemon. This SHOULD be treated as absolute from a docker processing +// perspective. +func IsAbs(path string) bool { + if !filepath.IsAbs(path) { + if !strings.HasPrefix(path, string(os.PathSeparator)) { + return false + } + } + return true +} diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/lstat.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/lstat.go new file mode 100644 index 000000000..bd23c4d50 --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/lstat.go @@ -0,0 +1,19 @@ +// +build !windows + +package system + +import ( + "syscall" +) + +// Lstat takes a path to a file and returns +// a system.StatT type pertaining to that file. 
+// +// Throws an error if the file does not exist +func Lstat(path string) (*StatT, error) { + s := &syscall.Stat_t{} + if err := syscall.Lstat(path, s); err != nil { + return nil, err + } + return fromStatT(s) +} diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/lstat_unix_test.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/lstat_unix_test.go new file mode 100644 index 000000000..062cf53bf --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/lstat_unix_test.go @@ -0,0 +1,30 @@ +// +build linux freebsd + +package system + +import ( + "os" + "testing" +) + +// TestLstat tests Lstat for existing and non existing files +func TestLstat(t *testing.T) { + file, invalid, _, dir := prepareFiles(t) + defer os.RemoveAll(dir) + + statFile, err := Lstat(file) + if err != nil { + t.Fatal(err) + } + if statFile == nil { + t.Fatal("returned empty stat for existing file") + } + + statInvalid, err := Lstat(invalid) + if err == nil { + t.Fatal("did not return error for non-existing file") + } + if statInvalid != nil { + t.Fatal("returned non-nil stat for non-existing file") + } +} diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/lstat_windows.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/lstat_windows.go new file mode 100644 index 000000000..49e87eb40 --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/lstat_windows.go @@ -0,0 +1,25 @@ +// +build windows + +package system + +import ( + "os" +) + +// Lstat calls os.Lstat to get a fileinfo interface back. +// This is then copied into our own locally defined structure. +// Note the Linux version uses fromStatT to do the copy back, +// but that not strictly necessary when already in an OS specific module. +func Lstat(path string) (*StatT, error) { + fi, err := os.Lstat(path) + if err != nil { + return nil, err + } + + return &StatT{ + name: fi.Name(), + size: fi.Size(), + mode: fi.Mode(), + modTime: fi.ModTime(), + isDir: fi.IsDir()}, nil +} diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/meminfo.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/meminfo.go new file mode 100644 index 000000000..3b6e947e6 --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/meminfo.go @@ -0,0 +1,17 @@ +package system + +// MemInfo contains memory statistics of the host system. +type MemInfo struct { + // Total usable RAM (i.e. physical RAM minus a few reserved bits and the + // kernel binary code). + MemTotal int64 + + // Amount of free memory. + MemFree int64 + + // Total amount of swap space available. + SwapTotal int64 + + // Amount of swap space that is currently unused. 
+ SwapFree int64 +} diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/meminfo_linux.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/meminfo_linux.go new file mode 100644 index 000000000..c14dbf376 --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/meminfo_linux.go @@ -0,0 +1,66 @@ +package system + +import ( + "bufio" + "io" + "os" + "strconv" + "strings" + + "github.com/fsouza/go-dockerclient/external/github.com/docker/go-units" +) + +// ReadMemInfo retrieves memory statistics of the host system and returns a +// MemInfo type. +func ReadMemInfo() (*MemInfo, error) { + file, err := os.Open("/proc/meminfo") + if err != nil { + return nil, err + } + defer file.Close() + return parseMemInfo(file) +} + +// parseMemInfo parses the /proc/meminfo file into +// a MemInfo object given a io.Reader to the file. +// +// Throws error if there are problems reading from the file +func parseMemInfo(reader io.Reader) (*MemInfo, error) { + meminfo := &MemInfo{} + scanner := bufio.NewScanner(reader) + for scanner.Scan() { + // Expected format: ["MemTotal:", "1234", "kB"] + parts := strings.Fields(scanner.Text()) + + // Sanity checks: Skip malformed entries. + if len(parts) < 3 || parts[2] != "kB" { + continue + } + + // Convert to bytes. + size, err := strconv.Atoi(parts[1]) + if err != nil { + continue + } + bytes := int64(size) * units.KiB + + switch parts[0] { + case "MemTotal:": + meminfo.MemTotal = bytes + case "MemFree:": + meminfo.MemFree = bytes + case "SwapTotal:": + meminfo.SwapTotal = bytes + case "SwapFree:": + meminfo.SwapFree = bytes + } + + } + + // Handle errors that may have occurred during the reading of the file. 
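// (For a well-formed line such as "MemTotal: 2048 kB" the loop above stores
// 2048 * units.KiB = 2097152 bytes; a clean EOF leaves scanner.Err() nil, so
// only genuine read failures are reported below.)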
+ if err := scanner.Err(); err != nil { + return nil, err + } + + return meminfo, nil +} diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/meminfo_unix_test.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/meminfo_unix_test.go new file mode 100644 index 000000000..dda048379 --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/meminfo_unix_test.go @@ -0,0 +1,40 @@ +// +build linux freebsd + +package system + +import ( + "strings" + "testing" + + "github.com/fsouza/go-dockerclient/external/github.com/docker/go-units" +) + +// TestMemInfo tests parseMemInfo with a static meminfo string +func TestMemInfo(t *testing.T) { + const input = ` + MemTotal: 1 kB + MemFree: 2 kB + SwapTotal: 3 kB + SwapFree: 4 kB + Malformed1: + Malformed2: 1 + Malformed3: 2 MB + Malformed4: X kB + ` + meminfo, err := parseMemInfo(strings.NewReader(input)) + if err != nil { + t.Fatal(err) + } + if meminfo.MemTotal != 1*units.KiB { + t.Fatalf("Unexpected MemTotal: %d", meminfo.MemTotal) + } + if meminfo.MemFree != 2*units.KiB { + t.Fatalf("Unexpected MemFree: %d", meminfo.MemFree) + } + if meminfo.SwapTotal != 3*units.KiB { + t.Fatalf("Unexpected SwapTotal: %d", meminfo.SwapTotal) + } + if meminfo.SwapFree != 4*units.KiB { + t.Fatalf("Unexpected SwapFree: %d", meminfo.SwapFree) + } +} diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/meminfo_unsupported.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/meminfo_unsupported.go new file mode 100644 index 000000000..82ddd30c1 --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/meminfo_unsupported.go @@ -0,0 +1,8 @@ +// +build !linux,!windows + +package system + +// ReadMemInfo is not supported on platforms other than linux and windows. +func ReadMemInfo() (*MemInfo, error) { + return nil, ErrNotSupportedPlatform +} diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/meminfo_windows.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/meminfo_windows.go new file mode 100644 index 000000000..d46642598 --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/meminfo_windows.go @@ -0,0 +1,44 @@ +package system + +import ( + "syscall" + "unsafe" +) + +var ( + modkernel32 = syscall.NewLazyDLL("kernel32.dll") + + procGlobalMemoryStatusEx = modkernel32.NewProc("GlobalMemoryStatusEx") +) + +// https://msdn.microsoft.com/en-us/library/windows/desktop/aa366589(v=vs.85).aspx +// https://msdn.microsoft.com/en-us/library/windows/desktop/aa366770(v=vs.85).aspx +type memorystatusex struct { + dwLength uint32 + dwMemoryLoad uint32 + ullTotalPhys uint64 + ullAvailPhys uint64 + ullTotalPageFile uint64 + ullAvailPageFile uint64 + ullTotalVirtual uint64 + ullAvailVirtual uint64 + ullAvailExtendedVirtual uint64 +} + +// ReadMemInfo retrieves memory statistics of the host system and returns a +// MemInfo type. 
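// A hedged note on the literal below: GlobalMemoryStatusEx requires dwLength
// to hold the structure's size before the call, and memorystatusex above is
// 2*4 + 7*8 = 64 bytes. Note also that a failed call (r1 == 0) yields an
// empty MemInfo and a nil error, as the vendored code is written.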
+func ReadMemInfo() (*MemInfo, error) { + msi := &memorystatusex{ + dwLength: 64, + } + r1, _, _ := procGlobalMemoryStatusEx.Call(uintptr(unsafe.Pointer(msi))) + if r1 == 0 { + return &MemInfo{}, nil + } + return &MemInfo{ + MemTotal: int64(msi.ullTotalPhys), + MemFree: int64(msi.ullAvailPhys), + SwapTotal: int64(msi.ullTotalPageFile), + SwapFree: int64(msi.ullAvailPageFile), + }, nil +} diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/mknod.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/mknod.go new file mode 100644 index 000000000..73958182b --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/mknod.go @@ -0,0 +1,22 @@ +// +build !windows + +package system + +import ( + "syscall" +) + +// Mknod creates a filesystem node (file, device special file or named pipe) named path +// with attributes specified by mode and dev. +func Mknod(path string, mode uint32, dev int) error { + return syscall.Mknod(path, mode, dev) +} + +// Mkdev is used to build the value of linux devices (in /dev/) which specifies major +// and minor number of the newly created device special file. +// Linux device nodes are a bit weird due to backwards compat with 16 bit device nodes. +// They are, from low to high: the lower 8 bits of the minor, then 12 bits of the major, +// then the top 12 bits of the minor. +func Mkdev(major int64, minor int64) uint32 { + return uint32(((minor & 0xfff00) << 12) | ((major & 0xfff) << 8) | (minor & 0xff)) +} diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/mknod_windows.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/mknod_windows.go new file mode 100644 index 000000000..2e863c021 --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/mknod_windows.go @@ -0,0 +1,13 @@ +// +build windows + +package system + +// Mknod is not implemented on Windows. +func Mknod(path string, mode uint32, dev int) error { + return ErrNotSupportedPlatform +} + +// Mkdev is not implemented on Windows. +func Mkdev(major int64, minor int64) uint32 { + panic("Mkdev not implemented on Windows.") +} diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/path_unix.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/path_unix.go new file mode 100644 index 000000000..1b6cc9cbd --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/path_unix.go @@ -0,0 +1,8 @@ +// +build !windows + +package system + +// DefaultPathEnv is unix style list of directories to search for +// executables. Each directory is separated from the next by a colon +// ':' character . +const DefaultPathEnv = "/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/path_windows.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/path_windows.go new file mode 100644 index 000000000..09e7f89fe --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/path_windows.go @@ -0,0 +1,7 @@ +// +build windows + +package system + +// DefaultPathEnv is deliberately empty on Windows as the default path will be set by +// the container. 
+const DefaultPathEnv = ""
diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/stat.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/stat.go
new file mode 100644
index 000000000..087034c5e
--- /dev/null
+++ b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/stat.go
@@ -0,0 +1,53 @@
+// +build !windows
+
+package system
+
+import (
+	"syscall"
+)
+
+// StatT type contains status of a file. It contains metadata
+// like permission, owner, group, size, etc about a file.
+type StatT struct {
+	mode uint32
+	uid  uint32
+	gid  uint32
+	rdev uint64
+	size int64
+	mtim syscall.Timespec
+}
+
+// Mode returns file's permission mode.
+func (s StatT) Mode() uint32 {
+	return s.mode
+}
+
+// UID returns file's user id of owner.
+func (s StatT) UID() uint32 {
+	return s.uid
+}
+
+// GID returns file's group id of owner.
+func (s StatT) GID() uint32 {
+	return s.gid
+}
+
+// Rdev returns file's device ID (if it's special file).
+func (s StatT) Rdev() uint64 {
+	return s.rdev
+}
+
+// Size returns file's size.
+func (s StatT) Size() int64 {
+	return s.size
+}
+
+// Mtim returns file's last modification time.
+func (s StatT) Mtim() syscall.Timespec {
+	return s.mtim
+}
+
+// GetLastModification returns file's last modification time.
+func (s StatT) GetLastModification() syscall.Timespec {
+	return s.Mtim()
+}
diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/stat_freebsd.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/stat_freebsd.go
new file mode 100644
index 000000000..d0fb6f151
--- /dev/null
+++ b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/stat_freebsd.go
@@ -0,0 +1,27 @@
+package system
+
+import (
+	"syscall"
+)
+
+// fromStatT converts a syscall.Stat_t type to a system.StatT type
+func fromStatT(s *syscall.Stat_t) (*StatT, error) {
+	return &StatT{size: s.Size,
+		mode: uint32(s.Mode),
+		uid:  s.Uid,
+		gid:  s.Gid,
+		rdev: uint64(s.Rdev),
+		mtim: s.Mtimespec}, nil
+}
+
+// Stat takes a path to a file and returns
+// a system.StatT type pertaining to that file.
+//
+// Returns an error if the file does not exist
+func Stat(path string) (*StatT, error) {
+	s := &syscall.Stat_t{}
+	if err := syscall.Stat(path, s); err != nil {
+		return nil, err
+	}
+	return fromStatT(s)
+}
diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/stat_linux.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/stat_linux.go
new file mode 100644
index 000000000..8b1eded13
--- /dev/null
+++ b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/stat_linux.go
@@ -0,0 +1,33 @@
+package system
+
+import (
+	"syscall"
+)
+
+// fromStatT converts a syscall.Stat_t type to a system.StatT type
+func fromStatT(s *syscall.Stat_t) (*StatT, error) {
+	return &StatT{size: s.Size,
+		mode: s.Mode,
+		uid:  s.Uid,
+		gid:  s.Gid,
+		rdev: s.Rdev,
+		mtim: s.Mtim}, nil
+}
+
+// FromStatT exists only on linux, and loads a system.StatT from a
+// syscall.Stat_t.
+func FromStatT(s *syscall.Stat_t) (*StatT, error) {
+	return fromStatT(s)
+}
+
+// Stat takes a path to a file and returns
+// a system.StatT type pertaining to that file.
+//
+// Returns an error if the file does not exist
+func Stat(path string) (*StatT, error) {
+	s := &syscall.Stat_t{}
+	if err := syscall.Stat(path, s); err != nil {
+		return nil, err
+	}
+	return fromStatT(s)
+}
diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/stat_solaris.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/stat_solaris.go
new file mode 100644
index 000000000..b01d08acf
--- /dev/null
+++ b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/stat_solaris.go
@@ -0,0 +1,17 @@
+// +build solaris
+
+package system
+
+import (
+	"syscall"
+)
+
+// fromStatT creates a system.StatT type from a syscall.Stat_t type
+func fromStatT(s *syscall.Stat_t) (*StatT, error) {
+	return &StatT{size: s.Size,
+		mode: uint32(s.Mode),
+		uid:  s.Uid,
+		gid:  s.Gid,
+		rdev: uint64(s.Rdev),
+		mtim: s.Mtim}, nil
+}
diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/stat_unix_test.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/stat_unix_test.go
new file mode 100644
index 000000000..dee8d30a1
--- /dev/null
+++ b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/stat_unix_test.go
@@ -0,0 +1,41 @@
+// +build linux freebsd
+
+package system
+
+import (
+	"os"
+	"syscall"
+	"testing"
+)
+
+// TestFromStatT tests fromStatT for a tempfile
+func TestFromStatT(t *testing.T) {
+	file, _, _, dir := prepareFiles(t)
+	defer os.RemoveAll(dir)
+
+	stat := &syscall.Stat_t{}
+	if err := syscall.Lstat(file, stat); err != nil {
+		t.Fatal(err)
+	}
+
+	s, err := fromStatT(stat)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if stat.Mode != s.Mode() {
+		t.Fatal("got invalid mode")
+	}
+	if stat.Uid != s.UID() {
+		t.Fatal("got invalid uid")
+	}
+	if stat.Gid != s.GID() {
+		t.Fatal("got invalid gid")
+	}
+	if stat.Rdev != s.Rdev() {
+		t.Fatal("got invalid rdev")
+	}
+	if stat.Mtim != s.Mtim() {
+		t.Fatal("got invalid mtim")
+	}
+}
diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/stat_unsupported.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/stat_unsupported.go
new file mode 100644
index 000000000..c6075d4ff
--- /dev/null
+++ b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/stat_unsupported.go
@@ -0,0 +1,17 @@
+// +build !linux,!windows,!freebsd,!solaris
+
+package system
+
+import (
+	"syscall"
+)
+
+// fromStatT creates a system.StatT type from a syscall.Stat_t type
+func fromStatT(s *syscall.Stat_t) (*StatT, error) {
+	return &StatT{size: s.Size,
+		mode: uint32(s.Mode),
+		uid:  s.Uid,
+		gid:  s.Gid,
+		rdev: uint64(s.Rdev),
+		mtim: s.Mtimespec}, nil
+}
diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/stat_windows.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/stat_windows.go
new file mode 100644
index 000000000..39490c625
--- /dev/null
+++ b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/stat_windows.go
@@ -0,0 +1,43 @@
+// +build windows
+
+package system
+
+import (
+	"os"
+	"time"
+)
+
+// StatT type contains status of a file. It contains metadata
+// like name, permission, size, etc about a file.
+type StatT struct {
+	name    string
+	size    int64
+	mode    os.FileMode
+	modTime time.Time
+	isDir   bool
+}
+
+// Name returns file's name.
+func (s StatT) Name() string {
+	return s.name
+}
+
+// Size returns file's size.
+func (s StatT) Size() int64 {
+	return s.size
+}
+
+// Mode returns file's permission mode.
+func (s StatT) Mode() os.FileMode {
+	return s.mode
+}
+
+// ModTime returns file's last modification time.
+func (s StatT) ModTime() time.Time {
+	return s.modTime
+}
+
+// IsDir returns whether file is actually a directory.
+func (s StatT) IsDir() bool {
+	return s.isDir
+}
diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/syscall_unix.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/syscall_unix.go
new file mode 100644
index 000000000..f1497c587
--- /dev/null
+++ b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/syscall_unix.go
@@ -0,0 +1,11 @@
+// +build linux freebsd
+
+package system
+
+import "syscall"
+
+// Unmount is a platform-specific helper function to call
+// the unmount syscall.
+func Unmount(dest string) error {
+	return syscall.Unmount(dest, 0)
+}
diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/syscall_windows.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/syscall_windows.go
new file mode 100644
index 000000000..273aa234b
--- /dev/null
+++ b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/syscall_windows.go
@@ -0,0 +1,36 @@
+package system
+
+import (
+	"fmt"
+	"syscall"
+)
+
+// OSVersion is a wrapper for Windows version information
+// https://msdn.microsoft.com/en-us/library/windows/desktop/ms724439(v=vs.85).aspx
+type OSVersion struct {
+	Version      uint32
+	MajorVersion uint8
+	MinorVersion uint8
+	Build        uint16
+}
+
+// GetOSVersion gets the operating system version on Windows. Note that
+// docker.exe must be manifested to get the correct version information.
+func GetOSVersion() (OSVersion, error) {
+	var err error
+	osv := OSVersion{}
+	osv.Version, err = syscall.GetVersion()
+	if err != nil {
+		return osv, fmt.Errorf("Failed to call GetVersion()")
+	}
+	osv.MajorVersion = uint8(osv.Version & 0xFF)
+	osv.MinorVersion = uint8(osv.Version >> 8 & 0xFF)
+	osv.Build = uint16(osv.Version >> 16)
+	return osv, nil
+}
+
+// Unmount is a platform-specific helper function to call
+// the unmount syscall. It is a no-op on Windows, where there is
+// nothing to unmount.
+func Unmount(dest string) error {
+	return nil
+}
diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/umask.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/umask.go
new file mode 100644
index 000000000..c670fcd75
--- /dev/null
+++ b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/umask.go
@@ -0,0 +1,13 @@
+// +build !windows
+
+package system
+
+import (
+	"syscall"
+)
+
+// Umask sets the current process's file mode creation mask to newmask
+// and returns the old mask.
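+//
+// A typical save-and-restore sketch (illustrative only):
+//
+//	old, _ := Umask(0022)
+//	defer Umask(old)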
+func Umask(newmask int) (oldmask int, err error) {
+	return syscall.Umask(newmask), nil
+}
diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/umask_windows.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/umask_windows.go
new file mode 100644
index 000000000..13f1de176
--- /dev/null
+++ b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/umask_windows.go
@@ -0,0 +1,9 @@
+// +build windows
+
+package system
+
+// Umask is not supported on the windows platform.
+func Umask(newmask int) (oldmask int, err error) {
+	// should not be called on cli code path
+	return 0, ErrNotSupportedPlatform
+}
diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/utimes_darwin.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/utimes_darwin.go
new file mode 100644
index 000000000..0a1619754
--- /dev/null
+++ b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/utimes_darwin.go
@@ -0,0 +1,8 @@
+package system
+
+import "syscall"
+
+// LUtimesNano is not supported on the darwin platform.
+func LUtimesNano(path string, ts []syscall.Timespec) error {
+	return ErrNotSupportedPlatform
+}
diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/utimes_freebsd.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/utimes_freebsd.go
new file mode 100644
index 000000000..e2eac3b55
--- /dev/null
+++ b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/utimes_freebsd.go
@@ -0,0 +1,22 @@
+package system
+
+import (
+	"syscall"
+	"unsafe"
+)
+
+// LUtimesNano is used to change the access and modification times of the specified path.
+// It is used for symlinks because syscall.UtimesNano does not support a NOFOLLOW flag at the moment.
+func LUtimesNano(path string, ts []syscall.Timespec) error {
+	var _path *byte
+	_path, err := syscall.BytePtrFromString(path)
+	if err != nil {
+		return err
+	}
+
+	if _, _, err := syscall.Syscall(syscall.SYS_LUTIMES, uintptr(unsafe.Pointer(_path)), uintptr(unsafe.Pointer(&ts[0])), 0); err != 0 && err != syscall.ENOSYS {
+		return err
+	}
+
+	return nil
+}
diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/utimes_linux.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/utimes_linux.go
new file mode 100644
index 000000000..fc8a1aba9
--- /dev/null
+++ b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/utimes_linux.go
@@ -0,0 +1,26 @@
+package system
+
+import (
+	"syscall"
+	"unsafe"
+)
+
+// LUtimesNano is used to change the access and modification times of the specified path.
+// It is used for symlinks because syscall.UtimesNano does not support a NOFOLLOW flag at the moment.
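+//
+// ts is expected to hold two entries, the access time followed by the
+// modification time, mirroring utimensat(2). A rough sketch, assuming the
+// caller imports time and syscall:
+//
+//	now := syscall.NsecToTimespec(time.Now().UnixNano())
+//	err := LUtimesNano("/path/to/symlink", []syscall.Timespec{now, now})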
+func LUtimesNano(path string, ts []syscall.Timespec) error { + // These are not currently available in syscall + atFdCwd := -100 + atSymLinkNoFollow := 0x100 + + var _path *byte + _path, err := syscall.BytePtrFromString(path) + if err != nil { + return err + } + + if _, _, err := syscall.Syscall6(syscall.SYS_UTIMENSAT, uintptr(atFdCwd), uintptr(unsafe.Pointer(_path)), uintptr(unsafe.Pointer(&ts[0])), uintptr(atSymLinkNoFollow), 0, 0); err != 0 && err != syscall.ENOSYS { + return err + } + + return nil +} diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/utimes_unix_test.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/utimes_unix_test.go new file mode 100644 index 000000000..1ee0d099f --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/utimes_unix_test.go @@ -0,0 +1,68 @@ +// +build linux freebsd + +package system + +import ( + "io/ioutil" + "os" + "path/filepath" + "syscall" + "testing" +) + +// prepareFiles creates files for testing in the temp directory +func prepareFiles(t *testing.T) (string, string, string, string) { + dir, err := ioutil.TempDir("", "docker-system-test") + if err != nil { + t.Fatal(err) + } + + file := filepath.Join(dir, "exist") + if err := ioutil.WriteFile(file, []byte("hello"), 0644); err != nil { + t.Fatal(err) + } + + invalid := filepath.Join(dir, "doesnt-exist") + + symlink := filepath.Join(dir, "symlink") + if err := os.Symlink(file, symlink); err != nil { + t.Fatal(err) + } + + return file, invalid, symlink, dir +} + +func TestLUtimesNano(t *testing.T) { + file, invalid, symlink, dir := prepareFiles(t) + defer os.RemoveAll(dir) + + before, err := os.Stat(file) + if err != nil { + t.Fatal(err) + } + + ts := []syscall.Timespec{{0, 0}, {0, 0}} + if err := LUtimesNano(symlink, ts); err != nil { + t.Fatal(err) + } + + symlinkInfo, err := os.Lstat(symlink) + if err != nil { + t.Fatal(err) + } + if before.ModTime().Unix() == symlinkInfo.ModTime().Unix() { + t.Fatal("The modification time of the symlink should be different") + } + + fileInfo, err := os.Stat(file) + if err != nil { + t.Fatal(err) + } + if before.ModTime().Unix() != fileInfo.ModTime().Unix() { + t.Fatal("The modification time of the file should be same") + } + + if err := LUtimesNano(invalid, ts); err == nil { + t.Fatal("Doesn't return an error on a non-existing file") + } +} diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/utimes_unsupported.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/utimes_unsupported.go new file mode 100644 index 000000000..50c3a0436 --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/utimes_unsupported.go @@ -0,0 +1,10 @@ +// +build !linux,!freebsd,!darwin + +package system + +import "syscall" + +// LUtimesNano is not supported on platforms other than linux, freebsd and darwin. 
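+//
+// Portable callers can branch on the sentinel error, roughly:
+//
+//	if err := LUtimesNano(path, ts); err == ErrNotSupportedPlatform {
+//		// fall back or skip on this platform
+//	}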
+func LUtimesNano(path string, ts []syscall.Timespec) error {
+	return ErrNotSupportedPlatform
+}
diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/xattrs_linux.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/xattrs_linux.go
new file mode 100644
index 000000000..d2e2c0579
--- /dev/null
+++ b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/xattrs_linux.go
@@ -0,0 +1,63 @@
+package system
+
+import (
+	"syscall"
+	"unsafe"
+)
+
+// Lgetxattr retrieves the value of the extended attribute identified by attr
+// and associated with the given path in the file system.
+// It returns a nil slice and a nil error if the xattr is not set.
+func Lgetxattr(path string, attr string) ([]byte, error) {
+	pathBytes, err := syscall.BytePtrFromString(path)
+	if err != nil {
+		return nil, err
+	}
+	attrBytes, err := syscall.BytePtrFromString(attr)
+	if err != nil {
+		return nil, err
+	}
+
+	dest := make([]byte, 128)
+	destBytes := unsafe.Pointer(&dest[0])
+	sz, _, errno := syscall.Syscall6(syscall.SYS_LGETXATTR, uintptr(unsafe.Pointer(pathBytes)), uintptr(unsafe.Pointer(attrBytes)), uintptr(destBytes), uintptr(len(dest)), 0, 0)
+	if errno == syscall.ENODATA {
+		return nil, nil
+	}
+	if errno == syscall.ERANGE {
+		dest = make([]byte, sz)
+		destBytes := unsafe.Pointer(&dest[0])
+		sz, _, errno = syscall.Syscall6(syscall.SYS_LGETXATTR, uintptr(unsafe.Pointer(pathBytes)), uintptr(unsafe.Pointer(attrBytes)), uintptr(destBytes), uintptr(len(dest)), 0, 0)
+	}
+	if errno != 0 {
+		return nil, errno
+	}
+
+	return dest[:sz], nil
+}
+
+var _zero uintptr
+
+// Lsetxattr sets the value of the extended attribute identified by attr
+// and associated with the given path in the file system.
+func Lsetxattr(path string, attr string, data []byte, flags int) error {
+	pathBytes, err := syscall.BytePtrFromString(path)
+	if err != nil {
+		return err
+	}
+	attrBytes, err := syscall.BytePtrFromString(attr)
+	if err != nil {
+		return err
+	}
+	var dataBytes unsafe.Pointer
+	if len(data) > 0 {
+		dataBytes = unsafe.Pointer(&data[0])
+	} else {
+		dataBytes = unsafe.Pointer(&_zero)
+	}
+	_, _, errno := syscall.Syscall6(syscall.SYS_LSETXATTR, uintptr(unsafe.Pointer(pathBytes)), uintptr(unsafe.Pointer(attrBytes)), uintptr(dataBytes), uintptr(len(data)), uintptr(flags), 0)
+	if errno != 0 {
+		return errno
+	}
+	return nil
+}
diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/xattrs_unsupported.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/xattrs_unsupported.go
new file mode 100644
index 000000000..0114f2227
--- /dev/null
+++ b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/xattrs_unsupported.go
@@ -0,0 +1,13 @@
+// +build !linux
+
+package system
+
+// Lgetxattr is not supported on platforms other than linux.
+func Lgetxattr(path string, attr string) ([]byte, error) {
+	return nil, ErrNotSupportedPlatform
+}
+
+// Lsetxattr is not supported on platforms other than linux.
+func Lsetxattr(path string, attr string, data []byte, flags int) error {
+	return ErrNotSupportedPlatform
+}
diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/go-units/CONTRIBUTING.md b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/go-units/CONTRIBUTING.md
new file mode 100644
index 000000000..9ea86d784
--- /dev/null
+++ b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/go-units/CONTRIBUTING.md
@@ -0,0 +1,67 @@
+# Contributing to go-units
+
+Want to hack on go-units? Awesome! Here are instructions to get you started.
+
+go-units is a part of the [Docker](https://www.docker.com) project, and follows
+the same rules and principles. If you're already familiar with the way
+Docker does things, you'll feel right at home.
+
+Otherwise, go read Docker's
+[contributions guidelines](https://github.com/docker/docker/blob/master/CONTRIBUTING.md),
+[issue triaging](https://github.com/docker/docker/blob/master/project/ISSUE-TRIAGE.md),
+[review process](https://github.com/docker/docker/blob/master/project/REVIEWING.md) and
+[branches and tags](https://github.com/docker/docker/blob/master/project/BRANCHES-AND-TAGS.md).
+
+### Sign your work
+
+The sign-off is a simple line at the end of the explanation for the patch. Your
+signature certifies that you wrote the patch or otherwise have the right to pass
+it on as an open-source patch. The rules are pretty simple: if you can certify
+the below (from [developercertificate.org](http://developercertificate.org/)):
+
+```
+Developer Certificate of Origin
+Version 1.1
+
+Copyright (C) 2004, 2006 The Linux Foundation and its contributors.
+660 York Street, Suite 102,
+San Francisco, CA 94110 USA
+
+Everyone is permitted to copy and distribute verbatim copies of this
+license document, but changing it is not allowed.
+
+Developer's Certificate of Origin 1.1
+
+By making a contribution to this project, I certify that:
+
+(a) The contribution was created in whole or in part by me and I
+    have the right to submit it under the open source license
+    indicated in the file; or
+
+(b) The contribution is based upon previous work that, to the best
+    of my knowledge, is covered under an appropriate open source
+    license and I have the right under that license to submit that
+    work with modifications, whether created in whole or in part
+    by me, under the same open source license (unless I am
+    permitted to submit under a different license), as indicated
+    in the file; or
+
+(c) The contribution was provided directly to me by some other
+    person who certified (a), (b) or (c) and I have not modified
+    it.
+
+(d) I understand and agree that this project and the contribution
+    are public and that a record of the contribution (including all
+    personal information I submit with it, including my sign-off) is
+    maintained indefinitely and may be redistributed consistent with
+    this project or the open source license(s) involved.
+```
+
+Then you just add a line to every git commit message:
+
+    Signed-off-by: Joe Smith <joe.smith@email.com>
+
+Use your real name (sorry, no pseudonyms or anonymous contributions.)
+
+If you set your `user.name` and `user.email` git configs, you can sign your
+commit automatically with `git commit -s`.
diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/go-units/LICENSE.code b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/go-units/LICENSE.code new file mode 100644 index 000000000..b55b37bc3 --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/go-units/LICENSE.code @@ -0,0 +1,191 @@ + + Apache License + Version 2.0, January 2004 + https://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." 
+ + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + Copyright 2015 Docker, Inc. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/go-units/LICENSE.docs b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/go-units/LICENSE.docs new file mode 100644 index 000000000..e26cd4fc8 --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/go-units/LICENSE.docs @@ -0,0 +1,425 @@ +Attribution-ShareAlike 4.0 International + +======================================================================= + +Creative Commons Corporation ("Creative Commons") is not a law firm and +does not provide legal services or legal advice. Distribution of +Creative Commons public licenses does not create a lawyer-client or +other relationship. Creative Commons makes its licenses and related +information available on an "as-is" basis. Creative Commons gives no +warranties regarding its licenses, any material licensed under their +terms and conditions, or any related information. Creative Commons +disclaims all liability for damages resulting from their use to the +fullest extent possible. + +Using Creative Commons Public Licenses + +Creative Commons public licenses provide a standard set of terms and +conditions that creators and other rights holders may use to share +original works of authorship and other material subject to copyright +and certain other rights specified in the public license below. The +following considerations are for informational purposes only, are not +exhaustive, and do not form part of our licenses. + + Considerations for licensors: Our public licenses are + intended for use by those authorized to give the public + permission to use material in ways otherwise restricted by + copyright and certain other rights. Our licenses are + irrevocable. Licensors should read and understand the terms + and conditions of the license they choose before applying it. + Licensors should also secure all rights necessary before + applying our licenses so that the public can reuse the + material as expected. Licensors should clearly mark any + material not subject to the license. This includes other CC- + licensed material, or material used under an exception or + limitation to copyright. More considerations for licensors: + wiki.creativecommons.org/Considerations_for_licensors + + Considerations for the public: By using one of our public + licenses, a licensor grants the public permission to use the + licensed material under specified terms and conditions. If + the licensor's permission is not necessary for any reason--for + example, because of any applicable exception or limitation to + copyright--then that use is not regulated by the license. Our + licenses grant only permissions under copyright and certain + other rights that a licensor has authority to grant. Use of + the licensed material may still be restricted for other + reasons, including because others have copyright or other + rights in the material. A licensor may make special requests, + such as asking that all changes be marked or described. + Although not required by our licenses, you are encouraged to + respect those requests where reasonable. 
More_considerations + for the public: + wiki.creativecommons.org/Considerations_for_licensees + +======================================================================= + +Creative Commons Attribution-ShareAlike 4.0 International Public +License + +By exercising the Licensed Rights (defined below), You accept and agree +to be bound by the terms and conditions of this Creative Commons +Attribution-ShareAlike 4.0 International Public License ("Public +License"). To the extent this Public License may be interpreted as a +contract, You are granted the Licensed Rights in consideration of Your +acceptance of these terms and conditions, and the Licensor grants You +such rights in consideration of benefits the Licensor receives from +making the Licensed Material available under these terms and +conditions. + + +Section 1 -- Definitions. + + a. Adapted Material means material subject to Copyright and Similar + Rights that is derived from or based upon the Licensed Material + and in which the Licensed Material is translated, altered, + arranged, transformed, or otherwise modified in a manner requiring + permission under the Copyright and Similar Rights held by the + Licensor. For purposes of this Public License, where the Licensed + Material is a musical work, performance, or sound recording, + Adapted Material is always produced where the Licensed Material is + synched in timed relation with a moving image. + + b. Adapter's License means the license You apply to Your Copyright + and Similar Rights in Your contributions to Adapted Material in + accordance with the terms and conditions of this Public License. + + c. BY-SA Compatible License means a license listed at + creativecommons.org/compatiblelicenses, approved by Creative + Commons as essentially the equivalent of this Public License. + + d. Copyright and Similar Rights means copyright and/or similar rights + closely related to copyright including, without limitation, + performance, broadcast, sound recording, and Sui Generis Database + Rights, without regard to how the rights are labeled or + categorized. For purposes of this Public License, the rights + specified in Section 2(b)(1)-(2) are not Copyright and Similar + Rights. + + e. Effective Technological Measures means those measures that, in the + absence of proper authority, may not be circumvented under laws + fulfilling obligations under Article 11 of the WIPO Copyright + Treaty adopted on December 20, 1996, and/or similar international + agreements. + + f. Exceptions and Limitations means fair use, fair dealing, and/or + any other exception or limitation to Copyright and Similar Rights + that applies to Your use of the Licensed Material. + + g. License Elements means the license attributes listed in the name + of a Creative Commons Public License. The License Elements of this + Public License are Attribution and ShareAlike. + + h. Licensed Material means the artistic or literary work, database, + or other material to which the Licensor applied this Public + License. + + i. Licensed Rights means the rights granted to You subject to the + terms and conditions of this Public License, which are limited to + all Copyright and Similar Rights that apply to Your use of the + Licensed Material and that the Licensor has authority to license. + + j. Licensor means the individual(s) or entity(ies) granting rights + under this Public License. + + k. 
Share means to provide material to the public by any means or + process that requires permission under the Licensed Rights, such + as reproduction, public display, public performance, distribution, + dissemination, communication, or importation, and to make material + available to the public including in ways that members of the + public may access the material from a place and at a time + individually chosen by them. + + l. Sui Generis Database Rights means rights other than copyright + resulting from Directive 96/9/EC of the European Parliament and of + the Council of 11 March 1996 on the legal protection of databases, + as amended and/or succeeded, as well as other essentially + equivalent rights anywhere in the world. + + m. You means the individual or entity exercising the Licensed Rights + under this Public License. Your has a corresponding meaning. + + +Section 2 -- Scope. + + a. License grant. + + 1. Subject to the terms and conditions of this Public License, + the Licensor hereby grants You a worldwide, royalty-free, + non-sublicensable, non-exclusive, irrevocable license to + exercise the Licensed Rights in the Licensed Material to: + + a. reproduce and Share the Licensed Material, in whole or + in part; and + + b. produce, reproduce, and Share Adapted Material. + + 2. Exceptions and Limitations. For the avoidance of doubt, where + Exceptions and Limitations apply to Your use, this Public + License does not apply, and You do not need to comply with + its terms and conditions. + + 3. Term. The term of this Public License is specified in Section + 6(a). + + 4. Media and formats; technical modifications allowed. The + Licensor authorizes You to exercise the Licensed Rights in + all media and formats whether now known or hereafter created, + and to make technical modifications necessary to do so. The + Licensor waives and/or agrees not to assert any right or + authority to forbid You from making technical modifications + necessary to exercise the Licensed Rights, including + technical modifications necessary to circumvent Effective + Technological Measures. For purposes of this Public License, + simply making modifications authorized by this Section 2(a) + (4) never produces Adapted Material. + + 5. Downstream recipients. + + a. Offer from the Licensor -- Licensed Material. Every + recipient of the Licensed Material automatically + receives an offer from the Licensor to exercise the + Licensed Rights under the terms and conditions of this + Public License. + + b. Additional offer from the Licensor -- Adapted Material. + Every recipient of Adapted Material from You + automatically receives an offer from the Licensor to + exercise the Licensed Rights in the Adapted Material + under the conditions of the Adapter's License You apply. + + c. No downstream restrictions. You may not offer or impose + any additional or different terms or conditions on, or + apply any Effective Technological Measures to, the + Licensed Material if doing so restricts exercise of the + Licensed Rights by any recipient of the Licensed + Material. + + 6. No endorsement. Nothing in this Public License constitutes or + may be construed as permission to assert or imply that You + are, or that Your use of the Licensed Material is, connected + with, or sponsored, endorsed, or granted official status by, + the Licensor or others designated to receive attribution as + provided in Section 3(a)(1)(A)(i). + + b. Other rights. + + 1. 
Moral rights, such as the right of integrity, are not + licensed under this Public License, nor are publicity, + privacy, and/or other similar personality rights; however, to + the extent possible, the Licensor waives and/or agrees not to + assert any such rights held by the Licensor to the limited + extent necessary to allow You to exercise the Licensed + Rights, but not otherwise. + + 2. Patent and trademark rights are not licensed under this + Public License. + + 3. To the extent possible, the Licensor waives any right to + collect royalties from You for the exercise of the Licensed + Rights, whether directly or through a collecting society + under any voluntary or waivable statutory or compulsory + licensing scheme. In all other cases the Licensor expressly + reserves any right to collect such royalties. + + +Section 3 -- License Conditions. + +Your exercise of the Licensed Rights is expressly made subject to the +following conditions. + + a. Attribution. + + 1. If You Share the Licensed Material (including in modified + form), You must: + + a. retain the following if it is supplied by the Licensor + with the Licensed Material: + + i. identification of the creator(s) of the Licensed + Material and any others designated to receive + attribution, in any reasonable manner requested by + the Licensor (including by pseudonym if + designated); + + ii. a copyright notice; + + iii. a notice that refers to this Public License; + + iv. a notice that refers to the disclaimer of + warranties; + + v. a URI or hyperlink to the Licensed Material to the + extent reasonably practicable; + + b. indicate if You modified the Licensed Material and + retain an indication of any previous modifications; and + + c. indicate the Licensed Material is licensed under this + Public License, and include the text of, or the URI or + hyperlink to, this Public License. + + 2. You may satisfy the conditions in Section 3(a)(1) in any + reasonable manner based on the medium, means, and context in + which You Share the Licensed Material. For example, it may be + reasonable to satisfy the conditions by providing a URI or + hyperlink to a resource that includes the required + information. + + 3. If requested by the Licensor, You must remove any of the + information required by Section 3(a)(1)(A) to the extent + reasonably practicable. + + b. ShareAlike. + + In addition to the conditions in Section 3(a), if You Share + Adapted Material You produce, the following conditions also apply. + + 1. The Adapter's License You apply must be a Creative Commons + license with the same License Elements, this version or + later, or a BY-SA Compatible License. + + 2. You must include the text of, or the URI or hyperlink to, the + Adapter's License You apply. You may satisfy this condition + in any reasonable manner based on the medium, means, and + context in which You Share Adapted Material. + + 3. You may not offer or impose any additional or different terms + or conditions on, or apply any Effective Technological + Measures to, Adapted Material that restrict exercise of the + rights granted under the Adapter's License You apply. + + +Section 4 -- Sui Generis Database Rights. + +Where the Licensed Rights include Sui Generis Database Rights that +apply to Your use of the Licensed Material: + + a. for the avoidance of doubt, Section 2(a)(1) grants You the right + to extract, reuse, reproduce, and Share all or a substantial + portion of the contents of the database; + + b. 
if You include all or a substantial portion of the database + contents in a database in which You have Sui Generis Database + Rights, then the database in which You have Sui Generis Database + Rights (but not its individual contents) is Adapted Material, + + including for purposes of Section 3(b); and + c. You must comply with the conditions in Section 3(a) if You Share + all or a substantial portion of the contents of the database. + +For the avoidance of doubt, this Section 4 supplements and does not +replace Your obligations under this Public License where the Licensed +Rights include other Copyright and Similar Rights. + + +Section 5 -- Disclaimer of Warranties and Limitation of Liability. + + a. UNLESS OTHERWISE SEPARATELY UNDERTAKEN BY THE LICENSOR, TO THE + EXTENT POSSIBLE, THE LICENSOR OFFERS THE LICENSED MATERIAL AS-IS + AND AS-AVAILABLE, AND MAKES NO REPRESENTATIONS OR WARRANTIES OF + ANY KIND CONCERNING THE LICENSED MATERIAL, WHETHER EXPRESS, + IMPLIED, STATUTORY, OR OTHER. THIS INCLUDES, WITHOUT LIMITATION, + WARRANTIES OF TITLE, MERCHANTABILITY, FITNESS FOR A PARTICULAR + PURPOSE, NON-INFRINGEMENT, ABSENCE OF LATENT OR OTHER DEFECTS, + ACCURACY, OR THE PRESENCE OR ABSENCE OF ERRORS, WHETHER OR NOT + KNOWN OR DISCOVERABLE. WHERE DISCLAIMERS OF WARRANTIES ARE NOT + ALLOWED IN FULL OR IN PART, THIS DISCLAIMER MAY NOT APPLY TO YOU. + + b. TO THE EXTENT POSSIBLE, IN NO EVENT WILL THE LICENSOR BE LIABLE + TO YOU ON ANY LEGAL THEORY (INCLUDING, WITHOUT LIMITATION, + NEGLIGENCE) OR OTHERWISE FOR ANY DIRECT, SPECIAL, INDIRECT, + INCIDENTAL, CONSEQUENTIAL, PUNITIVE, EXEMPLARY, OR OTHER LOSSES, + COSTS, EXPENSES, OR DAMAGES ARISING OUT OF THIS PUBLIC LICENSE OR + USE OF THE LICENSED MATERIAL, EVEN IF THE LICENSOR HAS BEEN + ADVISED OF THE POSSIBILITY OF SUCH LOSSES, COSTS, EXPENSES, OR + DAMAGES. WHERE A LIMITATION OF LIABILITY IS NOT ALLOWED IN FULL OR + IN PART, THIS LIMITATION MAY NOT APPLY TO YOU. + + c. The disclaimer of warranties and limitation of liability provided + above shall be interpreted in a manner that, to the extent + possible, most closely approximates an absolute disclaimer and + waiver of all liability. + + +Section 6 -- Term and Termination. + + a. This Public License applies for the term of the Copyright and + Similar Rights licensed here. However, if You fail to comply with + this Public License, then Your rights under this Public License + terminate automatically. + + b. Where Your right to use the Licensed Material has terminated under + Section 6(a), it reinstates: + + 1. automatically as of the date the violation is cured, provided + it is cured within 30 days of Your discovery of the + violation; or + + 2. upon express reinstatement by the Licensor. + + For the avoidance of doubt, this Section 6(b) does not affect any + right the Licensor may have to seek remedies for Your violations + of this Public License. + + c. For the avoidance of doubt, the Licensor may also offer the + Licensed Material under separate terms or conditions or stop + distributing the Licensed Material at any time; however, doing so + will not terminate this Public License. + + d. Sections 1, 5, 6, 7, and 8 survive termination of this Public + License. + + +Section 7 -- Other Terms and Conditions. + + a. The Licensor shall not be bound by any additional or different + terms or conditions communicated by You unless expressly agreed. + + b. 
Any arrangements, understandings, or agreements regarding the + Licensed Material not stated herein are separate from and + independent of the terms and conditions of this Public License. + + +Section 8 -- Interpretation. + + a. For the avoidance of doubt, this Public License does not, and + shall not be interpreted to, reduce, limit, restrict, or impose + conditions on any use of the Licensed Material that could lawfully + be made without permission under this Public License. + + b. To the extent possible, if any provision of this Public License is + deemed unenforceable, it shall be automatically reformed to the + minimum extent necessary to make it enforceable. If the provision + cannot be reformed, it shall be severed from this Public License + without affecting the enforceability of the remaining terms and + conditions. + + c. No term or condition of this Public License will be waived and no + failure to comply consented to unless expressly agreed to by the + Licensor. + + d. Nothing in this Public License constitutes or may be interpreted + as a limitation upon, or waiver of, any privileges and immunities + that apply to the Licensor or You, including from the legal + processes of any jurisdiction or authority. + + +======================================================================= + +Creative Commons is not a party to its public licenses. +Notwithstanding, Creative Commons may elect to apply one of its public +licenses to material it publishes and in those instances will be +considered the "Licensor." Except for the limited purpose of indicating +that material is shared under a Creative Commons public license or as +otherwise permitted by the Creative Commons policies published at +creativecommons.org/policies, Creative Commons does not authorize the +use of the trademark "Creative Commons" or any other trademark or logo +of Creative Commons without its prior written consent including, +without limitation, in connection with any unauthorized modifications +to any of its public licenses or any other arrangements, +understandings, or agreements concerning use of licensed material. For +the avoidance of doubt, this paragraph does not form part of the public +licenses. + +Creative Commons may be contacted at creativecommons.org. diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/go-units/MAINTAINERS b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/go-units/MAINTAINERS new file mode 100644 index 000000000..477be8b21 --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/go-units/MAINTAINERS @@ -0,0 +1,27 @@ +# go-connections maintainers file +# +# This file describes who runs the docker/go-connections project and how. +# This is a living document - if you see something out of date or missing, speak up! +# +# It is structured to be consumable by both humans and programs. +# To extract its contents programmatically, use any TOML-compliant parser. +# +# This file is compiled into the MAINTAINERS file in docker/opensource. +# +[Org] + [Org."Core maintainers"] + people = [ + "calavera", + ] + +[people] + +# A reference list of all people associated with the project. +# All other sections should refer to people by their canonical key +# in the people section. 
+ + # ADD YOURSELF HERE IN ALPHABETICAL ORDER + [people.calavera] + Name = "David Calavera" + Email = "david.calavera@gmail.com" + GitHub = "calavera" diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/go-units/README.md b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/go-units/README.md new file mode 100644 index 000000000..3ce4d79da --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/go-units/README.md @@ -0,0 +1,18 @@ +[![GoDoc](https://godoc.org/github.com/docker/go-units?status.svg)](https://godoc.org/github.com/docker/go-units) + +# Introduction + +go-units is a library to transform human friendly measurements into machine friendly values. + +## Usage + +See the [docs in godoc](https://godoc.org/github.com/docker/go-units) for examples and documentation. + +## Copyright and license + +Copyright © 2015 Docker, Inc. All rights reserved, except as follows. Code +is released under the Apache 2.0 license. The README.md file, and files in the +"docs" folder are licensed under the Creative Commons Attribution 4.0 +International License under the terms and conditions set forth in the file +"LICENSE.docs". You may obtain a duplicate copy of the same license, titled +CC-BY-SA-4.0, at http://creativecommons.org/licenses/by/4.0/. diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/go-units/circle.yml b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/go-units/circle.yml new file mode 100644 index 000000000..9043b3547 --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/go-units/circle.yml @@ -0,0 +1,11 @@ +dependencies: + post: + # install golint + - go get github.com/golang/lint/golint + +test: + pre: + # run analysis before tests + - go vet ./... + - test -z "$(golint ./... | tee /dev/stderr)" + - test -z "$(gofmt -s -l . | tee /dev/stderr)" diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/go-units/duration.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/go-units/duration.go new file mode 100644 index 000000000..c219a8a96 --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/go-units/duration.go @@ -0,0 +1,33 @@ +// Package units provides helper function to parse and print size and time units +// in human-readable format. +package units + +import ( + "fmt" + "time" +) + +// HumanDuration returns a human-readable approximation of a duration +// (eg. "About a minute", "4 hours ago", etc.). 
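+//
+// A few illustrative outputs, matching the cases in duration_test.go:
+//
+//	HumanDuration(47*time.Second)             // "47 seconds"
+//	HumanDuration(3*time.Minute)              // "3 minutes"
+//	HumanDuration(1*time.Hour+45*time.Minute) // "About an hour"
+//	HumanDuration(2*24*time.Hour)             // "2 days"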
+func HumanDuration(d time.Duration) string { + if seconds := int(d.Seconds()); seconds < 1 { + return "Less than a second" + } else if seconds < 60 { + return fmt.Sprintf("%d seconds", seconds) + } else if minutes := int(d.Minutes()); minutes == 1 { + return "About a minute" + } else if minutes < 60 { + return fmt.Sprintf("%d minutes", minutes) + } else if hours := int(d.Hours()); hours == 1 { + return "About an hour" + } else if hours < 48 { + return fmt.Sprintf("%d hours", hours) + } else if hours < 24*7*2 { + return fmt.Sprintf("%d days", hours/24) + } else if hours < 24*30*3 { + return fmt.Sprintf("%d weeks", hours/24/7) + } else if hours < 24*365*2 { + return fmt.Sprintf("%d months", hours/24/30) + } + return fmt.Sprintf("%d years", int(d.Hours())/24/365) +} diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/go-units/duration_test.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/go-units/duration_test.go new file mode 100644 index 000000000..63baa515b --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/go-units/duration_test.go @@ -0,0 +1,81 @@ +package units + +import ( + "fmt" + "testing" + "time" +) + +func ExampleHumanDuration() { + fmt.Println(HumanDuration(450 * time.Millisecond)) + fmt.Println(HumanDuration(47 * time.Second)) + fmt.Println(HumanDuration(1 * time.Minute)) + fmt.Println(HumanDuration(3 * time.Minute)) + fmt.Println(HumanDuration(35 * time.Minute)) + fmt.Println(HumanDuration(35*time.Minute + 40*time.Second)) + fmt.Println(HumanDuration(1 * time.Hour)) + fmt.Println(HumanDuration(1*time.Hour + 45*time.Minute)) + fmt.Println(HumanDuration(3 * time.Hour)) + fmt.Println(HumanDuration(3*time.Hour + 59*time.Minute)) + fmt.Println(HumanDuration(3*time.Hour + 60*time.Minute)) + fmt.Println(HumanDuration(24 * time.Hour)) + fmt.Println(HumanDuration(24*time.Hour + 12*time.Hour)) + fmt.Println(HumanDuration(2 * 24 * time.Hour)) + fmt.Println(HumanDuration(7 * 24 * time.Hour)) + fmt.Println(HumanDuration(13*24*time.Hour + 5*time.Hour)) + fmt.Println(HumanDuration(2 * 7 * 24 * time.Hour)) + fmt.Println(HumanDuration(2*7*24*time.Hour + 4*24*time.Hour)) + fmt.Println(HumanDuration(3 * 7 * 24 * time.Hour)) + fmt.Println(HumanDuration(4 * 7 * 24 * time.Hour)) + fmt.Println(HumanDuration(4*7*24*time.Hour + 3*24*time.Hour)) + fmt.Println(HumanDuration(1 * 30 * 24 * time.Hour)) + fmt.Println(HumanDuration(1*30*24*time.Hour + 2*7*24*time.Hour)) + fmt.Println(HumanDuration(2 * 30 * 24 * time.Hour)) + fmt.Println(HumanDuration(3*30*24*time.Hour + 1*7*24*time.Hour)) + fmt.Println(HumanDuration(5*30*24*time.Hour + 2*7*24*time.Hour)) + fmt.Println(HumanDuration(13 * 30 * 24 * time.Hour)) + fmt.Println(HumanDuration(23 * 30 * 24 * time.Hour)) + fmt.Println(HumanDuration(24 * 30 * 24 * time.Hour)) + fmt.Println(HumanDuration(24*30*24*time.Hour + 2*7*24*time.Hour)) + fmt.Println(HumanDuration(3*365*24*time.Hour + 2*30*24*time.Hour)) +} + +func TestHumanDuration(t *testing.T) { + // Useful duration abstractions + day := 24 * time.Hour + week := 7 * day + month := 30 * day + year := 365 * day + + assertEquals(t, "Less than a second", HumanDuration(450*time.Millisecond)) + assertEquals(t, "47 seconds", HumanDuration(47*time.Second)) + assertEquals(t, "About a minute", HumanDuration(1*time.Minute)) + assertEquals(t, "3 minutes", HumanDuration(3*time.Minute)) + assertEquals(t, "35 minutes", HumanDuration(35*time.Minute)) + assertEquals(t, "35 minutes", HumanDuration(35*time.Minute+40*time.Second)) + 
assertEquals(t, "About an hour", HumanDuration(1*time.Hour)) + assertEquals(t, "About an hour", HumanDuration(1*time.Hour+45*time.Minute)) + assertEquals(t, "3 hours", HumanDuration(3*time.Hour)) + assertEquals(t, "3 hours", HumanDuration(3*time.Hour+59*time.Minute)) + assertEquals(t, "4 hours", HumanDuration(3*time.Hour+60*time.Minute)) + assertEquals(t, "24 hours", HumanDuration(24*time.Hour)) + assertEquals(t, "36 hours", HumanDuration(1*day+12*time.Hour)) + assertEquals(t, "2 days", HumanDuration(2*day)) + assertEquals(t, "7 days", HumanDuration(7*day)) + assertEquals(t, "13 days", HumanDuration(13*day+5*time.Hour)) + assertEquals(t, "2 weeks", HumanDuration(2*week)) + assertEquals(t, "2 weeks", HumanDuration(2*week+4*day)) + assertEquals(t, "3 weeks", HumanDuration(3*week)) + assertEquals(t, "4 weeks", HumanDuration(4*week)) + assertEquals(t, "4 weeks", HumanDuration(4*week+3*day)) + assertEquals(t, "4 weeks", HumanDuration(1*month)) + assertEquals(t, "6 weeks", HumanDuration(1*month+2*week)) + assertEquals(t, "8 weeks", HumanDuration(2*month)) + assertEquals(t, "3 months", HumanDuration(3*month+1*week)) + assertEquals(t, "5 months", HumanDuration(5*month+2*week)) + assertEquals(t, "13 months", HumanDuration(13*month)) + assertEquals(t, "23 months", HumanDuration(23*month)) + assertEquals(t, "24 months", HumanDuration(24*month)) + assertEquals(t, "2 years", HumanDuration(24*month+2*week)) + assertEquals(t, "3 years", HumanDuration(3*year+2*month)) +} diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/go-units/size.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/go-units/size.go new file mode 100644 index 000000000..3b59daff3 --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/go-units/size.go @@ -0,0 +1,95 @@ +package units + +import ( + "fmt" + "regexp" + "strconv" + "strings" +) + +// See: http://en.wikipedia.org/wiki/Binary_prefix +const ( + // Decimal + + KB = 1000 + MB = 1000 * KB + GB = 1000 * MB + TB = 1000 * GB + PB = 1000 * TB + + // Binary + + KiB = 1024 + MiB = 1024 * KiB + GiB = 1024 * MiB + TiB = 1024 * GiB + PiB = 1024 * TiB +) + +type unitMap map[string]int64 + +var ( + decimalMap = unitMap{"k": KB, "m": MB, "g": GB, "t": TB, "p": PB} + binaryMap = unitMap{"k": KiB, "m": MiB, "g": GiB, "t": TiB, "p": PiB} + sizeRegex = regexp.MustCompile(`^(\d+)([kKmMgGtTpP])?[bB]?$`) +) + +var decimapAbbrs = []string{"B", "kB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB"} +var binaryAbbrs = []string{"B", "KiB", "MiB", "GiB", "TiB", "PiB", "EiB", "ZiB", "YiB"} + +// CustomSize returns a human-readable approximation of a size +// using custom format. +func CustomSize(format string, size float64, base float64, _map []string) string { + i := 0 + for size >= base { + size = size / base + i++ + } + return fmt.Sprintf(format, size, _map[i]) +} + +// HumanSize returns a human-readable approximation of a size +// capped at 4 valid numbers (eg. "2.746 MB", "796 KB"). +func HumanSize(size float64) string { + return CustomSize("%.4g %s", size, 1000.0, decimapAbbrs) +} + +// BytesSize returns a human-readable size in bytes, kibibytes, +// mebibytes, gibibytes, or tebibytes (eg. "44kiB", "17MiB"). +func BytesSize(size float64) string { + return CustomSize("%.4g %s", size, 1024.0, binaryAbbrs) +} + +// FromHumanSize returns an integer from a human-readable specification of a +// size using SI standard (eg. "44kB", "17MB"). 
+func FromHumanSize(size string) (int64, error) { + return parseSize(size, decimalMap) +} + +// RAMInBytes parses a human-readable string representing an amount of RAM +// in bytes, kibibytes, mebibytes, gibibytes, or tebibytes and +// returns the number of bytes, or -1 if the string is unparseable. +// Units are case-insensitive, and the 'b' suffix is optional. +func RAMInBytes(size string) (int64, error) { + return parseSize(size, binaryMap) +} + +// Parses the human-readable size string into the amount it represents. +func parseSize(sizeStr string, uMap unitMap) (int64, error) { + matches := sizeRegex.FindStringSubmatch(sizeStr) + if len(matches) != 3 { + return -1, fmt.Errorf("invalid size: '%s'", sizeStr) + } + + size, err := strconv.ParseInt(matches[1], 10, 0) + if err != nil { + return -1, err + } + + unitPrefix := strings.ToLower(matches[2]) + if mul, ok := uMap[unitPrefix]; ok { + size *= mul + } + + return size, nil +} diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/go-units/size_test.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/go-units/size_test.go new file mode 100644 index 000000000..a968f5c0d --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/go-units/size_test.go @@ -0,0 +1,160 @@ +package units + +import ( + "fmt" + "reflect" + "runtime" + "strings" + "testing" +) + +func ExampleBytesSize() { + fmt.Println(BytesSize(1024)) + fmt.Println(BytesSize(1024 * 1024)) + fmt.Println(BytesSize(1048576)) + fmt.Println(BytesSize(2 * MiB)) + fmt.Println(BytesSize(3.42 * GiB)) + fmt.Println(BytesSize(5.372 * TiB)) + fmt.Println(BytesSize(2.22 * PiB)) +} + +func ExampleHumanSize() { + fmt.Println(HumanSize(1000)) + fmt.Println(HumanSize(1024)) + fmt.Println(HumanSize(1000000)) + fmt.Println(HumanSize(1048576)) + fmt.Println(HumanSize(2 * MB)) + fmt.Println(HumanSize(float64(3.42 * GB))) + fmt.Println(HumanSize(float64(5.372 * TB))) + fmt.Println(HumanSize(float64(2.22 * PB))) +} + +func ExampleFromHumanSize() { + fmt.Println(FromHumanSize("32")) + fmt.Println(FromHumanSize("32b")) + fmt.Println(FromHumanSize("32B")) + fmt.Println(FromHumanSize("32k")) + fmt.Println(FromHumanSize("32K")) + fmt.Println(FromHumanSize("32kb")) + fmt.Println(FromHumanSize("32Kb")) + fmt.Println(FromHumanSize("32Mb")) + fmt.Println(FromHumanSize("32Gb")) + fmt.Println(FromHumanSize("32Tb")) + fmt.Println(FromHumanSize("32Pb")) +} + +func ExampleRAMInBytes() { + fmt.Println(RAMInBytes("32")) + fmt.Println(RAMInBytes("32b")) + fmt.Println(RAMInBytes("32B")) + fmt.Println(RAMInBytes("32k")) + fmt.Println(RAMInBytes("32K")) + fmt.Println(RAMInBytes("32kb")) + fmt.Println(RAMInBytes("32Kb")) + fmt.Println(RAMInBytes("32Mb")) + fmt.Println(RAMInBytes("32Gb")) + fmt.Println(RAMInBytes("32Tb")) + fmt.Println(RAMInBytes("32Pb")) + fmt.Println(RAMInBytes("32PB")) + fmt.Println(RAMInBytes("32P")) +} + +func TestBytesSize(t *testing.T) { + assertEquals(t, "1 KiB", BytesSize(1024)) + assertEquals(t, "1 MiB", BytesSize(1024*1024)) + assertEquals(t, "1 MiB", BytesSize(1048576)) + assertEquals(t, "2 MiB", BytesSize(2*MiB)) + assertEquals(t, "3.42 GiB", BytesSize(3.42*GiB)) + assertEquals(t, "5.372 TiB", BytesSize(5.372*TiB)) + assertEquals(t, "2.22 PiB", BytesSize(2.22*PiB)) +} + +func TestHumanSize(t *testing.T) { + assertEquals(t, "1 kB", HumanSize(1000)) + assertEquals(t, "1.024 kB", HumanSize(1024)) + assertEquals(t, "1 MB", HumanSize(1000000)) + assertEquals(t, "1.049 MB", HumanSize(1048576)) + assertEquals(t, "2 MB", 
HumanSize(2*MB)) + assertEquals(t, "3.42 GB", HumanSize(float64(3.42*GB))) + assertEquals(t, "5.372 TB", HumanSize(float64(5.372*TB))) + assertEquals(t, "2.22 PB", HumanSize(float64(2.22*PB))) +} + +func TestFromHumanSize(t *testing.T) { + assertSuccessEquals(t, 32, FromHumanSize, "32") + assertSuccessEquals(t, 32, FromHumanSize, "32b") + assertSuccessEquals(t, 32, FromHumanSize, "32B") + assertSuccessEquals(t, 32*KB, FromHumanSize, "32k") + assertSuccessEquals(t, 32*KB, FromHumanSize, "32K") + assertSuccessEquals(t, 32*KB, FromHumanSize, "32kb") + assertSuccessEquals(t, 32*KB, FromHumanSize, "32Kb") + assertSuccessEquals(t, 32*MB, FromHumanSize, "32Mb") + assertSuccessEquals(t, 32*GB, FromHumanSize, "32Gb") + assertSuccessEquals(t, 32*TB, FromHumanSize, "32Tb") + assertSuccessEquals(t, 32*PB, FromHumanSize, "32Pb") + + assertError(t, FromHumanSize, "") + assertError(t, FromHumanSize, "hello") + assertError(t, FromHumanSize, "-32") + assertError(t, FromHumanSize, "32.3") + assertError(t, FromHumanSize, " 32 ") + assertError(t, FromHumanSize, "32.3Kb") + assertError(t, FromHumanSize, "32 mb") + assertError(t, FromHumanSize, "32m b") + assertError(t, FromHumanSize, "32bm") +} + +func TestRAMInBytes(t *testing.T) { + assertSuccessEquals(t, 32, RAMInBytes, "32") + assertSuccessEquals(t, 32, RAMInBytes, "32b") + assertSuccessEquals(t, 32, RAMInBytes, "32B") + assertSuccessEquals(t, 32*KiB, RAMInBytes, "32k") + assertSuccessEquals(t, 32*KiB, RAMInBytes, "32K") + assertSuccessEquals(t, 32*KiB, RAMInBytes, "32kb") + assertSuccessEquals(t, 32*KiB, RAMInBytes, "32Kb") + assertSuccessEquals(t, 32*MiB, RAMInBytes, "32Mb") + assertSuccessEquals(t, 32*GiB, RAMInBytes, "32Gb") + assertSuccessEquals(t, 32*TiB, RAMInBytes, "32Tb") + assertSuccessEquals(t, 32*PiB, RAMInBytes, "32Pb") + assertSuccessEquals(t, 32*PiB, RAMInBytes, "32PB") + assertSuccessEquals(t, 32*PiB, RAMInBytes, "32P") + + assertError(t, RAMInBytes, "") + assertError(t, RAMInBytes, "hello") + assertError(t, RAMInBytes, "-32") + assertError(t, RAMInBytes, "32.3") + assertError(t, RAMInBytes, " 32 ") + assertError(t, RAMInBytes, "32.3Kb") + assertError(t, RAMInBytes, "32 mb") + assertError(t, RAMInBytes, "32m b") + assertError(t, RAMInBytes, "32bm") +} + +func assertEquals(t *testing.T, expected, actual interface{}) { + if expected != actual { + t.Errorf("Expected '%v' but got '%v'", expected, actual) + } +} + +// func that maps to the parse function signatures as testing abstraction +type parseFn func(string) (int64, error) + +// Define 'String()' for pretty-print +func (fn parseFn) String() string { + fnName := runtime.FuncForPC(reflect.ValueOf(fn).Pointer()).Name() + return fnName[strings.LastIndex(fnName, ".")+1:] +} + +func assertSuccessEquals(t *testing.T, expected int64, fn parseFn, arg string) { + res, err := fn(arg) + if err != nil || res != expected { + t.Errorf("%s(\"%s\") -> expected '%d' but got '%d' with error '%v'", fn, arg, expected, res, err) + } +} + +func assertError(t *testing.T, fn parseFn, arg string) { + res, err := fn(arg) + if err == nil && res != -1 { + t.Errorf("%s(\"%s\") -> expected error but got '%d'", fn, arg, res) + } +} diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/go-units/ulimit.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/go-units/ulimit.go new file mode 100644 index 000000000..5ac7fd825 --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/go-units/ulimit.go @@ -0,0 +1,118 @@ +package units + +import ( + "fmt" 
+ "strconv" + "strings" +) + +// Ulimit is a human friendly version of Rlimit. +type Ulimit struct { + Name string + Hard int64 + Soft int64 +} + +// Rlimit specifies the resource limits, such as max open files. +type Rlimit struct { + Type int `json:"type,omitempty"` + Hard uint64 `json:"hard,omitempty"` + Soft uint64 `json:"soft,omitempty"` +} + +const ( + // magic numbers for making the syscall + // some of these are defined in the syscall package, but not all. + // Also since Windows client doesn't get access to the syscall package, need to + // define these here + rlimitAs = 9 + rlimitCore = 4 + rlimitCPU = 0 + rlimitData = 2 + rlimitFsize = 1 + rlimitLocks = 10 + rlimitMemlock = 8 + rlimitMsgqueue = 12 + rlimitNice = 13 + rlimitNofile = 7 + rlimitNproc = 6 + rlimitRss = 5 + rlimitRtprio = 14 + rlimitRttime = 15 + rlimitSigpending = 11 + rlimitStack = 3 +) + +var ulimitNameMapping = map[string]int{ + //"as": rlimitAs, // Disabled since this doesn't seem usable with the way Docker inits a container. + "core": rlimitCore, + "cpu": rlimitCPU, + "data": rlimitData, + "fsize": rlimitFsize, + "locks": rlimitLocks, + "memlock": rlimitMemlock, + "msgqueue": rlimitMsgqueue, + "nice": rlimitNice, + "nofile": rlimitNofile, + "nproc": rlimitNproc, + "rss": rlimitRss, + "rtprio": rlimitRtprio, + "rttime": rlimitRttime, + "sigpending": rlimitSigpending, + "stack": rlimitStack, +} + +// ParseUlimit parses and returns a Ulimit from the specified string. +func ParseUlimit(val string) (*Ulimit, error) { + parts := strings.SplitN(val, "=", 2) + if len(parts) != 2 { + return nil, fmt.Errorf("invalid ulimit argument: %s", val) + } + + if _, exists := ulimitNameMapping[parts[0]]; !exists { + return nil, fmt.Errorf("invalid ulimit type: %s", parts[0]) + } + + var ( + soft int64 + hard = &soft // default to soft in case no hard was set + temp int64 + err error + ) + switch limitVals := strings.Split(parts[1], ":"); len(limitVals) { + case 2: + temp, err = strconv.ParseInt(limitVals[1], 10, 64) + if err != nil { + return nil, err + } + hard = &temp + fallthrough + case 1: + soft, err = strconv.ParseInt(limitVals[0], 10, 64) + if err != nil { + return nil, err + } + default: + return nil, fmt.Errorf("too many limit value arguments - %s, can only have up to two, `soft[:hard]`", parts[1]) + } + + if soft > *hard { + return nil, fmt.Errorf("ulimit soft limit must be less than or equal to hard limit: %d > %d", soft, *hard) + } + + return &Ulimit{Name: parts[0], Soft: soft, Hard: *hard}, nil +} + +// GetRlimit returns the RLimit corresponding to Ulimit. 
+func (u *Ulimit) GetRlimit() (*Rlimit, error) { + t, exists := ulimitNameMapping[u.Name] + if !exists { + return nil, fmt.Errorf("invalid ulimit name %s", u.Name) + } + + return &Rlimit{Type: t, Soft: uint64(u.Soft), Hard: uint64(u.Hard)}, nil +} + +func (u *Ulimit) String() string { + return fmt.Sprintf("%s=%d:%d", u.Name, u.Soft, u.Hard) +} diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/go-units/ulimit_test.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/go-units/ulimit_test.go new file mode 100644 index 000000000..3e7f10fc2 --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/go-units/ulimit_test.go @@ -0,0 +1,74 @@ +package units + +import ( + "fmt" + "strconv" + "testing" +) + +func ExampleParseUlimit() { + fmt.Println(ParseUlimit("nofile=512:1024")) + fmt.Println(ParseUlimit("nofile=1024")) + fmt.Println(ParseUlimit("cpu=2:4")) + fmt.Println(ParseUlimit("cpu=6")) +} + +func TestParseUlimitValid(t *testing.T) { + u1 := &Ulimit{"nofile", 1024, 512} + if u2, _ := ParseUlimit("nofile=512:1024"); *u1 != *u2 { + t.Fatalf("expected %q, but got %q", u1, u2) + } +} + +func TestParseUlimitInvalidLimitType(t *testing.T) { + if _, err := ParseUlimit("notarealtype=1024:1024"); err == nil { + t.Fatalf("expected error on invalid ulimit type") + } +} + +func TestParseUlimitBadFormat(t *testing.T) { + if _, err := ParseUlimit("nofile:1024:1024"); err == nil { + t.Fatal("expected error on bad syntax") + } + + if _, err := ParseUlimit("nofile"); err == nil { + t.Fatal("expected error on bad syntax") + } + + if _, err := ParseUlimit("nofile="); err == nil { + t.Fatal("expected error on bad syntax") + } + if _, err := ParseUlimit("nofile=:"); err == nil { + t.Fatal("expected error on bad syntax") + } + if _, err := ParseUlimit("nofile=:1024"); err == nil { + t.Fatal("expected error on bad syntax") + } +} + +func TestParseUlimitHardLessThanSoft(t *testing.T) { + if _, err := ParseUlimit("nofile=1024:1"); err == nil { + t.Fatal("expected error on hard limit less than soft limit") + } +} + +func TestParseUlimitInvalidValueType(t *testing.T) { + if _, err := ParseUlimit("nofile=asdf"); err == nil { + t.Fatal("expected error on bad value type, but got no error") + } else if _, ok := err.(*strconv.NumError); !ok { + t.Fatalf("expected error on bad value type, but got `%s`", err) + } + + if _, err := ParseUlimit("nofile=1024:asdf"); err == nil { + t.Fatal("expected error on bad value type, but got no error") + } else if _, ok := err.(*strconv.NumError); !ok { + t.Fatalf("expected error on bad value type, but got `%s`", err) + } +} + +func TestUlimitStringOutput(t *testing.T) { + u := &Ulimit{"nofile", 1024, 512} + if s := u.String(); s != "nofile=512:1024" { + t.Fatal("expected String to return nofile=512:1024, but got", s) + } +} diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/gorilla/context/LICENSE b/vendor/github.com/fsouza/go-dockerclient/external/github.com/gorilla/context/LICENSE new file mode 100644 index 000000000..0e5fb8728 --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/github.com/gorilla/context/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2012 Rodrigo Moraes. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. 
+ * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/gorilla/context/README.md b/vendor/github.com/fsouza/go-dockerclient/external/github.com/gorilla/context/README.md new file mode 100644 index 000000000..c60a31b05 --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/github.com/gorilla/context/README.md @@ -0,0 +1,7 @@ +context +======= +[![Build Status](https://travis-ci.org/gorilla/context.png?branch=master)](https://travis-ci.org/gorilla/context) + +gorilla/context is a general purpose registry for global request variables. + +Read the full documentation here: http://www.gorillatoolkit.org/pkg/context diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/gorilla/context/context.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/gorilla/context/context.go new file mode 100644 index 000000000..81cb128b1 --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/github.com/gorilla/context/context.go @@ -0,0 +1,143 @@ +// Copyright 2012 The Gorilla Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package context + +import ( + "net/http" + "sync" + "time" +) + +var ( + mutex sync.RWMutex + data = make(map[*http.Request]map[interface{}]interface{}) + datat = make(map[*http.Request]int64) +) + +// Set stores a value for a given key in a given request. +func Set(r *http.Request, key, val interface{}) { + mutex.Lock() + if data[r] == nil { + data[r] = make(map[interface{}]interface{}) + datat[r] = time.Now().Unix() + } + data[r][key] = val + mutex.Unlock() +} + +// Get returns a value stored for a given key in a given request. +func Get(r *http.Request, key interface{}) interface{} { + mutex.RLock() + if ctx := data[r]; ctx != nil { + value := ctx[key] + mutex.RUnlock() + return value + } + mutex.RUnlock() + return nil +} + +// GetOk returns stored value and presence state like multi-value return of map access. +func GetOk(r *http.Request, key interface{}) (interface{}, bool) { + mutex.RLock() + if _, ok := data[r]; ok { + value, ok := data[r][key] + mutex.RUnlock() + return value, ok + } + mutex.RUnlock() + return nil, false +} + +// GetAll returns all stored values for the request as a map. Nil is returned for invalid requests. 
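+//
+// The returned map is a copy, so the caller may modify it without affecting
+// the values stored for the request.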
+func GetAll(r *http.Request) map[interface{}]interface{} { + mutex.RLock() + if context, ok := data[r]; ok { + result := make(map[interface{}]interface{}, len(context)) + for k, v := range context { + result[k] = v + } + mutex.RUnlock() + return result + } + mutex.RUnlock() + return nil +} + +// GetAllOk returns all stored values for the request as a map and a boolean value that indicates if +// the request was registered. +func GetAllOk(r *http.Request) (map[interface{}]interface{}, bool) { + mutex.RLock() + context, ok := data[r] + result := make(map[interface{}]interface{}, len(context)) + for k, v := range context { + result[k] = v + } + mutex.RUnlock() + return result, ok +} + +// Delete removes a value stored for a given key in a given request. +func Delete(r *http.Request, key interface{}) { + mutex.Lock() + if data[r] != nil { + delete(data[r], key) + } + mutex.Unlock() +} + +// Clear removes all values stored for a given request. +// +// This is usually called by a handler wrapper to clean up request +// variables at the end of a request lifetime. See ClearHandler(). +func Clear(r *http.Request) { + mutex.Lock() + clear(r) + mutex.Unlock() +} + +// clear is Clear without the lock. +func clear(r *http.Request) { + delete(data, r) + delete(datat, r) +} + +// Purge removes request data stored for longer than maxAge, in seconds. +// It returns the amount of requests removed. +// +// If maxAge <= 0, all request data is removed. +// +// This is only used for sanity check: in case context cleaning was not +// properly set some request data can be kept forever, consuming an increasing +// amount of memory. In case this is detected, Purge() must be called +// periodically until the problem is fixed. +func Purge(maxAge int) int { + mutex.Lock() + count := 0 + if maxAge <= 0 { + count = len(data) + data = make(map[*http.Request]map[interface{}]interface{}) + datat = make(map[*http.Request]int64) + } else { + min := time.Now().Unix() - int64(maxAge) + for r := range data { + if datat[r] < min { + clear(r) + count++ + } + } + } + mutex.Unlock() + return count +} + +// ClearHandler wraps an http.Handler and clears request values at the end +// of a request lifetime. +func ClearHandler(h http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + defer Clear(r) + h.ServeHTTP(w, r) + }) +} diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/gorilla/context/context_test.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/gorilla/context/context_test.go new file mode 100644 index 000000000..9814c501e --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/github.com/gorilla/context/context_test.go @@ -0,0 +1,161 @@ +// Copyright 2012 The Gorilla Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package context + +import ( + "net/http" + "testing" +) + +type keyType int + +const ( + key1 keyType = iota + key2 +) + +func TestContext(t *testing.T) { + assertEqual := func(val interface{}, exp interface{}) { + if val != exp { + t.Errorf("Expected %v, got %v.", exp, val) + } + } + + r, _ := http.NewRequest("GET", "http://localhost:8080/", nil) + emptyR, _ := http.NewRequest("GET", "http://localhost:8080/", nil) + + // Get() + assertEqual(Get(r, key1), nil) + + // Set() + Set(r, key1, "1") + assertEqual(Get(r, key1), "1") + assertEqual(len(data[r]), 1) + + Set(r, key2, "2") + assertEqual(Get(r, key2), "2") + assertEqual(len(data[r]), 2) + + //GetOk + value, ok := GetOk(r, key1) + assertEqual(value, "1") + assertEqual(ok, true) + + value, ok = GetOk(r, "not exists") + assertEqual(value, nil) + assertEqual(ok, false) + + Set(r, "nil value", nil) + value, ok = GetOk(r, "nil value") + assertEqual(value, nil) + assertEqual(ok, true) + + // GetAll() + values := GetAll(r) + assertEqual(len(values), 3) + + // GetAll() for empty request + values = GetAll(emptyR) + if values != nil { + t.Error("GetAll didn't return nil value for invalid request") + } + + // GetAllOk() + values, ok = GetAllOk(r) + assertEqual(len(values), 3) + assertEqual(ok, true) + + // GetAllOk() for empty request + values, ok = GetAllOk(emptyR) + assertEqual(value, nil) + assertEqual(ok, false) + + // Delete() + Delete(r, key1) + assertEqual(Get(r, key1), nil) + assertEqual(len(data[r]), 2) + + Delete(r, key2) + assertEqual(Get(r, key2), nil) + assertEqual(len(data[r]), 1) + + // Clear() + Clear(r) + assertEqual(len(data), 0) +} + +func parallelReader(r *http.Request, key string, iterations int, wait, done chan struct{}) { + <-wait + for i := 0; i < iterations; i++ { + Get(r, key) + } + done <- struct{}{} + +} + +func parallelWriter(r *http.Request, key, value string, iterations int, wait, done chan struct{}) { + <-wait + for i := 0; i < iterations; i++ { + Set(r, key, value) + } + done <- struct{}{} + +} + +func benchmarkMutex(b *testing.B, numReaders, numWriters, iterations int) { + + b.StopTimer() + r, _ := http.NewRequest("GET", "http://localhost:8080/", nil) + done := make(chan struct{}) + b.StartTimer() + + for i := 0; i < b.N; i++ { + wait := make(chan struct{}) + + for i := 0; i < numReaders; i++ { + go parallelReader(r, "test", iterations, wait, done) + } + + for i := 0; i < numWriters; i++ { + go parallelWriter(r, "test", "123", iterations, wait, done) + } + + close(wait) + + for i := 0; i < numReaders+numWriters; i++ { + <-done + } + + } + +} + +func BenchmarkMutexSameReadWrite1(b *testing.B) { + benchmarkMutex(b, 1, 1, 32) +} +func BenchmarkMutexSameReadWrite2(b *testing.B) { + benchmarkMutex(b, 2, 2, 32) +} +func BenchmarkMutexSameReadWrite4(b *testing.B) { + benchmarkMutex(b, 4, 4, 32) +} +func BenchmarkMutex1(b *testing.B) { + benchmarkMutex(b, 2, 8, 32) +} +func BenchmarkMutex2(b *testing.B) { + benchmarkMutex(b, 16, 4, 64) +} +func BenchmarkMutex3(b *testing.B) { + benchmarkMutex(b, 1, 2, 128) +} +func BenchmarkMutex4(b *testing.B) { + benchmarkMutex(b, 128, 32, 256) +} +func BenchmarkMutex5(b *testing.B) { + benchmarkMutex(b, 1024, 2048, 64) +} +func BenchmarkMutex6(b *testing.B) { + benchmarkMutex(b, 2048, 1024, 512) +} diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/gorilla/context/doc.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/gorilla/context/doc.go new file mode 100644 index 000000000..73c740031 --- /dev/null +++ 
b/vendor/github.com/fsouza/go-dockerclient/external/github.com/gorilla/context/doc.go
@@ -0,0 +1,82 @@
+// Copyright 2012 The Gorilla Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+/*
+Package context stores values shared during a request lifetime.
+
+For example, a router can set variables extracted from the URL and later
+application handlers can access those values, or it can be used to store
+session values to be saved at the end of a request. There are several
+other common uses.
+
+The idea was posted by Brad Fitzpatrick to the go-nuts mailing list:
+
+	http://groups.google.com/group/golang-nuts/msg/e2d679d303aa5d53
+
+Here's the basic usage: first define the keys that you will need. The key
+type is interface{} so a key can be of any type that supports equality.
+Here we define a key using a custom int type to avoid name collisions:
+
+	package foo
+
+	import (
+		"github.com/gorilla/context"
+	)
+
+	type key int
+
+	const MyKey key = 0
+
+Then set a variable. Variables are bound to an http.Request object, so you
+need a request instance to set a value:
+
+	context.Set(r, MyKey, "bar")
+
+The application can later access the variable using the same key you provided:
+
+	func MyHandler(w http.ResponseWriter, r *http.Request) {
+		// val is "bar".
+		val := context.Get(r, foo.MyKey)
+
+		// returns ("bar", true)
+		val, ok := context.GetOk(r, foo.MyKey)
+		// ...
+	}
+
+And that's all about the basic usage. We discuss some other ideas below.
+
+Any type can be stored in the context. To enforce a given type, make the key
+private and wrap Get() and Set() to accept and return values of a specific
+type:
+
+	type key int
+
+	const mykey key = 0
+
+	// GetMyKey returns a value for this package from the request values.
+	func GetMyKey(r *http.Request) SomeType {
+		if rv := context.Get(r, mykey); rv != nil {
+			return rv.(SomeType)
+		}
+		return nil
+	}
+
+	// SetMyKey sets a value for this package in the request values.
+	func SetMyKey(r *http.Request, val SomeType) {
+		context.Set(r, mykey, val)
+	}
+
+Variables must be cleared at the end of a request, to remove all values
+that were stored. This can be done in an http.Handler, after a request was
+served. Just call Clear() passing the request:
+
+	context.Clear(r)
+
+...or use ClearHandler(), which conveniently wraps an http.Handler to clear
+variables at the end of a request lifetime.
+
+The Routers from the packages gorilla/mux and gorilla/pat call Clear(),
+so if you are using either of them you don't need to clear the context
+manually.
+*/
+package context
diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux/LICENSE b/vendor/github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux/LICENSE
new file mode 100644
index 000000000..0e5fb8728
--- /dev/null
+++ b/vendor/github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux/LICENSE
@@ -0,0 +1,27 @@
+Copyright (c) 2012 Rodrigo Moraes. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+	 * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+	 * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux/README.md b/vendor/github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux/README.md new file mode 100644 index 000000000..b987c9e5d --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux/README.md @@ -0,0 +1,240 @@ +mux +=== +[![GoDoc](https://godoc.org/github.com/gorilla/mux?status.svg)](https://godoc.org/github.com/gorilla/mux) +[![Build Status](https://travis-ci.org/gorilla/mux.svg?branch=master)](https://travis-ci.org/gorilla/mux) + +Package `gorilla/mux` implements a request router and dispatcher. + +The name mux stands for "HTTP request multiplexer". Like the standard `http.ServeMux`, `mux.Router` matches incoming requests against a list of registered routes and calls a handler for the route that matches the URL or other conditions. The main features are: + +* Requests can be matched based on URL host, path, path prefix, schemes, header and query values, HTTP methods or using custom matchers. +* URL hosts and paths can have variables with an optional regular expression. +* Registered URLs can be built, or "reversed", which helps maintaining references to resources. +* Routes can be used as subrouters: nested routes are only tested if the parent route matches. This is useful to define groups of routes that share common conditions like a host, a path prefix or other repeated attributes. As a bonus, this optimizes request matching. +* It implements the `http.Handler` interface so it is compatible with the standard `http.ServeMux`. + +Let's start registering a couple of URL paths and handlers: + +```go +func main() { + r := mux.NewRouter() + r.HandleFunc("/", HomeHandler) + r.HandleFunc("/products", ProductsHandler) + r.HandleFunc("/articles", ArticlesHandler) + http.Handle("/", r) +} +``` + +Here we register three routes mapping URL paths to handlers. This is equivalent to how `http.HandleFunc()` works: if an incoming request URL matches one of the paths, the corresponding handler is called passing (`http.ResponseWriter`, `*http.Request`) as parameters. + +Paths can have variables. They are defined using the format `{name}` or `{name:pattern}`. If a regular expression pattern is not defined, the matched variable will be anything until the next slash. 
For example:

```go
r := mux.NewRouter()
r.HandleFunc("/products/{key}", ProductHandler)
r.HandleFunc("/articles/{category}/", ArticlesCategoryHandler)
r.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler)
```

The names are used to create a map of route variables which can be retrieved by calling `mux.Vars()`:

```go
vars := mux.Vars(request)
category := vars["category"]
```

And this is all you need to know about the basic usage. More advanced options are explained below.

Routes can also be restricted to a domain or subdomain. Just define a host pattern to be matched. They can also have variables:

```go
r := mux.NewRouter()
// Only matches if domain is "www.example.com".
r.Host("www.example.com")
// Matches a dynamic subdomain.
r.Host("{subdomain:[a-z]+}.domain.com")
```

There are several other matchers that can be added. To match path prefixes:

```go
r.PathPrefix("/products/")
```

...or HTTP methods:

```go
r.Methods("GET", "POST")
```

...or URL schemes:

```go
r.Schemes("https")
```

...or header values:

```go
r.Headers("X-Requested-With", "XMLHttpRequest")
```

...or query values:

```go
r.Queries("key", "value")
```

...or to use a custom matcher function:

```go
r.MatcherFunc(func(r *http.Request, rm *RouteMatch) bool {
	return r.ProtoMajor == 0
})
```

...and finally, it is possible to combine several matchers in a single route:

```go
r.HandleFunc("/products", ProductsHandler).
  Host("www.example.com").
  Methods("GET").
  Schemes("http")
```

Setting the same matching conditions again and again can be boring, so we have a way to group several routes that share the same requirements. We call it "subrouting".

For example, let's say we have several URLs that should only match when the host is `www.example.com`. Create a route for that host and get a "subrouter" from it:

```go
r := mux.NewRouter()
s := r.Host("www.example.com").Subrouter()
```

Then register routes in the subrouter:

```go
s.HandleFunc("/products/", ProductsHandler)
s.HandleFunc("/products/{key}", ProductHandler)
s.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler)
```

The three URL paths we registered above will only be tested if the domain is `www.example.com`, because the subrouter is tested first. This is not only convenient, but also optimizes request matching. You can create subrouters combining any attribute matchers accepted by a route.

Subrouters can be used to create domain or path "namespaces": you define subrouters in a central place and then parts of the app can register their paths relative to a given subrouter.

There's one more thing about subroutes. When a subrouter has a path prefix, the inner routes use it as the base for their paths:

```go
r := mux.NewRouter()
s := r.PathPrefix("/products").Subrouter()
// "/products/"
s.HandleFunc("/", ProductsHandler)
// "/products/{key}/"
s.HandleFunc("/{key}/", ProductHandler)
// "/products/{key}/details"
s.HandleFunc("/{key}/details", ProductDetailsHandler)
```

Now let's see how to build registered URLs.

Routes can be named. All routes that define a name can have their URLs built, or "reversed". We define a name by calling `Name()` on a route. For example:

```go
r := mux.NewRouter()
r.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler).
  Name("article")
```

To build a URL, get the route and call the `URL()` method, passing a sequence of key/value pairs for the route variables.
For the previous route, we would do: + +```go +url, err := r.Get("article").URL("category", "technology", "id", "42") +``` + +...and the result will be a `url.URL` with the following path: + +``` +"/articles/technology/42" +``` + +This also works for host variables: + +```go +r := mux.NewRouter() +r.Host("{subdomain}.domain.com"). + Path("/articles/{category}/{id:[0-9]+}"). + HandlerFunc(ArticleHandler). + Name("article") + +// url.String() will be "http://news.domain.com/articles/technology/42" +url, err := r.Get("article").URL("subdomain", "news", + "category", "technology", + "id", "42") +``` + +All variables defined in the route are required, and their values must conform to the corresponding patterns. These requirements guarantee that a generated URL will always match a registered route -- the only exception is for explicitly defined "build-only" routes which never match. + +Regex support also exists for matching Headers within a route. For example, we could do: + +```go +r.HeadersRegexp("Content-Type", "application/(text|json)") +``` + +...and the route will match both requests with a Content-Type of `application/json` as well as `application/text` + +There's also a way to build only the URL host or path for a route: use the methods `URLHost()` or `URLPath()` instead. For the previous route, we would do: + +```go +// "http://news.domain.com/" +host, err := r.Get("article").URLHost("subdomain", "news") + +// "/articles/technology/42" +path, err := r.Get("article").URLPath("category", "technology", "id", "42") +``` + +And if you use subrouters, host and path defined separately can be built as well: + +```go +r := mux.NewRouter() +s := r.Host("{subdomain}.domain.com").Subrouter() +s.Path("/articles/{category}/{id:[0-9]+}"). + HandlerFunc(ArticleHandler). + Name("article") + +// "http://news.domain.com/articles/technology/42" +url, err := r.Get("article").URL("subdomain", "news", + "category", "technology", + "id", "42") +``` + +## Full Example + +Here's a complete, runnable example of a small `mux` based server: + +```go +package main + +import ( + "net/http" + + "github.com/gorilla/mux" +) + +func YourHandler(w http.ResponseWriter, r *http.Request) { + w.Write([]byte("Gorilla!\n")) +} + +func main() { + r := mux.NewRouter() + // Routes consist of a path and a handler function. + r.HandleFunc("/", YourHandler) + + // Bind to a port and pass our router in + http.ListenAndServe(":8000", r) +} +``` + +## License + +BSD licensed. See the LICENSE file for details. diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux/bench_test.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux/bench_test.go new file mode 100644 index 000000000..c5f97b2b2 --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux/bench_test.go @@ -0,0 +1,21 @@ +// Copyright 2012 The Gorilla Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package mux + +import ( + "net/http" + "testing" +) + +func BenchmarkMux(b *testing.B) { + router := new(Router) + handler := func(w http.ResponseWriter, r *http.Request) {} + router.HandleFunc("/v1/{v1}", handler) + + request, _ := http.NewRequest("GET", "/v1/anything", nil) + for i := 0; i < b.N; i++ { + router.ServeHTTP(nil, request) + } +} diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux/doc.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux/doc.go new file mode 100644 index 000000000..49798cb5c --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux/doc.go @@ -0,0 +1,206 @@ +// Copyright 2012 The Gorilla Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +/* +Package gorilla/mux implements a request router and dispatcher. + +The name mux stands for "HTTP request multiplexer". Like the standard +http.ServeMux, mux.Router matches incoming requests against a list of +registered routes and calls a handler for the route that matches the URL +or other conditions. The main features are: + + * Requests can be matched based on URL host, path, path prefix, schemes, + header and query values, HTTP methods or using custom matchers. + * URL hosts and paths can have variables with an optional regular + expression. + * Registered URLs can be built, or "reversed", which helps maintaining + references to resources. + * Routes can be used as subrouters: nested routes are only tested if the + parent route matches. This is useful to define groups of routes that + share common conditions like a host, a path prefix or other repeated + attributes. As a bonus, this optimizes request matching. + * It implements the http.Handler interface so it is compatible with the + standard http.ServeMux. + +Let's start registering a couple of URL paths and handlers: + + func main() { + r := mux.NewRouter() + r.HandleFunc("/", HomeHandler) + r.HandleFunc("/products", ProductsHandler) + r.HandleFunc("/articles", ArticlesHandler) + http.Handle("/", r) + } + +Here we register three routes mapping URL paths to handlers. This is +equivalent to how http.HandleFunc() works: if an incoming request URL matches +one of the paths, the corresponding handler is called passing +(http.ResponseWriter, *http.Request) as parameters. + +Paths can have variables. They are defined using the format {name} or +{name:pattern}. If a regular expression pattern is not defined, the matched +variable will be anything until the next slash. For example: + + r := mux.NewRouter() + r.HandleFunc("/products/{key}", ProductHandler) + r.HandleFunc("/articles/{category}/", ArticlesCategoryHandler) + r.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler) + +The names are used to create a map of route variables which can be retrieved +calling mux.Vars(): + + vars := mux.Vars(request) + category := vars["category"] + +And this is all you need to know about the basic usage. More advanced options +are explained below. + +Routes can also be restricted to a domain or subdomain. Just define a host +pattern to be matched. They can also have variables: + + r := mux.NewRouter() + // Only matches if domain is "www.example.com". + r.Host("www.example.com") + // Matches a dynamic subdomain. + r.Host("{subdomain:[a-z]+}.domain.com") + +There are several other matchers that can be added. 
To match path prefixes:

	r.PathPrefix("/products/")

...or HTTP methods:

	r.Methods("GET", "POST")

...or URL schemes:

	r.Schemes("https")

...or header values:

	r.Headers("X-Requested-With", "XMLHttpRequest")

...or query values:

	r.Queries("key", "value")

...or to use a custom matcher function:

	r.MatcherFunc(func(r *http.Request, rm *RouteMatch) bool {
		return r.ProtoMajor == 0
	})

...and finally, it is possible to combine several matchers in a single route:

	r.HandleFunc("/products", ProductsHandler).
	  Host("www.example.com").
	  Methods("GET").
	  Schemes("http")

Setting the same matching conditions again and again can be boring, so we have
a way to group several routes that share the same requirements.
We call it "subrouting".

For example, let's say we have several URLs that should only match when the
host is "www.example.com". Create a route for that host and get a "subrouter"
from it:

	r := mux.NewRouter()
	s := r.Host("www.example.com").Subrouter()

Then register routes in the subrouter:

	s.HandleFunc("/products/", ProductsHandler)
	s.HandleFunc("/products/{key}", ProductHandler)
	s.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler)

The three URL paths we registered above will only be tested if the domain is
"www.example.com", because the subrouter is tested first. This is not
only convenient, but also optimizes request matching. You can create
subrouters combining any attribute matchers accepted by a route.

Subrouters can be used to create domain or path "namespaces": you define
subrouters in a central place and then parts of the app can register their
paths relative to a given subrouter.

There's one more thing about subroutes. When a subrouter has a path prefix,
the inner routes use it as the base for their paths:

	r := mux.NewRouter()
	s := r.PathPrefix("/products").Subrouter()
	// "/products/"
	s.HandleFunc("/", ProductsHandler)
	// "/products/{key}/"
	s.HandleFunc("/{key}/", ProductHandler)
	// "/products/{key}/details"
	s.HandleFunc("/{key}/details", ProductDetailsHandler)

Now let's see how to build registered URLs.

Routes can be named. All routes that define a name can have their URLs built,
or "reversed". We define a name by calling Name() on a route. For example:

	r := mux.NewRouter()
	r.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler).
	  Name("article")

To build a URL, get the route and call the URL() method, passing a sequence of
key/value pairs for the route variables. For the previous route, we would do:

	url, err := r.Get("article").URL("category", "technology", "id", "42")

...and the result will be a url.URL with the following path:

	"/articles/technology/42"

This also works for host variables:

	r := mux.NewRouter()
	r.Host("{subdomain}.domain.com").
	  Path("/articles/{category}/{id:[0-9]+}").
	  HandlerFunc(ArticleHandler).
	  Name("article")

	// url.String() will be "http://news.domain.com/articles/technology/42"
	url, err := r.Get("article").URL("subdomain", "news",
		"category", "technology",
		"id", "42")

All variables defined in the route are required, and their values must
conform to the corresponding patterns. These requirements guarantee that a
generated URL will always match a registered route -- the only exception is
for explicitly defined "build-only" routes which never match.

Regex support also exists for matching Headers within a route.
For example, we could do: + + r.HeadersRegexp("Content-Type", "application/(text|json)") + +...and the route will match both requests with a Content-Type of `application/json` as well as +`application/text` + +There's also a way to build only the URL host or path for a route: +use the methods URLHost() or URLPath() instead. For the previous route, +we would do: + + // "http://news.domain.com/" + host, err := r.Get("article").URLHost("subdomain", "news") + + // "/articles/technology/42" + path, err := r.Get("article").URLPath("category", "technology", "id", "42") + +And if you use subrouters, host and path defined separately can be built +as well: + + r := mux.NewRouter() + s := r.Host("{subdomain}.domain.com").Subrouter() + s.Path("/articles/{category}/{id:[0-9]+}"). + HandlerFunc(ArticleHandler). + Name("article") + + // "http://news.domain.com/articles/technology/42" + url, err := r.Get("article").URL("subdomain", "news", + "category", "technology", + "id", "42") +*/ +package mux diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux/mux.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux/mux.go new file mode 100644 index 000000000..cb03ddfe5 --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux/mux.go @@ -0,0 +1,481 @@ +// Copyright 2012 The Gorilla Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package mux + +import ( + "errors" + "fmt" + "net/http" + "path" + "regexp" + + "github.com/fsouza/go-dockerclient/external/github.com/gorilla/context" +) + +// NewRouter returns a new router instance. +func NewRouter() *Router { + return &Router{namedRoutes: make(map[string]*Route), KeepContext: false} +} + +// Router registers routes to be matched and dispatches a handler. +// +// It implements the http.Handler interface, so it can be registered to serve +// requests: +// +// var router = mux.NewRouter() +// +// func main() { +// http.Handle("/", router) +// } +// +// Or, for Google App Engine, register it in a init() function: +// +// func init() { +// http.Handle("/", router) +// } +// +// This will send all incoming requests to the router. +type Router struct { + // Configurable Handler to be used when no route matches. + NotFoundHandler http.Handler + // Parent route, if this is a subrouter. + parent parentRoute + // Routes to be matched, in order. + routes []*Route + // Routes by name for URL building. + namedRoutes map[string]*Route + // See Router.StrictSlash(). This defines the flag for new routes. + strictSlash bool + // If true, do not clear the request context after handling the request + KeepContext bool +} + +// Match matches registered routes against the request. +func (r *Router) Match(req *http.Request, match *RouteMatch) bool { + for _, route := range r.routes { + if route.Match(req, match) { + return true + } + } + + // Closest match for a router (includes sub-routers) + if r.NotFoundHandler != nil { + match.Handler = r.NotFoundHandler + return true + } + return false +} + +// ServeHTTP dispatches the handler registered in the matched route. +// +// When there is a match, the route variables can be retrieved calling +// mux.Vars(request). +func (r *Router) ServeHTTP(w http.ResponseWriter, req *http.Request) { + // Clean path to canonical form and redirect. 
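+	// For example, a request for "/products//toys/../books" is redirected
+	// permanently (301 Moved Permanently) to "/products/books".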
+ if p := cleanPath(req.URL.Path); p != req.URL.Path { + + // Added 3 lines (Philip Schlump) - It was dropping the query string and #whatever from query. + // This matches with fix in go 1.2 r.c. 4 for same problem. Go Issue: + // http://code.google.com/p/go/issues/detail?id=5252 + url := *req.URL + url.Path = p + p = url.String() + + w.Header().Set("Location", p) + w.WriteHeader(http.StatusMovedPermanently) + return + } + var match RouteMatch + var handler http.Handler + if r.Match(req, &match) { + handler = match.Handler + setVars(req, match.Vars) + setCurrentRoute(req, match.Route) + } + if handler == nil { + handler = http.NotFoundHandler() + } + if !r.KeepContext { + defer context.Clear(req) + } + handler.ServeHTTP(w, req) +} + +// Get returns a route registered with the given name. +func (r *Router) Get(name string) *Route { + return r.getNamedRoutes()[name] +} + +// GetRoute returns a route registered with the given name. This method +// was renamed to Get() and remains here for backwards compatibility. +func (r *Router) GetRoute(name string) *Route { + return r.getNamedRoutes()[name] +} + +// StrictSlash defines the trailing slash behavior for new routes. The initial +// value is false. +// +// When true, if the route path is "/path/", accessing "/path" will redirect +// to the former and vice versa. In other words, your application will always +// see the path as specified in the route. +// +// When false, if the route path is "/path", accessing "/path/" will not match +// this route and vice versa. +// +// Special case: when a route sets a path prefix using the PathPrefix() method, +// strict slash is ignored for that route because the redirect behavior can't +// be determined from a prefix alone. However, any subrouters created from that +// route inherit the original StrictSlash setting. +func (r *Router) StrictSlash(value bool) *Router { + r.strictSlash = value + return r +} + +// ---------------------------------------------------------------------------- +// parentRoute +// ---------------------------------------------------------------------------- + +// getNamedRoutes returns the map where named routes are registered. +func (r *Router) getNamedRoutes() map[string]*Route { + if r.namedRoutes == nil { + if r.parent != nil { + r.namedRoutes = r.parent.getNamedRoutes() + } else { + r.namedRoutes = make(map[string]*Route) + } + } + return r.namedRoutes +} + +// getRegexpGroup returns regexp definitions from the parent route, if any. +func (r *Router) getRegexpGroup() *routeRegexpGroup { + if r.parent != nil { + return r.parent.getRegexpGroup() + } + return nil +} + +func (r *Router) buildVars(m map[string]string) map[string]string { + if r.parent != nil { + m = r.parent.buildVars(m) + } + return m +} + +// ---------------------------------------------------------------------------- +// Route factories +// ---------------------------------------------------------------------------- + +// NewRoute registers an empty route. +func (r *Router) NewRoute() *Route { + route := &Route{parent: r, strictSlash: r.strictSlash} + r.routes = append(r.routes, route) + return route +} + +// Handle registers a new route with a matcher for the URL path. +// See Route.Path() and Route.Handler(). +func (r *Router) Handle(path string, handler http.Handler) *Route { + return r.NewRoute().Path(path).Handler(handler) +} + +// HandleFunc registers a new route with a matcher for the URL path. +// See Route.Path() and Route.HandlerFunc(). 
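+//
+// For example:
+//
+//	r.HandleFunc("/products/{key}", ProductHandler)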
+func (r *Router) HandleFunc(path string, f func(http.ResponseWriter, + *http.Request)) *Route { + return r.NewRoute().Path(path).HandlerFunc(f) +} + +// Headers registers a new route with a matcher for request header values. +// See Route.Headers(). +func (r *Router) Headers(pairs ...string) *Route { + return r.NewRoute().Headers(pairs...) +} + +// Host registers a new route with a matcher for the URL host. +// See Route.Host(). +func (r *Router) Host(tpl string) *Route { + return r.NewRoute().Host(tpl) +} + +// MatcherFunc registers a new route with a custom matcher function. +// See Route.MatcherFunc(). +func (r *Router) MatcherFunc(f MatcherFunc) *Route { + return r.NewRoute().MatcherFunc(f) +} + +// Methods registers a new route with a matcher for HTTP methods. +// See Route.Methods(). +func (r *Router) Methods(methods ...string) *Route { + return r.NewRoute().Methods(methods...) +} + +// Path registers a new route with a matcher for the URL path. +// See Route.Path(). +func (r *Router) Path(tpl string) *Route { + return r.NewRoute().Path(tpl) +} + +// PathPrefix registers a new route with a matcher for the URL path prefix. +// See Route.PathPrefix(). +func (r *Router) PathPrefix(tpl string) *Route { + return r.NewRoute().PathPrefix(tpl) +} + +// Queries registers a new route with a matcher for URL query values. +// See Route.Queries(). +func (r *Router) Queries(pairs ...string) *Route { + return r.NewRoute().Queries(pairs...) +} + +// Schemes registers a new route with a matcher for URL schemes. +// See Route.Schemes(). +func (r *Router) Schemes(schemes ...string) *Route { + return r.NewRoute().Schemes(schemes...) +} + +// BuildVars registers a new route with a custom function for modifying +// route variables before building a URL. +func (r *Router) BuildVarsFunc(f BuildVarsFunc) *Route { + return r.NewRoute().BuildVarsFunc(f) +} + +// Walk walks the router and all its sub-routers, calling walkFn for each route +// in the tree. The routes are walked in the order they were added. Sub-routers +// are explored depth-first. +func (r *Router) Walk(walkFn WalkFunc) error { + return r.walk(walkFn, []*Route{}) +} + +// SkipRouter is used as a return value from WalkFuncs to indicate that the +// router that walk is about to descend down to should be skipped. +var SkipRouter = errors.New("skip this router") + +// WalkFunc is the type of the function called for each route visited by Walk. +// At every invocation, it is given the current route, and the current router, +// and a list of ancestor routes that lead to the current route. +type WalkFunc func(route *Route, router *Router, ancestors []*Route) error + +func (r *Router) walk(walkFn WalkFunc, ancestors []*Route) error { + for _, t := range r.routes { + if t.regexp == nil || t.regexp.path == nil || t.regexp.path.template == "" { + continue + } + + err := walkFn(t, r, ancestors) + if err == SkipRouter { + continue + } + for _, sr := range t.matchers { + if h, ok := sr.(*Router); ok { + err := h.walk(walkFn, ancestors) + if err != nil { + return err + } + } + } + if h, ok := t.handler.(*Router); ok { + ancestors = append(ancestors, t) + err := h.walk(walkFn, ancestors) + if err != nil { + return err + } + ancestors = ancestors[:len(ancestors)-1] + } + } + return nil +} + +// ---------------------------------------------------------------------------- +// Context +// ---------------------------------------------------------------------------- + +// RouteMatch stores information about a matched route. 
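+//
+// Route is the route that matched, Handler is the handler to be dispatched,
+// and Vars holds the values extracted from the {name} variables in the host
+// and path templates.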
+type RouteMatch struct {
+	Route   *Route
+	Handler http.Handler
+	Vars    map[string]string
+}
+
+type contextKey int
+
+const (
+	varsKey contextKey = iota
+	routeKey
+)
+
+// Vars returns the route variables for the current request, if any.
+func Vars(r *http.Request) map[string]string {
+	if rv := context.Get(r, varsKey); rv != nil {
+		return rv.(map[string]string)
+	}
+	return nil
+}
+
+// CurrentRoute returns the matched route for the current request, if any.
+// This only works when called inside the handler of the matched route
+// because the matched route is stored in the request context, which is
+// cleared after the handler returns, unless the KeepContext option is set
+// on the Router.
+func CurrentRoute(r *http.Request) *Route {
+	if rv := context.Get(r, routeKey); rv != nil {
+		return rv.(*Route)
+	}
+	return nil
+}
+
+func setVars(r *http.Request, val interface{}) {
+	if val != nil {
+		context.Set(r, varsKey, val)
+	}
+}
+
+func setCurrentRoute(r *http.Request, val interface{}) {
+	if val != nil {
+		context.Set(r, routeKey, val)
+	}
+}
+
+// ----------------------------------------------------------------------------
+// Helpers
+// ----------------------------------------------------------------------------
+
+// cleanPath returns the canonical path for p, eliminating . and .. elements.
+// Borrowed from the net/http package.
+func cleanPath(p string) string {
+	if p == "" {
+		return "/"
+	}
+	if p[0] != '/' {
+		p = "/" + p
+	}
+	np := path.Clean(p)
+	// path.Clean removes trailing slash except for root;
+	// put the trailing slash back if necessary.
+	if p[len(p)-1] == '/' && np != "/" {
+		np += "/"
+	}
+	return np
+}
+
+// uniqueVars returns an error if two slices contain duplicated strings.
+func uniqueVars(s1, s2 []string) error {
+	for _, v1 := range s1 {
+		for _, v2 := range s2 {
+			if v1 == v2 {
+				return fmt.Errorf("mux: duplicated route variable %q", v2)
+			}
+		}
+	}
+	return nil
+}
+
+// checkPairs returns the count of strings passed in, and an error if
+// the count is not an even number.
+func checkPairs(pairs ...string) (int, error) {
+	length := len(pairs)
+	if length%2 != 0 {
+		return length, fmt.Errorf(
+			"mux: number of parameters must be multiple of 2, got %v", pairs)
+	}
+	return length, nil
+}
+
+// mapFromPairsToString converts variadic string parameters to a
+// string to string map.
+func mapFromPairsToString(pairs ...string) (map[string]string, error) {
+	length, err := checkPairs(pairs...)
+	if err != nil {
+		return nil, err
+	}
+	m := make(map[string]string, length/2)
+	for i := 0; i < length; i += 2 {
+		m[pairs[i]] = pairs[i+1]
+	}
+	return m, nil
+}
+
+// mapFromPairsToRegex converts variadic string parameters to a
+// string to regex map.
+func mapFromPairsToRegex(pairs ...string) (map[string]*regexp.Regexp, error) {
+	length, err := checkPairs(pairs...)
+	if err != nil {
+		return nil, err
+	}
+	m := make(map[string]*regexp.Regexp, length/2)
+	for i := 0; i < length; i += 2 {
+		regex, err := regexp.Compile(pairs[i+1])
+		if err != nil {
+			return nil, err
+		}
+		m[pairs[i]] = regex
+	}
+	return m, nil
+}
+
+// matchInArray returns true if the given string value is in the array.
+func matchInArray(arr []string, value string) bool {
+	for _, v := range arr {
+		if v == value {
+			return true
+		}
+	}
+	return false
+}
+
+// matchMapWithString returns true if the given key/value pairs exist in a given map.
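+// As a worked illustration of the semantics below (the header names are made
+// up for the example): checking {"Content-Type": "application/json",
+// "X-Token": ""} matches a request that carries
+// Content-Type: application/json and has any X-Token header at all, because
+// an empty expected value only requires the key to be present.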
+func matchMapWithString(toCheck map[string]string, toMatch map[string][]string, canonicalKey bool) bool {
+	for k, v := range toCheck {
+		// Check if key exists.
+		if canonicalKey {
+			k = http.CanonicalHeaderKey(k)
+		}
+		if values := toMatch[k]; values == nil {
+			return false
+		} else if v != "" {
+			// If value was defined as an empty string we only check that the
+			// key exists. Otherwise we also check for equality.
+			valueExists := false
+			for _, value := range values {
+				if v == value {
+					valueExists = true
+					break
+				}
+			}
+			if !valueExists {
+				return false
+			}
+		}
+	}
+	return true
+}
+
+// matchMapWithRegex returns true if the given key/value pairs exist in a
+// given map, with the values matched against the given regexps.
+func matchMapWithRegex(toCheck map[string]*regexp.Regexp, toMatch map[string][]string, canonicalKey bool) bool {
+	for k, v := range toCheck {
+		// Check if key exists.
+		if canonicalKey {
+			k = http.CanonicalHeaderKey(k)
+		}
+		if values := toMatch[k]; values == nil {
+			return false
+		} else if v != nil {
+			// If the regexp is nil we only check that the key exists.
+			// Otherwise we also require at least one value to match it.
+			valueExists := false
+			for _, value := range values {
+				if v.MatchString(value) {
+					valueExists = true
+					break
+				}
+			}
+			if !valueExists {
+				return false
+			}
+		}
+	}
+	return true
+}
diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux/mux_test.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux/mux_test.go
new file mode 100644
index 000000000..98f73fe9c
--- /dev/null
+++ b/vendor/github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux/mux_test.go
@@ -0,0 +1,1358 @@
+// Copyright 2012 The Gorilla Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
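+// The table-driven tests below exercise the matchers above. One point worth
+// a hedged sketch (the header name and pattern are illustrative):
+// HeadersRegexp compiles its values as regexps, while Headers compares them
+// literally:
+//
+//	route := new(Route).HeadersRegexp("foo", "ba[zr]")
+//	// matches a request carrying "foo: bar" or "foo: baz"; plain
+//	// Headers("foo", "ba[zr]") would only match the literal value "ba[zr]".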
+
+package mux
+
+import (
+	"fmt"
+	"net/http"
+	"strings"
+	"testing"
+
+	"github.com/fsouza/go-dockerclient/external/github.com/gorilla/context"
+)
+
+func (r *Route) GoString() string {
+	matchers := make([]string, len(r.matchers))
+	for i, m := range r.matchers {
+		matchers[i] = fmt.Sprintf("%#v", m)
+	}
+	return fmt.Sprintf("&Route{matchers:[]matcher{%s}}", strings.Join(matchers, ", "))
+}
+
+func (r *routeRegexp) GoString() string {
+	// The format string now closes the struct literal; it previously ended at
+	// "varsR: %v" and produced unbalanced output.
+	return fmt.Sprintf("&routeRegexp{template: %q, matchHost: %t, matchQuery: %t, strictSlash: %t, regexp: regexp.MustCompile(%q), reverse: %q, varsN: %v, varsR: %v}", r.template, r.matchHost, r.matchQuery, r.strictSlash, r.regexp.String(), r.reverse, r.varsN, r.varsR)
+}
+
+type routeTest struct {
+	title          string            // title of the test
+	route          *Route            // the route being tested
+	request        *http.Request     // a request to test the route
+	vars           map[string]string // the expected vars of the match
+	host           string            // the expected host of the match
+	path           string            // the expected path of the match
+	shouldMatch    bool              // whether the request is expected to match the route at all
+	shouldRedirect bool              // whether the request should result in a redirect
+}
+
+func TestHost(t *testing.T) {
+	// newRequestHost creates a new request with a method, url, and host header
+	newRequestHost := func(method, url, host string) *http.Request {
+		req, err := http.NewRequest(method, url, nil)
+		if err != nil {
+			panic(err)
+		}
+		req.Host = host
+		return req
+	}
+
+	tests := []routeTest{
+		{
+			title:       "Host route match",
+			route:       new(Route).Host("aaa.bbb.ccc"),
+			request:     newRequest("GET", "http://aaa.bbb.ccc/111/222/333"),
+			vars:        map[string]string{},
+			host:        "aaa.bbb.ccc",
+			path:        "",
+			shouldMatch: true,
+		},
+		{
+			title:       "Host route, wrong host in request URL",
+			route:       new(Route).Host("aaa.bbb.ccc"),
+			request:     newRequest("GET", "http://aaa.222.ccc/111/222/333"),
+			vars:        map[string]string{},
+			host:        "aaa.bbb.ccc",
+			path:        "",
+			shouldMatch: false,
+		},
+		{
+			title:       "Host route with port, match",
+			route:       new(Route).Host("aaa.bbb.ccc:1234"),
+			request:     newRequest("GET", "http://aaa.bbb.ccc:1234/111/222/333"),
+			vars:        map[string]string{},
+			host:        "aaa.bbb.ccc:1234",
+			path:        "",
+			shouldMatch: true,
+		},
+		{
+			title:       "Host route with port, wrong port in request URL",
+			route:       new(Route).Host("aaa.bbb.ccc:1234"),
+			request:     newRequest("GET", "http://aaa.bbb.ccc:9999/111/222/333"),
+			vars:        map[string]string{},
+			host:        "aaa.bbb.ccc:1234",
+			path:        "",
+			shouldMatch: false,
+		},
+		{
+			title:       "Host route, match with host in request header",
+			route:       new(Route).Host("aaa.bbb.ccc"),
+			request:     newRequestHost("GET", "/111/222/333", "aaa.bbb.ccc"),
+			vars:        map[string]string{},
+			host:        "aaa.bbb.ccc",
+			path:        "",
+			shouldMatch: true,
+		},
+		{
+			title:       "Host route, wrong host in request header",
+			route:       new(Route).Host("aaa.bbb.ccc"),
+			request:     newRequestHost("GET", "/111/222/333", "aaa.222.ccc"),
+			vars:        map[string]string{},
+			host:        "aaa.bbb.ccc",
+			path:        "",
+			shouldMatch: false,
+		},
+		// BUG {new(Route).Host("aaa.bbb.ccc:1234"), newRequestHost("GET", "/111/222/333", "aaa.bbb.ccc:1234"), map[string]string{}, "aaa.bbb.ccc:1234", "", true},
+		{
+			title:       "Host route with port, wrong host in request header",
+			route:       new(Route).Host("aaa.bbb.ccc:1234"),
+			request:     newRequestHost("GET", "/111/222/333", "aaa.bbb.ccc:9999"),
+			vars:        map[string]string{},
+			host:        "aaa.bbb.ccc:1234",
+			path:        "",
+			shouldMatch: false,
+		},
+		{
+			title:       "Host route with pattern, match",
+			route:       new(Route).Host("aaa.{v1:[a-z]{3}}.ccc"),
+
request: newRequest("GET", "http://aaa.bbb.ccc/111/222/333"), + vars: map[string]string{"v1": "bbb"}, + host: "aaa.bbb.ccc", + path: "", + shouldMatch: true, + }, + { + title: "Host route with pattern, additional capturing group, match", + route: new(Route).Host("aaa.{v1:[a-z]{2}(b|c)}.ccc"), + request: newRequest("GET", "http://aaa.bbb.ccc/111/222/333"), + vars: map[string]string{"v1": "bbb"}, + host: "aaa.bbb.ccc", + path: "", + shouldMatch: true, + }, + { + title: "Host route with pattern, wrong host in request URL", + route: new(Route).Host("aaa.{v1:[a-z]{3}}.ccc"), + request: newRequest("GET", "http://aaa.222.ccc/111/222/333"), + vars: map[string]string{"v1": "bbb"}, + host: "aaa.bbb.ccc", + path: "", + shouldMatch: false, + }, + { + title: "Host route with multiple patterns, match", + route: new(Route).Host("{v1:[a-z]{3}}.{v2:[a-z]{3}}.{v3:[a-z]{3}}"), + request: newRequest("GET", "http://aaa.bbb.ccc/111/222/333"), + vars: map[string]string{"v1": "aaa", "v2": "bbb", "v3": "ccc"}, + host: "aaa.bbb.ccc", + path: "", + shouldMatch: true, + }, + { + title: "Host route with multiple patterns, wrong host in request URL", + route: new(Route).Host("{v1:[a-z]{3}}.{v2:[a-z]{3}}.{v3:[a-z]{3}}"), + request: newRequest("GET", "http://aaa.222.ccc/111/222/333"), + vars: map[string]string{"v1": "aaa", "v2": "bbb", "v3": "ccc"}, + host: "aaa.bbb.ccc", + path: "", + shouldMatch: false, + }, + { + title: "Host route with hyphenated name and pattern, match", + route: new(Route).Host("aaa.{v-1:[a-z]{3}}.ccc"), + request: newRequest("GET", "http://aaa.bbb.ccc/111/222/333"), + vars: map[string]string{"v-1": "bbb"}, + host: "aaa.bbb.ccc", + path: "", + shouldMatch: true, + }, + { + title: "Host route with hyphenated name and pattern, additional capturing group, match", + route: new(Route).Host("aaa.{v-1:[a-z]{2}(b|c)}.ccc"), + request: newRequest("GET", "http://aaa.bbb.ccc/111/222/333"), + vars: map[string]string{"v-1": "bbb"}, + host: "aaa.bbb.ccc", + path: "", + shouldMatch: true, + }, + { + title: "Host route with multiple hyphenated names and patterns, match", + route: new(Route).Host("{v-1:[a-z]{3}}.{v-2:[a-z]{3}}.{v-3:[a-z]{3}}"), + request: newRequest("GET", "http://aaa.bbb.ccc/111/222/333"), + vars: map[string]string{"v-1": "aaa", "v-2": "bbb", "v-3": "ccc"}, + host: "aaa.bbb.ccc", + path: "", + shouldMatch: true, + }, + { + title: "Path route with single pattern with pipe, match", + route: new(Route).Path("/{category:a|b/c}"), + request: newRequest("GET", "http://localhost/a"), + vars: map[string]string{"category": "a"}, + host: "", + path: "/a", + shouldMatch: true, + }, + { + title: "Path route with single pattern with pipe, match", + route: new(Route).Path("/{category:a|b/c}"), + request: newRequest("GET", "http://localhost/b/c"), + vars: map[string]string{"category": "b/c"}, + host: "", + path: "/b/c", + shouldMatch: true, + }, + { + title: "Path route with multiple patterns with pipe, match", + route: new(Route).Path("/{category:a|b/c}/{product}/{id:[0-9]+}"), + request: newRequest("GET", "http://localhost/a/product_name/1"), + vars: map[string]string{"category": "a", "product": "product_name", "id": "1"}, + host: "", + path: "/a/product_name/1", + shouldMatch: true, + }, + { + title: "Path route with multiple patterns with pipe, match", + route: new(Route).Path("/{category:a|b/c}/{product}/{id:[0-9]+}"), + request: newRequest("GET", "http://localhost/b/c/product_name/1"), + vars: map[string]string{"category": "b/c", "product": "product_name", "id": "1"}, + host: "", + path: 
"/b/c/product_name/1", + shouldMatch: true, + }, + } + for _, test := range tests { + testRoute(t, test) + } +} + +func TestPath(t *testing.T) { + tests := []routeTest{ + { + title: "Path route, match", + route: new(Route).Path("/111/222/333"), + request: newRequest("GET", "http://localhost/111/222/333"), + vars: map[string]string{}, + host: "", + path: "/111/222/333", + shouldMatch: true, + }, + { + title: "Path route, match with trailing slash in request and path", + route: new(Route).Path("/111/"), + request: newRequest("GET", "http://localhost/111/"), + vars: map[string]string{}, + host: "", + path: "/111/", + shouldMatch: true, + }, + { + title: "Path route, do not match with trailing slash in path", + route: new(Route).Path("/111/"), + request: newRequest("GET", "http://localhost/111"), + vars: map[string]string{}, + host: "", + path: "/111", + shouldMatch: false, + }, + { + title: "Path route, do not match with trailing slash in request", + route: new(Route).Path("/111"), + request: newRequest("GET", "http://localhost/111/"), + vars: map[string]string{}, + host: "", + path: "/111/", + shouldMatch: false, + }, + { + title: "Path route, wrong path in request in request URL", + route: new(Route).Path("/111/222/333"), + request: newRequest("GET", "http://localhost/1/2/3"), + vars: map[string]string{}, + host: "", + path: "/111/222/333", + shouldMatch: false, + }, + { + title: "Path route with pattern, match", + route: new(Route).Path("/111/{v1:[0-9]{3}}/333"), + request: newRequest("GET", "http://localhost/111/222/333"), + vars: map[string]string{"v1": "222"}, + host: "", + path: "/111/222/333", + shouldMatch: true, + }, + { + title: "Path route with pattern, URL in request does not match", + route: new(Route).Path("/111/{v1:[0-9]{3}}/333"), + request: newRequest("GET", "http://localhost/111/aaa/333"), + vars: map[string]string{"v1": "222"}, + host: "", + path: "/111/222/333", + shouldMatch: false, + }, + { + title: "Path route with multiple patterns, match", + route: new(Route).Path("/{v1:[0-9]{3}}/{v2:[0-9]{3}}/{v3:[0-9]{3}}"), + request: newRequest("GET", "http://localhost/111/222/333"), + vars: map[string]string{"v1": "111", "v2": "222", "v3": "333"}, + host: "", + path: "/111/222/333", + shouldMatch: true, + }, + { + title: "Path route with multiple patterns, URL in request does not match", + route: new(Route).Path("/{v1:[0-9]{3}}/{v2:[0-9]{3}}/{v3:[0-9]{3}}"), + request: newRequest("GET", "http://localhost/111/aaa/333"), + vars: map[string]string{"v1": "111", "v2": "222", "v3": "333"}, + host: "", + path: "/111/222/333", + shouldMatch: false, + }, + { + title: "Path route with multiple patterns with pipe, match", + route: new(Route).Path("/{category:a|(b/c)}/{product}/{id:[0-9]+}"), + request: newRequest("GET", "http://localhost/a/product_name/1"), + vars: map[string]string{"category": "a", "product": "product_name", "id": "1"}, + host: "", + path: "/a/product_name/1", + shouldMatch: true, + }, + { + title: "Path route with hyphenated name and pattern, match", + route: new(Route).Path("/111/{v-1:[0-9]{3}}/333"), + request: newRequest("GET", "http://localhost/111/222/333"), + vars: map[string]string{"v-1": "222"}, + host: "", + path: "/111/222/333", + shouldMatch: true, + }, + { + title: "Path route with multiple hyphenated names and patterns, match", + route: new(Route).Path("/{v-1:[0-9]{3}}/{v-2:[0-9]{3}}/{v-3:[0-9]{3}}"), + request: newRequest("GET", "http://localhost/111/222/333"), + vars: map[string]string{"v-1": "111", "v-2": "222", "v-3": "333"}, + host: "", + path: 
"/111/222/333", + shouldMatch: true, + }, + { + title: "Path route with multiple hyphenated names and patterns with pipe, match", + route: new(Route).Path("/{product-category:a|(b/c)}/{product-name}/{product-id:[0-9]+}"), + request: newRequest("GET", "http://localhost/a/product_name/1"), + vars: map[string]string{"product-category": "a", "product-name": "product_name", "product-id": "1"}, + host: "", + path: "/a/product_name/1", + shouldMatch: true, + }, + } + + for _, test := range tests { + testRoute(t, test) + } +} + +func TestPathPrefix(t *testing.T) { + tests := []routeTest{ + { + title: "PathPrefix route, match", + route: new(Route).PathPrefix("/111"), + request: newRequest("GET", "http://localhost/111/222/333"), + vars: map[string]string{}, + host: "", + path: "/111", + shouldMatch: true, + }, + { + title: "PathPrefix route, match substring", + route: new(Route).PathPrefix("/1"), + request: newRequest("GET", "http://localhost/111/222/333"), + vars: map[string]string{}, + host: "", + path: "/1", + shouldMatch: true, + }, + { + title: "PathPrefix route, URL prefix in request does not match", + route: new(Route).PathPrefix("/111"), + request: newRequest("GET", "http://localhost/1/2/3"), + vars: map[string]string{}, + host: "", + path: "/111", + shouldMatch: false, + }, + { + title: "PathPrefix route with pattern, match", + route: new(Route).PathPrefix("/111/{v1:[0-9]{3}}"), + request: newRequest("GET", "http://localhost/111/222/333"), + vars: map[string]string{"v1": "222"}, + host: "", + path: "/111/222", + shouldMatch: true, + }, + { + title: "PathPrefix route with pattern, URL prefix in request does not match", + route: new(Route).PathPrefix("/111/{v1:[0-9]{3}}"), + request: newRequest("GET", "http://localhost/111/aaa/333"), + vars: map[string]string{"v1": "222"}, + host: "", + path: "/111/222", + shouldMatch: false, + }, + { + title: "PathPrefix route with multiple patterns, match", + route: new(Route).PathPrefix("/{v1:[0-9]{3}}/{v2:[0-9]{3}}"), + request: newRequest("GET", "http://localhost/111/222/333"), + vars: map[string]string{"v1": "111", "v2": "222"}, + host: "", + path: "/111/222", + shouldMatch: true, + }, + { + title: "PathPrefix route with multiple patterns, URL prefix in request does not match", + route: new(Route).PathPrefix("/{v1:[0-9]{3}}/{v2:[0-9]{3}}"), + request: newRequest("GET", "http://localhost/111/aaa/333"), + vars: map[string]string{"v1": "111", "v2": "222"}, + host: "", + path: "/111/222", + shouldMatch: false, + }, + } + + for _, test := range tests { + testRoute(t, test) + } +} + +func TestHostPath(t *testing.T) { + tests := []routeTest{ + { + title: "Host and Path route, match", + route: new(Route).Host("aaa.bbb.ccc").Path("/111/222/333"), + request: newRequest("GET", "http://aaa.bbb.ccc/111/222/333"), + vars: map[string]string{}, + host: "", + path: "", + shouldMatch: true, + }, + { + title: "Host and Path route, wrong host in request URL", + route: new(Route).Host("aaa.bbb.ccc").Path("/111/222/333"), + request: newRequest("GET", "http://aaa.222.ccc/111/222/333"), + vars: map[string]string{}, + host: "", + path: "", + shouldMatch: false, + }, + { + title: "Host and Path route with pattern, match", + route: new(Route).Host("aaa.{v1:[a-z]{3}}.ccc").Path("/111/{v2:[0-9]{3}}/333"), + request: newRequest("GET", "http://aaa.bbb.ccc/111/222/333"), + vars: map[string]string{"v1": "bbb", "v2": "222"}, + host: "aaa.bbb.ccc", + path: "/111/222/333", + shouldMatch: true, + }, + { + title: "Host and Path route with pattern, URL in request does not match", + route: 
new(Route).Host("aaa.{v1:[a-z]{3}}.ccc").Path("/111/{v2:[0-9]{3}}/333"),
+			request:     newRequest("GET", "http://aaa.222.ccc/111/222/333"),
+			vars:        map[string]string{"v1": "bbb", "v2": "222"},
+			host:        "aaa.bbb.ccc",
+			path:        "/111/222/333",
+			shouldMatch: false,
+		},
+		{
+			title:       "Host and Path route with multiple patterns, match",
+			route:       new(Route).Host("{v1:[a-z]{3}}.{v2:[a-z]{3}}.{v3:[a-z]{3}}").Path("/{v4:[0-9]{3}}/{v5:[0-9]{3}}/{v6:[0-9]{3}}"),
+			request:     newRequest("GET", "http://aaa.bbb.ccc/111/222/333"),
+			vars:        map[string]string{"v1": "aaa", "v2": "bbb", "v3": "ccc", "v4": "111", "v5": "222", "v6": "333"},
+			host:        "aaa.bbb.ccc",
+			path:        "/111/222/333",
+			shouldMatch: true,
+		},
+		{
+			title:       "Host and Path route with multiple patterns, URL in request does not match",
+			route:       new(Route).Host("{v1:[a-z]{3}}.{v2:[a-z]{3}}.{v3:[a-z]{3}}").Path("/{v4:[0-9]{3}}/{v5:[0-9]{3}}/{v6:[0-9]{3}}"),
+			request:     newRequest("GET", "http://aaa.222.ccc/111/222/333"),
+			vars:        map[string]string{"v1": "aaa", "v2": "bbb", "v3": "ccc", "v4": "111", "v5": "222", "v6": "333"},
+			host:        "aaa.bbb.ccc",
+			path:        "/111/222/333",
+			shouldMatch: false,
+		},
+	}
+
+	for _, test := range tests {
+		testRoute(t, test)
+	}
+}
+
+func TestHeaders(t *testing.T) {
+	// newRequestHeaders creates a new request with a method, url, and headers
+	newRequestHeaders := func(method, url string, headers map[string]string) *http.Request {
+		req, err := http.NewRequest(method, url, nil)
+		if err != nil {
+			panic(err)
+		}
+		for k, v := range headers {
+			req.Header.Add(k, v)
+		}
+		return req
+	}
+
+	tests := []routeTest{
+		{
+			title:       "Headers route, match",
+			route:       new(Route).Headers("foo", "bar", "baz", "ding"),
+			request:     newRequestHeaders("GET", "http://localhost", map[string]string{"foo": "bar", "baz": "ding"}),
+			vars:        map[string]string{},
+			host:        "",
+			path:        "",
+			shouldMatch: true,
+		},
+		{
+			title:       "Headers route, bad header values",
+			route:       new(Route).Headers("foo", "bar", "baz", "ding"),
+			request:     newRequestHeaders("GET", "http://localhost", map[string]string{"foo": "bar", "baz": "dong"}),
+			vars:        map[string]string{},
+			host:        "",
+			path:        "",
+			shouldMatch: false,
+		},
+		{
+			title:       "Headers route, regex-like value matched literally, no match",
+			route:       new(Route).Headers("foo", "ba[zr]"),
+			request:     newRequestHeaders("GET", "http://localhost", map[string]string{"foo": "bar"}),
+			vars:        map[string]string{},
+			host:        "",
+			path:        "",
+			shouldMatch: false,
+		},
+		{
+			title:       "HeadersRegexp route, regex header values to match",
+			route:       new(Route).HeadersRegexp("foo", "ba[zr]"),
+			request:     newRequestHeaders("GET", "http://localhost", map[string]string{"foo": "baz"}),
+			vars:        map[string]string{},
+			host:        "",
+			path:        "",
+			shouldMatch: true,
+		},
+	}
+
+	for _, test := range tests {
+		testRoute(t, test)
+	}
+
+}
+
+func TestMethods(t *testing.T) {
+	tests := []routeTest{
+		{
+			title:       "Methods route, match GET",
+			route:       new(Route).Methods("GET", "POST"),
+			request:     newRequest("GET", "http://localhost"),
+			vars:        map[string]string{},
+			host:        "",
+			path:        "",
+			shouldMatch: true,
+		},
+		{
+			title:       "Methods route, match POST",
+			route:       new(Route).Methods("GET", "POST"),
+			request:     newRequest("POST", "http://localhost"),
+			vars:        map[string]string{},
+			host:        "",
+			path:        "",
+			shouldMatch: true,
+		},
+		{
+			title:       "Methods route, bad method",
+			route:       new(Route).Methods("GET", "POST"),
+			request:     newRequest("PUT", "http://localhost"),
+			vars:        map[string]string{},
+			host:        "",
+			path:        "",
+			shouldMatch: false,
+		},
+	}
+
+	for _, test := range tests {
+		testRoute(t,
test) + } +} + +func TestQueries(t *testing.T) { + tests := []routeTest{ + { + title: "Queries route, match", + route: new(Route).Queries("foo", "bar", "baz", "ding"), + request: newRequest("GET", "http://localhost?foo=bar&baz=ding"), + vars: map[string]string{}, + host: "", + path: "", + shouldMatch: true, + }, + { + title: "Queries route, match with a query string", + route: new(Route).Host("www.example.com").Path("/api").Queries("foo", "bar", "baz", "ding"), + request: newRequest("GET", "http://www.example.com/api?foo=bar&baz=ding"), + vars: map[string]string{}, + host: "", + path: "", + shouldMatch: true, + }, + { + title: "Queries route, match with a query string out of order", + route: new(Route).Host("www.example.com").Path("/api").Queries("foo", "bar", "baz", "ding"), + request: newRequest("GET", "http://www.example.com/api?baz=ding&foo=bar"), + vars: map[string]string{}, + host: "", + path: "", + shouldMatch: true, + }, + { + title: "Queries route, bad query", + route: new(Route).Queries("foo", "bar", "baz", "ding"), + request: newRequest("GET", "http://localhost?foo=bar&baz=dong"), + vars: map[string]string{}, + host: "", + path: "", + shouldMatch: false, + }, + { + title: "Queries route with pattern, match", + route: new(Route).Queries("foo", "{v1}"), + request: newRequest("GET", "http://localhost?foo=bar"), + vars: map[string]string{"v1": "bar"}, + host: "", + path: "", + shouldMatch: true, + }, + { + title: "Queries route with multiple patterns, match", + route: new(Route).Queries("foo", "{v1}", "baz", "{v2}"), + request: newRequest("GET", "http://localhost?foo=bar&baz=ding"), + vars: map[string]string{"v1": "bar", "v2": "ding"}, + host: "", + path: "", + shouldMatch: true, + }, + { + title: "Queries route with regexp pattern, match", + route: new(Route).Queries("foo", "{v1:[0-9]+}"), + request: newRequest("GET", "http://localhost?foo=10"), + vars: map[string]string{"v1": "10"}, + host: "", + path: "", + shouldMatch: true, + }, + { + title: "Queries route with regexp pattern, regexp does not match", + route: new(Route).Queries("foo", "{v1:[0-9]+}"), + request: newRequest("GET", "http://localhost?foo=a"), + vars: map[string]string{}, + host: "", + path: "", + shouldMatch: false, + }, + { + title: "Queries route with regexp pattern with quantifier, match", + route: new(Route).Queries("foo", "{v1:[0-9]{1}}"), + request: newRequest("GET", "http://localhost?foo=1"), + vars: map[string]string{"v1": "1"}, + host: "", + path: "", + shouldMatch: true, + }, + { + title: "Queries route with regexp pattern with quantifier, additional variable in query string, match", + route: new(Route).Queries("foo", "{v1:[0-9]{1}}"), + request: newRequest("GET", "http://localhost?bar=2&foo=1"), + vars: map[string]string{"v1": "1"}, + host: "", + path: "", + shouldMatch: true, + }, + { + title: "Queries route with regexp pattern with quantifier, regexp does not match", + route: new(Route).Queries("foo", "{v1:[0-9]{1}}"), + request: newRequest("GET", "http://localhost?foo=12"), + vars: map[string]string{}, + host: "", + path: "", + shouldMatch: false, + }, + { + title: "Queries route with regexp pattern with quantifier, additional capturing group", + route: new(Route).Queries("foo", "{v1:[0-9]{1}(a|b)}"), + request: newRequest("GET", "http://localhost?foo=1a"), + vars: map[string]string{"v1": "1a"}, + host: "", + path: "", + shouldMatch: true, + }, + { + title: "Queries route with regexp pattern with quantifier, additional variable in query string, regexp does not match", + route: 
new(Route).Queries("foo", "{v1:[0-9]{1}}"),
+			request:     newRequest("GET", "http://localhost?foo=12"),
+			vars:        map[string]string{},
+			host:        "",
+			path:        "",
+			shouldMatch: false,
+		},
+		{
+			title:       "Queries route with hyphenated name, match",
+			route:       new(Route).Queries("foo", "{v-1}"),
+			request:     newRequest("GET", "http://localhost?foo=bar"),
+			vars:        map[string]string{"v-1": "bar"},
+			host:        "",
+			path:        "",
+			shouldMatch: true,
+		},
+		{
+			title:       "Queries route with multiple hyphenated names, match",
+			route:       new(Route).Queries("foo", "{v-1}", "baz", "{v-2}"),
+			request:     newRequest("GET", "http://localhost?foo=bar&baz=ding"),
+			vars:        map[string]string{"v-1": "bar", "v-2": "ding"},
+			host:        "",
+			path:        "",
+			shouldMatch: true,
+		},
+		{
+			title:       "Queries route with hyphenated name and pattern, match",
+			route:       new(Route).Queries("foo", "{v-1:[0-9]+}"),
+			request:     newRequest("GET", "http://localhost?foo=10"),
+			vars:        map[string]string{"v-1": "10"},
+			host:        "",
+			path:        "",
+			shouldMatch: true,
+		},
+		{
+			title:       "Queries route with hyphenated name and pattern with quantifier, additional capturing group",
+			route:       new(Route).Queries("foo", "{v-1:[0-9]{1}(a|b)}"),
+			request:     newRequest("GET", "http://localhost?foo=1a"),
+			vars:        map[string]string{"v-1": "1a"},
+			host:        "",
+			path:        "",
+			shouldMatch: true,
+		},
+		{
+			title:       "Queries route with empty value, should match",
+			route:       new(Route).Queries("foo", ""),
+			request:     newRequest("GET", "http://localhost?foo=bar"),
+			vars:        map[string]string{},
+			host:        "",
+			path:        "",
+			shouldMatch: true,
+		},
+		{
+			title:       "Queries route with empty value and no parameter in request, should not match",
+			route:       new(Route).Queries("foo", ""),
+			request:     newRequest("GET", "http://localhost"),
+			vars:        map[string]string{},
+			host:        "",
+			path:        "",
+			shouldMatch: false,
+		},
+		{
+			title:       "Queries route with empty value and empty parameter in request, should match",
+			route:       new(Route).Queries("foo", ""),
+			request:     newRequest("GET", "http://localhost?foo="),
+			vars:        map[string]string{},
+			host:        "",
+			path:        "",
+			shouldMatch: true,
+		},
+		{
+			title:       "Queries route with overlapping value, should not match",
+			route:       new(Route).Queries("foo", "bar"),
+			request:     newRequest("GET", "http://localhost?foo=barfoo"),
+			vars:        map[string]string{},
+			host:        "",
+			path:        "",
+			shouldMatch: false,
+		},
+		{
+			title:       "Queries route with no parameter in request, should not match",
+			route:       new(Route).Queries("foo", "{bar}"),
+			request:     newRequest("GET", "http://localhost"),
+			vars:        map[string]string{},
+			host:        "",
+			path:        "",
+			shouldMatch: false,
+		},
+		{
+			title:       "Queries route with empty parameter in request, should match",
+			route:       new(Route).Queries("foo", "{bar}"),
+			request:     newRequest("GET", "http://localhost?foo="),
+			vars:        map[string]string{"bar": ""},
+			host:        "",
+			path:        "",
+			shouldMatch: true,
+		},
+		{
+			title:       "Queries route, bad submatch",
+			route:       new(Route).Queries("foo", "bar", "baz", "ding"),
+			request:     newRequest("GET", "http://localhost?fffoo=bar&baz=dingggg"),
+			vars:        map[string]string{},
+			host:        "",
+			path:        "",
+			shouldMatch: false,
+		},
+	}
+
+	for _, test := range tests {
+		testRoute(t, test)
+	}
+}
+
+func TestSchemes(t *testing.T) {
+	tests := []routeTest{
+		// Schemes
+		{
+			title:       "Schemes route, match https",
+			route:       new(Route).Schemes("https", "ftp"),
+			request:     newRequest("GET", "https://localhost"),
+			vars:        map[string]string{},
+			host:        "",
+			path:        "",
+			shouldMatch: true,
+		},
+		{
+			title:       "Schemes route, match ftp",
+			route:       new(Route).Schemes("https", "ftp"),
+
request: newRequest("GET", "ftp://localhost"),
+			vars:        map[string]string{},
+			host:        "",
+			path:        "",
+			shouldMatch: true,
+		},
+		{
+			title:       "Schemes route, bad scheme",
+			route:       new(Route).Schemes("https", "ftp"),
+			request:     newRequest("GET", "http://localhost"),
+			vars:        map[string]string{},
+			host:        "",
+			path:        "",
+			shouldMatch: false,
+		},
+	}
+	for _, test := range tests {
+		testRoute(t, test)
+	}
+}
+
+func TestMatcherFunc(t *testing.T) {
+	m := func(r *http.Request, m *RouteMatch) bool {
+		return r.URL.Host == "aaa.bbb.ccc"
+	}
+
+	tests := []routeTest{
+		{
+			title:       "MatcherFunc route, match",
+			route:       new(Route).MatcherFunc(m),
+			request:     newRequest("GET", "http://aaa.bbb.ccc"),
+			vars:        map[string]string{},
+			host:        "",
+			path:        "",
+			shouldMatch: true,
+		},
+		{
+			title:       "MatcherFunc route, non-match",
+			route:       new(Route).MatcherFunc(m),
+			request:     newRequest("GET", "http://aaa.222.ccc"),
+			vars:        map[string]string{},
+			host:        "",
+			path:        "",
+			shouldMatch: false,
+		},
+	}
+
+	for _, test := range tests {
+		testRoute(t, test)
+	}
+}
+
+func TestBuildVarsFunc(t *testing.T) {
+	tests := []routeTest{
+		{
+			title: "BuildVarsFunc set on route",
+			route: new(Route).Path(`/111/{v1:\d}{v2:.*}`).BuildVarsFunc(func(vars map[string]string) map[string]string {
+				vars["v1"] = "3"
+				vars["v2"] = "a"
+				return vars
+			}),
+			request:     newRequest("GET", "http://localhost/111/2"),
+			path:        "/111/3a",
+			shouldMatch: true,
+		},
+		{
+			title: "BuildVarsFunc set on route and parent route",
+			route: new(Route).PathPrefix(`/{v1:\d}`).BuildVarsFunc(func(vars map[string]string) map[string]string {
+				vars["v1"] = "2"
+				return vars
+			}).Subrouter().Path(`/{v2:\w}`).BuildVarsFunc(func(vars map[string]string) map[string]string {
+				vars["v2"] = "b"
+				return vars
+			}),
+			request:     newRequest("GET", "http://localhost/1/a"),
+			path:        "/2/b",
+			shouldMatch: true,
+		},
+	}
+
+	for _, test := range tests {
+		testRoute(t, test)
+	}
+}
+
+func TestSubRouter(t *testing.T) {
+	subrouter1 := new(Route).Host("{v1:[a-z]+}.google.com").Subrouter()
+	subrouter2 := new(Route).PathPrefix("/foo/{v1}").Subrouter()
+
+	tests := []routeTest{
+		{
+			route:       subrouter1.Path("/{v2:[a-z]+}"),
+			request:     newRequest("GET", "http://aaa.google.com/bbb"),
+			vars:        map[string]string{"v1": "aaa", "v2": "bbb"},
+			host:        "aaa.google.com",
+			path:        "/bbb",
+			shouldMatch: true,
+		},
+		{
+			route:       subrouter1.Path("/{v2:[a-z]+}"),
+			request:     newRequest("GET", "http://111.google.com/111"),
+			vars:        map[string]string{"v1": "aaa", "v2": "bbb"},
+			host:        "aaa.google.com",
+			path:        "/bbb",
+			shouldMatch: false,
+		},
+		{
+			route:       subrouter2.Path("/baz/{v2}"),
+			request:     newRequest("GET", "http://localhost/foo/bar/baz/ding"),
+			vars:        map[string]string{"v1": "bar", "v2": "ding"},
+			host:        "",
+			path:        "/foo/bar/baz/ding",
+			shouldMatch: true,
+		},
+		{
+			route:       subrouter2.Path("/baz/{v2}"),
+			request:     newRequest("GET", "http://localhost/foo/bar"),
+			vars:        map[string]string{"v1": "bar", "v2": "ding"},
+			host:        "",
+			path:        "/foo/bar/baz/ding",
+			shouldMatch: false,
+		},
+	}
+
+	for _, test := range tests {
+		testRoute(t, test)
+	}
+}
+
+func TestNamedRoutes(t *testing.T) {
+	r1 := NewRouter()
+	r1.NewRoute().Name("a")
+	r1.NewRoute().Name("b")
+	r1.NewRoute().Name("c")
+
+	r2 := r1.NewRoute().Subrouter()
+	r2.NewRoute().Name("d")
+	r2.NewRoute().Name("e")
+	r2.NewRoute().Name("f")
+
+	r3 := r2.NewRoute().Subrouter()
+	r3.NewRoute().Name("g")
+	r3.NewRoute().Name("h")
+	r3.NewRoute().Name("i")
+
+	if r1.namedRoutes == nil || len(r1.namedRoutes) != 9 {
+
t.Errorf("Expected 9 named routes, got %v", r1.namedRoutes) + } else if r1.Get("i") == nil { + t.Errorf("Subroute name not registered") + } +} + +func TestStrictSlash(t *testing.T) { + r := NewRouter() + r.StrictSlash(true) + + tests := []routeTest{ + { + title: "Redirect path without slash", + route: r.NewRoute().Path("/111/"), + request: newRequest("GET", "http://localhost/111"), + vars: map[string]string{}, + host: "", + path: "/111/", + shouldMatch: true, + shouldRedirect: true, + }, + { + title: "Do not redirect path with slash", + route: r.NewRoute().Path("/111/"), + request: newRequest("GET", "http://localhost/111/"), + vars: map[string]string{}, + host: "", + path: "/111/", + shouldMatch: true, + shouldRedirect: false, + }, + { + title: "Redirect path with slash", + route: r.NewRoute().Path("/111"), + request: newRequest("GET", "http://localhost/111/"), + vars: map[string]string{}, + host: "", + path: "/111", + shouldMatch: true, + shouldRedirect: true, + }, + { + title: "Do not redirect path without slash", + route: r.NewRoute().Path("/111"), + request: newRequest("GET", "http://localhost/111"), + vars: map[string]string{}, + host: "", + path: "/111", + shouldMatch: true, + shouldRedirect: false, + }, + { + title: "Propagate StrictSlash to subrouters", + route: r.NewRoute().PathPrefix("/static/").Subrouter().Path("/images/"), + request: newRequest("GET", "http://localhost/static/images"), + vars: map[string]string{}, + host: "", + path: "/static/images/", + shouldMatch: true, + shouldRedirect: true, + }, + { + title: "Ignore StrictSlash for path prefix", + route: r.NewRoute().PathPrefix("/static/"), + request: newRequest("GET", "http://localhost/static/logo.png"), + vars: map[string]string{}, + host: "", + path: "/static/", + shouldMatch: true, + shouldRedirect: false, + }, + } + + for _, test := range tests { + testRoute(t, test) + } +} + +func TestWalkSingleDepth(t *testing.T) { + r0 := NewRouter() + r1 := NewRouter() + r2 := NewRouter() + + r0.Path("/g") + r0.Path("/o") + r0.Path("/d").Handler(r1) + r0.Path("/r").Handler(r2) + r0.Path("/a") + + r1.Path("/z") + r1.Path("/i") + r1.Path("/l") + r1.Path("/l") + + r2.Path("/i") + r2.Path("/l") + r2.Path("/l") + + paths := []string{"g", "o", "r", "i", "l", "l", "a"} + depths := []int{0, 0, 0, 1, 1, 1, 0} + i := 0 + err := r0.Walk(func(route *Route, router *Router, ancestors []*Route) error { + matcher := route.matchers[0].(*routeRegexp) + if matcher.template == "/d" { + return SkipRouter + } + if len(ancestors) != depths[i] { + t.Errorf(`Expected depth of %d at i = %d; got "%d"`, depths[i], i, len(ancestors)) + } + if matcher.template != "/"+paths[i] { + t.Errorf(`Expected "/%s" at i = %d; got "%s"`, paths[i], i, matcher.template) + } + i++ + return nil + }) + if err != nil { + panic(err) + } + if i != len(paths) { + t.Errorf("Expected %d routes, found %d", len(paths), i) + } +} + +func TestWalkNested(t *testing.T) { + router := NewRouter() + + g := router.Path("/g").Subrouter() + o := g.PathPrefix("/o").Subrouter() + r := o.PathPrefix("/r").Subrouter() + i := r.PathPrefix("/i").Subrouter() + l1 := i.PathPrefix("/l").Subrouter() + l2 := l1.PathPrefix("/l").Subrouter() + l2.Path("/a") + + paths := []string{"/g", "/g/o", "/g/o/r", "/g/o/r/i", "/g/o/r/i/l", "/g/o/r/i/l/l", "/g/o/r/i/l/l/a"} + idx := 0 + err := router.Walk(func(route *Route, router *Router, ancestors []*Route) error { + path := paths[idx] + tpl := route.regexp.path.template + if tpl != path { + t.Errorf(`Expected %s got %s`, path, tpl) + } + idx++ + return nil + }) + if 
err != nil { + panic(err) + } + if idx != len(paths) { + t.Errorf("Expected %d routes, found %d", len(paths), idx) + } +} + +func TestSubrouterErrorHandling(t *testing.T) { + superRouterCalled := false + subRouterCalled := false + + router := NewRouter() + router.NotFoundHandler = http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + superRouterCalled = true + }) + subRouter := router.PathPrefix("/bign8").Subrouter() + subRouter.NotFoundHandler = http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + subRouterCalled = true + }) + + req, _ := http.NewRequest("GET", "http://localhost/bign8/was/here", nil) + router.ServeHTTP(NewRecorder(), req) + + if superRouterCalled { + t.Error("Super router 404 handler called when sub-router 404 handler is available.") + } + if !subRouterCalled { + t.Error("Sub-router 404 handler was not called.") + } +} + +// ---------------------------------------------------------------------------- +// Helpers +// ---------------------------------------------------------------------------- + +func getRouteTemplate(route *Route) string { + host, path := "none", "none" + if route.regexp != nil { + if route.regexp.host != nil { + host = route.regexp.host.template + } + if route.regexp.path != nil { + path = route.regexp.path.template + } + } + return fmt.Sprintf("Host: %v, Path: %v", host, path) +} + +func testRoute(t *testing.T, test routeTest) { + request := test.request + route := test.route + vars := test.vars + shouldMatch := test.shouldMatch + host := test.host + path := test.path + url := test.host + test.path + shouldRedirect := test.shouldRedirect + + var match RouteMatch + ok := route.Match(request, &match) + if ok != shouldMatch { + msg := "Should match" + if !shouldMatch { + msg = "Should not match" + } + t.Errorf("(%v) %v:\nRoute: %#v\nRequest: %#v\nVars: %v\n", test.title, msg, route, request, vars) + return + } + if shouldMatch { + if test.vars != nil && !stringMapEqual(test.vars, match.Vars) { + t.Errorf("(%v) Vars not equal: expected %v, got %v", test.title, vars, match.Vars) + return + } + if host != "" { + u, _ := test.route.URLHost(mapToPairs(match.Vars)...) + if host != u.Host { + t.Errorf("(%v) URLHost not equal: expected %v, got %v -- %v", test.title, host, u.Host, getRouteTemplate(route)) + return + } + } + if path != "" { + u, _ := route.URLPath(mapToPairs(match.Vars)...) + if path != u.Path { + t.Errorf("(%v) URLPath not equal: expected %v, got %v -- %v", test.title, path, u.Path, getRouteTemplate(route)) + return + } + } + if url != "" { + u, _ := route.URL(mapToPairs(match.Vars)...) 
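+			// route.URL builds a URL from both the host and path templates.
+			// A hedged sketch of the API outside this helper (the template
+			// and values below are illustrative):
+			//
+			//	rt := new(Route).Host("{sub}.example.com").Path("/p/{id}")
+			//	u, _ := rt.URL("sub", "api", "id", "42")
+			//	// u.String() == "http://api.example.com/p/42"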
+			if url != u.Host+u.Path {
+				t.Errorf("(%v) URL not equal: expected %v, got %v -- %v", test.title, url, u.Host+u.Path, getRouteTemplate(route))
+				return
+			}
+		}
+		if shouldRedirect && match.Handler == nil {
+			t.Errorf("(%v) Did not redirect", test.title)
+			return
+		}
+		if !shouldRedirect && match.Handler != nil {
+			t.Errorf("(%v) Unexpected redirect", test.title)
+			return
+		}
+	}
+}
+
+// Tests that the context is cleared or not cleared properly depending on
+// the configuration of the router
+func TestKeepContext(t *testing.T) {
+	func1 := func(w http.ResponseWriter, r *http.Request) {}
+
+	r := NewRouter()
+	r.HandleFunc("/", func1).Name("func1")
+
+	req, _ := http.NewRequest("GET", "http://localhost/", nil)
+	context.Set(req, "t", 1)
+
+	// Use the test recorder rather than a nil http.ResponseWriter; the
+	// handler never writes to it.
+	res := NewRecorder()
+	r.ServeHTTP(res, req)
+
+	if _, ok := context.GetOk(req, "t"); ok {
+		t.Error("Context should have been cleared at end of request")
+	}
+
+	r.KeepContext = true
+
+	req, _ = http.NewRequest("GET", "http://localhost/", nil)
+	context.Set(req, "t", 1)
+
+	r.ServeHTTP(res, req)
+	if _, ok := context.GetOk(req, "t"); !ok {
+		t.Error("Context should NOT have been cleared at end of request")
+	}
+
+}
+
+type TestA301ResponseWriter struct {
+	hh     http.Header
+	status int
+}
+
+func (ho *TestA301ResponseWriter) Header() http.Header {
+	return http.Header(ho.hh)
+}
+
+func (ho *TestA301ResponseWriter) Write(b []byte) (int, error) {
+	return 0, nil
+}
+
+// WriteHeader records the status code. The receiver must be a pointer;
+// with a value receiver the status would be written to a copy and lost.
+func (ho *TestA301ResponseWriter) WriteHeader(code int) {
+	ho.status = code
+}
+
+func Test301Redirect(t *testing.T) {
+	m := make(http.Header)
+
+	func1 := func(w http.ResponseWriter, r *http.Request) {}
+	func2 := func(w http.ResponseWriter, r *http.Request) {}
+
+	r := NewRouter()
+	r.HandleFunc("/api/", func2).Name("func2")
+	r.HandleFunc("/", func1).Name("func1")
+
+	req, _ := http.NewRequest("GET", "http://localhost//api/?abc=def", nil)
+
+	res := TestA301ResponseWriter{
+		hh:     m,
+		status: 0,
+	}
+	r.ServeHTTP(&res, req)
+
+	if "http://localhost/api/?abc=def" != res.hh["Location"][0] {
+		t.Errorf("Should have complete URL with query string")
+	}
+}
+
+// https://plus.google.com/101022900381697718949/posts/eWy6DjFJ6uW
+func TestSubrouterHeader(t *testing.T) {
+	expected := "func1 response"
+	func1 := func(w http.ResponseWriter, r *http.Request) {
+		fmt.Fprint(w, expected)
+	}
+	func2 := func(http.ResponseWriter, *http.Request) {}
+
+	r := NewRouter()
+	s := r.Headers("SomeSpecialHeader", "").Subrouter()
+	s.HandleFunc("/", func1).Name("func1")
+	r.HandleFunc("/", func2).Name("func2")
+
+	req, _ := http.NewRequest("GET", "http://localhost/", nil)
+	req.Header.Add("SomeSpecialHeader", "foo")
+	match := new(RouteMatch)
+	matched := r.Match(req, match)
+	if !matched {
+		t.Errorf("Should match request")
+	}
+	if match.Route.GetName() != "func1" {
+		t.Errorf("Expecting func1 handler, got %s", match.Route.GetName())
+	}
+	resp := NewRecorder()
+	match.Handler.ServeHTTP(resp, req)
+	if resp.Body.String() != expected {
+		t.Errorf("Expecting %q, got %q", expected, resp.Body.String())
+	}
+}
+
+// mapToPairs converts a string map to a slice of string pairs
+func mapToPairs(m map[string]string) []string {
+	var i int
+	p := make([]string, len(m)*2)
+	for k, v := range m {
+		p[i] = k
+		p[i+1] = v
+		i += 2
+	}
+	return p
+}
+
+// stringMapEqual checks the equality of two string maps
+func stringMapEqual(m1, m2 map[string]string) bool {
+	nil1 := m1 == nil
+	nil2 := m2 == nil
+	if nil1 != nil2 || len(m1) != len(m2) {
+		return false
+	}
+	for k, v := range m1 {
+		if v != m2[k] {
+			return false
+		}
+	}
+	return true
+}
+
+// newRequest is a helper function to create a new request with a method and url
+func newRequest(method, url string) *http.Request {
+	req, err := http.NewRequest(method, url, nil)
+	if err != nil {
+		panic(err)
+	}
+	return req
+}
diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux/old_test.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux/old_test.go
new file mode 100644
index 000000000..755db483e
--- /dev/null
+++ b/vendor/github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux/old_test.go
@@ -0,0 +1,714 @@
+// Old tests ported to Go1. This is a mess. Want to drop it one day.
+
+// Copyright 2011 Gorilla Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package mux
+
+import (
+	"bytes"
+	"net/http"
+	"testing"
+)
+
+// ----------------------------------------------------------------------------
+// ResponseRecorder
+// ----------------------------------------------------------------------------
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// ResponseRecorder is an implementation of http.ResponseWriter that
+// records its mutations for later inspection in tests.
+type ResponseRecorder struct {
+	Code      int           // the HTTP response code from WriteHeader
+	HeaderMap http.Header   // the HTTP response headers
+	Body      *bytes.Buffer // if non-nil, the bytes.Buffer to append written data to
+	Flushed   bool
+}
+
+// NewRecorder returns an initialized ResponseRecorder.
+func NewRecorder() *ResponseRecorder {
+	return &ResponseRecorder{
+		HeaderMap: make(http.Header),
+		Body:      new(bytes.Buffer),
+	}
+}
+
+// DefaultRemoteAddr is the default remote address to return in RemoteAddr if
+// an explicit DefaultRemoteAddr isn't set on ResponseRecorder.
+const DefaultRemoteAddr = "1.2.3.4"
+
+// Header returns the response headers.
+func (rw *ResponseRecorder) Header() http.Header {
+	return rw.HeaderMap
+}
+
+// Write always succeeds and writes to rw.Body, if not nil.
+func (rw *ResponseRecorder) Write(buf []byte) (int, error) {
+	if rw.Body != nil {
+		rw.Body.Write(buf)
+	}
+	if rw.Code == 0 {
+		rw.Code = http.StatusOK
+	}
+	return len(buf), nil
+}
+
+// WriteHeader sets rw.Code.
+func (rw *ResponseRecorder) WriteHeader(code int) {
+	rw.Code = code
+}
+
+// Flush sets rw.Flushed to true.
+func (rw *ResponseRecorder) Flush() {
+	rw.Flushed = true
+}
+
+// ----------------------------------------------------------------------------
+
+func TestRouteMatchers(t *testing.T) {
+	var scheme, host, path, query, method string
+	var headers map[string]string
+	var resultVars map[bool]map[string]string
+
+	router := NewRouter()
+	router.NewRoute().Host("{var1}.google.com").
+		Path("/{var2:[a-z]+}/{var3:[0-9]+}").
+		Queries("foo", "bar").
+		Methods("GET").
+		Schemes("https").
+		Headers("x-requested-with", "XMLHttpRequest")
+	router.NewRoute().Host("www.{var4}.com").
+		PathPrefix("/foo/{var5:[a-z]+}/{var6:[0-9]+}").
+		Queries("baz", "ding").
+		Methods("POST").
+		Schemes("http").
+		Headers("Content-Type", "application/json")
+
+	reset := func() {
+		// Everything matches.
+		scheme = "https"
+		host = "www.google.com"
+		path = "/product/42"
+		query = "?foo=bar"
+		method = "GET"
+		headers = map[string]string{"X-Requested-With": "XMLHttpRequest"}
+		resultVars = map[bool]map[string]string{
+			true:  {"var1": "www", "var2": "product", "var3": "42"},
+			false: {},
+		}
+	}
+
+	reset2 := func() {
+		// Everything matches.
+		scheme = "http"
+		host = "www.google.com"
+		path = "/foo/product/42/path/that/is/ignored"
+		query = "?baz=ding"
+		method = "POST"
+		headers = map[string]string{"Content-Type": "application/json"}
+		resultVars = map[bool]map[string]string{
+			true:  {"var4": "google", "var5": "product", "var6": "42"},
+			false: {},
+		}
+	}
+
+	match := func(shouldMatch bool) {
+		url := scheme + "://" + host + path + query
+		request, _ := http.NewRequest(method, url, nil)
+		for key, value := range headers {
+			request.Header.Add(key, value)
+		}
+
+		var routeMatch RouteMatch
+		matched := router.Match(request, &routeMatch)
+		if matched != shouldMatch {
+			// Need better messages. :)
+			if matched {
+				t.Errorf("Should match.")
+			} else {
+				t.Errorf("Should not match.")
+			}
+		}
+
+		if matched {
+			currentRoute := routeMatch.Route
+			if currentRoute == nil {
+				t.Errorf("Expected a current route.")
+			}
+			vars := routeMatch.Vars
+			expectedVars := resultVars[shouldMatch]
+			if len(vars) != len(expectedVars) {
+				t.Errorf("Expected vars: %v Got: %v.", expectedVars, vars)
+			}
+			for name, value := range vars {
+				if expectedVars[name] != value {
+					t.Errorf("Expected vars: %v Got: %v.", expectedVars, vars)
+				}
+			}
+		}
+	}
+
+	// 1st route --------------------------------------------------------------
+
+	// Everything matches.
+	reset()
+	match(true)
+
+	// Scheme doesn't match.
+	reset()
+	scheme = "http"
+	match(false)
+
+	// Host doesn't match.
+	reset()
+	host = "www.mygoogle.com"
+	match(false)
+
+	// Path doesn't match.
+	reset()
+	path = "/product/notdigits"
+	match(false)
+
+	// Query doesn't match.
+	reset()
+	query = "?foo=baz"
+	match(false)
+
+	// Method doesn't match.
+	reset()
+	method = "POST"
+	match(false)
+
+	// Header doesn't match.
+	reset()
+	headers = map[string]string{}
+	match(false)
+
+	// Everything matches, again.
+	reset()
+	match(true)
+
+	// 2nd route --------------------------------------------------------------
+
+	// Everything matches.
+	reset2()
+	match(true)
+
+	// Scheme doesn't match.
+	reset2()
+	scheme = "https"
+	match(false)
+
+	// Host doesn't match.
+	reset2()
+	host = "sub.google.com"
+	match(false)
+
+	// Path doesn't match.
+	reset2()
+	path = "/bar/product/42"
+	match(false)
+
+	// Query doesn't match.
+	reset2()
+	query = "?foo=baz"
+	match(false)
+
+	// Method doesn't match.
+	reset2()
+	method = "GET"
+	match(false)
+
+	// Header doesn't match.
+	reset2()
+	headers = map[string]string{}
+	match(false)
+
+	// Everything matches, again.
+ reset2() + match(true) +} + +type headerMatcherTest struct { + matcher headerMatcher + headers map[string]string + result bool +} + +var headerMatcherTests = []headerMatcherTest{ + { + matcher: headerMatcher(map[string]string{"x-requested-with": "XMLHttpRequest"}), + headers: map[string]string{"X-Requested-With": "XMLHttpRequest"}, + result: true, + }, + { + matcher: headerMatcher(map[string]string{"x-requested-with": ""}), + headers: map[string]string{"X-Requested-With": "anything"}, + result: true, + }, + { + matcher: headerMatcher(map[string]string{"x-requested-with": "XMLHttpRequest"}), + headers: map[string]string{}, + result: false, + }, +} + +type hostMatcherTest struct { + matcher *Route + url string + vars map[string]string + result bool +} + +var hostMatcherTests = []hostMatcherTest{ + { + matcher: NewRouter().NewRoute().Host("{foo:[a-z][a-z][a-z]}.{bar:[a-z][a-z][a-z]}.{baz:[a-z][a-z][a-z]}"), + url: "http://abc.def.ghi/", + vars: map[string]string{"foo": "abc", "bar": "def", "baz": "ghi"}, + result: true, + }, + { + matcher: NewRouter().NewRoute().Host("{foo:[a-z][a-z][a-z]}.{bar:[a-z][a-z][a-z]}.{baz:[a-z][a-z][a-z]}"), + url: "http://a.b.c/", + vars: map[string]string{"foo": "abc", "bar": "def", "baz": "ghi"}, + result: false, + }, +} + +type methodMatcherTest struct { + matcher methodMatcher + method string + result bool +} + +var methodMatcherTests = []methodMatcherTest{ + { + matcher: methodMatcher([]string{"GET", "POST", "PUT"}), + method: "GET", + result: true, + }, + { + matcher: methodMatcher([]string{"GET", "POST", "PUT"}), + method: "POST", + result: true, + }, + { + matcher: methodMatcher([]string{"GET", "POST", "PUT"}), + method: "PUT", + result: true, + }, + { + matcher: methodMatcher([]string{"GET", "POST", "PUT"}), + method: "DELETE", + result: false, + }, +} + +type pathMatcherTest struct { + matcher *Route + url string + vars map[string]string + result bool +} + +var pathMatcherTests = []pathMatcherTest{ + { + matcher: NewRouter().NewRoute().Path("/{foo:[0-9][0-9][0-9]}/{bar:[0-9][0-9][0-9]}/{baz:[0-9][0-9][0-9]}"), + url: "http://localhost:8080/123/456/789", + vars: map[string]string{"foo": "123", "bar": "456", "baz": "789"}, + result: true, + }, + { + matcher: NewRouter().NewRoute().Path("/{foo:[0-9][0-9][0-9]}/{bar:[0-9][0-9][0-9]}/{baz:[0-9][0-9][0-9]}"), + url: "http://localhost:8080/1/2/3", + vars: map[string]string{"foo": "123", "bar": "456", "baz": "789"}, + result: false, + }, +} + +type schemeMatcherTest struct { + matcher schemeMatcher + url string + result bool +} + +var schemeMatcherTests = []schemeMatcherTest{ + { + matcher: schemeMatcher([]string{"http", "https"}), + url: "http://localhost:8080/", + result: true, + }, + { + matcher: schemeMatcher([]string{"http", "https"}), + url: "https://localhost:8080/", + result: true, + }, + { + matcher: schemeMatcher([]string{"https"}), + url: "http://localhost:8080/", + result: false, + }, + { + matcher: schemeMatcher([]string{"http"}), + url: "https://localhost:8080/", + result: false, + }, +} + +type urlBuildingTest struct { + route *Route + vars []string + url string +} + +var urlBuildingTests = []urlBuildingTest{ + { + route: new(Route).Host("foo.domain.com"), + vars: []string{}, + url: "http://foo.domain.com", + }, + { + route: new(Route).Host("{subdomain}.domain.com"), + vars: []string{"subdomain", "bar"}, + url: "http://bar.domain.com", + }, + { + route: new(Route).Host("foo.domain.com").Path("/articles"), + vars: []string{}, + url: "http://foo.domain.com/articles", + }, + { + route: 
new(Route).Path("/articles"), + vars: []string{}, + url: "/articles", + }, + { + route: new(Route).Path("/articles/{category}/{id:[0-9]+}"), + vars: []string{"category", "technology", "id", "42"}, + url: "/articles/technology/42", + }, + { + route: new(Route).Host("{subdomain}.domain.com").Path("/articles/{category}/{id:[0-9]+}"), + vars: []string{"subdomain", "foo", "category", "technology", "id", "42"}, + url: "http://foo.domain.com/articles/technology/42", + }, +} + +func TestHeaderMatcher(t *testing.T) { + for _, v := range headerMatcherTests { + request, _ := http.NewRequest("GET", "http://localhost:8080/", nil) + for key, value := range v.headers { + request.Header.Add(key, value) + } + var routeMatch RouteMatch + result := v.matcher.Match(request, &routeMatch) + if result != v.result { + if v.result { + t.Errorf("%#v: should match %v.", v.matcher, request.Header) + } else { + t.Errorf("%#v: should not match %v.", v.matcher, request.Header) + } + } + } +} + +func TestHostMatcher(t *testing.T) { + for _, v := range hostMatcherTests { + request, _ := http.NewRequest("GET", v.url, nil) + var routeMatch RouteMatch + result := v.matcher.Match(request, &routeMatch) + vars := routeMatch.Vars + if result != v.result { + if v.result { + t.Errorf("%#v: should match %v.", v.matcher, v.url) + } else { + t.Errorf("%#v: should not match %v.", v.matcher, v.url) + } + } + if result { + if len(vars) != len(v.vars) { + t.Errorf("%#v: vars length should be %v, got %v.", v.matcher, len(v.vars), len(vars)) + } + for name, value := range vars { + if v.vars[name] != value { + t.Errorf("%#v: expected value %v for key %v, got %v.", v.matcher, v.vars[name], name, value) + } + } + } else { + if len(vars) != 0 { + t.Errorf("%#v: vars length should be 0, got %v.", v.matcher, len(vars)) + } + } + } +} + +func TestMethodMatcher(t *testing.T) { + for _, v := range methodMatcherTests { + request, _ := http.NewRequest(v.method, "http://localhost:8080/", nil) + var routeMatch RouteMatch + result := v.matcher.Match(request, &routeMatch) + if result != v.result { + if v.result { + t.Errorf("%#v: should match %v.", v.matcher, v.method) + } else { + t.Errorf("%#v: should not match %v.", v.matcher, v.method) + } + } + } +} + +func TestPathMatcher(t *testing.T) { + for _, v := range pathMatcherTests { + request, _ := http.NewRequest("GET", v.url, nil) + var routeMatch RouteMatch + result := v.matcher.Match(request, &routeMatch) + vars := routeMatch.Vars + if result != v.result { + if v.result { + t.Errorf("%#v: should match %v.", v.matcher, v.url) + } else { + t.Errorf("%#v: should not match %v.", v.matcher, v.url) + } + } + if result { + if len(vars) != len(v.vars) { + t.Errorf("%#v: vars length should be %v, got %v.", v.matcher, len(v.vars), len(vars)) + } + for name, value := range vars { + if v.vars[name] != value { + t.Errorf("%#v: expected value %v for key %v, got %v.", v.matcher, v.vars[name], name, value) + } + } + } else { + if len(vars) != 0 { + t.Errorf("%#v: vars length should be 0, got %v.", v.matcher, len(vars)) + } + } + } +} + +func TestSchemeMatcher(t *testing.T) { + for _, v := range schemeMatcherTests { + request, _ := http.NewRequest("GET", v.url, nil) + var routeMatch RouteMatch + result := v.matcher.Match(request, &routeMatch) + if result != v.result { + if v.result { + t.Errorf("%#v: should match %v.", v.matcher, v.url) + } else { + t.Errorf("%#v: should not match %v.", v.matcher, v.url) + } + } + } +} + +func TestUrlBuilding(t *testing.T) { + + for _, v := range urlBuildingTests { + u, _ := 
v.route.URL(v.vars...) + url := u.String() + if url != v.url { + t.Errorf("expected %v, got %v", v.url, url) + /* + reversePath := "" + reverseHost := "" + if v.route.pathTemplate != nil { + reversePath = v.route.pathTemplate.Reverse + } + if v.route.hostTemplate != nil { + reverseHost = v.route.hostTemplate.Reverse + } + + t.Errorf("%#v:\nexpected: %q\ngot: %q\nreverse path: %q\nreverse host: %q", v.route, v.url, url, reversePath, reverseHost) + */ + } + } + + ArticleHandler := func(w http.ResponseWriter, r *http.Request) { + } + + router := NewRouter() + router.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler).Name("article") + + url, _ := router.Get("article").URL("category", "technology", "id", "42") + expected := "/articles/technology/42" + if url.String() != expected { + t.Errorf("Expected %v, got %v", expected, url.String()) + } +} + +func TestMatchedRouteName(t *testing.T) { + routeName := "stock" + router := NewRouter() + route := router.NewRoute().Path("/products/").Name(routeName) + + url := "http://www.example.com/products/" + request, _ := http.NewRequest("GET", url, nil) + var rv RouteMatch + ok := router.Match(request, &rv) + + if !ok || rv.Route != route { + t.Errorf("Expected same route, got %+v.", rv.Route) + } + + retName := rv.Route.GetName() + if retName != routeName { + t.Errorf("Expected %q, got %q.", routeName, retName) + } +} + +func TestSubRouting(t *testing.T) { + // Example from docs. + router := NewRouter() + subrouter := router.NewRoute().Host("www.example.com").Subrouter() + route := subrouter.NewRoute().Path("/products/").Name("products") + + url := "http://www.example.com/products/" + request, _ := http.NewRequest("GET", url, nil) + var rv RouteMatch + ok := router.Match(request, &rv) + + if !ok || rv.Route != route { + t.Errorf("Expected same route, got %+v.", rv.Route) + } + + u, _ := router.Get("products").URL() + builtUrl := u.String() + // Yay, subroute aware of the domain when building! 
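+	// By contrast (a hedged sketch; the route below is illustrative), a route
+	// registered without a Host() matcher builds only the path:
+	//
+	//	r2 := NewRouter()
+	//	r2.NewRoute().Path("/products/").Name("p2")
+	//	u2, _ := r2.Get("p2").URL() // u2.String() == "/products/"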
+ if builtUrl != url { + t.Errorf("Expected %q, got %q.", url, builtUrl) + } +} + +func TestVariableNames(t *testing.T) { + route := new(Route).Host("{arg1}.domain.com").Path("/{arg1}/{arg2:[0-9]+}") + if route.err == nil { + t.Errorf("Expected error for duplicated variable names") + } +} + +func TestRedirectSlash(t *testing.T) { + var route *Route + var routeMatch RouteMatch + r := NewRouter() + + r.StrictSlash(false) + route = r.NewRoute() + if route.strictSlash != false { + t.Errorf("Expected false redirectSlash.") + } + + r.StrictSlash(true) + route = r.NewRoute() + if route.strictSlash != true { + t.Errorf("Expected true redirectSlash.") + } + + route = new(Route) + route.strictSlash = true + route.Path("/{arg1}/{arg2:[0-9]+}/") + request, _ := http.NewRequest("GET", "http://localhost/foo/123", nil) + routeMatch = RouteMatch{} + _ = route.Match(request, &routeMatch) + vars := routeMatch.Vars + if vars["arg1"] != "foo" { + t.Errorf("Expected foo.") + } + if vars["arg2"] != "123" { + t.Errorf("Expected 123.") + } + rsp := NewRecorder() + routeMatch.Handler.ServeHTTP(rsp, request) + if rsp.HeaderMap.Get("Location") != "http://localhost/foo/123/" { + t.Errorf("Expected redirect header.") + } + + route = new(Route) + route.strictSlash = true + route.Path("/{arg1}/{arg2:[0-9]+}") + request, _ = http.NewRequest("GET", "http://localhost/foo/123/", nil) + routeMatch = RouteMatch{} + _ = route.Match(request, &routeMatch) + vars = routeMatch.Vars + if vars["arg1"] != "foo" { + t.Errorf("Expected foo.") + } + if vars["arg2"] != "123" { + t.Errorf("Expected 123.") + } + rsp = NewRecorder() + routeMatch.Handler.ServeHTTP(rsp, request) + if rsp.HeaderMap.Get("Location") != "http://localhost/foo/123" { + t.Errorf("Expected redirect header.") + } +} + +// Test for the new regexp library, still not available in stable Go. +func TestNewRegexp(t *testing.T) { + var p *routeRegexp + var matches []string + + tests := map[string]map[string][]string{ + "/{foo:a{2}}": { + "/a": nil, + "/aa": {"aa"}, + "/aaa": nil, + "/aaaa": nil, + }, + "/{foo:a{2,}}": { + "/a": nil, + "/aa": {"aa"}, + "/aaa": {"aaa"}, + "/aaaa": {"aaaa"}, + }, + "/{foo:a{2,3}}": { + "/a": nil, + "/aa": {"aa"}, + "/aaa": {"aaa"}, + "/aaaa": nil, + }, + "/{foo:[a-z]{3}}/{bar:[a-z]{2}}": { + "/a": nil, + "/ab": nil, + "/abc": nil, + "/abcd": nil, + "/abc/ab": {"abc", "ab"}, + "/abc/abc": nil, + "/abcd/ab": nil, + }, + `/{foo:\w{3,}}/{bar:\d{2,}}`: { + "/a": nil, + "/ab": nil, + "/abc": nil, + "/abc/1": nil, + "/abc/12": {"abc", "12"}, + "/abcd/12": {"abcd", "12"}, + "/abcd/123": {"abcd", "123"}, + }, + } + + for pattern, paths := range tests { + p, _ = newRouteRegexp(pattern, false, false, false, false) + for path, result := range paths { + matches = p.regexp.FindStringSubmatch(path) + if result == nil { + if matches != nil { + t.Errorf("%v should not match %v.", pattern, path) + } + } else { + if len(matches) != len(result)+1 { + t.Errorf("Expected %v matches, got %v.", len(result)+1, len(matches)) + } else { + for k, v := range result { + if matches[k+1] != v { + t.Errorf("Expected %v, got %v.", v, matches[k+1]) + } + } + } + } + } + } +} diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux/regexp.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux/regexp.go new file mode 100644 index 000000000..06728dd54 --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux/regexp.go @@ -0,0 +1,317 @@ +// Copyright 2012 The Gorilla Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package mux
+
+import (
+	"bytes"
+	"fmt"
+	"net/http"
+	"net/url"
+	"regexp"
+	"strconv"
+	"strings"
+)
+
+// newRouteRegexp parses a route template and returns a routeRegexp,
+// used to match a host, a path or a query string.
+//
+// It will extract named variables, assemble a regexp to be matched, create
+// a "reverse" template to build URLs and compile regexps to validate variable
+// values used in URL building.
+//
+// Previously we accepted only Python-like identifiers for variable
+// names ([a-zA-Z_][a-zA-Z0-9_]*), but currently the only restriction is that
+// name and pattern can't be empty, and names can't contain a colon.
+func newRouteRegexp(tpl string, matchHost, matchPrefix, matchQuery, strictSlash bool) (*routeRegexp, error) {
+	// Check if it is well-formed.
+	idxs, errBraces := braceIndices(tpl)
+	if errBraces != nil {
+		return nil, errBraces
+	}
+	// Back up the original.
+	template := tpl
+	// Now let's parse it.
+	defaultPattern := "[^/]+"
+	if matchQuery {
+		defaultPattern = "[^?&]*"
+	} else if matchHost {
+		defaultPattern = "[^.]+"
+		matchPrefix = false
+	}
+	// Only match strict slash if not matching a host, a path prefix or a
+	// query string.
+	if matchPrefix || matchHost || matchQuery {
+		strictSlash = false
+	}
+	// Set a flag for strictSlash.
+	endSlash := false
+	if strictSlash && strings.HasSuffix(tpl, "/") {
+		tpl = tpl[:len(tpl)-1]
+		endSlash = true
+	}
+	varsN := make([]string, len(idxs)/2)
+	varsR := make([]*regexp.Regexp, len(idxs)/2)
+	pattern := bytes.NewBufferString("")
+	pattern.WriteByte('^')
+	reverse := bytes.NewBufferString("")
+	var end int
+	var err error
+	for i := 0; i < len(idxs); i += 2 {
+		// Set all values we are interested in.
+		raw := tpl[end:idxs[i]]
+		end = idxs[i+1]
+		parts := strings.SplitN(tpl[idxs[i]+1:end-1], ":", 2)
+		name := parts[0]
+		patt := defaultPattern
+		if len(parts) == 2 {
+			patt = parts[1]
+		}
+		// Name or pattern can't be empty.
+		if name == "" || patt == "" {
+			return nil, fmt.Errorf("mux: missing name or pattern in %q",
+				tpl[idxs[i]:end])
+		}
+		// Build the regexp pattern.
+		varIdx := i / 2
+		fmt.Fprintf(pattern, "%s(?P<%s>%s)", regexp.QuoteMeta(raw), varGroupName(varIdx), patt)
+		// Build the reverse template.
+		fmt.Fprintf(reverse, "%s%%s", raw)
+
+		// Append variable name and compiled pattern.
+		varsN[varIdx] = name
+		varsR[varIdx], err = regexp.Compile(fmt.Sprintf("^%s$", patt))
+		if err != nil {
+			return nil, err
+		}
+	}
+	// Add the remaining.
+	raw := tpl[end:]
+	pattern.WriteString(regexp.QuoteMeta(raw))
+	if strictSlash {
+		pattern.WriteString("[/]?")
+	}
+	if matchQuery {
+		// Add the default pattern if the query value is empty.
+		if queryVal := strings.SplitN(template, "=", 2)[1]; queryVal == "" {
+			pattern.WriteString(defaultPattern)
+		}
+	}
+	if !matchPrefix {
+		pattern.WriteByte('$')
+	}
+	reverse.WriteString(raw)
+	if endSlash {
+		reverse.WriteByte('/')
+	}
+	// Compile full regexp.
+	reg, errCompile := regexp.Compile(pattern.String())
+	if errCompile != nil {
+		return nil, errCompile
+	}
+	// Done!
+	return &routeRegexp{
+		template:    template,
+		matchHost:   matchHost,
+		matchQuery:  matchQuery,
+		strictSlash: strictSlash,
+		regexp:      reg,
+		reverse:     reverse.String(),
+		varsN:       varsN,
+		varsR:       varsR,
+	}, nil
+}
+
+// routeRegexp stores a regexp to match a host or path and information to
+// collect and validate route variables.
+type routeRegexp struct {
+	// The unmodified template.
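+	// (e.g. "/articles/{category}/{id:[0-9]+}").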
+ template string + // True for host match, false for path or query string match. + matchHost bool + // True for query string match, false for path and host match. + matchQuery bool + // The strictSlash value defined on the route, but disabled if PathPrefix was used. + strictSlash bool + // Expanded regexp. + regexp *regexp.Regexp + // Reverse template. + reverse string + // Variable names. + varsN []string + // Variable regexps (validators). + varsR []*regexp.Regexp +} + +// Match matches the regexp against the URL host or path. +func (r *routeRegexp) Match(req *http.Request, match *RouteMatch) bool { + if !r.matchHost { + if r.matchQuery { + return r.matchQueryString(req) + } else { + return r.regexp.MatchString(req.URL.Path) + } + } + return r.regexp.MatchString(getHost(req)) +} + +// url builds a URL part using the given values. +func (r *routeRegexp) url(values map[string]string) (string, error) { + urlValues := make([]interface{}, len(r.varsN)) + for k, v := range r.varsN { + value, ok := values[v] + if !ok { + return "", fmt.Errorf("mux: missing route variable %q", v) + } + urlValues[k] = value + } + rv := fmt.Sprintf(r.reverse, urlValues...) + if !r.regexp.MatchString(rv) { + // The URL is checked against the full regexp, instead of checking + // individual variables. This is faster but to provide a good error + // message, we check individual regexps if the URL doesn't match. + for k, v := range r.varsN { + if !r.varsR[k].MatchString(values[v]) { + return "", fmt.Errorf( + "mux: variable %q doesn't match, expected %q", values[v], + r.varsR[k].String()) + } + } + } + return rv, nil +} + +// getUrlQuery returns a single query parameter from a request URL. +// For a URL with foo=bar&baz=ding, we return only the relevant key +// value pair for the routeRegexp. +func (r *routeRegexp) getUrlQuery(req *http.Request) string { + if !r.matchQuery { + return "" + } + templateKey := strings.SplitN(r.template, "=", 2)[0] + for key, vals := range req.URL.Query() { + if key == templateKey && len(vals) > 0 { + return key + "=" + vals[0] + } + } + return "" +} + +func (r *routeRegexp) matchQueryString(req *http.Request) bool { + return r.regexp.MatchString(r.getUrlQuery(req)) +} + +// braceIndices returns the first level curly brace indices from a string. +// It returns an error in case of unbalanced braces. +func braceIndices(s string) ([]int, error) { + var level, idx int + idxs := make([]int, 0) + for i := 0; i < len(s); i++ { + switch s[i] { + case '{': + if level++; level == 1 { + idx = i + } + case '}': + if level--; level == 0 { + idxs = append(idxs, idx, i+1) + } else if level < 0 { + return nil, fmt.Errorf("mux: unbalanced braces in %q", s) + } + } + } + if level != 0 { + return nil, fmt.Errorf("mux: unbalanced braces in %q", s) + } + return idxs, nil +} + +// varGroupName builds a capturing group name for the indexed variable. +func varGroupName(idx int) string { + return "v" + strconv.Itoa(idx) +} + +// ---------------------------------------------------------------------------- +// routeRegexpGroup +// ---------------------------------------------------------------------------- + +// routeRegexpGroup groups the route matchers that carry variables. +type routeRegexpGroup struct { + host *routeRegexp + path *routeRegexp + queries []*routeRegexp +} + +// setMatch extracts the variables from the URL once a route matches. +func (v *routeRegexpGroup) setMatch(req *http.Request, m *RouteMatch, r *Route) { + // Store host variables. 
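+	// Each positional capture group (named v0, v1, ... by varGroupName) is
+	// paired back with the variable name recorded in varsN.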
+ if v.host != nil { + hostVars := v.host.regexp.FindStringSubmatch(getHost(req)) + if hostVars != nil { + subexpNames := v.host.regexp.SubexpNames() + varName := 0 + for i, name := range subexpNames[1:] { + if name != "" && name == varGroupName(varName) { + m.Vars[v.host.varsN[varName]] = hostVars[i+1] + varName++ + } + } + } + } + // Store path variables. + if v.path != nil { + pathVars := v.path.regexp.FindStringSubmatch(req.URL.Path) + if pathVars != nil { + subexpNames := v.path.regexp.SubexpNames() + varName := 0 + for i, name := range subexpNames[1:] { + if name != "" && name == varGroupName(varName) { + m.Vars[v.path.varsN[varName]] = pathVars[i+1] + varName++ + } + } + // Check if we should redirect. + if v.path.strictSlash { + p1 := strings.HasSuffix(req.URL.Path, "/") + p2 := strings.HasSuffix(v.path.template, "/") + if p1 != p2 { + u, _ := url.Parse(req.URL.String()) + if p1 { + u.Path = u.Path[:len(u.Path)-1] + } else { + u.Path += "/" + } + m.Handler = http.RedirectHandler(u.String(), 301) + } + } + } + } + // Store query string variables. + for _, q := range v.queries { + queryVars := q.regexp.FindStringSubmatch(q.getUrlQuery(req)) + if queryVars != nil { + subexpNames := q.regexp.SubexpNames() + varName := 0 + for i, name := range subexpNames[1:] { + if name != "" && name == varGroupName(varName) { + m.Vars[q.varsN[varName]] = queryVars[i+1] + varName++ + } + } + } + } +} + +// getHost tries its best to return the request host. +func getHost(r *http.Request) string { + if r.URL.IsAbs() { + return r.URL.Host + } + host := r.Host + // Slice off any port information. + if i := strings.Index(host, ":"); i != -1 { + host = host[:i] + } + return host + +} diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux/route.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux/route.go new file mode 100644 index 000000000..913432c1c --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux/route.go @@ -0,0 +1,595 @@ +// Copyright 2012 The Gorilla Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package mux + +import ( + "errors" + "fmt" + "net/http" + "net/url" + "regexp" + "strings" +) + +// Route stores information to match a request and build URLs. +type Route struct { + // Parent where the route was registered (a Router). + parent parentRoute + // Request handler for the route. + handler http.Handler + // List of matchers. + matchers []matcher + // Manager for the variables from host and path. + regexp *routeRegexpGroup + // If true, when the path pattern is "/path/", accessing "/path" will + // redirect to the former and vice versa. + strictSlash bool + // If true, this route never matches: it is only used to build URLs. + buildOnly bool + // The name used to build URLs. + name string + // Error resulted from building a route. + err error + + buildVarsFunc BuildVarsFunc +} + +// Match matches the route against the request. +func (r *Route) Match(req *http.Request, match *RouteMatch) bool { + if r.buildOnly || r.err != nil { + return false + } + // Match everything. + for _, m := range r.matchers { + if matched := m.Match(req, match); !matched { + return false + } + } + // Yay, we have a match. Let's collect some info about it. 
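+	// Only fill in fields that a previous matcher has not already set,
+	// then extract any host, path and query variables.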
+ if match.Route == nil { + match.Route = r + } + if match.Handler == nil { + match.Handler = r.handler + } + if match.Vars == nil { + match.Vars = make(map[string]string) + } + // Set variables. + if r.regexp != nil { + r.regexp.setMatch(req, match, r) + } + return true +} + +// ---------------------------------------------------------------------------- +// Route attributes +// ---------------------------------------------------------------------------- + +// GetError returns an error resulted from building the route, if any. +func (r *Route) GetError() error { + return r.err +} + +// BuildOnly sets the route to never match: it is only used to build URLs. +func (r *Route) BuildOnly() *Route { + r.buildOnly = true + return r +} + +// Handler -------------------------------------------------------------------- + +// Handler sets a handler for the route. +func (r *Route) Handler(handler http.Handler) *Route { + if r.err == nil { + r.handler = handler + } + return r +} + +// HandlerFunc sets a handler function for the route. +func (r *Route) HandlerFunc(f func(http.ResponseWriter, *http.Request)) *Route { + return r.Handler(http.HandlerFunc(f)) +} + +// GetHandler returns the handler for the route, if any. +func (r *Route) GetHandler() http.Handler { + return r.handler +} + +// Name ----------------------------------------------------------------------- + +// Name sets the name for the route, used to build URLs. +// If the name was registered already it will be overwritten. +func (r *Route) Name(name string) *Route { + if r.name != "" { + r.err = fmt.Errorf("mux: route already has name %q, can't set %q", + r.name, name) + } + if r.err == nil { + r.name = name + r.getNamedRoutes()[name] = r + } + return r +} + +// GetName returns the name for the route, if any. +func (r *Route) GetName() string { + return r.name +} + +// ---------------------------------------------------------------------------- +// Matchers +// ---------------------------------------------------------------------------- + +// matcher types try to match a request. +type matcher interface { + Match(*http.Request, *RouteMatch) bool +} + +// addMatcher adds a matcher to the route. +func (r *Route) addMatcher(m matcher) *Route { + if r.err == nil { + r.matchers = append(r.matchers, m) + } + return r +} + +// addRegexpMatcher adds a host or path matcher and builder to a route. 
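+// The template is compiled by newRouteRegexp; on failure the error is
+// returned to the caller, which stores it on the route for GetError.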
+func (r *Route) addRegexpMatcher(tpl string, matchHost, matchPrefix, matchQuery bool) error {
+	if r.err != nil {
+		return r.err
+	}
+	r.regexp = r.getRegexpGroup()
+	if !matchHost && !matchQuery {
+		if len(tpl) == 0 || tpl[0] != '/' {
+			return fmt.Errorf("mux: path must start with a slash, got %q", tpl)
+		}
+		if r.regexp.path != nil {
+			tpl = strings.TrimRight(r.regexp.path.template, "/") + tpl
+		}
+	}
+	rr, err := newRouteRegexp(tpl, matchHost, matchPrefix, matchQuery, r.strictSlash)
+	if err != nil {
+		return err
+	}
+	for _, q := range r.regexp.queries {
+		if err = uniqueVars(rr.varsN, q.varsN); err != nil {
+			return err
+		}
+	}
+	if matchHost {
+		if r.regexp.path != nil {
+			if err = uniqueVars(rr.varsN, r.regexp.path.varsN); err != nil {
+				return err
+			}
+		}
+		r.regexp.host = rr
+	} else {
+		if r.regexp.host != nil {
+			if err = uniqueVars(rr.varsN, r.regexp.host.varsN); err != nil {
+				return err
+			}
+		}
+		if matchQuery {
+			r.regexp.queries = append(r.regexp.queries, rr)
+		} else {
+			r.regexp.path = rr
+		}
+	}
+	r.addMatcher(rr)
+	return nil
+}
+
+// Headers --------------------------------------------------------------------
+
+// headerMatcher matches the request against header values.
+type headerMatcher map[string]string
+
+func (m headerMatcher) Match(r *http.Request, match *RouteMatch) bool {
+	return matchMapWithString(m, r.Header, true)
+}
+
+// Headers adds a matcher for request header values.
+// It accepts a sequence of key/value pairs to be matched. For example:
+//
+//     r := mux.NewRouter()
+//     r.Headers("Content-Type", "application/json",
+//               "X-Requested-With", "XMLHttpRequest")
+//
+// The above route will only match if both request header values match.
+// If the value is an empty string, it will match any value if the key is set.
+func (r *Route) Headers(pairs ...string) *Route {
+	if r.err == nil {
+		var headers map[string]string
+		headers, r.err = mapFromPairsToString(pairs...)
+		return r.addMatcher(headerMatcher(headers))
+	}
+	return r
+}
+
+// headerRegexMatcher matches the request against the route given a regex for the header
+type headerRegexMatcher map[string]*regexp.Regexp
+
+func (m headerRegexMatcher) Match(r *http.Request, match *RouteMatch) bool {
+	return matchMapWithRegex(m, r.Header, true)
+}
+
+// HeadersRegexp adds a matcher for request header values; it works like
+// Headers, except that the values are interpreted as regular expressions.
+// It accepts a sequence of key/value pairs, where the value has regex
+// support. For example:
+//
+//     r := mux.NewRouter()
+//     r.HeadersRegexp("Content-Type", "application/(text|json)",
+//                     "X-Requested-With", "XMLHttpRequest")
+//
+// The above route will only match if both request header values match
+// their regular expressions.
+// If the value is an empty string, it will match any value if the key is set.
+func (r *Route) HeadersRegexp(pairs ...string) *Route {
+	if r.err == nil {
+		var headers map[string]*regexp.Regexp
+		headers, r.err = mapFromPairsToRegex(pairs...)
+		return r.addMatcher(headerRegexMatcher(headers))
+	}
+	return r
+}
+
+// Host -----------------------------------------------------------------------
+
+// Host adds a matcher for the URL host.
+// It accepts a template with zero or more URL variables enclosed by {}.
+// Variables can define an optional regexp pattern to be matched:
+//
+// - {name} matches anything until the next dot.
+//
+// - {name:pattern} matches the given regexp pattern.
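+//
+// The default pattern for a host variable is "[^.]+", so a plain {name}
+// never crosses a dot.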
+//
+// For example:
+//
+//     r := mux.NewRouter()
+//     r.Host("www.example.com")
+//     r.Host("{subdomain}.domain.com")
+//     r.Host("{subdomain:[a-z]+}.domain.com")
+//
+// Variable names must be unique in a given route. They can be retrieved
+// by calling mux.Vars(request).
+func (r *Route) Host(tpl string) *Route {
+	r.err = r.addRegexpMatcher(tpl, true, false, false)
+	return r
+}
+
+// MatcherFunc ----------------------------------------------------------------
+
+// MatcherFunc is the function signature used by custom matchers.
+type MatcherFunc func(*http.Request, *RouteMatch) bool
+
+func (m MatcherFunc) Match(r *http.Request, match *RouteMatch) bool {
+	return m(r, match)
+}
+
+// MatcherFunc adds a custom function to be used as request matcher.
+func (r *Route) MatcherFunc(f MatcherFunc) *Route {
+	return r.addMatcher(f)
+}
+
+// Methods --------------------------------------------------------------------
+
+// methodMatcher matches the request against HTTP methods.
+type methodMatcher []string
+
+func (m methodMatcher) Match(r *http.Request, match *RouteMatch) bool {
+	return matchInArray(m, r.Method)
+}
+
+// Methods adds a matcher for HTTP methods.
+// It accepts a sequence of one or more methods to be matched, e.g.:
+// "GET", "POST", "PUT".
+func (r *Route) Methods(methods ...string) *Route {
+	for k, v := range methods {
+		methods[k] = strings.ToUpper(v)
+	}
+	return r.addMatcher(methodMatcher(methods))
+}
+
+// Path -----------------------------------------------------------------------
+
+// Path adds a matcher for the URL path.
+// It accepts a template with zero or more URL variables enclosed by {}. The
+// template must start with a "/".
+// Variables can define an optional regexp pattern to be matched:
+//
+// - {name} matches anything until the next slash.
+//
+// - {name:pattern} matches the given regexp pattern.
+//
+// For example:
+//
+//     r := mux.NewRouter()
+//     r.Path("/products/").Handler(ProductsHandler)
+//     r.Path("/products/{key}").Handler(ProductsHandler)
+//     r.Path("/articles/{category}/{id:[0-9]+}").
+//         Handler(ArticleHandler)
+//
+// Variable names must be unique in a given route. They can be retrieved
+// by calling mux.Vars(request).
+func (r *Route) Path(tpl string) *Route {
+	r.err = r.addRegexpMatcher(tpl, false, false, false)
+	return r
+}
+
+// PathPrefix -----------------------------------------------------------------
+
+// PathPrefix adds a matcher for the URL path prefix. This matches if the given
+// template is a prefix of the full URL path. See Route.Path() for details on
+// the tpl argument.
+//
+// Note that it does not treat slashes specially ("/foobar/" will be matched by
+// the prefix "/foo") so you may want to use a trailing slash here.
+//
+// Also note that the setting of Router.StrictSlash() has no effect on routes
+// with a PathPrefix matcher.
+func (r *Route) PathPrefix(tpl string) *Route {
+	r.err = r.addRegexpMatcher(tpl, false, true, false)
+	return r
+}
+
+// Query ----------------------------------------------------------------------
+
+// Queries adds a matcher for URL query values.
+// It accepts a sequence of key/value pairs. Values may define variables.
+// For example:
+//
+//     r := mux.NewRouter()
+//     r.Queries("foo", "bar", "id", "{id:[0-9]+}")
+//
+// The above route will only match if the URL contains the defined query
+// values, e.g.: ?foo=bar&id=42.
+//
+// If the value is an empty string, it will match any value if the key is set.
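+//
+// For example (an illustrative sketch):
+//
+//     r.Queries("assets", "")
+//
+// matches any URL whose query string contains the "assets" key, whatever
+// its value.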
+//
+// Variables can define an optional regexp pattern to be matched:
+//
+// - {name} matches anything except '?' or '&'.
+//
+// - {name:pattern} matches the given regexp pattern.
+func (r *Route) Queries(pairs ...string) *Route {
+	length := len(pairs)
+	if length%2 != 0 {
+		r.err = fmt.Errorf(
+			"mux: number of parameters must be multiple of 2, got %v", pairs)
+		// Store the error and keep the route chainable.
+		return r
+	}
+	for i := 0; i < length; i += 2 {
+		if r.err = r.addRegexpMatcher(pairs[i]+"="+pairs[i+1], false, false, true); r.err != nil {
+			return r
+		}
+	}
+
+	return r
+}
+
+// Schemes --------------------------------------------------------------------
+
+// schemeMatcher matches the request against URL schemes.
+type schemeMatcher []string
+
+func (m schemeMatcher) Match(r *http.Request, match *RouteMatch) bool {
+	return matchInArray(m, r.URL.Scheme)
+}
+
+// Schemes adds a matcher for URL schemes.
+// It accepts a sequence of schemes to be matched, e.g.: "http", "https".
+func (r *Route) Schemes(schemes ...string) *Route {
+	for k, v := range schemes {
+		schemes[k] = strings.ToLower(v)
+	}
+	return r.addMatcher(schemeMatcher(schemes))
+}
+
+// BuildVarsFunc --------------------------------------------------------------
+
+// BuildVarsFunc is the function signature used by custom build variable
+// functions (which can modify route variables before a route's URL is built).
+type BuildVarsFunc func(map[string]string) map[string]string
+
+// BuildVarsFunc adds a custom function to be used to modify build variables
+// before a route's URL is built.
+func (r *Route) BuildVarsFunc(f BuildVarsFunc) *Route {
+	r.buildVarsFunc = f
+	return r
+}
+
+// Subrouter ------------------------------------------------------------------
+
+// Subrouter creates a subrouter for the route.
+//
+// It will test the inner routes only if the parent route matched. For example:
+//
+//     r := mux.NewRouter()
+//     s := r.Host("www.example.com").Subrouter()
+//     s.HandleFunc("/products/", ProductsHandler)
+//     s.HandleFunc("/products/{key}", ProductHandler)
+//     s.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler)
+//
+// Here, the routes registered in the subrouter won't be tested if the host
+// doesn't match.
+func (r *Route) Subrouter() *Router {
+	router := &Router{parent: r, strictSlash: r.strictSlash}
+	r.addMatcher(router)
+	return router
+}
+
+// ----------------------------------------------------------------------------
+// URL building
+// ----------------------------------------------------------------------------
+
+// URL builds a URL for the route.
+//
+// It accepts a sequence of key/value pairs for the route variables. For
+// example, given this route:
+//
+//     r := mux.NewRouter()
+//     r.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler).
+//         Name("article")
+//
+// ...a URL for it can be built using:
+//
+//     url, err := r.Get("article").URL("category", "technology", "id", "42")
+//
+// ...which will return a url.URL with the following path:
+//
+//     "/articles/technology/42"
+//
+// This also works for host variables:
+//
+//     r := mux.NewRouter()
+//     r.Host("{subdomain}.domain.com").
+//         HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler).
+//         Name("article")
+//
+//     // url.String() will be "http://news.domain.com/articles/technology/42"
+//     url, err := r.Get("article").URL("subdomain", "news",
+//                                      "category", "technology",
+//                                      "id", "42")
+//
+// All variables defined in the route are required, and their values must
+// conform to the corresponding patterns.
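+// If a variable is missing, or a value does not conform to its pattern,
+// an error is returned instead. To build only the host or only the path
+// part, see URLHost and URLPath below.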
+func (r *Route) URL(pairs ...string) (*url.URL, error) { + if r.err != nil { + return nil, r.err + } + if r.regexp == nil { + return nil, errors.New("mux: route doesn't have a host or path") + } + values, err := r.prepareVars(pairs...) + if err != nil { + return nil, err + } + var scheme, host, path string + if r.regexp.host != nil { + // Set a default scheme. + scheme = "http" + if host, err = r.regexp.host.url(values); err != nil { + return nil, err + } + } + if r.regexp.path != nil { + if path, err = r.regexp.path.url(values); err != nil { + return nil, err + } + } + return &url.URL{ + Scheme: scheme, + Host: host, + Path: path, + }, nil +} + +// URLHost builds the host part of the URL for a route. See Route.URL(). +// +// The route must have a host defined. +func (r *Route) URLHost(pairs ...string) (*url.URL, error) { + if r.err != nil { + return nil, r.err + } + if r.regexp == nil || r.regexp.host == nil { + return nil, errors.New("mux: route doesn't have a host") + } + values, err := r.prepareVars(pairs...) + if err != nil { + return nil, err + } + host, err := r.regexp.host.url(values) + if err != nil { + return nil, err + } + return &url.URL{ + Scheme: "http", + Host: host, + }, nil +} + +// URLPath builds the path part of the URL for a route. See Route.URL(). +// +// The route must have a path defined. +func (r *Route) URLPath(pairs ...string) (*url.URL, error) { + if r.err != nil { + return nil, r.err + } + if r.regexp == nil || r.regexp.path == nil { + return nil, errors.New("mux: route doesn't have a path") + } + values, err := r.prepareVars(pairs...) + if err != nil { + return nil, err + } + path, err := r.regexp.path.url(values) + if err != nil { + return nil, err + } + return &url.URL{ + Path: path, + }, nil +} + +// prepareVars converts the route variable pairs into a map. If the route has a +// BuildVarsFunc, it is invoked. +func (r *Route) prepareVars(pairs ...string) (map[string]string, error) { + m, err := mapFromPairsToString(pairs...) + if err != nil { + return nil, err + } + return r.buildVars(m), nil +} + +func (r *Route) buildVars(m map[string]string) map[string]string { + if r.parent != nil { + m = r.parent.buildVars(m) + } + if r.buildVarsFunc != nil { + m = r.buildVarsFunc(m) + } + return m +} + +// ---------------------------------------------------------------------------- +// parentRoute +// ---------------------------------------------------------------------------- + +// parentRoute allows routes to know about parent host and path definitions. +type parentRoute interface { + getNamedRoutes() map[string]*Route + getRegexpGroup() *routeRegexpGroup + buildVars(map[string]string) map[string]string +} + +// getNamedRoutes returns the map where named routes are registered. +func (r *Route) getNamedRoutes() map[string]*Route { + if r.parent == nil { + // During tests router is not always set. + r.parent = NewRouter() + } + return r.parent.getNamedRoutes() +} + +// getRegexpGroup returns regexp definitions from this route. +func (r *Route) getRegexpGroup() *routeRegexpGroup { + if r.regexp == nil { + if r.parent == nil { + // During tests router is not always set. + r.parent = NewRouter() + } + regexp := r.parent.getRegexpGroup() + if regexp == nil { + r.regexp = new(routeRegexpGroup) + } else { + // Copy. 
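+			// (so that templates added to this route do not mutate the
+			// parent's host, path and query definitions).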
+ r.regexp = &routeRegexpGroup{ + host: regexp.host, + path: regexp.path, + queries: regexp.queries, + } + } + } + return r.regexp +} diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/hashicorp/go-cleanhttp/LICENSE b/vendor/github.com/fsouza/go-dockerclient/external/github.com/hashicorp/go-cleanhttp/LICENSE new file mode 100644 index 000000000..e87a115e4 --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/github.com/hashicorp/go-cleanhttp/LICENSE @@ -0,0 +1,363 @@ +Mozilla Public License, version 2.0 + +1. Definitions + +1.1. "Contributor" + + means each individual or legal entity that creates, contributes to the + creation of, or owns Covered Software. + +1.2. "Contributor Version" + + means the combination of the Contributions of others (if any) used by a + Contributor and that particular Contributor's Contribution. + +1.3. "Contribution" + + means Covered Software of a particular Contributor. + +1.4. "Covered Software" + + means Source Code Form to which the initial Contributor has attached the + notice in Exhibit A, the Executable Form of such Source Code Form, and + Modifications of such Source Code Form, in each case including portions + thereof. + +1.5. "Incompatible With Secondary Licenses" + means + + a. that the initial Contributor has attached the notice described in + Exhibit B to the Covered Software; or + + b. that the Covered Software was made available under the terms of + version 1.1 or earlier of the License, but not also under the terms of + a Secondary License. + +1.6. "Executable Form" + + means any form of the work other than Source Code Form. + +1.7. "Larger Work" + + means a work that combines Covered Software with other material, in a + separate file or files, that is not Covered Software. + +1.8. "License" + + means this document. + +1.9. "Licensable" + + means having the right to grant, to the maximum extent possible, whether + at the time of the initial grant or subsequently, any and all of the + rights conveyed by this License. + +1.10. "Modifications" + + means any of the following: + + a. any file in Source Code Form that results from an addition to, + deletion from, or modification of the contents of Covered Software; or + + b. any new file in Source Code Form that contains any Covered Software. + +1.11. "Patent Claims" of a Contributor + + means any patent claim(s), including without limitation, method, + process, and apparatus claims, in any patent Licensable by such + Contributor that would be infringed, but for the grant of the License, + by the making, using, selling, offering for sale, having made, import, + or transfer of either its Contributions or its Contributor Version. + +1.12. "Secondary License" + + means either the GNU General Public License, Version 2.0, the GNU Lesser + General Public License, Version 2.1, the GNU Affero General Public + License, Version 3.0, or any later versions of those licenses. + +1.13. "Source Code Form" + + means the form of the work preferred for making modifications. + +1.14. "You" (or "Your") + + means an individual or a legal entity exercising rights under this + License. For legal entities, "You" includes any entity that controls, is + controlled by, or is under common control with You. 
For purposes of this + definition, "control" means (a) the power, direct or indirect, to cause + the direction or management of such entity, whether by contract or + otherwise, or (b) ownership of more than fifty percent (50%) of the + outstanding shares or beneficial ownership of such entity. + + +2. License Grants and Conditions + +2.1. Grants + + Each Contributor hereby grants You a world-wide, royalty-free, + non-exclusive license: + + a. under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or + as part of a Larger Work; and + + b. under Patent Claims of such Contributor to make, use, sell, offer for + sale, have made, import, and otherwise transfer either its + Contributions or its Contributor Version. + +2.2. Effective Date + + The licenses granted in Section 2.1 with respect to any Contribution + become effective for each Contribution on the date the Contributor first + distributes such Contribution. + +2.3. Limitations on Grant Scope + + The licenses granted in this Section 2 are the only rights granted under + this License. No additional rights or licenses will be implied from the + distribution or licensing of Covered Software under this License. + Notwithstanding Section 2.1(b) above, no patent license is granted by a + Contributor: + + a. for any code that a Contributor has removed from Covered Software; or + + b. for infringements caused by: (i) Your and any other third party's + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + + c. under Patent Claims infringed by Covered Software in the absence of + its Contributions. + + This License does not grant any rights in the trademarks, service marks, + or logos of any Contributor (except as may be necessary to comply with + the notice requirements in Section 3.4). + +2.4. Subsequent Licenses + + No Contributor makes additional grants as a result of Your choice to + distribute the Covered Software under a subsequent version of this + License (see Section 10.2) or under the terms of a Secondary License (if + permitted under the terms of Section 3.3). + +2.5. Representation + + Each Contributor represents that the Contributor believes its + Contributions are its original creation(s) or it has sufficient rights to + grant the rights to its Contributions conveyed by this License. + +2.6. Fair Use + + This License is not intended to limit any rights You have under + applicable copyright doctrines of fair use, fair dealing, or other + equivalents. + +2.7. Conditions + + Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in + Section 2.1. + + +3. Responsibilities + +3.1. Distribution of Source Form + + All distribution of Covered Software in Source Code Form, including any + Modifications that You create or to which You contribute, must be under + the terms of this License. You must inform recipients that the Source + Code Form of the Covered Software is governed by the terms of this + License, and how they can obtain a copy of this License. You may not + attempt to alter or restrict the recipients' rights in the Source Code + Form. + +3.2. Distribution of Executable Form + + If You distribute Covered Software in Executable Form then: + + a. 
such Covered Software must also be made available in Source Code Form, + as described in Section 3.1, and You must inform recipients of the + Executable Form how they can obtain a copy of such Source Code Form by + reasonable means in a timely manner, at a charge no more than the cost + of distribution to the recipient; and + + b. You may distribute such Executable Form under the terms of this + License, or sublicense it under different terms, provided that the + license for the Executable Form does not attempt to limit or alter the + recipients' rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + + You may create and distribute a Larger Work under terms of Your choice, + provided that You also comply with the requirements of this License for + the Covered Software. If the Larger Work is a combination of Covered + Software with a work governed by one or more Secondary Licenses, and the + Covered Software is not Incompatible With Secondary Licenses, this + License permits You to additionally distribute such Covered Software + under the terms of such Secondary License(s), so that the recipient of + the Larger Work may, at their option, further distribute the Covered + Software under the terms of either this License or such Secondary + License(s). + +3.4. Notices + + You may not remove or alter the substance of any license notices + (including copyright notices, patent notices, disclaimers of warranty, or + limitations of liability) contained within the Source Code Form of the + Covered Software, except that You may alter any license notices to the + extent required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + + You may choose to offer, and to charge a fee for, warranty, support, + indemnity or liability obligations to one or more recipients of Covered + Software. However, You may do so only on Your own behalf, and not on + behalf of any Contributor. You must make it absolutely clear that any + such warranty, support, indemnity, or liability obligation is offered by + You alone, and You hereby agree to indemnify every Contributor for any + liability incurred by such Contributor as a result of warranty, support, + indemnity or liability terms You offer. You may include additional + disclaimers of warranty and limitations of liability specific to any + jurisdiction. + +4. Inability to Comply Due to Statute or Regulation + + If it is impossible for You to comply with any of the terms of this License + with respect to some or all of the Covered Software due to statute, + judicial order, or regulation then You must: (a) comply with the terms of + this License to the maximum extent possible; and (b) describe the + limitations and the code they affect. Such description must be placed in a + text file included with all distributions of the Covered Software under + this License. Except to the extent prohibited by statute or regulation, + such description must be sufficiently detailed for a recipient of ordinary + skill to be able to understand it. + +5. Termination + +5.1. The rights granted under this License will terminate automatically if You + fail to comply with any of its terms. 
However, if You become compliant, + then the rights granted under this License from a particular Contributor + are reinstated (a) provisionally, unless and until such Contributor + explicitly and finally terminates Your grants, and (b) on an ongoing + basis, if such Contributor fails to notify You of the non-compliance by + some reasonable means prior to 60 days after You have come back into + compliance. Moreover, Your grants from a particular Contributor are + reinstated on an ongoing basis if such Contributor notifies You of the + non-compliance by some reasonable means, this is the first time You have + received notice of non-compliance with this License from such + Contributor, and You become compliant prior to 30 days after Your receipt + of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent + infringement claim (excluding declaratory judgment actions, + counter-claims, and cross-claims) alleging that a Contributor Version + directly or indirectly infringes any patent, then the rights granted to + You by any and all Contributors for the Covered Software under Section + 2.1 of this License shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user + license agreements (excluding distributors and resellers) which have been + validly granted by You or Your distributors under this License prior to + termination shall survive termination. + +6. Disclaimer of Warranty + + Covered Software is provided under this License on an "as is" basis, + without warranty of any kind, either expressed, implied, or statutory, + including, without limitation, warranties that the Covered Software is free + of defects, merchantable, fit for a particular purpose or non-infringing. + The entire risk as to the quality and performance of the Covered Software + is with You. Should any Covered Software prove defective in any respect, + You (not any Contributor) assume the cost of any necessary servicing, + repair, or correction. This disclaimer of warranty constitutes an essential + part of this License. No use of any Covered Software is authorized under + this License except under this disclaimer. + +7. Limitation of Liability + + Under no circumstances and under no legal theory, whether tort (including + negligence), contract, or otherwise, shall any Contributor, or anyone who + distributes Covered Software as permitted above, be liable to You for any + direct, indirect, special, incidental, or consequential damages of any + character including, without limitation, damages for lost profits, loss of + goodwill, work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses, even if such party shall have been + informed of the possibility of such damages. This limitation of liability + shall not apply to liability for death or personal injury resulting from + such party's negligence to the extent applicable law prohibits such + limitation. Some jurisdictions do not allow the exclusion or limitation of + incidental or consequential damages, so this exclusion and limitation may + not apply to You. + +8. Litigation + + Any litigation relating to this License may be brought only in the courts + of a jurisdiction where the defendant maintains its principal place of + business and such litigation shall be governed by laws of that + jurisdiction, without reference to its conflict-of-law provisions. Nothing + in this Section shall prevent a party's ability to bring cross-claims or + counter-claims. + +9. 
Miscellaneous + + This License represents the complete agreement concerning the subject + matter hereof. If any provision of this License is held to be + unenforceable, such provision shall be reformed only to the extent + necessary to make it enforceable. Any law or regulation which provides that + the language of a contract shall be construed against the drafter shall not + be used to construe this License against a Contributor. + + +10. Versions of the License + +10.1. New Versions + + Mozilla Foundation is the license steward. Except as provided in Section + 10.3, no one other than the license steward has the right to modify or + publish new versions of this License. Each version will be given a + distinguishing version number. + +10.2. Effect of New Versions + + You may distribute the Covered Software under the terms of the version + of the License under which You originally received the Covered Software, + or under the terms of any subsequent version published by the license + steward. + +10.3. Modified Versions + + If you create software not governed by this License, and you want to + create a new license for such software, you may create and use a + modified version of this License if you rename the license and remove + any references to the name of the license steward (except to note that + such modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary + Licenses If You choose to distribute Source Code Form that is + Incompatible With Secondary Licenses under the terms of this version of + the License, the notice described in Exhibit B of this License must be + attached. + +Exhibit A - Source Code Form License Notice + + This Source Code Form is subject to the + terms of the Mozilla Public License, v. + 2.0. If a copy of the MPL was not + distributed with this file, You can + obtain one at + http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular file, +then You may include the notice in a location (such as a LICENSE file in a +relevant directory) where a recipient would be likely to look for such a +notice. + +You may add additional accurate notices of copyright ownership. + +Exhibit B - "Incompatible With Secondary Licenses" Notice + + This Source Code Form is "Incompatible + With Secondary Licenses", as defined by + the Mozilla Public License, v. 2.0. + diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/hashicorp/go-cleanhttp/README.md b/vendor/github.com/fsouza/go-dockerclient/external/github.com/hashicorp/go-cleanhttp/README.md new file mode 100644 index 000000000..036e5313f --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/github.com/hashicorp/go-cleanhttp/README.md @@ -0,0 +1,30 @@ +# cleanhttp + +Functions for accessing "clean" Go http.Client values + +------------- + +The Go standard library contains a default `http.Client` called +`http.DefaultClient`. It is a common idiom in Go code to start with +`http.DefaultClient` and tweak it as necessary, and in fact, this is +encouraged; from the `http` package documentation: + +> The Client's Transport typically has internal state (cached TCP connections), +so Clients should be reused instead of created as needed. Clients are safe for +concurrent use by multiple goroutines. + +Unfortunately, this is a shared value, and it is not uncommon for libraries to +assume that they are free to modify it at will. 
With enough dependencies, it +can be very easy to encounter strange problems and race conditions due to +manipulation of this shared value across libraries and goroutines (clients are +safe for concurrent use, but writing values to the client struct itself is not +protected). + +Making things worse is the fact that a bare `http.Client` will use a default +`http.Transport` called `http.DefaultTransport`, which is another global value +that behaves the same way. So it is not simply enough to replace +`http.DefaultClient` with `&http.Client{}`. + +This repository provides some simple functions to get a "clean" `http.Client` +-- one that uses the same default values as the Go standard library, but +returns a client that does not share any state with other clients. diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/hashicorp/go-cleanhttp/cleanhttp.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/hashicorp/go-cleanhttp/cleanhttp.go new file mode 100644 index 000000000..c692e23f4 --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/github.com/hashicorp/go-cleanhttp/cleanhttp.go @@ -0,0 +1,40 @@ +package cleanhttp + +import ( + "net" + "net/http" + "runtime" + "time" +) + +// DefaultTransport returns a new http.Transport with the same default values +// as http.DefaultTransport +func DefaultTransport() *http.Transport { + transport := &http.Transport{ + Proxy: http.ProxyFromEnvironment, + Dial: (&net.Dialer{ + Timeout: 30 * time.Second, + KeepAlive: 30 * time.Second, + }).Dial, + TLSHandshakeTimeout: 10 * time.Second, + } + SetTransportFinalizer(transport) + return transport +} + +// DefaultClient returns a new http.Client with the same default values as +// http.Client, but with a non-shared Transport +func DefaultClient() *http.Client { + return &http.Client{ + Transport: DefaultTransport(), + } +} + +// SetTransportFinalizer sets a finalizer on the transport to ensure that +// idle connections are closed prior to garbage collection; otherwise +// these may leak +func SetTransportFinalizer(transport *http.Transport) { + runtime.SetFinalizer(&transport, func(t **http.Transport) { + (*t).CloseIdleConnections() + }) +} diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/opencontainers/runc/libcontainer/user/MAINTAINERS b/vendor/github.com/fsouza/go-dockerclient/external/github.com/opencontainers/runc/libcontainer/user/MAINTAINERS new file mode 100644 index 000000000..edbe20066 --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/github.com/opencontainers/runc/libcontainer/user/MAINTAINERS @@ -0,0 +1,2 @@ +Tianon Gravi (@tianon) +Aleksa Sarai (@cyphar) diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/opencontainers/runc/libcontainer/user/lookup.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/opencontainers/runc/libcontainer/user/lookup.go new file mode 100644 index 000000000..6f8a982ff --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/github.com/opencontainers/runc/libcontainer/user/lookup.go @@ -0,0 +1,108 @@ +package user + +import ( + "errors" + "fmt" + "syscall" +) + +var ( + // The current operating system does not provide the required data for user lookups. + ErrUnsupported = errors.New("user lookup: operating system does not provide passwd-formatted data") +) + +func lookupUser(filter func(u User) bool) (User, error) { + // Get operating system-specific passwd reader-closer. 
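+	// (on unsupported platforms GetPasswd returns ErrUnsupported).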
+	passwd, err := GetPasswd()
+	if err != nil {
+		return User{}, err
+	}
+	defer passwd.Close()
+
+	// Get the users.
+	users, err := ParsePasswdFilter(passwd, filter)
+	if err != nil {
+		return User{}, err
+	}
+
+	// No user entries found.
+	if len(users) == 0 {
+		return User{}, fmt.Errorf("no matching entries in passwd file")
+	}
+
+	// Assume the first entry is the "correct" one.
+	return users[0], nil
+}
+
+// CurrentUser looks up the current user by their user id in /etc/passwd. If the
+// user cannot be found (or there is no /etc/passwd file on the filesystem),
+// then CurrentUser returns an error.
+func CurrentUser() (User, error) {
+	return LookupUid(syscall.Getuid())
+}
+
+// LookupUser looks up a user by their username in /etc/passwd. If the user
+// cannot be found (or there is no /etc/passwd file on the filesystem), then
+// LookupUser returns an error.
+func LookupUser(username string) (User, error) {
+	return lookupUser(func(u User) bool {
+		return u.Name == username
+	})
+}
+
+// LookupUid looks up a user by their user id in /etc/passwd. If the user cannot
+// be found (or there is no /etc/passwd file on the filesystem), then LookupUid
+// returns an error.
+func LookupUid(uid int) (User, error) {
+	return lookupUser(func(u User) bool {
+		return u.Uid == uid
+	})
+}
+
+func lookupGroup(filter func(g Group) bool) (Group, error) {
+	// Get operating system-specific group reader-closer.
+	group, err := GetGroup()
+	if err != nil {
+		return Group{}, err
+	}
+	defer group.Close()
+
+	// Get the groups.
+	groups, err := ParseGroupFilter(group, filter)
+	if err != nil {
+		return Group{}, err
+	}
+
+	// No group entries found.
+	if len(groups) == 0 {
+		return Group{}, fmt.Errorf("no matching entries in group file")
+	}
+
+	// Assume the first entry is the "correct" one.
+	return groups[0], nil
+}
+
+// CurrentGroup looks up the current user's primary group (the group id
+// returned by getgid(2)) in /etc/group. If the group cannot be found (or
+// there is no /etc/group file on the filesystem), then CurrentGroup returns
+// an error.
+func CurrentGroup() (Group, error) {
+	return LookupGid(syscall.Getgid())
+}
+
+// LookupGroup looks up a group by its name in /etc/group. If the group cannot
+// be found (or there is no /etc/group file on the filesystem), then LookupGroup
+// returns an error.
+func LookupGroup(groupname string) (Group, error) {
+	return lookupGroup(func(g Group) bool {
+		return g.Name == groupname
+	})
+}
+
+// LookupGid looks up a group by its group id in /etc/group. If the group cannot
+// be found (or there is no /etc/group file on the filesystem), then LookupGid
+// returns an error.
+func LookupGid(gid int) (Group, error) {
+	return lookupGroup(func(g Group) bool {
+		return g.Gid == gid
+	})
+}
diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/opencontainers/runc/libcontainer/user/lookup_unix.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/opencontainers/runc/libcontainer/user/lookup_unix.go
new file mode 100644
index 000000000..758b734c2
--- /dev/null
+++ b/vendor/github.com/fsouza/go-dockerclient/external/github.com/opencontainers/runc/libcontainer/user/lookup_unix.go
@@ -0,0 +1,30 @@
+// +build darwin dragonfly freebsd linux netbsd openbsd solaris
+
+package user
+
+import (
+	"io"
+	"os"
+)
+
+// Unix-specific path to the passwd and group formatted files.
+const ( + unixPasswdPath = "/etc/passwd" + unixGroupPath = "/etc/group" +) + +func GetPasswdPath() (string, error) { + return unixPasswdPath, nil +} + +func GetPasswd() (io.ReadCloser, error) { + return os.Open(unixPasswdPath) +} + +func GetGroupPath() (string, error) { + return unixGroupPath, nil +} + +func GetGroup() (io.ReadCloser, error) { + return os.Open(unixGroupPath) +} diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/opencontainers/runc/libcontainer/user/lookup_unsupported.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/opencontainers/runc/libcontainer/user/lookup_unsupported.go new file mode 100644 index 000000000..721794887 --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/github.com/opencontainers/runc/libcontainer/user/lookup_unsupported.go @@ -0,0 +1,21 @@ +// +build !darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris + +package user + +import "io" + +func GetPasswdPath() (string, error) { + return "", ErrUnsupported +} + +func GetPasswd() (io.ReadCloser, error) { + return nil, ErrUnsupported +} + +func GetGroupPath() (string, error) { + return "", ErrUnsupported +} + +func GetGroup() (io.ReadCloser, error) { + return nil, ErrUnsupported +} diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/opencontainers/runc/libcontainer/user/user.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/opencontainers/runc/libcontainer/user/user.go new file mode 100644 index 000000000..e6375ea4d --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/github.com/opencontainers/runc/libcontainer/user/user.go @@ -0,0 +1,418 @@ +package user + +import ( + "bufio" + "fmt" + "io" + "os" + "strconv" + "strings" +) + +const ( + minId = 0 + maxId = 1<<31 - 1 //for 32-bit systems compatibility +) + +var ( + ErrRange = fmt.Errorf("Uids and gids must be in range %d-%d", minId, maxId) +) + +type User struct { + Name string + Pass string + Uid int + Gid int + Gecos string + Home string + Shell string +} + +type Group struct { + Name string + Pass string + Gid int + List []string +} + +func parseLine(line string, v ...interface{}) { + if line == "" { + return + } + + parts := strings.Split(line, ":") + for i, p := range parts { + if len(v) <= i { + // if we have more "parts" than we have places to put them, bail for great "tolerance" of naughty configuration files + break + } + + switch e := v[i].(type) { + case *string: + // "root", "adm", "/bin/bash" + *e = p + case *int: + // "0", "4", "1000" + // ignore string to int conversion errors, for great "tolerance" of naughty configuration files + *e, _ = strconv.Atoi(p) + case *[]string: + // "", "root", "root,adm,daemon" + if p != "" { + *e = strings.Split(p, ",") + } else { + *e = []string{} + } + default: + // panic, because this is a programming/logic error, not a runtime one + panic("parseLine expects only pointers! 
argument " + strconv.Itoa(i) + " is not a pointer!") + } + } +} + +func ParsePasswdFile(path string) ([]User, error) { + passwd, err := os.Open(path) + if err != nil { + return nil, err + } + defer passwd.Close() + return ParsePasswd(passwd) +} + +func ParsePasswd(passwd io.Reader) ([]User, error) { + return ParsePasswdFilter(passwd, nil) +} + +func ParsePasswdFileFilter(path string, filter func(User) bool) ([]User, error) { + passwd, err := os.Open(path) + if err != nil { + return nil, err + } + defer passwd.Close() + return ParsePasswdFilter(passwd, filter) +} + +func ParsePasswdFilter(r io.Reader, filter func(User) bool) ([]User, error) { + if r == nil { + return nil, fmt.Errorf("nil source for passwd-formatted data") + } + + var ( + s = bufio.NewScanner(r) + out = []User{} + ) + + for s.Scan() { + if err := s.Err(); err != nil { + return nil, err + } + + text := strings.TrimSpace(s.Text()) + if text == "" { + continue + } + + // see: man 5 passwd + // name:password:UID:GID:GECOS:directory:shell + // Name:Pass:Uid:Gid:Gecos:Home:Shell + // root:x:0:0:root:/root:/bin/bash + // adm:x:3:4:adm:/var/adm:/bin/false + p := User{} + parseLine( + text, + &p.Name, &p.Pass, &p.Uid, &p.Gid, &p.Gecos, &p.Home, &p.Shell, + ) + + if filter == nil || filter(p) { + out = append(out, p) + } + } + + return out, nil +} + +func ParseGroupFile(path string) ([]Group, error) { + group, err := os.Open(path) + if err != nil { + return nil, err + } + defer group.Close() + return ParseGroup(group) +} + +func ParseGroup(group io.Reader) ([]Group, error) { + return ParseGroupFilter(group, nil) +} + +func ParseGroupFileFilter(path string, filter func(Group) bool) ([]Group, error) { + group, err := os.Open(path) + if err != nil { + return nil, err + } + defer group.Close() + return ParseGroupFilter(group, filter) +} + +func ParseGroupFilter(r io.Reader, filter func(Group) bool) ([]Group, error) { + if r == nil { + return nil, fmt.Errorf("nil source for group-formatted data") + } + + var ( + s = bufio.NewScanner(r) + out = []Group{} + ) + + for s.Scan() { + if err := s.Err(); err != nil { + return nil, err + } + + text := s.Text() + if text == "" { + continue + } + + // see: man 5 group + // group_name:password:GID:user_list + // Name:Pass:Gid:List + // root:x:0:root + // adm:x:4:root,adm,daemon + p := Group{} + parseLine( + text, + &p.Name, &p.Pass, &p.Gid, &p.List, + ) + + if filter == nil || filter(p) { + out = append(out, p) + } + } + + return out, nil +} + +type ExecUser struct { + Uid, Gid int + Sgids []int + Home string +} + +// GetExecUserPath is a wrapper for GetExecUser. It reads data from each of the +// given file paths and uses that data as the arguments to GetExecUser. If the +// files cannot be opened for any reason, the error is ignored and a nil +// io.Reader is passed instead. +func GetExecUserPath(userSpec string, defaults *ExecUser, passwdPath, groupPath string) (*ExecUser, error) { + passwd, err := os.Open(passwdPath) + if err != nil { + passwd = nil + } else { + defer passwd.Close() + } + + group, err := os.Open(groupPath) + if err != nil { + group = nil + } else { + defer group.Close() + } + + return GetExecUser(userSpec, defaults, passwd, group) +} + +// GetExecUser parses a user specification string (using the passwd and group +// readers as sources for /etc/passwd and /etc/group data, respectively). In +// the case of blank fields or missing data from the sources, the values in +// defaults is used. 
+//
+// GetExecUser will return an error if a user or group literal could not be
+// found in any entry in passwd and group respectively.
+//
+// Examples of valid user specifications are:
+// * ""
+// * "user"
+// * "uid"
+// * "user:group"
+// * "uid:gid"
+// * "user:gid"
+// * "uid:group"
+func GetExecUser(userSpec string, defaults *ExecUser, passwd, group io.Reader) (*ExecUser, error) {
+	var (
+		userArg, groupArg string
+		name              string
+	)
+
+	if defaults == nil {
+		defaults = new(ExecUser)
+	}
+
+	// Copy over defaults.
+	user := &ExecUser{
+		Uid:   defaults.Uid,
+		Gid:   defaults.Gid,
+		Sgids: defaults.Sgids,
+		Home:  defaults.Home,
+	}
+
+	// Sgids slice *cannot* be nil.
+	if user.Sgids == nil {
+		user.Sgids = []int{}
+	}
+
+	// allow for userArg to have either "user" syntax, or optionally "user:group" syntax
+	parseLine(userSpec, &userArg, &groupArg)
+
+	users, err := ParsePasswdFilter(passwd, func(u User) bool {
+		if userArg == "" {
+			return u.Uid == user.Uid
+		}
+		return u.Name == userArg || strconv.Itoa(u.Uid) == userArg
+	})
+	if err != nil && passwd != nil {
+		if userArg == "" {
+			userArg = strconv.Itoa(user.Uid)
+		}
+		return nil, fmt.Errorf("Unable to find user %v: %v", userArg, err)
+	}
+
+	haveUser := users != nil && len(users) > 0
+	if haveUser {
+		// if we found any user entries that matched our filter, let's take the first one as "correct"
+		name = users[0].Name
+		user.Uid = users[0].Uid
+		user.Gid = users[0].Gid
+		user.Home = users[0].Home
+	} else if userArg != "" {
+		// we asked for a user but didn't find them... let's check to see if we wanted a numeric user
+		user.Uid, err = strconv.Atoi(userArg)
+		if err != nil {
+			// not numeric - we have to bail
+			return nil, fmt.Errorf("Unable to find user %v", userArg)
+		}
+
+		// Must be inside valid uid range.
+		if user.Uid < minId || user.Uid > maxId {
+			return nil, ErrRange
+		}
+
+		// if userArg couldn't be found in /etc/passwd but is numeric, just roll with it - this is legit
+	}
+
+	if groupArg != "" || name != "" {
+		groups, err := ParseGroupFilter(group, func(g Group) bool {
+			// Explicit group format takes precedence.
+			if groupArg != "" {
+				return g.Name == groupArg || strconv.Itoa(g.Gid) == groupArg
+			}
+
+			// Check if user is a member.
+			for _, u := range g.List {
+				if u == name {
+					return true
+				}
+			}
+
+			return false
+		})
+		if err != nil && group != nil {
+			return nil, fmt.Errorf("Unable to find groups for user %v: %v", name, err)
+		}
+
+		haveGroup := groups != nil && len(groups) > 0
+		if groupArg != "" {
+			if haveGroup {
+				// if we found any group entries that matched our filter, let's take the first one as "correct"
+				user.Gid = groups[0].Gid
+			} else {
+				// we asked for a group but didn't find it... let's check to see if we wanted a numeric group
+				user.Gid, err = strconv.Atoi(groupArg)
+				if err != nil {
+					// not numeric - we have to bail
+					return nil, fmt.Errorf("Unable to find group %v", groupArg)
+				}
+
+				// Ensure gid is inside gid range.
+				if user.Gid < minId || user.Gid > maxId {
+					return nil, ErrRange
+				}
+
+				// if groupArg couldn't be found in /etc/group but is numeric, just roll with it - this is legit
+			}
+		} else if haveGroup {
+			// If implicit group format, fill supplementary gids.
+			user.Sgids = make([]int, len(groups))
+			for i, group := range groups {
+				user.Sgids[i] = group.Gid
+			}
+		}
+	}
+
+	return user, nil
+}
+
+// GetAdditionalGroups looks up a list of groups by name or group id
+// against the given /etc/group formatted data. If a group name cannot
+// be found, an error will be returned.
If a group id cannot be found, +// or the given group data is nil, the id will be returned as-is +// provided it is in the legal range. +func GetAdditionalGroups(additionalGroups []string, group io.Reader) ([]int, error) { + var groups = []Group{} + if group != nil { + var err error + groups, err = ParseGroupFilter(group, func(g Group) bool { + for _, ag := range additionalGroups { + if g.Name == ag || strconv.Itoa(g.Gid) == ag { + return true + } + } + return false + }) + if err != nil { + return nil, fmt.Errorf("Unable to find additional groups %v: %v", additionalGroups, err) + } + } + + gidMap := make(map[int]struct{}) + for _, ag := range additionalGroups { + var found bool + for _, g := range groups { + // if we found a matched group either by name or gid, take the + // first matched as correct + if g.Name == ag || strconv.Itoa(g.Gid) == ag { + if _, ok := gidMap[g.Gid]; !ok { + gidMap[g.Gid] = struct{}{} + found = true + break + } + } + } + // we asked for a group but didn't find it. let's check to see + // if we wanted a numeric group + if !found { + gid, err := strconv.Atoi(ag) + if err != nil { + return nil, fmt.Errorf("Unable to find group %s", ag) + } + // Ensure gid is inside gid range. + if gid < minId || gid > maxId { + return nil, ErrRange + } + gidMap[gid] = struct{}{} + } + } + gids := []int{} + for gid := range gidMap { + gids = append(gids, gid) + } + return gids, nil +} + +// GetAdditionalGroupsPath is a wrapper around GetAdditionalGroups +// that opens the groupPath given and gives it as an argument to +// GetAdditionalGroups. +func GetAdditionalGroupsPath(additionalGroups []string, groupPath string) ([]int, error) { + group, err := os.Open(groupPath) + if err == nil { + defer group.Close() + } + return GetAdditionalGroups(additionalGroups, group) +} diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/opencontainers/runc/libcontainer/user/user_test.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/opencontainers/runc/libcontainer/user/user_test.go new file mode 100644 index 000000000..53b2289bf --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/github.com/opencontainers/runc/libcontainer/user/user_test.go @@ -0,0 +1,472 @@ +package user + +import ( + "io" + "reflect" + "sort" + "strconv" + "strings" + "testing" +) + +func TestUserParseLine(t *testing.T) { + var ( + a, b string + c []string + d int + ) + + parseLine("", &a, &b) + if a != "" || b != "" { + t.Fatalf("a and b should be empty ('%v', '%v')", a, b) + } + + parseLine("a", &a, &b) + if a != "a" || b != "" { + t.Fatalf("a should be 'a' and b should be empty ('%v', '%v')", a, b) + } + + parseLine("bad boys:corny cows", &a, &b) + if a != "bad boys" || b != "corny cows" { + t.Fatalf("a should be 'bad boys' and b should be 'corny cows' ('%v', '%v')", a, b) + } + + parseLine("", &c) + if len(c) != 0 { + t.Fatalf("c should be empty (%#v)", c) + } + + parseLine("d,e,f:g:h:i,j,k", &c, &a, &b, &c) + if a != "g" || b != "h" || len(c) != 3 || c[0] != "i" || c[1] != "j" || c[2] != "k" { + t.Fatalf("a should be 'g', b should be 'h', and c should be ['i','j','k'] ('%v', '%v', '%#v')", a, b, c) + } + + parseLine("::::::::::", &a, &b, &c) + if a != "" || b != "" || len(c) != 0 { + t.Fatalf("a, b, and c should all be empty ('%v', '%v', '%#v')", a, b, c) + } + + parseLine("not a number", &d) + if d != 0 { + t.Fatalf("d should be 0 (%v)", d) + } + + parseLine("b:12:c", &a, &d, &b) + if a != "b" || b != "c" || d != 12 { + t.Fatalf("a should be 'b' and b should be 'c', and d 
should be 12 ('%v', '%v', %v)", a, b, d) + } +} + +func TestUserParsePasswd(t *testing.T) { + users, err := ParsePasswdFilter(strings.NewReader(` +root:x:0:0:root:/root:/bin/bash +adm:x:3:4:adm:/var/adm:/bin/false +this is just some garbage data +`), nil) + if err != nil { + t.Fatalf("Unexpected error: %v", err) + } + if len(users) != 3 { + t.Fatalf("Expected 3 users, got %v", len(users)) + } + if users[0].Uid != 0 || users[0].Name != "root" { + t.Fatalf("Expected users[0] to be 0 - root, got %v - %v", users[0].Uid, users[0].Name) + } + if users[1].Uid != 3 || users[1].Name != "adm" { + t.Fatalf("Expected users[1] to be 3 - adm, got %v - %v", users[1].Uid, users[1].Name) + } +} + +func TestUserParseGroup(t *testing.T) { + groups, err := ParseGroupFilter(strings.NewReader(` +root:x:0:root +adm:x:4:root,adm,daemon +this is just some garbage data +`), nil) + if err != nil { + t.Fatalf("Unexpected error: %v", err) + } + if len(groups) != 3 { + t.Fatalf("Expected 3 groups, got %v", len(groups)) + } + if groups[0].Gid != 0 || groups[0].Name != "root" || len(groups[0].List) != 1 { + t.Fatalf("Expected groups[0] to be 0 - root - 1 member, got %v - %v - %v", groups[0].Gid, groups[0].Name, len(groups[0].List)) + } + if groups[1].Gid != 4 || groups[1].Name != "adm" || len(groups[1].List) != 3 { + t.Fatalf("Expected groups[1] to be 4 - adm - 3 members, got %v - %v - %v", groups[1].Gid, groups[1].Name, len(groups[1].List)) + } +} + +func TestValidGetExecUser(t *testing.T) { + const passwdContent = ` +root:x:0:0:root user:/root:/bin/bash +adm:x:42:43:adm:/var/adm:/bin/false +this is just some garbage data +` + const groupContent = ` +root:x:0:root +adm:x:43: +grp:x:1234:root,adm +this is just some garbage data +` + defaultExecUser := ExecUser{ + Uid: 8888, + Gid: 8888, + Sgids: []int{8888}, + Home: "/8888", + } + + tests := []struct { + ref string + expected ExecUser + }{ + { + ref: "root", + expected: ExecUser{ + Uid: 0, + Gid: 0, + Sgids: []int{0, 1234}, + Home: "/root", + }, + }, + { + ref: "adm", + expected: ExecUser{ + Uid: 42, + Gid: 43, + Sgids: []int{1234}, + Home: "/var/adm", + }, + }, + { + ref: "root:adm", + expected: ExecUser{ + Uid: 0, + Gid: 43, + Sgids: defaultExecUser.Sgids, + Home: "/root", + }, + }, + { + ref: "adm:1234", + expected: ExecUser{ + Uid: 42, + Gid: 1234, + Sgids: defaultExecUser.Sgids, + Home: "/var/adm", + }, + }, + { + ref: "42:1234", + expected: ExecUser{ + Uid: 42, + Gid: 1234, + Sgids: defaultExecUser.Sgids, + Home: "/var/adm", + }, + }, + { + ref: "1337:1234", + expected: ExecUser{ + Uid: 1337, + Gid: 1234, + Sgids: defaultExecUser.Sgids, + Home: defaultExecUser.Home, + }, + }, + { + ref: "1337", + expected: ExecUser{ + Uid: 1337, + Gid: defaultExecUser.Gid, + Sgids: defaultExecUser.Sgids, + Home: defaultExecUser.Home, + }, + }, + { + ref: "", + expected: ExecUser{ + Uid: defaultExecUser.Uid, + Gid: defaultExecUser.Gid, + Sgids: defaultExecUser.Sgids, + Home: defaultExecUser.Home, + }, + }, + } + + for _, test := range tests { + passwd := strings.NewReader(passwdContent) + group := strings.NewReader(groupContent) + + execUser, err := GetExecUser(test.ref, &defaultExecUser, passwd, group) + if err != nil { + t.Logf("got unexpected error when parsing '%s': %s", test.ref, err.Error()) + t.Fail() + continue + } + + if !reflect.DeepEqual(test.expected, *execUser) { + t.Logf("got: %#v", execUser) + t.Logf("expected: %#v", test.expected) + t.Fail() + continue + } + } +} + +func TestInvalidGetExecUser(t *testing.T) { + const passwdContent = ` +root:x:0:0:root 
user:/root:/bin/bash +adm:x:42:43:adm:/var/adm:/bin/false +this is just some garbage data +` + const groupContent = ` +root:x:0:root +adm:x:43: +grp:x:1234:root,adm +this is just some garbage data +` + + tests := []string{ + // No such user/group. + "notuser", + "notuser:notgroup", + "root:notgroup", + "notuser:adm", + "8888:notgroup", + "notuser:8888", + + // Invalid user/group values. + "-1:0", + "0:-3", + "-5:-2", + } + + for _, test := range tests { + passwd := strings.NewReader(passwdContent) + group := strings.NewReader(groupContent) + + execUser, err := GetExecUser(test, nil, passwd, group) + if err == nil { + t.Logf("got unexpected success when parsing '%s': %#v", test, execUser) + t.Fail() + continue + } + } +} + +func TestGetExecUserNilSources(t *testing.T) { + const passwdContent = ` +root:x:0:0:root user:/root:/bin/bash +adm:x:42:43:adm:/var/adm:/bin/false +this is just some garbage data +` + const groupContent = ` +root:x:0:root +adm:x:43: +grp:x:1234:root,adm +this is just some garbage data +` + + defaultExecUser := ExecUser{ + Uid: 8888, + Gid: 8888, + Sgids: []int{8888}, + Home: "/8888", + } + + tests := []struct { + ref string + passwd, group bool + expected ExecUser + }{ + { + ref: "", + passwd: false, + group: false, + expected: ExecUser{ + Uid: 8888, + Gid: 8888, + Sgids: []int{8888}, + Home: "/8888", + }, + }, + { + ref: "root", + passwd: true, + group: false, + expected: ExecUser{ + Uid: 0, + Gid: 0, + Sgids: []int{8888}, + Home: "/root", + }, + }, + { + ref: "0", + passwd: false, + group: false, + expected: ExecUser{ + Uid: 0, + Gid: 8888, + Sgids: []int{8888}, + Home: "/8888", + }, + }, + { + ref: "0:0", + passwd: false, + group: false, + expected: ExecUser{ + Uid: 0, + Gid: 0, + Sgids: []int{8888}, + Home: "/8888", + }, + }, + } + + for _, test := range tests { + var passwd, group io.Reader + + if test.passwd { + passwd = strings.NewReader(passwdContent) + } + + if test.group { + group = strings.NewReader(groupContent) + } + + execUser, err := GetExecUser(test.ref, &defaultExecUser, passwd, group) + if err != nil { + t.Logf("got unexpected error when parsing '%s': %s", test.ref, err.Error()) + t.Fail() + continue + } + + if !reflect.DeepEqual(test.expected, *execUser) { + t.Logf("got: %#v", execUser) + t.Logf("expected: %#v", test.expected) + t.Fail() + continue + } + } +} + +func TestGetAdditionalGroups(t *testing.T) { + const groupContent = ` +root:x:0:root +adm:x:43: +grp:x:1234:root,adm +adm:x:4343:root,adm-duplicate +this is just some garbage data +` + tests := []struct { + groups []string + expected []int + hasError bool + }{ + { + // empty group + groups: []string{}, + expected: []int{}, + }, + { + // single group + groups: []string{"adm"}, + expected: []int{43}, + }, + { + // multiple groups + groups: []string{"adm", "grp"}, + expected: []int{43, 1234}, + }, + { + // invalid group + groups: []string{"adm", "grp", "not-exist"}, + expected: nil, + hasError: true, + }, + { + // group with numeric id + groups: []string{"43"}, + expected: []int{43}, + }, + { + // group with unknown numeric id + groups: []string{"adm", "10001"}, + expected: []int{43, 10001}, + }, + { + // groups specified twice with numeric and name + groups: []string{"adm", "43"}, + expected: []int{43}, + }, + { + // groups with too small id + groups: []string{"-1"}, + expected: nil, + hasError: true, + }, + { + // groups with too large id + groups: []string{strconv.Itoa(1 << 31)}, + expected: nil, + hasError: true, + }, + } + + for _, test := range tests { + group := 
strings.NewReader(groupContent)
+
+		gids, err := GetAdditionalGroups(test.groups, group)
+		if test.hasError && err == nil {
+			t.Errorf("Parse(%#v) expects error but has none", test)
+			continue
+		}
+		if !test.hasError && err != nil {
+			t.Errorf("Parse(%#v) has error %v", test, err)
+			continue
+		}
+		sort.Sort(sort.IntSlice(gids))
+		if !reflect.DeepEqual(gids, test.expected) {
+			t.Errorf("Gids(%v), expect %v from groups %v", gids, test.expected, test.groups)
+		}
+	}
+}
+
+func TestGetAdditionalGroupsNumeric(t *testing.T) {
+	tests := []struct {
+		groups   []string
+		expected []int
+		hasError bool
+	}{
+		{
+			// numeric groups only
+			groups:   []string{"1234", "5678"},
+			expected: []int{1234, 5678},
+		},
+		{
+			// numeric and alphabetic
+			groups:   []string{"1234", "fake"},
+			expected: nil,
+			hasError: true,
+		},
+	}
+
+	for _, test := range tests {
+		gids, err := GetAdditionalGroups(test.groups, nil)
+		if test.hasError && err == nil {
+			t.Errorf("Parse(%#v) expects error but has none", test)
+			continue
+		}
+		if !test.hasError && err != nil {
+			t.Errorf("Parse(%#v) has error %v", test, err)
+			continue
+		}
+		sort.Sort(sort.IntSlice(gids))
+		if !reflect.DeepEqual(gids, test.expected) {
+			t.Errorf("Gids(%v), expect %v from groups %v", gids, test.expected, test.groups)
+		}
+	}
+}
diff --git a/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/net/context/context.go b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/net/context/context.go
new file mode 100644
index 000000000..11bd8d34e
--- /dev/null
+++ b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/net/context/context.go
@@ -0,0 +1,447 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package context defines the Context type, which carries deadlines,
+// cancelation signals, and other request-scoped values across API boundaries
+// and between processes.
+//
+// Incoming requests to a server should create a Context, and outgoing calls to
+// servers should accept a Context. The chain of function calls between them
+// must propagate the Context, optionally replacing it with a modified copy
+// created using WithDeadline, WithTimeout, WithCancel, or WithValue.
+//
+// Programs that use Contexts should follow these rules to keep interfaces
+// consistent across packages and enable static analysis tools to check context
+// propagation:
+//
+// Do not store Contexts inside a struct type; instead, pass a Context
+// explicitly to each function that needs it. The Context should be the first
+// parameter, typically named ctx:
+//
+// 	func DoSomething(ctx context.Context, arg Arg) error {
+// 		// ... use ctx ...
+// 	}
+//
+// Do not pass a nil Context, even if a function permits it. Pass context.TODO
+// if you are unsure about which Context to use.
+//
+// Use context Values only for request-scoped data that transits processes and
+// APIs, not for passing optional parameters to functions.
+//
+// The same Context may be passed to functions running in different goroutines;
+// Contexts are safe for simultaneous use by multiple goroutines.
+//
+// See http://blog.golang.org/context for example code for a server that uses
+// Contexts.
+package context
+
+import (
+	"errors"
+	"fmt"
+	"sync"
+	"time"
+)
+
+// A Context carries a deadline, a cancelation signal, and other values across
+// API boundaries.
+//
+// Context's methods may be called by multiple goroutines simultaneously.
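+//
+// As a small illustrative sketch (startWork and handle are hypothetical, not
+// part of this package), a typical caller threads the Context through and
+// selects on Done:
+//
+//	func DoSomething(ctx context.Context, arg Arg) error {
+//		select {
+//		case <-ctx.Done():
+//			return ctx.Err() // canceled or deadline exceeded
+//		case res := <-startWork(arg):
+//			return handle(res)
+//		}
+//	}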
+type Context interface {
+	// Deadline returns the time when work done on behalf of this context
+	// should be canceled. Deadline returns ok==false when no deadline is
+	// set. Successive calls to Deadline return the same results.
+	Deadline() (deadline time.Time, ok bool)
+
+	// Done returns a channel that's closed when work done on behalf of this
+	// context should be canceled. Done may return nil if this context can
+	// never be canceled. Successive calls to Done return the same value.
+	//
+	// WithCancel arranges for Done to be closed when cancel is called;
+	// WithDeadline arranges for Done to be closed when the deadline
+	// expires; WithTimeout arranges for Done to be closed when the timeout
+	// elapses.
+	//
+	// Done is provided for use in select statements:
+	//
+	//  // Stream generates values with DoSomething and sends them to out
+	//  // until DoSomething returns an error or ctx.Done is closed.
+	//  func Stream(ctx context.Context, out chan<- Value) error {
+	//  	for {
+	//  		v, err := DoSomething(ctx)
+	//  		if err != nil {
+	//  			return err
+	//  		}
+	//  		select {
+	//  		case <-ctx.Done():
+	//  			return ctx.Err()
+	//  		case out <- v:
+	//  		}
+	//  	}
+	//  }
+	//
+	// See http://blog.golang.org/pipelines for more examples of how to use
+	// a Done channel for cancelation.
+	Done() <-chan struct{}
+
+	// Err returns a non-nil error value after Done is closed. Err returns
+	// Canceled if the context was canceled or DeadlineExceeded if the
+	// context's deadline passed. No other values for Err are defined.
+	// After Done is closed, successive calls to Err return the same value.
+	Err() error
+
+	// Value returns the value associated with this context for key, or nil
+	// if no value is associated with key. Successive calls to Value with
+	// the same key return the same result.
+	//
+	// Use context values only for request-scoped data that transits
+	// processes and API boundaries, not for passing optional parameters to
+	// functions.
+	//
+	// A key identifies a specific value in a Context. Functions that wish
+	// to store values in Context typically allocate a key in a global
+	// variable then use that key as the argument to context.WithValue and
+	// Context.Value. A key can be any type that supports equality;
+	// packages should define keys as an unexported type to avoid
+	// collisions.
+	//
+	// Packages that define a Context key should provide type-safe accessors
+	// for the values stored using that key:
+	//
+	// 	// Package user defines a User type that's stored in Contexts.
+	// 	package user
+	//
+	// 	import "golang.org/x/net/context"
+	//
+	// 	// User is the type of value stored in the Contexts.
+	// 	type User struct {...}
+	//
+	// 	// key is an unexported type for keys defined in this package.
+	// 	// This prevents collisions with keys defined in other packages.
+	// 	type key int
+	//
+	// 	// userKey is the key for user.User values in Contexts. It is
+	// 	// unexported; clients use user.NewContext and user.FromContext
+	// 	// instead of using this key directly.
+	// 	var userKey key = 0
+	//
+	// 	// NewContext returns a new Context that carries value u.
+	// 	func NewContext(ctx context.Context, u *User) context.Context {
+	// 		return context.WithValue(ctx, userKey, u)
+	// 	}
+	//
+	// 	// FromContext returns the User value stored in ctx, if any.
+ // func FromContext(ctx context.Context) (*User, bool) { + // u, ok := ctx.Value(userKey).(*User) + // return u, ok + // } + Value(key interface{}) interface{} +} + +// Canceled is the error returned by Context.Err when the context is canceled. +var Canceled = errors.New("context canceled") + +// DeadlineExceeded is the error returned by Context.Err when the context's +// deadline passes. +var DeadlineExceeded = errors.New("context deadline exceeded") + +// An emptyCtx is never canceled, has no values, and has no deadline. It is not +// struct{}, since vars of this type must have distinct addresses. +type emptyCtx int + +func (*emptyCtx) Deadline() (deadline time.Time, ok bool) { + return +} + +func (*emptyCtx) Done() <-chan struct{} { + return nil +} + +func (*emptyCtx) Err() error { + return nil +} + +func (*emptyCtx) Value(key interface{}) interface{} { + return nil +} + +func (e *emptyCtx) String() string { + switch e { + case background: + return "context.Background" + case todo: + return "context.TODO" + } + return "unknown empty Context" +} + +var ( + background = new(emptyCtx) + todo = new(emptyCtx) +) + +// Background returns a non-nil, empty Context. It is never canceled, has no +// values, and has no deadline. It is typically used by the main function, +// initialization, and tests, and as the top-level Context for incoming +// requests. +func Background() Context { + return background +} + +// TODO returns a non-nil, empty Context. Code should use context.TODO when +// it's unclear which Context to use or it is not yet available (because the +// surrounding function has not yet been extended to accept a Context +// parameter). TODO is recognized by static analysis tools that determine +// whether Contexts are propagated correctly in a program. +func TODO() Context { + return todo +} + +// A CancelFunc tells an operation to abandon its work. +// A CancelFunc does not wait for the work to stop. +// After the first call, subsequent calls to a CancelFunc do nothing. +type CancelFunc func() + +// WithCancel returns a copy of parent with a new Done channel. The returned +// context's Done channel is closed when the returned cancel function is called +// or when the parent context's Done channel is closed, whichever happens first. +// +// Canceling this context releases resources associated with it, so code should +// call cancel as soon as the operations running in this Context complete. +func WithCancel(parent Context) (ctx Context, cancel CancelFunc) { + c := newCancelCtx(parent) + propagateCancel(parent, &c) + return &c, func() { c.cancel(true, Canceled) } +} + +// newCancelCtx returns an initialized cancelCtx. +func newCancelCtx(parent Context) cancelCtx { + return cancelCtx{ + Context: parent, + done: make(chan struct{}), + } +} + +// propagateCancel arranges for child to be canceled when parent is. +func propagateCancel(parent Context, child canceler) { + if parent.Done() == nil { + return // parent is never canceled + } + if p, ok := parentCancelCtx(parent); ok { + p.mu.Lock() + if p.err != nil { + // parent has already been canceled + child.cancel(false, p.err) + } else { + if p.children == nil { + p.children = make(map[canceler]bool) + } + p.children[child] = true + } + p.mu.Unlock() + } else { + go func() { + select { + case <-parent.Done(): + child.cancel(false, parent.Err()) + case <-child.Done(): + } + }() + } +} + +// parentCancelCtx follows a chain of parent references until it finds a +// *cancelCtx. 
This function understands how each of the concrete types in this +// package represents its parent. +func parentCancelCtx(parent Context) (*cancelCtx, bool) { + for { + switch c := parent.(type) { + case *cancelCtx: + return c, true + case *timerCtx: + return &c.cancelCtx, true + case *valueCtx: + parent = c.Context + default: + return nil, false + } + } +} + +// removeChild removes a context from its parent. +func removeChild(parent Context, child canceler) { + p, ok := parentCancelCtx(parent) + if !ok { + return + } + p.mu.Lock() + if p.children != nil { + delete(p.children, child) + } + p.mu.Unlock() +} + +// A canceler is a context type that can be canceled directly. The +// implementations are *cancelCtx and *timerCtx. +type canceler interface { + cancel(removeFromParent bool, err error) + Done() <-chan struct{} +} + +// A cancelCtx can be canceled. When canceled, it also cancels any children +// that implement canceler. +type cancelCtx struct { + Context + + done chan struct{} // closed by the first cancel call. + + mu sync.Mutex + children map[canceler]bool // set to nil by the first cancel call + err error // set to non-nil by the first cancel call +} + +func (c *cancelCtx) Done() <-chan struct{} { + return c.done +} + +func (c *cancelCtx) Err() error { + c.mu.Lock() + defer c.mu.Unlock() + return c.err +} + +func (c *cancelCtx) String() string { + return fmt.Sprintf("%v.WithCancel", c.Context) +} + +// cancel closes c.done, cancels each of c's children, and, if +// removeFromParent is true, removes c from its parent's children. +func (c *cancelCtx) cancel(removeFromParent bool, err error) { + if err == nil { + panic("context: internal error: missing cancel error") + } + c.mu.Lock() + if c.err != nil { + c.mu.Unlock() + return // already canceled + } + c.err = err + close(c.done) + for child := range c.children { + // NOTE: acquiring the child's lock while holding parent's lock. + child.cancel(false, err) + } + c.children = nil + c.mu.Unlock() + + if removeFromParent { + removeChild(c.Context, c) + } +} + +// WithDeadline returns a copy of the parent context with the deadline adjusted +// to be no later than d. If the parent's deadline is already earlier than d, +// WithDeadline(parent, d) is semantically equivalent to parent. The returned +// context's Done channel is closed when the deadline expires, when the returned +// cancel function is called, or when the parent context's Done channel is +// closed, whichever happens first. +// +// Canceling this context releases resources associated with it, so code should +// call cancel as soon as the operations running in this Context complete. +func WithDeadline(parent Context, deadline time.Time) (Context, CancelFunc) { + if cur, ok := parent.Deadline(); ok && cur.Before(deadline) { + // The current deadline is already sooner than the new one. + return WithCancel(parent) + } + c := &timerCtx{ + cancelCtx: newCancelCtx(parent), + deadline: deadline, + } + propagateCancel(parent, c) + d := deadline.Sub(time.Now()) + if d <= 0 { + c.cancel(true, DeadlineExceeded) // deadline has already passed + return c, func() { c.cancel(true, Canceled) } + } + c.mu.Lock() + defer c.mu.Unlock() + if c.err == nil { + c.timer = time.AfterFunc(d, func() { + c.cancel(true, DeadlineExceeded) + }) + } + return c, func() { c.cancel(true, Canceled) } +} + +// A timerCtx carries a timer and a deadline. It embeds a cancelCtx to +// implement Done and Err. It implements cancel by stopping its timer then +// delegating to cancelCtx.cancel. 
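+//
+// An illustrative sketch of how this machinery is typically consumed
+// (slowOperation is hypothetical, not part of this package):
+//
+//	ctx, cancel := WithDeadline(parent, time.Now().Add(5*time.Second))
+//	defer cancel() // stops the timer early if slowOperation returns first
+//	return slowOperation(ctx)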
+type timerCtx struct { + cancelCtx + timer *time.Timer // Under cancelCtx.mu. + + deadline time.Time +} + +func (c *timerCtx) Deadline() (deadline time.Time, ok bool) { + return c.deadline, true +} + +func (c *timerCtx) String() string { + return fmt.Sprintf("%v.WithDeadline(%s [%s])", c.cancelCtx.Context, c.deadline, c.deadline.Sub(time.Now())) +} + +func (c *timerCtx) cancel(removeFromParent bool, err error) { + c.cancelCtx.cancel(false, err) + if removeFromParent { + // Remove this timerCtx from its parent cancelCtx's children. + removeChild(c.cancelCtx.Context, c) + } + c.mu.Lock() + if c.timer != nil { + c.timer.Stop() + c.timer = nil + } + c.mu.Unlock() +} + +// WithTimeout returns WithDeadline(parent, time.Now().Add(timeout)). +// +// Canceling this context releases resources associated with it, so code should +// call cancel as soon as the operations running in this Context complete: +// +// func slowOperationWithTimeout(ctx context.Context) (Result, error) { +// ctx, cancel := context.WithTimeout(ctx, 100*time.Millisecond) +// defer cancel() // releases resources if slowOperation completes before timeout elapses +// return slowOperation(ctx) +// } +func WithTimeout(parent Context, timeout time.Duration) (Context, CancelFunc) { + return WithDeadline(parent, time.Now().Add(timeout)) +} + +// WithValue returns a copy of parent in which the value associated with key is +// val. +// +// Use context Values only for request-scoped data that transits processes and +// APIs, not for passing optional parameters to functions. +func WithValue(parent Context, key interface{}, val interface{}) Context { + return &valueCtx{parent, key, val} +} + +// A valueCtx carries a key-value pair. It implements Value for that key and +// delegates all other calls to the embedded Context. +type valueCtx struct { + Context + key, val interface{} +} + +func (c *valueCtx) String() string { + return fmt.Sprintf("%v.WithValue(%#v, %#v)", c.Context, c.key, c.val) +} + +func (c *valueCtx) Value(key interface{}) interface{} { + if c.key == key { + return c.val + } + return c.Context.Value(key) +} diff --git a/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/net/context/context_test.go b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/net/context/context_test.go new file mode 100644 index 000000000..05345fc5e --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/net/context/context_test.go @@ -0,0 +1,575 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package context + +import ( + "fmt" + "math/rand" + "runtime" + "strings" + "sync" + "testing" + "time" +) + +// otherContext is a Context that's not one of the types defined in context.go. +// This lets us test code paths that differ based on the underlying type of the +// Context. 
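+//
+// In particular, wrapping a Context this way hides any inner *cancelCtx from
+// parentCancelCtx's type switch, which forces propagateCancel onto its
+// goroutine-based fallback path for children created below the wrapper.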
+type otherContext struct { + Context +} + +func TestBackground(t *testing.T) { + c := Background() + if c == nil { + t.Fatalf("Background returned nil") + } + select { + case x := <-c.Done(): + t.Errorf("<-c.Done() == %v want nothing (it should block)", x) + default: + } + if got, want := fmt.Sprint(c), "context.Background"; got != want { + t.Errorf("Background().String() = %q want %q", got, want) + } +} + +func TestTODO(t *testing.T) { + c := TODO() + if c == nil { + t.Fatalf("TODO returned nil") + } + select { + case x := <-c.Done(): + t.Errorf("<-c.Done() == %v want nothing (it should block)", x) + default: + } + if got, want := fmt.Sprint(c), "context.TODO"; got != want { + t.Errorf("TODO().String() = %q want %q", got, want) + } +} + +func TestWithCancel(t *testing.T) { + c1, cancel := WithCancel(Background()) + + if got, want := fmt.Sprint(c1), "context.Background.WithCancel"; got != want { + t.Errorf("c1.String() = %q want %q", got, want) + } + + o := otherContext{c1} + c2, _ := WithCancel(o) + contexts := []Context{c1, o, c2} + + for i, c := range contexts { + if d := c.Done(); d == nil { + t.Errorf("c[%d].Done() == %v want non-nil", i, d) + } + if e := c.Err(); e != nil { + t.Errorf("c[%d].Err() == %v want nil", i, e) + } + + select { + case x := <-c.Done(): + t.Errorf("<-c.Done() == %v want nothing (it should block)", x) + default: + } + } + + cancel() + time.Sleep(100 * time.Millisecond) // let cancelation propagate + + for i, c := range contexts { + select { + case <-c.Done(): + default: + t.Errorf("<-c[%d].Done() blocked, but shouldn't have", i) + } + if e := c.Err(); e != Canceled { + t.Errorf("c[%d].Err() == %v want %v", i, e, Canceled) + } + } +} + +func TestParentFinishesChild(t *testing.T) { + // Context tree: + // parent -> cancelChild + // parent -> valueChild -> timerChild + parent, cancel := WithCancel(Background()) + cancelChild, stop := WithCancel(parent) + defer stop() + valueChild := WithValue(parent, "key", "value") + timerChild, stop := WithTimeout(valueChild, 10000*time.Hour) + defer stop() + + select { + case x := <-parent.Done(): + t.Errorf("<-parent.Done() == %v want nothing (it should block)", x) + case x := <-cancelChild.Done(): + t.Errorf("<-cancelChild.Done() == %v want nothing (it should block)", x) + case x := <-timerChild.Done(): + t.Errorf("<-timerChild.Done() == %v want nothing (it should block)", x) + case x := <-valueChild.Done(): + t.Errorf("<-valueChild.Done() == %v want nothing (it should block)", x) + default: + } + + // The parent's children should contain the two cancelable children. + pc := parent.(*cancelCtx) + cc := cancelChild.(*cancelCtx) + tc := timerChild.(*timerCtx) + pc.mu.Lock() + if len(pc.children) != 2 || !pc.children[cc] || !pc.children[tc] { + t.Errorf("bad linkage: pc.children = %v, want %v and %v", + pc.children, cc, tc) + } + pc.mu.Unlock() + + if p, ok := parentCancelCtx(cc.Context); !ok || p != pc { + t.Errorf("bad linkage: parentCancelCtx(cancelChild.Context) = %v, %v want %v, true", p, ok, pc) + } + if p, ok := parentCancelCtx(tc.Context); !ok || p != pc { + t.Errorf("bad linkage: parentCancelCtx(timerChild.Context) = %v, %v want %v, true", p, ok, pc) + } + + cancel() + + pc.mu.Lock() + if len(pc.children) != 0 { + t.Errorf("pc.cancel didn't clear pc.children = %v", pc.children) + } + pc.mu.Unlock() + + // parent and children should all be finished. 
+	check := func(ctx Context, name string) {
+		select {
+		case <-ctx.Done():
+		default:
+			t.Errorf("<-%s.Done() blocked, but shouldn't have", name)
+		}
+		if e := ctx.Err(); e != Canceled {
+			t.Errorf("%s.Err() == %v want %v", name, e, Canceled)
+		}
+	}
+	check(parent, "parent")
+	check(cancelChild, "cancelChild")
+	check(valueChild, "valueChild")
+	check(timerChild, "timerChild")
+
+	// A child derived from an already-canceled parent (here via WithValue)
+	// should start out canceled.
+	precanceledChild := WithValue(parent, "key", "value")
+	select {
+	case <-precanceledChild.Done():
+	default:
+		t.Errorf("<-precanceledChild.Done() blocked, but shouldn't have")
+	}
+	if e := precanceledChild.Err(); e != Canceled {
+		t.Errorf("precanceledChild.Err() == %v want %v", e, Canceled)
+	}
+}
+
+func TestChildFinishesFirst(t *testing.T) {
+	cancelable, stop := WithCancel(Background())
+	defer stop()
+	for _, parent := range []Context{Background(), cancelable} {
+		child, cancel := WithCancel(parent)
+
+		select {
+		case x := <-parent.Done():
+			t.Errorf("<-parent.Done() == %v want nothing (it should block)", x)
+		case x := <-child.Done():
+			t.Errorf("<-child.Done() == %v want nothing (it should block)", x)
+		default:
+		}
+
+		cc := child.(*cancelCtx)
+		pc, pcok := parent.(*cancelCtx) // pcok == false when parent == Background()
+		if p, ok := parentCancelCtx(cc.Context); ok != pcok || (ok && pc != p) {
+			t.Errorf("bad linkage: parentCancelCtx(cc.Context) = %v, %v want %v, %v", p, ok, pc, pcok)
+		}
+
+		if pcok {
+			pc.mu.Lock()
+			if len(pc.children) != 1 || !pc.children[cc] {
+				t.Errorf("bad linkage: pc.children = %v, cc = %v", pc.children, cc)
+			}
+			pc.mu.Unlock()
+		}
+
+		cancel()
+
+		if pcok {
+			pc.mu.Lock()
+			if len(pc.children) != 0 {
+				t.Errorf("child's cancel didn't remove self from pc.children = %v", pc.children)
+			}
+			pc.mu.Unlock()
+		}
+
+		// child should be finished.
+		select {
+		case <-child.Done():
+		default:
+			t.Errorf("<-child.Done() blocked, but shouldn't have")
+		}
+		if e := child.Err(); e != Canceled {
+			t.Errorf("child.Err() == %v want %v", e, Canceled)
+		}
+
+		// parent should not be finished.
+		select {
+		case x := <-parent.Done():
+			t.Errorf("<-parent.Done() == %v want nothing (it should block)", x)
+		default:
+		}
+		if e := parent.Err(); e != nil {
+			t.Errorf("parent.Err() == %v want nil", e)
+		}
+	}
+}
+
+func testDeadline(c Context, wait time.Duration, t *testing.T) {
+	select {
+	case <-time.After(wait):
+		t.Fatalf("context should have timed out")
+	case <-c.Done():
+	}
+	if e := c.Err(); e != DeadlineExceeded {
+		t.Errorf("c.Err() == %v want %v", e, DeadlineExceeded)
+	}
+}
+
+func TestDeadline(t *testing.T) {
+	c, _ := WithDeadline(Background(), time.Now().Add(100*time.Millisecond))
+	if got, prefix := fmt.Sprint(c), "context.Background.WithDeadline("; !strings.HasPrefix(got, prefix) {
+		t.Errorf("c.String() = %q want prefix %q", got, prefix)
+	}
+	testDeadline(c, 200*time.Millisecond, t)
+
+	c, _ = WithDeadline(Background(), time.Now().Add(100*time.Millisecond))
+	o := otherContext{c}
+	testDeadline(o, 200*time.Millisecond, t)
+
+	c, _ = WithDeadline(Background(), time.Now().Add(100*time.Millisecond))
+	o = otherContext{c}
+	c, _ = WithDeadline(o, time.Now().Add(300*time.Millisecond))
+	testDeadline(c, 200*time.Millisecond, t)
+}
+
+func TestTimeout(t *testing.T) {
+	c, _ := WithTimeout(Background(), 100*time.Millisecond)
+	if got, prefix := fmt.Sprint(c), "context.Background.WithDeadline("; !strings.HasPrefix(got, prefix) {
+		t.Errorf("c.String() = %q want prefix %q", got, prefix)
+	}
+	testDeadline(c, 200*time.Millisecond, t)
+
+	c, _ = WithTimeout(Background(), 100*time.Millisecond)
+	o := otherContext{c}
+	testDeadline(o, 200*time.Millisecond, t)
+
+	c, _ = WithTimeout(Background(), 100*time.Millisecond)
+	o = otherContext{c}
+	c, _ = WithTimeout(o, 300*time.Millisecond)
+	testDeadline(c, 200*time.Millisecond, t)
+}
+
+func TestCanceledTimeout(t *testing.T) {
+	c, _ := WithTimeout(Background(), 200*time.Millisecond)
+	o := otherContext{c}
+	c, cancel := WithTimeout(o, 400*time.Millisecond)
+	cancel()
+	time.Sleep(100 * time.Millisecond) // let cancelation propagate
+	select {
+	case <-c.Done():
+	default:
+		t.Errorf("<-c.Done() blocked, but shouldn't have")
+	}
+	if e := c.Err(); e != Canceled {
+		t.Errorf("c.Err() == %v want %v", e, Canceled)
+	}
+}
+
+type key1 int
+type key2 int
+
+var k1 = key1(1)
+var k2 = key2(1) // same int as k1, different type
+var k3 = key2(3) // same type as k2, different int
+
+func TestValues(t *testing.T) {
+	check := func(c Context, nm, v1, v2, v3 string) {
+		if v, ok := c.Value(k1).(string); ok == (len(v1) == 0) || v != v1 {
+			t.Errorf(`%s.Value(k1).(string) = %q, %t want %q, %t`, nm, v, ok, v1, len(v1) != 0)
+		}
+		if v, ok := c.Value(k2).(string); ok == (len(v2) == 0) || v != v2 {
+			t.Errorf(`%s.Value(k2).(string) = %q, %t want %q, %t`, nm, v, ok, v2, len(v2) != 0)
+		}
+		if v, ok := c.Value(k3).(string); ok == (len(v3) == 0) || v != v3 {
+			t.Errorf(`%s.Value(k3).(string) = %q, %t want %q, %t`, nm, v, ok, v3, len(v3) != 0)
+		}
+	}
+
+	c0 := Background()
+	check(c0, "c0", "", "", "")
+
+	c1 := WithValue(Background(), k1, "c1k1")
+	check(c1, "c1", "c1k1", "", "")
+
+	if got, want := fmt.Sprint(c1), `context.Background.WithValue(1, "c1k1")`; got != want {
+		t.Errorf("c.String() = %q want %q", got, want)
+	}
+
+	c2 := WithValue(c1, k2, "c2k2")
+	check(c2, "c2", "c1k1", "c2k2", "")
+
+	c3 := WithValue(c2, k3, "c3k3")
+	check(c3, "c3", "c1k1", "c2k2", "c3k3")
+
+	c4 := WithValue(c3, k1, nil)
+	check(c4, "c4", "", "c2k2", "c3k3")
+
+	o0 := otherContext{Background()}
+	check(o0, "o0", "", "", "")
+
+	o1 := otherContext{WithValue(Background(), k1, "c1k1")}
+	check(o1, "o1", "c1k1", "", "")
+
+	o2 := WithValue(o1, k2, "o2k2")
+	check(o2, "o2", "c1k1", "o2k2", "")
+
+	o3 := otherContext{c4}
+	check(o3, "o3", "", "c2k2", "c3k3")
+
+	o4 := WithValue(o3, k3, nil)
+	check(o4, "o4", "", "c2k2", "")
+}
+
+func TestAllocs(t *testing.T) {
+	bg := Background()
+	for _, test := range []struct {
+		desc       string
+		f          func()
+		limit      float64
+		gccgoLimit float64
+	}{
+		{
+			desc:       "Background()",
+			f:          func() { Background() },
+			limit:      0,
+			gccgoLimit: 0,
+		},
+		{
+			desc: fmt.Sprintf("WithValue(bg, %v, nil)", k1),
+			f: func() {
+				c := WithValue(bg, k1, nil)
+				c.Value(k1)
+			},
+			limit:      3,
+			gccgoLimit: 3,
+		},
+		{
+			desc: "WithTimeout(bg, 15*time.Millisecond)",
+			f: func() {
+				c, _ := WithTimeout(bg, 15*time.Millisecond)
+				<-c.Done()
+			},
+			limit:      8,
+			gccgoLimit: 15,
+		},
+		{
+			desc: "WithCancel(bg)",
+			f: func() {
+				c, cancel := WithCancel(bg)
+				cancel()
+				<-c.Done()
+			},
+			limit:      5,
+			gccgoLimit: 8,
+		},
+		{
+			desc: "WithTimeout(bg, 100*time.Millisecond)",
+			f: func() {
+				c, cancel := WithTimeout(bg, 100*time.Millisecond)
+				cancel()
+				<-c.Done()
+			},
+			limit:      8,
+			gccgoLimit: 25,
+		},
+	} {
+		limit := test.limit
+		if runtime.Compiler == "gccgo" {
+			// gccgo does not yet do escape analysis.
+			// TODO(iant): Remove this when gccgo does do escape analysis.
+			limit = test.gccgoLimit
+		}
+		if n := testing.AllocsPerRun(100, test.f); n > limit {
+			t.Errorf("%s allocs = %f want %d", test.desc, n, int(limit))
+		}
+	}
+}
+
+func TestSimultaneousCancels(t *testing.T) {
+	root, cancel := WithCancel(Background())
+	m := map[Context]CancelFunc{root: cancel}
+	q := []Context{root}
+	// Create a tree of contexts.
+	for len(q) != 0 && len(m) < 100 {
+		parent := q[0]
+		q = q[1:]
+		for i := 0; i < 4; i++ {
+			ctx, cancel := WithCancel(parent)
+			m[ctx] = cancel
+			q = append(q, ctx)
+		}
+	}
+	// Start all the cancels in a random order.
+	var wg sync.WaitGroup
+	wg.Add(len(m))
+	for _, cancel := range m {
+		go func(cancel CancelFunc) {
+			cancel()
+			wg.Done()
+		}(cancel)
+	}
+	// Wait on all the contexts in a random order.
+	for ctx := range m {
+		select {
+		case <-ctx.Done():
+		case <-time.After(1 * time.Second):
+			buf := make([]byte, 10<<10)
+			n := runtime.Stack(buf, true)
+			t.Fatalf("timed out waiting for <-ctx.Done(); stacks:\n%s", buf[:n])
+		}
+	}
+	// Wait for all the cancel functions to return.
+	done := make(chan struct{})
+	go func() {
+		wg.Wait()
+		close(done)
+	}()
+	select {
+	case <-done:
+	case <-time.After(1 * time.Second):
+		buf := make([]byte, 10<<10)
+		n := runtime.Stack(buf, true)
+		t.Fatalf("timed out waiting for cancel functions; stacks:\n%s", buf[:n])
+	}
+}
+
+func TestInterlockedCancels(t *testing.T) {
+	parent, cancelParent := WithCancel(Background())
+	child, cancelChild := WithCancel(parent)
+	go func() {
+		parent.Done()
+		cancelChild()
+	}()
+	cancelParent()
+	select {
+	case <-child.Done():
+	case <-time.After(1 * time.Second):
+		buf := make([]byte, 10<<10)
+		n := runtime.Stack(buf, true)
+		t.Fatalf("timed out waiting for child.Done(); stacks:\n%s", buf[:n])
+	}
+}
+
+func TestLayersCancel(t *testing.T) {
+	testLayers(t, time.Now().UnixNano(), false)
+}
+
+func TestLayersTimeout(t *testing.T) {
+	testLayers(t, time.Now().UnixNano(), true)
+}
+
+func testLayers(t *testing.T, seed int64, testTimeout bool) {
+	rand.Seed(seed)
+	errorf := func(format string, a ...interface{}) {
+		t.Errorf(fmt.Sprintf("seed=%d: %s", seed, format), a...)
+ } + const ( + timeout = 200 * time.Millisecond + minLayers = 30 + ) + type value int + var ( + vals []*value + cancels []CancelFunc + numTimers int + ctx = Background() + ) + for i := 0; i < minLayers || numTimers == 0 || len(cancels) == 0 || len(vals) == 0; i++ { + switch rand.Intn(3) { + case 0: + v := new(value) + ctx = WithValue(ctx, v, v) + vals = append(vals, v) + case 1: + var cancel CancelFunc + ctx, cancel = WithCancel(ctx) + cancels = append(cancels, cancel) + case 2: + var cancel CancelFunc + ctx, cancel = WithTimeout(ctx, timeout) + cancels = append(cancels, cancel) + numTimers++ + } + } + checkValues := func(when string) { + for _, key := range vals { + if val := ctx.Value(key).(*value); key != val { + errorf("%s: ctx.Value(%p) = %p want %p", when, key, val, key) + } + } + } + select { + case <-ctx.Done(): + errorf("ctx should not be canceled yet") + default: + } + if s, prefix := fmt.Sprint(ctx), "context.Background."; !strings.HasPrefix(s, prefix) { + t.Errorf("ctx.String() = %q want prefix %q", s, prefix) + } + t.Log(ctx) + checkValues("before cancel") + if testTimeout { + select { + case <-ctx.Done(): + case <-time.After(timeout + 100*time.Millisecond): + errorf("ctx should have timed out") + } + checkValues("after timeout") + } else { + cancel := cancels[rand.Intn(len(cancels))] + cancel() + select { + case <-ctx.Done(): + default: + errorf("ctx should be canceled") + } + checkValues("after cancel") + } +} + +func TestCancelRemoves(t *testing.T) { + checkChildren := func(when string, ctx Context, want int) { + if got := len(ctx.(*cancelCtx).children); got != want { + t.Errorf("%s: context has %d children, want %d", when, got, want) + } + } + + ctx, _ := WithCancel(Background()) + checkChildren("after creation", ctx, 0) + _, cancel := WithCancel(ctx) + checkChildren("with WithCancel child ", ctx, 1) + cancel() + checkChildren("after cancelling WithCancel child", ctx, 0) + + ctx, _ = WithCancel(Background()) + checkChildren("after creation", ctx, 0) + _, cancel = WithTimeout(ctx, 60*time.Minute) + checkChildren("with WithTimeout child ", ctx, 1) + cancel() + checkChildren("after cancelling WithTimeout child", ctx, 0) +} diff --git a/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/net/context/withtimeout_test.go b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/net/context/withtimeout_test.go new file mode 100644 index 000000000..00d5d1ca9 --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/net/context/withtimeout_test.go @@ -0,0 +1,26 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package context_test + +import ( + "fmt" + "time" + + "github.com/fsouza/go-dockerclient/external/golang.org/x/net/context" +) + +func ExampleWithTimeout() { + // Pass a context with a timeout to tell a blocking function that it + // should abandon its work after the timeout elapses. 
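+	// In non-example code, keep the returned CancelFunc and defer it
+	// (ctx, cancel := context.WithTimeout(...); defer cancel()) so the
+	// context's resources are released as soon as the work completes; it
+	// is discarded here only to keep the example minimal.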
+ ctx, _ := context.WithTimeout(context.Background(), 100*time.Millisecond) + select { + case <-time.After(200 * time.Millisecond): + fmt.Println("overslept") + case <-ctx.Done(): + fmt.Println(ctx.Err()) // prints "context deadline exceeded" + } + // Output: + // context deadline exceeded +} diff --git a/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/asm.s b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/asm.s new file mode 100644 index 000000000..8ed2fdb94 --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/asm.s @@ -0,0 +1,10 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !gccgo + +#include "textflag.h" + +TEXT ·use(SB),NOSPLIT,$0 + RET diff --git a/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/asm_darwin_386.s b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/asm_darwin_386.s new file mode 100644 index 000000000..8a7278319 --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/asm_darwin_386.s @@ -0,0 +1,29 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !gccgo + +#include "textflag.h" + +// +// System call support for 386, Darwin +// + +// Just jump to package syscall's implementation for all these functions. +// The runtime may know about them. + +TEXT ·Syscall(SB),NOSPLIT,$0-28 + JMP syscall·Syscall(SB) + +TEXT ·Syscall6(SB),NOSPLIT,$0-40 + JMP syscall·Syscall6(SB) + +TEXT ·Syscall9(SB),NOSPLIT,$0-52 + JMP syscall·Syscall9(SB) + +TEXT ·RawSyscall(SB),NOSPLIT,$0-28 + JMP syscall·RawSyscall(SB) + +TEXT ·RawSyscall6(SB),NOSPLIT,$0-40 + JMP syscall·RawSyscall6(SB) diff --git a/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/asm_darwin_amd64.s b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/asm_darwin_amd64.s new file mode 100644 index 000000000..6321421f2 --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/asm_darwin_amd64.s @@ -0,0 +1,29 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !gccgo + +#include "textflag.h" + +// +// System call support for AMD64, Darwin +// + +// Just jump to package syscall's implementation for all these functions. +// The runtime may know about them. + +TEXT ·Syscall(SB),NOSPLIT,$0-56 + JMP syscall·Syscall(SB) + +TEXT ·Syscall6(SB),NOSPLIT,$0-80 + JMP syscall·Syscall6(SB) + +TEXT ·Syscall9(SB),NOSPLIT,$0-104 + JMP syscall·Syscall9(SB) + +TEXT ·RawSyscall(SB),NOSPLIT,$0-56 + JMP syscall·RawSyscall(SB) + +TEXT ·RawSyscall6(SB),NOSPLIT,$0-80 + JMP syscall·RawSyscall6(SB) diff --git a/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/asm_darwin_arm.s b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/asm_darwin_arm.s new file mode 100644 index 000000000..333242d50 --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/asm_darwin_arm.s @@ -0,0 +1,30 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+
+// +build !gccgo
+// +build arm,darwin
+
+#include "textflag.h"
+
+//
+// System call support for ARM, Darwin
+//
+
+// Just jump to package syscall's implementation for all these functions.
+// The runtime may know about them.
+
+TEXT ·Syscall(SB),NOSPLIT,$0-28
+	B	syscall·Syscall(SB)
+
+TEXT ·Syscall6(SB),NOSPLIT,$0-40
+	B	syscall·Syscall6(SB)
+
+TEXT ·Syscall9(SB),NOSPLIT,$0-52
+	B	syscall·Syscall9(SB)
+
+TEXT ·RawSyscall(SB),NOSPLIT,$0-28
+	B	syscall·RawSyscall(SB)
+
+TEXT ·RawSyscall6(SB),NOSPLIT,$0-40
+	B	syscall·RawSyscall6(SB)
diff --git a/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/asm_darwin_arm64.s b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/asm_darwin_arm64.s
new file mode 100644
index 000000000..97e017437
--- /dev/null
+++ b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/asm_darwin_arm64.s
@@ -0,0 +1,30 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !gccgo
+// +build arm64,darwin
+
+#include "textflag.h"
+
+//
+// System call support for ARM64, Darwin
+//
+
+// Just jump to package syscall's implementation for all these functions.
+// The runtime may know about them.
+
+TEXT ·Syscall(SB),NOSPLIT,$0-56
+	B	syscall·Syscall(SB)
+
+TEXT ·Syscall6(SB),NOSPLIT,$0-80
+	B	syscall·Syscall6(SB)
+
+TEXT ·Syscall9(SB),NOSPLIT,$0-104
+	B	syscall·Syscall9(SB)
+
+TEXT ·RawSyscall(SB),NOSPLIT,$0-56
+	B	syscall·RawSyscall(SB)
+
+TEXT ·RawSyscall6(SB),NOSPLIT,$0-80
+	B	syscall·RawSyscall6(SB)
diff --git a/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/asm_dragonfly_386.s b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/asm_dragonfly_386.s
new file mode 100644
index 000000000..7e55e0d31
--- /dev/null
+++ b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/asm_dragonfly_386.s
@@ -0,0 +1,29 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !gccgo
+
+#include "textflag.h"
+
+//
+// System call support for 386, DragonFly
+//
+
+// Just jump to package syscall's implementation for all these functions.
+// The runtime may know about them.
+
+TEXT ·Syscall(SB),NOSPLIT,$0-32
+	JMP	syscall·Syscall(SB)
+
+TEXT ·Syscall6(SB),NOSPLIT,$0-44
+	JMP	syscall·Syscall6(SB)
+
+TEXT ·Syscall9(SB),NOSPLIT,$0-56
+	JMP	syscall·Syscall9(SB)
+
+TEXT ·RawSyscall(SB),NOSPLIT,$0-32
+	JMP	syscall·RawSyscall(SB)
+
+TEXT ·RawSyscall6(SB),NOSPLIT,$0-44
+	JMP	syscall·RawSyscall6(SB)
diff --git a/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/asm_dragonfly_amd64.s b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/asm_dragonfly_amd64.s
new file mode 100644
index 000000000..d5ed6726c
--- /dev/null
+++ b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/asm_dragonfly_amd64.s
@@ -0,0 +1,29 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !gccgo
+
+#include "textflag.h"
+
+//
+// System call support for AMD64, DragonFly
+//
+
+// Just jump to package syscall's implementation for all these functions.
+// The runtime may know about them.
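+//
+// A general note on these stanzas (standard Go assembler syntax, not
+// specific to this port): in "TEXT ·Syscall(SB),NOSPLIT,$0-64" the middle
+// dot names the symbol in the current package, NOSPLIT suppresses the
+// stack-split prologue, $0 declares a zero-byte local frame, and 64 is the
+// size in bytes of the function's arguments and results.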
+ +TEXT ·Syscall(SB),NOSPLIT,$0-64 + JMP syscall·Syscall(SB) + +TEXT ·Syscall6(SB),NOSPLIT,$0-88 + JMP syscall·Syscall6(SB) + +TEXT ·Syscall9(SB),NOSPLIT,$0-112 + JMP syscall·Syscall9(SB) + +TEXT ·RawSyscall(SB),NOSPLIT,$0-64 + JMP syscall·RawSyscall(SB) + +TEXT ·RawSyscall6(SB),NOSPLIT,$0-88 + JMP syscall·RawSyscall6(SB) diff --git a/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/asm_freebsd_386.s b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/asm_freebsd_386.s new file mode 100644 index 000000000..c9a0a2601 --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/asm_freebsd_386.s @@ -0,0 +1,29 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !gccgo + +#include "textflag.h" + +// +// System call support for 386, FreeBSD +// + +// Just jump to package syscall's implementation for all these functions. +// The runtime may know about them. + +TEXT ·Syscall(SB),NOSPLIT,$0-28 + JMP syscall·Syscall(SB) + +TEXT ·Syscall6(SB),NOSPLIT,$0-40 + JMP syscall·Syscall6(SB) + +TEXT ·Syscall9(SB),NOSPLIT,$0-52 + JMP syscall·Syscall9(SB) + +TEXT ·RawSyscall(SB),NOSPLIT,$0-28 + JMP syscall·RawSyscall(SB) + +TEXT ·RawSyscall6(SB),NOSPLIT,$0-40 + JMP syscall·RawSyscall6(SB) diff --git a/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/asm_freebsd_amd64.s b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/asm_freebsd_amd64.s new file mode 100644 index 000000000..35172477c --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/asm_freebsd_amd64.s @@ -0,0 +1,29 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !gccgo + +#include "textflag.h" + +// +// System call support for AMD64, FreeBSD +// + +// Just jump to package syscall's implementation for all these functions. +// The runtime may know about them. + +TEXT ·Syscall(SB),NOSPLIT,$0-56 + JMP syscall·Syscall(SB) + +TEXT ·Syscall6(SB),NOSPLIT,$0-80 + JMP syscall·Syscall6(SB) + +TEXT ·Syscall9(SB),NOSPLIT,$0-104 + JMP syscall·Syscall9(SB) + +TEXT ·RawSyscall(SB),NOSPLIT,$0-56 + JMP syscall·RawSyscall(SB) + +TEXT ·RawSyscall6(SB),NOSPLIT,$0-80 + JMP syscall·RawSyscall6(SB) diff --git a/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/asm_freebsd_arm.s b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/asm_freebsd_arm.s new file mode 100644 index 000000000..9227c875b --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/asm_freebsd_arm.s @@ -0,0 +1,29 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !gccgo + +#include "textflag.h" + +// +// System call support for ARM, FreeBSD +// + +// Just jump to package syscall's implementation for all these functions. +// The runtime may know about them. 
+ +TEXT ·Syscall(SB),NOSPLIT,$0-28 + B syscall·Syscall(SB) + +TEXT ·Syscall6(SB),NOSPLIT,$0-40 + B syscall·Syscall6(SB) + +TEXT ·Syscall9(SB),NOSPLIT,$0-52 + B syscall·Syscall9(SB) + +TEXT ·RawSyscall(SB),NOSPLIT,$0-28 + B syscall·RawSyscall(SB) + +TEXT ·RawSyscall6(SB),NOSPLIT,$0-40 + B syscall·RawSyscall6(SB) diff --git a/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/asm_linux_386.s b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/asm_linux_386.s new file mode 100644 index 000000000..4db290932 --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/asm_linux_386.s @@ -0,0 +1,35 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !gccgo + +#include "textflag.h" + +// +// System calls for 386, Linux +// + +// Just jump to package syscall's implementation for all these functions. +// The runtime may know about them. + +TEXT ·Syscall(SB),NOSPLIT,$0-28 + JMP syscall·Syscall(SB) + +TEXT ·Syscall6(SB),NOSPLIT,$0-40 + JMP syscall·Syscall6(SB) + +TEXT ·RawSyscall(SB),NOSPLIT,$0-28 + JMP syscall·RawSyscall(SB) + +TEXT ·RawSyscall6(SB),NOSPLIT,$0-40 + JMP syscall·RawSyscall6(SB) + +TEXT ·socketcall(SB),NOSPLIT,$0-36 + JMP syscall·socketcall(SB) + +TEXT ·rawsocketcall(SB),NOSPLIT,$0-36 + JMP syscall·rawsocketcall(SB) + +TEXT ·seek(SB),NOSPLIT,$0-28 + JMP syscall·seek(SB) diff --git a/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/asm_linux_amd64.s b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/asm_linux_amd64.s new file mode 100644 index 000000000..44e25c62f --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/asm_linux_amd64.s @@ -0,0 +1,29 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !gccgo + +#include "textflag.h" + +// +// System calls for AMD64, Linux +// + +// Just jump to package syscall's implementation for all these functions. +// The runtime may know about them. + +TEXT ·Syscall(SB),NOSPLIT,$0-56 + JMP syscall·Syscall(SB) + +TEXT ·Syscall6(SB),NOSPLIT,$0-80 + JMP syscall·Syscall6(SB) + +TEXT ·RawSyscall(SB),NOSPLIT,$0-56 + JMP syscall·RawSyscall(SB) + +TEXT ·RawSyscall6(SB),NOSPLIT,$0-80 + JMP syscall·RawSyscall6(SB) + +TEXT ·gettimeofday(SB),NOSPLIT,$0-16 + JMP syscall·gettimeofday(SB) diff --git a/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/asm_linux_arm.s b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/asm_linux_arm.s new file mode 100644 index 000000000..cf0b57465 --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/asm_linux_arm.s @@ -0,0 +1,29 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !gccgo + +#include "textflag.h" + +// +// System calls for arm, Linux +// + +// Just jump to package syscall's implementation for all these functions. +// The runtime may know about them. 
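+//
+// Note: B is ARM's unconditional branch instruction, playing the same role
+// as JMP in the 386/amd64 files; each stub simply tail-jumps into the
+// corresponding package syscall routine.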
+ +TEXT ·Syscall(SB),NOSPLIT,$0-28 + B syscall·Syscall(SB) + +TEXT ·Syscall6(SB),NOSPLIT,$0-40 + B syscall·Syscall6(SB) + +TEXT ·RawSyscall(SB),NOSPLIT,$0-28 + B syscall·RawSyscall(SB) + +TEXT ·RawSyscall6(SB),NOSPLIT,$0-40 + B syscall·RawSyscall6(SB) + +TEXT ·seek(SB),NOSPLIT,$0-32 + B syscall·seek(SB) diff --git a/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/asm_linux_arm64.s b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/asm_linux_arm64.s new file mode 100644 index 000000000..4be9bfede --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/asm_linux_arm64.s @@ -0,0 +1,24 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build linux +// +build arm64 +// +build !gccgo + +#include "textflag.h" + +// Just jump to package syscall's implementation for all these functions. +// The runtime may know about them. + +TEXT ·Syscall(SB),NOSPLIT,$0-56 + B syscall·Syscall(SB) + +TEXT ·Syscall6(SB),NOSPLIT,$0-80 + B syscall·Syscall6(SB) + +TEXT ·RawSyscall(SB),NOSPLIT,$0-56 + B syscall·RawSyscall(SB) + +TEXT ·RawSyscall6(SB),NOSPLIT,$0-80 + B syscall·RawSyscall6(SB) diff --git a/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/asm_linux_ppc64x.s b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/asm_linux_ppc64x.s new file mode 100644 index 000000000..8d231feb4 --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/asm_linux_ppc64x.s @@ -0,0 +1,28 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build linux +// +build ppc64 ppc64le +// +build !gccgo + +#include "textflag.h" + +// +// System calls for ppc64, Linux +// + +// Just jump to package syscall's implementation for all these functions. +// The runtime may know about them. + +TEXT ·Syscall(SB),NOSPLIT,$0-56 + BR syscall·Syscall(SB) + +TEXT ·Syscall6(SB),NOSPLIT,$0-80 + BR syscall·Syscall6(SB) + +TEXT ·RawSyscall(SB),NOSPLIT,$0-56 + BR syscall·RawSyscall(SB) + +TEXT ·RawSyscall6(SB),NOSPLIT,$0-80 + BR syscall·RawSyscall6(SB) diff --git a/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/asm_netbsd_386.s b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/asm_netbsd_386.s new file mode 100644 index 000000000..48bdcd763 --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/asm_netbsd_386.s @@ -0,0 +1,29 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !gccgo + +#include "textflag.h" + +// +// System call support for 386, NetBSD +// + +// Just jump to package syscall's implementation for all these functions. +// The runtime may know about them. 
+ +TEXT ·Syscall(SB),NOSPLIT,$0-28 + JMP syscall·Syscall(SB) + +TEXT ·Syscall6(SB),NOSPLIT,$0-40 + JMP syscall·Syscall6(SB) + +TEXT ·Syscall9(SB),NOSPLIT,$0-52 + JMP syscall·Syscall9(SB) + +TEXT ·RawSyscall(SB),NOSPLIT,$0-28 + JMP syscall·RawSyscall(SB) + +TEXT ·RawSyscall6(SB),NOSPLIT,$0-40 + JMP syscall·RawSyscall6(SB) diff --git a/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/asm_netbsd_amd64.s b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/asm_netbsd_amd64.s new file mode 100644 index 000000000..2ede05c72 --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/asm_netbsd_amd64.s @@ -0,0 +1,29 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !gccgo + +#include "textflag.h" + +// +// System call support for AMD64, NetBSD +// + +// Just jump to package syscall's implementation for all these functions. +// The runtime may know about them. + +TEXT ·Syscall(SB),NOSPLIT,$0-56 + JMP syscall·Syscall(SB) + +TEXT ·Syscall6(SB),NOSPLIT,$0-80 + JMP syscall·Syscall6(SB) + +TEXT ·Syscall9(SB),NOSPLIT,$0-104 + JMP syscall·Syscall9(SB) + +TEXT ·RawSyscall(SB),NOSPLIT,$0-56 + JMP syscall·RawSyscall(SB) + +TEXT ·RawSyscall6(SB),NOSPLIT,$0-80 + JMP syscall·RawSyscall6(SB) diff --git a/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/asm_netbsd_arm.s b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/asm_netbsd_arm.s new file mode 100644 index 000000000..e8928571c --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/asm_netbsd_arm.s @@ -0,0 +1,29 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !gccgo + +#include "textflag.h" + +// +// System call support for ARM, NetBSD +// + +// Just jump to package syscall's implementation for all these functions. +// The runtime may know about them. + +TEXT ·Syscall(SB),NOSPLIT,$0-28 + B syscall·Syscall(SB) + +TEXT ·Syscall6(SB),NOSPLIT,$0-40 + B syscall·Syscall6(SB) + +TEXT ·Syscall9(SB),NOSPLIT,$0-52 + B syscall·Syscall9(SB) + +TEXT ·RawSyscall(SB),NOSPLIT,$0-28 + B syscall·RawSyscall(SB) + +TEXT ·RawSyscall6(SB),NOSPLIT,$0-40 + B syscall·RawSyscall6(SB) diff --git a/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/asm_openbsd_386.s b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/asm_openbsd_386.s new file mode 100644 index 000000000..00576f3c8 --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/asm_openbsd_386.s @@ -0,0 +1,29 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !gccgo + +#include "textflag.h" + +// +// System call support for 386, OpenBSD +// + +// Just jump to package syscall's implementation for all these functions. +// The runtime may know about them. 
+ +TEXT ·Syscall(SB),NOSPLIT,$0-28 + JMP syscall·Syscall(SB) + +TEXT ·Syscall6(SB),NOSPLIT,$0-40 + JMP syscall·Syscall6(SB) + +TEXT ·Syscall9(SB),NOSPLIT,$0-52 + JMP syscall·Syscall9(SB) + +TEXT ·RawSyscall(SB),NOSPLIT,$0-28 + JMP syscall·RawSyscall(SB) + +TEXT ·RawSyscall6(SB),NOSPLIT,$0-40 + JMP syscall·RawSyscall6(SB) diff --git a/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/asm_openbsd_amd64.s b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/asm_openbsd_amd64.s new file mode 100644 index 000000000..790ef77f8 --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/asm_openbsd_amd64.s @@ -0,0 +1,29 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !gccgo + +#include "textflag.h" + +// +// System call support for AMD64, OpenBSD +// + +// Just jump to package syscall's implementation for all these functions. +// The runtime may know about them. + +TEXT ·Syscall(SB),NOSPLIT,$0-56 + JMP syscall·Syscall(SB) + +TEXT ·Syscall6(SB),NOSPLIT,$0-80 + JMP syscall·Syscall6(SB) + +TEXT ·Syscall9(SB),NOSPLIT,$0-104 + JMP syscall·Syscall9(SB) + +TEXT ·RawSyscall(SB),NOSPLIT,$0-56 + JMP syscall·RawSyscall(SB) + +TEXT ·RawSyscall6(SB),NOSPLIT,$0-80 + JMP syscall·RawSyscall6(SB) diff --git a/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/asm_solaris_amd64.s b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/asm_solaris_amd64.s new file mode 100644 index 000000000..43ed17a05 --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/asm_solaris_amd64.s @@ -0,0 +1,17 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !gccgo + +#include "textflag.h" + +// +// System calls for amd64, Solaris are implemented in runtime/syscall_solaris.go +// + +TEXT ·sysvicall6(SB),NOSPLIT,$0-64 + JMP syscall·sysvicall6(SB) + +TEXT ·rawSysvicall6(SB),NOSPLIT,$0-64 + JMP syscall·rawSysvicall6(SB) diff --git a/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/constants.go b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/constants.go new file mode 100644 index 000000000..a96f0ebc2 --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/constants.go @@ -0,0 +1,13 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build darwin dragonfly freebsd linux netbsd openbsd solaris + +package unix + +const ( + R_OK = 0x4 + W_OK = 0x2 + X_OK = 0x1 +) diff --git a/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/creds_test.go b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/creds_test.go new file mode 100644 index 000000000..31cf39b1e --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/creds_test.go @@ -0,0 +1,121 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
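The R_OK, W_OK and X_OK bits defined in constants.go above feed straight into the generated access(2) wrapper. A quick usage sketch, assuming linux, an existing /etc/hosts, and the upstream golang.org/x/sys/unix import path:

```go
package main

import (
	"fmt"

	"golang.org/x/sys/unix" // stand-in for the vendored copy
)

func main() {
	// access(2) checks against the real (not effective) UID and GID.
	if err := unix.Access("/etc/hosts", unix.R_OK|unix.W_OK); err != nil {
		fmt.Println("no read/write access:", err)
		return
	}
	fmt.Println("readable and writable")
}
```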
+ +// +build linux + +package unix_test + +import ( + "bytes" + "net" + "os" + "syscall" + "testing" + + "github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix" +) + +// TestSCMCredentials tests the sending and receiving of credentials +// (PID, UID, GID) in an ancillary message between two UNIX +// sockets. The SO_PASSCRED socket option is enabled on the sending +// socket for this to work. +func TestSCMCredentials(t *testing.T) { + fds, err := unix.Socketpair(unix.AF_LOCAL, unix.SOCK_STREAM, 0) + if err != nil { + t.Fatalf("Socketpair: %v", err) + } + defer unix.Close(fds[0]) + defer unix.Close(fds[1]) + + err = unix.SetsockoptInt(fds[0], unix.SOL_SOCKET, unix.SO_PASSCRED, 1) + if err != nil { + t.Fatalf("SetsockoptInt: %v", err) + } + + srvFile := os.NewFile(uintptr(fds[0]), "server") + defer srvFile.Close() + srv, err := net.FileConn(srvFile) + if err != nil { + t.Errorf("FileConn: %v", err) + return + } + defer srv.Close() + + cliFile := os.NewFile(uintptr(fds[1]), "client") + defer cliFile.Close() + cli, err := net.FileConn(cliFile) + if err != nil { + t.Errorf("FileConn: %v", err) + return + } + defer cli.Close() + + var ucred unix.Ucred + if os.Getuid() != 0 { + ucred.Pid = int32(os.Getpid()) + ucred.Uid = 0 + ucred.Gid = 0 + oob := unix.UnixCredentials(&ucred) + _, _, err := cli.(*net.UnixConn).WriteMsgUnix(nil, oob, nil) + if op, ok := err.(*net.OpError); ok { + err = op.Err + } + if sys, ok := err.(*os.SyscallError); ok { + err = sys.Err + } + if err != syscall.EPERM { + t.Fatalf("WriteMsgUnix failed with %v, want EPERM", err) + } + } + + ucred.Pid = int32(os.Getpid()) + ucred.Uid = uint32(os.Getuid()) + ucred.Gid = uint32(os.Getgid()) + oob := unix.UnixCredentials(&ucred) + + // this is going to send a dummy byte + n, oobn, err := cli.(*net.UnixConn).WriteMsgUnix(nil, oob, nil) + if err != nil { + t.Fatalf("WriteMsgUnix: %v", err) + } + if n != 0 { + t.Fatalf("WriteMsgUnix n = %d, want 0", n) + } + if oobn != len(oob) { + t.Fatalf("WriteMsgUnix oobn = %d, want %d", oobn, len(oob)) + } + + oob2 := make([]byte, 10*len(oob)) + n, oobn2, flags, _, err := srv.(*net.UnixConn).ReadMsgUnix(nil, oob2) + if err != nil { + t.Fatalf("ReadMsgUnix: %v", err) + } + if flags != 0 { + t.Fatalf("ReadMsgUnix flags = 0x%x, want 0", flags) + } + if n != 1 { + t.Fatalf("ReadMsgUnix n = %d, want 1 (dummy byte)", n) + } + if oobn2 != oobn { + // without SO_PASSCRED set on the socket, ReadMsgUnix will + // return zero oob bytes + t.Fatalf("ReadMsgUnix oobn = %d, want %d", oobn2, oobn) + } + oob2 = oob2[:oobn2] + if !bytes.Equal(oob, oob2) { + t.Fatal("ReadMsgUnix oob bytes don't match") + } + + scm, err := unix.ParseSocketControlMessage(oob2) + if err != nil { + t.Fatalf("ParseSocketControlMessage: %v", err) + } + newUcred, err := unix.ParseUnixCredentials(&scm[0]) + if err != nil { + t.Fatalf("ParseUnixCredentials: %v", err) + } + if *newUcred != ucred { + t.Fatalf("ParseUnixCredentials = %+v, want %+v", newUcred, ucred) + } +} diff --git a/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/env_unix.go b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/env_unix.go new file mode 100644 index 000000000..45e281a04 --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/env_unix.go @@ -0,0 +1,27 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
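The test above drives the full socketpair round trip; the encode and decode halves can also be exercised on their own, which makes the control-message layout easier to see. A minimal sketch, assuming linux and the upstream golang.org/x/sys/unix import path:

```go
package main

import (
	"fmt"
	"os"

	"golang.org/x/sys/unix" // stand-in for the vendored copy
)

func main() {
	// Pack our own credentials into an SCM_CREDENTIALS control message,
	// exactly as the sending side of the test does...
	ucred := unix.Ucred{
		Pid: int32(os.Getpid()),
		Uid: uint32(os.Getuid()),
		Gid: uint32(os.Getgid()),
	}
	oob := unix.UnixCredentials(&ucred)

	// ...then unpack it again, as the receiver would after ReadMsgUnix.
	scms, err := unix.ParseSocketControlMessage(oob)
	if err != nil {
		panic(err)
	}
	got, err := unix.ParseUnixCredentials(&scms[0])
	if err != nil {
		panic(err)
	}
	fmt.Printf("round-tripped credentials: %+v\n", *got)
}
```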
+ +// +build darwin dragonfly freebsd linux netbsd openbsd solaris + +// Unix environment variables. + +package unix + +import "syscall" + +func Getenv(key string) (value string, found bool) { + return syscall.Getenv(key) +} + +func Setenv(key, value string) error { + return syscall.Setenv(key, value) +} + +func Clearenv() { + syscall.Clearenv() +} + +func Environ() []string { + return syscall.Environ() +} diff --git a/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/env_unset.go b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/env_unset.go new file mode 100644 index 000000000..922226255 --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/env_unset.go @@ -0,0 +1,14 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build go1.4 + +package unix + +import "syscall" + +func Unsetenv(key string) error { + // This was added in Go 1.4. + return syscall.Unsetenv(key) +} diff --git a/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/export_test.go b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/export_test.go new file mode 100644 index 000000000..b4fdd970b --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/export_test.go @@ -0,0 +1,9 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build darwin dragonfly freebsd linux netbsd openbsd solaris + +package unix + +var Itoa = itoa diff --git a/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/flock.go b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/flock.go new file mode 100644 index 000000000..ce67a5952 --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/flock.go @@ -0,0 +1,24 @@ +// +build linux darwin freebsd openbsd netbsd dragonfly + +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build darwin dragonfly freebsd linux netbsd openbsd + +package unix + +import "unsafe" + +// fcntl64Syscall is usually SYS_FCNTL, but is overridden on 32-bit Linux +// systems by flock_linux_32bit.go to be SYS_FCNTL64. +var fcntl64Syscall uintptr = SYS_FCNTL + +// FcntlFlock performs a fcntl syscall for the F_GETLK, F_SETLK or F_SETLKW command. +func FcntlFlock(fd uintptr, cmd int, lk *Flock_t) error { + _, _, errno := Syscall(fcntl64Syscall, fd, uintptr(cmd), uintptr(unsafe.Pointer(lk))) + if errno == 0 { + return nil + } + return errno +} diff --git a/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/flock_linux_32bit.go b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/flock_linux_32bit.go new file mode 100644 index 000000000..362831c3f --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/flock_linux_32bit.go @@ -0,0 +1,13 @@ +// +build linux,386 linux,arm + +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package unix + +func init() { + // On 32-bit Linux systems, the fcntl syscall that matches Go's + // Flock_t type is SYS_FCNTL64, not SYS_FCNTL. 
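FcntlFlock above is the portable entry point for POSIX record locks, and the init whose body follows merely swaps the syscall number to SYS_FCNTL64 on 32-bit Linux. A usage sketch for taking a non-blocking whole-file write lock, assuming linux/amd64, a writable /tmp, and the upstream golang.org/x/sys/unix import path:

```go
package main

import (
	"fmt"
	"os"

	"golang.org/x/sys/unix" // stand-in for the vendored copy
)

func main() {
	f, err := os.Create("/tmp/flock-demo") // assumed scratch path
	if err != nil {
		panic(err)
	}
	defer f.Close()

	// Zero Start and Len cover the whole file; Whence 0 means SEEK_SET.
	lk := unix.Flock_t{Type: unix.F_WRLCK}
	// F_SETLK is the non-blocking request: it fails rather than waits.
	if err := unix.FcntlFlock(f.Fd(), unix.F_SETLK, &lk); err != nil {
		fmt.Println("lock held elsewhere:", err)
		return
	}
	fmt.Println("write lock acquired")
}
```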
+	fcntl64Syscall = SYS_FCNTL64
+}
diff --git a/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/gccgo.go b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/gccgo.go
new file mode 100644
index 000000000..94c823212
--- /dev/null
+++ b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/gccgo.go
@@ -0,0 +1,46 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build gccgo
+
+package unix
+
+import "syscall"
+
+// We can't use the gc-syntax .s files for gccgo. On the plus side
+// much of the functionality can be written directly in Go.
+
+//extern gccgoRealSyscall
+func realSyscall(trap, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r, errno uintptr)
+
+func Syscall(trap, a1, a2, a3 uintptr) (r1, r2 uintptr, err syscall.Errno) {
+	syscall.Entersyscall()
+	r, errno := realSyscall(trap, a1, a2, a3, 0, 0, 0, 0, 0, 0)
+	syscall.Exitsyscall()
+	return r, 0, syscall.Errno(errno)
+}
+
+func Syscall6(trap, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2 uintptr, err syscall.Errno) {
+	syscall.Entersyscall()
+	r, errno := realSyscall(trap, a1, a2, a3, a4, a5, a6, 0, 0, 0)
+	syscall.Exitsyscall()
+	return r, 0, syscall.Errno(errno)
+}
+
+func Syscall9(trap, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r1, r2 uintptr, err syscall.Errno) {
+	syscall.Entersyscall()
+	r, errno := realSyscall(trap, a1, a2, a3, a4, a5, a6, a7, a8, a9)
+	syscall.Exitsyscall()
+	return r, 0, syscall.Errno(errno)
+}
+
+func RawSyscall(trap, a1, a2, a3 uintptr) (r1, r2 uintptr, err syscall.Errno) {
+	r, errno := realSyscall(trap, a1, a2, a3, 0, 0, 0, 0, 0, 0)
+	return r, 0, syscall.Errno(errno)
+}
+
+func RawSyscall6(trap, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2 uintptr, err syscall.Errno) {
+	r, errno := realSyscall(trap, a1, a2, a3, a4, a5, a6, 0, 0, 0)
+	return r, 0, syscall.Errno(errno)
+}
diff --git a/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/gccgo_c.c b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/gccgo_c.c
new file mode 100644
index 000000000..07f6be039
--- /dev/null
+++ b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/gccgo_c.c
@@ -0,0 +1,41 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build gccgo
+
+#include <errno.h>
+#include <stdint.h>
+#include <unistd.h>
+
+#define _STRINGIFY2_(x) #x
+#define _STRINGIFY_(x) _STRINGIFY2_(x)
+#define GOSYM_PREFIX _STRINGIFY_(__USER_LABEL_PREFIX__)
+
+// Call syscall from C code because the gccgo support for calling from
+// Go to C does not support varargs functions.
+
+struct ret {
+	uintptr_t r;
+	uintptr_t err;
+};
+
+struct ret
+gccgoRealSyscall(uintptr_t trap, uintptr_t a1, uintptr_t a2, uintptr_t a3, uintptr_t a4, uintptr_t a5, uintptr_t a6, uintptr_t a7, uintptr_t a8, uintptr_t a9)
+{
+	struct ret r;
+
+	errno = 0;
+	r.r = syscall(trap, a1, a2, a3, a4, a5, a6, a7, a8, a9);
+	r.err = errno;
+	return r;
+}
+
+// Define the use function in C so that it is not inlined.
+ +extern void use(void *) __asm__ (GOSYM_PREFIX GOPKGPATH ".use") __attribute__((noinline)); + +void +use(void *p __attribute__ ((unused))) +{ +} diff --git a/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/gccgo_linux_amd64.go b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/gccgo_linux_amd64.go new file mode 100644 index 000000000..bffe1a77d --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/gccgo_linux_amd64.go @@ -0,0 +1,20 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build gccgo,linux,amd64 + +package unix + +import "syscall" + +//extern gettimeofday +func realGettimeofday(*Timeval, *byte) int32 + +func gettimeofday(tv *Timeval) (err syscall.Errno) { + r := realGettimeofday(tv, nil) + if r < 0 { + return syscall.GetErrno() + } + return 0 +} diff --git a/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/mkall.sh b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/mkall.sh new file mode 100644 index 000000000..de95a4bbc --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/mkall.sh @@ -0,0 +1,274 @@ +#!/usr/bin/env bash +# Copyright 2009 The Go Authors. All rights reserved. +# Use of this source code is governed by a BSD-style +# license that can be found in the LICENSE file. + +# The unix package provides access to the raw system call +# interface of the underlying operating system. Porting Go to +# a new architecture/operating system combination requires +# some manual effort, though there are tools that automate +# much of the process. The auto-generated files have names +# beginning with z. +# +# This script runs or (given -n) prints suggested commands to generate z files +# for the current system. Running those commands is not automatic. +# This script is documentation more than anything else. +# +# * asm_${GOOS}_${GOARCH}.s +# +# This hand-written assembly file implements system call dispatch. +# There are three entry points: +# +# func Syscall(trap, a1, a2, a3 uintptr) (r1, r2, err uintptr); +# func Syscall6(trap, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2, err uintptr); +# func RawSyscall(trap, a1, a2, a3 uintptr) (r1, r2, err uintptr); +# +# The first and second are the standard ones; they differ only in +# how many arguments can be passed to the kernel. +# The third is for low-level use by the ForkExec wrapper; +# unlike the first two, it does not call into the scheduler to +# let it know that a system call is running. +# +# * syscall_${GOOS}.go +# +# This hand-written Go file implements system calls that need +# special handling and lists "//sys" comments giving prototypes +# for ones that can be auto-generated. Mksyscall reads those +# comments to generate the stubs. +# +# * syscall_${GOOS}_${GOARCH}.go +# +# Same as syscall_${GOOS}.go except that it contains code specific +# to ${GOOS} on one particular architecture. +# +# * types_${GOOS}.c +# +# This hand-written C file includes standard C headers and then +# creates typedef or enum names beginning with a dollar sign +# (use of $ in variable names is a gcc extension). The hardest +# part about preparing this file is figuring out which headers to +# include and which symbols need to be #defined to get the +# actual data structures that pass through to the kernel system calls. 
+# Some C libraries present alternate versions for binary compatibility +# and translate them on the way in and out of system calls, but +# there is almost always a #define that can get the real ones. +# See types_darwin.c and types_linux.c for examples. +# +# * zerror_${GOOS}_${GOARCH}.go +# +# This machine-generated file defines the system's error numbers, +# error strings, and signal numbers. The generator is "mkerrors.sh". +# Usually no arguments are needed, but mkerrors.sh will pass its +# arguments on to godefs. +# +# * zsyscall_${GOOS}_${GOARCH}.go +# +# Generated by mksyscall.pl; see syscall_${GOOS}.go above. +# +# * zsysnum_${GOOS}_${GOARCH}.go +# +# Generated by mksysnum_${GOOS}. +# +# * ztypes_${GOOS}_${GOARCH}.go +# +# Generated by godefs; see types_${GOOS}.c above. + +GOOSARCH="${GOOS}_${GOARCH}" + +# defaults +mksyscall="./mksyscall.pl" +mkerrors="./mkerrors.sh" +zerrors="zerrors_$GOOSARCH.go" +mksysctl="" +zsysctl="zsysctl_$GOOSARCH.go" +mksysnum= +mktypes= +run="sh" + +case "$1" in +-syscalls) + for i in zsyscall*go + do + sed 1q $i | sed 's;^// ;;' | sh > _$i && gofmt < _$i > $i + rm _$i + done + exit 0 + ;; +-n) + run="cat" + shift +esac + +case "$#" in +0) + ;; +*) + echo 'usage: mkall.sh [-n]' 1>&2 + exit 2 +esac + +GOOSARCH_in=syscall_$GOOSARCH.go +case "$GOOSARCH" in +_* | *_ | _) + echo 'undefined $GOOS_$GOARCH:' "$GOOSARCH" 1>&2 + exit 1 + ;; +darwin_386) + mkerrors="$mkerrors -m32" + mksyscall="./mksyscall.pl -l32" + mksysnum="./mksysnum_darwin.pl $(xcrun --show-sdk-path --sdk macosx)/usr/include/sys/syscall.h" + mktypes="GOARCH=$GOARCH go tool cgo -godefs" + ;; +darwin_amd64) + mkerrors="$mkerrors -m64" + mksysnum="./mksysnum_darwin.pl $(xcrun --show-sdk-path --sdk macosx)/usr/include/sys/syscall.h" + mktypes="GOARCH=$GOARCH go tool cgo -godefs" + ;; +darwin_arm) + mkerrors="$mkerrors" + mksysnum="./mksysnum_darwin.pl /usr/include/sys/syscall.h" + mktypes="GOARCH=$GOARCH go tool cgo -godefs" + ;; +darwin_arm64) + mkerrors="$mkerrors -m64" + mksysnum="./mksysnum_darwin.pl $(xcrun --show-sdk-path --sdk iphoneos)/usr/include/sys/syscall.h" + mktypes="GOARCH=$GOARCH go tool cgo -godefs" + ;; +dragonfly_386) + mkerrors="$mkerrors -m32" + mksyscall="./mksyscall.pl -l32 -dragonfly" + mksysnum="curl -s 'http://gitweb.dragonflybsd.org/dragonfly.git/blob_plain/HEAD:/sys/kern/syscalls.master' | ./mksysnum_dragonfly.pl" + mktypes="GOARCH=$GOARCH go tool cgo -godefs" + ;; +dragonfly_amd64) + mkerrors="$mkerrors -m64" + mksyscall="./mksyscall.pl -dragonfly" + mksysnum="curl -s 'http://gitweb.dragonflybsd.org/dragonfly.git/blob_plain/HEAD:/sys/kern/syscalls.master' | ./mksysnum_dragonfly.pl" + mktypes="GOARCH=$GOARCH go tool cgo -godefs" + ;; +freebsd_386) + mkerrors="$mkerrors -m32" + mksyscall="./mksyscall.pl -l32" + mksysnum="curl -s 'http://svn.freebsd.org/base/stable/10/sys/kern/syscalls.master' | ./mksysnum_freebsd.pl" + mktypes="GOARCH=$GOARCH go tool cgo -godefs" + ;; +freebsd_amd64) + mkerrors="$mkerrors -m64" + mksysnum="curl -s 'http://svn.freebsd.org/base/stable/10/sys/kern/syscalls.master' | ./mksysnum_freebsd.pl" + mktypes="GOARCH=$GOARCH go tool cgo -godefs" + ;; +freebsd_arm) + mkerrors="$mkerrors" + mksyscall="./mksyscall.pl -l32 -arm" + mksysnum="curl -s 'http://svn.freebsd.org/base/stable/10/sys/kern/syscalls.master' | ./mksysnum_freebsd.pl" + # Let the type of C char be singed for making the bare syscall + # API consistent across over platforms. 
+ mktypes="GOARCH=$GOARCH go tool cgo -godefs -- -fsigned-char" + ;; +linux_386) + mkerrors="$mkerrors -m32" + mksyscall="./mksyscall.pl -l32" + mksysnum="./mksysnum_linux.pl /usr/include/asm/unistd_32.h" + mktypes="GOARCH=$GOARCH go tool cgo -godefs" + ;; +linux_amd64) + unistd_h=$(ls -1 /usr/include/asm/unistd_64.h /usr/include/x86_64-linux-gnu/asm/unistd_64.h 2>/dev/null | head -1) + if [ "$unistd_h" = "" ]; then + echo >&2 cannot find unistd_64.h + exit 1 + fi + mkerrors="$mkerrors -m64" + mksysnum="./mksysnum_linux.pl $unistd_h" + mktypes="GOARCH=$GOARCH go tool cgo -godefs" + ;; +linux_arm) + mkerrors="$mkerrors" + mksyscall="./mksyscall.pl -l32 -arm" + mksysnum="curl -s 'http://git.kernel.org/cgit/linux/kernel/git/torvalds/linux.git/plain/arch/arm/include/uapi/asm/unistd.h' | ./mksysnum_linux.pl -" + mktypes="GOARCH=$GOARCH go tool cgo -godefs" + ;; +linux_arm64) + unistd_h=$(ls -1 /usr/include/asm/unistd.h /usr/include/asm-generic/unistd.h 2>/dev/null | head -1) + if [ "$unistd_h" = "" ]; then + echo >&2 cannot find unistd_64.h + exit 1 + fi + mksysnum="./mksysnum_linux.pl $unistd_h" + # Let the type of C char be singed for making the bare syscall + # API consistent across over platforms. + mktypes="GOARCH=$GOARCH go tool cgo -godefs -- -fsigned-char" + ;; +linux_ppc64) + GOOSARCH_in=syscall_linux_ppc64x.go + unistd_h=/usr/include/asm/unistd.h + mkerrors="$mkerrors -m64" + mksysnum="./mksysnum_linux.pl $unistd_h" + mktypes="GOARCH=$GOARCH go tool cgo -godefs" + ;; +linux_ppc64le) + GOOSARCH_in=syscall_linux_ppc64x.go + unistd_h=/usr/include/powerpc64le-linux-gnu/asm/unistd.h + mkerrors="$mkerrors -m64" + mksysnum="./mksysnum_linux.pl $unistd_h" + mktypes="GOARCH=$GOARCH go tool cgo -godefs" + ;; +netbsd_386) + mkerrors="$mkerrors -m32" + mksyscall="./mksyscall.pl -l32 -netbsd" + mksysnum="curl -s 'http://cvsweb.netbsd.org/bsdweb.cgi/~checkout~/src/sys/kern/syscalls.master' | ./mksysnum_netbsd.pl" + mktypes="GOARCH=$GOARCH go tool cgo -godefs" + ;; +netbsd_amd64) + mkerrors="$mkerrors -m64" + mksyscall="./mksyscall.pl -netbsd" + mksysnum="curl -s 'http://cvsweb.netbsd.org/bsdweb.cgi/~checkout~/src/sys/kern/syscalls.master' | ./mksysnum_netbsd.pl" + mktypes="GOARCH=$GOARCH go tool cgo -godefs" + ;; +openbsd_386) + mkerrors="$mkerrors -m32" + mksyscall="./mksyscall.pl -l32 -openbsd" + mksysctl="./mksysctl_openbsd.pl" + zsysctl="zsysctl_openbsd.go" + mksysnum="curl -s 'http://cvsweb.openbsd.org/cgi-bin/cvsweb/~checkout~/src/sys/kern/syscalls.master' | ./mksysnum_openbsd.pl" + mktypes="GOARCH=$GOARCH go tool cgo -godefs" + ;; +openbsd_amd64) + mkerrors="$mkerrors -m64" + mksyscall="./mksyscall.pl -openbsd" + mksysctl="./mksysctl_openbsd.pl" + zsysctl="zsysctl_openbsd.go" + mksysnum="curl -s 'http://cvsweb.openbsd.org/cgi-bin/cvsweb/~checkout~/src/sys/kern/syscalls.master' | ./mksysnum_openbsd.pl" + mktypes="GOARCH=$GOARCH go tool cgo -godefs" + ;; +solaris_amd64) + mksyscall="./mksyscall_solaris.pl" + mkerrors="$mkerrors -m64" + mksysnum= + mktypes="GOARCH=$GOARCH go tool cgo -godefs" + ;; +*) + echo 'unrecognized $GOOS_$GOARCH: ' "$GOOSARCH" 1>&2 + exit 1 + ;; +esac + +( + if [ -n "$mkerrors" ]; then echo "$mkerrors |gofmt >$zerrors"; fi + case "$GOOS" in + *) + syscall_goos="syscall_$GOOS.go" + case "$GOOS" in + darwin | dragonfly | freebsd | netbsd | openbsd) + syscall_goos="syscall_bsd.go $syscall_goos" + ;; + esac + if [ -n "$mksyscall" ]; then echo "$mksyscall $syscall_goos $GOOSARCH_in |gofmt >zsyscall_$GOOSARCH.go"; fi + ;; + esac + if [ -n "$mksysctl" ]; then echo "$mksysctl 
|gofmt >$zsysctl"; fi + if [ -n "$mksysnum" ]; then echo "$mksysnum |gofmt >zsysnum_$GOOSARCH.go"; fi + if [ -n "$mktypes" ]; then + echo "echo // +build $GOARCH,$GOOS > ztypes_$GOOSARCH.go"; + echo "$mktypes types_$GOOS.go | gofmt >>ztypes_$GOOSARCH.go"; + fi +) | $run diff --git a/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/mkerrors.sh b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/mkerrors.sh new file mode 100644 index 000000000..c40d788c4 --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/mkerrors.sh @@ -0,0 +1,476 @@ +#!/usr/bin/env bash +# Copyright 2009 The Go Authors. All rights reserved. +# Use of this source code is governed by a BSD-style +# license that can be found in the LICENSE file. + +# Generate Go code listing errors and other #defined constant +# values (ENAMETOOLONG etc.), by asking the preprocessor +# about the definitions. + +unset LANG +export LC_ALL=C +export LC_CTYPE=C + +if test -z "$GOARCH" -o -z "$GOOS"; then + echo 1>&2 "GOARCH or GOOS not defined in environment" + exit 1 +fi + +CC=${CC:-cc} + +if [[ "$GOOS" -eq "solaris" ]]; then + # Assumes GNU versions of utilities in PATH. + export PATH=/usr/gnu/bin:$PATH +fi + +uname=$(uname) + +includes_Darwin=' +#define _DARWIN_C_SOURCE +#define KERNEL +#define _DARWIN_USE_64_BIT_INODE +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +' + +includes_DragonFly=' +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +' + +includes_FreeBSD=' +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#if __FreeBSD__ >= 10 +#define IFT_CARP 0xf8 // IFT_CARP is deprecated in FreeBSD 10 +#undef SIOCAIFADDR +#define SIOCAIFADDR _IOW(105, 26, struct oifaliasreq) // ifaliasreq contains if_data +#undef SIOCSIFPHYADDR +#define SIOCSIFPHYADDR _IOW(105, 70, struct oifaliasreq) // ifaliasreq contains if_data +#endif +' + +includes_Linux=' +#define _LARGEFILE_SOURCE +#define _LARGEFILE64_SOURCE +#ifndef __LP64__ +#define _FILE_OFFSET_BITS 64 +#endif +#define _GNU_SOURCE + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#ifndef MSG_FASTOPEN +#define MSG_FASTOPEN 0x20000000 +#endif + +#ifndef PTRACE_GETREGS +#define PTRACE_GETREGS 0xc +#endif + +#ifndef PTRACE_SETREGS +#define PTRACE_SETREGS 0xd +#endif +' + +includes_NetBSD=' +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +// Needed since refers to it... +#define schedppq 1 +' + +includes_OpenBSD=' +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +// We keep some constants not supported in OpenBSD 5.5 and beyond for +// the promise of compatibility. 
+#define EMUL_ENABLED 0x1 +#define EMUL_NATIVE 0x2 +#define IPV6_FAITH 0x1d +#define IPV6_OPTIONS 0x1 +#define IPV6_RTHDR_STRICT 0x1 +#define IPV6_SOCKOPT_RESERVED1 0x3 +#define SIOCGIFGENERIC 0xc020693a +#define SIOCSIFGENERIC 0x80206939 +#define WALTSIG 0x4 +' + +includes_SunOS=' +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +' + + +includes=' +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +' +ccflags="$@" + +# Write go tool cgo -godefs input. +( + echo package unix + echo + echo '/*' + indirect="includes_$(uname)" + echo "${!indirect} $includes" + echo '*/' + echo 'import "C"' + echo 'import "syscall"' + echo + echo 'const (' + + # The gcc command line prints all the #defines + # it encounters while processing the input + echo "${!indirect} $includes" | $CC -x c - -E -dM $ccflags | + awk ' + $1 != "#define" || $2 ~ /\(/ || $3 == "" {next} + + $2 ~ /^E([ABCD]X|[BIS]P|[SD]I|S|FL)$/ {next} # 386 registers + $2 ~ /^(SIGEV_|SIGSTKSZ|SIGRT(MIN|MAX))/ {next} + $2 ~ /^(SCM_SRCRT)$/ {next} + $2 ~ /^(MAP_FAILED)$/ {next} + $2 ~ /^ELF_.*$/ {next}# contains ELF_ARCH, etc. + + $2 ~ /^EXTATTR_NAMESPACE_NAMES/ || + $2 ~ /^EXTATTR_NAMESPACE_[A-Z]+_STRING/ {next} + + $2 !~ /^ETH_/ && + $2 !~ /^EPROC_/ && + $2 !~ /^EQUIV_/ && + $2 !~ /^EXPR_/ && + $2 ~ /^E[A-Z0-9_]+$/ || + $2 ~ /^B[0-9_]+$/ || + $2 == "BOTHER" || + $2 ~ /^CI?BAUD(EX)?$/ || + $2 == "IBSHIFT" || + $2 ~ /^V[A-Z0-9]+$/ || + $2 ~ /^CS[A-Z0-9]/ || + $2 ~ /^I(SIG|CANON|CRNL|UCLC|EXTEN|MAXBEL|STRIP|UTF8)$/ || + $2 ~ /^IGN/ || + $2 ~ /^IX(ON|ANY|OFF)$/ || + $2 ~ /^IN(LCR|PCK)$/ || + $2 ~ /(^FLU?SH)|(FLU?SH$)/ || + $2 ~ /^C(LOCAL|READ|MSPAR|RTSCTS)$/ || + $2 == "BRKINT" || + $2 == "HUPCL" || + $2 == "PENDIN" || + $2 == "TOSTOP" || + $2 == "XCASE" || + $2 == "ALTWERASE" || + $2 == "NOKERNINFO" || + $2 ~ /^PAR/ || + $2 ~ /^SIG[^_]/ || + $2 ~ /^O[CNPFPL][A-Z]+[^_][A-Z]+$/ || + $2 ~ /^(NL|CR|TAB|BS|VT|FF)DLY$/ || + $2 ~ /^(NL|CR|TAB|BS|VT|FF)[0-9]$/ || + $2 ~ /^O?XTABS$/ || + $2 ~ /^TC[IO](ON|OFF)$/ || + $2 ~ /^IN_/ || + $2 ~ /^LOCK_(SH|EX|NB|UN)$/ || + $2 ~ /^(AF|SOCK|SO|SOL|IPPROTO|IP|IPV6|ICMP6|TCP|EVFILT|NOTE|EV|SHUT|PROT|MAP|PACKET|MSG|SCM|MCL|DT|MADV|PR)_/ || + $2 == "ICMPV6_FILTER" || + $2 == "SOMAXCONN" || + $2 == "NAME_MAX" || + $2 == "IFNAMSIZ" || + $2 ~ /^CTL_(MAXNAME|NET|QUERY)$/ || + $2 ~ /^SYSCTL_VERS/ || + $2 ~ /^(MS|MNT)_/ || + $2 ~ /^TUN(SET|GET|ATTACH|DETACH)/ || + $2 ~ /^(O|F|FD|NAME|S|PTRACE|PT)_/ || + $2 ~ /^LINUX_REBOOT_CMD_/ || + $2 ~ /^LINUX_REBOOT_MAGIC[12]$/ || + $2 !~ "NLA_TYPE_MASK" && + $2 ~ /^(NETLINK|NLM|NLMSG|NLA|IFA|IFAN|RT|RTCF|RTN|RTPROT|RTNH|ARPHRD|ETH_P)_/ || + $2 ~ /^SIOC/ || + $2 ~ /^TIOC/ || + $2 ~ /^TCGET/ || + $2 ~ /^TCSET/ || + $2 ~ /^TC(FLSH|SBRKP?|XONC)$/ || + $2 !~ "RTF_BITS" && + $2 ~ /^(IFF|IFT|NET_RT|RTM|RTF|RTV|RTA|RTAX)_/ || + $2 ~ /^BIOC/ || + $2 ~ /^RUSAGE_(SELF|CHILDREN|THREAD)/ || + $2 ~ /^RLIMIT_(AS|CORE|CPU|DATA|FSIZE|NOFILE|STACK)|RLIM_INFINITY/ || + $2 ~ /^PRIO_(PROCESS|PGRP|USER)/ || + $2 ~ /^CLONE_[A-Z_]+/ || + $2 !~ /^(BPF_TIMEVAL)$/ && + $2 ~ /^(BPF|DLT)_/ || + $2 ~ /^CLOCK_/ || + $2 !~ "WMESGLEN" && + $2 ~ /^W[A-Z0-9]+$/ {printf("\t%s = C.%s\n", $2, $2)} + $2 ~ /^__WCOREFLAG$/ {next} + $2 ~ /^__W[A-Z0-9]+$/ {printf("\t%s = C.%s\n", substr($2,3), $2)} + + {next} + ' | sort + + echo ')' +) >_const.go + +# Pull out the error names for later. 
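The extraction steps and the small C stringifier that follow turn those constant names into the generated zerrors_${GOOS}_${GOARCH}.go files. A hedged sketch of the shape of that output, showing just two linux/amd64 entries (the real files contain hundreds, and the values differ per platform):

```go
// Excerpt-style sketch of a generated zerrors file, not a verbatim copy.
package unix

import "syscall"

// Errors
const (
	EPERM  = syscall.Errno(0x1)
	ENOENT = syscall.Errno(0x2)
)

// Error table
var errors = [...]string{
	1: "operation not permitted",
	2: "no such file or directory",
}
```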
+errors=$( + echo '#include ' | $CC -x c - -E -dM $ccflags | + awk '$1=="#define" && $2 ~ /^E[A-Z0-9_]+$/ { print $2 }' | + sort +) + +# Pull out the signal names for later. +signals=$( + echo '#include ' | $CC -x c - -E -dM $ccflags | + awk '$1=="#define" && $2 ~ /^SIG[A-Z0-9]+$/ { print $2 }' | + egrep -v '(SIGSTKSIZE|SIGSTKSZ|SIGRT)' | + sort +) + +# Again, writing regexps to a file. +echo '#include ' | $CC -x c - -E -dM $ccflags | + awk '$1=="#define" && $2 ~ /^E[A-Z0-9_]+$/ { print "^\t" $2 "[ \t]*=" }' | + sort >_error.grep +echo '#include ' | $CC -x c - -E -dM $ccflags | + awk '$1=="#define" && $2 ~ /^SIG[A-Z0-9]+$/ { print "^\t" $2 "[ \t]*=" }' | + egrep -v '(SIGSTKSIZE|SIGSTKSZ|SIGRT)' | + sort >_signal.grep + +echo '// mkerrors.sh' "$@" +echo '// MACHINE GENERATED BY THE COMMAND ABOVE; DO NOT EDIT' +echo +echo "// +build ${GOARCH},${GOOS}" +echo +go tool cgo -godefs -- "$@" _const.go >_error.out +cat _error.out | grep -vf _error.grep | grep -vf _signal.grep +echo +echo '// Errors' +echo 'const (' +cat _error.out | grep -f _error.grep | sed 's/=\(.*\)/= syscall.Errno(\1)/' +echo ')' + +echo +echo '// Signals' +echo 'const (' +cat _error.out | grep -f _signal.grep | sed 's/=\(.*\)/= syscall.Signal(\1)/' +echo ')' + +# Run C program to print error and syscall strings. +( + echo -E " +#include +#include +#include +#include +#include +#include + +#define nelem(x) (sizeof(x)/sizeof((x)[0])) + +enum { A = 'A', Z = 'Z', a = 'a', z = 'z' }; // avoid need for single quotes below + +int errors[] = { +" + for i in $errors + do + echo -E ' '$i, + done + + echo -E " +}; + +int signals[] = { +" + for i in $signals + do + echo -E ' '$i, + done + + # Use -E because on some systems bash builtin interprets \n itself. + echo -E ' +}; + +static int +intcmp(const void *a, const void *b) +{ + return *(int*)a - *(int*)b; +} + +int +main(void) +{ + int i, j, e; + char buf[1024], *p; + + printf("\n\n// Error table\n"); + printf("var errors = [...]string {\n"); + qsort(errors, nelem(errors), sizeof errors[0], intcmp); + for(i=0; i 0 && errors[i-1] == e) + continue; + strcpy(buf, strerror(e)); + // lowercase first letter: Bad -> bad, but STREAM -> STREAM. + if(A <= buf[0] && buf[0] <= Z && a <= buf[1] && buf[1] <= z) + buf[0] += a - A; + printf("\t%d: \"%s\",\n", e, buf); + } + printf("}\n\n"); + + printf("\n\n// Signal table\n"); + printf("var signals = [...]string {\n"); + qsort(signals, nelem(signals), sizeof signals[0], intcmp); + for(i=0; i 0 && signals[i-1] == e) + continue; + strcpy(buf, strsignal(e)); + // lowercase first letter: Bad -> bad, but STREAM -> STREAM. + if(A <= buf[0] && buf[0] <= Z && a <= buf[1] && buf[1] <= z) + buf[0] += a - A; + // cut trailing : number. + p = strrchr(buf, ":"[0]); + if(p) + *p = '\0'; + printf("\t%d: \"%s\",\n", e, buf); + } + printf("}\n\n"); + + return 0; +} + +' +) >_errors.c + +$CC $ccflags -o _errors _errors.c && $GORUN ./_errors && rm -f _errors.c _errors _const.go _error.grep _signal.grep _error.out diff --git a/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/mksyscall.pl b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/mksyscall.pl new file mode 100644 index 000000000..b1e7766da --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/mksyscall.pl @@ -0,0 +1,323 @@ +#!/usr/bin/env perl +# Copyright 2009 The Go Authors. All rights reserved. +# Use of this source code is governed by a BSD-style +# license that can be found in the LICENSE file. 
+ +# This program reads a file containing function prototypes +# (like syscall_darwin.go) and generates system call bodies. +# The prototypes are marked by lines beginning with "//sys" +# and read like func declarations if //sys is replaced by func, but: +# * The parameter lists must give a name for each argument. +# This includes return parameters. +# * The parameter lists must give a type for each argument: +# the (x, y, z int) shorthand is not allowed. +# * If the return parameter is an error number, it must be named errno. + +# A line beginning with //sysnb is like //sys, except that the +# goroutine will not be suspended during the execution of the system +# call. This must only be used for system calls which can never +# block, as otherwise the system call could cause all goroutines to +# hang. + +use strict; + +my $cmdline = "mksyscall.pl " . join(' ', @ARGV); +my $errors = 0; +my $_32bit = ""; +my $plan9 = 0; +my $openbsd = 0; +my $netbsd = 0; +my $dragonfly = 0; +my $arm = 0; # 64-bit value should use (even, odd)-pair + +if($ARGV[0] eq "-b32") { + $_32bit = "big-endian"; + shift; +} elsif($ARGV[0] eq "-l32") { + $_32bit = "little-endian"; + shift; +} +if($ARGV[0] eq "-plan9") { + $plan9 = 1; + shift; +} +if($ARGV[0] eq "-openbsd") { + $openbsd = 1; + shift; +} +if($ARGV[0] eq "-netbsd") { + $netbsd = 1; + shift; +} +if($ARGV[0] eq "-dragonfly") { + $dragonfly = 1; + shift; +} +if($ARGV[0] eq "-arm") { + $arm = 1; + shift; +} + +if($ARGV[0] =~ /^-/) { + print STDERR "usage: mksyscall.pl [-b32 | -l32] [file ...]\n"; + exit 1; +} + +if($ENV{'GOARCH'} eq "" || $ENV{'GOOS'} eq "") { + print STDERR "GOARCH or GOOS not defined in environment\n"; + exit 1; +} + +sub parseparamlist($) { + my ($list) = @_; + $list =~ s/^\s*//; + $list =~ s/\s*$//; + if($list eq "") { + return (); + } + return split(/\s*,\s*/, $list); +} + +sub parseparam($) { + my ($p) = @_; + if($p !~ /^(\S*) (\S*)$/) { + print STDERR "$ARGV:$.: malformed parameter: $p\n"; + $errors = 1; + return ("xx", "int"); + } + return ($1, $2); +} + +my $text = ""; +while(<>) { + chomp; + s/\s+/ /g; + s/^\s+//; + s/\s+$//; + my $nonblock = /^\/\/sysnb /; + next if !/^\/\/sys / && !$nonblock; + + # Line must be of the form + # func Open(path string, mode int, perm int) (fd int, errno error) + # Split into name, in params, out params. + if(!/^\/\/sys(nb)? (\w+)\(([^()]*)\)\s*(?:\(([^()]+)\))?\s*(?:=\s*((?i)SYS_[A-Z0-9_]+))?$/) { + print STDERR "$ARGV:$.: malformed //sys declaration\n"; + $errors = 1; + next; + } + my ($func, $in, $out, $sysname) = ($2, $3, $4, $5); + + # Split argument lists on comma. + my @in = parseparamlist($in); + my @out = parseparamlist($out); + + # Try in vain to keep people from editing this file. + # The theory is that they jump into the middle of the file + # without reading the header. + $text .= "// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT\n\n"; + + # Go function header. + my $out_decl = @out ? sprintf(" (%s)", join(', ', @out)) : ""; + $text .= sprintf "func %s(%s)%s {\n", $func, join(', ', @in), $out_decl; + + # Check if err return available + my $errvar = ""; + foreach my $p (@out) { + my ($name, $type) = parseparam($p); + if($type eq "error") { + $errvar = $name; + last; + } + } + + # Prepare arguments to Syscall. 
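The marshaling loop that follows converts each Go parameter into the uintptr words the Syscall entry points expect; its net effect is easiest to see on a concrete prototype. Assembling the script's own templates, a //sys line such as the one below comes out roughly like this inside the generated package (SYS_MKDIR assumes a target that defines it; this is a sketch, not a verbatim generated file):

```go
//sys	Mkdir(path string, mode uint32) (err error)

// ...which mksyscall.pl expands to approximately:
func Mkdir(path string, mode uint32) (err error) {
	var _p0 *byte
	_p0, err = BytePtrFromString(path)
	if err != nil {
		return
	}
	_, _, e1 := Syscall(SYS_MKDIR, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0)
	use(unsafe.Pointer(_p0))
	if e1 != 0 {
		err = errnoErr(e1)
	}
	return
}
```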
+ my @args = (); + my @uses = (); + my $n = 0; + foreach my $p (@in) { + my ($name, $type) = parseparam($p); + if($type =~ /^\*/) { + push @args, "uintptr(unsafe.Pointer($name))"; + } elsif($type eq "string" && $errvar ne "") { + $text .= "\tvar _p$n *byte\n"; + $text .= "\t_p$n, $errvar = BytePtrFromString($name)\n"; + $text .= "\tif $errvar != nil {\n\t\treturn\n\t}\n"; + push @args, "uintptr(unsafe.Pointer(_p$n))"; + push @uses, "use(unsafe.Pointer(_p$n))"; + $n++; + } elsif($type eq "string") { + print STDERR "$ARGV:$.: $func uses string arguments, but has no error return\n"; + $text .= "\tvar _p$n *byte\n"; + $text .= "\t_p$n, _ = BytePtrFromString($name)\n"; + push @args, "uintptr(unsafe.Pointer(_p$n))"; + push @uses, "use(unsafe.Pointer(_p$n))"; + $n++; + } elsif($type =~ /^\[\](.*)/) { + # Convert slice into pointer, length. + # Have to be careful not to take address of &a[0] if len == 0: + # pass dummy pointer in that case. + # Used to pass nil, but some OSes or simulators reject write(fd, nil, 0). + $text .= "\tvar _p$n unsafe.Pointer\n"; + $text .= "\tif len($name) > 0 {\n\t\t_p$n = unsafe.Pointer(\&${name}[0])\n\t}"; + $text .= " else {\n\t\t_p$n = unsafe.Pointer(&_zero)\n\t}"; + $text .= "\n"; + push @args, "uintptr(_p$n)", "uintptr(len($name))"; + $n++; + } elsif($type eq "int64" && ($openbsd || $netbsd)) { + push @args, "0"; + if($_32bit eq "big-endian") { + push @args, "uintptr($name>>32)", "uintptr($name)"; + } elsif($_32bit eq "little-endian") { + push @args, "uintptr($name)", "uintptr($name>>32)"; + } else { + push @args, "uintptr($name)"; + } + } elsif($type eq "int64" && $dragonfly) { + if ($func !~ /^extp(read|write)/i) { + push @args, "0"; + } + if($_32bit eq "big-endian") { + push @args, "uintptr($name>>32)", "uintptr($name)"; + } elsif($_32bit eq "little-endian") { + push @args, "uintptr($name)", "uintptr($name>>32)"; + } else { + push @args, "uintptr($name)"; + } + } elsif($type eq "int64" && $_32bit ne "") { + if(@args % 2 && $arm) { + # arm abi specifies 64-bit argument uses + # (even, odd) pair + push @args, "0" + } + if($_32bit eq "big-endian") { + push @args, "uintptr($name>>32)", "uintptr($name)"; + } else { + push @args, "uintptr($name)", "uintptr($name>>32)"; + } + } else { + push @args, "uintptr($name)"; + } + } + + # Determine which form to use; pad args with zeros. + my $asm = "Syscall"; + if ($nonblock) { + $asm = "RawSyscall"; + } + if(@args <= 3) { + while(@args < 3) { + push @args, "0"; + } + } elsif(@args <= 6) { + $asm .= "6"; + while(@args < 6) { + push @args, "0"; + } + } elsif(@args <= 9) { + $asm .= "9"; + while(@args < 9) { + push @args, "0"; + } + } else { + print STDERR "$ARGV:$.: too many arguments to system call\n"; + } + + # System call number. + if($sysname eq "") { + $sysname = "SYS_$func"; + $sysname =~ s/([a-z])([A-Z])/${1}_$2/g; # turn FooBar into Foo_Bar + $sysname =~ y/a-z/A-Z/; + } + + # Actual call. + my $args = join(', ', @args); + my $call = "$asm($sysname, $args)"; + + # Assign return values. + my $body = ""; + my @ret = ("_", "_", "_"); + my $do_errno = 0; + for(my $i=0; $i<@out; $i++) { + my $p = $out[$i]; + my ($name, $type) = parseparam($p); + my $reg = ""; + if($name eq "err" && !$plan9) { + $reg = "e1"; + $ret[2] = $reg; + $do_errno = 1; + } elsif($name eq "err" && $plan9) { + $ret[0] = "r0"; + $ret[2] = "e1"; + next; + } else { + $reg = sprintf("r%d", $i); + $ret[$i] = $reg; + } + if($type eq "bool") { + $reg = "$reg != 0"; + } + if($type eq "int64" && $_32bit ne "") { + # 64-bit number in r1:r0 or r0:r1. 
+ if($i+2 > @out) { + print STDERR "$ARGV:$.: not enough registers for int64 return\n"; + } + if($_32bit eq "big-endian") { + $reg = sprintf("int64(r%d)<<32 | int64(r%d)", $i, $i+1); + } else { + $reg = sprintf("int64(r%d)<<32 | int64(r%d)", $i+1, $i); + } + $ret[$i] = sprintf("r%d", $i); + $ret[$i+1] = sprintf("r%d", $i+1); + } + if($reg ne "e1" || $plan9) { + $body .= "\t$name = $type($reg)\n"; + } + } + if ($ret[0] eq "_" && $ret[1] eq "_" && $ret[2] eq "_") { + $text .= "\t$call\n"; + } else { + $text .= "\t$ret[0], $ret[1], $ret[2] := $call\n"; + } + foreach my $use (@uses) { + $text .= "\t$use\n"; + } + $text .= $body; + + if ($plan9 && $ret[2] eq "e1") { + $text .= "\tif int32(r0) == -1 {\n"; + $text .= "\t\terr = e1\n"; + $text .= "\t}\n"; + } elsif ($do_errno) { + $text .= "\tif e1 != 0 {\n"; + $text .= "\t\terr = errnoErr(e1)\n"; + $text .= "\t}\n"; + } + $text .= "\treturn\n"; + $text .= "}\n\n"; +} + +chomp $text; +chomp $text; + +if($errors) { + exit 1; +} + +print <) { + chomp; + s/\s+/ /g; + s/^\s+//; + s/\s+$//; + $package = $1 if !$package && /^package (\S+)$/; + my $nonblock = /^\/\/sysnb /; + next if !/^\/\/sys / && !$nonblock; + + # Line must be of the form + # func Open(path string, mode int, perm int) (fd int, err error) + # Split into name, in params, out params. + if(!/^\/\/sys(nb)? (\w+)\(([^()]*)\)\s*(?:\(([^()]+)\))?\s*(?:=\s*(?:(\w*)\.)?(\w*))?$/) { + print STDERR "$ARGV:$.: malformed //sys declaration\n"; + $errors = 1; + next; + } + my ($nb, $func, $in, $out, $modname, $sysname) = ($1, $2, $3, $4, $5, $6); + + # Split argument lists on comma. + my @in = parseparamlist($in); + my @out = parseparamlist($out); + + # So file name. + if($modname eq "") { + $modname = "libc"; + } + + # System call name. + if($sysname eq "") { + $sysname = "$func"; + } + + # System call pointer variable name. + my $sysvarname = "proc$sysname"; + + my $strconvfunc = "BytePtrFromString"; + my $strconvtype = "*byte"; + + $sysname =~ y/A-Z/a-z/; # All libc functions are lowercase. + + # Runtime import of function to allow cross-platform builds. + $dynimports .= "//go:cgo_import_dynamic libc_${sysname} ${sysname} \"$modname.so\"\n"; + # Link symbol to proc address variable. + $linknames .= "//go:linkname ${sysvarname} libc_${sysname}\n"; + # Library proc address variable. + push @vars, $sysvarname; + + # Go function header. + $out = join(', ', @out); + if($out ne "") { + $out = " ($out)"; + } + if($text ne "") { + $text .= "\n" + } + $text .= sprintf "func %s(%s)%s {\n", $func, join(', ', @in), $out; + + # Check if err return available + my $errvar = ""; + foreach my $p (@out) { + my ($name, $type) = parseparam($p); + if($type eq "error") { + $errvar = $name; + last; + } + } + + # Prepare arguments to Syscall. 
+ my @args = (); + my @uses = (); + my $n = 0; + foreach my $p (@in) { + my ($name, $type) = parseparam($p); + if($type =~ /^\*/) { + push @args, "uintptr(unsafe.Pointer($name))"; + } elsif($type eq "string" && $errvar ne "") { + $text .= "\tvar _p$n $strconvtype\n"; + $text .= "\t_p$n, $errvar = $strconvfunc($name)\n"; + $text .= "\tif $errvar != nil {\n\t\treturn\n\t}\n"; + push @args, "uintptr(unsafe.Pointer(_p$n))"; + push @uses, "use(unsafe.Pointer(_p$n))"; + $n++; + } elsif($type eq "string") { + print STDERR "$ARGV:$.: $func uses string arguments, but has no error return\n"; + $text .= "\tvar _p$n $strconvtype\n"; + $text .= "\t_p$n, _ = $strconvfunc($name)\n"; + push @args, "uintptr(unsafe.Pointer(_p$n))"; + push @uses, "use(unsafe.Pointer(_p$n))"; + $n++; + } elsif($type =~ /^\[\](.*)/) { + # Convert slice into pointer, length. + # Have to be careful not to take address of &a[0] if len == 0: + # pass nil in that case. + $text .= "\tvar _p$n *$1\n"; + $text .= "\tif len($name) > 0 {\n\t\t_p$n = \&$name\[0]\n\t}\n"; + push @args, "uintptr(unsafe.Pointer(_p$n))", "uintptr(len($name))"; + $n++; + } elsif($type eq "int64" && $_32bit ne "") { + if($_32bit eq "big-endian") { + push @args, "uintptr($name >> 32)", "uintptr($name)"; + } else { + push @args, "uintptr($name)", "uintptr($name >> 32)"; + } + } elsif($type eq "bool") { + $text .= "\tvar _p$n uint32\n"; + $text .= "\tif $name {\n\t\t_p$n = 1\n\t} else {\n\t\t_p$n = 0\n\t}\n"; + push @args, "uintptr(_p$n)"; + $n++; + } else { + push @args, "uintptr($name)"; + } + } + my $nargs = @args; + + # Determine which form to use; pad args with zeros. + my $asm = "sysvicall6"; + if ($nonblock) { + $asm = "rawSysvicall6"; + } + if(@args <= 6) { + while(@args < 6) { + push @args, "0"; + } + } else { + print STDERR "$ARGV:$.: too many arguments to system call\n"; + } + + # Actual call. + my $args = join(', ', @args); + my $call = "$asm(uintptr(unsafe.Pointer(&$sysvarname)), $nargs, $args)"; + + # Assign return values. + my $body = ""; + my $failexpr = ""; + my @ret = ("_", "_", "_"); + my @pout= (); + my $do_errno = 0; + for(my $i=0; $i<@out; $i++) { + my $p = $out[$i]; + my ($name, $type) = parseparam($p); + my $reg = ""; + if($name eq "err") { + $reg = "e1"; + $ret[2] = $reg; + $do_errno = 1; + } else { + $reg = sprintf("r%d", $i); + $ret[$i] = $reg; + } + if($type eq "bool") { + $reg = "$reg != 0"; + } + if($type eq "int64" && $_32bit ne "") { + # 64-bit number in r1:r0 or r0:r1. 
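The return-value plumbing continues below; put together, the Solaris generator turns a //sysnb prototype into a libc-dispatched stub along these lines (a sketch assembled from the script's templates above, with getpid as an assumed example, not a verbatim generated file):

```go
//sysnb	Getpid() (pid int)

// ...which mksyscall_solaris.pl expands to approximately:

//go:cgo_import_dynamic libc_getpid getpid "libc.so"
//go:linkname procGetpid libc_getpid

var procGetpid uintptr

func Getpid() (pid int) {
	r0, _, _ := rawSysvicall6(uintptr(unsafe.Pointer(&procGetpid)), 0, 0, 0, 0, 0, 0, 0)
	pid = int(r0)
	return
}
```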
+ if($i+2 > @out) { + print STDERR "$ARGV:$.: not enough registers for int64 return\n"; + } + if($_32bit eq "big-endian") { + $reg = sprintf("int64(r%d)<<32 | int64(r%d)", $i, $i+1); + } else { + $reg = sprintf("int64(r%d)<<32 | int64(r%d)", $i+1, $i); + } + $ret[$i] = sprintf("r%d", $i); + $ret[$i+1] = sprintf("r%d", $i+1); + } + if($reg ne "e1") { + $body .= "\t$name = $type($reg)\n"; + } + } + if ($ret[0] eq "_" && $ret[1] eq "_" && $ret[2] eq "_") { + $text .= "\t$call\n"; + } else { + $text .= "\t$ret[0], $ret[1], $ret[2] := $call\n"; + } + foreach my $use (@uses) { + $text .= "\t$use\n"; + } + $text .= $body; + + if ($do_errno) { + $text .= "\tif e1 != 0 {\n"; + $text .= "\t\terr = e1\n"; + $text .= "\t}\n"; + } + $text .= "\treturn\n"; + $text .= "}\n"; +} + +if($errors) { + exit 1; +} + +print < "net.inet", + "net.inet.ipproto" => "net.inet", + "net.inet6.ipv6proto" => "net.inet6", + "net.inet6.ipv6" => "net.inet6.ip6", + "net.inet.icmpv6" => "net.inet6.icmp6", + "net.inet6.divert6" => "net.inet6.divert", + "net.inet6.tcp6" => "net.inet.tcp", + "net.inet6.udp6" => "net.inet.udp", + "mpls" => "net.mpls", + "swpenc" => "vm.swapencrypt" +); + +# Node mappings +my %node_map = ( + "net.inet.ip.ifq" => "net.ifq", + "net.inet.pfsync" => "net.pfsync", + "net.mpls.ifq" => "net.ifq" +); + +my $ctlname; +my %mib = (); +my %sysctl = (); +my $node; + +sub debug() { + print STDERR "$_[0]\n" if $debug; +} + +# Walk the MIB and build a sysctl name to OID mapping. +sub build_sysctl() { + my ($node, $name, $oid) = @_; + my %node = %{$node}; + my @oid = @{$oid}; + + foreach my $key (sort keys %node) { + my @node = @{$node{$key}}; + my $nodename = $name.($name ne '' ? '.' : '').$key; + my @nodeoid = (@oid, $node[0]); + if ($node[1] eq 'CTLTYPE_NODE') { + if (exists $node_map{$nodename}) { + $node = \%mib; + $ctlname = $node_map{$nodename}; + foreach my $part (split /\./, $ctlname) { + $node = \%{@{$$node{$part}}[2]}; + } + } else { + $node = $node[2]; + } + &build_sysctl($node, $nodename, \@nodeoid); + } elsif ($node[1] ne '') { + $sysctl{$nodename} = \@nodeoid; + } + } +} + +foreach my $ctl (@ctls) { + $ctls{$ctl} = $ctl; +} + +# Build MIB +foreach my $header (@headers) { + &debug("Processing $header..."); + open HEADER, "/usr/include/$header" || + print STDERR "Failed to open $header\n"; + while (
) { + if ($_ =~ /^#define\s+(CTL_NAMES)\s+{/ || + $_ =~ /^#define\s+(CTL_(.*)_NAMES)\s+{/ || + $_ =~ /^#define\s+((.*)CTL_NAMES)\s+{/) { + if ($1 eq 'CTL_NAMES') { + # Top level. + $node = \%mib; + } else { + # Node. + my $nodename = lc($2); + if ($header =~ /^netinet\//) { + $ctlname = "net.inet.$nodename"; + } elsif ($header =~ /^netinet6\//) { + $ctlname = "net.inet6.$nodename"; + } elsif ($header =~ /^net\//) { + $ctlname = "net.$nodename"; + } else { + $ctlname = "$nodename"; + $ctlname =~ s/^(fs|net|kern)_/$1\./; + } + if (exists $ctl_map{$ctlname}) { + $ctlname = $ctl_map{$ctlname}; + } + if (not exists $ctls{$ctlname}) { + &debug("Ignoring $ctlname..."); + next; + } + + # Walk down from the top of the MIB. + $node = \%mib; + foreach my $part (split /\./, $ctlname) { + if (not exists $$node{$part}) { + &debug("Missing node $part"); + $$node{$part} = [ 0, '', {} ]; + } + $node = \%{@{$$node{$part}}[2]}; + } + } + + # Populate current node with entries. + my $i = -1; + while (defined($_) && $_ !~ /^}/) { + $_ =
; + $i++ if $_ =~ /{.*}/; + next if $_ !~ /{\s+"(\w+)",\s+(CTLTYPE_[A-Z]+)\s+}/; + $$node{$1} = [ $i, $2, {} ]; + } + } + } + close HEADER; +} + +&build_sysctl(\%mib, "", []); + +print <){ + if(/^#define\s+SYS_(\w+)\s+([0-9]+)/){ + my $name = $1; + my $num = $2; + $name =~ y/a-z/A-Z/; + print " SYS_$name = $num;" + } +} + +print <){ + if(/^([0-9]+)\s+STD\s+({ \S+\s+(\w+).*)$/){ + my $num = $1; + my $proto = $2; + my $name = "SYS_$3"; + $name =~ y/a-z/A-Z/; + + # There are multiple entries for enosys and nosys, so comment them out. + if($name =~ /^SYS_E?NOSYS$/){ + $name = "// $name"; + } + if($name eq 'SYS_SYS_EXIT'){ + $name = 'SYS_EXIT'; + } + + print " $name = $num; // $proto\n"; + } +} + +print <){ + if(/^([0-9]+)\s+\S+\s+STD\s+({ \S+\s+(\w+).*)$/){ + my $num = $1; + my $proto = $2; + my $name = "SYS_$3"; + $name =~ y/a-z/A-Z/; + + # There are multiple entries for enosys and nosys, so comment them out. + if($name =~ /^SYS_E?NOSYS$/){ + $name = "// $name"; + } + if($name eq 'SYS_SYS_EXIT'){ + $name = 'SYS_EXIT'; + } + if($name =~ /^SYS_CAP_+/ || $name =~ /^SYS___CAP_+/){ + next + } + + print " $name = $num; // $proto\n"; + + # We keep Capsicum syscall numbers for FreeBSD + # 9-STABLE here because we are not sure whether they + # are mature and stable. + if($num == 513){ + print " SYS_CAP_NEW = 514 // { int cap_new(int fd, uint64_t rights); }\n"; + print " SYS_CAP_GETRIGHTS = 515 // { int cap_getrights(int fd, \\\n"; + print " SYS_CAP_ENTER = 516 // { int cap_enter(void); }\n"; + print " SYS_CAP_GETMODE = 517 // { int cap_getmode(u_int *modep); }\n"; + } + } +} + +print < 999){ + # ignore deprecated syscalls that are no longer implemented + # https://git.kernel.org/cgit/linux/kernel/git/torvalds/linux.git/tree/include/uapi/asm-generic/unistd.h?id=refs/heads/master#n716 + return; + } + $name =~ y/a-z/A-Z/; + print " SYS_$name = $num;\n"; +} + +my $prev; +open(GCC, "gcc -E -dD $ARGV[0] |") || die "can't run gcc"; +while(){ + if(/^#define __NR_syscalls\s+/) { + # ignore redefinitions of __NR_syscalls + } + elsif(/^#define __NR_(\w+)\s+([0-9]+)/){ + $prev = $2; + fmt($1, $2); + } + elsif(/^#define __NR3264_(\w+)\s+([0-9]+)/){ + $prev = $2; + fmt($1, $2); + } + elsif(/^#define __NR_(\w+)\s+\(\w+\+\s*([0-9]+)\)/){ + fmt($1, $prev+$2) + } +} + +print <){ + if($line =~ /^(.*)\\$/) { + # Handle continuation + $line = $1; + $_ =~ s/^\s+//; + $line .= $_; + } else { + # New line + $line = $_; + } + next if $line =~ /\\$/; + if($line =~ /^([0-9]+)\s+((STD)|(NOERR))\s+(RUMP\s+)?({\s+\S+\s*\*?\s*\|(\S+)\|(\S*)\|(\w+).*\s+})(\s+(\S+))?$/) { + my $num = $1; + my $proto = $6; + my $compat = $8; + my $name = "$7_$9"; + + $name = "$7_$11" if $11 ne ''; + $name =~ y/a-z/A-Z/; + + if($compat eq '' || $compat eq '30' || $compat eq '50') { + print " $name = $num; // $proto\n"; + } + } +} + +print <){ + if(/^([0-9]+)\s+STD\s+(NOLOCK\s+)?({ \S+\s+\*?(\w+).*)$/){ + my $num = $1; + my $proto = $3; + my $name = $4; + $name =~ y/a-z/A-Z/; + + # There are multiple entries for enosys and nosys, so comment them out. + if($name =~ /^SYS_E?NOSYS$/){ + $name = "// $name"; + } + if($name eq 'SYS_SYS_EXIT'){ + $name = 'SYS_EXIT'; + } + + print " $name = $num; // $proto\n"; + } +} + +print < len(b) { + return nil, nil, EINVAL + } + return h, b[cmsgAlignOf(SizeofCmsghdr):h.Len], nil +} + +// UnixRights encodes a set of open file descriptors into a socket +// control message for sending to another process. 
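Its body follows; in practice the encoded message rides in the oob argument of WriteMsgUnix, and by the time the peer reads it the kernel has already installed duplicated descriptors in the receiving process. A runnable sketch, assuming linux and the upstream golang.org/x/sys/unix import path:

```go
package main

import (
	"fmt"
	"net"
	"os"

	"golang.org/x/sys/unix" // stand-in for the vendored copy
)

func main() {
	fds, err := unix.Socketpair(unix.AF_LOCAL, unix.SOCK_STREAM, 0)
	if err != nil {
		panic(err)
	}
	send, err := net.FileConn(os.NewFile(uintptr(fds[0]), "send"))
	if err != nil {
		panic(err)
	}
	defer send.Close()
	recv, err := net.FileConn(os.NewFile(uintptr(fds[1]), "recv"))
	if err != nil {
		panic(err)
	}
	defer recv.Close()

	// Encode stdin's descriptor as an SCM_RIGHTS message; the single data
	// byte is there because some kernels reject empty payloads.
	oob := unix.UnixRights(int(os.Stdin.Fd()))
	if _, _, err := send.(*net.UnixConn).WriteMsgUnix([]byte{0}, oob, nil); err != nil {
		panic(err)
	}

	// On receipt the kernel has already installed the duplicated
	// descriptors; ParseUnixRights just reads their numbers back out.
	buf, oob2 := make([]byte, 1), make([]byte, 128)
	_, oobn, _, _, err := recv.(*net.UnixConn).ReadMsgUnix(buf, oob2)
	if err != nil {
		panic(err)
	}
	scms, err := unix.ParseSocketControlMessage(oob2[:oobn])
	if err != nil {
		panic(err)
	}
	got, err := unix.ParseUnixRights(&scms[0])
	if err != nil {
		panic(err)
	}
	fmt.Println("received duplicated fd:", got[0])
}
```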
+func UnixRights(fds ...int) []byte { + datalen := len(fds) * 4 + b := make([]byte, CmsgSpace(datalen)) + h := (*Cmsghdr)(unsafe.Pointer(&b[0])) + h.Level = SOL_SOCKET + h.Type = SCM_RIGHTS + h.SetLen(CmsgLen(datalen)) + data := cmsgData(h) + for _, fd := range fds { + *(*int32)(data) = int32(fd) + data = unsafe.Pointer(uintptr(data) + 4) + } + return b +} + +// ParseUnixRights decodes a socket control message that contains an +// integer array of open file descriptors from another process. +func ParseUnixRights(m *SocketControlMessage) ([]int, error) { + if m.Header.Level != SOL_SOCKET { + return nil, EINVAL + } + if m.Header.Type != SCM_RIGHTS { + return nil, EINVAL + } + fds := make([]int, len(m.Data)>>2) + for i, j := 0, 0; i < len(m.Data); i += 4 { + fds[j] = int(*(*int32)(unsafe.Pointer(&m.Data[i]))) + j++ + } + return fds, nil +} diff --git a/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/str.go b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/str.go new file mode 100644 index 000000000..35ed66435 --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/str.go @@ -0,0 +1,26 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build darwin dragonfly freebsd linux netbsd openbsd solaris + +package unix + +func itoa(val int) string { // do it here rather than with fmt to avoid dependency + if val < 0 { + return "-" + uitoa(uint(-val)) + } + return uitoa(uint(val)) +} + +func uitoa(val uint) string { + var buf [32]byte // big enough for int64 + i := len(buf) - 1 + for val >= 10 { + buf[i] = byte(val%10 + '0') + i-- + val /= 10 + } + buf[i] = byte(val + '0') + return string(buf[i:]) +} diff --git a/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/syscall.go b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/syscall.go new file mode 100644 index 000000000..a48d47cff --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/syscall.go @@ -0,0 +1,74 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build darwin dragonfly freebsd linux netbsd openbsd solaris + +// Package unix contains an interface to the low-level operating system +// primitives. OS details vary depending on the underlying system, and +// by default, godoc will display OS-specific documentation for the current +// system. If you want godoc to display OS documentation for another +// system, set $GOOS and $GOARCH to the desired system. For example, if +// you want to view documentation for freebsd/arm on linux/amd64, set $GOOS +// to freebsd and $GOARCH to arm. +// The primary use of this package is inside other packages that provide a more +// portable interface to the system, such as "os", "time" and "net". Use +// those packages rather than this one if you can. +// For details of the functions and data types in this package consult +// the manuals for the appropriate operating system. +// These calls return err == nil to indicate success; otherwise +// err represents an operating system error describing the failure and +// holds a value of type syscall.Errno. +package unix + +import "unsafe" + +// ByteSliceFromString returns a NUL-terminated slice of bytes +// containing the text of s. 
If s contains a NUL byte at any +// location, it returns (nil, EINVAL). +func ByteSliceFromString(s string) ([]byte, error) { + for i := 0; i < len(s); i++ { + if s[i] == 0 { + return nil, EINVAL + } + } + a := make([]byte, len(s)+1) + copy(a, s) + return a, nil +} + +// BytePtrFromString returns a pointer to a NUL-terminated array of +// bytes containing the text of s. If s contains a NUL byte at any +// location, it returns (nil, EINVAL). +func BytePtrFromString(s string) (*byte, error) { + a, err := ByteSliceFromString(s) + if err != nil { + return nil, err + } + return &a[0], nil +} + +// Single-word zero for use when we need a valid pointer to 0 bytes. +// See mkunix.pl. +var _zero uintptr + +func (ts *Timespec) Unix() (sec int64, nsec int64) { + return int64(ts.Sec), int64(ts.Nsec) +} + +func (tv *Timeval) Unix() (sec int64, nsec int64) { + return int64(tv.Sec), int64(tv.Usec) * 1000 +} + +func (ts *Timespec) Nano() int64 { + return int64(ts.Sec)*1e9 + int64(ts.Nsec) +} + +func (tv *Timeval) Nano() int64 { + return int64(tv.Sec)*1e9 + int64(tv.Usec)*1000 +} + +// use is a no-op, but the compiler cannot see that it is. +// Calling use(p) ensures that p is kept live until that point. +//go:noescape +func use(p unsafe.Pointer) diff --git a/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/syscall_bsd.go b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/syscall_bsd.go new file mode 100644 index 000000000..e9671764c --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/syscall_bsd.go @@ -0,0 +1,628 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build darwin dragonfly freebsd netbsd openbsd + +// BSD system call wrappers shared by *BSD based systems +// including OS X (Darwin) and FreeBSD. Like the other +// syscall_*.go files it is compiled as Go code but also +// used as input to mksyscall which parses the //sys +// lines and generates system call stubs. + +package unix + +import ( + "runtime" + "syscall" + "unsafe" +) + +/* + * Wrapped + */ + +//sysnb getgroups(ngid int, gid *_Gid_t) (n int, err error) +//sysnb setgroups(ngid int, gid *_Gid_t) (err error) + +func Getgroups() (gids []int, err error) { + n, err := getgroups(0, nil) + if err != nil { + return nil, err + } + if n == 0 { + return nil, nil + } + + // Sanity check group count. Max is 16 on BSD. + if n < 0 || n > 1000 { + return nil, EINVAL + } + + a := make([]_Gid_t, n) + n, err = getgroups(n, &a[0]) + if err != nil { + return nil, err + } + gids = make([]int, n) + for i, v := range a[0:n] { + gids[i] = int(v) + } + return +} + +func Setgroups(gids []int) (err error) { + if len(gids) == 0 { + return setgroups(0, nil) + } + + a := make([]_Gid_t, len(gids)) + for i, v := range gids { + a[i] = _Gid_t(v) + } + return setgroups(len(a), &a[0]) +} + +func ReadDirent(fd int, buf []byte) (n int, err error) { + // Final argument is (basep *uintptr) and the syscall doesn't take nil. + // 64 bits should be enough. (32 bits isn't even on 386). Since the + // actual system call is getdirentries64, 64 is a good guess. + // TODO(rsc): Can we use a single global basep for all calls? + var base = (*uintptr)(unsafe.Pointer(new(uint64))) + return Getdirentries(fd, buf, base) +} + +// Wait status is 7 bits at bottom, either 0 (exited), +// 0x7F (stopped), or a signal number that caused an exit. 
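In practice the bit layout described here is read through the WaitStatus accessors defined just below rather than by hand. A minimal sketch of reaping a child and decoding its status, assuming the upstream import path golang.org/x/sys/unix (which this vendored copy mirrors):

package main

import (
	"fmt"

	"golang.org/x/sys/unix" // assumed import path; the vendored copy mirrors upstream
)

func main() {
	// Reap any child process (creating the child is omitted for brevity).
	var ws unix.WaitStatus
	pid, err := unix.Wait4(-1, &ws, 0, nil)
	if err != nil {
		fmt.Println("wait4:", err)
		return
	}
	switch {
	case ws.Exited():
		fmt.Println(pid, "exited with code", ws.ExitStatus()) // code lives in the high bits
	case ws.Signaled():
		fmt.Println(pid, "killed by", ws.Signal(), "core dumped:", ws.CoreDump()) // low 7 bits, plus the 0x80 core bit
	case ws.Stopped():
		fmt.Println(pid, "stopped by", ws.StopSignal())
	}
}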
+// The 0x80 bit is whether there was a core dump. +// An extra number (exit code, signal causing a stop) +// is in the high bits. + +type WaitStatus uint32 + +const ( + mask = 0x7F + core = 0x80 + shift = 8 + + exited = 0 + stopped = 0x7F +) + +func (w WaitStatus) Exited() bool { return w&mask == exited } + +func (w WaitStatus) ExitStatus() int { + if w&mask != exited { + return -1 + } + return int(w >> shift) +} + +func (w WaitStatus) Signaled() bool { return w&mask != stopped && w&mask != 0 } + +func (w WaitStatus) Signal() syscall.Signal { + sig := syscall.Signal(w & mask) + if sig == stopped || sig == 0 { + return -1 + } + return sig +} + +func (w WaitStatus) CoreDump() bool { return w.Signaled() && w&core != 0 } + +func (w WaitStatus) Stopped() bool { return w&mask == stopped && syscall.Signal(w>>shift) != SIGSTOP } + +func (w WaitStatus) Continued() bool { return w&mask == stopped && syscall.Signal(w>>shift) == SIGSTOP } + +func (w WaitStatus) StopSignal() syscall.Signal { + if !w.Stopped() { + return -1 + } + return syscall.Signal(w>>shift) & 0xFF +} + +func (w WaitStatus) TrapCause() int { return -1 } + +//sys wait4(pid int, wstatus *_C_int, options int, rusage *Rusage) (wpid int, err error) + +func Wait4(pid int, wstatus *WaitStatus, options int, rusage *Rusage) (wpid int, err error) { + var status _C_int + wpid, err = wait4(pid, &status, options, rusage) + if wstatus != nil { + *wstatus = WaitStatus(status) + } + return +} + +//sys accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error) +//sys bind(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) +//sys connect(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) +//sysnb socket(domain int, typ int, proto int) (fd int, err error) +//sys getsockopt(s int, level int, name int, val unsafe.Pointer, vallen *_Socklen) (err error) +//sys setsockopt(s int, level int, name int, val unsafe.Pointer, vallen uintptr) (err error) +//sysnb getpeername(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) +//sysnb getsockname(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) +//sys Shutdown(s int, how int) (err error) + +func (sa *SockaddrInet4) sockaddr() (unsafe.Pointer, _Socklen, error) { + if sa.Port < 0 || sa.Port > 0xFFFF { + return nil, 0, EINVAL + } + sa.raw.Len = SizeofSockaddrInet4 + sa.raw.Family = AF_INET + p := (*[2]byte)(unsafe.Pointer(&sa.raw.Port)) + p[0] = byte(sa.Port >> 8) + p[1] = byte(sa.Port) + for i := 0; i < len(sa.Addr); i++ { + sa.raw.Addr[i] = sa.Addr[i] + } + return unsafe.Pointer(&sa.raw), _Socklen(sa.raw.Len), nil +} + +func (sa *SockaddrInet6) sockaddr() (unsafe.Pointer, _Socklen, error) { + if sa.Port < 0 || sa.Port > 0xFFFF { + return nil, 0, EINVAL + } + sa.raw.Len = SizeofSockaddrInet6 + sa.raw.Family = AF_INET6 + p := (*[2]byte)(unsafe.Pointer(&sa.raw.Port)) + p[0] = byte(sa.Port >> 8) + p[1] = byte(sa.Port) + sa.raw.Scope_id = sa.ZoneId + for i := 0; i < len(sa.Addr); i++ { + sa.raw.Addr[i] = sa.Addr[i] + } + return unsafe.Pointer(&sa.raw), _Socklen(sa.raw.Len), nil +} + +func (sa *SockaddrUnix) sockaddr() (unsafe.Pointer, _Socklen, error) { + name := sa.Name + n := len(name) + if n >= len(sa.raw.Path) || n == 0 { + return nil, 0, EINVAL + } + sa.raw.Len = byte(3 + n) // 2 for Family, Len; 1 for NUL + sa.raw.Family = AF_UNIX + for i := 0; i < n; i++ { + sa.raw.Path[i] = int8(name[i]) + } + return unsafe.Pointer(&sa.raw), _Socklen(sa.raw.Len), nil +} + +func (sa *SockaddrDatalink) sockaddr() (unsafe.Pointer, _Socklen, error) { + if sa.Index == 0 { + return nil, 
0, EINVAL + } + sa.raw.Len = sa.Len + sa.raw.Family = AF_LINK + sa.raw.Index = sa.Index + sa.raw.Type = sa.Type + sa.raw.Nlen = sa.Nlen + sa.raw.Alen = sa.Alen + sa.raw.Slen = sa.Slen + for i := 0; i < len(sa.raw.Data); i++ { + sa.raw.Data[i] = sa.Data[i] + } + return unsafe.Pointer(&sa.raw), SizeofSockaddrDatalink, nil +} + +func anyToSockaddr(rsa *RawSockaddrAny) (Sockaddr, error) { + switch rsa.Addr.Family { + case AF_LINK: + pp := (*RawSockaddrDatalink)(unsafe.Pointer(rsa)) + sa := new(SockaddrDatalink) + sa.Len = pp.Len + sa.Family = pp.Family + sa.Index = pp.Index + sa.Type = pp.Type + sa.Nlen = pp.Nlen + sa.Alen = pp.Alen + sa.Slen = pp.Slen + for i := 0; i < len(sa.Data); i++ { + sa.Data[i] = pp.Data[i] + } + return sa, nil + + case AF_UNIX: + pp := (*RawSockaddrUnix)(unsafe.Pointer(rsa)) + if pp.Len < 2 || pp.Len > SizeofSockaddrUnix { + return nil, EINVAL + } + sa := new(SockaddrUnix) + + // Some BSDs include the trailing NUL in the length, whereas + // others do not. Work around this by subtracting the leading + // family and len. The path is then scanned to see if a NUL + // terminator still exists within the length. + n := int(pp.Len) - 2 // subtract leading Family, Len + for i := 0; i < n; i++ { + if pp.Path[i] == 0 { + // found early NUL; assume Len included the NUL + // or was overestimating. + n = i + break + } + } + bytes := (*[10000]byte)(unsafe.Pointer(&pp.Path[0]))[0:n] + sa.Name = string(bytes) + return sa, nil + + case AF_INET: + pp := (*RawSockaddrInet4)(unsafe.Pointer(rsa)) + sa := new(SockaddrInet4) + p := (*[2]byte)(unsafe.Pointer(&pp.Port)) + sa.Port = int(p[0])<<8 + int(p[1]) + for i := 0; i < len(sa.Addr); i++ { + sa.Addr[i] = pp.Addr[i] + } + return sa, nil + + case AF_INET6: + pp := (*RawSockaddrInet6)(unsafe.Pointer(rsa)) + sa := new(SockaddrInet6) + p := (*[2]byte)(unsafe.Pointer(&pp.Port)) + sa.Port = int(p[0])<<8 + int(p[1]) + sa.ZoneId = pp.Scope_id + for i := 0; i < len(sa.Addr); i++ { + sa.Addr[i] = pp.Addr[i] + } + return sa, nil + } + return nil, EAFNOSUPPORT +} + +func Accept(fd int) (nfd int, sa Sockaddr, err error) { + var rsa RawSockaddrAny + var len _Socklen = SizeofSockaddrAny + nfd, err = accept(fd, &rsa, &len) + if err != nil { + return + } + if runtime.GOOS == "darwin" && len == 0 { + // Accepted socket has no address. + // This is likely due to a bug in xnu kernels, + // where instead of ECONNABORTED error socket + // is accepted, but has no address. + Close(nfd) + return 0, nil, ECONNABORTED + } + sa, err = anyToSockaddr(&rsa) + if err != nil { + Close(nfd) + nfd = 0 + } + return +} + +func Getsockname(fd int) (sa Sockaddr, err error) { + var rsa RawSockaddrAny + var len _Socklen = SizeofSockaddrAny + if err = getsockname(fd, &rsa, &len); err != nil { + return + } + // TODO(jsing): DragonFly has a "bug" (see issue 3349), which should be + // reported upstream. 
+ if runtime.GOOS == "dragonfly" && rsa.Addr.Family == AF_UNSPEC && rsa.Addr.Len == 0 { + rsa.Addr.Family = AF_UNIX + rsa.Addr.Len = SizeofSockaddrUnix + } + return anyToSockaddr(&rsa) +} + +//sysnb socketpair(domain int, typ int, proto int, fd *[2]int32) (err error) + +func GetsockoptByte(fd, level, opt int) (value byte, err error) { + var n byte + vallen := _Socklen(1) + err = getsockopt(fd, level, opt, unsafe.Pointer(&n), &vallen) + return n, err +} + +func GetsockoptInet4Addr(fd, level, opt int) (value [4]byte, err error) { + vallen := _Socklen(4) + err = getsockopt(fd, level, opt, unsafe.Pointer(&value[0]), &vallen) + return value, err +} + +func GetsockoptIPMreq(fd, level, opt int) (*IPMreq, error) { + var value IPMreq + vallen := _Socklen(SizeofIPMreq) + err := getsockopt(fd, level, opt, unsafe.Pointer(&value), &vallen) + return &value, err +} + +func GetsockoptIPv6Mreq(fd, level, opt int) (*IPv6Mreq, error) { + var value IPv6Mreq + vallen := _Socklen(SizeofIPv6Mreq) + err := getsockopt(fd, level, opt, unsafe.Pointer(&value), &vallen) + return &value, err +} + +func GetsockoptIPv6MTUInfo(fd, level, opt int) (*IPv6MTUInfo, error) { + var value IPv6MTUInfo + vallen := _Socklen(SizeofIPv6MTUInfo) + err := getsockopt(fd, level, opt, unsafe.Pointer(&value), &vallen) + return &value, err +} + +func GetsockoptICMPv6Filter(fd, level, opt int) (*ICMPv6Filter, error) { + var value ICMPv6Filter + vallen := _Socklen(SizeofICMPv6Filter) + err := getsockopt(fd, level, opt, unsafe.Pointer(&value), &vallen) + return &value, err +} + +//sys recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Socklen) (n int, err error) +//sys sendto(s int, buf []byte, flags int, to unsafe.Pointer, addrlen _Socklen) (err error) +//sys recvmsg(s int, msg *Msghdr, flags int) (n int, err error) + +func Recvmsg(fd int, p, oob []byte, flags int) (n, oobn int, recvflags int, from Sockaddr, err error) { + var msg Msghdr + var rsa RawSockaddrAny + msg.Name = (*byte)(unsafe.Pointer(&rsa)) + msg.Namelen = uint32(SizeofSockaddrAny) + var iov Iovec + if len(p) > 0 { + iov.Base = (*byte)(unsafe.Pointer(&p[0])) + iov.SetLen(len(p)) + } + var dummy byte + if len(oob) > 0 { + // receive at least one normal byte + if len(p) == 0 { + iov.Base = &dummy + iov.SetLen(1) + } + msg.Control = (*byte)(unsafe.Pointer(&oob[0])) + msg.SetControllen(len(oob)) + } + msg.Iov = &iov + msg.Iovlen = 1 + if n, err = recvmsg(fd, &msg, flags); err != nil { + return + } + oobn = int(msg.Controllen) + recvflags = int(msg.Flags) + // source address is only specified if the socket is unconnected + if rsa.Addr.Family != AF_UNSPEC { + from, err = anyToSockaddr(&rsa) + } + return +} + +//sys sendmsg(s int, msg *Msghdr, flags int) (n int, err error) + +func Sendmsg(fd int, p, oob []byte, to Sockaddr, flags int) (err error) { + _, err = SendmsgN(fd, p, oob, to, flags) + return +} + +func SendmsgN(fd int, p, oob []byte, to Sockaddr, flags int) (n int, err error) { + var ptr unsafe.Pointer + var salen _Socklen + if to != nil { + ptr, salen, err = to.sockaddr() + if err != nil { + return 0, err + } + } + var msg Msghdr + msg.Name = (*byte)(unsafe.Pointer(ptr)) + msg.Namelen = uint32(salen) + var iov Iovec + if len(p) > 0 { + iov.Base = (*byte)(unsafe.Pointer(&p[0])) + iov.SetLen(len(p)) + } + var dummy byte + if len(oob) > 0 { + // send at least one normal byte + if len(p) == 0 { + iov.Base = &dummy + iov.SetLen(1) + } + msg.Control = (*byte)(unsafe.Pointer(&oob[0])) + msg.SetControllen(len(oob)) + } + msg.Iov = &iov + msg.Iovlen = 1 + if n, err = 
sendmsg(fd, &msg, flags); err != nil { + return 0, err + } + if len(oob) > 0 && len(p) == 0 { + n = 0 + } + return n, nil +} + +//sys kevent(kq int, change unsafe.Pointer, nchange int, event unsafe.Pointer, nevent int, timeout *Timespec) (n int, err error) + +func Kevent(kq int, changes, events []Kevent_t, timeout *Timespec) (n int, err error) { + var change, event unsafe.Pointer + if len(changes) > 0 { + change = unsafe.Pointer(&changes[0]) + } + if len(events) > 0 { + event = unsafe.Pointer(&events[0]) + } + return kevent(kq, change, len(changes), event, len(events), timeout) +} + +//sys sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) = SYS___SYSCTL + +// sysctlmib translates name to mib number and appends any additional args. +func sysctlmib(name string, args ...int) ([]_C_int, error) { + // Translate name to mib number. + mib, err := nametomib(name) + if err != nil { + return nil, err + } + + for _, a := range args { + mib = append(mib, _C_int(a)) + } + + return mib, nil +} + +func Sysctl(name string) (string, error) { + return SysctlArgs(name) +} + +func SysctlArgs(name string, args ...int) (string, error) { + mib, err := sysctlmib(name, args...) + if err != nil { + return "", err + } + + // Find size. + n := uintptr(0) + if err := sysctl(mib, nil, &n, nil, 0); err != nil { + return "", err + } + if n == 0 { + return "", nil + } + + // Read into buffer of that size. + buf := make([]byte, n) + if err := sysctl(mib, &buf[0], &n, nil, 0); err != nil { + return "", err + } + + // Throw away terminating NUL. + if n > 0 && buf[n-1] == '\x00' { + n-- + } + return string(buf[0:n]), nil +} + +func SysctlUint32(name string) (uint32, error) { + return SysctlUint32Args(name) +} + +func SysctlUint32Args(name string, args ...int) (uint32, error) { + mib, err := sysctlmib(name, args...) + if err != nil { + return 0, err + } + + n := uintptr(4) + buf := make([]byte, 4) + if err := sysctl(mib, &buf[0], &n, nil, 0); err != nil { + return 0, err + } + if n != 4 { + return 0, EIO + } + return *(*uint32)(unsafe.Pointer(&buf[0])), nil +} + +func SysctlUint64(name string, args ...int) (uint64, error) { + mib, err := sysctlmib(name, args...) + if err != nil { + return 0, err + } + + n := uintptr(8) + buf := make([]byte, 8) + if err := sysctl(mib, &buf[0], &n, nil, 0); err != nil { + return 0, err + } + if n != 8 { + return 0, EIO + } + return *(*uint64)(unsafe.Pointer(&buf[0])), nil +} + +func SysctlRaw(name string, args ...int) ([]byte, error) { + mib, err := sysctlmib(name, args...) + if err != nil { + return nil, err + } + + // Find size. + n := uintptr(0) + if err := sysctl(mib, nil, &n, nil, 0); err != nil { + return nil, err + } + if n == 0 { + return nil, nil + } + + // Read into buffer of that size. + buf := make([]byte, n) + if err := sysctl(mib, &buf[0], &n, nil, 0); err != nil { + return nil, err + } + + // The actual call may return less than the original reported required + // size so ensure we deal with that. 
+ return buf[:n], nil +} + +//sys utimes(path string, timeval *[2]Timeval) (err error) + +func Utimes(path string, tv []Timeval) error { + if tv == nil { + return utimes(path, nil) + } + if len(tv) != 2 { + return EINVAL + } + return utimes(path, (*[2]Timeval)(unsafe.Pointer(&tv[0]))) +} + +func UtimesNano(path string, ts []Timespec) error { + if ts == nil { + return utimes(path, nil) + } + // TODO: The BSDs can do utimensat with SYS_UTIMENSAT but it + // isn't supported by darwin so this uses utimes instead + if len(ts) != 2 { + return EINVAL + } + // Not as efficient as it could be because Timespec and + // Timeval have different types in the different OSes + tv := [2]Timeval{ + NsecToTimeval(TimespecToNsec(ts[0])), + NsecToTimeval(TimespecToNsec(ts[1])), + } + return utimes(path, (*[2]Timeval)(unsafe.Pointer(&tv[0]))) +} + +//sys futimes(fd int, timeval *[2]Timeval) (err error) + +func Futimes(fd int, tv []Timeval) error { + if tv == nil { + return futimes(fd, nil) + } + if len(tv) != 2 { + return EINVAL + } + return futimes(fd, (*[2]Timeval)(unsafe.Pointer(&tv[0]))) +} + +//sys fcntl(fd int, cmd int, arg int) (val int, err error) + +// TODO: wrap +// Acct(name nil-string) (err error) +// Gethostuuid(uuid *byte, timeout *Timespec) (err error) +// Madvise(addr *byte, len int, behav int) (err error) +// Mprotect(addr *byte, len int, prot int) (err error) +// Msync(addr *byte, len int, flags int) (err error) +// Ptrace(req int, pid int, addr uintptr, data int) (ret uintptr, err error) + +var mapper = &mmapper{ + active: make(map[*byte][]byte), + mmap: mmap, + munmap: munmap, +} + +func Mmap(fd int, offset int64, length int, prot int, flags int) (data []byte, err error) { + return mapper.Mmap(fd, offset, length, prot, flags) +} + +func Munmap(b []byte) (err error) { + return mapper.Munmap(b) +} diff --git a/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/syscall_bsd_test.go b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/syscall_bsd_test.go new file mode 100644 index 000000000..7cd360647 --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/syscall_bsd_test.go @@ -0,0 +1,42 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build darwin dragonfly freebsd openbsd + +package unix_test + +import ( + "testing" + + "github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix" +) + +const MNT_WAIT = 1 + +func TestGetfsstat(t *testing.T) { + n, err := unix.Getfsstat(nil, MNT_WAIT) + if err != nil { + t.Fatal(err) + } + + data := make([]unix.Statfs_t, n) + n, err = unix.Getfsstat(data, MNT_WAIT) + if err != nil { + t.Fatal(err) + } + + empty := unix.Statfs_t{} + for _, stat := range data { + if stat == empty { + t.Fatal("an empty Statfs_t struct was returned") + } + } +} + +func TestSysctlRaw(t *testing.T) { + _, err := unix.SysctlRaw("kern.proc.pid", unix.Getpid()) + if err != nil { + t.Fatal(err) + } +} diff --git a/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/syscall_darwin.go b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/syscall_darwin.go new file mode 100644 index 000000000..0d1771c3f --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/syscall_darwin.go @@ -0,0 +1,509 @@ +// Copyright 2009,2010 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Darwin system calls. +// This file is compiled as ordinary Go code, +// but it is also input to mksyscall, +// which parses the //sys lines and generates system call stubs. +// Note that sometimes we use a lowercase //sys name and wrap +// it in our own nicer implementation, either here or in +// syscall_bsd.go or syscall_unix.go. + +package unix + +import ( + errorspkg "errors" + "syscall" + "unsafe" +) + +const ImplementsGetwd = true + +func Getwd() (string, error) { + buf := make([]byte, 2048) + attrs, err := getAttrList(".", attrList{CommonAttr: attrCmnFullpath}, buf, 0) + if err == nil && len(attrs) == 1 && len(attrs[0]) >= 2 { + wd := string(attrs[0]) + // Sanity check that it's an absolute path and ends + // in a null byte, which we then strip. + if wd[0] == '/' && wd[len(wd)-1] == 0 { + return wd[:len(wd)-1], nil + } + } + // If pkg/os/getwd.go gets ENOTSUP, it will fall back to the + // slow algorithm. + return "", ENOTSUP +} + +type SockaddrDatalink struct { + Len uint8 + Family uint8 + Index uint16 + Type uint8 + Nlen uint8 + Alen uint8 + Slen uint8 + Data [12]int8 + raw RawSockaddrDatalink +} + +// Translate "kern.hostname" to []_C_int{0,1,2,3}. +func nametomib(name string) (mib []_C_int, err error) { + const siz = unsafe.Sizeof(mib[0]) + + // NOTE(rsc): It seems strange to set the buffer to have + // size CTL_MAXNAME+2 but use only CTL_MAXNAME + // as the size. I don't know why the +2 is here, but the + // kernel uses +2 for its own implementation of this function. + // I am scared that if we don't include the +2 here, the kernel + // will silently write 2 words farther than we specify + // and we'll get memory corruption. + var buf [CTL_MAXNAME + 2]_C_int + n := uintptr(CTL_MAXNAME) * siz + + p := (*byte)(unsafe.Pointer(&buf[0])) + bytes, err := ByteSliceFromString(name) + if err != nil { + return nil, err + } + + // Magic sysctl: "setting" 0.3 to a string name + // lets you read back the array of integers form. + if err = sysctl([]_C_int{0, 3}, p, &n, &bytes[0], uintptr(len(name))); err != nil { + return nil, err + } + return buf[0 : n/siz], nil +} + +// ParseDirent parses up to max directory entries in buf, +// appending the names to names. It returns the number +// bytes consumed from buf, the number of entries added +// to names, and the new names slice. +func ParseDirent(buf []byte, max int, names []string) (consumed int, count int, newnames []string) { + origlen := len(buf) + for max != 0 && len(buf) > 0 { + dirent := (*Dirent)(unsafe.Pointer(&buf[0])) + if dirent.Reclen == 0 { + buf = nil + break + } + buf = buf[dirent.Reclen:] + if dirent.Ino == 0 { // File absent in directory. + continue + } + bytes := (*[10000]byte)(unsafe.Pointer(&dirent.Name[0])) + var name = string(bytes[0:dirent.Namlen]) + if name == "." || name == ".." 
{ // Useless names + continue + } + max-- + count++ + names = append(names, name) + } + return origlen - len(buf), count, names +} + +//sys ptrace(request int, pid int, addr uintptr, data uintptr) (err error) +func PtraceAttach(pid int) (err error) { return ptrace(PT_ATTACH, pid, 0, 0) } +func PtraceDetach(pid int) (err error) { return ptrace(PT_DETACH, pid, 0, 0) } + +const ( + attrBitMapCount = 5 + attrCmnFullpath = 0x08000000 +) + +type attrList struct { + bitmapCount uint16 + _ uint16 + CommonAttr uint32 + VolAttr uint32 + DirAttr uint32 + FileAttr uint32 + Forkattr uint32 +} + +func getAttrList(path string, attrList attrList, attrBuf []byte, options uint) (attrs [][]byte, err error) { + if len(attrBuf) < 4 { + return nil, errorspkg.New("attrBuf too small") + } + attrList.bitmapCount = attrBitMapCount + + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return nil, err + } + + _, _, e1 := Syscall6( + SYS_GETATTRLIST, + uintptr(unsafe.Pointer(_p0)), + uintptr(unsafe.Pointer(&attrList)), + uintptr(unsafe.Pointer(&attrBuf[0])), + uintptr(len(attrBuf)), + uintptr(options), + 0, + ) + if e1 != 0 { + return nil, e1 + } + size := *(*uint32)(unsafe.Pointer(&attrBuf[0])) + + // dat is the section of attrBuf that contains valid data, + // without the 4 byte length header. All attribute offsets + // are relative to dat. + dat := attrBuf + if int(size) < len(attrBuf) { + dat = dat[:size] + } + dat = dat[4:] // remove length prefix + + for i := uint32(0); int(i) < len(dat); { + header := dat[i:] + if len(header) < 8 { + return attrs, errorspkg.New("truncated attribute header") + } + datOff := *(*int32)(unsafe.Pointer(&header[0])) + attrLen := *(*uint32)(unsafe.Pointer(&header[4])) + if datOff < 0 || uint32(datOff)+attrLen > uint32(len(dat)) { + return attrs, errorspkg.New("truncated results; attrBuf too small") + } + end := uint32(datOff) + attrLen + attrs = append(attrs, dat[datOff:end]) + i = end + if r := i % 4; r != 0 { + i += (4 - r) + } + } + return +} + +//sysnb pipe() (r int, w int, err error) + +func Pipe(p []int) (err error) { + if len(p) != 2 { + return EINVAL + } + p[0], p[1], err = pipe() + return +} + +func Getfsstat(buf []Statfs_t, flags int) (n int, err error) { + var _p0 unsafe.Pointer + var bufsize uintptr + if len(buf) > 0 { + _p0 = unsafe.Pointer(&buf[0]) + bufsize = unsafe.Sizeof(Statfs_t{}) * uintptr(len(buf)) + } + r0, _, e1 := Syscall(SYS_GETFSSTAT64, uintptr(_p0), bufsize, uintptr(flags)) + n = int(r0) + if e1 != 0 { + err = e1 + } + return +} + +/* + * Wrapped + */ + +//sys kill(pid int, signum int, posix int) (err error) + +func Kill(pid int, signum syscall.Signal) (err error) { return kill(pid, int(signum), 1) } + +/* + * Exposed directly + */ +//sys Access(path string, mode uint32) (err error) +//sys Adjtime(delta *Timeval, olddelta *Timeval) (err error) +//sys Chdir(path string) (err error) +//sys Chflags(path string, flags int) (err error) +//sys Chmod(path string, mode uint32) (err error) +//sys Chown(path string, uid int, gid int) (err error) +//sys Chroot(path string) (err error) +//sys Close(fd int) (err error) +//sys Dup(fd int) (nfd int, err error) +//sys Dup2(from int, to int) (err error) +//sys Exchangedata(path1 string, path2 string, options int) (err error) +//sys Exit(code int) +//sys Fchdir(fd int) (err error) +//sys Fchflags(fd int, flags int) (err error) +//sys Fchmod(fd int, mode uint32) (err error) +//sys Fchown(fd int, uid int, gid int) (err error) +//sys Flock(fd int, how int) (err error) +//sys Fpathconf(fd int, name int) (val 
int, err error) +//sys Fstat(fd int, stat *Stat_t) (err error) = SYS_FSTAT64 +//sys Fstatfs(fd int, stat *Statfs_t) (err error) = SYS_FSTATFS64 +//sys Fsync(fd int) (err error) +//sys Ftruncate(fd int, length int64) (err error) +//sys Getdirentries(fd int, buf []byte, basep *uintptr) (n int, err error) = SYS_GETDIRENTRIES64 +//sys Getdtablesize() (size int) +//sysnb Getegid() (egid int) +//sysnb Geteuid() (uid int) +//sysnb Getgid() (gid int) +//sysnb Getpgid(pid int) (pgid int, err error) +//sysnb Getpgrp() (pgrp int) +//sysnb Getpid() (pid int) +//sysnb Getppid() (ppid int) +//sys Getpriority(which int, who int) (prio int, err error) +//sysnb Getrlimit(which int, lim *Rlimit) (err error) +//sysnb Getrusage(who int, rusage *Rusage) (err error) +//sysnb Getsid(pid int) (sid int, err error) +//sysnb Getuid() (uid int) +//sysnb Issetugid() (tainted bool) +//sys Kqueue() (fd int, err error) +//sys Lchown(path string, uid int, gid int) (err error) +//sys Link(path string, link string) (err error) +//sys Listen(s int, backlog int) (err error) +//sys Lstat(path string, stat *Stat_t) (err error) = SYS_LSTAT64 +//sys Mkdir(path string, mode uint32) (err error) +//sys Mkfifo(path string, mode uint32) (err error) +//sys Mknod(path string, mode uint32, dev int) (err error) +//sys Mlock(b []byte) (err error) +//sys Mlockall(flags int) (err error) +//sys Mprotect(b []byte, prot int) (err error) +//sys Munlock(b []byte) (err error) +//sys Munlockall() (err error) +//sys Open(path string, mode int, perm uint32) (fd int, err error) +//sys Pathconf(path string, name int) (val int, err error) +//sys Pread(fd int, p []byte, offset int64) (n int, err error) +//sys Pwrite(fd int, p []byte, offset int64) (n int, err error) +//sys read(fd int, p []byte) (n int, err error) +//sys Readlink(path string, buf []byte) (n int, err error) +//sys Rename(from string, to string) (err error) +//sys Revoke(path string) (err error) +//sys Rmdir(path string) (err error) +//sys Seek(fd int, offset int64, whence int) (newoffset int64, err error) = SYS_LSEEK +//sys Select(n int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (err error) +//sys Setegid(egid int) (err error) +//sysnb Seteuid(euid int) (err error) +//sysnb Setgid(gid int) (err error) +//sys Setlogin(name string) (err error) +//sysnb Setpgid(pid int, pgid int) (err error) +//sys Setpriority(which int, who int, prio int) (err error) +//sys Setprivexec(flag int) (err error) +//sysnb Setregid(rgid int, egid int) (err error) +//sysnb Setreuid(ruid int, euid int) (err error) +//sysnb Setrlimit(which int, lim *Rlimit) (err error) +//sysnb Setsid() (pid int, err error) +//sysnb Settimeofday(tp *Timeval) (err error) +//sysnb Setuid(uid int) (err error) +//sys Stat(path string, stat *Stat_t) (err error) = SYS_STAT64 +//sys Statfs(path string, stat *Statfs_t) (err error) = SYS_STATFS64 +//sys Symlink(path string, link string) (err error) +//sys Sync() (err error) +//sys Truncate(path string, length int64) (err error) +//sys Umask(newmask int) (oldmask int) +//sys Undelete(path string) (err error) +//sys Unlink(path string) (err error) +//sys Unmount(path string, flags int) (err error) +//sys write(fd int, p []byte) (n int, err error) +//sys mmap(addr uintptr, length uintptr, prot int, flag int, fd int, pos int64) (ret uintptr, err error) +//sys munmap(addr uintptr, length uintptr) (err error) +//sys readlen(fd int, buf *byte, nbuf int) (n int, err error) = SYS_READ +//sys writelen(fd int, buf *byte, nbuf int) (n int, err error) = SYS_WRITE + +/* + * Unimplemented + */ +// 
Profil +// Sigaction +// Sigprocmask +// Getlogin +// Sigpending +// Sigaltstack +// Ioctl +// Reboot +// Execve +// Vfork +// Sbrk +// Sstk +// Ovadvise +// Mincore +// Setitimer +// Swapon +// Select +// Sigsuspend +// Readv +// Writev +// Nfssvc +// Getfh +// Quotactl +// Mount +// Csops +// Waitid +// Add_profil +// Kdebug_trace +// Sigreturn +// Mmap +// Mlock +// Munlock +// Atsocket +// Kqueue_from_portset_np +// Kqueue_portset +// Getattrlist +// Setattrlist +// Getdirentriesattr +// Searchfs +// Delete +// Copyfile +// Poll +// Watchevent +// Waitevent +// Modwatch +// Getxattr +// Fgetxattr +// Setxattr +// Fsetxattr +// Removexattr +// Fremovexattr +// Listxattr +// Flistxattr +// Fsctl +// Initgroups +// Posix_spawn +// Nfsclnt +// Fhopen +// Minherit +// Semsys +// Msgsys +// Shmsys +// Semctl +// Semget +// Semop +// Msgctl +// Msgget +// Msgsnd +// Msgrcv +// Shmat +// Shmctl +// Shmdt +// Shmget +// Shm_open +// Shm_unlink +// Sem_open +// Sem_close +// Sem_unlink +// Sem_wait +// Sem_trywait +// Sem_post +// Sem_getvalue +// Sem_init +// Sem_destroy +// Open_extended +// Umask_extended +// Stat_extended +// Lstat_extended +// Fstat_extended +// Chmod_extended +// Fchmod_extended +// Access_extended +// Settid +// Gettid +// Setsgroups +// Getsgroups +// Setwgroups +// Getwgroups +// Mkfifo_extended +// Mkdir_extended +// Identitysvc +// Shared_region_check_np +// Shared_region_map_np +// __pthread_mutex_destroy +// __pthread_mutex_init +// __pthread_mutex_lock +// __pthread_mutex_trylock +// __pthread_mutex_unlock +// __pthread_cond_init +// __pthread_cond_destroy +// __pthread_cond_broadcast +// __pthread_cond_signal +// Setsid_with_pid +// __pthread_cond_timedwait +// Aio_fsync +// Aio_return +// Aio_suspend +// Aio_cancel +// Aio_error +// Aio_read +// Aio_write +// Lio_listio +// __pthread_cond_wait +// Iopolicysys +// Mlockall +// Munlockall +// __pthread_kill +// __pthread_sigmask +// __sigwait +// __disable_threadsignal +// __pthread_markcancel +// __pthread_canceled +// __semwait_signal +// Proc_info +// sendfile +// Stat64_extended +// Lstat64_extended +// Fstat64_extended +// __pthread_chdir +// __pthread_fchdir +// Audit +// Auditon +// Getauid +// Setauid +// Getaudit +// Setaudit +// Getaudit_addr +// Setaudit_addr +// Auditctl +// Bsdthread_create +// Bsdthread_terminate +// Stack_snapshot +// Bsdthread_register +// Workq_open +// Workq_ops +// __mac_execve +// __mac_syscall +// __mac_get_file +// __mac_set_file +// __mac_get_link +// __mac_set_link +// __mac_get_proc +// __mac_set_proc +// __mac_get_fd +// __mac_set_fd +// __mac_get_pid +// __mac_get_lcid +// __mac_get_lctx +// __mac_set_lctx +// Setlcid +// Read_nocancel +// Write_nocancel +// Open_nocancel +// Close_nocancel +// Wait4_nocancel +// Recvmsg_nocancel +// Sendmsg_nocancel +// Recvfrom_nocancel +// Accept_nocancel +// Msync_nocancel +// Fcntl_nocancel +// Select_nocancel +// Fsync_nocancel +// Connect_nocancel +// Sigsuspend_nocancel +// Readv_nocancel +// Writev_nocancel +// Sendto_nocancel +// Pread_nocancel +// Pwrite_nocancel +// Waitid_nocancel +// Poll_nocancel +// Msgsnd_nocancel +// Msgrcv_nocancel +// Sem_wait_nocancel +// Aio_suspend_nocancel +// __sigwait_nocancel +// __semwait_signal_nocancel +// __mac_mount +// __mac_get_mount +// __mac_getfsstat diff --git a/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/syscall_darwin_386.go b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/syscall_darwin_386.go new file mode 100644 index 
000000000..3195c8bf5 --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/syscall_darwin_386.go @@ -0,0 +1,79 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build 386,darwin + +package unix + +import ( + "syscall" + "unsafe" +) + +func Getpagesize() int { return 4096 } + +func TimespecToNsec(ts Timespec) int64 { return int64(ts.Sec)*1e9 + int64(ts.Nsec) } + +func NsecToTimespec(nsec int64) (ts Timespec) { + ts.Sec = int32(nsec / 1e9) + ts.Nsec = int32(nsec % 1e9) + return +} + +func TimevalToNsec(tv Timeval) int64 { return int64(tv.Sec)*1e9 + int64(tv.Usec)*1e3 } + +func NsecToTimeval(nsec int64) (tv Timeval) { + nsec += 999 // round up to microsecond + tv.Usec = int32(nsec % 1e9 / 1e3) + tv.Sec = int32(nsec / 1e9) + return +} + +//sysnb gettimeofday(tp *Timeval) (sec int32, usec int32, err error) +func Gettimeofday(tv *Timeval) (err error) { + // The tv passed to gettimeofday must be non-nil + // but is otherwise unused. The answers come back + // in the two registers. + sec, usec, err := gettimeofday(tv) + tv.Sec = int32(sec) + tv.Usec = int32(usec) + return err +} + +func SetKevent(k *Kevent_t, fd, mode, flags int) { + k.Ident = uint32(fd) + k.Filter = int16(mode) + k.Flags = uint16(flags) +} + +func (iov *Iovec) SetLen(length int) { + iov.Len = uint32(length) +} + +func (msghdr *Msghdr) SetControllen(length int) { + msghdr.Controllen = uint32(length) +} + +func (cmsg *Cmsghdr) SetLen(length int) { + cmsg.Len = uint32(length) +} + +func sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) { + var length = uint64(count) + + _, _, e1 := Syscall9(SYS_SENDFILE, uintptr(infd), uintptr(outfd), uintptr(*offset), uintptr(*offset>>32), uintptr(unsafe.Pointer(&length)), 0, 0, 0, 0) + + written = int(length) + + if e1 != 0 { + err = e1 + } + return +} + +func Syscall9(num, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r1, r2 uintptr, err syscall.Errno) + +// SYS___SYSCTL is used by syscall_bsd.go for all BSDs, but in modern versions +// of darwin/386 the syscall is called sysctl instead of __sysctl. +const SYS___SYSCTL = SYS_SYSCTL diff --git a/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/syscall_darwin_amd64.go b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/syscall_darwin_amd64.go new file mode 100644 index 000000000..7adb98ded --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/syscall_darwin_amd64.go @@ -0,0 +1,81 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
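A detail worth noticing in these per-architecture files: NsecToTimeval rounds up to the next whole microsecond (the "+ 999" above), while Timespec keeps full nanosecond precision. A minimal sketch of the round trip, assuming golang.org/x/sys/unix as the import path:

package main

import (
	"fmt"

	"golang.org/x/sys/unix" // assumed import path; the vendored copy mirrors upstream
)

func main() {
	nsec := int64(1500000) // 1.5 ms in nanoseconds

	ts := unix.NsecToTimespec(nsec)
	fmt.Println(ts.Sec, ts.Nsec) // 0 1500000: Timespec keeps nanosecond precision

	tv := unix.NsecToTimeval(nsec + 1) // not a whole microsecond...
	fmt.Println(tv.Sec, tv.Usec)       // 0 1501: ...so it is rounded up

	fmt.Println(unix.TimevalToNsec(tv)) // 1501000
}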
+ +// +build amd64,darwin + +package unix + +import ( + "syscall" + "unsafe" +) + +//sys Fchmodat(dirfd int, path string, mode uint32, flags int) (err error) + +func Getpagesize() int { return 4096 } + +func TimespecToNsec(ts Timespec) int64 { return int64(ts.Sec)*1e9 + int64(ts.Nsec) } + +func NsecToTimespec(nsec int64) (ts Timespec) { + ts.Sec = nsec / 1e9 + ts.Nsec = nsec % 1e9 + return +} + +func TimevalToNsec(tv Timeval) int64 { return int64(tv.Sec)*1e9 + int64(tv.Usec)*1e3 } + +func NsecToTimeval(nsec int64) (tv Timeval) { + nsec += 999 // round up to microsecond + tv.Usec = int32(nsec % 1e9 / 1e3) + tv.Sec = int64(nsec / 1e9) + return +} + +//sysnb gettimeofday(tp *Timeval) (sec int64, usec int32, err error) +func Gettimeofday(tv *Timeval) (err error) { + // The tv passed to gettimeofday must be non-nil + // but is otherwise unused. The answers come back + // in the two registers. + sec, usec, err := gettimeofday(tv) + tv.Sec = sec + tv.Usec = usec + return err +} + +func SetKevent(k *Kevent_t, fd, mode, flags int) { + k.Ident = uint64(fd) + k.Filter = int16(mode) + k.Flags = uint16(flags) +} + +func (iov *Iovec) SetLen(length int) { + iov.Len = uint64(length) +} + +func (msghdr *Msghdr) SetControllen(length int) { + msghdr.Controllen = uint32(length) +} + +func (cmsg *Cmsghdr) SetLen(length int) { + cmsg.Len = uint32(length) +} + +func sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) { + var length = uint64(count) + + _, _, e1 := Syscall6(SYS_SENDFILE, uintptr(infd), uintptr(outfd), uintptr(*offset), uintptr(unsafe.Pointer(&length)), 0, 0) + + written = int(length) + + if e1 != 0 { + err = e1 + } + return +} + +func Syscall9(num, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r1, r2 uintptr, err syscall.Errno) + +// SYS___SYSCTL is used by syscall_bsd.go for all BSDs, but in modern versions +// of darwin/amd64 the syscall is called sysctl instead of __sysctl. +const SYS___SYSCTL = SYS_SYSCTL diff --git a/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/syscall_darwin_arm.go b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/syscall_darwin_arm.go new file mode 100644 index 000000000..e47ffd739 --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/syscall_darwin_arm.go @@ -0,0 +1,73 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package unix + +import ( + "syscall" + "unsafe" +) + +func Getpagesize() int { return 4096 } + +func TimespecToNsec(ts Timespec) int64 { return int64(ts.Sec)*1e9 + int64(ts.Nsec) } + +func NsecToTimespec(nsec int64) (ts Timespec) { + ts.Sec = int32(nsec / 1e9) + ts.Nsec = int32(nsec % 1e9) + return +} + +func TimevalToNsec(tv Timeval) int64 { return int64(tv.Sec)*1e9 + int64(tv.Usec)*1e3 } + +func NsecToTimeval(nsec int64) (tv Timeval) { + nsec += 999 // round up to microsecond + tv.Usec = int32(nsec % 1e9 / 1e3) + tv.Sec = int32(nsec / 1e9) + return +} + +//sysnb gettimeofday(tp *Timeval) (sec int32, usec int32, err error) +func Gettimeofday(tv *Timeval) (err error) { + // The tv passed to gettimeofday must be non-nil + // but is otherwise unused. The answers come back + // in the two registers. 
+ sec, usec, err := gettimeofday(tv) + tv.Sec = int32(sec) + tv.Usec = int32(usec) + return err +} + +func SetKevent(k *Kevent_t, fd, mode, flags int) { + k.Ident = uint32(fd) + k.Filter = int16(mode) + k.Flags = uint16(flags) +} + +func (iov *Iovec) SetLen(length int) { + iov.Len = uint32(length) +} + +func (msghdr *Msghdr) SetControllen(length int) { + msghdr.Controllen = uint32(length) +} + +func (cmsg *Cmsghdr) SetLen(length int) { + cmsg.Len = uint32(length) +} + +func sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) { + var length = uint64(count) + + _, _, e1 := Syscall9(SYS_SENDFILE, uintptr(infd), uintptr(outfd), uintptr(*offset), uintptr(*offset>>32), uintptr(unsafe.Pointer(&length)), 0, 0, 0, 0) + + written = int(length) + + if e1 != 0 { + err = e1 + } + return +} + +func Syscall9(num, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r1, r2 uintptr, err syscall.Errno) // sic diff --git a/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/syscall_darwin_arm64.go b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/syscall_darwin_arm64.go new file mode 100644 index 000000000..2560a9599 --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/syscall_darwin_arm64.go @@ -0,0 +1,79 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build arm64,darwin + +package unix + +import ( + "syscall" + "unsafe" +) + +func Getpagesize() int { return 16384 } + +func TimespecToNsec(ts Timespec) int64 { return int64(ts.Sec)*1e9 + int64(ts.Nsec) } + +func NsecToTimespec(nsec int64) (ts Timespec) { + ts.Sec = nsec / 1e9 + ts.Nsec = nsec % 1e9 + return +} + +func TimevalToNsec(tv Timeval) int64 { return int64(tv.Sec)*1e9 + int64(tv.Usec)*1e3 } + +func NsecToTimeval(nsec int64) (tv Timeval) { + nsec += 999 // round up to microsecond + tv.Usec = int32(nsec % 1e9 / 1e3) + tv.Sec = int64(nsec / 1e9) + return +} + +//sysnb gettimeofday(tp *Timeval) (sec int64, usec int32, err error) +func Gettimeofday(tv *Timeval) (err error) { + // The tv passed to gettimeofday must be non-nil + // but is otherwise unused. The answers come back + // in the two registers. + sec, usec, err := gettimeofday(tv) + tv.Sec = sec + tv.Usec = usec + return err +} + +func SetKevent(k *Kevent_t, fd, mode, flags int) { + k.Ident = uint64(fd) + k.Filter = int16(mode) + k.Flags = uint16(flags) +} + +func (iov *Iovec) SetLen(length int) { + iov.Len = uint64(length) +} + +func (msghdr *Msghdr) SetControllen(length int) { + msghdr.Controllen = uint32(length) +} + +func (cmsg *Cmsghdr) SetLen(length int) { + cmsg.Len = uint32(length) +} + +func sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) { + var length = uint64(count) + + _, _, e1 := Syscall6(SYS_SENDFILE, uintptr(infd), uintptr(outfd), uintptr(*offset), uintptr(unsafe.Pointer(&length)), 0, 0) + + written = int(length) + + if e1 != 0 { + err = e1 + } + return +} + +func Syscall9(num, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r1, r2 uintptr, err syscall.Errno) // sic + +// SYS___SYSCTL is used by syscall_bsd.go for all BSDs, but in modern versions +// of darwin/arm64 the syscall is called sysctl instead of __sysctl. 
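Each darwin architecture file defines SetKevent with the field widths its Kevent_t uses, so callers can fill events portably and hand them to the Kevent wrapper from syscall_bsd.go. A minimal sketch of a kqueue read-watch on stdin, assuming golang.org/x/sys/unix as the import path:

package main

import (
	"fmt"

	"golang.org/x/sys/unix" // assumed import path; the vendored copy mirrors upstream
)

func main() {
	kq, err := unix.Kqueue()
	if err != nil {
		panic(err)
	}
	defer unix.Close(kq)

	// SetKevent fills the architecture-specific Kevent_t fields for us.
	var ev unix.Kevent_t
	unix.SetKevent(&ev, 0 /* stdin */, unix.EVFILT_READ, unix.EV_ADD)

	// Register the change and block until one event is ready (nil timeout).
	events := make([]unix.Kevent_t, 1)
	n, err := unix.Kevent(kq, []unix.Kevent_t{ev}, events, nil)
	if err != nil {
		panic(err)
	}
	fmt.Println("events ready:", n)
}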
+const SYS___SYSCTL = SYS_SYSCTL diff --git a/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/syscall_dragonfly.go b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/syscall_dragonfly.go new file mode 100644 index 000000000..fbbe0dce2 --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/syscall_dragonfly.go @@ -0,0 +1,411 @@ +// Copyright 2009,2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// FreeBSD system calls. +// This file is compiled as ordinary Go code, +// but it is also input to mksyscall, +// which parses the //sys lines and generates system call stubs. +// Note that sometimes we use a lowercase //sys name and wrap +// it in our own nicer implementation, either here or in +// syscall_bsd.go or syscall_unix.go. + +package unix + +import "unsafe" + +type SockaddrDatalink struct { + Len uint8 + Family uint8 + Index uint16 + Type uint8 + Nlen uint8 + Alen uint8 + Slen uint8 + Data [12]int8 + Rcf uint16 + Route [16]uint16 + raw RawSockaddrDatalink +} + +// Translate "kern.hostname" to []_C_int{0,1,2,3}. +func nametomib(name string) (mib []_C_int, err error) { + const siz = unsafe.Sizeof(mib[0]) + + // NOTE(rsc): It seems strange to set the buffer to have + // size CTL_MAXNAME+2 but use only CTL_MAXNAME + // as the size. I don't know why the +2 is here, but the + // kernel uses +2 for its own implementation of this function. + // I am scared that if we don't include the +2 here, the kernel + // will silently write 2 words farther than we specify + // and we'll get memory corruption. + var buf [CTL_MAXNAME + 2]_C_int + n := uintptr(CTL_MAXNAME) * siz + + p := (*byte)(unsafe.Pointer(&buf[0])) + bytes, err := ByteSliceFromString(name) + if err != nil { + return nil, err + } + + // Magic sysctl: "setting" 0.3 to a string name + // lets you read back the array of integers form. + if err = sysctl([]_C_int{0, 3}, p, &n, &bytes[0], uintptr(len(name))); err != nil { + return nil, err + } + return buf[0 : n/siz], nil +} + +// ParseDirent parses up to max directory entries in buf, +// appending the names to names. It returns the number +// bytes consumed from buf, the number of entries added +// to names, and the new names slice. +func ParseDirent(buf []byte, max int, names []string) (consumed int, count int, newnames []string) { + origlen := len(buf) + for max != 0 && len(buf) > 0 { + dirent := (*Dirent)(unsafe.Pointer(&buf[0])) + reclen := int(16+dirent.Namlen+1+7) & ^7 + buf = buf[reclen:] + if dirent.Fileno == 0 { // File absent in directory. + continue + } + bytes := (*[10000]byte)(unsafe.Pointer(&dirent.Name[0])) + var name = string(bytes[0:dirent.Namlen]) + if name == "." || name == ".." 
{ // Useless names + continue + } + max-- + count++ + names = append(names, name) + } + return origlen - len(buf), count, names +} + +//sysnb pipe() (r int, w int, err error) + +func Pipe(p []int) (err error) { + if len(p) != 2 { + return EINVAL + } + p[0], p[1], err = pipe() + return +} + +//sys extpread(fd int, p []byte, flags int, offset int64) (n int, err error) +func Pread(fd int, p []byte, offset int64) (n int, err error) { + return extpread(fd, p, 0, offset) +} + +//sys extpwrite(fd int, p []byte, flags int, offset int64) (n int, err error) +func Pwrite(fd int, p []byte, offset int64) (n int, err error) { + return extpwrite(fd, p, 0, offset) +} + +func Getfsstat(buf []Statfs_t, flags int) (n int, err error) { + var _p0 unsafe.Pointer + var bufsize uintptr + if len(buf) > 0 { + _p0 = unsafe.Pointer(&buf[0]) + bufsize = unsafe.Sizeof(Statfs_t{}) * uintptr(len(buf)) + } + r0, _, e1 := Syscall(SYS_GETFSSTAT, uintptr(_p0), bufsize, uintptr(flags)) + n = int(r0) + if e1 != 0 { + err = e1 + } + return +} + +/* + * Exposed directly + */ +//sys Access(path string, mode uint32) (err error) +//sys Adjtime(delta *Timeval, olddelta *Timeval) (err error) +//sys Chdir(path string) (err error) +//sys Chflags(path string, flags int) (err error) +//sys Chmod(path string, mode uint32) (err error) +//sys Chown(path string, uid int, gid int) (err error) +//sys Chroot(path string) (err error) +//sys Close(fd int) (err error) +//sys Dup(fd int) (nfd int, err error) +//sys Dup2(from int, to int) (err error) +//sys Exit(code int) +//sys Fchdir(fd int) (err error) +//sys Fchflags(fd int, flags int) (err error) +//sys Fchmod(fd int, mode uint32) (err error) +//sys Fchown(fd int, uid int, gid int) (err error) +//sys Flock(fd int, how int) (err error) +//sys Fpathconf(fd int, name int) (val int, err error) +//sys Fstat(fd int, stat *Stat_t) (err error) +//sys Fstatfs(fd int, stat *Statfs_t) (err error) +//sys Fsync(fd int) (err error) +//sys Ftruncate(fd int, length int64) (err error) +//sys Getdirentries(fd int, buf []byte, basep *uintptr) (n int, err error) +//sys Getdtablesize() (size int) +//sysnb Getegid() (egid int) +//sysnb Geteuid() (uid int) +//sysnb Getgid() (gid int) +//sysnb Getpgid(pid int) (pgid int, err error) +//sysnb Getpgrp() (pgrp int) +//sysnb Getpid() (pid int) +//sysnb Getppid() (ppid int) +//sys Getpriority(which int, who int) (prio int, err error) +//sysnb Getrlimit(which int, lim *Rlimit) (err error) +//sysnb Getrusage(who int, rusage *Rusage) (err error) +//sysnb Getsid(pid int) (sid int, err error) +//sysnb Gettimeofday(tv *Timeval) (err error) +//sysnb Getuid() (uid int) +//sys Issetugid() (tainted bool) +//sys Kill(pid int, signum syscall.Signal) (err error) +//sys Kqueue() (fd int, err error) +//sys Lchown(path string, uid int, gid int) (err error) +//sys Link(path string, link string) (err error) +//sys Listen(s int, backlog int) (err error) +//sys Lstat(path string, stat *Stat_t) (err error) +//sys Mkdir(path string, mode uint32) (err error) +//sys Mkfifo(path string, mode uint32) (err error) +//sys Mknod(path string, mode uint32, dev int) (err error) +//sys Mlock(b []byte) (err error) +//sys Mlockall(flags int) (err error) +//sys Mprotect(b []byte, prot int) (err error) +//sys Munlock(b []byte) (err error) +//sys Munlockall() (err error) +//sys Nanosleep(time *Timespec, leftover *Timespec) (err error) +//sys Open(path string, mode int, perm uint32) (fd int, err error) +//sys Pathconf(path string, name int) (val int, err error) +//sys read(fd int, p []byte) (n int, err error) 
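The Getfsstat wrapper above follows the common two-call sizing pattern: a nil slice only queries the entry count, and a second call with a correctly sized slice receives the data (the same pattern the syscall_bsd_test.go file earlier exercises). A minimal sketch, assuming golang.org/x/sys/unix as the import path and MNT_WAIT = 1 as in that test:

package main

import (
	"fmt"

	"golang.org/x/sys/unix" // assumed import path; the vendored copy mirrors upstream
)

func main() {
	// First call with a nil slice just asks how many entries exist.
	n, err := unix.Getfsstat(nil, 1 /* MNT_WAIT, as in the test file */)
	if err != nil {
		panic(err)
	}

	// Second call fills a slice of exactly that size.
	fs := make([]unix.Statfs_t, n)
	n, err = unix.Getfsstat(fs, 1)
	if err != nil {
		panic(err)
	}
	fmt.Println("mounted filesystems:", n)
}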
+//sys Readlink(path string, buf []byte) (n int, err error) +//sys Rename(from string, to string) (err error) +//sys Revoke(path string) (err error) +//sys Rmdir(path string) (err error) +//sys Seek(fd int, offset int64, whence int) (newoffset int64, err error) = SYS_LSEEK +//sys Select(n int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (err error) +//sysnb Setegid(egid int) (err error) +//sysnb Seteuid(euid int) (err error) +//sysnb Setgid(gid int) (err error) +//sys Setlogin(name string) (err error) +//sysnb Setpgid(pid int, pgid int) (err error) +//sys Setpriority(which int, who int, prio int) (err error) +//sysnb Setregid(rgid int, egid int) (err error) +//sysnb Setreuid(ruid int, euid int) (err error) +//sysnb Setresgid(rgid int, egid int, sgid int) (err error) +//sysnb Setresuid(ruid int, euid int, suid int) (err error) +//sysnb Setrlimit(which int, lim *Rlimit) (err error) +//sysnb Setsid() (pid int, err error) +//sysnb Settimeofday(tp *Timeval) (err error) +//sysnb Setuid(uid int) (err error) +//sys Stat(path string, stat *Stat_t) (err error) +//sys Statfs(path string, stat *Statfs_t) (err error) +//sys Symlink(path string, link string) (err error) +//sys Sync() (err error) +//sys Truncate(path string, length int64) (err error) +//sys Umask(newmask int) (oldmask int) +//sys Undelete(path string) (err error) +//sys Unlink(path string) (err error) +//sys Unmount(path string, flags int) (err error) +//sys write(fd int, p []byte) (n int, err error) +//sys mmap(addr uintptr, length uintptr, prot int, flag int, fd int, pos int64) (ret uintptr, err error) +//sys munmap(addr uintptr, length uintptr) (err error) +//sys readlen(fd int, buf *byte, nbuf int) (n int, err error) = SYS_READ +//sys writelen(fd int, buf *byte, nbuf int) (n int, err error) = SYS_WRITE + +/* + * Unimplemented + * TODO(jsing): Update this list for DragonFly. 
+ */ +// Profil +// Sigaction +// Sigprocmask +// Getlogin +// Sigpending +// Sigaltstack +// Ioctl +// Reboot +// Execve +// Vfork +// Sbrk +// Sstk +// Ovadvise +// Mincore +// Setitimer +// Swapon +// Select +// Sigsuspend +// Readv +// Writev +// Nfssvc +// Getfh +// Quotactl +// Mount +// Csops +// Waitid +// Add_profil +// Kdebug_trace +// Sigreturn +// Mmap +// Atsocket +// Kqueue_from_portset_np +// Kqueue_portset +// Getattrlist +// Setattrlist +// Getdirentriesattr +// Searchfs +// Delete +// Copyfile +// Poll +// Watchevent +// Waitevent +// Modwatch +// Getxattr +// Fgetxattr +// Setxattr +// Fsetxattr +// Removexattr +// Fremovexattr +// Listxattr +// Flistxattr +// Fsctl +// Initgroups +// Posix_spawn +// Nfsclnt +// Fhopen +// Minherit +// Semsys +// Msgsys +// Shmsys +// Semctl +// Semget +// Semop +// Msgctl +// Msgget +// Msgsnd +// Msgrcv +// Shmat +// Shmctl +// Shmdt +// Shmget +// Shm_open +// Shm_unlink +// Sem_open +// Sem_close +// Sem_unlink +// Sem_wait +// Sem_trywait +// Sem_post +// Sem_getvalue +// Sem_init +// Sem_destroy +// Open_extended +// Umask_extended +// Stat_extended +// Lstat_extended +// Fstat_extended +// Chmod_extended +// Fchmod_extended +// Access_extended +// Settid +// Gettid +// Setsgroups +// Getsgroups +// Setwgroups +// Getwgroups +// Mkfifo_extended +// Mkdir_extended +// Identitysvc +// Shared_region_check_np +// Shared_region_map_np +// __pthread_mutex_destroy +// __pthread_mutex_init +// __pthread_mutex_lock +// __pthread_mutex_trylock +// __pthread_mutex_unlock +// __pthread_cond_init +// __pthread_cond_destroy +// __pthread_cond_broadcast +// __pthread_cond_signal +// Setsid_with_pid +// __pthread_cond_timedwait +// Aio_fsync +// Aio_return +// Aio_suspend +// Aio_cancel +// Aio_error +// Aio_read +// Aio_write +// Lio_listio +// __pthread_cond_wait +// Iopolicysys +// __pthread_kill +// __pthread_sigmask +// __sigwait +// __disable_threadsignal +// __pthread_markcancel +// __pthread_canceled +// __semwait_signal +// Proc_info +// Stat64_extended +// Lstat64_extended +// Fstat64_extended +// __pthread_chdir +// __pthread_fchdir +// Audit +// Auditon +// Getauid +// Setauid +// Getaudit +// Setaudit +// Getaudit_addr +// Setaudit_addr +// Auditctl +// Bsdthread_create +// Bsdthread_terminate +// Stack_snapshot +// Bsdthread_register +// Workq_open +// Workq_ops +// __mac_execve +// __mac_syscall +// __mac_get_file +// __mac_set_file +// __mac_get_link +// __mac_set_link +// __mac_get_proc +// __mac_set_proc +// __mac_get_fd +// __mac_set_fd +// __mac_get_pid +// __mac_get_lcid +// __mac_get_lctx +// __mac_set_lctx +// Setlcid +// Read_nocancel +// Write_nocancel +// Open_nocancel +// Close_nocancel +// Wait4_nocancel +// Recvmsg_nocancel +// Sendmsg_nocancel +// Recvfrom_nocancel +// Accept_nocancel +// Msync_nocancel +// Fcntl_nocancel +// Select_nocancel +// Fsync_nocancel +// Connect_nocancel +// Sigsuspend_nocancel +// Readv_nocancel +// Writev_nocancel +// Sendto_nocancel +// Pread_nocancel +// Pwrite_nocancel +// Waitid_nocancel +// Poll_nocancel +// Msgsnd_nocancel +// Msgrcv_nocancel +// Sem_wait_nocancel +// Aio_suspend_nocancel +// __sigwait_nocancel +// __semwait_signal_nocancel +// __mac_mount +// __mac_get_mount +// __mac_getfsstat diff --git a/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/syscall_dragonfly_386.go b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/syscall_dragonfly_386.go new file mode 100644 index 000000000..41c2e6978 --- /dev/null +++ 
b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/syscall_dragonfly_386.go @@ -0,0 +1,63 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build 386,dragonfly + +package unix + +import ( + "syscall" + "unsafe" +) + +func Getpagesize() int { return 4096 } + +func TimespecToNsec(ts Timespec) int64 { return int64(ts.Sec)*1e9 + int64(ts.Nsec) } + +func NsecToTimespec(nsec int64) (ts Timespec) { + ts.Sec = int32(nsec / 1e9) + ts.Nsec = int32(nsec % 1e9) + return +} + +func TimevalToNsec(tv Timeval) int64 { return int64(tv.Sec)*1e9 + int64(tv.Usec)*1e3 } + +func NsecToTimeval(nsec int64) (tv Timeval) { + nsec += 999 // round up to microsecond + tv.Usec = int32(nsec % 1e9 / 1e3) + tv.Sec = int32(nsec / 1e9) + return +} + +func SetKevent(k *Kevent_t, fd, mode, flags int) { + k.Ident = uint32(fd) + k.Filter = int16(mode) + k.Flags = uint16(flags) +} + +func (iov *Iovec) SetLen(length int) { + iov.Len = uint32(length) +} + +func (msghdr *Msghdr) SetControllen(length int) { + msghdr.Controllen = uint32(length) +} + +func (cmsg *Cmsghdr) SetLen(length int) { + cmsg.Len = uint32(length) +} + +func sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) { + var writtenOut uint64 = 0 + _, _, e1 := Syscall9(SYS_SENDFILE, uintptr(infd), uintptr(outfd), uintptr(*offset), uintptr((*offset)>>32), uintptr(count), 0, uintptr(unsafe.Pointer(&writtenOut)), 0, 0) + + written = int(writtenOut) + + if e1 != 0 { + err = e1 + } + return +} + +func Syscall9(num, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r1, r2 uintptr, err syscall.Errno) diff --git a/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/syscall_dragonfly_amd64.go b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/syscall_dragonfly_amd64.go new file mode 100644 index 000000000..2ed92590e --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/syscall_dragonfly_amd64.go @@ -0,0 +1,63 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
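+
+// The helpers below mirror the 386 file above, but with the 64-bit field
+// widths of the amd64 types: Kevent_t.Ident and Iovec.Len are uint64, the
+// time conversions store 64-bit seconds directly, and sendfile passes the
+// whole 64-bit offset in a single register argument rather than the two
+// 32-bit halves used on 386.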
+ +// +build amd64,dragonfly + +package unix + +import ( + "syscall" + "unsafe" +) + +func Getpagesize() int { return 4096 } + +func TimespecToNsec(ts Timespec) int64 { return int64(ts.Sec)*1e9 + int64(ts.Nsec) } + +func NsecToTimespec(nsec int64) (ts Timespec) { + ts.Sec = nsec / 1e9 + ts.Nsec = nsec % 1e9 + return +} + +func TimevalToNsec(tv Timeval) int64 { return int64(tv.Sec)*1e9 + int64(tv.Usec)*1e3 } + +func NsecToTimeval(nsec int64) (tv Timeval) { + nsec += 999 // round up to microsecond + tv.Usec = nsec % 1e9 / 1e3 + tv.Sec = int64(nsec / 1e9) + return +} + +func SetKevent(k *Kevent_t, fd, mode, flags int) { + k.Ident = uint64(fd) + k.Filter = int16(mode) + k.Flags = uint16(flags) +} + +func (iov *Iovec) SetLen(length int) { + iov.Len = uint64(length) +} + +func (msghdr *Msghdr) SetControllen(length int) { + msghdr.Controllen = uint32(length) +} + +func (cmsg *Cmsghdr) SetLen(length int) { + cmsg.Len = uint32(length) +} + +func sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) { + var writtenOut uint64 = 0 + _, _, e1 := Syscall9(SYS_SENDFILE, uintptr(infd), uintptr(outfd), uintptr(*offset), uintptr(count), 0, uintptr(unsafe.Pointer(&writtenOut)), 0, 0, 0) + + written = int(writtenOut) + + if e1 != 0 { + err = e1 + } + return +} + +func Syscall9(num, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r1, r2 uintptr, err syscall.Errno) diff --git a/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/syscall_freebsd.go b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/syscall_freebsd.go new file mode 100644 index 000000000..ec56ed608 --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/syscall_freebsd.go @@ -0,0 +1,682 @@ +// Copyright 2009,2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// FreeBSD system calls. +// This file is compiled as ordinary Go code, +// but it is also input to mksyscall, +// which parses the //sys lines and generates system call stubs. +// Note that sometimes we use a lowercase //sys name and wrap +// it in our own nicer implementation, either here or in +// syscall_bsd.go or syscall_unix.go. + +package unix + +import "unsafe" + +type SockaddrDatalink struct { + Len uint8 + Family uint8 + Index uint16 + Type uint8 + Nlen uint8 + Alen uint8 + Slen uint8 + Data [46]int8 + raw RawSockaddrDatalink +} + +// Translate "kern.hostname" to []_C_int{0,1,2,3}. +func nametomib(name string) (mib []_C_int, err error) { + const siz = unsafe.Sizeof(mib[0]) + + // NOTE(rsc): It seems strange to set the buffer to have + // size CTL_MAXNAME+2 but use only CTL_MAXNAME + // as the size. I don't know why the +2 is here, but the + // kernel uses +2 for its own implementation of this function. + // I am scared that if we don't include the +2 here, the kernel + // will silently write 2 words farther than we specify + // and we'll get memory corruption. + var buf [CTL_MAXNAME + 2]_C_int + n := uintptr(CTL_MAXNAME) * siz + + p := (*byte)(unsafe.Pointer(&buf[0])) + bytes, err := ByteSliceFromString(name) + if err != nil { + return nil, err + } + + // Magic sysctl: "setting" 0.3 to a string name + // lets you read back the array of integers form. + if err = sysctl([]_C_int{0, 3}, p, &n, &bytes[0], uintptr(len(name))); err != nil { + return nil, err + } + return buf[0 : n/siz], nil +} + +// ParseDirent parses up to max directory entries in buf, +// appending the names to names. 
It returns the number of +// bytes consumed from buf, the number of entries added +// to names, and the new names slice. +func ParseDirent(buf []byte, max int, names []string) (consumed int, count int, newnames []string) { + origlen := len(buf) + for max != 0 && len(buf) > 0 { + dirent := (*Dirent)(unsafe.Pointer(&buf[0])) + if dirent.Reclen == 0 { + buf = nil + break + } + buf = buf[dirent.Reclen:] + if dirent.Fileno == 0 { // File absent in directory. + continue + } + bytes := (*[10000]byte)(unsafe.Pointer(&dirent.Name[0])) + var name = string(bytes[0:dirent.Namlen]) + if name == "." || name == ".." { // Useless names + continue + } + max-- + count++ + names = append(names, name) + } + return origlen - len(buf), count, names +} + +//sysnb pipe() (r int, w int, err error) + +func Pipe(p []int) (err error) { + if len(p) != 2 { + return EINVAL + } + p[0], p[1], err = pipe() + return +} + +func GetsockoptIPMreqn(fd, level, opt int) (*IPMreqn, error) { + var value IPMreqn + vallen := _Socklen(SizeofIPMreqn) + errno := getsockopt(fd, level, opt, unsafe.Pointer(&value), &vallen) + return &value, errno +} + +func SetsockoptIPMreqn(fd, level, opt int, mreq *IPMreqn) (err error) { + return setsockopt(fd, level, opt, unsafe.Pointer(mreq), unsafe.Sizeof(*mreq)) +} + +func Accept4(fd, flags int) (nfd int, sa Sockaddr, err error) { + var rsa RawSockaddrAny + var len _Socklen = SizeofSockaddrAny + nfd, err = accept4(fd, &rsa, &len, flags) + if err != nil { + return + } + if len > SizeofSockaddrAny { + panic("RawSockaddrAny too small") + } + sa, err = anyToSockaddr(&rsa) + if err != nil { + Close(nfd) + nfd = 0 + } + return +} + +func Getfsstat(buf []Statfs_t, flags int) (n int, err error) { + var _p0 unsafe.Pointer + var bufsize uintptr + if len(buf) > 0 { + _p0 = unsafe.Pointer(&buf[0]) + bufsize = unsafe.Sizeof(Statfs_t{}) * uintptr(len(buf)) + } + r0, _, e1 := Syscall(SYS_GETFSSTAT, uintptr(_p0), bufsize, uintptr(flags)) + n = int(r0) + if e1 != 0 { + err = e1 + } + return +} + +// Derive extattr namespace and attribute name + +func xattrnamespace(fullattr string) (ns int, attr string, err error) { + s := -1 + for idx, val := range fullattr { + if val == '.'
{ + s = idx + break + } + } + + if s == -1 { + return -1, "", ENOATTR + } + + namespace := fullattr[0:s] + attr = fullattr[s+1:] + + switch namespace { + case "user": + return EXTATTR_NAMESPACE_USER, attr, nil + case "system": + return EXTATTR_NAMESPACE_SYSTEM, attr, nil + default: + return -1, "", ENOATTR + } +} + +func initxattrdest(dest []byte, idx int) (d unsafe.Pointer) { + if len(dest) > idx { + return unsafe.Pointer(&dest[idx]) + } else { + return unsafe.Pointer(_zero) + } +} + +// FreeBSD implements its own syscalls to handle extended attributes + +func Getxattr(file string, attr string, dest []byte) (sz int, err error) { + d := initxattrdest(dest, 0) + destsize := len(dest) + + nsid, a, err := xattrnamespace(attr) + if err != nil { + return -1, err + } + + return ExtattrGetFile(file, nsid, a, uintptr(d), destsize) +} + +func Fgetxattr(fd int, attr string, dest []byte) (sz int, err error) { + d := initxattrdest(dest, 0) + destsize := len(dest) + + nsid, a, err := xattrnamespace(attr) + if err != nil { + return -1, err + } + + return ExtattrGetFd(fd, nsid, a, uintptr(d), destsize) +} + +func Lgetxattr(link string, attr string, dest []byte) (sz int, err error) { + d := initxattrdest(dest, 0) + destsize := len(dest) + + nsid, a, err := xattrnamespace(attr) + if err != nil { + return -1, err + } + + return ExtattrGetLink(link, nsid, a, uintptr(d), destsize) +} + +// flags are unused on FreeBSD + +func Fsetxattr(fd int, attr string, data []byte, flags int) (err error) { + d := unsafe.Pointer(&data[0]) + datasiz := len(data) + + nsid, a, err := xattrnamespace(attr) + if err != nil { + return + } + + _, err = ExtattrSetFd(fd, nsid, a, uintptr(d), datasiz) + return +} + +func Setxattr(file string, attr string, data []byte, flags int) (err error) { + d := unsafe.Pointer(&data[0]) + datasiz := len(data) + + nsid, a, err := xattrnamespace(attr) + if err != nil { + return + } + + _, err = ExtattrSetFile(file, nsid, a, uintptr(d), datasiz) + return +} + +func Lsetxattr(link string, attr string, data []byte, flags int) (err error) { + d := unsafe.Pointer(&data[0]) + datasiz := len(data) + + nsid, a, err := xattrnamespace(attr) + if err != nil { + return + } + + _, err = ExtattrSetLink(link, nsid, a, uintptr(d), datasiz) + return +} + +func Removexattr(file string, attr string) (err error) { + nsid, a, err := xattrnamespace(attr) + if err != nil { + return + } + + err = ExtattrDeleteFile(file, nsid, a) + return +} + +func Fremovexattr(fd int, attr string) (err error) { + nsid, a, err := xattrnamespace(attr) + if err != nil { + return + } + + err = ExtattrDeleteFd(fd, nsid, a) + return +} + +func Lremovexattr(link string, attr string) (err error) { + nsid, a, err := xattrnamespace(attr) + if err != nil { + return + } + + err = ExtattrDeleteLink(link, nsid, a) + return +} + +func Listxattr(file string, dest []byte) (sz int, err error) { + d := initxattrdest(dest, 0) + destsiz := len(dest) + + // FreeBSD won't allow you to list xattrs from multiple namespaces + s := 0 + var e error + for _, nsid := range [...]int{EXTATTR_NAMESPACE_USER, EXTATTR_NAMESPACE_SYSTEM} { + stmp, e := ExtattrListFile(file, nsid, uintptr(d), destsiz) + + /* Errors accessing system attrs are ignored so that + * we can implement the Linux-like behavior of omitting errors that + * we don't have read permissions on + * + * Linux will still error if we ask for user attributes on a file that + * we don't have read permissions on, so don't ignore those errors + */ + if e != nil && e == EPERM && nsid != EXTATTR_NAMESPACE_USER { + e 
= nil + continue + } else if e != nil { + return s, e + } + + s += stmp + destsiz -= s + if destsiz < 0 { + destsiz = 0 + } + d = initxattrdest(dest, s) + } + + return s, e +} + +func Flistxattr(fd int, dest []byte) (sz int, err error) { + d := initxattrdest(dest, 0) + destsiz := len(dest) + + s := 0 + var e error + for _, nsid := range [...]int{EXTATTR_NAMESPACE_USER, EXTATTR_NAMESPACE_SYSTEM} { + stmp, e := ExtattrListFd(fd, nsid, uintptr(d), destsiz) + if e != nil && e == EPERM && nsid != EXTATTR_NAMESPACE_USER { + e = nil + continue + } else if e != nil { + return s, e + } + + s += stmp + destsiz -= s + if destsiz < 0 { + destsiz = 0 + } + d = initxattrdest(dest, s) + } + + return s, e +} + +func Llistxattr(link string, dest []byte) (sz int, err error) { + d := initxattrdest(dest, 0) + destsiz := len(dest) + + s := 0 + var e error + for _, nsid := range [...]int{EXTATTR_NAMESPACE_USER, EXTATTR_NAMESPACE_SYSTEM} { + stmp, e := ExtattrListLink(link, nsid, uintptr(d), destsiz) + if e != nil && e == EPERM && nsid != EXTATTR_NAMESPACE_USER { + e = nil + continue + } else if e != nil { + return s, e + } + + s += stmp + destsiz -= s + if destsiz < 0 { + destsiz = 0 + } + d = initxattrdest(dest, s) + } + + return s, e +} + +/* + * Exposed directly + */ +//sys Access(path string, mode uint32) (err error) +//sys Adjtime(delta *Timeval, olddelta *Timeval) (err error) +//sys Chdir(path string) (err error) +//sys Chflags(path string, flags int) (err error) +//sys Chmod(path string, mode uint32) (err error) +//sys Chown(path string, uid int, gid int) (err error) +//sys Chroot(path string) (err error) +//sys Close(fd int) (err error) +//sys Dup(fd int) (nfd int, err error) +//sys Dup2(from int, to int) (err error) +//sys Exit(code int) +//sys ExtattrGetFd(fd int, attrnamespace int, attrname string, data uintptr, nbytes int) (ret int, err error) +//sys ExtattrSetFd(fd int, attrnamespace int, attrname string, data uintptr, nbytes int) (ret int, err error) +//sys ExtattrDeleteFd(fd int, attrnamespace int, attrname string) (err error) +//sys ExtattrListFd(fd int, attrnamespace int, data uintptr, nbytes int) (ret int, err error) +//sys ExtattrGetFile(file string, attrnamespace int, attrname string, data uintptr, nbytes int) (ret int, err error) +//sys ExtattrSetFile(file string, attrnamespace int, attrname string, data uintptr, nbytes int) (ret int, err error) +//sys ExtattrDeleteFile(file string, attrnamespace int, attrname string) (err error) +//sys ExtattrListFile(file string, attrnamespace int, data uintptr, nbytes int) (ret int, err error) +//sys ExtattrGetLink(link string, attrnamespace int, attrname string, data uintptr, nbytes int) (ret int, err error) +//sys ExtattrSetLink(link string, attrnamespace int, attrname string, data uintptr, nbytes int) (ret int, err error) +//sys ExtattrDeleteLink(link string, attrnamespace int, attrname string) (err error) +//sys ExtattrListLink(link string, attrnamespace int, data uintptr, nbytes int) (ret int, err error) +//sys Fadvise(fd int, offset int64, length int64, advice int) (err error) = SYS_POSIX_FADVISE +//sys Fchdir(fd int) (err error) +//sys Fchflags(fd int, flags int) (err error) +//sys Fchmod(fd int, mode uint32) (err error) +//sys Fchown(fd int, uid int, gid int) (err error) +//sys Flock(fd int, how int) (err error) +//sys Fpathconf(fd int, name int) (val int, err error) +//sys Fstat(fd int, stat *Stat_t) (err error) +//sys Fstatfs(fd int, stat *Statfs_t) (err error) +//sys Fsync(fd int) (err error) +//sys Ftruncate(fd int, length int64) (err error) 
+//sys Getdirentries(fd int, buf []byte, basep *uintptr) (n int, err error) +//sys Getdtablesize() (size int) +//sysnb Getegid() (egid int) +//sysnb Geteuid() (uid int) +//sysnb Getgid() (gid int) +//sysnb Getpgid(pid int) (pgid int, err error) +//sysnb Getpgrp() (pgrp int) +//sysnb Getpid() (pid int) +//sysnb Getppid() (ppid int) +//sys Getpriority(which int, who int) (prio int, err error) +//sysnb Getrlimit(which int, lim *Rlimit) (err error) +//sysnb Getrusage(who int, rusage *Rusage) (err error) +//sysnb Getsid(pid int) (sid int, err error) +//sysnb Gettimeofday(tv *Timeval) (err error) +//sysnb Getuid() (uid int) +//sys Issetugid() (tainted bool) +//sys Kill(pid int, signum syscall.Signal) (err error) +//sys Kqueue() (fd int, err error) +//sys Lchown(path string, uid int, gid int) (err error) +//sys Link(path string, link string) (err error) +//sys Listen(s int, backlog int) (err error) +//sys Lstat(path string, stat *Stat_t) (err error) +//sys Mkdir(path string, mode uint32) (err error) +//sys Mkfifo(path string, mode uint32) (err error) +//sys Mknod(path string, mode uint32, dev int) (err error) +//sys Mlock(b []byte) (err error) +//sys Mlockall(flags int) (err error) +//sys Mprotect(b []byte, prot int) (err error) +//sys Munlock(b []byte) (err error) +//sys Munlockall() (err error) +//sys Nanosleep(time *Timespec, leftover *Timespec) (err error) +//sys Open(path string, mode int, perm uint32) (fd int, err error) +//sys Pathconf(path string, name int) (val int, err error) +//sys Pread(fd int, p []byte, offset int64) (n int, err error) +//sys Pwrite(fd int, p []byte, offset int64) (n int, err error) +//sys read(fd int, p []byte) (n int, err error) +//sys Readlink(path string, buf []byte) (n int, err error) +//sys Rename(from string, to string) (err error) +//sys Revoke(path string) (err error) +//sys Rmdir(path string) (err error) +//sys Seek(fd int, offset int64, whence int) (newoffset int64, err error) = SYS_LSEEK +//sys Select(n int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (err error) +//sysnb Setegid(egid int) (err error) +//sysnb Seteuid(euid int) (err error) +//sysnb Setgid(gid int) (err error) +//sys Setlogin(name string) (err error) +//sysnb Setpgid(pid int, pgid int) (err error) +//sys Setpriority(which int, who int, prio int) (err error) +//sysnb Setregid(rgid int, egid int) (err error) +//sysnb Setreuid(ruid int, euid int) (err error) +//sysnb Setresgid(rgid int, egid int, sgid int) (err error) +//sysnb Setresuid(ruid int, euid int, suid int) (err error) +//sysnb Setrlimit(which int, lim *Rlimit) (err error) +//sysnb Setsid() (pid int, err error) +//sysnb Settimeofday(tp *Timeval) (err error) +//sysnb Setuid(uid int) (err error) +//sys Stat(path string, stat *Stat_t) (err error) +//sys Statfs(path string, stat *Statfs_t) (err error) +//sys Symlink(path string, link string) (err error) +//sys Sync() (err error) +//sys Truncate(path string, length int64) (err error) +//sys Umask(newmask int) (oldmask int) +//sys Undelete(path string) (err error) +//sys Unlink(path string) (err error) +//sys Unmount(path string, flags int) (err error) +//sys write(fd int, p []byte) (n int, err error) +//sys mmap(addr uintptr, length uintptr, prot int, flag int, fd int, pos int64) (ret uintptr, err error) +//sys munmap(addr uintptr, length uintptr) (err error) +//sys readlen(fd int, buf *byte, nbuf int) (n int, err error) = SYS_READ +//sys writelen(fd int, buf *byte, nbuf int) (n int, err error) = SYS_WRITE +//sys accept4(fd int, rsa *RawSockaddrAny, addrlen *_Socklen, flags int) (nfd 
int, err error) + +/* + * Unimplemented + */ +// Profil +// Sigaction +// Sigprocmask +// Getlogin +// Sigpending +// Sigaltstack +// Ioctl +// Reboot +// Execve +// Vfork +// Sbrk +// Sstk +// Ovadvise +// Mincore +// Setitimer +// Swapon +// Select +// Sigsuspend +// Readv +// Writev +// Nfssvc +// Getfh +// Quotactl +// Mount +// Csops +// Waitid +// Add_profil +// Kdebug_trace +// Sigreturn +// Mmap +// Mlock +// Munlock +// Atsocket +// Kqueue_from_portset_np +// Kqueue_portset +// Getattrlist +// Setattrlist +// Getdirentriesattr +// Searchfs +// Delete +// Copyfile +// Poll +// Watchevent +// Waitevent +// Modwatch +// Getxattr +// Fgetxattr +// Setxattr +// Fsetxattr +// Removexattr +// Fremovexattr +// Listxattr +// Flistxattr +// Fsctl +// Initgroups +// Posix_spawn +// Nfsclnt +// Fhopen +// Minherit +// Semsys +// Msgsys +// Shmsys +// Semctl +// Semget +// Semop +// Msgctl +// Msgget +// Msgsnd +// Msgrcv +// Shmat +// Shmctl +// Shmdt +// Shmget +// Shm_open +// Shm_unlink +// Sem_open +// Sem_close +// Sem_unlink +// Sem_wait +// Sem_trywait +// Sem_post +// Sem_getvalue +// Sem_init +// Sem_destroy +// Open_extended +// Umask_extended +// Stat_extended +// Lstat_extended +// Fstat_extended +// Chmod_extended +// Fchmod_extended +// Access_extended +// Settid +// Gettid +// Setsgroups +// Getsgroups +// Setwgroups +// Getwgroups +// Mkfifo_extended +// Mkdir_extended +// Identitysvc +// Shared_region_check_np +// Shared_region_map_np +// __pthread_mutex_destroy +// __pthread_mutex_init +// __pthread_mutex_lock +// __pthread_mutex_trylock +// __pthread_mutex_unlock +// __pthread_cond_init +// __pthread_cond_destroy +// __pthread_cond_broadcast +// __pthread_cond_signal +// Setsid_with_pid +// __pthread_cond_timedwait +// Aio_fsync +// Aio_return +// Aio_suspend +// Aio_cancel +// Aio_error +// Aio_read +// Aio_write +// Lio_listio +// __pthread_cond_wait +// Iopolicysys +// Mlockall +// Munlockall +// __pthread_kill +// __pthread_sigmask +// __sigwait +// __disable_threadsignal +// __pthread_markcancel +// __pthread_canceled +// __semwait_signal +// Proc_info +// Stat64_extended +// Lstat64_extended +// Fstat64_extended +// __pthread_chdir +// __pthread_fchdir +// Audit +// Auditon +// Getauid +// Setauid +// Getaudit +// Setaudit +// Getaudit_addr +// Setaudit_addr +// Auditctl +// Bsdthread_create +// Bsdthread_terminate +// Stack_snapshot +// Bsdthread_register +// Workq_open +// Workq_ops +// __mac_execve +// __mac_syscall +// __mac_get_file +// __mac_set_file +// __mac_get_link +// __mac_set_link +// __mac_get_proc +// __mac_set_proc +// __mac_get_fd +// __mac_set_fd +// __mac_get_pid +// __mac_get_lcid +// __mac_get_lctx +// __mac_set_lctx +// Setlcid +// Read_nocancel +// Write_nocancel +// Open_nocancel +// Close_nocancel +// Wait4_nocancel +// Recvmsg_nocancel +// Sendmsg_nocancel +// Recvfrom_nocancel +// Accept_nocancel +// Msync_nocancel +// Fcntl_nocancel +// Select_nocancel +// Fsync_nocancel +// Connect_nocancel +// Sigsuspend_nocancel +// Readv_nocancel +// Writev_nocancel +// Sendto_nocancel +// Pread_nocancel +// Pwrite_nocancel +// Waitid_nocancel +// Poll_nocancel +// Msgsnd_nocancel +// Msgrcv_nocancel +// Sem_wait_nocancel +// Aio_suspend_nocancel +// __sigwait_nocancel +// __semwait_signal_nocancel +// __mac_mount +// __mac_get_mount +// __mac_getfsstat diff --git a/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/syscall_freebsd_386.go 
b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/syscall_freebsd_386.go new file mode 100644 index 000000000..6255d40ff --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/syscall_freebsd_386.go @@ -0,0 +1,63 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build 386,freebsd + +package unix + +import ( + "syscall" + "unsafe" +) + +func Getpagesize() int { return 4096 } + +func TimespecToNsec(ts Timespec) int64 { return int64(ts.Sec)*1e9 + int64(ts.Nsec) } + +func NsecToTimespec(nsec int64) (ts Timespec) { + ts.Sec = int32(nsec / 1e9) + ts.Nsec = int32(nsec % 1e9) + return +} + +func TimevalToNsec(tv Timeval) int64 { return int64(tv.Sec)*1e9 + int64(tv.Usec)*1e3 } + +func NsecToTimeval(nsec int64) (tv Timeval) { + nsec += 999 // round up to microsecond + tv.Usec = int32(nsec % 1e9 / 1e3) + tv.Sec = int32(nsec / 1e9) + return +} + +func SetKevent(k *Kevent_t, fd, mode, flags int) { + k.Ident = uint32(fd) + k.Filter = int16(mode) + k.Flags = uint16(flags) +} + +func (iov *Iovec) SetLen(length int) { + iov.Len = uint32(length) +} + +func (msghdr *Msghdr) SetControllen(length int) { + msghdr.Controllen = uint32(length) +} + +func (cmsg *Cmsghdr) SetLen(length int) { + cmsg.Len = uint32(length) +} + +func sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) { + var writtenOut uint64 = 0 + _, _, e1 := Syscall9(SYS_SENDFILE, uintptr(infd), uintptr(outfd), uintptr(*offset), uintptr((*offset)>>32), uintptr(count), 0, uintptr(unsafe.Pointer(&writtenOut)), 0, 0) + + written = int(writtenOut) + + if e1 != 0 { + err = e1 + } + return +} + +func Syscall9(num, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r1, r2 uintptr, err syscall.Errno) diff --git a/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/syscall_freebsd_amd64.go b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/syscall_freebsd_amd64.go new file mode 100644 index 000000000..8b395d596 --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/syscall_freebsd_amd64.go @@ -0,0 +1,63 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
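+
+// Syscall9 is declared at the bottom of this file (and of its sibling
+// per-arch files) without a Go body; its implementation is supplied in
+// assembly elsewhere in the package. sendfile needs it because the raw
+// SYS_SENDFILE trap takes more arguments than Syscall or Syscall6 carry.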
+ +// +build amd64,freebsd + +package unix + +import ( + "syscall" + "unsafe" +) + +func Getpagesize() int { return 4096 } + +func TimespecToNsec(ts Timespec) int64 { return int64(ts.Sec)*1e9 + int64(ts.Nsec) } + +func NsecToTimespec(nsec int64) (ts Timespec) { + ts.Sec = nsec / 1e9 + ts.Nsec = nsec % 1e9 + return +} + +func TimevalToNsec(tv Timeval) int64 { return int64(tv.Sec)*1e9 + int64(tv.Usec)*1e3 } + +func NsecToTimeval(nsec int64) (tv Timeval) { + nsec += 999 // round up to microsecond + tv.Usec = nsec % 1e9 / 1e3 + tv.Sec = int64(nsec / 1e9) + return +} + +func SetKevent(k *Kevent_t, fd, mode, flags int) { + k.Ident = uint64(fd) + k.Filter = int16(mode) + k.Flags = uint16(flags) +} + +func (iov *Iovec) SetLen(length int) { + iov.Len = uint64(length) +} + +func (msghdr *Msghdr) SetControllen(length int) { + msghdr.Controllen = uint32(length) +} + +func (cmsg *Cmsghdr) SetLen(length int) { + cmsg.Len = uint32(length) +} + +func sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) { + var writtenOut uint64 = 0 + _, _, e1 := Syscall9(SYS_SENDFILE, uintptr(infd), uintptr(outfd), uintptr(*offset), uintptr(count), 0, uintptr(unsafe.Pointer(&writtenOut)), 0, 0, 0) + + written = int(writtenOut) + + if e1 != 0 { + err = e1 + } + return +} + +func Syscall9(num, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r1, r2 uintptr, err syscall.Errno) diff --git a/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/syscall_freebsd_arm.go b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/syscall_freebsd_arm.go new file mode 100644 index 000000000..4e72d46a8 --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/syscall_freebsd_arm.go @@ -0,0 +1,63 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
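+
+// On freebsd/arm the Sec fields of Timespec and Timeval are 64-bit while
+// Nsec and Usec remain 32-bit, so the converters below mix both widths;
+// sendfile still splits its 64-bit offset across two register arguments,
+// as on 386.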
+ +// +build arm,freebsd + +package unix + +import ( + "syscall" + "unsafe" +) + +func Getpagesize() int { return 4096 } + +func TimespecToNsec(ts Timespec) int64 { return ts.Sec*1e9 + int64(ts.Nsec) } + +func NsecToTimespec(nsec int64) (ts Timespec) { + ts.Sec = nsec / 1e9 + ts.Nsec = int32(nsec % 1e9) + return +} + +func TimevalToNsec(tv Timeval) int64 { return tv.Sec*1e9 + int64(tv.Usec)*1e3 } + +func NsecToTimeval(nsec int64) (tv Timeval) { + nsec += 999 // round up to microsecond + tv.Usec = int32(nsec % 1e9 / 1e3) + tv.Sec = nsec / 1e9 + return +} + +func SetKevent(k *Kevent_t, fd, mode, flags int) { + k.Ident = uint32(fd) + k.Filter = int16(mode) + k.Flags = uint16(flags) +} + +func (iov *Iovec) SetLen(length int) { + iov.Len = uint32(length) +} + +func (msghdr *Msghdr) SetControllen(length int) { + msghdr.Controllen = uint32(length) +} + +func (cmsg *Cmsghdr) SetLen(length int) { + cmsg.Len = uint32(length) +} + +func sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) { + var writtenOut uint64 = 0 + _, _, e1 := Syscall9(SYS_SENDFILE, uintptr(infd), uintptr(outfd), uintptr(*offset), uintptr((*offset)>>32), uintptr(count), 0, uintptr(unsafe.Pointer(&writtenOut)), 0, 0) + + written = int(writtenOut) + + if e1 != 0 { + err = e1 + } + return +} + +func Syscall9(num, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r1, r2 uintptr, err syscall.Errno) diff --git a/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/syscall_freebsd_test.go b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/syscall_freebsd_test.go new file mode 100644 index 000000000..62f8052a8 --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/syscall_freebsd_test.go @@ -0,0 +1,20 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build freebsd + +package unix_test + +import ( + "testing" + + "github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix" +) + +func TestSysctUint64(t *testing.T) { + _, err := unix.SysctlUint64("vm.max_kernel_address") + if err != nil { + t.Fatal(err) + } +} diff --git a/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/syscall_linux.go b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/syscall_linux.go new file mode 100644 index 000000000..d3ee5d2c2 --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/syscall_linux.go @@ -0,0 +1,1086 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Linux system calls. +// This file is compiled as ordinary Go code, +// but it is also input to mksyscall, +// which parses the //sys lines and generates system call stubs. +// Note that sometimes we use a lowercase //sys name and +// wrap it in our own nicer implementation. 
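+//
+// As an illustrative sketch (the real stubs live in the generated
+// zsyscall_linux_*.go files), a directive such as
+//
+//	//sys	Chdir(path string) (err error)
+//
+// expands to roughly:
+//
+//	func Chdir(path string) (err error) {
+//		var _p0 *byte
+//		_p0, err = BytePtrFromString(path)
+//		if err != nil {
+//			return
+//		}
+//		_, _, e1 := Syscall(SYS_CHDIR, uintptr(unsafe.Pointer(_p0)), 0, 0)
+//		if e1 != 0 {
+//			err = e1
+//		}
+//		return
+//	}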
+ +package unix + +import ( + "syscall" + "unsafe" +) + +/* + * Wrapped + */ + +func Access(path string, mode uint32) (err error) { + return Faccessat(AT_FDCWD, path, mode, 0) +} + +func Chmod(path string, mode uint32) (err error) { + return Fchmodat(AT_FDCWD, path, mode, 0) +} + +func Chown(path string, uid int, gid int) (err error) { + return Fchownat(AT_FDCWD, path, uid, gid, 0) +} + +func Creat(path string, mode uint32) (fd int, err error) { + return Open(path, O_CREAT|O_WRONLY|O_TRUNC, mode) +} + +//sys linkat(olddirfd int, oldpath string, newdirfd int, newpath string, flags int) (err error) + +func Link(oldpath string, newpath string) (err error) { + return linkat(AT_FDCWD, oldpath, AT_FDCWD, newpath, 0) +} + +func Mkdir(path string, mode uint32) (err error) { + return Mkdirat(AT_FDCWD, path, mode) +} + +func Mknod(path string, mode uint32, dev int) (err error) { + return Mknodat(AT_FDCWD, path, mode, dev) +} + +func Open(path string, mode int, perm uint32) (fd int, err error) { + return openat(AT_FDCWD, path, mode|O_LARGEFILE, perm) +} + +//sys openat(dirfd int, path string, flags int, mode uint32) (fd int, err error) + +func Openat(dirfd int, path string, flags int, mode uint32) (fd int, err error) { + return openat(dirfd, path, flags|O_LARGEFILE, mode) +} + +//sys readlinkat(dirfd int, path string, buf []byte) (n int, err error) + +func Readlink(path string, buf []byte) (n int, err error) { + return readlinkat(AT_FDCWD, path, buf) +} + +func Rename(oldpath string, newpath string) (err error) { + return Renameat(AT_FDCWD, oldpath, AT_FDCWD, newpath) +} + +func Rmdir(path string) error { + return unlinkat(AT_FDCWD, path, AT_REMOVEDIR) +} + +//sys symlinkat(oldpath string, newdirfd int, newpath string) (err error) + +func Symlink(oldpath string, newpath string) (err error) { + return symlinkat(oldpath, AT_FDCWD, newpath) +} + +func Unlink(path string) error { + return unlinkat(AT_FDCWD, path, 0) +} + +//sys unlinkat(dirfd int, path string, flags int) (err error) + +func Unlinkat(dirfd int, path string) error { + return unlinkat(dirfd, path, 0) +} + +//sys utimes(path string, times *[2]Timeval) (err error) + +func Utimes(path string, tv []Timeval) (err error) { + if tv == nil { + return utimes(path, nil) + } + if len(tv) != 2 { + return EINVAL + } + return utimes(path, (*[2]Timeval)(unsafe.Pointer(&tv[0]))) +} + +//sys utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error) + +func UtimesNano(path string, ts []Timespec) error { + if ts == nil { + err := utimensat(AT_FDCWD, path, nil, 0) + if err != ENOSYS { + return err + } + return utimes(path, nil) + } + if len(ts) != 2 { + return EINVAL + } + err := utimensat(AT_FDCWD, path, (*[2]Timespec)(unsafe.Pointer(&ts[0])), 0) + if err != ENOSYS { + return err + } + // If the utimensat syscall isn't available (utimensat was added to Linux + // in 2.6.22, Released, 8 July 2007) then fall back to utimes + var tv [2]Timeval + for i := 0; i < 2; i++ { + tv[i].Sec = ts[i].Sec + tv[i].Usec = ts[i].Nsec / 1000 + } + return utimes(path, (*[2]Timeval)(unsafe.Pointer(&tv[0]))) +} + +func UtimesNanoAt(dirfd int, path string, ts []Timespec, flags int) error { + if ts == nil { + return utimensat(dirfd, path, nil, flags) + } + if len(ts) != 2 { + return EINVAL + } + return utimensat(dirfd, path, (*[2]Timespec)(unsafe.Pointer(&ts[0])), flags) +} + +//sys futimesat(dirfd int, path *byte, times *[2]Timeval) (err error) + +func Futimesat(dirfd int, path string, tv []Timeval) error { + pathp, err := BytePtrFromString(path) + if err != nil 
{ + return err + } + if tv == nil { + return futimesat(dirfd, pathp, nil) + } + if len(tv) != 2 { + return EINVAL + } + return futimesat(dirfd, pathp, (*[2]Timeval)(unsafe.Pointer(&tv[0]))) +} + +func Futimes(fd int, tv []Timeval) (err error) { + // Believe it or not, this is the best we can do on Linux + // (and is what glibc does). + return Utimes("/proc/self/fd/"+itoa(fd), tv) +} + +const ImplementsGetwd = true + +//sys Getcwd(buf []byte) (n int, err error) + +func Getwd() (wd string, err error) { + var buf [PathMax]byte + n, err := Getcwd(buf[0:]) + if err != nil { + return "", err + } + // Getcwd returns the number of bytes written to buf, including the NUL. + if n < 1 || n > len(buf) || buf[n-1] != 0 { + return "", EINVAL + } + return string(buf[0 : n-1]), nil +} + +func Getgroups() (gids []int, err error) { + n, err := getgroups(0, nil) + if err != nil { + return nil, err + } + if n == 0 { + return nil, nil + } + + // Sanity check group count. Max is 1<<16 on Linux. + if n < 0 || n > 1<<20 { + return nil, EINVAL + } + + a := make([]_Gid_t, n) + n, err = getgroups(n, &a[0]) + if err != nil { + return nil, err + } + gids = make([]int, n) + for i, v := range a[0:n] { + gids[i] = int(v) + } + return +} + +func Setgroups(gids []int) (err error) { + if len(gids) == 0 { + return setgroups(0, nil) + } + + a := make([]_Gid_t, len(gids)) + for i, v := range gids { + a[i] = _Gid_t(v) + } + return setgroups(len(a), &a[0]) +} + +type WaitStatus uint32 + +// Wait status is 7 bits at bottom, either 0 (exited), +// 0x7F (stopped), or a signal number that caused an exit. +// The 0x80 bit is whether there was a core dump. +// An extra number (exit code, signal causing a stop) +// is in the high bits. At least that's the idea. +// There are various irregularities. For example, the +// "continued" status is 0xFFFF, distinguishing itself +// from stopped via the core dump bit. 
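+//
+// Two worked examples of that layout: a raw status of 0x0300 decodes as
+// Exited() with ExitStatus() == 3, while 0x008B decodes as Signaled() by
+// signal 11 (SIGSEGV) with CoreDump() == true, since the 0x80 core bit
+// is set.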
+ +const ( + mask = 0x7F + core = 0x80 + exited = 0x00 + stopped = 0x7F + shift = 8 +) + +func (w WaitStatus) Exited() bool { return w&mask == exited } + +func (w WaitStatus) Signaled() bool { return w&mask != stopped && w&mask != exited } + +func (w WaitStatus) Stopped() bool { return w&0xFF == stopped } + +func (w WaitStatus) Continued() bool { return w == 0xFFFF } + +func (w WaitStatus) CoreDump() bool { return w.Signaled() && w&core != 0 } + +func (w WaitStatus) ExitStatus() int { + if !w.Exited() { + return -1 + } + return int(w>>shift) & 0xFF +} + +func (w WaitStatus) Signal() syscall.Signal { + if !w.Signaled() { + return -1 + } + return syscall.Signal(w & mask) +} + +func (w WaitStatus) StopSignal() syscall.Signal { + if !w.Stopped() { + return -1 + } + return syscall.Signal(w>>shift) & 0xFF +} + +func (w WaitStatus) TrapCause() int { + if w.StopSignal() != SIGTRAP { + return -1 + } + return int(w>>shift) >> 8 +} + +//sys wait4(pid int, wstatus *_C_int, options int, rusage *Rusage) (wpid int, err error) + +func Wait4(pid int, wstatus *WaitStatus, options int, rusage *Rusage) (wpid int, err error) { + var status _C_int + wpid, err = wait4(pid, &status, options, rusage) + if wstatus != nil { + *wstatus = WaitStatus(status) + } + return +} + +func Mkfifo(path string, mode uint32) (err error) { + return Mknod(path, mode|S_IFIFO, 0) +} + +func (sa *SockaddrInet4) sockaddr() (unsafe.Pointer, _Socklen, error) { + if sa.Port < 0 || sa.Port > 0xFFFF { + return nil, 0, EINVAL + } + sa.raw.Family = AF_INET + p := (*[2]byte)(unsafe.Pointer(&sa.raw.Port)) + p[0] = byte(sa.Port >> 8) + p[1] = byte(sa.Port) + for i := 0; i < len(sa.Addr); i++ { + sa.raw.Addr[i] = sa.Addr[i] + } + return unsafe.Pointer(&sa.raw), SizeofSockaddrInet4, nil +} + +func (sa *SockaddrInet6) sockaddr() (unsafe.Pointer, _Socklen, error) { + if sa.Port < 0 || sa.Port > 0xFFFF { + return nil, 0, EINVAL + } + sa.raw.Family = AF_INET6 + p := (*[2]byte)(unsafe.Pointer(&sa.raw.Port)) + p[0] = byte(sa.Port >> 8) + p[1] = byte(sa.Port) + sa.raw.Scope_id = sa.ZoneId + for i := 0; i < len(sa.Addr); i++ { + sa.raw.Addr[i] = sa.Addr[i] + } + return unsafe.Pointer(&sa.raw), SizeofSockaddrInet6, nil +} + +func (sa *SockaddrUnix) sockaddr() (unsafe.Pointer, _Socklen, error) { + name := sa.Name + n := len(name) + if n >= len(sa.raw.Path) { + return nil, 0, EINVAL + } + sa.raw.Family = AF_UNIX + for i := 0; i < n; i++ { + sa.raw.Path[i] = int8(name[i]) + } + // length is family (uint16), name, NUL. + sl := _Socklen(2) + if n > 0 { + sl += _Socklen(n) + 1 + } + if sa.raw.Path[0] == '@' { + sa.raw.Path[0] = 0 + // Don't count trailing NUL for abstract address. 
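+		// For example, the abstract name "@foo" goes out as {0, 'f', 'o', 'o'}
+		// with sl == 6: the 2-byte family field plus four name bytes.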
+ sl-- + } + + return unsafe.Pointer(&sa.raw), sl, nil +} + +type SockaddrLinklayer struct { + Protocol uint16 + Ifindex int + Hatype uint16 + Pkttype uint8 + Halen uint8 + Addr [8]byte + raw RawSockaddrLinklayer +} + +func (sa *SockaddrLinklayer) sockaddr() (unsafe.Pointer, _Socklen, error) { + if sa.Ifindex < 0 || sa.Ifindex > 0x7fffffff { + return nil, 0, EINVAL + } + sa.raw.Family = AF_PACKET + sa.raw.Protocol = sa.Protocol + sa.raw.Ifindex = int32(sa.Ifindex) + sa.raw.Hatype = sa.Hatype + sa.raw.Pkttype = sa.Pkttype + sa.raw.Halen = sa.Halen + for i := 0; i < len(sa.Addr); i++ { + sa.raw.Addr[i] = sa.Addr[i] + } + return unsafe.Pointer(&sa.raw), SizeofSockaddrLinklayer, nil +} + +type SockaddrNetlink struct { + Family uint16 + Pad uint16 + Pid uint32 + Groups uint32 + raw RawSockaddrNetlink +} + +func (sa *SockaddrNetlink) sockaddr() (unsafe.Pointer, _Socklen, error) { + sa.raw.Family = AF_NETLINK + sa.raw.Pad = sa.Pad + sa.raw.Pid = sa.Pid + sa.raw.Groups = sa.Groups + return unsafe.Pointer(&sa.raw), SizeofSockaddrNetlink, nil +} + +func anyToSockaddr(rsa *RawSockaddrAny) (Sockaddr, error) { + switch rsa.Addr.Family { + case AF_NETLINK: + pp := (*RawSockaddrNetlink)(unsafe.Pointer(rsa)) + sa := new(SockaddrNetlink) + sa.Family = pp.Family + sa.Pad = pp.Pad + sa.Pid = pp.Pid + sa.Groups = pp.Groups + return sa, nil + + case AF_PACKET: + pp := (*RawSockaddrLinklayer)(unsafe.Pointer(rsa)) + sa := new(SockaddrLinklayer) + sa.Protocol = pp.Protocol + sa.Ifindex = int(pp.Ifindex) + sa.Hatype = pp.Hatype + sa.Pkttype = pp.Pkttype + sa.Halen = pp.Halen + for i := 0; i < len(sa.Addr); i++ { + sa.Addr[i] = pp.Addr[i] + } + return sa, nil + + case AF_UNIX: + pp := (*RawSockaddrUnix)(unsafe.Pointer(rsa)) + sa := new(SockaddrUnix) + if pp.Path[0] == 0 { + // "Abstract" Unix domain socket. + // Rewrite leading NUL as @ for textual display. + // (This is the standard convention.) + // Not friendly to overwrite in place, + // but the callers below don't care. + pp.Path[0] = '@' + } + + // Assume path ends at NUL. + // This is not technically the Linux semantics for + // abstract Unix domain sockets--they are supposed + // to be uninterpreted fixed-size binary blobs--but + // everyone uses this convention. 
+ n := 0 + for n < len(pp.Path) && pp.Path[n] != 0 { + n++ + } + bytes := (*[10000]byte)(unsafe.Pointer(&pp.Path[0]))[0:n] + sa.Name = string(bytes) + return sa, nil + + case AF_INET: + pp := (*RawSockaddrInet4)(unsafe.Pointer(rsa)) + sa := new(SockaddrInet4) + p := (*[2]byte)(unsafe.Pointer(&pp.Port)) + sa.Port = int(p[0])<<8 + int(p[1]) + for i := 0; i < len(sa.Addr); i++ { + sa.Addr[i] = pp.Addr[i] + } + return sa, nil + + case AF_INET6: + pp := (*RawSockaddrInet6)(unsafe.Pointer(rsa)) + sa := new(SockaddrInet6) + p := (*[2]byte)(unsafe.Pointer(&pp.Port)) + sa.Port = int(p[0])<<8 + int(p[1]) + sa.ZoneId = pp.Scope_id + for i := 0; i < len(sa.Addr); i++ { + sa.Addr[i] = pp.Addr[i] + } + return sa, nil + } + return nil, EAFNOSUPPORT +} + +func Accept(fd int) (nfd int, sa Sockaddr, err error) { + var rsa RawSockaddrAny + var len _Socklen = SizeofSockaddrAny + nfd, err = accept(fd, &rsa, &len) + if err != nil { + return + } + sa, err = anyToSockaddr(&rsa) + if err != nil { + Close(nfd) + nfd = 0 + } + return +} + +func Accept4(fd int, flags int) (nfd int, sa Sockaddr, err error) { + var rsa RawSockaddrAny + var len _Socklen = SizeofSockaddrAny + nfd, err = accept4(fd, &rsa, &len, flags) + if err != nil { + return + } + if len > SizeofSockaddrAny { + panic("RawSockaddrAny too small") + } + sa, err = anyToSockaddr(&rsa) + if err != nil { + Close(nfd) + nfd = 0 + } + return +} + +func Getsockname(fd int) (sa Sockaddr, err error) { + var rsa RawSockaddrAny + var len _Socklen = SizeofSockaddrAny + if err = getsockname(fd, &rsa, &len); err != nil { + return + } + return anyToSockaddr(&rsa) +} + +func GetsockoptInet4Addr(fd, level, opt int) (value [4]byte, err error) { + vallen := _Socklen(4) + err = getsockopt(fd, level, opt, unsafe.Pointer(&value[0]), &vallen) + return value, err +} + +func GetsockoptIPMreq(fd, level, opt int) (*IPMreq, error) { + var value IPMreq + vallen := _Socklen(SizeofIPMreq) + err := getsockopt(fd, level, opt, unsafe.Pointer(&value), &vallen) + return &value, err +} + +func GetsockoptIPMreqn(fd, level, opt int) (*IPMreqn, error) { + var value IPMreqn + vallen := _Socklen(SizeofIPMreqn) + err := getsockopt(fd, level, opt, unsafe.Pointer(&value), &vallen) + return &value, err +} + +func GetsockoptIPv6Mreq(fd, level, opt int) (*IPv6Mreq, error) { + var value IPv6Mreq + vallen := _Socklen(SizeofIPv6Mreq) + err := getsockopt(fd, level, opt, unsafe.Pointer(&value), &vallen) + return &value, err +} + +func GetsockoptIPv6MTUInfo(fd, level, opt int) (*IPv6MTUInfo, error) { + var value IPv6MTUInfo + vallen := _Socklen(SizeofIPv6MTUInfo) + err := getsockopt(fd, level, opt, unsafe.Pointer(&value), &vallen) + return &value, err +} + +func GetsockoptICMPv6Filter(fd, level, opt int) (*ICMPv6Filter, error) { + var value ICMPv6Filter + vallen := _Socklen(SizeofICMPv6Filter) + err := getsockopt(fd, level, opt, unsafe.Pointer(&value), &vallen) + return &value, err +} + +func GetsockoptUcred(fd, level, opt int) (*Ucred, error) { + var value Ucred + vallen := _Socklen(SizeofUcred) + err := getsockopt(fd, level, opt, unsafe.Pointer(&value), &vallen) + return &value, err +} + +func SetsockoptIPMreqn(fd, level, opt int, mreq *IPMreqn) (err error) { + return setsockopt(fd, level, opt, unsafe.Pointer(mreq), unsafe.Sizeof(*mreq)) +} + +func Recvmsg(fd int, p, oob []byte, flags int) (n, oobn int, recvflags int, from Sockaddr, err error) { + var msg Msghdr + var rsa RawSockaddrAny + msg.Name = (*byte)(unsafe.Pointer(&rsa)) + msg.Namelen = uint32(SizeofSockaddrAny) + var iov Iovec + if len(p) > 0 { + 
iov.Base = (*byte)(unsafe.Pointer(&p[0])) + iov.SetLen(len(p)) + } + var dummy byte + if len(oob) > 0 { + // receive at least one normal byte + if len(p) == 0 { + iov.Base = &dummy + iov.SetLen(1) + } + msg.Control = (*byte)(unsafe.Pointer(&oob[0])) + msg.SetControllen(len(oob)) + } + msg.Iov = &iov + msg.Iovlen = 1 + if n, err = recvmsg(fd, &msg, flags); err != nil { + return + } + oobn = int(msg.Controllen) + recvflags = int(msg.Flags) + // source address is only specified if the socket is unconnected + if rsa.Addr.Family != AF_UNSPEC { + from, err = anyToSockaddr(&rsa) + } + return +} + +func Sendmsg(fd int, p, oob []byte, to Sockaddr, flags int) (err error) { + _, err = SendmsgN(fd, p, oob, to, flags) + return +} + +func SendmsgN(fd int, p, oob []byte, to Sockaddr, flags int) (n int, err error) { + var ptr unsafe.Pointer + var salen _Socklen + if to != nil { + var err error + ptr, salen, err = to.sockaddr() + if err != nil { + return 0, err + } + } + var msg Msghdr + msg.Name = (*byte)(unsafe.Pointer(ptr)) + msg.Namelen = uint32(salen) + var iov Iovec + if len(p) > 0 { + iov.Base = (*byte)(unsafe.Pointer(&p[0])) + iov.SetLen(len(p)) + } + var dummy byte + if len(oob) > 0 { + // send at least one normal byte + if len(p) == 0 { + iov.Base = &dummy + iov.SetLen(1) + } + msg.Control = (*byte)(unsafe.Pointer(&oob[0])) + msg.SetControllen(len(oob)) + } + msg.Iov = &iov + msg.Iovlen = 1 + if n, err = sendmsg(fd, &msg, flags); err != nil { + return 0, err + } + if len(oob) > 0 && len(p) == 0 { + n = 0 + } + return n, nil +} + +// BindToDevice binds the socket associated with fd to device. +func BindToDevice(fd int, device string) (err error) { + return SetsockoptString(fd, SOL_SOCKET, SO_BINDTODEVICE, device) +} + +//sys ptrace(request int, pid int, addr uintptr, data uintptr) (err error) + +func ptracePeek(req int, pid int, addr uintptr, out []byte) (count int, err error) { + // The peek requests are machine-size oriented, so we wrap it + // to retrieve arbitrary-length data. + + // The ptrace syscall differs from glibc's ptrace. + // Peeks returns the word in *data, not as the return value. + + var buf [sizeofPtr]byte + + // Leading edge. PEEKTEXT/PEEKDATA don't require aligned + // access (PEEKUSER warns that it might), but if we don't + // align our reads, we might straddle an unmapped page + // boundary and not get the bytes leading up to the page + // boundary. + n := 0 + if addr%sizeofPtr != 0 { + err = ptrace(req, pid, addr-addr%sizeofPtr, uintptr(unsafe.Pointer(&buf[0]))) + if err != nil { + return 0, err + } + n += copy(out, buf[addr%sizeofPtr:]) + out = out[n:] + } + + // Remainder. + for len(out) > 0 { + // We use an internal buffer to guarantee alignment. + // It's not documented if this is necessary, but we're paranoid. + err = ptrace(req, pid, addr+uintptr(n), uintptr(unsafe.Pointer(&buf[0]))) + if err != nil { + return n, err + } + copied := copy(out, buf[0:]) + n += copied + out = out[copied:] + } + + return n, nil +} + +func PtracePeekText(pid int, addr uintptr, out []byte) (count int, err error) { + return ptracePeek(PTRACE_PEEKTEXT, pid, addr, out) +} + +func PtracePeekData(pid int, addr uintptr, out []byte) (count int, err error) { + return ptracePeek(PTRACE_PEEKDATA, pid, addr, out) +} + +func ptracePoke(pokeReq int, peekReq int, pid int, addr uintptr, data []byte) (count int, err error) { + // As for ptracePeek, we need to align our accesses to deal + // with the possibility of straddling an invalid page. + + // Leading edge. 
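+	// For example, with sizeofPtr == 8 and addr == 0x1003, the block below
+	// peeks the word at 0x1000, overlays data starting at byte offset 3,
+	// pokes the merged word back, and leaves any remaining bytes to continue
+	// at 0x1008.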
+ n := 0 + if addr%sizeofPtr != 0 { + var buf [sizeofPtr]byte + err = ptrace(peekReq, pid, addr-addr%sizeofPtr, uintptr(unsafe.Pointer(&buf[0]))) + if err != nil { + return 0, err + } + n += copy(buf[addr%sizeofPtr:], data) + word := *((*uintptr)(unsafe.Pointer(&buf[0]))) + err = ptrace(pokeReq, pid, addr-addr%sizeofPtr, word) + if err != nil { + return 0, err + } + data = data[n:] + } + + // Interior. + for len(data) > sizeofPtr { + word := *((*uintptr)(unsafe.Pointer(&data[0]))) + err = ptrace(pokeReq, pid, addr+uintptr(n), word) + if err != nil { + return n, err + } + n += sizeofPtr + data = data[sizeofPtr:] + } + + // Trailing edge. + if len(data) > 0 { + var buf [sizeofPtr]byte + err = ptrace(peekReq, pid, addr+uintptr(n), uintptr(unsafe.Pointer(&buf[0]))) + if err != nil { + return n, err + } + copy(buf[0:], data) + word := *((*uintptr)(unsafe.Pointer(&buf[0]))) + err = ptrace(pokeReq, pid, addr+uintptr(n), word) + if err != nil { + return n, err + } + n += len(data) + } + + return n, nil +} + +func PtracePokeText(pid int, addr uintptr, data []byte) (count int, err error) { + return ptracePoke(PTRACE_POKETEXT, PTRACE_PEEKTEXT, pid, addr, data) +} + +func PtracePokeData(pid int, addr uintptr, data []byte) (count int, err error) { + return ptracePoke(PTRACE_POKEDATA, PTRACE_PEEKDATA, pid, addr, data) +} + +func PtraceGetRegs(pid int, regsout *PtraceRegs) (err error) { + return ptrace(PTRACE_GETREGS, pid, 0, uintptr(unsafe.Pointer(regsout))) +} + +func PtraceSetRegs(pid int, regs *PtraceRegs) (err error) { + return ptrace(PTRACE_SETREGS, pid, 0, uintptr(unsafe.Pointer(regs))) +} + +func PtraceSetOptions(pid int, options int) (err error) { + return ptrace(PTRACE_SETOPTIONS, pid, 0, uintptr(options)) +} + +func PtraceGetEventMsg(pid int) (msg uint, err error) { + var data _C_long + err = ptrace(PTRACE_GETEVENTMSG, pid, 0, uintptr(unsafe.Pointer(&data))) + msg = uint(data) + return +} + +func PtraceCont(pid int, signal int) (err error) { + return ptrace(PTRACE_CONT, pid, 0, uintptr(signal)) +} + +func PtraceSyscall(pid int, signal int) (err error) { + return ptrace(PTRACE_SYSCALL, pid, 0, uintptr(signal)) +} + +func PtraceSingleStep(pid int) (err error) { return ptrace(PTRACE_SINGLESTEP, pid, 0, 0) } + +func PtraceAttach(pid int) (err error) { return ptrace(PTRACE_ATTACH, pid, 0, 0) } + +func PtraceDetach(pid int) (err error) { return ptrace(PTRACE_DETACH, pid, 0, 0) } + +//sys reboot(magic1 uint, magic2 uint, cmd int, arg string) (err error) + +func Reboot(cmd int) (err error) { + return reboot(LINUX_REBOOT_MAGIC1, LINUX_REBOOT_MAGIC2, cmd, "") +} + +func clen(n []byte) int { + for i := 0; i < len(n); i++ { + if n[i] == 0 { + return i + } + } + return len(n) +} + +func ReadDirent(fd int, buf []byte) (n int, err error) { + return Getdents(fd, buf) +} + +func ParseDirent(buf []byte, max int, names []string) (consumed int, count int, newnames []string) { + origlen := len(buf) + count = 0 + for max != 0 && len(buf) > 0 { + dirent := (*Dirent)(unsafe.Pointer(&buf[0])) + buf = buf[dirent.Reclen:] + if dirent.Ino == 0 { // File absent in directory. + continue + } + bytes := (*[10000]byte)(unsafe.Pointer(&dirent.Name[0])) + var name = string(bytes[0:clen(bytes[:])]) + if name == "." || name == ".." 
{ // Useless names + continue + } + max-- + count++ + names = append(names, name) + } + return origlen - len(buf), count, names +} + +//sys mount(source string, target string, fstype string, flags uintptr, data *byte) (err error) + +func Mount(source string, target string, fstype string, flags uintptr, data string) (err error) { + // Certain file systems get rather angry and EINVAL if you give + // them an empty string of data, rather than NULL. + if data == "" { + return mount(source, target, fstype, flags, nil) + } + datap, err := BytePtrFromString(data) + if err != nil { + return err + } + return mount(source, target, fstype, flags, datap) +} + +// Sendto +// Recvfrom +// Socketpair + +/* + * Direct access + */ +//sys Acct(path string) (err error) +//sys Adjtimex(buf *Timex) (state int, err error) +//sys Chdir(path string) (err error) +//sys Chroot(path string) (err error) +//sys ClockGettime(clockid int32, time *Timespec) (err error) +//sys Close(fd int) (err error) +//sys Dup(oldfd int) (fd int, err error) +//sys Dup3(oldfd int, newfd int, flags int) (err error) +//sysnb EpollCreate(size int) (fd int, err error) +//sysnb EpollCreate1(flag int) (fd int, err error) +//sysnb EpollCtl(epfd int, op int, fd int, event *EpollEvent) (err error) +//sys EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) +//sys Exit(code int) = SYS_EXIT_GROUP +//sys Faccessat(dirfd int, path string, mode uint32, flags int) (err error) +//sys Fallocate(fd int, mode uint32, off int64, len int64) (err error) +//sys Fchdir(fd int) (err error) +//sys Fchmod(fd int, mode uint32) (err error) +//sys Fchmodat(dirfd int, path string, mode uint32, flags int) (err error) +//sys Fchownat(dirfd int, path string, uid int, gid int, flags int) (err error) +//sys fcntl(fd int, cmd int, arg int) (val int, err error) +//sys Fdatasync(fd int) (err error) +//sys Flock(fd int, how int) (err error) +//sys Fsync(fd int) (err error) +//sys Getdents(fd int, buf []byte) (n int, err error) = SYS_GETDENTS64 +//sysnb Getpgid(pid int) (pgid int, err error) + +func Getpgrp() (pid int) { + pid, _ = Getpgid(0) + return +} + +//sysnb Getpid() (pid int) +//sysnb Getppid() (ppid int) +//sys Getpriority(which int, who int) (prio int, err error) +//sysnb Getrusage(who int, rusage *Rusage) (err error) +//sysnb Gettid() (tid int) +//sys Getxattr(path string, attr string, dest []byte) (sz int, err error) +//sys InotifyAddWatch(fd int, pathname string, mask uint32) (watchdesc int, err error) +//sysnb InotifyInit1(flags int) (fd int, err error) +//sysnb InotifyRmWatch(fd int, watchdesc uint32) (success int, err error) +//sysnb Kill(pid int, sig syscall.Signal) (err error) +//sys Klogctl(typ int, buf []byte) (n int, err error) = SYS_SYSLOG +//sys Listxattr(path string, dest []byte) (sz int, err error) +//sys Mkdirat(dirfd int, path string, mode uint32) (err error) +//sys Mknodat(dirfd int, path string, mode uint32, dev int) (err error) +//sys Nanosleep(time *Timespec, leftover *Timespec) (err error) +//sys Pause() (err error) +//sys PivotRoot(newroot string, putold string) (err error) = SYS_PIVOT_ROOT +//sysnb prlimit(pid int, resource int, old *Rlimit, newlimit *Rlimit) (err error) = SYS_PRLIMIT64 +//sys Prctl(option int, arg2 uintptr, arg3 uintptr, arg4 uintptr, arg5 uintptr) (err error) +//sys read(fd int, p []byte) (n int, err error) +//sys Removexattr(path string, attr string) (err error) +//sys Renameat(olddirfd int, oldpath string, newdirfd int, newpath string) (err error) +//sys Setdomainname(p []byte) (err error) +//sys 
Sethostname(p []byte) (err error) +//sysnb Setpgid(pid int, pgid int) (err error) +//sysnb Setsid() (pid int, err error) +//sysnb Settimeofday(tv *Timeval) (err error) + +// issue 1435. +// On linux Setuid and Setgid only affects the current thread, not the process. +// This does not match what most callers expect so we must return an error +// here rather than letting the caller think that the call succeeded. + +func Setuid(uid int) (err error) { + return EOPNOTSUPP +} + +func Setgid(uid int) (err error) { + return EOPNOTSUPP +} + +//sys Setpriority(which int, who int, prio int) (err error) +//sys Setxattr(path string, attr string, data []byte, flags int) (err error) +//sys Sync() +//sysnb Sysinfo(info *Sysinfo_t) (err error) +//sys Tee(rfd int, wfd int, len int, flags int) (n int64, err error) +//sysnb Tgkill(tgid int, tid int, sig syscall.Signal) (err error) +//sysnb Times(tms *Tms) (ticks uintptr, err error) +//sysnb Umask(mask int) (oldmask int) +//sysnb Uname(buf *Utsname) (err error) +//sys Unmount(target string, flags int) (err error) = SYS_UMOUNT2 +//sys Unshare(flags int) (err error) +//sys Ustat(dev int, ubuf *Ustat_t) (err error) +//sys Utime(path string, buf *Utimbuf) (err error) +//sys write(fd int, p []byte) (n int, err error) +//sys exitThread(code int) (err error) = SYS_EXIT +//sys readlen(fd int, p *byte, np int) (n int, err error) = SYS_READ +//sys writelen(fd int, p *byte, np int) (n int, err error) = SYS_WRITE + +// mmap varies by architecture; see syscall_linux_*.go. +//sys munmap(addr uintptr, length uintptr) (err error) + +var mapper = &mmapper{ + active: make(map[*byte][]byte), + mmap: mmap, + munmap: munmap, +} + +func Mmap(fd int, offset int64, length int, prot int, flags int) (data []byte, err error) { + return mapper.Mmap(fd, offset, length, prot, flags) +} + +func Munmap(b []byte) (err error) { + return mapper.Munmap(b) +} + +//sys Madvise(b []byte, advice int) (err error) +//sys Mprotect(b []byte, prot int) (err error) +//sys Mlock(b []byte) (err error) +//sys Munlock(b []byte) (err error) +//sys Mlockall(flags int) (err error) +//sys Munlockall() (err error) + +/* + * Unimplemented + */ +// AddKey +// AfsSyscall +// Alarm +// ArchPrctl +// Brk +// Capget +// Capset +// ClockGetres +// ClockNanosleep +// ClockSettime +// Clone +// CreateModule +// DeleteModule +// EpollCtlOld +// EpollPwait +// EpollWaitOld +// Eventfd +// Execve +// Fgetxattr +// Flistxattr +// Fork +// Fremovexattr +// Fsetxattr +// Futex +// GetKernelSyms +// GetMempolicy +// GetRobustList +// GetThreadArea +// Getitimer +// Getpmsg +// IoCancel +// IoDestroy +// IoGetevents +// IoSetup +// IoSubmit +// Ioctl +// IoprioGet +// IoprioSet +// KexecLoad +// Keyctl +// Lgetxattr +// Llistxattr +// LookupDcookie +// Lremovexattr +// Lsetxattr +// Mbind +// MigratePages +// Mincore +// ModifyLdt +// Mount +// MovePages +// Mprotect +// MqGetsetattr +// MqNotify +// MqOpen +// MqTimedreceive +// MqTimedsend +// MqUnlink +// Mremap +// Msgctl +// Msgget +// Msgrcv +// Msgsnd +// Msync +// Newfstatat +// Nfsservctl +// Personality +// Poll +// Ppoll +// Pselect6 +// Ptrace +// Putpmsg +// QueryModule +// Quotactl +// Readahead +// Readv +// RemapFilePages +// RequestKey +// RestartSyscall +// RtSigaction +// RtSigpending +// RtSigprocmask +// RtSigqueueinfo +// RtSigreturn +// RtSigsuspend +// RtSigtimedwait +// SchedGetPriorityMax +// SchedGetPriorityMin +// SchedGetaffinity +// SchedGetparam +// SchedGetscheduler +// SchedRrGetInterval +// SchedSetaffinity +// SchedSetparam +// SchedYield +// 
Security +// Semctl +// Semget +// Semop +// Semtimedop +// SetMempolicy +// SetRobustList +// SetThreadArea +// SetTidAddress +// Shmat +// Shmctl +// Shmdt +// Shmget +// Sigaltstack +// Signalfd +// Swapoff +// Swapon +// Sysfs +// TimerCreate +// TimerDelete +// TimerGetoverrun +// TimerGettime +// TimerSettime +// Timerfd +// Tkill (obsolete) +// Tuxcall +// Umount2 +// Uselib +// Utimensat +// Vfork +// Vhangup +// Vmsplice +// Vserver +// Waitid +// _Sysctl diff --git a/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/syscall_linux_386.go b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/syscall_linux_386.go new file mode 100644 index 000000000..7171219af --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/syscall_linux_386.go @@ -0,0 +1,388 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// TODO(rsc): Rewrite all nn(SP) references into name+(nn-8)(FP) +// so that go vet can check that they are correct. + +// +build 386,linux + +package unix + +import ( + "syscall" + "unsafe" +) + +func Getpagesize() int { return 4096 } + +func TimespecToNsec(ts Timespec) int64 { return int64(ts.Sec)*1e9 + int64(ts.Nsec) } + +func NsecToTimespec(nsec int64) (ts Timespec) { + ts.Sec = int32(nsec / 1e9) + ts.Nsec = int32(nsec % 1e9) + return +} + +func TimevalToNsec(tv Timeval) int64 { return int64(tv.Sec)*1e9 + int64(tv.Usec)*1e3 } + +func NsecToTimeval(nsec int64) (tv Timeval) { + nsec += 999 // round up to microsecond + tv.Sec = int32(nsec / 1e9) + tv.Usec = int32(nsec % 1e9 / 1e3) + return +} + +//sysnb pipe(p *[2]_C_int) (err error) + +func Pipe(p []int) (err error) { + if len(p) != 2 { + return EINVAL + } + var pp [2]_C_int + err = pipe(&pp) + p[0] = int(pp[0]) + p[1] = int(pp[1]) + return +} + +//sysnb pipe2(p *[2]_C_int, flags int) (err error) + +func Pipe2(p []int, flags int) (err error) { + if len(p) != 2 { + return EINVAL + } + var pp [2]_C_int + err = pipe2(&pp, flags) + p[0] = int(pp[0]) + p[1] = int(pp[1]) + return +} + +// 64-bit file system and 32-bit uid calls +// (386 default is 32-bit file system and 16-bit uid). 
+//sys Dup2(oldfd int, newfd int) (err error) +//sys Fadvise(fd int, offset int64, length int64, advice int) (err error) = SYS_FADVISE64_64 +//sys Fchown(fd int, uid int, gid int) (err error) = SYS_FCHOWN32 +//sys Fstat(fd int, stat *Stat_t) (err error) = SYS_FSTAT64 +//sys Ftruncate(fd int, length int64) (err error) = SYS_FTRUNCATE64 +//sysnb Getegid() (egid int) = SYS_GETEGID32 +//sysnb Geteuid() (euid int) = SYS_GETEUID32 +//sysnb Getgid() (gid int) = SYS_GETGID32 +//sysnb Getuid() (uid int) = SYS_GETUID32 +//sysnb InotifyInit() (fd int, err error) +//sys Ioperm(from int, num int, on int) (err error) +//sys Iopl(level int) (err error) +//sys Lchown(path string, uid int, gid int) (err error) = SYS_LCHOWN32 +//sys Lstat(path string, stat *Stat_t) (err error) = SYS_LSTAT64 +//sys Pread(fd int, p []byte, offset int64) (n int, err error) = SYS_PREAD64 +//sys Pwrite(fd int, p []byte, offset int64) (n int, err error) = SYS_PWRITE64 +//sys sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) = SYS_SENDFILE64 +//sys Setfsgid(gid int) (err error) = SYS_SETFSGID32 +//sys Setfsuid(uid int) (err error) = SYS_SETFSUID32 +//sysnb Setregid(rgid int, egid int) (err error) = SYS_SETREGID32 +//sysnb Setresgid(rgid int, egid int, sgid int) (err error) = SYS_SETRESGID32 +//sysnb Setresuid(ruid int, euid int, suid int) (err error) = SYS_SETRESUID32 +//sysnb Setreuid(ruid int, euid int) (err error) = SYS_SETREUID32 +//sys Splice(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n int, err error) +//sys Stat(path string, stat *Stat_t) (err error) = SYS_STAT64 +//sys SyncFileRange(fd int, off int64, n int64, flags int) (err error) +//sys Truncate(path string, length int64) (err error) = SYS_TRUNCATE64 +//sysnb getgroups(n int, list *_Gid_t) (nn int, err error) = SYS_GETGROUPS32 +//sysnb setgroups(n int, list *_Gid_t) (err error) = SYS_SETGROUPS32 +//sys Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err error) = SYS__NEWSELECT + +//sys mmap2(addr uintptr, length uintptr, prot int, flags int, fd int, pageOffset uintptr) (xaddr uintptr, err error) + +func mmap(addr uintptr, length uintptr, prot int, flags int, fd int, offset int64) (xaddr uintptr, err error) { + page := uintptr(offset / 4096) + if offset != int64(page)*4096 { + return 0, EINVAL + } + return mmap2(addr, length, prot, flags, fd, page) +} + +type rlimit32 struct { + Cur uint32 + Max uint32 +} + +//sysnb getrlimit(resource int, rlim *rlimit32) (err error) = SYS_GETRLIMIT + +const rlimInf32 = ^uint32(0) +const rlimInf64 = ^uint64(0) + +func Getrlimit(resource int, rlim *Rlimit) (err error) { + err = prlimit(0, resource, nil, rlim) + if err != ENOSYS { + return err + } + + rl := rlimit32{} + err = getrlimit(resource, &rl) + if err != nil { + return + } + + if rl.Cur == rlimInf32 { + rlim.Cur = rlimInf64 + } else { + rlim.Cur = uint64(rl.Cur) + } + + if rl.Max == rlimInf32 { + rlim.Max = rlimInf64 + } else { + rlim.Max = uint64(rl.Max) + } + return +} + +//sysnb setrlimit(resource int, rlim *rlimit32) (err error) = SYS_SETRLIMIT + +func Setrlimit(resource int, rlim *Rlimit) (err error) { + err = prlimit(0, resource, rlim, nil) + if err != ENOSYS { + return err + } + + rl := rlimit32{} + if rlim.Cur == rlimInf64 { + rl.Cur = rlimInf32 + } else if rlim.Cur < uint64(rlimInf32) { + rl.Cur = uint32(rlim.Cur) + } else { + return EINVAL + } + if rlim.Max == rlimInf64 { + rl.Max = rlimInf32 + } else if rlim.Max < uint64(rlimInf32) { + rl.Max = uint32(rlim.Max) + } else { + return EINVAL + } + + 
return setrlimit(resource, &rl) +} + +// Underlying system call writes to newoffset via pointer. +// Implemented in assembly to avoid allocation. +func seek(fd int, offset int64, whence int) (newoffset int64, err syscall.Errno) + +func Seek(fd int, offset int64, whence int) (newoffset int64, err error) { + newoffset, errno := seek(fd, offset, whence) + if errno != 0 { + return 0, errno + } + return newoffset, nil +} + +// Vsyscalls on amd64. +//sysnb Gettimeofday(tv *Timeval) (err error) +//sysnb Time(t *Time_t) (tt Time_t, err error) + +// On x86 Linux, all the socket calls go through an extra indirection, +// I think because the 5-register system call interface can't handle +// the 6-argument calls like sendto and recvfrom. Instead the +// arguments to the underlying system call are the number below +// and a pointer to an array of uintptr. We hide the pointer in the +// socketcall assembly to avoid allocation on every system call. + +const ( + // see linux/net.h + _SOCKET = 1 + _BIND = 2 + _CONNECT = 3 + _LISTEN = 4 + _ACCEPT = 5 + _GETSOCKNAME = 6 + _GETPEERNAME = 7 + _SOCKETPAIR = 8 + _SEND = 9 + _RECV = 10 + _SENDTO = 11 + _RECVFROM = 12 + _SHUTDOWN = 13 + _SETSOCKOPT = 14 + _GETSOCKOPT = 15 + _SENDMSG = 16 + _RECVMSG = 17 + _ACCEPT4 = 18 + _RECVMMSG = 19 + _SENDMMSG = 20 +) + +func socketcall(call int, a0, a1, a2, a3, a4, a5 uintptr) (n int, err syscall.Errno) +func rawsocketcall(call int, a0, a1, a2, a3, a4, a5 uintptr) (n int, err syscall.Errno) + +func accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error) { + fd, e := socketcall(_ACCEPT, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)), 0, 0, 0) + if e != 0 { + err = e + } + return +} + +func accept4(s int, rsa *RawSockaddrAny, addrlen *_Socklen, flags int) (fd int, err error) { + fd, e := socketcall(_ACCEPT4, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)), uintptr(flags), 0, 0) + if e != 0 { + err = e + } + return +} + +func getsockname(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) { + _, e := rawsocketcall(_GETSOCKNAME, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)), 0, 0, 0) + if e != 0 { + err = e + } + return +} + +func getpeername(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) { + _, e := rawsocketcall(_GETPEERNAME, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)), 0, 0, 0) + if e != 0 { + err = e + } + return +} + +func socketpair(domain int, typ int, flags int, fd *[2]int32) (err error) { + _, e := rawsocketcall(_SOCKETPAIR, uintptr(domain), uintptr(typ), uintptr(flags), uintptr(unsafe.Pointer(fd)), 0, 0) + if e != 0 { + err = e + } + return +} + +func bind(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) { + _, e := socketcall(_BIND, uintptr(s), uintptr(addr), uintptr(addrlen), 0, 0, 0) + if e != 0 { + err = e + } + return +} + +func connect(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) { + _, e := socketcall(_CONNECT, uintptr(s), uintptr(addr), uintptr(addrlen), 0, 0, 0) + if e != 0 { + err = e + } + return +} + +func socket(domain int, typ int, proto int) (fd int, err error) { + fd, e := rawsocketcall(_SOCKET, uintptr(domain), uintptr(typ), uintptr(proto), 0, 0, 0) + if e != 0 { + err = e + } + return +} + +func getsockopt(s int, level int, name int, val unsafe.Pointer, vallen *_Socklen) (err error) { + _, e := socketcall(_GETSOCKOPT, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(unsafe.Pointer(vallen)), 0) + if e != 0 { + err = 
e + } + return +} + +func setsockopt(s int, level int, name int, val unsafe.Pointer, vallen uintptr) (err error) { + _, e := socketcall(_SETSOCKOPT, uintptr(s), uintptr(level), uintptr(name), uintptr(val), vallen, 0) + if e != 0 { + err = e + } + return +} + +func recvfrom(s int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Socklen) (n int, err error) { + var base uintptr + if len(p) > 0 { + base = uintptr(unsafe.Pointer(&p[0])) + } + n, e := socketcall(_RECVFROM, uintptr(s), base, uintptr(len(p)), uintptr(flags), uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(fromlen))) + if e != 0 { + err = e + } + return +} + +func sendto(s int, p []byte, flags int, to unsafe.Pointer, addrlen _Socklen) (err error) { + var base uintptr + if len(p) > 0 { + base = uintptr(unsafe.Pointer(&p[0])) + } + _, e := socketcall(_SENDTO, uintptr(s), base, uintptr(len(p)), uintptr(flags), uintptr(to), uintptr(addrlen)) + if e != 0 { + err = e + } + return +} + +func recvmsg(s int, msg *Msghdr, flags int) (n int, err error) { + n, e := socketcall(_RECVMSG, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags), 0, 0, 0) + if e != 0 { + err = e + } + return +} + +func sendmsg(s int, msg *Msghdr, flags int) (n int, err error) { + n, e := socketcall(_SENDMSG, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags), 0, 0, 0) + if e != 0 { + err = e + } + return +} + +func Listen(s int, n int) (err error) { + _, e := socketcall(_LISTEN, uintptr(s), uintptr(n), 0, 0, 0, 0) + if e != 0 { + err = e + } + return +} + +func Shutdown(s, how int) (err error) { + _, e := socketcall(_SHUTDOWN, uintptr(s), uintptr(how), 0, 0, 0, 0) + if e != 0 { + err = e + } + return +} + +func Fstatfs(fd int, buf *Statfs_t) (err error) { + _, _, e := Syscall(SYS_FSTATFS64, uintptr(fd), unsafe.Sizeof(*buf), uintptr(unsafe.Pointer(buf))) + if e != 0 { + err = e + } + return +} + +func Statfs(path string, buf *Statfs_t) (err error) { + pathp, err := BytePtrFromString(path) + if err != nil { + return err + } + _, _, e := Syscall(SYS_STATFS64, uintptr(unsafe.Pointer(pathp)), unsafe.Sizeof(*buf), uintptr(unsafe.Pointer(buf))) + if e != 0 { + err = e + } + return +} + +func (r *PtraceRegs) PC() uint64 { return uint64(uint32(r.Eip)) } + +func (r *PtraceRegs) SetPC(pc uint64) { r.Eip = int32(pc) } + +func (iov *Iovec) SetLen(length int) { + iov.Len = uint32(length) +} + +func (msghdr *Msghdr) SetControllen(length int) { + msghdr.Controllen = uint32(length) +} + +func (cmsg *Cmsghdr) SetLen(length int) { + cmsg.Len = uint32(length) +} diff --git a/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/syscall_linux_amd64.go b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/syscall_linux_amd64.go new file mode 100644 index 000000000..ae70c2afc --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/syscall_linux_amd64.go @@ -0,0 +1,146 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
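For illustration, and not part of the vendored sources: the socketcall indirection above never shows through to callers, since the exported API is identical on every architecture. A minimal sketch that exercises it, assuming the standalone golang.org/x/sys/unix import path rather than this vendored copy:

package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

func main() {
	// On linux/386 this goes through rawsocketcall(_SOCKETPAIR, ...);
	// on linux/amd64 it is the direct socketpair(2) system call.
	fds, err := unix.Socketpair(unix.AF_UNIX, unix.SOCK_STREAM, 0)
	if err != nil {
		fmt.Println("socketpair:", err)
		return
	}
	defer unix.Close(fds[0])
	defer unix.Close(fds[1])

	// Write on one end, read on the other, through the same wrappers.
	if _, err := unix.Write(fds[0], []byte("ping")); err != nil {
		fmt.Println("write:", err)
		return
	}
	buf := make([]byte, 4)
	n, _ := unix.Read(fds[1], buf)
	fmt.Println("got:", string(buf[:n]))
}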
+ +// +build amd64,linux + +package unix + +import "syscall" + +//sys Dup2(oldfd int, newfd int) (err error) +//sys Fadvise(fd int, offset int64, length int64, advice int) (err error) = SYS_FADVISE64 +//sys Fchown(fd int, uid int, gid int) (err error) +//sys Fstat(fd int, stat *Stat_t) (err error) +//sys Fstatfs(fd int, buf *Statfs_t) (err error) +//sys Ftruncate(fd int, length int64) (err error) +//sysnb Getegid() (egid int) +//sysnb Geteuid() (euid int) +//sysnb Getgid() (gid int) +//sysnb Getrlimit(resource int, rlim *Rlimit) (err error) +//sysnb Getuid() (uid int) +//sysnb InotifyInit() (fd int, err error) +//sys Ioperm(from int, num int, on int) (err error) +//sys Iopl(level int) (err error) +//sys Lchown(path string, uid int, gid int) (err error) +//sys Listen(s int, n int) (err error) +//sys Lstat(path string, stat *Stat_t) (err error) +//sys Pread(fd int, p []byte, offset int64) (n int, err error) = SYS_PREAD64 +//sys Pwrite(fd int, p []byte, offset int64) (n int, err error) = SYS_PWRITE64 +//sys Seek(fd int, offset int64, whence int) (off int64, err error) = SYS_LSEEK +//sys Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err error) +//sys sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) +//sys Setfsgid(gid int) (err error) +//sys Setfsuid(uid int) (err error) +//sysnb Setregid(rgid int, egid int) (err error) +//sysnb Setresgid(rgid int, egid int, sgid int) (err error) +//sysnb Setresuid(ruid int, euid int, suid int) (err error) +//sysnb Setrlimit(resource int, rlim *Rlimit) (err error) +//sysnb Setreuid(ruid int, euid int) (err error) +//sys Shutdown(fd int, how int) (err error) +//sys Splice(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n int64, err error) +//sys Stat(path string, stat *Stat_t) (err error) +//sys Statfs(path string, buf *Statfs_t) (err error) +//sys SyncFileRange(fd int, off int64, n int64, flags int) (err error) +//sys Truncate(path string, length int64) (err error) +//sys accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error) +//sys accept4(s int, rsa *RawSockaddrAny, addrlen *_Socklen, flags int) (fd int, err error) +//sys bind(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) +//sys connect(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) +//sysnb getgroups(n int, list *_Gid_t) (nn int, err error) +//sysnb setgroups(n int, list *_Gid_t) (err error) +//sys getsockopt(s int, level int, name int, val unsafe.Pointer, vallen *_Socklen) (err error) +//sys setsockopt(s int, level int, name int, val unsafe.Pointer, vallen uintptr) (err error) +//sysnb socket(domain int, typ int, proto int) (fd int, err error) +//sysnb socketpair(domain int, typ int, proto int, fd *[2]int32) (err error) +//sysnb getpeername(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) +//sysnb getsockname(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) +//sys recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Socklen) (n int, err error) +//sys sendto(s int, buf []byte, flags int, to unsafe.Pointer, addrlen _Socklen) (err error) +//sys recvmsg(s int, msg *Msghdr, flags int) (n int, err error) +//sys sendmsg(s int, msg *Msghdr, flags int) (n int, err error) +//sys mmap(addr uintptr, length uintptr, prot int, flags int, fd int, offset int64) (xaddr uintptr, err error) + +//go:noescape +func gettimeofday(tv *Timeval) (err syscall.Errno) + +func Gettimeofday(tv *Timeval) (err error) { + errno := gettimeofday(tv) + if errno != 0 { + return errno + } + 
return nil +} + +func Getpagesize() int { return 4096 } + +func Time(t *Time_t) (tt Time_t, err error) { + var tv Timeval + errno := gettimeofday(&tv) + if errno != 0 { + return 0, errno + } + if t != nil { + *t = Time_t(tv.Sec) + } + return Time_t(tv.Sec), nil +} + +func TimespecToNsec(ts Timespec) int64 { return int64(ts.Sec)*1e9 + int64(ts.Nsec) } + +func NsecToTimespec(nsec int64) (ts Timespec) { + ts.Sec = nsec / 1e9 + ts.Nsec = nsec % 1e9 + return +} + +func TimevalToNsec(tv Timeval) int64 { return int64(tv.Sec)*1e9 + int64(tv.Usec)*1e3 } + +func NsecToTimeval(nsec int64) (tv Timeval) { + nsec += 999 // round up to microsecond + tv.Sec = nsec / 1e9 + tv.Usec = nsec % 1e9 / 1e3 + return +} + +//sysnb pipe(p *[2]_C_int) (err error) + +func Pipe(p []int) (err error) { + if len(p) != 2 { + return EINVAL + } + var pp [2]_C_int + err = pipe(&pp) + p[0] = int(pp[0]) + p[1] = int(pp[1]) + return +} + +//sysnb pipe2(p *[2]_C_int, flags int) (err error) + +func Pipe2(p []int, flags int) (err error) { + if len(p) != 2 { + return EINVAL + } + var pp [2]_C_int + err = pipe2(&pp, flags) + p[0] = int(pp[0]) + p[1] = int(pp[1]) + return +} + +func (r *PtraceRegs) PC() uint64 { return r.Rip } + +func (r *PtraceRegs) SetPC(pc uint64) { r.Rip = pc } + +func (iov *Iovec) SetLen(length int) { + iov.Len = uint64(length) +} + +func (msghdr *Msghdr) SetControllen(length int) { + msghdr.Controllen = uint64(length) +} + +func (cmsg *Cmsghdr) SetLen(length int) { + cmsg.Len = uint64(length) +} diff --git a/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/syscall_linux_arm.go b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/syscall_linux_arm.go new file mode 100644 index 000000000..abc41c3ea --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/syscall_linux_arm.go @@ -0,0 +1,233 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build arm,linux + +package unix + +import ( + "syscall" + "unsafe" +) + +func Getpagesize() int { return 4096 } + +func TimespecToNsec(ts Timespec) int64 { return int64(ts.Sec)*1e9 + int64(ts.Nsec) } + +func NsecToTimespec(nsec int64) (ts Timespec) { + ts.Sec = int32(nsec / 1e9) + ts.Nsec = int32(nsec % 1e9) + return +} + +func NsecToTimeval(nsec int64) (tv Timeval) { + nsec += 999 // round up to microsecond + tv.Sec = int32(nsec / 1e9) + tv.Usec = int32(nsec % 1e9 / 1e3) + return +} + +func Pipe(p []int) (err error) { + if len(p) != 2 { + return EINVAL + } + var pp [2]_C_int + err = pipe2(&pp, 0) + p[0] = int(pp[0]) + p[1] = int(pp[1]) + return +} + +//sysnb pipe2(p *[2]_C_int, flags int) (err error) + +func Pipe2(p []int, flags int) (err error) { + if len(p) != 2 { + return EINVAL + } + var pp [2]_C_int + err = pipe2(&pp, flags) + p[0] = int(pp[0]) + p[1] = int(pp[1]) + return +} + +// Underlying system call writes to newoffset via pointer. +// Implemented in assembly to avoid allocation. 
+func seek(fd int, offset int64, whence int) (newoffset int64, err syscall.Errno) + +func Seek(fd int, offset int64, whence int) (newoffset int64, err error) { + newoffset, errno := seek(fd, offset, whence) + if errno != 0 { + return 0, errno + } + return newoffset, nil +} + +//sys accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error) +//sys accept4(s int, rsa *RawSockaddrAny, addrlen *_Socklen, flags int) (fd int, err error) +//sys bind(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) +//sys connect(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) +//sysnb getgroups(n int, list *_Gid_t) (nn int, err error) = SYS_GETGROUPS32 +//sysnb setgroups(n int, list *_Gid_t) (err error) = SYS_SETGROUPS32 +//sys getsockopt(s int, level int, name int, val unsafe.Pointer, vallen *_Socklen) (err error) +//sys setsockopt(s int, level int, name int, val unsafe.Pointer, vallen uintptr) (err error) +//sysnb socket(domain int, typ int, proto int) (fd int, err error) +//sysnb getpeername(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) +//sysnb getsockname(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) +//sys recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Socklen) (n int, err error) +//sys sendto(s int, buf []byte, flags int, to unsafe.Pointer, addrlen _Socklen) (err error) +//sysnb socketpair(domain int, typ int, flags int, fd *[2]int32) (err error) +//sys recvmsg(s int, msg *Msghdr, flags int) (n int, err error) +//sys sendmsg(s int, msg *Msghdr, flags int) (n int, err error) + +// 64-bit file system and 32-bit uid calls +// (16-bit uid calls are not always supported in newer kernels) +//sys Dup2(oldfd int, newfd int) (err error) +//sys Fchown(fd int, uid int, gid int) (err error) = SYS_FCHOWN32 +//sys Fstat(fd int, stat *Stat_t) (err error) = SYS_FSTAT64 +//sysnb Getegid() (egid int) = SYS_GETEGID32 +//sysnb Geteuid() (euid int) = SYS_GETEUID32 +//sysnb Getgid() (gid int) = SYS_GETGID32 +//sysnb Getuid() (uid int) = SYS_GETUID32 +//sysnb InotifyInit() (fd int, err error) +//sys Lchown(path string, uid int, gid int) (err error) = SYS_LCHOWN32 +//sys Listen(s int, n int) (err error) +//sys Lstat(path string, stat *Stat_t) (err error) = SYS_LSTAT64 +//sys sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) = SYS_SENDFILE64 +//sys Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err error) = SYS__NEWSELECT +//sys Setfsgid(gid int) (err error) = SYS_SETFSGID32 +//sys Setfsuid(uid int) (err error) = SYS_SETFSUID32 +//sysnb Setregid(rgid int, egid int) (err error) = SYS_SETREGID32 +//sysnb Setresgid(rgid int, egid int, sgid int) (err error) = SYS_SETRESGID32 +//sysnb Setresuid(ruid int, euid int, suid int) (err error) = SYS_SETRESUID32 +//sysnb Setreuid(ruid int, euid int) (err error) = SYS_SETREUID32 +//sys Shutdown(fd int, how int) (err error) +//sys Splice(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n int, err error) +//sys Stat(path string, stat *Stat_t) (err error) = SYS_STAT64 + +// Vsyscalls on amd64. 
+//sysnb Gettimeofday(tv *Timeval) (err error) +//sysnb Time(t *Time_t) (tt Time_t, err error) + +//sys Pread(fd int, p []byte, offset int64) (n int, err error) = SYS_PREAD64 +//sys Pwrite(fd int, p []byte, offset int64) (n int, err error) = SYS_PWRITE64 +//sys Truncate(path string, length int64) (err error) = SYS_TRUNCATE64 +//sys Ftruncate(fd int, length int64) (err error) = SYS_FTRUNCATE64 + +func Fadvise(fd int, offset int64, length int64, advice int) (err error) { + _, _, e1 := Syscall6(SYS_ARM_FADVISE64_64, uintptr(fd), uintptr(advice), uintptr(offset), uintptr(offset>>32), uintptr(length), uintptr(length>>32)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +//sys mmap2(addr uintptr, length uintptr, prot int, flags int, fd int, pageOffset uintptr) (xaddr uintptr, err error) + +func Fstatfs(fd int, buf *Statfs_t) (err error) { + _, _, e := Syscall(SYS_FSTATFS64, uintptr(fd), unsafe.Sizeof(*buf), uintptr(unsafe.Pointer(buf))) + if e != 0 { + err = e + } + return +} + +func Statfs(path string, buf *Statfs_t) (err error) { + pathp, err := BytePtrFromString(path) + if err != nil { + return err + } + _, _, e := Syscall(SYS_STATFS64, uintptr(unsafe.Pointer(pathp)), unsafe.Sizeof(*buf), uintptr(unsafe.Pointer(buf))) + if e != 0 { + err = e + } + return +} + +func mmap(addr uintptr, length uintptr, prot int, flags int, fd int, offset int64) (xaddr uintptr, err error) { + page := uintptr(offset / 4096) + if offset != int64(page)*4096 { + return 0, EINVAL + } + return mmap2(addr, length, prot, flags, fd, page) +} + +type rlimit32 struct { + Cur uint32 + Max uint32 +} + +//sysnb getrlimit(resource int, rlim *rlimit32) (err error) = SYS_GETRLIMIT + +const rlimInf32 = ^uint32(0) +const rlimInf64 = ^uint64(0) + +func Getrlimit(resource int, rlim *Rlimit) (err error) { + err = prlimit(0, resource, nil, rlim) + if err != ENOSYS { + return err + } + + rl := rlimit32{} + err = getrlimit(resource, &rl) + if err != nil { + return + } + + if rl.Cur == rlimInf32 { + rlim.Cur = rlimInf64 + } else { + rlim.Cur = uint64(rl.Cur) + } + + if rl.Max == rlimInf32 { + rlim.Max = rlimInf64 + } else { + rlim.Max = uint64(rl.Max) + } + return +} + +//sysnb setrlimit(resource int, rlim *rlimit32) (err error) = SYS_SETRLIMIT + +func Setrlimit(resource int, rlim *Rlimit) (err error) { + err = prlimit(0, resource, rlim, nil) + if err != ENOSYS { + return err + } + + rl := rlimit32{} + if rlim.Cur == rlimInf64 { + rl.Cur = rlimInf32 + } else if rlim.Cur < uint64(rlimInf32) { + rl.Cur = uint32(rlim.Cur) + } else { + return EINVAL + } + if rlim.Max == rlimInf64 { + rl.Max = rlimInf32 + } else if rlim.Max < uint64(rlimInf32) { + rl.Max = uint32(rlim.Max) + } else { + return EINVAL + } + + return setrlimit(resource, &rl) +} + +func (r *PtraceRegs) PC() uint64 { return uint64(r.Uregs[15]) } + +func (r *PtraceRegs) SetPC(pc uint64) { r.Uregs[15] = uint32(pc) } + +func (iov *Iovec) SetLen(length int) { + iov.Len = uint32(length) +} + +func (msghdr *Msghdr) SetControllen(length int) { + msghdr.Controllen = uint32(length) +} + +func (cmsg *Cmsghdr) SetLen(length int) { + cmsg.Len = uint32(length) +} diff --git a/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/syscall_linux_arm64.go b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/syscall_linux_arm64.go new file mode 100644 index 000000000..f3d72dfd3 --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/syscall_linux_arm64.go @@ -0,0 +1,150 @@ +// Copyright 2015 The Go Authors. 
All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build arm64,linux + +package unix + +const _SYS_dup = SYS_DUP3 + +//sys Fchown(fd int, uid int, gid int) (err error) +//sys Fstat(fd int, stat *Stat_t) (err error) +//sys Fstatat(fd int, path string, stat *Stat_t, flags int) (err error) +//sys Fstatfs(fd int, buf *Statfs_t) (err error) +//sys Ftruncate(fd int, length int64) (err error) +//sysnb Getegid() (egid int) +//sysnb Geteuid() (euid int) +//sysnb Getgid() (gid int) +//sysnb Getrlimit(resource int, rlim *Rlimit) (err error) +//sysnb Getuid() (uid int) +//sys Listen(s int, n int) (err error) +//sys Pread(fd int, p []byte, offset int64) (n int, err error) = SYS_PREAD64 +//sys Pwrite(fd int, p []byte, offset int64) (n int, err error) = SYS_PWRITE64 +//sys Seek(fd int, offset int64, whence int) (off int64, err error) = SYS_LSEEK +//sys Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err error) = SYS_PSELECT6 +//sys sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) +//sys Setfsgid(gid int) (err error) +//sys Setfsuid(uid int) (err error) +//sysnb Setregid(rgid int, egid int) (err error) +//sysnb Setresgid(rgid int, egid int, sgid int) (err error) +//sysnb Setresuid(ruid int, euid int, suid int) (err error) +//sysnb Setrlimit(resource int, rlim *Rlimit) (err error) +//sysnb Setreuid(ruid int, euid int) (err error) +//sys Shutdown(fd int, how int) (err error) +//sys Splice(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n int64, err error) + +func Stat(path string, stat *Stat_t) (err error) { + return Fstatat(AT_FDCWD, path, stat, 0) +} + +func Lchown(path string, uid int, gid int) (err error) { + return Fchownat(AT_FDCWD, path, uid, gid, AT_SYMLINK_NOFOLLOW) +} + +func Lstat(path string, stat *Stat_t) (err error) { + return Fstatat(AT_FDCWD, path, stat, AT_SYMLINK_NOFOLLOW) +} + +//sys Statfs(path string, buf *Statfs_t) (err error) +//sys SyncFileRange(fd int, off int64, n int64, flags int) (err error) +//sys Truncate(path string, length int64) (err error) +//sys accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error) +//sys accept4(s int, rsa *RawSockaddrAny, addrlen *_Socklen, flags int) (fd int, err error) +//sys bind(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) +//sys connect(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) +//sysnb getgroups(n int, list *_Gid_t) (nn int, err error) +//sysnb setgroups(n int, list *_Gid_t) (err error) +//sys getsockopt(s int, level int, name int, val unsafe.Pointer, vallen *_Socklen) (err error) +//sys setsockopt(s int, level int, name int, val unsafe.Pointer, vallen uintptr) (err error) +//sysnb socket(domain int, typ int, proto int) (fd int, err error) +//sysnb socketpair(domain int, typ int, proto int, fd *[2]int32) (err error) +//sysnb getpeername(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) +//sysnb getsockname(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) +//sys recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Socklen) (n int, err error) +//sys sendto(s int, buf []byte, flags int, to unsafe.Pointer, addrlen _Socklen) (err error) +//sys recvmsg(s int, msg *Msghdr, flags int) (n int, err error) +//sys sendmsg(s int, msg *Msghdr, flags int) (n int, err error) +//sys mmap(addr uintptr, length uintptr, prot int, flags int, fd int, offset int64) (xaddr uintptr, err error) + +func Getpagesize() int { return 65536 
} + +//sysnb Gettimeofday(tv *Timeval) (err error) +//sysnb Time(t *Time_t) (tt Time_t, err error) + +func TimespecToNsec(ts Timespec) int64 { return int64(ts.Sec)*1e9 + int64(ts.Nsec) } + +func NsecToTimespec(nsec int64) (ts Timespec) { + ts.Sec = nsec / 1e9 + ts.Nsec = nsec % 1e9 + return +} + +func TimevalToNsec(tv Timeval) int64 { return int64(tv.Sec)*1e9 + int64(tv.Usec)*1e3 } + +func NsecToTimeval(nsec int64) (tv Timeval) { + nsec += 999 // round up to microsecond + tv.Sec = nsec / 1e9 + tv.Usec = nsec % 1e9 / 1e3 + return +} + +func Pipe(p []int) (err error) { + if len(p) != 2 { + return EINVAL + } + var pp [2]_C_int + err = pipe2(&pp, 0) + p[0] = int(pp[0]) + p[1] = int(pp[1]) + return +} + +//sysnb pipe2(p *[2]_C_int, flags int) (err error) + +func Pipe2(p []int, flags int) (err error) { + if len(p) != 2 { + return EINVAL + } + var pp [2]_C_int + err = pipe2(&pp, flags) + p[0] = int(pp[0]) + p[1] = int(pp[1]) + return +} + +func (r *PtraceRegs) PC() uint64 { return r.Pc } + +func (r *PtraceRegs) SetPC(pc uint64) { r.Pc = pc } + +func (iov *Iovec) SetLen(length int) { + iov.Len = uint64(length) +} + +func (msghdr *Msghdr) SetControllen(length int) { + msghdr.Controllen = uint64(length) +} + +func (cmsg *Cmsghdr) SetLen(length int) { + cmsg.Len = uint64(length) +} + +func InotifyInit() (fd int, err error) { + return InotifyInit1(0) +} + +// TODO(dfc): constants that should be in zsysnum_linux_arm64.go, remove +// these when the deprecated syscalls that the syscall package relies on +// are removed. +const ( + SYS_GETPGRP = 1060 + SYS_UTIMES = 1037 + SYS_FUTIMESAT = 1066 + SYS_PAUSE = 1061 + SYS_USTAT = 1070 + SYS_UTIME = 1063 + SYS_LCHOWN = 1032 + SYS_TIME = 1062 + SYS_EPOLL_CREATE = 1042 + SYS_EPOLL_WAIT = 1069 +) diff --git a/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/syscall_linux_ppc64x.go b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/syscall_linux_ppc64x.go new file mode 100644 index 000000000..67eed6334 --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/syscall_linux_ppc64x.go @@ -0,0 +1,96 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
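For illustration, and not part of the vendored sources: arm64 has no legacy inotify_init(2), which is why InotifyInit above simply calls InotifyInit1(0). A short watcher sketch, assuming the standalone golang.org/x/sys/unix import path and that /tmp exists:

package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

func main() {
	// InotifyInit1(0) is exactly what InotifyInit does on arm64,
	// and it works on the other Linux ports as well.
	fd, err := unix.InotifyInit1(0)
	if err != nil {
		fmt.Println("inotify_init1:", err)
		return
	}
	defer unix.Close(fd)

	wd, err := unix.InotifyAddWatch(fd, "/tmp", unix.IN_CREATE|unix.IN_DELETE)
	if err != nil {
		fmt.Println("inotify_add_watch:", err)
		return
	}
	defer unix.InotifyRmWatch(fd, uint32(wd))

	// Blocks until something is created or deleted under /tmp.
	buf := make([]byte, 4096)
	n, err := unix.Read(fd, buf)
	fmt.Println("read", n, "bytes of events, err =", err)
}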
+ +// +build linux +// +build ppc64 ppc64le + +package unix + +//sys Fchown(fd int, uid int, gid int) (err error) +//sys Fstat(fd int, stat *Stat_t) (err error) +//sys Fstatfs(fd int, buf *Statfs_t) (err error) +//sys Ftruncate(fd int, length int64) (err error) +//sysnb Getegid() (egid int) +//sysnb Geteuid() (euid int) +//sysnb Getgid() (gid int) +//sysnb Getrlimit(resource int, rlim *Rlimit) (err error) = SYS_UGETRLIMIT +//sysnb Getuid() (uid int) +//sys Ioperm(from int, num int, on int) (err error) +//sys Iopl(level int) (err error) +//sys Lchown(path string, uid int, gid int) (err error) +//sys Listen(s int, n int) (err error) +//sys Lstat(path string, stat *Stat_t) (err error) +//sys Pread(fd int, p []byte, offset int64) (n int, err error) = SYS_PREAD64 +//sys Pwrite(fd int, p []byte, offset int64) (n int, err error) = SYS_PWRITE64 +//sys Seek(fd int, offset int64, whence int) (off int64, err error) = SYS_LSEEK +//sys Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err error) +//sys sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) +//sys Setfsgid(gid int) (err error) +//sys Setfsuid(uid int) (err error) +//sysnb Setregid(rgid int, egid int) (err error) +//sysnb Setresgid(rgid int, egid int, sgid int) (err error) +//sysnb Setresuid(ruid int, euid int, suid int) (err error) +//sysnb Setrlimit(resource int, rlim *Rlimit) (err error) +//sysnb Setreuid(ruid int, euid int) (err error) +//sys Shutdown(fd int, how int) (err error) +//sys Splice(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n int64, err error) +//sys Stat(path string, stat *Stat_t) (err error) +//sys Statfs(path string, buf *Statfs_t) (err error) +//sys SyncFileRange(fd int, off int64, n int64, flags int) (err error) = SYS_SYNC_FILE_RANGE2 +//sys Truncate(path string, length int64) (err error) +//sys accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error) +//sys accept4(s int, rsa *RawSockaddrAny, addrlen *_Socklen, flags int) (fd int, err error) +//sys bind(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) +//sys connect(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) +//sysnb getgroups(n int, list *_Gid_t) (nn int, err error) +//sysnb setgroups(n int, list *_Gid_t) (err error) +//sys getsockopt(s int, level int, name int, val unsafe.Pointer, vallen *_Socklen) (err error) +//sys setsockopt(s int, level int, name int, val unsafe.Pointer, vallen uintptr) (err error) +//sysnb socket(domain int, typ int, proto int) (fd int, err error) +//sysnb socketpair(domain int, typ int, proto int, fd *[2]int32) (err error) +//sysnb getpeername(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) +//sysnb getsockname(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) +//sys recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Socklen) (n int, err error) +//sys sendto(s int, buf []byte, flags int, to unsafe.Pointer, addrlen _Socklen) (err error) +//sys recvmsg(s int, msg *Msghdr, flags int) (n int, err error) +//sys sendmsg(s int, msg *Msghdr, flags int) (n int, err error) +//sys mmap(addr uintptr, length uintptr, prot int, flags int, fd int, offset int64) (xaddr uintptr, err error) + +func Getpagesize() int { return 65536 } + +//sysnb Gettimeofday(tv *Timeval) (err error) +//sysnb Time(t *Time_t) (tt Time_t, err error) + +func TimespecToNsec(ts Timespec) int64 { return int64(ts.Sec)*1e9 + int64(ts.Nsec) } + +func NsecToTimespec(nsec int64) (ts Timespec) { + ts.Sec = nsec / 1e9 + ts.Nsec = nsec % 1e9 + 
return
+}
+
+func TimevalToNsec(tv Timeval) int64 { return int64(tv.Sec)*1e9 + int64(tv.Usec)*1e3 }
+
+func NsecToTimeval(nsec int64) (tv Timeval) {
+	nsec += 999 // round up to microsecond
+	tv.Sec = nsec / 1e9
+	tv.Usec = nsec % 1e9 / 1e3
+	return
+}
+
+func (r *PtraceRegs) PC() uint64 { return r.Nip }
+
+func (r *PtraceRegs) SetPC(pc uint64) { r.Nip = pc }
+
+func (iov *Iovec) SetLen(length int) {
+	iov.Len = uint64(length)
+}
+
+func (msghdr *Msghdr) SetControllen(length int) {
+	msghdr.Controllen = uint64(length)
+}
+
+func (cmsg *Cmsghdr) SetLen(length int) {
+	cmsg.Len = uint64(length)
+}
diff --git a/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/syscall_netbsd.go b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/syscall_netbsd.go
new file mode 100644
index 000000000..c4e945cd6
--- /dev/null
+++ b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/syscall_netbsd.go
@@ -0,0 +1,492 @@
+// Copyright 2009,2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// NetBSD system calls.
+// This file is compiled as ordinary Go code,
+// but it is also input to mksyscall,
+// which parses the //sys lines and generates system call stubs.
+// Note that sometimes we use a lowercase //sys name and wrap
+// it in our own nicer implementation, either here or in
+// syscall_bsd.go or syscall_unix.go.
+
+package unix
+
+import (
+	"syscall"
+	"unsafe"
+)
+
+type SockaddrDatalink struct {
+	Len    uint8
+	Family uint8
+	Index  uint16
+	Type   uint8
+	Nlen   uint8
+	Alen   uint8
+	Slen   uint8
+	Data   [12]int8
+	raw    RawSockaddrDatalink
+}
+
+func Syscall9(trap, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r1, r2 uintptr, err syscall.Errno)
+
+func sysctlNodes(mib []_C_int) (nodes []Sysctlnode, err error) {
+	var olen uintptr
+
+	// Get a list of all sysctl nodes below the given MIB by performing
+	// a sysctl for the given MIB with CTL_QUERY appended.
+	mib = append(mib, CTL_QUERY)
+	qnode := Sysctlnode{Flags: SYSCTL_VERS_1}
+	qp := (*byte)(unsafe.Pointer(&qnode))
+	sz := unsafe.Sizeof(qnode)
+	if err = sysctl(mib, nil, &olen, qp, sz); err != nil {
+		return nil, err
+	}
+
+	// Now that we know the size, get the actual nodes.
+	nodes = make([]Sysctlnode, olen/sz)
+	np := (*byte)(unsafe.Pointer(&nodes[0]))
+	if err = sysctl(mib, np, &olen, qp, sz); err != nil {
+		return nil, err
+	}
+
+	return nodes, nil
+}
+
+func nametomib(name string) (mib []_C_int, err error) {
+
+	// Split name into components.
+	var parts []string
+	last := 0
+	for i := 0; i < len(name); i++ {
+		if name[i] == '.' {
+			parts = append(parts, name[last:i])
+			last = i + 1
+		}
+	}
+	parts = append(parts, name[last:])
+
+	// Discover the nodes and construct the MIB OID.
+	for partno, part := range parts {
+		nodes, err := sysctlNodes(mib)
+		if err != nil {
+			return nil, err
+		}
+		for _, node := range nodes {
+			n := make([]byte, 0)
+			for i := range node.Name {
+				if node.Name[i] != 0 {
+					n = append(n, byte(node.Name[i]))
+				}
+			}
+			if string(n) == part {
+				mib = append(mib, _C_int(node.Num))
+				break
+			}
+		}
+		if len(mib) != partno+1 {
+			return nil, EINVAL
+		}
+	}
+
+	return mib, nil
+}
+
+// ParseDirent parses up to max directory entries in buf,
+// appending the names to names. It returns the number of
+// bytes consumed from buf, the number of entries added
+// to names, and the new names slice.
+func ParseDirent(buf []byte, max int, names []string) (consumed int, count int, newnames []string) { + origlen := len(buf) + for max != 0 && len(buf) > 0 { + dirent := (*Dirent)(unsafe.Pointer(&buf[0])) + if dirent.Reclen == 0 { + buf = nil + break + } + buf = buf[dirent.Reclen:] + if dirent.Fileno == 0 { // File absent in directory. + continue + } + bytes := (*[10000]byte)(unsafe.Pointer(&dirent.Name[0])) + var name = string(bytes[0:dirent.Namlen]) + if name == "." || name == ".." { // Useless names + continue + } + max-- + count++ + names = append(names, name) + } + return origlen - len(buf), count, names +} + +//sysnb pipe() (fd1 int, fd2 int, err error) +func Pipe(p []int) (err error) { + if len(p) != 2 { + return EINVAL + } + p[0], p[1], err = pipe() + return +} + +//sys getdents(fd int, buf []byte) (n int, err error) +func Getdirentries(fd int, buf []byte, basep *uintptr) (n int, err error) { + return getdents(fd, buf) +} + +// TODO +func sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) { + return -1, ENOSYS +} + +/* + * Exposed directly + */ +//sys Access(path string, mode uint32) (err error) +//sys Adjtime(delta *Timeval, olddelta *Timeval) (err error) +//sys Chdir(path string) (err error) +//sys Chflags(path string, flags int) (err error) +//sys Chmod(path string, mode uint32) (err error) +//sys Chown(path string, uid int, gid int) (err error) +//sys Chroot(path string) (err error) +//sys Close(fd int) (err error) +//sys Dup(fd int) (nfd int, err error) +//sys Dup2(from int, to int) (err error) +//sys Exit(code int) +//sys Fchdir(fd int) (err error) +//sys Fchflags(fd int, flags int) (err error) +//sys Fchmod(fd int, mode uint32) (err error) +//sys Fchown(fd int, uid int, gid int) (err error) +//sys Flock(fd int, how int) (err error) +//sys Fpathconf(fd int, name int) (val int, err error) +//sys Fstat(fd int, stat *Stat_t) (err error) +//sys Fsync(fd int) (err error) +//sys Ftruncate(fd int, length int64) (err error) +//sysnb Getegid() (egid int) +//sysnb Geteuid() (uid int) +//sysnb Getgid() (gid int) +//sysnb Getpgid(pid int) (pgid int, err error) +//sysnb Getpgrp() (pgrp int) +//sysnb Getpid() (pid int) +//sysnb Getppid() (ppid int) +//sys Getpriority(which int, who int) (prio int, err error) +//sysnb Getrlimit(which int, lim *Rlimit) (err error) +//sysnb Getrusage(who int, rusage *Rusage) (err error) +//sysnb Getsid(pid int) (sid int, err error) +//sysnb Gettimeofday(tv *Timeval) (err error) +//sysnb Getuid() (uid int) +//sys Issetugid() (tainted bool) +//sys Kill(pid int, signum syscall.Signal) (err error) +//sys Kqueue() (fd int, err error) +//sys Lchown(path string, uid int, gid int) (err error) +//sys Link(path string, link string) (err error) +//sys Listen(s int, backlog int) (err error) +//sys Lstat(path string, stat *Stat_t) (err error) +//sys Mkdir(path string, mode uint32) (err error) +//sys Mkfifo(path string, mode uint32) (err error) +//sys Mknod(path string, mode uint32, dev int) (err error) +//sys Mlock(b []byte) (err error) +//sys Mlockall(flags int) (err error) +//sys Mprotect(b []byte, prot int) (err error) +//sys Munlock(b []byte) (err error) +//sys Munlockall() (err error) +//sys Nanosleep(time *Timespec, leftover *Timespec) (err error) +//sys Open(path string, mode int, perm uint32) (fd int, err error) +//sys Pathconf(path string, name int) (val int, err error) +//sys Pread(fd int, p []byte, offset int64) (n int, err error) +//sys Pwrite(fd int, p []byte, offset int64) (n int, err error) +//sys read(fd int, p []byte) (n int, 
err error) +//sys Readlink(path string, buf []byte) (n int, err error) +//sys Rename(from string, to string) (err error) +//sys Revoke(path string) (err error) +//sys Rmdir(path string) (err error) +//sys Seek(fd int, offset int64, whence int) (newoffset int64, err error) = SYS_LSEEK +//sys Select(n int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (err error) +//sysnb Setegid(egid int) (err error) +//sysnb Seteuid(euid int) (err error) +//sysnb Setgid(gid int) (err error) +//sysnb Setpgid(pid int, pgid int) (err error) +//sys Setpriority(which int, who int, prio int) (err error) +//sysnb Setregid(rgid int, egid int) (err error) +//sysnb Setreuid(ruid int, euid int) (err error) +//sysnb Setrlimit(which int, lim *Rlimit) (err error) +//sysnb Setsid() (pid int, err error) +//sysnb Settimeofday(tp *Timeval) (err error) +//sysnb Setuid(uid int) (err error) +//sys Stat(path string, stat *Stat_t) (err error) +//sys Symlink(path string, link string) (err error) +//sys Sync() (err error) +//sys Truncate(path string, length int64) (err error) +//sys Umask(newmask int) (oldmask int) +//sys Unlink(path string) (err error) +//sys Unmount(path string, flags int) (err error) +//sys write(fd int, p []byte) (n int, err error) +//sys mmap(addr uintptr, length uintptr, prot int, flag int, fd int, pos int64) (ret uintptr, err error) +//sys munmap(addr uintptr, length uintptr) (err error) +//sys readlen(fd int, buf *byte, nbuf int) (n int, err error) = SYS_READ +//sys writelen(fd int, buf *byte, nbuf int) (n int, err error) = SYS_WRITE + +/* + * Unimplemented + */ +// ____semctl13 +// __clone +// __fhopen40 +// __fhstat40 +// __fhstatvfs140 +// __fstat30 +// __getcwd +// __getfh30 +// __getlogin +// __lstat30 +// __mount50 +// __msgctl13 +// __msync13 +// __ntp_gettime30 +// __posix_chown +// __posix_fadvise50 +// __posix_fchown +// __posix_lchown +// __posix_rename +// __setlogin +// __shmctl13 +// __sigaction_sigtramp +// __sigaltstack14 +// __sigpending14 +// __sigprocmask14 +// __sigsuspend14 +// __sigtimedwait +// __stat30 +// __syscall +// __vfork14 +// _ksem_close +// _ksem_destroy +// _ksem_getvalue +// _ksem_init +// _ksem_open +// _ksem_post +// _ksem_trywait +// _ksem_unlink +// _ksem_wait +// _lwp_continue +// _lwp_create +// _lwp_ctl +// _lwp_detach +// _lwp_exit +// _lwp_getname +// _lwp_getprivate +// _lwp_kill +// _lwp_park +// _lwp_self +// _lwp_setname +// _lwp_setprivate +// _lwp_suspend +// _lwp_unpark +// _lwp_unpark_all +// _lwp_wait +// _lwp_wakeup +// _pset_bind +// _sched_getaffinity +// _sched_getparam +// _sched_setaffinity +// _sched_setparam +// acct +// aio_cancel +// aio_error +// aio_fsync +// aio_read +// aio_return +// aio_suspend +// aio_write +// break +// clock_getres +// clock_gettime +// clock_settime +// compat_09_ogetdomainname +// compat_09_osetdomainname +// compat_09_ouname +// compat_10_omsgsys +// compat_10_osemsys +// compat_10_oshmsys +// compat_12_fstat12 +// compat_12_getdirentries +// compat_12_lstat12 +// compat_12_msync +// compat_12_oreboot +// compat_12_oswapon +// compat_12_stat12 +// compat_13_sigaction13 +// compat_13_sigaltstack13 +// compat_13_sigpending13 +// compat_13_sigprocmask13 +// compat_13_sigreturn13 +// compat_13_sigsuspend13 +// compat_14___semctl +// compat_14_msgctl +// compat_14_shmctl +// compat_16___sigaction14 +// compat_16___sigreturn14 +// compat_20_fhstatfs +// compat_20_fstatfs +// compat_20_getfsstat +// compat_20_statfs +// compat_30___fhstat30 +// compat_30___fstat13 +// compat_30___lstat13 +// compat_30___stat13 +// 
compat_30_fhopen +// compat_30_fhstat +// compat_30_fhstatvfs1 +// compat_30_getdents +// compat_30_getfh +// compat_30_ntp_gettime +// compat_30_socket +// compat_40_mount +// compat_43_fstat43 +// compat_43_lstat43 +// compat_43_oaccept +// compat_43_ocreat +// compat_43_oftruncate +// compat_43_ogetdirentries +// compat_43_ogetdtablesize +// compat_43_ogethostid +// compat_43_ogethostname +// compat_43_ogetkerninfo +// compat_43_ogetpagesize +// compat_43_ogetpeername +// compat_43_ogetrlimit +// compat_43_ogetsockname +// compat_43_okillpg +// compat_43_olseek +// compat_43_ommap +// compat_43_oquota +// compat_43_orecv +// compat_43_orecvfrom +// compat_43_orecvmsg +// compat_43_osend +// compat_43_osendmsg +// compat_43_osethostid +// compat_43_osethostname +// compat_43_osetrlimit +// compat_43_osigblock +// compat_43_osigsetmask +// compat_43_osigstack +// compat_43_osigvec +// compat_43_otruncate +// compat_43_owait +// compat_43_stat43 +// execve +// extattr_delete_fd +// extattr_delete_file +// extattr_delete_link +// extattr_get_fd +// extattr_get_file +// extattr_get_link +// extattr_list_fd +// extattr_list_file +// extattr_list_link +// extattr_set_fd +// extattr_set_file +// extattr_set_link +// extattrctl +// fchroot +// fdatasync +// fgetxattr +// fktrace +// flistxattr +// fork +// fremovexattr +// fsetxattr +// fstatvfs1 +// fsync_range +// getcontext +// getitimer +// getvfsstat +// getxattr +// ioctl +// ktrace +// lchflags +// lchmod +// lfs_bmapv +// lfs_markv +// lfs_segclean +// lfs_segwait +// lgetxattr +// lio_listio +// listxattr +// llistxattr +// lremovexattr +// lseek +// lsetxattr +// lutimes +// madvise +// mincore +// minherit +// modctl +// mq_close +// mq_getattr +// mq_notify +// mq_open +// mq_receive +// mq_send +// mq_setattr +// mq_timedreceive +// mq_timedsend +// mq_unlink +// mremap +// msgget +// msgrcv +// msgsnd +// nfssvc +// ntp_adjtime +// pmc_control +// pmc_get_info +// poll +// pollts +// preadv +// profil +// pselect +// pset_assign +// pset_create +// pset_destroy +// ptrace +// pwritev +// quotactl +// rasctl +// readv +// reboot +// removexattr +// sa_enable +// sa_preempt +// sa_register +// sa_setconcurrency +// sa_stacks +// sa_yield +// sbrk +// sched_yield +// semconfig +// semget +// semop +// setcontext +// setitimer +// setxattr +// shmat +// shmdt +// shmget +// sstk +// statvfs1 +// swapctl +// sysarch +// syscall +// timer_create +// timer_delete +// timer_getoverrun +// timer_gettime +// timer_settime +// undelete +// utrace +// uuidgen +// vadvise +// vfork +// writev diff --git a/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/syscall_netbsd_386.go b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/syscall_netbsd_386.go new file mode 100644 index 000000000..1b0e1af12 --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/syscall_netbsd_386.go @@ -0,0 +1,44 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
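For illustration, and not part of the vendored sources: the NetBSD nametomib above resolves a dotted sysctl name at run time by walking the kernel's node tree with CTL_QUERY (via sysctlNodes), whereas the OpenBSD file later in this patch binary-searches a pregenerated sysctlMib table. Callers see neither; they pass the dotted name to the shared BSD Sysctl helper. A sketch, assuming the standalone golang.org/x/sys/unix import path on a BSD host:

package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

func main() {
	// Sysctl (shared BSD code in syscall_bsd.go) calls nametomib to
	// translate the dotted name into the kernel's integer MIB vector,
	// then fetches the value.
	ostype, err := unix.Sysctl("kern.ostype")
	if err != nil {
		fmt.Println("sysctl:", err)
		return
	}
	fmt.Println("kern.ostype =", ostype)
}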
+ +// +build 386,netbsd + +package unix + +func Getpagesize() int { return 4096 } + +func TimespecToNsec(ts Timespec) int64 { return int64(ts.Sec)*1e9 + int64(ts.Nsec) } + +func NsecToTimespec(nsec int64) (ts Timespec) { + ts.Sec = int64(nsec / 1e9) + ts.Nsec = int32(nsec % 1e9) + return +} + +func TimevalToNsec(tv Timeval) int64 { return int64(tv.Sec)*1e9 + int64(tv.Usec)*1e3 } + +func NsecToTimeval(nsec int64) (tv Timeval) { + nsec += 999 // round up to microsecond + tv.Usec = int32(nsec % 1e9 / 1e3) + tv.Sec = int64(nsec / 1e9) + return +} + +func SetKevent(k *Kevent_t, fd, mode, flags int) { + k.Ident = uint32(fd) + k.Filter = uint32(mode) + k.Flags = uint32(flags) +} + +func (iov *Iovec) SetLen(length int) { + iov.Len = uint32(length) +} + +func (msghdr *Msghdr) SetControllen(length int) { + msghdr.Controllen = uint32(length) +} + +func (cmsg *Cmsghdr) SetLen(length int) { + cmsg.Len = uint32(length) +} diff --git a/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/syscall_netbsd_amd64.go b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/syscall_netbsd_amd64.go new file mode 100644 index 000000000..1b6dcbe35 --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/syscall_netbsd_amd64.go @@ -0,0 +1,44 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build amd64,netbsd + +package unix + +func Getpagesize() int { return 4096 } + +func TimespecToNsec(ts Timespec) int64 { return int64(ts.Sec)*1e9 + int64(ts.Nsec) } + +func NsecToTimespec(nsec int64) (ts Timespec) { + ts.Sec = int64(nsec / 1e9) + ts.Nsec = int64(nsec % 1e9) + return +} + +func TimevalToNsec(tv Timeval) int64 { return int64(tv.Sec)*1e9 + int64(tv.Usec)*1e3 } + +func NsecToTimeval(nsec int64) (tv Timeval) { + nsec += 999 // round up to microsecond + tv.Usec = int32(nsec % 1e9 / 1e3) + tv.Sec = int64(nsec / 1e9) + return +} + +func SetKevent(k *Kevent_t, fd, mode, flags int) { + k.Ident = uint64(fd) + k.Filter = uint32(mode) + k.Flags = uint32(flags) +} + +func (iov *Iovec) SetLen(length int) { + iov.Len = uint64(length) +} + +func (msghdr *Msghdr) SetControllen(length int) { + msghdr.Controllen = uint32(length) +} + +func (cmsg *Cmsghdr) SetLen(length int) { + cmsg.Len = uint32(length) +} diff --git a/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/syscall_netbsd_arm.go b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/syscall_netbsd_arm.go new file mode 100644 index 000000000..87d1d6fed --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/syscall_netbsd_arm.go @@ -0,0 +1,44 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
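For illustration, and not part of the vendored sources: SetKevent exists so callers can fill in Kevent_t without tracking the per-port field widths (Ident is uint32 on netbsd/386 above but uint64 on netbsd/amd64). A minimal kqueue sketch, assuming the standalone golang.org/x/sys/unix import path on a BSD host:

package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

func main() {
	kq, err := unix.Kqueue()
	if err != nil {
		fmt.Println("kqueue:", err)
		return
	}
	defer unix.Close(kq)

	// Watch stdin (fd 0) for readability; SetKevent hides the
	// platform-specific integer widths of Ident, Filter, and Flags.
	var change unix.Kevent_t
	unix.SetKevent(&change, 0, unix.EVFILT_READ, unix.EV_ADD)

	events := make([]unix.Kevent_t, 1)
	n, err := unix.Kevent(kq, []unix.Kevent_t{change}, events, nil)
	fmt.Println("kevent returned", n, err)
}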
+
+// +build arm,netbsd
+
+package unix
+
+func Getpagesize() int { return 4096 }
+
+func TimespecToNsec(ts Timespec) int64 { return int64(ts.Sec)*1e9 + int64(ts.Nsec) }
+
+func NsecToTimespec(nsec int64) (ts Timespec) {
+	ts.Sec = int64(nsec / 1e9)
+	ts.Nsec = int32(nsec % 1e9)
+	return
+}
+
+func TimevalToNsec(tv Timeval) int64 { return int64(tv.Sec)*1e9 + int64(tv.Usec)*1e3 }
+
+func NsecToTimeval(nsec int64) (tv Timeval) {
+	nsec += 999 // round up to microsecond
+	tv.Usec = int32(nsec % 1e9 / 1e3)
+	tv.Sec = int64(nsec / 1e9)
+	return
+}
+
+func SetKevent(k *Kevent_t, fd, mode, flags int) {
+	k.Ident = uint32(fd)
+	k.Filter = uint32(mode)
+	k.Flags = uint32(flags)
+}
+
+func (iov *Iovec) SetLen(length int) {
+	iov.Len = uint32(length)
+}
+
+func (msghdr *Msghdr) SetControllen(length int) {
+	msghdr.Controllen = uint32(length)
+}
+
+func (cmsg *Cmsghdr) SetLen(length int) {
+	cmsg.Len = uint32(length)
+}
diff --git a/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/syscall_no_getwd.go b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/syscall_no_getwd.go
new file mode 100644
index 000000000..530792ea9
--- /dev/null
+++ b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/syscall_no_getwd.go
@@ -0,0 +1,11 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build dragonfly freebsd netbsd openbsd
+
+package unix
+
+const ImplementsGetwd = false
+
+func Getwd() (string, error) { return "", ENOTSUP }
diff --git a/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/syscall_openbsd.go b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/syscall_openbsd.go
new file mode 100644
index 000000000..246131d2a
--- /dev/null
+++ b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/syscall_openbsd.go
@@ -0,0 +1,303 @@
+// Copyright 2009,2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// OpenBSD system calls.
+// This file is compiled as ordinary Go code,
+// but it is also input to mksyscall,
+// which parses the //sys lines and generates system call stubs.
+// Note that sometimes we use a lowercase //sys name and wrap
+// it in our own nicer implementation, either here or in
+// syscall_bsd.go or syscall_unix.go.
+
+package unix
+
+import (
+	"syscall"
+	"unsafe"
+)
+
+type SockaddrDatalink struct {
+	Len    uint8
+	Family uint8
+	Index  uint16
+	Type   uint8
+	Nlen   uint8
+	Alen   uint8
+	Slen   uint8
+	Data   [24]int8
+	raw    RawSockaddrDatalink
+}
+
+func Syscall9(trap, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r1, r2 uintptr, err syscall.Errno)
+
+func nametomib(name string) (mib []_C_int, err error) {
+
+	// Perform lookup via a binary search
+	left := 0
+	right := len(sysctlMib) - 1
+	for {
+		idx := left + (right-left)/2
+		switch {
+		case name == sysctlMib[idx].ctlname:
+			return sysctlMib[idx].ctloid, nil
+		case name > sysctlMib[idx].ctlname:
+			left = idx + 1
+		default:
+			right = idx - 1
+		}
+		if left > right {
+			break
+		}
+	}
+	return nil, EINVAL
+}
+
+// ParseDirent parses up to max directory entries in buf,
+// appending the names to names. It returns the number of
+// bytes consumed from buf, the number of entries added
+// to names, and the new names slice.
+func ParseDirent(buf []byte, max int, names []string) (consumed int, count int, newnames []string) { + origlen := len(buf) + for max != 0 && len(buf) > 0 { + dirent := (*Dirent)(unsafe.Pointer(&buf[0])) + if dirent.Reclen == 0 { + buf = nil + break + } + buf = buf[dirent.Reclen:] + if dirent.Fileno == 0 { // File absent in directory. + continue + } + bytes := (*[10000]byte)(unsafe.Pointer(&dirent.Name[0])) + var name = string(bytes[0:dirent.Namlen]) + if name == "." || name == ".." { // Useless names + continue + } + max-- + count++ + names = append(names, name) + } + return origlen - len(buf), count, names +} + +//sysnb pipe(p *[2]_C_int) (err error) +func Pipe(p []int) (err error) { + if len(p) != 2 { + return EINVAL + } + var pp [2]_C_int + err = pipe(&pp) + p[0] = int(pp[0]) + p[1] = int(pp[1]) + return +} + +//sys getdents(fd int, buf []byte) (n int, err error) +func Getdirentries(fd int, buf []byte, basep *uintptr) (n int, err error) { + return getdents(fd, buf) +} + +// TODO +func sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) { + return -1, ENOSYS +} + +func Getfsstat(buf []Statfs_t, flags int) (n int, err error) { + var _p0 unsafe.Pointer + var bufsize uintptr + if len(buf) > 0 { + _p0 = unsafe.Pointer(&buf[0]) + bufsize = unsafe.Sizeof(Statfs_t{}) * uintptr(len(buf)) + } + r0, _, e1 := Syscall(SYS_GETFSSTAT, uintptr(_p0), bufsize, uintptr(flags)) + n = int(r0) + if e1 != 0 { + err = e1 + } + return +} + +/* + * Exposed directly + */ +//sys Access(path string, mode uint32) (err error) +//sys Adjtime(delta *Timeval, olddelta *Timeval) (err error) +//sys Chdir(path string) (err error) +//sys Chflags(path string, flags int) (err error) +//sys Chmod(path string, mode uint32) (err error) +//sys Chown(path string, uid int, gid int) (err error) +//sys Chroot(path string) (err error) +//sys Close(fd int) (err error) +//sys Dup(fd int) (nfd int, err error) +//sys Dup2(from int, to int) (err error) +//sys Exit(code int) +//sys Fchdir(fd int) (err error) +//sys Fchflags(fd int, flags int) (err error) +//sys Fchmod(fd int, mode uint32) (err error) +//sys Fchown(fd int, uid int, gid int) (err error) +//sys Flock(fd int, how int) (err error) +//sys Fpathconf(fd int, name int) (val int, err error) +//sys Fstat(fd int, stat *Stat_t) (err error) +//sys Fstatfs(fd int, stat *Statfs_t) (err error) +//sys Fsync(fd int) (err error) +//sys Ftruncate(fd int, length int64) (err error) +//sysnb Getegid() (egid int) +//sysnb Geteuid() (uid int) +//sysnb Getgid() (gid int) +//sysnb Getpgid(pid int) (pgid int, err error) +//sysnb Getpgrp() (pgrp int) +//sysnb Getpid() (pid int) +//sysnb Getppid() (ppid int) +//sys Getpriority(which int, who int) (prio int, err error) +//sysnb Getrlimit(which int, lim *Rlimit) (err error) +//sysnb Getrusage(who int, rusage *Rusage) (err error) +//sysnb Getsid(pid int) (sid int, err error) +//sysnb Gettimeofday(tv *Timeval) (err error) +//sysnb Getuid() (uid int) +//sys Issetugid() (tainted bool) +//sys Kill(pid int, signum syscall.Signal) (err error) +//sys Kqueue() (fd int, err error) +//sys Lchown(path string, uid int, gid int) (err error) +//sys Link(path string, link string) (err error) +//sys Listen(s int, backlog int) (err error) +//sys Lstat(path string, stat *Stat_t) (err error) +//sys Mkdir(path string, mode uint32) (err error) +//sys Mkfifo(path string, mode uint32) (err error) +//sys Mknod(path string, mode uint32, dev int) (err error) +//sys Mlock(b []byte) (err error) +//sys Mlockall(flags int) (err error) +//sys Mprotect(b 
[]byte, prot int) (err error) +//sys Munlock(b []byte) (err error) +//sys Munlockall() (err error) +//sys Nanosleep(time *Timespec, leftover *Timespec) (err error) +//sys Open(path string, mode int, perm uint32) (fd int, err error) +//sys Pathconf(path string, name int) (val int, err error) +//sys Pread(fd int, p []byte, offset int64) (n int, err error) +//sys Pwrite(fd int, p []byte, offset int64) (n int, err error) +//sys read(fd int, p []byte) (n int, err error) +//sys Readlink(path string, buf []byte) (n int, err error) +//sys Rename(from string, to string) (err error) +//sys Revoke(path string) (err error) +//sys Rmdir(path string) (err error) +//sys Seek(fd int, offset int64, whence int) (newoffset int64, err error) = SYS_LSEEK +//sys Select(n int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (err error) +//sysnb Setegid(egid int) (err error) +//sysnb Seteuid(euid int) (err error) +//sysnb Setgid(gid int) (err error) +//sys Setlogin(name string) (err error) +//sysnb Setpgid(pid int, pgid int) (err error) +//sys Setpriority(which int, who int, prio int) (err error) +//sysnb Setregid(rgid int, egid int) (err error) +//sysnb Setreuid(ruid int, euid int) (err error) +//sysnb Setresgid(rgid int, egid int, sgid int) (err error) +//sysnb Setresuid(ruid int, euid int, suid int) (err error) +//sysnb Setrlimit(which int, lim *Rlimit) (err error) +//sysnb Setsid() (pid int, err error) +//sysnb Settimeofday(tp *Timeval) (err error) +//sysnb Setuid(uid int) (err error) +//sys Stat(path string, stat *Stat_t) (err error) +//sys Statfs(path string, stat *Statfs_t) (err error) +//sys Symlink(path string, link string) (err error) +//sys Sync() (err error) +//sys Truncate(path string, length int64) (err error) +//sys Umask(newmask int) (oldmask int) +//sys Unlink(path string) (err error) +//sys Unmount(path string, flags int) (err error) +//sys write(fd int, p []byte) (n int, err error) +//sys mmap(addr uintptr, length uintptr, prot int, flag int, fd int, pos int64) (ret uintptr, err error) +//sys munmap(addr uintptr, length uintptr) (err error) +//sys readlen(fd int, buf *byte, nbuf int) (n int, err error) = SYS_READ +//sys writelen(fd int, buf *byte, nbuf int) (n int, err error) = SYS_WRITE + +/* + * Unimplemented + */ +// __getcwd +// __semctl +// __syscall +// __sysctl +// adjfreq +// break +// clock_getres +// clock_gettime +// clock_settime +// closefrom +// execve +// faccessat +// fchmodat +// fchownat +// fcntl +// fhopen +// fhstat +// fhstatfs +// fork +// fstatat +// futimens +// getfh +// getgid +// getitimer +// getlogin +// getresgid +// getresuid +// getrtable +// getthrid +// ioctl +// ktrace +// lfs_bmapv +// lfs_markv +// lfs_segclean +// lfs_segwait +// linkat +// mincore +// minherit +// mkdirat +// mkfifoat +// mknodat +// mount +// mquery +// msgctl +// msgget +// msgrcv +// msgsnd +// nfssvc +// nnpfspioctl +// openat +// poll +// preadv +// profil +// pwritev +// quotactl +// readlinkat +// readv +// reboot +// renameat +// rfork +// sched_yield +// semget +// semop +// setgroups +// setitimer +// setrtable +// setsockopt +// shmat +// shmctl +// shmdt +// shmget +// sigaction +// sigaltstack +// sigpending +// sigprocmask +// sigreturn +// sigsuspend +// symlinkat +// sysarch +// syscall +// threxit +// thrsigdivert +// thrsleep +// thrwakeup +// unlinkat +// utimensat +// vfork +// writev diff --git a/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/syscall_openbsd_386.go 
b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/syscall_openbsd_386.go new file mode 100644 index 000000000..9529b20e8 --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/syscall_openbsd_386.go @@ -0,0 +1,44 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build 386,openbsd + +package unix + +func Getpagesize() int { return 4096 } + +func TimespecToNsec(ts Timespec) int64 { return int64(ts.Sec)*1e9 + int64(ts.Nsec) } + +func NsecToTimespec(nsec int64) (ts Timespec) { + ts.Sec = int64(nsec / 1e9) + ts.Nsec = int32(nsec % 1e9) + return +} + +func TimevalToNsec(tv Timeval) int64 { return int64(tv.Sec)*1e9 + int64(tv.Usec)*1e3 } + +func NsecToTimeval(nsec int64) (tv Timeval) { + nsec += 999 // round up to microsecond + tv.Usec = int32(nsec % 1e9 / 1e3) + tv.Sec = int64(nsec / 1e9) + return +} + +func SetKevent(k *Kevent_t, fd, mode, flags int) { + k.Ident = uint32(fd) + k.Filter = int16(mode) + k.Flags = uint16(flags) +} + +func (iov *Iovec) SetLen(length int) { + iov.Len = uint32(length) +} + +func (msghdr *Msghdr) SetControllen(length int) { + msghdr.Controllen = uint32(length) +} + +func (cmsg *Cmsghdr) SetLen(length int) { + cmsg.Len = uint32(length) +} diff --git a/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/syscall_openbsd_amd64.go b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/syscall_openbsd_amd64.go new file mode 100644 index 000000000..fc6402946 --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/syscall_openbsd_amd64.go @@ -0,0 +1,44 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build amd64,openbsd + +package unix + +func Getpagesize() int { return 4096 } + +func TimespecToNsec(ts Timespec) int64 { return int64(ts.Sec)*1e9 + int64(ts.Nsec) } + +func NsecToTimespec(nsec int64) (ts Timespec) { + ts.Sec = nsec / 1e9 + ts.Nsec = nsec % 1e9 + return +} + +func TimevalToNsec(tv Timeval) int64 { return int64(tv.Sec)*1e9 + int64(tv.Usec)*1e3 } + +func NsecToTimeval(nsec int64) (tv Timeval) { + nsec += 999 // round up to microsecond + tv.Usec = nsec % 1e9 / 1e3 + tv.Sec = nsec / 1e9 + return +} + +func SetKevent(k *Kevent_t, fd, mode, flags int) { + k.Ident = uint64(fd) + k.Filter = int16(mode) + k.Flags = uint16(flags) +} + +func (iov *Iovec) SetLen(length int) { + iov.Len = uint64(length) +} + +func (msghdr *Msghdr) SetControllen(length int) { + msghdr.Controllen = uint32(length) +} + +func (cmsg *Cmsghdr) SetLen(length int) { + cmsg.Len = uint32(length) +} diff --git a/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/syscall_solaris.go b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/syscall_solaris.go new file mode 100644 index 000000000..008373264 --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/syscall_solaris.go @@ -0,0 +1,713 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Solaris system calls. +// This file is compiled as ordinary Go code, +// but it is also input to mksyscall, +// which parses the //sys lines and generates system call stubs. 
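+// For example, a directive such as
+//
+//	//sys	Chdir(path string) (err error)
+//
+// is expanded by mksyscall into a stub of roughly the following shape
+// (a schematic sketch, not the literal generated code; procChdir
+// stands for the lazily resolved libc symbol):
+//
+//	func Chdir(path string) (err error) {
+//		var _p0 *byte
+//		_p0, err = BytePtrFromString(path)
+//		if err != nil {
+//			return
+//		}
+//		_, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procChdir)), 1, uintptr(unsafe.Pointer(_p0)), 0, 0, 0, 0, 0)
+//		if e1 != 0 {
+//			err = e1
+//		}
+//		return
+//	}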
+// Note that sometimes we use a lowercase //sys name and wrap +// it in our own nicer implementation, either here or in +// syscall_solaris.go or syscall_unix.go. + +package unix + +import ( + "sync/atomic" + "syscall" + "unsafe" +) + +// Implemented in runtime/syscall_solaris.go. +type syscallFunc uintptr + +func rawSysvicall6(trap, nargs, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2 uintptr, err syscall.Errno) +func sysvicall6(trap, nargs, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2 uintptr, err syscall.Errno) + +type SockaddrDatalink struct { + Family uint16 + Index uint16 + Type uint8 + Nlen uint8 + Alen uint8 + Slen uint8 + Data [244]int8 + raw RawSockaddrDatalink +} + +func clen(n []byte) int { + for i := 0; i < len(n); i++ { + if n[i] == 0 { + return i + } + } + return len(n) +} + +// ParseDirent parses up to max directory entries in buf, +// appending the names to names. It returns the number of +// bytes consumed from buf, the number of entries added +// to names, and the new names slice. +func ParseDirent(buf []byte, max int, names []string) (consumed int, count int, newnames []string) { + origlen := len(buf) + for max != 0 && len(buf) > 0 { + dirent := (*Dirent)(unsafe.Pointer(&buf[0])) + if dirent.Reclen == 0 { + buf = nil + break + } + buf = buf[dirent.Reclen:] + if dirent.Ino == 0 { // File absent in directory. + continue + } + bytes := (*[10000]byte)(unsafe.Pointer(&dirent.Name[0])) + var name = string(bytes[0:clen(bytes[:])]) + if name == "." || name == ".." { // Useless names + continue + } + max-- + count++ + names = append(names, name) + } + return origlen - len(buf), count, names +} + +func pipe() (r uintptr, w uintptr, err uintptr) + +func Pipe(p []int) (err error) { + if len(p) != 2 { + return EINVAL + } + r0, w0, e1 := pipe() + if e1 != 0 { + err = syscall.Errno(e1) + } + p[0], p[1] = int(r0), int(w0) + return +} + +func (sa *SockaddrInet4) sockaddr() (unsafe.Pointer, _Socklen, error) { + if sa.Port < 0 || sa.Port > 0xFFFF { + return nil, 0, EINVAL + } + sa.raw.Family = AF_INET + p := (*[2]byte)(unsafe.Pointer(&sa.raw.Port)) + p[0] = byte(sa.Port >> 8) + p[1] = byte(sa.Port) + for i := 0; i < len(sa.Addr); i++ { + sa.raw.Addr[i] = sa.Addr[i] + } + return unsafe.Pointer(&sa.raw), SizeofSockaddrInet4, nil +} + +func (sa *SockaddrInet6) sockaddr() (unsafe.Pointer, _Socklen, error) { + if sa.Port < 0 || sa.Port > 0xFFFF { + return nil, 0, EINVAL + } + sa.raw.Family = AF_INET6 + p := (*[2]byte)(unsafe.Pointer(&sa.raw.Port)) + p[0] = byte(sa.Port >> 8) + p[1] = byte(sa.Port) + sa.raw.Scope_id = sa.ZoneId + for i := 0; i < len(sa.Addr); i++ { + sa.raw.Addr[i] = sa.Addr[i] + } + return unsafe.Pointer(&sa.raw), SizeofSockaddrInet6, nil +} + +func (sa *SockaddrUnix) sockaddr() (unsafe.Pointer, _Socklen, error) { + name := sa.Name + n := len(name) + if n >= len(sa.raw.Path) { + return nil, 0, EINVAL + } + sa.raw.Family = AF_UNIX + for i := 0; i < n; i++ { + sa.raw.Path[i] = int8(name[i]) + } + // length is family (uint16), name, NUL. + sl := _Socklen(2) + if n > 0 { + sl += _Socklen(n) + 1 + } + if sa.raw.Path[0] == '@' { + sa.raw.Path[0] = 0 + // Don't count trailing NUL for abstract address.
+ sl-- + } + + return unsafe.Pointer(&sa.raw), sl, nil +} + +//sys getsockname(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) = libsocket.getsockname + +func Getsockname(fd int) (sa Sockaddr, err error) { + var rsa RawSockaddrAny + var len _Socklen = SizeofSockaddrAny + if err = getsockname(fd, &rsa, &len); err != nil { + return + } + return anyToSockaddr(&rsa) +} + +const ImplementsGetwd = true + +//sys Getcwd(buf []byte) (n int, err error) + +func Getwd() (wd string, err error) { + var buf [PathMax]byte + // Getcwd will return an error if it failed for any reason. + _, err = Getcwd(buf[0:]) + if err != nil { + return "", err + } + n := clen(buf[:]) + if n < 1 { + return "", EINVAL + } + return string(buf[:n]), nil +} + +/* + * Wrapped + */ + +//sysnb getgroups(ngid int, gid *_Gid_t) (n int, err error) +//sysnb setgroups(ngid int, gid *_Gid_t) (err error) + +func Getgroups() (gids []int, err error) { + n, err := getgroups(0, nil) + // Check for error and sanity check group count. Newer versions of + // Solaris allow up to 1024 (NGROUPS_MAX). + if n < 0 || n > 1024 { + if err != nil { + return nil, err + } + return nil, EINVAL + } else if n == 0 { + return nil, nil + } + + a := make([]_Gid_t, n) + n, err = getgroups(n, &a[0]) + if n == -1 { + return nil, err + } + gids = make([]int, n) + for i, v := range a[0:n] { + gids[i] = int(v) + } + return +} + +func Setgroups(gids []int) (err error) { + if len(gids) == 0 { + return setgroups(0, nil) + } + + a := make([]_Gid_t, len(gids)) + for i, v := range gids { + a[i] = _Gid_t(v) + } + return setgroups(len(a), &a[0]) +} + +func ReadDirent(fd int, buf []byte) (n int, err error) { + // Final argument is (basep *uintptr) and the syscall doesn't take nil. + // TODO(rsc): Can we use a single global basep for all calls? + return Getdents(fd, buf, new(uintptr)) +} + +// Wait status is 7 bits at bottom, either 0 (exited), +// 0x7F (stopped), or a signal number that caused an exit. +// The 0x80 bit is whether there was a core dump. +// An extra number (exit code, signal causing a stop) +// is in the high bits. 
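+//
+// A worked example with illustrative values: 0x0200 has low bits 0x00,
+// so Exited() is true and ExitStatus() is 0x02; 0x0086 has low bits
+// 0x06 with the 0x80 core bit set, so Signaled() and CoreDump() are
+// true and Signal() is 6; 0x137F has low bits 0x7F, so Stopped() is
+// true and StopSignal() is 0x13.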
+ +type WaitStatus uint32 + +const ( + mask = 0x7F + core = 0x80 + shift = 8 + + exited = 0 + stopped = 0x7F +) + +func (w WaitStatus) Exited() bool { return w&mask == exited } + +func (w WaitStatus) ExitStatus() int { + if w&mask != exited { + return -1 + } + return int(w >> shift) +} + +func (w WaitStatus) Signaled() bool { return w&mask != stopped && w&mask != 0 } + +func (w WaitStatus) Signal() syscall.Signal { + sig := syscall.Signal(w & mask) + if sig == stopped || sig == 0 { + return -1 + } + return sig +} + +func (w WaitStatus) CoreDump() bool { return w.Signaled() && w&core != 0 } + +func (w WaitStatus) Stopped() bool { return w&mask == stopped && syscall.Signal(w>>shift) != SIGSTOP } + +func (w WaitStatus) Continued() bool { return w&mask == stopped && syscall.Signal(w>>shift) == SIGSTOP } + +func (w WaitStatus) StopSignal() syscall.Signal { + if !w.Stopped() { + return -1 + } + return syscall.Signal(w>>shift) & 0xFF +} + +func (w WaitStatus) TrapCause() int { return -1 } + +func wait4(pid uintptr, wstatus *WaitStatus, options uintptr, rusage *Rusage) (wpid uintptr, err uintptr) + +func Wait4(pid int, wstatus *WaitStatus, options int, rusage *Rusage) (wpid int, err error) { + r0, e1 := wait4(uintptr(pid), wstatus, uintptr(options), rusage) + if e1 != 0 { + err = syscall.Errno(e1) + } + return int(r0), err +} + +func gethostname() (name string, err uintptr) + +func Gethostname() (name string, err error) { + name, e1 := gethostname() + if e1 != 0 { + err = syscall.Errno(e1) + } + return name, err +} + +//sys utimes(path string, times *[2]Timeval) (err error) + +func Utimes(path string, tv []Timeval) (err error) { + if tv == nil { + return utimes(path, nil) + } + if len(tv) != 2 { + return EINVAL + } + return utimes(path, (*[2]Timeval)(unsafe.Pointer(&tv[0]))) +} + +//sys utimensat(fd int, path string, times *[2]Timespec, flag int) (err error) + +func UtimesNano(path string, ts []Timespec) error { + if ts == nil { + return utimensat(AT_FDCWD, path, nil, 0) + } + if len(ts) != 2 { + return EINVAL + } + return utimensat(AT_FDCWD, path, (*[2]Timespec)(unsafe.Pointer(&ts[0])), 0) +} + +func UtimesNanoAt(dirfd int, path string, ts []Timespec, flags int) error { + if ts == nil { + return utimensat(dirfd, path, nil, flags) + } + if len(ts) != 2 { + return EINVAL + } + return utimensat(dirfd, path, (*[2]Timespec)(unsafe.Pointer(&ts[0])), flags) +} + +//sys fcntl(fd int, cmd int, arg int) (val int, err error) + +// FcntlFlock performs a fcntl syscall for the F_GETLK, F_SETLK or F_SETLKW command. +func FcntlFlock(fd uintptr, cmd int, lk *Flock_t) error { + _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procfcntl)), 3, uintptr(fd), uintptr(cmd), uintptr(unsafe.Pointer(lk)), 0, 0, 0) + if e1 != 0 { + return e1 + } + return nil +} + +//sys futimesat(fildes int, path *byte, times *[2]Timeval) (err error) + +func Futimesat(dirfd int, path string, tv []Timeval) error { + pathp, err := BytePtrFromString(path) + if err != nil { + return err + } + if tv == nil { + return futimesat(dirfd, pathp, nil) + } + if len(tv) != 2 { + return EINVAL + } + return futimesat(dirfd, pathp, (*[2]Timeval)(unsafe.Pointer(&tv[0]))) +} + +// Solaris doesn't have an futimes function because it allows NULL to be +// specified as the path for futimesat. However, Go doesn't like +// NULL-style string interfaces, so this simple wrapper is provided. 
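+//
+// A minimal usage sketch (fd is assumed to be an open descriptor),
+// setting both the access and modification times to the current time:
+//
+//	var now unix.Timeval
+//	_ = unix.Gettimeofday(&now)
+//	err := unix.Futimes(fd, []unix.Timeval{now, now})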
+func Futimes(fd int, tv []Timeval) error { + if tv == nil { + return futimesat(fd, nil, nil) + } + if len(tv) != 2 { + return EINVAL + } + return futimesat(fd, nil, (*[2]Timeval)(unsafe.Pointer(&tv[0]))) +} + +func anyToSockaddr(rsa *RawSockaddrAny) (Sockaddr, error) { + switch rsa.Addr.Family { + case AF_UNIX: + pp := (*RawSockaddrUnix)(unsafe.Pointer(rsa)) + sa := new(SockaddrUnix) + // Assume path ends at NUL. + // This is not technically the Solaris semantics for + // abstract Unix domain sockets -- they are supposed + // to be uninterpreted fixed-size binary blobs -- but + // everyone uses this convention. + n := 0 + for n < len(pp.Path) && pp.Path[n] != 0 { + n++ + } + bytes := (*[10000]byte)(unsafe.Pointer(&pp.Path[0]))[0:n] + sa.Name = string(bytes) + return sa, nil + + case AF_INET: + pp := (*RawSockaddrInet4)(unsafe.Pointer(rsa)) + sa := new(SockaddrInet4) + p := (*[2]byte)(unsafe.Pointer(&pp.Port)) + sa.Port = int(p[0])<<8 + int(p[1]) + for i := 0; i < len(sa.Addr); i++ { + sa.Addr[i] = pp.Addr[i] + } + return sa, nil + + case AF_INET6: + pp := (*RawSockaddrInet6)(unsafe.Pointer(rsa)) + sa := new(SockaddrInet6) + p := (*[2]byte)(unsafe.Pointer(&pp.Port)) + sa.Port = int(p[0])<<8 + int(p[1]) + sa.ZoneId = pp.Scope_id + for i := 0; i < len(sa.Addr); i++ { + sa.Addr[i] = pp.Addr[i] + } + return sa, nil + } + return nil, EAFNOSUPPORT +} + +//sys accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error) = libsocket.accept + +func Accept(fd int) (nfd int, sa Sockaddr, err error) { + var rsa RawSockaddrAny + var len _Socklen = SizeofSockaddrAny + nfd, err = accept(fd, &rsa, &len) + if nfd == -1 { + return + } + sa, err = anyToSockaddr(&rsa) + if err != nil { + Close(nfd) + nfd = 0 + } + return +} + +//sys recvmsg(s int, msg *Msghdr, flags int) (n int, err error) = libsocket.recvmsg + +func Recvmsg(fd int, p, oob []byte, flags int) (n, oobn int, recvflags int, from Sockaddr, err error) { + var msg Msghdr + var rsa RawSockaddrAny + msg.Name = (*byte)(unsafe.Pointer(&rsa)) + msg.Namelen = uint32(SizeofSockaddrAny) + var iov Iovec + if len(p) > 0 { + iov.Base = (*int8)(unsafe.Pointer(&p[0])) + iov.SetLen(len(p)) + } + var dummy int8 + if len(oob) > 0 { + // receive at least one normal byte + if len(p) == 0 { + iov.Base = &dummy + iov.SetLen(1) + } + msg.Accrights = (*int8)(unsafe.Pointer(&oob[0])) + } + msg.Iov = &iov + msg.Iovlen = 1 + if n, err = recvmsg(fd, &msg, flags); n == -1 { + return + } + oobn = int(msg.Accrightslen) + // source address is only specified if the socket is unconnected + if rsa.Addr.Family != AF_UNSPEC { + from, err = anyToSockaddr(&rsa) + } + return +} + +func Sendmsg(fd int, p, oob []byte, to Sockaddr, flags int) (err error) { + _, err = SendmsgN(fd, p, oob, to, flags) + return +} + +//sys sendmsg(s int, msg *Msghdr, flags int) (n int, err error) = libsocket.sendmsg + +func SendmsgN(fd int, p, oob []byte, to Sockaddr, flags int) (n int, err error) { + var ptr unsafe.Pointer + var salen _Socklen + if to != nil { + ptr, salen, err = to.sockaddr() + if err != nil { + return 0, err + } + } + var msg Msghdr + msg.Name = (*byte)(unsafe.Pointer(ptr)) + msg.Namelen = uint32(salen) + var iov Iovec + if len(p) > 0 { + iov.Base = (*int8)(unsafe.Pointer(&p[0])) + iov.SetLen(len(p)) + } + var dummy int8 + if len(oob) > 0 { + // send at least one normal byte + if len(p) == 0 { + iov.Base = &dummy + iov.SetLen(1) + } + msg.Accrights = (*int8)(unsafe.Pointer(&oob[0])) + } + msg.Iov = &iov + msg.Iovlen = 1 + if n, err = sendmsg(fd, &msg, flags); err != nil { + 
return 0, err + } + if len(oob) > 0 && len(p) == 0 { + n = 0 + } + return n, nil +} + +//sys acct(path *byte) (err error) + +func Acct(path string) (err error) { + if len(path) == 0 { + // Assume caller wants to disable accounting. + return acct(nil) + } + + pathp, err := BytePtrFromString(path) + if err != nil { + return err + } + return acct(pathp) +} + +/* + * Expose the ioctl function + */ + +//sys ioctl(fd int, req int, arg uintptr) (err error) + +func IoctlSetInt(fd int, req int, value int) (err error) { + return ioctl(fd, req, uintptr(value)) +} + +func IoctlSetWinsize(fd int, req int, value *Winsize) (err error) { + return ioctl(fd, req, uintptr(unsafe.Pointer(value))) +} + +func IoctlSetTermios(fd int, req int, value *Termios) (err error) { + return ioctl(fd, req, uintptr(unsafe.Pointer(value))) +} + +func IoctlSetTermio(fd int, req int, value *Termio) (err error) { + return ioctl(fd, req, uintptr(unsafe.Pointer(value))) +} + +func IoctlGetInt(fd int, req int) (int, error) { + var value int + err := ioctl(fd, req, uintptr(unsafe.Pointer(&value))) + return value, err +} + +func IoctlGetWinsize(fd int, req int) (*Winsize, error) { + var value Winsize + err := ioctl(fd, req, uintptr(unsafe.Pointer(&value))) + return &value, err +} + +func IoctlGetTermios(fd int, req int) (*Termios, error) { + var value Termios + err := ioctl(fd, req, uintptr(unsafe.Pointer(&value))) + return &value, err +} + +func IoctlGetTermio(fd int, req int) (*Termio, error) { + var value Termio + err := ioctl(fd, req, uintptr(unsafe.Pointer(&value))) + return &value, err +} + +/* + * Exposed directly + */ +//sys Access(path string, mode uint32) (err error) +//sys Adjtime(delta *Timeval, olddelta *Timeval) (err error) +//sys Chdir(path string) (err error) +//sys Chmod(path string, mode uint32) (err error) +//sys Chown(path string, uid int, gid int) (err error) +//sys Chroot(path string) (err error) +//sys Close(fd int) (err error) +//sys Creat(path string, mode uint32) (fd int, err error) +//sys Dup(fd int) (nfd int, err error) +//sys Dup2(oldfd int, newfd int) (err error) +//sys Exit(code int) +//sys Fchdir(fd int) (err error) +//sys Fchmod(fd int, mode uint32) (err error) +//sys Fchmodat(dirfd int, path string, mode uint32, flags int) (err error) +//sys Fchown(fd int, uid int, gid int) (err error) +//sys Fchownat(dirfd int, path string, uid int, gid int, flags int) (err error) +//sys Fdatasync(fd int) (err error) +//sys Fpathconf(fd int, name int) (val int, err error) +//sys Fstat(fd int, stat *Stat_t) (err error) +//sys Getdents(fd int, buf []byte, basep *uintptr) (n int, err error) +//sysnb Getgid() (gid int) +//sysnb Getpid() (pid int) +//sysnb Getpgid(pid int) (pgid int, err error) +//sysnb Getpgrp() (pgid int, err error) +//sys Geteuid() (euid int) +//sys Getegid() (egid int) +//sys Getppid() (ppid int) +//sys Getpriority(which int, who int) (n int, err error) +//sysnb Getrlimit(which int, lim *Rlimit) (err error) +//sysnb Getrusage(who int, rusage *Rusage) (err error) +//sysnb Gettimeofday(tv *Timeval) (err error) +//sysnb Getuid() (uid int) +//sys Kill(pid int, signum syscall.Signal) (err error) +//sys Lchown(path string, uid int, gid int) (err error) +//sys Link(path string, link string) (err error) +//sys Listen(s int, backlog int) (err error) = libsocket.listen +//sys Lstat(path string, stat *Stat_t) (err error) +//sys Madvise(b []byte, advice int) (err error) +//sys Mkdir(path string, mode uint32) (err error) +//sys Mkdirat(dirfd int, path string, mode uint32) (err error) +//sys Mkfifo(path string, 
mode uint32) (err error) +//sys Mkfifoat(dirfd int, path string, mode uint32) (err error) +//sys Mknod(path string, mode uint32, dev int) (err error) +//sys Mknodat(dirfd int, path string, mode uint32, dev int) (err error) +//sys Mlock(b []byte) (err error) +//sys Mlockall(flags int) (err error) +//sys Mprotect(b []byte, prot int) (err error) +//sys Munlock(b []byte) (err error) +//sys Munlockall() (err error) +//sys Nanosleep(time *Timespec, leftover *Timespec) (err error) +//sys Open(path string, mode int, perm uint32) (fd int, err error) +//sys Openat(dirfd int, path string, flags int, mode uint32) (fd int, err error) +//sys Pathconf(path string, name int) (val int, err error) +//sys Pause() (err error) +//sys Pread(fd int, p []byte, offset int64) (n int, err error) +//sys Pwrite(fd int, p []byte, offset int64) (n int, err error) +//sys read(fd int, p []byte) (n int, err error) +//sys Readlink(path string, buf []byte) (n int, err error) +//sys Rename(from string, to string) (err error) +//sys Renameat(olddirfd int, oldpath string, newdirfd int, newpath string) (err error) +//sys Rmdir(path string) (err error) +//sys Seek(fd int, offset int64, whence int) (newoffset int64, err error) = lseek +//sysnb Setegid(egid int) (err error) +//sysnb Seteuid(euid int) (err error) +//sysnb Setgid(gid int) (err error) +//sys Sethostname(p []byte) (err error) +//sysnb Setpgid(pid int, pgid int) (err error) +//sys Setpriority(which int, who int, prio int) (err error) +//sysnb Setregid(rgid int, egid int) (err error) +//sysnb Setreuid(ruid int, euid int) (err error) +//sysnb Setrlimit(which int, lim *Rlimit) (err error) +//sysnb Setsid() (pid int, err error) +//sysnb Setuid(uid int) (err error) +//sys Shutdown(s int, how int) (err error) = libsocket.shutdown +//sys Stat(path string, stat *Stat_t) (err error) +//sys Symlink(path string, link string) (err error) +//sys Sync() (err error) +//sysnb Times(tms *Tms) (ticks uintptr, err error) +//sys Truncate(path string, length int64) (err error) +//sys Fsync(fd int) (err error) +//sys Ftruncate(fd int, length int64) (err error) +//sys Umask(mask int) (oldmask int) +//sysnb Uname(buf *Utsname) (err error) +//sys Unmount(target string, flags int) (err error) = libc.umount +//sys Unlink(path string) (err error) +//sys Unlinkat(dirfd int, path string) (err error) +//sys Ustat(dev int, ubuf *Ustat_t) (err error) +//sys Utime(path string, buf *Utimbuf) (err error) +//sys bind(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) = libsocket.bind +//sys connect(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) = libsocket.connect +//sys mmap(addr uintptr, length uintptr, prot int, flag int, fd int, pos int64) (ret uintptr, err error) +//sys munmap(addr uintptr, length uintptr) (err error) +//sys sendto(s int, buf []byte, flags int, to unsafe.Pointer, addrlen _Socklen) (err error) = libsocket.sendto +//sys socket(domain int, typ int, proto int) (fd int, err error) = libsocket.socket +//sysnb socketpair(domain int, typ int, proto int, fd *[2]int32) (err error) = libsocket.socketpair +//sys write(fd int, p []byte) (n int, err error) +//sys getsockopt(s int, level int, name int, val unsafe.Pointer, vallen *_Socklen) (err error) = libsocket.getsockopt +//sysnb getpeername(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) = libsocket.getpeername +//sys setsockopt(s int, level int, name int, val unsafe.Pointer, vallen uintptr) (err error) = libsocket.setsockopt +//sys recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Socklen) (n 
int, err error) = libsocket.recvfrom + +func readlen(fd int, buf *byte, nbuf int) (n int, err error) { + r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procread)), 3, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf), 0, 0, 0) + n = int(r0) + if e1 != 0 { + err = e1 + } + return +} + +func writelen(fd int, buf *byte, nbuf int) (n int, err error) { + r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procwrite)), 3, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf), 0, 0, 0) + n = int(r0) + if e1 != 0 { + err = e1 + } + return +} + +var mapper = &mmapper{ + active: make(map[*byte][]byte), + mmap: mmap, + munmap: munmap, +} + +func Mmap(fd int, offset int64, length int, prot int, flags int) (data []byte, err error) { + return mapper.Mmap(fd, offset, length, prot, flags) +} + +func Munmap(b []byte) (err error) { + return mapper.Munmap(b) +} + +//sys sysconf(name int) (n int64, err error) + +// pageSize caches the value of Getpagesize, since it can't change +// once the system is booted. +var pageSize int64 // accessed atomically + +func Getpagesize() int { + n := atomic.LoadInt64(&pageSize) + if n == 0 { + n, _ = sysconf(_SC_PAGESIZE) + atomic.StoreInt64(&pageSize, n) + } + return int(n) +} diff --git a/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/syscall_solaris_amd64.go b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/syscall_solaris_amd64.go new file mode 100644 index 000000000..2e44630cd --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/syscall_solaris_amd64.go @@ -0,0 +1,37 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build amd64,solaris + +package unix + +func TimespecToNsec(ts Timespec) int64 { return int64(ts.Sec)*1e9 + int64(ts.Nsec) } + +func NsecToTimespec(nsec int64) (ts Timespec) { + ts.Sec = nsec / 1e9 + ts.Nsec = nsec % 1e9 + return +} + +func TimevalToNsec(tv Timeval) int64 { return int64(tv.Sec)*1e9 + int64(tv.Usec)*1e3 } + +func NsecToTimeval(nsec int64) (tv Timeval) { + nsec += 999 // round up to microsecond + tv.Usec = nsec % 1e9 / 1e3 + tv.Sec = int64(nsec / 1e9) + return +} + +func (iov *Iovec) SetLen(length int) { + iov.Len = uint64(length) +} + +func (cmsg *Cmsghdr) SetLen(length int) { + cmsg.Len = uint32(length) +} + +func sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) { + // TODO(aram): implement this, see issue 5847. + panic("unimplemented") +} diff --git a/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/syscall_test.go b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/syscall_test.go new file mode 100644 index 000000000..b1b2c23a0 --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/syscall_test.go @@ -0,0 +1,50 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build darwin dragonfly freebsd linux netbsd openbsd solaris + +package unix_test + +import ( + "fmt" + "testing" + + "github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix" +) + +func testSetGetenv(t *testing.T, key, value string) { + err := unix.Setenv(key, value) + if err != nil { + t.Fatalf("Setenv failed to set %q: %v", value, err) + } + newvalue, found := unix.Getenv(key) + if !found { + t.Fatalf("Getenv failed to find %v variable (want value %q)", key, value) + } + if newvalue != value { + t.Fatalf("Getenv(%v) = %q; want %q", key, newvalue, value) + } +} + +func TestEnv(t *testing.T) { + testSetGetenv(t, "TESTENV", "AVALUE") + // make sure TESTENV gets set to "", not deleted + testSetGetenv(t, "TESTENV", "") +} + +func TestItoa(t *testing.T) { + // Make most negative integer: 0x8000... + i := 1 + for i<<1 != 0 { + i <<= 1 + } + if i >= 0 { + t.Fatal("bad math") + } + s := unix.Itoa(i) + f := fmt.Sprint(i) + if s != f { + t.Fatalf("itoa(%d) = %s, want %s", i, s, f) + } +} diff --git a/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/syscall_unix.go b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/syscall_unix.go new file mode 100644 index 000000000..b46b25028 --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/syscall_unix.go @@ -0,0 +1,297 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build darwin dragonfly freebsd linux netbsd openbsd solaris + +package unix + +import ( + "runtime" + "sync" + "syscall" + "unsafe" +) + +var ( + Stdin = 0 + Stdout = 1 + Stderr = 2 +) + +const ( + darwin64Bit = runtime.GOOS == "darwin" && sizeofPtr == 8 + dragonfly64Bit = runtime.GOOS == "dragonfly" && sizeofPtr == 8 + netbsd32Bit = runtime.GOOS == "netbsd" && sizeofPtr == 4 +) + +// Do the interface allocations only once for common +// Errno values. +var ( + errEAGAIN error = syscall.EAGAIN + errEINVAL error = syscall.EINVAL + errENOENT error = syscall.ENOENT +) + +// errnoErr returns common boxed Errno values, to prevent +// allocations at runtime. +func errnoErr(e syscall.Errno) error { + switch e { + case 0: + return nil + case EAGAIN: + return errEAGAIN + case EINVAL: + return errEINVAL + case ENOENT: + return errENOENT + } + return e +} + +func Syscall(trap, a1, a2, a3 uintptr) (r1, r2 uintptr, err syscall.Errno) +func Syscall6(trap, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2 uintptr, err syscall.Errno) +func RawSyscall(trap, a1, a2, a3 uintptr) (r1, r2 uintptr, err syscall.Errno) +func RawSyscall6(trap, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2 uintptr, err syscall.Errno) + +// Mmap manager, for use by operating system-specific implementations. + +type mmapper struct { + sync.Mutex + active map[*byte][]byte // active mappings; key is last byte in mapping + mmap func(addr, length uintptr, prot, flags, fd int, offset int64) (uintptr, error) + munmap func(addr uintptr, length uintptr) error +} + +func (m *mmapper) Mmap(fd int, offset int64, length int, prot int, flags int) (data []byte, err error) { + if length <= 0 { + return nil, EINVAL + } + + // Map the requested memory. + addr, errno := m.mmap(0, uintptr(length), prot, flags, fd, offset) + if errno != nil { + return nil, errno + } + + // Slice memory layout + var sl = struct { + addr uintptr + len int + cap int + }{addr, length, length} + + // Use unsafe to turn sl into a []byte. 
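+	// (sl mirrors the runtime's slice header: a data pointer followed
+	// by len and cap. Reinterpreting its address as a *[]byte and
+	// dereferencing it therefore yields a []byte that aliases the
+	// newly mapped region without copying any data.)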
+ b := *(*[]byte)(unsafe.Pointer(&sl)) + + // Register mapping in m and return it. + p := &b[cap(b)-1] + m.Lock() + defer m.Unlock() + m.active[p] = b + return b, nil +} + +func (m *mmapper) Munmap(data []byte) (err error) { + if len(data) == 0 || len(data) != cap(data) { + return EINVAL + } + + // Find the base of the mapping. + p := &data[cap(data)-1] + m.Lock() + defer m.Unlock() + b := m.active[p] + if b == nil || &b[0] != &data[0] { + return EINVAL + } + + // Unmap the memory and update m. + if errno := m.munmap(uintptr(unsafe.Pointer(&b[0])), uintptr(len(b))); errno != nil { + return errno + } + delete(m.active, p) + return nil +} + +func Read(fd int, p []byte) (n int, err error) { + n, err = read(fd, p) + if raceenabled { + if n > 0 { + raceWriteRange(unsafe.Pointer(&p[0]), n) + } + if err == nil { + raceAcquire(unsafe.Pointer(&ioSync)) + } + } + return +} + +func Write(fd int, p []byte) (n int, err error) { + if raceenabled { + raceReleaseMerge(unsafe.Pointer(&ioSync)) + } + n, err = write(fd, p) + if raceenabled && n > 0 { + raceReadRange(unsafe.Pointer(&p[0]), n) + } + return +} + +// For testing: clients can set this flag to force +// creation of IPv6 sockets to return EAFNOSUPPORT. +var SocketDisableIPv6 bool + +type Sockaddr interface { + sockaddr() (ptr unsafe.Pointer, len _Socklen, err error) // lowercase; only we can define Sockaddrs +} + +type SockaddrInet4 struct { + Port int + Addr [4]byte + raw RawSockaddrInet4 +} + +type SockaddrInet6 struct { + Port int + ZoneId uint32 + Addr [16]byte + raw RawSockaddrInet6 +} + +type SockaddrUnix struct { + Name string + raw RawSockaddrUnix +} + +func Bind(fd int, sa Sockaddr) (err error) { + ptr, n, err := sa.sockaddr() + if err != nil { + return err + } + return bind(fd, ptr, n) +} + +func Connect(fd int, sa Sockaddr) (err error) { + ptr, n, err := sa.sockaddr() + if err != nil { + return err + } + return connect(fd, ptr, n) +} + +func Getpeername(fd int) (sa Sockaddr, err error) { + var rsa RawSockaddrAny + var len _Socklen = SizeofSockaddrAny + if err = getpeername(fd, &rsa, &len); err != nil { + return + } + return anyToSockaddr(&rsa) +} + +func GetsockoptInt(fd, level, opt int) (value int, err error) { + var n int32 + vallen := _Socklen(4) + err = getsockopt(fd, level, opt, unsafe.Pointer(&n), &vallen) + return int(n), err +} + +func Recvfrom(fd int, p []byte, flags int) (n int, from Sockaddr, err error) { + var rsa RawSockaddrAny + var len _Socklen = SizeofSockaddrAny + if n, err = recvfrom(fd, p, flags, &rsa, &len); err != nil { + return + } + if rsa.Addr.Family != AF_UNSPEC { + from, err = anyToSockaddr(&rsa) + } + return +} + +func Sendto(fd int, p []byte, flags int, to Sockaddr) (err error) { + ptr, n, err := to.sockaddr() + if err != nil { + return err + } + return sendto(fd, p, flags, ptr, n) +} + +func SetsockoptByte(fd, level, opt int, value byte) (err error) { + return setsockopt(fd, level, opt, unsafe.Pointer(&value), 1) +} + +func SetsockoptInt(fd, level, opt int, value int) (err error) { + var n = int32(value) + return setsockopt(fd, level, opt, unsafe.Pointer(&n), 4) +} + +func SetsockoptInet4Addr(fd, level, opt int, value [4]byte) (err error) { + return setsockopt(fd, level, opt, unsafe.Pointer(&value[0]), 4) +} + +func SetsockoptIPMreq(fd, level, opt int, mreq *IPMreq) (err error) { + return setsockopt(fd, level, opt, unsafe.Pointer(mreq), SizeofIPMreq) +} + +func SetsockoptIPv6Mreq(fd, level, opt int, mreq *IPv6Mreq) (err error) { + return setsockopt(fd, level, opt, unsafe.Pointer(mreq), SizeofIPv6Mreq) +} + 
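+// An illustrative sketch of how the wrappers above compose (the
+// address, port, and option values are arbitrary examples):
+//
+//	fd, err := unix.Socket(unix.AF_INET, unix.SOCK_DGRAM, 0)
+//	if err != nil {
+//		return err
+//	}
+//	defer unix.Close(fd)
+//	if err := unix.SetsockoptInt(fd, unix.SOL_SOCKET, unix.SO_REUSEADDR, 1); err != nil {
+//		return err
+//	}
+//	sa := &unix.SockaddrInet4{Port: 9999, Addr: [4]byte{127, 0, 0, 1}}
+//	if err := unix.Bind(fd, sa); err != nil {
+//		return err
+//	}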
+func SetsockoptICMPv6Filter(fd, level, opt int, filter *ICMPv6Filter) error { + return setsockopt(fd, level, opt, unsafe.Pointer(filter), SizeofICMPv6Filter) +} + +func SetsockoptLinger(fd, level, opt int, l *Linger) (err error) { + return setsockopt(fd, level, opt, unsafe.Pointer(l), SizeofLinger) +} + +func SetsockoptString(fd, level, opt int, s string) (err error) { + return setsockopt(fd, level, opt, unsafe.Pointer(&[]byte(s)[0]), uintptr(len(s))) +} + +func SetsockoptTimeval(fd, level, opt int, tv *Timeval) (err error) { + return setsockopt(fd, level, opt, unsafe.Pointer(tv), unsafe.Sizeof(*tv)) +} + +func Socket(domain, typ, proto int) (fd int, err error) { + if domain == AF_INET6 && SocketDisableIPv6 { + return -1, EAFNOSUPPORT + } + fd, err = socket(domain, typ, proto) + return +} + +func Socketpair(domain, typ, proto int) (fd [2]int, err error) { + var fdx [2]int32 + err = socketpair(domain, typ, proto, &fdx) + if err == nil { + fd[0] = int(fdx[0]) + fd[1] = int(fdx[1]) + } + return +} + +func Sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) { + if raceenabled { + raceReleaseMerge(unsafe.Pointer(&ioSync)) + } + return sendfile(outfd, infd, offset, count) +} + +var ioSync int64 + +func CloseOnExec(fd int) { fcntl(fd, F_SETFD, FD_CLOEXEC) } + +func SetNonblock(fd int, nonblocking bool) (err error) { + flag, err := fcntl(fd, F_GETFL, 0) + if err != nil { + return err + } + if nonblocking { + flag |= O_NONBLOCK + } else { + flag &= ^O_NONBLOCK + } + _, err = fcntl(fd, F_SETFL, flag) + return err +} diff --git a/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/syscall_unix_test.go b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/syscall_unix_test.go new file mode 100644 index 000000000..ddad90e5f --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/syscall_unix_test.go @@ -0,0 +1,318 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build darwin dragonfly freebsd linux netbsd openbsd solaris + +package unix_test + +import ( + "flag" + "fmt" + "io/ioutil" + "net" + "os" + "os/exec" + "path/filepath" + "runtime" + "testing" + "time" + + "github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix" +) + +// Tests that below functions, structures and constants are consistent +// on all Unix-like systems. +func _() { + // program scheduling priority functions and constants + var ( + _ func(int, int, int) error = unix.Setpriority + _ func(int, int) (int, error) = unix.Getpriority + ) + const ( + _ int = unix.PRIO_USER + _ int = unix.PRIO_PROCESS + _ int = unix.PRIO_PGRP + ) + + // termios constants + const ( + _ int = unix.TCIFLUSH + _ int = unix.TCIOFLUSH + _ int = unix.TCOFLUSH + ) + + // fcntl file locking structure and constants + var ( + _ = unix.Flock_t{ + Type: int16(0), + Whence: int16(0), + Start: int64(0), + Len: int64(0), + Pid: int32(0), + } + ) + const ( + _ = unix.F_GETLK + _ = unix.F_SETLK + _ = unix.F_SETLKW + ) +} + +// TestFcntlFlock tests whether the file locking structure matches +// the calling convention of each kernel. 
+func TestFcntlFlock(t *testing.T) { + name := filepath.Join(os.TempDir(), "TestFcntlFlock") + fd, err := unix.Open(name, unix.O_CREAT|unix.O_RDWR|unix.O_CLOEXEC, 0) + if err != nil { + t.Fatalf("Open failed: %v", err) + } + defer unix.Unlink(name) + defer unix.Close(fd) + flock := unix.Flock_t{ + Type: unix.F_RDLCK, + Start: 0, Len: 0, Whence: 1, + } + if err := unix.FcntlFlock(uintptr(fd), unix.F_GETLK, &flock); err != nil { + t.Fatalf("FcntlFlock failed: %v", err) + } +} + +// TestPassFD tests passing a file descriptor over a Unix socket. +// +// This test involved both a parent and child process. The parent +// process is invoked as a normal test, with "go test", which then +// runs the child process by running the current test binary with args +// "-test.run=^TestPassFD$" and an environment variable used to signal +// that the test should become the child process instead. +func TestPassFD(t *testing.T) { + switch runtime.GOOS { + case "dragonfly": + // TODO(jsing): Figure out why sendmsg is returning EINVAL. + t.Skip("skipping test on dragonfly") + case "solaris": + // TODO(aram): Figure out why ReadMsgUnix is returning empty message. + t.Skip("skipping test on solaris, see issue 7402") + } + if os.Getenv("GO_WANT_HELPER_PROCESS") == "1" { + passFDChild() + return + } + + tempDir, err := ioutil.TempDir("", "TestPassFD") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tempDir) + + fds, err := unix.Socketpair(unix.AF_LOCAL, unix.SOCK_STREAM, 0) + if err != nil { + t.Fatalf("Socketpair: %v", err) + } + defer unix.Close(fds[0]) + defer unix.Close(fds[1]) + writeFile := os.NewFile(uintptr(fds[0]), "child-writes") + readFile := os.NewFile(uintptr(fds[1]), "parent-reads") + defer writeFile.Close() + defer readFile.Close() + + cmd := exec.Command(os.Args[0], "-test.run=^TestPassFD$", "--", tempDir) + cmd.Env = []string{"GO_WANT_HELPER_PROCESS=1"} + if lp := os.Getenv("LD_LIBRARY_PATH"); lp != "" { + cmd.Env = append(cmd.Env, "LD_LIBRARY_PATH="+lp) + } + cmd.ExtraFiles = []*os.File{writeFile} + + out, err := cmd.CombinedOutput() + if len(out) > 0 || err != nil { + t.Fatalf("child process: %q, %v", out, err) + } + + c, err := net.FileConn(readFile) + if err != nil { + t.Fatalf("FileConn: %v", err) + } + defer c.Close() + + uc, ok := c.(*net.UnixConn) + if !ok { + t.Fatalf("unexpected FileConn type; expected UnixConn, got %T", c) + } + + buf := make([]byte, 32) // expect 1 byte + oob := make([]byte, 32) // expect 24 bytes + closeUnix := time.AfterFunc(5*time.Second, func() { + t.Logf("timeout reading from unix socket") + uc.Close() + }) + _, oobn, _, _, err := uc.ReadMsgUnix(buf, oob) + closeUnix.Stop() + + scms, err := unix.ParseSocketControlMessage(oob[:oobn]) + if err != nil { + t.Fatalf("ParseSocketControlMessage: %v", err) + } + if len(scms) != 1 { + t.Fatalf("expected 1 SocketControlMessage; got scms = %#v", scms) + } + scm := scms[0] + gotFds, err := unix.ParseUnixRights(&scm) + if err != nil { + t.Fatalf("unix.ParseUnixRights: %v", err) + } + if len(gotFds) != 1 { + t.Fatalf("wanted 1 fd; got %#v", gotFds) + } + + f := os.NewFile(uintptr(gotFds[0]), "fd-from-child") + defer f.Close() + + got, err := ioutil.ReadAll(f) + want := "Hello from child process!\n" + if string(got) != want { + t.Errorf("child process ReadAll: %q, %v; want %q", got, err, want) + } +} + +// passFDChild is the child process used by TestPassFD. +func passFDChild() { + defer os.Exit(0) + + // Look for our fd. 
It should be fd 3, but we work around an fd leak + // bug here (http://golang.org/issue/2603) to let it be elsewhere. + var uc *net.UnixConn + for fd := uintptr(3); fd <= 10; fd++ { + f := os.NewFile(fd, "unix-conn") + var ok bool + netc, _ := net.FileConn(f) + uc, ok = netc.(*net.UnixConn) + if ok { + break + } + } + if uc == nil { + fmt.Println("failed to find unix fd") + return + } + + // Make a file f to send to our parent process on uc. + // We make it in tempDir, which our parent will clean up. + flag.Parse() + tempDir := flag.Arg(0) + f, err := ioutil.TempFile(tempDir, "") + if err != nil { + fmt.Printf("TempFile: %v", err) + return + } + + f.Write([]byte("Hello from child process!\n")) + f.Seek(0, 0) + + rights := unix.UnixRights(int(f.Fd())) + dummyByte := []byte("x") + n, oobn, err := uc.WriteMsgUnix(dummyByte, rights, nil) + if err != nil { + fmt.Printf("WriteMsgUnix: %v", err) + return + } + if n != 1 || oobn != len(rights) { + fmt.Printf("WriteMsgUnix = %d, %d; want 1, %d", n, oobn, len(rights)) + return + } +} + +// TestUnixRightsRoundtrip tests that UnixRights, ParseSocketControlMessage, +// and ParseUnixRights are able to successfully round-trip lists of file descriptors. +func TestUnixRightsRoundtrip(t *testing.T) { + testCases := [...][][]int{ + {{42}}, + {{1, 2}}, + {{3, 4, 5}}, + {{}}, + {{1, 2}, {3, 4, 5}, {}, {7}}, + } + for _, testCase := range testCases { + b := []byte{} + var n int + for _, fds := range testCase { + // Last assignment to n wins + n = len(b) + unix.CmsgLen(4*len(fds)) + b = append(b, unix.UnixRights(fds...)...) + } + // Truncate b + b = b[:n] + + scms, err := unix.ParseSocketControlMessage(b) + if err != nil { + t.Fatalf("ParseSocketControlMessage: %v", err) + } + if len(scms) != len(testCase) { + t.Fatalf("expected %v SocketControlMessage; got scms = %#v", len(testCase), scms) + } + for i, scm := range scms { + gotFds, err := unix.ParseUnixRights(&scm) + if err != nil { + t.Fatalf("ParseUnixRights: %v", err) + } + wantFds := testCase[i] + if len(gotFds) != len(wantFds) { + t.Fatalf("expected %v fds, got %#v", len(wantFds), gotFds) + } + for j, fd := range gotFds { + if fd != wantFds[j] { + t.Fatalf("expected fd %v, got %v", wantFds[j], fd) + } + } + } + } +} + +func TestRlimit(t *testing.T) { + var rlimit, zero unix.Rlimit + err := unix.Getrlimit(unix.RLIMIT_NOFILE, &rlimit) + if err != nil { + t.Fatalf("Getrlimit: save failed: %v", err) + } + if zero == rlimit { + t.Fatalf("Getrlimit: save failed: got zero value %#v", rlimit) + } + set := rlimit + set.Cur = set.Max - 1 + err = unix.Setrlimit(unix.RLIMIT_NOFILE, &set) + if err != nil { + t.Fatalf("Setrlimit: set failed: %#v %v", set, err) + } + var get unix.Rlimit + err = unix.Getrlimit(unix.RLIMIT_NOFILE, &get) + if err != nil { + t.Fatalf("Getrlimit: get failed: %v", err) + } + set = rlimit + set.Cur = set.Max - 1 + if set != get { + // Seems like Darwin requires some privilege to + // increase the soft limit of rlimit sandbox, though + // Setrlimit never reports an error. 
+ switch runtime.GOOS { + case "darwin": + default: + t.Fatalf("Rlimit: change failed: wanted %#v got %#v", set, get) + } + } + err = unix.Setrlimit(unix.RLIMIT_NOFILE, &rlimit) + if err != nil { + t.Fatalf("Setrlimit: restore failed: %#v %v", rlimit, err) + } +} + +func TestSeekFailure(t *testing.T) { + _, err := unix.Seek(-1, 0, 0) + if err == nil { + t.Fatalf("Seek(-1, 0, 0) did not fail") + } + str := err.Error() // used to crash on Linux + t.Logf("Seek: %v", str) + if str == "" { + t.Fatalf("Seek(-1, 0, 0) return error with empty message") + } +} diff --git a/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/types_darwin.go b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/types_darwin.go new file mode 100644 index 000000000..115326182 --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/types_darwin.go @@ -0,0 +1,250 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build ignore + +/* +Input to cgo -godefs. See also mkerrors.sh and mkall.sh +*/ + +// +godefs map struct_in_addr [4]byte /* in_addr */ +// +godefs map struct_in6_addr [16]byte /* in6_addr */ + +package unix + +/* +#define __DARWIN_UNIX03 0 +#define KERNEL +#define _DARWIN_USE_64_BIT_INODE +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +enum { + sizeofPtr = sizeof(void*), +}; + +union sockaddr_all { + struct sockaddr s1; // this one gets used for fields + struct sockaddr_in s2; // these pad it out + struct sockaddr_in6 s3; + struct sockaddr_un s4; + struct sockaddr_dl s5; +}; + +struct sockaddr_any { + struct sockaddr addr; + char pad[sizeof(union sockaddr_all) - sizeof(struct sockaddr)]; +}; + +*/ +import "C" + +// Machine characteristics; for internal use. 
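+// (Nothing in this file is compiled directly; note the "+build ignore"
+// tag above. Instead, "go tool cgo -godefs" resolves each C.xxx
+// reference against the C preamble and emits a plain-Go ztypes_* file.
+// A schematic invocation, following the mkall.sh convention, would be:
+//
+//	GOARCH=amd64 go tool cgo -godefs types_darwin.go > ztypes_darwin_amd64.go
+//
+// with the output then gofmt-ed and checked in.)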
+ +const ( + sizeofPtr = C.sizeofPtr + sizeofShort = C.sizeof_short + sizeofInt = C.sizeof_int + sizeofLong = C.sizeof_long + sizeofLongLong = C.sizeof_longlong +) + +// Basic types + +type ( + _C_short C.short + _C_int C.int + _C_long C.long + _C_long_long C.longlong +) + +// Time + +type Timespec C.struct_timespec + +type Timeval C.struct_timeval + +type Timeval32 C.struct_timeval32 + +// Processes + +type Rusage C.struct_rusage + +type Rlimit C.struct_rlimit + +type _Gid_t C.gid_t + +// Files + +type Stat_t C.struct_stat64 + +type Statfs_t C.struct_statfs64 + +type Flock_t C.struct_flock + +type Fstore_t C.struct_fstore + +type Radvisory_t C.struct_radvisory + +type Fbootstraptransfer_t C.struct_fbootstraptransfer + +type Log2phys_t C.struct_log2phys + +type Fsid C.struct_fsid + +type Dirent C.struct_dirent + +// Sockets + +type RawSockaddrInet4 C.struct_sockaddr_in + +type RawSockaddrInet6 C.struct_sockaddr_in6 + +type RawSockaddrUnix C.struct_sockaddr_un + +type RawSockaddrDatalink C.struct_sockaddr_dl + +type RawSockaddr C.struct_sockaddr + +type RawSockaddrAny C.struct_sockaddr_any + +type _Socklen C.socklen_t + +type Linger C.struct_linger + +type Iovec C.struct_iovec + +type IPMreq C.struct_ip_mreq + +type IPv6Mreq C.struct_ipv6_mreq + +type Msghdr C.struct_msghdr + +type Cmsghdr C.struct_cmsghdr + +type Inet4Pktinfo C.struct_in_pktinfo + +type Inet6Pktinfo C.struct_in6_pktinfo + +type IPv6MTUInfo C.struct_ip6_mtuinfo + +type ICMPv6Filter C.struct_icmp6_filter + +const ( + SizeofSockaddrInet4 = C.sizeof_struct_sockaddr_in + SizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6 + SizeofSockaddrAny = C.sizeof_struct_sockaddr_any + SizeofSockaddrUnix = C.sizeof_struct_sockaddr_un + SizeofSockaddrDatalink = C.sizeof_struct_sockaddr_dl + SizeofLinger = C.sizeof_struct_linger + SizeofIPMreq = C.sizeof_struct_ip_mreq + SizeofIPv6Mreq = C.sizeof_struct_ipv6_mreq + SizeofMsghdr = C.sizeof_struct_msghdr + SizeofCmsghdr = C.sizeof_struct_cmsghdr + SizeofInet4Pktinfo = C.sizeof_struct_in_pktinfo + SizeofInet6Pktinfo = C.sizeof_struct_in6_pktinfo + SizeofIPv6MTUInfo = C.sizeof_struct_ip6_mtuinfo + SizeofICMPv6Filter = C.sizeof_struct_icmp6_filter +) + +// Ptrace requests + +const ( + PTRACE_TRACEME = C.PT_TRACE_ME + PTRACE_CONT = C.PT_CONTINUE + PTRACE_KILL = C.PT_KILL +) + +// Events (kqueue, kevent) + +type Kevent_t C.struct_kevent + +// Select + +type FdSet C.fd_set + +// Routing and interface messages + +const ( + SizeofIfMsghdr = C.sizeof_struct_if_msghdr + SizeofIfData = C.sizeof_struct_if_data + SizeofIfaMsghdr = C.sizeof_struct_ifa_msghdr + SizeofIfmaMsghdr = C.sizeof_struct_ifma_msghdr + SizeofIfmaMsghdr2 = C.sizeof_struct_ifma_msghdr2 + SizeofRtMsghdr = C.sizeof_struct_rt_msghdr + SizeofRtMetrics = C.sizeof_struct_rt_metrics +) + +type IfMsghdr C.struct_if_msghdr + +type IfData C.struct_if_data + +type IfaMsghdr C.struct_ifa_msghdr + +type IfmaMsghdr C.struct_ifma_msghdr + +type IfmaMsghdr2 C.struct_ifma_msghdr2 + +type RtMsghdr C.struct_rt_msghdr + +type RtMetrics C.struct_rt_metrics + +// Berkeley packet filter + +const ( + SizeofBpfVersion = C.sizeof_struct_bpf_version + SizeofBpfStat = C.sizeof_struct_bpf_stat + SizeofBpfProgram = C.sizeof_struct_bpf_program + SizeofBpfInsn = C.sizeof_struct_bpf_insn + SizeofBpfHdr = C.sizeof_struct_bpf_hdr +) + +type BpfVersion C.struct_bpf_version + +type BpfStat C.struct_bpf_stat + +type BpfProgram C.struct_bpf_program + +type BpfInsn C.struct_bpf_insn + +type BpfHdr C.struct_bpf_hdr + +// Terminal handling + +type Termios C.struct_termios + +// 
fchmodat-like syscalls. + +const ( + AT_FDCWD = C.AT_FDCWD + AT_SYMLINK_NOFOLLOW = C.AT_SYMLINK_NOFOLLOW +) diff --git a/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/types_dragonfly.go b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/types_dragonfly.go new file mode 100644 index 000000000..f3c971dff --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/types_dragonfly.go @@ -0,0 +1,242 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build ignore + +/* +Input to cgo -godefs. See also mkerrors.sh and mkall.sh +*/ + +// +godefs map struct_in_addr [4]byte /* in_addr */ +// +godefs map struct_in6_addr [16]byte /* in6_addr */ + +package unix + +/* +#define KERNEL +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +enum { + sizeofPtr = sizeof(void*), +}; + +union sockaddr_all { + struct sockaddr s1; // this one gets used for fields + struct sockaddr_in s2; // these pad it out + struct sockaddr_in6 s3; + struct sockaddr_un s4; + struct sockaddr_dl s5; +}; + +struct sockaddr_any { + struct sockaddr addr; + char pad[sizeof(union sockaddr_all) - sizeof(struct sockaddr)]; +}; + +*/ +import "C" + +// Machine characteristics; for internal use. + +const ( + sizeofPtr = C.sizeofPtr + sizeofShort = C.sizeof_short + sizeofInt = C.sizeof_int + sizeofLong = C.sizeof_long + sizeofLongLong = C.sizeof_longlong +) + +// Basic types + +type ( + _C_short C.short + _C_int C.int + _C_long C.long + _C_long_long C.longlong +) + +// Time + +type Timespec C.struct_timespec + +type Timeval C.struct_timeval + +// Processes + +type Rusage C.struct_rusage + +type Rlimit C.struct_rlimit + +type _Gid_t C.gid_t + +// Files + +const ( // Directory mode bits + S_IFMT = C.S_IFMT + S_IFIFO = C.S_IFIFO + S_IFCHR = C.S_IFCHR + S_IFDIR = C.S_IFDIR + S_IFBLK = C.S_IFBLK + S_IFREG = C.S_IFREG + S_IFLNK = C.S_IFLNK + S_IFSOCK = C.S_IFSOCK + S_ISUID = C.S_ISUID + S_ISGID = C.S_ISGID + S_ISVTX = C.S_ISVTX + S_IRUSR = C.S_IRUSR + S_IWUSR = C.S_IWUSR + S_IXUSR = C.S_IXUSR +) + +type Stat_t C.struct_stat + +type Statfs_t C.struct_statfs + +type Flock_t C.struct_flock + +type Dirent C.struct_dirent + +type Fsid C.struct_fsid + +// Sockets + +type RawSockaddrInet4 C.struct_sockaddr_in + +type RawSockaddrInet6 C.struct_sockaddr_in6 + +type RawSockaddrUnix C.struct_sockaddr_un + +type RawSockaddrDatalink C.struct_sockaddr_dl + +type RawSockaddr C.struct_sockaddr + +type RawSockaddrAny C.struct_sockaddr_any + +type _Socklen C.socklen_t + +type Linger C.struct_linger + +type Iovec C.struct_iovec + +type IPMreq C.struct_ip_mreq + +type IPv6Mreq C.struct_ipv6_mreq + +type Msghdr C.struct_msghdr + +type Cmsghdr C.struct_cmsghdr + +type Inet6Pktinfo C.struct_in6_pktinfo + +type IPv6MTUInfo C.struct_ip6_mtuinfo + +type ICMPv6Filter C.struct_icmp6_filter + +const ( + SizeofSockaddrInet4 = C.sizeof_struct_sockaddr_in + SizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6 + SizeofSockaddrAny = C.sizeof_struct_sockaddr_any + SizeofSockaddrUnix = C.sizeof_struct_sockaddr_un + SizeofSockaddrDatalink = C.sizeof_struct_sockaddr_dl + SizeofLinger = C.sizeof_struct_linger + SizeofIPMreq = C.sizeof_struct_ip_mreq + SizeofIPv6Mreq = 
C.sizeof_struct_ipv6_mreq + SizeofMsghdr = C.sizeof_struct_msghdr + SizeofCmsghdr = C.sizeof_struct_cmsghdr + SizeofInet6Pktinfo = C.sizeof_struct_in6_pktinfo + SizeofIPv6MTUInfo = C.sizeof_struct_ip6_mtuinfo + SizeofICMPv6Filter = C.sizeof_struct_icmp6_filter +) + +// Ptrace requests + +const ( + PTRACE_TRACEME = C.PT_TRACE_ME + PTRACE_CONT = C.PT_CONTINUE + PTRACE_KILL = C.PT_KILL +) + +// Events (kqueue, kevent) + +type Kevent_t C.struct_kevent + +// Select + +type FdSet C.fd_set + +// Routing and interface messages + +const ( + SizeofIfMsghdr = C.sizeof_struct_if_msghdr + SizeofIfData = C.sizeof_struct_if_data + SizeofIfaMsghdr = C.sizeof_struct_ifa_msghdr + SizeofIfmaMsghdr = C.sizeof_struct_ifma_msghdr + SizeofIfAnnounceMsghdr = C.sizeof_struct_if_announcemsghdr + SizeofRtMsghdr = C.sizeof_struct_rt_msghdr + SizeofRtMetrics = C.sizeof_struct_rt_metrics +) + +type IfMsghdr C.struct_if_msghdr + +type IfData C.struct_if_data + +type IfaMsghdr C.struct_ifa_msghdr + +type IfmaMsghdr C.struct_ifma_msghdr + +type IfAnnounceMsghdr C.struct_if_announcemsghdr + +type RtMsghdr C.struct_rt_msghdr + +type RtMetrics C.struct_rt_metrics + +// Berkeley packet filter + +const ( + SizeofBpfVersion = C.sizeof_struct_bpf_version + SizeofBpfStat = C.sizeof_struct_bpf_stat + SizeofBpfProgram = C.sizeof_struct_bpf_program + SizeofBpfInsn = C.sizeof_struct_bpf_insn + SizeofBpfHdr = C.sizeof_struct_bpf_hdr +) + +type BpfVersion C.struct_bpf_version + +type BpfStat C.struct_bpf_stat + +type BpfProgram C.struct_bpf_program + +type BpfInsn C.struct_bpf_insn + +type BpfHdr C.struct_bpf_hdr + +// Terminal handling + +type Termios C.struct_termios diff --git a/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/types_freebsd.go b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/types_freebsd.go new file mode 100644 index 000000000..ae24557ad --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/types_freebsd.go @@ -0,0 +1,353 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build ignore + +/* +Input to cgo -godefs. See also mkerrors.sh and mkall.sh +*/ + +// +godefs map struct_in_addr [4]byte /* in_addr */ +// +godefs map struct_in6_addr [16]byte /* in6_addr */ + +package unix + +/* +#define KERNEL +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +enum { + sizeofPtr = sizeof(void*), +}; + +union sockaddr_all { + struct sockaddr s1; // this one gets used for fields + struct sockaddr_in s2; // these pad it out + struct sockaddr_in6 s3; + struct sockaddr_un s4; + struct sockaddr_dl s5; +}; + +struct sockaddr_any { + struct sockaddr addr; + char pad[sizeof(union sockaddr_all) - sizeof(struct sockaddr)]; +}; + +// This structure is a duplicate of stat on FreeBSD 8-STABLE. +// See /usr/include/sys/stat.h. 
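+// (Keeping a frozen local copy here, rather than using the system's
+// struct stat directly, presumably pins the exact field layout that
+// cgo -godefs translates into the Go Stat_t, so the generated type
+// does not shift underneath callers as FreeBSD's headers evolve.)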
+struct stat8 { +#undef st_atimespec st_atim +#undef st_mtimespec st_mtim +#undef st_ctimespec st_ctim +#undef st_birthtimespec st_birthtim + __dev_t st_dev; + ino_t st_ino; + mode_t st_mode; + nlink_t st_nlink; + uid_t st_uid; + gid_t st_gid; + __dev_t st_rdev; +#if __BSD_VISIBLE + struct timespec st_atimespec; + struct timespec st_mtimespec; + struct timespec st_ctimespec; +#else + time_t st_atime; + long __st_atimensec; + time_t st_mtime; + long __st_mtimensec; + time_t st_ctime; + long __st_ctimensec; +#endif + off_t st_size; + blkcnt_t st_blocks; + blksize_t st_blksize; + fflags_t st_flags; + __uint32_t st_gen; + __int32_t st_lspare; +#if __BSD_VISIBLE + struct timespec st_birthtimespec; + unsigned int :(8 / 2) * (16 - (int)sizeof(struct timespec)); + unsigned int :(8 / 2) * (16 - (int)sizeof(struct timespec)); +#else + time_t st_birthtime; + long st_birthtimensec; + unsigned int :(8 / 2) * (16 - (int)sizeof(struct __timespec)); + unsigned int :(8 / 2) * (16 - (int)sizeof(struct __timespec)); +#endif +}; + +// This structure is a duplicate of if_data on FreeBSD 8-STABLE. +// See /usr/include/net/if.h. +struct if_data8 { + u_char ifi_type; + u_char ifi_physical; + u_char ifi_addrlen; + u_char ifi_hdrlen; + u_char ifi_link_state; + u_char ifi_spare_char1; + u_char ifi_spare_char2; + u_char ifi_datalen; + u_long ifi_mtu; + u_long ifi_metric; + u_long ifi_baudrate; + u_long ifi_ipackets; + u_long ifi_ierrors; + u_long ifi_opackets; + u_long ifi_oerrors; + u_long ifi_collisions; + u_long ifi_ibytes; + u_long ifi_obytes; + u_long ifi_imcasts; + u_long ifi_omcasts; + u_long ifi_iqdrops; + u_long ifi_noproto; + u_long ifi_hwassist; + time_t ifi_epoch; + struct timeval ifi_lastchange; +}; + +// This structure is a duplicate of if_msghdr on FreeBSD 8-STABLE. +// See /usr/include/net/if.h. +struct if_msghdr8 { + u_short ifm_msglen; + u_char ifm_version; + u_char ifm_type; + int ifm_addrs; + int ifm_flags; + u_short ifm_index; + struct if_data8 ifm_data; +}; +*/ +import "C" + +// Machine characteristics; for internal use. 
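The struct stat8, if_data8, and if_msghdr8 duplicates in the preamble above pin the generated Go types to the FreeBSD 8-STABLE ABI, so they stay stable even when the build host's headers describe a newer kernel. Further down the file this produces a two-tier naming scheme: the current kernel layout stays unexported, while the 8-compatible layout is the exported API. A condensed sketch of that pattern, restating the declarations that appear below under "Routing and interface messages":

    const sizeofIfMsghdr = C.sizeof_struct_if_msghdr  // current kernel layout, internal
    const SizeofIfMsghdr = C.sizeof_struct_if_msghdr8 // 8-STABLE layout, exported

    type ifMsghdr C.struct_if_msghdr  // current kernel layout, internal
    type IfMsghdr C.struct_if_msghdr8 // 8-STABLE layout, exported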
+ +const ( + sizeofPtr = C.sizeofPtr + sizeofShort = C.sizeof_short + sizeofInt = C.sizeof_int + sizeofLong = C.sizeof_long + sizeofLongLong = C.sizeof_longlong +) + +// Basic types + +type ( + _C_short C.short + _C_int C.int + _C_long C.long + _C_long_long C.longlong +) + +// Time + +type Timespec C.struct_timespec + +type Timeval C.struct_timeval + +// Processes + +type Rusage C.struct_rusage + +type Rlimit C.struct_rlimit + +type _Gid_t C.gid_t + +// Files + +const ( // Directory mode bits + S_IFMT = C.S_IFMT + S_IFIFO = C.S_IFIFO + S_IFCHR = C.S_IFCHR + S_IFDIR = C.S_IFDIR + S_IFBLK = C.S_IFBLK + S_IFREG = C.S_IFREG + S_IFLNK = C.S_IFLNK + S_IFSOCK = C.S_IFSOCK + S_ISUID = C.S_ISUID + S_ISGID = C.S_ISGID + S_ISVTX = C.S_ISVTX + S_IRUSR = C.S_IRUSR + S_IWUSR = C.S_IWUSR + S_IXUSR = C.S_IXUSR +) + +type Stat_t C.struct_stat8 + +type Statfs_t C.struct_statfs + +type Flock_t C.struct_flock + +type Dirent C.struct_dirent + +type Fsid C.struct_fsid + +// Advice to Fadvise + +const ( + FADV_NORMAL = C.POSIX_FADV_NORMAL + FADV_RANDOM = C.POSIX_FADV_RANDOM + FADV_SEQUENTIAL = C.POSIX_FADV_SEQUENTIAL + FADV_WILLNEED = C.POSIX_FADV_WILLNEED + FADV_DONTNEED = C.POSIX_FADV_DONTNEED + FADV_NOREUSE = C.POSIX_FADV_NOREUSE +) + +// Sockets + +type RawSockaddrInet4 C.struct_sockaddr_in + +type RawSockaddrInet6 C.struct_sockaddr_in6 + +type RawSockaddrUnix C.struct_sockaddr_un + +type RawSockaddrDatalink C.struct_sockaddr_dl + +type RawSockaddr C.struct_sockaddr + +type RawSockaddrAny C.struct_sockaddr_any + +type _Socklen C.socklen_t + +type Linger C.struct_linger + +type Iovec C.struct_iovec + +type IPMreq C.struct_ip_mreq + +type IPMreqn C.struct_ip_mreqn + +type IPv6Mreq C.struct_ipv6_mreq + +type Msghdr C.struct_msghdr + +type Cmsghdr C.struct_cmsghdr + +type Inet6Pktinfo C.struct_in6_pktinfo + +type IPv6MTUInfo C.struct_ip6_mtuinfo + +type ICMPv6Filter C.struct_icmp6_filter + +const ( + SizeofSockaddrInet4 = C.sizeof_struct_sockaddr_in + SizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6 + SizeofSockaddrAny = C.sizeof_struct_sockaddr_any + SizeofSockaddrUnix = C.sizeof_struct_sockaddr_un + SizeofSockaddrDatalink = C.sizeof_struct_sockaddr_dl + SizeofLinger = C.sizeof_struct_linger + SizeofIPMreq = C.sizeof_struct_ip_mreq + SizeofIPMreqn = C.sizeof_struct_ip_mreqn + SizeofIPv6Mreq = C.sizeof_struct_ipv6_mreq + SizeofMsghdr = C.sizeof_struct_msghdr + SizeofCmsghdr = C.sizeof_struct_cmsghdr + SizeofInet6Pktinfo = C.sizeof_struct_in6_pktinfo + SizeofIPv6MTUInfo = C.sizeof_struct_ip6_mtuinfo + SizeofICMPv6Filter = C.sizeof_struct_icmp6_filter +) + +// Ptrace requests + +const ( + PTRACE_TRACEME = C.PT_TRACE_ME + PTRACE_CONT = C.PT_CONTINUE + PTRACE_KILL = C.PT_KILL +) + +// Events (kqueue, kevent) + +type Kevent_t C.struct_kevent + +// Select + +type FdSet C.fd_set + +// Routing and interface messages + +const ( + sizeofIfMsghdr = C.sizeof_struct_if_msghdr + SizeofIfMsghdr = C.sizeof_struct_if_msghdr8 + sizeofIfData = C.sizeof_struct_if_data + SizeofIfData = C.sizeof_struct_if_data8 + SizeofIfaMsghdr = C.sizeof_struct_ifa_msghdr + SizeofIfmaMsghdr = C.sizeof_struct_ifma_msghdr + SizeofIfAnnounceMsghdr = C.sizeof_struct_if_announcemsghdr + SizeofRtMsghdr = C.sizeof_struct_rt_msghdr + SizeofRtMetrics = C.sizeof_struct_rt_metrics +) + +type ifMsghdr C.struct_if_msghdr + +type IfMsghdr C.struct_if_msghdr8 + +type ifData C.struct_if_data + +type IfData C.struct_if_data8 + +type IfaMsghdr C.struct_ifa_msghdr + +type IfmaMsghdr C.struct_ifma_msghdr + +type IfAnnounceMsghdr C.struct_if_announcemsghdr + +type 
RtMsghdr C.struct_rt_msghdr + +type RtMetrics C.struct_rt_metrics + +// Berkeley packet filter + +const ( + SizeofBpfVersion = C.sizeof_struct_bpf_version + SizeofBpfStat = C.sizeof_struct_bpf_stat + SizeofBpfZbuf = C.sizeof_struct_bpf_zbuf + SizeofBpfProgram = C.sizeof_struct_bpf_program + SizeofBpfInsn = C.sizeof_struct_bpf_insn + SizeofBpfHdr = C.sizeof_struct_bpf_hdr + SizeofBpfZbufHeader = C.sizeof_struct_bpf_zbuf_header +) + +type BpfVersion C.struct_bpf_version + +type BpfStat C.struct_bpf_stat + +type BpfZbuf C.struct_bpf_zbuf + +type BpfProgram C.struct_bpf_program + +type BpfInsn C.struct_bpf_insn + +type BpfHdr C.struct_bpf_hdr + +type BpfZbufHeader C.struct_bpf_zbuf_header + +// Terminal handling + +type Termios C.struct_termios diff --git a/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/types_linux.go b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/types_linux.go new file mode 100644 index 000000000..d5275875f --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/types_linux.go @@ -0,0 +1,406 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build ignore + +/* +Input to cgo -godefs. See also mkerrors.sh and mkall.sh +*/ + +// +godefs map struct_in_addr [4]byte /* in_addr */ +// +godefs map struct_in6_addr [16]byte /* in6_addr */ + +package unix + +/* +#define _LARGEFILE_SOURCE +#define _LARGEFILE64_SOURCE +#define _FILE_OFFSET_BITS 64 +#define _GNU_SOURCE + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#ifdef TCSETS2 +// On systems that have "struct termios2" use this as type Termios. +typedef struct termios2 termios_t; +#else +typedef struct termios termios_t; +#endif + +enum { + sizeofPtr = sizeof(void*), +}; + +union sockaddr_all { + struct sockaddr s1; // this one gets used for fields + struct sockaddr_in s2; // these pad it out + struct sockaddr_in6 s3; + struct sockaddr_un s4; + struct sockaddr_ll s5; + struct sockaddr_nl s6; +}; + +struct sockaddr_any { + struct sockaddr addr; + char pad[sizeof(union sockaddr_all) - sizeof(struct sockaddr)]; +}; + +// copied from /usr/include/linux/un.h +struct my_sockaddr_un { + sa_family_t sun_family; +#if defined(__ARM_EABI__) || defined(__powerpc64__) + // on ARM char is by default unsigned + signed char sun_path[108]; +#else + char sun_path[108]; +#endif +}; + +#ifdef __ARM_EABI__ +typedef struct user_regs PtraceRegs; +#elif defined(__aarch64__) +typedef struct user_pt_regs PtraceRegs; +#elif defined(__powerpc64__) +typedef struct pt_regs PtraceRegs; +#else +typedef struct user_regs_struct PtraceRegs; +#endif + +// The real epoll_event is a union, and godefs doesn't handle it well. +struct my_epoll_event { + uint32_t events; +#ifdef __ARM_EABI__ + // padding is not specified in linux/eventpoll.h but added to conform to the + // alignment requirements of EABI + int32_t padFd; +#endif + int32_t fd; + int32_t pad; +}; + +*/ +import "C" + +// Machine characteristics; for internal use. 
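Like its siblings, this file is guarded by +build ignore and is only consumed by cgo -godefs, which resolves each C.xxx reference against the host headers and emits concrete Go declarations. A minimal sketch of that step, assuming the usual mkall.sh-style invocation (the exact flags used by mkall.sh may differ):

    GOARCH=amd64 go tool cgo -godefs types_linux.go > ztypes_linux_amd64.go

and the kind of concrete definition it emits, e.g. for Timespec on linux/amd64 (other GOARCH values get different integer widths):

    type Timespec struct {
            Sec  int64 // seconds
            Nsec int64 // nanoseconds
    }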
+ +const ( + sizeofPtr = C.sizeofPtr + sizeofShort = C.sizeof_short + sizeofInt = C.sizeof_int + sizeofLong = C.sizeof_long + sizeofLongLong = C.sizeof_longlong + PathMax = C.PATH_MAX +) + +// Basic types + +type ( + _C_short C.short + _C_int C.int + _C_long C.long + _C_long_long C.longlong +) + +// Time + +type Timespec C.struct_timespec + +type Timeval C.struct_timeval + +type Timex C.struct_timex + +type Time_t C.time_t + +type Tms C.struct_tms + +type Utimbuf C.struct_utimbuf + +// Processes + +type Rusage C.struct_rusage + +type Rlimit C.struct_rlimit + +type _Gid_t C.gid_t + +// Files + +type Stat_t C.struct_stat + +type Statfs_t C.struct_statfs + +type Dirent C.struct_dirent + +type Fsid C.fsid_t + +type Flock_t C.struct_flock + +// Advice to Fadvise + +const ( + FADV_NORMAL = C.POSIX_FADV_NORMAL + FADV_RANDOM = C.POSIX_FADV_RANDOM + FADV_SEQUENTIAL = C.POSIX_FADV_SEQUENTIAL + FADV_WILLNEED = C.POSIX_FADV_WILLNEED + FADV_DONTNEED = C.POSIX_FADV_DONTNEED + FADV_NOREUSE = C.POSIX_FADV_NOREUSE +) + +// Sockets + +type RawSockaddrInet4 C.struct_sockaddr_in + +type RawSockaddrInet6 C.struct_sockaddr_in6 + +type RawSockaddrUnix C.struct_my_sockaddr_un + +type RawSockaddrLinklayer C.struct_sockaddr_ll + +type RawSockaddrNetlink C.struct_sockaddr_nl + +type RawSockaddr C.struct_sockaddr + +type RawSockaddrAny C.struct_sockaddr_any + +type _Socklen C.socklen_t + +type Linger C.struct_linger + +type Iovec C.struct_iovec + +type IPMreq C.struct_ip_mreq + +type IPMreqn C.struct_ip_mreqn + +type IPv6Mreq C.struct_ipv6_mreq + +type Msghdr C.struct_msghdr + +type Cmsghdr C.struct_cmsghdr + +type Inet4Pktinfo C.struct_in_pktinfo + +type Inet6Pktinfo C.struct_in6_pktinfo + +type IPv6MTUInfo C.struct_ip6_mtuinfo + +type ICMPv6Filter C.struct_icmp6_filter + +type Ucred C.struct_ucred + +type TCPInfo C.struct_tcp_info + +const ( + SizeofSockaddrInet4 = C.sizeof_struct_sockaddr_in + SizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6 + SizeofSockaddrAny = C.sizeof_struct_sockaddr_any + SizeofSockaddrUnix = C.sizeof_struct_sockaddr_un + SizeofSockaddrLinklayer = C.sizeof_struct_sockaddr_ll + SizeofSockaddrNetlink = C.sizeof_struct_sockaddr_nl + SizeofLinger = C.sizeof_struct_linger + SizeofIPMreq = C.sizeof_struct_ip_mreq + SizeofIPMreqn = C.sizeof_struct_ip_mreqn + SizeofIPv6Mreq = C.sizeof_struct_ipv6_mreq + SizeofMsghdr = C.sizeof_struct_msghdr + SizeofCmsghdr = C.sizeof_struct_cmsghdr + SizeofInet4Pktinfo = C.sizeof_struct_in_pktinfo + SizeofInet6Pktinfo = C.sizeof_struct_in6_pktinfo + SizeofIPv6MTUInfo = C.sizeof_struct_ip6_mtuinfo + SizeofICMPv6Filter = C.sizeof_struct_icmp6_filter + SizeofUcred = C.sizeof_struct_ucred + SizeofTCPInfo = C.sizeof_struct_tcp_info +) + +// Netlink routing and interface messages + +const ( + IFA_UNSPEC = C.IFA_UNSPEC + IFA_ADDRESS = C.IFA_ADDRESS + IFA_LOCAL = C.IFA_LOCAL + IFA_LABEL = C.IFA_LABEL + IFA_BROADCAST = C.IFA_BROADCAST + IFA_ANYCAST = C.IFA_ANYCAST + IFA_CACHEINFO = C.IFA_CACHEINFO + IFA_MULTICAST = C.IFA_MULTICAST + IFLA_UNSPEC = C.IFLA_UNSPEC + IFLA_ADDRESS = C.IFLA_ADDRESS + IFLA_BROADCAST = C.IFLA_BROADCAST + IFLA_IFNAME = C.IFLA_IFNAME + IFLA_MTU = C.IFLA_MTU + IFLA_LINK = C.IFLA_LINK + IFLA_QDISC = C.IFLA_QDISC + IFLA_STATS = C.IFLA_STATS + IFLA_COST = C.IFLA_COST + IFLA_PRIORITY = C.IFLA_PRIORITY + IFLA_MASTER = C.IFLA_MASTER + IFLA_WIRELESS = C.IFLA_WIRELESS + IFLA_PROTINFO = C.IFLA_PROTINFO + IFLA_TXQLEN = C.IFLA_TXQLEN + IFLA_MAP = C.IFLA_MAP + IFLA_WEIGHT = C.IFLA_WEIGHT + IFLA_OPERSTATE = C.IFLA_OPERSTATE + IFLA_LINKMODE = C.IFLA_LINKMODE + 
IFLA_LINKINFO = C.IFLA_LINKINFO + IFLA_NET_NS_PID = C.IFLA_NET_NS_PID + IFLA_IFALIAS = C.IFLA_IFALIAS + IFLA_MAX = C.IFLA_MAX + RT_SCOPE_UNIVERSE = C.RT_SCOPE_UNIVERSE + RT_SCOPE_SITE = C.RT_SCOPE_SITE + RT_SCOPE_LINK = C.RT_SCOPE_LINK + RT_SCOPE_HOST = C.RT_SCOPE_HOST + RT_SCOPE_NOWHERE = C.RT_SCOPE_NOWHERE + RT_TABLE_UNSPEC = C.RT_TABLE_UNSPEC + RT_TABLE_COMPAT = C.RT_TABLE_COMPAT + RT_TABLE_DEFAULT = C.RT_TABLE_DEFAULT + RT_TABLE_MAIN = C.RT_TABLE_MAIN + RT_TABLE_LOCAL = C.RT_TABLE_LOCAL + RT_TABLE_MAX = C.RT_TABLE_MAX + RTA_UNSPEC = C.RTA_UNSPEC + RTA_DST = C.RTA_DST + RTA_SRC = C.RTA_SRC + RTA_IIF = C.RTA_IIF + RTA_OIF = C.RTA_OIF + RTA_GATEWAY = C.RTA_GATEWAY + RTA_PRIORITY = C.RTA_PRIORITY + RTA_PREFSRC = C.RTA_PREFSRC + RTA_METRICS = C.RTA_METRICS + RTA_MULTIPATH = C.RTA_MULTIPATH + RTA_FLOW = C.RTA_FLOW + RTA_CACHEINFO = C.RTA_CACHEINFO + RTA_TABLE = C.RTA_TABLE + RTN_UNSPEC = C.RTN_UNSPEC + RTN_UNICAST = C.RTN_UNICAST + RTN_LOCAL = C.RTN_LOCAL + RTN_BROADCAST = C.RTN_BROADCAST + RTN_ANYCAST = C.RTN_ANYCAST + RTN_MULTICAST = C.RTN_MULTICAST + RTN_BLACKHOLE = C.RTN_BLACKHOLE + RTN_UNREACHABLE = C.RTN_UNREACHABLE + RTN_PROHIBIT = C.RTN_PROHIBIT + RTN_THROW = C.RTN_THROW + RTN_NAT = C.RTN_NAT + RTN_XRESOLVE = C.RTN_XRESOLVE + RTNLGRP_NONE = C.RTNLGRP_NONE + RTNLGRP_LINK = C.RTNLGRP_LINK + RTNLGRP_NOTIFY = C.RTNLGRP_NOTIFY + RTNLGRP_NEIGH = C.RTNLGRP_NEIGH + RTNLGRP_TC = C.RTNLGRP_TC + RTNLGRP_IPV4_IFADDR = C.RTNLGRP_IPV4_IFADDR + RTNLGRP_IPV4_MROUTE = C.RTNLGRP_IPV4_MROUTE + RTNLGRP_IPV4_ROUTE = C.RTNLGRP_IPV4_ROUTE + RTNLGRP_IPV4_RULE = C.RTNLGRP_IPV4_RULE + RTNLGRP_IPV6_IFADDR = C.RTNLGRP_IPV6_IFADDR + RTNLGRP_IPV6_MROUTE = C.RTNLGRP_IPV6_MROUTE + RTNLGRP_IPV6_ROUTE = C.RTNLGRP_IPV6_ROUTE + RTNLGRP_IPV6_IFINFO = C.RTNLGRP_IPV6_IFINFO + RTNLGRP_IPV6_PREFIX = C.RTNLGRP_IPV6_PREFIX + RTNLGRP_IPV6_RULE = C.RTNLGRP_IPV6_RULE + RTNLGRP_ND_USEROPT = C.RTNLGRP_ND_USEROPT + SizeofNlMsghdr = C.sizeof_struct_nlmsghdr + SizeofNlMsgerr = C.sizeof_struct_nlmsgerr + SizeofRtGenmsg = C.sizeof_struct_rtgenmsg + SizeofNlAttr = C.sizeof_struct_nlattr + SizeofRtAttr = C.sizeof_struct_rtattr + SizeofIfInfomsg = C.sizeof_struct_ifinfomsg + SizeofIfAddrmsg = C.sizeof_struct_ifaddrmsg + SizeofRtMsg = C.sizeof_struct_rtmsg + SizeofRtNexthop = C.sizeof_struct_rtnexthop +) + +type NlMsghdr C.struct_nlmsghdr + +type NlMsgerr C.struct_nlmsgerr + +type RtGenmsg C.struct_rtgenmsg + +type NlAttr C.struct_nlattr + +type RtAttr C.struct_rtattr + +type IfInfomsg C.struct_ifinfomsg + +type IfAddrmsg C.struct_ifaddrmsg + +type RtMsg C.struct_rtmsg + +type RtNexthop C.struct_rtnexthop + +// Linux socket filter + +const ( + SizeofSockFilter = C.sizeof_struct_sock_filter + SizeofSockFprog = C.sizeof_struct_sock_fprog +) + +type SockFilter C.struct_sock_filter + +type SockFprog C.struct_sock_fprog + +// Inotify + +type InotifyEvent C.struct_inotify_event + +const SizeofInotifyEvent = C.sizeof_struct_inotify_event + +// Ptrace + +// Register structures +type PtraceRegs C.PtraceRegs + +// Misc + +type FdSet C.fd_set + +type Sysinfo_t C.struct_sysinfo + +type Utsname C.struct_utsname + +type Ustat_t C.struct_ustat + +type EpollEvent C.struct_my_epoll_event + +const ( + AT_FDCWD = C.AT_FDCWD + AT_REMOVEDIR = C.AT_REMOVEDIR + AT_SYMLINK_NOFOLLOW = C.AT_SYMLINK_NOFOLLOW +) + +// Terminal handling + +type Termios C.termios_t diff --git a/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/types_netbsd.go b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/types_netbsd.go new file mode 
100644 index 000000000..d15f93d19 --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/types_netbsd.go @@ -0,0 +1,232 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build ignore + +/* +Input to cgo -godefs. See also mkerrors.sh and mkall.sh +*/ + +// +godefs map struct_in_addr [4]byte /* in_addr */ +// +godefs map struct_in6_addr [16]byte /* in6_addr */ + +package unix + +/* +#define KERNEL +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +enum { + sizeofPtr = sizeof(void*), +}; + +union sockaddr_all { + struct sockaddr s1; // this one gets used for fields + struct sockaddr_in s2; // these pad it out + struct sockaddr_in6 s3; + struct sockaddr_un s4; + struct sockaddr_dl s5; +}; + +struct sockaddr_any { + struct sockaddr addr; + char pad[sizeof(union sockaddr_all) - sizeof(struct sockaddr)]; +}; + +*/ +import "C" + +// Machine characteristics; for internal use. + +const ( + sizeofPtr = C.sizeofPtr + sizeofShort = C.sizeof_short + sizeofInt = C.sizeof_int + sizeofLong = C.sizeof_long + sizeofLongLong = C.sizeof_longlong +) + +// Basic types + +type ( + _C_short C.short + _C_int C.int + _C_long C.long + _C_long_long C.longlong +) + +// Time + +type Timespec C.struct_timespec + +type Timeval C.struct_timeval + +// Processes + +type Rusage C.struct_rusage + +type Rlimit C.struct_rlimit + +type _Gid_t C.gid_t + +// Files + +type Stat_t C.struct_stat + +type Statfs_t C.struct_statfs + +type Flock_t C.struct_flock + +type Dirent C.struct_dirent + +type Fsid C.fsid_t + +// Sockets + +type RawSockaddrInet4 C.struct_sockaddr_in + +type RawSockaddrInet6 C.struct_sockaddr_in6 + +type RawSockaddrUnix C.struct_sockaddr_un + +type RawSockaddrDatalink C.struct_sockaddr_dl + +type RawSockaddr C.struct_sockaddr + +type RawSockaddrAny C.struct_sockaddr_any + +type _Socklen C.socklen_t + +type Linger C.struct_linger + +type Iovec C.struct_iovec + +type IPMreq C.struct_ip_mreq + +type IPv6Mreq C.struct_ipv6_mreq + +type Msghdr C.struct_msghdr + +type Cmsghdr C.struct_cmsghdr + +type Inet6Pktinfo C.struct_in6_pktinfo + +type IPv6MTUInfo C.struct_ip6_mtuinfo + +type ICMPv6Filter C.struct_icmp6_filter + +const ( + SizeofSockaddrInet4 = C.sizeof_struct_sockaddr_in + SizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6 + SizeofSockaddrAny = C.sizeof_struct_sockaddr_any + SizeofSockaddrUnix = C.sizeof_struct_sockaddr_un + SizeofSockaddrDatalink = C.sizeof_struct_sockaddr_dl + SizeofLinger = C.sizeof_struct_linger + SizeofIPMreq = C.sizeof_struct_ip_mreq + SizeofIPv6Mreq = C.sizeof_struct_ipv6_mreq + SizeofMsghdr = C.sizeof_struct_msghdr + SizeofCmsghdr = C.sizeof_struct_cmsghdr + SizeofInet6Pktinfo = C.sizeof_struct_in6_pktinfo + SizeofIPv6MTUInfo = C.sizeof_struct_ip6_mtuinfo + SizeofICMPv6Filter = C.sizeof_struct_icmp6_filter +) + +// Ptrace requests + +const ( + PTRACE_TRACEME = C.PT_TRACE_ME + PTRACE_CONT = C.PT_CONTINUE + PTRACE_KILL = C.PT_KILL +) + +// Events (kqueue, kevent) + +type Kevent_t C.struct_kevent + +// Select + +type FdSet C.fd_set + +// Routing and interface messages + +const ( + SizeofIfMsghdr = C.sizeof_struct_if_msghdr + SizeofIfData = C.sizeof_struct_if_data + SizeofIfaMsghdr = 
C.sizeof_struct_ifa_msghdr + SizeofIfAnnounceMsghdr = C.sizeof_struct_if_announcemsghdr + SizeofRtMsghdr = C.sizeof_struct_rt_msghdr + SizeofRtMetrics = C.sizeof_struct_rt_metrics +) + +type IfMsghdr C.struct_if_msghdr + +type IfData C.struct_if_data + +type IfaMsghdr C.struct_ifa_msghdr + +type IfAnnounceMsghdr C.struct_if_announcemsghdr + +type RtMsghdr C.struct_rt_msghdr + +type RtMetrics C.struct_rt_metrics + +type Mclpool C.struct_mclpool + +// Berkeley packet filter + +const ( + SizeofBpfVersion = C.sizeof_struct_bpf_version + SizeofBpfStat = C.sizeof_struct_bpf_stat + SizeofBpfProgram = C.sizeof_struct_bpf_program + SizeofBpfInsn = C.sizeof_struct_bpf_insn + SizeofBpfHdr = C.sizeof_struct_bpf_hdr +) + +type BpfVersion C.struct_bpf_version + +type BpfStat C.struct_bpf_stat + +type BpfProgram C.struct_bpf_program + +type BpfInsn C.struct_bpf_insn + +type BpfHdr C.struct_bpf_hdr + +type BpfTimeval C.struct_bpf_timeval + +// Terminal handling + +type Termios C.struct_termios + +// Sysctl + +type Sysctlnode C.struct_sysctlnode diff --git a/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/types_openbsd.go b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/types_openbsd.go new file mode 100644 index 000000000..b66fe25f7 --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/types_openbsd.go @@ -0,0 +1,244 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build ignore + +/* +Input to cgo -godefs. See also mkerrors.sh and mkall.sh +*/ + +// +godefs map struct_in_addr [4]byte /* in_addr */ +// +godefs map struct_in6_addr [16]byte /* in6_addr */ + +package unix + +/* +#define KERNEL +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +enum { + sizeofPtr = sizeof(void*), +}; + +union sockaddr_all { + struct sockaddr s1; // this one gets used for fields + struct sockaddr_in s2; // these pad it out + struct sockaddr_in6 s3; + struct sockaddr_un s4; + struct sockaddr_dl s5; +}; + +struct sockaddr_any { + struct sockaddr addr; + char pad[sizeof(union sockaddr_all) - sizeof(struct sockaddr)]; +}; + +*/ +import "C" + +// Machine characteristics; for internal use. 
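The union sockaddr_all / struct sockaddr_any pair in the preamble exists only to size RawSockaddrAny large enough to hold any concrete address family, so a single buffer can receive whatever the kernel returns and be cast to the right type afterwards. A minimal caller-side sketch of that idiom (hypothetical code, not part of the generated file; assumes the unix package context with unsafe imported):

    var rsa RawSockaddrAny
    // ...rsa filled in by a system call such as getpeername...
    if rsa.Addr.Family == AF_INET {
            sa := (*RawSockaddrInet4)(unsafe.Pointer(&rsa))
            _ = sa.Port // fields of the concrete sockaddr are now addressable
    }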
+ +const ( + sizeofPtr = C.sizeofPtr + sizeofShort = C.sizeof_short + sizeofInt = C.sizeof_int + sizeofLong = C.sizeof_long + sizeofLongLong = C.sizeof_longlong +) + +// Basic types + +type ( + _C_short C.short + _C_int C.int + _C_long C.long + _C_long_long C.longlong +) + +// Time + +type Timespec C.struct_timespec + +type Timeval C.struct_timeval + +// Processes + +type Rusage C.struct_rusage + +type Rlimit C.struct_rlimit + +type _Gid_t C.gid_t + +// Files + +const ( // Directory mode bits + S_IFMT = C.S_IFMT + S_IFIFO = C.S_IFIFO + S_IFCHR = C.S_IFCHR + S_IFDIR = C.S_IFDIR + S_IFBLK = C.S_IFBLK + S_IFREG = C.S_IFREG + S_IFLNK = C.S_IFLNK + S_IFSOCK = C.S_IFSOCK + S_ISUID = C.S_ISUID + S_ISGID = C.S_ISGID + S_ISVTX = C.S_ISVTX + S_IRUSR = C.S_IRUSR + S_IWUSR = C.S_IWUSR + S_IXUSR = C.S_IXUSR +) + +type Stat_t C.struct_stat + +type Statfs_t C.struct_statfs + +type Flock_t C.struct_flock + +type Dirent C.struct_dirent + +type Fsid C.fsid_t + +// Sockets + +type RawSockaddrInet4 C.struct_sockaddr_in + +type RawSockaddrInet6 C.struct_sockaddr_in6 + +type RawSockaddrUnix C.struct_sockaddr_un + +type RawSockaddrDatalink C.struct_sockaddr_dl + +type RawSockaddr C.struct_sockaddr + +type RawSockaddrAny C.struct_sockaddr_any + +type _Socklen C.socklen_t + +type Linger C.struct_linger + +type Iovec C.struct_iovec + +type IPMreq C.struct_ip_mreq + +type IPv6Mreq C.struct_ipv6_mreq + +type Msghdr C.struct_msghdr + +type Cmsghdr C.struct_cmsghdr + +type Inet6Pktinfo C.struct_in6_pktinfo + +type IPv6MTUInfo C.struct_ip6_mtuinfo + +type ICMPv6Filter C.struct_icmp6_filter + +const ( + SizeofSockaddrInet4 = C.sizeof_struct_sockaddr_in + SizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6 + SizeofSockaddrAny = C.sizeof_struct_sockaddr_any + SizeofSockaddrUnix = C.sizeof_struct_sockaddr_un + SizeofSockaddrDatalink = C.sizeof_struct_sockaddr_dl + SizeofLinger = C.sizeof_struct_linger + SizeofIPMreq = C.sizeof_struct_ip_mreq + SizeofIPv6Mreq = C.sizeof_struct_ipv6_mreq + SizeofMsghdr = C.sizeof_struct_msghdr + SizeofCmsghdr = C.sizeof_struct_cmsghdr + SizeofInet6Pktinfo = C.sizeof_struct_in6_pktinfo + SizeofIPv6MTUInfo = C.sizeof_struct_ip6_mtuinfo + SizeofICMPv6Filter = C.sizeof_struct_icmp6_filter +) + +// Ptrace requests + +const ( + PTRACE_TRACEME = C.PT_TRACE_ME + PTRACE_CONT = C.PT_CONTINUE + PTRACE_KILL = C.PT_KILL +) + +// Events (kqueue, kevent) + +type Kevent_t C.struct_kevent + +// Select + +type FdSet C.fd_set + +// Routing and interface messages + +const ( + SizeofIfMsghdr = C.sizeof_struct_if_msghdr + SizeofIfData = C.sizeof_struct_if_data + SizeofIfaMsghdr = C.sizeof_struct_ifa_msghdr + SizeofIfAnnounceMsghdr = C.sizeof_struct_if_announcemsghdr + SizeofRtMsghdr = C.sizeof_struct_rt_msghdr + SizeofRtMetrics = C.sizeof_struct_rt_metrics +) + +type IfMsghdr C.struct_if_msghdr + +type IfData C.struct_if_data + +type IfaMsghdr C.struct_ifa_msghdr + +type IfAnnounceMsghdr C.struct_if_announcemsghdr + +type RtMsghdr C.struct_rt_msghdr + +type RtMetrics C.struct_rt_metrics + +type Mclpool C.struct_mclpool + +// Berkeley packet filter + +const ( + SizeofBpfVersion = C.sizeof_struct_bpf_version + SizeofBpfStat = C.sizeof_struct_bpf_stat + SizeofBpfProgram = C.sizeof_struct_bpf_program + SizeofBpfInsn = C.sizeof_struct_bpf_insn + SizeofBpfHdr = C.sizeof_struct_bpf_hdr +) + +type BpfVersion C.struct_bpf_version + +type BpfStat C.struct_bpf_stat + +type BpfProgram C.struct_bpf_program + +type BpfInsn C.struct_bpf_insn + +type BpfHdr C.struct_bpf_hdr + +type BpfTimeval C.struct_bpf_timeval + +// Terminal 
handling + +type Termios C.struct_termios diff --git a/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/types_solaris.go b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/types_solaris.go new file mode 100644 index 000000000..6ad50eaba --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/types_solaris.go @@ -0,0 +1,260 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build ignore + +/* +Input to cgo -godefs. See also mkerrors.sh and mkall.sh +*/ + +// +godefs map struct_in_addr [4]byte /* in_addr */ +// +godefs map struct_in6_addr [16]byte /* in6_addr */ + +package unix + +/* +#define KERNEL +// These defines ensure that builds done on newer versions of Solaris are +// backwards-compatible with older versions of Solaris and +// OpenSolaris-based derivatives. +#define __USE_SUNOS_SOCKETS__ // msghdr +#define __USE_LEGACY_PROTOTYPES__ // iovec +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +enum { + sizeofPtr = sizeof(void*), +}; + +union sockaddr_all { + struct sockaddr s1; // this one gets used for fields + struct sockaddr_in s2; // these pad it out + struct sockaddr_in6 s3; + struct sockaddr_un s4; + struct sockaddr_dl s5; +}; + +struct sockaddr_any { + struct sockaddr addr; + char pad[sizeof(union sockaddr_all) - sizeof(struct sockaddr)]; +}; + +*/ +import "C" + +// Machine characteristics; for internal use. 
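The two +godefs map directives at the top of this header (present in every file here) rewrite embedded C in_addr and in6_addr values into plain Go byte arrays, so the generated sockaddr types expose raw address bytes rather than nested C struct wrappers. A hypothetical slice of the generated output under that mapping (field widths are illustrative; the real ones come from the Solaris headers):

    type RawSockaddrInet4 struct {
            Family uint16
            Port   uint16
            Addr   [4]byte /* in_addr, mapped by the +godefs directive */
            Zero   [8]int8
    }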
+ +const ( + sizeofPtr = C.sizeofPtr + sizeofShort = C.sizeof_short + sizeofInt = C.sizeof_int + sizeofLong = C.sizeof_long + sizeofLongLong = C.sizeof_longlong + PathMax = C.PATH_MAX +) + +// Basic types + +type ( + _C_short C.short + _C_int C.int + _C_long C.long + _C_long_long C.longlong +) + +// Time + +type Timespec C.struct_timespec + +type Timeval C.struct_timeval + +type Timeval32 C.struct_timeval32 + +type Tms C.struct_tms + +type Utimbuf C.struct_utimbuf + +// Processes + +type Rusage C.struct_rusage + +type Rlimit C.struct_rlimit + +type _Gid_t C.gid_t + +// Files + +const ( // Directory mode bits + S_IFMT = C.S_IFMT + S_IFIFO = C.S_IFIFO + S_IFCHR = C.S_IFCHR + S_IFDIR = C.S_IFDIR + S_IFBLK = C.S_IFBLK + S_IFREG = C.S_IFREG + S_IFLNK = C.S_IFLNK + S_IFSOCK = C.S_IFSOCK + S_ISUID = C.S_ISUID + S_ISGID = C.S_ISGID + S_ISVTX = C.S_ISVTX + S_IRUSR = C.S_IRUSR + S_IWUSR = C.S_IWUSR + S_IXUSR = C.S_IXUSR +) + +type Stat_t C.struct_stat + +type Flock_t C.struct_flock + +type Dirent C.struct_dirent + +// Sockets + +type RawSockaddrInet4 C.struct_sockaddr_in + +type RawSockaddrInet6 C.struct_sockaddr_in6 + +type RawSockaddrUnix C.struct_sockaddr_un + +type RawSockaddrDatalink C.struct_sockaddr_dl + +type RawSockaddr C.struct_sockaddr + +type RawSockaddrAny C.struct_sockaddr_any + +type _Socklen C.socklen_t + +type Linger C.struct_linger + +type Iovec C.struct_iovec + +type IPMreq C.struct_ip_mreq + +type IPv6Mreq C.struct_ipv6_mreq + +type Msghdr C.struct_msghdr + +type Cmsghdr C.struct_cmsghdr + +type Inet6Pktinfo C.struct_in6_pktinfo + +type IPv6MTUInfo C.struct_ip6_mtuinfo + +type ICMPv6Filter C.struct_icmp6_filter + +const ( + SizeofSockaddrInet4 = C.sizeof_struct_sockaddr_in + SizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6 + SizeofSockaddrAny = C.sizeof_struct_sockaddr_any + SizeofSockaddrUnix = C.sizeof_struct_sockaddr_un + SizeofSockaddrDatalink = C.sizeof_struct_sockaddr_dl + SizeofLinger = C.sizeof_struct_linger + SizeofIPMreq = C.sizeof_struct_ip_mreq + SizeofIPv6Mreq = C.sizeof_struct_ipv6_mreq + SizeofMsghdr = C.sizeof_struct_msghdr + SizeofCmsghdr = C.sizeof_struct_cmsghdr + SizeofInet6Pktinfo = C.sizeof_struct_in6_pktinfo + SizeofIPv6MTUInfo = C.sizeof_struct_ip6_mtuinfo + SizeofICMPv6Filter = C.sizeof_struct_icmp6_filter +) + +// Select + +type FdSet C.fd_set + +// Misc + +type Utsname C.struct_utsname + +type Ustat_t C.struct_ustat + +const ( + AT_FDCWD = C.AT_FDCWD + AT_SYMLINK_NOFOLLOW = C.AT_SYMLINK_NOFOLLOW + AT_SYMLINK_FOLLOW = C.AT_SYMLINK_FOLLOW + AT_REMOVEDIR = C.AT_REMOVEDIR + AT_EACCESS = C.AT_EACCESS +) + +// Routing and interface messages + +const ( + SizeofIfMsghdr = C.sizeof_struct_if_msghdr + SizeofIfData = C.sizeof_struct_if_data + SizeofIfaMsghdr = C.sizeof_struct_ifa_msghdr + SizeofRtMsghdr = C.sizeof_struct_rt_msghdr + SizeofRtMetrics = C.sizeof_struct_rt_metrics +) + +type IfMsghdr C.struct_if_msghdr + +type IfData C.struct_if_data + +type IfaMsghdr C.struct_ifa_msghdr + +type RtMsghdr C.struct_rt_msghdr + +type RtMetrics C.struct_rt_metrics + +// Berkeley packet filter + +const ( + SizeofBpfVersion = C.sizeof_struct_bpf_version + SizeofBpfStat = C.sizeof_struct_bpf_stat + SizeofBpfProgram = C.sizeof_struct_bpf_program + SizeofBpfInsn = C.sizeof_struct_bpf_insn + SizeofBpfHdr = C.sizeof_struct_bpf_hdr +) + +type BpfVersion C.struct_bpf_version + +type BpfStat C.struct_bpf_stat + +type BpfProgram C.struct_bpf_program + +type BpfInsn C.struct_bpf_insn + +type BpfTimeval C.struct_bpf_timeval + +type BpfHdr C.struct_bpf_hdr + +// sysconf information + 
+const _SC_PAGESIZE = C._SC_PAGESIZE + +// Terminal handling + +type Termios C.struct_termios + +type Termio C.struct_termio + +type Winsize C.struct_winsize diff --git a/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zerrors_darwin_386.go b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zerrors_darwin_386.go new file mode 100644 index 000000000..8e6388835 --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zerrors_darwin_386.go @@ -0,0 +1,1576 @@ +// mkerrors.sh -m32 +// MACHINE GENERATED BY THE COMMAND ABOVE; DO NOT EDIT + +// +build 386,darwin + +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs -- -m32 _const.go + +package unix + +import "syscall" + +const ( + AF_APPLETALK = 0x10 + AF_CCITT = 0xa + AF_CHAOS = 0x5 + AF_CNT = 0x15 + AF_COIP = 0x14 + AF_DATAKIT = 0x9 + AF_DECnet = 0xc + AF_DLI = 0xd + AF_E164 = 0x1c + AF_ECMA = 0x8 + AF_HYLINK = 0xf + AF_IEEE80211 = 0x25 + AF_IMPLINK = 0x3 + AF_INET = 0x2 + AF_INET6 = 0x1e + AF_IPX = 0x17 + AF_ISDN = 0x1c + AF_ISO = 0x7 + AF_LAT = 0xe + AF_LINK = 0x12 + AF_LOCAL = 0x1 + AF_MAX = 0x28 + AF_NATM = 0x1f + AF_NDRV = 0x1b + AF_NETBIOS = 0x21 + AF_NS = 0x6 + AF_OSI = 0x7 + AF_PPP = 0x22 + AF_PUP = 0x4 + AF_RESERVED_36 = 0x24 + AF_ROUTE = 0x11 + AF_SIP = 0x18 + AF_SNA = 0xb + AF_SYSTEM = 0x20 + AF_UNIX = 0x1 + AF_UNSPEC = 0x0 + AF_UTUN = 0x26 + B0 = 0x0 + B110 = 0x6e + B115200 = 0x1c200 + B1200 = 0x4b0 + B134 = 0x86 + B14400 = 0x3840 + B150 = 0x96 + B1800 = 0x708 + B19200 = 0x4b00 + B200 = 0xc8 + B230400 = 0x38400 + B2400 = 0x960 + B28800 = 0x7080 + B300 = 0x12c + B38400 = 0x9600 + B4800 = 0x12c0 + B50 = 0x32 + B57600 = 0xe100 + B600 = 0x258 + B7200 = 0x1c20 + B75 = 0x4b + B76800 = 0x12c00 + B9600 = 0x2580 + BIOCFLUSH = 0x20004268 + BIOCGBLEN = 0x40044266 + BIOCGDLT = 0x4004426a + BIOCGDLTLIST = 0xc00c4279 + BIOCGETIF = 0x4020426b + BIOCGHDRCMPLT = 0x40044274 + BIOCGRSIG = 0x40044272 + BIOCGRTIMEOUT = 0x4008426e + BIOCGSEESENT = 0x40044276 + BIOCGSTATS = 0x4008426f + BIOCIMMEDIATE = 0x80044270 + BIOCPROMISC = 0x20004269 + BIOCSBLEN = 0xc0044266 + BIOCSDLT = 0x80044278 + BIOCSETF = 0x80084267 + BIOCSETFNR = 0x8008427e + BIOCSETIF = 0x8020426c + BIOCSHDRCMPLT = 0x80044275 + BIOCSRSIG = 0x80044273 + BIOCSRTIMEOUT = 0x8008426d + BIOCSSEESENT = 0x80044277 + BIOCVERSION = 0x40044271 + BPF_A = 0x10 + BPF_ABS = 0x20 + BPF_ADD = 0x0 + BPF_ALIGNMENT = 0x4 + BPF_ALU = 0x4 + BPF_AND = 0x50 + BPF_B = 0x10 + BPF_DIV = 0x30 + BPF_H = 0x8 + BPF_IMM = 0x0 + BPF_IND = 0x40 + BPF_JA = 0x0 + BPF_JEQ = 0x10 + BPF_JGE = 0x30 + BPF_JGT = 0x20 + BPF_JMP = 0x5 + BPF_JSET = 0x40 + BPF_K = 0x0 + BPF_LD = 0x0 + BPF_LDX = 0x1 + BPF_LEN = 0x80 + BPF_LSH = 0x60 + BPF_MAJOR_VERSION = 0x1 + BPF_MAXBUFSIZE = 0x80000 + BPF_MAXINSNS = 0x200 + BPF_MEM = 0x60 + BPF_MEMWORDS = 0x10 + BPF_MINBUFSIZE = 0x20 + BPF_MINOR_VERSION = 0x1 + BPF_MISC = 0x7 + BPF_MSH = 0xa0 + BPF_MUL = 0x20 + BPF_NEG = 0x80 + BPF_OR = 0x40 + BPF_RELEASE = 0x30bb6 + BPF_RET = 0x6 + BPF_RSH = 0x70 + BPF_ST = 0x2 + BPF_STX = 0x3 + BPF_SUB = 0x10 + BPF_TAX = 0x0 + BPF_TXA = 0x80 + BPF_W = 0x0 + BPF_X = 0x8 + BRKINT = 0x2 + CFLUSH = 0xf + CLOCAL = 0x8000 + CREAD = 0x800 + CS5 = 0x0 + CS6 = 0x100 + CS7 = 0x200 + CS8 = 0x300 + CSIZE = 0x300 + CSTART = 0x11 + CSTATUS = 0x14 + CSTOP = 0x13 + CSTOPB = 0x400 + CSUSP = 0x1a + CTL_MAXNAME = 0xc + CTL_NET = 0x4 + DLT_A429 = 0xb8 + DLT_A653_ICM = 0xb9 + DLT_AIRONET_HEADER = 0x78 + DLT_AOS = 0xde + DLT_APPLE_IP_OVER_IEEE1394 = 0x8a + DLT_ARCNET = 0x7 + DLT_ARCNET_LINUX = 0x81 + 
DLT_ATM_CLIP = 0x13 + DLT_ATM_RFC1483 = 0xb + DLT_AURORA = 0x7e + DLT_AX25 = 0x3 + DLT_AX25_KISS = 0xca + DLT_BACNET_MS_TP = 0xa5 + DLT_BLUETOOTH_HCI_H4 = 0xbb + DLT_BLUETOOTH_HCI_H4_WITH_PHDR = 0xc9 + DLT_CAN20B = 0xbe + DLT_CAN_SOCKETCAN = 0xe3 + DLT_CHAOS = 0x5 + DLT_CHDLC = 0x68 + DLT_CISCO_IOS = 0x76 + DLT_C_HDLC = 0x68 + DLT_C_HDLC_WITH_DIR = 0xcd + DLT_DBUS = 0xe7 + DLT_DECT = 0xdd + DLT_DOCSIS = 0x8f + DLT_DVB_CI = 0xeb + DLT_ECONET = 0x73 + DLT_EN10MB = 0x1 + DLT_EN3MB = 0x2 + DLT_ENC = 0x6d + DLT_ERF = 0xc5 + DLT_ERF_ETH = 0xaf + DLT_ERF_POS = 0xb0 + DLT_FC_2 = 0xe0 + DLT_FC_2_WITH_FRAME_DELIMS = 0xe1 + DLT_FDDI = 0xa + DLT_FLEXRAY = 0xd2 + DLT_FRELAY = 0x6b + DLT_FRELAY_WITH_DIR = 0xce + DLT_GCOM_SERIAL = 0xad + DLT_GCOM_T1E1 = 0xac + DLT_GPF_F = 0xab + DLT_GPF_T = 0xaa + DLT_GPRS_LLC = 0xa9 + DLT_GSMTAP_ABIS = 0xda + DLT_GSMTAP_UM = 0xd9 + DLT_HHDLC = 0x79 + DLT_IBM_SN = 0x92 + DLT_IBM_SP = 0x91 + DLT_IEEE802 = 0x6 + DLT_IEEE802_11 = 0x69 + DLT_IEEE802_11_RADIO = 0x7f + DLT_IEEE802_11_RADIO_AVS = 0xa3 + DLT_IEEE802_15_4 = 0xc3 + DLT_IEEE802_15_4_LINUX = 0xbf + DLT_IEEE802_15_4_NOFCS = 0xe6 + DLT_IEEE802_15_4_NONASK_PHY = 0xd7 + DLT_IEEE802_16_MAC_CPS = 0xbc + DLT_IEEE802_16_MAC_CPS_RADIO = 0xc1 + DLT_IPFILTER = 0x74 + DLT_IPMB = 0xc7 + DLT_IPMB_LINUX = 0xd1 + DLT_IPNET = 0xe2 + DLT_IPOIB = 0xf2 + DLT_IPV4 = 0xe4 + DLT_IPV6 = 0xe5 + DLT_IP_OVER_FC = 0x7a + DLT_JUNIPER_ATM1 = 0x89 + DLT_JUNIPER_ATM2 = 0x87 + DLT_JUNIPER_ATM_CEMIC = 0xee + DLT_JUNIPER_CHDLC = 0xb5 + DLT_JUNIPER_ES = 0x84 + DLT_JUNIPER_ETHER = 0xb2 + DLT_JUNIPER_FIBRECHANNEL = 0xea + DLT_JUNIPER_FRELAY = 0xb4 + DLT_JUNIPER_GGSN = 0x85 + DLT_JUNIPER_ISM = 0xc2 + DLT_JUNIPER_MFR = 0x86 + DLT_JUNIPER_MLFR = 0x83 + DLT_JUNIPER_MLPPP = 0x82 + DLT_JUNIPER_MONITOR = 0xa4 + DLT_JUNIPER_PIC_PEER = 0xae + DLT_JUNIPER_PPP = 0xb3 + DLT_JUNIPER_PPPOE = 0xa7 + DLT_JUNIPER_PPPOE_ATM = 0xa8 + DLT_JUNIPER_SERVICES = 0x88 + DLT_JUNIPER_SRX_E2E = 0xe9 + DLT_JUNIPER_ST = 0xc8 + DLT_JUNIPER_VP = 0xb7 + DLT_JUNIPER_VS = 0xe8 + DLT_LAPB_WITH_DIR = 0xcf + DLT_LAPD = 0xcb + DLT_LIN = 0xd4 + DLT_LINUX_EVDEV = 0xd8 + DLT_LINUX_IRDA = 0x90 + DLT_LINUX_LAPD = 0xb1 + DLT_LINUX_PPP_WITHDIRECTION = 0xa6 + DLT_LINUX_SLL = 0x71 + DLT_LOOP = 0x6c + DLT_LTALK = 0x72 + DLT_MATCHING_MAX = 0xf5 + DLT_MATCHING_MIN = 0x68 + DLT_MFR = 0xb6 + DLT_MOST = 0xd3 + DLT_MPEG_2_TS = 0xf3 + DLT_MPLS = 0xdb + DLT_MTP2 = 0x8c + DLT_MTP2_WITH_PHDR = 0x8b + DLT_MTP3 = 0x8d + DLT_MUX27010 = 0xec + DLT_NETANALYZER = 0xf0 + DLT_NETANALYZER_TRANSPARENT = 0xf1 + DLT_NFC_LLCP = 0xf5 + DLT_NFLOG = 0xef + DLT_NG40 = 0xf4 + DLT_NULL = 0x0 + DLT_PCI_EXP = 0x7d + DLT_PFLOG = 0x75 + DLT_PFSYNC = 0x12 + DLT_PPI = 0xc0 + DLT_PPP = 0x9 + DLT_PPP_BSDOS = 0x10 + DLT_PPP_ETHER = 0x33 + DLT_PPP_PPPD = 0xa6 + DLT_PPP_SERIAL = 0x32 + DLT_PPP_WITH_DIR = 0xcc + DLT_PPP_WITH_DIRECTION = 0xa6 + DLT_PRISM_HEADER = 0x77 + DLT_PRONET = 0x4 + DLT_RAIF1 = 0xc6 + DLT_RAW = 0xc + DLT_RIO = 0x7c + DLT_SCCP = 0x8e + DLT_SITA = 0xc4 + DLT_SLIP = 0x8 + DLT_SLIP_BSDOS = 0xf + DLT_STANAG_5066_D_PDU = 0xed + DLT_SUNATM = 0x7b + DLT_SYMANTEC_FIREWALL = 0x63 + DLT_TZSP = 0x80 + DLT_USB = 0xba + DLT_USB_LINUX = 0xbd + DLT_USB_LINUX_MMAPPED = 0xdc + DLT_USER0 = 0x93 + DLT_USER1 = 0x94 + DLT_USER10 = 0x9d + DLT_USER11 = 0x9e + DLT_USER12 = 0x9f + DLT_USER13 = 0xa0 + DLT_USER14 = 0xa1 + DLT_USER15 = 0xa2 + DLT_USER2 = 0x95 + DLT_USER3 = 0x96 + DLT_USER4 = 0x97 + DLT_USER5 = 0x98 + DLT_USER6 = 0x99 + DLT_USER7 = 0x9a + DLT_USER8 = 0x9b + DLT_USER9 = 0x9c + DLT_WIHART = 0xdf + DLT_X2E_SERIAL = 0xd5 + DLT_X2E_XORAYA 
= 0xd6 + DT_BLK = 0x6 + DT_CHR = 0x2 + DT_DIR = 0x4 + DT_FIFO = 0x1 + DT_LNK = 0xa + DT_REG = 0x8 + DT_SOCK = 0xc + DT_UNKNOWN = 0x0 + DT_WHT = 0xe + ECHO = 0x8 + ECHOCTL = 0x40 + ECHOE = 0x2 + ECHOK = 0x4 + ECHOKE = 0x1 + ECHONL = 0x10 + ECHOPRT = 0x20 + EVFILT_AIO = -0x3 + EVFILT_FS = -0x9 + EVFILT_MACHPORT = -0x8 + EVFILT_PROC = -0x5 + EVFILT_READ = -0x1 + EVFILT_SIGNAL = -0x6 + EVFILT_SYSCOUNT = 0xe + EVFILT_THREADMARKER = 0xe + EVFILT_TIMER = -0x7 + EVFILT_USER = -0xa + EVFILT_VM = -0xc + EVFILT_VNODE = -0x4 + EVFILT_WRITE = -0x2 + EV_ADD = 0x1 + EV_CLEAR = 0x20 + EV_DELETE = 0x2 + EV_DISABLE = 0x8 + EV_DISPATCH = 0x80 + EV_ENABLE = 0x4 + EV_EOF = 0x8000 + EV_ERROR = 0x4000 + EV_FLAG0 = 0x1000 + EV_FLAG1 = 0x2000 + EV_ONESHOT = 0x10 + EV_OOBAND = 0x2000 + EV_POLL = 0x1000 + EV_RECEIPT = 0x40 + EV_SYSFLAGS = 0xf000 + EXTA = 0x4b00 + EXTB = 0x9600 + EXTPROC = 0x800 + FD_CLOEXEC = 0x1 + FD_SETSIZE = 0x400 + FLUSHO = 0x800000 + F_ADDFILESIGS = 0x3d + F_ADDSIGS = 0x3b + F_ALLOCATEALL = 0x4 + F_ALLOCATECONTIG = 0x2 + F_CHKCLEAN = 0x29 + F_DUPFD = 0x0 + F_DUPFD_CLOEXEC = 0x43 + F_FINDSIGS = 0x4e + F_FLUSH_DATA = 0x28 + F_FREEZE_FS = 0x35 + F_FULLFSYNC = 0x33 + F_GETCODEDIR = 0x48 + F_GETFD = 0x1 + F_GETFL = 0x3 + F_GETLK = 0x7 + F_GETLKPID = 0x42 + F_GETNOSIGPIPE = 0x4a + F_GETOWN = 0x5 + F_GETPATH = 0x32 + F_GETPATH_MTMINFO = 0x47 + F_GETPROTECTIONCLASS = 0x3f + F_GETPROTECTIONLEVEL = 0x4d + F_GLOBAL_NOCACHE = 0x37 + F_LOG2PHYS = 0x31 + F_LOG2PHYS_EXT = 0x41 + F_NOCACHE = 0x30 + F_NODIRECT = 0x3e + F_OK = 0x0 + F_PATHPKG_CHECK = 0x34 + F_PEOFPOSMODE = 0x3 + F_PREALLOCATE = 0x2a + F_RDADVISE = 0x2c + F_RDAHEAD = 0x2d + F_RDLCK = 0x1 + F_SETBACKINGSTORE = 0x46 + F_SETFD = 0x2 + F_SETFL = 0x4 + F_SETLK = 0x8 + F_SETLKW = 0x9 + F_SETLKWTIMEOUT = 0xa + F_SETNOSIGPIPE = 0x49 + F_SETOWN = 0x6 + F_SETPROTECTIONCLASS = 0x40 + F_SETSIZE = 0x2b + F_SINGLE_WRITER = 0x4c + F_THAW_FS = 0x36 + F_TRANSCODEKEY = 0x4b + F_UNLCK = 0x2 + F_VOLPOSMODE = 0x4 + F_WRLCK = 0x3 + HUPCL = 0x4000 + ICANON = 0x100 + ICMP6_FILTER = 0x12 + ICRNL = 0x100 + IEXTEN = 0x400 + IFF_ALLMULTI = 0x200 + IFF_ALTPHYS = 0x4000 + IFF_BROADCAST = 0x2 + IFF_DEBUG = 0x4 + IFF_LINK0 = 0x1000 + IFF_LINK1 = 0x2000 + IFF_LINK2 = 0x4000 + IFF_LOOPBACK = 0x8 + IFF_MULTICAST = 0x8000 + IFF_NOARP = 0x80 + IFF_NOTRAILERS = 0x20 + IFF_OACTIVE = 0x400 + IFF_POINTOPOINT = 0x10 + IFF_PROMISC = 0x100 + IFF_RUNNING = 0x40 + IFF_SIMPLEX = 0x800 + IFF_UP = 0x1 + IFNAMSIZ = 0x10 + IFT_1822 = 0x2 + IFT_AAL5 = 0x31 + IFT_ARCNET = 0x23 + IFT_ARCNETPLUS = 0x24 + IFT_ATM = 0x25 + IFT_BRIDGE = 0xd1 + IFT_CARP = 0xf8 + IFT_CELLULAR = 0xff + IFT_CEPT = 0x13 + IFT_DS3 = 0x1e + IFT_ENC = 0xf4 + IFT_EON = 0x19 + IFT_ETHER = 0x6 + IFT_FAITH = 0x38 + IFT_FDDI = 0xf + IFT_FRELAY = 0x20 + IFT_FRELAYDCE = 0x2c + IFT_GIF = 0x37 + IFT_HDH1822 = 0x3 + IFT_HIPPI = 0x2f + IFT_HSSI = 0x2e + IFT_HY = 0xe + IFT_IEEE1394 = 0x90 + IFT_IEEE8023ADLAG = 0x88 + IFT_ISDNBASIC = 0x14 + IFT_ISDNPRIMARY = 0x15 + IFT_ISO88022LLC = 0x29 + IFT_ISO88023 = 0x7 + IFT_ISO88024 = 0x8 + IFT_ISO88025 = 0x9 + IFT_ISO88026 = 0xa + IFT_L2VLAN = 0x87 + IFT_LAPB = 0x10 + IFT_LOCALTALK = 0x2a + IFT_LOOP = 0x18 + IFT_MIOX25 = 0x26 + IFT_MODEM = 0x30 + IFT_NSIP = 0x1b + IFT_OTHER = 0x1 + IFT_P10 = 0xc + IFT_P80 = 0xd + IFT_PARA = 0x22 + IFT_PDP = 0xff + IFT_PFLOG = 0xf5 + IFT_PFSYNC = 0xf6 + IFT_PKTAP = 0xfe + IFT_PPP = 0x17 + IFT_PROPMUX = 0x36 + IFT_PROPVIRTUAL = 0x35 + IFT_PTPSERIAL = 0x16 + IFT_RS232 = 0x21 + IFT_SDLC = 0x11 + IFT_SIP = 0x1f + IFT_SLIP = 0x1c + IFT_SMDSDXI = 0x2b + IFT_SMDSICIP = 0x34 
+ IFT_SONET = 0x27 + IFT_SONETPATH = 0x32 + IFT_SONETVT = 0x33 + IFT_STARLAN = 0xb + IFT_STF = 0x39 + IFT_T1 = 0x12 + IFT_ULTRA = 0x1d + IFT_V35 = 0x2d + IFT_X25 = 0x5 + IFT_X25DDN = 0x4 + IFT_X25PLE = 0x28 + IFT_XETHER = 0x1a + IGNBRK = 0x1 + IGNCR = 0x80 + IGNPAR = 0x4 + IMAXBEL = 0x2000 + INLCR = 0x40 + INPCK = 0x10 + IN_CLASSA_HOST = 0xffffff + IN_CLASSA_MAX = 0x80 + IN_CLASSA_NET = 0xff000000 + IN_CLASSA_NSHIFT = 0x18 + IN_CLASSB_HOST = 0xffff + IN_CLASSB_MAX = 0x10000 + IN_CLASSB_NET = 0xffff0000 + IN_CLASSB_NSHIFT = 0x10 + IN_CLASSC_HOST = 0xff + IN_CLASSC_NET = 0xffffff00 + IN_CLASSC_NSHIFT = 0x8 + IN_CLASSD_HOST = 0xfffffff + IN_CLASSD_NET = 0xf0000000 + IN_CLASSD_NSHIFT = 0x1c + IN_LINKLOCALNETNUM = 0xa9fe0000 + IN_LOOPBACKNET = 0x7f + IPPROTO_3PC = 0x22 + IPPROTO_ADFS = 0x44 + IPPROTO_AH = 0x33 + IPPROTO_AHIP = 0x3d + IPPROTO_APES = 0x63 + IPPROTO_ARGUS = 0xd + IPPROTO_AX25 = 0x5d + IPPROTO_BHA = 0x31 + IPPROTO_BLT = 0x1e + IPPROTO_BRSATMON = 0x4c + IPPROTO_CFTP = 0x3e + IPPROTO_CHAOS = 0x10 + IPPROTO_CMTP = 0x26 + IPPROTO_CPHB = 0x49 + IPPROTO_CPNX = 0x48 + IPPROTO_DDP = 0x25 + IPPROTO_DGP = 0x56 + IPPROTO_DIVERT = 0xfe + IPPROTO_DONE = 0x101 + IPPROTO_DSTOPTS = 0x3c + IPPROTO_EGP = 0x8 + IPPROTO_EMCON = 0xe + IPPROTO_ENCAP = 0x62 + IPPROTO_EON = 0x50 + IPPROTO_ESP = 0x32 + IPPROTO_ETHERIP = 0x61 + IPPROTO_FRAGMENT = 0x2c + IPPROTO_GGP = 0x3 + IPPROTO_GMTP = 0x64 + IPPROTO_GRE = 0x2f + IPPROTO_HELLO = 0x3f + IPPROTO_HMP = 0x14 + IPPROTO_HOPOPTS = 0x0 + IPPROTO_ICMP = 0x1 + IPPROTO_ICMPV6 = 0x3a + IPPROTO_IDP = 0x16 + IPPROTO_IDPR = 0x23 + IPPROTO_IDRP = 0x2d + IPPROTO_IGMP = 0x2 + IPPROTO_IGP = 0x55 + IPPROTO_IGRP = 0x58 + IPPROTO_IL = 0x28 + IPPROTO_INLSP = 0x34 + IPPROTO_INP = 0x20 + IPPROTO_IP = 0x0 + IPPROTO_IPCOMP = 0x6c + IPPROTO_IPCV = 0x47 + IPPROTO_IPEIP = 0x5e + IPPROTO_IPIP = 0x4 + IPPROTO_IPPC = 0x43 + IPPROTO_IPV4 = 0x4 + IPPROTO_IPV6 = 0x29 + IPPROTO_IRTP = 0x1c + IPPROTO_KRYPTOLAN = 0x41 + IPPROTO_LARP = 0x5b + IPPROTO_LEAF1 = 0x19 + IPPROTO_LEAF2 = 0x1a + IPPROTO_MAX = 0x100 + IPPROTO_MAXID = 0x34 + IPPROTO_MEAS = 0x13 + IPPROTO_MHRP = 0x30 + IPPROTO_MICP = 0x5f + IPPROTO_MTP = 0x5c + IPPROTO_MUX = 0x12 + IPPROTO_ND = 0x4d + IPPROTO_NHRP = 0x36 + IPPROTO_NONE = 0x3b + IPPROTO_NSP = 0x1f + IPPROTO_NVPII = 0xb + IPPROTO_OSPFIGP = 0x59 + IPPROTO_PGM = 0x71 + IPPROTO_PIGP = 0x9 + IPPROTO_PIM = 0x67 + IPPROTO_PRM = 0x15 + IPPROTO_PUP = 0xc + IPPROTO_PVP = 0x4b + IPPROTO_RAW = 0xff + IPPROTO_RCCMON = 0xa + IPPROTO_RDP = 0x1b + IPPROTO_ROUTING = 0x2b + IPPROTO_RSVP = 0x2e + IPPROTO_RVD = 0x42 + IPPROTO_SATEXPAK = 0x40 + IPPROTO_SATMON = 0x45 + IPPROTO_SCCSP = 0x60 + IPPROTO_SCTP = 0x84 + IPPROTO_SDRP = 0x2a + IPPROTO_SEP = 0x21 + IPPROTO_SRPC = 0x5a + IPPROTO_ST = 0x7 + IPPROTO_SVMTP = 0x52 + IPPROTO_SWIPE = 0x35 + IPPROTO_TCF = 0x57 + IPPROTO_TCP = 0x6 + IPPROTO_TP = 0x1d + IPPROTO_TPXX = 0x27 + IPPROTO_TRUNK1 = 0x17 + IPPROTO_TRUNK2 = 0x18 + IPPROTO_TTP = 0x54 + IPPROTO_UDP = 0x11 + IPPROTO_VINES = 0x53 + IPPROTO_VISA = 0x46 + IPPROTO_VMTP = 0x51 + IPPROTO_WBEXPAK = 0x4f + IPPROTO_WBMON = 0x4e + IPPROTO_WSN = 0x4a + IPPROTO_XNET = 0xf + IPPROTO_XTP = 0x24 + IPV6_2292DSTOPTS = 0x17 + IPV6_2292HOPLIMIT = 0x14 + IPV6_2292HOPOPTS = 0x16 + IPV6_2292NEXTHOP = 0x15 + IPV6_2292PKTINFO = 0x13 + IPV6_2292PKTOPTIONS = 0x19 + IPV6_2292RTHDR = 0x18 + IPV6_BINDV6ONLY = 0x1b + IPV6_BOUND_IF = 0x7d + IPV6_CHECKSUM = 0x1a + IPV6_DEFAULT_MULTICAST_HOPS = 0x1 + IPV6_DEFAULT_MULTICAST_LOOP = 0x1 + IPV6_DEFHLIM = 0x40 + IPV6_FAITH = 0x1d + IPV6_FLOWINFO_MASK = 0xffffff0f + 
IPV6_FLOWLABEL_MASK = 0xffff0f00 + IPV6_FRAGTTL = 0x3c + IPV6_FW_ADD = 0x1e + IPV6_FW_DEL = 0x1f + IPV6_FW_FLUSH = 0x20 + IPV6_FW_GET = 0x22 + IPV6_FW_ZERO = 0x21 + IPV6_HLIMDEC = 0x1 + IPV6_IPSEC_POLICY = 0x1c + IPV6_JOIN_GROUP = 0xc + IPV6_LEAVE_GROUP = 0xd + IPV6_MAXHLIM = 0xff + IPV6_MAXOPTHDR = 0x800 + IPV6_MAXPACKET = 0xffff + IPV6_MAX_GROUP_SRC_FILTER = 0x200 + IPV6_MAX_MEMBERSHIPS = 0xfff + IPV6_MAX_SOCK_SRC_FILTER = 0x80 + IPV6_MIN_MEMBERSHIPS = 0x1f + IPV6_MMTU = 0x500 + IPV6_MULTICAST_HOPS = 0xa + IPV6_MULTICAST_IF = 0x9 + IPV6_MULTICAST_LOOP = 0xb + IPV6_PORTRANGE = 0xe + IPV6_PORTRANGE_DEFAULT = 0x0 + IPV6_PORTRANGE_HIGH = 0x1 + IPV6_PORTRANGE_LOW = 0x2 + IPV6_RECVTCLASS = 0x23 + IPV6_RTHDR_LOOSE = 0x0 + IPV6_RTHDR_STRICT = 0x1 + IPV6_RTHDR_TYPE_0 = 0x0 + IPV6_SOCKOPT_RESERVED1 = 0x3 + IPV6_TCLASS = 0x24 + IPV6_UNICAST_HOPS = 0x4 + IPV6_V6ONLY = 0x1b + IPV6_VERSION = 0x60 + IPV6_VERSION_MASK = 0xf0 + IP_ADD_MEMBERSHIP = 0xc + IP_ADD_SOURCE_MEMBERSHIP = 0x46 + IP_BLOCK_SOURCE = 0x48 + IP_BOUND_IF = 0x19 + IP_DEFAULT_MULTICAST_LOOP = 0x1 + IP_DEFAULT_MULTICAST_TTL = 0x1 + IP_DF = 0x4000 + IP_DROP_MEMBERSHIP = 0xd + IP_DROP_SOURCE_MEMBERSHIP = 0x47 + IP_DUMMYNET_CONFIGURE = 0x3c + IP_DUMMYNET_DEL = 0x3d + IP_DUMMYNET_FLUSH = 0x3e + IP_DUMMYNET_GET = 0x40 + IP_FAITH = 0x16 + IP_FW_ADD = 0x28 + IP_FW_DEL = 0x29 + IP_FW_FLUSH = 0x2a + IP_FW_GET = 0x2c + IP_FW_RESETLOG = 0x2d + IP_FW_ZERO = 0x2b + IP_HDRINCL = 0x2 + IP_IPSEC_POLICY = 0x15 + IP_MAXPACKET = 0xffff + IP_MAX_GROUP_SRC_FILTER = 0x200 + IP_MAX_MEMBERSHIPS = 0xfff + IP_MAX_SOCK_MUTE_FILTER = 0x80 + IP_MAX_SOCK_SRC_FILTER = 0x80 + IP_MF = 0x2000 + IP_MIN_MEMBERSHIPS = 0x1f + IP_MSFILTER = 0x4a + IP_MSS = 0x240 + IP_MULTICAST_IF = 0x9 + IP_MULTICAST_IFINDEX = 0x42 + IP_MULTICAST_LOOP = 0xb + IP_MULTICAST_TTL = 0xa + IP_MULTICAST_VIF = 0xe + IP_NAT__XXX = 0x37 + IP_OFFMASK = 0x1fff + IP_OLD_FW_ADD = 0x32 + IP_OLD_FW_DEL = 0x33 + IP_OLD_FW_FLUSH = 0x34 + IP_OLD_FW_GET = 0x36 + IP_OLD_FW_RESETLOG = 0x38 + IP_OLD_FW_ZERO = 0x35 + IP_OPTIONS = 0x1 + IP_PKTINFO = 0x1a + IP_PORTRANGE = 0x13 + IP_PORTRANGE_DEFAULT = 0x0 + IP_PORTRANGE_HIGH = 0x1 + IP_PORTRANGE_LOW = 0x2 + IP_RECVDSTADDR = 0x7 + IP_RECVIF = 0x14 + IP_RECVOPTS = 0x5 + IP_RECVPKTINFO = 0x1a + IP_RECVRETOPTS = 0x6 + IP_RECVTTL = 0x18 + IP_RETOPTS = 0x8 + IP_RF = 0x8000 + IP_RSVP_OFF = 0x10 + IP_RSVP_ON = 0xf + IP_RSVP_VIF_OFF = 0x12 + IP_RSVP_VIF_ON = 0x11 + IP_STRIPHDR = 0x17 + IP_TOS = 0x3 + IP_TRAFFIC_MGT_BACKGROUND = 0x41 + IP_TTL = 0x4 + IP_UNBLOCK_SOURCE = 0x49 + ISIG = 0x80 + ISTRIP = 0x20 + IUTF8 = 0x4000 + IXANY = 0x800 + IXOFF = 0x400 + IXON = 0x200 + LOCK_EX = 0x2 + LOCK_NB = 0x4 + LOCK_SH = 0x1 + LOCK_UN = 0x8 + MADV_CAN_REUSE = 0x9 + MADV_DONTNEED = 0x4 + MADV_FREE = 0x5 + MADV_FREE_REUSABLE = 0x7 + MADV_FREE_REUSE = 0x8 + MADV_NORMAL = 0x0 + MADV_RANDOM = 0x1 + MADV_SEQUENTIAL = 0x2 + MADV_WILLNEED = 0x3 + MADV_ZERO_WIRED_PAGES = 0x6 + MAP_ANON = 0x1000 + MAP_COPY = 0x2 + MAP_FILE = 0x0 + MAP_FIXED = 0x10 + MAP_HASSEMAPHORE = 0x200 + MAP_JIT = 0x800 + MAP_NOCACHE = 0x400 + MAP_NOEXTEND = 0x100 + MAP_NORESERVE = 0x40 + MAP_PRIVATE = 0x2 + MAP_RENAME = 0x20 + MAP_RESERVED0080 = 0x80 + MAP_SHARED = 0x1 + MCL_CURRENT = 0x1 + MCL_FUTURE = 0x2 + MSG_CTRUNC = 0x20 + MSG_DONTROUTE = 0x4 + MSG_DONTWAIT = 0x80 + MSG_EOF = 0x100 + MSG_EOR = 0x8 + MSG_FLUSH = 0x400 + MSG_HAVEMORE = 0x2000 + MSG_HOLD = 0x800 + MSG_NEEDSA = 0x10000 + MSG_OOB = 0x1 + MSG_PEEK = 0x2 + MSG_RCVMORE = 0x4000 + MSG_SEND = 0x1000 + MSG_TRUNC = 0x10 + MSG_WAITALL = 0x40 + MSG_WAITSTREAM = 0x200 
+ MS_ASYNC = 0x1 + MS_DEACTIVATE = 0x8 + MS_INVALIDATE = 0x2 + MS_KILLPAGES = 0x4 + MS_SYNC = 0x10 + NAME_MAX = 0xff + NET_RT_DUMP = 0x1 + NET_RT_DUMP2 = 0x7 + NET_RT_FLAGS = 0x2 + NET_RT_IFLIST = 0x3 + NET_RT_IFLIST2 = 0x6 + NET_RT_MAXID = 0xa + NET_RT_STAT = 0x4 + NET_RT_TRASH = 0x5 + NOFLSH = 0x80000000 + NOTE_ABSOLUTE = 0x8 + NOTE_ATTRIB = 0x8 + NOTE_BACKGROUND = 0x40 + NOTE_CHILD = 0x4 + NOTE_CRITICAL = 0x20 + NOTE_DELETE = 0x1 + NOTE_EXEC = 0x20000000 + NOTE_EXIT = 0x80000000 + NOTE_EXITSTATUS = 0x4000000 + NOTE_EXIT_CSERROR = 0x40000 + NOTE_EXIT_DECRYPTFAIL = 0x10000 + NOTE_EXIT_DETAIL = 0x2000000 + NOTE_EXIT_DETAIL_MASK = 0x70000 + NOTE_EXIT_MEMORY = 0x20000 + NOTE_EXIT_REPARENTED = 0x80000 + NOTE_EXTEND = 0x4 + NOTE_FFAND = 0x40000000 + NOTE_FFCOPY = 0xc0000000 + NOTE_FFCTRLMASK = 0xc0000000 + NOTE_FFLAGSMASK = 0xffffff + NOTE_FFNOP = 0x0 + NOTE_FFOR = 0x80000000 + NOTE_FORK = 0x40000000 + NOTE_LEEWAY = 0x10 + NOTE_LINK = 0x10 + NOTE_LOWAT = 0x1 + NOTE_NONE = 0x80 + NOTE_NSECONDS = 0x4 + NOTE_PCTRLMASK = -0x100000 + NOTE_PDATAMASK = 0xfffff + NOTE_REAP = 0x10000000 + NOTE_RENAME = 0x20 + NOTE_REVOKE = 0x40 + NOTE_SECONDS = 0x1 + NOTE_SIGNAL = 0x8000000 + NOTE_TRACK = 0x1 + NOTE_TRACKERR = 0x2 + NOTE_TRIGGER = 0x1000000 + NOTE_USECONDS = 0x2 + NOTE_VM_ERROR = 0x10000000 + NOTE_VM_PRESSURE = 0x80000000 + NOTE_VM_PRESSURE_SUDDEN_TERMINATE = 0x20000000 + NOTE_VM_PRESSURE_TERMINATE = 0x40000000 + NOTE_WRITE = 0x2 + OCRNL = 0x10 + OFDEL = 0x20000 + OFILL = 0x80 + ONLCR = 0x2 + ONLRET = 0x40 + ONOCR = 0x20 + ONOEOT = 0x8 + OPOST = 0x1 + O_ACCMODE = 0x3 + O_ALERT = 0x20000000 + O_APPEND = 0x8 + O_ASYNC = 0x40 + O_CLOEXEC = 0x1000000 + O_CREAT = 0x200 + O_DIRECTORY = 0x100000 + O_DP_GETRAWENCRYPTED = 0x1 + O_DSYNC = 0x400000 + O_EVTONLY = 0x8000 + O_EXCL = 0x800 + O_EXLOCK = 0x20 + O_FSYNC = 0x80 + O_NDELAY = 0x4 + O_NOCTTY = 0x20000 + O_NOFOLLOW = 0x100 + O_NONBLOCK = 0x4 + O_POPUP = 0x80000000 + O_RDONLY = 0x0 + O_RDWR = 0x2 + O_SHLOCK = 0x10 + O_SYMLINK = 0x200000 + O_SYNC = 0x80 + O_TRUNC = 0x400 + O_WRONLY = 0x1 + PARENB = 0x1000 + PARMRK = 0x8 + PARODD = 0x2000 + PENDIN = 0x20000000 + PRIO_PGRP = 0x1 + PRIO_PROCESS = 0x0 + PRIO_USER = 0x2 + PROT_EXEC = 0x4 + PROT_NONE = 0x0 + PROT_READ = 0x1 + PROT_WRITE = 0x2 + PT_ATTACH = 0xa + PT_ATTACHEXC = 0xe + PT_CONTINUE = 0x7 + PT_DENY_ATTACH = 0x1f + PT_DETACH = 0xb + PT_FIRSTMACH = 0x20 + PT_FORCEQUOTA = 0x1e + PT_KILL = 0x8 + PT_READ_D = 0x2 + PT_READ_I = 0x1 + PT_READ_U = 0x3 + PT_SIGEXC = 0xc + PT_STEP = 0x9 + PT_THUPDATE = 0xd + PT_TRACE_ME = 0x0 + PT_WRITE_D = 0x5 + PT_WRITE_I = 0x4 + PT_WRITE_U = 0x6 + RLIMIT_AS = 0x5 + RLIMIT_CORE = 0x4 + RLIMIT_CPU = 0x0 + RLIMIT_CPU_USAGE_MONITOR = 0x2 + RLIMIT_DATA = 0x2 + RLIMIT_FSIZE = 0x1 + RLIMIT_NOFILE = 0x8 + RLIMIT_STACK = 0x3 + RLIM_INFINITY = 0x7fffffffffffffff + RTAX_AUTHOR = 0x6 + RTAX_BRD = 0x7 + RTAX_DST = 0x0 + RTAX_GATEWAY = 0x1 + RTAX_GENMASK = 0x3 + RTAX_IFA = 0x5 + RTAX_IFP = 0x4 + RTAX_MAX = 0x8 + RTAX_NETMASK = 0x2 + RTA_AUTHOR = 0x40 + RTA_BRD = 0x80 + RTA_DST = 0x1 + RTA_GATEWAY = 0x2 + RTA_GENMASK = 0x8 + RTA_IFA = 0x20 + RTA_IFP = 0x10 + RTA_NETMASK = 0x4 + RTF_BLACKHOLE = 0x1000 + RTF_BROADCAST = 0x400000 + RTF_CLONING = 0x100 + RTF_CONDEMNED = 0x2000000 + RTF_DELCLONE = 0x80 + RTF_DONE = 0x40 + RTF_DYNAMIC = 0x10 + RTF_GATEWAY = 0x2 + RTF_HOST = 0x4 + RTF_IFREF = 0x4000000 + RTF_IFSCOPE = 0x1000000 + RTF_LLINFO = 0x400 + RTF_LOCAL = 0x200000 + RTF_MODIFIED = 0x20 + RTF_MULTICAST = 0x800000 + RTF_NOIFREF = 0x2000 + RTF_PINNED = 0x100000 + RTF_PRCLONING = 0x10000 + 
RTF_PROTO1 = 0x8000 + RTF_PROTO2 = 0x4000 + RTF_PROTO3 = 0x40000 + RTF_PROXY = 0x8000000 + RTF_REJECT = 0x8 + RTF_ROUTER = 0x10000000 + RTF_STATIC = 0x800 + RTF_UP = 0x1 + RTF_WASCLONED = 0x20000 + RTF_XRESOLVE = 0x200 + RTM_ADD = 0x1 + RTM_CHANGE = 0x3 + RTM_DELADDR = 0xd + RTM_DELETE = 0x2 + RTM_DELMADDR = 0x10 + RTM_GET = 0x4 + RTM_GET2 = 0x14 + RTM_IFINFO = 0xe + RTM_IFINFO2 = 0x12 + RTM_LOCK = 0x8 + RTM_LOSING = 0x5 + RTM_MISS = 0x7 + RTM_NEWADDR = 0xc + RTM_NEWMADDR = 0xf + RTM_NEWMADDR2 = 0x13 + RTM_OLDADD = 0x9 + RTM_OLDDEL = 0xa + RTM_REDIRECT = 0x6 + RTM_RESOLVE = 0xb + RTM_RTTUNIT = 0xf4240 + RTM_VERSION = 0x5 + RTV_EXPIRE = 0x4 + RTV_HOPCOUNT = 0x2 + RTV_MTU = 0x1 + RTV_RPIPE = 0x8 + RTV_RTT = 0x40 + RTV_RTTVAR = 0x80 + RTV_SPIPE = 0x10 + RTV_SSTHRESH = 0x20 + RUSAGE_CHILDREN = -0x1 + RUSAGE_SELF = 0x0 + SCM_CREDS = 0x3 + SCM_RIGHTS = 0x1 + SCM_TIMESTAMP = 0x2 + SCM_TIMESTAMP_MONOTONIC = 0x4 + SHUT_RD = 0x0 + SHUT_RDWR = 0x2 + SHUT_WR = 0x1 + SIOCADDMULTI = 0x80206931 + SIOCAIFADDR = 0x8040691a + SIOCARPIPLL = 0xc0206928 + SIOCATMARK = 0x40047307 + SIOCAUTOADDR = 0xc0206926 + SIOCAUTONETMASK = 0x80206927 + SIOCDELMULTI = 0x80206932 + SIOCDIFADDR = 0x80206919 + SIOCDIFPHYADDR = 0x80206941 + SIOCGDRVSPEC = 0xc01c697b + SIOCGETVLAN = 0xc020697f + SIOCGHIWAT = 0x40047301 + SIOCGIFADDR = 0xc0206921 + SIOCGIFALTMTU = 0xc0206948 + SIOCGIFASYNCMAP = 0xc020697c + SIOCGIFBOND = 0xc0206947 + SIOCGIFBRDADDR = 0xc0206923 + SIOCGIFCAP = 0xc020695b + SIOCGIFCONF = 0xc0086924 + SIOCGIFDEVMTU = 0xc0206944 + SIOCGIFDSTADDR = 0xc0206922 + SIOCGIFFLAGS = 0xc0206911 + SIOCGIFGENERIC = 0xc020693a + SIOCGIFKPI = 0xc0206987 + SIOCGIFMAC = 0xc0206982 + SIOCGIFMEDIA = 0xc0286938 + SIOCGIFMETRIC = 0xc0206917 + SIOCGIFMTU = 0xc0206933 + SIOCGIFNETMASK = 0xc0206925 + SIOCGIFPDSTADDR = 0xc0206940 + SIOCGIFPHYS = 0xc0206935 + SIOCGIFPSRCADDR = 0xc020693f + SIOCGIFSTATUS = 0xc331693d + SIOCGIFVLAN = 0xc020697f + SIOCGIFWAKEFLAGS = 0xc0206988 + SIOCGLOWAT = 0x40047303 + SIOCGPGRP = 0x40047309 + SIOCIFCREATE = 0xc0206978 + SIOCIFCREATE2 = 0xc020697a + SIOCIFDESTROY = 0x80206979 + SIOCIFGCLONERS = 0xc00c6981 + SIOCRSLVMULTI = 0xc008693b + SIOCSDRVSPEC = 0x801c697b + SIOCSETVLAN = 0x8020697e + SIOCSHIWAT = 0x80047300 + SIOCSIFADDR = 0x8020690c + SIOCSIFALTMTU = 0x80206945 + SIOCSIFASYNCMAP = 0x8020697d + SIOCSIFBOND = 0x80206946 + SIOCSIFBRDADDR = 0x80206913 + SIOCSIFCAP = 0x8020695a + SIOCSIFDSTADDR = 0x8020690e + SIOCSIFFLAGS = 0x80206910 + SIOCSIFGENERIC = 0x80206939 + SIOCSIFKPI = 0x80206986 + SIOCSIFLLADDR = 0x8020693c + SIOCSIFMAC = 0x80206983 + SIOCSIFMEDIA = 0xc0206937 + SIOCSIFMETRIC = 0x80206918 + SIOCSIFMTU = 0x80206934 + SIOCSIFNETMASK = 0x80206916 + SIOCSIFPHYADDR = 0x8040693e + SIOCSIFPHYS = 0x80206936 + SIOCSIFVLAN = 0x8020697e + SIOCSLOWAT = 0x80047302 + SIOCSPGRP = 0x80047308 + SOCK_DGRAM = 0x2 + SOCK_MAXADDRLEN = 0xff + SOCK_RAW = 0x3 + SOCK_RDM = 0x4 + SOCK_SEQPACKET = 0x5 + SOCK_STREAM = 0x1 + SOL_SOCKET = 0xffff + SOMAXCONN = 0x80 + SO_ACCEPTCONN = 0x2 + SO_BROADCAST = 0x20 + SO_DEBUG = 0x1 + SO_DONTROUTE = 0x10 + SO_DONTTRUNC = 0x2000 + SO_ERROR = 0x1007 + SO_KEEPALIVE = 0x8 + SO_LABEL = 0x1010 + SO_LINGER = 0x80 + SO_LINGER_SEC = 0x1080 + SO_NKE = 0x1021 + SO_NOADDRERR = 0x1023 + SO_NOSIGPIPE = 0x1022 + SO_NOTIFYCONFLICT = 0x1026 + SO_NP_EXTENSIONS = 0x1083 + SO_NREAD = 0x1020 + SO_NUMRCVPKT = 0x1112 + SO_NWRITE = 0x1024 + SO_OOBINLINE = 0x100 + SO_PEERLABEL = 0x1011 + SO_RANDOMPORT = 0x1082 + SO_RCVBUF = 0x1002 + SO_RCVLOWAT = 0x1004 + SO_RCVTIMEO = 0x1006 + SO_REUSEADDR = 0x4 + 
SO_REUSEPORT = 0x200 + SO_REUSESHAREUID = 0x1025 + SO_SNDBUF = 0x1001 + SO_SNDLOWAT = 0x1003 + SO_SNDTIMEO = 0x1005 + SO_TIMESTAMP = 0x400 + SO_TIMESTAMP_MONOTONIC = 0x800 + SO_TYPE = 0x1008 + SO_UPCALLCLOSEWAIT = 0x1027 + SO_USELOOPBACK = 0x40 + SO_WANTMORE = 0x4000 + SO_WANTOOBFLAG = 0x8000 + S_IEXEC = 0x40 + S_IFBLK = 0x6000 + S_IFCHR = 0x2000 + S_IFDIR = 0x4000 + S_IFIFO = 0x1000 + S_IFLNK = 0xa000 + S_IFMT = 0xf000 + S_IFREG = 0x8000 + S_IFSOCK = 0xc000 + S_IFWHT = 0xe000 + S_IREAD = 0x100 + S_IRGRP = 0x20 + S_IROTH = 0x4 + S_IRUSR = 0x100 + S_IRWXG = 0x38 + S_IRWXO = 0x7 + S_IRWXU = 0x1c0 + S_ISGID = 0x400 + S_ISTXT = 0x200 + S_ISUID = 0x800 + S_ISVTX = 0x200 + S_IWGRP = 0x10 + S_IWOTH = 0x2 + S_IWRITE = 0x80 + S_IWUSR = 0x80 + S_IXGRP = 0x8 + S_IXOTH = 0x1 + S_IXUSR = 0x40 + TCIFLUSH = 0x1 + TCIOFLUSH = 0x3 + TCOFLUSH = 0x2 + TCP_CONNECTIONTIMEOUT = 0x20 + TCP_ENABLE_ECN = 0x104 + TCP_KEEPALIVE = 0x10 + TCP_KEEPCNT = 0x102 + TCP_KEEPINTVL = 0x101 + TCP_MAXHLEN = 0x3c + TCP_MAXOLEN = 0x28 + TCP_MAXSEG = 0x2 + TCP_MAXWIN = 0xffff + TCP_MAX_SACK = 0x4 + TCP_MAX_WINSHIFT = 0xe + TCP_MINMSS = 0xd8 + TCP_MSS = 0x200 + TCP_NODELAY = 0x1 + TCP_NOOPT = 0x8 + TCP_NOPUSH = 0x4 + TCP_NOTSENT_LOWAT = 0x201 + TCP_RXT_CONNDROPTIME = 0x80 + TCP_RXT_FINDROP = 0x100 + TCP_SENDMOREACKS = 0x103 + TCSAFLUSH = 0x2 + TIOCCBRK = 0x2000747a + TIOCCDTR = 0x20007478 + TIOCCONS = 0x80047462 + TIOCDCDTIMESTAMP = 0x40087458 + TIOCDRAIN = 0x2000745e + TIOCDSIMICROCODE = 0x20007455 + TIOCEXCL = 0x2000740d + TIOCEXT = 0x80047460 + TIOCFLUSH = 0x80047410 + TIOCGDRAINWAIT = 0x40047456 + TIOCGETA = 0x402c7413 + TIOCGETD = 0x4004741a + TIOCGPGRP = 0x40047477 + TIOCGWINSZ = 0x40087468 + TIOCIXOFF = 0x20007480 + TIOCIXON = 0x20007481 + TIOCMBIC = 0x8004746b + TIOCMBIS = 0x8004746c + TIOCMGDTRWAIT = 0x4004745a + TIOCMGET = 0x4004746a + TIOCMODG = 0x40047403 + TIOCMODS = 0x80047404 + TIOCMSDTRWAIT = 0x8004745b + TIOCMSET = 0x8004746d + TIOCM_CAR = 0x40 + TIOCM_CD = 0x40 + TIOCM_CTS = 0x20 + TIOCM_DSR = 0x100 + TIOCM_DTR = 0x2 + TIOCM_LE = 0x1 + TIOCM_RI = 0x80 + TIOCM_RNG = 0x80 + TIOCM_RTS = 0x4 + TIOCM_SR = 0x10 + TIOCM_ST = 0x8 + TIOCNOTTY = 0x20007471 + TIOCNXCL = 0x2000740e + TIOCOUTQ = 0x40047473 + TIOCPKT = 0x80047470 + TIOCPKT_DATA = 0x0 + TIOCPKT_DOSTOP = 0x20 + TIOCPKT_FLUSHREAD = 0x1 + TIOCPKT_FLUSHWRITE = 0x2 + TIOCPKT_IOCTL = 0x40 + TIOCPKT_NOSTOP = 0x10 + TIOCPKT_START = 0x8 + TIOCPKT_STOP = 0x4 + TIOCPTYGNAME = 0x40807453 + TIOCPTYGRANT = 0x20007454 + TIOCPTYUNLK = 0x20007452 + TIOCREMOTE = 0x80047469 + TIOCSBRK = 0x2000747b + TIOCSCONS = 0x20007463 + TIOCSCTTY = 0x20007461 + TIOCSDRAINWAIT = 0x80047457 + TIOCSDTR = 0x20007479 + TIOCSETA = 0x802c7414 + TIOCSETAF = 0x802c7416 + TIOCSETAW = 0x802c7415 + TIOCSETD = 0x8004741b + TIOCSIG = 0x2000745f + TIOCSPGRP = 0x80047476 + TIOCSTART = 0x2000746e + TIOCSTAT = 0x20007465 + TIOCSTI = 0x80017472 + TIOCSTOP = 0x2000746f + TIOCSWINSZ = 0x80087467 + TIOCTIMESTAMP = 0x40087459 + TIOCUCNTL = 0x80047466 + TOSTOP = 0x400000 + VDISCARD = 0xf + VDSUSP = 0xb + VEOF = 0x0 + VEOL = 0x1 + VEOL2 = 0x2 + VERASE = 0x3 + VINTR = 0x8 + VKILL = 0x5 + VLNEXT = 0xe + VMIN = 0x10 + VQUIT = 0x9 + VREPRINT = 0x6 + VSTART = 0xc + VSTATUS = 0x12 + VSTOP = 0xd + VSUSP = 0xa + VT0 = 0x0 + VT1 = 0x10000 + VTDLY = 0x10000 + VTIME = 0x11 + VWERASE = 0x4 + WCONTINUED = 0x10 + WCOREFLAG = 0x80 + WEXITED = 0x4 + WNOHANG = 0x1 + WNOWAIT = 0x20 + WORDSIZE = 0x20 + WSTOPPED = 0x8 + WUNTRACED = 0x2 +) + +// Errors +const ( + E2BIG = syscall.Errno(0x7) + EACCES = syscall.Errno(0xd) + EADDRINUSE = 
syscall.Errno(0x30) + EADDRNOTAVAIL = syscall.Errno(0x31) + EAFNOSUPPORT = syscall.Errno(0x2f) + EAGAIN = syscall.Errno(0x23) + EALREADY = syscall.Errno(0x25) + EAUTH = syscall.Errno(0x50) + EBADARCH = syscall.Errno(0x56) + EBADEXEC = syscall.Errno(0x55) + EBADF = syscall.Errno(0x9) + EBADMACHO = syscall.Errno(0x58) + EBADMSG = syscall.Errno(0x5e) + EBADRPC = syscall.Errno(0x48) + EBUSY = syscall.Errno(0x10) + ECANCELED = syscall.Errno(0x59) + ECHILD = syscall.Errno(0xa) + ECONNABORTED = syscall.Errno(0x35) + ECONNREFUSED = syscall.Errno(0x3d) + ECONNRESET = syscall.Errno(0x36) + EDEADLK = syscall.Errno(0xb) + EDESTADDRREQ = syscall.Errno(0x27) + EDEVERR = syscall.Errno(0x53) + EDOM = syscall.Errno(0x21) + EDQUOT = syscall.Errno(0x45) + EEXIST = syscall.Errno(0x11) + EFAULT = syscall.Errno(0xe) + EFBIG = syscall.Errno(0x1b) + EFTYPE = syscall.Errno(0x4f) + EHOSTDOWN = syscall.Errno(0x40) + EHOSTUNREACH = syscall.Errno(0x41) + EIDRM = syscall.Errno(0x5a) + EILSEQ = syscall.Errno(0x5c) + EINPROGRESS = syscall.Errno(0x24) + EINTR = syscall.Errno(0x4) + EINVAL = syscall.Errno(0x16) + EIO = syscall.Errno(0x5) + EISCONN = syscall.Errno(0x38) + EISDIR = syscall.Errno(0x15) + ELAST = syscall.Errno(0x6a) + ELOOP = syscall.Errno(0x3e) + EMFILE = syscall.Errno(0x18) + EMLINK = syscall.Errno(0x1f) + EMSGSIZE = syscall.Errno(0x28) + EMULTIHOP = syscall.Errno(0x5f) + ENAMETOOLONG = syscall.Errno(0x3f) + ENEEDAUTH = syscall.Errno(0x51) + ENETDOWN = syscall.Errno(0x32) + ENETRESET = syscall.Errno(0x34) + ENETUNREACH = syscall.Errno(0x33) + ENFILE = syscall.Errno(0x17) + ENOATTR = syscall.Errno(0x5d) + ENOBUFS = syscall.Errno(0x37) + ENODATA = syscall.Errno(0x60) + ENODEV = syscall.Errno(0x13) + ENOENT = syscall.Errno(0x2) + ENOEXEC = syscall.Errno(0x8) + ENOLCK = syscall.Errno(0x4d) + ENOLINK = syscall.Errno(0x61) + ENOMEM = syscall.Errno(0xc) + ENOMSG = syscall.Errno(0x5b) + ENOPOLICY = syscall.Errno(0x67) + ENOPROTOOPT = syscall.Errno(0x2a) + ENOSPC = syscall.Errno(0x1c) + ENOSR = syscall.Errno(0x62) + ENOSTR = syscall.Errno(0x63) + ENOSYS = syscall.Errno(0x4e) + ENOTBLK = syscall.Errno(0xf) + ENOTCONN = syscall.Errno(0x39) + ENOTDIR = syscall.Errno(0x14) + ENOTEMPTY = syscall.Errno(0x42) + ENOTRECOVERABLE = syscall.Errno(0x68) + ENOTSOCK = syscall.Errno(0x26) + ENOTSUP = syscall.Errno(0x2d) + ENOTTY = syscall.Errno(0x19) + ENXIO = syscall.Errno(0x6) + EOPNOTSUPP = syscall.Errno(0x66) + EOVERFLOW = syscall.Errno(0x54) + EOWNERDEAD = syscall.Errno(0x69) + EPERM = syscall.Errno(0x1) + EPFNOSUPPORT = syscall.Errno(0x2e) + EPIPE = syscall.Errno(0x20) + EPROCLIM = syscall.Errno(0x43) + EPROCUNAVAIL = syscall.Errno(0x4c) + EPROGMISMATCH = syscall.Errno(0x4b) + EPROGUNAVAIL = syscall.Errno(0x4a) + EPROTO = syscall.Errno(0x64) + EPROTONOSUPPORT = syscall.Errno(0x2b) + EPROTOTYPE = syscall.Errno(0x29) + EPWROFF = syscall.Errno(0x52) + EQFULL = syscall.Errno(0x6a) + ERANGE = syscall.Errno(0x22) + EREMOTE = syscall.Errno(0x47) + EROFS = syscall.Errno(0x1e) + ERPCMISMATCH = syscall.Errno(0x49) + ESHLIBVERS = syscall.Errno(0x57) + ESHUTDOWN = syscall.Errno(0x3a) + ESOCKTNOSUPPORT = syscall.Errno(0x2c) + ESPIPE = syscall.Errno(0x1d) + ESRCH = syscall.Errno(0x3) + ESTALE = syscall.Errno(0x46) + ETIME = syscall.Errno(0x65) + ETIMEDOUT = syscall.Errno(0x3c) + ETOOMANYREFS = syscall.Errno(0x3b) + ETXTBSY = syscall.Errno(0x1a) + EUSERS = syscall.Errno(0x44) + EWOULDBLOCK = syscall.Errno(0x23) + EXDEV = syscall.Errno(0x12) +) + +// Signals +const ( + SIGABRT = syscall.Signal(0x6) + SIGALRM = syscall.Signal(0xe) + SIGBUS = 
syscall.Signal(0xa) + SIGCHLD = syscall.Signal(0x14) + SIGCONT = syscall.Signal(0x13) + SIGEMT = syscall.Signal(0x7) + SIGFPE = syscall.Signal(0x8) + SIGHUP = syscall.Signal(0x1) + SIGILL = syscall.Signal(0x4) + SIGINFO = syscall.Signal(0x1d) + SIGINT = syscall.Signal(0x2) + SIGIO = syscall.Signal(0x17) + SIGIOT = syscall.Signal(0x6) + SIGKILL = syscall.Signal(0x9) + SIGPIPE = syscall.Signal(0xd) + SIGPROF = syscall.Signal(0x1b) + SIGQUIT = syscall.Signal(0x3) + SIGSEGV = syscall.Signal(0xb) + SIGSTOP = syscall.Signal(0x11) + SIGSYS = syscall.Signal(0xc) + SIGTERM = syscall.Signal(0xf) + SIGTRAP = syscall.Signal(0x5) + SIGTSTP = syscall.Signal(0x12) + SIGTTIN = syscall.Signal(0x15) + SIGTTOU = syscall.Signal(0x16) + SIGURG = syscall.Signal(0x10) + SIGUSR1 = syscall.Signal(0x1e) + SIGUSR2 = syscall.Signal(0x1f) + SIGVTALRM = syscall.Signal(0x1a) + SIGWINCH = syscall.Signal(0x1c) + SIGXCPU = syscall.Signal(0x18) + SIGXFSZ = syscall.Signal(0x19) +) + +// Error table +var errors = [...]string{ + 1: "operation not permitted", + 2: "no such file or directory", + 3: "no such process", + 4: "interrupted system call", + 5: "input/output error", + 6: "device not configured", + 7: "argument list too long", + 8: "exec format error", + 9: "bad file descriptor", + 10: "no child processes", + 11: "resource deadlock avoided", + 12: "cannot allocate memory", + 13: "permission denied", + 14: "bad address", + 15: "block device required", + 16: "resource busy", + 17: "file exists", + 18: "cross-device link", + 19: "operation not supported by device", + 20: "not a directory", + 21: "is a directory", + 22: "invalid argument", + 23: "too many open files in system", + 24: "too many open files", + 25: "inappropriate ioctl for device", + 26: "text file busy", + 27: "file too large", + 28: "no space left on device", + 29: "illegal seek", + 30: "read-only file system", + 31: "too many links", + 32: "broken pipe", + 33: "numerical argument out of domain", + 34: "result too large", + 35: "resource temporarily unavailable", + 36: "operation now in progress", + 37: "operation already in progress", + 38: "socket operation on non-socket", + 39: "destination address required", + 40: "message too long", + 41: "protocol wrong type for socket", + 42: "protocol not available", + 43: "protocol not supported", + 44: "socket type not supported", + 45: "operation not supported", + 46: "protocol family not supported", + 47: "address family not supported by protocol family", + 48: "address already in use", + 49: "can't assign requested address", + 50: "network is down", + 51: "network is unreachable", + 52: "network dropped connection on reset", + 53: "software caused connection abort", + 54: "connection reset by peer", + 55: "no buffer space available", + 56: "socket is already connected", + 57: "socket is not connected", + 58: "can't send after socket shutdown", + 59: "too many references: can't splice", + 60: "operation timed out", + 61: "connection refused", + 62: "too many levels of symbolic links", + 63: "file name too long", + 64: "host is down", + 65: "no route to host", + 66: "directory not empty", + 67: "too many processes", + 68: "too many users", + 69: "disc quota exceeded", + 70: "stale NFS file handle", + 71: "too many levels of remote in path", + 72: "RPC struct is bad", + 73: "RPC version wrong", + 74: "RPC prog. 
not avail", + 75: "program version wrong", + 76: "bad procedure for program", + 77: "no locks available", + 78: "function not implemented", + 79: "inappropriate file type or format", + 80: "authentication error", + 81: "need authenticator", + 82: "device power is off", + 83: "device error", + 84: "value too large to be stored in data type", + 85: "bad executable (or shared library)", + 86: "bad CPU type in executable", + 87: "shared library version mismatch", + 88: "malformed Mach-o file", + 89: "operation canceled", + 90: "identifier removed", + 91: "no message of desired type", + 92: "illegal byte sequence", + 93: "attribute not found", + 94: "bad message", + 95: "EMULTIHOP (Reserved)", + 96: "no message available on STREAM", + 97: "ENOLINK (Reserved)", + 98: "no STREAM resources", + 99: "not a STREAM", + 100: "protocol error", + 101: "STREAM ioctl timeout", + 102: "operation not supported on socket", + 103: "policy not found", + 104: "state not recoverable", + 105: "previous owner died", + 106: "interface output queue is full", +} + +// Signal table +var signals = [...]string{ + 1: "hangup", + 2: "interrupt", + 3: "quit", + 4: "illegal instruction", + 5: "trace/BPT trap", + 6: "abort trap", + 7: "EMT trap", + 8: "floating point exception", + 9: "killed", + 10: "bus error", + 11: "segmentation fault", + 12: "bad system call", + 13: "broken pipe", + 14: "alarm clock", + 15: "terminated", + 16: "urgent I/O condition", + 17: "suspended (signal)", + 18: "suspended", + 19: "continued", + 20: "child exited", + 21: "stopped (tty input)", + 22: "stopped (tty output)", + 23: "I/O possible", + 24: "cputime limit exceeded", + 25: "filesize limit exceeded", + 26: "virtual timer expired", + 27: "profiling timer expired", + 28: "window size changes", + 29: "information request", + 30: "user defined signal 1", + 31: "user defined signal 2", +} diff --git a/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zerrors_darwin_amd64.go b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zerrors_darwin_amd64.go new file mode 100644 index 000000000..9594f9381 --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zerrors_darwin_amd64.go @@ -0,0 +1,1576 @@ +// mkerrors.sh -m64 +// MACHINE GENERATED BY THE COMMAND ABOVE; DO NOT EDIT + +// +build amd64,darwin + +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs -- -m64 _const.go + +package unix + +import "syscall" + +const ( + AF_APPLETALK = 0x10 + AF_CCITT = 0xa + AF_CHAOS = 0x5 + AF_CNT = 0x15 + AF_COIP = 0x14 + AF_DATAKIT = 0x9 + AF_DECnet = 0xc + AF_DLI = 0xd + AF_E164 = 0x1c + AF_ECMA = 0x8 + AF_HYLINK = 0xf + AF_IEEE80211 = 0x25 + AF_IMPLINK = 0x3 + AF_INET = 0x2 + AF_INET6 = 0x1e + AF_IPX = 0x17 + AF_ISDN = 0x1c + AF_ISO = 0x7 + AF_LAT = 0xe + AF_LINK = 0x12 + AF_LOCAL = 0x1 + AF_MAX = 0x28 + AF_NATM = 0x1f + AF_NDRV = 0x1b + AF_NETBIOS = 0x21 + AF_NS = 0x6 + AF_OSI = 0x7 + AF_PPP = 0x22 + AF_PUP = 0x4 + AF_RESERVED_36 = 0x24 + AF_ROUTE = 0x11 + AF_SIP = 0x18 + AF_SNA = 0xb + AF_SYSTEM = 0x20 + AF_UNIX = 0x1 + AF_UNSPEC = 0x0 + AF_UTUN = 0x26 + B0 = 0x0 + B110 = 0x6e + B115200 = 0x1c200 + B1200 = 0x4b0 + B134 = 0x86 + B14400 = 0x3840 + B150 = 0x96 + B1800 = 0x708 + B19200 = 0x4b00 + B200 = 0xc8 + B230400 = 0x38400 + B2400 = 0x960 + B28800 = 0x7080 + B300 = 0x12c + B38400 = 0x9600 + B4800 = 0x12c0 + B50 = 0x32 + B57600 = 0xe100 + B600 = 0x258 + B7200 = 0x1c20 + B75 = 0x4b + B76800 = 0x12c00 + B9600 = 0x2580 + BIOCFLUSH = 0x20004268 + BIOCGBLEN = 0x40044266 + BIOCGDLT = 
0x4004426a + BIOCGDLTLIST = 0xc00c4279 + BIOCGETIF = 0x4020426b + BIOCGHDRCMPLT = 0x40044274 + BIOCGRSIG = 0x40044272 + BIOCGRTIMEOUT = 0x4010426e + BIOCGSEESENT = 0x40044276 + BIOCGSTATS = 0x4008426f + BIOCIMMEDIATE = 0x80044270 + BIOCPROMISC = 0x20004269 + BIOCSBLEN = 0xc0044266 + BIOCSDLT = 0x80044278 + BIOCSETF = 0x80104267 + BIOCSETFNR = 0x8010427e + BIOCSETIF = 0x8020426c + BIOCSHDRCMPLT = 0x80044275 + BIOCSRSIG = 0x80044273 + BIOCSRTIMEOUT = 0x8010426d + BIOCSSEESENT = 0x80044277 + BIOCVERSION = 0x40044271 + BPF_A = 0x10 + BPF_ABS = 0x20 + BPF_ADD = 0x0 + BPF_ALIGNMENT = 0x4 + BPF_ALU = 0x4 + BPF_AND = 0x50 + BPF_B = 0x10 + BPF_DIV = 0x30 + BPF_H = 0x8 + BPF_IMM = 0x0 + BPF_IND = 0x40 + BPF_JA = 0x0 + BPF_JEQ = 0x10 + BPF_JGE = 0x30 + BPF_JGT = 0x20 + BPF_JMP = 0x5 + BPF_JSET = 0x40 + BPF_K = 0x0 + BPF_LD = 0x0 + BPF_LDX = 0x1 + BPF_LEN = 0x80 + BPF_LSH = 0x60 + BPF_MAJOR_VERSION = 0x1 + BPF_MAXBUFSIZE = 0x80000 + BPF_MAXINSNS = 0x200 + BPF_MEM = 0x60 + BPF_MEMWORDS = 0x10 + BPF_MINBUFSIZE = 0x20 + BPF_MINOR_VERSION = 0x1 + BPF_MISC = 0x7 + BPF_MSH = 0xa0 + BPF_MUL = 0x20 + BPF_NEG = 0x80 + BPF_OR = 0x40 + BPF_RELEASE = 0x30bb6 + BPF_RET = 0x6 + BPF_RSH = 0x70 + BPF_ST = 0x2 + BPF_STX = 0x3 + BPF_SUB = 0x10 + BPF_TAX = 0x0 + BPF_TXA = 0x80 + BPF_W = 0x0 + BPF_X = 0x8 + BRKINT = 0x2 + CFLUSH = 0xf + CLOCAL = 0x8000 + CREAD = 0x800 + CS5 = 0x0 + CS6 = 0x100 + CS7 = 0x200 + CS8 = 0x300 + CSIZE = 0x300 + CSTART = 0x11 + CSTATUS = 0x14 + CSTOP = 0x13 + CSTOPB = 0x400 + CSUSP = 0x1a + CTL_MAXNAME = 0xc + CTL_NET = 0x4 + DLT_A429 = 0xb8 + DLT_A653_ICM = 0xb9 + DLT_AIRONET_HEADER = 0x78 + DLT_AOS = 0xde + DLT_APPLE_IP_OVER_IEEE1394 = 0x8a + DLT_ARCNET = 0x7 + DLT_ARCNET_LINUX = 0x81 + DLT_ATM_CLIP = 0x13 + DLT_ATM_RFC1483 = 0xb + DLT_AURORA = 0x7e + DLT_AX25 = 0x3 + DLT_AX25_KISS = 0xca + DLT_BACNET_MS_TP = 0xa5 + DLT_BLUETOOTH_HCI_H4 = 0xbb + DLT_BLUETOOTH_HCI_H4_WITH_PHDR = 0xc9 + DLT_CAN20B = 0xbe + DLT_CAN_SOCKETCAN = 0xe3 + DLT_CHAOS = 0x5 + DLT_CHDLC = 0x68 + DLT_CISCO_IOS = 0x76 + DLT_C_HDLC = 0x68 + DLT_C_HDLC_WITH_DIR = 0xcd + DLT_DBUS = 0xe7 + DLT_DECT = 0xdd + DLT_DOCSIS = 0x8f + DLT_DVB_CI = 0xeb + DLT_ECONET = 0x73 + DLT_EN10MB = 0x1 + DLT_EN3MB = 0x2 + DLT_ENC = 0x6d + DLT_ERF = 0xc5 + DLT_ERF_ETH = 0xaf + DLT_ERF_POS = 0xb0 + DLT_FC_2 = 0xe0 + DLT_FC_2_WITH_FRAME_DELIMS = 0xe1 + DLT_FDDI = 0xa + DLT_FLEXRAY = 0xd2 + DLT_FRELAY = 0x6b + DLT_FRELAY_WITH_DIR = 0xce + DLT_GCOM_SERIAL = 0xad + DLT_GCOM_T1E1 = 0xac + DLT_GPF_F = 0xab + DLT_GPF_T = 0xaa + DLT_GPRS_LLC = 0xa9 + DLT_GSMTAP_ABIS = 0xda + DLT_GSMTAP_UM = 0xd9 + DLT_HHDLC = 0x79 + DLT_IBM_SN = 0x92 + DLT_IBM_SP = 0x91 + DLT_IEEE802 = 0x6 + DLT_IEEE802_11 = 0x69 + DLT_IEEE802_11_RADIO = 0x7f + DLT_IEEE802_11_RADIO_AVS = 0xa3 + DLT_IEEE802_15_4 = 0xc3 + DLT_IEEE802_15_4_LINUX = 0xbf + DLT_IEEE802_15_4_NOFCS = 0xe6 + DLT_IEEE802_15_4_NONASK_PHY = 0xd7 + DLT_IEEE802_16_MAC_CPS = 0xbc + DLT_IEEE802_16_MAC_CPS_RADIO = 0xc1 + DLT_IPFILTER = 0x74 + DLT_IPMB = 0xc7 + DLT_IPMB_LINUX = 0xd1 + DLT_IPNET = 0xe2 + DLT_IPOIB = 0xf2 + DLT_IPV4 = 0xe4 + DLT_IPV6 = 0xe5 + DLT_IP_OVER_FC = 0x7a + DLT_JUNIPER_ATM1 = 0x89 + DLT_JUNIPER_ATM2 = 0x87 + DLT_JUNIPER_ATM_CEMIC = 0xee + DLT_JUNIPER_CHDLC = 0xb5 + DLT_JUNIPER_ES = 0x84 + DLT_JUNIPER_ETHER = 0xb2 + DLT_JUNIPER_FIBRECHANNEL = 0xea + DLT_JUNIPER_FRELAY = 0xb4 + DLT_JUNIPER_GGSN = 0x85 + DLT_JUNIPER_ISM = 0xc2 + DLT_JUNIPER_MFR = 0x86 + DLT_JUNIPER_MLFR = 0x83 + DLT_JUNIPER_MLPPP = 0x82 + DLT_JUNIPER_MONITOR = 0xa4 + DLT_JUNIPER_PIC_PEER = 0xae + DLT_JUNIPER_PPP = 0xb3 + 
DLT_JUNIPER_PPPOE = 0xa7 + DLT_JUNIPER_PPPOE_ATM = 0xa8 + DLT_JUNIPER_SERVICES = 0x88 + DLT_JUNIPER_SRX_E2E = 0xe9 + DLT_JUNIPER_ST = 0xc8 + DLT_JUNIPER_VP = 0xb7 + DLT_JUNIPER_VS = 0xe8 + DLT_LAPB_WITH_DIR = 0xcf + DLT_LAPD = 0xcb + DLT_LIN = 0xd4 + DLT_LINUX_EVDEV = 0xd8 + DLT_LINUX_IRDA = 0x90 + DLT_LINUX_LAPD = 0xb1 + DLT_LINUX_PPP_WITHDIRECTION = 0xa6 + DLT_LINUX_SLL = 0x71 + DLT_LOOP = 0x6c + DLT_LTALK = 0x72 + DLT_MATCHING_MAX = 0xf5 + DLT_MATCHING_MIN = 0x68 + DLT_MFR = 0xb6 + DLT_MOST = 0xd3 + DLT_MPEG_2_TS = 0xf3 + DLT_MPLS = 0xdb + DLT_MTP2 = 0x8c + DLT_MTP2_WITH_PHDR = 0x8b + DLT_MTP3 = 0x8d + DLT_MUX27010 = 0xec + DLT_NETANALYZER = 0xf0 + DLT_NETANALYZER_TRANSPARENT = 0xf1 + DLT_NFC_LLCP = 0xf5 + DLT_NFLOG = 0xef + DLT_NG40 = 0xf4 + DLT_NULL = 0x0 + DLT_PCI_EXP = 0x7d + DLT_PFLOG = 0x75 + DLT_PFSYNC = 0x12 + DLT_PPI = 0xc0 + DLT_PPP = 0x9 + DLT_PPP_BSDOS = 0x10 + DLT_PPP_ETHER = 0x33 + DLT_PPP_PPPD = 0xa6 + DLT_PPP_SERIAL = 0x32 + DLT_PPP_WITH_DIR = 0xcc + DLT_PPP_WITH_DIRECTION = 0xa6 + DLT_PRISM_HEADER = 0x77 + DLT_PRONET = 0x4 + DLT_RAIF1 = 0xc6 + DLT_RAW = 0xc + DLT_RIO = 0x7c + DLT_SCCP = 0x8e + DLT_SITA = 0xc4 + DLT_SLIP = 0x8 + DLT_SLIP_BSDOS = 0xf + DLT_STANAG_5066_D_PDU = 0xed + DLT_SUNATM = 0x7b + DLT_SYMANTEC_FIREWALL = 0x63 + DLT_TZSP = 0x80 + DLT_USB = 0xba + DLT_USB_LINUX = 0xbd + DLT_USB_LINUX_MMAPPED = 0xdc + DLT_USER0 = 0x93 + DLT_USER1 = 0x94 + DLT_USER10 = 0x9d + DLT_USER11 = 0x9e + DLT_USER12 = 0x9f + DLT_USER13 = 0xa0 + DLT_USER14 = 0xa1 + DLT_USER15 = 0xa2 + DLT_USER2 = 0x95 + DLT_USER3 = 0x96 + DLT_USER4 = 0x97 + DLT_USER5 = 0x98 + DLT_USER6 = 0x99 + DLT_USER7 = 0x9a + DLT_USER8 = 0x9b + DLT_USER9 = 0x9c + DLT_WIHART = 0xdf + DLT_X2E_SERIAL = 0xd5 + DLT_X2E_XORAYA = 0xd6 + DT_BLK = 0x6 + DT_CHR = 0x2 + DT_DIR = 0x4 + DT_FIFO = 0x1 + DT_LNK = 0xa + DT_REG = 0x8 + DT_SOCK = 0xc + DT_UNKNOWN = 0x0 + DT_WHT = 0xe + ECHO = 0x8 + ECHOCTL = 0x40 + ECHOE = 0x2 + ECHOK = 0x4 + ECHOKE = 0x1 + ECHONL = 0x10 + ECHOPRT = 0x20 + EVFILT_AIO = -0x3 + EVFILT_FS = -0x9 + EVFILT_MACHPORT = -0x8 + EVFILT_PROC = -0x5 + EVFILT_READ = -0x1 + EVFILT_SIGNAL = -0x6 + EVFILT_SYSCOUNT = 0xe + EVFILT_THREADMARKER = 0xe + EVFILT_TIMER = -0x7 + EVFILT_USER = -0xa + EVFILT_VM = -0xc + EVFILT_VNODE = -0x4 + EVFILT_WRITE = -0x2 + EV_ADD = 0x1 + EV_CLEAR = 0x20 + EV_DELETE = 0x2 + EV_DISABLE = 0x8 + EV_DISPATCH = 0x80 + EV_ENABLE = 0x4 + EV_EOF = 0x8000 + EV_ERROR = 0x4000 + EV_FLAG0 = 0x1000 + EV_FLAG1 = 0x2000 + EV_ONESHOT = 0x10 + EV_OOBAND = 0x2000 + EV_POLL = 0x1000 + EV_RECEIPT = 0x40 + EV_SYSFLAGS = 0xf000 + EXTA = 0x4b00 + EXTB = 0x9600 + EXTPROC = 0x800 + FD_CLOEXEC = 0x1 + FD_SETSIZE = 0x400 + FLUSHO = 0x800000 + F_ADDFILESIGS = 0x3d + F_ADDSIGS = 0x3b + F_ALLOCATEALL = 0x4 + F_ALLOCATECONTIG = 0x2 + F_CHKCLEAN = 0x29 + F_DUPFD = 0x0 + F_DUPFD_CLOEXEC = 0x43 + F_FINDSIGS = 0x4e + F_FLUSH_DATA = 0x28 + F_FREEZE_FS = 0x35 + F_FULLFSYNC = 0x33 + F_GETCODEDIR = 0x48 + F_GETFD = 0x1 + F_GETFL = 0x3 + F_GETLK = 0x7 + F_GETLKPID = 0x42 + F_GETNOSIGPIPE = 0x4a + F_GETOWN = 0x5 + F_GETPATH = 0x32 + F_GETPATH_MTMINFO = 0x47 + F_GETPROTECTIONCLASS = 0x3f + F_GETPROTECTIONLEVEL = 0x4d + F_GLOBAL_NOCACHE = 0x37 + F_LOG2PHYS = 0x31 + F_LOG2PHYS_EXT = 0x41 + F_NOCACHE = 0x30 + F_NODIRECT = 0x3e + F_OK = 0x0 + F_PATHPKG_CHECK = 0x34 + F_PEOFPOSMODE = 0x3 + F_PREALLOCATE = 0x2a + F_RDADVISE = 0x2c + F_RDAHEAD = 0x2d + F_RDLCK = 0x1 + F_SETBACKINGSTORE = 0x46 + F_SETFD = 0x2 + F_SETFL = 0x4 + F_SETLK = 0x8 + F_SETLKW = 0x9 + F_SETLKWTIMEOUT = 0xa + F_SETNOSIGPIPE = 0x49 + F_SETOWN = 0x6 + 
F_SETPROTECTIONCLASS = 0x40 + F_SETSIZE = 0x2b + F_SINGLE_WRITER = 0x4c + F_THAW_FS = 0x36 + F_TRANSCODEKEY = 0x4b + F_UNLCK = 0x2 + F_VOLPOSMODE = 0x4 + F_WRLCK = 0x3 + HUPCL = 0x4000 + ICANON = 0x100 + ICMP6_FILTER = 0x12 + ICRNL = 0x100 + IEXTEN = 0x400 + IFF_ALLMULTI = 0x200 + IFF_ALTPHYS = 0x4000 + IFF_BROADCAST = 0x2 + IFF_DEBUG = 0x4 + IFF_LINK0 = 0x1000 + IFF_LINK1 = 0x2000 + IFF_LINK2 = 0x4000 + IFF_LOOPBACK = 0x8 + IFF_MULTICAST = 0x8000 + IFF_NOARP = 0x80 + IFF_NOTRAILERS = 0x20 + IFF_OACTIVE = 0x400 + IFF_POINTOPOINT = 0x10 + IFF_PROMISC = 0x100 + IFF_RUNNING = 0x40 + IFF_SIMPLEX = 0x800 + IFF_UP = 0x1 + IFNAMSIZ = 0x10 + IFT_1822 = 0x2 + IFT_AAL5 = 0x31 + IFT_ARCNET = 0x23 + IFT_ARCNETPLUS = 0x24 + IFT_ATM = 0x25 + IFT_BRIDGE = 0xd1 + IFT_CARP = 0xf8 + IFT_CELLULAR = 0xff + IFT_CEPT = 0x13 + IFT_DS3 = 0x1e + IFT_ENC = 0xf4 + IFT_EON = 0x19 + IFT_ETHER = 0x6 + IFT_FAITH = 0x38 + IFT_FDDI = 0xf + IFT_FRELAY = 0x20 + IFT_FRELAYDCE = 0x2c + IFT_GIF = 0x37 + IFT_HDH1822 = 0x3 + IFT_HIPPI = 0x2f + IFT_HSSI = 0x2e + IFT_HY = 0xe + IFT_IEEE1394 = 0x90 + IFT_IEEE8023ADLAG = 0x88 + IFT_ISDNBASIC = 0x14 + IFT_ISDNPRIMARY = 0x15 + IFT_ISO88022LLC = 0x29 + IFT_ISO88023 = 0x7 + IFT_ISO88024 = 0x8 + IFT_ISO88025 = 0x9 + IFT_ISO88026 = 0xa + IFT_L2VLAN = 0x87 + IFT_LAPB = 0x10 + IFT_LOCALTALK = 0x2a + IFT_LOOP = 0x18 + IFT_MIOX25 = 0x26 + IFT_MODEM = 0x30 + IFT_NSIP = 0x1b + IFT_OTHER = 0x1 + IFT_P10 = 0xc + IFT_P80 = 0xd + IFT_PARA = 0x22 + IFT_PDP = 0xff + IFT_PFLOG = 0xf5 + IFT_PFSYNC = 0xf6 + IFT_PKTAP = 0xfe + IFT_PPP = 0x17 + IFT_PROPMUX = 0x36 + IFT_PROPVIRTUAL = 0x35 + IFT_PTPSERIAL = 0x16 + IFT_RS232 = 0x21 + IFT_SDLC = 0x11 + IFT_SIP = 0x1f + IFT_SLIP = 0x1c + IFT_SMDSDXI = 0x2b + IFT_SMDSICIP = 0x34 + IFT_SONET = 0x27 + IFT_SONETPATH = 0x32 + IFT_SONETVT = 0x33 + IFT_STARLAN = 0xb + IFT_STF = 0x39 + IFT_T1 = 0x12 + IFT_ULTRA = 0x1d + IFT_V35 = 0x2d + IFT_X25 = 0x5 + IFT_X25DDN = 0x4 + IFT_X25PLE = 0x28 + IFT_XETHER = 0x1a + IGNBRK = 0x1 + IGNCR = 0x80 + IGNPAR = 0x4 + IMAXBEL = 0x2000 + INLCR = 0x40 + INPCK = 0x10 + IN_CLASSA_HOST = 0xffffff + IN_CLASSA_MAX = 0x80 + IN_CLASSA_NET = 0xff000000 + IN_CLASSA_NSHIFT = 0x18 + IN_CLASSB_HOST = 0xffff + IN_CLASSB_MAX = 0x10000 + IN_CLASSB_NET = 0xffff0000 + IN_CLASSB_NSHIFT = 0x10 + IN_CLASSC_HOST = 0xff + IN_CLASSC_NET = 0xffffff00 + IN_CLASSC_NSHIFT = 0x8 + IN_CLASSD_HOST = 0xfffffff + IN_CLASSD_NET = 0xf0000000 + IN_CLASSD_NSHIFT = 0x1c + IN_LINKLOCALNETNUM = 0xa9fe0000 + IN_LOOPBACKNET = 0x7f + IPPROTO_3PC = 0x22 + IPPROTO_ADFS = 0x44 + IPPROTO_AH = 0x33 + IPPROTO_AHIP = 0x3d + IPPROTO_APES = 0x63 + IPPROTO_ARGUS = 0xd + IPPROTO_AX25 = 0x5d + IPPROTO_BHA = 0x31 + IPPROTO_BLT = 0x1e + IPPROTO_BRSATMON = 0x4c + IPPROTO_CFTP = 0x3e + IPPROTO_CHAOS = 0x10 + IPPROTO_CMTP = 0x26 + IPPROTO_CPHB = 0x49 + IPPROTO_CPNX = 0x48 + IPPROTO_DDP = 0x25 + IPPROTO_DGP = 0x56 + IPPROTO_DIVERT = 0xfe + IPPROTO_DONE = 0x101 + IPPROTO_DSTOPTS = 0x3c + IPPROTO_EGP = 0x8 + IPPROTO_EMCON = 0xe + IPPROTO_ENCAP = 0x62 + IPPROTO_EON = 0x50 + IPPROTO_ESP = 0x32 + IPPROTO_ETHERIP = 0x61 + IPPROTO_FRAGMENT = 0x2c + IPPROTO_GGP = 0x3 + IPPROTO_GMTP = 0x64 + IPPROTO_GRE = 0x2f + IPPROTO_HELLO = 0x3f + IPPROTO_HMP = 0x14 + IPPROTO_HOPOPTS = 0x0 + IPPROTO_ICMP = 0x1 + IPPROTO_ICMPV6 = 0x3a + IPPROTO_IDP = 0x16 + IPPROTO_IDPR = 0x23 + IPPROTO_IDRP = 0x2d + IPPROTO_IGMP = 0x2 + IPPROTO_IGP = 0x55 + IPPROTO_IGRP = 0x58 + IPPROTO_IL = 0x28 + IPPROTO_INLSP = 0x34 + IPPROTO_INP = 0x20 + IPPROTO_IP = 0x0 + IPPROTO_IPCOMP = 0x6c + IPPROTO_IPCV = 0x47 + IPPROTO_IPEIP = 0x5e + 
IPPROTO_IPIP = 0x4 + IPPROTO_IPPC = 0x43 + IPPROTO_IPV4 = 0x4 + IPPROTO_IPV6 = 0x29 + IPPROTO_IRTP = 0x1c + IPPROTO_KRYPTOLAN = 0x41 + IPPROTO_LARP = 0x5b + IPPROTO_LEAF1 = 0x19 + IPPROTO_LEAF2 = 0x1a + IPPROTO_MAX = 0x100 + IPPROTO_MAXID = 0x34 + IPPROTO_MEAS = 0x13 + IPPROTO_MHRP = 0x30 + IPPROTO_MICP = 0x5f + IPPROTO_MTP = 0x5c + IPPROTO_MUX = 0x12 + IPPROTO_ND = 0x4d + IPPROTO_NHRP = 0x36 + IPPROTO_NONE = 0x3b + IPPROTO_NSP = 0x1f + IPPROTO_NVPII = 0xb + IPPROTO_OSPFIGP = 0x59 + IPPROTO_PGM = 0x71 + IPPROTO_PIGP = 0x9 + IPPROTO_PIM = 0x67 + IPPROTO_PRM = 0x15 + IPPROTO_PUP = 0xc + IPPROTO_PVP = 0x4b + IPPROTO_RAW = 0xff + IPPROTO_RCCMON = 0xa + IPPROTO_RDP = 0x1b + IPPROTO_ROUTING = 0x2b + IPPROTO_RSVP = 0x2e + IPPROTO_RVD = 0x42 + IPPROTO_SATEXPAK = 0x40 + IPPROTO_SATMON = 0x45 + IPPROTO_SCCSP = 0x60 + IPPROTO_SCTP = 0x84 + IPPROTO_SDRP = 0x2a + IPPROTO_SEP = 0x21 + IPPROTO_SRPC = 0x5a + IPPROTO_ST = 0x7 + IPPROTO_SVMTP = 0x52 + IPPROTO_SWIPE = 0x35 + IPPROTO_TCF = 0x57 + IPPROTO_TCP = 0x6 + IPPROTO_TP = 0x1d + IPPROTO_TPXX = 0x27 + IPPROTO_TRUNK1 = 0x17 + IPPROTO_TRUNK2 = 0x18 + IPPROTO_TTP = 0x54 + IPPROTO_UDP = 0x11 + IPPROTO_VINES = 0x53 + IPPROTO_VISA = 0x46 + IPPROTO_VMTP = 0x51 + IPPROTO_WBEXPAK = 0x4f + IPPROTO_WBMON = 0x4e + IPPROTO_WSN = 0x4a + IPPROTO_XNET = 0xf + IPPROTO_XTP = 0x24 + IPV6_2292DSTOPTS = 0x17 + IPV6_2292HOPLIMIT = 0x14 + IPV6_2292HOPOPTS = 0x16 + IPV6_2292NEXTHOP = 0x15 + IPV6_2292PKTINFO = 0x13 + IPV6_2292PKTOPTIONS = 0x19 + IPV6_2292RTHDR = 0x18 + IPV6_BINDV6ONLY = 0x1b + IPV6_BOUND_IF = 0x7d + IPV6_CHECKSUM = 0x1a + IPV6_DEFAULT_MULTICAST_HOPS = 0x1 + IPV6_DEFAULT_MULTICAST_LOOP = 0x1 + IPV6_DEFHLIM = 0x40 + IPV6_FAITH = 0x1d + IPV6_FLOWINFO_MASK = 0xffffff0f + IPV6_FLOWLABEL_MASK = 0xffff0f00 + IPV6_FRAGTTL = 0x3c + IPV6_FW_ADD = 0x1e + IPV6_FW_DEL = 0x1f + IPV6_FW_FLUSH = 0x20 + IPV6_FW_GET = 0x22 + IPV6_FW_ZERO = 0x21 + IPV6_HLIMDEC = 0x1 + IPV6_IPSEC_POLICY = 0x1c + IPV6_JOIN_GROUP = 0xc + IPV6_LEAVE_GROUP = 0xd + IPV6_MAXHLIM = 0xff + IPV6_MAXOPTHDR = 0x800 + IPV6_MAXPACKET = 0xffff + IPV6_MAX_GROUP_SRC_FILTER = 0x200 + IPV6_MAX_MEMBERSHIPS = 0xfff + IPV6_MAX_SOCK_SRC_FILTER = 0x80 + IPV6_MIN_MEMBERSHIPS = 0x1f + IPV6_MMTU = 0x500 + IPV6_MULTICAST_HOPS = 0xa + IPV6_MULTICAST_IF = 0x9 + IPV6_MULTICAST_LOOP = 0xb + IPV6_PORTRANGE = 0xe + IPV6_PORTRANGE_DEFAULT = 0x0 + IPV6_PORTRANGE_HIGH = 0x1 + IPV6_PORTRANGE_LOW = 0x2 + IPV6_RECVTCLASS = 0x23 + IPV6_RTHDR_LOOSE = 0x0 + IPV6_RTHDR_STRICT = 0x1 + IPV6_RTHDR_TYPE_0 = 0x0 + IPV6_SOCKOPT_RESERVED1 = 0x3 + IPV6_TCLASS = 0x24 + IPV6_UNICAST_HOPS = 0x4 + IPV6_V6ONLY = 0x1b + IPV6_VERSION = 0x60 + IPV6_VERSION_MASK = 0xf0 + IP_ADD_MEMBERSHIP = 0xc + IP_ADD_SOURCE_MEMBERSHIP = 0x46 + IP_BLOCK_SOURCE = 0x48 + IP_BOUND_IF = 0x19 + IP_DEFAULT_MULTICAST_LOOP = 0x1 + IP_DEFAULT_MULTICAST_TTL = 0x1 + IP_DF = 0x4000 + IP_DROP_MEMBERSHIP = 0xd + IP_DROP_SOURCE_MEMBERSHIP = 0x47 + IP_DUMMYNET_CONFIGURE = 0x3c + IP_DUMMYNET_DEL = 0x3d + IP_DUMMYNET_FLUSH = 0x3e + IP_DUMMYNET_GET = 0x40 + IP_FAITH = 0x16 + IP_FW_ADD = 0x28 + IP_FW_DEL = 0x29 + IP_FW_FLUSH = 0x2a + IP_FW_GET = 0x2c + IP_FW_RESETLOG = 0x2d + IP_FW_ZERO = 0x2b + IP_HDRINCL = 0x2 + IP_IPSEC_POLICY = 0x15 + IP_MAXPACKET = 0xffff + IP_MAX_GROUP_SRC_FILTER = 0x200 + IP_MAX_MEMBERSHIPS = 0xfff + IP_MAX_SOCK_MUTE_FILTER = 0x80 + IP_MAX_SOCK_SRC_FILTER = 0x80 + IP_MF = 0x2000 + IP_MIN_MEMBERSHIPS = 0x1f + IP_MSFILTER = 0x4a + IP_MSS = 0x240 + IP_MULTICAST_IF = 0x9 + IP_MULTICAST_IFINDEX = 0x42 + IP_MULTICAST_LOOP = 0xb + IP_MULTICAST_TTL = 0xa + IP_MULTICAST_VIF = 
0xe + IP_NAT__XXX = 0x37 + IP_OFFMASK = 0x1fff + IP_OLD_FW_ADD = 0x32 + IP_OLD_FW_DEL = 0x33 + IP_OLD_FW_FLUSH = 0x34 + IP_OLD_FW_GET = 0x36 + IP_OLD_FW_RESETLOG = 0x38 + IP_OLD_FW_ZERO = 0x35 + IP_OPTIONS = 0x1 + IP_PKTINFO = 0x1a + IP_PORTRANGE = 0x13 + IP_PORTRANGE_DEFAULT = 0x0 + IP_PORTRANGE_HIGH = 0x1 + IP_PORTRANGE_LOW = 0x2 + IP_RECVDSTADDR = 0x7 + IP_RECVIF = 0x14 + IP_RECVOPTS = 0x5 + IP_RECVPKTINFO = 0x1a + IP_RECVRETOPTS = 0x6 + IP_RECVTTL = 0x18 + IP_RETOPTS = 0x8 + IP_RF = 0x8000 + IP_RSVP_OFF = 0x10 + IP_RSVP_ON = 0xf + IP_RSVP_VIF_OFF = 0x12 + IP_RSVP_VIF_ON = 0x11 + IP_STRIPHDR = 0x17 + IP_TOS = 0x3 + IP_TRAFFIC_MGT_BACKGROUND = 0x41 + IP_TTL = 0x4 + IP_UNBLOCK_SOURCE = 0x49 + ISIG = 0x80 + ISTRIP = 0x20 + IUTF8 = 0x4000 + IXANY = 0x800 + IXOFF = 0x400 + IXON = 0x200 + LOCK_EX = 0x2 + LOCK_NB = 0x4 + LOCK_SH = 0x1 + LOCK_UN = 0x8 + MADV_CAN_REUSE = 0x9 + MADV_DONTNEED = 0x4 + MADV_FREE = 0x5 + MADV_FREE_REUSABLE = 0x7 + MADV_FREE_REUSE = 0x8 + MADV_NORMAL = 0x0 + MADV_RANDOM = 0x1 + MADV_SEQUENTIAL = 0x2 + MADV_WILLNEED = 0x3 + MADV_ZERO_WIRED_PAGES = 0x6 + MAP_ANON = 0x1000 + MAP_COPY = 0x2 + MAP_FILE = 0x0 + MAP_FIXED = 0x10 + MAP_HASSEMAPHORE = 0x200 + MAP_JIT = 0x800 + MAP_NOCACHE = 0x400 + MAP_NOEXTEND = 0x100 + MAP_NORESERVE = 0x40 + MAP_PRIVATE = 0x2 + MAP_RENAME = 0x20 + MAP_RESERVED0080 = 0x80 + MAP_SHARED = 0x1 + MCL_CURRENT = 0x1 + MCL_FUTURE = 0x2 + MSG_CTRUNC = 0x20 + MSG_DONTROUTE = 0x4 + MSG_DONTWAIT = 0x80 + MSG_EOF = 0x100 + MSG_EOR = 0x8 + MSG_FLUSH = 0x400 + MSG_HAVEMORE = 0x2000 + MSG_HOLD = 0x800 + MSG_NEEDSA = 0x10000 + MSG_OOB = 0x1 + MSG_PEEK = 0x2 + MSG_RCVMORE = 0x4000 + MSG_SEND = 0x1000 + MSG_TRUNC = 0x10 + MSG_WAITALL = 0x40 + MSG_WAITSTREAM = 0x200 + MS_ASYNC = 0x1 + MS_DEACTIVATE = 0x8 + MS_INVALIDATE = 0x2 + MS_KILLPAGES = 0x4 + MS_SYNC = 0x10 + NAME_MAX = 0xff + NET_RT_DUMP = 0x1 + NET_RT_DUMP2 = 0x7 + NET_RT_FLAGS = 0x2 + NET_RT_IFLIST = 0x3 + NET_RT_IFLIST2 = 0x6 + NET_RT_MAXID = 0xa + NET_RT_STAT = 0x4 + NET_RT_TRASH = 0x5 + NOFLSH = 0x80000000 + NOTE_ABSOLUTE = 0x8 + NOTE_ATTRIB = 0x8 + NOTE_BACKGROUND = 0x40 + NOTE_CHILD = 0x4 + NOTE_CRITICAL = 0x20 + NOTE_DELETE = 0x1 + NOTE_EXEC = 0x20000000 + NOTE_EXIT = 0x80000000 + NOTE_EXITSTATUS = 0x4000000 + NOTE_EXIT_CSERROR = 0x40000 + NOTE_EXIT_DECRYPTFAIL = 0x10000 + NOTE_EXIT_DETAIL = 0x2000000 + NOTE_EXIT_DETAIL_MASK = 0x70000 + NOTE_EXIT_MEMORY = 0x20000 + NOTE_EXIT_REPARENTED = 0x80000 + NOTE_EXTEND = 0x4 + NOTE_FFAND = 0x40000000 + NOTE_FFCOPY = 0xc0000000 + NOTE_FFCTRLMASK = 0xc0000000 + NOTE_FFLAGSMASK = 0xffffff + NOTE_FFNOP = 0x0 + NOTE_FFOR = 0x80000000 + NOTE_FORK = 0x40000000 + NOTE_LEEWAY = 0x10 + NOTE_LINK = 0x10 + NOTE_LOWAT = 0x1 + NOTE_NONE = 0x80 + NOTE_NSECONDS = 0x4 + NOTE_PCTRLMASK = -0x100000 + NOTE_PDATAMASK = 0xfffff + NOTE_REAP = 0x10000000 + NOTE_RENAME = 0x20 + NOTE_REVOKE = 0x40 + NOTE_SECONDS = 0x1 + NOTE_SIGNAL = 0x8000000 + NOTE_TRACK = 0x1 + NOTE_TRACKERR = 0x2 + NOTE_TRIGGER = 0x1000000 + NOTE_USECONDS = 0x2 + NOTE_VM_ERROR = 0x10000000 + NOTE_VM_PRESSURE = 0x80000000 + NOTE_VM_PRESSURE_SUDDEN_TERMINATE = 0x20000000 + NOTE_VM_PRESSURE_TERMINATE = 0x40000000 + NOTE_WRITE = 0x2 + OCRNL = 0x10 + OFDEL = 0x20000 + OFILL = 0x80 + ONLCR = 0x2 + ONLRET = 0x40 + ONOCR = 0x20 + ONOEOT = 0x8 + OPOST = 0x1 + O_ACCMODE = 0x3 + O_ALERT = 0x20000000 + O_APPEND = 0x8 + O_ASYNC = 0x40 + O_CLOEXEC = 0x1000000 + O_CREAT = 0x200 + O_DIRECTORY = 0x100000 + O_DP_GETRAWENCRYPTED = 0x1 + O_DSYNC = 0x400000 + O_EVTONLY = 0x8000 + O_EXCL = 0x800 + O_EXLOCK = 0x20 + O_FSYNC = 0x80 + 
O_NDELAY = 0x4 + O_NOCTTY = 0x20000 + O_NOFOLLOW = 0x100 + O_NONBLOCK = 0x4 + O_POPUP = 0x80000000 + O_RDONLY = 0x0 + O_RDWR = 0x2 + O_SHLOCK = 0x10 + O_SYMLINK = 0x200000 + O_SYNC = 0x80 + O_TRUNC = 0x400 + O_WRONLY = 0x1 + PARENB = 0x1000 + PARMRK = 0x8 + PARODD = 0x2000 + PENDIN = 0x20000000 + PRIO_PGRP = 0x1 + PRIO_PROCESS = 0x0 + PRIO_USER = 0x2 + PROT_EXEC = 0x4 + PROT_NONE = 0x0 + PROT_READ = 0x1 + PROT_WRITE = 0x2 + PT_ATTACH = 0xa + PT_ATTACHEXC = 0xe + PT_CONTINUE = 0x7 + PT_DENY_ATTACH = 0x1f + PT_DETACH = 0xb + PT_FIRSTMACH = 0x20 + PT_FORCEQUOTA = 0x1e + PT_KILL = 0x8 + PT_READ_D = 0x2 + PT_READ_I = 0x1 + PT_READ_U = 0x3 + PT_SIGEXC = 0xc + PT_STEP = 0x9 + PT_THUPDATE = 0xd + PT_TRACE_ME = 0x0 + PT_WRITE_D = 0x5 + PT_WRITE_I = 0x4 + PT_WRITE_U = 0x6 + RLIMIT_AS = 0x5 + RLIMIT_CORE = 0x4 + RLIMIT_CPU = 0x0 + RLIMIT_CPU_USAGE_MONITOR = 0x2 + RLIMIT_DATA = 0x2 + RLIMIT_FSIZE = 0x1 + RLIMIT_NOFILE = 0x8 + RLIMIT_STACK = 0x3 + RLIM_INFINITY = 0x7fffffffffffffff + RTAX_AUTHOR = 0x6 + RTAX_BRD = 0x7 + RTAX_DST = 0x0 + RTAX_GATEWAY = 0x1 + RTAX_GENMASK = 0x3 + RTAX_IFA = 0x5 + RTAX_IFP = 0x4 + RTAX_MAX = 0x8 + RTAX_NETMASK = 0x2 + RTA_AUTHOR = 0x40 + RTA_BRD = 0x80 + RTA_DST = 0x1 + RTA_GATEWAY = 0x2 + RTA_GENMASK = 0x8 + RTA_IFA = 0x20 + RTA_IFP = 0x10 + RTA_NETMASK = 0x4 + RTF_BLACKHOLE = 0x1000 + RTF_BROADCAST = 0x400000 + RTF_CLONING = 0x100 + RTF_CONDEMNED = 0x2000000 + RTF_DELCLONE = 0x80 + RTF_DONE = 0x40 + RTF_DYNAMIC = 0x10 + RTF_GATEWAY = 0x2 + RTF_HOST = 0x4 + RTF_IFREF = 0x4000000 + RTF_IFSCOPE = 0x1000000 + RTF_LLINFO = 0x400 + RTF_LOCAL = 0x200000 + RTF_MODIFIED = 0x20 + RTF_MULTICAST = 0x800000 + RTF_NOIFREF = 0x2000 + RTF_PINNED = 0x100000 + RTF_PRCLONING = 0x10000 + RTF_PROTO1 = 0x8000 + RTF_PROTO2 = 0x4000 + RTF_PROTO3 = 0x40000 + RTF_PROXY = 0x8000000 + RTF_REJECT = 0x8 + RTF_ROUTER = 0x10000000 + RTF_STATIC = 0x800 + RTF_UP = 0x1 + RTF_WASCLONED = 0x20000 + RTF_XRESOLVE = 0x200 + RTM_ADD = 0x1 + RTM_CHANGE = 0x3 + RTM_DELADDR = 0xd + RTM_DELETE = 0x2 + RTM_DELMADDR = 0x10 + RTM_GET = 0x4 + RTM_GET2 = 0x14 + RTM_IFINFO = 0xe + RTM_IFINFO2 = 0x12 + RTM_LOCK = 0x8 + RTM_LOSING = 0x5 + RTM_MISS = 0x7 + RTM_NEWADDR = 0xc + RTM_NEWMADDR = 0xf + RTM_NEWMADDR2 = 0x13 + RTM_OLDADD = 0x9 + RTM_OLDDEL = 0xa + RTM_REDIRECT = 0x6 + RTM_RESOLVE = 0xb + RTM_RTTUNIT = 0xf4240 + RTM_VERSION = 0x5 + RTV_EXPIRE = 0x4 + RTV_HOPCOUNT = 0x2 + RTV_MTU = 0x1 + RTV_RPIPE = 0x8 + RTV_RTT = 0x40 + RTV_RTTVAR = 0x80 + RTV_SPIPE = 0x10 + RTV_SSTHRESH = 0x20 + RUSAGE_CHILDREN = -0x1 + RUSAGE_SELF = 0x0 + SCM_CREDS = 0x3 + SCM_RIGHTS = 0x1 + SCM_TIMESTAMP = 0x2 + SCM_TIMESTAMP_MONOTONIC = 0x4 + SHUT_RD = 0x0 + SHUT_RDWR = 0x2 + SHUT_WR = 0x1 + SIOCADDMULTI = 0x80206931 + SIOCAIFADDR = 0x8040691a + SIOCARPIPLL = 0xc0206928 + SIOCATMARK = 0x40047307 + SIOCAUTOADDR = 0xc0206926 + SIOCAUTONETMASK = 0x80206927 + SIOCDELMULTI = 0x80206932 + SIOCDIFADDR = 0x80206919 + SIOCDIFPHYADDR = 0x80206941 + SIOCGDRVSPEC = 0xc028697b + SIOCGETVLAN = 0xc020697f + SIOCGHIWAT = 0x40047301 + SIOCGIFADDR = 0xc0206921 + SIOCGIFALTMTU = 0xc0206948 + SIOCGIFASYNCMAP = 0xc020697c + SIOCGIFBOND = 0xc0206947 + SIOCGIFBRDADDR = 0xc0206923 + SIOCGIFCAP = 0xc020695b + SIOCGIFCONF = 0xc00c6924 + SIOCGIFDEVMTU = 0xc0206944 + SIOCGIFDSTADDR = 0xc0206922 + SIOCGIFFLAGS = 0xc0206911 + SIOCGIFGENERIC = 0xc020693a + SIOCGIFKPI = 0xc0206987 + SIOCGIFMAC = 0xc0206982 + SIOCGIFMEDIA = 0xc02c6938 + SIOCGIFMETRIC = 0xc0206917 + SIOCGIFMTU = 0xc0206933 + SIOCGIFNETMASK = 0xc0206925 + SIOCGIFPDSTADDR = 0xc0206940 + SIOCGIFPHYS = 0xc0206935 
+ SIOCGIFPSRCADDR = 0xc020693f + SIOCGIFSTATUS = 0xc331693d + SIOCGIFVLAN = 0xc020697f + SIOCGIFWAKEFLAGS = 0xc0206988 + SIOCGLOWAT = 0x40047303 + SIOCGPGRP = 0x40047309 + SIOCIFCREATE = 0xc0206978 + SIOCIFCREATE2 = 0xc020697a + SIOCIFDESTROY = 0x80206979 + SIOCIFGCLONERS = 0xc0106981 + SIOCRSLVMULTI = 0xc010693b + SIOCSDRVSPEC = 0x8028697b + SIOCSETVLAN = 0x8020697e + SIOCSHIWAT = 0x80047300 + SIOCSIFADDR = 0x8020690c + SIOCSIFALTMTU = 0x80206945 + SIOCSIFASYNCMAP = 0x8020697d + SIOCSIFBOND = 0x80206946 + SIOCSIFBRDADDR = 0x80206913 + SIOCSIFCAP = 0x8020695a + SIOCSIFDSTADDR = 0x8020690e + SIOCSIFFLAGS = 0x80206910 + SIOCSIFGENERIC = 0x80206939 + SIOCSIFKPI = 0x80206986 + SIOCSIFLLADDR = 0x8020693c + SIOCSIFMAC = 0x80206983 + SIOCSIFMEDIA = 0xc0206937 + SIOCSIFMETRIC = 0x80206918 + SIOCSIFMTU = 0x80206934 + SIOCSIFNETMASK = 0x80206916 + SIOCSIFPHYADDR = 0x8040693e + SIOCSIFPHYS = 0x80206936 + SIOCSIFVLAN = 0x8020697e + SIOCSLOWAT = 0x80047302 + SIOCSPGRP = 0x80047308 + SOCK_DGRAM = 0x2 + SOCK_MAXADDRLEN = 0xff + SOCK_RAW = 0x3 + SOCK_RDM = 0x4 + SOCK_SEQPACKET = 0x5 + SOCK_STREAM = 0x1 + SOL_SOCKET = 0xffff + SOMAXCONN = 0x80 + SO_ACCEPTCONN = 0x2 + SO_BROADCAST = 0x20 + SO_DEBUG = 0x1 + SO_DONTROUTE = 0x10 + SO_DONTTRUNC = 0x2000 + SO_ERROR = 0x1007 + SO_KEEPALIVE = 0x8 + SO_LABEL = 0x1010 + SO_LINGER = 0x80 + SO_LINGER_SEC = 0x1080 + SO_NKE = 0x1021 + SO_NOADDRERR = 0x1023 + SO_NOSIGPIPE = 0x1022 + SO_NOTIFYCONFLICT = 0x1026 + SO_NP_EXTENSIONS = 0x1083 + SO_NREAD = 0x1020 + SO_NUMRCVPKT = 0x1112 + SO_NWRITE = 0x1024 + SO_OOBINLINE = 0x100 + SO_PEERLABEL = 0x1011 + SO_RANDOMPORT = 0x1082 + SO_RCVBUF = 0x1002 + SO_RCVLOWAT = 0x1004 + SO_RCVTIMEO = 0x1006 + SO_REUSEADDR = 0x4 + SO_REUSEPORT = 0x200 + SO_REUSESHAREUID = 0x1025 + SO_SNDBUF = 0x1001 + SO_SNDLOWAT = 0x1003 + SO_SNDTIMEO = 0x1005 + SO_TIMESTAMP = 0x400 + SO_TIMESTAMP_MONOTONIC = 0x800 + SO_TYPE = 0x1008 + SO_UPCALLCLOSEWAIT = 0x1027 + SO_USELOOPBACK = 0x40 + SO_WANTMORE = 0x4000 + SO_WANTOOBFLAG = 0x8000 + S_IEXEC = 0x40 + S_IFBLK = 0x6000 + S_IFCHR = 0x2000 + S_IFDIR = 0x4000 + S_IFIFO = 0x1000 + S_IFLNK = 0xa000 + S_IFMT = 0xf000 + S_IFREG = 0x8000 + S_IFSOCK = 0xc000 + S_IFWHT = 0xe000 + S_IREAD = 0x100 + S_IRGRP = 0x20 + S_IROTH = 0x4 + S_IRUSR = 0x100 + S_IRWXG = 0x38 + S_IRWXO = 0x7 + S_IRWXU = 0x1c0 + S_ISGID = 0x400 + S_ISTXT = 0x200 + S_ISUID = 0x800 + S_ISVTX = 0x200 + S_IWGRP = 0x10 + S_IWOTH = 0x2 + S_IWRITE = 0x80 + S_IWUSR = 0x80 + S_IXGRP = 0x8 + S_IXOTH = 0x1 + S_IXUSR = 0x40 + TCIFLUSH = 0x1 + TCIOFLUSH = 0x3 + TCOFLUSH = 0x2 + TCP_CONNECTIONTIMEOUT = 0x20 + TCP_ENABLE_ECN = 0x104 + TCP_KEEPALIVE = 0x10 + TCP_KEEPCNT = 0x102 + TCP_KEEPINTVL = 0x101 + TCP_MAXHLEN = 0x3c + TCP_MAXOLEN = 0x28 + TCP_MAXSEG = 0x2 + TCP_MAXWIN = 0xffff + TCP_MAX_SACK = 0x4 + TCP_MAX_WINSHIFT = 0xe + TCP_MINMSS = 0xd8 + TCP_MSS = 0x200 + TCP_NODELAY = 0x1 + TCP_NOOPT = 0x8 + TCP_NOPUSH = 0x4 + TCP_NOTSENT_LOWAT = 0x201 + TCP_RXT_CONNDROPTIME = 0x80 + TCP_RXT_FINDROP = 0x100 + TCP_SENDMOREACKS = 0x103 + TCSAFLUSH = 0x2 + TIOCCBRK = 0x2000747a + TIOCCDTR = 0x20007478 + TIOCCONS = 0x80047462 + TIOCDCDTIMESTAMP = 0x40107458 + TIOCDRAIN = 0x2000745e + TIOCDSIMICROCODE = 0x20007455 + TIOCEXCL = 0x2000740d + TIOCEXT = 0x80047460 + TIOCFLUSH = 0x80047410 + TIOCGDRAINWAIT = 0x40047456 + TIOCGETA = 0x40487413 + TIOCGETD = 0x4004741a + TIOCGPGRP = 0x40047477 + TIOCGWINSZ = 0x40087468 + TIOCIXOFF = 0x20007480 + TIOCIXON = 0x20007481 + TIOCMBIC = 0x8004746b + TIOCMBIS = 0x8004746c + TIOCMGDTRWAIT = 0x4004745a + TIOCMGET = 0x4004746a + TIOCMODG = 
0x40047403 + TIOCMODS = 0x80047404 + TIOCMSDTRWAIT = 0x8004745b + TIOCMSET = 0x8004746d + TIOCM_CAR = 0x40 + TIOCM_CD = 0x40 + TIOCM_CTS = 0x20 + TIOCM_DSR = 0x100 + TIOCM_DTR = 0x2 + TIOCM_LE = 0x1 + TIOCM_RI = 0x80 + TIOCM_RNG = 0x80 + TIOCM_RTS = 0x4 + TIOCM_SR = 0x10 + TIOCM_ST = 0x8 + TIOCNOTTY = 0x20007471 + TIOCNXCL = 0x2000740e + TIOCOUTQ = 0x40047473 + TIOCPKT = 0x80047470 + TIOCPKT_DATA = 0x0 + TIOCPKT_DOSTOP = 0x20 + TIOCPKT_FLUSHREAD = 0x1 + TIOCPKT_FLUSHWRITE = 0x2 + TIOCPKT_IOCTL = 0x40 + TIOCPKT_NOSTOP = 0x10 + TIOCPKT_START = 0x8 + TIOCPKT_STOP = 0x4 + TIOCPTYGNAME = 0x40807453 + TIOCPTYGRANT = 0x20007454 + TIOCPTYUNLK = 0x20007452 + TIOCREMOTE = 0x80047469 + TIOCSBRK = 0x2000747b + TIOCSCONS = 0x20007463 + TIOCSCTTY = 0x20007461 + TIOCSDRAINWAIT = 0x80047457 + TIOCSDTR = 0x20007479 + TIOCSETA = 0x80487414 + TIOCSETAF = 0x80487416 + TIOCSETAW = 0x80487415 + TIOCSETD = 0x8004741b + TIOCSIG = 0x2000745f + TIOCSPGRP = 0x80047476 + TIOCSTART = 0x2000746e + TIOCSTAT = 0x20007465 + TIOCSTI = 0x80017472 + TIOCSTOP = 0x2000746f + TIOCSWINSZ = 0x80087467 + TIOCTIMESTAMP = 0x40107459 + TIOCUCNTL = 0x80047466 + TOSTOP = 0x400000 + VDISCARD = 0xf + VDSUSP = 0xb + VEOF = 0x0 + VEOL = 0x1 + VEOL2 = 0x2 + VERASE = 0x3 + VINTR = 0x8 + VKILL = 0x5 + VLNEXT = 0xe + VMIN = 0x10 + VQUIT = 0x9 + VREPRINT = 0x6 + VSTART = 0xc + VSTATUS = 0x12 + VSTOP = 0xd + VSUSP = 0xa + VT0 = 0x0 + VT1 = 0x10000 + VTDLY = 0x10000 + VTIME = 0x11 + VWERASE = 0x4 + WCONTINUED = 0x10 + WCOREFLAG = 0x80 + WEXITED = 0x4 + WNOHANG = 0x1 + WNOWAIT = 0x20 + WORDSIZE = 0x40 + WSTOPPED = 0x8 + WUNTRACED = 0x2 +) + +// Errors +const ( + E2BIG = syscall.Errno(0x7) + EACCES = syscall.Errno(0xd) + EADDRINUSE = syscall.Errno(0x30) + EADDRNOTAVAIL = syscall.Errno(0x31) + EAFNOSUPPORT = syscall.Errno(0x2f) + EAGAIN = syscall.Errno(0x23) + EALREADY = syscall.Errno(0x25) + EAUTH = syscall.Errno(0x50) + EBADARCH = syscall.Errno(0x56) + EBADEXEC = syscall.Errno(0x55) + EBADF = syscall.Errno(0x9) + EBADMACHO = syscall.Errno(0x58) + EBADMSG = syscall.Errno(0x5e) + EBADRPC = syscall.Errno(0x48) + EBUSY = syscall.Errno(0x10) + ECANCELED = syscall.Errno(0x59) + ECHILD = syscall.Errno(0xa) + ECONNABORTED = syscall.Errno(0x35) + ECONNREFUSED = syscall.Errno(0x3d) + ECONNRESET = syscall.Errno(0x36) + EDEADLK = syscall.Errno(0xb) + EDESTADDRREQ = syscall.Errno(0x27) + EDEVERR = syscall.Errno(0x53) + EDOM = syscall.Errno(0x21) + EDQUOT = syscall.Errno(0x45) + EEXIST = syscall.Errno(0x11) + EFAULT = syscall.Errno(0xe) + EFBIG = syscall.Errno(0x1b) + EFTYPE = syscall.Errno(0x4f) + EHOSTDOWN = syscall.Errno(0x40) + EHOSTUNREACH = syscall.Errno(0x41) + EIDRM = syscall.Errno(0x5a) + EILSEQ = syscall.Errno(0x5c) + EINPROGRESS = syscall.Errno(0x24) + EINTR = syscall.Errno(0x4) + EINVAL = syscall.Errno(0x16) + EIO = syscall.Errno(0x5) + EISCONN = syscall.Errno(0x38) + EISDIR = syscall.Errno(0x15) + ELAST = syscall.Errno(0x6a) + ELOOP = syscall.Errno(0x3e) + EMFILE = syscall.Errno(0x18) + EMLINK = syscall.Errno(0x1f) + EMSGSIZE = syscall.Errno(0x28) + EMULTIHOP = syscall.Errno(0x5f) + ENAMETOOLONG = syscall.Errno(0x3f) + ENEEDAUTH = syscall.Errno(0x51) + ENETDOWN = syscall.Errno(0x32) + ENETRESET = syscall.Errno(0x34) + ENETUNREACH = syscall.Errno(0x33) + ENFILE = syscall.Errno(0x17) + ENOATTR = syscall.Errno(0x5d) + ENOBUFS = syscall.Errno(0x37) + ENODATA = syscall.Errno(0x60) + ENODEV = syscall.Errno(0x13) + ENOENT = syscall.Errno(0x2) + ENOEXEC = syscall.Errno(0x8) + ENOLCK = syscall.Errno(0x4d) + ENOLINK = syscall.Errno(0x61) + ENOMEM = 
syscall.Errno(0xc) + ENOMSG = syscall.Errno(0x5b) + ENOPOLICY = syscall.Errno(0x67) + ENOPROTOOPT = syscall.Errno(0x2a) + ENOSPC = syscall.Errno(0x1c) + ENOSR = syscall.Errno(0x62) + ENOSTR = syscall.Errno(0x63) + ENOSYS = syscall.Errno(0x4e) + ENOTBLK = syscall.Errno(0xf) + ENOTCONN = syscall.Errno(0x39) + ENOTDIR = syscall.Errno(0x14) + ENOTEMPTY = syscall.Errno(0x42) + ENOTRECOVERABLE = syscall.Errno(0x68) + ENOTSOCK = syscall.Errno(0x26) + ENOTSUP = syscall.Errno(0x2d) + ENOTTY = syscall.Errno(0x19) + ENXIO = syscall.Errno(0x6) + EOPNOTSUPP = syscall.Errno(0x66) + EOVERFLOW = syscall.Errno(0x54) + EOWNERDEAD = syscall.Errno(0x69) + EPERM = syscall.Errno(0x1) + EPFNOSUPPORT = syscall.Errno(0x2e) + EPIPE = syscall.Errno(0x20) + EPROCLIM = syscall.Errno(0x43) + EPROCUNAVAIL = syscall.Errno(0x4c) + EPROGMISMATCH = syscall.Errno(0x4b) + EPROGUNAVAIL = syscall.Errno(0x4a) + EPROTO = syscall.Errno(0x64) + EPROTONOSUPPORT = syscall.Errno(0x2b) + EPROTOTYPE = syscall.Errno(0x29) + EPWROFF = syscall.Errno(0x52) + EQFULL = syscall.Errno(0x6a) + ERANGE = syscall.Errno(0x22) + EREMOTE = syscall.Errno(0x47) + EROFS = syscall.Errno(0x1e) + ERPCMISMATCH = syscall.Errno(0x49) + ESHLIBVERS = syscall.Errno(0x57) + ESHUTDOWN = syscall.Errno(0x3a) + ESOCKTNOSUPPORT = syscall.Errno(0x2c) + ESPIPE = syscall.Errno(0x1d) + ESRCH = syscall.Errno(0x3) + ESTALE = syscall.Errno(0x46) + ETIME = syscall.Errno(0x65) + ETIMEDOUT = syscall.Errno(0x3c) + ETOOMANYREFS = syscall.Errno(0x3b) + ETXTBSY = syscall.Errno(0x1a) + EUSERS = syscall.Errno(0x44) + EWOULDBLOCK = syscall.Errno(0x23) + EXDEV = syscall.Errno(0x12) +) + +// Signals +const ( + SIGABRT = syscall.Signal(0x6) + SIGALRM = syscall.Signal(0xe) + SIGBUS = syscall.Signal(0xa) + SIGCHLD = syscall.Signal(0x14) + SIGCONT = syscall.Signal(0x13) + SIGEMT = syscall.Signal(0x7) + SIGFPE = syscall.Signal(0x8) + SIGHUP = syscall.Signal(0x1) + SIGILL = syscall.Signal(0x4) + SIGINFO = syscall.Signal(0x1d) + SIGINT = syscall.Signal(0x2) + SIGIO = syscall.Signal(0x17) + SIGIOT = syscall.Signal(0x6) + SIGKILL = syscall.Signal(0x9) + SIGPIPE = syscall.Signal(0xd) + SIGPROF = syscall.Signal(0x1b) + SIGQUIT = syscall.Signal(0x3) + SIGSEGV = syscall.Signal(0xb) + SIGSTOP = syscall.Signal(0x11) + SIGSYS = syscall.Signal(0xc) + SIGTERM = syscall.Signal(0xf) + SIGTRAP = syscall.Signal(0x5) + SIGTSTP = syscall.Signal(0x12) + SIGTTIN = syscall.Signal(0x15) + SIGTTOU = syscall.Signal(0x16) + SIGURG = syscall.Signal(0x10) + SIGUSR1 = syscall.Signal(0x1e) + SIGUSR2 = syscall.Signal(0x1f) + SIGVTALRM = syscall.Signal(0x1a) + SIGWINCH = syscall.Signal(0x1c) + SIGXCPU = syscall.Signal(0x18) + SIGXFSZ = syscall.Signal(0x19) +) + +// Error table +var errors = [...]string{ + 1: "operation not permitted", + 2: "no such file or directory", + 3: "no such process", + 4: "interrupted system call", + 5: "input/output error", + 6: "device not configured", + 7: "argument list too long", + 8: "exec format error", + 9: "bad file descriptor", + 10: "no child processes", + 11: "resource deadlock avoided", + 12: "cannot allocate memory", + 13: "permission denied", + 14: "bad address", + 15: "block device required", + 16: "resource busy", + 17: "file exists", + 18: "cross-device link", + 19: "operation not supported by device", + 20: "not a directory", + 21: "is a directory", + 22: "invalid argument", + 23: "too many open files in system", + 24: "too many open files", + 25: "inappropriate ioctl for device", + 26: "text file busy", + 27: "file too large", + 28: "no space left on device", + 29: "illegal 
seek", + 30: "read-only file system", + 31: "too many links", + 32: "broken pipe", + 33: "numerical argument out of domain", + 34: "result too large", + 35: "resource temporarily unavailable", + 36: "operation now in progress", + 37: "operation already in progress", + 38: "socket operation on non-socket", + 39: "destination address required", + 40: "message too long", + 41: "protocol wrong type for socket", + 42: "protocol not available", + 43: "protocol not supported", + 44: "socket type not supported", + 45: "operation not supported", + 46: "protocol family not supported", + 47: "address family not supported by protocol family", + 48: "address already in use", + 49: "can't assign requested address", + 50: "network is down", + 51: "network is unreachable", + 52: "network dropped connection on reset", + 53: "software caused connection abort", + 54: "connection reset by peer", + 55: "no buffer space available", + 56: "socket is already connected", + 57: "socket is not connected", + 58: "can't send after socket shutdown", + 59: "too many references: can't splice", + 60: "operation timed out", + 61: "connection refused", + 62: "too many levels of symbolic links", + 63: "file name too long", + 64: "host is down", + 65: "no route to host", + 66: "directory not empty", + 67: "too many processes", + 68: "too many users", + 69: "disc quota exceeded", + 70: "stale NFS file handle", + 71: "too many levels of remote in path", + 72: "RPC struct is bad", + 73: "RPC version wrong", + 74: "RPC prog. not avail", + 75: "program version wrong", + 76: "bad procedure for program", + 77: "no locks available", + 78: "function not implemented", + 79: "inappropriate file type or format", + 80: "authentication error", + 81: "need authenticator", + 82: "device power is off", + 83: "device error", + 84: "value too large to be stored in data type", + 85: "bad executable (or shared library)", + 86: "bad CPU type in executable", + 87: "shared library version mismatch", + 88: "malformed Mach-o file", + 89: "operation canceled", + 90: "identifier removed", + 91: "no message of desired type", + 92: "illegal byte sequence", + 93: "attribute not found", + 94: "bad message", + 95: "EMULTIHOP (Reserved)", + 96: "no message available on STREAM", + 97: "ENOLINK (Reserved)", + 98: "no STREAM resources", + 99: "not a STREAM", + 100: "protocol error", + 101: "STREAM ioctl timeout", + 102: "operation not supported on socket", + 103: "policy not found", + 104: "state not recoverable", + 105: "previous owner died", + 106: "interface output queue is full", +} + +// Signal table +var signals = [...]string{ + 1: "hangup", + 2: "interrupt", + 3: "quit", + 4: "illegal instruction", + 5: "trace/BPT trap", + 6: "abort trap", + 7: "EMT trap", + 8: "floating point exception", + 9: "killed", + 10: "bus error", + 11: "segmentation fault", + 12: "bad system call", + 13: "broken pipe", + 14: "alarm clock", + 15: "terminated", + 16: "urgent I/O condition", + 17: "suspended (signal)", + 18: "suspended", + 19: "continued", + 20: "child exited", + 21: "stopped (tty input)", + 22: "stopped (tty output)", + 23: "I/O possible", + 24: "cputime limit exceeded", + 25: "filesize limit exceeded", + 26: "virtual timer expired", + 27: "profiling timer expired", + 28: "window size changes", + 29: "information request", + 30: "user defined signal 1", + 31: "user defined signal 2", +} diff --git a/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zerrors_darwin_arm.go 
b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zerrors_darwin_arm.go new file mode 100644 index 000000000..a410e88ed --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zerrors_darwin_arm.go @@ -0,0 +1,1293 @@ +// mkerrors.sh +// MACHINE GENERATED BY THE COMMAND ABOVE; DO NOT EDIT + +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs -- _const.go + +// +build arm,darwin + +package unix + +import "syscall" + +const ( + AF_APPLETALK = 0x10 + AF_CCITT = 0xa + AF_CHAOS = 0x5 + AF_CNT = 0x15 + AF_COIP = 0x14 + AF_DATAKIT = 0x9 + AF_DECnet = 0xc + AF_DLI = 0xd + AF_E164 = 0x1c + AF_ECMA = 0x8 + AF_HYLINK = 0xf + AF_IEEE80211 = 0x25 + AF_IMPLINK = 0x3 + AF_INET = 0x2 + AF_INET6 = 0x1e + AF_IPX = 0x17 + AF_ISDN = 0x1c + AF_ISO = 0x7 + AF_LAT = 0xe + AF_LINK = 0x12 + AF_LOCAL = 0x1 + AF_MAX = 0x28 + AF_NATM = 0x1f + AF_NDRV = 0x1b + AF_NETBIOS = 0x21 + AF_NS = 0x6 + AF_OSI = 0x7 + AF_PPP = 0x22 + AF_PUP = 0x4 + AF_RESERVED_36 = 0x24 + AF_ROUTE = 0x11 + AF_SIP = 0x18 + AF_SNA = 0xb + AF_SYSTEM = 0x20 + AF_UNIX = 0x1 + AF_UNSPEC = 0x0 + AF_UTUN = 0x26 + B0 = 0x0 + B110 = 0x6e + B115200 = 0x1c200 + B1200 = 0x4b0 + B134 = 0x86 + B14400 = 0x3840 + B150 = 0x96 + B1800 = 0x708 + B19200 = 0x4b00 + B200 = 0xc8 + B230400 = 0x38400 + B2400 = 0x960 + B28800 = 0x7080 + B300 = 0x12c + B38400 = 0x9600 + B4800 = 0x12c0 + B50 = 0x32 + B57600 = 0xe100 + B600 = 0x258 + B7200 = 0x1c20 + B75 = 0x4b + B76800 = 0x12c00 + B9600 = 0x2580 + BIOCFLUSH = 0x20004268 + BIOCGBLEN = 0x40044266 + BIOCGDLT = 0x4004426a + BIOCGDLTLIST = 0xc00c4279 + BIOCGETIF = 0x4020426b + BIOCGHDRCMPLT = 0x40044274 + BIOCGRSIG = 0x40044272 + BIOCGRTIMEOUT = 0x4010426e + BIOCGSEESENT = 0x40044276 + BIOCGSTATS = 0x4008426f + BIOCIMMEDIATE = 0x80044270 + BIOCPROMISC = 0x20004269 + BIOCSBLEN = 0xc0044266 + BIOCSDLT = 0x80044278 + BIOCSETF = 0x80104267 + BIOCSETIF = 0x8020426c + BIOCSHDRCMPLT = 0x80044275 + BIOCSRSIG = 0x80044273 + BIOCSRTIMEOUT = 0x8010426d + BIOCSSEESENT = 0x80044277 + BIOCVERSION = 0x40044271 + BPF_A = 0x10 + BPF_ABS = 0x20 + BPF_ADD = 0x0 + BPF_ALIGNMENT = 0x4 + BPF_ALU = 0x4 + BPF_AND = 0x50 + BPF_B = 0x10 + BPF_DIV = 0x30 + BPF_H = 0x8 + BPF_IMM = 0x0 + BPF_IND = 0x40 + BPF_JA = 0x0 + BPF_JEQ = 0x10 + BPF_JGE = 0x30 + BPF_JGT = 0x20 + BPF_JMP = 0x5 + BPF_JSET = 0x40 + BPF_K = 0x0 + BPF_LD = 0x0 + BPF_LDX = 0x1 + BPF_LEN = 0x80 + BPF_LSH = 0x60 + BPF_MAJOR_VERSION = 0x1 + BPF_MAXBUFSIZE = 0x80000 + BPF_MAXINSNS = 0x200 + BPF_MEM = 0x60 + BPF_MEMWORDS = 0x10 + BPF_MINBUFSIZE = 0x20 + BPF_MINOR_VERSION = 0x1 + BPF_MISC = 0x7 + BPF_MSH = 0xa0 + BPF_MUL = 0x20 + BPF_NEG = 0x80 + BPF_OR = 0x40 + BPF_RELEASE = 0x30bb6 + BPF_RET = 0x6 + BPF_RSH = 0x70 + BPF_ST = 0x2 + BPF_STX = 0x3 + BPF_SUB = 0x10 + BPF_TAX = 0x0 + BPF_TXA = 0x80 + BPF_W = 0x0 + BPF_X = 0x8 + BRKINT = 0x2 + CFLUSH = 0xf + CLOCAL = 0x8000 + CREAD = 0x800 + CS5 = 0x0 + CS6 = 0x100 + CS7 = 0x200 + CS8 = 0x300 + CSIZE = 0x300 + CSTART = 0x11 + CSTATUS = 0x14 + CSTOP = 0x13 + CSTOPB = 0x400 + CSUSP = 0x1a + CTL_MAXNAME = 0xc + CTL_NET = 0x4 + DLT_APPLE_IP_OVER_IEEE1394 = 0x8a + DLT_ARCNET = 0x7 + DLT_ATM_CLIP = 0x13 + DLT_ATM_RFC1483 = 0xb + DLT_AX25 = 0x3 + DLT_CHAOS = 0x5 + DLT_CHDLC = 0x68 + DLT_C_HDLC = 0x68 + DLT_EN10MB = 0x1 + DLT_EN3MB = 0x2 + DLT_FDDI = 0xa + DLT_IEEE802 = 0x6 + DLT_IEEE802_11 = 0x69 + DLT_IEEE802_11_RADIO = 0x7f + DLT_IEEE802_11_RADIO_AVS = 0xa3 + DLT_LINUX_SLL = 0x71 + DLT_LOOP = 0x6c + DLT_NULL = 0x0 + DLT_PFLOG = 0x75 + DLT_PFSYNC = 0x12 + DLT_PPP = 0x9 + DLT_PPP_BSDOS = 0x10 
+ DLT_PPP_SERIAL = 0x32 + DLT_PRONET = 0x4 + DLT_RAW = 0xc + DLT_SLIP = 0x8 + DLT_SLIP_BSDOS = 0xf + DT_BLK = 0x6 + DT_CHR = 0x2 + DT_DIR = 0x4 + DT_FIFO = 0x1 + DT_LNK = 0xa + DT_REG = 0x8 + DT_SOCK = 0xc + DT_UNKNOWN = 0x0 + DT_WHT = 0xe + ECHO = 0x8 + ECHOCTL = 0x40 + ECHOE = 0x2 + ECHOK = 0x4 + ECHOKE = 0x1 + ECHONL = 0x10 + ECHOPRT = 0x20 + EVFILT_AIO = -0x3 + EVFILT_FS = -0x9 + EVFILT_MACHPORT = -0x8 + EVFILT_PROC = -0x5 + EVFILT_READ = -0x1 + EVFILT_SIGNAL = -0x6 + EVFILT_SYSCOUNT = 0xe + EVFILT_THREADMARKER = 0xe + EVFILT_TIMER = -0x7 + EVFILT_USER = -0xa + EVFILT_VM = -0xc + EVFILT_VNODE = -0x4 + EVFILT_WRITE = -0x2 + EV_ADD = 0x1 + EV_CLEAR = 0x20 + EV_DELETE = 0x2 + EV_DISABLE = 0x8 + EV_DISPATCH = 0x80 + EV_ENABLE = 0x4 + EV_EOF = 0x8000 + EV_ERROR = 0x4000 + EV_FLAG0 = 0x1000 + EV_FLAG1 = 0x2000 + EV_ONESHOT = 0x10 + EV_OOBAND = 0x2000 + EV_POLL = 0x1000 + EV_RECEIPT = 0x40 + EV_SYSFLAGS = 0xf000 + EXTA = 0x4b00 + EXTB = 0x9600 + EXTPROC = 0x800 + FD_CLOEXEC = 0x1 + FD_SETSIZE = 0x400 + FLUSHO = 0x800000 + F_ADDFILESIGS = 0x3d + F_ADDSIGS = 0x3b + F_ALLOCATEALL = 0x4 + F_ALLOCATECONTIG = 0x2 + F_CHKCLEAN = 0x29 + F_DUPFD = 0x0 + F_DUPFD_CLOEXEC = 0x43 + F_FINDSIGS = 0x4e + F_FLUSH_DATA = 0x28 + F_FREEZE_FS = 0x35 + F_FULLFSYNC = 0x33 + F_GETCODEDIR = 0x48 + F_GETFD = 0x1 + F_GETFL = 0x3 + F_GETLK = 0x7 + F_GETLKPID = 0x42 + F_GETNOSIGPIPE = 0x4a + F_GETOWN = 0x5 + F_GETPATH = 0x32 + F_GETPATH_MTMINFO = 0x47 + F_GETPROTECTIONCLASS = 0x3f + F_GETPROTECTIONLEVEL = 0x4d + F_GLOBAL_NOCACHE = 0x37 + F_LOG2PHYS = 0x31 + F_LOG2PHYS_EXT = 0x41 + F_NOCACHE = 0x30 + F_NODIRECT = 0x3e + F_OK = 0x0 + F_PATHPKG_CHECK = 0x34 + F_PEOFPOSMODE = 0x3 + F_PREALLOCATE = 0x2a + F_RDADVISE = 0x2c + F_RDAHEAD = 0x2d + F_RDLCK = 0x1 + F_SETBACKINGSTORE = 0x46 + F_SETFD = 0x2 + F_SETFL = 0x4 + F_SETLK = 0x8 + F_SETLKW = 0x9 + F_SETLKWTIMEOUT = 0xa + F_SETNOSIGPIPE = 0x49 + F_SETOWN = 0x6 + F_SETPROTECTIONCLASS = 0x40 + F_SETSIZE = 0x2b + F_SINGLE_WRITER = 0x4c + F_THAW_FS = 0x36 + F_TRANSCODEKEY = 0x4b + F_UNLCK = 0x2 + F_VOLPOSMODE = 0x4 + F_WRLCK = 0x3 + HUPCL = 0x4000 + ICANON = 0x100 + ICMP6_FILTER = 0x12 + ICRNL = 0x100 + IEXTEN = 0x400 + IFF_ALLMULTI = 0x200 + IFF_ALTPHYS = 0x4000 + IFF_BROADCAST = 0x2 + IFF_DEBUG = 0x4 + IFF_LINK0 = 0x1000 + IFF_LINK1 = 0x2000 + IFF_LINK2 = 0x4000 + IFF_LOOPBACK = 0x8 + IFF_MULTICAST = 0x8000 + IFF_NOARP = 0x80 + IFF_NOTRAILERS = 0x20 + IFF_OACTIVE = 0x400 + IFF_POINTOPOINT = 0x10 + IFF_PROMISC = 0x100 + IFF_RUNNING = 0x40 + IFF_SIMPLEX = 0x800 + IFF_UP = 0x1 + IFNAMSIZ = 0x10 + IFT_1822 = 0x2 + IFT_AAL5 = 0x31 + IFT_ARCNET = 0x23 + IFT_ARCNETPLUS = 0x24 + IFT_ATM = 0x25 + IFT_BRIDGE = 0xd1 + IFT_CARP = 0xf8 + IFT_CELLULAR = 0xff + IFT_CEPT = 0x13 + IFT_DS3 = 0x1e + IFT_ENC = 0xf4 + IFT_EON = 0x19 + IFT_ETHER = 0x6 + IFT_FAITH = 0x38 + IFT_FDDI = 0xf + IFT_FRELAY = 0x20 + IFT_FRELAYDCE = 0x2c + IFT_GIF = 0x37 + IFT_HDH1822 = 0x3 + IFT_HIPPI = 0x2f + IFT_HSSI = 0x2e + IFT_HY = 0xe + IFT_IEEE1394 = 0x90 + IFT_IEEE8023ADLAG = 0x88 + IFT_ISDNBASIC = 0x14 + IFT_ISDNPRIMARY = 0x15 + IFT_ISO88022LLC = 0x29 + IFT_ISO88023 = 0x7 + IFT_ISO88024 = 0x8 + IFT_ISO88025 = 0x9 + IFT_ISO88026 = 0xa + IFT_L2VLAN = 0x87 + IFT_LAPB = 0x10 + IFT_LOCALTALK = 0x2a + IFT_LOOP = 0x18 + IFT_MIOX25 = 0x26 + IFT_MODEM = 0x30 + IFT_NSIP = 0x1b + IFT_OTHER = 0x1 + IFT_P10 = 0xc + IFT_P80 = 0xd + IFT_PARA = 0x22 + IFT_PDP = 0xff + IFT_PFLOG = 0xf5 + IFT_PFSYNC = 0xf6 + IFT_PPP = 0x17 + IFT_PROPMUX = 0x36 + IFT_PROPVIRTUAL = 0x35 + IFT_PTPSERIAL = 0x16 + IFT_RS232 = 0x21 + IFT_SDLC = 0x11 + 
+	IFT_SIP     = 0x1f
+	IFT_SLIP    = 0x1c
+	IFT_SMDSDXI = 0x2b
+	[... several hundred more generated constants (IFT_*, IGN*, IN_CLASS*, IPPROTO_*, IPV6_*, IP_*, LOCK_*, MADV_*, MAP_*, MSG_*, NET_RT_*, NOTE_*, O_*, PROT_*, PT_*, RLIMIT_*, RTAX_*/RTA_*/RTF_*/RTM_*/RTV_*, SCM_*, SHUT_*, SIOC*, SOCK_*, SO_*, S_I*, TCP_*, TIOC*, V*, W*) omitted ...]
+	WUNTRACED   = 0x2
+)
+
+// Errors
+const (
+	E2BIG  = syscall.Errno(0x7)
+	EACCES = syscall.Errno(0xd)
+	[... the remaining syscall.Errno constants, EADDRINUSE (0x30) through EXDEV (0x12), omitted ...]
+)
+
+// Signals
+const (
+	SIGABRT = syscall.Signal(0x6)
+	SIGALRM = syscall.Signal(0xe)
+	[... the remaining syscall.Signal constants, SIGBUS (0xa) through SIGXFSZ (0x19), omitted ...]
+)
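The Errors block above pins the generated syscall.Errno values for this platform. A minimal sketch (not part of the patch) of how calling code typically consumes them, using only the standard library's syscall package, which defines the same darwin values; the file path is made up for illustration:

    package main

    import (
        "fmt"
        "os"
        "syscall"
    )

    func main() {
        // Open a path that should not exist; the kernel reports ENOENT (0x2).
        _, err := os.Open("/no/such/file")
        // os.Open wraps the raw errno in an *os.PathError; unwrap it and
        // compare against the generated constant.
        if pe, ok := err.(*os.PathError); ok && pe.Err == syscall.ENOENT {
            fmt.Println("got ENOENT:", pe.Err)
        }
    }

Because Errno is an integer type, the comparison is a plain equality check; no string matching is involved.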
diff --git a/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zerrors_darwin_arm64.go b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zerrors_darwin_arm64.go
new file mode 100644
index 000000000..3189c6b34
--- /dev/null
+++ b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zerrors_darwin_arm64.go
@@ -0,0 +1,1576 @@
+// mkerrors.sh -m64
+// MACHINE GENERATED BY THE COMMAND ABOVE; DO NOT EDIT
+
+// +build arm64,darwin
+
+// Created by cgo -godefs - DO NOT EDIT
+// cgo -godefs -- -m64 _const.go
+
+package unix
+
+import "syscall"
+
+const (
+	AF_APPLETALK = 0x10
+	AF_CCITT     = 0xa
+	[... roughly a thousand more generated constants (AF_*, B0..B115200, BIOC*, BPF_*, CS*, CTL_*, DLT_*, DT_*, ECHO*, EVFILT_*, EV_*, F_*, IFF_*, IFT_*, IN_*, IPPROTO_*, IPV6_*, IP_*, LOCK_*, MADV_*, MAP_*, MSG_*, NET_RT_*, NOTE_*, O_*, PROT_*, PT_*, RLIMIT_*, RTAX_*/RTA_*/RTF_*/RTM_*/RTV_*, SCM_*, SHUT_*, SIOC*, SOCK_*, SO_*, S_I*, TCP_*, TIOC*, V*, W*) omitted ...]
+	WUNTRACED    = 0x2
+)
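Most of the constants above exist to be passed straight into setsockopt/ioctl-style calls. A minimal sketch (not part of the patch) on a Unix platform, using the standard library's syscall package rather than the vendored unix package (the numeric values are the same on darwin); it simply enables TCP_NODELAY on a fresh socket:

    package main

    import (
        "fmt"
        "syscall"
    )

    func main() {
        // AF_INET, SOCK_STREAM, and IPPROTO_TCP come from the generated tables.
        fd, err := syscall.Socket(syscall.AF_INET, syscall.SOCK_STREAM, syscall.IPPROTO_TCP)
        if err != nil {
            fmt.Println("socket:", err)
            return
        }
        defer syscall.Close(fd)

        // TCP_NODELAY (0x1) is an IPPROTO_TCP-level option, per the table above.
        if err := syscall.SetsockoptInt(fd, syscall.IPPROTO_TCP, syscall.TCP_NODELAY, 1); err != nil {
            fmt.Println("setsockopt:", err)
        }
    }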
+
+// Errors
+const (
+	E2BIG  = syscall.Errno(0x7)
+	EACCES = syscall.Errno(0xd)
+	[... the remaining syscall.Errno constants, EADDRINUSE (0x30) through EXDEV (0x12), omitted; the values match the preceding darwin file ...]
+)
+
+// Signals
+const (
+	SIGABRT = syscall.Signal(0x6)
+	SIGALRM = syscall.Signal(0xe)
+	[... the remaining syscall.Signal constants, SIGBUS (0xa) through SIGXFSZ (0x19), omitted ...]
+)
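The Signals block just closed is the platform side of Go's signal handling; a minimal sketch (not part of the patch) of the usual call site, again via the standard library:

    package main

    import (
        "fmt"
        "os"
        "os/signal"
        "syscall"
    )

    func main() {
        ch := make(chan os.Signal, 1)
        // SIGINT (0x2) and SIGTERM (0xf), per the generated table.
        signal.Notify(ch, syscall.SIGINT, syscall.SIGTERM)
        fmt.Println("waiting; press Ctrl-C or send SIGTERM")
        fmt.Println("received:", <-ch)
    }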
+
+// Error table
+var errors = [...]string{
+	1: "operation not permitted",
+	2: "no such file or directory",
+	3: "no such process",
+	[... messages 4 through 105 omitted ...]
+	106: "interface output queue is full",
+}
+
+// Signal table
+var signals = [...]string{
+	1:  "hangup",
+	2:  "interrupt",
+	3:  "quit",
+	4:  "illegal instruction",
+	5:  "trace/BPT trap",
+	6:  "abort trap",
+	7:  "EMT trap",
+	8:  "floating point exception",
+	9:  "killed",
+	10: "bus error",
+	11: "segmentation fault",
+	12: "bad system call",
+	13: "broken pipe",
+	14: "alarm clock",
+	15: "terminated",
+	16: "urgent I/O condition",
+	17: "suspended (signal)",
+	18: "suspended",
+	19: "continued",
+	20: "child exited",
+	21: "stopped (tty input)",
+	22: "stopped (tty output)",
+	23: "I/O possible",
+	24: "cputime limit exceeded",
+	25: "filesize limit exceeded",
+	26: "virtual timer expired",
+	27: "profiling timer expired",
+	28: "window size changes",
+	29: "information request",
+	30: "user defined signal 1",
+	31: "user defined signal 2",
+}
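The errors and signals arrays above are what give Errno and Signal values their human-readable strings. A simplified sketch (not part of the patch) of the lookup; the vendored package's real Error method is equivalent in spirit, and the names here are illustrative:

    package main

    import "fmt"

    // A trimmed stand-in for the generated table above.
    var errors = [...]string{
        1: "operation not permitted",
        2: "no such file or directory",
        3: "no such process",
    }

    // Errno mirrors how syscall.Errno is an integer type whose
    // Error method indexes the message table.
    type Errno uintptr

    func (e Errno) Error() string {
        if 0 <= int(e) && int(e) < len(errors) {
            if s := errors[e]; s != "" {
                return s
            }
        }
        return fmt.Sprintf("errno %d", int(e))
    }

    func main() {
        fmt.Println(Errno(2)) // prints "no such file or directory"
    }

Keeping the table as a sparse indexed array makes the lookup a single bounds-checked array access rather than a map lookup.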
diff --git a/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zerrors_dragonfly_386.go b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zerrors_dragonfly_386.go
new file mode 100644
index 000000000..2a329f06e
--- /dev/null
+++ b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zerrors_dragonfly_386.go
@@ -0,0 +1,1530 @@
+// mkerrors.sh -m32
+// MACHINE GENERATED BY THE COMMAND ABOVE; DO NOT EDIT
+
+// +build 386,dragonfly
+
+// Created by cgo -godefs - DO NOT EDIT
+// cgo -godefs -- -m32 _const.go
+
+package unix
+
+import "syscall"
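Each GOOS/GOARCH pair gets its own generated file, selected at compile time by the build tag in its header ("+build 386,dragonfly" here, "+build arm64,darwin" above); mkerrors.sh passes -m32 or -m64 so the word-size-dependent values come out right. A minimal sketch (not part of the patch) of the same mechanism, with a hypothetical file name and a single illustrative constant:

    // consts_dragonfly_386.go — only compiled when GOOS=dragonfly GOARCH=386.
    // The _dragonfly_386 file-name suffix alone would select it too; the
    // explicit tag mirrors the generated files above.

    // +build 386,dragonfly

    package unix

    // Each platform re-declares the full constant set because the
    // numeric values differ between kernels and word sizes.
    const AF_INET = 0x2

Building with GOOS=dragonfly GOARCH=386 picks exactly this file; every other target drops it, which is why the patch adds one zerrors file per supported platform.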
+
+const (
+	AF_APPLETALK = 0x10
+	AF_ATM       = 0x1e
+	AF_BLUETOOTH = 0x21
+	[... generated constants for dragonfly/386 (AF_*, B*, BIOC*, BPF_*, CS*, CTL_*, DLT_*, DT_*, ECHO*, EVFILT_*, EV_*, EXTEXIT_*, F_*, IFAN_*, IFF_*, IFT_*) omitted ...]
+	IFT_TDLC     = 0x74
IFT_TERMPAD = 0x5b + IFT_TR008 = 0xb0 + IFT_TRANSPHDLC = 0x7b + IFT_TUNNEL = 0x83 + IFT_ULTRA = 0x1d + IFT_USB = 0xa0 + IFT_V11 = 0x40 + IFT_V35 = 0x2d + IFT_V36 = 0x41 + IFT_V37 = 0x78 + IFT_VDSL = 0x61 + IFT_VIRTUALIPADDRESS = 0x70 + IFT_VOICEEM = 0x64 + IFT_VOICEENCAP = 0x67 + IFT_VOICEFXO = 0x65 + IFT_VOICEFXS = 0x66 + IFT_VOICEOVERATM = 0x98 + IFT_VOICEOVERFRAMERELAY = 0x99 + IFT_VOICEOVERIP = 0x68 + IFT_X213 = 0x5d + IFT_X25 = 0x5 + IFT_X25DDN = 0x4 + IFT_X25HUNTGROUP = 0x7a + IFT_X25MLP = 0x79 + IFT_X25PLE = 0x28 + IFT_XETHER = 0x1a + IGNBRK = 0x1 + IGNCR = 0x80 + IGNPAR = 0x4 + IMAXBEL = 0x2000 + INLCR = 0x40 + INPCK = 0x10 + IN_CLASSA_HOST = 0xffffff + IN_CLASSA_MAX = 0x80 + IN_CLASSA_NET = 0xff000000 + IN_CLASSA_NSHIFT = 0x18 + IN_CLASSB_HOST = 0xffff + IN_CLASSB_MAX = 0x10000 + IN_CLASSB_NET = 0xffff0000 + IN_CLASSB_NSHIFT = 0x10 + IN_CLASSC_HOST = 0xff + IN_CLASSC_NET = 0xffffff00 + IN_CLASSC_NSHIFT = 0x8 + IN_CLASSD_HOST = 0xfffffff + IN_CLASSD_NET = 0xf0000000 + IN_CLASSD_NSHIFT = 0x1c + IN_LOOPBACKNET = 0x7f + IPPROTO_3PC = 0x22 + IPPROTO_ADFS = 0x44 + IPPROTO_AH = 0x33 + IPPROTO_AHIP = 0x3d + IPPROTO_APES = 0x63 + IPPROTO_ARGUS = 0xd + IPPROTO_AX25 = 0x5d + IPPROTO_BHA = 0x31 + IPPROTO_BLT = 0x1e + IPPROTO_BRSATMON = 0x4c + IPPROTO_CARP = 0x70 + IPPROTO_CFTP = 0x3e + IPPROTO_CHAOS = 0x10 + IPPROTO_CMTP = 0x26 + IPPROTO_CPHB = 0x49 + IPPROTO_CPNX = 0x48 + IPPROTO_DDP = 0x25 + IPPROTO_DGP = 0x56 + IPPROTO_DIVERT = 0xfe + IPPROTO_DONE = 0x101 + IPPROTO_DSTOPTS = 0x3c + IPPROTO_EGP = 0x8 + IPPROTO_EMCON = 0xe + IPPROTO_ENCAP = 0x62 + IPPROTO_EON = 0x50 + IPPROTO_ESP = 0x32 + IPPROTO_ETHERIP = 0x61 + IPPROTO_FRAGMENT = 0x2c + IPPROTO_GGP = 0x3 + IPPROTO_GMTP = 0x64 + IPPROTO_GRE = 0x2f + IPPROTO_HELLO = 0x3f + IPPROTO_HMP = 0x14 + IPPROTO_HOPOPTS = 0x0 + IPPROTO_ICMP = 0x1 + IPPROTO_ICMPV6 = 0x3a + IPPROTO_IDP = 0x16 + IPPROTO_IDPR = 0x23 + IPPROTO_IDRP = 0x2d + IPPROTO_IGMP = 0x2 + IPPROTO_IGP = 0x55 + IPPROTO_IGRP = 0x58 + IPPROTO_IL = 0x28 + IPPROTO_INLSP = 0x34 + IPPROTO_INP = 0x20 + IPPROTO_IP = 0x0 + IPPROTO_IPCOMP = 0x6c + IPPROTO_IPCV = 0x47 + IPPROTO_IPEIP = 0x5e + IPPROTO_IPIP = 0x4 + IPPROTO_IPPC = 0x43 + IPPROTO_IPV4 = 0x4 + IPPROTO_IPV6 = 0x29 + IPPROTO_IRTP = 0x1c + IPPROTO_KRYPTOLAN = 0x41 + IPPROTO_LARP = 0x5b + IPPROTO_LEAF1 = 0x19 + IPPROTO_LEAF2 = 0x1a + IPPROTO_MAX = 0x100 + IPPROTO_MAXID = 0x34 + IPPROTO_MEAS = 0x13 + IPPROTO_MHRP = 0x30 + IPPROTO_MICP = 0x5f + IPPROTO_MOBILE = 0x37 + IPPROTO_MTP = 0x5c + IPPROTO_MUX = 0x12 + IPPROTO_ND = 0x4d + IPPROTO_NHRP = 0x36 + IPPROTO_NONE = 0x3b + IPPROTO_NSP = 0x1f + IPPROTO_NVPII = 0xb + IPPROTO_OSPFIGP = 0x59 + IPPROTO_PFSYNC = 0xf0 + IPPROTO_PGM = 0x71 + IPPROTO_PIGP = 0x9 + IPPROTO_PIM = 0x67 + IPPROTO_PRM = 0x15 + IPPROTO_PUP = 0xc + IPPROTO_PVP = 0x4b + IPPROTO_RAW = 0xff + IPPROTO_RCCMON = 0xa + IPPROTO_RDP = 0x1b + IPPROTO_ROUTING = 0x2b + IPPROTO_RSVP = 0x2e + IPPROTO_RVD = 0x42 + IPPROTO_SATEXPAK = 0x40 + IPPROTO_SATMON = 0x45 + IPPROTO_SCCSP = 0x60 + IPPROTO_SCTP = 0x84 + IPPROTO_SDRP = 0x2a + IPPROTO_SEP = 0x21 + IPPROTO_SKIP = 0x39 + IPPROTO_SRPC = 0x5a + IPPROTO_ST = 0x7 + IPPROTO_SVMTP = 0x52 + IPPROTO_SWIPE = 0x35 + IPPROTO_TCF = 0x57 + IPPROTO_TCP = 0x6 + IPPROTO_TLSP = 0x38 + IPPROTO_TP = 0x1d + IPPROTO_TPXX = 0x27 + IPPROTO_TRUNK1 = 0x17 + IPPROTO_TRUNK2 = 0x18 + IPPROTO_TTP = 0x54 + IPPROTO_UDP = 0x11 + IPPROTO_UNKNOWN = 0x102 + IPPROTO_VINES = 0x53 + IPPROTO_VISA = 0x46 + IPPROTO_VMTP = 0x51 + IPPROTO_WBEXPAK = 0x4f + IPPROTO_WBMON = 0x4e + IPPROTO_WSN = 0x4a + IPPROTO_XNET = 0xf + IPPROTO_XTP = 
0x24 + IPV6_AUTOFLOWLABEL = 0x3b + IPV6_BINDV6ONLY = 0x1b + IPV6_CHECKSUM = 0x1a + IPV6_DEFAULT_MULTICAST_HOPS = 0x1 + IPV6_DEFAULT_MULTICAST_LOOP = 0x1 + IPV6_DEFHLIM = 0x40 + IPV6_DONTFRAG = 0x3e + IPV6_DSTOPTS = 0x32 + IPV6_FAITH = 0x1d + IPV6_FLOWINFO_MASK = 0xffffff0f + IPV6_FLOWLABEL_MASK = 0xffff0f00 + IPV6_FRAGTTL = 0x78 + IPV6_FW_ADD = 0x1e + IPV6_FW_DEL = 0x1f + IPV6_FW_FLUSH = 0x20 + IPV6_FW_GET = 0x22 + IPV6_FW_ZERO = 0x21 + IPV6_HLIMDEC = 0x1 + IPV6_HOPLIMIT = 0x2f + IPV6_HOPOPTS = 0x31 + IPV6_IPSEC_POLICY = 0x1c + IPV6_JOIN_GROUP = 0xc + IPV6_LEAVE_GROUP = 0xd + IPV6_MAXHLIM = 0xff + IPV6_MAXPACKET = 0xffff + IPV6_MMTU = 0x500 + IPV6_MSFILTER = 0x4a + IPV6_MULTICAST_HOPS = 0xa + IPV6_MULTICAST_IF = 0x9 + IPV6_MULTICAST_LOOP = 0xb + IPV6_NEXTHOP = 0x30 + IPV6_PATHMTU = 0x2c + IPV6_PKTINFO = 0x2e + IPV6_PKTOPTIONS = 0x34 + IPV6_PORTRANGE = 0xe + IPV6_PORTRANGE_DEFAULT = 0x0 + IPV6_PORTRANGE_HIGH = 0x1 + IPV6_PORTRANGE_LOW = 0x2 + IPV6_PREFER_TEMPADDR = 0x3f + IPV6_RECVDSTOPTS = 0x28 + IPV6_RECVHOPLIMIT = 0x25 + IPV6_RECVHOPOPTS = 0x27 + IPV6_RECVPATHMTU = 0x2b + IPV6_RECVPKTINFO = 0x24 + IPV6_RECVRTHDR = 0x26 + IPV6_RECVTCLASS = 0x39 + IPV6_RTHDR = 0x33 + IPV6_RTHDRDSTOPTS = 0x23 + IPV6_RTHDR_LOOSE = 0x0 + IPV6_RTHDR_STRICT = 0x1 + IPV6_RTHDR_TYPE_0 = 0x0 + IPV6_SOCKOPT_RESERVED1 = 0x3 + IPV6_TCLASS = 0x3d + IPV6_UNICAST_HOPS = 0x4 + IPV6_USE_MIN_MTU = 0x2a + IPV6_V6ONLY = 0x1b + IPV6_VERSION = 0x60 + IPV6_VERSION_MASK = 0xf0 + IP_ADD_MEMBERSHIP = 0xc + IP_DEFAULT_MULTICAST_LOOP = 0x1 + IP_DEFAULT_MULTICAST_TTL = 0x1 + IP_DF = 0x4000 + IP_DROP_MEMBERSHIP = 0xd + IP_DUMMYNET_CONFIGURE = 0x3c + IP_DUMMYNET_DEL = 0x3d + IP_DUMMYNET_FLUSH = 0x3e + IP_DUMMYNET_GET = 0x40 + IP_FAITH = 0x16 + IP_FW_ADD = 0x32 + IP_FW_DEL = 0x33 + IP_FW_FLUSH = 0x34 + IP_FW_GET = 0x36 + IP_FW_RESETLOG = 0x37 + IP_FW_ZERO = 0x35 + IP_HDRINCL = 0x2 + IP_IPSEC_POLICY = 0x15 + IP_MAXPACKET = 0xffff + IP_MAX_MEMBERSHIPS = 0x14 + IP_MF = 0x2000 + IP_MINTTL = 0x42 + IP_MSS = 0x240 + IP_MULTICAST_IF = 0x9 + IP_MULTICAST_LOOP = 0xb + IP_MULTICAST_TTL = 0xa + IP_MULTICAST_VIF = 0xe + IP_OFFMASK = 0x1fff + IP_OPTIONS = 0x1 + IP_PORTRANGE = 0x13 + IP_PORTRANGE_DEFAULT = 0x0 + IP_PORTRANGE_HIGH = 0x1 + IP_PORTRANGE_LOW = 0x2 + IP_RECVDSTADDR = 0x7 + IP_RECVIF = 0x14 + IP_RECVOPTS = 0x5 + IP_RECVRETOPTS = 0x6 + IP_RECVTTL = 0x41 + IP_RETOPTS = 0x8 + IP_RF = 0x8000 + IP_RSVP_OFF = 0x10 + IP_RSVP_ON = 0xf + IP_RSVP_VIF_OFF = 0x12 + IP_RSVP_VIF_ON = 0x11 + IP_TOS = 0x3 + IP_TTL = 0x4 + ISIG = 0x80 + ISTRIP = 0x20 + IXANY = 0x800 + IXOFF = 0x400 + IXON = 0x200 + LOCK_EX = 0x2 + LOCK_NB = 0x4 + LOCK_SH = 0x1 + LOCK_UN = 0x8 + MADV_AUTOSYNC = 0x7 + MADV_CONTROL_END = 0xb + MADV_CONTROL_START = 0xa + MADV_CORE = 0x9 + MADV_DONTNEED = 0x4 + MADV_FREE = 0x5 + MADV_INVAL = 0xa + MADV_NOCORE = 0x8 + MADV_NORMAL = 0x0 + MADV_NOSYNC = 0x6 + MADV_RANDOM = 0x1 + MADV_SEQUENTIAL = 0x2 + MADV_SETMAP = 0xb + MADV_WILLNEED = 0x3 + MAP_ANON = 0x1000 + MAP_COPY = 0x2 + MAP_FILE = 0x0 + MAP_FIXED = 0x10 + MAP_HASSEMAPHORE = 0x200 + MAP_INHERIT = 0x80 + MAP_NOCORE = 0x20000 + MAP_NOEXTEND = 0x100 + MAP_NORESERVE = 0x40 + MAP_NOSYNC = 0x800 + MAP_PRIVATE = 0x2 + MAP_RENAME = 0x20 + MAP_SHARED = 0x1 + MAP_SIZEALIGN = 0x40000 + MAP_STACK = 0x400 + MAP_TRYFIXED = 0x10000 + MAP_VPAGETABLE = 0x2000 + MCL_CURRENT = 0x1 + MCL_FUTURE = 0x2 + MSG_CTRUNC = 0x20 + MSG_DONTROUTE = 0x4 + MSG_DONTWAIT = 0x80 + MSG_EOF = 0x100 + MSG_EOR = 0x8 + MSG_FBLOCKING = 0x10000 + MSG_FMASK = 0xffff0000 + MSG_FNONBLOCKING = 0x20000 + MSG_NOSIGNAL = 0x400 + 
MSG_NOTIFICATION = 0x200 + MSG_OOB = 0x1 + MSG_PEEK = 0x2 + MSG_SYNC = 0x800 + MSG_TRUNC = 0x10 + MSG_WAITALL = 0x40 + MS_ASYNC = 0x1 + MS_INVALIDATE = 0x2 + MS_SYNC = 0x0 + NAME_MAX = 0xff + NET_RT_DUMP = 0x1 + NET_RT_FLAGS = 0x2 + NET_RT_IFLIST = 0x3 + NET_RT_MAXID = 0x4 + NOFLSH = 0x80000000 + NOTE_ATTRIB = 0x8 + NOTE_CHILD = 0x4 + NOTE_DELETE = 0x1 + NOTE_EXEC = 0x20000000 + NOTE_EXIT = 0x80000000 + NOTE_EXTEND = 0x4 + NOTE_FORK = 0x40000000 + NOTE_LINK = 0x10 + NOTE_LOWAT = 0x1 + NOTE_OOB = 0x2 + NOTE_PCTRLMASK = 0xf0000000 + NOTE_PDATAMASK = 0xfffff + NOTE_RENAME = 0x20 + NOTE_REVOKE = 0x40 + NOTE_TRACK = 0x1 + NOTE_TRACKERR = 0x2 + NOTE_WRITE = 0x2 + OCRNL = 0x10 + ONLCR = 0x2 + ONLRET = 0x40 + ONOCR = 0x20 + ONOEOT = 0x8 + OPOST = 0x1 + O_ACCMODE = 0x3 + O_APPEND = 0x8 + O_ASYNC = 0x40 + O_CLOEXEC = 0x20000 + O_CREAT = 0x200 + O_DIRECT = 0x10000 + O_DIRECTORY = 0x8000000 + O_EXCL = 0x800 + O_EXLOCK = 0x20 + O_FAPPEND = 0x100000 + O_FASYNCWRITE = 0x800000 + O_FBLOCKING = 0x40000 + O_FBUFFERED = 0x2000000 + O_FMASK = 0x7fc0000 + O_FNONBLOCKING = 0x80000 + O_FOFFSET = 0x200000 + O_FSYNC = 0x80 + O_FSYNCWRITE = 0x400000 + O_FUNBUFFERED = 0x1000000 + O_MAPONREAD = 0x4000000 + O_NDELAY = 0x4 + O_NOCTTY = 0x8000 + O_NOFOLLOW = 0x100 + O_NONBLOCK = 0x4 + O_RDONLY = 0x0 + O_RDWR = 0x2 + O_SHLOCK = 0x10 + O_SYNC = 0x80 + O_TRUNC = 0x400 + O_WRONLY = 0x1 + PARENB = 0x1000 + PARMRK = 0x8 + PARODD = 0x2000 + PENDIN = 0x20000000 + PRIO_PGRP = 0x1 + PRIO_PROCESS = 0x0 + PRIO_USER = 0x2 + PROT_EXEC = 0x4 + PROT_NONE = 0x0 + PROT_READ = 0x1 + PROT_WRITE = 0x2 + RLIMIT_AS = 0xa + RLIMIT_CORE = 0x4 + RLIMIT_CPU = 0x0 + RLIMIT_DATA = 0x2 + RLIMIT_FSIZE = 0x1 + RLIMIT_NOFILE = 0x8 + RLIMIT_STACK = 0x3 + RLIM_INFINITY = 0x7fffffffffffffff + RTAX_AUTHOR = 0x6 + RTAX_BRD = 0x7 + RTAX_DST = 0x0 + RTAX_GATEWAY = 0x1 + RTAX_GENMASK = 0x3 + RTAX_IFA = 0x5 + RTAX_IFP = 0x4 + RTAX_MAX = 0xb + RTAX_MPLS1 = 0x8 + RTAX_MPLS2 = 0x9 + RTAX_MPLS3 = 0xa + RTAX_NETMASK = 0x2 + RTA_AUTHOR = 0x40 + RTA_BRD = 0x80 + RTA_DST = 0x1 + RTA_GATEWAY = 0x2 + RTA_GENMASK = 0x8 + RTA_IFA = 0x20 + RTA_IFP = 0x10 + RTA_MPLS1 = 0x100 + RTA_MPLS2 = 0x200 + RTA_MPLS3 = 0x400 + RTA_NETMASK = 0x4 + RTF_BLACKHOLE = 0x1000 + RTF_BROADCAST = 0x400000 + RTF_CLONING = 0x100 + RTF_DONE = 0x40 + RTF_DYNAMIC = 0x10 + RTF_GATEWAY = 0x2 + RTF_HOST = 0x4 + RTF_LLINFO = 0x400 + RTF_LOCAL = 0x200000 + RTF_MODIFIED = 0x20 + RTF_MPLSOPS = 0x1000000 + RTF_MULTICAST = 0x800000 + RTF_PINNED = 0x100000 + RTF_PRCLONING = 0x10000 + RTF_PROTO1 = 0x8000 + RTF_PROTO2 = 0x4000 + RTF_PROTO3 = 0x40000 + RTF_REJECT = 0x8 + RTF_STATIC = 0x800 + RTF_UP = 0x1 + RTF_WASCLONED = 0x20000 + RTF_XRESOLVE = 0x200 + RTM_ADD = 0x1 + RTM_CHANGE = 0x3 + RTM_DELADDR = 0xd + RTM_DELETE = 0x2 + RTM_DELMADDR = 0x10 + RTM_GET = 0x4 + RTM_IEEE80211 = 0x12 + RTM_IFANNOUNCE = 0x11 + RTM_IFINFO = 0xe + RTM_LOCK = 0x8 + RTM_LOSING = 0x5 + RTM_MISS = 0x7 + RTM_NEWADDR = 0xc + RTM_NEWMADDR = 0xf + RTM_OLDADD = 0x9 + RTM_OLDDEL = 0xa + RTM_REDIRECT = 0x6 + RTM_RESOLVE = 0xb + RTM_RTTUNIT = 0xf4240 + RTM_VERSION = 0x6 + RTV_EXPIRE = 0x4 + RTV_HOPCOUNT = 0x2 + RTV_IWCAPSEGS = 0x400 + RTV_IWMAXSEGS = 0x200 + RTV_MSL = 0x100 + RTV_MTU = 0x1 + RTV_RPIPE = 0x8 + RTV_RTT = 0x40 + RTV_RTTVAR = 0x80 + RTV_SPIPE = 0x10 + RTV_SSTHRESH = 0x20 + RUSAGE_CHILDREN = -0x1 + RUSAGE_SELF = 0x0 + SCM_CREDS = 0x3 + SCM_RIGHTS = 0x1 + SCM_TIMESTAMP = 0x2 + SHUT_RD = 0x0 + SHUT_RDWR = 0x2 + SHUT_WR = 0x1 + SIOCADDMULTI = 0x80206931 + SIOCADDRT = 0x8030720a + SIOCAIFADDR = 0x8040691a + SIOCALIFADDR = 0x8118691b + 
SIOCATMARK = 0x40047307 + SIOCDELMULTI = 0x80206932 + SIOCDELRT = 0x8030720b + SIOCDIFADDR = 0x80206919 + SIOCDIFPHYADDR = 0x80206949 + SIOCDLIFADDR = 0x8118691d + SIOCGDRVSPEC = 0xc01c697b + SIOCGETSGCNT = 0xc0147210 + SIOCGETVIFCNT = 0xc014720f + SIOCGHIWAT = 0x40047301 + SIOCGIFADDR = 0xc0206921 + SIOCGIFBRDADDR = 0xc0206923 + SIOCGIFCAP = 0xc020691f + SIOCGIFCONF = 0xc0086924 + SIOCGIFDATA = 0xc0206926 + SIOCGIFDSTADDR = 0xc0206922 + SIOCGIFFLAGS = 0xc0206911 + SIOCGIFGENERIC = 0xc020693a + SIOCGIFGMEMB = 0xc024698a + SIOCGIFINDEX = 0xc0206920 + SIOCGIFMEDIA = 0xc0286938 + SIOCGIFMETRIC = 0xc0206917 + SIOCGIFMTU = 0xc0206933 + SIOCGIFNETMASK = 0xc0206925 + SIOCGIFPDSTADDR = 0xc0206948 + SIOCGIFPHYS = 0xc0206935 + SIOCGIFPOLLCPU = 0xc020697e + SIOCGIFPSRCADDR = 0xc0206947 + SIOCGIFSTATUS = 0xc331693b + SIOCGIFTSOLEN = 0xc0206980 + SIOCGLIFADDR = 0xc118691c + SIOCGLIFPHYADDR = 0xc118694b + SIOCGLOWAT = 0x40047303 + SIOCGPGRP = 0x40047309 + SIOCGPRIVATE_0 = 0xc0206950 + SIOCGPRIVATE_1 = 0xc0206951 + SIOCIFCREATE = 0xc020697a + SIOCIFCREATE2 = 0xc020697c + SIOCIFDESTROY = 0x80206979 + SIOCIFGCLONERS = 0xc00c6978 + SIOCSDRVSPEC = 0x801c697b + SIOCSHIWAT = 0x80047300 + SIOCSIFADDR = 0x8020690c + SIOCSIFBRDADDR = 0x80206913 + SIOCSIFCAP = 0x8020691e + SIOCSIFDSTADDR = 0x8020690e + SIOCSIFFLAGS = 0x80206910 + SIOCSIFGENERIC = 0x80206939 + SIOCSIFLLADDR = 0x8020693c + SIOCSIFMEDIA = 0xc0206937 + SIOCSIFMETRIC = 0x80206918 + SIOCSIFMTU = 0x80206934 + SIOCSIFNAME = 0x80206928 + SIOCSIFNETMASK = 0x80206916 + SIOCSIFPHYADDR = 0x80406946 + SIOCSIFPHYS = 0x80206936 + SIOCSIFPOLLCPU = 0x8020697d + SIOCSIFTSOLEN = 0x8020697f + SIOCSLIFPHYADDR = 0x8118694a + SIOCSLOWAT = 0x80047302 + SIOCSPGRP = 0x80047308 + SOCK_DGRAM = 0x2 + SOCK_MAXADDRLEN = 0xff + SOCK_RAW = 0x3 + SOCK_RDM = 0x4 + SOCK_SEQPACKET = 0x5 + SOCK_STREAM = 0x1 + SOL_SOCKET = 0xffff + SOMAXCONN = 0x80 + SO_ACCEPTCONN = 0x2 + SO_ACCEPTFILTER = 0x1000 + SO_BROADCAST = 0x20 + SO_DEBUG = 0x1 + SO_DONTROUTE = 0x10 + SO_ERROR = 0x1007 + SO_KEEPALIVE = 0x8 + SO_LINGER = 0x80 + SO_NOSIGPIPE = 0x800 + SO_OOBINLINE = 0x100 + SO_RCVBUF = 0x1002 + SO_RCVLOWAT = 0x1004 + SO_RCVTIMEO = 0x1006 + SO_REUSEADDR = 0x4 + SO_REUSEPORT = 0x200 + SO_SNDBUF = 0x1001 + SO_SNDLOWAT = 0x1003 + SO_SNDSPACE = 0x100a + SO_SNDTIMEO = 0x1005 + SO_TIMESTAMP = 0x400 + SO_TYPE = 0x1008 + SO_USELOOPBACK = 0x40 + TCIFLUSH = 0x1 + TCIOFLUSH = 0x3 + TCOFLUSH = 0x2 + TCP_FASTKEEP = 0x80 + TCP_KEEPCNT = 0x400 + TCP_KEEPIDLE = 0x100 + TCP_KEEPINIT = 0x20 + TCP_KEEPINTVL = 0x200 + TCP_MAXBURST = 0x4 + TCP_MAXHLEN = 0x3c + TCP_MAXOLEN = 0x28 + TCP_MAXSEG = 0x2 + TCP_MAXWIN = 0xffff + TCP_MAX_WINSHIFT = 0xe + TCP_MINMSS = 0x100 + TCP_MIN_WINSHIFT = 0x5 + TCP_MSS = 0x200 + TCP_NODELAY = 0x1 + TCP_NOOPT = 0x8 + TCP_NOPUSH = 0x4 + TCP_SIGNATURE_ENABLE = 0x10 + TCSAFLUSH = 0x2 + TIOCCBRK = 0x2000747a + TIOCCDTR = 0x20007478 + TIOCCONS = 0x80047462 + TIOCDCDTIMESTAMP = 0x40087458 + TIOCDRAIN = 0x2000745e + TIOCEXCL = 0x2000740d + TIOCEXT = 0x80047460 + TIOCFLUSH = 0x80047410 + TIOCGDRAINWAIT = 0x40047456 + TIOCGETA = 0x402c7413 + TIOCGETD = 0x4004741a + TIOCGPGRP = 0x40047477 + TIOCGSID = 0x40047463 + TIOCGSIZE = 0x40087468 + TIOCGWINSZ = 0x40087468 + TIOCISPTMASTER = 0x20007455 + TIOCMBIC = 0x8004746b + TIOCMBIS = 0x8004746c + TIOCMGDTRWAIT = 0x4004745a + TIOCMGET = 0x4004746a + TIOCMODG = 0x40047403 + TIOCMODS = 0x80047404 + TIOCMSDTRWAIT = 0x8004745b + TIOCMSET = 0x8004746d + TIOCM_CAR = 0x40 + TIOCM_CD = 0x40 + TIOCM_CTS = 0x20 + TIOCM_DSR = 0x100 + TIOCM_DTR = 0x2 + TIOCM_LE = 0x1 + 
TIOCM_RI = 0x80 + TIOCM_RNG = 0x80 + TIOCM_RTS = 0x4 + TIOCM_SR = 0x10 + TIOCM_ST = 0x8 + TIOCNOTTY = 0x20007471 + TIOCNXCL = 0x2000740e + TIOCOUTQ = 0x40047473 + TIOCPKT = 0x80047470 + TIOCPKT_DATA = 0x0 + TIOCPKT_DOSTOP = 0x20 + TIOCPKT_FLUSHREAD = 0x1 + TIOCPKT_FLUSHWRITE = 0x2 + TIOCPKT_IOCTL = 0x40 + TIOCPKT_NOSTOP = 0x10 + TIOCPKT_START = 0x8 + TIOCPKT_STOP = 0x4 + TIOCREMOTE = 0x80047469 + TIOCSBRK = 0x2000747b + TIOCSCTTY = 0x20007461 + TIOCSDRAINWAIT = 0x80047457 + TIOCSDTR = 0x20007479 + TIOCSETA = 0x802c7414 + TIOCSETAF = 0x802c7416 + TIOCSETAW = 0x802c7415 + TIOCSETD = 0x8004741b + TIOCSIG = 0x2000745f + TIOCSPGRP = 0x80047476 + TIOCSSIZE = 0x80087467 + TIOCSTART = 0x2000746e + TIOCSTAT = 0x20007465 + TIOCSTI = 0x80017472 + TIOCSTOP = 0x2000746f + TIOCSWINSZ = 0x80087467 + TIOCTIMESTAMP = 0x40087459 + TIOCUCNTL = 0x80047466 + TOSTOP = 0x400000 + VCHECKPT = 0x13 + VDISCARD = 0xf + VDSUSP = 0xb + VEOF = 0x0 + VEOL = 0x1 + VEOL2 = 0x2 + VERASE = 0x3 + VERASE2 = 0x7 + VINTR = 0x8 + VKILL = 0x5 + VLNEXT = 0xe + VMIN = 0x10 + VQUIT = 0x9 + VREPRINT = 0x6 + VSTART = 0xc + VSTATUS = 0x12 + VSTOP = 0xd + VSUSP = 0xa + VTIME = 0x11 + VWERASE = 0x4 + WCONTINUED = 0x4 + WCOREFLAG = 0x80 + WLINUXCLONE = 0x80000000 + WNOHANG = 0x1 + WSTOPPED = 0x7f + WUNTRACED = 0x2 +) + +// Errors +const ( + E2BIG = syscall.Errno(0x7) + EACCES = syscall.Errno(0xd) + EADDRINUSE = syscall.Errno(0x30) + EADDRNOTAVAIL = syscall.Errno(0x31) + EAFNOSUPPORT = syscall.Errno(0x2f) + EAGAIN = syscall.Errno(0x23) + EALREADY = syscall.Errno(0x25) + EASYNC = syscall.Errno(0x63) + EAUTH = syscall.Errno(0x50) + EBADF = syscall.Errno(0x9) + EBADMSG = syscall.Errno(0x59) + EBADRPC = syscall.Errno(0x48) + EBUSY = syscall.Errno(0x10) + ECANCELED = syscall.Errno(0x55) + ECHILD = syscall.Errno(0xa) + ECONNABORTED = syscall.Errno(0x35) + ECONNREFUSED = syscall.Errno(0x3d) + ECONNRESET = syscall.Errno(0x36) + EDEADLK = syscall.Errno(0xb) + EDESTADDRREQ = syscall.Errno(0x27) + EDOM = syscall.Errno(0x21) + EDOOFUS = syscall.Errno(0x58) + EDQUOT = syscall.Errno(0x45) + EEXIST = syscall.Errno(0x11) + EFAULT = syscall.Errno(0xe) + EFBIG = syscall.Errno(0x1b) + EFTYPE = syscall.Errno(0x4f) + EHOSTDOWN = syscall.Errno(0x40) + EHOSTUNREACH = syscall.Errno(0x41) + EIDRM = syscall.Errno(0x52) + EILSEQ = syscall.Errno(0x56) + EINPROGRESS = syscall.Errno(0x24) + EINTR = syscall.Errno(0x4) + EINVAL = syscall.Errno(0x16) + EIO = syscall.Errno(0x5) + EISCONN = syscall.Errno(0x38) + EISDIR = syscall.Errno(0x15) + ELAST = syscall.Errno(0x63) + ELOOP = syscall.Errno(0x3e) + EMFILE = syscall.Errno(0x18) + EMLINK = syscall.Errno(0x1f) + EMSGSIZE = syscall.Errno(0x28) + EMULTIHOP = syscall.Errno(0x5a) + ENAMETOOLONG = syscall.Errno(0x3f) + ENEEDAUTH = syscall.Errno(0x51) + ENETDOWN = syscall.Errno(0x32) + ENETRESET = syscall.Errno(0x34) + ENETUNREACH = syscall.Errno(0x33) + ENFILE = syscall.Errno(0x17) + ENOATTR = syscall.Errno(0x57) + ENOBUFS = syscall.Errno(0x37) + ENODEV = syscall.Errno(0x13) + ENOENT = syscall.Errno(0x2) + ENOEXEC = syscall.Errno(0x8) + ENOLCK = syscall.Errno(0x4d) + ENOLINK = syscall.Errno(0x5b) + ENOMEDIUM = syscall.Errno(0x5d) + ENOMEM = syscall.Errno(0xc) + ENOMSG = syscall.Errno(0x53) + ENOPROTOOPT = syscall.Errno(0x2a) + ENOSPC = syscall.Errno(0x1c) + ENOSYS = syscall.Errno(0x4e) + ENOTBLK = syscall.Errno(0xf) + ENOTCONN = syscall.Errno(0x39) + ENOTDIR = syscall.Errno(0x14) + ENOTEMPTY = syscall.Errno(0x42) + ENOTSOCK = syscall.Errno(0x26) + ENOTSUP = syscall.Errno(0x2d) + ENOTTY = syscall.Errno(0x19) + ENXIO = 
syscall.Errno(0x6) + EOPNOTSUPP = syscall.Errno(0x2d) + EOVERFLOW = syscall.Errno(0x54) + EPERM = syscall.Errno(0x1) + EPFNOSUPPORT = syscall.Errno(0x2e) + EPIPE = syscall.Errno(0x20) + EPROCLIM = syscall.Errno(0x43) + EPROCUNAVAIL = syscall.Errno(0x4c) + EPROGMISMATCH = syscall.Errno(0x4b) + EPROGUNAVAIL = syscall.Errno(0x4a) + EPROTO = syscall.Errno(0x5c) + EPROTONOSUPPORT = syscall.Errno(0x2b) + EPROTOTYPE = syscall.Errno(0x29) + ERANGE = syscall.Errno(0x22) + EREMOTE = syscall.Errno(0x47) + EROFS = syscall.Errno(0x1e) + ERPCMISMATCH = syscall.Errno(0x49) + ESHUTDOWN = syscall.Errno(0x3a) + ESOCKTNOSUPPORT = syscall.Errno(0x2c) + ESPIPE = syscall.Errno(0x1d) + ESRCH = syscall.Errno(0x3) + ESTALE = syscall.Errno(0x46) + ETIMEDOUT = syscall.Errno(0x3c) + ETOOMANYREFS = syscall.Errno(0x3b) + ETXTBSY = syscall.Errno(0x1a) + EUNUSED94 = syscall.Errno(0x5e) + EUNUSED95 = syscall.Errno(0x5f) + EUNUSED96 = syscall.Errno(0x60) + EUNUSED97 = syscall.Errno(0x61) + EUNUSED98 = syscall.Errno(0x62) + EUSERS = syscall.Errno(0x44) + EWOULDBLOCK = syscall.Errno(0x23) + EXDEV = syscall.Errno(0x12) +) + +// Signals +const ( + SIGABRT = syscall.Signal(0x6) + SIGALRM = syscall.Signal(0xe) + SIGBUS = syscall.Signal(0xa) + SIGCHLD = syscall.Signal(0x14) + SIGCKPT = syscall.Signal(0x21) + SIGCKPTEXIT = syscall.Signal(0x22) + SIGCONT = syscall.Signal(0x13) + SIGEMT = syscall.Signal(0x7) + SIGFPE = syscall.Signal(0x8) + SIGHUP = syscall.Signal(0x1) + SIGILL = syscall.Signal(0x4) + SIGINFO = syscall.Signal(0x1d) + SIGINT = syscall.Signal(0x2) + SIGIO = syscall.Signal(0x17) + SIGIOT = syscall.Signal(0x6) + SIGKILL = syscall.Signal(0x9) + SIGPIPE = syscall.Signal(0xd) + SIGPROF = syscall.Signal(0x1b) + SIGQUIT = syscall.Signal(0x3) + SIGSEGV = syscall.Signal(0xb) + SIGSTOP = syscall.Signal(0x11) + SIGSYS = syscall.Signal(0xc) + SIGTERM = syscall.Signal(0xf) + SIGTHR = syscall.Signal(0x20) + SIGTRAP = syscall.Signal(0x5) + SIGTSTP = syscall.Signal(0x12) + SIGTTIN = syscall.Signal(0x15) + SIGTTOU = syscall.Signal(0x16) + SIGURG = syscall.Signal(0x10) + SIGUSR1 = syscall.Signal(0x1e) + SIGUSR2 = syscall.Signal(0x1f) + SIGVTALRM = syscall.Signal(0x1a) + SIGWINCH = syscall.Signal(0x1c) + SIGXCPU = syscall.Signal(0x18) + SIGXFSZ = syscall.Signal(0x19) +) + +// Error table +var errors = [...]string{ + 1: "operation not permitted", + 2: "no such file or directory", + 3: "no such process", + 4: "interrupted system call", + 5: "input/output error", + 6: "device not configured", + 7: "argument list too long", + 8: "exec format error", + 9: "bad file descriptor", + 10: "no child processes", + 11: "resource deadlock avoided", + 12: "cannot allocate memory", + 13: "permission denied", + 14: "bad address", + 15: "block device required", + 16: "device busy", + 17: "file exists", + 18: "cross-device link", + 19: "operation not supported by device", + 20: "not a directory", + 21: "is a directory", + 22: "invalid argument", + 23: "too many open files in system", + 24: "too many open files", + 25: "inappropriate ioctl for device", + 26: "text file busy", + 27: "file too large", + 28: "no space left on device", + 29: "illegal seek", + 30: "read-only file system", + 31: "too many links", + 32: "broken pipe", + 33: "numerical argument out of domain", + 34: "result too large", + 35: "resource temporarily unavailable", + 36: "operation now in progress", + 37: "operation already in progress", + 38: "socket operation on non-socket", + 39: "destination address required", + 40: "message too long", + 41: "protocol wrong type for socket", + 42: 
"protocol not available", + 43: "protocol not supported", + 44: "socket type not supported", + 45: "operation not supported", + 46: "protocol family not supported", + 47: "address family not supported by protocol family", + 48: "address already in use", + 49: "can't assign requested address", + 50: "network is down", + 51: "network is unreachable", + 52: "network dropped connection on reset", + 53: "software caused connection abort", + 54: "connection reset by peer", + 55: "no buffer space available", + 56: "socket is already connected", + 57: "socket is not connected", + 58: "can't send after socket shutdown", + 59: "too many references: can't splice", + 60: "operation timed out", + 61: "connection refused", + 62: "too many levels of symbolic links", + 63: "file name too long", + 64: "host is down", + 65: "no route to host", + 66: "directory not empty", + 67: "too many processes", + 68: "too many users", + 69: "disc quota exceeded", + 70: "stale NFS file handle", + 71: "too many levels of remote in path", + 72: "RPC struct is bad", + 73: "RPC version wrong", + 74: "RPC prog. not avail", + 75: "program version wrong", + 76: "bad procedure for program", + 77: "no locks available", + 78: "function not implemented", + 79: "inappropriate file type or format", + 80: "authentication error", + 81: "need authenticator", + 82: "identifier removed", + 83: "no message of desired type", + 84: "value too large to be stored in data type", + 85: "operation canceled", + 86: "illegal byte sequence", + 87: "attribute not found", + 88: "programming error", + 89: "bad message", + 90: "multihop attempted", + 91: "link has been severed", + 92: "protocol error", + 93: "no medium found", + 94: "unknown error: 94", + 95: "unknown error: 95", + 96: "unknown error: 96", + 97: "unknown error: 97", + 98: "unknown error: 98", + 99: "unknown error: 99", +} + +// Signal table +var signals = [...]string{ + 1: "hangup", + 2: "interrupt", + 3: "quit", + 4: "illegal instruction", + 5: "trace/BPT trap", + 6: "abort trap", + 7: "EMT trap", + 8: "floating point exception", + 9: "killed", + 10: "bus error", + 11: "segmentation fault", + 12: "bad system call", + 13: "broken pipe", + 14: "alarm clock", + 15: "terminated", + 16: "urgent I/O condition", + 17: "suspended (signal)", + 18: "suspended", + 19: "continued", + 20: "child exited", + 21: "stopped (tty input)", + 22: "stopped (tty output)", + 23: "I/O possible", + 24: "cputime limit exceeded", + 25: "filesize limit exceeded", + 26: "virtual timer expired", + 27: "profiling timer expired", + 28: "window size changes", + 29: "information request", + 30: "user defined signal 1", + 31: "user defined signal 2", + 32: "thread Scheduler", + 33: "checkPoint", + 34: "checkPointExit", +} diff --git a/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zerrors_dragonfly_amd64.go b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zerrors_dragonfly_amd64.go new file mode 100644 index 000000000..0feceee15 --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zerrors_dragonfly_amd64.go @@ -0,0 +1,1530 @@ +// mkerrors.sh -m64 +// MACHINE GENERATED BY THE COMMAND ABOVE; DO NOT EDIT + +// +build amd64,dragonfly + +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs -- -m64 _const.go + +package unix + +import "syscall" + +const ( + AF_APPLETALK = 0x10 + AF_ATM = 0x1e + AF_BLUETOOTH = 0x21 + AF_CCITT = 0xa + AF_CHAOS = 0x5 + AF_CNT = 0x15 + AF_COIP = 0x14 + AF_DATAKIT = 0x9 + AF_DECnet = 0xc + AF_DLI = 0xd + 
AF_E164 = 0x1a + AF_ECMA = 0x8 + AF_HYLINK = 0xf + AF_IEEE80211 = 0x23 + AF_IMPLINK = 0x3 + AF_INET = 0x2 + AF_INET6 = 0x1c + AF_IPX = 0x17 + AF_ISDN = 0x1a + AF_ISO = 0x7 + AF_LAT = 0xe + AF_LINK = 0x12 + AF_LOCAL = 0x1 + AF_MAX = 0x24 + AF_MPLS = 0x22 + AF_NATM = 0x1d + AF_NETGRAPH = 0x20 + AF_NS = 0x6 + AF_OSI = 0x7 + AF_PUP = 0x4 + AF_ROUTE = 0x11 + AF_SIP = 0x18 + AF_SNA = 0xb + AF_UNIX = 0x1 + AF_UNSPEC = 0x0 + B0 = 0x0 + B110 = 0x6e + B115200 = 0x1c200 + B1200 = 0x4b0 + B134 = 0x86 + B14400 = 0x3840 + B150 = 0x96 + B1800 = 0x708 + B19200 = 0x4b00 + B200 = 0xc8 + B230400 = 0x38400 + B2400 = 0x960 + B28800 = 0x7080 + B300 = 0x12c + B38400 = 0x9600 + B4800 = 0x12c0 + B50 = 0x32 + B57600 = 0xe100 + B600 = 0x258 + B7200 = 0x1c20 + B75 = 0x4b + B76800 = 0x12c00 + B9600 = 0x2580 + BIOCFLUSH = 0x20004268 + BIOCGBLEN = 0x40044266 + BIOCGDLT = 0x4004426a + BIOCGDLTLIST = 0xc0104279 + BIOCGETIF = 0x4020426b + BIOCGHDRCMPLT = 0x40044274 + BIOCGRSIG = 0x40044272 + BIOCGRTIMEOUT = 0x4010426e + BIOCGSEESENT = 0x40044276 + BIOCGSTATS = 0x4008426f + BIOCIMMEDIATE = 0x80044270 + BIOCLOCK = 0x2000427a + BIOCPROMISC = 0x20004269 + BIOCSBLEN = 0xc0044266 + BIOCSDLT = 0x80044278 + BIOCSETF = 0x80104267 + BIOCSETIF = 0x8020426c + BIOCSETWF = 0x8010427b + BIOCSHDRCMPLT = 0x80044275 + BIOCSRSIG = 0x80044273 + BIOCSRTIMEOUT = 0x8010426d + BIOCSSEESENT = 0x80044277 + BIOCVERSION = 0x40044271 + BPF_A = 0x10 + BPF_ABS = 0x20 + BPF_ADD = 0x0 + BPF_ALIGNMENT = 0x8 + BPF_ALU = 0x4 + BPF_AND = 0x50 + BPF_B = 0x10 + BPF_DEFAULTBUFSIZE = 0x1000 + BPF_DIV = 0x30 + BPF_H = 0x8 + BPF_IMM = 0x0 + BPF_IND = 0x40 + BPF_JA = 0x0 + BPF_JEQ = 0x10 + BPF_JGE = 0x30 + BPF_JGT = 0x20 + BPF_JMP = 0x5 + BPF_JSET = 0x40 + BPF_K = 0x0 + BPF_LD = 0x0 + BPF_LDX = 0x1 + BPF_LEN = 0x80 + BPF_LSH = 0x60 + BPF_MAJOR_VERSION = 0x1 + BPF_MAXBUFSIZE = 0x80000 + BPF_MAXINSNS = 0x200 + BPF_MAX_CLONES = 0x80 + BPF_MEM = 0x60 + BPF_MEMWORDS = 0x10 + BPF_MINBUFSIZE = 0x20 + BPF_MINOR_VERSION = 0x1 + BPF_MISC = 0x7 + BPF_MSH = 0xa0 + BPF_MUL = 0x20 + BPF_NEG = 0x80 + BPF_OR = 0x40 + BPF_RELEASE = 0x30bb6 + BPF_RET = 0x6 + BPF_RSH = 0x70 + BPF_ST = 0x2 + BPF_STX = 0x3 + BPF_SUB = 0x10 + BPF_TAX = 0x0 + BPF_TXA = 0x80 + BPF_W = 0x0 + BPF_X = 0x8 + BRKINT = 0x2 + CFLUSH = 0xf + CLOCAL = 0x8000 + CREAD = 0x800 + CS5 = 0x0 + CS6 = 0x100 + CS7 = 0x200 + CS8 = 0x300 + CSIZE = 0x300 + CSTART = 0x11 + CSTATUS = 0x14 + CSTOP = 0x13 + CSTOPB = 0x400 + CSUSP = 0x1a + CTL_MAXNAME = 0xc + CTL_NET = 0x4 + DLT_A429 = 0xb8 + DLT_A653_ICM = 0xb9 + DLT_AIRONET_HEADER = 0x78 + DLT_APPLE_IP_OVER_IEEE1394 = 0x8a + DLT_ARCNET = 0x7 + DLT_ARCNET_LINUX = 0x81 + DLT_ATM_CLIP = 0x13 + DLT_ATM_RFC1483 = 0xb + DLT_AURORA = 0x7e + DLT_AX25 = 0x3 + DLT_AX25_KISS = 0xca + DLT_BACNET_MS_TP = 0xa5 + DLT_BLUETOOTH_HCI_H4 = 0xbb + DLT_BLUETOOTH_HCI_H4_WITH_PHDR = 0xc9 + DLT_CAN20B = 0xbe + DLT_CHAOS = 0x5 + DLT_CHDLC = 0x68 + DLT_CISCO_IOS = 0x76 + DLT_C_HDLC = 0x68 + DLT_C_HDLC_WITH_DIR = 0xcd + DLT_DOCSIS = 0x8f + DLT_ECONET = 0x73 + DLT_EN10MB = 0x1 + DLT_EN3MB = 0x2 + DLT_ENC = 0x6d + DLT_ERF = 0xc5 + DLT_ERF_ETH = 0xaf + DLT_ERF_POS = 0xb0 + DLT_FDDI = 0xa + DLT_FLEXRAY = 0xd2 + DLT_FRELAY = 0x6b + DLT_FRELAY_WITH_DIR = 0xce + DLT_GCOM_SERIAL = 0xad + DLT_GCOM_T1E1 = 0xac + DLT_GPF_F = 0xab + DLT_GPF_T = 0xaa + DLT_GPRS_LLC = 0xa9 + DLT_HHDLC = 0x79 + DLT_IBM_SN = 0x92 + DLT_IBM_SP = 0x91 + DLT_IEEE802 = 0x6 + DLT_IEEE802_11 = 0x69 + DLT_IEEE802_11_RADIO = 0x7f + DLT_IEEE802_11_RADIO_AVS = 0xa3 + DLT_IEEE802_15_4 = 0xc3 + DLT_IEEE802_15_4_LINUX = 0xbf + 
DLT_IEEE802_15_4_NONASK_PHY = 0xd7 + DLT_IEEE802_16_MAC_CPS = 0xbc + DLT_IEEE802_16_MAC_CPS_RADIO = 0xc1 + DLT_IPFILTER = 0x74 + DLT_IPMB = 0xc7 + DLT_IPMB_LINUX = 0xd1 + DLT_IP_OVER_FC = 0x7a + DLT_JUNIPER_ATM1 = 0x89 + DLT_JUNIPER_ATM2 = 0x87 + DLT_JUNIPER_CHDLC = 0xb5 + DLT_JUNIPER_ES = 0x84 + DLT_JUNIPER_ETHER = 0xb2 + DLT_JUNIPER_FRELAY = 0xb4 + DLT_JUNIPER_GGSN = 0x85 + DLT_JUNIPER_ISM = 0xc2 + DLT_JUNIPER_MFR = 0x86 + DLT_JUNIPER_MLFR = 0x83 + DLT_JUNIPER_MLPPP = 0x82 + DLT_JUNIPER_MONITOR = 0xa4 + DLT_JUNIPER_PIC_PEER = 0xae + DLT_JUNIPER_PPP = 0xb3 + DLT_JUNIPER_PPPOE = 0xa7 + DLT_JUNIPER_PPPOE_ATM = 0xa8 + DLT_JUNIPER_SERVICES = 0x88 + DLT_JUNIPER_ST = 0xc8 + DLT_JUNIPER_VP = 0xb7 + DLT_LAPB_WITH_DIR = 0xcf + DLT_LAPD = 0xcb + DLT_LIN = 0xd4 + DLT_LINUX_IRDA = 0x90 + DLT_LINUX_LAPD = 0xb1 + DLT_LINUX_SLL = 0x71 + DLT_LOOP = 0x6c + DLT_LTALK = 0x72 + DLT_MFR = 0xb6 + DLT_MOST = 0xd3 + DLT_MTP2 = 0x8c + DLT_MTP2_WITH_PHDR = 0x8b + DLT_MTP3 = 0x8d + DLT_NULL = 0x0 + DLT_PCI_EXP = 0x7d + DLT_PFLOG = 0x75 + DLT_PFSYNC = 0x12 + DLT_PPI = 0xc0 + DLT_PPP = 0x9 + DLT_PPP_BSDOS = 0x10 + DLT_PPP_ETHER = 0x33 + DLT_PPP_PPPD = 0xa6 + DLT_PPP_SERIAL = 0x32 + DLT_PPP_WITH_DIR = 0xcc + DLT_PRISM_HEADER = 0x77 + DLT_PRONET = 0x4 + DLT_RAIF1 = 0xc6 + DLT_RAW = 0xc + DLT_REDBACK_SMARTEDGE = 0x20 + DLT_RIO = 0x7c + DLT_SCCP = 0x8e + DLT_SITA = 0xc4 + DLT_SLIP = 0x8 + DLT_SLIP_BSDOS = 0xf + DLT_SUNATM = 0x7b + DLT_SYMANTEC_FIREWALL = 0x63 + DLT_TZSP = 0x80 + DLT_USB = 0xba + DLT_USB_LINUX = 0xbd + DLT_X2E_SERIAL = 0xd5 + DLT_X2E_XORAYA = 0xd6 + DT_BLK = 0x6 + DT_CHR = 0x2 + DT_DBF = 0xf + DT_DIR = 0x4 + DT_FIFO = 0x1 + DT_LNK = 0xa + DT_REG = 0x8 + DT_SOCK = 0xc + DT_UNKNOWN = 0x0 + DT_WHT = 0xe + ECHO = 0x8 + ECHOCTL = 0x40 + ECHOE = 0x2 + ECHOK = 0x4 + ECHOKE = 0x1 + ECHONL = 0x10 + ECHOPRT = 0x20 + EVFILT_AIO = -0x3 + EVFILT_EXCEPT = -0x8 + EVFILT_MARKER = 0xf + EVFILT_PROC = -0x5 + EVFILT_READ = -0x1 + EVFILT_SIGNAL = -0x6 + EVFILT_SYSCOUNT = 0x8 + EVFILT_TIMER = -0x7 + EVFILT_VNODE = -0x4 + EVFILT_WRITE = -0x2 + EV_ADD = 0x1 + EV_CLEAR = 0x20 + EV_DELETE = 0x2 + EV_DISABLE = 0x8 + EV_ENABLE = 0x4 + EV_EOF = 0x8000 + EV_ERROR = 0x4000 + EV_FLAG1 = 0x2000 + EV_NODATA = 0x1000 + EV_ONESHOT = 0x10 + EV_SYSFLAGS = 0xf000 + EXTA = 0x4b00 + EXTB = 0x9600 + EXTEXIT_LWP = 0x10000 + EXTEXIT_PROC = 0x0 + EXTEXIT_SETINT = 0x1 + EXTEXIT_SIMPLE = 0x0 + EXTPROC = 0x800 + FD_CLOEXEC = 0x1 + FD_SETSIZE = 0x400 + FLUSHO = 0x800000 + F_DUP2FD = 0xa + F_DUP2FD_CLOEXEC = 0x12 + F_DUPFD = 0x0 + F_DUPFD_CLOEXEC = 0x11 + F_GETFD = 0x1 + F_GETFL = 0x3 + F_GETLK = 0x7 + F_GETOWN = 0x5 + F_OK = 0x0 + F_RDLCK = 0x1 + F_SETFD = 0x2 + F_SETFL = 0x4 + F_SETLK = 0x8 + F_SETLKW = 0x9 + F_SETOWN = 0x6 + F_UNLCK = 0x2 + F_WRLCK = 0x3 + HUPCL = 0x4000 + ICANON = 0x100 + ICMP6_FILTER = 0x12 + ICRNL = 0x100 + IEXTEN = 0x400 + IFAN_ARRIVAL = 0x0 + IFAN_DEPARTURE = 0x1 + IFF_ALLMULTI = 0x200 + IFF_ALTPHYS = 0x4000 + IFF_BROADCAST = 0x2 + IFF_CANTCHANGE = 0x118e72 + IFF_DEBUG = 0x4 + IFF_LINK0 = 0x1000 + IFF_LINK1 = 0x2000 + IFF_LINK2 = 0x4000 + IFF_LOOPBACK = 0x8 + IFF_MONITOR = 0x40000 + IFF_MULTICAST = 0x8000 + IFF_NOARP = 0x80 + IFF_NPOLLING = 0x100000 + IFF_OACTIVE = 0x400 + IFF_OACTIVE_COMPAT = 0x400 + IFF_POINTOPOINT = 0x10 + IFF_POLLING = 0x10000 + IFF_POLLING_COMPAT = 0x10000 + IFF_PPROMISC = 0x20000 + IFF_PROMISC = 0x100 + IFF_RUNNING = 0x40 + IFF_SIMPLEX = 0x800 + IFF_SMART = 0x20 + IFF_STATICARP = 0x80000 + IFF_UP = 0x1 + IFNAMSIZ = 0x10 + IFT_1822 = 0x2 + IFT_A12MPPSWITCH = 0x82 + IFT_AAL2 = 0xbb + IFT_AAL5 = 0x31 + 
IFT_ADSL = 0x5e + IFT_AFLANE8023 = 0x3b + IFT_AFLANE8025 = 0x3c + IFT_ARAP = 0x58 + IFT_ARCNET = 0x23 + IFT_ARCNETPLUS = 0x24 + IFT_ASYNC = 0x54 + IFT_ATM = 0x25 + IFT_ATMDXI = 0x69 + IFT_ATMFUNI = 0x6a + IFT_ATMIMA = 0x6b + IFT_ATMLOGICAL = 0x50 + IFT_ATMRADIO = 0xbd + IFT_ATMSUBINTERFACE = 0x86 + IFT_ATMVCIENDPT = 0xc2 + IFT_ATMVIRTUAL = 0x95 + IFT_BGPPOLICYACCOUNTING = 0xa2 + IFT_BRIDGE = 0xd1 + IFT_BSC = 0x53 + IFT_CARP = 0xf8 + IFT_CCTEMUL = 0x3d + IFT_CEPT = 0x13 + IFT_CES = 0x85 + IFT_CHANNEL = 0x46 + IFT_CNR = 0x55 + IFT_COFFEE = 0x84 + IFT_COMPOSITELINK = 0x9b + IFT_DCN = 0x8d + IFT_DIGITALPOWERLINE = 0x8a + IFT_DIGITALWRAPPEROVERHEADCHANNEL = 0xba + IFT_DLSW = 0x4a + IFT_DOCSCABLEDOWNSTREAM = 0x80 + IFT_DOCSCABLEMACLAYER = 0x7f + IFT_DOCSCABLEUPSTREAM = 0x81 + IFT_DS0 = 0x51 + IFT_DS0BUNDLE = 0x52 + IFT_DS1FDL = 0xaa + IFT_DS3 = 0x1e + IFT_DTM = 0x8c + IFT_DVBASILN = 0xac + IFT_DVBASIOUT = 0xad + IFT_DVBRCCDOWNSTREAM = 0x93 + IFT_DVBRCCMACLAYER = 0x92 + IFT_DVBRCCUPSTREAM = 0x94 + IFT_ENC = 0xf4 + IFT_EON = 0x19 + IFT_EPLRS = 0x57 + IFT_ESCON = 0x49 + IFT_ETHER = 0x6 + IFT_FAITH = 0xf2 + IFT_FAST = 0x7d + IFT_FASTETHER = 0x3e + IFT_FASTETHERFX = 0x45 + IFT_FDDI = 0xf + IFT_FIBRECHANNEL = 0x38 + IFT_FRAMERELAYINTERCONNECT = 0x3a + IFT_FRAMERELAYMPI = 0x5c + IFT_FRDLCIENDPT = 0xc1 + IFT_FRELAY = 0x20 + IFT_FRELAYDCE = 0x2c + IFT_FRF16MFRBUNDLE = 0xa3 + IFT_FRFORWARD = 0x9e + IFT_G703AT2MB = 0x43 + IFT_G703AT64K = 0x42 + IFT_GIF = 0xf0 + IFT_GIGABITETHERNET = 0x75 + IFT_GR303IDT = 0xb2 + IFT_GR303RDT = 0xb1 + IFT_H323GATEKEEPER = 0xa4 + IFT_H323PROXY = 0xa5 + IFT_HDH1822 = 0x3 + IFT_HDLC = 0x76 + IFT_HDSL2 = 0xa8 + IFT_HIPERLAN2 = 0xb7 + IFT_HIPPI = 0x2f + IFT_HIPPIINTERFACE = 0x39 + IFT_HOSTPAD = 0x5a + IFT_HSSI = 0x2e + IFT_HY = 0xe + IFT_IBM370PARCHAN = 0x48 + IFT_IDSL = 0x9a + IFT_IEEE1394 = 0x90 + IFT_IEEE80211 = 0x47 + IFT_IEEE80212 = 0x37 + IFT_IEEE8023ADLAG = 0xa1 + IFT_IFGSN = 0x91 + IFT_IMT = 0xbe + IFT_INTERLEAVE = 0x7c + IFT_IP = 0x7e + IFT_IPFORWARD = 0x8e + IFT_IPOVERATM = 0x72 + IFT_IPOVERCDLC = 0x6d + IFT_IPOVERCLAW = 0x6e + IFT_IPSWITCH = 0x4e + IFT_ISDN = 0x3f + IFT_ISDNBASIC = 0x14 + IFT_ISDNPRIMARY = 0x15 + IFT_ISDNS = 0x4b + IFT_ISDNU = 0x4c + IFT_ISO88022LLC = 0x29 + IFT_ISO88023 = 0x7 + IFT_ISO88024 = 0x8 + IFT_ISO88025 = 0x9 + IFT_ISO88025CRFPINT = 0x62 + IFT_ISO88025DTR = 0x56 + IFT_ISO88025FIBER = 0x73 + IFT_ISO88026 = 0xa + IFT_ISUP = 0xb3 + IFT_L2VLAN = 0x87 + IFT_L3IPVLAN = 0x88 + IFT_L3IPXVLAN = 0x89 + IFT_LAPB = 0x10 + IFT_LAPD = 0x4d + IFT_LAPF = 0x77 + IFT_LOCALTALK = 0x2a + IFT_LOOP = 0x18 + IFT_MEDIAMAILOVERIP = 0x8b + IFT_MFSIGLINK = 0xa7 + IFT_MIOX25 = 0x26 + IFT_MODEM = 0x30 + IFT_MPC = 0x71 + IFT_MPLS = 0xa6 + IFT_MPLSTUNNEL = 0x96 + IFT_MSDSL = 0x8f + IFT_MVL = 0xbf + IFT_MYRINET = 0x63 + IFT_NFAS = 0xaf + IFT_NSIP = 0x1b + IFT_OPTICALCHANNEL = 0xc3 + IFT_OPTICALTRANSPORT = 0xc4 + IFT_OTHER = 0x1 + IFT_P10 = 0xc + IFT_P80 = 0xd + IFT_PARA = 0x22 + IFT_PFLOG = 0xf5 + IFT_PFSYNC = 0xf6 + IFT_PLC = 0xae + IFT_POS = 0xab + IFT_PPP = 0x17 + IFT_PPPMULTILINKBUNDLE = 0x6c + IFT_PROPBWAP2MP = 0xb8 + IFT_PROPCNLS = 0x59 + IFT_PROPDOCSWIRELESSDOWNSTREAM = 0xb5 + IFT_PROPDOCSWIRELESSMACLAYER = 0xb4 + IFT_PROPDOCSWIRELESSUPSTREAM = 0xb6 + IFT_PROPMUX = 0x36 + IFT_PROPVIRTUAL = 0x35 + IFT_PROPWIRELESSP2P = 0x9d + IFT_PTPSERIAL = 0x16 + IFT_PVC = 0xf1 + IFT_QLLC = 0x44 + IFT_RADIOMAC = 0xbc + IFT_RADSL = 0x5f + IFT_REACHDSL = 0xc0 + IFT_RFC1483 = 0x9f + IFT_RS232 = 0x21 + IFT_RSRB = 0x4f + IFT_SDLC = 0x11 + IFT_SDSL = 0x60 + IFT_SHDSL = 0xa9 + IFT_SIP = 
0x1f + IFT_SLIP = 0x1c + IFT_SMDSDXI = 0x2b + IFT_SMDSICIP = 0x34 + IFT_SONET = 0x27 + IFT_SONETOVERHEADCHANNEL = 0xb9 + IFT_SONETPATH = 0x32 + IFT_SONETVT = 0x33 + IFT_SRP = 0x97 + IFT_SS7SIGLINK = 0x9c + IFT_STACKTOSTACK = 0x6f + IFT_STARLAN = 0xb + IFT_STF = 0xf3 + IFT_T1 = 0x12 + IFT_TDLC = 0x74 + IFT_TERMPAD = 0x5b + IFT_TR008 = 0xb0 + IFT_TRANSPHDLC = 0x7b + IFT_TUNNEL = 0x83 + IFT_ULTRA = 0x1d + IFT_USB = 0xa0 + IFT_V11 = 0x40 + IFT_V35 = 0x2d + IFT_V36 = 0x41 + IFT_V37 = 0x78 + IFT_VDSL = 0x61 + IFT_VIRTUALIPADDRESS = 0x70 + IFT_VOICEEM = 0x64 + IFT_VOICEENCAP = 0x67 + IFT_VOICEFXO = 0x65 + IFT_VOICEFXS = 0x66 + IFT_VOICEOVERATM = 0x98 + IFT_VOICEOVERFRAMERELAY = 0x99 + IFT_VOICEOVERIP = 0x68 + IFT_X213 = 0x5d + IFT_X25 = 0x5 + IFT_X25DDN = 0x4 + IFT_X25HUNTGROUP = 0x7a + IFT_X25MLP = 0x79 + IFT_X25PLE = 0x28 + IFT_XETHER = 0x1a + IGNBRK = 0x1 + IGNCR = 0x80 + IGNPAR = 0x4 + IMAXBEL = 0x2000 + INLCR = 0x40 + INPCK = 0x10 + IN_CLASSA_HOST = 0xffffff + IN_CLASSA_MAX = 0x80 + IN_CLASSA_NET = 0xff000000 + IN_CLASSA_NSHIFT = 0x18 + IN_CLASSB_HOST = 0xffff + IN_CLASSB_MAX = 0x10000 + IN_CLASSB_NET = 0xffff0000 + IN_CLASSB_NSHIFT = 0x10 + IN_CLASSC_HOST = 0xff + IN_CLASSC_NET = 0xffffff00 + IN_CLASSC_NSHIFT = 0x8 + IN_CLASSD_HOST = 0xfffffff + IN_CLASSD_NET = 0xf0000000 + IN_CLASSD_NSHIFT = 0x1c + IN_LOOPBACKNET = 0x7f + IPPROTO_3PC = 0x22 + IPPROTO_ADFS = 0x44 + IPPROTO_AH = 0x33 + IPPROTO_AHIP = 0x3d + IPPROTO_APES = 0x63 + IPPROTO_ARGUS = 0xd + IPPROTO_AX25 = 0x5d + IPPROTO_BHA = 0x31 + IPPROTO_BLT = 0x1e + IPPROTO_BRSATMON = 0x4c + IPPROTO_CARP = 0x70 + IPPROTO_CFTP = 0x3e + IPPROTO_CHAOS = 0x10 + IPPROTO_CMTP = 0x26 + IPPROTO_CPHB = 0x49 + IPPROTO_CPNX = 0x48 + IPPROTO_DDP = 0x25 + IPPROTO_DGP = 0x56 + IPPROTO_DIVERT = 0xfe + IPPROTO_DONE = 0x101 + IPPROTO_DSTOPTS = 0x3c + IPPROTO_EGP = 0x8 + IPPROTO_EMCON = 0xe + IPPROTO_ENCAP = 0x62 + IPPROTO_EON = 0x50 + IPPROTO_ESP = 0x32 + IPPROTO_ETHERIP = 0x61 + IPPROTO_FRAGMENT = 0x2c + IPPROTO_GGP = 0x3 + IPPROTO_GMTP = 0x64 + IPPROTO_GRE = 0x2f + IPPROTO_HELLO = 0x3f + IPPROTO_HMP = 0x14 + IPPROTO_HOPOPTS = 0x0 + IPPROTO_ICMP = 0x1 + IPPROTO_ICMPV6 = 0x3a + IPPROTO_IDP = 0x16 + IPPROTO_IDPR = 0x23 + IPPROTO_IDRP = 0x2d + IPPROTO_IGMP = 0x2 + IPPROTO_IGP = 0x55 + IPPROTO_IGRP = 0x58 + IPPROTO_IL = 0x28 + IPPROTO_INLSP = 0x34 + IPPROTO_INP = 0x20 + IPPROTO_IP = 0x0 + IPPROTO_IPCOMP = 0x6c + IPPROTO_IPCV = 0x47 + IPPROTO_IPEIP = 0x5e + IPPROTO_IPIP = 0x4 + IPPROTO_IPPC = 0x43 + IPPROTO_IPV4 = 0x4 + IPPROTO_IPV6 = 0x29 + IPPROTO_IRTP = 0x1c + IPPROTO_KRYPTOLAN = 0x41 + IPPROTO_LARP = 0x5b + IPPROTO_LEAF1 = 0x19 + IPPROTO_LEAF2 = 0x1a + IPPROTO_MAX = 0x100 + IPPROTO_MAXID = 0x34 + IPPROTO_MEAS = 0x13 + IPPROTO_MHRP = 0x30 + IPPROTO_MICP = 0x5f + IPPROTO_MOBILE = 0x37 + IPPROTO_MTP = 0x5c + IPPROTO_MUX = 0x12 + IPPROTO_ND = 0x4d + IPPROTO_NHRP = 0x36 + IPPROTO_NONE = 0x3b + IPPROTO_NSP = 0x1f + IPPROTO_NVPII = 0xb + IPPROTO_OSPFIGP = 0x59 + IPPROTO_PFSYNC = 0xf0 + IPPROTO_PGM = 0x71 + IPPROTO_PIGP = 0x9 + IPPROTO_PIM = 0x67 + IPPROTO_PRM = 0x15 + IPPROTO_PUP = 0xc + IPPROTO_PVP = 0x4b + IPPROTO_RAW = 0xff + IPPROTO_RCCMON = 0xa + IPPROTO_RDP = 0x1b + IPPROTO_ROUTING = 0x2b + IPPROTO_RSVP = 0x2e + IPPROTO_RVD = 0x42 + IPPROTO_SATEXPAK = 0x40 + IPPROTO_SATMON = 0x45 + IPPROTO_SCCSP = 0x60 + IPPROTO_SCTP = 0x84 + IPPROTO_SDRP = 0x2a + IPPROTO_SEP = 0x21 + IPPROTO_SKIP = 0x39 + IPPROTO_SRPC = 0x5a + IPPROTO_ST = 0x7 + IPPROTO_SVMTP = 0x52 + IPPROTO_SWIPE = 0x35 + IPPROTO_TCF = 0x57 + IPPROTO_TCP = 0x6 + IPPROTO_TLSP = 0x38 + IPPROTO_TP = 0x1d + 
IPPROTO_TPXX = 0x27 + IPPROTO_TRUNK1 = 0x17 + IPPROTO_TRUNK2 = 0x18 + IPPROTO_TTP = 0x54 + IPPROTO_UDP = 0x11 + IPPROTO_UNKNOWN = 0x102 + IPPROTO_VINES = 0x53 + IPPROTO_VISA = 0x46 + IPPROTO_VMTP = 0x51 + IPPROTO_WBEXPAK = 0x4f + IPPROTO_WBMON = 0x4e + IPPROTO_WSN = 0x4a + IPPROTO_XNET = 0xf + IPPROTO_XTP = 0x24 + IPV6_AUTOFLOWLABEL = 0x3b + IPV6_BINDV6ONLY = 0x1b + IPV6_CHECKSUM = 0x1a + IPV6_DEFAULT_MULTICAST_HOPS = 0x1 + IPV6_DEFAULT_MULTICAST_LOOP = 0x1 + IPV6_DEFHLIM = 0x40 + IPV6_DONTFRAG = 0x3e + IPV6_DSTOPTS = 0x32 + IPV6_FAITH = 0x1d + IPV6_FLOWINFO_MASK = 0xffffff0f + IPV6_FLOWLABEL_MASK = 0xffff0f00 + IPV6_FRAGTTL = 0x78 + IPV6_FW_ADD = 0x1e + IPV6_FW_DEL = 0x1f + IPV6_FW_FLUSH = 0x20 + IPV6_FW_GET = 0x22 + IPV6_FW_ZERO = 0x21 + IPV6_HLIMDEC = 0x1 + IPV6_HOPLIMIT = 0x2f + IPV6_HOPOPTS = 0x31 + IPV6_IPSEC_POLICY = 0x1c + IPV6_JOIN_GROUP = 0xc + IPV6_LEAVE_GROUP = 0xd + IPV6_MAXHLIM = 0xff + IPV6_MAXPACKET = 0xffff + IPV6_MMTU = 0x500 + IPV6_MSFILTER = 0x4a + IPV6_MULTICAST_HOPS = 0xa + IPV6_MULTICAST_IF = 0x9 + IPV6_MULTICAST_LOOP = 0xb + IPV6_NEXTHOP = 0x30 + IPV6_PATHMTU = 0x2c + IPV6_PKTINFO = 0x2e + IPV6_PKTOPTIONS = 0x34 + IPV6_PORTRANGE = 0xe + IPV6_PORTRANGE_DEFAULT = 0x0 + IPV6_PORTRANGE_HIGH = 0x1 + IPV6_PORTRANGE_LOW = 0x2 + IPV6_PREFER_TEMPADDR = 0x3f + IPV6_RECVDSTOPTS = 0x28 + IPV6_RECVHOPLIMIT = 0x25 + IPV6_RECVHOPOPTS = 0x27 + IPV6_RECVPATHMTU = 0x2b + IPV6_RECVPKTINFO = 0x24 + IPV6_RECVRTHDR = 0x26 + IPV6_RECVTCLASS = 0x39 + IPV6_RTHDR = 0x33 + IPV6_RTHDRDSTOPTS = 0x23 + IPV6_RTHDR_LOOSE = 0x0 + IPV6_RTHDR_STRICT = 0x1 + IPV6_RTHDR_TYPE_0 = 0x0 + IPV6_SOCKOPT_RESERVED1 = 0x3 + IPV6_TCLASS = 0x3d + IPV6_UNICAST_HOPS = 0x4 + IPV6_USE_MIN_MTU = 0x2a + IPV6_V6ONLY = 0x1b + IPV6_VERSION = 0x60 + IPV6_VERSION_MASK = 0xf0 + IP_ADD_MEMBERSHIP = 0xc + IP_DEFAULT_MULTICAST_LOOP = 0x1 + IP_DEFAULT_MULTICAST_TTL = 0x1 + IP_DF = 0x4000 + IP_DROP_MEMBERSHIP = 0xd + IP_DUMMYNET_CONFIGURE = 0x3c + IP_DUMMYNET_DEL = 0x3d + IP_DUMMYNET_FLUSH = 0x3e + IP_DUMMYNET_GET = 0x40 + IP_FAITH = 0x16 + IP_FW_ADD = 0x32 + IP_FW_DEL = 0x33 + IP_FW_FLUSH = 0x34 + IP_FW_GET = 0x36 + IP_FW_RESETLOG = 0x37 + IP_FW_ZERO = 0x35 + IP_HDRINCL = 0x2 + IP_IPSEC_POLICY = 0x15 + IP_MAXPACKET = 0xffff + IP_MAX_MEMBERSHIPS = 0x14 + IP_MF = 0x2000 + IP_MINTTL = 0x42 + IP_MSS = 0x240 + IP_MULTICAST_IF = 0x9 + IP_MULTICAST_LOOP = 0xb + IP_MULTICAST_TTL = 0xa + IP_MULTICAST_VIF = 0xe + IP_OFFMASK = 0x1fff + IP_OPTIONS = 0x1 + IP_PORTRANGE = 0x13 + IP_PORTRANGE_DEFAULT = 0x0 + IP_PORTRANGE_HIGH = 0x1 + IP_PORTRANGE_LOW = 0x2 + IP_RECVDSTADDR = 0x7 + IP_RECVIF = 0x14 + IP_RECVOPTS = 0x5 + IP_RECVRETOPTS = 0x6 + IP_RECVTTL = 0x41 + IP_RETOPTS = 0x8 + IP_RF = 0x8000 + IP_RSVP_OFF = 0x10 + IP_RSVP_ON = 0xf + IP_RSVP_VIF_OFF = 0x12 + IP_RSVP_VIF_ON = 0x11 + IP_TOS = 0x3 + IP_TTL = 0x4 + ISIG = 0x80 + ISTRIP = 0x20 + IXANY = 0x800 + IXOFF = 0x400 + IXON = 0x200 + LOCK_EX = 0x2 + LOCK_NB = 0x4 + LOCK_SH = 0x1 + LOCK_UN = 0x8 + MADV_AUTOSYNC = 0x7 + MADV_CONTROL_END = 0xb + MADV_CONTROL_START = 0xa + MADV_CORE = 0x9 + MADV_DONTNEED = 0x4 + MADV_FREE = 0x5 + MADV_INVAL = 0xa + MADV_NOCORE = 0x8 + MADV_NORMAL = 0x0 + MADV_NOSYNC = 0x6 + MADV_RANDOM = 0x1 + MADV_SEQUENTIAL = 0x2 + MADV_SETMAP = 0xb + MADV_WILLNEED = 0x3 + MAP_ANON = 0x1000 + MAP_COPY = 0x2 + MAP_FILE = 0x0 + MAP_FIXED = 0x10 + MAP_HASSEMAPHORE = 0x200 + MAP_INHERIT = 0x80 + MAP_NOCORE = 0x20000 + MAP_NOEXTEND = 0x100 + MAP_NORESERVE = 0x40 + MAP_NOSYNC = 0x800 + MAP_PRIVATE = 0x2 + MAP_RENAME = 0x20 + MAP_SHARED = 0x1 + MAP_SIZEALIGN = 0x40000 + MAP_STACK 
= 0x400 + MAP_TRYFIXED = 0x10000 + MAP_VPAGETABLE = 0x2000 + MCL_CURRENT = 0x1 + MCL_FUTURE = 0x2 + MSG_CTRUNC = 0x20 + MSG_DONTROUTE = 0x4 + MSG_DONTWAIT = 0x80 + MSG_EOF = 0x100 + MSG_EOR = 0x8 + MSG_FBLOCKING = 0x10000 + MSG_FMASK = 0xffff0000 + MSG_FNONBLOCKING = 0x20000 + MSG_NOSIGNAL = 0x400 + MSG_NOTIFICATION = 0x200 + MSG_OOB = 0x1 + MSG_PEEK = 0x2 + MSG_SYNC = 0x800 + MSG_TRUNC = 0x10 + MSG_WAITALL = 0x40 + MS_ASYNC = 0x1 + MS_INVALIDATE = 0x2 + MS_SYNC = 0x0 + NAME_MAX = 0xff + NET_RT_DUMP = 0x1 + NET_RT_FLAGS = 0x2 + NET_RT_IFLIST = 0x3 + NET_RT_MAXID = 0x4 + NOFLSH = 0x80000000 + NOTE_ATTRIB = 0x8 + NOTE_CHILD = 0x4 + NOTE_DELETE = 0x1 + NOTE_EXEC = 0x20000000 + NOTE_EXIT = 0x80000000 + NOTE_EXTEND = 0x4 + NOTE_FORK = 0x40000000 + NOTE_LINK = 0x10 + NOTE_LOWAT = 0x1 + NOTE_OOB = 0x2 + NOTE_PCTRLMASK = 0xf0000000 + NOTE_PDATAMASK = 0xfffff + NOTE_RENAME = 0x20 + NOTE_REVOKE = 0x40 + NOTE_TRACK = 0x1 + NOTE_TRACKERR = 0x2 + NOTE_WRITE = 0x2 + OCRNL = 0x10 + ONLCR = 0x2 + ONLRET = 0x40 + ONOCR = 0x20 + ONOEOT = 0x8 + OPOST = 0x1 + O_ACCMODE = 0x3 + O_APPEND = 0x8 + O_ASYNC = 0x40 + O_CLOEXEC = 0x20000 + O_CREAT = 0x200 + O_DIRECT = 0x10000 + O_DIRECTORY = 0x8000000 + O_EXCL = 0x800 + O_EXLOCK = 0x20 + O_FAPPEND = 0x100000 + O_FASYNCWRITE = 0x800000 + O_FBLOCKING = 0x40000 + O_FBUFFERED = 0x2000000 + O_FMASK = 0x7fc0000 + O_FNONBLOCKING = 0x80000 + O_FOFFSET = 0x200000 + O_FSYNC = 0x80 + O_FSYNCWRITE = 0x400000 + O_FUNBUFFERED = 0x1000000 + O_MAPONREAD = 0x4000000 + O_NDELAY = 0x4 + O_NOCTTY = 0x8000 + O_NOFOLLOW = 0x100 + O_NONBLOCK = 0x4 + O_RDONLY = 0x0 + O_RDWR = 0x2 + O_SHLOCK = 0x10 + O_SYNC = 0x80 + O_TRUNC = 0x400 + O_WRONLY = 0x1 + PARENB = 0x1000 + PARMRK = 0x8 + PARODD = 0x2000 + PENDIN = 0x20000000 + PRIO_PGRP = 0x1 + PRIO_PROCESS = 0x0 + PRIO_USER = 0x2 + PROT_EXEC = 0x4 + PROT_NONE = 0x0 + PROT_READ = 0x1 + PROT_WRITE = 0x2 + RLIMIT_AS = 0xa + RLIMIT_CORE = 0x4 + RLIMIT_CPU = 0x0 + RLIMIT_DATA = 0x2 + RLIMIT_FSIZE = 0x1 + RLIMIT_NOFILE = 0x8 + RLIMIT_STACK = 0x3 + RLIM_INFINITY = 0x7fffffffffffffff + RTAX_AUTHOR = 0x6 + RTAX_BRD = 0x7 + RTAX_DST = 0x0 + RTAX_GATEWAY = 0x1 + RTAX_GENMASK = 0x3 + RTAX_IFA = 0x5 + RTAX_IFP = 0x4 + RTAX_MAX = 0xb + RTAX_MPLS1 = 0x8 + RTAX_MPLS2 = 0x9 + RTAX_MPLS3 = 0xa + RTAX_NETMASK = 0x2 + RTA_AUTHOR = 0x40 + RTA_BRD = 0x80 + RTA_DST = 0x1 + RTA_GATEWAY = 0x2 + RTA_GENMASK = 0x8 + RTA_IFA = 0x20 + RTA_IFP = 0x10 + RTA_MPLS1 = 0x100 + RTA_MPLS2 = 0x200 + RTA_MPLS3 = 0x400 + RTA_NETMASK = 0x4 + RTF_BLACKHOLE = 0x1000 + RTF_BROADCAST = 0x400000 + RTF_CLONING = 0x100 + RTF_DONE = 0x40 + RTF_DYNAMIC = 0x10 + RTF_GATEWAY = 0x2 + RTF_HOST = 0x4 + RTF_LLINFO = 0x400 + RTF_LOCAL = 0x200000 + RTF_MODIFIED = 0x20 + RTF_MPLSOPS = 0x1000000 + RTF_MULTICAST = 0x800000 + RTF_PINNED = 0x100000 + RTF_PRCLONING = 0x10000 + RTF_PROTO1 = 0x8000 + RTF_PROTO2 = 0x4000 + RTF_PROTO3 = 0x40000 + RTF_REJECT = 0x8 + RTF_STATIC = 0x800 + RTF_UP = 0x1 + RTF_WASCLONED = 0x20000 + RTF_XRESOLVE = 0x200 + RTM_ADD = 0x1 + RTM_CHANGE = 0x3 + RTM_DELADDR = 0xd + RTM_DELETE = 0x2 + RTM_DELMADDR = 0x10 + RTM_GET = 0x4 + RTM_IEEE80211 = 0x12 + RTM_IFANNOUNCE = 0x11 + RTM_IFINFO = 0xe + RTM_LOCK = 0x8 + RTM_LOSING = 0x5 + RTM_MISS = 0x7 + RTM_NEWADDR = 0xc + RTM_NEWMADDR = 0xf + RTM_OLDADD = 0x9 + RTM_OLDDEL = 0xa + RTM_REDIRECT = 0x6 + RTM_RESOLVE = 0xb + RTM_RTTUNIT = 0xf4240 + RTM_VERSION = 0x6 + RTV_EXPIRE = 0x4 + RTV_HOPCOUNT = 0x2 + RTV_IWCAPSEGS = 0x400 + RTV_IWMAXSEGS = 0x200 + RTV_MSL = 0x100 + RTV_MTU = 0x1 + RTV_RPIPE = 0x8 + RTV_RTT = 0x40 + RTV_RTTVAR = 0x80 + 
RTV_SPIPE = 0x10 + RTV_SSTHRESH = 0x20 + RUSAGE_CHILDREN = -0x1 + RUSAGE_SELF = 0x0 + SCM_CREDS = 0x3 + SCM_RIGHTS = 0x1 + SCM_TIMESTAMP = 0x2 + SHUT_RD = 0x0 + SHUT_RDWR = 0x2 + SHUT_WR = 0x1 + SIOCADDMULTI = 0x80206931 + SIOCADDRT = 0x8040720a + SIOCAIFADDR = 0x8040691a + SIOCALIFADDR = 0x8118691b + SIOCATMARK = 0x40047307 + SIOCDELMULTI = 0x80206932 + SIOCDELRT = 0x8040720b + SIOCDIFADDR = 0x80206919 + SIOCDIFPHYADDR = 0x80206949 + SIOCDLIFADDR = 0x8118691d + SIOCGDRVSPEC = 0xc028697b + SIOCGETSGCNT = 0xc0207210 + SIOCGETVIFCNT = 0xc028720f + SIOCGHIWAT = 0x40047301 + SIOCGIFADDR = 0xc0206921 + SIOCGIFBRDADDR = 0xc0206923 + SIOCGIFCAP = 0xc020691f + SIOCGIFCONF = 0xc0106924 + SIOCGIFDATA = 0xc0206926 + SIOCGIFDSTADDR = 0xc0206922 + SIOCGIFFLAGS = 0xc0206911 + SIOCGIFGENERIC = 0xc020693a + SIOCGIFGMEMB = 0xc028698a + SIOCGIFINDEX = 0xc0206920 + SIOCGIFMEDIA = 0xc0306938 + SIOCGIFMETRIC = 0xc0206917 + SIOCGIFMTU = 0xc0206933 + SIOCGIFNETMASK = 0xc0206925 + SIOCGIFPDSTADDR = 0xc0206948 + SIOCGIFPHYS = 0xc0206935 + SIOCGIFPOLLCPU = 0xc020697e + SIOCGIFPSRCADDR = 0xc0206947 + SIOCGIFSTATUS = 0xc331693b + SIOCGIFTSOLEN = 0xc0206980 + SIOCGLIFADDR = 0xc118691c + SIOCGLIFPHYADDR = 0xc118694b + SIOCGLOWAT = 0x40047303 + SIOCGPGRP = 0x40047309 + SIOCGPRIVATE_0 = 0xc0206950 + SIOCGPRIVATE_1 = 0xc0206951 + SIOCIFCREATE = 0xc020697a + SIOCIFCREATE2 = 0xc020697c + SIOCIFDESTROY = 0x80206979 + SIOCIFGCLONERS = 0xc0106978 + SIOCSDRVSPEC = 0x8028697b + SIOCSHIWAT = 0x80047300 + SIOCSIFADDR = 0x8020690c + SIOCSIFBRDADDR = 0x80206913 + SIOCSIFCAP = 0x8020691e + SIOCSIFDSTADDR = 0x8020690e + SIOCSIFFLAGS = 0x80206910 + SIOCSIFGENERIC = 0x80206939 + SIOCSIFLLADDR = 0x8020693c + SIOCSIFMEDIA = 0xc0206937 + SIOCSIFMETRIC = 0x80206918 + SIOCSIFMTU = 0x80206934 + SIOCSIFNAME = 0x80206928 + SIOCSIFNETMASK = 0x80206916 + SIOCSIFPHYADDR = 0x80406946 + SIOCSIFPHYS = 0x80206936 + SIOCSIFPOLLCPU = 0x8020697d + SIOCSIFTSOLEN = 0x8020697f + SIOCSLIFPHYADDR = 0x8118694a + SIOCSLOWAT = 0x80047302 + SIOCSPGRP = 0x80047308 + SOCK_DGRAM = 0x2 + SOCK_MAXADDRLEN = 0xff + SOCK_RAW = 0x3 + SOCK_RDM = 0x4 + SOCK_SEQPACKET = 0x5 + SOCK_STREAM = 0x1 + SOL_SOCKET = 0xffff + SOMAXCONN = 0x80 + SO_ACCEPTCONN = 0x2 + SO_ACCEPTFILTER = 0x1000 + SO_BROADCAST = 0x20 + SO_DEBUG = 0x1 + SO_DONTROUTE = 0x10 + SO_ERROR = 0x1007 + SO_KEEPALIVE = 0x8 + SO_LINGER = 0x80 + SO_NOSIGPIPE = 0x800 + SO_OOBINLINE = 0x100 + SO_RCVBUF = 0x1002 + SO_RCVLOWAT = 0x1004 + SO_RCVTIMEO = 0x1006 + SO_REUSEADDR = 0x4 + SO_REUSEPORT = 0x200 + SO_SNDBUF = 0x1001 + SO_SNDLOWAT = 0x1003 + SO_SNDSPACE = 0x100a + SO_SNDTIMEO = 0x1005 + SO_TIMESTAMP = 0x400 + SO_TYPE = 0x1008 + SO_USELOOPBACK = 0x40 + TCIFLUSH = 0x1 + TCIOFLUSH = 0x3 + TCOFLUSH = 0x2 + TCP_FASTKEEP = 0x80 + TCP_KEEPCNT = 0x400 + TCP_KEEPIDLE = 0x100 + TCP_KEEPINIT = 0x20 + TCP_KEEPINTVL = 0x200 + TCP_MAXBURST = 0x4 + TCP_MAXHLEN = 0x3c + TCP_MAXOLEN = 0x28 + TCP_MAXSEG = 0x2 + TCP_MAXWIN = 0xffff + TCP_MAX_WINSHIFT = 0xe + TCP_MINMSS = 0x100 + TCP_MIN_WINSHIFT = 0x5 + TCP_MSS = 0x200 + TCP_NODELAY = 0x1 + TCP_NOOPT = 0x8 + TCP_NOPUSH = 0x4 + TCP_SIGNATURE_ENABLE = 0x10 + TCSAFLUSH = 0x2 + TIOCCBRK = 0x2000747a + TIOCCDTR = 0x20007478 + TIOCCONS = 0x80047462 + TIOCDCDTIMESTAMP = 0x40107458 + TIOCDRAIN = 0x2000745e + TIOCEXCL = 0x2000740d + TIOCEXT = 0x80047460 + TIOCFLUSH = 0x80047410 + TIOCGDRAINWAIT = 0x40047456 + TIOCGETA = 0x402c7413 + TIOCGETD = 0x4004741a + TIOCGPGRP = 0x40047477 + TIOCGSID = 0x40047463 + TIOCGSIZE = 0x40087468 + TIOCGWINSZ = 0x40087468 + TIOCISPTMASTER = 0x20007455 + TIOCMBIC = 
0x8004746b + TIOCMBIS = 0x8004746c + TIOCMGDTRWAIT = 0x4004745a + TIOCMGET = 0x4004746a + TIOCMODG = 0x40047403 + TIOCMODS = 0x80047404 + TIOCMSDTRWAIT = 0x8004745b + TIOCMSET = 0x8004746d + TIOCM_CAR = 0x40 + TIOCM_CD = 0x40 + TIOCM_CTS = 0x20 + TIOCM_DSR = 0x100 + TIOCM_DTR = 0x2 + TIOCM_LE = 0x1 + TIOCM_RI = 0x80 + TIOCM_RNG = 0x80 + TIOCM_RTS = 0x4 + TIOCM_SR = 0x10 + TIOCM_ST = 0x8 + TIOCNOTTY = 0x20007471 + TIOCNXCL = 0x2000740e + TIOCOUTQ = 0x40047473 + TIOCPKT = 0x80047470 + TIOCPKT_DATA = 0x0 + TIOCPKT_DOSTOP = 0x20 + TIOCPKT_FLUSHREAD = 0x1 + TIOCPKT_FLUSHWRITE = 0x2 + TIOCPKT_IOCTL = 0x40 + TIOCPKT_NOSTOP = 0x10 + TIOCPKT_START = 0x8 + TIOCPKT_STOP = 0x4 + TIOCREMOTE = 0x80047469 + TIOCSBRK = 0x2000747b + TIOCSCTTY = 0x20007461 + TIOCSDRAINWAIT = 0x80047457 + TIOCSDTR = 0x20007479 + TIOCSETA = 0x802c7414 + TIOCSETAF = 0x802c7416 + TIOCSETAW = 0x802c7415 + TIOCSETD = 0x8004741b + TIOCSIG = 0x2000745f + TIOCSPGRP = 0x80047476 + TIOCSSIZE = 0x80087467 + TIOCSTART = 0x2000746e + TIOCSTAT = 0x20007465 + TIOCSTI = 0x80017472 + TIOCSTOP = 0x2000746f + TIOCSWINSZ = 0x80087467 + TIOCTIMESTAMP = 0x40107459 + TIOCUCNTL = 0x80047466 + TOSTOP = 0x400000 + VCHECKPT = 0x13 + VDISCARD = 0xf + VDSUSP = 0xb + VEOF = 0x0 + VEOL = 0x1 + VEOL2 = 0x2 + VERASE = 0x3 + VERASE2 = 0x7 + VINTR = 0x8 + VKILL = 0x5 + VLNEXT = 0xe + VMIN = 0x10 + VQUIT = 0x9 + VREPRINT = 0x6 + VSTART = 0xc + VSTATUS = 0x12 + VSTOP = 0xd + VSUSP = 0xa + VTIME = 0x11 + VWERASE = 0x4 + WCONTINUED = 0x4 + WCOREFLAG = 0x80 + WLINUXCLONE = 0x80000000 + WNOHANG = 0x1 + WSTOPPED = 0x7f + WUNTRACED = 0x2 +) + +// Errors +const ( + E2BIG = syscall.Errno(0x7) + EACCES = syscall.Errno(0xd) + EADDRINUSE = syscall.Errno(0x30) + EADDRNOTAVAIL = syscall.Errno(0x31) + EAFNOSUPPORT = syscall.Errno(0x2f) + EAGAIN = syscall.Errno(0x23) + EALREADY = syscall.Errno(0x25) + EASYNC = syscall.Errno(0x63) + EAUTH = syscall.Errno(0x50) + EBADF = syscall.Errno(0x9) + EBADMSG = syscall.Errno(0x59) + EBADRPC = syscall.Errno(0x48) + EBUSY = syscall.Errno(0x10) + ECANCELED = syscall.Errno(0x55) + ECHILD = syscall.Errno(0xa) + ECONNABORTED = syscall.Errno(0x35) + ECONNREFUSED = syscall.Errno(0x3d) + ECONNRESET = syscall.Errno(0x36) + EDEADLK = syscall.Errno(0xb) + EDESTADDRREQ = syscall.Errno(0x27) + EDOM = syscall.Errno(0x21) + EDOOFUS = syscall.Errno(0x58) + EDQUOT = syscall.Errno(0x45) + EEXIST = syscall.Errno(0x11) + EFAULT = syscall.Errno(0xe) + EFBIG = syscall.Errno(0x1b) + EFTYPE = syscall.Errno(0x4f) + EHOSTDOWN = syscall.Errno(0x40) + EHOSTUNREACH = syscall.Errno(0x41) + EIDRM = syscall.Errno(0x52) + EILSEQ = syscall.Errno(0x56) + EINPROGRESS = syscall.Errno(0x24) + EINTR = syscall.Errno(0x4) + EINVAL = syscall.Errno(0x16) + EIO = syscall.Errno(0x5) + EISCONN = syscall.Errno(0x38) + EISDIR = syscall.Errno(0x15) + ELAST = syscall.Errno(0x63) + ELOOP = syscall.Errno(0x3e) + EMFILE = syscall.Errno(0x18) + EMLINK = syscall.Errno(0x1f) + EMSGSIZE = syscall.Errno(0x28) + EMULTIHOP = syscall.Errno(0x5a) + ENAMETOOLONG = syscall.Errno(0x3f) + ENEEDAUTH = syscall.Errno(0x51) + ENETDOWN = syscall.Errno(0x32) + ENETRESET = syscall.Errno(0x34) + ENETUNREACH = syscall.Errno(0x33) + ENFILE = syscall.Errno(0x17) + ENOATTR = syscall.Errno(0x57) + ENOBUFS = syscall.Errno(0x37) + ENODEV = syscall.Errno(0x13) + ENOENT = syscall.Errno(0x2) + ENOEXEC = syscall.Errno(0x8) + ENOLCK = syscall.Errno(0x4d) + ENOLINK = syscall.Errno(0x5b) + ENOMEDIUM = syscall.Errno(0x5d) + ENOMEM = syscall.Errno(0xc) + ENOMSG = syscall.Errno(0x53) + ENOPROTOOPT = syscall.Errno(0x2a) + ENOSPC 
= syscall.Errno(0x1c) + ENOSYS = syscall.Errno(0x4e) + ENOTBLK = syscall.Errno(0xf) + ENOTCONN = syscall.Errno(0x39) + ENOTDIR = syscall.Errno(0x14) + ENOTEMPTY = syscall.Errno(0x42) + ENOTSOCK = syscall.Errno(0x26) + ENOTSUP = syscall.Errno(0x2d) + ENOTTY = syscall.Errno(0x19) + ENXIO = syscall.Errno(0x6) + EOPNOTSUPP = syscall.Errno(0x2d) + EOVERFLOW = syscall.Errno(0x54) + EPERM = syscall.Errno(0x1) + EPFNOSUPPORT = syscall.Errno(0x2e) + EPIPE = syscall.Errno(0x20) + EPROCLIM = syscall.Errno(0x43) + EPROCUNAVAIL = syscall.Errno(0x4c) + EPROGMISMATCH = syscall.Errno(0x4b) + EPROGUNAVAIL = syscall.Errno(0x4a) + EPROTO = syscall.Errno(0x5c) + EPROTONOSUPPORT = syscall.Errno(0x2b) + EPROTOTYPE = syscall.Errno(0x29) + ERANGE = syscall.Errno(0x22) + EREMOTE = syscall.Errno(0x47) + EROFS = syscall.Errno(0x1e) + ERPCMISMATCH = syscall.Errno(0x49) + ESHUTDOWN = syscall.Errno(0x3a) + ESOCKTNOSUPPORT = syscall.Errno(0x2c) + ESPIPE = syscall.Errno(0x1d) + ESRCH = syscall.Errno(0x3) + ESTALE = syscall.Errno(0x46) + ETIMEDOUT = syscall.Errno(0x3c) + ETOOMANYREFS = syscall.Errno(0x3b) + ETXTBSY = syscall.Errno(0x1a) + EUNUSED94 = syscall.Errno(0x5e) + EUNUSED95 = syscall.Errno(0x5f) + EUNUSED96 = syscall.Errno(0x60) + EUNUSED97 = syscall.Errno(0x61) + EUNUSED98 = syscall.Errno(0x62) + EUSERS = syscall.Errno(0x44) + EWOULDBLOCK = syscall.Errno(0x23) + EXDEV = syscall.Errno(0x12) +) + +// Signals +const ( + SIGABRT = syscall.Signal(0x6) + SIGALRM = syscall.Signal(0xe) + SIGBUS = syscall.Signal(0xa) + SIGCHLD = syscall.Signal(0x14) + SIGCKPT = syscall.Signal(0x21) + SIGCKPTEXIT = syscall.Signal(0x22) + SIGCONT = syscall.Signal(0x13) + SIGEMT = syscall.Signal(0x7) + SIGFPE = syscall.Signal(0x8) + SIGHUP = syscall.Signal(0x1) + SIGILL = syscall.Signal(0x4) + SIGINFO = syscall.Signal(0x1d) + SIGINT = syscall.Signal(0x2) + SIGIO = syscall.Signal(0x17) + SIGIOT = syscall.Signal(0x6) + SIGKILL = syscall.Signal(0x9) + SIGPIPE = syscall.Signal(0xd) + SIGPROF = syscall.Signal(0x1b) + SIGQUIT = syscall.Signal(0x3) + SIGSEGV = syscall.Signal(0xb) + SIGSTOP = syscall.Signal(0x11) + SIGSYS = syscall.Signal(0xc) + SIGTERM = syscall.Signal(0xf) + SIGTHR = syscall.Signal(0x20) + SIGTRAP = syscall.Signal(0x5) + SIGTSTP = syscall.Signal(0x12) + SIGTTIN = syscall.Signal(0x15) + SIGTTOU = syscall.Signal(0x16) + SIGURG = syscall.Signal(0x10) + SIGUSR1 = syscall.Signal(0x1e) + SIGUSR2 = syscall.Signal(0x1f) + SIGVTALRM = syscall.Signal(0x1a) + SIGWINCH = syscall.Signal(0x1c) + SIGXCPU = syscall.Signal(0x18) + SIGXFSZ = syscall.Signal(0x19) +) + +// Error table +var errors = [...]string{ + 1: "operation not permitted", + 2: "no such file or directory", + 3: "no such process", + 4: "interrupted system call", + 5: "input/output error", + 6: "device not configured", + 7: "argument list too long", + 8: "exec format error", + 9: "bad file descriptor", + 10: "no child processes", + 11: "resource deadlock avoided", + 12: "cannot allocate memory", + 13: "permission denied", + 14: "bad address", + 15: "block device required", + 16: "device busy", + 17: "file exists", + 18: "cross-device link", + 19: "operation not supported by device", + 20: "not a directory", + 21: "is a directory", + 22: "invalid argument", + 23: "too many open files in system", + 24: "too many open files", + 25: "inappropriate ioctl for device", + 26: "text file busy", + 27: "file too large", + 28: "no space left on device", + 29: "illegal seek", + 30: "read-only file system", + 31: "too many links", + 32: "broken pipe", + 33: "numerical argument out of domain", + 
34: "result too large", + 35: "resource temporarily unavailable", + 36: "operation now in progress", + 37: "operation already in progress", + 38: "socket operation on non-socket", + 39: "destination address required", + 40: "message too long", + 41: "protocol wrong type for socket", + 42: "protocol not available", + 43: "protocol not supported", + 44: "socket type not supported", + 45: "operation not supported", + 46: "protocol family not supported", + 47: "address family not supported by protocol family", + 48: "address already in use", + 49: "can't assign requested address", + 50: "network is down", + 51: "network is unreachable", + 52: "network dropped connection on reset", + 53: "software caused connection abort", + 54: "connection reset by peer", + 55: "no buffer space available", + 56: "socket is already connected", + 57: "socket is not connected", + 58: "can't send after socket shutdown", + 59: "too many references: can't splice", + 60: "operation timed out", + 61: "connection refused", + 62: "too many levels of symbolic links", + 63: "file name too long", + 64: "host is down", + 65: "no route to host", + 66: "directory not empty", + 67: "too many processes", + 68: "too many users", + 69: "disc quota exceeded", + 70: "stale NFS file handle", + 71: "too many levels of remote in path", + 72: "RPC struct is bad", + 73: "RPC version wrong", + 74: "RPC prog. not avail", + 75: "program version wrong", + 76: "bad procedure for program", + 77: "no locks available", + 78: "function not implemented", + 79: "inappropriate file type or format", + 80: "authentication error", + 81: "need authenticator", + 82: "identifier removed", + 83: "no message of desired type", + 84: "value too large to be stored in data type", + 85: "operation canceled", + 86: "illegal byte sequence", + 87: "attribute not found", + 88: "programming error", + 89: "bad message", + 90: "multihop attempted", + 91: "link has been severed", + 92: "protocol error", + 93: "no medium found", + 94: "unknown error: 94", + 95: "unknown error: 95", + 96: "unknown error: 96", + 97: "unknown error: 97", + 98: "unknown error: 98", + 99: "unknown error: 99", +} + +// Signal table +var signals = [...]string{ + 1: "hangup", + 2: "interrupt", + 3: "quit", + 4: "illegal instruction", + 5: "trace/BPT trap", + 6: "abort trap", + 7: "EMT trap", + 8: "floating point exception", + 9: "killed", + 10: "bus error", + 11: "segmentation fault", + 12: "bad system call", + 13: "broken pipe", + 14: "alarm clock", + 15: "terminated", + 16: "urgent I/O condition", + 17: "suspended (signal)", + 18: "suspended", + 19: "continued", + 20: "child exited", + 21: "stopped (tty input)", + 22: "stopped (tty output)", + 23: "I/O possible", + 24: "cputime limit exceeded", + 25: "filesize limit exceeded", + 26: "virtual timer expired", + 27: "profiling timer expired", + 28: "window size changes", + 29: "information request", + 30: "user defined signal 1", + 31: "user defined signal 2", + 32: "thread Scheduler", + 33: "checkPoint", + 34: "checkPointExit", +} diff --git a/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zerrors_freebsd_386.go b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zerrors_freebsd_386.go new file mode 100644 index 000000000..7b95751c3 --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zerrors_freebsd_386.go @@ -0,0 +1,1743 @@ +// mkerrors.sh -m32 +// MACHINE GENERATED BY THE COMMAND ABOVE; DO NOT EDIT + +// +build 386,freebsd + +// Created by cgo -godefs - DO 
NOT EDIT +// cgo -godefs -- -m32 _const.go + +package unix + +import "syscall" + +const ( + AF_APPLETALK = 0x10 + AF_ARP = 0x23 + AF_ATM = 0x1e + AF_BLUETOOTH = 0x24 + AF_CCITT = 0xa + AF_CHAOS = 0x5 + AF_CNT = 0x15 + AF_COIP = 0x14 + AF_DATAKIT = 0x9 + AF_DECnet = 0xc + AF_DLI = 0xd + AF_E164 = 0x1a + AF_ECMA = 0x8 + AF_HYLINK = 0xf + AF_IEEE80211 = 0x25 + AF_IMPLINK = 0x3 + AF_INET = 0x2 + AF_INET6 = 0x1c + AF_INET6_SDP = 0x2a + AF_INET_SDP = 0x28 + AF_IPX = 0x17 + AF_ISDN = 0x1a + AF_ISO = 0x7 + AF_LAT = 0xe + AF_LINK = 0x12 + AF_LOCAL = 0x1 + AF_MAX = 0x2a + AF_NATM = 0x1d + AF_NETBIOS = 0x6 + AF_NETGRAPH = 0x20 + AF_OSI = 0x7 + AF_PUP = 0x4 + AF_ROUTE = 0x11 + AF_SCLUSTER = 0x22 + AF_SIP = 0x18 + AF_SLOW = 0x21 + AF_SNA = 0xb + AF_UNIX = 0x1 + AF_UNSPEC = 0x0 + AF_VENDOR00 = 0x27 + AF_VENDOR01 = 0x29 + AF_VENDOR02 = 0x2b + AF_VENDOR03 = 0x2d + AF_VENDOR04 = 0x2f + AF_VENDOR05 = 0x31 + AF_VENDOR06 = 0x33 + AF_VENDOR07 = 0x35 + AF_VENDOR08 = 0x37 + AF_VENDOR09 = 0x39 + AF_VENDOR10 = 0x3b + AF_VENDOR11 = 0x3d + AF_VENDOR12 = 0x3f + AF_VENDOR13 = 0x41 + AF_VENDOR14 = 0x43 + AF_VENDOR15 = 0x45 + AF_VENDOR16 = 0x47 + AF_VENDOR17 = 0x49 + AF_VENDOR18 = 0x4b + AF_VENDOR19 = 0x4d + AF_VENDOR20 = 0x4f + AF_VENDOR21 = 0x51 + AF_VENDOR22 = 0x53 + AF_VENDOR23 = 0x55 + AF_VENDOR24 = 0x57 + AF_VENDOR25 = 0x59 + AF_VENDOR26 = 0x5b + AF_VENDOR27 = 0x5d + AF_VENDOR28 = 0x5f + AF_VENDOR29 = 0x61 + AF_VENDOR30 = 0x63 + AF_VENDOR31 = 0x65 + AF_VENDOR32 = 0x67 + AF_VENDOR33 = 0x69 + AF_VENDOR34 = 0x6b + AF_VENDOR35 = 0x6d + AF_VENDOR36 = 0x6f + AF_VENDOR37 = 0x71 + AF_VENDOR38 = 0x73 + AF_VENDOR39 = 0x75 + AF_VENDOR40 = 0x77 + AF_VENDOR41 = 0x79 + AF_VENDOR42 = 0x7b + AF_VENDOR43 = 0x7d + AF_VENDOR44 = 0x7f + AF_VENDOR45 = 0x81 + AF_VENDOR46 = 0x83 + AF_VENDOR47 = 0x85 + B0 = 0x0 + B110 = 0x6e + B115200 = 0x1c200 + B1200 = 0x4b0 + B134 = 0x86 + B14400 = 0x3840 + B150 = 0x96 + B1800 = 0x708 + B19200 = 0x4b00 + B200 = 0xc8 + B230400 = 0x38400 + B2400 = 0x960 + B28800 = 0x7080 + B300 = 0x12c + B38400 = 0x9600 + B460800 = 0x70800 + B4800 = 0x12c0 + B50 = 0x32 + B57600 = 0xe100 + B600 = 0x258 + B7200 = 0x1c20 + B75 = 0x4b + B76800 = 0x12c00 + B921600 = 0xe1000 + B9600 = 0x2580 + BIOCFEEDBACK = 0x8004427c + BIOCFLUSH = 0x20004268 + BIOCGBLEN = 0x40044266 + BIOCGDIRECTION = 0x40044276 + BIOCGDLT = 0x4004426a + BIOCGDLTLIST = 0xc0084279 + BIOCGETBUFMODE = 0x4004427d + BIOCGETIF = 0x4020426b + BIOCGETZMAX = 0x4004427f + BIOCGHDRCMPLT = 0x40044274 + BIOCGRSIG = 0x40044272 + BIOCGRTIMEOUT = 0x4008426e + BIOCGSEESENT = 0x40044276 + BIOCGSTATS = 0x4008426f + BIOCGTSTAMP = 0x40044283 + BIOCIMMEDIATE = 0x80044270 + BIOCLOCK = 0x2000427a + BIOCPROMISC = 0x20004269 + BIOCROTZBUF = 0x400c4280 + BIOCSBLEN = 0xc0044266 + BIOCSDIRECTION = 0x80044277 + BIOCSDLT = 0x80044278 + BIOCSETBUFMODE = 0x8004427e + BIOCSETF = 0x80084267 + BIOCSETFNR = 0x80084282 + BIOCSETIF = 0x8020426c + BIOCSETWF = 0x8008427b + BIOCSETZBUF = 0x800c4281 + BIOCSHDRCMPLT = 0x80044275 + BIOCSRSIG = 0x80044273 + BIOCSRTIMEOUT = 0x8008426d + BIOCSSEESENT = 0x80044277 + BIOCSTSTAMP = 0x80044284 + BIOCVERSION = 0x40044271 + BPF_A = 0x10 + BPF_ABS = 0x20 + BPF_ADD = 0x0 + BPF_ALIGNMENT = 0x4 + BPF_ALU = 0x4 + BPF_AND = 0x50 + BPF_B = 0x10 + BPF_BUFMODE_BUFFER = 0x1 + BPF_BUFMODE_ZBUF = 0x2 + BPF_DIV = 0x30 + BPF_H = 0x8 + BPF_IMM = 0x0 + BPF_IND = 0x40 + BPF_JA = 0x0 + BPF_JEQ = 0x10 + BPF_JGE = 0x30 + BPF_JGT = 0x20 + BPF_JMP = 0x5 + BPF_JSET = 0x40 + BPF_K = 0x0 + BPF_LD = 0x0 + BPF_LDX = 0x1 + BPF_LEN = 0x80 + BPF_LSH = 0x60 + BPF_MAJOR_VERSION = 0x1 + 
BPF_MAXBUFSIZE = 0x80000 + BPF_MAXINSNS = 0x200 + BPF_MEM = 0x60 + BPF_MEMWORDS = 0x10 + BPF_MINBUFSIZE = 0x20 + BPF_MINOR_VERSION = 0x1 + BPF_MISC = 0x7 + BPF_MSH = 0xa0 + BPF_MUL = 0x20 + BPF_NEG = 0x80 + BPF_OR = 0x40 + BPF_RELEASE = 0x30bb6 + BPF_RET = 0x6 + BPF_RSH = 0x70 + BPF_ST = 0x2 + BPF_STX = 0x3 + BPF_SUB = 0x10 + BPF_TAX = 0x0 + BPF_TXA = 0x80 + BPF_T_BINTIME = 0x2 + BPF_T_BINTIME_FAST = 0x102 + BPF_T_BINTIME_MONOTONIC = 0x202 + BPF_T_BINTIME_MONOTONIC_FAST = 0x302 + BPF_T_FAST = 0x100 + BPF_T_FLAG_MASK = 0x300 + BPF_T_FORMAT_MASK = 0x3 + BPF_T_MICROTIME = 0x0 + BPF_T_MICROTIME_FAST = 0x100 + BPF_T_MICROTIME_MONOTONIC = 0x200 + BPF_T_MICROTIME_MONOTONIC_FAST = 0x300 + BPF_T_MONOTONIC = 0x200 + BPF_T_MONOTONIC_FAST = 0x300 + BPF_T_NANOTIME = 0x1 + BPF_T_NANOTIME_FAST = 0x101 + BPF_T_NANOTIME_MONOTONIC = 0x201 + BPF_T_NANOTIME_MONOTONIC_FAST = 0x301 + BPF_T_NONE = 0x3 + BPF_T_NORMAL = 0x0 + BPF_W = 0x0 + BPF_X = 0x8 + BRKINT = 0x2 + CFLUSH = 0xf + CLOCAL = 0x8000 + CLOCK_MONOTONIC = 0x4 + CLOCK_MONOTONIC_FAST = 0xc + CLOCK_MONOTONIC_PRECISE = 0xb + CLOCK_PROCESS_CPUTIME_ID = 0xf + CLOCK_PROF = 0x2 + CLOCK_REALTIME = 0x0 + CLOCK_REALTIME_FAST = 0xa + CLOCK_REALTIME_PRECISE = 0x9 + CLOCK_SECOND = 0xd + CLOCK_THREAD_CPUTIME_ID = 0xe + CLOCK_UPTIME = 0x5 + CLOCK_UPTIME_FAST = 0x8 + CLOCK_UPTIME_PRECISE = 0x7 + CLOCK_VIRTUAL = 0x1 + CREAD = 0x800 + CS5 = 0x0 + CS6 = 0x100 + CS7 = 0x200 + CS8 = 0x300 + CSIZE = 0x300 + CSTART = 0x11 + CSTATUS = 0x14 + CSTOP = 0x13 + CSTOPB = 0x400 + CSUSP = 0x1a + CTL_MAXNAME = 0x18 + CTL_NET = 0x4 + DLT_A429 = 0xb8 + DLT_A653_ICM = 0xb9 + DLT_AIRONET_HEADER = 0x78 + DLT_AOS = 0xde + DLT_APPLE_IP_OVER_IEEE1394 = 0x8a + DLT_ARCNET = 0x7 + DLT_ARCNET_LINUX = 0x81 + DLT_ATM_CLIP = 0x13 + DLT_ATM_RFC1483 = 0xb + DLT_AURORA = 0x7e + DLT_AX25 = 0x3 + DLT_AX25_KISS = 0xca + DLT_BACNET_MS_TP = 0xa5 + DLT_BLUETOOTH_HCI_H4 = 0xbb + DLT_BLUETOOTH_HCI_H4_WITH_PHDR = 0xc9 + DLT_CAN20B = 0xbe + DLT_CAN_SOCKETCAN = 0xe3 + DLT_CHAOS = 0x5 + DLT_CHDLC = 0x68 + DLT_CISCO_IOS = 0x76 + DLT_C_HDLC = 0x68 + DLT_C_HDLC_WITH_DIR = 0xcd + DLT_DBUS = 0xe7 + DLT_DECT = 0xdd + DLT_DOCSIS = 0x8f + DLT_DVB_CI = 0xeb + DLT_ECONET = 0x73 + DLT_EN10MB = 0x1 + DLT_EN3MB = 0x2 + DLT_ENC = 0x6d + DLT_ERF = 0xc5 + DLT_ERF_ETH = 0xaf + DLT_ERF_POS = 0xb0 + DLT_FC_2 = 0xe0 + DLT_FC_2_WITH_FRAME_DELIMS = 0xe1 + DLT_FDDI = 0xa + DLT_FLEXRAY = 0xd2 + DLT_FRELAY = 0x6b + DLT_FRELAY_WITH_DIR = 0xce + DLT_GCOM_SERIAL = 0xad + DLT_GCOM_T1E1 = 0xac + DLT_GPF_F = 0xab + DLT_GPF_T = 0xaa + DLT_GPRS_LLC = 0xa9 + DLT_GSMTAP_ABIS = 0xda + DLT_GSMTAP_UM = 0xd9 + DLT_HHDLC = 0x79 + DLT_IBM_SN = 0x92 + DLT_IBM_SP = 0x91 + DLT_IEEE802 = 0x6 + DLT_IEEE802_11 = 0x69 + DLT_IEEE802_11_RADIO = 0x7f + DLT_IEEE802_11_RADIO_AVS = 0xa3 + DLT_IEEE802_15_4 = 0xc3 + DLT_IEEE802_15_4_LINUX = 0xbf + DLT_IEEE802_15_4_NOFCS = 0xe6 + DLT_IEEE802_15_4_NONASK_PHY = 0xd7 + DLT_IEEE802_16_MAC_CPS = 0xbc + DLT_IEEE802_16_MAC_CPS_RADIO = 0xc1 + DLT_IPFILTER = 0x74 + DLT_IPMB = 0xc7 + DLT_IPMB_LINUX = 0xd1 + DLT_IPNET = 0xe2 + DLT_IPOIB = 0xf2 + DLT_IPV4 = 0xe4 + DLT_IPV6 = 0xe5 + DLT_IP_OVER_FC = 0x7a + DLT_JUNIPER_ATM1 = 0x89 + DLT_JUNIPER_ATM2 = 0x87 + DLT_JUNIPER_ATM_CEMIC = 0xee + DLT_JUNIPER_CHDLC = 0xb5 + DLT_JUNIPER_ES = 0x84 + DLT_JUNIPER_ETHER = 0xb2 + DLT_JUNIPER_FIBRECHANNEL = 0xea + DLT_JUNIPER_FRELAY = 0xb4 + DLT_JUNIPER_GGSN = 0x85 + DLT_JUNIPER_ISM = 0xc2 + DLT_JUNIPER_MFR = 0x86 + DLT_JUNIPER_MLFR = 0x83 + DLT_JUNIPER_MLPPP = 0x82 + DLT_JUNIPER_MONITOR = 0xa4 + DLT_JUNIPER_PIC_PEER = 0xae + DLT_JUNIPER_PPP = 0xb3 
+ DLT_JUNIPER_PPPOE = 0xa7 + DLT_JUNIPER_PPPOE_ATM = 0xa8 + DLT_JUNIPER_SERVICES = 0x88 + DLT_JUNIPER_SRX_E2E = 0xe9 + DLT_JUNIPER_ST = 0xc8 + DLT_JUNIPER_VP = 0xb7 + DLT_JUNIPER_VS = 0xe8 + DLT_LAPB_WITH_DIR = 0xcf + DLT_LAPD = 0xcb + DLT_LIN = 0xd4 + DLT_LINUX_EVDEV = 0xd8 + DLT_LINUX_IRDA = 0x90 + DLT_LINUX_LAPD = 0xb1 + DLT_LINUX_PPP_WITHDIRECTION = 0xa6 + DLT_LINUX_SLL = 0x71 + DLT_LOOP = 0x6c + DLT_LTALK = 0x72 + DLT_MATCHING_MAX = 0xf6 + DLT_MATCHING_MIN = 0x68 + DLT_MFR = 0xb6 + DLT_MOST = 0xd3 + DLT_MPEG_2_TS = 0xf3 + DLT_MPLS = 0xdb + DLT_MTP2 = 0x8c + DLT_MTP2_WITH_PHDR = 0x8b + DLT_MTP3 = 0x8d + DLT_MUX27010 = 0xec + DLT_NETANALYZER = 0xf0 + DLT_NETANALYZER_TRANSPARENT = 0xf1 + DLT_NFC_LLCP = 0xf5 + DLT_NFLOG = 0xef + DLT_NG40 = 0xf4 + DLT_NULL = 0x0 + DLT_PCI_EXP = 0x7d + DLT_PFLOG = 0x75 + DLT_PFSYNC = 0x79 + DLT_PPI = 0xc0 + DLT_PPP = 0x9 + DLT_PPP_BSDOS = 0x10 + DLT_PPP_ETHER = 0x33 + DLT_PPP_PPPD = 0xa6 + DLT_PPP_SERIAL = 0x32 + DLT_PPP_WITH_DIR = 0xcc + DLT_PPP_WITH_DIRECTION = 0xa6 + DLT_PRISM_HEADER = 0x77 + DLT_PRONET = 0x4 + DLT_RAIF1 = 0xc6 + DLT_RAW = 0xc + DLT_RIO = 0x7c + DLT_SCCP = 0x8e + DLT_SITA = 0xc4 + DLT_SLIP = 0x8 + DLT_SLIP_BSDOS = 0xf + DLT_STANAG_5066_D_PDU = 0xed + DLT_SUNATM = 0x7b + DLT_SYMANTEC_FIREWALL = 0x63 + DLT_TZSP = 0x80 + DLT_USB = 0xba + DLT_USB_LINUX = 0xbd + DLT_USB_LINUX_MMAPPED = 0xdc + DLT_USER0 = 0x93 + DLT_USER1 = 0x94 + DLT_USER10 = 0x9d + DLT_USER11 = 0x9e + DLT_USER12 = 0x9f + DLT_USER13 = 0xa0 + DLT_USER14 = 0xa1 + DLT_USER15 = 0xa2 + DLT_USER2 = 0x95 + DLT_USER3 = 0x96 + DLT_USER4 = 0x97 + DLT_USER5 = 0x98 + DLT_USER6 = 0x99 + DLT_USER7 = 0x9a + DLT_USER8 = 0x9b + DLT_USER9 = 0x9c + DLT_WIHART = 0xdf + DLT_X2E_SERIAL = 0xd5 + DLT_X2E_XORAYA = 0xd6 + DT_BLK = 0x6 + DT_CHR = 0x2 + DT_DIR = 0x4 + DT_FIFO = 0x1 + DT_LNK = 0xa + DT_REG = 0x8 + DT_SOCK = 0xc + DT_UNKNOWN = 0x0 + DT_WHT = 0xe + ECHO = 0x8 + ECHOCTL = 0x40 + ECHOE = 0x2 + ECHOK = 0x4 + ECHOKE = 0x1 + ECHONL = 0x10 + ECHOPRT = 0x20 + EVFILT_AIO = -0x3 + EVFILT_FS = -0x9 + EVFILT_LIO = -0xa + EVFILT_PROC = -0x5 + EVFILT_READ = -0x1 + EVFILT_SIGNAL = -0x6 + EVFILT_SYSCOUNT = 0xb + EVFILT_TIMER = -0x7 + EVFILT_USER = -0xb + EVFILT_VNODE = -0x4 + EVFILT_WRITE = -0x2 + EV_ADD = 0x1 + EV_CLEAR = 0x20 + EV_DELETE = 0x2 + EV_DISABLE = 0x8 + EV_DISPATCH = 0x80 + EV_DROP = 0x1000 + EV_ENABLE = 0x4 + EV_EOF = 0x8000 + EV_ERROR = 0x4000 + EV_FLAG1 = 0x2000 + EV_ONESHOT = 0x10 + EV_RECEIPT = 0x40 + EV_SYSFLAGS = 0xf000 + EXTA = 0x4b00 + EXTATTR_NAMESPACE_EMPTY = 0x0 + EXTATTR_NAMESPACE_SYSTEM = 0x2 + EXTATTR_NAMESPACE_USER = 0x1 + EXTB = 0x9600 + EXTPROC = 0x800 + FD_CLOEXEC = 0x1 + FD_SETSIZE = 0x400 + FLUSHO = 0x800000 + F_CANCEL = 0x5 + F_DUP2FD = 0xa + F_DUP2FD_CLOEXEC = 0x12 + F_DUPFD = 0x0 + F_DUPFD_CLOEXEC = 0x11 + F_GETFD = 0x1 + F_GETFL = 0x3 + F_GETLK = 0xb + F_GETOWN = 0x5 + F_OGETLK = 0x7 + F_OK = 0x0 + F_OSETLK = 0x8 + F_OSETLKW = 0x9 + F_RDAHEAD = 0x10 + F_RDLCK = 0x1 + F_READAHEAD = 0xf + F_SETFD = 0x2 + F_SETFL = 0x4 + F_SETLK = 0xc + F_SETLKW = 0xd + F_SETLK_REMOTE = 0xe + F_SETOWN = 0x6 + F_UNLCK = 0x2 + F_UNLCKSYS = 0x4 + F_WRLCK = 0x3 + HUPCL = 0x4000 + ICANON = 0x100 + ICMP6_FILTER = 0x12 + ICRNL = 0x100 + IEXTEN = 0x400 + IFAN_ARRIVAL = 0x0 + IFAN_DEPARTURE = 0x1 + IFF_ALLMULTI = 0x200 + IFF_ALTPHYS = 0x4000 + IFF_BROADCAST = 0x2 + IFF_CANTCHANGE = 0x218f72 + IFF_CANTCONFIG = 0x10000 + IFF_DEBUG = 0x4 + IFF_DRV_OACTIVE = 0x400 + IFF_DRV_RUNNING = 0x40 + IFF_DYING = 0x200000 + IFF_LINK0 = 0x1000 + IFF_LINK1 = 0x2000 + IFF_LINK2 = 0x4000 + IFF_LOOPBACK = 0x8 + 
IFF_MONITOR = 0x40000 + IFF_MULTICAST = 0x8000 + IFF_NOARP = 0x80 + IFF_OACTIVE = 0x400 + IFF_POINTOPOINT = 0x10 + IFF_PPROMISC = 0x20000 + IFF_PROMISC = 0x100 + IFF_RENAMING = 0x400000 + IFF_RUNNING = 0x40 + IFF_SIMPLEX = 0x800 + IFF_SMART = 0x20 + IFF_STATICARP = 0x80000 + IFF_UP = 0x1 + IFNAMSIZ = 0x10 + IFT_1822 = 0x2 + IFT_A12MPPSWITCH = 0x82 + IFT_AAL2 = 0xbb + IFT_AAL5 = 0x31 + IFT_ADSL = 0x5e + IFT_AFLANE8023 = 0x3b + IFT_AFLANE8025 = 0x3c + IFT_ARAP = 0x58 + IFT_ARCNET = 0x23 + IFT_ARCNETPLUS = 0x24 + IFT_ASYNC = 0x54 + IFT_ATM = 0x25 + IFT_ATMDXI = 0x69 + IFT_ATMFUNI = 0x6a + IFT_ATMIMA = 0x6b + IFT_ATMLOGICAL = 0x50 + IFT_ATMRADIO = 0xbd + IFT_ATMSUBINTERFACE = 0x86 + IFT_ATMVCIENDPT = 0xc2 + IFT_ATMVIRTUAL = 0x95 + IFT_BGPPOLICYACCOUNTING = 0xa2 + IFT_BRIDGE = 0xd1 + IFT_BSC = 0x53 + IFT_CARP = 0xf8 + IFT_CCTEMUL = 0x3d + IFT_CEPT = 0x13 + IFT_CES = 0x85 + IFT_CHANNEL = 0x46 + IFT_CNR = 0x55 + IFT_COFFEE = 0x84 + IFT_COMPOSITELINK = 0x9b + IFT_DCN = 0x8d + IFT_DIGITALPOWERLINE = 0x8a + IFT_DIGITALWRAPPEROVERHEADCHANNEL = 0xba + IFT_DLSW = 0x4a + IFT_DOCSCABLEDOWNSTREAM = 0x80 + IFT_DOCSCABLEMACLAYER = 0x7f + IFT_DOCSCABLEUPSTREAM = 0x81 + IFT_DS0 = 0x51 + IFT_DS0BUNDLE = 0x52 + IFT_DS1FDL = 0xaa + IFT_DS3 = 0x1e + IFT_DTM = 0x8c + IFT_DVBASILN = 0xac + IFT_DVBASIOUT = 0xad + IFT_DVBRCCDOWNSTREAM = 0x93 + IFT_DVBRCCMACLAYER = 0x92 + IFT_DVBRCCUPSTREAM = 0x94 + IFT_ENC = 0xf4 + IFT_EON = 0x19 + IFT_EPLRS = 0x57 + IFT_ESCON = 0x49 + IFT_ETHER = 0x6 + IFT_FAITH = 0xf2 + IFT_FAST = 0x7d + IFT_FASTETHER = 0x3e + IFT_FASTETHERFX = 0x45 + IFT_FDDI = 0xf + IFT_FIBRECHANNEL = 0x38 + IFT_FRAMERELAYINTERCONNECT = 0x3a + IFT_FRAMERELAYMPI = 0x5c + IFT_FRDLCIENDPT = 0xc1 + IFT_FRELAY = 0x20 + IFT_FRELAYDCE = 0x2c + IFT_FRF16MFRBUNDLE = 0xa3 + IFT_FRFORWARD = 0x9e + IFT_G703AT2MB = 0x43 + IFT_G703AT64K = 0x42 + IFT_GIF = 0xf0 + IFT_GIGABITETHERNET = 0x75 + IFT_GR303IDT = 0xb2 + IFT_GR303RDT = 0xb1 + IFT_H323GATEKEEPER = 0xa4 + IFT_H323PROXY = 0xa5 + IFT_HDH1822 = 0x3 + IFT_HDLC = 0x76 + IFT_HDSL2 = 0xa8 + IFT_HIPERLAN2 = 0xb7 + IFT_HIPPI = 0x2f + IFT_HIPPIINTERFACE = 0x39 + IFT_HOSTPAD = 0x5a + IFT_HSSI = 0x2e + IFT_HY = 0xe + IFT_IBM370PARCHAN = 0x48 + IFT_IDSL = 0x9a + IFT_IEEE1394 = 0x90 + IFT_IEEE80211 = 0x47 + IFT_IEEE80212 = 0x37 + IFT_IEEE8023ADLAG = 0xa1 + IFT_IFGSN = 0x91 + IFT_IMT = 0xbe + IFT_INFINIBAND = 0xc7 + IFT_INTERLEAVE = 0x7c + IFT_IP = 0x7e + IFT_IPFORWARD = 0x8e + IFT_IPOVERATM = 0x72 + IFT_IPOVERCDLC = 0x6d + IFT_IPOVERCLAW = 0x6e + IFT_IPSWITCH = 0x4e + IFT_IPXIP = 0xf9 + IFT_ISDN = 0x3f + IFT_ISDNBASIC = 0x14 + IFT_ISDNPRIMARY = 0x15 + IFT_ISDNS = 0x4b + IFT_ISDNU = 0x4c + IFT_ISO88022LLC = 0x29 + IFT_ISO88023 = 0x7 + IFT_ISO88024 = 0x8 + IFT_ISO88025 = 0x9 + IFT_ISO88025CRFPINT = 0x62 + IFT_ISO88025DTR = 0x56 + IFT_ISO88025FIBER = 0x73 + IFT_ISO88026 = 0xa + IFT_ISUP = 0xb3 + IFT_L2VLAN = 0x87 + IFT_L3IPVLAN = 0x88 + IFT_L3IPXVLAN = 0x89 + IFT_LAPB = 0x10 + IFT_LAPD = 0x4d + IFT_LAPF = 0x77 + IFT_LOCALTALK = 0x2a + IFT_LOOP = 0x18 + IFT_MEDIAMAILOVERIP = 0x8b + IFT_MFSIGLINK = 0xa7 + IFT_MIOX25 = 0x26 + IFT_MODEM = 0x30 + IFT_MPC = 0x71 + IFT_MPLS = 0xa6 + IFT_MPLSTUNNEL = 0x96 + IFT_MSDSL = 0x8f + IFT_MVL = 0xbf + IFT_MYRINET = 0x63 + IFT_NFAS = 0xaf + IFT_NSIP = 0x1b + IFT_OPTICALCHANNEL = 0xc3 + IFT_OPTICALTRANSPORT = 0xc4 + IFT_OTHER = 0x1 + IFT_P10 = 0xc + IFT_P80 = 0xd + IFT_PARA = 0x22 + IFT_PFLOG = 0xf6 + IFT_PFSYNC = 0xf7 + IFT_PLC = 0xae + IFT_POS = 0xab + IFT_PPP = 0x17 + IFT_PPPMULTILINKBUNDLE = 0x6c + IFT_PROPBWAP2MP = 0xb8 + IFT_PROPCNLS = 0x59 + 
IFT_PROPDOCSWIRELESSDOWNSTREAM = 0xb5 + IFT_PROPDOCSWIRELESSMACLAYER = 0xb4 + IFT_PROPDOCSWIRELESSUPSTREAM = 0xb6 + IFT_PROPMUX = 0x36 + IFT_PROPVIRTUAL = 0x35 + IFT_PROPWIRELESSP2P = 0x9d + IFT_PTPSERIAL = 0x16 + IFT_PVC = 0xf1 + IFT_QLLC = 0x44 + IFT_RADIOMAC = 0xbc + IFT_RADSL = 0x5f + IFT_REACHDSL = 0xc0 + IFT_RFC1483 = 0x9f + IFT_RS232 = 0x21 + IFT_RSRB = 0x4f + IFT_SDLC = 0x11 + IFT_SDSL = 0x60 + IFT_SHDSL = 0xa9 + IFT_SIP = 0x1f + IFT_SLIP = 0x1c + IFT_SMDSDXI = 0x2b + IFT_SMDSICIP = 0x34 + IFT_SONET = 0x27 + IFT_SONETOVERHEADCHANNEL = 0xb9 + IFT_SONETPATH = 0x32 + IFT_SONETVT = 0x33 + IFT_SRP = 0x97 + IFT_SS7SIGLINK = 0x9c + IFT_STACKTOSTACK = 0x6f + IFT_STARLAN = 0xb + IFT_STF = 0xd7 + IFT_T1 = 0x12 + IFT_TDLC = 0x74 + IFT_TERMPAD = 0x5b + IFT_TR008 = 0xb0 + IFT_TRANSPHDLC = 0x7b + IFT_TUNNEL = 0x83 + IFT_ULTRA = 0x1d + IFT_USB = 0xa0 + IFT_V11 = 0x40 + IFT_V35 = 0x2d + IFT_V36 = 0x41 + IFT_V37 = 0x78 + IFT_VDSL = 0x61 + IFT_VIRTUALIPADDRESS = 0x70 + IFT_VOICEEM = 0x64 + IFT_VOICEENCAP = 0x67 + IFT_VOICEFXO = 0x65 + IFT_VOICEFXS = 0x66 + IFT_VOICEOVERATM = 0x98 + IFT_VOICEOVERFRAMERELAY = 0x99 + IFT_VOICEOVERIP = 0x68 + IFT_X213 = 0x5d + IFT_X25 = 0x5 + IFT_X25DDN = 0x4 + IFT_X25HUNTGROUP = 0x7a + IFT_X25MLP = 0x79 + IFT_X25PLE = 0x28 + IFT_XETHER = 0x1a + IGNBRK = 0x1 + IGNCR = 0x80 + IGNPAR = 0x4 + IMAXBEL = 0x2000 + INLCR = 0x40 + INPCK = 0x10 + IN_CLASSA_HOST = 0xffffff + IN_CLASSA_MAX = 0x80 + IN_CLASSA_NET = 0xff000000 + IN_CLASSA_NSHIFT = 0x18 + IN_CLASSB_HOST = 0xffff + IN_CLASSB_MAX = 0x10000 + IN_CLASSB_NET = 0xffff0000 + IN_CLASSB_NSHIFT = 0x10 + IN_CLASSC_HOST = 0xff + IN_CLASSC_NET = 0xffffff00 + IN_CLASSC_NSHIFT = 0x8 + IN_CLASSD_HOST = 0xfffffff + IN_CLASSD_NET = 0xf0000000 + IN_CLASSD_NSHIFT = 0x1c + IN_LOOPBACKNET = 0x7f + IN_RFC3021_MASK = 0xfffffffe + IPPROTO_3PC = 0x22 + IPPROTO_ADFS = 0x44 + IPPROTO_AH = 0x33 + IPPROTO_AHIP = 0x3d + IPPROTO_APES = 0x63 + IPPROTO_ARGUS = 0xd + IPPROTO_AX25 = 0x5d + IPPROTO_BHA = 0x31 + IPPROTO_BLT = 0x1e + IPPROTO_BRSATMON = 0x4c + IPPROTO_CARP = 0x70 + IPPROTO_CFTP = 0x3e + IPPROTO_CHAOS = 0x10 + IPPROTO_CMTP = 0x26 + IPPROTO_CPHB = 0x49 + IPPROTO_CPNX = 0x48 + IPPROTO_DDP = 0x25 + IPPROTO_DGP = 0x56 + IPPROTO_DIVERT = 0x102 + IPPROTO_DONE = 0x101 + IPPROTO_DSTOPTS = 0x3c + IPPROTO_EGP = 0x8 + IPPROTO_EMCON = 0xe + IPPROTO_ENCAP = 0x62 + IPPROTO_EON = 0x50 + IPPROTO_ESP = 0x32 + IPPROTO_ETHERIP = 0x61 + IPPROTO_FRAGMENT = 0x2c + IPPROTO_GGP = 0x3 + IPPROTO_GMTP = 0x64 + IPPROTO_GRE = 0x2f + IPPROTO_HELLO = 0x3f + IPPROTO_HIP = 0x8b + IPPROTO_HMP = 0x14 + IPPROTO_HOPOPTS = 0x0 + IPPROTO_ICMP = 0x1 + IPPROTO_ICMPV6 = 0x3a + IPPROTO_IDP = 0x16 + IPPROTO_IDPR = 0x23 + IPPROTO_IDRP = 0x2d + IPPROTO_IGMP = 0x2 + IPPROTO_IGP = 0x55 + IPPROTO_IGRP = 0x58 + IPPROTO_IL = 0x28 + IPPROTO_INLSP = 0x34 + IPPROTO_INP = 0x20 + IPPROTO_IP = 0x0 + IPPROTO_IPCOMP = 0x6c + IPPROTO_IPCV = 0x47 + IPPROTO_IPEIP = 0x5e + IPPROTO_IPIP = 0x4 + IPPROTO_IPPC = 0x43 + IPPROTO_IPV4 = 0x4 + IPPROTO_IPV6 = 0x29 + IPPROTO_IRTP = 0x1c + IPPROTO_KRYPTOLAN = 0x41 + IPPROTO_LARP = 0x5b + IPPROTO_LEAF1 = 0x19 + IPPROTO_LEAF2 = 0x1a + IPPROTO_MAX = 0x100 + IPPROTO_MAXID = 0x34 + IPPROTO_MEAS = 0x13 + IPPROTO_MH = 0x87 + IPPROTO_MHRP = 0x30 + IPPROTO_MICP = 0x5f + IPPROTO_MOBILE = 0x37 + IPPROTO_MPLS = 0x89 + IPPROTO_MTP = 0x5c + IPPROTO_MUX = 0x12 + IPPROTO_ND = 0x4d + IPPROTO_NHRP = 0x36 + IPPROTO_NONE = 0x3b + IPPROTO_NSP = 0x1f + IPPROTO_NVPII = 0xb + IPPROTO_OLD_DIVERT = 0xfe + IPPROTO_OSPFIGP = 0x59 + IPPROTO_PFSYNC = 0xf0 + IPPROTO_PGM = 0x71 + IPPROTO_PIGP = 
0x9 + IPPROTO_PIM = 0x67 + IPPROTO_PRM = 0x15 + IPPROTO_PUP = 0xc + IPPROTO_PVP = 0x4b + IPPROTO_RAW = 0xff + IPPROTO_RCCMON = 0xa + IPPROTO_RDP = 0x1b + IPPROTO_RESERVED_253 = 0xfd + IPPROTO_RESERVED_254 = 0xfe + IPPROTO_ROUTING = 0x2b + IPPROTO_RSVP = 0x2e + IPPROTO_RVD = 0x42 + IPPROTO_SATEXPAK = 0x40 + IPPROTO_SATMON = 0x45 + IPPROTO_SCCSP = 0x60 + IPPROTO_SCTP = 0x84 + IPPROTO_SDRP = 0x2a + IPPROTO_SEND = 0x103 + IPPROTO_SEP = 0x21 + IPPROTO_SHIM6 = 0x8c + IPPROTO_SKIP = 0x39 + IPPROTO_SPACER = 0x7fff + IPPROTO_SRPC = 0x5a + IPPROTO_ST = 0x7 + IPPROTO_SVMTP = 0x52 + IPPROTO_SWIPE = 0x35 + IPPROTO_TCF = 0x57 + IPPROTO_TCP = 0x6 + IPPROTO_TLSP = 0x38 + IPPROTO_TP = 0x1d + IPPROTO_TPXX = 0x27 + IPPROTO_TRUNK1 = 0x17 + IPPROTO_TRUNK2 = 0x18 + IPPROTO_TTP = 0x54 + IPPROTO_UDP = 0x11 + IPPROTO_UDPLITE = 0x88 + IPPROTO_VINES = 0x53 + IPPROTO_VISA = 0x46 + IPPROTO_VMTP = 0x51 + IPPROTO_WBEXPAK = 0x4f + IPPROTO_WBMON = 0x4e + IPPROTO_WSN = 0x4a + IPPROTO_XNET = 0xf + IPPROTO_XTP = 0x24 + IPV6_AUTOFLOWLABEL = 0x3b + IPV6_BINDANY = 0x40 + IPV6_BINDV6ONLY = 0x1b + IPV6_CHECKSUM = 0x1a + IPV6_DEFAULT_MULTICAST_HOPS = 0x1 + IPV6_DEFAULT_MULTICAST_LOOP = 0x1 + IPV6_DEFHLIM = 0x40 + IPV6_DONTFRAG = 0x3e + IPV6_DSTOPTS = 0x32 + IPV6_FAITH = 0x1d + IPV6_FLOWINFO_MASK = 0xffffff0f + IPV6_FLOWLABEL_MASK = 0xffff0f00 + IPV6_FRAGTTL = 0x78 + IPV6_FW_ADD = 0x1e + IPV6_FW_DEL = 0x1f + IPV6_FW_FLUSH = 0x20 + IPV6_FW_GET = 0x22 + IPV6_FW_ZERO = 0x21 + IPV6_HLIMDEC = 0x1 + IPV6_HOPLIMIT = 0x2f + IPV6_HOPOPTS = 0x31 + IPV6_IPSEC_POLICY = 0x1c + IPV6_JOIN_GROUP = 0xc + IPV6_LEAVE_GROUP = 0xd + IPV6_MAXHLIM = 0xff + IPV6_MAXOPTHDR = 0x800 + IPV6_MAXPACKET = 0xffff + IPV6_MAX_GROUP_SRC_FILTER = 0x200 + IPV6_MAX_MEMBERSHIPS = 0xfff + IPV6_MAX_SOCK_SRC_FILTER = 0x80 + IPV6_MIN_MEMBERSHIPS = 0x1f + IPV6_MMTU = 0x500 + IPV6_MSFILTER = 0x4a + IPV6_MULTICAST_HOPS = 0xa + IPV6_MULTICAST_IF = 0x9 + IPV6_MULTICAST_LOOP = 0xb + IPV6_NEXTHOP = 0x30 + IPV6_PATHMTU = 0x2c + IPV6_PKTINFO = 0x2e + IPV6_PORTRANGE = 0xe + IPV6_PORTRANGE_DEFAULT = 0x0 + IPV6_PORTRANGE_HIGH = 0x1 + IPV6_PORTRANGE_LOW = 0x2 + IPV6_PREFER_TEMPADDR = 0x3f + IPV6_RECVDSTOPTS = 0x28 + IPV6_RECVHOPLIMIT = 0x25 + IPV6_RECVHOPOPTS = 0x27 + IPV6_RECVPATHMTU = 0x2b + IPV6_RECVPKTINFO = 0x24 + IPV6_RECVRTHDR = 0x26 + IPV6_RECVTCLASS = 0x39 + IPV6_RTHDR = 0x33 + IPV6_RTHDRDSTOPTS = 0x23 + IPV6_RTHDR_LOOSE = 0x0 + IPV6_RTHDR_STRICT = 0x1 + IPV6_RTHDR_TYPE_0 = 0x0 + IPV6_SOCKOPT_RESERVED1 = 0x3 + IPV6_TCLASS = 0x3d + IPV6_UNICAST_HOPS = 0x4 + IPV6_USE_MIN_MTU = 0x2a + IPV6_V6ONLY = 0x1b + IPV6_VERSION = 0x60 + IPV6_VERSION_MASK = 0xf0 + IP_ADD_MEMBERSHIP = 0xc + IP_ADD_SOURCE_MEMBERSHIP = 0x46 + IP_BINDANY = 0x18 + IP_BLOCK_SOURCE = 0x48 + IP_DEFAULT_MULTICAST_LOOP = 0x1 + IP_DEFAULT_MULTICAST_TTL = 0x1 + IP_DF = 0x4000 + IP_DONTFRAG = 0x43 + IP_DROP_MEMBERSHIP = 0xd + IP_DROP_SOURCE_MEMBERSHIP = 0x47 + IP_DUMMYNET3 = 0x31 + IP_DUMMYNET_CONFIGURE = 0x3c + IP_DUMMYNET_DEL = 0x3d + IP_DUMMYNET_FLUSH = 0x3e + IP_DUMMYNET_GET = 0x40 + IP_FAITH = 0x16 + IP_FW3 = 0x30 + IP_FW_ADD = 0x32 + IP_FW_DEL = 0x33 + IP_FW_FLUSH = 0x34 + IP_FW_GET = 0x36 + IP_FW_NAT_CFG = 0x38 + IP_FW_NAT_DEL = 0x39 + IP_FW_NAT_GET_CONFIG = 0x3a + IP_FW_NAT_GET_LOG = 0x3b + IP_FW_RESETLOG = 0x37 + IP_FW_TABLE_ADD = 0x28 + IP_FW_TABLE_DEL = 0x29 + IP_FW_TABLE_FLUSH = 0x2a + IP_FW_TABLE_GETSIZE = 0x2b + IP_FW_TABLE_LIST = 0x2c + IP_FW_ZERO = 0x35 + IP_HDRINCL = 0x2 + IP_IPSEC_POLICY = 0x15 + IP_MAXPACKET = 0xffff + IP_MAX_GROUP_SRC_FILTER = 0x200 + IP_MAX_MEMBERSHIPS = 0xfff + 
IP_MAX_SOCK_MUTE_FILTER = 0x80 + IP_MAX_SOCK_SRC_FILTER = 0x80 + IP_MAX_SOURCE_FILTER = 0x400 + IP_MF = 0x2000 + IP_MINTTL = 0x42 + IP_MIN_MEMBERSHIPS = 0x1f + IP_MSFILTER = 0x4a + IP_MSS = 0x240 + IP_MULTICAST_IF = 0x9 + IP_MULTICAST_LOOP = 0xb + IP_MULTICAST_TTL = 0xa + IP_MULTICAST_VIF = 0xe + IP_OFFMASK = 0x1fff + IP_ONESBCAST = 0x17 + IP_OPTIONS = 0x1 + IP_PORTRANGE = 0x13 + IP_PORTRANGE_DEFAULT = 0x0 + IP_PORTRANGE_HIGH = 0x1 + IP_PORTRANGE_LOW = 0x2 + IP_RECVDSTADDR = 0x7 + IP_RECVIF = 0x14 + IP_RECVOPTS = 0x5 + IP_RECVRETOPTS = 0x6 + IP_RECVTOS = 0x44 + IP_RECVTTL = 0x41 + IP_RETOPTS = 0x8 + IP_RF = 0x8000 + IP_RSVP_OFF = 0x10 + IP_RSVP_ON = 0xf + IP_RSVP_VIF_OFF = 0x12 + IP_RSVP_VIF_ON = 0x11 + IP_SENDSRCADDR = 0x7 + IP_TOS = 0x3 + IP_TTL = 0x4 + IP_UNBLOCK_SOURCE = 0x49 + ISIG = 0x80 + ISTRIP = 0x20 + IXANY = 0x800 + IXOFF = 0x400 + IXON = 0x200 + LOCK_EX = 0x2 + LOCK_NB = 0x4 + LOCK_SH = 0x1 + LOCK_UN = 0x8 + MADV_AUTOSYNC = 0x7 + MADV_CORE = 0x9 + MADV_DONTNEED = 0x4 + MADV_FREE = 0x5 + MADV_NOCORE = 0x8 + MADV_NORMAL = 0x0 + MADV_NOSYNC = 0x6 + MADV_PROTECT = 0xa + MADV_RANDOM = 0x1 + MADV_SEQUENTIAL = 0x2 + MADV_WILLNEED = 0x3 + MAP_ALIGNED_SUPER = 0x1000000 + MAP_ALIGNMENT_MASK = -0x1000000 + MAP_ALIGNMENT_SHIFT = 0x18 + MAP_ANON = 0x1000 + MAP_ANONYMOUS = 0x1000 + MAP_COPY = 0x2 + MAP_EXCL = 0x4000 + MAP_FILE = 0x0 + MAP_FIXED = 0x10 + MAP_HASSEMAPHORE = 0x200 + MAP_NOCORE = 0x20000 + MAP_NORESERVE = 0x40 + MAP_NOSYNC = 0x800 + MAP_PREFAULT_READ = 0x40000 + MAP_PRIVATE = 0x2 + MAP_RENAME = 0x20 + MAP_RESERVED0080 = 0x80 + MAP_RESERVED0100 = 0x100 + MAP_SHARED = 0x1 + MAP_STACK = 0x400 + MCL_CURRENT = 0x1 + MCL_FUTURE = 0x2 + MSG_CMSG_CLOEXEC = 0x40000 + MSG_COMPAT = 0x8000 + MSG_CTRUNC = 0x20 + MSG_DONTROUTE = 0x4 + MSG_DONTWAIT = 0x80 + MSG_EOF = 0x100 + MSG_EOR = 0x8 + MSG_NBIO = 0x4000 + MSG_NOSIGNAL = 0x20000 + MSG_NOTIFICATION = 0x2000 + MSG_OOB = 0x1 + MSG_PEEK = 0x2 + MSG_TRUNC = 0x10 + MSG_WAITALL = 0x40 + MS_ASYNC = 0x1 + MS_INVALIDATE = 0x2 + MS_SYNC = 0x0 + NAME_MAX = 0xff + NET_RT_DUMP = 0x1 + NET_RT_FLAGS = 0x2 + NET_RT_IFLIST = 0x3 + NET_RT_IFLISTL = 0x5 + NET_RT_IFMALIST = 0x4 + NET_RT_MAXID = 0x6 + NOFLSH = 0x80000000 + NOTE_ATTRIB = 0x8 + NOTE_CHILD = 0x4 + NOTE_DELETE = 0x1 + NOTE_EXEC = 0x20000000 + NOTE_EXIT = 0x80000000 + NOTE_EXTEND = 0x4 + NOTE_FFAND = 0x40000000 + NOTE_FFCOPY = 0xc0000000 + NOTE_FFCTRLMASK = 0xc0000000 + NOTE_FFLAGSMASK = 0xffffff + NOTE_FFNOP = 0x0 + NOTE_FFOR = 0x80000000 + NOTE_FORK = 0x40000000 + NOTE_LINK = 0x10 + NOTE_LOWAT = 0x1 + NOTE_PCTRLMASK = 0xf0000000 + NOTE_PDATAMASK = 0xfffff + NOTE_RENAME = 0x20 + NOTE_REVOKE = 0x40 + NOTE_TRACK = 0x1 + NOTE_TRACKERR = 0x2 + NOTE_TRIGGER = 0x1000000 + NOTE_WRITE = 0x2 + OCRNL = 0x10 + ONLCR = 0x2 + ONLRET = 0x40 + ONOCR = 0x20 + ONOEOT = 0x8 + OPOST = 0x1 + O_ACCMODE = 0x3 + O_APPEND = 0x8 + O_ASYNC = 0x40 + O_CLOEXEC = 0x100000 + O_CREAT = 0x200 + O_DIRECT = 0x10000 + O_DIRECTORY = 0x20000 + O_EXCL = 0x800 + O_EXEC = 0x40000 + O_EXLOCK = 0x20 + O_FSYNC = 0x80 + O_NDELAY = 0x4 + O_NOCTTY = 0x8000 + O_NOFOLLOW = 0x100 + O_NONBLOCK = 0x4 + O_RDONLY = 0x0 + O_RDWR = 0x2 + O_SHLOCK = 0x10 + O_SYNC = 0x80 + O_TRUNC = 0x400 + O_TTY_INIT = 0x80000 + O_WRONLY = 0x1 + PARENB = 0x1000 + PARMRK = 0x8 + PARODD = 0x2000 + PENDIN = 0x20000000 + PRIO_PGRP = 0x1 + PRIO_PROCESS = 0x0 + PRIO_USER = 0x2 + PROT_EXEC = 0x4 + PROT_NONE = 0x0 + PROT_READ = 0x1 + PROT_WRITE = 0x2 + RLIMIT_AS = 0xa + RLIMIT_CORE = 0x4 + RLIMIT_CPU = 0x0 + RLIMIT_DATA = 0x2 + RLIMIT_FSIZE = 0x1 + RLIMIT_NOFILE = 0x8 + 
RLIMIT_STACK = 0x3 + RLIM_INFINITY = 0x7fffffffffffffff + RTAX_AUTHOR = 0x6 + RTAX_BRD = 0x7 + RTAX_DST = 0x0 + RTAX_GATEWAY = 0x1 + RTAX_GENMASK = 0x3 + RTAX_IFA = 0x5 + RTAX_IFP = 0x4 + RTAX_MAX = 0x8 + RTAX_NETMASK = 0x2 + RTA_AUTHOR = 0x40 + RTA_BRD = 0x80 + RTA_DST = 0x1 + RTA_GATEWAY = 0x2 + RTA_GENMASK = 0x8 + RTA_IFA = 0x20 + RTA_IFP = 0x10 + RTA_NETMASK = 0x4 + RTF_BLACKHOLE = 0x1000 + RTF_BROADCAST = 0x400000 + RTF_DONE = 0x40 + RTF_DYNAMIC = 0x10 + RTF_FMASK = 0x1004d808 + RTF_GATEWAY = 0x2 + RTF_GWFLAG_COMPAT = 0x80000000 + RTF_HOST = 0x4 + RTF_LLDATA = 0x400 + RTF_LLINFO = 0x400 + RTF_LOCAL = 0x200000 + RTF_MODIFIED = 0x20 + RTF_MULTICAST = 0x800000 + RTF_PINNED = 0x100000 + RTF_PRCLONING = 0x10000 + RTF_PROTO1 = 0x8000 + RTF_PROTO2 = 0x4000 + RTF_PROTO3 = 0x40000 + RTF_REJECT = 0x8 + RTF_RNH_LOCKED = 0x40000000 + RTF_STATIC = 0x800 + RTF_STICKY = 0x10000000 + RTF_UP = 0x1 + RTF_XRESOLVE = 0x200 + RTM_ADD = 0x1 + RTM_CHANGE = 0x3 + RTM_DELADDR = 0xd + RTM_DELETE = 0x2 + RTM_DELMADDR = 0x10 + RTM_GET = 0x4 + RTM_IEEE80211 = 0x12 + RTM_IFANNOUNCE = 0x11 + RTM_IFINFO = 0xe + RTM_LOCK = 0x8 + RTM_LOSING = 0x5 + RTM_MISS = 0x7 + RTM_NEWADDR = 0xc + RTM_NEWMADDR = 0xf + RTM_OLDADD = 0x9 + RTM_OLDDEL = 0xa + RTM_REDIRECT = 0x6 + RTM_RESOLVE = 0xb + RTM_RTTUNIT = 0xf4240 + RTM_VERSION = 0x5 + RTV_EXPIRE = 0x4 + RTV_HOPCOUNT = 0x2 + RTV_MTU = 0x1 + RTV_RPIPE = 0x8 + RTV_RTT = 0x40 + RTV_RTTVAR = 0x80 + RTV_SPIPE = 0x10 + RTV_SSTHRESH = 0x20 + RTV_WEIGHT = 0x100 + RT_ALL_FIBS = -0x1 + RT_CACHING_CONTEXT = 0x1 + RT_DEFAULT_FIB = 0x0 + RT_NORTREF = 0x2 + RUSAGE_CHILDREN = -0x1 + RUSAGE_SELF = 0x0 + RUSAGE_THREAD = 0x1 + SCM_BINTIME = 0x4 + SCM_CREDS = 0x3 + SCM_RIGHTS = 0x1 + SCM_TIMESTAMP = 0x2 + SHUT_RD = 0x0 + SHUT_RDWR = 0x2 + SHUT_WR = 0x1 + SIOCADDMULTI = 0x80206931 + SIOCADDRT = 0x8030720a + SIOCAIFADDR = 0x8040691a + SIOCAIFGROUP = 0x80246987 + SIOCALIFADDR = 0x8118691b + SIOCATMARK = 0x40047307 + SIOCDELMULTI = 0x80206932 + SIOCDELRT = 0x8030720b + SIOCDIFADDR = 0x80206919 + SIOCDIFGROUP = 0x80246989 + SIOCDIFPHYADDR = 0x80206949 + SIOCDLIFADDR = 0x8118691d + SIOCGDRVSPEC = 0xc01c697b + SIOCGETSGCNT = 0xc0147210 + SIOCGETVIFCNT = 0xc014720f + SIOCGHIWAT = 0x40047301 + SIOCGIFADDR = 0xc0206921 + SIOCGIFBRDADDR = 0xc0206923 + SIOCGIFCAP = 0xc020691f + SIOCGIFCONF = 0xc0086924 + SIOCGIFDESCR = 0xc020692a + SIOCGIFDSTADDR = 0xc0206922 + SIOCGIFFIB = 0xc020695c + SIOCGIFFLAGS = 0xc0206911 + SIOCGIFGENERIC = 0xc020693a + SIOCGIFGMEMB = 0xc024698a + SIOCGIFGROUP = 0xc0246988 + SIOCGIFINDEX = 0xc0206920 + SIOCGIFMAC = 0xc0206926 + SIOCGIFMEDIA = 0xc0286938 + SIOCGIFMETRIC = 0xc0206917 + SIOCGIFMTU = 0xc0206933 + SIOCGIFNETMASK = 0xc0206925 + SIOCGIFPDSTADDR = 0xc0206948 + SIOCGIFPHYS = 0xc0206935 + SIOCGIFPSRCADDR = 0xc0206947 + SIOCGIFSTATUS = 0xc331693b + SIOCGLIFADDR = 0xc118691c + SIOCGLIFPHYADDR = 0xc118694b + SIOCGLOWAT = 0x40047303 + SIOCGPGRP = 0x40047309 + SIOCGPRIVATE_0 = 0xc0206950 + SIOCGPRIVATE_1 = 0xc0206951 + SIOCIFCREATE = 0xc020697a + SIOCIFCREATE2 = 0xc020697c + SIOCIFDESTROY = 0x80206979 + SIOCIFGCLONERS = 0xc00c6978 + SIOCSDRVSPEC = 0x801c697b + SIOCSHIWAT = 0x80047300 + SIOCSIFADDR = 0x8020690c + SIOCSIFBRDADDR = 0x80206913 + SIOCSIFCAP = 0x8020691e + SIOCSIFDESCR = 0x80206929 + SIOCSIFDSTADDR = 0x8020690e + SIOCSIFFIB = 0x8020695d + SIOCSIFFLAGS = 0x80206910 + SIOCSIFGENERIC = 0x80206939 + SIOCSIFLLADDR = 0x8020693c + SIOCSIFMAC = 0x80206927 + SIOCSIFMEDIA = 0xc0206937 + SIOCSIFMETRIC = 0x80206918 + SIOCSIFMTU = 0x80206934 + SIOCSIFNAME = 0x80206928 + SIOCSIFNETMASK 
= 0x80206916 + SIOCSIFPHYADDR = 0x80406946 + SIOCSIFPHYS = 0x80206936 + SIOCSIFRVNET = 0xc020695b + SIOCSIFVNET = 0xc020695a + SIOCSLIFPHYADDR = 0x8118694a + SIOCSLOWAT = 0x80047302 + SIOCSPGRP = 0x80047308 + SOCK_CLOEXEC = 0x10000000 + SOCK_DGRAM = 0x2 + SOCK_MAXADDRLEN = 0xff + SOCK_NONBLOCK = 0x20000000 + SOCK_RAW = 0x3 + SOCK_RDM = 0x4 + SOCK_SEQPACKET = 0x5 + SOCK_STREAM = 0x1 + SOL_SOCKET = 0xffff + SOMAXCONN = 0x80 + SO_ACCEPTCONN = 0x2 + SO_ACCEPTFILTER = 0x1000 + SO_BINTIME = 0x2000 + SO_BROADCAST = 0x20 + SO_DEBUG = 0x1 + SO_DONTROUTE = 0x10 + SO_ERROR = 0x1007 + SO_KEEPALIVE = 0x8 + SO_LABEL = 0x1009 + SO_LINGER = 0x80 + SO_LISTENINCQLEN = 0x1013 + SO_LISTENQLEN = 0x1012 + SO_LISTENQLIMIT = 0x1011 + SO_NOSIGPIPE = 0x800 + SO_NO_DDP = 0x8000 + SO_NO_OFFLOAD = 0x4000 + SO_OOBINLINE = 0x100 + SO_PEERLABEL = 0x1010 + SO_PROTOCOL = 0x1016 + SO_PROTOTYPE = 0x1016 + SO_RCVBUF = 0x1002 + SO_RCVLOWAT = 0x1004 + SO_RCVTIMEO = 0x1006 + SO_REUSEADDR = 0x4 + SO_REUSEPORT = 0x200 + SO_SETFIB = 0x1014 + SO_SNDBUF = 0x1001 + SO_SNDLOWAT = 0x1003 + SO_SNDTIMEO = 0x1005 + SO_TIMESTAMP = 0x400 + SO_TYPE = 0x1008 + SO_USELOOPBACK = 0x40 + SO_USER_COOKIE = 0x1015 + SO_VENDOR = 0x80000000 + TCIFLUSH = 0x1 + TCIOFLUSH = 0x3 + TCOFLUSH = 0x2 + TCP_CA_NAME_MAX = 0x10 + TCP_CONGESTION = 0x40 + TCP_INFO = 0x20 + TCP_KEEPCNT = 0x400 + TCP_KEEPIDLE = 0x100 + TCP_KEEPINIT = 0x80 + TCP_KEEPINTVL = 0x200 + TCP_MAXBURST = 0x4 + TCP_MAXHLEN = 0x3c + TCP_MAXOLEN = 0x28 + TCP_MAXSEG = 0x2 + TCP_MAXWIN = 0xffff + TCP_MAX_SACK = 0x4 + TCP_MAX_WINSHIFT = 0xe + TCP_MD5SIG = 0x10 + TCP_MINMSS = 0xd8 + TCP_MSS = 0x218 + TCP_NODELAY = 0x1 + TCP_NOOPT = 0x8 + TCP_NOPUSH = 0x4 + TCP_VENDOR = 0x80000000 + TCSAFLUSH = 0x2 + TIOCCBRK = 0x2000747a + TIOCCDTR = 0x20007478 + TIOCCONS = 0x80047462 + TIOCDRAIN = 0x2000745e + TIOCEXCL = 0x2000740d + TIOCEXT = 0x80047460 + TIOCFLUSH = 0x80047410 + TIOCGDRAINWAIT = 0x40047456 + TIOCGETA = 0x402c7413 + TIOCGETD = 0x4004741a + TIOCGPGRP = 0x40047477 + TIOCGPTN = 0x4004740f + TIOCGSID = 0x40047463 + TIOCGWINSZ = 0x40087468 + TIOCMBIC = 0x8004746b + TIOCMBIS = 0x8004746c + TIOCMGDTRWAIT = 0x4004745a + TIOCMGET = 0x4004746a + TIOCMSDTRWAIT = 0x8004745b + TIOCMSET = 0x8004746d + TIOCM_CAR = 0x40 + TIOCM_CD = 0x40 + TIOCM_CTS = 0x20 + TIOCM_DCD = 0x40 + TIOCM_DSR = 0x100 + TIOCM_DTR = 0x2 + TIOCM_LE = 0x1 + TIOCM_RI = 0x80 + TIOCM_RNG = 0x80 + TIOCM_RTS = 0x4 + TIOCM_SR = 0x10 + TIOCM_ST = 0x8 + TIOCNOTTY = 0x20007471 + TIOCNXCL = 0x2000740e + TIOCOUTQ = 0x40047473 + TIOCPKT = 0x80047470 + TIOCPKT_DATA = 0x0 + TIOCPKT_DOSTOP = 0x20 + TIOCPKT_FLUSHREAD = 0x1 + TIOCPKT_FLUSHWRITE = 0x2 + TIOCPKT_IOCTL = 0x40 + TIOCPKT_NOSTOP = 0x10 + TIOCPKT_START = 0x8 + TIOCPKT_STOP = 0x4 + TIOCPTMASTER = 0x2000741c + TIOCSBRK = 0x2000747b + TIOCSCTTY = 0x20007461 + TIOCSDRAINWAIT = 0x80047457 + TIOCSDTR = 0x20007479 + TIOCSETA = 0x802c7414 + TIOCSETAF = 0x802c7416 + TIOCSETAW = 0x802c7415 + TIOCSETD = 0x8004741b + TIOCSIG = 0x2004745f + TIOCSPGRP = 0x80047476 + TIOCSTART = 0x2000746e + TIOCSTAT = 0x20007465 + TIOCSTI = 0x80017472 + TIOCSTOP = 0x2000746f + TIOCSWINSZ = 0x80087467 + TIOCTIMESTAMP = 0x40087459 + TIOCUCNTL = 0x80047466 + TOSTOP = 0x400000 + VDISCARD = 0xf + VDSUSP = 0xb + VEOF = 0x0 + VEOL = 0x1 + VEOL2 = 0x2 + VERASE = 0x3 + VERASE2 = 0x7 + VINTR = 0x8 + VKILL = 0x5 + VLNEXT = 0xe + VMIN = 0x10 + VQUIT = 0x9 + VREPRINT = 0x6 + VSTART = 0xc + VSTATUS = 0x12 + VSTOP = 0xd + VSUSP = 0xa + VTIME = 0x11 + VWERASE = 0x4 + WCONTINUED = 0x4 + WCOREFLAG = 0x80 + WEXITED = 0x10 + WLINUXCLONE = 
0x80000000 + WNOHANG = 0x1 + WNOWAIT = 0x8 + WSTOPPED = 0x2 + WTRAPPED = 0x20 + WUNTRACED = 0x2 +) + +// Errors +const ( + E2BIG = syscall.Errno(0x7) + EACCES = syscall.Errno(0xd) + EADDRINUSE = syscall.Errno(0x30) + EADDRNOTAVAIL = syscall.Errno(0x31) + EAFNOSUPPORT = syscall.Errno(0x2f) + EAGAIN = syscall.Errno(0x23) + EALREADY = syscall.Errno(0x25) + EAUTH = syscall.Errno(0x50) + EBADF = syscall.Errno(0x9) + EBADMSG = syscall.Errno(0x59) + EBADRPC = syscall.Errno(0x48) + EBUSY = syscall.Errno(0x10) + ECANCELED = syscall.Errno(0x55) + ECAPMODE = syscall.Errno(0x5e) + ECHILD = syscall.Errno(0xa) + ECONNABORTED = syscall.Errno(0x35) + ECONNREFUSED = syscall.Errno(0x3d) + ECONNRESET = syscall.Errno(0x36) + EDEADLK = syscall.Errno(0xb) + EDESTADDRREQ = syscall.Errno(0x27) + EDOM = syscall.Errno(0x21) + EDOOFUS = syscall.Errno(0x58) + EDQUOT = syscall.Errno(0x45) + EEXIST = syscall.Errno(0x11) + EFAULT = syscall.Errno(0xe) + EFBIG = syscall.Errno(0x1b) + EFTYPE = syscall.Errno(0x4f) + EHOSTDOWN = syscall.Errno(0x40) + EHOSTUNREACH = syscall.Errno(0x41) + EIDRM = syscall.Errno(0x52) + EILSEQ = syscall.Errno(0x56) + EINPROGRESS = syscall.Errno(0x24) + EINTR = syscall.Errno(0x4) + EINVAL = syscall.Errno(0x16) + EIO = syscall.Errno(0x5) + EISCONN = syscall.Errno(0x38) + EISDIR = syscall.Errno(0x15) + ELAST = syscall.Errno(0x60) + ELOOP = syscall.Errno(0x3e) + EMFILE = syscall.Errno(0x18) + EMLINK = syscall.Errno(0x1f) + EMSGSIZE = syscall.Errno(0x28) + EMULTIHOP = syscall.Errno(0x5a) + ENAMETOOLONG = syscall.Errno(0x3f) + ENEEDAUTH = syscall.Errno(0x51) + ENETDOWN = syscall.Errno(0x32) + ENETRESET = syscall.Errno(0x34) + ENETUNREACH = syscall.Errno(0x33) + ENFILE = syscall.Errno(0x17) + ENOATTR = syscall.Errno(0x57) + ENOBUFS = syscall.Errno(0x37) + ENODEV = syscall.Errno(0x13) + ENOENT = syscall.Errno(0x2) + ENOEXEC = syscall.Errno(0x8) + ENOLCK = syscall.Errno(0x4d) + ENOLINK = syscall.Errno(0x5b) + ENOMEM = syscall.Errno(0xc) + ENOMSG = syscall.Errno(0x53) + ENOPROTOOPT = syscall.Errno(0x2a) + ENOSPC = syscall.Errno(0x1c) + ENOSYS = syscall.Errno(0x4e) + ENOTBLK = syscall.Errno(0xf) + ENOTCAPABLE = syscall.Errno(0x5d) + ENOTCONN = syscall.Errno(0x39) + ENOTDIR = syscall.Errno(0x14) + ENOTEMPTY = syscall.Errno(0x42) + ENOTRECOVERABLE = syscall.Errno(0x5f) + ENOTSOCK = syscall.Errno(0x26) + ENOTSUP = syscall.Errno(0x2d) + ENOTTY = syscall.Errno(0x19) + ENXIO = syscall.Errno(0x6) + EOPNOTSUPP = syscall.Errno(0x2d) + EOVERFLOW = syscall.Errno(0x54) + EOWNERDEAD = syscall.Errno(0x60) + EPERM = syscall.Errno(0x1) + EPFNOSUPPORT = syscall.Errno(0x2e) + EPIPE = syscall.Errno(0x20) + EPROCLIM = syscall.Errno(0x43) + EPROCUNAVAIL = syscall.Errno(0x4c) + EPROGMISMATCH = syscall.Errno(0x4b) + EPROGUNAVAIL = syscall.Errno(0x4a) + EPROTO = syscall.Errno(0x5c) + EPROTONOSUPPORT = syscall.Errno(0x2b) + EPROTOTYPE = syscall.Errno(0x29) + ERANGE = syscall.Errno(0x22) + EREMOTE = syscall.Errno(0x47) + EROFS = syscall.Errno(0x1e) + ERPCMISMATCH = syscall.Errno(0x49) + ESHUTDOWN = syscall.Errno(0x3a) + ESOCKTNOSUPPORT = syscall.Errno(0x2c) + ESPIPE = syscall.Errno(0x1d) + ESRCH = syscall.Errno(0x3) + ESTALE = syscall.Errno(0x46) + ETIMEDOUT = syscall.Errno(0x3c) + ETOOMANYREFS = syscall.Errno(0x3b) + ETXTBSY = syscall.Errno(0x1a) + EUSERS = syscall.Errno(0x44) + EWOULDBLOCK = syscall.Errno(0x23) + EXDEV = syscall.Errno(0x12) +) + +// Signals +const ( + SIGABRT = syscall.Signal(0x6) + SIGALRM = syscall.Signal(0xe) + SIGBUS = syscall.Signal(0xa) + SIGCHLD = syscall.Signal(0x14) + SIGCONT = syscall.Signal(0x13) + 
SIGEMT = syscall.Signal(0x7) + SIGFPE = syscall.Signal(0x8) + SIGHUP = syscall.Signal(0x1) + SIGILL = syscall.Signal(0x4) + SIGINFO = syscall.Signal(0x1d) + SIGINT = syscall.Signal(0x2) + SIGIO = syscall.Signal(0x17) + SIGIOT = syscall.Signal(0x6) + SIGKILL = syscall.Signal(0x9) + SIGLIBRT = syscall.Signal(0x21) + SIGLWP = syscall.Signal(0x20) + SIGPIPE = syscall.Signal(0xd) + SIGPROF = syscall.Signal(0x1b) + SIGQUIT = syscall.Signal(0x3) + SIGSEGV = syscall.Signal(0xb) + SIGSTOP = syscall.Signal(0x11) + SIGSYS = syscall.Signal(0xc) + SIGTERM = syscall.Signal(0xf) + SIGTHR = syscall.Signal(0x20) + SIGTRAP = syscall.Signal(0x5) + SIGTSTP = syscall.Signal(0x12) + SIGTTIN = syscall.Signal(0x15) + SIGTTOU = syscall.Signal(0x16) + SIGURG = syscall.Signal(0x10) + SIGUSR1 = syscall.Signal(0x1e) + SIGUSR2 = syscall.Signal(0x1f) + SIGVTALRM = syscall.Signal(0x1a) + SIGWINCH = syscall.Signal(0x1c) + SIGXCPU = syscall.Signal(0x18) + SIGXFSZ = syscall.Signal(0x19) +) + +// Error table +var errors = [...]string{ + 1: "operation not permitted", + 2: "no such file or directory", + 3: "no such process", + 4: "interrupted system call", + 5: "input/output error", + 6: "device not configured", + 7: "argument list too long", + 8: "exec format error", + 9: "bad file descriptor", + 10: "no child processes", + 11: "resource deadlock avoided", + 12: "cannot allocate memory", + 13: "permission denied", + 14: "bad address", + 15: "block device required", + 16: "device busy", + 17: "file exists", + 18: "cross-device link", + 19: "operation not supported by device", + 20: "not a directory", + 21: "is a directory", + 22: "invalid argument", + 23: "too many open files in system", + 24: "too many open files", + 25: "inappropriate ioctl for device", + 26: "text file busy", + 27: "file too large", + 28: "no space left on device", + 29: "illegal seek", + 30: "read-only file system", + 31: "too many links", + 32: "broken pipe", + 33: "numerical argument out of domain", + 34: "result too large", + 35: "resource temporarily unavailable", + 36: "operation now in progress", + 37: "operation already in progress", + 38: "socket operation on non-socket", + 39: "destination address required", + 40: "message too long", + 41: "protocol wrong type for socket", + 42: "protocol not available", + 43: "protocol not supported", + 44: "socket type not supported", + 45: "operation not supported", + 46: "protocol family not supported", + 47: "address family not supported by protocol family", + 48: "address already in use", + 49: "can't assign requested address", + 50: "network is down", + 51: "network is unreachable", + 52: "network dropped connection on reset", + 53: "software caused connection abort", + 54: "connection reset by peer", + 55: "no buffer space available", + 56: "socket is already connected", + 57: "socket is not connected", + 58: "can't send after socket shutdown", + 59: "too many references: can't splice", + 60: "operation timed out", + 61: "connection refused", + 62: "too many levels of symbolic links", + 63: "file name too long", + 64: "host is down", + 65: "no route to host", + 66: "directory not empty", + 67: "too many processes", + 68: "too many users", + 69: "disc quota exceeded", + 70: "stale NFS file handle", + 71: "too many levels of remote in path", + 72: "RPC struct is bad", + 73: "RPC version wrong", + 74: "RPC prog. 
not avail", + 75: "program version wrong", + 76: "bad procedure for program", + 77: "no locks available", + 78: "function not implemented", + 79: "inappropriate file type or format", + 80: "authentication error", + 81: "need authenticator", + 82: "identifier removed", + 83: "no message of desired type", + 84: "value too large to be stored in data type", + 85: "operation canceled", + 86: "illegal byte sequence", + 87: "attribute not found", + 88: "programming error", + 89: "bad message", + 90: "multihop attempted", + 91: "link has been severed", + 92: "protocol error", + 93: "capabilities insufficient", + 94: "not permitted in capability mode", + 95: "state not recoverable", + 96: "previous owner died", +} + +// Signal table +var signals = [...]string{ + 1: "hangup", + 2: "interrupt", + 3: "quit", + 4: "illegal instruction", + 5: "trace/BPT trap", + 6: "abort trap", + 7: "EMT trap", + 8: "floating point exception", + 9: "killed", + 10: "bus error", + 11: "segmentation fault", + 12: "bad system call", + 13: "broken pipe", + 14: "alarm clock", + 15: "terminated", + 16: "urgent I/O condition", + 17: "suspended (signal)", + 18: "suspended", + 19: "continued", + 20: "child exited", + 21: "stopped (tty input)", + 22: "stopped (tty output)", + 23: "I/O possible", + 24: "cputime limit exceeded", + 25: "filesize limit exceeded", + 26: "virtual timer expired", + 27: "profiling timer expired", + 28: "window size changes", + 29: "information request", + 30: "user defined signal 1", + 31: "user defined signal 2", + 32: "unknown signal", + 33: "unknown signal", +} diff --git a/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zerrors_freebsd_amd64.go b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zerrors_freebsd_amd64.go new file mode 100644 index 000000000..e48e7799a --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zerrors_freebsd_amd64.go @@ -0,0 +1,1748 @@ +// mkerrors.sh -m64 +// MACHINE GENERATED BY THE COMMAND ABOVE; DO NOT EDIT + +// +build amd64,freebsd + +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs -- -m64 _const.go + +package unix + +import "syscall" + +const ( + AF_APPLETALK = 0x10 + AF_ARP = 0x23 + AF_ATM = 0x1e + AF_BLUETOOTH = 0x24 + AF_CCITT = 0xa + AF_CHAOS = 0x5 + AF_CNT = 0x15 + AF_COIP = 0x14 + AF_DATAKIT = 0x9 + AF_DECnet = 0xc + AF_DLI = 0xd + AF_E164 = 0x1a + AF_ECMA = 0x8 + AF_HYLINK = 0xf + AF_IEEE80211 = 0x25 + AF_IMPLINK = 0x3 + AF_INET = 0x2 + AF_INET6 = 0x1c + AF_INET6_SDP = 0x2a + AF_INET_SDP = 0x28 + AF_IPX = 0x17 + AF_ISDN = 0x1a + AF_ISO = 0x7 + AF_LAT = 0xe + AF_LINK = 0x12 + AF_LOCAL = 0x1 + AF_MAX = 0x2a + AF_NATM = 0x1d + AF_NETBIOS = 0x6 + AF_NETGRAPH = 0x20 + AF_OSI = 0x7 + AF_PUP = 0x4 + AF_ROUTE = 0x11 + AF_SCLUSTER = 0x22 + AF_SIP = 0x18 + AF_SLOW = 0x21 + AF_SNA = 0xb + AF_UNIX = 0x1 + AF_UNSPEC = 0x0 + AF_VENDOR00 = 0x27 + AF_VENDOR01 = 0x29 + AF_VENDOR02 = 0x2b + AF_VENDOR03 = 0x2d + AF_VENDOR04 = 0x2f + AF_VENDOR05 = 0x31 + AF_VENDOR06 = 0x33 + AF_VENDOR07 = 0x35 + AF_VENDOR08 = 0x37 + AF_VENDOR09 = 0x39 + AF_VENDOR10 = 0x3b + AF_VENDOR11 = 0x3d + AF_VENDOR12 = 0x3f + AF_VENDOR13 = 0x41 + AF_VENDOR14 = 0x43 + AF_VENDOR15 = 0x45 + AF_VENDOR16 = 0x47 + AF_VENDOR17 = 0x49 + AF_VENDOR18 = 0x4b + AF_VENDOR19 = 0x4d + AF_VENDOR20 = 0x4f + AF_VENDOR21 = 0x51 + AF_VENDOR22 = 0x53 + AF_VENDOR23 = 0x55 + AF_VENDOR24 = 0x57 + AF_VENDOR25 = 0x59 + AF_VENDOR26 = 0x5b + AF_VENDOR27 = 0x5d + AF_VENDOR28 = 0x5f + AF_VENDOR29 = 0x61 + AF_VENDOR30 = 0x63 + AF_VENDOR31 = 0x65 
+ AF_VENDOR32 = 0x67 + AF_VENDOR33 = 0x69 + AF_VENDOR34 = 0x6b + AF_VENDOR35 = 0x6d + AF_VENDOR36 = 0x6f + AF_VENDOR37 = 0x71 + AF_VENDOR38 = 0x73 + AF_VENDOR39 = 0x75 + AF_VENDOR40 = 0x77 + AF_VENDOR41 = 0x79 + AF_VENDOR42 = 0x7b + AF_VENDOR43 = 0x7d + AF_VENDOR44 = 0x7f + AF_VENDOR45 = 0x81 + AF_VENDOR46 = 0x83 + AF_VENDOR47 = 0x85 + B0 = 0x0 + B110 = 0x6e + B115200 = 0x1c200 + B1200 = 0x4b0 + B134 = 0x86 + B14400 = 0x3840 + B150 = 0x96 + B1800 = 0x708 + B19200 = 0x4b00 + B200 = 0xc8 + B230400 = 0x38400 + B2400 = 0x960 + B28800 = 0x7080 + B300 = 0x12c + B38400 = 0x9600 + B460800 = 0x70800 + B4800 = 0x12c0 + B50 = 0x32 + B57600 = 0xe100 + B600 = 0x258 + B7200 = 0x1c20 + B75 = 0x4b + B76800 = 0x12c00 + B921600 = 0xe1000 + B9600 = 0x2580 + BIOCFEEDBACK = 0x8004427c + BIOCFLUSH = 0x20004268 + BIOCGBLEN = 0x40044266 + BIOCGDIRECTION = 0x40044276 + BIOCGDLT = 0x4004426a + BIOCGDLTLIST = 0xc0104279 + BIOCGETBUFMODE = 0x4004427d + BIOCGETIF = 0x4020426b + BIOCGETZMAX = 0x4008427f + BIOCGHDRCMPLT = 0x40044274 + BIOCGRSIG = 0x40044272 + BIOCGRTIMEOUT = 0x4010426e + BIOCGSEESENT = 0x40044276 + BIOCGSTATS = 0x4008426f + BIOCGTSTAMP = 0x40044283 + BIOCIMMEDIATE = 0x80044270 + BIOCLOCK = 0x2000427a + BIOCPROMISC = 0x20004269 + BIOCROTZBUF = 0x40184280 + BIOCSBLEN = 0xc0044266 + BIOCSDIRECTION = 0x80044277 + BIOCSDLT = 0x80044278 + BIOCSETBUFMODE = 0x8004427e + BIOCSETF = 0x80104267 + BIOCSETFNR = 0x80104282 + BIOCSETIF = 0x8020426c + BIOCSETWF = 0x8010427b + BIOCSETZBUF = 0x80184281 + BIOCSHDRCMPLT = 0x80044275 + BIOCSRSIG = 0x80044273 + BIOCSRTIMEOUT = 0x8010426d + BIOCSSEESENT = 0x80044277 + BIOCSTSTAMP = 0x80044284 + BIOCVERSION = 0x40044271 + BPF_A = 0x10 + BPF_ABS = 0x20 + BPF_ADD = 0x0 + BPF_ALIGNMENT = 0x8 + BPF_ALU = 0x4 + BPF_AND = 0x50 + BPF_B = 0x10 + BPF_BUFMODE_BUFFER = 0x1 + BPF_BUFMODE_ZBUF = 0x2 + BPF_DIV = 0x30 + BPF_H = 0x8 + BPF_IMM = 0x0 + BPF_IND = 0x40 + BPF_JA = 0x0 + BPF_JEQ = 0x10 + BPF_JGE = 0x30 + BPF_JGT = 0x20 + BPF_JMP = 0x5 + BPF_JSET = 0x40 + BPF_K = 0x0 + BPF_LD = 0x0 + BPF_LDX = 0x1 + BPF_LEN = 0x80 + BPF_LSH = 0x60 + BPF_MAJOR_VERSION = 0x1 + BPF_MAXBUFSIZE = 0x80000 + BPF_MAXINSNS = 0x200 + BPF_MEM = 0x60 + BPF_MEMWORDS = 0x10 + BPF_MINBUFSIZE = 0x20 + BPF_MINOR_VERSION = 0x1 + BPF_MISC = 0x7 + BPF_MSH = 0xa0 + BPF_MUL = 0x20 + BPF_NEG = 0x80 + BPF_OR = 0x40 + BPF_RELEASE = 0x30bb6 + BPF_RET = 0x6 + BPF_RSH = 0x70 + BPF_ST = 0x2 + BPF_STX = 0x3 + BPF_SUB = 0x10 + BPF_TAX = 0x0 + BPF_TXA = 0x80 + BPF_T_BINTIME = 0x2 + BPF_T_BINTIME_FAST = 0x102 + BPF_T_BINTIME_MONOTONIC = 0x202 + BPF_T_BINTIME_MONOTONIC_FAST = 0x302 + BPF_T_FAST = 0x100 + BPF_T_FLAG_MASK = 0x300 + BPF_T_FORMAT_MASK = 0x3 + BPF_T_MICROTIME = 0x0 + BPF_T_MICROTIME_FAST = 0x100 + BPF_T_MICROTIME_MONOTONIC = 0x200 + BPF_T_MICROTIME_MONOTONIC_FAST = 0x300 + BPF_T_MONOTONIC = 0x200 + BPF_T_MONOTONIC_FAST = 0x300 + BPF_T_NANOTIME = 0x1 + BPF_T_NANOTIME_FAST = 0x101 + BPF_T_NANOTIME_MONOTONIC = 0x201 + BPF_T_NANOTIME_MONOTONIC_FAST = 0x301 + BPF_T_NONE = 0x3 + BPF_T_NORMAL = 0x0 + BPF_W = 0x0 + BPF_X = 0x8 + BRKINT = 0x2 + CFLUSH = 0xf + CLOCAL = 0x8000 + CLOCK_MONOTONIC = 0x4 + CLOCK_MONOTONIC_FAST = 0xc + CLOCK_MONOTONIC_PRECISE = 0xb + CLOCK_PROCESS_CPUTIME_ID = 0xf + CLOCK_PROF = 0x2 + CLOCK_REALTIME = 0x0 + CLOCK_REALTIME_FAST = 0xa + CLOCK_REALTIME_PRECISE = 0x9 + CLOCK_SECOND = 0xd + CLOCK_THREAD_CPUTIME_ID = 0xe + CLOCK_UPTIME = 0x5 + CLOCK_UPTIME_FAST = 0x8 + CLOCK_UPTIME_PRECISE = 0x7 + CLOCK_VIRTUAL = 0x1 + CREAD = 0x800 + CS5 = 0x0 + CS6 = 0x100 + CS7 = 0x200 + CS8 = 0x300 + CSIZE = 0x300 + 
CSTART = 0x11 + CSTATUS = 0x14 + CSTOP = 0x13 + CSTOPB = 0x400 + CSUSP = 0x1a + CTL_MAXNAME = 0x18 + CTL_NET = 0x4 + DLT_A429 = 0xb8 + DLT_A653_ICM = 0xb9 + DLT_AIRONET_HEADER = 0x78 + DLT_AOS = 0xde + DLT_APPLE_IP_OVER_IEEE1394 = 0x8a + DLT_ARCNET = 0x7 + DLT_ARCNET_LINUX = 0x81 + DLT_ATM_CLIP = 0x13 + DLT_ATM_RFC1483 = 0xb + DLT_AURORA = 0x7e + DLT_AX25 = 0x3 + DLT_AX25_KISS = 0xca + DLT_BACNET_MS_TP = 0xa5 + DLT_BLUETOOTH_HCI_H4 = 0xbb + DLT_BLUETOOTH_HCI_H4_WITH_PHDR = 0xc9 + DLT_CAN20B = 0xbe + DLT_CAN_SOCKETCAN = 0xe3 + DLT_CHAOS = 0x5 + DLT_CHDLC = 0x68 + DLT_CISCO_IOS = 0x76 + DLT_C_HDLC = 0x68 + DLT_C_HDLC_WITH_DIR = 0xcd + DLT_DBUS = 0xe7 + DLT_DECT = 0xdd + DLT_DOCSIS = 0x8f + DLT_DVB_CI = 0xeb + DLT_ECONET = 0x73 + DLT_EN10MB = 0x1 + DLT_EN3MB = 0x2 + DLT_ENC = 0x6d + DLT_ERF = 0xc5 + DLT_ERF_ETH = 0xaf + DLT_ERF_POS = 0xb0 + DLT_FC_2 = 0xe0 + DLT_FC_2_WITH_FRAME_DELIMS = 0xe1 + DLT_FDDI = 0xa + DLT_FLEXRAY = 0xd2 + DLT_FRELAY = 0x6b + DLT_FRELAY_WITH_DIR = 0xce + DLT_GCOM_SERIAL = 0xad + DLT_GCOM_T1E1 = 0xac + DLT_GPF_F = 0xab + DLT_GPF_T = 0xaa + DLT_GPRS_LLC = 0xa9 + DLT_GSMTAP_ABIS = 0xda + DLT_GSMTAP_UM = 0xd9 + DLT_HHDLC = 0x79 + DLT_IBM_SN = 0x92 + DLT_IBM_SP = 0x91 + DLT_IEEE802 = 0x6 + DLT_IEEE802_11 = 0x69 + DLT_IEEE802_11_RADIO = 0x7f + DLT_IEEE802_11_RADIO_AVS = 0xa3 + DLT_IEEE802_15_4 = 0xc3 + DLT_IEEE802_15_4_LINUX = 0xbf + DLT_IEEE802_15_4_NOFCS = 0xe6 + DLT_IEEE802_15_4_NONASK_PHY = 0xd7 + DLT_IEEE802_16_MAC_CPS = 0xbc + DLT_IEEE802_16_MAC_CPS_RADIO = 0xc1 + DLT_IPFILTER = 0x74 + DLT_IPMB = 0xc7 + DLT_IPMB_LINUX = 0xd1 + DLT_IPNET = 0xe2 + DLT_IPOIB = 0xf2 + DLT_IPV4 = 0xe4 + DLT_IPV6 = 0xe5 + DLT_IP_OVER_FC = 0x7a + DLT_JUNIPER_ATM1 = 0x89 + DLT_JUNIPER_ATM2 = 0x87 + DLT_JUNIPER_ATM_CEMIC = 0xee + DLT_JUNIPER_CHDLC = 0xb5 + DLT_JUNIPER_ES = 0x84 + DLT_JUNIPER_ETHER = 0xb2 + DLT_JUNIPER_FIBRECHANNEL = 0xea + DLT_JUNIPER_FRELAY = 0xb4 + DLT_JUNIPER_GGSN = 0x85 + DLT_JUNIPER_ISM = 0xc2 + DLT_JUNIPER_MFR = 0x86 + DLT_JUNIPER_MLFR = 0x83 + DLT_JUNIPER_MLPPP = 0x82 + DLT_JUNIPER_MONITOR = 0xa4 + DLT_JUNIPER_PIC_PEER = 0xae + DLT_JUNIPER_PPP = 0xb3 + DLT_JUNIPER_PPPOE = 0xa7 + DLT_JUNIPER_PPPOE_ATM = 0xa8 + DLT_JUNIPER_SERVICES = 0x88 + DLT_JUNIPER_SRX_E2E = 0xe9 + DLT_JUNIPER_ST = 0xc8 + DLT_JUNIPER_VP = 0xb7 + DLT_JUNIPER_VS = 0xe8 + DLT_LAPB_WITH_DIR = 0xcf + DLT_LAPD = 0xcb + DLT_LIN = 0xd4 + DLT_LINUX_EVDEV = 0xd8 + DLT_LINUX_IRDA = 0x90 + DLT_LINUX_LAPD = 0xb1 + DLT_LINUX_PPP_WITHDIRECTION = 0xa6 + DLT_LINUX_SLL = 0x71 + DLT_LOOP = 0x6c + DLT_LTALK = 0x72 + DLT_MATCHING_MAX = 0xf6 + DLT_MATCHING_MIN = 0x68 + DLT_MFR = 0xb6 + DLT_MOST = 0xd3 + DLT_MPEG_2_TS = 0xf3 + DLT_MPLS = 0xdb + DLT_MTP2 = 0x8c + DLT_MTP2_WITH_PHDR = 0x8b + DLT_MTP3 = 0x8d + DLT_MUX27010 = 0xec + DLT_NETANALYZER = 0xf0 + DLT_NETANALYZER_TRANSPARENT = 0xf1 + DLT_NFC_LLCP = 0xf5 + DLT_NFLOG = 0xef + DLT_NG40 = 0xf4 + DLT_NULL = 0x0 + DLT_PCI_EXP = 0x7d + DLT_PFLOG = 0x75 + DLT_PFSYNC = 0x79 + DLT_PPI = 0xc0 + DLT_PPP = 0x9 + DLT_PPP_BSDOS = 0x10 + DLT_PPP_ETHER = 0x33 + DLT_PPP_PPPD = 0xa6 + DLT_PPP_SERIAL = 0x32 + DLT_PPP_WITH_DIR = 0xcc + DLT_PPP_WITH_DIRECTION = 0xa6 + DLT_PRISM_HEADER = 0x77 + DLT_PRONET = 0x4 + DLT_RAIF1 = 0xc6 + DLT_RAW = 0xc + DLT_RIO = 0x7c + DLT_SCCP = 0x8e + DLT_SITA = 0xc4 + DLT_SLIP = 0x8 + DLT_SLIP_BSDOS = 0xf + DLT_STANAG_5066_D_PDU = 0xed + DLT_SUNATM = 0x7b + DLT_SYMANTEC_FIREWALL = 0x63 + DLT_TZSP = 0x80 + DLT_USB = 0xba + DLT_USB_LINUX = 0xbd + DLT_USB_LINUX_MMAPPED = 0xdc + DLT_USER0 = 0x93 + DLT_USER1 = 0x94 + DLT_USER10 = 0x9d + DLT_USER11 = 0x9e + 
DLT_USER12 = 0x9f + DLT_USER13 = 0xa0 + DLT_USER14 = 0xa1 + DLT_USER15 = 0xa2 + DLT_USER2 = 0x95 + DLT_USER3 = 0x96 + DLT_USER4 = 0x97 + DLT_USER5 = 0x98 + DLT_USER6 = 0x99 + DLT_USER7 = 0x9a + DLT_USER8 = 0x9b + DLT_USER9 = 0x9c + DLT_WIHART = 0xdf + DLT_X2E_SERIAL = 0xd5 + DLT_X2E_XORAYA = 0xd6 + DT_BLK = 0x6 + DT_CHR = 0x2 + DT_DIR = 0x4 + DT_FIFO = 0x1 + DT_LNK = 0xa + DT_REG = 0x8 + DT_SOCK = 0xc + DT_UNKNOWN = 0x0 + DT_WHT = 0xe + ECHO = 0x8 + ECHOCTL = 0x40 + ECHOE = 0x2 + ECHOK = 0x4 + ECHOKE = 0x1 + ECHONL = 0x10 + ECHOPRT = 0x20 + EVFILT_AIO = -0x3 + EVFILT_FS = -0x9 + EVFILT_LIO = -0xa + EVFILT_PROC = -0x5 + EVFILT_READ = -0x1 + EVFILT_SIGNAL = -0x6 + EVFILT_SYSCOUNT = 0xb + EVFILT_TIMER = -0x7 + EVFILT_USER = -0xb + EVFILT_VNODE = -0x4 + EVFILT_WRITE = -0x2 + EV_ADD = 0x1 + EV_CLEAR = 0x20 + EV_DELETE = 0x2 + EV_DISABLE = 0x8 + EV_DISPATCH = 0x80 + EV_DROP = 0x1000 + EV_ENABLE = 0x4 + EV_EOF = 0x8000 + EV_ERROR = 0x4000 + EV_FLAG1 = 0x2000 + EV_ONESHOT = 0x10 + EV_RECEIPT = 0x40 + EV_SYSFLAGS = 0xf000 + EXTA = 0x4b00 + EXTATTR_NAMESPACE_EMPTY = 0x0 + EXTATTR_NAMESPACE_SYSTEM = 0x2 + EXTATTR_NAMESPACE_USER = 0x1 + EXTB = 0x9600 + EXTPROC = 0x800 + FD_CLOEXEC = 0x1 + FD_SETSIZE = 0x400 + FLUSHO = 0x800000 + F_CANCEL = 0x5 + F_DUP2FD = 0xa + F_DUP2FD_CLOEXEC = 0x12 + F_DUPFD = 0x0 + F_DUPFD_CLOEXEC = 0x11 + F_GETFD = 0x1 + F_GETFL = 0x3 + F_GETLK = 0xb + F_GETOWN = 0x5 + F_OGETLK = 0x7 + F_OK = 0x0 + F_OSETLK = 0x8 + F_OSETLKW = 0x9 + F_RDAHEAD = 0x10 + F_RDLCK = 0x1 + F_READAHEAD = 0xf + F_SETFD = 0x2 + F_SETFL = 0x4 + F_SETLK = 0xc + F_SETLKW = 0xd + F_SETLK_REMOTE = 0xe + F_SETOWN = 0x6 + F_UNLCK = 0x2 + F_UNLCKSYS = 0x4 + F_WRLCK = 0x3 + HUPCL = 0x4000 + ICANON = 0x100 + ICMP6_FILTER = 0x12 + ICRNL = 0x100 + IEXTEN = 0x400 + IFAN_ARRIVAL = 0x0 + IFAN_DEPARTURE = 0x1 + IFF_ALLMULTI = 0x200 + IFF_ALTPHYS = 0x4000 + IFF_BROADCAST = 0x2 + IFF_CANTCHANGE = 0x218f72 + IFF_CANTCONFIG = 0x10000 + IFF_DEBUG = 0x4 + IFF_DRV_OACTIVE = 0x400 + IFF_DRV_RUNNING = 0x40 + IFF_DYING = 0x200000 + IFF_LINK0 = 0x1000 + IFF_LINK1 = 0x2000 + IFF_LINK2 = 0x4000 + IFF_LOOPBACK = 0x8 + IFF_MONITOR = 0x40000 + IFF_MULTICAST = 0x8000 + IFF_NOARP = 0x80 + IFF_OACTIVE = 0x400 + IFF_POINTOPOINT = 0x10 + IFF_PPROMISC = 0x20000 + IFF_PROMISC = 0x100 + IFF_RENAMING = 0x400000 + IFF_RUNNING = 0x40 + IFF_SIMPLEX = 0x800 + IFF_SMART = 0x20 + IFF_STATICARP = 0x80000 + IFF_UP = 0x1 + IFNAMSIZ = 0x10 + IFT_1822 = 0x2 + IFT_A12MPPSWITCH = 0x82 + IFT_AAL2 = 0xbb + IFT_AAL5 = 0x31 + IFT_ADSL = 0x5e + IFT_AFLANE8023 = 0x3b + IFT_AFLANE8025 = 0x3c + IFT_ARAP = 0x58 + IFT_ARCNET = 0x23 + IFT_ARCNETPLUS = 0x24 + IFT_ASYNC = 0x54 + IFT_ATM = 0x25 + IFT_ATMDXI = 0x69 + IFT_ATMFUNI = 0x6a + IFT_ATMIMA = 0x6b + IFT_ATMLOGICAL = 0x50 + IFT_ATMRADIO = 0xbd + IFT_ATMSUBINTERFACE = 0x86 + IFT_ATMVCIENDPT = 0xc2 + IFT_ATMVIRTUAL = 0x95 + IFT_BGPPOLICYACCOUNTING = 0xa2 + IFT_BRIDGE = 0xd1 + IFT_BSC = 0x53 + IFT_CARP = 0xf8 + IFT_CCTEMUL = 0x3d + IFT_CEPT = 0x13 + IFT_CES = 0x85 + IFT_CHANNEL = 0x46 + IFT_CNR = 0x55 + IFT_COFFEE = 0x84 + IFT_COMPOSITELINK = 0x9b + IFT_DCN = 0x8d + IFT_DIGITALPOWERLINE = 0x8a + IFT_DIGITALWRAPPEROVERHEADCHANNEL = 0xba + IFT_DLSW = 0x4a + IFT_DOCSCABLEDOWNSTREAM = 0x80 + IFT_DOCSCABLEMACLAYER = 0x7f + IFT_DOCSCABLEUPSTREAM = 0x81 + IFT_DS0 = 0x51 + IFT_DS0BUNDLE = 0x52 + IFT_DS1FDL = 0xaa + IFT_DS3 = 0x1e + IFT_DTM = 0x8c + IFT_DVBASILN = 0xac + IFT_DVBASIOUT = 0xad + IFT_DVBRCCDOWNSTREAM = 0x93 + IFT_DVBRCCMACLAYER = 0x92 + IFT_DVBRCCUPSTREAM = 0x94 + IFT_ENC = 0xf4 + IFT_EON = 0x19 + IFT_EPLRS = 
0x57 + IFT_ESCON = 0x49 + IFT_ETHER = 0x6 + IFT_FAITH = 0xf2 + IFT_FAST = 0x7d + IFT_FASTETHER = 0x3e + IFT_FASTETHERFX = 0x45 + IFT_FDDI = 0xf + IFT_FIBRECHANNEL = 0x38 + IFT_FRAMERELAYINTERCONNECT = 0x3a + IFT_FRAMERELAYMPI = 0x5c + IFT_FRDLCIENDPT = 0xc1 + IFT_FRELAY = 0x20 + IFT_FRELAYDCE = 0x2c + IFT_FRF16MFRBUNDLE = 0xa3 + IFT_FRFORWARD = 0x9e + IFT_G703AT2MB = 0x43 + IFT_G703AT64K = 0x42 + IFT_GIF = 0xf0 + IFT_GIGABITETHERNET = 0x75 + IFT_GR303IDT = 0xb2 + IFT_GR303RDT = 0xb1 + IFT_H323GATEKEEPER = 0xa4 + IFT_H323PROXY = 0xa5 + IFT_HDH1822 = 0x3 + IFT_HDLC = 0x76 + IFT_HDSL2 = 0xa8 + IFT_HIPERLAN2 = 0xb7 + IFT_HIPPI = 0x2f + IFT_HIPPIINTERFACE = 0x39 + IFT_HOSTPAD = 0x5a + IFT_HSSI = 0x2e + IFT_HY = 0xe + IFT_IBM370PARCHAN = 0x48 + IFT_IDSL = 0x9a + IFT_IEEE1394 = 0x90 + IFT_IEEE80211 = 0x47 + IFT_IEEE80212 = 0x37 + IFT_IEEE8023ADLAG = 0xa1 + IFT_IFGSN = 0x91 + IFT_IMT = 0xbe + IFT_INFINIBAND = 0xc7 + IFT_INTERLEAVE = 0x7c + IFT_IP = 0x7e + IFT_IPFORWARD = 0x8e + IFT_IPOVERATM = 0x72 + IFT_IPOVERCDLC = 0x6d + IFT_IPOVERCLAW = 0x6e + IFT_IPSWITCH = 0x4e + IFT_IPXIP = 0xf9 + IFT_ISDN = 0x3f + IFT_ISDNBASIC = 0x14 + IFT_ISDNPRIMARY = 0x15 + IFT_ISDNS = 0x4b + IFT_ISDNU = 0x4c + IFT_ISO88022LLC = 0x29 + IFT_ISO88023 = 0x7 + IFT_ISO88024 = 0x8 + IFT_ISO88025 = 0x9 + IFT_ISO88025CRFPINT = 0x62 + IFT_ISO88025DTR = 0x56 + IFT_ISO88025FIBER = 0x73 + IFT_ISO88026 = 0xa + IFT_ISUP = 0xb3 + IFT_L2VLAN = 0x87 + IFT_L3IPVLAN = 0x88 + IFT_L3IPXVLAN = 0x89 + IFT_LAPB = 0x10 + IFT_LAPD = 0x4d + IFT_LAPF = 0x77 + IFT_LOCALTALK = 0x2a + IFT_LOOP = 0x18 + IFT_MEDIAMAILOVERIP = 0x8b + IFT_MFSIGLINK = 0xa7 + IFT_MIOX25 = 0x26 + IFT_MODEM = 0x30 + IFT_MPC = 0x71 + IFT_MPLS = 0xa6 + IFT_MPLSTUNNEL = 0x96 + IFT_MSDSL = 0x8f + IFT_MVL = 0xbf + IFT_MYRINET = 0x63 + IFT_NFAS = 0xaf + IFT_NSIP = 0x1b + IFT_OPTICALCHANNEL = 0xc3 + IFT_OPTICALTRANSPORT = 0xc4 + IFT_OTHER = 0x1 + IFT_P10 = 0xc + IFT_P80 = 0xd + IFT_PARA = 0x22 + IFT_PFLOG = 0xf6 + IFT_PFSYNC = 0xf7 + IFT_PLC = 0xae + IFT_POS = 0xab + IFT_PPP = 0x17 + IFT_PPPMULTILINKBUNDLE = 0x6c + IFT_PROPBWAP2MP = 0xb8 + IFT_PROPCNLS = 0x59 + IFT_PROPDOCSWIRELESSDOWNSTREAM = 0xb5 + IFT_PROPDOCSWIRELESSMACLAYER = 0xb4 + IFT_PROPDOCSWIRELESSUPSTREAM = 0xb6 + IFT_PROPMUX = 0x36 + IFT_PROPVIRTUAL = 0x35 + IFT_PROPWIRELESSP2P = 0x9d + IFT_PTPSERIAL = 0x16 + IFT_PVC = 0xf1 + IFT_QLLC = 0x44 + IFT_RADIOMAC = 0xbc + IFT_RADSL = 0x5f + IFT_REACHDSL = 0xc0 + IFT_RFC1483 = 0x9f + IFT_RS232 = 0x21 + IFT_RSRB = 0x4f + IFT_SDLC = 0x11 + IFT_SDSL = 0x60 + IFT_SHDSL = 0xa9 + IFT_SIP = 0x1f + IFT_SLIP = 0x1c + IFT_SMDSDXI = 0x2b + IFT_SMDSICIP = 0x34 + IFT_SONET = 0x27 + IFT_SONETOVERHEADCHANNEL = 0xb9 + IFT_SONETPATH = 0x32 + IFT_SONETVT = 0x33 + IFT_SRP = 0x97 + IFT_SS7SIGLINK = 0x9c + IFT_STACKTOSTACK = 0x6f + IFT_STARLAN = 0xb + IFT_STF = 0xd7 + IFT_T1 = 0x12 + IFT_TDLC = 0x74 + IFT_TERMPAD = 0x5b + IFT_TR008 = 0xb0 + IFT_TRANSPHDLC = 0x7b + IFT_TUNNEL = 0x83 + IFT_ULTRA = 0x1d + IFT_USB = 0xa0 + IFT_V11 = 0x40 + IFT_V35 = 0x2d + IFT_V36 = 0x41 + IFT_V37 = 0x78 + IFT_VDSL = 0x61 + IFT_VIRTUALIPADDRESS = 0x70 + IFT_VOICEEM = 0x64 + IFT_VOICEENCAP = 0x67 + IFT_VOICEFXO = 0x65 + IFT_VOICEFXS = 0x66 + IFT_VOICEOVERATM = 0x98 + IFT_VOICEOVERFRAMERELAY = 0x99 + IFT_VOICEOVERIP = 0x68 + IFT_X213 = 0x5d + IFT_X25 = 0x5 + IFT_X25DDN = 0x4 + IFT_X25HUNTGROUP = 0x7a + IFT_X25MLP = 0x79 + IFT_X25PLE = 0x28 + IFT_XETHER = 0x1a + IGNBRK = 0x1 + IGNCR = 0x80 + IGNPAR = 0x4 + IMAXBEL = 0x2000 + INLCR = 0x40 + INPCK = 0x10 + IN_CLASSA_HOST = 0xffffff + IN_CLASSA_MAX = 0x80 + IN_CLASSA_NET = 
0xff000000 + IN_CLASSA_NSHIFT = 0x18 + IN_CLASSB_HOST = 0xffff + IN_CLASSB_MAX = 0x10000 + IN_CLASSB_NET = 0xffff0000 + IN_CLASSB_NSHIFT = 0x10 + IN_CLASSC_HOST = 0xff + IN_CLASSC_NET = 0xffffff00 + IN_CLASSC_NSHIFT = 0x8 + IN_CLASSD_HOST = 0xfffffff + IN_CLASSD_NET = 0xf0000000 + IN_CLASSD_NSHIFT = 0x1c + IN_LOOPBACKNET = 0x7f + IN_RFC3021_MASK = 0xfffffffe + IPPROTO_3PC = 0x22 + IPPROTO_ADFS = 0x44 + IPPROTO_AH = 0x33 + IPPROTO_AHIP = 0x3d + IPPROTO_APES = 0x63 + IPPROTO_ARGUS = 0xd + IPPROTO_AX25 = 0x5d + IPPROTO_BHA = 0x31 + IPPROTO_BLT = 0x1e + IPPROTO_BRSATMON = 0x4c + IPPROTO_CARP = 0x70 + IPPROTO_CFTP = 0x3e + IPPROTO_CHAOS = 0x10 + IPPROTO_CMTP = 0x26 + IPPROTO_CPHB = 0x49 + IPPROTO_CPNX = 0x48 + IPPROTO_DDP = 0x25 + IPPROTO_DGP = 0x56 + IPPROTO_DIVERT = 0x102 + IPPROTO_DONE = 0x101 + IPPROTO_DSTOPTS = 0x3c + IPPROTO_EGP = 0x8 + IPPROTO_EMCON = 0xe + IPPROTO_ENCAP = 0x62 + IPPROTO_EON = 0x50 + IPPROTO_ESP = 0x32 + IPPROTO_ETHERIP = 0x61 + IPPROTO_FRAGMENT = 0x2c + IPPROTO_GGP = 0x3 + IPPROTO_GMTP = 0x64 + IPPROTO_GRE = 0x2f + IPPROTO_HELLO = 0x3f + IPPROTO_HIP = 0x8b + IPPROTO_HMP = 0x14 + IPPROTO_HOPOPTS = 0x0 + IPPROTO_ICMP = 0x1 + IPPROTO_ICMPV6 = 0x3a + IPPROTO_IDP = 0x16 + IPPROTO_IDPR = 0x23 + IPPROTO_IDRP = 0x2d + IPPROTO_IGMP = 0x2 + IPPROTO_IGP = 0x55 + IPPROTO_IGRP = 0x58 + IPPROTO_IL = 0x28 + IPPROTO_INLSP = 0x34 + IPPROTO_INP = 0x20 + IPPROTO_IP = 0x0 + IPPROTO_IPCOMP = 0x6c + IPPROTO_IPCV = 0x47 + IPPROTO_IPEIP = 0x5e + IPPROTO_IPIP = 0x4 + IPPROTO_IPPC = 0x43 + IPPROTO_IPV4 = 0x4 + IPPROTO_IPV6 = 0x29 + IPPROTO_IRTP = 0x1c + IPPROTO_KRYPTOLAN = 0x41 + IPPROTO_LARP = 0x5b + IPPROTO_LEAF1 = 0x19 + IPPROTO_LEAF2 = 0x1a + IPPROTO_MAX = 0x100 + IPPROTO_MAXID = 0x34 + IPPROTO_MEAS = 0x13 + IPPROTO_MH = 0x87 + IPPROTO_MHRP = 0x30 + IPPROTO_MICP = 0x5f + IPPROTO_MOBILE = 0x37 + IPPROTO_MPLS = 0x89 + IPPROTO_MTP = 0x5c + IPPROTO_MUX = 0x12 + IPPROTO_ND = 0x4d + IPPROTO_NHRP = 0x36 + IPPROTO_NONE = 0x3b + IPPROTO_NSP = 0x1f + IPPROTO_NVPII = 0xb + IPPROTO_OLD_DIVERT = 0xfe + IPPROTO_OSPFIGP = 0x59 + IPPROTO_PFSYNC = 0xf0 + IPPROTO_PGM = 0x71 + IPPROTO_PIGP = 0x9 + IPPROTO_PIM = 0x67 + IPPROTO_PRM = 0x15 + IPPROTO_PUP = 0xc + IPPROTO_PVP = 0x4b + IPPROTO_RAW = 0xff + IPPROTO_RCCMON = 0xa + IPPROTO_RDP = 0x1b + IPPROTO_RESERVED_253 = 0xfd + IPPROTO_RESERVED_254 = 0xfe + IPPROTO_ROUTING = 0x2b + IPPROTO_RSVP = 0x2e + IPPROTO_RVD = 0x42 + IPPROTO_SATEXPAK = 0x40 + IPPROTO_SATMON = 0x45 + IPPROTO_SCCSP = 0x60 + IPPROTO_SCTP = 0x84 + IPPROTO_SDRP = 0x2a + IPPROTO_SEND = 0x103 + IPPROTO_SEP = 0x21 + IPPROTO_SHIM6 = 0x8c + IPPROTO_SKIP = 0x39 + IPPROTO_SPACER = 0x7fff + IPPROTO_SRPC = 0x5a + IPPROTO_ST = 0x7 + IPPROTO_SVMTP = 0x52 + IPPROTO_SWIPE = 0x35 + IPPROTO_TCF = 0x57 + IPPROTO_TCP = 0x6 + IPPROTO_TLSP = 0x38 + IPPROTO_TP = 0x1d + IPPROTO_TPXX = 0x27 + IPPROTO_TRUNK1 = 0x17 + IPPROTO_TRUNK2 = 0x18 + IPPROTO_TTP = 0x54 + IPPROTO_UDP = 0x11 + IPPROTO_UDPLITE = 0x88 + IPPROTO_VINES = 0x53 + IPPROTO_VISA = 0x46 + IPPROTO_VMTP = 0x51 + IPPROTO_WBEXPAK = 0x4f + IPPROTO_WBMON = 0x4e + IPPROTO_WSN = 0x4a + IPPROTO_XNET = 0xf + IPPROTO_XTP = 0x24 + IPV6_AUTOFLOWLABEL = 0x3b + IPV6_BINDANY = 0x40 + IPV6_BINDV6ONLY = 0x1b + IPV6_CHECKSUM = 0x1a + IPV6_DEFAULT_MULTICAST_HOPS = 0x1 + IPV6_DEFAULT_MULTICAST_LOOP = 0x1 + IPV6_DEFHLIM = 0x40 + IPV6_DONTFRAG = 0x3e + IPV6_DSTOPTS = 0x32 + IPV6_FAITH = 0x1d + IPV6_FLOWINFO_MASK = 0xffffff0f + IPV6_FLOWLABEL_MASK = 0xffff0f00 + IPV6_FRAGTTL = 0x78 + IPV6_FW_ADD = 0x1e + IPV6_FW_DEL = 0x1f + IPV6_FW_FLUSH = 0x20 + IPV6_FW_GET = 0x22 + IPV6_FW_ZERO 
= 0x21 + IPV6_HLIMDEC = 0x1 + IPV6_HOPLIMIT = 0x2f + IPV6_HOPOPTS = 0x31 + IPV6_IPSEC_POLICY = 0x1c + IPV6_JOIN_GROUP = 0xc + IPV6_LEAVE_GROUP = 0xd + IPV6_MAXHLIM = 0xff + IPV6_MAXOPTHDR = 0x800 + IPV6_MAXPACKET = 0xffff + IPV6_MAX_GROUP_SRC_FILTER = 0x200 + IPV6_MAX_MEMBERSHIPS = 0xfff + IPV6_MAX_SOCK_SRC_FILTER = 0x80 + IPV6_MIN_MEMBERSHIPS = 0x1f + IPV6_MMTU = 0x500 + IPV6_MSFILTER = 0x4a + IPV6_MULTICAST_HOPS = 0xa + IPV6_MULTICAST_IF = 0x9 + IPV6_MULTICAST_LOOP = 0xb + IPV6_NEXTHOP = 0x30 + IPV6_PATHMTU = 0x2c + IPV6_PKTINFO = 0x2e + IPV6_PORTRANGE = 0xe + IPV6_PORTRANGE_DEFAULT = 0x0 + IPV6_PORTRANGE_HIGH = 0x1 + IPV6_PORTRANGE_LOW = 0x2 + IPV6_PREFER_TEMPADDR = 0x3f + IPV6_RECVDSTOPTS = 0x28 + IPV6_RECVHOPLIMIT = 0x25 + IPV6_RECVHOPOPTS = 0x27 + IPV6_RECVPATHMTU = 0x2b + IPV6_RECVPKTINFO = 0x24 + IPV6_RECVRTHDR = 0x26 + IPV6_RECVTCLASS = 0x39 + IPV6_RTHDR = 0x33 + IPV6_RTHDRDSTOPTS = 0x23 + IPV6_RTHDR_LOOSE = 0x0 + IPV6_RTHDR_STRICT = 0x1 + IPV6_RTHDR_TYPE_0 = 0x0 + IPV6_SOCKOPT_RESERVED1 = 0x3 + IPV6_TCLASS = 0x3d + IPV6_UNICAST_HOPS = 0x4 + IPV6_USE_MIN_MTU = 0x2a + IPV6_V6ONLY = 0x1b + IPV6_VERSION = 0x60 + IPV6_VERSION_MASK = 0xf0 + IP_ADD_MEMBERSHIP = 0xc + IP_ADD_SOURCE_MEMBERSHIP = 0x46 + IP_BINDANY = 0x18 + IP_BLOCK_SOURCE = 0x48 + IP_DEFAULT_MULTICAST_LOOP = 0x1 + IP_DEFAULT_MULTICAST_TTL = 0x1 + IP_DF = 0x4000 + IP_DONTFRAG = 0x43 + IP_DROP_MEMBERSHIP = 0xd + IP_DROP_SOURCE_MEMBERSHIP = 0x47 + IP_DUMMYNET3 = 0x31 + IP_DUMMYNET_CONFIGURE = 0x3c + IP_DUMMYNET_DEL = 0x3d + IP_DUMMYNET_FLUSH = 0x3e + IP_DUMMYNET_GET = 0x40 + IP_FAITH = 0x16 + IP_FW3 = 0x30 + IP_FW_ADD = 0x32 + IP_FW_DEL = 0x33 + IP_FW_FLUSH = 0x34 + IP_FW_GET = 0x36 + IP_FW_NAT_CFG = 0x38 + IP_FW_NAT_DEL = 0x39 + IP_FW_NAT_GET_CONFIG = 0x3a + IP_FW_NAT_GET_LOG = 0x3b + IP_FW_RESETLOG = 0x37 + IP_FW_TABLE_ADD = 0x28 + IP_FW_TABLE_DEL = 0x29 + IP_FW_TABLE_FLUSH = 0x2a + IP_FW_TABLE_GETSIZE = 0x2b + IP_FW_TABLE_LIST = 0x2c + IP_FW_ZERO = 0x35 + IP_HDRINCL = 0x2 + IP_IPSEC_POLICY = 0x15 + IP_MAXPACKET = 0xffff + IP_MAX_GROUP_SRC_FILTER = 0x200 + IP_MAX_MEMBERSHIPS = 0xfff + IP_MAX_SOCK_MUTE_FILTER = 0x80 + IP_MAX_SOCK_SRC_FILTER = 0x80 + IP_MAX_SOURCE_FILTER = 0x400 + IP_MF = 0x2000 + IP_MINTTL = 0x42 + IP_MIN_MEMBERSHIPS = 0x1f + IP_MSFILTER = 0x4a + IP_MSS = 0x240 + IP_MULTICAST_IF = 0x9 + IP_MULTICAST_LOOP = 0xb + IP_MULTICAST_TTL = 0xa + IP_MULTICAST_VIF = 0xe + IP_OFFMASK = 0x1fff + IP_ONESBCAST = 0x17 + IP_OPTIONS = 0x1 + IP_PORTRANGE = 0x13 + IP_PORTRANGE_DEFAULT = 0x0 + IP_PORTRANGE_HIGH = 0x1 + IP_PORTRANGE_LOW = 0x2 + IP_RECVDSTADDR = 0x7 + IP_RECVIF = 0x14 + IP_RECVOPTS = 0x5 + IP_RECVRETOPTS = 0x6 + IP_RECVTOS = 0x44 + IP_RECVTTL = 0x41 + IP_RETOPTS = 0x8 + IP_RF = 0x8000 + IP_RSVP_OFF = 0x10 + IP_RSVP_ON = 0xf + IP_RSVP_VIF_OFF = 0x12 + IP_RSVP_VIF_ON = 0x11 + IP_SENDSRCADDR = 0x7 + IP_TOS = 0x3 + IP_TTL = 0x4 + IP_UNBLOCK_SOURCE = 0x49 + ISIG = 0x80 + ISTRIP = 0x20 + IXANY = 0x800 + IXOFF = 0x400 + IXON = 0x200 + LOCK_EX = 0x2 + LOCK_NB = 0x4 + LOCK_SH = 0x1 + LOCK_UN = 0x8 + MADV_AUTOSYNC = 0x7 + MADV_CORE = 0x9 + MADV_DONTNEED = 0x4 + MADV_FREE = 0x5 + MADV_NOCORE = 0x8 + MADV_NORMAL = 0x0 + MADV_NOSYNC = 0x6 + MADV_PROTECT = 0xa + MADV_RANDOM = 0x1 + MADV_SEQUENTIAL = 0x2 + MADV_WILLNEED = 0x3 + MAP_32BIT = 0x80000 + MAP_ALIGNED_SUPER = 0x1000000 + MAP_ALIGNMENT_MASK = -0x1000000 + MAP_ALIGNMENT_SHIFT = 0x18 + MAP_ANON = 0x1000 + MAP_ANONYMOUS = 0x1000 + MAP_COPY = 0x2 + MAP_EXCL = 0x4000 + MAP_FILE = 0x0 + MAP_FIXED = 0x10 + MAP_HASSEMAPHORE = 0x200 + MAP_NOCORE = 0x20000 + MAP_NORESERVE = 0x40 
+ MAP_NOSYNC = 0x800 + MAP_PREFAULT_READ = 0x40000 + MAP_PRIVATE = 0x2 + MAP_RENAME = 0x20 + MAP_RESERVED0080 = 0x80 + MAP_RESERVED0100 = 0x100 + MAP_SHARED = 0x1 + MAP_STACK = 0x400 + MCL_CURRENT = 0x1 + MCL_FUTURE = 0x2 + MSG_CMSG_CLOEXEC = 0x40000 + MSG_COMPAT = 0x8000 + MSG_CTRUNC = 0x20 + MSG_DONTROUTE = 0x4 + MSG_DONTWAIT = 0x80 + MSG_EOF = 0x100 + MSG_EOR = 0x8 + MSG_NBIO = 0x4000 + MSG_NOSIGNAL = 0x20000 + MSG_NOTIFICATION = 0x2000 + MSG_OOB = 0x1 + MSG_PEEK = 0x2 + MSG_TRUNC = 0x10 + MSG_WAITALL = 0x40 + MS_ASYNC = 0x1 + MS_INVALIDATE = 0x2 + MS_SYNC = 0x0 + NAME_MAX = 0xff + NET_RT_DUMP = 0x1 + NET_RT_FLAGS = 0x2 + NET_RT_IFLIST = 0x3 + NET_RT_IFLISTL = 0x5 + NET_RT_IFMALIST = 0x4 + NET_RT_MAXID = 0x6 + NOFLSH = 0x80000000 + NOTE_ATTRIB = 0x8 + NOTE_CHILD = 0x4 + NOTE_DELETE = 0x1 + NOTE_EXEC = 0x20000000 + NOTE_EXIT = 0x80000000 + NOTE_EXTEND = 0x4 + NOTE_FFAND = 0x40000000 + NOTE_FFCOPY = 0xc0000000 + NOTE_FFCTRLMASK = 0xc0000000 + NOTE_FFLAGSMASK = 0xffffff + NOTE_FFNOP = 0x0 + NOTE_FFOR = 0x80000000 + NOTE_FORK = 0x40000000 + NOTE_LINK = 0x10 + NOTE_LOWAT = 0x1 + NOTE_MSECONDS = 0x2 + NOTE_NSECONDS = 0x8 + NOTE_PCTRLMASK = 0xf0000000 + NOTE_PDATAMASK = 0xfffff + NOTE_RENAME = 0x20 + NOTE_REVOKE = 0x40 + NOTE_SECONDS = 0x1 + NOTE_TRACK = 0x1 + NOTE_TRACKERR = 0x2 + NOTE_TRIGGER = 0x1000000 + NOTE_USECONDS = 0x4 + NOTE_WRITE = 0x2 + OCRNL = 0x10 + ONLCR = 0x2 + ONLRET = 0x40 + ONOCR = 0x20 + ONOEOT = 0x8 + OPOST = 0x1 + O_ACCMODE = 0x3 + O_APPEND = 0x8 + O_ASYNC = 0x40 + O_CLOEXEC = 0x100000 + O_CREAT = 0x200 + O_DIRECT = 0x10000 + O_DIRECTORY = 0x20000 + O_EXCL = 0x800 + O_EXEC = 0x40000 + O_EXLOCK = 0x20 + O_FSYNC = 0x80 + O_NDELAY = 0x4 + O_NOCTTY = 0x8000 + O_NOFOLLOW = 0x100 + O_NONBLOCK = 0x4 + O_RDONLY = 0x0 + O_RDWR = 0x2 + O_SHLOCK = 0x10 + O_SYNC = 0x80 + O_TRUNC = 0x400 + O_TTY_INIT = 0x80000 + O_WRONLY = 0x1 + PARENB = 0x1000 + PARMRK = 0x8 + PARODD = 0x2000 + PENDIN = 0x20000000 + PRIO_PGRP = 0x1 + PRIO_PROCESS = 0x0 + PRIO_USER = 0x2 + PROT_EXEC = 0x4 + PROT_NONE = 0x0 + PROT_READ = 0x1 + PROT_WRITE = 0x2 + RLIMIT_AS = 0xa + RLIMIT_CORE = 0x4 + RLIMIT_CPU = 0x0 + RLIMIT_DATA = 0x2 + RLIMIT_FSIZE = 0x1 + RLIMIT_NOFILE = 0x8 + RLIMIT_STACK = 0x3 + RLIM_INFINITY = 0x7fffffffffffffff + RTAX_AUTHOR = 0x6 + RTAX_BRD = 0x7 + RTAX_DST = 0x0 + RTAX_GATEWAY = 0x1 + RTAX_GENMASK = 0x3 + RTAX_IFA = 0x5 + RTAX_IFP = 0x4 + RTAX_MAX = 0x8 + RTAX_NETMASK = 0x2 + RTA_AUTHOR = 0x40 + RTA_BRD = 0x80 + RTA_DST = 0x1 + RTA_GATEWAY = 0x2 + RTA_GENMASK = 0x8 + RTA_IFA = 0x20 + RTA_IFP = 0x10 + RTA_NETMASK = 0x4 + RTF_BLACKHOLE = 0x1000 + RTF_BROADCAST = 0x400000 + RTF_DONE = 0x40 + RTF_DYNAMIC = 0x10 + RTF_FMASK = 0x1004d808 + RTF_GATEWAY = 0x2 + RTF_GWFLAG_COMPAT = 0x80000000 + RTF_HOST = 0x4 + RTF_LLDATA = 0x400 + RTF_LLINFO = 0x400 + RTF_LOCAL = 0x200000 + RTF_MODIFIED = 0x20 + RTF_MULTICAST = 0x800000 + RTF_PINNED = 0x100000 + RTF_PRCLONING = 0x10000 + RTF_PROTO1 = 0x8000 + RTF_PROTO2 = 0x4000 + RTF_PROTO3 = 0x40000 + RTF_REJECT = 0x8 + RTF_RNH_LOCKED = 0x40000000 + RTF_STATIC = 0x800 + RTF_STICKY = 0x10000000 + RTF_UP = 0x1 + RTF_XRESOLVE = 0x200 + RTM_ADD = 0x1 + RTM_CHANGE = 0x3 + RTM_DELADDR = 0xd + RTM_DELETE = 0x2 + RTM_DELMADDR = 0x10 + RTM_GET = 0x4 + RTM_IEEE80211 = 0x12 + RTM_IFANNOUNCE = 0x11 + RTM_IFINFO = 0xe + RTM_LOCK = 0x8 + RTM_LOSING = 0x5 + RTM_MISS = 0x7 + RTM_NEWADDR = 0xc + RTM_NEWMADDR = 0xf + RTM_OLDADD = 0x9 + RTM_OLDDEL = 0xa + RTM_REDIRECT = 0x6 + RTM_RESOLVE = 0xb + RTM_RTTUNIT = 0xf4240 + RTM_VERSION = 0x5 + RTV_EXPIRE = 0x4 + RTV_HOPCOUNT = 0x2 + 
RTV_MTU = 0x1 + RTV_RPIPE = 0x8 + RTV_RTT = 0x40 + RTV_RTTVAR = 0x80 + RTV_SPIPE = 0x10 + RTV_SSTHRESH = 0x20 + RTV_WEIGHT = 0x100 + RT_ALL_FIBS = -0x1 + RT_CACHING_CONTEXT = 0x1 + RT_DEFAULT_FIB = 0x0 + RT_NORTREF = 0x2 + RUSAGE_CHILDREN = -0x1 + RUSAGE_SELF = 0x0 + RUSAGE_THREAD = 0x1 + SCM_BINTIME = 0x4 + SCM_CREDS = 0x3 + SCM_RIGHTS = 0x1 + SCM_TIMESTAMP = 0x2 + SHUT_RD = 0x0 + SHUT_RDWR = 0x2 + SHUT_WR = 0x1 + SIOCADDMULTI = 0x80206931 + SIOCADDRT = 0x8040720a + SIOCAIFADDR = 0x8040691a + SIOCAIFGROUP = 0x80286987 + SIOCALIFADDR = 0x8118691b + SIOCATMARK = 0x40047307 + SIOCDELMULTI = 0x80206932 + SIOCDELRT = 0x8040720b + SIOCDIFADDR = 0x80206919 + SIOCDIFGROUP = 0x80286989 + SIOCDIFPHYADDR = 0x80206949 + SIOCDLIFADDR = 0x8118691d + SIOCGDRVSPEC = 0xc028697b + SIOCGETSGCNT = 0xc0207210 + SIOCGETVIFCNT = 0xc028720f + SIOCGHIWAT = 0x40047301 + SIOCGIFADDR = 0xc0206921 + SIOCGIFBRDADDR = 0xc0206923 + SIOCGIFCAP = 0xc020691f + SIOCGIFCONF = 0xc0106924 + SIOCGIFDESCR = 0xc020692a + SIOCGIFDSTADDR = 0xc0206922 + SIOCGIFFIB = 0xc020695c + SIOCGIFFLAGS = 0xc0206911 + SIOCGIFGENERIC = 0xc020693a + SIOCGIFGMEMB = 0xc028698a + SIOCGIFGROUP = 0xc0286988 + SIOCGIFINDEX = 0xc0206920 + SIOCGIFMAC = 0xc0206926 + SIOCGIFMEDIA = 0xc0306938 + SIOCGIFMETRIC = 0xc0206917 + SIOCGIFMTU = 0xc0206933 + SIOCGIFNETMASK = 0xc0206925 + SIOCGIFPDSTADDR = 0xc0206948 + SIOCGIFPHYS = 0xc0206935 + SIOCGIFPSRCADDR = 0xc0206947 + SIOCGIFSTATUS = 0xc331693b + SIOCGLIFADDR = 0xc118691c + SIOCGLIFPHYADDR = 0xc118694b + SIOCGLOWAT = 0x40047303 + SIOCGPGRP = 0x40047309 + SIOCGPRIVATE_0 = 0xc0206950 + SIOCGPRIVATE_1 = 0xc0206951 + SIOCIFCREATE = 0xc020697a + SIOCIFCREATE2 = 0xc020697c + SIOCIFDESTROY = 0x80206979 + SIOCIFGCLONERS = 0xc0106978 + SIOCSDRVSPEC = 0x8028697b + SIOCSHIWAT = 0x80047300 + SIOCSIFADDR = 0x8020690c + SIOCSIFBRDADDR = 0x80206913 + SIOCSIFCAP = 0x8020691e + SIOCSIFDESCR = 0x80206929 + SIOCSIFDSTADDR = 0x8020690e + SIOCSIFFIB = 0x8020695d + SIOCSIFFLAGS = 0x80206910 + SIOCSIFGENERIC = 0x80206939 + SIOCSIFLLADDR = 0x8020693c + SIOCSIFMAC = 0x80206927 + SIOCSIFMEDIA = 0xc0206937 + SIOCSIFMETRIC = 0x80206918 + SIOCSIFMTU = 0x80206934 + SIOCSIFNAME = 0x80206928 + SIOCSIFNETMASK = 0x80206916 + SIOCSIFPHYADDR = 0x80406946 + SIOCSIFPHYS = 0x80206936 + SIOCSIFRVNET = 0xc020695b + SIOCSIFVNET = 0xc020695a + SIOCSLIFPHYADDR = 0x8118694a + SIOCSLOWAT = 0x80047302 + SIOCSPGRP = 0x80047308 + SOCK_CLOEXEC = 0x10000000 + SOCK_DGRAM = 0x2 + SOCK_MAXADDRLEN = 0xff + SOCK_NONBLOCK = 0x20000000 + SOCK_RAW = 0x3 + SOCK_RDM = 0x4 + SOCK_SEQPACKET = 0x5 + SOCK_STREAM = 0x1 + SOL_SOCKET = 0xffff + SOMAXCONN = 0x80 + SO_ACCEPTCONN = 0x2 + SO_ACCEPTFILTER = 0x1000 + SO_BINTIME = 0x2000 + SO_BROADCAST = 0x20 + SO_DEBUG = 0x1 + SO_DONTROUTE = 0x10 + SO_ERROR = 0x1007 + SO_KEEPALIVE = 0x8 + SO_LABEL = 0x1009 + SO_LINGER = 0x80 + SO_LISTENINCQLEN = 0x1013 + SO_LISTENQLEN = 0x1012 + SO_LISTENQLIMIT = 0x1011 + SO_NOSIGPIPE = 0x800 + SO_NO_DDP = 0x8000 + SO_NO_OFFLOAD = 0x4000 + SO_OOBINLINE = 0x100 + SO_PEERLABEL = 0x1010 + SO_PROTOCOL = 0x1016 + SO_PROTOTYPE = 0x1016 + SO_RCVBUF = 0x1002 + SO_RCVLOWAT = 0x1004 + SO_RCVTIMEO = 0x1006 + SO_REUSEADDR = 0x4 + SO_REUSEPORT = 0x200 + SO_SETFIB = 0x1014 + SO_SNDBUF = 0x1001 + SO_SNDLOWAT = 0x1003 + SO_SNDTIMEO = 0x1005 + SO_TIMESTAMP = 0x400 + SO_TYPE = 0x1008 + SO_USELOOPBACK = 0x40 + SO_USER_COOKIE = 0x1015 + SO_VENDOR = 0x80000000 + TCIFLUSH = 0x1 + TCIOFLUSH = 0x3 + TCOFLUSH = 0x2 + TCP_CA_NAME_MAX = 0x10 + TCP_CONGESTION = 0x40 + TCP_INFO = 0x20 + TCP_KEEPCNT = 0x400 + TCP_KEEPIDLE = 
0x100 + TCP_KEEPINIT = 0x80 + TCP_KEEPINTVL = 0x200 + TCP_MAXBURST = 0x4 + TCP_MAXHLEN = 0x3c + TCP_MAXOLEN = 0x28 + TCP_MAXSEG = 0x2 + TCP_MAXWIN = 0xffff + TCP_MAX_SACK = 0x4 + TCP_MAX_WINSHIFT = 0xe + TCP_MD5SIG = 0x10 + TCP_MINMSS = 0xd8 + TCP_MSS = 0x218 + TCP_NODELAY = 0x1 + TCP_NOOPT = 0x8 + TCP_NOPUSH = 0x4 + TCP_VENDOR = 0x80000000 + TCSAFLUSH = 0x2 + TIOCCBRK = 0x2000747a + TIOCCDTR = 0x20007478 + TIOCCONS = 0x80047462 + TIOCDRAIN = 0x2000745e + TIOCEXCL = 0x2000740d + TIOCEXT = 0x80047460 + TIOCFLUSH = 0x80047410 + TIOCGDRAINWAIT = 0x40047456 + TIOCGETA = 0x402c7413 + TIOCGETD = 0x4004741a + TIOCGPGRP = 0x40047477 + TIOCGPTN = 0x4004740f + TIOCGSID = 0x40047463 + TIOCGWINSZ = 0x40087468 + TIOCMBIC = 0x8004746b + TIOCMBIS = 0x8004746c + TIOCMGDTRWAIT = 0x4004745a + TIOCMGET = 0x4004746a + TIOCMSDTRWAIT = 0x8004745b + TIOCMSET = 0x8004746d + TIOCM_CAR = 0x40 + TIOCM_CD = 0x40 + TIOCM_CTS = 0x20 + TIOCM_DCD = 0x40 + TIOCM_DSR = 0x100 + TIOCM_DTR = 0x2 + TIOCM_LE = 0x1 + TIOCM_RI = 0x80 + TIOCM_RNG = 0x80 + TIOCM_RTS = 0x4 + TIOCM_SR = 0x10 + TIOCM_ST = 0x8 + TIOCNOTTY = 0x20007471 + TIOCNXCL = 0x2000740e + TIOCOUTQ = 0x40047473 + TIOCPKT = 0x80047470 + TIOCPKT_DATA = 0x0 + TIOCPKT_DOSTOP = 0x20 + TIOCPKT_FLUSHREAD = 0x1 + TIOCPKT_FLUSHWRITE = 0x2 + TIOCPKT_IOCTL = 0x40 + TIOCPKT_NOSTOP = 0x10 + TIOCPKT_START = 0x8 + TIOCPKT_STOP = 0x4 + TIOCPTMASTER = 0x2000741c + TIOCSBRK = 0x2000747b + TIOCSCTTY = 0x20007461 + TIOCSDRAINWAIT = 0x80047457 + TIOCSDTR = 0x20007479 + TIOCSETA = 0x802c7414 + TIOCSETAF = 0x802c7416 + TIOCSETAW = 0x802c7415 + TIOCSETD = 0x8004741b + TIOCSIG = 0x2004745f + TIOCSPGRP = 0x80047476 + TIOCSTART = 0x2000746e + TIOCSTAT = 0x20007465 + TIOCSTI = 0x80017472 + TIOCSTOP = 0x2000746f + TIOCSWINSZ = 0x80087467 + TIOCTIMESTAMP = 0x40107459 + TIOCUCNTL = 0x80047466 + TOSTOP = 0x400000 + VDISCARD = 0xf + VDSUSP = 0xb + VEOF = 0x0 + VEOL = 0x1 + VEOL2 = 0x2 + VERASE = 0x3 + VERASE2 = 0x7 + VINTR = 0x8 + VKILL = 0x5 + VLNEXT = 0xe + VMIN = 0x10 + VQUIT = 0x9 + VREPRINT = 0x6 + VSTART = 0xc + VSTATUS = 0x12 + VSTOP = 0xd + VSUSP = 0xa + VTIME = 0x11 + VWERASE = 0x4 + WCONTINUED = 0x4 + WCOREFLAG = 0x80 + WEXITED = 0x10 + WLINUXCLONE = 0x80000000 + WNOHANG = 0x1 + WNOWAIT = 0x8 + WSTOPPED = 0x2 + WTRAPPED = 0x20 + WUNTRACED = 0x2 +) + +// Errors +const ( + E2BIG = syscall.Errno(0x7) + EACCES = syscall.Errno(0xd) + EADDRINUSE = syscall.Errno(0x30) + EADDRNOTAVAIL = syscall.Errno(0x31) + EAFNOSUPPORT = syscall.Errno(0x2f) + EAGAIN = syscall.Errno(0x23) + EALREADY = syscall.Errno(0x25) + EAUTH = syscall.Errno(0x50) + EBADF = syscall.Errno(0x9) + EBADMSG = syscall.Errno(0x59) + EBADRPC = syscall.Errno(0x48) + EBUSY = syscall.Errno(0x10) + ECANCELED = syscall.Errno(0x55) + ECAPMODE = syscall.Errno(0x5e) + ECHILD = syscall.Errno(0xa) + ECONNABORTED = syscall.Errno(0x35) + ECONNREFUSED = syscall.Errno(0x3d) + ECONNRESET = syscall.Errno(0x36) + EDEADLK = syscall.Errno(0xb) + EDESTADDRREQ = syscall.Errno(0x27) + EDOM = syscall.Errno(0x21) + EDOOFUS = syscall.Errno(0x58) + EDQUOT = syscall.Errno(0x45) + EEXIST = syscall.Errno(0x11) + EFAULT = syscall.Errno(0xe) + EFBIG = syscall.Errno(0x1b) + EFTYPE = syscall.Errno(0x4f) + EHOSTDOWN = syscall.Errno(0x40) + EHOSTUNREACH = syscall.Errno(0x41) + EIDRM = syscall.Errno(0x52) + EILSEQ = syscall.Errno(0x56) + EINPROGRESS = syscall.Errno(0x24) + EINTR = syscall.Errno(0x4) + EINVAL = syscall.Errno(0x16) + EIO = syscall.Errno(0x5) + EISCONN = syscall.Errno(0x38) + EISDIR = syscall.Errno(0x15) + ELAST = syscall.Errno(0x60) + ELOOP = 
syscall.Errno(0x3e) + EMFILE = syscall.Errno(0x18) + EMLINK = syscall.Errno(0x1f) + EMSGSIZE = syscall.Errno(0x28) + EMULTIHOP = syscall.Errno(0x5a) + ENAMETOOLONG = syscall.Errno(0x3f) + ENEEDAUTH = syscall.Errno(0x51) + ENETDOWN = syscall.Errno(0x32) + ENETRESET = syscall.Errno(0x34) + ENETUNREACH = syscall.Errno(0x33) + ENFILE = syscall.Errno(0x17) + ENOATTR = syscall.Errno(0x57) + ENOBUFS = syscall.Errno(0x37) + ENODEV = syscall.Errno(0x13) + ENOENT = syscall.Errno(0x2) + ENOEXEC = syscall.Errno(0x8) + ENOLCK = syscall.Errno(0x4d) + ENOLINK = syscall.Errno(0x5b) + ENOMEM = syscall.Errno(0xc) + ENOMSG = syscall.Errno(0x53) + ENOPROTOOPT = syscall.Errno(0x2a) + ENOSPC = syscall.Errno(0x1c) + ENOSYS = syscall.Errno(0x4e) + ENOTBLK = syscall.Errno(0xf) + ENOTCAPABLE = syscall.Errno(0x5d) + ENOTCONN = syscall.Errno(0x39) + ENOTDIR = syscall.Errno(0x14) + ENOTEMPTY = syscall.Errno(0x42) + ENOTRECOVERABLE = syscall.Errno(0x5f) + ENOTSOCK = syscall.Errno(0x26) + ENOTSUP = syscall.Errno(0x2d) + ENOTTY = syscall.Errno(0x19) + ENXIO = syscall.Errno(0x6) + EOPNOTSUPP = syscall.Errno(0x2d) + EOVERFLOW = syscall.Errno(0x54) + EOWNERDEAD = syscall.Errno(0x60) + EPERM = syscall.Errno(0x1) + EPFNOSUPPORT = syscall.Errno(0x2e) + EPIPE = syscall.Errno(0x20) + EPROCLIM = syscall.Errno(0x43) + EPROCUNAVAIL = syscall.Errno(0x4c) + EPROGMISMATCH = syscall.Errno(0x4b) + EPROGUNAVAIL = syscall.Errno(0x4a) + EPROTO = syscall.Errno(0x5c) + EPROTONOSUPPORT = syscall.Errno(0x2b) + EPROTOTYPE = syscall.Errno(0x29) + ERANGE = syscall.Errno(0x22) + EREMOTE = syscall.Errno(0x47) + EROFS = syscall.Errno(0x1e) + ERPCMISMATCH = syscall.Errno(0x49) + ESHUTDOWN = syscall.Errno(0x3a) + ESOCKTNOSUPPORT = syscall.Errno(0x2c) + ESPIPE = syscall.Errno(0x1d) + ESRCH = syscall.Errno(0x3) + ESTALE = syscall.Errno(0x46) + ETIMEDOUT = syscall.Errno(0x3c) + ETOOMANYREFS = syscall.Errno(0x3b) + ETXTBSY = syscall.Errno(0x1a) + EUSERS = syscall.Errno(0x44) + EWOULDBLOCK = syscall.Errno(0x23) + EXDEV = syscall.Errno(0x12) +) + +// Signals +const ( + SIGABRT = syscall.Signal(0x6) + SIGALRM = syscall.Signal(0xe) + SIGBUS = syscall.Signal(0xa) + SIGCHLD = syscall.Signal(0x14) + SIGCONT = syscall.Signal(0x13) + SIGEMT = syscall.Signal(0x7) + SIGFPE = syscall.Signal(0x8) + SIGHUP = syscall.Signal(0x1) + SIGILL = syscall.Signal(0x4) + SIGINFO = syscall.Signal(0x1d) + SIGINT = syscall.Signal(0x2) + SIGIO = syscall.Signal(0x17) + SIGIOT = syscall.Signal(0x6) + SIGKILL = syscall.Signal(0x9) + SIGLIBRT = syscall.Signal(0x21) + SIGLWP = syscall.Signal(0x20) + SIGPIPE = syscall.Signal(0xd) + SIGPROF = syscall.Signal(0x1b) + SIGQUIT = syscall.Signal(0x3) + SIGSEGV = syscall.Signal(0xb) + SIGSTOP = syscall.Signal(0x11) + SIGSYS = syscall.Signal(0xc) + SIGTERM = syscall.Signal(0xf) + SIGTHR = syscall.Signal(0x20) + SIGTRAP = syscall.Signal(0x5) + SIGTSTP = syscall.Signal(0x12) + SIGTTIN = syscall.Signal(0x15) + SIGTTOU = syscall.Signal(0x16) + SIGURG = syscall.Signal(0x10) + SIGUSR1 = syscall.Signal(0x1e) + SIGUSR2 = syscall.Signal(0x1f) + SIGVTALRM = syscall.Signal(0x1a) + SIGWINCH = syscall.Signal(0x1c) + SIGXCPU = syscall.Signal(0x18) + SIGXFSZ = syscall.Signal(0x19) +) + +// Error table +var errors = [...]string{ + 1: "operation not permitted", + 2: "no such file or directory", + 3: "no such process", + 4: "interrupted system call", + 5: "input/output error", + 6: "device not configured", + 7: "argument list too long", + 8: "exec format error", + 9: "bad file descriptor", + 10: "no child processes", + 11: "resource deadlock avoided", + 12: "cannot 
allocate memory", + 13: "permission denied", + 14: "bad address", + 15: "block device required", + 16: "device busy", + 17: "file exists", + 18: "cross-device link", + 19: "operation not supported by device", + 20: "not a directory", + 21: "is a directory", + 22: "invalid argument", + 23: "too many open files in system", + 24: "too many open files", + 25: "inappropriate ioctl for device", + 26: "text file busy", + 27: "file too large", + 28: "no space left on device", + 29: "illegal seek", + 30: "read-only file system", + 31: "too many links", + 32: "broken pipe", + 33: "numerical argument out of domain", + 34: "result too large", + 35: "resource temporarily unavailable", + 36: "operation now in progress", + 37: "operation already in progress", + 38: "socket operation on non-socket", + 39: "destination address required", + 40: "message too long", + 41: "protocol wrong type for socket", + 42: "protocol not available", + 43: "protocol not supported", + 44: "socket type not supported", + 45: "operation not supported", + 46: "protocol family not supported", + 47: "address family not supported by protocol family", + 48: "address already in use", + 49: "can't assign requested address", + 50: "network is down", + 51: "network is unreachable", + 52: "network dropped connection on reset", + 53: "software caused connection abort", + 54: "connection reset by peer", + 55: "no buffer space available", + 56: "socket is already connected", + 57: "socket is not connected", + 58: "can't send after socket shutdown", + 59: "too many references: can't splice", + 60: "operation timed out", + 61: "connection refused", + 62: "too many levels of symbolic links", + 63: "file name too long", + 64: "host is down", + 65: "no route to host", + 66: "directory not empty", + 67: "too many processes", + 68: "too many users", + 69: "disc quota exceeded", + 70: "stale NFS file handle", + 71: "too many levels of remote in path", + 72: "RPC struct is bad", + 73: "RPC version wrong", + 74: "RPC prog. 
not avail", + 75: "program version wrong", + 76: "bad procedure for program", + 77: "no locks available", + 78: "function not implemented", + 79: "inappropriate file type or format", + 80: "authentication error", + 81: "need authenticator", + 82: "identifier removed", + 83: "no message of desired type", + 84: "value too large to be stored in data type", + 85: "operation canceled", + 86: "illegal byte sequence", + 87: "attribute not found", + 88: "programming error", + 89: "bad message", + 90: "multihop attempted", + 91: "link has been severed", + 92: "protocol error", + 93: "capabilities insufficient", + 94: "not permitted in capability mode", + 95: "state not recoverable", + 96: "previous owner died", +} + +// Signal table +var signals = [...]string{ + 1: "hangup", + 2: "interrupt", + 3: "quit", + 4: "illegal instruction", + 5: "trace/BPT trap", + 6: "abort trap", + 7: "EMT trap", + 8: "floating point exception", + 9: "killed", + 10: "bus error", + 11: "segmentation fault", + 12: "bad system call", + 13: "broken pipe", + 14: "alarm clock", + 15: "terminated", + 16: "urgent I/O condition", + 17: "suspended (signal)", + 18: "suspended", + 19: "continued", + 20: "child exited", + 21: "stopped (tty input)", + 22: "stopped (tty output)", + 23: "I/O possible", + 24: "cputime limit exceeded", + 25: "filesize limit exceeded", + 26: "virtual timer expired", + 27: "profiling timer expired", + 28: "window size changes", + 29: "information request", + 30: "user defined signal 1", + 31: "user defined signal 2", + 32: "unknown signal", + 33: "unknown signal", +} diff --git a/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zerrors_freebsd_arm.go b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zerrors_freebsd_arm.go new file mode 100644 index 000000000..2afbe2d5e --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zerrors_freebsd_arm.go @@ -0,0 +1,1729 @@ +// mkerrors.sh +// MACHINE GENERATED BY THE COMMAND ABOVE; DO NOT EDIT + +// +build arm,freebsd + +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs -- _const.go + +package unix + +import "syscall" + +const ( + AF_APPLETALK = 0x10 + AF_ARP = 0x23 + AF_ATM = 0x1e + AF_BLUETOOTH = 0x24 + AF_CCITT = 0xa + AF_CHAOS = 0x5 + AF_CNT = 0x15 + AF_COIP = 0x14 + AF_DATAKIT = 0x9 + AF_DECnet = 0xc + AF_DLI = 0xd + AF_E164 = 0x1a + AF_ECMA = 0x8 + AF_HYLINK = 0xf + AF_IEEE80211 = 0x25 + AF_IMPLINK = 0x3 + AF_INET = 0x2 + AF_INET6 = 0x1c + AF_INET6_SDP = 0x2a + AF_INET_SDP = 0x28 + AF_IPX = 0x17 + AF_ISDN = 0x1a + AF_ISO = 0x7 + AF_LAT = 0xe + AF_LINK = 0x12 + AF_LOCAL = 0x1 + AF_MAX = 0x2a + AF_NATM = 0x1d + AF_NETBIOS = 0x6 + AF_NETGRAPH = 0x20 + AF_OSI = 0x7 + AF_PUP = 0x4 + AF_ROUTE = 0x11 + AF_SCLUSTER = 0x22 + AF_SIP = 0x18 + AF_SLOW = 0x21 + AF_SNA = 0xb + AF_UNIX = 0x1 + AF_UNSPEC = 0x0 + AF_VENDOR00 = 0x27 + AF_VENDOR01 = 0x29 + AF_VENDOR02 = 0x2b + AF_VENDOR03 = 0x2d + AF_VENDOR04 = 0x2f + AF_VENDOR05 = 0x31 + AF_VENDOR06 = 0x33 + AF_VENDOR07 = 0x35 + AF_VENDOR08 = 0x37 + AF_VENDOR09 = 0x39 + AF_VENDOR10 = 0x3b + AF_VENDOR11 = 0x3d + AF_VENDOR12 = 0x3f + AF_VENDOR13 = 0x41 + AF_VENDOR14 = 0x43 + AF_VENDOR15 = 0x45 + AF_VENDOR16 = 0x47 + AF_VENDOR17 = 0x49 + AF_VENDOR18 = 0x4b + AF_VENDOR19 = 0x4d + AF_VENDOR20 = 0x4f + AF_VENDOR21 = 0x51 + AF_VENDOR22 = 0x53 + AF_VENDOR23 = 0x55 + AF_VENDOR24 = 0x57 + AF_VENDOR25 = 0x59 + AF_VENDOR26 = 0x5b + AF_VENDOR27 = 0x5d + AF_VENDOR28 = 0x5f + AF_VENDOR29 = 0x61 + AF_VENDOR30 = 0x63 + AF_VENDOR31 = 0x65 + AF_VENDOR32 = 
0x67 + AF_VENDOR33 = 0x69 + AF_VENDOR34 = 0x6b + AF_VENDOR35 = 0x6d + AF_VENDOR36 = 0x6f + AF_VENDOR37 = 0x71 + AF_VENDOR38 = 0x73 + AF_VENDOR39 = 0x75 + AF_VENDOR40 = 0x77 + AF_VENDOR41 = 0x79 + AF_VENDOR42 = 0x7b + AF_VENDOR43 = 0x7d + AF_VENDOR44 = 0x7f + AF_VENDOR45 = 0x81 + AF_VENDOR46 = 0x83 + AF_VENDOR47 = 0x85 + B0 = 0x0 + B110 = 0x6e + B115200 = 0x1c200 + B1200 = 0x4b0 + B134 = 0x86 + B14400 = 0x3840 + B150 = 0x96 + B1800 = 0x708 + B19200 = 0x4b00 + B200 = 0xc8 + B230400 = 0x38400 + B2400 = 0x960 + B28800 = 0x7080 + B300 = 0x12c + B38400 = 0x9600 + B460800 = 0x70800 + B4800 = 0x12c0 + B50 = 0x32 + B57600 = 0xe100 + B600 = 0x258 + B7200 = 0x1c20 + B75 = 0x4b + B76800 = 0x12c00 + B921600 = 0xe1000 + B9600 = 0x2580 + BIOCFEEDBACK = 0x8004427c + BIOCFLUSH = 0x20004268 + BIOCGBLEN = 0x40044266 + BIOCGDIRECTION = 0x40044276 + BIOCGDLT = 0x4004426a + BIOCGDLTLIST = 0xc0084279 + BIOCGETBUFMODE = 0x4004427d + BIOCGETIF = 0x4020426b + BIOCGETZMAX = 0x4004427f + BIOCGHDRCMPLT = 0x40044274 + BIOCGRSIG = 0x40044272 + BIOCGRTIMEOUT = 0x4008426e + BIOCGSEESENT = 0x40044276 + BIOCGSTATS = 0x4008426f + BIOCGTSTAMP = 0x40044283 + BIOCIMMEDIATE = 0x80044270 + BIOCLOCK = 0x2000427a + BIOCPROMISC = 0x20004269 + BIOCROTZBUF = 0x400c4280 + BIOCSBLEN = 0xc0044266 + BIOCSDIRECTION = 0x80044277 + BIOCSDLT = 0x80044278 + BIOCSETBUFMODE = 0x8004427e + BIOCSETF = 0x80084267 + BIOCSETFNR = 0x80084282 + BIOCSETIF = 0x8020426c + BIOCSETWF = 0x8008427b + BIOCSETZBUF = 0x800c4281 + BIOCSHDRCMPLT = 0x80044275 + BIOCSRSIG = 0x80044273 + BIOCSRTIMEOUT = 0x8008426d + BIOCSSEESENT = 0x80044277 + BIOCSTSTAMP = 0x80044284 + BIOCVERSION = 0x40044271 + BPF_A = 0x10 + BPF_ABS = 0x20 + BPF_ADD = 0x0 + BPF_ALIGNMENT = 0x4 + BPF_ALU = 0x4 + BPF_AND = 0x50 + BPF_B = 0x10 + BPF_BUFMODE_BUFFER = 0x1 + BPF_BUFMODE_ZBUF = 0x2 + BPF_DIV = 0x30 + BPF_H = 0x8 + BPF_IMM = 0x0 + BPF_IND = 0x40 + BPF_JA = 0x0 + BPF_JEQ = 0x10 + BPF_JGE = 0x30 + BPF_JGT = 0x20 + BPF_JMP = 0x5 + BPF_JSET = 0x40 + BPF_K = 0x0 + BPF_LD = 0x0 + BPF_LDX = 0x1 + BPF_LEN = 0x80 + BPF_LSH = 0x60 + BPF_MAJOR_VERSION = 0x1 + BPF_MAXBUFSIZE = 0x80000 + BPF_MAXINSNS = 0x200 + BPF_MEM = 0x60 + BPF_MEMWORDS = 0x10 + BPF_MINBUFSIZE = 0x20 + BPF_MINOR_VERSION = 0x1 + BPF_MISC = 0x7 + BPF_MSH = 0xa0 + BPF_MUL = 0x20 + BPF_NEG = 0x80 + BPF_OR = 0x40 + BPF_RELEASE = 0x30bb6 + BPF_RET = 0x6 + BPF_RSH = 0x70 + BPF_ST = 0x2 + BPF_STX = 0x3 + BPF_SUB = 0x10 + BPF_TAX = 0x0 + BPF_TXA = 0x80 + BPF_T_BINTIME = 0x2 + BPF_T_BINTIME_FAST = 0x102 + BPF_T_BINTIME_MONOTONIC = 0x202 + BPF_T_BINTIME_MONOTONIC_FAST = 0x302 + BPF_T_FAST = 0x100 + BPF_T_FLAG_MASK = 0x300 + BPF_T_FORMAT_MASK = 0x3 + BPF_T_MICROTIME = 0x0 + BPF_T_MICROTIME_FAST = 0x100 + BPF_T_MICROTIME_MONOTONIC = 0x200 + BPF_T_MICROTIME_MONOTONIC_FAST = 0x300 + BPF_T_MONOTONIC = 0x200 + BPF_T_MONOTONIC_FAST = 0x300 + BPF_T_NANOTIME = 0x1 + BPF_T_NANOTIME_FAST = 0x101 + BPF_T_NANOTIME_MONOTONIC = 0x201 + BPF_T_NANOTIME_MONOTONIC_FAST = 0x301 + BPF_T_NONE = 0x3 + BPF_T_NORMAL = 0x0 + BPF_W = 0x0 + BPF_X = 0x8 + BRKINT = 0x2 + CFLUSH = 0xf + CLOCAL = 0x8000 + CREAD = 0x800 + CS5 = 0x0 + CS6 = 0x100 + CS7 = 0x200 + CS8 = 0x300 + CSIZE = 0x300 + CSTART = 0x11 + CSTATUS = 0x14 + CSTOP = 0x13 + CSTOPB = 0x400 + CSUSP = 0x1a + CTL_MAXNAME = 0x18 + CTL_NET = 0x4 + DLT_A429 = 0xb8 + DLT_A653_ICM = 0xb9 + DLT_AIRONET_HEADER = 0x78 + DLT_AOS = 0xde + DLT_APPLE_IP_OVER_IEEE1394 = 0x8a + DLT_ARCNET = 0x7 + DLT_ARCNET_LINUX = 0x81 + DLT_ATM_CLIP = 0x13 + DLT_ATM_RFC1483 = 0xb + DLT_AURORA = 0x7e + DLT_AX25 = 0x3 + DLT_AX25_KISS = 0xca + 
DLT_BACNET_MS_TP = 0xa5 + DLT_BLUETOOTH_HCI_H4 = 0xbb + DLT_BLUETOOTH_HCI_H4_WITH_PHDR = 0xc9 + DLT_CAN20B = 0xbe + DLT_CAN_SOCKETCAN = 0xe3 + DLT_CHAOS = 0x5 + DLT_CHDLC = 0x68 + DLT_CISCO_IOS = 0x76 + DLT_C_HDLC = 0x68 + DLT_C_HDLC_WITH_DIR = 0xcd + DLT_DBUS = 0xe7 + DLT_DECT = 0xdd + DLT_DOCSIS = 0x8f + DLT_DVB_CI = 0xeb + DLT_ECONET = 0x73 + DLT_EN10MB = 0x1 + DLT_EN3MB = 0x2 + DLT_ENC = 0x6d + DLT_ERF = 0xc5 + DLT_ERF_ETH = 0xaf + DLT_ERF_POS = 0xb0 + DLT_FC_2 = 0xe0 + DLT_FC_2_WITH_FRAME_DELIMS = 0xe1 + DLT_FDDI = 0xa + DLT_FLEXRAY = 0xd2 + DLT_FRELAY = 0x6b + DLT_FRELAY_WITH_DIR = 0xce + DLT_GCOM_SERIAL = 0xad + DLT_GCOM_T1E1 = 0xac + DLT_GPF_F = 0xab + DLT_GPF_T = 0xaa + DLT_GPRS_LLC = 0xa9 + DLT_GSMTAP_ABIS = 0xda + DLT_GSMTAP_UM = 0xd9 + DLT_HHDLC = 0x79 + DLT_IBM_SN = 0x92 + DLT_IBM_SP = 0x91 + DLT_IEEE802 = 0x6 + DLT_IEEE802_11 = 0x69 + DLT_IEEE802_11_RADIO = 0x7f + DLT_IEEE802_11_RADIO_AVS = 0xa3 + DLT_IEEE802_15_4 = 0xc3 + DLT_IEEE802_15_4_LINUX = 0xbf + DLT_IEEE802_15_4_NOFCS = 0xe6 + DLT_IEEE802_15_4_NONASK_PHY = 0xd7 + DLT_IEEE802_16_MAC_CPS = 0xbc + DLT_IEEE802_16_MAC_CPS_RADIO = 0xc1 + DLT_IPFILTER = 0x74 + DLT_IPMB = 0xc7 + DLT_IPMB_LINUX = 0xd1 + DLT_IPNET = 0xe2 + DLT_IPOIB = 0xf2 + DLT_IPV4 = 0xe4 + DLT_IPV6 = 0xe5 + DLT_IP_OVER_FC = 0x7a + DLT_JUNIPER_ATM1 = 0x89 + DLT_JUNIPER_ATM2 = 0x87 + DLT_JUNIPER_ATM_CEMIC = 0xee + DLT_JUNIPER_CHDLC = 0xb5 + DLT_JUNIPER_ES = 0x84 + DLT_JUNIPER_ETHER = 0xb2 + DLT_JUNIPER_FIBRECHANNEL = 0xea + DLT_JUNIPER_FRELAY = 0xb4 + DLT_JUNIPER_GGSN = 0x85 + DLT_JUNIPER_ISM = 0xc2 + DLT_JUNIPER_MFR = 0x86 + DLT_JUNIPER_MLFR = 0x83 + DLT_JUNIPER_MLPPP = 0x82 + DLT_JUNIPER_MONITOR = 0xa4 + DLT_JUNIPER_PIC_PEER = 0xae + DLT_JUNIPER_PPP = 0xb3 + DLT_JUNIPER_PPPOE = 0xa7 + DLT_JUNIPER_PPPOE_ATM = 0xa8 + DLT_JUNIPER_SERVICES = 0x88 + DLT_JUNIPER_SRX_E2E = 0xe9 + DLT_JUNIPER_ST = 0xc8 + DLT_JUNIPER_VP = 0xb7 + DLT_JUNIPER_VS = 0xe8 + DLT_LAPB_WITH_DIR = 0xcf + DLT_LAPD = 0xcb + DLT_LIN = 0xd4 + DLT_LINUX_EVDEV = 0xd8 + DLT_LINUX_IRDA = 0x90 + DLT_LINUX_LAPD = 0xb1 + DLT_LINUX_PPP_WITHDIRECTION = 0xa6 + DLT_LINUX_SLL = 0x71 + DLT_LOOP = 0x6c + DLT_LTALK = 0x72 + DLT_MATCHING_MAX = 0xf6 + DLT_MATCHING_MIN = 0x68 + DLT_MFR = 0xb6 + DLT_MOST = 0xd3 + DLT_MPEG_2_TS = 0xf3 + DLT_MPLS = 0xdb + DLT_MTP2 = 0x8c + DLT_MTP2_WITH_PHDR = 0x8b + DLT_MTP3 = 0x8d + DLT_MUX27010 = 0xec + DLT_NETANALYZER = 0xf0 + DLT_NETANALYZER_TRANSPARENT = 0xf1 + DLT_NFC_LLCP = 0xf5 + DLT_NFLOG = 0xef + DLT_NG40 = 0xf4 + DLT_NULL = 0x0 + DLT_PCI_EXP = 0x7d + DLT_PFLOG = 0x75 + DLT_PFSYNC = 0x79 + DLT_PPI = 0xc0 + DLT_PPP = 0x9 + DLT_PPP_BSDOS = 0x10 + DLT_PPP_ETHER = 0x33 + DLT_PPP_PPPD = 0xa6 + DLT_PPP_SERIAL = 0x32 + DLT_PPP_WITH_DIR = 0xcc + DLT_PPP_WITH_DIRECTION = 0xa6 + DLT_PRISM_HEADER = 0x77 + DLT_PRONET = 0x4 + DLT_RAIF1 = 0xc6 + DLT_RAW = 0xc + DLT_RIO = 0x7c + DLT_SCCP = 0x8e + DLT_SITA = 0xc4 + DLT_SLIP = 0x8 + DLT_SLIP_BSDOS = 0xf + DLT_STANAG_5066_D_PDU = 0xed + DLT_SUNATM = 0x7b + DLT_SYMANTEC_FIREWALL = 0x63 + DLT_TZSP = 0x80 + DLT_USB = 0xba + DLT_USB_LINUX = 0xbd + DLT_USB_LINUX_MMAPPED = 0xdc + DLT_USER0 = 0x93 + DLT_USER1 = 0x94 + DLT_USER10 = 0x9d + DLT_USER11 = 0x9e + DLT_USER12 = 0x9f + DLT_USER13 = 0xa0 + DLT_USER14 = 0xa1 + DLT_USER15 = 0xa2 + DLT_USER2 = 0x95 + DLT_USER3 = 0x96 + DLT_USER4 = 0x97 + DLT_USER5 = 0x98 + DLT_USER6 = 0x99 + DLT_USER7 = 0x9a + DLT_USER8 = 0x9b + DLT_USER9 = 0x9c + DLT_WIHART = 0xdf + DLT_X2E_SERIAL = 0xd5 + DLT_X2E_XORAYA = 0xd6 + DT_BLK = 0x6 + DT_CHR = 0x2 + DT_DIR = 0x4 + DT_FIFO = 0x1 + DT_LNK = 0xa + DT_REG = 0x8 + 
DT_SOCK = 0xc + DT_UNKNOWN = 0x0 + DT_WHT = 0xe + ECHO = 0x8 + ECHOCTL = 0x40 + ECHOE = 0x2 + ECHOK = 0x4 + ECHOKE = 0x1 + ECHONL = 0x10 + ECHOPRT = 0x20 + EVFILT_AIO = -0x3 + EVFILT_FS = -0x9 + EVFILT_LIO = -0xa + EVFILT_PROC = -0x5 + EVFILT_READ = -0x1 + EVFILT_SIGNAL = -0x6 + EVFILT_SYSCOUNT = 0xb + EVFILT_TIMER = -0x7 + EVFILT_USER = -0xb + EVFILT_VNODE = -0x4 + EVFILT_WRITE = -0x2 + EV_ADD = 0x1 + EV_CLEAR = 0x20 + EV_DELETE = 0x2 + EV_DISABLE = 0x8 + EV_DISPATCH = 0x80 + EV_DROP = 0x1000 + EV_ENABLE = 0x4 + EV_EOF = 0x8000 + EV_ERROR = 0x4000 + EV_FLAG1 = 0x2000 + EV_ONESHOT = 0x10 + EV_RECEIPT = 0x40 + EV_SYSFLAGS = 0xf000 + EXTA = 0x4b00 + EXTATTR_NAMESPACE_EMPTY = 0x0 + EXTATTR_NAMESPACE_SYSTEM = 0x2 + EXTATTR_NAMESPACE_USER = 0x1 + EXTB = 0x9600 + EXTPROC = 0x800 + FD_CLOEXEC = 0x1 + FD_SETSIZE = 0x400 + FLUSHO = 0x800000 + F_CANCEL = 0x5 + F_DUP2FD = 0xa + F_DUP2FD_CLOEXEC = 0x12 + F_DUPFD = 0x0 + F_DUPFD_CLOEXEC = 0x11 + F_GETFD = 0x1 + F_GETFL = 0x3 + F_GETLK = 0xb + F_GETOWN = 0x5 + F_OGETLK = 0x7 + F_OK = 0x0 + F_OSETLK = 0x8 + F_OSETLKW = 0x9 + F_RDAHEAD = 0x10 + F_RDLCK = 0x1 + F_READAHEAD = 0xf + F_SETFD = 0x2 + F_SETFL = 0x4 + F_SETLK = 0xc + F_SETLKW = 0xd + F_SETLK_REMOTE = 0xe + F_SETOWN = 0x6 + F_UNLCK = 0x2 + F_UNLCKSYS = 0x4 + F_WRLCK = 0x3 + HUPCL = 0x4000 + ICANON = 0x100 + ICMP6_FILTER = 0x12 + ICRNL = 0x100 + IEXTEN = 0x400 + IFAN_ARRIVAL = 0x0 + IFAN_DEPARTURE = 0x1 + IFF_ALLMULTI = 0x200 + IFF_ALTPHYS = 0x4000 + IFF_BROADCAST = 0x2 + IFF_CANTCHANGE = 0x218f72 + IFF_CANTCONFIG = 0x10000 + IFF_DEBUG = 0x4 + IFF_DRV_OACTIVE = 0x400 + IFF_DRV_RUNNING = 0x40 + IFF_DYING = 0x200000 + IFF_LINK0 = 0x1000 + IFF_LINK1 = 0x2000 + IFF_LINK2 = 0x4000 + IFF_LOOPBACK = 0x8 + IFF_MONITOR = 0x40000 + IFF_MULTICAST = 0x8000 + IFF_NOARP = 0x80 + IFF_OACTIVE = 0x400 + IFF_POINTOPOINT = 0x10 + IFF_PPROMISC = 0x20000 + IFF_PROMISC = 0x100 + IFF_RENAMING = 0x400000 + IFF_RUNNING = 0x40 + IFF_SIMPLEX = 0x800 + IFF_SMART = 0x20 + IFF_STATICARP = 0x80000 + IFF_UP = 0x1 + IFNAMSIZ = 0x10 + IFT_1822 = 0x2 + IFT_A12MPPSWITCH = 0x82 + IFT_AAL2 = 0xbb + IFT_AAL5 = 0x31 + IFT_ADSL = 0x5e + IFT_AFLANE8023 = 0x3b + IFT_AFLANE8025 = 0x3c + IFT_ARAP = 0x58 + IFT_ARCNET = 0x23 + IFT_ARCNETPLUS = 0x24 + IFT_ASYNC = 0x54 + IFT_ATM = 0x25 + IFT_ATMDXI = 0x69 + IFT_ATMFUNI = 0x6a + IFT_ATMIMA = 0x6b + IFT_ATMLOGICAL = 0x50 + IFT_ATMRADIO = 0xbd + IFT_ATMSUBINTERFACE = 0x86 + IFT_ATMVCIENDPT = 0xc2 + IFT_ATMVIRTUAL = 0x95 + IFT_BGPPOLICYACCOUNTING = 0xa2 + IFT_BRIDGE = 0xd1 + IFT_BSC = 0x53 + IFT_CARP = 0xf8 + IFT_CCTEMUL = 0x3d + IFT_CEPT = 0x13 + IFT_CES = 0x85 + IFT_CHANNEL = 0x46 + IFT_CNR = 0x55 + IFT_COFFEE = 0x84 + IFT_COMPOSITELINK = 0x9b + IFT_DCN = 0x8d + IFT_DIGITALPOWERLINE = 0x8a + IFT_DIGITALWRAPPEROVERHEADCHANNEL = 0xba + IFT_DLSW = 0x4a + IFT_DOCSCABLEDOWNSTREAM = 0x80 + IFT_DOCSCABLEMACLAYER = 0x7f + IFT_DOCSCABLEUPSTREAM = 0x81 + IFT_DS0 = 0x51 + IFT_DS0BUNDLE = 0x52 + IFT_DS1FDL = 0xaa + IFT_DS3 = 0x1e + IFT_DTM = 0x8c + IFT_DVBASILN = 0xac + IFT_DVBASIOUT = 0xad + IFT_DVBRCCDOWNSTREAM = 0x93 + IFT_DVBRCCMACLAYER = 0x92 + IFT_DVBRCCUPSTREAM = 0x94 + IFT_ENC = 0xf4 + IFT_EON = 0x19 + IFT_EPLRS = 0x57 + IFT_ESCON = 0x49 + IFT_ETHER = 0x6 + IFT_FAITH = 0xf2 + IFT_FAST = 0x7d + IFT_FASTETHER = 0x3e + IFT_FASTETHERFX = 0x45 + IFT_FDDI = 0xf + IFT_FIBRECHANNEL = 0x38 + IFT_FRAMERELAYINTERCONNECT = 0x3a + IFT_FRAMERELAYMPI = 0x5c + IFT_FRDLCIENDPT = 0xc1 + IFT_FRELAY = 0x20 + IFT_FRELAYDCE = 0x2c + IFT_FRF16MFRBUNDLE = 0xa3 + IFT_FRFORWARD = 0x9e + IFT_G703AT2MB = 0x43 + IFT_G703AT64K = 
0x42 + IFT_GIF = 0xf0 + IFT_GIGABITETHERNET = 0x75 + IFT_GR303IDT = 0xb2 + IFT_GR303RDT = 0xb1 + IFT_H323GATEKEEPER = 0xa4 + IFT_H323PROXY = 0xa5 + IFT_HDH1822 = 0x3 + IFT_HDLC = 0x76 + IFT_HDSL2 = 0xa8 + IFT_HIPERLAN2 = 0xb7 + IFT_HIPPI = 0x2f + IFT_HIPPIINTERFACE = 0x39 + IFT_HOSTPAD = 0x5a + IFT_HSSI = 0x2e + IFT_HY = 0xe + IFT_IBM370PARCHAN = 0x48 + IFT_IDSL = 0x9a + IFT_IEEE1394 = 0x90 + IFT_IEEE80211 = 0x47 + IFT_IEEE80212 = 0x37 + IFT_IEEE8023ADLAG = 0xa1 + IFT_IFGSN = 0x91 + IFT_IMT = 0xbe + IFT_INFINIBAND = 0xc7 + IFT_INTERLEAVE = 0x7c + IFT_IP = 0x7e + IFT_IPFORWARD = 0x8e + IFT_IPOVERATM = 0x72 + IFT_IPOVERCDLC = 0x6d + IFT_IPOVERCLAW = 0x6e + IFT_IPSWITCH = 0x4e + IFT_IPXIP = 0xf9 + IFT_ISDN = 0x3f + IFT_ISDNBASIC = 0x14 + IFT_ISDNPRIMARY = 0x15 + IFT_ISDNS = 0x4b + IFT_ISDNU = 0x4c + IFT_ISO88022LLC = 0x29 + IFT_ISO88023 = 0x7 + IFT_ISO88024 = 0x8 + IFT_ISO88025 = 0x9 + IFT_ISO88025CRFPINT = 0x62 + IFT_ISO88025DTR = 0x56 + IFT_ISO88025FIBER = 0x73 + IFT_ISO88026 = 0xa + IFT_ISUP = 0xb3 + IFT_L2VLAN = 0x87 + IFT_L3IPVLAN = 0x88 + IFT_L3IPXVLAN = 0x89 + IFT_LAPB = 0x10 + IFT_LAPD = 0x4d + IFT_LAPF = 0x77 + IFT_LOCALTALK = 0x2a + IFT_LOOP = 0x18 + IFT_MEDIAMAILOVERIP = 0x8b + IFT_MFSIGLINK = 0xa7 + IFT_MIOX25 = 0x26 + IFT_MODEM = 0x30 + IFT_MPC = 0x71 + IFT_MPLS = 0xa6 + IFT_MPLSTUNNEL = 0x96 + IFT_MSDSL = 0x8f + IFT_MVL = 0xbf + IFT_MYRINET = 0x63 + IFT_NFAS = 0xaf + IFT_NSIP = 0x1b + IFT_OPTICALCHANNEL = 0xc3 + IFT_OPTICALTRANSPORT = 0xc4 + IFT_OTHER = 0x1 + IFT_P10 = 0xc + IFT_P80 = 0xd + IFT_PARA = 0x22 + IFT_PFLOG = 0xf6 + IFT_PFSYNC = 0xf7 + IFT_PLC = 0xae + IFT_POS = 0xab + IFT_PPP = 0x17 + IFT_PPPMULTILINKBUNDLE = 0x6c + IFT_PROPBWAP2MP = 0xb8 + IFT_PROPCNLS = 0x59 + IFT_PROPDOCSWIRELESSDOWNSTREAM = 0xb5 + IFT_PROPDOCSWIRELESSMACLAYER = 0xb4 + IFT_PROPDOCSWIRELESSUPSTREAM = 0xb6 + IFT_PROPMUX = 0x36 + IFT_PROPVIRTUAL = 0x35 + IFT_PROPWIRELESSP2P = 0x9d + IFT_PTPSERIAL = 0x16 + IFT_PVC = 0xf1 + IFT_QLLC = 0x44 + IFT_RADIOMAC = 0xbc + IFT_RADSL = 0x5f + IFT_REACHDSL = 0xc0 + IFT_RFC1483 = 0x9f + IFT_RS232 = 0x21 + IFT_RSRB = 0x4f + IFT_SDLC = 0x11 + IFT_SDSL = 0x60 + IFT_SHDSL = 0xa9 + IFT_SIP = 0x1f + IFT_SLIP = 0x1c + IFT_SMDSDXI = 0x2b + IFT_SMDSICIP = 0x34 + IFT_SONET = 0x27 + IFT_SONETOVERHEADCHANNEL = 0xb9 + IFT_SONETPATH = 0x32 + IFT_SONETVT = 0x33 + IFT_SRP = 0x97 + IFT_SS7SIGLINK = 0x9c + IFT_STACKTOSTACK = 0x6f + IFT_STARLAN = 0xb + IFT_STF = 0xd7 + IFT_T1 = 0x12 + IFT_TDLC = 0x74 + IFT_TERMPAD = 0x5b + IFT_TR008 = 0xb0 + IFT_TRANSPHDLC = 0x7b + IFT_TUNNEL = 0x83 + IFT_ULTRA = 0x1d + IFT_USB = 0xa0 + IFT_V11 = 0x40 + IFT_V35 = 0x2d + IFT_V36 = 0x41 + IFT_V37 = 0x78 + IFT_VDSL = 0x61 + IFT_VIRTUALIPADDRESS = 0x70 + IFT_VOICEEM = 0x64 + IFT_VOICEENCAP = 0x67 + IFT_VOICEFXO = 0x65 + IFT_VOICEFXS = 0x66 + IFT_VOICEOVERATM = 0x98 + IFT_VOICEOVERFRAMERELAY = 0x99 + IFT_VOICEOVERIP = 0x68 + IFT_X213 = 0x5d + IFT_X25 = 0x5 + IFT_X25DDN = 0x4 + IFT_X25HUNTGROUP = 0x7a + IFT_X25MLP = 0x79 + IFT_X25PLE = 0x28 + IFT_XETHER = 0x1a + IGNBRK = 0x1 + IGNCR = 0x80 + IGNPAR = 0x4 + IMAXBEL = 0x2000 + INLCR = 0x40 + INPCK = 0x10 + IN_CLASSA_HOST = 0xffffff + IN_CLASSA_MAX = 0x80 + IN_CLASSA_NET = 0xff000000 + IN_CLASSA_NSHIFT = 0x18 + IN_CLASSB_HOST = 0xffff + IN_CLASSB_MAX = 0x10000 + IN_CLASSB_NET = 0xffff0000 + IN_CLASSB_NSHIFT = 0x10 + IN_CLASSC_HOST = 0xff + IN_CLASSC_NET = 0xffffff00 + IN_CLASSC_NSHIFT = 0x8 + IN_CLASSD_HOST = 0xfffffff + IN_CLASSD_NET = 0xf0000000 + IN_CLASSD_NSHIFT = 0x1c + IN_LOOPBACKNET = 0x7f + IN_RFC3021_MASK = 0xfffffffe + IPPROTO_3PC = 0x22 + 
IPPROTO_ADFS = 0x44 + IPPROTO_AH = 0x33 + IPPROTO_AHIP = 0x3d + IPPROTO_APES = 0x63 + IPPROTO_ARGUS = 0xd + IPPROTO_AX25 = 0x5d + IPPROTO_BHA = 0x31 + IPPROTO_BLT = 0x1e + IPPROTO_BRSATMON = 0x4c + IPPROTO_CARP = 0x70 + IPPROTO_CFTP = 0x3e + IPPROTO_CHAOS = 0x10 + IPPROTO_CMTP = 0x26 + IPPROTO_CPHB = 0x49 + IPPROTO_CPNX = 0x48 + IPPROTO_DDP = 0x25 + IPPROTO_DGP = 0x56 + IPPROTO_DIVERT = 0x102 + IPPROTO_DONE = 0x101 + IPPROTO_DSTOPTS = 0x3c + IPPROTO_EGP = 0x8 + IPPROTO_EMCON = 0xe + IPPROTO_ENCAP = 0x62 + IPPROTO_EON = 0x50 + IPPROTO_ESP = 0x32 + IPPROTO_ETHERIP = 0x61 + IPPROTO_FRAGMENT = 0x2c + IPPROTO_GGP = 0x3 + IPPROTO_GMTP = 0x64 + IPPROTO_GRE = 0x2f + IPPROTO_HELLO = 0x3f + IPPROTO_HIP = 0x8b + IPPROTO_HMP = 0x14 + IPPROTO_HOPOPTS = 0x0 + IPPROTO_ICMP = 0x1 + IPPROTO_ICMPV6 = 0x3a + IPPROTO_IDP = 0x16 + IPPROTO_IDPR = 0x23 + IPPROTO_IDRP = 0x2d + IPPROTO_IGMP = 0x2 + IPPROTO_IGP = 0x55 + IPPROTO_IGRP = 0x58 + IPPROTO_IL = 0x28 + IPPROTO_INLSP = 0x34 + IPPROTO_INP = 0x20 + IPPROTO_IP = 0x0 + IPPROTO_IPCOMP = 0x6c + IPPROTO_IPCV = 0x47 + IPPROTO_IPEIP = 0x5e + IPPROTO_IPIP = 0x4 + IPPROTO_IPPC = 0x43 + IPPROTO_IPV4 = 0x4 + IPPROTO_IPV6 = 0x29 + IPPROTO_IRTP = 0x1c + IPPROTO_KRYPTOLAN = 0x41 + IPPROTO_LARP = 0x5b + IPPROTO_LEAF1 = 0x19 + IPPROTO_LEAF2 = 0x1a + IPPROTO_MAX = 0x100 + IPPROTO_MAXID = 0x34 + IPPROTO_MEAS = 0x13 + IPPROTO_MH = 0x87 + IPPROTO_MHRP = 0x30 + IPPROTO_MICP = 0x5f + IPPROTO_MOBILE = 0x37 + IPPROTO_MPLS = 0x89 + IPPROTO_MTP = 0x5c + IPPROTO_MUX = 0x12 + IPPROTO_ND = 0x4d + IPPROTO_NHRP = 0x36 + IPPROTO_NONE = 0x3b + IPPROTO_NSP = 0x1f + IPPROTO_NVPII = 0xb + IPPROTO_OLD_DIVERT = 0xfe + IPPROTO_OSPFIGP = 0x59 + IPPROTO_PFSYNC = 0xf0 + IPPROTO_PGM = 0x71 + IPPROTO_PIGP = 0x9 + IPPROTO_PIM = 0x67 + IPPROTO_PRM = 0x15 + IPPROTO_PUP = 0xc + IPPROTO_PVP = 0x4b + IPPROTO_RAW = 0xff + IPPROTO_RCCMON = 0xa + IPPROTO_RDP = 0x1b + IPPROTO_RESERVED_253 = 0xfd + IPPROTO_RESERVED_254 = 0xfe + IPPROTO_ROUTING = 0x2b + IPPROTO_RSVP = 0x2e + IPPROTO_RVD = 0x42 + IPPROTO_SATEXPAK = 0x40 + IPPROTO_SATMON = 0x45 + IPPROTO_SCCSP = 0x60 + IPPROTO_SCTP = 0x84 + IPPROTO_SDRP = 0x2a + IPPROTO_SEND = 0x103 + IPPROTO_SEP = 0x21 + IPPROTO_SHIM6 = 0x8c + IPPROTO_SKIP = 0x39 + IPPROTO_SPACER = 0x7fff + IPPROTO_SRPC = 0x5a + IPPROTO_ST = 0x7 + IPPROTO_SVMTP = 0x52 + IPPROTO_SWIPE = 0x35 + IPPROTO_TCF = 0x57 + IPPROTO_TCP = 0x6 + IPPROTO_TLSP = 0x38 + IPPROTO_TP = 0x1d + IPPROTO_TPXX = 0x27 + IPPROTO_TRUNK1 = 0x17 + IPPROTO_TRUNK2 = 0x18 + IPPROTO_TTP = 0x54 + IPPROTO_UDP = 0x11 + IPPROTO_UDPLITE = 0x88 + IPPROTO_VINES = 0x53 + IPPROTO_VISA = 0x46 + IPPROTO_VMTP = 0x51 + IPPROTO_WBEXPAK = 0x4f + IPPROTO_WBMON = 0x4e + IPPROTO_WSN = 0x4a + IPPROTO_XNET = 0xf + IPPROTO_XTP = 0x24 + IPV6_AUTOFLOWLABEL = 0x3b + IPV6_BINDANY = 0x40 + IPV6_BINDV6ONLY = 0x1b + IPV6_CHECKSUM = 0x1a + IPV6_DEFAULT_MULTICAST_HOPS = 0x1 + IPV6_DEFAULT_MULTICAST_LOOP = 0x1 + IPV6_DEFHLIM = 0x40 + IPV6_DONTFRAG = 0x3e + IPV6_DSTOPTS = 0x32 + IPV6_FAITH = 0x1d + IPV6_FLOWINFO_MASK = 0xffffff0f + IPV6_FLOWLABEL_MASK = 0xffff0f00 + IPV6_FRAGTTL = 0x78 + IPV6_FW_ADD = 0x1e + IPV6_FW_DEL = 0x1f + IPV6_FW_FLUSH = 0x20 + IPV6_FW_GET = 0x22 + IPV6_FW_ZERO = 0x21 + IPV6_HLIMDEC = 0x1 + IPV6_HOPLIMIT = 0x2f + IPV6_HOPOPTS = 0x31 + IPV6_IPSEC_POLICY = 0x1c + IPV6_JOIN_GROUP = 0xc + IPV6_LEAVE_GROUP = 0xd + IPV6_MAXHLIM = 0xff + IPV6_MAXOPTHDR = 0x800 + IPV6_MAXPACKET = 0xffff + IPV6_MAX_GROUP_SRC_FILTER = 0x200 + IPV6_MAX_MEMBERSHIPS = 0xfff + IPV6_MAX_SOCK_SRC_FILTER = 0x80 + IPV6_MIN_MEMBERSHIPS = 0x1f + IPV6_MMTU = 0x500 + 
IPV6_MSFILTER = 0x4a + IPV6_MULTICAST_HOPS = 0xa + IPV6_MULTICAST_IF = 0x9 + IPV6_MULTICAST_LOOP = 0xb + IPV6_NEXTHOP = 0x30 + IPV6_PATHMTU = 0x2c + IPV6_PKTINFO = 0x2e + IPV6_PORTRANGE = 0xe + IPV6_PORTRANGE_DEFAULT = 0x0 + IPV6_PORTRANGE_HIGH = 0x1 + IPV6_PORTRANGE_LOW = 0x2 + IPV6_PREFER_TEMPADDR = 0x3f + IPV6_RECVDSTOPTS = 0x28 + IPV6_RECVHOPLIMIT = 0x25 + IPV6_RECVHOPOPTS = 0x27 + IPV6_RECVPATHMTU = 0x2b + IPV6_RECVPKTINFO = 0x24 + IPV6_RECVRTHDR = 0x26 + IPV6_RECVTCLASS = 0x39 + IPV6_RTHDR = 0x33 + IPV6_RTHDRDSTOPTS = 0x23 + IPV6_RTHDR_LOOSE = 0x0 + IPV6_RTHDR_STRICT = 0x1 + IPV6_RTHDR_TYPE_0 = 0x0 + IPV6_SOCKOPT_RESERVED1 = 0x3 + IPV6_TCLASS = 0x3d + IPV6_UNICAST_HOPS = 0x4 + IPV6_USE_MIN_MTU = 0x2a + IPV6_V6ONLY = 0x1b + IPV6_VERSION = 0x60 + IPV6_VERSION_MASK = 0xf0 + IP_ADD_MEMBERSHIP = 0xc + IP_ADD_SOURCE_MEMBERSHIP = 0x46 + IP_BINDANY = 0x18 + IP_BLOCK_SOURCE = 0x48 + IP_DEFAULT_MULTICAST_LOOP = 0x1 + IP_DEFAULT_MULTICAST_TTL = 0x1 + IP_DF = 0x4000 + IP_DONTFRAG = 0x43 + IP_DROP_MEMBERSHIP = 0xd + IP_DROP_SOURCE_MEMBERSHIP = 0x47 + IP_DUMMYNET3 = 0x31 + IP_DUMMYNET_CONFIGURE = 0x3c + IP_DUMMYNET_DEL = 0x3d + IP_DUMMYNET_FLUSH = 0x3e + IP_DUMMYNET_GET = 0x40 + IP_FAITH = 0x16 + IP_FW3 = 0x30 + IP_FW_ADD = 0x32 + IP_FW_DEL = 0x33 + IP_FW_FLUSH = 0x34 + IP_FW_GET = 0x36 + IP_FW_NAT_CFG = 0x38 + IP_FW_NAT_DEL = 0x39 + IP_FW_NAT_GET_CONFIG = 0x3a + IP_FW_NAT_GET_LOG = 0x3b + IP_FW_RESETLOG = 0x37 + IP_FW_TABLE_ADD = 0x28 + IP_FW_TABLE_DEL = 0x29 + IP_FW_TABLE_FLUSH = 0x2a + IP_FW_TABLE_GETSIZE = 0x2b + IP_FW_TABLE_LIST = 0x2c + IP_FW_ZERO = 0x35 + IP_HDRINCL = 0x2 + IP_IPSEC_POLICY = 0x15 + IP_MAXPACKET = 0xffff + IP_MAX_GROUP_SRC_FILTER = 0x200 + IP_MAX_MEMBERSHIPS = 0xfff + IP_MAX_SOCK_MUTE_FILTER = 0x80 + IP_MAX_SOCK_SRC_FILTER = 0x80 + IP_MAX_SOURCE_FILTER = 0x400 + IP_MF = 0x2000 + IP_MINTTL = 0x42 + IP_MIN_MEMBERSHIPS = 0x1f + IP_MSFILTER = 0x4a + IP_MSS = 0x240 + IP_MULTICAST_IF = 0x9 + IP_MULTICAST_LOOP = 0xb + IP_MULTICAST_TTL = 0xa + IP_MULTICAST_VIF = 0xe + IP_OFFMASK = 0x1fff + IP_ONESBCAST = 0x17 + IP_OPTIONS = 0x1 + IP_PORTRANGE = 0x13 + IP_PORTRANGE_DEFAULT = 0x0 + IP_PORTRANGE_HIGH = 0x1 + IP_PORTRANGE_LOW = 0x2 + IP_RECVDSTADDR = 0x7 + IP_RECVIF = 0x14 + IP_RECVOPTS = 0x5 + IP_RECVRETOPTS = 0x6 + IP_RECVTOS = 0x44 + IP_RECVTTL = 0x41 + IP_RETOPTS = 0x8 + IP_RF = 0x8000 + IP_RSVP_OFF = 0x10 + IP_RSVP_ON = 0xf + IP_RSVP_VIF_OFF = 0x12 + IP_RSVP_VIF_ON = 0x11 + IP_SENDSRCADDR = 0x7 + IP_TOS = 0x3 + IP_TTL = 0x4 + IP_UNBLOCK_SOURCE = 0x49 + ISIG = 0x80 + ISTRIP = 0x20 + IXANY = 0x800 + IXOFF = 0x400 + IXON = 0x200 + LOCK_EX = 0x2 + LOCK_NB = 0x4 + LOCK_SH = 0x1 + LOCK_UN = 0x8 + MADV_AUTOSYNC = 0x7 + MADV_CORE = 0x9 + MADV_DONTNEED = 0x4 + MADV_FREE = 0x5 + MADV_NOCORE = 0x8 + MADV_NORMAL = 0x0 + MADV_NOSYNC = 0x6 + MADV_PROTECT = 0xa + MADV_RANDOM = 0x1 + MADV_SEQUENTIAL = 0x2 + MADV_WILLNEED = 0x3 + MAP_ALIGNED_SUPER = 0x1000000 + MAP_ALIGNMENT_MASK = -0x1000000 + MAP_ALIGNMENT_SHIFT = 0x18 + MAP_ANON = 0x1000 + MAP_ANONYMOUS = 0x1000 + MAP_COPY = 0x2 + MAP_EXCL = 0x4000 + MAP_FILE = 0x0 + MAP_FIXED = 0x10 + MAP_HASSEMAPHORE = 0x200 + MAP_NOCORE = 0x20000 + MAP_NORESERVE = 0x40 + MAP_NOSYNC = 0x800 + MAP_PREFAULT_READ = 0x40000 + MAP_PRIVATE = 0x2 + MAP_RENAME = 0x20 + MAP_RESERVED0080 = 0x80 + MAP_RESERVED0100 = 0x100 + MAP_SHARED = 0x1 + MAP_STACK = 0x400 + MCL_CURRENT = 0x1 + MCL_FUTURE = 0x2 + MSG_CMSG_CLOEXEC = 0x40000 + MSG_COMPAT = 0x8000 + MSG_CTRUNC = 0x20 + MSG_DONTROUTE = 0x4 + MSG_DONTWAIT = 0x80 + MSG_EOF = 0x100 + MSG_EOR = 0x8 + MSG_NBIO = 0x4000 + 
MSG_NOSIGNAL = 0x20000 + MSG_NOTIFICATION = 0x2000 + MSG_OOB = 0x1 + MSG_PEEK = 0x2 + MSG_TRUNC = 0x10 + MSG_WAITALL = 0x40 + MS_ASYNC = 0x1 + MS_INVALIDATE = 0x2 + MS_SYNC = 0x0 + NAME_MAX = 0xff + NET_RT_DUMP = 0x1 + NET_RT_FLAGS = 0x2 + NET_RT_IFLIST = 0x3 + NET_RT_IFLISTL = 0x5 + NET_RT_IFMALIST = 0x4 + NET_RT_MAXID = 0x6 + NOFLSH = 0x80000000 + NOTE_ATTRIB = 0x8 + NOTE_CHILD = 0x4 + NOTE_DELETE = 0x1 + NOTE_EXEC = 0x20000000 + NOTE_EXIT = 0x80000000 + NOTE_EXTEND = 0x4 + NOTE_FFAND = 0x40000000 + NOTE_FFCOPY = 0xc0000000 + NOTE_FFCTRLMASK = 0xc0000000 + NOTE_FFLAGSMASK = 0xffffff + NOTE_FFNOP = 0x0 + NOTE_FFOR = 0x80000000 + NOTE_FORK = 0x40000000 + NOTE_LINK = 0x10 + NOTE_LOWAT = 0x1 + NOTE_PCTRLMASK = 0xf0000000 + NOTE_PDATAMASK = 0xfffff + NOTE_RENAME = 0x20 + NOTE_REVOKE = 0x40 + NOTE_TRACK = 0x1 + NOTE_TRACKERR = 0x2 + NOTE_TRIGGER = 0x1000000 + NOTE_WRITE = 0x2 + OCRNL = 0x10 + ONLCR = 0x2 + ONLRET = 0x40 + ONOCR = 0x20 + ONOEOT = 0x8 + OPOST = 0x1 + O_ACCMODE = 0x3 + O_APPEND = 0x8 + O_ASYNC = 0x40 + O_CLOEXEC = 0x100000 + O_CREAT = 0x200 + O_DIRECT = 0x10000 + O_DIRECTORY = 0x20000 + O_EXCL = 0x800 + O_EXEC = 0x40000 + O_EXLOCK = 0x20 + O_FSYNC = 0x80 + O_NDELAY = 0x4 + O_NOCTTY = 0x8000 + O_NOFOLLOW = 0x100 + O_NONBLOCK = 0x4 + O_RDONLY = 0x0 + O_RDWR = 0x2 + O_SHLOCK = 0x10 + O_SYNC = 0x80 + O_TRUNC = 0x400 + O_TTY_INIT = 0x80000 + O_WRONLY = 0x1 + PARENB = 0x1000 + PARMRK = 0x8 + PARODD = 0x2000 + PENDIN = 0x20000000 + PRIO_PGRP = 0x1 + PRIO_PROCESS = 0x0 + PRIO_USER = 0x2 + PROT_EXEC = 0x4 + PROT_NONE = 0x0 + PROT_READ = 0x1 + PROT_WRITE = 0x2 + RLIMIT_AS = 0xa + RLIMIT_CORE = 0x4 + RLIMIT_CPU = 0x0 + RLIMIT_DATA = 0x2 + RLIMIT_FSIZE = 0x1 + RLIMIT_NOFILE = 0x8 + RLIMIT_STACK = 0x3 + RLIM_INFINITY = 0x7fffffffffffffff + RTAX_AUTHOR = 0x6 + RTAX_BRD = 0x7 + RTAX_DST = 0x0 + RTAX_GATEWAY = 0x1 + RTAX_GENMASK = 0x3 + RTAX_IFA = 0x5 + RTAX_IFP = 0x4 + RTAX_MAX = 0x8 + RTAX_NETMASK = 0x2 + RTA_AUTHOR = 0x40 + RTA_BRD = 0x80 + RTA_DST = 0x1 + RTA_GATEWAY = 0x2 + RTA_GENMASK = 0x8 + RTA_IFA = 0x20 + RTA_IFP = 0x10 + RTA_NETMASK = 0x4 + RTF_BLACKHOLE = 0x1000 + RTF_BROADCAST = 0x400000 + RTF_DONE = 0x40 + RTF_DYNAMIC = 0x10 + RTF_FMASK = 0x1004d808 + RTF_GATEWAY = 0x2 + RTF_GWFLAG_COMPAT = 0x80000000 + RTF_HOST = 0x4 + RTF_LLDATA = 0x400 + RTF_LLINFO = 0x400 + RTF_LOCAL = 0x200000 + RTF_MODIFIED = 0x20 + RTF_MULTICAST = 0x800000 + RTF_PINNED = 0x100000 + RTF_PRCLONING = 0x10000 + RTF_PROTO1 = 0x8000 + RTF_PROTO2 = 0x4000 + RTF_PROTO3 = 0x40000 + RTF_REJECT = 0x8 + RTF_RNH_LOCKED = 0x40000000 + RTF_STATIC = 0x800 + RTF_STICKY = 0x10000000 + RTF_UP = 0x1 + RTF_XRESOLVE = 0x200 + RTM_ADD = 0x1 + RTM_CHANGE = 0x3 + RTM_DELADDR = 0xd + RTM_DELETE = 0x2 + RTM_DELMADDR = 0x10 + RTM_GET = 0x4 + RTM_IEEE80211 = 0x12 + RTM_IFANNOUNCE = 0x11 + RTM_IFINFO = 0xe + RTM_LOCK = 0x8 + RTM_LOSING = 0x5 + RTM_MISS = 0x7 + RTM_NEWADDR = 0xc + RTM_NEWMADDR = 0xf + RTM_OLDADD = 0x9 + RTM_OLDDEL = 0xa + RTM_REDIRECT = 0x6 + RTM_RESOLVE = 0xb + RTM_RTTUNIT = 0xf4240 + RTM_VERSION = 0x5 + RTV_EXPIRE = 0x4 + RTV_HOPCOUNT = 0x2 + RTV_MTU = 0x1 + RTV_RPIPE = 0x8 + RTV_RTT = 0x40 + RTV_RTTVAR = 0x80 + RTV_SPIPE = 0x10 + RTV_SSTHRESH = 0x20 + RTV_WEIGHT = 0x100 + RT_ALL_FIBS = -0x1 + RT_CACHING_CONTEXT = 0x1 + RT_DEFAULT_FIB = 0x0 + RT_NORTREF = 0x2 + RUSAGE_CHILDREN = -0x1 + RUSAGE_SELF = 0x0 + RUSAGE_THREAD = 0x1 + SCM_BINTIME = 0x4 + SCM_CREDS = 0x3 + SCM_RIGHTS = 0x1 + SCM_TIMESTAMP = 0x2 + SHUT_RD = 0x0 + SHUT_RDWR = 0x2 + SHUT_WR = 0x1 + SIOCADDMULTI = 0x80206931 + SIOCADDRT = 0x8030720a + SIOCAIFADDR = 
0x8040691a + SIOCAIFGROUP = 0x80246987 + SIOCALIFADDR = 0x8118691b + SIOCATMARK = 0x40047307 + SIOCDELMULTI = 0x80206932 + SIOCDELRT = 0x8030720b + SIOCDIFADDR = 0x80206919 + SIOCDIFGROUP = 0x80246989 + SIOCDIFPHYADDR = 0x80206949 + SIOCDLIFADDR = 0x8118691d + SIOCGDRVSPEC = 0xc01c697b + SIOCGETSGCNT = 0xc0147210 + SIOCGETVIFCNT = 0xc014720f + SIOCGHIWAT = 0x40047301 + SIOCGIFADDR = 0xc0206921 + SIOCGIFBRDADDR = 0xc0206923 + SIOCGIFCAP = 0xc020691f + SIOCGIFCONF = 0xc0086924 + SIOCGIFDESCR = 0xc020692a + SIOCGIFDSTADDR = 0xc0206922 + SIOCGIFFIB = 0xc020695c + SIOCGIFFLAGS = 0xc0206911 + SIOCGIFGENERIC = 0xc020693a + SIOCGIFGMEMB = 0xc024698a + SIOCGIFGROUP = 0xc0246988 + SIOCGIFINDEX = 0xc0206920 + SIOCGIFMAC = 0xc0206926 + SIOCGIFMEDIA = 0xc0286938 + SIOCGIFMETRIC = 0xc0206917 + SIOCGIFMTU = 0xc0206933 + SIOCGIFNETMASK = 0xc0206925 + SIOCGIFPDSTADDR = 0xc0206948 + SIOCGIFPHYS = 0xc0206935 + SIOCGIFPSRCADDR = 0xc0206947 + SIOCGIFSTATUS = 0xc331693b + SIOCGLIFADDR = 0xc118691c + SIOCGLIFPHYADDR = 0xc118694b + SIOCGLOWAT = 0x40047303 + SIOCGPGRP = 0x40047309 + SIOCGPRIVATE_0 = 0xc0206950 + SIOCGPRIVATE_1 = 0xc0206951 + SIOCIFCREATE = 0xc020697a + SIOCIFCREATE2 = 0xc020697c + SIOCIFDESTROY = 0x80206979 + SIOCIFGCLONERS = 0xc00c6978 + SIOCSDRVSPEC = 0x801c697b + SIOCSHIWAT = 0x80047300 + SIOCSIFADDR = 0x8020690c + SIOCSIFBRDADDR = 0x80206913 + SIOCSIFCAP = 0x8020691e + SIOCSIFDESCR = 0x80206929 + SIOCSIFDSTADDR = 0x8020690e + SIOCSIFFIB = 0x8020695d + SIOCSIFFLAGS = 0x80206910 + SIOCSIFGENERIC = 0x80206939 + SIOCSIFLLADDR = 0x8020693c + SIOCSIFMAC = 0x80206927 + SIOCSIFMEDIA = 0xc0206937 + SIOCSIFMETRIC = 0x80206918 + SIOCSIFMTU = 0x80206934 + SIOCSIFNAME = 0x80206928 + SIOCSIFNETMASK = 0x80206916 + SIOCSIFPHYADDR = 0x80406946 + SIOCSIFPHYS = 0x80206936 + SIOCSIFRVNET = 0xc020695b + SIOCSIFVNET = 0xc020695a + SIOCSLIFPHYADDR = 0x8118694a + SIOCSLOWAT = 0x80047302 + SIOCSPGRP = 0x80047308 + SOCK_CLOEXEC = 0x10000000 + SOCK_DGRAM = 0x2 + SOCK_MAXADDRLEN = 0xff + SOCK_NONBLOCK = 0x20000000 + SOCK_RAW = 0x3 + SOCK_RDM = 0x4 + SOCK_SEQPACKET = 0x5 + SOCK_STREAM = 0x1 + SOL_SOCKET = 0xffff + SOMAXCONN = 0x80 + SO_ACCEPTCONN = 0x2 + SO_ACCEPTFILTER = 0x1000 + SO_BINTIME = 0x2000 + SO_BROADCAST = 0x20 + SO_DEBUG = 0x1 + SO_DONTROUTE = 0x10 + SO_ERROR = 0x1007 + SO_KEEPALIVE = 0x8 + SO_LABEL = 0x1009 + SO_LINGER = 0x80 + SO_LISTENINCQLEN = 0x1013 + SO_LISTENQLEN = 0x1012 + SO_LISTENQLIMIT = 0x1011 + SO_NOSIGPIPE = 0x800 + SO_NO_DDP = 0x8000 + SO_NO_OFFLOAD = 0x4000 + SO_OOBINLINE = 0x100 + SO_PEERLABEL = 0x1010 + SO_PROTOCOL = 0x1016 + SO_PROTOTYPE = 0x1016 + SO_RCVBUF = 0x1002 + SO_RCVLOWAT = 0x1004 + SO_RCVTIMEO = 0x1006 + SO_REUSEADDR = 0x4 + SO_REUSEPORT = 0x200 + SO_SETFIB = 0x1014 + SO_SNDBUF = 0x1001 + SO_SNDLOWAT = 0x1003 + SO_SNDTIMEO = 0x1005 + SO_TIMESTAMP = 0x400 + SO_TYPE = 0x1008 + SO_USELOOPBACK = 0x40 + SO_USER_COOKIE = 0x1015 + SO_VENDOR = 0x80000000 + TCIFLUSH = 0x1 + TCIOFLUSH = 0x3 + TCOFLUSH = 0x2 + TCP_CA_NAME_MAX = 0x10 + TCP_CONGESTION = 0x40 + TCP_INFO = 0x20 + TCP_KEEPCNT = 0x400 + TCP_KEEPIDLE = 0x100 + TCP_KEEPINIT = 0x80 + TCP_KEEPINTVL = 0x200 + TCP_MAXBURST = 0x4 + TCP_MAXHLEN = 0x3c + TCP_MAXOLEN = 0x28 + TCP_MAXSEG = 0x2 + TCP_MAXWIN = 0xffff + TCP_MAX_SACK = 0x4 + TCP_MAX_WINSHIFT = 0xe + TCP_MD5SIG = 0x10 + TCP_MINMSS = 0xd8 + TCP_MSS = 0x218 + TCP_NODELAY = 0x1 + TCP_NOOPT = 0x8 + TCP_NOPUSH = 0x4 + TCP_VENDOR = 0x80000000 + TCSAFLUSH = 0x2 + TIOCCBRK = 0x2000747a + TIOCCDTR = 0x20007478 + TIOCCONS = 0x80047462 + TIOCDRAIN = 0x2000745e + TIOCEXCL = 0x2000740d + 
TIOCEXT = 0x80047460 + TIOCFLUSH = 0x80047410 + TIOCGDRAINWAIT = 0x40047456 + TIOCGETA = 0x402c7413 + TIOCGETD = 0x4004741a + TIOCGPGRP = 0x40047477 + TIOCGPTN = 0x4004740f + TIOCGSID = 0x40047463 + TIOCGWINSZ = 0x40087468 + TIOCMBIC = 0x8004746b + TIOCMBIS = 0x8004746c + TIOCMGDTRWAIT = 0x4004745a + TIOCMGET = 0x4004746a + TIOCMSDTRWAIT = 0x8004745b + TIOCMSET = 0x8004746d + TIOCM_CAR = 0x40 + TIOCM_CD = 0x40 + TIOCM_CTS = 0x20 + TIOCM_DCD = 0x40 + TIOCM_DSR = 0x100 + TIOCM_DTR = 0x2 + TIOCM_LE = 0x1 + TIOCM_RI = 0x80 + TIOCM_RNG = 0x80 + TIOCM_RTS = 0x4 + TIOCM_SR = 0x10 + TIOCM_ST = 0x8 + TIOCNOTTY = 0x20007471 + TIOCNXCL = 0x2000740e + TIOCOUTQ = 0x40047473 + TIOCPKT = 0x80047470 + TIOCPKT_DATA = 0x0 + TIOCPKT_DOSTOP = 0x20 + TIOCPKT_FLUSHREAD = 0x1 + TIOCPKT_FLUSHWRITE = 0x2 + TIOCPKT_IOCTL = 0x40 + TIOCPKT_NOSTOP = 0x10 + TIOCPKT_START = 0x8 + TIOCPKT_STOP = 0x4 + TIOCPTMASTER = 0x2000741c + TIOCSBRK = 0x2000747b + TIOCSCTTY = 0x20007461 + TIOCSDRAINWAIT = 0x80047457 + TIOCSDTR = 0x20007479 + TIOCSETA = 0x802c7414 + TIOCSETAF = 0x802c7416 + TIOCSETAW = 0x802c7415 + TIOCSETD = 0x8004741b + TIOCSIG = 0x2004745f + TIOCSPGRP = 0x80047476 + TIOCSTART = 0x2000746e + TIOCSTAT = 0x20007465 + TIOCSTI = 0x80017472 + TIOCSTOP = 0x2000746f + TIOCSWINSZ = 0x80087467 + TIOCTIMESTAMP = 0x40087459 + TIOCUCNTL = 0x80047466 + TOSTOP = 0x400000 + VDISCARD = 0xf + VDSUSP = 0xb + VEOF = 0x0 + VEOL = 0x1 + VEOL2 = 0x2 + VERASE = 0x3 + VERASE2 = 0x7 + VINTR = 0x8 + VKILL = 0x5 + VLNEXT = 0xe + VMIN = 0x10 + VQUIT = 0x9 + VREPRINT = 0x6 + VSTART = 0xc + VSTATUS = 0x12 + VSTOP = 0xd + VSUSP = 0xa + VTIME = 0x11 + VWERASE = 0x4 + WCONTINUED = 0x4 + WCOREFLAG = 0x80 + WEXITED = 0x10 + WLINUXCLONE = 0x80000000 + WNOHANG = 0x1 + WNOWAIT = 0x8 + WSTOPPED = 0x2 + WTRAPPED = 0x20 + WUNTRACED = 0x2 +) + +// Errors +const ( + E2BIG = syscall.Errno(0x7) + EACCES = syscall.Errno(0xd) + EADDRINUSE = syscall.Errno(0x30) + EADDRNOTAVAIL = syscall.Errno(0x31) + EAFNOSUPPORT = syscall.Errno(0x2f) + EAGAIN = syscall.Errno(0x23) + EALREADY = syscall.Errno(0x25) + EAUTH = syscall.Errno(0x50) + EBADF = syscall.Errno(0x9) + EBADMSG = syscall.Errno(0x59) + EBADRPC = syscall.Errno(0x48) + EBUSY = syscall.Errno(0x10) + ECANCELED = syscall.Errno(0x55) + ECAPMODE = syscall.Errno(0x5e) + ECHILD = syscall.Errno(0xa) + ECONNABORTED = syscall.Errno(0x35) + ECONNREFUSED = syscall.Errno(0x3d) + ECONNRESET = syscall.Errno(0x36) + EDEADLK = syscall.Errno(0xb) + EDESTADDRREQ = syscall.Errno(0x27) + EDOM = syscall.Errno(0x21) + EDOOFUS = syscall.Errno(0x58) + EDQUOT = syscall.Errno(0x45) + EEXIST = syscall.Errno(0x11) + EFAULT = syscall.Errno(0xe) + EFBIG = syscall.Errno(0x1b) + EFTYPE = syscall.Errno(0x4f) + EHOSTDOWN = syscall.Errno(0x40) + EHOSTUNREACH = syscall.Errno(0x41) + EIDRM = syscall.Errno(0x52) + EILSEQ = syscall.Errno(0x56) + EINPROGRESS = syscall.Errno(0x24) + EINTR = syscall.Errno(0x4) + EINVAL = syscall.Errno(0x16) + EIO = syscall.Errno(0x5) + EISCONN = syscall.Errno(0x38) + EISDIR = syscall.Errno(0x15) + ELAST = syscall.Errno(0x60) + ELOOP = syscall.Errno(0x3e) + EMFILE = syscall.Errno(0x18) + EMLINK = syscall.Errno(0x1f) + EMSGSIZE = syscall.Errno(0x28) + EMULTIHOP = syscall.Errno(0x5a) + ENAMETOOLONG = syscall.Errno(0x3f) + ENEEDAUTH = syscall.Errno(0x51) + ENETDOWN = syscall.Errno(0x32) + ENETRESET = syscall.Errno(0x34) + ENETUNREACH = syscall.Errno(0x33) + ENFILE = syscall.Errno(0x17) + ENOATTR = syscall.Errno(0x57) + ENOBUFS = syscall.Errno(0x37) + ENODEV = syscall.Errno(0x13) + ENOENT = syscall.Errno(0x2) + ENOEXEC = 
syscall.Errno(0x8) + ENOLCK = syscall.Errno(0x4d) + ENOLINK = syscall.Errno(0x5b) + ENOMEM = syscall.Errno(0xc) + ENOMSG = syscall.Errno(0x53) + ENOPROTOOPT = syscall.Errno(0x2a) + ENOSPC = syscall.Errno(0x1c) + ENOSYS = syscall.Errno(0x4e) + ENOTBLK = syscall.Errno(0xf) + ENOTCAPABLE = syscall.Errno(0x5d) + ENOTCONN = syscall.Errno(0x39) + ENOTDIR = syscall.Errno(0x14) + ENOTEMPTY = syscall.Errno(0x42) + ENOTRECOVERABLE = syscall.Errno(0x5f) + ENOTSOCK = syscall.Errno(0x26) + ENOTSUP = syscall.Errno(0x2d) + ENOTTY = syscall.Errno(0x19) + ENXIO = syscall.Errno(0x6) + EOPNOTSUPP = syscall.Errno(0x2d) + EOVERFLOW = syscall.Errno(0x54) + EOWNERDEAD = syscall.Errno(0x60) + EPERM = syscall.Errno(0x1) + EPFNOSUPPORT = syscall.Errno(0x2e) + EPIPE = syscall.Errno(0x20) + EPROCLIM = syscall.Errno(0x43) + EPROCUNAVAIL = syscall.Errno(0x4c) + EPROGMISMATCH = syscall.Errno(0x4b) + EPROGUNAVAIL = syscall.Errno(0x4a) + EPROTO = syscall.Errno(0x5c) + EPROTONOSUPPORT = syscall.Errno(0x2b) + EPROTOTYPE = syscall.Errno(0x29) + ERANGE = syscall.Errno(0x22) + EREMOTE = syscall.Errno(0x47) + EROFS = syscall.Errno(0x1e) + ERPCMISMATCH = syscall.Errno(0x49) + ESHUTDOWN = syscall.Errno(0x3a) + ESOCKTNOSUPPORT = syscall.Errno(0x2c) + ESPIPE = syscall.Errno(0x1d) + ESRCH = syscall.Errno(0x3) + ESTALE = syscall.Errno(0x46) + ETIMEDOUT = syscall.Errno(0x3c) + ETOOMANYREFS = syscall.Errno(0x3b) + ETXTBSY = syscall.Errno(0x1a) + EUSERS = syscall.Errno(0x44) + EWOULDBLOCK = syscall.Errno(0x23) + EXDEV = syscall.Errno(0x12) +) + +// Signals +const ( + SIGABRT = syscall.Signal(0x6) + SIGALRM = syscall.Signal(0xe) + SIGBUS = syscall.Signal(0xa) + SIGCHLD = syscall.Signal(0x14) + SIGCONT = syscall.Signal(0x13) + SIGEMT = syscall.Signal(0x7) + SIGFPE = syscall.Signal(0x8) + SIGHUP = syscall.Signal(0x1) + SIGILL = syscall.Signal(0x4) + SIGINFO = syscall.Signal(0x1d) + SIGINT = syscall.Signal(0x2) + SIGIO = syscall.Signal(0x17) + SIGIOT = syscall.Signal(0x6) + SIGKILL = syscall.Signal(0x9) + SIGLIBRT = syscall.Signal(0x21) + SIGLWP = syscall.Signal(0x20) + SIGPIPE = syscall.Signal(0xd) + SIGPROF = syscall.Signal(0x1b) + SIGQUIT = syscall.Signal(0x3) + SIGSEGV = syscall.Signal(0xb) + SIGSTOP = syscall.Signal(0x11) + SIGSYS = syscall.Signal(0xc) + SIGTERM = syscall.Signal(0xf) + SIGTHR = syscall.Signal(0x20) + SIGTRAP = syscall.Signal(0x5) + SIGTSTP = syscall.Signal(0x12) + SIGTTIN = syscall.Signal(0x15) + SIGTTOU = syscall.Signal(0x16) + SIGURG = syscall.Signal(0x10) + SIGUSR1 = syscall.Signal(0x1e) + SIGUSR2 = syscall.Signal(0x1f) + SIGVTALRM = syscall.Signal(0x1a) + SIGWINCH = syscall.Signal(0x1c) + SIGXCPU = syscall.Signal(0x18) + SIGXFSZ = syscall.Signal(0x19) +) + +// Error table +var errors = [...]string{ + 1: "operation not permitted", + 2: "no such file or directory", + 3: "no such process", + 4: "interrupted system call", + 5: "input/output error", + 6: "device not configured", + 7: "argument list too long", + 8: "exec format error", + 9: "bad file descriptor", + 10: "no child processes", + 11: "resource deadlock avoided", + 12: "cannot allocate memory", + 13: "permission denied", + 14: "bad address", + 15: "block device required", + 16: "device busy", + 17: "file exists", + 18: "cross-device link", + 19: "operation not supported by device", + 20: "not a directory", + 21: "is a directory", + 22: "invalid argument", + 23: "too many open files in system", + 24: "too many open files", + 25: "inappropriate ioctl for device", + 26: "text file busy", + 27: "file too large", + 28: "no space left on device", + 29: "illegal 
seek", + 30: "read-only file system", + 31: "too many links", + 32: "broken pipe", + 33: "numerical argument out of domain", + 34: "result too large", + 35: "resource temporarily unavailable", + 36: "operation now in progress", + 37: "operation already in progress", + 38: "socket operation on non-socket", + 39: "destination address required", + 40: "message too long", + 41: "protocol wrong type for socket", + 42: "protocol not available", + 43: "protocol not supported", + 44: "socket type not supported", + 45: "operation not supported", + 46: "protocol family not supported", + 47: "address family not supported by protocol family", + 48: "address already in use", + 49: "can't assign requested address", + 50: "network is down", + 51: "network is unreachable", + 52: "network dropped connection on reset", + 53: "software caused connection abort", + 54: "connection reset by peer", + 55: "no buffer space available", + 56: "socket is already connected", + 57: "socket is not connected", + 58: "can't send after socket shutdown", + 59: "too many references: can't splice", + 60: "operation timed out", + 61: "connection refused", + 62: "too many levels of symbolic links", + 63: "file name too long", + 64: "host is down", + 65: "no route to host", + 66: "directory not empty", + 67: "too many processes", + 68: "too many users", + 69: "disc quota exceeded", + 70: "stale NFS file handle", + 71: "too many levels of remote in path", + 72: "RPC struct is bad", + 73: "RPC version wrong", + 74: "RPC prog. not avail", + 75: "program version wrong", + 76: "bad procedure for program", + 77: "no locks available", + 78: "function not implemented", + 79: "inappropriate file type or format", + 80: "authentication error", + 81: "need authenticator", + 82: "identifier removed", + 83: "no message of desired type", + 84: "value too large to be stored in data type", + 85: "operation canceled", + 86: "illegal byte sequence", + 87: "attribute not found", + 88: "programming error", + 89: "bad message", + 90: "multihop attempted", + 91: "link has been severed", + 92: "protocol error", + 93: "capabilities insufficient", + 94: "not permitted in capability mode", + 95: "state not recoverable", + 96: "previous owner died", +} + +// Signal table +var signals = [...]string{ + 1: "hangup", + 2: "interrupt", + 3: "quit", + 4: "illegal instruction", + 5: "trace/BPT trap", + 6: "abort trap", + 7: "EMT trap", + 8: "floating point exception", + 9: "killed", + 10: "bus error", + 11: "segmentation fault", + 12: "bad system call", + 13: "broken pipe", + 14: "alarm clock", + 15: "terminated", + 16: "urgent I/O condition", + 17: "suspended (signal)", + 18: "suspended", + 19: "continued", + 20: "child exited", + 21: "stopped (tty input)", + 22: "stopped (tty output)", + 23: "I/O possible", + 24: "cputime limit exceeded", + 25: "filesize limit exceeded", + 26: "virtual timer expired", + 27: "profiling timer expired", + 28: "window size changes", + 29: "information request", + 30: "user defined signal 1", + 31: "user defined signal 2", + 32: "unknown signal", + 33: "unknown signal", +} diff --git a/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zerrors_linux_386.go b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zerrors_linux_386.go new file mode 100644 index 000000000..d370be0ec --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zerrors_linux_386.go @@ -0,0 +1,1817 @@ +// mkerrors.sh -m32 +// MACHINE GENERATED BY THE COMMAND ABOVE; DO NOT EDIT + +// 
+build 386,linux + +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs -- -m32 _const.go + +package unix + +import "syscall" + +const ( + AF_ALG = 0x26 + AF_APPLETALK = 0x5 + AF_ASH = 0x12 + AF_ATMPVC = 0x8 + AF_ATMSVC = 0x14 + AF_AX25 = 0x3 + AF_BLUETOOTH = 0x1f + AF_BRIDGE = 0x7 + AF_CAIF = 0x25 + AF_CAN = 0x1d + AF_DECnet = 0xc + AF_ECONET = 0x13 + AF_FILE = 0x1 + AF_IEEE802154 = 0x24 + AF_INET = 0x2 + AF_INET6 = 0xa + AF_IPX = 0x4 + AF_IRDA = 0x17 + AF_ISDN = 0x22 + AF_IUCV = 0x20 + AF_KEY = 0xf + AF_LLC = 0x1a + AF_LOCAL = 0x1 + AF_MAX = 0x28 + AF_NETBEUI = 0xd + AF_NETLINK = 0x10 + AF_NETROM = 0x6 + AF_NFC = 0x27 + AF_PACKET = 0x11 + AF_PHONET = 0x23 + AF_PPPOX = 0x18 + AF_RDS = 0x15 + AF_ROSE = 0xb + AF_ROUTE = 0x10 + AF_RXRPC = 0x21 + AF_SECURITY = 0xe + AF_SNA = 0x16 + AF_TIPC = 0x1e + AF_UNIX = 0x1 + AF_UNSPEC = 0x0 + AF_WANPIPE = 0x19 + AF_X25 = 0x9 + ARPHRD_ADAPT = 0x108 + ARPHRD_APPLETLK = 0x8 + ARPHRD_ARCNET = 0x7 + ARPHRD_ASH = 0x30d + ARPHRD_ATM = 0x13 + ARPHRD_AX25 = 0x3 + ARPHRD_BIF = 0x307 + ARPHRD_CAIF = 0x336 + ARPHRD_CAN = 0x118 + ARPHRD_CHAOS = 0x5 + ARPHRD_CISCO = 0x201 + ARPHRD_CSLIP = 0x101 + ARPHRD_CSLIP6 = 0x103 + ARPHRD_DDCMP = 0x205 + ARPHRD_DLCI = 0xf + ARPHRD_ECONET = 0x30e + ARPHRD_EETHER = 0x2 + ARPHRD_ETHER = 0x1 + ARPHRD_EUI64 = 0x1b + ARPHRD_FCAL = 0x311 + ARPHRD_FCFABRIC = 0x313 + ARPHRD_FCPL = 0x312 + ARPHRD_FCPP = 0x310 + ARPHRD_FDDI = 0x306 + ARPHRD_FRAD = 0x302 + ARPHRD_HDLC = 0x201 + ARPHRD_HIPPI = 0x30c + ARPHRD_HWX25 = 0x110 + ARPHRD_IEEE1394 = 0x18 + ARPHRD_IEEE802 = 0x6 + ARPHRD_IEEE80211 = 0x321 + ARPHRD_IEEE80211_PRISM = 0x322 + ARPHRD_IEEE80211_RADIOTAP = 0x323 + ARPHRD_IEEE802154 = 0x324 + ARPHRD_IEEE802_TR = 0x320 + ARPHRD_INFINIBAND = 0x20 + ARPHRD_IPDDP = 0x309 + ARPHRD_IPGRE = 0x30a + ARPHRD_IRDA = 0x30f + ARPHRD_LAPB = 0x204 + ARPHRD_LOCALTLK = 0x305 + ARPHRD_LOOPBACK = 0x304 + ARPHRD_METRICOM = 0x17 + ARPHRD_NETROM = 0x0 + ARPHRD_NONE = 0xfffe + ARPHRD_PHONET = 0x334 + ARPHRD_PHONET_PIPE = 0x335 + ARPHRD_PIMREG = 0x30b + ARPHRD_PPP = 0x200 + ARPHRD_PRONET = 0x4 + ARPHRD_RAWHDLC = 0x206 + ARPHRD_ROSE = 0x10e + ARPHRD_RSRVD = 0x104 + ARPHRD_SIT = 0x308 + ARPHRD_SKIP = 0x303 + ARPHRD_SLIP = 0x100 + ARPHRD_SLIP6 = 0x102 + ARPHRD_TUNNEL = 0x300 + ARPHRD_TUNNEL6 = 0x301 + ARPHRD_VOID = 0xffff + ARPHRD_X25 = 0x10f + B0 = 0x0 + B1000000 = 0x1008 + B110 = 0x3 + B115200 = 0x1002 + B1152000 = 0x1009 + B1200 = 0x9 + B134 = 0x4 + B150 = 0x5 + B1500000 = 0x100a + B1800 = 0xa + B19200 = 0xe + B200 = 0x6 + B2000000 = 0x100b + B230400 = 0x1003 + B2400 = 0xb + B2500000 = 0x100c + B300 = 0x7 + B3000000 = 0x100d + B3500000 = 0x100e + B38400 = 0xf + B4000000 = 0x100f + B460800 = 0x1004 + B4800 = 0xc + B50 = 0x1 + B500000 = 0x1005 + B57600 = 0x1001 + B576000 = 0x1006 + B600 = 0x8 + B75 = 0x2 + B921600 = 0x1007 + B9600 = 0xd + BOTHER = 0x1000 + BPF_A = 0x10 + BPF_ABS = 0x20 + BPF_ADD = 0x0 + BPF_ALU = 0x4 + BPF_AND = 0x50 + BPF_B = 0x10 + BPF_DIV = 0x30 + BPF_H = 0x8 + BPF_IMM = 0x0 + BPF_IND = 0x40 + BPF_JA = 0x0 + BPF_JEQ = 0x10 + BPF_JGE = 0x30 + BPF_JGT = 0x20 + BPF_JMP = 0x5 + BPF_JSET = 0x40 + BPF_K = 0x0 + BPF_LD = 0x0 + BPF_LDX = 0x1 + BPF_LEN = 0x80 + BPF_LSH = 0x60 + BPF_MAJOR_VERSION = 0x1 + BPF_MAXINSNS = 0x1000 + BPF_MEM = 0x60 + BPF_MEMWORDS = 0x10 + BPF_MINOR_VERSION = 0x1 + BPF_MISC = 0x7 + BPF_MSH = 0xa0 + BPF_MUL = 0x20 + BPF_NEG = 0x80 + BPF_OR = 0x40 + BPF_RET = 0x6 + BPF_RSH = 0x70 + BPF_ST = 0x2 + BPF_STX = 0x3 + BPF_SUB = 0x10 + BPF_TAX = 0x0 + BPF_TXA = 0x80 + BPF_W = 0x0 + BPF_X = 0x8 + BRKINT = 0x2 + BS0 = 0x0 + BS1 = 0x2000 
+ BSDLY = 0x2000 + CBAUD = 0x100f + CBAUDEX = 0x1000 + CFLUSH = 0xf + CIBAUD = 0x100f0000 + CLOCAL = 0x800 + CLOCK_BOOTTIME = 0x7 + CLOCK_BOOTTIME_ALARM = 0x9 + CLOCK_DEFAULT = 0x0 + CLOCK_EXT = 0x1 + CLOCK_INT = 0x2 + CLOCK_MONOTONIC = 0x1 + CLOCK_MONOTONIC_COARSE = 0x6 + CLOCK_MONOTONIC_RAW = 0x4 + CLOCK_PROCESS_CPUTIME_ID = 0x2 + CLOCK_REALTIME = 0x0 + CLOCK_REALTIME_ALARM = 0x8 + CLOCK_REALTIME_COARSE = 0x5 + CLOCK_THREAD_CPUTIME_ID = 0x3 + CLOCK_TXFROMRX = 0x4 + CLOCK_TXINT = 0x3 + CLONE_CHILD_CLEARTID = 0x200000 + CLONE_CHILD_SETTID = 0x1000000 + CLONE_DETACHED = 0x400000 + CLONE_FILES = 0x400 + CLONE_FS = 0x200 + CLONE_IO = 0x80000000 + CLONE_NEWIPC = 0x8000000 + CLONE_NEWNET = 0x40000000 + CLONE_NEWNS = 0x20000 + CLONE_NEWPID = 0x20000000 + CLONE_NEWUSER = 0x10000000 + CLONE_NEWUTS = 0x4000000 + CLONE_PARENT = 0x8000 + CLONE_PARENT_SETTID = 0x100000 + CLONE_PTRACE = 0x2000 + CLONE_SETTLS = 0x80000 + CLONE_SIGHAND = 0x800 + CLONE_SYSVSEM = 0x40000 + CLONE_THREAD = 0x10000 + CLONE_UNTRACED = 0x800000 + CLONE_VFORK = 0x4000 + CLONE_VM = 0x100 + CMSPAR = 0x40000000 + CR0 = 0x0 + CR1 = 0x200 + CR2 = 0x400 + CR3 = 0x600 + CRDLY = 0x600 + CREAD = 0x80 + CRTSCTS = 0x80000000 + CS5 = 0x0 + CS6 = 0x10 + CS7 = 0x20 + CS8 = 0x30 + CSIGNAL = 0xff + CSIZE = 0x30 + CSTART = 0x11 + CSTATUS = 0x0 + CSTOP = 0x13 + CSTOPB = 0x40 + CSUSP = 0x1a + DT_BLK = 0x6 + DT_CHR = 0x2 + DT_DIR = 0x4 + DT_FIFO = 0x1 + DT_LNK = 0xa + DT_REG = 0x8 + DT_SOCK = 0xc + DT_UNKNOWN = 0x0 + DT_WHT = 0xe + ECHO = 0x8 + ECHOCTL = 0x200 + ECHOE = 0x10 + ECHOK = 0x20 + ECHOKE = 0x800 + ECHONL = 0x40 + ECHOPRT = 0x400 + ENCODING_DEFAULT = 0x0 + ENCODING_FM_MARK = 0x3 + ENCODING_FM_SPACE = 0x4 + ENCODING_MANCHESTER = 0x5 + ENCODING_NRZ = 0x1 + ENCODING_NRZI = 0x2 + EPOLLERR = 0x8 + EPOLLET = 0x80000000 + EPOLLHUP = 0x10 + EPOLLIN = 0x1 + EPOLLMSG = 0x400 + EPOLLONESHOT = 0x40000000 + EPOLLOUT = 0x4 + EPOLLPRI = 0x2 + EPOLLRDBAND = 0x80 + EPOLLRDHUP = 0x2000 + EPOLLRDNORM = 0x40 + EPOLLWRBAND = 0x200 + EPOLLWRNORM = 0x100 + EPOLL_CLOEXEC = 0x80000 + EPOLL_CTL_ADD = 0x1 + EPOLL_CTL_DEL = 0x2 + EPOLL_CTL_MOD = 0x3 + EPOLL_NONBLOCK = 0x800 + ETH_P_1588 = 0x88f7 + ETH_P_8021AD = 0x88a8 + ETH_P_8021AH = 0x88e7 + ETH_P_8021Q = 0x8100 + ETH_P_802_2 = 0x4 + ETH_P_802_3 = 0x1 + ETH_P_AARP = 0x80f3 + ETH_P_AF_IUCV = 0xfbfb + ETH_P_ALL = 0x3 + ETH_P_AOE = 0x88a2 + ETH_P_ARCNET = 0x1a + ETH_P_ARP = 0x806 + ETH_P_ATALK = 0x809b + ETH_P_ATMFATE = 0x8884 + ETH_P_ATMMPOA = 0x884c + ETH_P_AX25 = 0x2 + ETH_P_BPQ = 0x8ff + ETH_P_CAIF = 0xf7 + ETH_P_CAN = 0xc + ETH_P_CONTROL = 0x16 + ETH_P_CUST = 0x6006 + ETH_P_DDCMP = 0x6 + ETH_P_DEC = 0x6000 + ETH_P_DIAG = 0x6005 + ETH_P_DNA_DL = 0x6001 + ETH_P_DNA_RC = 0x6002 + ETH_P_DNA_RT = 0x6003 + ETH_P_DSA = 0x1b + ETH_P_ECONET = 0x18 + ETH_P_EDSA = 0xdada + ETH_P_FCOE = 0x8906 + ETH_P_FIP = 0x8914 + ETH_P_HDLC = 0x19 + ETH_P_IEEE802154 = 0xf6 + ETH_P_IEEEPUP = 0xa00 + ETH_P_IEEEPUPAT = 0xa01 + ETH_P_IP = 0x800 + ETH_P_IPV6 = 0x86dd + ETH_P_IPX = 0x8137 + ETH_P_IRDA = 0x17 + ETH_P_LAT = 0x6004 + ETH_P_LINK_CTL = 0x886c + ETH_P_LOCALTALK = 0x9 + ETH_P_LOOP = 0x60 + ETH_P_MOBITEX = 0x15 + ETH_P_MPLS_MC = 0x8848 + ETH_P_MPLS_UC = 0x8847 + ETH_P_PAE = 0x888e + ETH_P_PAUSE = 0x8808 + ETH_P_PHONET = 0xf5 + ETH_P_PPPTALK = 0x10 + ETH_P_PPP_DISC = 0x8863 + ETH_P_PPP_MP = 0x8 + ETH_P_PPP_SES = 0x8864 + ETH_P_PUP = 0x200 + ETH_P_PUPAT = 0x201 + ETH_P_QINQ1 = 0x9100 + ETH_P_QINQ2 = 0x9200 + ETH_P_QINQ3 = 0x9300 + ETH_P_RARP = 0x8035 + ETH_P_SCA = 0x6007 + ETH_P_SLOW = 0x8809 + ETH_P_SNAP = 0x5 + ETH_P_TDLS = 0x890d + 
ETH_P_TEB = 0x6558 + ETH_P_TIPC = 0x88ca + ETH_P_TRAILER = 0x1c + ETH_P_TR_802_2 = 0x11 + ETH_P_WAN_PPP = 0x7 + ETH_P_WCCP = 0x883e + ETH_P_X25 = 0x805 + EXTA = 0xe + EXTB = 0xf + EXTPROC = 0x10000 + FD_CLOEXEC = 0x1 + FD_SETSIZE = 0x400 + FF0 = 0x0 + FF1 = 0x8000 + FFDLY = 0x8000 + FLUSHO = 0x1000 + F_DUPFD = 0x0 + F_DUPFD_CLOEXEC = 0x406 + F_EXLCK = 0x4 + F_GETFD = 0x1 + F_GETFL = 0x3 + F_GETLEASE = 0x401 + F_GETLK = 0xc + F_GETLK64 = 0xc + F_GETOWN = 0x9 + F_GETOWN_EX = 0x10 + F_GETPIPE_SZ = 0x408 + F_GETSIG = 0xb + F_LOCK = 0x1 + F_NOTIFY = 0x402 + F_OK = 0x0 + F_RDLCK = 0x0 + F_SETFD = 0x2 + F_SETFL = 0x4 + F_SETLEASE = 0x400 + F_SETLK = 0xd + F_SETLK64 = 0xd + F_SETLKW = 0xe + F_SETLKW64 = 0xe + F_SETOWN = 0x8 + F_SETOWN_EX = 0xf + F_SETPIPE_SZ = 0x407 + F_SETSIG = 0xa + F_SHLCK = 0x8 + F_TEST = 0x3 + F_TLOCK = 0x2 + F_ULOCK = 0x0 + F_UNLCK = 0x2 + F_WRLCK = 0x1 + HUPCL = 0x400 + IBSHIFT = 0x10 + ICANON = 0x2 + ICMPV6_FILTER = 0x1 + ICRNL = 0x100 + IEXTEN = 0x8000 + IFA_F_DADFAILED = 0x8 + IFA_F_DEPRECATED = 0x20 + IFA_F_HOMEADDRESS = 0x10 + IFA_F_NODAD = 0x2 + IFA_F_OPTIMISTIC = 0x4 + IFA_F_PERMANENT = 0x80 + IFA_F_SECONDARY = 0x1 + IFA_F_TEMPORARY = 0x1 + IFA_F_TENTATIVE = 0x40 + IFA_MAX = 0x7 + IFF_802_1Q_VLAN = 0x1 + IFF_ALLMULTI = 0x200 + IFF_AUTOMEDIA = 0x4000 + IFF_BONDING = 0x20 + IFF_BRIDGE_PORT = 0x4000 + IFF_BROADCAST = 0x2 + IFF_DEBUG = 0x4 + IFF_DISABLE_NETPOLL = 0x1000 + IFF_DONT_BRIDGE = 0x800 + IFF_DORMANT = 0x20000 + IFF_DYNAMIC = 0x8000 + IFF_EBRIDGE = 0x2 + IFF_ECHO = 0x40000 + IFF_ISATAP = 0x80 + IFF_LOOPBACK = 0x8 + IFF_LOWER_UP = 0x10000 + IFF_MACVLAN_PORT = 0x2000 + IFF_MASTER = 0x400 + IFF_MASTER_8023AD = 0x8 + IFF_MASTER_ALB = 0x10 + IFF_MASTER_ARPMON = 0x100 + IFF_MULTICAST = 0x1000 + IFF_NOARP = 0x80 + IFF_NOTRAILERS = 0x20 + IFF_NO_PI = 0x1000 + IFF_ONE_QUEUE = 0x2000 + IFF_OVS_DATAPATH = 0x8000 + IFF_POINTOPOINT = 0x10 + IFF_PORTSEL = 0x2000 + IFF_PROMISC = 0x100 + IFF_RUNNING = 0x40 + IFF_SLAVE = 0x800 + IFF_SLAVE_INACTIVE = 0x4 + IFF_SLAVE_NEEDARP = 0x40 + IFF_TAP = 0x2 + IFF_TUN = 0x1 + IFF_TUN_EXCL = 0x8000 + IFF_TX_SKB_SHARING = 0x10000 + IFF_UNICAST_FLT = 0x20000 + IFF_UP = 0x1 + IFF_VNET_HDR = 0x4000 + IFF_VOLATILE = 0x70c5a + IFF_WAN_HDLC = 0x200 + IFF_XMIT_DST_RELEASE = 0x400 + IFNAMSIZ = 0x10 + IGNBRK = 0x1 + IGNCR = 0x80 + IGNPAR = 0x4 + IMAXBEL = 0x2000 + INLCR = 0x40 + INPCK = 0x10 + IN_ACCESS = 0x1 + IN_ALL_EVENTS = 0xfff + IN_ATTRIB = 0x4 + IN_CLASSA_HOST = 0xffffff + IN_CLASSA_MAX = 0x80 + IN_CLASSA_NET = 0xff000000 + IN_CLASSA_NSHIFT = 0x18 + IN_CLASSB_HOST = 0xffff + IN_CLASSB_MAX = 0x10000 + IN_CLASSB_NET = 0xffff0000 + IN_CLASSB_NSHIFT = 0x10 + IN_CLASSC_HOST = 0xff + IN_CLASSC_NET = 0xffffff00 + IN_CLASSC_NSHIFT = 0x8 + IN_CLOEXEC = 0x80000 + IN_CLOSE = 0x18 + IN_CLOSE_NOWRITE = 0x10 + IN_CLOSE_WRITE = 0x8 + IN_CREATE = 0x100 + IN_DELETE = 0x200 + IN_DELETE_SELF = 0x400 + IN_DONT_FOLLOW = 0x2000000 + IN_EXCL_UNLINK = 0x4000000 + IN_IGNORED = 0x8000 + IN_ISDIR = 0x40000000 + IN_LOOPBACKNET = 0x7f + IN_MASK_ADD = 0x20000000 + IN_MODIFY = 0x2 + IN_MOVE = 0xc0 + IN_MOVED_FROM = 0x40 + IN_MOVED_TO = 0x80 + IN_MOVE_SELF = 0x800 + IN_NONBLOCK = 0x800 + IN_ONESHOT = 0x80000000 + IN_ONLYDIR = 0x1000000 + IN_OPEN = 0x20 + IN_Q_OVERFLOW = 0x4000 + IN_UNMOUNT = 0x2000 + IPPROTO_AH = 0x33 + IPPROTO_COMP = 0x6c + IPPROTO_DCCP = 0x21 + IPPROTO_DSTOPTS = 0x3c + IPPROTO_EGP = 0x8 + IPPROTO_ENCAP = 0x62 + IPPROTO_ESP = 0x32 + IPPROTO_FRAGMENT = 0x2c + IPPROTO_GRE = 0x2f + IPPROTO_HOPOPTS = 0x0 + IPPROTO_ICMP = 0x1 + IPPROTO_ICMPV6 = 0x3a + IPPROTO_IDP 
= 0x16 + IPPROTO_IGMP = 0x2 + IPPROTO_IP = 0x0 + IPPROTO_IPIP = 0x4 + IPPROTO_IPV6 = 0x29 + IPPROTO_MTP = 0x5c + IPPROTO_NONE = 0x3b + IPPROTO_PIM = 0x67 + IPPROTO_PUP = 0xc + IPPROTO_RAW = 0xff + IPPROTO_ROUTING = 0x2b + IPPROTO_RSVP = 0x2e + IPPROTO_SCTP = 0x84 + IPPROTO_TCP = 0x6 + IPPROTO_TP = 0x1d + IPPROTO_UDP = 0x11 + IPPROTO_UDPLITE = 0x88 + IPV6_2292DSTOPTS = 0x4 + IPV6_2292HOPLIMIT = 0x8 + IPV6_2292HOPOPTS = 0x3 + IPV6_2292PKTINFO = 0x2 + IPV6_2292PKTOPTIONS = 0x6 + IPV6_2292RTHDR = 0x5 + IPV6_ADDRFORM = 0x1 + IPV6_ADD_MEMBERSHIP = 0x14 + IPV6_AUTHHDR = 0xa + IPV6_CHECKSUM = 0x7 + IPV6_DROP_MEMBERSHIP = 0x15 + IPV6_DSTOPTS = 0x3b + IPV6_HOPLIMIT = 0x34 + IPV6_HOPOPTS = 0x36 + IPV6_IPSEC_POLICY = 0x22 + IPV6_JOIN_ANYCAST = 0x1b + IPV6_JOIN_GROUP = 0x14 + IPV6_LEAVE_ANYCAST = 0x1c + IPV6_LEAVE_GROUP = 0x15 + IPV6_MTU = 0x18 + IPV6_MTU_DISCOVER = 0x17 + IPV6_MULTICAST_HOPS = 0x12 + IPV6_MULTICAST_IF = 0x11 + IPV6_MULTICAST_LOOP = 0x13 + IPV6_NEXTHOP = 0x9 + IPV6_PKTINFO = 0x32 + IPV6_PMTUDISC_DO = 0x2 + IPV6_PMTUDISC_DONT = 0x0 + IPV6_PMTUDISC_PROBE = 0x3 + IPV6_PMTUDISC_WANT = 0x1 + IPV6_RECVDSTOPTS = 0x3a + IPV6_RECVERR = 0x19 + IPV6_RECVHOPLIMIT = 0x33 + IPV6_RECVHOPOPTS = 0x35 + IPV6_RECVPKTINFO = 0x31 + IPV6_RECVRTHDR = 0x38 + IPV6_RECVTCLASS = 0x42 + IPV6_ROUTER_ALERT = 0x16 + IPV6_RTHDR = 0x39 + IPV6_RTHDRDSTOPTS = 0x37 + IPV6_RTHDR_LOOSE = 0x0 + IPV6_RTHDR_STRICT = 0x1 + IPV6_RTHDR_TYPE_0 = 0x0 + IPV6_RXDSTOPTS = 0x3b + IPV6_RXHOPOPTS = 0x36 + IPV6_TCLASS = 0x43 + IPV6_UNICAST_HOPS = 0x10 + IPV6_V6ONLY = 0x1a + IPV6_XFRM_POLICY = 0x23 + IP_ADD_MEMBERSHIP = 0x23 + IP_ADD_SOURCE_MEMBERSHIP = 0x27 + IP_BLOCK_SOURCE = 0x26 + IP_DEFAULT_MULTICAST_LOOP = 0x1 + IP_DEFAULT_MULTICAST_TTL = 0x1 + IP_DF = 0x4000 + IP_DROP_MEMBERSHIP = 0x24 + IP_DROP_SOURCE_MEMBERSHIP = 0x28 + IP_FREEBIND = 0xf + IP_HDRINCL = 0x3 + IP_IPSEC_POLICY = 0x10 + IP_MAXPACKET = 0xffff + IP_MAX_MEMBERSHIPS = 0x14 + IP_MF = 0x2000 + IP_MINTTL = 0x15 + IP_MSFILTER = 0x29 + IP_MSS = 0x240 + IP_MTU = 0xe + IP_MTU_DISCOVER = 0xa + IP_MULTICAST_ALL = 0x31 + IP_MULTICAST_IF = 0x20 + IP_MULTICAST_LOOP = 0x22 + IP_MULTICAST_TTL = 0x21 + IP_OFFMASK = 0x1fff + IP_OPTIONS = 0x4 + IP_ORIGDSTADDR = 0x14 + IP_PASSSEC = 0x12 + IP_PKTINFO = 0x8 + IP_PKTOPTIONS = 0x9 + IP_PMTUDISC = 0xa + IP_PMTUDISC_DO = 0x2 + IP_PMTUDISC_DONT = 0x0 + IP_PMTUDISC_PROBE = 0x3 + IP_PMTUDISC_WANT = 0x1 + IP_RECVERR = 0xb + IP_RECVOPTS = 0x6 + IP_RECVORIGDSTADDR = 0x14 + IP_RECVRETOPTS = 0x7 + IP_RECVTOS = 0xd + IP_RECVTTL = 0xc + IP_RETOPTS = 0x7 + IP_RF = 0x8000 + IP_ROUTER_ALERT = 0x5 + IP_TOS = 0x1 + IP_TRANSPARENT = 0x13 + IP_TTL = 0x2 + IP_UNBLOCK_SOURCE = 0x25 + IP_XFRM_POLICY = 0x11 + ISIG = 0x1 + ISTRIP = 0x20 + IUCLC = 0x200 + IUTF8 = 0x4000 + IXANY = 0x800 + IXOFF = 0x1000 + IXON = 0x400 + LINUX_REBOOT_CMD_CAD_OFF = 0x0 + LINUX_REBOOT_CMD_CAD_ON = 0x89abcdef + LINUX_REBOOT_CMD_HALT = 0xcdef0123 + LINUX_REBOOT_CMD_KEXEC = 0x45584543 + LINUX_REBOOT_CMD_POWER_OFF = 0x4321fedc + LINUX_REBOOT_CMD_RESTART = 0x1234567 + LINUX_REBOOT_CMD_RESTART2 = 0xa1b2c3d4 + LINUX_REBOOT_CMD_SW_SUSPEND = 0xd000fce2 + LINUX_REBOOT_MAGIC1 = 0xfee1dead + LINUX_REBOOT_MAGIC2 = 0x28121969 + LOCK_EX = 0x2 + LOCK_NB = 0x4 + LOCK_SH = 0x1 + LOCK_UN = 0x8 + MADV_DOFORK = 0xb + MADV_DONTFORK = 0xa + MADV_DONTNEED = 0x4 + MADV_HUGEPAGE = 0xe + MADV_HWPOISON = 0x64 + MADV_MERGEABLE = 0xc + MADV_NOHUGEPAGE = 0xf + MADV_NORMAL = 0x0 + MADV_RANDOM = 0x1 + MADV_REMOVE = 0x9 + MADV_SEQUENTIAL = 0x2 + MADV_UNMERGEABLE = 0xd + MADV_WILLNEED = 0x3 + MAP_32BIT = 0x40 + MAP_ANON = 
0x20 + MAP_ANONYMOUS = 0x20 + MAP_DENYWRITE = 0x800 + MAP_EXECUTABLE = 0x1000 + MAP_FILE = 0x0 + MAP_FIXED = 0x10 + MAP_GROWSDOWN = 0x100 + MAP_HUGETLB = 0x40000 + MAP_LOCKED = 0x2000 + MAP_NONBLOCK = 0x10000 + MAP_NORESERVE = 0x4000 + MAP_POPULATE = 0x8000 + MAP_PRIVATE = 0x2 + MAP_SHARED = 0x1 + MAP_STACK = 0x20000 + MAP_TYPE = 0xf + MCL_CURRENT = 0x1 + MCL_FUTURE = 0x2 + MNT_DETACH = 0x2 + MNT_EXPIRE = 0x4 + MNT_FORCE = 0x1 + MSG_CMSG_CLOEXEC = 0x40000000 + MSG_CONFIRM = 0x800 + MSG_CTRUNC = 0x8 + MSG_DONTROUTE = 0x4 + MSG_DONTWAIT = 0x40 + MSG_EOR = 0x80 + MSG_ERRQUEUE = 0x2000 + MSG_FASTOPEN = 0x20000000 + MSG_FIN = 0x200 + MSG_MORE = 0x8000 + MSG_NOSIGNAL = 0x4000 + MSG_OOB = 0x1 + MSG_PEEK = 0x2 + MSG_PROXY = 0x10 + MSG_RST = 0x1000 + MSG_SYN = 0x400 + MSG_TRUNC = 0x20 + MSG_TRYHARD = 0x4 + MSG_WAITALL = 0x100 + MSG_WAITFORONE = 0x10000 + MS_ACTIVE = 0x40000000 + MS_ASYNC = 0x1 + MS_BIND = 0x1000 + MS_DIRSYNC = 0x80 + MS_INVALIDATE = 0x2 + MS_I_VERSION = 0x800000 + MS_KERNMOUNT = 0x400000 + MS_MANDLOCK = 0x40 + MS_MGC_MSK = 0xffff0000 + MS_MGC_VAL = 0xc0ed0000 + MS_MOVE = 0x2000 + MS_NOATIME = 0x400 + MS_NODEV = 0x4 + MS_NODIRATIME = 0x800 + MS_NOEXEC = 0x8 + MS_NOSUID = 0x2 + MS_NOUSER = -0x80000000 + MS_POSIXACL = 0x10000 + MS_PRIVATE = 0x40000 + MS_RDONLY = 0x1 + MS_REC = 0x4000 + MS_RELATIME = 0x200000 + MS_REMOUNT = 0x20 + MS_RMT_MASK = 0x800051 + MS_SHARED = 0x100000 + MS_SILENT = 0x8000 + MS_SLAVE = 0x80000 + MS_STRICTATIME = 0x1000000 + MS_SYNC = 0x4 + MS_SYNCHRONOUS = 0x10 + MS_UNBINDABLE = 0x20000 + NAME_MAX = 0xff + NETLINK_ADD_MEMBERSHIP = 0x1 + NETLINK_AUDIT = 0x9 + NETLINK_BROADCAST_ERROR = 0x4 + NETLINK_CONNECTOR = 0xb + NETLINK_CRYPTO = 0x15 + NETLINK_DNRTMSG = 0xe + NETLINK_DROP_MEMBERSHIP = 0x2 + NETLINK_ECRYPTFS = 0x13 + NETLINK_FIB_LOOKUP = 0xa + NETLINK_FIREWALL = 0x3 + NETLINK_GENERIC = 0x10 + NETLINK_INET_DIAG = 0x4 + NETLINK_IP6_FW = 0xd + NETLINK_ISCSI = 0x8 + NETLINK_KOBJECT_UEVENT = 0xf + NETLINK_NETFILTER = 0xc + NETLINK_NFLOG = 0x5 + NETLINK_NO_ENOBUFS = 0x5 + NETLINK_PKTINFO = 0x3 + NETLINK_RDMA = 0x14 + NETLINK_ROUTE = 0x0 + NETLINK_SCSITRANSPORT = 0x12 + NETLINK_SELINUX = 0x7 + NETLINK_UNUSED = 0x1 + NETLINK_USERSOCK = 0x2 + NETLINK_XFRM = 0x6 + NL0 = 0x0 + NL1 = 0x100 + NLA_ALIGNTO = 0x4 + NLA_F_NESTED = 0x8000 + NLA_F_NET_BYTEORDER = 0x4000 + NLA_HDRLEN = 0x4 + NLDLY = 0x100 + NLMSG_ALIGNTO = 0x4 + NLMSG_DONE = 0x3 + NLMSG_ERROR = 0x2 + NLMSG_HDRLEN = 0x10 + NLMSG_MIN_TYPE = 0x10 + NLMSG_NOOP = 0x1 + NLMSG_OVERRUN = 0x4 + NLM_F_ACK = 0x4 + NLM_F_APPEND = 0x800 + NLM_F_ATOMIC = 0x400 + NLM_F_CREATE = 0x400 + NLM_F_DUMP = 0x300 + NLM_F_DUMP_INTR = 0x10 + NLM_F_ECHO = 0x8 + NLM_F_EXCL = 0x200 + NLM_F_MATCH = 0x200 + NLM_F_MULTI = 0x2 + NLM_F_REPLACE = 0x100 + NLM_F_REQUEST = 0x1 + NLM_F_ROOT = 0x100 + NOFLSH = 0x80 + OCRNL = 0x8 + OFDEL = 0x80 + OFILL = 0x40 + OLCUC = 0x2 + ONLCR = 0x4 + ONLRET = 0x20 + ONOCR = 0x10 + OPOST = 0x1 + O_ACCMODE = 0x3 + O_APPEND = 0x400 + O_ASYNC = 0x2000 + O_CLOEXEC = 0x80000 + O_CREAT = 0x40 + O_DIRECT = 0x4000 + O_DIRECTORY = 0x10000 + O_DSYNC = 0x1000 + O_EXCL = 0x80 + O_FSYNC = 0x101000 + O_LARGEFILE = 0x8000 + O_NDELAY = 0x800 + O_NOATIME = 0x40000 + O_NOCTTY = 0x100 + O_NOFOLLOW = 0x20000 + O_NONBLOCK = 0x800 + O_PATH = 0x200000 + O_RDONLY = 0x0 + O_RDWR = 0x2 + O_RSYNC = 0x101000 + O_SYNC = 0x101000 + O_TRUNC = 0x200 + O_WRONLY = 0x1 + PACKET_ADD_MEMBERSHIP = 0x1 + PACKET_AUXDATA = 0x8 + PACKET_BROADCAST = 0x1 + PACKET_COPY_THRESH = 0x7 + PACKET_DROP_MEMBERSHIP = 0x2 + PACKET_FANOUT = 0x12 + PACKET_FANOUT_CPU = 0x2 
+ PACKET_FANOUT_FLAG_DEFRAG = 0x8000 + PACKET_FANOUT_HASH = 0x0 + PACKET_FANOUT_LB = 0x1 + PACKET_FASTROUTE = 0x6 + PACKET_HDRLEN = 0xb + PACKET_HOST = 0x0 + PACKET_LOOPBACK = 0x5 + PACKET_LOSS = 0xe + PACKET_MR_ALLMULTI = 0x2 + PACKET_MR_MULTICAST = 0x0 + PACKET_MR_PROMISC = 0x1 + PACKET_MR_UNICAST = 0x3 + PACKET_MULTICAST = 0x2 + PACKET_ORIGDEV = 0x9 + PACKET_OTHERHOST = 0x3 + PACKET_OUTGOING = 0x4 + PACKET_RECV_OUTPUT = 0x3 + PACKET_RESERVE = 0xc + PACKET_RX_RING = 0x5 + PACKET_STATISTICS = 0x6 + PACKET_TIMESTAMP = 0x11 + PACKET_TX_RING = 0xd + PACKET_TX_TIMESTAMP = 0x10 + PACKET_VERSION = 0xa + PACKET_VNET_HDR = 0xf + PARENB = 0x100 + PARITY_CRC16_PR0 = 0x2 + PARITY_CRC16_PR0_CCITT = 0x4 + PARITY_CRC16_PR1 = 0x3 + PARITY_CRC16_PR1_CCITT = 0x5 + PARITY_CRC32_PR0_CCITT = 0x6 + PARITY_CRC32_PR1_CCITT = 0x7 + PARITY_DEFAULT = 0x0 + PARITY_NONE = 0x1 + PARMRK = 0x8 + PARODD = 0x200 + PENDIN = 0x4000 + PRIO_PGRP = 0x1 + PRIO_PROCESS = 0x0 + PRIO_USER = 0x2 + PROT_EXEC = 0x4 + PROT_GROWSDOWN = 0x1000000 + PROT_GROWSUP = 0x2000000 + PROT_NONE = 0x0 + PROT_READ = 0x1 + PROT_WRITE = 0x2 + PR_CAPBSET_DROP = 0x18 + PR_CAPBSET_READ = 0x17 + PR_ENDIAN_BIG = 0x0 + PR_ENDIAN_LITTLE = 0x1 + PR_ENDIAN_PPC_LITTLE = 0x2 + PR_FPEMU_NOPRINT = 0x1 + PR_FPEMU_SIGFPE = 0x2 + PR_FP_EXC_ASYNC = 0x2 + PR_FP_EXC_DISABLED = 0x0 + PR_FP_EXC_DIV = 0x10000 + PR_FP_EXC_INV = 0x100000 + PR_FP_EXC_NONRECOV = 0x1 + PR_FP_EXC_OVF = 0x20000 + PR_FP_EXC_PRECISE = 0x3 + PR_FP_EXC_RES = 0x80000 + PR_FP_EXC_SW_ENABLE = 0x80 + PR_FP_EXC_UND = 0x40000 + PR_GET_DUMPABLE = 0x3 + PR_GET_ENDIAN = 0x13 + PR_GET_FPEMU = 0x9 + PR_GET_FPEXC = 0xb + PR_GET_KEEPCAPS = 0x7 + PR_GET_NAME = 0x10 + PR_GET_NO_NEW_PRIVS = 0x27 + PR_GET_PDEATHSIG = 0x2 + PR_GET_SECCOMP = 0x15 + PR_GET_SECUREBITS = 0x1b + PR_GET_TIMERSLACK = 0x1e + PR_GET_TIMING = 0xd + PR_GET_TSC = 0x19 + PR_GET_UNALIGN = 0x5 + PR_MCE_KILL = 0x21 + PR_MCE_KILL_CLEAR = 0x0 + PR_MCE_KILL_DEFAULT = 0x2 + PR_MCE_KILL_EARLY = 0x1 + PR_MCE_KILL_GET = 0x22 + PR_MCE_KILL_LATE = 0x0 + PR_MCE_KILL_SET = 0x1 + PR_SET_DUMPABLE = 0x4 + PR_SET_ENDIAN = 0x14 + PR_SET_FPEMU = 0xa + PR_SET_FPEXC = 0xc + PR_SET_KEEPCAPS = 0x8 + PR_SET_MM = 0x23 + PR_SET_MM_BRK = 0x7 + PR_SET_MM_END_CODE = 0x2 + PR_SET_MM_END_DATA = 0x4 + PR_SET_MM_START_BRK = 0x6 + PR_SET_MM_START_CODE = 0x1 + PR_SET_MM_START_DATA = 0x3 + PR_SET_MM_START_STACK = 0x5 + PR_SET_NAME = 0xf + PR_SET_NO_NEW_PRIVS = 0x26 + PR_SET_PDEATHSIG = 0x1 + PR_SET_PTRACER = 0x59616d61 + PR_SET_PTRACER_ANY = 0xffffffff + PR_SET_SECCOMP = 0x16 + PR_SET_SECUREBITS = 0x1c + PR_SET_TIMERSLACK = 0x1d + PR_SET_TIMING = 0xe + PR_SET_TSC = 0x1a + PR_SET_UNALIGN = 0x6 + PR_TASK_PERF_EVENTS_DISABLE = 0x1f + PR_TASK_PERF_EVENTS_ENABLE = 0x20 + PR_TIMING_STATISTICAL = 0x0 + PR_TIMING_TIMESTAMP = 0x1 + PR_TSC_ENABLE = 0x1 + PR_TSC_SIGSEGV = 0x2 + PR_UNALIGN_NOPRINT = 0x1 + PR_UNALIGN_SIGBUS = 0x2 + PTRACE_ATTACH = 0x10 + PTRACE_CONT = 0x7 + PTRACE_DETACH = 0x11 + PTRACE_EVENT_CLONE = 0x3 + PTRACE_EVENT_EXEC = 0x4 + PTRACE_EVENT_EXIT = 0x6 + PTRACE_EVENT_FORK = 0x1 + PTRACE_EVENT_SECCOMP = 0x7 + PTRACE_EVENT_STOP = 0x80 + PTRACE_EVENT_VFORK = 0x2 + PTRACE_EVENT_VFORK_DONE = 0x5 + PTRACE_GETEVENTMSG = 0x4201 + PTRACE_GETFPREGS = 0xe + PTRACE_GETFPXREGS = 0x12 + PTRACE_GETREGS = 0xc + PTRACE_GETREGSET = 0x4204 + PTRACE_GETSIGINFO = 0x4202 + PTRACE_GET_THREAD_AREA = 0x19 + PTRACE_INTERRUPT = 0x4207 + PTRACE_KILL = 0x8 + PTRACE_LISTEN = 0x4208 + PTRACE_OLDSETOPTIONS = 0x15 + PTRACE_O_MASK = 0xff + PTRACE_O_TRACECLONE = 0x8 + PTRACE_O_TRACEEXEC = 0x10 + 
PTRACE_O_TRACEEXIT = 0x40 + PTRACE_O_TRACEFORK = 0x2 + PTRACE_O_TRACESECCOMP = 0x80 + PTRACE_O_TRACESYSGOOD = 0x1 + PTRACE_O_TRACEVFORK = 0x4 + PTRACE_O_TRACEVFORKDONE = 0x20 + PTRACE_PEEKDATA = 0x2 + PTRACE_PEEKTEXT = 0x1 + PTRACE_PEEKUSR = 0x3 + PTRACE_POKEDATA = 0x5 + PTRACE_POKETEXT = 0x4 + PTRACE_POKEUSR = 0x6 + PTRACE_SEIZE = 0x4206 + PTRACE_SEIZE_DEVEL = 0x80000000 + PTRACE_SETFPREGS = 0xf + PTRACE_SETFPXREGS = 0x13 + PTRACE_SETOPTIONS = 0x4200 + PTRACE_SETREGS = 0xd + PTRACE_SETREGSET = 0x4205 + PTRACE_SETSIGINFO = 0x4203 + PTRACE_SET_THREAD_AREA = 0x1a + PTRACE_SINGLEBLOCK = 0x21 + PTRACE_SINGLESTEP = 0x9 + PTRACE_SYSCALL = 0x18 + PTRACE_SYSEMU = 0x1f + PTRACE_SYSEMU_SINGLESTEP = 0x20 + PTRACE_TRACEME = 0x0 + RLIMIT_AS = 0x9 + RLIMIT_CORE = 0x4 + RLIMIT_CPU = 0x0 + RLIMIT_DATA = 0x2 + RLIMIT_FSIZE = 0x1 + RLIMIT_NOFILE = 0x7 + RLIMIT_STACK = 0x3 + RLIM_INFINITY = -0x1 + RTAX_ADVMSS = 0x8 + RTAX_CWND = 0x7 + RTAX_FEATURES = 0xc + RTAX_FEATURE_ALLFRAG = 0x8 + RTAX_FEATURE_ECN = 0x1 + RTAX_FEATURE_SACK = 0x2 + RTAX_FEATURE_TIMESTAMP = 0x4 + RTAX_HOPLIMIT = 0xa + RTAX_INITCWND = 0xb + RTAX_INITRWND = 0xe + RTAX_LOCK = 0x1 + RTAX_MAX = 0xe + RTAX_MTU = 0x2 + RTAX_REORDERING = 0x9 + RTAX_RTO_MIN = 0xd + RTAX_RTT = 0x4 + RTAX_RTTVAR = 0x5 + RTAX_SSTHRESH = 0x6 + RTAX_UNSPEC = 0x0 + RTAX_WINDOW = 0x3 + RTA_ALIGNTO = 0x4 + RTA_MAX = 0x10 + RTCF_DIRECTSRC = 0x4000000 + RTCF_DOREDIRECT = 0x1000000 + RTCF_LOG = 0x2000000 + RTCF_MASQ = 0x400000 + RTCF_NAT = 0x800000 + RTCF_VALVE = 0x200000 + RTF_ADDRCLASSMASK = 0xf8000000 + RTF_ADDRCONF = 0x40000 + RTF_ALLONLINK = 0x20000 + RTF_BROADCAST = 0x10000000 + RTF_CACHE = 0x1000000 + RTF_DEFAULT = 0x10000 + RTF_DYNAMIC = 0x10 + RTF_FLOW = 0x2000000 + RTF_GATEWAY = 0x2 + RTF_HOST = 0x4 + RTF_INTERFACE = 0x40000000 + RTF_IRTT = 0x100 + RTF_LINKRT = 0x100000 + RTF_LOCAL = 0x80000000 + RTF_MODIFIED = 0x20 + RTF_MSS = 0x40 + RTF_MTU = 0x40 + RTF_MULTICAST = 0x20000000 + RTF_NAT = 0x8000000 + RTF_NOFORWARD = 0x1000 + RTF_NONEXTHOP = 0x200000 + RTF_NOPMTUDISC = 0x4000 + RTF_POLICY = 0x4000000 + RTF_REINSTATE = 0x8 + RTF_REJECT = 0x200 + RTF_STATIC = 0x400 + RTF_THROW = 0x2000 + RTF_UP = 0x1 + RTF_WINDOW = 0x80 + RTF_XRESOLVE = 0x800 + RTM_BASE = 0x10 + RTM_DELACTION = 0x31 + RTM_DELADDR = 0x15 + RTM_DELADDRLABEL = 0x49 + RTM_DELLINK = 0x11 + RTM_DELNEIGH = 0x1d + RTM_DELQDISC = 0x25 + RTM_DELROUTE = 0x19 + RTM_DELRULE = 0x21 + RTM_DELTCLASS = 0x29 + RTM_DELTFILTER = 0x2d + RTM_F_CLONED = 0x200 + RTM_F_EQUALIZE = 0x400 + RTM_F_NOTIFY = 0x100 + RTM_F_PREFIX = 0x800 + RTM_GETACTION = 0x32 + RTM_GETADDR = 0x16 + RTM_GETADDRLABEL = 0x4a + RTM_GETANYCAST = 0x3e + RTM_GETDCB = 0x4e + RTM_GETLINK = 0x12 + RTM_GETMULTICAST = 0x3a + RTM_GETNEIGH = 0x1e + RTM_GETNEIGHTBL = 0x42 + RTM_GETQDISC = 0x26 + RTM_GETROUTE = 0x1a + RTM_GETRULE = 0x22 + RTM_GETTCLASS = 0x2a + RTM_GETTFILTER = 0x2e + RTM_MAX = 0x4f + RTM_NEWACTION = 0x30 + RTM_NEWADDR = 0x14 + RTM_NEWADDRLABEL = 0x48 + RTM_NEWLINK = 0x10 + RTM_NEWNDUSEROPT = 0x44 + RTM_NEWNEIGH = 0x1c + RTM_NEWNEIGHTBL = 0x40 + RTM_NEWPREFIX = 0x34 + RTM_NEWQDISC = 0x24 + RTM_NEWROUTE = 0x18 + RTM_NEWRULE = 0x20 + RTM_NEWTCLASS = 0x28 + RTM_NEWTFILTER = 0x2c + RTM_NR_FAMILIES = 0x10 + RTM_NR_MSGTYPES = 0x40 + RTM_SETDCB = 0x4f + RTM_SETLINK = 0x13 + RTM_SETNEIGHTBL = 0x43 + RTNH_ALIGNTO = 0x4 + RTNH_F_DEAD = 0x1 + RTNH_F_ONLINK = 0x4 + RTNH_F_PERVASIVE = 0x2 + RTN_MAX = 0xb + RTPROT_BIRD = 0xc + RTPROT_BOOT = 0x3 + RTPROT_DHCP = 0x10 + RTPROT_DNROUTED = 0xd + RTPROT_GATED = 0x8 + RTPROT_KERNEL = 0x2 + RTPROT_MRT = 0xa + 
RTPROT_NTK = 0xf + RTPROT_RA = 0x9 + RTPROT_REDIRECT = 0x1 + RTPROT_STATIC = 0x4 + RTPROT_UNSPEC = 0x0 + RTPROT_XORP = 0xe + RTPROT_ZEBRA = 0xb + RT_CLASS_DEFAULT = 0xfd + RT_CLASS_LOCAL = 0xff + RT_CLASS_MAIN = 0xfe + RT_CLASS_MAX = 0xff + RT_CLASS_UNSPEC = 0x0 + RUSAGE_CHILDREN = -0x1 + RUSAGE_SELF = 0x0 + RUSAGE_THREAD = 0x1 + SCM_CREDENTIALS = 0x2 + SCM_RIGHTS = 0x1 + SCM_TIMESTAMP = 0x1d + SCM_TIMESTAMPING = 0x25 + SCM_TIMESTAMPNS = 0x23 + SHUT_RD = 0x0 + SHUT_RDWR = 0x2 + SHUT_WR = 0x1 + SIOCADDDLCI = 0x8980 + SIOCADDMULTI = 0x8931 + SIOCADDRT = 0x890b + SIOCATMARK = 0x8905 + SIOCDARP = 0x8953 + SIOCDELDLCI = 0x8981 + SIOCDELMULTI = 0x8932 + SIOCDELRT = 0x890c + SIOCDEVPRIVATE = 0x89f0 + SIOCDIFADDR = 0x8936 + SIOCDRARP = 0x8960 + SIOCGARP = 0x8954 + SIOCGIFADDR = 0x8915 + SIOCGIFBR = 0x8940 + SIOCGIFBRDADDR = 0x8919 + SIOCGIFCONF = 0x8912 + SIOCGIFCOUNT = 0x8938 + SIOCGIFDSTADDR = 0x8917 + SIOCGIFENCAP = 0x8925 + SIOCGIFFLAGS = 0x8913 + SIOCGIFHWADDR = 0x8927 + SIOCGIFINDEX = 0x8933 + SIOCGIFMAP = 0x8970 + SIOCGIFMEM = 0x891f + SIOCGIFMETRIC = 0x891d + SIOCGIFMTU = 0x8921 + SIOCGIFNAME = 0x8910 + SIOCGIFNETMASK = 0x891b + SIOCGIFPFLAGS = 0x8935 + SIOCGIFSLAVE = 0x8929 + SIOCGIFTXQLEN = 0x8942 + SIOCGPGRP = 0x8904 + SIOCGRARP = 0x8961 + SIOCGSTAMP = 0x8906 + SIOCGSTAMPNS = 0x8907 + SIOCPROTOPRIVATE = 0x89e0 + SIOCRTMSG = 0x890d + SIOCSARP = 0x8955 + SIOCSIFADDR = 0x8916 + SIOCSIFBR = 0x8941 + SIOCSIFBRDADDR = 0x891a + SIOCSIFDSTADDR = 0x8918 + SIOCSIFENCAP = 0x8926 + SIOCSIFFLAGS = 0x8914 + SIOCSIFHWADDR = 0x8924 + SIOCSIFHWBROADCAST = 0x8937 + SIOCSIFLINK = 0x8911 + SIOCSIFMAP = 0x8971 + SIOCSIFMEM = 0x8920 + SIOCSIFMETRIC = 0x891e + SIOCSIFMTU = 0x8922 + SIOCSIFNAME = 0x8923 + SIOCSIFNETMASK = 0x891c + SIOCSIFPFLAGS = 0x8934 + SIOCSIFSLAVE = 0x8930 + SIOCSIFTXQLEN = 0x8943 + SIOCSPGRP = 0x8902 + SIOCSRARP = 0x8962 + SOCK_CLOEXEC = 0x80000 + SOCK_DCCP = 0x6 + SOCK_DGRAM = 0x2 + SOCK_NONBLOCK = 0x800 + SOCK_PACKET = 0xa + SOCK_RAW = 0x3 + SOCK_RDM = 0x4 + SOCK_SEQPACKET = 0x5 + SOCK_STREAM = 0x1 + SOL_AAL = 0x109 + SOL_ATM = 0x108 + SOL_DECNET = 0x105 + SOL_ICMPV6 = 0x3a + SOL_IP = 0x0 + SOL_IPV6 = 0x29 + SOL_IRDA = 0x10a + SOL_PACKET = 0x107 + SOL_RAW = 0xff + SOL_SOCKET = 0x1 + SOL_TCP = 0x6 + SOL_X25 = 0x106 + SOMAXCONN = 0x80 + SO_ACCEPTCONN = 0x1e + SO_ATTACH_FILTER = 0x1a + SO_BINDTODEVICE = 0x19 + SO_BROADCAST = 0x6 + SO_BSDCOMPAT = 0xe + SO_DEBUG = 0x1 + SO_DETACH_FILTER = 0x1b + SO_DOMAIN = 0x27 + SO_DONTROUTE = 0x5 + SO_ERROR = 0x4 + SO_KEEPALIVE = 0x9 + SO_LINGER = 0xd + SO_MARK = 0x24 + SO_NO_CHECK = 0xb + SO_OOBINLINE = 0xa + SO_PASSCRED = 0x10 + SO_PASSSEC = 0x22 + SO_PEERCRED = 0x11 + SO_PEERNAME = 0x1c + SO_PEERSEC = 0x1f + SO_PRIORITY = 0xc + SO_PROTOCOL = 0x26 + SO_RCVBUF = 0x8 + SO_RCVBUFFORCE = 0x21 + SO_RCVLOWAT = 0x12 + SO_RCVTIMEO = 0x14 + SO_REUSEADDR = 0x2 + SO_RXQ_OVFL = 0x28 + SO_SECURITY_AUTHENTICATION = 0x16 + SO_SECURITY_ENCRYPTION_NETWORK = 0x18 + SO_SECURITY_ENCRYPTION_TRANSPORT = 0x17 + SO_SNDBUF = 0x7 + SO_SNDBUFFORCE = 0x20 + SO_SNDLOWAT = 0x13 + SO_SNDTIMEO = 0x15 + SO_TIMESTAMP = 0x1d + SO_TIMESTAMPING = 0x25 + SO_TIMESTAMPNS = 0x23 + SO_TYPE = 0x3 + S_BLKSIZE = 0x200 + S_IEXEC = 0x40 + S_IFBLK = 0x6000 + S_IFCHR = 0x2000 + S_IFDIR = 0x4000 + S_IFIFO = 0x1000 + S_IFLNK = 0xa000 + S_IFMT = 0xf000 + S_IFREG = 0x8000 + S_IFSOCK = 0xc000 + S_IREAD = 0x100 + S_IRGRP = 0x20 + S_IROTH = 0x4 + S_IRUSR = 0x100 + S_IRWXG = 0x38 + S_IRWXO = 0x7 + S_IRWXU = 0x1c0 + S_ISGID = 0x400 + S_ISUID = 0x800 + S_ISVTX = 0x200 + S_IWGRP = 0x10 + S_IWOTH = 0x2 + S_IWRITE 
= 0x80 + S_IWUSR = 0x80 + S_IXGRP = 0x8 + S_IXOTH = 0x1 + S_IXUSR = 0x40 + TAB0 = 0x0 + TAB1 = 0x800 + TAB2 = 0x1000 + TAB3 = 0x1800 + TABDLY = 0x1800 + TCFLSH = 0x540b + TCGETA = 0x5405 + TCGETS = 0x5401 + TCGETS2 = 0x802c542a + TCGETX = 0x5432 + TCIFLUSH = 0x0 + TCIOFF = 0x2 + TCIOFLUSH = 0x2 + TCION = 0x3 + TCOFLUSH = 0x1 + TCOOFF = 0x0 + TCOON = 0x1 + TCP_CONGESTION = 0xd + TCP_CORK = 0x3 + TCP_DEFER_ACCEPT = 0x9 + TCP_INFO = 0xb + TCP_KEEPCNT = 0x6 + TCP_KEEPIDLE = 0x4 + TCP_KEEPINTVL = 0x5 + TCP_LINGER2 = 0x8 + TCP_MAXSEG = 0x2 + TCP_MAXWIN = 0xffff + TCP_MAX_WINSHIFT = 0xe + TCP_MD5SIG = 0xe + TCP_MD5SIG_MAXKEYLEN = 0x50 + TCP_MSS = 0x200 + TCP_NODELAY = 0x1 + TCP_QUICKACK = 0xc + TCP_SYNCNT = 0x7 + TCP_WINDOW_CLAMP = 0xa + TCSAFLUSH = 0x2 + TCSBRK = 0x5409 + TCSBRKP = 0x5425 + TCSETA = 0x5406 + TCSETAF = 0x5408 + TCSETAW = 0x5407 + TCSETS = 0x5402 + TCSETS2 = 0x402c542b + TCSETSF = 0x5404 + TCSETSF2 = 0x402c542d + TCSETSW = 0x5403 + TCSETSW2 = 0x402c542c + TCSETX = 0x5433 + TCSETXF = 0x5434 + TCSETXW = 0x5435 + TCXONC = 0x540a + TIOCCBRK = 0x5428 + TIOCCONS = 0x541d + TIOCEXCL = 0x540c + TIOCGDEV = 0x80045432 + TIOCGETD = 0x5424 + TIOCGEXCL = 0x80045440 + TIOCGICOUNT = 0x545d + TIOCGLCKTRMIOS = 0x5456 + TIOCGPGRP = 0x540f + TIOCGPKT = 0x80045438 + TIOCGPTLCK = 0x80045439 + TIOCGPTN = 0x80045430 + TIOCGRS485 = 0x542e + TIOCGSERIAL = 0x541e + TIOCGSID = 0x5429 + TIOCGSOFTCAR = 0x5419 + TIOCGWINSZ = 0x5413 + TIOCINQ = 0x541b + TIOCLINUX = 0x541c + TIOCMBIC = 0x5417 + TIOCMBIS = 0x5416 + TIOCMGET = 0x5415 + TIOCMIWAIT = 0x545c + TIOCMSET = 0x5418 + TIOCM_CAR = 0x40 + TIOCM_CD = 0x40 + TIOCM_CTS = 0x20 + TIOCM_DSR = 0x100 + TIOCM_DTR = 0x2 + TIOCM_LE = 0x1 + TIOCM_RI = 0x80 + TIOCM_RNG = 0x80 + TIOCM_RTS = 0x4 + TIOCM_SR = 0x10 + TIOCM_ST = 0x8 + TIOCNOTTY = 0x5422 + TIOCNXCL = 0x540d + TIOCOUTQ = 0x5411 + TIOCPKT = 0x5420 + TIOCPKT_DATA = 0x0 + TIOCPKT_DOSTOP = 0x20 + TIOCPKT_FLUSHREAD = 0x1 + TIOCPKT_FLUSHWRITE = 0x2 + TIOCPKT_IOCTL = 0x40 + TIOCPKT_NOSTOP = 0x10 + TIOCPKT_START = 0x8 + TIOCPKT_STOP = 0x4 + TIOCSBRK = 0x5427 + TIOCSCTTY = 0x540e + TIOCSERCONFIG = 0x5453 + TIOCSERGETLSR = 0x5459 + TIOCSERGETMULTI = 0x545a + TIOCSERGSTRUCT = 0x5458 + TIOCSERGWILD = 0x5454 + TIOCSERSETMULTI = 0x545b + TIOCSERSWILD = 0x5455 + TIOCSER_TEMT = 0x1 + TIOCSETD = 0x5423 + TIOCSIG = 0x40045436 + TIOCSLCKTRMIOS = 0x5457 + TIOCSPGRP = 0x5410 + TIOCSPTLCK = 0x40045431 + TIOCSRS485 = 0x542f + TIOCSSERIAL = 0x541f + TIOCSSOFTCAR = 0x541a + TIOCSTI = 0x5412 + TIOCSWINSZ = 0x5414 + TIOCVHANGUP = 0x5437 + TOSTOP = 0x100 + TUNATTACHFILTER = 0x400854d5 + TUNDETACHFILTER = 0x400854d6 + TUNGETFEATURES = 0x800454cf + TUNGETIFF = 0x800454d2 + TUNGETSNDBUF = 0x800454d3 + TUNGETVNETHDRSZ = 0x800454d7 + TUNSETDEBUG = 0x400454c9 + TUNSETGROUP = 0x400454ce + TUNSETIFF = 0x400454ca + TUNSETLINK = 0x400454cd + TUNSETNOCSUM = 0x400454c8 + TUNSETOFFLOAD = 0x400454d0 + TUNSETOWNER = 0x400454cc + TUNSETPERSIST = 0x400454cb + TUNSETSNDBUF = 0x400454d4 + TUNSETTXFILTER = 0x400454d1 + TUNSETVNETHDRSZ = 0x400454d8 + VDISCARD = 0xd + VEOF = 0x4 + VEOL = 0xb + VEOL2 = 0x10 + VERASE = 0x2 + VINTR = 0x0 + VKILL = 0x3 + VLNEXT = 0xf + VMIN = 0x6 + VQUIT = 0x1 + VREPRINT = 0xc + VSTART = 0x8 + VSTOP = 0x9 + VSUSP = 0xa + VSWTC = 0x7 + VT0 = 0x0 + VT1 = 0x4000 + VTDLY = 0x4000 + VTIME = 0x5 + VWERASE = 0xe + WALL = 0x40000000 + WCLONE = 0x80000000 + WCONTINUED = 0x8 + WEXITED = 0x4 + WNOHANG = 0x1 + WNOTHREAD = 0x20000000 + WNOWAIT = 0x1000000 + WORDSIZE = 0x20 + WSTOPPED = 0x2 + WUNTRACED = 0x2 + XCASE = 0x4 + XTABS = 0x1800 +) + 
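(Editor's note, for orientation amid the generated tables: each vendored `zerrors_*.go` file pairs plain `syscall.Errno` / `syscall.Signal` constants with string tables, so callers compare returned errors against the constants directly. A minimal sketch of typical consumption follows; it assumes the canonical `golang.org/x/sys/unix` import path rather than the `fsouza/go-dockerclient` vendored copy above, and is illustrative only, not part of the patch.)

    package main

    import (
        "fmt"
        "syscall"

        "golang.org/x/sys/unix"
    )

    func main() {
        // Open a path that should not exist. x/sys/unix surfaces the raw
        // syscall.Errno, so the result compares directly against the
        // generated constants such as unix.ENOENT above.
        _, err := unix.Open("/no/such/path", unix.O_RDONLY, 0)
        if err == unix.ENOENT {
            fmt.Println("open failed:", err) // "no such file or directory"
        }

        // Signal constants are syscall.Signal values; String() yields the
        // human-readable names listed in the signals table.
        fmt.Println(syscall.Signal(0xf)) // SIGTERM prints "terminated"
    }
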
+// Errors +const ( + E2BIG = syscall.Errno(0x7) + EACCES = syscall.Errno(0xd) + EADDRINUSE = syscall.Errno(0x62) + EADDRNOTAVAIL = syscall.Errno(0x63) + EADV = syscall.Errno(0x44) + EAFNOSUPPORT = syscall.Errno(0x61) + EAGAIN = syscall.Errno(0xb) + EALREADY = syscall.Errno(0x72) + EBADE = syscall.Errno(0x34) + EBADF = syscall.Errno(0x9) + EBADFD = syscall.Errno(0x4d) + EBADMSG = syscall.Errno(0x4a) + EBADR = syscall.Errno(0x35) + EBADRQC = syscall.Errno(0x38) + EBADSLT = syscall.Errno(0x39) + EBFONT = syscall.Errno(0x3b) + EBUSY = syscall.Errno(0x10) + ECANCELED = syscall.Errno(0x7d) + ECHILD = syscall.Errno(0xa) + ECHRNG = syscall.Errno(0x2c) + ECOMM = syscall.Errno(0x46) + ECONNABORTED = syscall.Errno(0x67) + ECONNREFUSED = syscall.Errno(0x6f) + ECONNRESET = syscall.Errno(0x68) + EDEADLK = syscall.Errno(0x23) + EDEADLOCK = syscall.Errno(0x23) + EDESTADDRREQ = syscall.Errno(0x59) + EDOM = syscall.Errno(0x21) + EDOTDOT = syscall.Errno(0x49) + EDQUOT = syscall.Errno(0x7a) + EEXIST = syscall.Errno(0x11) + EFAULT = syscall.Errno(0xe) + EFBIG = syscall.Errno(0x1b) + EHOSTDOWN = syscall.Errno(0x70) + EHOSTUNREACH = syscall.Errno(0x71) + EHWPOISON = syscall.Errno(0x85) + EIDRM = syscall.Errno(0x2b) + EILSEQ = syscall.Errno(0x54) + EINPROGRESS = syscall.Errno(0x73) + EINTR = syscall.Errno(0x4) + EINVAL = syscall.Errno(0x16) + EIO = syscall.Errno(0x5) + EISCONN = syscall.Errno(0x6a) + EISDIR = syscall.Errno(0x15) + EISNAM = syscall.Errno(0x78) + EKEYEXPIRED = syscall.Errno(0x7f) + EKEYREJECTED = syscall.Errno(0x81) + EKEYREVOKED = syscall.Errno(0x80) + EL2HLT = syscall.Errno(0x33) + EL2NSYNC = syscall.Errno(0x2d) + EL3HLT = syscall.Errno(0x2e) + EL3RST = syscall.Errno(0x2f) + ELIBACC = syscall.Errno(0x4f) + ELIBBAD = syscall.Errno(0x50) + ELIBEXEC = syscall.Errno(0x53) + ELIBMAX = syscall.Errno(0x52) + ELIBSCN = syscall.Errno(0x51) + ELNRNG = syscall.Errno(0x30) + ELOOP = syscall.Errno(0x28) + EMEDIUMTYPE = syscall.Errno(0x7c) + EMFILE = syscall.Errno(0x18) + EMLINK = syscall.Errno(0x1f) + EMSGSIZE = syscall.Errno(0x5a) + EMULTIHOP = syscall.Errno(0x48) + ENAMETOOLONG = syscall.Errno(0x24) + ENAVAIL = syscall.Errno(0x77) + ENETDOWN = syscall.Errno(0x64) + ENETRESET = syscall.Errno(0x66) + ENETUNREACH = syscall.Errno(0x65) + ENFILE = syscall.Errno(0x17) + ENOANO = syscall.Errno(0x37) + ENOBUFS = syscall.Errno(0x69) + ENOCSI = syscall.Errno(0x32) + ENODATA = syscall.Errno(0x3d) + ENODEV = syscall.Errno(0x13) + ENOENT = syscall.Errno(0x2) + ENOEXEC = syscall.Errno(0x8) + ENOKEY = syscall.Errno(0x7e) + ENOLCK = syscall.Errno(0x25) + ENOLINK = syscall.Errno(0x43) + ENOMEDIUM = syscall.Errno(0x7b) + ENOMEM = syscall.Errno(0xc) + ENOMSG = syscall.Errno(0x2a) + ENONET = syscall.Errno(0x40) + ENOPKG = syscall.Errno(0x41) + ENOPROTOOPT = syscall.Errno(0x5c) + ENOSPC = syscall.Errno(0x1c) + ENOSR = syscall.Errno(0x3f) + ENOSTR = syscall.Errno(0x3c) + ENOSYS = syscall.Errno(0x26) + ENOTBLK = syscall.Errno(0xf) + ENOTCONN = syscall.Errno(0x6b) + ENOTDIR = syscall.Errno(0x14) + ENOTEMPTY = syscall.Errno(0x27) + ENOTNAM = syscall.Errno(0x76) + ENOTRECOVERABLE = syscall.Errno(0x83) + ENOTSOCK = syscall.Errno(0x58) + ENOTSUP = syscall.Errno(0x5f) + ENOTTY = syscall.Errno(0x19) + ENOTUNIQ = syscall.Errno(0x4c) + ENXIO = syscall.Errno(0x6) + EOPNOTSUPP = syscall.Errno(0x5f) + EOVERFLOW = syscall.Errno(0x4b) + EOWNERDEAD = syscall.Errno(0x82) + EPERM = syscall.Errno(0x1) + EPFNOSUPPORT = syscall.Errno(0x60) + EPIPE = syscall.Errno(0x20) + EPROTO = syscall.Errno(0x47) + EPROTONOSUPPORT = syscall.Errno(0x5d) + 
EPROTOTYPE = syscall.Errno(0x5b) + ERANGE = syscall.Errno(0x22) + EREMCHG = syscall.Errno(0x4e) + EREMOTE = syscall.Errno(0x42) + EREMOTEIO = syscall.Errno(0x79) + ERESTART = syscall.Errno(0x55) + ERFKILL = syscall.Errno(0x84) + EROFS = syscall.Errno(0x1e) + ESHUTDOWN = syscall.Errno(0x6c) + ESOCKTNOSUPPORT = syscall.Errno(0x5e) + ESPIPE = syscall.Errno(0x1d) + ESRCH = syscall.Errno(0x3) + ESRMNT = syscall.Errno(0x45) + ESTALE = syscall.Errno(0x74) + ESTRPIPE = syscall.Errno(0x56) + ETIME = syscall.Errno(0x3e) + ETIMEDOUT = syscall.Errno(0x6e) + ETOOMANYREFS = syscall.Errno(0x6d) + ETXTBSY = syscall.Errno(0x1a) + EUCLEAN = syscall.Errno(0x75) + EUNATCH = syscall.Errno(0x31) + EUSERS = syscall.Errno(0x57) + EWOULDBLOCK = syscall.Errno(0xb) + EXDEV = syscall.Errno(0x12) + EXFULL = syscall.Errno(0x36) +) + +// Signals +const ( + SIGABRT = syscall.Signal(0x6) + SIGALRM = syscall.Signal(0xe) + SIGBUS = syscall.Signal(0x7) + SIGCHLD = syscall.Signal(0x11) + SIGCLD = syscall.Signal(0x11) + SIGCONT = syscall.Signal(0x12) + SIGFPE = syscall.Signal(0x8) + SIGHUP = syscall.Signal(0x1) + SIGILL = syscall.Signal(0x4) + SIGINT = syscall.Signal(0x2) + SIGIO = syscall.Signal(0x1d) + SIGIOT = syscall.Signal(0x6) + SIGKILL = syscall.Signal(0x9) + SIGPIPE = syscall.Signal(0xd) + SIGPOLL = syscall.Signal(0x1d) + SIGPROF = syscall.Signal(0x1b) + SIGPWR = syscall.Signal(0x1e) + SIGQUIT = syscall.Signal(0x3) + SIGSEGV = syscall.Signal(0xb) + SIGSTKFLT = syscall.Signal(0x10) + SIGSTOP = syscall.Signal(0x13) + SIGSYS = syscall.Signal(0x1f) + SIGTERM = syscall.Signal(0xf) + SIGTRAP = syscall.Signal(0x5) + SIGTSTP = syscall.Signal(0x14) + SIGTTIN = syscall.Signal(0x15) + SIGTTOU = syscall.Signal(0x16) + SIGUNUSED = syscall.Signal(0x1f) + SIGURG = syscall.Signal(0x17) + SIGUSR1 = syscall.Signal(0xa) + SIGUSR2 = syscall.Signal(0xc) + SIGVTALRM = syscall.Signal(0x1a) + SIGWINCH = syscall.Signal(0x1c) + SIGXCPU = syscall.Signal(0x18) + SIGXFSZ = syscall.Signal(0x19) +) + +// Error table +var errors = [...]string{ + 1: "operation not permitted", + 2: "no such file or directory", + 3: "no such process", + 4: "interrupted system call", + 5: "input/output error", + 6: "no such device or address", + 7: "argument list too long", + 8: "exec format error", + 9: "bad file descriptor", + 10: "no child processes", + 11: "resource temporarily unavailable", + 12: "cannot allocate memory", + 13: "permission denied", + 14: "bad address", + 15: "block device required", + 16: "device or resource busy", + 17: "file exists", + 18: "invalid cross-device link", + 19: "no such device", + 20: "not a directory", + 21: "is a directory", + 22: "invalid argument", + 23: "too many open files in system", + 24: "too many open files", + 25: "inappropriate ioctl for device", + 26: "text file busy", + 27: "file too large", + 28: "no space left on device", + 29: "illegal seek", + 30: "read-only file system", + 31: "too many links", + 32: "broken pipe", + 33: "numerical argument out of domain", + 34: "numerical result out of range", + 35: "resource deadlock avoided", + 36: "file name too long", + 37: "no locks available", + 38: "function not implemented", + 39: "directory not empty", + 40: "too many levels of symbolic links", + 42: "no message of desired type", + 43: "identifier removed", + 44: "channel number out of range", + 45: "level 2 not synchronized", + 46: "level 3 halted", + 47: "level 3 reset", + 48: "link number out of range", + 49: "protocol driver not attached", + 50: "no CSI structure available", + 51: "level 2 halted", + 52: "invalid 
exchange", + 53: "invalid request descriptor", + 54: "exchange full", + 55: "no anode", + 56: "invalid request code", + 57: "invalid slot", + 59: "bad font file format", + 60: "device not a stream", + 61: "no data available", + 62: "timer expired", + 63: "out of streams resources", + 64: "machine is not on the network", + 65: "package not installed", + 66: "object is remote", + 67: "link has been severed", + 68: "advertise error", + 69: "srmount error", + 70: "communication error on send", + 71: "protocol error", + 72: "multihop attempted", + 73: "RFS specific error", + 74: "bad message", + 75: "value too large for defined data type", + 76: "name not unique on network", + 77: "file descriptor in bad state", + 78: "remote address changed", + 79: "can not access a needed shared library", + 80: "accessing a corrupted shared library", + 81: ".lib section in a.out corrupted", + 82: "attempting to link in too many shared libraries", + 83: "cannot exec a shared library directly", + 84: "invalid or incomplete multibyte or wide character", + 85: "interrupted system call should be restarted", + 86: "streams pipe error", + 87: "too many users", + 88: "socket operation on non-socket", + 89: "destination address required", + 90: "message too long", + 91: "protocol wrong type for socket", + 92: "protocol not available", + 93: "protocol not supported", + 94: "socket type not supported", + 95: "operation not supported", + 96: "protocol family not supported", + 97: "address family not supported by protocol", + 98: "address already in use", + 99: "cannot assign requested address", + 100: "network is down", + 101: "network is unreachable", + 102: "network dropped connection on reset", + 103: "software caused connection abort", + 104: "connection reset by peer", + 105: "no buffer space available", + 106: "transport endpoint is already connected", + 107: "transport endpoint is not connected", + 108: "cannot send after transport endpoint shutdown", + 109: "too many references: cannot splice", + 110: "connection timed out", + 111: "connection refused", + 112: "host is down", + 113: "no route to host", + 114: "operation already in progress", + 115: "operation now in progress", + 116: "stale NFS file handle", + 117: "structure needs cleaning", + 118: "not a XENIX named type file", + 119: "no XENIX semaphores available", + 120: "is a named type file", + 121: "remote I/O error", + 122: "disk quota exceeded", + 123: "no medium found", + 124: "wrong medium type", + 125: "operation canceled", + 126: "required key not available", + 127: "key has expired", + 128: "key has been revoked", + 129: "key was rejected by service", + 130: "owner died", + 131: "state not recoverable", + 132: "operation not possible due to RF-kill", + 133: "unknown error 133", +} + +// Signal table +var signals = [...]string{ + 1: "hangup", + 2: "interrupt", + 3: "quit", + 4: "illegal instruction", + 5: "trace/breakpoint trap", + 6: "aborted", + 7: "bus error", + 8: "floating point exception", + 9: "killed", + 10: "user defined signal 1", + 11: "segmentation fault", + 12: "user defined signal 2", + 13: "broken pipe", + 14: "alarm clock", + 15: "terminated", + 16: "stack fault", + 17: "child exited", + 18: "continued", + 19: "stopped (signal)", + 20: "stopped", + 21: "stopped (tty input)", + 22: "stopped (tty output)", + 23: "urgent I/O condition", + 24: "CPU time limit exceeded", + 25: "file size limit exceeded", + 26: "virtual timer expired", + 27: "profiling timer expired", + 28: "window changed", + 29: "I/O possible", + 30: "power failure", + 
31: "bad system call", +} diff --git a/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zerrors_linux_amd64.go b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zerrors_linux_amd64.go new file mode 100644 index 000000000..b83fb40b3 --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zerrors_linux_amd64.go @@ -0,0 +1,1818 @@ +// mkerrors.sh -m64 +// MACHINE GENERATED BY THE COMMAND ABOVE; DO NOT EDIT + +// +build amd64,linux + +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs -- -m64 _const.go + +package unix + +import "syscall" + +const ( + AF_ALG = 0x26 + AF_APPLETALK = 0x5 + AF_ASH = 0x12 + AF_ATMPVC = 0x8 + AF_ATMSVC = 0x14 + AF_AX25 = 0x3 + AF_BLUETOOTH = 0x1f + AF_BRIDGE = 0x7 + AF_CAIF = 0x25 + AF_CAN = 0x1d + AF_DECnet = 0xc + AF_ECONET = 0x13 + AF_FILE = 0x1 + AF_IEEE802154 = 0x24 + AF_INET = 0x2 + AF_INET6 = 0xa + AF_IPX = 0x4 + AF_IRDA = 0x17 + AF_ISDN = 0x22 + AF_IUCV = 0x20 + AF_KEY = 0xf + AF_LLC = 0x1a + AF_LOCAL = 0x1 + AF_MAX = 0x28 + AF_NETBEUI = 0xd + AF_NETLINK = 0x10 + AF_NETROM = 0x6 + AF_NFC = 0x27 + AF_PACKET = 0x11 + AF_PHONET = 0x23 + AF_PPPOX = 0x18 + AF_RDS = 0x15 + AF_ROSE = 0xb + AF_ROUTE = 0x10 + AF_RXRPC = 0x21 + AF_SECURITY = 0xe + AF_SNA = 0x16 + AF_TIPC = 0x1e + AF_UNIX = 0x1 + AF_UNSPEC = 0x0 + AF_WANPIPE = 0x19 + AF_X25 = 0x9 + ARPHRD_ADAPT = 0x108 + ARPHRD_APPLETLK = 0x8 + ARPHRD_ARCNET = 0x7 + ARPHRD_ASH = 0x30d + ARPHRD_ATM = 0x13 + ARPHRD_AX25 = 0x3 + ARPHRD_BIF = 0x307 + ARPHRD_CAIF = 0x336 + ARPHRD_CAN = 0x118 + ARPHRD_CHAOS = 0x5 + ARPHRD_CISCO = 0x201 + ARPHRD_CSLIP = 0x101 + ARPHRD_CSLIP6 = 0x103 + ARPHRD_DDCMP = 0x205 + ARPHRD_DLCI = 0xf + ARPHRD_ECONET = 0x30e + ARPHRD_EETHER = 0x2 + ARPHRD_ETHER = 0x1 + ARPHRD_EUI64 = 0x1b + ARPHRD_FCAL = 0x311 + ARPHRD_FCFABRIC = 0x313 + ARPHRD_FCPL = 0x312 + ARPHRD_FCPP = 0x310 + ARPHRD_FDDI = 0x306 + ARPHRD_FRAD = 0x302 + ARPHRD_HDLC = 0x201 + ARPHRD_HIPPI = 0x30c + ARPHRD_HWX25 = 0x110 + ARPHRD_IEEE1394 = 0x18 + ARPHRD_IEEE802 = 0x6 + ARPHRD_IEEE80211 = 0x321 + ARPHRD_IEEE80211_PRISM = 0x322 + ARPHRD_IEEE80211_RADIOTAP = 0x323 + ARPHRD_IEEE802154 = 0x324 + ARPHRD_IEEE802_TR = 0x320 + ARPHRD_INFINIBAND = 0x20 + ARPHRD_IPDDP = 0x309 + ARPHRD_IPGRE = 0x30a + ARPHRD_IRDA = 0x30f + ARPHRD_LAPB = 0x204 + ARPHRD_LOCALTLK = 0x305 + ARPHRD_LOOPBACK = 0x304 + ARPHRD_METRICOM = 0x17 + ARPHRD_NETROM = 0x0 + ARPHRD_NONE = 0xfffe + ARPHRD_PHONET = 0x334 + ARPHRD_PHONET_PIPE = 0x335 + ARPHRD_PIMREG = 0x30b + ARPHRD_PPP = 0x200 + ARPHRD_PRONET = 0x4 + ARPHRD_RAWHDLC = 0x206 + ARPHRD_ROSE = 0x10e + ARPHRD_RSRVD = 0x104 + ARPHRD_SIT = 0x308 + ARPHRD_SKIP = 0x303 + ARPHRD_SLIP = 0x100 + ARPHRD_SLIP6 = 0x102 + ARPHRD_TUNNEL = 0x300 + ARPHRD_TUNNEL6 = 0x301 + ARPHRD_VOID = 0xffff + ARPHRD_X25 = 0x10f + B0 = 0x0 + B1000000 = 0x1008 + B110 = 0x3 + B115200 = 0x1002 + B1152000 = 0x1009 + B1200 = 0x9 + B134 = 0x4 + B150 = 0x5 + B1500000 = 0x100a + B1800 = 0xa + B19200 = 0xe + B200 = 0x6 + B2000000 = 0x100b + B230400 = 0x1003 + B2400 = 0xb + B2500000 = 0x100c + B300 = 0x7 + B3000000 = 0x100d + B3500000 = 0x100e + B38400 = 0xf + B4000000 = 0x100f + B460800 = 0x1004 + B4800 = 0xc + B50 = 0x1 + B500000 = 0x1005 + B57600 = 0x1001 + B576000 = 0x1006 + B600 = 0x8 + B75 = 0x2 + B921600 = 0x1007 + B9600 = 0xd + BOTHER = 0x1000 + BPF_A = 0x10 + BPF_ABS = 0x20 + BPF_ADD = 0x0 + BPF_ALU = 0x4 + BPF_AND = 0x50 + BPF_B = 0x10 + BPF_DIV = 0x30 + BPF_H = 0x8 + BPF_IMM = 0x0 + BPF_IND = 0x40 + BPF_JA = 0x0 + BPF_JEQ = 0x10 + BPF_JGE = 0x30 + BPF_JGT = 
0x20 + BPF_JMP = 0x5 + BPF_JSET = 0x40 + BPF_K = 0x0 + BPF_LD = 0x0 + BPF_LDX = 0x1 + BPF_LEN = 0x80 + BPF_LSH = 0x60 + BPF_MAJOR_VERSION = 0x1 + BPF_MAXINSNS = 0x1000 + BPF_MEM = 0x60 + BPF_MEMWORDS = 0x10 + BPF_MINOR_VERSION = 0x1 + BPF_MISC = 0x7 + BPF_MSH = 0xa0 + BPF_MUL = 0x20 + BPF_NEG = 0x80 + BPF_OR = 0x40 + BPF_RET = 0x6 + BPF_RSH = 0x70 + BPF_ST = 0x2 + BPF_STX = 0x3 + BPF_SUB = 0x10 + BPF_TAX = 0x0 + BPF_TXA = 0x80 + BPF_W = 0x0 + BPF_X = 0x8 + BRKINT = 0x2 + BS0 = 0x0 + BS1 = 0x2000 + BSDLY = 0x2000 + CBAUD = 0x100f + CBAUDEX = 0x1000 + CFLUSH = 0xf + CIBAUD = 0x100f0000 + CLOCAL = 0x800 + CLOCK_BOOTTIME = 0x7 + CLOCK_BOOTTIME_ALARM = 0x9 + CLOCK_DEFAULT = 0x0 + CLOCK_EXT = 0x1 + CLOCK_INT = 0x2 + CLOCK_MONOTONIC = 0x1 + CLOCK_MONOTONIC_COARSE = 0x6 + CLOCK_MONOTONIC_RAW = 0x4 + CLOCK_PROCESS_CPUTIME_ID = 0x2 + CLOCK_REALTIME = 0x0 + CLOCK_REALTIME_ALARM = 0x8 + CLOCK_REALTIME_COARSE = 0x5 + CLOCK_THREAD_CPUTIME_ID = 0x3 + CLOCK_TXFROMRX = 0x4 + CLOCK_TXINT = 0x3 + CLONE_CHILD_CLEARTID = 0x200000 + CLONE_CHILD_SETTID = 0x1000000 + CLONE_DETACHED = 0x400000 + CLONE_FILES = 0x400 + CLONE_FS = 0x200 + CLONE_IO = 0x80000000 + CLONE_NEWIPC = 0x8000000 + CLONE_NEWNET = 0x40000000 + CLONE_NEWNS = 0x20000 + CLONE_NEWPID = 0x20000000 + CLONE_NEWUSER = 0x10000000 + CLONE_NEWUTS = 0x4000000 + CLONE_PARENT = 0x8000 + CLONE_PARENT_SETTID = 0x100000 + CLONE_PTRACE = 0x2000 + CLONE_SETTLS = 0x80000 + CLONE_SIGHAND = 0x800 + CLONE_SYSVSEM = 0x40000 + CLONE_THREAD = 0x10000 + CLONE_UNTRACED = 0x800000 + CLONE_VFORK = 0x4000 + CLONE_VM = 0x100 + CMSPAR = 0x40000000 + CR0 = 0x0 + CR1 = 0x200 + CR2 = 0x400 + CR3 = 0x600 + CRDLY = 0x600 + CREAD = 0x80 + CRTSCTS = 0x80000000 + CS5 = 0x0 + CS6 = 0x10 + CS7 = 0x20 + CS8 = 0x30 + CSIGNAL = 0xff + CSIZE = 0x30 + CSTART = 0x11 + CSTATUS = 0x0 + CSTOP = 0x13 + CSTOPB = 0x40 + CSUSP = 0x1a + DT_BLK = 0x6 + DT_CHR = 0x2 + DT_DIR = 0x4 + DT_FIFO = 0x1 + DT_LNK = 0xa + DT_REG = 0x8 + DT_SOCK = 0xc + DT_UNKNOWN = 0x0 + DT_WHT = 0xe + ECHO = 0x8 + ECHOCTL = 0x200 + ECHOE = 0x10 + ECHOK = 0x20 + ECHOKE = 0x800 + ECHONL = 0x40 + ECHOPRT = 0x400 + ENCODING_DEFAULT = 0x0 + ENCODING_FM_MARK = 0x3 + ENCODING_FM_SPACE = 0x4 + ENCODING_MANCHESTER = 0x5 + ENCODING_NRZ = 0x1 + ENCODING_NRZI = 0x2 + EPOLLERR = 0x8 + EPOLLET = 0x80000000 + EPOLLHUP = 0x10 + EPOLLIN = 0x1 + EPOLLMSG = 0x400 + EPOLLONESHOT = 0x40000000 + EPOLLOUT = 0x4 + EPOLLPRI = 0x2 + EPOLLRDBAND = 0x80 + EPOLLRDHUP = 0x2000 + EPOLLRDNORM = 0x40 + EPOLLWRBAND = 0x200 + EPOLLWRNORM = 0x100 + EPOLL_CLOEXEC = 0x80000 + EPOLL_CTL_ADD = 0x1 + EPOLL_CTL_DEL = 0x2 + EPOLL_CTL_MOD = 0x3 + EPOLL_NONBLOCK = 0x800 + ETH_P_1588 = 0x88f7 + ETH_P_8021AD = 0x88a8 + ETH_P_8021AH = 0x88e7 + ETH_P_8021Q = 0x8100 + ETH_P_802_2 = 0x4 + ETH_P_802_3 = 0x1 + ETH_P_AARP = 0x80f3 + ETH_P_AF_IUCV = 0xfbfb + ETH_P_ALL = 0x3 + ETH_P_AOE = 0x88a2 + ETH_P_ARCNET = 0x1a + ETH_P_ARP = 0x806 + ETH_P_ATALK = 0x809b + ETH_P_ATMFATE = 0x8884 + ETH_P_ATMMPOA = 0x884c + ETH_P_AX25 = 0x2 + ETH_P_BPQ = 0x8ff + ETH_P_CAIF = 0xf7 + ETH_P_CAN = 0xc + ETH_P_CONTROL = 0x16 + ETH_P_CUST = 0x6006 + ETH_P_DDCMP = 0x6 + ETH_P_DEC = 0x6000 + ETH_P_DIAG = 0x6005 + ETH_P_DNA_DL = 0x6001 + ETH_P_DNA_RC = 0x6002 + ETH_P_DNA_RT = 0x6003 + ETH_P_DSA = 0x1b + ETH_P_ECONET = 0x18 + ETH_P_EDSA = 0xdada + ETH_P_FCOE = 0x8906 + ETH_P_FIP = 0x8914 + ETH_P_HDLC = 0x19 + ETH_P_IEEE802154 = 0xf6 + ETH_P_IEEEPUP = 0xa00 + ETH_P_IEEEPUPAT = 0xa01 + ETH_P_IP = 0x800 + ETH_P_IPV6 = 0x86dd + ETH_P_IPX = 0x8137 + ETH_P_IRDA = 0x17 + ETH_P_LAT = 0x6004 + ETH_P_LINK_CTL = 
0x886c + ETH_P_LOCALTALK = 0x9 + ETH_P_LOOP = 0x60 + ETH_P_MOBITEX = 0x15 + ETH_P_MPLS_MC = 0x8848 + ETH_P_MPLS_UC = 0x8847 + ETH_P_PAE = 0x888e + ETH_P_PAUSE = 0x8808 + ETH_P_PHONET = 0xf5 + ETH_P_PPPTALK = 0x10 + ETH_P_PPP_DISC = 0x8863 + ETH_P_PPP_MP = 0x8 + ETH_P_PPP_SES = 0x8864 + ETH_P_PUP = 0x200 + ETH_P_PUPAT = 0x201 + ETH_P_QINQ1 = 0x9100 + ETH_P_QINQ2 = 0x9200 + ETH_P_QINQ3 = 0x9300 + ETH_P_RARP = 0x8035 + ETH_P_SCA = 0x6007 + ETH_P_SLOW = 0x8809 + ETH_P_SNAP = 0x5 + ETH_P_TDLS = 0x890d + ETH_P_TEB = 0x6558 + ETH_P_TIPC = 0x88ca + ETH_P_TRAILER = 0x1c + ETH_P_TR_802_2 = 0x11 + ETH_P_WAN_PPP = 0x7 + ETH_P_WCCP = 0x883e + ETH_P_X25 = 0x805 + EXTA = 0xe + EXTB = 0xf + EXTPROC = 0x10000 + FD_CLOEXEC = 0x1 + FD_SETSIZE = 0x400 + FF0 = 0x0 + FF1 = 0x8000 + FFDLY = 0x8000 + FLUSHO = 0x1000 + F_DUPFD = 0x0 + F_DUPFD_CLOEXEC = 0x406 + F_EXLCK = 0x4 + F_GETFD = 0x1 + F_GETFL = 0x3 + F_GETLEASE = 0x401 + F_GETLK = 0x5 + F_GETLK64 = 0x5 + F_GETOWN = 0x9 + F_GETOWN_EX = 0x10 + F_GETPIPE_SZ = 0x408 + F_GETSIG = 0xb + F_LOCK = 0x1 + F_NOTIFY = 0x402 + F_OK = 0x0 + F_RDLCK = 0x0 + F_SETFD = 0x2 + F_SETFL = 0x4 + F_SETLEASE = 0x400 + F_SETLK = 0x6 + F_SETLK64 = 0x6 + F_SETLKW = 0x7 + F_SETLKW64 = 0x7 + F_SETOWN = 0x8 + F_SETOWN_EX = 0xf + F_SETPIPE_SZ = 0x407 + F_SETSIG = 0xa + F_SHLCK = 0x8 + F_TEST = 0x3 + F_TLOCK = 0x2 + F_ULOCK = 0x0 + F_UNLCK = 0x2 + F_WRLCK = 0x1 + HUPCL = 0x400 + IBSHIFT = 0x10 + ICANON = 0x2 + ICMPV6_FILTER = 0x1 + ICRNL = 0x100 + IEXTEN = 0x8000 + IFA_F_DADFAILED = 0x8 + IFA_F_DEPRECATED = 0x20 + IFA_F_HOMEADDRESS = 0x10 + IFA_F_NODAD = 0x2 + IFA_F_OPTIMISTIC = 0x4 + IFA_F_PERMANENT = 0x80 + IFA_F_SECONDARY = 0x1 + IFA_F_TEMPORARY = 0x1 + IFA_F_TENTATIVE = 0x40 + IFA_MAX = 0x7 + IFF_802_1Q_VLAN = 0x1 + IFF_ALLMULTI = 0x200 + IFF_AUTOMEDIA = 0x4000 + IFF_BONDING = 0x20 + IFF_BRIDGE_PORT = 0x4000 + IFF_BROADCAST = 0x2 + IFF_DEBUG = 0x4 + IFF_DISABLE_NETPOLL = 0x1000 + IFF_DONT_BRIDGE = 0x800 + IFF_DORMANT = 0x20000 + IFF_DYNAMIC = 0x8000 + IFF_EBRIDGE = 0x2 + IFF_ECHO = 0x40000 + IFF_ISATAP = 0x80 + IFF_LOOPBACK = 0x8 + IFF_LOWER_UP = 0x10000 + IFF_MACVLAN_PORT = 0x2000 + IFF_MASTER = 0x400 + IFF_MASTER_8023AD = 0x8 + IFF_MASTER_ALB = 0x10 + IFF_MASTER_ARPMON = 0x100 + IFF_MULTICAST = 0x1000 + IFF_NOARP = 0x80 + IFF_NOTRAILERS = 0x20 + IFF_NO_PI = 0x1000 + IFF_ONE_QUEUE = 0x2000 + IFF_OVS_DATAPATH = 0x8000 + IFF_POINTOPOINT = 0x10 + IFF_PORTSEL = 0x2000 + IFF_PROMISC = 0x100 + IFF_RUNNING = 0x40 + IFF_SLAVE = 0x800 + IFF_SLAVE_INACTIVE = 0x4 + IFF_SLAVE_NEEDARP = 0x40 + IFF_TAP = 0x2 + IFF_TUN = 0x1 + IFF_TUN_EXCL = 0x8000 + IFF_TX_SKB_SHARING = 0x10000 + IFF_UNICAST_FLT = 0x20000 + IFF_UP = 0x1 + IFF_VNET_HDR = 0x4000 + IFF_VOLATILE = 0x70c5a + IFF_WAN_HDLC = 0x200 + IFF_XMIT_DST_RELEASE = 0x400 + IFNAMSIZ = 0x10 + IGNBRK = 0x1 + IGNCR = 0x80 + IGNPAR = 0x4 + IMAXBEL = 0x2000 + INLCR = 0x40 + INPCK = 0x10 + IN_ACCESS = 0x1 + IN_ALL_EVENTS = 0xfff + IN_ATTRIB = 0x4 + IN_CLASSA_HOST = 0xffffff + IN_CLASSA_MAX = 0x80 + IN_CLASSA_NET = 0xff000000 + IN_CLASSA_NSHIFT = 0x18 + IN_CLASSB_HOST = 0xffff + IN_CLASSB_MAX = 0x10000 + IN_CLASSB_NET = 0xffff0000 + IN_CLASSB_NSHIFT = 0x10 + IN_CLASSC_HOST = 0xff + IN_CLASSC_NET = 0xffffff00 + IN_CLASSC_NSHIFT = 0x8 + IN_CLOEXEC = 0x80000 + IN_CLOSE = 0x18 + IN_CLOSE_NOWRITE = 0x10 + IN_CLOSE_WRITE = 0x8 + IN_CREATE = 0x100 + IN_DELETE = 0x200 + IN_DELETE_SELF = 0x400 + IN_DONT_FOLLOW = 0x2000000 + IN_EXCL_UNLINK = 0x4000000 + IN_IGNORED = 0x8000 + IN_ISDIR = 0x40000000 + IN_LOOPBACKNET = 0x7f + IN_MASK_ADD = 0x20000000 + IN_MODIFY = 0x2 
+ IN_MOVE = 0xc0 + IN_MOVED_FROM = 0x40 + IN_MOVED_TO = 0x80 + IN_MOVE_SELF = 0x800 + IN_NONBLOCK = 0x800 + IN_ONESHOT = 0x80000000 + IN_ONLYDIR = 0x1000000 + IN_OPEN = 0x20 + IN_Q_OVERFLOW = 0x4000 + IN_UNMOUNT = 0x2000 + IPPROTO_AH = 0x33 + IPPROTO_COMP = 0x6c + IPPROTO_DCCP = 0x21 + IPPROTO_DSTOPTS = 0x3c + IPPROTO_EGP = 0x8 + IPPROTO_ENCAP = 0x62 + IPPROTO_ESP = 0x32 + IPPROTO_FRAGMENT = 0x2c + IPPROTO_GRE = 0x2f + IPPROTO_HOPOPTS = 0x0 + IPPROTO_ICMP = 0x1 + IPPROTO_ICMPV6 = 0x3a + IPPROTO_IDP = 0x16 + IPPROTO_IGMP = 0x2 + IPPROTO_IP = 0x0 + IPPROTO_IPIP = 0x4 + IPPROTO_IPV6 = 0x29 + IPPROTO_MTP = 0x5c + IPPROTO_NONE = 0x3b + IPPROTO_PIM = 0x67 + IPPROTO_PUP = 0xc + IPPROTO_RAW = 0xff + IPPROTO_ROUTING = 0x2b + IPPROTO_RSVP = 0x2e + IPPROTO_SCTP = 0x84 + IPPROTO_TCP = 0x6 + IPPROTO_TP = 0x1d + IPPROTO_UDP = 0x11 + IPPROTO_UDPLITE = 0x88 + IPV6_2292DSTOPTS = 0x4 + IPV6_2292HOPLIMIT = 0x8 + IPV6_2292HOPOPTS = 0x3 + IPV6_2292PKTINFO = 0x2 + IPV6_2292PKTOPTIONS = 0x6 + IPV6_2292RTHDR = 0x5 + IPV6_ADDRFORM = 0x1 + IPV6_ADD_MEMBERSHIP = 0x14 + IPV6_AUTHHDR = 0xa + IPV6_CHECKSUM = 0x7 + IPV6_DROP_MEMBERSHIP = 0x15 + IPV6_DSTOPTS = 0x3b + IPV6_HOPLIMIT = 0x34 + IPV6_HOPOPTS = 0x36 + IPV6_IPSEC_POLICY = 0x22 + IPV6_JOIN_ANYCAST = 0x1b + IPV6_JOIN_GROUP = 0x14 + IPV6_LEAVE_ANYCAST = 0x1c + IPV6_LEAVE_GROUP = 0x15 + IPV6_MTU = 0x18 + IPV6_MTU_DISCOVER = 0x17 + IPV6_MULTICAST_HOPS = 0x12 + IPV6_MULTICAST_IF = 0x11 + IPV6_MULTICAST_LOOP = 0x13 + IPV6_NEXTHOP = 0x9 + IPV6_PKTINFO = 0x32 + IPV6_PMTUDISC_DO = 0x2 + IPV6_PMTUDISC_DONT = 0x0 + IPV6_PMTUDISC_PROBE = 0x3 + IPV6_PMTUDISC_WANT = 0x1 + IPV6_RECVDSTOPTS = 0x3a + IPV6_RECVERR = 0x19 + IPV6_RECVHOPLIMIT = 0x33 + IPV6_RECVHOPOPTS = 0x35 + IPV6_RECVPKTINFO = 0x31 + IPV6_RECVRTHDR = 0x38 + IPV6_RECVTCLASS = 0x42 + IPV6_ROUTER_ALERT = 0x16 + IPV6_RTHDR = 0x39 + IPV6_RTHDRDSTOPTS = 0x37 + IPV6_RTHDR_LOOSE = 0x0 + IPV6_RTHDR_STRICT = 0x1 + IPV6_RTHDR_TYPE_0 = 0x0 + IPV6_RXDSTOPTS = 0x3b + IPV6_RXHOPOPTS = 0x36 + IPV6_TCLASS = 0x43 + IPV6_UNICAST_HOPS = 0x10 + IPV6_V6ONLY = 0x1a + IPV6_XFRM_POLICY = 0x23 + IP_ADD_MEMBERSHIP = 0x23 + IP_ADD_SOURCE_MEMBERSHIP = 0x27 + IP_BLOCK_SOURCE = 0x26 + IP_DEFAULT_MULTICAST_LOOP = 0x1 + IP_DEFAULT_MULTICAST_TTL = 0x1 + IP_DF = 0x4000 + IP_DROP_MEMBERSHIP = 0x24 + IP_DROP_SOURCE_MEMBERSHIP = 0x28 + IP_FREEBIND = 0xf + IP_HDRINCL = 0x3 + IP_IPSEC_POLICY = 0x10 + IP_MAXPACKET = 0xffff + IP_MAX_MEMBERSHIPS = 0x14 + IP_MF = 0x2000 + IP_MINTTL = 0x15 + IP_MSFILTER = 0x29 + IP_MSS = 0x240 + IP_MTU = 0xe + IP_MTU_DISCOVER = 0xa + IP_MULTICAST_ALL = 0x31 + IP_MULTICAST_IF = 0x20 + IP_MULTICAST_LOOP = 0x22 + IP_MULTICAST_TTL = 0x21 + IP_OFFMASK = 0x1fff + IP_OPTIONS = 0x4 + IP_ORIGDSTADDR = 0x14 + IP_PASSSEC = 0x12 + IP_PKTINFO = 0x8 + IP_PKTOPTIONS = 0x9 + IP_PMTUDISC = 0xa + IP_PMTUDISC_DO = 0x2 + IP_PMTUDISC_DONT = 0x0 + IP_PMTUDISC_PROBE = 0x3 + IP_PMTUDISC_WANT = 0x1 + IP_RECVERR = 0xb + IP_RECVOPTS = 0x6 + IP_RECVORIGDSTADDR = 0x14 + IP_RECVRETOPTS = 0x7 + IP_RECVTOS = 0xd + IP_RECVTTL = 0xc + IP_RETOPTS = 0x7 + IP_RF = 0x8000 + IP_ROUTER_ALERT = 0x5 + IP_TOS = 0x1 + IP_TRANSPARENT = 0x13 + IP_TTL = 0x2 + IP_UNBLOCK_SOURCE = 0x25 + IP_XFRM_POLICY = 0x11 + ISIG = 0x1 + ISTRIP = 0x20 + IUCLC = 0x200 + IUTF8 = 0x4000 + IXANY = 0x800 + IXOFF = 0x1000 + IXON = 0x400 + LINUX_REBOOT_CMD_CAD_OFF = 0x0 + LINUX_REBOOT_CMD_CAD_ON = 0x89abcdef + LINUX_REBOOT_CMD_HALT = 0xcdef0123 + LINUX_REBOOT_CMD_KEXEC = 0x45584543 + LINUX_REBOOT_CMD_POWER_OFF = 0x4321fedc + LINUX_REBOOT_CMD_RESTART = 0x1234567 + LINUX_REBOOT_CMD_RESTART2 = 
0xa1b2c3d4 + LINUX_REBOOT_CMD_SW_SUSPEND = 0xd000fce2 + LINUX_REBOOT_MAGIC1 = 0xfee1dead + LINUX_REBOOT_MAGIC2 = 0x28121969 + LOCK_EX = 0x2 + LOCK_NB = 0x4 + LOCK_SH = 0x1 + LOCK_UN = 0x8 + MADV_DOFORK = 0xb + MADV_DONTFORK = 0xa + MADV_DONTNEED = 0x4 + MADV_HUGEPAGE = 0xe + MADV_HWPOISON = 0x64 + MADV_MERGEABLE = 0xc + MADV_NOHUGEPAGE = 0xf + MADV_NORMAL = 0x0 + MADV_RANDOM = 0x1 + MADV_REMOVE = 0x9 + MADV_SEQUENTIAL = 0x2 + MADV_UNMERGEABLE = 0xd + MADV_WILLNEED = 0x3 + MAP_32BIT = 0x40 + MAP_ANON = 0x20 + MAP_ANONYMOUS = 0x20 + MAP_DENYWRITE = 0x800 + MAP_EXECUTABLE = 0x1000 + MAP_FILE = 0x0 + MAP_FIXED = 0x10 + MAP_GROWSDOWN = 0x100 + MAP_HUGETLB = 0x40000 + MAP_LOCKED = 0x2000 + MAP_NONBLOCK = 0x10000 + MAP_NORESERVE = 0x4000 + MAP_POPULATE = 0x8000 + MAP_PRIVATE = 0x2 + MAP_SHARED = 0x1 + MAP_STACK = 0x20000 + MAP_TYPE = 0xf + MCL_CURRENT = 0x1 + MCL_FUTURE = 0x2 + MNT_DETACH = 0x2 + MNT_EXPIRE = 0x4 + MNT_FORCE = 0x1 + MSG_CMSG_CLOEXEC = 0x40000000 + MSG_CONFIRM = 0x800 + MSG_CTRUNC = 0x8 + MSG_DONTROUTE = 0x4 + MSG_DONTWAIT = 0x40 + MSG_EOR = 0x80 + MSG_ERRQUEUE = 0x2000 + MSG_FASTOPEN = 0x20000000 + MSG_FIN = 0x200 + MSG_MORE = 0x8000 + MSG_NOSIGNAL = 0x4000 + MSG_OOB = 0x1 + MSG_PEEK = 0x2 + MSG_PROXY = 0x10 + MSG_RST = 0x1000 + MSG_SYN = 0x400 + MSG_TRUNC = 0x20 + MSG_TRYHARD = 0x4 + MSG_WAITALL = 0x100 + MSG_WAITFORONE = 0x10000 + MS_ACTIVE = 0x40000000 + MS_ASYNC = 0x1 + MS_BIND = 0x1000 + MS_DIRSYNC = 0x80 + MS_INVALIDATE = 0x2 + MS_I_VERSION = 0x800000 + MS_KERNMOUNT = 0x400000 + MS_MANDLOCK = 0x40 + MS_MGC_MSK = 0xffff0000 + MS_MGC_VAL = 0xc0ed0000 + MS_MOVE = 0x2000 + MS_NOATIME = 0x400 + MS_NODEV = 0x4 + MS_NODIRATIME = 0x800 + MS_NOEXEC = 0x8 + MS_NOSUID = 0x2 + MS_NOUSER = -0x80000000 + MS_POSIXACL = 0x10000 + MS_PRIVATE = 0x40000 + MS_RDONLY = 0x1 + MS_REC = 0x4000 + MS_RELATIME = 0x200000 + MS_REMOUNT = 0x20 + MS_RMT_MASK = 0x800051 + MS_SHARED = 0x100000 + MS_SILENT = 0x8000 + MS_SLAVE = 0x80000 + MS_STRICTATIME = 0x1000000 + MS_SYNC = 0x4 + MS_SYNCHRONOUS = 0x10 + MS_UNBINDABLE = 0x20000 + NAME_MAX = 0xff + NETLINK_ADD_MEMBERSHIP = 0x1 + NETLINK_AUDIT = 0x9 + NETLINK_BROADCAST_ERROR = 0x4 + NETLINK_CONNECTOR = 0xb + NETLINK_CRYPTO = 0x15 + NETLINK_DNRTMSG = 0xe + NETLINK_DROP_MEMBERSHIP = 0x2 + NETLINK_ECRYPTFS = 0x13 + NETLINK_FIB_LOOKUP = 0xa + NETLINK_FIREWALL = 0x3 + NETLINK_GENERIC = 0x10 + NETLINK_INET_DIAG = 0x4 + NETLINK_IP6_FW = 0xd + NETLINK_ISCSI = 0x8 + NETLINK_KOBJECT_UEVENT = 0xf + NETLINK_NETFILTER = 0xc + NETLINK_NFLOG = 0x5 + NETLINK_NO_ENOBUFS = 0x5 + NETLINK_PKTINFO = 0x3 + NETLINK_RDMA = 0x14 + NETLINK_ROUTE = 0x0 + NETLINK_SCSITRANSPORT = 0x12 + NETLINK_SELINUX = 0x7 + NETLINK_UNUSED = 0x1 + NETLINK_USERSOCK = 0x2 + NETLINK_XFRM = 0x6 + NL0 = 0x0 + NL1 = 0x100 + NLA_ALIGNTO = 0x4 + NLA_F_NESTED = 0x8000 + NLA_F_NET_BYTEORDER = 0x4000 + NLA_HDRLEN = 0x4 + NLDLY = 0x100 + NLMSG_ALIGNTO = 0x4 + NLMSG_DONE = 0x3 + NLMSG_ERROR = 0x2 + NLMSG_HDRLEN = 0x10 + NLMSG_MIN_TYPE = 0x10 + NLMSG_NOOP = 0x1 + NLMSG_OVERRUN = 0x4 + NLM_F_ACK = 0x4 + NLM_F_APPEND = 0x800 + NLM_F_ATOMIC = 0x400 + NLM_F_CREATE = 0x400 + NLM_F_DUMP = 0x300 + NLM_F_DUMP_INTR = 0x10 + NLM_F_ECHO = 0x8 + NLM_F_EXCL = 0x200 + NLM_F_MATCH = 0x200 + NLM_F_MULTI = 0x2 + NLM_F_REPLACE = 0x100 + NLM_F_REQUEST = 0x1 + NLM_F_ROOT = 0x100 + NOFLSH = 0x80 + OCRNL = 0x8 + OFDEL = 0x80 + OFILL = 0x40 + OLCUC = 0x2 + ONLCR = 0x4 + ONLRET = 0x20 + ONOCR = 0x10 + OPOST = 0x1 + O_ACCMODE = 0x3 + O_APPEND = 0x400 + O_ASYNC = 0x2000 + O_CLOEXEC = 0x80000 + O_CREAT = 0x40 + O_DIRECT = 0x4000 + 
O_DIRECTORY = 0x10000 + O_DSYNC = 0x1000 + O_EXCL = 0x80 + O_FSYNC = 0x101000 + O_LARGEFILE = 0x0 + O_NDELAY = 0x800 + O_NOATIME = 0x40000 + O_NOCTTY = 0x100 + O_NOFOLLOW = 0x20000 + O_NONBLOCK = 0x800 + O_PATH = 0x200000 + O_RDONLY = 0x0 + O_RDWR = 0x2 + O_RSYNC = 0x101000 + O_SYNC = 0x101000 + O_TRUNC = 0x200 + O_WRONLY = 0x1 + PACKET_ADD_MEMBERSHIP = 0x1 + PACKET_AUXDATA = 0x8 + PACKET_BROADCAST = 0x1 + PACKET_COPY_THRESH = 0x7 + PACKET_DROP_MEMBERSHIP = 0x2 + PACKET_FANOUT = 0x12 + PACKET_FANOUT_CPU = 0x2 + PACKET_FANOUT_FLAG_DEFRAG = 0x8000 + PACKET_FANOUT_HASH = 0x0 + PACKET_FANOUT_LB = 0x1 + PACKET_FASTROUTE = 0x6 + PACKET_HDRLEN = 0xb + PACKET_HOST = 0x0 + PACKET_LOOPBACK = 0x5 + PACKET_LOSS = 0xe + PACKET_MR_ALLMULTI = 0x2 + PACKET_MR_MULTICAST = 0x0 + PACKET_MR_PROMISC = 0x1 + PACKET_MR_UNICAST = 0x3 + PACKET_MULTICAST = 0x2 + PACKET_ORIGDEV = 0x9 + PACKET_OTHERHOST = 0x3 + PACKET_OUTGOING = 0x4 + PACKET_RECV_OUTPUT = 0x3 + PACKET_RESERVE = 0xc + PACKET_RX_RING = 0x5 + PACKET_STATISTICS = 0x6 + PACKET_TIMESTAMP = 0x11 + PACKET_TX_RING = 0xd + PACKET_TX_TIMESTAMP = 0x10 + PACKET_VERSION = 0xa + PACKET_VNET_HDR = 0xf + PARENB = 0x100 + PARITY_CRC16_PR0 = 0x2 + PARITY_CRC16_PR0_CCITT = 0x4 + PARITY_CRC16_PR1 = 0x3 + PARITY_CRC16_PR1_CCITT = 0x5 + PARITY_CRC32_PR0_CCITT = 0x6 + PARITY_CRC32_PR1_CCITT = 0x7 + PARITY_DEFAULT = 0x0 + PARITY_NONE = 0x1 + PARMRK = 0x8 + PARODD = 0x200 + PENDIN = 0x4000 + PRIO_PGRP = 0x1 + PRIO_PROCESS = 0x0 + PRIO_USER = 0x2 + PROT_EXEC = 0x4 + PROT_GROWSDOWN = 0x1000000 + PROT_GROWSUP = 0x2000000 + PROT_NONE = 0x0 + PROT_READ = 0x1 + PROT_WRITE = 0x2 + PR_CAPBSET_DROP = 0x18 + PR_CAPBSET_READ = 0x17 + PR_ENDIAN_BIG = 0x0 + PR_ENDIAN_LITTLE = 0x1 + PR_ENDIAN_PPC_LITTLE = 0x2 + PR_FPEMU_NOPRINT = 0x1 + PR_FPEMU_SIGFPE = 0x2 + PR_FP_EXC_ASYNC = 0x2 + PR_FP_EXC_DISABLED = 0x0 + PR_FP_EXC_DIV = 0x10000 + PR_FP_EXC_INV = 0x100000 + PR_FP_EXC_NONRECOV = 0x1 + PR_FP_EXC_OVF = 0x20000 + PR_FP_EXC_PRECISE = 0x3 + PR_FP_EXC_RES = 0x80000 + PR_FP_EXC_SW_ENABLE = 0x80 + PR_FP_EXC_UND = 0x40000 + PR_GET_DUMPABLE = 0x3 + PR_GET_ENDIAN = 0x13 + PR_GET_FPEMU = 0x9 + PR_GET_FPEXC = 0xb + PR_GET_KEEPCAPS = 0x7 + PR_GET_NAME = 0x10 + PR_GET_NO_NEW_PRIVS = 0x27 + PR_GET_PDEATHSIG = 0x2 + PR_GET_SECCOMP = 0x15 + PR_GET_SECUREBITS = 0x1b + PR_GET_TIMERSLACK = 0x1e + PR_GET_TIMING = 0xd + PR_GET_TSC = 0x19 + PR_GET_UNALIGN = 0x5 + PR_MCE_KILL = 0x21 + PR_MCE_KILL_CLEAR = 0x0 + PR_MCE_KILL_DEFAULT = 0x2 + PR_MCE_KILL_EARLY = 0x1 + PR_MCE_KILL_GET = 0x22 + PR_MCE_KILL_LATE = 0x0 + PR_MCE_KILL_SET = 0x1 + PR_SET_DUMPABLE = 0x4 + PR_SET_ENDIAN = 0x14 + PR_SET_FPEMU = 0xa + PR_SET_FPEXC = 0xc + PR_SET_KEEPCAPS = 0x8 + PR_SET_MM = 0x23 + PR_SET_MM_BRK = 0x7 + PR_SET_MM_END_CODE = 0x2 + PR_SET_MM_END_DATA = 0x4 + PR_SET_MM_START_BRK = 0x6 + PR_SET_MM_START_CODE = 0x1 + PR_SET_MM_START_DATA = 0x3 + PR_SET_MM_START_STACK = 0x5 + PR_SET_NAME = 0xf + PR_SET_NO_NEW_PRIVS = 0x26 + PR_SET_PDEATHSIG = 0x1 + PR_SET_PTRACER = 0x59616d61 + PR_SET_PTRACER_ANY = -0x1 + PR_SET_SECCOMP = 0x16 + PR_SET_SECUREBITS = 0x1c + PR_SET_TIMERSLACK = 0x1d + PR_SET_TIMING = 0xe + PR_SET_TSC = 0x1a + PR_SET_UNALIGN = 0x6 + PR_TASK_PERF_EVENTS_DISABLE = 0x1f + PR_TASK_PERF_EVENTS_ENABLE = 0x20 + PR_TIMING_STATISTICAL = 0x0 + PR_TIMING_TIMESTAMP = 0x1 + PR_TSC_ENABLE = 0x1 + PR_TSC_SIGSEGV = 0x2 + PR_UNALIGN_NOPRINT = 0x1 + PR_UNALIGN_SIGBUS = 0x2 + PTRACE_ARCH_PRCTL = 0x1e + PTRACE_ATTACH = 0x10 + PTRACE_CONT = 0x7 + PTRACE_DETACH = 0x11 + PTRACE_EVENT_CLONE = 0x3 + PTRACE_EVENT_EXEC = 0x4 + PTRACE_EVENT_EXIT = 
0x6 + PTRACE_EVENT_FORK = 0x1 + PTRACE_EVENT_SECCOMP = 0x7 + PTRACE_EVENT_STOP = 0x80 + PTRACE_EVENT_VFORK = 0x2 + PTRACE_EVENT_VFORK_DONE = 0x5 + PTRACE_GETEVENTMSG = 0x4201 + PTRACE_GETFPREGS = 0xe + PTRACE_GETFPXREGS = 0x12 + PTRACE_GETREGS = 0xc + PTRACE_GETREGSET = 0x4204 + PTRACE_GETSIGINFO = 0x4202 + PTRACE_GET_THREAD_AREA = 0x19 + PTRACE_INTERRUPT = 0x4207 + PTRACE_KILL = 0x8 + PTRACE_LISTEN = 0x4208 + PTRACE_OLDSETOPTIONS = 0x15 + PTRACE_O_MASK = 0xff + PTRACE_O_TRACECLONE = 0x8 + PTRACE_O_TRACEEXEC = 0x10 + PTRACE_O_TRACEEXIT = 0x40 + PTRACE_O_TRACEFORK = 0x2 + PTRACE_O_TRACESECCOMP = 0x80 + PTRACE_O_TRACESYSGOOD = 0x1 + PTRACE_O_TRACEVFORK = 0x4 + PTRACE_O_TRACEVFORKDONE = 0x20 + PTRACE_PEEKDATA = 0x2 + PTRACE_PEEKTEXT = 0x1 + PTRACE_PEEKUSR = 0x3 + PTRACE_POKEDATA = 0x5 + PTRACE_POKETEXT = 0x4 + PTRACE_POKEUSR = 0x6 + PTRACE_SEIZE = 0x4206 + PTRACE_SEIZE_DEVEL = 0x80000000 + PTRACE_SETFPREGS = 0xf + PTRACE_SETFPXREGS = 0x13 + PTRACE_SETOPTIONS = 0x4200 + PTRACE_SETREGS = 0xd + PTRACE_SETREGSET = 0x4205 + PTRACE_SETSIGINFO = 0x4203 + PTRACE_SET_THREAD_AREA = 0x1a + PTRACE_SINGLEBLOCK = 0x21 + PTRACE_SINGLESTEP = 0x9 + PTRACE_SYSCALL = 0x18 + PTRACE_SYSEMU = 0x1f + PTRACE_SYSEMU_SINGLESTEP = 0x20 + PTRACE_TRACEME = 0x0 + RLIMIT_AS = 0x9 + RLIMIT_CORE = 0x4 + RLIMIT_CPU = 0x0 + RLIMIT_DATA = 0x2 + RLIMIT_FSIZE = 0x1 + RLIMIT_NOFILE = 0x7 + RLIMIT_STACK = 0x3 + RLIM_INFINITY = -0x1 + RTAX_ADVMSS = 0x8 + RTAX_CWND = 0x7 + RTAX_FEATURES = 0xc + RTAX_FEATURE_ALLFRAG = 0x8 + RTAX_FEATURE_ECN = 0x1 + RTAX_FEATURE_SACK = 0x2 + RTAX_FEATURE_TIMESTAMP = 0x4 + RTAX_HOPLIMIT = 0xa + RTAX_INITCWND = 0xb + RTAX_INITRWND = 0xe + RTAX_LOCK = 0x1 + RTAX_MAX = 0xe + RTAX_MTU = 0x2 + RTAX_REORDERING = 0x9 + RTAX_RTO_MIN = 0xd + RTAX_RTT = 0x4 + RTAX_RTTVAR = 0x5 + RTAX_SSTHRESH = 0x6 + RTAX_UNSPEC = 0x0 + RTAX_WINDOW = 0x3 + RTA_ALIGNTO = 0x4 + RTA_MAX = 0x10 + RTCF_DIRECTSRC = 0x4000000 + RTCF_DOREDIRECT = 0x1000000 + RTCF_LOG = 0x2000000 + RTCF_MASQ = 0x400000 + RTCF_NAT = 0x800000 + RTCF_VALVE = 0x200000 + RTF_ADDRCLASSMASK = 0xf8000000 + RTF_ADDRCONF = 0x40000 + RTF_ALLONLINK = 0x20000 + RTF_BROADCAST = 0x10000000 + RTF_CACHE = 0x1000000 + RTF_DEFAULT = 0x10000 + RTF_DYNAMIC = 0x10 + RTF_FLOW = 0x2000000 + RTF_GATEWAY = 0x2 + RTF_HOST = 0x4 + RTF_INTERFACE = 0x40000000 + RTF_IRTT = 0x100 + RTF_LINKRT = 0x100000 + RTF_LOCAL = 0x80000000 + RTF_MODIFIED = 0x20 + RTF_MSS = 0x40 + RTF_MTU = 0x40 + RTF_MULTICAST = 0x20000000 + RTF_NAT = 0x8000000 + RTF_NOFORWARD = 0x1000 + RTF_NONEXTHOP = 0x200000 + RTF_NOPMTUDISC = 0x4000 + RTF_POLICY = 0x4000000 + RTF_REINSTATE = 0x8 + RTF_REJECT = 0x200 + RTF_STATIC = 0x400 + RTF_THROW = 0x2000 + RTF_UP = 0x1 + RTF_WINDOW = 0x80 + RTF_XRESOLVE = 0x800 + RTM_BASE = 0x10 + RTM_DELACTION = 0x31 + RTM_DELADDR = 0x15 + RTM_DELADDRLABEL = 0x49 + RTM_DELLINK = 0x11 + RTM_DELNEIGH = 0x1d + RTM_DELQDISC = 0x25 + RTM_DELROUTE = 0x19 + RTM_DELRULE = 0x21 + RTM_DELTCLASS = 0x29 + RTM_DELTFILTER = 0x2d + RTM_F_CLONED = 0x200 + RTM_F_EQUALIZE = 0x400 + RTM_F_NOTIFY = 0x100 + RTM_F_PREFIX = 0x800 + RTM_GETACTION = 0x32 + RTM_GETADDR = 0x16 + RTM_GETADDRLABEL = 0x4a + RTM_GETANYCAST = 0x3e + RTM_GETDCB = 0x4e + RTM_GETLINK = 0x12 + RTM_GETMULTICAST = 0x3a + RTM_GETNEIGH = 0x1e + RTM_GETNEIGHTBL = 0x42 + RTM_GETQDISC = 0x26 + RTM_GETROUTE = 0x1a + RTM_GETRULE = 0x22 + RTM_GETTCLASS = 0x2a + RTM_GETTFILTER = 0x2e + RTM_MAX = 0x4f + RTM_NEWACTION = 0x30 + RTM_NEWADDR = 0x14 + RTM_NEWADDRLABEL = 0x48 + RTM_NEWLINK = 0x10 + RTM_NEWNDUSEROPT = 0x44 + RTM_NEWNEIGH = 0x1c + 
RTM_NEWNEIGHTBL = 0x40 + RTM_NEWPREFIX = 0x34 + RTM_NEWQDISC = 0x24 + RTM_NEWROUTE = 0x18 + RTM_NEWRULE = 0x20 + RTM_NEWTCLASS = 0x28 + RTM_NEWTFILTER = 0x2c + RTM_NR_FAMILIES = 0x10 + RTM_NR_MSGTYPES = 0x40 + RTM_SETDCB = 0x4f + RTM_SETLINK = 0x13 + RTM_SETNEIGHTBL = 0x43 + RTNH_ALIGNTO = 0x4 + RTNH_F_DEAD = 0x1 + RTNH_F_ONLINK = 0x4 + RTNH_F_PERVASIVE = 0x2 + RTN_MAX = 0xb + RTPROT_BIRD = 0xc + RTPROT_BOOT = 0x3 + RTPROT_DHCP = 0x10 + RTPROT_DNROUTED = 0xd + RTPROT_GATED = 0x8 + RTPROT_KERNEL = 0x2 + RTPROT_MRT = 0xa + RTPROT_NTK = 0xf + RTPROT_RA = 0x9 + RTPROT_REDIRECT = 0x1 + RTPROT_STATIC = 0x4 + RTPROT_UNSPEC = 0x0 + RTPROT_XORP = 0xe + RTPROT_ZEBRA = 0xb + RT_CLASS_DEFAULT = 0xfd + RT_CLASS_LOCAL = 0xff + RT_CLASS_MAIN = 0xfe + RT_CLASS_MAX = 0xff + RT_CLASS_UNSPEC = 0x0 + RUSAGE_CHILDREN = -0x1 + RUSAGE_SELF = 0x0 + RUSAGE_THREAD = 0x1 + SCM_CREDENTIALS = 0x2 + SCM_RIGHTS = 0x1 + SCM_TIMESTAMP = 0x1d + SCM_TIMESTAMPING = 0x25 + SCM_TIMESTAMPNS = 0x23 + SHUT_RD = 0x0 + SHUT_RDWR = 0x2 + SHUT_WR = 0x1 + SIOCADDDLCI = 0x8980 + SIOCADDMULTI = 0x8931 + SIOCADDRT = 0x890b + SIOCATMARK = 0x8905 + SIOCDARP = 0x8953 + SIOCDELDLCI = 0x8981 + SIOCDELMULTI = 0x8932 + SIOCDELRT = 0x890c + SIOCDEVPRIVATE = 0x89f0 + SIOCDIFADDR = 0x8936 + SIOCDRARP = 0x8960 + SIOCGARP = 0x8954 + SIOCGIFADDR = 0x8915 + SIOCGIFBR = 0x8940 + SIOCGIFBRDADDR = 0x8919 + SIOCGIFCONF = 0x8912 + SIOCGIFCOUNT = 0x8938 + SIOCGIFDSTADDR = 0x8917 + SIOCGIFENCAP = 0x8925 + SIOCGIFFLAGS = 0x8913 + SIOCGIFHWADDR = 0x8927 + SIOCGIFINDEX = 0x8933 + SIOCGIFMAP = 0x8970 + SIOCGIFMEM = 0x891f + SIOCGIFMETRIC = 0x891d + SIOCGIFMTU = 0x8921 + SIOCGIFNAME = 0x8910 + SIOCGIFNETMASK = 0x891b + SIOCGIFPFLAGS = 0x8935 + SIOCGIFSLAVE = 0x8929 + SIOCGIFTXQLEN = 0x8942 + SIOCGPGRP = 0x8904 + SIOCGRARP = 0x8961 + SIOCGSTAMP = 0x8906 + SIOCGSTAMPNS = 0x8907 + SIOCPROTOPRIVATE = 0x89e0 + SIOCRTMSG = 0x890d + SIOCSARP = 0x8955 + SIOCSIFADDR = 0x8916 + SIOCSIFBR = 0x8941 + SIOCSIFBRDADDR = 0x891a + SIOCSIFDSTADDR = 0x8918 + SIOCSIFENCAP = 0x8926 + SIOCSIFFLAGS = 0x8914 + SIOCSIFHWADDR = 0x8924 + SIOCSIFHWBROADCAST = 0x8937 + SIOCSIFLINK = 0x8911 + SIOCSIFMAP = 0x8971 + SIOCSIFMEM = 0x8920 + SIOCSIFMETRIC = 0x891e + SIOCSIFMTU = 0x8922 + SIOCSIFNAME = 0x8923 + SIOCSIFNETMASK = 0x891c + SIOCSIFPFLAGS = 0x8934 + SIOCSIFSLAVE = 0x8930 + SIOCSIFTXQLEN = 0x8943 + SIOCSPGRP = 0x8902 + SIOCSRARP = 0x8962 + SOCK_CLOEXEC = 0x80000 + SOCK_DCCP = 0x6 + SOCK_DGRAM = 0x2 + SOCK_NONBLOCK = 0x800 + SOCK_PACKET = 0xa + SOCK_RAW = 0x3 + SOCK_RDM = 0x4 + SOCK_SEQPACKET = 0x5 + SOCK_STREAM = 0x1 + SOL_AAL = 0x109 + SOL_ATM = 0x108 + SOL_DECNET = 0x105 + SOL_ICMPV6 = 0x3a + SOL_IP = 0x0 + SOL_IPV6 = 0x29 + SOL_IRDA = 0x10a + SOL_PACKET = 0x107 + SOL_RAW = 0xff + SOL_SOCKET = 0x1 + SOL_TCP = 0x6 + SOL_X25 = 0x106 + SOMAXCONN = 0x80 + SO_ACCEPTCONN = 0x1e + SO_ATTACH_FILTER = 0x1a + SO_BINDTODEVICE = 0x19 + SO_BROADCAST = 0x6 + SO_BSDCOMPAT = 0xe + SO_DEBUG = 0x1 + SO_DETACH_FILTER = 0x1b + SO_DOMAIN = 0x27 + SO_DONTROUTE = 0x5 + SO_ERROR = 0x4 + SO_KEEPALIVE = 0x9 + SO_LINGER = 0xd + SO_MARK = 0x24 + SO_NO_CHECK = 0xb + SO_OOBINLINE = 0xa + SO_PASSCRED = 0x10 + SO_PASSSEC = 0x22 + SO_PEERCRED = 0x11 + SO_PEERNAME = 0x1c + SO_PEERSEC = 0x1f + SO_PRIORITY = 0xc + SO_PROTOCOL = 0x26 + SO_RCVBUF = 0x8 + SO_RCVBUFFORCE = 0x21 + SO_RCVLOWAT = 0x12 + SO_RCVTIMEO = 0x14 + SO_REUSEADDR = 0x2 + SO_RXQ_OVFL = 0x28 + SO_SECURITY_AUTHENTICATION = 0x16 + SO_SECURITY_ENCRYPTION_NETWORK = 0x18 + SO_SECURITY_ENCRYPTION_TRANSPORT = 0x17 + SO_SNDBUF = 0x7 + SO_SNDBUFFORCE = 0x20 + 
SO_SNDLOWAT = 0x13 + SO_SNDTIMEO = 0x15 + SO_TIMESTAMP = 0x1d + SO_TIMESTAMPING = 0x25 + SO_TIMESTAMPNS = 0x23 + SO_TYPE = 0x3 + S_BLKSIZE = 0x200 + S_IEXEC = 0x40 + S_IFBLK = 0x6000 + S_IFCHR = 0x2000 + S_IFDIR = 0x4000 + S_IFIFO = 0x1000 + S_IFLNK = 0xa000 + S_IFMT = 0xf000 + S_IFREG = 0x8000 + S_IFSOCK = 0xc000 + S_IREAD = 0x100 + S_IRGRP = 0x20 + S_IROTH = 0x4 + S_IRUSR = 0x100 + S_IRWXG = 0x38 + S_IRWXO = 0x7 + S_IRWXU = 0x1c0 + S_ISGID = 0x400 + S_ISUID = 0x800 + S_ISVTX = 0x200 + S_IWGRP = 0x10 + S_IWOTH = 0x2 + S_IWRITE = 0x80 + S_IWUSR = 0x80 + S_IXGRP = 0x8 + S_IXOTH = 0x1 + S_IXUSR = 0x40 + TAB0 = 0x0 + TAB1 = 0x800 + TAB2 = 0x1000 + TAB3 = 0x1800 + TABDLY = 0x1800 + TCFLSH = 0x540b + TCGETA = 0x5405 + TCGETS = 0x5401 + TCGETS2 = 0x802c542a + TCGETX = 0x5432 + TCIFLUSH = 0x0 + TCIOFF = 0x2 + TCIOFLUSH = 0x2 + TCION = 0x3 + TCOFLUSH = 0x1 + TCOOFF = 0x0 + TCOON = 0x1 + TCP_CONGESTION = 0xd + TCP_CORK = 0x3 + TCP_DEFER_ACCEPT = 0x9 + TCP_INFO = 0xb + TCP_KEEPCNT = 0x6 + TCP_KEEPIDLE = 0x4 + TCP_KEEPINTVL = 0x5 + TCP_LINGER2 = 0x8 + TCP_MAXSEG = 0x2 + TCP_MAXWIN = 0xffff + TCP_MAX_WINSHIFT = 0xe + TCP_MD5SIG = 0xe + TCP_MD5SIG_MAXKEYLEN = 0x50 + TCP_MSS = 0x200 + TCP_NODELAY = 0x1 + TCP_QUICKACK = 0xc + TCP_SYNCNT = 0x7 + TCP_WINDOW_CLAMP = 0xa + TCSAFLUSH = 0x2 + TCSBRK = 0x5409 + TCSBRKP = 0x5425 + TCSETA = 0x5406 + TCSETAF = 0x5408 + TCSETAW = 0x5407 + TCSETS = 0x5402 + TCSETS2 = 0x402c542b + TCSETSF = 0x5404 + TCSETSF2 = 0x402c542d + TCSETSW = 0x5403 + TCSETSW2 = 0x402c542c + TCSETX = 0x5433 + TCSETXF = 0x5434 + TCSETXW = 0x5435 + TCXONC = 0x540a + TIOCCBRK = 0x5428 + TIOCCONS = 0x541d + TIOCEXCL = 0x540c + TIOCGDEV = 0x80045432 + TIOCGETD = 0x5424 + TIOCGEXCL = 0x80045440 + TIOCGICOUNT = 0x545d + TIOCGLCKTRMIOS = 0x5456 + TIOCGPGRP = 0x540f + TIOCGPKT = 0x80045438 + TIOCGPTLCK = 0x80045439 + TIOCGPTN = 0x80045430 + TIOCGRS485 = 0x542e + TIOCGSERIAL = 0x541e + TIOCGSID = 0x5429 + TIOCGSOFTCAR = 0x5419 + TIOCGWINSZ = 0x5413 + TIOCINQ = 0x541b + TIOCLINUX = 0x541c + TIOCMBIC = 0x5417 + TIOCMBIS = 0x5416 + TIOCMGET = 0x5415 + TIOCMIWAIT = 0x545c + TIOCMSET = 0x5418 + TIOCM_CAR = 0x40 + TIOCM_CD = 0x40 + TIOCM_CTS = 0x20 + TIOCM_DSR = 0x100 + TIOCM_DTR = 0x2 + TIOCM_LE = 0x1 + TIOCM_RI = 0x80 + TIOCM_RNG = 0x80 + TIOCM_RTS = 0x4 + TIOCM_SR = 0x10 + TIOCM_ST = 0x8 + TIOCNOTTY = 0x5422 + TIOCNXCL = 0x540d + TIOCOUTQ = 0x5411 + TIOCPKT = 0x5420 + TIOCPKT_DATA = 0x0 + TIOCPKT_DOSTOP = 0x20 + TIOCPKT_FLUSHREAD = 0x1 + TIOCPKT_FLUSHWRITE = 0x2 + TIOCPKT_IOCTL = 0x40 + TIOCPKT_NOSTOP = 0x10 + TIOCPKT_START = 0x8 + TIOCPKT_STOP = 0x4 + TIOCSBRK = 0x5427 + TIOCSCTTY = 0x540e + TIOCSERCONFIG = 0x5453 + TIOCSERGETLSR = 0x5459 + TIOCSERGETMULTI = 0x545a + TIOCSERGSTRUCT = 0x5458 + TIOCSERGWILD = 0x5454 + TIOCSERSETMULTI = 0x545b + TIOCSERSWILD = 0x5455 + TIOCSER_TEMT = 0x1 + TIOCSETD = 0x5423 + TIOCSIG = 0x40045436 + TIOCSLCKTRMIOS = 0x5457 + TIOCSPGRP = 0x5410 + TIOCSPTLCK = 0x40045431 + TIOCSRS485 = 0x542f + TIOCSSERIAL = 0x541f + TIOCSSOFTCAR = 0x541a + TIOCSTI = 0x5412 + TIOCSWINSZ = 0x5414 + TIOCVHANGUP = 0x5437 + TOSTOP = 0x100 + TUNATTACHFILTER = 0x401054d5 + TUNDETACHFILTER = 0x401054d6 + TUNGETFEATURES = 0x800454cf + TUNGETIFF = 0x800454d2 + TUNGETSNDBUF = 0x800454d3 + TUNGETVNETHDRSZ = 0x800454d7 + TUNSETDEBUG = 0x400454c9 + TUNSETGROUP = 0x400454ce + TUNSETIFF = 0x400454ca + TUNSETLINK = 0x400454cd + TUNSETNOCSUM = 0x400454c8 + TUNSETOFFLOAD = 0x400454d0 + TUNSETOWNER = 0x400454cc + TUNSETPERSIST = 0x400454cb + TUNSETSNDBUF = 0x400454d4 + TUNSETTXFILTER = 0x400454d1 + 
TUNSETVNETHDRSZ = 0x400454d8 + VDISCARD = 0xd + VEOF = 0x4 + VEOL = 0xb + VEOL2 = 0x10 + VERASE = 0x2 + VINTR = 0x0 + VKILL = 0x3 + VLNEXT = 0xf + VMIN = 0x6 + VQUIT = 0x1 + VREPRINT = 0xc + VSTART = 0x8 + VSTOP = 0x9 + VSUSP = 0xa + VSWTC = 0x7 + VT0 = 0x0 + VT1 = 0x4000 + VTDLY = 0x4000 + VTIME = 0x5 + VWERASE = 0xe + WALL = 0x40000000 + WCLONE = 0x80000000 + WCONTINUED = 0x8 + WEXITED = 0x4 + WNOHANG = 0x1 + WNOTHREAD = 0x20000000 + WNOWAIT = 0x1000000 + WORDSIZE = 0x40 + WSTOPPED = 0x2 + WUNTRACED = 0x2 + XCASE = 0x4 + XTABS = 0x1800 +) + +// Errors +const ( + E2BIG = syscall.Errno(0x7) + EACCES = syscall.Errno(0xd) + EADDRINUSE = syscall.Errno(0x62) + EADDRNOTAVAIL = syscall.Errno(0x63) + EADV = syscall.Errno(0x44) + EAFNOSUPPORT = syscall.Errno(0x61) + EAGAIN = syscall.Errno(0xb) + EALREADY = syscall.Errno(0x72) + EBADE = syscall.Errno(0x34) + EBADF = syscall.Errno(0x9) + EBADFD = syscall.Errno(0x4d) + EBADMSG = syscall.Errno(0x4a) + EBADR = syscall.Errno(0x35) + EBADRQC = syscall.Errno(0x38) + EBADSLT = syscall.Errno(0x39) + EBFONT = syscall.Errno(0x3b) + EBUSY = syscall.Errno(0x10) + ECANCELED = syscall.Errno(0x7d) + ECHILD = syscall.Errno(0xa) + ECHRNG = syscall.Errno(0x2c) + ECOMM = syscall.Errno(0x46) + ECONNABORTED = syscall.Errno(0x67) + ECONNREFUSED = syscall.Errno(0x6f) + ECONNRESET = syscall.Errno(0x68) + EDEADLK = syscall.Errno(0x23) + EDEADLOCK = syscall.Errno(0x23) + EDESTADDRREQ = syscall.Errno(0x59) + EDOM = syscall.Errno(0x21) + EDOTDOT = syscall.Errno(0x49) + EDQUOT = syscall.Errno(0x7a) + EEXIST = syscall.Errno(0x11) + EFAULT = syscall.Errno(0xe) + EFBIG = syscall.Errno(0x1b) + EHOSTDOWN = syscall.Errno(0x70) + EHOSTUNREACH = syscall.Errno(0x71) + EHWPOISON = syscall.Errno(0x85) + EIDRM = syscall.Errno(0x2b) + EILSEQ = syscall.Errno(0x54) + EINPROGRESS = syscall.Errno(0x73) + EINTR = syscall.Errno(0x4) + EINVAL = syscall.Errno(0x16) + EIO = syscall.Errno(0x5) + EISCONN = syscall.Errno(0x6a) + EISDIR = syscall.Errno(0x15) + EISNAM = syscall.Errno(0x78) + EKEYEXPIRED = syscall.Errno(0x7f) + EKEYREJECTED = syscall.Errno(0x81) + EKEYREVOKED = syscall.Errno(0x80) + EL2HLT = syscall.Errno(0x33) + EL2NSYNC = syscall.Errno(0x2d) + EL3HLT = syscall.Errno(0x2e) + EL3RST = syscall.Errno(0x2f) + ELIBACC = syscall.Errno(0x4f) + ELIBBAD = syscall.Errno(0x50) + ELIBEXEC = syscall.Errno(0x53) + ELIBMAX = syscall.Errno(0x52) + ELIBSCN = syscall.Errno(0x51) + ELNRNG = syscall.Errno(0x30) + ELOOP = syscall.Errno(0x28) + EMEDIUMTYPE = syscall.Errno(0x7c) + EMFILE = syscall.Errno(0x18) + EMLINK = syscall.Errno(0x1f) + EMSGSIZE = syscall.Errno(0x5a) + EMULTIHOP = syscall.Errno(0x48) + ENAMETOOLONG = syscall.Errno(0x24) + ENAVAIL = syscall.Errno(0x77) + ENETDOWN = syscall.Errno(0x64) + ENETRESET = syscall.Errno(0x66) + ENETUNREACH = syscall.Errno(0x65) + ENFILE = syscall.Errno(0x17) + ENOANO = syscall.Errno(0x37) + ENOBUFS = syscall.Errno(0x69) + ENOCSI = syscall.Errno(0x32) + ENODATA = syscall.Errno(0x3d) + ENODEV = syscall.Errno(0x13) + ENOENT = syscall.Errno(0x2) + ENOEXEC = syscall.Errno(0x8) + ENOKEY = syscall.Errno(0x7e) + ENOLCK = syscall.Errno(0x25) + ENOLINK = syscall.Errno(0x43) + ENOMEDIUM = syscall.Errno(0x7b) + ENOMEM = syscall.Errno(0xc) + ENOMSG = syscall.Errno(0x2a) + ENONET = syscall.Errno(0x40) + ENOPKG = syscall.Errno(0x41) + ENOPROTOOPT = syscall.Errno(0x5c) + ENOSPC = syscall.Errno(0x1c) + ENOSR = syscall.Errno(0x3f) + ENOSTR = syscall.Errno(0x3c) + ENOSYS = syscall.Errno(0x26) + ENOTBLK = syscall.Errno(0xf) + ENOTCONN = syscall.Errno(0x6b) + ENOTDIR = 
syscall.Errno(0x14) + ENOTEMPTY = syscall.Errno(0x27) + ENOTNAM = syscall.Errno(0x76) + ENOTRECOVERABLE = syscall.Errno(0x83) + ENOTSOCK = syscall.Errno(0x58) + ENOTSUP = syscall.Errno(0x5f) + ENOTTY = syscall.Errno(0x19) + ENOTUNIQ = syscall.Errno(0x4c) + ENXIO = syscall.Errno(0x6) + EOPNOTSUPP = syscall.Errno(0x5f) + EOVERFLOW = syscall.Errno(0x4b) + EOWNERDEAD = syscall.Errno(0x82) + EPERM = syscall.Errno(0x1) + EPFNOSUPPORT = syscall.Errno(0x60) + EPIPE = syscall.Errno(0x20) + EPROTO = syscall.Errno(0x47) + EPROTONOSUPPORT = syscall.Errno(0x5d) + EPROTOTYPE = syscall.Errno(0x5b) + ERANGE = syscall.Errno(0x22) + EREMCHG = syscall.Errno(0x4e) + EREMOTE = syscall.Errno(0x42) + EREMOTEIO = syscall.Errno(0x79) + ERESTART = syscall.Errno(0x55) + ERFKILL = syscall.Errno(0x84) + EROFS = syscall.Errno(0x1e) + ESHUTDOWN = syscall.Errno(0x6c) + ESOCKTNOSUPPORT = syscall.Errno(0x5e) + ESPIPE = syscall.Errno(0x1d) + ESRCH = syscall.Errno(0x3) + ESRMNT = syscall.Errno(0x45) + ESTALE = syscall.Errno(0x74) + ESTRPIPE = syscall.Errno(0x56) + ETIME = syscall.Errno(0x3e) + ETIMEDOUT = syscall.Errno(0x6e) + ETOOMANYREFS = syscall.Errno(0x6d) + ETXTBSY = syscall.Errno(0x1a) + EUCLEAN = syscall.Errno(0x75) + EUNATCH = syscall.Errno(0x31) + EUSERS = syscall.Errno(0x57) + EWOULDBLOCK = syscall.Errno(0xb) + EXDEV = syscall.Errno(0x12) + EXFULL = syscall.Errno(0x36) +) + +// Signals +const ( + SIGABRT = syscall.Signal(0x6) + SIGALRM = syscall.Signal(0xe) + SIGBUS = syscall.Signal(0x7) + SIGCHLD = syscall.Signal(0x11) + SIGCLD = syscall.Signal(0x11) + SIGCONT = syscall.Signal(0x12) + SIGFPE = syscall.Signal(0x8) + SIGHUP = syscall.Signal(0x1) + SIGILL = syscall.Signal(0x4) + SIGINT = syscall.Signal(0x2) + SIGIO = syscall.Signal(0x1d) + SIGIOT = syscall.Signal(0x6) + SIGKILL = syscall.Signal(0x9) + SIGPIPE = syscall.Signal(0xd) + SIGPOLL = syscall.Signal(0x1d) + SIGPROF = syscall.Signal(0x1b) + SIGPWR = syscall.Signal(0x1e) + SIGQUIT = syscall.Signal(0x3) + SIGSEGV = syscall.Signal(0xb) + SIGSTKFLT = syscall.Signal(0x10) + SIGSTOP = syscall.Signal(0x13) + SIGSYS = syscall.Signal(0x1f) + SIGTERM = syscall.Signal(0xf) + SIGTRAP = syscall.Signal(0x5) + SIGTSTP = syscall.Signal(0x14) + SIGTTIN = syscall.Signal(0x15) + SIGTTOU = syscall.Signal(0x16) + SIGUNUSED = syscall.Signal(0x1f) + SIGURG = syscall.Signal(0x17) + SIGUSR1 = syscall.Signal(0xa) + SIGUSR2 = syscall.Signal(0xc) + SIGVTALRM = syscall.Signal(0x1a) + SIGWINCH = syscall.Signal(0x1c) + SIGXCPU = syscall.Signal(0x18) + SIGXFSZ = syscall.Signal(0x19) +) + +// Error table +var errors = [...]string{ + 1: "operation not permitted", + 2: "no such file or directory", + 3: "no such process", + 4: "interrupted system call", + 5: "input/output error", + 6: "no such device or address", + 7: "argument list too long", + 8: "exec format error", + 9: "bad file descriptor", + 10: "no child processes", + 11: "resource temporarily unavailable", + 12: "cannot allocate memory", + 13: "permission denied", + 14: "bad address", + 15: "block device required", + 16: "device or resource busy", + 17: "file exists", + 18: "invalid cross-device link", + 19: "no such device", + 20: "not a directory", + 21: "is a directory", + 22: "invalid argument", + 23: "too many open files in system", + 24: "too many open files", + 25: "inappropriate ioctl for device", + 26: "text file busy", + 27: "file too large", + 28: "no space left on device", + 29: "illegal seek", + 30: "read-only file system", + 31: "too many links", + 32: "broken pipe", + 33: "numerical argument out of domain", + 34: 
"numerical result out of range", + 35: "resource deadlock avoided", + 36: "file name too long", + 37: "no locks available", + 38: "function not implemented", + 39: "directory not empty", + 40: "too many levels of symbolic links", + 42: "no message of desired type", + 43: "identifier removed", + 44: "channel number out of range", + 45: "level 2 not synchronized", + 46: "level 3 halted", + 47: "level 3 reset", + 48: "link number out of range", + 49: "protocol driver not attached", + 50: "no CSI structure available", + 51: "level 2 halted", + 52: "invalid exchange", + 53: "invalid request descriptor", + 54: "exchange full", + 55: "no anode", + 56: "invalid request code", + 57: "invalid slot", + 59: "bad font file format", + 60: "device not a stream", + 61: "no data available", + 62: "timer expired", + 63: "out of streams resources", + 64: "machine is not on the network", + 65: "package not installed", + 66: "object is remote", + 67: "link has been severed", + 68: "advertise error", + 69: "srmount error", + 70: "communication error on send", + 71: "protocol error", + 72: "multihop attempted", + 73: "RFS specific error", + 74: "bad message", + 75: "value too large for defined data type", + 76: "name not unique on network", + 77: "file descriptor in bad state", + 78: "remote address changed", + 79: "can not access a needed shared library", + 80: "accessing a corrupted shared library", + 81: ".lib section in a.out corrupted", + 82: "attempting to link in too many shared libraries", + 83: "cannot exec a shared library directly", + 84: "invalid or incomplete multibyte or wide character", + 85: "interrupted system call should be restarted", + 86: "streams pipe error", + 87: "too many users", + 88: "socket operation on non-socket", + 89: "destination address required", + 90: "message too long", + 91: "protocol wrong type for socket", + 92: "protocol not available", + 93: "protocol not supported", + 94: "socket type not supported", + 95: "operation not supported", + 96: "protocol family not supported", + 97: "address family not supported by protocol", + 98: "address already in use", + 99: "cannot assign requested address", + 100: "network is down", + 101: "network is unreachable", + 102: "network dropped connection on reset", + 103: "software caused connection abort", + 104: "connection reset by peer", + 105: "no buffer space available", + 106: "transport endpoint is already connected", + 107: "transport endpoint is not connected", + 108: "cannot send after transport endpoint shutdown", + 109: "too many references: cannot splice", + 110: "connection timed out", + 111: "connection refused", + 112: "host is down", + 113: "no route to host", + 114: "operation already in progress", + 115: "operation now in progress", + 116: "stale NFS file handle", + 117: "structure needs cleaning", + 118: "not a XENIX named type file", + 119: "no XENIX semaphores available", + 120: "is a named type file", + 121: "remote I/O error", + 122: "disk quota exceeded", + 123: "no medium found", + 124: "wrong medium type", + 125: "operation canceled", + 126: "required key not available", + 127: "key has expired", + 128: "key has been revoked", + 129: "key was rejected by service", + 130: "owner died", + 131: "state not recoverable", + 132: "operation not possible due to RF-kill", + 133: "unknown error 133", +} + +// Signal table +var signals = [...]string{ + 1: "hangup", + 2: "interrupt", + 3: "quit", + 4: "illegal instruction", + 5: "trace/breakpoint trap", + 6: "aborted", + 7: "bus error", + 8: "floating point exception", + 9: 
"killed", + 10: "user defined signal 1", + 11: "segmentation fault", + 12: "user defined signal 2", + 13: "broken pipe", + 14: "alarm clock", + 15: "terminated", + 16: "stack fault", + 17: "child exited", + 18: "continued", + 19: "stopped (signal)", + 20: "stopped", + 21: "stopped (tty input)", + 22: "stopped (tty output)", + 23: "urgent I/O condition", + 24: "CPU time limit exceeded", + 25: "file size limit exceeded", + 26: "virtual timer expired", + 27: "profiling timer expired", + 28: "window changed", + 29: "I/O possible", + 30: "power failure", + 31: "bad system call", +} diff --git a/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zerrors_linux_arm.go b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zerrors_linux_arm.go new file mode 100644 index 000000000..1cc76a78c --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zerrors_linux_arm.go @@ -0,0 +1,1742 @@ +// mkerrors.sh +// MACHINE GENERATED BY THE COMMAND ABOVE; DO NOT EDIT + +// +build arm,linux + +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs -- _const.go + +package unix + +import "syscall" + +const ( + AF_ALG = 0x26 + AF_APPLETALK = 0x5 + AF_ASH = 0x12 + AF_ATMPVC = 0x8 + AF_ATMSVC = 0x14 + AF_AX25 = 0x3 + AF_BLUETOOTH = 0x1f + AF_BRIDGE = 0x7 + AF_CAIF = 0x25 + AF_CAN = 0x1d + AF_DECnet = 0xc + AF_ECONET = 0x13 + AF_FILE = 0x1 + AF_IEEE802154 = 0x24 + AF_INET = 0x2 + AF_INET6 = 0xa + AF_IPX = 0x4 + AF_IRDA = 0x17 + AF_ISDN = 0x22 + AF_IUCV = 0x20 + AF_KEY = 0xf + AF_LLC = 0x1a + AF_LOCAL = 0x1 + AF_MAX = 0x27 + AF_NETBEUI = 0xd + AF_NETLINK = 0x10 + AF_NETROM = 0x6 + AF_PACKET = 0x11 + AF_PHONET = 0x23 + AF_PPPOX = 0x18 + AF_RDS = 0x15 + AF_ROSE = 0xb + AF_ROUTE = 0x10 + AF_RXRPC = 0x21 + AF_SECURITY = 0xe + AF_SNA = 0x16 + AF_TIPC = 0x1e + AF_UNIX = 0x1 + AF_UNSPEC = 0x0 + AF_WANPIPE = 0x19 + AF_X25 = 0x9 + ARPHRD_ADAPT = 0x108 + ARPHRD_APPLETLK = 0x8 + ARPHRD_ARCNET = 0x7 + ARPHRD_ASH = 0x30d + ARPHRD_ATM = 0x13 + ARPHRD_AX25 = 0x3 + ARPHRD_BIF = 0x307 + ARPHRD_CHAOS = 0x5 + ARPHRD_CISCO = 0x201 + ARPHRD_CSLIP = 0x101 + ARPHRD_CSLIP6 = 0x103 + ARPHRD_DDCMP = 0x205 + ARPHRD_DLCI = 0xf + ARPHRD_ECONET = 0x30e + ARPHRD_EETHER = 0x2 + ARPHRD_ETHER = 0x1 + ARPHRD_EUI64 = 0x1b + ARPHRD_FCAL = 0x311 + ARPHRD_FCFABRIC = 0x313 + ARPHRD_FCPL = 0x312 + ARPHRD_FCPP = 0x310 + ARPHRD_FDDI = 0x306 + ARPHRD_FRAD = 0x302 + ARPHRD_HDLC = 0x201 + ARPHRD_HIPPI = 0x30c + ARPHRD_HWX25 = 0x110 + ARPHRD_IEEE1394 = 0x18 + ARPHRD_IEEE802 = 0x6 + ARPHRD_IEEE80211 = 0x321 + ARPHRD_IEEE80211_PRISM = 0x322 + ARPHRD_IEEE80211_RADIOTAP = 0x323 + ARPHRD_IEEE802154 = 0x324 + ARPHRD_IEEE802154_PHY = 0x325 + ARPHRD_IEEE802_TR = 0x320 + ARPHRD_INFINIBAND = 0x20 + ARPHRD_IPDDP = 0x309 + ARPHRD_IPGRE = 0x30a + ARPHRD_IRDA = 0x30f + ARPHRD_LAPB = 0x204 + ARPHRD_LOCALTLK = 0x305 + ARPHRD_LOOPBACK = 0x304 + ARPHRD_METRICOM = 0x17 + ARPHRD_NETROM = 0x0 + ARPHRD_NONE = 0xfffe + ARPHRD_PIMREG = 0x30b + ARPHRD_PPP = 0x200 + ARPHRD_PRONET = 0x4 + ARPHRD_RAWHDLC = 0x206 + ARPHRD_ROSE = 0x10e + ARPHRD_RSRVD = 0x104 + ARPHRD_SIT = 0x308 + ARPHRD_SKIP = 0x303 + ARPHRD_SLIP = 0x100 + ARPHRD_SLIP6 = 0x102 + ARPHRD_TUNNEL = 0x300 + ARPHRD_TUNNEL6 = 0x301 + ARPHRD_VOID = 0xffff + ARPHRD_X25 = 0x10f + B0 = 0x0 + B1000000 = 0x1008 + B110 = 0x3 + B115200 = 0x1002 + B1152000 = 0x1009 + B1200 = 0x9 + B134 = 0x4 + B150 = 0x5 + B1500000 = 0x100a + B1800 = 0xa + B19200 = 0xe + B200 = 0x6 + B2000000 = 0x100b + B230400 = 0x1003 + B2400 = 0xb + B2500000 = 0x100c + B300 = 0x7 + B3000000 = 
0x100d + B3500000 = 0x100e + B38400 = 0xf + B4000000 = 0x100f + B460800 = 0x1004 + B4800 = 0xc + B50 = 0x1 + B500000 = 0x1005 + B57600 = 0x1001 + B576000 = 0x1006 + B600 = 0x8 + B75 = 0x2 + B921600 = 0x1007 + B9600 = 0xd + BOTHER = 0x1000 + BPF_A = 0x10 + BPF_ABS = 0x20 + BPF_ADD = 0x0 + BPF_ALU = 0x4 + BPF_AND = 0x50 + BPF_B = 0x10 + BPF_DIV = 0x30 + BPF_H = 0x8 + BPF_IMM = 0x0 + BPF_IND = 0x40 + BPF_JA = 0x0 + BPF_JEQ = 0x10 + BPF_JGE = 0x30 + BPF_JGT = 0x20 + BPF_JMP = 0x5 + BPF_JSET = 0x40 + BPF_K = 0x0 + BPF_LD = 0x0 + BPF_LDX = 0x1 + BPF_LEN = 0x80 + BPF_LSH = 0x60 + BPF_MAJOR_VERSION = 0x1 + BPF_MAXINSNS = 0x1000 + BPF_MEM = 0x60 + BPF_MEMWORDS = 0x10 + BPF_MINOR_VERSION = 0x1 + BPF_MISC = 0x7 + BPF_MSH = 0xa0 + BPF_MUL = 0x20 + BPF_NEG = 0x80 + BPF_OR = 0x40 + BPF_RET = 0x6 + BPF_RSH = 0x70 + BPF_ST = 0x2 + BPF_STX = 0x3 + BPF_SUB = 0x10 + BPF_TAX = 0x0 + BPF_TXA = 0x80 + BPF_W = 0x0 + BPF_X = 0x8 + BRKINT = 0x2 + BS0 = 0x0 + BS1 = 0x2000 + BSDLY = 0x2000 + CBAUD = 0x100f + CBAUDEX = 0x1000 + CFLUSH = 0xf + CIBAUD = 0x100f0000 + CLOCAL = 0x800 + CLOCK_BOOTTIME = 0x7 + CLOCK_BOOTTIME_ALARM = 0x9 + CLOCK_DEFAULT = 0x0 + CLOCK_EXT = 0x1 + CLOCK_INT = 0x2 + CLOCK_MONOTONIC = 0x1 + CLOCK_MONOTONIC_COARSE = 0x6 + CLOCK_MONOTONIC_RAW = 0x4 + CLOCK_PROCESS_CPUTIME_ID = 0x2 + CLOCK_REALTIME = 0x0 + CLOCK_REALTIME_ALARM = 0x8 + CLOCK_REALTIME_COARSE = 0x5 + CLOCK_THREAD_CPUTIME_ID = 0x3 + CLOCK_TXFROMRX = 0x4 + CLOCK_TXINT = 0x3 + CLONE_CHILD_CLEARTID = 0x200000 + CLONE_CHILD_SETTID = 0x1000000 + CLONE_DETACHED = 0x400000 + CLONE_FILES = 0x400 + CLONE_FS = 0x200 + CLONE_IO = 0x80000000 + CLONE_NEWIPC = 0x8000000 + CLONE_NEWNET = 0x40000000 + CLONE_NEWNS = 0x20000 + CLONE_NEWPID = 0x20000000 + CLONE_NEWUSER = 0x10000000 + CLONE_NEWUTS = 0x4000000 + CLONE_PARENT = 0x8000 + CLONE_PARENT_SETTID = 0x100000 + CLONE_PTRACE = 0x2000 + CLONE_SETTLS = 0x80000 + CLONE_SIGHAND = 0x800 + CLONE_SYSVSEM = 0x40000 + CLONE_THREAD = 0x10000 + CLONE_UNTRACED = 0x800000 + CLONE_VFORK = 0x4000 + CLONE_VM = 0x100 + CMSPAR = 0x40000000 + CR0 = 0x0 + CR1 = 0x200 + CR2 = 0x400 + CR3 = 0x600 + CRDLY = 0x600 + CREAD = 0x80 + CRTSCTS = 0x80000000 + CS5 = 0x0 + CS6 = 0x10 + CS7 = 0x20 + CS8 = 0x30 + CSIGNAL = 0xff + CSIZE = 0x30 + CSTART = 0x11 + CSTATUS = 0x0 + CSTOP = 0x13 + CSTOPB = 0x40 + CSUSP = 0x1a + DT_BLK = 0x6 + DT_CHR = 0x2 + DT_DIR = 0x4 + DT_FIFO = 0x1 + DT_LNK = 0xa + DT_REG = 0x8 + DT_SOCK = 0xc + DT_UNKNOWN = 0x0 + DT_WHT = 0xe + ELF_NGREG = 0x12 + ELF_PRARGSZ = 0x50 + ECHO = 0x8 + ECHOCTL = 0x200 + ECHOE = 0x10 + ECHOK = 0x20 + ECHOKE = 0x800 + ECHONL = 0x40 + ECHOPRT = 0x400 + EPOLLERR = 0x8 + EPOLLET = -0x80000000 + EPOLLHUP = 0x10 + EPOLLIN = 0x1 + EPOLLMSG = 0x400 + EPOLLONESHOT = 0x40000000 + EPOLLOUT = 0x4 + EPOLLPRI = 0x2 + EPOLLRDBAND = 0x80 + EPOLLRDHUP = 0x2000 + EPOLLRDNORM = 0x40 + EPOLLWRBAND = 0x200 + EPOLLWRNORM = 0x100 + EPOLL_CLOEXEC = 0x80000 + EPOLL_CTL_ADD = 0x1 + EPOLL_CTL_DEL = 0x2 + EPOLL_CTL_MOD = 0x3 + EPOLL_NONBLOCK = 0x800 + ETH_P_1588 = 0x88f7 + ETH_P_8021Q = 0x8100 + ETH_P_802_2 = 0x4 + ETH_P_802_3 = 0x1 + ETH_P_AARP = 0x80f3 + ETH_P_ALL = 0x3 + ETH_P_AOE = 0x88a2 + ETH_P_ARCNET = 0x1a + ETH_P_ARP = 0x806 + ETH_P_ATALK = 0x809b + ETH_P_ATMFATE = 0x8884 + ETH_P_ATMMPOA = 0x884c + ETH_P_AX25 = 0x2 + ETH_P_BPQ = 0x8ff + ETH_P_CAIF = 0xf7 + ETH_P_CAN = 0xc + ETH_P_CONTROL = 0x16 + ETH_P_CUST = 0x6006 + ETH_P_DDCMP = 0x6 + ETH_P_DEC = 0x6000 + ETH_P_DIAG = 0x6005 + ETH_P_DNA_DL = 0x6001 + ETH_P_DNA_RC = 0x6002 + ETH_P_DNA_RT = 0x6003 + ETH_P_DSA = 0x1b + ETH_P_ECONET = 0x18 + 
ETH_P_EDSA = 0xdada + ETH_P_FCOE = 0x8906 + ETH_P_FIP = 0x8914 + ETH_P_HDLC = 0x19 + ETH_P_IEEE802154 = 0xf6 + ETH_P_IEEEPUP = 0xa00 + ETH_P_IEEEPUPAT = 0xa01 + ETH_P_IP = 0x800 + ETH_P_IPV6 = 0x86dd + ETH_P_IPX = 0x8137 + ETH_P_IRDA = 0x17 + ETH_P_LAT = 0x6004 + ETH_P_LINK_CTL = 0x886c + ETH_P_LOCALTALK = 0x9 + ETH_P_LOOP = 0x60 + ETH_P_MOBITEX = 0x15 + ETH_P_MPLS_MC = 0x8848 + ETH_P_MPLS_UC = 0x8847 + ETH_P_PAE = 0x888e + ETH_P_PAUSE = 0x8808 + ETH_P_PHONET = 0xf5 + ETH_P_PPPTALK = 0x10 + ETH_P_PPP_DISC = 0x8863 + ETH_P_PPP_MP = 0x8 + ETH_P_PPP_SES = 0x8864 + ETH_P_PUP = 0x200 + ETH_P_PUPAT = 0x201 + ETH_P_RARP = 0x8035 + ETH_P_SCA = 0x6007 + ETH_P_SLOW = 0x8809 + ETH_P_SNAP = 0x5 + ETH_P_TEB = 0x6558 + ETH_P_TIPC = 0x88ca + ETH_P_TRAILER = 0x1c + ETH_P_TR_802_2 = 0x11 + ETH_P_WAN_PPP = 0x7 + ETH_P_WCCP = 0x883e + ETH_P_X25 = 0x805 + EXTA = 0xe + EXTB = 0xf + EXTPROC = 0x10000 + FD_CLOEXEC = 0x1 + FD_SETSIZE = 0x400 + FF0 = 0x0 + FF1 = 0x8000 + FFDLY = 0x8000 + FLUSHO = 0x1000 + F_DUPFD = 0x0 + F_DUPFD_CLOEXEC = 0x406 + F_EXLCK = 0x4 + F_GETFD = 0x1 + F_GETFL = 0x3 + F_GETLEASE = 0x401 + F_GETLK = 0xc + F_GETLK64 = 0xc + F_GETOWN = 0x9 + F_GETOWN_EX = 0x10 + F_GETPIPE_SZ = 0x408 + F_GETSIG = 0xb + F_LOCK = 0x1 + F_NOTIFY = 0x402 + F_OK = 0x0 + F_RDLCK = 0x0 + F_SETFD = 0x2 + F_SETFL = 0x4 + F_SETLEASE = 0x400 + F_SETLK = 0xd + F_SETLK64 = 0xd + F_SETLKW = 0xe + F_SETLKW64 = 0xe + F_SETOWN = 0x8 + F_SETOWN_EX = 0xf + F_SETPIPE_SZ = 0x407 + F_SETSIG = 0xa + F_SHLCK = 0x8 + F_TEST = 0x3 + F_TLOCK = 0x2 + F_ULOCK = 0x0 + F_UNLCK = 0x2 + F_WRLCK = 0x1 + HUPCL = 0x400 + IBSHIFT = 0x10 + ICANON = 0x2 + ICMPV6_FILTER = 0x1 + ICRNL = 0x100 + IEXTEN = 0x8000 + IFA_F_DADFAILED = 0x8 + IFA_F_DEPRECATED = 0x20 + IFA_F_HOMEADDRESS = 0x10 + IFA_F_NODAD = 0x2 + IFA_F_OPTIMISTIC = 0x4 + IFA_F_PERMANENT = 0x80 + IFA_F_SECONDARY = 0x1 + IFA_F_TEMPORARY = 0x1 + IFA_F_TENTATIVE = 0x40 + IFA_MAX = 0x7 + IFF_ALLMULTI = 0x200 + IFF_AUTOMEDIA = 0x4000 + IFF_BROADCAST = 0x2 + IFF_DEBUG = 0x4 + IFF_DYNAMIC = 0x8000 + IFF_LOOPBACK = 0x8 + IFF_MASTER = 0x400 + IFF_MULTICAST = 0x1000 + IFF_NOARP = 0x80 + IFF_NOTRAILERS = 0x20 + IFF_NO_PI = 0x1000 + IFF_ONE_QUEUE = 0x2000 + IFF_POINTOPOINT = 0x10 + IFF_PORTSEL = 0x2000 + IFF_PROMISC = 0x100 + IFF_RUNNING = 0x40 + IFF_SLAVE = 0x800 + IFF_TAP = 0x2 + IFF_TUN = 0x1 + IFF_TUN_EXCL = 0x8000 + IFF_UP = 0x1 + IFF_VNET_HDR = 0x4000 + IFNAMSIZ = 0x10 + IGNBRK = 0x1 + IGNCR = 0x80 + IGNPAR = 0x4 + IMAXBEL = 0x2000 + INLCR = 0x40 + INPCK = 0x10 + IN_ACCESS = 0x1 + IN_ALL_EVENTS = 0xfff + IN_ATTRIB = 0x4 + IN_CLASSA_HOST = 0xffffff + IN_CLASSA_MAX = 0x80 + IN_CLASSA_NET = 0xff000000 + IN_CLASSA_NSHIFT = 0x18 + IN_CLASSB_HOST = 0xffff + IN_CLASSB_MAX = 0x10000 + IN_CLASSB_NET = 0xffff0000 + IN_CLASSB_NSHIFT = 0x10 + IN_CLASSC_HOST = 0xff + IN_CLASSC_NET = 0xffffff00 + IN_CLASSC_NSHIFT = 0x8 + IN_CLOEXEC = 0x80000 + IN_CLOSE = 0x18 + IN_CLOSE_NOWRITE = 0x10 + IN_CLOSE_WRITE = 0x8 + IN_CREATE = 0x100 + IN_DELETE = 0x200 + IN_DELETE_SELF = 0x400 + IN_DONT_FOLLOW = 0x2000000 + IN_EXCL_UNLINK = 0x4000000 + IN_IGNORED = 0x8000 + IN_ISDIR = 0x40000000 + IN_LOOPBACKNET = 0x7f + IN_MASK_ADD = 0x20000000 + IN_MODIFY = 0x2 + IN_MOVE = 0xc0 + IN_MOVED_FROM = 0x40 + IN_MOVED_TO = 0x80 + IN_MOVE_SELF = 0x800 + IN_NONBLOCK = 0x800 + IN_ONESHOT = 0x80000000 + IN_ONLYDIR = 0x1000000 + IN_OPEN = 0x20 + IN_Q_OVERFLOW = 0x4000 + IN_UNMOUNT = 0x2000 + IPPROTO_AH = 0x33 + IPPROTO_COMP = 0x6c + IPPROTO_DCCP = 0x21 + IPPROTO_DSTOPTS = 0x3c + IPPROTO_EGP = 0x8 + IPPROTO_ENCAP = 0x62 + IPPROTO_ESP = 0x32 + 
IPPROTO_FRAGMENT = 0x2c + IPPROTO_GRE = 0x2f + IPPROTO_HOPOPTS = 0x0 + IPPROTO_ICMP = 0x1 + IPPROTO_ICMPV6 = 0x3a + IPPROTO_IDP = 0x16 + IPPROTO_IGMP = 0x2 + IPPROTO_IP = 0x0 + IPPROTO_IPIP = 0x4 + IPPROTO_IPV6 = 0x29 + IPPROTO_MTP = 0x5c + IPPROTO_NONE = 0x3b + IPPROTO_PIM = 0x67 + IPPROTO_PUP = 0xc + IPPROTO_RAW = 0xff + IPPROTO_ROUTING = 0x2b + IPPROTO_RSVP = 0x2e + IPPROTO_SCTP = 0x84 + IPPROTO_TCP = 0x6 + IPPROTO_TP = 0x1d + IPPROTO_UDP = 0x11 + IPPROTO_UDPLITE = 0x88 + IPV6_2292DSTOPTS = 0x4 + IPV6_2292HOPLIMIT = 0x8 + IPV6_2292HOPOPTS = 0x3 + IPV6_2292PKTINFO = 0x2 + IPV6_2292PKTOPTIONS = 0x6 + IPV6_2292RTHDR = 0x5 + IPV6_ADDRFORM = 0x1 + IPV6_ADD_MEMBERSHIP = 0x14 + IPV6_AUTHHDR = 0xa + IPV6_CHECKSUM = 0x7 + IPV6_DROP_MEMBERSHIP = 0x15 + IPV6_DSTOPTS = 0x3b + IPV6_HOPLIMIT = 0x34 + IPV6_HOPOPTS = 0x36 + IPV6_IPSEC_POLICY = 0x22 + IPV6_JOIN_ANYCAST = 0x1b + IPV6_JOIN_GROUP = 0x14 + IPV6_LEAVE_ANYCAST = 0x1c + IPV6_LEAVE_GROUP = 0x15 + IPV6_MTU = 0x18 + IPV6_MTU_DISCOVER = 0x17 + IPV6_MULTICAST_HOPS = 0x12 + IPV6_MULTICAST_IF = 0x11 + IPV6_MULTICAST_LOOP = 0x13 + IPV6_NEXTHOP = 0x9 + IPV6_PKTINFO = 0x32 + IPV6_PMTUDISC_DO = 0x2 + IPV6_PMTUDISC_DONT = 0x0 + IPV6_PMTUDISC_PROBE = 0x3 + IPV6_PMTUDISC_WANT = 0x1 + IPV6_RECVDSTOPTS = 0x3a + IPV6_RECVERR = 0x19 + IPV6_RECVHOPLIMIT = 0x33 + IPV6_RECVHOPOPTS = 0x35 + IPV6_RECVPKTINFO = 0x31 + IPV6_RECVRTHDR = 0x38 + IPV6_RECVTCLASS = 0x42 + IPV6_ROUTER_ALERT = 0x16 + IPV6_RTHDR = 0x39 + IPV6_RTHDRDSTOPTS = 0x37 + IPV6_RTHDR_LOOSE = 0x0 + IPV6_RTHDR_STRICT = 0x1 + IPV6_RTHDR_TYPE_0 = 0x0 + IPV6_RXDSTOPTS = 0x3b + IPV6_RXHOPOPTS = 0x36 + IPV6_TCLASS = 0x43 + IPV6_UNICAST_HOPS = 0x10 + IPV6_V6ONLY = 0x1a + IPV6_XFRM_POLICY = 0x23 + IP_ADD_MEMBERSHIP = 0x23 + IP_ADD_SOURCE_MEMBERSHIP = 0x27 + IP_BLOCK_SOURCE = 0x26 + IP_DEFAULT_MULTICAST_LOOP = 0x1 + IP_DEFAULT_MULTICAST_TTL = 0x1 + IP_DF = 0x4000 + IP_DROP_MEMBERSHIP = 0x24 + IP_DROP_SOURCE_MEMBERSHIP = 0x28 + IP_FREEBIND = 0xf + IP_HDRINCL = 0x3 + IP_IPSEC_POLICY = 0x10 + IP_MAXPACKET = 0xffff + IP_MAX_MEMBERSHIPS = 0x14 + IP_MF = 0x2000 + IP_MINTTL = 0x15 + IP_MSFILTER = 0x29 + IP_MSS = 0x240 + IP_MTU = 0xe + IP_MTU_DISCOVER = 0xa + IP_MULTICAST_IF = 0x20 + IP_MULTICAST_LOOP = 0x22 + IP_MULTICAST_TTL = 0x21 + IP_OFFMASK = 0x1fff + IP_OPTIONS = 0x4 + IP_ORIGDSTADDR = 0x14 + IP_PASSSEC = 0x12 + IP_PKTINFO = 0x8 + IP_PKTOPTIONS = 0x9 + IP_PMTUDISC = 0xa + IP_PMTUDISC_DO = 0x2 + IP_PMTUDISC_DONT = 0x0 + IP_PMTUDISC_PROBE = 0x3 + IP_PMTUDISC_WANT = 0x1 + IP_RECVERR = 0xb + IP_RECVOPTS = 0x6 + IP_RECVORIGDSTADDR = 0x14 + IP_RECVRETOPTS = 0x7 + IP_RECVTOS = 0xd + IP_RECVTTL = 0xc + IP_RETOPTS = 0x7 + IP_RF = 0x8000 + IP_ROUTER_ALERT = 0x5 + IP_TOS = 0x1 + IP_TRANSPARENT = 0x13 + IP_TTL = 0x2 + IP_UNBLOCK_SOURCE = 0x25 + IP_XFRM_POLICY = 0x11 + ISIG = 0x1 + ISTRIP = 0x20 + IUCLC = 0x200 + IUTF8 = 0x4000 + IXANY = 0x800 + IXOFF = 0x1000 + IXON = 0x400 + LINUX_REBOOT_CMD_CAD_OFF = 0x0 + LINUX_REBOOT_CMD_CAD_ON = 0x89abcdef + LINUX_REBOOT_CMD_HALT = 0xcdef0123 + LINUX_REBOOT_CMD_KEXEC = 0x45584543 + LINUX_REBOOT_CMD_POWER_OFF = 0x4321fedc + LINUX_REBOOT_CMD_RESTART = 0x1234567 + LINUX_REBOOT_CMD_RESTART2 = 0xa1b2c3d4 + LINUX_REBOOT_CMD_SW_SUSPEND = 0xd000fce2 + LINUX_REBOOT_MAGIC1 = 0xfee1dead + LINUX_REBOOT_MAGIC2 = 0x28121969 + LOCK_EX = 0x2 + LOCK_NB = 0x4 + LOCK_SH = 0x1 + LOCK_UN = 0x8 + MADV_DOFORK = 0xb + MADV_DONTFORK = 0xa + MADV_DONTNEED = 0x4 + MADV_HUGEPAGE = 0xe + MADV_HWPOISON = 0x64 + MADV_MERGEABLE = 0xc + MADV_NOHUGEPAGE = 0xf + MADV_NORMAL = 0x0 + MADV_RANDOM = 0x1 + MADV_REMOVE = 0x9 
+ MADV_SEQUENTIAL = 0x2 + MADV_UNMERGEABLE = 0xd + MADV_WILLNEED = 0x3 + MAP_ANON = 0x20 + MAP_ANONYMOUS = 0x20 + MAP_DENYWRITE = 0x800 + MAP_EXECUTABLE = 0x1000 + MAP_FILE = 0x0 + MAP_FIXED = 0x10 + MAP_GROWSDOWN = 0x100 + MAP_LOCKED = 0x2000 + MAP_NONBLOCK = 0x10000 + MAP_NORESERVE = 0x4000 + MAP_POPULATE = 0x8000 + MAP_PRIVATE = 0x2 + MAP_SHARED = 0x1 + MAP_TYPE = 0xf + MCL_CURRENT = 0x1 + MCL_FUTURE = 0x2 + MNT_DETACH = 0x2 + MNT_EXPIRE = 0x4 + MNT_FORCE = 0x1 + MSG_CMSG_CLOEXEC = 0x40000000 + MSG_CONFIRM = 0x800 + MSG_CTRUNC = 0x8 + MSG_DONTROUTE = 0x4 + MSG_DONTWAIT = 0x40 + MSG_EOR = 0x80 + MSG_ERRQUEUE = 0x2000 + MSG_FASTOPEN = 0x20000000 + MSG_FIN = 0x200 + MSG_MORE = 0x8000 + MSG_NOSIGNAL = 0x4000 + MSG_OOB = 0x1 + MSG_PEEK = 0x2 + MSG_PROXY = 0x10 + MSG_RST = 0x1000 + MSG_SYN = 0x400 + MSG_TRUNC = 0x20 + MSG_TRYHARD = 0x4 + MSG_WAITALL = 0x100 + MSG_WAITFORONE = 0x10000 + MS_ACTIVE = 0x40000000 + MS_ASYNC = 0x1 + MS_BIND = 0x1000 + MS_DIRSYNC = 0x80 + MS_INVALIDATE = 0x2 + MS_I_VERSION = 0x800000 + MS_KERNMOUNT = 0x400000 + MS_MANDLOCK = 0x40 + MS_MGC_MSK = 0xffff0000 + MS_MGC_VAL = 0xc0ed0000 + MS_MOVE = 0x2000 + MS_NOATIME = 0x400 + MS_NODEV = 0x4 + MS_NODIRATIME = 0x800 + MS_NOEXEC = 0x8 + MS_NOSUID = 0x2 + MS_NOUSER = -0x80000000 + MS_POSIXACL = 0x10000 + MS_PRIVATE = 0x40000 + MS_RDONLY = 0x1 + MS_REC = 0x4000 + MS_RELATIME = 0x200000 + MS_REMOUNT = 0x20 + MS_RMT_MASK = 0x800051 + MS_SHARED = 0x100000 + MS_SILENT = 0x8000 + MS_SLAVE = 0x80000 + MS_STRICTATIME = 0x1000000 + MS_SYNC = 0x4 + MS_SYNCHRONOUS = 0x10 + MS_UNBINDABLE = 0x20000 + NAME_MAX = 0xff + NETLINK_ADD_MEMBERSHIP = 0x1 + NETLINK_AUDIT = 0x9 + NETLINK_BROADCAST_ERROR = 0x4 + NETLINK_CONNECTOR = 0xb + NETLINK_DNRTMSG = 0xe + NETLINK_DROP_MEMBERSHIP = 0x2 + NETLINK_ECRYPTFS = 0x13 + NETLINK_FIB_LOOKUP = 0xa + NETLINK_FIREWALL = 0x3 + NETLINK_GENERIC = 0x10 + NETLINK_INET_DIAG = 0x4 + NETLINK_IP6_FW = 0xd + NETLINK_ISCSI = 0x8 + NETLINK_KOBJECT_UEVENT = 0xf + NETLINK_NETFILTER = 0xc + NETLINK_NFLOG = 0x5 + NETLINK_NO_ENOBUFS = 0x5 + NETLINK_PKTINFO = 0x3 + NETLINK_RDMA = 0x14 + NETLINK_ROUTE = 0x0 + NETLINK_SCSITRANSPORT = 0x12 + NETLINK_SELINUX = 0x7 + NETLINK_UNUSED = 0x1 + NETLINK_USERSOCK = 0x2 + NETLINK_XFRM = 0x6 + NL0 = 0x0 + NL1 = 0x100 + NLA_ALIGNTO = 0x4 + NLA_F_NESTED = 0x8000 + NLA_F_NET_BYTEORDER = 0x4000 + NLA_HDRLEN = 0x4 + NLDLY = 0x100 + NLMSG_ALIGNTO = 0x4 + NLMSG_DONE = 0x3 + NLMSG_ERROR = 0x2 + NLMSG_HDRLEN = 0x10 + NLMSG_MIN_TYPE = 0x10 + NLMSG_NOOP = 0x1 + NLMSG_OVERRUN = 0x4 + NLM_F_ACK = 0x4 + NLM_F_APPEND = 0x800 + NLM_F_ATOMIC = 0x400 + NLM_F_CREATE = 0x400 + NLM_F_DUMP = 0x300 + NLM_F_ECHO = 0x8 + NLM_F_EXCL = 0x200 + NLM_F_MATCH = 0x200 + NLM_F_MULTI = 0x2 + NLM_F_REPLACE = 0x100 + NLM_F_REQUEST = 0x1 + NLM_F_ROOT = 0x100 + NOFLSH = 0x80 + OCRNL = 0x8 + OFDEL = 0x80 + OFILL = 0x40 + OLCUC = 0x2 + ONLCR = 0x4 + ONLRET = 0x20 + ONOCR = 0x10 + OPOST = 0x1 + O_ACCMODE = 0x3 + O_APPEND = 0x400 + O_ASYNC = 0x2000 + O_CLOEXEC = 0x80000 + O_CREAT = 0x40 + O_DIRECT = 0x10000 + O_DIRECTORY = 0x4000 + O_DSYNC = 0x1000 + O_EXCL = 0x80 + O_FSYNC = 0x1000 + O_LARGEFILE = 0x20000 + O_NDELAY = 0x800 + O_NOATIME = 0x40000 + O_NOCTTY = 0x100 + O_NOFOLLOW = 0x8000 + O_NONBLOCK = 0x800 + O_PATH = 0x200000 + O_RDONLY = 0x0 + O_RDWR = 0x2 + O_RSYNC = 0x1000 + O_SYNC = 0x1000 + O_TRUNC = 0x200 + O_WRONLY = 0x1 + PACKET_ADD_MEMBERSHIP = 0x1 + PACKET_BROADCAST = 0x1 + PACKET_DROP_MEMBERSHIP = 0x2 + PACKET_FASTROUTE = 0x6 + PACKET_HOST = 0x0 + PACKET_LOOPBACK = 0x5 + PACKET_MR_ALLMULTI = 0x2 + 
PACKET_MR_MULTICAST = 0x0 + PACKET_MR_PROMISC = 0x1 + PACKET_MULTICAST = 0x2 + PACKET_OTHERHOST = 0x3 + PACKET_OUTGOING = 0x4 + PACKET_RECV_OUTPUT = 0x3 + PACKET_RX_RING = 0x5 + PACKET_STATISTICS = 0x6 + PARENB = 0x100 + PARMRK = 0x8 + PARODD = 0x200 + PENDIN = 0x4000 + PRIO_PGRP = 0x1 + PRIO_PROCESS = 0x0 + PRIO_USER = 0x2 + PROT_EXEC = 0x4 + PROT_GROWSDOWN = 0x1000000 + PROT_GROWSUP = 0x2000000 + PROT_NONE = 0x0 + PROT_READ = 0x1 + PROT_WRITE = 0x2 + PR_CAPBSET_DROP = 0x18 + PR_CAPBSET_READ = 0x17 + PR_CLEAR_SECCOMP_FILTER = 0x25 + PR_ENDIAN_BIG = 0x0 + PR_ENDIAN_LITTLE = 0x1 + PR_ENDIAN_PPC_LITTLE = 0x2 + PR_FPEMU_NOPRINT = 0x1 + PR_FPEMU_SIGFPE = 0x2 + PR_FP_EXC_ASYNC = 0x2 + PR_FP_EXC_DISABLED = 0x0 + PR_FP_EXC_DIV = 0x10000 + PR_FP_EXC_INV = 0x100000 + PR_FP_EXC_NONRECOV = 0x1 + PR_FP_EXC_OVF = 0x20000 + PR_FP_EXC_PRECISE = 0x3 + PR_FP_EXC_RES = 0x80000 + PR_FP_EXC_SW_ENABLE = 0x80 + PR_FP_EXC_UND = 0x40000 + PR_GET_DUMPABLE = 0x3 + PR_GET_ENDIAN = 0x13 + PR_GET_FPEMU = 0x9 + PR_GET_FPEXC = 0xb + PR_GET_KEEPCAPS = 0x7 + PR_GET_NAME = 0x10 + PR_GET_PDEATHSIG = 0x2 + PR_GET_SECCOMP = 0x15 + PR_GET_SECCOMP_FILTER = 0x23 + PR_GET_SECUREBITS = 0x1b + PR_GET_TIMERSLACK = 0x1e + PR_GET_TIMING = 0xd + PR_GET_TSC = 0x19 + PR_GET_UNALIGN = 0x5 + PR_MCE_KILL = 0x21 + PR_MCE_KILL_CLEAR = 0x0 + PR_MCE_KILL_DEFAULT = 0x2 + PR_MCE_KILL_EARLY = 0x1 + PR_MCE_KILL_GET = 0x22 + PR_MCE_KILL_LATE = 0x0 + PR_MCE_KILL_SET = 0x1 + PR_SECCOMP_FILTER_EVENT = 0x1 + PR_SECCOMP_FILTER_SYSCALL = 0x0 + PR_SET_DUMPABLE = 0x4 + PR_SET_ENDIAN = 0x14 + PR_SET_FPEMU = 0xa + PR_SET_FPEXC = 0xc + PR_SET_KEEPCAPS = 0x8 + PR_SET_NAME = 0xf + PR_SET_PDEATHSIG = 0x1 + PR_SET_PTRACER = 0x59616d61 + PR_SET_SECCOMP = 0x16 + PR_SET_SECCOMP_FILTER = 0x24 + PR_SET_SECUREBITS = 0x1c + PR_SET_TIMERSLACK = 0x1d + PR_SET_TIMING = 0xe + PR_SET_TSC = 0x1a + PR_SET_UNALIGN = 0x6 + PR_TASK_PERF_EVENTS_DISABLE = 0x1f + PR_TASK_PERF_EVENTS_ENABLE = 0x20 + PR_TIMING_STATISTICAL = 0x0 + PR_TIMING_TIMESTAMP = 0x1 + PR_TSC_ENABLE = 0x1 + PR_TSC_SIGSEGV = 0x2 + PR_UNALIGN_NOPRINT = 0x1 + PR_UNALIGN_SIGBUS = 0x2 + PTRACE_ATTACH = 0x10 + PTRACE_CONT = 0x7 + PTRACE_DETACH = 0x11 + PTRACE_EVENT_CLONE = 0x3 + PTRACE_EVENT_EXEC = 0x4 + PTRACE_EVENT_EXIT = 0x6 + PTRACE_EVENT_FORK = 0x1 + PTRACE_EVENT_VFORK = 0x2 + PTRACE_EVENT_VFORK_DONE = 0x5 + PTRACE_GETCRUNCHREGS = 0x19 + PTRACE_GETEVENTMSG = 0x4201 + PTRACE_GETFPREGS = 0xe + PTRACE_GETHBPREGS = 0x1d + PTRACE_GETREGS = 0xc + PTRACE_GETREGSET = 0x4204 + PTRACE_GETSIGINFO = 0x4202 + PTRACE_GETVFPREGS = 0x1b + PTRACE_GETWMMXREGS = 0x12 + PTRACE_GET_THREAD_AREA = 0x16 + PTRACE_KILL = 0x8 + PTRACE_OLDSETOPTIONS = 0x15 + PTRACE_O_MASK = 0x7f + PTRACE_O_TRACECLONE = 0x8 + PTRACE_O_TRACEEXEC = 0x10 + PTRACE_O_TRACEEXIT = 0x40 + PTRACE_O_TRACEFORK = 0x2 + PTRACE_O_TRACESYSGOOD = 0x1 + PTRACE_O_TRACEVFORK = 0x4 + PTRACE_O_TRACEVFORKDONE = 0x20 + PTRACE_PEEKDATA = 0x2 + PTRACE_PEEKTEXT = 0x1 + PTRACE_PEEKUSR = 0x3 + PTRACE_POKEDATA = 0x5 + PTRACE_POKETEXT = 0x4 + PTRACE_POKEUSR = 0x6 + PTRACE_SETCRUNCHREGS = 0x1a + PTRACE_SETFPREGS = 0xf + PTRACE_SETHBPREGS = 0x1e + PTRACE_SETOPTIONS = 0x4200 + PTRACE_SETREGS = 0xd + PTRACE_SETREGSET = 0x4205 + PTRACE_SETSIGINFO = 0x4203 + PTRACE_SETVFPREGS = 0x1c + PTRACE_SETWMMXREGS = 0x13 + PTRACE_SET_SYSCALL = 0x17 + PTRACE_SINGLESTEP = 0x9 + PTRACE_SYSCALL = 0x18 + PTRACE_TRACEME = 0x0 + PT_DATA_ADDR = 0x10004 + PT_TEXT_ADDR = 0x10000 + PT_TEXT_END_ADDR = 0x10008 + RLIMIT_AS = 0x9 + RLIMIT_CORE = 0x4 + RLIMIT_CPU = 0x0 + RLIMIT_DATA = 0x2 + RLIMIT_FSIZE = 0x1 + 
RLIMIT_NOFILE = 0x7 + RLIMIT_STACK = 0x3 + RLIM_INFINITY = -0x1 + RTAX_ADVMSS = 0x8 + RTAX_CWND = 0x7 + RTAX_FEATURES = 0xc + RTAX_FEATURE_ALLFRAG = 0x8 + RTAX_FEATURE_ECN = 0x1 + RTAX_FEATURE_SACK = 0x2 + RTAX_FEATURE_TIMESTAMP = 0x4 + RTAX_HOPLIMIT = 0xa + RTAX_INITCWND = 0xb + RTAX_INITRWND = 0xe + RTAX_LOCK = 0x1 + RTAX_MAX = 0xe + RTAX_MTU = 0x2 + RTAX_REORDERING = 0x9 + RTAX_RTO_MIN = 0xd + RTAX_RTT = 0x4 + RTAX_RTTVAR = 0x5 + RTAX_SSTHRESH = 0x6 + RTAX_UNSPEC = 0x0 + RTAX_WINDOW = 0x3 + RTA_ALIGNTO = 0x4 + RTA_MAX = 0x10 + RTCF_DIRECTSRC = 0x4000000 + RTCF_DOREDIRECT = 0x1000000 + RTCF_LOG = 0x2000000 + RTCF_MASQ = 0x400000 + RTCF_NAT = 0x800000 + RTCF_VALVE = 0x200000 + RTF_ADDRCLASSMASK = 0xf8000000 + RTF_ADDRCONF = 0x40000 + RTF_ALLONLINK = 0x20000 + RTF_BROADCAST = 0x10000000 + RTF_CACHE = 0x1000000 + RTF_DEFAULT = 0x10000 + RTF_DYNAMIC = 0x10 + RTF_FLOW = 0x2000000 + RTF_GATEWAY = 0x2 + RTF_HOST = 0x4 + RTF_INTERFACE = 0x40000000 + RTF_IRTT = 0x100 + RTF_LINKRT = 0x100000 + RTF_LOCAL = 0x80000000 + RTF_MODIFIED = 0x20 + RTF_MSS = 0x40 + RTF_MTU = 0x40 + RTF_MULTICAST = 0x20000000 + RTF_NAT = 0x8000000 + RTF_NOFORWARD = 0x1000 + RTF_NONEXTHOP = 0x200000 + RTF_NOPMTUDISC = 0x4000 + RTF_POLICY = 0x4000000 + RTF_REINSTATE = 0x8 + RTF_REJECT = 0x200 + RTF_STATIC = 0x400 + RTF_THROW = 0x2000 + RTF_UP = 0x1 + RTF_WINDOW = 0x80 + RTF_XRESOLVE = 0x800 + RTM_BASE = 0x10 + RTM_DELACTION = 0x31 + RTM_DELADDR = 0x15 + RTM_DELADDRLABEL = 0x49 + RTM_DELLINK = 0x11 + RTM_DELNEIGH = 0x1d + RTM_DELQDISC = 0x25 + RTM_DELROUTE = 0x19 + RTM_DELRULE = 0x21 + RTM_DELTCLASS = 0x29 + RTM_DELTFILTER = 0x2d + RTM_F_CLONED = 0x200 + RTM_F_EQUALIZE = 0x400 + RTM_F_NOTIFY = 0x100 + RTM_F_PREFIX = 0x800 + RTM_GETACTION = 0x32 + RTM_GETADDR = 0x16 + RTM_GETADDRLABEL = 0x4a + RTM_GETANYCAST = 0x3e + RTM_GETDCB = 0x4e + RTM_GETLINK = 0x12 + RTM_GETMULTICAST = 0x3a + RTM_GETNEIGH = 0x1e + RTM_GETNEIGHTBL = 0x42 + RTM_GETQDISC = 0x26 + RTM_GETROUTE = 0x1a + RTM_GETRULE = 0x22 + RTM_GETTCLASS = 0x2a + RTM_GETTFILTER = 0x2e + RTM_MAX = 0x4f + RTM_NEWACTION = 0x30 + RTM_NEWADDR = 0x14 + RTM_NEWADDRLABEL = 0x48 + RTM_NEWLINK = 0x10 + RTM_NEWNDUSEROPT = 0x44 + RTM_NEWNEIGH = 0x1c + RTM_NEWNEIGHTBL = 0x40 + RTM_NEWPREFIX = 0x34 + RTM_NEWQDISC = 0x24 + RTM_NEWROUTE = 0x18 + RTM_NEWRULE = 0x20 + RTM_NEWTCLASS = 0x28 + RTM_NEWTFILTER = 0x2c + RTM_NR_FAMILIES = 0x10 + RTM_NR_MSGTYPES = 0x40 + RTM_SETDCB = 0x4f + RTM_SETLINK = 0x13 + RTM_SETNEIGHTBL = 0x43 + RTNH_ALIGNTO = 0x4 + RTNH_F_DEAD = 0x1 + RTNH_F_ONLINK = 0x4 + RTNH_F_PERVASIVE = 0x2 + RTN_MAX = 0xb + RTPROT_BIRD = 0xc + RTPROT_BOOT = 0x3 + RTPROT_DHCP = 0x10 + RTPROT_DNROUTED = 0xd + RTPROT_GATED = 0x8 + RTPROT_KERNEL = 0x2 + RTPROT_MRT = 0xa + RTPROT_NTK = 0xf + RTPROT_RA = 0x9 + RTPROT_REDIRECT = 0x1 + RTPROT_STATIC = 0x4 + RTPROT_UNSPEC = 0x0 + RTPROT_XORP = 0xe + RTPROT_ZEBRA = 0xb + RT_CLASS_DEFAULT = 0xfd + RT_CLASS_LOCAL = 0xff + RT_CLASS_MAIN = 0xfe + RT_CLASS_MAX = 0xff + RT_CLASS_UNSPEC = 0x0 + RUSAGE_CHILDREN = -0x1 + RUSAGE_SELF = 0x0 + RUSAGE_THREAD = 0x1 + SCM_CREDENTIALS = 0x2 + SCM_RIGHTS = 0x1 + SCM_TIMESTAMP = 0x1d + SCM_TIMESTAMPING = 0x25 + SCM_TIMESTAMPNS = 0x23 + SHUT_RD = 0x0 + SHUT_RDWR = 0x2 + SHUT_WR = 0x1 + SIOCADDDLCI = 0x8980 + SIOCADDMULTI = 0x8931 + SIOCADDRT = 0x890b + SIOCATMARK = 0x8905 + SIOCDARP = 0x8953 + SIOCDELDLCI = 0x8981 + SIOCDELMULTI = 0x8932 + SIOCDELRT = 0x890c + SIOCDEVPRIVATE = 0x89f0 + SIOCDIFADDR = 0x8936 + SIOCDRARP = 0x8960 + SIOCGARP = 0x8954 + SIOCGIFADDR = 0x8915 + SIOCGIFBR = 0x8940 + SIOCGIFBRDADDR = 0x8919 
+ SIOCGIFCONF = 0x8912 + SIOCGIFCOUNT = 0x8938 + SIOCGIFDSTADDR = 0x8917 + SIOCGIFENCAP = 0x8925 + SIOCGIFFLAGS = 0x8913 + SIOCGIFHWADDR = 0x8927 + SIOCGIFINDEX = 0x8933 + SIOCGIFMAP = 0x8970 + SIOCGIFMEM = 0x891f + SIOCGIFMETRIC = 0x891d + SIOCGIFMTU = 0x8921 + SIOCGIFNAME = 0x8910 + SIOCGIFNETMASK = 0x891b + SIOCGIFPFLAGS = 0x8935 + SIOCGIFSLAVE = 0x8929 + SIOCGIFTXQLEN = 0x8942 + SIOCGPGRP = 0x8904 + SIOCGRARP = 0x8961 + SIOCGSTAMP = 0x8906 + SIOCGSTAMPNS = 0x8907 + SIOCPROTOPRIVATE = 0x89e0 + SIOCRTMSG = 0x890d + SIOCSARP = 0x8955 + SIOCSIFADDR = 0x8916 + SIOCSIFBR = 0x8941 + SIOCSIFBRDADDR = 0x891a + SIOCSIFDSTADDR = 0x8918 + SIOCSIFENCAP = 0x8926 + SIOCSIFFLAGS = 0x8914 + SIOCSIFHWADDR = 0x8924 + SIOCSIFHWBROADCAST = 0x8937 + SIOCSIFLINK = 0x8911 + SIOCSIFMAP = 0x8971 + SIOCSIFMEM = 0x8920 + SIOCSIFMETRIC = 0x891e + SIOCSIFMTU = 0x8922 + SIOCSIFNAME = 0x8923 + SIOCSIFNETMASK = 0x891c + SIOCSIFPFLAGS = 0x8934 + SIOCSIFSLAVE = 0x8930 + SIOCSIFTXQLEN = 0x8943 + SIOCSPGRP = 0x8902 + SIOCSRARP = 0x8962 + SOCK_CLOEXEC = 0x80000 + SOCK_DCCP = 0x6 + SOCK_DGRAM = 0x2 + SOCK_NONBLOCK = 0x800 + SOCK_PACKET = 0xa + SOCK_RAW = 0x3 + SOCK_RDM = 0x4 + SOCK_SEQPACKET = 0x5 + SOCK_STREAM = 0x1 + SOL_AAL = 0x109 + SOL_ATM = 0x108 + SOL_DECNET = 0x105 + SOL_ICMPV6 = 0x3a + SOL_IP = 0x0 + SOL_IPV6 = 0x29 + SOL_IRDA = 0x10a + SOL_PACKET = 0x107 + SOL_RAW = 0xff + SOL_SOCKET = 0x1 + SOL_TCP = 0x6 + SOL_X25 = 0x106 + SOMAXCONN = 0x80 + SO_ACCEPTCONN = 0x1e + SO_ATTACH_FILTER = 0x1a + SO_BINDTODEVICE = 0x19 + SO_BROADCAST = 0x6 + SO_BSDCOMPAT = 0xe + SO_DEBUG = 0x1 + SO_DETACH_FILTER = 0x1b + SO_DOMAIN = 0x27 + SO_DONTROUTE = 0x5 + SO_ERROR = 0x4 + SO_KEEPALIVE = 0x9 + SO_LINGER = 0xd + SO_MARK = 0x24 + SO_NO_CHECK = 0xb + SO_OOBINLINE = 0xa + SO_PASSCRED = 0x10 + SO_PASSSEC = 0x22 + SO_PEERCRED = 0x11 + SO_PEERNAME = 0x1c + SO_PEERSEC = 0x1f + SO_PRIORITY = 0xc + SO_PROTOCOL = 0x26 + SO_RCVBUF = 0x8 + SO_RCVBUFFORCE = 0x21 + SO_RCVLOWAT = 0x12 + SO_RCVTIMEO = 0x14 + SO_REUSEADDR = 0x2 + SO_RXQ_OVFL = 0x28 + SO_SECURITY_AUTHENTICATION = 0x16 + SO_SECURITY_ENCRYPTION_NETWORK = 0x18 + SO_SECURITY_ENCRYPTION_TRANSPORT = 0x17 + SO_SNDBUF = 0x7 + SO_SNDBUFFORCE = 0x20 + SO_SNDLOWAT = 0x13 + SO_SNDTIMEO = 0x15 + SO_TIMESTAMP = 0x1d + SO_TIMESTAMPING = 0x25 + SO_TIMESTAMPNS = 0x23 + SO_TYPE = 0x3 + S_BLKSIZE = 0x200 + S_IEXEC = 0x40 + S_IFBLK = 0x6000 + S_IFCHR = 0x2000 + S_IFDIR = 0x4000 + S_IFIFO = 0x1000 + S_IFLNK = 0xa000 + S_IFMT = 0xf000 + S_IFREG = 0x8000 + S_IFSOCK = 0xc000 + S_IREAD = 0x100 + S_IRGRP = 0x20 + S_IROTH = 0x4 + S_IRUSR = 0x100 + S_IRWXG = 0x38 + S_IRWXO = 0x7 + S_IRWXU = 0x1c0 + S_ISGID = 0x400 + S_ISUID = 0x800 + S_ISVTX = 0x200 + S_IWGRP = 0x10 + S_IWOTH = 0x2 + S_IWRITE = 0x80 + S_IWUSR = 0x80 + S_IXGRP = 0x8 + S_IXOTH = 0x1 + S_IXUSR = 0x40 + TAB0 = 0x0 + TAB1 = 0x800 + TAB2 = 0x1000 + TAB3 = 0x1800 + TABDLY = 0x1800 + TCFLSH = 0x540b + TCGETA = 0x5405 + TCGETS = 0x5401 + TCGETS2 = 0x802c542a + TCGETX = 0x5432 + TCIFLUSH = 0x0 + TCIOFF = 0x2 + TCIOFLUSH = 0x2 + TCION = 0x3 + TCOFLUSH = 0x1 + TCOOFF = 0x0 + TCOON = 0x1 + TCP_CONGESTION = 0xd + TCP_CORK = 0x3 + TCP_DEFER_ACCEPT = 0x9 + TCP_INFO = 0xb + TCP_KEEPCNT = 0x6 + TCP_KEEPIDLE = 0x4 + TCP_KEEPINTVL = 0x5 + TCP_LINGER2 = 0x8 + TCP_MAXSEG = 0x2 + TCP_MAXWIN = 0xffff + TCP_MAX_WINSHIFT = 0xe + TCP_MD5SIG = 0xe + TCP_MD5SIG_MAXKEYLEN = 0x50 + TCP_MSS = 0x200 + TCP_NODELAY = 0x1 + TCP_QUICKACK = 0xc + TCP_SYNCNT = 0x7 + TCP_WINDOW_CLAMP = 0xa + TCSAFLUSH = 0x2 + TCSBRK = 0x5409 + TCSBRKP = 0x5425 + TCSETA = 0x5406 + TCSETAF = 0x5408 + 
TCSETAW = 0x5407 + TCSETS = 0x5402 + TCSETS2 = 0x402c542b + TCSETSF = 0x5404 + TCSETSF2 = 0x402c542d + TCSETSW = 0x5403 + TCSETSW2 = 0x402c542c + TCSETX = 0x5433 + TCSETXF = 0x5434 + TCSETXW = 0x5435 + TCXONC = 0x540a + TIOCCBRK = 0x5428 + TIOCCONS = 0x541d + TIOCEXCL = 0x540c + TIOCGDEV = 0x80045432 + TIOCGETD = 0x5424 + TIOCGEXCL = 0x80045440 + TIOCGICOUNT = 0x545d + TIOCGLCKTRMIOS = 0x5456 + TIOCGPGRP = 0x540f + TIOCGPKT = 0x80045438 + TIOCGPTLCK = 0x80045439 + TIOCGPTN = 0x80045430 + TIOCGRS485 = 0x542e + TIOCGSERIAL = 0x541e + TIOCGSID = 0x5429 + TIOCGSOFTCAR = 0x5419 + TIOCGWINSZ = 0x5413 + TIOCINQ = 0x541b + TIOCLINUX = 0x541c + TIOCMBIC = 0x5417 + TIOCMBIS = 0x5416 + TIOCMGET = 0x5415 + TIOCMIWAIT = 0x545c + TIOCMSET = 0x5418 + TIOCM_CAR = 0x40 + TIOCM_CD = 0x40 + TIOCM_CTS = 0x20 + TIOCM_DSR = 0x100 + TIOCM_DTR = 0x2 + TIOCM_LE = 0x1 + TIOCM_RI = 0x80 + TIOCM_RNG = 0x80 + TIOCM_RTS = 0x4 + TIOCM_SR = 0x10 + TIOCM_ST = 0x8 + TIOCNOTTY = 0x5422 + TIOCNXCL = 0x540d + TIOCOUTQ = 0x5411 + TIOCPKT = 0x5420 + TIOCPKT_DATA = 0x0 + TIOCPKT_DOSTOP = 0x20 + TIOCPKT_FLUSHREAD = 0x1 + TIOCPKT_FLUSHWRITE = 0x2 + TIOCPKT_IOCTL = 0x40 + TIOCPKT_NOSTOP = 0x10 + TIOCPKT_START = 0x8 + TIOCPKT_STOP = 0x4 + TIOCSBRK = 0x5427 + TIOCSCTTY = 0x540e + TIOCSERCONFIG = 0x5453 + TIOCSERGETLSR = 0x5459 + TIOCSERGETMULTI = 0x545a + TIOCSERGSTRUCT = 0x5458 + TIOCSERGWILD = 0x5454 + TIOCSERSETMULTI = 0x545b + TIOCSERSWILD = 0x5455 + TIOCSER_TEMT = 0x1 + TIOCSETD = 0x5423 + TIOCSIG = 0x40045436 + TIOCSLCKTRMIOS = 0x5457 + TIOCSPGRP = 0x5410 + TIOCSPTLCK = 0x40045431 + TIOCSRS485 = 0x542f + TIOCSSERIAL = 0x541f + TIOCSSOFTCAR = 0x541a + TIOCSTI = 0x5412 + TIOCSWINSZ = 0x5414 + TIOCVHANGUP = 0x5437 + TOSTOP = 0x100 + TUNATTACHFILTER = 0x400854d5 + TUNDETACHFILTER = 0x400854d6 + TUNGETFEATURES = 0x800454cf + TUNGETIFF = 0x800454d2 + TUNGETSNDBUF = 0x800454d3 + TUNGETVNETHDRSZ = 0x800454d7 + TUNSETDEBUG = 0x400454c9 + TUNSETGROUP = 0x400454ce + TUNSETIFF = 0x400454ca + TUNSETLINK = 0x400454cd + TUNSETNOCSUM = 0x400454c8 + TUNSETOFFLOAD = 0x400454d0 + TUNSETOWNER = 0x400454cc + TUNSETPERSIST = 0x400454cb + TUNSETSNDBUF = 0x400454d4 + TUNSETTXFILTER = 0x400454d1 + TUNSETVNETHDRSZ = 0x400454d8 + VDISCARD = 0xd + VEOF = 0x4 + VEOL = 0xb + VEOL2 = 0x10 + VERASE = 0x2 + VINTR = 0x0 + VKILL = 0x3 + VLNEXT = 0xf + VMIN = 0x6 + VQUIT = 0x1 + VREPRINT = 0xc + VSTART = 0x8 + VSTOP = 0x9 + VSUSP = 0xa + VSWTC = 0x7 + VT0 = 0x0 + VT1 = 0x4000 + VTDLY = 0x4000 + VTIME = 0x5 + VWERASE = 0xe + WALL = 0x40000000 + WCLONE = 0x80000000 + WCONTINUED = 0x8 + WEXITED = 0x4 + WNOHANG = 0x1 + WNOTHREAD = 0x20000000 + WNOWAIT = 0x1000000 + WORDSIZE = 0x20 + WSTOPPED = 0x2 + WUNTRACED = 0x2 + XCASE = 0x4 + XTABS = 0x1800 +) + +// Errors +const ( + E2BIG = syscall.Errno(0x7) + EACCES = syscall.Errno(0xd) + EADDRINUSE = syscall.Errno(0x62) + EADDRNOTAVAIL = syscall.Errno(0x63) + EADV = syscall.Errno(0x44) + EAFNOSUPPORT = syscall.Errno(0x61) + EAGAIN = syscall.Errno(0xb) + EALREADY = syscall.Errno(0x72) + EBADE = syscall.Errno(0x34) + EBADF = syscall.Errno(0x9) + EBADFD = syscall.Errno(0x4d) + EBADMSG = syscall.Errno(0x4a) + EBADR = syscall.Errno(0x35) + EBADRQC = syscall.Errno(0x38) + EBADSLT = syscall.Errno(0x39) + EBFONT = syscall.Errno(0x3b) + EBUSY = syscall.Errno(0x10) + ECANCELED = syscall.Errno(0x7d) + ECHILD = syscall.Errno(0xa) + ECHRNG = syscall.Errno(0x2c) + ECOMM = syscall.Errno(0x46) + ECONNABORTED = syscall.Errno(0x67) + ECONNREFUSED = syscall.Errno(0x6f) + ECONNRESET = syscall.Errno(0x68) + EDEADLK = syscall.Errno(0x23) + 
EDEADLOCK = syscall.Errno(0x23) + EDESTADDRREQ = syscall.Errno(0x59) + EDOM = syscall.Errno(0x21) + EDOTDOT = syscall.Errno(0x49) + EDQUOT = syscall.Errno(0x7a) + EEXIST = syscall.Errno(0x11) + EFAULT = syscall.Errno(0xe) + EFBIG = syscall.Errno(0x1b) + EHOSTDOWN = syscall.Errno(0x70) + EHOSTUNREACH = syscall.Errno(0x71) + EHWPOISON = syscall.Errno(0x85) + EIDRM = syscall.Errno(0x2b) + EILSEQ = syscall.Errno(0x54) + EINPROGRESS = syscall.Errno(0x73) + EINTR = syscall.Errno(0x4) + EINVAL = syscall.Errno(0x16) + EIO = syscall.Errno(0x5) + EISCONN = syscall.Errno(0x6a) + EISDIR = syscall.Errno(0x15) + EISNAM = syscall.Errno(0x78) + EKEYEXPIRED = syscall.Errno(0x7f) + EKEYREJECTED = syscall.Errno(0x81) + EKEYREVOKED = syscall.Errno(0x80) + EL2HLT = syscall.Errno(0x33) + EL2NSYNC = syscall.Errno(0x2d) + EL3HLT = syscall.Errno(0x2e) + EL3RST = syscall.Errno(0x2f) + ELIBACC = syscall.Errno(0x4f) + ELIBBAD = syscall.Errno(0x50) + ELIBEXEC = syscall.Errno(0x53) + ELIBMAX = syscall.Errno(0x52) + ELIBSCN = syscall.Errno(0x51) + ELNRNG = syscall.Errno(0x30) + ELOOP = syscall.Errno(0x28) + EMEDIUMTYPE = syscall.Errno(0x7c) + EMFILE = syscall.Errno(0x18) + EMLINK = syscall.Errno(0x1f) + EMSGSIZE = syscall.Errno(0x5a) + EMULTIHOP = syscall.Errno(0x48) + ENAMETOOLONG = syscall.Errno(0x24) + ENAVAIL = syscall.Errno(0x77) + ENETDOWN = syscall.Errno(0x64) + ENETRESET = syscall.Errno(0x66) + ENETUNREACH = syscall.Errno(0x65) + ENFILE = syscall.Errno(0x17) + ENOANO = syscall.Errno(0x37) + ENOBUFS = syscall.Errno(0x69) + ENOCSI = syscall.Errno(0x32) + ENODATA = syscall.Errno(0x3d) + ENODEV = syscall.Errno(0x13) + ENOENT = syscall.Errno(0x2) + ENOEXEC = syscall.Errno(0x8) + ENOKEY = syscall.Errno(0x7e) + ENOLCK = syscall.Errno(0x25) + ENOLINK = syscall.Errno(0x43) + ENOMEDIUM = syscall.Errno(0x7b) + ENOMEM = syscall.Errno(0xc) + ENOMSG = syscall.Errno(0x2a) + ENONET = syscall.Errno(0x40) + ENOPKG = syscall.Errno(0x41) + ENOPROTOOPT = syscall.Errno(0x5c) + ENOSPC = syscall.Errno(0x1c) + ENOSR = syscall.Errno(0x3f) + ENOSTR = syscall.Errno(0x3c) + ENOSYS = syscall.Errno(0x26) + ENOTBLK = syscall.Errno(0xf) + ENOTCONN = syscall.Errno(0x6b) + ENOTDIR = syscall.Errno(0x14) + ENOTEMPTY = syscall.Errno(0x27) + ENOTNAM = syscall.Errno(0x76) + ENOTRECOVERABLE = syscall.Errno(0x83) + ENOTSOCK = syscall.Errno(0x58) + ENOTSUP = syscall.Errno(0x5f) + ENOTTY = syscall.Errno(0x19) + ENOTUNIQ = syscall.Errno(0x4c) + ENXIO = syscall.Errno(0x6) + EOPNOTSUPP = syscall.Errno(0x5f) + EOVERFLOW = syscall.Errno(0x4b) + EOWNERDEAD = syscall.Errno(0x82) + EPERM = syscall.Errno(0x1) + EPFNOSUPPORT = syscall.Errno(0x60) + EPIPE = syscall.Errno(0x20) + EPROTO = syscall.Errno(0x47) + EPROTONOSUPPORT = syscall.Errno(0x5d) + EPROTOTYPE = syscall.Errno(0x5b) + ERANGE = syscall.Errno(0x22) + EREMCHG = syscall.Errno(0x4e) + EREMOTE = syscall.Errno(0x42) + EREMOTEIO = syscall.Errno(0x79) + ERESTART = syscall.Errno(0x55) + ERFKILL = syscall.Errno(0x84) + EROFS = syscall.Errno(0x1e) + ESHUTDOWN = syscall.Errno(0x6c) + ESOCKTNOSUPPORT = syscall.Errno(0x5e) + ESPIPE = syscall.Errno(0x1d) + ESRCH = syscall.Errno(0x3) + ESRMNT = syscall.Errno(0x45) + ESTALE = syscall.Errno(0x74) + ESTRPIPE = syscall.Errno(0x56) + ETIME = syscall.Errno(0x3e) + ETIMEDOUT = syscall.Errno(0x6e) + ETOOMANYREFS = syscall.Errno(0x6d) + ETXTBSY = syscall.Errno(0x1a) + EUCLEAN = syscall.Errno(0x75) + EUNATCH = syscall.Errno(0x31) + EUSERS = syscall.Errno(0x57) + EWOULDBLOCK = syscall.Errno(0xb) + EXDEV = syscall.Errno(0x12) + EXFULL = syscall.Errno(0x36) +) + +// Signals +const 
( + SIGABRT = syscall.Signal(0x6) + SIGALRM = syscall.Signal(0xe) + SIGBUS = syscall.Signal(0x7) + SIGCHLD = syscall.Signal(0x11) + SIGCLD = syscall.Signal(0x11) + SIGCONT = syscall.Signal(0x12) + SIGFPE = syscall.Signal(0x8) + SIGHUP = syscall.Signal(0x1) + SIGILL = syscall.Signal(0x4) + SIGINT = syscall.Signal(0x2) + SIGIO = syscall.Signal(0x1d) + SIGIOT = syscall.Signal(0x6) + SIGKILL = syscall.Signal(0x9) + SIGPIPE = syscall.Signal(0xd) + SIGPOLL = syscall.Signal(0x1d) + SIGPROF = syscall.Signal(0x1b) + SIGPWR = syscall.Signal(0x1e) + SIGQUIT = syscall.Signal(0x3) + SIGSEGV = syscall.Signal(0xb) + SIGSTKFLT = syscall.Signal(0x10) + SIGSTOP = syscall.Signal(0x13) + SIGSYS = syscall.Signal(0x1f) + SIGTERM = syscall.Signal(0xf) + SIGTRAP = syscall.Signal(0x5) + SIGTSTP = syscall.Signal(0x14) + SIGTTIN = syscall.Signal(0x15) + SIGTTOU = syscall.Signal(0x16) + SIGUNUSED = syscall.Signal(0x1f) + SIGURG = syscall.Signal(0x17) + SIGUSR1 = syscall.Signal(0xa) + SIGUSR2 = syscall.Signal(0xc) + SIGVTALRM = syscall.Signal(0x1a) + SIGWINCH = syscall.Signal(0x1c) + SIGXCPU = syscall.Signal(0x18) + SIGXFSZ = syscall.Signal(0x19) +) + +// Error table +var errors = [...]string{ + 1: "operation not permitted", + 2: "no such file or directory", + 3: "no such process", + 4: "interrupted system call", + 5: "input/output error", + 6: "no such device or address", + 7: "argument list too long", + 8: "exec format error", + 9: "bad file descriptor", + 10: "no child processes", + 11: "resource temporarily unavailable", + 12: "cannot allocate memory", + 13: "permission denied", + 14: "bad address", + 15: "block device required", + 16: "device or resource busy", + 17: "file exists", + 18: "invalid cross-device link", + 19: "no such device", + 20: "not a directory", + 21: "is a directory", + 22: "invalid argument", + 23: "too many open files in system", + 24: "too many open files", + 25: "inappropriate ioctl for device", + 26: "text file busy", + 27: "file too large", + 28: "no space left on device", + 29: "illegal seek", + 30: "read-only file system", + 31: "too many links", + 32: "broken pipe", + 33: "numerical argument out of domain", + 34: "numerical result out of range", + 35: "resource deadlock avoided", + 36: "file name too long", + 37: "no locks available", + 38: "function not implemented", + 39: "directory not empty", + 40: "too many levels of symbolic links", + 42: "no message of desired type", + 43: "identifier removed", + 44: "channel number out of range", + 45: "level 2 not synchronized", + 46: "level 3 halted", + 47: "level 3 reset", + 48: "link number out of range", + 49: "protocol driver not attached", + 50: "no CSI structure available", + 51: "level 2 halted", + 52: "invalid exchange", + 53: "invalid request descriptor", + 54: "exchange full", + 55: "no anode", + 56: "invalid request code", + 57: "invalid slot", + 59: "bad font file format", + 60: "device not a stream", + 61: "no data available", + 62: "timer expired", + 63: "out of streams resources", + 64: "machine is not on the network", + 65: "package not installed", + 66: "object is remote", + 67: "link has been severed", + 68: "advertise error", + 69: "srmount error", + 70: "communication error on send", + 71: "protocol error", + 72: "multihop attempted", + 73: "RFS specific error", + 74: "bad message", + 75: "value too large for defined data type", + 76: "name not unique on network", + 77: "file descriptor in bad state", + 78: "remote address changed", + 79: "can not access a needed shared library", + 80: "accessing a corrupted shared 
library", + 81: ".lib section in a.out corrupted", + 82: "attempting to link in too many shared libraries", + 83: "cannot exec a shared library directly", + 84: "invalid or incomplete multibyte or wide character", + 85: "interrupted system call should be restarted", + 86: "streams pipe error", + 87: "too many users", + 88: "socket operation on non-socket", + 89: "destination address required", + 90: "message too long", + 91: "protocol wrong type for socket", + 92: "protocol not available", + 93: "protocol not supported", + 94: "socket type not supported", + 95: "operation not supported", + 96: "protocol family not supported", + 97: "address family not supported by protocol", + 98: "address already in use", + 99: "cannot assign requested address", + 100: "network is down", + 101: "network is unreachable", + 102: "network dropped connection on reset", + 103: "software caused connection abort", + 104: "connection reset by peer", + 105: "no buffer space available", + 106: "transport endpoint is already connected", + 107: "transport endpoint is not connected", + 108: "cannot send after transport endpoint shutdown", + 109: "too many references: cannot splice", + 110: "connection timed out", + 111: "connection refused", + 112: "host is down", + 113: "no route to host", + 114: "operation already in progress", + 115: "operation now in progress", + 116: "stale NFS file handle", + 117: "structure needs cleaning", + 118: "not a XENIX named type file", + 119: "no XENIX semaphores available", + 120: "is a named type file", + 121: "remote I/O error", + 122: "disk quota exceeded", + 123: "no medium found", + 124: "wrong medium type", + 125: "operation canceled", + 126: "required key not available", + 127: "key has expired", + 128: "key has been revoked", + 129: "key was rejected by service", + 130: "owner died", + 131: "state not recoverable", + 132: "operation not possible due to RF-kill", + 133: "unknown error 133", +} + +// Signal table +var signals = [...]string{ + 1: "hangup", + 2: "interrupt", + 3: "quit", + 4: "illegal instruction", + 5: "trace/breakpoint trap", + 6: "aborted", + 7: "bus error", + 8: "floating point exception", + 9: "killed", + 10: "user defined signal 1", + 11: "segmentation fault", + 12: "user defined signal 2", + 13: "broken pipe", + 14: "alarm clock", + 15: "terminated", + 16: "stack fault", + 17: "child exited", + 18: "continued", + 19: "stopped (signal)", + 20: "stopped", + 21: "stopped (tty input)", + 22: "stopped (tty output)", + 23: "urgent I/O condition", + 24: "CPU time limit exceeded", + 25: "file size limit exceeded", + 26: "virtual timer expired", + 27: "profiling timer expired", + 28: "window changed", + 29: "I/O possible", + 30: "power failure", + 31: "bad system call", +} diff --git a/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zerrors_linux_arm64.go b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zerrors_linux_arm64.go new file mode 100644 index 000000000..47027b79c --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zerrors_linux_arm64.go @@ -0,0 +1,1896 @@ +// mkerrors.sh +// MACHINE GENERATED BY THE COMMAND ABOVE; DO NOT EDIT + +// +build arm64,linux + +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs -- _const.go + +package unix + +import "syscall" + +const ( + AF_ALG = 0x26 + AF_APPLETALK = 0x5 + AF_ASH = 0x12 + AF_ATMPVC = 0x8 + AF_ATMSVC = 0x14 + AF_AX25 = 0x3 + AF_BLUETOOTH = 0x1f + AF_BRIDGE = 0x7 + AF_CAIF = 0x25 + AF_CAN = 0x1d + AF_DECnet = 0xc + 
AF_ECONET = 0x13 + AF_FILE = 0x1 + AF_IEEE802154 = 0x24 + AF_INET = 0x2 + AF_INET6 = 0xa + AF_IPX = 0x4 + AF_IRDA = 0x17 + AF_ISDN = 0x22 + AF_IUCV = 0x20 + AF_KEY = 0xf + AF_LLC = 0x1a + AF_LOCAL = 0x1 + AF_MAX = 0x29 + AF_NETBEUI = 0xd + AF_NETLINK = 0x10 + AF_NETROM = 0x6 + AF_NFC = 0x27 + AF_PACKET = 0x11 + AF_PHONET = 0x23 + AF_PPPOX = 0x18 + AF_RDS = 0x15 + AF_ROSE = 0xb + AF_ROUTE = 0x10 + AF_RXRPC = 0x21 + AF_SECURITY = 0xe + AF_SNA = 0x16 + AF_TIPC = 0x1e + AF_UNIX = 0x1 + AF_UNSPEC = 0x0 + AF_VSOCK = 0x28 + AF_WANPIPE = 0x19 + AF_X25 = 0x9 + ARPHRD_ADAPT = 0x108 + ARPHRD_APPLETLK = 0x8 + ARPHRD_ARCNET = 0x7 + ARPHRD_ASH = 0x30d + ARPHRD_ATM = 0x13 + ARPHRD_AX25 = 0x3 + ARPHRD_BIF = 0x307 + ARPHRD_CAIF = 0x336 + ARPHRD_CAN = 0x118 + ARPHRD_CHAOS = 0x5 + ARPHRD_CISCO = 0x201 + ARPHRD_CSLIP = 0x101 + ARPHRD_CSLIP6 = 0x103 + ARPHRD_DDCMP = 0x205 + ARPHRD_DLCI = 0xf + ARPHRD_ECONET = 0x30e + ARPHRD_EETHER = 0x2 + ARPHRD_ETHER = 0x1 + ARPHRD_EUI64 = 0x1b + ARPHRD_FCAL = 0x311 + ARPHRD_FCFABRIC = 0x313 + ARPHRD_FCPL = 0x312 + ARPHRD_FCPP = 0x310 + ARPHRD_FDDI = 0x306 + ARPHRD_FRAD = 0x302 + ARPHRD_HDLC = 0x201 + ARPHRD_HIPPI = 0x30c + ARPHRD_HWX25 = 0x110 + ARPHRD_IEEE1394 = 0x18 + ARPHRD_IEEE802 = 0x6 + ARPHRD_IEEE80211 = 0x321 + ARPHRD_IEEE80211_PRISM = 0x322 + ARPHRD_IEEE80211_RADIOTAP = 0x323 + ARPHRD_IEEE802154 = 0x324 + ARPHRD_IEEE802154_MONITOR = 0x325 + ARPHRD_IEEE802_TR = 0x320 + ARPHRD_INFINIBAND = 0x20 + ARPHRD_IP6GRE = 0x337 + ARPHRD_IPDDP = 0x309 + ARPHRD_IPGRE = 0x30a + ARPHRD_IRDA = 0x30f + ARPHRD_LAPB = 0x204 + ARPHRD_LOCALTLK = 0x305 + ARPHRD_LOOPBACK = 0x304 + ARPHRD_METRICOM = 0x17 + ARPHRD_NETLINK = 0x338 + ARPHRD_NETROM = 0x0 + ARPHRD_NONE = 0xfffe + ARPHRD_PHONET = 0x334 + ARPHRD_PHONET_PIPE = 0x335 + ARPHRD_PIMREG = 0x30b + ARPHRD_PPP = 0x200 + ARPHRD_PRONET = 0x4 + ARPHRD_RAWHDLC = 0x206 + ARPHRD_ROSE = 0x10e + ARPHRD_RSRVD = 0x104 + ARPHRD_SIT = 0x308 + ARPHRD_SKIP = 0x303 + ARPHRD_SLIP = 0x100 + ARPHRD_SLIP6 = 0x102 + ARPHRD_TUNNEL = 0x300 + ARPHRD_TUNNEL6 = 0x301 + ARPHRD_VOID = 0xffff + ARPHRD_X25 = 0x10f + B0 = 0x0 + B1000000 = 0x1008 + B110 = 0x3 + B115200 = 0x1002 + B1152000 = 0x1009 + B1200 = 0x9 + B134 = 0x4 + B150 = 0x5 + B1500000 = 0x100a + B1800 = 0xa + B19200 = 0xe + B200 = 0x6 + B2000000 = 0x100b + B230400 = 0x1003 + B2400 = 0xb + B2500000 = 0x100c + B300 = 0x7 + B3000000 = 0x100d + B3500000 = 0x100e + B38400 = 0xf + B4000000 = 0x100f + B460800 = 0x1004 + B4800 = 0xc + B50 = 0x1 + B500000 = 0x1005 + B57600 = 0x1001 + B576000 = 0x1006 + B600 = 0x8 + B75 = 0x2 + B921600 = 0x1007 + B9600 = 0xd + BOTHER = 0x1000 + BPF_A = 0x10 + BPF_ABS = 0x20 + BPF_ADD = 0x0 + BPF_ALU = 0x4 + BPF_AND = 0x50 + BPF_B = 0x10 + BPF_DIV = 0x30 + BPF_H = 0x8 + BPF_IMM = 0x0 + BPF_IND = 0x40 + BPF_JA = 0x0 + BPF_JEQ = 0x10 + BPF_JGE = 0x30 + BPF_JGT = 0x20 + BPF_JMP = 0x5 + BPF_JSET = 0x40 + BPF_K = 0x0 + BPF_LD = 0x0 + BPF_LDX = 0x1 + BPF_LEN = 0x80 + BPF_LSH = 0x60 + BPF_MAJOR_VERSION = 0x1 + BPF_MAXINSNS = 0x1000 + BPF_MEM = 0x60 + BPF_MEMWORDS = 0x10 + BPF_MINOR_VERSION = 0x1 + BPF_MISC = 0x7 + BPF_MOD = 0x90 + BPF_MSH = 0xa0 + BPF_MUL = 0x20 + BPF_NEG = 0x80 + BPF_OR = 0x40 + BPF_RET = 0x6 + BPF_RSH = 0x70 + BPF_ST = 0x2 + BPF_STX = 0x3 + BPF_SUB = 0x10 + BPF_TAX = 0x0 + BPF_TXA = 0x80 + BPF_W = 0x0 + BPF_X = 0x8 + BPF_XOR = 0xa0 + BRKINT = 0x2 + BS0 = 0x0 + BS1 = 0x2000 + BSDLY = 0x2000 + CBAUD = 0x100f + CBAUDEX = 0x1000 + CFLUSH = 0xf + CIBAUD = 0x100f0000 + CLOCAL = 0x800 + CLOCK_BOOTTIME = 0x7 + CLOCK_BOOTTIME_ALARM = 0x9 + CLOCK_DEFAULT = 0x0 + CLOCK_EXT = 0x1 + 
CLOCK_INT = 0x2 + CLOCK_MONOTONIC = 0x1 + CLOCK_MONOTONIC_COARSE = 0x6 + CLOCK_MONOTONIC_RAW = 0x4 + CLOCK_PROCESS_CPUTIME_ID = 0x2 + CLOCK_REALTIME = 0x0 + CLOCK_REALTIME_ALARM = 0x8 + CLOCK_REALTIME_COARSE = 0x5 + CLOCK_THREAD_CPUTIME_ID = 0x3 + CLOCK_TXFROMRX = 0x4 + CLOCK_TXINT = 0x3 + CLONE_CHILD_CLEARTID = 0x200000 + CLONE_CHILD_SETTID = 0x1000000 + CLONE_DETACHED = 0x400000 + CLONE_FILES = 0x400 + CLONE_FS = 0x200 + CLONE_IO = 0x80000000 + CLONE_NEWIPC = 0x8000000 + CLONE_NEWNET = 0x40000000 + CLONE_NEWNS = 0x20000 + CLONE_NEWPID = 0x20000000 + CLONE_NEWUSER = 0x10000000 + CLONE_NEWUTS = 0x4000000 + CLONE_PARENT = 0x8000 + CLONE_PARENT_SETTID = 0x100000 + CLONE_PTRACE = 0x2000 + CLONE_SETTLS = 0x80000 + CLONE_SIGHAND = 0x800 + CLONE_SYSVSEM = 0x40000 + CLONE_THREAD = 0x10000 + CLONE_UNTRACED = 0x800000 + CLONE_VFORK = 0x4000 + CLONE_VM = 0x100 + CMSPAR = 0x40000000 + CR0 = 0x0 + CR1 = 0x200 + CR2 = 0x400 + CR3 = 0x600 + CRDLY = 0x600 + CREAD = 0x80 + CRTSCTS = 0x80000000 + CS5 = 0x0 + CS6 = 0x10 + CS7 = 0x20 + CS8 = 0x30 + CSIGNAL = 0xff + CSIZE = 0x30 + CSTART = 0x11 + CSTATUS = 0x0 + CSTOP = 0x13 + CSTOPB = 0x40 + CSUSP = 0x1a + DT_BLK = 0x6 + DT_CHR = 0x2 + DT_DIR = 0x4 + DT_FIFO = 0x1 + DT_LNK = 0xa + DT_REG = 0x8 + DT_SOCK = 0xc + DT_UNKNOWN = 0x0 + DT_WHT = 0xe + ECHO = 0x8 + ECHOCTL = 0x200 + ECHOE = 0x10 + ECHOK = 0x20 + ECHOKE = 0x800 + ECHONL = 0x40 + ECHOPRT = 0x400 + ELF_NGREG = 0x22 + ELF_PRARGSZ = 0x50 + ENCODING_DEFAULT = 0x0 + ENCODING_FM_MARK = 0x3 + ENCODING_FM_SPACE = 0x4 + ENCODING_MANCHESTER = 0x5 + ENCODING_NRZ = 0x1 + ENCODING_NRZI = 0x2 + EPOLLERR = 0x8 + EPOLLET = 0x80000000 + EPOLLHUP = 0x10 + EPOLLIN = 0x1 + EPOLLMSG = 0x400 + EPOLLONESHOT = 0x40000000 + EPOLLOUT = 0x4 + EPOLLPRI = 0x2 + EPOLLRDBAND = 0x80 + EPOLLRDHUP = 0x2000 + EPOLLRDNORM = 0x40 + EPOLLWAKEUP = 0x20000000 + EPOLLWRBAND = 0x200 + EPOLLWRNORM = 0x100 + EPOLL_CLOEXEC = 0x80000 + EPOLL_CTL_ADD = 0x1 + EPOLL_CTL_DEL = 0x2 + EPOLL_CTL_MOD = 0x3 + ETH_P_1588 = 0x88f7 + ETH_P_8021AD = 0x88a8 + ETH_P_8021AH = 0x88e7 + ETH_P_8021Q = 0x8100 + ETH_P_802_2 = 0x4 + ETH_P_802_3 = 0x1 + ETH_P_802_3_MIN = 0x600 + ETH_P_802_EX1 = 0x88b5 + ETH_P_AARP = 0x80f3 + ETH_P_AF_IUCV = 0xfbfb + ETH_P_ALL = 0x3 + ETH_P_AOE = 0x88a2 + ETH_P_ARCNET = 0x1a + ETH_P_ARP = 0x806 + ETH_P_ATALK = 0x809b + ETH_P_ATMFATE = 0x8884 + ETH_P_ATMMPOA = 0x884c + ETH_P_AX25 = 0x2 + ETH_P_BATMAN = 0x4305 + ETH_P_BPQ = 0x8ff + ETH_P_CAIF = 0xf7 + ETH_P_CAN = 0xc + ETH_P_CANFD = 0xd + ETH_P_CONTROL = 0x16 + ETH_P_CUST = 0x6006 + ETH_P_DDCMP = 0x6 + ETH_P_DEC = 0x6000 + ETH_P_DIAG = 0x6005 + ETH_P_DNA_DL = 0x6001 + ETH_P_DNA_RC = 0x6002 + ETH_P_DNA_RT = 0x6003 + ETH_P_DSA = 0x1b + ETH_P_ECONET = 0x18 + ETH_P_EDSA = 0xdada + ETH_P_FCOE = 0x8906 + ETH_P_FIP = 0x8914 + ETH_P_HDLC = 0x19 + ETH_P_IEEE802154 = 0xf6 + ETH_P_IEEEPUP = 0xa00 + ETH_P_IEEEPUPAT = 0xa01 + ETH_P_IP = 0x800 + ETH_P_IPV6 = 0x86dd + ETH_P_IPX = 0x8137 + ETH_P_IRDA = 0x17 + ETH_P_LAT = 0x6004 + ETH_P_LINK_CTL = 0x886c + ETH_P_LOCALTALK = 0x9 + ETH_P_LOOP = 0x60 + ETH_P_MOBITEX = 0x15 + ETH_P_MPLS_MC = 0x8848 + ETH_P_MPLS_UC = 0x8847 + ETH_P_MVRP = 0x88f5 + ETH_P_PAE = 0x888e + ETH_P_PAUSE = 0x8808 + ETH_P_PHONET = 0xf5 + ETH_P_PPPTALK = 0x10 + ETH_P_PPP_DISC = 0x8863 + ETH_P_PPP_MP = 0x8 + ETH_P_PPP_SES = 0x8864 + ETH_P_PRP = 0x88fb + ETH_P_PUP = 0x200 + ETH_P_PUPAT = 0x201 + ETH_P_QINQ1 = 0x9100 + ETH_P_QINQ2 = 0x9200 + ETH_P_QINQ3 = 0x9300 + ETH_P_RARP = 0x8035 + ETH_P_SCA = 0x6007 + ETH_P_SLOW = 0x8809 + ETH_P_SNAP = 0x5 + ETH_P_TDLS = 0x890d + ETH_P_TEB = 0x6558 + 
ETH_P_TIPC = 0x88ca + ETH_P_TRAILER = 0x1c + ETH_P_TR_802_2 = 0x11 + ETH_P_WAN_PPP = 0x7 + ETH_P_WCCP = 0x883e + ETH_P_X25 = 0x805 + EXTA = 0xe + EXTB = 0xf + EXTPROC = 0x10000 + FD_CLOEXEC = 0x1 + FD_SETSIZE = 0x400 + FF0 = 0x0 + FF1 = 0x8000 + FFDLY = 0x8000 + FLUSHO = 0x1000 + F_DUPFD = 0x0 + F_DUPFD_CLOEXEC = 0x406 + F_EXLCK = 0x4 + F_GETFD = 0x1 + F_GETFL = 0x3 + F_GETLEASE = 0x401 + F_GETLK = 0x5 + F_GETLK64 = 0x5 + F_GETOWN = 0x9 + F_GETOWN_EX = 0x10 + F_GETPIPE_SZ = 0x408 + F_GETSIG = 0xb + F_LOCK = 0x1 + F_NOTIFY = 0x402 + F_OK = 0x0 + F_RDLCK = 0x0 + F_SETFD = 0x2 + F_SETFL = 0x4 + F_SETLEASE = 0x400 + F_SETLK = 0x6 + F_SETLK64 = 0x6 + F_SETLKW = 0x7 + F_SETLKW64 = 0x7 + F_SETOWN = 0x8 + F_SETOWN_EX = 0xf + F_SETPIPE_SZ = 0x407 + F_SETSIG = 0xa + F_SHLCK = 0x8 + F_TEST = 0x3 + F_TLOCK = 0x2 + F_ULOCK = 0x0 + F_UNLCK = 0x2 + F_WRLCK = 0x1 + HUPCL = 0x400 + IBSHIFT = 0x10 + ICANON = 0x2 + ICMPV6_FILTER = 0x1 + ICRNL = 0x100 + IEXTEN = 0x8000 + IFA_F_DADFAILED = 0x8 + IFA_F_DEPRECATED = 0x20 + IFA_F_HOMEADDRESS = 0x10 + IFA_F_NODAD = 0x2 + IFA_F_OPTIMISTIC = 0x4 + IFA_F_PERMANENT = 0x80 + IFA_F_SECONDARY = 0x1 + IFA_F_TEMPORARY = 0x1 + IFA_F_TENTATIVE = 0x40 + IFA_MAX = 0x7 + IFF_802_1Q_VLAN = 0x1 + IFF_ALLMULTI = 0x200 + IFF_ATTACH_QUEUE = 0x200 + IFF_AUTOMEDIA = 0x4000 + IFF_BONDING = 0x20 + IFF_BRIDGE_PORT = 0x4000 + IFF_BROADCAST = 0x2 + IFF_DEBUG = 0x4 + IFF_DETACH_QUEUE = 0x400 + IFF_DISABLE_NETPOLL = 0x1000 + IFF_DONT_BRIDGE = 0x800 + IFF_DORMANT = 0x20000 + IFF_DYNAMIC = 0x8000 + IFF_EBRIDGE = 0x2 + IFF_ECHO = 0x40000 + IFF_ISATAP = 0x80 + IFF_LIVE_ADDR_CHANGE = 0x100000 + IFF_LOOPBACK = 0x8 + IFF_LOWER_UP = 0x10000 + IFF_MACVLAN = 0x200000 + IFF_MACVLAN_PORT = 0x2000 + IFF_MASTER = 0x400 + IFF_MASTER_8023AD = 0x8 + IFF_MASTER_ALB = 0x10 + IFF_MASTER_ARPMON = 0x100 + IFF_MULTICAST = 0x1000 + IFF_MULTI_QUEUE = 0x100 + IFF_NOARP = 0x80 + IFF_NOFILTER = 0x1000 + IFF_NOTRAILERS = 0x20 + IFF_NO_PI = 0x1000 + IFF_ONE_QUEUE = 0x2000 + IFF_OVS_DATAPATH = 0x8000 + IFF_PERSIST = 0x800 + IFF_POINTOPOINT = 0x10 + IFF_PORTSEL = 0x2000 + IFF_PROMISC = 0x100 + IFF_RUNNING = 0x40 + IFF_SLAVE = 0x800 + IFF_SLAVE_INACTIVE = 0x4 + IFF_SLAVE_NEEDARP = 0x40 + IFF_SUPP_NOFCS = 0x80000 + IFF_TAP = 0x2 + IFF_TEAM_PORT = 0x40000 + IFF_TUN = 0x1 + IFF_TUN_EXCL = 0x8000 + IFF_TX_SKB_SHARING = 0x10000 + IFF_UNICAST_FLT = 0x20000 + IFF_UP = 0x1 + IFF_VNET_HDR = 0x4000 + IFF_VOLATILE = 0x70c5a + IFF_WAN_HDLC = 0x200 + IFF_XMIT_DST_RELEASE = 0x400 + IFNAMSIZ = 0x10 + IGNBRK = 0x1 + IGNCR = 0x80 + IGNPAR = 0x4 + IMAXBEL = 0x2000 + INLCR = 0x40 + INPCK = 0x10 + IN_ACCESS = 0x1 + IN_ALL_EVENTS = 0xfff + IN_ATTRIB = 0x4 + IN_CLASSA_HOST = 0xffffff + IN_CLASSA_MAX = 0x80 + IN_CLASSA_NET = 0xff000000 + IN_CLASSA_NSHIFT = 0x18 + IN_CLASSB_HOST = 0xffff + IN_CLASSB_MAX = 0x10000 + IN_CLASSB_NET = 0xffff0000 + IN_CLASSB_NSHIFT = 0x10 + IN_CLASSC_HOST = 0xff + IN_CLASSC_NET = 0xffffff00 + IN_CLASSC_NSHIFT = 0x8 + IN_CLOEXEC = 0x80000 + IN_CLOSE = 0x18 + IN_CLOSE_NOWRITE = 0x10 + IN_CLOSE_WRITE = 0x8 + IN_CREATE = 0x100 + IN_DELETE = 0x200 + IN_DELETE_SELF = 0x400 + IN_DONT_FOLLOW = 0x2000000 + IN_EXCL_UNLINK = 0x4000000 + IN_IGNORED = 0x8000 + IN_ISDIR = 0x40000000 + IN_LOOPBACKNET = 0x7f + IN_MASK_ADD = 0x20000000 + IN_MODIFY = 0x2 + IN_MOVE = 0xc0 + IN_MOVED_FROM = 0x40 + IN_MOVED_TO = 0x80 + IN_MOVE_SELF = 0x800 + IN_NONBLOCK = 0x800 + IN_ONESHOT = 0x80000000 + IN_ONLYDIR = 0x1000000 + IN_OPEN = 0x20 + IN_Q_OVERFLOW = 0x4000 + IN_UNMOUNT = 0x2000 + IPPROTO_AH = 0x33 + IPPROTO_BEETPH = 0x5e + IPPROTO_COMP = 0x6c 
+ IPPROTO_DCCP = 0x21 + IPPROTO_DSTOPTS = 0x3c + IPPROTO_EGP = 0x8 + IPPROTO_ENCAP = 0x62 + IPPROTO_ESP = 0x32 + IPPROTO_FRAGMENT = 0x2c + IPPROTO_GRE = 0x2f + IPPROTO_HOPOPTS = 0x0 + IPPROTO_ICMP = 0x1 + IPPROTO_ICMPV6 = 0x3a + IPPROTO_IDP = 0x16 + IPPROTO_IGMP = 0x2 + IPPROTO_IP = 0x0 + IPPROTO_IPIP = 0x4 + IPPROTO_IPV6 = 0x29 + IPPROTO_MH = 0x87 + IPPROTO_MTP = 0x5c + IPPROTO_NONE = 0x3b + IPPROTO_PIM = 0x67 + IPPROTO_PUP = 0xc + IPPROTO_RAW = 0xff + IPPROTO_ROUTING = 0x2b + IPPROTO_RSVP = 0x2e + IPPROTO_SCTP = 0x84 + IPPROTO_TCP = 0x6 + IPPROTO_TP = 0x1d + IPPROTO_UDP = 0x11 + IPPROTO_UDPLITE = 0x88 + IPV6_2292DSTOPTS = 0x4 + IPV6_2292HOPLIMIT = 0x8 + IPV6_2292HOPOPTS = 0x3 + IPV6_2292PKTINFO = 0x2 + IPV6_2292PKTOPTIONS = 0x6 + IPV6_2292RTHDR = 0x5 + IPV6_ADDRFORM = 0x1 + IPV6_ADD_MEMBERSHIP = 0x14 + IPV6_AUTHHDR = 0xa + IPV6_CHECKSUM = 0x7 + IPV6_DROP_MEMBERSHIP = 0x15 + IPV6_DSTOPTS = 0x3b + IPV6_HOPLIMIT = 0x34 + IPV6_HOPOPTS = 0x36 + IPV6_IPSEC_POLICY = 0x22 + IPV6_JOIN_ANYCAST = 0x1b + IPV6_JOIN_GROUP = 0x14 + IPV6_LEAVE_ANYCAST = 0x1c + IPV6_LEAVE_GROUP = 0x15 + IPV6_MTU = 0x18 + IPV6_MTU_DISCOVER = 0x17 + IPV6_MULTICAST_HOPS = 0x12 + IPV6_MULTICAST_IF = 0x11 + IPV6_MULTICAST_LOOP = 0x13 + IPV6_NEXTHOP = 0x9 + IPV6_PKTINFO = 0x32 + IPV6_PMTUDISC_DO = 0x2 + IPV6_PMTUDISC_DONT = 0x0 + IPV6_PMTUDISC_PROBE = 0x3 + IPV6_PMTUDISC_WANT = 0x1 + IPV6_RECVDSTOPTS = 0x3a + IPV6_RECVERR = 0x19 + IPV6_RECVHOPLIMIT = 0x33 + IPV6_RECVHOPOPTS = 0x35 + IPV6_RECVPKTINFO = 0x31 + IPV6_RECVRTHDR = 0x38 + IPV6_RECVTCLASS = 0x42 + IPV6_ROUTER_ALERT = 0x16 + IPV6_RTHDR = 0x39 + IPV6_RTHDRDSTOPTS = 0x37 + IPV6_RTHDR_LOOSE = 0x0 + IPV6_RTHDR_STRICT = 0x1 + IPV6_RTHDR_TYPE_0 = 0x0 + IPV6_RXDSTOPTS = 0x3b + IPV6_RXHOPOPTS = 0x36 + IPV6_TCLASS = 0x43 + IPV6_UNICAST_HOPS = 0x10 + IPV6_V6ONLY = 0x1a + IPV6_XFRM_POLICY = 0x23 + IP_ADD_MEMBERSHIP = 0x23 + IP_ADD_SOURCE_MEMBERSHIP = 0x27 + IP_BLOCK_SOURCE = 0x26 + IP_DEFAULT_MULTICAST_LOOP = 0x1 + IP_DEFAULT_MULTICAST_TTL = 0x1 + IP_DF = 0x4000 + IP_DROP_MEMBERSHIP = 0x24 + IP_DROP_SOURCE_MEMBERSHIP = 0x28 + IP_FREEBIND = 0xf + IP_HDRINCL = 0x3 + IP_IPSEC_POLICY = 0x10 + IP_MAXPACKET = 0xffff + IP_MAX_MEMBERSHIPS = 0x14 + IP_MF = 0x2000 + IP_MINTTL = 0x15 + IP_MSFILTER = 0x29 + IP_MSS = 0x240 + IP_MTU = 0xe + IP_MTU_DISCOVER = 0xa + IP_MULTICAST_ALL = 0x31 + IP_MULTICAST_IF = 0x20 + IP_MULTICAST_LOOP = 0x22 + IP_MULTICAST_TTL = 0x21 + IP_OFFMASK = 0x1fff + IP_OPTIONS = 0x4 + IP_ORIGDSTADDR = 0x14 + IP_PASSSEC = 0x12 + IP_PKTINFO = 0x8 + IP_PKTOPTIONS = 0x9 + IP_PMTUDISC = 0xa + IP_PMTUDISC_DO = 0x2 + IP_PMTUDISC_DONT = 0x0 + IP_PMTUDISC_PROBE = 0x3 + IP_PMTUDISC_WANT = 0x1 + IP_RECVERR = 0xb + IP_RECVOPTS = 0x6 + IP_RECVORIGDSTADDR = 0x14 + IP_RECVRETOPTS = 0x7 + IP_RECVTOS = 0xd + IP_RECVTTL = 0xc + IP_RETOPTS = 0x7 + IP_RF = 0x8000 + IP_ROUTER_ALERT = 0x5 + IP_TOS = 0x1 + IP_TRANSPARENT = 0x13 + IP_TTL = 0x2 + IP_UNBLOCK_SOURCE = 0x25 + IP_UNICAST_IF = 0x32 + IP_XFRM_POLICY = 0x11 + ISIG = 0x1 + ISTRIP = 0x20 + IUCLC = 0x200 + IUTF8 = 0x4000 + IXANY = 0x800 + IXOFF = 0x1000 + IXON = 0x400 + LINUX_REBOOT_CMD_CAD_OFF = 0x0 + LINUX_REBOOT_CMD_CAD_ON = 0x89abcdef + LINUX_REBOOT_CMD_HALT = 0xcdef0123 + LINUX_REBOOT_CMD_KEXEC = 0x45584543 + LINUX_REBOOT_CMD_POWER_OFF = 0x4321fedc + LINUX_REBOOT_CMD_RESTART = 0x1234567 + LINUX_REBOOT_CMD_RESTART2 = 0xa1b2c3d4 + LINUX_REBOOT_CMD_SW_SUSPEND = 0xd000fce2 + LINUX_REBOOT_MAGIC1 = 0xfee1dead + LINUX_REBOOT_MAGIC2 = 0x28121969 + LOCK_EX = 0x2 + LOCK_NB = 0x4 + LOCK_SH = 0x1 + LOCK_UN = 0x8 + MADV_DODUMP = 0x11 + MADV_DOFORK 
= 0xb + MADV_DONTDUMP = 0x10 + MADV_DONTFORK = 0xa + MADV_DONTNEED = 0x4 + MADV_HUGEPAGE = 0xe + MADV_HWPOISON = 0x64 + MADV_MERGEABLE = 0xc + MADV_NOHUGEPAGE = 0xf + MADV_NORMAL = 0x0 + MADV_RANDOM = 0x1 + MADV_REMOVE = 0x9 + MADV_SEQUENTIAL = 0x2 + MADV_UNMERGEABLE = 0xd + MADV_WILLNEED = 0x3 + MAP_ANON = 0x20 + MAP_ANONYMOUS = 0x20 + MAP_DENYWRITE = 0x800 + MAP_EXECUTABLE = 0x1000 + MAP_FILE = 0x0 + MAP_FIXED = 0x10 + MAP_GROWSDOWN = 0x100 + MAP_HUGETLB = 0x40000 + MAP_HUGE_MASK = 0x3f + MAP_HUGE_SHIFT = 0x1a + MAP_LOCKED = 0x2000 + MAP_NONBLOCK = 0x10000 + MAP_NORESERVE = 0x4000 + MAP_POPULATE = 0x8000 + MAP_PRIVATE = 0x2 + MAP_SHARED = 0x1 + MAP_STACK = 0x20000 + MAP_TYPE = 0xf + MCL_CURRENT = 0x1 + MCL_FUTURE = 0x2 + MNT_DETACH = 0x2 + MNT_EXPIRE = 0x4 + MNT_FORCE = 0x1 + MSG_CMSG_CLOEXEC = 0x40000000 + MSG_CONFIRM = 0x800 + MSG_CTRUNC = 0x8 + MSG_DONTROUTE = 0x4 + MSG_DONTWAIT = 0x40 + MSG_EOR = 0x80 + MSG_ERRQUEUE = 0x2000 + MSG_FASTOPEN = 0x20000000 + MSG_FIN = 0x200 + MSG_MORE = 0x8000 + MSG_NOSIGNAL = 0x4000 + MSG_OOB = 0x1 + MSG_PEEK = 0x2 + MSG_PROXY = 0x10 + MSG_RST = 0x1000 + MSG_SYN = 0x400 + MSG_TRUNC = 0x20 + MSG_TRYHARD = 0x4 + MSG_WAITALL = 0x100 + MSG_WAITFORONE = 0x10000 + MS_ACTIVE = 0x40000000 + MS_ASYNC = 0x1 + MS_BIND = 0x1000 + MS_DIRSYNC = 0x80 + MS_INVALIDATE = 0x2 + MS_I_VERSION = 0x800000 + MS_KERNMOUNT = 0x400000 + MS_MANDLOCK = 0x40 + MS_MGC_MSK = 0xffff0000 + MS_MGC_VAL = 0xc0ed0000 + MS_MOVE = 0x2000 + MS_NOATIME = 0x400 + MS_NODEV = 0x4 + MS_NODIRATIME = 0x800 + MS_NOEXEC = 0x8 + MS_NOSUID = 0x2 + MS_NOUSER = -0x80000000 + MS_POSIXACL = 0x10000 + MS_PRIVATE = 0x40000 + MS_RDONLY = 0x1 + MS_REC = 0x4000 + MS_RELATIME = 0x200000 + MS_REMOUNT = 0x20 + MS_RMT_MASK = 0x800051 + MS_SHARED = 0x100000 + MS_SILENT = 0x8000 + MS_SLAVE = 0x80000 + MS_STRICTATIME = 0x1000000 + MS_SYNC = 0x4 + MS_SYNCHRONOUS = 0x10 + MS_UNBINDABLE = 0x20000 + NAME_MAX = 0xff + NETLINK_ADD_MEMBERSHIP = 0x1 + NETLINK_AUDIT = 0x9 + NETLINK_BROADCAST_ERROR = 0x4 + NETLINK_CONNECTOR = 0xb + NETLINK_CRYPTO = 0x15 + NETLINK_DNRTMSG = 0xe + NETLINK_DROP_MEMBERSHIP = 0x2 + NETLINK_ECRYPTFS = 0x13 + NETLINK_FIB_LOOKUP = 0xa + NETLINK_FIREWALL = 0x3 + NETLINK_GENERIC = 0x10 + NETLINK_INET_DIAG = 0x4 + NETLINK_IP6_FW = 0xd + NETLINK_ISCSI = 0x8 + NETLINK_KOBJECT_UEVENT = 0xf + NETLINK_NETFILTER = 0xc + NETLINK_NFLOG = 0x5 + NETLINK_NO_ENOBUFS = 0x5 + NETLINK_PKTINFO = 0x3 + NETLINK_RDMA = 0x14 + NETLINK_ROUTE = 0x0 + NETLINK_RX_RING = 0x6 + NETLINK_SCSITRANSPORT = 0x12 + NETLINK_SELINUX = 0x7 + NETLINK_SOCK_DIAG = 0x4 + NETLINK_TX_RING = 0x7 + NETLINK_UNUSED = 0x1 + NETLINK_USERSOCK = 0x2 + NETLINK_XFRM = 0x6 + NL0 = 0x0 + NL1 = 0x100 + NLA_ALIGNTO = 0x4 + NLA_F_NESTED = 0x8000 + NLA_F_NET_BYTEORDER = 0x4000 + NLA_HDRLEN = 0x4 + NLDLY = 0x100 + NLMSG_ALIGNTO = 0x4 + NLMSG_DONE = 0x3 + NLMSG_ERROR = 0x2 + NLMSG_HDRLEN = 0x10 + NLMSG_MIN_TYPE = 0x10 + NLMSG_NOOP = 0x1 + NLMSG_OVERRUN = 0x4 + NLM_F_ACK = 0x4 + NLM_F_APPEND = 0x800 + NLM_F_ATOMIC = 0x400 + NLM_F_CREATE = 0x400 + NLM_F_DUMP = 0x300 + NLM_F_DUMP_INTR = 0x10 + NLM_F_ECHO = 0x8 + NLM_F_EXCL = 0x200 + NLM_F_MATCH = 0x200 + NLM_F_MULTI = 0x2 + NLM_F_REPLACE = 0x100 + NLM_F_REQUEST = 0x1 + NLM_F_ROOT = 0x100 + NOFLSH = 0x80 + OCRNL = 0x8 + OFDEL = 0x80 + OFILL = 0x40 + OLCUC = 0x2 + ONLCR = 0x4 + ONLRET = 0x20 + ONOCR = 0x10 + OPOST = 0x1 + O_ACCMODE = 0x3 + O_APPEND = 0x400 + O_ASYNC = 0x2000 + O_CLOEXEC = 0x80000 + O_CREAT = 0x40 + O_DIRECT = 0x10000 + O_DIRECTORY = 0x4000 + O_DSYNC = 0x1000 + O_EXCL = 0x80 + O_FSYNC = 0x101000 + 
O_LARGEFILE = 0x0 + O_NDELAY = 0x800 + O_NOATIME = 0x40000 + O_NOCTTY = 0x100 + O_NOFOLLOW = 0x8000 + O_NONBLOCK = 0x800 + O_PATH = 0x200000 + O_RDONLY = 0x0 + O_RDWR = 0x2 + O_RSYNC = 0x101000 + O_SYNC = 0x101000 + O_TMPFILE = 0x410000 + O_TRUNC = 0x200 + O_WRONLY = 0x1 + PACKET_ADD_MEMBERSHIP = 0x1 + PACKET_AUXDATA = 0x8 + PACKET_BROADCAST = 0x1 + PACKET_COPY_THRESH = 0x7 + PACKET_DROP_MEMBERSHIP = 0x2 + PACKET_FANOUT = 0x12 + PACKET_FANOUT_CPU = 0x2 + PACKET_FANOUT_FLAG_DEFRAG = 0x8000 + PACKET_FANOUT_FLAG_ROLLOVER = 0x1000 + PACKET_FANOUT_HASH = 0x0 + PACKET_FANOUT_LB = 0x1 + PACKET_FANOUT_RND = 0x4 + PACKET_FANOUT_ROLLOVER = 0x3 + PACKET_FASTROUTE = 0x6 + PACKET_HDRLEN = 0xb + PACKET_HOST = 0x0 + PACKET_LOOPBACK = 0x5 + PACKET_LOSS = 0xe + PACKET_MR_ALLMULTI = 0x2 + PACKET_MR_MULTICAST = 0x0 + PACKET_MR_PROMISC = 0x1 + PACKET_MR_UNICAST = 0x3 + PACKET_MULTICAST = 0x2 + PACKET_ORIGDEV = 0x9 + PACKET_OTHERHOST = 0x3 + PACKET_OUTGOING = 0x4 + PACKET_RECV_OUTPUT = 0x3 + PACKET_RESERVE = 0xc + PACKET_RX_RING = 0x5 + PACKET_STATISTICS = 0x6 + PACKET_TIMESTAMP = 0x11 + PACKET_TX_HAS_OFF = 0x13 + PACKET_TX_RING = 0xd + PACKET_TX_TIMESTAMP = 0x10 + PACKET_VERSION = 0xa + PACKET_VNET_HDR = 0xf + PARENB = 0x100 + PARITY_CRC16_PR0 = 0x2 + PARITY_CRC16_PR0_CCITT = 0x4 + PARITY_CRC16_PR1 = 0x3 + PARITY_CRC16_PR1_CCITT = 0x5 + PARITY_CRC32_PR0_CCITT = 0x6 + PARITY_CRC32_PR1_CCITT = 0x7 + PARITY_DEFAULT = 0x0 + PARITY_NONE = 0x1 + PARMRK = 0x8 + PARODD = 0x200 + PENDIN = 0x4000 + PRIO_PGRP = 0x1 + PRIO_PROCESS = 0x0 + PRIO_USER = 0x2 + PROT_EXEC = 0x4 + PROT_GROWSDOWN = 0x1000000 + PROT_GROWSUP = 0x2000000 + PROT_NONE = 0x0 + PROT_READ = 0x1 + PROT_WRITE = 0x2 + PR_CAPBSET_DROP = 0x18 + PR_CAPBSET_READ = 0x17 + PR_ENDIAN_BIG = 0x0 + PR_ENDIAN_LITTLE = 0x1 + PR_ENDIAN_PPC_LITTLE = 0x2 + PR_FPEMU_NOPRINT = 0x1 + PR_FPEMU_SIGFPE = 0x2 + PR_FP_EXC_ASYNC = 0x2 + PR_FP_EXC_DISABLED = 0x0 + PR_FP_EXC_DIV = 0x10000 + PR_FP_EXC_INV = 0x100000 + PR_FP_EXC_NONRECOV = 0x1 + PR_FP_EXC_OVF = 0x20000 + PR_FP_EXC_PRECISE = 0x3 + PR_FP_EXC_RES = 0x80000 + PR_FP_EXC_SW_ENABLE = 0x80 + PR_FP_EXC_UND = 0x40000 + PR_GET_CHILD_SUBREAPER = 0x25 + PR_GET_DUMPABLE = 0x3 + PR_GET_ENDIAN = 0x13 + PR_GET_FPEMU = 0x9 + PR_GET_FPEXC = 0xb + PR_GET_KEEPCAPS = 0x7 + PR_GET_NAME = 0x10 + PR_GET_NO_NEW_PRIVS = 0x27 + PR_GET_PDEATHSIG = 0x2 + PR_GET_SECCOMP = 0x15 + PR_GET_SECUREBITS = 0x1b + PR_GET_TID_ADDRESS = 0x28 + PR_GET_TIMERSLACK = 0x1e + PR_GET_TIMING = 0xd + PR_GET_TSC = 0x19 + PR_GET_UNALIGN = 0x5 + PR_MCE_KILL = 0x21 + PR_MCE_KILL_CLEAR = 0x0 + PR_MCE_KILL_DEFAULT = 0x2 + PR_MCE_KILL_EARLY = 0x1 + PR_MCE_KILL_GET = 0x22 + PR_MCE_KILL_LATE = 0x0 + PR_MCE_KILL_SET = 0x1 + PR_SET_CHILD_SUBREAPER = 0x24 + PR_SET_DUMPABLE = 0x4 + PR_SET_ENDIAN = 0x14 + PR_SET_FPEMU = 0xa + PR_SET_FPEXC = 0xc + PR_SET_KEEPCAPS = 0x8 + PR_SET_MM = 0x23 + PR_SET_MM_ARG_END = 0x9 + PR_SET_MM_ARG_START = 0x8 + PR_SET_MM_AUXV = 0xc + PR_SET_MM_BRK = 0x7 + PR_SET_MM_END_CODE = 0x2 + PR_SET_MM_END_DATA = 0x4 + PR_SET_MM_ENV_END = 0xb + PR_SET_MM_ENV_START = 0xa + PR_SET_MM_EXE_FILE = 0xd + PR_SET_MM_START_BRK = 0x6 + PR_SET_MM_START_CODE = 0x1 + PR_SET_MM_START_DATA = 0x3 + PR_SET_MM_START_STACK = 0x5 + PR_SET_NAME = 0xf + PR_SET_NO_NEW_PRIVS = 0x26 + PR_SET_PDEATHSIG = 0x1 + PR_SET_PTRACER = 0x59616d61 + PR_SET_PTRACER_ANY = -0x1 + PR_SET_SECCOMP = 0x16 + PR_SET_SECUREBITS = 0x1c + PR_SET_TIMERSLACK = 0x1d + PR_SET_TIMING = 0xe + PR_SET_TSC = 0x1a + PR_SET_UNALIGN = 0x6 + PR_TASK_PERF_EVENTS_DISABLE = 0x1f + PR_TASK_PERF_EVENTS_ENABLE = 0x20 + 
PR_TIMING_STATISTICAL = 0x0 + PR_TIMING_TIMESTAMP = 0x1 + PR_TSC_ENABLE = 0x1 + PR_TSC_SIGSEGV = 0x2 + PR_UNALIGN_NOPRINT = 0x1 + PR_UNALIGN_SIGBUS = 0x2 + PTRACE_ATTACH = 0x10 + PTRACE_CONT = 0x7 + PTRACE_DETACH = 0x11 + PTRACE_EVENT_CLONE = 0x3 + PTRACE_EVENT_EXEC = 0x4 + PTRACE_EVENT_EXIT = 0x6 + PTRACE_EVENT_FORK = 0x1 + PTRACE_EVENT_SECCOMP = 0x7 + PTRACE_EVENT_STOP = 0x80 + PTRACE_EVENT_VFORK = 0x2 + PTRACE_EVENT_VFORK_DONE = 0x5 + PTRACE_GETEVENTMSG = 0x4201 + PTRACE_GETREGS = 0xc + PTRACE_GETREGSET = 0x4204 + PTRACE_GETSIGINFO = 0x4202 + PTRACE_GETSIGMASK = 0x420a + PTRACE_INTERRUPT = 0x4207 + PTRACE_KILL = 0x8 + PTRACE_LISTEN = 0x4208 + PTRACE_O_EXITKILL = 0x100000 + PTRACE_O_MASK = 0x1000ff + PTRACE_O_TRACECLONE = 0x8 + PTRACE_O_TRACEEXEC = 0x10 + PTRACE_O_TRACEEXIT = 0x40 + PTRACE_O_TRACEFORK = 0x2 + PTRACE_O_TRACESECCOMP = 0x80 + PTRACE_O_TRACESYSGOOD = 0x1 + PTRACE_O_TRACEVFORK = 0x4 + PTRACE_O_TRACEVFORKDONE = 0x20 + PTRACE_PEEKDATA = 0x2 + PTRACE_PEEKSIGINFO = 0x4209 + PTRACE_PEEKSIGINFO_SHARED = 0x1 + PTRACE_PEEKTEXT = 0x1 + PTRACE_PEEKUSR = 0x3 + PTRACE_POKEDATA = 0x5 + PTRACE_POKETEXT = 0x4 + PTRACE_POKEUSR = 0x6 + PTRACE_SEIZE = 0x4206 + PTRACE_SETOPTIONS = 0x4200 + PTRACE_SETREGS = 0xd + PTRACE_SETREGSET = 0x4205 + PTRACE_SETSIGINFO = 0x4203 + PTRACE_SETSIGMASK = 0x420b + PTRACE_SINGLESTEP = 0x9 + PTRACE_SYSCALL = 0x18 + PTRACE_TRACEME = 0x0 + RLIMIT_AS = 0x9 + RLIMIT_CORE = 0x4 + RLIMIT_CPU = 0x0 + RLIMIT_DATA = 0x2 + RLIMIT_FSIZE = 0x1 + RLIMIT_NOFILE = 0x7 + RLIMIT_STACK = 0x3 + RLIM_INFINITY = -0x1 + RTAX_ADVMSS = 0x8 + RTAX_CWND = 0x7 + RTAX_FEATURES = 0xc + RTAX_FEATURE_ALLFRAG = 0x8 + RTAX_FEATURE_ECN = 0x1 + RTAX_FEATURE_SACK = 0x2 + RTAX_FEATURE_TIMESTAMP = 0x4 + RTAX_HOPLIMIT = 0xa + RTAX_INITCWND = 0xb + RTAX_INITRWND = 0xe + RTAX_LOCK = 0x1 + RTAX_MAX = 0xf + RTAX_MTU = 0x2 + RTAX_QUICKACK = 0xf + RTAX_REORDERING = 0x9 + RTAX_RTO_MIN = 0xd + RTAX_RTT = 0x4 + RTAX_RTTVAR = 0x5 + RTAX_SSTHRESH = 0x6 + RTAX_UNSPEC = 0x0 + RTAX_WINDOW = 0x3 + RTA_ALIGNTO = 0x4 + RTA_MAX = 0x11 + RTCF_DIRECTSRC = 0x4000000 + RTCF_DOREDIRECT = 0x1000000 + RTCF_LOG = 0x2000000 + RTCF_MASQ = 0x400000 + RTCF_NAT = 0x800000 + RTCF_VALVE = 0x200000 + RTF_ADDRCLASSMASK = 0xf8000000 + RTF_ADDRCONF = 0x40000 + RTF_ALLONLINK = 0x20000 + RTF_BROADCAST = 0x10000000 + RTF_CACHE = 0x1000000 + RTF_DEFAULT = 0x10000 + RTF_DYNAMIC = 0x10 + RTF_FLOW = 0x2000000 + RTF_GATEWAY = 0x2 + RTF_HOST = 0x4 + RTF_INTERFACE = 0x40000000 + RTF_IRTT = 0x100 + RTF_LINKRT = 0x100000 + RTF_LOCAL = 0x80000000 + RTF_MODIFIED = 0x20 + RTF_MSS = 0x40 + RTF_MTU = 0x40 + RTF_MULTICAST = 0x20000000 + RTF_NAT = 0x8000000 + RTF_NOFORWARD = 0x1000 + RTF_NONEXTHOP = 0x200000 + RTF_NOPMTUDISC = 0x4000 + RTF_POLICY = 0x4000000 + RTF_REINSTATE = 0x8 + RTF_REJECT = 0x200 + RTF_STATIC = 0x400 + RTF_THROW = 0x2000 + RTF_UP = 0x1 + RTF_WINDOW = 0x80 + RTF_XRESOLVE = 0x800 + RTM_BASE = 0x10 + RTM_DELACTION = 0x31 + RTM_DELADDR = 0x15 + RTM_DELADDRLABEL = 0x49 + RTM_DELLINK = 0x11 + RTM_DELMDB = 0x55 + RTM_DELNEIGH = 0x1d + RTM_DELQDISC = 0x25 + RTM_DELROUTE = 0x19 + RTM_DELRULE = 0x21 + RTM_DELTCLASS = 0x29 + RTM_DELTFILTER = 0x2d + RTM_F_CLONED = 0x200 + RTM_F_EQUALIZE = 0x400 + RTM_F_NOTIFY = 0x100 + RTM_F_PREFIX = 0x800 + RTM_GETACTION = 0x32 + RTM_GETADDR = 0x16 + RTM_GETADDRLABEL = 0x4a + RTM_GETANYCAST = 0x3e + RTM_GETDCB = 0x4e + RTM_GETLINK = 0x12 + RTM_GETMDB = 0x56 + RTM_GETMULTICAST = 0x3a + RTM_GETNEIGH = 0x1e + RTM_GETNEIGHTBL = 0x42 + RTM_GETNETCONF = 0x52 + RTM_GETQDISC = 0x26 + RTM_GETROUTE = 0x1a + RTM_GETRULE = 
0x22 + RTM_GETTCLASS = 0x2a + RTM_GETTFILTER = 0x2e + RTM_MAX = 0x57 + RTM_NEWACTION = 0x30 + RTM_NEWADDR = 0x14 + RTM_NEWADDRLABEL = 0x48 + RTM_NEWLINK = 0x10 + RTM_NEWMDB = 0x54 + RTM_NEWNDUSEROPT = 0x44 + RTM_NEWNEIGH = 0x1c + RTM_NEWNEIGHTBL = 0x40 + RTM_NEWNETCONF = 0x50 + RTM_NEWPREFIX = 0x34 + RTM_NEWQDISC = 0x24 + RTM_NEWROUTE = 0x18 + RTM_NEWRULE = 0x20 + RTM_NEWTCLASS = 0x28 + RTM_NEWTFILTER = 0x2c + RTM_NR_FAMILIES = 0x12 + RTM_NR_MSGTYPES = 0x48 + RTM_SETDCB = 0x4f + RTM_SETLINK = 0x13 + RTM_SETNEIGHTBL = 0x43 + RTNH_ALIGNTO = 0x4 + RTNH_F_DEAD = 0x1 + RTNH_F_ONLINK = 0x4 + RTNH_F_PERVASIVE = 0x2 + RTN_MAX = 0xb + RTPROT_BIRD = 0xc + RTPROT_BOOT = 0x3 + RTPROT_DHCP = 0x10 + RTPROT_DNROUTED = 0xd + RTPROT_GATED = 0x8 + RTPROT_KERNEL = 0x2 + RTPROT_MROUTED = 0x11 + RTPROT_MRT = 0xa + RTPROT_NTK = 0xf + RTPROT_RA = 0x9 + RTPROT_REDIRECT = 0x1 + RTPROT_STATIC = 0x4 + RTPROT_UNSPEC = 0x0 + RTPROT_XORP = 0xe + RTPROT_ZEBRA = 0xb + RT_CLASS_DEFAULT = 0xfd + RT_CLASS_LOCAL = 0xff + RT_CLASS_MAIN = 0xfe + RT_CLASS_MAX = 0xff + RT_CLASS_UNSPEC = 0x0 + RUSAGE_CHILDREN = -0x1 + RUSAGE_SELF = 0x0 + RUSAGE_THREAD = 0x1 + SCM_CREDENTIALS = 0x2 + SCM_RIGHTS = 0x1 + SCM_TIMESTAMP = 0x1d + SCM_TIMESTAMPING = 0x25 + SCM_TIMESTAMPNS = 0x23 + SCM_WIFI_STATUS = 0x29 + SHUT_RD = 0x0 + SHUT_RDWR = 0x2 + SHUT_WR = 0x1 + SIOCADDDLCI = 0x8980 + SIOCADDMULTI = 0x8931 + SIOCADDRT = 0x890b + SIOCATMARK = 0x8905 + SIOCDARP = 0x8953 + SIOCDELDLCI = 0x8981 + SIOCDELMULTI = 0x8932 + SIOCDELRT = 0x890c + SIOCDEVPRIVATE = 0x89f0 + SIOCDIFADDR = 0x8936 + SIOCDRARP = 0x8960 + SIOCGARP = 0x8954 + SIOCGIFADDR = 0x8915 + SIOCGIFBR = 0x8940 + SIOCGIFBRDADDR = 0x8919 + SIOCGIFCONF = 0x8912 + SIOCGIFCOUNT = 0x8938 + SIOCGIFDSTADDR = 0x8917 + SIOCGIFENCAP = 0x8925 + SIOCGIFFLAGS = 0x8913 + SIOCGIFHWADDR = 0x8927 + SIOCGIFINDEX = 0x8933 + SIOCGIFMAP = 0x8970 + SIOCGIFMEM = 0x891f + SIOCGIFMETRIC = 0x891d + SIOCGIFMTU = 0x8921 + SIOCGIFNAME = 0x8910 + SIOCGIFNETMASK = 0x891b + SIOCGIFPFLAGS = 0x8935 + SIOCGIFSLAVE = 0x8929 + SIOCGIFTXQLEN = 0x8942 + SIOCGPGRP = 0x8904 + SIOCGRARP = 0x8961 + SIOCGSTAMP = 0x8906 + SIOCGSTAMPNS = 0x8907 + SIOCPROTOPRIVATE = 0x89e0 + SIOCRTMSG = 0x890d + SIOCSARP = 0x8955 + SIOCSIFADDR = 0x8916 + SIOCSIFBR = 0x8941 + SIOCSIFBRDADDR = 0x891a + SIOCSIFDSTADDR = 0x8918 + SIOCSIFENCAP = 0x8926 + SIOCSIFFLAGS = 0x8914 + SIOCSIFHWADDR = 0x8924 + SIOCSIFHWBROADCAST = 0x8937 + SIOCSIFLINK = 0x8911 + SIOCSIFMAP = 0x8971 + SIOCSIFMEM = 0x8920 + SIOCSIFMETRIC = 0x891e + SIOCSIFMTU = 0x8922 + SIOCSIFNAME = 0x8923 + SIOCSIFNETMASK = 0x891c + SIOCSIFPFLAGS = 0x8934 + SIOCSIFSLAVE = 0x8930 + SIOCSIFTXQLEN = 0x8943 + SIOCSPGRP = 0x8902 + SIOCSRARP = 0x8962 + SOCK_CLOEXEC = 0x80000 + SOCK_DCCP = 0x6 + SOCK_DGRAM = 0x2 + SOCK_NONBLOCK = 0x800 + SOCK_PACKET = 0xa + SOCK_RAW = 0x3 + SOCK_RDM = 0x4 + SOCK_SEQPACKET = 0x5 + SOCK_STREAM = 0x1 + SOL_AAL = 0x109 + SOL_ATM = 0x108 + SOL_DECNET = 0x105 + SOL_ICMPV6 = 0x3a + SOL_IP = 0x0 + SOL_IPV6 = 0x29 + SOL_IRDA = 0x10a + SOL_PACKET = 0x107 + SOL_RAW = 0xff + SOL_SOCKET = 0x1 + SOL_TCP = 0x6 + SOL_X25 = 0x106 + SOMAXCONN = 0x80 + SO_ACCEPTCONN = 0x1e + SO_ATTACH_FILTER = 0x1a + SO_BINDTODEVICE = 0x19 + SO_BROADCAST = 0x6 + SO_BSDCOMPAT = 0xe + SO_BUSY_POLL = 0x2e + SO_DEBUG = 0x1 + SO_DETACH_FILTER = 0x1b + SO_DOMAIN = 0x27 + SO_DONTROUTE = 0x5 + SO_ERROR = 0x4 + SO_GET_FILTER = 0x1a + SO_KEEPALIVE = 0x9 + SO_LINGER = 0xd + SO_LOCK_FILTER = 0x2c + SO_MARK = 0x24 + SO_MAX_PACING_RATE = 0x2f + SO_NOFCS = 0x2b + SO_NO_CHECK = 0xb + SO_OOBINLINE = 0xa + SO_PASSCRED = 0x10 
+ SO_PASSSEC = 0x22 + SO_PEEK_OFF = 0x2a + SO_PEERCRED = 0x11 + SO_PEERNAME = 0x1c + SO_PEERSEC = 0x1f + SO_PRIORITY = 0xc + SO_PROTOCOL = 0x26 + SO_RCVBUF = 0x8 + SO_RCVBUFFORCE = 0x21 + SO_RCVLOWAT = 0x12 + SO_RCVTIMEO = 0x14 + SO_REUSEADDR = 0x2 + SO_REUSEPORT = 0xf + SO_RXQ_OVFL = 0x28 + SO_SECURITY_AUTHENTICATION = 0x16 + SO_SECURITY_ENCRYPTION_NETWORK = 0x18 + SO_SECURITY_ENCRYPTION_TRANSPORT = 0x17 + SO_SELECT_ERR_QUEUE = 0x2d + SO_SNDBUF = 0x7 + SO_SNDBUFFORCE = 0x20 + SO_SNDLOWAT = 0x13 + SO_SNDTIMEO = 0x15 + SO_TIMESTAMP = 0x1d + SO_TIMESTAMPING = 0x25 + SO_TIMESTAMPNS = 0x23 + SO_TYPE = 0x3 + SO_WIFI_STATUS = 0x29 + S_BLKSIZE = 0x200 + S_IEXEC = 0x40 + S_IFBLK = 0x6000 + S_IFCHR = 0x2000 + S_IFDIR = 0x4000 + S_IFIFO = 0x1000 + S_IFLNK = 0xa000 + S_IFMT = 0xf000 + S_IFREG = 0x8000 + S_IFSOCK = 0xc000 + S_IREAD = 0x100 + S_IRGRP = 0x20 + S_IROTH = 0x4 + S_IRUSR = 0x100 + S_IRWXG = 0x38 + S_IRWXO = 0x7 + S_IRWXU = 0x1c0 + S_ISGID = 0x400 + S_ISUID = 0x800 + S_ISVTX = 0x200 + S_IWGRP = 0x10 + S_IWOTH = 0x2 + S_IWRITE = 0x80 + S_IWUSR = 0x80 + S_IXGRP = 0x8 + S_IXOTH = 0x1 + S_IXUSR = 0x40 + TAB0 = 0x0 + TAB1 = 0x800 + TAB2 = 0x1000 + TAB3 = 0x1800 + TABDLY = 0x1800 + TCFLSH = 0x540b + TCGETA = 0x5405 + TCGETS = 0x5401 + TCGETS2 = 0x802c542a + TCGETX = 0x5432 + TCIFLUSH = 0x0 + TCIOFF = 0x2 + TCIOFLUSH = 0x2 + TCION = 0x3 + TCOFLUSH = 0x1 + TCOOFF = 0x0 + TCOON = 0x1 + TCP_CONGESTION = 0xd + TCP_COOKIE_IN_ALWAYS = 0x1 + TCP_COOKIE_MAX = 0x10 + TCP_COOKIE_MIN = 0x8 + TCP_COOKIE_OUT_NEVER = 0x2 + TCP_COOKIE_PAIR_SIZE = 0x20 + TCP_COOKIE_TRANSACTIONS = 0xf + TCP_CORK = 0x3 + TCP_DEFER_ACCEPT = 0x9 + TCP_FASTOPEN = 0x17 + TCP_INFO = 0xb + TCP_KEEPCNT = 0x6 + TCP_KEEPIDLE = 0x4 + TCP_KEEPINTVL = 0x5 + TCP_LINGER2 = 0x8 + TCP_MAXSEG = 0x2 + TCP_MAXWIN = 0xffff + TCP_MAX_WINSHIFT = 0xe + TCP_MD5SIG = 0xe + TCP_MD5SIG_MAXKEYLEN = 0x50 + TCP_MSS = 0x200 + TCP_MSS_DEFAULT = 0x218 + TCP_MSS_DESIRED = 0x4c4 + TCP_NODELAY = 0x1 + TCP_QUEUE_SEQ = 0x15 + TCP_QUICKACK = 0xc + TCP_REPAIR = 0x13 + TCP_REPAIR_OPTIONS = 0x16 + TCP_REPAIR_QUEUE = 0x14 + TCP_SYNCNT = 0x7 + TCP_S_DATA_IN = 0x4 + TCP_S_DATA_OUT = 0x8 + TCP_THIN_DUPACK = 0x11 + TCP_THIN_LINEAR_TIMEOUTS = 0x10 + TCP_TIMESTAMP = 0x18 + TCP_USER_TIMEOUT = 0x12 + TCP_WINDOW_CLAMP = 0xa + TCSAFLUSH = 0x2 + TCSBRK = 0x5409 + TCSBRKP = 0x5425 + TCSETA = 0x5406 + TCSETAF = 0x5408 + TCSETAW = 0x5407 + TCSETS = 0x5402 + TCSETS2 = 0x402c542b + TCSETSF = 0x5404 + TCSETSF2 = 0x402c542d + TCSETSW = 0x5403 + TCSETSW2 = 0x402c542c + TCSETX = 0x5433 + TCSETXF = 0x5434 + TCSETXW = 0x5435 + TCXONC = 0x540a + TIOCCBRK = 0x5428 + TIOCCONS = 0x541d + TIOCEXCL = 0x540c + TIOCGDEV = 0x80045432 + TIOCGETD = 0x5424 + TIOCGEXCL = 0x80045440 + TIOCGICOUNT = 0x545d + TIOCGLCKTRMIOS = 0x5456 + TIOCGPGRP = 0x540f + TIOCGPKT = 0x80045438 + TIOCGPTLCK = 0x80045439 + TIOCGPTN = 0x80045430 + TIOCGRS485 = 0x542e + TIOCGSERIAL = 0x541e + TIOCGSID = 0x5429 + TIOCGSOFTCAR = 0x5419 + TIOCGWINSZ = 0x5413 + TIOCINQ = 0x541b + TIOCLINUX = 0x541c + TIOCMBIC = 0x5417 + TIOCMBIS = 0x5416 + TIOCMGET = 0x5415 + TIOCMIWAIT = 0x545c + TIOCMSET = 0x5418 + TIOCM_CAR = 0x40 + TIOCM_CD = 0x40 + TIOCM_CTS = 0x20 + TIOCM_DSR = 0x100 + TIOCM_DTR = 0x2 + TIOCM_LE = 0x1 + TIOCM_RI = 0x80 + TIOCM_RNG = 0x80 + TIOCM_RTS = 0x4 + TIOCM_SR = 0x10 + TIOCM_ST = 0x8 + TIOCNOTTY = 0x5422 + TIOCNXCL = 0x540d + TIOCOUTQ = 0x5411 + TIOCPKT = 0x5420 + TIOCPKT_DATA = 0x0 + TIOCPKT_DOSTOP = 0x20 + TIOCPKT_FLUSHREAD = 0x1 + TIOCPKT_FLUSHWRITE = 0x2 + TIOCPKT_IOCTL = 0x40 + TIOCPKT_NOSTOP = 0x10 + TIOCPKT_START = 
0x8 + TIOCPKT_STOP = 0x4 + TIOCSBRK = 0x5427 + TIOCSCTTY = 0x540e + TIOCSERCONFIG = 0x5453 + TIOCSERGETLSR = 0x5459 + TIOCSERGETMULTI = 0x545a + TIOCSERGSTRUCT = 0x5458 + TIOCSERGWILD = 0x5454 + TIOCSERSETMULTI = 0x545b + TIOCSERSWILD = 0x5455 + TIOCSER_TEMT = 0x1 + TIOCSETD = 0x5423 + TIOCSIG = 0x40045436 + TIOCSLCKTRMIOS = 0x5457 + TIOCSPGRP = 0x5410 + TIOCSPTLCK = 0x40045431 + TIOCSRS485 = 0x542f + TIOCSSERIAL = 0x541f + TIOCSSOFTCAR = 0x541a + TIOCSTI = 0x5412 + TIOCSWINSZ = 0x5414 + TIOCVHANGUP = 0x5437 + TOSTOP = 0x100 + TUNATTACHFILTER = 0x401054d5 + TUNDETACHFILTER = 0x401054d6 + TUNGETFEATURES = 0x800454cf + TUNGETFILTER = 0x801054db + TUNGETIFF = 0x800454d2 + TUNGETSNDBUF = 0x800454d3 + TUNGETVNETHDRSZ = 0x800454d7 + TUNSETDEBUG = 0x400454c9 + TUNSETGROUP = 0x400454ce + TUNSETIFF = 0x400454ca + TUNSETIFINDEX = 0x400454da + TUNSETLINK = 0x400454cd + TUNSETNOCSUM = 0x400454c8 + TUNSETOFFLOAD = 0x400454d0 + TUNSETOWNER = 0x400454cc + TUNSETPERSIST = 0x400454cb + TUNSETQUEUE = 0x400454d9 + TUNSETSNDBUF = 0x400454d4 + TUNSETTXFILTER = 0x400454d1 + TUNSETVNETHDRSZ = 0x400454d8 + VDISCARD = 0xd + VEOF = 0x4 + VEOL = 0xb + VEOL2 = 0x10 + VERASE = 0x2 + VINTR = 0x0 + VKILL = 0x3 + VLNEXT = 0xf + VMIN = 0x6 + VQUIT = 0x1 + VREPRINT = 0xc + VSTART = 0x8 + VSTOP = 0x9 + VSUSP = 0xa + VSWTC = 0x7 + VT0 = 0x0 + VT1 = 0x4000 + VTDLY = 0x4000 + VTIME = 0x5 + VWERASE = 0xe + WALL = 0x40000000 + WCLONE = 0x80000000 + WCONTINUED = 0x8 + WEXITED = 0x4 + WNOHANG = 0x1 + WNOTHREAD = 0x20000000 + WNOWAIT = 0x1000000 + WORDSIZE = 0x40 + WSTOPPED = 0x2 + WUNTRACED = 0x2 + XCASE = 0x4 + XTABS = 0x1800 +) + +// Errors +const ( + E2BIG = syscall.Errno(0x7) + EACCES = syscall.Errno(0xd) + EADDRINUSE = syscall.Errno(0x62) + EADDRNOTAVAIL = syscall.Errno(0x63) + EADV = syscall.Errno(0x44) + EAFNOSUPPORT = syscall.Errno(0x61) + EAGAIN = syscall.Errno(0xb) + EALREADY = syscall.Errno(0x72) + EBADE = syscall.Errno(0x34) + EBADF = syscall.Errno(0x9) + EBADFD = syscall.Errno(0x4d) + EBADMSG = syscall.Errno(0x4a) + EBADR = syscall.Errno(0x35) + EBADRQC = syscall.Errno(0x38) + EBADSLT = syscall.Errno(0x39) + EBFONT = syscall.Errno(0x3b) + EBUSY = syscall.Errno(0x10) + ECANCELED = syscall.Errno(0x7d) + ECHILD = syscall.Errno(0xa) + ECHRNG = syscall.Errno(0x2c) + ECOMM = syscall.Errno(0x46) + ECONNABORTED = syscall.Errno(0x67) + ECONNREFUSED = syscall.Errno(0x6f) + ECONNRESET = syscall.Errno(0x68) + EDEADLK = syscall.Errno(0x23) + EDEADLOCK = syscall.Errno(0x23) + EDESTADDRREQ = syscall.Errno(0x59) + EDOM = syscall.Errno(0x21) + EDOTDOT = syscall.Errno(0x49) + EDQUOT = syscall.Errno(0x7a) + EEXIST = syscall.Errno(0x11) + EFAULT = syscall.Errno(0xe) + EFBIG = syscall.Errno(0x1b) + EHOSTDOWN = syscall.Errno(0x70) + EHOSTUNREACH = syscall.Errno(0x71) + EHWPOISON = syscall.Errno(0x85) + EIDRM = syscall.Errno(0x2b) + EILSEQ = syscall.Errno(0x54) + EINPROGRESS = syscall.Errno(0x73) + EINTR = syscall.Errno(0x4) + EINVAL = syscall.Errno(0x16) + EIO = syscall.Errno(0x5) + EISCONN = syscall.Errno(0x6a) + EISDIR = syscall.Errno(0x15) + EISNAM = syscall.Errno(0x78) + EKEYEXPIRED = syscall.Errno(0x7f) + EKEYREJECTED = syscall.Errno(0x81) + EKEYREVOKED = syscall.Errno(0x80) + EL2HLT = syscall.Errno(0x33) + EL2NSYNC = syscall.Errno(0x2d) + EL3HLT = syscall.Errno(0x2e) + EL3RST = syscall.Errno(0x2f) + ELIBACC = syscall.Errno(0x4f) + ELIBBAD = syscall.Errno(0x50) + ELIBEXEC = syscall.Errno(0x53) + ELIBMAX = syscall.Errno(0x52) + ELIBSCN = syscall.Errno(0x51) + ELNRNG = syscall.Errno(0x30) + ELOOP = syscall.Errno(0x28) + EMEDIUMTYPE = 
syscall.Errno(0x7c) + EMFILE = syscall.Errno(0x18) + EMLINK = syscall.Errno(0x1f) + EMSGSIZE = syscall.Errno(0x5a) + EMULTIHOP = syscall.Errno(0x48) + ENAMETOOLONG = syscall.Errno(0x24) + ENAVAIL = syscall.Errno(0x77) + ENETDOWN = syscall.Errno(0x64) + ENETRESET = syscall.Errno(0x66) + ENETUNREACH = syscall.Errno(0x65) + ENFILE = syscall.Errno(0x17) + ENOANO = syscall.Errno(0x37) + ENOBUFS = syscall.Errno(0x69) + ENOCSI = syscall.Errno(0x32) + ENODATA = syscall.Errno(0x3d) + ENODEV = syscall.Errno(0x13) + ENOENT = syscall.Errno(0x2) + ENOEXEC = syscall.Errno(0x8) + ENOKEY = syscall.Errno(0x7e) + ENOLCK = syscall.Errno(0x25) + ENOLINK = syscall.Errno(0x43) + ENOMEDIUM = syscall.Errno(0x7b) + ENOMEM = syscall.Errno(0xc) + ENOMSG = syscall.Errno(0x2a) + ENONET = syscall.Errno(0x40) + ENOPKG = syscall.Errno(0x41) + ENOPROTOOPT = syscall.Errno(0x5c) + ENOSPC = syscall.Errno(0x1c) + ENOSR = syscall.Errno(0x3f) + ENOSTR = syscall.Errno(0x3c) + ENOSYS = syscall.Errno(0x26) + ENOTBLK = syscall.Errno(0xf) + ENOTCONN = syscall.Errno(0x6b) + ENOTDIR = syscall.Errno(0x14) + ENOTEMPTY = syscall.Errno(0x27) + ENOTNAM = syscall.Errno(0x76) + ENOTRECOVERABLE = syscall.Errno(0x83) + ENOTSOCK = syscall.Errno(0x58) + ENOTSUP = syscall.Errno(0x5f) + ENOTTY = syscall.Errno(0x19) + ENOTUNIQ = syscall.Errno(0x4c) + ENXIO = syscall.Errno(0x6) + EOPNOTSUPP = syscall.Errno(0x5f) + EOVERFLOW = syscall.Errno(0x4b) + EOWNERDEAD = syscall.Errno(0x82) + EPERM = syscall.Errno(0x1) + EPFNOSUPPORT = syscall.Errno(0x60) + EPIPE = syscall.Errno(0x20) + EPROTO = syscall.Errno(0x47) + EPROTONOSUPPORT = syscall.Errno(0x5d) + EPROTOTYPE = syscall.Errno(0x5b) + ERANGE = syscall.Errno(0x22) + EREMCHG = syscall.Errno(0x4e) + EREMOTE = syscall.Errno(0x42) + EREMOTEIO = syscall.Errno(0x79) + ERESTART = syscall.Errno(0x55) + ERFKILL = syscall.Errno(0x84) + EROFS = syscall.Errno(0x1e) + ESHUTDOWN = syscall.Errno(0x6c) + ESOCKTNOSUPPORT = syscall.Errno(0x5e) + ESPIPE = syscall.Errno(0x1d) + ESRCH = syscall.Errno(0x3) + ESRMNT = syscall.Errno(0x45) + ESTALE = syscall.Errno(0x74) + ESTRPIPE = syscall.Errno(0x56) + ETIME = syscall.Errno(0x3e) + ETIMEDOUT = syscall.Errno(0x6e) + ETOOMANYREFS = syscall.Errno(0x6d) + ETXTBSY = syscall.Errno(0x1a) + EUCLEAN = syscall.Errno(0x75) + EUNATCH = syscall.Errno(0x31) + EUSERS = syscall.Errno(0x57) + EWOULDBLOCK = syscall.Errno(0xb) + EXDEV = syscall.Errno(0x12) + EXFULL = syscall.Errno(0x36) +) + +// Signals +const ( + SIGABRT = syscall.Signal(0x6) + SIGALRM = syscall.Signal(0xe) + SIGBUS = syscall.Signal(0x7) + SIGCHLD = syscall.Signal(0x11) + SIGCLD = syscall.Signal(0x11) + SIGCONT = syscall.Signal(0x12) + SIGFPE = syscall.Signal(0x8) + SIGHUP = syscall.Signal(0x1) + SIGILL = syscall.Signal(0x4) + SIGINT = syscall.Signal(0x2) + SIGIO = syscall.Signal(0x1d) + SIGIOT = syscall.Signal(0x6) + SIGKILL = syscall.Signal(0x9) + SIGPIPE = syscall.Signal(0xd) + SIGPOLL = syscall.Signal(0x1d) + SIGPROF = syscall.Signal(0x1b) + SIGPWR = syscall.Signal(0x1e) + SIGQUIT = syscall.Signal(0x3) + SIGSEGV = syscall.Signal(0xb) + SIGSTKFLT = syscall.Signal(0x10) + SIGSTOP = syscall.Signal(0x13) + SIGSYS = syscall.Signal(0x1f) + SIGTERM = syscall.Signal(0xf) + SIGTRAP = syscall.Signal(0x5) + SIGTSTP = syscall.Signal(0x14) + SIGTTIN = syscall.Signal(0x15) + SIGTTOU = syscall.Signal(0x16) + SIGUNUSED = syscall.Signal(0x1f) + SIGURG = syscall.Signal(0x17) + SIGUSR1 = syscall.Signal(0xa) + SIGUSR2 = syscall.Signal(0xc) + SIGVTALRM = syscall.Signal(0x1a) + SIGWINCH = syscall.Signal(0x1c) + SIGXCPU = syscall.Signal(0x18) + 
SIGXFSZ = syscall.Signal(0x19) +) + +// Error table +var errors = [...]string{ + 1: "operation not permitted", + 2: "no such file or directory", + 3: "no such process", + 4: "interrupted system call", + 5: "input/output error", + 6: "no such device or address", + 7: "argument list too long", + 8: "exec format error", + 9: "bad file descriptor", + 10: "no child processes", + 11: "resource temporarily unavailable", + 12: "cannot allocate memory", + 13: "permission denied", + 14: "bad address", + 15: "block device required", + 16: "device or resource busy", + 17: "file exists", + 18: "invalid cross-device link", + 19: "no such device", + 20: "not a directory", + 21: "is a directory", + 22: "invalid argument", + 23: "too many open files in system", + 24: "too many open files", + 25: "inappropriate ioctl for device", + 26: "text file busy", + 27: "file too large", + 28: "no space left on device", + 29: "illegal seek", + 30: "read-only file system", + 31: "too many links", + 32: "broken pipe", + 33: "numerical argument out of domain", + 34: "numerical result out of range", + 35: "resource deadlock avoided", + 36: "file name too long", + 37: "no locks available", + 38: "function not implemented", + 39: "directory not empty", + 40: "too many levels of symbolic links", + 42: "no message of desired type", + 43: "identifier removed", + 44: "channel number out of range", + 45: "level 2 not synchronized", + 46: "level 3 halted", + 47: "level 3 reset", + 48: "link number out of range", + 49: "protocol driver not attached", + 50: "no CSI structure available", + 51: "level 2 halted", + 52: "invalid exchange", + 53: "invalid request descriptor", + 54: "exchange full", + 55: "no anode", + 56: "invalid request code", + 57: "invalid slot", + 59: "bad font file format", + 60: "device not a stream", + 61: "no data available", + 62: "timer expired", + 63: "out of streams resources", + 64: "machine is not on the network", + 65: "package not installed", + 66: "object is remote", + 67: "link has been severed", + 68: "advertise error", + 69: "srmount error", + 70: "communication error on send", + 71: "protocol error", + 72: "multihop attempted", + 73: "RFS specific error", + 74: "bad message", + 75: "value too large for defined data type", + 76: "name not unique on network", + 77: "file descriptor in bad state", + 78: "remote address changed", + 79: "can not access a needed shared library", + 80: "accessing a corrupted shared library", + 81: ".lib section in a.out corrupted", + 82: "attempting to link in too many shared libraries", + 83: "cannot exec a shared library directly", + 84: "invalid or incomplete multibyte or wide character", + 85: "interrupted system call should be restarted", + 86: "streams pipe error", + 87: "too many users", + 88: "socket operation on non-socket", + 89: "destination address required", + 90: "message too long", + 91: "protocol wrong type for socket", + 92: "protocol not available", + 93: "protocol not supported", + 94: "socket type not supported", + 95: "operation not supported", + 96: "protocol family not supported", + 97: "address family not supported by protocol", + 98: "address already in use", + 99: "cannot assign requested address", + 100: "network is down", + 101: "network is unreachable", + 102: "network dropped connection on reset", + 103: "software caused connection abort", + 104: "connection reset by peer", + 105: "no buffer space available", + 106: "transport endpoint is already connected", + 107: "transport endpoint is not connected", + 108: "cannot send after transport 
endpoint shutdown", + 109: "too many references: cannot splice", + 110: "connection timed out", + 111: "connection refused", + 112: "host is down", + 113: "no route to host", + 114: "operation already in progress", + 115: "operation now in progress", + 116: "stale file handle", + 117: "structure needs cleaning", + 118: "not a XENIX named type file", + 119: "no XENIX semaphores available", + 120: "is a named type file", + 121: "remote I/O error", + 122: "disk quota exceeded", + 123: "no medium found", + 124: "wrong medium type", + 125: "operation canceled", + 126: "required key not available", + 127: "key has expired", + 128: "key has been revoked", + 129: "key was rejected by service", + 130: "owner died", + 131: "state not recoverable", + 132: "operation not possible due to RF-kill", + 133: "memory page has hardware error", +} + +// Signal table +var signals = [...]string{ + 1: "hangup", + 2: "interrupt", + 3: "quit", + 4: "illegal instruction", + 5: "trace/breakpoint trap", + 6: "aborted", + 7: "bus error", + 8: "floating point exception", + 9: "killed", + 10: "user defined signal 1", + 11: "segmentation fault", + 12: "user defined signal 2", + 13: "broken pipe", + 14: "alarm clock", + 15: "terminated", + 16: "stack fault", + 17: "child exited", + 18: "continued", + 19: "stopped (signal)", + 20: "stopped", + 21: "stopped (tty input)", + 22: "stopped (tty output)", + 23: "urgent I/O condition", + 24: "CPU time limit exceeded", + 25: "file size limit exceeded", + 26: "virtual timer expired", + 27: "profiling timer expired", + 28: "window changed", + 29: "I/O possible", + 30: "power failure", + 31: "bad system call", +} diff --git a/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zerrors_linux_ppc64.go b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zerrors_linux_ppc64.go new file mode 100644 index 000000000..5b90d07ed --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zerrors_linux_ppc64.go @@ -0,0 +1,1969 @@ +// mkerrors.sh -m64 +// MACHINE GENERATED BY THE COMMAND ABOVE; DO NOT EDIT + +// +build ppc64,linux + +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs -- -m64 _const.go + +package unix + +import "syscall" + +const ( + AF_ALG = 0x26 + AF_APPLETALK = 0x5 + AF_ASH = 0x12 + AF_ATMPVC = 0x8 + AF_ATMSVC = 0x14 + AF_AX25 = 0x3 + AF_BLUETOOTH = 0x1f + AF_BRIDGE = 0x7 + AF_CAIF = 0x25 + AF_CAN = 0x1d + AF_DECnet = 0xc + AF_ECONET = 0x13 + AF_FILE = 0x1 + AF_IEEE802154 = 0x24 + AF_INET = 0x2 + AF_INET6 = 0xa + AF_IPX = 0x4 + AF_IRDA = 0x17 + AF_ISDN = 0x22 + AF_IUCV = 0x20 + AF_KEY = 0xf + AF_LLC = 0x1a + AF_LOCAL = 0x1 + AF_MAX = 0x29 + AF_NETBEUI = 0xd + AF_NETLINK = 0x10 + AF_NETROM = 0x6 + AF_NFC = 0x27 + AF_PACKET = 0x11 + AF_PHONET = 0x23 + AF_PPPOX = 0x18 + AF_RDS = 0x15 + AF_ROSE = 0xb + AF_ROUTE = 0x10 + AF_RXRPC = 0x21 + AF_SECURITY = 0xe + AF_SNA = 0x16 + AF_TIPC = 0x1e + AF_UNIX = 0x1 + AF_UNSPEC = 0x0 + AF_VSOCK = 0x28 + AF_WANPIPE = 0x19 + AF_X25 = 0x9 + ARPHRD_6LOWPAN = 0x339 + ARPHRD_ADAPT = 0x108 + ARPHRD_APPLETLK = 0x8 + ARPHRD_ARCNET = 0x7 + ARPHRD_ASH = 0x30d + ARPHRD_ATM = 0x13 + ARPHRD_AX25 = 0x3 + ARPHRD_BIF = 0x307 + ARPHRD_CAIF = 0x336 + ARPHRD_CAN = 0x118 + ARPHRD_CHAOS = 0x5 + ARPHRD_CISCO = 0x201 + ARPHRD_CSLIP = 0x101 + ARPHRD_CSLIP6 = 0x103 + ARPHRD_DDCMP = 0x205 + ARPHRD_DLCI = 0xf + ARPHRD_ECONET = 0x30e + ARPHRD_EETHER = 0x2 + ARPHRD_ETHER = 0x1 + ARPHRD_EUI64 = 0x1b + ARPHRD_FCAL = 0x311 + ARPHRD_FCFABRIC = 0x313 + ARPHRD_FCPL = 0x312 + ARPHRD_FCPP = 0x310 + 
ARPHRD_FDDI = 0x306 + ARPHRD_FRAD = 0x302 + ARPHRD_HDLC = 0x201 + ARPHRD_HIPPI = 0x30c + ARPHRD_HWX25 = 0x110 + ARPHRD_IEEE1394 = 0x18 + ARPHRD_IEEE802 = 0x6 + ARPHRD_IEEE80211 = 0x321 + ARPHRD_IEEE80211_PRISM = 0x322 + ARPHRD_IEEE80211_RADIOTAP = 0x323 + ARPHRD_IEEE802154 = 0x324 + ARPHRD_IEEE802154_MONITOR = 0x325 + ARPHRD_IEEE802_TR = 0x320 + ARPHRD_INFINIBAND = 0x20 + ARPHRD_IP6GRE = 0x337 + ARPHRD_IPDDP = 0x309 + ARPHRD_IPGRE = 0x30a + ARPHRD_IRDA = 0x30f + ARPHRD_LAPB = 0x204 + ARPHRD_LOCALTLK = 0x305 + ARPHRD_LOOPBACK = 0x304 + ARPHRD_METRICOM = 0x17 + ARPHRD_NETLINK = 0x338 + ARPHRD_NETROM = 0x0 + ARPHRD_NONE = 0xfffe + ARPHRD_PHONET = 0x334 + ARPHRD_PHONET_PIPE = 0x335 + ARPHRD_PIMREG = 0x30b + ARPHRD_PPP = 0x200 + ARPHRD_PRONET = 0x4 + ARPHRD_RAWHDLC = 0x206 + ARPHRD_ROSE = 0x10e + ARPHRD_RSRVD = 0x104 + ARPHRD_SIT = 0x308 + ARPHRD_SKIP = 0x303 + ARPHRD_SLIP = 0x100 + ARPHRD_SLIP6 = 0x102 + ARPHRD_TUNNEL = 0x300 + ARPHRD_TUNNEL6 = 0x301 + ARPHRD_VOID = 0xffff + ARPHRD_X25 = 0x10f + B0 = 0x0 + B1000000 = 0x17 + B110 = 0x3 + B115200 = 0x11 + B1152000 = 0x18 + B1200 = 0x9 + B134 = 0x4 + B150 = 0x5 + B1500000 = 0x19 + B1800 = 0xa + B19200 = 0xe + B200 = 0x6 + B2000000 = 0x1a + B230400 = 0x12 + B2400 = 0xb + B2500000 = 0x1b + B300 = 0x7 + B3000000 = 0x1c + B3500000 = 0x1d + B38400 = 0xf + B4000000 = 0x1e + B460800 = 0x13 + B4800 = 0xc + B50 = 0x1 + B500000 = 0x14 + B57600 = 0x10 + B576000 = 0x15 + B600 = 0x8 + B75 = 0x2 + B921600 = 0x16 + B9600 = 0xd + BOTHER = 0x1f + BPF_A = 0x10 + BPF_ABS = 0x20 + BPF_ADD = 0x0 + BPF_ALU = 0x4 + BPF_AND = 0x50 + BPF_B = 0x10 + BPF_DIV = 0x30 + BPF_H = 0x8 + BPF_IMM = 0x0 + BPF_IND = 0x40 + BPF_JA = 0x0 + BPF_JEQ = 0x10 + BPF_JGE = 0x30 + BPF_JGT = 0x20 + BPF_JMP = 0x5 + BPF_JSET = 0x40 + BPF_K = 0x0 + BPF_LD = 0x0 + BPF_LDX = 0x1 + BPF_LEN = 0x80 + BPF_LSH = 0x60 + BPF_MAJOR_VERSION = 0x1 + BPF_MAXINSNS = 0x1000 + BPF_MEM = 0x60 + BPF_MEMWORDS = 0x10 + BPF_MINOR_VERSION = 0x1 + BPF_MISC = 0x7 + BPF_MOD = 0x90 + BPF_MSH = 0xa0 + BPF_MUL = 0x20 + BPF_NEG = 0x80 + BPF_OR = 0x40 + BPF_RET = 0x6 + BPF_RSH = 0x70 + BPF_ST = 0x2 + BPF_STX = 0x3 + BPF_SUB = 0x10 + BPF_TAX = 0x0 + BPF_TXA = 0x80 + BPF_W = 0x0 + BPF_X = 0x8 + BPF_XOR = 0xa0 + BRKINT = 0x2 + BS0 = 0x0 + BS1 = 0x8000 + BSDLY = 0x8000 + CBAUD = 0xff + CBAUDEX = 0x0 + CFLUSH = 0xf + CIBAUD = 0xff0000 + CLOCAL = 0x8000 + CLOCK_BOOTTIME = 0x7 + CLOCK_BOOTTIME_ALARM = 0x9 + CLOCK_DEFAULT = 0x0 + CLOCK_EXT = 0x1 + CLOCK_INT = 0x2 + CLOCK_MONOTONIC = 0x1 + CLOCK_MONOTONIC_COARSE = 0x6 + CLOCK_MONOTONIC_RAW = 0x4 + CLOCK_PROCESS_CPUTIME_ID = 0x2 + CLOCK_REALTIME = 0x0 + CLOCK_REALTIME_ALARM = 0x8 + CLOCK_REALTIME_COARSE = 0x5 + CLOCK_THREAD_CPUTIME_ID = 0x3 + CLOCK_TXFROMRX = 0x4 + CLOCK_TXINT = 0x3 + CLONE_CHILD_CLEARTID = 0x200000 + CLONE_CHILD_SETTID = 0x1000000 + CLONE_DETACHED = 0x400000 + CLONE_FILES = 0x400 + CLONE_FS = 0x200 + CLONE_IO = 0x80000000 + CLONE_NEWIPC = 0x8000000 + CLONE_NEWNET = 0x40000000 + CLONE_NEWNS = 0x20000 + CLONE_NEWPID = 0x20000000 + CLONE_NEWUSER = 0x10000000 + CLONE_NEWUTS = 0x4000000 + CLONE_PARENT = 0x8000 + CLONE_PARENT_SETTID = 0x100000 + CLONE_PTRACE = 0x2000 + CLONE_SETTLS = 0x80000 + CLONE_SIGHAND = 0x800 + CLONE_SYSVSEM = 0x40000 + CLONE_THREAD = 0x10000 + CLONE_UNTRACED = 0x800000 + CLONE_VFORK = 0x4000 + CLONE_VM = 0x100 + CMSPAR = 0x40000000 + CR0 = 0x0 + CR1 = 0x1000 + CR2 = 0x2000 + CR3 = 0x3000 + CRDLY = 0x3000 + CREAD = 0x800 + CRTSCTS = 0x80000000 + CS5 = 0x0 + CS6 = 0x100 + CS7 = 0x200 + CS8 = 0x300 + CSIGNAL = 0xff + CSIZE = 0x300 + CSTART = 0x11 + 
CSTATUS = 0x0 + CSTOP = 0x13 + CSTOPB = 0x400 + CSUSP = 0x1a + DT_BLK = 0x6 + DT_CHR = 0x2 + DT_DIR = 0x4 + DT_FIFO = 0x1 + DT_LNK = 0xa + DT_REG = 0x8 + DT_SOCK = 0xc + DT_UNKNOWN = 0x0 + DT_WHT = 0xe + ECHO = 0x8 + ECHOCTL = 0x40 + ECHOE = 0x2 + ECHOK = 0x4 + ECHOKE = 0x1 + ECHONL = 0x10 + ECHOPRT = 0x20 + ENCODING_DEFAULT = 0x0 + ENCODING_FM_MARK = 0x3 + ENCODING_FM_SPACE = 0x4 + ENCODING_MANCHESTER = 0x5 + ENCODING_NRZ = 0x1 + ENCODING_NRZI = 0x2 + EPOLLERR = 0x8 + EPOLLET = 0x80000000 + EPOLLHUP = 0x10 + EPOLLIN = 0x1 + EPOLLMSG = 0x400 + EPOLLONESHOT = 0x40000000 + EPOLLOUT = 0x4 + EPOLLPRI = 0x2 + EPOLLRDBAND = 0x80 + EPOLLRDHUP = 0x2000 + EPOLLRDNORM = 0x40 + EPOLLWAKEUP = 0x20000000 + EPOLLWRBAND = 0x200 + EPOLLWRNORM = 0x100 + EPOLL_CLOEXEC = 0x80000 + EPOLL_CTL_ADD = 0x1 + EPOLL_CTL_DEL = 0x2 + EPOLL_CTL_MOD = 0x3 + ETH_P_1588 = 0x88f7 + ETH_P_8021AD = 0x88a8 + ETH_P_8021AH = 0x88e7 + ETH_P_8021Q = 0x8100 + ETH_P_80221 = 0x8917 + ETH_P_802_2 = 0x4 + ETH_P_802_3 = 0x1 + ETH_P_802_3_MIN = 0x600 + ETH_P_802_EX1 = 0x88b5 + ETH_P_AARP = 0x80f3 + ETH_P_AF_IUCV = 0xfbfb + ETH_P_ALL = 0x3 + ETH_P_AOE = 0x88a2 + ETH_P_ARCNET = 0x1a + ETH_P_ARP = 0x806 + ETH_P_ATALK = 0x809b + ETH_P_ATMFATE = 0x8884 + ETH_P_ATMMPOA = 0x884c + ETH_P_AX25 = 0x2 + ETH_P_BATMAN = 0x4305 + ETH_P_BPQ = 0x8ff + ETH_P_CAIF = 0xf7 + ETH_P_CAN = 0xc + ETH_P_CANFD = 0xd + ETH_P_CONTROL = 0x16 + ETH_P_CUST = 0x6006 + ETH_P_DDCMP = 0x6 + ETH_P_DEC = 0x6000 + ETH_P_DIAG = 0x6005 + ETH_P_DNA_DL = 0x6001 + ETH_P_DNA_RC = 0x6002 + ETH_P_DNA_RT = 0x6003 + ETH_P_DSA = 0x1b + ETH_P_ECONET = 0x18 + ETH_P_EDSA = 0xdada + ETH_P_FCOE = 0x8906 + ETH_P_FIP = 0x8914 + ETH_P_HDLC = 0x19 + ETH_P_IEEE802154 = 0xf6 + ETH_P_IEEEPUP = 0xa00 + ETH_P_IEEEPUPAT = 0xa01 + ETH_P_IP = 0x800 + ETH_P_IPV6 = 0x86dd + ETH_P_IPX = 0x8137 + ETH_P_IRDA = 0x17 + ETH_P_LAT = 0x6004 + ETH_P_LINK_CTL = 0x886c + ETH_P_LOCALTALK = 0x9 + ETH_P_LOOP = 0x60 + ETH_P_LOOPBACK = 0x9000 + ETH_P_MOBITEX = 0x15 + ETH_P_MPLS_MC = 0x8848 + ETH_P_MPLS_UC = 0x8847 + ETH_P_MVRP = 0x88f5 + ETH_P_PAE = 0x888e + ETH_P_PAUSE = 0x8808 + ETH_P_PHONET = 0xf5 + ETH_P_PPPTALK = 0x10 + ETH_P_PPP_DISC = 0x8863 + ETH_P_PPP_MP = 0x8 + ETH_P_PPP_SES = 0x8864 + ETH_P_PRP = 0x88fb + ETH_P_PUP = 0x200 + ETH_P_PUPAT = 0x201 + ETH_P_QINQ1 = 0x9100 + ETH_P_QINQ2 = 0x9200 + ETH_P_QINQ3 = 0x9300 + ETH_P_RARP = 0x8035 + ETH_P_SCA = 0x6007 + ETH_P_SLOW = 0x8809 + ETH_P_SNAP = 0x5 + ETH_P_TDLS = 0x890d + ETH_P_TEB = 0x6558 + ETH_P_TIPC = 0x88ca + ETH_P_TRAILER = 0x1c + ETH_P_TR_802_2 = 0x11 + ETH_P_WAN_PPP = 0x7 + ETH_P_WCCP = 0x883e + ETH_P_X25 = 0x805 + ETH_P_XDSA = 0xf8 + EXTA = 0xe + EXTB = 0xf + EXTPROC = 0x10000000 + FD_CLOEXEC = 0x1 + FD_SETSIZE = 0x400 + FF0 = 0x0 + FF1 = 0x4000 + FFDLY = 0x4000 + FLUSHO = 0x800000 + F_DUPFD = 0x0 + F_DUPFD_CLOEXEC = 0x406 + F_EXLCK = 0x4 + F_GETFD = 0x1 + F_GETFL = 0x3 + F_GETLEASE = 0x401 + F_GETLK = 0x5 + F_GETLK64 = 0xc + F_GETOWN = 0x9 + F_GETOWN_EX = 0x10 + F_GETPIPE_SZ = 0x408 + F_GETSIG = 0xb + F_LOCK = 0x1 + F_NOTIFY = 0x402 + F_OFD_GETLK = 0x24 + F_OFD_SETLK = 0x25 + F_OFD_SETLKW = 0x26 + F_OK = 0x0 + F_RDLCK = 0x0 + F_SETFD = 0x2 + F_SETFL = 0x4 + F_SETLEASE = 0x400 + F_SETLK = 0x6 + F_SETLK64 = 0xd + F_SETLKW = 0x7 + F_SETLKW64 = 0xe + F_SETOWN = 0x8 + F_SETOWN_EX = 0xf + F_SETPIPE_SZ = 0x407 + F_SETSIG = 0xa + F_SHLCK = 0x8 + F_TEST = 0x3 + F_TLOCK = 0x2 + F_ULOCK = 0x0 + F_UNLCK = 0x2 + F_WRLCK = 0x1 + HUPCL = 0x4000 + IBSHIFT = 0x10 + ICANON = 0x100 + ICMPV6_FILTER = 0x1 + ICRNL = 0x100 + IEXTEN = 0x400 + IFA_F_DADFAILED = 0x8 + 
IFA_F_DEPRECATED = 0x20 + IFA_F_HOMEADDRESS = 0x10 + IFA_F_MANAGETEMPADDR = 0x100 + IFA_F_NODAD = 0x2 + IFA_F_NOPREFIXROUTE = 0x200 + IFA_F_OPTIMISTIC = 0x4 + IFA_F_PERMANENT = 0x80 + IFA_F_SECONDARY = 0x1 + IFA_F_TEMPORARY = 0x1 + IFA_F_TENTATIVE = 0x40 + IFA_MAX = 0x8 + IFF_ALLMULTI = 0x200 + IFF_ATTACH_QUEUE = 0x200 + IFF_AUTOMEDIA = 0x4000 + IFF_BROADCAST = 0x2 + IFF_DEBUG = 0x4 + IFF_DETACH_QUEUE = 0x400 + IFF_DORMANT = 0x20000 + IFF_DYNAMIC = 0x8000 + IFF_ECHO = 0x40000 + IFF_LOOPBACK = 0x8 + IFF_LOWER_UP = 0x10000 + IFF_MASTER = 0x400 + IFF_MULTICAST = 0x1000 + IFF_MULTI_QUEUE = 0x100 + IFF_NOARP = 0x80 + IFF_NOFILTER = 0x1000 + IFF_NOTRAILERS = 0x20 + IFF_NO_PI = 0x1000 + IFF_ONE_QUEUE = 0x2000 + IFF_PERSIST = 0x800 + IFF_POINTOPOINT = 0x10 + IFF_PORTSEL = 0x2000 + IFF_PROMISC = 0x100 + IFF_RUNNING = 0x40 + IFF_SLAVE = 0x800 + IFF_TAP = 0x2 + IFF_TUN = 0x1 + IFF_TUN_EXCL = 0x8000 + IFF_UP = 0x1 + IFF_VNET_HDR = 0x4000 + IFF_VOLATILE = 0x70c5a + IFNAMSIZ = 0x10 + IGNBRK = 0x1 + IGNCR = 0x80 + IGNPAR = 0x4 + IMAXBEL = 0x2000 + INLCR = 0x40 + INPCK = 0x10 + IN_ACCESS = 0x1 + IN_ALL_EVENTS = 0xfff + IN_ATTRIB = 0x4 + IN_CLASSA_HOST = 0xffffff + IN_CLASSA_MAX = 0x80 + IN_CLASSA_NET = 0xff000000 + IN_CLASSA_NSHIFT = 0x18 + IN_CLASSB_HOST = 0xffff + IN_CLASSB_MAX = 0x10000 + IN_CLASSB_NET = 0xffff0000 + IN_CLASSB_NSHIFT = 0x10 + IN_CLASSC_HOST = 0xff + IN_CLASSC_NET = 0xffffff00 + IN_CLASSC_NSHIFT = 0x8 + IN_CLOEXEC = 0x80000 + IN_CLOSE = 0x18 + IN_CLOSE_NOWRITE = 0x10 + IN_CLOSE_WRITE = 0x8 + IN_CREATE = 0x100 + IN_DELETE = 0x200 + IN_DELETE_SELF = 0x400 + IN_DONT_FOLLOW = 0x2000000 + IN_EXCL_UNLINK = 0x4000000 + IN_IGNORED = 0x8000 + IN_ISDIR = 0x40000000 + IN_LOOPBACKNET = 0x7f + IN_MASK_ADD = 0x20000000 + IN_MODIFY = 0x2 + IN_MOVE = 0xc0 + IN_MOVED_FROM = 0x40 + IN_MOVED_TO = 0x80 + IN_MOVE_SELF = 0x800 + IN_NONBLOCK = 0x800 + IN_ONESHOT = 0x80000000 + IN_ONLYDIR = 0x1000000 + IN_OPEN = 0x20 + IN_Q_OVERFLOW = 0x4000 + IN_UNMOUNT = 0x2000 + IPPROTO_AH = 0x33 + IPPROTO_BEETPH = 0x5e + IPPROTO_COMP = 0x6c + IPPROTO_DCCP = 0x21 + IPPROTO_DSTOPTS = 0x3c + IPPROTO_EGP = 0x8 + IPPROTO_ENCAP = 0x62 + IPPROTO_ESP = 0x32 + IPPROTO_FRAGMENT = 0x2c + IPPROTO_GRE = 0x2f + IPPROTO_HOPOPTS = 0x0 + IPPROTO_ICMP = 0x1 + IPPROTO_ICMPV6 = 0x3a + IPPROTO_IDP = 0x16 + IPPROTO_IGMP = 0x2 + IPPROTO_IP = 0x0 + IPPROTO_IPIP = 0x4 + IPPROTO_IPV6 = 0x29 + IPPROTO_MH = 0x87 + IPPROTO_MTP = 0x5c + IPPROTO_NONE = 0x3b + IPPROTO_PIM = 0x67 + IPPROTO_PUP = 0xc + IPPROTO_RAW = 0xff + IPPROTO_ROUTING = 0x2b + IPPROTO_RSVP = 0x2e + IPPROTO_SCTP = 0x84 + IPPROTO_TCP = 0x6 + IPPROTO_TP = 0x1d + IPPROTO_UDP = 0x11 + IPPROTO_UDPLITE = 0x88 + IPV6_2292DSTOPTS = 0x4 + IPV6_2292HOPLIMIT = 0x8 + IPV6_2292HOPOPTS = 0x3 + IPV6_2292PKTINFO = 0x2 + IPV6_2292PKTOPTIONS = 0x6 + IPV6_2292RTHDR = 0x5 + IPV6_ADDRFORM = 0x1 + IPV6_ADD_MEMBERSHIP = 0x14 + IPV6_AUTHHDR = 0xa + IPV6_CHECKSUM = 0x7 + IPV6_DROP_MEMBERSHIP = 0x15 + IPV6_DSTOPTS = 0x3b + IPV6_HOPLIMIT = 0x34 + IPV6_HOPOPTS = 0x36 + IPV6_IPSEC_POLICY = 0x22 + IPV6_JOIN_ANYCAST = 0x1b + IPV6_JOIN_GROUP = 0x14 + IPV6_LEAVE_ANYCAST = 0x1c + IPV6_LEAVE_GROUP = 0x15 + IPV6_MTU = 0x18 + IPV6_MTU_DISCOVER = 0x17 + IPV6_MULTICAST_HOPS = 0x12 + IPV6_MULTICAST_IF = 0x11 + IPV6_MULTICAST_LOOP = 0x13 + IPV6_NEXTHOP = 0x9 + IPV6_PKTINFO = 0x32 + IPV6_PMTUDISC_DO = 0x2 + IPV6_PMTUDISC_DONT = 0x0 + IPV6_PMTUDISC_INTERFACE = 0x4 + IPV6_PMTUDISC_OMIT = 0x5 + IPV6_PMTUDISC_PROBE = 0x3 + IPV6_PMTUDISC_WANT = 0x1 + IPV6_RECVDSTOPTS = 0x3a + IPV6_RECVERR = 0x19 + IPV6_RECVHOPLIMIT = 0x33 + 
IPV6_RECVHOPOPTS = 0x35 + IPV6_RECVPKTINFO = 0x31 + IPV6_RECVRTHDR = 0x38 + IPV6_RECVTCLASS = 0x42 + IPV6_ROUTER_ALERT = 0x16 + IPV6_RTHDR = 0x39 + IPV6_RTHDRDSTOPTS = 0x37 + IPV6_RTHDR_LOOSE = 0x0 + IPV6_RTHDR_STRICT = 0x1 + IPV6_RTHDR_TYPE_0 = 0x0 + IPV6_RXDSTOPTS = 0x3b + IPV6_RXHOPOPTS = 0x36 + IPV6_TCLASS = 0x43 + IPV6_UNICAST_HOPS = 0x10 + IPV6_V6ONLY = 0x1a + IPV6_XFRM_POLICY = 0x23 + IP_ADD_MEMBERSHIP = 0x23 + IP_ADD_SOURCE_MEMBERSHIP = 0x27 + IP_BLOCK_SOURCE = 0x26 + IP_DEFAULT_MULTICAST_LOOP = 0x1 + IP_DEFAULT_MULTICAST_TTL = 0x1 + IP_DF = 0x4000 + IP_DROP_MEMBERSHIP = 0x24 + IP_DROP_SOURCE_MEMBERSHIP = 0x28 + IP_FREEBIND = 0xf + IP_HDRINCL = 0x3 + IP_IPSEC_POLICY = 0x10 + IP_MAXPACKET = 0xffff + IP_MAX_MEMBERSHIPS = 0x14 + IP_MF = 0x2000 + IP_MINTTL = 0x15 + IP_MSFILTER = 0x29 + IP_MSS = 0x240 + IP_MTU = 0xe + IP_MTU_DISCOVER = 0xa + IP_MULTICAST_ALL = 0x31 + IP_MULTICAST_IF = 0x20 + IP_MULTICAST_LOOP = 0x22 + IP_MULTICAST_TTL = 0x21 + IP_NODEFRAG = 0x16 + IP_OFFMASK = 0x1fff + IP_OPTIONS = 0x4 + IP_ORIGDSTADDR = 0x14 + IP_PASSSEC = 0x12 + IP_PKTINFO = 0x8 + IP_PKTOPTIONS = 0x9 + IP_PMTUDISC = 0xa + IP_PMTUDISC_DO = 0x2 + IP_PMTUDISC_DONT = 0x0 + IP_PMTUDISC_INTERFACE = 0x4 + IP_PMTUDISC_OMIT = 0x5 + IP_PMTUDISC_PROBE = 0x3 + IP_PMTUDISC_WANT = 0x1 + IP_RECVERR = 0xb + IP_RECVOPTS = 0x6 + IP_RECVORIGDSTADDR = 0x14 + IP_RECVRETOPTS = 0x7 + IP_RECVTOS = 0xd + IP_RECVTTL = 0xc + IP_RETOPTS = 0x7 + IP_RF = 0x8000 + IP_ROUTER_ALERT = 0x5 + IP_TOS = 0x1 + IP_TRANSPARENT = 0x13 + IP_TTL = 0x2 + IP_UNBLOCK_SOURCE = 0x25 + IP_UNICAST_IF = 0x32 + IP_XFRM_POLICY = 0x11 + ISIG = 0x80 + ISTRIP = 0x20 + IUCLC = 0x1000 + IUTF8 = 0x4000 + IXANY = 0x800 + IXOFF = 0x400 + IXON = 0x200 + LINUX_REBOOT_CMD_CAD_OFF = 0x0 + LINUX_REBOOT_CMD_CAD_ON = 0x89abcdef + LINUX_REBOOT_CMD_HALT = 0xcdef0123 + LINUX_REBOOT_CMD_KEXEC = 0x45584543 + LINUX_REBOOT_CMD_POWER_OFF = 0x4321fedc + LINUX_REBOOT_CMD_RESTART = 0x1234567 + LINUX_REBOOT_CMD_RESTART2 = 0xa1b2c3d4 + LINUX_REBOOT_CMD_SW_SUSPEND = 0xd000fce2 + LINUX_REBOOT_MAGIC1 = 0xfee1dead + LINUX_REBOOT_MAGIC2 = 0x28121969 + LOCK_EX = 0x2 + LOCK_NB = 0x4 + LOCK_SH = 0x1 + LOCK_UN = 0x8 + MADV_DODUMP = 0x11 + MADV_DOFORK = 0xb + MADV_DONTDUMP = 0x10 + MADV_DONTFORK = 0xa + MADV_DONTNEED = 0x4 + MADV_HUGEPAGE = 0xe + MADV_HWPOISON = 0x64 + MADV_MERGEABLE = 0xc + MADV_NOHUGEPAGE = 0xf + MADV_NORMAL = 0x0 + MADV_RANDOM = 0x1 + MADV_REMOVE = 0x9 + MADV_SEQUENTIAL = 0x2 + MADV_UNMERGEABLE = 0xd + MADV_WILLNEED = 0x3 + MAP_ANON = 0x20 + MAP_ANONYMOUS = 0x20 + MAP_DENYWRITE = 0x800 + MAP_EXECUTABLE = 0x1000 + MAP_FILE = 0x0 + MAP_FIXED = 0x10 + MAP_GROWSDOWN = 0x100 + MAP_HUGETLB = 0x40000 + MAP_HUGE_MASK = 0x3f + MAP_HUGE_SHIFT = 0x1a + MAP_LOCKED = 0x80 + MAP_NONBLOCK = 0x10000 + MAP_NORESERVE = 0x40 + MAP_POPULATE = 0x8000 + MAP_PRIVATE = 0x2 + MAP_SHARED = 0x1 + MAP_STACK = 0x20000 + MAP_TYPE = 0xf + MCL_CURRENT = 0x2000 + MCL_FUTURE = 0x4000 + MNT_DETACH = 0x2 + MNT_EXPIRE = 0x4 + MNT_FORCE = 0x1 + MSG_CMSG_CLOEXEC = 0x40000000 + MSG_CONFIRM = 0x800 + MSG_CTRUNC = 0x8 + MSG_DONTROUTE = 0x4 + MSG_DONTWAIT = 0x40 + MSG_EOR = 0x80 + MSG_ERRQUEUE = 0x2000 + MSG_FASTOPEN = 0x20000000 + MSG_FIN = 0x200 + MSG_MORE = 0x8000 + MSG_NOSIGNAL = 0x4000 + MSG_OOB = 0x1 + MSG_PEEK = 0x2 + MSG_PROXY = 0x10 + MSG_RST = 0x1000 + MSG_SYN = 0x400 + MSG_TRUNC = 0x20 + MSG_TRYHARD = 0x4 + MSG_WAITALL = 0x100 + MSG_WAITFORONE = 0x10000 + MS_ACTIVE = 0x40000000 + MS_ASYNC = 0x1 + MS_BIND = 0x1000 + MS_DIRSYNC = 0x80 + MS_INVALIDATE = 0x2 + MS_I_VERSION = 0x800000 + MS_KERNMOUNT = 
0x400000 + MS_MANDLOCK = 0x40 + MS_MGC_MSK = 0xffff0000 + MS_MGC_VAL = 0xc0ed0000 + MS_MOVE = 0x2000 + MS_NOATIME = 0x400 + MS_NODEV = 0x4 + MS_NODIRATIME = 0x800 + MS_NOEXEC = 0x8 + MS_NOSUID = 0x2 + MS_NOUSER = -0x80000000 + MS_POSIXACL = 0x10000 + MS_PRIVATE = 0x40000 + MS_RDONLY = 0x1 + MS_REC = 0x4000 + MS_RELATIME = 0x200000 + MS_REMOUNT = 0x20 + MS_RMT_MASK = 0x800051 + MS_SHARED = 0x100000 + MS_SILENT = 0x8000 + MS_SLAVE = 0x80000 + MS_STRICTATIME = 0x1000000 + MS_SYNC = 0x4 + MS_SYNCHRONOUS = 0x10 + MS_UNBINDABLE = 0x20000 + NAME_MAX = 0xff + NETLINK_ADD_MEMBERSHIP = 0x1 + NETLINK_AUDIT = 0x9 + NETLINK_BROADCAST_ERROR = 0x4 + NETLINK_CONNECTOR = 0xb + NETLINK_CRYPTO = 0x15 + NETLINK_DNRTMSG = 0xe + NETLINK_DROP_MEMBERSHIP = 0x2 + NETLINK_ECRYPTFS = 0x13 + NETLINK_FIB_LOOKUP = 0xa + NETLINK_FIREWALL = 0x3 + NETLINK_GENERIC = 0x10 + NETLINK_INET_DIAG = 0x4 + NETLINK_IP6_FW = 0xd + NETLINK_ISCSI = 0x8 + NETLINK_KOBJECT_UEVENT = 0xf + NETLINK_NETFILTER = 0xc + NETLINK_NFLOG = 0x5 + NETLINK_NO_ENOBUFS = 0x5 + NETLINK_PKTINFO = 0x3 + NETLINK_RDMA = 0x14 + NETLINK_ROUTE = 0x0 + NETLINK_RX_RING = 0x6 + NETLINK_SCSITRANSPORT = 0x12 + NETLINK_SELINUX = 0x7 + NETLINK_SOCK_DIAG = 0x4 + NETLINK_TX_RING = 0x7 + NETLINK_UNUSED = 0x1 + NETLINK_USERSOCK = 0x2 + NETLINK_XFRM = 0x6 + NL0 = 0x0 + NL1 = 0x100 + NL2 = 0x200 + NL3 = 0x300 + NLA_ALIGNTO = 0x4 + NLA_F_NESTED = 0x8000 + NLA_F_NET_BYTEORDER = 0x4000 + NLA_HDRLEN = 0x4 + NLDLY = 0x300 + NLMSG_ALIGNTO = 0x4 + NLMSG_DONE = 0x3 + NLMSG_ERROR = 0x2 + NLMSG_HDRLEN = 0x10 + NLMSG_MIN_TYPE = 0x10 + NLMSG_NOOP = 0x1 + NLMSG_OVERRUN = 0x4 + NLM_F_ACK = 0x4 + NLM_F_APPEND = 0x800 + NLM_F_ATOMIC = 0x400 + NLM_F_CREATE = 0x400 + NLM_F_DUMP = 0x300 + NLM_F_DUMP_INTR = 0x10 + NLM_F_ECHO = 0x8 + NLM_F_EXCL = 0x200 + NLM_F_MATCH = 0x200 + NLM_F_MULTI = 0x2 + NLM_F_REPLACE = 0x100 + NLM_F_REQUEST = 0x1 + NLM_F_ROOT = 0x100 + NOFLSH = 0x80000000 + OCRNL = 0x8 + OFDEL = 0x80 + OFILL = 0x40 + OLCUC = 0x4 + ONLCR = 0x2 + ONLRET = 0x20 + ONOCR = 0x10 + OPOST = 0x1 + O_ACCMODE = 0x3 + O_APPEND = 0x400 + O_ASYNC = 0x2000 + O_CLOEXEC = 0x80000 + O_CREAT = 0x40 + O_DIRECT = 0x20000 + O_DIRECTORY = 0x4000 + O_DSYNC = 0x1000 + O_EXCL = 0x80 + O_FSYNC = 0x101000 + O_LARGEFILE = 0x0 + O_NDELAY = 0x800 + O_NOATIME = 0x40000 + O_NOCTTY = 0x100 + O_NOFOLLOW = 0x8000 + O_NONBLOCK = 0x800 + O_PATH = 0x200000 + O_RDONLY = 0x0 + O_RDWR = 0x2 + O_RSYNC = 0x101000 + O_SYNC = 0x101000 + O_TMPFILE = 0x410000 + O_TRUNC = 0x200 + O_WRONLY = 0x1 + PACKET_ADD_MEMBERSHIP = 0x1 + PACKET_AUXDATA = 0x8 + PACKET_BROADCAST = 0x1 + PACKET_COPY_THRESH = 0x7 + PACKET_DROP_MEMBERSHIP = 0x2 + PACKET_FANOUT = 0x12 + PACKET_FANOUT_CPU = 0x2 + PACKET_FANOUT_FLAG_DEFRAG = 0x8000 + PACKET_FANOUT_FLAG_ROLLOVER = 0x1000 + PACKET_FANOUT_HASH = 0x0 + PACKET_FANOUT_LB = 0x1 + PACKET_FANOUT_QM = 0x5 + PACKET_FANOUT_RND = 0x4 + PACKET_FANOUT_ROLLOVER = 0x3 + PACKET_FASTROUTE = 0x6 + PACKET_HDRLEN = 0xb + PACKET_HOST = 0x0 + PACKET_KERNEL = 0x7 + PACKET_LOOPBACK = 0x5 + PACKET_LOSS = 0xe + PACKET_MR_ALLMULTI = 0x2 + PACKET_MR_MULTICAST = 0x0 + PACKET_MR_PROMISC = 0x1 + PACKET_MR_UNICAST = 0x3 + PACKET_MULTICAST = 0x2 + PACKET_ORIGDEV = 0x9 + PACKET_OTHERHOST = 0x3 + PACKET_OUTGOING = 0x4 + PACKET_QDISC_BYPASS = 0x14 + PACKET_RECV_OUTPUT = 0x3 + PACKET_RESERVE = 0xc + PACKET_RX_RING = 0x5 + PACKET_STATISTICS = 0x6 + PACKET_TIMESTAMP = 0x11 + PACKET_TX_HAS_OFF = 0x13 + PACKET_TX_RING = 0xd + PACKET_TX_TIMESTAMP = 0x10 + PACKET_USER = 0x6 + PACKET_VERSION = 0xa + PACKET_VNET_HDR = 0xf + PARENB = 0x1000 + 
PARITY_CRC16_PR0 = 0x2 + PARITY_CRC16_PR0_CCITT = 0x4 + PARITY_CRC16_PR1 = 0x3 + PARITY_CRC16_PR1_CCITT = 0x5 + PARITY_CRC32_PR0_CCITT = 0x6 + PARITY_CRC32_PR1_CCITT = 0x7 + PARITY_DEFAULT = 0x0 + PARITY_NONE = 0x1 + PARMRK = 0x8 + PARODD = 0x2000 + PENDIN = 0x20000000 + PRIO_PGRP = 0x1 + PRIO_PROCESS = 0x0 + PRIO_USER = 0x2 + PROT_EXEC = 0x4 + PROT_GROWSDOWN = 0x1000000 + PROT_GROWSUP = 0x2000000 + PROT_NONE = 0x0 + PROT_READ = 0x1 + PROT_SAO = 0x10 + PROT_WRITE = 0x2 + PR_CAPBSET_DROP = 0x18 + PR_CAPBSET_READ = 0x17 + PR_ENDIAN_BIG = 0x0 + PR_ENDIAN_LITTLE = 0x1 + PR_ENDIAN_PPC_LITTLE = 0x2 + PR_FPEMU_NOPRINT = 0x1 + PR_FPEMU_SIGFPE = 0x2 + PR_FP_EXC_ASYNC = 0x2 + PR_FP_EXC_DISABLED = 0x0 + PR_FP_EXC_DIV = 0x10000 + PR_FP_EXC_INV = 0x100000 + PR_FP_EXC_NONRECOV = 0x1 + PR_FP_EXC_OVF = 0x20000 + PR_FP_EXC_PRECISE = 0x3 + PR_FP_EXC_RES = 0x80000 + PR_FP_EXC_SW_ENABLE = 0x80 + PR_FP_EXC_UND = 0x40000 + PR_GET_CHILD_SUBREAPER = 0x25 + PR_GET_DUMPABLE = 0x3 + PR_GET_ENDIAN = 0x13 + PR_GET_FPEMU = 0x9 + PR_GET_FPEXC = 0xb + PR_GET_KEEPCAPS = 0x7 + PR_GET_NAME = 0x10 + PR_GET_NO_NEW_PRIVS = 0x27 + PR_GET_PDEATHSIG = 0x2 + PR_GET_SECCOMP = 0x15 + PR_GET_SECUREBITS = 0x1b + PR_GET_THP_DISABLE = 0x2a + PR_GET_TID_ADDRESS = 0x28 + PR_GET_TIMERSLACK = 0x1e + PR_GET_TIMING = 0xd + PR_GET_TSC = 0x19 + PR_GET_UNALIGN = 0x5 + PR_MCE_KILL = 0x21 + PR_MCE_KILL_CLEAR = 0x0 + PR_MCE_KILL_DEFAULT = 0x2 + PR_MCE_KILL_EARLY = 0x1 + PR_MCE_KILL_GET = 0x22 + PR_MCE_KILL_LATE = 0x0 + PR_MCE_KILL_SET = 0x1 + PR_SET_CHILD_SUBREAPER = 0x24 + PR_SET_DUMPABLE = 0x4 + PR_SET_ENDIAN = 0x14 + PR_SET_FPEMU = 0xa + PR_SET_FPEXC = 0xc + PR_SET_KEEPCAPS = 0x8 + PR_SET_MM = 0x23 + PR_SET_MM_ARG_END = 0x9 + PR_SET_MM_ARG_START = 0x8 + PR_SET_MM_AUXV = 0xc + PR_SET_MM_BRK = 0x7 + PR_SET_MM_END_CODE = 0x2 + PR_SET_MM_END_DATA = 0x4 + PR_SET_MM_ENV_END = 0xb + PR_SET_MM_ENV_START = 0xa + PR_SET_MM_EXE_FILE = 0xd + PR_SET_MM_MAP = 0xe + PR_SET_MM_MAP_SIZE = 0xf + PR_SET_MM_START_BRK = 0x6 + PR_SET_MM_START_CODE = 0x1 + PR_SET_MM_START_DATA = 0x3 + PR_SET_MM_START_STACK = 0x5 + PR_SET_NAME = 0xf + PR_SET_NO_NEW_PRIVS = 0x26 + PR_SET_PDEATHSIG = 0x1 + PR_SET_PTRACER = 0x59616d61 + PR_SET_PTRACER_ANY = -0x1 + PR_SET_SECCOMP = 0x16 + PR_SET_SECUREBITS = 0x1c + PR_SET_THP_DISABLE = 0x29 + PR_SET_TIMERSLACK = 0x1d + PR_SET_TIMING = 0xe + PR_SET_TSC = 0x1a + PR_SET_UNALIGN = 0x6 + PR_TASK_PERF_EVENTS_DISABLE = 0x1f + PR_TASK_PERF_EVENTS_ENABLE = 0x20 + PR_TIMING_STATISTICAL = 0x0 + PR_TIMING_TIMESTAMP = 0x1 + PR_TSC_ENABLE = 0x1 + PR_TSC_SIGSEGV = 0x2 + PR_UNALIGN_NOPRINT = 0x1 + PR_UNALIGN_SIGBUS = 0x2 + PTRACE_ATTACH = 0x10 + PTRACE_CONT = 0x7 + PTRACE_DETACH = 0x11 + PTRACE_EVENT_CLONE = 0x3 + PTRACE_EVENT_EXEC = 0x4 + PTRACE_EVENT_EXIT = 0x6 + PTRACE_EVENT_FORK = 0x1 + PTRACE_EVENT_SECCOMP = 0x7 + PTRACE_EVENT_STOP = 0x80 + PTRACE_EVENT_VFORK = 0x2 + PTRACE_EVENT_VFORK_DONE = 0x5 + PTRACE_GETEVENTMSG = 0x4201 + PTRACE_GETEVRREGS = 0x14 + PTRACE_GETFPREGS = 0xe + PTRACE_GETREGS = 0xc + PTRACE_GETREGS64 = 0x16 + PTRACE_GETREGSET = 0x4204 + PTRACE_GETSIGINFO = 0x4202 + PTRACE_GETSIGMASK = 0x420a + PTRACE_GETVRREGS = 0x12 + PTRACE_GETVSRREGS = 0x1b + PTRACE_GET_DEBUGREG = 0x19 + PTRACE_INTERRUPT = 0x4207 + PTRACE_KILL = 0x8 + PTRACE_LISTEN = 0x4208 + PTRACE_O_EXITKILL = 0x100000 + PTRACE_O_MASK = 0x1000ff + PTRACE_O_TRACECLONE = 0x8 + PTRACE_O_TRACEEXEC = 0x10 + PTRACE_O_TRACEEXIT = 0x40 + PTRACE_O_TRACEFORK = 0x2 + PTRACE_O_TRACESECCOMP = 0x80 + PTRACE_O_TRACESYSGOOD = 0x1 + PTRACE_O_TRACEVFORK = 0x4 + PTRACE_O_TRACEVFORKDONE = 0x20 + 
PTRACE_PEEKDATA = 0x2 + PTRACE_PEEKSIGINFO = 0x4209 + PTRACE_PEEKSIGINFO_SHARED = 0x1 + PTRACE_PEEKTEXT = 0x1 + PTRACE_PEEKUSR = 0x3 + PTRACE_POKEDATA = 0x5 + PTRACE_POKETEXT = 0x4 + PTRACE_POKEUSR = 0x6 + PTRACE_SEIZE = 0x4206 + PTRACE_SETEVRREGS = 0x15 + PTRACE_SETFPREGS = 0xf + PTRACE_SETOPTIONS = 0x4200 + PTRACE_SETREGS = 0xd + PTRACE_SETREGS64 = 0x17 + PTRACE_SETREGSET = 0x4205 + PTRACE_SETSIGINFO = 0x4203 + PTRACE_SETSIGMASK = 0x420b + PTRACE_SETVRREGS = 0x13 + PTRACE_SETVSRREGS = 0x1c + PTRACE_SET_DEBUGREG = 0x1a + PTRACE_SINGLEBLOCK = 0x100 + PTRACE_SINGLESTEP = 0x9 + PTRACE_SYSCALL = 0x18 + PTRACE_TRACEME = 0x0 + PT_CCR = 0x26 + PT_CTR = 0x23 + PT_DAR = 0x29 + PT_DSCR = 0x2c + PT_DSISR = 0x2a + PT_FPR0 = 0x30 + PT_FPSCR = 0x50 + PT_LNK = 0x24 + PT_MSR = 0x21 + PT_NIP = 0x20 + PT_ORIG_R3 = 0x22 + PT_R0 = 0x0 + PT_R1 = 0x1 + PT_R10 = 0xa + PT_R11 = 0xb + PT_R12 = 0xc + PT_R13 = 0xd + PT_R14 = 0xe + PT_R15 = 0xf + PT_R16 = 0x10 + PT_R17 = 0x11 + PT_R18 = 0x12 + PT_R19 = 0x13 + PT_R2 = 0x2 + PT_R20 = 0x14 + PT_R21 = 0x15 + PT_R22 = 0x16 + PT_R23 = 0x17 + PT_R24 = 0x18 + PT_R25 = 0x19 + PT_R26 = 0x1a + PT_R27 = 0x1b + PT_R28 = 0x1c + PT_R29 = 0x1d + PT_R3 = 0x3 + PT_R30 = 0x1e + PT_R31 = 0x1f + PT_R4 = 0x4 + PT_R5 = 0x5 + PT_R6 = 0x6 + PT_R7 = 0x7 + PT_R8 = 0x8 + PT_R9 = 0x9 + PT_REGS_COUNT = 0x2c + PT_RESULT = 0x2b + PT_SOFTE = 0x27 + PT_TRAP = 0x28 + PT_VR0 = 0x52 + PT_VRSAVE = 0x94 + PT_VSCR = 0x93 + PT_VSR0 = 0x96 + PT_VSR31 = 0xd4 + PT_XER = 0x25 + RLIMIT_AS = 0x9 + RLIMIT_CORE = 0x4 + RLIMIT_CPU = 0x0 + RLIMIT_DATA = 0x2 + RLIMIT_FSIZE = 0x1 + RLIMIT_NOFILE = 0x7 + RLIMIT_STACK = 0x3 + RLIM_INFINITY = -0x1 + RTAX_ADVMSS = 0x8 + RTAX_CWND = 0x7 + RTAX_FEATURES = 0xc + RTAX_FEATURE_ALLFRAG = 0x8 + RTAX_FEATURE_ECN = 0x1 + RTAX_FEATURE_SACK = 0x2 + RTAX_FEATURE_TIMESTAMP = 0x4 + RTAX_HOPLIMIT = 0xa + RTAX_INITCWND = 0xb + RTAX_INITRWND = 0xe + RTAX_LOCK = 0x1 + RTAX_MAX = 0xf + RTAX_MTU = 0x2 + RTAX_QUICKACK = 0xf + RTAX_REORDERING = 0x9 + RTAX_RTO_MIN = 0xd + RTAX_RTT = 0x4 + RTAX_RTTVAR = 0x5 + RTAX_SSTHRESH = 0x6 + RTAX_UNSPEC = 0x0 + RTAX_WINDOW = 0x3 + RTA_ALIGNTO = 0x4 + RTA_MAX = 0x11 + RTCF_DIRECTSRC = 0x4000000 + RTCF_DOREDIRECT = 0x1000000 + RTCF_LOG = 0x2000000 + RTCF_MASQ = 0x400000 + RTCF_NAT = 0x800000 + RTCF_VALVE = 0x200000 + RTF_ADDRCLASSMASK = 0xf8000000 + RTF_ADDRCONF = 0x40000 + RTF_ALLONLINK = 0x20000 + RTF_BROADCAST = 0x10000000 + RTF_CACHE = 0x1000000 + RTF_DEFAULT = 0x10000 + RTF_DYNAMIC = 0x10 + RTF_FLOW = 0x2000000 + RTF_GATEWAY = 0x2 + RTF_HOST = 0x4 + RTF_INTERFACE = 0x40000000 + RTF_IRTT = 0x100 + RTF_LINKRT = 0x100000 + RTF_LOCAL = 0x80000000 + RTF_MODIFIED = 0x20 + RTF_MSS = 0x40 + RTF_MTU = 0x40 + RTF_MULTICAST = 0x20000000 + RTF_NAT = 0x8000000 + RTF_NOFORWARD = 0x1000 + RTF_NONEXTHOP = 0x200000 + RTF_NOPMTUDISC = 0x4000 + RTF_POLICY = 0x4000000 + RTF_REINSTATE = 0x8 + RTF_REJECT = 0x200 + RTF_STATIC = 0x400 + RTF_THROW = 0x2000 + RTF_UP = 0x1 + RTF_WINDOW = 0x80 + RTF_XRESOLVE = 0x800 + RTM_BASE = 0x10 + RTM_DELACTION = 0x31 + RTM_DELADDR = 0x15 + RTM_DELADDRLABEL = 0x49 + RTM_DELLINK = 0x11 + RTM_DELMDB = 0x55 + RTM_DELNEIGH = 0x1d + RTM_DELQDISC = 0x25 + RTM_DELROUTE = 0x19 + RTM_DELRULE = 0x21 + RTM_DELTCLASS = 0x29 + RTM_DELTFILTER = 0x2d + RTM_F_CLONED = 0x200 + RTM_F_EQUALIZE = 0x400 + RTM_F_NOTIFY = 0x100 + RTM_F_PREFIX = 0x800 + RTM_GETACTION = 0x32 + RTM_GETADDR = 0x16 + RTM_GETADDRLABEL = 0x4a + RTM_GETANYCAST = 0x3e + RTM_GETDCB = 0x4e + RTM_GETLINK = 0x12 + RTM_GETMDB = 0x56 + RTM_GETMULTICAST = 0x3a + RTM_GETNEIGH = 0x1e + 
RTM_GETNEIGHTBL = 0x42 + RTM_GETNETCONF = 0x52 + RTM_GETQDISC = 0x26 + RTM_GETROUTE = 0x1a + RTM_GETRULE = 0x22 + RTM_GETTCLASS = 0x2a + RTM_GETTFILTER = 0x2e + RTM_MAX = 0x57 + RTM_NEWACTION = 0x30 + RTM_NEWADDR = 0x14 + RTM_NEWADDRLABEL = 0x48 + RTM_NEWLINK = 0x10 + RTM_NEWMDB = 0x54 + RTM_NEWNDUSEROPT = 0x44 + RTM_NEWNEIGH = 0x1c + RTM_NEWNEIGHTBL = 0x40 + RTM_NEWNETCONF = 0x50 + RTM_NEWPREFIX = 0x34 + RTM_NEWQDISC = 0x24 + RTM_NEWROUTE = 0x18 + RTM_NEWRULE = 0x20 + RTM_NEWTCLASS = 0x28 + RTM_NEWTFILTER = 0x2c + RTM_NR_FAMILIES = 0x12 + RTM_NR_MSGTYPES = 0x48 + RTM_SETDCB = 0x4f + RTM_SETLINK = 0x13 + RTM_SETNEIGHTBL = 0x43 + RTNH_ALIGNTO = 0x4 + RTNH_F_DEAD = 0x1 + RTNH_F_ONLINK = 0x4 + RTNH_F_PERVASIVE = 0x2 + RTN_MAX = 0xb + RTPROT_BIRD = 0xc + RTPROT_BOOT = 0x3 + RTPROT_DHCP = 0x10 + RTPROT_DNROUTED = 0xd + RTPROT_GATED = 0x8 + RTPROT_KERNEL = 0x2 + RTPROT_MROUTED = 0x11 + RTPROT_MRT = 0xa + RTPROT_NTK = 0xf + RTPROT_RA = 0x9 + RTPROT_REDIRECT = 0x1 + RTPROT_STATIC = 0x4 + RTPROT_UNSPEC = 0x0 + RTPROT_XORP = 0xe + RTPROT_ZEBRA = 0xb + RT_CLASS_DEFAULT = 0xfd + RT_CLASS_LOCAL = 0xff + RT_CLASS_MAIN = 0xfe + RT_CLASS_MAX = 0xff + RT_CLASS_UNSPEC = 0x0 + RUSAGE_CHILDREN = -0x1 + RUSAGE_SELF = 0x0 + RUSAGE_THREAD = 0x1 + SCM_CREDENTIALS = 0x2 + SCM_RIGHTS = 0x1 + SCM_TIMESTAMP = 0x1d + SCM_TIMESTAMPING = 0x25 + SCM_TIMESTAMPNS = 0x23 + SCM_WIFI_STATUS = 0x29 + SHUT_RD = 0x0 + SHUT_RDWR = 0x2 + SHUT_WR = 0x1 + SIOCADDDLCI = 0x8980 + SIOCADDMULTI = 0x8931 + SIOCADDRT = 0x890b + SIOCATMARK = 0x8905 + SIOCDARP = 0x8953 + SIOCDELDLCI = 0x8981 + SIOCDELMULTI = 0x8932 + SIOCDELRT = 0x890c + SIOCDEVPRIVATE = 0x89f0 + SIOCDIFADDR = 0x8936 + SIOCDRARP = 0x8960 + SIOCGARP = 0x8954 + SIOCGIFADDR = 0x8915 + SIOCGIFBR = 0x8940 + SIOCGIFBRDADDR = 0x8919 + SIOCGIFCONF = 0x8912 + SIOCGIFCOUNT = 0x8938 + SIOCGIFDSTADDR = 0x8917 + SIOCGIFENCAP = 0x8925 + SIOCGIFFLAGS = 0x8913 + SIOCGIFHWADDR = 0x8927 + SIOCGIFINDEX = 0x8933 + SIOCGIFMAP = 0x8970 + SIOCGIFMEM = 0x891f + SIOCGIFMETRIC = 0x891d + SIOCGIFMTU = 0x8921 + SIOCGIFNAME = 0x8910 + SIOCGIFNETMASK = 0x891b + SIOCGIFPFLAGS = 0x8935 + SIOCGIFSLAVE = 0x8929 + SIOCGIFTXQLEN = 0x8942 + SIOCGPGRP = 0x8904 + SIOCGRARP = 0x8961 + SIOCGSTAMP = 0x8906 + SIOCGSTAMPNS = 0x8907 + SIOCPROTOPRIVATE = 0x89e0 + SIOCRTMSG = 0x890d + SIOCSARP = 0x8955 + SIOCSIFADDR = 0x8916 + SIOCSIFBR = 0x8941 + SIOCSIFBRDADDR = 0x891a + SIOCSIFDSTADDR = 0x8918 + SIOCSIFENCAP = 0x8926 + SIOCSIFFLAGS = 0x8914 + SIOCSIFHWADDR = 0x8924 + SIOCSIFHWBROADCAST = 0x8937 + SIOCSIFLINK = 0x8911 + SIOCSIFMAP = 0x8971 + SIOCSIFMEM = 0x8920 + SIOCSIFMETRIC = 0x891e + SIOCSIFMTU = 0x8922 + SIOCSIFNAME = 0x8923 + SIOCSIFNETMASK = 0x891c + SIOCSIFPFLAGS = 0x8934 + SIOCSIFSLAVE = 0x8930 + SIOCSIFTXQLEN = 0x8943 + SIOCSPGRP = 0x8902 + SIOCSRARP = 0x8962 + SOCK_CLOEXEC = 0x80000 + SOCK_DCCP = 0x6 + SOCK_DGRAM = 0x2 + SOCK_NONBLOCK = 0x800 + SOCK_PACKET = 0xa + SOCK_RAW = 0x3 + SOCK_RDM = 0x4 + SOCK_SEQPACKET = 0x5 + SOCK_STREAM = 0x1 + SOL_AAL = 0x109 + SOL_ATM = 0x108 + SOL_DECNET = 0x105 + SOL_ICMPV6 = 0x3a + SOL_IP = 0x0 + SOL_IPV6 = 0x29 + SOL_IRDA = 0x10a + SOL_PACKET = 0x107 + SOL_RAW = 0xff + SOL_SOCKET = 0x1 + SOL_TCP = 0x6 + SOL_X25 = 0x106 + SOMAXCONN = 0x80 + SO_ACCEPTCONN = 0x1e + SO_ATTACH_FILTER = 0x1a + SO_BINDTODEVICE = 0x19 + SO_BPF_EXTENSIONS = 0x30 + SO_BROADCAST = 0x6 + SO_BSDCOMPAT = 0xe + SO_BUSY_POLL = 0x2e + SO_DEBUG = 0x1 + SO_DETACH_FILTER = 0x1b + SO_DOMAIN = 0x27 + SO_DONTROUTE = 0x5 + SO_ERROR = 0x4 + SO_GET_FILTER = 0x1a + SO_KEEPALIVE = 0x9 + SO_LINGER = 0xd + 
SO_LOCK_FILTER = 0x2c + SO_MARK = 0x24 + SO_MAX_PACING_RATE = 0x2f + SO_NOFCS = 0x2b + SO_NO_CHECK = 0xb + SO_OOBINLINE = 0xa + SO_PASSCRED = 0x14 + SO_PASSSEC = 0x22 + SO_PEEK_OFF = 0x2a + SO_PEERCRED = 0x15 + SO_PEERNAME = 0x1c + SO_PEERSEC = 0x1f + SO_PRIORITY = 0xc + SO_PROTOCOL = 0x26 + SO_RCVBUF = 0x8 + SO_RCVBUFFORCE = 0x21 + SO_RCVLOWAT = 0x10 + SO_RCVTIMEO = 0x12 + SO_REUSEADDR = 0x2 + SO_REUSEPORT = 0xf + SO_RXQ_OVFL = 0x28 + SO_SECURITY_AUTHENTICATION = 0x16 + SO_SECURITY_ENCRYPTION_NETWORK = 0x18 + SO_SECURITY_ENCRYPTION_TRANSPORT = 0x17 + SO_SELECT_ERR_QUEUE = 0x2d + SO_SNDBUF = 0x7 + SO_SNDBUFFORCE = 0x20 + SO_SNDLOWAT = 0x11 + SO_SNDTIMEO = 0x13 + SO_TIMESTAMP = 0x1d + SO_TIMESTAMPING = 0x25 + SO_TIMESTAMPNS = 0x23 + SO_TYPE = 0x3 + SO_WIFI_STATUS = 0x29 + S_BLKSIZE = 0x200 + S_IEXEC = 0x40 + S_IFBLK = 0x6000 + S_IFCHR = 0x2000 + S_IFDIR = 0x4000 + S_IFIFO = 0x1000 + S_IFLNK = 0xa000 + S_IFMT = 0xf000 + S_IFREG = 0x8000 + S_IFSOCK = 0xc000 + S_IREAD = 0x100 + S_IRGRP = 0x20 + S_IROTH = 0x4 + S_IRUSR = 0x100 + S_IRWXG = 0x38 + S_IRWXO = 0x7 + S_IRWXU = 0x1c0 + S_ISGID = 0x400 + S_ISUID = 0x800 + S_ISVTX = 0x200 + S_IWGRP = 0x10 + S_IWOTH = 0x2 + S_IWRITE = 0x80 + S_IWUSR = 0x80 + S_IXGRP = 0x8 + S_IXOTH = 0x1 + S_IXUSR = 0x40 + TAB0 = 0x0 + TAB1 = 0x400 + TAB2 = 0x800 + TAB3 = 0xc00 + TABDLY = 0xc00 + TCFLSH = 0x2000741f + TCGETA = 0x40147417 + TCGETS = 0x402c7413 + TCIFLUSH = 0x0 + TCIOFF = 0x2 + TCIOFLUSH = 0x2 + TCION = 0x3 + TCOFLUSH = 0x1 + TCOOFF = 0x0 + TCOON = 0x1 + TCP_CONGESTION = 0xd + TCP_COOKIE_IN_ALWAYS = 0x1 + TCP_COOKIE_MAX = 0x10 + TCP_COOKIE_MIN = 0x8 + TCP_COOKIE_OUT_NEVER = 0x2 + TCP_COOKIE_PAIR_SIZE = 0x20 + TCP_COOKIE_TRANSACTIONS = 0xf + TCP_CORK = 0x3 + TCP_DEFER_ACCEPT = 0x9 + TCP_FASTOPEN = 0x17 + TCP_INFO = 0xb + TCP_KEEPCNT = 0x6 + TCP_KEEPIDLE = 0x4 + TCP_KEEPINTVL = 0x5 + TCP_LINGER2 = 0x8 + TCP_MAXSEG = 0x2 + TCP_MAXWIN = 0xffff + TCP_MAX_WINSHIFT = 0xe + TCP_MD5SIG = 0xe + TCP_MD5SIG_MAXKEYLEN = 0x50 + TCP_MSS = 0x200 + TCP_MSS_DEFAULT = 0x218 + TCP_MSS_DESIRED = 0x4c4 + TCP_NODELAY = 0x1 + TCP_QUEUE_SEQ = 0x15 + TCP_QUICKACK = 0xc + TCP_REPAIR = 0x13 + TCP_REPAIR_OPTIONS = 0x16 + TCP_REPAIR_QUEUE = 0x14 + TCP_SYNCNT = 0x7 + TCP_S_DATA_IN = 0x4 + TCP_S_DATA_OUT = 0x8 + TCP_THIN_DUPACK = 0x11 + TCP_THIN_LINEAR_TIMEOUTS = 0x10 + TCP_TIMESTAMP = 0x18 + TCP_USER_TIMEOUT = 0x12 + TCP_WINDOW_CLAMP = 0xa + TCSAFLUSH = 0x2 + TCSBRK = 0x2000741d + TCSBRKP = 0x5425 + TCSETA = 0x80147418 + TCSETAF = 0x8014741c + TCSETAW = 0x80147419 + TCSETS = 0x802c7414 + TCSETSF = 0x802c7416 + TCSETSW = 0x802c7415 + TCXONC = 0x2000741e + TIOCCBRK = 0x5428 + TIOCCONS = 0x541d + TIOCEXCL = 0x540c + TIOCGDEV = 0x40045432 + TIOCGETC = 0x40067412 + TIOCGETD = 0x5424 + TIOCGETP = 0x40067408 + TIOCGEXCL = 0x40045440 + TIOCGICOUNT = 0x545d + TIOCGLCKTRMIOS = 0x5456 + TIOCGLTC = 0x40067474 + TIOCGPGRP = 0x40047477 + TIOCGPKT = 0x40045438 + TIOCGPTLCK = 0x40045439 + TIOCGPTN = 0x40045430 + TIOCGRS485 = 0x542e + TIOCGSERIAL = 0x541e + TIOCGSID = 0x5429 + TIOCGSOFTCAR = 0x5419 + TIOCGWINSZ = 0x40087468 + TIOCINQ = 0x4004667f + TIOCLINUX = 0x541c + TIOCMBIC = 0x5417 + TIOCMBIS = 0x5416 + TIOCMGET = 0x5415 + TIOCMIWAIT = 0x545c + TIOCMSET = 0x5418 + TIOCM_CAR = 0x40 + TIOCM_CD = 0x40 + TIOCM_CTS = 0x20 + TIOCM_DSR = 0x100 + TIOCM_DTR = 0x2 + TIOCM_LE = 0x1 + TIOCM_LOOP = 0x8000 + TIOCM_OUT1 = 0x2000 + TIOCM_OUT2 = 0x4000 + TIOCM_RI = 0x80 + TIOCM_RNG = 0x80 + TIOCM_RTS = 0x4 + TIOCM_SR = 0x10 + TIOCM_ST = 0x8 + TIOCNOTTY = 0x5422 + TIOCNXCL = 0x540d + TIOCOUTQ = 0x40047473 + TIOCPKT 
= 0x5420 + TIOCPKT_DATA = 0x0 + TIOCPKT_DOSTOP = 0x20 + TIOCPKT_FLUSHREAD = 0x1 + TIOCPKT_FLUSHWRITE = 0x2 + TIOCPKT_IOCTL = 0x40 + TIOCPKT_NOSTOP = 0x10 + TIOCPKT_START = 0x8 + TIOCPKT_STOP = 0x4 + TIOCSBRK = 0x5427 + TIOCSCTTY = 0x540e + TIOCSERCONFIG = 0x5453 + TIOCSERGETLSR = 0x5459 + TIOCSERGETMULTI = 0x545a + TIOCSERGSTRUCT = 0x5458 + TIOCSERGWILD = 0x5454 + TIOCSERSETMULTI = 0x545b + TIOCSERSWILD = 0x5455 + TIOCSER_TEMT = 0x1 + TIOCSETC = 0x80067411 + TIOCSETD = 0x5423 + TIOCSETN = 0x8006740a + TIOCSETP = 0x80067409 + TIOCSIG = 0x80045436 + TIOCSLCKTRMIOS = 0x5457 + TIOCSLTC = 0x80067475 + TIOCSPGRP = 0x80047476 + TIOCSPTLCK = 0x80045431 + TIOCSRS485 = 0x542f + TIOCSSERIAL = 0x541f + TIOCSSOFTCAR = 0x541a + TIOCSTART = 0x2000746e + TIOCSTI = 0x5412 + TIOCSTOP = 0x2000746f + TIOCSWINSZ = 0x80087467 + TIOCVHANGUP = 0x5437 + TOSTOP = 0x400000 + TUNATTACHFILTER = 0x801054d5 + TUNDETACHFILTER = 0x801054d6 + TUNGETFEATURES = 0x400454cf + TUNGETFILTER = 0x401054db + TUNGETIFF = 0x400454d2 + TUNGETSNDBUF = 0x400454d3 + TUNGETVNETHDRSZ = 0x400454d7 + TUNSETDEBUG = 0x800454c9 + TUNSETGROUP = 0x800454ce + TUNSETIFF = 0x800454ca + TUNSETIFINDEX = 0x800454da + TUNSETLINK = 0x800454cd + TUNSETNOCSUM = 0x800454c8 + TUNSETOFFLOAD = 0x800454d0 + TUNSETOWNER = 0x800454cc + TUNSETPERSIST = 0x800454cb + TUNSETQUEUE = 0x800454d9 + TUNSETSNDBUF = 0x800454d4 + TUNSETTXFILTER = 0x800454d1 + TUNSETVNETHDRSZ = 0x800454d8 + VDISCARD = 0x10 + VEOF = 0x4 + VEOL = 0x6 + VEOL2 = 0x8 + VERASE = 0x2 + VINTR = 0x0 + VKILL = 0x3 + VLNEXT = 0xf + VMIN = 0x5 + VQUIT = 0x1 + VREPRINT = 0xb + VSTART = 0xd + VSTOP = 0xe + VSUSP = 0xc + VSWTC = 0x9 + VT0 = 0x0 + VT1 = 0x10000 + VTDLY = 0x10000 + VTIME = 0x7 + VWERASE = 0xa + WALL = 0x40000000 + WCLONE = 0x80000000 + WCONTINUED = 0x8 + WEXITED = 0x4 + WNOHANG = 0x1 + WNOTHREAD = 0x20000000 + WNOWAIT = 0x1000000 + WORDSIZE = 0x40 + WSTOPPED = 0x2 + WUNTRACED = 0x2 + XCASE = 0x4000 + XTABS = 0xc00 +) + +// Errors +const ( + E2BIG = syscall.Errno(0x7) + EACCES = syscall.Errno(0xd) + EADDRINUSE = syscall.Errno(0x62) + EADDRNOTAVAIL = syscall.Errno(0x63) + EADV = syscall.Errno(0x44) + EAFNOSUPPORT = syscall.Errno(0x61) + EAGAIN = syscall.Errno(0xb) + EALREADY = syscall.Errno(0x72) + EBADE = syscall.Errno(0x34) + EBADF = syscall.Errno(0x9) + EBADFD = syscall.Errno(0x4d) + EBADMSG = syscall.Errno(0x4a) + EBADR = syscall.Errno(0x35) + EBADRQC = syscall.Errno(0x38) + EBADSLT = syscall.Errno(0x39) + EBFONT = syscall.Errno(0x3b) + EBUSY = syscall.Errno(0x10) + ECANCELED = syscall.Errno(0x7d) + ECHILD = syscall.Errno(0xa) + ECHRNG = syscall.Errno(0x2c) + ECOMM = syscall.Errno(0x46) + ECONNABORTED = syscall.Errno(0x67) + ECONNREFUSED = syscall.Errno(0x6f) + ECONNRESET = syscall.Errno(0x68) + EDEADLK = syscall.Errno(0x23) + EDEADLOCK = syscall.Errno(0x3a) + EDESTADDRREQ = syscall.Errno(0x59) + EDOM = syscall.Errno(0x21) + EDOTDOT = syscall.Errno(0x49) + EDQUOT = syscall.Errno(0x7a) + EEXIST = syscall.Errno(0x11) + EFAULT = syscall.Errno(0xe) + EFBIG = syscall.Errno(0x1b) + EHOSTDOWN = syscall.Errno(0x70) + EHOSTUNREACH = syscall.Errno(0x71) + EHWPOISON = syscall.Errno(0x85) + EIDRM = syscall.Errno(0x2b) + EILSEQ = syscall.Errno(0x54) + EINPROGRESS = syscall.Errno(0x73) + EINTR = syscall.Errno(0x4) + EINVAL = syscall.Errno(0x16) + EIO = syscall.Errno(0x5) + EISCONN = syscall.Errno(0x6a) + EISDIR = syscall.Errno(0x15) + EISNAM = syscall.Errno(0x78) + EKEYEXPIRED = syscall.Errno(0x7f) + EKEYREJECTED = syscall.Errno(0x81) + EKEYREVOKED = syscall.Errno(0x80) + EL2HLT = syscall.Errno(0x33) + 
EL2NSYNC = syscall.Errno(0x2d) + EL3HLT = syscall.Errno(0x2e) + EL3RST = syscall.Errno(0x2f) + ELIBACC = syscall.Errno(0x4f) + ELIBBAD = syscall.Errno(0x50) + ELIBEXEC = syscall.Errno(0x53) + ELIBMAX = syscall.Errno(0x52) + ELIBSCN = syscall.Errno(0x51) + ELNRNG = syscall.Errno(0x30) + ELOOP = syscall.Errno(0x28) + EMEDIUMTYPE = syscall.Errno(0x7c) + EMFILE = syscall.Errno(0x18) + EMLINK = syscall.Errno(0x1f) + EMSGSIZE = syscall.Errno(0x5a) + EMULTIHOP = syscall.Errno(0x48) + ENAMETOOLONG = syscall.Errno(0x24) + ENAVAIL = syscall.Errno(0x77) + ENETDOWN = syscall.Errno(0x64) + ENETRESET = syscall.Errno(0x66) + ENETUNREACH = syscall.Errno(0x65) + ENFILE = syscall.Errno(0x17) + ENOANO = syscall.Errno(0x37) + ENOBUFS = syscall.Errno(0x69) + ENOCSI = syscall.Errno(0x32) + ENODATA = syscall.Errno(0x3d) + ENODEV = syscall.Errno(0x13) + ENOENT = syscall.Errno(0x2) + ENOEXEC = syscall.Errno(0x8) + ENOKEY = syscall.Errno(0x7e) + ENOLCK = syscall.Errno(0x25) + ENOLINK = syscall.Errno(0x43) + ENOMEDIUM = syscall.Errno(0x7b) + ENOMEM = syscall.Errno(0xc) + ENOMSG = syscall.Errno(0x2a) + ENONET = syscall.Errno(0x40) + ENOPKG = syscall.Errno(0x41) + ENOPROTOOPT = syscall.Errno(0x5c) + ENOSPC = syscall.Errno(0x1c) + ENOSR = syscall.Errno(0x3f) + ENOSTR = syscall.Errno(0x3c) + ENOSYS = syscall.Errno(0x26) + ENOTBLK = syscall.Errno(0xf) + ENOTCONN = syscall.Errno(0x6b) + ENOTDIR = syscall.Errno(0x14) + ENOTEMPTY = syscall.Errno(0x27) + ENOTNAM = syscall.Errno(0x76) + ENOTRECOVERABLE = syscall.Errno(0x83) + ENOTSOCK = syscall.Errno(0x58) + ENOTSUP = syscall.Errno(0x5f) + ENOTTY = syscall.Errno(0x19) + ENOTUNIQ = syscall.Errno(0x4c) + ENXIO = syscall.Errno(0x6) + EOPNOTSUPP = syscall.Errno(0x5f) + EOVERFLOW = syscall.Errno(0x4b) + EOWNERDEAD = syscall.Errno(0x82) + EPERM = syscall.Errno(0x1) + EPFNOSUPPORT = syscall.Errno(0x60) + EPIPE = syscall.Errno(0x20) + EPROTO = syscall.Errno(0x47) + EPROTONOSUPPORT = syscall.Errno(0x5d) + EPROTOTYPE = syscall.Errno(0x5b) + ERANGE = syscall.Errno(0x22) + EREMCHG = syscall.Errno(0x4e) + EREMOTE = syscall.Errno(0x42) + EREMOTEIO = syscall.Errno(0x79) + ERESTART = syscall.Errno(0x55) + ERFKILL = syscall.Errno(0x84) + EROFS = syscall.Errno(0x1e) + ESHUTDOWN = syscall.Errno(0x6c) + ESOCKTNOSUPPORT = syscall.Errno(0x5e) + ESPIPE = syscall.Errno(0x1d) + ESRCH = syscall.Errno(0x3) + ESRMNT = syscall.Errno(0x45) + ESTALE = syscall.Errno(0x74) + ESTRPIPE = syscall.Errno(0x56) + ETIME = syscall.Errno(0x3e) + ETIMEDOUT = syscall.Errno(0x6e) + ETOOMANYREFS = syscall.Errno(0x6d) + ETXTBSY = syscall.Errno(0x1a) + EUCLEAN = syscall.Errno(0x75) + EUNATCH = syscall.Errno(0x31) + EUSERS = syscall.Errno(0x57) + EWOULDBLOCK = syscall.Errno(0xb) + EXDEV = syscall.Errno(0x12) + EXFULL = syscall.Errno(0x36) +) + +// Signals +const ( + SIGABRT = syscall.Signal(0x6) + SIGALRM = syscall.Signal(0xe) + SIGBUS = syscall.Signal(0x7) + SIGCHLD = syscall.Signal(0x11) + SIGCLD = syscall.Signal(0x11) + SIGCONT = syscall.Signal(0x12) + SIGFPE = syscall.Signal(0x8) + SIGHUP = syscall.Signal(0x1) + SIGILL = syscall.Signal(0x4) + SIGINT = syscall.Signal(0x2) + SIGIO = syscall.Signal(0x1d) + SIGIOT = syscall.Signal(0x6) + SIGKILL = syscall.Signal(0x9) + SIGPIPE = syscall.Signal(0xd) + SIGPOLL = syscall.Signal(0x1d) + SIGPROF = syscall.Signal(0x1b) + SIGPWR = syscall.Signal(0x1e) + SIGQUIT = syscall.Signal(0x3) + SIGSEGV = syscall.Signal(0xb) + SIGSTKFLT = syscall.Signal(0x10) + SIGSTOP = syscall.Signal(0x13) + SIGSYS = syscall.Signal(0x1f) + SIGTERM = syscall.Signal(0xf) + SIGTRAP = syscall.Signal(0x5) + 
SIGTSTP = syscall.Signal(0x14) + SIGTTIN = syscall.Signal(0x15) + SIGTTOU = syscall.Signal(0x16) + SIGUNUSED = syscall.Signal(0x1f) + SIGURG = syscall.Signal(0x17) + SIGUSR1 = syscall.Signal(0xa) + SIGUSR2 = syscall.Signal(0xc) + SIGVTALRM = syscall.Signal(0x1a) + SIGWINCH = syscall.Signal(0x1c) + SIGXCPU = syscall.Signal(0x18) + SIGXFSZ = syscall.Signal(0x19) +) + +// Error table +var errors = [...]string{ + 1: "operation not permitted", + 2: "no such file or directory", + 3: "no such process", + 4: "interrupted system call", + 5: "input/output error", + 6: "no such device or address", + 7: "argument list too long", + 8: "exec format error", + 9: "bad file descriptor", + 10: "no child processes", + 11: "resource temporarily unavailable", + 12: "cannot allocate memory", + 13: "permission denied", + 14: "bad address", + 15: "block device required", + 16: "device or resource busy", + 17: "file exists", + 18: "invalid cross-device link", + 19: "no such device", + 20: "not a directory", + 21: "is a directory", + 22: "invalid argument", + 23: "too many open files in system", + 24: "too many open files", + 25: "inappropriate ioctl for device", + 26: "text file busy", + 27: "file too large", + 28: "no space left on device", + 29: "illegal seek", + 30: "read-only file system", + 31: "too many links", + 32: "broken pipe", + 33: "numerical argument out of domain", + 34: "numerical result out of range", + 35: "resource deadlock avoided", + 36: "file name too long", + 37: "no locks available", + 38: "function not implemented", + 39: "directory not empty", + 40: "too many levels of symbolic links", + 42: "no message of desired type", + 43: "identifier removed", + 44: "channel number out of range", + 45: "level 2 not synchronized", + 46: "level 3 halted", + 47: "level 3 reset", + 48: "link number out of range", + 49: "protocol driver not attached", + 50: "no CSI structure available", + 51: "level 2 halted", + 52: "invalid exchange", + 53: "invalid request descriptor", + 54: "exchange full", + 55: "no anode", + 56: "invalid request code", + 57: "invalid slot", + 58: "file locking deadlock error", + 59: "bad font file format", + 60: "device not a stream", + 61: "no data available", + 62: "timer expired", + 63: "out of streams resources", + 64: "machine is not on the network", + 65: "package not installed", + 66: "object is remote", + 67: "link has been severed", + 68: "advertise error", + 69: "srmount error", + 70: "communication error on send", + 71: "protocol error", + 72: "multihop attempted", + 73: "RFS specific error", + 74: "bad message", + 75: "value too large for defined data type", + 76: "name not unique on network", + 77: "file descriptor in bad state", + 78: "remote address changed", + 79: "can not access a needed shared library", + 80: "accessing a corrupted shared library", + 81: ".lib section in a.out corrupted", + 82: "attempting to link in too many shared libraries", + 83: "cannot exec a shared library directly", + 84: "invalid or incomplete multibyte or wide character", + 85: "interrupted system call should be restarted", + 86: "streams pipe error", + 87: "too many users", + 88: "socket operation on non-socket", + 89: "destination address required", + 90: "message too long", + 91: "protocol wrong type for socket", + 92: "protocol not available", + 93: "protocol not supported", + 94: "socket type not supported", + 95: "operation not supported", + 96: "protocol family not supported", + 97: "address family not supported by protocol", + 98: "address already in use", + 99: "cannot assign 
requested address", + 100: "network is down", + 101: "network is unreachable", + 102: "network dropped connection on reset", + 103: "software caused connection abort", + 104: "connection reset by peer", + 105: "no buffer space available", + 106: "transport endpoint is already connected", + 107: "transport endpoint is not connected", + 108: "cannot send after transport endpoint shutdown", + 109: "too many references: cannot splice", + 110: "connection timed out", + 111: "connection refused", + 112: "host is down", + 113: "no route to host", + 114: "operation already in progress", + 115: "operation now in progress", + 116: "stale file handle", + 117: "structure needs cleaning", + 118: "not a XENIX named type file", + 119: "no XENIX semaphores available", + 120: "is a named type file", + 121: "remote I/O error", + 122: "disk quota exceeded", + 123: "no medium found", + 124: "wrong medium type", + 125: "operation canceled", + 126: "required key not available", + 127: "key has expired", + 128: "key has been revoked", + 129: "key was rejected by service", + 130: "owner died", + 131: "state not recoverable", + 132: "operation not possible due to RF-kill", + 133: "memory page has hardware error", +} + +// Signal table +var signals = [...]string{ + 1: "hangup", + 2: "interrupt", + 3: "quit", + 4: "illegal instruction", + 5: "trace/breakpoint trap", + 6: "aborted", + 7: "bus error", + 8: "floating point exception", + 9: "killed", + 10: "user defined signal 1", + 11: "segmentation fault", + 12: "user defined signal 2", + 13: "broken pipe", + 14: "alarm clock", + 15: "terminated", + 16: "stack fault", + 17: "child exited", + 18: "continued", + 19: "stopped (signal)", + 20: "stopped", + 21: "stopped (tty input)", + 22: "stopped (tty output)", + 23: "urgent I/O condition", + 24: "CPU time limit exceeded", + 25: "file size limit exceeded", + 26: "virtual timer expired", + 27: "profiling timer expired", + 28: "window changed", + 29: "I/O possible", + 30: "power failure", + 31: "bad system call", +} diff --git a/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zerrors_linux_ppc64le.go b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zerrors_linux_ppc64le.go new file mode 100644 index 000000000..0861bd566 --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zerrors_linux_ppc64le.go @@ -0,0 +1,1968 @@ +// mkerrors.sh -m64 +// MACHINE GENERATED BY THE COMMAND ABOVE; DO NOT EDIT + +// +build ppc64le,linux + +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs -- -m64 _const.go + +package unix + +import "syscall" + +const ( + AF_ALG = 0x26 + AF_APPLETALK = 0x5 + AF_ASH = 0x12 + AF_ATMPVC = 0x8 + AF_ATMSVC = 0x14 + AF_AX25 = 0x3 + AF_BLUETOOTH = 0x1f + AF_BRIDGE = 0x7 + AF_CAIF = 0x25 + AF_CAN = 0x1d + AF_DECnet = 0xc + AF_ECONET = 0x13 + AF_FILE = 0x1 + AF_IEEE802154 = 0x24 + AF_INET = 0x2 + AF_INET6 = 0xa + AF_IPX = 0x4 + AF_IRDA = 0x17 + AF_ISDN = 0x22 + AF_IUCV = 0x20 + AF_KEY = 0xf + AF_LLC = 0x1a + AF_LOCAL = 0x1 + AF_MAX = 0x29 + AF_NETBEUI = 0xd + AF_NETLINK = 0x10 + AF_NETROM = 0x6 + AF_NFC = 0x27 + AF_PACKET = 0x11 + AF_PHONET = 0x23 + AF_PPPOX = 0x18 + AF_RDS = 0x15 + AF_ROSE = 0xb + AF_ROUTE = 0x10 + AF_RXRPC = 0x21 + AF_SECURITY = 0xe + AF_SNA = 0x16 + AF_TIPC = 0x1e + AF_UNIX = 0x1 + AF_UNSPEC = 0x0 + AF_VSOCK = 0x28 + AF_WANPIPE = 0x19 + AF_X25 = 0x9 + ARPHRD_ADAPT = 0x108 + ARPHRD_APPLETLK = 0x8 + ARPHRD_ARCNET = 0x7 + ARPHRD_ASH = 0x30d + ARPHRD_ATM = 0x13 + ARPHRD_AX25 = 0x3 + ARPHRD_BIF = 0x307 + 
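The `errors` and `signals` tables above mirror the lookup tables that back `Errno.Error()` and `Signal.String()` in the standard library's `syscall` package: the index is the raw errno or signal number, and gaps (such as 41 on this architecture) are numbers Linux leaves unassigned or aliases to another value. As a minimal sketch of the lookup logic such a table supports (`errnoString` is a hypothetical helper for illustration, not part of the vendored files):

package main

import (
	"fmt"
	"strconv"
	"syscall"
)

// errnoString mirrors how syscall renders an Errno: index into a
// generated message table when the entry exists, otherwise fall back
// to a numeric form.
func errnoString(e syscall.Errno, table []string) string {
	if 0 <= int(e) && int(e) < len(table) && table[e] != "" {
		return table[e]
	}
	return "errno " + strconv.Itoa(int(e))
}

func main() {
	// A tiny stand-in for the generated table; the real tables index up to 133.
	table := []string{1: "operation not permitted", 2: "no such file or directory"}
	fmt.Println(errnoString(syscall.Errno(2), table))  // no such file or directory
	fmt.Println(errnoString(syscall.Errno(41), table)) // errno 41 (unassigned gap)
}

In the vendored copy these tables are package-private, so callers normally compare against the `E*` constants or print the `syscall.Errno` directly rather than reaching the table itself.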
ARPHRD_CAIF = 0x336 + ARPHRD_CAN = 0x118 + ARPHRD_CHAOS = 0x5 + ARPHRD_CISCO = 0x201 + ARPHRD_CSLIP = 0x101 + ARPHRD_CSLIP6 = 0x103 + ARPHRD_DDCMP = 0x205 + ARPHRD_DLCI = 0xf + ARPHRD_ECONET = 0x30e + ARPHRD_EETHER = 0x2 + ARPHRD_ETHER = 0x1 + ARPHRD_EUI64 = 0x1b + ARPHRD_FCAL = 0x311 + ARPHRD_FCFABRIC = 0x313 + ARPHRD_FCPL = 0x312 + ARPHRD_FCPP = 0x310 + ARPHRD_FDDI = 0x306 + ARPHRD_FRAD = 0x302 + ARPHRD_HDLC = 0x201 + ARPHRD_HIPPI = 0x30c + ARPHRD_HWX25 = 0x110 + ARPHRD_IEEE1394 = 0x18 + ARPHRD_IEEE802 = 0x6 + ARPHRD_IEEE80211 = 0x321 + ARPHRD_IEEE80211_PRISM = 0x322 + ARPHRD_IEEE80211_RADIOTAP = 0x323 + ARPHRD_IEEE802154 = 0x324 + ARPHRD_IEEE802154_MONITOR = 0x325 + ARPHRD_IEEE802_TR = 0x320 + ARPHRD_INFINIBAND = 0x20 + ARPHRD_IP6GRE = 0x337 + ARPHRD_IPDDP = 0x309 + ARPHRD_IPGRE = 0x30a + ARPHRD_IRDA = 0x30f + ARPHRD_LAPB = 0x204 + ARPHRD_LOCALTLK = 0x305 + ARPHRD_LOOPBACK = 0x304 + ARPHRD_METRICOM = 0x17 + ARPHRD_NETLINK = 0x338 + ARPHRD_NETROM = 0x0 + ARPHRD_NONE = 0xfffe + ARPHRD_PHONET = 0x334 + ARPHRD_PHONET_PIPE = 0x335 + ARPHRD_PIMREG = 0x30b + ARPHRD_PPP = 0x200 + ARPHRD_PRONET = 0x4 + ARPHRD_RAWHDLC = 0x206 + ARPHRD_ROSE = 0x10e + ARPHRD_RSRVD = 0x104 + ARPHRD_SIT = 0x308 + ARPHRD_SKIP = 0x303 + ARPHRD_SLIP = 0x100 + ARPHRD_SLIP6 = 0x102 + ARPHRD_TUNNEL = 0x300 + ARPHRD_TUNNEL6 = 0x301 + ARPHRD_VOID = 0xffff + ARPHRD_X25 = 0x10f + B0 = 0x0 + B1000000 = 0x17 + B110 = 0x3 + B115200 = 0x11 + B1152000 = 0x18 + B1200 = 0x9 + B134 = 0x4 + B150 = 0x5 + B1500000 = 0x19 + B1800 = 0xa + B19200 = 0xe + B200 = 0x6 + B2000000 = 0x1a + B230400 = 0x12 + B2400 = 0xb + B2500000 = 0x1b + B300 = 0x7 + B3000000 = 0x1c + B3500000 = 0x1d + B38400 = 0xf + B4000000 = 0x1e + B460800 = 0x13 + B4800 = 0xc + B50 = 0x1 + B500000 = 0x14 + B57600 = 0x10 + B576000 = 0x15 + B600 = 0x8 + B75 = 0x2 + B921600 = 0x16 + B9600 = 0xd + BOTHER = 0x1f + BPF_A = 0x10 + BPF_ABS = 0x20 + BPF_ADD = 0x0 + BPF_ALU = 0x4 + BPF_AND = 0x50 + BPF_B = 0x10 + BPF_DIV = 0x30 + BPF_H = 0x8 + BPF_IMM = 0x0 + BPF_IND = 0x40 + BPF_JA = 0x0 + BPF_JEQ = 0x10 + BPF_JGE = 0x30 + BPF_JGT = 0x20 + BPF_JMP = 0x5 + BPF_JSET = 0x40 + BPF_K = 0x0 + BPF_LD = 0x0 + BPF_LDX = 0x1 + BPF_LEN = 0x80 + BPF_LSH = 0x60 + BPF_MAJOR_VERSION = 0x1 + BPF_MAXINSNS = 0x1000 + BPF_MEM = 0x60 + BPF_MEMWORDS = 0x10 + BPF_MINOR_VERSION = 0x1 + BPF_MISC = 0x7 + BPF_MOD = 0x90 + BPF_MSH = 0xa0 + BPF_MUL = 0x20 + BPF_NEG = 0x80 + BPF_OR = 0x40 + BPF_RET = 0x6 + BPF_RSH = 0x70 + BPF_ST = 0x2 + BPF_STX = 0x3 + BPF_SUB = 0x10 + BPF_TAX = 0x0 + BPF_TXA = 0x80 + BPF_W = 0x0 + BPF_X = 0x8 + BPF_XOR = 0xa0 + BRKINT = 0x2 + BS0 = 0x0 + BS1 = 0x8000 + BSDLY = 0x8000 + CBAUD = 0xff + CBAUDEX = 0x0 + CFLUSH = 0xf + CIBAUD = 0xff0000 + CLOCAL = 0x8000 + CLOCK_BOOTTIME = 0x7 + CLOCK_BOOTTIME_ALARM = 0x9 + CLOCK_DEFAULT = 0x0 + CLOCK_EXT = 0x1 + CLOCK_INT = 0x2 + CLOCK_MONOTONIC = 0x1 + CLOCK_MONOTONIC_COARSE = 0x6 + CLOCK_MONOTONIC_RAW = 0x4 + CLOCK_PROCESS_CPUTIME_ID = 0x2 + CLOCK_REALTIME = 0x0 + CLOCK_REALTIME_ALARM = 0x8 + CLOCK_REALTIME_COARSE = 0x5 + CLOCK_THREAD_CPUTIME_ID = 0x3 + CLOCK_TXFROMRX = 0x4 + CLOCK_TXINT = 0x3 + CLONE_CHILD_CLEARTID = 0x200000 + CLONE_CHILD_SETTID = 0x1000000 + CLONE_DETACHED = 0x400000 + CLONE_FILES = 0x400 + CLONE_FS = 0x200 + CLONE_IO = 0x80000000 + CLONE_NEWIPC = 0x8000000 + CLONE_NEWNET = 0x40000000 + CLONE_NEWNS = 0x20000 + CLONE_NEWPID = 0x20000000 + CLONE_NEWUSER = 0x10000000 + CLONE_NEWUTS = 0x4000000 + CLONE_PARENT = 0x8000 + CLONE_PARENT_SETTID = 0x100000 + CLONE_PTRACE = 0x2000 + CLONE_SETTLS = 0x80000 + CLONE_SIGHAND = 0x800 + 
CLONE_SYSVSEM = 0x40000 + CLONE_THREAD = 0x10000 + CLONE_UNTRACED = 0x800000 + CLONE_VFORK = 0x4000 + CLONE_VM = 0x100 + CMSPAR = 0x40000000 + CR0 = 0x0 + CR1 = 0x1000 + CR2 = 0x2000 + CR3 = 0x3000 + CRDLY = 0x3000 + CREAD = 0x800 + CRTSCTS = 0x80000000 + CS5 = 0x0 + CS6 = 0x100 + CS7 = 0x200 + CS8 = 0x300 + CSIGNAL = 0xff + CSIZE = 0x300 + CSTART = 0x11 + CSTATUS = 0x0 + CSTOP = 0x13 + CSTOPB = 0x400 + CSUSP = 0x1a + DT_BLK = 0x6 + DT_CHR = 0x2 + DT_DIR = 0x4 + DT_FIFO = 0x1 + DT_LNK = 0xa + DT_REG = 0x8 + DT_SOCK = 0xc + DT_UNKNOWN = 0x0 + DT_WHT = 0xe + ECHO = 0x8 + ECHOCTL = 0x40 + ECHOE = 0x2 + ECHOK = 0x4 + ECHOKE = 0x1 + ECHONL = 0x10 + ECHOPRT = 0x20 + ENCODING_DEFAULT = 0x0 + ENCODING_FM_MARK = 0x3 + ENCODING_FM_SPACE = 0x4 + ENCODING_MANCHESTER = 0x5 + ENCODING_NRZ = 0x1 + ENCODING_NRZI = 0x2 + EPOLLERR = 0x8 + EPOLLET = 0x80000000 + EPOLLHUP = 0x10 + EPOLLIN = 0x1 + EPOLLMSG = 0x400 + EPOLLONESHOT = 0x40000000 + EPOLLOUT = 0x4 + EPOLLPRI = 0x2 + EPOLLRDBAND = 0x80 + EPOLLRDHUP = 0x2000 + EPOLLRDNORM = 0x40 + EPOLLWAKEUP = 0x20000000 + EPOLLWRBAND = 0x200 + EPOLLWRNORM = 0x100 + EPOLL_CLOEXEC = 0x80000 + EPOLL_CTL_ADD = 0x1 + EPOLL_CTL_DEL = 0x2 + EPOLL_CTL_MOD = 0x3 + ETH_P_1588 = 0x88f7 + ETH_P_8021AD = 0x88a8 + ETH_P_8021AH = 0x88e7 + ETH_P_8021Q = 0x8100 + ETH_P_802_2 = 0x4 + ETH_P_802_3 = 0x1 + ETH_P_802_3_MIN = 0x600 + ETH_P_802_EX1 = 0x88b5 + ETH_P_AARP = 0x80f3 + ETH_P_AF_IUCV = 0xfbfb + ETH_P_ALL = 0x3 + ETH_P_AOE = 0x88a2 + ETH_P_ARCNET = 0x1a + ETH_P_ARP = 0x806 + ETH_P_ATALK = 0x809b + ETH_P_ATMFATE = 0x8884 + ETH_P_ATMMPOA = 0x884c + ETH_P_AX25 = 0x2 + ETH_P_BATMAN = 0x4305 + ETH_P_BPQ = 0x8ff + ETH_P_CAIF = 0xf7 + ETH_P_CAN = 0xc + ETH_P_CANFD = 0xd + ETH_P_CONTROL = 0x16 + ETH_P_CUST = 0x6006 + ETH_P_DDCMP = 0x6 + ETH_P_DEC = 0x6000 + ETH_P_DIAG = 0x6005 + ETH_P_DNA_DL = 0x6001 + ETH_P_DNA_RC = 0x6002 + ETH_P_DNA_RT = 0x6003 + ETH_P_DSA = 0x1b + ETH_P_ECONET = 0x18 + ETH_P_EDSA = 0xdada + ETH_P_FCOE = 0x8906 + ETH_P_FIP = 0x8914 + ETH_P_HDLC = 0x19 + ETH_P_IEEE802154 = 0xf6 + ETH_P_IEEEPUP = 0xa00 + ETH_P_IEEEPUPAT = 0xa01 + ETH_P_IP = 0x800 + ETH_P_IPV6 = 0x86dd + ETH_P_IPX = 0x8137 + ETH_P_IRDA = 0x17 + ETH_P_LAT = 0x6004 + ETH_P_LINK_CTL = 0x886c + ETH_P_LOCALTALK = 0x9 + ETH_P_LOOP = 0x60 + ETH_P_MOBITEX = 0x15 + ETH_P_MPLS_MC = 0x8848 + ETH_P_MPLS_UC = 0x8847 + ETH_P_MVRP = 0x88f5 + ETH_P_PAE = 0x888e + ETH_P_PAUSE = 0x8808 + ETH_P_PHONET = 0xf5 + ETH_P_PPPTALK = 0x10 + ETH_P_PPP_DISC = 0x8863 + ETH_P_PPP_MP = 0x8 + ETH_P_PPP_SES = 0x8864 + ETH_P_PRP = 0x88fb + ETH_P_PUP = 0x200 + ETH_P_PUPAT = 0x201 + ETH_P_QINQ1 = 0x9100 + ETH_P_QINQ2 = 0x9200 + ETH_P_QINQ3 = 0x9300 + ETH_P_RARP = 0x8035 + ETH_P_SCA = 0x6007 + ETH_P_SLOW = 0x8809 + ETH_P_SNAP = 0x5 + ETH_P_TDLS = 0x890d + ETH_P_TEB = 0x6558 + ETH_P_TIPC = 0x88ca + ETH_P_TRAILER = 0x1c + ETH_P_TR_802_2 = 0x11 + ETH_P_WAN_PPP = 0x7 + ETH_P_WCCP = 0x883e + ETH_P_X25 = 0x805 + EXTA = 0xe + EXTB = 0xf + EXTPROC = 0x10000000 + FD_CLOEXEC = 0x1 + FD_SETSIZE = 0x400 + FF0 = 0x0 + FF1 = 0x4000 + FFDLY = 0x4000 + FLUSHO = 0x800000 + F_DUPFD = 0x0 + F_DUPFD_CLOEXEC = 0x406 + F_EXLCK = 0x4 + F_GETFD = 0x1 + F_GETFL = 0x3 + F_GETLEASE = 0x401 + F_GETLK = 0x5 + F_GETLK64 = 0xc + F_GETOWN = 0x9 + F_GETOWN_EX = 0x10 + F_GETPIPE_SZ = 0x408 + F_GETSIG = 0xb + F_LOCK = 0x1 + F_NOTIFY = 0x402 + F_OK = 0x0 + F_RDLCK = 0x0 + F_SETFD = 0x2 + F_SETFL = 0x4 + F_SETLEASE = 0x400 + F_SETLK = 0x6 + F_SETLK64 = 0xd + F_SETLKW = 0x7 + F_SETLKW64 = 0xe + F_SETOWN = 0x8 + F_SETOWN_EX = 0xf + F_SETPIPE_SZ = 0x407 + F_SETSIG = 0xa + F_SHLCK 
= 0x8 + F_TEST = 0x3 + F_TLOCK = 0x2 + F_ULOCK = 0x0 + F_UNLCK = 0x2 + F_WRLCK = 0x1 + HUPCL = 0x4000 + IBSHIFT = 0x10 + ICANON = 0x100 + ICMPV6_FILTER = 0x1 + ICRNL = 0x100 + IEXTEN = 0x400 + IFA_F_DADFAILED = 0x8 + IFA_F_DEPRECATED = 0x20 + IFA_F_HOMEADDRESS = 0x10 + IFA_F_NODAD = 0x2 + IFA_F_OPTIMISTIC = 0x4 + IFA_F_PERMANENT = 0x80 + IFA_F_SECONDARY = 0x1 + IFA_F_TEMPORARY = 0x1 + IFA_F_TENTATIVE = 0x40 + IFA_MAX = 0x7 + IFF_802_1Q_VLAN = 0x1 + IFF_ALLMULTI = 0x200 + IFF_ATTACH_QUEUE = 0x200 + IFF_AUTOMEDIA = 0x4000 + IFF_BONDING = 0x20 + IFF_BRIDGE_PORT = 0x4000 + IFF_BROADCAST = 0x2 + IFF_DEBUG = 0x4 + IFF_DETACH_QUEUE = 0x400 + IFF_DISABLE_NETPOLL = 0x1000 + IFF_DONT_BRIDGE = 0x800 + IFF_DORMANT = 0x20000 + IFF_DYNAMIC = 0x8000 + IFF_EBRIDGE = 0x2 + IFF_ECHO = 0x40000 + IFF_ISATAP = 0x80 + IFF_LIVE_ADDR_CHANGE = 0x100000 + IFF_LOOPBACK = 0x8 + IFF_LOWER_UP = 0x10000 + IFF_MACVLAN = 0x200000 + IFF_MACVLAN_PORT = 0x2000 + IFF_MASTER = 0x400 + IFF_MASTER_8023AD = 0x8 + IFF_MASTER_ALB = 0x10 + IFF_MASTER_ARPMON = 0x100 + IFF_MULTICAST = 0x1000 + IFF_MULTI_QUEUE = 0x100 + IFF_NOARP = 0x80 + IFF_NOFILTER = 0x1000 + IFF_NOTRAILERS = 0x20 + IFF_NO_PI = 0x1000 + IFF_ONE_QUEUE = 0x2000 + IFF_OVS_DATAPATH = 0x8000 + IFF_PERSIST = 0x800 + IFF_POINTOPOINT = 0x10 + IFF_PORTSEL = 0x2000 + IFF_PROMISC = 0x100 + IFF_RUNNING = 0x40 + IFF_SLAVE = 0x800 + IFF_SLAVE_INACTIVE = 0x4 + IFF_SLAVE_NEEDARP = 0x40 + IFF_SUPP_NOFCS = 0x80000 + IFF_TAP = 0x2 + IFF_TEAM_PORT = 0x40000 + IFF_TUN = 0x1 + IFF_TUN_EXCL = 0x8000 + IFF_TX_SKB_SHARING = 0x10000 + IFF_UNICAST_FLT = 0x20000 + IFF_UP = 0x1 + IFF_VNET_HDR = 0x4000 + IFF_VOLATILE = 0x70c5a + IFF_WAN_HDLC = 0x200 + IFF_XMIT_DST_RELEASE = 0x400 + IFNAMSIZ = 0x10 + IGNBRK = 0x1 + IGNCR = 0x80 + IGNPAR = 0x4 + IMAXBEL = 0x2000 + INLCR = 0x40 + INPCK = 0x10 + IN_ACCESS = 0x1 + IN_ALL_EVENTS = 0xfff + IN_ATTRIB = 0x4 + IN_CLASSA_HOST = 0xffffff + IN_CLASSA_MAX = 0x80 + IN_CLASSA_NET = 0xff000000 + IN_CLASSA_NSHIFT = 0x18 + IN_CLASSB_HOST = 0xffff + IN_CLASSB_MAX = 0x10000 + IN_CLASSB_NET = 0xffff0000 + IN_CLASSB_NSHIFT = 0x10 + IN_CLASSC_HOST = 0xff + IN_CLASSC_NET = 0xffffff00 + IN_CLASSC_NSHIFT = 0x8 + IN_CLOEXEC = 0x80000 + IN_CLOSE = 0x18 + IN_CLOSE_NOWRITE = 0x10 + IN_CLOSE_WRITE = 0x8 + IN_CREATE = 0x100 + IN_DELETE = 0x200 + IN_DELETE_SELF = 0x400 + IN_DONT_FOLLOW = 0x2000000 + IN_EXCL_UNLINK = 0x4000000 + IN_IGNORED = 0x8000 + IN_ISDIR = 0x40000000 + IN_LOOPBACKNET = 0x7f + IN_MASK_ADD = 0x20000000 + IN_MODIFY = 0x2 + IN_MOVE = 0xc0 + IN_MOVED_FROM = 0x40 + IN_MOVED_TO = 0x80 + IN_MOVE_SELF = 0x800 + IN_NONBLOCK = 0x800 + IN_ONESHOT = 0x80000000 + IN_ONLYDIR = 0x1000000 + IN_OPEN = 0x20 + IN_Q_OVERFLOW = 0x4000 + IN_UNMOUNT = 0x2000 + IPPROTO_AH = 0x33 + IPPROTO_BEETPH = 0x5e + IPPROTO_COMP = 0x6c + IPPROTO_DCCP = 0x21 + IPPROTO_DSTOPTS = 0x3c + IPPROTO_EGP = 0x8 + IPPROTO_ENCAP = 0x62 + IPPROTO_ESP = 0x32 + IPPROTO_FRAGMENT = 0x2c + IPPROTO_GRE = 0x2f + IPPROTO_HOPOPTS = 0x0 + IPPROTO_ICMP = 0x1 + IPPROTO_ICMPV6 = 0x3a + IPPROTO_IDP = 0x16 + IPPROTO_IGMP = 0x2 + IPPROTO_IP = 0x0 + IPPROTO_IPIP = 0x4 + IPPROTO_IPV6 = 0x29 + IPPROTO_MH = 0x87 + IPPROTO_MTP = 0x5c + IPPROTO_NONE = 0x3b + IPPROTO_PIM = 0x67 + IPPROTO_PUP = 0xc + IPPROTO_RAW = 0xff + IPPROTO_ROUTING = 0x2b + IPPROTO_RSVP = 0x2e + IPPROTO_SCTP = 0x84 + IPPROTO_TCP = 0x6 + IPPROTO_TP = 0x1d + IPPROTO_UDP = 0x11 + IPPROTO_UDPLITE = 0x88 + IPV6_2292DSTOPTS = 0x4 + IPV6_2292HOPLIMIT = 0x8 + IPV6_2292HOPOPTS = 0x3 + IPV6_2292PKTINFO = 0x2 + IPV6_2292PKTOPTIONS = 0x6 + IPV6_2292RTHDR = 0x5 + 
IPV6_ADDRFORM = 0x1 + IPV6_ADD_MEMBERSHIP = 0x14 + IPV6_AUTHHDR = 0xa + IPV6_CHECKSUM = 0x7 + IPV6_DROP_MEMBERSHIP = 0x15 + IPV6_DSTOPTS = 0x3b + IPV6_HOPLIMIT = 0x34 + IPV6_HOPOPTS = 0x36 + IPV6_IPSEC_POLICY = 0x22 + IPV6_JOIN_ANYCAST = 0x1b + IPV6_JOIN_GROUP = 0x14 + IPV6_LEAVE_ANYCAST = 0x1c + IPV6_LEAVE_GROUP = 0x15 + IPV6_MTU = 0x18 + IPV6_MTU_DISCOVER = 0x17 + IPV6_MULTICAST_HOPS = 0x12 + IPV6_MULTICAST_IF = 0x11 + IPV6_MULTICAST_LOOP = 0x13 + IPV6_NEXTHOP = 0x9 + IPV6_PKTINFO = 0x32 + IPV6_PMTUDISC_DO = 0x2 + IPV6_PMTUDISC_DONT = 0x0 + IPV6_PMTUDISC_PROBE = 0x3 + IPV6_PMTUDISC_WANT = 0x1 + IPV6_RECVDSTOPTS = 0x3a + IPV6_RECVERR = 0x19 + IPV6_RECVHOPLIMIT = 0x33 + IPV6_RECVHOPOPTS = 0x35 + IPV6_RECVPKTINFO = 0x31 + IPV6_RECVRTHDR = 0x38 + IPV6_RECVTCLASS = 0x42 + IPV6_ROUTER_ALERT = 0x16 + IPV6_RTHDR = 0x39 + IPV6_RTHDRDSTOPTS = 0x37 + IPV6_RTHDR_LOOSE = 0x0 + IPV6_RTHDR_STRICT = 0x1 + IPV6_RTHDR_TYPE_0 = 0x0 + IPV6_RXDSTOPTS = 0x3b + IPV6_RXHOPOPTS = 0x36 + IPV6_TCLASS = 0x43 + IPV6_UNICAST_HOPS = 0x10 + IPV6_V6ONLY = 0x1a + IPV6_XFRM_POLICY = 0x23 + IP_ADD_MEMBERSHIP = 0x23 + IP_ADD_SOURCE_MEMBERSHIP = 0x27 + IP_BLOCK_SOURCE = 0x26 + IP_DEFAULT_MULTICAST_LOOP = 0x1 + IP_DEFAULT_MULTICAST_TTL = 0x1 + IP_DF = 0x4000 + IP_DROP_MEMBERSHIP = 0x24 + IP_DROP_SOURCE_MEMBERSHIP = 0x28 + IP_FREEBIND = 0xf + IP_HDRINCL = 0x3 + IP_IPSEC_POLICY = 0x10 + IP_MAXPACKET = 0xffff + IP_MAX_MEMBERSHIPS = 0x14 + IP_MF = 0x2000 + IP_MINTTL = 0x15 + IP_MSFILTER = 0x29 + IP_MSS = 0x240 + IP_MTU = 0xe + IP_MTU_DISCOVER = 0xa + IP_MULTICAST_ALL = 0x31 + IP_MULTICAST_IF = 0x20 + IP_MULTICAST_LOOP = 0x22 + IP_MULTICAST_TTL = 0x21 + IP_OFFMASK = 0x1fff + IP_OPTIONS = 0x4 + IP_ORIGDSTADDR = 0x14 + IP_PASSSEC = 0x12 + IP_PKTINFO = 0x8 + IP_PKTOPTIONS = 0x9 + IP_PMTUDISC = 0xa + IP_PMTUDISC_DO = 0x2 + IP_PMTUDISC_DONT = 0x0 + IP_PMTUDISC_PROBE = 0x3 + IP_PMTUDISC_WANT = 0x1 + IP_RECVERR = 0xb + IP_RECVOPTS = 0x6 + IP_RECVORIGDSTADDR = 0x14 + IP_RECVRETOPTS = 0x7 + IP_RECVTOS = 0xd + IP_RECVTTL = 0xc + IP_RETOPTS = 0x7 + IP_RF = 0x8000 + IP_ROUTER_ALERT = 0x5 + IP_TOS = 0x1 + IP_TRANSPARENT = 0x13 + IP_TTL = 0x2 + IP_UNBLOCK_SOURCE = 0x25 + IP_UNICAST_IF = 0x32 + IP_XFRM_POLICY = 0x11 + ISIG = 0x80 + ISTRIP = 0x20 + IUCLC = 0x1000 + IUTF8 = 0x4000 + IXANY = 0x800 + IXOFF = 0x400 + IXON = 0x200 + LINUX_REBOOT_CMD_CAD_OFF = 0x0 + LINUX_REBOOT_CMD_CAD_ON = 0x89abcdef + LINUX_REBOOT_CMD_HALT = 0xcdef0123 + LINUX_REBOOT_CMD_KEXEC = 0x45584543 + LINUX_REBOOT_CMD_POWER_OFF = 0x4321fedc + LINUX_REBOOT_CMD_RESTART = 0x1234567 + LINUX_REBOOT_CMD_RESTART2 = 0xa1b2c3d4 + LINUX_REBOOT_CMD_SW_SUSPEND = 0xd000fce2 + LINUX_REBOOT_MAGIC1 = 0xfee1dead + LINUX_REBOOT_MAGIC2 = 0x28121969 + LOCK_EX = 0x2 + LOCK_NB = 0x4 + LOCK_SH = 0x1 + LOCK_UN = 0x8 + MADV_DODUMP = 0x11 + MADV_DOFORK = 0xb + MADV_DONTDUMP = 0x10 + MADV_DONTFORK = 0xa + MADV_DONTNEED = 0x4 + MADV_HUGEPAGE = 0xe + MADV_HWPOISON = 0x64 + MADV_MERGEABLE = 0xc + MADV_NOHUGEPAGE = 0xf + MADV_NORMAL = 0x0 + MADV_RANDOM = 0x1 + MADV_REMOVE = 0x9 + MADV_SEQUENTIAL = 0x2 + MADV_UNMERGEABLE = 0xd + MADV_WILLNEED = 0x3 + MAP_ANON = 0x20 + MAP_ANONYMOUS = 0x20 + MAP_DENYWRITE = 0x800 + MAP_EXECUTABLE = 0x1000 + MAP_FILE = 0x0 + MAP_FIXED = 0x10 + MAP_GROWSDOWN = 0x100 + MAP_HUGETLB = 0x40000 + MAP_HUGE_MASK = 0x3f + MAP_HUGE_SHIFT = 0x1a + MAP_LOCKED = 0x80 + MAP_NONBLOCK = 0x10000 + MAP_NORESERVE = 0x40 + MAP_POPULATE = 0x8000 + MAP_PRIVATE = 0x2 + MAP_SHARED = 0x1 + MAP_STACK = 0x20000 + MAP_TYPE = 0xf + MCL_CURRENT = 0x2000 + MCL_FUTURE = 0x4000 + MNT_DETACH = 0x2 + 
MNT_EXPIRE = 0x4 + MNT_FORCE = 0x1 + MSG_CMSG_CLOEXEC = 0x40000000 + MSG_CONFIRM = 0x800 + MSG_CTRUNC = 0x8 + MSG_DONTROUTE = 0x4 + MSG_DONTWAIT = 0x40 + MSG_EOR = 0x80 + MSG_ERRQUEUE = 0x2000 + MSG_FASTOPEN = 0x20000000 + MSG_FIN = 0x200 + MSG_MORE = 0x8000 + MSG_NOSIGNAL = 0x4000 + MSG_OOB = 0x1 + MSG_PEEK = 0x2 + MSG_PROXY = 0x10 + MSG_RST = 0x1000 + MSG_SYN = 0x400 + MSG_TRUNC = 0x20 + MSG_TRYHARD = 0x4 + MSG_WAITALL = 0x100 + MSG_WAITFORONE = 0x10000 + MS_ACTIVE = 0x40000000 + MS_ASYNC = 0x1 + MS_BIND = 0x1000 + MS_DIRSYNC = 0x80 + MS_INVALIDATE = 0x2 + MS_I_VERSION = 0x800000 + MS_KERNMOUNT = 0x400000 + MS_MANDLOCK = 0x40 + MS_MGC_MSK = 0xffff0000 + MS_MGC_VAL = 0xc0ed0000 + MS_MOVE = 0x2000 + MS_NOATIME = 0x400 + MS_NODEV = 0x4 + MS_NODIRATIME = 0x800 + MS_NOEXEC = 0x8 + MS_NOSUID = 0x2 + MS_NOUSER = -0x80000000 + MS_POSIXACL = 0x10000 + MS_PRIVATE = 0x40000 + MS_RDONLY = 0x1 + MS_REC = 0x4000 + MS_RELATIME = 0x200000 + MS_REMOUNT = 0x20 + MS_RMT_MASK = 0x800051 + MS_SHARED = 0x100000 + MS_SILENT = 0x8000 + MS_SLAVE = 0x80000 + MS_STRICTATIME = 0x1000000 + MS_SYNC = 0x4 + MS_SYNCHRONOUS = 0x10 + MS_UNBINDABLE = 0x20000 + NAME_MAX = 0xff + NETLINK_ADD_MEMBERSHIP = 0x1 + NETLINK_AUDIT = 0x9 + NETLINK_BROADCAST_ERROR = 0x4 + NETLINK_CONNECTOR = 0xb + NETLINK_CRYPTO = 0x15 + NETLINK_DNRTMSG = 0xe + NETLINK_DROP_MEMBERSHIP = 0x2 + NETLINK_ECRYPTFS = 0x13 + NETLINK_FIB_LOOKUP = 0xa + NETLINK_FIREWALL = 0x3 + NETLINK_GENERIC = 0x10 + NETLINK_INET_DIAG = 0x4 + NETLINK_IP6_FW = 0xd + NETLINK_ISCSI = 0x8 + NETLINK_KOBJECT_UEVENT = 0xf + NETLINK_NETFILTER = 0xc + NETLINK_NFLOG = 0x5 + NETLINK_NO_ENOBUFS = 0x5 + NETLINK_PKTINFO = 0x3 + NETLINK_RDMA = 0x14 + NETLINK_ROUTE = 0x0 + NETLINK_RX_RING = 0x6 + NETLINK_SCSITRANSPORT = 0x12 + NETLINK_SELINUX = 0x7 + NETLINK_SOCK_DIAG = 0x4 + NETLINK_TX_RING = 0x7 + NETLINK_UNUSED = 0x1 + NETLINK_USERSOCK = 0x2 + NETLINK_XFRM = 0x6 + NL0 = 0x0 + NL1 = 0x100 + NL2 = 0x200 + NL3 = 0x300 + NLA_ALIGNTO = 0x4 + NLA_F_NESTED = 0x8000 + NLA_F_NET_BYTEORDER = 0x4000 + NLA_HDRLEN = 0x4 + NLDLY = 0x300 + NLMSG_ALIGNTO = 0x4 + NLMSG_DONE = 0x3 + NLMSG_ERROR = 0x2 + NLMSG_HDRLEN = 0x10 + NLMSG_MIN_TYPE = 0x10 + NLMSG_NOOP = 0x1 + NLMSG_OVERRUN = 0x4 + NLM_F_ACK = 0x4 + NLM_F_APPEND = 0x800 + NLM_F_ATOMIC = 0x400 + NLM_F_CREATE = 0x400 + NLM_F_DUMP = 0x300 + NLM_F_DUMP_INTR = 0x10 + NLM_F_ECHO = 0x8 + NLM_F_EXCL = 0x200 + NLM_F_MATCH = 0x200 + NLM_F_MULTI = 0x2 + NLM_F_REPLACE = 0x100 + NLM_F_REQUEST = 0x1 + NLM_F_ROOT = 0x100 + NOFLSH = 0x80000000 + OCRNL = 0x8 + OFDEL = 0x80 + OFILL = 0x40 + OLCUC = 0x4 + ONLCR = 0x2 + ONLRET = 0x20 + ONOCR = 0x10 + OPOST = 0x1 + O_ACCMODE = 0x3 + O_APPEND = 0x400 + O_ASYNC = 0x2000 + O_CLOEXEC = 0x80000 + O_CREAT = 0x40 + O_DIRECT = 0x20000 + O_DIRECTORY = 0x4000 + O_DSYNC = 0x1000 + O_EXCL = 0x80 + O_FSYNC = 0x101000 + O_LARGEFILE = 0x0 + O_NDELAY = 0x800 + O_NOATIME = 0x40000 + O_NOCTTY = 0x100 + O_NOFOLLOW = 0x8000 + O_NONBLOCK = 0x800 + O_PATH = 0x200000 + O_RDONLY = 0x0 + O_RDWR = 0x2 + O_RSYNC = 0x101000 + O_SYNC = 0x101000 + O_TMPFILE = 0x410000 + O_TRUNC = 0x200 + O_WRONLY = 0x1 + PACKET_ADD_MEMBERSHIP = 0x1 + PACKET_AUXDATA = 0x8 + PACKET_BROADCAST = 0x1 + PACKET_COPY_THRESH = 0x7 + PACKET_DROP_MEMBERSHIP = 0x2 + PACKET_FANOUT = 0x12 + PACKET_FANOUT_CPU = 0x2 + PACKET_FANOUT_FLAG_DEFRAG = 0x8000 + PACKET_FANOUT_FLAG_ROLLOVER = 0x1000 + PACKET_FANOUT_HASH = 0x0 + PACKET_FANOUT_LB = 0x1 + PACKET_FANOUT_RND = 0x4 + PACKET_FANOUT_ROLLOVER = 0x3 + PACKET_FASTROUTE = 0x6 + PACKET_HDRLEN = 0xb + PACKET_HOST = 0x0 + 
PACKET_LOOPBACK = 0x5 + PACKET_LOSS = 0xe + PACKET_MR_ALLMULTI = 0x2 + PACKET_MR_MULTICAST = 0x0 + PACKET_MR_PROMISC = 0x1 + PACKET_MR_UNICAST = 0x3 + PACKET_MULTICAST = 0x2 + PACKET_ORIGDEV = 0x9 + PACKET_OTHERHOST = 0x3 + PACKET_OUTGOING = 0x4 + PACKET_RECV_OUTPUT = 0x3 + PACKET_RESERVE = 0xc + PACKET_RX_RING = 0x5 + PACKET_STATISTICS = 0x6 + PACKET_TIMESTAMP = 0x11 + PACKET_TX_HAS_OFF = 0x13 + PACKET_TX_RING = 0xd + PACKET_TX_TIMESTAMP = 0x10 + PACKET_VERSION = 0xa + PACKET_VNET_HDR = 0xf + PARENB = 0x1000 + PARITY_CRC16_PR0 = 0x2 + PARITY_CRC16_PR0_CCITT = 0x4 + PARITY_CRC16_PR1 = 0x3 + PARITY_CRC16_PR1_CCITT = 0x5 + PARITY_CRC32_PR0_CCITT = 0x6 + PARITY_CRC32_PR1_CCITT = 0x7 + PARITY_DEFAULT = 0x0 + PARITY_NONE = 0x1 + PARMRK = 0x8 + PARODD = 0x2000 + PENDIN = 0x20000000 + PRIO_PGRP = 0x1 + PRIO_PROCESS = 0x0 + PRIO_USER = 0x2 + PROT_EXEC = 0x4 + PROT_GROWSDOWN = 0x1000000 + PROT_GROWSUP = 0x2000000 + PROT_NONE = 0x0 + PROT_READ = 0x1 + PROT_SAO = 0x10 + PROT_WRITE = 0x2 + PR_CAPBSET_DROP = 0x18 + PR_CAPBSET_READ = 0x17 + PR_ENDIAN_BIG = 0x0 + PR_ENDIAN_LITTLE = 0x1 + PR_ENDIAN_PPC_LITTLE = 0x2 + PR_FPEMU_NOPRINT = 0x1 + PR_FPEMU_SIGFPE = 0x2 + PR_FP_EXC_ASYNC = 0x2 + PR_FP_EXC_DISABLED = 0x0 + PR_FP_EXC_DIV = 0x10000 + PR_FP_EXC_INV = 0x100000 + PR_FP_EXC_NONRECOV = 0x1 + PR_FP_EXC_OVF = 0x20000 + PR_FP_EXC_PRECISE = 0x3 + PR_FP_EXC_RES = 0x80000 + PR_FP_EXC_SW_ENABLE = 0x80 + PR_FP_EXC_UND = 0x40000 + PR_GET_CHILD_SUBREAPER = 0x25 + PR_GET_DUMPABLE = 0x3 + PR_GET_ENDIAN = 0x13 + PR_GET_FPEMU = 0x9 + PR_GET_FPEXC = 0xb + PR_GET_KEEPCAPS = 0x7 + PR_GET_NAME = 0x10 + PR_GET_NO_NEW_PRIVS = 0x27 + PR_GET_PDEATHSIG = 0x2 + PR_GET_SECCOMP = 0x15 + PR_GET_SECUREBITS = 0x1b + PR_GET_TID_ADDRESS = 0x28 + PR_GET_TIMERSLACK = 0x1e + PR_GET_TIMING = 0xd + PR_GET_TSC = 0x19 + PR_GET_UNALIGN = 0x5 + PR_MCE_KILL = 0x21 + PR_MCE_KILL_CLEAR = 0x0 + PR_MCE_KILL_DEFAULT = 0x2 + PR_MCE_KILL_EARLY = 0x1 + PR_MCE_KILL_GET = 0x22 + PR_MCE_KILL_LATE = 0x0 + PR_MCE_KILL_SET = 0x1 + PR_SET_CHILD_SUBREAPER = 0x24 + PR_SET_DUMPABLE = 0x4 + PR_SET_ENDIAN = 0x14 + PR_SET_FPEMU = 0xa + PR_SET_FPEXC = 0xc + PR_SET_KEEPCAPS = 0x8 + PR_SET_MM = 0x23 + PR_SET_MM_ARG_END = 0x9 + PR_SET_MM_ARG_START = 0x8 + PR_SET_MM_AUXV = 0xc + PR_SET_MM_BRK = 0x7 + PR_SET_MM_END_CODE = 0x2 + PR_SET_MM_END_DATA = 0x4 + PR_SET_MM_ENV_END = 0xb + PR_SET_MM_ENV_START = 0xa + PR_SET_MM_EXE_FILE = 0xd + PR_SET_MM_START_BRK = 0x6 + PR_SET_MM_START_CODE = 0x1 + PR_SET_MM_START_DATA = 0x3 + PR_SET_MM_START_STACK = 0x5 + PR_SET_NAME = 0xf + PR_SET_NO_NEW_PRIVS = 0x26 + PR_SET_PDEATHSIG = 0x1 + PR_SET_PTRACER = 0x59616d61 + PR_SET_PTRACER_ANY = -0x1 + PR_SET_SECCOMP = 0x16 + PR_SET_SECUREBITS = 0x1c + PR_SET_TIMERSLACK = 0x1d + PR_SET_TIMING = 0xe + PR_SET_TSC = 0x1a + PR_SET_UNALIGN = 0x6 + PR_TASK_PERF_EVENTS_DISABLE = 0x1f + PR_TASK_PERF_EVENTS_ENABLE = 0x20 + PR_TIMING_STATISTICAL = 0x0 + PR_TIMING_TIMESTAMP = 0x1 + PR_TSC_ENABLE = 0x1 + PR_TSC_SIGSEGV = 0x2 + PR_UNALIGN_NOPRINT = 0x1 + PR_UNALIGN_SIGBUS = 0x2 + PTRACE_ATTACH = 0x10 + PTRACE_CONT = 0x7 + PTRACE_DETACH = 0x11 + PTRACE_EVENT_CLONE = 0x3 + PTRACE_EVENT_EXEC = 0x4 + PTRACE_EVENT_EXIT = 0x6 + PTRACE_EVENT_FORK = 0x1 + PTRACE_EVENT_SECCOMP = 0x7 + PTRACE_EVENT_STOP = 0x80 + PTRACE_EVENT_VFORK = 0x2 + PTRACE_EVENT_VFORK_DONE = 0x5 + PTRACE_GETEVENTMSG = 0x4201 + PTRACE_GETEVRREGS = 0x14 + PTRACE_GETFPREGS = 0xe + PTRACE_GETREGS = 0xc + PTRACE_GETREGS64 = 0x16 + PTRACE_GETREGSET = 0x4204 + PTRACE_GETSIGINFO = 0x4202 + PTRACE_GETSIGMASK = 0x420a + PTRACE_GETVRREGS = 0x12 + 
PTRACE_GETVSRREGS = 0x1b + PTRACE_GET_DEBUGREG = 0x19 + PTRACE_INTERRUPT = 0x4207 + PTRACE_KILL = 0x8 + PTRACE_LISTEN = 0x4208 + PTRACE_O_EXITKILL = 0x100000 + PTRACE_O_MASK = 0x1000ff + PTRACE_O_TRACECLONE = 0x8 + PTRACE_O_TRACEEXEC = 0x10 + PTRACE_O_TRACEEXIT = 0x40 + PTRACE_O_TRACEFORK = 0x2 + PTRACE_O_TRACESECCOMP = 0x80 + PTRACE_O_TRACESYSGOOD = 0x1 + PTRACE_O_TRACEVFORK = 0x4 + PTRACE_O_TRACEVFORKDONE = 0x20 + PTRACE_PEEKDATA = 0x2 + PTRACE_PEEKSIGINFO = 0x4209 + PTRACE_PEEKSIGINFO_SHARED = 0x1 + PTRACE_PEEKTEXT = 0x1 + PTRACE_PEEKUSR = 0x3 + PTRACE_POKEDATA = 0x5 + PTRACE_POKETEXT = 0x4 + PTRACE_POKEUSR = 0x6 + PTRACE_SEIZE = 0x4206 + PTRACE_SETEVRREGS = 0x15 + PTRACE_SETFPREGS = 0xf + PTRACE_SETOPTIONS = 0x4200 + PTRACE_SETREGS = 0xd + PTRACE_SETREGS64 = 0x17 + PTRACE_SETREGSET = 0x4205 + PTRACE_SETSIGINFO = 0x4203 + PTRACE_SETSIGMASK = 0x420b + PTRACE_SETVRREGS = 0x13 + PTRACE_SETVSRREGS = 0x1c + PTRACE_SET_DEBUGREG = 0x1a + PTRACE_SINGLEBLOCK = 0x100 + PTRACE_SINGLESTEP = 0x9 + PTRACE_SYSCALL = 0x18 + PTRACE_TRACEME = 0x0 + PT_CCR = 0x26 + PT_CTR = 0x23 + PT_DAR = 0x29 + PT_DSCR = 0x2c + PT_DSISR = 0x2a + PT_FPR0 = 0x30 + PT_FPSCR = 0x50 + PT_LNK = 0x24 + PT_MSR = 0x21 + PT_NIP = 0x20 + PT_ORIG_R3 = 0x22 + PT_R0 = 0x0 + PT_R1 = 0x1 + PT_R10 = 0xa + PT_R11 = 0xb + PT_R12 = 0xc + PT_R13 = 0xd + PT_R14 = 0xe + PT_R15 = 0xf + PT_R16 = 0x10 + PT_R17 = 0x11 + PT_R18 = 0x12 + PT_R19 = 0x13 + PT_R2 = 0x2 + PT_R20 = 0x14 + PT_R21 = 0x15 + PT_R22 = 0x16 + PT_R23 = 0x17 + PT_R24 = 0x18 + PT_R25 = 0x19 + PT_R26 = 0x1a + PT_R27 = 0x1b + PT_R28 = 0x1c + PT_R29 = 0x1d + PT_R3 = 0x3 + PT_R30 = 0x1e + PT_R31 = 0x1f + PT_R4 = 0x4 + PT_R5 = 0x5 + PT_R6 = 0x6 + PT_R7 = 0x7 + PT_R8 = 0x8 + PT_R9 = 0x9 + PT_REGS_COUNT = 0x2c + PT_RESULT = 0x2b + PT_SOFTE = 0x27 + PT_TRAP = 0x28 + PT_VR0 = 0x52 + PT_VRSAVE = 0x94 + PT_VSCR = 0x93 + PT_VSR0 = 0x96 + PT_VSR31 = 0xd4 + PT_XER = 0x25 + RLIMIT_AS = 0x9 + RLIMIT_CORE = 0x4 + RLIMIT_CPU = 0x0 + RLIMIT_DATA = 0x2 + RLIMIT_FSIZE = 0x1 + RLIMIT_NOFILE = 0x7 + RLIMIT_STACK = 0x3 + RLIM_INFINITY = -0x1 + RTAX_ADVMSS = 0x8 + RTAX_CWND = 0x7 + RTAX_FEATURES = 0xc + RTAX_FEATURE_ALLFRAG = 0x8 + RTAX_FEATURE_ECN = 0x1 + RTAX_FEATURE_SACK = 0x2 + RTAX_FEATURE_TIMESTAMP = 0x4 + RTAX_HOPLIMIT = 0xa + RTAX_INITCWND = 0xb + RTAX_INITRWND = 0xe + RTAX_LOCK = 0x1 + RTAX_MAX = 0xf + RTAX_MTU = 0x2 + RTAX_QUICKACK = 0xf + RTAX_REORDERING = 0x9 + RTAX_RTO_MIN = 0xd + RTAX_RTT = 0x4 + RTAX_RTTVAR = 0x5 + RTAX_SSTHRESH = 0x6 + RTAX_UNSPEC = 0x0 + RTAX_WINDOW = 0x3 + RTA_ALIGNTO = 0x4 + RTA_MAX = 0x11 + RTCF_DIRECTSRC = 0x4000000 + RTCF_DOREDIRECT = 0x1000000 + RTCF_LOG = 0x2000000 + RTCF_MASQ = 0x400000 + RTCF_NAT = 0x800000 + RTCF_VALVE = 0x200000 + RTF_ADDRCLASSMASK = 0xf8000000 + RTF_ADDRCONF = 0x40000 + RTF_ALLONLINK = 0x20000 + RTF_BROADCAST = 0x10000000 + RTF_CACHE = 0x1000000 + RTF_DEFAULT = 0x10000 + RTF_DYNAMIC = 0x10 + RTF_FLOW = 0x2000000 + RTF_GATEWAY = 0x2 + RTF_HOST = 0x4 + RTF_INTERFACE = 0x40000000 + RTF_IRTT = 0x100 + RTF_LINKRT = 0x100000 + RTF_LOCAL = 0x80000000 + RTF_MODIFIED = 0x20 + RTF_MSS = 0x40 + RTF_MTU = 0x40 + RTF_MULTICAST = 0x20000000 + RTF_NAT = 0x8000000 + RTF_NOFORWARD = 0x1000 + RTF_NONEXTHOP = 0x200000 + RTF_NOPMTUDISC = 0x4000 + RTF_POLICY = 0x4000000 + RTF_REINSTATE = 0x8 + RTF_REJECT = 0x200 + RTF_STATIC = 0x400 + RTF_THROW = 0x2000 + RTF_UP = 0x1 + RTF_WINDOW = 0x80 + RTF_XRESOLVE = 0x800 + RTM_BASE = 0x10 + RTM_DELACTION = 0x31 + RTM_DELADDR = 0x15 + RTM_DELADDRLABEL = 0x49 + RTM_DELLINK = 0x11 + RTM_DELMDB = 0x55 + RTM_DELNEIGH = 0x1d + 
RTM_DELQDISC = 0x25 + RTM_DELROUTE = 0x19 + RTM_DELRULE = 0x21 + RTM_DELTCLASS = 0x29 + RTM_DELTFILTER = 0x2d + RTM_F_CLONED = 0x200 + RTM_F_EQUALIZE = 0x400 + RTM_F_NOTIFY = 0x100 + RTM_F_PREFIX = 0x800 + RTM_GETACTION = 0x32 + RTM_GETADDR = 0x16 + RTM_GETADDRLABEL = 0x4a + RTM_GETANYCAST = 0x3e + RTM_GETDCB = 0x4e + RTM_GETLINK = 0x12 + RTM_GETMDB = 0x56 + RTM_GETMULTICAST = 0x3a + RTM_GETNEIGH = 0x1e + RTM_GETNEIGHTBL = 0x42 + RTM_GETNETCONF = 0x52 + RTM_GETQDISC = 0x26 + RTM_GETROUTE = 0x1a + RTM_GETRULE = 0x22 + RTM_GETTCLASS = 0x2a + RTM_GETTFILTER = 0x2e + RTM_MAX = 0x57 + RTM_NEWACTION = 0x30 + RTM_NEWADDR = 0x14 + RTM_NEWADDRLABEL = 0x48 + RTM_NEWLINK = 0x10 + RTM_NEWMDB = 0x54 + RTM_NEWNDUSEROPT = 0x44 + RTM_NEWNEIGH = 0x1c + RTM_NEWNEIGHTBL = 0x40 + RTM_NEWNETCONF = 0x50 + RTM_NEWPREFIX = 0x34 + RTM_NEWQDISC = 0x24 + RTM_NEWROUTE = 0x18 + RTM_NEWRULE = 0x20 + RTM_NEWTCLASS = 0x28 + RTM_NEWTFILTER = 0x2c + RTM_NR_FAMILIES = 0x12 + RTM_NR_MSGTYPES = 0x48 + RTM_SETDCB = 0x4f + RTM_SETLINK = 0x13 + RTM_SETNEIGHTBL = 0x43 + RTNH_ALIGNTO = 0x4 + RTNH_F_DEAD = 0x1 + RTNH_F_ONLINK = 0x4 + RTNH_F_PERVASIVE = 0x2 + RTN_MAX = 0xb + RTPROT_BIRD = 0xc + RTPROT_BOOT = 0x3 + RTPROT_DHCP = 0x10 + RTPROT_DNROUTED = 0xd + RTPROT_GATED = 0x8 + RTPROT_KERNEL = 0x2 + RTPROT_MROUTED = 0x11 + RTPROT_MRT = 0xa + RTPROT_NTK = 0xf + RTPROT_RA = 0x9 + RTPROT_REDIRECT = 0x1 + RTPROT_STATIC = 0x4 + RTPROT_UNSPEC = 0x0 + RTPROT_XORP = 0xe + RTPROT_ZEBRA = 0xb + RT_CLASS_DEFAULT = 0xfd + RT_CLASS_LOCAL = 0xff + RT_CLASS_MAIN = 0xfe + RT_CLASS_MAX = 0xff + RT_CLASS_UNSPEC = 0x0 + RUSAGE_CHILDREN = -0x1 + RUSAGE_SELF = 0x0 + RUSAGE_THREAD = 0x1 + SCM_CREDENTIALS = 0x2 + SCM_RIGHTS = 0x1 + SCM_TIMESTAMP = 0x1d + SCM_TIMESTAMPING = 0x25 + SCM_TIMESTAMPNS = 0x23 + SCM_WIFI_STATUS = 0x29 + SHUT_RD = 0x0 + SHUT_RDWR = 0x2 + SHUT_WR = 0x1 + SIOCADDDLCI = 0x8980 + SIOCADDMULTI = 0x8931 + SIOCADDRT = 0x890b + SIOCATMARK = 0x8905 + SIOCDARP = 0x8953 + SIOCDELDLCI = 0x8981 + SIOCDELMULTI = 0x8932 + SIOCDELRT = 0x890c + SIOCDEVPRIVATE = 0x89f0 + SIOCDIFADDR = 0x8936 + SIOCDRARP = 0x8960 + SIOCGARP = 0x8954 + SIOCGIFADDR = 0x8915 + SIOCGIFBR = 0x8940 + SIOCGIFBRDADDR = 0x8919 + SIOCGIFCONF = 0x8912 + SIOCGIFCOUNT = 0x8938 + SIOCGIFDSTADDR = 0x8917 + SIOCGIFENCAP = 0x8925 + SIOCGIFFLAGS = 0x8913 + SIOCGIFHWADDR = 0x8927 + SIOCGIFINDEX = 0x8933 + SIOCGIFMAP = 0x8970 + SIOCGIFMEM = 0x891f + SIOCGIFMETRIC = 0x891d + SIOCGIFMTU = 0x8921 + SIOCGIFNAME = 0x8910 + SIOCGIFNETMASK = 0x891b + SIOCGIFPFLAGS = 0x8935 + SIOCGIFSLAVE = 0x8929 + SIOCGIFTXQLEN = 0x8942 + SIOCGPGRP = 0x8904 + SIOCGRARP = 0x8961 + SIOCGSTAMP = 0x8906 + SIOCGSTAMPNS = 0x8907 + SIOCPROTOPRIVATE = 0x89e0 + SIOCRTMSG = 0x890d + SIOCSARP = 0x8955 + SIOCSIFADDR = 0x8916 + SIOCSIFBR = 0x8941 + SIOCSIFBRDADDR = 0x891a + SIOCSIFDSTADDR = 0x8918 + SIOCSIFENCAP = 0x8926 + SIOCSIFFLAGS = 0x8914 + SIOCSIFHWADDR = 0x8924 + SIOCSIFHWBROADCAST = 0x8937 + SIOCSIFLINK = 0x8911 + SIOCSIFMAP = 0x8971 + SIOCSIFMEM = 0x8920 + SIOCSIFMETRIC = 0x891e + SIOCSIFMTU = 0x8922 + SIOCSIFNAME = 0x8923 + SIOCSIFNETMASK = 0x891c + SIOCSIFPFLAGS = 0x8934 + SIOCSIFSLAVE = 0x8930 + SIOCSIFTXQLEN = 0x8943 + SIOCSPGRP = 0x8902 + SIOCSRARP = 0x8962 + SOCK_CLOEXEC = 0x80000 + SOCK_DCCP = 0x6 + SOCK_DGRAM = 0x2 + SOCK_NONBLOCK = 0x800 + SOCK_PACKET = 0xa + SOCK_RAW = 0x3 + SOCK_RDM = 0x4 + SOCK_SEQPACKET = 0x5 + SOCK_STREAM = 0x1 + SOL_AAL = 0x109 + SOL_ATM = 0x108 + SOL_DECNET = 0x105 + SOL_ICMPV6 = 0x3a + SOL_IP = 0x0 + SOL_IPV6 = 0x29 + SOL_IRDA = 0x10a + SOL_PACKET = 0x107 + SOL_RAW = 0xff + 
SOL_SOCKET = 0x1 + SOL_TCP = 0x6 + SOL_X25 = 0x106 + SOMAXCONN = 0x80 + SO_ACCEPTCONN = 0x1e + SO_ATTACH_FILTER = 0x1a + SO_BINDTODEVICE = 0x19 + SO_BROADCAST = 0x6 + SO_BSDCOMPAT = 0xe + SO_BUSY_POLL = 0x2e + SO_DEBUG = 0x1 + SO_DETACH_FILTER = 0x1b + SO_DOMAIN = 0x27 + SO_DONTROUTE = 0x5 + SO_ERROR = 0x4 + SO_GET_FILTER = 0x1a + SO_KEEPALIVE = 0x9 + SO_LINGER = 0xd + SO_LOCK_FILTER = 0x2c + SO_MARK = 0x24 + SO_MAX_PACING_RATE = 0x2f + SO_NOFCS = 0x2b + SO_NO_CHECK = 0xb + SO_OOBINLINE = 0xa + SO_PASSCRED = 0x14 + SO_PASSSEC = 0x22 + SO_PEEK_OFF = 0x2a + SO_PEERCRED = 0x15 + SO_PEERNAME = 0x1c + SO_PEERSEC = 0x1f + SO_PRIORITY = 0xc + SO_PROTOCOL = 0x26 + SO_RCVBUF = 0x8 + SO_RCVBUFFORCE = 0x21 + SO_RCVLOWAT = 0x10 + SO_RCVTIMEO = 0x12 + SO_REUSEADDR = 0x2 + SO_REUSEPORT = 0xf + SO_RXQ_OVFL = 0x28 + SO_SECURITY_AUTHENTICATION = 0x16 + SO_SECURITY_ENCRYPTION_NETWORK = 0x18 + SO_SECURITY_ENCRYPTION_TRANSPORT = 0x17 + SO_SELECT_ERR_QUEUE = 0x2d + SO_SNDBUF = 0x7 + SO_SNDBUFFORCE = 0x20 + SO_SNDLOWAT = 0x11 + SO_SNDTIMEO = 0x13 + SO_TIMESTAMP = 0x1d + SO_TIMESTAMPING = 0x25 + SO_TIMESTAMPNS = 0x23 + SO_TYPE = 0x3 + SO_WIFI_STATUS = 0x29 + S_BLKSIZE = 0x200 + S_IEXEC = 0x40 + S_IFBLK = 0x6000 + S_IFCHR = 0x2000 + S_IFDIR = 0x4000 + S_IFIFO = 0x1000 + S_IFLNK = 0xa000 + S_IFMT = 0xf000 + S_IFREG = 0x8000 + S_IFSOCK = 0xc000 + S_IREAD = 0x100 + S_IRGRP = 0x20 + S_IROTH = 0x4 + S_IRUSR = 0x100 + S_IRWXG = 0x38 + S_IRWXO = 0x7 + S_IRWXU = 0x1c0 + S_ISGID = 0x400 + S_ISUID = 0x800 + S_ISVTX = 0x200 + S_IWGRP = 0x10 + S_IWOTH = 0x2 + S_IWRITE = 0x80 + S_IWUSR = 0x80 + S_IXGRP = 0x8 + S_IXOTH = 0x1 + S_IXUSR = 0x40 + TAB0 = 0x0 + TAB1 = 0x400 + TAB2 = 0x800 + TAB3 = 0xc00 + TABDLY = 0xc00 + TCFLSH = 0x2000741f + TCGETA = 0x40147417 + TCGETS = 0x402c7413 + TCIFLUSH = 0x0 + TCIOFF = 0x2 + TCIOFLUSH = 0x2 + TCION = 0x3 + TCOFLUSH = 0x1 + TCOOFF = 0x0 + TCOON = 0x1 + TCP_CONGESTION = 0xd + TCP_COOKIE_IN_ALWAYS = 0x1 + TCP_COOKIE_MAX = 0x10 + TCP_COOKIE_MIN = 0x8 + TCP_COOKIE_OUT_NEVER = 0x2 + TCP_COOKIE_PAIR_SIZE = 0x20 + TCP_COOKIE_TRANSACTIONS = 0xf + TCP_CORK = 0x3 + TCP_DEFER_ACCEPT = 0x9 + TCP_FASTOPEN = 0x17 + TCP_INFO = 0xb + TCP_KEEPCNT = 0x6 + TCP_KEEPIDLE = 0x4 + TCP_KEEPINTVL = 0x5 + TCP_LINGER2 = 0x8 + TCP_MAXSEG = 0x2 + TCP_MAXWIN = 0xffff + TCP_MAX_WINSHIFT = 0xe + TCP_MD5SIG = 0xe + TCP_MD5SIG_MAXKEYLEN = 0x50 + TCP_MSS = 0x200 + TCP_MSS_DEFAULT = 0x218 + TCP_MSS_DESIRED = 0x4c4 + TCP_NODELAY = 0x1 + TCP_QUEUE_SEQ = 0x15 + TCP_QUICKACK = 0xc + TCP_REPAIR = 0x13 + TCP_REPAIR_OPTIONS = 0x16 + TCP_REPAIR_QUEUE = 0x14 + TCP_SYNCNT = 0x7 + TCP_S_DATA_IN = 0x4 + TCP_S_DATA_OUT = 0x8 + TCP_THIN_DUPACK = 0x11 + TCP_THIN_LINEAR_TIMEOUTS = 0x10 + TCP_TIMESTAMP = 0x18 + TCP_USER_TIMEOUT = 0x12 + TCP_WINDOW_CLAMP = 0xa + TCSAFLUSH = 0x2 + TCSBRK = 0x2000741d + TCSBRKP = 0x5425 + TCSETA = 0x80147418 + TCSETAF = 0x8014741c + TCSETAW = 0x80147419 + TCSETS = 0x802c7414 + TCSETSF = 0x802c7416 + TCSETSW = 0x802c7415 + TCXONC = 0x2000741e + TIOCCBRK = 0x5428 + TIOCCONS = 0x541d + TIOCEXCL = 0x540c + TIOCGDEV = 0x40045432 + TIOCGETC = 0x40067412 + TIOCGETD = 0x5424 + TIOCGETP = 0x40067408 + TIOCGEXCL = 0x40045440 + TIOCGICOUNT = 0x545d + TIOCGLCKTRMIOS = 0x5456 + TIOCGLTC = 0x40067474 + TIOCGPGRP = 0x40047477 + TIOCGPKT = 0x40045438 + TIOCGPTLCK = 0x40045439 + TIOCGPTN = 0x40045430 + TIOCGRS485 = 0x542e + TIOCGSERIAL = 0x541e + TIOCGSID = 0x5429 + TIOCGSOFTCAR = 0x5419 + TIOCGWINSZ = 0x40087468 + TIOCINQ = 0x4004667f + TIOCLINUX = 0x541c + TIOCMBIC = 0x5417 + TIOCMBIS = 0x5416 + TIOCMGET = 0x5415 + TIOCMIWAIT 
= 0x545c + TIOCMSET = 0x5418 + TIOCM_CAR = 0x40 + TIOCM_CD = 0x40 + TIOCM_CTS = 0x20 + TIOCM_DSR = 0x100 + TIOCM_DTR = 0x2 + TIOCM_LE = 0x1 + TIOCM_LOOP = 0x8000 + TIOCM_OUT1 = 0x2000 + TIOCM_OUT2 = 0x4000 + TIOCM_RI = 0x80 + TIOCM_RNG = 0x80 + TIOCM_RTS = 0x4 + TIOCM_SR = 0x10 + TIOCM_ST = 0x8 + TIOCNOTTY = 0x5422 + TIOCNXCL = 0x540d + TIOCOUTQ = 0x40047473 + TIOCPKT = 0x5420 + TIOCPKT_DATA = 0x0 + TIOCPKT_DOSTOP = 0x20 + TIOCPKT_FLUSHREAD = 0x1 + TIOCPKT_FLUSHWRITE = 0x2 + TIOCPKT_IOCTL = 0x40 + TIOCPKT_NOSTOP = 0x10 + TIOCPKT_START = 0x8 + TIOCPKT_STOP = 0x4 + TIOCSBRK = 0x5427 + TIOCSCTTY = 0x540e + TIOCSERCONFIG = 0x5453 + TIOCSERGETLSR = 0x5459 + TIOCSERGETMULTI = 0x545a + TIOCSERGSTRUCT = 0x5458 + TIOCSERGWILD = 0x5454 + TIOCSERSETMULTI = 0x545b + TIOCSERSWILD = 0x5455 + TIOCSER_TEMT = 0x1 + TIOCSETC = 0x80067411 + TIOCSETD = 0x5423 + TIOCSETN = 0x8006740a + TIOCSETP = 0x80067409 + TIOCSIG = 0x80045436 + TIOCSLCKTRMIOS = 0x5457 + TIOCSLTC = 0x80067475 + TIOCSPGRP = 0x80047476 + TIOCSPTLCK = 0x80045431 + TIOCSRS485 = 0x542f + TIOCSSERIAL = 0x541f + TIOCSSOFTCAR = 0x541a + TIOCSTART = 0x2000746e + TIOCSTI = 0x5412 + TIOCSTOP = 0x2000746f + TIOCSWINSZ = 0x80087467 + TIOCVHANGUP = 0x5437 + TOSTOP = 0x400000 + TUNATTACHFILTER = 0x801054d5 + TUNDETACHFILTER = 0x801054d6 + TUNGETFEATURES = 0x400454cf + TUNGETFILTER = 0x401054db + TUNGETIFF = 0x400454d2 + TUNGETSNDBUF = 0x400454d3 + TUNGETVNETHDRSZ = 0x400454d7 + TUNSETDEBUG = 0x800454c9 + TUNSETGROUP = 0x800454ce + TUNSETIFF = 0x800454ca + TUNSETIFINDEX = 0x800454da + TUNSETLINK = 0x800454cd + TUNSETNOCSUM = 0x800454c8 + TUNSETOFFLOAD = 0x800454d0 + TUNSETOWNER = 0x800454cc + TUNSETPERSIST = 0x800454cb + TUNSETQUEUE = 0x800454d9 + TUNSETSNDBUF = 0x800454d4 + TUNSETTXFILTER = 0x800454d1 + TUNSETVNETHDRSZ = 0x800454d8 + VDISCARD = 0x10 + VEOF = 0x4 + VEOL = 0x6 + VEOL2 = 0x8 + VERASE = 0x2 + VINTR = 0x0 + VKILL = 0x3 + VLNEXT = 0xf + VMIN = 0x5 + VQUIT = 0x1 + VREPRINT = 0xb + VSTART = 0xd + VSTOP = 0xe + VSUSP = 0xc + VSWTC = 0x9 + VT0 = 0x0 + VT1 = 0x10000 + VTDLY = 0x10000 + VTIME = 0x7 + VWERASE = 0xa + WALL = 0x40000000 + WCLONE = 0x80000000 + WCONTINUED = 0x8 + WEXITED = 0x4 + WNOHANG = 0x1 + WNOTHREAD = 0x20000000 + WNOWAIT = 0x1000000 + WORDSIZE = 0x40 + WSTOPPED = 0x2 + WUNTRACED = 0x2 + XCASE = 0x4000 + XTABS = 0xc00 +) + +// Errors +const ( + E2BIG = syscall.Errno(0x7) + EACCES = syscall.Errno(0xd) + EADDRINUSE = syscall.Errno(0x62) + EADDRNOTAVAIL = syscall.Errno(0x63) + EADV = syscall.Errno(0x44) + EAFNOSUPPORT = syscall.Errno(0x61) + EAGAIN = syscall.Errno(0xb) + EALREADY = syscall.Errno(0x72) + EBADE = syscall.Errno(0x34) + EBADF = syscall.Errno(0x9) + EBADFD = syscall.Errno(0x4d) + EBADMSG = syscall.Errno(0x4a) + EBADR = syscall.Errno(0x35) + EBADRQC = syscall.Errno(0x38) + EBADSLT = syscall.Errno(0x39) + EBFONT = syscall.Errno(0x3b) + EBUSY = syscall.Errno(0x10) + ECANCELED = syscall.Errno(0x7d) + ECHILD = syscall.Errno(0xa) + ECHRNG = syscall.Errno(0x2c) + ECOMM = syscall.Errno(0x46) + ECONNABORTED = syscall.Errno(0x67) + ECONNREFUSED = syscall.Errno(0x6f) + ECONNRESET = syscall.Errno(0x68) + EDEADLK = syscall.Errno(0x23) + EDEADLOCK = syscall.Errno(0x3a) + EDESTADDRREQ = syscall.Errno(0x59) + EDOM = syscall.Errno(0x21) + EDOTDOT = syscall.Errno(0x49) + EDQUOT = syscall.Errno(0x7a) + EEXIST = syscall.Errno(0x11) + EFAULT = syscall.Errno(0xe) + EFBIG = syscall.Errno(0x1b) + EHOSTDOWN = syscall.Errno(0x70) + EHOSTUNREACH = syscall.Errno(0x71) + EHWPOISON = syscall.Errno(0x85) + EIDRM = syscall.Errno(0x2b) + EILSEQ = 
syscall.Errno(0x54) + EINPROGRESS = syscall.Errno(0x73) + EINTR = syscall.Errno(0x4) + EINVAL = syscall.Errno(0x16) + EIO = syscall.Errno(0x5) + EISCONN = syscall.Errno(0x6a) + EISDIR = syscall.Errno(0x15) + EISNAM = syscall.Errno(0x78) + EKEYEXPIRED = syscall.Errno(0x7f) + EKEYREJECTED = syscall.Errno(0x81) + EKEYREVOKED = syscall.Errno(0x80) + EL2HLT = syscall.Errno(0x33) + EL2NSYNC = syscall.Errno(0x2d) + EL3HLT = syscall.Errno(0x2e) + EL3RST = syscall.Errno(0x2f) + ELIBACC = syscall.Errno(0x4f) + ELIBBAD = syscall.Errno(0x50) + ELIBEXEC = syscall.Errno(0x53) + ELIBMAX = syscall.Errno(0x52) + ELIBSCN = syscall.Errno(0x51) + ELNRNG = syscall.Errno(0x30) + ELOOP = syscall.Errno(0x28) + EMEDIUMTYPE = syscall.Errno(0x7c) + EMFILE = syscall.Errno(0x18) + EMLINK = syscall.Errno(0x1f) + EMSGSIZE = syscall.Errno(0x5a) + EMULTIHOP = syscall.Errno(0x48) + ENAMETOOLONG = syscall.Errno(0x24) + ENAVAIL = syscall.Errno(0x77) + ENETDOWN = syscall.Errno(0x64) + ENETRESET = syscall.Errno(0x66) + ENETUNREACH = syscall.Errno(0x65) + ENFILE = syscall.Errno(0x17) + ENOANO = syscall.Errno(0x37) + ENOBUFS = syscall.Errno(0x69) + ENOCSI = syscall.Errno(0x32) + ENODATA = syscall.Errno(0x3d) + ENODEV = syscall.Errno(0x13) + ENOENT = syscall.Errno(0x2) + ENOEXEC = syscall.Errno(0x8) + ENOKEY = syscall.Errno(0x7e) + ENOLCK = syscall.Errno(0x25) + ENOLINK = syscall.Errno(0x43) + ENOMEDIUM = syscall.Errno(0x7b) + ENOMEM = syscall.Errno(0xc) + ENOMSG = syscall.Errno(0x2a) + ENONET = syscall.Errno(0x40) + ENOPKG = syscall.Errno(0x41) + ENOPROTOOPT = syscall.Errno(0x5c) + ENOSPC = syscall.Errno(0x1c) + ENOSR = syscall.Errno(0x3f) + ENOSTR = syscall.Errno(0x3c) + ENOSYS = syscall.Errno(0x26) + ENOTBLK = syscall.Errno(0xf) + ENOTCONN = syscall.Errno(0x6b) + ENOTDIR = syscall.Errno(0x14) + ENOTEMPTY = syscall.Errno(0x27) + ENOTNAM = syscall.Errno(0x76) + ENOTRECOVERABLE = syscall.Errno(0x83) + ENOTSOCK = syscall.Errno(0x58) + ENOTSUP = syscall.Errno(0x5f) + ENOTTY = syscall.Errno(0x19) + ENOTUNIQ = syscall.Errno(0x4c) + ENXIO = syscall.Errno(0x6) + EOPNOTSUPP = syscall.Errno(0x5f) + EOVERFLOW = syscall.Errno(0x4b) + EOWNERDEAD = syscall.Errno(0x82) + EPERM = syscall.Errno(0x1) + EPFNOSUPPORT = syscall.Errno(0x60) + EPIPE = syscall.Errno(0x20) + EPROTO = syscall.Errno(0x47) + EPROTONOSUPPORT = syscall.Errno(0x5d) + EPROTOTYPE = syscall.Errno(0x5b) + ERANGE = syscall.Errno(0x22) + EREMCHG = syscall.Errno(0x4e) + EREMOTE = syscall.Errno(0x42) + EREMOTEIO = syscall.Errno(0x79) + ERESTART = syscall.Errno(0x55) + ERFKILL = syscall.Errno(0x84) + EROFS = syscall.Errno(0x1e) + ESHUTDOWN = syscall.Errno(0x6c) + ESOCKTNOSUPPORT = syscall.Errno(0x5e) + ESPIPE = syscall.Errno(0x1d) + ESRCH = syscall.Errno(0x3) + ESRMNT = syscall.Errno(0x45) + ESTALE = syscall.Errno(0x74) + ESTRPIPE = syscall.Errno(0x56) + ETIME = syscall.Errno(0x3e) + ETIMEDOUT = syscall.Errno(0x6e) + ETOOMANYREFS = syscall.Errno(0x6d) + ETXTBSY = syscall.Errno(0x1a) + EUCLEAN = syscall.Errno(0x75) + EUNATCH = syscall.Errno(0x31) + EUSERS = syscall.Errno(0x57) + EWOULDBLOCK = syscall.Errno(0xb) + EXDEV = syscall.Errno(0x12) + EXFULL = syscall.Errno(0x36) +) + +// Signals +const ( + SIGABRT = syscall.Signal(0x6) + SIGALRM = syscall.Signal(0xe) + SIGBUS = syscall.Signal(0x7) + SIGCHLD = syscall.Signal(0x11) + SIGCLD = syscall.Signal(0x11) + SIGCONT = syscall.Signal(0x12) + SIGFPE = syscall.Signal(0x8) + SIGHUP = syscall.Signal(0x1) + SIGILL = syscall.Signal(0x4) + SIGINT = syscall.Signal(0x2) + SIGIO = syscall.Signal(0x1d) + SIGIOT = syscall.Signal(0x6) + SIGKILL = 
syscall.Signal(0x9) + SIGPIPE = syscall.Signal(0xd) + SIGPOLL = syscall.Signal(0x1d) + SIGPROF = syscall.Signal(0x1b) + SIGPWR = syscall.Signal(0x1e) + SIGQUIT = syscall.Signal(0x3) + SIGSEGV = syscall.Signal(0xb) + SIGSTKFLT = syscall.Signal(0x10) + SIGSTOP = syscall.Signal(0x13) + SIGSYS = syscall.Signal(0x1f) + SIGTERM = syscall.Signal(0xf) + SIGTRAP = syscall.Signal(0x5) + SIGTSTP = syscall.Signal(0x14) + SIGTTIN = syscall.Signal(0x15) + SIGTTOU = syscall.Signal(0x16) + SIGUNUSED = syscall.Signal(0x1f) + SIGURG = syscall.Signal(0x17) + SIGUSR1 = syscall.Signal(0xa) + SIGUSR2 = syscall.Signal(0xc) + SIGVTALRM = syscall.Signal(0x1a) + SIGWINCH = syscall.Signal(0x1c) + SIGXCPU = syscall.Signal(0x18) + SIGXFSZ = syscall.Signal(0x19) +) + +// Error table +var errors = [...]string{ + 1: "operation not permitted", + 2: "no such file or directory", + 3: "no such process", + 4: "interrupted system call", + 5: "input/output error", + 6: "no such device or address", + 7: "argument list too long", + 8: "exec format error", + 9: "bad file descriptor", + 10: "no child processes", + 11: "resource temporarily unavailable", + 12: "cannot allocate memory", + 13: "permission denied", + 14: "bad address", + 15: "block device required", + 16: "device or resource busy", + 17: "file exists", + 18: "invalid cross-device link", + 19: "no such device", + 20: "not a directory", + 21: "is a directory", + 22: "invalid argument", + 23: "too many open files in system", + 24: "too many open files", + 25: "inappropriate ioctl for device", + 26: "text file busy", + 27: "file too large", + 28: "no space left on device", + 29: "illegal seek", + 30: "read-only file system", + 31: "too many links", + 32: "broken pipe", + 33: "numerical argument out of domain", + 34: "numerical result out of range", + 35: "resource deadlock avoided", + 36: "file name too long", + 37: "no locks available", + 38: "function not implemented", + 39: "directory not empty", + 40: "too many levels of symbolic links", + 42: "no message of desired type", + 43: "identifier removed", + 44: "channel number out of range", + 45: "level 2 not synchronized", + 46: "level 3 halted", + 47: "level 3 reset", + 48: "link number out of range", + 49: "protocol driver not attached", + 50: "no CSI structure available", + 51: "level 2 halted", + 52: "invalid exchange", + 53: "invalid request descriptor", + 54: "exchange full", + 55: "no anode", + 56: "invalid request code", + 57: "invalid slot", + 58: "file locking deadlock error", + 59: "bad font file format", + 60: "device not a stream", + 61: "no data available", + 62: "timer expired", + 63: "out of streams resources", + 64: "machine is not on the network", + 65: "package not installed", + 66: "object is remote", + 67: "link has been severed", + 68: "advertise error", + 69: "srmount error", + 70: "communication error on send", + 71: "protocol error", + 72: "multihop attempted", + 73: "RFS specific error", + 74: "bad message", + 75: "value too large for defined data type", + 76: "name not unique on network", + 77: "file descriptor in bad state", + 78: "remote address changed", + 79: "can not access a needed shared library", + 80: "accessing a corrupted shared library", + 81: ".lib section in a.out corrupted", + 82: "attempting to link in too many shared libraries", + 83: "cannot exec a shared library directly", + 84: "invalid or incomplete multibyte or wide character", + 85: "interrupted system call should be restarted", + 86: "streams pipe error", + 87: "too many users", + 88: "socket operation on non-socket", + 
89: "destination address required", + 90: "message too long", + 91: "protocol wrong type for socket", + 92: "protocol not available", + 93: "protocol not supported", + 94: "socket type not supported", + 95: "operation not supported", + 96: "protocol family not supported", + 97: "address family not supported by protocol", + 98: "address already in use", + 99: "cannot assign requested address", + 100: "network is down", + 101: "network is unreachable", + 102: "network dropped connection on reset", + 103: "software caused connection abort", + 104: "connection reset by peer", + 105: "no buffer space available", + 106: "transport endpoint is already connected", + 107: "transport endpoint is not connected", + 108: "cannot send after transport endpoint shutdown", + 109: "too many references: cannot splice", + 110: "connection timed out", + 111: "connection refused", + 112: "host is down", + 113: "no route to host", + 114: "operation already in progress", + 115: "operation now in progress", + 116: "stale file handle", + 117: "structure needs cleaning", + 118: "not a XENIX named type file", + 119: "no XENIX semaphores available", + 120: "is a named type file", + 121: "remote I/O error", + 122: "disk quota exceeded", + 123: "no medium found", + 124: "wrong medium type", + 125: "operation canceled", + 126: "required key not available", + 127: "key has expired", + 128: "key has been revoked", + 129: "key was rejected by service", + 130: "owner died", + 131: "state not recoverable", + 132: "operation not possible due to RF-kill", + 133: "memory page has hardware error", +} + +// Signal table +var signals = [...]string{ + 1: "hangup", + 2: "interrupt", + 3: "quit", + 4: "illegal instruction", + 5: "trace/breakpoint trap", + 6: "aborted", + 7: "bus error", + 8: "floating point exception", + 9: "killed", + 10: "user defined signal 1", + 11: "segmentation fault", + 12: "user defined signal 2", + 13: "broken pipe", + 14: "alarm clock", + 15: "terminated", + 16: "stack fault", + 17: "child exited", + 18: "continued", + 19: "stopped (signal)", + 20: "stopped", + 21: "stopped (tty input)", + 22: "stopped (tty output)", + 23: "urgent I/O condition", + 24: "CPU time limit exceeded", + 25: "file size limit exceeded", + 26: "virtual timer expired", + 27: "profiling timer expired", + 28: "window changed", + 29: "I/O possible", + 30: "power failure", + 31: "bad system call", +} diff --git a/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zerrors_netbsd_386.go b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zerrors_netbsd_386.go new file mode 100644 index 000000000..b4338d5f2 --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zerrors_netbsd_386.go @@ -0,0 +1,1712 @@ +// mkerrors.sh -m32 +// MACHINE GENERATED BY THE COMMAND ABOVE; DO NOT EDIT + +// +build 386,netbsd + +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs -- -m32 _const.go + +package unix + +import "syscall" + +const ( + AF_APPLETALK = 0x10 + AF_ARP = 0x1c + AF_BLUETOOTH = 0x1f + AF_CCITT = 0xa + AF_CHAOS = 0x5 + AF_CNT = 0x15 + AF_COIP = 0x14 + AF_DATAKIT = 0x9 + AF_DECnet = 0xc + AF_DLI = 0xd + AF_E164 = 0x1a + AF_ECMA = 0x8 + AF_HYLINK = 0xf + AF_IEEE80211 = 0x20 + AF_IMPLINK = 0x3 + AF_INET = 0x2 + AF_INET6 = 0x18 + AF_IPX = 0x17 + AF_ISDN = 0x1a + AF_ISO = 0x7 + AF_LAT = 0xe + AF_LINK = 0x12 + AF_LOCAL = 0x1 + AF_MAX = 0x23 + AF_MPLS = 0x21 + AF_NATM = 0x1b + AF_NS = 0x6 + AF_OROUTE = 0x11 + AF_OSI = 0x7 + AF_PUP = 0x4 + AF_ROUTE = 0x22 + AF_SNA = 0xb + 
AF_UNIX = 0x1 + AF_UNSPEC = 0x0 + ARPHRD_ARCNET = 0x7 + ARPHRD_ETHER = 0x1 + ARPHRD_FRELAY = 0xf + ARPHRD_IEEE1394 = 0x18 + ARPHRD_IEEE802 = 0x6 + ARPHRD_STRIP = 0x17 + B0 = 0x0 + B110 = 0x6e + B115200 = 0x1c200 + B1200 = 0x4b0 + B134 = 0x86 + B14400 = 0x3840 + B150 = 0x96 + B1800 = 0x708 + B19200 = 0x4b00 + B200 = 0xc8 + B230400 = 0x38400 + B2400 = 0x960 + B28800 = 0x7080 + B300 = 0x12c + B38400 = 0x9600 + B460800 = 0x70800 + B4800 = 0x12c0 + B50 = 0x32 + B57600 = 0xe100 + B600 = 0x258 + B7200 = 0x1c20 + B75 = 0x4b + B76800 = 0x12c00 + B921600 = 0xe1000 + B9600 = 0x2580 + BIOCFEEDBACK = 0x8004427d + BIOCFLUSH = 0x20004268 + BIOCGBLEN = 0x40044266 + BIOCGDLT = 0x4004426a + BIOCGDLTLIST = 0xc0084277 + BIOCGETIF = 0x4090426b + BIOCGFEEDBACK = 0x4004427c + BIOCGHDRCMPLT = 0x40044274 + BIOCGRTIMEOUT = 0x400c427b + BIOCGSEESENT = 0x40044278 + BIOCGSTATS = 0x4080426f + BIOCGSTATSOLD = 0x4008426f + BIOCIMMEDIATE = 0x80044270 + BIOCPROMISC = 0x20004269 + BIOCSBLEN = 0xc0044266 + BIOCSDLT = 0x80044276 + BIOCSETF = 0x80084267 + BIOCSETIF = 0x8090426c + BIOCSFEEDBACK = 0x8004427d + BIOCSHDRCMPLT = 0x80044275 + BIOCSRTIMEOUT = 0x800c427a + BIOCSSEESENT = 0x80044279 + BIOCSTCPF = 0x80084272 + BIOCSUDPF = 0x80084273 + BIOCVERSION = 0x40044271 + BPF_A = 0x10 + BPF_ABS = 0x20 + BPF_ADD = 0x0 + BPF_ALIGNMENT = 0x4 + BPF_ALIGNMENT32 = 0x4 + BPF_ALU = 0x4 + BPF_AND = 0x50 + BPF_B = 0x10 + BPF_DFLTBUFSIZE = 0x100000 + BPF_DIV = 0x30 + BPF_H = 0x8 + BPF_IMM = 0x0 + BPF_IND = 0x40 + BPF_JA = 0x0 + BPF_JEQ = 0x10 + BPF_JGE = 0x30 + BPF_JGT = 0x20 + BPF_JMP = 0x5 + BPF_JSET = 0x40 + BPF_K = 0x0 + BPF_LD = 0x0 + BPF_LDX = 0x1 + BPF_LEN = 0x80 + BPF_LSH = 0x60 + BPF_MAJOR_VERSION = 0x1 + BPF_MAXBUFSIZE = 0x1000000 + BPF_MAXINSNS = 0x200 + BPF_MEM = 0x60 + BPF_MEMWORDS = 0x10 + BPF_MINBUFSIZE = 0x20 + BPF_MINOR_VERSION = 0x1 + BPF_MISC = 0x7 + BPF_MSH = 0xa0 + BPF_MUL = 0x20 + BPF_NEG = 0x80 + BPF_OR = 0x40 + BPF_RELEASE = 0x30bb6 + BPF_RET = 0x6 + BPF_RSH = 0x70 + BPF_ST = 0x2 + BPF_STX = 0x3 + BPF_SUB = 0x10 + BPF_TAX = 0x0 + BPF_TXA = 0x80 + BPF_W = 0x0 + BPF_X = 0x8 + BRKINT = 0x2 + CFLUSH = 0xf + CLOCAL = 0x8000 + CLONE_CSIGNAL = 0xff + CLONE_FILES = 0x400 + CLONE_FS = 0x200 + CLONE_PID = 0x1000 + CLONE_PTRACE = 0x2000 + CLONE_SIGHAND = 0x800 + CLONE_VFORK = 0x4000 + CLONE_VM = 0x100 + CREAD = 0x800 + CS5 = 0x0 + CS6 = 0x100 + CS7 = 0x200 + CS8 = 0x300 + CSIZE = 0x300 + CSTART = 0x11 + CSTATUS = 0x14 + CSTOP = 0x13 + CSTOPB = 0x400 + CSUSP = 0x1a + CTL_MAXNAME = 0xc + CTL_NET = 0x4 + CTL_QUERY = -0x2 + DIOCBSFLUSH = 0x20006478 + DLT_A429 = 0xb8 + DLT_A653_ICM = 0xb9 + DLT_AIRONET_HEADER = 0x78 + DLT_AOS = 0xde + DLT_APPLE_IP_OVER_IEEE1394 = 0x8a + DLT_ARCNET = 0x7 + DLT_ARCNET_LINUX = 0x81 + DLT_ATM_CLIP = 0x13 + DLT_ATM_RFC1483 = 0xb + DLT_AURORA = 0x7e + DLT_AX25 = 0x3 + DLT_AX25_KISS = 0xca + DLT_BACNET_MS_TP = 0xa5 + DLT_BLUETOOTH_HCI_H4 = 0xbb + DLT_BLUETOOTH_HCI_H4_WITH_PHDR = 0xc9 + DLT_CAN20B = 0xbe + DLT_CAN_SOCKETCAN = 0xe3 + DLT_CHAOS = 0x5 + DLT_CISCO_IOS = 0x76 + DLT_C_HDLC = 0x68 + DLT_C_HDLC_WITH_DIR = 0xcd + DLT_DECT = 0xdd + DLT_DOCSIS = 0x8f + DLT_ECONET = 0x73 + DLT_EN10MB = 0x1 + DLT_EN3MB = 0x2 + DLT_ENC = 0x6d + DLT_ERF = 0xc5 + DLT_ERF_ETH = 0xaf + DLT_ERF_POS = 0xb0 + DLT_FC_2 = 0xe0 + DLT_FC_2_WITH_FRAME_DELIMS = 0xe1 + DLT_FDDI = 0xa + DLT_FLEXRAY = 0xd2 + DLT_FRELAY = 0x6b + DLT_FRELAY_WITH_DIR = 0xce + DLT_GCOM_SERIAL = 0xad + DLT_GCOM_T1E1 = 0xac + DLT_GPF_F = 0xab + DLT_GPF_T = 0xaa + DLT_GPRS_LLC = 0xa9 + DLT_GSMTAP_ABIS = 0xda + DLT_GSMTAP_UM = 0xd9 + DLT_HDLC = 0x10 + DLT_HHDLC 
= 0x79 + DLT_HIPPI = 0xf + DLT_IBM_SN = 0x92 + DLT_IBM_SP = 0x91 + DLT_IEEE802 = 0x6 + DLT_IEEE802_11 = 0x69 + DLT_IEEE802_11_RADIO = 0x7f + DLT_IEEE802_11_RADIO_AVS = 0xa3 + DLT_IEEE802_15_4 = 0xc3 + DLT_IEEE802_15_4_LINUX = 0xbf + DLT_IEEE802_15_4_NONASK_PHY = 0xd7 + DLT_IEEE802_16_MAC_CPS = 0xbc + DLT_IEEE802_16_MAC_CPS_RADIO = 0xc1 + DLT_IPMB = 0xc7 + DLT_IPMB_LINUX = 0xd1 + DLT_IPNET = 0xe2 + DLT_IPV4 = 0xe4 + DLT_IPV6 = 0xe5 + DLT_IP_OVER_FC = 0x7a + DLT_JUNIPER_ATM1 = 0x89 + DLT_JUNIPER_ATM2 = 0x87 + DLT_JUNIPER_CHDLC = 0xb5 + DLT_JUNIPER_ES = 0x84 + DLT_JUNIPER_ETHER = 0xb2 + DLT_JUNIPER_FRELAY = 0xb4 + DLT_JUNIPER_GGSN = 0x85 + DLT_JUNIPER_ISM = 0xc2 + DLT_JUNIPER_MFR = 0x86 + DLT_JUNIPER_MLFR = 0x83 + DLT_JUNIPER_MLPPP = 0x82 + DLT_JUNIPER_MONITOR = 0xa4 + DLT_JUNIPER_PIC_PEER = 0xae + DLT_JUNIPER_PPP = 0xb3 + DLT_JUNIPER_PPPOE = 0xa7 + DLT_JUNIPER_PPPOE_ATM = 0xa8 + DLT_JUNIPER_SERVICES = 0x88 + DLT_JUNIPER_ST = 0xc8 + DLT_JUNIPER_VP = 0xb7 + DLT_LAPB_WITH_DIR = 0xcf + DLT_LAPD = 0xcb + DLT_LIN = 0xd4 + DLT_LINUX_EVDEV = 0xd8 + DLT_LINUX_IRDA = 0x90 + DLT_LINUX_LAPD = 0xb1 + DLT_LINUX_SLL = 0x71 + DLT_LOOP = 0x6c + DLT_LTALK = 0x72 + DLT_MFR = 0xb6 + DLT_MOST = 0xd3 + DLT_MPLS = 0xdb + DLT_MTP2 = 0x8c + DLT_MTP2_WITH_PHDR = 0x8b + DLT_MTP3 = 0x8d + DLT_NULL = 0x0 + DLT_PCI_EXP = 0x7d + DLT_PFLOG = 0x75 + DLT_PFSYNC = 0x12 + DLT_PPI = 0xc0 + DLT_PPP = 0x9 + DLT_PPP_BSDOS = 0xe + DLT_PPP_ETHER = 0x33 + DLT_PPP_PPPD = 0xa6 + DLT_PPP_SERIAL = 0x32 + DLT_PPP_WITH_DIR = 0xcc + DLT_PRISM_HEADER = 0x77 + DLT_PRONET = 0x4 + DLT_RAIF1 = 0xc6 + DLT_RAW = 0xc + DLT_RAWAF_MASK = 0x2240000 + DLT_RIO = 0x7c + DLT_SCCP = 0x8e + DLT_SITA = 0xc4 + DLT_SLIP = 0x8 + DLT_SLIP_BSDOS = 0xd + DLT_SUNATM = 0x7b + DLT_SYMANTEC_FIREWALL = 0x63 + DLT_TZSP = 0x80 + DLT_USB = 0xba + DLT_USB_LINUX = 0xbd + DLT_USB_LINUX_MMAPPED = 0xdc + DLT_WIHART = 0xdf + DLT_X2E_SERIAL = 0xd5 + DLT_X2E_XORAYA = 0xd6 + DT_BLK = 0x6 + DT_CHR = 0x2 + DT_DIR = 0x4 + DT_FIFO = 0x1 + DT_LNK = 0xa + DT_REG = 0x8 + DT_SOCK = 0xc + DT_UNKNOWN = 0x0 + DT_WHT = 0xe + ECHO = 0x8 + ECHOCTL = 0x40 + ECHOE = 0x2 + ECHOK = 0x4 + ECHOKE = 0x1 + ECHONL = 0x10 + ECHOPRT = 0x20 + EMUL_LINUX = 0x1 + EMUL_LINUX32 = 0x5 + EMUL_MAXID = 0x6 + EN_SW_CTL_INF = 0x1000 + EN_SW_CTL_PREC = 0x300 + EN_SW_CTL_ROUND = 0xc00 + EN_SW_DATACHAIN = 0x80 + EN_SW_DENORM = 0x2 + EN_SW_INVOP = 0x1 + EN_SW_OVERFLOW = 0x8 + EN_SW_PRECLOSS = 0x20 + EN_SW_UNDERFLOW = 0x10 + EN_SW_ZERODIV = 0x4 + ETHERCAP_JUMBO_MTU = 0x4 + ETHERCAP_VLAN_HWTAGGING = 0x2 + ETHERCAP_VLAN_MTU = 0x1 + ETHERMIN = 0x2e + ETHERMTU = 0x5dc + ETHERMTU_JUMBO = 0x2328 + ETHERTYPE_8023 = 0x4 + ETHERTYPE_AARP = 0x80f3 + ETHERTYPE_ACCTON = 0x8390 + ETHERTYPE_AEONIC = 0x8036 + ETHERTYPE_ALPHA = 0x814a + ETHERTYPE_AMBER = 0x6008 + ETHERTYPE_AMOEBA = 0x8145 + ETHERTYPE_APOLLO = 0x80f7 + ETHERTYPE_APOLLODOMAIN = 0x8019 + ETHERTYPE_APPLETALK = 0x809b + ETHERTYPE_APPLITEK = 0x80c7 + ETHERTYPE_ARGONAUT = 0x803a + ETHERTYPE_ARP = 0x806 + ETHERTYPE_AT = 0x809b + ETHERTYPE_ATALK = 0x809b + ETHERTYPE_ATOMIC = 0x86df + ETHERTYPE_ATT = 0x8069 + ETHERTYPE_ATTSTANFORD = 0x8008 + ETHERTYPE_AUTOPHON = 0x806a + ETHERTYPE_AXIS = 0x8856 + ETHERTYPE_BCLOOP = 0x9003 + ETHERTYPE_BOFL = 0x8102 + ETHERTYPE_CABLETRON = 0x7034 + ETHERTYPE_CHAOS = 0x804 + ETHERTYPE_COMDESIGN = 0x806c + ETHERTYPE_COMPUGRAPHIC = 0x806d + ETHERTYPE_COUNTERPOINT = 0x8062 + ETHERTYPE_CRONUS = 0x8004 + ETHERTYPE_CRONUSVLN = 0x8003 + ETHERTYPE_DCA = 0x1234 + ETHERTYPE_DDE = 0x807b + ETHERTYPE_DEBNI = 0xaaaa + ETHERTYPE_DECAM = 0x8048 + ETHERTYPE_DECCUST 
= 0x6006 + ETHERTYPE_DECDIAG = 0x6005 + ETHERTYPE_DECDNS = 0x803c + ETHERTYPE_DECDTS = 0x803e + ETHERTYPE_DECEXPER = 0x6000 + ETHERTYPE_DECLAST = 0x8041 + ETHERTYPE_DECLTM = 0x803f + ETHERTYPE_DECMUMPS = 0x6009 + ETHERTYPE_DECNETBIOS = 0x8040 + ETHERTYPE_DELTACON = 0x86de + ETHERTYPE_DIDDLE = 0x4321 + ETHERTYPE_DLOG1 = 0x660 + ETHERTYPE_DLOG2 = 0x661 + ETHERTYPE_DN = 0x6003 + ETHERTYPE_DOGFIGHT = 0x1989 + ETHERTYPE_DSMD = 0x8039 + ETHERTYPE_ECMA = 0x803 + ETHERTYPE_ENCRYPT = 0x803d + ETHERTYPE_ES = 0x805d + ETHERTYPE_EXCELAN = 0x8010 + ETHERTYPE_EXPERDATA = 0x8049 + ETHERTYPE_FLIP = 0x8146 + ETHERTYPE_FLOWCONTROL = 0x8808 + ETHERTYPE_FRARP = 0x808 + ETHERTYPE_GENDYN = 0x8068 + ETHERTYPE_HAYES = 0x8130 + ETHERTYPE_HIPPI_FP = 0x8180 + ETHERTYPE_HITACHI = 0x8820 + ETHERTYPE_HP = 0x8005 + ETHERTYPE_IEEEPUP = 0xa00 + ETHERTYPE_IEEEPUPAT = 0xa01 + ETHERTYPE_IMLBL = 0x4c42 + ETHERTYPE_IMLBLDIAG = 0x424c + ETHERTYPE_IP = 0x800 + ETHERTYPE_IPAS = 0x876c + ETHERTYPE_IPV6 = 0x86dd + ETHERTYPE_IPX = 0x8137 + ETHERTYPE_IPXNEW = 0x8037 + ETHERTYPE_KALPANA = 0x8582 + ETHERTYPE_LANBRIDGE = 0x8038 + ETHERTYPE_LANPROBE = 0x8888 + ETHERTYPE_LAT = 0x6004 + ETHERTYPE_LBACK = 0x9000 + ETHERTYPE_LITTLE = 0x8060 + ETHERTYPE_LOGICRAFT = 0x8148 + ETHERTYPE_LOOPBACK = 0x9000 + ETHERTYPE_MATRA = 0x807a + ETHERTYPE_MAX = 0xffff + ETHERTYPE_MERIT = 0x807c + ETHERTYPE_MICP = 0x873a + ETHERTYPE_MOPDL = 0x6001 + ETHERTYPE_MOPRC = 0x6002 + ETHERTYPE_MOTOROLA = 0x818d + ETHERTYPE_MPLS = 0x8847 + ETHERTYPE_MPLS_MCAST = 0x8848 + ETHERTYPE_MUMPS = 0x813f + ETHERTYPE_NBPCC = 0x3c04 + ETHERTYPE_NBPCLAIM = 0x3c09 + ETHERTYPE_NBPCLREQ = 0x3c05 + ETHERTYPE_NBPCLRSP = 0x3c06 + ETHERTYPE_NBPCREQ = 0x3c02 + ETHERTYPE_NBPCRSP = 0x3c03 + ETHERTYPE_NBPDG = 0x3c07 + ETHERTYPE_NBPDGB = 0x3c08 + ETHERTYPE_NBPDLTE = 0x3c0a + ETHERTYPE_NBPRAR = 0x3c0c + ETHERTYPE_NBPRAS = 0x3c0b + ETHERTYPE_NBPRST = 0x3c0d + ETHERTYPE_NBPSCD = 0x3c01 + ETHERTYPE_NBPVCD = 0x3c00 + ETHERTYPE_NBS = 0x802 + ETHERTYPE_NCD = 0x8149 + ETHERTYPE_NESTAR = 0x8006 + ETHERTYPE_NETBEUI = 0x8191 + ETHERTYPE_NOVELL = 0x8138 + ETHERTYPE_NS = 0x600 + ETHERTYPE_NSAT = 0x601 + ETHERTYPE_NSCOMPAT = 0x807 + ETHERTYPE_NTRAILER = 0x10 + ETHERTYPE_OS9 = 0x7007 + ETHERTYPE_OS9NET = 0x7009 + ETHERTYPE_PACER = 0x80c6 + ETHERTYPE_PAE = 0x888e + ETHERTYPE_PCS = 0x4242 + ETHERTYPE_PLANNING = 0x8044 + ETHERTYPE_PPP = 0x880b + ETHERTYPE_PPPOE = 0x8864 + ETHERTYPE_PPPOEDISC = 0x8863 + ETHERTYPE_PRIMENTS = 0x7031 + ETHERTYPE_PUP = 0x200 + ETHERTYPE_PUPAT = 0x200 + ETHERTYPE_RACAL = 0x7030 + ETHERTYPE_RATIONAL = 0x8150 + ETHERTYPE_RAWFR = 0x6559 + ETHERTYPE_RCL = 0x1995 + ETHERTYPE_RDP = 0x8739 + ETHERTYPE_RETIX = 0x80f2 + ETHERTYPE_REVARP = 0x8035 + ETHERTYPE_SCA = 0x6007 + ETHERTYPE_SECTRA = 0x86db + ETHERTYPE_SECUREDATA = 0x876d + ETHERTYPE_SGITW = 0x817e + ETHERTYPE_SG_BOUNCE = 0x8016 + ETHERTYPE_SG_DIAG = 0x8013 + ETHERTYPE_SG_NETGAMES = 0x8014 + ETHERTYPE_SG_RESV = 0x8015 + ETHERTYPE_SIMNET = 0x5208 + ETHERTYPE_SLOWPROTOCOLS = 0x8809 + ETHERTYPE_SNA = 0x80d5 + ETHERTYPE_SNMP = 0x814c + ETHERTYPE_SONIX = 0xfaf5 + ETHERTYPE_SPIDER = 0x809f + ETHERTYPE_SPRITE = 0x500 + ETHERTYPE_STP = 0x8181 + ETHERTYPE_TALARIS = 0x812b + ETHERTYPE_TALARISMC = 0x852b + ETHERTYPE_TCPCOMP = 0x876b + ETHERTYPE_TCPSM = 0x9002 + ETHERTYPE_TEC = 0x814f + ETHERTYPE_TIGAN = 0x802f + ETHERTYPE_TRAIL = 0x1000 + ETHERTYPE_TRANSETHER = 0x6558 + ETHERTYPE_TYMSHARE = 0x802e + ETHERTYPE_UBBST = 0x7005 + ETHERTYPE_UBDEBUG = 0x900 + ETHERTYPE_UBDIAGLOOP = 0x7002 + ETHERTYPE_UBDL = 0x7000 + ETHERTYPE_UBNIU = 0x7001 + 
ETHERTYPE_UBNMC = 0x7003 + ETHERTYPE_VALID = 0x1600 + ETHERTYPE_VARIAN = 0x80dd + ETHERTYPE_VAXELN = 0x803b + ETHERTYPE_VEECO = 0x8067 + ETHERTYPE_VEXP = 0x805b + ETHERTYPE_VGLAB = 0x8131 + ETHERTYPE_VINES = 0xbad + ETHERTYPE_VINESECHO = 0xbaf + ETHERTYPE_VINESLOOP = 0xbae + ETHERTYPE_VITAL = 0xff00 + ETHERTYPE_VLAN = 0x8100 + ETHERTYPE_VLTLMAN = 0x8080 + ETHERTYPE_VPROD = 0x805c + ETHERTYPE_VURESERVED = 0x8147 + ETHERTYPE_WATERLOO = 0x8130 + ETHERTYPE_WELLFLEET = 0x8103 + ETHERTYPE_X25 = 0x805 + ETHERTYPE_X75 = 0x801 + ETHERTYPE_XNSSM = 0x9001 + ETHERTYPE_XTP = 0x817d + ETHER_ADDR_LEN = 0x6 + ETHER_CRC_LEN = 0x4 + ETHER_CRC_POLY_BE = 0x4c11db6 + ETHER_CRC_POLY_LE = 0xedb88320 + ETHER_HDR_LEN = 0xe + ETHER_MAX_LEN = 0x5ee + ETHER_MAX_LEN_JUMBO = 0x233a + ETHER_MIN_LEN = 0x40 + ETHER_PPPOE_ENCAP_LEN = 0x8 + ETHER_TYPE_LEN = 0x2 + ETHER_VLAN_ENCAP_LEN = 0x4 + EVFILT_AIO = 0x2 + EVFILT_PROC = 0x4 + EVFILT_READ = 0x0 + EVFILT_SIGNAL = 0x5 + EVFILT_SYSCOUNT = 0x7 + EVFILT_TIMER = 0x6 + EVFILT_VNODE = 0x3 + EVFILT_WRITE = 0x1 + EV_ADD = 0x1 + EV_CLEAR = 0x20 + EV_DELETE = 0x2 + EV_DISABLE = 0x8 + EV_ENABLE = 0x4 + EV_EOF = 0x8000 + EV_ERROR = 0x4000 + EV_FLAG1 = 0x2000 + EV_ONESHOT = 0x10 + EV_SYSFLAGS = 0xf000 + EXTA = 0x4b00 + EXTB = 0x9600 + EXTPROC = 0x800 + FD_CLOEXEC = 0x1 + FD_SETSIZE = 0x100 + FLUSHO = 0x800000 + F_CLOSEM = 0xa + F_DUPFD = 0x0 + F_DUPFD_CLOEXEC = 0xc + F_FSCTL = -0x80000000 + F_FSDIRMASK = 0x70000000 + F_FSIN = 0x10000000 + F_FSINOUT = 0x30000000 + F_FSOUT = 0x20000000 + F_FSPRIV = 0x8000 + F_FSVOID = 0x40000000 + F_GETFD = 0x1 + F_GETFL = 0x3 + F_GETLK = 0x7 + F_GETNOSIGPIPE = 0xd + F_GETOWN = 0x5 + F_MAXFD = 0xb + F_OK = 0x0 + F_PARAM_MASK = 0xfff + F_PARAM_MAX = 0xfff + F_RDLCK = 0x1 + F_SETFD = 0x2 + F_SETFL = 0x4 + F_SETLK = 0x8 + F_SETLKW = 0x9 + F_SETNOSIGPIPE = 0xe + F_SETOWN = 0x6 + F_UNLCK = 0x2 + F_WRLCK = 0x3 + HUPCL = 0x4000 + ICANON = 0x100 + ICMP6_FILTER = 0x12 + ICRNL = 0x100 + IEXTEN = 0x400 + IFAN_ARRIVAL = 0x0 + IFAN_DEPARTURE = 0x1 + IFA_ROUTE = 0x1 + IFF_ALLMULTI = 0x200 + IFF_BROADCAST = 0x2 + IFF_CANTCHANGE = 0x8f52 + IFF_DEBUG = 0x4 + IFF_LINK0 = 0x1000 + IFF_LINK1 = 0x2000 + IFF_LINK2 = 0x4000 + IFF_LOOPBACK = 0x8 + IFF_MULTICAST = 0x8000 + IFF_NOARP = 0x80 + IFF_NOTRAILERS = 0x20 + IFF_OACTIVE = 0x400 + IFF_POINTOPOINT = 0x10 + IFF_PROMISC = 0x100 + IFF_RUNNING = 0x40 + IFF_SIMPLEX = 0x800 + IFF_UP = 0x1 + IFNAMSIZ = 0x10 + IFT_1822 = 0x2 + IFT_A12MPPSWITCH = 0x82 + IFT_AAL2 = 0xbb + IFT_AAL5 = 0x31 + IFT_ADSL = 0x5e + IFT_AFLANE8023 = 0x3b + IFT_AFLANE8025 = 0x3c + IFT_ARAP = 0x58 + IFT_ARCNET = 0x23 + IFT_ARCNETPLUS = 0x24 + IFT_ASYNC = 0x54 + IFT_ATM = 0x25 + IFT_ATMDXI = 0x69 + IFT_ATMFUNI = 0x6a + IFT_ATMIMA = 0x6b + IFT_ATMLOGICAL = 0x50 + IFT_ATMRADIO = 0xbd + IFT_ATMSUBINTERFACE = 0x86 + IFT_ATMVCIENDPT = 0xc2 + IFT_ATMVIRTUAL = 0x95 + IFT_BGPPOLICYACCOUNTING = 0xa2 + IFT_BRIDGE = 0xd1 + IFT_BSC = 0x53 + IFT_CARP = 0xf8 + IFT_CCTEMUL = 0x3d + IFT_CEPT = 0x13 + IFT_CES = 0x85 + IFT_CHANNEL = 0x46 + IFT_CNR = 0x55 + IFT_COFFEE = 0x84 + IFT_COMPOSITELINK = 0x9b + IFT_DCN = 0x8d + IFT_DIGITALPOWERLINE = 0x8a + IFT_DIGITALWRAPPEROVERHEADCHANNEL = 0xba + IFT_DLSW = 0x4a + IFT_DOCSCABLEDOWNSTREAM = 0x80 + IFT_DOCSCABLEMACLAYER = 0x7f + IFT_DOCSCABLEUPSTREAM = 0x81 + IFT_DOCSCABLEUPSTREAMCHANNEL = 0xcd + IFT_DS0 = 0x51 + IFT_DS0BUNDLE = 0x52 + IFT_DS1FDL = 0xaa + IFT_DS3 = 0x1e + IFT_DTM = 0x8c + IFT_DVBASILN = 0xac + IFT_DVBASIOUT = 0xad + IFT_DVBRCCDOWNSTREAM = 0x93 + IFT_DVBRCCMACLAYER = 0x92 + IFT_DVBRCCUPSTREAM = 0x94 + IFT_ECONET = 0xce + 
IFT_EON = 0x19 + IFT_EPLRS = 0x57 + IFT_ESCON = 0x49 + IFT_ETHER = 0x6 + IFT_FAITH = 0xf2 + IFT_FAST = 0x7d + IFT_FASTETHER = 0x3e + IFT_FASTETHERFX = 0x45 + IFT_FDDI = 0xf + IFT_FIBRECHANNEL = 0x38 + IFT_FRAMERELAYINTERCONNECT = 0x3a + IFT_FRAMERELAYMPI = 0x5c + IFT_FRDLCIENDPT = 0xc1 + IFT_FRELAY = 0x20 + IFT_FRELAYDCE = 0x2c + IFT_FRF16MFRBUNDLE = 0xa3 + IFT_FRFORWARD = 0x9e + IFT_G703AT2MB = 0x43 + IFT_G703AT64K = 0x42 + IFT_GIF = 0xf0 + IFT_GIGABITETHERNET = 0x75 + IFT_GR303IDT = 0xb2 + IFT_GR303RDT = 0xb1 + IFT_H323GATEKEEPER = 0xa4 + IFT_H323PROXY = 0xa5 + IFT_HDH1822 = 0x3 + IFT_HDLC = 0x76 + IFT_HDSL2 = 0xa8 + IFT_HIPERLAN2 = 0xb7 + IFT_HIPPI = 0x2f + IFT_HIPPIINTERFACE = 0x39 + IFT_HOSTPAD = 0x5a + IFT_HSSI = 0x2e + IFT_HY = 0xe + IFT_IBM370PARCHAN = 0x48 + IFT_IDSL = 0x9a + IFT_IEEE1394 = 0x90 + IFT_IEEE80211 = 0x47 + IFT_IEEE80212 = 0x37 + IFT_IEEE8023ADLAG = 0xa1 + IFT_IFGSN = 0x91 + IFT_IMT = 0xbe + IFT_INFINIBAND = 0xc7 + IFT_INTERLEAVE = 0x7c + IFT_IP = 0x7e + IFT_IPFORWARD = 0x8e + IFT_IPOVERATM = 0x72 + IFT_IPOVERCDLC = 0x6d + IFT_IPOVERCLAW = 0x6e + IFT_IPSWITCH = 0x4e + IFT_ISDN = 0x3f + IFT_ISDNBASIC = 0x14 + IFT_ISDNPRIMARY = 0x15 + IFT_ISDNS = 0x4b + IFT_ISDNU = 0x4c + IFT_ISO88022LLC = 0x29 + IFT_ISO88023 = 0x7 + IFT_ISO88024 = 0x8 + IFT_ISO88025 = 0x9 + IFT_ISO88025CRFPINT = 0x62 + IFT_ISO88025DTR = 0x56 + IFT_ISO88025FIBER = 0x73 + IFT_ISO88026 = 0xa + IFT_ISUP = 0xb3 + IFT_L2VLAN = 0x87 + IFT_L3IPVLAN = 0x88 + IFT_L3IPXVLAN = 0x89 + IFT_LAPB = 0x10 + IFT_LAPD = 0x4d + IFT_LAPF = 0x77 + IFT_LINEGROUP = 0xd2 + IFT_LOCALTALK = 0x2a + IFT_LOOP = 0x18 + IFT_MEDIAMAILOVERIP = 0x8b + IFT_MFSIGLINK = 0xa7 + IFT_MIOX25 = 0x26 + IFT_MODEM = 0x30 + IFT_MPC = 0x71 + IFT_MPLS = 0xa6 + IFT_MPLSTUNNEL = 0x96 + IFT_MSDSL = 0x8f + IFT_MVL = 0xbf + IFT_MYRINET = 0x63 + IFT_NFAS = 0xaf + IFT_NSIP = 0x1b + IFT_OPTICALCHANNEL = 0xc3 + IFT_OPTICALTRANSPORT = 0xc4 + IFT_OTHER = 0x1 + IFT_P10 = 0xc + IFT_P80 = 0xd + IFT_PARA = 0x22 + IFT_PFLOG = 0xf5 + IFT_PFSYNC = 0xf6 + IFT_PLC = 0xae + IFT_PON155 = 0xcf + IFT_PON622 = 0xd0 + IFT_POS = 0xab + IFT_PPP = 0x17 + IFT_PPPMULTILINKBUNDLE = 0x6c + IFT_PROPATM = 0xc5 + IFT_PROPBWAP2MP = 0xb8 + IFT_PROPCNLS = 0x59 + IFT_PROPDOCSWIRELESSDOWNSTREAM = 0xb5 + IFT_PROPDOCSWIRELESSMACLAYER = 0xb4 + IFT_PROPDOCSWIRELESSUPSTREAM = 0xb6 + IFT_PROPMUX = 0x36 + IFT_PROPVIRTUAL = 0x35 + IFT_PROPWIRELESSP2P = 0x9d + IFT_PTPSERIAL = 0x16 + IFT_PVC = 0xf1 + IFT_Q2931 = 0xc9 + IFT_QLLC = 0x44 + IFT_RADIOMAC = 0xbc + IFT_RADSL = 0x5f + IFT_REACHDSL = 0xc0 + IFT_RFC1483 = 0x9f + IFT_RS232 = 0x21 + IFT_RSRB = 0x4f + IFT_SDLC = 0x11 + IFT_SDSL = 0x60 + IFT_SHDSL = 0xa9 + IFT_SIP = 0x1f + IFT_SIPSIG = 0xcc + IFT_SIPTG = 0xcb + IFT_SLIP = 0x1c + IFT_SMDSDXI = 0x2b + IFT_SMDSICIP = 0x34 + IFT_SONET = 0x27 + IFT_SONETOVERHEADCHANNEL = 0xb9 + IFT_SONETPATH = 0x32 + IFT_SONETVT = 0x33 + IFT_SRP = 0x97 + IFT_SS7SIGLINK = 0x9c + IFT_STACKTOSTACK = 0x6f + IFT_STARLAN = 0xb + IFT_STF = 0xd7 + IFT_T1 = 0x12 + IFT_TDLC = 0x74 + IFT_TELINK = 0xc8 + IFT_TERMPAD = 0x5b + IFT_TR008 = 0xb0 + IFT_TRANSPHDLC = 0x7b + IFT_TUNNEL = 0x83 + IFT_ULTRA = 0x1d + IFT_USB = 0xa0 + IFT_V11 = 0x40 + IFT_V35 = 0x2d + IFT_V36 = 0x41 + IFT_V37 = 0x78 + IFT_VDSL = 0x61 + IFT_VIRTUALIPADDRESS = 0x70 + IFT_VIRTUALTG = 0xca + IFT_VOICEDID = 0xd5 + IFT_VOICEEM = 0x64 + IFT_VOICEEMFGD = 0xd3 + IFT_VOICEENCAP = 0x67 + IFT_VOICEFGDEANA = 0xd4 + IFT_VOICEFXO = 0x65 + IFT_VOICEFXS = 0x66 + IFT_VOICEOVERATM = 0x98 + IFT_VOICEOVERCABLE = 0xc6 + IFT_VOICEOVERFRAMERELAY = 0x99 + IFT_VOICEOVERIP = 0x68 + 
IFT_X213 = 0x5d + IFT_X25 = 0x5 + IFT_X25DDN = 0x4 + IFT_X25HUNTGROUP = 0x7a + IFT_X25MLP = 0x79 + IFT_X25PLE = 0x28 + IFT_XETHER = 0x1a + IGNBRK = 0x1 + IGNCR = 0x80 + IGNPAR = 0x4 + IMAXBEL = 0x2000 + INLCR = 0x40 + INPCK = 0x10 + IN_CLASSA_HOST = 0xffffff + IN_CLASSA_MAX = 0x80 + IN_CLASSA_NET = 0xff000000 + IN_CLASSA_NSHIFT = 0x18 + IN_CLASSB_HOST = 0xffff + IN_CLASSB_MAX = 0x10000 + IN_CLASSB_NET = 0xffff0000 + IN_CLASSB_NSHIFT = 0x10 + IN_CLASSC_HOST = 0xff + IN_CLASSC_NET = 0xffffff00 + IN_CLASSC_NSHIFT = 0x8 + IN_CLASSD_HOST = 0xfffffff + IN_CLASSD_NET = 0xf0000000 + IN_CLASSD_NSHIFT = 0x1c + IN_LOOPBACKNET = 0x7f + IPPROTO_AH = 0x33 + IPPROTO_CARP = 0x70 + IPPROTO_DONE = 0x101 + IPPROTO_DSTOPTS = 0x3c + IPPROTO_EGP = 0x8 + IPPROTO_ENCAP = 0x62 + IPPROTO_EON = 0x50 + IPPROTO_ESP = 0x32 + IPPROTO_ETHERIP = 0x61 + IPPROTO_FRAGMENT = 0x2c + IPPROTO_GGP = 0x3 + IPPROTO_GRE = 0x2f + IPPROTO_HOPOPTS = 0x0 + IPPROTO_ICMP = 0x1 + IPPROTO_ICMPV6 = 0x3a + IPPROTO_IDP = 0x16 + IPPROTO_IGMP = 0x2 + IPPROTO_IP = 0x0 + IPPROTO_IPCOMP = 0x6c + IPPROTO_IPIP = 0x4 + IPPROTO_IPV4 = 0x4 + IPPROTO_IPV6 = 0x29 + IPPROTO_IPV6_ICMP = 0x3a + IPPROTO_MAX = 0x100 + IPPROTO_MAXID = 0x34 + IPPROTO_MOBILE = 0x37 + IPPROTO_NONE = 0x3b + IPPROTO_PFSYNC = 0xf0 + IPPROTO_PIM = 0x67 + IPPROTO_PUP = 0xc + IPPROTO_RAW = 0xff + IPPROTO_ROUTING = 0x2b + IPPROTO_RSVP = 0x2e + IPPROTO_TCP = 0x6 + IPPROTO_TP = 0x1d + IPPROTO_UDP = 0x11 + IPPROTO_VRRP = 0x70 + IPV6_CHECKSUM = 0x1a + IPV6_DEFAULT_MULTICAST_HOPS = 0x1 + IPV6_DEFAULT_MULTICAST_LOOP = 0x1 + IPV6_DEFHLIM = 0x40 + IPV6_DONTFRAG = 0x3e + IPV6_DSTOPTS = 0x32 + IPV6_FAITH = 0x1d + IPV6_FLOWINFO_MASK = 0xffffff0f + IPV6_FLOWLABEL_MASK = 0xffff0f00 + IPV6_FRAGTTL = 0x78 + IPV6_HLIMDEC = 0x1 + IPV6_HOPLIMIT = 0x2f + IPV6_HOPOPTS = 0x31 + IPV6_IPSEC_POLICY = 0x1c + IPV6_JOIN_GROUP = 0xc + IPV6_LEAVE_GROUP = 0xd + IPV6_MAXHLIM = 0xff + IPV6_MAXPACKET = 0xffff + IPV6_MMTU = 0x500 + IPV6_MULTICAST_HOPS = 0xa + IPV6_MULTICAST_IF = 0x9 + IPV6_MULTICAST_LOOP = 0xb + IPV6_NEXTHOP = 0x30 + IPV6_PATHMTU = 0x2c + IPV6_PKTINFO = 0x2e + IPV6_PORTRANGE = 0xe + IPV6_PORTRANGE_DEFAULT = 0x0 + IPV6_PORTRANGE_HIGH = 0x1 + IPV6_PORTRANGE_LOW = 0x2 + IPV6_RECVDSTOPTS = 0x28 + IPV6_RECVHOPLIMIT = 0x25 + IPV6_RECVHOPOPTS = 0x27 + IPV6_RECVPATHMTU = 0x2b + IPV6_RECVPKTINFO = 0x24 + IPV6_RECVRTHDR = 0x26 + IPV6_RECVTCLASS = 0x39 + IPV6_RTHDR = 0x33 + IPV6_RTHDRDSTOPTS = 0x23 + IPV6_RTHDR_LOOSE = 0x0 + IPV6_RTHDR_STRICT = 0x1 + IPV6_RTHDR_TYPE_0 = 0x0 + IPV6_SOCKOPT_RESERVED1 = 0x3 + IPV6_TCLASS = 0x3d + IPV6_UNICAST_HOPS = 0x4 + IPV6_USE_MIN_MTU = 0x2a + IPV6_V6ONLY = 0x1b + IPV6_VERSION = 0x60 + IPV6_VERSION_MASK = 0xf0 + IP_ADD_MEMBERSHIP = 0xc + IP_DEFAULT_MULTICAST_LOOP = 0x1 + IP_DEFAULT_MULTICAST_TTL = 0x1 + IP_DF = 0x4000 + IP_DROP_MEMBERSHIP = 0xd + IP_EF = 0x8000 + IP_ERRORMTU = 0x15 + IP_HDRINCL = 0x2 + IP_IPSEC_POLICY = 0x16 + IP_MAXPACKET = 0xffff + IP_MAX_MEMBERSHIPS = 0x14 + IP_MF = 0x2000 + IP_MINFRAGSIZE = 0x45 + IP_MINTTL = 0x18 + IP_MSS = 0x240 + IP_MULTICAST_IF = 0x9 + IP_MULTICAST_LOOP = 0xb + IP_MULTICAST_TTL = 0xa + IP_OFFMASK = 0x1fff + IP_OPTIONS = 0x1 + IP_PORTRANGE = 0x13 + IP_PORTRANGE_DEFAULT = 0x0 + IP_PORTRANGE_HIGH = 0x1 + IP_PORTRANGE_LOW = 0x2 + IP_RECVDSTADDR = 0x7 + IP_RECVIF = 0x14 + IP_RECVOPTS = 0x5 + IP_RECVRETOPTS = 0x6 + IP_RECVTTL = 0x17 + IP_RETOPTS = 0x8 + IP_RF = 0x8000 + IP_TOS = 0x3 + IP_TTL = 0x4 + ISIG = 0x80 + ISTRIP = 0x20 + IXANY = 0x800 + IXOFF = 0x400 + IXON = 0x200 + LOCK_EX = 0x2 + LOCK_NB = 0x4 + LOCK_SH = 0x1 + LOCK_UN = 0x8 + 
MADV_DONTNEED = 0x4 + MADV_FREE = 0x6 + MADV_NORMAL = 0x0 + MADV_RANDOM = 0x1 + MADV_SEQUENTIAL = 0x2 + MADV_SPACEAVAIL = 0x5 + MADV_WILLNEED = 0x3 + MAP_ALIGNMENT_16MB = 0x18000000 + MAP_ALIGNMENT_1TB = 0x28000000 + MAP_ALIGNMENT_256TB = 0x30000000 + MAP_ALIGNMENT_4GB = 0x20000000 + MAP_ALIGNMENT_64KB = 0x10000000 + MAP_ALIGNMENT_64PB = 0x38000000 + MAP_ALIGNMENT_MASK = -0x1000000 + MAP_ALIGNMENT_SHIFT = 0x18 + MAP_ANON = 0x1000 + MAP_FILE = 0x0 + MAP_FIXED = 0x10 + MAP_HASSEMAPHORE = 0x200 + MAP_INHERIT = 0x80 + MAP_INHERIT_COPY = 0x1 + MAP_INHERIT_DEFAULT = 0x1 + MAP_INHERIT_DONATE_COPY = 0x3 + MAP_INHERIT_NONE = 0x2 + MAP_INHERIT_SHARE = 0x0 + MAP_NORESERVE = 0x40 + MAP_PRIVATE = 0x2 + MAP_RENAME = 0x20 + MAP_SHARED = 0x1 + MAP_STACK = 0x2000 + MAP_TRYFIXED = 0x400 + MAP_WIRED = 0x800 + MCL_CURRENT = 0x1 + MCL_FUTURE = 0x2 + MSG_BCAST = 0x100 + MSG_CMSG_CLOEXEC = 0x800 + MSG_CONTROLMBUF = 0x2000000 + MSG_CTRUNC = 0x20 + MSG_DONTROUTE = 0x4 + MSG_DONTWAIT = 0x80 + MSG_EOR = 0x8 + MSG_IOVUSRSPACE = 0x4000000 + MSG_LENUSRSPACE = 0x8000000 + MSG_MCAST = 0x200 + MSG_NAMEMBUF = 0x1000000 + MSG_NBIO = 0x1000 + MSG_NOSIGNAL = 0x400 + MSG_OOB = 0x1 + MSG_PEEK = 0x2 + MSG_TRUNC = 0x10 + MSG_USERFLAGS = 0xffffff + MSG_WAITALL = 0x40 + MS_ASYNC = 0x1 + MS_INVALIDATE = 0x2 + MS_SYNC = 0x4 + NAME_MAX = 0x1ff + NET_RT_DUMP = 0x1 + NET_RT_FLAGS = 0x2 + NET_RT_IFLIST = 0x5 + NET_RT_MAXID = 0x6 + NET_RT_OIFLIST = 0x4 + NET_RT_OOIFLIST = 0x3 + NOFLSH = 0x80000000 + NOTE_ATTRIB = 0x8 + NOTE_CHILD = 0x4 + NOTE_DELETE = 0x1 + NOTE_EXEC = 0x20000000 + NOTE_EXIT = 0x80000000 + NOTE_EXTEND = 0x4 + NOTE_FORK = 0x40000000 + NOTE_LINK = 0x10 + NOTE_LOWAT = 0x1 + NOTE_PCTRLMASK = 0xf0000000 + NOTE_PDATAMASK = 0xfffff + NOTE_RENAME = 0x20 + NOTE_REVOKE = 0x40 + NOTE_TRACK = 0x1 + NOTE_TRACKERR = 0x2 + NOTE_WRITE = 0x2 + OCRNL = 0x10 + OFIOGETBMAP = 0xc004667a + ONLCR = 0x2 + ONLRET = 0x40 + ONOCR = 0x20 + ONOEOT = 0x8 + OPOST = 0x1 + O_ACCMODE = 0x3 + O_ALT_IO = 0x40000 + O_APPEND = 0x8 + O_ASYNC = 0x40 + O_CLOEXEC = 0x400000 + O_CREAT = 0x200 + O_DIRECT = 0x80000 + O_DIRECTORY = 0x200000 + O_DSYNC = 0x10000 + O_EXCL = 0x800 + O_EXLOCK = 0x20 + O_FSYNC = 0x80 + O_NDELAY = 0x4 + O_NOCTTY = 0x8000 + O_NOFOLLOW = 0x100 + O_NONBLOCK = 0x4 + O_NOSIGPIPE = 0x1000000 + O_RDONLY = 0x0 + O_RDWR = 0x2 + O_RSYNC = 0x20000 + O_SHLOCK = 0x10 + O_SYNC = 0x80 + O_TRUNC = 0x400 + O_WRONLY = 0x1 + PARENB = 0x1000 + PARMRK = 0x8 + PARODD = 0x2000 + PENDIN = 0x20000000 + PRIO_PGRP = 0x1 + PRIO_PROCESS = 0x0 + PRIO_USER = 0x2 + PRI_IOFLUSH = 0x7c + PROT_EXEC = 0x4 + PROT_NONE = 0x0 + PROT_READ = 0x1 + PROT_WRITE = 0x2 + RLIMIT_AS = 0xa + RLIMIT_CORE = 0x4 + RLIMIT_CPU = 0x0 + RLIMIT_DATA = 0x2 + RLIMIT_FSIZE = 0x1 + RLIMIT_NOFILE = 0x8 + RLIMIT_STACK = 0x3 + RLIM_INFINITY = 0x7fffffffffffffff + RTAX_AUTHOR = 0x6 + RTAX_BRD = 0x7 + RTAX_DST = 0x0 + RTAX_GATEWAY = 0x1 + RTAX_GENMASK = 0x3 + RTAX_IFA = 0x5 + RTAX_IFP = 0x4 + RTAX_MAX = 0x9 + RTAX_NETMASK = 0x2 + RTAX_TAG = 0x8 + RTA_AUTHOR = 0x40 + RTA_BRD = 0x80 + RTA_DST = 0x1 + RTA_GATEWAY = 0x2 + RTA_GENMASK = 0x8 + RTA_IFA = 0x20 + RTA_IFP = 0x10 + RTA_NETMASK = 0x4 + RTA_TAG = 0x100 + RTF_ANNOUNCE = 0x20000 + RTF_BLACKHOLE = 0x1000 + RTF_CLONED = 0x2000 + RTF_CLONING = 0x100 + RTF_DONE = 0x40 + RTF_DYNAMIC = 0x10 + RTF_GATEWAY = 0x2 + RTF_HOST = 0x4 + RTF_LLINFO = 0x400 + RTF_MASK = 0x80 + RTF_MODIFIED = 0x20 + RTF_PROTO1 = 0x8000 + RTF_PROTO2 = 0x4000 + RTF_REJECT = 0x8 + RTF_SRC = 0x10000 + RTF_STATIC = 0x800 + RTF_UP = 0x1 + RTF_XRESOLVE = 0x200 + RTM_ADD = 0x1 + RTM_CHANGE = 
0x3 + RTM_CHGADDR = 0x15 + RTM_DELADDR = 0xd + RTM_DELETE = 0x2 + RTM_GET = 0x4 + RTM_IEEE80211 = 0x11 + RTM_IFANNOUNCE = 0x10 + RTM_IFINFO = 0x14 + RTM_LLINFO_UPD = 0x13 + RTM_LOCK = 0x8 + RTM_LOSING = 0x5 + RTM_MISS = 0x7 + RTM_NEWADDR = 0xc + RTM_OIFINFO = 0xf + RTM_OLDADD = 0x9 + RTM_OLDDEL = 0xa + RTM_OOIFINFO = 0xe + RTM_REDIRECT = 0x6 + RTM_RESOLVE = 0xb + RTM_RTTUNIT = 0xf4240 + RTM_SETGATE = 0x12 + RTM_VERSION = 0x4 + RTV_EXPIRE = 0x4 + RTV_HOPCOUNT = 0x2 + RTV_MTU = 0x1 + RTV_RPIPE = 0x8 + RTV_RTT = 0x40 + RTV_RTTVAR = 0x80 + RTV_SPIPE = 0x10 + RTV_SSTHRESH = 0x20 + RUSAGE_CHILDREN = -0x1 + RUSAGE_SELF = 0x0 + SCM_CREDS = 0x4 + SCM_RIGHTS = 0x1 + SCM_TIMESTAMP = 0x8 + SHUT_RD = 0x0 + SHUT_RDWR = 0x2 + SHUT_WR = 0x1 + SIOCADDMULTI = 0x80906931 + SIOCADDRT = 0x8030720a + SIOCAIFADDR = 0x8040691a + SIOCALIFADDR = 0x8118691c + SIOCATMARK = 0x40047307 + SIOCDELMULTI = 0x80906932 + SIOCDELRT = 0x8030720b + SIOCDIFADDR = 0x80906919 + SIOCDIFPHYADDR = 0x80906949 + SIOCDLIFADDR = 0x8118691e + SIOCGDRVSPEC = 0xc01c697b + SIOCGETPFSYNC = 0xc09069f8 + SIOCGETSGCNT = 0xc0147534 + SIOCGETVIFCNT = 0xc0147533 + SIOCGHIWAT = 0x40047301 + SIOCGIFADDR = 0xc0906921 + SIOCGIFADDRPREF = 0xc0946920 + SIOCGIFALIAS = 0xc040691b + SIOCGIFBRDADDR = 0xc0906923 + SIOCGIFCAP = 0xc0206976 + SIOCGIFCONF = 0xc0086926 + SIOCGIFDATA = 0xc0946985 + SIOCGIFDLT = 0xc0906977 + SIOCGIFDSTADDR = 0xc0906922 + SIOCGIFFLAGS = 0xc0906911 + SIOCGIFGENERIC = 0xc090693a + SIOCGIFMEDIA = 0xc0286936 + SIOCGIFMETRIC = 0xc0906917 + SIOCGIFMTU = 0xc090697e + SIOCGIFNETMASK = 0xc0906925 + SIOCGIFPDSTADDR = 0xc0906948 + SIOCGIFPSRCADDR = 0xc0906947 + SIOCGLIFADDR = 0xc118691d + SIOCGLIFPHYADDR = 0xc118694b + SIOCGLINKSTR = 0xc01c6987 + SIOCGLOWAT = 0x40047303 + SIOCGPGRP = 0x40047309 + SIOCGVH = 0xc0906983 + SIOCIFCREATE = 0x8090697a + SIOCIFDESTROY = 0x80906979 + SIOCIFGCLONERS = 0xc00c6978 + SIOCINITIFADDR = 0xc0446984 + SIOCSDRVSPEC = 0x801c697b + SIOCSETPFSYNC = 0x809069f7 + SIOCSHIWAT = 0x80047300 + SIOCSIFADDR = 0x8090690c + SIOCSIFADDRPREF = 0x8094691f + SIOCSIFBRDADDR = 0x80906913 + SIOCSIFCAP = 0x80206975 + SIOCSIFDSTADDR = 0x8090690e + SIOCSIFFLAGS = 0x80906910 + SIOCSIFGENERIC = 0x80906939 + SIOCSIFMEDIA = 0xc0906935 + SIOCSIFMETRIC = 0x80906918 + SIOCSIFMTU = 0x8090697f + SIOCSIFNETMASK = 0x80906916 + SIOCSIFPHYADDR = 0x80406946 + SIOCSLIFPHYADDR = 0x8118694a + SIOCSLINKSTR = 0x801c6988 + SIOCSLOWAT = 0x80047302 + SIOCSPGRP = 0x80047308 + SIOCSVH = 0xc0906982 + SIOCZIFDATA = 0xc0946986 + SOCK_CLOEXEC = 0x10000000 + SOCK_DGRAM = 0x2 + SOCK_FLAGS_MASK = 0xf0000000 + SOCK_NONBLOCK = 0x20000000 + SOCK_NOSIGPIPE = 0x40000000 + SOCK_RAW = 0x3 + SOCK_RDM = 0x4 + SOCK_SEQPACKET = 0x5 + SOCK_STREAM = 0x1 + SOL_SOCKET = 0xffff + SOMAXCONN = 0x80 + SO_ACCEPTCONN = 0x2 + SO_ACCEPTFILTER = 0x1000 + SO_BROADCAST = 0x20 + SO_DEBUG = 0x1 + SO_DONTROUTE = 0x10 + SO_ERROR = 0x1007 + SO_KEEPALIVE = 0x8 + SO_LINGER = 0x80 + SO_NOHEADER = 0x100a + SO_NOSIGPIPE = 0x800 + SO_OOBINLINE = 0x100 + SO_OVERFLOWED = 0x1009 + SO_RCVBUF = 0x1002 + SO_RCVLOWAT = 0x1004 + SO_RCVTIMEO = 0x100c + SO_REUSEADDR = 0x4 + SO_REUSEPORT = 0x200 + SO_SNDBUF = 0x1001 + SO_SNDLOWAT = 0x1003 + SO_SNDTIMEO = 0x100b + SO_TIMESTAMP = 0x2000 + SO_TYPE = 0x1008 + SO_USELOOPBACK = 0x40 + SYSCTL_VERSION = 0x1000000 + SYSCTL_VERS_0 = 0x0 + SYSCTL_VERS_1 = 0x1000000 + SYSCTL_VERS_MASK = 0xff000000 + S_ARCH1 = 0x10000 + S_ARCH2 = 0x20000 + S_BLKSIZE = 0x200 + S_IEXEC = 0x40 + S_IFBLK = 0x6000 + S_IFCHR = 0x2000 + S_IFDIR = 0x4000 + S_IFIFO = 0x1000 + S_IFLNK = 0xa000 + S_IFMT 
= 0xf000 + S_IFREG = 0x8000 + S_IFSOCK = 0xc000 + S_IFWHT = 0xe000 + S_IREAD = 0x100 + S_IRGRP = 0x20 + S_IROTH = 0x4 + S_IRUSR = 0x100 + S_IRWXG = 0x38 + S_IRWXO = 0x7 + S_IRWXU = 0x1c0 + S_ISGID = 0x400 + S_ISTXT = 0x200 + S_ISUID = 0x800 + S_ISVTX = 0x200 + S_IWGRP = 0x10 + S_IWOTH = 0x2 + S_IWRITE = 0x80 + S_IWUSR = 0x80 + S_IXGRP = 0x8 + S_IXOTH = 0x1 + S_IXUSR = 0x40 + S_LOGIN_SET = 0x1 + TCIFLUSH = 0x1 + TCIOFLUSH = 0x3 + TCOFLUSH = 0x2 + TCP_CONGCTL = 0x20 + TCP_KEEPCNT = 0x6 + TCP_KEEPIDLE = 0x3 + TCP_KEEPINIT = 0x7 + TCP_KEEPINTVL = 0x5 + TCP_MAXBURST = 0x4 + TCP_MAXSEG = 0x2 + TCP_MAXWIN = 0xffff + TCP_MAX_WINSHIFT = 0xe + TCP_MD5SIG = 0x10 + TCP_MINMSS = 0xd8 + TCP_MSS = 0x218 + TCP_NODELAY = 0x1 + TCSAFLUSH = 0x2 + TIOCCBRK = 0x2000747a + TIOCCDTR = 0x20007478 + TIOCCONS = 0x80047462 + TIOCDCDTIMESTAMP = 0x400c7458 + TIOCDRAIN = 0x2000745e + TIOCEXCL = 0x2000740d + TIOCEXT = 0x80047460 + TIOCFLAG_CDTRCTS = 0x10 + TIOCFLAG_CLOCAL = 0x2 + TIOCFLAG_CRTSCTS = 0x4 + TIOCFLAG_MDMBUF = 0x8 + TIOCFLAG_SOFTCAR = 0x1 + TIOCFLUSH = 0x80047410 + TIOCGETA = 0x402c7413 + TIOCGETD = 0x4004741a + TIOCGFLAGS = 0x4004745d + TIOCGLINED = 0x40207442 + TIOCGPGRP = 0x40047477 + TIOCGQSIZE = 0x40047481 + TIOCGRANTPT = 0x20007447 + TIOCGSID = 0x40047463 + TIOCGSIZE = 0x40087468 + TIOCGWINSZ = 0x40087468 + TIOCMBIC = 0x8004746b + TIOCMBIS = 0x8004746c + TIOCMGET = 0x4004746a + TIOCMSET = 0x8004746d + TIOCM_CAR = 0x40 + TIOCM_CD = 0x40 + TIOCM_CTS = 0x20 + TIOCM_DSR = 0x100 + TIOCM_DTR = 0x2 + TIOCM_LE = 0x1 + TIOCM_RI = 0x80 + TIOCM_RNG = 0x80 + TIOCM_RTS = 0x4 + TIOCM_SR = 0x10 + TIOCM_ST = 0x8 + TIOCNOTTY = 0x20007471 + TIOCNXCL = 0x2000740e + TIOCOUTQ = 0x40047473 + TIOCPKT = 0x80047470 + TIOCPKT_DATA = 0x0 + TIOCPKT_DOSTOP = 0x20 + TIOCPKT_FLUSHREAD = 0x1 + TIOCPKT_FLUSHWRITE = 0x2 + TIOCPKT_IOCTL = 0x40 + TIOCPKT_NOSTOP = 0x10 + TIOCPKT_START = 0x8 + TIOCPKT_STOP = 0x4 + TIOCPTMGET = 0x40287446 + TIOCPTSNAME = 0x40287448 + TIOCRCVFRAME = 0x80047445 + TIOCREMOTE = 0x80047469 + TIOCSBRK = 0x2000747b + TIOCSCTTY = 0x20007461 + TIOCSDTR = 0x20007479 + TIOCSETA = 0x802c7414 + TIOCSETAF = 0x802c7416 + TIOCSETAW = 0x802c7415 + TIOCSETD = 0x8004741b + TIOCSFLAGS = 0x8004745c + TIOCSIG = 0x2000745f + TIOCSLINED = 0x80207443 + TIOCSPGRP = 0x80047476 + TIOCSQSIZE = 0x80047480 + TIOCSSIZE = 0x80087467 + TIOCSTART = 0x2000746e + TIOCSTAT = 0x80047465 + TIOCSTI = 0x80017472 + TIOCSTOP = 0x2000746f + TIOCSWINSZ = 0x80087467 + TIOCUCNTL = 0x80047466 + TIOCXMTFRAME = 0x80047444 + TOSTOP = 0x400000 + VDISCARD = 0xf + VDSUSP = 0xb + VEOF = 0x0 + VEOL = 0x1 + VEOL2 = 0x2 + VERASE = 0x3 + VINTR = 0x8 + VKILL = 0x5 + VLNEXT = 0xe + VMIN = 0x10 + VQUIT = 0x9 + VREPRINT = 0x6 + VSTART = 0xc + VSTATUS = 0x12 + VSTOP = 0xd + VSUSP = 0xa + VTIME = 0x11 + VWERASE = 0x4 + WALL = 0x8 + WALLSIG = 0x8 + WALTSIG = 0x4 + WCLONE = 0x4 + WCOREFLAG = 0x80 + WNOHANG = 0x1 + WNOWAIT = 0x10000 + WNOZOMBIE = 0x20000 + WOPTSCHECKED = 0x40000 + WSTOPPED = 0x7f + WUNTRACED = 0x2 +) + +// Errors +const ( + E2BIG = syscall.Errno(0x7) + EACCES = syscall.Errno(0xd) + EADDRINUSE = syscall.Errno(0x30) + EADDRNOTAVAIL = syscall.Errno(0x31) + EAFNOSUPPORT = syscall.Errno(0x2f) + EAGAIN = syscall.Errno(0x23) + EALREADY = syscall.Errno(0x25) + EAUTH = syscall.Errno(0x50) + EBADF = syscall.Errno(0x9) + EBADMSG = syscall.Errno(0x58) + EBADRPC = syscall.Errno(0x48) + EBUSY = syscall.Errno(0x10) + ECANCELED = syscall.Errno(0x57) + ECHILD = syscall.Errno(0xa) + ECONNABORTED = syscall.Errno(0x35) + ECONNREFUSED = syscall.Errno(0x3d) + ECONNRESET = 
syscall.Errno(0x36) + EDEADLK = syscall.Errno(0xb) + EDESTADDRREQ = syscall.Errno(0x27) + EDOM = syscall.Errno(0x21) + EDQUOT = syscall.Errno(0x45) + EEXIST = syscall.Errno(0x11) + EFAULT = syscall.Errno(0xe) + EFBIG = syscall.Errno(0x1b) + EFTYPE = syscall.Errno(0x4f) + EHOSTDOWN = syscall.Errno(0x40) + EHOSTUNREACH = syscall.Errno(0x41) + EIDRM = syscall.Errno(0x52) + EILSEQ = syscall.Errno(0x55) + EINPROGRESS = syscall.Errno(0x24) + EINTR = syscall.Errno(0x4) + EINVAL = syscall.Errno(0x16) + EIO = syscall.Errno(0x5) + EISCONN = syscall.Errno(0x38) + EISDIR = syscall.Errno(0x15) + ELAST = syscall.Errno(0x60) + ELOOP = syscall.Errno(0x3e) + EMFILE = syscall.Errno(0x18) + EMLINK = syscall.Errno(0x1f) + EMSGSIZE = syscall.Errno(0x28) + EMULTIHOP = syscall.Errno(0x5e) + ENAMETOOLONG = syscall.Errno(0x3f) + ENEEDAUTH = syscall.Errno(0x51) + ENETDOWN = syscall.Errno(0x32) + ENETRESET = syscall.Errno(0x34) + ENETUNREACH = syscall.Errno(0x33) + ENFILE = syscall.Errno(0x17) + ENOATTR = syscall.Errno(0x5d) + ENOBUFS = syscall.Errno(0x37) + ENODATA = syscall.Errno(0x59) + ENODEV = syscall.Errno(0x13) + ENOENT = syscall.Errno(0x2) + ENOEXEC = syscall.Errno(0x8) + ENOLCK = syscall.Errno(0x4d) + ENOLINK = syscall.Errno(0x5f) + ENOMEM = syscall.Errno(0xc) + ENOMSG = syscall.Errno(0x53) + ENOPROTOOPT = syscall.Errno(0x2a) + ENOSPC = syscall.Errno(0x1c) + ENOSR = syscall.Errno(0x5a) + ENOSTR = syscall.Errno(0x5b) + ENOSYS = syscall.Errno(0x4e) + ENOTBLK = syscall.Errno(0xf) + ENOTCONN = syscall.Errno(0x39) + ENOTDIR = syscall.Errno(0x14) + ENOTEMPTY = syscall.Errno(0x42) + ENOTSOCK = syscall.Errno(0x26) + ENOTSUP = syscall.Errno(0x56) + ENOTTY = syscall.Errno(0x19) + ENXIO = syscall.Errno(0x6) + EOPNOTSUPP = syscall.Errno(0x2d) + EOVERFLOW = syscall.Errno(0x54) + EPERM = syscall.Errno(0x1) + EPFNOSUPPORT = syscall.Errno(0x2e) + EPIPE = syscall.Errno(0x20) + EPROCLIM = syscall.Errno(0x43) + EPROCUNAVAIL = syscall.Errno(0x4c) + EPROGMISMATCH = syscall.Errno(0x4b) + EPROGUNAVAIL = syscall.Errno(0x4a) + EPROTO = syscall.Errno(0x60) + EPROTONOSUPPORT = syscall.Errno(0x2b) + EPROTOTYPE = syscall.Errno(0x29) + ERANGE = syscall.Errno(0x22) + EREMOTE = syscall.Errno(0x47) + EROFS = syscall.Errno(0x1e) + ERPCMISMATCH = syscall.Errno(0x49) + ESHUTDOWN = syscall.Errno(0x3a) + ESOCKTNOSUPPORT = syscall.Errno(0x2c) + ESPIPE = syscall.Errno(0x1d) + ESRCH = syscall.Errno(0x3) + ESTALE = syscall.Errno(0x46) + ETIME = syscall.Errno(0x5c) + ETIMEDOUT = syscall.Errno(0x3c) + ETOOMANYREFS = syscall.Errno(0x3b) + ETXTBSY = syscall.Errno(0x1a) + EUSERS = syscall.Errno(0x44) + EWOULDBLOCK = syscall.Errno(0x23) + EXDEV = syscall.Errno(0x12) +) + +// Signals +const ( + SIGABRT = syscall.Signal(0x6) + SIGALRM = syscall.Signal(0xe) + SIGBUS = syscall.Signal(0xa) + SIGCHLD = syscall.Signal(0x14) + SIGCONT = syscall.Signal(0x13) + SIGEMT = syscall.Signal(0x7) + SIGFPE = syscall.Signal(0x8) + SIGHUP = syscall.Signal(0x1) + SIGILL = syscall.Signal(0x4) + SIGINFO = syscall.Signal(0x1d) + SIGINT = syscall.Signal(0x2) + SIGIO = syscall.Signal(0x17) + SIGIOT = syscall.Signal(0x6) + SIGKILL = syscall.Signal(0x9) + SIGPIPE = syscall.Signal(0xd) + SIGPROF = syscall.Signal(0x1b) + SIGPWR = syscall.Signal(0x20) + SIGQUIT = syscall.Signal(0x3) + SIGSEGV = syscall.Signal(0xb) + SIGSTOP = syscall.Signal(0x11) + SIGSYS = syscall.Signal(0xc) + SIGTERM = syscall.Signal(0xf) + SIGTRAP = syscall.Signal(0x5) + SIGTSTP = syscall.Signal(0x12) + SIGTTIN = syscall.Signal(0x15) + SIGTTOU = syscall.Signal(0x16) + SIGURG = syscall.Signal(0x10) + SIGUSR1 = 
syscall.Signal(0x1e) + SIGUSR2 = syscall.Signal(0x1f) + SIGVTALRM = syscall.Signal(0x1a) + SIGWINCH = syscall.Signal(0x1c) + SIGXCPU = syscall.Signal(0x18) + SIGXFSZ = syscall.Signal(0x19) +) + +// Error table +var errors = [...]string{ + 1: "operation not permitted", + 2: "no such file or directory", + 3: "no such process", + 4: "interrupted system call", + 5: "input/output error", + 6: "device not configured", + 7: "argument list too long", + 8: "exec format error", + 9: "bad file descriptor", + 10: "no child processes", + 11: "resource deadlock avoided", + 12: "cannot allocate memory", + 13: "permission denied", + 14: "bad address", + 15: "block device required", + 16: "device busy", + 17: "file exists", + 18: "cross-device link", + 19: "operation not supported by device", + 20: "not a directory", + 21: "is a directory", + 22: "invalid argument", + 23: "too many open files in system", + 24: "too many open files", + 25: "inappropriate ioctl for device", + 26: "text file busy", + 27: "file too large", + 28: "no space left on device", + 29: "illegal seek", + 30: "read-only file system", + 31: "too many links", + 32: "broken pipe", + 33: "numerical argument out of domain", + 34: "result too large or too small", + 35: "resource temporarily unavailable", + 36: "operation now in progress", + 37: "operation already in progress", + 38: "socket operation on non-socket", + 39: "destination address required", + 40: "message too long", + 41: "protocol wrong type for socket", + 42: "protocol option not available", + 43: "protocol not supported", + 44: "socket type not supported", + 45: "operation not supported", + 46: "protocol family not supported", + 47: "address family not supported by protocol family", + 48: "address already in use", + 49: "can't assign requested address", + 50: "network is down", + 51: "network is unreachable", + 52: "network dropped connection on reset", + 53: "software caused connection abort", + 54: "connection reset by peer", + 55: "no buffer space available", + 56: "socket is already connected", + 57: "socket is not connected", + 58: "can't send after socket shutdown", + 59: "too many references: can't splice", + 60: "connection timed out", + 61: "connection refused", + 62: "too many levels of symbolic links", + 63: "file name too long", + 64: "host is down", + 65: "no route to host", + 66: "directory not empty", + 67: "too many processes", + 68: "too many users", + 69: "disc quota exceeded", + 70: "stale NFS file handle", + 71: "too many levels of remote in path", + 72: "RPC struct is bad", + 73: "RPC version wrong", + 74: "RPC prog. 
not avail", + 75: "program version wrong", + 76: "bad procedure for program", + 77: "no locks available", + 78: "function not implemented", + 79: "inappropriate file type or format", + 80: "authentication error", + 81: "need authenticator", + 82: "identifier removed", + 83: "no message of desired type", + 84: "value too large to be stored in data type", + 85: "illegal byte sequence", + 86: "not supported", + 87: "operation Canceled", + 88: "bad or Corrupt message", + 89: "no message available", + 90: "no STREAM resources", + 91: "not a STREAM", + 92: "STREAM ioctl timeout", + 93: "attribute not found", + 94: "multihop attempted", + 95: "link has been severed", + 96: "protocol error", +} + +// Signal table +var signals = [...]string{ + 1: "hangup", + 2: "interrupt", + 3: "quit", + 4: "illegal instruction", + 5: "trace/BPT trap", + 6: "abort trap", + 7: "EMT trap", + 8: "floating point exception", + 9: "killed", + 10: "bus error", + 11: "segmentation fault", + 12: "bad system call", + 13: "broken pipe", + 14: "alarm clock", + 15: "terminated", + 16: "urgent I/O condition", + 17: "stopped (signal)", + 18: "stopped", + 19: "continued", + 20: "child exited", + 21: "stopped (tty input)", + 22: "stopped (tty output)", + 23: "I/O possible", + 24: "cputime limit exceeded", + 25: "filesize limit exceeded", + 26: "virtual timer expired", + 27: "profiling timer expired", + 28: "window size changes", + 29: "information request", + 30: "user defined signal 1", + 31: "user defined signal 2", + 32: "power fail/restart", +} diff --git a/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zerrors_netbsd_amd64.go b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zerrors_netbsd_amd64.go new file mode 100644 index 000000000..4994437b6 --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zerrors_netbsd_amd64.go @@ -0,0 +1,1702 @@ +// mkerrors.sh -m64 +// MACHINE GENERATED BY THE COMMAND ABOVE; DO NOT EDIT + +// +build amd64,netbsd + +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs -- -m64 _const.go + +package unix + +import "syscall" + +const ( + AF_APPLETALK = 0x10 + AF_ARP = 0x1c + AF_BLUETOOTH = 0x1f + AF_CCITT = 0xa + AF_CHAOS = 0x5 + AF_CNT = 0x15 + AF_COIP = 0x14 + AF_DATAKIT = 0x9 + AF_DECnet = 0xc + AF_DLI = 0xd + AF_E164 = 0x1a + AF_ECMA = 0x8 + AF_HYLINK = 0xf + AF_IEEE80211 = 0x20 + AF_IMPLINK = 0x3 + AF_INET = 0x2 + AF_INET6 = 0x18 + AF_IPX = 0x17 + AF_ISDN = 0x1a + AF_ISO = 0x7 + AF_LAT = 0xe + AF_LINK = 0x12 + AF_LOCAL = 0x1 + AF_MAX = 0x23 + AF_MPLS = 0x21 + AF_NATM = 0x1b + AF_NS = 0x6 + AF_OROUTE = 0x11 + AF_OSI = 0x7 + AF_PUP = 0x4 + AF_ROUTE = 0x22 + AF_SNA = 0xb + AF_UNIX = 0x1 + AF_UNSPEC = 0x0 + ARPHRD_ARCNET = 0x7 + ARPHRD_ETHER = 0x1 + ARPHRD_FRELAY = 0xf + ARPHRD_IEEE1394 = 0x18 + ARPHRD_IEEE802 = 0x6 + ARPHRD_STRIP = 0x17 + B0 = 0x0 + B110 = 0x6e + B115200 = 0x1c200 + B1200 = 0x4b0 + B134 = 0x86 + B14400 = 0x3840 + B150 = 0x96 + B1800 = 0x708 + B19200 = 0x4b00 + B200 = 0xc8 + B230400 = 0x38400 + B2400 = 0x960 + B28800 = 0x7080 + B300 = 0x12c + B38400 = 0x9600 + B460800 = 0x70800 + B4800 = 0x12c0 + B50 = 0x32 + B57600 = 0xe100 + B600 = 0x258 + B7200 = 0x1c20 + B75 = 0x4b + B76800 = 0x12c00 + B921600 = 0xe1000 + B9600 = 0x2580 + BIOCFEEDBACK = 0x8004427d + BIOCFLUSH = 0x20004268 + BIOCGBLEN = 0x40044266 + BIOCGDLT = 0x4004426a + BIOCGDLTLIST = 0xc0104277 + BIOCGETIF = 0x4090426b + BIOCGFEEDBACK = 0x4004427c + BIOCGHDRCMPLT = 0x40044274 + BIOCGRTIMEOUT = 0x4010427b + BIOCGSEESENT = 0x40044278 + 
BIOCGSTATS = 0x4080426f + BIOCGSTATSOLD = 0x4008426f + BIOCIMMEDIATE = 0x80044270 + BIOCPROMISC = 0x20004269 + BIOCSBLEN = 0xc0044266 + BIOCSDLT = 0x80044276 + BIOCSETF = 0x80104267 + BIOCSETIF = 0x8090426c + BIOCSFEEDBACK = 0x8004427d + BIOCSHDRCMPLT = 0x80044275 + BIOCSRTIMEOUT = 0x8010427a + BIOCSSEESENT = 0x80044279 + BIOCSTCPF = 0x80104272 + BIOCSUDPF = 0x80104273 + BIOCVERSION = 0x40044271 + BPF_A = 0x10 + BPF_ABS = 0x20 + BPF_ADD = 0x0 + BPF_ALIGNMENT = 0x8 + BPF_ALIGNMENT32 = 0x4 + BPF_ALU = 0x4 + BPF_AND = 0x50 + BPF_B = 0x10 + BPF_DFLTBUFSIZE = 0x100000 + BPF_DIV = 0x30 + BPF_H = 0x8 + BPF_IMM = 0x0 + BPF_IND = 0x40 + BPF_JA = 0x0 + BPF_JEQ = 0x10 + BPF_JGE = 0x30 + BPF_JGT = 0x20 + BPF_JMP = 0x5 + BPF_JSET = 0x40 + BPF_K = 0x0 + BPF_LD = 0x0 + BPF_LDX = 0x1 + BPF_LEN = 0x80 + BPF_LSH = 0x60 + BPF_MAJOR_VERSION = 0x1 + BPF_MAXBUFSIZE = 0x1000000 + BPF_MAXINSNS = 0x200 + BPF_MEM = 0x60 + BPF_MEMWORDS = 0x10 + BPF_MINBUFSIZE = 0x20 + BPF_MINOR_VERSION = 0x1 + BPF_MISC = 0x7 + BPF_MSH = 0xa0 + BPF_MUL = 0x20 + BPF_NEG = 0x80 + BPF_OR = 0x40 + BPF_RELEASE = 0x30bb6 + BPF_RET = 0x6 + BPF_RSH = 0x70 + BPF_ST = 0x2 + BPF_STX = 0x3 + BPF_SUB = 0x10 + BPF_TAX = 0x0 + BPF_TXA = 0x80 + BPF_W = 0x0 + BPF_X = 0x8 + BRKINT = 0x2 + CFLUSH = 0xf + CLOCAL = 0x8000 + CLONE_CSIGNAL = 0xff + CLONE_FILES = 0x400 + CLONE_FS = 0x200 + CLONE_PID = 0x1000 + CLONE_PTRACE = 0x2000 + CLONE_SIGHAND = 0x800 + CLONE_VFORK = 0x4000 + CLONE_VM = 0x100 + CREAD = 0x800 + CS5 = 0x0 + CS6 = 0x100 + CS7 = 0x200 + CS8 = 0x300 + CSIZE = 0x300 + CSTART = 0x11 + CSTATUS = 0x14 + CSTOP = 0x13 + CSTOPB = 0x400 + CSUSP = 0x1a + CTL_MAXNAME = 0xc + CTL_NET = 0x4 + CTL_QUERY = -0x2 + DIOCBSFLUSH = 0x20006478 + DLT_A429 = 0xb8 + DLT_A653_ICM = 0xb9 + DLT_AIRONET_HEADER = 0x78 + DLT_AOS = 0xde + DLT_APPLE_IP_OVER_IEEE1394 = 0x8a + DLT_ARCNET = 0x7 + DLT_ARCNET_LINUX = 0x81 + DLT_ATM_CLIP = 0x13 + DLT_ATM_RFC1483 = 0xb + DLT_AURORA = 0x7e + DLT_AX25 = 0x3 + DLT_AX25_KISS = 0xca + DLT_BACNET_MS_TP = 0xa5 + DLT_BLUETOOTH_HCI_H4 = 0xbb + DLT_BLUETOOTH_HCI_H4_WITH_PHDR = 0xc9 + DLT_CAN20B = 0xbe + DLT_CAN_SOCKETCAN = 0xe3 + DLT_CHAOS = 0x5 + DLT_CISCO_IOS = 0x76 + DLT_C_HDLC = 0x68 + DLT_C_HDLC_WITH_DIR = 0xcd + DLT_DECT = 0xdd + DLT_DOCSIS = 0x8f + DLT_ECONET = 0x73 + DLT_EN10MB = 0x1 + DLT_EN3MB = 0x2 + DLT_ENC = 0x6d + DLT_ERF = 0xc5 + DLT_ERF_ETH = 0xaf + DLT_ERF_POS = 0xb0 + DLT_FC_2 = 0xe0 + DLT_FC_2_WITH_FRAME_DELIMS = 0xe1 + DLT_FDDI = 0xa + DLT_FLEXRAY = 0xd2 + DLT_FRELAY = 0x6b + DLT_FRELAY_WITH_DIR = 0xce + DLT_GCOM_SERIAL = 0xad + DLT_GCOM_T1E1 = 0xac + DLT_GPF_F = 0xab + DLT_GPF_T = 0xaa + DLT_GPRS_LLC = 0xa9 + DLT_GSMTAP_ABIS = 0xda + DLT_GSMTAP_UM = 0xd9 + DLT_HDLC = 0x10 + DLT_HHDLC = 0x79 + DLT_HIPPI = 0xf + DLT_IBM_SN = 0x92 + DLT_IBM_SP = 0x91 + DLT_IEEE802 = 0x6 + DLT_IEEE802_11 = 0x69 + DLT_IEEE802_11_RADIO = 0x7f + DLT_IEEE802_11_RADIO_AVS = 0xa3 + DLT_IEEE802_15_4 = 0xc3 + DLT_IEEE802_15_4_LINUX = 0xbf + DLT_IEEE802_15_4_NONASK_PHY = 0xd7 + DLT_IEEE802_16_MAC_CPS = 0xbc + DLT_IEEE802_16_MAC_CPS_RADIO = 0xc1 + DLT_IPMB = 0xc7 + DLT_IPMB_LINUX = 0xd1 + DLT_IPNET = 0xe2 + DLT_IPV4 = 0xe4 + DLT_IPV6 = 0xe5 + DLT_IP_OVER_FC = 0x7a + DLT_JUNIPER_ATM1 = 0x89 + DLT_JUNIPER_ATM2 = 0x87 + DLT_JUNIPER_CHDLC = 0xb5 + DLT_JUNIPER_ES = 0x84 + DLT_JUNIPER_ETHER = 0xb2 + DLT_JUNIPER_FRELAY = 0xb4 + DLT_JUNIPER_GGSN = 0x85 + DLT_JUNIPER_ISM = 0xc2 + DLT_JUNIPER_MFR = 0x86 + DLT_JUNIPER_MLFR = 0x83 + DLT_JUNIPER_MLPPP = 0x82 + DLT_JUNIPER_MONITOR = 0xa4 + DLT_JUNIPER_PIC_PEER = 0xae + DLT_JUNIPER_PPP = 0xb3 + DLT_JUNIPER_PPPOE 
= 0xa7 + DLT_JUNIPER_PPPOE_ATM = 0xa8 + DLT_JUNIPER_SERVICES = 0x88 + DLT_JUNIPER_ST = 0xc8 + DLT_JUNIPER_VP = 0xb7 + DLT_LAPB_WITH_DIR = 0xcf + DLT_LAPD = 0xcb + DLT_LIN = 0xd4 + DLT_LINUX_EVDEV = 0xd8 + DLT_LINUX_IRDA = 0x90 + DLT_LINUX_LAPD = 0xb1 + DLT_LINUX_SLL = 0x71 + DLT_LOOP = 0x6c + DLT_LTALK = 0x72 + DLT_MFR = 0xb6 + DLT_MOST = 0xd3 + DLT_MPLS = 0xdb + DLT_MTP2 = 0x8c + DLT_MTP2_WITH_PHDR = 0x8b + DLT_MTP3 = 0x8d + DLT_NULL = 0x0 + DLT_PCI_EXP = 0x7d + DLT_PFLOG = 0x75 + DLT_PFSYNC = 0x12 + DLT_PPI = 0xc0 + DLT_PPP = 0x9 + DLT_PPP_BSDOS = 0xe + DLT_PPP_ETHER = 0x33 + DLT_PPP_PPPD = 0xa6 + DLT_PPP_SERIAL = 0x32 + DLT_PPP_WITH_DIR = 0xcc + DLT_PRISM_HEADER = 0x77 + DLT_PRONET = 0x4 + DLT_RAIF1 = 0xc6 + DLT_RAW = 0xc + DLT_RAWAF_MASK = 0x2240000 + DLT_RIO = 0x7c + DLT_SCCP = 0x8e + DLT_SITA = 0xc4 + DLT_SLIP = 0x8 + DLT_SLIP_BSDOS = 0xd + DLT_SUNATM = 0x7b + DLT_SYMANTEC_FIREWALL = 0x63 + DLT_TZSP = 0x80 + DLT_USB = 0xba + DLT_USB_LINUX = 0xbd + DLT_USB_LINUX_MMAPPED = 0xdc + DLT_WIHART = 0xdf + DLT_X2E_SERIAL = 0xd5 + DLT_X2E_XORAYA = 0xd6 + DT_BLK = 0x6 + DT_CHR = 0x2 + DT_DIR = 0x4 + DT_FIFO = 0x1 + DT_LNK = 0xa + DT_REG = 0x8 + DT_SOCK = 0xc + DT_UNKNOWN = 0x0 + DT_WHT = 0xe + ECHO = 0x8 + ECHOCTL = 0x40 + ECHOE = 0x2 + ECHOK = 0x4 + ECHOKE = 0x1 + ECHONL = 0x10 + ECHOPRT = 0x20 + EMUL_LINUX = 0x1 + EMUL_LINUX32 = 0x5 + EMUL_MAXID = 0x6 + ETHERCAP_JUMBO_MTU = 0x4 + ETHERCAP_VLAN_HWTAGGING = 0x2 + ETHERCAP_VLAN_MTU = 0x1 + ETHERMIN = 0x2e + ETHERMTU = 0x5dc + ETHERMTU_JUMBO = 0x2328 + ETHERTYPE_8023 = 0x4 + ETHERTYPE_AARP = 0x80f3 + ETHERTYPE_ACCTON = 0x8390 + ETHERTYPE_AEONIC = 0x8036 + ETHERTYPE_ALPHA = 0x814a + ETHERTYPE_AMBER = 0x6008 + ETHERTYPE_AMOEBA = 0x8145 + ETHERTYPE_APOLLO = 0x80f7 + ETHERTYPE_APOLLODOMAIN = 0x8019 + ETHERTYPE_APPLETALK = 0x809b + ETHERTYPE_APPLITEK = 0x80c7 + ETHERTYPE_ARGONAUT = 0x803a + ETHERTYPE_ARP = 0x806 + ETHERTYPE_AT = 0x809b + ETHERTYPE_ATALK = 0x809b + ETHERTYPE_ATOMIC = 0x86df + ETHERTYPE_ATT = 0x8069 + ETHERTYPE_ATTSTANFORD = 0x8008 + ETHERTYPE_AUTOPHON = 0x806a + ETHERTYPE_AXIS = 0x8856 + ETHERTYPE_BCLOOP = 0x9003 + ETHERTYPE_BOFL = 0x8102 + ETHERTYPE_CABLETRON = 0x7034 + ETHERTYPE_CHAOS = 0x804 + ETHERTYPE_COMDESIGN = 0x806c + ETHERTYPE_COMPUGRAPHIC = 0x806d + ETHERTYPE_COUNTERPOINT = 0x8062 + ETHERTYPE_CRONUS = 0x8004 + ETHERTYPE_CRONUSVLN = 0x8003 + ETHERTYPE_DCA = 0x1234 + ETHERTYPE_DDE = 0x807b + ETHERTYPE_DEBNI = 0xaaaa + ETHERTYPE_DECAM = 0x8048 + ETHERTYPE_DECCUST = 0x6006 + ETHERTYPE_DECDIAG = 0x6005 + ETHERTYPE_DECDNS = 0x803c + ETHERTYPE_DECDTS = 0x803e + ETHERTYPE_DECEXPER = 0x6000 + ETHERTYPE_DECLAST = 0x8041 + ETHERTYPE_DECLTM = 0x803f + ETHERTYPE_DECMUMPS = 0x6009 + ETHERTYPE_DECNETBIOS = 0x8040 + ETHERTYPE_DELTACON = 0x86de + ETHERTYPE_DIDDLE = 0x4321 + ETHERTYPE_DLOG1 = 0x660 + ETHERTYPE_DLOG2 = 0x661 + ETHERTYPE_DN = 0x6003 + ETHERTYPE_DOGFIGHT = 0x1989 + ETHERTYPE_DSMD = 0x8039 + ETHERTYPE_ECMA = 0x803 + ETHERTYPE_ENCRYPT = 0x803d + ETHERTYPE_ES = 0x805d + ETHERTYPE_EXCELAN = 0x8010 + ETHERTYPE_EXPERDATA = 0x8049 + ETHERTYPE_FLIP = 0x8146 + ETHERTYPE_FLOWCONTROL = 0x8808 + ETHERTYPE_FRARP = 0x808 + ETHERTYPE_GENDYN = 0x8068 + ETHERTYPE_HAYES = 0x8130 + ETHERTYPE_HIPPI_FP = 0x8180 + ETHERTYPE_HITACHI = 0x8820 + ETHERTYPE_HP = 0x8005 + ETHERTYPE_IEEEPUP = 0xa00 + ETHERTYPE_IEEEPUPAT = 0xa01 + ETHERTYPE_IMLBL = 0x4c42 + ETHERTYPE_IMLBLDIAG = 0x424c + ETHERTYPE_IP = 0x800 + ETHERTYPE_IPAS = 0x876c + ETHERTYPE_IPV6 = 0x86dd + ETHERTYPE_IPX = 0x8137 + ETHERTYPE_IPXNEW = 0x8037 + ETHERTYPE_KALPANA = 0x8582 + 
ETHERTYPE_LANBRIDGE = 0x8038 + ETHERTYPE_LANPROBE = 0x8888 + ETHERTYPE_LAT = 0x6004 + ETHERTYPE_LBACK = 0x9000 + ETHERTYPE_LITTLE = 0x8060 + ETHERTYPE_LOGICRAFT = 0x8148 + ETHERTYPE_LOOPBACK = 0x9000 + ETHERTYPE_MATRA = 0x807a + ETHERTYPE_MAX = 0xffff + ETHERTYPE_MERIT = 0x807c + ETHERTYPE_MICP = 0x873a + ETHERTYPE_MOPDL = 0x6001 + ETHERTYPE_MOPRC = 0x6002 + ETHERTYPE_MOTOROLA = 0x818d + ETHERTYPE_MPLS = 0x8847 + ETHERTYPE_MPLS_MCAST = 0x8848 + ETHERTYPE_MUMPS = 0x813f + ETHERTYPE_NBPCC = 0x3c04 + ETHERTYPE_NBPCLAIM = 0x3c09 + ETHERTYPE_NBPCLREQ = 0x3c05 + ETHERTYPE_NBPCLRSP = 0x3c06 + ETHERTYPE_NBPCREQ = 0x3c02 + ETHERTYPE_NBPCRSP = 0x3c03 + ETHERTYPE_NBPDG = 0x3c07 + ETHERTYPE_NBPDGB = 0x3c08 + ETHERTYPE_NBPDLTE = 0x3c0a + ETHERTYPE_NBPRAR = 0x3c0c + ETHERTYPE_NBPRAS = 0x3c0b + ETHERTYPE_NBPRST = 0x3c0d + ETHERTYPE_NBPSCD = 0x3c01 + ETHERTYPE_NBPVCD = 0x3c00 + ETHERTYPE_NBS = 0x802 + ETHERTYPE_NCD = 0x8149 + ETHERTYPE_NESTAR = 0x8006 + ETHERTYPE_NETBEUI = 0x8191 + ETHERTYPE_NOVELL = 0x8138 + ETHERTYPE_NS = 0x600 + ETHERTYPE_NSAT = 0x601 + ETHERTYPE_NSCOMPAT = 0x807 + ETHERTYPE_NTRAILER = 0x10 + ETHERTYPE_OS9 = 0x7007 + ETHERTYPE_OS9NET = 0x7009 + ETHERTYPE_PACER = 0x80c6 + ETHERTYPE_PAE = 0x888e + ETHERTYPE_PCS = 0x4242 + ETHERTYPE_PLANNING = 0x8044 + ETHERTYPE_PPP = 0x880b + ETHERTYPE_PPPOE = 0x8864 + ETHERTYPE_PPPOEDISC = 0x8863 + ETHERTYPE_PRIMENTS = 0x7031 + ETHERTYPE_PUP = 0x200 + ETHERTYPE_PUPAT = 0x200 + ETHERTYPE_RACAL = 0x7030 + ETHERTYPE_RATIONAL = 0x8150 + ETHERTYPE_RAWFR = 0x6559 + ETHERTYPE_RCL = 0x1995 + ETHERTYPE_RDP = 0x8739 + ETHERTYPE_RETIX = 0x80f2 + ETHERTYPE_REVARP = 0x8035 + ETHERTYPE_SCA = 0x6007 + ETHERTYPE_SECTRA = 0x86db + ETHERTYPE_SECUREDATA = 0x876d + ETHERTYPE_SGITW = 0x817e + ETHERTYPE_SG_BOUNCE = 0x8016 + ETHERTYPE_SG_DIAG = 0x8013 + ETHERTYPE_SG_NETGAMES = 0x8014 + ETHERTYPE_SG_RESV = 0x8015 + ETHERTYPE_SIMNET = 0x5208 + ETHERTYPE_SLOWPROTOCOLS = 0x8809 + ETHERTYPE_SNA = 0x80d5 + ETHERTYPE_SNMP = 0x814c + ETHERTYPE_SONIX = 0xfaf5 + ETHERTYPE_SPIDER = 0x809f + ETHERTYPE_SPRITE = 0x500 + ETHERTYPE_STP = 0x8181 + ETHERTYPE_TALARIS = 0x812b + ETHERTYPE_TALARISMC = 0x852b + ETHERTYPE_TCPCOMP = 0x876b + ETHERTYPE_TCPSM = 0x9002 + ETHERTYPE_TEC = 0x814f + ETHERTYPE_TIGAN = 0x802f + ETHERTYPE_TRAIL = 0x1000 + ETHERTYPE_TRANSETHER = 0x6558 + ETHERTYPE_TYMSHARE = 0x802e + ETHERTYPE_UBBST = 0x7005 + ETHERTYPE_UBDEBUG = 0x900 + ETHERTYPE_UBDIAGLOOP = 0x7002 + ETHERTYPE_UBDL = 0x7000 + ETHERTYPE_UBNIU = 0x7001 + ETHERTYPE_UBNMC = 0x7003 + ETHERTYPE_VALID = 0x1600 + ETHERTYPE_VARIAN = 0x80dd + ETHERTYPE_VAXELN = 0x803b + ETHERTYPE_VEECO = 0x8067 + ETHERTYPE_VEXP = 0x805b + ETHERTYPE_VGLAB = 0x8131 + ETHERTYPE_VINES = 0xbad + ETHERTYPE_VINESECHO = 0xbaf + ETHERTYPE_VINESLOOP = 0xbae + ETHERTYPE_VITAL = 0xff00 + ETHERTYPE_VLAN = 0x8100 + ETHERTYPE_VLTLMAN = 0x8080 + ETHERTYPE_VPROD = 0x805c + ETHERTYPE_VURESERVED = 0x8147 + ETHERTYPE_WATERLOO = 0x8130 + ETHERTYPE_WELLFLEET = 0x8103 + ETHERTYPE_X25 = 0x805 + ETHERTYPE_X75 = 0x801 + ETHERTYPE_XNSSM = 0x9001 + ETHERTYPE_XTP = 0x817d + ETHER_ADDR_LEN = 0x6 + ETHER_CRC_LEN = 0x4 + ETHER_CRC_POLY_BE = 0x4c11db6 + ETHER_CRC_POLY_LE = 0xedb88320 + ETHER_HDR_LEN = 0xe + ETHER_MAX_LEN = 0x5ee + ETHER_MAX_LEN_JUMBO = 0x233a + ETHER_MIN_LEN = 0x40 + ETHER_PPPOE_ENCAP_LEN = 0x8 + ETHER_TYPE_LEN = 0x2 + ETHER_VLAN_ENCAP_LEN = 0x4 + EVFILT_AIO = 0x2 + EVFILT_PROC = 0x4 + EVFILT_READ = 0x0 + EVFILT_SIGNAL = 0x5 + EVFILT_SYSCOUNT = 0x7 + EVFILT_TIMER = 0x6 + EVFILT_VNODE = 0x3 + EVFILT_WRITE = 0x1 + EV_ADD = 0x1 + EV_CLEAR = 0x20 + 
EV_DELETE = 0x2 + EV_DISABLE = 0x8 + EV_ENABLE = 0x4 + EV_EOF = 0x8000 + EV_ERROR = 0x4000 + EV_FLAG1 = 0x2000 + EV_ONESHOT = 0x10 + EV_SYSFLAGS = 0xf000 + EXTA = 0x4b00 + EXTB = 0x9600 + EXTPROC = 0x800 + FD_CLOEXEC = 0x1 + FD_SETSIZE = 0x100 + FLUSHO = 0x800000 + F_CLOSEM = 0xa + F_DUPFD = 0x0 + F_DUPFD_CLOEXEC = 0xc + F_FSCTL = -0x80000000 + F_FSDIRMASK = 0x70000000 + F_FSIN = 0x10000000 + F_FSINOUT = 0x30000000 + F_FSOUT = 0x20000000 + F_FSPRIV = 0x8000 + F_FSVOID = 0x40000000 + F_GETFD = 0x1 + F_GETFL = 0x3 + F_GETLK = 0x7 + F_GETNOSIGPIPE = 0xd + F_GETOWN = 0x5 + F_MAXFD = 0xb + F_OK = 0x0 + F_PARAM_MASK = 0xfff + F_PARAM_MAX = 0xfff + F_RDLCK = 0x1 + F_SETFD = 0x2 + F_SETFL = 0x4 + F_SETLK = 0x8 + F_SETLKW = 0x9 + F_SETNOSIGPIPE = 0xe + F_SETOWN = 0x6 + F_UNLCK = 0x2 + F_WRLCK = 0x3 + HUPCL = 0x4000 + ICANON = 0x100 + ICMP6_FILTER = 0x12 + ICRNL = 0x100 + IEXTEN = 0x400 + IFAN_ARRIVAL = 0x0 + IFAN_DEPARTURE = 0x1 + IFA_ROUTE = 0x1 + IFF_ALLMULTI = 0x200 + IFF_BROADCAST = 0x2 + IFF_CANTCHANGE = 0x8f52 + IFF_DEBUG = 0x4 + IFF_LINK0 = 0x1000 + IFF_LINK1 = 0x2000 + IFF_LINK2 = 0x4000 + IFF_LOOPBACK = 0x8 + IFF_MULTICAST = 0x8000 + IFF_NOARP = 0x80 + IFF_NOTRAILERS = 0x20 + IFF_OACTIVE = 0x400 + IFF_POINTOPOINT = 0x10 + IFF_PROMISC = 0x100 + IFF_RUNNING = 0x40 + IFF_SIMPLEX = 0x800 + IFF_UP = 0x1 + IFNAMSIZ = 0x10 + IFT_1822 = 0x2 + IFT_A12MPPSWITCH = 0x82 + IFT_AAL2 = 0xbb + IFT_AAL5 = 0x31 + IFT_ADSL = 0x5e + IFT_AFLANE8023 = 0x3b + IFT_AFLANE8025 = 0x3c + IFT_ARAP = 0x58 + IFT_ARCNET = 0x23 + IFT_ARCNETPLUS = 0x24 + IFT_ASYNC = 0x54 + IFT_ATM = 0x25 + IFT_ATMDXI = 0x69 + IFT_ATMFUNI = 0x6a + IFT_ATMIMA = 0x6b + IFT_ATMLOGICAL = 0x50 + IFT_ATMRADIO = 0xbd + IFT_ATMSUBINTERFACE = 0x86 + IFT_ATMVCIENDPT = 0xc2 + IFT_ATMVIRTUAL = 0x95 + IFT_BGPPOLICYACCOUNTING = 0xa2 + IFT_BRIDGE = 0xd1 + IFT_BSC = 0x53 + IFT_CARP = 0xf8 + IFT_CCTEMUL = 0x3d + IFT_CEPT = 0x13 + IFT_CES = 0x85 + IFT_CHANNEL = 0x46 + IFT_CNR = 0x55 + IFT_COFFEE = 0x84 + IFT_COMPOSITELINK = 0x9b + IFT_DCN = 0x8d + IFT_DIGITALPOWERLINE = 0x8a + IFT_DIGITALWRAPPEROVERHEADCHANNEL = 0xba + IFT_DLSW = 0x4a + IFT_DOCSCABLEDOWNSTREAM = 0x80 + IFT_DOCSCABLEMACLAYER = 0x7f + IFT_DOCSCABLEUPSTREAM = 0x81 + IFT_DOCSCABLEUPSTREAMCHANNEL = 0xcd + IFT_DS0 = 0x51 + IFT_DS0BUNDLE = 0x52 + IFT_DS1FDL = 0xaa + IFT_DS3 = 0x1e + IFT_DTM = 0x8c + IFT_DVBASILN = 0xac + IFT_DVBASIOUT = 0xad + IFT_DVBRCCDOWNSTREAM = 0x93 + IFT_DVBRCCMACLAYER = 0x92 + IFT_DVBRCCUPSTREAM = 0x94 + IFT_ECONET = 0xce + IFT_EON = 0x19 + IFT_EPLRS = 0x57 + IFT_ESCON = 0x49 + IFT_ETHER = 0x6 + IFT_FAITH = 0xf2 + IFT_FAST = 0x7d + IFT_FASTETHER = 0x3e + IFT_FASTETHERFX = 0x45 + IFT_FDDI = 0xf + IFT_FIBRECHANNEL = 0x38 + IFT_FRAMERELAYINTERCONNECT = 0x3a + IFT_FRAMERELAYMPI = 0x5c + IFT_FRDLCIENDPT = 0xc1 + IFT_FRELAY = 0x20 + IFT_FRELAYDCE = 0x2c + IFT_FRF16MFRBUNDLE = 0xa3 + IFT_FRFORWARD = 0x9e + IFT_G703AT2MB = 0x43 + IFT_G703AT64K = 0x42 + IFT_GIF = 0xf0 + IFT_GIGABITETHERNET = 0x75 + IFT_GR303IDT = 0xb2 + IFT_GR303RDT = 0xb1 + IFT_H323GATEKEEPER = 0xa4 + IFT_H323PROXY = 0xa5 + IFT_HDH1822 = 0x3 + IFT_HDLC = 0x76 + IFT_HDSL2 = 0xa8 + IFT_HIPERLAN2 = 0xb7 + IFT_HIPPI = 0x2f + IFT_HIPPIINTERFACE = 0x39 + IFT_HOSTPAD = 0x5a + IFT_HSSI = 0x2e + IFT_HY = 0xe + IFT_IBM370PARCHAN = 0x48 + IFT_IDSL = 0x9a + IFT_IEEE1394 = 0x90 + IFT_IEEE80211 = 0x47 + IFT_IEEE80212 = 0x37 + IFT_IEEE8023ADLAG = 0xa1 + IFT_IFGSN = 0x91 + IFT_IMT = 0xbe + IFT_INFINIBAND = 0xc7 + IFT_INTERLEAVE = 0x7c + IFT_IP = 0x7e + IFT_IPFORWARD = 0x8e + IFT_IPOVERATM = 0x72 + IFT_IPOVERCDLC = 0x6d + 
IFT_IPOVERCLAW = 0x6e + IFT_IPSWITCH = 0x4e + IFT_ISDN = 0x3f + IFT_ISDNBASIC = 0x14 + IFT_ISDNPRIMARY = 0x15 + IFT_ISDNS = 0x4b + IFT_ISDNU = 0x4c + IFT_ISO88022LLC = 0x29 + IFT_ISO88023 = 0x7 + IFT_ISO88024 = 0x8 + IFT_ISO88025 = 0x9 + IFT_ISO88025CRFPINT = 0x62 + IFT_ISO88025DTR = 0x56 + IFT_ISO88025FIBER = 0x73 + IFT_ISO88026 = 0xa + IFT_ISUP = 0xb3 + IFT_L2VLAN = 0x87 + IFT_L3IPVLAN = 0x88 + IFT_L3IPXVLAN = 0x89 + IFT_LAPB = 0x10 + IFT_LAPD = 0x4d + IFT_LAPF = 0x77 + IFT_LINEGROUP = 0xd2 + IFT_LOCALTALK = 0x2a + IFT_LOOP = 0x18 + IFT_MEDIAMAILOVERIP = 0x8b + IFT_MFSIGLINK = 0xa7 + IFT_MIOX25 = 0x26 + IFT_MODEM = 0x30 + IFT_MPC = 0x71 + IFT_MPLS = 0xa6 + IFT_MPLSTUNNEL = 0x96 + IFT_MSDSL = 0x8f + IFT_MVL = 0xbf + IFT_MYRINET = 0x63 + IFT_NFAS = 0xaf + IFT_NSIP = 0x1b + IFT_OPTICALCHANNEL = 0xc3 + IFT_OPTICALTRANSPORT = 0xc4 + IFT_OTHER = 0x1 + IFT_P10 = 0xc + IFT_P80 = 0xd + IFT_PARA = 0x22 + IFT_PFLOG = 0xf5 + IFT_PFSYNC = 0xf6 + IFT_PLC = 0xae + IFT_PON155 = 0xcf + IFT_PON622 = 0xd0 + IFT_POS = 0xab + IFT_PPP = 0x17 + IFT_PPPMULTILINKBUNDLE = 0x6c + IFT_PROPATM = 0xc5 + IFT_PROPBWAP2MP = 0xb8 + IFT_PROPCNLS = 0x59 + IFT_PROPDOCSWIRELESSDOWNSTREAM = 0xb5 + IFT_PROPDOCSWIRELESSMACLAYER = 0xb4 + IFT_PROPDOCSWIRELESSUPSTREAM = 0xb6 + IFT_PROPMUX = 0x36 + IFT_PROPVIRTUAL = 0x35 + IFT_PROPWIRELESSP2P = 0x9d + IFT_PTPSERIAL = 0x16 + IFT_PVC = 0xf1 + IFT_Q2931 = 0xc9 + IFT_QLLC = 0x44 + IFT_RADIOMAC = 0xbc + IFT_RADSL = 0x5f + IFT_REACHDSL = 0xc0 + IFT_RFC1483 = 0x9f + IFT_RS232 = 0x21 + IFT_RSRB = 0x4f + IFT_SDLC = 0x11 + IFT_SDSL = 0x60 + IFT_SHDSL = 0xa9 + IFT_SIP = 0x1f + IFT_SIPSIG = 0xcc + IFT_SIPTG = 0xcb + IFT_SLIP = 0x1c + IFT_SMDSDXI = 0x2b + IFT_SMDSICIP = 0x34 + IFT_SONET = 0x27 + IFT_SONETOVERHEADCHANNEL = 0xb9 + IFT_SONETPATH = 0x32 + IFT_SONETVT = 0x33 + IFT_SRP = 0x97 + IFT_SS7SIGLINK = 0x9c + IFT_STACKTOSTACK = 0x6f + IFT_STARLAN = 0xb + IFT_STF = 0xd7 + IFT_T1 = 0x12 + IFT_TDLC = 0x74 + IFT_TELINK = 0xc8 + IFT_TERMPAD = 0x5b + IFT_TR008 = 0xb0 + IFT_TRANSPHDLC = 0x7b + IFT_TUNNEL = 0x83 + IFT_ULTRA = 0x1d + IFT_USB = 0xa0 + IFT_V11 = 0x40 + IFT_V35 = 0x2d + IFT_V36 = 0x41 + IFT_V37 = 0x78 + IFT_VDSL = 0x61 + IFT_VIRTUALIPADDRESS = 0x70 + IFT_VIRTUALTG = 0xca + IFT_VOICEDID = 0xd5 + IFT_VOICEEM = 0x64 + IFT_VOICEEMFGD = 0xd3 + IFT_VOICEENCAP = 0x67 + IFT_VOICEFGDEANA = 0xd4 + IFT_VOICEFXO = 0x65 + IFT_VOICEFXS = 0x66 + IFT_VOICEOVERATM = 0x98 + IFT_VOICEOVERCABLE = 0xc6 + IFT_VOICEOVERFRAMERELAY = 0x99 + IFT_VOICEOVERIP = 0x68 + IFT_X213 = 0x5d + IFT_X25 = 0x5 + IFT_X25DDN = 0x4 + IFT_X25HUNTGROUP = 0x7a + IFT_X25MLP = 0x79 + IFT_X25PLE = 0x28 + IFT_XETHER = 0x1a + IGNBRK = 0x1 + IGNCR = 0x80 + IGNPAR = 0x4 + IMAXBEL = 0x2000 + INLCR = 0x40 + INPCK = 0x10 + IN_CLASSA_HOST = 0xffffff + IN_CLASSA_MAX = 0x80 + IN_CLASSA_NET = 0xff000000 + IN_CLASSA_NSHIFT = 0x18 + IN_CLASSB_HOST = 0xffff + IN_CLASSB_MAX = 0x10000 + IN_CLASSB_NET = 0xffff0000 + IN_CLASSB_NSHIFT = 0x10 + IN_CLASSC_HOST = 0xff + IN_CLASSC_NET = 0xffffff00 + IN_CLASSC_NSHIFT = 0x8 + IN_CLASSD_HOST = 0xfffffff + IN_CLASSD_NET = 0xf0000000 + IN_CLASSD_NSHIFT = 0x1c + IN_LOOPBACKNET = 0x7f + IPPROTO_AH = 0x33 + IPPROTO_CARP = 0x70 + IPPROTO_DONE = 0x101 + IPPROTO_DSTOPTS = 0x3c + IPPROTO_EGP = 0x8 + IPPROTO_ENCAP = 0x62 + IPPROTO_EON = 0x50 + IPPROTO_ESP = 0x32 + IPPROTO_ETHERIP = 0x61 + IPPROTO_FRAGMENT = 0x2c + IPPROTO_GGP = 0x3 + IPPROTO_GRE = 0x2f + IPPROTO_HOPOPTS = 0x0 + IPPROTO_ICMP = 0x1 + IPPROTO_ICMPV6 = 0x3a + IPPROTO_IDP = 0x16 + IPPROTO_IGMP = 0x2 + IPPROTO_IP = 0x0 + IPPROTO_IPCOMP = 0x6c + IPPROTO_IPIP 
= 0x4 + IPPROTO_IPV4 = 0x4 + IPPROTO_IPV6 = 0x29 + IPPROTO_IPV6_ICMP = 0x3a + IPPROTO_MAX = 0x100 + IPPROTO_MAXID = 0x34 + IPPROTO_MOBILE = 0x37 + IPPROTO_NONE = 0x3b + IPPROTO_PFSYNC = 0xf0 + IPPROTO_PIM = 0x67 + IPPROTO_PUP = 0xc + IPPROTO_RAW = 0xff + IPPROTO_ROUTING = 0x2b + IPPROTO_RSVP = 0x2e + IPPROTO_TCP = 0x6 + IPPROTO_TP = 0x1d + IPPROTO_UDP = 0x11 + IPPROTO_VRRP = 0x70 + IPV6_CHECKSUM = 0x1a + IPV6_DEFAULT_MULTICAST_HOPS = 0x1 + IPV6_DEFAULT_MULTICAST_LOOP = 0x1 + IPV6_DEFHLIM = 0x40 + IPV6_DONTFRAG = 0x3e + IPV6_DSTOPTS = 0x32 + IPV6_FAITH = 0x1d + IPV6_FLOWINFO_MASK = 0xffffff0f + IPV6_FLOWLABEL_MASK = 0xffff0f00 + IPV6_FRAGTTL = 0x78 + IPV6_HLIMDEC = 0x1 + IPV6_HOPLIMIT = 0x2f + IPV6_HOPOPTS = 0x31 + IPV6_IPSEC_POLICY = 0x1c + IPV6_JOIN_GROUP = 0xc + IPV6_LEAVE_GROUP = 0xd + IPV6_MAXHLIM = 0xff + IPV6_MAXPACKET = 0xffff + IPV6_MMTU = 0x500 + IPV6_MULTICAST_HOPS = 0xa + IPV6_MULTICAST_IF = 0x9 + IPV6_MULTICAST_LOOP = 0xb + IPV6_NEXTHOP = 0x30 + IPV6_PATHMTU = 0x2c + IPV6_PKTINFO = 0x2e + IPV6_PORTRANGE = 0xe + IPV6_PORTRANGE_DEFAULT = 0x0 + IPV6_PORTRANGE_HIGH = 0x1 + IPV6_PORTRANGE_LOW = 0x2 + IPV6_RECVDSTOPTS = 0x28 + IPV6_RECVHOPLIMIT = 0x25 + IPV6_RECVHOPOPTS = 0x27 + IPV6_RECVPATHMTU = 0x2b + IPV6_RECVPKTINFO = 0x24 + IPV6_RECVRTHDR = 0x26 + IPV6_RECVTCLASS = 0x39 + IPV6_RTHDR = 0x33 + IPV6_RTHDRDSTOPTS = 0x23 + IPV6_RTHDR_LOOSE = 0x0 + IPV6_RTHDR_STRICT = 0x1 + IPV6_RTHDR_TYPE_0 = 0x0 + IPV6_SOCKOPT_RESERVED1 = 0x3 + IPV6_TCLASS = 0x3d + IPV6_UNICAST_HOPS = 0x4 + IPV6_USE_MIN_MTU = 0x2a + IPV6_V6ONLY = 0x1b + IPV6_VERSION = 0x60 + IPV6_VERSION_MASK = 0xf0 + IP_ADD_MEMBERSHIP = 0xc + IP_DEFAULT_MULTICAST_LOOP = 0x1 + IP_DEFAULT_MULTICAST_TTL = 0x1 + IP_DF = 0x4000 + IP_DROP_MEMBERSHIP = 0xd + IP_EF = 0x8000 + IP_ERRORMTU = 0x15 + IP_HDRINCL = 0x2 + IP_IPSEC_POLICY = 0x16 + IP_MAXPACKET = 0xffff + IP_MAX_MEMBERSHIPS = 0x14 + IP_MF = 0x2000 + IP_MINFRAGSIZE = 0x45 + IP_MINTTL = 0x18 + IP_MSS = 0x240 + IP_MULTICAST_IF = 0x9 + IP_MULTICAST_LOOP = 0xb + IP_MULTICAST_TTL = 0xa + IP_OFFMASK = 0x1fff + IP_OPTIONS = 0x1 + IP_PORTRANGE = 0x13 + IP_PORTRANGE_DEFAULT = 0x0 + IP_PORTRANGE_HIGH = 0x1 + IP_PORTRANGE_LOW = 0x2 + IP_RECVDSTADDR = 0x7 + IP_RECVIF = 0x14 + IP_RECVOPTS = 0x5 + IP_RECVRETOPTS = 0x6 + IP_RECVTTL = 0x17 + IP_RETOPTS = 0x8 + IP_RF = 0x8000 + IP_TOS = 0x3 + IP_TTL = 0x4 + ISIG = 0x80 + ISTRIP = 0x20 + IXANY = 0x800 + IXOFF = 0x400 + IXON = 0x200 + LOCK_EX = 0x2 + LOCK_NB = 0x4 + LOCK_SH = 0x1 + LOCK_UN = 0x8 + MADV_DONTNEED = 0x4 + MADV_FREE = 0x6 + MADV_NORMAL = 0x0 + MADV_RANDOM = 0x1 + MADV_SEQUENTIAL = 0x2 + MADV_SPACEAVAIL = 0x5 + MADV_WILLNEED = 0x3 + MAP_ALIGNMENT_16MB = 0x18000000 + MAP_ALIGNMENT_1TB = 0x28000000 + MAP_ALIGNMENT_256TB = 0x30000000 + MAP_ALIGNMENT_4GB = 0x20000000 + MAP_ALIGNMENT_64KB = 0x10000000 + MAP_ALIGNMENT_64PB = 0x38000000 + MAP_ALIGNMENT_MASK = -0x1000000 + MAP_ALIGNMENT_SHIFT = 0x18 + MAP_ANON = 0x1000 + MAP_FILE = 0x0 + MAP_FIXED = 0x10 + MAP_HASSEMAPHORE = 0x200 + MAP_INHERIT = 0x80 + MAP_INHERIT_COPY = 0x1 + MAP_INHERIT_DEFAULT = 0x1 + MAP_INHERIT_DONATE_COPY = 0x3 + MAP_INHERIT_NONE = 0x2 + MAP_INHERIT_SHARE = 0x0 + MAP_NORESERVE = 0x40 + MAP_PRIVATE = 0x2 + MAP_RENAME = 0x20 + MAP_SHARED = 0x1 + MAP_STACK = 0x2000 + MAP_TRYFIXED = 0x400 + MAP_WIRED = 0x800 + MCL_CURRENT = 0x1 + MCL_FUTURE = 0x2 + MSG_BCAST = 0x100 + MSG_CMSG_CLOEXEC = 0x800 + MSG_CONTROLMBUF = 0x2000000 + MSG_CTRUNC = 0x20 + MSG_DONTROUTE = 0x4 + MSG_DONTWAIT = 0x80 + MSG_EOR = 0x8 + MSG_IOVUSRSPACE = 0x4000000 + MSG_LENUSRSPACE = 0x8000000 + MSG_MCAST = 
0x200 + MSG_NAMEMBUF = 0x1000000 + MSG_NBIO = 0x1000 + MSG_NOSIGNAL = 0x400 + MSG_OOB = 0x1 + MSG_PEEK = 0x2 + MSG_TRUNC = 0x10 + MSG_USERFLAGS = 0xffffff + MSG_WAITALL = 0x40 + MS_ASYNC = 0x1 + MS_INVALIDATE = 0x2 + MS_SYNC = 0x4 + NAME_MAX = 0x1ff + NET_RT_DUMP = 0x1 + NET_RT_FLAGS = 0x2 + NET_RT_IFLIST = 0x5 + NET_RT_MAXID = 0x6 + NET_RT_OIFLIST = 0x4 + NET_RT_OOIFLIST = 0x3 + NOFLSH = 0x80000000 + NOTE_ATTRIB = 0x8 + NOTE_CHILD = 0x4 + NOTE_DELETE = 0x1 + NOTE_EXEC = 0x20000000 + NOTE_EXIT = 0x80000000 + NOTE_EXTEND = 0x4 + NOTE_FORK = 0x40000000 + NOTE_LINK = 0x10 + NOTE_LOWAT = 0x1 + NOTE_PCTRLMASK = 0xf0000000 + NOTE_PDATAMASK = 0xfffff + NOTE_RENAME = 0x20 + NOTE_REVOKE = 0x40 + NOTE_TRACK = 0x1 + NOTE_TRACKERR = 0x2 + NOTE_WRITE = 0x2 + OCRNL = 0x10 + OFIOGETBMAP = 0xc004667a + ONLCR = 0x2 + ONLRET = 0x40 + ONOCR = 0x20 + ONOEOT = 0x8 + OPOST = 0x1 + O_ACCMODE = 0x3 + O_ALT_IO = 0x40000 + O_APPEND = 0x8 + O_ASYNC = 0x40 + O_CLOEXEC = 0x400000 + O_CREAT = 0x200 + O_DIRECT = 0x80000 + O_DIRECTORY = 0x200000 + O_DSYNC = 0x10000 + O_EXCL = 0x800 + O_EXLOCK = 0x20 + O_FSYNC = 0x80 + O_NDELAY = 0x4 + O_NOCTTY = 0x8000 + O_NOFOLLOW = 0x100 + O_NONBLOCK = 0x4 + O_NOSIGPIPE = 0x1000000 + O_RDONLY = 0x0 + O_RDWR = 0x2 + O_RSYNC = 0x20000 + O_SHLOCK = 0x10 + O_SYNC = 0x80 + O_TRUNC = 0x400 + O_WRONLY = 0x1 + PARENB = 0x1000 + PARMRK = 0x8 + PARODD = 0x2000 + PENDIN = 0x20000000 + PRIO_PGRP = 0x1 + PRIO_PROCESS = 0x0 + PRIO_USER = 0x2 + PRI_IOFLUSH = 0x7c + PROT_EXEC = 0x4 + PROT_NONE = 0x0 + PROT_READ = 0x1 + PROT_WRITE = 0x2 + RLIMIT_AS = 0xa + RLIMIT_CORE = 0x4 + RLIMIT_CPU = 0x0 + RLIMIT_DATA = 0x2 + RLIMIT_FSIZE = 0x1 + RLIMIT_NOFILE = 0x8 + RLIMIT_STACK = 0x3 + RLIM_INFINITY = 0x7fffffffffffffff + RTAX_AUTHOR = 0x6 + RTAX_BRD = 0x7 + RTAX_DST = 0x0 + RTAX_GATEWAY = 0x1 + RTAX_GENMASK = 0x3 + RTAX_IFA = 0x5 + RTAX_IFP = 0x4 + RTAX_MAX = 0x9 + RTAX_NETMASK = 0x2 + RTAX_TAG = 0x8 + RTA_AUTHOR = 0x40 + RTA_BRD = 0x80 + RTA_DST = 0x1 + RTA_GATEWAY = 0x2 + RTA_GENMASK = 0x8 + RTA_IFA = 0x20 + RTA_IFP = 0x10 + RTA_NETMASK = 0x4 + RTA_TAG = 0x100 + RTF_ANNOUNCE = 0x20000 + RTF_BLACKHOLE = 0x1000 + RTF_CLONED = 0x2000 + RTF_CLONING = 0x100 + RTF_DONE = 0x40 + RTF_DYNAMIC = 0x10 + RTF_GATEWAY = 0x2 + RTF_HOST = 0x4 + RTF_LLINFO = 0x400 + RTF_MASK = 0x80 + RTF_MODIFIED = 0x20 + RTF_PROTO1 = 0x8000 + RTF_PROTO2 = 0x4000 + RTF_REJECT = 0x8 + RTF_SRC = 0x10000 + RTF_STATIC = 0x800 + RTF_UP = 0x1 + RTF_XRESOLVE = 0x200 + RTM_ADD = 0x1 + RTM_CHANGE = 0x3 + RTM_CHGADDR = 0x15 + RTM_DELADDR = 0xd + RTM_DELETE = 0x2 + RTM_GET = 0x4 + RTM_IEEE80211 = 0x11 + RTM_IFANNOUNCE = 0x10 + RTM_IFINFO = 0x14 + RTM_LLINFO_UPD = 0x13 + RTM_LOCK = 0x8 + RTM_LOSING = 0x5 + RTM_MISS = 0x7 + RTM_NEWADDR = 0xc + RTM_OIFINFO = 0xf + RTM_OLDADD = 0x9 + RTM_OLDDEL = 0xa + RTM_OOIFINFO = 0xe + RTM_REDIRECT = 0x6 + RTM_RESOLVE = 0xb + RTM_RTTUNIT = 0xf4240 + RTM_SETGATE = 0x12 + RTM_VERSION = 0x4 + RTV_EXPIRE = 0x4 + RTV_HOPCOUNT = 0x2 + RTV_MTU = 0x1 + RTV_RPIPE = 0x8 + RTV_RTT = 0x40 + RTV_RTTVAR = 0x80 + RTV_SPIPE = 0x10 + RTV_SSTHRESH = 0x20 + RUSAGE_CHILDREN = -0x1 + RUSAGE_SELF = 0x0 + SCM_CREDS = 0x4 + SCM_RIGHTS = 0x1 + SCM_TIMESTAMP = 0x8 + SHUT_RD = 0x0 + SHUT_RDWR = 0x2 + SHUT_WR = 0x1 + SIOCADDMULTI = 0x80906931 + SIOCADDRT = 0x8038720a + SIOCAIFADDR = 0x8040691a + SIOCALIFADDR = 0x8118691c + SIOCATMARK = 0x40047307 + SIOCDELMULTI = 0x80906932 + SIOCDELRT = 0x8038720b + SIOCDIFADDR = 0x80906919 + SIOCDIFPHYADDR = 0x80906949 + SIOCDLIFADDR = 0x8118691e + SIOCGDRVSPEC = 0xc028697b + SIOCGETPFSYNC = 0xc09069f8 + 
SIOCGETSGCNT = 0xc0207534 + SIOCGETVIFCNT = 0xc0287533 + SIOCGHIWAT = 0x40047301 + SIOCGIFADDR = 0xc0906921 + SIOCGIFADDRPREF = 0xc0986920 + SIOCGIFALIAS = 0xc040691b + SIOCGIFBRDADDR = 0xc0906923 + SIOCGIFCAP = 0xc0206976 + SIOCGIFCONF = 0xc0106926 + SIOCGIFDATA = 0xc0986985 + SIOCGIFDLT = 0xc0906977 + SIOCGIFDSTADDR = 0xc0906922 + SIOCGIFFLAGS = 0xc0906911 + SIOCGIFGENERIC = 0xc090693a + SIOCGIFMEDIA = 0xc0306936 + SIOCGIFMETRIC = 0xc0906917 + SIOCGIFMTU = 0xc090697e + SIOCGIFNETMASK = 0xc0906925 + SIOCGIFPDSTADDR = 0xc0906948 + SIOCGIFPSRCADDR = 0xc0906947 + SIOCGLIFADDR = 0xc118691d + SIOCGLIFPHYADDR = 0xc118694b + SIOCGLINKSTR = 0xc0286987 + SIOCGLOWAT = 0x40047303 + SIOCGPGRP = 0x40047309 + SIOCGVH = 0xc0906983 + SIOCIFCREATE = 0x8090697a + SIOCIFDESTROY = 0x80906979 + SIOCIFGCLONERS = 0xc0106978 + SIOCINITIFADDR = 0xc0706984 + SIOCSDRVSPEC = 0x8028697b + SIOCSETPFSYNC = 0x809069f7 + SIOCSHIWAT = 0x80047300 + SIOCSIFADDR = 0x8090690c + SIOCSIFADDRPREF = 0x8098691f + SIOCSIFBRDADDR = 0x80906913 + SIOCSIFCAP = 0x80206975 + SIOCSIFDSTADDR = 0x8090690e + SIOCSIFFLAGS = 0x80906910 + SIOCSIFGENERIC = 0x80906939 + SIOCSIFMEDIA = 0xc0906935 + SIOCSIFMETRIC = 0x80906918 + SIOCSIFMTU = 0x8090697f + SIOCSIFNETMASK = 0x80906916 + SIOCSIFPHYADDR = 0x80406946 + SIOCSLIFPHYADDR = 0x8118694a + SIOCSLINKSTR = 0x80286988 + SIOCSLOWAT = 0x80047302 + SIOCSPGRP = 0x80047308 + SIOCSVH = 0xc0906982 + SIOCZIFDATA = 0xc0986986 + SOCK_CLOEXEC = 0x10000000 + SOCK_DGRAM = 0x2 + SOCK_FLAGS_MASK = 0xf0000000 + SOCK_NONBLOCK = 0x20000000 + SOCK_NOSIGPIPE = 0x40000000 + SOCK_RAW = 0x3 + SOCK_RDM = 0x4 + SOCK_SEQPACKET = 0x5 + SOCK_STREAM = 0x1 + SOL_SOCKET = 0xffff + SOMAXCONN = 0x80 + SO_ACCEPTCONN = 0x2 + SO_ACCEPTFILTER = 0x1000 + SO_BROADCAST = 0x20 + SO_DEBUG = 0x1 + SO_DONTROUTE = 0x10 + SO_ERROR = 0x1007 + SO_KEEPALIVE = 0x8 + SO_LINGER = 0x80 + SO_NOHEADER = 0x100a + SO_NOSIGPIPE = 0x800 + SO_OOBINLINE = 0x100 + SO_OVERFLOWED = 0x1009 + SO_RCVBUF = 0x1002 + SO_RCVLOWAT = 0x1004 + SO_RCVTIMEO = 0x100c + SO_REUSEADDR = 0x4 + SO_REUSEPORT = 0x200 + SO_SNDBUF = 0x1001 + SO_SNDLOWAT = 0x1003 + SO_SNDTIMEO = 0x100b + SO_TIMESTAMP = 0x2000 + SO_TYPE = 0x1008 + SO_USELOOPBACK = 0x40 + SYSCTL_VERSION = 0x1000000 + SYSCTL_VERS_0 = 0x0 + SYSCTL_VERS_1 = 0x1000000 + SYSCTL_VERS_MASK = 0xff000000 + S_ARCH1 = 0x10000 + S_ARCH2 = 0x20000 + S_BLKSIZE = 0x200 + S_IEXEC = 0x40 + S_IFBLK = 0x6000 + S_IFCHR = 0x2000 + S_IFDIR = 0x4000 + S_IFIFO = 0x1000 + S_IFLNK = 0xa000 + S_IFMT = 0xf000 + S_IFREG = 0x8000 + S_IFSOCK = 0xc000 + S_IFWHT = 0xe000 + S_IREAD = 0x100 + S_IRGRP = 0x20 + S_IROTH = 0x4 + S_IRUSR = 0x100 + S_IRWXG = 0x38 + S_IRWXO = 0x7 + S_IRWXU = 0x1c0 + S_ISGID = 0x400 + S_ISTXT = 0x200 + S_ISUID = 0x800 + S_ISVTX = 0x200 + S_IWGRP = 0x10 + S_IWOTH = 0x2 + S_IWRITE = 0x80 + S_IWUSR = 0x80 + S_IXGRP = 0x8 + S_IXOTH = 0x1 + S_IXUSR = 0x40 + S_LOGIN_SET = 0x1 + TCIFLUSH = 0x1 + TCIOFLUSH = 0x3 + TCOFLUSH = 0x2 + TCP_CONGCTL = 0x20 + TCP_KEEPCNT = 0x6 + TCP_KEEPIDLE = 0x3 + TCP_KEEPINIT = 0x7 + TCP_KEEPINTVL = 0x5 + TCP_MAXBURST = 0x4 + TCP_MAXSEG = 0x2 + TCP_MAXWIN = 0xffff + TCP_MAX_WINSHIFT = 0xe + TCP_MD5SIG = 0x10 + TCP_MINMSS = 0xd8 + TCP_MSS = 0x218 + TCP_NODELAY = 0x1 + TCSAFLUSH = 0x2 + TIOCCBRK = 0x2000747a + TIOCCDTR = 0x20007478 + TIOCCONS = 0x80047462 + TIOCDCDTIMESTAMP = 0x40107458 + TIOCDRAIN = 0x2000745e + TIOCEXCL = 0x2000740d + TIOCEXT = 0x80047460 + TIOCFLAG_CDTRCTS = 0x10 + TIOCFLAG_CLOCAL = 0x2 + TIOCFLAG_CRTSCTS = 0x4 + TIOCFLAG_MDMBUF = 0x8 + TIOCFLAG_SOFTCAR = 0x1 + TIOCFLUSH = 0x80047410 + 
TIOCGETA = 0x402c7413 + TIOCGETD = 0x4004741a + TIOCGFLAGS = 0x4004745d + TIOCGLINED = 0x40207442 + TIOCGPGRP = 0x40047477 + TIOCGQSIZE = 0x40047481 + TIOCGRANTPT = 0x20007447 + TIOCGSID = 0x40047463 + TIOCGSIZE = 0x40087468 + TIOCGWINSZ = 0x40087468 + TIOCMBIC = 0x8004746b + TIOCMBIS = 0x8004746c + TIOCMGET = 0x4004746a + TIOCMSET = 0x8004746d + TIOCM_CAR = 0x40 + TIOCM_CD = 0x40 + TIOCM_CTS = 0x20 + TIOCM_DSR = 0x100 + TIOCM_DTR = 0x2 + TIOCM_LE = 0x1 + TIOCM_RI = 0x80 + TIOCM_RNG = 0x80 + TIOCM_RTS = 0x4 + TIOCM_SR = 0x10 + TIOCM_ST = 0x8 + TIOCNOTTY = 0x20007471 + TIOCNXCL = 0x2000740e + TIOCOUTQ = 0x40047473 + TIOCPKT = 0x80047470 + TIOCPKT_DATA = 0x0 + TIOCPKT_DOSTOP = 0x20 + TIOCPKT_FLUSHREAD = 0x1 + TIOCPKT_FLUSHWRITE = 0x2 + TIOCPKT_IOCTL = 0x40 + TIOCPKT_NOSTOP = 0x10 + TIOCPKT_START = 0x8 + TIOCPKT_STOP = 0x4 + TIOCPTMGET = 0x40287446 + TIOCPTSNAME = 0x40287448 + TIOCRCVFRAME = 0x80087445 + TIOCREMOTE = 0x80047469 + TIOCSBRK = 0x2000747b + TIOCSCTTY = 0x20007461 + TIOCSDTR = 0x20007479 + TIOCSETA = 0x802c7414 + TIOCSETAF = 0x802c7416 + TIOCSETAW = 0x802c7415 + TIOCSETD = 0x8004741b + TIOCSFLAGS = 0x8004745c + TIOCSIG = 0x2000745f + TIOCSLINED = 0x80207443 + TIOCSPGRP = 0x80047476 + TIOCSQSIZE = 0x80047480 + TIOCSSIZE = 0x80087467 + TIOCSTART = 0x2000746e + TIOCSTAT = 0x80047465 + TIOCSTI = 0x80017472 + TIOCSTOP = 0x2000746f + TIOCSWINSZ = 0x80087467 + TIOCUCNTL = 0x80047466 + TIOCXMTFRAME = 0x80087444 + TOSTOP = 0x400000 + VDISCARD = 0xf + VDSUSP = 0xb + VEOF = 0x0 + VEOL = 0x1 + VEOL2 = 0x2 + VERASE = 0x3 + VINTR = 0x8 + VKILL = 0x5 + VLNEXT = 0xe + VMIN = 0x10 + VQUIT = 0x9 + VREPRINT = 0x6 + VSTART = 0xc + VSTATUS = 0x12 + VSTOP = 0xd + VSUSP = 0xa + VTIME = 0x11 + VWERASE = 0x4 + WALL = 0x8 + WALLSIG = 0x8 + WALTSIG = 0x4 + WCLONE = 0x4 + WCOREFLAG = 0x80 + WNOHANG = 0x1 + WNOWAIT = 0x10000 + WNOZOMBIE = 0x20000 + WOPTSCHECKED = 0x40000 + WSTOPPED = 0x7f + WUNTRACED = 0x2 +) + +// Errors +const ( + E2BIG = syscall.Errno(0x7) + EACCES = syscall.Errno(0xd) + EADDRINUSE = syscall.Errno(0x30) + EADDRNOTAVAIL = syscall.Errno(0x31) + EAFNOSUPPORT = syscall.Errno(0x2f) + EAGAIN = syscall.Errno(0x23) + EALREADY = syscall.Errno(0x25) + EAUTH = syscall.Errno(0x50) + EBADF = syscall.Errno(0x9) + EBADMSG = syscall.Errno(0x58) + EBADRPC = syscall.Errno(0x48) + EBUSY = syscall.Errno(0x10) + ECANCELED = syscall.Errno(0x57) + ECHILD = syscall.Errno(0xa) + ECONNABORTED = syscall.Errno(0x35) + ECONNREFUSED = syscall.Errno(0x3d) + ECONNRESET = syscall.Errno(0x36) + EDEADLK = syscall.Errno(0xb) + EDESTADDRREQ = syscall.Errno(0x27) + EDOM = syscall.Errno(0x21) + EDQUOT = syscall.Errno(0x45) + EEXIST = syscall.Errno(0x11) + EFAULT = syscall.Errno(0xe) + EFBIG = syscall.Errno(0x1b) + EFTYPE = syscall.Errno(0x4f) + EHOSTDOWN = syscall.Errno(0x40) + EHOSTUNREACH = syscall.Errno(0x41) + EIDRM = syscall.Errno(0x52) + EILSEQ = syscall.Errno(0x55) + EINPROGRESS = syscall.Errno(0x24) + EINTR = syscall.Errno(0x4) + EINVAL = syscall.Errno(0x16) + EIO = syscall.Errno(0x5) + EISCONN = syscall.Errno(0x38) + EISDIR = syscall.Errno(0x15) + ELAST = syscall.Errno(0x60) + ELOOP = syscall.Errno(0x3e) + EMFILE = syscall.Errno(0x18) + EMLINK = syscall.Errno(0x1f) + EMSGSIZE = syscall.Errno(0x28) + EMULTIHOP = syscall.Errno(0x5e) + ENAMETOOLONG = syscall.Errno(0x3f) + ENEEDAUTH = syscall.Errno(0x51) + ENETDOWN = syscall.Errno(0x32) + ENETRESET = syscall.Errno(0x34) + ENETUNREACH = syscall.Errno(0x33) + ENFILE = syscall.Errno(0x17) + ENOATTR = syscall.Errno(0x5d) + ENOBUFS = syscall.Errno(0x37) + ENODATA = 
syscall.Errno(0x59) + ENODEV = syscall.Errno(0x13) + ENOENT = syscall.Errno(0x2) + ENOEXEC = syscall.Errno(0x8) + ENOLCK = syscall.Errno(0x4d) + ENOLINK = syscall.Errno(0x5f) + ENOMEM = syscall.Errno(0xc) + ENOMSG = syscall.Errno(0x53) + ENOPROTOOPT = syscall.Errno(0x2a) + ENOSPC = syscall.Errno(0x1c) + ENOSR = syscall.Errno(0x5a) + ENOSTR = syscall.Errno(0x5b) + ENOSYS = syscall.Errno(0x4e) + ENOTBLK = syscall.Errno(0xf) + ENOTCONN = syscall.Errno(0x39) + ENOTDIR = syscall.Errno(0x14) + ENOTEMPTY = syscall.Errno(0x42) + ENOTSOCK = syscall.Errno(0x26) + ENOTSUP = syscall.Errno(0x56) + ENOTTY = syscall.Errno(0x19) + ENXIO = syscall.Errno(0x6) + EOPNOTSUPP = syscall.Errno(0x2d) + EOVERFLOW = syscall.Errno(0x54) + EPERM = syscall.Errno(0x1) + EPFNOSUPPORT = syscall.Errno(0x2e) + EPIPE = syscall.Errno(0x20) + EPROCLIM = syscall.Errno(0x43) + EPROCUNAVAIL = syscall.Errno(0x4c) + EPROGMISMATCH = syscall.Errno(0x4b) + EPROGUNAVAIL = syscall.Errno(0x4a) + EPROTO = syscall.Errno(0x60) + EPROTONOSUPPORT = syscall.Errno(0x2b) + EPROTOTYPE = syscall.Errno(0x29) + ERANGE = syscall.Errno(0x22) + EREMOTE = syscall.Errno(0x47) + EROFS = syscall.Errno(0x1e) + ERPCMISMATCH = syscall.Errno(0x49) + ESHUTDOWN = syscall.Errno(0x3a) + ESOCKTNOSUPPORT = syscall.Errno(0x2c) + ESPIPE = syscall.Errno(0x1d) + ESRCH = syscall.Errno(0x3) + ESTALE = syscall.Errno(0x46) + ETIME = syscall.Errno(0x5c) + ETIMEDOUT = syscall.Errno(0x3c) + ETOOMANYREFS = syscall.Errno(0x3b) + ETXTBSY = syscall.Errno(0x1a) + EUSERS = syscall.Errno(0x44) + EWOULDBLOCK = syscall.Errno(0x23) + EXDEV = syscall.Errno(0x12) +) + +// Signals +const ( + SIGABRT = syscall.Signal(0x6) + SIGALRM = syscall.Signal(0xe) + SIGBUS = syscall.Signal(0xa) + SIGCHLD = syscall.Signal(0x14) + SIGCONT = syscall.Signal(0x13) + SIGEMT = syscall.Signal(0x7) + SIGFPE = syscall.Signal(0x8) + SIGHUP = syscall.Signal(0x1) + SIGILL = syscall.Signal(0x4) + SIGINFO = syscall.Signal(0x1d) + SIGINT = syscall.Signal(0x2) + SIGIO = syscall.Signal(0x17) + SIGIOT = syscall.Signal(0x6) + SIGKILL = syscall.Signal(0x9) + SIGPIPE = syscall.Signal(0xd) + SIGPROF = syscall.Signal(0x1b) + SIGPWR = syscall.Signal(0x20) + SIGQUIT = syscall.Signal(0x3) + SIGSEGV = syscall.Signal(0xb) + SIGSTOP = syscall.Signal(0x11) + SIGSYS = syscall.Signal(0xc) + SIGTERM = syscall.Signal(0xf) + SIGTRAP = syscall.Signal(0x5) + SIGTSTP = syscall.Signal(0x12) + SIGTTIN = syscall.Signal(0x15) + SIGTTOU = syscall.Signal(0x16) + SIGURG = syscall.Signal(0x10) + SIGUSR1 = syscall.Signal(0x1e) + SIGUSR2 = syscall.Signal(0x1f) + SIGVTALRM = syscall.Signal(0x1a) + SIGWINCH = syscall.Signal(0x1c) + SIGXCPU = syscall.Signal(0x18) + SIGXFSZ = syscall.Signal(0x19) +) + +// Error table +var errors = [...]string{ + 1: "operation not permitted", + 2: "no such file or directory", + 3: "no such process", + 4: "interrupted system call", + 5: "input/output error", + 6: "device not configured", + 7: "argument list too long", + 8: "exec format error", + 9: "bad file descriptor", + 10: "no child processes", + 11: "resource deadlock avoided", + 12: "cannot allocate memory", + 13: "permission denied", + 14: "bad address", + 15: "block device required", + 16: "device busy", + 17: "file exists", + 18: "cross-device link", + 19: "operation not supported by device", + 20: "not a directory", + 21: "is a directory", + 22: "invalid argument", + 23: "too many open files in system", + 24: "too many open files", + 25: "inappropriate ioctl for device", + 26: "text file busy", + 27: "file too large", + 28: "no space left on device", + 29: 
"illegal seek", + 30: "read-only file system", + 31: "too many links", + 32: "broken pipe", + 33: "numerical argument out of domain", + 34: "result too large or too small", + 35: "resource temporarily unavailable", + 36: "operation now in progress", + 37: "operation already in progress", + 38: "socket operation on non-socket", + 39: "destination address required", + 40: "message too long", + 41: "protocol wrong type for socket", + 42: "protocol option not available", + 43: "protocol not supported", + 44: "socket type not supported", + 45: "operation not supported", + 46: "protocol family not supported", + 47: "address family not supported by protocol family", + 48: "address already in use", + 49: "can't assign requested address", + 50: "network is down", + 51: "network is unreachable", + 52: "network dropped connection on reset", + 53: "software caused connection abort", + 54: "connection reset by peer", + 55: "no buffer space available", + 56: "socket is already connected", + 57: "socket is not connected", + 58: "can't send after socket shutdown", + 59: "too many references: can't splice", + 60: "connection timed out", + 61: "connection refused", + 62: "too many levels of symbolic links", + 63: "file name too long", + 64: "host is down", + 65: "no route to host", + 66: "directory not empty", + 67: "too many processes", + 68: "too many users", + 69: "disc quota exceeded", + 70: "stale NFS file handle", + 71: "too many levels of remote in path", + 72: "RPC struct is bad", + 73: "RPC version wrong", + 74: "RPC prog. not avail", + 75: "program version wrong", + 76: "bad procedure for program", + 77: "no locks available", + 78: "function not implemented", + 79: "inappropriate file type or format", + 80: "authentication error", + 81: "need authenticator", + 82: "identifier removed", + 83: "no message of desired type", + 84: "value too large to be stored in data type", + 85: "illegal byte sequence", + 86: "not supported", + 87: "operation Canceled", + 88: "bad or Corrupt message", + 89: "no message available", + 90: "no STREAM resources", + 91: "not a STREAM", + 92: "STREAM ioctl timeout", + 93: "attribute not found", + 94: "multihop attempted", + 95: "link has been severed", + 96: "protocol error", +} + +// Signal table +var signals = [...]string{ + 1: "hangup", + 2: "interrupt", + 3: "quit", + 4: "illegal instruction", + 5: "trace/BPT trap", + 6: "abort trap", + 7: "EMT trap", + 8: "floating point exception", + 9: "killed", + 10: "bus error", + 11: "segmentation fault", + 12: "bad system call", + 13: "broken pipe", + 14: "alarm clock", + 15: "terminated", + 16: "urgent I/O condition", + 17: "stopped (signal)", + 18: "stopped", + 19: "continued", + 20: "child exited", + 21: "stopped (tty input)", + 22: "stopped (tty output)", + 23: "I/O possible", + 24: "cputime limit exceeded", + 25: "filesize limit exceeded", + 26: "virtual timer expired", + 27: "profiling timer expired", + 28: "window size changes", + 29: "information request", + 30: "user defined signal 1", + 31: "user defined signal 2", + 32: "power fail/restart", +} diff --git a/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zerrors_netbsd_arm.go b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zerrors_netbsd_arm.go new file mode 100644 index 000000000..ac85ca645 --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zerrors_netbsd_arm.go @@ -0,0 +1,1688 @@ +// mkerrors.sh -marm +// MACHINE GENERATED BY THE COMMAND ABOVE; DO NOT EDIT + +// +build 
arm,netbsd + +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs -- -marm _const.go + +package unix + +import "syscall" + +const ( + AF_APPLETALK = 0x10 + AF_ARP = 0x1c + AF_BLUETOOTH = 0x1f + AF_CCITT = 0xa + AF_CHAOS = 0x5 + AF_CNT = 0x15 + AF_COIP = 0x14 + AF_DATAKIT = 0x9 + AF_DECnet = 0xc + AF_DLI = 0xd + AF_E164 = 0x1a + AF_ECMA = 0x8 + AF_HYLINK = 0xf + AF_IEEE80211 = 0x20 + AF_IMPLINK = 0x3 + AF_INET = 0x2 + AF_INET6 = 0x18 + AF_IPX = 0x17 + AF_ISDN = 0x1a + AF_ISO = 0x7 + AF_LAT = 0xe + AF_LINK = 0x12 + AF_LOCAL = 0x1 + AF_MAX = 0x23 + AF_MPLS = 0x21 + AF_NATM = 0x1b + AF_NS = 0x6 + AF_OROUTE = 0x11 + AF_OSI = 0x7 + AF_PUP = 0x4 + AF_ROUTE = 0x22 + AF_SNA = 0xb + AF_UNIX = 0x1 + AF_UNSPEC = 0x0 + ARPHRD_ARCNET = 0x7 + ARPHRD_ETHER = 0x1 + ARPHRD_FRELAY = 0xf + ARPHRD_IEEE1394 = 0x18 + ARPHRD_IEEE802 = 0x6 + ARPHRD_STRIP = 0x17 + B0 = 0x0 + B110 = 0x6e + B115200 = 0x1c200 + B1200 = 0x4b0 + B134 = 0x86 + B14400 = 0x3840 + B150 = 0x96 + B1800 = 0x708 + B19200 = 0x4b00 + B200 = 0xc8 + B230400 = 0x38400 + B2400 = 0x960 + B28800 = 0x7080 + B300 = 0x12c + B38400 = 0x9600 + B460800 = 0x70800 + B4800 = 0x12c0 + B50 = 0x32 + B57600 = 0xe100 + B600 = 0x258 + B7200 = 0x1c20 + B75 = 0x4b + B76800 = 0x12c00 + B921600 = 0xe1000 + B9600 = 0x2580 + BIOCFEEDBACK = 0x8004427d + BIOCFLUSH = 0x20004268 + BIOCGBLEN = 0x40044266 + BIOCGDLT = 0x4004426a + BIOCGDLTLIST = 0xc0084277 + BIOCGETIF = 0x4090426b + BIOCGFEEDBACK = 0x4004427c + BIOCGHDRCMPLT = 0x40044274 + BIOCGRTIMEOUT = 0x400c427b + BIOCGSEESENT = 0x40044278 + BIOCGSTATS = 0x4080426f + BIOCGSTATSOLD = 0x4008426f + BIOCIMMEDIATE = 0x80044270 + BIOCPROMISC = 0x20004269 + BIOCSBLEN = 0xc0044266 + BIOCSDLT = 0x80044276 + BIOCSETF = 0x80084267 + BIOCSETIF = 0x8090426c + BIOCSFEEDBACK = 0x8004427d + BIOCSHDRCMPLT = 0x80044275 + BIOCSRTIMEOUT = 0x800c427a + BIOCSSEESENT = 0x80044279 + BIOCSTCPF = 0x80084272 + BIOCSUDPF = 0x80084273 + BIOCVERSION = 0x40044271 + BPF_A = 0x10 + BPF_ABS = 0x20 + BPF_ADD = 0x0 + BPF_ALIGNMENT = 0x4 + BPF_ALIGNMENT32 = 0x4 + BPF_ALU = 0x4 + BPF_AND = 0x50 + BPF_B = 0x10 + BPF_DFLTBUFSIZE = 0x100000 + BPF_DIV = 0x30 + BPF_H = 0x8 + BPF_IMM = 0x0 + BPF_IND = 0x40 + BPF_JA = 0x0 + BPF_JEQ = 0x10 + BPF_JGE = 0x30 + BPF_JGT = 0x20 + BPF_JMP = 0x5 + BPF_JSET = 0x40 + BPF_K = 0x0 + BPF_LD = 0x0 + BPF_LDX = 0x1 + BPF_LEN = 0x80 + BPF_LSH = 0x60 + BPF_MAJOR_VERSION = 0x1 + BPF_MAXBUFSIZE = 0x1000000 + BPF_MAXINSNS = 0x200 + BPF_MEM = 0x60 + BPF_MEMWORDS = 0x10 + BPF_MINBUFSIZE = 0x20 + BPF_MINOR_VERSION = 0x1 + BPF_MISC = 0x7 + BPF_MSH = 0xa0 + BPF_MUL = 0x20 + BPF_NEG = 0x80 + BPF_OR = 0x40 + BPF_RELEASE = 0x30bb6 + BPF_RET = 0x6 + BPF_RSH = 0x70 + BPF_ST = 0x2 + BPF_STX = 0x3 + BPF_SUB = 0x10 + BPF_TAX = 0x0 + BPF_TXA = 0x80 + BPF_W = 0x0 + BPF_X = 0x8 + BRKINT = 0x2 + CFLUSH = 0xf + CLOCAL = 0x8000 + CREAD = 0x800 + CS5 = 0x0 + CS6 = 0x100 + CS7 = 0x200 + CS8 = 0x300 + CSIZE = 0x300 + CSTART = 0x11 + CSTATUS = 0x14 + CSTOP = 0x13 + CSTOPB = 0x400 + CSUSP = 0x1a + CTL_MAXNAME = 0xc + CTL_NET = 0x4 + CTL_QUERY = -0x2 + DIOCBSFLUSH = 0x20006478 + DLT_A429 = 0xb8 + DLT_A653_ICM = 0xb9 + DLT_AIRONET_HEADER = 0x78 + DLT_AOS = 0xde + DLT_APPLE_IP_OVER_IEEE1394 = 0x8a + DLT_ARCNET = 0x7 + DLT_ARCNET_LINUX = 0x81 + DLT_ATM_CLIP = 0x13 + DLT_ATM_RFC1483 = 0xb + DLT_AURORA = 0x7e + DLT_AX25 = 0x3 + DLT_AX25_KISS = 0xca + DLT_BACNET_MS_TP = 0xa5 + DLT_BLUETOOTH_HCI_H4 = 0xbb + DLT_BLUETOOTH_HCI_H4_WITH_PHDR = 0xc9 + DLT_CAN20B = 0xbe + DLT_CAN_SOCKETCAN = 0xe3 + DLT_CHAOS = 0x5 + DLT_CISCO_IOS = 0x76 + DLT_C_HDLC = 0x68 + 
DLT_C_HDLC_WITH_DIR = 0xcd + DLT_DECT = 0xdd + DLT_DOCSIS = 0x8f + DLT_ECONET = 0x73 + DLT_EN10MB = 0x1 + DLT_EN3MB = 0x2 + DLT_ENC = 0x6d + DLT_ERF = 0xc5 + DLT_ERF_ETH = 0xaf + DLT_ERF_POS = 0xb0 + DLT_FC_2 = 0xe0 + DLT_FC_2_WITH_FRAME_DELIMS = 0xe1 + DLT_FDDI = 0xa + DLT_FLEXRAY = 0xd2 + DLT_FRELAY = 0x6b + DLT_FRELAY_WITH_DIR = 0xce + DLT_GCOM_SERIAL = 0xad + DLT_GCOM_T1E1 = 0xac + DLT_GPF_F = 0xab + DLT_GPF_T = 0xaa + DLT_GPRS_LLC = 0xa9 + DLT_GSMTAP_ABIS = 0xda + DLT_GSMTAP_UM = 0xd9 + DLT_HDLC = 0x10 + DLT_HHDLC = 0x79 + DLT_HIPPI = 0xf + DLT_IBM_SN = 0x92 + DLT_IBM_SP = 0x91 + DLT_IEEE802 = 0x6 + DLT_IEEE802_11 = 0x69 + DLT_IEEE802_11_RADIO = 0x7f + DLT_IEEE802_11_RADIO_AVS = 0xa3 + DLT_IEEE802_15_4 = 0xc3 + DLT_IEEE802_15_4_LINUX = 0xbf + DLT_IEEE802_15_4_NONASK_PHY = 0xd7 + DLT_IEEE802_16_MAC_CPS = 0xbc + DLT_IEEE802_16_MAC_CPS_RADIO = 0xc1 + DLT_IPMB = 0xc7 + DLT_IPMB_LINUX = 0xd1 + DLT_IPNET = 0xe2 + DLT_IPV4 = 0xe4 + DLT_IPV6 = 0xe5 + DLT_IP_OVER_FC = 0x7a + DLT_JUNIPER_ATM1 = 0x89 + DLT_JUNIPER_ATM2 = 0x87 + DLT_JUNIPER_CHDLC = 0xb5 + DLT_JUNIPER_ES = 0x84 + DLT_JUNIPER_ETHER = 0xb2 + DLT_JUNIPER_FRELAY = 0xb4 + DLT_JUNIPER_GGSN = 0x85 + DLT_JUNIPER_ISM = 0xc2 + DLT_JUNIPER_MFR = 0x86 + DLT_JUNIPER_MLFR = 0x83 + DLT_JUNIPER_MLPPP = 0x82 + DLT_JUNIPER_MONITOR = 0xa4 + DLT_JUNIPER_PIC_PEER = 0xae + DLT_JUNIPER_PPP = 0xb3 + DLT_JUNIPER_PPPOE = 0xa7 + DLT_JUNIPER_PPPOE_ATM = 0xa8 + DLT_JUNIPER_SERVICES = 0x88 + DLT_JUNIPER_ST = 0xc8 + DLT_JUNIPER_VP = 0xb7 + DLT_LAPB_WITH_DIR = 0xcf + DLT_LAPD = 0xcb + DLT_LIN = 0xd4 + DLT_LINUX_EVDEV = 0xd8 + DLT_LINUX_IRDA = 0x90 + DLT_LINUX_LAPD = 0xb1 + DLT_LINUX_SLL = 0x71 + DLT_LOOP = 0x6c + DLT_LTALK = 0x72 + DLT_MFR = 0xb6 + DLT_MOST = 0xd3 + DLT_MPLS = 0xdb + DLT_MTP2 = 0x8c + DLT_MTP2_WITH_PHDR = 0x8b + DLT_MTP3 = 0x8d + DLT_NULL = 0x0 + DLT_PCI_EXP = 0x7d + DLT_PFLOG = 0x75 + DLT_PFSYNC = 0x12 + DLT_PPI = 0xc0 + DLT_PPP = 0x9 + DLT_PPP_BSDOS = 0xe + DLT_PPP_ETHER = 0x33 + DLT_PPP_PPPD = 0xa6 + DLT_PPP_SERIAL = 0x32 + DLT_PPP_WITH_DIR = 0xcc + DLT_PRISM_HEADER = 0x77 + DLT_PRONET = 0x4 + DLT_RAIF1 = 0xc6 + DLT_RAW = 0xc + DLT_RAWAF_MASK = 0x2240000 + DLT_RIO = 0x7c + DLT_SCCP = 0x8e + DLT_SITA = 0xc4 + DLT_SLIP = 0x8 + DLT_SLIP_BSDOS = 0xd + DLT_SUNATM = 0x7b + DLT_SYMANTEC_FIREWALL = 0x63 + DLT_TZSP = 0x80 + DLT_USB = 0xba + DLT_USB_LINUX = 0xbd + DLT_USB_LINUX_MMAPPED = 0xdc + DLT_WIHART = 0xdf + DLT_X2E_SERIAL = 0xd5 + DLT_X2E_XORAYA = 0xd6 + DT_BLK = 0x6 + DT_CHR = 0x2 + DT_DIR = 0x4 + DT_FIFO = 0x1 + DT_LNK = 0xa + DT_REG = 0x8 + DT_SOCK = 0xc + DT_UNKNOWN = 0x0 + DT_WHT = 0xe + ECHO = 0x8 + ECHOCTL = 0x40 + ECHOE = 0x2 + ECHOK = 0x4 + ECHOKE = 0x1 + ECHONL = 0x10 + ECHOPRT = 0x20 + EMUL_LINUX = 0x1 + EMUL_LINUX32 = 0x5 + EMUL_MAXID = 0x6 + ETHERCAP_JUMBO_MTU = 0x4 + ETHERCAP_VLAN_HWTAGGING = 0x2 + ETHERCAP_VLAN_MTU = 0x1 + ETHERMIN = 0x2e + ETHERMTU = 0x5dc + ETHERMTU_JUMBO = 0x2328 + ETHERTYPE_8023 = 0x4 + ETHERTYPE_AARP = 0x80f3 + ETHERTYPE_ACCTON = 0x8390 + ETHERTYPE_AEONIC = 0x8036 + ETHERTYPE_ALPHA = 0x814a + ETHERTYPE_AMBER = 0x6008 + ETHERTYPE_AMOEBA = 0x8145 + ETHERTYPE_APOLLO = 0x80f7 + ETHERTYPE_APOLLODOMAIN = 0x8019 + ETHERTYPE_APPLETALK = 0x809b + ETHERTYPE_APPLITEK = 0x80c7 + ETHERTYPE_ARGONAUT = 0x803a + ETHERTYPE_ARP = 0x806 + ETHERTYPE_AT = 0x809b + ETHERTYPE_ATALK = 0x809b + ETHERTYPE_ATOMIC = 0x86df + ETHERTYPE_ATT = 0x8069 + ETHERTYPE_ATTSTANFORD = 0x8008 + ETHERTYPE_AUTOPHON = 0x806a + ETHERTYPE_AXIS = 0x8856 + ETHERTYPE_BCLOOP = 0x9003 + ETHERTYPE_BOFL = 0x8102 + ETHERTYPE_CABLETRON = 0x7034 + ETHERTYPE_CHAOS 
= 0x804 + ETHERTYPE_COMDESIGN = 0x806c + ETHERTYPE_COMPUGRAPHIC = 0x806d + ETHERTYPE_COUNTERPOINT = 0x8062 + ETHERTYPE_CRONUS = 0x8004 + ETHERTYPE_CRONUSVLN = 0x8003 + ETHERTYPE_DCA = 0x1234 + ETHERTYPE_DDE = 0x807b + ETHERTYPE_DEBNI = 0xaaaa + ETHERTYPE_DECAM = 0x8048 + ETHERTYPE_DECCUST = 0x6006 + ETHERTYPE_DECDIAG = 0x6005 + ETHERTYPE_DECDNS = 0x803c + ETHERTYPE_DECDTS = 0x803e + ETHERTYPE_DECEXPER = 0x6000 + ETHERTYPE_DECLAST = 0x8041 + ETHERTYPE_DECLTM = 0x803f + ETHERTYPE_DECMUMPS = 0x6009 + ETHERTYPE_DECNETBIOS = 0x8040 + ETHERTYPE_DELTACON = 0x86de + ETHERTYPE_DIDDLE = 0x4321 + ETHERTYPE_DLOG1 = 0x660 + ETHERTYPE_DLOG2 = 0x661 + ETHERTYPE_DN = 0x6003 + ETHERTYPE_DOGFIGHT = 0x1989 + ETHERTYPE_DSMD = 0x8039 + ETHERTYPE_ECMA = 0x803 + ETHERTYPE_ENCRYPT = 0x803d + ETHERTYPE_ES = 0x805d + ETHERTYPE_EXCELAN = 0x8010 + ETHERTYPE_EXPERDATA = 0x8049 + ETHERTYPE_FLIP = 0x8146 + ETHERTYPE_FLOWCONTROL = 0x8808 + ETHERTYPE_FRARP = 0x808 + ETHERTYPE_GENDYN = 0x8068 + ETHERTYPE_HAYES = 0x8130 + ETHERTYPE_HIPPI_FP = 0x8180 + ETHERTYPE_HITACHI = 0x8820 + ETHERTYPE_HP = 0x8005 + ETHERTYPE_IEEEPUP = 0xa00 + ETHERTYPE_IEEEPUPAT = 0xa01 + ETHERTYPE_IMLBL = 0x4c42 + ETHERTYPE_IMLBLDIAG = 0x424c + ETHERTYPE_IP = 0x800 + ETHERTYPE_IPAS = 0x876c + ETHERTYPE_IPV6 = 0x86dd + ETHERTYPE_IPX = 0x8137 + ETHERTYPE_IPXNEW = 0x8037 + ETHERTYPE_KALPANA = 0x8582 + ETHERTYPE_LANBRIDGE = 0x8038 + ETHERTYPE_LANPROBE = 0x8888 + ETHERTYPE_LAT = 0x6004 + ETHERTYPE_LBACK = 0x9000 + ETHERTYPE_LITTLE = 0x8060 + ETHERTYPE_LOGICRAFT = 0x8148 + ETHERTYPE_LOOPBACK = 0x9000 + ETHERTYPE_MATRA = 0x807a + ETHERTYPE_MAX = 0xffff + ETHERTYPE_MERIT = 0x807c + ETHERTYPE_MICP = 0x873a + ETHERTYPE_MOPDL = 0x6001 + ETHERTYPE_MOPRC = 0x6002 + ETHERTYPE_MOTOROLA = 0x818d + ETHERTYPE_MPLS = 0x8847 + ETHERTYPE_MPLS_MCAST = 0x8848 + ETHERTYPE_MUMPS = 0x813f + ETHERTYPE_NBPCC = 0x3c04 + ETHERTYPE_NBPCLAIM = 0x3c09 + ETHERTYPE_NBPCLREQ = 0x3c05 + ETHERTYPE_NBPCLRSP = 0x3c06 + ETHERTYPE_NBPCREQ = 0x3c02 + ETHERTYPE_NBPCRSP = 0x3c03 + ETHERTYPE_NBPDG = 0x3c07 + ETHERTYPE_NBPDGB = 0x3c08 + ETHERTYPE_NBPDLTE = 0x3c0a + ETHERTYPE_NBPRAR = 0x3c0c + ETHERTYPE_NBPRAS = 0x3c0b + ETHERTYPE_NBPRST = 0x3c0d + ETHERTYPE_NBPSCD = 0x3c01 + ETHERTYPE_NBPVCD = 0x3c00 + ETHERTYPE_NBS = 0x802 + ETHERTYPE_NCD = 0x8149 + ETHERTYPE_NESTAR = 0x8006 + ETHERTYPE_NETBEUI = 0x8191 + ETHERTYPE_NOVELL = 0x8138 + ETHERTYPE_NS = 0x600 + ETHERTYPE_NSAT = 0x601 + ETHERTYPE_NSCOMPAT = 0x807 + ETHERTYPE_NTRAILER = 0x10 + ETHERTYPE_OS9 = 0x7007 + ETHERTYPE_OS9NET = 0x7009 + ETHERTYPE_PACER = 0x80c6 + ETHERTYPE_PAE = 0x888e + ETHERTYPE_PCS = 0x4242 + ETHERTYPE_PLANNING = 0x8044 + ETHERTYPE_PPP = 0x880b + ETHERTYPE_PPPOE = 0x8864 + ETHERTYPE_PPPOEDISC = 0x8863 + ETHERTYPE_PRIMENTS = 0x7031 + ETHERTYPE_PUP = 0x200 + ETHERTYPE_PUPAT = 0x200 + ETHERTYPE_RACAL = 0x7030 + ETHERTYPE_RATIONAL = 0x8150 + ETHERTYPE_RAWFR = 0x6559 + ETHERTYPE_RCL = 0x1995 + ETHERTYPE_RDP = 0x8739 + ETHERTYPE_RETIX = 0x80f2 + ETHERTYPE_REVARP = 0x8035 + ETHERTYPE_SCA = 0x6007 + ETHERTYPE_SECTRA = 0x86db + ETHERTYPE_SECUREDATA = 0x876d + ETHERTYPE_SGITW = 0x817e + ETHERTYPE_SG_BOUNCE = 0x8016 + ETHERTYPE_SG_DIAG = 0x8013 + ETHERTYPE_SG_NETGAMES = 0x8014 + ETHERTYPE_SG_RESV = 0x8015 + ETHERTYPE_SIMNET = 0x5208 + ETHERTYPE_SLOWPROTOCOLS = 0x8809 + ETHERTYPE_SNA = 0x80d5 + ETHERTYPE_SNMP = 0x814c + ETHERTYPE_SONIX = 0xfaf5 + ETHERTYPE_SPIDER = 0x809f + ETHERTYPE_SPRITE = 0x500 + ETHERTYPE_STP = 0x8181 + ETHERTYPE_TALARIS = 0x812b + ETHERTYPE_TALARISMC = 0x852b + ETHERTYPE_TCPCOMP = 0x876b + ETHERTYPE_TCPSM = 0x9002 
+ ETHERTYPE_TEC = 0x814f + ETHERTYPE_TIGAN = 0x802f + ETHERTYPE_TRAIL = 0x1000 + ETHERTYPE_TRANSETHER = 0x6558 + ETHERTYPE_TYMSHARE = 0x802e + ETHERTYPE_UBBST = 0x7005 + ETHERTYPE_UBDEBUG = 0x900 + ETHERTYPE_UBDIAGLOOP = 0x7002 + ETHERTYPE_UBDL = 0x7000 + ETHERTYPE_UBNIU = 0x7001 + ETHERTYPE_UBNMC = 0x7003 + ETHERTYPE_VALID = 0x1600 + ETHERTYPE_VARIAN = 0x80dd + ETHERTYPE_VAXELN = 0x803b + ETHERTYPE_VEECO = 0x8067 + ETHERTYPE_VEXP = 0x805b + ETHERTYPE_VGLAB = 0x8131 + ETHERTYPE_VINES = 0xbad + ETHERTYPE_VINESECHO = 0xbaf + ETHERTYPE_VINESLOOP = 0xbae + ETHERTYPE_VITAL = 0xff00 + ETHERTYPE_VLAN = 0x8100 + ETHERTYPE_VLTLMAN = 0x8080 + ETHERTYPE_VPROD = 0x805c + ETHERTYPE_VURESERVED = 0x8147 + ETHERTYPE_WATERLOO = 0x8130 + ETHERTYPE_WELLFLEET = 0x8103 + ETHERTYPE_X25 = 0x805 + ETHERTYPE_X75 = 0x801 + ETHERTYPE_XNSSM = 0x9001 + ETHERTYPE_XTP = 0x817d + ETHER_ADDR_LEN = 0x6 + ETHER_CRC_LEN = 0x4 + ETHER_CRC_POLY_BE = 0x4c11db6 + ETHER_CRC_POLY_LE = 0xedb88320 + ETHER_HDR_LEN = 0xe + ETHER_MAX_LEN = 0x5ee + ETHER_MAX_LEN_JUMBO = 0x233a + ETHER_MIN_LEN = 0x40 + ETHER_PPPOE_ENCAP_LEN = 0x8 + ETHER_TYPE_LEN = 0x2 + ETHER_VLAN_ENCAP_LEN = 0x4 + EVFILT_AIO = 0x2 + EVFILT_PROC = 0x4 + EVFILT_READ = 0x0 + EVFILT_SIGNAL = 0x5 + EVFILT_SYSCOUNT = 0x7 + EVFILT_TIMER = 0x6 + EVFILT_VNODE = 0x3 + EVFILT_WRITE = 0x1 + EV_ADD = 0x1 + EV_CLEAR = 0x20 + EV_DELETE = 0x2 + EV_DISABLE = 0x8 + EV_ENABLE = 0x4 + EV_EOF = 0x8000 + EV_ERROR = 0x4000 + EV_FLAG1 = 0x2000 + EV_ONESHOT = 0x10 + EV_SYSFLAGS = 0xf000 + EXTA = 0x4b00 + EXTB = 0x9600 + EXTPROC = 0x800 + FD_CLOEXEC = 0x1 + FD_SETSIZE = 0x100 + FLUSHO = 0x800000 + F_CLOSEM = 0xa + F_DUPFD = 0x0 + F_DUPFD_CLOEXEC = 0xc + F_FSCTL = -0x80000000 + F_FSDIRMASK = 0x70000000 + F_FSIN = 0x10000000 + F_FSINOUT = 0x30000000 + F_FSOUT = 0x20000000 + F_FSPRIV = 0x8000 + F_FSVOID = 0x40000000 + F_GETFD = 0x1 + F_GETFL = 0x3 + F_GETLK = 0x7 + F_GETNOSIGPIPE = 0xd + F_GETOWN = 0x5 + F_MAXFD = 0xb + F_OK = 0x0 + F_PARAM_MASK = 0xfff + F_PARAM_MAX = 0xfff + F_RDLCK = 0x1 + F_SETFD = 0x2 + F_SETFL = 0x4 + F_SETLK = 0x8 + F_SETLKW = 0x9 + F_SETNOSIGPIPE = 0xe + F_SETOWN = 0x6 + F_UNLCK = 0x2 + F_WRLCK = 0x3 + HUPCL = 0x4000 + ICANON = 0x100 + ICMP6_FILTER = 0x12 + ICRNL = 0x100 + IEXTEN = 0x400 + IFAN_ARRIVAL = 0x0 + IFAN_DEPARTURE = 0x1 + IFA_ROUTE = 0x1 + IFF_ALLMULTI = 0x200 + IFF_BROADCAST = 0x2 + IFF_CANTCHANGE = 0x8f52 + IFF_DEBUG = 0x4 + IFF_LINK0 = 0x1000 + IFF_LINK1 = 0x2000 + IFF_LINK2 = 0x4000 + IFF_LOOPBACK = 0x8 + IFF_MULTICAST = 0x8000 + IFF_NOARP = 0x80 + IFF_NOTRAILERS = 0x20 + IFF_OACTIVE = 0x400 + IFF_POINTOPOINT = 0x10 + IFF_PROMISC = 0x100 + IFF_RUNNING = 0x40 + IFF_SIMPLEX = 0x800 + IFF_UP = 0x1 + IFNAMSIZ = 0x10 + IFT_1822 = 0x2 + IFT_A12MPPSWITCH = 0x82 + IFT_AAL2 = 0xbb + IFT_AAL5 = 0x31 + IFT_ADSL = 0x5e + IFT_AFLANE8023 = 0x3b + IFT_AFLANE8025 = 0x3c + IFT_ARAP = 0x58 + IFT_ARCNET = 0x23 + IFT_ARCNETPLUS = 0x24 + IFT_ASYNC = 0x54 + IFT_ATM = 0x25 + IFT_ATMDXI = 0x69 + IFT_ATMFUNI = 0x6a + IFT_ATMIMA = 0x6b + IFT_ATMLOGICAL = 0x50 + IFT_ATMRADIO = 0xbd + IFT_ATMSUBINTERFACE = 0x86 + IFT_ATMVCIENDPT = 0xc2 + IFT_ATMVIRTUAL = 0x95 + IFT_BGPPOLICYACCOUNTING = 0xa2 + IFT_BRIDGE = 0xd1 + IFT_BSC = 0x53 + IFT_CARP = 0xf8 + IFT_CCTEMUL = 0x3d + IFT_CEPT = 0x13 + IFT_CES = 0x85 + IFT_CHANNEL = 0x46 + IFT_CNR = 0x55 + IFT_COFFEE = 0x84 + IFT_COMPOSITELINK = 0x9b + IFT_DCN = 0x8d + IFT_DIGITALPOWERLINE = 0x8a + IFT_DIGITALWRAPPEROVERHEADCHANNEL = 0xba + IFT_DLSW = 0x4a + IFT_DOCSCABLEDOWNSTREAM = 0x80 + IFT_DOCSCABLEMACLAYER = 0x7f + IFT_DOCSCABLEUPSTREAM = 0x81 + 
IFT_DOCSCABLEUPSTREAMCHANNEL = 0xcd + IFT_DS0 = 0x51 + IFT_DS0BUNDLE = 0x52 + IFT_DS1FDL = 0xaa + IFT_DS3 = 0x1e + IFT_DTM = 0x8c + IFT_DVBASILN = 0xac + IFT_DVBASIOUT = 0xad + IFT_DVBRCCDOWNSTREAM = 0x93 + IFT_DVBRCCMACLAYER = 0x92 + IFT_DVBRCCUPSTREAM = 0x94 + IFT_ECONET = 0xce + IFT_EON = 0x19 + IFT_EPLRS = 0x57 + IFT_ESCON = 0x49 + IFT_ETHER = 0x6 + IFT_FAITH = 0xf2 + IFT_FAST = 0x7d + IFT_FASTETHER = 0x3e + IFT_FASTETHERFX = 0x45 + IFT_FDDI = 0xf + IFT_FIBRECHANNEL = 0x38 + IFT_FRAMERELAYINTERCONNECT = 0x3a + IFT_FRAMERELAYMPI = 0x5c + IFT_FRDLCIENDPT = 0xc1 + IFT_FRELAY = 0x20 + IFT_FRELAYDCE = 0x2c + IFT_FRF16MFRBUNDLE = 0xa3 + IFT_FRFORWARD = 0x9e + IFT_G703AT2MB = 0x43 + IFT_G703AT64K = 0x42 + IFT_GIF = 0xf0 + IFT_GIGABITETHERNET = 0x75 + IFT_GR303IDT = 0xb2 + IFT_GR303RDT = 0xb1 + IFT_H323GATEKEEPER = 0xa4 + IFT_H323PROXY = 0xa5 + IFT_HDH1822 = 0x3 + IFT_HDLC = 0x76 + IFT_HDSL2 = 0xa8 + IFT_HIPERLAN2 = 0xb7 + IFT_HIPPI = 0x2f + IFT_HIPPIINTERFACE = 0x39 + IFT_HOSTPAD = 0x5a + IFT_HSSI = 0x2e + IFT_HY = 0xe + IFT_IBM370PARCHAN = 0x48 + IFT_IDSL = 0x9a + IFT_IEEE1394 = 0x90 + IFT_IEEE80211 = 0x47 + IFT_IEEE80212 = 0x37 + IFT_IEEE8023ADLAG = 0xa1 + IFT_IFGSN = 0x91 + IFT_IMT = 0xbe + IFT_INFINIBAND = 0xc7 + IFT_INTERLEAVE = 0x7c + IFT_IP = 0x7e + IFT_IPFORWARD = 0x8e + IFT_IPOVERATM = 0x72 + IFT_IPOVERCDLC = 0x6d + IFT_IPOVERCLAW = 0x6e + IFT_IPSWITCH = 0x4e + IFT_ISDN = 0x3f + IFT_ISDNBASIC = 0x14 + IFT_ISDNPRIMARY = 0x15 + IFT_ISDNS = 0x4b + IFT_ISDNU = 0x4c + IFT_ISO88022LLC = 0x29 + IFT_ISO88023 = 0x7 + IFT_ISO88024 = 0x8 + IFT_ISO88025 = 0x9 + IFT_ISO88025CRFPINT = 0x62 + IFT_ISO88025DTR = 0x56 + IFT_ISO88025FIBER = 0x73 + IFT_ISO88026 = 0xa + IFT_ISUP = 0xb3 + IFT_L2VLAN = 0x87 + IFT_L3IPVLAN = 0x88 + IFT_L3IPXVLAN = 0x89 + IFT_LAPB = 0x10 + IFT_LAPD = 0x4d + IFT_LAPF = 0x77 + IFT_LINEGROUP = 0xd2 + IFT_LOCALTALK = 0x2a + IFT_LOOP = 0x18 + IFT_MEDIAMAILOVERIP = 0x8b + IFT_MFSIGLINK = 0xa7 + IFT_MIOX25 = 0x26 + IFT_MODEM = 0x30 + IFT_MPC = 0x71 + IFT_MPLS = 0xa6 + IFT_MPLSTUNNEL = 0x96 + IFT_MSDSL = 0x8f + IFT_MVL = 0xbf + IFT_MYRINET = 0x63 + IFT_NFAS = 0xaf + IFT_NSIP = 0x1b + IFT_OPTICALCHANNEL = 0xc3 + IFT_OPTICALTRANSPORT = 0xc4 + IFT_OTHER = 0x1 + IFT_P10 = 0xc + IFT_P80 = 0xd + IFT_PARA = 0x22 + IFT_PFLOG = 0xf5 + IFT_PFSYNC = 0xf6 + IFT_PLC = 0xae + IFT_PON155 = 0xcf + IFT_PON622 = 0xd0 + IFT_POS = 0xab + IFT_PPP = 0x17 + IFT_PPPMULTILINKBUNDLE = 0x6c + IFT_PROPATM = 0xc5 + IFT_PROPBWAP2MP = 0xb8 + IFT_PROPCNLS = 0x59 + IFT_PROPDOCSWIRELESSDOWNSTREAM = 0xb5 + IFT_PROPDOCSWIRELESSMACLAYER = 0xb4 + IFT_PROPDOCSWIRELESSUPSTREAM = 0xb6 + IFT_PROPMUX = 0x36 + IFT_PROPVIRTUAL = 0x35 + IFT_PROPWIRELESSP2P = 0x9d + IFT_PTPSERIAL = 0x16 + IFT_PVC = 0xf1 + IFT_Q2931 = 0xc9 + IFT_QLLC = 0x44 + IFT_RADIOMAC = 0xbc + IFT_RADSL = 0x5f + IFT_REACHDSL = 0xc0 + IFT_RFC1483 = 0x9f + IFT_RS232 = 0x21 + IFT_RSRB = 0x4f + IFT_SDLC = 0x11 + IFT_SDSL = 0x60 + IFT_SHDSL = 0xa9 + IFT_SIP = 0x1f + IFT_SIPSIG = 0xcc + IFT_SIPTG = 0xcb + IFT_SLIP = 0x1c + IFT_SMDSDXI = 0x2b + IFT_SMDSICIP = 0x34 + IFT_SONET = 0x27 + IFT_SONETOVERHEADCHANNEL = 0xb9 + IFT_SONETPATH = 0x32 + IFT_SONETVT = 0x33 + IFT_SRP = 0x97 + IFT_SS7SIGLINK = 0x9c + IFT_STACKTOSTACK = 0x6f + IFT_STARLAN = 0xb + IFT_STF = 0xd7 + IFT_T1 = 0x12 + IFT_TDLC = 0x74 + IFT_TELINK = 0xc8 + IFT_TERMPAD = 0x5b + IFT_TR008 = 0xb0 + IFT_TRANSPHDLC = 0x7b + IFT_TUNNEL = 0x83 + IFT_ULTRA = 0x1d + IFT_USB = 0xa0 + IFT_V11 = 0x40 + IFT_V35 = 0x2d + IFT_V36 = 0x41 + IFT_V37 = 0x78 + IFT_VDSL = 0x61 + IFT_VIRTUALIPADDRESS = 0x70 + IFT_VIRTUALTG = 
0xca + IFT_VOICEDID = 0xd5 + IFT_VOICEEM = 0x64 + IFT_VOICEEMFGD = 0xd3 + IFT_VOICEENCAP = 0x67 + IFT_VOICEFGDEANA = 0xd4 + IFT_VOICEFXO = 0x65 + IFT_VOICEFXS = 0x66 + IFT_VOICEOVERATM = 0x98 + IFT_VOICEOVERCABLE = 0xc6 + IFT_VOICEOVERFRAMERELAY = 0x99 + IFT_VOICEOVERIP = 0x68 + IFT_X213 = 0x5d + IFT_X25 = 0x5 + IFT_X25DDN = 0x4 + IFT_X25HUNTGROUP = 0x7a + IFT_X25MLP = 0x79 + IFT_X25PLE = 0x28 + IFT_XETHER = 0x1a + IGNBRK = 0x1 + IGNCR = 0x80 + IGNPAR = 0x4 + IMAXBEL = 0x2000 + INLCR = 0x40 + INPCK = 0x10 + IN_CLASSA_HOST = 0xffffff + IN_CLASSA_MAX = 0x80 + IN_CLASSA_NET = 0xff000000 + IN_CLASSA_NSHIFT = 0x18 + IN_CLASSB_HOST = 0xffff + IN_CLASSB_MAX = 0x10000 + IN_CLASSB_NET = 0xffff0000 + IN_CLASSB_NSHIFT = 0x10 + IN_CLASSC_HOST = 0xff + IN_CLASSC_NET = 0xffffff00 + IN_CLASSC_NSHIFT = 0x8 + IN_CLASSD_HOST = 0xfffffff + IN_CLASSD_NET = 0xf0000000 + IN_CLASSD_NSHIFT = 0x1c + IN_LOOPBACKNET = 0x7f + IPPROTO_AH = 0x33 + IPPROTO_CARP = 0x70 + IPPROTO_DONE = 0x101 + IPPROTO_DSTOPTS = 0x3c + IPPROTO_EGP = 0x8 + IPPROTO_ENCAP = 0x62 + IPPROTO_EON = 0x50 + IPPROTO_ESP = 0x32 + IPPROTO_ETHERIP = 0x61 + IPPROTO_FRAGMENT = 0x2c + IPPROTO_GGP = 0x3 + IPPROTO_GRE = 0x2f + IPPROTO_HOPOPTS = 0x0 + IPPROTO_ICMP = 0x1 + IPPROTO_ICMPV6 = 0x3a + IPPROTO_IDP = 0x16 + IPPROTO_IGMP = 0x2 + IPPROTO_IP = 0x0 + IPPROTO_IPCOMP = 0x6c + IPPROTO_IPIP = 0x4 + IPPROTO_IPV4 = 0x4 + IPPROTO_IPV6 = 0x29 + IPPROTO_IPV6_ICMP = 0x3a + IPPROTO_MAX = 0x100 + IPPROTO_MAXID = 0x34 + IPPROTO_MOBILE = 0x37 + IPPROTO_NONE = 0x3b + IPPROTO_PFSYNC = 0xf0 + IPPROTO_PIM = 0x67 + IPPROTO_PUP = 0xc + IPPROTO_RAW = 0xff + IPPROTO_ROUTING = 0x2b + IPPROTO_RSVP = 0x2e + IPPROTO_TCP = 0x6 + IPPROTO_TP = 0x1d + IPPROTO_UDP = 0x11 + IPPROTO_VRRP = 0x70 + IPV6_CHECKSUM = 0x1a + IPV6_DEFAULT_MULTICAST_HOPS = 0x1 + IPV6_DEFAULT_MULTICAST_LOOP = 0x1 + IPV6_DEFHLIM = 0x40 + IPV6_DONTFRAG = 0x3e + IPV6_DSTOPTS = 0x32 + IPV6_FAITH = 0x1d + IPV6_FLOWINFO_MASK = 0xffffff0f + IPV6_FLOWLABEL_MASK = 0xffff0f00 + IPV6_FRAGTTL = 0x78 + IPV6_HLIMDEC = 0x1 + IPV6_HOPLIMIT = 0x2f + IPV6_HOPOPTS = 0x31 + IPV6_IPSEC_POLICY = 0x1c + IPV6_JOIN_GROUP = 0xc + IPV6_LEAVE_GROUP = 0xd + IPV6_MAXHLIM = 0xff + IPV6_MAXPACKET = 0xffff + IPV6_MMTU = 0x500 + IPV6_MULTICAST_HOPS = 0xa + IPV6_MULTICAST_IF = 0x9 + IPV6_MULTICAST_LOOP = 0xb + IPV6_NEXTHOP = 0x30 + IPV6_PATHMTU = 0x2c + IPV6_PKTINFO = 0x2e + IPV6_PORTRANGE = 0xe + IPV6_PORTRANGE_DEFAULT = 0x0 + IPV6_PORTRANGE_HIGH = 0x1 + IPV6_PORTRANGE_LOW = 0x2 + IPV6_RECVDSTOPTS = 0x28 + IPV6_RECVHOPLIMIT = 0x25 + IPV6_RECVHOPOPTS = 0x27 + IPV6_RECVPATHMTU = 0x2b + IPV6_RECVPKTINFO = 0x24 + IPV6_RECVRTHDR = 0x26 + IPV6_RECVTCLASS = 0x39 + IPV6_RTHDR = 0x33 + IPV6_RTHDRDSTOPTS = 0x23 + IPV6_RTHDR_LOOSE = 0x0 + IPV6_RTHDR_STRICT = 0x1 + IPV6_RTHDR_TYPE_0 = 0x0 + IPV6_SOCKOPT_RESERVED1 = 0x3 + IPV6_TCLASS = 0x3d + IPV6_UNICAST_HOPS = 0x4 + IPV6_USE_MIN_MTU = 0x2a + IPV6_V6ONLY = 0x1b + IPV6_VERSION = 0x60 + IPV6_VERSION_MASK = 0xf0 + IP_ADD_MEMBERSHIP = 0xc + IP_DEFAULT_MULTICAST_LOOP = 0x1 + IP_DEFAULT_MULTICAST_TTL = 0x1 + IP_DF = 0x4000 + IP_DROP_MEMBERSHIP = 0xd + IP_EF = 0x8000 + IP_ERRORMTU = 0x15 + IP_HDRINCL = 0x2 + IP_IPSEC_POLICY = 0x16 + IP_MAXPACKET = 0xffff + IP_MAX_MEMBERSHIPS = 0x14 + IP_MF = 0x2000 + IP_MINFRAGSIZE = 0x45 + IP_MINTTL = 0x18 + IP_MSS = 0x240 + IP_MULTICAST_IF = 0x9 + IP_MULTICAST_LOOP = 0xb + IP_MULTICAST_TTL = 0xa + IP_OFFMASK = 0x1fff + IP_OPTIONS = 0x1 + IP_PORTRANGE = 0x13 + IP_PORTRANGE_DEFAULT = 0x0 + IP_PORTRANGE_HIGH = 0x1 + IP_PORTRANGE_LOW = 0x2 + IP_RECVDSTADDR = 0x7 + IP_RECVIF = 0x14 
+ IP_RECVOPTS = 0x5 + IP_RECVRETOPTS = 0x6 + IP_RECVTTL = 0x17 + IP_RETOPTS = 0x8 + IP_RF = 0x8000 + IP_TOS = 0x3 + IP_TTL = 0x4 + ISIG = 0x80 + ISTRIP = 0x20 + IXANY = 0x800 + IXOFF = 0x400 + IXON = 0x200 + LOCK_EX = 0x2 + LOCK_NB = 0x4 + LOCK_SH = 0x1 + LOCK_UN = 0x8 + MADV_DONTNEED = 0x4 + MADV_FREE = 0x6 + MADV_NORMAL = 0x0 + MADV_RANDOM = 0x1 + MADV_SEQUENTIAL = 0x2 + MADV_SPACEAVAIL = 0x5 + MADV_WILLNEED = 0x3 + MAP_ALIGNMENT_16MB = 0x18000000 + MAP_ALIGNMENT_1TB = 0x28000000 + MAP_ALIGNMENT_256TB = 0x30000000 + MAP_ALIGNMENT_4GB = 0x20000000 + MAP_ALIGNMENT_64KB = 0x10000000 + MAP_ALIGNMENT_64PB = 0x38000000 + MAP_ALIGNMENT_MASK = -0x1000000 + MAP_ALIGNMENT_SHIFT = 0x18 + MAP_ANON = 0x1000 + MAP_FILE = 0x0 + MAP_FIXED = 0x10 + MAP_HASSEMAPHORE = 0x200 + MAP_INHERIT = 0x80 + MAP_INHERIT_COPY = 0x1 + MAP_INHERIT_DEFAULT = 0x1 + MAP_INHERIT_DONATE_COPY = 0x3 + MAP_INHERIT_NONE = 0x2 + MAP_INHERIT_SHARE = 0x0 + MAP_NORESERVE = 0x40 + MAP_PRIVATE = 0x2 + MAP_RENAME = 0x20 + MAP_SHARED = 0x1 + MAP_STACK = 0x2000 + MAP_TRYFIXED = 0x400 + MAP_WIRED = 0x800 + MSG_BCAST = 0x100 + MSG_CMSG_CLOEXEC = 0x800 + MSG_CONTROLMBUF = 0x2000000 + MSG_CTRUNC = 0x20 + MSG_DONTROUTE = 0x4 + MSG_DONTWAIT = 0x80 + MSG_EOR = 0x8 + MSG_IOVUSRSPACE = 0x4000000 + MSG_LENUSRSPACE = 0x8000000 + MSG_MCAST = 0x200 + MSG_NAMEMBUF = 0x1000000 + MSG_NBIO = 0x1000 + MSG_NOSIGNAL = 0x400 + MSG_OOB = 0x1 + MSG_PEEK = 0x2 + MSG_TRUNC = 0x10 + MSG_USERFLAGS = 0xffffff + MSG_WAITALL = 0x40 + NAME_MAX = 0x1ff + NET_RT_DUMP = 0x1 + NET_RT_FLAGS = 0x2 + NET_RT_IFLIST = 0x5 + NET_RT_MAXID = 0x6 + NET_RT_OIFLIST = 0x4 + NET_RT_OOIFLIST = 0x3 + NOFLSH = 0x80000000 + NOTE_ATTRIB = 0x8 + NOTE_CHILD = 0x4 + NOTE_DELETE = 0x1 + NOTE_EXEC = 0x20000000 + NOTE_EXIT = 0x80000000 + NOTE_EXTEND = 0x4 + NOTE_FORK = 0x40000000 + NOTE_LINK = 0x10 + NOTE_LOWAT = 0x1 + NOTE_PCTRLMASK = 0xf0000000 + NOTE_PDATAMASK = 0xfffff + NOTE_RENAME = 0x20 + NOTE_REVOKE = 0x40 + NOTE_TRACK = 0x1 + NOTE_TRACKERR = 0x2 + NOTE_WRITE = 0x2 + OCRNL = 0x10 + OFIOGETBMAP = 0xc004667a + ONLCR = 0x2 + ONLRET = 0x40 + ONOCR = 0x20 + ONOEOT = 0x8 + OPOST = 0x1 + O_ACCMODE = 0x3 + O_ALT_IO = 0x40000 + O_APPEND = 0x8 + O_ASYNC = 0x40 + O_CLOEXEC = 0x400000 + O_CREAT = 0x200 + O_DIRECT = 0x80000 + O_DIRECTORY = 0x200000 + O_DSYNC = 0x10000 + O_EXCL = 0x800 + O_EXLOCK = 0x20 + O_FSYNC = 0x80 + O_NDELAY = 0x4 + O_NOCTTY = 0x8000 + O_NOFOLLOW = 0x100 + O_NONBLOCK = 0x4 + O_NOSIGPIPE = 0x1000000 + O_RDONLY = 0x0 + O_RDWR = 0x2 + O_RSYNC = 0x20000 + O_SHLOCK = 0x10 + O_SYNC = 0x80 + O_TRUNC = 0x400 + O_WRONLY = 0x1 + PARENB = 0x1000 + PARMRK = 0x8 + PARODD = 0x2000 + PENDIN = 0x20000000 + PROT_EXEC = 0x4 + PROT_NONE = 0x0 + PROT_READ = 0x1 + PROT_WRITE = 0x2 + PRI_IOFLUSH = 0x7c + PRIO_PGRP = 0x1 + PRIO_PROCESS = 0x0 + PRIO_USER = 0x2 + RLIMIT_AS = 0xa + RLIMIT_CORE = 0x4 + RLIMIT_CPU = 0x0 + RLIMIT_DATA = 0x2 + RLIMIT_FSIZE = 0x1 + RLIMIT_NOFILE = 0x8 + RLIMIT_STACK = 0x3 + RLIM_INFINITY = 0x7fffffffffffffff + RTAX_AUTHOR = 0x6 + RTAX_BRD = 0x7 + RTAX_DST = 0x0 + RTAX_GATEWAY = 0x1 + RTAX_GENMASK = 0x3 + RTAX_IFA = 0x5 + RTAX_IFP = 0x4 + RTAX_MAX = 0x9 + RTAX_NETMASK = 0x2 + RTAX_TAG = 0x8 + RTA_AUTHOR = 0x40 + RTA_BRD = 0x80 + RTA_DST = 0x1 + RTA_GATEWAY = 0x2 + RTA_GENMASK = 0x8 + RTA_IFA = 0x20 + RTA_IFP = 0x10 + RTA_NETMASK = 0x4 + RTA_TAG = 0x100 + RTF_ANNOUNCE = 0x20000 + RTF_BLACKHOLE = 0x1000 + RTF_CLONED = 0x2000 + RTF_CLONING = 0x100 + RTF_DONE = 0x40 + RTF_DYNAMIC = 0x10 + RTF_GATEWAY = 0x2 + RTF_HOST = 0x4 + RTF_LLINFO = 0x400 + RTF_MASK = 0x80 + RTF_MODIFIED = 
0x20 + RTF_PROTO1 = 0x8000 + RTF_PROTO2 = 0x4000 + RTF_REJECT = 0x8 + RTF_SRC = 0x10000 + RTF_STATIC = 0x800 + RTF_UP = 0x1 + RTF_XRESOLVE = 0x200 + RTM_ADD = 0x1 + RTM_CHANGE = 0x3 + RTM_CHGADDR = 0x15 + RTM_DELADDR = 0xd + RTM_DELETE = 0x2 + RTM_GET = 0x4 + RTM_IEEE80211 = 0x11 + RTM_IFANNOUNCE = 0x10 + RTM_IFINFO = 0x14 + RTM_LLINFO_UPD = 0x13 + RTM_LOCK = 0x8 + RTM_LOSING = 0x5 + RTM_MISS = 0x7 + RTM_NEWADDR = 0xc + RTM_OIFINFO = 0xf + RTM_OLDADD = 0x9 + RTM_OLDDEL = 0xa + RTM_OOIFINFO = 0xe + RTM_REDIRECT = 0x6 + RTM_RESOLVE = 0xb + RTM_RTTUNIT = 0xf4240 + RTM_SETGATE = 0x12 + RTM_VERSION = 0x4 + RTV_EXPIRE = 0x4 + RTV_HOPCOUNT = 0x2 + RTV_MTU = 0x1 + RTV_RPIPE = 0x8 + RTV_RTT = 0x40 + RTV_RTTVAR = 0x80 + RTV_SPIPE = 0x10 + RTV_SSTHRESH = 0x20 + RUSAGE_CHILDREN = -0x1 + RUSAGE_SELF = 0x0 + SCM_CREDS = 0x4 + SCM_RIGHTS = 0x1 + SCM_TIMESTAMP = 0x8 + SHUT_RD = 0x0 + SHUT_RDWR = 0x2 + SHUT_WR = 0x1 + SIOCADDMULTI = 0x80906931 + SIOCADDRT = 0x8030720a + SIOCAIFADDR = 0x8040691a + SIOCALIFADDR = 0x8118691c + SIOCATMARK = 0x40047307 + SIOCDELMULTI = 0x80906932 + SIOCDELRT = 0x8030720b + SIOCDIFADDR = 0x80906919 + SIOCDIFPHYADDR = 0x80906949 + SIOCDLIFADDR = 0x8118691e + SIOCGDRVSPEC = 0xc01c697b + SIOCGETPFSYNC = 0xc09069f8 + SIOCGETSGCNT = 0xc0147534 + SIOCGETVIFCNT = 0xc0147533 + SIOCGHIWAT = 0x40047301 + SIOCGIFADDR = 0xc0906921 + SIOCGIFADDRPREF = 0xc0946920 + SIOCGIFALIAS = 0xc040691b + SIOCGIFBRDADDR = 0xc0906923 + SIOCGIFCAP = 0xc0206976 + SIOCGIFCONF = 0xc0086926 + SIOCGIFDATA = 0xc0946985 + SIOCGIFDLT = 0xc0906977 + SIOCGIFDSTADDR = 0xc0906922 + SIOCGIFFLAGS = 0xc0906911 + SIOCGIFGENERIC = 0xc090693a + SIOCGIFMEDIA = 0xc0286936 + SIOCGIFMETRIC = 0xc0906917 + SIOCGIFMTU = 0xc090697e + SIOCGIFNETMASK = 0xc0906925 + SIOCGIFPDSTADDR = 0xc0906948 + SIOCGIFPSRCADDR = 0xc0906947 + SIOCGLIFADDR = 0xc118691d + SIOCGLIFPHYADDR = 0xc118694b + SIOCGLINKSTR = 0xc01c6987 + SIOCGLOWAT = 0x40047303 + SIOCGPGRP = 0x40047309 + SIOCGVH = 0xc0906983 + SIOCIFCREATE = 0x8090697a + SIOCIFDESTROY = 0x80906979 + SIOCIFGCLONERS = 0xc00c6978 + SIOCINITIFADDR = 0xc0446984 + SIOCSDRVSPEC = 0x801c697b + SIOCSETPFSYNC = 0x809069f7 + SIOCSHIWAT = 0x80047300 + SIOCSIFADDR = 0x8090690c + SIOCSIFADDRPREF = 0x8094691f + SIOCSIFBRDADDR = 0x80906913 + SIOCSIFCAP = 0x80206975 + SIOCSIFDSTADDR = 0x8090690e + SIOCSIFFLAGS = 0x80906910 + SIOCSIFGENERIC = 0x80906939 + SIOCSIFMEDIA = 0xc0906935 + SIOCSIFMETRIC = 0x80906918 + SIOCSIFMTU = 0x8090697f + SIOCSIFNETMASK = 0x80906916 + SIOCSIFPHYADDR = 0x80406946 + SIOCSLIFPHYADDR = 0x8118694a + SIOCSLINKSTR = 0x801c6988 + SIOCSLOWAT = 0x80047302 + SIOCSPGRP = 0x80047308 + SIOCSVH = 0xc0906982 + SIOCZIFDATA = 0xc0946986 + SOCK_CLOEXEC = 0x10000000 + SOCK_DGRAM = 0x2 + SOCK_FLAGS_MASK = 0xf0000000 + SOCK_NONBLOCK = 0x20000000 + SOCK_NOSIGPIPE = 0x40000000 + SOCK_RAW = 0x3 + SOCK_RDM = 0x4 + SOCK_SEQPACKET = 0x5 + SOCK_STREAM = 0x1 + SOL_SOCKET = 0xffff + SOMAXCONN = 0x80 + SO_ACCEPTCONN = 0x2 + SO_ACCEPTFILTER = 0x1000 + SO_BROADCAST = 0x20 + SO_DEBUG = 0x1 + SO_DONTROUTE = 0x10 + SO_ERROR = 0x1007 + SO_KEEPALIVE = 0x8 + SO_LINGER = 0x80 + SO_NOHEADER = 0x100a + SO_NOSIGPIPE = 0x800 + SO_OOBINLINE = 0x100 + SO_OVERFLOWED = 0x1009 + SO_RCVBUF = 0x1002 + SO_RCVLOWAT = 0x1004 + SO_RCVTIMEO = 0x100c + SO_REUSEADDR = 0x4 + SO_REUSEPORT = 0x200 + SO_SNDBUF = 0x1001 + SO_SNDLOWAT = 0x1003 + SO_SNDTIMEO = 0x100b + SO_TIMESTAMP = 0x2000 + SO_TYPE = 0x1008 + SO_USELOOPBACK = 0x40 + SYSCTL_VERSION = 0x1000000 + SYSCTL_VERS_0 = 0x0 + SYSCTL_VERS_1 = 0x1000000 + SYSCTL_VERS_MASK = 0xff000000 + 
S_ARCH1 = 0x10000 + S_ARCH2 = 0x20000 + S_BLKSIZE = 0x200 + S_IEXEC = 0x40 + S_IFBLK = 0x6000 + S_IFCHR = 0x2000 + S_IFDIR = 0x4000 + S_IFIFO = 0x1000 + S_IFLNK = 0xa000 + S_IFMT = 0xf000 + S_IFREG = 0x8000 + S_IFSOCK = 0xc000 + S_IFWHT = 0xe000 + S_IREAD = 0x100 + S_IRGRP = 0x20 + S_IROTH = 0x4 + S_IRUSR = 0x100 + S_IRWXG = 0x38 + S_IRWXO = 0x7 + S_IRWXU = 0x1c0 + S_ISGID = 0x400 + S_ISTXT = 0x200 + S_ISUID = 0x800 + S_ISVTX = 0x200 + S_IWGRP = 0x10 + S_IWOTH = 0x2 + S_IWRITE = 0x80 + S_IWUSR = 0x80 + S_IXGRP = 0x8 + S_IXOTH = 0x1 + S_IXUSR = 0x40 + TCIFLUSH = 0x1 + TCIOFLUSH = 0x3 + TCOFLUSH = 0x2 + TCP_CONGCTL = 0x20 + TCP_KEEPCNT = 0x6 + TCP_KEEPIDLE = 0x3 + TCP_KEEPINIT = 0x7 + TCP_KEEPINTVL = 0x5 + TCP_MAXBURST = 0x4 + TCP_MAXSEG = 0x2 + TCP_MAXWIN = 0xffff + TCP_MAX_WINSHIFT = 0xe + TCP_MD5SIG = 0x10 + TCP_MINMSS = 0xd8 + TCP_MSS = 0x218 + TCP_NODELAY = 0x1 + TCSAFLUSH = 0x2 + TIOCCBRK = 0x2000747a + TIOCCDTR = 0x20007478 + TIOCCONS = 0x80047462 + TIOCDCDTIMESTAMP = 0x400c7458 + TIOCDRAIN = 0x2000745e + TIOCEXCL = 0x2000740d + TIOCEXT = 0x80047460 + TIOCFLAG_CDTRCTS = 0x10 + TIOCFLAG_CLOCAL = 0x2 + TIOCFLAG_CRTSCTS = 0x4 + TIOCFLAG_MDMBUF = 0x8 + TIOCFLAG_SOFTCAR = 0x1 + TIOCFLUSH = 0x80047410 + TIOCGETA = 0x402c7413 + TIOCGETD = 0x4004741a + TIOCGFLAGS = 0x4004745d + TIOCGLINED = 0x40207442 + TIOCGPGRP = 0x40047477 + TIOCGQSIZE = 0x40047481 + TIOCGRANTPT = 0x20007447 + TIOCGSID = 0x40047463 + TIOCGSIZE = 0x40087468 + TIOCGWINSZ = 0x40087468 + TIOCMBIC = 0x8004746b + TIOCMBIS = 0x8004746c + TIOCMGET = 0x4004746a + TIOCMSET = 0x8004746d + TIOCM_CAR = 0x40 + TIOCM_CD = 0x40 + TIOCM_CTS = 0x20 + TIOCM_DSR = 0x100 + TIOCM_DTR = 0x2 + TIOCM_LE = 0x1 + TIOCM_RI = 0x80 + TIOCM_RNG = 0x80 + TIOCM_RTS = 0x4 + TIOCM_SR = 0x10 + TIOCM_ST = 0x8 + TIOCNOTTY = 0x20007471 + TIOCNXCL = 0x2000740e + TIOCOUTQ = 0x40047473 + TIOCPKT = 0x80047470 + TIOCPKT_DATA = 0x0 + TIOCPKT_DOSTOP = 0x20 + TIOCPKT_FLUSHREAD = 0x1 + TIOCPKT_FLUSHWRITE = 0x2 + TIOCPKT_IOCTL = 0x40 + TIOCPKT_NOSTOP = 0x10 + TIOCPKT_START = 0x8 + TIOCPKT_STOP = 0x4 + TIOCPTMGET = 0x48087446 + TIOCPTSNAME = 0x48087448 + TIOCRCVFRAME = 0x80047445 + TIOCREMOTE = 0x80047469 + TIOCSBRK = 0x2000747b + TIOCSCTTY = 0x20007461 + TIOCSDTR = 0x20007479 + TIOCSETA = 0x802c7414 + TIOCSETAF = 0x802c7416 + TIOCSETAW = 0x802c7415 + TIOCSETD = 0x8004741b + TIOCSFLAGS = 0x8004745c + TIOCSIG = 0x2000745f + TIOCSLINED = 0x80207443 + TIOCSPGRP = 0x80047476 + TIOCSQSIZE = 0x80047480 + TIOCSSIZE = 0x80087467 + TIOCSTART = 0x2000746e + TIOCSTAT = 0x80047465 + TIOCSTI = 0x80017472 + TIOCSTOP = 0x2000746f + TIOCSWINSZ = 0x80087467 + TIOCUCNTL = 0x80047466 + TIOCXMTFRAME = 0x80047444 + TOSTOP = 0x400000 + VDISCARD = 0xf + VDSUSP = 0xb + VEOF = 0x0 + VEOL = 0x1 + VEOL2 = 0x2 + VERASE = 0x3 + VINTR = 0x8 + VKILL = 0x5 + VLNEXT = 0xe + VMIN = 0x10 + VQUIT = 0x9 + VREPRINT = 0x6 + VSTART = 0xc + VSTATUS = 0x12 + VSTOP = 0xd + VSUSP = 0xa + VTIME = 0x11 + VWERASE = 0x4 + WALL = 0x8 + WALLSIG = 0x8 + WALTSIG = 0x4 + WCLONE = 0x4 + WCOREFLAG = 0x80 + WNOHANG = 0x1 + WNOWAIT = 0x10000 + WNOZOMBIE = 0x20000 + WOPTSCHECKED = 0x40000 + WSTOPPED = 0x7f + WUNTRACED = 0x2 +) + +// Errors +const ( + E2BIG = syscall.Errno(0x7) + EACCES = syscall.Errno(0xd) + EADDRINUSE = syscall.Errno(0x30) + EADDRNOTAVAIL = syscall.Errno(0x31) + EAFNOSUPPORT = syscall.Errno(0x2f) + EAGAIN = syscall.Errno(0x23) + EALREADY = syscall.Errno(0x25) + EAUTH = syscall.Errno(0x50) + EBADF = syscall.Errno(0x9) + EBADMSG = syscall.Errno(0x58) + EBADRPC = syscall.Errno(0x48) + EBUSY = syscall.Errno(0x10) + 
ECANCELED = syscall.Errno(0x57) + ECHILD = syscall.Errno(0xa) + ECONNABORTED = syscall.Errno(0x35) + ECONNREFUSED = syscall.Errno(0x3d) + ECONNRESET = syscall.Errno(0x36) + EDEADLK = syscall.Errno(0xb) + EDESTADDRREQ = syscall.Errno(0x27) + EDOM = syscall.Errno(0x21) + EDQUOT = syscall.Errno(0x45) + EEXIST = syscall.Errno(0x11) + EFAULT = syscall.Errno(0xe) + EFBIG = syscall.Errno(0x1b) + EFTYPE = syscall.Errno(0x4f) + EHOSTDOWN = syscall.Errno(0x40) + EHOSTUNREACH = syscall.Errno(0x41) + EIDRM = syscall.Errno(0x52) + EILSEQ = syscall.Errno(0x55) + EINPROGRESS = syscall.Errno(0x24) + EINTR = syscall.Errno(0x4) + EINVAL = syscall.Errno(0x16) + EIO = syscall.Errno(0x5) + EISCONN = syscall.Errno(0x38) + EISDIR = syscall.Errno(0x15) + ELAST = syscall.Errno(0x60) + ELOOP = syscall.Errno(0x3e) + EMFILE = syscall.Errno(0x18) + EMLINK = syscall.Errno(0x1f) + EMSGSIZE = syscall.Errno(0x28) + EMULTIHOP = syscall.Errno(0x5e) + ENAMETOOLONG = syscall.Errno(0x3f) + ENEEDAUTH = syscall.Errno(0x51) + ENETDOWN = syscall.Errno(0x32) + ENETRESET = syscall.Errno(0x34) + ENETUNREACH = syscall.Errno(0x33) + ENFILE = syscall.Errno(0x17) + ENOATTR = syscall.Errno(0x5d) + ENOBUFS = syscall.Errno(0x37) + ENODATA = syscall.Errno(0x59) + ENODEV = syscall.Errno(0x13) + ENOENT = syscall.Errno(0x2) + ENOEXEC = syscall.Errno(0x8) + ENOLCK = syscall.Errno(0x4d) + ENOLINK = syscall.Errno(0x5f) + ENOMEM = syscall.Errno(0xc) + ENOMSG = syscall.Errno(0x53) + ENOPROTOOPT = syscall.Errno(0x2a) + ENOSPC = syscall.Errno(0x1c) + ENOSR = syscall.Errno(0x5a) + ENOSTR = syscall.Errno(0x5b) + ENOSYS = syscall.Errno(0x4e) + ENOTBLK = syscall.Errno(0xf) + ENOTCONN = syscall.Errno(0x39) + ENOTDIR = syscall.Errno(0x14) + ENOTEMPTY = syscall.Errno(0x42) + ENOTSOCK = syscall.Errno(0x26) + ENOTSUP = syscall.Errno(0x56) + ENOTTY = syscall.Errno(0x19) + ENXIO = syscall.Errno(0x6) + EOPNOTSUPP = syscall.Errno(0x2d) + EOVERFLOW = syscall.Errno(0x54) + EPERM = syscall.Errno(0x1) + EPFNOSUPPORT = syscall.Errno(0x2e) + EPIPE = syscall.Errno(0x20) + EPROCLIM = syscall.Errno(0x43) + EPROCUNAVAIL = syscall.Errno(0x4c) + EPROGMISMATCH = syscall.Errno(0x4b) + EPROGUNAVAIL = syscall.Errno(0x4a) + EPROTO = syscall.Errno(0x60) + EPROTONOSUPPORT = syscall.Errno(0x2b) + EPROTOTYPE = syscall.Errno(0x29) + ERANGE = syscall.Errno(0x22) + EREMOTE = syscall.Errno(0x47) + EROFS = syscall.Errno(0x1e) + ERPCMISMATCH = syscall.Errno(0x49) + ESHUTDOWN = syscall.Errno(0x3a) + ESOCKTNOSUPPORT = syscall.Errno(0x2c) + ESPIPE = syscall.Errno(0x1d) + ESRCH = syscall.Errno(0x3) + ESTALE = syscall.Errno(0x46) + ETIME = syscall.Errno(0x5c) + ETIMEDOUT = syscall.Errno(0x3c) + ETOOMANYREFS = syscall.Errno(0x3b) + ETXTBSY = syscall.Errno(0x1a) + EUSERS = syscall.Errno(0x44) + EWOULDBLOCK = syscall.Errno(0x23) + EXDEV = syscall.Errno(0x12) +) + +// Signals +const ( + SIGABRT = syscall.Signal(0x6) + SIGALRM = syscall.Signal(0xe) + SIGBUS = syscall.Signal(0xa) + SIGCHLD = syscall.Signal(0x14) + SIGCONT = syscall.Signal(0x13) + SIGEMT = syscall.Signal(0x7) + SIGFPE = syscall.Signal(0x8) + SIGHUP = syscall.Signal(0x1) + SIGILL = syscall.Signal(0x4) + SIGINFO = syscall.Signal(0x1d) + SIGINT = syscall.Signal(0x2) + SIGIO = syscall.Signal(0x17) + SIGIOT = syscall.Signal(0x6) + SIGKILL = syscall.Signal(0x9) + SIGPIPE = syscall.Signal(0xd) + SIGPROF = syscall.Signal(0x1b) + SIGPWR = syscall.Signal(0x20) + SIGQUIT = syscall.Signal(0x3) + SIGSEGV = syscall.Signal(0xb) + SIGSTOP = syscall.Signal(0x11) + SIGSYS = syscall.Signal(0xc) + SIGTERM = syscall.Signal(0xf) + SIGTRAP = 
syscall.Signal(0x5) + SIGTSTP = syscall.Signal(0x12) + SIGTTIN = syscall.Signal(0x15) + SIGTTOU = syscall.Signal(0x16) + SIGURG = syscall.Signal(0x10) + SIGUSR1 = syscall.Signal(0x1e) + SIGUSR2 = syscall.Signal(0x1f) + SIGVTALRM = syscall.Signal(0x1a) + SIGWINCH = syscall.Signal(0x1c) + SIGXCPU = syscall.Signal(0x18) + SIGXFSZ = syscall.Signal(0x19) +) + +// Error table +var errors = [...]string{ + 1: "operation not permitted", + 2: "no such file or directory", + 3: "no such process", + 4: "interrupted system call", + 5: "input/output error", + 6: "device not configured", + 7: "argument list too long", + 8: "exec format error", + 9: "bad file descriptor", + 10: "no child processes", + 11: "resource deadlock avoided", + 12: "cannot allocate memory", + 13: "permission denied", + 14: "bad address", + 15: "block device required", + 16: "device busy", + 17: "file exists", + 18: "cross-device link", + 19: "operation not supported by device", + 20: "not a directory", + 21: "is a directory", + 22: "invalid argument", + 23: "too many open files in system", + 24: "too many open files", + 25: "inappropriate ioctl for device", + 26: "text file busy", + 27: "file too large", + 28: "no space left on device", + 29: "illegal seek", + 30: "read-only file system", + 31: "too many links", + 32: "broken pipe", + 33: "numerical argument out of domain", + 34: "result too large or too small", + 35: "resource temporarily unavailable", + 36: "operation now in progress", + 37: "operation already in progress", + 38: "socket operation on non-socket", + 39: "destination address required", + 40: "message too long", + 41: "protocol wrong type for socket", + 42: "protocol option not available", + 43: "protocol not supported", + 44: "socket type not supported", + 45: "operation not supported", + 46: "protocol family not supported", + 47: "address family not supported by protocol family", + 48: "address already in use", + 49: "can't assign requested address", + 50: "network is down", + 51: "network is unreachable", + 52: "network dropped connection on reset", + 53: "software caused connection abort", + 54: "connection reset by peer", + 55: "no buffer space available", + 56: "socket is already connected", + 57: "socket is not connected", + 58: "can't send after socket shutdown", + 59: "too many references: can't splice", + 60: "connection timed out", + 61: "connection refused", + 62: "too many levels of symbolic links", + 63: "file name too long", + 64: "host is down", + 65: "no route to host", + 66: "directory not empty", + 67: "too many processes", + 68: "too many users", + 69: "disc quota exceeded", + 70: "stale NFS file handle", + 71: "too many levels of remote in path", + 72: "RPC struct is bad", + 73: "RPC version wrong", + 74: "RPC prog. 
not avail", + 75: "program version wrong", + 76: "bad procedure for program", + 77: "no locks available", + 78: "function not implemented", + 79: "inappropriate file type or format", + 80: "authentication error", + 81: "need authenticator", + 82: "identifier removed", + 83: "no message of desired type", + 84: "value too large to be stored in data type", + 85: "illegal byte sequence", + 86: "not supported", + 87: "operation Canceled", + 88: "bad or Corrupt message", + 89: "no message available", + 90: "no STREAM resources", + 91: "not a STREAM", + 92: "STREAM ioctl timeout", + 93: "attribute not found", + 94: "multihop attempted", + 95: "link has been severed", + 96: "protocol error", +} + +// Signal table +var signals = [...]string{ + 1: "hangup", + 2: "interrupt", + 3: "quit", + 4: "illegal instruction", + 5: "trace/BPT trap", + 6: "abort trap", + 7: "EMT trap", + 8: "floating point exception", + 9: "killed", + 10: "bus error", + 11: "segmentation fault", + 12: "bad system call", + 13: "broken pipe", + 14: "alarm clock", + 15: "terminated", + 16: "urgent I/O condition", + 17: "stopped (signal)", + 18: "stopped", + 19: "continued", + 20: "child exited", + 21: "stopped (tty input)", + 22: "stopped (tty output)", + 23: "I/O possible", + 24: "cputime limit exceeded", + 25: "filesize limit exceeded", + 26: "virtual timer expired", + 27: "profiling timer expired", + 28: "window size changes", + 29: "information request", + 30: "user defined signal 1", + 31: "user defined signal 2", + 32: "power fail/restart", +} diff --git a/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zerrors_openbsd_386.go b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zerrors_openbsd_386.go new file mode 100644 index 000000000..3322e998d --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zerrors_openbsd_386.go @@ -0,0 +1,1584 @@ +// mkerrors.sh -m32 +// MACHINE GENERATED BY THE COMMAND ABOVE; DO NOT EDIT + +// +build 386,openbsd + +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs -- -m32 _const.go + +package unix + +import "syscall" + +const ( + AF_APPLETALK = 0x10 + AF_BLUETOOTH = 0x20 + AF_CCITT = 0xa + AF_CHAOS = 0x5 + AF_CNT = 0x15 + AF_COIP = 0x14 + AF_DATAKIT = 0x9 + AF_DECnet = 0xc + AF_DLI = 0xd + AF_E164 = 0x1a + AF_ECMA = 0x8 + AF_ENCAP = 0x1c + AF_HYLINK = 0xf + AF_IMPLINK = 0x3 + AF_INET = 0x2 + AF_INET6 = 0x18 + AF_IPX = 0x17 + AF_ISDN = 0x1a + AF_ISO = 0x7 + AF_KEY = 0x1e + AF_LAT = 0xe + AF_LINK = 0x12 + AF_LOCAL = 0x1 + AF_MAX = 0x24 + AF_MPLS = 0x21 + AF_NATM = 0x1b + AF_NS = 0x6 + AF_OSI = 0x7 + AF_PUP = 0x4 + AF_ROUTE = 0x11 + AF_SIP = 0x1d + AF_SNA = 0xb + AF_UNIX = 0x1 + AF_UNSPEC = 0x0 + ARPHRD_ETHER = 0x1 + ARPHRD_FRELAY = 0xf + ARPHRD_IEEE1394 = 0x18 + ARPHRD_IEEE802 = 0x6 + B0 = 0x0 + B110 = 0x6e + B115200 = 0x1c200 + B1200 = 0x4b0 + B134 = 0x86 + B14400 = 0x3840 + B150 = 0x96 + B1800 = 0x708 + B19200 = 0x4b00 + B200 = 0xc8 + B230400 = 0x38400 + B2400 = 0x960 + B28800 = 0x7080 + B300 = 0x12c + B38400 = 0x9600 + B4800 = 0x12c0 + B50 = 0x32 + B57600 = 0xe100 + B600 = 0x258 + B7200 = 0x1c20 + B75 = 0x4b + B76800 = 0x12c00 + B9600 = 0x2580 + BIOCFLUSH = 0x20004268 + BIOCGBLEN = 0x40044266 + BIOCGDIRFILT = 0x4004427c + BIOCGDLT = 0x4004426a + BIOCGDLTLIST = 0xc008427b + BIOCGETIF = 0x4020426b + BIOCGFILDROP = 0x40044278 + BIOCGHDRCMPLT = 0x40044274 + BIOCGRSIG = 0x40044273 + BIOCGRTIMEOUT = 0x400c426e + BIOCGSTATS = 0x4008426f + BIOCIMMEDIATE = 0x80044270 + BIOCLOCK = 0x20004276 + BIOCPROMISC = 0x20004269 
+ BIOCSBLEN = 0xc0044266 + BIOCSDIRFILT = 0x8004427d + BIOCSDLT = 0x8004427a + BIOCSETF = 0x80084267 + BIOCSETIF = 0x8020426c + BIOCSETWF = 0x80084277 + BIOCSFILDROP = 0x80044279 + BIOCSHDRCMPLT = 0x80044275 + BIOCSRSIG = 0x80044272 + BIOCSRTIMEOUT = 0x800c426d + BIOCVERSION = 0x40044271 + BPF_A = 0x10 + BPF_ABS = 0x20 + BPF_ADD = 0x0 + BPF_ALIGNMENT = 0x4 + BPF_ALU = 0x4 + BPF_AND = 0x50 + BPF_B = 0x10 + BPF_DIRECTION_IN = 0x1 + BPF_DIRECTION_OUT = 0x2 + BPF_DIV = 0x30 + BPF_H = 0x8 + BPF_IMM = 0x0 + BPF_IND = 0x40 + BPF_JA = 0x0 + BPF_JEQ = 0x10 + BPF_JGE = 0x30 + BPF_JGT = 0x20 + BPF_JMP = 0x5 + BPF_JSET = 0x40 + BPF_K = 0x0 + BPF_LD = 0x0 + BPF_LDX = 0x1 + BPF_LEN = 0x80 + BPF_LSH = 0x60 + BPF_MAJOR_VERSION = 0x1 + BPF_MAXBUFSIZE = 0x200000 + BPF_MAXINSNS = 0x200 + BPF_MEM = 0x60 + BPF_MEMWORDS = 0x10 + BPF_MINBUFSIZE = 0x20 + BPF_MINOR_VERSION = 0x1 + BPF_MISC = 0x7 + BPF_MSH = 0xa0 + BPF_MUL = 0x20 + BPF_NEG = 0x80 + BPF_OR = 0x40 + BPF_RELEASE = 0x30bb6 + BPF_RET = 0x6 + BPF_RSH = 0x70 + BPF_ST = 0x2 + BPF_STX = 0x3 + BPF_SUB = 0x10 + BPF_TAX = 0x0 + BPF_TXA = 0x80 + BPF_W = 0x0 + BPF_X = 0x8 + BRKINT = 0x2 + CFLUSH = 0xf + CLOCAL = 0x8000 + CREAD = 0x800 + CS5 = 0x0 + CS6 = 0x100 + CS7 = 0x200 + CS8 = 0x300 + CSIZE = 0x300 + CSTART = 0x11 + CSTATUS = 0xff + CSTOP = 0x13 + CSTOPB = 0x400 + CSUSP = 0x1a + CTL_MAXNAME = 0xc + CTL_NET = 0x4 + DIOCOSFPFLUSH = 0x2000444e + DLT_ARCNET = 0x7 + DLT_ATM_RFC1483 = 0xb + DLT_AX25 = 0x3 + DLT_CHAOS = 0x5 + DLT_C_HDLC = 0x68 + DLT_EN10MB = 0x1 + DLT_EN3MB = 0x2 + DLT_ENC = 0xd + DLT_FDDI = 0xa + DLT_IEEE802 = 0x6 + DLT_IEEE802_11 = 0x69 + DLT_IEEE802_11_RADIO = 0x7f + DLT_LOOP = 0xc + DLT_MPLS = 0xdb + DLT_NULL = 0x0 + DLT_PFLOG = 0x75 + DLT_PFSYNC = 0x12 + DLT_PPP = 0x9 + DLT_PPP_BSDOS = 0x10 + DLT_PPP_ETHER = 0x33 + DLT_PPP_SERIAL = 0x32 + DLT_PRONET = 0x4 + DLT_RAW = 0xe + DLT_SLIP = 0x8 + DLT_SLIP_BSDOS = 0xf + DT_BLK = 0x6 + DT_CHR = 0x2 + DT_DIR = 0x4 + DT_FIFO = 0x1 + DT_LNK = 0xa + DT_REG = 0x8 + DT_SOCK = 0xc + DT_UNKNOWN = 0x0 + ECHO = 0x8 + ECHOCTL = 0x40 + ECHOE = 0x2 + ECHOK = 0x4 + ECHOKE = 0x1 + ECHONL = 0x10 + ECHOPRT = 0x20 + EMT_TAGOVF = 0x1 + EMUL_ENABLED = 0x1 + EMUL_NATIVE = 0x2 + ENDRUNDISC = 0x9 + ETHERMIN = 0x2e + ETHERMTU = 0x5dc + ETHERTYPE_8023 = 0x4 + ETHERTYPE_AARP = 0x80f3 + ETHERTYPE_ACCTON = 0x8390 + ETHERTYPE_AEONIC = 0x8036 + ETHERTYPE_ALPHA = 0x814a + ETHERTYPE_AMBER = 0x6008 + ETHERTYPE_AMOEBA = 0x8145 + ETHERTYPE_AOE = 0x88a2 + ETHERTYPE_APOLLO = 0x80f7 + ETHERTYPE_APOLLODOMAIN = 0x8019 + ETHERTYPE_APPLETALK = 0x809b + ETHERTYPE_APPLITEK = 0x80c7 + ETHERTYPE_ARGONAUT = 0x803a + ETHERTYPE_ARP = 0x806 + ETHERTYPE_AT = 0x809b + ETHERTYPE_ATALK = 0x809b + ETHERTYPE_ATOMIC = 0x86df + ETHERTYPE_ATT = 0x8069 + ETHERTYPE_ATTSTANFORD = 0x8008 + ETHERTYPE_AUTOPHON = 0x806a + ETHERTYPE_AXIS = 0x8856 + ETHERTYPE_BCLOOP = 0x9003 + ETHERTYPE_BOFL = 0x8102 + ETHERTYPE_CABLETRON = 0x7034 + ETHERTYPE_CHAOS = 0x804 + ETHERTYPE_COMDESIGN = 0x806c + ETHERTYPE_COMPUGRAPHIC = 0x806d + ETHERTYPE_COUNTERPOINT = 0x8062 + ETHERTYPE_CRONUS = 0x8004 + ETHERTYPE_CRONUSVLN = 0x8003 + ETHERTYPE_DCA = 0x1234 + ETHERTYPE_DDE = 0x807b + ETHERTYPE_DEBNI = 0xaaaa + ETHERTYPE_DECAM = 0x8048 + ETHERTYPE_DECCUST = 0x6006 + ETHERTYPE_DECDIAG = 0x6005 + ETHERTYPE_DECDNS = 0x803c + ETHERTYPE_DECDTS = 0x803e + ETHERTYPE_DECEXPER = 0x6000 + ETHERTYPE_DECLAST = 0x8041 + ETHERTYPE_DECLTM = 0x803f + ETHERTYPE_DECMUMPS = 0x6009 + ETHERTYPE_DECNETBIOS = 0x8040 + ETHERTYPE_DELTACON = 0x86de + ETHERTYPE_DIDDLE = 0x4321 + ETHERTYPE_DLOG1 = 0x660 + 
ETHERTYPE_DLOG2 = 0x661 + ETHERTYPE_DN = 0x6003 + ETHERTYPE_DOGFIGHT = 0x1989 + ETHERTYPE_DSMD = 0x8039 + ETHERTYPE_ECMA = 0x803 + ETHERTYPE_ENCRYPT = 0x803d + ETHERTYPE_ES = 0x805d + ETHERTYPE_EXCELAN = 0x8010 + ETHERTYPE_EXPERDATA = 0x8049 + ETHERTYPE_FLIP = 0x8146 + ETHERTYPE_FLOWCONTROL = 0x8808 + ETHERTYPE_FRARP = 0x808 + ETHERTYPE_GENDYN = 0x8068 + ETHERTYPE_HAYES = 0x8130 + ETHERTYPE_HIPPI_FP = 0x8180 + ETHERTYPE_HITACHI = 0x8820 + ETHERTYPE_HP = 0x8005 + ETHERTYPE_IEEEPUP = 0xa00 + ETHERTYPE_IEEEPUPAT = 0xa01 + ETHERTYPE_IMLBL = 0x4c42 + ETHERTYPE_IMLBLDIAG = 0x424c + ETHERTYPE_IP = 0x800 + ETHERTYPE_IPAS = 0x876c + ETHERTYPE_IPV6 = 0x86dd + ETHERTYPE_IPX = 0x8137 + ETHERTYPE_IPXNEW = 0x8037 + ETHERTYPE_KALPANA = 0x8582 + ETHERTYPE_LANBRIDGE = 0x8038 + ETHERTYPE_LANPROBE = 0x8888 + ETHERTYPE_LAT = 0x6004 + ETHERTYPE_LBACK = 0x9000 + ETHERTYPE_LITTLE = 0x8060 + ETHERTYPE_LLDP = 0x88cc + ETHERTYPE_LOGICRAFT = 0x8148 + ETHERTYPE_LOOPBACK = 0x9000 + ETHERTYPE_MATRA = 0x807a + ETHERTYPE_MAX = 0xffff + ETHERTYPE_MERIT = 0x807c + ETHERTYPE_MICP = 0x873a + ETHERTYPE_MOPDL = 0x6001 + ETHERTYPE_MOPRC = 0x6002 + ETHERTYPE_MOTOROLA = 0x818d + ETHERTYPE_MPLS = 0x8847 + ETHERTYPE_MPLS_MCAST = 0x8848 + ETHERTYPE_MUMPS = 0x813f + ETHERTYPE_NBPCC = 0x3c04 + ETHERTYPE_NBPCLAIM = 0x3c09 + ETHERTYPE_NBPCLREQ = 0x3c05 + ETHERTYPE_NBPCLRSP = 0x3c06 + ETHERTYPE_NBPCREQ = 0x3c02 + ETHERTYPE_NBPCRSP = 0x3c03 + ETHERTYPE_NBPDG = 0x3c07 + ETHERTYPE_NBPDGB = 0x3c08 + ETHERTYPE_NBPDLTE = 0x3c0a + ETHERTYPE_NBPRAR = 0x3c0c + ETHERTYPE_NBPRAS = 0x3c0b + ETHERTYPE_NBPRST = 0x3c0d + ETHERTYPE_NBPSCD = 0x3c01 + ETHERTYPE_NBPVCD = 0x3c00 + ETHERTYPE_NBS = 0x802 + ETHERTYPE_NCD = 0x8149 + ETHERTYPE_NESTAR = 0x8006 + ETHERTYPE_NETBEUI = 0x8191 + ETHERTYPE_NOVELL = 0x8138 + ETHERTYPE_NS = 0x600 + ETHERTYPE_NSAT = 0x601 + ETHERTYPE_NSCOMPAT = 0x807 + ETHERTYPE_NTRAILER = 0x10 + ETHERTYPE_OS9 = 0x7007 + ETHERTYPE_OS9NET = 0x7009 + ETHERTYPE_PACER = 0x80c6 + ETHERTYPE_PAE = 0x888e + ETHERTYPE_PCS = 0x4242 + ETHERTYPE_PLANNING = 0x8044 + ETHERTYPE_PPP = 0x880b + ETHERTYPE_PPPOE = 0x8864 + ETHERTYPE_PPPOEDISC = 0x8863 + ETHERTYPE_PRIMENTS = 0x7031 + ETHERTYPE_PUP = 0x200 + ETHERTYPE_PUPAT = 0x200 + ETHERTYPE_QINQ = 0x88a8 + ETHERTYPE_RACAL = 0x7030 + ETHERTYPE_RATIONAL = 0x8150 + ETHERTYPE_RAWFR = 0x6559 + ETHERTYPE_RCL = 0x1995 + ETHERTYPE_RDP = 0x8739 + ETHERTYPE_RETIX = 0x80f2 + ETHERTYPE_REVARP = 0x8035 + ETHERTYPE_SCA = 0x6007 + ETHERTYPE_SECTRA = 0x86db + ETHERTYPE_SECUREDATA = 0x876d + ETHERTYPE_SGITW = 0x817e + ETHERTYPE_SG_BOUNCE = 0x8016 + ETHERTYPE_SG_DIAG = 0x8013 + ETHERTYPE_SG_NETGAMES = 0x8014 + ETHERTYPE_SG_RESV = 0x8015 + ETHERTYPE_SIMNET = 0x5208 + ETHERTYPE_SLOW = 0x8809 + ETHERTYPE_SNA = 0x80d5 + ETHERTYPE_SNMP = 0x814c + ETHERTYPE_SONIX = 0xfaf5 + ETHERTYPE_SPIDER = 0x809f + ETHERTYPE_SPRITE = 0x500 + ETHERTYPE_STP = 0x8181 + ETHERTYPE_TALARIS = 0x812b + ETHERTYPE_TALARISMC = 0x852b + ETHERTYPE_TCPCOMP = 0x876b + ETHERTYPE_TCPSM = 0x9002 + ETHERTYPE_TEC = 0x814f + ETHERTYPE_TIGAN = 0x802f + ETHERTYPE_TRAIL = 0x1000 + ETHERTYPE_TRANSETHER = 0x6558 + ETHERTYPE_TYMSHARE = 0x802e + ETHERTYPE_UBBST = 0x7005 + ETHERTYPE_UBDEBUG = 0x900 + ETHERTYPE_UBDIAGLOOP = 0x7002 + ETHERTYPE_UBDL = 0x7000 + ETHERTYPE_UBNIU = 0x7001 + ETHERTYPE_UBNMC = 0x7003 + ETHERTYPE_VALID = 0x1600 + ETHERTYPE_VARIAN = 0x80dd + ETHERTYPE_VAXELN = 0x803b + ETHERTYPE_VEECO = 0x8067 + ETHERTYPE_VEXP = 0x805b + ETHERTYPE_VGLAB = 0x8131 + ETHERTYPE_VINES = 0xbad + ETHERTYPE_VINESECHO = 0xbaf + ETHERTYPE_VINESLOOP = 0xbae + ETHERTYPE_VITAL = 
0xff00 + ETHERTYPE_VLAN = 0x8100 + ETHERTYPE_VLTLMAN = 0x8080 + ETHERTYPE_VPROD = 0x805c + ETHERTYPE_VURESERVED = 0x8147 + ETHERTYPE_WATERLOO = 0x8130 + ETHERTYPE_WELLFLEET = 0x8103 + ETHERTYPE_X25 = 0x805 + ETHERTYPE_X75 = 0x801 + ETHERTYPE_XNSSM = 0x9001 + ETHERTYPE_XTP = 0x817d + ETHER_ADDR_LEN = 0x6 + ETHER_ALIGN = 0x2 + ETHER_CRC_LEN = 0x4 + ETHER_CRC_POLY_BE = 0x4c11db6 + ETHER_CRC_POLY_LE = 0xedb88320 + ETHER_HDR_LEN = 0xe + ETHER_MAX_DIX_LEN = 0x600 + ETHER_MAX_LEN = 0x5ee + ETHER_MIN_LEN = 0x40 + ETHER_TYPE_LEN = 0x2 + ETHER_VLAN_ENCAP_LEN = 0x4 + EVFILT_AIO = -0x3 + EVFILT_PROC = -0x5 + EVFILT_READ = -0x1 + EVFILT_SIGNAL = -0x6 + EVFILT_SYSCOUNT = 0x7 + EVFILT_TIMER = -0x7 + EVFILT_VNODE = -0x4 + EVFILT_WRITE = -0x2 + EV_ADD = 0x1 + EV_CLEAR = 0x20 + EV_DELETE = 0x2 + EV_DISABLE = 0x8 + EV_ENABLE = 0x4 + EV_EOF = 0x8000 + EV_ERROR = 0x4000 + EV_FLAG1 = 0x2000 + EV_ONESHOT = 0x10 + EV_SYSFLAGS = 0xf000 + EXTA = 0x4b00 + EXTB = 0x9600 + EXTPROC = 0x800 + FD_CLOEXEC = 0x1 + FD_SETSIZE = 0x400 + FLUSHO = 0x800000 + F_DUPFD = 0x0 + F_DUPFD_CLOEXEC = 0xa + F_GETFD = 0x1 + F_GETFL = 0x3 + F_GETLK = 0x7 + F_GETOWN = 0x5 + F_OK = 0x0 + F_RDLCK = 0x1 + F_SETFD = 0x2 + F_SETFL = 0x4 + F_SETLK = 0x8 + F_SETLKW = 0x9 + F_SETOWN = 0x6 + F_UNLCK = 0x2 + F_WRLCK = 0x3 + HUPCL = 0x4000 + ICANON = 0x100 + ICMP6_FILTER = 0x12 + ICRNL = 0x100 + IEXTEN = 0x400 + IFAN_ARRIVAL = 0x0 + IFAN_DEPARTURE = 0x1 + IFA_ROUTE = 0x1 + IFF_ALLMULTI = 0x200 + IFF_BROADCAST = 0x2 + IFF_CANTCHANGE = 0x8e52 + IFF_DEBUG = 0x4 + IFF_LINK0 = 0x1000 + IFF_LINK1 = 0x2000 + IFF_LINK2 = 0x4000 + IFF_LOOPBACK = 0x8 + IFF_MULTICAST = 0x8000 + IFF_NOARP = 0x80 + IFF_NOTRAILERS = 0x20 + IFF_OACTIVE = 0x400 + IFF_POINTOPOINT = 0x10 + IFF_PROMISC = 0x100 + IFF_RUNNING = 0x40 + IFF_SIMPLEX = 0x800 + IFF_UP = 0x1 + IFNAMSIZ = 0x10 + IFT_1822 = 0x2 + IFT_A12MPPSWITCH = 0x82 + IFT_AAL2 = 0xbb + IFT_AAL5 = 0x31 + IFT_ADSL = 0x5e + IFT_AFLANE8023 = 0x3b + IFT_AFLANE8025 = 0x3c + IFT_ARAP = 0x58 + IFT_ARCNET = 0x23 + IFT_ARCNETPLUS = 0x24 + IFT_ASYNC = 0x54 + IFT_ATM = 0x25 + IFT_ATMDXI = 0x69 + IFT_ATMFUNI = 0x6a + IFT_ATMIMA = 0x6b + IFT_ATMLOGICAL = 0x50 + IFT_ATMRADIO = 0xbd + IFT_ATMSUBINTERFACE = 0x86 + IFT_ATMVCIENDPT = 0xc2 + IFT_ATMVIRTUAL = 0x95 + IFT_BGPPOLICYACCOUNTING = 0xa2 + IFT_BLUETOOTH = 0xf8 + IFT_BRIDGE = 0xd1 + IFT_BSC = 0x53 + IFT_CARP = 0xf7 + IFT_CCTEMUL = 0x3d + IFT_CEPT = 0x13 + IFT_CES = 0x85 + IFT_CHANNEL = 0x46 + IFT_CNR = 0x55 + IFT_COFFEE = 0x84 + IFT_COMPOSITELINK = 0x9b + IFT_DCN = 0x8d + IFT_DIGITALPOWERLINE = 0x8a + IFT_DIGITALWRAPPEROVERHEADCHANNEL = 0xba + IFT_DLSW = 0x4a + IFT_DOCSCABLEDOWNSTREAM = 0x80 + IFT_DOCSCABLEMACLAYER = 0x7f + IFT_DOCSCABLEUPSTREAM = 0x81 + IFT_DOCSCABLEUPSTREAMCHANNEL = 0xcd + IFT_DS0 = 0x51 + IFT_DS0BUNDLE = 0x52 + IFT_DS1FDL = 0xaa + IFT_DS3 = 0x1e + IFT_DTM = 0x8c + IFT_DUMMY = 0xf1 + IFT_DVBASILN = 0xac + IFT_DVBASIOUT = 0xad + IFT_DVBRCCDOWNSTREAM = 0x93 + IFT_DVBRCCMACLAYER = 0x92 + IFT_DVBRCCUPSTREAM = 0x94 + IFT_ECONET = 0xce + IFT_ENC = 0xf4 + IFT_EON = 0x19 + IFT_EPLRS = 0x57 + IFT_ESCON = 0x49 + IFT_ETHER = 0x6 + IFT_FAITH = 0xf3 + IFT_FAST = 0x7d + IFT_FASTETHER = 0x3e + IFT_FASTETHERFX = 0x45 + IFT_FDDI = 0xf + IFT_FIBRECHANNEL = 0x38 + IFT_FRAMERELAYINTERCONNECT = 0x3a + IFT_FRAMERELAYMPI = 0x5c + IFT_FRDLCIENDPT = 0xc1 + IFT_FRELAY = 0x20 + IFT_FRELAYDCE = 0x2c + IFT_FRF16MFRBUNDLE = 0xa3 + IFT_FRFORWARD = 0x9e + IFT_G703AT2MB = 0x43 + IFT_G703AT64K = 0x42 + IFT_GIF = 0xf0 + IFT_GIGABITETHERNET = 0x75 + IFT_GR303IDT = 0xb2 + IFT_GR303RDT = 0xb1 + 
IFT_H323GATEKEEPER = 0xa4 + IFT_H323PROXY = 0xa5 + IFT_HDH1822 = 0x3 + IFT_HDLC = 0x76 + IFT_HDSL2 = 0xa8 + IFT_HIPERLAN2 = 0xb7 + IFT_HIPPI = 0x2f + IFT_HIPPIINTERFACE = 0x39 + IFT_HOSTPAD = 0x5a + IFT_HSSI = 0x2e + IFT_HY = 0xe + IFT_IBM370PARCHAN = 0x48 + IFT_IDSL = 0x9a + IFT_IEEE1394 = 0x90 + IFT_IEEE80211 = 0x47 + IFT_IEEE80212 = 0x37 + IFT_IEEE8023ADLAG = 0xa1 + IFT_IFGSN = 0x91 + IFT_IMT = 0xbe + IFT_INFINIBAND = 0xc7 + IFT_INTERLEAVE = 0x7c + IFT_IP = 0x7e + IFT_IPFORWARD = 0x8e + IFT_IPOVERATM = 0x72 + IFT_IPOVERCDLC = 0x6d + IFT_IPOVERCLAW = 0x6e + IFT_IPSWITCH = 0x4e + IFT_ISDN = 0x3f + IFT_ISDNBASIC = 0x14 + IFT_ISDNPRIMARY = 0x15 + IFT_ISDNS = 0x4b + IFT_ISDNU = 0x4c + IFT_ISO88022LLC = 0x29 + IFT_ISO88023 = 0x7 + IFT_ISO88024 = 0x8 + IFT_ISO88025 = 0x9 + IFT_ISO88025CRFPINT = 0x62 + IFT_ISO88025DTR = 0x56 + IFT_ISO88025FIBER = 0x73 + IFT_ISO88026 = 0xa + IFT_ISUP = 0xb3 + IFT_L2VLAN = 0x87 + IFT_L3IPVLAN = 0x88 + IFT_L3IPXVLAN = 0x89 + IFT_LAPB = 0x10 + IFT_LAPD = 0x4d + IFT_LAPF = 0x77 + IFT_LINEGROUP = 0xd2 + IFT_LOCALTALK = 0x2a + IFT_LOOP = 0x18 + IFT_MEDIAMAILOVERIP = 0x8b + IFT_MFSIGLINK = 0xa7 + IFT_MIOX25 = 0x26 + IFT_MODEM = 0x30 + IFT_MPC = 0x71 + IFT_MPLS = 0xa6 + IFT_MPLSTUNNEL = 0x96 + IFT_MSDSL = 0x8f + IFT_MVL = 0xbf + IFT_MYRINET = 0x63 + IFT_NFAS = 0xaf + IFT_NSIP = 0x1b + IFT_OPTICALCHANNEL = 0xc3 + IFT_OPTICALTRANSPORT = 0xc4 + IFT_OTHER = 0x1 + IFT_P10 = 0xc + IFT_P80 = 0xd + IFT_PARA = 0x22 + IFT_PFLOG = 0xf5 + IFT_PFLOW = 0xf9 + IFT_PFSYNC = 0xf6 + IFT_PLC = 0xae + IFT_PON155 = 0xcf + IFT_PON622 = 0xd0 + IFT_POS = 0xab + IFT_PPP = 0x17 + IFT_PPPMULTILINKBUNDLE = 0x6c + IFT_PROPATM = 0xc5 + IFT_PROPBWAP2MP = 0xb8 + IFT_PROPCNLS = 0x59 + IFT_PROPDOCSWIRELESSDOWNSTREAM = 0xb5 + IFT_PROPDOCSWIRELESSMACLAYER = 0xb4 + IFT_PROPDOCSWIRELESSUPSTREAM = 0xb6 + IFT_PROPMUX = 0x36 + IFT_PROPVIRTUAL = 0x35 + IFT_PROPWIRELESSP2P = 0x9d + IFT_PTPSERIAL = 0x16 + IFT_PVC = 0xf2 + IFT_Q2931 = 0xc9 + IFT_QLLC = 0x44 + IFT_RADIOMAC = 0xbc + IFT_RADSL = 0x5f + IFT_REACHDSL = 0xc0 + IFT_RFC1483 = 0x9f + IFT_RS232 = 0x21 + IFT_RSRB = 0x4f + IFT_SDLC = 0x11 + IFT_SDSL = 0x60 + IFT_SHDSL = 0xa9 + IFT_SIP = 0x1f + IFT_SIPSIG = 0xcc + IFT_SIPTG = 0xcb + IFT_SLIP = 0x1c + IFT_SMDSDXI = 0x2b + IFT_SMDSICIP = 0x34 + IFT_SONET = 0x27 + IFT_SONETOVERHEADCHANNEL = 0xb9 + IFT_SONETPATH = 0x32 + IFT_SONETVT = 0x33 + IFT_SRP = 0x97 + IFT_SS7SIGLINK = 0x9c + IFT_STACKTOSTACK = 0x6f + IFT_STARLAN = 0xb + IFT_T1 = 0x12 + IFT_TDLC = 0x74 + IFT_TELINK = 0xc8 + IFT_TERMPAD = 0x5b + IFT_TR008 = 0xb0 + IFT_TRANSPHDLC = 0x7b + IFT_TUNNEL = 0x83 + IFT_ULTRA = 0x1d + IFT_USB = 0xa0 + IFT_V11 = 0x40 + IFT_V35 = 0x2d + IFT_V36 = 0x41 + IFT_V37 = 0x78 + IFT_VDSL = 0x61 + IFT_VIRTUALIPADDRESS = 0x70 + IFT_VIRTUALTG = 0xca + IFT_VOICEDID = 0xd5 + IFT_VOICEEM = 0x64 + IFT_VOICEEMFGD = 0xd3 + IFT_VOICEENCAP = 0x67 + IFT_VOICEFGDEANA = 0xd4 + IFT_VOICEFXO = 0x65 + IFT_VOICEFXS = 0x66 + IFT_VOICEOVERATM = 0x98 + IFT_VOICEOVERCABLE = 0xc6 + IFT_VOICEOVERFRAMERELAY = 0x99 + IFT_VOICEOVERIP = 0x68 + IFT_X213 = 0x5d + IFT_X25 = 0x5 + IFT_X25DDN = 0x4 + IFT_X25HUNTGROUP = 0x7a + IFT_X25MLP = 0x79 + IFT_X25PLE = 0x28 + IFT_XETHER = 0x1a + IGNBRK = 0x1 + IGNCR = 0x80 + IGNPAR = 0x4 + IMAXBEL = 0x2000 + INLCR = 0x40 + INPCK = 0x10 + IN_CLASSA_HOST = 0xffffff + IN_CLASSA_MAX = 0x80 + IN_CLASSA_NET = 0xff000000 + IN_CLASSA_NSHIFT = 0x18 + IN_CLASSB_HOST = 0xffff + IN_CLASSB_MAX = 0x10000 + IN_CLASSB_NET = 0xffff0000 + IN_CLASSB_NSHIFT = 0x10 + IN_CLASSC_HOST = 0xff + IN_CLASSC_NET = 0xffffff00 + IN_CLASSC_NSHIFT = 0x8 
+ IN_CLASSD_HOST = 0xfffffff + IN_CLASSD_NET = 0xf0000000 + IN_CLASSD_NSHIFT = 0x1c + IN_LOOPBACKNET = 0x7f + IN_RFC3021_HOST = 0x1 + IN_RFC3021_NET = 0xfffffffe + IN_RFC3021_NSHIFT = 0x1f + IPPROTO_AH = 0x33 + IPPROTO_CARP = 0x70 + IPPROTO_DIVERT = 0x102 + IPPROTO_DIVERT_INIT = 0x2 + IPPROTO_DIVERT_RESP = 0x1 + IPPROTO_DONE = 0x101 + IPPROTO_DSTOPTS = 0x3c + IPPROTO_EGP = 0x8 + IPPROTO_ENCAP = 0x62 + IPPROTO_EON = 0x50 + IPPROTO_ESP = 0x32 + IPPROTO_ETHERIP = 0x61 + IPPROTO_FRAGMENT = 0x2c + IPPROTO_GGP = 0x3 + IPPROTO_GRE = 0x2f + IPPROTO_HOPOPTS = 0x0 + IPPROTO_ICMP = 0x1 + IPPROTO_ICMPV6 = 0x3a + IPPROTO_IDP = 0x16 + IPPROTO_IGMP = 0x2 + IPPROTO_IP = 0x0 + IPPROTO_IPCOMP = 0x6c + IPPROTO_IPIP = 0x4 + IPPROTO_IPV4 = 0x4 + IPPROTO_IPV6 = 0x29 + IPPROTO_MAX = 0x100 + IPPROTO_MAXID = 0x103 + IPPROTO_MOBILE = 0x37 + IPPROTO_MPLS = 0x89 + IPPROTO_NONE = 0x3b + IPPROTO_PFSYNC = 0xf0 + IPPROTO_PIM = 0x67 + IPPROTO_PUP = 0xc + IPPROTO_RAW = 0xff + IPPROTO_ROUTING = 0x2b + IPPROTO_RSVP = 0x2e + IPPROTO_TCP = 0x6 + IPPROTO_TP = 0x1d + IPPROTO_UDP = 0x11 + IPV6_AUTH_LEVEL = 0x35 + IPV6_AUTOFLOWLABEL = 0x3b + IPV6_CHECKSUM = 0x1a + IPV6_DEFAULT_MULTICAST_HOPS = 0x1 + IPV6_DEFAULT_MULTICAST_LOOP = 0x1 + IPV6_DEFHLIM = 0x40 + IPV6_DONTFRAG = 0x3e + IPV6_DSTOPTS = 0x32 + IPV6_ESP_NETWORK_LEVEL = 0x37 + IPV6_ESP_TRANS_LEVEL = 0x36 + IPV6_FAITH = 0x1d + IPV6_FLOWINFO_MASK = 0xffffff0f + IPV6_FLOWLABEL_MASK = 0xffff0f00 + IPV6_FRAGTTL = 0x78 + IPV6_HLIMDEC = 0x1 + IPV6_HOPLIMIT = 0x2f + IPV6_HOPOPTS = 0x31 + IPV6_IPCOMP_LEVEL = 0x3c + IPV6_JOIN_GROUP = 0xc + IPV6_LEAVE_GROUP = 0xd + IPV6_MAXHLIM = 0xff + IPV6_MAXPACKET = 0xffff + IPV6_MMTU = 0x500 + IPV6_MULTICAST_HOPS = 0xa + IPV6_MULTICAST_IF = 0x9 + IPV6_MULTICAST_LOOP = 0xb + IPV6_NEXTHOP = 0x30 + IPV6_OPTIONS = 0x1 + IPV6_PATHMTU = 0x2c + IPV6_PIPEX = 0x3f + IPV6_PKTINFO = 0x2e + IPV6_PORTRANGE = 0xe + IPV6_PORTRANGE_DEFAULT = 0x0 + IPV6_PORTRANGE_HIGH = 0x1 + IPV6_PORTRANGE_LOW = 0x2 + IPV6_RECVDSTOPTS = 0x28 + IPV6_RECVDSTPORT = 0x40 + IPV6_RECVHOPLIMIT = 0x25 + IPV6_RECVHOPOPTS = 0x27 + IPV6_RECVPATHMTU = 0x2b + IPV6_RECVPKTINFO = 0x24 + IPV6_RECVRTHDR = 0x26 + IPV6_RECVTCLASS = 0x39 + IPV6_RTABLE = 0x1021 + IPV6_RTHDR = 0x33 + IPV6_RTHDRDSTOPTS = 0x23 + IPV6_RTHDR_LOOSE = 0x0 + IPV6_RTHDR_STRICT = 0x1 + IPV6_RTHDR_TYPE_0 = 0x0 + IPV6_SOCKOPT_RESERVED1 = 0x3 + IPV6_TCLASS = 0x3d + IPV6_UNICAST_HOPS = 0x4 + IPV6_USE_MIN_MTU = 0x2a + IPV6_V6ONLY = 0x1b + IPV6_VERSION = 0x60 + IPV6_VERSION_MASK = 0xf0 + IP_ADD_MEMBERSHIP = 0xc + IP_AUTH_LEVEL = 0x14 + IP_DEFAULT_MULTICAST_LOOP = 0x1 + IP_DEFAULT_MULTICAST_TTL = 0x1 + IP_DF = 0x4000 + IP_DIVERTFL = 0x1022 + IP_DROP_MEMBERSHIP = 0xd + IP_ESP_NETWORK_LEVEL = 0x16 + IP_ESP_TRANS_LEVEL = 0x15 + IP_HDRINCL = 0x2 + IP_IPCOMP_LEVEL = 0x1d + IP_IPSECFLOWINFO = 0x24 + IP_IPSEC_LOCAL_AUTH = 0x1b + IP_IPSEC_LOCAL_CRED = 0x19 + IP_IPSEC_LOCAL_ID = 0x17 + IP_IPSEC_REMOTE_AUTH = 0x1c + IP_IPSEC_REMOTE_CRED = 0x1a + IP_IPSEC_REMOTE_ID = 0x18 + IP_MAXPACKET = 0xffff + IP_MAX_MEMBERSHIPS = 0xfff + IP_MF = 0x2000 + IP_MINTTL = 0x20 + IP_MIN_MEMBERSHIPS = 0xf + IP_MSS = 0x240 + IP_MULTICAST_IF = 0x9 + IP_MULTICAST_LOOP = 0xb + IP_MULTICAST_TTL = 0xa + IP_OFFMASK = 0x1fff + IP_OPTIONS = 0x1 + IP_PIPEX = 0x22 + IP_PORTRANGE = 0x13 + IP_PORTRANGE_DEFAULT = 0x0 + IP_PORTRANGE_HIGH = 0x1 + IP_PORTRANGE_LOW = 0x2 + IP_RECVDSTADDR = 0x7 + IP_RECVDSTPORT = 0x21 + IP_RECVIF = 0x1e + IP_RECVOPTS = 0x5 + IP_RECVRETOPTS = 0x6 + IP_RECVRTABLE = 0x23 + IP_RECVTTL = 0x1f + IP_RETOPTS = 0x8 + IP_RF = 0x8000 + IP_RTABLE = 0x1021 + 
IP_TOS = 0x3 + IP_TTL = 0x4 + ISIG = 0x80 + ISTRIP = 0x20 + IXANY = 0x800 + IXOFF = 0x400 + IXON = 0x200 + LCNT_OVERLOAD_FLUSH = 0x6 + LOCK_EX = 0x2 + LOCK_NB = 0x4 + LOCK_SH = 0x1 + LOCK_UN = 0x8 + MADV_DONTNEED = 0x4 + MADV_FREE = 0x6 + MADV_NORMAL = 0x0 + MADV_RANDOM = 0x1 + MADV_SEQUENTIAL = 0x2 + MADV_SPACEAVAIL = 0x5 + MADV_WILLNEED = 0x3 + MAP_ANON = 0x1000 + MAP_COPY = 0x4 + MAP_FILE = 0x0 + MAP_FIXED = 0x10 + MAP_FLAGMASK = 0x1ff7 + MAP_HASSEMAPHORE = 0x200 + MAP_INHERIT = 0x80 + MAP_INHERIT_COPY = 0x1 + MAP_INHERIT_DONATE_COPY = 0x3 + MAP_INHERIT_NONE = 0x2 + MAP_INHERIT_SHARE = 0x0 + MAP_NOEXTEND = 0x100 + MAP_NORESERVE = 0x40 + MAP_PRIVATE = 0x2 + MAP_RENAME = 0x20 + MAP_SHARED = 0x1 + MAP_TRYFIXED = 0x400 + MCL_CURRENT = 0x1 + MCL_FUTURE = 0x2 + MSG_BCAST = 0x100 + MSG_CTRUNC = 0x20 + MSG_DONTROUTE = 0x4 + MSG_DONTWAIT = 0x80 + MSG_EOR = 0x8 + MSG_MCAST = 0x200 + MSG_NOSIGNAL = 0x400 + MSG_OOB = 0x1 + MSG_PEEK = 0x2 + MSG_TRUNC = 0x10 + MSG_WAITALL = 0x40 + MS_ASYNC = 0x1 + MS_INVALIDATE = 0x4 + MS_SYNC = 0x2 + NAME_MAX = 0xff + NET_RT_DUMP = 0x1 + NET_RT_FLAGS = 0x2 + NET_RT_IFLIST = 0x3 + NET_RT_MAXID = 0x6 + NET_RT_STATS = 0x4 + NET_RT_TABLE = 0x5 + NOFLSH = 0x80000000 + NOTE_ATTRIB = 0x8 + NOTE_CHILD = 0x4 + NOTE_DELETE = 0x1 + NOTE_EOF = 0x2 + NOTE_EXEC = 0x20000000 + NOTE_EXIT = 0x80000000 + NOTE_EXTEND = 0x4 + NOTE_FORK = 0x40000000 + NOTE_LINK = 0x10 + NOTE_LOWAT = 0x1 + NOTE_PCTRLMASK = 0xf0000000 + NOTE_PDATAMASK = 0xfffff + NOTE_RENAME = 0x20 + NOTE_REVOKE = 0x40 + NOTE_TRACK = 0x1 + NOTE_TRACKERR = 0x2 + NOTE_TRUNCATE = 0x80 + NOTE_WRITE = 0x2 + OCRNL = 0x10 + ONLCR = 0x2 + ONLRET = 0x80 + ONOCR = 0x40 + ONOEOT = 0x8 + OPOST = 0x1 + O_ACCMODE = 0x3 + O_APPEND = 0x8 + O_ASYNC = 0x40 + O_CLOEXEC = 0x10000 + O_CREAT = 0x200 + O_DIRECTORY = 0x20000 + O_DSYNC = 0x80 + O_EXCL = 0x800 + O_EXLOCK = 0x20 + O_FSYNC = 0x80 + O_NDELAY = 0x4 + O_NOCTTY = 0x8000 + O_NOFOLLOW = 0x100 + O_NONBLOCK = 0x4 + O_RDONLY = 0x0 + O_RDWR = 0x2 + O_RSYNC = 0x80 + O_SHLOCK = 0x10 + O_SYNC = 0x80 + O_TRUNC = 0x400 + O_WRONLY = 0x1 + PARENB = 0x1000 + PARMRK = 0x8 + PARODD = 0x2000 + PENDIN = 0x20000000 + PF_FLUSH = 0x1 + PRIO_PGRP = 0x1 + PRIO_PROCESS = 0x0 + PRIO_USER = 0x2 + PROT_EXEC = 0x4 + PROT_NONE = 0x0 + PROT_READ = 0x1 + PROT_WRITE = 0x2 + PT_MASK = 0x3ff000 + RLIMIT_CORE = 0x4 + RLIMIT_CPU = 0x0 + RLIMIT_DATA = 0x2 + RLIMIT_FSIZE = 0x1 + RLIMIT_NOFILE = 0x8 + RLIMIT_STACK = 0x3 + RLIM_INFINITY = 0x7fffffffffffffff + RTAX_AUTHOR = 0x6 + RTAX_BRD = 0x7 + RTAX_DST = 0x0 + RTAX_GATEWAY = 0x1 + RTAX_GENMASK = 0x3 + RTAX_IFA = 0x5 + RTAX_IFP = 0x4 + RTAX_LABEL = 0xa + RTAX_MAX = 0xb + RTAX_NETMASK = 0x2 + RTAX_SRC = 0x8 + RTAX_SRCMASK = 0x9 + RTA_AUTHOR = 0x40 + RTA_BRD = 0x80 + RTA_DST = 0x1 + RTA_GATEWAY = 0x2 + RTA_GENMASK = 0x8 + RTA_IFA = 0x20 + RTA_IFP = 0x10 + RTA_LABEL = 0x400 + RTA_NETMASK = 0x4 + RTA_SRC = 0x100 + RTA_SRCMASK = 0x200 + RTF_ANNOUNCE = 0x4000 + RTF_BLACKHOLE = 0x1000 + RTF_CLONED = 0x10000 + RTF_CLONING = 0x100 + RTF_DONE = 0x40 + RTF_DYNAMIC = 0x10 + RTF_FMASK = 0x10f808 + RTF_GATEWAY = 0x2 + RTF_HOST = 0x4 + RTF_LLINFO = 0x400 + RTF_MASK = 0x80 + RTF_MODIFIED = 0x20 + RTF_MPATH = 0x40000 + RTF_MPLS = 0x100000 + RTF_PERMANENT_ARP = 0x2000 + RTF_PROTO1 = 0x8000 + RTF_PROTO2 = 0x4000 + RTF_PROTO3 = 0x2000 + RTF_REJECT = 0x8 + RTF_SOURCE = 0x20000 + RTF_STATIC = 0x800 + RTF_TUNNEL = 0x100000 + RTF_UP = 0x1 + RTF_USETRAILERS = 0x8000 + RTF_XRESOLVE = 0x200 + RTM_ADD = 0x1 + RTM_CHANGE = 0x3 + RTM_DELADDR = 0xd + RTM_DELETE = 0x2 + RTM_DESYNC = 0x10 + RTM_GET = 0x4 + 
RTM_IFANNOUNCE = 0xf + RTM_IFINFO = 0xe + RTM_LOCK = 0x8 + RTM_LOSING = 0x5 + RTM_MAXSIZE = 0x800 + RTM_MISS = 0x7 + RTM_NEWADDR = 0xc + RTM_REDIRECT = 0x6 + RTM_RESOLVE = 0xb + RTM_RTTUNIT = 0xf4240 + RTM_VERSION = 0x5 + RTV_EXPIRE = 0x4 + RTV_HOPCOUNT = 0x2 + RTV_MTU = 0x1 + RTV_RPIPE = 0x8 + RTV_RTT = 0x40 + RTV_RTTVAR = 0x80 + RTV_SPIPE = 0x10 + RTV_SSTHRESH = 0x20 + RT_TABLEID_MAX = 0xff + RUSAGE_CHILDREN = -0x1 + RUSAGE_SELF = 0x0 + RUSAGE_THREAD = 0x1 + SCM_RIGHTS = 0x1 + SCM_TIMESTAMP = 0x4 + SHUT_RD = 0x0 + SHUT_RDWR = 0x2 + SHUT_WR = 0x1 + SIOCADDMULTI = 0x80206931 + SIOCAIFADDR = 0x8040691a + SIOCAIFGROUP = 0x80246987 + SIOCALIFADDR = 0x8218691c + SIOCATMARK = 0x40047307 + SIOCBRDGADD = 0x8054693c + SIOCBRDGADDS = 0x80546941 + SIOCBRDGARL = 0x806e694d + SIOCBRDGDADDR = 0x81286947 + SIOCBRDGDEL = 0x8054693d + SIOCBRDGDELS = 0x80546942 + SIOCBRDGFLUSH = 0x80546948 + SIOCBRDGFRL = 0x806e694e + SIOCBRDGGCACHE = 0xc0146941 + SIOCBRDGGFD = 0xc0146952 + SIOCBRDGGHT = 0xc0146951 + SIOCBRDGGIFFLGS = 0xc054693e + SIOCBRDGGMA = 0xc0146953 + SIOCBRDGGPARAM = 0xc03c6958 + SIOCBRDGGPRI = 0xc0146950 + SIOCBRDGGRL = 0xc028694f + SIOCBRDGGSIFS = 0xc054693c + SIOCBRDGGTO = 0xc0146946 + SIOCBRDGIFS = 0xc0546942 + SIOCBRDGRTS = 0xc0186943 + SIOCBRDGSADDR = 0xc1286944 + SIOCBRDGSCACHE = 0x80146940 + SIOCBRDGSFD = 0x80146952 + SIOCBRDGSHT = 0x80146951 + SIOCBRDGSIFCOST = 0x80546955 + SIOCBRDGSIFFLGS = 0x8054693f + SIOCBRDGSIFPRIO = 0x80546954 + SIOCBRDGSMA = 0x80146953 + SIOCBRDGSPRI = 0x80146950 + SIOCBRDGSPROTO = 0x8014695a + SIOCBRDGSTO = 0x80146945 + SIOCBRDGSTXHC = 0x80146959 + SIOCDELMULTI = 0x80206932 + SIOCDIFADDR = 0x80206919 + SIOCDIFGROUP = 0x80246989 + SIOCDIFPHYADDR = 0x80206949 + SIOCDLIFADDR = 0x8218691e + SIOCGETKALIVE = 0xc01869a4 + SIOCGETLABEL = 0x8020699a + SIOCGETPFLOW = 0xc02069fe + SIOCGETPFSYNC = 0xc02069f8 + SIOCGETSGCNT = 0xc0147534 + SIOCGETVIFCNT = 0xc0147533 + SIOCGETVLAN = 0xc0206990 + SIOCGHIWAT = 0x40047301 + SIOCGIFADDR = 0xc0206921 + SIOCGIFASYNCMAP = 0xc020697c + SIOCGIFBRDADDR = 0xc0206923 + SIOCGIFCONF = 0xc0086924 + SIOCGIFDATA = 0xc020691b + SIOCGIFDESCR = 0xc0206981 + SIOCGIFDSTADDR = 0xc0206922 + SIOCGIFFLAGS = 0xc0206911 + SIOCGIFGATTR = 0xc024698b + SIOCGIFGENERIC = 0xc020693a + SIOCGIFGMEMB = 0xc024698a + SIOCGIFGROUP = 0xc0246988 + SIOCGIFHARDMTU = 0xc02069a5 + SIOCGIFMEDIA = 0xc0286936 + SIOCGIFMETRIC = 0xc0206917 + SIOCGIFMTU = 0xc020697e + SIOCGIFNETMASK = 0xc0206925 + SIOCGIFPDSTADDR = 0xc0206948 + SIOCGIFPRIORITY = 0xc020699c + SIOCGIFPSRCADDR = 0xc0206947 + SIOCGIFRDOMAIN = 0xc02069a0 + SIOCGIFRTLABEL = 0xc0206983 + SIOCGIFTIMESLOT = 0xc0206986 + SIOCGIFXFLAGS = 0xc020699e + SIOCGLIFADDR = 0xc218691d + SIOCGLIFPHYADDR = 0xc218694b + SIOCGLIFPHYRTABLE = 0xc02069a2 + SIOCGLIFPHYTTL = 0xc02069a9 + SIOCGLOWAT = 0x40047303 + SIOCGPGRP = 0x40047309 + SIOCGSPPPPARAMS = 0xc0206994 + SIOCGVH = 0xc02069f6 + SIOCGVNETID = 0xc02069a7 + SIOCIFCREATE = 0x8020697a + SIOCIFDESTROY = 0x80206979 + SIOCIFGCLONERS = 0xc00c6978 + SIOCSETKALIVE = 0x801869a3 + SIOCSETLABEL = 0x80206999 + SIOCSETPFLOW = 0x802069fd + SIOCSETPFSYNC = 0x802069f7 + SIOCSETVLAN = 0x8020698f + SIOCSHIWAT = 0x80047300 + SIOCSIFADDR = 0x8020690c + SIOCSIFASYNCMAP = 0x8020697d + SIOCSIFBRDADDR = 0x80206913 + SIOCSIFDESCR = 0x80206980 + SIOCSIFDSTADDR = 0x8020690e + SIOCSIFFLAGS = 0x80206910 + SIOCSIFGATTR = 0x8024698c + SIOCSIFGENERIC = 0x80206939 + SIOCSIFLLADDR = 0x8020691f + SIOCSIFMEDIA = 0xc0206935 + SIOCSIFMETRIC = 0x80206918 + SIOCSIFMTU = 0x8020697f + SIOCSIFNETMASK = 0x80206916 + 
SIOCSIFPHYADDR = 0x80406946 + SIOCSIFPRIORITY = 0x8020699b + SIOCSIFRDOMAIN = 0x8020699f + SIOCSIFRTLABEL = 0x80206982 + SIOCSIFTIMESLOT = 0x80206985 + SIOCSIFXFLAGS = 0x8020699d + SIOCSLIFPHYADDR = 0x8218694a + SIOCSLIFPHYRTABLE = 0x802069a1 + SIOCSLIFPHYTTL = 0x802069a8 + SIOCSLOWAT = 0x80047302 + SIOCSPGRP = 0x80047308 + SIOCSSPPPPARAMS = 0x80206993 + SIOCSVH = 0xc02069f5 + SIOCSVNETID = 0x802069a6 + SOCK_DGRAM = 0x2 + SOCK_RAW = 0x3 + SOCK_RDM = 0x4 + SOCK_SEQPACKET = 0x5 + SOCK_STREAM = 0x1 + SOL_SOCKET = 0xffff + SOMAXCONN = 0x80 + SO_ACCEPTCONN = 0x2 + SO_BINDANY = 0x1000 + SO_BROADCAST = 0x20 + SO_DEBUG = 0x1 + SO_DONTROUTE = 0x10 + SO_ERROR = 0x1007 + SO_KEEPALIVE = 0x8 + SO_LINGER = 0x80 + SO_NETPROC = 0x1020 + SO_OOBINLINE = 0x100 + SO_PEERCRED = 0x1022 + SO_RCVBUF = 0x1002 + SO_RCVLOWAT = 0x1004 + SO_RCVTIMEO = 0x1006 + SO_REUSEADDR = 0x4 + SO_REUSEPORT = 0x200 + SO_RTABLE = 0x1021 + SO_SNDBUF = 0x1001 + SO_SNDLOWAT = 0x1003 + SO_SNDTIMEO = 0x1005 + SO_SPLICE = 0x1023 + SO_TIMESTAMP = 0x800 + SO_TYPE = 0x1008 + SO_USELOOPBACK = 0x40 + TCIFLUSH = 0x1 + TCIOFLUSH = 0x3 + TCOFLUSH = 0x2 + TCP_MAXBURST = 0x4 + TCP_MAXSEG = 0x2 + TCP_MAXWIN = 0xffff + TCP_MAX_SACK = 0x3 + TCP_MAX_WINSHIFT = 0xe + TCP_MD5SIG = 0x4 + TCP_MSS = 0x200 + TCP_NODELAY = 0x1 + TCP_NOPUSH = 0x10 + TCP_NSTATES = 0xb + TCP_SACK_ENABLE = 0x8 + TCSAFLUSH = 0x2 + TIOCCBRK = 0x2000747a + TIOCCDTR = 0x20007478 + TIOCCONS = 0x80047462 + TIOCDRAIN = 0x2000745e + TIOCEXCL = 0x2000740d + TIOCEXT = 0x80047460 + TIOCFLAG_CLOCAL = 0x2 + TIOCFLAG_CRTSCTS = 0x4 + TIOCFLAG_MDMBUF = 0x8 + TIOCFLAG_PPS = 0x10 + TIOCFLAG_SOFTCAR = 0x1 + TIOCFLUSH = 0x80047410 + TIOCGETA = 0x402c7413 + TIOCGETD = 0x4004741a + TIOCGFLAGS = 0x4004745d + TIOCGPGRP = 0x40047477 + TIOCGSID = 0x40047463 + TIOCGTSTAMP = 0x400c745b + TIOCGWINSZ = 0x40087468 + TIOCMBIC = 0x8004746b + TIOCMBIS = 0x8004746c + TIOCMGET = 0x4004746a + TIOCMODG = 0x4004746a + TIOCMODS = 0x8004746d + TIOCMSET = 0x8004746d + TIOCM_CAR = 0x40 + TIOCM_CD = 0x40 + TIOCM_CTS = 0x20 + TIOCM_DSR = 0x100 + TIOCM_DTR = 0x2 + TIOCM_LE = 0x1 + TIOCM_RI = 0x80 + TIOCM_RNG = 0x80 + TIOCM_RTS = 0x4 + TIOCM_SR = 0x10 + TIOCM_ST = 0x8 + TIOCNOTTY = 0x20007471 + TIOCNXCL = 0x2000740e + TIOCOUTQ = 0x40047473 + TIOCPKT = 0x80047470 + TIOCPKT_DATA = 0x0 + TIOCPKT_DOSTOP = 0x20 + TIOCPKT_FLUSHREAD = 0x1 + TIOCPKT_FLUSHWRITE = 0x2 + TIOCPKT_IOCTL = 0x40 + TIOCPKT_NOSTOP = 0x10 + TIOCPKT_START = 0x8 + TIOCPKT_STOP = 0x4 + TIOCREMOTE = 0x80047469 + TIOCSBRK = 0x2000747b + TIOCSCTTY = 0x20007461 + TIOCSDTR = 0x20007479 + TIOCSETA = 0x802c7414 + TIOCSETAF = 0x802c7416 + TIOCSETAW = 0x802c7415 + TIOCSETD = 0x8004741b + TIOCSFLAGS = 0x8004745c + TIOCSIG = 0x8004745f + TIOCSPGRP = 0x80047476 + TIOCSTART = 0x2000746e + TIOCSTAT = 0x80047465 + TIOCSTI = 0x80017472 + TIOCSTOP = 0x2000746f + TIOCSTSTAMP = 0x8008745a + TIOCSWINSZ = 0x80087467 + TIOCUCNTL = 0x80047466 + TOSTOP = 0x400000 + VDISCARD = 0xf + VDSUSP = 0xb + VEOF = 0x0 + VEOL = 0x1 + VEOL2 = 0x2 + VERASE = 0x3 + VINTR = 0x8 + VKILL = 0x5 + VLNEXT = 0xe + VMIN = 0x10 + VQUIT = 0x9 + VREPRINT = 0x6 + VSTART = 0xc + VSTATUS = 0x12 + VSTOP = 0xd + VSUSP = 0xa + VTIME = 0x11 + VWERASE = 0x4 + WALTSIG = 0x4 + WCONTINUED = 0x8 + WCOREFLAG = 0x80 + WNOHANG = 0x1 + WSTOPPED = 0x7f + WUNTRACED = 0x2 +) + +// Errors +const ( + E2BIG = syscall.Errno(0x7) + EACCES = syscall.Errno(0xd) + EADDRINUSE = syscall.Errno(0x30) + EADDRNOTAVAIL = syscall.Errno(0x31) + EAFNOSUPPORT = syscall.Errno(0x2f) + EAGAIN = syscall.Errno(0x23) + EALREADY = syscall.Errno(0x25) + 
EAUTH = syscall.Errno(0x50) + EBADF = syscall.Errno(0x9) + EBADRPC = syscall.Errno(0x48) + EBUSY = syscall.Errno(0x10) + ECANCELED = syscall.Errno(0x58) + ECHILD = syscall.Errno(0xa) + ECONNABORTED = syscall.Errno(0x35) + ECONNREFUSED = syscall.Errno(0x3d) + ECONNRESET = syscall.Errno(0x36) + EDEADLK = syscall.Errno(0xb) + EDESTADDRREQ = syscall.Errno(0x27) + EDOM = syscall.Errno(0x21) + EDQUOT = syscall.Errno(0x45) + EEXIST = syscall.Errno(0x11) + EFAULT = syscall.Errno(0xe) + EFBIG = syscall.Errno(0x1b) + EFTYPE = syscall.Errno(0x4f) + EHOSTDOWN = syscall.Errno(0x40) + EHOSTUNREACH = syscall.Errno(0x41) + EIDRM = syscall.Errno(0x59) + EILSEQ = syscall.Errno(0x54) + EINPROGRESS = syscall.Errno(0x24) + EINTR = syscall.Errno(0x4) + EINVAL = syscall.Errno(0x16) + EIO = syscall.Errno(0x5) + EIPSEC = syscall.Errno(0x52) + EISCONN = syscall.Errno(0x38) + EISDIR = syscall.Errno(0x15) + ELAST = syscall.Errno(0x5b) + ELOOP = syscall.Errno(0x3e) + EMEDIUMTYPE = syscall.Errno(0x56) + EMFILE = syscall.Errno(0x18) + EMLINK = syscall.Errno(0x1f) + EMSGSIZE = syscall.Errno(0x28) + ENAMETOOLONG = syscall.Errno(0x3f) + ENEEDAUTH = syscall.Errno(0x51) + ENETDOWN = syscall.Errno(0x32) + ENETRESET = syscall.Errno(0x34) + ENETUNREACH = syscall.Errno(0x33) + ENFILE = syscall.Errno(0x17) + ENOATTR = syscall.Errno(0x53) + ENOBUFS = syscall.Errno(0x37) + ENODEV = syscall.Errno(0x13) + ENOENT = syscall.Errno(0x2) + ENOEXEC = syscall.Errno(0x8) + ENOLCK = syscall.Errno(0x4d) + ENOMEDIUM = syscall.Errno(0x55) + ENOMEM = syscall.Errno(0xc) + ENOMSG = syscall.Errno(0x5a) + ENOPROTOOPT = syscall.Errno(0x2a) + ENOSPC = syscall.Errno(0x1c) + ENOSYS = syscall.Errno(0x4e) + ENOTBLK = syscall.Errno(0xf) + ENOTCONN = syscall.Errno(0x39) + ENOTDIR = syscall.Errno(0x14) + ENOTEMPTY = syscall.Errno(0x42) + ENOTSOCK = syscall.Errno(0x26) + ENOTSUP = syscall.Errno(0x5b) + ENOTTY = syscall.Errno(0x19) + ENXIO = syscall.Errno(0x6) + EOPNOTSUPP = syscall.Errno(0x2d) + EOVERFLOW = syscall.Errno(0x57) + EPERM = syscall.Errno(0x1) + EPFNOSUPPORT = syscall.Errno(0x2e) + EPIPE = syscall.Errno(0x20) + EPROCLIM = syscall.Errno(0x43) + EPROCUNAVAIL = syscall.Errno(0x4c) + EPROGMISMATCH = syscall.Errno(0x4b) + EPROGUNAVAIL = syscall.Errno(0x4a) + EPROTONOSUPPORT = syscall.Errno(0x2b) + EPROTOTYPE = syscall.Errno(0x29) + ERANGE = syscall.Errno(0x22) + EREMOTE = syscall.Errno(0x47) + EROFS = syscall.Errno(0x1e) + ERPCMISMATCH = syscall.Errno(0x49) + ESHUTDOWN = syscall.Errno(0x3a) + ESOCKTNOSUPPORT = syscall.Errno(0x2c) + ESPIPE = syscall.Errno(0x1d) + ESRCH = syscall.Errno(0x3) + ESTALE = syscall.Errno(0x46) + ETIMEDOUT = syscall.Errno(0x3c) + ETOOMANYREFS = syscall.Errno(0x3b) + ETXTBSY = syscall.Errno(0x1a) + EUSERS = syscall.Errno(0x44) + EWOULDBLOCK = syscall.Errno(0x23) + EXDEV = syscall.Errno(0x12) +) + +// Signals +const ( + SIGABRT = syscall.Signal(0x6) + SIGALRM = syscall.Signal(0xe) + SIGBUS = syscall.Signal(0xa) + SIGCHLD = syscall.Signal(0x14) + SIGCONT = syscall.Signal(0x13) + SIGEMT = syscall.Signal(0x7) + SIGFPE = syscall.Signal(0x8) + SIGHUP = syscall.Signal(0x1) + SIGILL = syscall.Signal(0x4) + SIGINFO = syscall.Signal(0x1d) + SIGINT = syscall.Signal(0x2) + SIGIO = syscall.Signal(0x17) + SIGIOT = syscall.Signal(0x6) + SIGKILL = syscall.Signal(0x9) + SIGPIPE = syscall.Signal(0xd) + SIGPROF = syscall.Signal(0x1b) + SIGQUIT = syscall.Signal(0x3) + SIGSEGV = syscall.Signal(0xb) + SIGSTOP = syscall.Signal(0x11) + SIGSYS = syscall.Signal(0xc) + SIGTERM = syscall.Signal(0xf) + SIGTHR = syscall.Signal(0x20) + SIGTRAP = 
syscall.Signal(0x5) + SIGTSTP = syscall.Signal(0x12) + SIGTTIN = syscall.Signal(0x15) + SIGTTOU = syscall.Signal(0x16) + SIGURG = syscall.Signal(0x10) + SIGUSR1 = syscall.Signal(0x1e) + SIGUSR2 = syscall.Signal(0x1f) + SIGVTALRM = syscall.Signal(0x1a) + SIGWINCH = syscall.Signal(0x1c) + SIGXCPU = syscall.Signal(0x18) + SIGXFSZ = syscall.Signal(0x19) +) + +// Error table +var errors = [...]string{ + 1: "operation not permitted", + 2: "no such file or directory", + 3: "no such process", + 4: "interrupted system call", + 5: "input/output error", + 6: "device not configured", + 7: "argument list too long", + 8: "exec format error", + 9: "bad file descriptor", + 10: "no child processes", + 11: "resource deadlock avoided", + 12: "cannot allocate memory", + 13: "permission denied", + 14: "bad address", + 15: "block device required", + 16: "device busy", + 17: "file exists", + 18: "cross-device link", + 19: "operation not supported by device", + 20: "not a directory", + 21: "is a directory", + 22: "invalid argument", + 23: "too many open files in system", + 24: "too many open files", + 25: "inappropriate ioctl for device", + 26: "text file busy", + 27: "file too large", + 28: "no space left on device", + 29: "illegal seek", + 30: "read-only file system", + 31: "too many links", + 32: "broken pipe", + 33: "numerical argument out of domain", + 34: "result too large", + 35: "resource temporarily unavailable", + 36: "operation now in progress", + 37: "operation already in progress", + 38: "socket operation on non-socket", + 39: "destination address required", + 40: "message too long", + 41: "protocol wrong type for socket", + 42: "protocol not available", + 43: "protocol not supported", + 44: "socket type not supported", + 45: "operation not supported", + 46: "protocol family not supported", + 47: "address family not supported by protocol family", + 48: "address already in use", + 49: "can't assign requested address", + 50: "network is down", + 51: "network is unreachable", + 52: "network dropped connection on reset", + 53: "software caused connection abort", + 54: "connection reset by peer", + 55: "no buffer space available", + 56: "socket is already connected", + 57: "socket is not connected", + 58: "can't send after socket shutdown", + 59: "too many references: can't splice", + 60: "connection timed out", + 61: "connection refused", + 62: "too many levels of symbolic links", + 63: "file name too long", + 64: "host is down", + 65: "no route to host", + 66: "directory not empty", + 67: "too many processes", + 68: "too many users", + 69: "disc quota exceeded", + 70: "stale NFS file handle", + 71: "too many levels of remote in path", + 72: "RPC struct is bad", + 73: "RPC version wrong", + 74: "RPC prog. 
not avail", + 75: "program version wrong", + 76: "bad procedure for program", + 77: "no locks available", + 78: "function not implemented", + 79: "inappropriate file type or format", + 80: "authentication error", + 81: "need authenticator", + 82: "IPsec processing failure", + 83: "attribute not found", + 84: "illegal byte sequence", + 85: "no medium found", + 86: "wrong medium type", + 87: "value too large to be stored in data type", + 88: "operation canceled", + 89: "identifier removed", + 90: "no message of desired type", + 91: "not supported", +} + +// Signal table +var signals = [...]string{ + 1: "hangup", + 2: "interrupt", + 3: "quit", + 4: "illegal instruction", + 5: "trace/BPT trap", + 6: "abort trap", + 7: "EMT trap", + 8: "floating point exception", + 9: "killed", + 10: "bus error", + 11: "segmentation fault", + 12: "bad system call", + 13: "broken pipe", + 14: "alarm clock", + 15: "terminated", + 16: "urgent I/O condition", + 17: "stopped (signal)", + 18: "stopped", + 19: "continued", + 20: "child exited", + 21: "stopped (tty input)", + 22: "stopped (tty output)", + 23: "I/O possible", + 24: "cputime limit exceeded", + 25: "filesize limit exceeded", + 26: "virtual timer expired", + 27: "profiling timer expired", + 28: "window size changes", + 29: "information request", + 30: "user defined signal 1", + 31: "user defined signal 2", + 32: "thread AST", +} diff --git a/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zerrors_openbsd_amd64.go b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zerrors_openbsd_amd64.go new file mode 100644 index 000000000..1758ecca9 --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zerrors_openbsd_amd64.go @@ -0,0 +1,1583 @@ +// mkerrors.sh -m64 +// MACHINE GENERATED BY THE COMMAND ABOVE; DO NOT EDIT + +// +build amd64,openbsd + +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs -- -m64 _const.go + +package unix + +import "syscall" + +const ( + AF_APPLETALK = 0x10 + AF_BLUETOOTH = 0x20 + AF_CCITT = 0xa + AF_CHAOS = 0x5 + AF_CNT = 0x15 + AF_COIP = 0x14 + AF_DATAKIT = 0x9 + AF_DECnet = 0xc + AF_DLI = 0xd + AF_E164 = 0x1a + AF_ECMA = 0x8 + AF_ENCAP = 0x1c + AF_HYLINK = 0xf + AF_IMPLINK = 0x3 + AF_INET = 0x2 + AF_INET6 = 0x18 + AF_IPX = 0x17 + AF_ISDN = 0x1a + AF_ISO = 0x7 + AF_KEY = 0x1e + AF_LAT = 0xe + AF_LINK = 0x12 + AF_LOCAL = 0x1 + AF_MAX = 0x24 + AF_MPLS = 0x21 + AF_NATM = 0x1b + AF_NS = 0x6 + AF_OSI = 0x7 + AF_PUP = 0x4 + AF_ROUTE = 0x11 + AF_SIP = 0x1d + AF_SNA = 0xb + AF_UNIX = 0x1 + AF_UNSPEC = 0x0 + ARPHRD_ETHER = 0x1 + ARPHRD_FRELAY = 0xf + ARPHRD_IEEE1394 = 0x18 + ARPHRD_IEEE802 = 0x6 + B0 = 0x0 + B110 = 0x6e + B115200 = 0x1c200 + B1200 = 0x4b0 + B134 = 0x86 + B14400 = 0x3840 + B150 = 0x96 + B1800 = 0x708 + B19200 = 0x4b00 + B200 = 0xc8 + B230400 = 0x38400 + B2400 = 0x960 + B28800 = 0x7080 + B300 = 0x12c + B38400 = 0x9600 + B4800 = 0x12c0 + B50 = 0x32 + B57600 = 0xe100 + B600 = 0x258 + B7200 = 0x1c20 + B75 = 0x4b + B76800 = 0x12c00 + B9600 = 0x2580 + BIOCFLUSH = 0x20004268 + BIOCGBLEN = 0x40044266 + BIOCGDIRFILT = 0x4004427c + BIOCGDLT = 0x4004426a + BIOCGDLTLIST = 0xc010427b + BIOCGETIF = 0x4020426b + BIOCGFILDROP = 0x40044278 + BIOCGHDRCMPLT = 0x40044274 + BIOCGRSIG = 0x40044273 + BIOCGRTIMEOUT = 0x4010426e + BIOCGSTATS = 0x4008426f + BIOCIMMEDIATE = 0x80044270 + BIOCLOCK = 0x20004276 + BIOCPROMISC = 0x20004269 + BIOCSBLEN = 0xc0044266 + BIOCSDIRFILT = 0x8004427d + BIOCSDLT = 0x8004427a + BIOCSETF = 0x80104267 + BIOCSETIF = 0x8020426c + BIOCSETWF = 
0x80104277 + BIOCSFILDROP = 0x80044279 + BIOCSHDRCMPLT = 0x80044275 + BIOCSRSIG = 0x80044272 + BIOCSRTIMEOUT = 0x8010426d + BIOCVERSION = 0x40044271 + BPF_A = 0x10 + BPF_ABS = 0x20 + BPF_ADD = 0x0 + BPF_ALIGNMENT = 0x4 + BPF_ALU = 0x4 + BPF_AND = 0x50 + BPF_B = 0x10 + BPF_DIRECTION_IN = 0x1 + BPF_DIRECTION_OUT = 0x2 + BPF_DIV = 0x30 + BPF_H = 0x8 + BPF_IMM = 0x0 + BPF_IND = 0x40 + BPF_JA = 0x0 + BPF_JEQ = 0x10 + BPF_JGE = 0x30 + BPF_JGT = 0x20 + BPF_JMP = 0x5 + BPF_JSET = 0x40 + BPF_K = 0x0 + BPF_LD = 0x0 + BPF_LDX = 0x1 + BPF_LEN = 0x80 + BPF_LSH = 0x60 + BPF_MAJOR_VERSION = 0x1 + BPF_MAXBUFSIZE = 0x200000 + BPF_MAXINSNS = 0x200 + BPF_MEM = 0x60 + BPF_MEMWORDS = 0x10 + BPF_MINBUFSIZE = 0x20 + BPF_MINOR_VERSION = 0x1 + BPF_MISC = 0x7 + BPF_MSH = 0xa0 + BPF_MUL = 0x20 + BPF_NEG = 0x80 + BPF_OR = 0x40 + BPF_RELEASE = 0x30bb6 + BPF_RET = 0x6 + BPF_RSH = 0x70 + BPF_ST = 0x2 + BPF_STX = 0x3 + BPF_SUB = 0x10 + BPF_TAX = 0x0 + BPF_TXA = 0x80 + BPF_W = 0x0 + BPF_X = 0x8 + BRKINT = 0x2 + CFLUSH = 0xf + CLOCAL = 0x8000 + CREAD = 0x800 + CS5 = 0x0 + CS6 = 0x100 + CS7 = 0x200 + CS8 = 0x300 + CSIZE = 0x300 + CSTART = 0x11 + CSTATUS = 0xff + CSTOP = 0x13 + CSTOPB = 0x400 + CSUSP = 0x1a + CTL_MAXNAME = 0xc + CTL_NET = 0x4 + DIOCOSFPFLUSH = 0x2000444e + DLT_ARCNET = 0x7 + DLT_ATM_RFC1483 = 0xb + DLT_AX25 = 0x3 + DLT_CHAOS = 0x5 + DLT_C_HDLC = 0x68 + DLT_EN10MB = 0x1 + DLT_EN3MB = 0x2 + DLT_ENC = 0xd + DLT_FDDI = 0xa + DLT_IEEE802 = 0x6 + DLT_IEEE802_11 = 0x69 + DLT_IEEE802_11_RADIO = 0x7f + DLT_LOOP = 0xc + DLT_MPLS = 0xdb + DLT_NULL = 0x0 + DLT_PFLOG = 0x75 + DLT_PFSYNC = 0x12 + DLT_PPP = 0x9 + DLT_PPP_BSDOS = 0x10 + DLT_PPP_ETHER = 0x33 + DLT_PPP_SERIAL = 0x32 + DLT_PRONET = 0x4 + DLT_RAW = 0xe + DLT_SLIP = 0x8 + DLT_SLIP_BSDOS = 0xf + DT_BLK = 0x6 + DT_CHR = 0x2 + DT_DIR = 0x4 + DT_FIFO = 0x1 + DT_LNK = 0xa + DT_REG = 0x8 + DT_SOCK = 0xc + DT_UNKNOWN = 0x0 + ECHO = 0x8 + ECHOCTL = 0x40 + ECHOE = 0x2 + ECHOK = 0x4 + ECHOKE = 0x1 + ECHONL = 0x10 + ECHOPRT = 0x20 + EMT_TAGOVF = 0x1 + EMUL_ENABLED = 0x1 + EMUL_NATIVE = 0x2 + ENDRUNDISC = 0x9 + ETHERMIN = 0x2e + ETHERMTU = 0x5dc + ETHERTYPE_8023 = 0x4 + ETHERTYPE_AARP = 0x80f3 + ETHERTYPE_ACCTON = 0x8390 + ETHERTYPE_AEONIC = 0x8036 + ETHERTYPE_ALPHA = 0x814a + ETHERTYPE_AMBER = 0x6008 + ETHERTYPE_AMOEBA = 0x8145 + ETHERTYPE_AOE = 0x88a2 + ETHERTYPE_APOLLO = 0x80f7 + ETHERTYPE_APOLLODOMAIN = 0x8019 + ETHERTYPE_APPLETALK = 0x809b + ETHERTYPE_APPLITEK = 0x80c7 + ETHERTYPE_ARGONAUT = 0x803a + ETHERTYPE_ARP = 0x806 + ETHERTYPE_AT = 0x809b + ETHERTYPE_ATALK = 0x809b + ETHERTYPE_ATOMIC = 0x86df + ETHERTYPE_ATT = 0x8069 + ETHERTYPE_ATTSTANFORD = 0x8008 + ETHERTYPE_AUTOPHON = 0x806a + ETHERTYPE_AXIS = 0x8856 + ETHERTYPE_BCLOOP = 0x9003 + ETHERTYPE_BOFL = 0x8102 + ETHERTYPE_CABLETRON = 0x7034 + ETHERTYPE_CHAOS = 0x804 + ETHERTYPE_COMDESIGN = 0x806c + ETHERTYPE_COMPUGRAPHIC = 0x806d + ETHERTYPE_COUNTERPOINT = 0x8062 + ETHERTYPE_CRONUS = 0x8004 + ETHERTYPE_CRONUSVLN = 0x8003 + ETHERTYPE_DCA = 0x1234 + ETHERTYPE_DDE = 0x807b + ETHERTYPE_DEBNI = 0xaaaa + ETHERTYPE_DECAM = 0x8048 + ETHERTYPE_DECCUST = 0x6006 + ETHERTYPE_DECDIAG = 0x6005 + ETHERTYPE_DECDNS = 0x803c + ETHERTYPE_DECDTS = 0x803e + ETHERTYPE_DECEXPER = 0x6000 + ETHERTYPE_DECLAST = 0x8041 + ETHERTYPE_DECLTM = 0x803f + ETHERTYPE_DECMUMPS = 0x6009 + ETHERTYPE_DECNETBIOS = 0x8040 + ETHERTYPE_DELTACON = 0x86de + ETHERTYPE_DIDDLE = 0x4321 + ETHERTYPE_DLOG1 = 0x660 + ETHERTYPE_DLOG2 = 0x661 + ETHERTYPE_DN = 0x6003 + ETHERTYPE_DOGFIGHT = 0x1989 + ETHERTYPE_DSMD = 0x8039 + ETHERTYPE_ECMA = 0x803 + ETHERTYPE_ENCRYPT = 
0x803d + ETHERTYPE_ES = 0x805d + ETHERTYPE_EXCELAN = 0x8010 + ETHERTYPE_EXPERDATA = 0x8049 + ETHERTYPE_FLIP = 0x8146 + ETHERTYPE_FLOWCONTROL = 0x8808 + ETHERTYPE_FRARP = 0x808 + ETHERTYPE_GENDYN = 0x8068 + ETHERTYPE_HAYES = 0x8130 + ETHERTYPE_HIPPI_FP = 0x8180 + ETHERTYPE_HITACHI = 0x8820 + ETHERTYPE_HP = 0x8005 + ETHERTYPE_IEEEPUP = 0xa00 + ETHERTYPE_IEEEPUPAT = 0xa01 + ETHERTYPE_IMLBL = 0x4c42 + ETHERTYPE_IMLBLDIAG = 0x424c + ETHERTYPE_IP = 0x800 + ETHERTYPE_IPAS = 0x876c + ETHERTYPE_IPV6 = 0x86dd + ETHERTYPE_IPX = 0x8137 + ETHERTYPE_IPXNEW = 0x8037 + ETHERTYPE_KALPANA = 0x8582 + ETHERTYPE_LANBRIDGE = 0x8038 + ETHERTYPE_LANPROBE = 0x8888 + ETHERTYPE_LAT = 0x6004 + ETHERTYPE_LBACK = 0x9000 + ETHERTYPE_LITTLE = 0x8060 + ETHERTYPE_LLDP = 0x88cc + ETHERTYPE_LOGICRAFT = 0x8148 + ETHERTYPE_LOOPBACK = 0x9000 + ETHERTYPE_MATRA = 0x807a + ETHERTYPE_MAX = 0xffff + ETHERTYPE_MERIT = 0x807c + ETHERTYPE_MICP = 0x873a + ETHERTYPE_MOPDL = 0x6001 + ETHERTYPE_MOPRC = 0x6002 + ETHERTYPE_MOTOROLA = 0x818d + ETHERTYPE_MPLS = 0x8847 + ETHERTYPE_MPLS_MCAST = 0x8848 + ETHERTYPE_MUMPS = 0x813f + ETHERTYPE_NBPCC = 0x3c04 + ETHERTYPE_NBPCLAIM = 0x3c09 + ETHERTYPE_NBPCLREQ = 0x3c05 + ETHERTYPE_NBPCLRSP = 0x3c06 + ETHERTYPE_NBPCREQ = 0x3c02 + ETHERTYPE_NBPCRSP = 0x3c03 + ETHERTYPE_NBPDG = 0x3c07 + ETHERTYPE_NBPDGB = 0x3c08 + ETHERTYPE_NBPDLTE = 0x3c0a + ETHERTYPE_NBPRAR = 0x3c0c + ETHERTYPE_NBPRAS = 0x3c0b + ETHERTYPE_NBPRST = 0x3c0d + ETHERTYPE_NBPSCD = 0x3c01 + ETHERTYPE_NBPVCD = 0x3c00 + ETHERTYPE_NBS = 0x802 + ETHERTYPE_NCD = 0x8149 + ETHERTYPE_NESTAR = 0x8006 + ETHERTYPE_NETBEUI = 0x8191 + ETHERTYPE_NOVELL = 0x8138 + ETHERTYPE_NS = 0x600 + ETHERTYPE_NSAT = 0x601 + ETHERTYPE_NSCOMPAT = 0x807 + ETHERTYPE_NTRAILER = 0x10 + ETHERTYPE_OS9 = 0x7007 + ETHERTYPE_OS9NET = 0x7009 + ETHERTYPE_PACER = 0x80c6 + ETHERTYPE_PAE = 0x888e + ETHERTYPE_PCS = 0x4242 + ETHERTYPE_PLANNING = 0x8044 + ETHERTYPE_PPP = 0x880b + ETHERTYPE_PPPOE = 0x8864 + ETHERTYPE_PPPOEDISC = 0x8863 + ETHERTYPE_PRIMENTS = 0x7031 + ETHERTYPE_PUP = 0x200 + ETHERTYPE_PUPAT = 0x200 + ETHERTYPE_QINQ = 0x88a8 + ETHERTYPE_RACAL = 0x7030 + ETHERTYPE_RATIONAL = 0x8150 + ETHERTYPE_RAWFR = 0x6559 + ETHERTYPE_RCL = 0x1995 + ETHERTYPE_RDP = 0x8739 + ETHERTYPE_RETIX = 0x80f2 + ETHERTYPE_REVARP = 0x8035 + ETHERTYPE_SCA = 0x6007 + ETHERTYPE_SECTRA = 0x86db + ETHERTYPE_SECUREDATA = 0x876d + ETHERTYPE_SGITW = 0x817e + ETHERTYPE_SG_BOUNCE = 0x8016 + ETHERTYPE_SG_DIAG = 0x8013 + ETHERTYPE_SG_NETGAMES = 0x8014 + ETHERTYPE_SG_RESV = 0x8015 + ETHERTYPE_SIMNET = 0x5208 + ETHERTYPE_SLOW = 0x8809 + ETHERTYPE_SNA = 0x80d5 + ETHERTYPE_SNMP = 0x814c + ETHERTYPE_SONIX = 0xfaf5 + ETHERTYPE_SPIDER = 0x809f + ETHERTYPE_SPRITE = 0x500 + ETHERTYPE_STP = 0x8181 + ETHERTYPE_TALARIS = 0x812b + ETHERTYPE_TALARISMC = 0x852b + ETHERTYPE_TCPCOMP = 0x876b + ETHERTYPE_TCPSM = 0x9002 + ETHERTYPE_TEC = 0x814f + ETHERTYPE_TIGAN = 0x802f + ETHERTYPE_TRAIL = 0x1000 + ETHERTYPE_TRANSETHER = 0x6558 + ETHERTYPE_TYMSHARE = 0x802e + ETHERTYPE_UBBST = 0x7005 + ETHERTYPE_UBDEBUG = 0x900 + ETHERTYPE_UBDIAGLOOP = 0x7002 + ETHERTYPE_UBDL = 0x7000 + ETHERTYPE_UBNIU = 0x7001 + ETHERTYPE_UBNMC = 0x7003 + ETHERTYPE_VALID = 0x1600 + ETHERTYPE_VARIAN = 0x80dd + ETHERTYPE_VAXELN = 0x803b + ETHERTYPE_VEECO = 0x8067 + ETHERTYPE_VEXP = 0x805b + ETHERTYPE_VGLAB = 0x8131 + ETHERTYPE_VINES = 0xbad + ETHERTYPE_VINESECHO = 0xbaf + ETHERTYPE_VINESLOOP = 0xbae + ETHERTYPE_VITAL = 0xff00 + ETHERTYPE_VLAN = 0x8100 + ETHERTYPE_VLTLMAN = 0x8080 + ETHERTYPE_VPROD = 0x805c + ETHERTYPE_VURESERVED = 0x8147 + ETHERTYPE_WATERLOO = 0x8130 
+ ETHERTYPE_WELLFLEET = 0x8103 + ETHERTYPE_X25 = 0x805 + ETHERTYPE_X75 = 0x801 + ETHERTYPE_XNSSM = 0x9001 + ETHERTYPE_XTP = 0x817d + ETHER_ADDR_LEN = 0x6 + ETHER_ALIGN = 0x2 + ETHER_CRC_LEN = 0x4 + ETHER_CRC_POLY_BE = 0x4c11db6 + ETHER_CRC_POLY_LE = 0xedb88320 + ETHER_HDR_LEN = 0xe + ETHER_MAX_DIX_LEN = 0x600 + ETHER_MAX_LEN = 0x5ee + ETHER_MIN_LEN = 0x40 + ETHER_TYPE_LEN = 0x2 + ETHER_VLAN_ENCAP_LEN = 0x4 + EVFILT_AIO = -0x3 + EVFILT_PROC = -0x5 + EVFILT_READ = -0x1 + EVFILT_SIGNAL = -0x6 + EVFILT_SYSCOUNT = 0x7 + EVFILT_TIMER = -0x7 + EVFILT_VNODE = -0x4 + EVFILT_WRITE = -0x2 + EV_ADD = 0x1 + EV_CLEAR = 0x20 + EV_DELETE = 0x2 + EV_DISABLE = 0x8 + EV_ENABLE = 0x4 + EV_EOF = 0x8000 + EV_ERROR = 0x4000 + EV_FLAG1 = 0x2000 + EV_ONESHOT = 0x10 + EV_SYSFLAGS = 0xf000 + EXTA = 0x4b00 + EXTB = 0x9600 + EXTPROC = 0x800 + FD_CLOEXEC = 0x1 + FD_SETSIZE = 0x400 + FLUSHO = 0x800000 + F_DUPFD = 0x0 + F_DUPFD_CLOEXEC = 0xa + F_GETFD = 0x1 + F_GETFL = 0x3 + F_GETLK = 0x7 + F_GETOWN = 0x5 + F_OK = 0x0 + F_RDLCK = 0x1 + F_SETFD = 0x2 + F_SETFL = 0x4 + F_SETLK = 0x8 + F_SETLKW = 0x9 + F_SETOWN = 0x6 + F_UNLCK = 0x2 + F_WRLCK = 0x3 + HUPCL = 0x4000 + ICANON = 0x100 + ICMP6_FILTER = 0x12 + ICRNL = 0x100 + IEXTEN = 0x400 + IFAN_ARRIVAL = 0x0 + IFAN_DEPARTURE = 0x1 + IFA_ROUTE = 0x1 + IFF_ALLMULTI = 0x200 + IFF_BROADCAST = 0x2 + IFF_CANTCHANGE = 0x8e52 + IFF_DEBUG = 0x4 + IFF_LINK0 = 0x1000 + IFF_LINK1 = 0x2000 + IFF_LINK2 = 0x4000 + IFF_LOOPBACK = 0x8 + IFF_MULTICAST = 0x8000 + IFF_NOARP = 0x80 + IFF_NOTRAILERS = 0x20 + IFF_OACTIVE = 0x400 + IFF_POINTOPOINT = 0x10 + IFF_PROMISC = 0x100 + IFF_RUNNING = 0x40 + IFF_SIMPLEX = 0x800 + IFF_UP = 0x1 + IFNAMSIZ = 0x10 + IFT_1822 = 0x2 + IFT_A12MPPSWITCH = 0x82 + IFT_AAL2 = 0xbb + IFT_AAL5 = 0x31 + IFT_ADSL = 0x5e + IFT_AFLANE8023 = 0x3b + IFT_AFLANE8025 = 0x3c + IFT_ARAP = 0x58 + IFT_ARCNET = 0x23 + IFT_ARCNETPLUS = 0x24 + IFT_ASYNC = 0x54 + IFT_ATM = 0x25 + IFT_ATMDXI = 0x69 + IFT_ATMFUNI = 0x6a + IFT_ATMIMA = 0x6b + IFT_ATMLOGICAL = 0x50 + IFT_ATMRADIO = 0xbd + IFT_ATMSUBINTERFACE = 0x86 + IFT_ATMVCIENDPT = 0xc2 + IFT_ATMVIRTUAL = 0x95 + IFT_BGPPOLICYACCOUNTING = 0xa2 + IFT_BLUETOOTH = 0xf8 + IFT_BRIDGE = 0xd1 + IFT_BSC = 0x53 + IFT_CARP = 0xf7 + IFT_CCTEMUL = 0x3d + IFT_CEPT = 0x13 + IFT_CES = 0x85 + IFT_CHANNEL = 0x46 + IFT_CNR = 0x55 + IFT_COFFEE = 0x84 + IFT_COMPOSITELINK = 0x9b + IFT_DCN = 0x8d + IFT_DIGITALPOWERLINE = 0x8a + IFT_DIGITALWRAPPEROVERHEADCHANNEL = 0xba + IFT_DLSW = 0x4a + IFT_DOCSCABLEDOWNSTREAM = 0x80 + IFT_DOCSCABLEMACLAYER = 0x7f + IFT_DOCSCABLEUPSTREAM = 0x81 + IFT_DOCSCABLEUPSTREAMCHANNEL = 0xcd + IFT_DS0 = 0x51 + IFT_DS0BUNDLE = 0x52 + IFT_DS1FDL = 0xaa + IFT_DS3 = 0x1e + IFT_DTM = 0x8c + IFT_DUMMY = 0xf1 + IFT_DVBASILN = 0xac + IFT_DVBASIOUT = 0xad + IFT_DVBRCCDOWNSTREAM = 0x93 + IFT_DVBRCCMACLAYER = 0x92 + IFT_DVBRCCUPSTREAM = 0x94 + IFT_ECONET = 0xce + IFT_ENC = 0xf4 + IFT_EON = 0x19 + IFT_EPLRS = 0x57 + IFT_ESCON = 0x49 + IFT_ETHER = 0x6 + IFT_FAITH = 0xf3 + IFT_FAST = 0x7d + IFT_FASTETHER = 0x3e + IFT_FASTETHERFX = 0x45 + IFT_FDDI = 0xf + IFT_FIBRECHANNEL = 0x38 + IFT_FRAMERELAYINTERCONNECT = 0x3a + IFT_FRAMERELAYMPI = 0x5c + IFT_FRDLCIENDPT = 0xc1 + IFT_FRELAY = 0x20 + IFT_FRELAYDCE = 0x2c + IFT_FRF16MFRBUNDLE = 0xa3 + IFT_FRFORWARD = 0x9e + IFT_G703AT2MB = 0x43 + IFT_G703AT64K = 0x42 + IFT_GIF = 0xf0 + IFT_GIGABITETHERNET = 0x75 + IFT_GR303IDT = 0xb2 + IFT_GR303RDT = 0xb1 + IFT_H323GATEKEEPER = 0xa4 + IFT_H323PROXY = 0xa5 + IFT_HDH1822 = 0x3 + IFT_HDLC = 0x76 + IFT_HDSL2 = 0xa8 + IFT_HIPERLAN2 = 0xb7 + IFT_HIPPI = 0x2f + 
IFT_HIPPIINTERFACE = 0x39 + IFT_HOSTPAD = 0x5a + IFT_HSSI = 0x2e + IFT_HY = 0xe + IFT_IBM370PARCHAN = 0x48 + IFT_IDSL = 0x9a + IFT_IEEE1394 = 0x90 + IFT_IEEE80211 = 0x47 + IFT_IEEE80212 = 0x37 + IFT_IEEE8023ADLAG = 0xa1 + IFT_IFGSN = 0x91 + IFT_IMT = 0xbe + IFT_INFINIBAND = 0xc7 + IFT_INTERLEAVE = 0x7c + IFT_IP = 0x7e + IFT_IPFORWARD = 0x8e + IFT_IPOVERATM = 0x72 + IFT_IPOVERCDLC = 0x6d + IFT_IPOVERCLAW = 0x6e + IFT_IPSWITCH = 0x4e + IFT_ISDN = 0x3f + IFT_ISDNBASIC = 0x14 + IFT_ISDNPRIMARY = 0x15 + IFT_ISDNS = 0x4b + IFT_ISDNU = 0x4c + IFT_ISO88022LLC = 0x29 + IFT_ISO88023 = 0x7 + IFT_ISO88024 = 0x8 + IFT_ISO88025 = 0x9 + IFT_ISO88025CRFPINT = 0x62 + IFT_ISO88025DTR = 0x56 + IFT_ISO88025FIBER = 0x73 + IFT_ISO88026 = 0xa + IFT_ISUP = 0xb3 + IFT_L2VLAN = 0x87 + IFT_L3IPVLAN = 0x88 + IFT_L3IPXVLAN = 0x89 + IFT_LAPB = 0x10 + IFT_LAPD = 0x4d + IFT_LAPF = 0x77 + IFT_LINEGROUP = 0xd2 + IFT_LOCALTALK = 0x2a + IFT_LOOP = 0x18 + IFT_MEDIAMAILOVERIP = 0x8b + IFT_MFSIGLINK = 0xa7 + IFT_MIOX25 = 0x26 + IFT_MODEM = 0x30 + IFT_MPC = 0x71 + IFT_MPLS = 0xa6 + IFT_MPLSTUNNEL = 0x96 + IFT_MSDSL = 0x8f + IFT_MVL = 0xbf + IFT_MYRINET = 0x63 + IFT_NFAS = 0xaf + IFT_NSIP = 0x1b + IFT_OPTICALCHANNEL = 0xc3 + IFT_OPTICALTRANSPORT = 0xc4 + IFT_OTHER = 0x1 + IFT_P10 = 0xc + IFT_P80 = 0xd + IFT_PARA = 0x22 + IFT_PFLOG = 0xf5 + IFT_PFLOW = 0xf9 + IFT_PFSYNC = 0xf6 + IFT_PLC = 0xae + IFT_PON155 = 0xcf + IFT_PON622 = 0xd0 + IFT_POS = 0xab + IFT_PPP = 0x17 + IFT_PPPMULTILINKBUNDLE = 0x6c + IFT_PROPATM = 0xc5 + IFT_PROPBWAP2MP = 0xb8 + IFT_PROPCNLS = 0x59 + IFT_PROPDOCSWIRELESSDOWNSTREAM = 0xb5 + IFT_PROPDOCSWIRELESSMACLAYER = 0xb4 + IFT_PROPDOCSWIRELESSUPSTREAM = 0xb6 + IFT_PROPMUX = 0x36 + IFT_PROPVIRTUAL = 0x35 + IFT_PROPWIRELESSP2P = 0x9d + IFT_PTPSERIAL = 0x16 + IFT_PVC = 0xf2 + IFT_Q2931 = 0xc9 + IFT_QLLC = 0x44 + IFT_RADIOMAC = 0xbc + IFT_RADSL = 0x5f + IFT_REACHDSL = 0xc0 + IFT_RFC1483 = 0x9f + IFT_RS232 = 0x21 + IFT_RSRB = 0x4f + IFT_SDLC = 0x11 + IFT_SDSL = 0x60 + IFT_SHDSL = 0xa9 + IFT_SIP = 0x1f + IFT_SIPSIG = 0xcc + IFT_SIPTG = 0xcb + IFT_SLIP = 0x1c + IFT_SMDSDXI = 0x2b + IFT_SMDSICIP = 0x34 + IFT_SONET = 0x27 + IFT_SONETOVERHEADCHANNEL = 0xb9 + IFT_SONETPATH = 0x32 + IFT_SONETVT = 0x33 + IFT_SRP = 0x97 + IFT_SS7SIGLINK = 0x9c + IFT_STACKTOSTACK = 0x6f + IFT_STARLAN = 0xb + IFT_T1 = 0x12 + IFT_TDLC = 0x74 + IFT_TELINK = 0xc8 + IFT_TERMPAD = 0x5b + IFT_TR008 = 0xb0 + IFT_TRANSPHDLC = 0x7b + IFT_TUNNEL = 0x83 + IFT_ULTRA = 0x1d + IFT_USB = 0xa0 + IFT_V11 = 0x40 + IFT_V35 = 0x2d + IFT_V36 = 0x41 + IFT_V37 = 0x78 + IFT_VDSL = 0x61 + IFT_VIRTUALIPADDRESS = 0x70 + IFT_VIRTUALTG = 0xca + IFT_VOICEDID = 0xd5 + IFT_VOICEEM = 0x64 + IFT_VOICEEMFGD = 0xd3 + IFT_VOICEENCAP = 0x67 + IFT_VOICEFGDEANA = 0xd4 + IFT_VOICEFXO = 0x65 + IFT_VOICEFXS = 0x66 + IFT_VOICEOVERATM = 0x98 + IFT_VOICEOVERCABLE = 0xc6 + IFT_VOICEOVERFRAMERELAY = 0x99 + IFT_VOICEOVERIP = 0x68 + IFT_X213 = 0x5d + IFT_X25 = 0x5 + IFT_X25DDN = 0x4 + IFT_X25HUNTGROUP = 0x7a + IFT_X25MLP = 0x79 + IFT_X25PLE = 0x28 + IFT_XETHER = 0x1a + IGNBRK = 0x1 + IGNCR = 0x80 + IGNPAR = 0x4 + IMAXBEL = 0x2000 + INLCR = 0x40 + INPCK = 0x10 + IN_CLASSA_HOST = 0xffffff + IN_CLASSA_MAX = 0x80 + IN_CLASSA_NET = 0xff000000 + IN_CLASSA_NSHIFT = 0x18 + IN_CLASSB_HOST = 0xffff + IN_CLASSB_MAX = 0x10000 + IN_CLASSB_NET = 0xffff0000 + IN_CLASSB_NSHIFT = 0x10 + IN_CLASSC_HOST = 0xff + IN_CLASSC_NET = 0xffffff00 + IN_CLASSC_NSHIFT = 0x8 + IN_CLASSD_HOST = 0xfffffff + IN_CLASSD_NET = 0xf0000000 + IN_CLASSD_NSHIFT = 0x1c + IN_LOOPBACKNET = 0x7f + IN_RFC3021_HOST = 0x1 + IN_RFC3021_NET = 
0xfffffffe + IN_RFC3021_NSHIFT = 0x1f + IPPROTO_AH = 0x33 + IPPROTO_CARP = 0x70 + IPPROTO_DIVERT = 0x102 + IPPROTO_DIVERT_INIT = 0x2 + IPPROTO_DIVERT_RESP = 0x1 + IPPROTO_DONE = 0x101 + IPPROTO_DSTOPTS = 0x3c + IPPROTO_EGP = 0x8 + IPPROTO_ENCAP = 0x62 + IPPROTO_EON = 0x50 + IPPROTO_ESP = 0x32 + IPPROTO_ETHERIP = 0x61 + IPPROTO_FRAGMENT = 0x2c + IPPROTO_GGP = 0x3 + IPPROTO_GRE = 0x2f + IPPROTO_HOPOPTS = 0x0 + IPPROTO_ICMP = 0x1 + IPPROTO_ICMPV6 = 0x3a + IPPROTO_IDP = 0x16 + IPPROTO_IGMP = 0x2 + IPPROTO_IP = 0x0 + IPPROTO_IPCOMP = 0x6c + IPPROTO_IPIP = 0x4 + IPPROTO_IPV4 = 0x4 + IPPROTO_IPV6 = 0x29 + IPPROTO_MAX = 0x100 + IPPROTO_MAXID = 0x103 + IPPROTO_MOBILE = 0x37 + IPPROTO_MPLS = 0x89 + IPPROTO_NONE = 0x3b + IPPROTO_PFSYNC = 0xf0 + IPPROTO_PIM = 0x67 + IPPROTO_PUP = 0xc + IPPROTO_RAW = 0xff + IPPROTO_ROUTING = 0x2b + IPPROTO_RSVP = 0x2e + IPPROTO_TCP = 0x6 + IPPROTO_TP = 0x1d + IPPROTO_UDP = 0x11 + IPV6_AUTH_LEVEL = 0x35 + IPV6_AUTOFLOWLABEL = 0x3b + IPV6_CHECKSUM = 0x1a + IPV6_DEFAULT_MULTICAST_HOPS = 0x1 + IPV6_DEFAULT_MULTICAST_LOOP = 0x1 + IPV6_DEFHLIM = 0x40 + IPV6_DONTFRAG = 0x3e + IPV6_DSTOPTS = 0x32 + IPV6_ESP_NETWORK_LEVEL = 0x37 + IPV6_ESP_TRANS_LEVEL = 0x36 + IPV6_FAITH = 0x1d + IPV6_FLOWINFO_MASK = 0xffffff0f + IPV6_FLOWLABEL_MASK = 0xffff0f00 + IPV6_FRAGTTL = 0x78 + IPV6_HLIMDEC = 0x1 + IPV6_HOPLIMIT = 0x2f + IPV6_HOPOPTS = 0x31 + IPV6_IPCOMP_LEVEL = 0x3c + IPV6_JOIN_GROUP = 0xc + IPV6_LEAVE_GROUP = 0xd + IPV6_MAXHLIM = 0xff + IPV6_MAXPACKET = 0xffff + IPV6_MMTU = 0x500 + IPV6_MULTICAST_HOPS = 0xa + IPV6_MULTICAST_IF = 0x9 + IPV6_MULTICAST_LOOP = 0xb + IPV6_NEXTHOP = 0x30 + IPV6_OPTIONS = 0x1 + IPV6_PATHMTU = 0x2c + IPV6_PIPEX = 0x3f + IPV6_PKTINFO = 0x2e + IPV6_PORTRANGE = 0xe + IPV6_PORTRANGE_DEFAULT = 0x0 + IPV6_PORTRANGE_HIGH = 0x1 + IPV6_PORTRANGE_LOW = 0x2 + IPV6_RECVDSTOPTS = 0x28 + IPV6_RECVDSTPORT = 0x40 + IPV6_RECVHOPLIMIT = 0x25 + IPV6_RECVHOPOPTS = 0x27 + IPV6_RECVPATHMTU = 0x2b + IPV6_RECVPKTINFO = 0x24 + IPV6_RECVRTHDR = 0x26 + IPV6_RECVTCLASS = 0x39 + IPV6_RTABLE = 0x1021 + IPV6_RTHDR = 0x33 + IPV6_RTHDRDSTOPTS = 0x23 + IPV6_RTHDR_LOOSE = 0x0 + IPV6_RTHDR_STRICT = 0x1 + IPV6_RTHDR_TYPE_0 = 0x0 + IPV6_SOCKOPT_RESERVED1 = 0x3 + IPV6_TCLASS = 0x3d + IPV6_UNICAST_HOPS = 0x4 + IPV6_USE_MIN_MTU = 0x2a + IPV6_V6ONLY = 0x1b + IPV6_VERSION = 0x60 + IPV6_VERSION_MASK = 0xf0 + IP_ADD_MEMBERSHIP = 0xc + IP_AUTH_LEVEL = 0x14 + IP_DEFAULT_MULTICAST_LOOP = 0x1 + IP_DEFAULT_MULTICAST_TTL = 0x1 + IP_DF = 0x4000 + IP_DIVERTFL = 0x1022 + IP_DROP_MEMBERSHIP = 0xd + IP_ESP_NETWORK_LEVEL = 0x16 + IP_ESP_TRANS_LEVEL = 0x15 + IP_HDRINCL = 0x2 + IP_IPCOMP_LEVEL = 0x1d + IP_IPSECFLOWINFO = 0x24 + IP_IPSEC_LOCAL_AUTH = 0x1b + IP_IPSEC_LOCAL_CRED = 0x19 + IP_IPSEC_LOCAL_ID = 0x17 + IP_IPSEC_REMOTE_AUTH = 0x1c + IP_IPSEC_REMOTE_CRED = 0x1a + IP_IPSEC_REMOTE_ID = 0x18 + IP_MAXPACKET = 0xffff + IP_MAX_MEMBERSHIPS = 0xfff + IP_MF = 0x2000 + IP_MINTTL = 0x20 + IP_MIN_MEMBERSHIPS = 0xf + IP_MSS = 0x240 + IP_MULTICAST_IF = 0x9 + IP_MULTICAST_LOOP = 0xb + IP_MULTICAST_TTL = 0xa + IP_OFFMASK = 0x1fff + IP_OPTIONS = 0x1 + IP_PIPEX = 0x22 + IP_PORTRANGE = 0x13 + IP_PORTRANGE_DEFAULT = 0x0 + IP_PORTRANGE_HIGH = 0x1 + IP_PORTRANGE_LOW = 0x2 + IP_RECVDSTADDR = 0x7 + IP_RECVDSTPORT = 0x21 + IP_RECVIF = 0x1e + IP_RECVOPTS = 0x5 + IP_RECVRETOPTS = 0x6 + IP_RECVRTABLE = 0x23 + IP_RECVTTL = 0x1f + IP_RETOPTS = 0x8 + IP_RF = 0x8000 + IP_RTABLE = 0x1021 + IP_TOS = 0x3 + IP_TTL = 0x4 + ISIG = 0x80 + ISTRIP = 0x20 + IXANY = 0x800 + IXOFF = 0x400 + IXON = 0x200 + LCNT_OVERLOAD_FLUSH = 0x6 + LOCK_EX = 0x2 + 
LOCK_NB = 0x4 + LOCK_SH = 0x1 + LOCK_UN = 0x8 + MADV_DONTNEED = 0x4 + MADV_FREE = 0x6 + MADV_NORMAL = 0x0 + MADV_RANDOM = 0x1 + MADV_SEQUENTIAL = 0x2 + MADV_SPACEAVAIL = 0x5 + MADV_WILLNEED = 0x3 + MAP_ANON = 0x1000 + MAP_COPY = 0x4 + MAP_FILE = 0x0 + MAP_FIXED = 0x10 + MAP_FLAGMASK = 0x1ff7 + MAP_HASSEMAPHORE = 0x200 + MAP_INHERIT = 0x80 + MAP_INHERIT_COPY = 0x1 + MAP_INHERIT_DONATE_COPY = 0x3 + MAP_INHERIT_NONE = 0x2 + MAP_INHERIT_SHARE = 0x0 + MAP_NOEXTEND = 0x100 + MAP_NORESERVE = 0x40 + MAP_PRIVATE = 0x2 + MAP_RENAME = 0x20 + MAP_SHARED = 0x1 + MAP_TRYFIXED = 0x400 + MCL_CURRENT = 0x1 + MCL_FUTURE = 0x2 + MSG_BCAST = 0x100 + MSG_CTRUNC = 0x20 + MSG_DONTROUTE = 0x4 + MSG_DONTWAIT = 0x80 + MSG_EOR = 0x8 + MSG_MCAST = 0x200 + MSG_NOSIGNAL = 0x400 + MSG_OOB = 0x1 + MSG_PEEK = 0x2 + MSG_TRUNC = 0x10 + MSG_WAITALL = 0x40 + MS_ASYNC = 0x1 + MS_INVALIDATE = 0x4 + MS_SYNC = 0x2 + NAME_MAX = 0xff + NET_RT_DUMP = 0x1 + NET_RT_FLAGS = 0x2 + NET_RT_IFLIST = 0x3 + NET_RT_MAXID = 0x6 + NET_RT_STATS = 0x4 + NET_RT_TABLE = 0x5 + NOFLSH = 0x80000000 + NOTE_ATTRIB = 0x8 + NOTE_CHILD = 0x4 + NOTE_DELETE = 0x1 + NOTE_EOF = 0x2 + NOTE_EXEC = 0x20000000 + NOTE_EXIT = 0x80000000 + NOTE_EXTEND = 0x4 + NOTE_FORK = 0x40000000 + NOTE_LINK = 0x10 + NOTE_LOWAT = 0x1 + NOTE_PCTRLMASK = 0xf0000000 + NOTE_PDATAMASK = 0xfffff + NOTE_RENAME = 0x20 + NOTE_REVOKE = 0x40 + NOTE_TRACK = 0x1 + NOTE_TRACKERR = 0x2 + NOTE_TRUNCATE = 0x80 + NOTE_WRITE = 0x2 + OCRNL = 0x10 + ONLCR = 0x2 + ONLRET = 0x80 + ONOCR = 0x40 + ONOEOT = 0x8 + OPOST = 0x1 + O_ACCMODE = 0x3 + O_APPEND = 0x8 + O_ASYNC = 0x40 + O_CLOEXEC = 0x10000 + O_CREAT = 0x200 + O_DIRECTORY = 0x20000 + O_DSYNC = 0x80 + O_EXCL = 0x800 + O_EXLOCK = 0x20 + O_FSYNC = 0x80 + O_NDELAY = 0x4 + O_NOCTTY = 0x8000 + O_NOFOLLOW = 0x100 + O_NONBLOCK = 0x4 + O_RDONLY = 0x0 + O_RDWR = 0x2 + O_RSYNC = 0x80 + O_SHLOCK = 0x10 + O_SYNC = 0x80 + O_TRUNC = 0x400 + O_WRONLY = 0x1 + PARENB = 0x1000 + PARMRK = 0x8 + PARODD = 0x2000 + PENDIN = 0x20000000 + PF_FLUSH = 0x1 + PRIO_PGRP = 0x1 + PRIO_PROCESS = 0x0 + PRIO_USER = 0x2 + PROT_EXEC = 0x4 + PROT_NONE = 0x0 + PROT_READ = 0x1 + PROT_WRITE = 0x2 + RLIMIT_CORE = 0x4 + RLIMIT_CPU = 0x0 + RLIMIT_DATA = 0x2 + RLIMIT_FSIZE = 0x1 + RLIMIT_NOFILE = 0x8 + RLIMIT_STACK = 0x3 + RLIM_INFINITY = 0x7fffffffffffffff + RTAX_AUTHOR = 0x6 + RTAX_BRD = 0x7 + RTAX_DST = 0x0 + RTAX_GATEWAY = 0x1 + RTAX_GENMASK = 0x3 + RTAX_IFA = 0x5 + RTAX_IFP = 0x4 + RTAX_LABEL = 0xa + RTAX_MAX = 0xb + RTAX_NETMASK = 0x2 + RTAX_SRC = 0x8 + RTAX_SRCMASK = 0x9 + RTA_AUTHOR = 0x40 + RTA_BRD = 0x80 + RTA_DST = 0x1 + RTA_GATEWAY = 0x2 + RTA_GENMASK = 0x8 + RTA_IFA = 0x20 + RTA_IFP = 0x10 + RTA_LABEL = 0x400 + RTA_NETMASK = 0x4 + RTA_SRC = 0x100 + RTA_SRCMASK = 0x200 + RTF_ANNOUNCE = 0x4000 + RTF_BLACKHOLE = 0x1000 + RTF_CLONED = 0x10000 + RTF_CLONING = 0x100 + RTF_DONE = 0x40 + RTF_DYNAMIC = 0x10 + RTF_FMASK = 0x10f808 + RTF_GATEWAY = 0x2 + RTF_HOST = 0x4 + RTF_LLINFO = 0x400 + RTF_MASK = 0x80 + RTF_MODIFIED = 0x20 + RTF_MPATH = 0x40000 + RTF_MPLS = 0x100000 + RTF_PERMANENT_ARP = 0x2000 + RTF_PROTO1 = 0x8000 + RTF_PROTO2 = 0x4000 + RTF_PROTO3 = 0x2000 + RTF_REJECT = 0x8 + RTF_SOURCE = 0x20000 + RTF_STATIC = 0x800 + RTF_TUNNEL = 0x100000 + RTF_UP = 0x1 + RTF_USETRAILERS = 0x8000 + RTF_XRESOLVE = 0x200 + RTM_ADD = 0x1 + RTM_CHANGE = 0x3 + RTM_DELADDR = 0xd + RTM_DELETE = 0x2 + RTM_DESYNC = 0x10 + RTM_GET = 0x4 + RTM_IFANNOUNCE = 0xf + RTM_IFINFO = 0xe + RTM_LOCK = 0x8 + RTM_LOSING = 0x5 + RTM_MAXSIZE = 0x800 + RTM_MISS = 0x7 + RTM_NEWADDR = 0xc + RTM_REDIRECT = 0x6 + RTM_RESOLVE = 
0xb + RTM_RTTUNIT = 0xf4240 + RTM_VERSION = 0x5 + RTV_EXPIRE = 0x4 + RTV_HOPCOUNT = 0x2 + RTV_MTU = 0x1 + RTV_RPIPE = 0x8 + RTV_RTT = 0x40 + RTV_RTTVAR = 0x80 + RTV_SPIPE = 0x10 + RTV_SSTHRESH = 0x20 + RT_TABLEID_MAX = 0xff + RUSAGE_CHILDREN = -0x1 + RUSAGE_SELF = 0x0 + RUSAGE_THREAD = 0x1 + SCM_RIGHTS = 0x1 + SCM_TIMESTAMP = 0x4 + SHUT_RD = 0x0 + SHUT_RDWR = 0x2 + SHUT_WR = 0x1 + SIOCADDMULTI = 0x80206931 + SIOCAIFADDR = 0x8040691a + SIOCAIFGROUP = 0x80286987 + SIOCALIFADDR = 0x8218691c + SIOCATMARK = 0x40047307 + SIOCBRDGADD = 0x8058693c + SIOCBRDGADDS = 0x80586941 + SIOCBRDGARL = 0x806e694d + SIOCBRDGDADDR = 0x81286947 + SIOCBRDGDEL = 0x8058693d + SIOCBRDGDELS = 0x80586942 + SIOCBRDGFLUSH = 0x80586948 + SIOCBRDGFRL = 0x806e694e + SIOCBRDGGCACHE = 0xc0146941 + SIOCBRDGGFD = 0xc0146952 + SIOCBRDGGHT = 0xc0146951 + SIOCBRDGGIFFLGS = 0xc058693e + SIOCBRDGGMA = 0xc0146953 + SIOCBRDGGPARAM = 0xc0406958 + SIOCBRDGGPRI = 0xc0146950 + SIOCBRDGGRL = 0xc030694f + SIOCBRDGGSIFS = 0xc058693c + SIOCBRDGGTO = 0xc0146946 + SIOCBRDGIFS = 0xc0586942 + SIOCBRDGRTS = 0xc0206943 + SIOCBRDGSADDR = 0xc1286944 + SIOCBRDGSCACHE = 0x80146940 + SIOCBRDGSFD = 0x80146952 + SIOCBRDGSHT = 0x80146951 + SIOCBRDGSIFCOST = 0x80586955 + SIOCBRDGSIFFLGS = 0x8058693f + SIOCBRDGSIFPRIO = 0x80586954 + SIOCBRDGSMA = 0x80146953 + SIOCBRDGSPRI = 0x80146950 + SIOCBRDGSPROTO = 0x8014695a + SIOCBRDGSTO = 0x80146945 + SIOCBRDGSTXHC = 0x80146959 + SIOCDELMULTI = 0x80206932 + SIOCDIFADDR = 0x80206919 + SIOCDIFGROUP = 0x80286989 + SIOCDIFPHYADDR = 0x80206949 + SIOCDLIFADDR = 0x8218691e + SIOCGETKALIVE = 0xc01869a4 + SIOCGETLABEL = 0x8020699a + SIOCGETPFLOW = 0xc02069fe + SIOCGETPFSYNC = 0xc02069f8 + SIOCGETSGCNT = 0xc0207534 + SIOCGETVIFCNT = 0xc0287533 + SIOCGETVLAN = 0xc0206990 + SIOCGHIWAT = 0x40047301 + SIOCGIFADDR = 0xc0206921 + SIOCGIFASYNCMAP = 0xc020697c + SIOCGIFBRDADDR = 0xc0206923 + SIOCGIFCONF = 0xc0106924 + SIOCGIFDATA = 0xc020691b + SIOCGIFDESCR = 0xc0206981 + SIOCGIFDSTADDR = 0xc0206922 + SIOCGIFFLAGS = 0xc0206911 + SIOCGIFGATTR = 0xc028698b + SIOCGIFGENERIC = 0xc020693a + SIOCGIFGMEMB = 0xc028698a + SIOCGIFGROUP = 0xc0286988 + SIOCGIFHARDMTU = 0xc02069a5 + SIOCGIFMEDIA = 0xc0306936 + SIOCGIFMETRIC = 0xc0206917 + SIOCGIFMTU = 0xc020697e + SIOCGIFNETMASK = 0xc0206925 + SIOCGIFPDSTADDR = 0xc0206948 + SIOCGIFPRIORITY = 0xc020699c + SIOCGIFPSRCADDR = 0xc0206947 + SIOCGIFRDOMAIN = 0xc02069a0 + SIOCGIFRTLABEL = 0xc0206983 + SIOCGIFTIMESLOT = 0xc0206986 + SIOCGIFXFLAGS = 0xc020699e + SIOCGLIFADDR = 0xc218691d + SIOCGLIFPHYADDR = 0xc218694b + SIOCGLIFPHYRTABLE = 0xc02069a2 + SIOCGLIFPHYTTL = 0xc02069a9 + SIOCGLOWAT = 0x40047303 + SIOCGPGRP = 0x40047309 + SIOCGSPPPPARAMS = 0xc0206994 + SIOCGVH = 0xc02069f6 + SIOCGVNETID = 0xc02069a7 + SIOCIFCREATE = 0x8020697a + SIOCIFDESTROY = 0x80206979 + SIOCIFGCLONERS = 0xc0106978 + SIOCSETKALIVE = 0x801869a3 + SIOCSETLABEL = 0x80206999 + SIOCSETPFLOW = 0x802069fd + SIOCSETPFSYNC = 0x802069f7 + SIOCSETVLAN = 0x8020698f + SIOCSHIWAT = 0x80047300 + SIOCSIFADDR = 0x8020690c + SIOCSIFASYNCMAP = 0x8020697d + SIOCSIFBRDADDR = 0x80206913 + SIOCSIFDESCR = 0x80206980 + SIOCSIFDSTADDR = 0x8020690e + SIOCSIFFLAGS = 0x80206910 + SIOCSIFGATTR = 0x8028698c + SIOCSIFGENERIC = 0x80206939 + SIOCSIFLLADDR = 0x8020691f + SIOCSIFMEDIA = 0xc0206935 + SIOCSIFMETRIC = 0x80206918 + SIOCSIFMTU = 0x8020697f + SIOCSIFNETMASK = 0x80206916 + SIOCSIFPHYADDR = 0x80406946 + SIOCSIFPRIORITY = 0x8020699b + SIOCSIFRDOMAIN = 0x8020699f + SIOCSIFRTLABEL = 0x80206982 + SIOCSIFTIMESLOT = 0x80206985 + SIOCSIFXFLAGS = 0x8020699d + 
SIOCSLIFPHYADDR = 0x8218694a + SIOCSLIFPHYRTABLE = 0x802069a1 + SIOCSLIFPHYTTL = 0x802069a8 + SIOCSLOWAT = 0x80047302 + SIOCSPGRP = 0x80047308 + SIOCSSPPPPARAMS = 0x80206993 + SIOCSVH = 0xc02069f5 + SIOCSVNETID = 0x802069a6 + SOCK_DGRAM = 0x2 + SOCK_RAW = 0x3 + SOCK_RDM = 0x4 + SOCK_SEQPACKET = 0x5 + SOCK_STREAM = 0x1 + SOL_SOCKET = 0xffff + SOMAXCONN = 0x80 + SO_ACCEPTCONN = 0x2 + SO_BINDANY = 0x1000 + SO_BROADCAST = 0x20 + SO_DEBUG = 0x1 + SO_DONTROUTE = 0x10 + SO_ERROR = 0x1007 + SO_KEEPALIVE = 0x8 + SO_LINGER = 0x80 + SO_NETPROC = 0x1020 + SO_OOBINLINE = 0x100 + SO_PEERCRED = 0x1022 + SO_RCVBUF = 0x1002 + SO_RCVLOWAT = 0x1004 + SO_RCVTIMEO = 0x1006 + SO_REUSEADDR = 0x4 + SO_REUSEPORT = 0x200 + SO_RTABLE = 0x1021 + SO_SNDBUF = 0x1001 + SO_SNDLOWAT = 0x1003 + SO_SNDTIMEO = 0x1005 + SO_SPLICE = 0x1023 + SO_TIMESTAMP = 0x800 + SO_TYPE = 0x1008 + SO_USELOOPBACK = 0x40 + TCIFLUSH = 0x1 + TCIOFLUSH = 0x3 + TCOFLUSH = 0x2 + TCP_MAXBURST = 0x4 + TCP_MAXSEG = 0x2 + TCP_MAXWIN = 0xffff + TCP_MAX_SACK = 0x3 + TCP_MAX_WINSHIFT = 0xe + TCP_MD5SIG = 0x4 + TCP_MSS = 0x200 + TCP_NODELAY = 0x1 + TCP_NOPUSH = 0x10 + TCP_NSTATES = 0xb + TCP_SACK_ENABLE = 0x8 + TCSAFLUSH = 0x2 + TIOCCBRK = 0x2000747a + TIOCCDTR = 0x20007478 + TIOCCONS = 0x80047462 + TIOCDRAIN = 0x2000745e + TIOCEXCL = 0x2000740d + TIOCEXT = 0x80047460 + TIOCFLAG_CLOCAL = 0x2 + TIOCFLAG_CRTSCTS = 0x4 + TIOCFLAG_MDMBUF = 0x8 + TIOCFLAG_PPS = 0x10 + TIOCFLAG_SOFTCAR = 0x1 + TIOCFLUSH = 0x80047410 + TIOCGETA = 0x402c7413 + TIOCGETD = 0x4004741a + TIOCGFLAGS = 0x4004745d + TIOCGPGRP = 0x40047477 + TIOCGSID = 0x40047463 + TIOCGTSTAMP = 0x4010745b + TIOCGWINSZ = 0x40087468 + TIOCMBIC = 0x8004746b + TIOCMBIS = 0x8004746c + TIOCMGET = 0x4004746a + TIOCMODG = 0x4004746a + TIOCMODS = 0x8004746d + TIOCMSET = 0x8004746d + TIOCM_CAR = 0x40 + TIOCM_CD = 0x40 + TIOCM_CTS = 0x20 + TIOCM_DSR = 0x100 + TIOCM_DTR = 0x2 + TIOCM_LE = 0x1 + TIOCM_RI = 0x80 + TIOCM_RNG = 0x80 + TIOCM_RTS = 0x4 + TIOCM_SR = 0x10 + TIOCM_ST = 0x8 + TIOCNOTTY = 0x20007471 + TIOCNXCL = 0x2000740e + TIOCOUTQ = 0x40047473 + TIOCPKT = 0x80047470 + TIOCPKT_DATA = 0x0 + TIOCPKT_DOSTOP = 0x20 + TIOCPKT_FLUSHREAD = 0x1 + TIOCPKT_FLUSHWRITE = 0x2 + TIOCPKT_IOCTL = 0x40 + TIOCPKT_NOSTOP = 0x10 + TIOCPKT_START = 0x8 + TIOCPKT_STOP = 0x4 + TIOCREMOTE = 0x80047469 + TIOCSBRK = 0x2000747b + TIOCSCTTY = 0x20007461 + TIOCSDTR = 0x20007479 + TIOCSETA = 0x802c7414 + TIOCSETAF = 0x802c7416 + TIOCSETAW = 0x802c7415 + TIOCSETD = 0x8004741b + TIOCSFLAGS = 0x8004745c + TIOCSIG = 0x8004745f + TIOCSPGRP = 0x80047476 + TIOCSTART = 0x2000746e + TIOCSTAT = 0x80047465 + TIOCSTI = 0x80017472 + TIOCSTOP = 0x2000746f + TIOCSTSTAMP = 0x8008745a + TIOCSWINSZ = 0x80087467 + TIOCUCNTL = 0x80047466 + TOSTOP = 0x400000 + VDISCARD = 0xf + VDSUSP = 0xb + VEOF = 0x0 + VEOL = 0x1 + VEOL2 = 0x2 + VERASE = 0x3 + VINTR = 0x8 + VKILL = 0x5 + VLNEXT = 0xe + VMIN = 0x10 + VQUIT = 0x9 + VREPRINT = 0x6 + VSTART = 0xc + VSTATUS = 0x12 + VSTOP = 0xd + VSUSP = 0xa + VTIME = 0x11 + VWERASE = 0x4 + WALTSIG = 0x4 + WCONTINUED = 0x8 + WCOREFLAG = 0x80 + WNOHANG = 0x1 + WSTOPPED = 0x7f + WUNTRACED = 0x2 +) + +// Errors +const ( + E2BIG = syscall.Errno(0x7) + EACCES = syscall.Errno(0xd) + EADDRINUSE = syscall.Errno(0x30) + EADDRNOTAVAIL = syscall.Errno(0x31) + EAFNOSUPPORT = syscall.Errno(0x2f) + EAGAIN = syscall.Errno(0x23) + EALREADY = syscall.Errno(0x25) + EAUTH = syscall.Errno(0x50) + EBADF = syscall.Errno(0x9) + EBADRPC = syscall.Errno(0x48) + EBUSY = syscall.Errno(0x10) + ECANCELED = syscall.Errno(0x58) + ECHILD = syscall.Errno(0xa) + 
ECONNABORTED = syscall.Errno(0x35) + ECONNREFUSED = syscall.Errno(0x3d) + ECONNRESET = syscall.Errno(0x36) + EDEADLK = syscall.Errno(0xb) + EDESTADDRREQ = syscall.Errno(0x27) + EDOM = syscall.Errno(0x21) + EDQUOT = syscall.Errno(0x45) + EEXIST = syscall.Errno(0x11) + EFAULT = syscall.Errno(0xe) + EFBIG = syscall.Errno(0x1b) + EFTYPE = syscall.Errno(0x4f) + EHOSTDOWN = syscall.Errno(0x40) + EHOSTUNREACH = syscall.Errno(0x41) + EIDRM = syscall.Errno(0x59) + EILSEQ = syscall.Errno(0x54) + EINPROGRESS = syscall.Errno(0x24) + EINTR = syscall.Errno(0x4) + EINVAL = syscall.Errno(0x16) + EIO = syscall.Errno(0x5) + EIPSEC = syscall.Errno(0x52) + EISCONN = syscall.Errno(0x38) + EISDIR = syscall.Errno(0x15) + ELAST = syscall.Errno(0x5b) + ELOOP = syscall.Errno(0x3e) + EMEDIUMTYPE = syscall.Errno(0x56) + EMFILE = syscall.Errno(0x18) + EMLINK = syscall.Errno(0x1f) + EMSGSIZE = syscall.Errno(0x28) + ENAMETOOLONG = syscall.Errno(0x3f) + ENEEDAUTH = syscall.Errno(0x51) + ENETDOWN = syscall.Errno(0x32) + ENETRESET = syscall.Errno(0x34) + ENETUNREACH = syscall.Errno(0x33) + ENFILE = syscall.Errno(0x17) + ENOATTR = syscall.Errno(0x53) + ENOBUFS = syscall.Errno(0x37) + ENODEV = syscall.Errno(0x13) + ENOENT = syscall.Errno(0x2) + ENOEXEC = syscall.Errno(0x8) + ENOLCK = syscall.Errno(0x4d) + ENOMEDIUM = syscall.Errno(0x55) + ENOMEM = syscall.Errno(0xc) + ENOMSG = syscall.Errno(0x5a) + ENOPROTOOPT = syscall.Errno(0x2a) + ENOSPC = syscall.Errno(0x1c) + ENOSYS = syscall.Errno(0x4e) + ENOTBLK = syscall.Errno(0xf) + ENOTCONN = syscall.Errno(0x39) + ENOTDIR = syscall.Errno(0x14) + ENOTEMPTY = syscall.Errno(0x42) + ENOTSOCK = syscall.Errno(0x26) + ENOTSUP = syscall.Errno(0x5b) + ENOTTY = syscall.Errno(0x19) + ENXIO = syscall.Errno(0x6) + EOPNOTSUPP = syscall.Errno(0x2d) + EOVERFLOW = syscall.Errno(0x57) + EPERM = syscall.Errno(0x1) + EPFNOSUPPORT = syscall.Errno(0x2e) + EPIPE = syscall.Errno(0x20) + EPROCLIM = syscall.Errno(0x43) + EPROCUNAVAIL = syscall.Errno(0x4c) + EPROGMISMATCH = syscall.Errno(0x4b) + EPROGUNAVAIL = syscall.Errno(0x4a) + EPROTONOSUPPORT = syscall.Errno(0x2b) + EPROTOTYPE = syscall.Errno(0x29) + ERANGE = syscall.Errno(0x22) + EREMOTE = syscall.Errno(0x47) + EROFS = syscall.Errno(0x1e) + ERPCMISMATCH = syscall.Errno(0x49) + ESHUTDOWN = syscall.Errno(0x3a) + ESOCKTNOSUPPORT = syscall.Errno(0x2c) + ESPIPE = syscall.Errno(0x1d) + ESRCH = syscall.Errno(0x3) + ESTALE = syscall.Errno(0x46) + ETIMEDOUT = syscall.Errno(0x3c) + ETOOMANYREFS = syscall.Errno(0x3b) + ETXTBSY = syscall.Errno(0x1a) + EUSERS = syscall.Errno(0x44) + EWOULDBLOCK = syscall.Errno(0x23) + EXDEV = syscall.Errno(0x12) +) + +// Signals +const ( + SIGABRT = syscall.Signal(0x6) + SIGALRM = syscall.Signal(0xe) + SIGBUS = syscall.Signal(0xa) + SIGCHLD = syscall.Signal(0x14) + SIGCONT = syscall.Signal(0x13) + SIGEMT = syscall.Signal(0x7) + SIGFPE = syscall.Signal(0x8) + SIGHUP = syscall.Signal(0x1) + SIGILL = syscall.Signal(0x4) + SIGINFO = syscall.Signal(0x1d) + SIGINT = syscall.Signal(0x2) + SIGIO = syscall.Signal(0x17) + SIGIOT = syscall.Signal(0x6) + SIGKILL = syscall.Signal(0x9) + SIGPIPE = syscall.Signal(0xd) + SIGPROF = syscall.Signal(0x1b) + SIGQUIT = syscall.Signal(0x3) + SIGSEGV = syscall.Signal(0xb) + SIGSTOP = syscall.Signal(0x11) + SIGSYS = syscall.Signal(0xc) + SIGTERM = syscall.Signal(0xf) + SIGTHR = syscall.Signal(0x20) + SIGTRAP = syscall.Signal(0x5) + SIGTSTP = syscall.Signal(0x12) + SIGTTIN = syscall.Signal(0x15) + SIGTTOU = syscall.Signal(0x16) + SIGURG = syscall.Signal(0x10) + SIGUSR1 = syscall.Signal(0x1e) + SIGUSR2 = 
syscall.Signal(0x1f) + SIGVTALRM = syscall.Signal(0x1a) + SIGWINCH = syscall.Signal(0x1c) + SIGXCPU = syscall.Signal(0x18) + SIGXFSZ = syscall.Signal(0x19) +) + +// Error table +var errors = [...]string{ + 1: "operation not permitted", + 2: "no such file or directory", + 3: "no such process", + 4: "interrupted system call", + 5: "input/output error", + 6: "device not configured", + 7: "argument list too long", + 8: "exec format error", + 9: "bad file descriptor", + 10: "no child processes", + 11: "resource deadlock avoided", + 12: "cannot allocate memory", + 13: "permission denied", + 14: "bad address", + 15: "block device required", + 16: "device busy", + 17: "file exists", + 18: "cross-device link", + 19: "operation not supported by device", + 20: "not a directory", + 21: "is a directory", + 22: "invalid argument", + 23: "too many open files in system", + 24: "too many open files", + 25: "inappropriate ioctl for device", + 26: "text file busy", + 27: "file too large", + 28: "no space left on device", + 29: "illegal seek", + 30: "read-only file system", + 31: "too many links", + 32: "broken pipe", + 33: "numerical argument out of domain", + 34: "result too large", + 35: "resource temporarily unavailable", + 36: "operation now in progress", + 37: "operation already in progress", + 38: "socket operation on non-socket", + 39: "destination address required", + 40: "message too long", + 41: "protocol wrong type for socket", + 42: "protocol not available", + 43: "protocol not supported", + 44: "socket type not supported", + 45: "operation not supported", + 46: "protocol family not supported", + 47: "address family not supported by protocol family", + 48: "address already in use", + 49: "can't assign requested address", + 50: "network is down", + 51: "network is unreachable", + 52: "network dropped connection on reset", + 53: "software caused connection abort", + 54: "connection reset by peer", + 55: "no buffer space available", + 56: "socket is already connected", + 57: "socket is not connected", + 58: "can't send after socket shutdown", + 59: "too many references: can't splice", + 60: "connection timed out", + 61: "connection refused", + 62: "too many levels of symbolic links", + 63: "file name too long", + 64: "host is down", + 65: "no route to host", + 66: "directory not empty", + 67: "too many processes", + 68: "too many users", + 69: "disc quota exceeded", + 70: "stale NFS file handle", + 71: "too many levels of remote in path", + 72: "RPC struct is bad", + 73: "RPC version wrong", + 74: "RPC prog. 
not avail", + 75: "program version wrong", + 76: "bad procedure for program", + 77: "no locks available", + 78: "function not implemented", + 79: "inappropriate file type or format", + 80: "authentication error", + 81: "need authenticator", + 82: "IPsec processing failure", + 83: "attribute not found", + 84: "illegal byte sequence", + 85: "no medium found", + 86: "wrong medium type", + 87: "value too large to be stored in data type", + 88: "operation canceled", + 89: "identifier removed", + 90: "no message of desired type", + 91: "not supported", +} + +// Signal table +var signals = [...]string{ + 1: "hangup", + 2: "interrupt", + 3: "quit", + 4: "illegal instruction", + 5: "trace/BPT trap", + 6: "abort trap", + 7: "EMT trap", + 8: "floating point exception", + 9: "killed", + 10: "bus error", + 11: "segmentation fault", + 12: "bad system call", + 13: "broken pipe", + 14: "alarm clock", + 15: "terminated", + 16: "urgent I/O condition", + 17: "stopped (signal)", + 18: "stopped", + 19: "continued", + 20: "child exited", + 21: "stopped (tty input)", + 22: "stopped (tty output)", + 23: "I/O possible", + 24: "cputime limit exceeded", + 25: "filesize limit exceeded", + 26: "virtual timer expired", + 27: "profiling timer expired", + 28: "window size changes", + 29: "information request", + 30: "user defined signal 1", + 31: "user defined signal 2", + 32: "thread AST", +} diff --git a/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zerrors_solaris_amd64.go b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zerrors_solaris_amd64.go new file mode 100644 index 000000000..a08922b98 --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zerrors_solaris_amd64.go @@ -0,0 +1,1436 @@ +// mkerrors.sh -m64 +// MACHINE GENERATED BY THE COMMAND ABOVE; DO NOT EDIT + +// +build amd64,solaris + +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs -- -m64 _const.go + +package unix + +import "syscall" + +const ( + AF_802 = 0x12 + AF_APPLETALK = 0x10 + AF_CCITT = 0xa + AF_CHAOS = 0x5 + AF_DATAKIT = 0x9 + AF_DECnet = 0xc + AF_DLI = 0xd + AF_ECMA = 0x8 + AF_FILE = 0x1 + AF_GOSIP = 0x16 + AF_HYLINK = 0xf + AF_IMPLINK = 0x3 + AF_INET = 0x2 + AF_INET6 = 0x1a + AF_INET_OFFLOAD = 0x1e + AF_IPX = 0x17 + AF_KEY = 0x1b + AF_LAT = 0xe + AF_LINK = 0x19 + AF_LOCAL = 0x1 + AF_MAX = 0x20 + AF_NBS = 0x7 + AF_NCA = 0x1c + AF_NIT = 0x11 + AF_NS = 0x6 + AF_OSI = 0x13 + AF_OSINET = 0x15 + AF_PACKET = 0x20 + AF_POLICY = 0x1d + AF_PUP = 0x4 + AF_ROUTE = 0x18 + AF_SNA = 0xb + AF_TRILL = 0x1f + AF_UNIX = 0x1 + AF_UNSPEC = 0x0 + AF_X25 = 0x14 + ARPHRD_ARCNET = 0x7 + ARPHRD_ATM = 0x10 + ARPHRD_AX25 = 0x3 + ARPHRD_CHAOS = 0x5 + ARPHRD_EETHER = 0x2 + ARPHRD_ETHER = 0x1 + ARPHRD_FC = 0x12 + ARPHRD_FRAME = 0xf + ARPHRD_HDLC = 0x11 + ARPHRD_IB = 0x20 + ARPHRD_IEEE802 = 0x6 + ARPHRD_IPATM = 0x13 + ARPHRD_METRICOM = 0x17 + ARPHRD_TUNNEL = 0x1f + B0 = 0x0 + B110 = 0x3 + B115200 = 0x12 + B1200 = 0x9 + B134 = 0x4 + B150 = 0x5 + B153600 = 0x13 + B1800 = 0xa + B19200 = 0xe + B200 = 0x6 + B230400 = 0x14 + B2400 = 0xb + B300 = 0x7 + B307200 = 0x15 + B38400 = 0xf + B460800 = 0x16 + B4800 = 0xc + B50 = 0x1 + B57600 = 0x10 + B600 = 0x8 + B75 = 0x2 + B76800 = 0x11 + B921600 = 0x17 + B9600 = 0xd + BIOCFLUSH = 0x20004268 + BIOCGBLEN = 0x40044266 + BIOCGDLT = 0x4004426a + BIOCGDLTLIST = -0x3fefbd89 + BIOCGDLTLIST32 = -0x3ff7bd89 + BIOCGETIF = 0x4020426b + BIOCGETLIF = 0x4078426b + BIOCGHDRCMPLT = 0x40044274 + BIOCGRTIMEOUT = 0x4010427b + BIOCGRTIMEOUT32 = 0x4008427b + 
BIOCGSEESENT = 0x40044278 + BIOCGSTATS = 0x4080426f + BIOCGSTATSOLD = 0x4008426f + BIOCIMMEDIATE = -0x7ffbbd90 + BIOCPROMISC = 0x20004269 + BIOCSBLEN = -0x3ffbbd9a + BIOCSDLT = -0x7ffbbd8a + BIOCSETF = -0x7fefbd99 + BIOCSETF32 = -0x7ff7bd99 + BIOCSETIF = -0x7fdfbd94 + BIOCSETLIF = -0x7f87bd94 + BIOCSHDRCMPLT = -0x7ffbbd8b + BIOCSRTIMEOUT = -0x7fefbd86 + BIOCSRTIMEOUT32 = -0x7ff7bd86 + BIOCSSEESENT = -0x7ffbbd87 + BIOCSTCPF = -0x7fefbd8e + BIOCSUDPF = -0x7fefbd8d + BIOCVERSION = 0x40044271 + BPF_A = 0x10 + BPF_ABS = 0x20 + BPF_ADD = 0x0 + BPF_ALIGNMENT = 0x4 + BPF_ALU = 0x4 + BPF_AND = 0x50 + BPF_B = 0x10 + BPF_DFLTBUFSIZE = 0x100000 + BPF_DIV = 0x30 + BPF_H = 0x8 + BPF_IMM = 0x0 + BPF_IND = 0x40 + BPF_JA = 0x0 + BPF_JEQ = 0x10 + BPF_JGE = 0x30 + BPF_JGT = 0x20 + BPF_JMP = 0x5 + BPF_JSET = 0x40 + BPF_K = 0x0 + BPF_LD = 0x0 + BPF_LDX = 0x1 + BPF_LEN = 0x80 + BPF_LSH = 0x60 + BPF_MAJOR_VERSION = 0x1 + BPF_MAXBUFSIZE = 0x1000000 + BPF_MAXINSNS = 0x200 + BPF_MEM = 0x60 + BPF_MEMWORDS = 0x10 + BPF_MINBUFSIZE = 0x20 + BPF_MINOR_VERSION = 0x1 + BPF_MISC = 0x7 + BPF_MSH = 0xa0 + BPF_MUL = 0x20 + BPF_NEG = 0x80 + BPF_OR = 0x40 + BPF_RELEASE = 0x30bb6 + BPF_RET = 0x6 + BPF_RSH = 0x70 + BPF_ST = 0x2 + BPF_STX = 0x3 + BPF_SUB = 0x10 + BPF_TAX = 0x0 + BPF_TXA = 0x80 + BPF_W = 0x0 + BPF_X = 0x8 + BRKINT = 0x2 + CFLUSH = 0xf + CLOCAL = 0x800 + CLOCK_HIGHRES = 0x4 + CLOCK_LEVEL = 0xa + CLOCK_MONOTONIC = 0x4 + CLOCK_PROCESS_CPUTIME_ID = 0x5 + CLOCK_PROF = 0x2 + CLOCK_REALTIME = 0x3 + CLOCK_THREAD_CPUTIME_ID = 0x2 + CLOCK_VIRTUAL = 0x1 + CREAD = 0x80 + CS5 = 0x0 + CS6 = 0x10 + CS7 = 0x20 + CS8 = 0x30 + CSIZE = 0x30 + CSTART = 0x11 + CSTATUS = 0x14 + CSTOP = 0x13 + CSTOPB = 0x40 + CSUSP = 0x1a + CSWTCH = 0x1a + DLT_AIRONET_HEADER = 0x78 + DLT_APPLE_IP_OVER_IEEE1394 = 0x8a + DLT_ARCNET = 0x7 + DLT_ARCNET_LINUX = 0x81 + DLT_ATM_CLIP = 0x13 + DLT_ATM_RFC1483 = 0xb + DLT_AURORA = 0x7e + DLT_AX25 = 0x3 + DLT_BACNET_MS_TP = 0xa5 + DLT_CHAOS = 0x5 + DLT_CISCO_IOS = 0x76 + DLT_C_HDLC = 0x68 + DLT_DOCSIS = 0x8f + DLT_ECONET = 0x73 + DLT_EN10MB = 0x1 + DLT_EN3MB = 0x2 + DLT_ENC = 0x6d + DLT_ERF_ETH = 0xaf + DLT_ERF_POS = 0xb0 + DLT_FDDI = 0xa + DLT_FRELAY = 0x6b + DLT_GCOM_SERIAL = 0xad + DLT_GCOM_T1E1 = 0xac + DLT_GPF_F = 0xab + DLT_GPF_T = 0xaa + DLT_GPRS_LLC = 0xa9 + DLT_HDLC = 0x10 + DLT_HHDLC = 0x79 + DLT_HIPPI = 0xf + DLT_IBM_SN = 0x92 + DLT_IBM_SP = 0x91 + DLT_IEEE802 = 0x6 + DLT_IEEE802_11 = 0x69 + DLT_IEEE802_11_RADIO = 0x7f + DLT_IEEE802_11_RADIO_AVS = 0xa3 + DLT_IPNET = 0xe2 + DLT_IPOIB = 0xa2 + DLT_IP_OVER_FC = 0x7a + DLT_JUNIPER_ATM1 = 0x89 + DLT_JUNIPER_ATM2 = 0x87 + DLT_JUNIPER_CHDLC = 0xb5 + DLT_JUNIPER_ES = 0x84 + DLT_JUNIPER_ETHER = 0xb2 + DLT_JUNIPER_FRELAY = 0xb4 + DLT_JUNIPER_GGSN = 0x85 + DLT_JUNIPER_MFR = 0x86 + DLT_JUNIPER_MLFR = 0x83 + DLT_JUNIPER_MLPPP = 0x82 + DLT_JUNIPER_MONITOR = 0xa4 + DLT_JUNIPER_PIC_PEER = 0xae + DLT_JUNIPER_PPP = 0xb3 + DLT_JUNIPER_PPPOE = 0xa7 + DLT_JUNIPER_PPPOE_ATM = 0xa8 + DLT_JUNIPER_SERVICES = 0x88 + DLT_LINUX_IRDA = 0x90 + DLT_LINUX_LAPD = 0xb1 + DLT_LINUX_SLL = 0x71 + DLT_LOOP = 0x6c + DLT_LTALK = 0x72 + DLT_MTP2 = 0x8c + DLT_MTP2_WITH_PHDR = 0x8b + DLT_MTP3 = 0x8d + DLT_NULL = 0x0 + DLT_PCI_EXP = 0x7d + DLT_PFLOG = 0x75 + DLT_PFSYNC = 0x12 + DLT_PPP = 0x9 + DLT_PPP_BSDOS = 0xe + DLT_PPP_PPPD = 0xa6 + DLT_PRISM_HEADER = 0x77 + DLT_PRONET = 0x4 + DLT_RAW = 0xc + DLT_RAWAF_MASK = 0x2240000 + DLT_RIO = 0x7c + DLT_SCCP = 0x8e + DLT_SLIP = 0x8 + DLT_SLIP_BSDOS = 0xd + DLT_SUNATM = 0x7b + DLT_SYMANTEC_FIREWALL = 0x63 + DLT_TZSP = 0x80 + ECHO = 0x8 + ECHOCTL = 0x200 + 
ECHOE = 0x10 + ECHOK = 0x20 + ECHOKE = 0x800 + ECHONL = 0x40 + ECHOPRT = 0x400 + EMPTY_SET = 0x0 + EMT_CPCOVF = 0x1 + EQUALITY_CHECK = 0x0 + EXTA = 0xe + EXTB = 0xf + FD_CLOEXEC = 0x1 + FD_NFDBITS = 0x40 + FD_SETSIZE = 0x10000 + FLUSHALL = 0x1 + FLUSHDATA = 0x0 + FLUSHO = 0x2000 + F_ALLOCSP = 0xa + F_ALLOCSP64 = 0xa + F_BADFD = 0x2e + F_BLKSIZE = 0x13 + F_BLOCKS = 0x12 + F_CHKFL = 0x8 + F_COMPAT = 0x8 + F_DUP2FD = 0x9 + F_DUP2FD_CLOEXEC = 0x24 + F_DUPFD = 0x0 + F_DUPFD_CLOEXEC = 0x25 + F_FREESP = 0xb + F_FREESP64 = 0xb + F_GETFD = 0x1 + F_GETFL = 0x3 + F_GETLK = 0xe + F_GETLK64 = 0xe + F_GETOWN = 0x17 + F_GETXFL = 0x2d + F_HASREMOTELOCKS = 0x1a + F_ISSTREAM = 0xd + F_MANDDNY = 0x10 + F_MDACC = 0x20 + F_NODNY = 0x0 + F_NPRIV = 0x10 + F_PRIV = 0xf + F_QUOTACTL = 0x11 + F_RDACC = 0x1 + F_RDDNY = 0x1 + F_RDLCK = 0x1 + F_REVOKE = 0x19 + F_RMACC = 0x4 + F_RMDNY = 0x4 + F_RWACC = 0x3 + F_RWDNY = 0x3 + F_SETFD = 0x2 + F_SETFL = 0x4 + F_SETLK = 0x6 + F_SETLK64 = 0x6 + F_SETLK64_NBMAND = 0x2a + F_SETLKW = 0x7 + F_SETLKW64 = 0x7 + F_SETLK_NBMAND = 0x2a + F_SETOWN = 0x18 + F_SHARE = 0x28 + F_SHARE_NBMAND = 0x2b + F_UNLCK = 0x3 + F_UNLKSYS = 0x4 + F_UNSHARE = 0x29 + F_WRACC = 0x2 + F_WRDNY = 0x2 + F_WRLCK = 0x2 + HUPCL = 0x400 + ICANON = 0x2 + ICRNL = 0x100 + IEXTEN = 0x8000 + IFF_ADDRCONF = 0x80000 + IFF_ALLMULTI = 0x200 + IFF_ANYCAST = 0x400000 + IFF_BROADCAST = 0x2 + IFF_CANTCHANGE = 0x7f203003b5a + IFF_COS_ENABLED = 0x200000000 + IFF_DEBUG = 0x4 + IFF_DEPRECATED = 0x40000 + IFF_DHCPRUNNING = 0x4000 + IFF_DUPLICATE = 0x4000000000 + IFF_FAILED = 0x10000000 + IFF_FIXEDMTU = 0x1000000000 + IFF_INACTIVE = 0x40000000 + IFF_INTELLIGENT = 0x400 + IFF_IPMP = 0x8000000000 + IFF_IPMP_CANTCHANGE = 0x10000000 + IFF_IPMP_INVALID = 0x1ec200080 + IFF_IPV4 = 0x1000000 + IFF_IPV6 = 0x2000000 + IFF_L3PROTECT = 0x40000000000 + IFF_LOOPBACK = 0x8 + IFF_MULTICAST = 0x800 + IFF_MULTI_BCAST = 0x1000 + IFF_NOACCEPT = 0x4000000 + IFF_NOARP = 0x80 + IFF_NOFAILOVER = 0x8000000 + IFF_NOLINKLOCAL = 0x20000000000 + IFF_NOLOCAL = 0x20000 + IFF_NONUD = 0x200000 + IFF_NORTEXCH = 0x800000 + IFF_NOTRAILERS = 0x20 + IFF_NOXMIT = 0x10000 + IFF_OFFLINE = 0x80000000 + IFF_POINTOPOINT = 0x10 + IFF_PREFERRED = 0x400000000 + IFF_PRIVATE = 0x8000 + IFF_PROMISC = 0x100 + IFF_ROUTER = 0x100000 + IFF_RUNNING = 0x40 + IFF_STANDBY = 0x20000000 + IFF_TEMPORARY = 0x800000000 + IFF_UNNUMBERED = 0x2000 + IFF_UP = 0x1 + IFF_VIRTUAL = 0x2000000000 + IFF_VRRP = 0x10000000000 + IFF_XRESOLV = 0x100000000 + IFNAMSIZ = 0x10 + IFT_1822 = 0x2 + IFT_6TO4 = 0xca + IFT_AAL5 = 0x31 + IFT_ARCNET = 0x23 + IFT_ARCNETPLUS = 0x24 + IFT_ATM = 0x25 + IFT_CEPT = 0x13 + IFT_DS3 = 0x1e + IFT_EON = 0x19 + IFT_ETHER = 0x6 + IFT_FDDI = 0xf + IFT_FRELAY = 0x20 + IFT_FRELAYDCE = 0x2c + IFT_HDH1822 = 0x3 + IFT_HIPPI = 0x2f + IFT_HSSI = 0x2e + IFT_HY = 0xe + IFT_IB = 0xc7 + IFT_IPV4 = 0xc8 + IFT_IPV6 = 0xc9 + IFT_ISDNBASIC = 0x14 + IFT_ISDNPRIMARY = 0x15 + IFT_ISO88022LLC = 0x29 + IFT_ISO88023 = 0x7 + IFT_ISO88024 = 0x8 + IFT_ISO88025 = 0x9 + IFT_ISO88026 = 0xa + IFT_LAPB = 0x10 + IFT_LOCALTALK = 0x2a + IFT_LOOP = 0x18 + IFT_MIOX25 = 0x26 + IFT_MODEM = 0x30 + IFT_NSIP = 0x1b + IFT_OTHER = 0x1 + IFT_P10 = 0xc + IFT_P80 = 0xd + IFT_PARA = 0x22 + IFT_PPP = 0x17 + IFT_PROPMUX = 0x36 + IFT_PROPVIRTUAL = 0x35 + IFT_PTPSERIAL = 0x16 + IFT_RS232 = 0x21 + IFT_SDLC = 0x11 + IFT_SIP = 0x1f + IFT_SLIP = 0x1c + IFT_SMDSDXI = 0x2b + IFT_SMDSICIP = 0x34 + IFT_SONET = 0x27 + IFT_SONETPATH = 0x32 + IFT_SONETVT = 0x33 + IFT_STARLAN = 0xb + IFT_T1 = 0x12 + IFT_ULTRA = 0x1d + IFT_V35 = 0x2d + 
IFT_X25 = 0x5 + IFT_X25DDN = 0x4 + IFT_X25PLE = 0x28 + IFT_XETHER = 0x1a + IGNBRK = 0x1 + IGNCR = 0x80 + IGNPAR = 0x4 + IMAXBEL = 0x2000 + INLCR = 0x40 + INPCK = 0x10 + IN_AUTOCONF_MASK = 0xffff0000 + IN_AUTOCONF_NET = 0xa9fe0000 + IN_CLASSA_HOST = 0xffffff + IN_CLASSA_MAX = 0x80 + IN_CLASSA_NET = 0xff000000 + IN_CLASSA_NSHIFT = 0x18 + IN_CLASSB_HOST = 0xffff + IN_CLASSB_MAX = 0x10000 + IN_CLASSB_NET = 0xffff0000 + IN_CLASSB_NSHIFT = 0x10 + IN_CLASSC_HOST = 0xff + IN_CLASSC_NET = 0xffffff00 + IN_CLASSC_NSHIFT = 0x8 + IN_CLASSD_HOST = 0xfffffff + IN_CLASSD_NET = 0xf0000000 + IN_CLASSD_NSHIFT = 0x1c + IN_CLASSE_NET = 0xffffffff + IN_LOOPBACKNET = 0x7f + IN_PRIVATE12_MASK = 0xfff00000 + IN_PRIVATE12_NET = 0xac100000 + IN_PRIVATE16_MASK = 0xffff0000 + IN_PRIVATE16_NET = 0xc0a80000 + IN_PRIVATE8_MASK = 0xff000000 + IN_PRIVATE8_NET = 0xa000000 + IPPROTO_AH = 0x33 + IPPROTO_DSTOPTS = 0x3c + IPPROTO_EGP = 0x8 + IPPROTO_ENCAP = 0x4 + IPPROTO_EON = 0x50 + IPPROTO_ESP = 0x32 + IPPROTO_FRAGMENT = 0x2c + IPPROTO_GGP = 0x3 + IPPROTO_HELLO = 0x3f + IPPROTO_HOPOPTS = 0x0 + IPPROTO_ICMP = 0x1 + IPPROTO_ICMPV6 = 0x3a + IPPROTO_IDP = 0x16 + IPPROTO_IGMP = 0x2 + IPPROTO_IP = 0x0 + IPPROTO_IPV6 = 0x29 + IPPROTO_MAX = 0x100 + IPPROTO_ND = 0x4d + IPPROTO_NONE = 0x3b + IPPROTO_OSPF = 0x59 + IPPROTO_PIM = 0x67 + IPPROTO_PUP = 0xc + IPPROTO_RAW = 0xff + IPPROTO_ROUTING = 0x2b + IPPROTO_RSVP = 0x2e + IPPROTO_SCTP = 0x84 + IPPROTO_TCP = 0x6 + IPPROTO_UDP = 0x11 + IPV6_ADD_MEMBERSHIP = 0x9 + IPV6_BOUND_IF = 0x41 + IPV6_CHECKSUM = 0x18 + IPV6_DONTFRAG = 0x21 + IPV6_DROP_MEMBERSHIP = 0xa + IPV6_DSTOPTS = 0xf + IPV6_FLOWINFO_FLOWLABEL = 0xffff0f00 + IPV6_FLOWINFO_TCLASS = 0xf00f + IPV6_HOPLIMIT = 0xc + IPV6_HOPOPTS = 0xe + IPV6_JOIN_GROUP = 0x9 + IPV6_LEAVE_GROUP = 0xa + IPV6_MULTICAST_HOPS = 0x7 + IPV6_MULTICAST_IF = 0x6 + IPV6_MULTICAST_LOOP = 0x8 + IPV6_NEXTHOP = 0xd + IPV6_PAD1_OPT = 0x0 + IPV6_PATHMTU = 0x25 + IPV6_PKTINFO = 0xb + IPV6_PREFER_SRC_CGA = 0x20 + IPV6_PREFER_SRC_CGADEFAULT = 0x10 + IPV6_PREFER_SRC_CGAMASK = 0x30 + IPV6_PREFER_SRC_COA = 0x2 + IPV6_PREFER_SRC_DEFAULT = 0x15 + IPV6_PREFER_SRC_HOME = 0x1 + IPV6_PREFER_SRC_MASK = 0x3f + IPV6_PREFER_SRC_MIPDEFAULT = 0x1 + IPV6_PREFER_SRC_MIPMASK = 0x3 + IPV6_PREFER_SRC_NONCGA = 0x10 + IPV6_PREFER_SRC_PUBLIC = 0x4 + IPV6_PREFER_SRC_TMP = 0x8 + IPV6_PREFER_SRC_TMPDEFAULT = 0x4 + IPV6_PREFER_SRC_TMPMASK = 0xc + IPV6_RECVDSTOPTS = 0x28 + IPV6_RECVHOPLIMIT = 0x13 + IPV6_RECVHOPOPTS = 0x14 + IPV6_RECVPATHMTU = 0x24 + IPV6_RECVPKTINFO = 0x12 + IPV6_RECVRTHDR = 0x16 + IPV6_RECVRTHDRDSTOPTS = 0x17 + IPV6_RECVTCLASS = 0x19 + IPV6_RTHDR = 0x10 + IPV6_RTHDRDSTOPTS = 0x11 + IPV6_RTHDR_TYPE_0 = 0x0 + IPV6_SEC_OPT = 0x22 + IPV6_SRC_PREFERENCES = 0x23 + IPV6_TCLASS = 0x26 + IPV6_UNICAST_HOPS = 0x5 + IPV6_UNSPEC_SRC = 0x42 + IPV6_USE_MIN_MTU = 0x20 + IPV6_V6ONLY = 0x27 + IP_ADD_MEMBERSHIP = 0x13 + IP_ADD_SOURCE_MEMBERSHIP = 0x17 + IP_BLOCK_SOURCE = 0x15 + IP_BOUND_IF = 0x41 + IP_BROADCAST = 0x106 + IP_BROADCAST_TTL = 0x43 + IP_DEFAULT_MULTICAST_LOOP = 0x1 + IP_DEFAULT_MULTICAST_TTL = 0x1 + IP_DF = 0x4000 + IP_DHCPINIT_IF = 0x45 + IP_DONTFRAG = 0x1b + IP_DONTROUTE = 0x105 + IP_DROP_MEMBERSHIP = 0x14 + IP_DROP_SOURCE_MEMBERSHIP = 0x18 + IP_HDRINCL = 0x2 + IP_MAXPACKET = 0xffff + IP_MF = 0x2000 + IP_MSS = 0x240 + IP_MULTICAST_IF = 0x10 + IP_MULTICAST_LOOP = 0x12 + IP_MULTICAST_TTL = 0x11 + IP_NEXTHOP = 0x19 + IP_OPTIONS = 0x1 + IP_PKTINFO = 0x1a + IP_RECVDSTADDR = 0x7 + IP_RECVIF = 0x9 + IP_RECVOPTS = 0x5 + IP_RECVPKTINFO = 0x1a + IP_RECVRETOPTS = 0x6 + IP_RECVSLLA = 0xa + 
IP_RECVTTL = 0xb + IP_RETOPTS = 0x8 + IP_REUSEADDR = 0x104 + IP_SEC_OPT = 0x22 + IP_TOS = 0x3 + IP_TTL = 0x4 + IP_UNBLOCK_SOURCE = 0x16 + IP_UNSPEC_SRC = 0x42 + ISIG = 0x1 + ISTRIP = 0x20 + IXANY = 0x800 + IXOFF = 0x1000 + IXON = 0x400 + MADV_ACCESS_DEFAULT = 0x6 + MADV_ACCESS_LWP = 0x7 + MADV_ACCESS_MANY = 0x8 + MADV_DONTNEED = 0x4 + MADV_FREE = 0x5 + MADV_NORMAL = 0x0 + MADV_RANDOM = 0x1 + MADV_SEQUENTIAL = 0x2 + MADV_WILLNEED = 0x3 + MAP_32BIT = 0x80 + MAP_ALIGN = 0x200 + MAP_ANON = 0x100 + MAP_ANONYMOUS = 0x100 + MAP_FIXED = 0x10 + MAP_INITDATA = 0x800 + MAP_NORESERVE = 0x40 + MAP_PRIVATE = 0x2 + MAP_RENAME = 0x20 + MAP_SHARED = 0x1 + MAP_TEXT = 0x400 + MAP_TYPE = 0xf + MCL_CURRENT = 0x1 + MCL_FUTURE = 0x2 + MSG_CTRUNC = 0x10 + MSG_DONTROUTE = 0x4 + MSG_DONTWAIT = 0x80 + MSG_DUPCTRL = 0x800 + MSG_EOR = 0x8 + MSG_MAXIOVLEN = 0x10 + MSG_NOTIFICATION = 0x100 + MSG_OOB = 0x1 + MSG_PEEK = 0x2 + MSG_TRUNC = 0x20 + MSG_WAITALL = 0x40 + MSG_XPG4_2 = 0x8000 + MS_ASYNC = 0x1 + MS_INVALIDATE = 0x2 + MS_OLDSYNC = 0x0 + MS_SYNC = 0x4 + M_FLUSH = 0x86 + NOFLSH = 0x80 + OCRNL = 0x8 + OFDEL = 0x80 + OFILL = 0x40 + ONLCR = 0x4 + ONLRET = 0x20 + ONOCR = 0x10 + OPENFAIL = -0x1 + OPOST = 0x1 + O_ACCMODE = 0x600003 + O_APPEND = 0x8 + O_CLOEXEC = 0x800000 + O_CREAT = 0x100 + O_DSYNC = 0x40 + O_EXCL = 0x400 + O_EXEC = 0x400000 + O_LARGEFILE = 0x2000 + O_NDELAY = 0x4 + O_NOCTTY = 0x800 + O_NOFOLLOW = 0x20000 + O_NOLINKS = 0x40000 + O_NONBLOCK = 0x80 + O_RDONLY = 0x0 + O_RDWR = 0x2 + O_RSYNC = 0x8000 + O_SEARCH = 0x200000 + O_SIOCGIFCONF = -0x3ff796ec + O_SIOCGLIFCONF = -0x3fef9688 + O_SYNC = 0x10 + O_TRUNC = 0x200 + O_WRONLY = 0x1 + O_XATTR = 0x4000 + PARENB = 0x100 + PAREXT = 0x100000 + PARMRK = 0x8 + PARODD = 0x200 + PENDIN = 0x4000 + PRIO_PGRP = 0x1 + PRIO_PROCESS = 0x0 + PRIO_USER = 0x2 + PROT_EXEC = 0x4 + PROT_NONE = 0x0 + PROT_READ = 0x1 + PROT_WRITE = 0x2 + RLIMIT_AS = 0x6 + RLIMIT_CORE = 0x4 + RLIMIT_CPU = 0x0 + RLIMIT_DATA = 0x2 + RLIMIT_FSIZE = 0x1 + RLIMIT_NOFILE = 0x5 + RLIMIT_STACK = 0x3 + RLIM_INFINITY = -0x3 + RTAX_AUTHOR = 0x6 + RTAX_BRD = 0x7 + RTAX_DST = 0x0 + RTAX_GATEWAY = 0x1 + RTAX_GENMASK = 0x3 + RTAX_IFA = 0x5 + RTAX_IFP = 0x4 + RTAX_MAX = 0x9 + RTAX_NETMASK = 0x2 + RTAX_SRC = 0x8 + RTA_AUTHOR = 0x40 + RTA_BRD = 0x80 + RTA_DST = 0x1 + RTA_GATEWAY = 0x2 + RTA_GENMASK = 0x8 + RTA_IFA = 0x20 + RTA_IFP = 0x10 + RTA_NETMASK = 0x4 + RTA_NUMBITS = 0x9 + RTA_SRC = 0x100 + RTF_BLACKHOLE = 0x1000 + RTF_CLONING = 0x100 + RTF_DONE = 0x40 + RTF_DYNAMIC = 0x10 + RTF_GATEWAY = 0x2 + RTF_HOST = 0x4 + RTF_INDIRECT = 0x40000 + RTF_KERNEL = 0x80000 + RTF_LLINFO = 0x400 + RTF_MASK = 0x80 + RTF_MODIFIED = 0x20 + RTF_MULTIRT = 0x10000 + RTF_PRIVATE = 0x2000 + RTF_PROTO1 = 0x8000 + RTF_PROTO2 = 0x4000 + RTF_REJECT = 0x8 + RTF_SETSRC = 0x20000 + RTF_STATIC = 0x800 + RTF_UP = 0x1 + RTF_XRESOLVE = 0x200 + RTF_ZONE = 0x100000 + RTM_ADD = 0x1 + RTM_CHANGE = 0x3 + RTM_CHGADDR = 0xf + RTM_DELADDR = 0xd + RTM_DELETE = 0x2 + RTM_FREEADDR = 0x10 + RTM_GET = 0x4 + RTM_IFINFO = 0xe + RTM_LOCK = 0x8 + RTM_LOSING = 0x5 + RTM_MISS = 0x7 + RTM_NEWADDR = 0xc + RTM_OLDADD = 0x9 + RTM_OLDDEL = 0xa + RTM_REDIRECT = 0x6 + RTM_RESOLVE = 0xb + RTM_VERSION = 0x3 + RTV_EXPIRE = 0x4 + RTV_HOPCOUNT = 0x2 + RTV_MTU = 0x1 + RTV_RPIPE = 0x8 + RTV_RTT = 0x40 + RTV_RTTVAR = 0x80 + RTV_SPIPE = 0x10 + RTV_SSTHRESH = 0x20 + RT_AWARE = 0x1 + RUSAGE_CHILDREN = -0x1 + RUSAGE_SELF = 0x0 + SCM_RIGHTS = 0x1010 + SCM_TIMESTAMP = 0x1013 + SCM_UCRED = 0x1012 + SHUT_RD = 0x0 + SHUT_RDWR = 0x2 + SHUT_WR = 0x1 + SIG2STR_MAX = 0x20 + SIOCADDMULTI = 
-0x7fdf96cf + SIOCADDRT = -0x7fcf8df6 + SIOCATMARK = 0x40047307 + SIOCDARP = -0x7fdb96e0 + SIOCDELMULTI = -0x7fdf96ce + SIOCDELRT = -0x7fcf8df5 + SIOCDXARP = -0x7fff9658 + SIOCGARP = -0x3fdb96e1 + SIOCGDSTINFO = -0x3fff965c + SIOCGENADDR = -0x3fdf96ab + SIOCGENPSTATS = -0x3fdf96c7 + SIOCGETLSGCNT = -0x3fef8deb + SIOCGETNAME = 0x40107334 + SIOCGETPEER = 0x40107335 + SIOCGETPROP = -0x3fff8f44 + SIOCGETSGCNT = -0x3feb8deb + SIOCGETSYNC = -0x3fdf96d3 + SIOCGETVIFCNT = -0x3feb8dec + SIOCGHIWAT = 0x40047301 + SIOCGIFADDR = -0x3fdf96f3 + SIOCGIFBRDADDR = -0x3fdf96e9 + SIOCGIFCONF = -0x3ff796a4 + SIOCGIFDSTADDR = -0x3fdf96f1 + SIOCGIFFLAGS = -0x3fdf96ef + SIOCGIFHWADDR = -0x3fdf9647 + SIOCGIFINDEX = -0x3fdf96a6 + SIOCGIFMEM = -0x3fdf96ed + SIOCGIFMETRIC = -0x3fdf96e5 + SIOCGIFMTU = -0x3fdf96ea + SIOCGIFMUXID = -0x3fdf96a8 + SIOCGIFNETMASK = -0x3fdf96e7 + SIOCGIFNUM = 0x40046957 + SIOCGIP6ADDRPOLICY = -0x3fff965e + SIOCGIPMSFILTER = -0x3ffb964c + SIOCGLIFADDR = -0x3f87968f + SIOCGLIFBINDING = -0x3f879666 + SIOCGLIFBRDADDR = -0x3f879685 + SIOCGLIFCONF = -0x3fef965b + SIOCGLIFDADSTATE = -0x3f879642 + SIOCGLIFDSTADDR = -0x3f87968d + SIOCGLIFFLAGS = -0x3f87968b + SIOCGLIFGROUPINFO = -0x3f4b9663 + SIOCGLIFGROUPNAME = -0x3f879664 + SIOCGLIFHWADDR = -0x3f879640 + SIOCGLIFINDEX = -0x3f87967b + SIOCGLIFLNKINFO = -0x3f879674 + SIOCGLIFMETRIC = -0x3f879681 + SIOCGLIFMTU = -0x3f879686 + SIOCGLIFMUXID = -0x3f87967d + SIOCGLIFNETMASK = -0x3f879683 + SIOCGLIFNUM = -0x3ff3967e + SIOCGLIFSRCOF = -0x3fef964f + SIOCGLIFSUBNET = -0x3f879676 + SIOCGLIFTOKEN = -0x3f879678 + SIOCGLIFUSESRC = -0x3f879651 + SIOCGLIFZONE = -0x3f879656 + SIOCGLOWAT = 0x40047303 + SIOCGMSFILTER = -0x3ffb964e + SIOCGPGRP = 0x40047309 + SIOCGSTAMP = -0x3fef9646 + SIOCGXARP = -0x3fff9659 + SIOCIFDETACH = -0x7fdf96c8 + SIOCILB = -0x3ffb9645 + SIOCLIFADDIF = -0x3f879691 + SIOCLIFDELND = -0x7f879673 + SIOCLIFGETND = -0x3f879672 + SIOCLIFREMOVEIF = -0x7f879692 + SIOCLIFSETND = -0x7f879671 + SIOCLOWER = -0x7fdf96d7 + SIOCSARP = -0x7fdb96e2 + SIOCSCTPGOPT = -0x3fef9653 + SIOCSCTPPEELOFF = -0x3ffb9652 + SIOCSCTPSOPT = -0x7fef9654 + SIOCSENABLESDP = -0x3ffb9649 + SIOCSETPROP = -0x7ffb8f43 + SIOCSETSYNC = -0x7fdf96d4 + SIOCSHIWAT = -0x7ffb8d00 + SIOCSIFADDR = -0x7fdf96f4 + SIOCSIFBRDADDR = -0x7fdf96e8 + SIOCSIFDSTADDR = -0x7fdf96f2 + SIOCSIFFLAGS = -0x7fdf96f0 + SIOCSIFINDEX = -0x7fdf96a5 + SIOCSIFMEM = -0x7fdf96ee + SIOCSIFMETRIC = -0x7fdf96e4 + SIOCSIFMTU = -0x7fdf96eb + SIOCSIFMUXID = -0x7fdf96a7 + SIOCSIFNAME = -0x7fdf96b7 + SIOCSIFNETMASK = -0x7fdf96e6 + SIOCSIP6ADDRPOLICY = -0x7fff965d + SIOCSIPMSFILTER = -0x7ffb964b + SIOCSLGETREQ = -0x3fdf96b9 + SIOCSLIFADDR = -0x7f879690 + SIOCSLIFBRDADDR = -0x7f879684 + SIOCSLIFDSTADDR = -0x7f87968e + SIOCSLIFFLAGS = -0x7f87968c + SIOCSLIFGROUPNAME = -0x7f879665 + SIOCSLIFINDEX = -0x7f87967a + SIOCSLIFLNKINFO = -0x7f879675 + SIOCSLIFMETRIC = -0x7f879680 + SIOCSLIFMTU = -0x7f879687 + SIOCSLIFMUXID = -0x7f87967c + SIOCSLIFNAME = -0x3f87967f + SIOCSLIFNETMASK = -0x7f879682 + SIOCSLIFPREFIX = -0x3f879641 + SIOCSLIFSUBNET = -0x7f879677 + SIOCSLIFTOKEN = -0x7f879679 + SIOCSLIFUSESRC = -0x7f879650 + SIOCSLIFZONE = -0x7f879655 + SIOCSLOWAT = -0x7ffb8cfe + SIOCSLSTAT = -0x7fdf96b8 + SIOCSMSFILTER = -0x7ffb964d + SIOCSPGRP = -0x7ffb8cf8 + SIOCSPROMISC = -0x7ffb96d0 + SIOCSQPTR = -0x3ffb9648 + SIOCSSDSTATS = -0x3fdf96d2 + SIOCSSESTATS = -0x3fdf96d1 + SIOCSXARP = -0x7fff965a + SIOCTMYADDR = -0x3ff79670 + SIOCTMYSITE = -0x3ff7966e + SIOCTONLINK = -0x3ff7966f + SIOCUPPER = -0x7fdf96d8 + SIOCX25RCV = -0x3fdf96c4 + SIOCX25TBL 
= -0x3fdf96c3 + SIOCX25XMT = -0x3fdf96c5 + SIOCXPROTO = 0x20007337 + SOCK_CLOEXEC = 0x80000 + SOCK_DGRAM = 0x1 + SOCK_NDELAY = 0x200000 + SOCK_NONBLOCK = 0x100000 + SOCK_RAW = 0x4 + SOCK_RDM = 0x5 + SOCK_SEQPACKET = 0x6 + SOCK_STREAM = 0x2 + SOCK_TYPE_MASK = 0xffff + SOL_FILTER = 0xfffc + SOL_PACKET = 0xfffd + SOL_ROUTE = 0xfffe + SOL_SOCKET = 0xffff + SOMAXCONN = 0x80 + SO_ACCEPTCONN = 0x2 + SO_ALL = 0x3f + SO_ALLZONES = 0x1014 + SO_ANON_MLP = 0x100a + SO_ATTACH_FILTER = 0x40000001 + SO_BAND = 0x4000 + SO_BROADCAST = 0x20 + SO_COPYOPT = 0x80000 + SO_DEBUG = 0x1 + SO_DELIM = 0x8000 + SO_DETACH_FILTER = 0x40000002 + SO_DGRAM_ERRIND = 0x200 + SO_DOMAIN = 0x100c + SO_DONTLINGER = -0x81 + SO_DONTROUTE = 0x10 + SO_ERROPT = 0x40000 + SO_ERROR = 0x1007 + SO_EXCLBIND = 0x1015 + SO_HIWAT = 0x10 + SO_ISNTTY = 0x800 + SO_ISTTY = 0x400 + SO_KEEPALIVE = 0x8 + SO_LINGER = 0x80 + SO_LOWAT = 0x20 + SO_MAC_EXEMPT = 0x100b + SO_MAC_IMPLICIT = 0x1016 + SO_MAXBLK = 0x100000 + SO_MAXPSZ = 0x8 + SO_MINPSZ = 0x4 + SO_MREADOFF = 0x80 + SO_MREADON = 0x40 + SO_NDELOFF = 0x200 + SO_NDELON = 0x100 + SO_NODELIM = 0x10000 + SO_OOBINLINE = 0x100 + SO_PROTOTYPE = 0x1009 + SO_RCVBUF = 0x1002 + SO_RCVLOWAT = 0x1004 + SO_RCVPSH = 0x100d + SO_RCVTIMEO = 0x1006 + SO_READOPT = 0x1 + SO_RECVUCRED = 0x400 + SO_REUSEADDR = 0x4 + SO_SECATTR = 0x1011 + SO_SNDBUF = 0x1001 + SO_SNDLOWAT = 0x1003 + SO_SNDTIMEO = 0x1005 + SO_STRHOLD = 0x20000 + SO_TAIL = 0x200000 + SO_TIMESTAMP = 0x1013 + SO_TONSTOP = 0x2000 + SO_TOSTOP = 0x1000 + SO_TYPE = 0x1008 + SO_USELOOPBACK = 0x40 + SO_VRRP = 0x1017 + SO_WROFF = 0x2 + TCFLSH = 0x5407 + TCGETA = 0x5401 + TCGETS = 0x540d + TCIFLUSH = 0x0 + TCIOFLUSH = 0x2 + TCOFLUSH = 0x1 + TCP_ABORT_THRESHOLD = 0x11 + TCP_ANONPRIVBIND = 0x20 + TCP_CONN_ABORT_THRESHOLD = 0x13 + TCP_CONN_NOTIFY_THRESHOLD = 0x12 + TCP_CORK = 0x18 + TCP_EXCLBIND = 0x21 + TCP_INIT_CWND = 0x15 + TCP_KEEPALIVE = 0x8 + TCP_KEEPALIVE_ABORT_THRESHOLD = 0x17 + TCP_KEEPALIVE_THRESHOLD = 0x16 + TCP_KEEPCNT = 0x23 + TCP_KEEPIDLE = 0x22 + TCP_KEEPINTVL = 0x24 + TCP_LINGER2 = 0x1c + TCP_MAXSEG = 0x2 + TCP_MSS = 0x218 + TCP_NODELAY = 0x1 + TCP_NOTIFY_THRESHOLD = 0x10 + TCP_RECVDSTADDR = 0x14 + TCP_RTO_INITIAL = 0x19 + TCP_RTO_MAX = 0x1b + TCP_RTO_MIN = 0x1a + TCSAFLUSH = 0x5410 + TCSBRK = 0x5405 + TCSETA = 0x5402 + TCSETAF = 0x5404 + TCSETAW = 0x5403 + TCSETS = 0x540e + TCSETSF = 0x5410 + TCSETSW = 0x540f + TCXONC = 0x5406 + TIOC = 0x5400 + TIOCCBRK = 0x747a + TIOCCDTR = 0x7478 + TIOCCILOOP = 0x746c + TIOCEXCL = 0x740d + TIOCFLUSH = 0x7410 + TIOCGETC = 0x7412 + TIOCGETD = 0x7400 + TIOCGETP = 0x7408 + TIOCGLTC = 0x7474 + TIOCGPGRP = 0x7414 + TIOCGPPS = 0x547d + TIOCGPPSEV = 0x547f + TIOCGSID = 0x7416 + TIOCGSOFTCAR = 0x5469 + TIOCGWINSZ = 0x5468 + TIOCHPCL = 0x7402 + TIOCKBOF = 0x5409 + TIOCKBON = 0x5408 + TIOCLBIC = 0x747e + TIOCLBIS = 0x747f + TIOCLGET = 0x747c + TIOCLSET = 0x747d + TIOCMBIC = 0x741c + TIOCMBIS = 0x741b + TIOCMGET = 0x741d + TIOCMSET = 0x741a + TIOCM_CAR = 0x40 + TIOCM_CD = 0x40 + TIOCM_CTS = 0x20 + TIOCM_DSR = 0x100 + TIOCM_DTR = 0x2 + TIOCM_LE = 0x1 + TIOCM_RI = 0x80 + TIOCM_RNG = 0x80 + TIOCM_RTS = 0x4 + TIOCM_SR = 0x10 + TIOCM_ST = 0x8 + TIOCNOTTY = 0x7471 + TIOCNXCL = 0x740e + TIOCOUTQ = 0x7473 + TIOCREMOTE = 0x741e + TIOCSBRK = 0x747b + TIOCSCTTY = 0x7484 + TIOCSDTR = 0x7479 + TIOCSETC = 0x7411 + TIOCSETD = 0x7401 + TIOCSETN = 0x740a + TIOCSETP = 0x7409 + TIOCSIGNAL = 0x741f + TIOCSILOOP = 0x746d + TIOCSLTC = 0x7475 + TIOCSPGRP = 0x7415 + TIOCSPPS = 0x547e + TIOCSSOFTCAR = 0x546a + TIOCSTART = 0x746e + TIOCSTI = 0x7417 + 
TIOCSTOP = 0x746f + TIOCSWINSZ = 0x5467 + TOSTOP = 0x100 + VCEOF = 0x8 + VCEOL = 0x9 + VDISCARD = 0xd + VDSUSP = 0xb + VEOF = 0x4 + VEOL = 0x5 + VEOL2 = 0x6 + VERASE = 0x2 + VINTR = 0x0 + VKILL = 0x3 + VLNEXT = 0xf + VMIN = 0x4 + VQUIT = 0x1 + VREPRINT = 0xc + VSTART = 0x8 + VSTATUS = 0x10 + VSTOP = 0x9 + VSUSP = 0xa + VSWTCH = 0x7 + VT0 = 0x0 + VT1 = 0x4000 + VTDLY = 0x4000 + VTIME = 0x5 + VWERASE = 0xe + WCONTFLG = 0xffff + WCONTINUED = 0x8 + WCOREFLG = 0x80 + WEXITED = 0x1 + WNOHANG = 0x40 + WNOWAIT = 0x80 + WOPTMASK = 0xcf + WRAP = 0x20000 + WSIGMASK = 0x7f + WSTOPFLG = 0x7f + WSTOPPED = 0x4 + WTRAPPED = 0x2 + WUNTRACED = 0x4 +) + +// Errors +const ( + E2BIG = syscall.Errno(0x7) + EACCES = syscall.Errno(0xd) + EADDRINUSE = syscall.Errno(0x7d) + EADDRNOTAVAIL = syscall.Errno(0x7e) + EADV = syscall.Errno(0x44) + EAFNOSUPPORT = syscall.Errno(0x7c) + EAGAIN = syscall.Errno(0xb) + EALREADY = syscall.Errno(0x95) + EBADE = syscall.Errno(0x32) + EBADF = syscall.Errno(0x9) + EBADFD = syscall.Errno(0x51) + EBADMSG = syscall.Errno(0x4d) + EBADR = syscall.Errno(0x33) + EBADRQC = syscall.Errno(0x36) + EBADSLT = syscall.Errno(0x37) + EBFONT = syscall.Errno(0x39) + EBUSY = syscall.Errno(0x10) + ECANCELED = syscall.Errno(0x2f) + ECHILD = syscall.Errno(0xa) + ECHRNG = syscall.Errno(0x25) + ECOMM = syscall.Errno(0x46) + ECONNABORTED = syscall.Errno(0x82) + ECONNREFUSED = syscall.Errno(0x92) + ECONNRESET = syscall.Errno(0x83) + EDEADLK = syscall.Errno(0x2d) + EDEADLOCK = syscall.Errno(0x38) + EDESTADDRREQ = syscall.Errno(0x60) + EDOM = syscall.Errno(0x21) + EDQUOT = syscall.Errno(0x31) + EEXIST = syscall.Errno(0x11) + EFAULT = syscall.Errno(0xe) + EFBIG = syscall.Errno(0x1b) + EHOSTDOWN = syscall.Errno(0x93) + EHOSTUNREACH = syscall.Errno(0x94) + EIDRM = syscall.Errno(0x24) + EILSEQ = syscall.Errno(0x58) + EINPROGRESS = syscall.Errno(0x96) + EINTR = syscall.Errno(0x4) + EINVAL = syscall.Errno(0x16) + EIO = syscall.Errno(0x5) + EISCONN = syscall.Errno(0x85) + EISDIR = syscall.Errno(0x15) + EL2HLT = syscall.Errno(0x2c) + EL2NSYNC = syscall.Errno(0x26) + EL3HLT = syscall.Errno(0x27) + EL3RST = syscall.Errno(0x28) + ELIBACC = syscall.Errno(0x53) + ELIBBAD = syscall.Errno(0x54) + ELIBEXEC = syscall.Errno(0x57) + ELIBMAX = syscall.Errno(0x56) + ELIBSCN = syscall.Errno(0x55) + ELNRNG = syscall.Errno(0x29) + ELOCKUNMAPPED = syscall.Errno(0x48) + ELOOP = syscall.Errno(0x5a) + EMFILE = syscall.Errno(0x18) + EMLINK = syscall.Errno(0x1f) + EMSGSIZE = syscall.Errno(0x61) + EMULTIHOP = syscall.Errno(0x4a) + ENAMETOOLONG = syscall.Errno(0x4e) + ENETDOWN = syscall.Errno(0x7f) + ENETRESET = syscall.Errno(0x81) + ENETUNREACH = syscall.Errno(0x80) + ENFILE = syscall.Errno(0x17) + ENOANO = syscall.Errno(0x35) + ENOBUFS = syscall.Errno(0x84) + ENOCSI = syscall.Errno(0x2b) + ENODATA = syscall.Errno(0x3d) + ENODEV = syscall.Errno(0x13) + ENOENT = syscall.Errno(0x2) + ENOEXEC = syscall.Errno(0x8) + ENOLCK = syscall.Errno(0x2e) + ENOLINK = syscall.Errno(0x43) + ENOMEM = syscall.Errno(0xc) + ENOMSG = syscall.Errno(0x23) + ENONET = syscall.Errno(0x40) + ENOPKG = syscall.Errno(0x41) + ENOPROTOOPT = syscall.Errno(0x63) + ENOSPC = syscall.Errno(0x1c) + ENOSR = syscall.Errno(0x3f) + ENOSTR = syscall.Errno(0x3c) + ENOSYS = syscall.Errno(0x59) + ENOTACTIVE = syscall.Errno(0x49) + ENOTBLK = syscall.Errno(0xf) + ENOTCONN = syscall.Errno(0x86) + ENOTDIR = syscall.Errno(0x14) + ENOTEMPTY = syscall.Errno(0x5d) + ENOTRECOVERABLE = syscall.Errno(0x3b) + ENOTSOCK = syscall.Errno(0x5f) + ENOTSUP = syscall.Errno(0x30) + ENOTTY = 
syscall.Errno(0x19) + ENOTUNIQ = syscall.Errno(0x50) + ENXIO = syscall.Errno(0x6) + EOPNOTSUPP = syscall.Errno(0x7a) + EOVERFLOW = syscall.Errno(0x4f) + EOWNERDEAD = syscall.Errno(0x3a) + EPERM = syscall.Errno(0x1) + EPFNOSUPPORT = syscall.Errno(0x7b) + EPIPE = syscall.Errno(0x20) + EPROTO = syscall.Errno(0x47) + EPROTONOSUPPORT = syscall.Errno(0x78) + EPROTOTYPE = syscall.Errno(0x62) + ERANGE = syscall.Errno(0x22) + EREMCHG = syscall.Errno(0x52) + EREMOTE = syscall.Errno(0x42) + ERESTART = syscall.Errno(0x5b) + EROFS = syscall.Errno(0x1e) + ESHUTDOWN = syscall.Errno(0x8f) + ESOCKTNOSUPPORT = syscall.Errno(0x79) + ESPIPE = syscall.Errno(0x1d) + ESRCH = syscall.Errno(0x3) + ESRMNT = syscall.Errno(0x45) + ESTALE = syscall.Errno(0x97) + ESTRPIPE = syscall.Errno(0x5c) + ETIME = syscall.Errno(0x3e) + ETIMEDOUT = syscall.Errno(0x91) + ETOOMANYREFS = syscall.Errno(0x90) + ETXTBSY = syscall.Errno(0x1a) + EUNATCH = syscall.Errno(0x2a) + EUSERS = syscall.Errno(0x5e) + EWOULDBLOCK = syscall.Errno(0xb) + EXDEV = syscall.Errno(0x12) + EXFULL = syscall.Errno(0x34) +) + +// Signals +const ( + SIGABRT = syscall.Signal(0x6) + SIGALRM = syscall.Signal(0xe) + SIGBUS = syscall.Signal(0xa) + SIGCANCEL = syscall.Signal(0x24) + SIGCHLD = syscall.Signal(0x12) + SIGCLD = syscall.Signal(0x12) + SIGCONT = syscall.Signal(0x19) + SIGEMT = syscall.Signal(0x7) + SIGFPE = syscall.Signal(0x8) + SIGFREEZE = syscall.Signal(0x22) + SIGHUP = syscall.Signal(0x1) + SIGILL = syscall.Signal(0x4) + SIGINFO = syscall.Signal(0x29) + SIGINT = syscall.Signal(0x2) + SIGIO = syscall.Signal(0x16) + SIGIOT = syscall.Signal(0x6) + SIGJVM1 = syscall.Signal(0x27) + SIGJVM2 = syscall.Signal(0x28) + SIGKILL = syscall.Signal(0x9) + SIGLOST = syscall.Signal(0x25) + SIGLWP = syscall.Signal(0x21) + SIGPIPE = syscall.Signal(0xd) + SIGPOLL = syscall.Signal(0x16) + SIGPROF = syscall.Signal(0x1d) + SIGPWR = syscall.Signal(0x13) + SIGQUIT = syscall.Signal(0x3) + SIGSEGV = syscall.Signal(0xb) + SIGSTOP = syscall.Signal(0x17) + SIGSYS = syscall.Signal(0xc) + SIGTERM = syscall.Signal(0xf) + SIGTHAW = syscall.Signal(0x23) + SIGTRAP = syscall.Signal(0x5) + SIGTSTP = syscall.Signal(0x18) + SIGTTIN = syscall.Signal(0x1a) + SIGTTOU = syscall.Signal(0x1b) + SIGURG = syscall.Signal(0x15) + SIGUSR1 = syscall.Signal(0x10) + SIGUSR2 = syscall.Signal(0x11) + SIGVTALRM = syscall.Signal(0x1c) + SIGWAITING = syscall.Signal(0x20) + SIGWINCH = syscall.Signal(0x14) + SIGXCPU = syscall.Signal(0x1e) + SIGXFSZ = syscall.Signal(0x1f) + SIGXRES = syscall.Signal(0x26) +) + +// Error table +var errors = [...]string{ + 1: "not owner", + 2: "no such file or directory", + 3: "no such process", + 4: "interrupted system call", + 5: "I/O error", + 6: "no such device or address", + 7: "arg list too long", + 8: "exec format error", + 9: "bad file number", + 10: "no child processes", + 11: "resource temporarily unavailable", + 12: "not enough space", + 13: "permission denied", + 14: "bad address", + 15: "block device required", + 16: "device busy", + 17: "file exists", + 18: "cross-device link", + 19: "no such device", + 20: "not a directory", + 21: "is a directory", + 22: "invalid argument", + 23: "file table overflow", + 24: "too many open files", + 25: "inappropriate ioctl for device", + 26: "text file busy", + 27: "file too large", + 28: "no space left on device", + 29: "illegal seek", + 30: "read-only file system", + 31: "too many links", + 32: "broken pipe", + 33: "argument out of domain", + 34: "result too large", + 35: "no message of desired type", + 36: "identifier removed", + 
37: "channel number out of range", + 38: "level 2 not synchronized", + 39: "level 3 halted", + 40: "level 3 reset", + 41: "link number out of range", + 42: "protocol driver not attached", + 43: "no CSI structure available", + 44: "level 2 halted", + 45: "deadlock situation detected/avoided", + 46: "no record locks available", + 47: "operation canceled", + 48: "operation not supported", + 49: "disc quota exceeded", + 50: "bad exchange descriptor", + 51: "bad request descriptor", + 52: "message tables full", + 53: "anode table overflow", + 54: "bad request code", + 55: "invalid slot", + 56: "file locking deadlock", + 57: "bad font file format", + 58: "owner of the lock died", + 59: "lock is not recoverable", + 60: "not a stream device", + 61: "no data available", + 62: "timer expired", + 63: "out of stream resources", + 64: "machine is not on the network", + 65: "package not installed", + 66: "object is remote", + 67: "link has been severed", + 68: "advertise error", + 69: "srmount error", + 70: "communication error on send", + 71: "protocol error", + 72: "locked lock was unmapped ", + 73: "facility is not active", + 74: "multihop attempted", + 77: "not a data message", + 78: "file name too long", + 79: "value too large for defined data type", + 80: "name not unique on network", + 81: "file descriptor in bad state", + 82: "remote address changed", + 83: "can not access a needed shared library", + 84: "accessing a corrupted shared library", + 85: ".lib section in a.out corrupted", + 86: "attempting to link in more shared libraries than system limit", + 87: "can not exec a shared library directly", + 88: "illegal byte sequence", + 89: "operation not applicable", + 90: "number of symbolic links encountered during path name traversal exceeds MAXSYMLINKS", + 91: "error 91", + 92: "error 92", + 93: "directory not empty", + 94: "too many users", + 95: "socket operation on non-socket", + 96: "destination address required", + 97: "message too long", + 98: "protocol wrong type for socket", + 99: "option not supported by protocol", + 120: "protocol not supported", + 121: "socket type not supported", + 122: "operation not supported on transport endpoint", + 123: "protocol family not supported", + 124: "address family not supported by protocol family", + 125: "address already in use", + 126: "cannot assign requested address", + 127: "network is down", + 128: "network is unreachable", + 129: "network dropped connection because of reset", + 130: "software caused connection abort", + 131: "connection reset by peer", + 132: "no buffer space available", + 133: "transport endpoint is already connected", + 134: "transport endpoint is not connected", + 143: "cannot send after socket shutdown", + 144: "too many references: cannot splice", + 145: "connection timed out", + 146: "connection refused", + 147: "host is down", + 148: "no route to host", + 149: "operation already in progress", + 150: "operation now in progress", + 151: "stale NFS file handle", +} + +// Signal table +var signals = [...]string{ + 1: "hangup", + 2: "interrupt", + 3: "quit", + 4: "illegal Instruction", + 5: "trace/Breakpoint Trap", + 6: "abort", + 7: "emulation Trap", + 8: "arithmetic Exception", + 9: "killed", + 10: "bus Error", + 11: "segmentation Fault", + 12: "bad System Call", + 13: "broken Pipe", + 14: "alarm Clock", + 15: "terminated", + 16: "user Signal 1", + 17: "user Signal 2", + 18: "child Status Changed", + 19: "power-Fail/Restart", + 20: "window Size Change", + 21: "urgent Socket Condition", + 22: "pollable Event", + 23: 
"stopped (signal)", + 24: "stopped (user)", + 25: "continued", + 26: "stopped (tty input)", + 27: "stopped (tty output)", + 28: "virtual Timer Expired", + 29: "profiling Timer Expired", + 30: "cpu Limit Exceeded", + 31: "file Size Limit Exceeded", + 32: "no runnable lwp", + 33: "inter-lwp signal", + 34: "checkpoint Freeze", + 35: "checkpoint Thaw", + 36: "thread Cancellation", + 37: "resource Lost", + 38: "resource Control Exceeded", + 39: "reserved for JVM 1", + 40: "reserved for JVM 2", + 41: "information Request", +} diff --git a/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zsyscall_darwin_386.go b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zsyscall_darwin_386.go new file mode 100644 index 000000000..a15aaf120 --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zsyscall_darwin_386.go @@ -0,0 +1,1426 @@ +// mksyscall.pl -l32 syscall_bsd.go syscall_darwin.go syscall_darwin_386.go +// MACHINE GENERATED BY THE COMMAND ABOVE; DO NOT EDIT + +// +build 386,darwin + +package unix + +import ( + "syscall" + "unsafe" +) + +var _ syscall.Errno + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getgroups(ngid int, gid *_Gid_t) (n int, err error) { + r0, _, e1 := RawSyscall(SYS_GETGROUPS, uintptr(ngid), uintptr(unsafe.Pointer(gid)), 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func setgroups(ngid int, gid *_Gid_t) (err error) { + _, _, e1 := RawSyscall(SYS_SETGROUPS, uintptr(ngid), uintptr(unsafe.Pointer(gid)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func wait4(pid int, wstatus *_C_int, options int, rusage *Rusage) (wpid int, err error) { + r0, _, e1 := Syscall6(SYS_WAIT4, uintptr(pid), uintptr(unsafe.Pointer(wstatus)), uintptr(options), uintptr(unsafe.Pointer(rusage)), 0, 0) + wpid = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error) { + r0, _, e1 := Syscall(SYS_ACCEPT, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func bind(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) { + _, _, e1 := Syscall(SYS_BIND, uintptr(s), uintptr(addr), uintptr(addrlen)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func connect(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) { + _, _, e1 := Syscall(SYS_CONNECT, uintptr(s), uintptr(addr), uintptr(addrlen)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func socket(domain int, typ int, proto int) (fd int, err error) { + r0, _, e1 := RawSyscall(SYS_SOCKET, uintptr(domain), uintptr(typ), uintptr(proto)) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getsockopt(s int, level int, name int, val unsafe.Pointer, vallen *_Socklen) (err error) { + _, _, e1 := Syscall6(SYS_GETSOCKOPT, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(unsafe.Pointer(vallen)), 0) + if e1 != 0 { + err = 
errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func setsockopt(s int, level int, name int, val unsafe.Pointer, vallen uintptr) (err error) { + _, _, e1 := Syscall6(SYS_SETSOCKOPT, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(vallen), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getpeername(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) { + _, _, e1 := RawSyscall(SYS_GETPEERNAME, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getsockname(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) { + _, _, e1 := RawSyscall(SYS_GETSOCKNAME, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Shutdown(s int, how int) (err error) { + _, _, e1 := Syscall(SYS_SHUTDOWN, uintptr(s), uintptr(how), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func socketpair(domain int, typ int, proto int, fd *[2]int32) (err error) { + _, _, e1 := RawSyscall6(SYS_SOCKETPAIR, uintptr(domain), uintptr(typ), uintptr(proto), uintptr(unsafe.Pointer(fd)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Socklen) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_RECVFROM, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(flags), uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(fromlen))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func sendto(s int, buf []byte, flags int, to unsafe.Pointer, addrlen _Socklen) (err error) { + var _p0 unsafe.Pointer + if len(buf) > 0 { + _p0 = unsafe.Pointer(&buf[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall6(SYS_SENDTO, uintptr(s), uintptr(_p0), uintptr(len(buf)), uintptr(flags), uintptr(to), uintptr(addrlen)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func recvmsg(s int, msg *Msghdr, flags int) (n int, err error) { + r0, _, e1 := Syscall(SYS_RECVMSG, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func sendmsg(s int, msg *Msghdr, flags int) (n int, err error) { + r0, _, e1 := Syscall(SYS_SENDMSG, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func kevent(kq int, change unsafe.Pointer, nchange int, event unsafe.Pointer, nevent int, timeout *Timespec) (n int, err error) { + r0, _, e1 := Syscall6(SYS_KEVENT, uintptr(kq), uintptr(change), uintptr(nchange), uintptr(event), uintptr(nevent), uintptr(unsafe.Pointer(timeout))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + 
return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { + var _p0 unsafe.Pointer + if len(mib) > 0 { + _p0 = unsafe.Pointer(&mib[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall6(SYS___SYSCTL, uintptr(_p0), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func utimes(path string, timeval *[2]Timeval) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_UTIMES, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(timeval)), 0) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func futimes(fd int, timeval *[2]Timeval) (err error) { + _, _, e1 := Syscall(SYS_FUTIMES, uintptr(fd), uintptr(unsafe.Pointer(timeval)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func fcntl(fd int, cmd int, arg int) (val int, err error) { + r0, _, e1 := Syscall(SYS_FCNTL, uintptr(fd), uintptr(cmd), uintptr(arg)) + val = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ptrace(request int, pid int, addr uintptr, data uintptr) (err error) { + _, _, e1 := Syscall6(SYS_PTRACE, uintptr(request), uintptr(pid), uintptr(addr), uintptr(data), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func pipe() (r int, w int, err error) { + r0, r1, e1 := RawSyscall(SYS_PIPE, 0, 0, 0) + r = int(r0) + w = int(r1) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func kill(pid int, signum int, posix int) (err error) { + _, _, e1 := Syscall(SYS_KILL, uintptr(pid), uintptr(signum), uintptr(posix)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Access(path string, mode uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_ACCESS, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Adjtime(delta *Timeval, olddelta *Timeval) (err error) { + _, _, e1 := Syscall(SYS_ADJTIME, uintptr(unsafe.Pointer(delta)), uintptr(unsafe.Pointer(olddelta)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Chdir(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_CHDIR, uintptr(unsafe.Pointer(_p0)), 0, 0) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Chflags(path string, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_CHFLAGS, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) + 
use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Chmod(path string, mode uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_CHMOD, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Chown(path string, uid int, gid int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_CHOWN, uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid)) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Chroot(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_CHROOT, uintptr(unsafe.Pointer(_p0)), 0, 0) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Close(fd int) (err error) { + _, _, e1 := Syscall(SYS_CLOSE, uintptr(fd), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Dup(fd int) (nfd int, err error) { + r0, _, e1 := Syscall(SYS_DUP, uintptr(fd), 0, 0) + nfd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Dup2(from int, to int) (err error) { + _, _, e1 := Syscall(SYS_DUP2, uintptr(from), uintptr(to), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Exchangedata(path1 string, path2 string, options int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path1) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(path2) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_EXCHANGEDATA, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(options)) + use(unsafe.Pointer(_p0)) + use(unsafe.Pointer(_p1)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Exit(code int) { + Syscall(SYS_EXIT, uintptr(code), 0, 0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fchdir(fd int) (err error) { + _, _, e1 := Syscall(SYS_FCHDIR, uintptr(fd), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fchflags(fd int, flags int) (err error) { + _, _, e1 := Syscall(SYS_FCHFLAGS, uintptr(fd), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fchmod(fd int, mode uint32) (err error) { + _, _, e1 := Syscall(SYS_FCHMOD, uintptr(fd), uintptr(mode), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fchown(fd int, uid int, gid int) (err error) { + _, _, e1 := Syscall(SYS_FCHOWN, uintptr(fd), uintptr(uid), uintptr(gid)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Flock(fd int, how int) (err 
error) { + _, _, e1 := Syscall(SYS_FLOCK, uintptr(fd), uintptr(how), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fpathconf(fd int, name int) (val int, err error) { + r0, _, e1 := Syscall(SYS_FPATHCONF, uintptr(fd), uintptr(name), 0) + val = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fstat(fd int, stat *Stat_t) (err error) { + _, _, e1 := Syscall(SYS_FSTAT64, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fstatfs(fd int, stat *Statfs_t) (err error) { + _, _, e1 := Syscall(SYS_FSTATFS64, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fsync(fd int) (err error) { + _, _, e1 := Syscall(SYS_FSYNC, uintptr(fd), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Ftruncate(fd int, length int64) (err error) { + _, _, e1 := Syscall(SYS_FTRUNCATE, uintptr(fd), uintptr(length), uintptr(length>>32)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getdirentries(fd int, buf []byte, basep *uintptr) (n int, err error) { + var _p0 unsafe.Pointer + if len(buf) > 0 { + _p0 = unsafe.Pointer(&buf[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_GETDIRENTRIES64, uintptr(fd), uintptr(_p0), uintptr(len(buf)), uintptr(unsafe.Pointer(basep)), 0, 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getdtablesize() (size int) { + r0, _, _ := Syscall(SYS_GETDTABLESIZE, 0, 0, 0) + size = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getegid() (egid int) { + r0, _, _ := RawSyscall(SYS_GETEGID, 0, 0, 0) + egid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Geteuid() (uid int) { + r0, _, _ := RawSyscall(SYS_GETEUID, 0, 0, 0) + uid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getgid() (gid int) { + r0, _, _ := RawSyscall(SYS_GETGID, 0, 0, 0) + gid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getpgid(pid int) (pgid int, err error) { + r0, _, e1 := RawSyscall(SYS_GETPGID, uintptr(pid), 0, 0) + pgid = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getpgrp() (pgrp int) { + r0, _, _ := RawSyscall(SYS_GETPGRP, 0, 0, 0) + pgrp = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getpid() (pid int) { + r0, _, _ := RawSyscall(SYS_GETPID, 0, 0, 0) + pid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getppid() (ppid int) { + r0, _, _ := RawSyscall(SYS_GETPPID, 0, 0, 0) + ppid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getpriority(which int, who int) (prio int, err error) { + r0, _, e1 := Syscall(SYS_GETPRIORITY, uintptr(which), uintptr(who), 0) + prio = int(r0) + if e1 != 0 { + err = 
errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getrlimit(which int, lim *Rlimit) (err error) { + _, _, e1 := RawSyscall(SYS_GETRLIMIT, uintptr(which), uintptr(unsafe.Pointer(lim)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getrusage(who int, rusage *Rusage) (err error) { + _, _, e1 := RawSyscall(SYS_GETRUSAGE, uintptr(who), uintptr(unsafe.Pointer(rusage)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getsid(pid int) (sid int, err error) { + r0, _, e1 := RawSyscall(SYS_GETSID, uintptr(pid), 0, 0) + sid = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getuid() (uid int) { + r0, _, _ := RawSyscall(SYS_GETUID, 0, 0, 0) + uid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Issetugid() (tainted bool) { + r0, _, _ := RawSyscall(SYS_ISSETUGID, 0, 0, 0) + tainted = bool(r0 != 0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Kqueue() (fd int, err error) { + r0, _, e1 := Syscall(SYS_KQUEUE, 0, 0, 0) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Lchown(path string, uid int, gid int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_LCHOWN, uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid)) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Link(path string, link string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(link) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_LINK, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) + use(unsafe.Pointer(_p0)) + use(unsafe.Pointer(_p1)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Listen(s int, backlog int) (err error) { + _, _, e1 := Syscall(SYS_LISTEN, uintptr(s), uintptr(backlog), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Lstat(path string, stat *Stat_t) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_LSTAT64, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mkdir(path string, mode uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_MKDIR, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mkfifo(path string, mode uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_MKFIFO, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) + 
use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mknod(path string, mode uint32, dev int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_MKNOD, uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev)) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mlock(b []byte) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall(SYS_MLOCK, uintptr(_p0), uintptr(len(b)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mlockall(flags int) (err error) { + _, _, e1 := Syscall(SYS_MLOCKALL, uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mprotect(b []byte, prot int) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall(SYS_MPROTECT, uintptr(_p0), uintptr(len(b)), uintptr(prot)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Munlock(b []byte) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall(SYS_MUNLOCK, uintptr(_p0), uintptr(len(b)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Munlockall() (err error) { + _, _, e1 := Syscall(SYS_MUNLOCKALL, 0, 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Open(path string, mode int, perm uint32) (fd int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + r0, _, e1 := Syscall(SYS_OPEN, uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(perm)) + use(unsafe.Pointer(_p0)) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Pathconf(path string, name int) (val int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + r0, _, e1 := Syscall(SYS_PATHCONF, uintptr(unsafe.Pointer(_p0)), uintptr(name), 0) + use(unsafe.Pointer(_p0)) + val = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Pread(fd int, p []byte, offset int64) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_PREAD, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(offset), uintptr(offset>>32), 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Pwrite(fd int, p []byte, offset int64) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_PWRITE, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(offset), 
uintptr(offset>>32), 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func read(fd int, p []byte) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS_READ, uintptr(fd), uintptr(_p0), uintptr(len(p))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Readlink(path string, buf []byte) (n int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 unsafe.Pointer + if len(buf) > 0 { + _p1 = unsafe.Pointer(&buf[0]) + } else { + _p1 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS_READLINK, uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(buf))) + use(unsafe.Pointer(_p0)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Rename(from string, to string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(from) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(to) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_RENAME, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) + use(unsafe.Pointer(_p0)) + use(unsafe.Pointer(_p1)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Revoke(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_REVOKE, uintptr(unsafe.Pointer(_p0)), 0, 0) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Rmdir(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_RMDIR, uintptr(unsafe.Pointer(_p0)), 0, 0) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Seek(fd int, offset int64, whence int) (newoffset int64, err error) { + r0, r1, e1 := Syscall6(SYS_LSEEK, uintptr(fd), uintptr(offset), uintptr(offset>>32), uintptr(whence), 0, 0) + newoffset = int64(int64(r1)<<32 | int64(r0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Select(n int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (err error) { + _, _, e1 := Syscall6(SYS_SELECT, uintptr(n), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setegid(egid int) (err error) { + _, _, e1 := Syscall(SYS_SETEGID, uintptr(egid), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Seteuid(euid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETEUID, uintptr(euid), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setgid(gid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETGID, uintptr(gid), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} 
+ +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setlogin(name string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(name) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_SETLOGIN, uintptr(unsafe.Pointer(_p0)), 0, 0) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setpgid(pid int, pgid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETPGID, uintptr(pid), uintptr(pgid), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setpriority(which int, who int, prio int) (err error) { + _, _, e1 := Syscall(SYS_SETPRIORITY, uintptr(which), uintptr(who), uintptr(prio)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setprivexec(flag int) (err error) { + _, _, e1 := Syscall(SYS_SETPRIVEXEC, uintptr(flag), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setregid(rgid int, egid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETREGID, uintptr(rgid), uintptr(egid), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setreuid(ruid int, euid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETREUID, uintptr(ruid), uintptr(euid), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setrlimit(which int, lim *Rlimit) (err error) { + _, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(which), uintptr(unsafe.Pointer(lim)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setsid() (pid int, err error) { + r0, _, e1 := RawSyscall(SYS_SETSID, 0, 0, 0) + pid = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Settimeofday(tp *Timeval) (err error) { + _, _, e1 := RawSyscall(SYS_SETTIMEOFDAY, uintptr(unsafe.Pointer(tp)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setuid(uid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETUID, uintptr(uid), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Stat(path string, stat *Stat_t) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_STAT64, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Statfs(path string, stat *Statfs_t) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_STATFS64, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Symlink(path string, link string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(link) + if err 
!= nil { + return + } + _, _, e1 := Syscall(SYS_SYMLINK, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) + use(unsafe.Pointer(_p0)) + use(unsafe.Pointer(_p1)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Sync() (err error) { + _, _, e1 := Syscall(SYS_SYNC, 0, 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Truncate(path string, length int64) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_TRUNCATE, uintptr(unsafe.Pointer(_p0)), uintptr(length), uintptr(length>>32)) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Umask(newmask int) (oldmask int) { + r0, _, _ := Syscall(SYS_UMASK, uintptr(newmask), 0, 0) + oldmask = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Undelete(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_UNDELETE, uintptr(unsafe.Pointer(_p0)), 0, 0) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Unlink(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_UNLINK, uintptr(unsafe.Pointer(_p0)), 0, 0) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Unmount(path string, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_UNMOUNT, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func write(fd int, p []byte) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(_p0), uintptr(len(p))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func mmap(addr uintptr, length uintptr, prot int, flag int, fd int, pos int64) (ret uintptr, err error) { + r0, _, e1 := Syscall9(SYS_MMAP, uintptr(addr), uintptr(length), uintptr(prot), uintptr(flag), uintptr(fd), uintptr(pos), uintptr(pos>>32), 0, 0) + ret = uintptr(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func munmap(addr uintptr, length uintptr) (err error) { + _, _, e1 := Syscall(SYS_MUNMAP, uintptr(addr), uintptr(length), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func readlen(fd int, buf *byte, nbuf int) (n int, err error) { + r0, _, e1 := Syscall(SYS_READ, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func writelen(fd int, buf *byte, nbuf int) (n int, err error) { + r0, _, e1 := 
Syscall(SYS_WRITE, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func gettimeofday(tp *Timeval) (sec int32, usec int32, err error) { + r0, r1, e1 := RawSyscall(SYS_GETTIMEOFDAY, uintptr(unsafe.Pointer(tp)), 0, 0) + sec = int32(r0) + usec = int32(r1) + if e1 != 0 { + err = errnoErr(e1) + } + return +} diff --git a/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zsyscall_darwin_amd64.go b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zsyscall_darwin_amd64.go new file mode 100644 index 000000000..74606b2f4 --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zsyscall_darwin_amd64.go @@ -0,0 +1,1442 @@ +// mksyscall.pl syscall_bsd.go syscall_darwin.go syscall_darwin_amd64.go +// MACHINE GENERATED BY THE COMMAND ABOVE; DO NOT EDIT + +// +build amd64,darwin + +package unix + +import ( + "syscall" + "unsafe" +) + +var _ syscall.Errno + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getgroups(ngid int, gid *_Gid_t) (n int, err error) { + r0, _, e1 := RawSyscall(SYS_GETGROUPS, uintptr(ngid), uintptr(unsafe.Pointer(gid)), 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func setgroups(ngid int, gid *_Gid_t) (err error) { + _, _, e1 := RawSyscall(SYS_SETGROUPS, uintptr(ngid), uintptr(unsafe.Pointer(gid)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func wait4(pid int, wstatus *_C_int, options int, rusage *Rusage) (wpid int, err error) { + r0, _, e1 := Syscall6(SYS_WAIT4, uintptr(pid), uintptr(unsafe.Pointer(wstatus)), uintptr(options), uintptr(unsafe.Pointer(rusage)), 0, 0) + wpid = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error) { + r0, _, e1 := Syscall(SYS_ACCEPT, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func bind(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) { + _, _, e1 := Syscall(SYS_BIND, uintptr(s), uintptr(addr), uintptr(addrlen)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func connect(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) { + _, _, e1 := Syscall(SYS_CONNECT, uintptr(s), uintptr(addr), uintptr(addrlen)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func socket(domain int, typ int, proto int) (fd int, err error) { + r0, _, e1 := RawSyscall(SYS_SOCKET, uintptr(domain), uintptr(typ), uintptr(proto)) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getsockopt(s int, level int, name int, val unsafe.Pointer, vallen *_Socklen) (err error) { + _, _, e1 := Syscall6(SYS_GETSOCKOPT, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(unsafe.Pointer(vallen)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND 
AT THE TOP; DO NOT EDIT + +func setsockopt(s int, level int, name int, val unsafe.Pointer, vallen uintptr) (err error) { + _, _, e1 := Syscall6(SYS_SETSOCKOPT, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(vallen), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getpeername(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) { + _, _, e1 := RawSyscall(SYS_GETPEERNAME, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getsockname(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) { + _, _, e1 := RawSyscall(SYS_GETSOCKNAME, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Shutdown(s int, how int) (err error) { + _, _, e1 := Syscall(SYS_SHUTDOWN, uintptr(s), uintptr(how), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func socketpair(domain int, typ int, proto int, fd *[2]int32) (err error) { + _, _, e1 := RawSyscall6(SYS_SOCKETPAIR, uintptr(domain), uintptr(typ), uintptr(proto), uintptr(unsafe.Pointer(fd)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Socklen) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_RECVFROM, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(flags), uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(fromlen))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func sendto(s int, buf []byte, flags int, to unsafe.Pointer, addrlen _Socklen) (err error) { + var _p0 unsafe.Pointer + if len(buf) > 0 { + _p0 = unsafe.Pointer(&buf[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall6(SYS_SENDTO, uintptr(s), uintptr(_p0), uintptr(len(buf)), uintptr(flags), uintptr(to), uintptr(addrlen)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func recvmsg(s int, msg *Msghdr, flags int) (n int, err error) { + r0, _, e1 := Syscall(SYS_RECVMSG, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func sendmsg(s int, msg *Msghdr, flags int) (n int, err error) { + r0, _, e1 := Syscall(SYS_SENDMSG, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func kevent(kq int, change unsafe.Pointer, nchange int, event unsafe.Pointer, nevent int, timeout *Timespec) (n int, err error) { + r0, _, e1 := Syscall6(SYS_KEVENT, uintptr(kq), uintptr(change), uintptr(nchange), uintptr(event), uintptr(nevent), uintptr(unsafe.Pointer(timeout))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT 
+ +func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { + var _p0 unsafe.Pointer + if len(mib) > 0 { + _p0 = unsafe.Pointer(&mib[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall6(SYS___SYSCTL, uintptr(_p0), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func utimes(path string, timeval *[2]Timeval) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_UTIMES, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(timeval)), 0) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func futimes(fd int, timeval *[2]Timeval) (err error) { + _, _, e1 := Syscall(SYS_FUTIMES, uintptr(fd), uintptr(unsafe.Pointer(timeval)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func fcntl(fd int, cmd int, arg int) (val int, err error) { + r0, _, e1 := Syscall(SYS_FCNTL, uintptr(fd), uintptr(cmd), uintptr(arg)) + val = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ptrace(request int, pid int, addr uintptr, data uintptr) (err error) { + _, _, e1 := Syscall6(SYS_PTRACE, uintptr(request), uintptr(pid), uintptr(addr), uintptr(data), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func pipe() (r int, w int, err error) { + r0, r1, e1 := RawSyscall(SYS_PIPE, 0, 0, 0) + r = int(r0) + w = int(r1) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func kill(pid int, signum int, posix int) (err error) { + _, _, e1 := Syscall(SYS_KILL, uintptr(pid), uintptr(signum), uintptr(posix)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Access(path string, mode uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_ACCESS, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Adjtime(delta *Timeval, olddelta *Timeval) (err error) { + _, _, e1 := Syscall(SYS_ADJTIME, uintptr(unsafe.Pointer(delta)), uintptr(unsafe.Pointer(olddelta)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Chdir(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_CHDIR, uintptr(unsafe.Pointer(_p0)), 0, 0) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Chflags(path string, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_CHFLAGS, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + 
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Chmod(path string, mode uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_CHMOD, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Chown(path string, uid int, gid int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_CHOWN, uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid)) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Chroot(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_CHROOT, uintptr(unsafe.Pointer(_p0)), 0, 0) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Close(fd int) (err error) { + _, _, e1 := Syscall(SYS_CLOSE, uintptr(fd), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Dup(fd int) (nfd int, err error) { + r0, _, e1 := Syscall(SYS_DUP, uintptr(fd), 0, 0) + nfd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Dup2(from int, to int) (err error) { + _, _, e1 := Syscall(SYS_DUP2, uintptr(from), uintptr(to), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Exchangedata(path1 string, path2 string, options int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path1) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(path2) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_EXCHANGEDATA, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(options)) + use(unsafe.Pointer(_p0)) + use(unsafe.Pointer(_p1)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Exit(code int) { + Syscall(SYS_EXIT, uintptr(code), 0, 0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fchdir(fd int) (err error) { + _, _, e1 := Syscall(SYS_FCHDIR, uintptr(fd), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fchflags(fd int, flags int) (err error) { + _, _, e1 := Syscall(SYS_FCHFLAGS, uintptr(fd), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fchmod(fd int, mode uint32) (err error) { + _, _, e1 := Syscall(SYS_FCHMOD, uintptr(fd), uintptr(mode), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fchown(fd int, uid int, gid int) (err error) { + _, _, e1 := Syscall(SYS_FCHOWN, uintptr(fd), uintptr(uid), uintptr(gid)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Flock(fd int, how int) (err error) { + _, _, e1 := Syscall(SYS_FLOCK, uintptr(fd), uintptr(how), 0) + if e1 
!= 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fpathconf(fd int, name int) (val int, err error) { + r0, _, e1 := Syscall(SYS_FPATHCONF, uintptr(fd), uintptr(name), 0) + val = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fstat(fd int, stat *Stat_t) (err error) { + _, _, e1 := Syscall(SYS_FSTAT64, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fstatfs(fd int, stat *Statfs_t) (err error) { + _, _, e1 := Syscall(SYS_FSTATFS64, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fsync(fd int) (err error) { + _, _, e1 := Syscall(SYS_FSYNC, uintptr(fd), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Ftruncate(fd int, length int64) (err error) { + _, _, e1 := Syscall(SYS_FTRUNCATE, uintptr(fd), uintptr(length), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getdirentries(fd int, buf []byte, basep *uintptr) (n int, err error) { + var _p0 unsafe.Pointer + if len(buf) > 0 { + _p0 = unsafe.Pointer(&buf[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_GETDIRENTRIES64, uintptr(fd), uintptr(_p0), uintptr(len(buf)), uintptr(unsafe.Pointer(basep)), 0, 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getdtablesize() (size int) { + r0, _, _ := Syscall(SYS_GETDTABLESIZE, 0, 0, 0) + size = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getegid() (egid int) { + r0, _, _ := RawSyscall(SYS_GETEGID, 0, 0, 0) + egid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Geteuid() (uid int) { + r0, _, _ := RawSyscall(SYS_GETEUID, 0, 0, 0) + uid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getgid() (gid int) { + r0, _, _ := RawSyscall(SYS_GETGID, 0, 0, 0) + gid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getpgid(pid int) (pgid int, err error) { + r0, _, e1 := RawSyscall(SYS_GETPGID, uintptr(pid), 0, 0) + pgid = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getpgrp() (pgrp int) { + r0, _, _ := RawSyscall(SYS_GETPGRP, 0, 0, 0) + pgrp = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getpid() (pid int) { + r0, _, _ := RawSyscall(SYS_GETPID, 0, 0, 0) + pid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getppid() (ppid int) { + r0, _, _ := RawSyscall(SYS_GETPPID, 0, 0, 0) + ppid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getpriority(which int, who int) (prio int, err error) { + r0, _, e1 := Syscall(SYS_GETPRIORITY, uintptr(which), uintptr(who), 0) + prio = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func 
Getrlimit(which int, lim *Rlimit) (err error) { + _, _, e1 := RawSyscall(SYS_GETRLIMIT, uintptr(which), uintptr(unsafe.Pointer(lim)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getrusage(who int, rusage *Rusage) (err error) { + _, _, e1 := RawSyscall(SYS_GETRUSAGE, uintptr(who), uintptr(unsafe.Pointer(rusage)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getsid(pid int) (sid int, err error) { + r0, _, e1 := RawSyscall(SYS_GETSID, uintptr(pid), 0, 0) + sid = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getuid() (uid int) { + r0, _, _ := RawSyscall(SYS_GETUID, 0, 0, 0) + uid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Issetugid() (tainted bool) { + r0, _, _ := RawSyscall(SYS_ISSETUGID, 0, 0, 0) + tainted = bool(r0 != 0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Kqueue() (fd int, err error) { + r0, _, e1 := Syscall(SYS_KQUEUE, 0, 0, 0) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Lchown(path string, uid int, gid int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_LCHOWN, uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid)) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Link(path string, link string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(link) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_LINK, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) + use(unsafe.Pointer(_p0)) + use(unsafe.Pointer(_p1)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Listen(s int, backlog int) (err error) { + _, _, e1 := Syscall(SYS_LISTEN, uintptr(s), uintptr(backlog), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Lstat(path string, stat *Stat_t) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_LSTAT64, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mkdir(path string, mode uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_MKDIR, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mkfifo(path string, mode uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_MKFIFO, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT 
THE TOP; DO NOT EDIT + +func Mknod(path string, mode uint32, dev int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_MKNOD, uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev)) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mlock(b []byte) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall(SYS_MLOCK, uintptr(_p0), uintptr(len(b)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mlockall(flags int) (err error) { + _, _, e1 := Syscall(SYS_MLOCKALL, uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mprotect(b []byte, prot int) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall(SYS_MPROTECT, uintptr(_p0), uintptr(len(b)), uintptr(prot)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Munlock(b []byte) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall(SYS_MUNLOCK, uintptr(_p0), uintptr(len(b)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Munlockall() (err error) { + _, _, e1 := Syscall(SYS_MUNLOCKALL, 0, 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Open(path string, mode int, perm uint32) (fd int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + r0, _, e1 := Syscall(SYS_OPEN, uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(perm)) + use(unsafe.Pointer(_p0)) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Pathconf(path string, name int) (val int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + r0, _, e1 := Syscall(SYS_PATHCONF, uintptr(unsafe.Pointer(_p0)), uintptr(name), 0) + use(unsafe.Pointer(_p0)) + val = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Pread(fd int, p []byte, offset int64) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_PREAD, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(offset), 0, 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Pwrite(fd int, p []byte, offset int64) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_PWRITE, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(offset), 0, 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func 
read(fd int, p []byte) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS_READ, uintptr(fd), uintptr(_p0), uintptr(len(p))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Readlink(path string, buf []byte) (n int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 unsafe.Pointer + if len(buf) > 0 { + _p1 = unsafe.Pointer(&buf[0]) + } else { + _p1 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS_READLINK, uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(buf))) + use(unsafe.Pointer(_p0)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Rename(from string, to string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(from) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(to) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_RENAME, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) + use(unsafe.Pointer(_p0)) + use(unsafe.Pointer(_p1)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Revoke(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_REVOKE, uintptr(unsafe.Pointer(_p0)), 0, 0) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Rmdir(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_RMDIR, uintptr(unsafe.Pointer(_p0)), 0, 0) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Seek(fd int, offset int64, whence int) (newoffset int64, err error) { + r0, _, e1 := Syscall(SYS_LSEEK, uintptr(fd), uintptr(offset), uintptr(whence)) + newoffset = int64(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Select(n int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (err error) { + _, _, e1 := Syscall6(SYS_SELECT, uintptr(n), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setegid(egid int) (err error) { + _, _, e1 := Syscall(SYS_SETEGID, uintptr(egid), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Seteuid(euid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETEUID, uintptr(euid), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setgid(gid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETGID, uintptr(gid), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setlogin(name string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(name) + if err != nil { + return + } + _, _, e1 := 
Syscall(SYS_SETLOGIN, uintptr(unsafe.Pointer(_p0)), 0, 0) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setpgid(pid int, pgid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETPGID, uintptr(pid), uintptr(pgid), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setpriority(which int, who int, prio int) (err error) { + _, _, e1 := Syscall(SYS_SETPRIORITY, uintptr(which), uintptr(who), uintptr(prio)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setprivexec(flag int) (err error) { + _, _, e1 := Syscall(SYS_SETPRIVEXEC, uintptr(flag), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setregid(rgid int, egid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETREGID, uintptr(rgid), uintptr(egid), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setreuid(ruid int, euid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETREUID, uintptr(ruid), uintptr(euid), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setrlimit(which int, lim *Rlimit) (err error) { + _, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(which), uintptr(unsafe.Pointer(lim)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setsid() (pid int, err error) { + r0, _, e1 := RawSyscall(SYS_SETSID, 0, 0, 0) + pid = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Settimeofday(tp *Timeval) (err error) { + _, _, e1 := RawSyscall(SYS_SETTIMEOFDAY, uintptr(unsafe.Pointer(tp)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setuid(uid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETUID, uintptr(uid), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Stat(path string, stat *Stat_t) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_STAT64, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Statfs(path string, stat *Statfs_t) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_STATFS64, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Symlink(path string, link string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(link) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_SYMLINK, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) + use(unsafe.Pointer(_p0)) + use(unsafe.Pointer(_p1)) + if e1 != 0 { + err = errnoErr(e1) 
+ } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Sync() (err error) { + _, _, e1 := Syscall(SYS_SYNC, 0, 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Truncate(path string, length int64) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_TRUNCATE, uintptr(unsafe.Pointer(_p0)), uintptr(length), 0) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Umask(newmask int) (oldmask int) { + r0, _, _ := Syscall(SYS_UMASK, uintptr(newmask), 0, 0) + oldmask = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Undelete(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_UNDELETE, uintptr(unsafe.Pointer(_p0)), 0, 0) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Unlink(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_UNLINK, uintptr(unsafe.Pointer(_p0)), 0, 0) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Unmount(path string, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_UNMOUNT, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func write(fd int, p []byte) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(_p0), uintptr(len(p))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func mmap(addr uintptr, length uintptr, prot int, flag int, fd int, pos int64) (ret uintptr, err error) { + r0, _, e1 := Syscall6(SYS_MMAP, uintptr(addr), uintptr(length), uintptr(prot), uintptr(flag), uintptr(fd), uintptr(pos)) + ret = uintptr(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func munmap(addr uintptr, length uintptr) (err error) { + _, _, e1 := Syscall(SYS_MUNMAP, uintptr(addr), uintptr(length), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func readlen(fd int, buf *byte, nbuf int) (n int, err error) { + r0, _, e1 := Syscall(SYS_READ, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func writelen(fd int, buf *byte, nbuf int) (n int, err error) { + r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fchmodat(dirfd int, path string, mode uint32, 
flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_FCHMODAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func gettimeofday(tp *Timeval) (sec int64, usec int32, err error) { + r0, r1, e1 := RawSyscall(SYS_GETTIMEOFDAY, uintptr(unsafe.Pointer(tp)), 0, 0) + sec = int64(r0) + usec = int32(r1) + if e1 != 0 { + err = errnoErr(e1) + } + return +} diff --git a/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zsyscall_darwin_arm.go b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zsyscall_darwin_arm.go new file mode 100644 index 000000000..640e85426 --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zsyscall_darwin_arm.go @@ -0,0 +1,1426 @@ +// mksyscall.pl syscall_bsd.go syscall_darwin.go syscall_darwin_arm.go +// MACHINE GENERATED BY THE COMMAND ABOVE; DO NOT EDIT + +// +build arm,darwin + +package unix + +import ( + "syscall" + "unsafe" +) + +var _ syscall.Errno + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getgroups(ngid int, gid *_Gid_t) (n int, err error) { + r0, _, e1 := RawSyscall(SYS_GETGROUPS, uintptr(ngid), uintptr(unsafe.Pointer(gid)), 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func setgroups(ngid int, gid *_Gid_t) (err error) { + _, _, e1 := RawSyscall(SYS_SETGROUPS, uintptr(ngid), uintptr(unsafe.Pointer(gid)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func wait4(pid int, wstatus *_C_int, options int, rusage *Rusage) (wpid int, err error) { + r0, _, e1 := Syscall6(SYS_WAIT4, uintptr(pid), uintptr(unsafe.Pointer(wstatus)), uintptr(options), uintptr(unsafe.Pointer(rusage)), 0, 0) + wpid = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error) { + r0, _, e1 := Syscall(SYS_ACCEPT, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func bind(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) { + _, _, e1 := Syscall(SYS_BIND, uintptr(s), uintptr(addr), uintptr(addrlen)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func connect(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) { + _, _, e1 := Syscall(SYS_CONNECT, uintptr(s), uintptr(addr), uintptr(addrlen)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func socket(domain int, typ int, proto int) (fd int, err error) { + r0, _, e1 := RawSyscall(SYS_SOCKET, uintptr(domain), uintptr(typ), uintptr(proto)) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getsockopt(s int, level int, name int, val unsafe.Pointer, vallen *_Socklen) (err error) { + _, _, e1 := Syscall6(SYS_GETSOCKOPT, uintptr(s), uintptr(level), 
uintptr(name), uintptr(val), uintptr(unsafe.Pointer(vallen)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func setsockopt(s int, level int, name int, val unsafe.Pointer, vallen uintptr) (err error) { + _, _, e1 := Syscall6(SYS_SETSOCKOPT, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(vallen), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getpeername(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) { + _, _, e1 := RawSyscall(SYS_GETPEERNAME, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getsockname(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) { + _, _, e1 := RawSyscall(SYS_GETSOCKNAME, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Shutdown(s int, how int) (err error) { + _, _, e1 := Syscall(SYS_SHUTDOWN, uintptr(s), uintptr(how), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func socketpair(domain int, typ int, proto int, fd *[2]int32) (err error) { + _, _, e1 := RawSyscall6(SYS_SOCKETPAIR, uintptr(domain), uintptr(typ), uintptr(proto), uintptr(unsafe.Pointer(fd)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Socklen) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_RECVFROM, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(flags), uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(fromlen))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func sendto(s int, buf []byte, flags int, to unsafe.Pointer, addrlen _Socklen) (err error) { + var _p0 unsafe.Pointer + if len(buf) > 0 { + _p0 = unsafe.Pointer(&buf[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall6(SYS_SENDTO, uintptr(s), uintptr(_p0), uintptr(len(buf)), uintptr(flags), uintptr(to), uintptr(addrlen)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func recvmsg(s int, msg *Msghdr, flags int) (n int, err error) { + r0, _, e1 := Syscall(SYS_RECVMSG, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func sendmsg(s int, msg *Msghdr, flags int) (n int, err error) { + r0, _, e1 := Syscall(SYS_SENDMSG, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func kevent(kq int, change unsafe.Pointer, nchange int, event unsafe.Pointer, nevent int, timeout *Timespec) (n int, err error) { + r0, _, e1 := Syscall6(SYS_KEVENT, uintptr(kq), uintptr(change), uintptr(nchange), uintptr(event), uintptr(nevent), 
uintptr(unsafe.Pointer(timeout))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { + var _p0 unsafe.Pointer + if len(mib) > 0 { + _p0 = unsafe.Pointer(&mib[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall6(SYS___SYSCTL, uintptr(_p0), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func utimes(path string, timeval *[2]Timeval) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_UTIMES, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(timeval)), 0) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func futimes(fd int, timeval *[2]Timeval) (err error) { + _, _, e1 := Syscall(SYS_FUTIMES, uintptr(fd), uintptr(unsafe.Pointer(timeval)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func fcntl(fd int, cmd int, arg int) (val int, err error) { + r0, _, e1 := Syscall(SYS_FCNTL, uintptr(fd), uintptr(cmd), uintptr(arg)) + val = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ptrace(request int, pid int, addr uintptr, data uintptr) (err error) { + _, _, e1 := Syscall6(SYS_PTRACE, uintptr(request), uintptr(pid), uintptr(addr), uintptr(data), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func pipe() (r int, w int, err error) { + r0, r1, e1 := RawSyscall(SYS_PIPE, 0, 0, 0) + r = int(r0) + w = int(r1) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func kill(pid int, signum int, posix int) (err error) { + _, _, e1 := Syscall(SYS_KILL, uintptr(pid), uintptr(signum), uintptr(posix)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Access(path string, mode uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_ACCESS, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Adjtime(delta *Timeval, olddelta *Timeval) (err error) { + _, _, e1 := Syscall(SYS_ADJTIME, uintptr(unsafe.Pointer(delta)), uintptr(unsafe.Pointer(olddelta)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Chdir(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_CHDIR, uintptr(unsafe.Pointer(_p0)), 0, 0) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Chflags(path string, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + 
} + _, _, e1 := Syscall(SYS_CHFLAGS, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Chmod(path string, mode uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_CHMOD, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Chown(path string, uid int, gid int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_CHOWN, uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid)) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Chroot(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_CHROOT, uintptr(unsafe.Pointer(_p0)), 0, 0) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Close(fd int) (err error) { + _, _, e1 := Syscall(SYS_CLOSE, uintptr(fd), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Dup(fd int) (nfd int, err error) { + r0, _, e1 := Syscall(SYS_DUP, uintptr(fd), 0, 0) + nfd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Dup2(from int, to int) (err error) { + _, _, e1 := Syscall(SYS_DUP2, uintptr(from), uintptr(to), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Exchangedata(path1 string, path2 string, options int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path1) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(path2) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_EXCHANGEDATA, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(options)) + use(unsafe.Pointer(_p0)) + use(unsafe.Pointer(_p1)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Exit(code int) { + Syscall(SYS_EXIT, uintptr(code), 0, 0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fchdir(fd int) (err error) { + _, _, e1 := Syscall(SYS_FCHDIR, uintptr(fd), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fchflags(fd int, flags int) (err error) { + _, _, e1 := Syscall(SYS_FCHFLAGS, uintptr(fd), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fchmod(fd int, mode uint32) (err error) { + _, _, e1 := Syscall(SYS_FCHMOD, uintptr(fd), uintptr(mode), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fchown(fd int, uid int, gid int) (err error) { + _, _, e1 := Syscall(SYS_FCHOWN, uintptr(fd), uintptr(uid), uintptr(gid)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE 
IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Flock(fd int, how int) (err error) { + _, _, e1 := Syscall(SYS_FLOCK, uintptr(fd), uintptr(how), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fpathconf(fd int, name int) (val int, err error) { + r0, _, e1 := Syscall(SYS_FPATHCONF, uintptr(fd), uintptr(name), 0) + val = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fstat(fd int, stat *Stat_t) (err error) { + _, _, e1 := Syscall(SYS_FSTAT64, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fstatfs(fd int, stat *Statfs_t) (err error) { + _, _, e1 := Syscall(SYS_FSTATFS64, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fsync(fd int) (err error) { + _, _, e1 := Syscall(SYS_FSYNC, uintptr(fd), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Ftruncate(fd int, length int64) (err error) { + _, _, e1 := Syscall(SYS_FTRUNCATE, uintptr(fd), uintptr(length), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getdirentries(fd int, buf []byte, basep *uintptr) (n int, err error) { + var _p0 unsafe.Pointer + if len(buf) > 0 { + _p0 = unsafe.Pointer(&buf[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_GETDIRENTRIES64, uintptr(fd), uintptr(_p0), uintptr(len(buf)), uintptr(unsafe.Pointer(basep)), 0, 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getdtablesize() (size int) { + r0, _, _ := Syscall(SYS_GETDTABLESIZE, 0, 0, 0) + size = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getegid() (egid int) { + r0, _, _ := RawSyscall(SYS_GETEGID, 0, 0, 0) + egid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Geteuid() (uid int) { + r0, _, _ := RawSyscall(SYS_GETEUID, 0, 0, 0) + uid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getgid() (gid int) { + r0, _, _ := RawSyscall(SYS_GETGID, 0, 0, 0) + gid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getpgid(pid int) (pgid int, err error) { + r0, _, e1 := RawSyscall(SYS_GETPGID, uintptr(pid), 0, 0) + pgid = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getpgrp() (pgrp int) { + r0, _, _ := RawSyscall(SYS_GETPGRP, 0, 0, 0) + pgrp = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getpid() (pid int) { + r0, _, _ := RawSyscall(SYS_GETPID, 0, 0, 0) + pid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getppid() (ppid int) { + r0, _, _ := RawSyscall(SYS_GETPPID, 0, 0, 0) + ppid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getpriority(which int, who int) (prio int, err error) { + r0, _, e1 := Syscall(SYS_GETPRIORITY, 
uintptr(which), uintptr(who), 0) + prio = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getrlimit(which int, lim *Rlimit) (err error) { + _, _, e1 := RawSyscall(SYS_GETRLIMIT, uintptr(which), uintptr(unsafe.Pointer(lim)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getrusage(who int, rusage *Rusage) (err error) { + _, _, e1 := RawSyscall(SYS_GETRUSAGE, uintptr(who), uintptr(unsafe.Pointer(rusage)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getsid(pid int) (sid int, err error) { + r0, _, e1 := RawSyscall(SYS_GETSID, uintptr(pid), 0, 0) + sid = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getuid() (uid int) { + r0, _, _ := RawSyscall(SYS_GETUID, 0, 0, 0) + uid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Issetugid() (tainted bool) { + r0, _, _ := RawSyscall(SYS_ISSETUGID, 0, 0, 0) + tainted = bool(r0 != 0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Kqueue() (fd int, err error) { + r0, _, e1 := Syscall(SYS_KQUEUE, 0, 0, 0) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Lchown(path string, uid int, gid int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_LCHOWN, uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid)) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Link(path string, link string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(link) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_LINK, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) + use(unsafe.Pointer(_p0)) + use(unsafe.Pointer(_p1)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Listen(s int, backlog int) (err error) { + _, _, e1 := Syscall(SYS_LISTEN, uintptr(s), uintptr(backlog), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Lstat(path string, stat *Stat_t) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_LSTAT64, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mkdir(path string, mode uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_MKDIR, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mkfifo(path string, mode uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := 
Syscall(SYS_MKFIFO, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mknod(path string, mode uint32, dev int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_MKNOD, uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev)) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mlock(b []byte) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall(SYS_MLOCK, uintptr(_p0), uintptr(len(b)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mlockall(flags int) (err error) { + _, _, e1 := Syscall(SYS_MLOCKALL, uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mprotect(b []byte, prot int) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall(SYS_MPROTECT, uintptr(_p0), uintptr(len(b)), uintptr(prot)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Munlock(b []byte) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall(SYS_MUNLOCK, uintptr(_p0), uintptr(len(b)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Munlockall() (err error) { + _, _, e1 := Syscall(SYS_MUNLOCKALL, 0, 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Open(path string, mode int, perm uint32) (fd int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + r0, _, e1 := Syscall(SYS_OPEN, uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(perm)) + use(unsafe.Pointer(_p0)) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Pathconf(path string, name int) (val int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + r0, _, e1 := Syscall(SYS_PATHCONF, uintptr(unsafe.Pointer(_p0)), uintptr(name), 0) + use(unsafe.Pointer(_p0)) + val = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Pread(fd int, p []byte, offset int64) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_PREAD, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(offset), 0, 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Pwrite(fd int, p []byte, offset int64) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_PWRITE, uintptr(fd), 
uintptr(_p0), uintptr(len(p)), uintptr(offset), 0, 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func read(fd int, p []byte) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS_READ, uintptr(fd), uintptr(_p0), uintptr(len(p))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Readlink(path string, buf []byte) (n int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 unsafe.Pointer + if len(buf) > 0 { + _p1 = unsafe.Pointer(&buf[0]) + } else { + _p1 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS_READLINK, uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(buf))) + use(unsafe.Pointer(_p0)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Rename(from string, to string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(from) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(to) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_RENAME, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) + use(unsafe.Pointer(_p0)) + use(unsafe.Pointer(_p1)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Revoke(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_REVOKE, uintptr(unsafe.Pointer(_p0)), 0, 0) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Rmdir(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_RMDIR, uintptr(unsafe.Pointer(_p0)), 0, 0) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Seek(fd int, offset int64, whence int) (newoffset int64, err error) { + r0, _, e1 := Syscall(SYS_LSEEK, uintptr(fd), uintptr(offset), uintptr(whence)) + newoffset = int64(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Select(n int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (err error) { + _, _, e1 := Syscall6(SYS_SELECT, uintptr(n), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setegid(egid int) (err error) { + _, _, e1 := Syscall(SYS_SETEGID, uintptr(egid), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Seteuid(euid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETEUID, uintptr(euid), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setgid(gid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETGID, uintptr(gid), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS 
GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setlogin(name string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(name) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_SETLOGIN, uintptr(unsafe.Pointer(_p0)), 0, 0) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setpgid(pid int, pgid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETPGID, uintptr(pid), uintptr(pgid), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setpriority(which int, who int, prio int) (err error) { + _, _, e1 := Syscall(SYS_SETPRIORITY, uintptr(which), uintptr(who), uintptr(prio)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setprivexec(flag int) (err error) { + _, _, e1 := Syscall(SYS_SETPRIVEXEC, uintptr(flag), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setregid(rgid int, egid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETREGID, uintptr(rgid), uintptr(egid), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setreuid(ruid int, euid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETREUID, uintptr(ruid), uintptr(euid), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setrlimit(which int, lim *Rlimit) (err error) { + _, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(which), uintptr(unsafe.Pointer(lim)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setsid() (pid int, err error) { + r0, _, e1 := RawSyscall(SYS_SETSID, 0, 0, 0) + pid = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Settimeofday(tp *Timeval) (err error) { + _, _, e1 := RawSyscall(SYS_SETTIMEOFDAY, uintptr(unsafe.Pointer(tp)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setuid(uid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETUID, uintptr(uid), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Stat(path string, stat *Stat_t) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_STAT64, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Statfs(path string, stat *Statfs_t) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_STATFS64, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Symlink(path string, link string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(link) + if err != nil { + return 
+ } + _, _, e1 := Syscall(SYS_SYMLINK, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) + use(unsafe.Pointer(_p0)) + use(unsafe.Pointer(_p1)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Sync() (err error) { + _, _, e1 := Syscall(SYS_SYNC, 0, 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Truncate(path string, length int64) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_TRUNCATE, uintptr(unsafe.Pointer(_p0)), uintptr(length), 0) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Umask(newmask int) (oldmask int) { + r0, _, _ := Syscall(SYS_UMASK, uintptr(newmask), 0, 0) + oldmask = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Undelete(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_UNDELETE, uintptr(unsafe.Pointer(_p0)), 0, 0) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Unlink(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_UNLINK, uintptr(unsafe.Pointer(_p0)), 0, 0) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Unmount(path string, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_UNMOUNT, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func write(fd int, p []byte) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(_p0), uintptr(len(p))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func mmap(addr uintptr, length uintptr, prot int, flag int, fd int, pos int64) (ret uintptr, err error) { + r0, _, e1 := Syscall6(SYS_MMAP, uintptr(addr), uintptr(length), uintptr(prot), uintptr(flag), uintptr(fd), uintptr(pos)) + ret = uintptr(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func munmap(addr uintptr, length uintptr) (err error) { + _, _, e1 := Syscall(SYS_MUNMAP, uintptr(addr), uintptr(length), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func readlen(fd int, buf *byte, nbuf int) (n int, err error) { + r0, _, e1 := Syscall(SYS_READ, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func writelen(fd int, buf *byte, nbuf int) (n int, err error) { + r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) 
+ n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func gettimeofday(tp *Timeval) (sec int32, usec int32, err error) { + r0, r1, e1 := RawSyscall(SYS_GETTIMEOFDAY, uintptr(unsafe.Pointer(tp)), 0, 0) + sec = int32(r0) + usec = int32(r1) + if e1 != 0 { + err = errnoErr(e1) + } + return +} diff --git a/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zsyscall_darwin_arm64.go b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zsyscall_darwin_arm64.go new file mode 100644 index 000000000..933f67bbf --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zsyscall_darwin_arm64.go @@ -0,0 +1,1426 @@ +// mksyscall.pl syscall_bsd.go syscall_darwin.go syscall_darwin_arm64.go +// MACHINE GENERATED BY THE COMMAND ABOVE; DO NOT EDIT + +// +build arm64,darwin + +package unix + +import ( + "syscall" + "unsafe" +) + +var _ syscall.Errno + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getgroups(ngid int, gid *_Gid_t) (n int, err error) { + r0, _, e1 := RawSyscall(SYS_GETGROUPS, uintptr(ngid), uintptr(unsafe.Pointer(gid)), 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func setgroups(ngid int, gid *_Gid_t) (err error) { + _, _, e1 := RawSyscall(SYS_SETGROUPS, uintptr(ngid), uintptr(unsafe.Pointer(gid)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func wait4(pid int, wstatus *_C_int, options int, rusage *Rusage) (wpid int, err error) { + r0, _, e1 := Syscall6(SYS_WAIT4, uintptr(pid), uintptr(unsafe.Pointer(wstatus)), uintptr(options), uintptr(unsafe.Pointer(rusage)), 0, 0) + wpid = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error) { + r0, _, e1 := Syscall(SYS_ACCEPT, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func bind(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) { + _, _, e1 := Syscall(SYS_BIND, uintptr(s), uintptr(addr), uintptr(addrlen)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func connect(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) { + _, _, e1 := Syscall(SYS_CONNECT, uintptr(s), uintptr(addr), uintptr(addrlen)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func socket(domain int, typ int, proto int) (fd int, err error) { + r0, _, e1 := RawSyscall(SYS_SOCKET, uintptr(domain), uintptr(typ), uintptr(proto)) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getsockopt(s int, level int, name int, val unsafe.Pointer, vallen *_Socklen) (err error) { + _, _, e1 := Syscall6(SYS_GETSOCKOPT, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(unsafe.Pointer(vallen)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func setsockopt(s int, level int, name int, val 
unsafe.Pointer, vallen uintptr) (err error) { + _, _, e1 := Syscall6(SYS_SETSOCKOPT, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(vallen), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getpeername(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) { + _, _, e1 := RawSyscall(SYS_GETPEERNAME, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getsockname(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) { + _, _, e1 := RawSyscall(SYS_GETSOCKNAME, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Shutdown(s int, how int) (err error) { + _, _, e1 := Syscall(SYS_SHUTDOWN, uintptr(s), uintptr(how), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func socketpair(domain int, typ int, proto int, fd *[2]int32) (err error) { + _, _, e1 := RawSyscall6(SYS_SOCKETPAIR, uintptr(domain), uintptr(typ), uintptr(proto), uintptr(unsafe.Pointer(fd)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Socklen) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_RECVFROM, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(flags), uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(fromlen))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func sendto(s int, buf []byte, flags int, to unsafe.Pointer, addrlen _Socklen) (err error) { + var _p0 unsafe.Pointer + if len(buf) > 0 { + _p0 = unsafe.Pointer(&buf[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall6(SYS_SENDTO, uintptr(s), uintptr(_p0), uintptr(len(buf)), uintptr(flags), uintptr(to), uintptr(addrlen)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func recvmsg(s int, msg *Msghdr, flags int) (n int, err error) { + r0, _, e1 := Syscall(SYS_RECVMSG, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func sendmsg(s int, msg *Msghdr, flags int) (n int, err error) { + r0, _, e1 := Syscall(SYS_SENDMSG, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func kevent(kq int, change unsafe.Pointer, nchange int, event unsafe.Pointer, nevent int, timeout *Timespec) (n int, err error) { + r0, _, e1 := Syscall6(SYS_KEVENT, uintptr(kq), uintptr(change), uintptr(nchange), uintptr(event), uintptr(nevent), uintptr(unsafe.Pointer(timeout))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen 
uintptr) (err error) { + var _p0 unsafe.Pointer + if len(mib) > 0 { + _p0 = unsafe.Pointer(&mib[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall6(SYS___SYSCTL, uintptr(_p0), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func utimes(path string, timeval *[2]Timeval) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_UTIMES, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(timeval)), 0) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func futimes(fd int, timeval *[2]Timeval) (err error) { + _, _, e1 := Syscall(SYS_FUTIMES, uintptr(fd), uintptr(unsafe.Pointer(timeval)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func fcntl(fd int, cmd int, arg int) (val int, err error) { + r0, _, e1 := Syscall(SYS_FCNTL, uintptr(fd), uintptr(cmd), uintptr(arg)) + val = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ptrace(request int, pid int, addr uintptr, data uintptr) (err error) { + _, _, e1 := Syscall6(SYS_PTRACE, uintptr(request), uintptr(pid), uintptr(addr), uintptr(data), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func pipe() (r int, w int, err error) { + r0, r1, e1 := RawSyscall(SYS_PIPE, 0, 0, 0) + r = int(r0) + w = int(r1) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func kill(pid int, signum int, posix int) (err error) { + _, _, e1 := Syscall(SYS_KILL, uintptr(pid), uintptr(signum), uintptr(posix)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Access(path string, mode uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_ACCESS, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Adjtime(delta *Timeval, olddelta *Timeval) (err error) { + _, _, e1 := Syscall(SYS_ADJTIME, uintptr(unsafe.Pointer(delta)), uintptr(unsafe.Pointer(olddelta)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Chdir(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_CHDIR, uintptr(unsafe.Pointer(_p0)), 0, 0) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Chflags(path string, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_CHFLAGS, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func 
Chmod(path string, mode uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_CHMOD, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Chown(path string, uid int, gid int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_CHOWN, uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid)) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Chroot(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_CHROOT, uintptr(unsafe.Pointer(_p0)), 0, 0) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Close(fd int) (err error) { + _, _, e1 := Syscall(SYS_CLOSE, uintptr(fd), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Dup(fd int) (nfd int, err error) { + r0, _, e1 := Syscall(SYS_DUP, uintptr(fd), 0, 0) + nfd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Dup2(from int, to int) (err error) { + _, _, e1 := Syscall(SYS_DUP2, uintptr(from), uintptr(to), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Exchangedata(path1 string, path2 string, options int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path1) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(path2) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_EXCHANGEDATA, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(options)) + use(unsafe.Pointer(_p0)) + use(unsafe.Pointer(_p1)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Exit(code int) { + Syscall(SYS_EXIT, uintptr(code), 0, 0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fchdir(fd int) (err error) { + _, _, e1 := Syscall(SYS_FCHDIR, uintptr(fd), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fchflags(fd int, flags int) (err error) { + _, _, e1 := Syscall(SYS_FCHFLAGS, uintptr(fd), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fchmod(fd int, mode uint32) (err error) { + _, _, e1 := Syscall(SYS_FCHMOD, uintptr(fd), uintptr(mode), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fchown(fd int, uid int, gid int) (err error) { + _, _, e1 := Syscall(SYS_FCHOWN, uintptr(fd), uintptr(uid), uintptr(gid)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Flock(fd int, how int) (err error) { + _, _, e1 := Syscall(SYS_FLOCK, uintptr(fd), uintptr(how), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED 
BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fpathconf(fd int, name int) (val int, err error) { + r0, _, e1 := Syscall(SYS_FPATHCONF, uintptr(fd), uintptr(name), 0) + val = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fstat(fd int, stat *Stat_t) (err error) { + _, _, e1 := Syscall(SYS_FSTAT64, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fstatfs(fd int, stat *Statfs_t) (err error) { + _, _, e1 := Syscall(SYS_FSTATFS64, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fsync(fd int) (err error) { + _, _, e1 := Syscall(SYS_FSYNC, uintptr(fd), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Ftruncate(fd int, length int64) (err error) { + _, _, e1 := Syscall(SYS_FTRUNCATE, uintptr(fd), uintptr(length), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getdirentries(fd int, buf []byte, basep *uintptr) (n int, err error) { + var _p0 unsafe.Pointer + if len(buf) > 0 { + _p0 = unsafe.Pointer(&buf[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_GETDIRENTRIES64, uintptr(fd), uintptr(_p0), uintptr(len(buf)), uintptr(unsafe.Pointer(basep)), 0, 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getdtablesize() (size int) { + r0, _, _ := Syscall(SYS_GETDTABLESIZE, 0, 0, 0) + size = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getegid() (egid int) { + r0, _, _ := RawSyscall(SYS_GETEGID, 0, 0, 0) + egid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Geteuid() (uid int) { + r0, _, _ := RawSyscall(SYS_GETEUID, 0, 0, 0) + uid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getgid() (gid int) { + r0, _, _ := RawSyscall(SYS_GETGID, 0, 0, 0) + gid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getpgid(pid int) (pgid int, err error) { + r0, _, e1 := RawSyscall(SYS_GETPGID, uintptr(pid), 0, 0) + pgid = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getpgrp() (pgrp int) { + r0, _, _ := RawSyscall(SYS_GETPGRP, 0, 0, 0) + pgrp = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getpid() (pid int) { + r0, _, _ := RawSyscall(SYS_GETPID, 0, 0, 0) + pid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getppid() (ppid int) { + r0, _, _ := RawSyscall(SYS_GETPPID, 0, 0, 0) + ppid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getpriority(which int, who int) (prio int, err error) { + r0, _, e1 := Syscall(SYS_GETPRIORITY, uintptr(which), uintptr(who), 0) + prio = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getrlimit(which int, lim *Rlimit) (err error) { + _, _, e1 := 
RawSyscall(SYS_GETRLIMIT, uintptr(which), uintptr(unsafe.Pointer(lim)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getrusage(who int, rusage *Rusage) (err error) { + _, _, e1 := RawSyscall(SYS_GETRUSAGE, uintptr(who), uintptr(unsafe.Pointer(rusage)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getsid(pid int) (sid int, err error) { + r0, _, e1 := RawSyscall(SYS_GETSID, uintptr(pid), 0, 0) + sid = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getuid() (uid int) { + r0, _, _ := RawSyscall(SYS_GETUID, 0, 0, 0) + uid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Issetugid() (tainted bool) { + r0, _, _ := RawSyscall(SYS_ISSETUGID, 0, 0, 0) + tainted = bool(r0 != 0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Kqueue() (fd int, err error) { + r0, _, e1 := Syscall(SYS_KQUEUE, 0, 0, 0) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Lchown(path string, uid int, gid int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_LCHOWN, uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid)) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Link(path string, link string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(link) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_LINK, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) + use(unsafe.Pointer(_p0)) + use(unsafe.Pointer(_p1)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Listen(s int, backlog int) (err error) { + _, _, e1 := Syscall(SYS_LISTEN, uintptr(s), uintptr(backlog), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Lstat(path string, stat *Stat_t) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_LSTAT64, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mkdir(path string, mode uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_MKDIR, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mkfifo(path string, mode uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_MKFIFO, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mknod(path string, mode uint32, 
dev int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_MKNOD, uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev)) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mlock(b []byte) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall(SYS_MLOCK, uintptr(_p0), uintptr(len(b)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mlockall(flags int) (err error) { + _, _, e1 := Syscall(SYS_MLOCKALL, uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mprotect(b []byte, prot int) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall(SYS_MPROTECT, uintptr(_p0), uintptr(len(b)), uintptr(prot)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Munlock(b []byte) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall(SYS_MUNLOCK, uintptr(_p0), uintptr(len(b)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Munlockall() (err error) { + _, _, e1 := Syscall(SYS_MUNLOCKALL, 0, 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Open(path string, mode int, perm uint32) (fd int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + r0, _, e1 := Syscall(SYS_OPEN, uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(perm)) + use(unsafe.Pointer(_p0)) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Pathconf(path string, name int) (val int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + r0, _, e1 := Syscall(SYS_PATHCONF, uintptr(unsafe.Pointer(_p0)), uintptr(name), 0) + use(unsafe.Pointer(_p0)) + val = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Pread(fd int, p []byte, offset int64) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_PREAD, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(offset), 0, 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Pwrite(fd int, p []byte, offset int64) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_PWRITE, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(offset), 0, 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func read(fd int, p []byte) (n int, err error) { + var _p0 
unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS_READ, uintptr(fd), uintptr(_p0), uintptr(len(p))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Readlink(path string, buf []byte) (n int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 unsafe.Pointer + if len(buf) > 0 { + _p1 = unsafe.Pointer(&buf[0]) + } else { + _p1 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS_READLINK, uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(buf))) + use(unsafe.Pointer(_p0)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Rename(from string, to string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(from) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(to) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_RENAME, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) + use(unsafe.Pointer(_p0)) + use(unsafe.Pointer(_p1)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Revoke(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_REVOKE, uintptr(unsafe.Pointer(_p0)), 0, 0) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Rmdir(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_RMDIR, uintptr(unsafe.Pointer(_p0)), 0, 0) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Seek(fd int, offset int64, whence int) (newoffset int64, err error) { + r0, _, e1 := Syscall(SYS_LSEEK, uintptr(fd), uintptr(offset), uintptr(whence)) + newoffset = int64(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Select(n int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (err error) { + _, _, e1 := Syscall6(SYS_SELECT, uintptr(n), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setegid(egid int) (err error) { + _, _, e1 := Syscall(SYS_SETEGID, uintptr(egid), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Seteuid(euid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETEUID, uintptr(euid), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setgid(gid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETGID, uintptr(gid), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setlogin(name string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(name) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_SETLOGIN, uintptr(unsafe.Pointer(_p0)), 0, 0) + 
use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setpgid(pid int, pgid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETPGID, uintptr(pid), uintptr(pgid), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setpriority(which int, who int, prio int) (err error) { + _, _, e1 := Syscall(SYS_SETPRIORITY, uintptr(which), uintptr(who), uintptr(prio)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setprivexec(flag int) (err error) { + _, _, e1 := Syscall(SYS_SETPRIVEXEC, uintptr(flag), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setregid(rgid int, egid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETREGID, uintptr(rgid), uintptr(egid), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setreuid(ruid int, euid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETREUID, uintptr(ruid), uintptr(euid), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setrlimit(which int, lim *Rlimit) (err error) { + _, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(which), uintptr(unsafe.Pointer(lim)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setsid() (pid int, err error) { + r0, _, e1 := RawSyscall(SYS_SETSID, 0, 0, 0) + pid = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Settimeofday(tp *Timeval) (err error) { + _, _, e1 := RawSyscall(SYS_SETTIMEOFDAY, uintptr(unsafe.Pointer(tp)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setuid(uid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETUID, uintptr(uid), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Stat(path string, stat *Stat_t) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_STAT64, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Statfs(path string, stat *Statfs_t) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_STATFS64, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Symlink(path string, link string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(link) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_SYMLINK, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) + use(unsafe.Pointer(_p0)) + use(unsafe.Pointer(_p1)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND 
AT THE TOP; DO NOT EDIT + +func Sync() (err error) { + _, _, e1 := Syscall(SYS_SYNC, 0, 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Truncate(path string, length int64) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_TRUNCATE, uintptr(unsafe.Pointer(_p0)), uintptr(length), 0) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Umask(newmask int) (oldmask int) { + r0, _, _ := Syscall(SYS_UMASK, uintptr(newmask), 0, 0) + oldmask = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Undelete(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_UNDELETE, uintptr(unsafe.Pointer(_p0)), 0, 0) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Unlink(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_UNLINK, uintptr(unsafe.Pointer(_p0)), 0, 0) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Unmount(path string, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_UNMOUNT, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func write(fd int, p []byte) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(_p0), uintptr(len(p))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func mmap(addr uintptr, length uintptr, prot int, flag int, fd int, pos int64) (ret uintptr, err error) { + r0, _, e1 := Syscall6(SYS_MMAP, uintptr(addr), uintptr(length), uintptr(prot), uintptr(flag), uintptr(fd), uintptr(pos)) + ret = uintptr(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func munmap(addr uintptr, length uintptr) (err error) { + _, _, e1 := Syscall(SYS_MUNMAP, uintptr(addr), uintptr(length), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func readlen(fd int, buf *byte, nbuf int) (n int, err error) { + r0, _, e1 := Syscall(SYS_READ, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func writelen(fd int, buf *byte, nbuf int) (n int, err error) { + r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func gettimeofday(tp *Timeval) (sec int64, usec int32, err error) { + r0, r1, e1 := RawSyscall(SYS_GETTIMEOFDAY, 
uintptr(unsafe.Pointer(tp)), 0, 0) + sec = int64(r0) + usec = int32(r1) + if e1 != 0 { + err = errnoErr(e1) + } + return +} diff --git a/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zsyscall_dragonfly_386.go b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zsyscall_dragonfly_386.go new file mode 100644 index 000000000..32e46af60 --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zsyscall_dragonfly_386.go @@ -0,0 +1,1412 @@ +// mksyscall.pl -l32 -dragonfly syscall_bsd.go syscall_dragonfly.go syscall_dragonfly_386.go +// MACHINE GENERATED BY THE COMMAND ABOVE; DO NOT EDIT + +// +build 386,dragonfly + +package unix + +import ( + "syscall" + "unsafe" +) + +var _ syscall.Errno + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getgroups(ngid int, gid *_Gid_t) (n int, err error) { + r0, _, e1 := RawSyscall(SYS_GETGROUPS, uintptr(ngid), uintptr(unsafe.Pointer(gid)), 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func setgroups(ngid int, gid *_Gid_t) (err error) { + _, _, e1 := RawSyscall(SYS_SETGROUPS, uintptr(ngid), uintptr(unsafe.Pointer(gid)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func wait4(pid int, wstatus *_C_int, options int, rusage *Rusage) (wpid int, err error) { + r0, _, e1 := Syscall6(SYS_WAIT4, uintptr(pid), uintptr(unsafe.Pointer(wstatus)), uintptr(options), uintptr(unsafe.Pointer(rusage)), 0, 0) + wpid = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error) { + r0, _, e1 := Syscall(SYS_ACCEPT, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func bind(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) { + _, _, e1 := Syscall(SYS_BIND, uintptr(s), uintptr(addr), uintptr(addrlen)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func connect(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) { + _, _, e1 := Syscall(SYS_CONNECT, uintptr(s), uintptr(addr), uintptr(addrlen)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func socket(domain int, typ int, proto int) (fd int, err error) { + r0, _, e1 := RawSyscall(SYS_SOCKET, uintptr(domain), uintptr(typ), uintptr(proto)) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getsockopt(s int, level int, name int, val unsafe.Pointer, vallen *_Socklen) (err error) { + _, _, e1 := Syscall6(SYS_GETSOCKOPT, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(unsafe.Pointer(vallen)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func setsockopt(s int, level int, name int, val unsafe.Pointer, vallen uintptr) (err error) { + _, _, e1 := Syscall6(SYS_SETSOCKOPT, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(vallen), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS 
FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getpeername(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) { + _, _, e1 := RawSyscall(SYS_GETPEERNAME, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getsockname(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) { + _, _, e1 := RawSyscall(SYS_GETSOCKNAME, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Shutdown(s int, how int) (err error) { + _, _, e1 := Syscall(SYS_SHUTDOWN, uintptr(s), uintptr(how), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func socketpair(domain int, typ int, proto int, fd *[2]int32) (err error) { + _, _, e1 := RawSyscall6(SYS_SOCKETPAIR, uintptr(domain), uintptr(typ), uintptr(proto), uintptr(unsafe.Pointer(fd)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Socklen) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_RECVFROM, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(flags), uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(fromlen))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func sendto(s int, buf []byte, flags int, to unsafe.Pointer, addrlen _Socklen) (err error) { + var _p0 unsafe.Pointer + if len(buf) > 0 { + _p0 = unsafe.Pointer(&buf[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall6(SYS_SENDTO, uintptr(s), uintptr(_p0), uintptr(len(buf)), uintptr(flags), uintptr(to), uintptr(addrlen)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func recvmsg(s int, msg *Msghdr, flags int) (n int, err error) { + r0, _, e1 := Syscall(SYS_RECVMSG, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func sendmsg(s int, msg *Msghdr, flags int) (n int, err error) { + r0, _, e1 := Syscall(SYS_SENDMSG, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func kevent(kq int, change unsafe.Pointer, nchange int, event unsafe.Pointer, nevent int, timeout *Timespec) (n int, err error) { + r0, _, e1 := Syscall6(SYS_KEVENT, uintptr(kq), uintptr(change), uintptr(nchange), uintptr(event), uintptr(nevent), uintptr(unsafe.Pointer(timeout))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { + var _p0 unsafe.Pointer + if len(mib) > 0 { + _p0 = unsafe.Pointer(&mib[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall6(SYS___SYSCTL, uintptr(_p0), uintptr(len(mib)), 
uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func utimes(path string, timeval *[2]Timeval) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_UTIMES, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(timeval)), 0) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func futimes(fd int, timeval *[2]Timeval) (err error) { + _, _, e1 := Syscall(SYS_FUTIMES, uintptr(fd), uintptr(unsafe.Pointer(timeval)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func fcntl(fd int, cmd int, arg int) (val int, err error) { + r0, _, e1 := Syscall(SYS_FCNTL, uintptr(fd), uintptr(cmd), uintptr(arg)) + val = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func pipe() (r int, w int, err error) { + r0, r1, e1 := RawSyscall(SYS_PIPE, 0, 0, 0) + r = int(r0) + w = int(r1) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func extpread(fd int, p []byte, flags int, offset int64) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_EXTPREAD, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(flags), uintptr(offset), uintptr(offset>>32)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func extpwrite(fd int, p []byte, flags int, offset int64) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_EXTPWRITE, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(flags), uintptr(offset), uintptr(offset>>32)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Access(path string, mode uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_ACCESS, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Adjtime(delta *Timeval, olddelta *Timeval) (err error) { + _, _, e1 := Syscall(SYS_ADJTIME, uintptr(unsafe.Pointer(delta)), uintptr(unsafe.Pointer(olddelta)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Chdir(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_CHDIR, uintptr(unsafe.Pointer(_p0)), 0, 0) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Chflags(path string, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_CHFLAGS, uintptr(unsafe.Pointer(_p0)), 
uintptr(flags), 0) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Chmod(path string, mode uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_CHMOD, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Chown(path string, uid int, gid int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_CHOWN, uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid)) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Chroot(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_CHROOT, uintptr(unsafe.Pointer(_p0)), 0, 0) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Close(fd int) (err error) { + _, _, e1 := Syscall(SYS_CLOSE, uintptr(fd), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Dup(fd int) (nfd int, err error) { + r0, _, e1 := Syscall(SYS_DUP, uintptr(fd), 0, 0) + nfd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Dup2(from int, to int) (err error) { + _, _, e1 := Syscall(SYS_DUP2, uintptr(from), uintptr(to), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Exit(code int) { + Syscall(SYS_EXIT, uintptr(code), 0, 0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fchdir(fd int) (err error) { + _, _, e1 := Syscall(SYS_FCHDIR, uintptr(fd), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fchflags(fd int, flags int) (err error) { + _, _, e1 := Syscall(SYS_FCHFLAGS, uintptr(fd), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fchmod(fd int, mode uint32) (err error) { + _, _, e1 := Syscall(SYS_FCHMOD, uintptr(fd), uintptr(mode), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fchown(fd int, uid int, gid int) (err error) { + _, _, e1 := Syscall(SYS_FCHOWN, uintptr(fd), uintptr(uid), uintptr(gid)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Flock(fd int, how int) (err error) { + _, _, e1 := Syscall(SYS_FLOCK, uintptr(fd), uintptr(how), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fpathconf(fd int, name int) (val int, err error) { + r0, _, e1 := Syscall(SYS_FPATHCONF, uintptr(fd), uintptr(name), 0) + val = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fstat(fd int, stat *Stat_t) (err error) { + _, _, e1 
:= Syscall(SYS_FSTAT, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fstatfs(fd int, stat *Statfs_t) (err error) { + _, _, e1 := Syscall(SYS_FSTATFS, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fsync(fd int) (err error) { + _, _, e1 := Syscall(SYS_FSYNC, uintptr(fd), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Ftruncate(fd int, length int64) (err error) { + _, _, e1 := Syscall6(SYS_FTRUNCATE, uintptr(fd), 0, uintptr(length), uintptr(length>>32), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getdirentries(fd int, buf []byte, basep *uintptr) (n int, err error) { + var _p0 unsafe.Pointer + if len(buf) > 0 { + _p0 = unsafe.Pointer(&buf[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_GETDIRENTRIES, uintptr(fd), uintptr(_p0), uintptr(len(buf)), uintptr(unsafe.Pointer(basep)), 0, 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getdtablesize() (size int) { + r0, _, _ := Syscall(SYS_GETDTABLESIZE, 0, 0, 0) + size = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getegid() (egid int) { + r0, _, _ := RawSyscall(SYS_GETEGID, 0, 0, 0) + egid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Geteuid() (uid int) { + r0, _, _ := RawSyscall(SYS_GETEUID, 0, 0, 0) + uid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getgid() (gid int) { + r0, _, _ := RawSyscall(SYS_GETGID, 0, 0, 0) + gid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getpgid(pid int) (pgid int, err error) { + r0, _, e1 := RawSyscall(SYS_GETPGID, uintptr(pid), 0, 0) + pgid = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getpgrp() (pgrp int) { + r0, _, _ := RawSyscall(SYS_GETPGRP, 0, 0, 0) + pgrp = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getpid() (pid int) { + r0, _, _ := RawSyscall(SYS_GETPID, 0, 0, 0) + pid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getppid() (ppid int) { + r0, _, _ := RawSyscall(SYS_GETPPID, 0, 0, 0) + ppid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getpriority(which int, who int) (prio int, err error) { + r0, _, e1 := Syscall(SYS_GETPRIORITY, uintptr(which), uintptr(who), 0) + prio = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getrlimit(which int, lim *Rlimit) (err error) { + _, _, e1 := RawSyscall(SYS_GETRLIMIT, uintptr(which), uintptr(unsafe.Pointer(lim)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getrusage(who int, rusage *Rusage) (err error) { + _, _, e1 := RawSyscall(SYS_GETRUSAGE, uintptr(who), uintptr(unsafe.Pointer(rusage)), 0) + if e1 != 
0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getsid(pid int) (sid int, err error) { + r0, _, e1 := RawSyscall(SYS_GETSID, uintptr(pid), 0, 0) + sid = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Gettimeofday(tv *Timeval) (err error) { + _, _, e1 := RawSyscall(SYS_GETTIMEOFDAY, uintptr(unsafe.Pointer(tv)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getuid() (uid int) { + r0, _, _ := RawSyscall(SYS_GETUID, 0, 0, 0) + uid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Issetugid() (tainted bool) { + r0, _, _ := Syscall(SYS_ISSETUGID, 0, 0, 0) + tainted = bool(r0 != 0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Kill(pid int, signum syscall.Signal) (err error) { + _, _, e1 := Syscall(SYS_KILL, uintptr(pid), uintptr(signum), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Kqueue() (fd int, err error) { + r0, _, e1 := Syscall(SYS_KQUEUE, 0, 0, 0) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Lchown(path string, uid int, gid int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_LCHOWN, uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid)) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Link(path string, link string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(link) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_LINK, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) + use(unsafe.Pointer(_p0)) + use(unsafe.Pointer(_p1)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Listen(s int, backlog int) (err error) { + _, _, e1 := Syscall(SYS_LISTEN, uintptr(s), uintptr(backlog), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Lstat(path string, stat *Stat_t) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_LSTAT, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mkdir(path string, mode uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_MKDIR, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mkfifo(path string, mode uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_MKFIFO, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = 
errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mknod(path string, mode uint32, dev int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_MKNOD, uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev)) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mlock(b []byte) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall(SYS_MLOCK, uintptr(_p0), uintptr(len(b)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mlockall(flags int) (err error) { + _, _, e1 := Syscall(SYS_MLOCKALL, uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mprotect(b []byte, prot int) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall(SYS_MPROTECT, uintptr(_p0), uintptr(len(b)), uintptr(prot)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Munlock(b []byte) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall(SYS_MUNLOCK, uintptr(_p0), uintptr(len(b)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Munlockall() (err error) { + _, _, e1 := Syscall(SYS_MUNLOCKALL, 0, 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Nanosleep(time *Timespec, leftover *Timespec) (err error) { + _, _, e1 := Syscall(SYS_NANOSLEEP, uintptr(unsafe.Pointer(time)), uintptr(unsafe.Pointer(leftover)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Open(path string, mode int, perm uint32) (fd int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + r0, _, e1 := Syscall(SYS_OPEN, uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(perm)) + use(unsafe.Pointer(_p0)) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Pathconf(path string, name int) (val int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + r0, _, e1 := Syscall(SYS_PATHCONF, uintptr(unsafe.Pointer(_p0)), uintptr(name), 0) + use(unsafe.Pointer(_p0)) + val = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func read(fd int, p []byte) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS_READ, uintptr(fd), uintptr(_p0), uintptr(len(p))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Readlink(path string, buf []byte) (n int, err error) { + var _p0 *byte + _p0, err = 
BytePtrFromString(path) + if err != nil { + return + } + var _p1 unsafe.Pointer + if len(buf) > 0 { + _p1 = unsafe.Pointer(&buf[0]) + } else { + _p1 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS_READLINK, uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(buf))) + use(unsafe.Pointer(_p0)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Rename(from string, to string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(from) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(to) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_RENAME, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) + use(unsafe.Pointer(_p0)) + use(unsafe.Pointer(_p1)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Revoke(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_REVOKE, uintptr(unsafe.Pointer(_p0)), 0, 0) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Rmdir(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_RMDIR, uintptr(unsafe.Pointer(_p0)), 0, 0) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Seek(fd int, offset int64, whence int) (newoffset int64, err error) { + r0, r1, e1 := Syscall6(SYS_LSEEK, uintptr(fd), 0, uintptr(offset), uintptr(offset>>32), uintptr(whence), 0) + newoffset = int64(int64(r1)<<32 | int64(r0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Select(n int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (err error) { + _, _, e1 := Syscall6(SYS_SELECT, uintptr(n), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setegid(egid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETEGID, uintptr(egid), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Seteuid(euid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETEUID, uintptr(euid), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setgid(gid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETGID, uintptr(gid), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setlogin(name string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(name) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_SETLOGIN, uintptr(unsafe.Pointer(_p0)), 0, 0) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setpgid(pid int, pgid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETPGID, uintptr(pid), uintptr(pgid), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND 
AT THE TOP; DO NOT EDIT + +func Setpriority(which int, who int, prio int) (err error) { + _, _, e1 := Syscall(SYS_SETPRIORITY, uintptr(which), uintptr(who), uintptr(prio)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setregid(rgid int, egid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETREGID, uintptr(rgid), uintptr(egid), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setreuid(ruid int, euid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETREUID, uintptr(ruid), uintptr(euid), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setresgid(rgid int, egid int, sgid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETRESGID, uintptr(rgid), uintptr(egid), uintptr(sgid)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setresuid(ruid int, euid int, suid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETRESUID, uintptr(ruid), uintptr(euid), uintptr(suid)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setrlimit(which int, lim *Rlimit) (err error) { + _, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(which), uintptr(unsafe.Pointer(lim)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setsid() (pid int, err error) { + r0, _, e1 := RawSyscall(SYS_SETSID, 0, 0, 0) + pid = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Settimeofday(tp *Timeval) (err error) { + _, _, e1 := RawSyscall(SYS_SETTIMEOFDAY, uintptr(unsafe.Pointer(tp)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setuid(uid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETUID, uintptr(uid), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Stat(path string, stat *Stat_t) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_STAT, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Statfs(path string, stat *Statfs_t) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_STATFS, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Symlink(path string, link string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(link) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_SYMLINK, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) + use(unsafe.Pointer(_p0)) + use(unsafe.Pointer(_p1)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Sync() (err error) { 
+ _, _, e1 := Syscall(SYS_SYNC, 0, 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Truncate(path string, length int64) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_TRUNCATE, uintptr(unsafe.Pointer(_p0)), 0, uintptr(length), uintptr(length>>32), 0, 0) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Umask(newmask int) (oldmask int) { + r0, _, _ := Syscall(SYS_UMASK, uintptr(newmask), 0, 0) + oldmask = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Undelete(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_UNDELETE, uintptr(unsafe.Pointer(_p0)), 0, 0) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Unlink(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_UNLINK, uintptr(unsafe.Pointer(_p0)), 0, 0) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Unmount(path string, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_UNMOUNT, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func write(fd int, p []byte) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(_p0), uintptr(len(p))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func mmap(addr uintptr, length uintptr, prot int, flag int, fd int, pos int64) (ret uintptr, err error) { + r0, _, e1 := Syscall9(SYS_MMAP, uintptr(addr), uintptr(length), uintptr(prot), uintptr(flag), uintptr(fd), 0, uintptr(pos), uintptr(pos>>32), 0) + ret = uintptr(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func munmap(addr uintptr, length uintptr) (err error) { + _, _, e1 := Syscall(SYS_MUNMAP, uintptr(addr), uintptr(length), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func readlen(fd int, buf *byte, nbuf int) (n int, err error) { + r0, _, e1 := Syscall(SYS_READ, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func writelen(fd int, buf *byte, nbuf int) (n int, err error) { + r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} diff --git a/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zsyscall_dragonfly_amd64.go 
b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zsyscall_dragonfly_amd64.go new file mode 100644 index 000000000..3fa6ff796 --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zsyscall_dragonfly_amd64.go @@ -0,0 +1,1412 @@ +// mksyscall.pl -dragonfly syscall_bsd.go syscall_dragonfly.go syscall_dragonfly_amd64.go +// MACHINE GENERATED BY THE COMMAND ABOVE; DO NOT EDIT + +// +build amd64,dragonfly + +package unix + +import ( + "syscall" + "unsafe" +) + +var _ syscall.Errno + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getgroups(ngid int, gid *_Gid_t) (n int, err error) { + r0, _, e1 := RawSyscall(SYS_GETGROUPS, uintptr(ngid), uintptr(unsafe.Pointer(gid)), 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func setgroups(ngid int, gid *_Gid_t) (err error) { + _, _, e1 := RawSyscall(SYS_SETGROUPS, uintptr(ngid), uintptr(unsafe.Pointer(gid)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func wait4(pid int, wstatus *_C_int, options int, rusage *Rusage) (wpid int, err error) { + r0, _, e1 := Syscall6(SYS_WAIT4, uintptr(pid), uintptr(unsafe.Pointer(wstatus)), uintptr(options), uintptr(unsafe.Pointer(rusage)), 0, 0) + wpid = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error) { + r0, _, e1 := Syscall(SYS_ACCEPT, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func bind(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) { + _, _, e1 := Syscall(SYS_BIND, uintptr(s), uintptr(addr), uintptr(addrlen)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func connect(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) { + _, _, e1 := Syscall(SYS_CONNECT, uintptr(s), uintptr(addr), uintptr(addrlen)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func socket(domain int, typ int, proto int) (fd int, err error) { + r0, _, e1 := RawSyscall(SYS_SOCKET, uintptr(domain), uintptr(typ), uintptr(proto)) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getsockopt(s int, level int, name int, val unsafe.Pointer, vallen *_Socklen) (err error) { + _, _, e1 := Syscall6(SYS_GETSOCKOPT, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(unsafe.Pointer(vallen)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func setsockopt(s int, level int, name int, val unsafe.Pointer, vallen uintptr) (err error) { + _, _, e1 := Syscall6(SYS_SETSOCKOPT, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(vallen), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getpeername(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) { + _, _, e1 := RawSyscall(SYS_GETPEERNAME, uintptr(fd), uintptr(unsafe.Pointer(rsa)), 
uintptr(unsafe.Pointer(addrlen))) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getsockname(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) { + _, _, e1 := RawSyscall(SYS_GETSOCKNAME, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Shutdown(s int, how int) (err error) { + _, _, e1 := Syscall(SYS_SHUTDOWN, uintptr(s), uintptr(how), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func socketpair(domain int, typ int, proto int, fd *[2]int32) (err error) { + _, _, e1 := RawSyscall6(SYS_SOCKETPAIR, uintptr(domain), uintptr(typ), uintptr(proto), uintptr(unsafe.Pointer(fd)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Socklen) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_RECVFROM, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(flags), uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(fromlen))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func sendto(s int, buf []byte, flags int, to unsafe.Pointer, addrlen _Socklen) (err error) { + var _p0 unsafe.Pointer + if len(buf) > 0 { + _p0 = unsafe.Pointer(&buf[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall6(SYS_SENDTO, uintptr(s), uintptr(_p0), uintptr(len(buf)), uintptr(flags), uintptr(to), uintptr(addrlen)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func recvmsg(s int, msg *Msghdr, flags int) (n int, err error) { + r0, _, e1 := Syscall(SYS_RECVMSG, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func sendmsg(s int, msg *Msghdr, flags int) (n int, err error) { + r0, _, e1 := Syscall(SYS_SENDMSG, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func kevent(kq int, change unsafe.Pointer, nchange int, event unsafe.Pointer, nevent int, timeout *Timespec) (n int, err error) { + r0, _, e1 := Syscall6(SYS_KEVENT, uintptr(kq), uintptr(change), uintptr(nchange), uintptr(event), uintptr(nevent), uintptr(unsafe.Pointer(timeout))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { + var _p0 unsafe.Pointer + if len(mib) > 0 { + _p0 = unsafe.Pointer(&mib[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall6(SYS___SYSCTL, uintptr(_p0), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + 
+func utimes(path string, timeval *[2]Timeval) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_UTIMES, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(timeval)), 0) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func futimes(fd int, timeval *[2]Timeval) (err error) { + _, _, e1 := Syscall(SYS_FUTIMES, uintptr(fd), uintptr(unsafe.Pointer(timeval)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func fcntl(fd int, cmd int, arg int) (val int, err error) { + r0, _, e1 := Syscall(SYS_FCNTL, uintptr(fd), uintptr(cmd), uintptr(arg)) + val = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func pipe() (r int, w int, err error) { + r0, r1, e1 := RawSyscall(SYS_PIPE, 0, 0, 0) + r = int(r0) + w = int(r1) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func extpread(fd int, p []byte, flags int, offset int64) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_EXTPREAD, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(flags), uintptr(offset), 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func extpwrite(fd int, p []byte, flags int, offset int64) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_EXTPWRITE, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(flags), uintptr(offset), 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Access(path string, mode uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_ACCESS, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Adjtime(delta *Timeval, olddelta *Timeval) (err error) { + _, _, e1 := Syscall(SYS_ADJTIME, uintptr(unsafe.Pointer(delta)), uintptr(unsafe.Pointer(olddelta)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Chdir(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_CHDIR, uintptr(unsafe.Pointer(_p0)), 0, 0) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Chflags(path string, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_CHFLAGS, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Chmod(path string, mode uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if 
err != nil { + return + } + _, _, e1 := Syscall(SYS_CHMOD, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Chown(path string, uid int, gid int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_CHOWN, uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid)) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Chroot(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_CHROOT, uintptr(unsafe.Pointer(_p0)), 0, 0) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Close(fd int) (err error) { + _, _, e1 := Syscall(SYS_CLOSE, uintptr(fd), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Dup(fd int) (nfd int, err error) { + r0, _, e1 := Syscall(SYS_DUP, uintptr(fd), 0, 0) + nfd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Dup2(from int, to int) (err error) { + _, _, e1 := Syscall(SYS_DUP2, uintptr(from), uintptr(to), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Exit(code int) { + Syscall(SYS_EXIT, uintptr(code), 0, 0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fchdir(fd int) (err error) { + _, _, e1 := Syscall(SYS_FCHDIR, uintptr(fd), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fchflags(fd int, flags int) (err error) { + _, _, e1 := Syscall(SYS_FCHFLAGS, uintptr(fd), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fchmod(fd int, mode uint32) (err error) { + _, _, e1 := Syscall(SYS_FCHMOD, uintptr(fd), uintptr(mode), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fchown(fd int, uid int, gid int) (err error) { + _, _, e1 := Syscall(SYS_FCHOWN, uintptr(fd), uintptr(uid), uintptr(gid)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Flock(fd int, how int) (err error) { + _, _, e1 := Syscall(SYS_FLOCK, uintptr(fd), uintptr(how), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fpathconf(fd int, name int) (val int, err error) { + r0, _, e1 := Syscall(SYS_FPATHCONF, uintptr(fd), uintptr(name), 0) + val = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fstat(fd int, stat *Stat_t) (err error) { + _, _, e1 := Syscall(SYS_FSTAT, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fstatfs(fd int, stat *Statfs_t) (err error) { + _, _, e1 := Syscall(SYS_FSTATFS, 
uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fsync(fd int) (err error) { + _, _, e1 := Syscall(SYS_FSYNC, uintptr(fd), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Ftruncate(fd int, length int64) (err error) { + _, _, e1 := Syscall(SYS_FTRUNCATE, uintptr(fd), 0, uintptr(length)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getdirentries(fd int, buf []byte, basep *uintptr) (n int, err error) { + var _p0 unsafe.Pointer + if len(buf) > 0 { + _p0 = unsafe.Pointer(&buf[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_GETDIRENTRIES, uintptr(fd), uintptr(_p0), uintptr(len(buf)), uintptr(unsafe.Pointer(basep)), 0, 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getdtablesize() (size int) { + r0, _, _ := Syscall(SYS_GETDTABLESIZE, 0, 0, 0) + size = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getegid() (egid int) { + r0, _, _ := RawSyscall(SYS_GETEGID, 0, 0, 0) + egid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Geteuid() (uid int) { + r0, _, _ := RawSyscall(SYS_GETEUID, 0, 0, 0) + uid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getgid() (gid int) { + r0, _, _ := RawSyscall(SYS_GETGID, 0, 0, 0) + gid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getpgid(pid int) (pgid int, err error) { + r0, _, e1 := RawSyscall(SYS_GETPGID, uintptr(pid), 0, 0) + pgid = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getpgrp() (pgrp int) { + r0, _, _ := RawSyscall(SYS_GETPGRP, 0, 0, 0) + pgrp = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getpid() (pid int) { + r0, _, _ := RawSyscall(SYS_GETPID, 0, 0, 0) + pid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getppid() (ppid int) { + r0, _, _ := RawSyscall(SYS_GETPPID, 0, 0, 0) + ppid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getpriority(which int, who int) (prio int, err error) { + r0, _, e1 := Syscall(SYS_GETPRIORITY, uintptr(which), uintptr(who), 0) + prio = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getrlimit(which int, lim *Rlimit) (err error) { + _, _, e1 := RawSyscall(SYS_GETRLIMIT, uintptr(which), uintptr(unsafe.Pointer(lim)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getrusage(who int, rusage *Rusage) (err error) { + _, _, e1 := RawSyscall(SYS_GETRUSAGE, uintptr(who), uintptr(unsafe.Pointer(rusage)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getsid(pid int) (sid int, err error) { + r0, _, e1 := RawSyscall(SYS_GETSID, uintptr(pid), 0, 0) + sid = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS 
GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Gettimeofday(tv *Timeval) (err error) { + _, _, e1 := RawSyscall(SYS_GETTIMEOFDAY, uintptr(unsafe.Pointer(tv)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getuid() (uid int) { + r0, _, _ := RawSyscall(SYS_GETUID, 0, 0, 0) + uid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Issetugid() (tainted bool) { + r0, _, _ := Syscall(SYS_ISSETUGID, 0, 0, 0) + tainted = bool(r0 != 0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Kill(pid int, signum syscall.Signal) (err error) { + _, _, e1 := Syscall(SYS_KILL, uintptr(pid), uintptr(signum), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Kqueue() (fd int, err error) { + r0, _, e1 := Syscall(SYS_KQUEUE, 0, 0, 0) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Lchown(path string, uid int, gid int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_LCHOWN, uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid)) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Link(path string, link string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(link) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_LINK, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) + use(unsafe.Pointer(_p0)) + use(unsafe.Pointer(_p1)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Listen(s int, backlog int) (err error) { + _, _, e1 := Syscall(SYS_LISTEN, uintptr(s), uintptr(backlog), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Lstat(path string, stat *Stat_t) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_LSTAT, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mkdir(path string, mode uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_MKDIR, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mkfifo(path string, mode uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_MKFIFO, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mknod(path string, mode uint32, dev int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_MKNOD, uintptr(unsafe.Pointer(_p0)), 
uintptr(mode), uintptr(dev)) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mlock(b []byte) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall(SYS_MLOCK, uintptr(_p0), uintptr(len(b)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mlockall(flags int) (err error) { + _, _, e1 := Syscall(SYS_MLOCKALL, uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mprotect(b []byte, prot int) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall(SYS_MPROTECT, uintptr(_p0), uintptr(len(b)), uintptr(prot)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Munlock(b []byte) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall(SYS_MUNLOCK, uintptr(_p0), uintptr(len(b)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Munlockall() (err error) { + _, _, e1 := Syscall(SYS_MUNLOCKALL, 0, 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Nanosleep(time *Timespec, leftover *Timespec) (err error) { + _, _, e1 := Syscall(SYS_NANOSLEEP, uintptr(unsafe.Pointer(time)), uintptr(unsafe.Pointer(leftover)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Open(path string, mode int, perm uint32) (fd int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + r0, _, e1 := Syscall(SYS_OPEN, uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(perm)) + use(unsafe.Pointer(_p0)) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Pathconf(path string, name int) (val int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + r0, _, e1 := Syscall(SYS_PATHCONF, uintptr(unsafe.Pointer(_p0)), uintptr(name), 0) + use(unsafe.Pointer(_p0)) + val = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func read(fd int, p []byte) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS_READ, uintptr(fd), uintptr(_p0), uintptr(len(p))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Readlink(path string, buf []byte) (n int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 unsafe.Pointer + if len(buf) > 0 { + _p1 = unsafe.Pointer(&buf[0]) + } else { + _p1 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS_READLINK, uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(buf))) + use(unsafe.Pointer(_p0)) + n = int(r0) 
+ if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Rename(from string, to string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(from) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(to) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_RENAME, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) + use(unsafe.Pointer(_p0)) + use(unsafe.Pointer(_p1)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Revoke(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_REVOKE, uintptr(unsafe.Pointer(_p0)), 0, 0) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Rmdir(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_RMDIR, uintptr(unsafe.Pointer(_p0)), 0, 0) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Seek(fd int, offset int64, whence int) (newoffset int64, err error) { + r0, _, e1 := Syscall6(SYS_LSEEK, uintptr(fd), 0, uintptr(offset), uintptr(whence), 0, 0) + newoffset = int64(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Select(n int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (err error) { + _, _, e1 := Syscall6(SYS_SELECT, uintptr(n), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setegid(egid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETEGID, uintptr(egid), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Seteuid(euid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETEUID, uintptr(euid), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setgid(gid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETGID, uintptr(gid), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setlogin(name string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(name) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_SETLOGIN, uintptr(unsafe.Pointer(_p0)), 0, 0) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setpgid(pid int, pgid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETPGID, uintptr(pid), uintptr(pgid), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setpriority(which int, who int, prio int) (err error) { + _, _, e1 := Syscall(SYS_SETPRIORITY, uintptr(which), uintptr(who), uintptr(prio)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setregid(rgid int, egid int) (err error) { + _, _, e1 := 
RawSyscall(SYS_SETREGID, uintptr(rgid), uintptr(egid), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setreuid(ruid int, euid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETREUID, uintptr(ruid), uintptr(euid), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setresgid(rgid int, egid int, sgid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETRESGID, uintptr(rgid), uintptr(egid), uintptr(sgid)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setresuid(ruid int, euid int, suid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETRESUID, uintptr(ruid), uintptr(euid), uintptr(suid)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setrlimit(which int, lim *Rlimit) (err error) { + _, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(which), uintptr(unsafe.Pointer(lim)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setsid() (pid int, err error) { + r0, _, e1 := RawSyscall(SYS_SETSID, 0, 0, 0) + pid = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Settimeofday(tp *Timeval) (err error) { + _, _, e1 := RawSyscall(SYS_SETTIMEOFDAY, uintptr(unsafe.Pointer(tp)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setuid(uid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETUID, uintptr(uid), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Stat(path string, stat *Stat_t) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_STAT, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Statfs(path string, stat *Statfs_t) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_STATFS, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Symlink(path string, link string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(link) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_SYMLINK, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) + use(unsafe.Pointer(_p0)) + use(unsafe.Pointer(_p1)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Sync() (err error) { + _, _, e1 := Syscall(SYS_SYNC, 0, 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Truncate(path string, length int64) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_TRUNCATE, 
uintptr(unsafe.Pointer(_p0)), 0, uintptr(length)) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Umask(newmask int) (oldmask int) { + r0, _, _ := Syscall(SYS_UMASK, uintptr(newmask), 0, 0) + oldmask = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Undelete(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_UNDELETE, uintptr(unsafe.Pointer(_p0)), 0, 0) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Unlink(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_UNLINK, uintptr(unsafe.Pointer(_p0)), 0, 0) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Unmount(path string, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_UNMOUNT, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func write(fd int, p []byte) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(_p0), uintptr(len(p))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func mmap(addr uintptr, length uintptr, prot int, flag int, fd int, pos int64) (ret uintptr, err error) { + r0, _, e1 := Syscall9(SYS_MMAP, uintptr(addr), uintptr(length), uintptr(prot), uintptr(flag), uintptr(fd), 0, uintptr(pos), 0, 0) + ret = uintptr(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func munmap(addr uintptr, length uintptr) (err error) { + _, _, e1 := Syscall(SYS_MUNMAP, uintptr(addr), uintptr(length), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func readlen(fd int, buf *byte, nbuf int) (n int, err error) { + r0, _, e1 := Syscall(SYS_READ, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func writelen(fd int, buf *byte, nbuf int) (n int, err error) { + r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} diff --git a/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zsyscall_freebsd_386.go b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zsyscall_freebsd_386.go new file mode 100644 index 000000000..1a0e528cd --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zsyscall_freebsd_386.go @@ -0,0 +1,1664 @@ +// mksyscall.pl -l32 syscall_bsd.go syscall_freebsd.go syscall_freebsd_386.go +// MACHINE GENERATED BY THE COMMAND ABOVE; DO NOT EDIT + +// +build 386,freebsd + +package unix + 
+import ( + "syscall" + "unsafe" +) + +var _ syscall.Errno + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getgroups(ngid int, gid *_Gid_t) (n int, err error) { + r0, _, e1 := RawSyscall(SYS_GETGROUPS, uintptr(ngid), uintptr(unsafe.Pointer(gid)), 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func setgroups(ngid int, gid *_Gid_t) (err error) { + _, _, e1 := RawSyscall(SYS_SETGROUPS, uintptr(ngid), uintptr(unsafe.Pointer(gid)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func wait4(pid int, wstatus *_C_int, options int, rusage *Rusage) (wpid int, err error) { + r0, _, e1 := Syscall6(SYS_WAIT4, uintptr(pid), uintptr(unsafe.Pointer(wstatus)), uintptr(options), uintptr(unsafe.Pointer(rusage)), 0, 0) + wpid = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error) { + r0, _, e1 := Syscall(SYS_ACCEPT, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func bind(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) { + _, _, e1 := Syscall(SYS_BIND, uintptr(s), uintptr(addr), uintptr(addrlen)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func connect(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) { + _, _, e1 := Syscall(SYS_CONNECT, uintptr(s), uintptr(addr), uintptr(addrlen)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func socket(domain int, typ int, proto int) (fd int, err error) { + r0, _, e1 := RawSyscall(SYS_SOCKET, uintptr(domain), uintptr(typ), uintptr(proto)) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getsockopt(s int, level int, name int, val unsafe.Pointer, vallen *_Socklen) (err error) { + _, _, e1 := Syscall6(SYS_GETSOCKOPT, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(unsafe.Pointer(vallen)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func setsockopt(s int, level int, name int, val unsafe.Pointer, vallen uintptr) (err error) { + _, _, e1 := Syscall6(SYS_SETSOCKOPT, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(vallen), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getpeername(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) { + _, _, e1 := RawSyscall(SYS_GETPEERNAME, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getsockname(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) { + _, _, e1 := RawSyscall(SYS_GETSOCKNAME, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Shutdown(s 
int, how int) (err error) { + _, _, e1 := Syscall(SYS_SHUTDOWN, uintptr(s), uintptr(how), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func socketpair(domain int, typ int, proto int, fd *[2]int32) (err error) { + _, _, e1 := RawSyscall6(SYS_SOCKETPAIR, uintptr(domain), uintptr(typ), uintptr(proto), uintptr(unsafe.Pointer(fd)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Socklen) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_RECVFROM, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(flags), uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(fromlen))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func sendto(s int, buf []byte, flags int, to unsafe.Pointer, addrlen _Socklen) (err error) { + var _p0 unsafe.Pointer + if len(buf) > 0 { + _p0 = unsafe.Pointer(&buf[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall6(SYS_SENDTO, uintptr(s), uintptr(_p0), uintptr(len(buf)), uintptr(flags), uintptr(to), uintptr(addrlen)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func recvmsg(s int, msg *Msghdr, flags int) (n int, err error) { + r0, _, e1 := Syscall(SYS_RECVMSG, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func sendmsg(s int, msg *Msghdr, flags int) (n int, err error) { + r0, _, e1 := Syscall(SYS_SENDMSG, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func kevent(kq int, change unsafe.Pointer, nchange int, event unsafe.Pointer, nevent int, timeout *Timespec) (n int, err error) { + r0, _, e1 := Syscall6(SYS_KEVENT, uintptr(kq), uintptr(change), uintptr(nchange), uintptr(event), uintptr(nevent), uintptr(unsafe.Pointer(timeout))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { + var _p0 unsafe.Pointer + if len(mib) > 0 { + _p0 = unsafe.Pointer(&mib[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall6(SYS___SYSCTL, uintptr(_p0), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func utimes(path string, timeval *[2]Timeval) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_UTIMES, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(timeval)), 0) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func futimes(fd int, timeval *[2]Timeval) (err error) { + _, _, e1 := Syscall(SYS_FUTIMES, 
uintptr(fd), uintptr(unsafe.Pointer(timeval)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func fcntl(fd int, cmd int, arg int) (val int, err error) { + r0, _, e1 := Syscall(SYS_FCNTL, uintptr(fd), uintptr(cmd), uintptr(arg)) + val = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func pipe() (r int, w int, err error) { + r0, r1, e1 := RawSyscall(SYS_PIPE, 0, 0, 0) + r = int(r0) + w = int(r1) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Access(path string, mode uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_ACCESS, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Adjtime(delta *Timeval, olddelta *Timeval) (err error) { + _, _, e1 := Syscall(SYS_ADJTIME, uintptr(unsafe.Pointer(delta)), uintptr(unsafe.Pointer(olddelta)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Chdir(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_CHDIR, uintptr(unsafe.Pointer(_p0)), 0, 0) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Chflags(path string, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_CHFLAGS, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Chmod(path string, mode uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_CHMOD, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Chown(path string, uid int, gid int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_CHOWN, uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid)) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Chroot(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_CHROOT, uintptr(unsafe.Pointer(_p0)), 0, 0) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Close(fd int) (err error) { + _, _, e1 := Syscall(SYS_CLOSE, uintptr(fd), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Dup(fd int) (nfd int, err error) { + r0, _, e1 := Syscall(SYS_DUP, uintptr(fd), 0, 0) + nfd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND 
AT THE TOP; DO NOT EDIT + +func Dup2(from int, to int) (err error) { + _, _, e1 := Syscall(SYS_DUP2, uintptr(from), uintptr(to), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Exit(code int) { + Syscall(SYS_EXIT, uintptr(code), 0, 0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ExtattrGetFd(fd int, attrnamespace int, attrname string, data uintptr, nbytes int) (ret int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(attrname) + if err != nil { + return + } + r0, _, e1 := Syscall6(SYS_EXTATTR_GET_FD, uintptr(fd), uintptr(attrnamespace), uintptr(unsafe.Pointer(_p0)), uintptr(data), uintptr(nbytes), 0) + use(unsafe.Pointer(_p0)) + ret = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ExtattrSetFd(fd int, attrnamespace int, attrname string, data uintptr, nbytes int) (ret int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(attrname) + if err != nil { + return + } + r0, _, e1 := Syscall6(SYS_EXTATTR_SET_FD, uintptr(fd), uintptr(attrnamespace), uintptr(unsafe.Pointer(_p0)), uintptr(data), uintptr(nbytes), 0) + use(unsafe.Pointer(_p0)) + ret = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ExtattrDeleteFd(fd int, attrnamespace int, attrname string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(attrname) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_EXTATTR_DELETE_FD, uintptr(fd), uintptr(attrnamespace), uintptr(unsafe.Pointer(_p0))) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ExtattrListFd(fd int, attrnamespace int, data uintptr, nbytes int) (ret int, err error) { + r0, _, e1 := Syscall6(SYS_EXTATTR_LIST_FD, uintptr(fd), uintptr(attrnamespace), uintptr(data), uintptr(nbytes), 0, 0) + ret = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ExtattrGetFile(file string, attrnamespace int, attrname string, data uintptr, nbytes int) (ret int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(file) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(attrname) + if err != nil { + return + } + r0, _, e1 := Syscall6(SYS_EXTATTR_GET_FILE, uintptr(unsafe.Pointer(_p0)), uintptr(attrnamespace), uintptr(unsafe.Pointer(_p1)), uintptr(data), uintptr(nbytes), 0) + use(unsafe.Pointer(_p0)) + use(unsafe.Pointer(_p1)) + ret = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ExtattrSetFile(file string, attrnamespace int, attrname string, data uintptr, nbytes int) (ret int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(file) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(attrname) + if err != nil { + return + } + r0, _, e1 := Syscall6(SYS_EXTATTR_SET_FILE, uintptr(unsafe.Pointer(_p0)), uintptr(attrnamespace), uintptr(unsafe.Pointer(_p1)), uintptr(data), uintptr(nbytes), 0) + use(unsafe.Pointer(_p0)) + use(unsafe.Pointer(_p1)) + ret = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ExtattrDeleteFile(file string, attrnamespace 
int, attrname string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(file) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(attrname) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_EXTATTR_DELETE_FILE, uintptr(unsafe.Pointer(_p0)), uintptr(attrnamespace), uintptr(unsafe.Pointer(_p1))) + use(unsafe.Pointer(_p0)) + use(unsafe.Pointer(_p1)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ExtattrListFile(file string, attrnamespace int, data uintptr, nbytes int) (ret int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(file) + if err != nil { + return + } + r0, _, e1 := Syscall6(SYS_EXTATTR_LIST_FILE, uintptr(unsafe.Pointer(_p0)), uintptr(attrnamespace), uintptr(data), uintptr(nbytes), 0, 0) + use(unsafe.Pointer(_p0)) + ret = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ExtattrGetLink(link string, attrnamespace int, attrname string, data uintptr, nbytes int) (ret int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(link) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(attrname) + if err != nil { + return + } + r0, _, e1 := Syscall6(SYS_EXTATTR_GET_LINK, uintptr(unsafe.Pointer(_p0)), uintptr(attrnamespace), uintptr(unsafe.Pointer(_p1)), uintptr(data), uintptr(nbytes), 0) + use(unsafe.Pointer(_p0)) + use(unsafe.Pointer(_p1)) + ret = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ExtattrSetLink(link string, attrnamespace int, attrname string, data uintptr, nbytes int) (ret int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(link) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(attrname) + if err != nil { + return + } + r0, _, e1 := Syscall6(SYS_EXTATTR_SET_LINK, uintptr(unsafe.Pointer(_p0)), uintptr(attrnamespace), uintptr(unsafe.Pointer(_p1)), uintptr(data), uintptr(nbytes), 0) + use(unsafe.Pointer(_p0)) + use(unsafe.Pointer(_p1)) + ret = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ExtattrDeleteLink(link string, attrnamespace int, attrname string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(link) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(attrname) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_EXTATTR_DELETE_LINK, uintptr(unsafe.Pointer(_p0)), uintptr(attrnamespace), uintptr(unsafe.Pointer(_p1))) + use(unsafe.Pointer(_p0)) + use(unsafe.Pointer(_p1)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ExtattrListLink(link string, attrnamespace int, data uintptr, nbytes int) (ret int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(link) + if err != nil { + return + } + r0, _, e1 := Syscall6(SYS_EXTATTR_LIST_LINK, uintptr(unsafe.Pointer(_p0)), uintptr(attrnamespace), uintptr(data), uintptr(nbytes), 0, 0) + use(unsafe.Pointer(_p0)) + ret = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fadvise(fd int, offset int64, length int64, advice int) (err error) { + _, _, e1 := Syscall6(SYS_POSIX_FADVISE, uintptr(fd), uintptr(offset), uintptr(offset>>32), uintptr(length), 
uintptr(length>>32), uintptr(advice)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fchdir(fd int) (err error) { + _, _, e1 := Syscall(SYS_FCHDIR, uintptr(fd), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fchflags(fd int, flags int) (err error) { + _, _, e1 := Syscall(SYS_FCHFLAGS, uintptr(fd), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fchmod(fd int, mode uint32) (err error) { + _, _, e1 := Syscall(SYS_FCHMOD, uintptr(fd), uintptr(mode), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fchown(fd int, uid int, gid int) (err error) { + _, _, e1 := Syscall(SYS_FCHOWN, uintptr(fd), uintptr(uid), uintptr(gid)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Flock(fd int, how int) (err error) { + _, _, e1 := Syscall(SYS_FLOCK, uintptr(fd), uintptr(how), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fpathconf(fd int, name int) (val int, err error) { + r0, _, e1 := Syscall(SYS_FPATHCONF, uintptr(fd), uintptr(name), 0) + val = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fstat(fd int, stat *Stat_t) (err error) { + _, _, e1 := Syscall(SYS_FSTAT, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fstatfs(fd int, stat *Statfs_t) (err error) { + _, _, e1 := Syscall(SYS_FSTATFS, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fsync(fd int) (err error) { + _, _, e1 := Syscall(SYS_FSYNC, uintptr(fd), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Ftruncate(fd int, length int64) (err error) { + _, _, e1 := Syscall(SYS_FTRUNCATE, uintptr(fd), uintptr(length), uintptr(length>>32)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getdirentries(fd int, buf []byte, basep *uintptr) (n int, err error) { + var _p0 unsafe.Pointer + if len(buf) > 0 { + _p0 = unsafe.Pointer(&buf[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_GETDIRENTRIES, uintptr(fd), uintptr(_p0), uintptr(len(buf)), uintptr(unsafe.Pointer(basep)), 0, 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getdtablesize() (size int) { + r0, _, _ := Syscall(SYS_GETDTABLESIZE, 0, 0, 0) + size = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getegid() (egid int) { + r0, _, _ := RawSyscall(SYS_GETEGID, 0, 0, 0) + egid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Geteuid() (uid int) { + r0, _, _ := RawSyscall(SYS_GETEUID, 0, 0, 0) + uid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT 
+ +func Getgid() (gid int) { + r0, _, _ := RawSyscall(SYS_GETGID, 0, 0, 0) + gid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getpgid(pid int) (pgid int, err error) { + r0, _, e1 := RawSyscall(SYS_GETPGID, uintptr(pid), 0, 0) + pgid = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getpgrp() (pgrp int) { + r0, _, _ := RawSyscall(SYS_GETPGRP, 0, 0, 0) + pgrp = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getpid() (pid int) { + r0, _, _ := RawSyscall(SYS_GETPID, 0, 0, 0) + pid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getppid() (ppid int) { + r0, _, _ := RawSyscall(SYS_GETPPID, 0, 0, 0) + ppid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getpriority(which int, who int) (prio int, err error) { + r0, _, e1 := Syscall(SYS_GETPRIORITY, uintptr(which), uintptr(who), 0) + prio = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getrlimit(which int, lim *Rlimit) (err error) { + _, _, e1 := RawSyscall(SYS_GETRLIMIT, uintptr(which), uintptr(unsafe.Pointer(lim)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getrusage(who int, rusage *Rusage) (err error) { + _, _, e1 := RawSyscall(SYS_GETRUSAGE, uintptr(who), uintptr(unsafe.Pointer(rusage)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getsid(pid int) (sid int, err error) { + r0, _, e1 := RawSyscall(SYS_GETSID, uintptr(pid), 0, 0) + sid = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Gettimeofday(tv *Timeval) (err error) { + _, _, e1 := RawSyscall(SYS_GETTIMEOFDAY, uintptr(unsafe.Pointer(tv)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getuid() (uid int) { + r0, _, _ := RawSyscall(SYS_GETUID, 0, 0, 0) + uid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Issetugid() (tainted bool) { + r0, _, _ := Syscall(SYS_ISSETUGID, 0, 0, 0) + tainted = bool(r0 != 0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Kill(pid int, signum syscall.Signal) (err error) { + _, _, e1 := Syscall(SYS_KILL, uintptr(pid), uintptr(signum), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Kqueue() (fd int, err error) { + r0, _, e1 := Syscall(SYS_KQUEUE, 0, 0, 0) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Lchown(path string, uid int, gid int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_LCHOWN, uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid)) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Link(path string, link string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != 
nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(link) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_LINK, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) + use(unsafe.Pointer(_p0)) + use(unsafe.Pointer(_p1)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Listen(s int, backlog int) (err error) { + _, _, e1 := Syscall(SYS_LISTEN, uintptr(s), uintptr(backlog), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Lstat(path string, stat *Stat_t) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_LSTAT, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mkdir(path string, mode uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_MKDIR, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mkfifo(path string, mode uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_MKFIFO, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mknod(path string, mode uint32, dev int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_MKNOD, uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev)) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mlock(b []byte) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall(SYS_MLOCK, uintptr(_p0), uintptr(len(b)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mlockall(flags int) (err error) { + _, _, e1 := Syscall(SYS_MLOCKALL, uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mprotect(b []byte, prot int) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall(SYS_MPROTECT, uintptr(_p0), uintptr(len(b)), uintptr(prot)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Munlock(b []byte) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall(SYS_MUNLOCK, uintptr(_p0), uintptr(len(b)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Munlockall() (err error) { + _, _, e1 := Syscall(SYS_MUNLOCKALL, 0, 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS 
GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Nanosleep(time *Timespec, leftover *Timespec) (err error) { + _, _, e1 := Syscall(SYS_NANOSLEEP, uintptr(unsafe.Pointer(time)), uintptr(unsafe.Pointer(leftover)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Open(path string, mode int, perm uint32) (fd int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + r0, _, e1 := Syscall(SYS_OPEN, uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(perm)) + use(unsafe.Pointer(_p0)) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Pathconf(path string, name int) (val int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + r0, _, e1 := Syscall(SYS_PATHCONF, uintptr(unsafe.Pointer(_p0)), uintptr(name), 0) + use(unsafe.Pointer(_p0)) + val = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Pread(fd int, p []byte, offset int64) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_PREAD, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(offset), uintptr(offset>>32), 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Pwrite(fd int, p []byte, offset int64) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_PWRITE, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(offset), uintptr(offset>>32), 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func read(fd int, p []byte) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS_READ, uintptr(fd), uintptr(_p0), uintptr(len(p))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Readlink(path string, buf []byte) (n int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 unsafe.Pointer + if len(buf) > 0 { + _p1 = unsafe.Pointer(&buf[0]) + } else { + _p1 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS_READLINK, uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(buf))) + use(unsafe.Pointer(_p0)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Rename(from string, to string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(from) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(to) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_RENAME, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) + use(unsafe.Pointer(_p0)) + use(unsafe.Pointer(_p1)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Revoke(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return 
+ } + _, _, e1 := Syscall(SYS_REVOKE, uintptr(unsafe.Pointer(_p0)), 0, 0) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Rmdir(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_RMDIR, uintptr(unsafe.Pointer(_p0)), 0, 0) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Seek(fd int, offset int64, whence int) (newoffset int64, err error) { + r0, r1, e1 := Syscall6(SYS_LSEEK, uintptr(fd), uintptr(offset), uintptr(offset>>32), uintptr(whence), 0, 0) + newoffset = int64(int64(r1)<<32 | int64(r0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Select(n int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (err error) { + _, _, e1 := Syscall6(SYS_SELECT, uintptr(n), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setegid(egid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETEGID, uintptr(egid), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Seteuid(euid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETEUID, uintptr(euid), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setgid(gid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETGID, uintptr(gid), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setlogin(name string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(name) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_SETLOGIN, uintptr(unsafe.Pointer(_p0)), 0, 0) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setpgid(pid int, pgid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETPGID, uintptr(pid), uintptr(pgid), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setpriority(which int, who int, prio int) (err error) { + _, _, e1 := Syscall(SYS_SETPRIORITY, uintptr(which), uintptr(who), uintptr(prio)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setregid(rgid int, egid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETREGID, uintptr(rgid), uintptr(egid), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setreuid(ruid int, euid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETREUID, uintptr(ruid), uintptr(euid), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setresgid(rgid int, egid int, sgid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETRESGID, uintptr(rgid), uintptr(egid), uintptr(sgid)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + 
+func Setresuid(ruid int, euid int, suid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETRESUID, uintptr(ruid), uintptr(euid), uintptr(suid)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setrlimit(which int, lim *Rlimit) (err error) { + _, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(which), uintptr(unsafe.Pointer(lim)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setsid() (pid int, err error) { + r0, _, e1 := RawSyscall(SYS_SETSID, 0, 0, 0) + pid = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Settimeofday(tp *Timeval) (err error) { + _, _, e1 := RawSyscall(SYS_SETTIMEOFDAY, uintptr(unsafe.Pointer(tp)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setuid(uid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETUID, uintptr(uid), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Stat(path string, stat *Stat_t) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_STAT, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Statfs(path string, stat *Statfs_t) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_STATFS, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Symlink(path string, link string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(link) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_SYMLINK, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) + use(unsafe.Pointer(_p0)) + use(unsafe.Pointer(_p1)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Sync() (err error) { + _, _, e1 := Syscall(SYS_SYNC, 0, 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Truncate(path string, length int64) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_TRUNCATE, uintptr(unsafe.Pointer(_p0)), uintptr(length), uintptr(length>>32)) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Umask(newmask int) (oldmask int) { + r0, _, _ := Syscall(SYS_UMASK, uintptr(newmask), 0, 0) + oldmask = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Undelete(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_UNDELETE, uintptr(unsafe.Pointer(_p0)), 0, 0) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS 
FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Unlink(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_UNLINK, uintptr(unsafe.Pointer(_p0)), 0, 0) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Unmount(path string, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_UNMOUNT, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func write(fd int, p []byte) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(_p0), uintptr(len(p))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func mmap(addr uintptr, length uintptr, prot int, flag int, fd int, pos int64) (ret uintptr, err error) { + r0, _, e1 := Syscall9(SYS_MMAP, uintptr(addr), uintptr(length), uintptr(prot), uintptr(flag), uintptr(fd), uintptr(pos), uintptr(pos>>32), 0, 0) + ret = uintptr(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func munmap(addr uintptr, length uintptr) (err error) { + _, _, e1 := Syscall(SYS_MUNMAP, uintptr(addr), uintptr(length), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func readlen(fd int, buf *byte, nbuf int) (n int, err error) { + r0, _, e1 := Syscall(SYS_READ, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func writelen(fd int, buf *byte, nbuf int) (n int, err error) { + r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func accept4(fd int, rsa *RawSockaddrAny, addrlen *_Socklen, flags int) (nfd int, err error) { + r0, _, e1 := Syscall6(SYS_ACCEPT4, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)), uintptr(flags), 0, 0) + nfd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} diff --git a/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zsyscall_freebsd_amd64.go b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zsyscall_freebsd_amd64.go new file mode 100644 index 000000000..6e4cf1455 --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zsyscall_freebsd_amd64.go @@ -0,0 +1,1664 @@ +// mksyscall.pl syscall_bsd.go syscall_freebsd.go syscall_freebsd_amd64.go +// MACHINE GENERATED BY THE COMMAND ABOVE; DO NOT EDIT + +// +build amd64,freebsd + +package unix + +import ( + "syscall" + "unsafe" +) + +var _ syscall.Errno + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getgroups(ngid int, gid *_Gid_t) (n int, err error) { + r0, _, e1 := RawSyscall(SYS_GETGROUPS, uintptr(ngid), uintptr(unsafe.Pointer(gid)), 0) + n = int(r0) + if 
e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func setgroups(ngid int, gid *_Gid_t) (err error) { + _, _, e1 := RawSyscall(SYS_SETGROUPS, uintptr(ngid), uintptr(unsafe.Pointer(gid)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func wait4(pid int, wstatus *_C_int, options int, rusage *Rusage) (wpid int, err error) { + r0, _, e1 := Syscall6(SYS_WAIT4, uintptr(pid), uintptr(unsafe.Pointer(wstatus)), uintptr(options), uintptr(unsafe.Pointer(rusage)), 0, 0) + wpid = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error) { + r0, _, e1 := Syscall(SYS_ACCEPT, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func bind(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) { + _, _, e1 := Syscall(SYS_BIND, uintptr(s), uintptr(addr), uintptr(addrlen)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func connect(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) { + _, _, e1 := Syscall(SYS_CONNECT, uintptr(s), uintptr(addr), uintptr(addrlen)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func socket(domain int, typ int, proto int) (fd int, err error) { + r0, _, e1 := RawSyscall(SYS_SOCKET, uintptr(domain), uintptr(typ), uintptr(proto)) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getsockopt(s int, level int, name int, val unsafe.Pointer, vallen *_Socklen) (err error) { + _, _, e1 := Syscall6(SYS_GETSOCKOPT, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(unsafe.Pointer(vallen)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func setsockopt(s int, level int, name int, val unsafe.Pointer, vallen uintptr) (err error) { + _, _, e1 := Syscall6(SYS_SETSOCKOPT, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(vallen), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getpeername(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) { + _, _, e1 := RawSyscall(SYS_GETPEERNAME, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getsockname(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) { + _, _, e1 := RawSyscall(SYS_GETSOCKNAME, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Shutdown(s int, how int) (err error) { + _, _, e1 := Syscall(SYS_SHUTDOWN, uintptr(s), uintptr(how), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func socketpair(domain int, typ int, proto int, fd *[2]int32) (err error) { + _, 
_, e1 := RawSyscall6(SYS_SOCKETPAIR, uintptr(domain), uintptr(typ), uintptr(proto), uintptr(unsafe.Pointer(fd)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Socklen) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_RECVFROM, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(flags), uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(fromlen))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func sendto(s int, buf []byte, flags int, to unsafe.Pointer, addrlen _Socklen) (err error) { + var _p0 unsafe.Pointer + if len(buf) > 0 { + _p0 = unsafe.Pointer(&buf[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall6(SYS_SENDTO, uintptr(s), uintptr(_p0), uintptr(len(buf)), uintptr(flags), uintptr(to), uintptr(addrlen)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func recvmsg(s int, msg *Msghdr, flags int) (n int, err error) { + r0, _, e1 := Syscall(SYS_RECVMSG, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func sendmsg(s int, msg *Msghdr, flags int) (n int, err error) { + r0, _, e1 := Syscall(SYS_SENDMSG, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func kevent(kq int, change unsafe.Pointer, nchange int, event unsafe.Pointer, nevent int, timeout *Timespec) (n int, err error) { + r0, _, e1 := Syscall6(SYS_KEVENT, uintptr(kq), uintptr(change), uintptr(nchange), uintptr(event), uintptr(nevent), uintptr(unsafe.Pointer(timeout))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { + var _p0 unsafe.Pointer + if len(mib) > 0 { + _p0 = unsafe.Pointer(&mib[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall6(SYS___SYSCTL, uintptr(_p0), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func utimes(path string, timeval *[2]Timeval) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_UTIMES, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(timeval)), 0) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func futimes(fd int, timeval *[2]Timeval) (err error) { + _, _, e1 := Syscall(SYS_FUTIMES, uintptr(fd), uintptr(unsafe.Pointer(timeval)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func fcntl(fd int, cmd int, arg int) (val int, err error) { + r0, _, e1 := Syscall(SYS_FCNTL, uintptr(fd), uintptr(cmd), 
uintptr(arg)) + val = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func pipe() (r int, w int, err error) { + r0, r1, e1 := RawSyscall(SYS_PIPE, 0, 0, 0) + r = int(r0) + w = int(r1) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Access(path string, mode uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_ACCESS, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Adjtime(delta *Timeval, olddelta *Timeval) (err error) { + _, _, e1 := Syscall(SYS_ADJTIME, uintptr(unsafe.Pointer(delta)), uintptr(unsafe.Pointer(olddelta)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Chdir(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_CHDIR, uintptr(unsafe.Pointer(_p0)), 0, 0) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Chflags(path string, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_CHFLAGS, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Chmod(path string, mode uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_CHMOD, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Chown(path string, uid int, gid int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_CHOWN, uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid)) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Chroot(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_CHROOT, uintptr(unsafe.Pointer(_p0)), 0, 0) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Close(fd int) (err error) { + _, _, e1 := Syscall(SYS_CLOSE, uintptr(fd), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Dup(fd int) (nfd int, err error) { + r0, _, e1 := Syscall(SYS_DUP, uintptr(fd), 0, 0) + nfd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Dup2(from int, to int) (err error) { + _, _, e1 := Syscall(SYS_DUP2, uintptr(from), uintptr(to), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Exit(code int) { + 
Syscall(SYS_EXIT, uintptr(code), 0, 0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ExtattrGetFd(fd int, attrnamespace int, attrname string, data uintptr, nbytes int) (ret int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(attrname) + if err != nil { + return + } + r0, _, e1 := Syscall6(SYS_EXTATTR_GET_FD, uintptr(fd), uintptr(attrnamespace), uintptr(unsafe.Pointer(_p0)), uintptr(data), uintptr(nbytes), 0) + use(unsafe.Pointer(_p0)) + ret = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ExtattrSetFd(fd int, attrnamespace int, attrname string, data uintptr, nbytes int) (ret int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(attrname) + if err != nil { + return + } + r0, _, e1 := Syscall6(SYS_EXTATTR_SET_FD, uintptr(fd), uintptr(attrnamespace), uintptr(unsafe.Pointer(_p0)), uintptr(data), uintptr(nbytes), 0) + use(unsafe.Pointer(_p0)) + ret = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ExtattrDeleteFd(fd int, attrnamespace int, attrname string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(attrname) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_EXTATTR_DELETE_FD, uintptr(fd), uintptr(attrnamespace), uintptr(unsafe.Pointer(_p0))) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ExtattrListFd(fd int, attrnamespace int, data uintptr, nbytes int) (ret int, err error) { + r0, _, e1 := Syscall6(SYS_EXTATTR_LIST_FD, uintptr(fd), uintptr(attrnamespace), uintptr(data), uintptr(nbytes), 0, 0) + ret = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ExtattrGetFile(file string, attrnamespace int, attrname string, data uintptr, nbytes int) (ret int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(file) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(attrname) + if err != nil { + return + } + r0, _, e1 := Syscall6(SYS_EXTATTR_GET_FILE, uintptr(unsafe.Pointer(_p0)), uintptr(attrnamespace), uintptr(unsafe.Pointer(_p1)), uintptr(data), uintptr(nbytes), 0) + use(unsafe.Pointer(_p0)) + use(unsafe.Pointer(_p1)) + ret = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ExtattrSetFile(file string, attrnamespace int, attrname string, data uintptr, nbytes int) (ret int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(file) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(attrname) + if err != nil { + return + } + r0, _, e1 := Syscall6(SYS_EXTATTR_SET_FILE, uintptr(unsafe.Pointer(_p0)), uintptr(attrnamespace), uintptr(unsafe.Pointer(_p1)), uintptr(data), uintptr(nbytes), 0) + use(unsafe.Pointer(_p0)) + use(unsafe.Pointer(_p1)) + ret = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ExtattrDeleteFile(file string, attrnamespace int, attrname string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(file) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(attrname) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_EXTATTR_DELETE_FILE, 
uintptr(unsafe.Pointer(_p0)), uintptr(attrnamespace), uintptr(unsafe.Pointer(_p1))) + use(unsafe.Pointer(_p0)) + use(unsafe.Pointer(_p1)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ExtattrListFile(file string, attrnamespace int, data uintptr, nbytes int) (ret int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(file) + if err != nil { + return + } + r0, _, e1 := Syscall6(SYS_EXTATTR_LIST_FILE, uintptr(unsafe.Pointer(_p0)), uintptr(attrnamespace), uintptr(data), uintptr(nbytes), 0, 0) + use(unsafe.Pointer(_p0)) + ret = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ExtattrGetLink(link string, attrnamespace int, attrname string, data uintptr, nbytes int) (ret int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(link) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(attrname) + if err != nil { + return + } + r0, _, e1 := Syscall6(SYS_EXTATTR_GET_LINK, uintptr(unsafe.Pointer(_p0)), uintptr(attrnamespace), uintptr(unsafe.Pointer(_p1)), uintptr(data), uintptr(nbytes), 0) + use(unsafe.Pointer(_p0)) + use(unsafe.Pointer(_p1)) + ret = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ExtattrSetLink(link string, attrnamespace int, attrname string, data uintptr, nbytes int) (ret int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(link) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(attrname) + if err != nil { + return + } + r0, _, e1 := Syscall6(SYS_EXTATTR_SET_LINK, uintptr(unsafe.Pointer(_p0)), uintptr(attrnamespace), uintptr(unsafe.Pointer(_p1)), uintptr(data), uintptr(nbytes), 0) + use(unsafe.Pointer(_p0)) + use(unsafe.Pointer(_p1)) + ret = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ExtattrDeleteLink(link string, attrnamespace int, attrname string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(link) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(attrname) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_EXTATTR_DELETE_LINK, uintptr(unsafe.Pointer(_p0)), uintptr(attrnamespace), uintptr(unsafe.Pointer(_p1))) + use(unsafe.Pointer(_p0)) + use(unsafe.Pointer(_p1)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ExtattrListLink(link string, attrnamespace int, data uintptr, nbytes int) (ret int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(link) + if err != nil { + return + } + r0, _, e1 := Syscall6(SYS_EXTATTR_LIST_LINK, uintptr(unsafe.Pointer(_p0)), uintptr(attrnamespace), uintptr(data), uintptr(nbytes), 0, 0) + use(unsafe.Pointer(_p0)) + ret = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fadvise(fd int, offset int64, length int64, advice int) (err error) { + _, _, e1 := Syscall6(SYS_POSIX_FADVISE, uintptr(fd), uintptr(offset), uintptr(length), uintptr(advice), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fchdir(fd int) (err error) { + _, _, e1 := Syscall(SYS_FCHDIR, uintptr(fd), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} 
+ +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fchflags(fd int, flags int) (err error) { + _, _, e1 := Syscall(SYS_FCHFLAGS, uintptr(fd), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fchmod(fd int, mode uint32) (err error) { + _, _, e1 := Syscall(SYS_FCHMOD, uintptr(fd), uintptr(mode), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fchown(fd int, uid int, gid int) (err error) { + _, _, e1 := Syscall(SYS_FCHOWN, uintptr(fd), uintptr(uid), uintptr(gid)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Flock(fd int, how int) (err error) { + _, _, e1 := Syscall(SYS_FLOCK, uintptr(fd), uintptr(how), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fpathconf(fd int, name int) (val int, err error) { + r0, _, e1 := Syscall(SYS_FPATHCONF, uintptr(fd), uintptr(name), 0) + val = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fstat(fd int, stat *Stat_t) (err error) { + _, _, e1 := Syscall(SYS_FSTAT, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fstatfs(fd int, stat *Statfs_t) (err error) { + _, _, e1 := Syscall(SYS_FSTATFS, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fsync(fd int) (err error) { + _, _, e1 := Syscall(SYS_FSYNC, uintptr(fd), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Ftruncate(fd int, length int64) (err error) { + _, _, e1 := Syscall(SYS_FTRUNCATE, uintptr(fd), uintptr(length), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getdirentries(fd int, buf []byte, basep *uintptr) (n int, err error) { + var _p0 unsafe.Pointer + if len(buf) > 0 { + _p0 = unsafe.Pointer(&buf[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_GETDIRENTRIES, uintptr(fd), uintptr(_p0), uintptr(len(buf)), uintptr(unsafe.Pointer(basep)), 0, 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getdtablesize() (size int) { + r0, _, _ := Syscall(SYS_GETDTABLESIZE, 0, 0, 0) + size = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getegid() (egid int) { + r0, _, _ := RawSyscall(SYS_GETEGID, 0, 0, 0) + egid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Geteuid() (uid int) { + r0, _, _ := RawSyscall(SYS_GETEUID, 0, 0, 0) + uid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getgid() (gid int) { + r0, _, _ := RawSyscall(SYS_GETGID, 0, 0, 0) + gid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getpgid(pid int) (pgid int, err error) { + r0, _, e1 := RawSyscall(SYS_GETPGID, uintptr(pid), 0, 0) + pgid = int(r0) + if e1 != 0 { + err = 
errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getpgrp() (pgrp int) { + r0, _, _ := RawSyscall(SYS_GETPGRP, 0, 0, 0) + pgrp = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getpid() (pid int) { + r0, _, _ := RawSyscall(SYS_GETPID, 0, 0, 0) + pid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getppid() (ppid int) { + r0, _, _ := RawSyscall(SYS_GETPPID, 0, 0, 0) + ppid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getpriority(which int, who int) (prio int, err error) { + r0, _, e1 := Syscall(SYS_GETPRIORITY, uintptr(which), uintptr(who), 0) + prio = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getrlimit(which int, lim *Rlimit) (err error) { + _, _, e1 := RawSyscall(SYS_GETRLIMIT, uintptr(which), uintptr(unsafe.Pointer(lim)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getrusage(who int, rusage *Rusage) (err error) { + _, _, e1 := RawSyscall(SYS_GETRUSAGE, uintptr(who), uintptr(unsafe.Pointer(rusage)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getsid(pid int) (sid int, err error) { + r0, _, e1 := RawSyscall(SYS_GETSID, uintptr(pid), 0, 0) + sid = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Gettimeofday(tv *Timeval) (err error) { + _, _, e1 := RawSyscall(SYS_GETTIMEOFDAY, uintptr(unsafe.Pointer(tv)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getuid() (uid int) { + r0, _, _ := RawSyscall(SYS_GETUID, 0, 0, 0) + uid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Issetugid() (tainted bool) { + r0, _, _ := Syscall(SYS_ISSETUGID, 0, 0, 0) + tainted = bool(r0 != 0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Kill(pid int, signum syscall.Signal) (err error) { + _, _, e1 := Syscall(SYS_KILL, uintptr(pid), uintptr(signum), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Kqueue() (fd int, err error) { + r0, _, e1 := Syscall(SYS_KQUEUE, 0, 0, 0) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Lchown(path string, uid int, gid int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_LCHOWN, uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid)) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Link(path string, link string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(link) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_LINK, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) + use(unsafe.Pointer(_p0)) + use(unsafe.Pointer(_p1)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE 
IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Listen(s int, backlog int) (err error) { + _, _, e1 := Syscall(SYS_LISTEN, uintptr(s), uintptr(backlog), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Lstat(path string, stat *Stat_t) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_LSTAT, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mkdir(path string, mode uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_MKDIR, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mkfifo(path string, mode uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_MKFIFO, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mknod(path string, mode uint32, dev int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_MKNOD, uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev)) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mlock(b []byte) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall(SYS_MLOCK, uintptr(_p0), uintptr(len(b)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mlockall(flags int) (err error) { + _, _, e1 := Syscall(SYS_MLOCKALL, uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mprotect(b []byte, prot int) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall(SYS_MPROTECT, uintptr(_p0), uintptr(len(b)), uintptr(prot)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Munlock(b []byte) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall(SYS_MUNLOCK, uintptr(_p0), uintptr(len(b)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Munlockall() (err error) { + _, _, e1 := Syscall(SYS_MUNLOCKALL, 0, 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Nanosleep(time *Timespec, leftover *Timespec) (err error) { + _, _, e1 := Syscall(SYS_NANOSLEEP, uintptr(unsafe.Pointer(time)), uintptr(unsafe.Pointer(leftover)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE 
TOP; DO NOT EDIT + +func Open(path string, mode int, perm uint32) (fd int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + r0, _, e1 := Syscall(SYS_OPEN, uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(perm)) + use(unsafe.Pointer(_p0)) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Pathconf(path string, name int) (val int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + r0, _, e1 := Syscall(SYS_PATHCONF, uintptr(unsafe.Pointer(_p0)), uintptr(name), 0) + use(unsafe.Pointer(_p0)) + val = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Pread(fd int, p []byte, offset int64) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_PREAD, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(offset), 0, 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Pwrite(fd int, p []byte, offset int64) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_PWRITE, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(offset), 0, 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func read(fd int, p []byte) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS_READ, uintptr(fd), uintptr(_p0), uintptr(len(p))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Readlink(path string, buf []byte) (n int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 unsafe.Pointer + if len(buf) > 0 { + _p1 = unsafe.Pointer(&buf[0]) + } else { + _p1 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS_READLINK, uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(buf))) + use(unsafe.Pointer(_p0)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Rename(from string, to string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(from) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(to) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_RENAME, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) + use(unsafe.Pointer(_p0)) + use(unsafe.Pointer(_p1)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Revoke(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_REVOKE, uintptr(unsafe.Pointer(_p0)), 0, 0) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Rmdir(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := 
Syscall(SYS_RMDIR, uintptr(unsafe.Pointer(_p0)), 0, 0) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Seek(fd int, offset int64, whence int) (newoffset int64, err error) { + r0, _, e1 := Syscall(SYS_LSEEK, uintptr(fd), uintptr(offset), uintptr(whence)) + newoffset = int64(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Select(n int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (err error) { + _, _, e1 := Syscall6(SYS_SELECT, uintptr(n), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setegid(egid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETEGID, uintptr(egid), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Seteuid(euid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETEUID, uintptr(euid), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setgid(gid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETGID, uintptr(gid), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setlogin(name string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(name) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_SETLOGIN, uintptr(unsafe.Pointer(_p0)), 0, 0) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setpgid(pid int, pgid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETPGID, uintptr(pid), uintptr(pgid), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setpriority(which int, who int, prio int) (err error) { + _, _, e1 := Syscall(SYS_SETPRIORITY, uintptr(which), uintptr(who), uintptr(prio)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setregid(rgid int, egid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETREGID, uintptr(rgid), uintptr(egid), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setreuid(ruid int, euid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETREUID, uintptr(ruid), uintptr(euid), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setresgid(rgid int, egid int, sgid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETRESGID, uintptr(rgid), uintptr(egid), uintptr(sgid)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setresuid(ruid int, euid int, suid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETRESUID, uintptr(ruid), uintptr(euid), uintptr(suid)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setrlimit(which int, lim *Rlimit) (err error) { + _, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(which), uintptr(unsafe.Pointer(lim)), 0) + if 
e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setsid() (pid int, err error) { + r0, _, e1 := RawSyscall(SYS_SETSID, 0, 0, 0) + pid = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Settimeofday(tp *Timeval) (err error) { + _, _, e1 := RawSyscall(SYS_SETTIMEOFDAY, uintptr(unsafe.Pointer(tp)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setuid(uid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETUID, uintptr(uid), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Stat(path string, stat *Stat_t) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_STAT, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Statfs(path string, stat *Statfs_t) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_STATFS, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Symlink(path string, link string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(link) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_SYMLINK, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) + use(unsafe.Pointer(_p0)) + use(unsafe.Pointer(_p1)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Sync() (err error) { + _, _, e1 := Syscall(SYS_SYNC, 0, 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Truncate(path string, length int64) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_TRUNCATE, uintptr(unsafe.Pointer(_p0)), uintptr(length), 0) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Umask(newmask int) (oldmask int) { + r0, _, _ := Syscall(SYS_UMASK, uintptr(newmask), 0, 0) + oldmask = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Undelete(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_UNDELETE, uintptr(unsafe.Pointer(_p0)), 0, 0) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Unlink(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_UNLINK, uintptr(unsafe.Pointer(_p0)), 0, 0) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Unmount(path string, flags 
int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_UNMOUNT, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func write(fd int, p []byte) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(_p0), uintptr(len(p))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func mmap(addr uintptr, length uintptr, prot int, flag int, fd int, pos int64) (ret uintptr, err error) { + r0, _, e1 := Syscall6(SYS_MMAP, uintptr(addr), uintptr(length), uintptr(prot), uintptr(flag), uintptr(fd), uintptr(pos)) + ret = uintptr(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func munmap(addr uintptr, length uintptr) (err error) { + _, _, e1 := Syscall(SYS_MUNMAP, uintptr(addr), uintptr(length), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func readlen(fd int, buf *byte, nbuf int) (n int, err error) { + r0, _, e1 := Syscall(SYS_READ, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func writelen(fd int, buf *byte, nbuf int) (n int, err error) { + r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func accept4(fd int, rsa *RawSockaddrAny, addrlen *_Socklen, flags int) (nfd int, err error) { + r0, _, e1 := Syscall6(SYS_ACCEPT4, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)), uintptr(flags), 0, 0) + nfd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} diff --git a/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zsyscall_freebsd_arm.go b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zsyscall_freebsd_arm.go new file mode 100644 index 000000000..1872d3230 --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zsyscall_freebsd_arm.go @@ -0,0 +1,1664 @@ +// mksyscall.pl -l32 -arm syscall_bsd.go syscall_freebsd.go syscall_freebsd_arm.go +// MACHINE GENERATED BY THE COMMAND ABOVE; DO NOT EDIT + +// +build arm,freebsd + +package unix + +import ( + "syscall" + "unsafe" +) + +var _ syscall.Errno + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getgroups(ngid int, gid *_Gid_t) (n int, err error) { + r0, _, e1 := RawSyscall(SYS_GETGROUPS, uintptr(ngid), uintptr(unsafe.Pointer(gid)), 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func setgroups(ngid int, gid *_Gid_t) (err error) { + _, _, e1 := RawSyscall(SYS_SETGROUPS, uintptr(ngid), uintptr(unsafe.Pointer(gid)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func wait4(pid int, wstatus *_C_int, options int, rusage *Rusage) (wpid int, 
err error) { + r0, _, e1 := Syscall6(SYS_WAIT4, uintptr(pid), uintptr(unsafe.Pointer(wstatus)), uintptr(options), uintptr(unsafe.Pointer(rusage)), 0, 0) + wpid = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error) { + r0, _, e1 := Syscall(SYS_ACCEPT, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func bind(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) { + _, _, e1 := Syscall(SYS_BIND, uintptr(s), uintptr(addr), uintptr(addrlen)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func connect(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) { + _, _, e1 := Syscall(SYS_CONNECT, uintptr(s), uintptr(addr), uintptr(addrlen)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func socket(domain int, typ int, proto int) (fd int, err error) { + r0, _, e1 := RawSyscall(SYS_SOCKET, uintptr(domain), uintptr(typ), uintptr(proto)) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getsockopt(s int, level int, name int, val unsafe.Pointer, vallen *_Socklen) (err error) { + _, _, e1 := Syscall6(SYS_GETSOCKOPT, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(unsafe.Pointer(vallen)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func setsockopt(s int, level int, name int, val unsafe.Pointer, vallen uintptr) (err error) { + _, _, e1 := Syscall6(SYS_SETSOCKOPT, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(vallen), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getpeername(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) { + _, _, e1 := RawSyscall(SYS_GETPEERNAME, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getsockname(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) { + _, _, e1 := RawSyscall(SYS_GETSOCKNAME, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Shutdown(s int, how int) (err error) { + _, _, e1 := Syscall(SYS_SHUTDOWN, uintptr(s), uintptr(how), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func socketpair(domain int, typ int, proto int, fd *[2]int32) (err error) { + _, _, e1 := RawSyscall6(SYS_SOCKETPAIR, uintptr(domain), uintptr(typ), uintptr(proto), uintptr(unsafe.Pointer(fd)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Socklen) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = 
unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_RECVFROM, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(flags), uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(fromlen))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func sendto(s int, buf []byte, flags int, to unsafe.Pointer, addrlen _Socklen) (err error) { + var _p0 unsafe.Pointer + if len(buf) > 0 { + _p0 = unsafe.Pointer(&buf[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall6(SYS_SENDTO, uintptr(s), uintptr(_p0), uintptr(len(buf)), uintptr(flags), uintptr(to), uintptr(addrlen)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func recvmsg(s int, msg *Msghdr, flags int) (n int, err error) { + r0, _, e1 := Syscall(SYS_RECVMSG, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func sendmsg(s int, msg *Msghdr, flags int) (n int, err error) { + r0, _, e1 := Syscall(SYS_SENDMSG, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func kevent(kq int, change unsafe.Pointer, nchange int, event unsafe.Pointer, nevent int, timeout *Timespec) (n int, err error) { + r0, _, e1 := Syscall6(SYS_KEVENT, uintptr(kq), uintptr(change), uintptr(nchange), uintptr(event), uintptr(nevent), uintptr(unsafe.Pointer(timeout))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { + var _p0 unsafe.Pointer + if len(mib) > 0 { + _p0 = unsafe.Pointer(&mib[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall6(SYS___SYSCTL, uintptr(_p0), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func utimes(path string, timeval *[2]Timeval) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_UTIMES, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(timeval)), 0) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func futimes(fd int, timeval *[2]Timeval) (err error) { + _, _, e1 := Syscall(SYS_FUTIMES, uintptr(fd), uintptr(unsafe.Pointer(timeval)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func fcntl(fd int, cmd int, arg int) (val int, err error) { + r0, _, e1 := Syscall(SYS_FCNTL, uintptr(fd), uintptr(cmd), uintptr(arg)) + val = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func pipe() (r int, w int, err error) { + r0, r1, e1 := RawSyscall(SYS_PIPE, 0, 0, 0) + r = int(r0) + w = int(r1) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Access(path string, mode uint32) (err error) { + var 
_p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_ACCESS, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Adjtime(delta *Timeval, olddelta *Timeval) (err error) { + _, _, e1 := Syscall(SYS_ADJTIME, uintptr(unsafe.Pointer(delta)), uintptr(unsafe.Pointer(olddelta)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Chdir(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_CHDIR, uintptr(unsafe.Pointer(_p0)), 0, 0) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Chflags(path string, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_CHFLAGS, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Chmod(path string, mode uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_CHMOD, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Chown(path string, uid int, gid int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_CHOWN, uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid)) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Chroot(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_CHROOT, uintptr(unsafe.Pointer(_p0)), 0, 0) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Close(fd int) (err error) { + _, _, e1 := Syscall(SYS_CLOSE, uintptr(fd), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Dup(fd int) (nfd int, err error) { + r0, _, e1 := Syscall(SYS_DUP, uintptr(fd), 0, 0) + nfd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Dup2(from int, to int) (err error) { + _, _, e1 := Syscall(SYS_DUP2, uintptr(from), uintptr(to), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Exit(code int) { + Syscall(SYS_EXIT, uintptr(code), 0, 0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ExtattrGetFd(fd int, attrnamespace int, attrname string, data uintptr, nbytes int) (ret int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(attrname) + if err != nil { + return + } + r0, _, e1 := Syscall6(SYS_EXTATTR_GET_FD, uintptr(fd), uintptr(attrnamespace), uintptr(unsafe.Pointer(_p0)), uintptr(data), uintptr(nbytes), 
0) + use(unsafe.Pointer(_p0)) + ret = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ExtattrSetFd(fd int, attrnamespace int, attrname string, data uintptr, nbytes int) (ret int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(attrname) + if err != nil { + return + } + r0, _, e1 := Syscall6(SYS_EXTATTR_SET_FD, uintptr(fd), uintptr(attrnamespace), uintptr(unsafe.Pointer(_p0)), uintptr(data), uintptr(nbytes), 0) + use(unsafe.Pointer(_p0)) + ret = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ExtattrDeleteFd(fd int, attrnamespace int, attrname string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(attrname) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_EXTATTR_DELETE_FD, uintptr(fd), uintptr(attrnamespace), uintptr(unsafe.Pointer(_p0))) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ExtattrListFd(fd int, attrnamespace int, data uintptr, nbytes int) (ret int, err error) { + r0, _, e1 := Syscall6(SYS_EXTATTR_LIST_FD, uintptr(fd), uintptr(attrnamespace), uintptr(data), uintptr(nbytes), 0, 0) + ret = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ExtattrGetFile(file string, attrnamespace int, attrname string, data uintptr, nbytes int) (ret int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(file) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(attrname) + if err != nil { + return + } + r0, _, e1 := Syscall6(SYS_EXTATTR_GET_FILE, uintptr(unsafe.Pointer(_p0)), uintptr(attrnamespace), uintptr(unsafe.Pointer(_p1)), uintptr(data), uintptr(nbytes), 0) + use(unsafe.Pointer(_p0)) + use(unsafe.Pointer(_p1)) + ret = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ExtattrSetFile(file string, attrnamespace int, attrname string, data uintptr, nbytes int) (ret int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(file) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(attrname) + if err != nil { + return + } + r0, _, e1 := Syscall6(SYS_EXTATTR_SET_FILE, uintptr(unsafe.Pointer(_p0)), uintptr(attrnamespace), uintptr(unsafe.Pointer(_p1)), uintptr(data), uintptr(nbytes), 0) + use(unsafe.Pointer(_p0)) + use(unsafe.Pointer(_p1)) + ret = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ExtattrDeleteFile(file string, attrnamespace int, attrname string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(file) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(attrname) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_EXTATTR_DELETE_FILE, uintptr(unsafe.Pointer(_p0)), uintptr(attrnamespace), uintptr(unsafe.Pointer(_p1))) + use(unsafe.Pointer(_p0)) + use(unsafe.Pointer(_p1)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ExtattrListFile(file string, attrnamespace int, data uintptr, nbytes int) (ret int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(file) + if err != nil { + return + } + r0, _, e1 := 
Syscall6(SYS_EXTATTR_LIST_FILE, uintptr(unsafe.Pointer(_p0)), uintptr(attrnamespace), uintptr(data), uintptr(nbytes), 0, 0) + use(unsafe.Pointer(_p0)) + ret = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ExtattrGetLink(link string, attrnamespace int, attrname string, data uintptr, nbytes int) (ret int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(link) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(attrname) + if err != nil { + return + } + r0, _, e1 := Syscall6(SYS_EXTATTR_GET_LINK, uintptr(unsafe.Pointer(_p0)), uintptr(attrnamespace), uintptr(unsafe.Pointer(_p1)), uintptr(data), uintptr(nbytes), 0) + use(unsafe.Pointer(_p0)) + use(unsafe.Pointer(_p1)) + ret = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ExtattrSetLink(link string, attrnamespace int, attrname string, data uintptr, nbytes int) (ret int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(link) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(attrname) + if err != nil { + return + } + r0, _, e1 := Syscall6(SYS_EXTATTR_SET_LINK, uintptr(unsafe.Pointer(_p0)), uintptr(attrnamespace), uintptr(unsafe.Pointer(_p1)), uintptr(data), uintptr(nbytes), 0) + use(unsafe.Pointer(_p0)) + use(unsafe.Pointer(_p1)) + ret = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ExtattrDeleteLink(link string, attrnamespace int, attrname string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(link) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(attrname) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_EXTATTR_DELETE_LINK, uintptr(unsafe.Pointer(_p0)), uintptr(attrnamespace), uintptr(unsafe.Pointer(_p1))) + use(unsafe.Pointer(_p0)) + use(unsafe.Pointer(_p1)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ExtattrListLink(link string, attrnamespace int, data uintptr, nbytes int) (ret int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(link) + if err != nil { + return + } + r0, _, e1 := Syscall6(SYS_EXTATTR_LIST_LINK, uintptr(unsafe.Pointer(_p0)), uintptr(attrnamespace), uintptr(data), uintptr(nbytes), 0, 0) + use(unsafe.Pointer(_p0)) + ret = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fadvise(fd int, offset int64, length int64, advice int) (err error) { + _, _, e1 := Syscall9(SYS_POSIX_FADVISE, uintptr(fd), 0, uintptr(offset), uintptr(offset>>32), uintptr(length), uintptr(length>>32), uintptr(advice), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fchdir(fd int) (err error) { + _, _, e1 := Syscall(SYS_FCHDIR, uintptr(fd), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fchflags(fd int, flags int) (err error) { + _, _, e1 := Syscall(SYS_FCHFLAGS, uintptr(fd), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fchmod(fd int, mode uint32) (err error) { + _, _, e1 := Syscall(SYS_FCHMOD, uintptr(fd), uintptr(mode), 
0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fchown(fd int, uid int, gid int) (err error) { + _, _, e1 := Syscall(SYS_FCHOWN, uintptr(fd), uintptr(uid), uintptr(gid)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Flock(fd int, how int) (err error) { + _, _, e1 := Syscall(SYS_FLOCK, uintptr(fd), uintptr(how), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fpathconf(fd int, name int) (val int, err error) { + r0, _, e1 := Syscall(SYS_FPATHCONF, uintptr(fd), uintptr(name), 0) + val = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fstat(fd int, stat *Stat_t) (err error) { + _, _, e1 := Syscall(SYS_FSTAT, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fstatfs(fd int, stat *Statfs_t) (err error) { + _, _, e1 := Syscall(SYS_FSTATFS, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fsync(fd int) (err error) { + _, _, e1 := Syscall(SYS_FSYNC, uintptr(fd), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Ftruncate(fd int, length int64) (err error) { + _, _, e1 := Syscall6(SYS_FTRUNCATE, uintptr(fd), 0, uintptr(length), uintptr(length>>32), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getdirentries(fd int, buf []byte, basep *uintptr) (n int, err error) { + var _p0 unsafe.Pointer + if len(buf) > 0 { + _p0 = unsafe.Pointer(&buf[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_GETDIRENTRIES, uintptr(fd), uintptr(_p0), uintptr(len(buf)), uintptr(unsafe.Pointer(basep)), 0, 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getdtablesize() (size int) { + r0, _, _ := Syscall(SYS_GETDTABLESIZE, 0, 0, 0) + size = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getegid() (egid int) { + r0, _, _ := RawSyscall(SYS_GETEGID, 0, 0, 0) + egid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Geteuid() (uid int) { + r0, _, _ := RawSyscall(SYS_GETEUID, 0, 0, 0) + uid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getgid() (gid int) { + r0, _, _ := RawSyscall(SYS_GETGID, 0, 0, 0) + gid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getpgid(pid int) (pgid int, err error) { + r0, _, e1 := RawSyscall(SYS_GETPGID, uintptr(pid), 0, 0) + pgid = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getpgrp() (pgrp int) { + r0, _, _ := RawSyscall(SYS_GETPGRP, 0, 0, 0) + pgrp = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getpid() (pid int) { + r0, _, _ := RawSyscall(SYS_GETPID, 0, 0, 0) + pid = int(r0) + return +} + +// THIS 
FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getppid() (ppid int) { + r0, _, _ := RawSyscall(SYS_GETPPID, 0, 0, 0) + ppid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getpriority(which int, who int) (prio int, err error) { + r0, _, e1 := Syscall(SYS_GETPRIORITY, uintptr(which), uintptr(who), 0) + prio = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getrlimit(which int, lim *Rlimit) (err error) { + _, _, e1 := RawSyscall(SYS_GETRLIMIT, uintptr(which), uintptr(unsafe.Pointer(lim)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getrusage(who int, rusage *Rusage) (err error) { + _, _, e1 := RawSyscall(SYS_GETRUSAGE, uintptr(who), uintptr(unsafe.Pointer(rusage)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getsid(pid int) (sid int, err error) { + r0, _, e1 := RawSyscall(SYS_GETSID, uintptr(pid), 0, 0) + sid = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Gettimeofday(tv *Timeval) (err error) { + _, _, e1 := RawSyscall(SYS_GETTIMEOFDAY, uintptr(unsafe.Pointer(tv)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getuid() (uid int) { + r0, _, _ := RawSyscall(SYS_GETUID, 0, 0, 0) + uid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Issetugid() (tainted bool) { + r0, _, _ := Syscall(SYS_ISSETUGID, 0, 0, 0) + tainted = bool(r0 != 0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Kill(pid int, signum syscall.Signal) (err error) { + _, _, e1 := Syscall(SYS_KILL, uintptr(pid), uintptr(signum), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Kqueue() (fd int, err error) { + r0, _, e1 := Syscall(SYS_KQUEUE, 0, 0, 0) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Lchown(path string, uid int, gid int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_LCHOWN, uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid)) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Link(path string, link string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(link) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_LINK, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) + use(unsafe.Pointer(_p0)) + use(unsafe.Pointer(_p1)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Listen(s int, backlog int) (err error) { + _, _, e1 := Syscall(SYS_LISTEN, uintptr(s), uintptr(backlog), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Lstat(path string, stat *Stat_t) (err error) { + var _p0 *byte + _p0, err = 
BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_LSTAT, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mkdir(path string, mode uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_MKDIR, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mkfifo(path string, mode uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_MKFIFO, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mknod(path string, mode uint32, dev int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_MKNOD, uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev)) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mlock(b []byte) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall(SYS_MLOCK, uintptr(_p0), uintptr(len(b)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mlockall(flags int) (err error) { + _, _, e1 := Syscall(SYS_MLOCKALL, uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mprotect(b []byte, prot int) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall(SYS_MPROTECT, uintptr(_p0), uintptr(len(b)), uintptr(prot)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Munlock(b []byte) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall(SYS_MUNLOCK, uintptr(_p0), uintptr(len(b)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Munlockall() (err error) { + _, _, e1 := Syscall(SYS_MUNLOCKALL, 0, 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Nanosleep(time *Timespec, leftover *Timespec) (err error) { + _, _, e1 := Syscall(SYS_NANOSLEEP, uintptr(unsafe.Pointer(time)), uintptr(unsafe.Pointer(leftover)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Open(path string, mode int, perm uint32) (fd int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + r0, _, e1 := Syscall(SYS_OPEN, uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(perm)) + use(unsafe.Pointer(_p0)) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS 
FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Pathconf(path string, name int) (val int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + r0, _, e1 := Syscall(SYS_PATHCONF, uintptr(unsafe.Pointer(_p0)), uintptr(name), 0) + use(unsafe.Pointer(_p0)) + val = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Pread(fd int, p []byte, offset int64) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_PREAD, uintptr(fd), uintptr(_p0), uintptr(len(p)), 0, uintptr(offset), uintptr(offset>>32)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Pwrite(fd int, p []byte, offset int64) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_PWRITE, uintptr(fd), uintptr(_p0), uintptr(len(p)), 0, uintptr(offset), uintptr(offset>>32)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func read(fd int, p []byte) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS_READ, uintptr(fd), uintptr(_p0), uintptr(len(p))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Readlink(path string, buf []byte) (n int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 unsafe.Pointer + if len(buf) > 0 { + _p1 = unsafe.Pointer(&buf[0]) + } else { + _p1 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS_READLINK, uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(buf))) + use(unsafe.Pointer(_p0)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Rename(from string, to string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(from) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(to) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_RENAME, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) + use(unsafe.Pointer(_p0)) + use(unsafe.Pointer(_p1)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Revoke(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_REVOKE, uintptr(unsafe.Pointer(_p0)), 0, 0) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Rmdir(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_RMDIR, uintptr(unsafe.Pointer(_p0)), 0, 0) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Seek(fd int, offset int64, whence int) (newoffset int64, err error) { + r0, r1, e1 := Syscall6(SYS_LSEEK, uintptr(fd), 0, 
uintptr(offset), uintptr(offset>>32), uintptr(whence), 0) + newoffset = int64(int64(r1)<<32 | int64(r0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Select(n int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (err error) { + _, _, e1 := Syscall6(SYS_SELECT, uintptr(n), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setegid(egid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETEGID, uintptr(egid), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Seteuid(euid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETEUID, uintptr(euid), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setgid(gid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETGID, uintptr(gid), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setlogin(name string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(name) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_SETLOGIN, uintptr(unsafe.Pointer(_p0)), 0, 0) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setpgid(pid int, pgid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETPGID, uintptr(pid), uintptr(pgid), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setpriority(which int, who int, prio int) (err error) { + _, _, e1 := Syscall(SYS_SETPRIORITY, uintptr(which), uintptr(who), uintptr(prio)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setregid(rgid int, egid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETREGID, uintptr(rgid), uintptr(egid), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setreuid(ruid int, euid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETREUID, uintptr(ruid), uintptr(euid), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setresgid(rgid int, egid int, sgid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETRESGID, uintptr(rgid), uintptr(egid), uintptr(sgid)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setresuid(ruid int, euid int, suid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETRESUID, uintptr(ruid), uintptr(euid), uintptr(suid)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setrlimit(which int, lim *Rlimit) (err error) { + _, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(which), uintptr(unsafe.Pointer(lim)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setsid() (pid int, err error) { + r0, _, e1 := RawSyscall(SYS_SETSID, 0, 0, 0) + pid = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS 
FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Settimeofday(tp *Timeval) (err error) { + _, _, e1 := RawSyscall(SYS_SETTIMEOFDAY, uintptr(unsafe.Pointer(tp)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setuid(uid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETUID, uintptr(uid), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Stat(path string, stat *Stat_t) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_STAT, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Statfs(path string, stat *Statfs_t) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_STATFS, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Symlink(path string, link string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(link) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_SYMLINK, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) + use(unsafe.Pointer(_p0)) + use(unsafe.Pointer(_p1)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Sync() (err error) { + _, _, e1 := Syscall(SYS_SYNC, 0, 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Truncate(path string, length int64) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_TRUNCATE, uintptr(unsafe.Pointer(_p0)), 0, uintptr(length), uintptr(length>>32), 0, 0) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Umask(newmask int) (oldmask int) { + r0, _, _ := Syscall(SYS_UMASK, uintptr(newmask), 0, 0) + oldmask = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Undelete(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_UNDELETE, uintptr(unsafe.Pointer(_p0)), 0, 0) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Unlink(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_UNLINK, uintptr(unsafe.Pointer(_p0)), 0, 0) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Unmount(path string, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_UNMOUNT, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } 
+ return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func write(fd int, p []byte) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(_p0), uintptr(len(p))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func mmap(addr uintptr, length uintptr, prot int, flag int, fd int, pos int64) (ret uintptr, err error) { + r0, _, e1 := Syscall9(SYS_MMAP, uintptr(addr), uintptr(length), uintptr(prot), uintptr(flag), uintptr(fd), 0, uintptr(pos), uintptr(pos>>32), 0) + ret = uintptr(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func munmap(addr uintptr, length uintptr) (err error) { + _, _, e1 := Syscall(SYS_MUNMAP, uintptr(addr), uintptr(length), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func readlen(fd int, buf *byte, nbuf int) (n int, err error) { + r0, _, e1 := Syscall(SYS_READ, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func writelen(fd int, buf *byte, nbuf int) (n int, err error) { + r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func accept4(fd int, rsa *RawSockaddrAny, addrlen *_Socklen, flags int) (nfd int, err error) { + r0, _, e1 := Syscall6(SYS_ACCEPT4, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)), uintptr(flags), 0, 0) + nfd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} diff --git a/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zsyscall_linux_386.go b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zsyscall_linux_386.go new file mode 100644 index 000000000..ff6c39dc2 --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zsyscall_linux_386.go @@ -0,0 +1,1628 @@ +// mksyscall.pl -l32 syscall_linux.go syscall_linux_386.go +// MACHINE GENERATED BY THE COMMAND ABOVE; DO NOT EDIT + +// +build 386,linux + +package unix + +import ( + "syscall" + "unsafe" +) + +var _ syscall.Errno + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func linkat(olddirfd int, oldpath string, newdirfd int, newpath string, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(oldpath) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(newpath) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_LINKAT, uintptr(olddirfd), uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1)), uintptr(flags), 0) + use(unsafe.Pointer(_p0)) + use(unsafe.Pointer(_p1)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func openat(dirfd int, path string, flags int, mode uint32) (fd int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + r0, _, e1 := Syscall6(SYS_OPENAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags), uintptr(mode), 0, 0) + 
use(unsafe.Pointer(_p0)) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func readlinkat(dirfd int, path string, buf []byte) (n int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 unsafe.Pointer + if len(buf) > 0 { + _p1 = unsafe.Pointer(&buf[0]) + } else { + _p1 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_READLINKAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(buf)), 0, 0) + use(unsafe.Pointer(_p0)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func symlinkat(oldpath string, newdirfd int, newpath string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(oldpath) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(newpath) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_SYMLINKAT, uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1))) + use(unsafe.Pointer(_p0)) + use(unsafe.Pointer(_p1)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func unlinkat(dirfd int, path string, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_UNLINKAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags)) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func utimes(path string, times *[2]Timeval) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_UTIMES, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(times)), 0) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_UTIMENSAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(times)), uintptr(flags), 0, 0) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func futimesat(dirfd int, path *byte, times *[2]Timeval) (err error) { + _, _, e1 := Syscall(SYS_FUTIMESAT, uintptr(dirfd), uintptr(unsafe.Pointer(path)), uintptr(unsafe.Pointer(times))) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getcwd(buf []byte) (n int, err error) { + var _p0 unsafe.Pointer + if len(buf) > 0 { + _p0 = unsafe.Pointer(&buf[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS_GETCWD, uintptr(_p0), uintptr(len(buf)), 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func wait4(pid int, wstatus *_C_int, options int, rusage *Rusage) (wpid int, err error) { + r0, _, e1 := Syscall6(SYS_WAIT4, uintptr(pid), uintptr(unsafe.Pointer(wstatus)), uintptr(options), uintptr(unsafe.Pointer(rusage)), 0, 0) + wpid = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE 
TOP; DO NOT EDIT + +func ptrace(request int, pid int, addr uintptr, data uintptr) (err error) { + _, _, e1 := Syscall6(SYS_PTRACE, uintptr(request), uintptr(pid), uintptr(addr), uintptr(data), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func reboot(magic1 uint, magic2 uint, cmd int, arg string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(arg) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_REBOOT, uintptr(magic1), uintptr(magic2), uintptr(cmd), uintptr(unsafe.Pointer(_p0)), 0, 0) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func mount(source string, target string, fstype string, flags uintptr, data *byte) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(source) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(target) + if err != nil { + return + } + var _p2 *byte + _p2, err = BytePtrFromString(fstype) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_MOUNT, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(unsafe.Pointer(_p2)), uintptr(flags), uintptr(unsafe.Pointer(data)), 0) + use(unsafe.Pointer(_p0)) + use(unsafe.Pointer(_p1)) + use(unsafe.Pointer(_p2)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Acct(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_ACCT, uintptr(unsafe.Pointer(_p0)), 0, 0) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Adjtimex(buf *Timex) (state int, err error) { + r0, _, e1 := Syscall(SYS_ADJTIMEX, uintptr(unsafe.Pointer(buf)), 0, 0) + state = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Chdir(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_CHDIR, uintptr(unsafe.Pointer(_p0)), 0, 0) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Chroot(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_CHROOT, uintptr(unsafe.Pointer(_p0)), 0, 0) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ClockGettime(clockid int32, time *Timespec) (err error) { + _, _, e1 := Syscall(SYS_CLOCK_GETTIME, uintptr(clockid), uintptr(unsafe.Pointer(time)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Close(fd int) (err error) { + _, _, e1 := Syscall(SYS_CLOSE, uintptr(fd), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Dup(oldfd int) (fd int, err error) { + r0, _, e1 := Syscall(SYS_DUP, uintptr(oldfd), 0, 0) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Dup3(oldfd int, newfd int, flags int) (err error) { + _, _, 
e1 := Syscall(SYS_DUP3, uintptr(oldfd), uintptr(newfd), uintptr(flags)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func EpollCreate(size int) (fd int, err error) { + r0, _, e1 := RawSyscall(SYS_EPOLL_CREATE, uintptr(size), 0, 0) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func EpollCreate1(flag int) (fd int, err error) { + r0, _, e1 := RawSyscall(SYS_EPOLL_CREATE1, uintptr(flag), 0, 0) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func EpollCtl(epfd int, op int, fd int, event *EpollEvent) (err error) { + _, _, e1 := RawSyscall6(SYS_EPOLL_CTL, uintptr(epfd), uintptr(op), uintptr(fd), uintptr(unsafe.Pointer(event)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) { + var _p0 unsafe.Pointer + if len(events) > 0 { + _p0 = unsafe.Pointer(&events[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_EPOLL_WAIT, uintptr(epfd), uintptr(_p0), uintptr(len(events)), uintptr(msec), 0, 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Exit(code int) { + Syscall(SYS_EXIT_GROUP, uintptr(code), 0, 0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Faccessat(dirfd int, path string, mode uint32, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_FACCESSAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fallocate(fd int, mode uint32, off int64, len int64) (err error) { + _, _, e1 := Syscall6(SYS_FALLOCATE, uintptr(fd), uintptr(mode), uintptr(off), uintptr(off>>32), uintptr(len), uintptr(len>>32)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fchdir(fd int) (err error) { + _, _, e1 := Syscall(SYS_FCHDIR, uintptr(fd), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fchmod(fd int, mode uint32) (err error) { + _, _, e1 := Syscall(SYS_FCHMOD, uintptr(fd), uintptr(mode), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fchmodat(dirfd int, path string, mode uint32, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_FCHMODAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fchownat(dirfd int, path string, uid int, gid int, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_FCHOWNAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid), uintptr(flags), 0) + 
use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func fcntl(fd int, cmd int, arg int) (val int, err error) { + r0, _, e1 := Syscall(SYS_FCNTL, uintptr(fd), uintptr(cmd), uintptr(arg)) + val = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fdatasync(fd int) (err error) { + _, _, e1 := Syscall(SYS_FDATASYNC, uintptr(fd), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Flock(fd int, how int) (err error) { + _, _, e1 := Syscall(SYS_FLOCK, uintptr(fd), uintptr(how), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fsync(fd int) (err error) { + _, _, e1 := Syscall(SYS_FSYNC, uintptr(fd), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getdents(fd int, buf []byte) (n int, err error) { + var _p0 unsafe.Pointer + if len(buf) > 0 { + _p0 = unsafe.Pointer(&buf[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS_GETDENTS64, uintptr(fd), uintptr(_p0), uintptr(len(buf))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getpgid(pid int) (pgid int, err error) { + r0, _, e1 := RawSyscall(SYS_GETPGID, uintptr(pid), 0, 0) + pgid = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getpid() (pid int) { + r0, _, _ := RawSyscall(SYS_GETPID, 0, 0, 0) + pid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getppid() (ppid int) { + r0, _, _ := RawSyscall(SYS_GETPPID, 0, 0, 0) + ppid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getpriority(which int, who int) (prio int, err error) { + r0, _, e1 := Syscall(SYS_GETPRIORITY, uintptr(which), uintptr(who), 0) + prio = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getrusage(who int, rusage *Rusage) (err error) { + _, _, e1 := RawSyscall(SYS_GETRUSAGE, uintptr(who), uintptr(unsafe.Pointer(rusage)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Gettid() (tid int) { + r0, _, _ := RawSyscall(SYS_GETTID, 0, 0, 0) + tid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getxattr(path string, attr string, dest []byte) (sz int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(attr) + if err != nil { + return + } + var _p2 unsafe.Pointer + if len(dest) > 0 { + _p2 = unsafe.Pointer(&dest[0]) + } else { + _p2 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_GETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(dest)), 0, 0) + use(unsafe.Pointer(_p0)) + use(unsafe.Pointer(_p1)) + sz = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func InotifyAddWatch(fd int, pathname string, mask uint32) (watchdesc int, err 
error) { + var _p0 *byte + _p0, err = BytePtrFromString(pathname) + if err != nil { + return + } + r0, _, e1 := Syscall(SYS_INOTIFY_ADD_WATCH, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(mask)) + use(unsafe.Pointer(_p0)) + watchdesc = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func InotifyInit1(flags int) (fd int, err error) { + r0, _, e1 := RawSyscall(SYS_INOTIFY_INIT1, uintptr(flags), 0, 0) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func InotifyRmWatch(fd int, watchdesc uint32) (success int, err error) { + r0, _, e1 := RawSyscall(SYS_INOTIFY_RM_WATCH, uintptr(fd), uintptr(watchdesc), 0) + success = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Kill(pid int, sig syscall.Signal) (err error) { + _, _, e1 := RawSyscall(SYS_KILL, uintptr(pid), uintptr(sig), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Klogctl(typ int, buf []byte) (n int, err error) { + var _p0 unsafe.Pointer + if len(buf) > 0 { + _p0 = unsafe.Pointer(&buf[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS_SYSLOG, uintptr(typ), uintptr(_p0), uintptr(len(buf))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Listxattr(path string, dest []byte) (sz int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 unsafe.Pointer + if len(dest) > 0 { + _p1 = unsafe.Pointer(&dest[0]) + } else { + _p1 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS_LISTXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(dest))) + use(unsafe.Pointer(_p0)) + sz = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mkdirat(dirfd int, path string, mode uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_MKDIRAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode)) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mknodat(dirfd int, path string, mode uint32, dev int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_MKNODAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev), 0, 0) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Nanosleep(time *Timespec, leftover *Timespec) (err error) { + _, _, e1 := Syscall(SYS_NANOSLEEP, uintptr(unsafe.Pointer(time)), uintptr(unsafe.Pointer(leftover)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Pause() (err error) { + _, _, e1 := Syscall(SYS_PAUSE, 0, 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func PivotRoot(newroot string, putold string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(newroot) + if err != nil { + return 
+ } + var _p1 *byte + _p1, err = BytePtrFromString(putold) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_PIVOT_ROOT, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) + use(unsafe.Pointer(_p0)) + use(unsafe.Pointer(_p1)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func prlimit(pid int, resource int, old *Rlimit, newlimit *Rlimit) (err error) { + _, _, e1 := RawSyscall6(SYS_PRLIMIT64, uintptr(pid), uintptr(resource), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(newlimit)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Prctl(option int, arg2 uintptr, arg3 uintptr, arg4 uintptr, arg5 uintptr) (err error) { + _, _, e1 := Syscall6(SYS_PRCTL, uintptr(option), uintptr(arg2), uintptr(arg3), uintptr(arg4), uintptr(arg5), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func read(fd int, p []byte) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS_READ, uintptr(fd), uintptr(_p0), uintptr(len(p))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Removexattr(path string, attr string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(attr) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_REMOVEXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) + use(unsafe.Pointer(_p0)) + use(unsafe.Pointer(_p1)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Renameat(olddirfd int, oldpath string, newdirfd int, newpath string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(oldpath) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(newpath) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_RENAMEAT, uintptr(olddirfd), uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1)), 0, 0) + use(unsafe.Pointer(_p0)) + use(unsafe.Pointer(_p1)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setdomainname(p []byte) (err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall(SYS_SETDOMAINNAME, uintptr(_p0), uintptr(len(p)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Sethostname(p []byte) (err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall(SYS_SETHOSTNAME, uintptr(_p0), uintptr(len(p)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setpgid(pid int, pgid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETPGID, uintptr(pid), uintptr(pgid), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setsid() (pid int, err error) { + r0, _, e1 := RawSyscall(SYS_SETSID, 0, 0, 0) + 
pid = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Settimeofday(tv *Timeval) (err error) { + _, _, e1 := RawSyscall(SYS_SETTIMEOFDAY, uintptr(unsafe.Pointer(tv)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setpriority(which int, who int, prio int) (err error) { + _, _, e1 := Syscall(SYS_SETPRIORITY, uintptr(which), uintptr(who), uintptr(prio)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setxattr(path string, attr string, data []byte, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(attr) + if err != nil { + return + } + var _p2 unsafe.Pointer + if len(data) > 0 { + _p2 = unsafe.Pointer(&data[0]) + } else { + _p2 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall6(SYS_SETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(data)), uintptr(flags), 0) + use(unsafe.Pointer(_p0)) + use(unsafe.Pointer(_p1)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Sync() { + Syscall(SYS_SYNC, 0, 0, 0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Sysinfo(info *Sysinfo_t) (err error) { + _, _, e1 := RawSyscall(SYS_SYSINFO, uintptr(unsafe.Pointer(info)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Tee(rfd int, wfd int, len int, flags int) (n int64, err error) { + r0, r1, e1 := Syscall6(SYS_TEE, uintptr(rfd), uintptr(wfd), uintptr(len), uintptr(flags), 0, 0) + n = int64(int64(r1)<<32 | int64(r0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Tgkill(tgid int, tid int, sig syscall.Signal) (err error) { + _, _, e1 := RawSyscall(SYS_TGKILL, uintptr(tgid), uintptr(tid), uintptr(sig)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Times(tms *Tms) (ticks uintptr, err error) { + r0, _, e1 := RawSyscall(SYS_TIMES, uintptr(unsafe.Pointer(tms)), 0, 0) + ticks = uintptr(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Umask(mask int) (oldmask int) { + r0, _, _ := RawSyscall(SYS_UMASK, uintptr(mask), 0, 0) + oldmask = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Uname(buf *Utsname) (err error) { + _, _, e1 := RawSyscall(SYS_UNAME, uintptr(unsafe.Pointer(buf)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Unmount(target string, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(target) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_UMOUNT2, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Unshare(flags int) (err error) { + _, _, e1 := Syscall(SYS_UNSHARE, uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS 
GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Ustat(dev int, ubuf *Ustat_t) (err error) { + _, _, e1 := Syscall(SYS_USTAT, uintptr(dev), uintptr(unsafe.Pointer(ubuf)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Utime(path string, buf *Utimbuf) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_UTIME, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(buf)), 0) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func write(fd int, p []byte) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(_p0), uintptr(len(p))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func exitThread(code int) (err error) { + _, _, e1 := Syscall(SYS_EXIT, uintptr(code), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func readlen(fd int, p *byte, np int) (n int, err error) { + r0, _, e1 := Syscall(SYS_READ, uintptr(fd), uintptr(unsafe.Pointer(p)), uintptr(np)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func writelen(fd int, p *byte, np int) (n int, err error) { + r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(unsafe.Pointer(p)), uintptr(np)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func munmap(addr uintptr, length uintptr) (err error) { + _, _, e1 := Syscall(SYS_MUNMAP, uintptr(addr), uintptr(length), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Madvise(b []byte, advice int) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall(SYS_MADVISE, uintptr(_p0), uintptr(len(b)), uintptr(advice)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mprotect(b []byte, prot int) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall(SYS_MPROTECT, uintptr(_p0), uintptr(len(b)), uintptr(prot)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mlock(b []byte) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall(SYS_MLOCK, uintptr(_p0), uintptr(len(b)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Munlock(b []byte) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall(SYS_MUNLOCK, uintptr(_p0), uintptr(len(b)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func 
Mlockall(flags int) (err error) { + _, _, e1 := Syscall(SYS_MLOCKALL, uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Munlockall() (err error) { + _, _, e1 := Syscall(SYS_MUNLOCKALL, 0, 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func pipe(p *[2]_C_int) (err error) { + _, _, e1 := RawSyscall(SYS_PIPE, uintptr(unsafe.Pointer(p)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func pipe2(p *[2]_C_int, flags int) (err error) { + _, _, e1 := RawSyscall(SYS_PIPE2, uintptr(unsafe.Pointer(p)), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Dup2(oldfd int, newfd int) (err error) { + _, _, e1 := Syscall(SYS_DUP2, uintptr(oldfd), uintptr(newfd), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fadvise(fd int, offset int64, length int64, advice int) (err error) { + _, _, e1 := Syscall6(SYS_FADVISE64_64, uintptr(fd), uintptr(offset), uintptr(offset>>32), uintptr(length), uintptr(length>>32), uintptr(advice)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fchown(fd int, uid int, gid int) (err error) { + _, _, e1 := Syscall(SYS_FCHOWN32, uintptr(fd), uintptr(uid), uintptr(gid)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fstat(fd int, stat *Stat_t) (err error) { + _, _, e1 := Syscall(SYS_FSTAT64, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Ftruncate(fd int, length int64) (err error) { + _, _, e1 := Syscall(SYS_FTRUNCATE64, uintptr(fd), uintptr(length), uintptr(length>>32)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getegid() (egid int) { + r0, _, _ := RawSyscall(SYS_GETEGID32, 0, 0, 0) + egid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Geteuid() (euid int) { + r0, _, _ := RawSyscall(SYS_GETEUID32, 0, 0, 0) + euid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getgid() (gid int) { + r0, _, _ := RawSyscall(SYS_GETGID32, 0, 0, 0) + gid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getuid() (uid int) { + r0, _, _ := RawSyscall(SYS_GETUID32, 0, 0, 0) + uid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func InotifyInit() (fd int, err error) { + r0, _, e1 := RawSyscall(SYS_INOTIFY_INIT, 0, 0, 0) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Ioperm(from int, num int, on int) (err error) { + _, _, e1 := Syscall(SYS_IOPERM, uintptr(from), uintptr(num), uintptr(on)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Iopl(level int) (err error) { + _, _, e1 := Syscall(SYS_IOPL, uintptr(level), 0, 0) + if e1 != 0 { + err = 
errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Lchown(path string, uid int, gid int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_LCHOWN32, uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid)) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Lstat(path string, stat *Stat_t) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_LSTAT64, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Pread(fd int, p []byte, offset int64) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_PREAD64, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(offset), uintptr(offset>>32), 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Pwrite(fd int, p []byte, offset int64) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_PWRITE64, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(offset), uintptr(offset>>32), 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) { + r0, _, e1 := Syscall6(SYS_SENDFILE64, uintptr(outfd), uintptr(infd), uintptr(unsafe.Pointer(offset)), uintptr(count), 0, 0) + written = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setfsgid(gid int) (err error) { + _, _, e1 := Syscall(SYS_SETFSGID32, uintptr(gid), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setfsuid(uid int) (err error) { + _, _, e1 := Syscall(SYS_SETFSUID32, uintptr(uid), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setregid(rgid int, egid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETREGID32, uintptr(rgid), uintptr(egid), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setresgid(rgid int, egid int, sgid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETRESGID32, uintptr(rgid), uintptr(egid), uintptr(sgid)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setresuid(ruid int, euid int, suid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETRESUID32, uintptr(ruid), uintptr(euid), uintptr(suid)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setreuid(ruid int, euid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETREUID32, uintptr(ruid), uintptr(euid), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT 
THE TOP; DO NOT EDIT + +func Splice(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n int, err error) { + r0, _, e1 := Syscall6(SYS_SPLICE, uintptr(rfd), uintptr(unsafe.Pointer(roff)), uintptr(wfd), uintptr(unsafe.Pointer(woff)), uintptr(len), uintptr(flags)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Stat(path string, stat *Stat_t) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_STAT64, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func SyncFileRange(fd int, off int64, n int64, flags int) (err error) { + _, _, e1 := Syscall6(SYS_SYNC_FILE_RANGE, uintptr(fd), uintptr(off), uintptr(off>>32), uintptr(n), uintptr(n>>32), uintptr(flags)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Truncate(path string, length int64) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_TRUNCATE64, uintptr(unsafe.Pointer(_p0)), uintptr(length), uintptr(length>>32)) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getgroups(n int, list *_Gid_t) (nn int, err error) { + r0, _, e1 := RawSyscall(SYS_GETGROUPS32, uintptr(n), uintptr(unsafe.Pointer(list)), 0) + nn = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func setgroups(n int, list *_Gid_t) (err error) { + _, _, e1 := RawSyscall(SYS_SETGROUPS32, uintptr(n), uintptr(unsafe.Pointer(list)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err error) { + r0, _, e1 := Syscall6(SYS__NEWSELECT, uintptr(nfd), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func mmap2(addr uintptr, length uintptr, prot int, flags int, fd int, pageOffset uintptr) (xaddr uintptr, err error) { + r0, _, e1 := Syscall6(SYS_MMAP2, uintptr(addr), uintptr(length), uintptr(prot), uintptr(flags), uintptr(fd), uintptr(pageOffset)) + xaddr = uintptr(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getrlimit(resource int, rlim *rlimit32) (err error) { + _, _, e1 := RawSyscall(SYS_GETRLIMIT, uintptr(resource), uintptr(unsafe.Pointer(rlim)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func setrlimit(resource int, rlim *rlimit32) (err error) { + _, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(resource), uintptr(unsafe.Pointer(rlim)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Gettimeofday(tv *Timeval) (err error) { + _, _, e1 := RawSyscall(SYS_GETTIMEOFDAY, uintptr(unsafe.Pointer(tv)), 0, 0) + if e1 != 0 { + 
err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Time(t *Time_t) (tt Time_t, err error) { + r0, _, e1 := RawSyscall(SYS_TIME, uintptr(unsafe.Pointer(t)), 0, 0) + tt = Time_t(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} diff --git a/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zsyscall_linux_amd64.go b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zsyscall_linux_amd64.go new file mode 100644 index 000000000..c2438522d --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zsyscall_linux_amd64.go @@ -0,0 +1,1822 @@ +// mksyscall.pl syscall_linux.go syscall_linux_amd64.go +// MACHINE GENERATED BY THE COMMAND ABOVE; DO NOT EDIT + +// +build amd64,linux + +package unix + +import ( + "syscall" + "unsafe" +) + +var _ syscall.Errno + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func linkat(olddirfd int, oldpath string, newdirfd int, newpath string, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(oldpath) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(newpath) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_LINKAT, uintptr(olddirfd), uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1)), uintptr(flags), 0) + use(unsafe.Pointer(_p0)) + use(unsafe.Pointer(_p1)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func openat(dirfd int, path string, flags int, mode uint32) (fd int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + r0, _, e1 := Syscall6(SYS_OPENAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags), uintptr(mode), 0, 0) + use(unsafe.Pointer(_p0)) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func readlinkat(dirfd int, path string, buf []byte) (n int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 unsafe.Pointer + if len(buf) > 0 { + _p1 = unsafe.Pointer(&buf[0]) + } else { + _p1 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_READLINKAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(buf)), 0, 0) + use(unsafe.Pointer(_p0)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func symlinkat(oldpath string, newdirfd int, newpath string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(oldpath) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(newpath) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_SYMLINKAT, uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1))) + use(unsafe.Pointer(_p0)) + use(unsafe.Pointer(_p1)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func unlinkat(dirfd int, path string, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_UNLINKAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags)) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func utimes(path string, times 
*[2]Timeval) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_UTIMES, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(times)), 0) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_UTIMENSAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(times)), uintptr(flags), 0, 0) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func futimesat(dirfd int, path *byte, times *[2]Timeval) (err error) { + _, _, e1 := Syscall(SYS_FUTIMESAT, uintptr(dirfd), uintptr(unsafe.Pointer(path)), uintptr(unsafe.Pointer(times))) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getcwd(buf []byte) (n int, err error) { + var _p0 unsafe.Pointer + if len(buf) > 0 { + _p0 = unsafe.Pointer(&buf[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS_GETCWD, uintptr(_p0), uintptr(len(buf)), 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func wait4(pid int, wstatus *_C_int, options int, rusage *Rusage) (wpid int, err error) { + r0, _, e1 := Syscall6(SYS_WAIT4, uintptr(pid), uintptr(unsafe.Pointer(wstatus)), uintptr(options), uintptr(unsafe.Pointer(rusage)), 0, 0) + wpid = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ptrace(request int, pid int, addr uintptr, data uintptr) (err error) { + _, _, e1 := Syscall6(SYS_PTRACE, uintptr(request), uintptr(pid), uintptr(addr), uintptr(data), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func reboot(magic1 uint, magic2 uint, cmd int, arg string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(arg) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_REBOOT, uintptr(magic1), uintptr(magic2), uintptr(cmd), uintptr(unsafe.Pointer(_p0)), 0, 0) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func mount(source string, target string, fstype string, flags uintptr, data *byte) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(source) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(target) + if err != nil { + return + } + var _p2 *byte + _p2, err = BytePtrFromString(fstype) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_MOUNT, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(unsafe.Pointer(_p2)), uintptr(flags), uintptr(unsafe.Pointer(data)), 0) + use(unsafe.Pointer(_p0)) + use(unsafe.Pointer(_p1)) + use(unsafe.Pointer(_p2)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Acct(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_ACCT, uintptr(unsafe.Pointer(_p0)), 0, 0) + use(unsafe.Pointer(_p0)) + if 
e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Adjtimex(buf *Timex) (state int, err error) { + r0, _, e1 := Syscall(SYS_ADJTIMEX, uintptr(unsafe.Pointer(buf)), 0, 0) + state = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Chdir(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_CHDIR, uintptr(unsafe.Pointer(_p0)), 0, 0) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Chroot(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_CHROOT, uintptr(unsafe.Pointer(_p0)), 0, 0) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ClockGettime(clockid int32, time *Timespec) (err error) { + _, _, e1 := Syscall(SYS_CLOCK_GETTIME, uintptr(clockid), uintptr(unsafe.Pointer(time)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Close(fd int) (err error) { + _, _, e1 := Syscall(SYS_CLOSE, uintptr(fd), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Dup(oldfd int) (fd int, err error) { + r0, _, e1 := Syscall(SYS_DUP, uintptr(oldfd), 0, 0) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Dup3(oldfd int, newfd int, flags int) (err error) { + _, _, e1 := Syscall(SYS_DUP3, uintptr(oldfd), uintptr(newfd), uintptr(flags)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func EpollCreate(size int) (fd int, err error) { + r0, _, e1 := RawSyscall(SYS_EPOLL_CREATE, uintptr(size), 0, 0) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func EpollCreate1(flag int) (fd int, err error) { + r0, _, e1 := RawSyscall(SYS_EPOLL_CREATE1, uintptr(flag), 0, 0) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func EpollCtl(epfd int, op int, fd int, event *EpollEvent) (err error) { + _, _, e1 := RawSyscall6(SYS_EPOLL_CTL, uintptr(epfd), uintptr(op), uintptr(fd), uintptr(unsafe.Pointer(event)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) { + var _p0 unsafe.Pointer + if len(events) > 0 { + _p0 = unsafe.Pointer(&events[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_EPOLL_WAIT, uintptr(epfd), uintptr(_p0), uintptr(len(events)), uintptr(msec), 0, 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Exit(code int) { + Syscall(SYS_EXIT_GROUP, uintptr(code), 0, 0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Faccessat(dirfd int, path string, mode uint32, flags int) (err error) { 
+ var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_FACCESSAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fallocate(fd int, mode uint32, off int64, len int64) (err error) { + _, _, e1 := Syscall6(SYS_FALLOCATE, uintptr(fd), uintptr(mode), uintptr(off), uintptr(len), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fchdir(fd int) (err error) { + _, _, e1 := Syscall(SYS_FCHDIR, uintptr(fd), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fchmod(fd int, mode uint32) (err error) { + _, _, e1 := Syscall(SYS_FCHMOD, uintptr(fd), uintptr(mode), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fchmodat(dirfd int, path string, mode uint32, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_FCHMODAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fchownat(dirfd int, path string, uid int, gid int, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_FCHOWNAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid), uintptr(flags), 0) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func fcntl(fd int, cmd int, arg int) (val int, err error) { + r0, _, e1 := Syscall(SYS_FCNTL, uintptr(fd), uintptr(cmd), uintptr(arg)) + val = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fdatasync(fd int) (err error) { + _, _, e1 := Syscall(SYS_FDATASYNC, uintptr(fd), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Flock(fd int, how int) (err error) { + _, _, e1 := Syscall(SYS_FLOCK, uintptr(fd), uintptr(how), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fsync(fd int) (err error) { + _, _, e1 := Syscall(SYS_FSYNC, uintptr(fd), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getdents(fd int, buf []byte) (n int, err error) { + var _p0 unsafe.Pointer + if len(buf) > 0 { + _p0 = unsafe.Pointer(&buf[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS_GETDENTS64, uintptr(fd), uintptr(_p0), uintptr(len(buf))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getpgid(pid int) (pgid int, err error) { + r0, _, e1 := RawSyscall(SYS_GETPGID, uintptr(pid), 0, 0) + pgid = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getpid() 
(pid int) { + r0, _, _ := RawSyscall(SYS_GETPID, 0, 0, 0) + pid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getppid() (ppid int) { + r0, _, _ := RawSyscall(SYS_GETPPID, 0, 0, 0) + ppid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getpriority(which int, who int) (prio int, err error) { + r0, _, e1 := Syscall(SYS_GETPRIORITY, uintptr(which), uintptr(who), 0) + prio = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getrusage(who int, rusage *Rusage) (err error) { + _, _, e1 := RawSyscall(SYS_GETRUSAGE, uintptr(who), uintptr(unsafe.Pointer(rusage)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Gettid() (tid int) { + r0, _, _ := RawSyscall(SYS_GETTID, 0, 0, 0) + tid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getxattr(path string, attr string, dest []byte) (sz int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(attr) + if err != nil { + return + } + var _p2 unsafe.Pointer + if len(dest) > 0 { + _p2 = unsafe.Pointer(&dest[0]) + } else { + _p2 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_GETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(dest)), 0, 0) + use(unsafe.Pointer(_p0)) + use(unsafe.Pointer(_p1)) + sz = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func InotifyAddWatch(fd int, pathname string, mask uint32) (watchdesc int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(pathname) + if err != nil { + return + } + r0, _, e1 := Syscall(SYS_INOTIFY_ADD_WATCH, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(mask)) + use(unsafe.Pointer(_p0)) + watchdesc = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func InotifyInit1(flags int) (fd int, err error) { + r0, _, e1 := RawSyscall(SYS_INOTIFY_INIT1, uintptr(flags), 0, 0) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func InotifyRmWatch(fd int, watchdesc uint32) (success int, err error) { + r0, _, e1 := RawSyscall(SYS_INOTIFY_RM_WATCH, uintptr(fd), uintptr(watchdesc), 0) + success = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Kill(pid int, sig syscall.Signal) (err error) { + _, _, e1 := RawSyscall(SYS_KILL, uintptr(pid), uintptr(sig), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Klogctl(typ int, buf []byte) (n int, err error) { + var _p0 unsafe.Pointer + if len(buf) > 0 { + _p0 = unsafe.Pointer(&buf[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS_SYSLOG, uintptr(typ), uintptr(_p0), uintptr(len(buf))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Listxattr(path string, dest []byte) (sz int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 
unsafe.Pointer + if len(dest) > 0 { + _p1 = unsafe.Pointer(&dest[0]) + } else { + _p1 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS_LISTXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(dest))) + use(unsafe.Pointer(_p0)) + sz = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mkdirat(dirfd int, path string, mode uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_MKDIRAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode)) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mknodat(dirfd int, path string, mode uint32, dev int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_MKNODAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev), 0, 0) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Nanosleep(time *Timespec, leftover *Timespec) (err error) { + _, _, e1 := Syscall(SYS_NANOSLEEP, uintptr(unsafe.Pointer(time)), uintptr(unsafe.Pointer(leftover)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Pause() (err error) { + _, _, e1 := Syscall(SYS_PAUSE, 0, 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func PivotRoot(newroot string, putold string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(newroot) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(putold) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_PIVOT_ROOT, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) + use(unsafe.Pointer(_p0)) + use(unsafe.Pointer(_p1)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func prlimit(pid int, resource int, old *Rlimit, newlimit *Rlimit) (err error) { + _, _, e1 := RawSyscall6(SYS_PRLIMIT64, uintptr(pid), uintptr(resource), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(newlimit)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Prctl(option int, arg2 uintptr, arg3 uintptr, arg4 uintptr, arg5 uintptr) (err error) { + _, _, e1 := Syscall6(SYS_PRCTL, uintptr(option), uintptr(arg2), uintptr(arg3), uintptr(arg4), uintptr(arg5), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func read(fd int, p []byte) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS_READ, uintptr(fd), uintptr(_p0), uintptr(len(p))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Removexattr(path string, attr string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(attr) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_REMOVEXATTR, uintptr(unsafe.Pointer(_p0)), 
uintptr(unsafe.Pointer(_p1)), 0) + use(unsafe.Pointer(_p0)) + use(unsafe.Pointer(_p1)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Renameat(olddirfd int, oldpath string, newdirfd int, newpath string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(oldpath) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(newpath) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_RENAMEAT, uintptr(olddirfd), uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1)), 0, 0) + use(unsafe.Pointer(_p0)) + use(unsafe.Pointer(_p1)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setdomainname(p []byte) (err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall(SYS_SETDOMAINNAME, uintptr(_p0), uintptr(len(p)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Sethostname(p []byte) (err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall(SYS_SETHOSTNAME, uintptr(_p0), uintptr(len(p)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setpgid(pid int, pgid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETPGID, uintptr(pid), uintptr(pgid), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setsid() (pid int, err error) { + r0, _, e1 := RawSyscall(SYS_SETSID, 0, 0, 0) + pid = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Settimeofday(tv *Timeval) (err error) { + _, _, e1 := RawSyscall(SYS_SETTIMEOFDAY, uintptr(unsafe.Pointer(tv)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setpriority(which int, who int, prio int) (err error) { + _, _, e1 := Syscall(SYS_SETPRIORITY, uintptr(which), uintptr(who), uintptr(prio)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setxattr(path string, attr string, data []byte, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(attr) + if err != nil { + return + } + var _p2 unsafe.Pointer + if len(data) > 0 { + _p2 = unsafe.Pointer(&data[0]) + } else { + _p2 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall6(SYS_SETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(data)), uintptr(flags), 0) + use(unsafe.Pointer(_p0)) + use(unsafe.Pointer(_p1)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Sync() { + Syscall(SYS_SYNC, 0, 0, 0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Sysinfo(info *Sysinfo_t) (err error) { + _, _, e1 := RawSyscall(SYS_SYSINFO, uintptr(unsafe.Pointer(info)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func 
Tee(rfd int, wfd int, len int, flags int) (n int64, err error) { + r0, _, e1 := Syscall6(SYS_TEE, uintptr(rfd), uintptr(wfd), uintptr(len), uintptr(flags), 0, 0) + n = int64(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Tgkill(tgid int, tid int, sig syscall.Signal) (err error) { + _, _, e1 := RawSyscall(SYS_TGKILL, uintptr(tgid), uintptr(tid), uintptr(sig)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Times(tms *Tms) (ticks uintptr, err error) { + r0, _, e1 := RawSyscall(SYS_TIMES, uintptr(unsafe.Pointer(tms)), 0, 0) + ticks = uintptr(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Umask(mask int) (oldmask int) { + r0, _, _ := RawSyscall(SYS_UMASK, uintptr(mask), 0, 0) + oldmask = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Uname(buf *Utsname) (err error) { + _, _, e1 := RawSyscall(SYS_UNAME, uintptr(unsafe.Pointer(buf)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Unmount(target string, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(target) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_UMOUNT2, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Unshare(flags int) (err error) { + _, _, e1 := Syscall(SYS_UNSHARE, uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Ustat(dev int, ubuf *Ustat_t) (err error) { + _, _, e1 := Syscall(SYS_USTAT, uintptr(dev), uintptr(unsafe.Pointer(ubuf)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Utime(path string, buf *Utimbuf) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_UTIME, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(buf)), 0) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func write(fd int, p []byte) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(_p0), uintptr(len(p))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func exitThread(code int) (err error) { + _, _, e1 := Syscall(SYS_EXIT, uintptr(code), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func readlen(fd int, p *byte, np int) (n int, err error) { + r0, _, e1 := Syscall(SYS_READ, uintptr(fd), uintptr(unsafe.Pointer(p)), uintptr(np)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func writelen(fd int, p *byte, np int) (n int, err error) { + r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(unsafe.Pointer(p)), uintptr(np)) + n = int(r0) + if e1 != 0 { 
+ err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func munmap(addr uintptr, length uintptr) (err error) { + _, _, e1 := Syscall(SYS_MUNMAP, uintptr(addr), uintptr(length), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Madvise(b []byte, advice int) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall(SYS_MADVISE, uintptr(_p0), uintptr(len(b)), uintptr(advice)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mprotect(b []byte, prot int) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall(SYS_MPROTECT, uintptr(_p0), uintptr(len(b)), uintptr(prot)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mlock(b []byte) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall(SYS_MLOCK, uintptr(_p0), uintptr(len(b)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Munlock(b []byte) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall(SYS_MUNLOCK, uintptr(_p0), uintptr(len(b)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mlockall(flags int) (err error) { + _, _, e1 := Syscall(SYS_MLOCKALL, uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Munlockall() (err error) { + _, _, e1 := Syscall(SYS_MUNLOCKALL, 0, 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Dup2(oldfd int, newfd int) (err error) { + _, _, e1 := Syscall(SYS_DUP2, uintptr(oldfd), uintptr(newfd), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fadvise(fd int, offset int64, length int64, advice int) (err error) { + _, _, e1 := Syscall6(SYS_FADVISE64, uintptr(fd), uintptr(offset), uintptr(length), uintptr(advice), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fchown(fd int, uid int, gid int) (err error) { + _, _, e1 := Syscall(SYS_FCHOWN, uintptr(fd), uintptr(uid), uintptr(gid)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fstat(fd int, stat *Stat_t) (err error) { + _, _, e1 := Syscall(SYS_FSTAT, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fstatfs(fd int, buf *Statfs_t) (err error) { + _, _, e1 := Syscall(SYS_FSTATFS, uintptr(fd), uintptr(unsafe.Pointer(buf)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Ftruncate(fd int, length int64) (err 
error) { + _, _, e1 := Syscall(SYS_FTRUNCATE, uintptr(fd), uintptr(length), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getegid() (egid int) { + r0, _, _ := RawSyscall(SYS_GETEGID, 0, 0, 0) + egid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Geteuid() (euid int) { + r0, _, _ := RawSyscall(SYS_GETEUID, 0, 0, 0) + euid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getgid() (gid int) { + r0, _, _ := RawSyscall(SYS_GETGID, 0, 0, 0) + gid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getrlimit(resource int, rlim *Rlimit) (err error) { + _, _, e1 := RawSyscall(SYS_GETRLIMIT, uintptr(resource), uintptr(unsafe.Pointer(rlim)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getuid() (uid int) { + r0, _, _ := RawSyscall(SYS_GETUID, 0, 0, 0) + uid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func InotifyInit() (fd int, err error) { + r0, _, e1 := RawSyscall(SYS_INOTIFY_INIT, 0, 0, 0) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Ioperm(from int, num int, on int) (err error) { + _, _, e1 := Syscall(SYS_IOPERM, uintptr(from), uintptr(num), uintptr(on)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Iopl(level int) (err error) { + _, _, e1 := Syscall(SYS_IOPL, uintptr(level), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Lchown(path string, uid int, gid int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_LCHOWN, uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid)) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Listen(s int, n int) (err error) { + _, _, e1 := Syscall(SYS_LISTEN, uintptr(s), uintptr(n), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Lstat(path string, stat *Stat_t) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_LSTAT, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Pread(fd int, p []byte, offset int64) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_PREAD64, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(offset), 0, 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Pwrite(fd int, p []byte, offset int64) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_PWRITE64, uintptr(fd), uintptr(_p0), uintptr(len(p)), 
uintptr(offset), 0, 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Seek(fd int, offset int64, whence int) (off int64, err error) { + r0, _, e1 := Syscall(SYS_LSEEK, uintptr(fd), uintptr(offset), uintptr(whence)) + off = int64(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err error) { + r0, _, e1 := Syscall6(SYS_SELECT, uintptr(nfd), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) { + r0, _, e1 := Syscall6(SYS_SENDFILE, uintptr(outfd), uintptr(infd), uintptr(unsafe.Pointer(offset)), uintptr(count), 0, 0) + written = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setfsgid(gid int) (err error) { + _, _, e1 := Syscall(SYS_SETFSGID, uintptr(gid), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setfsuid(uid int) (err error) { + _, _, e1 := Syscall(SYS_SETFSUID, uintptr(uid), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setregid(rgid int, egid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETREGID, uintptr(rgid), uintptr(egid), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setresgid(rgid int, egid int, sgid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETRESGID, uintptr(rgid), uintptr(egid), uintptr(sgid)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setresuid(ruid int, euid int, suid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETRESUID, uintptr(ruid), uintptr(euid), uintptr(suid)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setrlimit(resource int, rlim *Rlimit) (err error) { + _, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(resource), uintptr(unsafe.Pointer(rlim)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setreuid(ruid int, euid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETREUID, uintptr(ruid), uintptr(euid), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Shutdown(fd int, how int) (err error) { + _, _, e1 := Syscall(SYS_SHUTDOWN, uintptr(fd), uintptr(how), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Splice(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n int64, err error) { + r0, _, e1 := Syscall6(SYS_SPLICE, uintptr(rfd), uintptr(unsafe.Pointer(roff)), uintptr(wfd), uintptr(unsafe.Pointer(woff)), uintptr(len), uintptr(flags)) + n = int64(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT 
+ +func Stat(path string, stat *Stat_t) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_STAT, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Statfs(path string, buf *Statfs_t) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_STATFS, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(buf)), 0) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func SyncFileRange(fd int, off int64, n int64, flags int) (err error) { + _, _, e1 := Syscall6(SYS_SYNC_FILE_RANGE, uintptr(fd), uintptr(off), uintptr(n), uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Truncate(path string, length int64) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_TRUNCATE, uintptr(unsafe.Pointer(_p0)), uintptr(length), 0) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error) { + r0, _, e1 := Syscall(SYS_ACCEPT, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func accept4(s int, rsa *RawSockaddrAny, addrlen *_Socklen, flags int) (fd int, err error) { + r0, _, e1 := Syscall6(SYS_ACCEPT4, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)), uintptr(flags), 0, 0) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func bind(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) { + _, _, e1 := Syscall(SYS_BIND, uintptr(s), uintptr(addr), uintptr(addrlen)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func connect(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) { + _, _, e1 := Syscall(SYS_CONNECT, uintptr(s), uintptr(addr), uintptr(addrlen)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getgroups(n int, list *_Gid_t) (nn int, err error) { + r0, _, e1 := RawSyscall(SYS_GETGROUPS, uintptr(n), uintptr(unsafe.Pointer(list)), 0) + nn = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func setgroups(n int, list *_Gid_t) (err error) { + _, _, e1 := RawSyscall(SYS_SETGROUPS, uintptr(n), uintptr(unsafe.Pointer(list)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getsockopt(s int, level int, name int, val unsafe.Pointer, vallen *_Socklen) (err error) { + _, _, e1 := Syscall6(SYS_GETSOCKOPT, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(unsafe.Pointer(vallen)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT 
THE TOP; DO NOT EDIT + +func setsockopt(s int, level int, name int, val unsafe.Pointer, vallen uintptr) (err error) { + _, _, e1 := Syscall6(SYS_SETSOCKOPT, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(vallen), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func socket(domain int, typ int, proto int) (fd int, err error) { + r0, _, e1 := RawSyscall(SYS_SOCKET, uintptr(domain), uintptr(typ), uintptr(proto)) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func socketpair(domain int, typ int, proto int, fd *[2]int32) (err error) { + _, _, e1 := RawSyscall6(SYS_SOCKETPAIR, uintptr(domain), uintptr(typ), uintptr(proto), uintptr(unsafe.Pointer(fd)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getpeername(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) { + _, _, e1 := RawSyscall(SYS_GETPEERNAME, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getsockname(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) { + _, _, e1 := RawSyscall(SYS_GETSOCKNAME, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Socklen) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_RECVFROM, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(flags), uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(fromlen))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func sendto(s int, buf []byte, flags int, to unsafe.Pointer, addrlen _Socklen) (err error) { + var _p0 unsafe.Pointer + if len(buf) > 0 { + _p0 = unsafe.Pointer(&buf[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall6(SYS_SENDTO, uintptr(s), uintptr(_p0), uintptr(len(buf)), uintptr(flags), uintptr(to), uintptr(addrlen)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func recvmsg(s int, msg *Msghdr, flags int) (n int, err error) { + r0, _, e1 := Syscall(SYS_RECVMSG, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func sendmsg(s int, msg *Msghdr, flags int) (n int, err error) { + r0, _, e1 := Syscall(SYS_SENDMSG, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func mmap(addr uintptr, length uintptr, prot int, flags int, fd int, offset int64) (xaddr uintptr, err error) { + r0, _, e1 := Syscall6(SYS_MMAP, uintptr(addr), uintptr(length), uintptr(prot), uintptr(flags), uintptr(fd), uintptr(offset)) + xaddr = uintptr(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT 
THE TOP; DO NOT EDIT + +func pipe(p *[2]_C_int) (err error) { + _, _, e1 := RawSyscall(SYS_PIPE, uintptr(unsafe.Pointer(p)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func pipe2(p *[2]_C_int, flags int) (err error) { + _, _, e1 := RawSyscall(SYS_PIPE2, uintptr(unsafe.Pointer(p)), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} diff --git a/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zsyscall_linux_arm.go b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zsyscall_linux_arm.go new file mode 100644 index 000000000..dd66c9758 --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zsyscall_linux_arm.go @@ -0,0 +1,1756 @@ +// mksyscall.pl -l32 -arm syscall_linux.go syscall_linux_arm.go +// MACHINE GENERATED BY THE COMMAND ABOVE; DO NOT EDIT + +// +build arm,linux + +package unix + +import ( + "syscall" + "unsafe" +) + +var _ syscall.Errno + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func linkat(olddirfd int, oldpath string, newdirfd int, newpath string, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(oldpath) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(newpath) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_LINKAT, uintptr(olddirfd), uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1)), uintptr(flags), 0) + use(unsafe.Pointer(_p0)) + use(unsafe.Pointer(_p1)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func openat(dirfd int, path string, flags int, mode uint32) (fd int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + r0, _, e1 := Syscall6(SYS_OPENAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags), uintptr(mode), 0, 0) + use(unsafe.Pointer(_p0)) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func readlinkat(dirfd int, path string, buf []byte) (n int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 unsafe.Pointer + if len(buf) > 0 { + _p1 = unsafe.Pointer(&buf[0]) + } else { + _p1 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_READLINKAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(buf)), 0, 0) + use(unsafe.Pointer(_p0)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func symlinkat(oldpath string, newdirfd int, newpath string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(oldpath) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(newpath) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_SYMLINKAT, uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1))) + use(unsafe.Pointer(_p0)) + use(unsafe.Pointer(_p1)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func unlinkat(dirfd int, path string, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_UNLINKAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags)) + use(unsafe.Pointer(_p0)) + if 
e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func utimes(path string, times *[2]Timeval) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_UTIMES, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(times)), 0) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_UTIMENSAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(times)), uintptr(flags), 0, 0) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func futimesat(dirfd int, path *byte, times *[2]Timeval) (err error) { + _, _, e1 := Syscall(SYS_FUTIMESAT, uintptr(dirfd), uintptr(unsafe.Pointer(path)), uintptr(unsafe.Pointer(times))) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getcwd(buf []byte) (n int, err error) { + var _p0 unsafe.Pointer + if len(buf) > 0 { + _p0 = unsafe.Pointer(&buf[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS_GETCWD, uintptr(_p0), uintptr(len(buf)), 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func wait4(pid int, wstatus *_C_int, options int, rusage *Rusage) (wpid int, err error) { + r0, _, e1 := Syscall6(SYS_WAIT4, uintptr(pid), uintptr(unsafe.Pointer(wstatus)), uintptr(options), uintptr(unsafe.Pointer(rusage)), 0, 0) + wpid = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ptrace(request int, pid int, addr uintptr, data uintptr) (err error) { + _, _, e1 := Syscall6(SYS_PTRACE, uintptr(request), uintptr(pid), uintptr(addr), uintptr(data), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func reboot(magic1 uint, magic2 uint, cmd int, arg string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(arg) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_REBOOT, uintptr(magic1), uintptr(magic2), uintptr(cmd), uintptr(unsafe.Pointer(_p0)), 0, 0) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func mount(source string, target string, fstype string, flags uintptr, data *byte) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(source) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(target) + if err != nil { + return + } + var _p2 *byte + _p2, err = BytePtrFromString(fstype) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_MOUNT, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(unsafe.Pointer(_p2)), uintptr(flags), uintptr(unsafe.Pointer(data)), 0) + use(unsafe.Pointer(_p0)) + use(unsafe.Pointer(_p1)) + use(unsafe.Pointer(_p2)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Acct(path string) (err error) { + var _p0 *byte + _p0, err = 
BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_ACCT, uintptr(unsafe.Pointer(_p0)), 0, 0) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Adjtimex(buf *Timex) (state int, err error) { + r0, _, e1 := Syscall(SYS_ADJTIMEX, uintptr(unsafe.Pointer(buf)), 0, 0) + state = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Chdir(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_CHDIR, uintptr(unsafe.Pointer(_p0)), 0, 0) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Chroot(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_CHROOT, uintptr(unsafe.Pointer(_p0)), 0, 0) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ClockGettime(clockid int32, time *Timespec) (err error) { + _, _, e1 := Syscall(SYS_CLOCK_GETTIME, uintptr(clockid), uintptr(unsafe.Pointer(time)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Close(fd int) (err error) { + _, _, e1 := Syscall(SYS_CLOSE, uintptr(fd), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Dup(oldfd int) (fd int, err error) { + r0, _, e1 := Syscall(SYS_DUP, uintptr(oldfd), 0, 0) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Dup3(oldfd int, newfd int, flags int) (err error) { + _, _, e1 := Syscall(SYS_DUP3, uintptr(oldfd), uintptr(newfd), uintptr(flags)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func EpollCreate(size int) (fd int, err error) { + r0, _, e1 := RawSyscall(SYS_EPOLL_CREATE, uintptr(size), 0, 0) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func EpollCreate1(flag int) (fd int, err error) { + r0, _, e1 := RawSyscall(SYS_EPOLL_CREATE1, uintptr(flag), 0, 0) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func EpollCtl(epfd int, op int, fd int, event *EpollEvent) (err error) { + _, _, e1 := RawSyscall6(SYS_EPOLL_CTL, uintptr(epfd), uintptr(op), uintptr(fd), uintptr(unsafe.Pointer(event)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) { + var _p0 unsafe.Pointer + if len(events) > 0 { + _p0 = unsafe.Pointer(&events[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_EPOLL_WAIT, uintptr(epfd), uintptr(_p0), uintptr(len(events)), uintptr(msec), 0, 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Exit(code int) { + Syscall(SYS_EXIT_GROUP, uintptr(code), 0, 0) + 
return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Faccessat(dirfd int, path string, mode uint32, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_FACCESSAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fallocate(fd int, mode uint32, off int64, len int64) (err error) { + _, _, e1 := Syscall6(SYS_FALLOCATE, uintptr(fd), uintptr(mode), uintptr(off), uintptr(off>>32), uintptr(len), uintptr(len>>32)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fchdir(fd int) (err error) { + _, _, e1 := Syscall(SYS_FCHDIR, uintptr(fd), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fchmod(fd int, mode uint32) (err error) { + _, _, e1 := Syscall(SYS_FCHMOD, uintptr(fd), uintptr(mode), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fchmodat(dirfd int, path string, mode uint32, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_FCHMODAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fchownat(dirfd int, path string, uid int, gid int, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_FCHOWNAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid), uintptr(flags), 0) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func fcntl(fd int, cmd int, arg int) (val int, err error) { + r0, _, e1 := Syscall(SYS_FCNTL, uintptr(fd), uintptr(cmd), uintptr(arg)) + val = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fdatasync(fd int) (err error) { + _, _, e1 := Syscall(SYS_FDATASYNC, uintptr(fd), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Flock(fd int, how int) (err error) { + _, _, e1 := Syscall(SYS_FLOCK, uintptr(fd), uintptr(how), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fsync(fd int) (err error) { + _, _, e1 := Syscall(SYS_FSYNC, uintptr(fd), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getdents(fd int, buf []byte) (n int, err error) { + var _p0 unsafe.Pointer + if len(buf) > 0 { + _p0 = unsafe.Pointer(&buf[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS_GETDENTS64, uintptr(fd), uintptr(_p0), uintptr(len(buf))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getpgid(pid int) (pgid int, err error) { + r0, _, e1 := 
RawSyscall(SYS_GETPGID, uintptr(pid), 0, 0) + pgid = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getpid() (pid int) { + r0, _, _ := RawSyscall(SYS_GETPID, 0, 0, 0) + pid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getppid() (ppid int) { + r0, _, _ := RawSyscall(SYS_GETPPID, 0, 0, 0) + ppid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getpriority(which int, who int) (prio int, err error) { + r0, _, e1 := Syscall(SYS_GETPRIORITY, uintptr(which), uintptr(who), 0) + prio = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getrusage(who int, rusage *Rusage) (err error) { + _, _, e1 := RawSyscall(SYS_GETRUSAGE, uintptr(who), uintptr(unsafe.Pointer(rusage)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Gettid() (tid int) { + r0, _, _ := RawSyscall(SYS_GETTID, 0, 0, 0) + tid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getxattr(path string, attr string, dest []byte) (sz int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(attr) + if err != nil { + return + } + var _p2 unsafe.Pointer + if len(dest) > 0 { + _p2 = unsafe.Pointer(&dest[0]) + } else { + _p2 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_GETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(dest)), 0, 0) + use(unsafe.Pointer(_p0)) + use(unsafe.Pointer(_p1)) + sz = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func InotifyAddWatch(fd int, pathname string, mask uint32) (watchdesc int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(pathname) + if err != nil { + return + } + r0, _, e1 := Syscall(SYS_INOTIFY_ADD_WATCH, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(mask)) + use(unsafe.Pointer(_p0)) + watchdesc = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func InotifyInit1(flags int) (fd int, err error) { + r0, _, e1 := RawSyscall(SYS_INOTIFY_INIT1, uintptr(flags), 0, 0) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func InotifyRmWatch(fd int, watchdesc uint32) (success int, err error) { + r0, _, e1 := RawSyscall(SYS_INOTIFY_RM_WATCH, uintptr(fd), uintptr(watchdesc), 0) + success = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Kill(pid int, sig syscall.Signal) (err error) { + _, _, e1 := RawSyscall(SYS_KILL, uintptr(pid), uintptr(sig), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Klogctl(typ int, buf []byte) (n int, err error) { + var _p0 unsafe.Pointer + if len(buf) > 0 { + _p0 = unsafe.Pointer(&buf[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS_SYSLOG, uintptr(typ), uintptr(_p0), uintptr(len(buf))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE 
COMMAND AT THE TOP; DO NOT EDIT + +func Listxattr(path string, dest []byte) (sz int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 unsafe.Pointer + if len(dest) > 0 { + _p1 = unsafe.Pointer(&dest[0]) + } else { + _p1 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS_LISTXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(dest))) + use(unsafe.Pointer(_p0)) + sz = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mkdirat(dirfd int, path string, mode uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_MKDIRAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode)) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mknodat(dirfd int, path string, mode uint32, dev int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_MKNODAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev), 0, 0) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Nanosleep(time *Timespec, leftover *Timespec) (err error) { + _, _, e1 := Syscall(SYS_NANOSLEEP, uintptr(unsafe.Pointer(time)), uintptr(unsafe.Pointer(leftover)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Pause() (err error) { + _, _, e1 := Syscall(SYS_PAUSE, 0, 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func PivotRoot(newroot string, putold string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(newroot) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(putold) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_PIVOT_ROOT, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) + use(unsafe.Pointer(_p0)) + use(unsafe.Pointer(_p1)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func prlimit(pid int, resource int, old *Rlimit, newlimit *Rlimit) (err error) { + _, _, e1 := RawSyscall6(SYS_PRLIMIT64, uintptr(pid), uintptr(resource), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(newlimit)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Prctl(option int, arg2 uintptr, arg3 uintptr, arg4 uintptr, arg5 uintptr) (err error) { + _, _, e1 := Syscall6(SYS_PRCTL, uintptr(option), uintptr(arg2), uintptr(arg3), uintptr(arg4), uintptr(arg5), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func read(fd int, p []byte) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS_READ, uintptr(fd), uintptr(_p0), uintptr(len(p))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Removexattr(path string, attr string) (err error) { + var _p0 *byte + _p0, err = 
BytePtrFromString(path) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(attr) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_REMOVEXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) + use(unsafe.Pointer(_p0)) + use(unsafe.Pointer(_p1)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Renameat(olddirfd int, oldpath string, newdirfd int, newpath string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(oldpath) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(newpath) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_RENAMEAT, uintptr(olddirfd), uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1)), 0, 0) + use(unsafe.Pointer(_p0)) + use(unsafe.Pointer(_p1)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setdomainname(p []byte) (err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall(SYS_SETDOMAINNAME, uintptr(_p0), uintptr(len(p)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Sethostname(p []byte) (err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall(SYS_SETHOSTNAME, uintptr(_p0), uintptr(len(p)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setpgid(pid int, pgid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETPGID, uintptr(pid), uintptr(pgid), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setsid() (pid int, err error) { + r0, _, e1 := RawSyscall(SYS_SETSID, 0, 0, 0) + pid = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Settimeofday(tv *Timeval) (err error) { + _, _, e1 := RawSyscall(SYS_SETTIMEOFDAY, uintptr(unsafe.Pointer(tv)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setpriority(which int, who int, prio int) (err error) { + _, _, e1 := Syscall(SYS_SETPRIORITY, uintptr(which), uintptr(who), uintptr(prio)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setxattr(path string, attr string, data []byte, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(attr) + if err != nil { + return + } + var _p2 unsafe.Pointer + if len(data) > 0 { + _p2 = unsafe.Pointer(&data[0]) + } else { + _p2 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall6(SYS_SETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(data)), uintptr(flags), 0) + use(unsafe.Pointer(_p0)) + use(unsafe.Pointer(_p1)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Sync() { + Syscall(SYS_SYNC, 0, 0, 0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Sysinfo(info *Sysinfo_t) (err 
error) { + _, _, e1 := RawSyscall(SYS_SYSINFO, uintptr(unsafe.Pointer(info)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Tee(rfd int, wfd int, len int, flags int) (n int64, err error) { + r0, r1, e1 := Syscall6(SYS_TEE, uintptr(rfd), uintptr(wfd), uintptr(len), uintptr(flags), 0, 0) + n = int64(int64(r1)<<32 | int64(r0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Tgkill(tgid int, tid int, sig syscall.Signal) (err error) { + _, _, e1 := RawSyscall(SYS_TGKILL, uintptr(tgid), uintptr(tid), uintptr(sig)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Times(tms *Tms) (ticks uintptr, err error) { + r0, _, e1 := RawSyscall(SYS_TIMES, uintptr(unsafe.Pointer(tms)), 0, 0) + ticks = uintptr(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Umask(mask int) (oldmask int) { + r0, _, _ := RawSyscall(SYS_UMASK, uintptr(mask), 0, 0) + oldmask = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Uname(buf *Utsname) (err error) { + _, _, e1 := RawSyscall(SYS_UNAME, uintptr(unsafe.Pointer(buf)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Unmount(target string, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(target) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_UMOUNT2, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Unshare(flags int) (err error) { + _, _, e1 := Syscall(SYS_UNSHARE, uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Ustat(dev int, ubuf *Ustat_t) (err error) { + _, _, e1 := Syscall(SYS_USTAT, uintptr(dev), uintptr(unsafe.Pointer(ubuf)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Utime(path string, buf *Utimbuf) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_UTIME, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(buf)), 0) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func write(fd int, p []byte) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(_p0), uintptr(len(p))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func exitThread(code int) (err error) { + _, _, e1 := Syscall(SYS_EXIT, uintptr(code), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func readlen(fd int, p *byte, np int) (n int, err error) { + r0, _, e1 := Syscall(SYS_READ, uintptr(fd), uintptr(unsafe.Pointer(p)), uintptr(np)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS 
FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func writelen(fd int, p *byte, np int) (n int, err error) { + r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(unsafe.Pointer(p)), uintptr(np)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func munmap(addr uintptr, length uintptr) (err error) { + _, _, e1 := Syscall(SYS_MUNMAP, uintptr(addr), uintptr(length), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Madvise(b []byte, advice int) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall(SYS_MADVISE, uintptr(_p0), uintptr(len(b)), uintptr(advice)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mprotect(b []byte, prot int) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall(SYS_MPROTECT, uintptr(_p0), uintptr(len(b)), uintptr(prot)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mlock(b []byte) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall(SYS_MLOCK, uintptr(_p0), uintptr(len(b)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Munlock(b []byte) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall(SYS_MUNLOCK, uintptr(_p0), uintptr(len(b)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mlockall(flags int) (err error) { + _, _, e1 := Syscall(SYS_MLOCKALL, uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Munlockall() (err error) { + _, _, e1 := Syscall(SYS_MUNLOCKALL, 0, 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func pipe2(p *[2]_C_int, flags int) (err error) { + _, _, e1 := RawSyscall(SYS_PIPE2, uintptr(unsafe.Pointer(p)), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error) { + r0, _, e1 := Syscall(SYS_ACCEPT, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func accept4(s int, rsa *RawSockaddrAny, addrlen *_Socklen, flags int) (fd int, err error) { + r0, _, e1 := Syscall6(SYS_ACCEPT4, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)), uintptr(flags), 0, 0) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func bind(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) { + _, _, e1 := Syscall(SYS_BIND, uintptr(s), uintptr(addr), uintptr(addrlen)) + 
if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func connect(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) { + _, _, e1 := Syscall(SYS_CONNECT, uintptr(s), uintptr(addr), uintptr(addrlen)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getgroups(n int, list *_Gid_t) (nn int, err error) { + r0, _, e1 := RawSyscall(SYS_GETGROUPS32, uintptr(n), uintptr(unsafe.Pointer(list)), 0) + nn = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func setgroups(n int, list *_Gid_t) (err error) { + _, _, e1 := RawSyscall(SYS_SETGROUPS32, uintptr(n), uintptr(unsafe.Pointer(list)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getsockopt(s int, level int, name int, val unsafe.Pointer, vallen *_Socklen) (err error) { + _, _, e1 := Syscall6(SYS_GETSOCKOPT, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(unsafe.Pointer(vallen)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func setsockopt(s int, level int, name int, val unsafe.Pointer, vallen uintptr) (err error) { + _, _, e1 := Syscall6(SYS_SETSOCKOPT, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(vallen), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func socket(domain int, typ int, proto int) (fd int, err error) { + r0, _, e1 := RawSyscall(SYS_SOCKET, uintptr(domain), uintptr(typ), uintptr(proto)) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getpeername(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) { + _, _, e1 := RawSyscall(SYS_GETPEERNAME, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getsockname(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) { + _, _, e1 := RawSyscall(SYS_GETSOCKNAME, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Socklen) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_RECVFROM, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(flags), uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(fromlen))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func sendto(s int, buf []byte, flags int, to unsafe.Pointer, addrlen _Socklen) (err error) { + var _p0 unsafe.Pointer + if len(buf) > 0 { + _p0 = unsafe.Pointer(&buf[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall6(SYS_SENDTO, uintptr(s), uintptr(_p0), uintptr(len(buf)), uintptr(flags), uintptr(to), uintptr(addrlen)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func 
socketpair(domain int, typ int, flags int, fd *[2]int32) (err error) { + _, _, e1 := RawSyscall6(SYS_SOCKETPAIR, uintptr(domain), uintptr(typ), uintptr(flags), uintptr(unsafe.Pointer(fd)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func recvmsg(s int, msg *Msghdr, flags int) (n int, err error) { + r0, _, e1 := Syscall(SYS_RECVMSG, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func sendmsg(s int, msg *Msghdr, flags int) (n int, err error) { + r0, _, e1 := Syscall(SYS_SENDMSG, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Dup2(oldfd int, newfd int) (err error) { + _, _, e1 := Syscall(SYS_DUP2, uintptr(oldfd), uintptr(newfd), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fchown(fd int, uid int, gid int) (err error) { + _, _, e1 := Syscall(SYS_FCHOWN32, uintptr(fd), uintptr(uid), uintptr(gid)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fstat(fd int, stat *Stat_t) (err error) { + _, _, e1 := Syscall(SYS_FSTAT64, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getegid() (egid int) { + r0, _, _ := RawSyscall(SYS_GETEGID32, 0, 0, 0) + egid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Geteuid() (euid int) { + r0, _, _ := RawSyscall(SYS_GETEUID32, 0, 0, 0) + euid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getgid() (gid int) { + r0, _, _ := RawSyscall(SYS_GETGID32, 0, 0, 0) + gid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getuid() (uid int) { + r0, _, _ := RawSyscall(SYS_GETUID32, 0, 0, 0) + uid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func InotifyInit() (fd int, err error) { + r0, _, e1 := RawSyscall(SYS_INOTIFY_INIT, 0, 0, 0) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Lchown(path string, uid int, gid int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_LCHOWN32, uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid)) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Listen(s int, n int) (err error) { + _, _, e1 := Syscall(SYS_LISTEN, uintptr(s), uintptr(n), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Lstat(path string, stat *Stat_t) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_LSTAT64, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT 
THE TOP; DO NOT EDIT + +func sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) { + r0, _, e1 := Syscall6(SYS_SENDFILE64, uintptr(outfd), uintptr(infd), uintptr(unsafe.Pointer(offset)), uintptr(count), 0, 0) + written = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err error) { + r0, _, e1 := Syscall6(SYS__NEWSELECT, uintptr(nfd), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setfsgid(gid int) (err error) { + _, _, e1 := Syscall(SYS_SETFSGID32, uintptr(gid), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setfsuid(uid int) (err error) { + _, _, e1 := Syscall(SYS_SETFSUID32, uintptr(uid), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setregid(rgid int, egid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETREGID32, uintptr(rgid), uintptr(egid), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setresgid(rgid int, egid int, sgid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETRESGID32, uintptr(rgid), uintptr(egid), uintptr(sgid)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setresuid(ruid int, euid int, suid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETRESUID32, uintptr(ruid), uintptr(euid), uintptr(suid)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setreuid(ruid int, euid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETREUID32, uintptr(ruid), uintptr(euid), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Shutdown(fd int, how int) (err error) { + _, _, e1 := Syscall(SYS_SHUTDOWN, uintptr(fd), uintptr(how), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Splice(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n int, err error) { + r0, _, e1 := Syscall6(SYS_SPLICE, uintptr(rfd), uintptr(unsafe.Pointer(roff)), uintptr(wfd), uintptr(unsafe.Pointer(woff)), uintptr(len), uintptr(flags)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Stat(path string, stat *Stat_t) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_STAT64, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Gettimeofday(tv *Timeval) (err error) { + _, _, e1 := RawSyscall(SYS_GETTIMEOFDAY, uintptr(unsafe.Pointer(tv)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Time(t *Time_t) (tt Time_t, err error) { + r0, _, 
e1 := RawSyscall(SYS_TIME, uintptr(unsafe.Pointer(t)), 0, 0) + tt = Time_t(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Pread(fd int, p []byte, offset int64) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_PREAD64, uintptr(fd), uintptr(_p0), uintptr(len(p)), 0, uintptr(offset), uintptr(offset>>32)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Pwrite(fd int, p []byte, offset int64) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_PWRITE64, uintptr(fd), uintptr(_p0), uintptr(len(p)), 0, uintptr(offset), uintptr(offset>>32)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Truncate(path string, length int64) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_TRUNCATE64, uintptr(unsafe.Pointer(_p0)), 0, uintptr(length), uintptr(length>>32), 0, 0) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Ftruncate(fd int, length int64) (err error) { + _, _, e1 := Syscall6(SYS_FTRUNCATE64, uintptr(fd), 0, uintptr(length), uintptr(length>>32), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func mmap2(addr uintptr, length uintptr, prot int, flags int, fd int, pageOffset uintptr) (xaddr uintptr, err error) { + r0, _, e1 := Syscall6(SYS_MMAP2, uintptr(addr), uintptr(length), uintptr(prot), uintptr(flags), uintptr(fd), uintptr(pageOffset)) + xaddr = uintptr(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getrlimit(resource int, rlim *rlimit32) (err error) { + _, _, e1 := RawSyscall(SYS_GETRLIMIT, uintptr(resource), uintptr(unsafe.Pointer(rlim)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func setrlimit(resource int, rlim *rlimit32) (err error) { + _, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(resource), uintptr(unsafe.Pointer(rlim)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} diff --git a/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zsyscall_linux_arm64.go b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zsyscall_linux_arm64.go new file mode 100644 index 000000000..d0a6ed829 --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zsyscall_linux_arm64.go @@ -0,0 +1,1750 @@ +// mksyscall.pl syscall_linux.go syscall_linux_arm64.go +// MACHINE GENERATED BY THE COMMAND ABOVE; DO NOT EDIT + +// +build arm64,linux + +package unix + +import ( + "syscall" + "unsafe" +) + +var _ syscall.Errno + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func linkat(olddirfd int, oldpath string, newdirfd int, newpath string, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(oldpath) + if err != nil { + return + } + var _p1 *byte + _p1, err = 
BytePtrFromString(newpath) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_LINKAT, uintptr(olddirfd), uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1)), uintptr(flags), 0) + use(unsafe.Pointer(_p0)) + use(unsafe.Pointer(_p1)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func openat(dirfd int, path string, flags int, mode uint32) (fd int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + r0, _, e1 := Syscall6(SYS_OPENAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags), uintptr(mode), 0, 0) + use(unsafe.Pointer(_p0)) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func readlinkat(dirfd int, path string, buf []byte) (n int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 unsafe.Pointer + if len(buf) > 0 { + _p1 = unsafe.Pointer(&buf[0]) + } else { + _p1 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_READLINKAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(buf)), 0, 0) + use(unsafe.Pointer(_p0)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func symlinkat(oldpath string, newdirfd int, newpath string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(oldpath) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(newpath) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_SYMLINKAT, uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1))) + use(unsafe.Pointer(_p0)) + use(unsafe.Pointer(_p1)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func unlinkat(dirfd int, path string, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_UNLINKAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags)) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func utimes(path string, times *[2]Timeval) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_UTIMES, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(times)), 0) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_UTIMENSAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(times)), uintptr(flags), 0, 0) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func futimesat(dirfd int, path *byte, times *[2]Timeval) (err error) { + _, _, e1 := Syscall(SYS_FUTIMESAT, uintptr(dirfd), uintptr(unsafe.Pointer(path)), uintptr(unsafe.Pointer(times))) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getcwd(buf []byte) (n int, err error) { + var _p0 
unsafe.Pointer + if len(buf) > 0 { + _p0 = unsafe.Pointer(&buf[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS_GETCWD, uintptr(_p0), uintptr(len(buf)), 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func wait4(pid int, wstatus *_C_int, options int, rusage *Rusage) (wpid int, err error) { + r0, _, e1 := Syscall6(SYS_WAIT4, uintptr(pid), uintptr(unsafe.Pointer(wstatus)), uintptr(options), uintptr(unsafe.Pointer(rusage)), 0, 0) + wpid = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ptrace(request int, pid int, addr uintptr, data uintptr) (err error) { + _, _, e1 := Syscall6(SYS_PTRACE, uintptr(request), uintptr(pid), uintptr(addr), uintptr(data), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func reboot(magic1 uint, magic2 uint, cmd int, arg string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(arg) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_REBOOT, uintptr(magic1), uintptr(magic2), uintptr(cmd), uintptr(unsafe.Pointer(_p0)), 0, 0) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func mount(source string, target string, fstype string, flags uintptr, data *byte) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(source) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(target) + if err != nil { + return + } + var _p2 *byte + _p2, err = BytePtrFromString(fstype) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_MOUNT, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(unsafe.Pointer(_p2)), uintptr(flags), uintptr(unsafe.Pointer(data)), 0) + use(unsafe.Pointer(_p0)) + use(unsafe.Pointer(_p1)) + use(unsafe.Pointer(_p2)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Acct(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_ACCT, uintptr(unsafe.Pointer(_p0)), 0, 0) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Adjtimex(buf *Timex) (state int, err error) { + r0, _, e1 := Syscall(SYS_ADJTIMEX, uintptr(unsafe.Pointer(buf)), 0, 0) + state = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Chdir(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_CHDIR, uintptr(unsafe.Pointer(_p0)), 0, 0) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Chroot(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_CHROOT, uintptr(unsafe.Pointer(_p0)), 0, 0) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ClockGettime(clockid int32, time *Timespec) (err error) { + _, _, e1 := Syscall(SYS_CLOCK_GETTIME, 
uintptr(clockid), uintptr(unsafe.Pointer(time)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Close(fd int) (err error) { + _, _, e1 := Syscall(SYS_CLOSE, uintptr(fd), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Dup(oldfd int) (fd int, err error) { + r0, _, e1 := Syscall(SYS_DUP, uintptr(oldfd), 0, 0) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Dup3(oldfd int, newfd int, flags int) (err error) { + _, _, e1 := Syscall(SYS_DUP3, uintptr(oldfd), uintptr(newfd), uintptr(flags)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func EpollCreate(size int) (fd int, err error) { + r0, _, e1 := RawSyscall(SYS_EPOLL_CREATE, uintptr(size), 0, 0) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func EpollCreate1(flag int) (fd int, err error) { + r0, _, e1 := RawSyscall(SYS_EPOLL_CREATE1, uintptr(flag), 0, 0) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func EpollCtl(epfd int, op int, fd int, event *EpollEvent) (err error) { + _, _, e1 := RawSyscall6(SYS_EPOLL_CTL, uintptr(epfd), uintptr(op), uintptr(fd), uintptr(unsafe.Pointer(event)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) { + var _p0 unsafe.Pointer + if len(events) > 0 { + _p0 = unsafe.Pointer(&events[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_EPOLL_WAIT, uintptr(epfd), uintptr(_p0), uintptr(len(events)), uintptr(msec), 0, 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Exit(code int) { + Syscall(SYS_EXIT_GROUP, uintptr(code), 0, 0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Faccessat(dirfd int, path string, mode uint32, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_FACCESSAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fallocate(fd int, mode uint32, off int64, len int64) (err error) { + _, _, e1 := Syscall6(SYS_FALLOCATE, uintptr(fd), uintptr(mode), uintptr(off), uintptr(len), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fchdir(fd int) (err error) { + _, _, e1 := Syscall(SYS_FCHDIR, uintptr(fd), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fchmod(fd int, mode uint32) (err error) { + _, _, e1 := Syscall(SYS_FCHMOD, uintptr(fd), uintptr(mode), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fchmodat(dirfd int, path string, mode uint32, flags int) (err error) { 
+ var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_FCHMODAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fchownat(dirfd int, path string, uid int, gid int, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_FCHOWNAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid), uintptr(flags), 0) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func fcntl(fd int, cmd int, arg int) (val int, err error) { + r0, _, e1 := Syscall(SYS_FCNTL, uintptr(fd), uintptr(cmd), uintptr(arg)) + val = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fdatasync(fd int) (err error) { + _, _, e1 := Syscall(SYS_FDATASYNC, uintptr(fd), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Flock(fd int, how int) (err error) { + _, _, e1 := Syscall(SYS_FLOCK, uintptr(fd), uintptr(how), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fsync(fd int) (err error) { + _, _, e1 := Syscall(SYS_FSYNC, uintptr(fd), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getdents(fd int, buf []byte) (n int, err error) { + var _p0 unsafe.Pointer + if len(buf) > 0 { + _p0 = unsafe.Pointer(&buf[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS_GETDENTS64, uintptr(fd), uintptr(_p0), uintptr(len(buf))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getpgid(pid int) (pgid int, err error) { + r0, _, e1 := RawSyscall(SYS_GETPGID, uintptr(pid), 0, 0) + pgid = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getpid() (pid int) { + r0, _, _ := RawSyscall(SYS_GETPID, 0, 0, 0) + pid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getppid() (ppid int) { + r0, _, _ := RawSyscall(SYS_GETPPID, 0, 0, 0) + ppid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getpriority(which int, who int) (prio int, err error) { + r0, _, e1 := Syscall(SYS_GETPRIORITY, uintptr(which), uintptr(who), 0) + prio = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getrusage(who int, rusage *Rusage) (err error) { + _, _, e1 := RawSyscall(SYS_GETRUSAGE, uintptr(who), uintptr(unsafe.Pointer(rusage)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Gettid() (tid int) { + r0, _, _ := RawSyscall(SYS_GETTID, 0, 0, 0) + tid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getxattr(path string, attr string, dest []byte) (sz int, err error) { + var _p0 *byte + _p0, err = 
BytePtrFromString(path) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(attr) + if err != nil { + return + } + var _p2 unsafe.Pointer + if len(dest) > 0 { + _p2 = unsafe.Pointer(&dest[0]) + } else { + _p2 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_GETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(dest)), 0, 0) + use(unsafe.Pointer(_p0)) + use(unsafe.Pointer(_p1)) + sz = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func InotifyAddWatch(fd int, pathname string, mask uint32) (watchdesc int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(pathname) + if err != nil { + return + } + r0, _, e1 := Syscall(SYS_INOTIFY_ADD_WATCH, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(mask)) + use(unsafe.Pointer(_p0)) + watchdesc = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func InotifyInit1(flags int) (fd int, err error) { + r0, _, e1 := RawSyscall(SYS_INOTIFY_INIT1, uintptr(flags), 0, 0) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func InotifyRmWatch(fd int, watchdesc uint32) (success int, err error) { + r0, _, e1 := RawSyscall(SYS_INOTIFY_RM_WATCH, uintptr(fd), uintptr(watchdesc), 0) + success = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Kill(pid int, sig syscall.Signal) (err error) { + _, _, e1 := RawSyscall(SYS_KILL, uintptr(pid), uintptr(sig), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Klogctl(typ int, buf []byte) (n int, err error) { + var _p0 unsafe.Pointer + if len(buf) > 0 { + _p0 = unsafe.Pointer(&buf[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS_SYSLOG, uintptr(typ), uintptr(_p0), uintptr(len(buf))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Listxattr(path string, dest []byte) (sz int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 unsafe.Pointer + if len(dest) > 0 { + _p1 = unsafe.Pointer(&dest[0]) + } else { + _p1 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS_LISTXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(dest))) + use(unsafe.Pointer(_p0)) + sz = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mkdirat(dirfd int, path string, mode uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_MKDIRAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode)) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mknodat(dirfd int, path string, mode uint32, dev int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_MKNODAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev), 0, 0) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS 
GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Nanosleep(time *Timespec, leftover *Timespec) (err error) { + _, _, e1 := Syscall(SYS_NANOSLEEP, uintptr(unsafe.Pointer(time)), uintptr(unsafe.Pointer(leftover)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Pause() (err error) { + _, _, e1 := Syscall(SYS_PAUSE, 0, 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func PivotRoot(newroot string, putold string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(newroot) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(putold) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_PIVOT_ROOT, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) + use(unsafe.Pointer(_p0)) + use(unsafe.Pointer(_p1)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func prlimit(pid int, resource int, old *Rlimit, newlimit *Rlimit) (err error) { + _, _, e1 := RawSyscall6(SYS_PRLIMIT64, uintptr(pid), uintptr(resource), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(newlimit)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Prctl(option int, arg2 uintptr, arg3 uintptr, arg4 uintptr, arg5 uintptr) (err error) { + _, _, e1 := Syscall6(SYS_PRCTL, uintptr(option), uintptr(arg2), uintptr(arg3), uintptr(arg4), uintptr(arg5), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func read(fd int, p []byte) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS_READ, uintptr(fd), uintptr(_p0), uintptr(len(p))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Removexattr(path string, attr string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(attr) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_REMOVEXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) + use(unsafe.Pointer(_p0)) + use(unsafe.Pointer(_p1)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Renameat(olddirfd int, oldpath string, newdirfd int, newpath string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(oldpath) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(newpath) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_RENAMEAT, uintptr(olddirfd), uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1)), 0, 0) + use(unsafe.Pointer(_p0)) + use(unsafe.Pointer(_p1)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setdomainname(p []byte) (err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall(SYS_SETDOMAINNAME, uintptr(_p0), uintptr(len(p)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + 
+func Sethostname(p []byte) (err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall(SYS_SETHOSTNAME, uintptr(_p0), uintptr(len(p)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setpgid(pid int, pgid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETPGID, uintptr(pid), uintptr(pgid), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setsid() (pid int, err error) { + r0, _, e1 := RawSyscall(SYS_SETSID, 0, 0, 0) + pid = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Settimeofday(tv *Timeval) (err error) { + _, _, e1 := RawSyscall(SYS_SETTIMEOFDAY, uintptr(unsafe.Pointer(tv)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setpriority(which int, who int, prio int) (err error) { + _, _, e1 := Syscall(SYS_SETPRIORITY, uintptr(which), uintptr(who), uintptr(prio)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setxattr(path string, attr string, data []byte, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(attr) + if err != nil { + return + } + var _p2 unsafe.Pointer + if len(data) > 0 { + _p2 = unsafe.Pointer(&data[0]) + } else { + _p2 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall6(SYS_SETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(data)), uintptr(flags), 0) + use(unsafe.Pointer(_p0)) + use(unsafe.Pointer(_p1)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Sync() { + Syscall(SYS_SYNC, 0, 0, 0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Sysinfo(info *Sysinfo_t) (err error) { + _, _, e1 := RawSyscall(SYS_SYSINFO, uintptr(unsafe.Pointer(info)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Tee(rfd int, wfd int, len int, flags int) (n int64, err error) { + r0, _, e1 := Syscall6(SYS_TEE, uintptr(rfd), uintptr(wfd), uintptr(len), uintptr(flags), 0, 0) + n = int64(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Tgkill(tgid int, tid int, sig syscall.Signal) (err error) { + _, _, e1 := RawSyscall(SYS_TGKILL, uintptr(tgid), uintptr(tid), uintptr(sig)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Times(tms *Tms) (ticks uintptr, err error) { + r0, _, e1 := RawSyscall(SYS_TIMES, uintptr(unsafe.Pointer(tms)), 0, 0) + ticks = uintptr(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Umask(mask int) (oldmask int) { + r0, _, _ := RawSyscall(SYS_UMASK, uintptr(mask), 0, 0) + oldmask = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Uname(buf *Utsname) (err error) { + _, _, e1 := RawSyscall(SYS_UNAME, uintptr(unsafe.Pointer(buf)), 0, 0) + if 
e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Unmount(target string, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(target) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_UMOUNT2, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Unshare(flags int) (err error) { + _, _, e1 := Syscall(SYS_UNSHARE, uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Ustat(dev int, ubuf *Ustat_t) (err error) { + _, _, e1 := Syscall(SYS_USTAT, uintptr(dev), uintptr(unsafe.Pointer(ubuf)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Utime(path string, buf *Utimbuf) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_UTIME, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(buf)), 0) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func write(fd int, p []byte) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(_p0), uintptr(len(p))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func exitThread(code int) (err error) { + _, _, e1 := Syscall(SYS_EXIT, uintptr(code), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func readlen(fd int, p *byte, np int) (n int, err error) { + r0, _, e1 := Syscall(SYS_READ, uintptr(fd), uintptr(unsafe.Pointer(p)), uintptr(np)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func writelen(fd int, p *byte, np int) (n int, err error) { + r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(unsafe.Pointer(p)), uintptr(np)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func munmap(addr uintptr, length uintptr) (err error) { + _, _, e1 := Syscall(SYS_MUNMAP, uintptr(addr), uintptr(length), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Madvise(b []byte, advice int) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall(SYS_MADVISE, uintptr(_p0), uintptr(len(b)), uintptr(advice)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mprotect(b []byte, prot int) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall(SYS_MPROTECT, uintptr(_p0), uintptr(len(b)), uintptr(prot)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mlock(b []byte) (err error) { + var _p0 
unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall(SYS_MLOCK, uintptr(_p0), uintptr(len(b)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Munlock(b []byte) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall(SYS_MUNLOCK, uintptr(_p0), uintptr(len(b)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mlockall(flags int) (err error) { + _, _, e1 := Syscall(SYS_MLOCKALL, uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Munlockall() (err error) { + _, _, e1 := Syscall(SYS_MUNLOCKALL, 0, 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fchown(fd int, uid int, gid int) (err error) { + _, _, e1 := Syscall(SYS_FCHOWN, uintptr(fd), uintptr(uid), uintptr(gid)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fstat(fd int, stat *Stat_t) (err error) { + _, _, e1 := Syscall(SYS_FSTAT, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fstatat(fd int, path string, stat *Stat_t, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_FSTATAT, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), uintptr(flags), 0, 0) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fstatfs(fd int, buf *Statfs_t) (err error) { + _, _, e1 := Syscall(SYS_FSTATFS, uintptr(fd), uintptr(unsafe.Pointer(buf)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Ftruncate(fd int, length int64) (err error) { + _, _, e1 := Syscall(SYS_FTRUNCATE, uintptr(fd), uintptr(length), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getegid() (egid int) { + r0, _, _ := RawSyscall(SYS_GETEGID, 0, 0, 0) + egid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Geteuid() (euid int) { + r0, _, _ := RawSyscall(SYS_GETEUID, 0, 0, 0) + euid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getgid() (gid int) { + r0, _, _ := RawSyscall(SYS_GETGID, 0, 0, 0) + gid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getrlimit(resource int, rlim *Rlimit) (err error) { + _, _, e1 := RawSyscall(SYS_GETRLIMIT, uintptr(resource), uintptr(unsafe.Pointer(rlim)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getuid() (uid int) { + r0, _, _ := RawSyscall(SYS_GETUID, 0, 0, 0) + uid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Listen(s int, n int) (err error) { + _, _, e1 := Syscall(SYS_LISTEN, 
uintptr(s), uintptr(n), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Pread(fd int, p []byte, offset int64) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_PREAD64, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(offset), 0, 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Pwrite(fd int, p []byte, offset int64) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_PWRITE64, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(offset), 0, 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Seek(fd int, offset int64, whence int) (off int64, err error) { + r0, _, e1 := Syscall(SYS_LSEEK, uintptr(fd), uintptr(offset), uintptr(whence)) + off = int64(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err error) { + r0, _, e1 := Syscall6(SYS_PSELECT6, uintptr(nfd), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) { + r0, _, e1 := Syscall6(SYS_SENDFILE, uintptr(outfd), uintptr(infd), uintptr(unsafe.Pointer(offset)), uintptr(count), 0, 0) + written = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setfsgid(gid int) (err error) { + _, _, e1 := Syscall(SYS_SETFSGID, uintptr(gid), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setfsuid(uid int) (err error) { + _, _, e1 := Syscall(SYS_SETFSUID, uintptr(uid), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setregid(rgid int, egid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETREGID, uintptr(rgid), uintptr(egid), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setresgid(rgid int, egid int, sgid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETRESGID, uintptr(rgid), uintptr(egid), uintptr(sgid)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setresuid(ruid int, euid int, suid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETRESUID, uintptr(ruid), uintptr(euid), uintptr(suid)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setrlimit(resource int, rlim *Rlimit) (err error) { + _, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(resource), uintptr(unsafe.Pointer(rlim)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setreuid(ruid int, 
euid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETREUID, uintptr(ruid), uintptr(euid), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Shutdown(fd int, how int) (err error) { + _, _, e1 := Syscall(SYS_SHUTDOWN, uintptr(fd), uintptr(how), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Splice(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n int64, err error) { + r0, _, e1 := Syscall6(SYS_SPLICE, uintptr(rfd), uintptr(unsafe.Pointer(roff)), uintptr(wfd), uintptr(unsafe.Pointer(woff)), uintptr(len), uintptr(flags)) + n = int64(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Statfs(path string, buf *Statfs_t) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_STATFS, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(buf)), 0) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func SyncFileRange(fd int, off int64, n int64, flags int) (err error) { + _, _, e1 := Syscall6(SYS_SYNC_FILE_RANGE, uintptr(fd), uintptr(off), uintptr(n), uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Truncate(path string, length int64) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_TRUNCATE, uintptr(unsafe.Pointer(_p0)), uintptr(length), 0) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error) { + r0, _, e1 := Syscall(SYS_ACCEPT, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func accept4(s int, rsa *RawSockaddrAny, addrlen *_Socklen, flags int) (fd int, err error) { + r0, _, e1 := Syscall6(SYS_ACCEPT4, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)), uintptr(flags), 0, 0) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func bind(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) { + _, _, e1 := Syscall(SYS_BIND, uintptr(s), uintptr(addr), uintptr(addrlen)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func connect(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) { + _, _, e1 := Syscall(SYS_CONNECT, uintptr(s), uintptr(addr), uintptr(addrlen)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getgroups(n int, list *_Gid_t) (nn int, err error) { + r0, _, e1 := RawSyscall(SYS_GETGROUPS, uintptr(n), uintptr(unsafe.Pointer(list)), 0) + nn = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func setgroups(n int, list *_Gid_t) (err error) { + _, _, e1 := RawSyscall(SYS_SETGROUPS, uintptr(n), 
uintptr(unsafe.Pointer(list)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getsockopt(s int, level int, name int, val unsafe.Pointer, vallen *_Socklen) (err error) { + _, _, e1 := Syscall6(SYS_GETSOCKOPT, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(unsafe.Pointer(vallen)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func setsockopt(s int, level int, name int, val unsafe.Pointer, vallen uintptr) (err error) { + _, _, e1 := Syscall6(SYS_SETSOCKOPT, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(vallen), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func socket(domain int, typ int, proto int) (fd int, err error) { + r0, _, e1 := RawSyscall(SYS_SOCKET, uintptr(domain), uintptr(typ), uintptr(proto)) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func socketpair(domain int, typ int, proto int, fd *[2]int32) (err error) { + _, _, e1 := RawSyscall6(SYS_SOCKETPAIR, uintptr(domain), uintptr(typ), uintptr(proto), uintptr(unsafe.Pointer(fd)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getpeername(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) { + _, _, e1 := RawSyscall(SYS_GETPEERNAME, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getsockname(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) { + _, _, e1 := RawSyscall(SYS_GETSOCKNAME, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Socklen) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_RECVFROM, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(flags), uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(fromlen))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func sendto(s int, buf []byte, flags int, to unsafe.Pointer, addrlen _Socklen) (err error) { + var _p0 unsafe.Pointer + if len(buf) > 0 { + _p0 = unsafe.Pointer(&buf[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall6(SYS_SENDTO, uintptr(s), uintptr(_p0), uintptr(len(buf)), uintptr(flags), uintptr(to), uintptr(addrlen)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func recvmsg(s int, msg *Msghdr, flags int) (n int, err error) { + r0, _, e1 := Syscall(SYS_RECVMSG, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func sendmsg(s int, msg *Msghdr, flags int) (n int, err error) { + r0, _, e1 := Syscall(SYS_SENDMSG, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags)) + n = int(r0) 
+ if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func mmap(addr uintptr, length uintptr, prot int, flags int, fd int, offset int64) (xaddr uintptr, err error) { + r0, _, e1 := Syscall6(SYS_MMAP, uintptr(addr), uintptr(length), uintptr(prot), uintptr(flags), uintptr(fd), uintptr(offset)) + xaddr = uintptr(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Gettimeofday(tv *Timeval) (err error) { + _, _, e1 := RawSyscall(SYS_GETTIMEOFDAY, uintptr(unsafe.Pointer(tv)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Time(t *Time_t) (tt Time_t, err error) { + r0, _, e1 := RawSyscall(SYS_TIME, uintptr(unsafe.Pointer(t)), 0, 0) + tt = Time_t(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func pipe2(p *[2]_C_int, flags int) (err error) { + _, _, e1 := RawSyscall(SYS_PIPE2, uintptr(unsafe.Pointer(p)), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} diff --git a/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zsyscall_linux_ppc64.go b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zsyscall_linux_ppc64.go new file mode 100644 index 000000000..f58a3ff2f --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zsyscall_linux_ppc64.go @@ -0,0 +1,1792 @@ +// mksyscall.pl syscall_linux.go syscall_linux_ppc64x.go +// MACHINE GENERATED BY THE COMMAND ABOVE; DO NOT EDIT + +// +build ppc64,linux + +package unix + +import ( + "syscall" + "unsafe" +) + +var _ syscall.Errno + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func linkat(olddirfd int, oldpath string, newdirfd int, newpath string, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(oldpath) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(newpath) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_LINKAT, uintptr(olddirfd), uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1)), uintptr(flags), 0) + use(unsafe.Pointer(_p0)) + use(unsafe.Pointer(_p1)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func openat(dirfd int, path string, flags int, mode uint32) (fd int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + r0, _, e1 := Syscall6(SYS_OPENAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags), uintptr(mode), 0, 0) + use(unsafe.Pointer(_p0)) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func readlinkat(dirfd int, path string, buf []byte) (n int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 unsafe.Pointer + if len(buf) > 0 { + _p1 = unsafe.Pointer(&buf[0]) + } else { + _p1 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_READLINKAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(buf)), 0, 0) + use(unsafe.Pointer(_p0)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func symlinkat(oldpath string, newdirfd int, newpath string) 
(err error) { + var _p0 *byte + _p0, err = BytePtrFromString(oldpath) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(newpath) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_SYMLINKAT, uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1))) + use(unsafe.Pointer(_p0)) + use(unsafe.Pointer(_p1)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func unlinkat(dirfd int, path string, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_UNLINKAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags)) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func utimes(path string, times *[2]Timeval) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_UTIMES, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(times)), 0) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_UTIMENSAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(times)), uintptr(flags), 0, 0) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func futimesat(dirfd int, path *byte, times *[2]Timeval) (err error) { + _, _, e1 := Syscall(SYS_FUTIMESAT, uintptr(dirfd), uintptr(unsafe.Pointer(path)), uintptr(unsafe.Pointer(times))) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getcwd(buf []byte) (n int, err error) { + var _p0 unsafe.Pointer + if len(buf) > 0 { + _p0 = unsafe.Pointer(&buf[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS_GETCWD, uintptr(_p0), uintptr(len(buf)), 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func wait4(pid int, wstatus *_C_int, options int, rusage *Rusage) (wpid int, err error) { + r0, _, e1 := Syscall6(SYS_WAIT4, uintptr(pid), uintptr(unsafe.Pointer(wstatus)), uintptr(options), uintptr(unsafe.Pointer(rusage)), 0, 0) + wpid = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ptrace(request int, pid int, addr uintptr, data uintptr) (err error) { + _, _, e1 := Syscall6(SYS_PTRACE, uintptr(request), uintptr(pid), uintptr(addr), uintptr(data), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func reboot(magic1 uint, magic2 uint, cmd int, arg string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(arg) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_REBOOT, uintptr(magic1), uintptr(magic2), uintptr(cmd), uintptr(unsafe.Pointer(_p0)), 0, 0) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func mount(source string, target string, fstype 
string, flags uintptr, data *byte) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(source) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(target) + if err != nil { + return + } + var _p2 *byte + _p2, err = BytePtrFromString(fstype) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_MOUNT, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(unsafe.Pointer(_p2)), uintptr(flags), uintptr(unsafe.Pointer(data)), 0) + use(unsafe.Pointer(_p0)) + use(unsafe.Pointer(_p1)) + use(unsafe.Pointer(_p2)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Acct(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_ACCT, uintptr(unsafe.Pointer(_p0)), 0, 0) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Adjtimex(buf *Timex) (state int, err error) { + r0, _, e1 := Syscall(SYS_ADJTIMEX, uintptr(unsafe.Pointer(buf)), 0, 0) + state = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Chdir(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_CHDIR, uintptr(unsafe.Pointer(_p0)), 0, 0) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Chroot(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_CHROOT, uintptr(unsafe.Pointer(_p0)), 0, 0) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ClockGettime(clockid int32, time *Timespec) (err error) { + _, _, e1 := Syscall(SYS_CLOCK_GETTIME, uintptr(clockid), uintptr(unsafe.Pointer(time)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Close(fd int) (err error) { + _, _, e1 := Syscall(SYS_CLOSE, uintptr(fd), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Dup(oldfd int) (fd int, err error) { + r0, _, e1 := Syscall(SYS_DUP, uintptr(oldfd), 0, 0) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Dup3(oldfd int, newfd int, flags int) (err error) { + _, _, e1 := Syscall(SYS_DUP3, uintptr(oldfd), uintptr(newfd), uintptr(flags)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func EpollCreate(size int) (fd int, err error) { + r0, _, e1 := RawSyscall(SYS_EPOLL_CREATE, uintptr(size), 0, 0) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func EpollCreate1(flag int) (fd int, err error) { + r0, _, e1 := RawSyscall(SYS_EPOLL_CREATE1, uintptr(flag), 0, 0) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func EpollCtl(epfd int, op int, fd int, event *EpollEvent) (err error) { + _, _, e1 := 
RawSyscall6(SYS_EPOLL_CTL, uintptr(epfd), uintptr(op), uintptr(fd), uintptr(unsafe.Pointer(event)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) { + var _p0 unsafe.Pointer + if len(events) > 0 { + _p0 = unsafe.Pointer(&events[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_EPOLL_WAIT, uintptr(epfd), uintptr(_p0), uintptr(len(events)), uintptr(msec), 0, 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Exit(code int) { + Syscall(SYS_EXIT_GROUP, uintptr(code), 0, 0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Faccessat(dirfd int, path string, mode uint32, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_FACCESSAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fallocate(fd int, mode uint32, off int64, len int64) (err error) { + _, _, e1 := Syscall6(SYS_FALLOCATE, uintptr(fd), uintptr(mode), uintptr(off), uintptr(len), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fchdir(fd int) (err error) { + _, _, e1 := Syscall(SYS_FCHDIR, uintptr(fd), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fchmod(fd int, mode uint32) (err error) { + _, _, e1 := Syscall(SYS_FCHMOD, uintptr(fd), uintptr(mode), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fchmodat(dirfd int, path string, mode uint32, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_FCHMODAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fchownat(dirfd int, path string, uid int, gid int, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_FCHOWNAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid), uintptr(flags), 0) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func fcntl(fd int, cmd int, arg int) (val int, err error) { + r0, _, e1 := Syscall(SYS_FCNTL, uintptr(fd), uintptr(cmd), uintptr(arg)) + val = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fdatasync(fd int) (err error) { + _, _, e1 := Syscall(SYS_FDATASYNC, uintptr(fd), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Flock(fd int, how int) (err error) { + _, _, e1 := Syscall(SYS_FLOCK, uintptr(fd), uintptr(how), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS 
GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fsync(fd int) (err error) { + _, _, e1 := Syscall(SYS_FSYNC, uintptr(fd), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getdents(fd int, buf []byte) (n int, err error) { + var _p0 unsafe.Pointer + if len(buf) > 0 { + _p0 = unsafe.Pointer(&buf[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS_GETDENTS64, uintptr(fd), uintptr(_p0), uintptr(len(buf))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getpgid(pid int) (pgid int, err error) { + r0, _, e1 := RawSyscall(SYS_GETPGID, uintptr(pid), 0, 0) + pgid = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getpid() (pid int) { + r0, _, _ := RawSyscall(SYS_GETPID, 0, 0, 0) + pid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getppid() (ppid int) { + r0, _, _ := RawSyscall(SYS_GETPPID, 0, 0, 0) + ppid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getpriority(which int, who int) (prio int, err error) { + r0, _, e1 := Syscall(SYS_GETPRIORITY, uintptr(which), uintptr(who), 0) + prio = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getrusage(who int, rusage *Rusage) (err error) { + _, _, e1 := RawSyscall(SYS_GETRUSAGE, uintptr(who), uintptr(unsafe.Pointer(rusage)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Gettid() (tid int) { + r0, _, _ := RawSyscall(SYS_GETTID, 0, 0, 0) + tid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getxattr(path string, attr string, dest []byte) (sz int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(attr) + if err != nil { + return + } + var _p2 unsafe.Pointer + if len(dest) > 0 { + _p2 = unsafe.Pointer(&dest[0]) + } else { + _p2 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_GETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(dest)), 0, 0) + use(unsafe.Pointer(_p0)) + use(unsafe.Pointer(_p1)) + sz = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func InotifyAddWatch(fd int, pathname string, mask uint32) (watchdesc int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(pathname) + if err != nil { + return + } + r0, _, e1 := Syscall(SYS_INOTIFY_ADD_WATCH, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(mask)) + use(unsafe.Pointer(_p0)) + watchdesc = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func InotifyInit1(flags int) (fd int, err error) { + r0, _, e1 := RawSyscall(SYS_INOTIFY_INIT1, uintptr(flags), 0, 0) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func InotifyRmWatch(fd int, watchdesc uint32) (success int, err error) { + r0, _, e1 := RawSyscall(SYS_INOTIFY_RM_WATCH, uintptr(fd), uintptr(watchdesc), 0) + success = int(r0) + if e1 
!= 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Kill(pid int, sig syscall.Signal) (err error) { + _, _, e1 := RawSyscall(SYS_KILL, uintptr(pid), uintptr(sig), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Klogctl(typ int, buf []byte) (n int, err error) { + var _p0 unsafe.Pointer + if len(buf) > 0 { + _p0 = unsafe.Pointer(&buf[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS_SYSLOG, uintptr(typ), uintptr(_p0), uintptr(len(buf))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Listxattr(path string, dest []byte) (sz int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 unsafe.Pointer + if len(dest) > 0 { + _p1 = unsafe.Pointer(&dest[0]) + } else { + _p1 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS_LISTXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(dest))) + use(unsafe.Pointer(_p0)) + sz = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mkdirat(dirfd int, path string, mode uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_MKDIRAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode)) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mknodat(dirfd int, path string, mode uint32, dev int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_MKNODAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev), 0, 0) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Nanosleep(time *Timespec, leftover *Timespec) (err error) { + _, _, e1 := Syscall(SYS_NANOSLEEP, uintptr(unsafe.Pointer(time)), uintptr(unsafe.Pointer(leftover)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Pause() (err error) { + _, _, e1 := Syscall(SYS_PAUSE, 0, 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func PivotRoot(newroot string, putold string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(newroot) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(putold) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_PIVOT_ROOT, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) + use(unsafe.Pointer(_p0)) + use(unsafe.Pointer(_p1)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func prlimit(pid int, resource int, old *Rlimit, newlimit *Rlimit) (err error) { + _, _, e1 := RawSyscall6(SYS_PRLIMIT64, uintptr(pid), uintptr(resource), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(newlimit)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Prctl(option int, arg2 uintptr, arg3 uintptr, arg4 uintptr, arg5 uintptr) (err error) { + _, 
_, e1 := Syscall6(SYS_PRCTL, uintptr(option), uintptr(arg2), uintptr(arg3), uintptr(arg4), uintptr(arg5), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func read(fd int, p []byte) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS_READ, uintptr(fd), uintptr(_p0), uintptr(len(p))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Removexattr(path string, attr string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(attr) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_REMOVEXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) + use(unsafe.Pointer(_p0)) + use(unsafe.Pointer(_p1)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Renameat(olddirfd int, oldpath string, newdirfd int, newpath string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(oldpath) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(newpath) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_RENAMEAT, uintptr(olddirfd), uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1)), 0, 0) + use(unsafe.Pointer(_p0)) + use(unsafe.Pointer(_p1)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setdomainname(p []byte) (err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall(SYS_SETDOMAINNAME, uintptr(_p0), uintptr(len(p)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Sethostname(p []byte) (err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall(SYS_SETHOSTNAME, uintptr(_p0), uintptr(len(p)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setpgid(pid int, pgid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETPGID, uintptr(pid), uintptr(pgid), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setsid() (pid int, err error) { + r0, _, e1 := RawSyscall(SYS_SETSID, 0, 0, 0) + pid = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Settimeofday(tv *Timeval) (err error) { + _, _, e1 := RawSyscall(SYS_SETTIMEOFDAY, uintptr(unsafe.Pointer(tv)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setpriority(which int, who int, prio int) (err error) { + _, _, e1 := Syscall(SYS_SETPRIORITY, uintptr(which), uintptr(who), uintptr(prio)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setxattr(path string, attr string, data []byte, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + 
var _p1 *byte + _p1, err = BytePtrFromString(attr) + if err != nil { + return + } + var _p2 unsafe.Pointer + if len(data) > 0 { + _p2 = unsafe.Pointer(&data[0]) + } else { + _p2 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall6(SYS_SETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(data)), uintptr(flags), 0) + use(unsafe.Pointer(_p0)) + use(unsafe.Pointer(_p1)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Sync() { + Syscall(SYS_SYNC, 0, 0, 0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Sysinfo(info *Sysinfo_t) (err error) { + _, _, e1 := RawSyscall(SYS_SYSINFO, uintptr(unsafe.Pointer(info)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Tee(rfd int, wfd int, len int, flags int) (n int64, err error) { + r0, _, e1 := Syscall6(SYS_TEE, uintptr(rfd), uintptr(wfd), uintptr(len), uintptr(flags), 0, 0) + n = int64(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Tgkill(tgid int, tid int, sig syscall.Signal) (err error) { + _, _, e1 := RawSyscall(SYS_TGKILL, uintptr(tgid), uintptr(tid), uintptr(sig)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Times(tms *Tms) (ticks uintptr, err error) { + r0, _, e1 := RawSyscall(SYS_TIMES, uintptr(unsafe.Pointer(tms)), 0, 0) + ticks = uintptr(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Umask(mask int) (oldmask int) { + r0, _, _ := RawSyscall(SYS_UMASK, uintptr(mask), 0, 0) + oldmask = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Uname(buf *Utsname) (err error) { + _, _, e1 := RawSyscall(SYS_UNAME, uintptr(unsafe.Pointer(buf)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Unmount(target string, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(target) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_UMOUNT2, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Unshare(flags int) (err error) { + _, _, e1 := Syscall(SYS_UNSHARE, uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Ustat(dev int, ubuf *Ustat_t) (err error) { + _, _, e1 := Syscall(SYS_USTAT, uintptr(dev), uintptr(unsafe.Pointer(ubuf)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Utime(path string, buf *Utimbuf) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_UTIME, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(buf)), 0) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func write(fd int, p []byte) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = 
unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(_p0), uintptr(len(p))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func exitThread(code int) (err error) { + _, _, e1 := Syscall(SYS_EXIT, uintptr(code), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func readlen(fd int, p *byte, np int) (n int, err error) { + r0, _, e1 := Syscall(SYS_READ, uintptr(fd), uintptr(unsafe.Pointer(p)), uintptr(np)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func writelen(fd int, p *byte, np int) (n int, err error) { + r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(unsafe.Pointer(p)), uintptr(np)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func munmap(addr uintptr, length uintptr) (err error) { + _, _, e1 := Syscall(SYS_MUNMAP, uintptr(addr), uintptr(length), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Madvise(b []byte, advice int) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall(SYS_MADVISE, uintptr(_p0), uintptr(len(b)), uintptr(advice)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mprotect(b []byte, prot int) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall(SYS_MPROTECT, uintptr(_p0), uintptr(len(b)), uintptr(prot)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mlock(b []byte) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall(SYS_MLOCK, uintptr(_p0), uintptr(len(b)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Munlock(b []byte) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall(SYS_MUNLOCK, uintptr(_p0), uintptr(len(b)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mlockall(flags int) (err error) { + _, _, e1 := Syscall(SYS_MLOCKALL, uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Munlockall() (err error) { + _, _, e1 := Syscall(SYS_MUNLOCKALL, 0, 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fchown(fd int, uid int, gid int) (err error) { + _, _, e1 := Syscall(SYS_FCHOWN, uintptr(fd), uintptr(uid), uintptr(gid)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fstat(fd int, stat *Stat_t) (err error) { + _, _, e1 := Syscall(SYS_FSTAT, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + 
return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fstatfs(fd int, buf *Statfs_t) (err error) { + _, _, e1 := Syscall(SYS_FSTATFS, uintptr(fd), uintptr(unsafe.Pointer(buf)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Ftruncate(fd int, length int64) (err error) { + _, _, e1 := Syscall(SYS_FTRUNCATE, uintptr(fd), uintptr(length), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getegid() (egid int) { + r0, _, _ := RawSyscall(SYS_GETEGID, 0, 0, 0) + egid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Geteuid() (euid int) { + r0, _, _ := RawSyscall(SYS_GETEUID, 0, 0, 0) + euid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getgid() (gid int) { + r0, _, _ := RawSyscall(SYS_GETGID, 0, 0, 0) + gid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getrlimit(resource int, rlim *Rlimit) (err error) { + _, _, e1 := RawSyscall(SYS_UGETRLIMIT, uintptr(resource), uintptr(unsafe.Pointer(rlim)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getuid() (uid int) { + r0, _, _ := RawSyscall(SYS_GETUID, 0, 0, 0) + uid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Ioperm(from int, num int, on int) (err error) { + _, _, e1 := Syscall(SYS_IOPERM, uintptr(from), uintptr(num), uintptr(on)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Iopl(level int) (err error) { + _, _, e1 := Syscall(SYS_IOPL, uintptr(level), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Lchown(path string, uid int, gid int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_LCHOWN, uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid)) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Listen(s int, n int) (err error) { + _, _, e1 := Syscall(SYS_LISTEN, uintptr(s), uintptr(n), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Lstat(path string, stat *Stat_t) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_LSTAT, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Pread(fd int, p []byte, offset int64) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_PREAD64, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(offset), 0, 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Pwrite(fd int, p []byte, offset int64) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = 
unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_PWRITE64, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(offset), 0, 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Seek(fd int, offset int64, whence int) (off int64, err error) { + r0, _, e1 := Syscall(SYS_LSEEK, uintptr(fd), uintptr(offset), uintptr(whence)) + off = int64(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err error) { + r0, _, e1 := Syscall6(SYS_SELECT, uintptr(nfd), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) { + r0, _, e1 := Syscall6(SYS_SENDFILE, uintptr(outfd), uintptr(infd), uintptr(unsafe.Pointer(offset)), uintptr(count), 0, 0) + written = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setfsgid(gid int) (err error) { + _, _, e1 := Syscall(SYS_SETFSGID, uintptr(gid), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setfsuid(uid int) (err error) { + _, _, e1 := Syscall(SYS_SETFSUID, uintptr(uid), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setregid(rgid int, egid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETREGID, uintptr(rgid), uintptr(egid), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setresgid(rgid int, egid int, sgid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETRESGID, uintptr(rgid), uintptr(egid), uintptr(sgid)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setresuid(ruid int, euid int, suid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETRESUID, uintptr(ruid), uintptr(euid), uintptr(suid)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setrlimit(resource int, rlim *Rlimit) (err error) { + _, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(resource), uintptr(unsafe.Pointer(rlim)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setreuid(ruid int, euid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETREUID, uintptr(ruid), uintptr(euid), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Shutdown(fd int, how int) (err error) { + _, _, e1 := Syscall(SYS_SHUTDOWN, uintptr(fd), uintptr(how), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Splice(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n int64, err error) { + r0, _, e1 := Syscall6(SYS_SPLICE, uintptr(rfd), uintptr(unsafe.Pointer(roff)), uintptr(wfd), uintptr(unsafe.Pointer(woff)), uintptr(len), 
uintptr(flags)) + n = int64(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Stat(path string, stat *Stat_t) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_STAT, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Statfs(path string, buf *Statfs_t) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_STATFS, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(buf)), 0) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func SyncFileRange(fd int, off int64, n int64, flags int) (err error) { + _, _, e1 := Syscall6(SYS_SYNC_FILE_RANGE2, uintptr(fd), uintptr(off), uintptr(n), uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Truncate(path string, length int64) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_TRUNCATE, uintptr(unsafe.Pointer(_p0)), uintptr(length), 0) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error) { + r0, _, e1 := Syscall(SYS_ACCEPT, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func accept4(s int, rsa *RawSockaddrAny, addrlen *_Socklen, flags int) (fd int, err error) { + r0, _, e1 := Syscall6(SYS_ACCEPT4, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)), uintptr(flags), 0, 0) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func bind(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) { + _, _, e1 := Syscall(SYS_BIND, uintptr(s), uintptr(addr), uintptr(addrlen)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func connect(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) { + _, _, e1 := Syscall(SYS_CONNECT, uintptr(s), uintptr(addr), uintptr(addrlen)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getgroups(n int, list *_Gid_t) (nn int, err error) { + r0, _, e1 := RawSyscall(SYS_GETGROUPS, uintptr(n), uintptr(unsafe.Pointer(list)), 0) + nn = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func setgroups(n int, list *_Gid_t) (err error) { + _, _, e1 := RawSyscall(SYS_SETGROUPS, uintptr(n), uintptr(unsafe.Pointer(list)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getsockopt(s int, level int, name int, val unsafe.Pointer, vallen *_Socklen) (err error) { + _, _, e1 := Syscall6(SYS_GETSOCKOPT, uintptr(s), uintptr(level), 
uintptr(name), uintptr(val), uintptr(unsafe.Pointer(vallen)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func setsockopt(s int, level int, name int, val unsafe.Pointer, vallen uintptr) (err error) { + _, _, e1 := Syscall6(SYS_SETSOCKOPT, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(vallen), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func socket(domain int, typ int, proto int) (fd int, err error) { + r0, _, e1 := RawSyscall(SYS_SOCKET, uintptr(domain), uintptr(typ), uintptr(proto)) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func socketpair(domain int, typ int, proto int, fd *[2]int32) (err error) { + _, _, e1 := RawSyscall6(SYS_SOCKETPAIR, uintptr(domain), uintptr(typ), uintptr(proto), uintptr(unsafe.Pointer(fd)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getpeername(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) { + _, _, e1 := RawSyscall(SYS_GETPEERNAME, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getsockname(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) { + _, _, e1 := RawSyscall(SYS_GETSOCKNAME, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Socklen) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_RECVFROM, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(flags), uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(fromlen))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func sendto(s int, buf []byte, flags int, to unsafe.Pointer, addrlen _Socklen) (err error) { + var _p0 unsafe.Pointer + if len(buf) > 0 { + _p0 = unsafe.Pointer(&buf[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall6(SYS_SENDTO, uintptr(s), uintptr(_p0), uintptr(len(buf)), uintptr(flags), uintptr(to), uintptr(addrlen)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func recvmsg(s int, msg *Msghdr, flags int) (n int, err error) { + r0, _, e1 := Syscall(SYS_RECVMSG, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func sendmsg(s int, msg *Msghdr, flags int) (n int, err error) { + r0, _, e1 := Syscall(SYS_SENDMSG, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func mmap(addr uintptr, length uintptr, prot int, flags int, fd int, offset int64) (xaddr uintptr, err error) { + r0, _, e1 := Syscall6(SYS_MMAP, uintptr(addr), uintptr(length), uintptr(prot), 
uintptr(flags), uintptr(fd), uintptr(offset))
+	xaddr = uintptr(r0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Gettimeofday(tv *Timeval) (err error) {
+	_, _, e1 := RawSyscall(SYS_GETTIMEOFDAY, uintptr(unsafe.Pointer(tv)), 0, 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Time(t *Time_t) (tt Time_t, err error) {
+	r0, _, e1 := RawSyscall(SYS_TIME, uintptr(unsafe.Pointer(t)), 0, 0)
+	tt = Time_t(r0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
diff --git a/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zsyscall_linux_ppc64le.go b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zsyscall_linux_ppc64le.go
new file mode 100644
index 000000000..22fc7a457
--- /dev/null
+++ b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zsyscall_linux_ppc64le.go
@@ -0,0 +1,1792 @@
+// mksyscall.pl syscall_linux.go syscall_linux_ppc64x.go
+// MACHINE GENERATED BY THE COMMAND ABOVE; DO NOT EDIT
+
+// +build ppc64le,linux
+
+package unix
+
+import (
+	"syscall"
+	"unsafe"
+)
+
+var _ syscall.Errno
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func linkat(olddirfd int, oldpath string, newdirfd int, newpath string, flags int) (err error) {
+	var _p0 *byte
+	_p0, err = BytePtrFromString(oldpath)
+	if err != nil {
+		return
+	}
+	var _p1 *byte
+	_p1, err = BytePtrFromString(newpath)
+	if err != nil {
+		return
+	}
+	_, _, e1 := Syscall6(SYS_LINKAT, uintptr(olddirfd), uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1)), uintptr(flags), 0)
+	use(unsafe.Pointer(_p0))
+	use(unsafe.Pointer(_p1))
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func openat(dirfd int, path string, flags int, mode uint32) (fd int, err error) {
+	var _p0 *byte
+	_p0, err = BytePtrFromString(path)
+	if err != nil {
+		return
+	}
+	r0, _, e1 := Syscall6(SYS_OPENAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags), uintptr(mode), 0, 0)
+	use(unsafe.Pointer(_p0))
+	fd = int(r0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func readlinkat(dirfd int, path string, buf []byte) (n int, err error) {
+	var _p0 *byte
+	_p0, err = BytePtrFromString(path)
+	if err != nil {
+		return
+	}
+	var _p1 unsafe.Pointer
+	if len(buf) > 0 {
+		_p1 = unsafe.Pointer(&buf[0])
+	} else {
+		_p1 = unsafe.Pointer(&_zero)
+	}
+	r0, _, e1 := Syscall6(SYS_READLINKAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(buf)), 0, 0)
+	use(unsafe.Pointer(_p0))
+	n = int(r0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func symlinkat(oldpath string, newdirfd int, newpath string) (err error) {
+	var _p0 *byte
+	_p0, err = BytePtrFromString(oldpath)
+	if err != nil {
+		return
+	}
+	var _p1 *byte
+	_p1, err = BytePtrFromString(newpath)
+	if err != nil {
+		return
+	}
+	_, _, e1 := Syscall(SYS_SYMLINKAT, uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1)))
+	use(unsafe.Pointer(_p0))
+	use(unsafe.Pointer(_p1))
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func unlinkat(dirfd int, path string, flags int) (err error) {
+	var _p0 *byte
+	_p0,
err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_UNLINKAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags)) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func utimes(path string, times *[2]Timeval) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_UTIMES, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(times)), 0) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_UTIMENSAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(times)), uintptr(flags), 0, 0) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func futimesat(dirfd int, path *byte, times *[2]Timeval) (err error) { + _, _, e1 := Syscall(SYS_FUTIMESAT, uintptr(dirfd), uintptr(unsafe.Pointer(path)), uintptr(unsafe.Pointer(times))) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getcwd(buf []byte) (n int, err error) { + var _p0 unsafe.Pointer + if len(buf) > 0 { + _p0 = unsafe.Pointer(&buf[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS_GETCWD, uintptr(_p0), uintptr(len(buf)), 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func wait4(pid int, wstatus *_C_int, options int, rusage *Rusage) (wpid int, err error) { + r0, _, e1 := Syscall6(SYS_WAIT4, uintptr(pid), uintptr(unsafe.Pointer(wstatus)), uintptr(options), uintptr(unsafe.Pointer(rusage)), 0, 0) + wpid = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ptrace(request int, pid int, addr uintptr, data uintptr) (err error) { + _, _, e1 := Syscall6(SYS_PTRACE, uintptr(request), uintptr(pid), uintptr(addr), uintptr(data), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func reboot(magic1 uint, magic2 uint, cmd int, arg string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(arg) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_REBOOT, uintptr(magic1), uintptr(magic2), uintptr(cmd), uintptr(unsafe.Pointer(_p0)), 0, 0) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func mount(source string, target string, fstype string, flags uintptr, data *byte) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(source) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(target) + if err != nil { + return + } + var _p2 *byte + _p2, err = BytePtrFromString(fstype) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_MOUNT, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(unsafe.Pointer(_p2)), uintptr(flags), uintptr(unsafe.Pointer(data)), 0) + use(unsafe.Pointer(_p0)) + use(unsafe.Pointer(_p1)) + use(unsafe.Pointer(_p2)) + if 
e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Acct(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_ACCT, uintptr(unsafe.Pointer(_p0)), 0, 0) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Adjtimex(buf *Timex) (state int, err error) { + r0, _, e1 := Syscall(SYS_ADJTIMEX, uintptr(unsafe.Pointer(buf)), 0, 0) + state = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Chdir(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_CHDIR, uintptr(unsafe.Pointer(_p0)), 0, 0) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Chroot(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_CHROOT, uintptr(unsafe.Pointer(_p0)), 0, 0) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ClockGettime(clockid int32, time *Timespec) (err error) { + _, _, e1 := Syscall(SYS_CLOCK_GETTIME, uintptr(clockid), uintptr(unsafe.Pointer(time)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Close(fd int) (err error) { + _, _, e1 := Syscall(SYS_CLOSE, uintptr(fd), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Dup(oldfd int) (fd int, err error) { + r0, _, e1 := Syscall(SYS_DUP, uintptr(oldfd), 0, 0) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Dup3(oldfd int, newfd int, flags int) (err error) { + _, _, e1 := Syscall(SYS_DUP3, uintptr(oldfd), uintptr(newfd), uintptr(flags)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func EpollCreate(size int) (fd int, err error) { + r0, _, e1 := RawSyscall(SYS_EPOLL_CREATE, uintptr(size), 0, 0) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func EpollCreate1(flag int) (fd int, err error) { + r0, _, e1 := RawSyscall(SYS_EPOLL_CREATE1, uintptr(flag), 0, 0) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func EpollCtl(epfd int, op int, fd int, event *EpollEvent) (err error) { + _, _, e1 := RawSyscall6(SYS_EPOLL_CTL, uintptr(epfd), uintptr(op), uintptr(fd), uintptr(unsafe.Pointer(event)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) { + var _p0 unsafe.Pointer + if len(events) > 0 { + _p0 = unsafe.Pointer(&events[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_EPOLL_WAIT, uintptr(epfd), uintptr(_p0), uintptr(len(events)), uintptr(msec), 0, 0) + n = int(r0) + if e1 != 0 
{ + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Exit(code int) { + Syscall(SYS_EXIT_GROUP, uintptr(code), 0, 0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Faccessat(dirfd int, path string, mode uint32, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_FACCESSAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fallocate(fd int, mode uint32, off int64, len int64) (err error) { + _, _, e1 := Syscall6(SYS_FALLOCATE, uintptr(fd), uintptr(mode), uintptr(off), uintptr(len), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fchdir(fd int) (err error) { + _, _, e1 := Syscall(SYS_FCHDIR, uintptr(fd), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fchmod(fd int, mode uint32) (err error) { + _, _, e1 := Syscall(SYS_FCHMOD, uintptr(fd), uintptr(mode), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fchmodat(dirfd int, path string, mode uint32, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_FCHMODAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fchownat(dirfd int, path string, uid int, gid int, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_FCHOWNAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid), uintptr(flags), 0) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func fcntl(fd int, cmd int, arg int) (val int, err error) { + r0, _, e1 := Syscall(SYS_FCNTL, uintptr(fd), uintptr(cmd), uintptr(arg)) + val = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fdatasync(fd int) (err error) { + _, _, e1 := Syscall(SYS_FDATASYNC, uintptr(fd), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Flock(fd int, how int) (err error) { + _, _, e1 := Syscall(SYS_FLOCK, uintptr(fd), uintptr(how), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fsync(fd int) (err error) { + _, _, e1 := Syscall(SYS_FSYNC, uintptr(fd), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getdents(fd int, buf []byte) (n int, err error) { + var _p0 unsafe.Pointer + if len(buf) > 0 { + _p0 = unsafe.Pointer(&buf[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS_GETDENTS64, uintptr(fd), uintptr(_p0), uintptr(len(buf))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + 
return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getpgid(pid int) (pgid int, err error) { + r0, _, e1 := RawSyscall(SYS_GETPGID, uintptr(pid), 0, 0) + pgid = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getpid() (pid int) { + r0, _, _ := RawSyscall(SYS_GETPID, 0, 0, 0) + pid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getppid() (ppid int) { + r0, _, _ := RawSyscall(SYS_GETPPID, 0, 0, 0) + ppid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getpriority(which int, who int) (prio int, err error) { + r0, _, e1 := Syscall(SYS_GETPRIORITY, uintptr(which), uintptr(who), 0) + prio = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getrusage(who int, rusage *Rusage) (err error) { + _, _, e1 := RawSyscall(SYS_GETRUSAGE, uintptr(who), uintptr(unsafe.Pointer(rusage)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Gettid() (tid int) { + r0, _, _ := RawSyscall(SYS_GETTID, 0, 0, 0) + tid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getxattr(path string, attr string, dest []byte) (sz int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(attr) + if err != nil { + return + } + var _p2 unsafe.Pointer + if len(dest) > 0 { + _p2 = unsafe.Pointer(&dest[0]) + } else { + _p2 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_GETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(dest)), 0, 0) + use(unsafe.Pointer(_p0)) + use(unsafe.Pointer(_p1)) + sz = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func InotifyAddWatch(fd int, pathname string, mask uint32) (watchdesc int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(pathname) + if err != nil { + return + } + r0, _, e1 := Syscall(SYS_INOTIFY_ADD_WATCH, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(mask)) + use(unsafe.Pointer(_p0)) + watchdesc = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func InotifyInit1(flags int) (fd int, err error) { + r0, _, e1 := RawSyscall(SYS_INOTIFY_INIT1, uintptr(flags), 0, 0) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func InotifyRmWatch(fd int, watchdesc uint32) (success int, err error) { + r0, _, e1 := RawSyscall(SYS_INOTIFY_RM_WATCH, uintptr(fd), uintptr(watchdesc), 0) + success = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Kill(pid int, sig syscall.Signal) (err error) { + _, _, e1 := RawSyscall(SYS_KILL, uintptr(pid), uintptr(sig), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Klogctl(typ int, buf []byte) (n int, err error) { + var _p0 unsafe.Pointer + if len(buf) > 0 { + _p0 = unsafe.Pointer(&buf[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS_SYSLOG, 
uintptr(typ), uintptr(_p0), uintptr(len(buf))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Listxattr(path string, dest []byte) (sz int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 unsafe.Pointer + if len(dest) > 0 { + _p1 = unsafe.Pointer(&dest[0]) + } else { + _p1 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS_LISTXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(dest))) + use(unsafe.Pointer(_p0)) + sz = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mkdirat(dirfd int, path string, mode uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_MKDIRAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode)) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mknodat(dirfd int, path string, mode uint32, dev int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_MKNODAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev), 0, 0) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Nanosleep(time *Timespec, leftover *Timespec) (err error) { + _, _, e1 := Syscall(SYS_NANOSLEEP, uintptr(unsafe.Pointer(time)), uintptr(unsafe.Pointer(leftover)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Pause() (err error) { + _, _, e1 := Syscall(SYS_PAUSE, 0, 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func PivotRoot(newroot string, putold string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(newroot) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(putold) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_PIVOT_ROOT, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) + use(unsafe.Pointer(_p0)) + use(unsafe.Pointer(_p1)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func prlimit(pid int, resource int, old *Rlimit, newlimit *Rlimit) (err error) { + _, _, e1 := RawSyscall6(SYS_PRLIMIT64, uintptr(pid), uintptr(resource), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(newlimit)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Prctl(option int, arg2 uintptr, arg3 uintptr, arg4 uintptr, arg5 uintptr) (err error) { + _, _, e1 := Syscall6(SYS_PRCTL, uintptr(option), uintptr(arg2), uintptr(arg3), uintptr(arg4), uintptr(arg5), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func read(fd int, p []byte) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS_READ, uintptr(fd), uintptr(_p0), uintptr(len(p))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS 
GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Removexattr(path string, attr string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(attr) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_REMOVEXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) + use(unsafe.Pointer(_p0)) + use(unsafe.Pointer(_p1)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Renameat(olddirfd int, oldpath string, newdirfd int, newpath string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(oldpath) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(newpath) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_RENAMEAT, uintptr(olddirfd), uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1)), 0, 0) + use(unsafe.Pointer(_p0)) + use(unsafe.Pointer(_p1)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setdomainname(p []byte) (err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall(SYS_SETDOMAINNAME, uintptr(_p0), uintptr(len(p)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Sethostname(p []byte) (err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall(SYS_SETHOSTNAME, uintptr(_p0), uintptr(len(p)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setpgid(pid int, pgid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETPGID, uintptr(pid), uintptr(pgid), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setsid() (pid int, err error) { + r0, _, e1 := RawSyscall(SYS_SETSID, 0, 0, 0) + pid = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Settimeofday(tv *Timeval) (err error) { + _, _, e1 := RawSyscall(SYS_SETTIMEOFDAY, uintptr(unsafe.Pointer(tv)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setpriority(which int, who int, prio int) (err error) { + _, _, e1 := Syscall(SYS_SETPRIORITY, uintptr(which), uintptr(who), uintptr(prio)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setxattr(path string, attr string, data []byte, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(attr) + if err != nil { + return + } + var _p2 unsafe.Pointer + if len(data) > 0 { + _p2 = unsafe.Pointer(&data[0]) + } else { + _p2 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall6(SYS_SETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(data)), uintptr(flags), 0) + use(unsafe.Pointer(_p0)) + use(unsafe.Pointer(_p1)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Sync() { + 
Syscall(SYS_SYNC, 0, 0, 0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Sysinfo(info *Sysinfo_t) (err error) { + _, _, e1 := RawSyscall(SYS_SYSINFO, uintptr(unsafe.Pointer(info)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Tee(rfd int, wfd int, len int, flags int) (n int64, err error) { + r0, _, e1 := Syscall6(SYS_TEE, uintptr(rfd), uintptr(wfd), uintptr(len), uintptr(flags), 0, 0) + n = int64(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Tgkill(tgid int, tid int, sig syscall.Signal) (err error) { + _, _, e1 := RawSyscall(SYS_TGKILL, uintptr(tgid), uintptr(tid), uintptr(sig)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Times(tms *Tms) (ticks uintptr, err error) { + r0, _, e1 := RawSyscall(SYS_TIMES, uintptr(unsafe.Pointer(tms)), 0, 0) + ticks = uintptr(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Umask(mask int) (oldmask int) { + r0, _, _ := RawSyscall(SYS_UMASK, uintptr(mask), 0, 0) + oldmask = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Uname(buf *Utsname) (err error) { + _, _, e1 := RawSyscall(SYS_UNAME, uintptr(unsafe.Pointer(buf)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Unmount(target string, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(target) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_UMOUNT2, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Unshare(flags int) (err error) { + _, _, e1 := Syscall(SYS_UNSHARE, uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Ustat(dev int, ubuf *Ustat_t) (err error) { + _, _, e1 := Syscall(SYS_USTAT, uintptr(dev), uintptr(unsafe.Pointer(ubuf)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Utime(path string, buf *Utimbuf) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_UTIME, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(buf)), 0) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func write(fd int, p []byte) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(_p0), uintptr(len(p))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func exitThread(code int) (err error) { + _, _, e1 := Syscall(SYS_EXIT, uintptr(code), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func readlen(fd int, p *byte, np int) (n int, err error) { + r0, _, e1 := Syscall(SYS_READ, 
uintptr(fd), uintptr(unsafe.Pointer(p)), uintptr(np)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func writelen(fd int, p *byte, np int) (n int, err error) { + r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(unsafe.Pointer(p)), uintptr(np)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func munmap(addr uintptr, length uintptr) (err error) { + _, _, e1 := Syscall(SYS_MUNMAP, uintptr(addr), uintptr(length), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Madvise(b []byte, advice int) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall(SYS_MADVISE, uintptr(_p0), uintptr(len(b)), uintptr(advice)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mprotect(b []byte, prot int) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall(SYS_MPROTECT, uintptr(_p0), uintptr(len(b)), uintptr(prot)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mlock(b []byte) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall(SYS_MLOCK, uintptr(_p0), uintptr(len(b)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Munlock(b []byte) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall(SYS_MUNLOCK, uintptr(_p0), uintptr(len(b)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mlockall(flags int) (err error) { + _, _, e1 := Syscall(SYS_MLOCKALL, uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Munlockall() (err error) { + _, _, e1 := Syscall(SYS_MUNLOCKALL, 0, 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fchown(fd int, uid int, gid int) (err error) { + _, _, e1 := Syscall(SYS_FCHOWN, uintptr(fd), uintptr(uid), uintptr(gid)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fstat(fd int, stat *Stat_t) (err error) { + _, _, e1 := Syscall(SYS_FSTAT, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fstatfs(fd int, buf *Statfs_t) (err error) { + _, _, e1 := Syscall(SYS_FSTATFS, uintptr(fd), uintptr(unsafe.Pointer(buf)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Ftruncate(fd int, length int64) (err error) { + _, _, e1 := Syscall(SYS_FTRUNCATE, uintptr(fd), uintptr(length), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND 
AT THE TOP; DO NOT EDIT + +func Getegid() (egid int) { + r0, _, _ := RawSyscall(SYS_GETEGID, 0, 0, 0) + egid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Geteuid() (euid int) { + r0, _, _ := RawSyscall(SYS_GETEUID, 0, 0, 0) + euid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getgid() (gid int) { + r0, _, _ := RawSyscall(SYS_GETGID, 0, 0, 0) + gid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getrlimit(resource int, rlim *Rlimit) (err error) { + _, _, e1 := RawSyscall(SYS_UGETRLIMIT, uintptr(resource), uintptr(unsafe.Pointer(rlim)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getuid() (uid int) { + r0, _, _ := RawSyscall(SYS_GETUID, 0, 0, 0) + uid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Ioperm(from int, num int, on int) (err error) { + _, _, e1 := Syscall(SYS_IOPERM, uintptr(from), uintptr(num), uintptr(on)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Iopl(level int) (err error) { + _, _, e1 := Syscall(SYS_IOPL, uintptr(level), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Lchown(path string, uid int, gid int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_LCHOWN, uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid)) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Listen(s int, n int) (err error) { + _, _, e1 := Syscall(SYS_LISTEN, uintptr(s), uintptr(n), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Lstat(path string, stat *Stat_t) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_LSTAT, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Pread(fd int, p []byte, offset int64) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_PREAD64, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(offset), 0, 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Pwrite(fd int, p []byte, offset int64) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_PWRITE64, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(offset), 0, 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Seek(fd int, offset int64, whence int) (off int64, err error) { + r0, _, e1 := Syscall(SYS_LSEEK, uintptr(fd), uintptr(offset), uintptr(whence)) + off = int64(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE 
COMMAND AT THE TOP; DO NOT EDIT + +func Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err error) { + r0, _, e1 := Syscall6(SYS_SELECT, uintptr(nfd), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) { + r0, _, e1 := Syscall6(SYS_SENDFILE, uintptr(outfd), uintptr(infd), uintptr(unsafe.Pointer(offset)), uintptr(count), 0, 0) + written = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setfsgid(gid int) (err error) { + _, _, e1 := Syscall(SYS_SETFSGID, uintptr(gid), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setfsuid(uid int) (err error) { + _, _, e1 := Syscall(SYS_SETFSUID, uintptr(uid), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setregid(rgid int, egid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETREGID, uintptr(rgid), uintptr(egid), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setresgid(rgid int, egid int, sgid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETRESGID, uintptr(rgid), uintptr(egid), uintptr(sgid)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setresuid(ruid int, euid int, suid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETRESUID, uintptr(ruid), uintptr(euid), uintptr(suid)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setrlimit(resource int, rlim *Rlimit) (err error) { + _, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(resource), uintptr(unsafe.Pointer(rlim)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setreuid(ruid int, euid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETREUID, uintptr(ruid), uintptr(euid), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Shutdown(fd int, how int) (err error) { + _, _, e1 := Syscall(SYS_SHUTDOWN, uintptr(fd), uintptr(how), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Splice(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n int64, err error) { + r0, _, e1 := Syscall6(SYS_SPLICE, uintptr(rfd), uintptr(unsafe.Pointer(roff)), uintptr(wfd), uintptr(unsafe.Pointer(woff)), uintptr(len), uintptr(flags)) + n = int64(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Stat(path string, stat *Stat_t) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_STAT, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Statfs(path string, buf 
*Statfs_t) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_STATFS, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(buf)), 0) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func SyncFileRange(fd int, off int64, n int64, flags int) (err error) { + _, _, e1 := Syscall6(SYS_SYNC_FILE_RANGE2, uintptr(fd), uintptr(off), uintptr(n), uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Truncate(path string, length int64) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_TRUNCATE, uintptr(unsafe.Pointer(_p0)), uintptr(length), 0) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error) { + r0, _, e1 := Syscall(SYS_ACCEPT, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func accept4(s int, rsa *RawSockaddrAny, addrlen *_Socklen, flags int) (fd int, err error) { + r0, _, e1 := Syscall6(SYS_ACCEPT4, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)), uintptr(flags), 0, 0) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func bind(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) { + _, _, e1 := Syscall(SYS_BIND, uintptr(s), uintptr(addr), uintptr(addrlen)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func connect(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) { + _, _, e1 := Syscall(SYS_CONNECT, uintptr(s), uintptr(addr), uintptr(addrlen)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getgroups(n int, list *_Gid_t) (nn int, err error) { + r0, _, e1 := RawSyscall(SYS_GETGROUPS, uintptr(n), uintptr(unsafe.Pointer(list)), 0) + nn = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func setgroups(n int, list *_Gid_t) (err error) { + _, _, e1 := RawSyscall(SYS_SETGROUPS, uintptr(n), uintptr(unsafe.Pointer(list)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getsockopt(s int, level int, name int, val unsafe.Pointer, vallen *_Socklen) (err error) { + _, _, e1 := Syscall6(SYS_GETSOCKOPT, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(unsafe.Pointer(vallen)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func setsockopt(s int, level int, name int, val unsafe.Pointer, vallen uintptr) (err error) { + _, _, e1 := Syscall6(SYS_SETSOCKOPT, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(vallen), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func socket(domain int, typ int, proto int) (fd int, 
err error) { + r0, _, e1 := RawSyscall(SYS_SOCKET, uintptr(domain), uintptr(typ), uintptr(proto)) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func socketpair(domain int, typ int, proto int, fd *[2]int32) (err error) { + _, _, e1 := RawSyscall6(SYS_SOCKETPAIR, uintptr(domain), uintptr(typ), uintptr(proto), uintptr(unsafe.Pointer(fd)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getpeername(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) { + _, _, e1 := RawSyscall(SYS_GETPEERNAME, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getsockname(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) { + _, _, e1 := RawSyscall(SYS_GETSOCKNAME, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Socklen) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_RECVFROM, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(flags), uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(fromlen))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func sendto(s int, buf []byte, flags int, to unsafe.Pointer, addrlen _Socklen) (err error) { + var _p0 unsafe.Pointer + if len(buf) > 0 { + _p0 = unsafe.Pointer(&buf[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall6(SYS_SENDTO, uintptr(s), uintptr(_p0), uintptr(len(buf)), uintptr(flags), uintptr(to), uintptr(addrlen)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func recvmsg(s int, msg *Msghdr, flags int) (n int, err error) { + r0, _, e1 := Syscall(SYS_RECVMSG, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func sendmsg(s int, msg *Msghdr, flags int) (n int, err error) { + r0, _, e1 := Syscall(SYS_SENDMSG, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func mmap(addr uintptr, length uintptr, prot int, flags int, fd int, offset int64) (xaddr uintptr, err error) { + r0, _, e1 := Syscall6(SYS_MMAP, uintptr(addr), uintptr(length), uintptr(prot), uintptr(flags), uintptr(fd), uintptr(offset)) + xaddr = uintptr(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Gettimeofday(tv *Timeval) (err error) { + _, _, e1 := RawSyscall(SYS_GETTIMEOFDAY, uintptr(unsafe.Pointer(tv)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Time(t *Time_t) (tt Time_t, err error) { + r0, _, e1 := RawSyscall(SYS_TIME, uintptr(unsafe.Pointer(t)), 0, 0) + tt = Time_t(r0) + if e1 
!= 0 { + err = errnoErr(e1) + } + return +} diff --git a/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zsyscall_netbsd_386.go b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zsyscall_netbsd_386.go new file mode 100644 index 000000000..00ca1f9c1 --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zsyscall_netbsd_386.go @@ -0,0 +1,1326 @@ +// mksyscall.pl -l32 -netbsd syscall_bsd.go syscall_netbsd.go syscall_netbsd_386.go +// MACHINE GENERATED BY THE COMMAND ABOVE; DO NOT EDIT + +// +build 386,netbsd + +package unix + +import ( + "syscall" + "unsafe" +) + +var _ syscall.Errno + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getgroups(ngid int, gid *_Gid_t) (n int, err error) { + r0, _, e1 := RawSyscall(SYS_GETGROUPS, uintptr(ngid), uintptr(unsafe.Pointer(gid)), 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func setgroups(ngid int, gid *_Gid_t) (err error) { + _, _, e1 := RawSyscall(SYS_SETGROUPS, uintptr(ngid), uintptr(unsafe.Pointer(gid)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func wait4(pid int, wstatus *_C_int, options int, rusage *Rusage) (wpid int, err error) { + r0, _, e1 := Syscall6(SYS_WAIT4, uintptr(pid), uintptr(unsafe.Pointer(wstatus)), uintptr(options), uintptr(unsafe.Pointer(rusage)), 0, 0) + wpid = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error) { + r0, _, e1 := Syscall(SYS_ACCEPT, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func bind(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) { + _, _, e1 := Syscall(SYS_BIND, uintptr(s), uintptr(addr), uintptr(addrlen)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func connect(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) { + _, _, e1 := Syscall(SYS_CONNECT, uintptr(s), uintptr(addr), uintptr(addrlen)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func socket(domain int, typ int, proto int) (fd int, err error) { + r0, _, e1 := RawSyscall(SYS_SOCKET, uintptr(domain), uintptr(typ), uintptr(proto)) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getsockopt(s int, level int, name int, val unsafe.Pointer, vallen *_Socklen) (err error) { + _, _, e1 := Syscall6(SYS_GETSOCKOPT, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(unsafe.Pointer(vallen)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func setsockopt(s int, level int, name int, val unsafe.Pointer, vallen uintptr) (err error) { + _, _, e1 := Syscall6(SYS_SETSOCKOPT, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(vallen), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getpeername(fd int, rsa 
*RawSockaddrAny, addrlen *_Socklen) (err error) { + _, _, e1 := RawSyscall(SYS_GETPEERNAME, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getsockname(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) { + _, _, e1 := RawSyscall(SYS_GETSOCKNAME, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Shutdown(s int, how int) (err error) { + _, _, e1 := Syscall(SYS_SHUTDOWN, uintptr(s), uintptr(how), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func socketpair(domain int, typ int, proto int, fd *[2]int32) (err error) { + _, _, e1 := RawSyscall6(SYS_SOCKETPAIR, uintptr(domain), uintptr(typ), uintptr(proto), uintptr(unsafe.Pointer(fd)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Socklen) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_RECVFROM, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(flags), uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(fromlen))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func sendto(s int, buf []byte, flags int, to unsafe.Pointer, addrlen _Socklen) (err error) { + var _p0 unsafe.Pointer + if len(buf) > 0 { + _p0 = unsafe.Pointer(&buf[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall6(SYS_SENDTO, uintptr(s), uintptr(_p0), uintptr(len(buf)), uintptr(flags), uintptr(to), uintptr(addrlen)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func recvmsg(s int, msg *Msghdr, flags int) (n int, err error) { + r0, _, e1 := Syscall(SYS_RECVMSG, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func sendmsg(s int, msg *Msghdr, flags int) (n int, err error) { + r0, _, e1 := Syscall(SYS_SENDMSG, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func kevent(kq int, change unsafe.Pointer, nchange int, event unsafe.Pointer, nevent int, timeout *Timespec) (n int, err error) { + r0, _, e1 := Syscall6(SYS_KEVENT, uintptr(kq), uintptr(change), uintptr(nchange), uintptr(event), uintptr(nevent), uintptr(unsafe.Pointer(timeout))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { + var _p0 unsafe.Pointer + if len(mib) > 0 { + _p0 = unsafe.Pointer(&mib[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall6(SYS___SYSCTL, uintptr(_p0), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), 
uintptr(newlen)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func utimes(path string, timeval *[2]Timeval) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_UTIMES, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(timeval)), 0) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func futimes(fd int, timeval *[2]Timeval) (err error) { + _, _, e1 := Syscall(SYS_FUTIMES, uintptr(fd), uintptr(unsafe.Pointer(timeval)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func fcntl(fd int, cmd int, arg int) (val int, err error) { + r0, _, e1 := Syscall(SYS_FCNTL, uintptr(fd), uintptr(cmd), uintptr(arg)) + val = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func pipe() (fd1 int, fd2 int, err error) { + r0, r1, e1 := RawSyscall(SYS_PIPE, 0, 0, 0) + fd1 = int(r0) + fd2 = int(r1) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getdents(fd int, buf []byte) (n int, err error) { + var _p0 unsafe.Pointer + if len(buf) > 0 { + _p0 = unsafe.Pointer(&buf[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS_GETDENTS, uintptr(fd), uintptr(_p0), uintptr(len(buf))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Access(path string, mode uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_ACCESS, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Adjtime(delta *Timeval, olddelta *Timeval) (err error) { + _, _, e1 := Syscall(SYS_ADJTIME, uintptr(unsafe.Pointer(delta)), uintptr(unsafe.Pointer(olddelta)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Chdir(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_CHDIR, uintptr(unsafe.Pointer(_p0)), 0, 0) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Chflags(path string, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_CHFLAGS, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Chmod(path string, mode uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_CHMOD, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Chown(path string, uid int, gid int) (err error) { + var _p0 *byte + _p0, err = 
BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_CHOWN, uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid)) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Chroot(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_CHROOT, uintptr(unsafe.Pointer(_p0)), 0, 0) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Close(fd int) (err error) { + _, _, e1 := Syscall(SYS_CLOSE, uintptr(fd), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Dup(fd int) (nfd int, err error) { + r0, _, e1 := Syscall(SYS_DUP, uintptr(fd), 0, 0) + nfd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Dup2(from int, to int) (err error) { + _, _, e1 := Syscall(SYS_DUP2, uintptr(from), uintptr(to), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Exit(code int) { + Syscall(SYS_EXIT, uintptr(code), 0, 0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fchdir(fd int) (err error) { + _, _, e1 := Syscall(SYS_FCHDIR, uintptr(fd), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fchflags(fd int, flags int) (err error) { + _, _, e1 := Syscall(SYS_FCHFLAGS, uintptr(fd), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fchmod(fd int, mode uint32) (err error) { + _, _, e1 := Syscall(SYS_FCHMOD, uintptr(fd), uintptr(mode), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fchown(fd int, uid int, gid int) (err error) { + _, _, e1 := Syscall(SYS_FCHOWN, uintptr(fd), uintptr(uid), uintptr(gid)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Flock(fd int, how int) (err error) { + _, _, e1 := Syscall(SYS_FLOCK, uintptr(fd), uintptr(how), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fpathconf(fd int, name int) (val int, err error) { + r0, _, e1 := Syscall(SYS_FPATHCONF, uintptr(fd), uintptr(name), 0) + val = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fstat(fd int, stat *Stat_t) (err error) { + _, _, e1 := Syscall(SYS_FSTAT, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fsync(fd int) (err error) { + _, _, e1 := Syscall(SYS_FSYNC, uintptr(fd), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Ftruncate(fd int, length int64) (err error) { + _, _, e1 := Syscall6(SYS_FTRUNCATE, uintptr(fd), 0, uintptr(length), uintptr(length>>32), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS 
GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getegid() (egid int) { + r0, _, _ := RawSyscall(SYS_GETEGID, 0, 0, 0) + egid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Geteuid() (uid int) { + r0, _, _ := RawSyscall(SYS_GETEUID, 0, 0, 0) + uid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getgid() (gid int) { + r0, _, _ := RawSyscall(SYS_GETGID, 0, 0, 0) + gid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getpgid(pid int) (pgid int, err error) { + r0, _, e1 := RawSyscall(SYS_GETPGID, uintptr(pid), 0, 0) + pgid = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getpgrp() (pgrp int) { + r0, _, _ := RawSyscall(SYS_GETPGRP, 0, 0, 0) + pgrp = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getpid() (pid int) { + r0, _, _ := RawSyscall(SYS_GETPID, 0, 0, 0) + pid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getppid() (ppid int) { + r0, _, _ := RawSyscall(SYS_GETPPID, 0, 0, 0) + ppid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getpriority(which int, who int) (prio int, err error) { + r0, _, e1 := Syscall(SYS_GETPRIORITY, uintptr(which), uintptr(who), 0) + prio = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getrlimit(which int, lim *Rlimit) (err error) { + _, _, e1 := RawSyscall(SYS_GETRLIMIT, uintptr(which), uintptr(unsafe.Pointer(lim)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getrusage(who int, rusage *Rusage) (err error) { + _, _, e1 := RawSyscall(SYS_GETRUSAGE, uintptr(who), uintptr(unsafe.Pointer(rusage)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getsid(pid int) (sid int, err error) { + r0, _, e1 := RawSyscall(SYS_GETSID, uintptr(pid), 0, 0) + sid = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Gettimeofday(tv *Timeval) (err error) { + _, _, e1 := RawSyscall(SYS_GETTIMEOFDAY, uintptr(unsafe.Pointer(tv)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getuid() (uid int) { + r0, _, _ := RawSyscall(SYS_GETUID, 0, 0, 0) + uid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Issetugid() (tainted bool) { + r0, _, _ := Syscall(SYS_ISSETUGID, 0, 0, 0) + tainted = bool(r0 != 0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Kill(pid int, signum syscall.Signal) (err error) { + _, _, e1 := Syscall(SYS_KILL, uintptr(pid), uintptr(signum), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Kqueue() (fd int, err error) { + r0, _, e1 := Syscall(SYS_KQUEUE, 0, 0, 0) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Lchown(path string, uid int, gid int) (err error) { + var _p0 *byte + _p0, err = 
BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_LCHOWN, uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid)) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Link(path string, link string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(link) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_LINK, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) + use(unsafe.Pointer(_p0)) + use(unsafe.Pointer(_p1)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Listen(s int, backlog int) (err error) { + _, _, e1 := Syscall(SYS_LISTEN, uintptr(s), uintptr(backlog), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Lstat(path string, stat *Stat_t) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_LSTAT, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mkdir(path string, mode uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_MKDIR, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mkfifo(path string, mode uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_MKFIFO, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mknod(path string, mode uint32, dev int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_MKNOD, uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev)) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mlock(b []byte) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall(SYS_MLOCK, uintptr(_p0), uintptr(len(b)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mlockall(flags int) (err error) { + _, _, e1 := Syscall(SYS_MLOCKALL, uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mprotect(b []byte, prot int) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall(SYS_MPROTECT, uintptr(_p0), uintptr(len(b)), uintptr(prot)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Munlock(b []byte) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = 
unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall(SYS_MUNLOCK, uintptr(_p0), uintptr(len(b)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Munlockall() (err error) { + _, _, e1 := Syscall(SYS_MUNLOCKALL, 0, 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Nanosleep(time *Timespec, leftover *Timespec) (err error) { + _, _, e1 := Syscall(SYS_NANOSLEEP, uintptr(unsafe.Pointer(time)), uintptr(unsafe.Pointer(leftover)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Open(path string, mode int, perm uint32) (fd int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + r0, _, e1 := Syscall(SYS_OPEN, uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(perm)) + use(unsafe.Pointer(_p0)) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Pathconf(path string, name int) (val int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + r0, _, e1 := Syscall(SYS_PATHCONF, uintptr(unsafe.Pointer(_p0)), uintptr(name), 0) + use(unsafe.Pointer(_p0)) + val = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Pread(fd int, p []byte, offset int64) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_PREAD, uintptr(fd), uintptr(_p0), uintptr(len(p)), 0, uintptr(offset), uintptr(offset>>32)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Pwrite(fd int, p []byte, offset int64) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_PWRITE, uintptr(fd), uintptr(_p0), uintptr(len(p)), 0, uintptr(offset), uintptr(offset>>32)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func read(fd int, p []byte) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS_READ, uintptr(fd), uintptr(_p0), uintptr(len(p))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Readlink(path string, buf []byte) (n int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 unsafe.Pointer + if len(buf) > 0 { + _p1 = unsafe.Pointer(&buf[0]) + } else { + _p1 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS_READLINK, uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(buf))) + use(unsafe.Pointer(_p0)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Rename(from string, to string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(from) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(to) + if err != 
nil { + return + } + _, _, e1 := Syscall(SYS_RENAME, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) + use(unsafe.Pointer(_p0)) + use(unsafe.Pointer(_p1)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Revoke(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_REVOKE, uintptr(unsafe.Pointer(_p0)), 0, 0) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Rmdir(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_RMDIR, uintptr(unsafe.Pointer(_p0)), 0, 0) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Seek(fd int, offset int64, whence int) (newoffset int64, err error) { + r0, r1, e1 := Syscall6(SYS_LSEEK, uintptr(fd), 0, uintptr(offset), uintptr(offset>>32), uintptr(whence), 0) + newoffset = int64(int64(r1)<<32 | int64(r0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Select(n int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (err error) { + _, _, e1 := Syscall6(SYS_SELECT, uintptr(n), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setegid(egid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETEGID, uintptr(egid), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Seteuid(euid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETEUID, uintptr(euid), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setgid(gid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETGID, uintptr(gid), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setpgid(pid int, pgid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETPGID, uintptr(pid), uintptr(pgid), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setpriority(which int, who int, prio int) (err error) { + _, _, e1 := Syscall(SYS_SETPRIORITY, uintptr(which), uintptr(who), uintptr(prio)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setregid(rgid int, egid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETREGID, uintptr(rgid), uintptr(egid), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setreuid(ruid int, euid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETREUID, uintptr(ruid), uintptr(euid), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setrlimit(which int, lim *Rlimit) (err error) { + _, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(which), uintptr(unsafe.Pointer(lim)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS 
FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setsid() (pid int, err error) { + r0, _, e1 := RawSyscall(SYS_SETSID, 0, 0, 0) + pid = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Settimeofday(tp *Timeval) (err error) { + _, _, e1 := RawSyscall(SYS_SETTIMEOFDAY, uintptr(unsafe.Pointer(tp)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setuid(uid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETUID, uintptr(uid), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Stat(path string, stat *Stat_t) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_STAT, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Symlink(path string, link string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(link) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_SYMLINK, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) + use(unsafe.Pointer(_p0)) + use(unsafe.Pointer(_p1)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Sync() (err error) { + _, _, e1 := Syscall(SYS_SYNC, 0, 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Truncate(path string, length int64) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_TRUNCATE, uintptr(unsafe.Pointer(_p0)), 0, uintptr(length), uintptr(length>>32), 0, 0) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Umask(newmask int) (oldmask int) { + r0, _, _ := Syscall(SYS_UMASK, uintptr(newmask), 0, 0) + oldmask = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Unlink(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_UNLINK, uintptr(unsafe.Pointer(_p0)), 0, 0) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Unmount(path string, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_UNMOUNT, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func write(fd int, p []byte) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(_p0), uintptr(len(p))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func mmap(addr uintptr, length uintptr, prot 
int, flag int, fd int, pos int64) (ret uintptr, err error) { + r0, _, e1 := Syscall9(SYS_MMAP, uintptr(addr), uintptr(length), uintptr(prot), uintptr(flag), uintptr(fd), 0, uintptr(pos), uintptr(pos>>32), 0) + ret = uintptr(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func munmap(addr uintptr, length uintptr) (err error) { + _, _, e1 := Syscall(SYS_MUNMAP, uintptr(addr), uintptr(length), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func readlen(fd int, buf *byte, nbuf int) (n int, err error) { + r0, _, e1 := Syscall(SYS_READ, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func writelen(fd int, buf *byte, nbuf int) (n int, err error) { + r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} diff --git a/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zsyscall_netbsd_amd64.go b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zsyscall_netbsd_amd64.go new file mode 100644 index 000000000..03f31b973 --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zsyscall_netbsd_amd64.go @@ -0,0 +1,1326 @@ +// mksyscall.pl -netbsd syscall_bsd.go syscall_netbsd.go syscall_netbsd_amd64.go +// MACHINE GENERATED BY THE COMMAND ABOVE; DO NOT EDIT + +// +build amd64,netbsd + +package unix + +import ( + "syscall" + "unsafe" +) + +var _ syscall.Errno + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getgroups(ngid int, gid *_Gid_t) (n int, err error) { + r0, _, e1 := RawSyscall(SYS_GETGROUPS, uintptr(ngid), uintptr(unsafe.Pointer(gid)), 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func setgroups(ngid int, gid *_Gid_t) (err error) { + _, _, e1 := RawSyscall(SYS_SETGROUPS, uintptr(ngid), uintptr(unsafe.Pointer(gid)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func wait4(pid int, wstatus *_C_int, options int, rusage *Rusage) (wpid int, err error) { + r0, _, e1 := Syscall6(SYS_WAIT4, uintptr(pid), uintptr(unsafe.Pointer(wstatus)), uintptr(options), uintptr(unsafe.Pointer(rusage)), 0, 0) + wpid = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error) { + r0, _, e1 := Syscall(SYS_ACCEPT, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func bind(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) { + _, _, e1 := Syscall(SYS_BIND, uintptr(s), uintptr(addr), uintptr(addrlen)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func connect(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) { + _, _, e1 := Syscall(SYS_CONNECT, uintptr(s), uintptr(addr), uintptr(addrlen)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS 
FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func socket(domain int, typ int, proto int) (fd int, err error) { + r0, _, e1 := RawSyscall(SYS_SOCKET, uintptr(domain), uintptr(typ), uintptr(proto)) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getsockopt(s int, level int, name int, val unsafe.Pointer, vallen *_Socklen) (err error) { + _, _, e1 := Syscall6(SYS_GETSOCKOPT, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(unsafe.Pointer(vallen)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func setsockopt(s int, level int, name int, val unsafe.Pointer, vallen uintptr) (err error) { + _, _, e1 := Syscall6(SYS_SETSOCKOPT, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(vallen), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getpeername(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) { + _, _, e1 := RawSyscall(SYS_GETPEERNAME, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getsockname(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) { + _, _, e1 := RawSyscall(SYS_GETSOCKNAME, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Shutdown(s int, how int) (err error) { + _, _, e1 := Syscall(SYS_SHUTDOWN, uintptr(s), uintptr(how), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func socketpair(domain int, typ int, proto int, fd *[2]int32) (err error) { + _, _, e1 := RawSyscall6(SYS_SOCKETPAIR, uintptr(domain), uintptr(typ), uintptr(proto), uintptr(unsafe.Pointer(fd)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Socklen) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_RECVFROM, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(flags), uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(fromlen))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func sendto(s int, buf []byte, flags int, to unsafe.Pointer, addrlen _Socklen) (err error) { + var _p0 unsafe.Pointer + if len(buf) > 0 { + _p0 = unsafe.Pointer(&buf[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall6(SYS_SENDTO, uintptr(s), uintptr(_p0), uintptr(len(buf)), uintptr(flags), uintptr(to), uintptr(addrlen)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func recvmsg(s int, msg *Msghdr, flags int) (n int, err error) { + r0, _, e1 := Syscall(SYS_RECVMSG, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func sendmsg(s int, msg *Msghdr, flags 
int) (n int, err error) { + r0, _, e1 := Syscall(SYS_SENDMSG, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func kevent(kq int, change unsafe.Pointer, nchange int, event unsafe.Pointer, nevent int, timeout *Timespec) (n int, err error) { + r0, _, e1 := Syscall6(SYS_KEVENT, uintptr(kq), uintptr(change), uintptr(nchange), uintptr(event), uintptr(nevent), uintptr(unsafe.Pointer(timeout))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { + var _p0 unsafe.Pointer + if len(mib) > 0 { + _p0 = unsafe.Pointer(&mib[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall6(SYS___SYSCTL, uintptr(_p0), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func utimes(path string, timeval *[2]Timeval) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_UTIMES, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(timeval)), 0) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func futimes(fd int, timeval *[2]Timeval) (err error) { + _, _, e1 := Syscall(SYS_FUTIMES, uintptr(fd), uintptr(unsafe.Pointer(timeval)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func fcntl(fd int, cmd int, arg int) (val int, err error) { + r0, _, e1 := Syscall(SYS_FCNTL, uintptr(fd), uintptr(cmd), uintptr(arg)) + val = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func pipe() (fd1 int, fd2 int, err error) { + r0, r1, e1 := RawSyscall(SYS_PIPE, 0, 0, 0) + fd1 = int(r0) + fd2 = int(r1) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getdents(fd int, buf []byte) (n int, err error) { + var _p0 unsafe.Pointer + if len(buf) > 0 { + _p0 = unsafe.Pointer(&buf[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS_GETDENTS, uintptr(fd), uintptr(_p0), uintptr(len(buf))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Access(path string, mode uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_ACCESS, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Adjtime(delta *Timeval, olddelta *Timeval) (err error) { + _, _, e1 := Syscall(SYS_ADJTIME, uintptr(unsafe.Pointer(delta)), uintptr(unsafe.Pointer(olddelta)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Chdir(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + 
_, _, e1 := Syscall(SYS_CHDIR, uintptr(unsafe.Pointer(_p0)), 0, 0) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Chflags(path string, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_CHFLAGS, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Chmod(path string, mode uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_CHMOD, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Chown(path string, uid int, gid int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_CHOWN, uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid)) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Chroot(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_CHROOT, uintptr(unsafe.Pointer(_p0)), 0, 0) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Close(fd int) (err error) { + _, _, e1 := Syscall(SYS_CLOSE, uintptr(fd), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Dup(fd int) (nfd int, err error) { + r0, _, e1 := Syscall(SYS_DUP, uintptr(fd), 0, 0) + nfd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Dup2(from int, to int) (err error) { + _, _, e1 := Syscall(SYS_DUP2, uintptr(from), uintptr(to), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Exit(code int) { + Syscall(SYS_EXIT, uintptr(code), 0, 0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fchdir(fd int) (err error) { + _, _, e1 := Syscall(SYS_FCHDIR, uintptr(fd), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fchflags(fd int, flags int) (err error) { + _, _, e1 := Syscall(SYS_FCHFLAGS, uintptr(fd), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fchmod(fd int, mode uint32) (err error) { + _, _, e1 := Syscall(SYS_FCHMOD, uintptr(fd), uintptr(mode), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fchown(fd int, uid int, gid int) (err error) { + _, _, e1 := Syscall(SYS_FCHOWN, uintptr(fd), uintptr(uid), uintptr(gid)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Flock(fd int, how int) (err error) { + _, _, e1 := Syscall(SYS_FLOCK, uintptr(fd), uintptr(how), 0) + if e1 != 0 { + err = 
errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fpathconf(fd int, name int) (val int, err error) { + r0, _, e1 := Syscall(SYS_FPATHCONF, uintptr(fd), uintptr(name), 0) + val = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fstat(fd int, stat *Stat_t) (err error) { + _, _, e1 := Syscall(SYS_FSTAT, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fsync(fd int) (err error) { + _, _, e1 := Syscall(SYS_FSYNC, uintptr(fd), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Ftruncate(fd int, length int64) (err error) { + _, _, e1 := Syscall(SYS_FTRUNCATE, uintptr(fd), 0, uintptr(length)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getegid() (egid int) { + r0, _, _ := RawSyscall(SYS_GETEGID, 0, 0, 0) + egid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Geteuid() (uid int) { + r0, _, _ := RawSyscall(SYS_GETEUID, 0, 0, 0) + uid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getgid() (gid int) { + r0, _, _ := RawSyscall(SYS_GETGID, 0, 0, 0) + gid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getpgid(pid int) (pgid int, err error) { + r0, _, e1 := RawSyscall(SYS_GETPGID, uintptr(pid), 0, 0) + pgid = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getpgrp() (pgrp int) { + r0, _, _ := RawSyscall(SYS_GETPGRP, 0, 0, 0) + pgrp = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getpid() (pid int) { + r0, _, _ := RawSyscall(SYS_GETPID, 0, 0, 0) + pid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getppid() (ppid int) { + r0, _, _ := RawSyscall(SYS_GETPPID, 0, 0, 0) + ppid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getpriority(which int, who int) (prio int, err error) { + r0, _, e1 := Syscall(SYS_GETPRIORITY, uintptr(which), uintptr(who), 0) + prio = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getrlimit(which int, lim *Rlimit) (err error) { + _, _, e1 := RawSyscall(SYS_GETRLIMIT, uintptr(which), uintptr(unsafe.Pointer(lim)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getrusage(who int, rusage *Rusage) (err error) { + _, _, e1 := RawSyscall(SYS_GETRUSAGE, uintptr(who), uintptr(unsafe.Pointer(rusage)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getsid(pid int) (sid int, err error) { + r0, _, e1 := RawSyscall(SYS_GETSID, uintptr(pid), 0, 0) + sid = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Gettimeofday(tv *Timeval) (err error) { + _, _, e1 := RawSyscall(SYS_GETTIMEOFDAY, uintptr(unsafe.Pointer(tv)), 0, 0) + if e1 != 0 { + err = 
errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getuid() (uid int) { + r0, _, _ := RawSyscall(SYS_GETUID, 0, 0, 0) + uid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Issetugid() (tainted bool) { + r0, _, _ := Syscall(SYS_ISSETUGID, 0, 0, 0) + tainted = bool(r0 != 0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Kill(pid int, signum syscall.Signal) (err error) { + _, _, e1 := Syscall(SYS_KILL, uintptr(pid), uintptr(signum), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Kqueue() (fd int, err error) { + r0, _, e1 := Syscall(SYS_KQUEUE, 0, 0, 0) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Lchown(path string, uid int, gid int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_LCHOWN, uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid)) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Link(path string, link string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(link) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_LINK, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) + use(unsafe.Pointer(_p0)) + use(unsafe.Pointer(_p1)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Listen(s int, backlog int) (err error) { + _, _, e1 := Syscall(SYS_LISTEN, uintptr(s), uintptr(backlog), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Lstat(path string, stat *Stat_t) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_LSTAT, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mkdir(path string, mode uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_MKDIR, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mkfifo(path string, mode uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_MKFIFO, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mknod(path string, mode uint32, dev int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_MKNOD, uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev)) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mlock(b 
[]byte) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall(SYS_MLOCK, uintptr(_p0), uintptr(len(b)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mlockall(flags int) (err error) { + _, _, e1 := Syscall(SYS_MLOCKALL, uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mprotect(b []byte, prot int) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall(SYS_MPROTECT, uintptr(_p0), uintptr(len(b)), uintptr(prot)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Munlock(b []byte) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall(SYS_MUNLOCK, uintptr(_p0), uintptr(len(b)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Munlockall() (err error) { + _, _, e1 := Syscall(SYS_MUNLOCKALL, 0, 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Nanosleep(time *Timespec, leftover *Timespec) (err error) { + _, _, e1 := Syscall(SYS_NANOSLEEP, uintptr(unsafe.Pointer(time)), uintptr(unsafe.Pointer(leftover)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Open(path string, mode int, perm uint32) (fd int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + r0, _, e1 := Syscall(SYS_OPEN, uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(perm)) + use(unsafe.Pointer(_p0)) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Pathconf(path string, name int) (val int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + r0, _, e1 := Syscall(SYS_PATHCONF, uintptr(unsafe.Pointer(_p0)), uintptr(name), 0) + use(unsafe.Pointer(_p0)) + val = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Pread(fd int, p []byte, offset int64) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_PREAD, uintptr(fd), uintptr(_p0), uintptr(len(p)), 0, uintptr(offset), 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Pwrite(fd int, p []byte, offset int64) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_PWRITE, uintptr(fd), uintptr(_p0), uintptr(len(p)), 0, uintptr(offset), 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func read(fd int, p []byte) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + 
_p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS_READ, uintptr(fd), uintptr(_p0), uintptr(len(p))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Readlink(path string, buf []byte) (n int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 unsafe.Pointer + if len(buf) > 0 { + _p1 = unsafe.Pointer(&buf[0]) + } else { + _p1 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS_READLINK, uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(buf))) + use(unsafe.Pointer(_p0)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Rename(from string, to string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(from) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(to) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_RENAME, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) + use(unsafe.Pointer(_p0)) + use(unsafe.Pointer(_p1)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Revoke(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_REVOKE, uintptr(unsafe.Pointer(_p0)), 0, 0) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Rmdir(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_RMDIR, uintptr(unsafe.Pointer(_p0)), 0, 0) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Seek(fd int, offset int64, whence int) (newoffset int64, err error) { + r0, _, e1 := Syscall6(SYS_LSEEK, uintptr(fd), 0, uintptr(offset), uintptr(whence), 0, 0) + newoffset = int64(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Select(n int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (err error) { + _, _, e1 := Syscall6(SYS_SELECT, uintptr(n), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setegid(egid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETEGID, uintptr(egid), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Seteuid(euid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETEUID, uintptr(euid), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setgid(gid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETGID, uintptr(gid), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setpgid(pid int, pgid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETPGID, uintptr(pid), uintptr(pgid), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setpriority(which 
int, who int, prio int) (err error) { + _, _, e1 := Syscall(SYS_SETPRIORITY, uintptr(which), uintptr(who), uintptr(prio)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setregid(rgid int, egid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETREGID, uintptr(rgid), uintptr(egid), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setreuid(ruid int, euid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETREUID, uintptr(ruid), uintptr(euid), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setrlimit(which int, lim *Rlimit) (err error) { + _, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(which), uintptr(unsafe.Pointer(lim)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setsid() (pid int, err error) { + r0, _, e1 := RawSyscall(SYS_SETSID, 0, 0, 0) + pid = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Settimeofday(tp *Timeval) (err error) { + _, _, e1 := RawSyscall(SYS_SETTIMEOFDAY, uintptr(unsafe.Pointer(tp)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setuid(uid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETUID, uintptr(uid), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Stat(path string, stat *Stat_t) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_STAT, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Symlink(path string, link string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(link) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_SYMLINK, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) + use(unsafe.Pointer(_p0)) + use(unsafe.Pointer(_p1)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Sync() (err error) { + _, _, e1 := Syscall(SYS_SYNC, 0, 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Truncate(path string, length int64) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_TRUNCATE, uintptr(unsafe.Pointer(_p0)), 0, uintptr(length)) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Umask(newmask int) (oldmask int) { + r0, _, _ := Syscall(SYS_UMASK, uintptr(newmask), 0, 0) + oldmask = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Unlink(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_UNLINK, uintptr(unsafe.Pointer(_p0)), 0, 0) + use(unsafe.Pointer(_p0)) + if e1 != 0 
{ + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Unmount(path string, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_UNMOUNT, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func write(fd int, p []byte) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(_p0), uintptr(len(p))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func mmap(addr uintptr, length uintptr, prot int, flag int, fd int, pos int64) (ret uintptr, err error) { + r0, _, e1 := Syscall9(SYS_MMAP, uintptr(addr), uintptr(length), uintptr(prot), uintptr(flag), uintptr(fd), 0, uintptr(pos), 0, 0) + ret = uintptr(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func munmap(addr uintptr, length uintptr) (err error) { + _, _, e1 := Syscall(SYS_MUNMAP, uintptr(addr), uintptr(length), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func readlen(fd int, buf *byte, nbuf int) (n int, err error) { + r0, _, e1 := Syscall(SYS_READ, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func writelen(fd int, buf *byte, nbuf int) (n int, err error) { + r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} diff --git a/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zsyscall_netbsd_arm.go b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zsyscall_netbsd_arm.go new file mode 100644 index 000000000..84dc61cfa --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zsyscall_netbsd_arm.go @@ -0,0 +1,1326 @@ +// mksyscall.pl -l32 -arm syscall_bsd.go syscall_netbsd.go syscall_netbsd_arm.go +// MACHINE GENERATED BY THE COMMAND ABOVE; DO NOT EDIT + +// +build arm,netbsd + +package unix + +import ( + "syscall" + "unsafe" +) + +var _ syscall.Errno + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getgroups(ngid int, gid *_Gid_t) (n int, err error) { + r0, _, e1 := RawSyscall(SYS_GETGROUPS, uintptr(ngid), uintptr(unsafe.Pointer(gid)), 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func setgroups(ngid int, gid *_Gid_t) (err error) { + _, _, e1 := RawSyscall(SYS_SETGROUPS, uintptr(ngid), uintptr(unsafe.Pointer(gid)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func wait4(pid int, wstatus *_C_int, options int, rusage *Rusage) (wpid int, err error) { + r0, _, e1 := Syscall6(SYS_WAIT4, uintptr(pid), uintptr(unsafe.Pointer(wstatus)), uintptr(options), uintptr(unsafe.Pointer(rusage)), 0, 0) + wpid = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// 
THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error) { + r0, _, e1 := Syscall(SYS_ACCEPT, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func bind(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) { + _, _, e1 := Syscall(SYS_BIND, uintptr(s), uintptr(addr), uintptr(addrlen)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func connect(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) { + _, _, e1 := Syscall(SYS_CONNECT, uintptr(s), uintptr(addr), uintptr(addrlen)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func socket(domain int, typ int, proto int) (fd int, err error) { + r0, _, e1 := RawSyscall(SYS_SOCKET, uintptr(domain), uintptr(typ), uintptr(proto)) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getsockopt(s int, level int, name int, val unsafe.Pointer, vallen *_Socklen) (err error) { + _, _, e1 := Syscall6(SYS_GETSOCKOPT, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(unsafe.Pointer(vallen)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func setsockopt(s int, level int, name int, val unsafe.Pointer, vallen uintptr) (err error) { + _, _, e1 := Syscall6(SYS_SETSOCKOPT, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(vallen), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getpeername(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) { + _, _, e1 := RawSyscall(SYS_GETPEERNAME, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getsockname(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) { + _, _, e1 := RawSyscall(SYS_GETSOCKNAME, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Shutdown(s int, how int) (err error) { + _, _, e1 := Syscall(SYS_SHUTDOWN, uintptr(s), uintptr(how), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func socketpair(domain int, typ int, proto int, fd *[2]int32) (err error) { + _, _, e1 := RawSyscall6(SYS_SOCKETPAIR, uintptr(domain), uintptr(typ), uintptr(proto), uintptr(unsafe.Pointer(fd)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Socklen) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_RECVFROM, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(flags), uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(fromlen))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + 
return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func sendto(s int, buf []byte, flags int, to unsafe.Pointer, addrlen _Socklen) (err error) { + var _p0 unsafe.Pointer + if len(buf) > 0 { + _p0 = unsafe.Pointer(&buf[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall6(SYS_SENDTO, uintptr(s), uintptr(_p0), uintptr(len(buf)), uintptr(flags), uintptr(to), uintptr(addrlen)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func recvmsg(s int, msg *Msghdr, flags int) (n int, err error) { + r0, _, e1 := Syscall(SYS_RECVMSG, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func sendmsg(s int, msg *Msghdr, flags int) (n int, err error) { + r0, _, e1 := Syscall(SYS_SENDMSG, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func kevent(kq int, change unsafe.Pointer, nchange int, event unsafe.Pointer, nevent int, timeout *Timespec) (n int, err error) { + r0, _, e1 := Syscall6(SYS_KEVENT, uintptr(kq), uintptr(change), uintptr(nchange), uintptr(event), uintptr(nevent), uintptr(unsafe.Pointer(timeout))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { + var _p0 unsafe.Pointer + if len(mib) > 0 { + _p0 = unsafe.Pointer(&mib[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall6(SYS___SYSCTL, uintptr(_p0), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func utimes(path string, timeval *[2]Timeval) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_UTIMES, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(timeval)), 0) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func futimes(fd int, timeval *[2]Timeval) (err error) { + _, _, e1 := Syscall(SYS_FUTIMES, uintptr(fd), uintptr(unsafe.Pointer(timeval)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func fcntl(fd int, cmd int, arg int) (val int, err error) { + r0, _, e1 := Syscall(SYS_FCNTL, uintptr(fd), uintptr(cmd), uintptr(arg)) + val = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func pipe() (fd1 int, fd2 int, err error) { + r0, r1, e1 := RawSyscall(SYS_PIPE, 0, 0, 0) + fd1 = int(r0) + fd2 = int(r1) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getdents(fd int, buf []byte) (n int, err error) { + var _p0 unsafe.Pointer + if len(buf) > 0 { + _p0 = unsafe.Pointer(&buf[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS_GETDENTS, uintptr(fd), uintptr(_p0), uintptr(len(buf))) + n = int(r0) + if e1 != 0 { + err = 
errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Access(path string, mode uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_ACCESS, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Adjtime(delta *Timeval, olddelta *Timeval) (err error) { + _, _, e1 := Syscall(SYS_ADJTIME, uintptr(unsafe.Pointer(delta)), uintptr(unsafe.Pointer(olddelta)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Chdir(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_CHDIR, uintptr(unsafe.Pointer(_p0)), 0, 0) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Chflags(path string, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_CHFLAGS, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Chmod(path string, mode uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_CHMOD, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Chown(path string, uid int, gid int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_CHOWN, uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid)) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Chroot(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_CHROOT, uintptr(unsafe.Pointer(_p0)), 0, 0) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Close(fd int) (err error) { + _, _, e1 := Syscall(SYS_CLOSE, uintptr(fd), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Dup(fd int) (nfd int, err error) { + r0, _, e1 := Syscall(SYS_DUP, uintptr(fd), 0, 0) + nfd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Dup2(from int, to int) (err error) { + _, _, e1 := Syscall(SYS_DUP2, uintptr(from), uintptr(to), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Exit(code int) { + Syscall(SYS_EXIT, uintptr(code), 0, 0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fchdir(fd int) (err error) { + _, _, e1 := Syscall(SYS_FCHDIR, uintptr(fd), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND 
AT THE TOP; DO NOT EDIT + +func Fchflags(fd int, flags int) (err error) { + _, _, e1 := Syscall(SYS_FCHFLAGS, uintptr(fd), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fchmod(fd int, mode uint32) (err error) { + _, _, e1 := Syscall(SYS_FCHMOD, uintptr(fd), uintptr(mode), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fchown(fd int, uid int, gid int) (err error) { + _, _, e1 := Syscall(SYS_FCHOWN, uintptr(fd), uintptr(uid), uintptr(gid)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Flock(fd int, how int) (err error) { + _, _, e1 := Syscall(SYS_FLOCK, uintptr(fd), uintptr(how), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fpathconf(fd int, name int) (val int, err error) { + r0, _, e1 := Syscall(SYS_FPATHCONF, uintptr(fd), uintptr(name), 0) + val = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fstat(fd int, stat *Stat_t) (err error) { + _, _, e1 := Syscall(SYS_FSTAT, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fsync(fd int) (err error) { + _, _, e1 := Syscall(SYS_FSYNC, uintptr(fd), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Ftruncate(fd int, length int64) (err error) { + _, _, e1 := Syscall6(SYS_FTRUNCATE, uintptr(fd), 0, uintptr(length), uintptr(length>>32), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getegid() (egid int) { + r0, _, _ := RawSyscall(SYS_GETEGID, 0, 0, 0) + egid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Geteuid() (uid int) { + r0, _, _ := RawSyscall(SYS_GETEUID, 0, 0, 0) + uid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getgid() (gid int) { + r0, _, _ := RawSyscall(SYS_GETGID, 0, 0, 0) + gid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getpgid(pid int) (pgid int, err error) { + r0, _, e1 := RawSyscall(SYS_GETPGID, uintptr(pid), 0, 0) + pgid = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getpgrp() (pgrp int) { + r0, _, _ := RawSyscall(SYS_GETPGRP, 0, 0, 0) + pgrp = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getpid() (pid int) { + r0, _, _ := RawSyscall(SYS_GETPID, 0, 0, 0) + pid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getppid() (ppid int) { + r0, _, _ := RawSyscall(SYS_GETPPID, 0, 0, 0) + ppid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getpriority(which int, who int) (prio int, err error) { + r0, _, e1 := Syscall(SYS_GETPRIORITY, uintptr(which), uintptr(who), 0) + prio = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getrlimit(which 
int, lim *Rlimit) (err error) { + _, _, e1 := RawSyscall(SYS_GETRLIMIT, uintptr(which), uintptr(unsafe.Pointer(lim)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getrusage(who int, rusage *Rusage) (err error) { + _, _, e1 := RawSyscall(SYS_GETRUSAGE, uintptr(who), uintptr(unsafe.Pointer(rusage)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getsid(pid int) (sid int, err error) { + r0, _, e1 := RawSyscall(SYS_GETSID, uintptr(pid), 0, 0) + sid = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Gettimeofday(tv *Timeval) (err error) { + _, _, e1 := RawSyscall(SYS_GETTIMEOFDAY, uintptr(unsafe.Pointer(tv)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getuid() (uid int) { + r0, _, _ := RawSyscall(SYS_GETUID, 0, 0, 0) + uid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Issetugid() (tainted bool) { + r0, _, _ := Syscall(SYS_ISSETUGID, 0, 0, 0) + tainted = bool(r0 != 0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Kill(pid int, signum syscall.Signal) (err error) { + _, _, e1 := Syscall(SYS_KILL, uintptr(pid), uintptr(signum), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Kqueue() (fd int, err error) { + r0, _, e1 := Syscall(SYS_KQUEUE, 0, 0, 0) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Lchown(path string, uid int, gid int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_LCHOWN, uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid)) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Link(path string, link string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(link) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_LINK, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) + use(unsafe.Pointer(_p0)) + use(unsafe.Pointer(_p1)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Listen(s int, backlog int) (err error) { + _, _, e1 := Syscall(SYS_LISTEN, uintptr(s), uintptr(backlog), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Lstat(path string, stat *Stat_t) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_LSTAT, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mkdir(path string, mode uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_MKDIR, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) + 
use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mkfifo(path string, mode uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_MKFIFO, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mknod(path string, mode uint32, dev int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_MKNOD, uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev)) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mlock(b []byte) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall(SYS_MLOCK, uintptr(_p0), uintptr(len(b)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mlockall(flags int) (err error) { + _, _, e1 := Syscall(SYS_MLOCKALL, uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mprotect(b []byte, prot int) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall(SYS_MPROTECT, uintptr(_p0), uintptr(len(b)), uintptr(prot)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Munlock(b []byte) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall(SYS_MUNLOCK, uintptr(_p0), uintptr(len(b)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Munlockall() (err error) { + _, _, e1 := Syscall(SYS_MUNLOCKALL, 0, 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Nanosleep(time *Timespec, leftover *Timespec) (err error) { + _, _, e1 := Syscall(SYS_NANOSLEEP, uintptr(unsafe.Pointer(time)), uintptr(unsafe.Pointer(leftover)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Open(path string, mode int, perm uint32) (fd int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + r0, _, e1 := Syscall(SYS_OPEN, uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(perm)) + use(unsafe.Pointer(_p0)) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Pathconf(path string, name int) (val int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + r0, _, e1 := Syscall(SYS_PATHCONF, uintptr(unsafe.Pointer(_p0)), uintptr(name), 0) + use(unsafe.Pointer(_p0)) + val = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Pread(fd int, p []byte, offset int64) (n int, err error) { + 
var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_PREAD, uintptr(fd), uintptr(_p0), uintptr(len(p)), 0, uintptr(offset), uintptr(offset>>32)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Pwrite(fd int, p []byte, offset int64) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_PWRITE, uintptr(fd), uintptr(_p0), uintptr(len(p)), 0, uintptr(offset), uintptr(offset>>32)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func read(fd int, p []byte) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS_READ, uintptr(fd), uintptr(_p0), uintptr(len(p))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Readlink(path string, buf []byte) (n int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 unsafe.Pointer + if len(buf) > 0 { + _p1 = unsafe.Pointer(&buf[0]) + } else { + _p1 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS_READLINK, uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(buf))) + use(unsafe.Pointer(_p0)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Rename(from string, to string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(from) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(to) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_RENAME, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) + use(unsafe.Pointer(_p0)) + use(unsafe.Pointer(_p1)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Revoke(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_REVOKE, uintptr(unsafe.Pointer(_p0)), 0, 0) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Rmdir(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_RMDIR, uintptr(unsafe.Pointer(_p0)), 0, 0) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Seek(fd int, offset int64, whence int) (newoffset int64, err error) { + r0, r1, e1 := Syscall6(SYS_LSEEK, uintptr(fd), 0, uintptr(offset), uintptr(offset>>32), uintptr(whence), 0) + newoffset = int64(int64(r1)<<32 | int64(r0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Select(n int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (err error) { + _, _, e1 := Syscall6(SYS_SELECT, uintptr(n), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + 
return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setegid(egid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETEGID, uintptr(egid), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Seteuid(euid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETEUID, uintptr(euid), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setgid(gid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETGID, uintptr(gid), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setpgid(pid int, pgid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETPGID, uintptr(pid), uintptr(pgid), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setpriority(which int, who int, prio int) (err error) { + _, _, e1 := Syscall(SYS_SETPRIORITY, uintptr(which), uintptr(who), uintptr(prio)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setregid(rgid int, egid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETREGID, uintptr(rgid), uintptr(egid), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setreuid(ruid int, euid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETREUID, uintptr(ruid), uintptr(euid), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setrlimit(which int, lim *Rlimit) (err error) { + _, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(which), uintptr(unsafe.Pointer(lim)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setsid() (pid int, err error) { + r0, _, e1 := RawSyscall(SYS_SETSID, 0, 0, 0) + pid = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Settimeofday(tp *Timeval) (err error) { + _, _, e1 := RawSyscall(SYS_SETTIMEOFDAY, uintptr(unsafe.Pointer(tp)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setuid(uid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETUID, uintptr(uid), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Stat(path string, stat *Stat_t) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_STAT, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Symlink(path string, link string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(link) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_SYMLINK, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) + use(unsafe.Pointer(_p0)) + use(unsafe.Pointer(_p1)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + 
+func Sync() (err error) { + _, _, e1 := Syscall(SYS_SYNC, 0, 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Truncate(path string, length int64) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_TRUNCATE, uintptr(unsafe.Pointer(_p0)), 0, uintptr(length), uintptr(length>>32), 0, 0) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Umask(newmask int) (oldmask int) { + r0, _, _ := Syscall(SYS_UMASK, uintptr(newmask), 0, 0) + oldmask = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Unlink(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_UNLINK, uintptr(unsafe.Pointer(_p0)), 0, 0) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Unmount(path string, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_UNMOUNT, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func write(fd int, p []byte) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(_p0), uintptr(len(p))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func mmap(addr uintptr, length uintptr, prot int, flag int, fd int, pos int64) (ret uintptr, err error) { + r0, _, e1 := Syscall9(SYS_MMAP, uintptr(addr), uintptr(length), uintptr(prot), uintptr(flag), uintptr(fd), 0, uintptr(pos), uintptr(pos>>32), 0) + ret = uintptr(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func munmap(addr uintptr, length uintptr) (err error) { + _, _, e1 := Syscall(SYS_MUNMAP, uintptr(addr), uintptr(length), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func readlen(fd int, buf *byte, nbuf int) (n int, err error) { + r0, _, e1 := Syscall(SYS_READ, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func writelen(fd int, buf *byte, nbuf int) (n int, err error) { + r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} diff --git a/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zsyscall_openbsd_386.go b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zsyscall_openbsd_386.go new file mode 100644 index 000000000..02b3528a6 --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zsyscall_openbsd_386.go @@ -0,0 +1,1386 @@ +// mksyscall.pl -l32 -openbsd syscall_bsd.go syscall_openbsd.go syscall_openbsd_386.go +// MACHINE GENERATED BY THE 
COMMAND ABOVE; DO NOT EDIT + +// +build 386,openbsd + +package unix + +import ( + "syscall" + "unsafe" +) + +var _ syscall.Errno + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getgroups(ngid int, gid *_Gid_t) (n int, err error) { + r0, _, e1 := RawSyscall(SYS_GETGROUPS, uintptr(ngid), uintptr(unsafe.Pointer(gid)), 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func setgroups(ngid int, gid *_Gid_t) (err error) { + _, _, e1 := RawSyscall(SYS_SETGROUPS, uintptr(ngid), uintptr(unsafe.Pointer(gid)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func wait4(pid int, wstatus *_C_int, options int, rusage *Rusage) (wpid int, err error) { + r0, _, e1 := Syscall6(SYS_WAIT4, uintptr(pid), uintptr(unsafe.Pointer(wstatus)), uintptr(options), uintptr(unsafe.Pointer(rusage)), 0, 0) + wpid = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error) { + r0, _, e1 := Syscall(SYS_ACCEPT, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func bind(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) { + _, _, e1 := Syscall(SYS_BIND, uintptr(s), uintptr(addr), uintptr(addrlen)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func connect(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) { + _, _, e1 := Syscall(SYS_CONNECT, uintptr(s), uintptr(addr), uintptr(addrlen)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func socket(domain int, typ int, proto int) (fd int, err error) { + r0, _, e1 := RawSyscall(SYS_SOCKET, uintptr(domain), uintptr(typ), uintptr(proto)) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getsockopt(s int, level int, name int, val unsafe.Pointer, vallen *_Socklen) (err error) { + _, _, e1 := Syscall6(SYS_GETSOCKOPT, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(unsafe.Pointer(vallen)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func setsockopt(s int, level int, name int, val unsafe.Pointer, vallen uintptr) (err error) { + _, _, e1 := Syscall6(SYS_SETSOCKOPT, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(vallen), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getpeername(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) { + _, _, e1 := RawSyscall(SYS_GETPEERNAME, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getsockname(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) { + _, _, e1 := RawSyscall(SYS_GETSOCKNAME, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE 
IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Shutdown(s int, how int) (err error) { + _, _, e1 := Syscall(SYS_SHUTDOWN, uintptr(s), uintptr(how), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func socketpair(domain int, typ int, proto int, fd *[2]int32) (err error) { + _, _, e1 := RawSyscall6(SYS_SOCKETPAIR, uintptr(domain), uintptr(typ), uintptr(proto), uintptr(unsafe.Pointer(fd)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Socklen) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_RECVFROM, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(flags), uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(fromlen))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func sendto(s int, buf []byte, flags int, to unsafe.Pointer, addrlen _Socklen) (err error) { + var _p0 unsafe.Pointer + if len(buf) > 0 { + _p0 = unsafe.Pointer(&buf[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall6(SYS_SENDTO, uintptr(s), uintptr(_p0), uintptr(len(buf)), uintptr(flags), uintptr(to), uintptr(addrlen)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func recvmsg(s int, msg *Msghdr, flags int) (n int, err error) { + r0, _, e1 := Syscall(SYS_RECVMSG, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func sendmsg(s int, msg *Msghdr, flags int) (n int, err error) { + r0, _, e1 := Syscall(SYS_SENDMSG, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func kevent(kq int, change unsafe.Pointer, nchange int, event unsafe.Pointer, nevent int, timeout *Timespec) (n int, err error) { + r0, _, e1 := Syscall6(SYS_KEVENT, uintptr(kq), uintptr(change), uintptr(nchange), uintptr(event), uintptr(nevent), uintptr(unsafe.Pointer(timeout))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { + var _p0 unsafe.Pointer + if len(mib) > 0 { + _p0 = unsafe.Pointer(&mib[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall6(SYS___SYSCTL, uintptr(_p0), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func utimes(path string, timeval *[2]Timeval) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_UTIMES, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(timeval)), 0) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func futimes(fd int, timeval 
*[2]Timeval) (err error) { + _, _, e1 := Syscall(SYS_FUTIMES, uintptr(fd), uintptr(unsafe.Pointer(timeval)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func fcntl(fd int, cmd int, arg int) (val int, err error) { + r0, _, e1 := Syscall(SYS_FCNTL, uintptr(fd), uintptr(cmd), uintptr(arg)) + val = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func pipe(p *[2]_C_int) (err error) { + _, _, e1 := RawSyscall(SYS_PIPE, uintptr(unsafe.Pointer(p)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getdents(fd int, buf []byte) (n int, err error) { + var _p0 unsafe.Pointer + if len(buf) > 0 { + _p0 = unsafe.Pointer(&buf[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS_GETDENTS, uintptr(fd), uintptr(_p0), uintptr(len(buf))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Access(path string, mode uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_ACCESS, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Adjtime(delta *Timeval, olddelta *Timeval) (err error) { + _, _, e1 := Syscall(SYS_ADJTIME, uintptr(unsafe.Pointer(delta)), uintptr(unsafe.Pointer(olddelta)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Chdir(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_CHDIR, uintptr(unsafe.Pointer(_p0)), 0, 0) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Chflags(path string, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_CHFLAGS, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Chmod(path string, mode uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_CHMOD, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Chown(path string, uid int, gid int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_CHOWN, uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid)) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Chroot(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_CHROOT, uintptr(unsafe.Pointer(_p0)), 0, 0) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED 
BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Close(fd int) (err error) { + _, _, e1 := Syscall(SYS_CLOSE, uintptr(fd), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Dup(fd int) (nfd int, err error) { + r0, _, e1 := Syscall(SYS_DUP, uintptr(fd), 0, 0) + nfd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Dup2(from int, to int) (err error) { + _, _, e1 := Syscall(SYS_DUP2, uintptr(from), uintptr(to), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Exit(code int) { + Syscall(SYS_EXIT, uintptr(code), 0, 0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fchdir(fd int) (err error) { + _, _, e1 := Syscall(SYS_FCHDIR, uintptr(fd), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fchflags(fd int, flags int) (err error) { + _, _, e1 := Syscall(SYS_FCHFLAGS, uintptr(fd), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fchmod(fd int, mode uint32) (err error) { + _, _, e1 := Syscall(SYS_FCHMOD, uintptr(fd), uintptr(mode), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fchown(fd int, uid int, gid int) (err error) { + _, _, e1 := Syscall(SYS_FCHOWN, uintptr(fd), uintptr(uid), uintptr(gid)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Flock(fd int, how int) (err error) { + _, _, e1 := Syscall(SYS_FLOCK, uintptr(fd), uintptr(how), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fpathconf(fd int, name int) (val int, err error) { + r0, _, e1 := Syscall(SYS_FPATHCONF, uintptr(fd), uintptr(name), 0) + val = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fstat(fd int, stat *Stat_t) (err error) { + _, _, e1 := Syscall(SYS_FSTAT, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fstatfs(fd int, stat *Statfs_t) (err error) { + _, _, e1 := Syscall(SYS_FSTATFS, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fsync(fd int) (err error) { + _, _, e1 := Syscall(SYS_FSYNC, uintptr(fd), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Ftruncate(fd int, length int64) (err error) { + _, _, e1 := Syscall6(SYS_FTRUNCATE, uintptr(fd), 0, uintptr(length), uintptr(length>>32), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getegid() (egid int) { + r0, _, _ := RawSyscall(SYS_GETEGID, 0, 0, 0) + egid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Geteuid() (uid int) { + r0, _, _ := RawSyscall(SYS_GETEUID, 0, 0, 0) + uid = int(r0) + return +} + +// THIS 
FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getgid() (gid int) { + r0, _, _ := RawSyscall(SYS_GETGID, 0, 0, 0) + gid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getpgid(pid int) (pgid int, err error) { + r0, _, e1 := RawSyscall(SYS_GETPGID, uintptr(pid), 0, 0) + pgid = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getpgrp() (pgrp int) { + r0, _, _ := RawSyscall(SYS_GETPGRP, 0, 0, 0) + pgrp = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getpid() (pid int) { + r0, _, _ := RawSyscall(SYS_GETPID, 0, 0, 0) + pid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getppid() (ppid int) { + r0, _, _ := RawSyscall(SYS_GETPPID, 0, 0, 0) + ppid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getpriority(which int, who int) (prio int, err error) { + r0, _, e1 := Syscall(SYS_GETPRIORITY, uintptr(which), uintptr(who), 0) + prio = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getrlimit(which int, lim *Rlimit) (err error) { + _, _, e1 := RawSyscall(SYS_GETRLIMIT, uintptr(which), uintptr(unsafe.Pointer(lim)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getrusage(who int, rusage *Rusage) (err error) { + _, _, e1 := RawSyscall(SYS_GETRUSAGE, uintptr(who), uintptr(unsafe.Pointer(rusage)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getsid(pid int) (sid int, err error) { + r0, _, e1 := RawSyscall(SYS_GETSID, uintptr(pid), 0, 0) + sid = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Gettimeofday(tv *Timeval) (err error) { + _, _, e1 := RawSyscall(SYS_GETTIMEOFDAY, uintptr(unsafe.Pointer(tv)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getuid() (uid int) { + r0, _, _ := RawSyscall(SYS_GETUID, 0, 0, 0) + uid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Issetugid() (tainted bool) { + r0, _, _ := Syscall(SYS_ISSETUGID, 0, 0, 0) + tainted = bool(r0 != 0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Kill(pid int, signum syscall.Signal) (err error) { + _, _, e1 := Syscall(SYS_KILL, uintptr(pid), uintptr(signum), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Kqueue() (fd int, err error) { + r0, _, e1 := Syscall(SYS_KQUEUE, 0, 0, 0) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Lchown(path string, uid int, gid int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_LCHOWN, uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid)) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Link(path string, link string) (err error) { + var 
_p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(link) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_LINK, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) + use(unsafe.Pointer(_p0)) + use(unsafe.Pointer(_p1)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Listen(s int, backlog int) (err error) { + _, _, e1 := Syscall(SYS_LISTEN, uintptr(s), uintptr(backlog), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Lstat(path string, stat *Stat_t) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_LSTAT, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mkdir(path string, mode uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_MKDIR, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mkfifo(path string, mode uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_MKFIFO, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mknod(path string, mode uint32, dev int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_MKNOD, uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev)) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mlock(b []byte) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall(SYS_MLOCK, uintptr(_p0), uintptr(len(b)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mlockall(flags int) (err error) { + _, _, e1 := Syscall(SYS_MLOCKALL, uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mprotect(b []byte, prot int) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall(SYS_MPROTECT, uintptr(_p0), uintptr(len(b)), uintptr(prot)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Munlock(b []byte) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall(SYS_MUNLOCK, uintptr(_p0), uintptr(len(b)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Munlockall() (err error) { + _, _, e1 := Syscall(SYS_MUNLOCKALL, 0, 0, 0) + if e1 != 0 { + 
err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Nanosleep(time *Timespec, leftover *Timespec) (err error) { + _, _, e1 := Syscall(SYS_NANOSLEEP, uintptr(unsafe.Pointer(time)), uintptr(unsafe.Pointer(leftover)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Open(path string, mode int, perm uint32) (fd int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + r0, _, e1 := Syscall(SYS_OPEN, uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(perm)) + use(unsafe.Pointer(_p0)) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Pathconf(path string, name int) (val int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + r0, _, e1 := Syscall(SYS_PATHCONF, uintptr(unsafe.Pointer(_p0)), uintptr(name), 0) + use(unsafe.Pointer(_p0)) + val = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Pread(fd int, p []byte, offset int64) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_PREAD, uintptr(fd), uintptr(_p0), uintptr(len(p)), 0, uintptr(offset), uintptr(offset>>32)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Pwrite(fd int, p []byte, offset int64) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_PWRITE, uintptr(fd), uintptr(_p0), uintptr(len(p)), 0, uintptr(offset), uintptr(offset>>32)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func read(fd int, p []byte) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS_READ, uintptr(fd), uintptr(_p0), uintptr(len(p))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Readlink(path string, buf []byte) (n int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 unsafe.Pointer + if len(buf) > 0 { + _p1 = unsafe.Pointer(&buf[0]) + } else { + _p1 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS_READLINK, uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(buf))) + use(unsafe.Pointer(_p0)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Rename(from string, to string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(from) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(to) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_RENAME, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) + use(unsafe.Pointer(_p0)) + use(unsafe.Pointer(_p1)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Revoke(path string) (err error) { + var _p0 *byte + _p0, 
err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_REVOKE, uintptr(unsafe.Pointer(_p0)), 0, 0) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Rmdir(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_RMDIR, uintptr(unsafe.Pointer(_p0)), 0, 0) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Seek(fd int, offset int64, whence int) (newoffset int64, err error) { + r0, r1, e1 := Syscall6(SYS_LSEEK, uintptr(fd), 0, uintptr(offset), uintptr(offset>>32), uintptr(whence), 0) + newoffset = int64(int64(r1)<<32 | int64(r0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Select(n int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (err error) { + _, _, e1 := Syscall6(SYS_SELECT, uintptr(n), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setegid(egid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETEGID, uintptr(egid), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Seteuid(euid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETEUID, uintptr(euid), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setgid(gid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETGID, uintptr(gid), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setlogin(name string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(name) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_SETLOGIN, uintptr(unsafe.Pointer(_p0)), 0, 0) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setpgid(pid int, pgid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETPGID, uintptr(pid), uintptr(pgid), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setpriority(which int, who int, prio int) (err error) { + _, _, e1 := Syscall(SYS_SETPRIORITY, uintptr(which), uintptr(who), uintptr(prio)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setregid(rgid int, egid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETREGID, uintptr(rgid), uintptr(egid), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setreuid(ruid int, euid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETREUID, uintptr(ruid), uintptr(euid), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setresgid(rgid int, egid int, sgid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETRESGID, uintptr(rgid), uintptr(egid), uintptr(sgid)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE 
IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setresuid(ruid int, euid int, suid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETRESUID, uintptr(ruid), uintptr(euid), uintptr(suid)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setrlimit(which int, lim *Rlimit) (err error) { + _, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(which), uintptr(unsafe.Pointer(lim)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setsid() (pid int, err error) { + r0, _, e1 := RawSyscall(SYS_SETSID, 0, 0, 0) + pid = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Settimeofday(tp *Timeval) (err error) { + _, _, e1 := RawSyscall(SYS_SETTIMEOFDAY, uintptr(unsafe.Pointer(tp)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setuid(uid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETUID, uintptr(uid), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Stat(path string, stat *Stat_t) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_STAT, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Statfs(path string, stat *Statfs_t) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_STATFS, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Symlink(path string, link string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(link) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_SYMLINK, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) + use(unsafe.Pointer(_p0)) + use(unsafe.Pointer(_p1)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Sync() (err error) { + _, _, e1 := Syscall(SYS_SYNC, 0, 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Truncate(path string, length int64) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_TRUNCATE, uintptr(unsafe.Pointer(_p0)), 0, uintptr(length), uintptr(length>>32), 0, 0) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Umask(newmask int) (oldmask int) { + r0, _, _ := Syscall(SYS_UMASK, uintptr(newmask), 0, 0) + oldmask = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Unlink(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_UNLINK, uintptr(unsafe.Pointer(_p0)), 0, 0) + use(unsafe.Pointer(_p0)) + if 
e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Unmount(path string, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_UNMOUNT, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func write(fd int, p []byte) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(_p0), uintptr(len(p))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func mmap(addr uintptr, length uintptr, prot int, flag int, fd int, pos int64) (ret uintptr, err error) { + r0, _, e1 := Syscall9(SYS_MMAP, uintptr(addr), uintptr(length), uintptr(prot), uintptr(flag), uintptr(fd), 0, uintptr(pos), uintptr(pos>>32), 0) + ret = uintptr(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func munmap(addr uintptr, length uintptr) (err error) { + _, _, e1 := Syscall(SYS_MUNMAP, uintptr(addr), uintptr(length), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func readlen(fd int, buf *byte, nbuf int) (n int, err error) { + r0, _, e1 := Syscall(SYS_READ, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func writelen(fd int, buf *byte, nbuf int) (n int, err error) { + r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} diff --git a/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go new file mode 100644 index 000000000..7dc2b7eaf --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go @@ -0,0 +1,1386 @@ +// mksyscall.pl -openbsd syscall_bsd.go syscall_openbsd.go syscall_openbsd_amd64.go +// MACHINE GENERATED BY THE COMMAND ABOVE; DO NOT EDIT + +// +build amd64,openbsd + +package unix + +import ( + "syscall" + "unsafe" +) + +var _ syscall.Errno + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getgroups(ngid int, gid *_Gid_t) (n int, err error) { + r0, _, e1 := RawSyscall(SYS_GETGROUPS, uintptr(ngid), uintptr(unsafe.Pointer(gid)), 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func setgroups(ngid int, gid *_Gid_t) (err error) { + _, _, e1 := RawSyscall(SYS_SETGROUPS, uintptr(ngid), uintptr(unsafe.Pointer(gid)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func wait4(pid int, wstatus *_C_int, options int, rusage *Rusage) (wpid int, err error) { + r0, _, e1 := Syscall6(SYS_WAIT4, uintptr(pid), uintptr(unsafe.Pointer(wstatus)), uintptr(options), uintptr(unsafe.Pointer(rusage)), 0, 0) + wpid = int(r0) + if e1 != 0 { + 
err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error) { + r0, _, e1 := Syscall(SYS_ACCEPT, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func bind(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) { + _, _, e1 := Syscall(SYS_BIND, uintptr(s), uintptr(addr), uintptr(addrlen)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func connect(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) { + _, _, e1 := Syscall(SYS_CONNECT, uintptr(s), uintptr(addr), uintptr(addrlen)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func socket(domain int, typ int, proto int) (fd int, err error) { + r0, _, e1 := RawSyscall(SYS_SOCKET, uintptr(domain), uintptr(typ), uintptr(proto)) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getsockopt(s int, level int, name int, val unsafe.Pointer, vallen *_Socklen) (err error) { + _, _, e1 := Syscall6(SYS_GETSOCKOPT, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(unsafe.Pointer(vallen)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func setsockopt(s int, level int, name int, val unsafe.Pointer, vallen uintptr) (err error) { + _, _, e1 := Syscall6(SYS_SETSOCKOPT, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(vallen), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getpeername(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) { + _, _, e1 := RawSyscall(SYS_GETPEERNAME, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getsockname(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) { + _, _, e1 := RawSyscall(SYS_GETSOCKNAME, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Shutdown(s int, how int) (err error) { + _, _, e1 := Syscall(SYS_SHUTDOWN, uintptr(s), uintptr(how), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func socketpair(domain int, typ int, proto int, fd *[2]int32) (err error) { + _, _, e1 := RawSyscall6(SYS_SOCKETPAIR, uintptr(domain), uintptr(typ), uintptr(proto), uintptr(unsafe.Pointer(fd)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Socklen) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_RECVFROM, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(flags), uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(fromlen))) + n = int(r0) + 
if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func sendto(s int, buf []byte, flags int, to unsafe.Pointer, addrlen _Socklen) (err error) { + var _p0 unsafe.Pointer + if len(buf) > 0 { + _p0 = unsafe.Pointer(&buf[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall6(SYS_SENDTO, uintptr(s), uintptr(_p0), uintptr(len(buf)), uintptr(flags), uintptr(to), uintptr(addrlen)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func recvmsg(s int, msg *Msghdr, flags int) (n int, err error) { + r0, _, e1 := Syscall(SYS_RECVMSG, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func sendmsg(s int, msg *Msghdr, flags int) (n int, err error) { + r0, _, e1 := Syscall(SYS_SENDMSG, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func kevent(kq int, change unsafe.Pointer, nchange int, event unsafe.Pointer, nevent int, timeout *Timespec) (n int, err error) { + r0, _, e1 := Syscall6(SYS_KEVENT, uintptr(kq), uintptr(change), uintptr(nchange), uintptr(event), uintptr(nevent), uintptr(unsafe.Pointer(timeout))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { + var _p0 unsafe.Pointer + if len(mib) > 0 { + _p0 = unsafe.Pointer(&mib[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall6(SYS___SYSCTL, uintptr(_p0), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func utimes(path string, timeval *[2]Timeval) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_UTIMES, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(timeval)), 0) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func futimes(fd int, timeval *[2]Timeval) (err error) { + _, _, e1 := Syscall(SYS_FUTIMES, uintptr(fd), uintptr(unsafe.Pointer(timeval)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func fcntl(fd int, cmd int, arg int) (val int, err error) { + r0, _, e1 := Syscall(SYS_FCNTL, uintptr(fd), uintptr(cmd), uintptr(arg)) + val = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func pipe(p *[2]_C_int) (err error) { + _, _, e1 := RawSyscall(SYS_PIPE, uintptr(unsafe.Pointer(p)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getdents(fd int, buf []byte) (n int, err error) { + var _p0 unsafe.Pointer + if len(buf) > 0 { + _p0 = unsafe.Pointer(&buf[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS_GETDENTS, uintptr(fd), uintptr(_p0), uintptr(len(buf))) + n = 
int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Access(path string, mode uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_ACCESS, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Adjtime(delta *Timeval, olddelta *Timeval) (err error) { + _, _, e1 := Syscall(SYS_ADJTIME, uintptr(unsafe.Pointer(delta)), uintptr(unsafe.Pointer(olddelta)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Chdir(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_CHDIR, uintptr(unsafe.Pointer(_p0)), 0, 0) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Chflags(path string, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_CHFLAGS, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Chmod(path string, mode uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_CHMOD, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Chown(path string, uid int, gid int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_CHOWN, uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid)) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Chroot(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_CHROOT, uintptr(unsafe.Pointer(_p0)), 0, 0) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Close(fd int) (err error) { + _, _, e1 := Syscall(SYS_CLOSE, uintptr(fd), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Dup(fd int) (nfd int, err error) { + r0, _, e1 := Syscall(SYS_DUP, uintptr(fd), 0, 0) + nfd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Dup2(from int, to int) (err error) { + _, _, e1 := Syscall(SYS_DUP2, uintptr(from), uintptr(to), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Exit(code int) { + Syscall(SYS_EXIT, uintptr(code), 0, 0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fchdir(fd int) (err error) { + _, _, e1 := Syscall(SYS_FCHDIR, uintptr(fd), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS 
FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fchflags(fd int, flags int) (err error) { + _, _, e1 := Syscall(SYS_FCHFLAGS, uintptr(fd), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fchmod(fd int, mode uint32) (err error) { + _, _, e1 := Syscall(SYS_FCHMOD, uintptr(fd), uintptr(mode), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fchown(fd int, uid int, gid int) (err error) { + _, _, e1 := Syscall(SYS_FCHOWN, uintptr(fd), uintptr(uid), uintptr(gid)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Flock(fd int, how int) (err error) { + _, _, e1 := Syscall(SYS_FLOCK, uintptr(fd), uintptr(how), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fpathconf(fd int, name int) (val int, err error) { + r0, _, e1 := Syscall(SYS_FPATHCONF, uintptr(fd), uintptr(name), 0) + val = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fstat(fd int, stat *Stat_t) (err error) { + _, _, e1 := Syscall(SYS_FSTAT, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fstatfs(fd int, stat *Statfs_t) (err error) { + _, _, e1 := Syscall(SYS_FSTATFS, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fsync(fd int) (err error) { + _, _, e1 := Syscall(SYS_FSYNC, uintptr(fd), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Ftruncate(fd int, length int64) (err error) { + _, _, e1 := Syscall(SYS_FTRUNCATE, uintptr(fd), 0, uintptr(length)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getegid() (egid int) { + r0, _, _ := RawSyscall(SYS_GETEGID, 0, 0, 0) + egid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Geteuid() (uid int) { + r0, _, _ := RawSyscall(SYS_GETEUID, 0, 0, 0) + uid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getgid() (gid int) { + r0, _, _ := RawSyscall(SYS_GETGID, 0, 0, 0) + gid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getpgid(pid int) (pgid int, err error) { + r0, _, e1 := RawSyscall(SYS_GETPGID, uintptr(pid), 0, 0) + pgid = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getpgrp() (pgrp int) { + r0, _, _ := RawSyscall(SYS_GETPGRP, 0, 0, 0) + pgrp = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getpid() (pid int) { + r0, _, _ := RawSyscall(SYS_GETPID, 0, 0, 0) + pid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getppid() (ppid int) { + r0, _, _ := RawSyscall(SYS_GETPPID, 0, 0, 0) + ppid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getpriority(which int, who int) 
(prio int, err error) { + r0, _, e1 := Syscall(SYS_GETPRIORITY, uintptr(which), uintptr(who), 0) + prio = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getrlimit(which int, lim *Rlimit) (err error) { + _, _, e1 := RawSyscall(SYS_GETRLIMIT, uintptr(which), uintptr(unsafe.Pointer(lim)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getrusage(who int, rusage *Rusage) (err error) { + _, _, e1 := RawSyscall(SYS_GETRUSAGE, uintptr(who), uintptr(unsafe.Pointer(rusage)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getsid(pid int) (sid int, err error) { + r0, _, e1 := RawSyscall(SYS_GETSID, uintptr(pid), 0, 0) + sid = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Gettimeofday(tv *Timeval) (err error) { + _, _, e1 := RawSyscall(SYS_GETTIMEOFDAY, uintptr(unsafe.Pointer(tv)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getuid() (uid int) { + r0, _, _ := RawSyscall(SYS_GETUID, 0, 0, 0) + uid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Issetugid() (tainted bool) { + r0, _, _ := Syscall(SYS_ISSETUGID, 0, 0, 0) + tainted = bool(r0 != 0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Kill(pid int, signum syscall.Signal) (err error) { + _, _, e1 := Syscall(SYS_KILL, uintptr(pid), uintptr(signum), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Kqueue() (fd int, err error) { + r0, _, e1 := Syscall(SYS_KQUEUE, 0, 0, 0) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Lchown(path string, uid int, gid int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_LCHOWN, uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid)) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Link(path string, link string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(link) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_LINK, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) + use(unsafe.Pointer(_p0)) + use(unsafe.Pointer(_p1)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Listen(s int, backlog int) (err error) { + _, _, e1 := Syscall(SYS_LISTEN, uintptr(s), uintptr(backlog), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Lstat(path string, stat *Stat_t) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_LSTAT, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE 
TOP; DO NOT EDIT + +func Mkdir(path string, mode uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_MKDIR, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mkfifo(path string, mode uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_MKFIFO, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mknod(path string, mode uint32, dev int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_MKNOD, uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev)) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mlock(b []byte) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall(SYS_MLOCK, uintptr(_p0), uintptr(len(b)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mlockall(flags int) (err error) { + _, _, e1 := Syscall(SYS_MLOCKALL, uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mprotect(b []byte, prot int) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall(SYS_MPROTECT, uintptr(_p0), uintptr(len(b)), uintptr(prot)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Munlock(b []byte) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall(SYS_MUNLOCK, uintptr(_p0), uintptr(len(b)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Munlockall() (err error) { + _, _, e1 := Syscall(SYS_MUNLOCKALL, 0, 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Nanosleep(time *Timespec, leftover *Timespec) (err error) { + _, _, e1 := Syscall(SYS_NANOSLEEP, uintptr(unsafe.Pointer(time)), uintptr(unsafe.Pointer(leftover)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Open(path string, mode int, perm uint32) (fd int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + r0, _, e1 := Syscall(SYS_OPEN, uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(perm)) + use(unsafe.Pointer(_p0)) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Pathconf(path string, name int) (val int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + r0, _, e1 := Syscall(SYS_PATHCONF, uintptr(unsafe.Pointer(_p0)), 
uintptr(name), 0) + use(unsafe.Pointer(_p0)) + val = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Pread(fd int, p []byte, offset int64) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_PREAD, uintptr(fd), uintptr(_p0), uintptr(len(p)), 0, uintptr(offset), 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Pwrite(fd int, p []byte, offset int64) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_PWRITE, uintptr(fd), uintptr(_p0), uintptr(len(p)), 0, uintptr(offset), 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func read(fd int, p []byte) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS_READ, uintptr(fd), uintptr(_p0), uintptr(len(p))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Readlink(path string, buf []byte) (n int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 unsafe.Pointer + if len(buf) > 0 { + _p1 = unsafe.Pointer(&buf[0]) + } else { + _p1 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS_READLINK, uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(buf))) + use(unsafe.Pointer(_p0)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Rename(from string, to string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(from) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(to) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_RENAME, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) + use(unsafe.Pointer(_p0)) + use(unsafe.Pointer(_p1)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Revoke(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_REVOKE, uintptr(unsafe.Pointer(_p0)), 0, 0) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Rmdir(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_RMDIR, uintptr(unsafe.Pointer(_p0)), 0, 0) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Seek(fd int, offset int64, whence int) (newoffset int64, err error) { + r0, _, e1 := Syscall6(SYS_LSEEK, uintptr(fd), 0, uintptr(offset), uintptr(whence), 0, 0) + newoffset = int64(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Select(n int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (err error) { + _, _, e1 := Syscall6(SYS_SELECT, 
uintptr(n), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setegid(egid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETEGID, uintptr(egid), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Seteuid(euid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETEUID, uintptr(euid), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setgid(gid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETGID, uintptr(gid), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setlogin(name string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(name) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_SETLOGIN, uintptr(unsafe.Pointer(_p0)), 0, 0) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setpgid(pid int, pgid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETPGID, uintptr(pid), uintptr(pgid), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setpriority(which int, who int, prio int) (err error) { + _, _, e1 := Syscall(SYS_SETPRIORITY, uintptr(which), uintptr(who), uintptr(prio)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setregid(rgid int, egid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETREGID, uintptr(rgid), uintptr(egid), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setreuid(ruid int, euid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETREUID, uintptr(ruid), uintptr(euid), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setresgid(rgid int, egid int, sgid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETRESGID, uintptr(rgid), uintptr(egid), uintptr(sgid)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setresuid(ruid int, euid int, suid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETRESUID, uintptr(ruid), uintptr(euid), uintptr(suid)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setrlimit(which int, lim *Rlimit) (err error) { + _, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(which), uintptr(unsafe.Pointer(lim)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setsid() (pid int, err error) { + r0, _, e1 := RawSyscall(SYS_SETSID, 0, 0, 0) + pid = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Settimeofday(tp *Timeval) (err error) { + _, _, e1 := RawSyscall(SYS_SETTIMEOFDAY, uintptr(unsafe.Pointer(tp)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setuid(uid int) (err error) { + 
_, _, e1 := RawSyscall(SYS_SETUID, uintptr(uid), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Stat(path string, stat *Stat_t) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_STAT, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Statfs(path string, stat *Statfs_t) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_STATFS, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Symlink(path string, link string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(link) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_SYMLINK, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) + use(unsafe.Pointer(_p0)) + use(unsafe.Pointer(_p1)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Sync() (err error) { + _, _, e1 := Syscall(SYS_SYNC, 0, 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Truncate(path string, length int64) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_TRUNCATE, uintptr(unsafe.Pointer(_p0)), 0, uintptr(length)) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Umask(newmask int) (oldmask int) { + r0, _, _ := Syscall(SYS_UMASK, uintptr(newmask), 0, 0) + oldmask = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Unlink(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_UNLINK, uintptr(unsafe.Pointer(_p0)), 0, 0) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Unmount(path string, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_UNMOUNT, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func write(fd int, p []byte) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(_p0), uintptr(len(p))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func mmap(addr uintptr, length uintptr, prot int, flag int, fd int, pos int64) (ret uintptr, err error) { + r0, _, e1 := Syscall9(SYS_MMAP, uintptr(addr), uintptr(length), uintptr(prot), uintptr(flag), uintptr(fd), 0, uintptr(pos), 0, 0) + ret = 
uintptr(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func munmap(addr uintptr, length uintptr) (err error) { + _, _, e1 := Syscall(SYS_MUNMAP, uintptr(addr), uintptr(length), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func readlen(fd int, buf *byte, nbuf int) (n int, err error) { + r0, _, e1 := Syscall(SYS_READ, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func writelen(fd int, buf *byte, nbuf int) (n int, err error) { + r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} diff --git a/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zsyscall_solaris_amd64.go b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zsyscall_solaris_amd64.go new file mode 100644 index 000000000..8d2a8365b --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zsyscall_solaris_amd64.go @@ -0,0 +1,1559 @@ +// mksyscall_solaris.pl syscall_solaris.go syscall_solaris_amd64.go +// MACHINE GENERATED BY THE COMMAND ABOVE; DO NOT EDIT + +// +build amd64,solaris + +package unix + +import ( + "syscall" + "unsafe" +) + +//go:cgo_import_dynamic libc_getsockname getsockname "libsocket.so" +//go:cgo_import_dynamic libc_getcwd getcwd "libc.so" +//go:cgo_import_dynamic libc_getgroups getgroups "libc.so" +//go:cgo_import_dynamic libc_setgroups setgroups "libc.so" +//go:cgo_import_dynamic libc_utimes utimes "libc.so" +//go:cgo_import_dynamic libc_utimensat utimensat "libc.so" +//go:cgo_import_dynamic libc_fcntl fcntl "libc.so" +//go:cgo_import_dynamic libc_futimesat futimesat "libc.so" +//go:cgo_import_dynamic libc_accept accept "libsocket.so" +//go:cgo_import_dynamic libc_recvmsg recvmsg "libsocket.so" +//go:cgo_import_dynamic libc_sendmsg sendmsg "libsocket.so" +//go:cgo_import_dynamic libc_acct acct "libc.so" +//go:cgo_import_dynamic libc_ioctl ioctl "libc.so" +//go:cgo_import_dynamic libc_access access "libc.so" +//go:cgo_import_dynamic libc_adjtime adjtime "libc.so" +//go:cgo_import_dynamic libc_chdir chdir "libc.so" +//go:cgo_import_dynamic libc_chmod chmod "libc.so" +//go:cgo_import_dynamic libc_chown chown "libc.so" +//go:cgo_import_dynamic libc_chroot chroot "libc.so" +//go:cgo_import_dynamic libc_close close "libc.so" +//go:cgo_import_dynamic libc_creat creat "libc.so" +//go:cgo_import_dynamic libc_dup dup "libc.so" +//go:cgo_import_dynamic libc_dup2 dup2 "libc.so" +//go:cgo_import_dynamic libc_exit exit "libc.so" +//go:cgo_import_dynamic libc_fchdir fchdir "libc.so" +//go:cgo_import_dynamic libc_fchmod fchmod "libc.so" +//go:cgo_import_dynamic libc_fchmodat fchmodat "libc.so" +//go:cgo_import_dynamic libc_fchown fchown "libc.so" +//go:cgo_import_dynamic libc_fchownat fchownat "libc.so" +//go:cgo_import_dynamic libc_fdatasync fdatasync "libc.so" +//go:cgo_import_dynamic libc_fpathconf fpathconf "libc.so" +//go:cgo_import_dynamic libc_fstat fstat "libc.so" +//go:cgo_import_dynamic libc_getdents getdents "libc.so" +//go:cgo_import_dynamic libc_getgid getgid "libc.so" +//go:cgo_import_dynamic libc_getpid getpid "libc.so" +//go:cgo_import_dynamic libc_getpgid getpgid "libc.so" +//go:cgo_import_dynamic libc_getpgrp getpgrp "libc.so" 
+//go:cgo_import_dynamic libc_geteuid geteuid "libc.so" +//go:cgo_import_dynamic libc_getegid getegid "libc.so" +//go:cgo_import_dynamic libc_getppid getppid "libc.so" +//go:cgo_import_dynamic libc_getpriority getpriority "libc.so" +//go:cgo_import_dynamic libc_getrlimit getrlimit "libc.so" +//go:cgo_import_dynamic libc_getrusage getrusage "libc.so" +//go:cgo_import_dynamic libc_gettimeofday gettimeofday "libc.so" +//go:cgo_import_dynamic libc_getuid getuid "libc.so" +//go:cgo_import_dynamic libc_kill kill "libc.so" +//go:cgo_import_dynamic libc_lchown lchown "libc.so" +//go:cgo_import_dynamic libc_link link "libc.so" +//go:cgo_import_dynamic libc_listen listen "libsocket.so" +//go:cgo_import_dynamic libc_lstat lstat "libc.so" +//go:cgo_import_dynamic libc_madvise madvise "libc.so" +//go:cgo_import_dynamic libc_mkdir mkdir "libc.so" +//go:cgo_import_dynamic libc_mkdirat mkdirat "libc.so" +//go:cgo_import_dynamic libc_mkfifo mkfifo "libc.so" +//go:cgo_import_dynamic libc_mkfifoat mkfifoat "libc.so" +//go:cgo_import_dynamic libc_mknod mknod "libc.so" +//go:cgo_import_dynamic libc_mknodat mknodat "libc.so" +//go:cgo_import_dynamic libc_mlock mlock "libc.so" +//go:cgo_import_dynamic libc_mlockall mlockall "libc.so" +//go:cgo_import_dynamic libc_mprotect mprotect "libc.so" +//go:cgo_import_dynamic libc_munlock munlock "libc.so" +//go:cgo_import_dynamic libc_munlockall munlockall "libc.so" +//go:cgo_import_dynamic libc_nanosleep nanosleep "libc.so" +//go:cgo_import_dynamic libc_open open "libc.so" +//go:cgo_import_dynamic libc_openat openat "libc.so" +//go:cgo_import_dynamic libc_pathconf pathconf "libc.so" +//go:cgo_import_dynamic libc_pause pause "libc.so" +//go:cgo_import_dynamic libc_pread pread "libc.so" +//go:cgo_import_dynamic libc_pwrite pwrite "libc.so" +//go:cgo_import_dynamic libc_read read "libc.so" +//go:cgo_import_dynamic libc_readlink readlink "libc.so" +//go:cgo_import_dynamic libc_rename rename "libc.so" +//go:cgo_import_dynamic libc_renameat renameat "libc.so" +//go:cgo_import_dynamic libc_rmdir rmdir "libc.so" +//go:cgo_import_dynamic libc_lseek lseek "libc.so" +//go:cgo_import_dynamic libc_setegid setegid "libc.so" +//go:cgo_import_dynamic libc_seteuid seteuid "libc.so" +//go:cgo_import_dynamic libc_setgid setgid "libc.so" +//go:cgo_import_dynamic libc_sethostname sethostname "libc.so" +//go:cgo_import_dynamic libc_setpgid setpgid "libc.so" +//go:cgo_import_dynamic libc_setpriority setpriority "libc.so" +//go:cgo_import_dynamic libc_setregid setregid "libc.so" +//go:cgo_import_dynamic libc_setreuid setreuid "libc.so" +//go:cgo_import_dynamic libc_setrlimit setrlimit "libc.so" +//go:cgo_import_dynamic libc_setsid setsid "libc.so" +//go:cgo_import_dynamic libc_setuid setuid "libc.so" +//go:cgo_import_dynamic libc_shutdown shutdown "libsocket.so" +//go:cgo_import_dynamic libc_stat stat "libc.so" +//go:cgo_import_dynamic libc_symlink symlink "libc.so" +//go:cgo_import_dynamic libc_sync sync "libc.so" +//go:cgo_import_dynamic libc_times times "libc.so" +//go:cgo_import_dynamic libc_truncate truncate "libc.so" +//go:cgo_import_dynamic libc_fsync fsync "libc.so" +//go:cgo_import_dynamic libc_ftruncate ftruncate "libc.so" +//go:cgo_import_dynamic libc_umask umask "libc.so" +//go:cgo_import_dynamic libc_uname uname "libc.so" +//go:cgo_import_dynamic libc_umount umount "libc.so" +//go:cgo_import_dynamic libc_unlink unlink "libc.so" +//go:cgo_import_dynamic libc_unlinkat unlinkat "libc.so" +//go:cgo_import_dynamic libc_ustat ustat "libc.so" +//go:cgo_import_dynamic libc_utime utime 
"libc.so" +//go:cgo_import_dynamic libc_bind bind "libsocket.so" +//go:cgo_import_dynamic libc_connect connect "libsocket.so" +//go:cgo_import_dynamic libc_mmap mmap "libc.so" +//go:cgo_import_dynamic libc_munmap munmap "libc.so" +//go:cgo_import_dynamic libc_sendto sendto "libsocket.so" +//go:cgo_import_dynamic libc_socket socket "libsocket.so" +//go:cgo_import_dynamic libc_socketpair socketpair "libsocket.so" +//go:cgo_import_dynamic libc_write write "libc.so" +//go:cgo_import_dynamic libc_getsockopt getsockopt "libsocket.so" +//go:cgo_import_dynamic libc_getpeername getpeername "libsocket.so" +//go:cgo_import_dynamic libc_setsockopt setsockopt "libsocket.so" +//go:cgo_import_dynamic libc_recvfrom recvfrom "libsocket.so" +//go:cgo_import_dynamic libc_sysconf sysconf "libc.so" + +//go:linkname procgetsockname libc_getsockname +//go:linkname procGetcwd libc_getcwd +//go:linkname procgetgroups libc_getgroups +//go:linkname procsetgroups libc_setgroups +//go:linkname procutimes libc_utimes +//go:linkname procutimensat libc_utimensat +//go:linkname procfcntl libc_fcntl +//go:linkname procfutimesat libc_futimesat +//go:linkname procaccept libc_accept +//go:linkname procrecvmsg libc_recvmsg +//go:linkname procsendmsg libc_sendmsg +//go:linkname procacct libc_acct +//go:linkname procioctl libc_ioctl +//go:linkname procAccess libc_access +//go:linkname procAdjtime libc_adjtime +//go:linkname procChdir libc_chdir +//go:linkname procChmod libc_chmod +//go:linkname procChown libc_chown +//go:linkname procChroot libc_chroot +//go:linkname procClose libc_close +//go:linkname procCreat libc_creat +//go:linkname procDup libc_dup +//go:linkname procDup2 libc_dup2 +//go:linkname procExit libc_exit +//go:linkname procFchdir libc_fchdir +//go:linkname procFchmod libc_fchmod +//go:linkname procFchmodat libc_fchmodat +//go:linkname procFchown libc_fchown +//go:linkname procFchownat libc_fchownat +//go:linkname procFdatasync libc_fdatasync +//go:linkname procFpathconf libc_fpathconf +//go:linkname procFstat libc_fstat +//go:linkname procGetdents libc_getdents +//go:linkname procGetgid libc_getgid +//go:linkname procGetpid libc_getpid +//go:linkname procGetpgid libc_getpgid +//go:linkname procGetpgrp libc_getpgrp +//go:linkname procGeteuid libc_geteuid +//go:linkname procGetegid libc_getegid +//go:linkname procGetppid libc_getppid +//go:linkname procGetpriority libc_getpriority +//go:linkname procGetrlimit libc_getrlimit +//go:linkname procGetrusage libc_getrusage +//go:linkname procGettimeofday libc_gettimeofday +//go:linkname procGetuid libc_getuid +//go:linkname procKill libc_kill +//go:linkname procLchown libc_lchown +//go:linkname procLink libc_link +//go:linkname proclisten libc_listen +//go:linkname procLstat libc_lstat +//go:linkname procMadvise libc_madvise +//go:linkname procMkdir libc_mkdir +//go:linkname procMkdirat libc_mkdirat +//go:linkname procMkfifo libc_mkfifo +//go:linkname procMkfifoat libc_mkfifoat +//go:linkname procMknod libc_mknod +//go:linkname procMknodat libc_mknodat +//go:linkname procMlock libc_mlock +//go:linkname procMlockall libc_mlockall +//go:linkname procMprotect libc_mprotect +//go:linkname procMunlock libc_munlock +//go:linkname procMunlockall libc_munlockall +//go:linkname procNanosleep libc_nanosleep +//go:linkname procOpen libc_open +//go:linkname procOpenat libc_openat +//go:linkname procPathconf libc_pathconf +//go:linkname procPause libc_pause +//go:linkname procPread libc_pread +//go:linkname procPwrite libc_pwrite +//go:linkname procread libc_read +//go:linkname 
procReadlink libc_readlink +//go:linkname procRename libc_rename +//go:linkname procRenameat libc_renameat +//go:linkname procRmdir libc_rmdir +//go:linkname proclseek libc_lseek +//go:linkname procSetegid libc_setegid +//go:linkname procSeteuid libc_seteuid +//go:linkname procSetgid libc_setgid +//go:linkname procSethostname libc_sethostname +//go:linkname procSetpgid libc_setpgid +//go:linkname procSetpriority libc_setpriority +//go:linkname procSetregid libc_setregid +//go:linkname procSetreuid libc_setreuid +//go:linkname procSetrlimit libc_setrlimit +//go:linkname procSetsid libc_setsid +//go:linkname procSetuid libc_setuid +//go:linkname procshutdown libc_shutdown +//go:linkname procStat libc_stat +//go:linkname procSymlink libc_symlink +//go:linkname procSync libc_sync +//go:linkname procTimes libc_times +//go:linkname procTruncate libc_truncate +//go:linkname procFsync libc_fsync +//go:linkname procFtruncate libc_ftruncate +//go:linkname procUmask libc_umask +//go:linkname procUname libc_uname +//go:linkname procumount libc_umount +//go:linkname procUnlink libc_unlink +//go:linkname procUnlinkat libc_unlinkat +//go:linkname procUstat libc_ustat +//go:linkname procUtime libc_utime +//go:linkname procbind libc_bind +//go:linkname procconnect libc_connect +//go:linkname procmmap libc_mmap +//go:linkname procmunmap libc_munmap +//go:linkname procsendto libc_sendto +//go:linkname procsocket libc_socket +//go:linkname procsocketpair libc_socketpair +//go:linkname procwrite libc_write +//go:linkname procgetsockopt libc_getsockopt +//go:linkname procgetpeername libc_getpeername +//go:linkname procsetsockopt libc_setsockopt +//go:linkname procrecvfrom libc_recvfrom +//go:linkname procsysconf libc_sysconf + +var ( + procgetsockname, + procGetcwd, + procgetgroups, + procsetgroups, + procutimes, + procutimensat, + procfcntl, + procfutimesat, + procaccept, + procrecvmsg, + procsendmsg, + procacct, + procioctl, + procAccess, + procAdjtime, + procChdir, + procChmod, + procChown, + procChroot, + procClose, + procCreat, + procDup, + procDup2, + procExit, + procFchdir, + procFchmod, + procFchmodat, + procFchown, + procFchownat, + procFdatasync, + procFpathconf, + procFstat, + procGetdents, + procGetgid, + procGetpid, + procGetpgid, + procGetpgrp, + procGeteuid, + procGetegid, + procGetppid, + procGetpriority, + procGetrlimit, + procGetrusage, + procGettimeofday, + procGetuid, + procKill, + procLchown, + procLink, + proclisten, + procLstat, + procMadvise, + procMkdir, + procMkdirat, + procMkfifo, + procMkfifoat, + procMknod, + procMknodat, + procMlock, + procMlockall, + procMprotect, + procMunlock, + procMunlockall, + procNanosleep, + procOpen, + procOpenat, + procPathconf, + procPause, + procPread, + procPwrite, + procread, + procReadlink, + procRename, + procRenameat, + procRmdir, + proclseek, + procSetegid, + procSeteuid, + procSetgid, + procSethostname, + procSetpgid, + procSetpriority, + procSetregid, + procSetreuid, + procSetrlimit, + procSetsid, + procSetuid, + procshutdown, + procStat, + procSymlink, + procSync, + procTimes, + procTruncate, + procFsync, + procFtruncate, + procUmask, + procUname, + procumount, + procUnlink, + procUnlinkat, + procUstat, + procUtime, + procbind, + procconnect, + procmmap, + procmunmap, + procsendto, + procsocket, + procsocketpair, + procwrite, + procgetsockopt, + procgetpeername, + procsetsockopt, + procrecvfrom, + procsysconf syscallFunc +) + +func getsockname(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) { + _, _, e1 := 
sysvicall6(uintptr(unsafe.Pointer(&procgetsockname)), 3, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)), 0, 0, 0) + if e1 != 0 { + err = e1 + } + return +} + +func Getcwd(buf []byte) (n int, err error) { + var _p0 *byte + if len(buf) > 0 { + _p0 = &buf[0] + } + r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procGetcwd)), 2, uintptr(unsafe.Pointer(_p0)), uintptr(len(buf)), 0, 0, 0, 0) + n = int(r0) + if e1 != 0 { + err = e1 + } + return +} + +func getgroups(ngid int, gid *_Gid_t) (n int, err error) { + r0, _, e1 := rawSysvicall6(uintptr(unsafe.Pointer(&procgetgroups)), 2, uintptr(ngid), uintptr(unsafe.Pointer(gid)), 0, 0, 0, 0) + n = int(r0) + if e1 != 0 { + err = e1 + } + return +} + +func setgroups(ngid int, gid *_Gid_t) (err error) { + _, _, e1 := rawSysvicall6(uintptr(unsafe.Pointer(&procsetgroups)), 2, uintptr(ngid), uintptr(unsafe.Pointer(gid)), 0, 0, 0, 0) + if e1 != 0 { + err = e1 + } + return +} + +func utimes(path string, times *[2]Timeval) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procutimes)), 2, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(times)), 0, 0, 0, 0) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = e1 + } + return +} + +func utimensat(fd int, path string, times *[2]Timespec, flag int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procutimensat)), 4, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(times)), uintptr(flag), 0, 0) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = e1 + } + return +} + +func fcntl(fd int, cmd int, arg int) (val int, err error) { + r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procfcntl)), 3, uintptr(fd), uintptr(cmd), uintptr(arg), 0, 0, 0) + val = int(r0) + if e1 != 0 { + err = e1 + } + return +} + +func futimesat(fildes int, path *byte, times *[2]Timeval) (err error) { + _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procfutimesat)), 3, uintptr(fildes), uintptr(unsafe.Pointer(path)), uintptr(unsafe.Pointer(times)), 0, 0, 0) + if e1 != 0 { + err = e1 + } + return +} + +func accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error) { + r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procaccept)), 3, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)), 0, 0, 0) + fd = int(r0) + if e1 != 0 { + err = e1 + } + return +} + +func recvmsg(s int, msg *Msghdr, flags int) (n int, err error) { + r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procrecvmsg)), 3, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags), 0, 0, 0) + n = int(r0) + if e1 != 0 { + err = e1 + } + return +} + +func sendmsg(s int, msg *Msghdr, flags int) (n int, err error) { + r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procsendmsg)), 3, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags), 0, 0, 0) + n = int(r0) + if e1 != 0 { + err = e1 + } + return +} + +func acct(path *byte) (err error) { + _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procacct)), 1, uintptr(unsafe.Pointer(path)), 0, 0, 0, 0, 0) + if e1 != 0 { + err = e1 + } + return +} + +func ioctl(fd int, req int, arg uintptr) (err error) { + _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procioctl)), 3, uintptr(fd), uintptr(req), uintptr(arg), 0, 0, 0) + if e1 != 0 { + err = e1 + } + return +} + +func Access(path string, mode uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil 
{ + return + } + _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procAccess)), 2, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0, 0, 0, 0) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = e1 + } + return +} + +func Adjtime(delta *Timeval, olddelta *Timeval) (err error) { + _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procAdjtime)), 2, uintptr(unsafe.Pointer(delta)), uintptr(unsafe.Pointer(olddelta)), 0, 0, 0, 0) + if e1 != 0 { + err = e1 + } + return +} + +func Chdir(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procChdir)), 1, uintptr(unsafe.Pointer(_p0)), 0, 0, 0, 0, 0) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = e1 + } + return +} + +func Chmod(path string, mode uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procChmod)), 2, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0, 0, 0, 0) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = e1 + } + return +} + +func Chown(path string, uid int, gid int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procChown)), 3, uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid), 0, 0, 0) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = e1 + } + return +} + +func Chroot(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procChroot)), 1, uintptr(unsafe.Pointer(_p0)), 0, 0, 0, 0, 0) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = e1 + } + return +} + +func Close(fd int) (err error) { + _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procClose)), 1, uintptr(fd), 0, 0, 0, 0, 0) + if e1 != 0 { + err = e1 + } + return +} + +func Creat(path string, mode uint32) (fd int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procCreat)), 2, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0, 0, 0, 0) + use(unsafe.Pointer(_p0)) + fd = int(r0) + if e1 != 0 { + err = e1 + } + return +} + +func Dup(fd int) (nfd int, err error) { + r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procDup)), 1, uintptr(fd), 0, 0, 0, 0, 0) + nfd = int(r0) + if e1 != 0 { + err = e1 + } + return +} + +func Dup2(oldfd int, newfd int) (err error) { + _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procDup2)), 2, uintptr(oldfd), uintptr(newfd), 0, 0, 0, 0) + if e1 != 0 { + err = e1 + } + return +} + +func Exit(code int) { + sysvicall6(uintptr(unsafe.Pointer(&procExit)), 1, uintptr(code), 0, 0, 0, 0, 0) + return +} + +func Fchdir(fd int) (err error) { + _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procFchdir)), 1, uintptr(fd), 0, 0, 0, 0, 0) + if e1 != 0 { + err = e1 + } + return +} + +func Fchmod(fd int, mode uint32) (err error) { + _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procFchmod)), 2, uintptr(fd), uintptr(mode), 0, 0, 0, 0) + if e1 != 0 { + err = e1 + } + return +} + +func Fchmodat(dirfd int, path string, mode uint32, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procFchmodat)), 4, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = e1 + } + return +} + +func Fchown(fd int, 
uid int, gid int) (err error) { + _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procFchown)), 3, uintptr(fd), uintptr(uid), uintptr(gid), 0, 0, 0) + if e1 != 0 { + err = e1 + } + return +} + +func Fchownat(dirfd int, path string, uid int, gid int, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procFchownat)), 5, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid), uintptr(flags), 0) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = e1 + } + return +} + +func Fdatasync(fd int) (err error) { + _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procFdatasync)), 1, uintptr(fd), 0, 0, 0, 0, 0) + if e1 != 0 { + err = e1 + } + return +} + +func Fpathconf(fd int, name int) (val int, err error) { + r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procFpathconf)), 2, uintptr(fd), uintptr(name), 0, 0, 0, 0) + val = int(r0) + if e1 != 0 { + err = e1 + } + return +} + +func Fstat(fd int, stat *Stat_t) (err error) { + _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procFstat)), 2, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0, 0, 0, 0) + if e1 != 0 { + err = e1 + } + return +} + +func Getdents(fd int, buf []byte, basep *uintptr) (n int, err error) { + var _p0 *byte + if len(buf) > 0 { + _p0 = &buf[0] + } + r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procGetdents)), 4, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(len(buf)), uintptr(unsafe.Pointer(basep)), 0, 0) + n = int(r0) + if e1 != 0 { + err = e1 + } + return +} + +func Getgid() (gid int) { + r0, _, _ := rawSysvicall6(uintptr(unsafe.Pointer(&procGetgid)), 0, 0, 0, 0, 0, 0, 0) + gid = int(r0) + return +} + +func Getpid() (pid int) { + r0, _, _ := rawSysvicall6(uintptr(unsafe.Pointer(&procGetpid)), 0, 0, 0, 0, 0, 0, 0) + pid = int(r0) + return +} + +func Getpgid(pid int) (pgid int, err error) { + r0, _, e1 := rawSysvicall6(uintptr(unsafe.Pointer(&procGetpgid)), 1, uintptr(pid), 0, 0, 0, 0, 0) + pgid = int(r0) + if e1 != 0 { + err = e1 + } + return +} + +func Getpgrp() (pgid int, err error) { + r0, _, e1 := rawSysvicall6(uintptr(unsafe.Pointer(&procGetpgrp)), 0, 0, 0, 0, 0, 0, 0) + pgid = int(r0) + if e1 != 0 { + err = e1 + } + return +} + +func Geteuid() (euid int) { + r0, _, _ := sysvicall6(uintptr(unsafe.Pointer(&procGeteuid)), 0, 0, 0, 0, 0, 0, 0) + euid = int(r0) + return +} + +func Getegid() (egid int) { + r0, _, _ := sysvicall6(uintptr(unsafe.Pointer(&procGetegid)), 0, 0, 0, 0, 0, 0, 0) + egid = int(r0) + return +} + +func Getppid() (ppid int) { + r0, _, _ := sysvicall6(uintptr(unsafe.Pointer(&procGetppid)), 0, 0, 0, 0, 0, 0, 0) + ppid = int(r0) + return +} + +func Getpriority(which int, who int) (n int, err error) { + r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procGetpriority)), 2, uintptr(which), uintptr(who), 0, 0, 0, 0) + n = int(r0) + if e1 != 0 { + err = e1 + } + return +} + +func Getrlimit(which int, lim *Rlimit) (err error) { + _, _, e1 := rawSysvicall6(uintptr(unsafe.Pointer(&procGetrlimit)), 2, uintptr(which), uintptr(unsafe.Pointer(lim)), 0, 0, 0, 0) + if e1 != 0 { + err = e1 + } + return +} + +func Getrusage(who int, rusage *Rusage) (err error) { + _, _, e1 := rawSysvicall6(uintptr(unsafe.Pointer(&procGetrusage)), 2, uintptr(who), uintptr(unsafe.Pointer(rusage)), 0, 0, 0, 0) + if e1 != 0 { + err = e1 + } + return +} + +func Gettimeofday(tv *Timeval) (err error) { + _, _, e1 := rawSysvicall6(uintptr(unsafe.Pointer(&procGettimeofday)), 1, uintptr(unsafe.Pointer(tv)), 0, 0, 0, 0, 0) + if e1 != 0 { 
+ err = e1 + } + return +} + +func Getuid() (uid int) { + r0, _, _ := rawSysvicall6(uintptr(unsafe.Pointer(&procGetuid)), 0, 0, 0, 0, 0, 0, 0) + uid = int(r0) + return +} + +func Kill(pid int, signum syscall.Signal) (err error) { + _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procKill)), 2, uintptr(pid), uintptr(signum), 0, 0, 0, 0) + if e1 != 0 { + err = e1 + } + return +} + +func Lchown(path string, uid int, gid int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procLchown)), 3, uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid), 0, 0, 0) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = e1 + } + return +} + +func Link(path string, link string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(link) + if err != nil { + return + } + _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procLink)), 2, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0, 0, 0, 0) + use(unsafe.Pointer(_p0)) + use(unsafe.Pointer(_p1)) + if e1 != 0 { + err = e1 + } + return +} + +func Listen(s int, backlog int) (err error) { + _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&proclisten)), 2, uintptr(s), uintptr(backlog), 0, 0, 0, 0) + if e1 != 0 { + err = e1 + } + return +} + +func Lstat(path string, stat *Stat_t) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procLstat)), 2, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0, 0, 0, 0) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = e1 + } + return +} + +func Madvise(b []byte, advice int) (err error) { + var _p0 *byte + if len(b) > 0 { + _p0 = &b[0] + } + _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procMadvise)), 3, uintptr(unsafe.Pointer(_p0)), uintptr(len(b)), uintptr(advice), 0, 0, 0) + if e1 != 0 { + err = e1 + } + return +} + +func Mkdir(path string, mode uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procMkdir)), 2, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0, 0, 0, 0) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = e1 + } + return +} + +func Mkdirat(dirfd int, path string, mode uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procMkdirat)), 3, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0, 0, 0) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = e1 + } + return +} + +func Mkfifo(path string, mode uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procMkfifo)), 2, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0, 0, 0, 0) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = e1 + } + return +} + +func Mkfifoat(dirfd int, path string, mode uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procMkfifoat)), 3, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0, 0, 0) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = e1 + } + return +} + +func Mknod(path string, mode uint32, dev int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := 
sysvicall6(uintptr(unsafe.Pointer(&procMknod)), 3, uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev), 0, 0, 0) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = e1 + } + return +} + +func Mknodat(dirfd int, path string, mode uint32, dev int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procMknodat)), 4, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev), 0, 0) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = e1 + } + return +} + +func Mlock(b []byte) (err error) { + var _p0 *byte + if len(b) > 0 { + _p0 = &b[0] + } + _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procMlock)), 2, uintptr(unsafe.Pointer(_p0)), uintptr(len(b)), 0, 0, 0, 0) + if e1 != 0 { + err = e1 + } + return +} + +func Mlockall(flags int) (err error) { + _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procMlockall)), 1, uintptr(flags), 0, 0, 0, 0, 0) + if e1 != 0 { + err = e1 + } + return +} + +func Mprotect(b []byte, prot int) (err error) { + var _p0 *byte + if len(b) > 0 { + _p0 = &b[0] + } + _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procMprotect)), 3, uintptr(unsafe.Pointer(_p0)), uintptr(len(b)), uintptr(prot), 0, 0, 0) + if e1 != 0 { + err = e1 + } + return +} + +func Munlock(b []byte) (err error) { + var _p0 *byte + if len(b) > 0 { + _p0 = &b[0] + } + _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procMunlock)), 2, uintptr(unsafe.Pointer(_p0)), uintptr(len(b)), 0, 0, 0, 0) + if e1 != 0 { + err = e1 + } + return +} + +func Munlockall() (err error) { + _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procMunlockall)), 0, 0, 0, 0, 0, 0, 0) + if e1 != 0 { + err = e1 + } + return +} + +func Nanosleep(time *Timespec, leftover *Timespec) (err error) { + _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procNanosleep)), 2, uintptr(unsafe.Pointer(time)), uintptr(unsafe.Pointer(leftover)), 0, 0, 0, 0) + if e1 != 0 { + err = e1 + } + return +} + +func Open(path string, mode int, perm uint32) (fd int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procOpen)), 3, uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(perm), 0, 0, 0) + use(unsafe.Pointer(_p0)) + fd = int(r0) + if e1 != 0 { + err = e1 + } + return +} + +func Openat(dirfd int, path string, flags int, mode uint32) (fd int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procOpenat)), 4, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags), uintptr(mode), 0, 0) + use(unsafe.Pointer(_p0)) + fd = int(r0) + if e1 != 0 { + err = e1 + } + return +} + +func Pathconf(path string, name int) (val int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procPathconf)), 2, uintptr(unsafe.Pointer(_p0)), uintptr(name), 0, 0, 0, 0) + use(unsafe.Pointer(_p0)) + val = int(r0) + if e1 != 0 { + err = e1 + } + return +} + +func Pause() (err error) { + _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procPause)), 0, 0, 0, 0, 0, 0, 0) + if e1 != 0 { + err = e1 + } + return +} + +func Pread(fd int, p []byte, offset int64) (n int, err error) { + var _p0 *byte + if len(p) > 0 { + _p0 = &p[0] + } + r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procPread)), 4, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(len(p)), uintptr(offset), 0, 0) + n = int(r0) + if e1 != 0 
{ + err = e1 + } + return +} + +func Pwrite(fd int, p []byte, offset int64) (n int, err error) { + var _p0 *byte + if len(p) > 0 { + _p0 = &p[0] + } + r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procPwrite)), 4, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(len(p)), uintptr(offset), 0, 0) + n = int(r0) + if e1 != 0 { + err = e1 + } + return +} + +func read(fd int, p []byte) (n int, err error) { + var _p0 *byte + if len(p) > 0 { + _p0 = &p[0] + } + r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procread)), 3, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(len(p)), 0, 0, 0) + n = int(r0) + if e1 != 0 { + err = e1 + } + return +} + +func Readlink(path string, buf []byte) (n int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 *byte + if len(buf) > 0 { + _p1 = &buf[0] + } + r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procReadlink)), 3, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(len(buf)), 0, 0, 0) + use(unsafe.Pointer(_p0)) + n = int(r0) + if e1 != 0 { + err = e1 + } + return +} + +func Rename(from string, to string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(from) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(to) + if err != nil { + return + } + _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procRename)), 2, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0, 0, 0, 0) + use(unsafe.Pointer(_p0)) + use(unsafe.Pointer(_p1)) + if e1 != 0 { + err = e1 + } + return +} + +func Renameat(olddirfd int, oldpath string, newdirfd int, newpath string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(oldpath) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(newpath) + if err != nil { + return + } + _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procRenameat)), 4, uintptr(olddirfd), uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1)), 0, 0) + use(unsafe.Pointer(_p0)) + use(unsafe.Pointer(_p1)) + if e1 != 0 { + err = e1 + } + return +} + +func Rmdir(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procRmdir)), 1, uintptr(unsafe.Pointer(_p0)), 0, 0, 0, 0, 0) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = e1 + } + return +} + +func Seek(fd int, offset int64, whence int) (newoffset int64, err error) { + r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&proclseek)), 3, uintptr(fd), uintptr(offset), uintptr(whence), 0, 0, 0) + newoffset = int64(r0) + if e1 != 0 { + err = e1 + } + return +} + +func Setegid(egid int) (err error) { + _, _, e1 := rawSysvicall6(uintptr(unsafe.Pointer(&procSetegid)), 1, uintptr(egid), 0, 0, 0, 0, 0) + if e1 != 0 { + err = e1 + } + return +} + +func Seteuid(euid int) (err error) { + _, _, e1 := rawSysvicall6(uintptr(unsafe.Pointer(&procSeteuid)), 1, uintptr(euid), 0, 0, 0, 0, 0) + if e1 != 0 { + err = e1 + } + return +} + +func Setgid(gid int) (err error) { + _, _, e1 := rawSysvicall6(uintptr(unsafe.Pointer(&procSetgid)), 1, uintptr(gid), 0, 0, 0, 0, 0) + if e1 != 0 { + err = e1 + } + return +} + +func Sethostname(p []byte) (err error) { + var _p0 *byte + if len(p) > 0 { + _p0 = &p[0] + } + _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procSethostname)), 2, uintptr(unsafe.Pointer(_p0)), uintptr(len(p)), 0, 0, 0, 0) + if e1 != 0 { + err = e1 + } + return +} + +func Setpgid(pid int, pgid int) (err error) { + _, _, e1 := 
rawSysvicall6(uintptr(unsafe.Pointer(&procSetpgid)), 2, uintptr(pid), uintptr(pgid), 0, 0, 0, 0) + if e1 != 0 { + err = e1 + } + return +} + +func Setpriority(which int, who int, prio int) (err error) { + _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procSetpriority)), 3, uintptr(which), uintptr(who), uintptr(prio), 0, 0, 0) + if e1 != 0 { + err = e1 + } + return +} + +func Setregid(rgid int, egid int) (err error) { + _, _, e1 := rawSysvicall6(uintptr(unsafe.Pointer(&procSetregid)), 2, uintptr(rgid), uintptr(egid), 0, 0, 0, 0) + if e1 != 0 { + err = e1 + } + return +} + +func Setreuid(ruid int, euid int) (err error) { + _, _, e1 := rawSysvicall6(uintptr(unsafe.Pointer(&procSetreuid)), 2, uintptr(ruid), uintptr(euid), 0, 0, 0, 0) + if e1 != 0 { + err = e1 + } + return +} + +func Setrlimit(which int, lim *Rlimit) (err error) { + _, _, e1 := rawSysvicall6(uintptr(unsafe.Pointer(&procSetrlimit)), 2, uintptr(which), uintptr(unsafe.Pointer(lim)), 0, 0, 0, 0) + if e1 != 0 { + err = e1 + } + return +} + +func Setsid() (pid int, err error) { + r0, _, e1 := rawSysvicall6(uintptr(unsafe.Pointer(&procSetsid)), 0, 0, 0, 0, 0, 0, 0) + pid = int(r0) + if e1 != 0 { + err = e1 + } + return +} + +func Setuid(uid int) (err error) { + _, _, e1 := rawSysvicall6(uintptr(unsafe.Pointer(&procSetuid)), 1, uintptr(uid), 0, 0, 0, 0, 0) + if e1 != 0 { + err = e1 + } + return +} + +func Shutdown(s int, how int) (err error) { + _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procshutdown)), 2, uintptr(s), uintptr(how), 0, 0, 0, 0) + if e1 != 0 { + err = e1 + } + return +} + +func Stat(path string, stat *Stat_t) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procStat)), 2, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0, 0, 0, 0) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = e1 + } + return +} + +func Symlink(path string, link string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(link) + if err != nil { + return + } + _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procSymlink)), 2, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0, 0, 0, 0) + use(unsafe.Pointer(_p0)) + use(unsafe.Pointer(_p1)) + if e1 != 0 { + err = e1 + } + return +} + +func Sync() (err error) { + _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procSync)), 0, 0, 0, 0, 0, 0, 0) + if e1 != 0 { + err = e1 + } + return +} + +func Times(tms *Tms) (ticks uintptr, err error) { + r0, _, e1 := rawSysvicall6(uintptr(unsafe.Pointer(&procTimes)), 1, uintptr(unsafe.Pointer(tms)), 0, 0, 0, 0, 0) + ticks = uintptr(r0) + if e1 != 0 { + err = e1 + } + return +} + +func Truncate(path string, length int64) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procTruncate)), 2, uintptr(unsafe.Pointer(_p0)), uintptr(length), 0, 0, 0, 0) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = e1 + } + return +} + +func Fsync(fd int) (err error) { + _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procFsync)), 1, uintptr(fd), 0, 0, 0, 0, 0) + if e1 != 0 { + err = e1 + } + return +} + +func Ftruncate(fd int, length int64) (err error) { + _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procFtruncate)), 2, uintptr(fd), uintptr(length), 0, 0, 0, 0) + if e1 != 0 { + err = e1 + } + return +} + +func Umask(mask int) (oldmask int) { + r0, _, _ := 
sysvicall6(uintptr(unsafe.Pointer(&procUmask)), 1, uintptr(mask), 0, 0, 0, 0, 0) + oldmask = int(r0) + return +} + +func Uname(buf *Utsname) (err error) { + _, _, e1 := rawSysvicall6(uintptr(unsafe.Pointer(&procUname)), 1, uintptr(unsafe.Pointer(buf)), 0, 0, 0, 0, 0) + if e1 != 0 { + err = e1 + } + return +} + +func Unmount(target string, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(target) + if err != nil { + return + } + _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procumount)), 2, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0, 0, 0, 0) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = e1 + } + return +} + +func Unlink(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procUnlink)), 1, uintptr(unsafe.Pointer(_p0)), 0, 0, 0, 0, 0) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = e1 + } + return +} + +func Unlinkat(dirfd int, path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procUnlinkat)), 2, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), 0, 0, 0, 0) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = e1 + } + return +} + +func Ustat(dev int, ubuf *Ustat_t) (err error) { + _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procUstat)), 2, uintptr(dev), uintptr(unsafe.Pointer(ubuf)), 0, 0, 0, 0) + if e1 != 0 { + err = e1 + } + return +} + +func Utime(path string, buf *Utimbuf) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procUtime)), 2, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(buf)), 0, 0, 0, 0) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = e1 + } + return +} + +func bind(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) { + _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procbind)), 3, uintptr(s), uintptr(addr), uintptr(addrlen), 0, 0, 0) + if e1 != 0 { + err = e1 + } + return +} + +func connect(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) { + _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procconnect)), 3, uintptr(s), uintptr(addr), uintptr(addrlen), 0, 0, 0) + if e1 != 0 { + err = e1 + } + return +} + +func mmap(addr uintptr, length uintptr, prot int, flag int, fd int, pos int64) (ret uintptr, err error) { + r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procmmap)), 6, uintptr(addr), uintptr(length), uintptr(prot), uintptr(flag), uintptr(fd), uintptr(pos)) + ret = uintptr(r0) + if e1 != 0 { + err = e1 + } + return +} + +func munmap(addr uintptr, length uintptr) (err error) { + _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procmunmap)), 2, uintptr(addr), uintptr(length), 0, 0, 0, 0) + if e1 != 0 { + err = e1 + } + return +} + +func sendto(s int, buf []byte, flags int, to unsafe.Pointer, addrlen _Socklen) (err error) { + var _p0 *byte + if len(buf) > 0 { + _p0 = &buf[0] + } + _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procsendto)), 6, uintptr(s), uintptr(unsafe.Pointer(_p0)), uintptr(len(buf)), uintptr(flags), uintptr(to), uintptr(addrlen)) + if e1 != 0 { + err = e1 + } + return +} + +func socket(domain int, typ int, proto int) (fd int, err error) { + r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procsocket)), 3, uintptr(domain), uintptr(typ), uintptr(proto), 0, 0, 0) + fd = int(r0) + if e1 != 0 { + err = e1 + } + return +} + +func socketpair(domain int, typ int, proto int, fd *[2]int32) (err error) { + _, 
_, e1 := rawSysvicall6(uintptr(unsafe.Pointer(&procsocketpair)), 4, uintptr(domain), uintptr(typ), uintptr(proto), uintptr(unsafe.Pointer(fd)), 0, 0) + if e1 != 0 { + err = e1 + } + return +} + +func write(fd int, p []byte) (n int, err error) { + var _p0 *byte + if len(p) > 0 { + _p0 = &p[0] + } + r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procwrite)), 3, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(len(p)), 0, 0, 0) + n = int(r0) + if e1 != 0 { + err = e1 + } + return +} + +func getsockopt(s int, level int, name int, val unsafe.Pointer, vallen *_Socklen) (err error) { + _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procgetsockopt)), 5, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(unsafe.Pointer(vallen)), 0) + if e1 != 0 { + err = e1 + } + return +} + +func getpeername(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) { + _, _, e1 := rawSysvicall6(uintptr(unsafe.Pointer(&procgetpeername)), 3, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)), 0, 0, 0) + if e1 != 0 { + err = e1 + } + return +} + +func setsockopt(s int, level int, name int, val unsafe.Pointer, vallen uintptr) (err error) { + _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procsetsockopt)), 5, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(vallen), 0) + if e1 != 0 { + err = e1 + } + return +} + +func recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Socklen) (n int, err error) { + var _p0 *byte + if len(p) > 0 { + _p0 = &p[0] + } + r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procrecvfrom)), 6, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(len(p)), uintptr(flags), uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(fromlen))) + n = int(r0) + if e1 != 0 { + err = e1 + } + return +} + +func sysconf(name int) (n int64, err error) { + r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procsysconf)), 1, uintptr(name), 0, 0, 0, 0, 0) + n = int64(r0) + if e1 != 0 { + err = e1 + } + return +} diff --git a/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zsysctl_openbsd.go b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zsysctl_openbsd.go new file mode 100644 index 000000000..83bb935b9 --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zsysctl_openbsd.go @@ -0,0 +1,270 @@ +// mksysctl_openbsd.pl +// MACHINE GENERATED BY THE ABOVE COMMAND; DO NOT EDIT + +package unix + +type mibentry struct { + ctlname string + ctloid []_C_int +} + +var sysctlMib = []mibentry{ + {"ddb.console", []_C_int{9, 6}}, + {"ddb.log", []_C_int{9, 7}}, + {"ddb.max_line", []_C_int{9, 3}}, + {"ddb.max_width", []_C_int{9, 2}}, + {"ddb.panic", []_C_int{9, 5}}, + {"ddb.radix", []_C_int{9, 1}}, + {"ddb.tab_stop_width", []_C_int{9, 4}}, + {"ddb.trigger", []_C_int{9, 8}}, + {"fs.posix.setuid", []_C_int{3, 1, 1}}, + {"hw.allowpowerdown", []_C_int{6, 22}}, + {"hw.byteorder", []_C_int{6, 4}}, + {"hw.cpuspeed", []_C_int{6, 12}}, + {"hw.diskcount", []_C_int{6, 10}}, + {"hw.disknames", []_C_int{6, 8}}, + {"hw.diskstats", []_C_int{6, 9}}, + {"hw.machine", []_C_int{6, 1}}, + {"hw.model", []_C_int{6, 2}}, + {"hw.ncpu", []_C_int{6, 3}}, + {"hw.ncpufound", []_C_int{6, 21}}, + {"hw.pagesize", []_C_int{6, 7}}, + {"hw.physmem", []_C_int{6, 19}}, + {"hw.product", []_C_int{6, 15}}, + {"hw.serialno", []_C_int{6, 17}}, + {"hw.setperf", []_C_int{6, 13}}, + {"hw.usermem", []_C_int{6, 20}}, + {"hw.uuid", []_C_int{6, 18}}, + {"hw.vendor", []_C_int{6, 14}}, + {"hw.version", []_C_int{6, 16}}, + 
{"kern.arandom", []_C_int{1, 37}}, + {"kern.argmax", []_C_int{1, 8}}, + {"kern.boottime", []_C_int{1, 21}}, + {"kern.bufcachepercent", []_C_int{1, 72}}, + {"kern.ccpu", []_C_int{1, 45}}, + {"kern.clockrate", []_C_int{1, 12}}, + {"kern.consdev", []_C_int{1, 75}}, + {"kern.cp_time", []_C_int{1, 40}}, + {"kern.cp_time2", []_C_int{1, 71}}, + {"kern.cryptodevallowsoft", []_C_int{1, 53}}, + {"kern.domainname", []_C_int{1, 22}}, + {"kern.file", []_C_int{1, 73}}, + {"kern.forkstat", []_C_int{1, 42}}, + {"kern.fscale", []_C_int{1, 46}}, + {"kern.fsync", []_C_int{1, 33}}, + {"kern.hostid", []_C_int{1, 11}}, + {"kern.hostname", []_C_int{1, 10}}, + {"kern.intrcnt.nintrcnt", []_C_int{1, 63, 1}}, + {"kern.job_control", []_C_int{1, 19}}, + {"kern.malloc.buckets", []_C_int{1, 39, 1}}, + {"kern.malloc.kmemnames", []_C_int{1, 39, 3}}, + {"kern.maxclusters", []_C_int{1, 67}}, + {"kern.maxfiles", []_C_int{1, 7}}, + {"kern.maxlocksperuid", []_C_int{1, 70}}, + {"kern.maxpartitions", []_C_int{1, 23}}, + {"kern.maxproc", []_C_int{1, 6}}, + {"kern.maxthread", []_C_int{1, 25}}, + {"kern.maxvnodes", []_C_int{1, 5}}, + {"kern.mbstat", []_C_int{1, 59}}, + {"kern.msgbuf", []_C_int{1, 48}}, + {"kern.msgbufsize", []_C_int{1, 38}}, + {"kern.nchstats", []_C_int{1, 41}}, + {"kern.netlivelocks", []_C_int{1, 76}}, + {"kern.nfiles", []_C_int{1, 56}}, + {"kern.ngroups", []_C_int{1, 18}}, + {"kern.nosuidcoredump", []_C_int{1, 32}}, + {"kern.nprocs", []_C_int{1, 47}}, + {"kern.nselcoll", []_C_int{1, 43}}, + {"kern.nthreads", []_C_int{1, 26}}, + {"kern.numvnodes", []_C_int{1, 58}}, + {"kern.osrelease", []_C_int{1, 2}}, + {"kern.osrevision", []_C_int{1, 3}}, + {"kern.ostype", []_C_int{1, 1}}, + {"kern.osversion", []_C_int{1, 27}}, + {"kern.pool_debug", []_C_int{1, 77}}, + {"kern.posix1version", []_C_int{1, 17}}, + {"kern.proc", []_C_int{1, 66}}, + {"kern.random", []_C_int{1, 31}}, + {"kern.rawpartition", []_C_int{1, 24}}, + {"kern.saved_ids", []_C_int{1, 20}}, + {"kern.securelevel", []_C_int{1, 9}}, + {"kern.seminfo", []_C_int{1, 61}}, + {"kern.shminfo", []_C_int{1, 62}}, + {"kern.somaxconn", []_C_int{1, 28}}, + {"kern.sominconn", []_C_int{1, 29}}, + {"kern.splassert", []_C_int{1, 54}}, + {"kern.stackgap_random", []_C_int{1, 50}}, + {"kern.sysvipc_info", []_C_int{1, 51}}, + {"kern.sysvmsg", []_C_int{1, 34}}, + {"kern.sysvsem", []_C_int{1, 35}}, + {"kern.sysvshm", []_C_int{1, 36}}, + {"kern.timecounter.choice", []_C_int{1, 69, 4}}, + {"kern.timecounter.hardware", []_C_int{1, 69, 3}}, + {"kern.timecounter.tick", []_C_int{1, 69, 1}}, + {"kern.timecounter.timestepwarnings", []_C_int{1, 69, 2}}, + {"kern.tty.maxptys", []_C_int{1, 44, 6}}, + {"kern.tty.nptys", []_C_int{1, 44, 7}}, + {"kern.tty.tk_cancc", []_C_int{1, 44, 4}}, + {"kern.tty.tk_nin", []_C_int{1, 44, 1}}, + {"kern.tty.tk_nout", []_C_int{1, 44, 2}}, + {"kern.tty.tk_rawcc", []_C_int{1, 44, 3}}, + {"kern.tty.ttyinfo", []_C_int{1, 44, 5}}, + {"kern.ttycount", []_C_int{1, 57}}, + {"kern.userasymcrypto", []_C_int{1, 60}}, + {"kern.usercrypto", []_C_int{1, 52}}, + {"kern.usermount", []_C_int{1, 30}}, + {"kern.version", []_C_int{1, 4}}, + {"kern.vnode", []_C_int{1, 13}}, + {"kern.watchdog.auto", []_C_int{1, 64, 2}}, + {"kern.watchdog.period", []_C_int{1, 64, 1}}, + {"net.bpf.bufsize", []_C_int{4, 31, 1}}, + {"net.bpf.maxbufsize", []_C_int{4, 31, 2}}, + {"net.inet.ah.enable", []_C_int{4, 2, 51, 1}}, + {"net.inet.ah.stats", []_C_int{4, 2, 51, 2}}, + {"net.inet.carp.allow", []_C_int{4, 2, 112, 1}}, + {"net.inet.carp.log", []_C_int{4, 2, 112, 3}}, + {"net.inet.carp.preempt", []_C_int{4, 
2, 112, 2}}, + {"net.inet.carp.stats", []_C_int{4, 2, 112, 4}}, + {"net.inet.divert.recvspace", []_C_int{4, 2, 258, 1}}, + {"net.inet.divert.sendspace", []_C_int{4, 2, 258, 2}}, + {"net.inet.divert.stats", []_C_int{4, 2, 258, 3}}, + {"net.inet.esp.enable", []_C_int{4, 2, 50, 1}}, + {"net.inet.esp.stats", []_C_int{4, 2, 50, 4}}, + {"net.inet.esp.udpencap", []_C_int{4, 2, 50, 2}}, + {"net.inet.esp.udpencap_port", []_C_int{4, 2, 50, 3}}, + {"net.inet.etherip.allow", []_C_int{4, 2, 97, 1}}, + {"net.inet.etherip.stats", []_C_int{4, 2, 97, 2}}, + {"net.inet.gre.allow", []_C_int{4, 2, 47, 1}}, + {"net.inet.gre.wccp", []_C_int{4, 2, 47, 2}}, + {"net.inet.icmp.bmcastecho", []_C_int{4, 2, 1, 2}}, + {"net.inet.icmp.errppslimit", []_C_int{4, 2, 1, 3}}, + {"net.inet.icmp.maskrepl", []_C_int{4, 2, 1, 1}}, + {"net.inet.icmp.rediraccept", []_C_int{4, 2, 1, 4}}, + {"net.inet.icmp.redirtimeout", []_C_int{4, 2, 1, 5}}, + {"net.inet.icmp.stats", []_C_int{4, 2, 1, 7}}, + {"net.inet.icmp.tstamprepl", []_C_int{4, 2, 1, 6}}, + {"net.inet.igmp.stats", []_C_int{4, 2, 2, 1}}, + {"net.inet.ip.arpqueued", []_C_int{4, 2, 0, 36}}, + {"net.inet.ip.encdebug", []_C_int{4, 2, 0, 12}}, + {"net.inet.ip.forwarding", []_C_int{4, 2, 0, 1}}, + {"net.inet.ip.ifq.congestion", []_C_int{4, 2, 0, 30, 4}}, + {"net.inet.ip.ifq.drops", []_C_int{4, 2, 0, 30, 3}}, + {"net.inet.ip.ifq.len", []_C_int{4, 2, 0, 30, 1}}, + {"net.inet.ip.ifq.maxlen", []_C_int{4, 2, 0, 30, 2}}, + {"net.inet.ip.maxqueue", []_C_int{4, 2, 0, 11}}, + {"net.inet.ip.mforwarding", []_C_int{4, 2, 0, 31}}, + {"net.inet.ip.mrtproto", []_C_int{4, 2, 0, 34}}, + {"net.inet.ip.mrtstats", []_C_int{4, 2, 0, 35}}, + {"net.inet.ip.mtu", []_C_int{4, 2, 0, 4}}, + {"net.inet.ip.mtudisc", []_C_int{4, 2, 0, 27}}, + {"net.inet.ip.mtudisctimeout", []_C_int{4, 2, 0, 28}}, + {"net.inet.ip.multipath", []_C_int{4, 2, 0, 32}}, + {"net.inet.ip.portfirst", []_C_int{4, 2, 0, 7}}, + {"net.inet.ip.porthifirst", []_C_int{4, 2, 0, 9}}, + {"net.inet.ip.porthilast", []_C_int{4, 2, 0, 10}}, + {"net.inet.ip.portlast", []_C_int{4, 2, 0, 8}}, + {"net.inet.ip.redirect", []_C_int{4, 2, 0, 2}}, + {"net.inet.ip.sourceroute", []_C_int{4, 2, 0, 5}}, + {"net.inet.ip.stats", []_C_int{4, 2, 0, 33}}, + {"net.inet.ip.ttl", []_C_int{4, 2, 0, 3}}, + {"net.inet.ipcomp.enable", []_C_int{4, 2, 108, 1}}, + {"net.inet.ipcomp.stats", []_C_int{4, 2, 108, 2}}, + {"net.inet.ipip.allow", []_C_int{4, 2, 4, 1}}, + {"net.inet.ipip.stats", []_C_int{4, 2, 4, 2}}, + {"net.inet.mobileip.allow", []_C_int{4, 2, 55, 1}}, + {"net.inet.pfsync.stats", []_C_int{4, 2, 240, 1}}, + {"net.inet.pim.stats", []_C_int{4, 2, 103, 1}}, + {"net.inet.tcp.ackonpush", []_C_int{4, 2, 6, 13}}, + {"net.inet.tcp.always_keepalive", []_C_int{4, 2, 6, 22}}, + {"net.inet.tcp.baddynamic", []_C_int{4, 2, 6, 6}}, + {"net.inet.tcp.drop", []_C_int{4, 2, 6, 19}}, + {"net.inet.tcp.ecn", []_C_int{4, 2, 6, 14}}, + {"net.inet.tcp.ident", []_C_int{4, 2, 6, 9}}, + {"net.inet.tcp.keepidle", []_C_int{4, 2, 6, 3}}, + {"net.inet.tcp.keepinittime", []_C_int{4, 2, 6, 2}}, + {"net.inet.tcp.keepintvl", []_C_int{4, 2, 6, 4}}, + {"net.inet.tcp.mssdflt", []_C_int{4, 2, 6, 11}}, + {"net.inet.tcp.reasslimit", []_C_int{4, 2, 6, 18}}, + {"net.inet.tcp.rfc1323", []_C_int{4, 2, 6, 1}}, + {"net.inet.tcp.rfc3390", []_C_int{4, 2, 6, 17}}, + {"net.inet.tcp.rstppslimit", []_C_int{4, 2, 6, 12}}, + {"net.inet.tcp.sack", []_C_int{4, 2, 6, 10}}, + {"net.inet.tcp.sackholelimit", []_C_int{4, 2, 6, 20}}, + {"net.inet.tcp.slowhz", []_C_int{4, 2, 6, 5}}, + {"net.inet.tcp.stats", []_C_int{4, 2, 6, 21}}, + 
{"net.inet.tcp.synbucketlimit", []_C_int{4, 2, 6, 16}}, + {"net.inet.tcp.syncachelimit", []_C_int{4, 2, 6, 15}}, + {"net.inet.udp.baddynamic", []_C_int{4, 2, 17, 2}}, + {"net.inet.udp.checksum", []_C_int{4, 2, 17, 1}}, + {"net.inet.udp.recvspace", []_C_int{4, 2, 17, 3}}, + {"net.inet.udp.sendspace", []_C_int{4, 2, 17, 4}}, + {"net.inet.udp.stats", []_C_int{4, 2, 17, 5}}, + {"net.inet6.divert.recvspace", []_C_int{4, 24, 86, 1}}, + {"net.inet6.divert.sendspace", []_C_int{4, 24, 86, 2}}, + {"net.inet6.divert.stats", []_C_int{4, 24, 86, 3}}, + {"net.inet6.icmp6.errppslimit", []_C_int{4, 24, 30, 14}}, + {"net.inet6.icmp6.mtudisc_hiwat", []_C_int{4, 24, 30, 16}}, + {"net.inet6.icmp6.mtudisc_lowat", []_C_int{4, 24, 30, 17}}, + {"net.inet6.icmp6.nd6_debug", []_C_int{4, 24, 30, 18}}, + {"net.inet6.icmp6.nd6_delay", []_C_int{4, 24, 30, 8}}, + {"net.inet6.icmp6.nd6_maxnudhint", []_C_int{4, 24, 30, 15}}, + {"net.inet6.icmp6.nd6_mmaxtries", []_C_int{4, 24, 30, 10}}, + {"net.inet6.icmp6.nd6_prune", []_C_int{4, 24, 30, 6}}, + {"net.inet6.icmp6.nd6_umaxtries", []_C_int{4, 24, 30, 9}}, + {"net.inet6.icmp6.nd6_useloopback", []_C_int{4, 24, 30, 11}}, + {"net.inet6.icmp6.nodeinfo", []_C_int{4, 24, 30, 13}}, + {"net.inet6.icmp6.rediraccept", []_C_int{4, 24, 30, 2}}, + {"net.inet6.icmp6.redirtimeout", []_C_int{4, 24, 30, 3}}, + {"net.inet6.ip6.accept_rtadv", []_C_int{4, 24, 17, 12}}, + {"net.inet6.ip6.auto_flowlabel", []_C_int{4, 24, 17, 17}}, + {"net.inet6.ip6.dad_count", []_C_int{4, 24, 17, 16}}, + {"net.inet6.ip6.dad_pending", []_C_int{4, 24, 17, 49}}, + {"net.inet6.ip6.defmcasthlim", []_C_int{4, 24, 17, 18}}, + {"net.inet6.ip6.forwarding", []_C_int{4, 24, 17, 1}}, + {"net.inet6.ip6.forwsrcrt", []_C_int{4, 24, 17, 5}}, + {"net.inet6.ip6.hdrnestlimit", []_C_int{4, 24, 17, 15}}, + {"net.inet6.ip6.hlim", []_C_int{4, 24, 17, 3}}, + {"net.inet6.ip6.log_interval", []_C_int{4, 24, 17, 14}}, + {"net.inet6.ip6.maxdynroutes", []_C_int{4, 24, 17, 48}}, + {"net.inet6.ip6.maxfragpackets", []_C_int{4, 24, 17, 9}}, + {"net.inet6.ip6.maxfrags", []_C_int{4, 24, 17, 41}}, + {"net.inet6.ip6.maxifdefrouters", []_C_int{4, 24, 17, 47}}, + {"net.inet6.ip6.maxifprefixes", []_C_int{4, 24, 17, 46}}, + {"net.inet6.ip6.mforwarding", []_C_int{4, 24, 17, 42}}, + {"net.inet6.ip6.mrtproto", []_C_int{4, 24, 17, 8}}, + {"net.inet6.ip6.mtudisctimeout", []_C_int{4, 24, 17, 50}}, + {"net.inet6.ip6.multicast_mtudisc", []_C_int{4, 24, 17, 44}}, + {"net.inet6.ip6.multipath", []_C_int{4, 24, 17, 43}}, + {"net.inet6.ip6.neighborgcthresh", []_C_int{4, 24, 17, 45}}, + {"net.inet6.ip6.redirect", []_C_int{4, 24, 17, 2}}, + {"net.inet6.ip6.rr_prune", []_C_int{4, 24, 17, 22}}, + {"net.inet6.ip6.sourcecheck", []_C_int{4, 24, 17, 10}}, + {"net.inet6.ip6.sourcecheck_logint", []_C_int{4, 24, 17, 11}}, + {"net.inet6.ip6.use_deprecated", []_C_int{4, 24, 17, 21}}, + {"net.inet6.ip6.v6only", []_C_int{4, 24, 17, 24}}, + {"net.key.sadb_dump", []_C_int{4, 30, 1}}, + {"net.key.spd_dump", []_C_int{4, 30, 2}}, + {"net.mpls.ifq.congestion", []_C_int{4, 33, 3, 4}}, + {"net.mpls.ifq.drops", []_C_int{4, 33, 3, 3}}, + {"net.mpls.ifq.len", []_C_int{4, 33, 3, 1}}, + {"net.mpls.ifq.maxlen", []_C_int{4, 33, 3, 2}}, + {"net.mpls.mapttl_ip", []_C_int{4, 33, 5}}, + {"net.mpls.mapttl_ip6", []_C_int{4, 33, 6}}, + {"net.mpls.maxloop_inkernel", []_C_int{4, 33, 4}}, + {"net.mpls.ttl", []_C_int{4, 33, 2}}, + {"net.pflow.stats", []_C_int{4, 34, 1}}, + {"net.pipex.enable", []_C_int{4, 35, 1}}, + {"vm.anonmin", []_C_int{2, 7}}, + {"vm.loadavg", []_C_int{2, 2}}, + {"vm.maxslp", []_C_int{2, 
10}}, + {"vm.nkmempages", []_C_int{2, 6}}, + {"vm.psstrings", []_C_int{2, 3}}, + {"vm.swapencrypt.enable", []_C_int{2, 5, 0}}, + {"vm.swapencrypt.keyscreated", []_C_int{2, 5, 1}}, + {"vm.swapencrypt.keysdeleted", []_C_int{2, 5, 2}}, + {"vm.uspace", []_C_int{2, 11}}, + {"vm.uvmexp", []_C_int{2, 4}}, + {"vm.vmmeter", []_C_int{2, 1}}, + {"vm.vnodemin", []_C_int{2, 9}}, + {"vm.vtextmin", []_C_int{2, 8}}, +} diff --git a/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zsysnum_darwin_386.go b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zsysnum_darwin_386.go new file mode 100644 index 000000000..2786773ba --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zsysnum_darwin_386.go @@ -0,0 +1,398 @@ +// mksysnum_darwin.pl /Applications/Xcode.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX10.10.sdk/usr/include/sys/syscall.h +// MACHINE GENERATED BY THE ABOVE COMMAND; DO NOT EDIT + +// +build 386,darwin + +package unix + +const ( + SYS_SYSCALL = 0 + SYS_EXIT = 1 + SYS_FORK = 2 + SYS_READ = 3 + SYS_WRITE = 4 + SYS_OPEN = 5 + SYS_CLOSE = 6 + SYS_WAIT4 = 7 + SYS_LINK = 9 + SYS_UNLINK = 10 + SYS_CHDIR = 12 + SYS_FCHDIR = 13 + SYS_MKNOD = 14 + SYS_CHMOD = 15 + SYS_CHOWN = 16 + SYS_GETFSSTAT = 18 + SYS_GETPID = 20 + SYS_SETUID = 23 + SYS_GETUID = 24 + SYS_GETEUID = 25 + SYS_PTRACE = 26 + SYS_RECVMSG = 27 + SYS_SENDMSG = 28 + SYS_RECVFROM = 29 + SYS_ACCEPT = 30 + SYS_GETPEERNAME = 31 + SYS_GETSOCKNAME = 32 + SYS_ACCESS = 33 + SYS_CHFLAGS = 34 + SYS_FCHFLAGS = 35 + SYS_SYNC = 36 + SYS_KILL = 37 + SYS_GETPPID = 39 + SYS_DUP = 41 + SYS_PIPE = 42 + SYS_GETEGID = 43 + SYS_SIGACTION = 46 + SYS_GETGID = 47 + SYS_SIGPROCMASK = 48 + SYS_GETLOGIN = 49 + SYS_SETLOGIN = 50 + SYS_ACCT = 51 + SYS_SIGPENDING = 52 + SYS_SIGALTSTACK = 53 + SYS_IOCTL = 54 + SYS_REBOOT = 55 + SYS_REVOKE = 56 + SYS_SYMLINK = 57 + SYS_READLINK = 58 + SYS_EXECVE = 59 + SYS_UMASK = 60 + SYS_CHROOT = 61 + SYS_MSYNC = 65 + SYS_VFORK = 66 + SYS_MUNMAP = 73 + SYS_MPROTECT = 74 + SYS_MADVISE = 75 + SYS_MINCORE = 78 + SYS_GETGROUPS = 79 + SYS_SETGROUPS = 80 + SYS_GETPGRP = 81 + SYS_SETPGID = 82 + SYS_SETITIMER = 83 + SYS_SWAPON = 85 + SYS_GETITIMER = 86 + SYS_GETDTABLESIZE = 89 + SYS_DUP2 = 90 + SYS_FCNTL = 92 + SYS_SELECT = 93 + SYS_FSYNC = 95 + SYS_SETPRIORITY = 96 + SYS_SOCKET = 97 + SYS_CONNECT = 98 + SYS_GETPRIORITY = 100 + SYS_BIND = 104 + SYS_SETSOCKOPT = 105 + SYS_LISTEN = 106 + SYS_SIGSUSPEND = 111 + SYS_GETTIMEOFDAY = 116 + SYS_GETRUSAGE = 117 + SYS_GETSOCKOPT = 118 + SYS_READV = 120 + SYS_WRITEV = 121 + SYS_SETTIMEOFDAY = 122 + SYS_FCHOWN = 123 + SYS_FCHMOD = 124 + SYS_SETREUID = 126 + SYS_SETREGID = 127 + SYS_RENAME = 128 + SYS_FLOCK = 131 + SYS_MKFIFO = 132 + SYS_SENDTO = 133 + SYS_SHUTDOWN = 134 + SYS_SOCKETPAIR = 135 + SYS_MKDIR = 136 + SYS_RMDIR = 137 + SYS_UTIMES = 138 + SYS_FUTIMES = 139 + SYS_ADJTIME = 140 + SYS_GETHOSTUUID = 142 + SYS_SETSID = 147 + SYS_GETPGID = 151 + SYS_SETPRIVEXEC = 152 + SYS_PREAD = 153 + SYS_PWRITE = 154 + SYS_NFSSVC = 155 + SYS_STATFS = 157 + SYS_FSTATFS = 158 + SYS_UNMOUNT = 159 + SYS_GETFH = 161 + SYS_QUOTACTL = 165 + SYS_MOUNT = 167 + SYS_CSOPS = 169 + SYS_CSOPS_AUDITTOKEN = 170 + SYS_WAITID = 173 + SYS_KDEBUG_TRACE64 = 179 + SYS_KDEBUG_TRACE = 180 + SYS_SETGID = 181 + SYS_SETEGID = 182 + SYS_SETEUID = 183 + SYS_SIGRETURN = 184 + SYS_CHUD = 185 + SYS_FDATASYNC = 187 + SYS_STAT = 188 + SYS_FSTAT = 189 + SYS_LSTAT = 190 + SYS_PATHCONF = 191 + SYS_FPATHCONF = 192 + SYS_GETRLIMIT = 194 + 
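The sysctlMib table in zsysctl_openbsd.go above maps dotted OpenBSD sysctl names to the raw MIB oid vectors that sysctl(2) expects, and the generator emits the entries sorted by name. A minimal sketch of how such a table can be searched, with a hypothetical lookupMib helper and a three-entry excerpt standing in for the generated data (illustration only, not part of this patch):

    package main

    import (
    	"fmt"
    	"sort"
    )

    // mibentry mirrors the generated type; the real table uses _C_int.
    type mibentry struct {
    	ctlname string
    	ctloid  []int32
    }

    // Excerpt of the generated, name-sorted table.
    var sysctlMib = []mibentry{
    	{"hw.ncpu", []int32{6, 3}},
    	{"kern.hostname", []int32{1, 10}},
    	{"kern.ostype", []int32{1, 1}},
    }

    // lookupMib is a hypothetical helper: binary-search the sorted table
    // and return the oid vector to pass to the sysctl(2) syscall.
    func lookupMib(name string) ([]int32, bool) {
    	i := sort.Search(len(sysctlMib), func(i int) bool {
    		return sysctlMib[i].ctlname >= name
    	})
    	if i < len(sysctlMib) && sysctlMib[i].ctlname == name {
    		return sysctlMib[i].ctloid, true
    	}
    	return nil, false
    }

    func main() {
    	fmt.Println(lookupMib("kern.hostname")) // [1 10] true
    }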
SYS_SETRLIMIT = 195 + SYS_GETDIRENTRIES = 196 + SYS_MMAP = 197 + SYS_LSEEK = 199 + SYS_TRUNCATE = 200 + SYS_FTRUNCATE = 201 + SYS_SYSCTL = 202 + SYS_MLOCK = 203 + SYS_MUNLOCK = 204 + SYS_UNDELETE = 205 + SYS_OPEN_DPROTECTED_NP = 216 + SYS_GETATTRLIST = 220 + SYS_SETATTRLIST = 221 + SYS_GETDIRENTRIESATTR = 222 + SYS_EXCHANGEDATA = 223 + SYS_SEARCHFS = 225 + SYS_DELETE = 226 + SYS_COPYFILE = 227 + SYS_FGETATTRLIST = 228 + SYS_FSETATTRLIST = 229 + SYS_POLL = 230 + SYS_WATCHEVENT = 231 + SYS_WAITEVENT = 232 + SYS_MODWATCH = 233 + SYS_GETXATTR = 234 + SYS_FGETXATTR = 235 + SYS_SETXATTR = 236 + SYS_FSETXATTR = 237 + SYS_REMOVEXATTR = 238 + SYS_FREMOVEXATTR = 239 + SYS_LISTXATTR = 240 + SYS_FLISTXATTR = 241 + SYS_FSCTL = 242 + SYS_INITGROUPS = 243 + SYS_POSIX_SPAWN = 244 + SYS_FFSCTL = 245 + SYS_NFSCLNT = 247 + SYS_FHOPEN = 248 + SYS_MINHERIT = 250 + SYS_SEMSYS = 251 + SYS_MSGSYS = 252 + SYS_SHMSYS = 253 + SYS_SEMCTL = 254 + SYS_SEMGET = 255 + SYS_SEMOP = 256 + SYS_MSGCTL = 258 + SYS_MSGGET = 259 + SYS_MSGSND = 260 + SYS_MSGRCV = 261 + SYS_SHMAT = 262 + SYS_SHMCTL = 263 + SYS_SHMDT = 264 + SYS_SHMGET = 265 + SYS_SHM_OPEN = 266 + SYS_SHM_UNLINK = 267 + SYS_SEM_OPEN = 268 + SYS_SEM_CLOSE = 269 + SYS_SEM_UNLINK = 270 + SYS_SEM_WAIT = 271 + SYS_SEM_TRYWAIT = 272 + SYS_SEM_POST = 273 + SYS_SYSCTLBYNAME = 274 + SYS_OPEN_EXTENDED = 277 + SYS_UMASK_EXTENDED = 278 + SYS_STAT_EXTENDED = 279 + SYS_LSTAT_EXTENDED = 280 + SYS_FSTAT_EXTENDED = 281 + SYS_CHMOD_EXTENDED = 282 + SYS_FCHMOD_EXTENDED = 283 + SYS_ACCESS_EXTENDED = 284 + SYS_SETTID = 285 + SYS_GETTID = 286 + SYS_SETSGROUPS = 287 + SYS_GETSGROUPS = 288 + SYS_SETWGROUPS = 289 + SYS_GETWGROUPS = 290 + SYS_MKFIFO_EXTENDED = 291 + SYS_MKDIR_EXTENDED = 292 + SYS_IDENTITYSVC = 293 + SYS_SHARED_REGION_CHECK_NP = 294 + SYS_VM_PRESSURE_MONITOR = 296 + SYS_PSYNCH_RW_LONGRDLOCK = 297 + SYS_PSYNCH_RW_YIELDWRLOCK = 298 + SYS_PSYNCH_RW_DOWNGRADE = 299 + SYS_PSYNCH_RW_UPGRADE = 300 + SYS_PSYNCH_MUTEXWAIT = 301 + SYS_PSYNCH_MUTEXDROP = 302 + SYS_PSYNCH_CVBROAD = 303 + SYS_PSYNCH_CVSIGNAL = 304 + SYS_PSYNCH_CVWAIT = 305 + SYS_PSYNCH_RW_RDLOCK = 306 + SYS_PSYNCH_RW_WRLOCK = 307 + SYS_PSYNCH_RW_UNLOCK = 308 + SYS_PSYNCH_RW_UNLOCK2 = 309 + SYS_GETSID = 310 + SYS_SETTID_WITH_PID = 311 + SYS_PSYNCH_CVCLRPREPOST = 312 + SYS_AIO_FSYNC = 313 + SYS_AIO_RETURN = 314 + SYS_AIO_SUSPEND = 315 + SYS_AIO_CANCEL = 316 + SYS_AIO_ERROR = 317 + SYS_AIO_READ = 318 + SYS_AIO_WRITE = 319 + SYS_LIO_LISTIO = 320 + SYS_IOPOLICYSYS = 322 + SYS_PROCESS_POLICY = 323 + SYS_MLOCKALL = 324 + SYS_MUNLOCKALL = 325 + SYS_ISSETUGID = 327 + SYS___PTHREAD_KILL = 328 + SYS___PTHREAD_SIGMASK = 329 + SYS___SIGWAIT = 330 + SYS___DISABLE_THREADSIGNAL = 331 + SYS___PTHREAD_MARKCANCEL = 332 + SYS___PTHREAD_CANCELED = 333 + SYS___SEMWAIT_SIGNAL = 334 + SYS_PROC_INFO = 336 + SYS_SENDFILE = 337 + SYS_STAT64 = 338 + SYS_FSTAT64 = 339 + SYS_LSTAT64 = 340 + SYS_STAT64_EXTENDED = 341 + SYS_LSTAT64_EXTENDED = 342 + SYS_FSTAT64_EXTENDED = 343 + SYS_GETDIRENTRIES64 = 344 + SYS_STATFS64 = 345 + SYS_FSTATFS64 = 346 + SYS_GETFSSTAT64 = 347 + SYS___PTHREAD_CHDIR = 348 + SYS___PTHREAD_FCHDIR = 349 + SYS_AUDIT = 350 + SYS_AUDITON = 351 + SYS_GETAUID = 353 + SYS_SETAUID = 354 + SYS_GETAUDIT_ADDR = 357 + SYS_SETAUDIT_ADDR = 358 + SYS_AUDITCTL = 359 + SYS_BSDTHREAD_CREATE = 360 + SYS_BSDTHREAD_TERMINATE = 361 + SYS_KQUEUE = 362 + SYS_KEVENT = 363 + SYS_LCHOWN = 364 + SYS_STACK_SNAPSHOT = 365 + SYS_BSDTHREAD_REGISTER = 366 + SYS_WORKQ_OPEN = 367 + SYS_WORKQ_KERNRETURN = 368 + SYS_KEVENT64 = 369 + SYS___OLD_SEMWAIT_SIGNAL = 370 + 
SYS___OLD_SEMWAIT_SIGNAL_NOCANCEL = 371 + SYS_THREAD_SELFID = 372 + SYS_LEDGER = 373 + SYS___MAC_EXECVE = 380 + SYS___MAC_SYSCALL = 381 + SYS___MAC_GET_FILE = 382 + SYS___MAC_SET_FILE = 383 + SYS___MAC_GET_LINK = 384 + SYS___MAC_SET_LINK = 385 + SYS___MAC_GET_PROC = 386 + SYS___MAC_SET_PROC = 387 + SYS___MAC_GET_FD = 388 + SYS___MAC_SET_FD = 389 + SYS___MAC_GET_PID = 390 + SYS___MAC_GET_LCID = 391 + SYS___MAC_GET_LCTX = 392 + SYS___MAC_SET_LCTX = 393 + SYS_SETLCID = 394 + SYS_GETLCID = 395 + SYS_READ_NOCANCEL = 396 + SYS_WRITE_NOCANCEL = 397 + SYS_OPEN_NOCANCEL = 398 + SYS_CLOSE_NOCANCEL = 399 + SYS_WAIT4_NOCANCEL = 400 + SYS_RECVMSG_NOCANCEL = 401 + SYS_SENDMSG_NOCANCEL = 402 + SYS_RECVFROM_NOCANCEL = 403 + SYS_ACCEPT_NOCANCEL = 404 + SYS_MSYNC_NOCANCEL = 405 + SYS_FCNTL_NOCANCEL = 406 + SYS_SELECT_NOCANCEL = 407 + SYS_FSYNC_NOCANCEL = 408 + SYS_CONNECT_NOCANCEL = 409 + SYS_SIGSUSPEND_NOCANCEL = 410 + SYS_READV_NOCANCEL = 411 + SYS_WRITEV_NOCANCEL = 412 + SYS_SENDTO_NOCANCEL = 413 + SYS_PREAD_NOCANCEL = 414 + SYS_PWRITE_NOCANCEL = 415 + SYS_WAITID_NOCANCEL = 416 + SYS_POLL_NOCANCEL = 417 + SYS_MSGSND_NOCANCEL = 418 + SYS_MSGRCV_NOCANCEL = 419 + SYS_SEM_WAIT_NOCANCEL = 420 + SYS_AIO_SUSPEND_NOCANCEL = 421 + SYS___SIGWAIT_NOCANCEL = 422 + SYS___SEMWAIT_SIGNAL_NOCANCEL = 423 + SYS___MAC_MOUNT = 424 + SYS___MAC_GET_MOUNT = 425 + SYS___MAC_GETFSSTAT = 426 + SYS_FSGETPATH = 427 + SYS_AUDIT_SESSION_SELF = 428 + SYS_AUDIT_SESSION_JOIN = 429 + SYS_FILEPORT_MAKEPORT = 430 + SYS_FILEPORT_MAKEFD = 431 + SYS_AUDIT_SESSION_PORT = 432 + SYS_PID_SUSPEND = 433 + SYS_PID_RESUME = 434 + SYS_PID_HIBERNATE = 435 + SYS_PID_SHUTDOWN_SOCKETS = 436 + SYS_SHARED_REGION_MAP_AND_SLIDE_NP = 438 + SYS_KAS_INFO = 439 + SYS_MEMORYSTATUS_CONTROL = 440 + SYS_GUARDED_OPEN_NP = 441 + SYS_GUARDED_CLOSE_NP = 442 + SYS_GUARDED_KQUEUE_NP = 443 + SYS_CHANGE_FDGUARD_NP = 444 + SYS_PROC_RLIMIT_CONTROL = 446 + SYS_CONNECTX = 447 + SYS_DISCONNECTX = 448 + SYS_PEELOFF = 449 + SYS_SOCKET_DELEGATE = 450 + SYS_TELEMETRY = 451 + SYS_PROC_UUID_POLICY = 452 + SYS_MEMORYSTATUS_GET_LEVEL = 453 + SYS_SYSTEM_OVERRIDE = 454 + SYS_VFS_PURGE = 455 + SYS_SFI_CTL = 456 + SYS_SFI_PIDCTL = 457 + SYS_COALITION = 458 + SYS_COALITION_INFO = 459 + SYS_NECP_MATCH_POLICY = 460 + SYS_GETATTRLISTBULK = 461 + SYS_OPENAT = 463 + SYS_OPENAT_NOCANCEL = 464 + SYS_RENAMEAT = 465 + SYS_FACCESSAT = 466 + SYS_FCHMODAT = 467 + SYS_FCHOWNAT = 468 + SYS_FSTATAT = 469 + SYS_FSTATAT64 = 470 + SYS_LINKAT = 471 + SYS_UNLINKAT = 472 + SYS_READLINKAT = 473 + SYS_SYMLINKAT = 474 + SYS_MKDIRAT = 475 + SYS_GETATTRLISTAT = 476 + SYS_PROC_TRACE_LOG = 477 + SYS_BSDTHREAD_CTL = 478 + SYS_OPENBYID_NP = 479 + SYS_RECVMSG_X = 480 + SYS_SENDMSG_X = 481 + SYS_THREAD_SELFUSAGE = 482 + SYS_CSRCTL = 483 + SYS_GUARDED_OPEN_DPROTECTED_NP = 484 + SYS_GUARDED_WRITE_NP = 485 + SYS_GUARDED_PWRITE_NP = 486 + SYS_GUARDED_WRITEV_NP = 487 + SYS_RENAME_EXT = 488 + SYS_MREMAP_ENCRYPTED = 489 + SYS_MAXSYSCALL = 490 +) diff --git a/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zsysnum_darwin_amd64.go b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zsysnum_darwin_amd64.go new file mode 100644 index 000000000..09de240c8 --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zsysnum_darwin_amd64.go @@ -0,0 +1,398 @@ +// mksysnum_darwin.pl /Applications/Xcode.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX10.10.sdk/usr/include/sys/syscall.h +// MACHINE GENERATED BY THE ABOVE COMMAND; DO NOT EDIT + 
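Each zsysnum file above pins the kernel syscall numbers for one GOOS/GOARCH pair (the header comment records the exact SDK header it was generated from), and the package's wrappers pass those constants as the trap argument. A minimal sketch using the vendored import path introduced by this patch (darwin-only; illustration, not part of this patch):

    package main

    import (
    	"fmt"

    	"github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix"
    )

    func main() {
    	// SYS_GETPID resolves to the zsysnum constant matching GOOS/GOARCH.
    	pid, _, errno := unix.Syscall(unix.SYS_GETPID, 0, 0, 0)
    	if errno != 0 {
    		fmt.Println("getpid failed:", errno)
    		return
    	}
    	fmt.Println("pid:", pid)
    }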
+// +build amd64,darwin + +package unix + +const ( + SYS_SYSCALL = 0 + SYS_EXIT = 1 + SYS_FORK = 2 + SYS_READ = 3 + SYS_WRITE = 4 + SYS_OPEN = 5 + SYS_CLOSE = 6 + SYS_WAIT4 = 7 + SYS_LINK = 9 + SYS_UNLINK = 10 + SYS_CHDIR = 12 + SYS_FCHDIR = 13 + SYS_MKNOD = 14 + SYS_CHMOD = 15 + SYS_CHOWN = 16 + SYS_GETFSSTAT = 18 + SYS_GETPID = 20 + SYS_SETUID = 23 + SYS_GETUID = 24 + SYS_GETEUID = 25 + SYS_PTRACE = 26 + SYS_RECVMSG = 27 + SYS_SENDMSG = 28 + SYS_RECVFROM = 29 + SYS_ACCEPT = 30 + SYS_GETPEERNAME = 31 + SYS_GETSOCKNAME = 32 + SYS_ACCESS = 33 + SYS_CHFLAGS = 34 + SYS_FCHFLAGS = 35 + SYS_SYNC = 36 + SYS_KILL = 37 + SYS_GETPPID = 39 + SYS_DUP = 41 + SYS_PIPE = 42 + SYS_GETEGID = 43 + SYS_SIGACTION = 46 + SYS_GETGID = 47 + SYS_SIGPROCMASK = 48 + SYS_GETLOGIN = 49 + SYS_SETLOGIN = 50 + SYS_ACCT = 51 + SYS_SIGPENDING = 52 + SYS_SIGALTSTACK = 53 + SYS_IOCTL = 54 + SYS_REBOOT = 55 + SYS_REVOKE = 56 + SYS_SYMLINK = 57 + SYS_READLINK = 58 + SYS_EXECVE = 59 + SYS_UMASK = 60 + SYS_CHROOT = 61 + SYS_MSYNC = 65 + SYS_VFORK = 66 + SYS_MUNMAP = 73 + SYS_MPROTECT = 74 + SYS_MADVISE = 75 + SYS_MINCORE = 78 + SYS_GETGROUPS = 79 + SYS_SETGROUPS = 80 + SYS_GETPGRP = 81 + SYS_SETPGID = 82 + SYS_SETITIMER = 83 + SYS_SWAPON = 85 + SYS_GETITIMER = 86 + SYS_GETDTABLESIZE = 89 + SYS_DUP2 = 90 + SYS_FCNTL = 92 + SYS_SELECT = 93 + SYS_FSYNC = 95 + SYS_SETPRIORITY = 96 + SYS_SOCKET = 97 + SYS_CONNECT = 98 + SYS_GETPRIORITY = 100 + SYS_BIND = 104 + SYS_SETSOCKOPT = 105 + SYS_LISTEN = 106 + SYS_SIGSUSPEND = 111 + SYS_GETTIMEOFDAY = 116 + SYS_GETRUSAGE = 117 + SYS_GETSOCKOPT = 118 + SYS_READV = 120 + SYS_WRITEV = 121 + SYS_SETTIMEOFDAY = 122 + SYS_FCHOWN = 123 + SYS_FCHMOD = 124 + SYS_SETREUID = 126 + SYS_SETREGID = 127 + SYS_RENAME = 128 + SYS_FLOCK = 131 + SYS_MKFIFO = 132 + SYS_SENDTO = 133 + SYS_SHUTDOWN = 134 + SYS_SOCKETPAIR = 135 + SYS_MKDIR = 136 + SYS_RMDIR = 137 + SYS_UTIMES = 138 + SYS_FUTIMES = 139 + SYS_ADJTIME = 140 + SYS_GETHOSTUUID = 142 + SYS_SETSID = 147 + SYS_GETPGID = 151 + SYS_SETPRIVEXEC = 152 + SYS_PREAD = 153 + SYS_PWRITE = 154 + SYS_NFSSVC = 155 + SYS_STATFS = 157 + SYS_FSTATFS = 158 + SYS_UNMOUNT = 159 + SYS_GETFH = 161 + SYS_QUOTACTL = 165 + SYS_MOUNT = 167 + SYS_CSOPS = 169 + SYS_CSOPS_AUDITTOKEN = 170 + SYS_WAITID = 173 + SYS_KDEBUG_TRACE64 = 179 + SYS_KDEBUG_TRACE = 180 + SYS_SETGID = 181 + SYS_SETEGID = 182 + SYS_SETEUID = 183 + SYS_SIGRETURN = 184 + SYS_CHUD = 185 + SYS_FDATASYNC = 187 + SYS_STAT = 188 + SYS_FSTAT = 189 + SYS_LSTAT = 190 + SYS_PATHCONF = 191 + SYS_FPATHCONF = 192 + SYS_GETRLIMIT = 194 + SYS_SETRLIMIT = 195 + SYS_GETDIRENTRIES = 196 + SYS_MMAP = 197 + SYS_LSEEK = 199 + SYS_TRUNCATE = 200 + SYS_FTRUNCATE = 201 + SYS_SYSCTL = 202 + SYS_MLOCK = 203 + SYS_MUNLOCK = 204 + SYS_UNDELETE = 205 + SYS_OPEN_DPROTECTED_NP = 216 + SYS_GETATTRLIST = 220 + SYS_SETATTRLIST = 221 + SYS_GETDIRENTRIESATTR = 222 + SYS_EXCHANGEDATA = 223 + SYS_SEARCHFS = 225 + SYS_DELETE = 226 + SYS_COPYFILE = 227 + SYS_FGETATTRLIST = 228 + SYS_FSETATTRLIST = 229 + SYS_POLL = 230 + SYS_WATCHEVENT = 231 + SYS_WAITEVENT = 232 + SYS_MODWATCH = 233 + SYS_GETXATTR = 234 + SYS_FGETXATTR = 235 + SYS_SETXATTR = 236 + SYS_FSETXATTR = 237 + SYS_REMOVEXATTR = 238 + SYS_FREMOVEXATTR = 239 + SYS_LISTXATTR = 240 + SYS_FLISTXATTR = 241 + SYS_FSCTL = 242 + SYS_INITGROUPS = 243 + SYS_POSIX_SPAWN = 244 + SYS_FFSCTL = 245 + SYS_NFSCLNT = 247 + SYS_FHOPEN = 248 + SYS_MINHERIT = 250 + SYS_SEMSYS = 251 + SYS_MSGSYS = 252 + SYS_SHMSYS = 253 + SYS_SEMCTL = 254 + SYS_SEMGET = 255 + SYS_SEMOP = 256 + SYS_MSGCTL = 258 + SYS_MSGGET = 259 + 
SYS_MSGSND = 260 + SYS_MSGRCV = 261 + SYS_SHMAT = 262 + SYS_SHMCTL = 263 + SYS_SHMDT = 264 + SYS_SHMGET = 265 + SYS_SHM_OPEN = 266 + SYS_SHM_UNLINK = 267 + SYS_SEM_OPEN = 268 + SYS_SEM_CLOSE = 269 + SYS_SEM_UNLINK = 270 + SYS_SEM_WAIT = 271 + SYS_SEM_TRYWAIT = 272 + SYS_SEM_POST = 273 + SYS_SYSCTLBYNAME = 274 + SYS_OPEN_EXTENDED = 277 + SYS_UMASK_EXTENDED = 278 + SYS_STAT_EXTENDED = 279 + SYS_LSTAT_EXTENDED = 280 + SYS_FSTAT_EXTENDED = 281 + SYS_CHMOD_EXTENDED = 282 + SYS_FCHMOD_EXTENDED = 283 + SYS_ACCESS_EXTENDED = 284 + SYS_SETTID = 285 + SYS_GETTID = 286 + SYS_SETSGROUPS = 287 + SYS_GETSGROUPS = 288 + SYS_SETWGROUPS = 289 + SYS_GETWGROUPS = 290 + SYS_MKFIFO_EXTENDED = 291 + SYS_MKDIR_EXTENDED = 292 + SYS_IDENTITYSVC = 293 + SYS_SHARED_REGION_CHECK_NP = 294 + SYS_VM_PRESSURE_MONITOR = 296 + SYS_PSYNCH_RW_LONGRDLOCK = 297 + SYS_PSYNCH_RW_YIELDWRLOCK = 298 + SYS_PSYNCH_RW_DOWNGRADE = 299 + SYS_PSYNCH_RW_UPGRADE = 300 + SYS_PSYNCH_MUTEXWAIT = 301 + SYS_PSYNCH_MUTEXDROP = 302 + SYS_PSYNCH_CVBROAD = 303 + SYS_PSYNCH_CVSIGNAL = 304 + SYS_PSYNCH_CVWAIT = 305 + SYS_PSYNCH_RW_RDLOCK = 306 + SYS_PSYNCH_RW_WRLOCK = 307 + SYS_PSYNCH_RW_UNLOCK = 308 + SYS_PSYNCH_RW_UNLOCK2 = 309 + SYS_GETSID = 310 + SYS_SETTID_WITH_PID = 311 + SYS_PSYNCH_CVCLRPREPOST = 312 + SYS_AIO_FSYNC = 313 + SYS_AIO_RETURN = 314 + SYS_AIO_SUSPEND = 315 + SYS_AIO_CANCEL = 316 + SYS_AIO_ERROR = 317 + SYS_AIO_READ = 318 + SYS_AIO_WRITE = 319 + SYS_LIO_LISTIO = 320 + SYS_IOPOLICYSYS = 322 + SYS_PROCESS_POLICY = 323 + SYS_MLOCKALL = 324 + SYS_MUNLOCKALL = 325 + SYS_ISSETUGID = 327 + SYS___PTHREAD_KILL = 328 + SYS___PTHREAD_SIGMASK = 329 + SYS___SIGWAIT = 330 + SYS___DISABLE_THREADSIGNAL = 331 + SYS___PTHREAD_MARKCANCEL = 332 + SYS___PTHREAD_CANCELED = 333 + SYS___SEMWAIT_SIGNAL = 334 + SYS_PROC_INFO = 336 + SYS_SENDFILE = 337 + SYS_STAT64 = 338 + SYS_FSTAT64 = 339 + SYS_LSTAT64 = 340 + SYS_STAT64_EXTENDED = 341 + SYS_LSTAT64_EXTENDED = 342 + SYS_FSTAT64_EXTENDED = 343 + SYS_GETDIRENTRIES64 = 344 + SYS_STATFS64 = 345 + SYS_FSTATFS64 = 346 + SYS_GETFSSTAT64 = 347 + SYS___PTHREAD_CHDIR = 348 + SYS___PTHREAD_FCHDIR = 349 + SYS_AUDIT = 350 + SYS_AUDITON = 351 + SYS_GETAUID = 353 + SYS_SETAUID = 354 + SYS_GETAUDIT_ADDR = 357 + SYS_SETAUDIT_ADDR = 358 + SYS_AUDITCTL = 359 + SYS_BSDTHREAD_CREATE = 360 + SYS_BSDTHREAD_TERMINATE = 361 + SYS_KQUEUE = 362 + SYS_KEVENT = 363 + SYS_LCHOWN = 364 + SYS_STACK_SNAPSHOT = 365 + SYS_BSDTHREAD_REGISTER = 366 + SYS_WORKQ_OPEN = 367 + SYS_WORKQ_KERNRETURN = 368 + SYS_KEVENT64 = 369 + SYS___OLD_SEMWAIT_SIGNAL = 370 + SYS___OLD_SEMWAIT_SIGNAL_NOCANCEL = 371 + SYS_THREAD_SELFID = 372 + SYS_LEDGER = 373 + SYS___MAC_EXECVE = 380 + SYS___MAC_SYSCALL = 381 + SYS___MAC_GET_FILE = 382 + SYS___MAC_SET_FILE = 383 + SYS___MAC_GET_LINK = 384 + SYS___MAC_SET_LINK = 385 + SYS___MAC_GET_PROC = 386 + SYS___MAC_SET_PROC = 387 + SYS___MAC_GET_FD = 388 + SYS___MAC_SET_FD = 389 + SYS___MAC_GET_PID = 390 + SYS___MAC_GET_LCID = 391 + SYS___MAC_GET_LCTX = 392 + SYS___MAC_SET_LCTX = 393 + SYS_SETLCID = 394 + SYS_GETLCID = 395 + SYS_READ_NOCANCEL = 396 + SYS_WRITE_NOCANCEL = 397 + SYS_OPEN_NOCANCEL = 398 + SYS_CLOSE_NOCANCEL = 399 + SYS_WAIT4_NOCANCEL = 400 + SYS_RECVMSG_NOCANCEL = 401 + SYS_SENDMSG_NOCANCEL = 402 + SYS_RECVFROM_NOCANCEL = 403 + SYS_ACCEPT_NOCANCEL = 404 + SYS_MSYNC_NOCANCEL = 405 + SYS_FCNTL_NOCANCEL = 406 + SYS_SELECT_NOCANCEL = 407 + SYS_FSYNC_NOCANCEL = 408 + SYS_CONNECT_NOCANCEL = 409 + SYS_SIGSUSPEND_NOCANCEL = 410 + SYS_READV_NOCANCEL = 411 + SYS_WRITEV_NOCANCEL = 412 + SYS_SENDTO_NOCANCEL = 413 + 
SYS_PREAD_NOCANCEL = 414 + SYS_PWRITE_NOCANCEL = 415 + SYS_WAITID_NOCANCEL = 416 + SYS_POLL_NOCANCEL = 417 + SYS_MSGSND_NOCANCEL = 418 + SYS_MSGRCV_NOCANCEL = 419 + SYS_SEM_WAIT_NOCANCEL = 420 + SYS_AIO_SUSPEND_NOCANCEL = 421 + SYS___SIGWAIT_NOCANCEL = 422 + SYS___SEMWAIT_SIGNAL_NOCANCEL = 423 + SYS___MAC_MOUNT = 424 + SYS___MAC_GET_MOUNT = 425 + SYS___MAC_GETFSSTAT = 426 + SYS_FSGETPATH = 427 + SYS_AUDIT_SESSION_SELF = 428 + SYS_AUDIT_SESSION_JOIN = 429 + SYS_FILEPORT_MAKEPORT = 430 + SYS_FILEPORT_MAKEFD = 431 + SYS_AUDIT_SESSION_PORT = 432 + SYS_PID_SUSPEND = 433 + SYS_PID_RESUME = 434 + SYS_PID_HIBERNATE = 435 + SYS_PID_SHUTDOWN_SOCKETS = 436 + SYS_SHARED_REGION_MAP_AND_SLIDE_NP = 438 + SYS_KAS_INFO = 439 + SYS_MEMORYSTATUS_CONTROL = 440 + SYS_GUARDED_OPEN_NP = 441 + SYS_GUARDED_CLOSE_NP = 442 + SYS_GUARDED_KQUEUE_NP = 443 + SYS_CHANGE_FDGUARD_NP = 444 + SYS_PROC_RLIMIT_CONTROL = 446 + SYS_CONNECTX = 447 + SYS_DISCONNECTX = 448 + SYS_PEELOFF = 449 + SYS_SOCKET_DELEGATE = 450 + SYS_TELEMETRY = 451 + SYS_PROC_UUID_POLICY = 452 + SYS_MEMORYSTATUS_GET_LEVEL = 453 + SYS_SYSTEM_OVERRIDE = 454 + SYS_VFS_PURGE = 455 + SYS_SFI_CTL = 456 + SYS_SFI_PIDCTL = 457 + SYS_COALITION = 458 + SYS_COALITION_INFO = 459 + SYS_NECP_MATCH_POLICY = 460 + SYS_GETATTRLISTBULK = 461 + SYS_OPENAT = 463 + SYS_OPENAT_NOCANCEL = 464 + SYS_RENAMEAT = 465 + SYS_FACCESSAT = 466 + SYS_FCHMODAT = 467 + SYS_FCHOWNAT = 468 + SYS_FSTATAT = 469 + SYS_FSTATAT64 = 470 + SYS_LINKAT = 471 + SYS_UNLINKAT = 472 + SYS_READLINKAT = 473 + SYS_SYMLINKAT = 474 + SYS_MKDIRAT = 475 + SYS_GETATTRLISTAT = 476 + SYS_PROC_TRACE_LOG = 477 + SYS_BSDTHREAD_CTL = 478 + SYS_OPENBYID_NP = 479 + SYS_RECVMSG_X = 480 + SYS_SENDMSG_X = 481 + SYS_THREAD_SELFUSAGE = 482 + SYS_CSRCTL = 483 + SYS_GUARDED_OPEN_DPROTECTED_NP = 484 + SYS_GUARDED_WRITE_NP = 485 + SYS_GUARDED_PWRITE_NP = 486 + SYS_GUARDED_WRITEV_NP = 487 + SYS_RENAME_EXT = 488 + SYS_MREMAP_ENCRYPTED = 489 + SYS_MAXSYSCALL = 490 +) diff --git a/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zsysnum_darwin_arm.go b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zsysnum_darwin_arm.go new file mode 100644 index 000000000..b8c9aea85 --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zsysnum_darwin_arm.go @@ -0,0 +1,358 @@ +// mksysnum_darwin.pl /usr/include/sys/syscall.h +// MACHINE GENERATED BY THE ABOVE COMMAND; DO NOT EDIT + +// +build arm,darwin + +package unix + +const ( + SYS_SYSCALL = 0 + SYS_EXIT = 1 + SYS_FORK = 2 + SYS_READ = 3 + SYS_WRITE = 4 + SYS_OPEN = 5 + SYS_CLOSE = 6 + SYS_WAIT4 = 7 + SYS_LINK = 9 + SYS_UNLINK = 10 + SYS_CHDIR = 12 + SYS_FCHDIR = 13 + SYS_MKNOD = 14 + SYS_CHMOD = 15 + SYS_CHOWN = 16 + SYS_GETFSSTAT = 18 + SYS_GETPID = 20 + SYS_SETUID = 23 + SYS_GETUID = 24 + SYS_GETEUID = 25 + SYS_PTRACE = 26 + SYS_RECVMSG = 27 + SYS_SENDMSG = 28 + SYS_RECVFROM = 29 + SYS_ACCEPT = 30 + SYS_GETPEERNAME = 31 + SYS_GETSOCKNAME = 32 + SYS_ACCESS = 33 + SYS_CHFLAGS = 34 + SYS_FCHFLAGS = 35 + SYS_SYNC = 36 + SYS_KILL = 37 + SYS_GETPPID = 39 + SYS_DUP = 41 + SYS_PIPE = 42 + SYS_GETEGID = 43 + SYS_SIGACTION = 46 + SYS_GETGID = 47 + SYS_SIGPROCMASK = 48 + SYS_GETLOGIN = 49 + SYS_SETLOGIN = 50 + SYS_ACCT = 51 + SYS_SIGPENDING = 52 + SYS_SIGALTSTACK = 53 + SYS_IOCTL = 54 + SYS_REBOOT = 55 + SYS_REVOKE = 56 + SYS_SYMLINK = 57 + SYS_READLINK = 58 + SYS_EXECVE = 59 + SYS_UMASK = 60 + SYS_CHROOT = 61 + SYS_MSYNC = 65 + SYS_VFORK = 66 + SYS_MUNMAP = 73 + SYS_MPROTECT = 74 + SYS_MADVISE = 75 + SYS_MINCORE = 78 
+ SYS_GETGROUPS = 79 + SYS_SETGROUPS = 80 + SYS_GETPGRP = 81 + SYS_SETPGID = 82 + SYS_SETITIMER = 83 + SYS_SWAPON = 85 + SYS_GETITIMER = 86 + SYS_GETDTABLESIZE = 89 + SYS_DUP2 = 90 + SYS_FCNTL = 92 + SYS_SELECT = 93 + SYS_FSYNC = 95 + SYS_SETPRIORITY = 96 + SYS_SOCKET = 97 + SYS_CONNECT = 98 + SYS_GETPRIORITY = 100 + SYS_BIND = 104 + SYS_SETSOCKOPT = 105 + SYS_LISTEN = 106 + SYS_SIGSUSPEND = 111 + SYS_GETTIMEOFDAY = 116 + SYS_GETRUSAGE = 117 + SYS_GETSOCKOPT = 118 + SYS_READV = 120 + SYS_WRITEV = 121 + SYS_SETTIMEOFDAY = 122 + SYS_FCHOWN = 123 + SYS_FCHMOD = 124 + SYS_SETREUID = 126 + SYS_SETREGID = 127 + SYS_RENAME = 128 + SYS_FLOCK = 131 + SYS_MKFIFO = 132 + SYS_SENDTO = 133 + SYS_SHUTDOWN = 134 + SYS_SOCKETPAIR = 135 + SYS_MKDIR = 136 + SYS_RMDIR = 137 + SYS_UTIMES = 138 + SYS_FUTIMES = 139 + SYS_ADJTIME = 140 + SYS_GETHOSTUUID = 142 + SYS_SETSID = 147 + SYS_GETPGID = 151 + SYS_SETPRIVEXEC = 152 + SYS_PREAD = 153 + SYS_PWRITE = 154 + SYS_NFSSVC = 155 + SYS_STATFS = 157 + SYS_FSTATFS = 158 + SYS_UNMOUNT = 159 + SYS_GETFH = 161 + SYS_QUOTACTL = 165 + SYS_MOUNT = 167 + SYS_CSOPS = 169 + SYS_CSOPS_AUDITTOKEN = 170 + SYS_WAITID = 173 + SYS_KDEBUG_TRACE = 180 + SYS_SETGID = 181 + SYS_SETEGID = 182 + SYS_SETEUID = 183 + SYS_SIGRETURN = 184 + SYS_CHUD = 185 + SYS_FDATASYNC = 187 + SYS_STAT = 188 + SYS_FSTAT = 189 + SYS_LSTAT = 190 + SYS_PATHCONF = 191 + SYS_FPATHCONF = 192 + SYS_GETRLIMIT = 194 + SYS_SETRLIMIT = 195 + SYS_GETDIRENTRIES = 196 + SYS_MMAP = 197 + SYS_LSEEK = 199 + SYS_TRUNCATE = 200 + SYS_FTRUNCATE = 201 + SYS___SYSCTL = 202 + SYS_MLOCK = 203 + SYS_MUNLOCK = 204 + SYS_UNDELETE = 205 + SYS_ATSOCKET = 206 + SYS_ATGETMSG = 207 + SYS_ATPUTMSG = 208 + SYS_ATPSNDREQ = 209 + SYS_ATPSNDRSP = 210 + SYS_ATPGETREQ = 211 + SYS_ATPGETRSP = 212 + SYS_OPEN_DPROTECTED_NP = 216 + SYS_GETATTRLIST = 220 + SYS_SETATTRLIST = 221 + SYS_GETDIRENTRIESATTR = 222 + SYS_EXCHANGEDATA = 223 + SYS_SEARCHFS = 225 + SYS_DELETE = 226 + SYS_COPYFILE = 227 + SYS_FGETATTRLIST = 228 + SYS_FSETATTRLIST = 229 + SYS_POLL = 230 + SYS_WATCHEVENT = 231 + SYS_WAITEVENT = 232 + SYS_MODWATCH = 233 + SYS_GETXATTR = 234 + SYS_FGETXATTR = 235 + SYS_SETXATTR = 236 + SYS_FSETXATTR = 237 + SYS_REMOVEXATTR = 238 + SYS_FREMOVEXATTR = 239 + SYS_LISTXATTR = 240 + SYS_FLISTXATTR = 241 + SYS_FSCTL = 242 + SYS_INITGROUPS = 243 + SYS_POSIX_SPAWN = 244 + SYS_FFSCTL = 245 + SYS_NFSCLNT = 247 + SYS_FHOPEN = 248 + SYS_MINHERIT = 250 + SYS_SEMSYS = 251 + SYS_MSGSYS = 252 + SYS_SHMSYS = 253 + SYS_SEMCTL = 254 + SYS_SEMGET = 255 + SYS_SEMOP = 256 + SYS_MSGCTL = 258 + SYS_MSGGET = 259 + SYS_MSGSND = 260 + SYS_MSGRCV = 261 + SYS_SHMAT = 262 + SYS_SHMCTL = 263 + SYS_SHMDT = 264 + SYS_SHMGET = 265 + SYS_SHM_OPEN = 266 + SYS_SHM_UNLINK = 267 + SYS_SEM_OPEN = 268 + SYS_SEM_CLOSE = 269 + SYS_SEM_UNLINK = 270 + SYS_SEM_WAIT = 271 + SYS_SEM_TRYWAIT = 272 + SYS_SEM_POST = 273 + SYS_SEM_GETVALUE = 274 + SYS_SEM_INIT = 275 + SYS_SEM_DESTROY = 276 + SYS_OPEN_EXTENDED = 277 + SYS_UMASK_EXTENDED = 278 + SYS_STAT_EXTENDED = 279 + SYS_LSTAT_EXTENDED = 280 + SYS_FSTAT_EXTENDED = 281 + SYS_CHMOD_EXTENDED = 282 + SYS_FCHMOD_EXTENDED = 283 + SYS_ACCESS_EXTENDED = 284 + SYS_SETTID = 285 + SYS_GETTID = 286 + SYS_SETSGROUPS = 287 + SYS_GETSGROUPS = 288 + SYS_SETWGROUPS = 289 + SYS_GETWGROUPS = 290 + SYS_MKFIFO_EXTENDED = 291 + SYS_MKDIR_EXTENDED = 292 + SYS_IDENTITYSVC = 293 + SYS_SHARED_REGION_CHECK_NP = 294 + SYS_VM_PRESSURE_MONITOR = 296 + SYS_PSYNCH_RW_LONGRDLOCK = 297 + SYS_PSYNCH_RW_YIELDWRLOCK = 298 + SYS_PSYNCH_RW_DOWNGRADE = 299 + SYS_PSYNCH_RW_UPGRADE = 300 + 
SYS_PSYNCH_MUTEXWAIT = 301 + SYS_PSYNCH_MUTEXDROP = 302 + SYS_PSYNCH_CVBROAD = 303 + SYS_PSYNCH_CVSIGNAL = 304 + SYS_PSYNCH_CVWAIT = 305 + SYS_PSYNCH_RW_RDLOCK = 306 + SYS_PSYNCH_RW_WRLOCK = 307 + SYS_PSYNCH_RW_UNLOCK = 308 + SYS_PSYNCH_RW_UNLOCK2 = 309 + SYS_GETSID = 310 + SYS_SETTID_WITH_PID = 311 + SYS_PSYNCH_CVCLRPREPOST = 312 + SYS_AIO_FSYNC = 313 + SYS_AIO_RETURN = 314 + SYS_AIO_SUSPEND = 315 + SYS_AIO_CANCEL = 316 + SYS_AIO_ERROR = 317 + SYS_AIO_READ = 318 + SYS_AIO_WRITE = 319 + SYS_LIO_LISTIO = 320 + SYS_IOPOLICYSYS = 322 + SYS_PROCESS_POLICY = 323 + SYS_MLOCKALL = 324 + SYS_MUNLOCKALL = 325 + SYS_ISSETUGID = 327 + SYS___PTHREAD_KILL = 328 + SYS___PTHREAD_SIGMASK = 329 + SYS___SIGWAIT = 330 + SYS___DISABLE_THREADSIGNAL = 331 + SYS___PTHREAD_MARKCANCEL = 332 + SYS___PTHREAD_CANCELED = 333 + SYS___SEMWAIT_SIGNAL = 334 + SYS_PROC_INFO = 336 + SYS_SENDFILE = 337 + SYS_STAT64 = 338 + SYS_FSTAT64 = 339 + SYS_LSTAT64 = 340 + SYS_STAT64_EXTENDED = 341 + SYS_LSTAT64_EXTENDED = 342 + SYS_FSTAT64_EXTENDED = 343 + SYS_GETDIRENTRIES64 = 344 + SYS_STATFS64 = 345 + SYS_FSTATFS64 = 346 + SYS_GETFSSTAT64 = 347 + SYS___PTHREAD_CHDIR = 348 + SYS___PTHREAD_FCHDIR = 349 + SYS_AUDIT = 350 + SYS_AUDITON = 351 + SYS_GETAUID = 353 + SYS_SETAUID = 354 + SYS_GETAUDIT_ADDR = 357 + SYS_SETAUDIT_ADDR = 358 + SYS_AUDITCTL = 359 + SYS_BSDTHREAD_CREATE = 360 + SYS_BSDTHREAD_TERMINATE = 361 + SYS_KQUEUE = 362 + SYS_KEVENT = 363 + SYS_LCHOWN = 364 + SYS_STACK_SNAPSHOT = 365 + SYS_BSDTHREAD_REGISTER = 366 + SYS_WORKQ_OPEN = 367 + SYS_WORKQ_KERNRETURN = 368 + SYS_KEVENT64 = 369 + SYS___OLD_SEMWAIT_SIGNAL = 370 + SYS___OLD_SEMWAIT_SIGNAL_NOCANCEL = 371 + SYS_THREAD_SELFID = 372 + SYS_LEDGER = 373 + SYS___MAC_EXECVE = 380 + SYS___MAC_SYSCALL = 381 + SYS___MAC_GET_FILE = 382 + SYS___MAC_SET_FILE = 383 + SYS___MAC_GET_LINK = 384 + SYS___MAC_SET_LINK = 385 + SYS___MAC_GET_PROC = 386 + SYS___MAC_SET_PROC = 387 + SYS___MAC_GET_FD = 388 + SYS___MAC_SET_FD = 389 + SYS___MAC_GET_PID = 390 + SYS___MAC_GET_LCID = 391 + SYS___MAC_GET_LCTX = 392 + SYS___MAC_SET_LCTX = 393 + SYS_SETLCID = 394 + SYS_GETLCID = 395 + SYS_READ_NOCANCEL = 396 + SYS_WRITE_NOCANCEL = 397 + SYS_OPEN_NOCANCEL = 398 + SYS_CLOSE_NOCANCEL = 399 + SYS_WAIT4_NOCANCEL = 400 + SYS_RECVMSG_NOCANCEL = 401 + SYS_SENDMSG_NOCANCEL = 402 + SYS_RECVFROM_NOCANCEL = 403 + SYS_ACCEPT_NOCANCEL = 404 + SYS_MSYNC_NOCANCEL = 405 + SYS_FCNTL_NOCANCEL = 406 + SYS_SELECT_NOCANCEL = 407 + SYS_FSYNC_NOCANCEL = 408 + SYS_CONNECT_NOCANCEL = 409 + SYS_SIGSUSPEND_NOCANCEL = 410 + SYS_READV_NOCANCEL = 411 + SYS_WRITEV_NOCANCEL = 412 + SYS_SENDTO_NOCANCEL = 413 + SYS_PREAD_NOCANCEL = 414 + SYS_PWRITE_NOCANCEL = 415 + SYS_WAITID_NOCANCEL = 416 + SYS_POLL_NOCANCEL = 417 + SYS_MSGSND_NOCANCEL = 418 + SYS_MSGRCV_NOCANCEL = 419 + SYS_SEM_WAIT_NOCANCEL = 420 + SYS_AIO_SUSPEND_NOCANCEL = 421 + SYS___SIGWAIT_NOCANCEL = 422 + SYS___SEMWAIT_SIGNAL_NOCANCEL = 423 + SYS___MAC_MOUNT = 424 + SYS___MAC_GET_MOUNT = 425 + SYS___MAC_GETFSSTAT = 426 + SYS_FSGETPATH = 427 + SYS_AUDIT_SESSION_SELF = 428 + SYS_AUDIT_SESSION_JOIN = 429 + SYS_FILEPORT_MAKEPORT = 430 + SYS_FILEPORT_MAKEFD = 431 + SYS_AUDIT_SESSION_PORT = 432 + SYS_PID_SUSPEND = 433 + SYS_PID_RESUME = 434 + SYS_PID_HIBERNATE = 435 + SYS_PID_SHUTDOWN_SOCKETS = 436 + SYS_SHARED_REGION_MAP_AND_SLIDE_NP = 438 + SYS_KAS_INFO = 439 + SYS_MAXSYSCALL = 440 +) diff --git a/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zsysnum_darwin_arm64.go 
b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zsysnum_darwin_arm64.go new file mode 100644 index 000000000..26677ebbf --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zsysnum_darwin_arm64.go @@ -0,0 +1,398 @@ +// mksysnum_darwin.pl /Applications/Xcode.app/Contents/Developer/Platforms/iPhoneOS.platform/Developer/SDKs/iPhoneOS8.4.sdk/usr/include/sys/syscall.h +// MACHINE GENERATED BY THE ABOVE COMMAND; DO NOT EDIT + +// +build arm64,darwin + +package unix + +const ( + SYS_SYSCALL = 0 + SYS_EXIT = 1 + SYS_FORK = 2 + SYS_READ = 3 + SYS_WRITE = 4 + SYS_OPEN = 5 + SYS_CLOSE = 6 + SYS_WAIT4 = 7 + SYS_LINK = 9 + SYS_UNLINK = 10 + SYS_CHDIR = 12 + SYS_FCHDIR = 13 + SYS_MKNOD = 14 + SYS_CHMOD = 15 + SYS_CHOWN = 16 + SYS_GETFSSTAT = 18 + SYS_GETPID = 20 + SYS_SETUID = 23 + SYS_GETUID = 24 + SYS_GETEUID = 25 + SYS_PTRACE = 26 + SYS_RECVMSG = 27 + SYS_SENDMSG = 28 + SYS_RECVFROM = 29 + SYS_ACCEPT = 30 + SYS_GETPEERNAME = 31 + SYS_GETSOCKNAME = 32 + SYS_ACCESS = 33 + SYS_CHFLAGS = 34 + SYS_FCHFLAGS = 35 + SYS_SYNC = 36 + SYS_KILL = 37 + SYS_GETPPID = 39 + SYS_DUP = 41 + SYS_PIPE = 42 + SYS_GETEGID = 43 + SYS_SIGACTION = 46 + SYS_GETGID = 47 + SYS_SIGPROCMASK = 48 + SYS_GETLOGIN = 49 + SYS_SETLOGIN = 50 + SYS_ACCT = 51 + SYS_SIGPENDING = 52 + SYS_SIGALTSTACK = 53 + SYS_IOCTL = 54 + SYS_REBOOT = 55 + SYS_REVOKE = 56 + SYS_SYMLINK = 57 + SYS_READLINK = 58 + SYS_EXECVE = 59 + SYS_UMASK = 60 + SYS_CHROOT = 61 + SYS_MSYNC = 65 + SYS_VFORK = 66 + SYS_MUNMAP = 73 + SYS_MPROTECT = 74 + SYS_MADVISE = 75 + SYS_MINCORE = 78 + SYS_GETGROUPS = 79 + SYS_SETGROUPS = 80 + SYS_GETPGRP = 81 + SYS_SETPGID = 82 + SYS_SETITIMER = 83 + SYS_SWAPON = 85 + SYS_GETITIMER = 86 + SYS_GETDTABLESIZE = 89 + SYS_DUP2 = 90 + SYS_FCNTL = 92 + SYS_SELECT = 93 + SYS_FSYNC = 95 + SYS_SETPRIORITY = 96 + SYS_SOCKET = 97 + SYS_CONNECT = 98 + SYS_GETPRIORITY = 100 + SYS_BIND = 104 + SYS_SETSOCKOPT = 105 + SYS_LISTEN = 106 + SYS_SIGSUSPEND = 111 + SYS_GETTIMEOFDAY = 116 + SYS_GETRUSAGE = 117 + SYS_GETSOCKOPT = 118 + SYS_READV = 120 + SYS_WRITEV = 121 + SYS_SETTIMEOFDAY = 122 + SYS_FCHOWN = 123 + SYS_FCHMOD = 124 + SYS_SETREUID = 126 + SYS_SETREGID = 127 + SYS_RENAME = 128 + SYS_FLOCK = 131 + SYS_MKFIFO = 132 + SYS_SENDTO = 133 + SYS_SHUTDOWN = 134 + SYS_SOCKETPAIR = 135 + SYS_MKDIR = 136 + SYS_RMDIR = 137 + SYS_UTIMES = 138 + SYS_FUTIMES = 139 + SYS_ADJTIME = 140 + SYS_GETHOSTUUID = 142 + SYS_SETSID = 147 + SYS_GETPGID = 151 + SYS_SETPRIVEXEC = 152 + SYS_PREAD = 153 + SYS_PWRITE = 154 + SYS_NFSSVC = 155 + SYS_STATFS = 157 + SYS_FSTATFS = 158 + SYS_UNMOUNT = 159 + SYS_GETFH = 161 + SYS_QUOTACTL = 165 + SYS_MOUNT = 167 + SYS_CSOPS = 169 + SYS_CSOPS_AUDITTOKEN = 170 + SYS_WAITID = 173 + SYS_KDEBUG_TRACE64 = 179 + SYS_KDEBUG_TRACE = 180 + SYS_SETGID = 181 + SYS_SETEGID = 182 + SYS_SETEUID = 183 + SYS_SIGRETURN = 184 + SYS_CHUD = 185 + SYS_FDATASYNC = 187 + SYS_STAT = 188 + SYS_FSTAT = 189 + SYS_LSTAT = 190 + SYS_PATHCONF = 191 + SYS_FPATHCONF = 192 + SYS_GETRLIMIT = 194 + SYS_SETRLIMIT = 195 + SYS_GETDIRENTRIES = 196 + SYS_MMAP = 197 + SYS_LSEEK = 199 + SYS_TRUNCATE = 200 + SYS_FTRUNCATE = 201 + SYS_SYSCTL = 202 + SYS_MLOCK = 203 + SYS_MUNLOCK = 204 + SYS_UNDELETE = 205 + SYS_OPEN_DPROTECTED_NP = 216 + SYS_GETATTRLIST = 220 + SYS_SETATTRLIST = 221 + SYS_GETDIRENTRIESATTR = 222 + SYS_EXCHANGEDATA = 223 + SYS_SEARCHFS = 225 + SYS_DELETE = 226 + SYS_COPYFILE = 227 + SYS_FGETATTRLIST = 228 + SYS_FSETATTRLIST = 229 + SYS_POLL = 230 + SYS_WATCHEVENT = 231 + SYS_WAITEVENT = 232 + 
SYS_MODWATCH = 233 + SYS_GETXATTR = 234 + SYS_FGETXATTR = 235 + SYS_SETXATTR = 236 + SYS_FSETXATTR = 237 + SYS_REMOVEXATTR = 238 + SYS_FREMOVEXATTR = 239 + SYS_LISTXATTR = 240 + SYS_FLISTXATTR = 241 + SYS_FSCTL = 242 + SYS_INITGROUPS = 243 + SYS_POSIX_SPAWN = 244 + SYS_FFSCTL = 245 + SYS_NFSCLNT = 247 + SYS_FHOPEN = 248 + SYS_MINHERIT = 250 + SYS_SEMSYS = 251 + SYS_MSGSYS = 252 + SYS_SHMSYS = 253 + SYS_SEMCTL = 254 + SYS_SEMGET = 255 + SYS_SEMOP = 256 + SYS_MSGCTL = 258 + SYS_MSGGET = 259 + SYS_MSGSND = 260 + SYS_MSGRCV = 261 + SYS_SHMAT = 262 + SYS_SHMCTL = 263 + SYS_SHMDT = 264 + SYS_SHMGET = 265 + SYS_SHM_OPEN = 266 + SYS_SHM_UNLINK = 267 + SYS_SEM_OPEN = 268 + SYS_SEM_CLOSE = 269 + SYS_SEM_UNLINK = 270 + SYS_SEM_WAIT = 271 + SYS_SEM_TRYWAIT = 272 + SYS_SEM_POST = 273 + SYS_SYSCTLBYNAME = 274 + SYS_OPEN_EXTENDED = 277 + SYS_UMASK_EXTENDED = 278 + SYS_STAT_EXTENDED = 279 + SYS_LSTAT_EXTENDED = 280 + SYS_FSTAT_EXTENDED = 281 + SYS_CHMOD_EXTENDED = 282 + SYS_FCHMOD_EXTENDED = 283 + SYS_ACCESS_EXTENDED = 284 + SYS_SETTID = 285 + SYS_GETTID = 286 + SYS_SETSGROUPS = 287 + SYS_GETSGROUPS = 288 + SYS_SETWGROUPS = 289 + SYS_GETWGROUPS = 290 + SYS_MKFIFO_EXTENDED = 291 + SYS_MKDIR_EXTENDED = 292 + SYS_IDENTITYSVC = 293 + SYS_SHARED_REGION_CHECK_NP = 294 + SYS_VM_PRESSURE_MONITOR = 296 + SYS_PSYNCH_RW_LONGRDLOCK = 297 + SYS_PSYNCH_RW_YIELDWRLOCK = 298 + SYS_PSYNCH_RW_DOWNGRADE = 299 + SYS_PSYNCH_RW_UPGRADE = 300 + SYS_PSYNCH_MUTEXWAIT = 301 + SYS_PSYNCH_MUTEXDROP = 302 + SYS_PSYNCH_CVBROAD = 303 + SYS_PSYNCH_CVSIGNAL = 304 + SYS_PSYNCH_CVWAIT = 305 + SYS_PSYNCH_RW_RDLOCK = 306 + SYS_PSYNCH_RW_WRLOCK = 307 + SYS_PSYNCH_RW_UNLOCK = 308 + SYS_PSYNCH_RW_UNLOCK2 = 309 + SYS_GETSID = 310 + SYS_SETTID_WITH_PID = 311 + SYS_PSYNCH_CVCLRPREPOST = 312 + SYS_AIO_FSYNC = 313 + SYS_AIO_RETURN = 314 + SYS_AIO_SUSPEND = 315 + SYS_AIO_CANCEL = 316 + SYS_AIO_ERROR = 317 + SYS_AIO_READ = 318 + SYS_AIO_WRITE = 319 + SYS_LIO_LISTIO = 320 + SYS_IOPOLICYSYS = 322 + SYS_PROCESS_POLICY = 323 + SYS_MLOCKALL = 324 + SYS_MUNLOCKALL = 325 + SYS_ISSETUGID = 327 + SYS___PTHREAD_KILL = 328 + SYS___PTHREAD_SIGMASK = 329 + SYS___SIGWAIT = 330 + SYS___DISABLE_THREADSIGNAL = 331 + SYS___PTHREAD_MARKCANCEL = 332 + SYS___PTHREAD_CANCELED = 333 + SYS___SEMWAIT_SIGNAL = 334 + SYS_PROC_INFO = 336 + SYS_SENDFILE = 337 + SYS_STAT64 = 338 + SYS_FSTAT64 = 339 + SYS_LSTAT64 = 340 + SYS_STAT64_EXTENDED = 341 + SYS_LSTAT64_EXTENDED = 342 + SYS_FSTAT64_EXTENDED = 343 + SYS_GETDIRENTRIES64 = 344 + SYS_STATFS64 = 345 + SYS_FSTATFS64 = 346 + SYS_GETFSSTAT64 = 347 + SYS___PTHREAD_CHDIR = 348 + SYS___PTHREAD_FCHDIR = 349 + SYS_AUDIT = 350 + SYS_AUDITON = 351 + SYS_GETAUID = 353 + SYS_SETAUID = 354 + SYS_GETAUDIT_ADDR = 357 + SYS_SETAUDIT_ADDR = 358 + SYS_AUDITCTL = 359 + SYS_BSDTHREAD_CREATE = 360 + SYS_BSDTHREAD_TERMINATE = 361 + SYS_KQUEUE = 362 + SYS_KEVENT = 363 + SYS_LCHOWN = 364 + SYS_STACK_SNAPSHOT = 365 + SYS_BSDTHREAD_REGISTER = 366 + SYS_WORKQ_OPEN = 367 + SYS_WORKQ_KERNRETURN = 368 + SYS_KEVENT64 = 369 + SYS___OLD_SEMWAIT_SIGNAL = 370 + SYS___OLD_SEMWAIT_SIGNAL_NOCANCEL = 371 + SYS_THREAD_SELFID = 372 + SYS_LEDGER = 373 + SYS___MAC_EXECVE = 380 + SYS___MAC_SYSCALL = 381 + SYS___MAC_GET_FILE = 382 + SYS___MAC_SET_FILE = 383 + SYS___MAC_GET_LINK = 384 + SYS___MAC_SET_LINK = 385 + SYS___MAC_GET_PROC = 386 + SYS___MAC_SET_PROC = 387 + SYS___MAC_GET_FD = 388 + SYS___MAC_SET_FD = 389 + SYS___MAC_GET_PID = 390 + SYS___MAC_GET_LCID = 391 + SYS___MAC_GET_LCTX = 392 + SYS___MAC_SET_LCTX = 393 + SYS_SETLCID = 394 + SYS_GETLCID = 395 + 
SYS_READ_NOCANCEL = 396 + SYS_WRITE_NOCANCEL = 397 + SYS_OPEN_NOCANCEL = 398 + SYS_CLOSE_NOCANCEL = 399 + SYS_WAIT4_NOCANCEL = 400 + SYS_RECVMSG_NOCANCEL = 401 + SYS_SENDMSG_NOCANCEL = 402 + SYS_RECVFROM_NOCANCEL = 403 + SYS_ACCEPT_NOCANCEL = 404 + SYS_MSYNC_NOCANCEL = 405 + SYS_FCNTL_NOCANCEL = 406 + SYS_SELECT_NOCANCEL = 407 + SYS_FSYNC_NOCANCEL = 408 + SYS_CONNECT_NOCANCEL = 409 + SYS_SIGSUSPEND_NOCANCEL = 410 + SYS_READV_NOCANCEL = 411 + SYS_WRITEV_NOCANCEL = 412 + SYS_SENDTO_NOCANCEL = 413 + SYS_PREAD_NOCANCEL = 414 + SYS_PWRITE_NOCANCEL = 415 + SYS_WAITID_NOCANCEL = 416 + SYS_POLL_NOCANCEL = 417 + SYS_MSGSND_NOCANCEL = 418 + SYS_MSGRCV_NOCANCEL = 419 + SYS_SEM_WAIT_NOCANCEL = 420 + SYS_AIO_SUSPEND_NOCANCEL = 421 + SYS___SIGWAIT_NOCANCEL = 422 + SYS___SEMWAIT_SIGNAL_NOCANCEL = 423 + SYS___MAC_MOUNT = 424 + SYS___MAC_GET_MOUNT = 425 + SYS___MAC_GETFSSTAT = 426 + SYS_FSGETPATH = 427 + SYS_AUDIT_SESSION_SELF = 428 + SYS_AUDIT_SESSION_JOIN = 429 + SYS_FILEPORT_MAKEPORT = 430 + SYS_FILEPORT_MAKEFD = 431 + SYS_AUDIT_SESSION_PORT = 432 + SYS_PID_SUSPEND = 433 + SYS_PID_RESUME = 434 + SYS_PID_HIBERNATE = 435 + SYS_PID_SHUTDOWN_SOCKETS = 436 + SYS_SHARED_REGION_MAP_AND_SLIDE_NP = 438 + SYS_KAS_INFO = 439 + SYS_MEMORYSTATUS_CONTROL = 440 + SYS_GUARDED_OPEN_NP = 441 + SYS_GUARDED_CLOSE_NP = 442 + SYS_GUARDED_KQUEUE_NP = 443 + SYS_CHANGE_FDGUARD_NP = 444 + SYS_PROC_RLIMIT_CONTROL = 446 + SYS_CONNECTX = 447 + SYS_DISCONNECTX = 448 + SYS_PEELOFF = 449 + SYS_SOCKET_DELEGATE = 450 + SYS_TELEMETRY = 451 + SYS_PROC_UUID_POLICY = 452 + SYS_MEMORYSTATUS_GET_LEVEL = 453 + SYS_SYSTEM_OVERRIDE = 454 + SYS_VFS_PURGE = 455 + SYS_SFI_CTL = 456 + SYS_SFI_PIDCTL = 457 + SYS_COALITION = 458 + SYS_COALITION_INFO = 459 + SYS_NECP_MATCH_POLICY = 460 + SYS_GETATTRLISTBULK = 461 + SYS_OPENAT = 463 + SYS_OPENAT_NOCANCEL = 464 + SYS_RENAMEAT = 465 + SYS_FACCESSAT = 466 + SYS_FCHMODAT = 467 + SYS_FCHOWNAT = 468 + SYS_FSTATAT = 469 + SYS_FSTATAT64 = 470 + SYS_LINKAT = 471 + SYS_UNLINKAT = 472 + SYS_READLINKAT = 473 + SYS_SYMLINKAT = 474 + SYS_MKDIRAT = 475 + SYS_GETATTRLISTAT = 476 + SYS_PROC_TRACE_LOG = 477 + SYS_BSDTHREAD_CTL = 478 + SYS_OPENBYID_NP = 479 + SYS_RECVMSG_X = 480 + SYS_SENDMSG_X = 481 + SYS_THREAD_SELFUSAGE = 482 + SYS_CSRCTL = 483 + SYS_GUARDED_OPEN_DPROTECTED_NP = 484 + SYS_GUARDED_WRITE_NP = 485 + SYS_GUARDED_PWRITE_NP = 486 + SYS_GUARDED_WRITEV_NP = 487 + SYS_RENAME_EXT = 488 + SYS_MREMAP_ENCRYPTED = 489 + SYS_MAXSYSCALL = 490 +) diff --git a/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zsysnum_dragonfly_386.go b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zsysnum_dragonfly_386.go new file mode 100644 index 000000000..785240a75 --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zsysnum_dragonfly_386.go @@ -0,0 +1,304 @@ +// mksysnum_dragonfly.pl +// MACHINE GENERATED BY THE ABOVE COMMAND; DO NOT EDIT + +// +build 386,dragonfly + +package unix + +const ( + // SYS_NOSYS = 0; // { int nosys(void); } syscall nosys_args int + SYS_EXIT = 1 // { void exit(int rval); } + SYS_FORK = 2 // { int fork(void); } + SYS_READ = 3 // { ssize_t read(int fd, void *buf, size_t nbyte); } + SYS_WRITE = 4 // { ssize_t write(int fd, const void *buf, size_t nbyte); } + SYS_OPEN = 5 // { int open(char *path, int flags, int mode); } + SYS_CLOSE = 6 // { int close(int fd); } + SYS_WAIT4 = 7 // { int wait4(int pid, int *status, int options, \ + SYS_LINK = 9 // { int link(char *path, char *link); } + SYS_UNLINK = 10 // { int unlink(char 
*path); } + SYS_CHDIR = 12 // { int chdir(char *path); } + SYS_FCHDIR = 13 // { int fchdir(int fd); } + SYS_MKNOD = 14 // { int mknod(char *path, int mode, int dev); } + SYS_CHMOD = 15 // { int chmod(char *path, int mode); } + SYS_CHOWN = 16 // { int chown(char *path, int uid, int gid); } + SYS_OBREAK = 17 // { int obreak(char *nsize); } break obreak_args int + SYS_GETFSSTAT = 18 // { int getfsstat(struct statfs *buf, long bufsize, \ + SYS_GETPID = 20 // { pid_t getpid(void); } + SYS_MOUNT = 21 // { int mount(char *type, char *path, int flags, \ + SYS_UNMOUNT = 22 // { int unmount(char *path, int flags); } + SYS_SETUID = 23 // { int setuid(uid_t uid); } + SYS_GETUID = 24 // { uid_t getuid(void); } + SYS_GETEUID = 25 // { uid_t geteuid(void); } + SYS_PTRACE = 26 // { int ptrace(int req, pid_t pid, caddr_t addr, \ + SYS_RECVMSG = 27 // { int recvmsg(int s, struct msghdr *msg, int flags); } + SYS_SENDMSG = 28 // { int sendmsg(int s, caddr_t msg, int flags); } + SYS_RECVFROM = 29 // { int recvfrom(int s, caddr_t buf, size_t len, \ + SYS_ACCEPT = 30 // { int accept(int s, caddr_t name, int *anamelen); } + SYS_GETPEERNAME = 31 // { int getpeername(int fdes, caddr_t asa, int *alen); } + SYS_GETSOCKNAME = 32 // { int getsockname(int fdes, caddr_t asa, int *alen); } + SYS_ACCESS = 33 // { int access(char *path, int flags); } + SYS_CHFLAGS = 34 // { int chflags(char *path, int flags); } + SYS_FCHFLAGS = 35 // { int fchflags(int fd, int flags); } + SYS_SYNC = 36 // { int sync(void); } + SYS_KILL = 37 // { int kill(int pid, int signum); } + SYS_GETPPID = 39 // { pid_t getppid(void); } + SYS_DUP = 41 // { int dup(u_int fd); } + SYS_PIPE = 42 // { int pipe(void); } + SYS_GETEGID = 43 // { gid_t getegid(void); } + SYS_PROFIL = 44 // { int profil(caddr_t samples, size_t size, \ + SYS_KTRACE = 45 // { int ktrace(const char *fname, int ops, int facs, \ + SYS_GETGID = 47 // { gid_t getgid(void); } + SYS_GETLOGIN = 49 // { int getlogin(char *namebuf, u_int namelen); } + SYS_SETLOGIN = 50 // { int setlogin(char *namebuf); } + SYS_ACCT = 51 // { int acct(char *path); } + SYS_SIGALTSTACK = 53 // { int sigaltstack(stack_t *ss, stack_t *oss); } + SYS_IOCTL = 54 // { int ioctl(int fd, u_long com, caddr_t data); } + SYS_REBOOT = 55 // { int reboot(int opt); } + SYS_REVOKE = 56 // { int revoke(char *path); } + SYS_SYMLINK = 57 // { int symlink(char *path, char *link); } + SYS_READLINK = 58 // { int readlink(char *path, char *buf, int count); } + SYS_EXECVE = 59 // { int execve(char *fname, char **argv, char **envv); } + SYS_UMASK = 60 // { int umask(int newmask); } umask umask_args int + SYS_CHROOT = 61 // { int chroot(char *path); } + SYS_MSYNC = 65 // { int msync(void *addr, size_t len, int flags); } + SYS_VFORK = 66 // { pid_t vfork(void); } + SYS_SBRK = 69 // { int sbrk(int incr); } + SYS_SSTK = 70 // { int sstk(int incr); } + SYS_MUNMAP = 73 // { int munmap(void *addr, size_t len); } + SYS_MPROTECT = 74 // { int mprotect(void *addr, size_t len, int prot); } + SYS_MADVISE = 75 // { int madvise(void *addr, size_t len, int behav); } + SYS_MINCORE = 78 // { int mincore(const void *addr, size_t len, \ + SYS_GETGROUPS = 79 // { int getgroups(u_int gidsetsize, gid_t *gidset); } + SYS_SETGROUPS = 80 // { int setgroups(u_int gidsetsize, gid_t *gidset); } + SYS_GETPGRP = 81 // { int getpgrp(void); } + SYS_SETPGID = 82 // { int setpgid(int pid, int pgid); } + SYS_SETITIMER = 83 // { int setitimer(u_int which, struct itimerval *itv, \ + SYS_SWAPON = 85 // { int swapon(char *name); } + SYS_GETITIMER = 86 // { int 
getitimer(u_int which, struct itimerval *itv); } + SYS_GETDTABLESIZE = 89 // { int getdtablesize(void); } + SYS_DUP2 = 90 // { int dup2(u_int from, u_int to); } + SYS_FCNTL = 92 // { int fcntl(int fd, int cmd, long arg); } + SYS_SELECT = 93 // { int select(int nd, fd_set *in, fd_set *ou, \ + SYS_FSYNC = 95 // { int fsync(int fd); } + SYS_SETPRIORITY = 96 // { int setpriority(int which, int who, int prio); } + SYS_SOCKET = 97 // { int socket(int domain, int type, int protocol); } + SYS_CONNECT = 98 // { int connect(int s, caddr_t name, int namelen); } + SYS_GETPRIORITY = 100 // { int getpriority(int which, int who); } + SYS_BIND = 104 // { int bind(int s, caddr_t name, int namelen); } + SYS_SETSOCKOPT = 105 // { int setsockopt(int s, int level, int name, \ + SYS_LISTEN = 106 // { int listen(int s, int backlog); } + SYS_GETTIMEOFDAY = 116 // { int gettimeofday(struct timeval *tp, \ + SYS_GETRUSAGE = 117 // { int getrusage(int who, struct rusage *rusage); } + SYS_GETSOCKOPT = 118 // { int getsockopt(int s, int level, int name, \ + SYS_READV = 120 // { int readv(int fd, struct iovec *iovp, u_int iovcnt); } + SYS_WRITEV = 121 // { int writev(int fd, struct iovec *iovp, \ + SYS_SETTIMEOFDAY = 122 // { int settimeofday(struct timeval *tv, \ + SYS_FCHOWN = 123 // { int fchown(int fd, int uid, int gid); } + SYS_FCHMOD = 124 // { int fchmod(int fd, int mode); } + SYS_SETREUID = 126 // { int setreuid(int ruid, int euid); } + SYS_SETREGID = 127 // { int setregid(int rgid, int egid); } + SYS_RENAME = 128 // { int rename(char *from, char *to); } + SYS_FLOCK = 131 // { int flock(int fd, int how); } + SYS_MKFIFO = 132 // { int mkfifo(char *path, int mode); } + SYS_SENDTO = 133 // { int sendto(int s, caddr_t buf, size_t len, \ + SYS_SHUTDOWN = 134 // { int shutdown(int s, int how); } + SYS_SOCKETPAIR = 135 // { int socketpair(int domain, int type, int protocol, \ + SYS_MKDIR = 136 // { int mkdir(char *path, int mode); } + SYS_RMDIR = 137 // { int rmdir(char *path); } + SYS_UTIMES = 138 // { int utimes(char *path, struct timeval *tptr); } + SYS_ADJTIME = 140 // { int adjtime(struct timeval *delta, \ + SYS_SETSID = 147 // { int setsid(void); } + SYS_QUOTACTL = 148 // { int quotactl(char *path, int cmd, int uid, \ + SYS_STATFS = 157 // { int statfs(char *path, struct statfs *buf); } + SYS_FSTATFS = 158 // { int fstatfs(int fd, struct statfs *buf); } + SYS_GETFH = 161 // { int getfh(char *fname, struct fhandle *fhp); } + SYS_GETDOMAINNAME = 162 // { int getdomainname(char *domainname, int len); } + SYS_SETDOMAINNAME = 163 // { int setdomainname(char *domainname, int len); } + SYS_UNAME = 164 // { int uname(struct utsname *name); } + SYS_SYSARCH = 165 // { int sysarch(int op, char *parms); } + SYS_RTPRIO = 166 // { int rtprio(int function, pid_t pid, \ + SYS_EXTPREAD = 173 // { ssize_t extpread(int fd, void *buf, \ + SYS_EXTPWRITE = 174 // { ssize_t extpwrite(int fd, const void *buf, \ + SYS_NTP_ADJTIME = 176 // { int ntp_adjtime(struct timex *tp); } + SYS_SETGID = 181 // { int setgid(gid_t gid); } + SYS_SETEGID = 182 // { int setegid(gid_t egid); } + SYS_SETEUID = 183 // { int seteuid(uid_t euid); } + SYS_PATHCONF = 191 // { int pathconf(char *path, int name); } + SYS_FPATHCONF = 192 // { int fpathconf(int fd, int name); } + SYS_GETRLIMIT = 194 // { int getrlimit(u_int which, \ + SYS_SETRLIMIT = 195 // { int setrlimit(u_int which, \ + SYS_MMAP = 197 // { caddr_t mmap(caddr_t addr, size_t len, int prot, \ + // SYS_NOSYS = 198; // { int nosys(void); } __syscall __syscall_args int + SYS_LSEEK = 199 // { off_t 
lseek(int fd, int pad, off_t offset, \
+ SYS_TRUNCATE = 200 // { int truncate(char *path, int pad, off_t length); }
+ SYS_FTRUNCATE = 201 // { int ftruncate(int fd, int pad, off_t length); }
+ SYS___SYSCTL = 202 // { int __sysctl(int *name, u_int namelen, void *old, \
+ SYS_MLOCK = 203 // { int mlock(const void *addr, size_t len); }
+ SYS_MUNLOCK = 204 // { int munlock(const void *addr, size_t len); }
+ SYS_UNDELETE = 205 // { int undelete(char *path); }
+ SYS_FUTIMES = 206 // { int futimes(int fd, struct timeval *tptr); }
+ SYS_GETPGID = 207 // { int getpgid(pid_t pid); }
+ SYS_POLL = 209 // { int poll(struct pollfd *fds, u_int nfds, \
+ SYS___SEMCTL = 220 // { int __semctl(int semid, int semnum, int cmd, \
+ SYS_SEMGET = 221 // { int semget(key_t key, int nsems, int semflg); }
+ SYS_SEMOP = 222 // { int semop(int semid, struct sembuf *sops, \
+ SYS_MSGCTL = 224 // { int msgctl(int msqid, int cmd, \
+ SYS_MSGGET = 225 // { int msgget(key_t key, int msgflg); }
+ SYS_MSGSND = 226 // { int msgsnd(int msqid, void *msgp, size_t msgsz, \
+ SYS_MSGRCV = 227 // { int msgrcv(int msqid, void *msgp, size_t msgsz, \
+ SYS_SHMAT = 228 // { caddr_t shmat(int shmid, const void *shmaddr, \
+ SYS_SHMCTL = 229 // { int shmctl(int shmid, int cmd, \
+ SYS_SHMDT = 230 // { int shmdt(const void *shmaddr); }
+ SYS_SHMGET = 231 // { int shmget(key_t key, size_t size, int shmflg); }
+ SYS_CLOCK_GETTIME = 232 // { int clock_gettime(clockid_t clock_id, \
+ SYS_CLOCK_SETTIME = 233 // { int clock_settime(clockid_t clock_id, \
+ SYS_CLOCK_GETRES = 234 // { int clock_getres(clockid_t clock_id, \
+ SYS_NANOSLEEP = 240 // { int nanosleep(const struct timespec *rqtp, \
+ SYS_MINHERIT = 250 // { int minherit(void *addr, size_t len, int inherit); }
+ SYS_RFORK = 251 // { int rfork(int flags); }
+ SYS_OPENBSD_POLL = 252 // { int openbsd_poll(struct pollfd *fds, u_int nfds, \
+ SYS_ISSETUGID = 253 // { int issetugid(void); }
+ SYS_LCHOWN = 254 // { int lchown(char *path, int uid, int gid); }
+ SYS_LCHMOD = 274 // { int lchmod(char *path, mode_t mode); }
+ SYS_LUTIMES = 276 // { int lutimes(char *path, struct timeval *tptr); }
+ SYS_EXTPREADV = 289 // { ssize_t extpreadv(int fd, struct iovec *iovp, \
+ SYS_EXTPWRITEV = 290 // { ssize_t extpwritev(int fd, struct iovec *iovp,\
+ SYS_FHSTATFS = 297 // { int fhstatfs(const struct fhandle *u_fhp, struct statfs *buf); }
+ SYS_FHOPEN = 298 // { int fhopen(const struct fhandle *u_fhp, int flags); }
+ SYS_MODNEXT = 300 // { int modnext(int modid); }
+ SYS_MODSTAT = 301 // { int modstat(int modid, struct module_stat* stat); }
+ SYS_MODFNEXT = 302 // { int modfnext(int modid); }
+ SYS_MODFIND = 303 // { int modfind(const char *name); }
+ SYS_KLDLOAD = 304 // { int kldload(const char *file); }
+ SYS_KLDUNLOAD = 305 // { int kldunload(int fileid); }
+ SYS_KLDFIND = 306 // { int kldfind(const char *file); }
+ SYS_KLDNEXT = 307 // { int kldnext(int fileid); }
+ SYS_KLDSTAT = 308 // { int kldstat(int fileid, struct kld_file_stat* stat); }
+ SYS_KLDFIRSTMOD = 309 // { int kldfirstmod(int fileid); }
+ SYS_GETSID = 310 // { int getsid(pid_t pid); }
+ SYS_SETRESUID = 311 // { int setresuid(uid_t ruid, uid_t euid, uid_t suid); }
+ SYS_SETRESGID = 312 // { int setresgid(gid_t rgid, gid_t egid, gid_t sgid); }
+ SYS_AIO_RETURN = 314 // { int aio_return(struct aiocb *aiocbp); }
+ SYS_AIO_SUSPEND = 315 // { int aio_suspend(struct aiocb * const * aiocbp, int nent, const struct timespec *timeout); }
+ SYS_AIO_CANCEL = 316 // { int aio_cancel(int fd, struct aiocb *aiocbp); }
+ SYS_AIO_ERROR = 317 // { int aio_error(struct aiocb *aiocbp); }
+ SYS_AIO_READ = 318 // { int aio_read(struct aiocb *aiocbp); }
+ SYS_AIO_WRITE = 319 // { int aio_write(struct aiocb *aiocbp); }
+ SYS_LIO_LISTIO = 320 // { int lio_listio(int mode, struct aiocb * const *acb_list, int nent, struct sigevent *sig); }
+ SYS_YIELD = 321 // { int yield(void); }
+ SYS_MLOCKALL = 324 // { int mlockall(int how); }
+ SYS_MUNLOCKALL = 325 // { int munlockall(void); }
+ SYS___GETCWD = 326 // { int __getcwd(u_char *buf, u_int buflen); }
+ SYS_SCHED_SETPARAM = 327 // { int sched_setparam (pid_t pid, const struct sched_param *param); }
+ SYS_SCHED_GETPARAM = 328 // { int sched_getparam (pid_t pid, struct sched_param *param); }
+ SYS_SCHED_SETSCHEDULER = 329 // { int sched_setscheduler (pid_t pid, int policy, const struct sched_param *param); }
+ SYS_SCHED_GETSCHEDULER = 330 // { int sched_getscheduler (pid_t pid); }
+ SYS_SCHED_YIELD = 331 // { int sched_yield (void); }
+ SYS_SCHED_GET_PRIORITY_MAX = 332 // { int sched_get_priority_max (int policy); }
+ SYS_SCHED_GET_PRIORITY_MIN = 333 // { int sched_get_priority_min (int policy); }
+ SYS_SCHED_RR_GET_INTERVAL = 334 // { int sched_rr_get_interval (pid_t pid, struct timespec *interval); }
+ SYS_UTRACE = 335 // { int utrace(const void *addr, size_t len); }
+ SYS_KLDSYM = 337 // { int kldsym(int fileid, int cmd, void *data); }
+ SYS_JAIL = 338 // { int jail(struct jail *jail); }
+ SYS_SIGPROCMASK = 340 // { int sigprocmask(int how, const sigset_t *set, \
+ SYS_SIGSUSPEND = 341 // { int sigsuspend(const sigset_t *sigmask); }
+ SYS_SIGACTION = 342 // { int sigaction(int sig, const struct sigaction *act, \
+ SYS_SIGPENDING = 343 // { int sigpending(sigset_t *set); }
+ SYS_SIGRETURN = 344 // { int sigreturn(ucontext_t *sigcntxp); }
+ SYS_SIGTIMEDWAIT = 345 // { int sigtimedwait(const sigset_t *set,\
+ SYS_SIGWAITINFO = 346 // { int sigwaitinfo(const sigset_t *set,\
+ SYS___ACL_GET_FILE = 347 // { int __acl_get_file(const char *path, \
+ SYS___ACL_SET_FILE = 348 // { int __acl_set_file(const char *path, \
+ SYS___ACL_GET_FD = 349 // { int __acl_get_fd(int filedes, acl_type_t type, \
+ SYS___ACL_SET_FD = 350 // { int __acl_set_fd(int filedes, acl_type_t type, \
+ SYS___ACL_DELETE_FILE = 351 // { int __acl_delete_file(const char *path, \
+ SYS___ACL_DELETE_FD = 352 // { int __acl_delete_fd(int filedes, acl_type_t type); }
+ SYS___ACL_ACLCHECK_FILE = 353 // { int __acl_aclcheck_file(const char *path, \
+ SYS___ACL_ACLCHECK_FD = 354 // { int __acl_aclcheck_fd(int filedes, acl_type_t type, \
+ SYS_EXTATTRCTL = 355 // { int extattrctl(const char *path, int cmd, \
+ SYS_EXTATTR_SET_FILE = 356 // { int extattr_set_file(const char *path, \
+ SYS_EXTATTR_GET_FILE = 357 // { int extattr_get_file(const char *path, \
+ SYS_EXTATTR_DELETE_FILE = 358 // { int extattr_delete_file(const char *path, \
+ SYS_AIO_WAITCOMPLETE = 359 // { int aio_waitcomplete(struct aiocb **aiocbp, struct timespec *timeout); }
+ SYS_GETRESUID = 360 // { int getresuid(uid_t *ruid, uid_t *euid, uid_t *suid); }
+ SYS_GETRESGID = 361 // { int getresgid(gid_t *rgid, gid_t *egid, gid_t *sgid); }
+ SYS_KQUEUE = 362 // { int kqueue(void); }
+ SYS_KEVENT = 363 // { int kevent(int fd, \
+ SYS_SCTP_PEELOFF = 364 // { int sctp_peeloff(int sd, caddr_t name ); }
+ SYS_LCHFLAGS = 391 // { int lchflags(char *path, int flags); }
+ SYS_UUIDGEN = 392 // { int uuidgen(struct uuid *store, int count); }
+ SYS_SENDFILE = 393 // { int sendfile(int fd, int s, off_t offset, size_t nbytes, \
+ SYS_VARSYM_SET = 450 // { int varsym_set(int level, const char *name, const char *data); }
+ SYS_VARSYM_GET = 451 // { int varsym_get(int mask, const char *wild, char *buf, int bufsize); }
+ SYS_VARSYM_LIST = 452 // { int varsym_list(int level, char *buf, int maxsize, int *marker); }
+ SYS_EXEC_SYS_REGISTER = 465 // { int exec_sys_register(void *entry); }
+ SYS_EXEC_SYS_UNREGISTER = 466 // { int exec_sys_unregister(int id); }
+ SYS_SYS_CHECKPOINT = 467 // { int sys_checkpoint(int type, int fd, pid_t pid, int retval); }
+ SYS_MOUNTCTL = 468 // { int mountctl(const char *path, int op, int fd, const void *ctl, int ctllen, void *buf, int buflen); }
+ SYS_UMTX_SLEEP = 469 // { int umtx_sleep(volatile const int *ptr, int value, int timeout); }
+ SYS_UMTX_WAKEUP = 470 // { int umtx_wakeup(volatile const int *ptr, int count); }
+ SYS_JAIL_ATTACH = 471 // { int jail_attach(int jid); }
+ SYS_SET_TLS_AREA = 472 // { int set_tls_area(int which, struct tls_info *info, size_t infosize); }
+ SYS_GET_TLS_AREA = 473 // { int get_tls_area(int which, struct tls_info *info, size_t infosize); }
+ SYS_CLOSEFROM = 474 // { int closefrom(int fd); }
+ SYS_STAT = 475 // { int stat(const char *path, struct stat *ub); }
+ SYS_FSTAT = 476 // { int fstat(int fd, struct stat *sb); }
+ SYS_LSTAT = 477 // { int lstat(const char *path, struct stat *ub); }
+ SYS_FHSTAT = 478 // { int fhstat(const struct fhandle *u_fhp, struct stat *sb); }
+ SYS_GETDIRENTRIES = 479 // { int getdirentries(int fd, char *buf, u_int count, \
+ SYS_GETDENTS = 480 // { int getdents(int fd, char *buf, size_t count); }
+ SYS_USCHED_SET = 481 // { int usched_set(pid_t pid, int cmd, void *data, \
+ SYS_EXTACCEPT = 482 // { int extaccept(int s, int flags, caddr_t name, int *anamelen); }
+ SYS_EXTCONNECT = 483 // { int extconnect(int s, int flags, caddr_t name, int namelen); }
+ SYS_MCONTROL = 485 // { int mcontrol(void *addr, size_t len, int behav, off_t value); }
+ SYS_VMSPACE_CREATE = 486 // { int vmspace_create(void *id, int type, void *data); }
+ SYS_VMSPACE_DESTROY = 487 // { int vmspace_destroy(void *id); }
+ SYS_VMSPACE_CTL = 488 // { int vmspace_ctl(void *id, int cmd, \
+ SYS_VMSPACE_MMAP = 489 // { int vmspace_mmap(void *id, void *addr, size_t len, \
+ SYS_VMSPACE_MUNMAP = 490 // { int vmspace_munmap(void *id, void *addr, \
+ SYS_VMSPACE_MCONTROL = 491 // { int vmspace_mcontrol(void *id, void *addr, \
+ SYS_VMSPACE_PREAD = 492 // { ssize_t vmspace_pread(void *id, void *buf, \
+ SYS_VMSPACE_PWRITE = 493 // { ssize_t vmspace_pwrite(void *id, const void *buf, \
+ SYS_EXTEXIT = 494 // { void extexit(int how, int status, void *addr); }
+ SYS_LWP_CREATE = 495 // { int lwp_create(struct lwp_params *params); }
+ SYS_LWP_GETTID = 496 // { lwpid_t lwp_gettid(void); }
+ SYS_LWP_KILL = 497 // { int lwp_kill(pid_t pid, lwpid_t tid, int signum); }
+ SYS_LWP_RTPRIO = 498 // { int lwp_rtprio(int function, pid_t pid, lwpid_t tid, struct rtprio *rtp); }
+ SYS_PSELECT = 499 // { int pselect(int nd, fd_set *in, fd_set *ou, \
+ SYS_STATVFS = 500 // { int statvfs(const char *path, struct statvfs *buf); }
+ SYS_FSTATVFS = 501 // { int fstatvfs(int fd, struct statvfs *buf); }
+ SYS_FHSTATVFS = 502 // { int fhstatvfs(const struct fhandle *u_fhp, struct statvfs *buf); }
+ SYS_GETVFSSTAT = 503 // { int getvfsstat(struct statfs *buf, \
+ SYS_OPENAT = 504 // { int openat(int fd, char *path, int flags, int mode); }
+ SYS_FSTATAT = 505 // { int fstatat(int fd, char *path, \
+ SYS_FCHMODAT = 506 // { int fchmodat(int fd, char *path, int mode, \
+ SYS_FCHOWNAT = 507 // { int fchownat(int fd, char *path, int uid, int gid, \
+ SYS_UNLINKAT = 508 // { int unlinkat(int fd, char *path, int flags); }
+ SYS_FACCESSAT = 509 // { int faccessat(int fd, char *path, int amode, \
+ SYS_MQ_OPEN = 510 // { mqd_t mq_open(const char * name, int oflag, \
+ SYS_MQ_CLOSE = 511 // { int mq_close(mqd_t mqdes); }
+ SYS_MQ_UNLINK = 512 // { int mq_unlink(const char *name); }
+ SYS_MQ_GETATTR = 513 // { int mq_getattr(mqd_t mqdes, \
+ SYS_MQ_SETATTR = 514 // { int mq_setattr(mqd_t mqdes, \
+ SYS_MQ_NOTIFY = 515 // { int mq_notify(mqd_t mqdes, \
+ SYS_MQ_SEND = 516 // { int mq_send(mqd_t mqdes, const char *msg_ptr, \
+ SYS_MQ_RECEIVE = 517 // { ssize_t mq_receive(mqd_t mqdes, char *msg_ptr, \
+ SYS_MQ_TIMEDSEND = 518 // { int mq_timedsend(mqd_t mqdes, \
+ SYS_MQ_TIMEDRECEIVE = 519 // { ssize_t mq_timedreceive(mqd_t mqdes, \
+ SYS_IOPRIO_SET = 520 // { int ioprio_set(int which, int who, int prio); }
+ SYS_IOPRIO_GET = 521 // { int ioprio_get(int which, int who); }
+ SYS_CHROOT_KERNEL = 522 // { int chroot_kernel(char *path); }
+ SYS_RENAMEAT = 523 // { int renameat(int oldfd, char *old, int newfd, \
+ SYS_MKDIRAT = 524 // { int mkdirat(int fd, char *path, mode_t mode); }
+ SYS_MKFIFOAT = 525 // { int mkfifoat(int fd, char *path, mode_t mode); }
+ SYS_MKNODAT = 526 // { int mknodat(int fd, char *path, mode_t mode, \
+ SYS_READLINKAT = 527 // { int readlinkat(int fd, char *path, char *buf, \
+ SYS_SYMLINKAT = 528 // { int symlinkat(char *path1, int fd, char *path2); }
+ SYS_SWAPOFF = 529 // { int swapoff(char *name); }
+ SYS_VQUOTACTL = 530 // { int vquotactl(const char *path, \
+ SYS_LINKAT = 531 // { int linkat(int fd1, char *path1, int fd2, \
+ SYS_EACCESS = 532 // { int eaccess(char *path, int flags); }
+ SYS_LPATHCONF = 533 // { int lpathconf(char *path, int name); }
+ SYS_VMM_GUEST_CTL = 534 // { int vmm_guest_ctl(int op, struct vmm_guest_options *options); }
+ SYS_VMM_GUEST_SYNC_ADDR = 535 // { int vmm_guest_sync_addr(long *dstaddr, long *srcaddr); }
+)
diff --git a/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zsysnum_dragonfly_amd64.go b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zsysnum_dragonfly_amd64.go
new file mode 100644
index 000000000..d6038fa9b
--- /dev/null
+++ b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zsysnum_dragonfly_amd64.go
@@ -0,0 +1,304 @@
+// mksysnum_dragonfly.pl
+// MACHINE GENERATED BY THE ABOVE COMMAND; DO NOT EDIT
+
+// +build amd64,dragonfly
+
+package unix
+
+const (
+ // SYS_NOSYS = 0; // { int nosys(void); } syscall nosys_args int
+ SYS_EXIT = 1 // { void exit(int rval); }
+ SYS_FORK = 2 // { int fork(void); }
+ SYS_READ = 3 // { ssize_t read(int fd, void *buf, size_t nbyte); }
+ SYS_WRITE = 4 // { ssize_t write(int fd, const void *buf, size_t nbyte); }
+ SYS_OPEN = 5 // { int open(char *path, int flags, int mode); }
+ SYS_CLOSE = 6 // { int close(int fd); }
+ SYS_WAIT4 = 7 // { int wait4(int pid, int *status, int options, \
+ SYS_LINK = 9 // { int link(char *path, char *link); }
+ SYS_UNLINK = 10 // { int unlink(char *path); }
+ SYS_CHDIR = 12 // { int chdir(char *path); }
+ SYS_FCHDIR = 13 // { int fchdir(int fd); }
+ SYS_MKNOD = 14 // { int mknod(char *path, int mode, int dev); }
+ SYS_CHMOD = 15 // { int chmod(char *path, int mode); }
+ SYS_CHOWN = 16 // { int chown(char *path, int uid, int gid); }
+ SYS_OBREAK = 17 // { int obreak(char *nsize); } break obreak_args int
+ SYS_GETFSSTAT = 18 // { int getfsstat(struct statfs *buf, long bufsize, \
+ SYS_GETPID = 20 // { pid_t getpid(void); }
+ SYS_MOUNT = 21 // { int mount(char *type, char *path, int flags, \
+ SYS_UNMOUNT = 22 // { int unmount(char *path, int flags); }
+ SYS_SETUID = 23 // { int setuid(uid_t uid); }
+ SYS_GETUID = 24 // { uid_t getuid(void); }
+ SYS_GETEUID = 25 // { uid_t geteuid(void); }
+ SYS_PTRACE = 26 // { int ptrace(int req, pid_t pid, caddr_t addr, \
+ SYS_RECVMSG = 27 // { int recvmsg(int s, struct msghdr *msg, int flags); }
+ SYS_SENDMSG = 28 // { int sendmsg(int s, caddr_t msg, int flags); }
+ SYS_RECVFROM = 29 // { int recvfrom(int s, caddr_t buf, size_t len, \
+ SYS_ACCEPT = 30 // { int accept(int s, caddr_t name, int *anamelen); }
+ SYS_GETPEERNAME = 31 // { int getpeername(int fdes, caddr_t asa, int *alen); }
+ SYS_GETSOCKNAME = 32 // { int getsockname(int fdes, caddr_t asa, int *alen); }
+ SYS_ACCESS = 33 // { int access(char *path, int flags); }
+ SYS_CHFLAGS = 34 // { int chflags(char *path, int flags); }
+ SYS_FCHFLAGS = 35 // { int fchflags(int fd, int flags); }
+ SYS_SYNC = 36 // { int sync(void); }
+ SYS_KILL = 37 // { int kill(int pid, int signum); }
+ SYS_GETPPID = 39 // { pid_t getppid(void); }
+ SYS_DUP = 41 // { int dup(u_int fd); }
+ SYS_PIPE = 42 // { int pipe(void); }
+ SYS_GETEGID = 43 // { gid_t getegid(void); }
+ SYS_PROFIL = 44 // { int profil(caddr_t samples, size_t size, \
+ SYS_KTRACE = 45 // { int ktrace(const char *fname, int ops, int facs, \
+ SYS_GETGID = 47 // { gid_t getgid(void); }
+ SYS_GETLOGIN = 49 // { int getlogin(char *namebuf, u_int namelen); }
+ SYS_SETLOGIN = 50 // { int setlogin(char *namebuf); }
+ SYS_ACCT = 51 // { int acct(char *path); }
+ SYS_SIGALTSTACK = 53 // { int sigaltstack(stack_t *ss, stack_t *oss); }
+ SYS_IOCTL = 54 // { int ioctl(int fd, u_long com, caddr_t data); }
+ SYS_REBOOT = 55 // { int reboot(int opt); }
+ SYS_REVOKE = 56 // { int revoke(char *path); }
+ SYS_SYMLINK = 57 // { int symlink(char *path, char *link); }
+ SYS_READLINK = 58 // { int readlink(char *path, char *buf, int count); }
+ SYS_EXECVE = 59 // { int execve(char *fname, char **argv, char **envv); }
+ SYS_UMASK = 60 // { int umask(int newmask); } umask umask_args int
+ SYS_CHROOT = 61 // { int chroot(char *path); }
+ SYS_MSYNC = 65 // { int msync(void *addr, size_t len, int flags); }
+ SYS_VFORK = 66 // { pid_t vfork(void); }
+ SYS_SBRK = 69 // { int sbrk(int incr); }
+ SYS_SSTK = 70 // { int sstk(int incr); }
+ SYS_MUNMAP = 73 // { int munmap(void *addr, size_t len); }
+ SYS_MPROTECT = 74 // { int mprotect(void *addr, size_t len, int prot); }
+ SYS_MADVISE = 75 // { int madvise(void *addr, size_t len, int behav); }
+ SYS_MINCORE = 78 // { int mincore(const void *addr, size_t len, \
+ SYS_GETGROUPS = 79 // { int getgroups(u_int gidsetsize, gid_t *gidset); }
+ SYS_SETGROUPS = 80 // { int setgroups(u_int gidsetsize, gid_t *gidset); }
+ SYS_GETPGRP = 81 // { int getpgrp(void); }
+ SYS_SETPGID = 82 // { int setpgid(int pid, int pgid); }
+ SYS_SETITIMER = 83 // { int setitimer(u_int which, struct itimerval *itv, \
+ SYS_SWAPON = 85 // { int swapon(char *name); }
+ SYS_GETITIMER = 86 // { int getitimer(u_int which, struct itimerval *itv); }
+ SYS_GETDTABLESIZE = 89 // { int getdtablesize(void); }
+ SYS_DUP2 = 90 // { int dup2(u_int from, u_int to); }
+ SYS_FCNTL = 92 // { int fcntl(int fd, int cmd, long arg); }
+ SYS_SELECT = 93 // { int select(int nd, fd_set *in, fd_set *ou, \
+ SYS_FSYNC = 95 // { int fsync(int fd); }
+ SYS_SETPRIORITY = 96 // { int setpriority(int which, int who, int prio); }
+ SYS_SOCKET = 97 // { int socket(int domain, int type, int protocol); }
+ SYS_CONNECT = 98 // { int connect(int s, caddr_t name, int namelen); }
+ SYS_GETPRIORITY = 100 // { int getpriority(int which, int who); }
+ SYS_BIND = 104 // { int bind(int s, caddr_t name, int namelen); }
+ SYS_SETSOCKOPT = 105 // { int setsockopt(int s, int level, int name, \
+ SYS_LISTEN = 106 // { int listen(int s, int backlog); }
+ SYS_GETTIMEOFDAY = 116 // { int gettimeofday(struct timeval *tp, \
+ SYS_GETRUSAGE = 117 // { int getrusage(int who, struct rusage *rusage); }
+ SYS_GETSOCKOPT = 118 // { int getsockopt(int s, int level, int name, \
+ SYS_READV = 120 // { int readv(int fd, struct iovec *iovp, u_int iovcnt); }
+ SYS_WRITEV = 121 // { int writev(int fd, struct iovec *iovp, \
+ SYS_SETTIMEOFDAY = 122 // { int settimeofday(struct timeval *tv, \
+ SYS_FCHOWN = 123 // { int fchown(int fd, int uid, int gid); }
+ SYS_FCHMOD = 124 // { int fchmod(int fd, int mode); }
+ SYS_SETREUID = 126 // { int setreuid(int ruid, int euid); }
+ SYS_SETREGID = 127 // { int setregid(int rgid, int egid); }
+ SYS_RENAME = 128 // { int rename(char *from, char *to); }
+ SYS_FLOCK = 131 // { int flock(int fd, int how); }
+ SYS_MKFIFO = 132 // { int mkfifo(char *path, int mode); }
+ SYS_SENDTO = 133 // { int sendto(int s, caddr_t buf, size_t len, \
+ SYS_SHUTDOWN = 134 // { int shutdown(int s, int how); }
+ SYS_SOCKETPAIR = 135 // { int socketpair(int domain, int type, int protocol, \
+ SYS_MKDIR = 136 // { int mkdir(char *path, int mode); }
+ SYS_RMDIR = 137 // { int rmdir(char *path); }
+ SYS_UTIMES = 138 // { int utimes(char *path, struct timeval *tptr); }
+ SYS_ADJTIME = 140 // { int adjtime(struct timeval *delta, \
+ SYS_SETSID = 147 // { int setsid(void); }
+ SYS_QUOTACTL = 148 // { int quotactl(char *path, int cmd, int uid, \
+ SYS_STATFS = 157 // { int statfs(char *path, struct statfs *buf); }
+ SYS_FSTATFS = 158 // { int fstatfs(int fd, struct statfs *buf); }
+ SYS_GETFH = 161 // { int getfh(char *fname, struct fhandle *fhp); }
+ SYS_GETDOMAINNAME = 162 // { int getdomainname(char *domainname, int len); }
+ SYS_SETDOMAINNAME = 163 // { int setdomainname(char *domainname, int len); }
+ SYS_UNAME = 164 // { int uname(struct utsname *name); }
+ SYS_SYSARCH = 165 // { int sysarch(int op, char *parms); }
+ SYS_RTPRIO = 166 // { int rtprio(int function, pid_t pid, \
+ SYS_EXTPREAD = 173 // { ssize_t extpread(int fd, void *buf, \
+ SYS_EXTPWRITE = 174 // { ssize_t extpwrite(int fd, const void *buf, \
+ SYS_NTP_ADJTIME = 176 // { int ntp_adjtime(struct timex *tp); }
+ SYS_SETGID = 181 // { int setgid(gid_t gid); }
+ SYS_SETEGID = 182 // { int setegid(gid_t egid); }
+ SYS_SETEUID = 183 // { int seteuid(uid_t euid); }
+ SYS_PATHCONF = 191 // { int pathconf(char *path, int name); }
+ SYS_FPATHCONF = 192 // { int fpathconf(int fd, int name); }
+ SYS_GETRLIMIT = 194 // { int getrlimit(u_int which, \
+ SYS_SETRLIMIT = 195 // { int setrlimit(u_int which, \
+ SYS_MMAP = 197 // { caddr_t mmap(caddr_t addr, size_t len, int prot, \
+ // SYS_NOSYS = 198; // { int nosys(void); } __syscall __syscall_args int
+ SYS_LSEEK = 199 // { off_t lseek(int fd, int pad, off_t offset, \
+ SYS_TRUNCATE = 200 // { int truncate(char *path, int pad, off_t length); }
+ SYS_FTRUNCATE = 201 // { int ftruncate(int fd, int pad, off_t length); }
+ SYS___SYSCTL = 202 // { int __sysctl(int *name, u_int namelen, void *old, \
+ SYS_MLOCK = 203 // { int mlock(const void *addr, size_t len); }
+ SYS_MUNLOCK = 204 // { int munlock(const void *addr, size_t len); }
+ SYS_UNDELETE = 205 // { int undelete(char *path); }
+ SYS_FUTIMES = 206 // { int futimes(int fd, struct timeval *tptr); }
+ SYS_GETPGID = 207 // { int getpgid(pid_t pid); }
+ SYS_POLL = 209 // { int poll(struct pollfd *fds, u_int nfds, \
+ SYS___SEMCTL = 220 // { int __semctl(int semid, int semnum, int cmd, \
+ SYS_SEMGET = 221 // { int semget(key_t key, int nsems, int semflg); }
+ SYS_SEMOP = 222 // { int semop(int semid, struct sembuf *sops, \
+ SYS_MSGCTL = 224 // { int msgctl(int msqid, int cmd, \
+ SYS_MSGGET = 225 // { int msgget(key_t key, int msgflg); }
+ SYS_MSGSND = 226 // { int msgsnd(int msqid, void *msgp, size_t msgsz, \
+ SYS_MSGRCV = 227 // { int msgrcv(int msqid, void *msgp, size_t msgsz, \
+ SYS_SHMAT = 228 // { caddr_t shmat(int shmid, const void *shmaddr, \
+ SYS_SHMCTL = 229 // { int shmctl(int shmid, int cmd, \
+ SYS_SHMDT = 230 // { int shmdt(const void *shmaddr); }
+ SYS_SHMGET = 231 // { int shmget(key_t key, size_t size, int shmflg); }
+ SYS_CLOCK_GETTIME = 232 // { int clock_gettime(clockid_t clock_id, \
+ SYS_CLOCK_SETTIME = 233 // { int clock_settime(clockid_t clock_id, \
+ SYS_CLOCK_GETRES = 234 // { int clock_getres(clockid_t clock_id, \
+ SYS_NANOSLEEP = 240 // { int nanosleep(const struct timespec *rqtp, \
+ SYS_MINHERIT = 250 // { int minherit(void *addr, size_t len, int inherit); }
+ SYS_RFORK = 251 // { int rfork(int flags); }
+ SYS_OPENBSD_POLL = 252 // { int openbsd_poll(struct pollfd *fds, u_int nfds, \
+ SYS_ISSETUGID = 253 // { int issetugid(void); }
+ SYS_LCHOWN = 254 // { int lchown(char *path, int uid, int gid); }
+ SYS_LCHMOD = 274 // { int lchmod(char *path, mode_t mode); }
+ SYS_LUTIMES = 276 // { int lutimes(char *path, struct timeval *tptr); }
+ SYS_EXTPREADV = 289 // { ssize_t extpreadv(int fd, struct iovec *iovp, \
+ SYS_EXTPWRITEV = 290 // { ssize_t extpwritev(int fd, struct iovec *iovp,\
+ SYS_FHSTATFS = 297 // { int fhstatfs(const struct fhandle *u_fhp, struct statfs *buf); }
+ SYS_FHOPEN = 298 // { int fhopen(const struct fhandle *u_fhp, int flags); }
+ SYS_MODNEXT = 300 // { int modnext(int modid); }
+ SYS_MODSTAT = 301 // { int modstat(int modid, struct module_stat* stat); }
+ SYS_MODFNEXT = 302 // { int modfnext(int modid); }
+ SYS_MODFIND = 303 // { int modfind(const char *name); }
+ SYS_KLDLOAD = 304 // { int kldload(const char *file); }
+ SYS_KLDUNLOAD = 305 // { int kldunload(int fileid); }
+ SYS_KLDFIND = 306 // { int kldfind(const char *file); }
+ SYS_KLDNEXT = 307 // { int kldnext(int fileid); }
+ SYS_KLDSTAT = 308 // { int kldstat(int fileid, struct kld_file_stat* stat); }
+ SYS_KLDFIRSTMOD = 309 // { int kldfirstmod(int fileid); }
+ SYS_GETSID = 310 // { int getsid(pid_t pid); }
+ SYS_SETRESUID = 311 // { int setresuid(uid_t ruid, uid_t euid, uid_t suid); }
+ SYS_SETRESGID = 312 // { int setresgid(gid_t rgid, gid_t egid, gid_t sgid); }
+ SYS_AIO_RETURN = 314 // { int aio_return(struct aiocb *aiocbp); }
+ SYS_AIO_SUSPEND = 315 // { int aio_suspend(struct aiocb * const * aiocbp, int nent, const struct timespec *timeout); }
+ SYS_AIO_CANCEL = 316 // { int aio_cancel(int fd, struct aiocb *aiocbp); }
+ SYS_AIO_ERROR = 317 // { int aio_error(struct aiocb *aiocbp); }
+ SYS_AIO_READ = 318 // { int aio_read(struct aiocb *aiocbp); }
+ SYS_AIO_WRITE = 319 // { int aio_write(struct aiocb *aiocbp); }
+ SYS_LIO_LISTIO = 320 // { int lio_listio(int mode, struct aiocb * const *acb_list, int nent, struct sigevent *sig); }
+ SYS_YIELD = 321 // { int yield(void); }
+ SYS_MLOCKALL = 324 // { int mlockall(int how); }
+ SYS_MUNLOCKALL = 325 // { int munlockall(void); }
+ SYS___GETCWD = 326 // { int __getcwd(u_char *buf, u_int buflen); }
+ SYS_SCHED_SETPARAM = 327 // { int sched_setparam (pid_t pid, const struct sched_param *param); }
+ SYS_SCHED_GETPARAM = 328 // { int sched_getparam (pid_t pid, struct sched_param *param); }
+ SYS_SCHED_SETSCHEDULER = 329 // { int sched_setscheduler (pid_t pid, int policy, const struct sched_param *param); }
+ SYS_SCHED_GETSCHEDULER = 330 // { int sched_getscheduler (pid_t pid); }
+ SYS_SCHED_YIELD = 331 // { int sched_yield (void); }
+ SYS_SCHED_GET_PRIORITY_MAX = 332 // { int sched_get_priority_max (int policy); }
+ SYS_SCHED_GET_PRIORITY_MIN = 333 // { int sched_get_priority_min (int policy); }
+ SYS_SCHED_RR_GET_INTERVAL = 334 // { int sched_rr_get_interval (pid_t pid, struct timespec *interval); }
+ SYS_UTRACE = 335 // { int utrace(const void *addr, size_t len); }
+ SYS_KLDSYM = 337 // { int kldsym(int fileid, int cmd, void *data); }
+ SYS_JAIL = 338 // { int jail(struct jail *jail); }
+ SYS_SIGPROCMASK = 340 // { int sigprocmask(int how, const sigset_t *set, \
+ SYS_SIGSUSPEND = 341 // { int sigsuspend(const sigset_t *sigmask); }
+ SYS_SIGACTION = 342 // { int sigaction(int sig, const struct sigaction *act, \
+ SYS_SIGPENDING = 343 // { int sigpending(sigset_t *set); }
+ SYS_SIGRETURN = 344 // { int sigreturn(ucontext_t *sigcntxp); }
+ SYS_SIGTIMEDWAIT = 345 // { int sigtimedwait(const sigset_t *set,\
+ SYS_SIGWAITINFO = 346 // { int sigwaitinfo(const sigset_t *set,\
+ SYS___ACL_GET_FILE = 347 // { int __acl_get_file(const char *path, \
+ SYS___ACL_SET_FILE = 348 // { int __acl_set_file(const char *path, \
+ SYS___ACL_GET_FD = 349 // { int __acl_get_fd(int filedes, acl_type_t type, \
+ SYS___ACL_SET_FD = 350 // { int __acl_set_fd(int filedes, acl_type_t type, \
+ SYS___ACL_DELETE_FILE = 351 // { int __acl_delete_file(const char *path, \
+ SYS___ACL_DELETE_FD = 352 // { int __acl_delete_fd(int filedes, acl_type_t type); }
+ SYS___ACL_ACLCHECK_FILE = 353 // { int __acl_aclcheck_file(const char *path, \
+ SYS___ACL_ACLCHECK_FD = 354 // { int __acl_aclcheck_fd(int filedes, acl_type_t type, \
+ SYS_EXTATTRCTL = 355 // { int extattrctl(const char *path, int cmd, \
+ SYS_EXTATTR_SET_FILE = 356 // { int extattr_set_file(const char *path, \
+ SYS_EXTATTR_GET_FILE = 357 // { int extattr_get_file(const char *path, \
+ SYS_EXTATTR_DELETE_FILE = 358 // { int extattr_delete_file(const char *path, \
+ SYS_AIO_WAITCOMPLETE = 359 // { int aio_waitcomplete(struct aiocb **aiocbp, struct timespec *timeout); }
+ SYS_GETRESUID = 360 // { int getresuid(uid_t *ruid, uid_t *euid, uid_t *suid); }
+ SYS_GETRESGID = 361 // { int getresgid(gid_t *rgid, gid_t *egid, gid_t *sgid); }
+ SYS_KQUEUE = 362 // { int kqueue(void); }
+ SYS_KEVENT = 363 // { int kevent(int fd, \
+ SYS_SCTP_PEELOFF = 364 // { int sctp_peeloff(int sd, caddr_t name ); }
+ SYS_LCHFLAGS = 391 // { int lchflags(char *path, int flags); }
+ SYS_UUIDGEN = 392 // { int uuidgen(struct uuid *store, int count); }
+ SYS_SENDFILE = 393 // { int sendfile(int fd, int s, off_t offset, size_t nbytes, \
+ SYS_VARSYM_SET = 450 // { int varsym_set(int level, const char *name, const char *data); }
+ SYS_VARSYM_GET = 451 // { int varsym_get(int mask, const char *wild, char *buf, int bufsize); }
+ SYS_VARSYM_LIST = 452 // { int varsym_list(int level, char *buf, int maxsize, int *marker); }
+ SYS_EXEC_SYS_REGISTER = 465 // { int exec_sys_register(void *entry); }
+ SYS_EXEC_SYS_UNREGISTER = 466 // { int exec_sys_unregister(int id); }
+ SYS_SYS_CHECKPOINT = 467 // { int sys_checkpoint(int type, int fd, pid_t pid, int retval); }
+ SYS_MOUNTCTL = 468 // { int mountctl(const char *path, int op, int fd, const void *ctl, int ctllen, void *buf, int buflen); }
+ SYS_UMTX_SLEEP = 469 // { int umtx_sleep(volatile const int *ptr, int value, int timeout); }
+ SYS_UMTX_WAKEUP = 470 // { int umtx_wakeup(volatile const int *ptr, int count); }
+ SYS_JAIL_ATTACH = 471 // { int jail_attach(int jid); }
+ SYS_SET_TLS_AREA = 472 // { int set_tls_area(int which, struct tls_info *info, size_t infosize); }
+ SYS_GET_TLS_AREA = 473 // { int get_tls_area(int which, struct tls_info *info, size_t infosize); }
+ SYS_CLOSEFROM = 474 // { int closefrom(int fd); }
+ SYS_STAT = 475 // { int stat(const char *path, struct stat *ub); }
+ SYS_FSTAT = 476 // { int fstat(int fd, struct stat *sb); }
+ SYS_LSTAT = 477 // { int lstat(const char *path, struct stat *ub); }
+ SYS_FHSTAT = 478 // { int fhstat(const struct fhandle *u_fhp, struct stat *sb); }
+ SYS_GETDIRENTRIES = 479 // { int getdirentries(int fd, char *buf, u_int count, \
+ SYS_GETDENTS = 480 // { int getdents(int fd, char *buf, size_t count); }
+ SYS_USCHED_SET = 481 // { int usched_set(pid_t pid, int cmd, void *data, \
+ SYS_EXTACCEPT = 482 // { int extaccept(int s, int flags, caddr_t name, int *anamelen); }
+ SYS_EXTCONNECT = 483 // { int extconnect(int s, int flags, caddr_t name, int namelen); }
+ SYS_MCONTROL = 485 // { int mcontrol(void *addr, size_t len, int behav, off_t value); }
+ SYS_VMSPACE_CREATE = 486 // { int vmspace_create(void *id, int type, void *data); }
+ SYS_VMSPACE_DESTROY = 487 // { int vmspace_destroy(void *id); }
+ SYS_VMSPACE_CTL = 488 // { int vmspace_ctl(void *id, int cmd, \
+ SYS_VMSPACE_MMAP = 489 // { int vmspace_mmap(void *id, void *addr, size_t len, \
+ SYS_VMSPACE_MUNMAP = 490 // { int vmspace_munmap(void *id, void *addr, \
+ SYS_VMSPACE_MCONTROL = 491 // { int vmspace_mcontrol(void *id, void *addr, \
+ SYS_VMSPACE_PREAD = 492 // { ssize_t vmspace_pread(void *id, void *buf, \
+ SYS_VMSPACE_PWRITE = 493 // { ssize_t vmspace_pwrite(void *id, const void *buf, \
+ SYS_EXTEXIT = 494 // { void extexit(int how, int status, void *addr); }
+ SYS_LWP_CREATE = 495 // { int lwp_create(struct lwp_params *params); }
+ SYS_LWP_GETTID = 496 // { lwpid_t lwp_gettid(void); }
+ SYS_LWP_KILL = 497 // { int lwp_kill(pid_t pid, lwpid_t tid, int signum); }
+ SYS_LWP_RTPRIO = 498 // { int lwp_rtprio(int function, pid_t pid, lwpid_t tid, struct rtprio *rtp); }
+ SYS_PSELECT = 499 // { int pselect(int nd, fd_set *in, fd_set *ou, \
+ SYS_STATVFS = 500 // { int statvfs(const char *path, struct statvfs *buf); }
+ SYS_FSTATVFS = 501 // { int fstatvfs(int fd, struct statvfs *buf); }
+ SYS_FHSTATVFS = 502 // { int fhstatvfs(const struct fhandle *u_fhp, struct statvfs *buf); }
+ SYS_GETVFSSTAT = 503 // { int getvfsstat(struct statfs *buf, \
+ SYS_OPENAT = 504 // { int openat(int fd, char *path, int flags, int mode); }
+ SYS_FSTATAT = 505 // { int fstatat(int fd, char *path, \
+ SYS_FCHMODAT = 506 // { int fchmodat(int fd, char *path, int mode, \
+ SYS_FCHOWNAT = 507 // { int fchownat(int fd, char *path, int uid, int gid, \
+ SYS_UNLINKAT = 508 // { int unlinkat(int fd, char *path, int flags); }
+ SYS_FACCESSAT = 509 // { int faccessat(int fd, char *path, int amode, \
+ SYS_MQ_OPEN = 510 // { mqd_t mq_open(const char * name, int oflag, \
+ SYS_MQ_CLOSE = 511 // { int mq_close(mqd_t mqdes); }
+ SYS_MQ_UNLINK = 512 // { int mq_unlink(const char *name); }
+ SYS_MQ_GETATTR = 513 // { int mq_getattr(mqd_t mqdes, \
+ SYS_MQ_SETATTR = 514 // { int mq_setattr(mqd_t mqdes, \
+ SYS_MQ_NOTIFY = 515 // { int mq_notify(mqd_t mqdes, \
+ SYS_MQ_SEND = 516 // { int mq_send(mqd_t mqdes, const char *msg_ptr, \
+ SYS_MQ_RECEIVE = 517 // { ssize_t mq_receive(mqd_t mqdes, char *msg_ptr, \
+ SYS_MQ_TIMEDSEND = 518 // { int mq_timedsend(mqd_t mqdes, \
+ SYS_MQ_TIMEDRECEIVE = 519 // { ssize_t mq_timedreceive(mqd_t mqdes, \
+ SYS_IOPRIO_SET = 520 // { int ioprio_set(int which, int who, int prio); }
+ SYS_IOPRIO_GET = 521 // { int ioprio_get(int which, int who); }
+ SYS_CHROOT_KERNEL = 522 // { int chroot_kernel(char *path); }
+ SYS_RENAMEAT = 523 // { int renameat(int oldfd, char *old, int newfd, \
+ SYS_MKDIRAT = 524 // { int mkdirat(int fd, char *path, mode_t mode); }
+ SYS_MKFIFOAT = 525 // { int mkfifoat(int fd, char *path, mode_t mode); }
+ SYS_MKNODAT = 526 // { int mknodat(int fd, char *path, mode_t mode, \
+ SYS_READLINKAT = 527 // { int readlinkat(int fd, char *path, char *buf, \
+ SYS_SYMLINKAT = 528 // { int symlinkat(char *path1, int fd, char *path2); }
+ SYS_SWAPOFF = 529 // { int swapoff(char *name); }
+ SYS_VQUOTACTL = 530 // { int vquotactl(const char *path, \
+ SYS_LINKAT = 531 // { int linkat(int fd1, char *path1, int fd2, \
+ SYS_EACCESS = 532 // { int eaccess(char *path, int flags); }
+ SYS_LPATHCONF = 533 // { int lpathconf(char *path, int name); }
+ SYS_VMM_GUEST_CTL = 534 // { int vmm_guest_ctl(int op, struct vmm_guest_options *options); }
+ SYS_VMM_GUEST_SYNC_ADDR = 535 // { int vmm_guest_sync_addr(long *dstaddr, long *srcaddr); }
+)
diff --git a/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zsysnum_freebsd_386.go b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zsysnum_freebsd_386.go
new file mode 100644
index 000000000..262a84536
--- /dev/null
+++ b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zsysnum_freebsd_386.go
@@ -0,0 +1,351 @@
+// mksysnum_freebsd.pl
+// MACHINE GENERATED BY THE ABOVE COMMAND; DO NOT EDIT
+
+// +build 386,freebsd
+
+package unix
+
+const (
+ // SYS_NOSYS = 0; // { int nosys(void); } syscall nosys_args int
+ SYS_EXIT = 1 // { void sys_exit(int rval); } exit \
+ SYS_FORK = 2 // { int fork(void); }
+ SYS_READ = 3 // { ssize_t read(int fd, void *buf, \
+ SYS_WRITE = 4 // { ssize_t write(int fd, const void *buf, \
+ SYS_OPEN = 5 // { int open(char *path, int flags, int mode); }
+ SYS_CLOSE = 6 // { int close(int fd); }
+ SYS_WAIT4 = 7 // { int wait4(int pid, int *status, \
+ SYS_LINK = 9 // { int link(char *path, char *link); }
+ SYS_UNLINK = 10 // { int unlink(char *path); }
+ SYS_CHDIR = 12 // { int chdir(char *path); }
+ SYS_FCHDIR = 13 // { int fchdir(int fd); }
+ SYS_MKNOD = 14 // { int mknod(char *path, int mode, int dev); }
+ SYS_CHMOD = 15 // { int chmod(char *path, int mode); }
+ SYS_CHOWN = 16 // { int chown(char *path, int uid, int gid); }
+ SYS_OBREAK = 17 // { int obreak(char *nsize); } break \
+ SYS_GETPID = 20 // { pid_t getpid(void); }
+ SYS_MOUNT = 21 // { int mount(char *type, char *path, \
+ SYS_UNMOUNT = 22 // { int unmount(char *path, int flags); }
+ SYS_SETUID = 23 // { int setuid(uid_t uid); }
+ SYS_GETUID = 24 // { uid_t getuid(void); }
+ SYS_GETEUID = 25 // { uid_t geteuid(void); }
+ SYS_PTRACE = 26 // { int ptrace(int req, pid_t pid, \
+ SYS_RECVMSG = 27 // { int recvmsg(int s, struct msghdr *msg, \
+ SYS_SENDMSG = 28 // { int sendmsg(int s, struct msghdr *msg, \
+ SYS_RECVFROM = 29 // { int recvfrom(int s, caddr_t buf, \
+ SYS_ACCEPT = 30 // { int accept(int s, \
+ SYS_GETPEERNAME = 31 // { int getpeername(int fdes, \
+ SYS_GETSOCKNAME = 32 // { int getsockname(int fdes, \
+ SYS_ACCESS = 33 // { int access(char *path, int amode); }
+ SYS_CHFLAGS = 34 // { int chflags(const char *path, u_long flags); }
+ SYS_FCHFLAGS = 35 // { int fchflags(int fd, u_long flags); }
+ SYS_SYNC = 36 // { int sync(void); }
+ SYS_KILL = 37 // { int kill(int pid, int signum); }
+ SYS_GETPPID = 39 // { pid_t getppid(void); }
+ SYS_DUP = 41 // { int dup(u_int fd); }
+ SYS_PIPE = 42 // { int pipe(void); }
+ SYS_GETEGID = 43 // { gid_t getegid(void); }
+ SYS_PROFIL = 44 // { int profil(caddr_t samples, size_t size, \
+ SYS_KTRACE = 45 // { int ktrace(const char *fname, int ops, \
+ SYS_GETGID = 47 // { gid_t getgid(void); }
+ SYS_GETLOGIN = 49 // { int getlogin(char *namebuf, u_int \
+ SYS_SETLOGIN = 50 // { int setlogin(char *namebuf); }
+ SYS_ACCT = 51 // { int acct(char *path); }
+ SYS_SIGALTSTACK = 53 // { int sigaltstack(stack_t *ss, \
+ SYS_IOCTL = 54 // { int ioctl(int fd, u_long com, \
+ SYS_REBOOT = 55 // { int reboot(int opt); }
+ SYS_REVOKE = 56 // { int revoke(char *path); }
+ SYS_SYMLINK = 57 // { int symlink(char *path, char *link); }
+ SYS_READLINK = 58 // { ssize_t readlink(char *path, char *buf, \
+ SYS_EXECVE = 59 // { int execve(char *fname, char **argv, \
+ SYS_UMASK = 60 // { int umask(int newmask); } umask umask_args \
+ SYS_CHROOT = 61 // { int chroot(char *path); }
+ SYS_MSYNC = 65 // { int msync(void *addr, size_t len, \
+ SYS_VFORK = 66 // { int vfork(void); }
+ SYS_SBRK = 69 // { int sbrk(int incr); }
+ SYS_SSTK = 70 // { int sstk(int incr); }
+ SYS_OVADVISE = 72 // { int ovadvise(int anom); } vadvise \
+ SYS_MUNMAP = 73 // { int munmap(void *addr, size_t len); }
+ SYS_MPROTECT = 74 // { int mprotect(const void *addr, size_t len, \
+ SYS_MADVISE = 75 // { int madvise(void *addr, size_t len, \
+ SYS_MINCORE = 78 // { int mincore(const void *addr, size_t len, \
+ SYS_GETGROUPS = 79 // { int getgroups(u_int gidsetsize, \
+ SYS_SETGROUPS = 80 // { int setgroups(u_int gidsetsize, \
+ SYS_GETPGRP = 81 // { int getpgrp(void); }
+ SYS_SETPGID = 82 // { int setpgid(int pid, int pgid); }
+ SYS_SETITIMER = 83 // { int setitimer(u_int which, struct \
+ SYS_SWAPON = 85 // { int swapon(char *name); }
+ SYS_GETITIMER = 86 // { int getitimer(u_int which, \
+ SYS_GETDTABLESIZE = 89 // { int getdtablesize(void); }
+ SYS_DUP2 = 90 // { int dup2(u_int from, u_int to); }
+ SYS_FCNTL = 92 // { int fcntl(int fd, int cmd, long arg); }
+ SYS_SELECT = 93 // { int select(int nd, fd_set *in, fd_set *ou, \
+ SYS_FSYNC = 95 // { int fsync(int fd); }
+ SYS_SETPRIORITY = 96 // { int setpriority(int which, int who, \
+ SYS_SOCKET = 97 // { int socket(int domain, int type, \
+ SYS_CONNECT = 98 // { int connect(int s, caddr_t name, \
+ SYS_GETPRIORITY = 100 // { int getpriority(int which, int who); }
+ SYS_BIND = 104 // { int bind(int s, caddr_t name, \
+ SYS_SETSOCKOPT = 105 // { int setsockopt(int s, int level, int name, \
+ SYS_LISTEN = 106 // { int listen(int s, int backlog); }
+ SYS_GETTIMEOFDAY = 116 // { int gettimeofday(struct timeval *tp, \
+ SYS_GETRUSAGE = 117 // { int getrusage(int who, \
+ SYS_GETSOCKOPT = 118 // { int getsockopt(int s, int level, int name, \
+ SYS_READV = 120 // { int readv(int fd, struct iovec *iovp, \
+ SYS_WRITEV = 121 // { int writev(int fd, struct iovec *iovp, \
+ SYS_SETTIMEOFDAY = 122 // { int settimeofday(struct timeval *tv, \
+ SYS_FCHOWN = 123 // { int fchown(int fd, int uid, int gid); }
+ SYS_FCHMOD = 124 // { int fchmod(int fd, int mode); }
+ SYS_SETREUID = 126 // { int setreuid(int ruid, int euid); }
+ SYS_SETREGID = 127 // { int setregid(int rgid, int egid); }
+ SYS_RENAME = 128 // { int rename(char *from, char *to); }
+ SYS_FLOCK = 131 // { int flock(int fd, int how); }
+ SYS_MKFIFO = 132 // { int mkfifo(char *path, int mode); }
+ SYS_SENDTO = 133 // { int sendto(int s, caddr_t buf, size_t len, \
+ SYS_SHUTDOWN = 134 // { int shutdown(int s, int how); }
+ SYS_SOCKETPAIR = 135 // { int socketpair(int domain, int type, \
+ SYS_MKDIR = 136 // { int mkdir(char *path, int mode); }
+ SYS_RMDIR = 137 // { int rmdir(char *path); }
+ SYS_UTIMES = 138 // { int utimes(char *path, \
+ SYS_ADJTIME = 140 // { int adjtime(struct timeval *delta, \
+ SYS_SETSID = 147 // { int setsid(void); }
+ SYS_QUOTACTL = 148 // { int quotactl(char *path, int cmd, int uid, \
+ SYS_LGETFH = 160 // { int lgetfh(char *fname, \
+ SYS_GETFH = 161 // { int getfh(char *fname, \
+ SYS_SYSARCH = 165 // { int sysarch(int op, char *parms); }
+ SYS_RTPRIO = 166 // { int rtprio(int function, pid_t pid, \
+ SYS_FREEBSD6_PREAD = 173 // { ssize_t freebsd6_pread(int fd, void *buf, \
+ SYS_FREEBSD6_PWRITE = 174 // { ssize_t freebsd6_pwrite(int fd, \
+ SYS_SETFIB = 175 // { int setfib(int fibnum); }
+ SYS_NTP_ADJTIME = 176 // { int ntp_adjtime(struct timex *tp); }
+ SYS_SETGID = 181 // { int setgid(gid_t gid); }
+ SYS_SETEGID = 182 // { int setegid(gid_t egid); }
+ SYS_SETEUID = 183 // { int seteuid(uid_t euid); }
+ SYS_STAT = 188 // { int stat(char *path, struct stat *ub); }
+ SYS_FSTAT = 189 // { int fstat(int fd, struct stat *sb); }
+ SYS_LSTAT = 190 // { int lstat(char *path, struct stat *ub); }
+ SYS_PATHCONF = 191 // { int pathconf(char *path, int name); }
+ SYS_FPATHCONF = 192 // { int fpathconf(int fd, int name); }
+ SYS_GETRLIMIT = 194 // { int getrlimit(u_int which, \
+ SYS_SETRLIMIT = 195 // { int setrlimit(u_int which, \
+ SYS_GETDIRENTRIES = 196 // { int getdirentries(int fd, char *buf, \
+ SYS_FREEBSD6_MMAP = 197 // { caddr_t freebsd6_mmap(caddr_t addr, \
+ SYS_FREEBSD6_LSEEK = 199 // { off_t freebsd6_lseek(int fd, int pad, \
+ SYS_FREEBSD6_TRUNCATE = 200 // { int freebsd6_truncate(char *path, int pad, \
+ SYS_FREEBSD6_FTRUNCATE = 201 // { int freebsd6_ftruncate(int fd, int pad, \
+ SYS___SYSCTL = 202 // { int __sysctl(int *name, u_int namelen, \
+ SYS_MLOCK = 203 // { int mlock(const void *addr, size_t len); }
+ SYS_MUNLOCK = 204 // { int munlock(const void *addr, size_t len); }
+ SYS_UNDELETE = 205 // { int undelete(char *path); }
+ SYS_FUTIMES = 206 // { int futimes(int fd, struct timeval *tptr); }
+ SYS_GETPGID = 207 // { int getpgid(pid_t pid); }
+ SYS_POLL = 209 // { int poll(struct pollfd *fds, u_int nfds, \
+ SYS_CLOCK_GETTIME = 232 // { int clock_gettime(clockid_t clock_id, \
+ SYS_CLOCK_SETTIME = 233 // { int clock_settime( \
+ SYS_CLOCK_GETRES = 234 // { int clock_getres(clockid_t clock_id, \
+ SYS_KTIMER_CREATE = 235 // { int ktimer_create(clockid_t clock_id, \
+ SYS_KTIMER_DELETE = 236 // { int ktimer_delete(int timerid); }
+ SYS_KTIMER_SETTIME = 237 // { int ktimer_settime(int timerid, int flags, \
+ SYS_KTIMER_GETTIME = 238 // { int ktimer_gettime(int timerid, struct \
+ SYS_KTIMER_GETOVERRUN = 239 // { int ktimer_getoverrun(int timerid); }
+ SYS_NANOSLEEP = 240 // { int nanosleep(const struct timespec *rqtp, \
+ SYS_FFCLOCK_GETCOUNTER = 241 // { int ffclock_getcounter(ffcounter *ffcount); }
+ SYS_FFCLOCK_SETESTIMATE = 242 // { int ffclock_setestimate( \
+ SYS_FFCLOCK_GETESTIMATE = 243 // { int ffclock_getestimate( \
+ SYS_CLOCK_GETCPUCLOCKID2 = 247 // { int clock_getcpuclockid2(id_t id,\
+ SYS_NTP_GETTIME = 248 // { int ntp_gettime(struct ntptimeval *ntvp); }
+ SYS_MINHERIT = 250 // { int minherit(void *addr, size_t len, \
+ SYS_RFORK = 251 // { int rfork(int flags); }
+ SYS_OPENBSD_POLL = 252 // { int openbsd_poll(struct pollfd *fds, \
+ SYS_ISSETUGID = 253 // { int issetugid(void); }
+ SYS_LCHOWN = 254 // { int lchown(char *path, int uid, int gid); }
+ SYS_GETDENTS = 272 // { int getdents(int fd, char *buf, \
+ SYS_LCHMOD = 274 // { int lchmod(char *path, mode_t mode); }
+ SYS_LUTIMES = 276 // { int lutimes(char *path, \
+ SYS_NSTAT = 278 // { int nstat(char *path, struct nstat *ub); }
+ SYS_NFSTAT = 279 // { int nfstat(int fd, struct nstat *sb); }
+ SYS_NLSTAT = 280 // { int nlstat(char *path, struct nstat *ub); }
+ SYS_PREADV = 289 // { ssize_t preadv(int fd, struct iovec *iovp, \
+ SYS_PWRITEV = 290 // { ssize_t pwritev(int fd, struct iovec *iovp, \
+ SYS_FHOPEN = 298 // { int fhopen(const struct fhandle *u_fhp, \
+ SYS_FHSTAT = 299 // { int fhstat(const struct fhandle *u_fhp, \
+ SYS_MODNEXT = 300 // { int modnext(int modid); }
+ SYS_MODSTAT = 301 // { int modstat(int modid, \
+ SYS_MODFNEXT = 302 // { int modfnext(int modid); }
+ SYS_MODFIND = 303 // { int modfind(const char *name); }
+ SYS_KLDLOAD = 304 // { int kldload(const char *file); }
+ SYS_KLDUNLOAD = 305 // { int kldunload(int fileid); }
+ SYS_KLDFIND = 306 // { int kldfind(const char *file); }
+ SYS_KLDNEXT = 307 // { int kldnext(int fileid); }
+ SYS_KLDSTAT = 308 // { int kldstat(int fileid, struct \
+ SYS_KLDFIRSTMOD = 309 // { int kldfirstmod(int fileid); }
+ SYS_GETSID = 310 // { int getsid(pid_t pid); }
+ SYS_SETRESUID = 311 // { int setresuid(uid_t ruid, uid_t euid, \
+ SYS_SETRESGID = 312 // { int setresgid(gid_t rgid, gid_t egid, \
+ SYS_YIELD = 321 // { int yield(void); }
+ SYS_MLOCKALL = 324 // { int mlockall(int how); }
+ SYS_MUNLOCKALL = 325 // { int munlockall(void); }
+ SYS___GETCWD = 326 // { int __getcwd(char *buf, u_int buflen); }
+ SYS_SCHED_SETPARAM = 327 // { int sched_setparam (pid_t pid, \
+ SYS_SCHED_GETPARAM = 328 // { int sched_getparam (pid_t pid, struct \
+ SYS_SCHED_SETSCHEDULER = 329 // { int sched_setscheduler (pid_t pid, int \
+ SYS_SCHED_GETSCHEDULER = 330 // { int sched_getscheduler (pid_t pid); }
+ SYS_SCHED_YIELD = 331 // { int sched_yield (void); }
+ SYS_SCHED_GET_PRIORITY_MAX = 332 // { int sched_get_priority_max (int policy); }
+ SYS_SCHED_GET_PRIORITY_MIN = 333 // { int sched_get_priority_min (int policy); }
+ SYS_SCHED_RR_GET_INTERVAL = 334 // { int sched_rr_get_interval (pid_t pid, \
+ SYS_UTRACE = 335 // { int utrace(const void *addr, size_t len); }
+ SYS_KLDSYM = 337 // { int kldsym(int fileid, int cmd, \
+ SYS_JAIL = 338 // { int jail(struct jail *jail); }
+ SYS_SIGPROCMASK = 340 // { int sigprocmask(int how, \
+ SYS_SIGSUSPEND = 341 // { int sigsuspend(const sigset_t *sigmask); }
+ SYS_SIGPENDING = 343 // { int sigpending(sigset_t *set); }
+ SYS_SIGTIMEDWAIT = 345 // { int sigtimedwait(const sigset_t *set, \
+ SYS_SIGWAITINFO = 346 // { int sigwaitinfo(const sigset_t *set, \
+ SYS___ACL_GET_FILE = 347 // { int __acl_get_file(const char *path, \
+ SYS___ACL_SET_FILE = 348 // { int __acl_set_file(const char *path, \
+ SYS___ACL_GET_FD = 349 // { int __acl_get_fd(int filedes, \
+ SYS___ACL_SET_FD = 350 // { int __acl_set_fd(int filedes, \
+ SYS___ACL_DELETE_FILE = 351 // { int __acl_delete_file(const char *path, \
+ SYS___ACL_DELETE_FD = 352 // { int __acl_delete_fd(int filedes, \
+ SYS___ACL_ACLCHECK_FILE = 353 // { int __acl_aclcheck_file(const char *path, \
+ SYS___ACL_ACLCHECK_FD = 354 // { int __acl_aclcheck_fd(int filedes, \
+ SYS_EXTATTRCTL = 355 // { int extattrctl(const char *path, int cmd, \
+ SYS_EXTATTR_SET_FILE = 356 // { ssize_t extattr_set_file( \
+ SYS_EXTATTR_GET_FILE = 357 // { ssize_t extattr_get_file( \
+ SYS_EXTATTR_DELETE_FILE = 358 // { int extattr_delete_file(const char *path, \
+ SYS_GETRESUID = 360 // { int getresuid(uid_t *ruid, uid_t *euid, \
+ SYS_GETRESGID = 361 // { int getresgid(gid_t *rgid, gid_t *egid, \
+ SYS_KQUEUE = 362 // { int kqueue(void); }
+ SYS_KEVENT = 363 // { int kevent(int fd, \
+ SYS_EXTATTR_SET_FD = 371 // { ssize_t extattr_set_fd(int fd, \
+ SYS_EXTATTR_GET_FD = 372 // { ssize_t extattr_get_fd(int fd, \
+ SYS_EXTATTR_DELETE_FD = 373 // { int extattr_delete_fd(int fd, \
+ SYS___SETUGID = 374 // { int __setugid(int flag); }
+ SYS_EACCESS = 376 // { int eaccess(char *path, int amode); }
+ SYS_NMOUNT = 378 // { int nmount(struct iovec *iovp, \
+ SYS___MAC_GET_PROC = 384 // { int __mac_get_proc(struct mac *mac_p); }
+ SYS___MAC_SET_PROC = 385 // { int __mac_set_proc(struct mac *mac_p); }
+ SYS___MAC_GET_FD = 386 // { int __mac_get_fd(int fd, \
+ SYS___MAC_GET_FILE = 387 // { int __mac_get_file(const char *path_p, \
+ SYS___MAC_SET_FD = 388 // { int __mac_set_fd(int fd, \
+ SYS___MAC_SET_FILE = 389 // { int __mac_set_file(const char *path_p, \
+ SYS_KENV = 390 // { int kenv(int what, const char *name, \
+ SYS_LCHFLAGS = 391 // { int lchflags(const char *path, \
+ SYS_UUIDGEN = 392 // { int uuidgen(struct uuid *store, \
+ SYS_SENDFILE = 393 // { int sendfile(int fd, int s, off_t offset, \
+ SYS_MAC_SYSCALL = 394 // { int mac_syscall(const char *policy, \
+ SYS_GETFSSTAT = 395 // { int getfsstat(struct statfs *buf, \
+ SYS_STATFS = 396 // { int statfs(char *path, \
+ SYS_FSTATFS = 397 // { int fstatfs(int fd, struct statfs *buf); }
+ SYS_FHSTATFS = 398 // { int fhstatfs(const struct fhandle *u_fhp, \
+ SYS___MAC_GET_PID = 409 // { int __mac_get_pid(pid_t pid, \
+ SYS___MAC_GET_LINK = 410 // { int __mac_get_link(const char *path_p, \
+ SYS___MAC_SET_LINK = 411 // { int __mac_set_link(const char *path_p, \
+ SYS_EXTATTR_SET_LINK = 412 // { ssize_t extattr_set_link( \
+ SYS_EXTATTR_GET_LINK = 413 // { ssize_t extattr_get_link( \
+ SYS_EXTATTR_DELETE_LINK = 414 // { int extattr_delete_link( \
+ SYS___MAC_EXECVE = 415 // { int __mac_execve(char *fname, char **argv, \
+ SYS_SIGACTION = 416 // { int sigaction(int sig, \
+ SYS_SIGRETURN = 417 // { int sigreturn( \
+ SYS_GETCONTEXT = 421 // { int getcontext(struct __ucontext *ucp); }
+ SYS_SETCONTEXT = 422 // { int setcontext( \
+ SYS_SWAPCONTEXT = 423 // { int swapcontext(struct __ucontext *oucp, \
+ SYS_SWAPOFF = 424 // { int swapoff(const char *name); }
+ SYS___ACL_GET_LINK = 425 // { int __acl_get_link(const char *path, \
+ SYS___ACL_SET_LINK = 426 // { int __acl_set_link(const char *path, \
+ SYS___ACL_DELETE_LINK = 427 // { int __acl_delete_link(const char *path, \
+ SYS___ACL_ACLCHECK_LINK = 428 // { int __acl_aclcheck_link(const char *path, \
+ SYS_SIGWAIT = 429 // { int sigwait(const sigset_t *set, \
+ SYS_THR_CREATE = 430 // { int thr_create(ucontext_t *ctx, long *id, \
+ SYS_THR_EXIT = 431 // { void thr_exit(long *state); }
+ SYS_THR_SELF = 432 // { int thr_self(long *id); }
+ SYS_THR_KILL = 433 // { int thr_kill(long id, int sig); }
+ SYS__UMTX_LOCK = 434 // { int _umtx_lock(struct umtx *umtx); }
+ SYS__UMTX_UNLOCK = 435 // { int _umtx_unlock(struct umtx *umtx); }
+ SYS_JAIL_ATTACH = 436 // { int jail_attach(int jid); }
+ SYS_EXTATTR_LIST_FD = 437 // { ssize_t extattr_list_fd(int fd, \
+ SYS_EXTATTR_LIST_FILE = 438 // { ssize_t extattr_list_file( \
+ SYS_EXTATTR_LIST_LINK = 439 // { ssize_t extattr_list_link( \
+ SYS_THR_SUSPEND = 442 // { int thr_suspend( \
+ SYS_THR_WAKE = 443 // { int thr_wake(long id); }
+ SYS_KLDUNLOADF = 444 // { int kldunloadf(int fileid, int flags); }
+ SYS_AUDIT = 445 // { int audit(const void *record, \
+ SYS_AUDITON = 446 // { int auditon(int cmd, void *data, \
+ SYS_GETAUID = 447 // { int getauid(uid_t *auid); }
+ SYS_SETAUID = 448 // { int setauid(uid_t *auid); }
+ SYS_GETAUDIT = 449 // { int getaudit(struct auditinfo *auditinfo); }
+ SYS_SETAUDIT = 450 // { int setaudit(struct auditinfo *auditinfo); }
+ SYS_GETAUDIT_ADDR = 451 // { int getaudit_addr( \
+ SYS_SETAUDIT_ADDR = 452 // { int setaudit_addr( \
+ SYS_AUDITCTL = 453 // { int auditctl(char *path); }
+ SYS__UMTX_OP = 454 // { int _umtx_op(void *obj, int op, \
+ SYS_THR_NEW = 455 // { int thr_new(struct thr_param *param, \
+ SYS_SIGQUEUE = 456 // { int sigqueue(pid_t pid, int signum, void *value); }
+ SYS_ABORT2 = 463 // { int abort2(const char *why, int nargs, void **args); }
+ SYS_THR_SET_NAME = 464 // { int thr_set_name(long id, const char *name); }
+ SYS_RTPRIO_THREAD = 466 // { int rtprio_thread(int function, \
+ SYS_SCTP_PEELOFF = 471 // { int sctp_peeloff(int sd, uint32_t name); }
+ SYS_SCTP_GENERIC_SENDMSG = 472 // { int sctp_generic_sendmsg(int sd, caddr_t msg, int mlen, \
+ SYS_SCTP_GENERIC_SENDMSG_IOV = 473 // { int sctp_generic_sendmsg_iov(int sd, struct iovec *iov, int iovlen, \
+ SYS_SCTP_GENERIC_RECVMSG = 474 // { int sctp_generic_recvmsg(int sd, struct iovec *iov, int iovlen, \
+ SYS_PREAD = 475 // { ssize_t pread(int fd, void *buf, \
+ SYS_PWRITE = 476 // { ssize_t pwrite(int fd, const void *buf, \
+ SYS_MMAP = 477 // { caddr_t mmap(caddr_t addr, size_t len, \
+ SYS_LSEEK = 478 // { off_t lseek(int fd, off_t offset, \
+ SYS_TRUNCATE = 479 // { int truncate(char *path, off_t length); }
+ SYS_FTRUNCATE = 480 // { int ftruncate(int fd, off_t length); }
+ SYS_THR_KILL2 = 481 // { int thr_kill2(pid_t pid, long id, int sig); }
+ SYS_SHM_OPEN = 482 // { int shm_open(const char *path, int flags, \
+ SYS_SHM_UNLINK = 483 // { int shm_unlink(const char *path); }
+ SYS_CPUSET = 484 // { int cpuset(cpusetid_t *setid); }
+ SYS_CPUSET_SETID = 485 // { int cpuset_setid(cpuwhich_t which, id_t id, \
+ SYS_CPUSET_GETID = 486 // { int cpuset_getid(cpulevel_t level, \
+ SYS_CPUSET_GETAFFINITY = 487 // { int cpuset_getaffinity(cpulevel_t level, \
+ SYS_CPUSET_SETAFFINITY = 488 // { int cpuset_setaffinity(cpulevel_t level, \
+ SYS_FACCESSAT = 489 // { int faccessat(int fd, char *path, int amode, \
+ SYS_FCHMODAT = 490 // { int fchmodat(int fd, char *path, mode_t mode, \
+ SYS_FCHOWNAT = 491 // { int fchownat(int fd, char *path, uid_t uid, \
+ SYS_FEXECVE = 492 // { int fexecve(int fd, char **argv, \
+ SYS_FSTATAT = 493 // { int fstatat(int fd, char *path, \
+ SYS_FUTIMESAT = 494 // { int futimesat(int fd, char *path, \
+ SYS_LINKAT = 495 // { int linkat(int fd1, char *path1, int fd2, \
+ SYS_MKDIRAT = 496 // { int mkdirat(int fd, char *path, mode_t mode); }
+ SYS_MKFIFOAT = 497 // { int mkfifoat(int fd, char *path, mode_t mode); }
+ SYS_MKNODAT = 498 // { int mknodat(int fd, char *path, mode_t mode, \
+ SYS_OPENAT = 499 // { int openat(int fd, char *path, int flag, \
+ SYS_READLINKAT = 500 // { int readlinkat(int fd, char *path, char *buf, \
+ SYS_RENAMEAT = 501 // { int renameat(int oldfd, char *old, int newfd, \
+ SYS_SYMLINKAT = 502 // { int symlinkat(char *path1, int fd, \
+ SYS_UNLINKAT = 503 // { int unlinkat(int fd, char *path, int flag); }
+ SYS_POSIX_OPENPT = 504 // { int posix_openpt(int flags); }
+ SYS_JAIL_GET = 506 // { int jail_get(struct iovec *iovp, \
+ SYS_JAIL_SET = 507 // { int jail_set(struct iovec *iovp, \
+ SYS_JAIL_REMOVE = 508 // { int jail_remove(int jid); }
+ SYS_CLOSEFROM = 509 // { int closefrom(int lowfd); }
+ SYS_LPATHCONF = 513 // { int lpathconf(char *path, int name); }
+ SYS_CAP_NEW = 514 // { int cap_new(int fd, uint64_t rights); }
+ SYS_CAP_GETRIGHTS = 515 // { int cap_getrights(int fd, \
+ SYS_CAP_ENTER = 516 // { int cap_enter(void); }
+ SYS_CAP_GETMODE = 517 // { int cap_getmode(u_int *modep); }
+ SYS_PDFORK = 518 // { int pdfork(int *fdp, int flags); }
+ SYS_PDKILL = 519 // { int pdkill(int fd, int signum); }
+ SYS_PDGETPID = 520 // { int pdgetpid(int fd, pid_t *pidp); }
+ SYS_PSELECT = 522 // { int pselect(int nd, fd_set *in, \
+ SYS_GETLOGINCLASS = 523 // { int getloginclass(char *namebuf, \
+ SYS_SETLOGINCLASS = 524 // { int setloginclass(const char *namebuf); }
+ SYS_RCTL_GET_RACCT = 525 // { int rctl_get_racct(const void *inbufp, \
+ SYS_RCTL_GET_RULES = 526 // { int rctl_get_rules(const void *inbufp, \
+ SYS_RCTL_GET_LIMITS = 527 // { int rctl_get_limits(const void *inbufp, \
+ SYS_RCTL_ADD_RULE = 528 // { int rctl_add_rule(const void *inbufp, \
+ SYS_RCTL_REMOVE_RULE = 529 // { int rctl_remove_rule(const void *inbufp, \
+ SYS_POSIX_FALLOCATE = 530 // { int posix_fallocate(int fd, \
+ SYS_POSIX_FADVISE = 531 // { int posix_fadvise(int fd, off_t offset, \
+ SYS_WAIT6 = 532 // { int wait6(idtype_t idtype, id_t id, \
+ SYS_BINDAT = 538 // { int bindat(int fd, int s, caddr_t name, \
+ SYS_CONNECTAT = 539 // { int connectat(int fd, int s, caddr_t name, \
+ SYS_CHFLAGSAT = 540 // { int chflagsat(int fd, const char *path, \
+ SYS_ACCEPT4 = 541 // { int accept4(int s, \
+ SYS_PIPE2 = 542 // { int pipe2(int *fildes, int flags); }
+ SYS_PROCCTL = 544 // { int procctl(idtype_t idtype, id_t id, \
+ SYS_PPOLL = 545 // { int ppoll(struct pollfd *fds, u_int nfds, \
+)
diff --git a/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zsysnum_freebsd_amd64.go b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zsysnum_freebsd_amd64.go
new file mode 100644
index 000000000..57a60ea12
--- /dev/null
+++ b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zsysnum_freebsd_amd64.go
@@ -0,0 +1,351 @@
+// mksysnum_freebsd.pl
+// MACHINE GENERATED BY THE ABOVE COMMAND; DO NOT EDIT
+
+// +build amd64,freebsd
+
+package unix
+
+const (
+ // SYS_NOSYS = 0; // { int nosys(void); } syscall nosys_args int
+ SYS_EXIT = 1 // { void sys_exit(int rval); } exit \
+ SYS_FORK = 2 // { int fork(void); }
+ SYS_READ = 3 // { ssize_t read(int fd, void *buf, \
+ SYS_WRITE = 4 // { ssize_t write(int fd, const void *buf, \
+ SYS_OPEN = 5 // { int open(char *path, int flags, int mode); }
+ SYS_CLOSE = 6 // { int close(int fd); }
+ SYS_WAIT4 = 7 // { int wait4(int pid, int *status, \
+ SYS_LINK = 9 // { int link(char *path, char *link); }
+ SYS_UNLINK = 10 // { int unlink(char *path); }
+ SYS_CHDIR = 12 // { int chdir(char *path); }
+ SYS_FCHDIR = 13 // { int fchdir(int fd); }
+ SYS_MKNOD = 14 // { int mknod(char *path, int mode, int dev); }
+ SYS_CHMOD = 15 // { int chmod(char *path, int mode); }
+ SYS_CHOWN = 16 // { int chown(char *path, int uid, int gid); }
+ SYS_OBREAK = 17 // { int obreak(char *nsize); } break \
+ SYS_GETPID = 20 // { pid_t getpid(void); }
+ SYS_MOUNT = 21 // { int mount(char *type, char *path, \
+ SYS_UNMOUNT = 22 // { int unmount(char *path, int flags); }
+ SYS_SETUID = 23 // { int setuid(uid_t uid); }
+ SYS_GETUID = 24 // { uid_t getuid(void); }
+ SYS_GETEUID = 25 // { uid_t geteuid(void); }
+ SYS_PTRACE = 26 // { int ptrace(int req, pid_t pid, \
+ SYS_RECVMSG = 27 // { int recvmsg(int s, struct msghdr *msg, \
+ SYS_SENDMSG = 28 // { int sendmsg(int s, struct msghdr *msg, \
+ SYS_RECVFROM = 29 // { int recvfrom(int s, caddr_t buf, \
+ SYS_ACCEPT = 30 // { int accept(int s, \
+ SYS_GETPEERNAME = 31 // { int getpeername(int fdes, \
+ SYS_GETSOCKNAME = 32 // { int getsockname(int fdes, \
+ SYS_ACCESS = 33 // { int access(char *path, int amode); }
+ SYS_CHFLAGS = 34 // { int chflags(const char *path, u_long flags); }
+ SYS_FCHFLAGS = 35 // { int fchflags(int fd, u_long flags); }
+ SYS_SYNC = 36 // { int sync(void); }
+ SYS_KILL = 37 // { int kill(int pid, int signum); }
+ SYS_GETPPID = 39 // { pid_t getppid(void); }
+ SYS_DUP = 41 // { int dup(u_int fd); }
+ SYS_PIPE = 42 // { int pipe(void); }
+ SYS_GETEGID = 43 // { gid_t getegid(void); }
+ SYS_PROFIL = 44 // { int profil(caddr_t samples, size_t size, \
+ SYS_KTRACE = 45 // { int ktrace(const char *fname, int ops, \
+ SYS_GETGID = 47 // { gid_t getgid(void); }
+ SYS_GETLOGIN = 49 // { int getlogin(char *namebuf, u_int \
+ SYS_SETLOGIN = 50 // { int setlogin(char *namebuf); }
+ SYS_ACCT = 51 // { int acct(char *path); }
+ SYS_SIGALTSTACK = 53 // { int sigaltstack(stack_t *ss, \
+ SYS_IOCTL = 54 // { int ioctl(int fd, u_long com, \
+ SYS_REBOOT = 55 // { int reboot(int opt); }
+ SYS_REVOKE = 56 // { int revoke(char *path); }
+ SYS_SYMLINK = 57 // { int symlink(char *path, char *link); }
+ SYS_READLINK = 58 // { ssize_t readlink(char *path, char *buf, \
+ SYS_EXECVE = 59 // { int execve(char *fname, char **argv, \
+ SYS_UMASK = 60 // { int umask(int newmask); } umask umask_args \
+ SYS_CHROOT = 61 // { int chroot(char *path); }
+ SYS_MSYNC = 65 // { int msync(void *addr, size_t len, \
+ SYS_VFORK = 66 // { int vfork(void); }
+ SYS_SBRK = 69 // { int sbrk(int incr); }
+ SYS_SSTK = 70 // { int sstk(int incr); }
+ SYS_OVADVISE = 72 // { int ovadvise(int anom); } vadvise \
+ SYS_MUNMAP = 73 // { int munmap(void *addr, size_t len); }
+ SYS_MPROTECT = 74 // { int mprotect(const void *addr, size_t len, \
+ SYS_MADVISE = 75 // { int madvise(void *addr, size_t len, \
+ SYS_MINCORE = 78 // { int mincore(const void *addr, size_t len, \
+ SYS_GETGROUPS = 79 // { int getgroups(u_int gidsetsize, \
+ SYS_SETGROUPS = 80 // { int setgroups(u_int gidsetsize, \
+ SYS_GETPGRP = 81 // { int getpgrp(void); }
+ SYS_SETPGID = 82 // { int setpgid(int pid, int pgid); }
+ SYS_SETITIMER = 83 // { int setitimer(u_int which, struct \
+ SYS_SWAPON = 85 // { int swapon(char *name); }
+ SYS_GETITIMER = 86 // { int getitimer(u_int which, \
+ SYS_GETDTABLESIZE = 89 // { int getdtablesize(void); }
+ SYS_DUP2 = 90 // { int dup2(u_int from, u_int to); }
+ SYS_FCNTL = 92 // { int fcntl(int fd, int cmd, long arg); }
+ SYS_SELECT = 93 // { int select(int nd, fd_set *in, fd_set *ou, \
+ SYS_FSYNC = 95 // { int fsync(int fd); }
+ SYS_SETPRIORITY = 96 // { int setpriority(int which, int who, \
+ SYS_SOCKET = 97 // { int socket(int domain, int type, \
+ SYS_CONNECT = 98 // { int connect(int s, caddr_t name, \
+ SYS_GETPRIORITY = 100 // { int getpriority(int which, int who); }
+ SYS_BIND = 104 // { int bind(int s, caddr_t name, \
+ SYS_SETSOCKOPT = 105 // { int setsockopt(int s, int level, int name, \
+ SYS_LISTEN = 106 // { int listen(int s, int backlog); }
+ SYS_GETTIMEOFDAY = 116 // { int gettimeofday(struct timeval *tp, \
+ SYS_GETRUSAGE = 117 // { int getrusage(int who, \
+ SYS_GETSOCKOPT = 118 // { int getsockopt(int s, int level, int name, \
+ SYS_READV = 120 // { int readv(int fd, struct iovec *iovp, \
+ SYS_WRITEV = 121 // { int writev(int fd, struct iovec *iovp, \
+ SYS_SETTIMEOFDAY = 122 // { int settimeofday(struct timeval *tv, \
+ SYS_FCHOWN = 123 // { int fchown(int fd, int uid, int gid); }
+ SYS_FCHMOD = 124 // { int fchmod(int fd, int mode); }
+ SYS_SETREUID = 126 // { int setreuid(int ruid, int euid); }
+ SYS_SETREGID = 127 // { int setregid(int rgid, int egid); }
+ SYS_RENAME = 128 // { int rename(char *from, char *to); }
+ SYS_FLOCK = 131 // { int flock(int fd, int how); }
+ SYS_MKFIFO = 132 // { int mkfifo(char *path, int mode); }
+ SYS_SENDTO = 133 // { int sendto(int s, caddr_t buf, size_t len, \
+ SYS_SHUTDOWN = 134 // { int shutdown(int s, int how); }
+ SYS_SOCKETPAIR = 135 // { int socketpair(int domain, int type, \
+ SYS_MKDIR = 136 // { int mkdir(char *path, int mode); }
+ SYS_RMDIR = 137 // { int rmdir(char *path); }
+ SYS_UTIMES = 138 // { int utimes(char *path, \
+ SYS_ADJTIME = 140 // { int adjtime(struct timeval *delta, \
+ SYS_SETSID = 147 // { int setsid(void); }
+ SYS_QUOTACTL = 148 // { int quotactl(char *path, int cmd, int uid, \
+ SYS_LGETFH = 160 // { int lgetfh(char *fname, \
+ SYS_GETFH = 161 // { int getfh(char *fname, \
+ SYS_SYSARCH = 165 // { int sysarch(int op, char *parms); }
+ SYS_RTPRIO = 166 // { int rtprio(int function, pid_t pid, \
+ SYS_FREEBSD6_PREAD = 173 // { ssize_t freebsd6_pread(int fd, void *buf, \
+ SYS_FREEBSD6_PWRITE = 174 // { ssize_t freebsd6_pwrite(int fd, \
+ SYS_SETFIB = 175 // { int setfib(int fibnum); }
+ SYS_NTP_ADJTIME = 176 // { int ntp_adjtime(struct timex *tp); }
+ SYS_SETGID = 181 // { int setgid(gid_t gid); }
+ SYS_SETEGID = 182 // { int setegid(gid_t egid); }
+ SYS_SETEUID = 183 // { int seteuid(uid_t euid); }
+ SYS_STAT = 188 // { int stat(char *path, struct stat *ub); }
+ SYS_FSTAT = 189 // { int fstat(int fd, struct stat *sb); }
+ SYS_LSTAT = 190 // { int lstat(char *path, struct stat *ub); }
+ SYS_PATHCONF = 191 // { int pathconf(char *path, int name); }
+ SYS_FPATHCONF = 192 // { int fpathconf(int fd, int name); }
+ SYS_GETRLIMIT = 194 // { int getrlimit(u_int which, \
+ SYS_SETRLIMIT = 195 // { int setrlimit(u_int which, \
+ SYS_GETDIRENTRIES = 196 // { int getdirentries(int fd, char *buf, \
+ SYS_FREEBSD6_MMAP = 197 // { caddr_t freebsd6_mmap(caddr_t addr, \
+ SYS_FREEBSD6_LSEEK = 199 // { off_t freebsd6_lseek(int fd, int pad, \
+ SYS_FREEBSD6_TRUNCATE = 200 // { int freebsd6_truncate(char *path, int pad, \
+ SYS_FREEBSD6_FTRUNCATE = 201 // { int freebsd6_ftruncate(int fd, int pad, \
+ SYS___SYSCTL = 202 // { int __sysctl(int *name, u_int namelen, \
+ SYS_MLOCK = 203 // { int mlock(const void *addr, size_t len); }
+ SYS_MUNLOCK = 204 // { int munlock(const void *addr, size_t len); }
+ SYS_UNDELETE = 205 // { int undelete(char *path); }
+ SYS_FUTIMES = 206 // { int futimes(int fd, struct timeval *tptr); }
+ SYS_GETPGID = 207 // { int getpgid(pid_t pid); }
+ SYS_POLL = 209 // { int poll(struct pollfd *fds, u_int nfds, \
+ SYS_CLOCK_GETTIME = 232 // { int clock_gettime(clockid_t clock_id, \
+ SYS_CLOCK_SETTIME = 233 // { int clock_settime( \
+ SYS_CLOCK_GETRES = 234 // { int clock_getres(clockid_t clock_id, \
+ SYS_KTIMER_CREATE = 235 // { int ktimer_create(clockid_t clock_id, \
+ SYS_KTIMER_DELETE = 236 // { int ktimer_delete(int timerid); }
+ SYS_KTIMER_SETTIME = 237 // { int ktimer_settime(int timerid, int flags, \
+ SYS_KTIMER_GETTIME = 238 // { int ktimer_gettime(int timerid, struct \
+ SYS_KTIMER_GETOVERRUN = 239 // { int ktimer_getoverrun(int timerid); }
+ SYS_NANOSLEEP = 240 // { int nanosleep(const struct timespec *rqtp, \
+ SYS_FFCLOCK_GETCOUNTER = 241 // { int ffclock_getcounter(ffcounter *ffcount); }
+ SYS_FFCLOCK_SETESTIMATE = 242 // { int ffclock_setestimate( \
+ SYS_FFCLOCK_GETESTIMATE = 243 // { int ffclock_getestimate( \
+ SYS_CLOCK_GETCPUCLOCKID2 = 247 // { int clock_getcpuclockid2(id_t id,\
+ SYS_NTP_GETTIME = 248 // { int ntp_gettime(struct ntptimeval *ntvp); }
+ SYS_MINHERIT = 250 // { int minherit(void *addr, size_t len, \
+ SYS_RFORK = 251 // { int rfork(int flags); }
+ SYS_OPENBSD_POLL = 252 // { int openbsd_poll(struct pollfd *fds, \
+ SYS_ISSETUGID = 253 // { int issetugid(void); }
+ SYS_LCHOWN = 254 // { int lchown(char *path, int uid, int gid); }
+ SYS_GETDENTS = 272 // { int getdents(int fd, char *buf, \
+ SYS_LCHMOD = 274 // { int lchmod(char *path, mode_t mode); }
+ SYS_LUTIMES = 276 // { int lutimes(char *path, \
+ SYS_NSTAT = 278 // { int nstat(char *path, struct nstat *ub); }
+ SYS_NFSTAT = 279 // { int nfstat(int fd, struct nstat *sb); }
+ SYS_NLSTAT = 280 // { int nlstat(char *path, struct nstat *ub); }
+ SYS_PREADV = 289 // { ssize_t preadv(int fd, struct iovec *iovp, \
+ SYS_PWRITEV = 290 // { ssize_t pwritev(int fd, struct iovec *iovp, \
+ SYS_FHOPEN = 298 // { int fhopen(const struct fhandle *u_fhp, \
+ SYS_FHSTAT = 299 // { int fhstat(const struct fhandle *u_fhp, \
+ SYS_MODNEXT = 300 // { int modnext(int modid); }
+ SYS_MODSTAT = 301 // { int modstat(int modid, \
+ SYS_MODFNEXT = 302 // { int modfnext(int modid); }
+ SYS_MODFIND = 303 // { int modfind(const char *name); }
+ SYS_KLDLOAD = 304 // { int kldload(const char *file); }
+ SYS_KLDUNLOAD = 305 // { int kldunload(int fileid); }
+ SYS_KLDFIND = 306 // { int kldfind(const char *file); }
+ SYS_KLDNEXT = 307 // { int kldnext(int fileid); }
+ SYS_KLDSTAT = 308 // { int kldstat(int fileid, struct \
+ SYS_KLDFIRSTMOD = 309 // { int kldfirstmod(int fileid); }
+ SYS_GETSID = 310 // { int getsid(pid_t pid); }
+ SYS_SETRESUID = 311 // { int setresuid(uid_t ruid, uid_t euid, \
+ SYS_SETRESGID = 312 // { int setresgid(gid_t rgid, gid_t egid, \
+ SYS_YIELD = 321 // { int yield(void); }
+ SYS_MLOCKALL = 324 // { int mlockall(int how); }
+ SYS_MUNLOCKALL = 325 // { int munlockall(void); }
+ SYS___GETCWD = 326 // { int __getcwd(char *buf, u_int buflen); }
+ SYS_SCHED_SETPARAM = 327 // { int sched_setparam (pid_t pid, \
+ SYS_SCHED_GETPARAM = 328 // { int sched_getparam (pid_t pid, struct \
+ SYS_SCHED_SETSCHEDULER = 329 // { int sched_setscheduler (pid_t pid, int \
+ SYS_SCHED_GETSCHEDULER = 330 // { int sched_getscheduler (pid_t pid); }
+ SYS_SCHED_YIELD = 331 // { int sched_yield (void); }
+ SYS_SCHED_GET_PRIORITY_MAX = 332 // { int sched_get_priority_max (int policy); }
+ SYS_SCHED_GET_PRIORITY_MIN = 333 // { int sched_get_priority_min (int policy); }
+ SYS_SCHED_RR_GET_INTERVAL = 334 // { int sched_rr_get_interval (pid_t pid, \
+ SYS_UTRACE = 335 // { int utrace(const void *addr, size_t len); }
+ SYS_KLDSYM = 337 // { int kldsym(int fileid, int cmd, \
+ SYS_JAIL = 338 // { int jail(struct jail *jail); }
+ SYS_SIGPROCMASK = 340 // { int sigprocmask(int how, \
+ SYS_SIGSUSPEND = 341 // { int sigsuspend(const sigset_t *sigmask); }
+ SYS_SIGPENDING = 343 // { int sigpending(sigset_t *set); }
+ SYS_SIGTIMEDWAIT = 345 // { int sigtimedwait(const sigset_t *set, \
+ SYS_SIGWAITINFO = 346 // { int sigwaitinfo(const sigset_t *set,
\ + SYS___ACL_GET_FILE = 347 // { int __acl_get_file(const char *path, \ + SYS___ACL_SET_FILE = 348 // { int __acl_set_file(const char *path, \ + SYS___ACL_GET_FD = 349 // { int __acl_get_fd(int filedes, \ + SYS___ACL_SET_FD = 350 // { int __acl_set_fd(int filedes, \ + SYS___ACL_DELETE_FILE = 351 // { int __acl_delete_file(const char *path, \ + SYS___ACL_DELETE_FD = 352 // { int __acl_delete_fd(int filedes, \ + SYS___ACL_ACLCHECK_FILE = 353 // { int __acl_aclcheck_file(const char *path, \ + SYS___ACL_ACLCHECK_FD = 354 // { int __acl_aclcheck_fd(int filedes, \ + SYS_EXTATTRCTL = 355 // { int extattrctl(const char *path, int cmd, \ + SYS_EXTATTR_SET_FILE = 356 // { ssize_t extattr_set_file( \ + SYS_EXTATTR_GET_FILE = 357 // { ssize_t extattr_get_file( \ + SYS_EXTATTR_DELETE_FILE = 358 // { int extattr_delete_file(const char *path, \ + SYS_GETRESUID = 360 // { int getresuid(uid_t *ruid, uid_t *euid, \ + SYS_GETRESGID = 361 // { int getresgid(gid_t *rgid, gid_t *egid, \ + SYS_KQUEUE = 362 // { int kqueue(void); } + SYS_KEVENT = 363 // { int kevent(int fd, \ + SYS_EXTATTR_SET_FD = 371 // { ssize_t extattr_set_fd(int fd, \ + SYS_EXTATTR_GET_FD = 372 // { ssize_t extattr_get_fd(int fd, \ + SYS_EXTATTR_DELETE_FD = 373 // { int extattr_delete_fd(int fd, \ + SYS___SETUGID = 374 // { int __setugid(int flag); } + SYS_EACCESS = 376 // { int eaccess(char *path, int amode); } + SYS_NMOUNT = 378 // { int nmount(struct iovec *iovp, \ + SYS___MAC_GET_PROC = 384 // { int __mac_get_proc(struct mac *mac_p); } + SYS___MAC_SET_PROC = 385 // { int __mac_set_proc(struct mac *mac_p); } + SYS___MAC_GET_FD = 386 // { int __mac_get_fd(int fd, \ + SYS___MAC_GET_FILE = 387 // { int __mac_get_file(const char *path_p, \ + SYS___MAC_SET_FD = 388 // { int __mac_set_fd(int fd, \ + SYS___MAC_SET_FILE = 389 // { int __mac_set_file(const char *path_p, \ + SYS_KENV = 390 // { int kenv(int what, const char *name, \ + SYS_LCHFLAGS = 391 // { int lchflags(const char *path, \ + SYS_UUIDGEN = 392 // { int uuidgen(struct uuid *store, \ + SYS_SENDFILE = 393 // { int sendfile(int fd, int s, off_t offset, \ + SYS_MAC_SYSCALL = 394 // { int mac_syscall(const char *policy, \ + SYS_GETFSSTAT = 395 // { int getfsstat(struct statfs *buf, \ + SYS_STATFS = 396 // { int statfs(char *path, \ + SYS_FSTATFS = 397 // { int fstatfs(int fd, struct statfs *buf); } + SYS_FHSTATFS = 398 // { int fhstatfs(const struct fhandle *u_fhp, \ + SYS___MAC_GET_PID = 409 // { int __mac_get_pid(pid_t pid, \ + SYS___MAC_GET_LINK = 410 // { int __mac_get_link(const char *path_p, \ + SYS___MAC_SET_LINK = 411 // { int __mac_set_link(const char *path_p, \ + SYS_EXTATTR_SET_LINK = 412 // { ssize_t extattr_set_link( \ + SYS_EXTATTR_GET_LINK = 413 // { ssize_t extattr_get_link( \ + SYS_EXTATTR_DELETE_LINK = 414 // { int extattr_delete_link( \ + SYS___MAC_EXECVE = 415 // { int __mac_execve(char *fname, char **argv, \ + SYS_SIGACTION = 416 // { int sigaction(int sig, \ + SYS_SIGRETURN = 417 // { int sigreturn( \ + SYS_GETCONTEXT = 421 // { int getcontext(struct __ucontext *ucp); } + SYS_SETCONTEXT = 422 // { int setcontext( \ + SYS_SWAPCONTEXT = 423 // { int swapcontext(struct __ucontext *oucp, \ + SYS_SWAPOFF = 424 // { int swapoff(const char *name); } + SYS___ACL_GET_LINK = 425 // { int __acl_get_link(const char *path, \ + SYS___ACL_SET_LINK = 426 // { int __acl_set_link(const char *path, \ + SYS___ACL_DELETE_LINK = 427 // { int __acl_delete_link(const char *path, \ + SYS___ACL_ACLCHECK_LINK = 428 // { int __acl_aclcheck_link(const char *path, \ + SYS_SIGWAIT = 429 // { int 
sigwait(const sigset_t *set, \ + SYS_THR_CREATE = 430 // { int thr_create(ucontext_t *ctx, long *id, \ + SYS_THR_EXIT = 431 // { void thr_exit(long *state); } + SYS_THR_SELF = 432 // { int thr_self(long *id); } + SYS_THR_KILL = 433 // { int thr_kill(long id, int sig); } + SYS__UMTX_LOCK = 434 // { int _umtx_lock(struct umtx *umtx); } + SYS__UMTX_UNLOCK = 435 // { int _umtx_unlock(struct umtx *umtx); } + SYS_JAIL_ATTACH = 436 // { int jail_attach(int jid); } + SYS_EXTATTR_LIST_FD = 437 // { ssize_t extattr_list_fd(int fd, \ + SYS_EXTATTR_LIST_FILE = 438 // { ssize_t extattr_list_file( \ + SYS_EXTATTR_LIST_LINK = 439 // { ssize_t extattr_list_link( \ + SYS_THR_SUSPEND = 442 // { int thr_suspend( \ + SYS_THR_WAKE = 443 // { int thr_wake(long id); } + SYS_KLDUNLOADF = 444 // { int kldunloadf(int fileid, int flags); } + SYS_AUDIT = 445 // { int audit(const void *record, \ + SYS_AUDITON = 446 // { int auditon(int cmd, void *data, \ + SYS_GETAUID = 447 // { int getauid(uid_t *auid); } + SYS_SETAUID = 448 // { int setauid(uid_t *auid); } + SYS_GETAUDIT = 449 // { int getaudit(struct auditinfo *auditinfo); } + SYS_SETAUDIT = 450 // { int setaudit(struct auditinfo *auditinfo); } + SYS_GETAUDIT_ADDR = 451 // { int getaudit_addr( \ + SYS_SETAUDIT_ADDR = 452 // { int setaudit_addr( \ + SYS_AUDITCTL = 453 // { int auditctl(char *path); } + SYS__UMTX_OP = 454 // { int _umtx_op(void *obj, int op, \ + SYS_THR_NEW = 455 // { int thr_new(struct thr_param *param, \ + SYS_SIGQUEUE = 456 // { int sigqueue(pid_t pid, int signum, void *value); } + SYS_ABORT2 = 463 // { int abort2(const char *why, int nargs, void **args); } + SYS_THR_SET_NAME = 464 // { int thr_set_name(long id, const char *name); } + SYS_RTPRIO_THREAD = 466 // { int rtprio_thread(int function, \ + SYS_SCTP_PEELOFF = 471 // { int sctp_peeloff(int sd, uint32_t name); } + SYS_SCTP_GENERIC_SENDMSG = 472 // { int sctp_generic_sendmsg(int sd, caddr_t msg, int mlen, \ + SYS_SCTP_GENERIC_SENDMSG_IOV = 473 // { int sctp_generic_sendmsg_iov(int sd, struct iovec *iov, int iovlen, \ + SYS_SCTP_GENERIC_RECVMSG = 474 // { int sctp_generic_recvmsg(int sd, struct iovec *iov, int iovlen, \ + SYS_PREAD = 475 // { ssize_t pread(int fd, void *buf, \ + SYS_PWRITE = 476 // { ssize_t pwrite(int fd, const void *buf, \ + SYS_MMAP = 477 // { caddr_t mmap(caddr_t addr, size_t len, \ + SYS_LSEEK = 478 // { off_t lseek(int fd, off_t offset, \ + SYS_TRUNCATE = 479 // { int truncate(char *path, off_t length); } + SYS_FTRUNCATE = 480 // { int ftruncate(int fd, off_t length); } + SYS_THR_KILL2 = 481 // { int thr_kill2(pid_t pid, long id, int sig); } + SYS_SHM_OPEN = 482 // { int shm_open(const char *path, int flags, \ + SYS_SHM_UNLINK = 483 // { int shm_unlink(const char *path); } + SYS_CPUSET = 484 // { int cpuset(cpusetid_t *setid); } + SYS_CPUSET_SETID = 485 // { int cpuset_setid(cpuwhich_t which, id_t id, \ + SYS_CPUSET_GETID = 486 // { int cpuset_getid(cpulevel_t level, \ + SYS_CPUSET_GETAFFINITY = 487 // { int cpuset_getaffinity(cpulevel_t level, \ + SYS_CPUSET_SETAFFINITY = 488 // { int cpuset_setaffinity(cpulevel_t level, \ + SYS_FACCESSAT = 489 // { int faccessat(int fd, char *path, int amode, \ + SYS_FCHMODAT = 490 // { int fchmodat(int fd, char *path, mode_t mode, \ + SYS_FCHOWNAT = 491 // { int fchownat(int fd, char *path, uid_t uid, \ + SYS_FEXECVE = 492 // { int fexecve(int fd, char **argv, \ + SYS_FSTATAT = 493 // { int fstatat(int fd, char *path, \ + SYS_FUTIMESAT = 494 // { int futimesat(int fd, char *path, \ + SYS_LINKAT = 495 // { int linkat(int fd1, char 
*path1, int fd2, \ + SYS_MKDIRAT = 496 // { int mkdirat(int fd, char *path, mode_t mode); } + SYS_MKFIFOAT = 497 // { int mkfifoat(int fd, char *path, mode_t mode); } + SYS_MKNODAT = 498 // { int mknodat(int fd, char *path, mode_t mode, \ + SYS_OPENAT = 499 // { int openat(int fd, char *path, int flag, \ + SYS_READLINKAT = 500 // { int readlinkat(int fd, char *path, char *buf, \ + SYS_RENAMEAT = 501 // { int renameat(int oldfd, char *old, int newfd, \ + SYS_SYMLINKAT = 502 // { int symlinkat(char *path1, int fd, \ + SYS_UNLINKAT = 503 // { int unlinkat(int fd, char *path, int flag); } + SYS_POSIX_OPENPT = 504 // { int posix_openpt(int flags); } + SYS_JAIL_GET = 506 // { int jail_get(struct iovec *iovp, \ + SYS_JAIL_SET = 507 // { int jail_set(struct iovec *iovp, \ + SYS_JAIL_REMOVE = 508 // { int jail_remove(int jid); } + SYS_CLOSEFROM = 509 // { int closefrom(int lowfd); } + SYS_LPATHCONF = 513 // { int lpathconf(char *path, int name); } + SYS_CAP_NEW = 514 // { int cap_new(int fd, uint64_t rights); } + SYS_CAP_GETRIGHTS = 515 // { int cap_getrights(int fd, \ + SYS_CAP_ENTER = 516 // { int cap_enter(void); } + SYS_CAP_GETMODE = 517 // { int cap_getmode(u_int *modep); } + SYS_PDFORK = 518 // { int pdfork(int *fdp, int flags); } + SYS_PDKILL = 519 // { int pdkill(int fd, int signum); } + SYS_PDGETPID = 520 // { int pdgetpid(int fd, pid_t *pidp); } + SYS_PSELECT = 522 // { int pselect(int nd, fd_set *in, \ + SYS_GETLOGINCLASS = 523 // { int getloginclass(char *namebuf, \ + SYS_SETLOGINCLASS = 524 // { int setloginclass(const char *namebuf); } + SYS_RCTL_GET_RACCT = 525 // { int rctl_get_racct(const void *inbufp, \ + SYS_RCTL_GET_RULES = 526 // { int rctl_get_rules(const void *inbufp, \ + SYS_RCTL_GET_LIMITS = 527 // { int rctl_get_limits(const void *inbufp, \ + SYS_RCTL_ADD_RULE = 528 // { int rctl_add_rule(const void *inbufp, \ + SYS_RCTL_REMOVE_RULE = 529 // { int rctl_remove_rule(const void *inbufp, \ + SYS_POSIX_FALLOCATE = 530 // { int posix_fallocate(int fd, \ + SYS_POSIX_FADVISE = 531 // { int posix_fadvise(int fd, off_t offset, \ + SYS_WAIT6 = 532 // { int wait6(idtype_t idtype, id_t id, \ + SYS_BINDAT = 538 // { int bindat(int fd, int s, caddr_t name, \ + SYS_CONNECTAT = 539 // { int connectat(int fd, int s, caddr_t name, \ + SYS_CHFLAGSAT = 540 // { int chflagsat(int fd, const char *path, \ + SYS_ACCEPT4 = 541 // { int accept4(int s, \ + SYS_PIPE2 = 542 // { int pipe2(int *fildes, int flags); } + SYS_PROCCTL = 544 // { int procctl(idtype_t idtype, id_t id, \ + SYS_PPOLL = 545 // { int ppoll(struct pollfd *fds, u_int nfds, \ +) diff --git a/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zsysnum_freebsd_arm.go b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zsysnum_freebsd_arm.go new file mode 100644 index 000000000..206b9f612 --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zsysnum_freebsd_arm.go @@ -0,0 +1,351 @@ +// mksysnum_freebsd.pl +// MACHINE GENERATED BY THE ABOVE COMMAND; DO NOT EDIT + +// +build arm,freebsd + +package unix + +const ( + // SYS_NOSYS = 0; // { int nosys(void); } syscall nosys_args int + SYS_EXIT = 1 // { void sys_exit(int rval); } exit \ + SYS_FORK = 2 // { int fork(void); } + SYS_READ = 3 // { ssize_t read(int fd, void *buf, \ + SYS_WRITE = 4 // { ssize_t write(int fd, const void *buf, \ + SYS_OPEN = 5 // { int open(char *path, int flags, int mode); } + SYS_CLOSE = 6 // { int close(int fd); } + SYS_WAIT4 = 7 // { int wait4(int pid, int *status, \ + SYS_LINK = 9 // 
{ int link(char *path, char *link); } + SYS_UNLINK = 10 // { int unlink(char *path); } + SYS_CHDIR = 12 // { int chdir(char *path); } + SYS_FCHDIR = 13 // { int fchdir(int fd); } + SYS_MKNOD = 14 // { int mknod(char *path, int mode, int dev); } + SYS_CHMOD = 15 // { int chmod(char *path, int mode); } + SYS_CHOWN = 16 // { int chown(char *path, int uid, int gid); } + SYS_OBREAK = 17 // { int obreak(char *nsize); } break \ + SYS_GETPID = 20 // { pid_t getpid(void); } + SYS_MOUNT = 21 // { int mount(char *type, char *path, \ + SYS_UNMOUNT = 22 // { int unmount(char *path, int flags); } + SYS_SETUID = 23 // { int setuid(uid_t uid); } + SYS_GETUID = 24 // { uid_t getuid(void); } + SYS_GETEUID = 25 // { uid_t geteuid(void); } + SYS_PTRACE = 26 // { int ptrace(int req, pid_t pid, \ + SYS_RECVMSG = 27 // { int recvmsg(int s, struct msghdr *msg, \ + SYS_SENDMSG = 28 // { int sendmsg(int s, struct msghdr *msg, \ + SYS_RECVFROM = 29 // { int recvfrom(int s, caddr_t buf, \ + SYS_ACCEPT = 30 // { int accept(int s, \ + SYS_GETPEERNAME = 31 // { int getpeername(int fdes, \ + SYS_GETSOCKNAME = 32 // { int getsockname(int fdes, \ + SYS_ACCESS = 33 // { int access(char *path, int amode); } + SYS_CHFLAGS = 34 // { int chflags(const char *path, u_long flags); } + SYS_FCHFLAGS = 35 // { int fchflags(int fd, u_long flags); } + SYS_SYNC = 36 // { int sync(void); } + SYS_KILL = 37 // { int kill(int pid, int signum); } + SYS_GETPPID = 39 // { pid_t getppid(void); } + SYS_DUP = 41 // { int dup(u_int fd); } + SYS_PIPE = 42 // { int pipe(void); } + SYS_GETEGID = 43 // { gid_t getegid(void); } + SYS_PROFIL = 44 // { int profil(caddr_t samples, size_t size, \ + SYS_KTRACE = 45 // { int ktrace(const char *fname, int ops, \ + SYS_GETGID = 47 // { gid_t getgid(void); } + SYS_GETLOGIN = 49 // { int getlogin(char *namebuf, u_int \ + SYS_SETLOGIN = 50 // { int setlogin(char *namebuf); } + SYS_ACCT = 51 // { int acct(char *path); } + SYS_SIGALTSTACK = 53 // { int sigaltstack(stack_t *ss, \ + SYS_IOCTL = 54 // { int ioctl(int fd, u_long com, \ + SYS_REBOOT = 55 // { int reboot(int opt); } + SYS_REVOKE = 56 // { int revoke(char *path); } + SYS_SYMLINK = 57 // { int symlink(char *path, char *link); } + SYS_READLINK = 58 // { ssize_t readlink(char *path, char *buf, \ + SYS_EXECVE = 59 // { int execve(char *fname, char **argv, \ + SYS_UMASK = 60 // { int umask(int newmask); } umask umask_args \ + SYS_CHROOT = 61 // { int chroot(char *path); } + SYS_MSYNC = 65 // { int msync(void *addr, size_t len, \ + SYS_VFORK = 66 // { int vfork(void); } + SYS_SBRK = 69 // { int sbrk(int incr); } + SYS_SSTK = 70 // { int sstk(int incr); } + SYS_OVADVISE = 72 // { int ovadvise(int anom); } vadvise \ + SYS_MUNMAP = 73 // { int munmap(void *addr, size_t len); } + SYS_MPROTECT = 74 // { int mprotect(const void *addr, size_t len, \ + SYS_MADVISE = 75 // { int madvise(void *addr, size_t len, \ + SYS_MINCORE = 78 // { int mincore(const void *addr, size_t len, \ + SYS_GETGROUPS = 79 // { int getgroups(u_int gidsetsize, \ + SYS_SETGROUPS = 80 // { int setgroups(u_int gidsetsize, \ + SYS_GETPGRP = 81 // { int getpgrp(void); } + SYS_SETPGID = 82 // { int setpgid(int pid, int pgid); } + SYS_SETITIMER = 83 // { int setitimer(u_int which, struct \ + SYS_SWAPON = 85 // { int swapon(char *name); } + SYS_GETITIMER = 86 // { int getitimer(u_int which, \ + SYS_GETDTABLESIZE = 89 // { int getdtablesize(void); } + SYS_DUP2 = 90 // { int dup2(u_int from, u_int to); } + SYS_FCNTL = 92 // { int fcntl(int fd, int cmd, long arg); } + SYS_SELECT = 93 // { int select(int 
nd, fd_set *in, fd_set *ou, \ + SYS_FSYNC = 95 // { int fsync(int fd); } + SYS_SETPRIORITY = 96 // { int setpriority(int which, int who, \ + SYS_SOCKET = 97 // { int socket(int domain, int type, \ + SYS_CONNECT = 98 // { int connect(int s, caddr_t name, \ + SYS_GETPRIORITY = 100 // { int getpriority(int which, int who); } + SYS_BIND = 104 // { int bind(int s, caddr_t name, \ + SYS_SETSOCKOPT = 105 // { int setsockopt(int s, int level, int name, \ + SYS_LISTEN = 106 // { int listen(int s, int backlog); } + SYS_GETTIMEOFDAY = 116 // { int gettimeofday(struct timeval *tp, \ + SYS_GETRUSAGE = 117 // { int getrusage(int who, \ + SYS_GETSOCKOPT = 118 // { int getsockopt(int s, int level, int name, \ + SYS_READV = 120 // { int readv(int fd, struct iovec *iovp, \ + SYS_WRITEV = 121 // { int writev(int fd, struct iovec *iovp, \ + SYS_SETTIMEOFDAY = 122 // { int settimeofday(struct timeval *tv, \ + SYS_FCHOWN = 123 // { int fchown(int fd, int uid, int gid); } + SYS_FCHMOD = 124 // { int fchmod(int fd, int mode); } + SYS_SETREUID = 126 // { int setreuid(int ruid, int euid); } + SYS_SETREGID = 127 // { int setregid(int rgid, int egid); } + SYS_RENAME = 128 // { int rename(char *from, char *to); } + SYS_FLOCK = 131 // { int flock(int fd, int how); } + SYS_MKFIFO = 132 // { int mkfifo(char *path, int mode); } + SYS_SENDTO = 133 // { int sendto(int s, caddr_t buf, size_t len, \ + SYS_SHUTDOWN = 134 // { int shutdown(int s, int how); } + SYS_SOCKETPAIR = 135 // { int socketpair(int domain, int type, \ + SYS_MKDIR = 136 // { int mkdir(char *path, int mode); } + SYS_RMDIR = 137 // { int rmdir(char *path); } + SYS_UTIMES = 138 // { int utimes(char *path, \ + SYS_ADJTIME = 140 // { int adjtime(struct timeval *delta, \ + SYS_SETSID = 147 // { int setsid(void); } + SYS_QUOTACTL = 148 // { int quotactl(char *path, int cmd, int uid, \ + SYS_LGETFH = 160 // { int lgetfh(char *fname, \ + SYS_GETFH = 161 // { int getfh(char *fname, \ + SYS_SYSARCH = 165 // { int sysarch(int op, char *parms); } + SYS_RTPRIO = 166 // { int rtprio(int function, pid_t pid, \ + SYS_FREEBSD6_PREAD = 173 // { ssize_t freebsd6_pread(int fd, void *buf, \ + SYS_FREEBSD6_PWRITE = 174 // { ssize_t freebsd6_pwrite(int fd, \ + SYS_SETFIB = 175 // { int setfib(int fibnum); } + SYS_NTP_ADJTIME = 176 // { int ntp_adjtime(struct timex *tp); } + SYS_SETGID = 181 // { int setgid(gid_t gid); } + SYS_SETEGID = 182 // { int setegid(gid_t egid); } + SYS_SETEUID = 183 // { int seteuid(uid_t euid); } + SYS_STAT = 188 // { int stat(char *path, struct stat *ub); } + SYS_FSTAT = 189 // { int fstat(int fd, struct stat *sb); } + SYS_LSTAT = 190 // { int lstat(char *path, struct stat *ub); } + SYS_PATHCONF = 191 // { int pathconf(char *path, int name); } + SYS_FPATHCONF = 192 // { int fpathconf(int fd, int name); } + SYS_GETRLIMIT = 194 // { int getrlimit(u_int which, \ + SYS_SETRLIMIT = 195 // { int setrlimit(u_int which, \ + SYS_GETDIRENTRIES = 196 // { int getdirentries(int fd, char *buf, \ + SYS_FREEBSD6_MMAP = 197 // { caddr_t freebsd6_mmap(caddr_t addr, \ + SYS_FREEBSD6_LSEEK = 199 // { off_t freebsd6_lseek(int fd, int pad, \ + SYS_FREEBSD6_TRUNCATE = 200 // { int freebsd6_truncate(char *path, int pad, \ + SYS_FREEBSD6_FTRUNCATE = 201 // { int freebsd6_ftruncate(int fd, int pad, \ + SYS___SYSCTL = 202 // { int __sysctl(int *name, u_int namelen, \ + SYS_MLOCK = 203 // { int mlock(const void *addr, size_t len); } + SYS_MUNLOCK = 204 // { int munlock(const void *addr, size_t len); } + SYS_UNDELETE = 205 // { int undelete(char *path); } + SYS_FUTIMES = 206 // { 
int futimes(int fd, struct timeval *tptr); } + SYS_GETPGID = 207 // { int getpgid(pid_t pid); } + SYS_POLL = 209 // { int poll(struct pollfd *fds, u_int nfds, \ + SYS_CLOCK_GETTIME = 232 // { int clock_gettime(clockid_t clock_id, \ + SYS_CLOCK_SETTIME = 233 // { int clock_settime( \ + SYS_CLOCK_GETRES = 234 // { int clock_getres(clockid_t clock_id, \ + SYS_KTIMER_CREATE = 235 // { int ktimer_create(clockid_t clock_id, \ + SYS_KTIMER_DELETE = 236 // { int ktimer_delete(int timerid); } + SYS_KTIMER_SETTIME = 237 // { int ktimer_settime(int timerid, int flags, \ + SYS_KTIMER_GETTIME = 238 // { int ktimer_gettime(int timerid, struct \ + SYS_KTIMER_GETOVERRUN = 239 // { int ktimer_getoverrun(int timerid); } + SYS_NANOSLEEP = 240 // { int nanosleep(const struct timespec *rqtp, \ + SYS_FFCLOCK_GETCOUNTER = 241 // { int ffclock_getcounter(ffcounter *ffcount); } + SYS_FFCLOCK_SETESTIMATE = 242 // { int ffclock_setestimate( \ + SYS_FFCLOCK_GETESTIMATE = 243 // { int ffclock_getestimate( \ + SYS_CLOCK_GETCPUCLOCKID2 = 247 // { int clock_getcpuclockid2(id_t id,\ + SYS_NTP_GETTIME = 248 // { int ntp_gettime(struct ntptimeval *ntvp); } + SYS_MINHERIT = 250 // { int minherit(void *addr, size_t len, \ + SYS_RFORK = 251 // { int rfork(int flags); } + SYS_OPENBSD_POLL = 252 // { int openbsd_poll(struct pollfd *fds, \ + SYS_ISSETUGID = 253 // { int issetugid(void); } + SYS_LCHOWN = 254 // { int lchown(char *path, int uid, int gid); } + SYS_GETDENTS = 272 // { int getdents(int fd, char *buf, \ + SYS_LCHMOD = 274 // { int lchmod(char *path, mode_t mode); } + SYS_LUTIMES = 276 // { int lutimes(char *path, \ + SYS_NSTAT = 278 // { int nstat(char *path, struct nstat *ub); } + SYS_NFSTAT = 279 // { int nfstat(int fd, struct nstat *sb); } + SYS_NLSTAT = 280 // { int nlstat(char *path, struct nstat *ub); } + SYS_PREADV = 289 // { ssize_t preadv(int fd, struct iovec *iovp, \ + SYS_PWRITEV = 290 // { ssize_t pwritev(int fd, struct iovec *iovp, \ + SYS_FHOPEN = 298 // { int fhopen(const struct fhandle *u_fhp, \ + SYS_FHSTAT = 299 // { int fhstat(const struct fhandle *u_fhp, \ + SYS_MODNEXT = 300 // { int modnext(int modid); } + SYS_MODSTAT = 301 // { int modstat(int modid, \ + SYS_MODFNEXT = 302 // { int modfnext(int modid); } + SYS_MODFIND = 303 // { int modfind(const char *name); } + SYS_KLDLOAD = 304 // { int kldload(const char *file); } + SYS_KLDUNLOAD = 305 // { int kldunload(int fileid); } + SYS_KLDFIND = 306 // { int kldfind(const char *file); } + SYS_KLDNEXT = 307 // { int kldnext(int fileid); } + SYS_KLDSTAT = 308 // { int kldstat(int fileid, struct \ + SYS_KLDFIRSTMOD = 309 // { int kldfirstmod(int fileid); } + SYS_GETSID = 310 // { int getsid(pid_t pid); } + SYS_SETRESUID = 311 // { int setresuid(uid_t ruid, uid_t euid, \ + SYS_SETRESGID = 312 // { int setresgid(gid_t rgid, gid_t egid, \ + SYS_YIELD = 321 // { int yield(void); } + SYS_MLOCKALL = 324 // { int mlockall(int how); } + SYS_MUNLOCKALL = 325 // { int munlockall(void); } + SYS___GETCWD = 326 // { int __getcwd(char *buf, u_int buflen); } + SYS_SCHED_SETPARAM = 327 // { int sched_setparam (pid_t pid, \ + SYS_SCHED_GETPARAM = 328 // { int sched_getparam (pid_t pid, struct \ + SYS_SCHED_SETSCHEDULER = 329 // { int sched_setscheduler (pid_t pid, int \ + SYS_SCHED_GETSCHEDULER = 330 // { int sched_getscheduler (pid_t pid); } + SYS_SCHED_YIELD = 331 // { int sched_yield (void); } + SYS_SCHED_GET_PRIORITY_MAX = 332 // { int sched_get_priority_max (int policy); } + SYS_SCHED_GET_PRIORITY_MIN = 333 // { int sched_get_priority_min (int policy); } + 
SYS_SCHED_RR_GET_INTERVAL = 334 // { int sched_rr_get_interval (pid_t pid, \ + SYS_UTRACE = 335 // { int utrace(const void *addr, size_t len); } + SYS_KLDSYM = 337 // { int kldsym(int fileid, int cmd, \ + SYS_JAIL = 338 // { int jail(struct jail *jail); } + SYS_SIGPROCMASK = 340 // { int sigprocmask(int how, \ + SYS_SIGSUSPEND = 341 // { int sigsuspend(const sigset_t *sigmask); } + SYS_SIGPENDING = 343 // { int sigpending(sigset_t *set); } + SYS_SIGTIMEDWAIT = 345 // { int sigtimedwait(const sigset_t *set, \ + SYS_SIGWAITINFO = 346 // { int sigwaitinfo(const sigset_t *set, \ + SYS___ACL_GET_FILE = 347 // { int __acl_get_file(const char *path, \ + SYS___ACL_SET_FILE = 348 // { int __acl_set_file(const char *path, \ + SYS___ACL_GET_FD = 349 // { int __acl_get_fd(int filedes, \ + SYS___ACL_SET_FD = 350 // { int __acl_set_fd(int filedes, \ + SYS___ACL_DELETE_FILE = 351 // { int __acl_delete_file(const char *path, \ + SYS___ACL_DELETE_FD = 352 // { int __acl_delete_fd(int filedes, \ + SYS___ACL_ACLCHECK_FILE = 353 // { int __acl_aclcheck_file(const char *path, \ + SYS___ACL_ACLCHECK_FD = 354 // { int __acl_aclcheck_fd(int filedes, \ + SYS_EXTATTRCTL = 355 // { int extattrctl(const char *path, int cmd, \ + SYS_EXTATTR_SET_FILE = 356 // { ssize_t extattr_set_file( \ + SYS_EXTATTR_GET_FILE = 357 // { ssize_t extattr_get_file( \ + SYS_EXTATTR_DELETE_FILE = 358 // { int extattr_delete_file(const char *path, \ + SYS_GETRESUID = 360 // { int getresuid(uid_t *ruid, uid_t *euid, \ + SYS_GETRESGID = 361 // { int getresgid(gid_t *rgid, gid_t *egid, \ + SYS_KQUEUE = 362 // { int kqueue(void); } + SYS_KEVENT = 363 // { int kevent(int fd, \ + SYS_EXTATTR_SET_FD = 371 // { ssize_t extattr_set_fd(int fd, \ + SYS_EXTATTR_GET_FD = 372 // { ssize_t extattr_get_fd(int fd, \ + SYS_EXTATTR_DELETE_FD = 373 // { int extattr_delete_fd(int fd, \ + SYS___SETUGID = 374 // { int __setugid(int flag); } + SYS_EACCESS = 376 // { int eaccess(char *path, int amode); } + SYS_NMOUNT = 378 // { int nmount(struct iovec *iovp, \ + SYS___MAC_GET_PROC = 384 // { int __mac_get_proc(struct mac *mac_p); } + SYS___MAC_SET_PROC = 385 // { int __mac_set_proc(struct mac *mac_p); } + SYS___MAC_GET_FD = 386 // { int __mac_get_fd(int fd, \ + SYS___MAC_GET_FILE = 387 // { int __mac_get_file(const char *path_p, \ + SYS___MAC_SET_FD = 388 // { int __mac_set_fd(int fd, \ + SYS___MAC_SET_FILE = 389 // { int __mac_set_file(const char *path_p, \ + SYS_KENV = 390 // { int kenv(int what, const char *name, \ + SYS_LCHFLAGS = 391 // { int lchflags(const char *path, \ + SYS_UUIDGEN = 392 // { int uuidgen(struct uuid *store, \ + SYS_SENDFILE = 393 // { int sendfile(int fd, int s, off_t offset, \ + SYS_MAC_SYSCALL = 394 // { int mac_syscall(const char *policy, \ + SYS_GETFSSTAT = 395 // { int getfsstat(struct statfs *buf, \ + SYS_STATFS = 396 // { int statfs(char *path, \ + SYS_FSTATFS = 397 // { int fstatfs(int fd, struct statfs *buf); } + SYS_FHSTATFS = 398 // { int fhstatfs(const struct fhandle *u_fhp, \ + SYS___MAC_GET_PID = 409 // { int __mac_get_pid(pid_t pid, \ + SYS___MAC_GET_LINK = 410 // { int __mac_get_link(const char *path_p, \ + SYS___MAC_SET_LINK = 411 // { int __mac_set_link(const char *path_p, \ + SYS_EXTATTR_SET_LINK = 412 // { ssize_t extattr_set_link( \ + SYS_EXTATTR_GET_LINK = 413 // { ssize_t extattr_get_link( \ + SYS_EXTATTR_DELETE_LINK = 414 // { int extattr_delete_link( \ + SYS___MAC_EXECVE = 415 // { int __mac_execve(char *fname, char **argv, \ + SYS_SIGACTION = 416 // { int sigaction(int sig, \ + SYS_SIGRETURN = 417 // { int 
sigreturn( \ + SYS_GETCONTEXT = 421 // { int getcontext(struct __ucontext *ucp); } + SYS_SETCONTEXT = 422 // { int setcontext( \ + SYS_SWAPCONTEXT = 423 // { int swapcontext(struct __ucontext *oucp, \ + SYS_SWAPOFF = 424 // { int swapoff(const char *name); } + SYS___ACL_GET_LINK = 425 // { int __acl_get_link(const char *path, \ + SYS___ACL_SET_LINK = 426 // { int __acl_set_link(const char *path, \ + SYS___ACL_DELETE_LINK = 427 // { int __acl_delete_link(const char *path, \ + SYS___ACL_ACLCHECK_LINK = 428 // { int __acl_aclcheck_link(const char *path, \ + SYS_SIGWAIT = 429 // { int sigwait(const sigset_t *set, \ + SYS_THR_CREATE = 430 // { int thr_create(ucontext_t *ctx, long *id, \ + SYS_THR_EXIT = 431 // { void thr_exit(long *state); } + SYS_THR_SELF = 432 // { int thr_self(long *id); } + SYS_THR_KILL = 433 // { int thr_kill(long id, int sig); } + SYS__UMTX_LOCK = 434 // { int _umtx_lock(struct umtx *umtx); } + SYS__UMTX_UNLOCK = 435 // { int _umtx_unlock(struct umtx *umtx); } + SYS_JAIL_ATTACH = 436 // { int jail_attach(int jid); } + SYS_EXTATTR_LIST_FD = 437 // { ssize_t extattr_list_fd(int fd, \ + SYS_EXTATTR_LIST_FILE = 438 // { ssize_t extattr_list_file( \ + SYS_EXTATTR_LIST_LINK = 439 // { ssize_t extattr_list_link( \ + SYS_THR_SUSPEND = 442 // { int thr_suspend( \ + SYS_THR_WAKE = 443 // { int thr_wake(long id); } + SYS_KLDUNLOADF = 444 // { int kldunloadf(int fileid, int flags); } + SYS_AUDIT = 445 // { int audit(const void *record, \ + SYS_AUDITON = 446 // { int auditon(int cmd, void *data, \ + SYS_GETAUID = 447 // { int getauid(uid_t *auid); } + SYS_SETAUID = 448 // { int setauid(uid_t *auid); } + SYS_GETAUDIT = 449 // { int getaudit(struct auditinfo *auditinfo); } + SYS_SETAUDIT = 450 // { int setaudit(struct auditinfo *auditinfo); } + SYS_GETAUDIT_ADDR = 451 // { int getaudit_addr( \ + SYS_SETAUDIT_ADDR = 452 // { int setaudit_addr( \ + SYS_AUDITCTL = 453 // { int auditctl(char *path); } + SYS__UMTX_OP = 454 // { int _umtx_op(void *obj, int op, \ + SYS_THR_NEW = 455 // { int thr_new(struct thr_param *param, \ + SYS_SIGQUEUE = 456 // { int sigqueue(pid_t pid, int signum, void *value); } + SYS_ABORT2 = 463 // { int abort2(const char *why, int nargs, void **args); } + SYS_THR_SET_NAME = 464 // { int thr_set_name(long id, const char *name); } + SYS_RTPRIO_THREAD = 466 // { int rtprio_thread(int function, \ + SYS_SCTP_PEELOFF = 471 // { int sctp_peeloff(int sd, uint32_t name); } + SYS_SCTP_GENERIC_SENDMSG = 472 // { int sctp_generic_sendmsg(int sd, caddr_t msg, int mlen, \ + SYS_SCTP_GENERIC_SENDMSG_IOV = 473 // { int sctp_generic_sendmsg_iov(int sd, struct iovec *iov, int iovlen, \ + SYS_SCTP_GENERIC_RECVMSG = 474 // { int sctp_generic_recvmsg(int sd, struct iovec *iov, int iovlen, \ + SYS_PREAD = 475 // { ssize_t pread(int fd, void *buf, \ + SYS_PWRITE = 476 // { ssize_t pwrite(int fd, const void *buf, \ + SYS_MMAP = 477 // { caddr_t mmap(caddr_t addr, size_t len, \ + SYS_LSEEK = 478 // { off_t lseek(int fd, off_t offset, \ + SYS_TRUNCATE = 479 // { int truncate(char *path, off_t length); } + SYS_FTRUNCATE = 480 // { int ftruncate(int fd, off_t length); } + SYS_THR_KILL2 = 481 // { int thr_kill2(pid_t pid, long id, int sig); } + SYS_SHM_OPEN = 482 // { int shm_open(const char *path, int flags, \ + SYS_SHM_UNLINK = 483 // { int shm_unlink(const char *path); } + SYS_CPUSET = 484 // { int cpuset(cpusetid_t *setid); } + SYS_CPUSET_SETID = 485 // { int cpuset_setid(cpuwhich_t which, id_t id, \ + SYS_CPUSET_GETID = 486 // { int cpuset_getid(cpulevel_t level, \ + SYS_CPUSET_GETAFFINITY 
= 487 // { int cpuset_getaffinity(cpulevel_t level, \ + SYS_CPUSET_SETAFFINITY = 488 // { int cpuset_setaffinity(cpulevel_t level, \ + SYS_FACCESSAT = 489 // { int faccessat(int fd, char *path, int amode, \ + SYS_FCHMODAT = 490 // { int fchmodat(int fd, char *path, mode_t mode, \ + SYS_FCHOWNAT = 491 // { int fchownat(int fd, char *path, uid_t uid, \ + SYS_FEXECVE = 492 // { int fexecve(int fd, char **argv, \ + SYS_FSTATAT = 493 // { int fstatat(int fd, char *path, \ + SYS_FUTIMESAT = 494 // { int futimesat(int fd, char *path, \ + SYS_LINKAT = 495 // { int linkat(int fd1, char *path1, int fd2, \ + SYS_MKDIRAT = 496 // { int mkdirat(int fd, char *path, mode_t mode); } + SYS_MKFIFOAT = 497 // { int mkfifoat(int fd, char *path, mode_t mode); } + SYS_MKNODAT = 498 // { int mknodat(int fd, char *path, mode_t mode, \ + SYS_OPENAT = 499 // { int openat(int fd, char *path, int flag, \ + SYS_READLINKAT = 500 // { int readlinkat(int fd, char *path, char *buf, \ + SYS_RENAMEAT = 501 // { int renameat(int oldfd, char *old, int newfd, \ + SYS_SYMLINKAT = 502 // { int symlinkat(char *path1, int fd, \ + SYS_UNLINKAT = 503 // { int unlinkat(int fd, char *path, int flag); } + SYS_POSIX_OPENPT = 504 // { int posix_openpt(int flags); } + SYS_JAIL_GET = 506 // { int jail_get(struct iovec *iovp, \ + SYS_JAIL_SET = 507 // { int jail_set(struct iovec *iovp, \ + SYS_JAIL_REMOVE = 508 // { int jail_remove(int jid); } + SYS_CLOSEFROM = 509 // { int closefrom(int lowfd); } + SYS_LPATHCONF = 513 // { int lpathconf(char *path, int name); } + SYS_CAP_NEW = 514 // { int cap_new(int fd, uint64_t rights); } + SYS_CAP_GETRIGHTS = 515 // { int cap_getrights(int fd, \ + SYS_CAP_ENTER = 516 // { int cap_enter(void); } + SYS_CAP_GETMODE = 517 // { int cap_getmode(u_int *modep); } + SYS_PDFORK = 518 // { int pdfork(int *fdp, int flags); } + SYS_PDKILL = 519 // { int pdkill(int fd, int signum); } + SYS_PDGETPID = 520 // { int pdgetpid(int fd, pid_t *pidp); } + SYS_PSELECT = 522 // { int pselect(int nd, fd_set *in, \ + SYS_GETLOGINCLASS = 523 // { int getloginclass(char *namebuf, \ + SYS_SETLOGINCLASS = 524 // { int setloginclass(const char *namebuf); } + SYS_RCTL_GET_RACCT = 525 // { int rctl_get_racct(const void *inbufp, \ + SYS_RCTL_GET_RULES = 526 // { int rctl_get_rules(const void *inbufp, \ + SYS_RCTL_GET_LIMITS = 527 // { int rctl_get_limits(const void *inbufp, \ + SYS_RCTL_ADD_RULE = 528 // { int rctl_add_rule(const void *inbufp, \ + SYS_RCTL_REMOVE_RULE = 529 // { int rctl_remove_rule(const void *inbufp, \ + SYS_POSIX_FALLOCATE = 530 // { int posix_fallocate(int fd, \ + SYS_POSIX_FADVISE = 531 // { int posix_fadvise(int fd, off_t offset, \ + SYS_WAIT6 = 532 // { int wait6(idtype_t idtype, id_t id, \ + SYS_BINDAT = 538 // { int bindat(int fd, int s, caddr_t name, \ + SYS_CONNECTAT = 539 // { int connectat(int fd, int s, caddr_t name, \ + SYS_CHFLAGSAT = 540 // { int chflagsat(int fd, const char *path, \ + SYS_ACCEPT4 = 541 // { int accept4(int s, \ + SYS_PIPE2 = 542 // { int pipe2(int *fildes, int flags); } + SYS_PROCCTL = 544 // { int procctl(idtype_t idtype, id_t id, \ + SYS_PPOLL = 545 // { int ppoll(struct pollfd *fds, u_int nfds, \ +) diff --git a/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zsysnum_linux_386.go b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zsysnum_linux_386.go new file mode 100644 index 000000000..ba952c675 --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zsysnum_linux_386.go @@ -0,0 +1,355 @@ +// 
mksysnum_linux.pl /usr/include/asm/unistd_32.h +// MACHINE GENERATED BY THE ABOVE COMMAND; DO NOT EDIT + +// +build 386,linux + +package unix + +const ( + SYS_RESTART_SYSCALL = 0 + SYS_EXIT = 1 + SYS_FORK = 2 + SYS_READ = 3 + SYS_WRITE = 4 + SYS_OPEN = 5 + SYS_CLOSE = 6 + SYS_WAITPID = 7 + SYS_CREAT = 8 + SYS_LINK = 9 + SYS_UNLINK = 10 + SYS_EXECVE = 11 + SYS_CHDIR = 12 + SYS_TIME = 13 + SYS_MKNOD = 14 + SYS_CHMOD = 15 + SYS_LCHOWN = 16 + SYS_BREAK = 17 + SYS_OLDSTAT = 18 + SYS_LSEEK = 19 + SYS_GETPID = 20 + SYS_MOUNT = 21 + SYS_UMOUNT = 22 + SYS_SETUID = 23 + SYS_GETUID = 24 + SYS_STIME = 25 + SYS_PTRACE = 26 + SYS_ALARM = 27 + SYS_OLDFSTAT = 28 + SYS_PAUSE = 29 + SYS_UTIME = 30 + SYS_STTY = 31 + SYS_GTTY = 32 + SYS_ACCESS = 33 + SYS_NICE = 34 + SYS_FTIME = 35 + SYS_SYNC = 36 + SYS_KILL = 37 + SYS_RENAME = 38 + SYS_MKDIR = 39 + SYS_RMDIR = 40 + SYS_DUP = 41 + SYS_PIPE = 42 + SYS_TIMES = 43 + SYS_PROF = 44 + SYS_BRK = 45 + SYS_SETGID = 46 + SYS_GETGID = 47 + SYS_SIGNAL = 48 + SYS_GETEUID = 49 + SYS_GETEGID = 50 + SYS_ACCT = 51 + SYS_UMOUNT2 = 52 + SYS_LOCK = 53 + SYS_IOCTL = 54 + SYS_FCNTL = 55 + SYS_MPX = 56 + SYS_SETPGID = 57 + SYS_ULIMIT = 58 + SYS_OLDOLDUNAME = 59 + SYS_UMASK = 60 + SYS_CHROOT = 61 + SYS_USTAT = 62 + SYS_DUP2 = 63 + SYS_GETPPID = 64 + SYS_GETPGRP = 65 + SYS_SETSID = 66 + SYS_SIGACTION = 67 + SYS_SGETMASK = 68 + SYS_SSETMASK = 69 + SYS_SETREUID = 70 + SYS_SETREGID = 71 + SYS_SIGSUSPEND = 72 + SYS_SIGPENDING = 73 + SYS_SETHOSTNAME = 74 + SYS_SETRLIMIT = 75 + SYS_GETRLIMIT = 76 + SYS_GETRUSAGE = 77 + SYS_GETTIMEOFDAY = 78 + SYS_SETTIMEOFDAY = 79 + SYS_GETGROUPS = 80 + SYS_SETGROUPS = 81 + SYS_SELECT = 82 + SYS_SYMLINK = 83 + SYS_OLDLSTAT = 84 + SYS_READLINK = 85 + SYS_USELIB = 86 + SYS_SWAPON = 87 + SYS_REBOOT = 88 + SYS_READDIR = 89 + SYS_MMAP = 90 + SYS_MUNMAP = 91 + SYS_TRUNCATE = 92 + SYS_FTRUNCATE = 93 + SYS_FCHMOD = 94 + SYS_FCHOWN = 95 + SYS_GETPRIORITY = 96 + SYS_SETPRIORITY = 97 + SYS_PROFIL = 98 + SYS_STATFS = 99 + SYS_FSTATFS = 100 + SYS_IOPERM = 101 + SYS_SOCKETCALL = 102 + SYS_SYSLOG = 103 + SYS_SETITIMER = 104 + SYS_GETITIMER = 105 + SYS_STAT = 106 + SYS_LSTAT = 107 + SYS_FSTAT = 108 + SYS_OLDUNAME = 109 + SYS_IOPL = 110 + SYS_VHANGUP = 111 + SYS_IDLE = 112 + SYS_VM86OLD = 113 + SYS_WAIT4 = 114 + SYS_SWAPOFF = 115 + SYS_SYSINFO = 116 + SYS_IPC = 117 + SYS_FSYNC = 118 + SYS_SIGRETURN = 119 + SYS_CLONE = 120 + SYS_SETDOMAINNAME = 121 + SYS_UNAME = 122 + SYS_MODIFY_LDT = 123 + SYS_ADJTIMEX = 124 + SYS_MPROTECT = 125 + SYS_SIGPROCMASK = 126 + SYS_CREATE_MODULE = 127 + SYS_INIT_MODULE = 128 + SYS_DELETE_MODULE = 129 + SYS_GET_KERNEL_SYMS = 130 + SYS_QUOTACTL = 131 + SYS_GETPGID = 132 + SYS_FCHDIR = 133 + SYS_BDFLUSH = 134 + SYS_SYSFS = 135 + SYS_PERSONALITY = 136 + SYS_AFS_SYSCALL = 137 + SYS_SETFSUID = 138 + SYS_SETFSGID = 139 + SYS__LLSEEK = 140 + SYS_GETDENTS = 141 + SYS__NEWSELECT = 142 + SYS_FLOCK = 143 + SYS_MSYNC = 144 + SYS_READV = 145 + SYS_WRITEV = 146 + SYS_GETSID = 147 + SYS_FDATASYNC = 148 + SYS__SYSCTL = 149 + SYS_MLOCK = 150 + SYS_MUNLOCK = 151 + SYS_MLOCKALL = 152 + SYS_MUNLOCKALL = 153 + SYS_SCHED_SETPARAM = 154 + SYS_SCHED_GETPARAM = 155 + SYS_SCHED_SETSCHEDULER = 156 + SYS_SCHED_GETSCHEDULER = 157 + SYS_SCHED_YIELD = 158 + SYS_SCHED_GET_PRIORITY_MAX = 159 + SYS_SCHED_GET_PRIORITY_MIN = 160 + SYS_SCHED_RR_GET_INTERVAL = 161 + SYS_NANOSLEEP = 162 + SYS_MREMAP = 163 + SYS_SETRESUID = 164 + SYS_GETRESUID = 165 + SYS_VM86 = 166 + SYS_QUERY_MODULE = 167 + SYS_POLL = 168 + SYS_NFSSERVCTL = 169 + SYS_SETRESGID = 170 + SYS_GETRESGID = 171 + SYS_PRCTL = 
172 + SYS_RT_SIGRETURN = 173 + SYS_RT_SIGACTION = 174 + SYS_RT_SIGPROCMASK = 175 + SYS_RT_SIGPENDING = 176 + SYS_RT_SIGTIMEDWAIT = 177 + SYS_RT_SIGQUEUEINFO = 178 + SYS_RT_SIGSUSPEND = 179 + SYS_PREAD64 = 180 + SYS_PWRITE64 = 181 + SYS_CHOWN = 182 + SYS_GETCWD = 183 + SYS_CAPGET = 184 + SYS_CAPSET = 185 + SYS_SIGALTSTACK = 186 + SYS_SENDFILE = 187 + SYS_GETPMSG = 188 + SYS_PUTPMSG = 189 + SYS_VFORK = 190 + SYS_UGETRLIMIT = 191 + SYS_MMAP2 = 192 + SYS_TRUNCATE64 = 193 + SYS_FTRUNCATE64 = 194 + SYS_STAT64 = 195 + SYS_LSTAT64 = 196 + SYS_FSTAT64 = 197 + SYS_LCHOWN32 = 198 + SYS_GETUID32 = 199 + SYS_GETGID32 = 200 + SYS_GETEUID32 = 201 + SYS_GETEGID32 = 202 + SYS_SETREUID32 = 203 + SYS_SETREGID32 = 204 + SYS_GETGROUPS32 = 205 + SYS_SETGROUPS32 = 206 + SYS_FCHOWN32 = 207 + SYS_SETRESUID32 = 208 + SYS_GETRESUID32 = 209 + SYS_SETRESGID32 = 210 + SYS_GETRESGID32 = 211 + SYS_CHOWN32 = 212 + SYS_SETUID32 = 213 + SYS_SETGID32 = 214 + SYS_SETFSUID32 = 215 + SYS_SETFSGID32 = 216 + SYS_PIVOT_ROOT = 217 + SYS_MINCORE = 218 + SYS_MADVISE = 219 + SYS_MADVISE1 = 219 + SYS_GETDENTS64 = 220 + SYS_FCNTL64 = 221 + SYS_GETTID = 224 + SYS_READAHEAD = 225 + SYS_SETXATTR = 226 + SYS_LSETXATTR = 227 + SYS_FSETXATTR = 228 + SYS_GETXATTR = 229 + SYS_LGETXATTR = 230 + SYS_FGETXATTR = 231 + SYS_LISTXATTR = 232 + SYS_LLISTXATTR = 233 + SYS_FLISTXATTR = 234 + SYS_REMOVEXATTR = 235 + SYS_LREMOVEXATTR = 236 + SYS_FREMOVEXATTR = 237 + SYS_TKILL = 238 + SYS_SENDFILE64 = 239 + SYS_FUTEX = 240 + SYS_SCHED_SETAFFINITY = 241 + SYS_SCHED_GETAFFINITY = 242 + SYS_SET_THREAD_AREA = 243 + SYS_GET_THREAD_AREA = 244 + SYS_IO_SETUP = 245 + SYS_IO_DESTROY = 246 + SYS_IO_GETEVENTS = 247 + SYS_IO_SUBMIT = 248 + SYS_IO_CANCEL = 249 + SYS_FADVISE64 = 250 + SYS_EXIT_GROUP = 252 + SYS_LOOKUP_DCOOKIE = 253 + SYS_EPOLL_CREATE = 254 + SYS_EPOLL_CTL = 255 + SYS_EPOLL_WAIT = 256 + SYS_REMAP_FILE_PAGES = 257 + SYS_SET_TID_ADDRESS = 258 + SYS_TIMER_CREATE = 259 + SYS_TIMER_SETTIME = 260 + SYS_TIMER_GETTIME = 261 + SYS_TIMER_GETOVERRUN = 262 + SYS_TIMER_DELETE = 263 + SYS_CLOCK_SETTIME = 264 + SYS_CLOCK_GETTIME = 265 + SYS_CLOCK_GETRES = 266 + SYS_CLOCK_NANOSLEEP = 267 + SYS_STATFS64 = 268 + SYS_FSTATFS64 = 269 + SYS_TGKILL = 270 + SYS_UTIMES = 271 + SYS_FADVISE64_64 = 272 + SYS_VSERVER = 273 + SYS_MBIND = 274 + SYS_GET_MEMPOLICY = 275 + SYS_SET_MEMPOLICY = 276 + SYS_MQ_OPEN = 277 + SYS_MQ_UNLINK = 278 + SYS_MQ_TIMEDSEND = 279 + SYS_MQ_TIMEDRECEIVE = 280 + SYS_MQ_NOTIFY = 281 + SYS_MQ_GETSETATTR = 282 + SYS_KEXEC_LOAD = 283 + SYS_WAITID = 284 + SYS_ADD_KEY = 286 + SYS_REQUEST_KEY = 287 + SYS_KEYCTL = 288 + SYS_IOPRIO_SET = 289 + SYS_IOPRIO_GET = 290 + SYS_INOTIFY_INIT = 291 + SYS_INOTIFY_ADD_WATCH = 292 + SYS_INOTIFY_RM_WATCH = 293 + SYS_MIGRATE_PAGES = 294 + SYS_OPENAT = 295 + SYS_MKDIRAT = 296 + SYS_MKNODAT = 297 + SYS_FCHOWNAT = 298 + SYS_FUTIMESAT = 299 + SYS_FSTATAT64 = 300 + SYS_UNLINKAT = 301 + SYS_RENAMEAT = 302 + SYS_LINKAT = 303 + SYS_SYMLINKAT = 304 + SYS_READLINKAT = 305 + SYS_FCHMODAT = 306 + SYS_FACCESSAT = 307 + SYS_PSELECT6 = 308 + SYS_PPOLL = 309 + SYS_UNSHARE = 310 + SYS_SET_ROBUST_LIST = 311 + SYS_GET_ROBUST_LIST = 312 + SYS_SPLICE = 313 + SYS_SYNC_FILE_RANGE = 314 + SYS_TEE = 315 + SYS_VMSPLICE = 316 + SYS_MOVE_PAGES = 317 + SYS_GETCPU = 318 + SYS_EPOLL_PWAIT = 319 + SYS_UTIMENSAT = 320 + SYS_SIGNALFD = 321 + SYS_TIMERFD_CREATE = 322 + SYS_EVENTFD = 323 + SYS_FALLOCATE = 324 + SYS_TIMERFD_SETTIME = 325 + SYS_TIMERFD_GETTIME = 326 + SYS_SIGNALFD4 = 327 + SYS_EVENTFD2 = 328 + SYS_EPOLL_CREATE1 = 329 + SYS_DUP3 = 330 + SYS_PIPE2 = 331 
+ SYS_INOTIFY_INIT1 = 332 + SYS_PREADV = 333 + SYS_PWRITEV = 334 + SYS_RT_TGSIGQUEUEINFO = 335 + SYS_PERF_EVENT_OPEN = 336 + SYS_RECVMMSG = 337 + SYS_FANOTIFY_INIT = 338 + SYS_FANOTIFY_MARK = 339 + SYS_PRLIMIT64 = 340 + SYS_NAME_TO_HANDLE_AT = 341 + SYS_OPEN_BY_HANDLE_AT = 342 + SYS_CLOCK_ADJTIME = 343 + SYS_SYNCFS = 344 + SYS_SENDMMSG = 345 + SYS_SETNS = 346 + SYS_PROCESS_VM_READV = 347 + SYS_PROCESS_VM_WRITEV = 348 +) diff --git a/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zsysnum_linux_amd64.go b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zsysnum_linux_amd64.go new file mode 100644 index 000000000..ddac31f58 --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zsysnum_linux_amd64.go @@ -0,0 +1,321 @@ +// mksysnum_linux.pl /usr/include/asm/unistd_64.h +// MACHINE GENERATED BY THE ABOVE COMMAND; DO NOT EDIT + +// +build amd64,linux + +package unix + +const ( + SYS_READ = 0 + SYS_WRITE = 1 + SYS_OPEN = 2 + SYS_CLOSE = 3 + SYS_STAT = 4 + SYS_FSTAT = 5 + SYS_LSTAT = 6 + SYS_POLL = 7 + SYS_LSEEK = 8 + SYS_MMAP = 9 + SYS_MPROTECT = 10 + SYS_MUNMAP = 11 + SYS_BRK = 12 + SYS_RT_SIGACTION = 13 + SYS_RT_SIGPROCMASK = 14 + SYS_RT_SIGRETURN = 15 + SYS_IOCTL = 16 + SYS_PREAD64 = 17 + SYS_PWRITE64 = 18 + SYS_READV = 19 + SYS_WRITEV = 20 + SYS_ACCESS = 21 + SYS_PIPE = 22 + SYS_SELECT = 23 + SYS_SCHED_YIELD = 24 + SYS_MREMAP = 25 + SYS_MSYNC = 26 + SYS_MINCORE = 27 + SYS_MADVISE = 28 + SYS_SHMGET = 29 + SYS_SHMAT = 30 + SYS_SHMCTL = 31 + SYS_DUP = 32 + SYS_DUP2 = 33 + SYS_PAUSE = 34 + SYS_NANOSLEEP = 35 + SYS_GETITIMER = 36 + SYS_ALARM = 37 + SYS_SETITIMER = 38 + SYS_GETPID = 39 + SYS_SENDFILE = 40 + SYS_SOCKET = 41 + SYS_CONNECT = 42 + SYS_ACCEPT = 43 + SYS_SENDTO = 44 + SYS_RECVFROM = 45 + SYS_SENDMSG = 46 + SYS_RECVMSG = 47 + SYS_SHUTDOWN = 48 + SYS_BIND = 49 + SYS_LISTEN = 50 + SYS_GETSOCKNAME = 51 + SYS_GETPEERNAME = 52 + SYS_SOCKETPAIR = 53 + SYS_SETSOCKOPT = 54 + SYS_GETSOCKOPT = 55 + SYS_CLONE = 56 + SYS_FORK = 57 + SYS_VFORK = 58 + SYS_EXECVE = 59 + SYS_EXIT = 60 + SYS_WAIT4 = 61 + SYS_KILL = 62 + SYS_UNAME = 63 + SYS_SEMGET = 64 + SYS_SEMOP = 65 + SYS_SEMCTL = 66 + SYS_SHMDT = 67 + SYS_MSGGET = 68 + SYS_MSGSND = 69 + SYS_MSGRCV = 70 + SYS_MSGCTL = 71 + SYS_FCNTL = 72 + SYS_FLOCK = 73 + SYS_FSYNC = 74 + SYS_FDATASYNC = 75 + SYS_TRUNCATE = 76 + SYS_FTRUNCATE = 77 + SYS_GETDENTS = 78 + SYS_GETCWD = 79 + SYS_CHDIR = 80 + SYS_FCHDIR = 81 + SYS_RENAME = 82 + SYS_MKDIR = 83 + SYS_RMDIR = 84 + SYS_CREAT = 85 + SYS_LINK = 86 + SYS_UNLINK = 87 + SYS_SYMLINK = 88 + SYS_READLINK = 89 + SYS_CHMOD = 90 + SYS_FCHMOD = 91 + SYS_CHOWN = 92 + SYS_FCHOWN = 93 + SYS_LCHOWN = 94 + SYS_UMASK = 95 + SYS_GETTIMEOFDAY = 96 + SYS_GETRLIMIT = 97 + SYS_GETRUSAGE = 98 + SYS_SYSINFO = 99 + SYS_TIMES = 100 + SYS_PTRACE = 101 + SYS_GETUID = 102 + SYS_SYSLOG = 103 + SYS_GETGID = 104 + SYS_SETUID = 105 + SYS_SETGID = 106 + SYS_GETEUID = 107 + SYS_GETEGID = 108 + SYS_SETPGID = 109 + SYS_GETPPID = 110 + SYS_GETPGRP = 111 + SYS_SETSID = 112 + SYS_SETREUID = 113 + SYS_SETREGID = 114 + SYS_GETGROUPS = 115 + SYS_SETGROUPS = 116 + SYS_SETRESUID = 117 + SYS_GETRESUID = 118 + SYS_SETRESGID = 119 + SYS_GETRESGID = 120 + SYS_GETPGID = 121 + SYS_SETFSUID = 122 + SYS_SETFSGID = 123 + SYS_GETSID = 124 + SYS_CAPGET = 125 + SYS_CAPSET = 126 + SYS_RT_SIGPENDING = 127 + SYS_RT_SIGTIMEDWAIT = 128 + SYS_RT_SIGQUEUEINFO = 129 + SYS_RT_SIGSUSPEND = 130 + SYS_SIGALTSTACK = 131 + SYS_UTIME = 132 + SYS_MKNOD = 133 + SYS_USELIB = 134 + SYS_PERSONALITY = 135 
+ SYS_USTAT = 136 + SYS_STATFS = 137 + SYS_FSTATFS = 138 + SYS_SYSFS = 139 + SYS_GETPRIORITY = 140 + SYS_SETPRIORITY = 141 + SYS_SCHED_SETPARAM = 142 + SYS_SCHED_GETPARAM = 143 + SYS_SCHED_SETSCHEDULER = 144 + SYS_SCHED_GETSCHEDULER = 145 + SYS_SCHED_GET_PRIORITY_MAX = 146 + SYS_SCHED_GET_PRIORITY_MIN = 147 + SYS_SCHED_RR_GET_INTERVAL = 148 + SYS_MLOCK = 149 + SYS_MUNLOCK = 150 + SYS_MLOCKALL = 151 + SYS_MUNLOCKALL = 152 + SYS_VHANGUP = 153 + SYS_MODIFY_LDT = 154 + SYS_PIVOT_ROOT = 155 + SYS__SYSCTL = 156 + SYS_PRCTL = 157 + SYS_ARCH_PRCTL = 158 + SYS_ADJTIMEX = 159 + SYS_SETRLIMIT = 160 + SYS_CHROOT = 161 + SYS_SYNC = 162 + SYS_ACCT = 163 + SYS_SETTIMEOFDAY = 164 + SYS_MOUNT = 165 + SYS_UMOUNT2 = 166 + SYS_SWAPON = 167 + SYS_SWAPOFF = 168 + SYS_REBOOT = 169 + SYS_SETHOSTNAME = 170 + SYS_SETDOMAINNAME = 171 + SYS_IOPL = 172 + SYS_IOPERM = 173 + SYS_CREATE_MODULE = 174 + SYS_INIT_MODULE = 175 + SYS_DELETE_MODULE = 176 + SYS_GET_KERNEL_SYMS = 177 + SYS_QUERY_MODULE = 178 + SYS_QUOTACTL = 179 + SYS_NFSSERVCTL = 180 + SYS_GETPMSG = 181 + SYS_PUTPMSG = 182 + SYS_AFS_SYSCALL = 183 + SYS_TUXCALL = 184 + SYS_SECURITY = 185 + SYS_GETTID = 186 + SYS_READAHEAD = 187 + SYS_SETXATTR = 188 + SYS_LSETXATTR = 189 + SYS_FSETXATTR = 190 + SYS_GETXATTR = 191 + SYS_LGETXATTR = 192 + SYS_FGETXATTR = 193 + SYS_LISTXATTR = 194 + SYS_LLISTXATTR = 195 + SYS_FLISTXATTR = 196 + SYS_REMOVEXATTR = 197 + SYS_LREMOVEXATTR = 198 + SYS_FREMOVEXATTR = 199 + SYS_TKILL = 200 + SYS_TIME = 201 + SYS_FUTEX = 202 + SYS_SCHED_SETAFFINITY = 203 + SYS_SCHED_GETAFFINITY = 204 + SYS_SET_THREAD_AREA = 205 + SYS_IO_SETUP = 206 + SYS_IO_DESTROY = 207 + SYS_IO_GETEVENTS = 208 + SYS_IO_SUBMIT = 209 + SYS_IO_CANCEL = 210 + SYS_GET_THREAD_AREA = 211 + SYS_LOOKUP_DCOOKIE = 212 + SYS_EPOLL_CREATE = 213 + SYS_EPOLL_CTL_OLD = 214 + SYS_EPOLL_WAIT_OLD = 215 + SYS_REMAP_FILE_PAGES = 216 + SYS_GETDENTS64 = 217 + SYS_SET_TID_ADDRESS = 218 + SYS_RESTART_SYSCALL = 219 + SYS_SEMTIMEDOP = 220 + SYS_FADVISE64 = 221 + SYS_TIMER_CREATE = 222 + SYS_TIMER_SETTIME = 223 + SYS_TIMER_GETTIME = 224 + SYS_TIMER_GETOVERRUN = 225 + SYS_TIMER_DELETE = 226 + SYS_CLOCK_SETTIME = 227 + SYS_CLOCK_GETTIME = 228 + SYS_CLOCK_GETRES = 229 + SYS_CLOCK_NANOSLEEP = 230 + SYS_EXIT_GROUP = 231 + SYS_EPOLL_WAIT = 232 + SYS_EPOLL_CTL = 233 + SYS_TGKILL = 234 + SYS_UTIMES = 235 + SYS_VSERVER = 236 + SYS_MBIND = 237 + SYS_SET_MEMPOLICY = 238 + SYS_GET_MEMPOLICY = 239 + SYS_MQ_OPEN = 240 + SYS_MQ_UNLINK = 241 + SYS_MQ_TIMEDSEND = 242 + SYS_MQ_TIMEDRECEIVE = 243 + SYS_MQ_NOTIFY = 244 + SYS_MQ_GETSETATTR = 245 + SYS_KEXEC_LOAD = 246 + SYS_WAITID = 247 + SYS_ADD_KEY = 248 + SYS_REQUEST_KEY = 249 + SYS_KEYCTL = 250 + SYS_IOPRIO_SET = 251 + SYS_IOPRIO_GET = 252 + SYS_INOTIFY_INIT = 253 + SYS_INOTIFY_ADD_WATCH = 254 + SYS_INOTIFY_RM_WATCH = 255 + SYS_MIGRATE_PAGES = 256 + SYS_OPENAT = 257 + SYS_MKDIRAT = 258 + SYS_MKNODAT = 259 + SYS_FCHOWNAT = 260 + SYS_FUTIMESAT = 261 + SYS_NEWFSTATAT = 262 + SYS_UNLINKAT = 263 + SYS_RENAMEAT = 264 + SYS_LINKAT = 265 + SYS_SYMLINKAT = 266 + SYS_READLINKAT = 267 + SYS_FCHMODAT = 268 + SYS_FACCESSAT = 269 + SYS_PSELECT6 = 270 + SYS_PPOLL = 271 + SYS_UNSHARE = 272 + SYS_SET_ROBUST_LIST = 273 + SYS_GET_ROBUST_LIST = 274 + SYS_SPLICE = 275 + SYS_TEE = 276 + SYS_SYNC_FILE_RANGE = 277 + SYS_VMSPLICE = 278 + SYS_MOVE_PAGES = 279 + SYS_UTIMENSAT = 280 + SYS_EPOLL_PWAIT = 281 + SYS_SIGNALFD = 282 + SYS_TIMERFD_CREATE = 283 + SYS_EVENTFD = 284 + SYS_FALLOCATE = 285 + SYS_TIMERFD_SETTIME = 286 + SYS_TIMERFD_GETTIME = 287 + SYS_ACCEPT4 = 288 + SYS_SIGNALFD4 = 289 + 
SYS_EVENTFD2 = 290 + SYS_EPOLL_CREATE1 = 291 + SYS_DUP3 = 292 + SYS_PIPE2 = 293 + SYS_INOTIFY_INIT1 = 294 + SYS_PREADV = 295 + SYS_PWRITEV = 296 + SYS_RT_TGSIGQUEUEINFO = 297 + SYS_PERF_EVENT_OPEN = 298 + SYS_RECVMMSG = 299 + SYS_FANOTIFY_INIT = 300 + SYS_FANOTIFY_MARK = 301 + SYS_PRLIMIT64 = 302 + SYS_NAME_TO_HANDLE_AT = 303 + SYS_OPEN_BY_HANDLE_AT = 304 + SYS_CLOCK_ADJTIME = 305 + SYS_SYNCFS = 306 + SYS_SENDMMSG = 307 + SYS_SETNS = 308 + SYS_GETCPU = 309 + SYS_PROCESS_VM_READV = 310 + SYS_PROCESS_VM_WRITEV = 311 +) diff --git a/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zsysnum_linux_arm.go b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zsysnum_linux_arm.go new file mode 100644 index 000000000..45ced17fc --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zsysnum_linux_arm.go @@ -0,0 +1,356 @@ +// mksysnum_linux.pl +// MACHINE GENERATED BY THE ABOVE COMMAND; DO NOT EDIT + +// +build arm,linux + +package unix + +const ( + SYS_OABI_SYSCALL_BASE = 0 + SYS_SYSCALL_BASE = 0 + SYS_RESTART_SYSCALL = 0 + SYS_EXIT = 1 + SYS_FORK = 2 + SYS_READ = 3 + SYS_WRITE = 4 + SYS_OPEN = 5 + SYS_CLOSE = 6 + SYS_CREAT = 8 + SYS_LINK = 9 + SYS_UNLINK = 10 + SYS_EXECVE = 11 + SYS_CHDIR = 12 + SYS_TIME = 13 + SYS_MKNOD = 14 + SYS_CHMOD = 15 + SYS_LCHOWN = 16 + SYS_LSEEK = 19 + SYS_GETPID = 20 + SYS_MOUNT = 21 + SYS_UMOUNT = 22 + SYS_SETUID = 23 + SYS_GETUID = 24 + SYS_STIME = 25 + SYS_PTRACE = 26 + SYS_ALARM = 27 + SYS_PAUSE = 29 + SYS_UTIME = 30 + SYS_ACCESS = 33 + SYS_NICE = 34 + SYS_SYNC = 36 + SYS_KILL = 37 + SYS_RENAME = 38 + SYS_MKDIR = 39 + SYS_RMDIR = 40 + SYS_DUP = 41 + SYS_PIPE = 42 + SYS_TIMES = 43 + SYS_BRK = 45 + SYS_SETGID = 46 + SYS_GETGID = 47 + SYS_GETEUID = 49 + SYS_GETEGID = 50 + SYS_ACCT = 51 + SYS_UMOUNT2 = 52 + SYS_IOCTL = 54 + SYS_FCNTL = 55 + SYS_SETPGID = 57 + SYS_UMASK = 60 + SYS_CHROOT = 61 + SYS_USTAT = 62 + SYS_DUP2 = 63 + SYS_GETPPID = 64 + SYS_GETPGRP = 65 + SYS_SETSID = 66 + SYS_SIGACTION = 67 + SYS_SETREUID = 70 + SYS_SETREGID = 71 + SYS_SIGSUSPEND = 72 + SYS_SIGPENDING = 73 + SYS_SETHOSTNAME = 74 + SYS_SETRLIMIT = 75 + SYS_GETRLIMIT = 76 + SYS_GETRUSAGE = 77 + SYS_GETTIMEOFDAY = 78 + SYS_SETTIMEOFDAY = 79 + SYS_GETGROUPS = 80 + SYS_SETGROUPS = 81 + SYS_SELECT = 82 + SYS_SYMLINK = 83 + SYS_READLINK = 85 + SYS_USELIB = 86 + SYS_SWAPON = 87 + SYS_REBOOT = 88 + SYS_READDIR = 89 + SYS_MMAP = 90 + SYS_MUNMAP = 91 + SYS_TRUNCATE = 92 + SYS_FTRUNCATE = 93 + SYS_FCHMOD = 94 + SYS_FCHOWN = 95 + SYS_GETPRIORITY = 96 + SYS_SETPRIORITY = 97 + SYS_STATFS = 99 + SYS_FSTATFS = 100 + SYS_SOCKETCALL = 102 + SYS_SYSLOG = 103 + SYS_SETITIMER = 104 + SYS_GETITIMER = 105 + SYS_STAT = 106 + SYS_LSTAT = 107 + SYS_FSTAT = 108 + SYS_VHANGUP = 111 + SYS_SYSCALL = 113 + SYS_WAIT4 = 114 + SYS_SWAPOFF = 115 + SYS_SYSINFO = 116 + SYS_IPC = 117 + SYS_FSYNC = 118 + SYS_SIGRETURN = 119 + SYS_CLONE = 120 + SYS_SETDOMAINNAME = 121 + SYS_UNAME = 122 + SYS_ADJTIMEX = 124 + SYS_MPROTECT = 125 + SYS_SIGPROCMASK = 126 + SYS_INIT_MODULE = 128 + SYS_DELETE_MODULE = 129 + SYS_QUOTACTL = 131 + SYS_GETPGID = 132 + SYS_FCHDIR = 133 + SYS_BDFLUSH = 134 + SYS_SYSFS = 135 + SYS_PERSONALITY = 136 + SYS_SETFSUID = 138 + SYS_SETFSGID = 139 + SYS__LLSEEK = 140 + SYS_GETDENTS = 141 + SYS__NEWSELECT = 142 + SYS_FLOCK = 143 + SYS_MSYNC = 144 + SYS_READV = 145 + SYS_WRITEV = 146 + SYS_GETSID = 147 + SYS_FDATASYNC = 148 + SYS__SYSCTL = 149 + SYS_MLOCK = 150 + SYS_MUNLOCK = 151 + SYS_MLOCKALL = 152 + SYS_MUNLOCKALL = 153 + SYS_SCHED_SETPARAM = 
154 + SYS_SCHED_GETPARAM = 155 + SYS_SCHED_SETSCHEDULER = 156 + SYS_SCHED_GETSCHEDULER = 157 + SYS_SCHED_YIELD = 158 + SYS_SCHED_GET_PRIORITY_MAX = 159 + SYS_SCHED_GET_PRIORITY_MIN = 160 + SYS_SCHED_RR_GET_INTERVAL = 161 + SYS_NANOSLEEP = 162 + SYS_MREMAP = 163 + SYS_SETRESUID = 164 + SYS_GETRESUID = 165 + SYS_POLL = 168 + SYS_NFSSERVCTL = 169 + SYS_SETRESGID = 170 + SYS_GETRESGID = 171 + SYS_PRCTL = 172 + SYS_RT_SIGRETURN = 173 + SYS_RT_SIGACTION = 174 + SYS_RT_SIGPROCMASK = 175 + SYS_RT_SIGPENDING = 176 + SYS_RT_SIGTIMEDWAIT = 177 + SYS_RT_SIGQUEUEINFO = 178 + SYS_RT_SIGSUSPEND = 179 + SYS_PREAD64 = 180 + SYS_PWRITE64 = 181 + SYS_CHOWN = 182 + SYS_GETCWD = 183 + SYS_CAPGET = 184 + SYS_CAPSET = 185 + SYS_SIGALTSTACK = 186 + SYS_SENDFILE = 187 + SYS_VFORK = 190 + SYS_UGETRLIMIT = 191 + SYS_MMAP2 = 192 + SYS_TRUNCATE64 = 193 + SYS_FTRUNCATE64 = 194 + SYS_STAT64 = 195 + SYS_LSTAT64 = 196 + SYS_FSTAT64 = 197 + SYS_LCHOWN32 = 198 + SYS_GETUID32 = 199 + SYS_GETGID32 = 200 + SYS_GETEUID32 = 201 + SYS_GETEGID32 = 202 + SYS_SETREUID32 = 203 + SYS_SETREGID32 = 204 + SYS_GETGROUPS32 = 205 + SYS_SETGROUPS32 = 206 + SYS_FCHOWN32 = 207 + SYS_SETRESUID32 = 208 + SYS_GETRESUID32 = 209 + SYS_SETRESGID32 = 210 + SYS_GETRESGID32 = 211 + SYS_CHOWN32 = 212 + SYS_SETUID32 = 213 + SYS_SETGID32 = 214 + SYS_SETFSUID32 = 215 + SYS_SETFSGID32 = 216 + SYS_GETDENTS64 = 217 + SYS_PIVOT_ROOT = 218 + SYS_MINCORE = 219 + SYS_MADVISE = 220 + SYS_FCNTL64 = 221 + SYS_GETTID = 224 + SYS_READAHEAD = 225 + SYS_SETXATTR = 226 + SYS_LSETXATTR = 227 + SYS_FSETXATTR = 228 + SYS_GETXATTR = 229 + SYS_LGETXATTR = 230 + SYS_FGETXATTR = 231 + SYS_LISTXATTR = 232 + SYS_LLISTXATTR = 233 + SYS_FLISTXATTR = 234 + SYS_REMOVEXATTR = 235 + SYS_LREMOVEXATTR = 236 + SYS_FREMOVEXATTR = 237 + SYS_TKILL = 238 + SYS_SENDFILE64 = 239 + SYS_FUTEX = 240 + SYS_SCHED_SETAFFINITY = 241 + SYS_SCHED_GETAFFINITY = 242 + SYS_IO_SETUP = 243 + SYS_IO_DESTROY = 244 + SYS_IO_GETEVENTS = 245 + SYS_IO_SUBMIT = 246 + SYS_IO_CANCEL = 247 + SYS_EXIT_GROUP = 248 + SYS_LOOKUP_DCOOKIE = 249 + SYS_EPOLL_CREATE = 250 + SYS_EPOLL_CTL = 251 + SYS_EPOLL_WAIT = 252 + SYS_REMAP_FILE_PAGES = 253 + SYS_SET_TID_ADDRESS = 256 + SYS_TIMER_CREATE = 257 + SYS_TIMER_SETTIME = 258 + SYS_TIMER_GETTIME = 259 + SYS_TIMER_GETOVERRUN = 260 + SYS_TIMER_DELETE = 261 + SYS_CLOCK_SETTIME = 262 + SYS_CLOCK_GETTIME = 263 + SYS_CLOCK_GETRES = 264 + SYS_CLOCK_NANOSLEEP = 265 + SYS_STATFS64 = 266 + SYS_FSTATFS64 = 267 + SYS_TGKILL = 268 + SYS_UTIMES = 269 + SYS_ARM_FADVISE64_64 = 270 + SYS_PCICONFIG_IOBASE = 271 + SYS_PCICONFIG_READ = 272 + SYS_PCICONFIG_WRITE = 273 + SYS_MQ_OPEN = 274 + SYS_MQ_UNLINK = 275 + SYS_MQ_TIMEDSEND = 276 + SYS_MQ_TIMEDRECEIVE = 277 + SYS_MQ_NOTIFY = 278 + SYS_MQ_GETSETATTR = 279 + SYS_WAITID = 280 + SYS_SOCKET = 281 + SYS_BIND = 282 + SYS_CONNECT = 283 + SYS_LISTEN = 284 + SYS_ACCEPT = 285 + SYS_GETSOCKNAME = 286 + SYS_GETPEERNAME = 287 + SYS_SOCKETPAIR = 288 + SYS_SEND = 289 + SYS_SENDTO = 290 + SYS_RECV = 291 + SYS_RECVFROM = 292 + SYS_SHUTDOWN = 293 + SYS_SETSOCKOPT = 294 + SYS_GETSOCKOPT = 295 + SYS_SENDMSG = 296 + SYS_RECVMSG = 297 + SYS_SEMOP = 298 + SYS_SEMGET = 299 + SYS_SEMCTL = 300 + SYS_MSGSND = 301 + SYS_MSGRCV = 302 + SYS_MSGGET = 303 + SYS_MSGCTL = 304 + SYS_SHMAT = 305 + SYS_SHMDT = 306 + SYS_SHMGET = 307 + SYS_SHMCTL = 308 + SYS_ADD_KEY = 309 + SYS_REQUEST_KEY = 310 + SYS_KEYCTL = 311 + SYS_SEMTIMEDOP = 312 + SYS_VSERVER = 313 + SYS_IOPRIO_SET = 314 + SYS_IOPRIO_GET = 315 + SYS_INOTIFY_INIT = 316 + SYS_INOTIFY_ADD_WATCH = 317 + SYS_INOTIFY_RM_WATCH = 318 + 
SYS_MBIND = 319 + SYS_GET_MEMPOLICY = 320 + SYS_SET_MEMPOLICY = 321 + SYS_OPENAT = 322 + SYS_MKDIRAT = 323 + SYS_MKNODAT = 324 + SYS_FCHOWNAT = 325 + SYS_FUTIMESAT = 326 + SYS_FSTATAT64 = 327 + SYS_UNLINKAT = 328 + SYS_RENAMEAT = 329 + SYS_LINKAT = 330 + SYS_SYMLINKAT = 331 + SYS_READLINKAT = 332 + SYS_FCHMODAT = 333 + SYS_FACCESSAT = 334 + SYS_PSELECT6 = 335 + SYS_PPOLL = 336 + SYS_UNSHARE = 337 + SYS_SET_ROBUST_LIST = 338 + SYS_GET_ROBUST_LIST = 339 + SYS_SPLICE = 340 + SYS_ARM_SYNC_FILE_RANGE = 341 + SYS_TEE = 342 + SYS_VMSPLICE = 343 + SYS_MOVE_PAGES = 344 + SYS_GETCPU = 345 + SYS_EPOLL_PWAIT = 346 + SYS_KEXEC_LOAD = 347 + SYS_UTIMENSAT = 348 + SYS_SIGNALFD = 349 + SYS_TIMERFD_CREATE = 350 + SYS_EVENTFD = 351 + SYS_FALLOCATE = 352 + SYS_TIMERFD_SETTIME = 353 + SYS_TIMERFD_GETTIME = 354 + SYS_SIGNALFD4 = 355 + SYS_EVENTFD2 = 356 + SYS_EPOLL_CREATE1 = 357 + SYS_DUP3 = 358 + SYS_PIPE2 = 359 + SYS_INOTIFY_INIT1 = 360 + SYS_PREADV = 361 + SYS_PWRITEV = 362 + SYS_RT_TGSIGQUEUEINFO = 363 + SYS_PERF_EVENT_OPEN = 364 + SYS_RECVMMSG = 365 + SYS_ACCEPT4 = 366 + SYS_FANOTIFY_INIT = 367 + SYS_FANOTIFY_MARK = 368 + SYS_PRLIMIT64 = 369 + SYS_NAME_TO_HANDLE_AT = 370 + SYS_OPEN_BY_HANDLE_AT = 371 + SYS_CLOCK_ADJTIME = 372 + SYS_SYNCFS = 373 + SYS_SENDMMSG = 374 + SYS_SETNS = 375 + SYS_PROCESS_VM_READV = 376 + SYS_PROCESS_VM_WRITEV = 377 +) diff --git a/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zsysnum_linux_arm64.go b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zsysnum_linux_arm64.go new file mode 100644 index 000000000..2e9514f28 --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zsysnum_linux_arm64.go @@ -0,0 +1,272 @@ +// mksysnum_linux.pl /usr/include/asm-generic/unistd.h +// MACHINE GENERATED BY THE ABOVE COMMAND; DO NOT EDIT + +// +build arm64,linux + +package unix + +const ( + SYS_IO_SETUP = 0 + SYS_IO_DESTROY = 1 + SYS_IO_SUBMIT = 2 + SYS_IO_CANCEL = 3 + SYS_IO_GETEVENTS = 4 + SYS_SETXATTR = 5 + SYS_LSETXATTR = 6 + SYS_FSETXATTR = 7 + SYS_GETXATTR = 8 + SYS_LGETXATTR = 9 + SYS_FGETXATTR = 10 + SYS_LISTXATTR = 11 + SYS_LLISTXATTR = 12 + SYS_FLISTXATTR = 13 + SYS_REMOVEXATTR = 14 + SYS_LREMOVEXATTR = 15 + SYS_FREMOVEXATTR = 16 + SYS_GETCWD = 17 + SYS_LOOKUP_DCOOKIE = 18 + SYS_EVENTFD2 = 19 + SYS_EPOLL_CREATE1 = 20 + SYS_EPOLL_CTL = 21 + SYS_EPOLL_PWAIT = 22 + SYS_DUP = 23 + SYS_DUP3 = 24 + SYS_FCNTL = 25 + SYS_INOTIFY_INIT1 = 26 + SYS_INOTIFY_ADD_WATCH = 27 + SYS_INOTIFY_RM_WATCH = 28 + SYS_IOCTL = 29 + SYS_IOPRIO_SET = 30 + SYS_IOPRIO_GET = 31 + SYS_FLOCK = 32 + SYS_MKNODAT = 33 + SYS_MKDIRAT = 34 + SYS_UNLINKAT = 35 + SYS_SYMLINKAT = 36 + SYS_LINKAT = 37 + SYS_RENAMEAT = 38 + SYS_UMOUNT2 = 39 + SYS_MOUNT = 40 + SYS_PIVOT_ROOT = 41 + SYS_NFSSERVCTL = 42 + SYS_STATFS = 43 + SYS_FSTATFS = 44 + SYS_TRUNCATE = 45 + SYS_FTRUNCATE = 46 + SYS_FALLOCATE = 47 + SYS_FACCESSAT = 48 + SYS_CHDIR = 49 + SYS_FCHDIR = 50 + SYS_CHROOT = 51 + SYS_FCHMOD = 52 + SYS_FCHMODAT = 53 + SYS_FCHOWNAT = 54 + SYS_FCHOWN = 55 + SYS_OPENAT = 56 + SYS_CLOSE = 57 + SYS_VHANGUP = 58 + SYS_PIPE2 = 59 + SYS_QUOTACTL = 60 + SYS_GETDENTS64 = 61 + SYS_LSEEK = 62 + SYS_READ = 63 + SYS_WRITE = 64 + SYS_READV = 65 + SYS_WRITEV = 66 + SYS_PREAD64 = 67 + SYS_PWRITE64 = 68 + SYS_PREADV = 69 + SYS_PWRITEV = 70 + SYS_SENDFILE = 71 + SYS_PSELECT6 = 72 + SYS_PPOLL = 73 + SYS_SIGNALFD4 = 74 + SYS_VMSPLICE = 75 + SYS_SPLICE = 76 + SYS_TEE = 77 + SYS_READLINKAT = 78 + SYS_FSTATAT = 79 + SYS_FSTAT = 80 + SYS_SYNC = 81 + SYS_FSYNC = 82 + 
SYS_FDATASYNC = 83 + SYS_SYNC_FILE_RANGE = 84 + SYS_TIMERFD_CREATE = 85 + SYS_TIMERFD_SETTIME = 86 + SYS_TIMERFD_GETTIME = 87 + SYS_UTIMENSAT = 88 + SYS_ACCT = 89 + SYS_CAPGET = 90 + SYS_CAPSET = 91 + SYS_PERSONALITY = 92 + SYS_EXIT = 93 + SYS_EXIT_GROUP = 94 + SYS_WAITID = 95 + SYS_SET_TID_ADDRESS = 96 + SYS_UNSHARE = 97 + SYS_FUTEX = 98 + SYS_SET_ROBUST_LIST = 99 + SYS_GET_ROBUST_LIST = 100 + SYS_NANOSLEEP = 101 + SYS_GETITIMER = 102 + SYS_SETITIMER = 103 + SYS_KEXEC_LOAD = 104 + SYS_INIT_MODULE = 105 + SYS_DELETE_MODULE = 106 + SYS_TIMER_CREATE = 107 + SYS_TIMER_GETTIME = 108 + SYS_TIMER_GETOVERRUN = 109 + SYS_TIMER_SETTIME = 110 + SYS_TIMER_DELETE = 111 + SYS_CLOCK_SETTIME = 112 + SYS_CLOCK_GETTIME = 113 + SYS_CLOCK_GETRES = 114 + SYS_CLOCK_NANOSLEEP = 115 + SYS_SYSLOG = 116 + SYS_PTRACE = 117 + SYS_SCHED_SETPARAM = 118 + SYS_SCHED_SETSCHEDULER = 119 + SYS_SCHED_GETSCHEDULER = 120 + SYS_SCHED_GETPARAM = 121 + SYS_SCHED_SETAFFINITY = 122 + SYS_SCHED_GETAFFINITY = 123 + SYS_SCHED_YIELD = 124 + SYS_SCHED_GET_PRIORITY_MAX = 125 + SYS_SCHED_GET_PRIORITY_MIN = 126 + SYS_SCHED_RR_GET_INTERVAL = 127 + SYS_RESTART_SYSCALL = 128 + SYS_KILL = 129 + SYS_TKILL = 130 + SYS_TGKILL = 131 + SYS_SIGALTSTACK = 132 + SYS_RT_SIGSUSPEND = 133 + SYS_RT_SIGACTION = 134 + SYS_RT_SIGPROCMASK = 135 + SYS_RT_SIGPENDING = 136 + SYS_RT_SIGTIMEDWAIT = 137 + SYS_RT_SIGQUEUEINFO = 138 + SYS_RT_SIGRETURN = 139 + SYS_SETPRIORITY = 140 + SYS_GETPRIORITY = 141 + SYS_REBOOT = 142 + SYS_SETREGID = 143 + SYS_SETGID = 144 + SYS_SETREUID = 145 + SYS_SETUID = 146 + SYS_SETRESUID = 147 + SYS_GETRESUID = 148 + SYS_SETRESGID = 149 + SYS_GETRESGID = 150 + SYS_SETFSUID = 151 + SYS_SETFSGID = 152 + SYS_TIMES = 153 + SYS_SETPGID = 154 + SYS_GETPGID = 155 + SYS_GETSID = 156 + SYS_SETSID = 157 + SYS_GETGROUPS = 158 + SYS_SETGROUPS = 159 + SYS_UNAME = 160 + SYS_SETHOSTNAME = 161 + SYS_SETDOMAINNAME = 162 + SYS_GETRLIMIT = 163 + SYS_SETRLIMIT = 164 + SYS_GETRUSAGE = 165 + SYS_UMASK = 166 + SYS_PRCTL = 167 + SYS_GETCPU = 168 + SYS_GETTIMEOFDAY = 169 + SYS_SETTIMEOFDAY = 170 + SYS_ADJTIMEX = 171 + SYS_GETPID = 172 + SYS_GETPPID = 173 + SYS_GETUID = 174 + SYS_GETEUID = 175 + SYS_GETGID = 176 + SYS_GETEGID = 177 + SYS_GETTID = 178 + SYS_SYSINFO = 179 + SYS_MQ_OPEN = 180 + SYS_MQ_UNLINK = 181 + SYS_MQ_TIMEDSEND = 182 + SYS_MQ_TIMEDRECEIVE = 183 + SYS_MQ_NOTIFY = 184 + SYS_MQ_GETSETATTR = 185 + SYS_MSGGET = 186 + SYS_MSGCTL = 187 + SYS_MSGRCV = 188 + SYS_MSGSND = 189 + SYS_SEMGET = 190 + SYS_SEMCTL = 191 + SYS_SEMTIMEDOP = 192 + SYS_SEMOP = 193 + SYS_SHMGET = 194 + SYS_SHMCTL = 195 + SYS_SHMAT = 196 + SYS_SHMDT = 197 + SYS_SOCKET = 198 + SYS_SOCKETPAIR = 199 + SYS_BIND = 200 + SYS_LISTEN = 201 + SYS_ACCEPT = 202 + SYS_CONNECT = 203 + SYS_GETSOCKNAME = 204 + SYS_GETPEERNAME = 205 + SYS_SENDTO = 206 + SYS_RECVFROM = 207 + SYS_SETSOCKOPT = 208 + SYS_GETSOCKOPT = 209 + SYS_SHUTDOWN = 210 + SYS_SENDMSG = 211 + SYS_RECVMSG = 212 + SYS_READAHEAD = 213 + SYS_BRK = 214 + SYS_MUNMAP = 215 + SYS_MREMAP = 216 + SYS_ADD_KEY = 217 + SYS_REQUEST_KEY = 218 + SYS_KEYCTL = 219 + SYS_CLONE = 220 + SYS_EXECVE = 221 + SYS_MMAP = 222 + SYS_FADVISE64 = 223 + SYS_SWAPON = 224 + SYS_SWAPOFF = 225 + SYS_MPROTECT = 226 + SYS_MSYNC = 227 + SYS_MLOCK = 228 + SYS_MUNLOCK = 229 + SYS_MLOCKALL = 230 + SYS_MUNLOCKALL = 231 + SYS_MINCORE = 232 + SYS_MADVISE = 233 + SYS_REMAP_FILE_PAGES = 234 + SYS_MBIND = 235 + SYS_GET_MEMPOLICY = 236 + SYS_SET_MEMPOLICY = 237 + SYS_MIGRATE_PAGES = 238 + SYS_MOVE_PAGES = 239 + SYS_RT_TGSIGQUEUEINFO = 240 + SYS_PERF_EVENT_OPEN = 241 + 
SYS_ACCEPT4 = 242 + SYS_RECVMMSG = 243 + SYS_ARCH_SPECIFIC_SYSCALL = 244 + SYS_WAIT4 = 260 + SYS_PRLIMIT64 = 261 + SYS_FANOTIFY_INIT = 262 + SYS_FANOTIFY_MARK = 263 + SYS_NAME_TO_HANDLE_AT = 264 + SYS_OPEN_BY_HANDLE_AT = 265 + SYS_CLOCK_ADJTIME = 266 + SYS_SYNCFS = 267 + SYS_SETNS = 268 + SYS_SENDMMSG = 269 + SYS_PROCESS_VM_READV = 270 + SYS_PROCESS_VM_WRITEV = 271 + SYS_KCMP = 272 + SYS_FINIT_MODULE = 273 + SYS_SCHED_SETATTR = 274 + SYS_SCHED_GETATTR = 275 + SYS_RENAMEAT2 = 276 + SYS_SECCOMP = 277 +) diff --git a/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zsysnum_linux_ppc64.go b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zsysnum_linux_ppc64.go new file mode 100644 index 000000000..e1b08f00d --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zsysnum_linux_ppc64.go @@ -0,0 +1,360 @@ +// mksysnum_linux.pl /usr/include/asm/unistd.h +// MACHINE GENERATED BY THE ABOVE COMMAND; DO NOT EDIT + +// +build ppc64,linux + +package unix + +const ( + SYS_RESTART_SYSCALL = 0 + SYS_EXIT = 1 + SYS_FORK = 2 + SYS_READ = 3 + SYS_WRITE = 4 + SYS_OPEN = 5 + SYS_CLOSE = 6 + SYS_WAITPID = 7 + SYS_CREAT = 8 + SYS_LINK = 9 + SYS_UNLINK = 10 + SYS_EXECVE = 11 + SYS_CHDIR = 12 + SYS_TIME = 13 + SYS_MKNOD = 14 + SYS_CHMOD = 15 + SYS_LCHOWN = 16 + SYS_BREAK = 17 + SYS_OLDSTAT = 18 + SYS_LSEEK = 19 + SYS_GETPID = 20 + SYS_MOUNT = 21 + SYS_UMOUNT = 22 + SYS_SETUID = 23 + SYS_GETUID = 24 + SYS_STIME = 25 + SYS_PTRACE = 26 + SYS_ALARM = 27 + SYS_OLDFSTAT = 28 + SYS_PAUSE = 29 + SYS_UTIME = 30 + SYS_STTY = 31 + SYS_GTTY = 32 + SYS_ACCESS = 33 + SYS_NICE = 34 + SYS_FTIME = 35 + SYS_SYNC = 36 + SYS_KILL = 37 + SYS_RENAME = 38 + SYS_MKDIR = 39 + SYS_RMDIR = 40 + SYS_DUP = 41 + SYS_PIPE = 42 + SYS_TIMES = 43 + SYS_PROF = 44 + SYS_BRK = 45 + SYS_SETGID = 46 + SYS_GETGID = 47 + SYS_SIGNAL = 48 + SYS_GETEUID = 49 + SYS_GETEGID = 50 + SYS_ACCT = 51 + SYS_UMOUNT2 = 52 + SYS_LOCK = 53 + SYS_IOCTL = 54 + SYS_FCNTL = 55 + SYS_MPX = 56 + SYS_SETPGID = 57 + SYS_ULIMIT = 58 + SYS_OLDOLDUNAME = 59 + SYS_UMASK = 60 + SYS_CHROOT = 61 + SYS_USTAT = 62 + SYS_DUP2 = 63 + SYS_GETPPID = 64 + SYS_GETPGRP = 65 + SYS_SETSID = 66 + SYS_SIGACTION = 67 + SYS_SGETMASK = 68 + SYS_SSETMASK = 69 + SYS_SETREUID = 70 + SYS_SETREGID = 71 + SYS_SIGSUSPEND = 72 + SYS_SIGPENDING = 73 + SYS_SETHOSTNAME = 74 + SYS_SETRLIMIT = 75 + SYS_GETRLIMIT = 76 + SYS_GETRUSAGE = 77 + SYS_GETTIMEOFDAY = 78 + SYS_SETTIMEOFDAY = 79 + SYS_GETGROUPS = 80 + SYS_SETGROUPS = 81 + SYS_SELECT = 82 + SYS_SYMLINK = 83 + SYS_OLDLSTAT = 84 + SYS_READLINK = 85 + SYS_USELIB = 86 + SYS_SWAPON = 87 + SYS_REBOOT = 88 + SYS_READDIR = 89 + SYS_MMAP = 90 + SYS_MUNMAP = 91 + SYS_TRUNCATE = 92 + SYS_FTRUNCATE = 93 + SYS_FCHMOD = 94 + SYS_FCHOWN = 95 + SYS_GETPRIORITY = 96 + SYS_SETPRIORITY = 97 + SYS_PROFIL = 98 + SYS_STATFS = 99 + SYS_FSTATFS = 100 + SYS_IOPERM = 101 + SYS_SOCKETCALL = 102 + SYS_SYSLOG = 103 + SYS_SETITIMER = 104 + SYS_GETITIMER = 105 + SYS_STAT = 106 + SYS_LSTAT = 107 + SYS_FSTAT = 108 + SYS_OLDUNAME = 109 + SYS_IOPL = 110 + SYS_VHANGUP = 111 + SYS_IDLE = 112 + SYS_VM86 = 113 + SYS_WAIT4 = 114 + SYS_SWAPOFF = 115 + SYS_SYSINFO = 116 + SYS_IPC = 117 + SYS_FSYNC = 118 + SYS_SIGRETURN = 119 + SYS_CLONE = 120 + SYS_SETDOMAINNAME = 121 + SYS_UNAME = 122 + SYS_MODIFY_LDT = 123 + SYS_ADJTIMEX = 124 + SYS_MPROTECT = 125 + SYS_SIGPROCMASK = 126 + SYS_CREATE_MODULE = 127 + SYS_INIT_MODULE = 128 + SYS_DELETE_MODULE = 129 + SYS_GET_KERNEL_SYMS = 130 + SYS_QUOTACTL = 131 + SYS_GETPGID = 132 + 
SYS_FCHDIR = 133 + SYS_BDFLUSH = 134 + SYS_SYSFS = 135 + SYS_PERSONALITY = 136 + SYS_AFS_SYSCALL = 137 + SYS_SETFSUID = 138 + SYS_SETFSGID = 139 + SYS__LLSEEK = 140 + SYS_GETDENTS = 141 + SYS__NEWSELECT = 142 + SYS_FLOCK = 143 + SYS_MSYNC = 144 + SYS_READV = 145 + SYS_WRITEV = 146 + SYS_GETSID = 147 + SYS_FDATASYNC = 148 + SYS__SYSCTL = 149 + SYS_MLOCK = 150 + SYS_MUNLOCK = 151 + SYS_MLOCKALL = 152 + SYS_MUNLOCKALL = 153 + SYS_SCHED_SETPARAM = 154 + SYS_SCHED_GETPARAM = 155 + SYS_SCHED_SETSCHEDULER = 156 + SYS_SCHED_GETSCHEDULER = 157 + SYS_SCHED_YIELD = 158 + SYS_SCHED_GET_PRIORITY_MAX = 159 + SYS_SCHED_GET_PRIORITY_MIN = 160 + SYS_SCHED_RR_GET_INTERVAL = 161 + SYS_NANOSLEEP = 162 + SYS_MREMAP = 163 + SYS_SETRESUID = 164 + SYS_GETRESUID = 165 + SYS_QUERY_MODULE = 166 + SYS_POLL = 167 + SYS_NFSSERVCTL = 168 + SYS_SETRESGID = 169 + SYS_GETRESGID = 170 + SYS_PRCTL = 171 + SYS_RT_SIGRETURN = 172 + SYS_RT_SIGACTION = 173 + SYS_RT_SIGPROCMASK = 174 + SYS_RT_SIGPENDING = 175 + SYS_RT_SIGTIMEDWAIT = 176 + SYS_RT_SIGQUEUEINFO = 177 + SYS_RT_SIGSUSPEND = 178 + SYS_PREAD64 = 179 + SYS_PWRITE64 = 180 + SYS_CHOWN = 181 + SYS_GETCWD = 182 + SYS_CAPGET = 183 + SYS_CAPSET = 184 + SYS_SIGALTSTACK = 185 + SYS_SENDFILE = 186 + SYS_GETPMSG = 187 + SYS_PUTPMSG = 188 + SYS_VFORK = 189 + SYS_UGETRLIMIT = 190 + SYS_READAHEAD = 191 + SYS_PCICONFIG_READ = 198 + SYS_PCICONFIG_WRITE = 199 + SYS_PCICONFIG_IOBASE = 200 + SYS_MULTIPLEXER = 201 + SYS_GETDENTS64 = 202 + SYS_PIVOT_ROOT = 203 + SYS_MADVISE = 205 + SYS_MINCORE = 206 + SYS_GETTID = 207 + SYS_TKILL = 208 + SYS_SETXATTR = 209 + SYS_LSETXATTR = 210 + SYS_FSETXATTR = 211 + SYS_GETXATTR = 212 + SYS_LGETXATTR = 213 + SYS_FGETXATTR = 214 + SYS_LISTXATTR = 215 + SYS_LLISTXATTR = 216 + SYS_FLISTXATTR = 217 + SYS_REMOVEXATTR = 218 + SYS_LREMOVEXATTR = 219 + SYS_FREMOVEXATTR = 220 + SYS_FUTEX = 221 + SYS_SCHED_SETAFFINITY = 222 + SYS_SCHED_GETAFFINITY = 223 + SYS_TUXCALL = 225 + SYS_IO_SETUP = 227 + SYS_IO_DESTROY = 228 + SYS_IO_GETEVENTS = 229 + SYS_IO_SUBMIT = 230 + SYS_IO_CANCEL = 231 + SYS_SET_TID_ADDRESS = 232 + SYS_FADVISE64 = 233 + SYS_EXIT_GROUP = 234 + SYS_LOOKUP_DCOOKIE = 235 + SYS_EPOLL_CREATE = 236 + SYS_EPOLL_CTL = 237 + SYS_EPOLL_WAIT = 238 + SYS_REMAP_FILE_PAGES = 239 + SYS_TIMER_CREATE = 240 + SYS_TIMER_SETTIME = 241 + SYS_TIMER_GETTIME = 242 + SYS_TIMER_GETOVERRUN = 243 + SYS_TIMER_DELETE = 244 + SYS_CLOCK_SETTIME = 245 + SYS_CLOCK_GETTIME = 246 + SYS_CLOCK_GETRES = 247 + SYS_CLOCK_NANOSLEEP = 248 + SYS_SWAPCONTEXT = 249 + SYS_TGKILL = 250 + SYS_UTIMES = 251 + SYS_STATFS64 = 252 + SYS_FSTATFS64 = 253 + SYS_RTAS = 255 + SYS_SYS_DEBUG_SETCONTEXT = 256 + SYS_MIGRATE_PAGES = 258 + SYS_MBIND = 259 + SYS_GET_MEMPOLICY = 260 + SYS_SET_MEMPOLICY = 261 + SYS_MQ_OPEN = 262 + SYS_MQ_UNLINK = 263 + SYS_MQ_TIMEDSEND = 264 + SYS_MQ_TIMEDRECEIVE = 265 + SYS_MQ_NOTIFY = 266 + SYS_MQ_GETSETATTR = 267 + SYS_KEXEC_LOAD = 268 + SYS_ADD_KEY = 269 + SYS_REQUEST_KEY = 270 + SYS_KEYCTL = 271 + SYS_WAITID = 272 + SYS_IOPRIO_SET = 273 + SYS_IOPRIO_GET = 274 + SYS_INOTIFY_INIT = 275 + SYS_INOTIFY_ADD_WATCH = 276 + SYS_INOTIFY_RM_WATCH = 277 + SYS_SPU_RUN = 278 + SYS_SPU_CREATE = 279 + SYS_PSELECT6 = 280 + SYS_PPOLL = 281 + SYS_UNSHARE = 282 + SYS_SPLICE = 283 + SYS_TEE = 284 + SYS_VMSPLICE = 285 + SYS_OPENAT = 286 + SYS_MKDIRAT = 287 + SYS_MKNODAT = 288 + SYS_FCHOWNAT = 289 + SYS_FUTIMESAT = 290 + SYS_NEWFSTATAT = 291 + SYS_UNLINKAT = 292 + SYS_RENAMEAT = 293 + SYS_LINKAT = 294 + SYS_SYMLINKAT = 295 + SYS_READLINKAT = 296 + SYS_FCHMODAT = 297 + SYS_FACCESSAT = 298 + 
SYS_GET_ROBUST_LIST = 299 + SYS_SET_ROBUST_LIST = 300 + SYS_MOVE_PAGES = 301 + SYS_GETCPU = 302 + SYS_EPOLL_PWAIT = 303 + SYS_UTIMENSAT = 304 + SYS_SIGNALFD = 305 + SYS_TIMERFD_CREATE = 306 + SYS_EVENTFD = 307 + SYS_SYNC_FILE_RANGE2 = 308 + SYS_FALLOCATE = 309 + SYS_SUBPAGE_PROT = 310 + SYS_TIMERFD_SETTIME = 311 + SYS_TIMERFD_GETTIME = 312 + SYS_SIGNALFD4 = 313 + SYS_EVENTFD2 = 314 + SYS_EPOLL_CREATE1 = 315 + SYS_DUP3 = 316 + SYS_PIPE2 = 317 + SYS_INOTIFY_INIT1 = 318 + SYS_PERF_EVENT_OPEN = 319 + SYS_PREADV = 320 + SYS_PWRITEV = 321 + SYS_RT_TGSIGQUEUEINFO = 322 + SYS_FANOTIFY_INIT = 323 + SYS_FANOTIFY_MARK = 324 + SYS_PRLIMIT64 = 325 + SYS_SOCKET = 326 + SYS_BIND = 327 + SYS_CONNECT = 328 + SYS_LISTEN = 329 + SYS_ACCEPT = 330 + SYS_GETSOCKNAME = 331 + SYS_GETPEERNAME = 332 + SYS_SOCKETPAIR = 333 + SYS_SEND = 334 + SYS_SENDTO = 335 + SYS_RECV = 336 + SYS_RECVFROM = 337 + SYS_SHUTDOWN = 338 + SYS_SETSOCKOPT = 339 + SYS_GETSOCKOPT = 340 + SYS_SENDMSG = 341 + SYS_RECVMSG = 342 + SYS_RECVMMSG = 343 + SYS_ACCEPT4 = 344 + SYS_NAME_TO_HANDLE_AT = 345 + SYS_OPEN_BY_HANDLE_AT = 346 + SYS_CLOCK_ADJTIME = 347 + SYS_SYNCFS = 348 + SYS_SENDMMSG = 349 + SYS_SETNS = 350 + SYS_PROCESS_VM_READV = 351 + SYS_PROCESS_VM_WRITEV = 352 + SYS_FINIT_MODULE = 353 + SYS_KCMP = 354 + SYS_SCHED_SETATTR = 355 + SYS_SCHED_GETATTR = 356 + SYS_RENAMEAT2 = 357 + SYS_SECCOMP = 358 + SYS_GETRANDOM = 359 + SYS_MEMFD_CREATE = 360 + SYS_BPF = 361 +) diff --git a/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go new file mode 100644 index 000000000..45e63f51a --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go @@ -0,0 +1,353 @@ +// mksysnum_linux.pl /usr/include/powerpc64le-linux-gnu/asm/unistd.h +// MACHINE GENERATED BY THE ABOVE COMMAND; DO NOT EDIT + +// +build ppc64le,linux + +package unix + +const ( + SYS_RESTART_SYSCALL = 0 + SYS_EXIT = 1 + SYS_FORK = 2 + SYS_READ = 3 + SYS_WRITE = 4 + SYS_OPEN = 5 + SYS_CLOSE = 6 + SYS_WAITPID = 7 + SYS_CREAT = 8 + SYS_LINK = 9 + SYS_UNLINK = 10 + SYS_EXECVE = 11 + SYS_CHDIR = 12 + SYS_TIME = 13 + SYS_MKNOD = 14 + SYS_CHMOD = 15 + SYS_LCHOWN = 16 + SYS_BREAK = 17 + SYS_OLDSTAT = 18 + SYS_LSEEK = 19 + SYS_GETPID = 20 + SYS_MOUNT = 21 + SYS_UMOUNT = 22 + SYS_SETUID = 23 + SYS_GETUID = 24 + SYS_STIME = 25 + SYS_PTRACE = 26 + SYS_ALARM = 27 + SYS_OLDFSTAT = 28 + SYS_PAUSE = 29 + SYS_UTIME = 30 + SYS_STTY = 31 + SYS_GTTY = 32 + SYS_ACCESS = 33 + SYS_NICE = 34 + SYS_FTIME = 35 + SYS_SYNC = 36 + SYS_KILL = 37 + SYS_RENAME = 38 + SYS_MKDIR = 39 + SYS_RMDIR = 40 + SYS_DUP = 41 + SYS_PIPE = 42 + SYS_TIMES = 43 + SYS_PROF = 44 + SYS_BRK = 45 + SYS_SETGID = 46 + SYS_GETGID = 47 + SYS_SIGNAL = 48 + SYS_GETEUID = 49 + SYS_GETEGID = 50 + SYS_ACCT = 51 + SYS_UMOUNT2 = 52 + SYS_LOCK = 53 + SYS_IOCTL = 54 + SYS_FCNTL = 55 + SYS_MPX = 56 + SYS_SETPGID = 57 + SYS_ULIMIT = 58 + SYS_OLDOLDUNAME = 59 + SYS_UMASK = 60 + SYS_CHROOT = 61 + SYS_USTAT = 62 + SYS_DUP2 = 63 + SYS_GETPPID = 64 + SYS_GETPGRP = 65 + SYS_SETSID = 66 + SYS_SIGACTION = 67 + SYS_SGETMASK = 68 + SYS_SSETMASK = 69 + SYS_SETREUID = 70 + SYS_SETREGID = 71 + SYS_SIGSUSPEND = 72 + SYS_SIGPENDING = 73 + SYS_SETHOSTNAME = 74 + SYS_SETRLIMIT = 75 + SYS_GETRLIMIT = 76 + SYS_GETRUSAGE = 77 + SYS_GETTIMEOFDAY = 78 + SYS_SETTIMEOFDAY = 79 + SYS_GETGROUPS = 80 + SYS_SETGROUPS = 81 + SYS_SELECT = 82 + SYS_SYMLINK = 83 + SYS_OLDLSTAT = 84 + 
SYS_READLINK = 85 + SYS_USELIB = 86 + SYS_SWAPON = 87 + SYS_REBOOT = 88 + SYS_READDIR = 89 + SYS_MMAP = 90 + SYS_MUNMAP = 91 + SYS_TRUNCATE = 92 + SYS_FTRUNCATE = 93 + SYS_FCHMOD = 94 + SYS_FCHOWN = 95 + SYS_GETPRIORITY = 96 + SYS_SETPRIORITY = 97 + SYS_PROFIL = 98 + SYS_STATFS = 99 + SYS_FSTATFS = 100 + SYS_IOPERM = 101 + SYS_SOCKETCALL = 102 + SYS_SYSLOG = 103 + SYS_SETITIMER = 104 + SYS_GETITIMER = 105 + SYS_STAT = 106 + SYS_LSTAT = 107 + SYS_FSTAT = 108 + SYS_OLDUNAME = 109 + SYS_IOPL = 110 + SYS_VHANGUP = 111 + SYS_IDLE = 112 + SYS_VM86 = 113 + SYS_WAIT4 = 114 + SYS_SWAPOFF = 115 + SYS_SYSINFO = 116 + SYS_IPC = 117 + SYS_FSYNC = 118 + SYS_SIGRETURN = 119 + SYS_CLONE = 120 + SYS_SETDOMAINNAME = 121 + SYS_UNAME = 122 + SYS_MODIFY_LDT = 123 + SYS_ADJTIMEX = 124 + SYS_MPROTECT = 125 + SYS_SIGPROCMASK = 126 + SYS_CREATE_MODULE = 127 + SYS_INIT_MODULE = 128 + SYS_DELETE_MODULE = 129 + SYS_GET_KERNEL_SYMS = 130 + SYS_QUOTACTL = 131 + SYS_GETPGID = 132 + SYS_FCHDIR = 133 + SYS_BDFLUSH = 134 + SYS_SYSFS = 135 + SYS_PERSONALITY = 136 + SYS_AFS_SYSCALL = 137 + SYS_SETFSUID = 138 + SYS_SETFSGID = 139 + SYS__LLSEEK = 140 + SYS_GETDENTS = 141 + SYS__NEWSELECT = 142 + SYS_FLOCK = 143 + SYS_MSYNC = 144 + SYS_READV = 145 + SYS_WRITEV = 146 + SYS_GETSID = 147 + SYS_FDATASYNC = 148 + SYS__SYSCTL = 149 + SYS_MLOCK = 150 + SYS_MUNLOCK = 151 + SYS_MLOCKALL = 152 + SYS_MUNLOCKALL = 153 + SYS_SCHED_SETPARAM = 154 + SYS_SCHED_GETPARAM = 155 + SYS_SCHED_SETSCHEDULER = 156 + SYS_SCHED_GETSCHEDULER = 157 + SYS_SCHED_YIELD = 158 + SYS_SCHED_GET_PRIORITY_MAX = 159 + SYS_SCHED_GET_PRIORITY_MIN = 160 + SYS_SCHED_RR_GET_INTERVAL = 161 + SYS_NANOSLEEP = 162 + SYS_MREMAP = 163 + SYS_SETRESUID = 164 + SYS_GETRESUID = 165 + SYS_QUERY_MODULE = 166 + SYS_POLL = 167 + SYS_NFSSERVCTL = 168 + SYS_SETRESGID = 169 + SYS_GETRESGID = 170 + SYS_PRCTL = 171 + SYS_RT_SIGRETURN = 172 + SYS_RT_SIGACTION = 173 + SYS_RT_SIGPROCMASK = 174 + SYS_RT_SIGPENDING = 175 + SYS_RT_SIGTIMEDWAIT = 176 + SYS_RT_SIGQUEUEINFO = 177 + SYS_RT_SIGSUSPEND = 178 + SYS_PREAD64 = 179 + SYS_PWRITE64 = 180 + SYS_CHOWN = 181 + SYS_GETCWD = 182 + SYS_CAPGET = 183 + SYS_CAPSET = 184 + SYS_SIGALTSTACK = 185 + SYS_SENDFILE = 186 + SYS_GETPMSG = 187 + SYS_PUTPMSG = 188 + SYS_VFORK = 189 + SYS_UGETRLIMIT = 190 + SYS_READAHEAD = 191 + SYS_PCICONFIG_READ = 198 + SYS_PCICONFIG_WRITE = 199 + SYS_PCICONFIG_IOBASE = 200 + SYS_MULTIPLEXER = 201 + SYS_GETDENTS64 = 202 + SYS_PIVOT_ROOT = 203 + SYS_MADVISE = 205 + SYS_MINCORE = 206 + SYS_GETTID = 207 + SYS_TKILL = 208 + SYS_SETXATTR = 209 + SYS_LSETXATTR = 210 + SYS_FSETXATTR = 211 + SYS_GETXATTR = 212 + SYS_LGETXATTR = 213 + SYS_FGETXATTR = 214 + SYS_LISTXATTR = 215 + SYS_LLISTXATTR = 216 + SYS_FLISTXATTR = 217 + SYS_REMOVEXATTR = 218 + SYS_LREMOVEXATTR = 219 + SYS_FREMOVEXATTR = 220 + SYS_FUTEX = 221 + SYS_SCHED_SETAFFINITY = 222 + SYS_SCHED_GETAFFINITY = 223 + SYS_TUXCALL = 225 + SYS_IO_SETUP = 227 + SYS_IO_DESTROY = 228 + SYS_IO_GETEVENTS = 229 + SYS_IO_SUBMIT = 230 + SYS_IO_CANCEL = 231 + SYS_SET_TID_ADDRESS = 232 + SYS_FADVISE64 = 233 + SYS_EXIT_GROUP = 234 + SYS_LOOKUP_DCOOKIE = 235 + SYS_EPOLL_CREATE = 236 + SYS_EPOLL_CTL = 237 + SYS_EPOLL_WAIT = 238 + SYS_REMAP_FILE_PAGES = 239 + SYS_TIMER_CREATE = 240 + SYS_TIMER_SETTIME = 241 + SYS_TIMER_GETTIME = 242 + SYS_TIMER_GETOVERRUN = 243 + SYS_TIMER_DELETE = 244 + SYS_CLOCK_SETTIME = 245 + SYS_CLOCK_GETTIME = 246 + SYS_CLOCK_GETRES = 247 + SYS_CLOCK_NANOSLEEP = 248 + SYS_SWAPCONTEXT = 249 + SYS_TGKILL = 250 + SYS_UTIMES = 251 + SYS_STATFS64 = 252 + SYS_FSTATFS64 = 253 + 
SYS_RTAS = 255 + SYS_SYS_DEBUG_SETCONTEXT = 256 + SYS_MIGRATE_PAGES = 258 + SYS_MBIND = 259 + SYS_GET_MEMPOLICY = 260 + SYS_SET_MEMPOLICY = 261 + SYS_MQ_OPEN = 262 + SYS_MQ_UNLINK = 263 + SYS_MQ_TIMEDSEND = 264 + SYS_MQ_TIMEDRECEIVE = 265 + SYS_MQ_NOTIFY = 266 + SYS_MQ_GETSETATTR = 267 + SYS_KEXEC_LOAD = 268 + SYS_ADD_KEY = 269 + SYS_REQUEST_KEY = 270 + SYS_KEYCTL = 271 + SYS_WAITID = 272 + SYS_IOPRIO_SET = 273 + SYS_IOPRIO_GET = 274 + SYS_INOTIFY_INIT = 275 + SYS_INOTIFY_ADD_WATCH = 276 + SYS_INOTIFY_RM_WATCH = 277 + SYS_SPU_RUN = 278 + SYS_SPU_CREATE = 279 + SYS_PSELECT6 = 280 + SYS_PPOLL = 281 + SYS_UNSHARE = 282 + SYS_SPLICE = 283 + SYS_TEE = 284 + SYS_VMSPLICE = 285 + SYS_OPENAT = 286 + SYS_MKDIRAT = 287 + SYS_MKNODAT = 288 + SYS_FCHOWNAT = 289 + SYS_FUTIMESAT = 290 + SYS_NEWFSTATAT = 291 + SYS_UNLINKAT = 292 + SYS_RENAMEAT = 293 + SYS_LINKAT = 294 + SYS_SYMLINKAT = 295 + SYS_READLINKAT = 296 + SYS_FCHMODAT = 297 + SYS_FACCESSAT = 298 + SYS_GET_ROBUST_LIST = 299 + SYS_SET_ROBUST_LIST = 300 + SYS_MOVE_PAGES = 301 + SYS_GETCPU = 302 + SYS_EPOLL_PWAIT = 303 + SYS_UTIMENSAT = 304 + SYS_SIGNALFD = 305 + SYS_TIMERFD_CREATE = 306 + SYS_EVENTFD = 307 + SYS_SYNC_FILE_RANGE2 = 308 + SYS_FALLOCATE = 309 + SYS_SUBPAGE_PROT = 310 + SYS_TIMERFD_SETTIME = 311 + SYS_TIMERFD_GETTIME = 312 + SYS_SIGNALFD4 = 313 + SYS_EVENTFD2 = 314 + SYS_EPOLL_CREATE1 = 315 + SYS_DUP3 = 316 + SYS_PIPE2 = 317 + SYS_INOTIFY_INIT1 = 318 + SYS_PERF_EVENT_OPEN = 319 + SYS_PREADV = 320 + SYS_PWRITEV = 321 + SYS_RT_TGSIGQUEUEINFO = 322 + SYS_FANOTIFY_INIT = 323 + SYS_FANOTIFY_MARK = 324 + SYS_PRLIMIT64 = 325 + SYS_SOCKET = 326 + SYS_BIND = 327 + SYS_CONNECT = 328 + SYS_LISTEN = 329 + SYS_ACCEPT = 330 + SYS_GETSOCKNAME = 331 + SYS_GETPEERNAME = 332 + SYS_SOCKETPAIR = 333 + SYS_SEND = 334 + SYS_SENDTO = 335 + SYS_RECV = 336 + SYS_RECVFROM = 337 + SYS_SHUTDOWN = 338 + SYS_SETSOCKOPT = 339 + SYS_GETSOCKOPT = 340 + SYS_SENDMSG = 341 + SYS_RECVMSG = 342 + SYS_RECVMMSG = 343 + SYS_ACCEPT4 = 344 + SYS_NAME_TO_HANDLE_AT = 345 + SYS_OPEN_BY_HANDLE_AT = 346 + SYS_CLOCK_ADJTIME = 347 + SYS_SYNCFS = 348 + SYS_SENDMMSG = 349 + SYS_SETNS = 350 + SYS_PROCESS_VM_READV = 351 + SYS_PROCESS_VM_WRITEV = 352 + SYS_FINIT_MODULE = 353 + SYS_KCMP = 354 +) diff --git a/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zsysnum_netbsd_386.go b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zsysnum_netbsd_386.go new file mode 100644 index 000000000..f60d8f988 --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zsysnum_netbsd_386.go @@ -0,0 +1,273 @@ +// mksysnum_netbsd.pl +// MACHINE GENERATED BY THE ABOVE COMMAND; DO NOT EDIT + +// +build 386,netbsd + +package unix + +const ( + SYS_EXIT = 1 // { void|sys||exit(int rval); } + SYS_FORK = 2 // { int|sys||fork(void); } + SYS_READ = 3 // { ssize_t|sys||read(int fd, void *buf, size_t nbyte); } + SYS_WRITE = 4 // { ssize_t|sys||write(int fd, const void *buf, size_t nbyte); } + SYS_OPEN = 5 // { int|sys||open(const char *path, int flags, ... 
mode_t mode); } + SYS_CLOSE = 6 // { int|sys||close(int fd); } + SYS_LINK = 9 // { int|sys||link(const char *path, const char *link); } + SYS_UNLINK = 10 // { int|sys||unlink(const char *path); } + SYS_CHDIR = 12 // { int|sys||chdir(const char *path); } + SYS_FCHDIR = 13 // { int|sys||fchdir(int fd); } + SYS_CHMOD = 15 // { int|sys||chmod(const char *path, mode_t mode); } + SYS_CHOWN = 16 // { int|sys||chown(const char *path, uid_t uid, gid_t gid); } + SYS_BREAK = 17 // { int|sys||obreak(char *nsize); } + SYS_GETPID = 20 // { pid_t|sys||getpid_with_ppid(void); } + SYS_UNMOUNT = 22 // { int|sys||unmount(const char *path, int flags); } + SYS_SETUID = 23 // { int|sys||setuid(uid_t uid); } + SYS_GETUID = 24 // { uid_t|sys||getuid_with_euid(void); } + SYS_GETEUID = 25 // { uid_t|sys||geteuid(void); } + SYS_PTRACE = 26 // { int|sys||ptrace(int req, pid_t pid, void *addr, int data); } + SYS_RECVMSG = 27 // { ssize_t|sys||recvmsg(int s, struct msghdr *msg, int flags); } + SYS_SENDMSG = 28 // { ssize_t|sys||sendmsg(int s, const struct msghdr *msg, int flags); } + SYS_RECVFROM = 29 // { ssize_t|sys||recvfrom(int s, void *buf, size_t len, int flags, struct sockaddr *from, socklen_t *fromlenaddr); } + SYS_ACCEPT = 30 // { int|sys||accept(int s, struct sockaddr *name, socklen_t *anamelen); } + SYS_GETPEERNAME = 31 // { int|sys||getpeername(int fdes, struct sockaddr *asa, socklen_t *alen); } + SYS_GETSOCKNAME = 32 // { int|sys||getsockname(int fdes, struct sockaddr *asa, socklen_t *alen); } + SYS_ACCESS = 33 // { int|sys||access(const char *path, int flags); } + SYS_CHFLAGS = 34 // { int|sys||chflags(const char *path, u_long flags); } + SYS_FCHFLAGS = 35 // { int|sys||fchflags(int fd, u_long flags); } + SYS_SYNC = 36 // { void|sys||sync(void); } + SYS_KILL = 37 // { int|sys||kill(pid_t pid, int signum); } + SYS_GETPPID = 39 // { pid_t|sys||getppid(void); } + SYS_DUP = 41 // { int|sys||dup(int fd); } + SYS_PIPE = 42 // { int|sys||pipe(void); } + SYS_GETEGID = 43 // { gid_t|sys||getegid(void); } + SYS_PROFIL = 44 // { int|sys||profil(char *samples, size_t size, u_long offset, u_int scale); } + SYS_KTRACE = 45 // { int|sys||ktrace(const char *fname, int ops, int facs, pid_t pid); } + SYS_GETGID = 47 // { gid_t|sys||getgid_with_egid(void); } + SYS___GETLOGIN = 49 // { int|sys||__getlogin(char *namebuf, size_t namelen); } + SYS___SETLOGIN = 50 // { int|sys||__setlogin(const char *namebuf); } + SYS_ACCT = 51 // { int|sys||acct(const char *path); } + SYS_IOCTL = 54 // { int|sys||ioctl(int fd, u_long com, ... 
void *data); } + SYS_REVOKE = 56 // { int|sys||revoke(const char *path); } + SYS_SYMLINK = 57 // { int|sys||symlink(const char *path, const char *link); } + SYS_READLINK = 58 // { ssize_t|sys||readlink(const char *path, char *buf, size_t count); } + SYS_EXECVE = 59 // { int|sys||execve(const char *path, char * const *argp, char * const *envp); } + SYS_UMASK = 60 // { mode_t|sys||umask(mode_t newmask); } + SYS_CHROOT = 61 // { int|sys||chroot(const char *path); } + SYS_VFORK = 66 // { int|sys||vfork(void); } + SYS_SBRK = 69 // { int|sys||sbrk(intptr_t incr); } + SYS_SSTK = 70 // { int|sys||sstk(int incr); } + SYS_VADVISE = 72 // { int|sys||ovadvise(int anom); } + SYS_MUNMAP = 73 // { int|sys||munmap(void *addr, size_t len); } + SYS_MPROTECT = 74 // { int|sys||mprotect(void *addr, size_t len, int prot); } + SYS_MADVISE = 75 // { int|sys||madvise(void *addr, size_t len, int behav); } + SYS_MINCORE = 78 // { int|sys||mincore(void *addr, size_t len, char *vec); } + SYS_GETGROUPS = 79 // { int|sys||getgroups(int gidsetsize, gid_t *gidset); } + SYS_SETGROUPS = 80 // { int|sys||setgroups(int gidsetsize, const gid_t *gidset); } + SYS_GETPGRP = 81 // { int|sys||getpgrp(void); } + SYS_SETPGID = 82 // { int|sys||setpgid(pid_t pid, pid_t pgid); } + SYS_DUP2 = 90 // { int|sys||dup2(int from, int to); } + SYS_FCNTL = 92 // { int|sys||fcntl(int fd, int cmd, ... void *arg); } + SYS_FSYNC = 95 // { int|sys||fsync(int fd); } + SYS_SETPRIORITY = 96 // { int|sys||setpriority(int which, id_t who, int prio); } + SYS_CONNECT = 98 // { int|sys||connect(int s, const struct sockaddr *name, socklen_t namelen); } + SYS_GETPRIORITY = 100 // { int|sys||getpriority(int which, id_t who); } + SYS_BIND = 104 // { int|sys||bind(int s, const struct sockaddr *name, socklen_t namelen); } + SYS_SETSOCKOPT = 105 // { int|sys||setsockopt(int s, int level, int name, const void *val, socklen_t valsize); } + SYS_LISTEN = 106 // { int|sys||listen(int s, int backlog); } + SYS_GETSOCKOPT = 118 // { int|sys||getsockopt(int s, int level, int name, void *val, socklen_t *avalsize); } + SYS_READV = 120 // { ssize_t|sys||readv(int fd, const struct iovec *iovp, int iovcnt); } + SYS_WRITEV = 121 // { ssize_t|sys||writev(int fd, const struct iovec *iovp, int iovcnt); } + SYS_FCHOWN = 123 // { int|sys||fchown(int fd, uid_t uid, gid_t gid); } + SYS_FCHMOD = 124 // { int|sys||fchmod(int fd, mode_t mode); } + SYS_SETREUID = 126 // { int|sys||setreuid(uid_t ruid, uid_t euid); } + SYS_SETREGID = 127 // { int|sys||setregid(gid_t rgid, gid_t egid); } + SYS_RENAME = 128 // { int|sys||rename(const char *from, const char *to); } + SYS_FLOCK = 131 // { int|sys||flock(int fd, int how); } + SYS_MKFIFO = 132 // { int|sys||mkfifo(const char *path, mode_t mode); } + SYS_SENDTO = 133 // { ssize_t|sys||sendto(int s, const void *buf, size_t len, int flags, const struct sockaddr *to, socklen_t tolen); } + SYS_SHUTDOWN = 134 // { int|sys||shutdown(int s, int how); } + SYS_SOCKETPAIR = 135 // { int|sys||socketpair(int domain, int type, int protocol, int *rsv); } + SYS_MKDIR = 136 // { int|sys||mkdir(const char *path, mode_t mode); } + SYS_RMDIR = 137 // { int|sys||rmdir(const char *path); } + SYS_SETSID = 147 // { int|sys||setsid(void); } + SYS_SYSARCH = 165 // { int|sys||sysarch(int op, void *parms); } + SYS_PREAD = 173 // { ssize_t|sys||pread(int fd, void *buf, size_t nbyte, int PAD, off_t offset); } + SYS_PWRITE = 174 // { ssize_t|sys||pwrite(int fd, const void *buf, size_t nbyte, int PAD, off_t offset); } + SYS_NTP_ADJTIME = 176 // { int|sys||ntp_adjtime(struct 
timex *tp); } + SYS_SETGID = 181 // { int|sys||setgid(gid_t gid); } + SYS_SETEGID = 182 // { int|sys||setegid(gid_t egid); } + SYS_SETEUID = 183 // { int|sys||seteuid(uid_t euid); } + SYS_PATHCONF = 191 // { long|sys||pathconf(const char *path, int name); } + SYS_FPATHCONF = 192 // { long|sys||fpathconf(int fd, int name); } + SYS_GETRLIMIT = 194 // { int|sys||getrlimit(int which, struct rlimit *rlp); } + SYS_SETRLIMIT = 195 // { int|sys||setrlimit(int which, const struct rlimit *rlp); } + SYS_MMAP = 197 // { void *|sys||mmap(void *addr, size_t len, int prot, int flags, int fd, long PAD, off_t pos); } + SYS_LSEEK = 199 // { off_t|sys||lseek(int fd, int PAD, off_t offset, int whence); } + SYS_TRUNCATE = 200 // { int|sys||truncate(const char *path, int PAD, off_t length); } + SYS_FTRUNCATE = 201 // { int|sys||ftruncate(int fd, int PAD, off_t length); } + SYS___SYSCTL = 202 // { int|sys||__sysctl(const int *name, u_int namelen, void *old, size_t *oldlenp, const void *new, size_t newlen); } + SYS_MLOCK = 203 // { int|sys||mlock(const void *addr, size_t len); } + SYS_MUNLOCK = 204 // { int|sys||munlock(const void *addr, size_t len); } + SYS_UNDELETE = 205 // { int|sys||undelete(const char *path); } + SYS_GETPGID = 207 // { pid_t|sys||getpgid(pid_t pid); } + SYS_REBOOT = 208 // { int|sys||reboot(int opt, char *bootstr); } + SYS_POLL = 209 // { int|sys||poll(struct pollfd *fds, u_int nfds, int timeout); } + SYS_SEMGET = 221 // { int|sys||semget(key_t key, int nsems, int semflg); } + SYS_SEMOP = 222 // { int|sys||semop(int semid, struct sembuf *sops, size_t nsops); } + SYS_SEMCONFIG = 223 // { int|sys||semconfig(int flag); } + SYS_MSGGET = 225 // { int|sys||msgget(key_t key, int msgflg); } + SYS_MSGSND = 226 // { int|sys||msgsnd(int msqid, const void *msgp, size_t msgsz, int msgflg); } + SYS_MSGRCV = 227 // { ssize_t|sys||msgrcv(int msqid, void *msgp, size_t msgsz, long msgtyp, int msgflg); } + SYS_SHMAT = 228 // { void *|sys||shmat(int shmid, const void *shmaddr, int shmflg); } + SYS_SHMDT = 230 // { int|sys||shmdt(const void *shmaddr); } + SYS_SHMGET = 231 // { int|sys||shmget(key_t key, size_t size, int shmflg); } + SYS_TIMER_CREATE = 235 // { int|sys||timer_create(clockid_t clock_id, struct sigevent *evp, timer_t *timerid); } + SYS_TIMER_DELETE = 236 // { int|sys||timer_delete(timer_t timerid); } + SYS_TIMER_GETOVERRUN = 239 // { int|sys||timer_getoverrun(timer_t timerid); } + SYS_FDATASYNC = 241 // { int|sys||fdatasync(int fd); } + SYS_MLOCKALL = 242 // { int|sys||mlockall(int flags); } + SYS_MUNLOCKALL = 243 // { int|sys||munlockall(void); } + SYS_SIGQUEUEINFO = 245 // { int|sys||sigqueueinfo(pid_t pid, const siginfo_t *info); } + SYS_MODCTL = 246 // { int|sys||modctl(int cmd, void *arg); } + SYS___POSIX_RENAME = 270 // { int|sys||__posix_rename(const char *from, const char *to); } + SYS_SWAPCTL = 271 // { int|sys||swapctl(int cmd, void *arg, int misc); } + SYS_MINHERIT = 273 // { int|sys||minherit(void *addr, size_t len, int inherit); } + SYS_LCHMOD = 274 // { int|sys||lchmod(const char *path, mode_t mode); } + SYS_LCHOWN = 275 // { int|sys||lchown(const char *path, uid_t uid, gid_t gid); } + SYS___POSIX_CHOWN = 283 // { int|sys||__posix_chown(const char *path, uid_t uid, gid_t gid); } + SYS___POSIX_FCHOWN = 284 // { int|sys||__posix_fchown(int fd, uid_t uid, gid_t gid); } + SYS___POSIX_LCHOWN = 285 // { int|sys||__posix_lchown(const char *path, uid_t uid, gid_t gid); } + SYS_GETSID = 286 // { pid_t|sys||getsid(pid_t pid); } + SYS___CLONE = 287 // { pid_t|sys||__clone(int flags, void 
*stack); } + SYS_FKTRACE = 288 // { int|sys||fktrace(int fd, int ops, int facs, pid_t pid); } + SYS_PREADV = 289 // { ssize_t|sys||preadv(int fd, const struct iovec *iovp, int iovcnt, int PAD, off_t offset); } + SYS_PWRITEV = 290 // { ssize_t|sys||pwritev(int fd, const struct iovec *iovp, int iovcnt, int PAD, off_t offset); } + SYS___GETCWD = 296 // { int|sys||__getcwd(char *bufp, size_t length); } + SYS_FCHROOT = 297 // { int|sys||fchroot(int fd); } + SYS_LCHFLAGS = 304 // { int|sys||lchflags(const char *path, u_long flags); } + SYS_ISSETUGID = 305 // { int|sys||issetugid(void); } + SYS_UTRACE = 306 // { int|sys||utrace(const char *label, void *addr, size_t len); } + SYS_GETCONTEXT = 307 // { int|sys||getcontext(struct __ucontext *ucp); } + SYS_SETCONTEXT = 308 // { int|sys||setcontext(const struct __ucontext *ucp); } + SYS__LWP_CREATE = 309 // { int|sys||_lwp_create(const struct __ucontext *ucp, u_long flags, lwpid_t *new_lwp); } + SYS__LWP_EXIT = 310 // { int|sys||_lwp_exit(void); } + SYS__LWP_SELF = 311 // { lwpid_t|sys||_lwp_self(void); } + SYS__LWP_WAIT = 312 // { int|sys||_lwp_wait(lwpid_t wait_for, lwpid_t *departed); } + SYS__LWP_SUSPEND = 313 // { int|sys||_lwp_suspend(lwpid_t target); } + SYS__LWP_CONTINUE = 314 // { int|sys||_lwp_continue(lwpid_t target); } + SYS__LWP_WAKEUP = 315 // { int|sys||_lwp_wakeup(lwpid_t target); } + SYS__LWP_GETPRIVATE = 316 // { void *|sys||_lwp_getprivate(void); } + SYS__LWP_SETPRIVATE = 317 // { void|sys||_lwp_setprivate(void *ptr); } + SYS__LWP_KILL = 318 // { int|sys||_lwp_kill(lwpid_t target, int signo); } + SYS__LWP_DETACH = 319 // { int|sys||_lwp_detach(lwpid_t target); } + SYS__LWP_UNPARK = 321 // { int|sys||_lwp_unpark(lwpid_t target, const void *hint); } + SYS__LWP_UNPARK_ALL = 322 // { ssize_t|sys||_lwp_unpark_all(const lwpid_t *targets, size_t ntargets, const void *hint); } + SYS__LWP_SETNAME = 323 // { int|sys||_lwp_setname(lwpid_t target, const char *name); } + SYS__LWP_GETNAME = 324 // { int|sys||_lwp_getname(lwpid_t target, char *name, size_t len); } + SYS__LWP_CTL = 325 // { int|sys||_lwp_ctl(int features, struct lwpctl **address); } + SYS___SIGACTION_SIGTRAMP = 340 // { int|sys||__sigaction_sigtramp(int signum, const struct sigaction *nsa, struct sigaction *osa, const void *tramp, int vers); } + SYS_PMC_GET_INFO = 341 // { int|sys||pmc_get_info(int ctr, int op, void *args); } + SYS_PMC_CONTROL = 342 // { int|sys||pmc_control(int ctr, int op, void *args); } + SYS_RASCTL = 343 // { int|sys||rasctl(void *addr, size_t len, int op); } + SYS_KQUEUE = 344 // { int|sys||kqueue(void); } + SYS__SCHED_SETPARAM = 346 // { int|sys||_sched_setparam(pid_t pid, lwpid_t lid, int policy, const struct sched_param *params); } + SYS__SCHED_GETPARAM = 347 // { int|sys||_sched_getparam(pid_t pid, lwpid_t lid, int *policy, struct sched_param *params); } + SYS__SCHED_SETAFFINITY = 348 // { int|sys||_sched_setaffinity(pid_t pid, lwpid_t lid, size_t size, const cpuset_t *cpuset); } + SYS__SCHED_GETAFFINITY = 349 // { int|sys||_sched_getaffinity(pid_t pid, lwpid_t lid, size_t size, cpuset_t *cpuset); } + SYS_SCHED_YIELD = 350 // { int|sys||sched_yield(void); } + SYS_FSYNC_RANGE = 354 // { int|sys||fsync_range(int fd, int flags, off_t start, off_t length); } + SYS_UUIDGEN = 355 // { int|sys||uuidgen(struct uuid *store, int count); } + SYS_GETVFSSTAT = 356 // { int|sys||getvfsstat(struct statvfs *buf, size_t bufsize, int flags); } + SYS_STATVFS1 = 357 // { int|sys||statvfs1(const char *path, struct statvfs *buf, int flags); } + SYS_FSTATVFS1 = 358 // { 
int|sys||fstatvfs1(int fd, struct statvfs *buf, int flags); } + SYS_EXTATTRCTL = 360 // { int|sys||extattrctl(const char *path, int cmd, const char *filename, int attrnamespace, const char *attrname); } + SYS_EXTATTR_SET_FILE = 361 // { int|sys||extattr_set_file(const char *path, int attrnamespace, const char *attrname, const void *data, size_t nbytes); } + SYS_EXTATTR_GET_FILE = 362 // { ssize_t|sys||extattr_get_file(const char *path, int attrnamespace, const char *attrname, void *data, size_t nbytes); } + SYS_EXTATTR_DELETE_FILE = 363 // { int|sys||extattr_delete_file(const char *path, int attrnamespace, const char *attrname); } + SYS_EXTATTR_SET_FD = 364 // { int|sys||extattr_set_fd(int fd, int attrnamespace, const char *attrname, const void *data, size_t nbytes); } + SYS_EXTATTR_GET_FD = 365 // { ssize_t|sys||extattr_get_fd(int fd, int attrnamespace, const char *attrname, void *data, size_t nbytes); } + SYS_EXTATTR_DELETE_FD = 366 // { int|sys||extattr_delete_fd(int fd, int attrnamespace, const char *attrname); } + SYS_EXTATTR_SET_LINK = 367 // { int|sys||extattr_set_link(const char *path, int attrnamespace, const char *attrname, const void *data, size_t nbytes); } + SYS_EXTATTR_GET_LINK = 368 // { ssize_t|sys||extattr_get_link(const char *path, int attrnamespace, const char *attrname, void *data, size_t nbytes); } + SYS_EXTATTR_DELETE_LINK = 369 // { int|sys||extattr_delete_link(const char *path, int attrnamespace, const char *attrname); } + SYS_EXTATTR_LIST_FD = 370 // { ssize_t|sys||extattr_list_fd(int fd, int attrnamespace, void *data, size_t nbytes); } + SYS_EXTATTR_LIST_FILE = 371 // { ssize_t|sys||extattr_list_file(const char *path, int attrnamespace, void *data, size_t nbytes); } + SYS_EXTATTR_LIST_LINK = 372 // { ssize_t|sys||extattr_list_link(const char *path, int attrnamespace, void *data, size_t nbytes); } + SYS_SETXATTR = 375 // { int|sys||setxattr(const char *path, const char *name, const void *value, size_t size, int flags); } + SYS_LSETXATTR = 376 // { int|sys||lsetxattr(const char *path, const char *name, const void *value, size_t size, int flags); } + SYS_FSETXATTR = 377 // { int|sys||fsetxattr(int fd, const char *name, const void *value, size_t size, int flags); } + SYS_GETXATTR = 378 // { int|sys||getxattr(const char *path, const char *name, void *value, size_t size); } + SYS_LGETXATTR = 379 // { int|sys||lgetxattr(const char *path, const char *name, void *value, size_t size); } + SYS_FGETXATTR = 380 // { int|sys||fgetxattr(int fd, const char *name, void *value, size_t size); } + SYS_LISTXATTR = 381 // { int|sys||listxattr(const char *path, char *list, size_t size); } + SYS_LLISTXATTR = 382 // { int|sys||llistxattr(const char *path, char *list, size_t size); } + SYS_FLISTXATTR = 383 // { int|sys||flistxattr(int fd, char *list, size_t size); } + SYS_REMOVEXATTR = 384 // { int|sys||removexattr(const char *path, const char *name); } + SYS_LREMOVEXATTR = 385 // { int|sys||lremovexattr(const char *path, const char *name); } + SYS_FREMOVEXATTR = 386 // { int|sys||fremovexattr(int fd, const char *name); } + SYS_GETDENTS = 390 // { int|sys|30|getdents(int fd, char *buf, size_t count); } + SYS_SOCKET = 394 // { int|sys|30|socket(int domain, int type, int protocol); } + SYS_GETFH = 395 // { int|sys|30|getfh(const char *fname, void *fhp, size_t *fh_size); } + SYS_MOUNT = 410 // { int|sys|50|mount(const char *type, const char *path, int flags, void *data, size_t data_len); } + SYS_MREMAP = 411 // { void *|sys||mremap(void *old_address, size_t old_size, void *new_address, size_t 
new_size, int flags); } + SYS_PSET_CREATE = 412 // { int|sys||pset_create(psetid_t *psid); } + SYS_PSET_DESTROY = 413 // { int|sys||pset_destroy(psetid_t psid); } + SYS_PSET_ASSIGN = 414 // { int|sys||pset_assign(psetid_t psid, cpuid_t cpuid, psetid_t *opsid); } + SYS__PSET_BIND = 415 // { int|sys||_pset_bind(idtype_t idtype, id_t first_id, id_t second_id, psetid_t psid, psetid_t *opsid); } + SYS_POSIX_FADVISE = 416 // { int|sys|50|posix_fadvise(int fd, int PAD, off_t offset, off_t len, int advice); } + SYS_SELECT = 417 // { int|sys|50|select(int nd, fd_set *in, fd_set *ou, fd_set *ex, struct timeval *tv); } + SYS_GETTIMEOFDAY = 418 // { int|sys|50|gettimeofday(struct timeval *tp, void *tzp); } + SYS_SETTIMEOFDAY = 419 // { int|sys|50|settimeofday(const struct timeval *tv, const void *tzp); } + SYS_UTIMES = 420 // { int|sys|50|utimes(const char *path, const struct timeval *tptr); } + SYS_ADJTIME = 421 // { int|sys|50|adjtime(const struct timeval *delta, struct timeval *olddelta); } + SYS_FUTIMES = 423 // { int|sys|50|futimes(int fd, const struct timeval *tptr); } + SYS_LUTIMES = 424 // { int|sys|50|lutimes(const char *path, const struct timeval *tptr); } + SYS_SETITIMER = 425 // { int|sys|50|setitimer(int which, const struct itimerval *itv, struct itimerval *oitv); } + SYS_GETITIMER = 426 // { int|sys|50|getitimer(int which, struct itimerval *itv); } + SYS_CLOCK_GETTIME = 427 // { int|sys|50|clock_gettime(clockid_t clock_id, struct timespec *tp); } + SYS_CLOCK_SETTIME = 428 // { int|sys|50|clock_settime(clockid_t clock_id, const struct timespec *tp); } + SYS_CLOCK_GETRES = 429 // { int|sys|50|clock_getres(clockid_t clock_id, struct timespec *tp); } + SYS_NANOSLEEP = 430 // { int|sys|50|nanosleep(const struct timespec *rqtp, struct timespec *rmtp); } + SYS___SIGTIMEDWAIT = 431 // { int|sys|50|__sigtimedwait(const sigset_t *set, siginfo_t *info, struct timespec *timeout); } + SYS__LWP_PARK = 434 // { int|sys|50|_lwp_park(const struct timespec *ts, lwpid_t unpark, const void *hint, const void *unparkhint); } + SYS_KEVENT = 435 // { int|sys|50|kevent(int fd, const struct kevent *changelist, size_t nchanges, struct kevent *eventlist, size_t nevents, const struct timespec *timeout); } + SYS_PSELECT = 436 // { int|sys|50|pselect(int nd, fd_set *in, fd_set *ou, fd_set *ex, const struct timespec *ts, const sigset_t *mask); } + SYS_POLLTS = 437 // { int|sys|50|pollts(struct pollfd *fds, u_int nfds, const struct timespec *ts, const sigset_t *mask); } + SYS_STAT = 439 // { int|sys|50|stat(const char *path, struct stat *ub); } + SYS_FSTAT = 440 // { int|sys|50|fstat(int fd, struct stat *sb); } + SYS_LSTAT = 441 // { int|sys|50|lstat(const char *path, struct stat *ub); } + SYS___SEMCTL = 442 // { int|sys|50|__semctl(int semid, int semnum, int cmd, ... 
union __semun *arg); } + SYS_SHMCTL = 443 // { int|sys|50|shmctl(int shmid, int cmd, struct shmid_ds *buf); } + SYS_MSGCTL = 444 // { int|sys|50|msgctl(int msqid, int cmd, struct msqid_ds *buf); } + SYS_GETRUSAGE = 445 // { int|sys|50|getrusage(int who, struct rusage *rusage); } + SYS_TIMER_SETTIME = 446 // { int|sys|50|timer_settime(timer_t timerid, int flags, const struct itimerspec *value, struct itimerspec *ovalue); } + SYS_TIMER_GETTIME = 447 // { int|sys|50|timer_gettime(timer_t timerid, struct itimerspec *value); } + SYS_NTP_GETTIME = 448 // { int|sys|50|ntp_gettime(struct ntptimeval *ntvp); } + SYS_WAIT4 = 449 // { int|sys|50|wait4(pid_t pid, int *status, int options, struct rusage *rusage); } + SYS_MKNOD = 450 // { int|sys|50|mknod(const char *path, mode_t mode, dev_t dev); } + SYS_FHSTAT = 451 // { int|sys|50|fhstat(const void *fhp, size_t fh_size, struct stat *sb); } + SYS_PIPE2 = 453 // { int|sys||pipe2(int *fildes, int flags); } + SYS_DUP3 = 454 // { int|sys||dup3(int from, int to, int flags); } + SYS_KQUEUE1 = 455 // { int|sys||kqueue1(int flags); } + SYS_PACCEPT = 456 // { int|sys||paccept(int s, struct sockaddr *name, socklen_t *anamelen, const sigset_t *mask, int flags); } + SYS_LINKAT = 457 // { int|sys||linkat(int fd1, const char *name1, int fd2, const char *name2, int flags); } + SYS_RENAMEAT = 458 // { int|sys||renameat(int fromfd, const char *from, int tofd, const char *to); } + SYS_MKFIFOAT = 459 // { int|sys||mkfifoat(int fd, const char *path, mode_t mode); } + SYS_MKNODAT = 460 // { int|sys||mknodat(int fd, const char *path, mode_t mode, uint32_t dev); } + SYS_MKDIRAT = 461 // { int|sys||mkdirat(int fd, const char *path, mode_t mode); } + SYS_FACCESSAT = 462 // { int|sys||faccessat(int fd, const char *path, int amode, int flag); } + SYS_FCHMODAT = 463 // { int|sys||fchmodat(int fd, const char *path, mode_t mode, int flag); } + SYS_FCHOWNAT = 464 // { int|sys||fchownat(int fd, const char *path, uid_t owner, gid_t group, int flag); } + SYS_FEXECVE = 465 // { int|sys||fexecve(int fd, char * const *argp, char * const *envp); } + SYS_FSTATAT = 466 // { int|sys||fstatat(int fd, const char *path, struct stat *buf, int flag); } + SYS_UTIMENSAT = 467 // { int|sys||utimensat(int fd, const char *path, const struct timespec *tptr, int flag); } + SYS_OPENAT = 468 // { int|sys||openat(int fd, const char *path, int oflags, ... 
mode_t mode); } + SYS_READLINKAT = 469 // { int|sys||readlinkat(int fd, const char *path, char *buf, size_t bufsize); } + SYS_SYMLINKAT = 470 // { int|sys||symlinkat(const char *path1, int fd, const char *path2); } + SYS_UNLINKAT = 471 // { int|sys||unlinkat(int fd, const char *path, int flag); } + SYS_FUTIMENS = 472 // { int|sys||futimens(int fd, const struct timespec *tptr); } + SYS___QUOTACTL = 473 // { int|sys||__quotactl(const char *path, struct quotactl_args *args); } + SYS_POSIX_SPAWN = 474 // { int|sys||posix_spawn(pid_t *pid, const char *path, const struct posix_spawn_file_actions *file_actions, const struct posix_spawnattr *attrp, char *const *argv, char *const *envp); } + SYS_RECVMMSG = 475 // { int|sys||recvmmsg(int s, struct mmsghdr *mmsg, unsigned int vlen, unsigned int flags, struct timespec *timeout); } + SYS_SENDMMSG = 476 // { int|sys||sendmmsg(int s, struct mmsghdr *mmsg, unsigned int vlen, unsigned int flags); } +) diff --git a/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zsysnum_netbsd_amd64.go b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zsysnum_netbsd_amd64.go new file mode 100644 index 000000000..48a91d464 --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zsysnum_netbsd_amd64.go @@ -0,0 +1,273 @@ +// mksysnum_netbsd.pl +// MACHINE GENERATED BY THE ABOVE COMMAND; DO NOT EDIT + +// +build amd64,netbsd + +package unix + +const ( + SYS_EXIT = 1 // { void|sys||exit(int rval); } + SYS_FORK = 2 // { int|sys||fork(void); } + SYS_READ = 3 // { ssize_t|sys||read(int fd, void *buf, size_t nbyte); } + SYS_WRITE = 4 // { ssize_t|sys||write(int fd, const void *buf, size_t nbyte); } + SYS_OPEN = 5 // { int|sys||open(const char *path, int flags, ... 
mode_t mode); } + SYS_CLOSE = 6 // { int|sys||close(int fd); } + SYS_LINK = 9 // { int|sys||link(const char *path, const char *link); } + SYS_UNLINK = 10 // { int|sys||unlink(const char *path); } + SYS_CHDIR = 12 // { int|sys||chdir(const char *path); } + SYS_FCHDIR = 13 // { int|sys||fchdir(int fd); } + SYS_CHMOD = 15 // { int|sys||chmod(const char *path, mode_t mode); } + SYS_CHOWN = 16 // { int|sys||chown(const char *path, uid_t uid, gid_t gid); } + SYS_BREAK = 17 // { int|sys||obreak(char *nsize); } + SYS_GETPID = 20 // { pid_t|sys||getpid_with_ppid(void); } + SYS_UNMOUNT = 22 // { int|sys||unmount(const char *path, int flags); } + SYS_SETUID = 23 // { int|sys||setuid(uid_t uid); } + SYS_GETUID = 24 // { uid_t|sys||getuid_with_euid(void); } + SYS_GETEUID = 25 // { uid_t|sys||geteuid(void); } + SYS_PTRACE = 26 // { int|sys||ptrace(int req, pid_t pid, void *addr, int data); } + SYS_RECVMSG = 27 // { ssize_t|sys||recvmsg(int s, struct msghdr *msg, int flags); } + SYS_SENDMSG = 28 // { ssize_t|sys||sendmsg(int s, const struct msghdr *msg, int flags); } + SYS_RECVFROM = 29 // { ssize_t|sys||recvfrom(int s, void *buf, size_t len, int flags, struct sockaddr *from, socklen_t *fromlenaddr); } + SYS_ACCEPT = 30 // { int|sys||accept(int s, struct sockaddr *name, socklen_t *anamelen); } + SYS_GETPEERNAME = 31 // { int|sys||getpeername(int fdes, struct sockaddr *asa, socklen_t *alen); } + SYS_GETSOCKNAME = 32 // { int|sys||getsockname(int fdes, struct sockaddr *asa, socklen_t *alen); } + SYS_ACCESS = 33 // { int|sys||access(const char *path, int flags); } + SYS_CHFLAGS = 34 // { int|sys||chflags(const char *path, u_long flags); } + SYS_FCHFLAGS = 35 // { int|sys||fchflags(int fd, u_long flags); } + SYS_SYNC = 36 // { void|sys||sync(void); } + SYS_KILL = 37 // { int|sys||kill(pid_t pid, int signum); } + SYS_GETPPID = 39 // { pid_t|sys||getppid(void); } + SYS_DUP = 41 // { int|sys||dup(int fd); } + SYS_PIPE = 42 // { int|sys||pipe(void); } + SYS_GETEGID = 43 // { gid_t|sys||getegid(void); } + SYS_PROFIL = 44 // { int|sys||profil(char *samples, size_t size, u_long offset, u_int scale); } + SYS_KTRACE = 45 // { int|sys||ktrace(const char *fname, int ops, int facs, pid_t pid); } + SYS_GETGID = 47 // { gid_t|sys||getgid_with_egid(void); } + SYS___GETLOGIN = 49 // { int|sys||__getlogin(char *namebuf, size_t namelen); } + SYS___SETLOGIN = 50 // { int|sys||__setlogin(const char *namebuf); } + SYS_ACCT = 51 // { int|sys||acct(const char *path); } + SYS_IOCTL = 54 // { int|sys||ioctl(int fd, u_long com, ... 
void *data); } + SYS_REVOKE = 56 // { int|sys||revoke(const char *path); } + SYS_SYMLINK = 57 // { int|sys||symlink(const char *path, const char *link); } + SYS_READLINK = 58 // { ssize_t|sys||readlink(const char *path, char *buf, size_t count); } + SYS_EXECVE = 59 // { int|sys||execve(const char *path, char * const *argp, char * const *envp); } + SYS_UMASK = 60 // { mode_t|sys||umask(mode_t newmask); } + SYS_CHROOT = 61 // { int|sys||chroot(const char *path); } + SYS_VFORK = 66 // { int|sys||vfork(void); } + SYS_SBRK = 69 // { int|sys||sbrk(intptr_t incr); } + SYS_SSTK = 70 // { int|sys||sstk(int incr); } + SYS_VADVISE = 72 // { int|sys||ovadvise(int anom); } + SYS_MUNMAP = 73 // { int|sys||munmap(void *addr, size_t len); } + SYS_MPROTECT = 74 // { int|sys||mprotect(void *addr, size_t len, int prot); } + SYS_MADVISE = 75 // { int|sys||madvise(void *addr, size_t len, int behav); } + SYS_MINCORE = 78 // { int|sys||mincore(void *addr, size_t len, char *vec); } + SYS_GETGROUPS = 79 // { int|sys||getgroups(int gidsetsize, gid_t *gidset); } + SYS_SETGROUPS = 80 // { int|sys||setgroups(int gidsetsize, const gid_t *gidset); } + SYS_GETPGRP = 81 // { int|sys||getpgrp(void); } + SYS_SETPGID = 82 // { int|sys||setpgid(pid_t pid, pid_t pgid); } + SYS_DUP2 = 90 // { int|sys||dup2(int from, int to); } + SYS_FCNTL = 92 // { int|sys||fcntl(int fd, int cmd, ... void *arg); } + SYS_FSYNC = 95 // { int|sys||fsync(int fd); } + SYS_SETPRIORITY = 96 // { int|sys||setpriority(int which, id_t who, int prio); } + SYS_CONNECT = 98 // { int|sys||connect(int s, const struct sockaddr *name, socklen_t namelen); } + SYS_GETPRIORITY = 100 // { int|sys||getpriority(int which, id_t who); } + SYS_BIND = 104 // { int|sys||bind(int s, const struct sockaddr *name, socklen_t namelen); } + SYS_SETSOCKOPT = 105 // { int|sys||setsockopt(int s, int level, int name, const void *val, socklen_t valsize); } + SYS_LISTEN = 106 // { int|sys||listen(int s, int backlog); } + SYS_GETSOCKOPT = 118 // { int|sys||getsockopt(int s, int level, int name, void *val, socklen_t *avalsize); } + SYS_READV = 120 // { ssize_t|sys||readv(int fd, const struct iovec *iovp, int iovcnt); } + SYS_WRITEV = 121 // { ssize_t|sys||writev(int fd, const struct iovec *iovp, int iovcnt); } + SYS_FCHOWN = 123 // { int|sys||fchown(int fd, uid_t uid, gid_t gid); } + SYS_FCHMOD = 124 // { int|sys||fchmod(int fd, mode_t mode); } + SYS_SETREUID = 126 // { int|sys||setreuid(uid_t ruid, uid_t euid); } + SYS_SETREGID = 127 // { int|sys||setregid(gid_t rgid, gid_t egid); } + SYS_RENAME = 128 // { int|sys||rename(const char *from, const char *to); } + SYS_FLOCK = 131 // { int|sys||flock(int fd, int how); } + SYS_MKFIFO = 132 // { int|sys||mkfifo(const char *path, mode_t mode); } + SYS_SENDTO = 133 // { ssize_t|sys||sendto(int s, const void *buf, size_t len, int flags, const struct sockaddr *to, socklen_t tolen); } + SYS_SHUTDOWN = 134 // { int|sys||shutdown(int s, int how); } + SYS_SOCKETPAIR = 135 // { int|sys||socketpair(int domain, int type, int protocol, int *rsv); } + SYS_MKDIR = 136 // { int|sys||mkdir(const char *path, mode_t mode); } + SYS_RMDIR = 137 // { int|sys||rmdir(const char *path); } + SYS_SETSID = 147 // { int|sys||setsid(void); } + SYS_SYSARCH = 165 // { int|sys||sysarch(int op, void *parms); } + SYS_PREAD = 173 // { ssize_t|sys||pread(int fd, void *buf, size_t nbyte, int PAD, off_t offset); } + SYS_PWRITE = 174 // { ssize_t|sys||pwrite(int fd, const void *buf, size_t nbyte, int PAD, off_t offset); } + SYS_NTP_ADJTIME = 176 // { int|sys||ntp_adjtime(struct 
timex *tp); }
+ SYS_SETGID = 181 // { int|sys||setgid(gid_t gid); }
+ SYS_SETEGID = 182 // { int|sys||setegid(gid_t egid); }
+ SYS_SETEUID = 183 // { int|sys||seteuid(uid_t euid); }
+ SYS_PATHCONF = 191 // { long|sys||pathconf(const char *path, int name); }
+ SYS_FPATHCONF = 192 // { long|sys||fpathconf(int fd, int name); }
+ SYS_GETRLIMIT = 194 // { int|sys||getrlimit(int which, struct rlimit *rlp); }
+ SYS_SETRLIMIT = 195 // { int|sys||setrlimit(int which, const struct rlimit *rlp); }
+ SYS_MMAP = 197 // { void *|sys||mmap(void *addr, size_t len, int prot, int flags, int fd, long PAD, off_t pos); }
+ SYS_LSEEK = 199 // { off_t|sys||lseek(int fd, int PAD, off_t offset, int whence); }
+ SYS_TRUNCATE = 200 // { int|sys||truncate(const char *path, int PAD, off_t length); }
+ SYS_FTRUNCATE = 201 // { int|sys||ftruncate(int fd, int PAD, off_t length); }
+ SYS___SYSCTL = 202 // { int|sys||__sysctl(const int *name, u_int namelen, void *old, size_t *oldlenp, const void *new, size_t newlen); }
+ SYS_MLOCK = 203 // { int|sys||mlock(const void *addr, size_t len); }
+ SYS_MUNLOCK = 204 // { int|sys||munlock(const void *addr, size_t len); }
+ SYS_UNDELETE = 205 // { int|sys||undelete(const char *path); }
+ SYS_GETPGID = 207 // { pid_t|sys||getpgid(pid_t pid); }
+ SYS_REBOOT = 208 // { int|sys||reboot(int opt, char *bootstr); }
+ SYS_POLL = 209 // { int|sys||poll(struct pollfd *fds, u_int nfds, int timeout); }
+ SYS_SEMGET = 221 // { int|sys||semget(key_t key, int nsems, int semflg); }
+ SYS_SEMOP = 222 // { int|sys||semop(int semid, struct sembuf *sops, size_t nsops); }
+ SYS_SEMCONFIG = 223 // { int|sys||semconfig(int flag); }
+ SYS_MSGGET = 225 // { int|sys||msgget(key_t key, int msgflg); }
+ SYS_MSGSND = 226 // { int|sys||msgsnd(int msqid, const void *msgp, size_t msgsz, int msgflg); }
+ SYS_MSGRCV = 227 // { ssize_t|sys||msgrcv(int msqid, void *msgp, size_t msgsz, long msgtyp, int msgflg); }
+ SYS_SHMAT = 228 // { void *|sys||shmat(int shmid, const void *shmaddr, int shmflg); }
+ SYS_SHMDT = 230 // { int|sys||shmdt(const void *shmaddr); }
+ SYS_SHMGET = 231 // { int|sys||shmget(key_t key, size_t size, int shmflg); }
+ SYS_TIMER_CREATE = 235 // { int|sys||timer_create(clockid_t clock_id, struct sigevent *evp, timer_t *timerid); }
+ SYS_TIMER_DELETE = 236 // { int|sys||timer_delete(timer_t timerid); }
+ SYS_TIMER_GETOVERRUN = 239 // { int|sys||timer_getoverrun(timer_t timerid); }
+ SYS_FDATASYNC = 241 // { int|sys||fdatasync(int fd); }
+ SYS_MLOCKALL = 242 // { int|sys||mlockall(int flags); }
+ SYS_MUNLOCKALL = 243 // { int|sys||munlockall(void); }
+ SYS_SIGQUEUEINFO = 245 // { int|sys||sigqueueinfo(pid_t pid, const siginfo_t *info); }
+ SYS_MODCTL = 246 // { int|sys||modctl(int cmd, void *arg); }
+ SYS___POSIX_RENAME = 270 // { int|sys||__posix_rename(const char *from, const char *to); }
+ SYS_SWAPCTL = 271 // { int|sys||swapctl(int cmd, void *arg, int misc); }
+ SYS_MINHERIT = 273 // { int|sys||minherit(void *addr, size_t len, int inherit); }
+ SYS_LCHMOD = 274 // { int|sys||lchmod(const char *path, mode_t mode); }
+ SYS_LCHOWN = 275 // { int|sys||lchown(const char *path, uid_t uid, gid_t gid); }
+ SYS___POSIX_CHOWN = 283 // { int|sys||__posix_chown(const char *path, uid_t uid, gid_t gid); }
+ SYS___POSIX_FCHOWN = 284 // { int|sys||__posix_fchown(int fd, uid_t uid, gid_t gid); }
+ SYS___POSIX_LCHOWN = 285 // { int|sys||__posix_lchown(const char *path, uid_t uid, gid_t gid); }
+ SYS_GETSID = 286 // { pid_t|sys||getsid(pid_t pid); }
+ SYS___CLONE = 287 // { pid_t|sys||__clone(int flags, void *stack); }
+ SYS_FKTRACE = 288 // { int|sys||fktrace(int fd, int ops, int facs, pid_t pid); }
+ SYS_PREADV = 289 // { ssize_t|sys||preadv(int fd, const struct iovec *iovp, int iovcnt, int PAD, off_t offset); }
+ SYS_PWRITEV = 290 // { ssize_t|sys||pwritev(int fd, const struct iovec *iovp, int iovcnt, int PAD, off_t offset); }
+ SYS___GETCWD = 296 // { int|sys||__getcwd(char *bufp, size_t length); }
+ SYS_FCHROOT = 297 // { int|sys||fchroot(int fd); }
+ SYS_LCHFLAGS = 304 // { int|sys||lchflags(const char *path, u_long flags); }
+ SYS_ISSETUGID = 305 // { int|sys||issetugid(void); }
+ SYS_UTRACE = 306 // { int|sys||utrace(const char *label, void *addr, size_t len); }
+ SYS_GETCONTEXT = 307 // { int|sys||getcontext(struct __ucontext *ucp); }
+ SYS_SETCONTEXT = 308 // { int|sys||setcontext(const struct __ucontext *ucp); }
+ SYS__LWP_CREATE = 309 // { int|sys||_lwp_create(const struct __ucontext *ucp, u_long flags, lwpid_t *new_lwp); }
+ SYS__LWP_EXIT = 310 // { int|sys||_lwp_exit(void); }
+ SYS__LWP_SELF = 311 // { lwpid_t|sys||_lwp_self(void); }
+ SYS__LWP_WAIT = 312 // { int|sys||_lwp_wait(lwpid_t wait_for, lwpid_t *departed); }
+ SYS__LWP_SUSPEND = 313 // { int|sys||_lwp_suspend(lwpid_t target); }
+ SYS__LWP_CONTINUE = 314 // { int|sys||_lwp_continue(lwpid_t target); }
+ SYS__LWP_WAKEUP = 315 // { int|sys||_lwp_wakeup(lwpid_t target); }
+ SYS__LWP_GETPRIVATE = 316 // { void *|sys||_lwp_getprivate(void); }
+ SYS__LWP_SETPRIVATE = 317 // { void|sys||_lwp_setprivate(void *ptr); }
+ SYS__LWP_KILL = 318 // { int|sys||_lwp_kill(lwpid_t target, int signo); }
+ SYS__LWP_DETACH = 319 // { int|sys||_lwp_detach(lwpid_t target); }
+ SYS__LWP_UNPARK = 321 // { int|sys||_lwp_unpark(lwpid_t target, const void *hint); }
+ SYS__LWP_UNPARK_ALL = 322 // { ssize_t|sys||_lwp_unpark_all(const lwpid_t *targets, size_t ntargets, const void *hint); }
+ SYS__LWP_SETNAME = 323 // { int|sys||_lwp_setname(lwpid_t target, const char *name); }
+ SYS__LWP_GETNAME = 324 // { int|sys||_lwp_getname(lwpid_t target, char *name, size_t len); }
+ SYS__LWP_CTL = 325 // { int|sys||_lwp_ctl(int features, struct lwpctl **address); }
+ SYS___SIGACTION_SIGTRAMP = 340 // { int|sys||__sigaction_sigtramp(int signum, const struct sigaction *nsa, struct sigaction *osa, const void *tramp, int vers); }
+ SYS_PMC_GET_INFO = 341 // { int|sys||pmc_get_info(int ctr, int op, void *args); }
+ SYS_PMC_CONTROL = 342 // { int|sys||pmc_control(int ctr, int op, void *args); }
+ SYS_RASCTL = 343 // { int|sys||rasctl(void *addr, size_t len, int op); }
+ SYS_KQUEUE = 344 // { int|sys||kqueue(void); }
+ SYS__SCHED_SETPARAM = 346 // { int|sys||_sched_setparam(pid_t pid, lwpid_t lid, int policy, const struct sched_param *params); }
+ SYS__SCHED_GETPARAM = 347 // { int|sys||_sched_getparam(pid_t pid, lwpid_t lid, int *policy, struct sched_param *params); }
+ SYS__SCHED_SETAFFINITY = 348 // { int|sys||_sched_setaffinity(pid_t pid, lwpid_t lid, size_t size, const cpuset_t *cpuset); }
+ SYS__SCHED_GETAFFINITY = 349 // { int|sys||_sched_getaffinity(pid_t pid, lwpid_t lid, size_t size, cpuset_t *cpuset); }
+ SYS_SCHED_YIELD = 350 // { int|sys||sched_yield(void); }
+ SYS_FSYNC_RANGE = 354 // { int|sys||fsync_range(int fd, int flags, off_t start, off_t length); }
+ SYS_UUIDGEN = 355 // { int|sys||uuidgen(struct uuid *store, int count); }
+ SYS_GETVFSSTAT = 356 // { int|sys||getvfsstat(struct statvfs *buf, size_t bufsize, int flags); }
+ SYS_STATVFS1 = 357 // { int|sys||statvfs1(const char *path, struct statvfs *buf, int flags); }
+ SYS_FSTATVFS1 = 358 // { int|sys||fstatvfs1(int fd, struct statvfs *buf, int flags); }
+ SYS_EXTATTRCTL = 360 // { int|sys||extattrctl(const char *path, int cmd, const char *filename, int attrnamespace, const char *attrname); }
+ SYS_EXTATTR_SET_FILE = 361 // { int|sys||extattr_set_file(const char *path, int attrnamespace, const char *attrname, const void *data, size_t nbytes); }
+ SYS_EXTATTR_GET_FILE = 362 // { ssize_t|sys||extattr_get_file(const char *path, int attrnamespace, const char *attrname, void *data, size_t nbytes); }
+ SYS_EXTATTR_DELETE_FILE = 363 // { int|sys||extattr_delete_file(const char *path, int attrnamespace, const char *attrname); }
+ SYS_EXTATTR_SET_FD = 364 // { int|sys||extattr_set_fd(int fd, int attrnamespace, const char *attrname, const void *data, size_t nbytes); }
+ SYS_EXTATTR_GET_FD = 365 // { ssize_t|sys||extattr_get_fd(int fd, int attrnamespace, const char *attrname, void *data, size_t nbytes); }
+ SYS_EXTATTR_DELETE_FD = 366 // { int|sys||extattr_delete_fd(int fd, int attrnamespace, const char *attrname); }
+ SYS_EXTATTR_SET_LINK = 367 // { int|sys||extattr_set_link(const char *path, int attrnamespace, const char *attrname, const void *data, size_t nbytes); }
+ SYS_EXTATTR_GET_LINK = 368 // { ssize_t|sys||extattr_get_link(const char *path, int attrnamespace, const char *attrname, void *data, size_t nbytes); }
+ SYS_EXTATTR_DELETE_LINK = 369 // { int|sys||extattr_delete_link(const char *path, int attrnamespace, const char *attrname); }
+ SYS_EXTATTR_LIST_FD = 370 // { ssize_t|sys||extattr_list_fd(int fd, int attrnamespace, void *data, size_t nbytes); }
+ SYS_EXTATTR_LIST_FILE = 371 // { ssize_t|sys||extattr_list_file(const char *path, int attrnamespace, void *data, size_t nbytes); }
+ SYS_EXTATTR_LIST_LINK = 372 // { ssize_t|sys||extattr_list_link(const char *path, int attrnamespace, void *data, size_t nbytes); }
+ SYS_SETXATTR = 375 // { int|sys||setxattr(const char *path, const char *name, const void *value, size_t size, int flags); }
+ SYS_LSETXATTR = 376 // { int|sys||lsetxattr(const char *path, const char *name, const void *value, size_t size, int flags); }
+ SYS_FSETXATTR = 377 // { int|sys||fsetxattr(int fd, const char *name, const void *value, size_t size, int flags); }
+ SYS_GETXATTR = 378 // { int|sys||getxattr(const char *path, const char *name, void *value, size_t size); }
+ SYS_LGETXATTR = 379 // { int|sys||lgetxattr(const char *path, const char *name, void *value, size_t size); }
+ SYS_FGETXATTR = 380 // { int|sys||fgetxattr(int fd, const char *name, void *value, size_t size); }
+ SYS_LISTXATTR = 381 // { int|sys||listxattr(const char *path, char *list, size_t size); }
+ SYS_LLISTXATTR = 382 // { int|sys||llistxattr(const char *path, char *list, size_t size); }
+ SYS_FLISTXATTR = 383 // { int|sys||flistxattr(int fd, char *list, size_t size); }
+ SYS_REMOVEXATTR = 384 // { int|sys||removexattr(const char *path, const char *name); }
+ SYS_LREMOVEXATTR = 385 // { int|sys||lremovexattr(const char *path, const char *name); }
+ SYS_FREMOVEXATTR = 386 // { int|sys||fremovexattr(int fd, const char *name); }
+ SYS_GETDENTS = 390 // { int|sys|30|getdents(int fd, char *buf, size_t count); }
+ SYS_SOCKET = 394 // { int|sys|30|socket(int domain, int type, int protocol); }
+ SYS_GETFH = 395 // { int|sys|30|getfh(const char *fname, void *fhp, size_t *fh_size); }
+ SYS_MOUNT = 410 // { int|sys|50|mount(const char *type, const char *path, int flags, void *data, size_t data_len); }
+ SYS_MREMAP = 411 // { void *|sys||mremap(void *old_address, size_t old_size, void *new_address, size_t new_size, int flags); }
+ SYS_PSET_CREATE = 412 // { int|sys||pset_create(psetid_t *psid); }
+ SYS_PSET_DESTROY = 413 // { int|sys||pset_destroy(psetid_t psid); }
+ SYS_PSET_ASSIGN = 414 // { int|sys||pset_assign(psetid_t psid, cpuid_t cpuid, psetid_t *opsid); }
+ SYS__PSET_BIND = 415 // { int|sys||_pset_bind(idtype_t idtype, id_t first_id, id_t second_id, psetid_t psid, psetid_t *opsid); }
+ SYS_POSIX_FADVISE = 416 // { int|sys|50|posix_fadvise(int fd, int PAD, off_t offset, off_t len, int advice); }
+ SYS_SELECT = 417 // { int|sys|50|select(int nd, fd_set *in, fd_set *ou, fd_set *ex, struct timeval *tv); }
+ SYS_GETTIMEOFDAY = 418 // { int|sys|50|gettimeofday(struct timeval *tp, void *tzp); }
+ SYS_SETTIMEOFDAY = 419 // { int|sys|50|settimeofday(const struct timeval *tv, const void *tzp); }
+ SYS_UTIMES = 420 // { int|sys|50|utimes(const char *path, const struct timeval *tptr); }
+ SYS_ADJTIME = 421 // { int|sys|50|adjtime(const struct timeval *delta, struct timeval *olddelta); }
+ SYS_FUTIMES = 423 // { int|sys|50|futimes(int fd, const struct timeval *tptr); }
+ SYS_LUTIMES = 424 // { int|sys|50|lutimes(const char *path, const struct timeval *tptr); }
+ SYS_SETITIMER = 425 // { int|sys|50|setitimer(int which, const struct itimerval *itv, struct itimerval *oitv); }
+ SYS_GETITIMER = 426 // { int|sys|50|getitimer(int which, struct itimerval *itv); }
+ SYS_CLOCK_GETTIME = 427 // { int|sys|50|clock_gettime(clockid_t clock_id, struct timespec *tp); }
+ SYS_CLOCK_SETTIME = 428 // { int|sys|50|clock_settime(clockid_t clock_id, const struct timespec *tp); }
+ SYS_CLOCK_GETRES = 429 // { int|sys|50|clock_getres(clockid_t clock_id, struct timespec *tp); }
+ SYS_NANOSLEEP = 430 // { int|sys|50|nanosleep(const struct timespec *rqtp, struct timespec *rmtp); }
+ SYS___SIGTIMEDWAIT = 431 // { int|sys|50|__sigtimedwait(const sigset_t *set, siginfo_t *info, struct timespec *timeout); }
+ SYS__LWP_PARK = 434 // { int|sys|50|_lwp_park(const struct timespec *ts, lwpid_t unpark, const void *hint, const void *unparkhint); }
+ SYS_KEVENT = 435 // { int|sys|50|kevent(int fd, const struct kevent *changelist, size_t nchanges, struct kevent *eventlist, size_t nevents, const struct timespec *timeout); }
+ SYS_PSELECT = 436 // { int|sys|50|pselect(int nd, fd_set *in, fd_set *ou, fd_set *ex, const struct timespec *ts, const sigset_t *mask); }
+ SYS_POLLTS = 437 // { int|sys|50|pollts(struct pollfd *fds, u_int nfds, const struct timespec *ts, const sigset_t *mask); }
+ SYS_STAT = 439 // { int|sys|50|stat(const char *path, struct stat *ub); }
+ SYS_FSTAT = 440 // { int|sys|50|fstat(int fd, struct stat *sb); }
+ SYS_LSTAT = 441 // { int|sys|50|lstat(const char *path, struct stat *ub); }
+ SYS___SEMCTL = 442 // { int|sys|50|__semctl(int semid, int semnum, int cmd, ... union __semun *arg); }
+ SYS_SHMCTL = 443 // { int|sys|50|shmctl(int shmid, int cmd, struct shmid_ds *buf); }
+ SYS_MSGCTL = 444 // { int|sys|50|msgctl(int msqid, int cmd, struct msqid_ds *buf); }
+ SYS_GETRUSAGE = 445 // { int|sys|50|getrusage(int who, struct rusage *rusage); }
+ SYS_TIMER_SETTIME = 446 // { int|sys|50|timer_settime(timer_t timerid, int flags, const struct itimerspec *value, struct itimerspec *ovalue); }
+ SYS_TIMER_GETTIME = 447 // { int|sys|50|timer_gettime(timer_t timerid, struct itimerspec *value); }
+ SYS_NTP_GETTIME = 448 // { int|sys|50|ntp_gettime(struct ntptimeval *ntvp); }
+ SYS_WAIT4 = 449 // { int|sys|50|wait4(pid_t pid, int *status, int options, struct rusage *rusage); }
+ SYS_MKNOD = 450 // { int|sys|50|mknod(const char *path, mode_t mode, dev_t dev); }
+ SYS_FHSTAT = 451 // { int|sys|50|fhstat(const void *fhp, size_t fh_size, struct stat *sb); }
+ SYS_PIPE2 = 453 // { int|sys||pipe2(int *fildes, int flags); }
+ SYS_DUP3 = 454 // { int|sys||dup3(int from, int to, int flags); }
+ SYS_KQUEUE1 = 455 // { int|sys||kqueue1(int flags); }
+ SYS_PACCEPT = 456 // { int|sys||paccept(int s, struct sockaddr *name, socklen_t *anamelen, const sigset_t *mask, int flags); }
+ SYS_LINKAT = 457 // { int|sys||linkat(int fd1, const char *name1, int fd2, const char *name2, int flags); }
+ SYS_RENAMEAT = 458 // { int|sys||renameat(int fromfd, const char *from, int tofd, const char *to); }
+ SYS_MKFIFOAT = 459 // { int|sys||mkfifoat(int fd, const char *path, mode_t mode); }
+ SYS_MKNODAT = 460 // { int|sys||mknodat(int fd, const char *path, mode_t mode, uint32_t dev); }
+ SYS_MKDIRAT = 461 // { int|sys||mkdirat(int fd, const char *path, mode_t mode); }
+ SYS_FACCESSAT = 462 // { int|sys||faccessat(int fd, const char *path, int amode, int flag); }
+ SYS_FCHMODAT = 463 // { int|sys||fchmodat(int fd, const char *path, mode_t mode, int flag); }
+ SYS_FCHOWNAT = 464 // { int|sys||fchownat(int fd, const char *path, uid_t owner, gid_t group, int flag); }
+ SYS_FEXECVE = 465 // { int|sys||fexecve(int fd, char * const *argp, char * const *envp); }
+ SYS_FSTATAT = 466 // { int|sys||fstatat(int fd, const char *path, struct stat *buf, int flag); }
+ SYS_UTIMENSAT = 467 // { int|sys||utimensat(int fd, const char *path, const struct timespec *tptr, int flag); }
+ SYS_OPENAT = 468 // { int|sys||openat(int fd, const char *path, int oflags, ... mode_t mode); }
+ SYS_READLINKAT = 469 // { int|sys||readlinkat(int fd, const char *path, char *buf, size_t bufsize); }
+ SYS_SYMLINKAT = 470 // { int|sys||symlinkat(const char *path1, int fd, const char *path2); }
+ SYS_UNLINKAT = 471 // { int|sys||unlinkat(int fd, const char *path, int flag); }
+ SYS_FUTIMENS = 472 // { int|sys||futimens(int fd, const struct timespec *tptr); }
+ SYS___QUOTACTL = 473 // { int|sys||__quotactl(const char *path, struct quotactl_args *args); }
+ SYS_POSIX_SPAWN = 474 // { int|sys||posix_spawn(pid_t *pid, const char *path, const struct posix_spawn_file_actions *file_actions, const struct posix_spawnattr *attrp, char *const *argv, char *const *envp); }
+ SYS_RECVMMSG = 475 // { int|sys||recvmmsg(int s, struct mmsghdr *mmsg, unsigned int vlen, unsigned int flags, struct timespec *timeout); }
+ SYS_SENDMMSG = 476 // { int|sys||sendmmsg(int s, struct mmsghdr *mmsg, unsigned int vlen, unsigned int flags); }
+)
diff --git a/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zsysnum_netbsd_arm.go b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zsysnum_netbsd_arm.go
new file mode 100644
index 000000000..612ba662c
--- /dev/null
+++ b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zsysnum_netbsd_arm.go
@@ -0,0 +1,273 @@
+// mksysnum_netbsd.pl
+// MACHINE GENERATED BY THE ABOVE COMMAND; DO NOT EDIT
+
+// +build arm,netbsd
+
+package unix
+
+const (
+ SYS_EXIT = 1 // { void|sys||exit(int rval); }
+ SYS_FORK = 2 // { int|sys||fork(void); }
+ SYS_READ = 3 // { ssize_t|sys||read(int fd, void *buf, size_t nbyte); }
+ SYS_WRITE = 4 // { ssize_t|sys||write(int fd, const void *buf, size_t nbyte); }
+ SYS_OPEN = 5 // { int|sys||open(const char *path, int flags, ... mode_t mode); }
+ SYS_CLOSE = 6 // { int|sys||close(int fd); }
+ SYS_LINK = 9 // { int|sys||link(const char *path, const char *link); }
+ SYS_UNLINK = 10 // { int|sys||unlink(const char *path); }
+ SYS_CHDIR = 12 // { int|sys||chdir(const char *path); }
+ SYS_FCHDIR = 13 // { int|sys||fchdir(int fd); }
+ SYS_CHMOD = 15 // { int|sys||chmod(const char *path, mode_t mode); }
+ SYS_CHOWN = 16 // { int|sys||chown(const char *path, uid_t uid, gid_t gid); }
+ SYS_BREAK = 17 // { int|sys||obreak(char *nsize); }
+ SYS_GETPID = 20 // { pid_t|sys||getpid_with_ppid(void); }
+ SYS_UNMOUNT = 22 // { int|sys||unmount(const char *path, int flags); }
+ SYS_SETUID = 23 // { int|sys||setuid(uid_t uid); }
+ SYS_GETUID = 24 // { uid_t|sys||getuid_with_euid(void); }
+ SYS_GETEUID = 25 // { uid_t|sys||geteuid(void); }
+ SYS_PTRACE = 26 // { int|sys||ptrace(int req, pid_t pid, void *addr, int data); }
+ SYS_RECVMSG = 27 // { ssize_t|sys||recvmsg(int s, struct msghdr *msg, int flags); }
+ SYS_SENDMSG = 28 // { ssize_t|sys||sendmsg(int s, const struct msghdr *msg, int flags); }
+ SYS_RECVFROM = 29 // { ssize_t|sys||recvfrom(int s, void *buf, size_t len, int flags, struct sockaddr *from, socklen_t *fromlenaddr); }
+ SYS_ACCEPT = 30 // { int|sys||accept(int s, struct sockaddr *name, socklen_t *anamelen); }
+ SYS_GETPEERNAME = 31 // { int|sys||getpeername(int fdes, struct sockaddr *asa, socklen_t *alen); }
+ SYS_GETSOCKNAME = 32 // { int|sys||getsockname(int fdes, struct sockaddr *asa, socklen_t *alen); }
+ SYS_ACCESS = 33 // { int|sys||access(const char *path, int flags); }
+ SYS_CHFLAGS = 34 // { int|sys||chflags(const char *path, u_long flags); }
+ SYS_FCHFLAGS = 35 // { int|sys||fchflags(int fd, u_long flags); }
+ SYS_SYNC = 36 // { void|sys||sync(void); }
+ SYS_KILL = 37 // { int|sys||kill(pid_t pid, int signum); }
+ SYS_GETPPID = 39 // { pid_t|sys||getppid(void); }
+ SYS_DUP = 41 // { int|sys||dup(int fd); }
+ SYS_PIPE = 42 // { int|sys||pipe(void); }
+ SYS_GETEGID = 43 // { gid_t|sys||getegid(void); }
+ SYS_PROFIL = 44 // { int|sys||profil(char *samples, size_t size, u_long offset, u_int scale); }
+ SYS_KTRACE = 45 // { int|sys||ktrace(const char *fname, int ops, int facs, pid_t pid); }
+ SYS_GETGID = 47 // { gid_t|sys||getgid_with_egid(void); }
+ SYS___GETLOGIN = 49 // { int|sys||__getlogin(char *namebuf, size_t namelen); }
+ SYS___SETLOGIN = 50 // { int|sys||__setlogin(const char *namebuf); }
+ SYS_ACCT = 51 // { int|sys||acct(const char *path); }
+ SYS_IOCTL = 54 // { int|sys||ioctl(int fd, u_long com, ... void *data); }
+ SYS_REVOKE = 56 // { int|sys||revoke(const char *path); }
+ SYS_SYMLINK = 57 // { int|sys||symlink(const char *path, const char *link); }
+ SYS_READLINK = 58 // { ssize_t|sys||readlink(const char *path, char *buf, size_t count); }
+ SYS_EXECVE = 59 // { int|sys||execve(const char *path, char * const *argp, char * const *envp); }
+ SYS_UMASK = 60 // { mode_t|sys||umask(mode_t newmask); }
+ SYS_CHROOT = 61 // { int|sys||chroot(const char *path); }
+ SYS_VFORK = 66 // { int|sys||vfork(void); }
+ SYS_SBRK = 69 // { int|sys||sbrk(intptr_t incr); }
+ SYS_SSTK = 70 // { int|sys||sstk(int incr); }
+ SYS_VADVISE = 72 // { int|sys||ovadvise(int anom); }
+ SYS_MUNMAP = 73 // { int|sys||munmap(void *addr, size_t len); }
+ SYS_MPROTECT = 74 // { int|sys||mprotect(void *addr, size_t len, int prot); }
+ SYS_MADVISE = 75 // { int|sys||madvise(void *addr, size_t len, int behav); }
+ SYS_MINCORE = 78 // { int|sys||mincore(void *addr, size_t len, char *vec); }
+ SYS_GETGROUPS = 79 // { int|sys||getgroups(int gidsetsize, gid_t *gidset); }
+ SYS_SETGROUPS = 80 // { int|sys||setgroups(int gidsetsize, const gid_t *gidset); }
+ SYS_GETPGRP = 81 // { int|sys||getpgrp(void); }
+ SYS_SETPGID = 82 // { int|sys||setpgid(pid_t pid, pid_t pgid); }
+ SYS_DUP2 = 90 // { int|sys||dup2(int from, int to); }
+ SYS_FCNTL = 92 // { int|sys||fcntl(int fd, int cmd, ... void *arg); }
+ SYS_FSYNC = 95 // { int|sys||fsync(int fd); }
+ SYS_SETPRIORITY = 96 // { int|sys||setpriority(int which, id_t who, int prio); }
+ SYS_CONNECT = 98 // { int|sys||connect(int s, const struct sockaddr *name, socklen_t namelen); }
+ SYS_GETPRIORITY = 100 // { int|sys||getpriority(int which, id_t who); }
+ SYS_BIND = 104 // { int|sys||bind(int s, const struct sockaddr *name, socklen_t namelen); }
+ SYS_SETSOCKOPT = 105 // { int|sys||setsockopt(int s, int level, int name, const void *val, socklen_t valsize); }
+ SYS_LISTEN = 106 // { int|sys||listen(int s, int backlog); }
+ SYS_GETSOCKOPT = 118 // { int|sys||getsockopt(int s, int level, int name, void *val, socklen_t *avalsize); }
+ SYS_READV = 120 // { ssize_t|sys||readv(int fd, const struct iovec *iovp, int iovcnt); }
+ SYS_WRITEV = 121 // { ssize_t|sys||writev(int fd, const struct iovec *iovp, int iovcnt); }
+ SYS_FCHOWN = 123 // { int|sys||fchown(int fd, uid_t uid, gid_t gid); }
+ SYS_FCHMOD = 124 // { int|sys||fchmod(int fd, mode_t mode); }
+ SYS_SETREUID = 126 // { int|sys||setreuid(uid_t ruid, uid_t euid); }
+ SYS_SETREGID = 127 // { int|sys||setregid(gid_t rgid, gid_t egid); }
+ SYS_RENAME = 128 // { int|sys||rename(const char *from, const char *to); }
+ SYS_FLOCK = 131 // { int|sys||flock(int fd, int how); }
+ SYS_MKFIFO = 132 // { int|sys||mkfifo(const char *path, mode_t mode); }
+ SYS_SENDTO = 133 // { ssize_t|sys||sendto(int s, const void *buf, size_t len, int flags, const struct sockaddr *to, socklen_t tolen); }
+ SYS_SHUTDOWN = 134 // { int|sys||shutdown(int s, int how); }
+ SYS_SOCKETPAIR = 135 // { int|sys||socketpair(int domain, int type, int protocol, int *rsv); }
+ SYS_MKDIR = 136 // { int|sys||mkdir(const char *path, mode_t mode); }
+ SYS_RMDIR = 137 // { int|sys||rmdir(const char *path); }
+ SYS_SETSID = 147 // { int|sys||setsid(void); }
+ SYS_SYSARCH = 165 // { int|sys||sysarch(int op, void *parms); }
+ SYS_PREAD = 173 // { ssize_t|sys||pread(int fd, void *buf, size_t nbyte, int PAD, off_t offset); }
+ SYS_PWRITE = 174 // { ssize_t|sys||pwrite(int fd, const void *buf, size_t nbyte, int PAD, off_t offset); }
+ SYS_NTP_ADJTIME = 176 // { int|sys||ntp_adjtime(struct timex *tp); }
+ SYS_SETGID = 181 // { int|sys||setgid(gid_t gid); }
+ SYS_SETEGID = 182 // { int|sys||setegid(gid_t egid); }
+ SYS_SETEUID = 183 // { int|sys||seteuid(uid_t euid); }
+ SYS_PATHCONF = 191 // { long|sys||pathconf(const char *path, int name); }
+ SYS_FPATHCONF = 192 // { long|sys||fpathconf(int fd, int name); }
+ SYS_GETRLIMIT = 194 // { int|sys||getrlimit(int which, struct rlimit *rlp); }
+ SYS_SETRLIMIT = 195 // { int|sys||setrlimit(int which, const struct rlimit *rlp); }
+ SYS_MMAP = 197 // { void *|sys||mmap(void *addr, size_t len, int prot, int flags, int fd, long PAD, off_t pos); }
+ SYS_LSEEK = 199 // { off_t|sys||lseek(int fd, int PAD, off_t offset, int whence); }
+ SYS_TRUNCATE = 200 // { int|sys||truncate(const char *path, int PAD, off_t length); }
+ SYS_FTRUNCATE = 201 // { int|sys||ftruncate(int fd, int PAD, off_t length); }
+ SYS___SYSCTL = 202 // { int|sys||__sysctl(const int *name, u_int namelen, void *old, size_t *oldlenp, const void *new, size_t newlen); }
+ SYS_MLOCK = 203 // { int|sys||mlock(const void *addr, size_t len); }
+ SYS_MUNLOCK = 204 // { int|sys||munlock(const void *addr, size_t len); }
+ SYS_UNDELETE = 205 // { int|sys||undelete(const char *path); }
+ SYS_GETPGID = 207 // { pid_t|sys||getpgid(pid_t pid); }
+ SYS_REBOOT = 208 // { int|sys||reboot(int opt, char *bootstr); }
+ SYS_POLL = 209 // { int|sys||poll(struct pollfd *fds, u_int nfds, int timeout); }
+ SYS_SEMGET = 221 // { int|sys||semget(key_t key, int nsems, int semflg); }
+ SYS_SEMOP = 222 // { int|sys||semop(int semid, struct sembuf *sops, size_t nsops); }
+ SYS_SEMCONFIG = 223 // { int|sys||semconfig(int flag); }
+ SYS_MSGGET = 225 // { int|sys||msgget(key_t key, int msgflg); }
+ SYS_MSGSND = 226 // { int|sys||msgsnd(int msqid, const void *msgp, size_t msgsz, int msgflg); }
+ SYS_MSGRCV = 227 // { ssize_t|sys||msgrcv(int msqid, void *msgp, size_t msgsz, long msgtyp, int msgflg); }
+ SYS_SHMAT = 228 // { void *|sys||shmat(int shmid, const void *shmaddr, int shmflg); }
+ SYS_SHMDT = 230 // { int|sys||shmdt(const void *shmaddr); }
+ SYS_SHMGET = 231 // { int|sys||shmget(key_t key, size_t size, int shmflg); }
+ SYS_TIMER_CREATE = 235 // { int|sys||timer_create(clockid_t clock_id, struct sigevent *evp, timer_t *timerid); }
+ SYS_TIMER_DELETE = 236 // { int|sys||timer_delete(timer_t timerid); }
+ SYS_TIMER_GETOVERRUN = 239 // { int|sys||timer_getoverrun(timer_t timerid); }
+ SYS_FDATASYNC = 241 // { int|sys||fdatasync(int fd); }
+ SYS_MLOCKALL = 242 // { int|sys||mlockall(int flags); }
+ SYS_MUNLOCKALL = 243 // { int|sys||munlockall(void); }
+ SYS_SIGQUEUEINFO = 245 // { int|sys||sigqueueinfo(pid_t pid, const siginfo_t *info); }
+ SYS_MODCTL = 246 // { int|sys||modctl(int cmd, void *arg); }
+ SYS___POSIX_RENAME = 270 // { int|sys||__posix_rename(const char *from, const char *to); }
+ SYS_SWAPCTL = 271 // { int|sys||swapctl(int cmd, void *arg, int misc); }
+ SYS_MINHERIT = 273 // { int|sys||minherit(void *addr, size_t len, int inherit); }
+ SYS_LCHMOD = 274 // { int|sys||lchmod(const char *path, mode_t mode); }
+ SYS_LCHOWN = 275 // { int|sys||lchown(const char *path, uid_t uid, gid_t gid); }
+ SYS___POSIX_CHOWN = 283 // { int|sys||__posix_chown(const char *path, uid_t uid, gid_t gid); }
+ SYS___POSIX_FCHOWN = 284 // { int|sys||__posix_fchown(int fd, uid_t uid, gid_t gid); }
+ SYS___POSIX_LCHOWN = 285 // { int|sys||__posix_lchown(const char *path, uid_t uid, gid_t gid); }
+ SYS_GETSID = 286 // { pid_t|sys||getsid(pid_t pid); }
+ SYS___CLONE = 287 // { pid_t|sys||__clone(int flags, void *stack); }
+ SYS_FKTRACE = 288 // { int|sys||fktrace(int fd, int ops, int facs, pid_t pid); }
+ SYS_PREADV = 289 // { ssize_t|sys||preadv(int fd, const struct iovec *iovp, int iovcnt, int PAD, off_t offset); }
+ SYS_PWRITEV = 290 // { ssize_t|sys||pwritev(int fd, const struct iovec *iovp, int iovcnt, int PAD, off_t offset); }
+ SYS___GETCWD = 296 // { int|sys||__getcwd(char *bufp, size_t length); }
+ SYS_FCHROOT = 297 // { int|sys||fchroot(int fd); }
+ SYS_LCHFLAGS = 304 // { int|sys||lchflags(const char *path, u_long flags); }
+ SYS_ISSETUGID = 305 // { int|sys||issetugid(void); }
+ SYS_UTRACE = 306 // { int|sys||utrace(const char *label, void *addr, size_t len); }
+ SYS_GETCONTEXT = 307 // { int|sys||getcontext(struct __ucontext *ucp); }
+ SYS_SETCONTEXT = 308 // { int|sys||setcontext(const struct __ucontext *ucp); }
+ SYS__LWP_CREATE = 309 // { int|sys||_lwp_create(const struct __ucontext *ucp, u_long flags, lwpid_t *new_lwp); }
+ SYS__LWP_EXIT = 310 // { int|sys||_lwp_exit(void); }
+ SYS__LWP_SELF = 311 // { lwpid_t|sys||_lwp_self(void); }
+ SYS__LWP_WAIT = 312 // { int|sys||_lwp_wait(lwpid_t wait_for, lwpid_t *departed); }
+ SYS__LWP_SUSPEND = 313 // { int|sys||_lwp_suspend(lwpid_t target); }
+ SYS__LWP_CONTINUE = 314 // { int|sys||_lwp_continue(lwpid_t target); }
+ SYS__LWP_WAKEUP = 315 // { int|sys||_lwp_wakeup(lwpid_t target); }
+ SYS__LWP_GETPRIVATE = 316 // { void *|sys||_lwp_getprivate(void); }
+ SYS__LWP_SETPRIVATE = 317 // { void|sys||_lwp_setprivate(void *ptr); }
+ SYS__LWP_KILL = 318 // { int|sys||_lwp_kill(lwpid_t target, int signo); }
+ SYS__LWP_DETACH = 319 // { int|sys||_lwp_detach(lwpid_t target); }
+ SYS__LWP_UNPARK = 321 // { int|sys||_lwp_unpark(lwpid_t target, const void *hint); }
+ SYS__LWP_UNPARK_ALL = 322 // { ssize_t|sys||_lwp_unpark_all(const lwpid_t *targets, size_t ntargets, const void *hint); }
+ SYS__LWP_SETNAME = 323 // { int|sys||_lwp_setname(lwpid_t target, const char *name); }
+ SYS__LWP_GETNAME = 324 // { int|sys||_lwp_getname(lwpid_t target, char *name, size_t len); }
+ SYS__LWP_CTL = 325 // { int|sys||_lwp_ctl(int features, struct lwpctl **address); }
+ SYS___SIGACTION_SIGTRAMP = 340 // { int|sys||__sigaction_sigtramp(int signum, const struct sigaction *nsa, struct sigaction *osa, const void *tramp, int vers); }
+ SYS_PMC_GET_INFO = 341 // { int|sys||pmc_get_info(int ctr, int op, void *args); }
+ SYS_PMC_CONTROL = 342 // { int|sys||pmc_control(int ctr, int op, void *args); }
+ SYS_RASCTL = 343 // { int|sys||rasctl(void *addr, size_t len, int op); }
+ SYS_KQUEUE = 344 // { int|sys||kqueue(void); }
+ SYS__SCHED_SETPARAM = 346 // { int|sys||_sched_setparam(pid_t pid, lwpid_t lid, int policy, const struct sched_param *params); }
+ SYS__SCHED_GETPARAM = 347 // { int|sys||_sched_getparam(pid_t pid, lwpid_t lid, int *policy, struct sched_param *params); }
+ SYS__SCHED_SETAFFINITY = 348 // { int|sys||_sched_setaffinity(pid_t pid, lwpid_t lid, size_t size, const cpuset_t *cpuset); }
+ SYS__SCHED_GETAFFINITY = 349 // { int|sys||_sched_getaffinity(pid_t pid, lwpid_t lid, size_t size, cpuset_t *cpuset); }
+ SYS_SCHED_YIELD = 350 // { int|sys||sched_yield(void); }
+ SYS_FSYNC_RANGE = 354 // { int|sys||fsync_range(int fd, int flags, off_t start, off_t length); }
+ SYS_UUIDGEN = 355 // { int|sys||uuidgen(struct uuid *store, int count); }
+ SYS_GETVFSSTAT = 356 // { int|sys||getvfsstat(struct statvfs *buf, size_t bufsize, int flags); }
+ SYS_STATVFS1 = 357 // { int|sys||statvfs1(const char *path, struct statvfs *buf, int flags); }
+ SYS_FSTATVFS1 = 358 // { int|sys||fstatvfs1(int fd, struct statvfs *buf, int flags); }
+ SYS_EXTATTRCTL = 360 // { int|sys||extattrctl(const char *path, int cmd, const char *filename, int attrnamespace, const char *attrname); }
+ SYS_EXTATTR_SET_FILE = 361 // { int|sys||extattr_set_file(const char *path, int attrnamespace, const char *attrname, const void *data, size_t nbytes); }
+ SYS_EXTATTR_GET_FILE = 362 // { ssize_t|sys||extattr_get_file(const char *path, int attrnamespace, const char *attrname, void *data, size_t nbytes); }
+ SYS_EXTATTR_DELETE_FILE = 363 // { int|sys||extattr_delete_file(const char *path, int attrnamespace, const char *attrname); }
+ SYS_EXTATTR_SET_FD = 364 // { int|sys||extattr_set_fd(int fd, int attrnamespace, const char *attrname, const void *data, size_t nbytes); }
+ SYS_EXTATTR_GET_FD = 365 // { ssize_t|sys||extattr_get_fd(int fd, int attrnamespace, const char *attrname, void *data, size_t nbytes); }
+ SYS_EXTATTR_DELETE_FD = 366 // { int|sys||extattr_delete_fd(int fd, int attrnamespace, const char *attrname); }
+ SYS_EXTATTR_SET_LINK = 367 // { int|sys||extattr_set_link(const char *path, int attrnamespace, const char *attrname, const void *data, size_t nbytes); }
+ SYS_EXTATTR_GET_LINK = 368 // { ssize_t|sys||extattr_get_link(const char *path, int attrnamespace, const char *attrname, void *data, size_t nbytes); }
+ SYS_EXTATTR_DELETE_LINK = 369 // { int|sys||extattr_delete_link(const char *path, int attrnamespace, const char *attrname); }
+ SYS_EXTATTR_LIST_FD = 370 // { ssize_t|sys||extattr_list_fd(int fd, int attrnamespace, void *data, size_t nbytes); }
+ SYS_EXTATTR_LIST_FILE = 371 // { ssize_t|sys||extattr_list_file(const char *path, int attrnamespace, void *data, size_t nbytes); }
+ SYS_EXTATTR_LIST_LINK = 372 // { ssize_t|sys||extattr_list_link(const char *path, int attrnamespace, void *data, size_t nbytes); }
+ SYS_SETXATTR = 375 // { int|sys||setxattr(const char *path, const char *name, const void *value, size_t size, int flags); }
+ SYS_LSETXATTR = 376 // { int|sys||lsetxattr(const char *path, const char *name, const void *value, size_t size, int flags); }
+ SYS_FSETXATTR = 377 // { int|sys||fsetxattr(int fd, const char *name, const void *value, size_t size, int flags); }
+ SYS_GETXATTR = 378 // { int|sys||getxattr(const char *path, const char *name, void *value, size_t size); }
+ SYS_LGETXATTR = 379 // { int|sys||lgetxattr(const char *path, const char *name, void *value, size_t size); }
+ SYS_FGETXATTR = 380 // { int|sys||fgetxattr(int fd, const char *name, void *value, size_t size); }
+ SYS_LISTXATTR = 381 // { int|sys||listxattr(const char *path, char *list, size_t size); }
+ SYS_LLISTXATTR = 382 // { int|sys||llistxattr(const char *path, char *list, size_t size); }
+ SYS_FLISTXATTR = 383 // { int|sys||flistxattr(int fd, char *list, size_t size); }
+ SYS_REMOVEXATTR = 384 // { int|sys||removexattr(const char *path, const char *name); }
+ SYS_LREMOVEXATTR = 385 // { int|sys||lremovexattr(const char *path, const char *name); }
+ SYS_FREMOVEXATTR = 386 // { int|sys||fremovexattr(int fd, const char *name); }
+ SYS_GETDENTS = 390 // { int|sys|30|getdents(int fd, char *buf, size_t count); }
+ SYS_SOCKET = 394 // { int|sys|30|socket(int domain, int type, int protocol); }
+ SYS_GETFH = 395 // { int|sys|30|getfh(const char *fname, void *fhp, size_t *fh_size); }
+ SYS_MOUNT = 410 // { int|sys|50|mount(const char *type, const char *path, int flags, void *data, size_t data_len); }
+ SYS_MREMAP = 411 // { void *|sys||mremap(void *old_address, size_t old_size, void *new_address, size_t new_size, int flags); }
+ SYS_PSET_CREATE = 412 // { int|sys||pset_create(psetid_t *psid); }
+ SYS_PSET_DESTROY = 413 // { int|sys||pset_destroy(psetid_t psid); }
+ SYS_PSET_ASSIGN = 414 // { int|sys||pset_assign(psetid_t psid, cpuid_t cpuid, psetid_t *opsid); }
+ SYS__PSET_BIND = 415 // { int|sys||_pset_bind(idtype_t idtype, id_t first_id, id_t second_id, psetid_t psid, psetid_t *opsid); }
+ SYS_POSIX_FADVISE = 416 // { int|sys|50|posix_fadvise(int fd, int PAD, off_t offset, off_t len, int advice); }
+ SYS_SELECT = 417 // { int|sys|50|select(int nd, fd_set *in, fd_set *ou, fd_set *ex, struct timeval *tv); }
+ SYS_GETTIMEOFDAY = 418 // { int|sys|50|gettimeofday(struct timeval *tp, void *tzp); }
+ SYS_SETTIMEOFDAY = 419 // { int|sys|50|settimeofday(const struct timeval *tv, const void *tzp); }
+ SYS_UTIMES = 420 // { int|sys|50|utimes(const char *path, const struct timeval *tptr); }
+ SYS_ADJTIME = 421 // { int|sys|50|adjtime(const struct timeval *delta, struct timeval *olddelta); }
+ SYS_FUTIMES = 423 // { int|sys|50|futimes(int fd, const struct timeval *tptr); }
+ SYS_LUTIMES = 424 // { int|sys|50|lutimes(const char *path, const struct timeval *tptr); }
+ SYS_SETITIMER = 425 // { int|sys|50|setitimer(int which, const struct itimerval *itv, struct itimerval *oitv); }
+ SYS_GETITIMER = 426 // { int|sys|50|getitimer(int which, struct itimerval *itv); }
+ SYS_CLOCK_GETTIME = 427 // { int|sys|50|clock_gettime(clockid_t clock_id, struct timespec *tp); }
+ SYS_CLOCK_SETTIME = 428 // { int|sys|50|clock_settime(clockid_t clock_id, const struct timespec *tp); }
+ SYS_CLOCK_GETRES = 429 // { int|sys|50|clock_getres(clockid_t clock_id, struct timespec *tp); }
+ SYS_NANOSLEEP = 430 // { int|sys|50|nanosleep(const struct timespec *rqtp, struct timespec *rmtp); }
+ SYS___SIGTIMEDWAIT = 431 // { int|sys|50|__sigtimedwait(const sigset_t *set, siginfo_t *info, struct timespec *timeout); }
+ SYS__LWP_PARK = 434 // { int|sys|50|_lwp_park(const struct timespec *ts, lwpid_t unpark, const void *hint, const void *unparkhint); }
+ SYS_KEVENT = 435 // { int|sys|50|kevent(int fd, const struct kevent *changelist, size_t nchanges, struct kevent *eventlist, size_t nevents, const struct timespec *timeout); }
+ SYS_PSELECT = 436 // { int|sys|50|pselect(int nd, fd_set *in, fd_set *ou, fd_set *ex, const struct timespec *ts, const sigset_t *mask); }
+ SYS_POLLTS = 437 // { int|sys|50|pollts(struct pollfd *fds, u_int nfds, const struct timespec *ts, const sigset_t *mask); }
+ SYS_STAT = 439 // { int|sys|50|stat(const char *path, struct stat *ub); }
+ SYS_FSTAT = 440 // { int|sys|50|fstat(int fd, struct stat *sb); }
+ SYS_LSTAT = 441 // { int|sys|50|lstat(const char *path, struct stat *ub); }
+ SYS___SEMCTL = 442 // { int|sys|50|__semctl(int semid, int semnum, int cmd, ... union __semun *arg); }
+ SYS_SHMCTL = 443 // { int|sys|50|shmctl(int shmid, int cmd, struct shmid_ds *buf); }
+ SYS_MSGCTL = 444 // { int|sys|50|msgctl(int msqid, int cmd, struct msqid_ds *buf); }
+ SYS_GETRUSAGE = 445 // { int|sys|50|getrusage(int who, struct rusage *rusage); }
+ SYS_TIMER_SETTIME = 446 // { int|sys|50|timer_settime(timer_t timerid, int flags, const struct itimerspec *value, struct itimerspec *ovalue); }
+ SYS_TIMER_GETTIME = 447 // { int|sys|50|timer_gettime(timer_t timerid, struct itimerspec *value); }
+ SYS_NTP_GETTIME = 448 // { int|sys|50|ntp_gettime(struct ntptimeval *ntvp); }
+ SYS_WAIT4 = 449 // { int|sys|50|wait4(pid_t pid, int *status, int options, struct rusage *rusage); }
+ SYS_MKNOD = 450 // { int|sys|50|mknod(const char *path, mode_t mode, dev_t dev); }
+ SYS_FHSTAT = 451 // { int|sys|50|fhstat(const void *fhp, size_t fh_size, struct stat *sb); }
+ SYS_PIPE2 = 453 // { int|sys||pipe2(int *fildes, int flags); }
+ SYS_DUP3 = 454 // { int|sys||dup3(int from, int to, int flags); }
+ SYS_KQUEUE1 = 455 // { int|sys||kqueue1(int flags); }
+ SYS_PACCEPT = 456 // { int|sys||paccept(int s, struct sockaddr *name, socklen_t *anamelen, const sigset_t *mask, int flags); }
+ SYS_LINKAT = 457 // { int|sys||linkat(int fd1, const char *name1, int fd2, const char *name2, int flags); }
+ SYS_RENAMEAT = 458 // { int|sys||renameat(int fromfd, const char *from, int tofd, const char *to); }
+ SYS_MKFIFOAT = 459 // { int|sys||mkfifoat(int fd, const char *path, mode_t mode); }
+ SYS_MKNODAT = 460 // { int|sys||mknodat(int fd, const char *path, mode_t mode, uint32_t dev); }
+ SYS_MKDIRAT = 461 // { int|sys||mkdirat(int fd, const char *path, mode_t mode); }
+ SYS_FACCESSAT = 462 // { int|sys||faccessat(int fd, const char *path, int amode, int flag); }
+ SYS_FCHMODAT = 463 // { int|sys||fchmodat(int fd, const char *path, mode_t mode, int flag); }
+ SYS_FCHOWNAT = 464 // { int|sys||fchownat(int fd, const char *path, uid_t owner, gid_t group, int flag); }
+ SYS_FEXECVE = 465 // { int|sys||fexecve(int fd, char * const *argp, char * const *envp); }
+ SYS_FSTATAT = 466 // { int|sys||fstatat(int fd, const char *path, struct stat *buf, int flag); }
+ SYS_UTIMENSAT = 467 // { int|sys||utimensat(int fd, const char *path, const struct timespec *tptr, int flag); }
+ SYS_OPENAT = 468 // { int|sys||openat(int fd, const char *path, int oflags, ... mode_t mode); }
+ SYS_READLINKAT = 469 // { int|sys||readlinkat(int fd, const char *path, char *buf, size_t bufsize); }
+ SYS_SYMLINKAT = 470 // { int|sys||symlinkat(const char *path1, int fd, const char *path2); }
+ SYS_UNLINKAT = 471 // { int|sys||unlinkat(int fd, const char *path, int flag); }
+ SYS_FUTIMENS = 472 // { int|sys||futimens(int fd, const struct timespec *tptr); }
+ SYS___QUOTACTL = 473 // { int|sys||__quotactl(const char *path, struct quotactl_args *args); }
+ SYS_POSIX_SPAWN = 474 // { int|sys||posix_spawn(pid_t *pid, const char *path, const struct posix_spawn_file_actions *file_actions, const struct posix_spawnattr *attrp, char *const *argv, char *const *envp); }
+ SYS_RECVMMSG = 475 // { int|sys||recvmmsg(int s, struct mmsghdr *mmsg, unsigned int vlen, unsigned int flags, struct timespec *timeout); }
+ SYS_SENDMMSG = 476 // { int|sys||sendmmsg(int s, struct mmsghdr *mmsg, unsigned int vlen, unsigned int flags); }
+)
diff --git a/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zsysnum_openbsd_386.go b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zsysnum_openbsd_386.go
new file mode 100644
index 000000000..3e8ce2a1d
--- /dev/null
+++ b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zsysnum_openbsd_386.go
@@ -0,0 +1,207 @@
+// mksysnum_openbsd.pl
+// MACHINE GENERATED BY THE ABOVE COMMAND; DO NOT EDIT
+
+// +build 386,openbsd
+
+package unix
+
+const (
+ SYS_EXIT = 1 // { void sys_exit(int rval); }
+ SYS_FORK = 2 // { int sys_fork(void); }
+ SYS_READ = 3 // { ssize_t sys_read(int fd, void *buf, size_t nbyte); }
+ SYS_WRITE = 4 // { ssize_t sys_write(int fd, const void *buf, \
+ SYS_OPEN = 5 // { int sys_open(const char *path, \
+ SYS_CLOSE = 6 // { int sys_close(int fd); }
+ SYS___TFORK = 8 // { int sys___tfork(const struct __tfork *param, \
+ SYS_LINK = 9 // { int sys_link(const char *path, const char *link); }
+ SYS_UNLINK = 10 // { int sys_unlink(const char *path); }
+ SYS_WAIT4 = 11 // { pid_t sys_wait4(pid_t pid, int *status, \
+ SYS_CHDIR = 12 // { int sys_chdir(const char *path); }
+ SYS_FCHDIR = 13 // { int sys_fchdir(int fd); }
+ SYS_MKNOD = 14 // { int sys_mknod(const char *path, mode_t mode, \
+ SYS_CHMOD = 15 // { int sys_chmod(const char *path, mode_t mode); }
+ SYS_CHOWN = 16 // { int sys_chown(const char *path, uid_t uid, \
+ SYS_OBREAK = 17 // { int sys_obreak(char *nsize); } break
+ SYS_GETDTABLECOUNT = 18 // { int sys_getdtablecount(void); }
+ SYS_GETRUSAGE = 19 // { int sys_getrusage(int who, \
+ SYS_GETPID = 20 // { pid_t sys_getpid(void); }
+ SYS_MOUNT = 21 // { int sys_mount(const char *type, const char *path, \
+ SYS_UNMOUNT = 22 // { int sys_unmount(const char *path, int flags); }
+ SYS_SETUID = 23 // { int sys_setuid(uid_t uid); }
+ SYS_GETUID = 24 // { uid_t sys_getuid(void); }
+ SYS_GETEUID = 25 // { uid_t sys_geteuid(void); }
+ SYS_PTRACE = 26 // { int sys_ptrace(int req, pid_t pid, caddr_t addr, \
+ SYS_RECVMSG = 27 // { ssize_t sys_recvmsg(int s, struct msghdr *msg, \
+ SYS_SENDMSG = 28 // { ssize_t sys_sendmsg(int s, \
+ SYS_RECVFROM = 29 // { ssize_t sys_recvfrom(int s, void *buf, size_t len, \
+ SYS_ACCEPT = 30 // { int sys_accept(int s, struct sockaddr *name, \
+ SYS_GETPEERNAME = 31 // { int sys_getpeername(int fdes, struct sockaddr *asa, \
+ SYS_GETSOCKNAME = 32 // { int sys_getsockname(int fdes, struct sockaddr *asa, \
+ SYS_ACCESS = 33 // { int sys_access(const char *path, int flags); }
+ SYS_CHFLAGS = 34 // { int sys_chflags(const char *path, u_int flags); }
+ SYS_FCHFLAGS = 35 // { int sys_fchflags(int fd, u_int flags); }
+ SYS_SYNC = 36 // { void sys_sync(void); }
+ SYS_KILL = 37 // { int sys_kill(int pid, int signum); }
+ SYS_STAT = 38 // { int sys_stat(const char *path, struct stat *ub); }
+ SYS_GETPPID = 39 // { pid_t sys_getppid(void); }
+ SYS_LSTAT = 40 // { int sys_lstat(const char *path, struct stat *ub); }
+ SYS_DUP = 41 // { int sys_dup(int fd); }
+ SYS_FSTATAT = 42 // { int sys_fstatat(int fd, const char *path, \
+ SYS_GETEGID = 43 // { gid_t sys_getegid(void); }
+ SYS_PROFIL = 44 // { int sys_profil(caddr_t samples, size_t size, \
+ SYS_KTRACE = 45 // { int sys_ktrace(const char *fname, int ops, \
+ SYS_SIGACTION = 46 // { int sys_sigaction(int signum, \
+ SYS_GETGID = 47 // { gid_t sys_getgid(void); }
+ SYS_SIGPROCMASK = 48 // { int sys_sigprocmask(int how, sigset_t mask); }
+ SYS_GETLOGIN = 49 // { int sys_getlogin(char *namebuf, u_int namelen); }
+ SYS_SETLOGIN = 50 // { int sys_setlogin(const char *namebuf); }
+ SYS_ACCT = 51 // { int sys_acct(const char *path); }
+ SYS_SIGPENDING = 52 // { int sys_sigpending(void); }
+ SYS_FSTAT = 53 // { int sys_fstat(int fd, struct stat *sb); }
+ SYS_IOCTL = 54 // { int sys_ioctl(int fd, \
+ SYS_REBOOT = 55 // { int sys_reboot(int opt); }
+ SYS_REVOKE = 56 // { int sys_revoke(const char *path); }
+ SYS_SYMLINK = 57 // { int sys_symlink(const char *path, \
+ SYS_READLINK = 58 // { int sys_readlink(const char *path, char *buf, \
+ SYS_EXECVE = 59 // { int sys_execve(const char *path, \
+ SYS_UMASK = 60 // { mode_t sys_umask(mode_t newmask); }
+ SYS_CHROOT = 61 // { int sys_chroot(const char *path); }
+ SYS_GETFSSTAT = 62 // { int sys_getfsstat(struct statfs *buf, size_t bufsize, \
+ SYS_STATFS = 63 // { int sys_statfs(const char *path, \
+ SYS_FSTATFS = 64 // { int sys_fstatfs(int fd, struct statfs *buf); }
+ SYS_FHSTATFS = 65 // { int sys_fhstatfs(const fhandle_t *fhp, \
+ SYS_VFORK = 66 // { int sys_vfork(void); }
+ SYS_GETTIMEOFDAY = 67 // { int sys_gettimeofday(struct timeval *tp, \
+ SYS_SETTIMEOFDAY = 68 // { int sys_settimeofday(const struct timeval *tv, \
+ SYS_SETITIMER = 69 // { int sys_setitimer(int which, \
+ SYS_GETITIMER = 70 // { int sys_getitimer(int which, \
+ SYS_SELECT = 71 // { int sys_select(int nd, fd_set *in, fd_set *ou, \
+ SYS_KEVENT = 72 // { int sys_kevent(int fd, \
+ SYS_MUNMAP = 73 // { int sys_munmap(void *addr, size_t len); }
+ SYS_MPROTECT = 74 // { int sys_mprotect(void *addr, size_t len, \
+ SYS_MADVISE = 75 // { int sys_madvise(void *addr, size_t len, \
+ SYS_UTIMES = 76 // { int sys_utimes(const char *path, \
+ SYS_FUTIMES = 77 // { int sys_futimes(int fd, \
+ SYS_MINCORE = 78 // { int sys_mincore(void *addr, size_t len, \
+ SYS_GETGROUPS = 79 // { int sys_getgroups(int gidsetsize, \
+ SYS_SETGROUPS = 80 // { int sys_setgroups(int gidsetsize, \
+ SYS_GETPGRP = 81 // { int sys_getpgrp(void); }
+ SYS_SETPGID = 82 // { int sys_setpgid(pid_t pid, int pgid); }
+ SYS_UTIMENSAT = 84 // { int sys_utimensat(int fd, const char *path, \
+ SYS_FUTIMENS = 85 // { int sys_futimens(int fd, \
+ SYS_CLOCK_GETTIME = 87 // { int sys_clock_gettime(clockid_t clock_id, \
+ SYS_CLOCK_SETTIME = 88 // { int sys_clock_settime(clockid_t clock_id, \
+ SYS_CLOCK_GETRES = 89 // { int sys_clock_getres(clockid_t clock_id, \
+ SYS_DUP2 = 90 // { int sys_dup2(int from, int to); }
+ SYS_NANOSLEEP = 91 // { int sys_nanosleep(const struct timespec *rqtp, \
+ SYS_FCNTL = 92 // { int sys_fcntl(int fd, int cmd, ... void *arg); }
+ SYS___THRSLEEP = 94 // { int sys___thrsleep(const volatile void *ident, \
+ SYS_FSYNC = 95 // { int sys_fsync(int fd); }
+ SYS_SETPRIORITY = 96 // { int sys_setpriority(int which, id_t who, int prio); }
+ SYS_SOCKET = 97 // { int sys_socket(int domain, int type, int protocol); }
+ SYS_CONNECT = 98 // { int sys_connect(int s, const struct sockaddr *name, \
+ SYS_GETDENTS = 99 // { int sys_getdents(int fd, void *buf, size_t buflen); }
+ SYS_GETPRIORITY = 100 // { int sys_getpriority(int which, id_t who); }
+ SYS_SIGRETURN = 103 // { int sys_sigreturn(struct sigcontext *sigcntxp); }
+ SYS_BIND = 104 // { int sys_bind(int s, const struct sockaddr *name, \
+ SYS_SETSOCKOPT = 105 // { int sys_setsockopt(int s, int level, int name, \
+ SYS_LISTEN = 106 // { int sys_listen(int s, int backlog); }
+ SYS_PPOLL = 109 // { int sys_ppoll(struct pollfd *fds, \
+ SYS_PSELECT = 110 // { int sys_pselect(int nd, fd_set *in, fd_set *ou, \
+ SYS_SIGSUSPEND = 111 // { int sys_sigsuspend(int mask); }
+ SYS_GETSOCKOPT = 118 // { int sys_getsockopt(int s, int level, int name, \
+ SYS_READV = 120 // { ssize_t sys_readv(int fd, \
+ SYS_WRITEV = 121 // { ssize_t sys_writev(int fd, \
+ SYS_FCHOWN = 123 // { int sys_fchown(int fd, uid_t uid, gid_t gid); }
+ SYS_FCHMOD = 124 // { int sys_fchmod(int fd, mode_t mode); }
+ SYS_SETREUID = 126 // { int sys_setreuid(uid_t ruid, uid_t euid); }
+ SYS_SETREGID = 127 // { int sys_setregid(gid_t rgid, gid_t egid); }
+ SYS_RENAME = 128 // { int sys_rename(const char *from, const char *to); }
+ SYS_FLOCK = 131 // { int sys_flock(int fd, int how); }
+ SYS_MKFIFO = 132 // { int sys_mkfifo(const char *path, mode_t mode); }
+ SYS_SENDTO = 133 // { ssize_t sys_sendto(int s, const void *buf, \
+ SYS_SHUTDOWN = 134 // { int sys_shutdown(int s, int how); }
+ SYS_SOCKETPAIR = 135 // { int sys_socketpair(int domain, int type, \
+ SYS_MKDIR = 136 // { int sys_mkdir(const char *path, mode_t mode); }
+ SYS_RMDIR = 137 // { int sys_rmdir(const char *path); }
+ SYS_ADJTIME = 140 // { int sys_adjtime(const struct timeval *delta, \
+ SYS_SETSID = 147 // { int sys_setsid(void); }
+ SYS_QUOTACTL = 148 // { int sys_quotactl(const char *path, int cmd, \
+ SYS_NFSSVC = 155 // { int sys_nfssvc(int flag, void *argp); }
+ SYS_GETFH = 161 // { int sys_getfh(const char *fname, fhandle_t *fhp); }
+ SYS_SYSARCH = 165 // { int sys_sysarch(int op, void *parms); }
+ SYS_PREAD = 173 // { ssize_t sys_pread(int fd, void *buf, \
+ SYS_PWRITE = 174 // { ssize_t sys_pwrite(int fd, const void *buf, \
+ SYS_SETGID = 181 // { int sys_setgid(gid_t gid); }
+ SYS_SETEGID = 182 // { int sys_setegid(gid_t egid); }
+ SYS_SETEUID = 183 // { int sys_seteuid(uid_t euid); }
+ SYS_PATHCONF = 191 // { long sys_pathconf(const char *path, int name); }
+ SYS_FPATHCONF = 192 // { long sys_fpathconf(int fd, int name); }
+ SYS_SWAPCTL = 193 // { int sys_swapctl(int cmd, const void *arg, int misc); }
+ SYS_GETRLIMIT = 194 // { int sys_getrlimit(int which, \
+ SYS_SETRLIMIT = 195 // { int sys_setrlimit(int which, \
+ SYS_MMAP = 197 // { void *sys_mmap(void *addr, size_t len, int prot, \
+ SYS_LSEEK = 199 // { off_t sys_lseek(int fd, int pad, off_t offset, \
+ SYS_TRUNCATE = 200 // { int sys_truncate(const char *path, int pad, \
+ SYS_FTRUNCATE = 201 // { int sys_ftruncate(int fd, int pad, off_t length); }
+ SYS___SYSCTL = 202 // { int sys___sysctl(const int *name, u_int namelen, \
+ SYS_MLOCK = 203 // { int sys_mlock(const void *addr, size_t len); }
+ SYS_MUNLOCK = 204 // { int sys_munlock(const void *addr, size_t len); }
+ SYS_GETPGID = 207 // { pid_t sys_getpgid(pid_t pid); }
+ SYS_UTRACE = 209 // { int sys_utrace(const char *label, const void *addr, \
+ SYS_SEMGET = 221 // { int sys_semget(key_t key, int nsems, int semflg); }
+ SYS_MSGGET = 225 // { int sys_msgget(key_t key, int msgflg); }
+ SYS_MSGSND = 226 // { int sys_msgsnd(int msqid, const void *msgp, size_t msgsz, \
+ SYS_MSGRCV = 227 // { int sys_msgrcv(int msqid, void *msgp, size_t msgsz, \
+ SYS_SHMAT = 228 // { void *sys_shmat(int shmid, const void *shmaddr, \
+ SYS_SHMDT = 230 // { int sys_shmdt(const void *shmaddr); }
+ SYS_MINHERIT = 250 // { int sys_minherit(void *addr, size_t len, \
+ SYS_POLL = 252 // { int sys_poll(struct pollfd *fds, \
+ SYS_ISSETUGID = 253 // { int sys_issetugid(void); }
+ SYS_LCHOWN = 254 // { int sys_lchown(const char *path, uid_t uid, gid_t gid); }
+ SYS_GETSID = 255 // { pid_t sys_getsid(pid_t pid); }
+ SYS_MSYNC = 256 // { int sys_msync(void *addr, size_t len, int flags); }
+ SYS_PIPE = 263 // { int sys_pipe(int *fdp); }
+ SYS_FHOPEN = 264 // { int sys_fhopen(const fhandle_t *fhp, int flags); }
+ SYS_PREADV = 267 // { ssize_t sys_preadv(int fd, \
+ SYS_PWRITEV = 268 // { ssize_t sys_pwritev(int fd, \
+ SYS_KQUEUE = 269 // { int sys_kqueue(void); }
+ SYS_MLOCKALL = 271 // { int sys_mlockall(int flags); }
+ SYS_MUNLOCKALL = 272 // { int sys_munlockall(void); }
+ SYS_GETRESUID = 281 // { int sys_getresuid(uid_t *ruid, uid_t *euid, \
+ SYS_SETRESUID = 282 // { int sys_setresuid(uid_t ruid, uid_t euid, \
+ SYS_GETRESGID = 283 // { int sys_getresgid(gid_t *rgid, gid_t *egid, \
+ SYS_SETRESGID = 284 // { int sys_setresgid(gid_t rgid, gid_t egid, \
+ SYS_MQUERY = 286 // { void *sys_mquery(void *addr, size_t len, int prot, \
+ SYS_CLOSEFROM = 287 // { int sys_closefrom(int fd); }
+ SYS_SIGALTSTACK = 288 // { int sys_sigaltstack(const struct sigaltstack *nss, \
+ SYS_SHMGET = 289 // { int sys_shmget(key_t key, size_t size, int shmflg); }
+ SYS_SEMOP = 290 // { int sys_semop(int semid, struct sembuf *sops, \
+ SYS_FHSTAT = 294 // { int sys_fhstat(const fhandle_t *fhp, \
+ SYS___SEMCTL = 295 // { int sys___semctl(int semid, int semnum, int cmd, \
+ SYS_SHMCTL = 296 // { int sys_shmctl(int shmid, int cmd, \
+ SYS_MSGCTL = 297 // { int sys_msgctl(int msqid, int cmd, \
+ SYS_SCHED_YIELD = 298 // { int sys_sched_yield(void); }
+ SYS_GETTHRID = 299 // { pid_t sys_getthrid(void); }
+ SYS___THRWAKEUP = 301 // { int sys___thrwakeup(const volatile void *ident, \
+ SYS___THREXIT = 302 // { void sys___threxit(pid_t *notdead); }
+ SYS___THRSIGDIVERT = 303 // { int sys___thrsigdivert(sigset_t sigmask, \
+ SYS___GETCWD = 304 // { int sys___getcwd(char *buf, size_t len); }
+ SYS_ADJFREQ = 305 // { int sys_adjfreq(const int64_t *freq, \
+ SYS_SETRTABLE = 310 // { int sys_setrtable(int rtableid); }
+ SYS_GETRTABLE = 311 // { int sys_getrtable(void); }
+ SYS_FACCESSAT = 313 // { int sys_faccessat(int fd, const char *path, \
+ SYS_FCHMODAT = 314 // { int sys_fchmodat(int fd, const char *path, \
+ SYS_FCHOWNAT = 315 // { int sys_fchownat(int fd, const char *path, \
+ SYS_LINKAT = 317 // { int sys_linkat(int fd1, const char *path1, int fd2, \
+ SYS_MKDIRAT = 318 // { int sys_mkdirat(int fd, const char *path, \
+ SYS_MKFIFOAT = 319 // { int sys_mkfifoat(int fd, const char *path, \
+ SYS_MKNODAT = 320 // { int sys_mknodat(int fd, const char *path, \
+ SYS_OPENAT = 321 // { int sys_openat(int fd, const char *path, int flags, \
+ SYS_READLINKAT = 322 // { ssize_t sys_readlinkat(int fd, const char *path, \
+ SYS_RENAMEAT = 323 // { int sys_renameat(int fromfd, const char *from, \
+ SYS_SYMLINKAT = 324 // { int sys_symlinkat(const char *path, int fd, \
+ SYS_UNLINKAT = 325 // { int sys_unlinkat(int fd, const char *path, \
+ SYS___SET_TCB = 329 // { void sys___set_tcb(void *tcb); }
+ SYS___GET_TCB = 330 // { void *sys___get_tcb(void); }
+)
diff --git a/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zsysnum_openbsd_amd64.go b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zsysnum_openbsd_amd64.go
new file mode 100644
index 000000000..bd28146dd
--- /dev/null
+++ b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zsysnum_openbsd_amd64.go
@@ -0,0 +1,207 @@
+// mksysnum_openbsd.pl
+// MACHINE GENERATED BY THE ABOVE COMMAND; DO NOT EDIT
+
+// +build amd64,openbsd
+
+package unix
+
+const (
+ SYS_EXIT = 1 // { void sys_exit(int rval); }
+ SYS_FORK = 2 // { int sys_fork(void); }
+ SYS_READ = 3 // { ssize_t sys_read(int fd, void *buf, size_t nbyte); }
+ SYS_WRITE = 4 // { ssize_t sys_write(int fd, const void *buf, \
+ SYS_OPEN = 5 // { int sys_open(const char *path, \
+ SYS_CLOSE = 6 // { int sys_close(int fd); }
+ SYS___TFORK = 8 // { int sys___tfork(const struct __tfork *param, \
+ SYS_LINK = 9 // { int sys_link(const char *path, const char *link); }
+ SYS_UNLINK = 10 // { int sys_unlink(const char *path); }
+ SYS_WAIT4 = 11 // { pid_t sys_wait4(pid_t pid, int *status, \
+ SYS_CHDIR = 12 // { int sys_chdir(const char *path); }
+ SYS_FCHDIR = 13 // { int sys_fchdir(int fd); }
+ SYS_MKNOD = 14 // { int sys_mknod(const char *path, mode_t mode, \
+ SYS_CHMOD = 15 // { int sys_chmod(const char *path, mode_t mode); }
+ SYS_CHOWN = 16 // { int sys_chown(const char *path, uid_t uid, \
+ SYS_OBREAK = 17 // { int sys_obreak(char *nsize); } break
+ SYS_GETDTABLECOUNT = 18 // { int sys_getdtablecount(void); }
+ SYS_GETRUSAGE = 19 // { int sys_getrusage(int who, \
+ SYS_GETPID = 20 // { pid_t sys_getpid(void); }
+ SYS_MOUNT = 21 // { int sys_mount(const char *type, const char *path, \
+ SYS_UNMOUNT = 22 // { int sys_unmount(const char *path, int flags); }
+ SYS_SETUID = 23 // { int sys_setuid(uid_t uid); }
+ SYS_GETUID = 24 // { uid_t sys_getuid(void); }
+ SYS_GETEUID = 25 // { uid_t sys_geteuid(void); }
+ SYS_PTRACE = 26 // { int sys_ptrace(int req, pid_t pid, caddr_t addr, \
+ SYS_RECVMSG = 27 // { ssize_t sys_recvmsg(int s, struct msghdr *msg, \
+ SYS_SENDMSG = 28 // { ssize_t sys_sendmsg(int s, \
+ SYS_RECVFROM = 29 // { ssize_t sys_recvfrom(int s, void *buf, size_t len, \
+ SYS_ACCEPT = 30 // { int sys_accept(int s, struct sockaddr *name, \
+ SYS_GETPEERNAME = 31 // { int sys_getpeername(int fdes, struct sockaddr *asa, \
+ SYS_GETSOCKNAME = 32 // { int sys_getsockname(int fdes, struct sockaddr *asa, \
+ SYS_ACCESS = 33 // { int sys_access(const char *path, int flags); }
+ SYS_CHFLAGS = 34 // { int sys_chflags(const char *path, u_int flags); }
+ SYS_FCHFLAGS = 35 // { int sys_fchflags(int fd, u_int flags); }
+ SYS_SYNC = 36 // { void sys_sync(void); }
+ SYS_KILL = 37 // { int sys_kill(int pid, int signum); }
+ SYS_STAT = 38 // { int sys_stat(const char *path, struct stat *ub); }
+ SYS_GETPPID = 39 // { pid_t sys_getppid(void); }
+ SYS_LSTAT = 40 // { int sys_lstat(const char *path, struct stat *ub); }
+ SYS_DUP = 41 // { int sys_dup(int fd); }
+ SYS_FSTATAT = 42 // { int sys_fstatat(int fd, const char *path, \
+ SYS_GETEGID = 43 // { gid_t sys_getegid(void); }
+ SYS_PROFIL = 44 // { int sys_profil(caddr_t samples, size_t size, \
+ SYS_KTRACE = 45 // { int sys_ktrace(const char *fname, int ops, \
+ SYS_SIGACTION = 46 // { int sys_sigaction(int signum, \
+ SYS_GETGID = 47 // { gid_t sys_getgid(void); }
+ SYS_SIGPROCMASK = 48 // { int sys_sigprocmask(int how, sigset_t mask); }
+ SYS_GETLOGIN = 49 // { int sys_getlogin(char *namebuf, u_int namelen); }
+ SYS_SETLOGIN = 50 // { int sys_setlogin(const char *namebuf); }
+ SYS_ACCT = 51 // { int sys_acct(const char *path); }
+ SYS_SIGPENDING = 52 // { int sys_sigpending(void); }
+ SYS_FSTAT = 53 // { int sys_fstat(int fd, struct stat *sb); }
+ SYS_IOCTL = 54 // { int sys_ioctl(int fd, \
+ SYS_REBOOT = 55 // { int sys_reboot(int opt); }
+ SYS_REVOKE = 56 // { int sys_revoke(const char *path); }
+ SYS_SYMLINK = 57 // { int sys_symlink(const char *path, \
+ SYS_READLINK = 58 // { int sys_readlink(const char *path, char *buf, \
+ SYS_EXECVE = 59 // { int sys_execve(const char *path, \
+ SYS_UMASK = 60 // { mode_t sys_umask(mode_t newmask); }
+ SYS_CHROOT = 61 // { int sys_chroot(const char *path); }
+ SYS_GETFSSTAT = 62 // { int sys_getfsstat(struct statfs *buf, size_t bufsize, \
+ SYS_STATFS = 63 // { int sys_statfs(const char *path, \
+ SYS_FSTATFS = 64 // { int sys_fstatfs(int fd, struct statfs *buf); }
+ SYS_FHSTATFS = 65 // { int sys_fhstatfs(const fhandle_t *fhp, \
+ SYS_VFORK = 66 // { int sys_vfork(void); }
+ SYS_GETTIMEOFDAY = 67 // { int sys_gettimeofday(struct timeval *tp, \
+ SYS_SETTIMEOFDAY = 68 // { int sys_settimeofday(const struct timeval *tv, \
+ SYS_SETITIMER = 69 // { int sys_setitimer(int which, \
+ SYS_GETITIMER = 70 // { int sys_getitimer(int which, \
+ SYS_SELECT = 71 // { int sys_select(int nd, fd_set *in, fd_set *ou, \
+ SYS_KEVENT = 72 // { int sys_kevent(int fd, \
+ SYS_MUNMAP = 73 // { int sys_munmap(void *addr, size_t len); }
+ SYS_MPROTECT = 74 // { int sys_mprotect(void *addr, size_t len, \
+ SYS_MADVISE = 75 // { int sys_madvise(void *addr, size_t len, \
+ SYS_UTIMES = 76 // { int sys_utimes(const char *path, \
+ SYS_FUTIMES = 77 // { int sys_futimes(int fd, \
+ SYS_MINCORE = 78 // { int sys_mincore(void *addr, size_t len, \
+ SYS_GETGROUPS = 79 // { int sys_getgroups(int gidsetsize, \
+ SYS_SETGROUPS = 80 // { int sys_setgroups(int gidsetsize, \
+ SYS_GETPGRP = 81 // { int sys_getpgrp(void); }
+ SYS_SETPGID = 82 // { int sys_setpgid(pid_t pid, int pgid); }
+ SYS_UTIMENSAT = 84 // { int sys_utimensat(int fd, const char *path, \
+ SYS_FUTIMENS = 85 // { int sys_futimens(int fd, \
+ SYS_CLOCK_GETTIME = 87 // { int sys_clock_gettime(clockid_t clock_id, \
+ SYS_CLOCK_SETTIME = 88 // { int sys_clock_settime(clockid_t clock_id, \
+ SYS_CLOCK_GETRES = 89 // { int sys_clock_getres(clockid_t clock_id, \
+ SYS_DUP2 = 90 // { int sys_dup2(int from, int to); }
+ SYS_NANOSLEEP = 91 // { int sys_nanosleep(const struct timespec *rqtp, \
+ SYS_FCNTL = 92 // { int sys_fcntl(int fd, int cmd, ... void *arg); }
+ SYS___THRSLEEP = 94 // { int sys___thrsleep(const volatile void *ident, \
+ SYS_FSYNC = 95 // { int sys_fsync(int fd); }
+ SYS_SETPRIORITY = 96 // { int sys_setpriority(int which, id_t who, int prio); }
+ SYS_SOCKET = 97 // { int sys_socket(int domain, int type, int protocol); }
+ SYS_CONNECT = 98 // { int sys_connect(int s, const struct sockaddr *name, \
+ SYS_GETDENTS = 99 // { int sys_getdents(int fd, void *buf, size_t buflen); }
+ SYS_GETPRIORITY = 100 // { int sys_getpriority(int which, id_t who); }
+ SYS_SIGRETURN = 103 // { int sys_sigreturn(struct sigcontext *sigcntxp); }
+ SYS_BIND = 104 // { int sys_bind(int s, const struct sockaddr *name, \
+ SYS_SETSOCKOPT = 105 // { int sys_setsockopt(int s, int level, int name, \
+ SYS_LISTEN = 106 // { int sys_listen(int s, int backlog); }
+ SYS_PPOLL = 109 // { int sys_ppoll(struct pollfd *fds, \
+ SYS_PSELECT = 110 // { int sys_pselect(int nd, fd_set *in, fd_set *ou, \
+ SYS_SIGSUSPEND = 111 // { int sys_sigsuspend(int mask); }
+ SYS_GETSOCKOPT = 118 // { int sys_getsockopt(int s, int level, int name, \
+ SYS_READV = 120 // { ssize_t sys_readv(int fd, \
+ SYS_WRITEV = 121 // { ssize_t sys_writev(int fd, \
+ SYS_FCHOWN = 123 // { int sys_fchown(int fd, uid_t uid, gid_t gid); }
+ SYS_FCHMOD = 124 // { int sys_fchmod(int fd, mode_t mode); }
+ SYS_SETREUID = 126 // { int sys_setreuid(uid_t ruid, uid_t euid); }
+ SYS_SETREGID = 127 // { int sys_setregid(gid_t rgid, gid_t egid); }
+ SYS_RENAME = 128 // { int sys_rename(const char *from, const char *to); }
+ SYS_FLOCK = 131 // { int sys_flock(int fd, int how); }
+ SYS_MKFIFO = 132 // { int sys_mkfifo(const char *path, mode_t mode); }
+ SYS_SENDTO = 133 // { ssize_t sys_sendto(int s, const void *buf, \
+ SYS_SHUTDOWN = 134 // { int sys_shutdown(int s, int how); }
+ SYS_SOCKETPAIR = 135 // { int sys_socketpair(int domain, int type, \
+ SYS_MKDIR = 136 // { int sys_mkdir(const char *path, mode_t mode); }
+ SYS_RMDIR = 137 // { int sys_rmdir(const char *path); }
+ SYS_ADJTIME = 140 // { int sys_adjtime(const struct timeval *delta, \
+ SYS_SETSID = 147 // { int sys_setsid(void); }
+ SYS_QUOTACTL = 148 // { int sys_quotactl(const char *path, int cmd, \
+ SYS_NFSSVC = 155 // { int sys_nfssvc(int flag, void *argp); }
+ SYS_GETFH = 161 // { int sys_getfh(const char *fname, fhandle_t *fhp); }
+ SYS_SYSARCH = 165 // { int sys_sysarch(int op, void *parms); }
+ SYS_PREAD = 173 // { ssize_t sys_pread(int fd, void *buf, \
+ SYS_PWRITE = 174 // { ssize_t sys_pwrite(int fd, const void *buf, \
+ SYS_SETGID = 181 // { int sys_setgid(gid_t gid); }
+ SYS_SETEGID = 182 // { int sys_setegid(gid_t egid); }
+ SYS_SETEUID = 183 // { int sys_seteuid(uid_t euid); }
+ SYS_PATHCONF = 191 // { long sys_pathconf(const char *path, int name); }
+ SYS_FPATHCONF = 192 // { long sys_fpathconf(int fd, int name); }
+ SYS_SWAPCTL = 193 // { int sys_swapctl(int cmd, const void *arg, int misc); }
+ SYS_GETRLIMIT = 194 // { int sys_getrlimit(int which, \
+ SYS_SETRLIMIT = 195 // { int sys_setrlimit(int which, \
+ SYS_MMAP = 197 // { void *sys_mmap(void *addr, size_t len, int prot, \
+ SYS_LSEEK = 199 // { off_t sys_lseek(int fd, int pad, off_t offset, \
+ SYS_TRUNCATE = 200 // { int sys_truncate(const char *path, int pad, \
+ SYS_FTRUNCATE = 201 // { int sys_ftruncate(int fd, int pad, off_t length); }
+ SYS___SYSCTL = 202 // { int sys___sysctl(const int *name, u_int namelen, \
+ SYS_MLOCK = 203 // { int sys_mlock(const void *addr, size_t len); }
+ SYS_MUNLOCK = 204 // { int sys_munlock(const void *addr, size_t len); }
+ SYS_GETPGID = 207 // { pid_t sys_getpgid(pid_t pid); }
+ SYS_UTRACE = 209 // { int sys_utrace(const char *label, const void *addr, \
+ SYS_SEMGET = 221 // { int sys_semget(key_t key, int nsems, int semflg); }
+ SYS_MSGGET = 225 // { int sys_msgget(key_t key, int msgflg); }
+ SYS_MSGSND = 226 // { int sys_msgsnd(int msqid, const void *msgp, size_t msgsz, \
+ SYS_MSGRCV = 227 // { int sys_msgrcv(int msqid, void *msgp, size_t msgsz, \
+ SYS_SHMAT = 228 // { void *sys_shmat(int shmid, const void *shmaddr, \
+ SYS_SHMDT = 230 // { int sys_shmdt(const void *shmaddr); }
+ SYS_MINHERIT = 250 // { int sys_minherit(void *addr, size_t len, \
+ SYS_POLL = 252 // { int sys_poll(struct pollfd *fds, \
+ SYS_ISSETUGID = 253 // { int sys_issetugid(void); }
+ SYS_LCHOWN = 254 // { int sys_lchown(const char *path, uid_t uid, gid_t gid); }
+ SYS_GETSID = 255 // { pid_t sys_getsid(pid_t pid); }
+ SYS_MSYNC = 256 // { int sys_msync(void *addr, size_t len, int flags); }
+ SYS_PIPE = 263 // { int sys_pipe(int *fdp); }
+ SYS_FHOPEN = 264 // { int sys_fhopen(const fhandle_t *fhp, int flags); }
+ SYS_PREADV = 267 // { ssize_t sys_preadv(int fd, \
+ SYS_PWRITEV = 268 // { ssize_t sys_pwritev(int fd, \
+ SYS_KQUEUE = 269 // { int sys_kqueue(void); }
+ SYS_MLOCKALL = 271 // { int sys_mlockall(int flags); }
+ SYS_MUNLOCKALL = 272 // { int sys_munlockall(void); }
+ SYS_GETRESUID = 281 // { int sys_getresuid(uid_t *ruid, uid_t *euid, \
+ SYS_SETRESUID = 282 // { int sys_setresuid(uid_t ruid, uid_t euid, \
+ SYS_GETRESGID = 283 // { int sys_getresgid(gid_t *rgid, gid_t *egid, \
+ SYS_SETRESGID = 284 // { int sys_setresgid(gid_t rgid, gid_t egid, \
+ SYS_MQUERY = 286 // { void *sys_mquery(void *addr, size_t len, int prot, \
+ SYS_CLOSEFROM = 287 // { int sys_closefrom(int fd); }
+ SYS_SIGALTSTACK = 288 // { int sys_sigaltstack(const struct sigaltstack *nss, \
+ SYS_SHMGET = 289 // { int sys_shmget(key_t key, size_t size, int shmflg); }
+ SYS_SEMOP = 290 // { int sys_semop(int semid, struct sembuf *sops, \
+ SYS_FHSTAT = 294 // { int sys_fhstat(const fhandle_t *fhp, \
+ SYS___SEMCTL = 295 // { int sys___semctl(int semid, int semnum, int cmd, \
+ SYS_SHMCTL = 296 // { int sys_shmctl(int shmid, int cmd, \
+ SYS_MSGCTL = 297 // { int sys_msgctl(int msqid, int cmd, \
+ SYS_SCHED_YIELD = 298 // { int sys_sched_yield(void); }
+ SYS_GETTHRID = 299 // { pid_t sys_getthrid(void); }
+ SYS___THRWAKEUP = 301 // { int sys___thrwakeup(const volatile void *ident, \
+ SYS___THREXIT = 302 // { void sys___threxit(pid_t *notdead); }
+ SYS___THRSIGDIVERT = 303 // { int sys___thrsigdivert(sigset_t sigmask, \
+ SYS___GETCWD = 304 // { int sys___getcwd(char *buf, size_t len); }
+ SYS_ADJFREQ = 305 // { int sys_adjfreq(const int64_t *freq, \
+ SYS_SETRTABLE = 310 // { int sys_setrtable(int rtableid); }
+ SYS_GETRTABLE = 311 // { int sys_getrtable(void); }
+ SYS_FACCESSAT = 313 // { int sys_faccessat(int fd, const char *path, \
+ SYS_FCHMODAT = 314 // { int sys_fchmodat(int fd, const char *path, \
+ SYS_FCHOWNAT = 315 // { int sys_fchownat(int fd, const char *path, \
+ SYS_LINKAT = 317 // { int sys_linkat(int fd1, const char *path1, int fd2, \
+ SYS_MKDIRAT = 318 // { int sys_mkdirat(int fd, const char *path, \
+ SYS_MKFIFOAT = 319 // { int sys_mkfifoat(int fd, const char *path, \
+ SYS_MKNODAT = 320 // { int sys_mknodat(int fd, const char *path, \
+ SYS_OPENAT = 321 // { int sys_openat(int fd, const char *path, int flags, \
+ SYS_READLINKAT = 322 // { ssize_t sys_readlinkat(int fd, const char *path, \
+ SYS_RENAMEAT = 323 // { int sys_renameat(int fromfd, const char *from, \
+ SYS_SYMLINKAT = 324 // { int sys_symlinkat(const char *path, int fd, \
+ SYS_UNLINKAT = 325 // { int sys_unlinkat(int fd, const char *path, \
+ SYS___SET_TCB = 329 // { void sys___set_tcb(void *tcb); }
+ SYS___GET_TCB = 330 // { void *sys___get_tcb(void); }
+)
diff --git a/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zsysnum_solaris_amd64.go b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zsysnum_solaris_amd64.go
new file mode 100644
index 000000000..c70865985
--- /dev/null
+++ b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zsysnum_solaris_amd64.go
@@ -0,0 +1,13 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build amd64,solaris
+
+package unix
+
+// TODO(aram): remove these before Go 1.3.
+const (
+ SYS_EXECVE = 59
+ SYS_FCNTL = 62
+)
diff --git a/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/ztypes_darwin_386.go b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/ztypes_darwin_386.go
new file mode 100644
index 000000000..2de1d44e2
--- /dev/null
+++ b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/ztypes_darwin_386.go
@@ -0,0 +1,447 @@
+// +build 386,darwin
+// Created by cgo -godefs - DO NOT EDIT
+// cgo -godefs types_darwin.go
+
+package unix
+
+const (
+ sizeofPtr = 0x4
+ sizeofShort = 0x2
+ sizeofInt = 0x4
+ sizeofLong = 0x4
+ sizeofLongLong = 0x8
+)
+
+type (
+ _C_short int16
+ _C_int int32
+ _C_long int32
+ _C_long_long int64
+)
+
+type Timespec struct {
+ Sec int32
+ Nsec int32
+}
+
+type Timeval struct {
+ Sec int32
+ Usec int32
+}
+
+type Timeval32 struct{}
+
+type Rusage struct {
+ Utime Timeval
+ Stime Timeval
+ Maxrss int32
+ Ixrss int32
+ Idrss int32
+ Isrss int32
+ Minflt int32
+ Majflt int32
+ Nswap int32
+ Inblock int32
+ Oublock int32
+ Msgsnd int32
+ Msgrcv int32
+ Nsignals int32
+ Nvcsw int32
+ Nivcsw int32
+}
+
+type Rlimit struct {
+ Cur uint64
+ Max uint64
+}
+
+type _Gid_t uint32
+
+type Stat_t struct {
+ Dev int32
+ Mode uint16
+ Nlink uint16
+ Ino uint64
+ Uid uint32
+ Gid uint32
+ Rdev int32
+ Atimespec Timespec
+ Mtimespec Timespec
+ Ctimespec Timespec
+ Birthtimespec Timespec
+ Size int64
+ Blocks int64
+ Blksize int32
+ Flags uint32
+ Gen uint32
+ Lspare int32
+ Qspare [2]int64
+}
+
+type Statfs_t struct {
+ Bsize uint32
+ Iosize int32
+ Blocks uint64
+ Bfree uint64
+ Bavail uint64
+ Files uint64
+ Ffree uint64
+ Fsid Fsid
+ Owner uint32
+ Type uint32
+ Flags uint32
+ Fssubtype uint32
+ Fstypename [16]int8
+ Mntonname [1024]int8
+ Mntfromname [1024]int8
+ Reserved [8]uint32
+}
+
+type Flock_t struct {
+ Start int64
+ Len int64
+ Pid int32
+ Type int16
+ Whence int16
+}
+
+type Fstore_t struct {
+ Flags uint32
+ Posmode int32
+ Offset int64
+ Length int64
+ Bytesalloc int64
+}
+
+type Radvisory_t struct {
+ Offset int64
+ Count int32
+}
+
+type Fbootstraptransfer_t struct {
+ Offset int64
+ Length uint32
+ Buffer *byte
+}
+
+type Log2phys_t struct {
+ Flags uint32
+ Contigbytes int64
+ Devoffset int64
+}
+
+type Fsid struct {
+ Val [2]int32
+}
+
+type Dirent struct {
+ Ino uint64
+ Seekoff uint64
+ Reclen uint16
+ Namlen uint16
+ Type uint8
+ Name [1024]int8
+ Pad_cgo_0 [3]byte
+}
+
+type RawSockaddrInet4 struct {
+ Len uint8
+ Family uint8
+ Port uint16
+ Addr [4]byte /* in_addr */
+ Zero [8]int8
+}
+
+type RawSockaddrInet6 struct {
+ Len uint8
+ Family uint8
+ Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +type RawSockaddrUnix struct { + Len uint8 + Family uint8 + Path [104]int8 +} + +type RawSockaddrDatalink struct { + Len uint8 + Family uint8 + Index uint16 + Type uint8 + Nlen uint8 + Alen uint8 + Slen uint8 + Data [12]int8 +} + +type RawSockaddr struct { + Len uint8 + Family uint8 + Data [14]int8 +} + +type RawSockaddrAny struct { + Addr RawSockaddr + Pad [92]int8 +} + +type _Socklen uint32 + +type Linger struct { + Onoff int32 + Linger int32 +} + +type Iovec struct { + Base *byte + Len uint32 +} + +type IPMreq struct { + Multiaddr [4]byte /* in_addr */ + Interface [4]byte /* in_addr */ +} + +type IPv6Mreq struct { + Multiaddr [16]byte /* in6_addr */ + Interface uint32 +} + +type Msghdr struct { + Name *byte + Namelen uint32 + Iov *Iovec + Iovlen int32 + Control *byte + Controllen uint32 + Flags int32 +} + +type Cmsghdr struct { + Len uint32 + Level int32 + Type int32 +} + +type Inet4Pktinfo struct { + Ifindex uint32 + Spec_dst [4]byte /* in_addr */ + Addr [4]byte /* in_addr */ +} + +type Inet6Pktinfo struct { + Addr [16]byte /* in6_addr */ + Ifindex uint32 +} + +type IPv6MTUInfo struct { + Addr RawSockaddrInet6 + Mtu uint32 +} + +type ICMPv6Filter struct { + Filt [8]uint32 +} + +const ( + SizeofSockaddrInet4 = 0x10 + SizeofSockaddrInet6 = 0x1c + SizeofSockaddrAny = 0x6c + SizeofSockaddrUnix = 0x6a + SizeofSockaddrDatalink = 0x14 + SizeofLinger = 0x8 + SizeofIPMreq = 0x8 + SizeofIPv6Mreq = 0x14 + SizeofMsghdr = 0x1c + SizeofCmsghdr = 0xc + SizeofInet4Pktinfo = 0xc + SizeofInet6Pktinfo = 0x14 + SizeofIPv6MTUInfo = 0x20 + SizeofICMPv6Filter = 0x20 +) + +const ( + PTRACE_TRACEME = 0x0 + PTRACE_CONT = 0x7 + PTRACE_KILL = 0x8 +) + +type Kevent_t struct { + Ident uint32 + Filter int16 + Flags uint16 + Fflags uint32 + Data int32 + Udata *byte +} + +type FdSet struct { + Bits [32]int32 +} + +const ( + SizeofIfMsghdr = 0x70 + SizeofIfData = 0x60 + SizeofIfaMsghdr = 0x14 + SizeofIfmaMsghdr = 0x10 + SizeofIfmaMsghdr2 = 0x14 + SizeofRtMsghdr = 0x5c + SizeofRtMetrics = 0x38 +) + +type IfMsghdr struct { + Msglen uint16 + Version uint8 + Type uint8 + Addrs int32 + Flags int32 + Index uint16 + Pad_cgo_0 [2]byte + Data IfData +} + +type IfData struct { + Type uint8 + Typelen uint8 + Physical uint8 + Addrlen uint8 + Hdrlen uint8 + Recvquota uint8 + Xmitquota uint8 + Unused1 uint8 + Mtu uint32 + Metric uint32 + Baudrate uint32 + Ipackets uint32 + Ierrors uint32 + Opackets uint32 + Oerrors uint32 + Collisions uint32 + Ibytes uint32 + Obytes uint32 + Imcasts uint32 + Omcasts uint32 + Iqdrops uint32 + Noproto uint32 + Recvtiming uint32 + Xmittiming uint32 + Lastchange Timeval + Unused2 uint32 + Hwassist uint32 + Reserved1 uint32 + Reserved2 uint32 +} + +type IfaMsghdr struct { + Msglen uint16 + Version uint8 + Type uint8 + Addrs int32 + Flags int32 + Index uint16 + Pad_cgo_0 [2]byte + Metric int32 +} + +type IfmaMsghdr struct { + Msglen uint16 + Version uint8 + Type uint8 + Addrs int32 + Flags int32 + Index uint16 + Pad_cgo_0 [2]byte +} + +type IfmaMsghdr2 struct { + Msglen uint16 + Version uint8 + Type uint8 + Addrs int32 + Flags int32 + Index uint16 + Pad_cgo_0 [2]byte + Refcount int32 +} + +type RtMsghdr struct { + Msglen uint16 + Version uint8 + Type uint8 + Index uint16 + Pad_cgo_0 [2]byte + Flags int32 + Addrs int32 + Pid int32 + Seq int32 + Errno int32 + Use int32 + Inits uint32 + Rmx RtMetrics +} + +type RtMetrics struct { + Locks uint32 + Mtu uint32 + Hopcount uint32 + Expire int32 + Recvpipe uint32 + Sendpipe 
uint32 + Ssthresh uint32 + Rtt uint32 + Rttvar uint32 + Pksent uint32 + Filler [4]uint32 +} + +const ( + SizeofBpfVersion = 0x4 + SizeofBpfStat = 0x8 + SizeofBpfProgram = 0x8 + SizeofBpfInsn = 0x8 + SizeofBpfHdr = 0x14 +) + +type BpfVersion struct { + Major uint16 + Minor uint16 +} + +type BpfStat struct { + Recv uint32 + Drop uint32 +} + +type BpfProgram struct { + Len uint32 + Insns *BpfInsn +} + +type BpfInsn struct { + Code uint16 + Jt uint8 + Jf uint8 + K uint32 +} + +type BpfHdr struct { + Tstamp Timeval + Caplen uint32 + Datalen uint32 + Hdrlen uint16 + Pad_cgo_0 [2]byte +} + +type Termios struct { + Iflag uint32 + Oflag uint32 + Cflag uint32 + Lflag uint32 + Cc [20]uint8 + Ispeed uint32 + Ospeed uint32 +} diff --git a/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/ztypes_darwin_amd64.go b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/ztypes_darwin_amd64.go new file mode 100644 index 000000000..044657878 --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/ztypes_darwin_amd64.go @@ -0,0 +1,462 @@ +// +build amd64,darwin +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs types_darwin.go + +package unix + +const ( + sizeofPtr = 0x8 + sizeofShort = 0x2 + sizeofInt = 0x4 + sizeofLong = 0x8 + sizeofLongLong = 0x8 +) + +type ( + _C_short int16 + _C_int int32 + _C_long int64 + _C_long_long int64 +) + +type Timespec struct { + Sec int64 + Nsec int64 +} + +type Timeval struct { + Sec int64 + Usec int32 + Pad_cgo_0 [4]byte +} + +type Timeval32 struct { + Sec int32 + Usec int32 +} + +type Rusage struct { + Utime Timeval + Stime Timeval + Maxrss int64 + Ixrss int64 + Idrss int64 + Isrss int64 + Minflt int64 + Majflt int64 + Nswap int64 + Inblock int64 + Oublock int64 + Msgsnd int64 + Msgrcv int64 + Nsignals int64 + Nvcsw int64 + Nivcsw int64 +} + +type Rlimit struct { + Cur uint64 + Max uint64 +} + +type _Gid_t uint32 + +type Stat_t struct { + Dev int32 + Mode uint16 + Nlink uint16 + Ino uint64 + Uid uint32 + Gid uint32 + Rdev int32 + Pad_cgo_0 [4]byte + Atimespec Timespec + Mtimespec Timespec + Ctimespec Timespec + Birthtimespec Timespec + Size int64 + Blocks int64 + Blksize int32 + Flags uint32 + Gen uint32 + Lspare int32 + Qspare [2]int64 +} + +type Statfs_t struct { + Bsize uint32 + Iosize int32 + Blocks uint64 + Bfree uint64 + Bavail uint64 + Files uint64 + Ffree uint64 + Fsid Fsid + Owner uint32 + Type uint32 + Flags uint32 + Fssubtype uint32 + Fstypename [16]int8 + Mntonname [1024]int8 + Mntfromname [1024]int8 + Reserved [8]uint32 +} + +type Flock_t struct { + Start int64 + Len int64 + Pid int32 + Type int16 + Whence int16 +} + +type Fstore_t struct { + Flags uint32 + Posmode int32 + Offset int64 + Length int64 + Bytesalloc int64 +} + +type Radvisory_t struct { + Offset int64 + Count int32 + Pad_cgo_0 [4]byte +} + +type Fbootstraptransfer_t struct { + Offset int64 + Length uint64 + Buffer *byte +} + +type Log2phys_t struct { + Flags uint32 + Pad_cgo_0 [8]byte + Pad_cgo_1 [8]byte +} + +type Fsid struct { + Val [2]int32 +} + +type Dirent struct { + Ino uint64 + Seekoff uint64 + Reclen uint16 + Namlen uint16 + Type uint8 + Name [1024]int8 + Pad_cgo_0 [3]byte +} + +type RawSockaddrInet4 struct { + Len uint8 + Family uint8 + Port uint16 + Addr [4]byte /* in_addr */ + Zero [8]int8 +} + +type RawSockaddrInet6 struct { + Len uint8 + Family uint8 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +type RawSockaddrUnix struct { + Len uint8 + Family uint8 + Path 
[104]int8 +} + +type RawSockaddrDatalink struct { + Len uint8 + Family uint8 + Index uint16 + Type uint8 + Nlen uint8 + Alen uint8 + Slen uint8 + Data [12]int8 +} + +type RawSockaddr struct { + Len uint8 + Family uint8 + Data [14]int8 +} + +type RawSockaddrAny struct { + Addr RawSockaddr + Pad [92]int8 +} + +type _Socklen uint32 + +type Linger struct { + Onoff int32 + Linger int32 +} + +type Iovec struct { + Base *byte + Len uint64 +} + +type IPMreq struct { + Multiaddr [4]byte /* in_addr */ + Interface [4]byte /* in_addr */ +} + +type IPv6Mreq struct { + Multiaddr [16]byte /* in6_addr */ + Interface uint32 +} + +type Msghdr struct { + Name *byte + Namelen uint32 + Pad_cgo_0 [4]byte + Iov *Iovec + Iovlen int32 + Pad_cgo_1 [4]byte + Control *byte + Controllen uint32 + Flags int32 +} + +type Cmsghdr struct { + Len uint32 + Level int32 + Type int32 +} + +type Inet4Pktinfo struct { + Ifindex uint32 + Spec_dst [4]byte /* in_addr */ + Addr [4]byte /* in_addr */ +} + +type Inet6Pktinfo struct { + Addr [16]byte /* in6_addr */ + Ifindex uint32 +} + +type IPv6MTUInfo struct { + Addr RawSockaddrInet6 + Mtu uint32 +} + +type ICMPv6Filter struct { + Filt [8]uint32 +} + +const ( + SizeofSockaddrInet4 = 0x10 + SizeofSockaddrInet6 = 0x1c + SizeofSockaddrAny = 0x6c + SizeofSockaddrUnix = 0x6a + SizeofSockaddrDatalink = 0x14 + SizeofLinger = 0x8 + SizeofIPMreq = 0x8 + SizeofIPv6Mreq = 0x14 + SizeofMsghdr = 0x30 + SizeofCmsghdr = 0xc + SizeofInet4Pktinfo = 0xc + SizeofInet6Pktinfo = 0x14 + SizeofIPv6MTUInfo = 0x20 + SizeofICMPv6Filter = 0x20 +) + +const ( + PTRACE_TRACEME = 0x0 + PTRACE_CONT = 0x7 + PTRACE_KILL = 0x8 +) + +type Kevent_t struct { + Ident uint64 + Filter int16 + Flags uint16 + Fflags uint32 + Data int64 + Udata *byte +} + +type FdSet struct { + Bits [32]int32 +} + +const ( + SizeofIfMsghdr = 0x70 + SizeofIfData = 0x60 + SizeofIfaMsghdr = 0x14 + SizeofIfmaMsghdr = 0x10 + SizeofIfmaMsghdr2 = 0x14 + SizeofRtMsghdr = 0x5c + SizeofRtMetrics = 0x38 +) + +type IfMsghdr struct { + Msglen uint16 + Version uint8 + Type uint8 + Addrs int32 + Flags int32 + Index uint16 + Pad_cgo_0 [2]byte + Data IfData +} + +type IfData struct { + Type uint8 + Typelen uint8 + Physical uint8 + Addrlen uint8 + Hdrlen uint8 + Recvquota uint8 + Xmitquota uint8 + Unused1 uint8 + Mtu uint32 + Metric uint32 + Baudrate uint32 + Ipackets uint32 + Ierrors uint32 + Opackets uint32 + Oerrors uint32 + Collisions uint32 + Ibytes uint32 + Obytes uint32 + Imcasts uint32 + Omcasts uint32 + Iqdrops uint32 + Noproto uint32 + Recvtiming uint32 + Xmittiming uint32 + Lastchange Timeval32 + Unused2 uint32 + Hwassist uint32 + Reserved1 uint32 + Reserved2 uint32 +} + +type IfaMsghdr struct { + Msglen uint16 + Version uint8 + Type uint8 + Addrs int32 + Flags int32 + Index uint16 + Pad_cgo_0 [2]byte + Metric int32 +} + +type IfmaMsghdr struct { + Msglen uint16 + Version uint8 + Type uint8 + Addrs int32 + Flags int32 + Index uint16 + Pad_cgo_0 [2]byte +} + +type IfmaMsghdr2 struct { + Msglen uint16 + Version uint8 + Type uint8 + Addrs int32 + Flags int32 + Index uint16 + Pad_cgo_0 [2]byte + Refcount int32 +} + +type RtMsghdr struct { + Msglen uint16 + Version uint8 + Type uint8 + Index uint16 + Pad_cgo_0 [2]byte + Flags int32 + Addrs int32 + Pid int32 + Seq int32 + Errno int32 + Use int32 + Inits uint32 + Rmx RtMetrics +} + +type RtMetrics struct { + Locks uint32 + Mtu uint32 + Hopcount uint32 + Expire int32 + Recvpipe uint32 + Sendpipe uint32 + Ssthresh uint32 + Rtt uint32 + Rttvar uint32 + Pksent uint32 + Filler [4]uint32 +} + +const ( + 
SizeofBpfVersion = 0x4 + SizeofBpfStat = 0x8 + SizeofBpfProgram = 0x10 + SizeofBpfInsn = 0x8 + SizeofBpfHdr = 0x14 +) + +type BpfVersion struct { + Major uint16 + Minor uint16 +} + +type BpfStat struct { + Recv uint32 + Drop uint32 +} + +type BpfProgram struct { + Len uint32 + Pad_cgo_0 [4]byte + Insns *BpfInsn +} + +type BpfInsn struct { + Code uint16 + Jt uint8 + Jf uint8 + K uint32 +} + +type BpfHdr struct { + Tstamp Timeval32 + Caplen uint32 + Datalen uint32 + Hdrlen uint16 + Pad_cgo_0 [2]byte +} + +type Termios struct { + Iflag uint64 + Oflag uint64 + Cflag uint64 + Lflag uint64 + Cc [20]uint8 + Pad_cgo_0 [4]byte + Ispeed uint64 + Ospeed uint64 +} + +const ( + AT_FDCWD = -0x2 + AT_SYMLINK_NOFOLLOW = 0x20 +) diff --git a/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/ztypes_darwin_arm.go b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/ztypes_darwin_arm.go new file mode 100644 index 000000000..66df363ce --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/ztypes_darwin_arm.go @@ -0,0 +1,449 @@ +// NOTE: cgo can't generate struct Stat_t and struct Statfs_t yet +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs types_darwin.go + +// +build arm,darwin + +package unix + +const ( + sizeofPtr = 0x4 + sizeofShort = 0x2 + sizeofInt = 0x4 + sizeofLong = 0x4 + sizeofLongLong = 0x8 +) + +type ( + _C_short int16 + _C_int int32 + _C_long int32 + _C_long_long int64 +) + +type Timespec struct { + Sec int32 + Nsec int32 +} + +type Timeval struct { + Sec int32 + Usec int32 +} + +type Timeval32 [0]byte + +type Rusage struct { + Utime Timeval + Stime Timeval + Maxrss int32 + Ixrss int32 + Idrss int32 + Isrss int32 + Minflt int32 + Majflt int32 + Nswap int32 + Inblock int32 + Oublock int32 + Msgsnd int32 + Msgrcv int32 + Nsignals int32 + Nvcsw int32 + Nivcsw int32 +} + +type Rlimit struct { + Cur uint64 + Max uint64 +} + +type _Gid_t uint32 + +type Stat_t struct { + Dev int32 + Mode uint16 + Nlink uint16 + Ino uint64 + Uid uint32 + Gid uint32 + Rdev int32 + Atimespec Timespec + Mtimespec Timespec + Ctimespec Timespec + Birthtimespec Timespec + Size int64 + Blocks int64 + Blksize int32 + Flags uint32 + Gen uint32 + Lspare int32 + Qspare [2]int64 +} + +type Statfs_t struct { + Bsize uint32 + Iosize int32 + Blocks uint64 + Bfree uint64 + Bavail uint64 + Files uint64 + Ffree uint64 + Fsid Fsid + Owner uint32 + Type uint32 + Flags uint32 + Fssubtype uint32 + Fstypename [16]int8 + Mntonname [1024]int8 + Mntfromname [1024]int8 + Reserved [8]uint32 +} + +type Flock_t struct { + Start int64 + Len int64 + Pid int32 + Type int16 + Whence int16 +} + +type Fstore_t struct { + Flags uint32 + Posmode int32 + Offset int64 + Length int64 + Bytesalloc int64 +} + +type Radvisory_t struct { + Offset int64 + Count int32 +} + +type Fbootstraptransfer_t struct { + Offset int64 + Length uint32 + Buffer *byte +} + +type Log2phys_t struct { + Flags uint32 + Contigbytes int64 + Devoffset int64 +} + +type Fsid struct { + Val [2]int32 +} + +type Dirent struct { + Ino uint64 + Seekoff uint64 + Reclen uint16 + Namlen uint16 + Type uint8 + Name [1024]int8 + Pad_cgo_0 [3]byte +} + +type RawSockaddrInet4 struct { + Len uint8 + Family uint8 + Port uint16 + Addr [4]byte /* in_addr */ + Zero [8]int8 +} + +type RawSockaddrInet6 struct { + Len uint8 + Family uint8 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +type RawSockaddrUnix struct { + Len uint8 + Family uint8 + Path [104]int8 +} + +type 
RawSockaddrDatalink struct { + Len uint8 + Family uint8 + Index uint16 + Type uint8 + Nlen uint8 + Alen uint8 + Slen uint8 + Data [12]int8 +} + +type RawSockaddr struct { + Len uint8 + Family uint8 + Data [14]int8 +} + +type RawSockaddrAny struct { + Addr RawSockaddr + Pad [92]int8 +} + +type _Socklen uint32 + +type Linger struct { + Onoff int32 + Linger int32 +} + +type Iovec struct { + Base *byte + Len uint32 +} + +type IPMreq struct { + Multiaddr [4]byte /* in_addr */ + Interface [4]byte /* in_addr */ +} + +type IPv6Mreq struct { + Multiaddr [16]byte /* in6_addr */ + Interface uint32 +} + +type Msghdr struct { + Name *byte + Namelen uint32 + Iov *Iovec + Iovlen int32 + Control *byte + Controllen uint32 + Flags int32 +} + +type Cmsghdr struct { + Len uint32 + Level int32 + Type int32 +} + +type Inet4Pktinfo struct { + Ifindex uint32 + Spec_dst [4]byte /* in_addr */ + Addr [4]byte /* in_addr */ +} + +type Inet6Pktinfo struct { + Addr [16]byte /* in6_addr */ + Ifindex uint32 +} + +type IPv6MTUInfo struct { + Addr RawSockaddrInet6 + Mtu uint32 +} + +type ICMPv6Filter struct { + Filt [8]uint32 +} + +const ( + SizeofSockaddrInet4 = 0x10 + SizeofSockaddrInet6 = 0x1c + SizeofSockaddrAny = 0x6c + SizeofSockaddrUnix = 0x6a + SizeofSockaddrDatalink = 0x14 + SizeofLinger = 0x8 + SizeofIPMreq = 0x8 + SizeofIPv6Mreq = 0x14 + SizeofMsghdr = 0x1c + SizeofCmsghdr = 0xc + SizeofInet4Pktinfo = 0xc + SizeofInet6Pktinfo = 0x14 + SizeofIPv6MTUInfo = 0x20 + SizeofICMPv6Filter = 0x20 +) + +const ( + PTRACE_TRACEME = 0x0 + PTRACE_CONT = 0x7 + PTRACE_KILL = 0x8 +) + +type Kevent_t struct { + Ident uint32 + Filter int16 + Flags uint16 + Fflags uint32 + Data int32 + Udata *byte +} + +type FdSet struct { + Bits [32]int32 +} + +const ( + SizeofIfMsghdr = 0x70 + SizeofIfData = 0x60 + SizeofIfaMsghdr = 0x14 + SizeofIfmaMsghdr = 0x10 + SizeofIfmaMsghdr2 = 0x14 + SizeofRtMsghdr = 0x5c + SizeofRtMetrics = 0x38 +) + +type IfMsghdr struct { + Msglen uint16 + Version uint8 + Type uint8 + Addrs int32 + Flags int32 + Index uint16 + Pad_cgo_0 [2]byte + Data IfData +} + +type IfData struct { + Type uint8 + Typelen uint8 + Physical uint8 + Addrlen uint8 + Hdrlen uint8 + Recvquota uint8 + Xmitquota uint8 + Unused1 uint8 + Mtu uint32 + Metric uint32 + Baudrate uint32 + Ipackets uint32 + Ierrors uint32 + Opackets uint32 + Oerrors uint32 + Collisions uint32 + Ibytes uint32 + Obytes uint32 + Imcasts uint32 + Omcasts uint32 + Iqdrops uint32 + Noproto uint32 + Recvtiming uint32 + Xmittiming uint32 + Lastchange Timeval + Unused2 uint32 + Hwassist uint32 + Reserved1 uint32 + Reserved2 uint32 +} + +type IfaMsghdr struct { + Msglen uint16 + Version uint8 + Type uint8 + Addrs int32 + Flags int32 + Index uint16 + Pad_cgo_0 [2]byte + Metric int32 +} + +type IfmaMsghdr struct { + Msglen uint16 + Version uint8 + Type uint8 + Addrs int32 + Flags int32 + Index uint16 + Pad_cgo_0 [2]byte +} + +type IfmaMsghdr2 struct { + Msglen uint16 + Version uint8 + Type uint8 + Addrs int32 + Flags int32 + Index uint16 + Pad_cgo_0 [2]byte + Refcount int32 +} + +type RtMsghdr struct { + Msglen uint16 + Version uint8 + Type uint8 + Index uint16 + Pad_cgo_0 [2]byte + Flags int32 + Addrs int32 + Pid int32 + Seq int32 + Errno int32 + Use int32 + Inits uint32 + Rmx RtMetrics +} + +type RtMetrics struct { + Locks uint32 + Mtu uint32 + Hopcount uint32 + Expire int32 + Recvpipe uint32 + Sendpipe uint32 + Ssthresh uint32 + Rtt uint32 + Rttvar uint32 + Pksent uint32 + Filler [4]uint32 +} + +const ( + SizeofBpfVersion = 0x4 + SizeofBpfStat = 0x8 + SizeofBpfProgram = 0x8 + 
SizeofBpfInsn = 0x8 + SizeofBpfHdr = 0x14 +) + +type BpfVersion struct { + Major uint16 + Minor uint16 +} + +type BpfStat struct { + Recv uint32 + Drop uint32 +} + +type BpfProgram struct { + Len uint32 + Insns *BpfInsn +} + +type BpfInsn struct { + Code uint16 + Jt uint8 + Jf uint8 + K uint32 +} + +type BpfHdr struct { + Tstamp Timeval + Caplen uint32 + Datalen uint32 + Hdrlen uint16 + Pad_cgo_0 [2]byte +} + +type Termios struct { + Iflag uint32 + Oflag uint32 + Cflag uint32 + Lflag uint32 + Cc [20]uint8 + Ispeed uint32 + Ospeed uint32 +} diff --git a/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/ztypes_darwin_arm64.go b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/ztypes_darwin_arm64.go new file mode 100644 index 000000000..85d56eabd --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/ztypes_darwin_arm64.go @@ -0,0 +1,457 @@ +// +build arm64,darwin +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs types_darwin.go + +package unix + +const ( + sizeofPtr = 0x8 + sizeofShort = 0x2 + sizeofInt = 0x4 + sizeofLong = 0x8 + sizeofLongLong = 0x8 +) + +type ( + _C_short int16 + _C_int int32 + _C_long int64 + _C_long_long int64 +) + +type Timespec struct { + Sec int64 + Nsec int64 +} + +type Timeval struct { + Sec int64 + Usec int32 + Pad_cgo_0 [4]byte +} + +type Timeval32 struct { + Sec int32 + Usec int32 +} + +type Rusage struct { + Utime Timeval + Stime Timeval + Maxrss int64 + Ixrss int64 + Idrss int64 + Isrss int64 + Minflt int64 + Majflt int64 + Nswap int64 + Inblock int64 + Oublock int64 + Msgsnd int64 + Msgrcv int64 + Nsignals int64 + Nvcsw int64 + Nivcsw int64 +} + +type Rlimit struct { + Cur uint64 + Max uint64 +} + +type _Gid_t uint32 + +type Stat_t struct { + Dev int32 + Mode uint16 + Nlink uint16 + Ino uint64 + Uid uint32 + Gid uint32 + Rdev int32 + Pad_cgo_0 [4]byte + Atimespec Timespec + Mtimespec Timespec + Ctimespec Timespec + Birthtimespec Timespec + Size int64 + Blocks int64 + Blksize int32 + Flags uint32 + Gen uint32 + Lspare int32 + Qspare [2]int64 +} + +type Statfs_t struct { + Bsize uint32 + Iosize int32 + Blocks uint64 + Bfree uint64 + Bavail uint64 + Files uint64 + Ffree uint64 + Fsid Fsid + Owner uint32 + Type uint32 + Flags uint32 + Fssubtype uint32 + Fstypename [16]int8 + Mntonname [1024]int8 + Mntfromname [1024]int8 + Reserved [8]uint32 +} + +type Flock_t struct { + Start int64 + Len int64 + Pid int32 + Type int16 + Whence int16 +} + +type Fstore_t struct { + Flags uint32 + Posmode int32 + Offset int64 + Length int64 + Bytesalloc int64 +} + +type Radvisory_t struct { + Offset int64 + Count int32 + Pad_cgo_0 [4]byte +} + +type Fbootstraptransfer_t struct { + Offset int64 + Length uint64 + Buffer *byte +} + +type Log2phys_t struct { + Flags uint32 + Pad_cgo_0 [8]byte + Pad_cgo_1 [8]byte +} + +type Fsid struct { + Val [2]int32 +} + +type Dirent struct { + Ino uint64 + Seekoff uint64 + Reclen uint16 + Namlen uint16 + Type uint8 + Name [1024]int8 + Pad_cgo_0 [3]byte +} + +type RawSockaddrInet4 struct { + Len uint8 + Family uint8 + Port uint16 + Addr [4]byte /* in_addr */ + Zero [8]int8 +} + +type RawSockaddrInet6 struct { + Len uint8 + Family uint8 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +type RawSockaddrUnix struct { + Len uint8 + Family uint8 + Path [104]int8 +} + +type RawSockaddrDatalink struct { + Len uint8 + Family uint8 + Index uint16 + Type uint8 + Nlen uint8 + Alen uint8 + Slen uint8 + Data [12]int8 +} + +type RawSockaddr 
struct { + Len uint8 + Family uint8 + Data [14]int8 +} + +type RawSockaddrAny struct { + Addr RawSockaddr + Pad [92]int8 +} + +type _Socklen uint32 + +type Linger struct { + Onoff int32 + Linger int32 +} + +type Iovec struct { + Base *byte + Len uint64 +} + +type IPMreq struct { + Multiaddr [4]byte /* in_addr */ + Interface [4]byte /* in_addr */ +} + +type IPv6Mreq struct { + Multiaddr [16]byte /* in6_addr */ + Interface uint32 +} + +type Msghdr struct { + Name *byte + Namelen uint32 + Pad_cgo_0 [4]byte + Iov *Iovec + Iovlen int32 + Pad_cgo_1 [4]byte + Control *byte + Controllen uint32 + Flags int32 +} + +type Cmsghdr struct { + Len uint32 + Level int32 + Type int32 +} + +type Inet4Pktinfo struct { + Ifindex uint32 + Spec_dst [4]byte /* in_addr */ + Addr [4]byte /* in_addr */ +} + +type Inet6Pktinfo struct { + Addr [16]byte /* in6_addr */ + Ifindex uint32 +} + +type IPv6MTUInfo struct { + Addr RawSockaddrInet6 + Mtu uint32 +} + +type ICMPv6Filter struct { + Filt [8]uint32 +} + +const ( + SizeofSockaddrInet4 = 0x10 + SizeofSockaddrInet6 = 0x1c + SizeofSockaddrAny = 0x6c + SizeofSockaddrUnix = 0x6a + SizeofSockaddrDatalink = 0x14 + SizeofLinger = 0x8 + SizeofIPMreq = 0x8 + SizeofIPv6Mreq = 0x14 + SizeofMsghdr = 0x30 + SizeofCmsghdr = 0xc + SizeofInet4Pktinfo = 0xc + SizeofInet6Pktinfo = 0x14 + SizeofIPv6MTUInfo = 0x20 + SizeofICMPv6Filter = 0x20 +) + +const ( + PTRACE_TRACEME = 0x0 + PTRACE_CONT = 0x7 + PTRACE_KILL = 0x8 +) + +type Kevent_t struct { + Ident uint64 + Filter int16 + Flags uint16 + Fflags uint32 + Data int64 + Udata *byte +} + +type FdSet struct { + Bits [32]int32 +} + +const ( + SizeofIfMsghdr = 0x70 + SizeofIfData = 0x60 + SizeofIfaMsghdr = 0x14 + SizeofIfmaMsghdr = 0x10 + SizeofIfmaMsghdr2 = 0x14 + SizeofRtMsghdr = 0x5c + SizeofRtMetrics = 0x38 +) + +type IfMsghdr struct { + Msglen uint16 + Version uint8 + Type uint8 + Addrs int32 + Flags int32 + Index uint16 + Pad_cgo_0 [2]byte + Data IfData +} + +type IfData struct { + Type uint8 + Typelen uint8 + Physical uint8 + Addrlen uint8 + Hdrlen uint8 + Recvquota uint8 + Xmitquota uint8 + Unused1 uint8 + Mtu uint32 + Metric uint32 + Baudrate uint32 + Ipackets uint32 + Ierrors uint32 + Opackets uint32 + Oerrors uint32 + Collisions uint32 + Ibytes uint32 + Obytes uint32 + Imcasts uint32 + Omcasts uint32 + Iqdrops uint32 + Noproto uint32 + Recvtiming uint32 + Xmittiming uint32 + Lastchange Timeval32 + Unused2 uint32 + Hwassist uint32 + Reserved1 uint32 + Reserved2 uint32 +} + +type IfaMsghdr struct { + Msglen uint16 + Version uint8 + Type uint8 + Addrs int32 + Flags int32 + Index uint16 + Pad_cgo_0 [2]byte + Metric int32 +} + +type IfmaMsghdr struct { + Msglen uint16 + Version uint8 + Type uint8 + Addrs int32 + Flags int32 + Index uint16 + Pad_cgo_0 [2]byte +} + +type IfmaMsghdr2 struct { + Msglen uint16 + Version uint8 + Type uint8 + Addrs int32 + Flags int32 + Index uint16 + Pad_cgo_0 [2]byte + Refcount int32 +} + +type RtMsghdr struct { + Msglen uint16 + Version uint8 + Type uint8 + Index uint16 + Pad_cgo_0 [2]byte + Flags int32 + Addrs int32 + Pid int32 + Seq int32 + Errno int32 + Use int32 + Inits uint32 + Rmx RtMetrics +} + +type RtMetrics struct { + Locks uint32 + Mtu uint32 + Hopcount uint32 + Expire int32 + Recvpipe uint32 + Sendpipe uint32 + Ssthresh uint32 + Rtt uint32 + Rttvar uint32 + Pksent uint32 + Filler [4]uint32 +} + +const ( + SizeofBpfVersion = 0x4 + SizeofBpfStat = 0x8 + SizeofBpfProgram = 0x10 + SizeofBpfInsn = 0x8 + SizeofBpfHdr = 0x14 +) + +type BpfVersion struct { + Major uint16 + Minor uint16 +} + +type 
BpfStat struct { + Recv uint32 + Drop uint32 +} + +type BpfProgram struct { + Len uint32 + Pad_cgo_0 [4]byte + Insns *BpfInsn +} + +type BpfInsn struct { + Code uint16 + Jt uint8 + Jf uint8 + K uint32 +} + +type BpfHdr struct { + Tstamp Timeval32 + Caplen uint32 + Datalen uint32 + Hdrlen uint16 + Pad_cgo_0 [2]byte +} + +type Termios struct { + Iflag uint64 + Oflag uint64 + Cflag uint64 + Lflag uint64 + Cc [20]uint8 + Pad_cgo_0 [4]byte + Ispeed uint64 + Ospeed uint64 +} diff --git a/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/ztypes_dragonfly_386.go b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/ztypes_dragonfly_386.go new file mode 100644 index 000000000..b7e7ff088 --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/ztypes_dragonfly_386.go @@ -0,0 +1,437 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs types_dragonfly.go + +// +build 386,dragonfly + +package unix + +const ( + sizeofPtr = 0x4 + sizeofShort = 0x2 + sizeofInt = 0x4 + sizeofLong = 0x4 + sizeofLongLong = 0x8 +) + +type ( + _C_short int16 + _C_int int32 + _C_long int32 + _C_long_long int64 +) + +type Timespec struct { + Sec int32 + Nsec int32 +} + +type Timeval struct { + Sec int32 + Usec int32 +} + +type Rusage struct { + Utime Timeval + Stime Timeval + Maxrss int32 + Ixrss int32 + Idrss int32 + Isrss int32 + Minflt int32 + Majflt int32 + Nswap int32 + Inblock int32 + Oublock int32 + Msgsnd int32 + Msgrcv int32 + Nsignals int32 + Nvcsw int32 + Nivcsw int32 +} + +type Rlimit struct { + Cur int64 + Max int64 +} + +type _Gid_t uint32 + +const ( + S_IFMT = 0xf000 + S_IFIFO = 0x1000 + S_IFCHR = 0x2000 + S_IFDIR = 0x4000 + S_IFBLK = 0x6000 + S_IFREG = 0x8000 + S_IFLNK = 0xa000 + S_IFSOCK = 0xc000 + S_ISUID = 0x800 + S_ISGID = 0x400 + S_ISVTX = 0x200 + S_IRUSR = 0x100 + S_IWUSR = 0x80 + S_IXUSR = 0x40 +) + +type Stat_t struct { + Ino uint64 + Nlink uint32 + Dev uint32 + Mode uint16 + Padding1 uint16 + Uid uint32 + Gid uint32 + Rdev uint32 + Atim Timespec + Mtim Timespec + Ctim Timespec + Size int64 + Blocks int64 + Blksize uint32 + Flags uint32 + Gen uint32 + Lspare int32 + Qspare1 int64 + Qspare2 int64 +} + +type Statfs_t struct { + Spare2 int32 + Bsize int32 + Iosize int32 + Blocks int32 + Bfree int32 + Bavail int32 + Files int32 + Ffree int32 + Fsid Fsid + Owner uint32 + Type int32 + Flags int32 + Syncwrites int32 + Asyncwrites int32 + Fstypename [16]int8 + Mntonname [80]int8 + Syncreads int32 + Asyncreads int32 + Spares1 int16 + Mntfromname [80]int8 + Spares2 int16 + Spare [2]int32 +} + +type Flock_t struct { + Start int64 + Len int64 + Pid int32 + Type int16 + Whence int16 +} + +type Dirent struct { + Fileno uint64 + Namlen uint16 + Type uint8 + Unused1 uint8 + Unused2 uint32 + Name [256]int8 +} + +type Fsid struct { + Val [2]int32 +} + +type RawSockaddrInet4 struct { + Len uint8 + Family uint8 + Port uint16 + Addr [4]byte /* in_addr */ + Zero [8]int8 +} + +type RawSockaddrInet6 struct { + Len uint8 + Family uint8 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +type RawSockaddrUnix struct { + Len uint8 + Family uint8 + Path [104]int8 +} + +type RawSockaddrDatalink struct { + Len uint8 + Family uint8 + Index uint16 + Type uint8 + Nlen uint8 + Alen uint8 + Slen uint8 + Data [12]int8 + Rcf uint16 + Route [16]uint16 +} + +type RawSockaddr struct { + Len uint8 + Family uint8 + Data [14]int8 +} + +type RawSockaddrAny struct { + Addr RawSockaddr + Pad [92]int8 +} + +type _Socklen uint32 + +type 
Linger struct { + Onoff int32 + Linger int32 +} + +type Iovec struct { + Base *byte + Len uint32 +} + +type IPMreq struct { + Multiaddr [4]byte /* in_addr */ + Interface [4]byte /* in_addr */ +} + +type IPv6Mreq struct { + Multiaddr [16]byte /* in6_addr */ + Interface uint32 +} + +type Msghdr struct { + Name *byte + Namelen uint32 + Iov *Iovec + Iovlen int32 + Control *byte + Controllen uint32 + Flags int32 +} + +type Cmsghdr struct { + Len uint32 + Level int32 + Type int32 +} + +type Inet6Pktinfo struct { + Addr [16]byte /* in6_addr */ + Ifindex uint32 +} + +type IPv6MTUInfo struct { + Addr RawSockaddrInet6 + Mtu uint32 +} + +type ICMPv6Filter struct { + Filt [8]uint32 +} + +const ( + SizeofSockaddrInet4 = 0x10 + SizeofSockaddrInet6 = 0x1c + SizeofSockaddrAny = 0x6c + SizeofSockaddrUnix = 0x6a + SizeofSockaddrDatalink = 0x36 + SizeofLinger = 0x8 + SizeofIPMreq = 0x8 + SizeofIPv6Mreq = 0x14 + SizeofMsghdr = 0x1c + SizeofCmsghdr = 0xc + SizeofInet6Pktinfo = 0x14 + SizeofIPv6MTUInfo = 0x20 + SizeofICMPv6Filter = 0x20 +) + +const ( + PTRACE_TRACEME = 0x0 + PTRACE_CONT = 0x7 + PTRACE_KILL = 0x8 +) + +type Kevent_t struct { + Ident uint32 + Filter int16 + Flags uint16 + Fflags uint32 + Data int32 + Udata *byte +} + +type FdSet struct { + Bits [32]uint32 +} + +const ( + SizeofIfMsghdr = 0x68 + SizeofIfData = 0x58 + SizeofIfaMsghdr = 0x14 + SizeofIfmaMsghdr = 0x10 + SizeofIfAnnounceMsghdr = 0x18 + SizeofRtMsghdr = 0x5c + SizeofRtMetrics = 0x38 +) + +type IfMsghdr struct { + Msglen uint16 + Version uint8 + Type uint8 + Addrs int32 + Flags int32 + Index uint16 + Pad_cgo_0 [2]byte + Data IfData +} + +type IfData struct { + Type uint8 + Physical uint8 + Addrlen uint8 + Hdrlen uint8 + Recvquota uint8 + Xmitquota uint8 + Pad_cgo_0 [2]byte + Mtu uint32 + Metric uint32 + Link_state uint32 + Baudrate uint64 + Ipackets uint32 + Ierrors uint32 + Opackets uint32 + Oerrors uint32 + Collisions uint32 + Ibytes uint32 + Obytes uint32 + Imcasts uint32 + Omcasts uint32 + Iqdrops uint32 + Noproto uint32 + Hwassist uint32 + Unused uint32 + Lastchange Timeval +} + +type IfaMsghdr struct { + Msglen uint16 + Version uint8 + Type uint8 + Addrs int32 + Flags int32 + Index uint16 + Pad_cgo_0 [2]byte + Metric int32 +} + +type IfmaMsghdr struct { + Msglen uint16 + Version uint8 + Type uint8 + Addrs int32 + Flags int32 + Index uint16 + Pad_cgo_0 [2]byte +} + +type IfAnnounceMsghdr struct { + Msglen uint16 + Version uint8 + Type uint8 + Index uint16 + Name [16]int8 + What uint16 +} + +type RtMsghdr struct { + Msglen uint16 + Version uint8 + Type uint8 + Index uint16 + Pad_cgo_0 [2]byte + Flags int32 + Addrs int32 + Pid int32 + Seq int32 + Errno int32 + Use int32 + Inits uint32 + Rmx RtMetrics +} + +type RtMetrics struct { + Locks uint32 + Mtu uint32 + Pksent uint32 + Expire uint32 + Sendpipe uint32 + Ssthresh uint32 + Rtt uint32 + Rttvar uint32 + Recvpipe uint32 + Hopcount uint32 + Mssopt uint16 + Pad uint16 + Msl uint32 + Iwmaxsegs uint32 + Iwcapsegs uint32 +} + +const ( + SizeofBpfVersion = 0x4 + SizeofBpfStat = 0x8 + SizeofBpfProgram = 0x8 + SizeofBpfInsn = 0x8 + SizeofBpfHdr = 0x14 +) + +type BpfVersion struct { + Major uint16 + Minor uint16 +} + +type BpfStat struct { + Recv uint32 + Drop uint32 +} + +type BpfProgram struct { + Len uint32 + Insns *BpfInsn +} + +type BpfInsn struct { + Code uint16 + Jt uint8 + Jf uint8 + K uint32 +} + +type BpfHdr struct { + Tstamp Timeval + Caplen uint32 + Datalen uint32 + Hdrlen uint16 + Pad_cgo_0 [2]byte +} + +type Termios struct { + Iflag uint32 + Oflag uint32 + Cflag uint32 + Lflag 
uint32 + Cc [20]uint8 + Ispeed uint32 + Ospeed uint32 +} diff --git a/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/ztypes_dragonfly_amd64.go b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/ztypes_dragonfly_amd64.go new file mode 100644 index 000000000..8a6f4e1ce --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/ztypes_dragonfly_amd64.go @@ -0,0 +1,443 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs types_dragonfly.go + +// +build amd64,dragonfly + +package unix + +const ( + sizeofPtr = 0x8 + sizeofShort = 0x2 + sizeofInt = 0x4 + sizeofLong = 0x8 + sizeofLongLong = 0x8 +) + +type ( + _C_short int16 + _C_int int32 + _C_long int64 + _C_long_long int64 +) + +type Timespec struct { + Sec int64 + Nsec int64 +} + +type Timeval struct { + Sec int64 + Usec int64 +} + +type Rusage struct { + Utime Timeval + Stime Timeval + Maxrss int64 + Ixrss int64 + Idrss int64 + Isrss int64 + Minflt int64 + Majflt int64 + Nswap int64 + Inblock int64 + Oublock int64 + Msgsnd int64 + Msgrcv int64 + Nsignals int64 + Nvcsw int64 + Nivcsw int64 +} + +type Rlimit struct { + Cur int64 + Max int64 +} + +type _Gid_t uint32 + +const ( + S_IFMT = 0xf000 + S_IFIFO = 0x1000 + S_IFCHR = 0x2000 + S_IFDIR = 0x4000 + S_IFBLK = 0x6000 + S_IFREG = 0x8000 + S_IFLNK = 0xa000 + S_IFSOCK = 0xc000 + S_ISUID = 0x800 + S_ISGID = 0x400 + S_ISVTX = 0x200 + S_IRUSR = 0x100 + S_IWUSR = 0x80 + S_IXUSR = 0x40 +) + +type Stat_t struct { + Ino uint64 + Nlink uint32 + Dev uint32 + Mode uint16 + Padding1 uint16 + Uid uint32 + Gid uint32 + Rdev uint32 + Atim Timespec + Mtim Timespec + Ctim Timespec + Size int64 + Blocks int64 + Blksize uint32 + Flags uint32 + Gen uint32 + Lspare int32 + Qspare1 int64 + Qspare2 int64 +} + +type Statfs_t struct { + Spare2 int64 + Bsize int64 + Iosize int64 + Blocks int64 + Bfree int64 + Bavail int64 + Files int64 + Ffree int64 + Fsid Fsid + Owner uint32 + Type int32 + Flags int32 + Pad_cgo_0 [4]byte + Syncwrites int64 + Asyncwrites int64 + Fstypename [16]int8 + Mntonname [80]int8 + Syncreads int64 + Asyncreads int64 + Spares1 int16 + Mntfromname [80]int8 + Spares2 int16 + Pad_cgo_1 [4]byte + Spare [2]int64 +} + +type Flock_t struct { + Start int64 + Len int64 + Pid int32 + Type int16 + Whence int16 +} + +type Dirent struct { + Fileno uint64 + Namlen uint16 + Type uint8 + Unused1 uint8 + Unused2 uint32 + Name [256]int8 +} + +type Fsid struct { + Val [2]int32 +} + +type RawSockaddrInet4 struct { + Len uint8 + Family uint8 + Port uint16 + Addr [4]byte /* in_addr */ + Zero [8]int8 +} + +type RawSockaddrInet6 struct { + Len uint8 + Family uint8 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +type RawSockaddrUnix struct { + Len uint8 + Family uint8 + Path [104]int8 +} + +type RawSockaddrDatalink struct { + Len uint8 + Family uint8 + Index uint16 + Type uint8 + Nlen uint8 + Alen uint8 + Slen uint8 + Data [12]int8 + Rcf uint16 + Route [16]uint16 +} + +type RawSockaddr struct { + Len uint8 + Family uint8 + Data [14]int8 +} + +type RawSockaddrAny struct { + Addr RawSockaddr + Pad [92]int8 +} + +type _Socklen uint32 + +type Linger struct { + Onoff int32 + Linger int32 +} + +type Iovec struct { + Base *byte + Len uint64 +} + +type IPMreq struct { + Multiaddr [4]byte /* in_addr */ + Interface [4]byte /* in_addr */ +} + +type IPv6Mreq struct { + Multiaddr [16]byte /* in6_addr */ + Interface uint32 +} + +type Msghdr struct { + Name *byte + Namelen uint32 + Pad_cgo_0 [4]byte + Iov *Iovec + 
Iovlen int32 + Pad_cgo_1 [4]byte + Control *byte + Controllen uint32 + Flags int32 +} + +type Cmsghdr struct { + Len uint32 + Level int32 + Type int32 +} + +type Inet6Pktinfo struct { + Addr [16]byte /* in6_addr */ + Ifindex uint32 +} + +type IPv6MTUInfo struct { + Addr RawSockaddrInet6 + Mtu uint32 +} + +type ICMPv6Filter struct { + Filt [8]uint32 +} + +const ( + SizeofSockaddrInet4 = 0x10 + SizeofSockaddrInet6 = 0x1c + SizeofSockaddrAny = 0x6c + SizeofSockaddrUnix = 0x6a + SizeofSockaddrDatalink = 0x36 + SizeofLinger = 0x8 + SizeofIPMreq = 0x8 + SizeofIPv6Mreq = 0x14 + SizeofMsghdr = 0x30 + SizeofCmsghdr = 0xc + SizeofInet6Pktinfo = 0x14 + SizeofIPv6MTUInfo = 0x20 + SizeofICMPv6Filter = 0x20 +) + +const ( + PTRACE_TRACEME = 0x0 + PTRACE_CONT = 0x7 + PTRACE_KILL = 0x8 +) + +type Kevent_t struct { + Ident uint64 + Filter int16 + Flags uint16 + Fflags uint32 + Data int64 + Udata *byte +} + +type FdSet struct { + Bits [16]uint64 +} + +const ( + SizeofIfMsghdr = 0xb0 + SizeofIfData = 0xa0 + SizeofIfaMsghdr = 0x14 + SizeofIfmaMsghdr = 0x10 + SizeofIfAnnounceMsghdr = 0x18 + SizeofRtMsghdr = 0x98 + SizeofRtMetrics = 0x70 +) + +type IfMsghdr struct { + Msglen uint16 + Version uint8 + Type uint8 + Addrs int32 + Flags int32 + Index uint16 + Pad_cgo_0 [2]byte + Data IfData +} + +type IfData struct { + Type uint8 + Physical uint8 + Addrlen uint8 + Hdrlen uint8 + Recvquota uint8 + Xmitquota uint8 + Pad_cgo_0 [2]byte + Mtu uint64 + Metric uint64 + Link_state uint64 + Baudrate uint64 + Ipackets uint64 + Ierrors uint64 + Opackets uint64 + Oerrors uint64 + Collisions uint64 + Ibytes uint64 + Obytes uint64 + Imcasts uint64 + Omcasts uint64 + Iqdrops uint64 + Noproto uint64 + Hwassist uint64 + Unused uint64 + Lastchange Timeval +} + +type IfaMsghdr struct { + Msglen uint16 + Version uint8 + Type uint8 + Addrs int32 + Flags int32 + Index uint16 + Pad_cgo_0 [2]byte + Metric int32 +} + +type IfmaMsghdr struct { + Msglen uint16 + Version uint8 + Type uint8 + Addrs int32 + Flags int32 + Index uint16 + Pad_cgo_0 [2]byte +} + +type IfAnnounceMsghdr struct { + Msglen uint16 + Version uint8 + Type uint8 + Index uint16 + Name [16]int8 + What uint16 +} + +type RtMsghdr struct { + Msglen uint16 + Version uint8 + Type uint8 + Index uint16 + Pad_cgo_0 [2]byte + Flags int32 + Addrs int32 + Pid int32 + Seq int32 + Errno int32 + Use int32 + Inits uint64 + Rmx RtMetrics +} + +type RtMetrics struct { + Locks uint64 + Mtu uint64 + Pksent uint64 + Expire uint64 + Sendpipe uint64 + Ssthresh uint64 + Rtt uint64 + Rttvar uint64 + Recvpipe uint64 + Hopcount uint64 + Mssopt uint16 + Pad uint16 + Pad_cgo_0 [4]byte + Msl uint64 + Iwmaxsegs uint64 + Iwcapsegs uint64 +} + +const ( + SizeofBpfVersion = 0x4 + SizeofBpfStat = 0x8 + SizeofBpfProgram = 0x10 + SizeofBpfInsn = 0x8 + SizeofBpfHdr = 0x20 +) + +type BpfVersion struct { + Major uint16 + Minor uint16 +} + +type BpfStat struct { + Recv uint32 + Drop uint32 +} + +type BpfProgram struct { + Len uint32 + Pad_cgo_0 [4]byte + Insns *BpfInsn +} + +type BpfInsn struct { + Code uint16 + Jt uint8 + Jf uint8 + K uint32 +} + +type BpfHdr struct { + Tstamp Timeval + Caplen uint32 + Datalen uint32 + Hdrlen uint16 + Pad_cgo_0 [6]byte +} + +type Termios struct { + Iflag uint32 + Oflag uint32 + Cflag uint32 + Lflag uint32 + Cc [20]uint8 + Ispeed uint32 + Ospeed uint32 +} diff --git a/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/ztypes_freebsd_386.go b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/ztypes_freebsd_386.go new file mode 100644 index 
000000000..8cf30947b --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/ztypes_freebsd_386.go @@ -0,0 +1,502 @@ +// +build 386,freebsd +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs types_freebsd.go + +package unix + +const ( + sizeofPtr = 0x4 + sizeofShort = 0x2 + sizeofInt = 0x4 + sizeofLong = 0x4 + sizeofLongLong = 0x8 +) + +type ( + _C_short int16 + _C_int int32 + _C_long int32 + _C_long_long int64 +) + +type Timespec struct { + Sec int32 + Nsec int32 +} + +type Timeval struct { + Sec int32 + Usec int32 +} + +type Rusage struct { + Utime Timeval + Stime Timeval + Maxrss int32 + Ixrss int32 + Idrss int32 + Isrss int32 + Minflt int32 + Majflt int32 + Nswap int32 + Inblock int32 + Oublock int32 + Msgsnd int32 + Msgrcv int32 + Nsignals int32 + Nvcsw int32 + Nivcsw int32 +} + +type Rlimit struct { + Cur int64 + Max int64 +} + +type _Gid_t uint32 + +const ( + S_IFMT = 0xf000 + S_IFIFO = 0x1000 + S_IFCHR = 0x2000 + S_IFDIR = 0x4000 + S_IFBLK = 0x6000 + S_IFREG = 0x8000 + S_IFLNK = 0xa000 + S_IFSOCK = 0xc000 + S_ISUID = 0x800 + S_ISGID = 0x400 + S_ISVTX = 0x200 + S_IRUSR = 0x100 + S_IWUSR = 0x80 + S_IXUSR = 0x40 +) + +type Stat_t struct { + Dev uint32 + Ino uint32 + Mode uint16 + Nlink uint16 + Uid uint32 + Gid uint32 + Rdev uint32 + Atimespec Timespec + Mtimespec Timespec + Ctimespec Timespec + Size int64 + Blocks int64 + Blksize uint32 + Flags uint32 + Gen uint32 + Lspare int32 + Birthtimespec Timespec + Pad_cgo_0 [8]byte +} + +type Statfs_t struct { + Version uint32 + Type uint32 + Flags uint64 + Bsize uint64 + Iosize uint64 + Blocks uint64 + Bfree uint64 + Bavail int64 + Files uint64 + Ffree int64 + Syncwrites uint64 + Asyncwrites uint64 + Syncreads uint64 + Asyncreads uint64 + Spare [10]uint64 + Namemax uint32 + Owner uint32 + Fsid Fsid + Charspare [80]int8 + Fstypename [16]int8 + Mntfromname [88]int8 + Mntonname [88]int8 +} + +type Flock_t struct { + Start int64 + Len int64 + Pid int32 + Type int16 + Whence int16 + Sysid int32 +} + +type Dirent struct { + Fileno uint32 + Reclen uint16 + Type uint8 + Namlen uint8 + Name [256]int8 +} + +type Fsid struct { + Val [2]int32 +} + +const ( + FADV_NORMAL = 0x0 + FADV_RANDOM = 0x1 + FADV_SEQUENTIAL = 0x2 + FADV_WILLNEED = 0x3 + FADV_DONTNEED = 0x4 + FADV_NOREUSE = 0x5 +) + +type RawSockaddrInet4 struct { + Len uint8 + Family uint8 + Port uint16 + Addr [4]byte /* in_addr */ + Zero [8]int8 +} + +type RawSockaddrInet6 struct { + Len uint8 + Family uint8 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +type RawSockaddrUnix struct { + Len uint8 + Family uint8 + Path [104]int8 +} + +type RawSockaddrDatalink struct { + Len uint8 + Family uint8 + Index uint16 + Type uint8 + Nlen uint8 + Alen uint8 + Slen uint8 + Data [46]int8 +} + +type RawSockaddr struct { + Len uint8 + Family uint8 + Data [14]int8 +} + +type RawSockaddrAny struct { + Addr RawSockaddr + Pad [92]int8 +} + +type _Socklen uint32 + +type Linger struct { + Onoff int32 + Linger int32 +} + +type Iovec struct { + Base *byte + Len uint32 +} + +type IPMreq struct { + Multiaddr [4]byte /* in_addr */ + Interface [4]byte /* in_addr */ +} + +type IPMreqn struct { + Multiaddr [4]byte /* in_addr */ + Address [4]byte /* in_addr */ + Ifindex int32 +} + +type IPv6Mreq struct { + Multiaddr [16]byte /* in6_addr */ + Interface uint32 +} + +type Msghdr struct { + Name *byte + Namelen uint32 + Iov *Iovec + Iovlen int32 + Control *byte + Controllen uint32 + Flags int32 +} + +type Cmsghdr struct { + Len uint32 + Level int32 + 
Type int32 +} + +type Inet6Pktinfo struct { + Addr [16]byte /* in6_addr */ + Ifindex uint32 +} + +type IPv6MTUInfo struct { + Addr RawSockaddrInet6 + Mtu uint32 +} + +type ICMPv6Filter struct { + Filt [8]uint32 +} + +const ( + SizeofSockaddrInet4 = 0x10 + SizeofSockaddrInet6 = 0x1c + SizeofSockaddrAny = 0x6c + SizeofSockaddrUnix = 0x6a + SizeofSockaddrDatalink = 0x36 + SizeofLinger = 0x8 + SizeofIPMreq = 0x8 + SizeofIPMreqn = 0xc + SizeofIPv6Mreq = 0x14 + SizeofMsghdr = 0x1c + SizeofCmsghdr = 0xc + SizeofInet6Pktinfo = 0x14 + SizeofIPv6MTUInfo = 0x20 + SizeofICMPv6Filter = 0x20 +) + +const ( + PTRACE_TRACEME = 0x0 + PTRACE_CONT = 0x7 + PTRACE_KILL = 0x8 +) + +type Kevent_t struct { + Ident uint32 + Filter int16 + Flags uint16 + Fflags uint32 + Data int32 + Udata *byte +} + +type FdSet struct { + X__fds_bits [32]uint32 +} + +const ( + sizeofIfMsghdr = 0x64 + SizeofIfMsghdr = 0x60 + sizeofIfData = 0x54 + SizeofIfData = 0x50 + SizeofIfaMsghdr = 0x14 + SizeofIfmaMsghdr = 0x10 + SizeofIfAnnounceMsghdr = 0x18 + SizeofRtMsghdr = 0x5c + SizeofRtMetrics = 0x38 +) + +type ifMsghdr struct { + Msglen uint16 + Version uint8 + Type uint8 + Addrs int32 + Flags int32 + Index uint16 + Pad_cgo_0 [2]byte + Data ifData +} + +type IfMsghdr struct { + Msglen uint16 + Version uint8 + Type uint8 + Addrs int32 + Flags int32 + Index uint16 + Pad_cgo_0 [2]byte + Data IfData +} + +type ifData struct { + Type uint8 + Physical uint8 + Addrlen uint8 + Hdrlen uint8 + Link_state uint8 + Vhid uint8 + Baudrate_pf uint8 + Datalen uint8 + Mtu uint32 + Metric uint32 + Baudrate uint32 + Ipackets uint32 + Ierrors uint32 + Opackets uint32 + Oerrors uint32 + Collisions uint32 + Ibytes uint32 + Obytes uint32 + Imcasts uint32 + Omcasts uint32 + Iqdrops uint32 + Noproto uint32 + Hwassist uint64 + Epoch int32 + Lastchange Timeval +} + +type IfData struct { + Type uint8 + Physical uint8 + Addrlen uint8 + Hdrlen uint8 + Link_state uint8 + Spare_char1 uint8 + Spare_char2 uint8 + Datalen uint8 + Mtu uint32 + Metric uint32 + Baudrate uint32 + Ipackets uint32 + Ierrors uint32 + Opackets uint32 + Oerrors uint32 + Collisions uint32 + Ibytes uint32 + Obytes uint32 + Imcasts uint32 + Omcasts uint32 + Iqdrops uint32 + Noproto uint32 + Hwassist uint32 + Epoch int32 + Lastchange Timeval +} + +type IfaMsghdr struct { + Msglen uint16 + Version uint8 + Type uint8 + Addrs int32 + Flags int32 + Index uint16 + Pad_cgo_0 [2]byte + Metric int32 +} + +type IfmaMsghdr struct { + Msglen uint16 + Version uint8 + Type uint8 + Addrs int32 + Flags int32 + Index uint16 + Pad_cgo_0 [2]byte +} + +type IfAnnounceMsghdr struct { + Msglen uint16 + Version uint8 + Type uint8 + Index uint16 + Name [16]int8 + What uint16 +} + +type RtMsghdr struct { + Msglen uint16 + Version uint8 + Type uint8 + Index uint16 + Pad_cgo_0 [2]byte + Flags int32 + Addrs int32 + Pid int32 + Seq int32 + Errno int32 + Fmask int32 + Inits uint32 + Rmx RtMetrics +} + +type RtMetrics struct { + Locks uint32 + Mtu uint32 + Hopcount uint32 + Expire uint32 + Recvpipe uint32 + Sendpipe uint32 + Ssthresh uint32 + Rtt uint32 + Rttvar uint32 + Pksent uint32 + Weight uint32 + Filler [3]uint32 +} + +const ( + SizeofBpfVersion = 0x4 + SizeofBpfStat = 0x8 + SizeofBpfZbuf = 0xc + SizeofBpfProgram = 0x8 + SizeofBpfInsn = 0x8 + SizeofBpfHdr = 0x14 + SizeofBpfZbufHeader = 0x20 +) + +type BpfVersion struct { + Major uint16 + Minor uint16 +} + +type BpfStat struct { + Recv uint32 + Drop uint32 +} + +type BpfZbuf struct { + Bufa *byte + Bufb *byte + Buflen uint32 +} + +type BpfProgram struct { + Len uint32 + Insns 
*BpfInsn +} + +type BpfInsn struct { + Code uint16 + Jt uint8 + Jf uint8 + K uint32 +} + +type BpfHdr struct { + Tstamp Timeval + Caplen uint32 + Datalen uint32 + Hdrlen uint16 + Pad_cgo_0 [2]byte +} + +type BpfZbufHeader struct { + Kernel_gen uint32 + Kernel_len uint32 + User_gen uint32 + X_bzh_pad [5]uint32 +} + +type Termios struct { + Iflag uint32 + Oflag uint32 + Cflag uint32 + Lflag uint32 + Cc [20]uint8 + Ispeed uint32 + Ospeed uint32 +} diff --git a/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/ztypes_freebsd_amd64.go b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/ztypes_freebsd_amd64.go new file mode 100644 index 000000000..e5feb207b --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/ztypes_freebsd_amd64.go @@ -0,0 +1,505 @@ +// +build amd64,freebsd +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs types_freebsd.go + +package unix + +const ( + sizeofPtr = 0x8 + sizeofShort = 0x2 + sizeofInt = 0x4 + sizeofLong = 0x8 + sizeofLongLong = 0x8 +) + +type ( + _C_short int16 + _C_int int32 + _C_long int64 + _C_long_long int64 +) + +type Timespec struct { + Sec int64 + Nsec int64 +} + +type Timeval struct { + Sec int64 + Usec int64 +} + +type Rusage struct { + Utime Timeval + Stime Timeval + Maxrss int64 + Ixrss int64 + Idrss int64 + Isrss int64 + Minflt int64 + Majflt int64 + Nswap int64 + Inblock int64 + Oublock int64 + Msgsnd int64 + Msgrcv int64 + Nsignals int64 + Nvcsw int64 + Nivcsw int64 +} + +type Rlimit struct { + Cur int64 + Max int64 +} + +type _Gid_t uint32 + +const ( + S_IFMT = 0xf000 + S_IFIFO = 0x1000 + S_IFCHR = 0x2000 + S_IFDIR = 0x4000 + S_IFBLK = 0x6000 + S_IFREG = 0x8000 + S_IFLNK = 0xa000 + S_IFSOCK = 0xc000 + S_ISUID = 0x800 + S_ISGID = 0x400 + S_ISVTX = 0x200 + S_IRUSR = 0x100 + S_IWUSR = 0x80 + S_IXUSR = 0x40 +) + +type Stat_t struct { + Dev uint32 + Ino uint32 + Mode uint16 + Nlink uint16 + Uid uint32 + Gid uint32 + Rdev uint32 + Atimespec Timespec + Mtimespec Timespec + Ctimespec Timespec + Size int64 + Blocks int64 + Blksize uint32 + Flags uint32 + Gen uint32 + Lspare int32 + Birthtimespec Timespec +} + +type Statfs_t struct { + Version uint32 + Type uint32 + Flags uint64 + Bsize uint64 + Iosize uint64 + Blocks uint64 + Bfree uint64 + Bavail int64 + Files uint64 + Ffree int64 + Syncwrites uint64 + Asyncwrites uint64 + Syncreads uint64 + Asyncreads uint64 + Spare [10]uint64 + Namemax uint32 + Owner uint32 + Fsid Fsid + Charspare [80]int8 + Fstypename [16]int8 + Mntfromname [88]int8 + Mntonname [88]int8 +} + +type Flock_t struct { + Start int64 + Len int64 + Pid int32 + Type int16 + Whence int16 + Sysid int32 + Pad_cgo_0 [4]byte +} + +type Dirent struct { + Fileno uint32 + Reclen uint16 + Type uint8 + Namlen uint8 + Name [256]int8 +} + +type Fsid struct { + Val [2]int32 +} + +const ( + FADV_NORMAL = 0x0 + FADV_RANDOM = 0x1 + FADV_SEQUENTIAL = 0x2 + FADV_WILLNEED = 0x3 + FADV_DONTNEED = 0x4 + FADV_NOREUSE = 0x5 +) + +type RawSockaddrInet4 struct { + Len uint8 + Family uint8 + Port uint16 + Addr [4]byte /* in_addr */ + Zero [8]int8 +} + +type RawSockaddrInet6 struct { + Len uint8 + Family uint8 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +type RawSockaddrUnix struct { + Len uint8 + Family uint8 + Path [104]int8 +} + +type RawSockaddrDatalink struct { + Len uint8 + Family uint8 + Index uint16 + Type uint8 + Nlen uint8 + Alen uint8 + Slen uint8 + Data [46]int8 +} + +type RawSockaddr struct { + Len uint8 + Family uint8 + Data 
[14]int8 +} + +type RawSockaddrAny struct { + Addr RawSockaddr + Pad [92]int8 +} + +type _Socklen uint32 + +type Linger struct { + Onoff int32 + Linger int32 +} + +type Iovec struct { + Base *byte + Len uint64 +} + +type IPMreq struct { + Multiaddr [4]byte /* in_addr */ + Interface [4]byte /* in_addr */ +} + +type IPMreqn struct { + Multiaddr [4]byte /* in_addr */ + Address [4]byte /* in_addr */ + Ifindex int32 +} + +type IPv6Mreq struct { + Multiaddr [16]byte /* in6_addr */ + Interface uint32 +} + +type Msghdr struct { + Name *byte + Namelen uint32 + Pad_cgo_0 [4]byte + Iov *Iovec + Iovlen int32 + Pad_cgo_1 [4]byte + Control *byte + Controllen uint32 + Flags int32 +} + +type Cmsghdr struct { + Len uint32 + Level int32 + Type int32 +} + +type Inet6Pktinfo struct { + Addr [16]byte /* in6_addr */ + Ifindex uint32 +} + +type IPv6MTUInfo struct { + Addr RawSockaddrInet6 + Mtu uint32 +} + +type ICMPv6Filter struct { + Filt [8]uint32 +} + +const ( + SizeofSockaddrInet4 = 0x10 + SizeofSockaddrInet6 = 0x1c + SizeofSockaddrAny = 0x6c + SizeofSockaddrUnix = 0x6a + SizeofSockaddrDatalink = 0x36 + SizeofLinger = 0x8 + SizeofIPMreq = 0x8 + SizeofIPMreqn = 0xc + SizeofIPv6Mreq = 0x14 + SizeofMsghdr = 0x30 + SizeofCmsghdr = 0xc + SizeofInet6Pktinfo = 0x14 + SizeofIPv6MTUInfo = 0x20 + SizeofICMPv6Filter = 0x20 +) + +const ( + PTRACE_TRACEME = 0x0 + PTRACE_CONT = 0x7 + PTRACE_KILL = 0x8 +) + +type Kevent_t struct { + Ident uint64 + Filter int16 + Flags uint16 + Fflags uint32 + Data int64 + Udata *byte +} + +type FdSet struct { + X__fds_bits [16]uint64 +} + +const ( + sizeofIfMsghdr = 0xa8 + SizeofIfMsghdr = 0xa8 + sizeofIfData = 0x98 + SizeofIfData = 0x98 + SizeofIfaMsghdr = 0x14 + SizeofIfmaMsghdr = 0x10 + SizeofIfAnnounceMsghdr = 0x18 + SizeofRtMsghdr = 0x98 + SizeofRtMetrics = 0x70 +) + +type ifMsghdr struct { + Msglen uint16 + Version uint8 + Type uint8 + Addrs int32 + Flags int32 + Index uint16 + Pad_cgo_0 [2]byte + Data ifData +} + +type IfMsghdr struct { + Msglen uint16 + Version uint8 + Type uint8 + Addrs int32 + Flags int32 + Index uint16 + Pad_cgo_0 [2]byte + Data IfData +} + +type ifData struct { + Type uint8 + Physical uint8 + Addrlen uint8 + Hdrlen uint8 + Link_state uint8 + Vhid uint8 + Baudrate_pf uint8 + Datalen uint8 + Mtu uint64 + Metric uint64 + Baudrate uint64 + Ipackets uint64 + Ierrors uint64 + Opackets uint64 + Oerrors uint64 + Collisions uint64 + Ibytes uint64 + Obytes uint64 + Imcasts uint64 + Omcasts uint64 + Iqdrops uint64 + Noproto uint64 + Hwassist uint64 + Epoch int64 + Lastchange Timeval +} + +type IfData struct { + Type uint8 + Physical uint8 + Addrlen uint8 + Hdrlen uint8 + Link_state uint8 + Spare_char1 uint8 + Spare_char2 uint8 + Datalen uint8 + Mtu uint64 + Metric uint64 + Baudrate uint64 + Ipackets uint64 + Ierrors uint64 + Opackets uint64 + Oerrors uint64 + Collisions uint64 + Ibytes uint64 + Obytes uint64 + Imcasts uint64 + Omcasts uint64 + Iqdrops uint64 + Noproto uint64 + Hwassist uint64 + Epoch int64 + Lastchange Timeval +} + +type IfaMsghdr struct { + Msglen uint16 + Version uint8 + Type uint8 + Addrs int32 + Flags int32 + Index uint16 + Pad_cgo_0 [2]byte + Metric int32 +} + +type IfmaMsghdr struct { + Msglen uint16 + Version uint8 + Type uint8 + Addrs int32 + Flags int32 + Index uint16 + Pad_cgo_0 [2]byte +} + +type IfAnnounceMsghdr struct { + Msglen uint16 + Version uint8 + Type uint8 + Index uint16 + Name [16]int8 + What uint16 +} + +type RtMsghdr struct { + Msglen uint16 + Version uint8 + Type uint8 + Index uint16 + Pad_cgo_0 [2]byte + Flags int32 + Addrs 
int32 + Pid int32 + Seq int32 + Errno int32 + Fmask int32 + Inits uint64 + Rmx RtMetrics +} + +type RtMetrics struct { + Locks uint64 + Mtu uint64 + Hopcount uint64 + Expire uint64 + Recvpipe uint64 + Sendpipe uint64 + Ssthresh uint64 + Rtt uint64 + Rttvar uint64 + Pksent uint64 + Weight uint64 + Filler [3]uint64 +} + +const ( + SizeofBpfVersion = 0x4 + SizeofBpfStat = 0x8 + SizeofBpfZbuf = 0x18 + SizeofBpfProgram = 0x10 + SizeofBpfInsn = 0x8 + SizeofBpfHdr = 0x20 + SizeofBpfZbufHeader = 0x20 +) + +type BpfVersion struct { + Major uint16 + Minor uint16 +} + +type BpfStat struct { + Recv uint32 + Drop uint32 +} + +type BpfZbuf struct { + Bufa *byte + Bufb *byte + Buflen uint64 +} + +type BpfProgram struct { + Len uint32 + Pad_cgo_0 [4]byte + Insns *BpfInsn +} + +type BpfInsn struct { + Code uint16 + Jt uint8 + Jf uint8 + K uint32 +} + +type BpfHdr struct { + Tstamp Timeval + Caplen uint32 + Datalen uint32 + Hdrlen uint16 + Pad_cgo_0 [6]byte +} + +type BpfZbufHeader struct { + Kernel_gen uint32 + Kernel_len uint32 + User_gen uint32 + X_bzh_pad [5]uint32 +} + +type Termios struct { + Iflag uint32 + Oflag uint32 + Cflag uint32 + Lflag uint32 + Cc [20]uint8 + Ispeed uint32 + Ospeed uint32 +} diff --git a/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/ztypes_freebsd_arm.go b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/ztypes_freebsd_arm.go new file mode 100644 index 000000000..5472b5428 --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/ztypes_freebsd_arm.go @@ -0,0 +1,497 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs -- -fsigned-char types_freebsd.go + +// +build arm,freebsd + +package unix + +const ( + sizeofPtr = 0x4 + sizeofShort = 0x2 + sizeofInt = 0x4 + sizeofLong = 0x4 + sizeofLongLong = 0x8 +) + +type ( + _C_short int16 + _C_int int32 + _C_long int32 + _C_long_long int64 +) + +type Timespec struct { + Sec int64 + Nsec int32 + Pad_cgo_0 [4]byte +} + +type Timeval struct { + Sec int64 + Usec int32 + Pad_cgo_0 [4]byte +} + +type Rusage struct { + Utime Timeval + Stime Timeval + Maxrss int32 + Ixrss int32 + Idrss int32 + Isrss int32 + Minflt int32 + Majflt int32 + Nswap int32 + Inblock int32 + Oublock int32 + Msgsnd int32 + Msgrcv int32 + Nsignals int32 + Nvcsw int32 + Nivcsw int32 +} + +type Rlimit struct { + Cur int64 + Max int64 +} + +type _Gid_t uint32 + +const ( + S_IFMT = 0xf000 + S_IFIFO = 0x1000 + S_IFCHR = 0x2000 + S_IFDIR = 0x4000 + S_IFBLK = 0x6000 + S_IFREG = 0x8000 + S_IFLNK = 0xa000 + S_IFSOCK = 0xc000 + S_ISUID = 0x800 + S_ISGID = 0x400 + S_ISVTX = 0x200 + S_IRUSR = 0x100 + S_IWUSR = 0x80 + S_IXUSR = 0x40 +) + +type Stat_t struct { + Dev uint32 + Ino uint32 + Mode uint16 + Nlink uint16 + Uid uint32 + Gid uint32 + Rdev uint32 + Atimespec Timespec + Mtimespec Timespec + Ctimespec Timespec + Size int64 + Blocks int64 + Blksize uint32 + Flags uint32 + Gen uint32 + Lspare int32 + Birthtimespec Timespec +} + +type Statfs_t struct { + Version uint32 + Type uint32 + Flags uint64 + Bsize uint64 + Iosize uint64 + Blocks uint64 + Bfree uint64 + Bavail int64 + Files uint64 + Ffree int64 + Syncwrites uint64 + Asyncwrites uint64 + Syncreads uint64 + Asyncreads uint64 + Spare [10]uint64 + Namemax uint32 + Owner uint32 + Fsid Fsid + Charspare [80]int8 + Fstypename [16]int8 + Mntfromname [88]int8 + Mntonname [88]int8 +} + +type Flock_t struct { + Start int64 + Len int64 + Pid int32 + Type int16 + Whence int16 + Sysid int32 + Pad_cgo_0 [4]byte +} + +type Dirent struct { + Fileno uint32 + 
Reclen uint16 + Type uint8 + Namlen uint8 + Name [256]int8 +} + +type Fsid struct { + Val [2]int32 +} + +type RawSockaddrInet4 struct { + Len uint8 + Family uint8 + Port uint16 + Addr [4]byte /* in_addr */ + Zero [8]int8 +} + +type RawSockaddrInet6 struct { + Len uint8 + Family uint8 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +type RawSockaddrUnix struct { + Len uint8 + Family uint8 + Path [104]int8 +} + +type RawSockaddrDatalink struct { + Len uint8 + Family uint8 + Index uint16 + Type uint8 + Nlen uint8 + Alen uint8 + Slen uint8 + Data [46]int8 +} + +type RawSockaddr struct { + Len uint8 + Family uint8 + Data [14]int8 +} + +type RawSockaddrAny struct { + Addr RawSockaddr + Pad [92]int8 +} + +type _Socklen uint32 + +type Linger struct { + Onoff int32 + Linger int32 +} + +type Iovec struct { + Base *byte + Len uint32 +} + +type IPMreq struct { + Multiaddr [4]byte /* in_addr */ + Interface [4]byte /* in_addr */ +} + +type IPMreqn struct { + Multiaddr [4]byte /* in_addr */ + Address [4]byte /* in_addr */ + Ifindex int32 +} + +type IPv6Mreq struct { + Multiaddr [16]byte /* in6_addr */ + Interface uint32 +} + +type Msghdr struct { + Name *byte + Namelen uint32 + Iov *Iovec + Iovlen int32 + Control *byte + Controllen uint32 + Flags int32 +} + +type Cmsghdr struct { + Len uint32 + Level int32 + Type int32 +} + +type Inet6Pktinfo struct { + Addr [16]byte /* in6_addr */ + Ifindex uint32 +} + +type IPv6MTUInfo struct { + Addr RawSockaddrInet6 + Mtu uint32 +} + +type ICMPv6Filter struct { + Filt [8]uint32 +} + +const ( + SizeofSockaddrInet4 = 0x10 + SizeofSockaddrInet6 = 0x1c + SizeofSockaddrAny = 0x6c + SizeofSockaddrUnix = 0x6a + SizeofSockaddrDatalink = 0x36 + SizeofLinger = 0x8 + SizeofIPMreq = 0x8 + SizeofIPMreqn = 0xc + SizeofIPv6Mreq = 0x14 + SizeofMsghdr = 0x1c + SizeofCmsghdr = 0xc + SizeofInet6Pktinfo = 0x14 + SizeofIPv6MTUInfo = 0x20 + SizeofICMPv6Filter = 0x20 +) + +const ( + PTRACE_TRACEME = 0x0 + PTRACE_CONT = 0x7 + PTRACE_KILL = 0x8 +) + +type Kevent_t struct { + Ident uint32 + Filter int16 + Flags uint16 + Fflags uint32 + Data int32 + Udata *byte +} + +type FdSet struct { + X__fds_bits [32]uint32 +} + +const ( + sizeofIfMsghdr = 0x70 + SizeofIfMsghdr = 0x70 + sizeofIfData = 0x60 + SizeofIfData = 0x60 + SizeofIfaMsghdr = 0x14 + SizeofIfmaMsghdr = 0x10 + SizeofIfAnnounceMsghdr = 0x18 + SizeofRtMsghdr = 0x5c + SizeofRtMetrics = 0x38 +) + +type ifMsghdr struct { + Msglen uint16 + Version uint8 + Type uint8 + Addrs int32 + Flags int32 + Index uint16 + Pad_cgo_0 [2]byte + Data ifData +} + +type IfMsghdr struct { + Msglen uint16 + Version uint8 + Type uint8 + Addrs int32 + Flags int32 + Index uint16 + Pad_cgo_0 [2]byte + Data IfData +} + +type ifData struct { + Type uint8 + Physical uint8 + Addrlen uint8 + Hdrlen uint8 + Link_state uint8 + Vhid uint8 + Baudrate_pf uint8 + Datalen uint8 + Mtu uint32 + Metric uint32 + Baudrate uint32 + Ipackets uint32 + Ierrors uint32 + Opackets uint32 + Oerrors uint32 + Collisions uint32 + Ibytes uint32 + Obytes uint32 + Imcasts uint32 + Omcasts uint32 + Iqdrops uint32 + Noproto uint32 + Hwassist uint64 + Epoch int64 + Lastchange Timeval +} + +type IfData struct { + Type uint8 + Physical uint8 + Addrlen uint8 + Hdrlen uint8 + Link_state uint8 + Spare_char1 uint8 + Spare_char2 uint8 + Datalen uint8 + Mtu uint32 + Metric uint32 + Baudrate uint32 + Ipackets uint32 + Ierrors uint32 + Opackets uint32 + Oerrors uint32 + Collisions uint32 + Ibytes uint32 + Obytes uint32 + Imcasts uint32 + Omcasts uint32 + Iqdrops uint32 + 
Noproto uint32 + Hwassist uint32 + Pad_cgo_0 [4]byte + Epoch int64 + Lastchange Timeval +} + +type IfaMsghdr struct { + Msglen uint16 + Version uint8 + Type uint8 + Addrs int32 + Flags int32 + Index uint16 + Pad_cgo_0 [2]byte + Metric int32 +} + +type IfmaMsghdr struct { + Msglen uint16 + Version uint8 + Type uint8 + Addrs int32 + Flags int32 + Index uint16 + Pad_cgo_0 [2]byte +} + +type IfAnnounceMsghdr struct { + Msglen uint16 + Version uint8 + Type uint8 + Index uint16 + Name [16]int8 + What uint16 +} + +type RtMsghdr struct { + Msglen uint16 + Version uint8 + Type uint8 + Index uint16 + Pad_cgo_0 [2]byte + Flags int32 + Addrs int32 + Pid int32 + Seq int32 + Errno int32 + Fmask int32 + Inits uint32 + Rmx RtMetrics +} + +type RtMetrics struct { + Locks uint32 + Mtu uint32 + Hopcount uint32 + Expire uint32 + Recvpipe uint32 + Sendpipe uint32 + Ssthresh uint32 + Rtt uint32 + Rttvar uint32 + Pksent uint32 + Weight uint32 + Filler [3]uint32 +} + +const ( + SizeofBpfVersion = 0x4 + SizeofBpfStat = 0x8 + SizeofBpfZbuf = 0xc + SizeofBpfProgram = 0x8 + SizeofBpfInsn = 0x8 + SizeofBpfHdr = 0x20 + SizeofBpfZbufHeader = 0x20 +) + +type BpfVersion struct { + Major uint16 + Minor uint16 +} + +type BpfStat struct { + Recv uint32 + Drop uint32 +} + +type BpfZbuf struct { + Bufa *byte + Bufb *byte + Buflen uint32 +} + +type BpfProgram struct { + Len uint32 + Insns *BpfInsn +} + +type BpfInsn struct { + Code uint16 + Jt uint8 + Jf uint8 + K uint32 +} + +type BpfHdr struct { + Tstamp Timeval + Caplen uint32 + Datalen uint32 + Hdrlen uint16 + Pad_cgo_0 [6]byte +} + +type BpfZbufHeader struct { + Kernel_gen uint32 + Kernel_len uint32 + User_gen uint32 + X_bzh_pad [5]uint32 +} + +type Termios struct { + Iflag uint32 + Oflag uint32 + Cflag uint32 + Lflag uint32 + Cc [20]uint8 + Ispeed uint32 + Ospeed uint32 +} diff --git a/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/ztypes_linux_386.go b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/ztypes_linux_386.go new file mode 100644 index 000000000..cf5db0e1b --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/ztypes_linux_386.go @@ -0,0 +1,590 @@ +// +build 386,linux +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs types_linux.go + +package unix + +const ( + sizeofPtr = 0x4 + sizeofShort = 0x2 + sizeofInt = 0x4 + sizeofLong = 0x4 + sizeofLongLong = 0x8 + PathMax = 0x1000 +) + +type ( + _C_short int16 + _C_int int32 + _C_long int32 + _C_long_long int64 +) + +type Timespec struct { + Sec int32 + Nsec int32 +} + +type Timeval struct { + Sec int32 + Usec int32 +} + +type Timex struct { + Modes uint32 + Offset int32 + Freq int32 + Maxerror int32 + Esterror int32 + Status int32 + Constant int32 + Precision int32 + Tolerance int32 + Time Timeval + Tick int32 + Ppsfreq int32 + Jitter int32 + Shift int32 + Stabil int32 + Jitcnt int32 + Calcnt int32 + Errcnt int32 + Stbcnt int32 + Tai int32 + Pad_cgo_0 [44]byte +} + +type Time_t int32 + +type Tms struct { + Utime int32 + Stime int32 + Cutime int32 + Cstime int32 +} + +type Utimbuf struct { + Actime int32 + Modtime int32 +} + +type Rusage struct { + Utime Timeval + Stime Timeval + Maxrss int32 + Ixrss int32 + Idrss int32 + Isrss int32 + Minflt int32 + Majflt int32 + Nswap int32 + Inblock int32 + Oublock int32 + Msgsnd int32 + Msgrcv int32 + Nsignals int32 + Nvcsw int32 + Nivcsw int32 +} + +type Rlimit struct { + Cur uint64 + Max uint64 +} + +type _Gid_t uint32 + +type Stat_t struct { + Dev uint64 + X__pad1 uint16 + Pad_cgo_0 
[2]byte + X__st_ino uint32 + Mode uint32 + Nlink uint32 + Uid uint32 + Gid uint32 + Rdev uint64 + X__pad2 uint16 + Pad_cgo_1 [2]byte + Size int64 + Blksize int32 + Blocks int64 + Atim Timespec + Mtim Timespec + Ctim Timespec + Ino uint64 +} + +type Statfs_t struct { + Type int32 + Bsize int32 + Blocks uint64 + Bfree uint64 + Bavail uint64 + Files uint64 + Ffree uint64 + Fsid Fsid + Namelen int32 + Frsize int32 + Flags int32 + Spare [4]int32 +} + +type Dirent struct { + Ino uint64 + Off int64 + Reclen uint16 + Type uint8 + Name [256]int8 + Pad_cgo_0 [1]byte +} + +type Fsid struct { + X__val [2]int32 +} + +type Flock_t struct { + Type int16 + Whence int16 + Start int64 + Len int64 + Pid int32 +} + +type RawSockaddrInet4 struct { + Family uint16 + Port uint16 + Addr [4]byte /* in_addr */ + Zero [8]uint8 +} + +type RawSockaddrInet6 struct { + Family uint16 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +type RawSockaddrUnix struct { + Family uint16 + Path [108]int8 +} + +type RawSockaddrLinklayer struct { + Family uint16 + Protocol uint16 + Ifindex int32 + Hatype uint16 + Pkttype uint8 + Halen uint8 + Addr [8]uint8 +} + +type RawSockaddrNetlink struct { + Family uint16 + Pad uint16 + Pid uint32 + Groups uint32 +} + +type RawSockaddr struct { + Family uint16 + Data [14]int8 +} + +type RawSockaddrAny struct { + Addr RawSockaddr + Pad [96]int8 +} + +type _Socklen uint32 + +type Linger struct { + Onoff int32 + Linger int32 +} + +type Iovec struct { + Base *byte + Len uint32 +} + +type IPMreq struct { + Multiaddr [4]byte /* in_addr */ + Interface [4]byte /* in_addr */ +} + +type IPMreqn struct { + Multiaddr [4]byte /* in_addr */ + Address [4]byte /* in_addr */ + Ifindex int32 +} + +type IPv6Mreq struct { + Multiaddr [16]byte /* in6_addr */ + Interface uint32 +} + +type Msghdr struct { + Name *byte + Namelen uint32 + Iov *Iovec + Iovlen uint32 + Control *byte + Controllen uint32 + Flags int32 +} + +type Cmsghdr struct { + Len uint32 + Level int32 + Type int32 + X__cmsg_data [0]uint8 +} + +type Inet4Pktinfo struct { + Ifindex int32 + Spec_dst [4]byte /* in_addr */ + Addr [4]byte /* in_addr */ +} + +type Inet6Pktinfo struct { + Addr [16]byte /* in6_addr */ + Ifindex uint32 +} + +type IPv6MTUInfo struct { + Addr RawSockaddrInet6 + Mtu uint32 +} + +type ICMPv6Filter struct { + Data [8]uint32 +} + +type Ucred struct { + Pid int32 + Uid uint32 + Gid uint32 +} + +type TCPInfo struct { + State uint8 + Ca_state uint8 + Retransmits uint8 + Probes uint8 + Backoff uint8 + Options uint8 + Pad_cgo_0 [2]byte + Rto uint32 + Ato uint32 + Snd_mss uint32 + Rcv_mss uint32 + Unacked uint32 + Sacked uint32 + Lost uint32 + Retrans uint32 + Fackets uint32 + Last_data_sent uint32 + Last_ack_sent uint32 + Last_data_recv uint32 + Last_ack_recv uint32 + Pmtu uint32 + Rcv_ssthresh uint32 + Rtt uint32 + Rttvar uint32 + Snd_ssthresh uint32 + Snd_cwnd uint32 + Advmss uint32 + Reordering uint32 + Rcv_rtt uint32 + Rcv_space uint32 + Total_retrans uint32 +} + +const ( + SizeofSockaddrInet4 = 0x10 + SizeofSockaddrInet6 = 0x1c + SizeofSockaddrAny = 0x70 + SizeofSockaddrUnix = 0x6e + SizeofSockaddrLinklayer = 0x14 + SizeofSockaddrNetlink = 0xc + SizeofLinger = 0x8 + SizeofIPMreq = 0x8 + SizeofIPMreqn = 0xc + SizeofIPv6Mreq = 0x14 + SizeofMsghdr = 0x1c + SizeofCmsghdr = 0xc + SizeofInet4Pktinfo = 0xc + SizeofInet6Pktinfo = 0x14 + SizeofIPv6MTUInfo = 0x20 + SizeofICMPv6Filter = 0x20 + SizeofUcred = 0xc + SizeofTCPInfo = 0x68 +) + +const ( + IFA_UNSPEC = 0x0 + IFA_ADDRESS = 0x1 + IFA_LOCAL = 0x2 + 
IFA_LABEL = 0x3 + IFA_BROADCAST = 0x4 + IFA_ANYCAST = 0x5 + IFA_CACHEINFO = 0x6 + IFA_MULTICAST = 0x7 + IFLA_UNSPEC = 0x0 + IFLA_ADDRESS = 0x1 + IFLA_BROADCAST = 0x2 + IFLA_IFNAME = 0x3 + IFLA_MTU = 0x4 + IFLA_LINK = 0x5 + IFLA_QDISC = 0x6 + IFLA_STATS = 0x7 + IFLA_COST = 0x8 + IFLA_PRIORITY = 0x9 + IFLA_MASTER = 0xa + IFLA_WIRELESS = 0xb + IFLA_PROTINFO = 0xc + IFLA_TXQLEN = 0xd + IFLA_MAP = 0xe + IFLA_WEIGHT = 0xf + IFLA_OPERSTATE = 0x10 + IFLA_LINKMODE = 0x11 + IFLA_LINKINFO = 0x12 + IFLA_NET_NS_PID = 0x13 + IFLA_IFALIAS = 0x14 + IFLA_MAX = 0x1d + RT_SCOPE_UNIVERSE = 0x0 + RT_SCOPE_SITE = 0xc8 + RT_SCOPE_LINK = 0xfd + RT_SCOPE_HOST = 0xfe + RT_SCOPE_NOWHERE = 0xff + RT_TABLE_UNSPEC = 0x0 + RT_TABLE_COMPAT = 0xfc + RT_TABLE_DEFAULT = 0xfd + RT_TABLE_MAIN = 0xfe + RT_TABLE_LOCAL = 0xff + RT_TABLE_MAX = 0xffffffff + RTA_UNSPEC = 0x0 + RTA_DST = 0x1 + RTA_SRC = 0x2 + RTA_IIF = 0x3 + RTA_OIF = 0x4 + RTA_GATEWAY = 0x5 + RTA_PRIORITY = 0x6 + RTA_PREFSRC = 0x7 + RTA_METRICS = 0x8 + RTA_MULTIPATH = 0x9 + RTA_FLOW = 0xb + RTA_CACHEINFO = 0xc + RTA_TABLE = 0xf + RTN_UNSPEC = 0x0 + RTN_UNICAST = 0x1 + RTN_LOCAL = 0x2 + RTN_BROADCAST = 0x3 + RTN_ANYCAST = 0x4 + RTN_MULTICAST = 0x5 + RTN_BLACKHOLE = 0x6 + RTN_UNREACHABLE = 0x7 + RTN_PROHIBIT = 0x8 + RTN_THROW = 0x9 + RTN_NAT = 0xa + RTN_XRESOLVE = 0xb + RTNLGRP_NONE = 0x0 + RTNLGRP_LINK = 0x1 + RTNLGRP_NOTIFY = 0x2 + RTNLGRP_NEIGH = 0x3 + RTNLGRP_TC = 0x4 + RTNLGRP_IPV4_IFADDR = 0x5 + RTNLGRP_IPV4_MROUTE = 0x6 + RTNLGRP_IPV4_ROUTE = 0x7 + RTNLGRP_IPV4_RULE = 0x8 + RTNLGRP_IPV6_IFADDR = 0x9 + RTNLGRP_IPV6_MROUTE = 0xa + RTNLGRP_IPV6_ROUTE = 0xb + RTNLGRP_IPV6_IFINFO = 0xc + RTNLGRP_IPV6_PREFIX = 0x12 + RTNLGRP_IPV6_RULE = 0x13 + RTNLGRP_ND_USEROPT = 0x14 + SizeofNlMsghdr = 0x10 + SizeofNlMsgerr = 0x14 + SizeofRtGenmsg = 0x1 + SizeofNlAttr = 0x4 + SizeofRtAttr = 0x4 + SizeofIfInfomsg = 0x10 + SizeofIfAddrmsg = 0x8 + SizeofRtMsg = 0xc + SizeofRtNexthop = 0x8 +) + +type NlMsghdr struct { + Len uint32 + Type uint16 + Flags uint16 + Seq uint32 + Pid uint32 +} + +type NlMsgerr struct { + Error int32 + Msg NlMsghdr +} + +type RtGenmsg struct { + Family uint8 +} + +type NlAttr struct { + Len uint16 + Type uint16 +} + +type RtAttr struct { + Len uint16 + Type uint16 +} + +type IfInfomsg struct { + Family uint8 + X__ifi_pad uint8 + Type uint16 + Index int32 + Flags uint32 + Change uint32 +} + +type IfAddrmsg struct { + Family uint8 + Prefixlen uint8 + Flags uint8 + Scope uint8 + Index uint32 +} + +type RtMsg struct { + Family uint8 + Dst_len uint8 + Src_len uint8 + Tos uint8 + Table uint8 + Protocol uint8 + Scope uint8 + Type uint8 + Flags uint32 +} + +type RtNexthop struct { + Len uint16 + Flags uint8 + Hops uint8 + Ifindex int32 +} + +const ( + SizeofSockFilter = 0x8 + SizeofSockFprog = 0x8 +) + +type SockFilter struct { + Code uint16 + Jt uint8 + Jf uint8 + K uint32 +} + +type SockFprog struct { + Len uint16 + Pad_cgo_0 [2]byte + Filter *SockFilter +} + +type InotifyEvent struct { + Wd int32 + Mask uint32 + Cookie uint32 + Len uint32 + Name [0]int8 +} + +const SizeofInotifyEvent = 0x10 + +type PtraceRegs struct { + Ebx int32 + Ecx int32 + Edx int32 + Esi int32 + Edi int32 + Ebp int32 + Eax int32 + Xds int32 + Xes int32 + Xfs int32 + Xgs int32 + Orig_eax int32 + Eip int32 + Xcs int32 + Eflags int32 + Esp int32 + Xss int32 +} + +type FdSet struct { + Bits [32]int32 +} + +type Sysinfo_t struct { + Uptime int32 + Loads [3]uint32 + Totalram uint32 + Freeram uint32 + Sharedram uint32 + Bufferram uint32 + Totalswap uint32 + Freeswap uint32 + Procs uint16 + Pad 
uint16 + Totalhigh uint32 + Freehigh uint32 + Unit uint32 + X_f [8]int8 +} + +type Utsname struct { + Sysname [65]int8 + Nodename [65]int8 + Release [65]int8 + Version [65]int8 + Machine [65]int8 + Domainname [65]int8 +} + +type Ustat_t struct { + Tfree int32 + Tinode uint32 + Fname [6]int8 + Fpack [6]int8 +} + +type EpollEvent struct { + Events uint32 + Fd int32 + Pad int32 +} + +const ( + AT_FDCWD = -0x64 + AT_SYMLINK_NOFOLLOW = 0x100 + AT_REMOVEDIR = 0x200 +) + +type Termios struct { + Iflag uint32 + Oflag uint32 + Cflag uint32 + Lflag uint32 + Line uint8 + Cc [19]uint8 + Ispeed uint32 + Ospeed uint32 +} diff --git a/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/ztypes_linux_amd64.go b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/ztypes_linux_amd64.go new file mode 100644 index 000000000..ac27784ab --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/ztypes_linux_amd64.go @@ -0,0 +1,608 @@ +// +build amd64,linux +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs types_linux.go + +package unix + +const ( + sizeofPtr = 0x8 + sizeofShort = 0x2 + sizeofInt = 0x4 + sizeofLong = 0x8 + sizeofLongLong = 0x8 + PathMax = 0x1000 +) + +type ( + _C_short int16 + _C_int int32 + _C_long int64 + _C_long_long int64 +) + +type Timespec struct { + Sec int64 + Nsec int64 +} + +type Timeval struct { + Sec int64 + Usec int64 +} + +type Timex struct { + Modes uint32 + Pad_cgo_0 [4]byte + Offset int64 + Freq int64 + Maxerror int64 + Esterror int64 + Status int32 + Pad_cgo_1 [4]byte + Constant int64 + Precision int64 + Tolerance int64 + Time Timeval + Tick int64 + Ppsfreq int64 + Jitter int64 + Shift int32 + Pad_cgo_2 [4]byte + Stabil int64 + Jitcnt int64 + Calcnt int64 + Errcnt int64 + Stbcnt int64 + Tai int32 + Pad_cgo_3 [44]byte +} + +type Time_t int64 + +type Tms struct { + Utime int64 + Stime int64 + Cutime int64 + Cstime int64 +} + +type Utimbuf struct { + Actime int64 + Modtime int64 +} + +type Rusage struct { + Utime Timeval + Stime Timeval + Maxrss int64 + Ixrss int64 + Idrss int64 + Isrss int64 + Minflt int64 + Majflt int64 + Nswap int64 + Inblock int64 + Oublock int64 + Msgsnd int64 + Msgrcv int64 + Nsignals int64 + Nvcsw int64 + Nivcsw int64 +} + +type Rlimit struct { + Cur uint64 + Max uint64 +} + +type _Gid_t uint32 + +type Stat_t struct { + Dev uint64 + Ino uint64 + Nlink uint64 + Mode uint32 + Uid uint32 + Gid uint32 + X__pad0 int32 + Rdev uint64 + Size int64 + Blksize int64 + Blocks int64 + Atim Timespec + Mtim Timespec + Ctim Timespec + X__unused [3]int64 +} + +type Statfs_t struct { + Type int64 + Bsize int64 + Blocks uint64 + Bfree uint64 + Bavail uint64 + Files uint64 + Ffree uint64 + Fsid Fsid + Namelen int64 + Frsize int64 + Flags int64 + Spare [4]int64 +} + +type Dirent struct { + Ino uint64 + Off int64 + Reclen uint16 + Type uint8 + Name [256]int8 + Pad_cgo_0 [5]byte +} + +type Fsid struct { + X__val [2]int32 +} + +type Flock_t struct { + Type int16 + Whence int16 + Pad_cgo_0 [4]byte + Start int64 + Len int64 + Pid int32 + Pad_cgo_1 [4]byte +} + +type RawSockaddrInet4 struct { + Family uint16 + Port uint16 + Addr [4]byte /* in_addr */ + Zero [8]uint8 +} + +type RawSockaddrInet6 struct { + Family uint16 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +type RawSockaddrUnix struct { + Family uint16 + Path [108]int8 +} + +type RawSockaddrLinklayer struct { + Family uint16 + Protocol uint16 + Ifindex int32 + Hatype uint16 + Pkttype uint8 + Halen uint8 + Addr 
[8]uint8 +} + +type RawSockaddrNetlink struct { + Family uint16 + Pad uint16 + Pid uint32 + Groups uint32 +} + +type RawSockaddr struct { + Family uint16 + Data [14]int8 +} + +type RawSockaddrAny struct { + Addr RawSockaddr + Pad [96]int8 +} + +type _Socklen uint32 + +type Linger struct { + Onoff int32 + Linger int32 +} + +type Iovec struct { + Base *byte + Len uint64 +} + +type IPMreq struct { + Multiaddr [4]byte /* in_addr */ + Interface [4]byte /* in_addr */ +} + +type IPMreqn struct { + Multiaddr [4]byte /* in_addr */ + Address [4]byte /* in_addr */ + Ifindex int32 +} + +type IPv6Mreq struct { + Multiaddr [16]byte /* in6_addr */ + Interface uint32 +} + +type Msghdr struct { + Name *byte + Namelen uint32 + Pad_cgo_0 [4]byte + Iov *Iovec + Iovlen uint64 + Control *byte + Controllen uint64 + Flags int32 + Pad_cgo_1 [4]byte +} + +type Cmsghdr struct { + Len uint64 + Level int32 + Type int32 + X__cmsg_data [0]uint8 +} + +type Inet4Pktinfo struct { + Ifindex int32 + Spec_dst [4]byte /* in_addr */ + Addr [4]byte /* in_addr */ +} + +type Inet6Pktinfo struct { + Addr [16]byte /* in6_addr */ + Ifindex uint32 +} + +type IPv6MTUInfo struct { + Addr RawSockaddrInet6 + Mtu uint32 +} + +type ICMPv6Filter struct { + Data [8]uint32 +} + +type Ucred struct { + Pid int32 + Uid uint32 + Gid uint32 +} + +type TCPInfo struct { + State uint8 + Ca_state uint8 + Retransmits uint8 + Probes uint8 + Backoff uint8 + Options uint8 + Pad_cgo_0 [2]byte + Rto uint32 + Ato uint32 + Snd_mss uint32 + Rcv_mss uint32 + Unacked uint32 + Sacked uint32 + Lost uint32 + Retrans uint32 + Fackets uint32 + Last_data_sent uint32 + Last_ack_sent uint32 + Last_data_recv uint32 + Last_ack_recv uint32 + Pmtu uint32 + Rcv_ssthresh uint32 + Rtt uint32 + Rttvar uint32 + Snd_ssthresh uint32 + Snd_cwnd uint32 + Advmss uint32 + Reordering uint32 + Rcv_rtt uint32 + Rcv_space uint32 + Total_retrans uint32 +} + +const ( + SizeofSockaddrInet4 = 0x10 + SizeofSockaddrInet6 = 0x1c + SizeofSockaddrAny = 0x70 + SizeofSockaddrUnix = 0x6e + SizeofSockaddrLinklayer = 0x14 + SizeofSockaddrNetlink = 0xc + SizeofLinger = 0x8 + SizeofIPMreq = 0x8 + SizeofIPMreqn = 0xc + SizeofIPv6Mreq = 0x14 + SizeofMsghdr = 0x38 + SizeofCmsghdr = 0x10 + SizeofInet4Pktinfo = 0xc + SizeofInet6Pktinfo = 0x14 + SizeofIPv6MTUInfo = 0x20 + SizeofICMPv6Filter = 0x20 + SizeofUcred = 0xc + SizeofTCPInfo = 0x68 +) + +const ( + IFA_UNSPEC = 0x0 + IFA_ADDRESS = 0x1 + IFA_LOCAL = 0x2 + IFA_LABEL = 0x3 + IFA_BROADCAST = 0x4 + IFA_ANYCAST = 0x5 + IFA_CACHEINFO = 0x6 + IFA_MULTICAST = 0x7 + IFLA_UNSPEC = 0x0 + IFLA_ADDRESS = 0x1 + IFLA_BROADCAST = 0x2 + IFLA_IFNAME = 0x3 + IFLA_MTU = 0x4 + IFLA_LINK = 0x5 + IFLA_QDISC = 0x6 + IFLA_STATS = 0x7 + IFLA_COST = 0x8 + IFLA_PRIORITY = 0x9 + IFLA_MASTER = 0xa + IFLA_WIRELESS = 0xb + IFLA_PROTINFO = 0xc + IFLA_TXQLEN = 0xd + IFLA_MAP = 0xe + IFLA_WEIGHT = 0xf + IFLA_OPERSTATE = 0x10 + IFLA_LINKMODE = 0x11 + IFLA_LINKINFO = 0x12 + IFLA_NET_NS_PID = 0x13 + IFLA_IFALIAS = 0x14 + IFLA_MAX = 0x1d + RT_SCOPE_UNIVERSE = 0x0 + RT_SCOPE_SITE = 0xc8 + RT_SCOPE_LINK = 0xfd + RT_SCOPE_HOST = 0xfe + RT_SCOPE_NOWHERE = 0xff + RT_TABLE_UNSPEC = 0x0 + RT_TABLE_COMPAT = 0xfc + RT_TABLE_DEFAULT = 0xfd + RT_TABLE_MAIN = 0xfe + RT_TABLE_LOCAL = 0xff + RT_TABLE_MAX = 0xffffffff + RTA_UNSPEC = 0x0 + RTA_DST = 0x1 + RTA_SRC = 0x2 + RTA_IIF = 0x3 + RTA_OIF = 0x4 + RTA_GATEWAY = 0x5 + RTA_PRIORITY = 0x6 + RTA_PREFSRC = 0x7 + RTA_METRICS = 0x8 + RTA_MULTIPATH = 0x9 + RTA_FLOW = 0xb + RTA_CACHEINFO = 0xc + RTA_TABLE = 0xf + RTN_UNSPEC = 0x0 + RTN_UNICAST = 0x1 + RTN_LOCAL = 
0x2 + RTN_BROADCAST = 0x3 + RTN_ANYCAST = 0x4 + RTN_MULTICAST = 0x5 + RTN_BLACKHOLE = 0x6 + RTN_UNREACHABLE = 0x7 + RTN_PROHIBIT = 0x8 + RTN_THROW = 0x9 + RTN_NAT = 0xa + RTN_XRESOLVE = 0xb + RTNLGRP_NONE = 0x0 + RTNLGRP_LINK = 0x1 + RTNLGRP_NOTIFY = 0x2 + RTNLGRP_NEIGH = 0x3 + RTNLGRP_TC = 0x4 + RTNLGRP_IPV4_IFADDR = 0x5 + RTNLGRP_IPV4_MROUTE = 0x6 + RTNLGRP_IPV4_ROUTE = 0x7 + RTNLGRP_IPV4_RULE = 0x8 + RTNLGRP_IPV6_IFADDR = 0x9 + RTNLGRP_IPV6_MROUTE = 0xa + RTNLGRP_IPV6_ROUTE = 0xb + RTNLGRP_IPV6_IFINFO = 0xc + RTNLGRP_IPV6_PREFIX = 0x12 + RTNLGRP_IPV6_RULE = 0x13 + RTNLGRP_ND_USEROPT = 0x14 + SizeofNlMsghdr = 0x10 + SizeofNlMsgerr = 0x14 + SizeofRtGenmsg = 0x1 + SizeofNlAttr = 0x4 + SizeofRtAttr = 0x4 + SizeofIfInfomsg = 0x10 + SizeofIfAddrmsg = 0x8 + SizeofRtMsg = 0xc + SizeofRtNexthop = 0x8 +) + +type NlMsghdr struct { + Len uint32 + Type uint16 + Flags uint16 + Seq uint32 + Pid uint32 +} + +type NlMsgerr struct { + Error int32 + Msg NlMsghdr +} + +type RtGenmsg struct { + Family uint8 +} + +type NlAttr struct { + Len uint16 + Type uint16 +} + +type RtAttr struct { + Len uint16 + Type uint16 +} + +type IfInfomsg struct { + Family uint8 + X__ifi_pad uint8 + Type uint16 + Index int32 + Flags uint32 + Change uint32 +} + +type IfAddrmsg struct { + Family uint8 + Prefixlen uint8 + Flags uint8 + Scope uint8 + Index uint32 +} + +type RtMsg struct { + Family uint8 + Dst_len uint8 + Src_len uint8 + Tos uint8 + Table uint8 + Protocol uint8 + Scope uint8 + Type uint8 + Flags uint32 +} + +type RtNexthop struct { + Len uint16 + Flags uint8 + Hops uint8 + Ifindex int32 +} + +const ( + SizeofSockFilter = 0x8 + SizeofSockFprog = 0x10 +) + +type SockFilter struct { + Code uint16 + Jt uint8 + Jf uint8 + K uint32 +} + +type SockFprog struct { + Len uint16 + Pad_cgo_0 [6]byte + Filter *SockFilter +} + +type InotifyEvent struct { + Wd int32 + Mask uint32 + Cookie uint32 + Len uint32 + Name [0]int8 +} + +const SizeofInotifyEvent = 0x10 + +type PtraceRegs struct { + R15 uint64 + R14 uint64 + R13 uint64 + R12 uint64 + Rbp uint64 + Rbx uint64 + R11 uint64 + R10 uint64 + R9 uint64 + R8 uint64 + Rax uint64 + Rcx uint64 + Rdx uint64 + Rsi uint64 + Rdi uint64 + Orig_rax uint64 + Rip uint64 + Cs uint64 + Eflags uint64 + Rsp uint64 + Ss uint64 + Fs_base uint64 + Gs_base uint64 + Ds uint64 + Es uint64 + Fs uint64 + Gs uint64 +} + +type FdSet struct { + Bits [16]int64 +} + +type Sysinfo_t struct { + Uptime int64 + Loads [3]uint64 + Totalram uint64 + Freeram uint64 + Sharedram uint64 + Bufferram uint64 + Totalswap uint64 + Freeswap uint64 + Procs uint16 + Pad uint16 + Pad_cgo_0 [4]byte + Totalhigh uint64 + Freehigh uint64 + Unit uint32 + X_f [0]int8 + Pad_cgo_1 [4]byte +} + +type Utsname struct { + Sysname [65]int8 + Nodename [65]int8 + Release [65]int8 + Version [65]int8 + Machine [65]int8 + Domainname [65]int8 +} + +type Ustat_t struct { + Tfree int32 + Pad_cgo_0 [4]byte + Tinode uint64 + Fname [6]int8 + Fpack [6]int8 + Pad_cgo_1 [4]byte +} + +type EpollEvent struct { + Events uint32 + Fd int32 + Pad int32 +} + +const ( + AT_FDCWD = -0x64 + AT_SYMLINK_NOFOLLOW = 0x100 + AT_REMOVEDIR = 0x200 +) + +type Termios struct { + Iflag uint32 + Oflag uint32 + Cflag uint32 + Lflag uint32 + Line uint8 + Cc [19]uint8 + Ispeed uint32 + Ospeed uint32 +} diff --git a/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/ztypes_linux_arm.go b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/ztypes_linux_arm.go new file mode 100644 index 000000000..b318bb851 --- /dev/null +++ 
b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/ztypes_linux_arm.go @@ -0,0 +1,579 @@ +// +build arm,linux +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs types_linux.go + +package unix + +const ( + sizeofPtr = 0x4 + sizeofShort = 0x2 + sizeofInt = 0x4 + sizeofLong = 0x4 + sizeofLongLong = 0x8 + PathMax = 0x1000 +) + +type ( + _C_short int16 + _C_int int32 + _C_long int32 + _C_long_long int64 +) + +type Timespec struct { + Sec int32 + Nsec int32 +} + +type Timeval struct { + Sec int32 + Usec int32 +} + +type Timex struct { + Modes uint32 + Offset int32 + Freq int32 + Maxerror int32 + Esterror int32 + Status int32 + Constant int32 + Precision int32 + Tolerance int32 + Time Timeval + Tick int32 + Ppsfreq int32 + Jitter int32 + Shift int32 + Stabil int32 + Jitcnt int32 + Calcnt int32 + Errcnt int32 + Stbcnt int32 + Tai int32 + Pad_cgo_0 [44]byte +} + +type Time_t int32 + +type Tms struct { + Utime int32 + Stime int32 + Cutime int32 + Cstime int32 +} + +type Utimbuf struct { + Actime int32 + Modtime int32 +} + +type Rusage struct { + Utime Timeval + Stime Timeval + Maxrss int32 + Ixrss int32 + Idrss int32 + Isrss int32 + Minflt int32 + Majflt int32 + Nswap int32 + Inblock int32 + Oublock int32 + Msgsnd int32 + Msgrcv int32 + Nsignals int32 + Nvcsw int32 + Nivcsw int32 +} + +type Rlimit struct { + Cur uint64 + Max uint64 +} + +type _Gid_t uint32 + +type Stat_t struct { + Dev uint64 + X__pad1 uint16 + Pad_cgo_0 [2]byte + X__st_ino uint32 + Mode uint32 + Nlink uint32 + Uid uint32 + Gid uint32 + Rdev uint64 + X__pad2 uint16 + Pad_cgo_1 [6]byte + Size int64 + Blksize int32 + Pad_cgo_2 [4]byte + Blocks int64 + Atim Timespec + Mtim Timespec + Ctim Timespec + Ino uint64 +} + +type Statfs_t struct { + Type int32 + Bsize int32 + Blocks uint64 + Bfree uint64 + Bavail uint64 + Files uint64 + Ffree uint64 + Fsid Fsid + Namelen int32 + Frsize int32 + Flags int32 + Spare [4]int32 + Pad_cgo_0 [4]byte +} + +type Dirent struct { + Ino uint64 + Off int64 + Reclen uint16 + Type uint8 + Name [256]uint8 + Pad_cgo_0 [5]byte +} + +type Fsid struct { + X__val [2]int32 +} + +type Flock_t struct { + Type int16 + Whence int16 + Pad_cgo_0 [4]byte + Start int64 + Len int64 + Pid int32 + Pad_cgo_1 [4]byte +} + +type RawSockaddrInet4 struct { + Family uint16 + Port uint16 + Addr [4]byte /* in_addr */ + Zero [8]uint8 +} + +type RawSockaddrInet6 struct { + Family uint16 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +type RawSockaddrUnix struct { + Family uint16 + Path [108]int8 +} + +type RawSockaddrLinklayer struct { + Family uint16 + Protocol uint16 + Ifindex int32 + Hatype uint16 + Pkttype uint8 + Halen uint8 + Addr [8]uint8 +} + +type RawSockaddrNetlink struct { + Family uint16 + Pad uint16 + Pid uint32 + Groups uint32 +} + +type RawSockaddr struct { + Family uint16 + Data [14]uint8 +} + +type RawSockaddrAny struct { + Addr RawSockaddr + Pad [96]uint8 +} + +type _Socklen uint32 + +type Linger struct { + Onoff int32 + Linger int32 +} + +type Iovec struct { + Base *byte + Len uint32 +} + +type IPMreq struct { + Multiaddr [4]byte /* in_addr */ + Interface [4]byte /* in_addr */ +} + +type IPMreqn struct { + Multiaddr [4]byte /* in_addr */ + Address [4]byte /* in_addr */ + Ifindex int32 +} + +type IPv6Mreq struct { + Multiaddr [16]byte /* in6_addr */ + Interface uint32 +} + +type Msghdr struct { + Name *byte + Namelen uint32 + Iov *Iovec + Iovlen uint32 + Control *byte + Controllen uint32 + Flags int32 +} + +type Cmsghdr struct { + Len uint32 + Level 
int32 + Type int32 + X__cmsg_data [0]uint8 +} + +type Inet4Pktinfo struct { + Ifindex int32 + Spec_dst [4]byte /* in_addr */ + Addr [4]byte /* in_addr */ +} + +type Inet6Pktinfo struct { + Addr [16]byte /* in6_addr */ + Ifindex uint32 +} + +type IPv6MTUInfo struct { + Addr RawSockaddrInet6 + Mtu uint32 +} + +type ICMPv6Filter struct { + Data [8]uint32 +} + +type Ucred struct { + Pid int32 + Uid uint32 + Gid uint32 +} + +type TCPInfo struct { + State uint8 + Ca_state uint8 + Retransmits uint8 + Probes uint8 + Backoff uint8 + Options uint8 + Pad_cgo_0 [2]byte + Rto uint32 + Ato uint32 + Snd_mss uint32 + Rcv_mss uint32 + Unacked uint32 + Sacked uint32 + Lost uint32 + Retrans uint32 + Fackets uint32 + Last_data_sent uint32 + Last_ack_sent uint32 + Last_data_recv uint32 + Last_ack_recv uint32 + Pmtu uint32 + Rcv_ssthresh uint32 + Rtt uint32 + Rttvar uint32 + Snd_ssthresh uint32 + Snd_cwnd uint32 + Advmss uint32 + Reordering uint32 + Rcv_rtt uint32 + Rcv_space uint32 + Total_retrans uint32 +} + +const ( + SizeofSockaddrInet4 = 0x10 + SizeofSockaddrInet6 = 0x1c + SizeofSockaddrAny = 0x70 + SizeofSockaddrUnix = 0x6e + SizeofSockaddrLinklayer = 0x14 + SizeofSockaddrNetlink = 0xc + SizeofLinger = 0x8 + SizeofIPMreq = 0x8 + SizeofIPMreqn = 0xc + SizeofIPv6Mreq = 0x14 + SizeofMsghdr = 0x1c + SizeofCmsghdr = 0xc + SizeofInet4Pktinfo = 0xc + SizeofInet6Pktinfo = 0x14 + SizeofIPv6MTUInfo = 0x20 + SizeofICMPv6Filter = 0x20 + SizeofUcred = 0xc + SizeofTCPInfo = 0x68 +) + +const ( + IFA_UNSPEC = 0x0 + IFA_ADDRESS = 0x1 + IFA_LOCAL = 0x2 + IFA_LABEL = 0x3 + IFA_BROADCAST = 0x4 + IFA_ANYCAST = 0x5 + IFA_CACHEINFO = 0x6 + IFA_MULTICAST = 0x7 + IFLA_UNSPEC = 0x0 + IFLA_ADDRESS = 0x1 + IFLA_BROADCAST = 0x2 + IFLA_IFNAME = 0x3 + IFLA_MTU = 0x4 + IFLA_LINK = 0x5 + IFLA_QDISC = 0x6 + IFLA_STATS = 0x7 + IFLA_COST = 0x8 + IFLA_PRIORITY = 0x9 + IFLA_MASTER = 0xa + IFLA_WIRELESS = 0xb + IFLA_PROTINFO = 0xc + IFLA_TXQLEN = 0xd + IFLA_MAP = 0xe + IFLA_WEIGHT = 0xf + IFLA_OPERSTATE = 0x10 + IFLA_LINKMODE = 0x11 + IFLA_LINKINFO = 0x12 + IFLA_NET_NS_PID = 0x13 + IFLA_IFALIAS = 0x14 + IFLA_MAX = 0x1d + RT_SCOPE_UNIVERSE = 0x0 + RT_SCOPE_SITE = 0xc8 + RT_SCOPE_LINK = 0xfd + RT_SCOPE_HOST = 0xfe + RT_SCOPE_NOWHERE = 0xff + RT_TABLE_UNSPEC = 0x0 + RT_TABLE_COMPAT = 0xfc + RT_TABLE_DEFAULT = 0xfd + RT_TABLE_MAIN = 0xfe + RT_TABLE_LOCAL = 0xff + RT_TABLE_MAX = 0xffffffff + RTA_UNSPEC = 0x0 + RTA_DST = 0x1 + RTA_SRC = 0x2 + RTA_IIF = 0x3 + RTA_OIF = 0x4 + RTA_GATEWAY = 0x5 + RTA_PRIORITY = 0x6 + RTA_PREFSRC = 0x7 + RTA_METRICS = 0x8 + RTA_MULTIPATH = 0x9 + RTA_FLOW = 0xb + RTA_CACHEINFO = 0xc + RTA_TABLE = 0xf + RTN_UNSPEC = 0x0 + RTN_UNICAST = 0x1 + RTN_LOCAL = 0x2 + RTN_BROADCAST = 0x3 + RTN_ANYCAST = 0x4 + RTN_MULTICAST = 0x5 + RTN_BLACKHOLE = 0x6 + RTN_UNREACHABLE = 0x7 + RTN_PROHIBIT = 0x8 + RTN_THROW = 0x9 + RTN_NAT = 0xa + RTN_XRESOLVE = 0xb + RTNLGRP_NONE = 0x0 + RTNLGRP_LINK = 0x1 + RTNLGRP_NOTIFY = 0x2 + RTNLGRP_NEIGH = 0x3 + RTNLGRP_TC = 0x4 + RTNLGRP_IPV4_IFADDR = 0x5 + RTNLGRP_IPV4_MROUTE = 0x6 + RTNLGRP_IPV4_ROUTE = 0x7 + RTNLGRP_IPV4_RULE = 0x8 + RTNLGRP_IPV6_IFADDR = 0x9 + RTNLGRP_IPV6_MROUTE = 0xa + RTNLGRP_IPV6_ROUTE = 0xb + RTNLGRP_IPV6_IFINFO = 0xc + RTNLGRP_IPV6_PREFIX = 0x12 + RTNLGRP_IPV6_RULE = 0x13 + RTNLGRP_ND_USEROPT = 0x14 + SizeofNlMsghdr = 0x10 + SizeofNlMsgerr = 0x14 + SizeofRtGenmsg = 0x1 + SizeofNlAttr = 0x4 + SizeofRtAttr = 0x4 + SizeofIfInfomsg = 0x10 + SizeofIfAddrmsg = 0x8 + SizeofRtMsg = 0xc + SizeofRtNexthop = 0x8 +) + +type NlMsghdr struct { + Len uint32 + Type uint16 + Flags uint16 + Seq 
uint32 + Pid uint32 +} + +type NlMsgerr struct { + Error int32 + Msg NlMsghdr +} + +type RtGenmsg struct { + Family uint8 +} + +type NlAttr struct { + Len uint16 + Type uint16 +} + +type RtAttr struct { + Len uint16 + Type uint16 +} + +type IfInfomsg struct { + Family uint8 + X__ifi_pad uint8 + Type uint16 + Index int32 + Flags uint32 + Change uint32 +} + +type IfAddrmsg struct { + Family uint8 + Prefixlen uint8 + Flags uint8 + Scope uint8 + Index uint32 +} + +type RtMsg struct { + Family uint8 + Dst_len uint8 + Src_len uint8 + Tos uint8 + Table uint8 + Protocol uint8 + Scope uint8 + Type uint8 + Flags uint32 +} + +type RtNexthop struct { + Len uint16 + Flags uint8 + Hops uint8 + Ifindex int32 +} + +const ( + SizeofSockFilter = 0x8 + SizeofSockFprog = 0x8 +) + +type SockFilter struct { + Code uint16 + Jt uint8 + Jf uint8 + K uint32 +} + +type SockFprog struct { + Len uint16 + Pad_cgo_0 [2]byte + Filter *SockFilter +} + +type InotifyEvent struct { + Wd int32 + Mask uint32 + Cookie uint32 + Len uint32 + Name [0]uint8 +} + +const SizeofInotifyEvent = 0x10 + +type PtraceRegs struct { + Uregs [18]uint32 +} + +type FdSet struct { + Bits [32]int32 +} + +type Sysinfo_t struct { + Uptime int32 + Loads [3]uint32 + Totalram uint32 + Freeram uint32 + Sharedram uint32 + Bufferram uint32 + Totalswap uint32 + Freeswap uint32 + Procs uint16 + Pad uint16 + Totalhigh uint32 + Freehigh uint32 + Unit uint32 + X_f [8]uint8 +} + +type Utsname struct { + Sysname [65]uint8 + Nodename [65]uint8 + Release [65]uint8 + Version [65]uint8 + Machine [65]uint8 + Domainname [65]uint8 +} + +type Ustat_t struct { + Tfree int32 + Tinode uint32 + Fname [6]uint8 + Fpack [6]uint8 +} + +type EpollEvent struct { + Events uint32 + PadFd int32 + Fd int32 + Pad int32 +} + +const ( + AT_FDCWD = -0x64 + AT_SYMLINK_NOFOLLOW = 0x100 + AT_REMOVEDIR = 0x200 +) + +type Termios struct { + Iflag uint32 + Oflag uint32 + Cflag uint32 + Lflag uint32 + Line uint8 + Cc [19]uint8 + Ispeed uint32 + Ospeed uint32 +} diff --git a/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/ztypes_linux_arm64.go b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/ztypes_linux_arm64.go new file mode 100644 index 000000000..a159aadab --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/ztypes_linux_arm64.go @@ -0,0 +1,595 @@ +// +build arm64,linux +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs -- -fsigned-char types_linux.go + +package unix + +const ( + sizeofPtr = 0x8 + sizeofShort = 0x2 + sizeofInt = 0x4 + sizeofLong = 0x8 + sizeofLongLong = 0x8 + PathMax = 0x1000 +) + +type ( + _C_short int16 + _C_int int32 + _C_long int64 + _C_long_long int64 +) + +type Timespec struct { + Sec int64 + Nsec int64 +} + +type Timeval struct { + Sec int64 + Usec int64 +} + +type Timex struct { + Modes uint32 + Pad_cgo_0 [4]byte + Offset int64 + Freq int64 + Maxerror int64 + Esterror int64 + Status int32 + Pad_cgo_1 [4]byte + Constant int64 + Precision int64 + Tolerance int64 + Time Timeval + Tick int64 + Ppsfreq int64 + Jitter int64 + Shift int32 + Pad_cgo_2 [4]byte + Stabil int64 + Jitcnt int64 + Calcnt int64 + Errcnt int64 + Stbcnt int64 + Tai int32 + Pad_cgo_3 [44]byte +} + +type Time_t int64 + +type Tms struct { + Utime int64 + Stime int64 + Cutime int64 + Cstime int64 +} + +type Utimbuf struct { + Actime int64 + Modtime int64 +} + +type Rusage struct { + Utime Timeval + Stime Timeval + Maxrss int64 + Ixrss int64 + Idrss int64 + Isrss int64 + Minflt int64 + Majflt int64 + Nswap int64 + 
Inblock int64 + Oublock int64 + Msgsnd int64 + Msgrcv int64 + Nsignals int64 + Nvcsw int64 + Nivcsw int64 +} + +type Rlimit struct { + Cur uint64 + Max uint64 +} + +type _Gid_t uint32 + +type Stat_t struct { + Dev uint64 + Ino uint64 + Mode uint32 + Nlink uint32 + Uid uint32 + Gid uint32 + Rdev uint64 + X__pad1 uint64 + Size int64 + Blksize int32 + X__pad2 int32 + Blocks int64 + Atim Timespec + Mtim Timespec + Ctim Timespec + X__glibc_reserved [2]int32 +} + +type Statfs_t struct { + Type int64 + Bsize int64 + Blocks uint64 + Bfree uint64 + Bavail uint64 + Files uint64 + Ffree uint64 + Fsid Fsid + Namelen int64 + Frsize int64 + Flags int64 + Spare [4]int64 +} + +type Dirent struct { + Ino uint64 + Off int64 + Reclen uint16 + Type uint8 + Name [256]int8 + Pad_cgo_0 [5]byte +} + +type Fsid struct { + X__val [2]int32 +} + +type Flock_t struct { + Type int16 + Whence int16 + Pad_cgo_0 [4]byte + Start int64 + Len int64 + Pid int32 + Pad_cgo_1 [4]byte +} + +const ( + FADV_NORMAL = 0x0 + FADV_RANDOM = 0x1 + FADV_SEQUENTIAL = 0x2 + FADV_WILLNEED = 0x3 + FADV_DONTNEED = 0x4 + FADV_NOREUSE = 0x5 +) + +type RawSockaddrInet4 struct { + Family uint16 + Port uint16 + Addr [4]byte /* in_addr */ + Zero [8]uint8 +} + +type RawSockaddrInet6 struct { + Family uint16 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +type RawSockaddrUnix struct { + Family uint16 + Path [108]int8 +} + +type RawSockaddrLinklayer struct { + Family uint16 + Protocol uint16 + Ifindex int32 + Hatype uint16 + Pkttype uint8 + Halen uint8 + Addr [8]uint8 +} + +type RawSockaddrNetlink struct { + Family uint16 + Pad uint16 + Pid uint32 + Groups uint32 +} + +type RawSockaddr struct { + Family uint16 + Data [14]int8 +} + +type RawSockaddrAny struct { + Addr RawSockaddr + Pad [96]int8 +} + +type _Socklen uint32 + +type Linger struct { + Onoff int32 + Linger int32 +} + +type Iovec struct { + Base *byte + Len uint64 +} + +type IPMreq struct { + Multiaddr [4]byte /* in_addr */ + Interface [4]byte /* in_addr */ +} + +type IPMreqn struct { + Multiaddr [4]byte /* in_addr */ + Address [4]byte /* in_addr */ + Ifindex int32 +} + +type IPv6Mreq struct { + Multiaddr [16]byte /* in6_addr */ + Interface uint32 +} + +type Msghdr struct { + Name *byte + Namelen uint32 + Pad_cgo_0 [4]byte + Iov *Iovec + Iovlen uint64 + Control *byte + Controllen uint64 + Flags int32 + Pad_cgo_1 [4]byte +} + +type Cmsghdr struct { + Len uint64 + Level int32 + Type int32 + X__cmsg_data [0]uint8 +} + +type Inet4Pktinfo struct { + Ifindex int32 + Spec_dst [4]byte /* in_addr */ + Addr [4]byte /* in_addr */ +} + +type Inet6Pktinfo struct { + Addr [16]byte /* in6_addr */ + Ifindex uint32 +} + +type IPv6MTUInfo struct { + Addr RawSockaddrInet6 + Mtu uint32 +} + +type ICMPv6Filter struct { + Data [8]uint32 +} + +type Ucred struct { + Pid int32 + Uid uint32 + Gid uint32 +} + +type TCPInfo struct { + State uint8 + Ca_state uint8 + Retransmits uint8 + Probes uint8 + Backoff uint8 + Options uint8 + Pad_cgo_0 [2]byte + Rto uint32 + Ato uint32 + Snd_mss uint32 + Rcv_mss uint32 + Unacked uint32 + Sacked uint32 + Lost uint32 + Retrans uint32 + Fackets uint32 + Last_data_sent uint32 + Last_ack_sent uint32 + Last_data_recv uint32 + Last_ack_recv uint32 + Pmtu uint32 + Rcv_ssthresh uint32 + Rtt uint32 + Rttvar uint32 + Snd_ssthresh uint32 + Snd_cwnd uint32 + Advmss uint32 + Reordering uint32 + Rcv_rtt uint32 + Rcv_space uint32 + Total_retrans uint32 +} + +const ( + SizeofSockaddrInet4 = 0x10 + SizeofSockaddrInet6 = 0x1c + SizeofSockaddrAny = 0x70 + 
SizeofSockaddrUnix = 0x6e + SizeofSockaddrLinklayer = 0x14 + SizeofSockaddrNetlink = 0xc + SizeofLinger = 0x8 + SizeofIPMreq = 0x8 + SizeofIPMreqn = 0xc + SizeofIPv6Mreq = 0x14 + SizeofMsghdr = 0x38 + SizeofCmsghdr = 0x10 + SizeofInet4Pktinfo = 0xc + SizeofInet6Pktinfo = 0x14 + SizeofIPv6MTUInfo = 0x20 + SizeofICMPv6Filter = 0x20 + SizeofUcred = 0xc + SizeofTCPInfo = 0x68 +) + +const ( + IFA_UNSPEC = 0x0 + IFA_ADDRESS = 0x1 + IFA_LOCAL = 0x2 + IFA_LABEL = 0x3 + IFA_BROADCAST = 0x4 + IFA_ANYCAST = 0x5 + IFA_CACHEINFO = 0x6 + IFA_MULTICAST = 0x7 + IFLA_UNSPEC = 0x0 + IFLA_ADDRESS = 0x1 + IFLA_BROADCAST = 0x2 + IFLA_IFNAME = 0x3 + IFLA_MTU = 0x4 + IFLA_LINK = 0x5 + IFLA_QDISC = 0x6 + IFLA_STATS = 0x7 + IFLA_COST = 0x8 + IFLA_PRIORITY = 0x9 + IFLA_MASTER = 0xa + IFLA_WIRELESS = 0xb + IFLA_PROTINFO = 0xc + IFLA_TXQLEN = 0xd + IFLA_MAP = 0xe + IFLA_WEIGHT = 0xf + IFLA_OPERSTATE = 0x10 + IFLA_LINKMODE = 0x11 + IFLA_LINKINFO = 0x12 + IFLA_NET_NS_PID = 0x13 + IFLA_IFALIAS = 0x14 + IFLA_MAX = 0x22 + RT_SCOPE_UNIVERSE = 0x0 + RT_SCOPE_SITE = 0xc8 + RT_SCOPE_LINK = 0xfd + RT_SCOPE_HOST = 0xfe + RT_SCOPE_NOWHERE = 0xff + RT_TABLE_UNSPEC = 0x0 + RT_TABLE_COMPAT = 0xfc + RT_TABLE_DEFAULT = 0xfd + RT_TABLE_MAIN = 0xfe + RT_TABLE_LOCAL = 0xff + RT_TABLE_MAX = 0xffffffff + RTA_UNSPEC = 0x0 + RTA_DST = 0x1 + RTA_SRC = 0x2 + RTA_IIF = 0x3 + RTA_OIF = 0x4 + RTA_GATEWAY = 0x5 + RTA_PRIORITY = 0x6 + RTA_PREFSRC = 0x7 + RTA_METRICS = 0x8 + RTA_MULTIPATH = 0x9 + RTA_FLOW = 0xb + RTA_CACHEINFO = 0xc + RTA_TABLE = 0xf + RTN_UNSPEC = 0x0 + RTN_UNICAST = 0x1 + RTN_LOCAL = 0x2 + RTN_BROADCAST = 0x3 + RTN_ANYCAST = 0x4 + RTN_MULTICAST = 0x5 + RTN_BLACKHOLE = 0x6 + RTN_UNREACHABLE = 0x7 + RTN_PROHIBIT = 0x8 + RTN_THROW = 0x9 + RTN_NAT = 0xa + RTN_XRESOLVE = 0xb + RTNLGRP_NONE = 0x0 + RTNLGRP_LINK = 0x1 + RTNLGRP_NOTIFY = 0x2 + RTNLGRP_NEIGH = 0x3 + RTNLGRP_TC = 0x4 + RTNLGRP_IPV4_IFADDR = 0x5 + RTNLGRP_IPV4_MROUTE = 0x6 + RTNLGRP_IPV4_ROUTE = 0x7 + RTNLGRP_IPV4_RULE = 0x8 + RTNLGRP_IPV6_IFADDR = 0x9 + RTNLGRP_IPV6_MROUTE = 0xa + RTNLGRP_IPV6_ROUTE = 0xb + RTNLGRP_IPV6_IFINFO = 0xc + RTNLGRP_IPV6_PREFIX = 0x12 + RTNLGRP_IPV6_RULE = 0x13 + RTNLGRP_ND_USEROPT = 0x14 + SizeofNlMsghdr = 0x10 + SizeofNlMsgerr = 0x14 + SizeofRtGenmsg = 0x1 + SizeofNlAttr = 0x4 + SizeofRtAttr = 0x4 + SizeofIfInfomsg = 0x10 + SizeofIfAddrmsg = 0x8 + SizeofRtMsg = 0xc + SizeofRtNexthop = 0x8 +) + +type NlMsghdr struct { + Len uint32 + Type uint16 + Flags uint16 + Seq uint32 + Pid uint32 +} + +type NlMsgerr struct { + Error int32 + Msg NlMsghdr +} + +type RtGenmsg struct { + Family uint8 +} + +type NlAttr struct { + Len uint16 + Type uint16 +} + +type RtAttr struct { + Len uint16 + Type uint16 +} + +type IfInfomsg struct { + Family uint8 + X__ifi_pad uint8 + Type uint16 + Index int32 + Flags uint32 + Change uint32 +} + +type IfAddrmsg struct { + Family uint8 + Prefixlen uint8 + Flags uint8 + Scope uint8 + Index uint32 +} + +type RtMsg struct { + Family uint8 + Dst_len uint8 + Src_len uint8 + Tos uint8 + Table uint8 + Protocol uint8 + Scope uint8 + Type uint8 + Flags uint32 +} + +type RtNexthop struct { + Len uint16 + Flags uint8 + Hops uint8 + Ifindex int32 +} + +const ( + SizeofSockFilter = 0x8 + SizeofSockFprog = 0x10 +) + +type SockFilter struct { + Code uint16 + Jt uint8 + Jf uint8 + K uint32 +} + +type SockFprog struct { + Len uint16 + Pad_cgo_0 [6]byte + Filter *SockFilter +} + +type InotifyEvent struct { + Wd int32 + Mask uint32 + Cookie uint32 + Len uint32 + Name [0]int8 +} + +const SizeofInotifyEvent = 0x10 + +type PtraceRegs struct { + 
Regs [31]uint64 + Sp uint64 + Pc uint64 + Pstate uint64 +} + +type FdSet struct { + Bits [16]int64 +} + +type Sysinfo_t struct { + Uptime int64 + Loads [3]uint64 + Totalram uint64 + Freeram uint64 + Sharedram uint64 + Bufferram uint64 + Totalswap uint64 + Freeswap uint64 + Procs uint16 + Pad uint16 + Pad_cgo_0 [4]byte + Totalhigh uint64 + Freehigh uint64 + Unit uint32 + X_f [0]int8 + Pad_cgo_1 [4]byte +} + +type Utsname struct { + Sysname [65]int8 + Nodename [65]int8 + Release [65]int8 + Version [65]int8 + Machine [65]int8 + Domainname [65]int8 +} + +type Ustat_t struct { + Tfree int32 + Pad_cgo_0 [4]byte + Tinode uint64 + Fname [6]int8 + Fpack [6]int8 + Pad_cgo_1 [4]byte +} + +type EpollEvent struct { + Events uint32 + Fd int32 + Pad int32 +} + +const ( + AT_FDCWD = -0x64 + AT_REMOVEDIR = 0x200 + AT_SYMLINK_NOFOLLOW = 0x100 +) + +type Termios struct { + Iflag uint32 + Oflag uint32 + Cflag uint32 + Lflag uint32 + Line uint8 + Cc [19]uint8 + Ispeed uint32 + Ospeed uint32 +} diff --git a/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/ztypes_linux_ppc64.go b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/ztypes_linux_ppc64.go new file mode 100644 index 000000000..b14cbfef0 --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/ztypes_linux_ppc64.go @@ -0,0 +1,605 @@ +// +build ppc64,linux +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs types_linux.go + +package unix + +const ( + sizeofPtr = 0x8 + sizeofShort = 0x2 + sizeofInt = 0x4 + sizeofLong = 0x8 + sizeofLongLong = 0x8 + PathMax = 0x1000 +) + +type ( + _C_short int16 + _C_int int32 + _C_long int64 + _C_long_long int64 +) + +type Timespec struct { + Sec int64 + Nsec int64 +} + +type Timeval struct { + Sec int64 + Usec int64 +} + +type Timex struct { + Modes uint32 + Pad_cgo_0 [4]byte + Offset int64 + Freq int64 + Maxerror int64 + Esterror int64 + Status int32 + Pad_cgo_1 [4]byte + Constant int64 + Precision int64 + Tolerance int64 + Time Timeval + Tick int64 + Ppsfreq int64 + Jitter int64 + Shift int32 + Pad_cgo_2 [4]byte + Stabil int64 + Jitcnt int64 + Calcnt int64 + Errcnt int64 + Stbcnt int64 + Tai int32 + Pad_cgo_3 [44]byte +} + +type Time_t int64 + +type Tms struct { + Utime int64 + Stime int64 + Cutime int64 + Cstime int64 +} + +type Utimbuf struct { + Actime int64 + Modtime int64 +} + +type Rusage struct { + Utime Timeval + Stime Timeval + Maxrss int64 + Ixrss int64 + Idrss int64 + Isrss int64 + Minflt int64 + Majflt int64 + Nswap int64 + Inblock int64 + Oublock int64 + Msgsnd int64 + Msgrcv int64 + Nsignals int64 + Nvcsw int64 + Nivcsw int64 +} + +type Rlimit struct { + Cur uint64 + Max uint64 +} + +type _Gid_t uint32 + +type Stat_t struct { + Dev uint64 + Ino uint64 + Nlink uint64 + Mode uint32 + Uid uint32 + Gid uint32 + X__pad2 int32 + Rdev uint64 + Size int64 + Blksize int64 + Blocks int64 + Atim Timespec + Mtim Timespec + Ctim Timespec + X__glibc_reserved4 uint64 + X__glibc_reserved5 uint64 + X__glibc_reserved6 uint64 +} + +type Statfs_t struct { + Type int64 + Bsize int64 + Blocks uint64 + Bfree uint64 + Bavail uint64 + Files uint64 + Ffree uint64 + Fsid Fsid + Namelen int64 + Frsize int64 + Flags int64 + Spare [4]int64 +} + +type Dirent struct { + Ino uint64 + Off int64 + Reclen uint16 + Type uint8 + Name [256]uint8 + Pad_cgo_0 [5]byte +} + +type Fsid struct { + X__val [2]int32 +} + +type Flock_t struct { + Type int16 + Whence int16 + Pad_cgo_0 [4]byte + Start int64 + Len int64 + Pid int32 + Pad_cgo_1 [4]byte +} + +const ( + 
FADV_NORMAL = 0x0 + FADV_RANDOM = 0x1 + FADV_SEQUENTIAL = 0x2 + FADV_WILLNEED = 0x3 + FADV_DONTNEED = 0x4 + FADV_NOREUSE = 0x5 +) + +type RawSockaddrInet4 struct { + Family uint16 + Port uint16 + Addr [4]byte /* in_addr */ + Zero [8]uint8 +} + +type RawSockaddrInet6 struct { + Family uint16 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +type RawSockaddrUnix struct { + Family uint16 + Path [108]int8 +} + +type RawSockaddrLinklayer struct { + Family uint16 + Protocol uint16 + Ifindex int32 + Hatype uint16 + Pkttype uint8 + Halen uint8 + Addr [8]uint8 +} + +type RawSockaddrNetlink struct { + Family uint16 + Pad uint16 + Pid uint32 + Groups uint32 +} + +type RawSockaddr struct { + Family uint16 + Data [14]uint8 +} + +type RawSockaddrAny struct { + Addr RawSockaddr + Pad [96]uint8 +} + +type _Socklen uint32 + +type Linger struct { + Onoff int32 + Linger int32 +} + +type Iovec struct { + Base *byte + Len uint64 +} + +type IPMreq struct { + Multiaddr [4]byte /* in_addr */ + Interface [4]byte /* in_addr */ +} + +type IPMreqn struct { + Multiaddr [4]byte /* in_addr */ + Address [4]byte /* in_addr */ + Ifindex int32 +} + +type IPv6Mreq struct { + Multiaddr [16]byte /* in6_addr */ + Interface uint32 +} + +type Msghdr struct { + Name *byte + Namelen uint32 + Pad_cgo_0 [4]byte + Iov *Iovec + Iovlen uint64 + Control *byte + Controllen uint64 + Flags int32 + Pad_cgo_1 [4]byte +} + +type Cmsghdr struct { + Len uint64 + Level int32 + Type int32 + X__cmsg_data [0]uint8 +} + +type Inet4Pktinfo struct { + Ifindex int32 + Spec_dst [4]byte /* in_addr */ + Addr [4]byte /* in_addr */ +} + +type Inet6Pktinfo struct { + Addr [16]byte /* in6_addr */ + Ifindex uint32 +} + +type IPv6MTUInfo struct { + Addr RawSockaddrInet6 + Mtu uint32 +} + +type ICMPv6Filter struct { + Data [8]uint32 +} + +type Ucred struct { + Pid int32 + Uid uint32 + Gid uint32 +} + +type TCPInfo struct { + State uint8 + Ca_state uint8 + Retransmits uint8 + Probes uint8 + Backoff uint8 + Options uint8 + Pad_cgo_0 [2]byte + Rto uint32 + Ato uint32 + Snd_mss uint32 + Rcv_mss uint32 + Unacked uint32 + Sacked uint32 + Lost uint32 + Retrans uint32 + Fackets uint32 + Last_data_sent uint32 + Last_ack_sent uint32 + Last_data_recv uint32 + Last_ack_recv uint32 + Pmtu uint32 + Rcv_ssthresh uint32 + Rtt uint32 + Rttvar uint32 + Snd_ssthresh uint32 + Snd_cwnd uint32 + Advmss uint32 + Reordering uint32 + Rcv_rtt uint32 + Rcv_space uint32 + Total_retrans uint32 +} + +const ( + SizeofSockaddrInet4 = 0x10 + SizeofSockaddrInet6 = 0x1c + SizeofSockaddrAny = 0x70 + SizeofSockaddrUnix = 0x6e + SizeofSockaddrLinklayer = 0x14 + SizeofSockaddrNetlink = 0xc + SizeofLinger = 0x8 + SizeofIPMreq = 0x8 + SizeofIPMreqn = 0xc + SizeofIPv6Mreq = 0x14 + SizeofMsghdr = 0x38 + SizeofCmsghdr = 0x10 + SizeofInet4Pktinfo = 0xc + SizeofInet6Pktinfo = 0x14 + SizeofIPv6MTUInfo = 0x20 + SizeofICMPv6Filter = 0x20 + SizeofUcred = 0xc + SizeofTCPInfo = 0x68 +) + +const ( + IFA_UNSPEC = 0x0 + IFA_ADDRESS = 0x1 + IFA_LOCAL = 0x2 + IFA_LABEL = 0x3 + IFA_BROADCAST = 0x4 + IFA_ANYCAST = 0x5 + IFA_CACHEINFO = 0x6 + IFA_MULTICAST = 0x7 + IFLA_UNSPEC = 0x0 + IFLA_ADDRESS = 0x1 + IFLA_BROADCAST = 0x2 + IFLA_IFNAME = 0x3 + IFLA_MTU = 0x4 + IFLA_LINK = 0x5 + IFLA_QDISC = 0x6 + IFLA_STATS = 0x7 + IFLA_COST = 0x8 + IFLA_PRIORITY = 0x9 + IFLA_MASTER = 0xa + IFLA_WIRELESS = 0xb + IFLA_PROTINFO = 0xc + IFLA_TXQLEN = 0xd + IFLA_MAP = 0xe + IFLA_WEIGHT = 0xf + IFLA_OPERSTATE = 0x10 + IFLA_LINKMODE = 0x11 + IFLA_LINKINFO = 0x12 + IFLA_NET_NS_PID = 0x13 + IFLA_IFALIAS = 
0x14 + IFLA_MAX = 0x23 + RT_SCOPE_UNIVERSE = 0x0 + RT_SCOPE_SITE = 0xc8 + RT_SCOPE_LINK = 0xfd + RT_SCOPE_HOST = 0xfe + RT_SCOPE_NOWHERE = 0xff + RT_TABLE_UNSPEC = 0x0 + RT_TABLE_COMPAT = 0xfc + RT_TABLE_DEFAULT = 0xfd + RT_TABLE_MAIN = 0xfe + RT_TABLE_LOCAL = 0xff + RT_TABLE_MAX = 0xffffffff + RTA_UNSPEC = 0x0 + RTA_DST = 0x1 + RTA_SRC = 0x2 + RTA_IIF = 0x3 + RTA_OIF = 0x4 + RTA_GATEWAY = 0x5 + RTA_PRIORITY = 0x6 + RTA_PREFSRC = 0x7 + RTA_METRICS = 0x8 + RTA_MULTIPATH = 0x9 + RTA_FLOW = 0xb + RTA_CACHEINFO = 0xc + RTA_TABLE = 0xf + RTN_UNSPEC = 0x0 + RTN_UNICAST = 0x1 + RTN_LOCAL = 0x2 + RTN_BROADCAST = 0x3 + RTN_ANYCAST = 0x4 + RTN_MULTICAST = 0x5 + RTN_BLACKHOLE = 0x6 + RTN_UNREACHABLE = 0x7 + RTN_PROHIBIT = 0x8 + RTN_THROW = 0x9 + RTN_NAT = 0xa + RTN_XRESOLVE = 0xb + RTNLGRP_NONE = 0x0 + RTNLGRP_LINK = 0x1 + RTNLGRP_NOTIFY = 0x2 + RTNLGRP_NEIGH = 0x3 + RTNLGRP_TC = 0x4 + RTNLGRP_IPV4_IFADDR = 0x5 + RTNLGRP_IPV4_MROUTE = 0x6 + RTNLGRP_IPV4_ROUTE = 0x7 + RTNLGRP_IPV4_RULE = 0x8 + RTNLGRP_IPV6_IFADDR = 0x9 + RTNLGRP_IPV6_MROUTE = 0xa + RTNLGRP_IPV6_ROUTE = 0xb + RTNLGRP_IPV6_IFINFO = 0xc + RTNLGRP_IPV6_PREFIX = 0x12 + RTNLGRP_IPV6_RULE = 0x13 + RTNLGRP_ND_USEROPT = 0x14 + SizeofNlMsghdr = 0x10 + SizeofNlMsgerr = 0x14 + SizeofRtGenmsg = 0x1 + SizeofNlAttr = 0x4 + SizeofRtAttr = 0x4 + SizeofIfInfomsg = 0x10 + SizeofIfAddrmsg = 0x8 + SizeofRtMsg = 0xc + SizeofRtNexthop = 0x8 +) + +type NlMsghdr struct { + Len uint32 + Type uint16 + Flags uint16 + Seq uint32 + Pid uint32 +} + +type NlMsgerr struct { + Error int32 + Msg NlMsghdr +} + +type RtGenmsg struct { + Family uint8 +} + +type NlAttr struct { + Len uint16 + Type uint16 +} + +type RtAttr struct { + Len uint16 + Type uint16 +} + +type IfInfomsg struct { + Family uint8 + X__ifi_pad uint8 + Type uint16 + Index int32 + Flags uint32 + Change uint32 +} + +type IfAddrmsg struct { + Family uint8 + Prefixlen uint8 + Flags uint8 + Scope uint8 + Index uint32 +} + +type RtMsg struct { + Family uint8 + Dst_len uint8 + Src_len uint8 + Tos uint8 + Table uint8 + Protocol uint8 + Scope uint8 + Type uint8 + Flags uint32 +} + +type RtNexthop struct { + Len uint16 + Flags uint8 + Hops uint8 + Ifindex int32 +} + +const ( + SizeofSockFilter = 0x8 + SizeofSockFprog = 0x10 +) + +type SockFilter struct { + Code uint16 + Jt uint8 + Jf uint8 + K uint32 +} + +type SockFprog struct { + Len uint16 + Pad_cgo_0 [6]byte + Filter *SockFilter +} + +type InotifyEvent struct { + Wd int32 + Mask uint32 + Cookie uint32 + Len uint32 + Name [0]uint8 +} + +const SizeofInotifyEvent = 0x10 + +type PtraceRegs struct { + Gpr [32]uint64 + Nip uint64 + Msr uint64 + Orig_gpr3 uint64 + Ctr uint64 + Link uint64 + Xer uint64 + Ccr uint64 + Softe uint64 + Trap uint64 + Dar uint64 + Dsisr uint64 + Result uint64 +} + +type FdSet struct { + Bits [16]int64 +} + +type Sysinfo_t struct { + Uptime int64 + Loads [3]uint64 + Totalram uint64 + Freeram uint64 + Sharedram uint64 + Bufferram uint64 + Totalswap uint64 + Freeswap uint64 + Procs uint16 + Pad uint16 + Pad_cgo_0 [4]byte + Totalhigh uint64 + Freehigh uint64 + Unit uint32 + X_f [0]uint8 + Pad_cgo_1 [4]byte +} + +type Utsname struct { + Sysname [65]uint8 + Nodename [65]uint8 + Release [65]uint8 + Version [65]uint8 + Machine [65]uint8 + Domainname [65]uint8 +} + +type Ustat_t struct { + Tfree int32 + Pad_cgo_0 [4]byte + Tinode uint64 + Fname [6]uint8 + Fpack [6]uint8 + Pad_cgo_1 [4]byte +} + +type EpollEvent struct { + Events uint32 + Fd int32 + Pad int32 +} + +const ( + AT_FDCWD = -0x64 + AT_REMOVEDIR = 0x200 + AT_SYMLINK_NOFOLLOW = 0x100 +) + 
+type Termios struct { + Iflag uint32 + Oflag uint32 + Cflag uint32 + Lflag uint32 + Cc [19]uint8 + Line uint8 + Ispeed uint32 + Ospeed uint32 +} diff --git a/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/ztypes_linux_ppc64le.go b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/ztypes_linux_ppc64le.go new file mode 100644 index 000000000..22c96a2f0 --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/ztypes_linux_ppc64le.go @@ -0,0 +1,605 @@ +// +build ppc64le,linux +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs types_linux.go + +package unix + +const ( + sizeofPtr = 0x8 + sizeofShort = 0x2 + sizeofInt = 0x4 + sizeofLong = 0x8 + sizeofLongLong = 0x8 + PathMax = 0x1000 +) + +type ( + _C_short int16 + _C_int int32 + _C_long int64 + _C_long_long int64 +) + +type Timespec struct { + Sec int64 + Nsec int64 +} + +type Timeval struct { + Sec int64 + Usec int64 +} + +type Timex struct { + Modes uint32 + Pad_cgo_0 [4]byte + Offset int64 + Freq int64 + Maxerror int64 + Esterror int64 + Status int32 + Pad_cgo_1 [4]byte + Constant int64 + Precision int64 + Tolerance int64 + Time Timeval + Tick int64 + Ppsfreq int64 + Jitter int64 + Shift int32 + Pad_cgo_2 [4]byte + Stabil int64 + Jitcnt int64 + Calcnt int64 + Errcnt int64 + Stbcnt int64 + Tai int32 + Pad_cgo_3 [44]byte +} + +type Time_t int64 + +type Tms struct { + Utime int64 + Stime int64 + Cutime int64 + Cstime int64 +} + +type Utimbuf struct { + Actime int64 + Modtime int64 +} + +type Rusage struct { + Utime Timeval + Stime Timeval + Maxrss int64 + Ixrss int64 + Idrss int64 + Isrss int64 + Minflt int64 + Majflt int64 + Nswap int64 + Inblock int64 + Oublock int64 + Msgsnd int64 + Msgrcv int64 + Nsignals int64 + Nvcsw int64 + Nivcsw int64 +} + +type Rlimit struct { + Cur uint64 + Max uint64 +} + +type _Gid_t uint32 + +type Stat_t struct { + Dev uint64 + Ino uint64 + Nlink uint64 + Mode uint32 + Uid uint32 + Gid uint32 + X__pad2 int32 + Rdev uint64 + Size int64 + Blksize int64 + Blocks int64 + Atim Timespec + Mtim Timespec + Ctim Timespec + X__glibc_reserved4 uint64 + X__glibc_reserved5 uint64 + X__glibc_reserved6 uint64 +} + +type Statfs_t struct { + Type int64 + Bsize int64 + Blocks uint64 + Bfree uint64 + Bavail uint64 + Files uint64 + Ffree uint64 + Fsid Fsid + Namelen int64 + Frsize int64 + Flags int64 + Spare [4]int64 +} + +type Dirent struct { + Ino uint64 + Off int64 + Reclen uint16 + Type uint8 + Name [256]uint8 + Pad_cgo_0 [5]byte +} + +type Fsid struct { + X__val [2]int32 +} + +type Flock_t struct { + Type int16 + Whence int16 + Pad_cgo_0 [4]byte + Start int64 + Len int64 + Pid int32 + Pad_cgo_1 [4]byte +} + +const ( + FADV_NORMAL = 0x0 + FADV_RANDOM = 0x1 + FADV_SEQUENTIAL = 0x2 + FADV_WILLNEED = 0x3 + FADV_DONTNEED = 0x4 + FADV_NOREUSE = 0x5 +) + +type RawSockaddrInet4 struct { + Family uint16 + Port uint16 + Addr [4]byte /* in_addr */ + Zero [8]uint8 +} + +type RawSockaddrInet6 struct { + Family uint16 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +type RawSockaddrUnix struct { + Family uint16 + Path [108]int8 +} + +type RawSockaddrLinklayer struct { + Family uint16 + Protocol uint16 + Ifindex int32 + Hatype uint16 + Pkttype uint8 + Halen uint8 + Addr [8]uint8 +} + +type RawSockaddrNetlink struct { + Family uint16 + Pad uint16 + Pid uint32 + Groups uint32 +} + +type RawSockaddr struct { + Family uint16 + Data [14]uint8 +} + +type RawSockaddrAny struct { + Addr RawSockaddr + Pad [96]uint8 +} + +type 
_Socklen uint32 + +type Linger struct { + Onoff int32 + Linger int32 +} + +type Iovec struct { + Base *byte + Len uint64 +} + +type IPMreq struct { + Multiaddr [4]byte /* in_addr */ + Interface [4]byte /* in_addr */ +} + +type IPMreqn struct { + Multiaddr [4]byte /* in_addr */ + Address [4]byte /* in_addr */ + Ifindex int32 +} + +type IPv6Mreq struct { + Multiaddr [16]byte /* in6_addr */ + Interface uint32 +} + +type Msghdr struct { + Name *byte + Namelen uint32 + Pad_cgo_0 [4]byte + Iov *Iovec + Iovlen uint64 + Control *byte + Controllen uint64 + Flags int32 + Pad_cgo_1 [4]byte +} + +type Cmsghdr struct { + Len uint64 + Level int32 + Type int32 + X__cmsg_data [0]uint8 +} + +type Inet4Pktinfo struct { + Ifindex int32 + Spec_dst [4]byte /* in_addr */ + Addr [4]byte /* in_addr */ +} + +type Inet6Pktinfo struct { + Addr [16]byte /* in6_addr */ + Ifindex uint32 +} + +type IPv6MTUInfo struct { + Addr RawSockaddrInet6 + Mtu uint32 +} + +type ICMPv6Filter struct { + Data [8]uint32 +} + +type Ucred struct { + Pid int32 + Uid uint32 + Gid uint32 +} + +type TCPInfo struct { + State uint8 + Ca_state uint8 + Retransmits uint8 + Probes uint8 + Backoff uint8 + Options uint8 + Pad_cgo_0 [2]byte + Rto uint32 + Ato uint32 + Snd_mss uint32 + Rcv_mss uint32 + Unacked uint32 + Sacked uint32 + Lost uint32 + Retrans uint32 + Fackets uint32 + Last_data_sent uint32 + Last_ack_sent uint32 + Last_data_recv uint32 + Last_ack_recv uint32 + Pmtu uint32 + Rcv_ssthresh uint32 + Rtt uint32 + Rttvar uint32 + Snd_ssthresh uint32 + Snd_cwnd uint32 + Advmss uint32 + Reordering uint32 + Rcv_rtt uint32 + Rcv_space uint32 + Total_retrans uint32 +} + +const ( + SizeofSockaddrInet4 = 0x10 + SizeofSockaddrInet6 = 0x1c + SizeofSockaddrAny = 0x70 + SizeofSockaddrUnix = 0x6e + SizeofSockaddrLinklayer = 0x14 + SizeofSockaddrNetlink = 0xc + SizeofLinger = 0x8 + SizeofIPMreq = 0x8 + SizeofIPMreqn = 0xc + SizeofIPv6Mreq = 0x14 + SizeofMsghdr = 0x38 + SizeofCmsghdr = 0x10 + SizeofInet4Pktinfo = 0xc + SizeofInet6Pktinfo = 0x14 + SizeofIPv6MTUInfo = 0x20 + SizeofICMPv6Filter = 0x20 + SizeofUcred = 0xc + SizeofTCPInfo = 0x68 +) + +const ( + IFA_UNSPEC = 0x0 + IFA_ADDRESS = 0x1 + IFA_LOCAL = 0x2 + IFA_LABEL = 0x3 + IFA_BROADCAST = 0x4 + IFA_ANYCAST = 0x5 + IFA_CACHEINFO = 0x6 + IFA_MULTICAST = 0x7 + IFLA_UNSPEC = 0x0 + IFLA_ADDRESS = 0x1 + IFLA_BROADCAST = 0x2 + IFLA_IFNAME = 0x3 + IFLA_MTU = 0x4 + IFLA_LINK = 0x5 + IFLA_QDISC = 0x6 + IFLA_STATS = 0x7 + IFLA_COST = 0x8 + IFLA_PRIORITY = 0x9 + IFLA_MASTER = 0xa + IFLA_WIRELESS = 0xb + IFLA_PROTINFO = 0xc + IFLA_TXQLEN = 0xd + IFLA_MAP = 0xe + IFLA_WEIGHT = 0xf + IFLA_OPERSTATE = 0x10 + IFLA_LINKMODE = 0x11 + IFLA_LINKINFO = 0x12 + IFLA_NET_NS_PID = 0x13 + IFLA_IFALIAS = 0x14 + IFLA_MAX = 0x22 + RT_SCOPE_UNIVERSE = 0x0 + RT_SCOPE_SITE = 0xc8 + RT_SCOPE_LINK = 0xfd + RT_SCOPE_HOST = 0xfe + RT_SCOPE_NOWHERE = 0xff + RT_TABLE_UNSPEC = 0x0 + RT_TABLE_COMPAT = 0xfc + RT_TABLE_DEFAULT = 0xfd + RT_TABLE_MAIN = 0xfe + RT_TABLE_LOCAL = 0xff + RT_TABLE_MAX = 0xffffffff + RTA_UNSPEC = 0x0 + RTA_DST = 0x1 + RTA_SRC = 0x2 + RTA_IIF = 0x3 + RTA_OIF = 0x4 + RTA_GATEWAY = 0x5 + RTA_PRIORITY = 0x6 + RTA_PREFSRC = 0x7 + RTA_METRICS = 0x8 + RTA_MULTIPATH = 0x9 + RTA_FLOW = 0xb + RTA_CACHEINFO = 0xc + RTA_TABLE = 0xf + RTN_UNSPEC = 0x0 + RTN_UNICAST = 0x1 + RTN_LOCAL = 0x2 + RTN_BROADCAST = 0x3 + RTN_ANYCAST = 0x4 + RTN_MULTICAST = 0x5 + RTN_BLACKHOLE = 0x6 + RTN_UNREACHABLE = 0x7 + RTN_PROHIBIT = 0x8 + RTN_THROW = 0x9 + RTN_NAT = 0xa + RTN_XRESOLVE = 0xb + RTNLGRP_NONE = 0x0 + RTNLGRP_LINK = 0x1 + RTNLGRP_NOTIFY 
= 0x2 + RTNLGRP_NEIGH = 0x3 + RTNLGRP_TC = 0x4 + RTNLGRP_IPV4_IFADDR = 0x5 + RTNLGRP_IPV4_MROUTE = 0x6 + RTNLGRP_IPV4_ROUTE = 0x7 + RTNLGRP_IPV4_RULE = 0x8 + RTNLGRP_IPV6_IFADDR = 0x9 + RTNLGRP_IPV6_MROUTE = 0xa + RTNLGRP_IPV6_ROUTE = 0xb + RTNLGRP_IPV6_IFINFO = 0xc + RTNLGRP_IPV6_PREFIX = 0x12 + RTNLGRP_IPV6_RULE = 0x13 + RTNLGRP_ND_USEROPT = 0x14 + SizeofNlMsghdr = 0x10 + SizeofNlMsgerr = 0x14 + SizeofRtGenmsg = 0x1 + SizeofNlAttr = 0x4 + SizeofRtAttr = 0x4 + SizeofIfInfomsg = 0x10 + SizeofIfAddrmsg = 0x8 + SizeofRtMsg = 0xc + SizeofRtNexthop = 0x8 +) + +type NlMsghdr struct { + Len uint32 + Type uint16 + Flags uint16 + Seq uint32 + Pid uint32 +} + +type NlMsgerr struct { + Error int32 + Msg NlMsghdr +} + +type RtGenmsg struct { + Family uint8 +} + +type NlAttr struct { + Len uint16 + Type uint16 +} + +type RtAttr struct { + Len uint16 + Type uint16 +} + +type IfInfomsg struct { + Family uint8 + X__ifi_pad uint8 + Type uint16 + Index int32 + Flags uint32 + Change uint32 +} + +type IfAddrmsg struct { + Family uint8 + Prefixlen uint8 + Flags uint8 + Scope uint8 + Index uint32 +} + +type RtMsg struct { + Family uint8 + Dst_len uint8 + Src_len uint8 + Tos uint8 + Table uint8 + Protocol uint8 + Scope uint8 + Type uint8 + Flags uint32 +} + +type RtNexthop struct { + Len uint16 + Flags uint8 + Hops uint8 + Ifindex int32 +} + +const ( + SizeofSockFilter = 0x8 + SizeofSockFprog = 0x10 +) + +type SockFilter struct { + Code uint16 + Jt uint8 + Jf uint8 + K uint32 +} + +type SockFprog struct { + Len uint16 + Pad_cgo_0 [6]byte + Filter *SockFilter +} + +type InotifyEvent struct { + Wd int32 + Mask uint32 + Cookie uint32 + Len uint32 + Name [0]uint8 +} + +const SizeofInotifyEvent = 0x10 + +type PtraceRegs struct { + Gpr [32]uint64 + Nip uint64 + Msr uint64 + Orig_gpr3 uint64 + Ctr uint64 + Link uint64 + Xer uint64 + Ccr uint64 + Softe uint64 + Trap uint64 + Dar uint64 + Dsisr uint64 + Result uint64 +} + +type FdSet struct { + Bits [16]int64 +} + +type Sysinfo_t struct { + Uptime int64 + Loads [3]uint64 + Totalram uint64 + Freeram uint64 + Sharedram uint64 + Bufferram uint64 + Totalswap uint64 + Freeswap uint64 + Procs uint16 + Pad uint16 + Pad_cgo_0 [4]byte + Totalhigh uint64 + Freehigh uint64 + Unit uint32 + X_f [0]uint8 + Pad_cgo_1 [4]byte +} + +type Utsname struct { + Sysname [65]uint8 + Nodename [65]uint8 + Release [65]uint8 + Version [65]uint8 + Machine [65]uint8 + Domainname [65]uint8 +} + +type Ustat_t struct { + Tfree int32 + Pad_cgo_0 [4]byte + Tinode uint64 + Fname [6]uint8 + Fpack [6]uint8 + Pad_cgo_1 [4]byte +} + +type EpollEvent struct { + Events uint32 + Fd int32 + Pad int32 +} + +const ( + AT_FDCWD = -0x64 + AT_REMOVEDIR = 0x200 + AT_SYMLINK_NOFOLLOW = 0x100 +) + +type Termios struct { + Iflag uint32 + Oflag uint32 + Cflag uint32 + Lflag uint32 + Cc [19]uint8 + Line uint8 + Ispeed uint32 + Ospeed uint32 +} diff --git a/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/ztypes_netbsd_386.go b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/ztypes_netbsd_386.go new file mode 100644 index 000000000..caf755fb8 --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/ztypes_netbsd_386.go @@ -0,0 +1,396 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs types_netbsd.go + +// +build 386,netbsd + +package unix + +const ( + sizeofPtr = 0x4 + sizeofShort = 0x2 + sizeofInt = 0x4 + sizeofLong = 0x4 + sizeofLongLong = 0x8 +) + +type ( + _C_short int16 + _C_int int32 + _C_long int32 + _C_long_long int64 +) + 
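// [Editor's note] The ztypes_* files in this vendor drop are machine-generated;
// the header above records the `cgo -godefs` invocation that produced each one.
// As a rough, illustrative sketch (not the actual upstream types_netbsd.go),
// the generator input is a hand-written Go file that aliases Go names to C
// types; `go tool cgo -godefs` then expands those aliases into the concrete
// structs and Sizeof* constants seen below:
//
//	// +build ignore
//
//	package unix
//
//	/*
//	#include <sys/types.h>
//	#include <sys/time.h>
//	*/
//	import "C"
//
//	type Timespec C.struct_timespec
//
//	type Timeval C.struct_timeval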
+type Timespec struct { + Sec int64 + Nsec int32 +} + +type Timeval struct { + Sec int64 + Usec int32 +} + +type Rusage struct { + Utime Timeval + Stime Timeval + Maxrss int32 + Ixrss int32 + Idrss int32 + Isrss int32 + Minflt int32 + Majflt int32 + Nswap int32 + Inblock int32 + Oublock int32 + Msgsnd int32 + Msgrcv int32 + Nsignals int32 + Nvcsw int32 + Nivcsw int32 +} + +type Rlimit struct { + Cur uint64 + Max uint64 +} + +type _Gid_t uint32 + +type Stat_t struct { + Dev uint64 + Mode uint32 + Ino uint64 + Nlink uint32 + Uid uint32 + Gid uint32 + Rdev uint64 + Atimespec Timespec + Mtimespec Timespec + Ctimespec Timespec + Birthtimespec Timespec + Size int64 + Blocks int64 + Blksize uint32 + Flags uint32 + Gen uint32 + Spare [2]uint32 +} + +type Statfs_t [0]byte + +type Flock_t struct { + Start int64 + Len int64 + Pid int32 + Type int16 + Whence int16 +} + +type Dirent struct { + Fileno uint64 + Reclen uint16 + Namlen uint16 + Type uint8 + Name [512]int8 + Pad_cgo_0 [3]byte +} + +type Fsid struct { + X__fsid_val [2]int32 +} + +type RawSockaddrInet4 struct { + Len uint8 + Family uint8 + Port uint16 + Addr [4]byte /* in_addr */ + Zero [8]int8 +} + +type RawSockaddrInet6 struct { + Len uint8 + Family uint8 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +type RawSockaddrUnix struct { + Len uint8 + Family uint8 + Path [104]int8 +} + +type RawSockaddrDatalink struct { + Len uint8 + Family uint8 + Index uint16 + Type uint8 + Nlen uint8 + Alen uint8 + Slen uint8 + Data [12]int8 +} + +type RawSockaddr struct { + Len uint8 + Family uint8 + Data [14]int8 +} + +type RawSockaddrAny struct { + Addr RawSockaddr + Pad [92]int8 +} + +type _Socklen uint32 + +type Linger struct { + Onoff int32 + Linger int32 +} + +type Iovec struct { + Base *byte + Len uint32 +} + +type IPMreq struct { + Multiaddr [4]byte /* in_addr */ + Interface [4]byte /* in_addr */ +} + +type IPv6Mreq struct { + Multiaddr [16]byte /* in6_addr */ + Interface uint32 +} + +type Msghdr struct { + Name *byte + Namelen uint32 + Iov *Iovec + Iovlen int32 + Control *byte + Controllen uint32 + Flags int32 +} + +type Cmsghdr struct { + Len uint32 + Level int32 + Type int32 +} + +type Inet6Pktinfo struct { + Addr [16]byte /* in6_addr */ + Ifindex uint32 +} + +type IPv6MTUInfo struct { + Addr RawSockaddrInet6 + Mtu uint32 +} + +type ICMPv6Filter struct { + Filt [8]uint32 +} + +const ( + SizeofSockaddrInet4 = 0x10 + SizeofSockaddrInet6 = 0x1c + SizeofSockaddrAny = 0x6c + SizeofSockaddrUnix = 0x6a + SizeofSockaddrDatalink = 0x14 + SizeofLinger = 0x8 + SizeofIPMreq = 0x8 + SizeofIPv6Mreq = 0x14 + SizeofMsghdr = 0x1c + SizeofCmsghdr = 0xc + SizeofInet6Pktinfo = 0x14 + SizeofIPv6MTUInfo = 0x20 + SizeofICMPv6Filter = 0x20 +) + +const ( + PTRACE_TRACEME = 0x0 + PTRACE_CONT = 0x7 + PTRACE_KILL = 0x8 +) + +type Kevent_t struct { + Ident uint32 + Filter uint32 + Flags uint32 + Fflags uint32 + Data int64 + Udata int32 +} + +type FdSet struct { + Bits [8]uint32 +} + +const ( + SizeofIfMsghdr = 0x98 + SizeofIfData = 0x84 + SizeofIfaMsghdr = 0x18 + SizeofIfAnnounceMsghdr = 0x18 + SizeofRtMsghdr = 0x78 + SizeofRtMetrics = 0x50 +) + +type IfMsghdr struct { + Msglen uint16 + Version uint8 + Type uint8 + Addrs int32 + Flags int32 + Index uint16 + Pad_cgo_0 [2]byte + Data IfData + Pad_cgo_1 [4]byte +} + +type IfData struct { + Type uint8 + Addrlen uint8 + Hdrlen uint8 + Pad_cgo_0 [1]byte + Link_state int32 + Mtu uint64 + Metric uint64 + Baudrate uint64 + Ipackets uint64 + Ierrors uint64 + Opackets uint64 + Oerrors uint64 + 
Collisions uint64 + Ibytes uint64 + Obytes uint64 + Imcasts uint64 + Omcasts uint64 + Iqdrops uint64 + Noproto uint64 + Lastchange Timespec +} + +type IfaMsghdr struct { + Msglen uint16 + Version uint8 + Type uint8 + Addrs int32 + Flags int32 + Metric int32 + Index uint16 + Pad_cgo_0 [6]byte +} + +type IfAnnounceMsghdr struct { + Msglen uint16 + Version uint8 + Type uint8 + Index uint16 + Name [16]int8 + What uint16 +} + +type RtMsghdr struct { + Msglen uint16 + Version uint8 + Type uint8 + Index uint16 + Pad_cgo_0 [2]byte + Flags int32 + Addrs int32 + Pid int32 + Seq int32 + Errno int32 + Use int32 + Inits int32 + Pad_cgo_1 [4]byte + Rmx RtMetrics +} + +type RtMetrics struct { + Locks uint64 + Mtu uint64 + Hopcount uint64 + Recvpipe uint64 + Sendpipe uint64 + Ssthresh uint64 + Rtt uint64 + Rttvar uint64 + Expire int64 + Pksent int64 +} + +type Mclpool [0]byte + +const ( + SizeofBpfVersion = 0x4 + SizeofBpfStat = 0x80 + SizeofBpfProgram = 0x8 + SizeofBpfInsn = 0x8 + SizeofBpfHdr = 0x14 +) + +type BpfVersion struct { + Major uint16 + Minor uint16 +} + +type BpfStat struct { + Recv uint64 + Drop uint64 + Capt uint64 + Padding [13]uint64 +} + +type BpfProgram struct { + Len uint32 + Insns *BpfInsn +} + +type BpfInsn struct { + Code uint16 + Jt uint8 + Jf uint8 + K uint32 +} + +type BpfHdr struct { + Tstamp BpfTimeval + Caplen uint32 + Datalen uint32 + Hdrlen uint16 + Pad_cgo_0 [2]byte +} + +type BpfTimeval struct { + Sec int32 + Usec int32 +} + +type Termios struct { + Iflag uint32 + Oflag uint32 + Cflag uint32 + Lflag uint32 + Cc [20]uint8 + Ispeed int32 + Ospeed int32 +} + +type Sysctlnode struct { + Flags uint32 + Num int32 + Name [32]int8 + Ver uint32 + X__rsvd uint32 + Un [16]byte + X_sysctl_size [8]byte + X_sysctl_func [8]byte + X_sysctl_parent [8]byte + X_sysctl_desc [8]byte +} diff --git a/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/ztypes_netbsd_amd64.go b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/ztypes_netbsd_amd64.go new file mode 100644 index 000000000..91b4a5305 --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/ztypes_netbsd_amd64.go @@ -0,0 +1,403 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs types_netbsd.go + +// +build amd64,netbsd + +package unix + +const ( + sizeofPtr = 0x8 + sizeofShort = 0x2 + sizeofInt = 0x4 + sizeofLong = 0x8 + sizeofLongLong = 0x8 +) + +type ( + _C_short int16 + _C_int int32 + _C_long int64 + _C_long_long int64 +) + +type Timespec struct { + Sec int64 + Nsec int64 +} + +type Timeval struct { + Sec int64 + Usec int32 + Pad_cgo_0 [4]byte +} + +type Rusage struct { + Utime Timeval + Stime Timeval + Maxrss int64 + Ixrss int64 + Idrss int64 + Isrss int64 + Minflt int64 + Majflt int64 + Nswap int64 + Inblock int64 + Oublock int64 + Msgsnd int64 + Msgrcv int64 + Nsignals int64 + Nvcsw int64 + Nivcsw int64 +} + +type Rlimit struct { + Cur uint64 + Max uint64 +} + +type _Gid_t uint32 + +type Stat_t struct { + Dev uint64 + Mode uint32 + Pad_cgo_0 [4]byte + Ino uint64 + Nlink uint32 + Uid uint32 + Gid uint32 + Pad_cgo_1 [4]byte + Rdev uint64 + Atimespec Timespec + Mtimespec Timespec + Ctimespec Timespec + Birthtimespec Timespec + Size int64 + Blocks int64 + Blksize uint32 + Flags uint32 + Gen uint32 + Spare [2]uint32 + Pad_cgo_2 [4]byte +} + +type Statfs_t [0]byte + +type Flock_t struct { + Start int64 + Len int64 + Pid int32 + Type int16 + Whence int16 +} + +type Dirent struct { + Fileno uint64 + Reclen uint16 + Namlen uint16 + Type uint8 + Name 
[512]int8 + Pad_cgo_0 [3]byte +} + +type Fsid struct { + X__fsid_val [2]int32 +} + +type RawSockaddrInet4 struct { + Len uint8 + Family uint8 + Port uint16 + Addr [4]byte /* in_addr */ + Zero [8]int8 +} + +type RawSockaddrInet6 struct { + Len uint8 + Family uint8 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +type RawSockaddrUnix struct { + Len uint8 + Family uint8 + Path [104]int8 +} + +type RawSockaddrDatalink struct { + Len uint8 + Family uint8 + Index uint16 + Type uint8 + Nlen uint8 + Alen uint8 + Slen uint8 + Data [12]int8 +} + +type RawSockaddr struct { + Len uint8 + Family uint8 + Data [14]int8 +} + +type RawSockaddrAny struct { + Addr RawSockaddr + Pad [92]int8 +} + +type _Socklen uint32 + +type Linger struct { + Onoff int32 + Linger int32 +} + +type Iovec struct { + Base *byte + Len uint64 +} + +type IPMreq struct { + Multiaddr [4]byte /* in_addr */ + Interface [4]byte /* in_addr */ +} + +type IPv6Mreq struct { + Multiaddr [16]byte /* in6_addr */ + Interface uint32 +} + +type Msghdr struct { + Name *byte + Namelen uint32 + Pad_cgo_0 [4]byte + Iov *Iovec + Iovlen int32 + Pad_cgo_1 [4]byte + Control *byte + Controllen uint32 + Flags int32 +} + +type Cmsghdr struct { + Len uint32 + Level int32 + Type int32 +} + +type Inet6Pktinfo struct { + Addr [16]byte /* in6_addr */ + Ifindex uint32 +} + +type IPv6MTUInfo struct { + Addr RawSockaddrInet6 + Mtu uint32 +} + +type ICMPv6Filter struct { + Filt [8]uint32 +} + +const ( + SizeofSockaddrInet4 = 0x10 + SizeofSockaddrInet6 = 0x1c + SizeofSockaddrAny = 0x6c + SizeofSockaddrUnix = 0x6a + SizeofSockaddrDatalink = 0x14 + SizeofLinger = 0x8 + SizeofIPMreq = 0x8 + SizeofIPv6Mreq = 0x14 + SizeofMsghdr = 0x30 + SizeofCmsghdr = 0xc + SizeofInet6Pktinfo = 0x14 + SizeofIPv6MTUInfo = 0x20 + SizeofICMPv6Filter = 0x20 +) + +const ( + PTRACE_TRACEME = 0x0 + PTRACE_CONT = 0x7 + PTRACE_KILL = 0x8 +) + +type Kevent_t struct { + Ident uint64 + Filter uint32 + Flags uint32 + Fflags uint32 + Pad_cgo_0 [4]byte + Data int64 + Udata int64 +} + +type FdSet struct { + Bits [8]uint32 +} + +const ( + SizeofIfMsghdr = 0x98 + SizeofIfData = 0x88 + SizeofIfaMsghdr = 0x18 + SizeofIfAnnounceMsghdr = 0x18 + SizeofRtMsghdr = 0x78 + SizeofRtMetrics = 0x50 +) + +type IfMsghdr struct { + Msglen uint16 + Version uint8 + Type uint8 + Addrs int32 + Flags int32 + Index uint16 + Pad_cgo_0 [2]byte + Data IfData +} + +type IfData struct { + Type uint8 + Addrlen uint8 + Hdrlen uint8 + Pad_cgo_0 [1]byte + Link_state int32 + Mtu uint64 + Metric uint64 + Baudrate uint64 + Ipackets uint64 + Ierrors uint64 + Opackets uint64 + Oerrors uint64 + Collisions uint64 + Ibytes uint64 + Obytes uint64 + Imcasts uint64 + Omcasts uint64 + Iqdrops uint64 + Noproto uint64 + Lastchange Timespec +} + +type IfaMsghdr struct { + Msglen uint16 + Version uint8 + Type uint8 + Addrs int32 + Flags int32 + Metric int32 + Index uint16 + Pad_cgo_0 [6]byte +} + +type IfAnnounceMsghdr struct { + Msglen uint16 + Version uint8 + Type uint8 + Index uint16 + Name [16]int8 + What uint16 +} + +type RtMsghdr struct { + Msglen uint16 + Version uint8 + Type uint8 + Index uint16 + Pad_cgo_0 [2]byte + Flags int32 + Addrs int32 + Pid int32 + Seq int32 + Errno int32 + Use int32 + Inits int32 + Pad_cgo_1 [4]byte + Rmx RtMetrics +} + +type RtMetrics struct { + Locks uint64 + Mtu uint64 + Hopcount uint64 + Recvpipe uint64 + Sendpipe uint64 + Ssthresh uint64 + Rtt uint64 + Rttvar uint64 + Expire int64 + Pksent int64 +} + +type Mclpool [0]byte + +const ( + SizeofBpfVersion = 0x4 + SizeofBpfStat 
= 0x80 + SizeofBpfProgram = 0x10 + SizeofBpfInsn = 0x8 + SizeofBpfHdr = 0x20 +) + +type BpfVersion struct { + Major uint16 + Minor uint16 +} + +type BpfStat struct { + Recv uint64 + Drop uint64 + Capt uint64 + Padding [13]uint64 +} + +type BpfProgram struct { + Len uint32 + Pad_cgo_0 [4]byte + Insns *BpfInsn +} + +type BpfInsn struct { + Code uint16 + Jt uint8 + Jf uint8 + K uint32 +} + +type BpfHdr struct { + Tstamp BpfTimeval + Caplen uint32 + Datalen uint32 + Hdrlen uint16 + Pad_cgo_0 [6]byte +} + +type BpfTimeval struct { + Sec int64 + Usec int64 +} + +type Termios struct { + Iflag uint32 + Oflag uint32 + Cflag uint32 + Lflag uint32 + Cc [20]uint8 + Ispeed int32 + Ospeed int32 +} + +type Sysctlnode struct { + Flags uint32 + Num int32 + Name [32]int8 + Ver uint32 + X__rsvd uint32 + Un [16]byte + X_sysctl_size [8]byte + X_sysctl_func [8]byte + X_sysctl_parent [8]byte + X_sysctl_desc [8]byte +} diff --git a/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/ztypes_netbsd_arm.go b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/ztypes_netbsd_arm.go new file mode 100644 index 000000000..c0758f9d3 --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/ztypes_netbsd_arm.go @@ -0,0 +1,401 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs types_netbsd.go + +// +build arm,netbsd + +package unix + +const ( + sizeofPtr = 0x4 + sizeofShort = 0x2 + sizeofInt = 0x4 + sizeofLong = 0x4 + sizeofLongLong = 0x8 +) + +type ( + _C_short int16 + _C_int int32 + _C_long int32 + _C_long_long int64 +) + +type Timespec struct { + Sec int64 + Nsec int32 + Pad_cgo_0 [4]byte +} + +type Timeval struct { + Sec int64 + Usec int32 + Pad_cgo_0 [4]byte +} + +type Rusage struct { + Utime Timeval + Stime Timeval + Maxrss int32 + Ixrss int32 + Idrss int32 + Isrss int32 + Minflt int32 + Majflt int32 + Nswap int32 + Inblock int32 + Oublock int32 + Msgsnd int32 + Msgrcv int32 + Nsignals int32 + Nvcsw int32 + Nivcsw int32 +} + +type Rlimit struct { + Cur uint64 + Max uint64 +} + +type _Gid_t uint32 + +type Stat_t struct { + Dev uint64 + Mode uint32 + Pad_cgo_0 [4]byte + Ino uint64 + Nlink uint32 + Uid uint32 + Gid uint32 + Pad_cgo_1 [4]byte + Rdev uint64 + Atimespec Timespec + Mtimespec Timespec + Ctimespec Timespec + Birthtimespec Timespec + Size int64 + Blocks int64 + Blksize uint32 + Flags uint32 + Gen uint32 + Spare [2]uint32 + Pad_cgo_2 [4]byte +} + +type Statfs_t [0]byte + +type Flock_t struct { + Start int64 + Len int64 + Pid int32 + Type int16 + Whence int16 +} + +type Dirent struct { + Fileno uint64 + Reclen uint16 + Namlen uint16 + Type uint8 + Name [512]int8 + Pad_cgo_0 [3]byte +} + +type Fsid struct { + X__fsid_val [2]int32 +} + +type RawSockaddrInet4 struct { + Len uint8 + Family uint8 + Port uint16 + Addr [4]byte /* in_addr */ + Zero [8]int8 +} + +type RawSockaddrInet6 struct { + Len uint8 + Family uint8 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +type RawSockaddrUnix struct { + Len uint8 + Family uint8 + Path [104]int8 +} + +type RawSockaddrDatalink struct { + Len uint8 + Family uint8 + Index uint16 + Type uint8 + Nlen uint8 + Alen uint8 + Slen uint8 + Data [12]int8 +} + +type RawSockaddr struct { + Len uint8 + Family uint8 + Data [14]int8 +} + +type RawSockaddrAny struct { + Addr RawSockaddr + Pad [92]int8 +} + +type _Socklen uint32 + +type Linger struct { + Onoff int32 + Linger int32 +} + +type Iovec struct { + Base *byte + Len uint32 +} + +type IPMreq struct { + Multiaddr 
[4]byte /* in_addr */ + Interface [4]byte /* in_addr */ +} + +type IPv6Mreq struct { + Multiaddr [16]byte /* in6_addr */ + Interface uint32 +} + +type Msghdr struct { + Name *byte + Namelen uint32 + Iov *Iovec + Iovlen int32 + Control *byte + Controllen uint32 + Flags int32 +} + +type Cmsghdr struct { + Len uint32 + Level int32 + Type int32 +} + +type Inet6Pktinfo struct { + Addr [16]byte /* in6_addr */ + Ifindex uint32 +} + +type IPv6MTUInfo struct { + Addr RawSockaddrInet6 + Mtu uint32 +} + +type ICMPv6Filter struct { + Filt [8]uint32 +} + +const ( + SizeofSockaddrInet4 = 0x10 + SizeofSockaddrInet6 = 0x1c + SizeofSockaddrAny = 0x6c + SizeofSockaddrUnix = 0x6a + SizeofSockaddrDatalink = 0x14 + SizeofLinger = 0x8 + SizeofIPMreq = 0x8 + SizeofIPv6Mreq = 0x14 + SizeofMsghdr = 0x1c + SizeofCmsghdr = 0xc + SizeofInet6Pktinfo = 0x14 + SizeofIPv6MTUInfo = 0x20 + SizeofICMPv6Filter = 0x20 +) + +const ( + PTRACE_TRACEME = 0x0 + PTRACE_CONT = 0x7 + PTRACE_KILL = 0x8 +) + +type Kevent_t struct { + Ident uint32 + Filter uint32 + Flags uint32 + Fflags uint32 + Data int64 + Udata int32 + Pad_cgo_0 [4]byte +} + +type FdSet struct { + Bits [8]uint32 +} + +const ( + SizeofIfMsghdr = 0x98 + SizeofIfData = 0x88 + SizeofIfaMsghdr = 0x18 + SizeofIfAnnounceMsghdr = 0x18 + SizeofRtMsghdr = 0x78 + SizeofRtMetrics = 0x50 +) + +type IfMsghdr struct { + Msglen uint16 + Version uint8 + Type uint8 + Addrs int32 + Flags int32 + Index uint16 + Pad_cgo_0 [2]byte + Data IfData +} + +type IfData struct { + Type uint8 + Addrlen uint8 + Hdrlen uint8 + Pad_cgo_0 [1]byte + Link_state int32 + Mtu uint64 + Metric uint64 + Baudrate uint64 + Ipackets uint64 + Ierrors uint64 + Opackets uint64 + Oerrors uint64 + Collisions uint64 + Ibytes uint64 + Obytes uint64 + Imcasts uint64 + Omcasts uint64 + Iqdrops uint64 + Noproto uint64 + Lastchange Timespec +} + +type IfaMsghdr struct { + Msglen uint16 + Version uint8 + Type uint8 + Addrs int32 + Flags int32 + Metric int32 + Index uint16 + Pad_cgo_0 [6]byte +} + +type IfAnnounceMsghdr struct { + Msglen uint16 + Version uint8 + Type uint8 + Index uint16 + Name [16]int8 + What uint16 +} + +type RtMsghdr struct { + Msglen uint16 + Version uint8 + Type uint8 + Index uint16 + Pad_cgo_0 [2]byte + Flags int32 + Addrs int32 + Pid int32 + Seq int32 + Errno int32 + Use int32 + Inits int32 + Pad_cgo_1 [4]byte + Rmx RtMetrics +} + +type RtMetrics struct { + Locks uint64 + Mtu uint64 + Hopcount uint64 + Recvpipe uint64 + Sendpipe uint64 + Ssthresh uint64 + Rtt uint64 + Rttvar uint64 + Expire int64 + Pksent int64 +} + +type Mclpool [0]byte + +const ( + SizeofBpfVersion = 0x4 + SizeofBpfStat = 0x80 + SizeofBpfProgram = 0x8 + SizeofBpfInsn = 0x8 + SizeofBpfHdr = 0x14 +) + +type BpfVersion struct { + Major uint16 + Minor uint16 +} + +type BpfStat struct { + Recv uint64 + Drop uint64 + Capt uint64 + Padding [13]uint64 +} + +type BpfProgram struct { + Len uint32 + Insns *BpfInsn +} + +type BpfInsn struct { + Code uint16 + Jt uint8 + Jf uint8 + K uint32 +} + +type BpfHdr struct { + Tstamp BpfTimeval + Caplen uint32 + Datalen uint32 + Hdrlen uint16 + Pad_cgo_0 [2]byte +} + +type BpfTimeval struct { + Sec int32 + Usec int32 +} + +type Termios struct { + Iflag uint32 + Oflag uint32 + Cflag uint32 + Lflag uint32 + Cc [20]uint8 + Ispeed int32 + Ospeed int32 +} + +type Sysctlnode struct { + Flags uint32 + Num int32 + Name [32]int8 + Ver uint32 + X__rsvd uint32 + Un [16]byte + X_sysctl_size [8]byte + X_sysctl_func [8]byte + X_sysctl_parent [8]byte + X_sysctl_desc [8]byte +} diff --git 
a/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/ztypes_openbsd_386.go b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/ztypes_openbsd_386.go new file mode 100644 index 000000000..860a46979 --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/ztypes_openbsd_386.go @@ -0,0 +1,441 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs types_openbsd.go + +// +build 386,openbsd + +package unix + +const ( + sizeofPtr = 0x4 + sizeofShort = 0x2 + sizeofInt = 0x4 + sizeofLong = 0x4 + sizeofLongLong = 0x8 +) + +type ( + _C_short int16 + _C_int int32 + _C_long int32 + _C_long_long int64 +) + +type Timespec struct { + Sec int64 + Nsec int32 +} + +type Timeval struct { + Sec int64 + Usec int32 +} + +type Rusage struct { + Utime Timeval + Stime Timeval + Maxrss int32 + Ixrss int32 + Idrss int32 + Isrss int32 + Minflt int32 + Majflt int32 + Nswap int32 + Inblock int32 + Oublock int32 + Msgsnd int32 + Msgrcv int32 + Nsignals int32 + Nvcsw int32 + Nivcsw int32 +} + +type Rlimit struct { + Cur uint64 + Max uint64 +} + +type _Gid_t uint32 + +const ( + S_IFMT = 0xf000 + S_IFIFO = 0x1000 + S_IFCHR = 0x2000 + S_IFDIR = 0x4000 + S_IFBLK = 0x6000 + S_IFREG = 0x8000 + S_IFLNK = 0xa000 + S_IFSOCK = 0xc000 + S_ISUID = 0x800 + S_ISGID = 0x400 + S_ISVTX = 0x200 + S_IRUSR = 0x100 + S_IWUSR = 0x80 + S_IXUSR = 0x40 +) + +type Stat_t struct { + Mode uint32 + Dev int32 + Ino uint64 + Nlink uint32 + Uid uint32 + Gid uint32 + Rdev int32 + Atim Timespec + Mtim Timespec + Ctim Timespec + Size int64 + Blocks int64 + Blksize uint32 + Flags uint32 + Gen uint32 + X__st_birthtim Timespec +} + +type Statfs_t struct { + F_flags uint32 + F_bsize uint32 + F_iosize uint32 + F_blocks uint64 + F_bfree uint64 + F_bavail int64 + F_files uint64 + F_ffree uint64 + F_favail int64 + F_syncwrites uint64 + F_syncreads uint64 + F_asyncwrites uint64 + F_asyncreads uint64 + F_fsid Fsid + F_namemax uint32 + F_owner uint32 + F_ctime uint64 + F_fstypename [16]int8 + F_mntonname [90]int8 + F_mntfromname [90]int8 + F_mntfromspec [90]int8 + Pad_cgo_0 [2]byte + Mount_info [160]byte +} + +type Flock_t struct { + Start int64 + Len int64 + Pid int32 + Type int16 + Whence int16 +} + +type Dirent struct { + Fileno uint64 + Off int64 + Reclen uint16 + Type uint8 + Namlen uint8 + X__d_padding [4]uint8 + Name [256]int8 +} + +type Fsid struct { + Val [2]int32 +} + +type RawSockaddrInet4 struct { + Len uint8 + Family uint8 + Port uint16 + Addr [4]byte /* in_addr */ + Zero [8]int8 +} + +type RawSockaddrInet6 struct { + Len uint8 + Family uint8 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +type RawSockaddrUnix struct { + Len uint8 + Family uint8 + Path [104]int8 +} + +type RawSockaddrDatalink struct { + Len uint8 + Family uint8 + Index uint16 + Type uint8 + Nlen uint8 + Alen uint8 + Slen uint8 + Data [24]int8 +} + +type RawSockaddr struct { + Len uint8 + Family uint8 + Data [14]int8 +} + +type RawSockaddrAny struct { + Addr RawSockaddr + Pad [92]int8 +} + +type _Socklen uint32 + +type Linger struct { + Onoff int32 + Linger int32 +} + +type Iovec struct { + Base *byte + Len uint32 +} + +type IPMreq struct { + Multiaddr [4]byte /* in_addr */ + Interface [4]byte /* in_addr */ +} + +type IPv6Mreq struct { + Multiaddr [16]byte /* in6_addr */ + Interface uint32 +} + +type Msghdr struct { + Name *byte + Namelen uint32 + Iov *Iovec + Iovlen uint32 + Control *byte + Controllen uint32 + Flags int32 +} + +type Cmsghdr struct { + Len uint32 + 
Level int32 + Type int32 +} + +type Inet6Pktinfo struct { + Addr [16]byte /* in6_addr */ + Ifindex uint32 +} + +type IPv6MTUInfo struct { + Addr RawSockaddrInet6 + Mtu uint32 +} + +type ICMPv6Filter struct { + Filt [8]uint32 +} + +const ( + SizeofSockaddrInet4 = 0x10 + SizeofSockaddrInet6 = 0x1c + SizeofSockaddrAny = 0x6c + SizeofSockaddrUnix = 0x6a + SizeofSockaddrDatalink = 0x20 + SizeofLinger = 0x8 + SizeofIPMreq = 0x8 + SizeofIPv6Mreq = 0x14 + SizeofMsghdr = 0x1c + SizeofCmsghdr = 0xc + SizeofInet6Pktinfo = 0x14 + SizeofIPv6MTUInfo = 0x20 + SizeofICMPv6Filter = 0x20 +) + +const ( + PTRACE_TRACEME = 0x0 + PTRACE_CONT = 0x7 + PTRACE_KILL = 0x8 +) + +type Kevent_t struct { + Ident uint32 + Filter int16 + Flags uint16 + Fflags uint32 + Data int64 + Udata *byte +} + +type FdSet struct { + Bits [32]uint32 +} + +const ( + SizeofIfMsghdr = 0xec + SizeofIfData = 0xd4 + SizeofIfaMsghdr = 0x18 + SizeofIfAnnounceMsghdr = 0x1a + SizeofRtMsghdr = 0x60 + SizeofRtMetrics = 0x38 +) + +type IfMsghdr struct { + Msglen uint16 + Version uint8 + Type uint8 + Hdrlen uint16 + Index uint16 + Tableid uint16 + Pad1 uint8 + Pad2 uint8 + Addrs int32 + Flags int32 + Xflags int32 + Data IfData +} + +type IfData struct { + Type uint8 + Addrlen uint8 + Hdrlen uint8 + Link_state uint8 + Mtu uint32 + Metric uint32 + Pad uint32 + Baudrate uint64 + Ipackets uint64 + Ierrors uint64 + Opackets uint64 + Oerrors uint64 + Collisions uint64 + Ibytes uint64 + Obytes uint64 + Imcasts uint64 + Omcasts uint64 + Iqdrops uint64 + Noproto uint64 + Capabilities uint32 + Lastchange Timeval + Mclpool [7]Mclpool +} + +type IfaMsghdr struct { + Msglen uint16 + Version uint8 + Type uint8 + Hdrlen uint16 + Index uint16 + Tableid uint16 + Pad1 uint8 + Pad2 uint8 + Addrs int32 + Flags int32 + Metric int32 +} + +type IfAnnounceMsghdr struct { + Msglen uint16 + Version uint8 + Type uint8 + Hdrlen uint16 + Index uint16 + What uint16 + Name [16]int8 +} + +type RtMsghdr struct { + Msglen uint16 + Version uint8 + Type uint8 + Hdrlen uint16 + Index uint16 + Tableid uint16 + Priority uint8 + Mpls uint8 + Addrs int32 + Flags int32 + Fmask int32 + Pid int32 + Seq int32 + Errno int32 + Inits uint32 + Rmx RtMetrics +} + +type RtMetrics struct { + Pksent uint64 + Expire int64 + Locks uint32 + Mtu uint32 + Refcnt uint32 + Hopcount uint32 + Recvpipe uint32 + Sendpipe uint32 + Ssthresh uint32 + Rtt uint32 + Rttvar uint32 + Pad uint32 +} + +type Mclpool struct { + Grown int32 + Alive uint16 + Hwm uint16 + Cwm uint16 + Lwm uint16 +} + +const ( + SizeofBpfVersion = 0x4 + SizeofBpfStat = 0x8 + SizeofBpfProgram = 0x8 + SizeofBpfInsn = 0x8 + SizeofBpfHdr = 0x14 +) + +type BpfVersion struct { + Major uint16 + Minor uint16 +} + +type BpfStat struct { + Recv uint32 + Drop uint32 +} + +type BpfProgram struct { + Len uint32 + Insns *BpfInsn +} + +type BpfInsn struct { + Code uint16 + Jt uint8 + Jf uint8 + K uint32 +} + +type BpfHdr struct { + Tstamp BpfTimeval + Caplen uint32 + Datalen uint32 + Hdrlen uint16 + Pad_cgo_0 [2]byte +} + +type BpfTimeval struct { + Sec uint32 + Usec uint32 +} + +type Termios struct { + Iflag uint32 + Oflag uint32 + Cflag uint32 + Lflag uint32 + Cc [20]uint8 + Ispeed int32 + Ospeed int32 +} diff --git a/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/ztypes_openbsd_amd64.go b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/ztypes_openbsd_amd64.go new file mode 100644 index 000000000..23c52727f --- /dev/null +++ 
b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/ztypes_openbsd_amd64.go @@ -0,0 +1,448 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs types_openbsd.go + +// +build amd64,openbsd + +package unix + +const ( + sizeofPtr = 0x8 + sizeofShort = 0x2 + sizeofInt = 0x4 + sizeofLong = 0x8 + sizeofLongLong = 0x8 +) + +type ( + _C_short int16 + _C_int int32 + _C_long int64 + _C_long_long int64 +) + +type Timespec struct { + Sec int64 + Nsec int64 +} + +type Timeval struct { + Sec int64 + Usec int64 +} + +type Rusage struct { + Utime Timeval + Stime Timeval + Maxrss int64 + Ixrss int64 + Idrss int64 + Isrss int64 + Minflt int64 + Majflt int64 + Nswap int64 + Inblock int64 + Oublock int64 + Msgsnd int64 + Msgrcv int64 + Nsignals int64 + Nvcsw int64 + Nivcsw int64 +} + +type Rlimit struct { + Cur uint64 + Max uint64 +} + +type _Gid_t uint32 + +const ( + S_IFMT = 0xf000 + S_IFIFO = 0x1000 + S_IFCHR = 0x2000 + S_IFDIR = 0x4000 + S_IFBLK = 0x6000 + S_IFREG = 0x8000 + S_IFLNK = 0xa000 + S_IFSOCK = 0xc000 + S_ISUID = 0x800 + S_ISGID = 0x400 + S_ISVTX = 0x200 + S_IRUSR = 0x100 + S_IWUSR = 0x80 + S_IXUSR = 0x40 +) + +type Stat_t struct { + Mode uint32 + Dev int32 + Ino uint64 + Nlink uint32 + Uid uint32 + Gid uint32 + Rdev int32 + Atim Timespec + Mtim Timespec + Ctim Timespec + Size int64 + Blocks int64 + Blksize uint32 + Flags uint32 + Gen uint32 + Pad_cgo_0 [4]byte + X__st_birthtim Timespec +} + +type Statfs_t struct { + F_flags uint32 + F_bsize uint32 + F_iosize uint32 + Pad_cgo_0 [4]byte + F_blocks uint64 + F_bfree uint64 + F_bavail int64 + F_files uint64 + F_ffree uint64 + F_favail int64 + F_syncwrites uint64 + F_syncreads uint64 + F_asyncwrites uint64 + F_asyncreads uint64 + F_fsid Fsid + F_namemax uint32 + F_owner uint32 + F_ctime uint64 + F_fstypename [16]int8 + F_mntonname [90]int8 + F_mntfromname [90]int8 + F_mntfromspec [90]int8 + Pad_cgo_1 [2]byte + Mount_info [160]byte +} + +type Flock_t struct { + Start int64 + Len int64 + Pid int32 + Type int16 + Whence int16 +} + +type Dirent struct { + Fileno uint64 + Off int64 + Reclen uint16 + Type uint8 + Namlen uint8 + X__d_padding [4]uint8 + Name [256]int8 +} + +type Fsid struct { + Val [2]int32 +} + +type RawSockaddrInet4 struct { + Len uint8 + Family uint8 + Port uint16 + Addr [4]byte /* in_addr */ + Zero [8]int8 +} + +type RawSockaddrInet6 struct { + Len uint8 + Family uint8 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +type RawSockaddrUnix struct { + Len uint8 + Family uint8 + Path [104]int8 +} + +type RawSockaddrDatalink struct { + Len uint8 + Family uint8 + Index uint16 + Type uint8 + Nlen uint8 + Alen uint8 + Slen uint8 + Data [24]int8 +} + +type RawSockaddr struct { + Len uint8 + Family uint8 + Data [14]int8 +} + +type RawSockaddrAny struct { + Addr RawSockaddr + Pad [92]int8 +} + +type _Socklen uint32 + +type Linger struct { + Onoff int32 + Linger int32 +} + +type Iovec struct { + Base *byte + Len uint64 +} + +type IPMreq struct { + Multiaddr [4]byte /* in_addr */ + Interface [4]byte /* in_addr */ +} + +type IPv6Mreq struct { + Multiaddr [16]byte /* in6_addr */ + Interface uint32 +} + +type Msghdr struct { + Name *byte + Namelen uint32 + Pad_cgo_0 [4]byte + Iov *Iovec + Iovlen uint32 + Pad_cgo_1 [4]byte + Control *byte + Controllen uint32 + Flags int32 +} + +type Cmsghdr struct { + Len uint32 + Level int32 + Type int32 +} + +type Inet6Pktinfo struct { + Addr [16]byte /* in6_addr */ + Ifindex uint32 +} + +type IPv6MTUInfo struct { + Addr RawSockaddrInet6 + Mtu uint32 +} 
+ +type ICMPv6Filter struct { + Filt [8]uint32 +} + +const ( + SizeofSockaddrInet4 = 0x10 + SizeofSockaddrInet6 = 0x1c + SizeofSockaddrAny = 0x6c + SizeofSockaddrUnix = 0x6a + SizeofSockaddrDatalink = 0x20 + SizeofLinger = 0x8 + SizeofIPMreq = 0x8 + SizeofIPv6Mreq = 0x14 + SizeofMsghdr = 0x30 + SizeofCmsghdr = 0xc + SizeofInet6Pktinfo = 0x14 + SizeofIPv6MTUInfo = 0x20 + SizeofICMPv6Filter = 0x20 +) + +const ( + PTRACE_TRACEME = 0x0 + PTRACE_CONT = 0x7 + PTRACE_KILL = 0x8 +) + +type Kevent_t struct { + Ident uint64 + Filter int16 + Flags uint16 + Fflags uint32 + Data int64 + Udata *byte +} + +type FdSet struct { + Bits [32]uint32 +} + +const ( + SizeofIfMsghdr = 0xf8 + SizeofIfData = 0xe0 + SizeofIfaMsghdr = 0x18 + SizeofIfAnnounceMsghdr = 0x1a + SizeofRtMsghdr = 0x60 + SizeofRtMetrics = 0x38 +) + +type IfMsghdr struct { + Msglen uint16 + Version uint8 + Type uint8 + Hdrlen uint16 + Index uint16 + Tableid uint16 + Pad1 uint8 + Pad2 uint8 + Addrs int32 + Flags int32 + Xflags int32 + Data IfData +} + +type IfData struct { + Type uint8 + Addrlen uint8 + Hdrlen uint8 + Link_state uint8 + Mtu uint32 + Metric uint32 + Pad uint32 + Baudrate uint64 + Ipackets uint64 + Ierrors uint64 + Opackets uint64 + Oerrors uint64 + Collisions uint64 + Ibytes uint64 + Obytes uint64 + Imcasts uint64 + Omcasts uint64 + Iqdrops uint64 + Noproto uint64 + Capabilities uint32 + Pad_cgo_0 [4]byte + Lastchange Timeval + Mclpool [7]Mclpool + Pad_cgo_1 [4]byte +} + +type IfaMsghdr struct { + Msglen uint16 + Version uint8 + Type uint8 + Hdrlen uint16 + Index uint16 + Tableid uint16 + Pad1 uint8 + Pad2 uint8 + Addrs int32 + Flags int32 + Metric int32 +} + +type IfAnnounceMsghdr struct { + Msglen uint16 + Version uint8 + Type uint8 + Hdrlen uint16 + Index uint16 + What uint16 + Name [16]int8 +} + +type RtMsghdr struct { + Msglen uint16 + Version uint8 + Type uint8 + Hdrlen uint16 + Index uint16 + Tableid uint16 + Priority uint8 + Mpls uint8 + Addrs int32 + Flags int32 + Fmask int32 + Pid int32 + Seq int32 + Errno int32 + Inits uint32 + Rmx RtMetrics +} + +type RtMetrics struct { + Pksent uint64 + Expire int64 + Locks uint32 + Mtu uint32 + Refcnt uint32 + Hopcount uint32 + Recvpipe uint32 + Sendpipe uint32 + Ssthresh uint32 + Rtt uint32 + Rttvar uint32 + Pad uint32 +} + +type Mclpool struct { + Grown int32 + Alive uint16 + Hwm uint16 + Cwm uint16 + Lwm uint16 +} + +const ( + SizeofBpfVersion = 0x4 + SizeofBpfStat = 0x8 + SizeofBpfProgram = 0x10 + SizeofBpfInsn = 0x8 + SizeofBpfHdr = 0x14 +) + +type BpfVersion struct { + Major uint16 + Minor uint16 +} + +type BpfStat struct { + Recv uint32 + Drop uint32 +} + +type BpfProgram struct { + Len uint32 + Pad_cgo_0 [4]byte + Insns *BpfInsn +} + +type BpfInsn struct { + Code uint16 + Jt uint8 + Jf uint8 + K uint32 +} + +type BpfHdr struct { + Tstamp BpfTimeval + Caplen uint32 + Datalen uint32 + Hdrlen uint16 + Pad_cgo_0 [2]byte +} + +type BpfTimeval struct { + Sec uint32 + Usec uint32 +} + +type Termios struct { + Iflag uint32 + Oflag uint32 + Cflag uint32 + Lflag uint32 + Cc [20]uint8 + Ispeed int32 + Ospeed int32 +} diff --git a/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/ztypes_solaris_amd64.go b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/ztypes_solaris_amd64.go new file mode 100644 index 000000000..b3b928a51 --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/ztypes_solaris_amd64.go @@ -0,0 +1,422 @@ +// +build amd64,solaris +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs 
types_solaris.go + +package unix + +const ( + sizeofPtr = 0x8 + sizeofShort = 0x2 + sizeofInt = 0x4 + sizeofLong = 0x8 + sizeofLongLong = 0x8 + PathMax = 0x400 +) + +type ( + _C_short int16 + _C_int int32 + _C_long int64 + _C_long_long int64 +) + +type Timespec struct { + Sec int64 + Nsec int64 +} + +type Timeval struct { + Sec int64 + Usec int64 +} + +type Timeval32 struct { + Sec int32 + Usec int32 +} + +type Tms struct { + Utime int64 + Stime int64 + Cutime int64 + Cstime int64 +} + +type Utimbuf struct { + Actime int64 + Modtime int64 +} + +type Rusage struct { + Utime Timeval + Stime Timeval + Maxrss int64 + Ixrss int64 + Idrss int64 + Isrss int64 + Minflt int64 + Majflt int64 + Nswap int64 + Inblock int64 + Oublock int64 + Msgsnd int64 + Msgrcv int64 + Nsignals int64 + Nvcsw int64 + Nivcsw int64 +} + +type Rlimit struct { + Cur uint64 + Max uint64 +} + +type _Gid_t uint32 + +const ( + S_IFMT = 0xf000 + S_IFIFO = 0x1000 + S_IFCHR = 0x2000 + S_IFDIR = 0x4000 + S_IFBLK = 0x6000 + S_IFREG = 0x8000 + S_IFLNK = 0xa000 + S_IFSOCK = 0xc000 + S_ISUID = 0x800 + S_ISGID = 0x400 + S_ISVTX = 0x200 + S_IRUSR = 0x100 + S_IWUSR = 0x80 + S_IXUSR = 0x40 +) + +type Stat_t struct { + Dev uint64 + Ino uint64 + Mode uint32 + Nlink uint32 + Uid uint32 + Gid uint32 + Rdev uint64 + Size int64 + Atim Timespec + Mtim Timespec + Ctim Timespec + Blksize int32 + Pad_cgo_0 [4]byte + Blocks int64 + Fstype [16]int8 +} + +type Flock_t struct { + Type int16 + Whence int16 + Pad_cgo_0 [4]byte + Start int64 + Len int64 + Sysid int32 + Pid int32 + Pad [4]int64 +} + +type Dirent struct { + Ino uint64 + Off int64 + Reclen uint16 + Name [1]int8 + Pad_cgo_0 [5]byte +} + +type RawSockaddrInet4 struct { + Family uint16 + Port uint16 + Addr [4]byte /* in_addr */ + Zero [8]int8 +} + +type RawSockaddrInet6 struct { + Family uint16 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 + X__sin6_src_id uint32 +} + +type RawSockaddrUnix struct { + Family uint16 + Path [108]int8 +} + +type RawSockaddrDatalink struct { + Family uint16 + Index uint16 + Type uint8 + Nlen uint8 + Alen uint8 + Slen uint8 + Data [244]int8 +} + +type RawSockaddr struct { + Family uint16 + Data [14]int8 +} + +type RawSockaddrAny struct { + Addr RawSockaddr + Pad [236]int8 +} + +type _Socklen uint32 + +type Linger struct { + Onoff int32 + Linger int32 +} + +type Iovec struct { + Base *int8 + Len uint64 +} + +type IPMreq struct { + Multiaddr [4]byte /* in_addr */ + Interface [4]byte /* in_addr */ +} + +type IPv6Mreq struct { + Multiaddr [16]byte /* in6_addr */ + Interface uint32 +} + +type Msghdr struct { + Name *byte + Namelen uint32 + Pad_cgo_0 [4]byte + Iov *Iovec + Iovlen int32 + Pad_cgo_1 [4]byte + Accrights *int8 + Accrightslen int32 + Pad_cgo_2 [4]byte +} + +type Cmsghdr struct { + Len uint32 + Level int32 + Type int32 +} + +type Inet6Pktinfo struct { + Addr [16]byte /* in6_addr */ + Ifindex uint32 +} + +type IPv6MTUInfo struct { + Addr RawSockaddrInet6 + Mtu uint32 +} + +type ICMPv6Filter struct { + X__icmp6_filt [8]uint32 +} + +const ( + SizeofSockaddrInet4 = 0x10 + SizeofSockaddrInet6 = 0x20 + SizeofSockaddrAny = 0xfc + SizeofSockaddrUnix = 0x6e + SizeofSockaddrDatalink = 0xfc + SizeofLinger = 0x8 + SizeofIPMreq = 0x8 + SizeofIPv6Mreq = 0x14 + SizeofMsghdr = 0x30 + SizeofCmsghdr = 0xc + SizeofInet6Pktinfo = 0x14 + SizeofIPv6MTUInfo = 0x24 + SizeofICMPv6Filter = 0x20 +) + +type FdSet struct { + Bits [1024]int64 +} + +type Utsname struct { + Sysname [257]int8 + Nodename [257]int8 + Release [257]int8 + Version [257]int8 + 
Machine [257]int8 +} + +type Ustat_t struct { + Tfree int64 + Tinode uint64 + Fname [6]int8 + Fpack [6]int8 + Pad_cgo_0 [4]byte +} + +const ( + AT_FDCWD = 0xffd19553 + AT_SYMLINK_NOFOLLOW = 0x1000 + AT_SYMLINK_FOLLOW = 0x2000 + AT_REMOVEDIR = 0x1 + AT_EACCESS = 0x4 +) + +const ( + SizeofIfMsghdr = 0x54 + SizeofIfData = 0x44 + SizeofIfaMsghdr = 0x14 + SizeofRtMsghdr = 0x4c + SizeofRtMetrics = 0x28 +) + +type IfMsghdr struct { + Msglen uint16 + Version uint8 + Type uint8 + Addrs int32 + Flags int32 + Index uint16 + Pad_cgo_0 [2]byte + Data IfData +} + +type IfData struct { + Type uint8 + Addrlen uint8 + Hdrlen uint8 + Pad_cgo_0 [1]byte + Mtu uint32 + Metric uint32 + Baudrate uint32 + Ipackets uint32 + Ierrors uint32 + Opackets uint32 + Oerrors uint32 + Collisions uint32 + Ibytes uint32 + Obytes uint32 + Imcasts uint32 + Omcasts uint32 + Iqdrops uint32 + Noproto uint32 + Lastchange Timeval32 +} + +type IfaMsghdr struct { + Msglen uint16 + Version uint8 + Type uint8 + Addrs int32 + Flags int32 + Index uint16 + Pad_cgo_0 [2]byte + Metric int32 +} + +type RtMsghdr struct { + Msglen uint16 + Version uint8 + Type uint8 + Index uint16 + Pad_cgo_0 [2]byte + Flags int32 + Addrs int32 + Pid int32 + Seq int32 + Errno int32 + Use int32 + Inits uint32 + Rmx RtMetrics +} + +type RtMetrics struct { + Locks uint32 + Mtu uint32 + Hopcount uint32 + Expire uint32 + Recvpipe uint32 + Sendpipe uint32 + Ssthresh uint32 + Rtt uint32 + Rttvar uint32 + Pksent uint32 +} + +const ( + SizeofBpfVersion = 0x4 + SizeofBpfStat = 0x80 + SizeofBpfProgram = 0x10 + SizeofBpfInsn = 0x8 + SizeofBpfHdr = 0x14 +) + +type BpfVersion struct { + Major uint16 + Minor uint16 +} + +type BpfStat struct { + Recv uint64 + Drop uint64 + Capt uint64 + Padding [13]uint64 +} + +type BpfProgram struct { + Len uint32 + Pad_cgo_0 [4]byte + Insns *BpfInsn +} + +type BpfInsn struct { + Code uint16 + Jt uint8 + Jf uint8 + K uint32 +} + +type BpfTimeval struct { + Sec int32 + Usec int32 +} + +type BpfHdr struct { + Tstamp BpfTimeval + Caplen uint32 + Datalen uint32 + Hdrlen uint16 + Pad_cgo_0 [2]byte +} + +const _SC_PAGESIZE = 0xb + +type Termios struct { + Iflag uint32 + Oflag uint32 + Cflag uint32 + Lflag uint32 + Cc [19]uint8 + Pad_cgo_0 [1]byte +} + +type Termio struct { + Iflag uint16 + Oflag uint16 + Cflag uint16 + Lflag uint16 + Line int8 + Cc [8]uint8 + Pad_cgo_0 [1]byte +} + +type Winsize struct { + Row uint16 + Col uint16 + Xpixel uint16 + Ypixel uint16 +} diff --git a/vendor/github.com/fsouza/go-dockerclient/image.go b/vendor/github.com/fsouza/go-dockerclient/image.go new file mode 100644 index 000000000..47da77dbe --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/image.go @@ -0,0 +1,599 @@ +// Copyright 2015 go-dockerclient authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package docker + +import ( + "bytes" + "encoding/base64" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "os" + "time" +) + +// APIImages represent an image returned in the ListImages call. 
+type APIImages struct {
+	ID          string            `json:"Id" yaml:"Id"`
+	RepoTags    []string          `json:"RepoTags,omitempty" yaml:"RepoTags,omitempty"`
+	Created     int64             `json:"Created,omitempty" yaml:"Created,omitempty"`
+	Size        int64             `json:"Size,omitempty" yaml:"Size,omitempty"`
+	VirtualSize int64             `json:"VirtualSize,omitempty" yaml:"VirtualSize,omitempty"`
+	ParentID    string            `json:"ParentId,omitempty" yaml:"ParentId,omitempty"`
+	RepoDigests []string          `json:"RepoDigests,omitempty" yaml:"RepoDigests,omitempty"`
+	Labels      map[string]string `json:"Labels,omitempty" yaml:"Labels,omitempty"`
+}
+
+// Image is the type representing a Docker image and its various properties.
+type Image struct {
+	ID              string    `json:"Id" yaml:"Id"`
+	Parent          string    `json:"Parent,omitempty" yaml:"Parent,omitempty"`
+	Comment         string    `json:"Comment,omitempty" yaml:"Comment,omitempty"`
+	Created         time.Time `json:"Created,omitempty" yaml:"Created,omitempty"`
+	Container       string    `json:"Container,omitempty" yaml:"Container,omitempty"`
+	ContainerConfig Config    `json:"ContainerConfig,omitempty" yaml:"ContainerConfig,omitempty"`
+	DockerVersion   string    `json:"DockerVersion,omitempty" yaml:"DockerVersion,omitempty"`
+	Author          string    `json:"Author,omitempty" yaml:"Author,omitempty"`
+	Config          *Config   `json:"Config,omitempty" yaml:"Config,omitempty"`
+	Architecture    string    `json:"Architecture,omitempty" yaml:"Architecture,omitempty"`
+	Size            int64     `json:"Size,omitempty" yaml:"Size,omitempty"`
+	VirtualSize     int64     `json:"VirtualSize,omitempty" yaml:"VirtualSize,omitempty"`
+	RepoDigests     []string  `json:"RepoDigests,omitempty" yaml:"RepoDigests,omitempty"`
+}
+
+// ImagePre012 serves the same purpose as the Image type except that it is for
+// earlier versions of the Docker API (pre-012 to be specific).
+type ImagePre012 struct {
+	ID              string    `json:"id"`
+	Parent          string    `json:"parent,omitempty"`
+	Comment         string    `json:"comment,omitempty"`
+	Created         time.Time `json:"created"`
+	Container       string    `json:"container,omitempty"`
+	ContainerConfig Config    `json:"container_config,omitempty"`
+	DockerVersion   string    `json:"docker_version,omitempty"`
+	Author          string    `json:"author,omitempty"`
+	Config          *Config   `json:"config,omitempty"`
+	Architecture    string    `json:"architecture,omitempty"`
+	Size            int64     `json:"size,omitempty"`
+}
+
+var (
+	// ErrNoSuchImage is the error returned when the image does not exist.
+	ErrNoSuchImage = errors.New("no such image")
+
+	// ErrMissingRepo is the error returned when the remote repository is
+	// missing.
+	ErrMissingRepo = errors.New("missing remote repository e.g. 'github.com/user/repo'")
+
+	// ErrMissingOutputStream is the error returned when no output stream
+	// is provided to some calls, like BuildImage.
+	ErrMissingOutputStream = errors.New("missing output stream")
+
+	// ErrMultipleContexts is the error returned when both a ContextDir and
+	// InputStream are provided in BuildImageOptions.
+	ErrMultipleContexts = errors.New("image build may not be provided BOTH context dir and input stream")
+
+	// ErrMustSpecifyNames is the error returned when the Names field on
+	// ExportImagesOptions is nil or empty.
+	ErrMustSpecifyNames = errors.New("must specify at least one name to export")
+)
+
+// ListImagesOptions specifies parameters to the ListImages function.
+//
+// See https://goo.gl/xBe1u3 for more details.
+type ListImagesOptions struct {
+	All     bool
+	Filters map[string][]string
+	Digests bool
+	Filter  string
+}
+
+// ListImages returns the list of available images in the server.
+//
+// See https://goo.gl/xBe1u3 for more details.
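// [Editor's note] Minimal usage sketch for the list API above — not part of
// the vendored file; assumes go-dockerclient's NewClient constructor and a
// daemon on the default unix socket:
//
//	package main
//
//	import (
//		"fmt"
//		"log"
//
//		docker "github.com/fsouza/go-dockerclient"
//	)
//
//	func main() {
//		client, err := docker.NewClient("unix:///var/run/docker.sock")
//		if err != nil {
//			log.Fatal(err)
//		}
//		// All: true also reports intermediate layers, not just tagged images.
//		images, err := client.ListImages(docker.ListImagesOptions{All: true})
//		if err != nil {
//			log.Fatal(err)
//		}
//		for _, img := range images {
//			fmt.Println(img.ID, img.RepoTags)
//		}
//	}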
+func (c *Client) ListImages(opts ListImagesOptions) ([]APIImages, error) { + path := "/images/json?" + queryString(opts) + resp, err := c.do("GET", path, doOptions{}) + if err != nil { + return nil, err + } + defer resp.Body.Close() + var images []APIImages + if err := json.NewDecoder(resp.Body).Decode(&images); err != nil { + return nil, err + } + return images, nil +} + +// ImageHistory represent a layer in an image's history returned by the +// ImageHistory call. +type ImageHistory struct { + ID string `json:"Id" yaml:"Id"` + Tags []string `json:"Tags,omitempty" yaml:"Tags,omitempty"` + Created int64 `json:"Created,omitempty" yaml:"Created,omitempty"` + CreatedBy string `json:"CreatedBy,omitempty" yaml:"CreatedBy,omitempty"` + Size int64 `json:"Size,omitempty" yaml:"Size,omitempty"` +} + +// ImageHistory returns the history of the image by its name or ID. +// +// See https://goo.gl/8bnTId for more details. +func (c *Client) ImageHistory(name string) ([]ImageHistory, error) { + resp, err := c.do("GET", "/images/"+name+"/history", doOptions{}) + if err != nil { + if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound { + return nil, ErrNoSuchImage + } + return nil, err + } + defer resp.Body.Close() + var history []ImageHistory + if err := json.NewDecoder(resp.Body).Decode(&history); err != nil { + return nil, err + } + return history, nil +} + +// RemoveImage removes an image by its name or ID. +// +// See https://goo.gl/V3ZWnK for more details. +func (c *Client) RemoveImage(name string) error { + resp, err := c.do("DELETE", "/images/"+name, doOptions{}) + if err != nil { + if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound { + return ErrNoSuchImage + } + return err + } + resp.Body.Close() + return nil +} + +// RemoveImageOptions present the set of options available for removing an image +// from a registry. +// +// See https://goo.gl/V3ZWnK for more details. +type RemoveImageOptions struct { + Force bool `qs:"force"` + NoPrune bool `qs:"noprune"` +} + +// RemoveImageExtended removes an image by its name or ID. +// Extra params can be passed, see RemoveImageOptions +// +// See https://goo.gl/V3ZWnK for more details. +func (c *Client) RemoveImageExtended(name string, opts RemoveImageOptions) error { + uri := fmt.Sprintf("/images/%s?%s", name, queryString(&opts)) + resp, err := c.do("DELETE", uri, doOptions{}) + if err != nil { + if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound { + return ErrNoSuchImage + } + return err + } + resp.Body.Close() + return nil +} + +// InspectImage returns an image by its name or ID. +// +// See https://goo.gl/jHPcg6 for more details. 
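// [Editor's note] Sketch of the not-found convention the methods above share:
// a 404 from the daemon surfaces as ErrNoSuchImage rather than a generic
// error. Illustrative only; `client` is assumed to be constructed as in the
// previous note:
//
//	img, err := client.InspectImage("ubuntu:14.04")
//	switch {
//	case err == docker.ErrNoSuchImage:
//		log.Println("image not present; pull or build it first")
//	case err != nil:
//		log.Fatal(err)
//	default:
//		fmt.Println(img.ID, img.Created)
//	}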
+func (c *Client) InspectImage(name string) (*Image, error) { + resp, err := c.do("GET", "/images/"+name+"/json", doOptions{}) + if err != nil { + if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound { + return nil, ErrNoSuchImage + } + return nil, err + } + defer resp.Body.Close() + + var image Image + + // if the caller elected to skip checking the server's version, assume it's the latest + if c.SkipServerVersionCheck || c.expectedAPIVersion.GreaterThanOrEqualTo(apiVersion112) { + if err := json.NewDecoder(resp.Body).Decode(&image); err != nil { + return nil, err + } + } else { + var imagePre012 ImagePre012 + if err := json.NewDecoder(resp.Body).Decode(&imagePre012); err != nil { + return nil, err + } + + image.ID = imagePre012.ID + image.Parent = imagePre012.Parent + image.Comment = imagePre012.Comment + image.Created = imagePre012.Created + image.Container = imagePre012.Container + image.ContainerConfig = imagePre012.ContainerConfig + image.DockerVersion = imagePre012.DockerVersion + image.Author = imagePre012.Author + image.Config = imagePre012.Config + image.Architecture = imagePre012.Architecture + image.Size = imagePre012.Size + } + + return &image, nil +} + +// PushImageOptions represents options to use in the PushImage method. +// +// See https://goo.gl/zPtZaT for more details. +type PushImageOptions struct { + // Name of the image + Name string + + // Tag of the image + Tag string + + // Registry server to push the image + Registry string + + OutputStream io.Writer `qs:"-"` + RawJSONStream bool `qs:"-"` +} + +// PushImage pushes an image to a remote registry, logging progress to w. +// +// An empty instance of AuthConfiguration may be used for unauthenticated +// pushes. +// +// See https://goo.gl/zPtZaT for more details. +func (c *Client) PushImage(opts PushImageOptions, auth AuthConfiguration) error { + if opts.Name == "" { + return ErrNoSuchImage + } + headers, err := headersWithAuth(auth) + if err != nil { + return err + } + name := opts.Name + opts.Name = "" + path := "/images/" + name + "/push?" + queryString(&opts) + return c.stream("POST", path, streamOptions{ + setRawTerminal: true, + rawJSONStream: opts.RawJSONStream, + headers: headers, + stdout: opts.OutputStream, + }) +} + +// PullImageOptions present the set of options available for pulling an image +// from a registry. +// +// See https://goo.gl/iJkZjD for more details. +type PullImageOptions struct { + Repository string `qs:"fromImage"` + Registry string + Tag string + OutputStream io.Writer `qs:"-"` + RawJSONStream bool `qs:"-"` +} + +// PullImage pulls an image from a remote registry, logging progress to +// opts.OutputStream. +// +// See https://goo.gl/iJkZjD for more details. +func (c *Client) PullImage(opts PullImageOptions, auth AuthConfiguration) error { + if opts.Repository == "" { + return ErrNoSuchImage + } + + headers, err := headersWithAuth(auth) + if err != nil { + return err + } + return c.createImage(queryString(&opts), headers, nil, opts.OutputStream, opts.RawJSONStream) +} + +func (c *Client) createImage(qs string, headers map[string]string, in io.Reader, w io.Writer, rawJSONStream bool) error { + path := "/images/create?" + qs + return c.stream("POST", path, streamOptions{ + setRawTerminal: true, + rawJSONStream: rawJSONStream, + headers: headers, + in: in, + stdout: w, + }) +} + +// LoadImageOptions represents the options for LoadImage Docker API Call +// +// See https://goo.gl/JyClMX for more details. 
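// [Editor's note] Illustrative pull, streaming progress to a buffer — not
// part of the vendored file. The push documentation above states that an
// empty AuthConfiguration works for unauthenticated pushes; assuming the
// same holds for pulling public images:
//
//	var buf bytes.Buffer
//	err := client.PullImage(docker.PullImageOptions{
//		Repository:   "busybox",
//		Tag:          "latest",
//		OutputStream: &buf, // progress output lands here
//	}, docker.AuthConfiguration{})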
+type LoadImageOptions struct {
+	InputStream io.Reader
+}
+
+// LoadImage imports a Docker image from a tarball.
+//
+// See https://goo.gl/JyClMX for more details.
+func (c *Client) LoadImage(opts LoadImageOptions) error {
+	return c.stream("POST", "/images/load", streamOptions{
+		setRawTerminal: true,
+		in:             opts.InputStream,
+	})
+}
+
+// ExportImageOptions represents the options for the ExportImage Docker API call.
+//
+// See https://goo.gl/le7vK8 for more details.
+type ExportImageOptions struct {
+	Name         string
+	OutputStream io.Writer
+}
+
+// ExportImage exports an image (as a tar file) into the stream.
+//
+// See https://goo.gl/le7vK8 for more details.
+func (c *Client) ExportImage(opts ExportImageOptions) error {
+	return c.stream("GET", fmt.Sprintf("/images/%s/get", opts.Name), streamOptions{
+		setRawTerminal: true,
+		stdout:         opts.OutputStream,
+	})
+}
+
+// ExportImagesOptions represents the options for the ExportImages Docker API call.
+//
+// See https://goo.gl/huC7HA for more details.
+type ExportImagesOptions struct {
+	Names        []string
+	OutputStream io.Writer `qs:"-"`
+}
+
+// ExportImages exports one or more images (as a tar file) into the stream.
+//
+// See https://goo.gl/huC7HA for more details.
+func (c *Client) ExportImages(opts ExportImagesOptions) error {
+	if len(opts.Names) == 0 {
+		return ErrMustSpecifyNames
+	}
+	return c.stream("GET", "/images/get?"+queryString(&opts), streamOptions{
+		setRawTerminal: true,
+		stdout:         opts.OutputStream,
+	})
+}
+
+// ImportImageOptions presents the set of options available for importing
+// an image from a source file or stdin.
+//
+// See https://goo.gl/iJkZjD for more details.
+type ImportImageOptions struct {
+	Repository string `qs:"repo"`
+	Source     string `qs:"fromSrc"`
+	Tag        string `qs:"tag"`
+
+	InputStream   io.Reader `qs:"-"`
+	OutputStream  io.Writer `qs:"-"`
+	RawJSONStream bool      `qs:"-"`
+}
+
+// ImportImage imports an image from a URL, a file, or stdin.
+//
+// See https://goo.gl/iJkZjD for more details.
+func (c *Client) ImportImage(opts ImportImageOptions) error {
+	if opts.Repository == "" {
+		return ErrNoSuchImage
+	}
+	if opts.Source != "-" {
+		opts.InputStream = nil
+	}
+	if opts.Source != "-" && !isURL(opts.Source) {
+		f, err := os.Open(opts.Source)
+		if err != nil {
+			return err
+		}
+		opts.InputStream = f
+		opts.Source = "-"
+	}
+	return c.createImage(queryString(&opts), nil, opts.InputStream, opts.OutputStream, opts.RawJSONStream)
+}
+
+// BuildImageOptions presents the set of options available for building an
+// image from a tarfile containing a Dockerfile.
+//
+// For more details about the Docker building process, see
+// http://goo.gl/tlPXPu.
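// [Editor's note] Illustrative build call — hypothetical image name and
// context path, not part of the vendored file. Exactly one of ContextDir and
// InputStream may be set (ErrMultipleContexts otherwise), and an OutputStream
// is mandatory (ErrMissingOutputStream otherwise):
//
//	err := client.BuildImage(docker.BuildImageOptions{
//		Name:         "example/app:dev", // hypothetical tag
//		ContextDir:   "./build-context", // tarred up internally via createTarStream
//		Dockerfile:   "Dockerfile",
//		OutputStream: os.Stdout,
//	})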
+type BuildImageOptions struct { + Name string `qs:"t"` + Dockerfile string `qs:"dockerfile"` + NoCache bool `qs:"nocache"` + SuppressOutput bool `qs:"q"` + Pull bool `qs:"pull"` + RmTmpContainer bool `qs:"rm"` + ForceRmTmpContainer bool `qs:"forcerm"` + Memory int64 `qs:"memory"` + Memswap int64 `qs:"memswap"` + CPUShares int64 `qs:"cpushares"` + CPUQuota int64 `qs:"cpuquota"` + CPUPeriod int64 `qs:"cpuperiod"` + CPUSetCPUs string `qs:"cpusetcpus"` + InputStream io.Reader `qs:"-"` + OutputStream io.Writer `qs:"-"` + RawJSONStream bool `qs:"-"` + Remote string `qs:"remote"` + Auth AuthConfiguration `qs:"-"` // for older docker X-Registry-Auth header + AuthConfigs AuthConfigurations `qs:"-"` // for newer docker X-Registry-Config header + ContextDir string `qs:"-"` + Ulimits []ULimit `qs:"-"` +} + +// BuildImage builds an image from a tarball's url or a Dockerfile in the input +// stream. +// +// See https://goo.gl/xySxCe for more details. +func (c *Client) BuildImage(opts BuildImageOptions) error { + if opts.OutputStream == nil { + return ErrMissingOutputStream + } + headers, err := headersWithAuth(opts.Auth, c.versionedAuthConfigs(opts.AuthConfigs)) + if err != nil { + return err + } + + if opts.Remote != "" && opts.Name == "" { + opts.Name = opts.Remote + } + if opts.InputStream != nil || opts.ContextDir != "" { + headers["Content-Type"] = "application/tar" + } else if opts.Remote == "" { + return ErrMissingRepo + } + if opts.ContextDir != "" { + if opts.InputStream != nil { + return ErrMultipleContexts + } + var err error + if opts.InputStream, err = createTarStream(opts.ContextDir, opts.Dockerfile); err != nil { + return err + } + } + + qs := queryString(&opts) + if len(opts.Ulimits) > 0 { + if b, err := json.Marshal(opts.Ulimits); err == nil { + item := url.Values(map[string][]string{}) + item.Add("ulimits", string(b)) + qs = fmt.Sprintf("%s&%s", qs, item.Encode()) + } + } + + return c.stream("POST", fmt.Sprintf("/build?%s", qs), streamOptions{ + setRawTerminal: true, + rawJSONStream: opts.RawJSONStream, + headers: headers, + in: opts.InputStream, + stdout: opts.OutputStream, + }) +} + +func (c *Client) versionedAuthConfigs(authConfigs AuthConfigurations) interface{} { + if c.serverAPIVersion == nil { + c.checkAPIVersion() + } + if c.serverAPIVersion != nil && c.serverAPIVersion.GreaterThanOrEqualTo(apiVersion119) { + return AuthConfigurations119(authConfigs.Configs) + } + return authConfigs +} + +// TagImageOptions present the set of options to tag an image. +// +// See https://goo.gl/98ZzkU for more details. +type TagImageOptions struct { + Repo string + Tag string + Force bool +} + +// TagImage adds a tag to the image identified by the given name. +// +// See https://goo.gl/98ZzkU for more details. 
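// [Editor's note] Illustrative tagging call — hypothetical registry host, not
// part of the vendored file. `name` addresses the existing image; Repo and
// Tag describe the new reference to attach to it:
//
//	err := client.TagImage("busybox:latest", docker.TagImageOptions{
//		Repo:  "registry.example.com/busybox", // hypothetical target repo
//		Tag:   "v1",
//		Force: true,
//	})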
+func (c *Client) TagImage(name string, opts TagImageOptions) error {
+	if name == "" {
+		return ErrNoSuchImage
+	}
+	resp, err := c.do("POST", fmt.Sprintf("/images/%s/tag?%s", name,
+		queryString(&opts)), doOptions{})
+	if err != nil {
+		return err
+	}
+
+	defer resp.Body.Close()
+
+	if resp.StatusCode == http.StatusNotFound {
+		return ErrNoSuchImage
+	}
+
+	return nil
+}
+
+func isURL(u string) bool {
+	p, err := url.Parse(u)
+	if err != nil {
+		return false
+	}
+	return p.Scheme == "http" || p.Scheme == "https"
+}
+
+func headersWithAuth(auths ...interface{}) (map[string]string, error) {
+	var headers = make(map[string]string)
+
+	for _, auth := range auths {
+		switch auth.(type) {
+		case AuthConfiguration:
+			var buf bytes.Buffer
+			if err := json.NewEncoder(&buf).Encode(auth); err != nil {
+				return nil, err
+			}
+			headers["X-Registry-Auth"] = base64.URLEncoding.EncodeToString(buf.Bytes())
+		case AuthConfigurations, AuthConfigurations119:
+			var buf bytes.Buffer
+			if err := json.NewEncoder(&buf).Encode(auth); err != nil {
+				return nil, err
+			}
+			headers["X-Registry-Config"] = base64.URLEncoding.EncodeToString(buf.Bytes())
+		}
+	}
+
+	return headers, nil
+}
+
+// APIImageSearch reflects the result of a search on the Docker Hub.
+//
+// See https://goo.gl/AYjyrF for more details.
+type APIImageSearch struct {
+	Description string `json:"description,omitempty" yaml:"description,omitempty"`
+	IsOfficial  bool   `json:"is_official,omitempty" yaml:"is_official,omitempty"`
+	IsAutomated bool   `json:"is_automated,omitempty" yaml:"is_automated,omitempty"`
+	Name        string `json:"name,omitempty" yaml:"name,omitempty"`
+	StarCount   int    `json:"star_count,omitempty" yaml:"star_count,omitempty"`
+}
+
+// SearchImages searches the Docker Hub for the given term.
+//
+// See https://goo.gl/AYjyrF for more details.
+func (c *Client) SearchImages(term string) ([]APIImageSearch, error) {
+	resp, err := c.do("GET", "/images/search?term="+term, doOptions{})
+	if err != nil {
+		return nil, err
+	}
+	defer resp.Body.Close()
+	var searchResult []APIImageSearch
+	if err := json.NewDecoder(resp.Body).Decode(&searchResult); err != nil {
+		return nil, err
+	}
+	return searchResult, nil
+}
+
+// SearchImagesEx searches the Docker Hub for the given term, using the
+// provided authentication configuration.
+//
+// See https://goo.gl/AYjyrF for more details.
+func (c *Client) SearchImagesEx(term string, auth AuthConfiguration) ([]APIImageSearch, error) {
+	headers, err := headersWithAuth(auth)
+	if err != nil {
+		return nil, err
+	}
+
+	resp, err := c.do("GET", "/images/search?term="+term, doOptions{
+		headers: headers,
+	})
+	if err != nil {
+		return nil, err
+	}
+
+	defer resp.Body.Close()
+
+	var searchResult []APIImageSearch
+	if err := json.NewDecoder(resp.Body).Decode(&searchResult); err != nil {
+		return nil, err
+	}
+
+	return searchResult, nil
+}
diff --git a/vendor/github.com/fsouza/go-dockerclient/image_test.go b/vendor/github.com/fsouza/go-dockerclient/image_test.go
new file mode 100644
index 000000000..c7544a643
--- /dev/null
+++ b/vendor/github.com/fsouza/go-dockerclient/image_test.go
@@ -0,0 +1,1018 @@
+// Copyright 2015 go-dockerclient authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
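Before the tests, a consumer's-eye sketch of the export/load pair defined above: ExportImage streams the raw tarball from GET /images/{name}/get, and LoadImage feeds one back to POST /images/load, so the two round-trip an image through a file. A minimal sketch, assuming a daemon on the default Unix socket (the endpoint, file name, and image name are placeholders, not part of this patch):

    package main

    import (
    	"log"
    	"os"

    	docker "github.com/fsouza/go-dockerclient"
    )

    func main() {
    	client, err := docker.NewClient("unix:///var/run/docker.sock")
    	if err != nil {
    		log.Fatal(err)
    	}
    	// Export the image to a local tarball.
    	out, err := os.Create("busybox.tar")
    	if err != nil {
    		log.Fatal(err)
    	}
    	defer out.Close()
    	if err := client.ExportImage(docker.ExportImageOptions{
    		Name:         "busybox:latest",
    		OutputStream: out,
    	}); err != nil {
    		log.Fatal(err)
    	}
    	// Re-import it; LoadImage streams the tarball to POST /images/load.
    	in, err := os.Open("busybox.tar")
    	if err != nil {
    		log.Fatal(err)
    	}
    	defer in.Close()
    	if err := client.LoadImage(docker.LoadImageOptions{InputStream: in}); err != nil {
    		log.Fatal(err)
    	}
    }

Because ExportImage writes the daemon's tar stream verbatim, the resulting file can be carried to another host and loaded there unchanged.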
+ +package docker + +import ( + "bytes" + "encoding/base64" + "encoding/json" + "io/ioutil" + "net" + "net/http" + "net/url" + "os" + "reflect" + "strings" + "testing" + "time" +) + +func newTestClient(rt *FakeRoundTripper) Client { + endpoint := "http://localhost:4243" + u, _ := parseEndpoint("http://localhost:4243", false) + testAPIVersion, _ := NewAPIVersion("1.17") + client := Client{ + HTTPClient: &http.Client{Transport: rt}, + Dialer: &net.Dialer{}, + endpoint: endpoint, + endpointURL: u, + SkipServerVersionCheck: true, + serverAPIVersion: testAPIVersion, + } + return client +} + +type stdoutMock struct { + *bytes.Buffer +} + +func (m stdoutMock) Close() error { + return nil +} + +type stdinMock struct { + *bytes.Buffer +} + +func (m stdinMock) Close() error { + return nil +} + +func TestListImages(t *testing.T) { + body := `[ + { + "Repository":"base", + "Tag":"ubuntu-12.10", + "Id":"b750fe79269d", + "Created":1364102658 + }, + { + "Repository":"base", + "Tag":"ubuntu-quantal", + "Id":"b750fe79269d", + "Created":1364102658 + }, + { + "RepoTag": [ + "ubuntu:12.04", + "ubuntu:precise", + "ubuntu:latest" + ], + "Id": "8dbd9e392a964c", + "Created": 1365714795, + "Size": 131506275, + "VirtualSize": 131506275 + }, + { + "RepoTag": [ + "ubuntu:12.10", + "ubuntu:quantal" + ], + "ParentId": "27cf784147099545", + "Id": "b750fe79269d2e", + "Created": 1364102658, + "Size": 24653, + "VirtualSize": 180116135 + } +]` + var expected []APIImages + err := json.Unmarshal([]byte(body), &expected) + if err != nil { + t.Fatal(err) + } + client := newTestClient(&FakeRoundTripper{message: body, status: http.StatusOK}) + images, err := client.ListImages(ListImagesOptions{}) + if err != nil { + t.Error(err) + } + if !reflect.DeepEqual(images, expected) { + t.Errorf("ListImages: Wrong return value. Want %#v. Got %#v.", expected, images) + } +} + +func TestListImagesParameters(t *testing.T) { + fakeRT := &FakeRoundTripper{message: "null", status: http.StatusOK} + client := newTestClient(fakeRT) + _, err := client.ListImages(ListImagesOptions{All: false}) + if err != nil { + t.Fatal(err) + } + req := fakeRT.requests[0] + if req.Method != "GET" { + t.Errorf("ListImages({All: false}: Wrong HTTP method. Want GET. Got %s.", req.Method) + } + if all := req.URL.Query().Get("all"); all != "0" && all != "" { + t.Errorf("ListImages({All: false}): Wrong parameter. Want all=0 or not present at all. Got all=%s", all) + } + fakeRT.Reset() + _, err = client.ListImages(ListImagesOptions{All: true}) + if err != nil { + t.Fatal(err) + } + req = fakeRT.requests[0] + if all := req.URL.Query().Get("all"); all != "1" { + t.Errorf("ListImages({All: true}): Wrong parameter. Want all=1. Got all=%s", all) + } + fakeRT.Reset() + _, err = client.ListImages(ListImagesOptions{Filters: map[string][]string{ + "dangling": {"true"}, + }}) + if err != nil { + t.Fatal(err) + } + req = fakeRT.requests[0] + body := req.URL.Query().Get("filters") + var filters map[string][]string + err = json.Unmarshal([]byte(body), &filters) + if err != nil { + t.Fatal(err) + } + if len(filters["dangling"]) != 1 || filters["dangling"][0] != "true" { + t.Errorf("ListImages(dangling=[true]): Wrong filter map. 
Want dangling=[true], got dangling=%v", filters["dangling"]) + } +} + +func TestImageHistory(t *testing.T) { + body := `[ + { + "Id": "25daec02219d2d852f7526137213a9b199926b4b24e732eab5b8bc6c49bd470e", + "Tags": [ + "debian:7.6", + "debian:latest", + "debian:7", + "debian:wheezy" + ], + "Created": 1409856216, + "CreatedBy": "/bin/sh -c #(nop) CMD [/bin/bash]" + }, + { + "Id": "41026a5347fb5be6ed16115bf22df8569697139f246186de9ae8d4f67c335dce", + "Created": 1409856213, + "CreatedBy": "/bin/sh -c #(nop) ADD file:1ee9e97209d00e3416a4543b23574cc7259684741a46bbcbc755909b8a053a38 in /", + "Size": 85178663 + }, + { + "Id": "511136ea3c5a64f264b78b5433614aec563103b4d4702f3ba7d4d2698e22c158", + "Tags": [ + "scratch:latest" + ], + "Created": 1371157430 + } +]` + var expected []ImageHistory + err := json.Unmarshal([]byte(body), &expected) + if err != nil { + t.Fatal(err) + } + client := newTestClient(&FakeRoundTripper{message: body, status: http.StatusOK}) + history, err := client.ImageHistory("debian:latest") + if err != nil { + t.Error(err) + } + if !reflect.DeepEqual(history, expected) { + t.Errorf("ImageHistory: Wrong return value. Want %#v. Got %#v.", expected, history) + } +} + +func TestRemoveImage(t *testing.T) { + name := "test" + fakeRT := &FakeRoundTripper{message: "", status: http.StatusNoContent} + client := newTestClient(fakeRT) + err := client.RemoveImage(name) + if err != nil { + t.Fatal(err) + } + req := fakeRT.requests[0] + expectedMethod := "DELETE" + if req.Method != expectedMethod { + t.Errorf("RemoveImage(%q): Wrong HTTP method. Want %s. Got %s.", name, expectedMethod, req.Method) + } + u, _ := url.Parse(client.getURL("/images/" + name)) + if req.URL.Path != u.Path { + t.Errorf("RemoveImage(%q): Wrong request path. Want %q. Got %q.", name, u.Path, req.URL.Path) + } +} + +func TestRemoveImageNotFound(t *testing.T) { + client := newTestClient(&FakeRoundTripper{message: "no such image", status: http.StatusNotFound}) + err := client.RemoveImage("test:") + if err != ErrNoSuchImage { + t.Errorf("RemoveImage: wrong error. Want %#v. Got %#v.", ErrNoSuchImage, err) + } +} + +func TestRemoveImageExtended(t *testing.T) { + name := "test" + fakeRT := &FakeRoundTripper{message: "", status: http.StatusNoContent} + client := newTestClient(fakeRT) + err := client.RemoveImageExtended(name, RemoveImageOptions{Force: true, NoPrune: true}) + if err != nil { + t.Fatal(err) + } + req := fakeRT.requests[0] + expectedMethod := "DELETE" + if req.Method != expectedMethod { + t.Errorf("RemoveImage(%q): Wrong HTTP method. Want %s. Got %s.", name, expectedMethod, req.Method) + } + u, _ := url.Parse(client.getURL("/images/" + name)) + if req.URL.Path != u.Path { + t.Errorf("RemoveImage(%q): Wrong request path. Want %q. Got %q.", name, u.Path, req.URL.Path) + } + expectedQuery := "force=1&noprune=1" + if query := req.URL.Query().Encode(); query != expectedQuery { + t.Errorf("PushImage: Wrong query string. Want %q. 
Got %q.", expectedQuery, query) + } +} + +func TestInspectImage(t *testing.T) { + body := `{ + "Id":"b750fe79269d2ec9a3c593ef05b4332b1d1a02a62b4accb2c21d589ff2f5f2dc", + "Parent":"27cf784147099545", + "Created":"2013-03-23T22:24:18.818426Z", + "Container":"3d67245a8d72ecf13f33dffac9f79dcdf70f75acb84d308770391510e0c23ad0", + "ContainerConfig":{"Memory":1}, + "VirtualSize":12345 +}` + + created, err := time.Parse(time.RFC3339Nano, "2013-03-23T22:24:18.818426Z") + if err != nil { + t.Fatal(err) + } + + expected := Image{ + ID: "b750fe79269d2ec9a3c593ef05b4332b1d1a02a62b4accb2c21d589ff2f5f2dc", + Parent: "27cf784147099545", + Created: created, + Container: "3d67245a8d72ecf13f33dffac9f79dcdf70f75acb84d308770391510e0c23ad0", + ContainerConfig: Config{ + Memory: 1, + }, + VirtualSize: 12345, + } + fakeRT := &FakeRoundTripper{message: body, status: http.StatusOK} + client := newTestClient(fakeRT) + image, err := client.InspectImage(expected.ID) + if err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(*image, expected) { + t.Errorf("InspectImage(%q): Wrong image returned. Want %#v. Got %#v.", expected.ID, expected, *image) + } + req := fakeRT.requests[0] + if req.Method != "GET" { + t.Errorf("InspectImage(%q): Wrong HTTP method. Want GET. Got %s.", expected.ID, req.Method) + } + u, _ := url.Parse(client.getURL("/images/" + expected.ID + "/json")) + if req.URL.Path != u.Path { + t.Errorf("InspectImage(%q): Wrong request URL. Want %q. Got %q.", expected.ID, u.Path, req.URL.Path) + } +} + +func TestInspectImageNotFound(t *testing.T) { + client := newTestClient(&FakeRoundTripper{message: "no such image", status: http.StatusNotFound}) + name := "test" + image, err := client.InspectImage(name) + if image != nil { + t.Errorf("InspectImage(%q): expected image, got %#v.", name, image) + } + if err != ErrNoSuchImage { + t.Errorf("InspectImage(%q): wrong error. Want %#v. Got %#v.", name, ErrNoSuchImage, err) + } +} + +func TestPushImage(t *testing.T) { + fakeRT := &FakeRoundTripper{message: "Pushing 1/100", status: http.StatusOK} + client := newTestClient(fakeRT) + var buf bytes.Buffer + err := client.PushImage(PushImageOptions{Name: "test", OutputStream: &buf}, AuthConfiguration{}) + if err != nil { + t.Fatal(err) + } + expected := "Pushing 1/100" + if buf.String() != expected { + t.Errorf("PushImage: Wrong output. Want %q. Got %q.", expected, buf.String()) + } + req := fakeRT.requests[0] + if req.Method != "POST" { + t.Errorf("PushImage: Wrong HTTP method. Want POST. Got %s.", req.Method) + } + u, _ := url.Parse(client.getURL("/images/test/push")) + if req.URL.Path != u.Path { + t.Errorf("PushImage: Wrong request path. Want %q. Got %q.", u.Path, req.URL.Path) + } + if query := req.URL.Query().Encode(); query != "" { + t.Errorf("PushImage: Wrong query string. Want no parameters, got %q.", query) + } + + auth, err := base64.URLEncoding.DecodeString(req.Header.Get("X-Registry-Auth")) + if err != nil { + t.Errorf("PushImage: caught error decoding auth. %#v", err.Error()) + } + if strings.TrimSpace(string(auth)) != "{}" { + t.Errorf("PushImage: wrong body. Want %q. Got %q.", + base64.URLEncoding.EncodeToString([]byte("{}")), req.Header.Get("X-Registry-Auth")) + } +} + +func TestPushImageWithRawJSON(t *testing.T) { + body := ` + {"status":"Pushing..."} + {"status":"Pushing", "progress":"1/? 
(n/a)", "progressDetail":{"current":1}}} + {"status":"Image successfully pushed"} + ` + fakeRT := &FakeRoundTripper{ + message: body, + status: http.StatusOK, + header: map[string]string{ + "Content-Type": "application/json", + }, + } + client := newTestClient(fakeRT) + var buf bytes.Buffer + + err := client.PushImage(PushImageOptions{ + Name: "test", + OutputStream: &buf, + RawJSONStream: true, + }, AuthConfiguration{}) + if err != nil { + t.Fatal(err) + } + if buf.String() != body { + t.Errorf("PushImage: Wrong raw output. Want %q. Got %q.", body, buf.String()) + } +} + +func TestPushImageWithAuthentication(t *testing.T) { + fakeRT := &FakeRoundTripper{message: "Pushing 1/100", status: http.StatusOK} + client := newTestClient(fakeRT) + var buf bytes.Buffer + inputAuth := AuthConfiguration{ + Username: "gopher", + Password: "gopher123", + Email: "gopher@tsuru.io", + } + err := client.PushImage(PushImageOptions{Name: "test", OutputStream: &buf}, inputAuth) + if err != nil { + t.Fatal(err) + } + req := fakeRT.requests[0] + var gotAuth AuthConfiguration + + auth, err := base64.URLEncoding.DecodeString(req.Header.Get("X-Registry-Auth")) + if err != nil { + t.Errorf("PushImage: caught error decoding auth. %#v", err.Error()) + } + + err = json.Unmarshal(auth, &gotAuth) + if err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(gotAuth, inputAuth) { + t.Errorf("PushImage: wrong auth configuration. Want %#v. Got %#v.", inputAuth, gotAuth) + } +} + +func TestPushImageCustomRegistry(t *testing.T) { + fakeRT := &FakeRoundTripper{message: "Pushing 1/100", status: http.StatusOK} + client := newTestClient(fakeRT) + var authConfig AuthConfiguration + var buf bytes.Buffer + opts := PushImageOptions{ + Name: "test", Registry: "docker.tsuru.io", + OutputStream: &buf, + } + err := client.PushImage(opts, authConfig) + if err != nil { + t.Fatal(err) + } + req := fakeRT.requests[0] + expectedQuery := "registry=docker.tsuru.io" + if query := req.URL.Query().Encode(); query != expectedQuery { + t.Errorf("PushImage: Wrong query string. Want %q. Got %q.", expectedQuery, query) + } +} + +func TestPushImageNoName(t *testing.T) { + client := Client{} + err := client.PushImage(PushImageOptions{}, AuthConfiguration{}) + if err != ErrNoSuchImage { + t.Errorf("PushImage: got wrong error. Want %#v. Got %#v.", ErrNoSuchImage, err) + } +} + +func TestPullImage(t *testing.T) { + fakeRT := &FakeRoundTripper{message: "Pulling 1/100", status: http.StatusOK} + client := newTestClient(fakeRT) + var buf bytes.Buffer + err := client.PullImage(PullImageOptions{Repository: "base", OutputStream: &buf}, + AuthConfiguration{}) + if err != nil { + t.Fatal(err) + } + expected := "Pulling 1/100" + if buf.String() != expected { + t.Errorf("PullImage: Wrong output. Want %q. Got %q.", expected, buf.String()) + } + req := fakeRT.requests[0] + if req.Method != "POST" { + t.Errorf("PullImage: Wrong HTTP method. Want POST. Got %s.", req.Method) + } + u, _ := url.Parse(client.getURL("/images/create")) + if req.URL.Path != u.Path { + t.Errorf("PullImage: Wrong request path. Want %q. Got %q.", u.Path, req.URL.Path) + } + expectedQuery := "fromImage=base" + if query := req.URL.Query().Encode(); query != expectedQuery { + t.Errorf("PullImage: Wrong query strin. Want %q. 
Got %q.", expectedQuery, query) + } +} + +func TestPullImageWithRawJSON(t *testing.T) { + body := ` + {"status":"Pulling..."} + {"status":"Pulling", "progress":"1 B/ 100 B", "progressDetail":{"current":1, "total":100}} + ` + fakeRT := &FakeRoundTripper{ + message: body, + status: http.StatusOK, + header: map[string]string{ + "Content-Type": "application/json", + }, + } + client := newTestClient(fakeRT) + var buf bytes.Buffer + err := client.PullImage(PullImageOptions{ + Repository: "base", + OutputStream: &buf, + RawJSONStream: true, + }, AuthConfiguration{}) + if err != nil { + t.Fatal(err) + } + if buf.String() != body { + t.Errorf("PullImage: Wrong raw output. Want %q. Got %q", body, buf.String()) + } +} + +func TestPullImageWithoutOutputStream(t *testing.T) { + fakeRT := &FakeRoundTripper{message: "Pulling 1/100", status: http.StatusOK} + client := newTestClient(fakeRT) + opts := PullImageOptions{ + Repository: "base", + Registry: "docker.tsuru.io", + } + err := client.PullImage(opts, AuthConfiguration{}) + if err != nil { + t.Fatal(err) + } + req := fakeRT.requests[0] + expected := map[string][]string{"fromImage": {"base"}, "registry": {"docker.tsuru.io"}} + got := map[string][]string(req.URL.Query()) + if !reflect.DeepEqual(got, expected) { + t.Errorf("PullImage: wrong query string. Want %#v. Got %#v.", expected, got) + } +} + +func TestPullImageCustomRegistry(t *testing.T) { + fakeRT := &FakeRoundTripper{message: "Pulling 1/100", status: http.StatusOK} + client := newTestClient(fakeRT) + var buf bytes.Buffer + opts := PullImageOptions{ + Repository: "base", + Registry: "docker.tsuru.io", + OutputStream: &buf, + } + err := client.PullImage(opts, AuthConfiguration{}) + if err != nil { + t.Fatal(err) + } + req := fakeRT.requests[0] + expected := map[string][]string{"fromImage": {"base"}, "registry": {"docker.tsuru.io"}} + got := map[string][]string(req.URL.Query()) + if !reflect.DeepEqual(got, expected) { + t.Errorf("PullImage: wrong query string. Want %#v. Got %#v.", expected, got) + } +} + +func TestPullImageTag(t *testing.T) { + fakeRT := &FakeRoundTripper{message: "Pulling 1/100", status: http.StatusOK} + client := newTestClient(fakeRT) + var buf bytes.Buffer + opts := PullImageOptions{ + Repository: "base", + Registry: "docker.tsuru.io", + Tag: "latest", + OutputStream: &buf, + } + err := client.PullImage(opts, AuthConfiguration{}) + if err != nil { + t.Fatal(err) + } + req := fakeRT.requests[0] + expected := map[string][]string{"fromImage": {"base"}, "registry": {"docker.tsuru.io"}, "tag": {"latest"}} + got := map[string][]string(req.URL.Query()) + if !reflect.DeepEqual(got, expected) { + t.Errorf("PullImage: wrong query string. Want %#v. Got %#v.", expected, got) + } +} + +func TestPullImageNoRepository(t *testing.T) { + var opts PullImageOptions + client := Client{} + err := client.PullImage(opts, AuthConfiguration{}) + if err != ErrNoSuchImage { + t.Errorf("PullImage: got wrong error. Want %#v. 
Got %#v.", ErrNoSuchImage, err) + } +} + +func TestImportImageFromUrl(t *testing.T) { + fakeRT := &FakeRoundTripper{message: "", status: http.StatusOK} + client := newTestClient(fakeRT) + var buf bytes.Buffer + opts := ImportImageOptions{ + Source: "http://mycompany.com/file.tar", + Repository: "testimage", + Tag: "tag", + OutputStream: &buf, + } + err := client.ImportImage(opts) + if err != nil { + t.Fatal(err) + } + req := fakeRT.requests[0] + expected := map[string][]string{"fromSrc": {opts.Source}, "repo": {opts.Repository}, "tag": {opts.Tag}} + got := map[string][]string(req.URL.Query()) + if !reflect.DeepEqual(got, expected) { + t.Errorf("ImportImage: wrong query string. Want %#v. Got %#v.", expected, got) + } +} + +func TestImportImageFromInput(t *testing.T) { + fakeRT := &FakeRoundTripper{message: "", status: http.StatusOK} + client := newTestClient(fakeRT) + in := bytes.NewBufferString("tar content") + var buf bytes.Buffer + opts := ImportImageOptions{ + Source: "-", Repository: "testimage", + InputStream: in, OutputStream: &buf, + Tag: "tag", + } + err := client.ImportImage(opts) + if err != nil { + t.Fatal(err) + } + req := fakeRT.requests[0] + expected := map[string][]string{"fromSrc": {opts.Source}, "repo": {opts.Repository}, "tag": {opts.Tag}} + got := map[string][]string(req.URL.Query()) + if !reflect.DeepEqual(got, expected) { + t.Errorf("ImportImage: wrong query string. Want %#v. Got %#v.", expected, got) + } + body, err := ioutil.ReadAll(req.Body) + if err != nil { + t.Errorf("ImportImage: caugth error while reading body %#v", err.Error()) + } + e := "tar content" + if string(body) != e { + t.Errorf("ImportImage: wrong body. Want %#v. Got %#v.", e, string(body)) + } +} + +func TestImportImageDoesNotPassesInputIfSourceIsNotDash(t *testing.T) { + fakeRT := &FakeRoundTripper{message: "", status: http.StatusOK} + client := newTestClient(fakeRT) + var buf bytes.Buffer + in := bytes.NewBufferString("foo") + opts := ImportImageOptions{ + Source: "http://test.com/container.tar", Repository: "testimage", + InputStream: in, OutputStream: &buf, + } + err := client.ImportImage(opts) + if err != nil { + t.Fatal(err) + } + req := fakeRT.requests[0] + expected := map[string][]string{"fromSrc": {opts.Source}, "repo": {opts.Repository}} + got := map[string][]string(req.URL.Query()) + if !reflect.DeepEqual(got, expected) { + t.Errorf("ImportImage: wrong query string. Want %#v. Got %#v.", expected, got) + } + body, err := ioutil.ReadAll(req.Body) + if err != nil { + t.Errorf("ImportImage: caugth error while reading body %#v", err.Error()) + } + if string(body) != "" { + t.Errorf("ImportImage: wrong body. Want nothing. Got %#v.", string(body)) + } +} + +func TestImportImageShouldPassTarContentToBodyWhenSourceIsFilePath(t *testing.T) { + fakeRT := &FakeRoundTripper{message: "", status: http.StatusOK} + client := newTestClient(fakeRT) + var buf bytes.Buffer + tarPath := "testing/data/container.tar" + opts := ImportImageOptions{ + Source: tarPath, Repository: "testimage", + OutputStream: &buf, + } + err := client.ImportImage(opts) + if err != nil { + t.Fatal(err) + } + tar, err := os.Open(tarPath) + if err != nil { + t.Fatal(err) + } + req := fakeRT.requests[0] + tarContent, err := ioutil.ReadAll(tar) + body, err := ioutil.ReadAll(req.Body) + if err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(tarContent, body) { + t.Errorf("ImportImage: wrong body. Want %#v content. 
Got %#v.", tarPath, body) + } +} + +func TestImportImageShouldChangeSourceToDashWhenItsAFilePath(t *testing.T) { + fakeRT := &FakeRoundTripper{message: "", status: http.StatusOK} + client := newTestClient(fakeRT) + var buf bytes.Buffer + tarPath := "testing/data/container.tar" + opts := ImportImageOptions{ + Source: tarPath, Repository: "testimage", + OutputStream: &buf, + } + err := client.ImportImage(opts) + if err != nil { + t.Fatal(err) + } + req := fakeRT.requests[0] + expected := map[string][]string{"fromSrc": {"-"}, "repo": {opts.Repository}} + got := map[string][]string(req.URL.Query()) + if !reflect.DeepEqual(got, expected) { + t.Errorf("ImportImage: wrong query string. Want %#v. Got %#v.", expected, got) + } +} + +func TestBuildImageParameters(t *testing.T) { + fakeRT := &FakeRoundTripper{message: "", status: http.StatusOK} + client := newTestClient(fakeRT) + var buf bytes.Buffer + opts := BuildImageOptions{ + Name: "testImage", + NoCache: true, + SuppressOutput: true, + Pull: true, + RmTmpContainer: true, + ForceRmTmpContainer: true, + Memory: 1024, + Memswap: 2048, + CPUShares: 10, + CPUQuota: 7500, + CPUPeriod: 100000, + CPUSetCPUs: "0-3", + Ulimits: []ULimit{{Name: "nofile", Soft: 100, Hard: 200}}, + InputStream: &buf, + OutputStream: &buf, + } + err := client.BuildImage(opts) + if err != nil && strings.Index(err.Error(), "build image fail") == -1 { + t.Fatal(err) + } + req := fakeRT.requests[0] + expected := map[string][]string{ + "t": {opts.Name}, + "nocache": {"1"}, + "q": {"1"}, + "pull": {"1"}, + "rm": {"1"}, + "forcerm": {"1"}, + "memory": {"1024"}, + "memswap": {"2048"}, + "cpushares": {"10"}, + "cpuquota": {"7500"}, + "cpuperiod": {"100000"}, + "cpusetcpus": {"0-3"}, + "ulimits": {"[{\"Name\":\"nofile\",\"Soft\":100,\"Hard\":200}]"}, + } + got := map[string][]string(req.URL.Query()) + if !reflect.DeepEqual(got, expected) { + t.Errorf("BuildImage: wrong query string. Want %#v. Got %#v.", expected, got) + } +} + +func TestBuildImageParametersForRemoteBuild(t *testing.T) { + fakeRT := &FakeRoundTripper{message: "", status: http.StatusOK} + client := newTestClient(fakeRT) + var buf bytes.Buffer + opts := BuildImageOptions{ + Name: "testImage", + Remote: "testing/data/container.tar", + SuppressOutput: true, + OutputStream: &buf, + } + err := client.BuildImage(opts) + if err != nil { + t.Fatal(err) + } + req := fakeRT.requests[0] + expected := map[string][]string{"t": {opts.Name}, "remote": {opts.Remote}, "q": {"1"}} + got := map[string][]string(req.URL.Query()) + if !reflect.DeepEqual(got, expected) { + t.Errorf("BuildImage: wrong query string. Want %#v. Got %#v.", expected, got) + } +} + +func TestBuildImageMissingRepoAndNilInput(t *testing.T) { + fakeRT := &FakeRoundTripper{message: "", status: http.StatusOK} + client := newTestClient(fakeRT) + var buf bytes.Buffer + opts := BuildImageOptions{ + Name: "testImage", + SuppressOutput: true, + OutputStream: &buf, + } + err := client.BuildImage(opts) + if err != ErrMissingRepo { + t.Errorf("BuildImage: wrong error returned. Want %#v. Got %#v.", ErrMissingRepo, err) + } +} + +func TestBuildImageMissingOutputStream(t *testing.T) { + fakeRT := &FakeRoundTripper{message: "", status: http.StatusOK} + client := newTestClient(fakeRT) + opts := BuildImageOptions{Name: "testImage"} + err := client.BuildImage(opts) + if err != ErrMissingOutputStream { + t.Errorf("BuildImage: wrong error returned. Want %#v. 
Got %#v.", ErrMissingOutputStream, err) + } +} + +func TestBuildImageWithRawJSON(t *testing.T) { + body := ` + {"stream":"Step 0 : FROM ubuntu:latest\n"} + {"stream":" ---\u003e 4300eb9d3c8d\n"} + {"stream":"Step 1 : MAINTAINER docker \n"} + {"stream":" ---\u003e Using cache\n"} + {"stream":" ---\u003e 3a3ed758c370\n"} + {"stream":"Step 2 : CMD /usr/bin/top\n"} + {"stream":" ---\u003e Running in 36b1479cc2e4\n"} + {"stream":" ---\u003e 4b6188aebe39\n"} + {"stream":"Removing intermediate container 36b1479cc2e4\n"} + {"stream":"Successfully built 4b6188aebe39\n"} + ` + fakeRT := &FakeRoundTripper{ + message: body, + status: http.StatusOK, + header: map[string]string{ + "Content-Type": "application/json", + }, + } + client := newTestClient(fakeRT) + var buf bytes.Buffer + opts := BuildImageOptions{ + Name: "testImage", + RmTmpContainer: true, + InputStream: &buf, + OutputStream: &buf, + RawJSONStream: true, + } + err := client.BuildImage(opts) + if err != nil { + t.Fatal(err) + } + if buf.String() != body { + t.Errorf("BuildImage: Wrong raw output. Want %q. Got %q.", body, buf.String()) + } +} + +func TestBuildImageRemoteWithoutName(t *testing.T) { + fakeRT := &FakeRoundTripper{message: "", status: http.StatusOK} + client := newTestClient(fakeRT) + var buf bytes.Buffer + opts := BuildImageOptions{ + Remote: "testing/data/container.tar", + SuppressOutput: true, + OutputStream: &buf, + } + err := client.BuildImage(opts) + if err != nil { + t.Fatal(err) + } + req := fakeRT.requests[0] + expected := map[string][]string{"t": {opts.Remote}, "remote": {opts.Remote}, "q": {"1"}} + got := map[string][]string(req.URL.Query()) + if !reflect.DeepEqual(got, expected) { + t.Errorf("BuildImage: wrong query string. Want %#v. Got %#v.", expected, got) + } +} + +func TestTagImageParameters(t *testing.T) { + fakeRT := &FakeRoundTripper{message: "", status: http.StatusOK} + client := newTestClient(fakeRT) + opts := TagImageOptions{Repo: "testImage"} + err := client.TagImage("base", opts) + if err != nil && strings.Index(err.Error(), "tag image fail") == -1 { + t.Fatal(err) + } + req := fakeRT.requests[0] + expected := "http://localhost:4243/images/base/tag?repo=testImage" + got := req.URL.String() + if !reflect.DeepEqual(got, expected) { + t.Errorf("TagImage: wrong query string. Want %#v. Got %#v.", expected, got) + } +} + +func TestTagImageMissingRepo(t *testing.T) { + fakeRT := &FakeRoundTripper{message: "", status: http.StatusOK} + client := newTestClient(fakeRT) + opts := TagImageOptions{Repo: "testImage"} + err := client.TagImage("", opts) + if err != ErrNoSuchImage { + t.Errorf("TestTag: wrong error returned. Want %#v. Got %#v.", + ErrNoSuchImage, err) + } +} + +func TestIsUrl(t *testing.T) { + url := "http://foo.bar/" + result := isURL(url) + if !result { + t.Errorf("isURL: wrong match. Expected %#v to be a url. Got %#v.", url, result) + } + url = "/foo/bar.tar" + result = isURL(url) + if result { + t.Errorf("isURL: wrong match. Expected %#v to not be a url. Got %#v", url, result) + } +} + +func TestLoadImage(t *testing.T) { + fakeRT := &FakeRoundTripper{message: "", status: http.StatusOK} + client := newTestClient(fakeRT) + tar, err := os.Open("testing/data/container.tar") + if err != nil { + t.Fatal(err) + } else { + defer tar.Close() + } + opts := LoadImageOptions{InputStream: tar} + err = client.LoadImage(opts) + if nil != err { + t.Error(err) + } + req := fakeRT.requests[0] + if req.Method != "POST" { + t.Errorf("LoadImage: wrong method. Expected %q. 
Got %q.", "POST", req.Method) + } + if req.URL.Path != "/images/load" { + t.Errorf("LoadImage: wrong URL. Expected %q. Got %q.", "/images/load", req.URL.Path) + } +} + +func TestExportImage(t *testing.T) { + var buf bytes.Buffer + fakeRT := &FakeRoundTripper{message: "", status: http.StatusOK} + client := newTestClient(fakeRT) + opts := ExportImageOptions{Name: "testimage", OutputStream: &buf} + err := client.ExportImage(opts) + if nil != err { + t.Error(err) + } + req := fakeRT.requests[0] + if req.Method != "GET" { + t.Errorf("ExportImage: wrong method. Expected %q. Got %q.", "GET", req.Method) + } + expectedPath := "/images/testimage/get" + if req.URL.Path != expectedPath { + t.Errorf("ExportIMage: wrong path. Expected %q. Got %q.", expectedPath, req.URL.Path) + } +} + +func TestExportImages(t *testing.T) { + var buf bytes.Buffer + fakeRT := &FakeRoundTripper{message: "", status: http.StatusOK} + client := newTestClient(fakeRT) + opts := ExportImagesOptions{Names: []string{"testimage1", "testimage2:latest"}, OutputStream: &buf} + err := client.ExportImages(opts) + if nil != err { + t.Error(err) + } + req := fakeRT.requests[0] + if req.Method != "GET" { + t.Errorf("ExportImage: wrong method. Expected %q. Got %q.", "GET", req.Method) + } + expected := "http://localhost:4243/images/get?names=testimage1&names=testimage2%3Alatest" + got := req.URL.String() + if !reflect.DeepEqual(got, expected) { + t.Errorf("ExportIMage: wrong path. Expected %q. Got %q.", expected, got) + } +} + +func TestExportImagesNoNames(t *testing.T) { + var buf bytes.Buffer + fakeRT := &FakeRoundTripper{message: "", status: http.StatusOK} + client := newTestClient(fakeRT) + opts := ExportImagesOptions{Names: []string{}, OutputStream: &buf} + err := client.ExportImages(opts) + if err == nil { + t.Error("Expected an error") + } + if err != ErrMustSpecifyNames { + t.Error(err) + } +} + +func TestSearchImages(t *testing.T) { + body := `[ + { + "description":"A container with Cassandra 2.0.3", + "is_official":true, + "is_automated":true, + "name":"poklet/cassandra", + "star_count":17 + }, + { + "description":"A container with Cassandra 2.0.3", + "is_official":true, + "is_automated":false, + "name":"poklet/cassandra", + "star_count":17 + } + , + { + "description":"A container with Cassandra 2.0.3", + "is_official":false, + "is_automated":true, + "name":"poklet/cassandra", + "star_count":17 + } +]` + var expected []APIImageSearch + err := json.Unmarshal([]byte(body), &expected) + if err != nil { + t.Fatal(err) + } + client := newTestClient(&FakeRoundTripper{message: body, status: http.StatusOK}) + result, err := client.SearchImages("cassandra") + if err != nil { + t.Error(err) + } + if !reflect.DeepEqual(result, expected) { + t.Errorf("SearchImages: Wrong return value. Want %#v. 
Got %#v.", expected, result) + } +} + +func TestSearchImagesEx(t *testing.T) { + body := `[ + { + "description":"A container with Cassandra 2.0.3", + "is_official":true, + "is_automated":true, + "name":"poklet/cassandra", + "star_count":17 + }, + { + "description":"A container with Cassandra 2.0.3", + "is_official":true, + "is_automated":false, + "name":"poklet/cassandra", + "star_count":17 + } + , + { + "description":"A container with Cassandra 2.0.3", + "is_official":false, + "is_automated":true, + "name":"poklet/cassandra", + "star_count":17 + } +]` + var expected []APIImageSearch + err := json.Unmarshal([]byte(body), &expected) + if err != nil { + t.Fatal(err) + } + client := newTestClient(&FakeRoundTripper{message: body, status: http.StatusOK}) + auth := AuthConfiguration{} + result, err := client.SearchImagesEx("cassandra", auth) + if err != nil { + t.Error(err) + } + if !reflect.DeepEqual(result, expected) { + t.Errorf("SearchImages: Wrong return value. Want %#v. Got %#v.", expected, result) + } +} diff --git a/vendor/github.com/fsouza/go-dockerclient/integration_test.go b/vendor/github.com/fsouza/go-dockerclient/integration_test.go new file mode 100644 index 000000000..f5aeea27a --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/integration_test.go @@ -0,0 +1,94 @@ +// Copyright 2015 go-dockerclient authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build docker_integration + +package docker + +import ( + "bytes" + "os" + "testing" +) + +var dockerEndpoint string + +func init() { + dockerEndpoint = os.Getenv("DOCKER_HOST") + if dockerEndpoint == "" { + dockerEndpoint = "unix:///var/run/docker.sock" + } +} + +func TestIntegrationPullCreateStartLogs(t *testing.T) { + imageName := pullImage(t) + client := getClient() + hostConfig := HostConfig{PublishAllPorts: true} + createOpts := CreateContainerOptions{ + Config: &Config{ + Image: imageName, + Cmd: []string{"cat", "/home/gopher/file.txt"}, + User: "gopher", + }, + HostConfig: &hostConfig, + } + container, err := client.CreateContainer(createOpts) + if err != nil { + t.Fatal(err) + } + err = client.StartContainer(container.ID, &hostConfig) + if err != nil { + t.Fatal(err) + } + status, err := client.WaitContainer(container.ID) + if err != nil { + t.Error(err) + } + if status != 0 { + t.Error("WaitContainer(%q): wrong status. Want 0. Got %d", container.ID, status) + } + var stdout, stderr bytes.Buffer + logsOpts := LogsOptions{ + Container: container.ID, + OutputStream: &stdout, + ErrorStream: &stderr, + Stdout: true, + Stderr: true, + } + err = client.Logs(logsOpts) + if err != nil { + t.Error(err) + } + if stderr.String() != "" { + t.Errorf("Got unexpected stderr from logs: %q", stderr.String()) + } + expected := `Welcome to reality, wake up and rejoice +Welcome to reality, you've made the right choice +Welcome to reality, and let them hear your voice, shout it out! 
+`
+	if stdout.String() != expected {
+		t.Errorf("Got wrong stdout from logs.\nWant:\n%#v.\n\nGot:\n%#v.", expected, stdout.String())
+	}
+}
+
+func pullImage(t *testing.T) string {
+	imageName := "fsouza/go-dockerclient-integration"
+	var buf bytes.Buffer
+	pullOpts := PullImageOptions{
+		Repository:   imageName,
+		OutputStream: &buf,
+	}
+	client := getClient()
+	err := client.PullImage(pullOpts, AuthConfiguration{})
+	if err != nil {
+		t.Logf("Pull output: %s", buf.String())
+		t.Fatal(err)
+	}
+	return imageName
+}
+
+func getClient() *Client {
+	client, _ := NewClient(dockerEndpoint)
+	return client
+}
diff --git a/vendor/github.com/fsouza/go-dockerclient/misc.go b/vendor/github.com/fsouza/go-dockerclient/misc.go
new file mode 100644
index 000000000..34c96531a
--- /dev/null
+++ b/vendor/github.com/fsouza/go-dockerclient/misc.go
@@ -0,0 +1,57 @@
+// Copyright 2015 go-dockerclient authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package docker
+
+import "strings"
+
+// Version returns version information about the Docker server.
+//
+// See https://goo.gl/ND9R8L for more details.
+func (c *Client) Version() (*Env, error) {
+	resp, err := c.do("GET", "/version", doOptions{})
+	if err != nil {
+		return nil, err
+	}
+	defer resp.Body.Close()
+	var env Env
+	if err := env.Decode(resp.Body); err != nil {
+		return nil, err
+	}
+	return &env, nil
+}
+
+// Info returns system-wide information about the Docker server.
+//
+// See https://goo.gl/ElTHi2 for more details.
+func (c *Client) Info() (*Env, error) {
+	resp, err := c.do("GET", "/info", doOptions{})
+	if err != nil {
+		return nil, err
+	}
+	defer resp.Body.Close()
+	var info Env
+	if err := info.Decode(resp.Body); err != nil {
+		return nil, err
+	}
+	return &info, nil
+}
+
+// ParseRepositoryTag gets the name of the repository and returns it split
+// into two parts: the repository and the tag.
+//
+// Some examples:
+//
+// 	localhost.localdomain:5000/samalba/hipache:latest -> localhost.localdomain:5000/samalba/hipache, latest
+// 	localhost.localdomain:5000/samalba/hipache -> localhost.localdomain:5000/samalba/hipache, ""
+func ParseRepositoryTag(repoTag string) (repository string, tag string) {
+	n := strings.LastIndex(repoTag, ":")
+	if n < 0 {
+		return repoTag, ""
+	}
+	if tag := repoTag[n+1:]; !strings.Contains(tag, "/") {
+		return repoTag[:n], tag
+	}
+	return repoTag, ""
+}
diff --git a/vendor/github.com/fsouza/go-dockerclient/misc_test.go b/vendor/github.com/fsouza/go-dockerclient/misc_test.go
new file mode 100644
index 000000000..ceaf076ed
--- /dev/null
+++ b/vendor/github.com/fsouza/go-dockerclient/misc_test.go
@@ -0,0 +1,159 @@
+// Copyright 2014 go-dockerclient authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
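The tests below exercise ParseRepositoryTag. As a compact illustration of the split rule (the suffix after the last colon is treated as a tag only when it contains no slash, which is what keeps registry ports like ":5000" inside the repository), a sketch:

    package main

    import (
    	"fmt"

    	docker "github.com/fsouza/go-dockerclient"
    )

    func main() {
    	// The colon before "latest" has no "/" after it, so it is a tag separator.
    	repo, tag := docker.ParseRepositoryTag("localhost.localdomain:5000/samalba/hipache:latest")
    	fmt.Printf("%s %q\n", repo, tag) // localhost.localdomain:5000/samalba/hipache "latest"

    	// Here the last colon is followed by "/samalba/...", so it is a port, not a tag.
    	repo, tag = docker.ParseRepositoryTag("localhost.localdomain:5000/samalba/hipache")
    	fmt.Printf("%s %q\n", repo, tag) // localhost.localdomain:5000/samalba/hipache ""
    }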
+ +package docker + +import ( + "net/http" + "net/url" + "reflect" + "sort" + "testing" +) + +type DockerVersion struct { + Version string + GitCommit string + GoVersion string +} + +func TestVersion(t *testing.T) { + body := `{ + "Version":"0.2.2", + "GitCommit":"5a2a5cc+CHANGES", + "GoVersion":"go1.0.3" +}` + fakeRT := FakeRoundTripper{message: body, status: http.StatusOK} + client := newTestClient(&fakeRT) + expected := DockerVersion{ + Version: "0.2.2", + GitCommit: "5a2a5cc+CHANGES", + GoVersion: "go1.0.3", + } + version, err := client.Version() + if err != nil { + t.Fatal(err) + } + + if result := version.Get("Version"); result != expected.Version { + t.Errorf("Version(): Wrong result. Want %#v. Got %#v.", expected.Version, version.Get("Version")) + } + if result := version.Get("GitCommit"); result != expected.GitCommit { + t.Errorf("GitCommit(): Wrong result. Want %#v. Got %#v.", expected.GitCommit, version.Get("GitCommit")) + } + if result := version.Get("GoVersion"); result != expected.GoVersion { + t.Errorf("GoVersion(): Wrong result. Want %#v. Got %#v.", expected.GoVersion, version.Get("GoVersion")) + } + req := fakeRT.requests[0] + if req.Method != "GET" { + t.Errorf("Version(): wrong request method. Want GET. Got %s.", req.Method) + } + u, _ := url.Parse(client.getURL("/version")) + if req.URL.Path != u.Path { + t.Errorf("Version(): wrong request path. Want %q. Got %q.", u.Path, req.URL.Path) + } +} + +func TestVersionError(t *testing.T) { + fakeRT := &FakeRoundTripper{message: "internal error", status: http.StatusInternalServerError} + client := newTestClient(fakeRT) + version, err := client.Version() + if version != nil { + t.Errorf("Version(): expected value, got %#v.", version) + } + if err == nil { + t.Error("Version(): unexpected error") + } +} + +func TestInfo(t *testing.T) { + body := `{ + "Containers":11, + "Images":16, + "Debug":0, + "NFd":11, + "NGoroutines":21, + "MemoryLimit":1, + "SwapLimit":0 +}` + fakeRT := FakeRoundTripper{message: body, status: http.StatusOK} + client := newTestClient(&fakeRT) + expected := Env{} + expected.SetInt("Containers", 11) + expected.SetInt("Images", 16) + expected.SetBool("Debug", false) + expected.SetInt("NFd", 11) + expected.SetInt("NGoroutines", 21) + expected.SetBool("MemoryLimit", true) + expected.SetBool("SwapLimit", false) + info, err := client.Info() + if err != nil { + t.Fatal(err) + } + infoSlice := []string(*info) + expectedSlice := []string(expected) + sort.Strings(infoSlice) + sort.Strings(expectedSlice) + if !reflect.DeepEqual(expectedSlice, infoSlice) { + t.Errorf("Info(): Wrong result.\nWant %#v.\nGot %#v.", expected, *info) + } + req := fakeRT.requests[0] + if req.Method != "GET" { + t.Errorf("Info(): Wrong HTTP method. Want GET. Got %s.", req.Method) + } + u, _ := url.Parse(client.getURL("/info")) + if req.URL.Path != u.Path { + t.Errorf("Info(): Wrong request path. Want %q. 
Got %q.", u.Path, req.URL.Path) + } +} + +func TestInfoError(t *testing.T) { + fakeRT := &FakeRoundTripper{message: "internal error", status: http.StatusInternalServerError} + client := newTestClient(fakeRT) + version, err := client.Info() + if version != nil { + t.Errorf("Info(): expected value, got %#v.", version) + } + if err == nil { + t.Error("Info(): unexpected error") + } +} + +func TestParseRepositoryTag(t *testing.T) { + var tests = []struct { + input string + expectedRepo string + expectedTag string + }{ + { + "localhost.localdomain:5000/samalba/hipache:latest", + "localhost.localdomain:5000/samalba/hipache", + "latest", + }, + { + "localhost.localdomain:5000/samalba/hipache", + "localhost.localdomain:5000/samalba/hipache", + "", + }, + { + "tsuru/python", + "tsuru/python", + "", + }, + { + "tsuru/python:2.7", + "tsuru/python", + "2.7", + }, + } + for _, tt := range tests { + repo, tag := ParseRepositoryTag(tt.input) + if repo != tt.expectedRepo { + t.Errorf("ParseRepositoryTag(%q): wrong repository. Want %q. Got %q", tt.input, tt.expectedRepo, repo) + } + if tag != tt.expectedTag { + t.Errorf("ParseRepositoryTag(%q): wrong tag. Want %q. Got %q", tt.input, tt.expectedTag, tag) + } + } +} diff --git a/vendor/github.com/fsouza/go-dockerclient/network.go b/vendor/github.com/fsouza/go-dockerclient/network.go new file mode 100644 index 000000000..30d54230a --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/network.go @@ -0,0 +1,215 @@ +// Copyright 2015 go-dockerclient authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package docker + +import ( + "encoding/json" + "errors" + "fmt" + "net/http" +) + +// ErrNetworkAlreadyExists is the error returned by CreateNetwork when the +// network already exists. +var ErrNetworkAlreadyExists = errors.New("network already exists") + +// Network represents a network. +// +// See https://goo.gl/6GugX3 for more details. +type Network struct { + Name string + ID string `json:"Id"` + Scope string + Driver string + IPAM IPAMOptions + Containers map[string]Endpoint + Options map[string]string +} + +// Endpoint contains network resources allocated and used for a container in a network +// +// See https://goo.gl/6GugX3 for more details. +type Endpoint struct { + Name string + ID string `json:"EndpointID"` + MacAddress string + IPv4Address string + IPv6Address string +} + +// ListNetworks returns all networks. +// +// See https://goo.gl/6GugX3 for more details. +func (c *Client) ListNetworks() ([]Network, error) { + resp, err := c.do("GET", "/networks", doOptions{}) + if err != nil { + return nil, err + } + defer resp.Body.Close() + var networks []Network + if err := json.NewDecoder(resp.Body).Decode(&networks); err != nil { + return nil, err + } + return networks, nil +} + +// NetworkInfo returns information about a network by its ID. +// +// See https://goo.gl/6GugX3 for more details. 
+func (c *Client) NetworkInfo(id string) (*Network, error) { + path := "/networks/" + id + resp, err := c.do("GET", path, doOptions{}) + if err != nil { + if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound { + return nil, &NoSuchNetwork{ID: id} + } + return nil, err + } + defer resp.Body.Close() + var network Network + if err := json.NewDecoder(resp.Body).Decode(&network); err != nil { + return nil, err + } + return &network, nil +} + +// CreateNetworkOptions specify parameters to the CreateNetwork function and +// (for now) is the expected body of the "create network" http request message +// +// See https://goo.gl/6GugX3 for more details. +type CreateNetworkOptions struct { + Name string `json:"Name"` + CheckDuplicate bool `json:"CheckDuplicate"` + Driver string `json:"Driver"` + IPAM IPAMOptions `json:"IPAM"` + Options map[string]interface{} `json:"options"` +} + +// IPAMOptions controls IP Address Management when creating a network +// +// See https://goo.gl/T8kRVH for more details. +type IPAMOptions struct { + Driver string `json:"Driver"` + Config []IPAMConfig `json:"IPAMConfig"` +} + +// IPAMConfig represents IPAM configurations +// +// See https://goo.gl/T8kRVH for more details. +type IPAMConfig struct { + Subnet string `json:",omitempty"` + IPRange string `json:",omitempty"` + Gateway string `json:",omitempty"` + AuxAddress map[string]string `json:"AuxiliaryAddresses,omitempty"` +} + +// CreateNetwork creates a new network, returning the network instance, +// or an error in case of failure. +// +// See https://goo.gl/6GugX3 for more details. +func (c *Client) CreateNetwork(opts CreateNetworkOptions) (*Network, error) { + resp, err := c.do( + "POST", + "/networks/create", + doOptions{ + data: opts, + }, + ) + if err != nil { + if e, ok := err.(*Error); ok && e.Status == http.StatusConflict { + return nil, ErrNetworkAlreadyExists + } + return nil, err + } + defer resp.Body.Close() + + type createNetworkResponse struct { + ID string + } + var ( + network Network + cnr createNetworkResponse + ) + if err := json.NewDecoder(resp.Body).Decode(&cnr); err != nil { + return nil, err + } + + network.Name = opts.Name + network.ID = cnr.ID + network.Driver = opts.Driver + + return &network, nil +} + +// RemoveNetwork removes a network or returns an error in case of failure. +// +// See https://goo.gl/6GugX3 for more details. +func (c *Client) RemoveNetwork(id string) error { + resp, err := c.do("DELETE", "/networks/"+id, doOptions{}) + if err != nil { + if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound { + return &NoSuchNetwork{ID: id} + } + return err + } + resp.Body.Close() + return nil +} + +// NetworkConnectionOptions specify parameters to the ConnectNetwork and DisconnectNetwork function. +// +// See https://goo.gl/6GugX3 for more details. +type NetworkConnectionOptions struct { + Container string +} + +// ConnectNetwork adds a container to a network or returns an error in case of failure. +// +// See https://goo.gl/6GugX3 for more details. +func (c *Client) ConnectNetwork(id string, opts NetworkConnectionOptions) error { + resp, err := c.do("POST", "/networks/"+id+"/connect", doOptions{data: opts}) + if err != nil { + if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound { + return &NoSuchNetworkOrContainer{NetworkID: id, ContainerID: opts.Container} + } + return err + } + resp.Body.Close() + return nil +} + +// DisconnectNetwork removes a container from a network or returns an error in case of failure. 
+//
+// See https://goo.gl/6GugX3 for more details.
+func (c *Client) DisconnectNetwork(id string, opts NetworkConnectionOptions) error {
+	resp, err := c.do("POST", "/networks/"+id+"/disconnect", doOptions{data: opts})
+	if err != nil {
+		if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound {
+			return &NoSuchNetworkOrContainer{NetworkID: id, ContainerID: opts.Container}
+		}
+		return err
+	}
+	resp.Body.Close()
+	return nil
+}
+
+// NoSuchNetwork is the error returned when a given network does not exist.
+type NoSuchNetwork struct {
+	ID string
+}
+
+func (err *NoSuchNetwork) Error() string {
+	return fmt.Sprintf("No such network: %s", err.ID)
+}
+
+// NoSuchNetworkOrContainer is the error returned when a given network or
+// container does not exist.
+type NoSuchNetworkOrContainer struct {
+	NetworkID   string
+	ContainerID string
+}
+
+func (err *NoSuchNetworkOrContainer) Error() string {
+	return fmt.Sprintf("No such network (%s) or container (%s)", err.NetworkID, err.ContainerID)
+}
diff --git a/vendor/github.com/fsouza/go-dockerclient/network_test.go b/vendor/github.com/fsouza/go-dockerclient/network_test.go
new file mode 100644
index 000000000..2bff70fe4
--- /dev/null
+++ b/vendor/github.com/fsouza/go-dockerclient/network_test.go
@@ -0,0 +1,173 @@
+// Copyright 2015 go-dockerclient authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package docker
+
+import (
+	"encoding/json"
+	"net/http"
+	"net/url"
+	"reflect"
+	"testing"
+)
+
+func TestListNetworks(t *testing.T) {
+	jsonNetworks := `[
+	{
+		"ID": "8dfafdbc3a40",
+		"Name": "blah",
+		"Type": "bridge",
+		"Endpoints":[{"ID": "918c11c8288a", "Name": "dsafdsaf", "Network": "8dfafdbc3a40"}]
+	},
+	{
+		"ID": "9fb1e39c",
+		"Name": "foo",
+		"Type": "bridge",
+		"Endpoints":[{"ID": "c080be979dda", "Name": "lllll2222", "Network": "9fb1e39c"}]
+	}
+]`
+	var expected []Network
+	err := json.Unmarshal([]byte(jsonNetworks), &expected)
+	if err != nil {
+		t.Fatal(err)
+	}
+	client := newTestClient(&FakeRoundTripper{message: jsonNetworks, status: http.StatusOK})
+	containers, err := client.ListNetworks()
+	if err != nil {
+		t.Fatal(err)
+	}
+	if !reflect.DeepEqual(containers, expected) {
+		t.Errorf("ListNetworks: Expected %#v. Got %#v.", expected, containers)
+	}
+}
+
+func TestNetworkInfo(t *testing.T) {
+	jsonNetwork := `{
+		"ID": "8dfafdbc3a40",
+		"Name": "blah",
+		"Type": "bridge",
+		"Endpoints":[{"ID": "918c11c8288a", "Name": "dsafdsaf", "Network": "8dfafdbc3a40"}]
+	}`
+	var expected Network
+	err := json.Unmarshal([]byte(jsonNetwork), &expected)
+	if err != nil {
+		t.Fatal(err)
+	}
+	fakeRT := &FakeRoundTripper{message: jsonNetwork, status: http.StatusOK}
+	client := newTestClient(fakeRT)
+	id := "8dfafdbc3a40"
+	network, err := client.NetworkInfo(id)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if !reflect.DeepEqual(*network, expected) {
+		t.Errorf("NetworkInfo(%q): Expected %#v. Got %#v.", id, expected, network)
+	}
+	expectedURL, _ := url.Parse(client.getURL("/networks/8dfafdbc3a40"))
+	if gotPath := fakeRT.requests[0].URL.Path; gotPath != expectedURL.Path {
+		t.Errorf("NetworkInfo(%q): Wrong path in request. Want %q.
Got %q.", id, expectedURL.Path, gotPath) + } +} + +func TestNetworkCreate(t *testing.T) { + jsonID := `{"ID": "8dfafdbc3a40"}` + jsonNetwork := `{ + "ID": "8dfafdbc3a40", + "Name": "foobar", + "Driver": "bridge" + }` + var expected Network + err := json.Unmarshal([]byte(jsonNetwork), &expected) + if err != nil { + t.Fatal(err) + } + + client := newTestClient(&FakeRoundTripper{message: jsonID, status: http.StatusOK}) + opts := CreateNetworkOptions{"foobar", false, "bridge", IPAMOptions{}, nil} + network, err := client.CreateNetwork(opts) + if err != nil { + t.Fatal(err) + } + + if !reflect.DeepEqual(*network, expected) { + t.Errorf("CreateNetwork: Expected %#v. Got %#v.", expected, network) + } +} + +func TestNetworkRemove(t *testing.T) { + id := "8dfafdbc3a40" + fakeRT := &FakeRoundTripper{message: "", status: http.StatusNoContent} + client := newTestClient(fakeRT) + err := client.RemoveNetwork(id) + if err != nil { + t.Fatal(err) + } + req := fakeRT.requests[0] + expectedMethod := "DELETE" + if req.Method != expectedMethod { + t.Errorf("RemoveNetwork(%q): Wrong HTTP method. Want %s. Got %s.", id, expectedMethod, req.Method) + } + u, _ := url.Parse(client.getURL("/networks/" + id)) + if req.URL.Path != u.Path { + t.Errorf("RemoveNetwork(%q): Wrong request path. Want %q. Got %q.", id, u.Path, req.URL.Path) + } +} + +func TestNetworkConnect(t *testing.T) { + id := "8dfafdbc3a40" + fakeRT := &FakeRoundTripper{message: "", status: http.StatusNoContent} + client := newTestClient(fakeRT) + opts := NetworkConnectionOptions{"foobar"} + err := client.ConnectNetwork(id, opts) + if err != nil { + t.Fatal(err) + } + req := fakeRT.requests[0] + expectedMethod := "POST" + if req.Method != expectedMethod { + t.Errorf("ConnectNetwork(%q): Wrong HTTP method. Want %s. Got %s.", id, expectedMethod, req.Method) + } + u, _ := url.Parse(client.getURL("/networks/" + id + "/connect")) + if req.URL.Path != u.Path { + t.Errorf("ConnectNetwork(%q): Wrong request path. Want %q. Got %q.", id, u.Path, req.URL.Path) + } +} + +func TestNetworkConnectNotFound(t *testing.T) { + client := newTestClient(&FakeRoundTripper{message: "no such network container", status: http.StatusNotFound}) + opts := NetworkConnectionOptions{"foobar"} + err := client.ConnectNetwork("8dfafdbc3a40", opts) + if serr, ok := err.(*NoSuchNetworkOrContainer); !ok { + t.Errorf("ConnectNetwork: wrong error type: %s.", serr) + } +} + +func TestNetworkDisconnect(t *testing.T) { + id := "8dfafdbc3a40" + fakeRT := &FakeRoundTripper{message: "", status: http.StatusNoContent} + client := newTestClient(fakeRT) + opts := NetworkConnectionOptions{"foobar"} + err := client.DisconnectNetwork(id, opts) + if err != nil { + t.Fatal(err) + } + req := fakeRT.requests[0] + expectedMethod := "POST" + if req.Method != expectedMethod { + t.Errorf("DisconnectNetwork(%q): Wrong HTTP method. Want %s. Got %s.", id, expectedMethod, req.Method) + } + u, _ := url.Parse(client.getURL("/networks/" + id + "/disconnect")) + if req.URL.Path != u.Path { + t.Errorf("DisconnectNetwork(%q): Wrong request path. Want %q. 
Got %q.", id, u.Path, req.URL.Path) + } +} + +func TestNetworkDisconnectNotFound(t *testing.T) { + client := newTestClient(&FakeRoundTripper{message: "no such network container", status: http.StatusNotFound}) + opts := NetworkConnectionOptions{"foobar"} + err := client.DisconnectNetwork("8dfafdbc3a40", opts) + if serr, ok := err.(*NoSuchNetworkOrContainer); !ok { + t.Errorf("DisconnectNetwork: wrong error type: %s.", serr) + } +} diff --git a/vendor/github.com/fsouza/go-dockerclient/signal.go b/vendor/github.com/fsouza/go-dockerclient/signal.go new file mode 100644 index 000000000..16aa00388 --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/signal.go @@ -0,0 +1,49 @@ +// Copyright 2014 go-dockerclient authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package docker + +// Signal represents a signal that can be send to the container on +// KillContainer call. +type Signal int + +// These values represent all signals available on Linux, where containers will +// be running. +const ( + SIGABRT = Signal(0x6) + SIGALRM = Signal(0xe) + SIGBUS = Signal(0x7) + SIGCHLD = Signal(0x11) + SIGCLD = Signal(0x11) + SIGCONT = Signal(0x12) + SIGFPE = Signal(0x8) + SIGHUP = Signal(0x1) + SIGILL = Signal(0x4) + SIGINT = Signal(0x2) + SIGIO = Signal(0x1d) + SIGIOT = Signal(0x6) + SIGKILL = Signal(0x9) + SIGPIPE = Signal(0xd) + SIGPOLL = Signal(0x1d) + SIGPROF = Signal(0x1b) + SIGPWR = Signal(0x1e) + SIGQUIT = Signal(0x3) + SIGSEGV = Signal(0xb) + SIGSTKFLT = Signal(0x10) + SIGSTOP = Signal(0x13) + SIGSYS = Signal(0x1f) + SIGTERM = Signal(0xf) + SIGTRAP = Signal(0x5) + SIGTSTP = Signal(0x14) + SIGTTIN = Signal(0x15) + SIGTTOU = Signal(0x16) + SIGUNUSED = Signal(0x1f) + SIGURG = Signal(0x17) + SIGUSR1 = Signal(0xa) + SIGUSR2 = Signal(0xc) + SIGVTALRM = Signal(0x1a) + SIGWINCH = Signal(0x1c) + SIGXCPU = Signal(0x18) + SIGXFSZ = Signal(0x19) +) diff --git a/vendor/github.com/fsouza/go-dockerclient/tar.go b/vendor/github.com/fsouza/go-dockerclient/tar.go new file mode 100644 index 000000000..48042cbda --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/tar.go @@ -0,0 +1,117 @@ +// Copyright 2015 go-dockerclient authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package docker + +import ( + "fmt" + "io" + "io/ioutil" + "os" + "path" + "path/filepath" + "strings" + + "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive" + "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/fileutils" +) + +func createTarStream(srcPath, dockerfilePath string) (io.ReadCloser, error) { + excludes, err := parseDockerignore(srcPath) + if err != nil { + return nil, err + } + + includes := []string{"."} + + // If .dockerignore mentions .dockerignore or the Dockerfile + // then make sure we send both files over to the daemon + // because Dockerfile is, obviously, needed no matter what, and + // .dockerignore is needed to know if either one needs to be + // removed. The deamon will remove them for us, if needed, after it + // parses the Dockerfile. 
+ // + // https://github.com/docker/docker/issues/8330 + // + forceIncludeFiles := []string{".dockerignore", dockerfilePath} + + for _, includeFile := range forceIncludeFiles { + if includeFile == "" { + continue + } + keepThem, err := fileutils.Matches(includeFile, excludes) + if err != nil { + return nil, fmt.Errorf("cannot match .dockerfile: '%s', error: %s", includeFile, err) + } + if keepThem { + includes = append(includes, includeFile) + } + } + + if err := validateContextDirectory(srcPath, excludes); err != nil { + return nil, err + } + tarOpts := &archive.TarOptions{ + ExcludePatterns: excludes, + IncludeFiles: includes, + Compression: archive.Uncompressed, + NoLchown: true, + } + return archive.TarWithOptions(srcPath, tarOpts) +} + +// validateContextDirectory checks if all the contents of the directory +// can be read and returns an error if some files can't be read. +// Symlinks which point to non-existing files don't trigger an error +func validateContextDirectory(srcPath string, excludes []string) error { + return filepath.Walk(filepath.Join(srcPath, "."), func(filePath string, f os.FileInfo, err error) error { + // skip this directory/file if it's not in the path, it won't get added to the context + if relFilePath, err := filepath.Rel(srcPath, filePath); err != nil { + return err + } else if skip, err := fileutils.Matches(relFilePath, excludes); err != nil { + return err + } else if skip { + if f.IsDir() { + return filepath.SkipDir + } + return nil + } + + if err != nil { + if os.IsPermission(err) { + return fmt.Errorf("can't stat '%s'", filePath) + } + if os.IsNotExist(err) { + return nil + } + return err + } + + // skip checking if symlinks point to non-existing files, such symlinks can be useful + // also skip named pipes, because they hanging on open + if f.Mode()&(os.ModeSymlink|os.ModeNamedPipe) != 0 { + return nil + } + + if !f.IsDir() { + currentFile, err := os.Open(filePath) + if err != nil && os.IsPermission(err) { + return fmt.Errorf("no permission to read from '%s'", filePath) + } + currentFile.Close() + } + return nil + }) +} + +func parseDockerignore(root string) ([]string, error) { + var excludes []string + ignore, err := ioutil.ReadFile(path.Join(root, ".dockerignore")) + if err != nil && !os.IsNotExist(err) { + return excludes, fmt.Errorf("error reading .dockerignore: '%s'", err) + } + excludes = strings.Split(string(ignore), "\n") + + return excludes, nil +} diff --git a/vendor/github.com/fsouza/go-dockerclient/testing/data/.dockerignore b/vendor/github.com/fsouza/go-dockerclient/testing/data/.dockerignore new file mode 100644 index 000000000..027e8c20e --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/testing/data/.dockerignore @@ -0,0 +1,3 @@ +container.tar +dockerfile.tar +foofile diff --git a/vendor/github.com/fsouza/go-dockerclient/testing/data/Dockerfile b/vendor/github.com/fsouza/go-dockerclient/testing/data/Dockerfile new file mode 100644 index 000000000..0948dcfa8 --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/testing/data/Dockerfile @@ -0,0 +1,15 @@ +# this file describes how to build tsuru python image +# to run it: +# 1- install docker +# 2- run: $ docker build -t tsuru/python https://raw.github.com/tsuru/basebuilder/master/python/Dockerfile + +from base:ubuntu-quantal +run apt-get install wget -y --force-yes +run wget http://github.com/tsuru/basebuilder/tarball/master -O basebuilder.tar.gz --no-check-certificate +run mkdir /var/lib/tsuru +run tar -xvf basebuilder.tar.gz -C /var/lib/tsuru --strip 1 +run cp 
/var/lib/tsuru/python/deploy /var/lib/tsuru +run cp /var/lib/tsuru/base/restart /var/lib/tsuru +run cp /var/lib/tsuru/base/start /var/lib/tsuru +run /var/lib/tsuru/base/install +run /var/lib/tsuru/base/setup diff --git a/vendor/github.com/fsouza/go-dockerclient/testing/data/barfile b/vendor/github.com/fsouza/go-dockerclient/testing/data/barfile new file mode 100644 index 000000000..e69de29bb diff --git a/vendor/github.com/fsouza/go-dockerclient/testing/data/ca.pem b/vendor/github.com/fsouza/go-dockerclient/testing/data/ca.pem new file mode 100644 index 000000000..8e38bba13 --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/testing/data/ca.pem @@ -0,0 +1,18 @@ +-----BEGIN CERTIFICATE----- +MIIC1TCCAb+gAwIBAgIQJ9MsNxrUxumNbAytGi3GEDALBgkqhkiG9w0BAQswFjEU +MBIGA1UEChMLQm9vdDJEb2NrZXIwHhcNMTQxMDE2MjAyMTM4WhcNMTcwOTMwMjAy +MTM4WjAWMRQwEgYDVQQKEwtCb290MkRvY2tlcjCCASIwDQYJKoZIhvcNAQEBBQAD +ggEPADCCAQoCggEBALpFCSARjG+5yXoqr7UMzuE0df7RRZfeRZI06lJ02ZqV4Iii +rgL7ML9yPxX50NbLnjiilSDTUhnyocYFItokzUzz8qpX/nlYhuN2Iqwh4d0aWS8z +f5y248F+H1z+HY2W8NPl/6DVlVwYaNW1/k+RPMlHS0INLR6j+3Ievew7RNE0NnM2 +znELW6NetekDt3GUcz0Z95vDUDfdPnIk1eIFMmYvLxZh23xOca4Q37a3S8F3d+dN ++OOpwjdgY9Qme0NQUaXpgp58jWuQfB8q7mZrdnLlLqRa8gx1HeDSotX7UmWtWPkb +vd9EdlKLYw5PVpxMV1rkwf2t4TdgD5NfkpXlXkkCAwEAAaMjMCEwDgYDVR0PAQH/ +BAQDAgCkMA8GA1UdEwEB/wQFMAMBAf8wCwYJKoZIhvcNAQELA4IBAQBxYjHVSKqE +MJw7CW0GddesULtXXVWGJuZdWJLQlPvPMfIfjIvlcZyS4cdVNiQ3sREFIZz8TpII +CT0/Pg3sgv/FcOQe1CN0xZYZcyiAZHK1z0fJQq2qVpdv7+tJcjI2vvU6NI24iQCo +W1wz25trJz9QbdB2MRLMjyz7TSWuafztIvcfEzaIdQ0Whqund/cSuPGQx5IwF83F +rvlkOyJSH2+VIEBTCIuykJeL0DLTt8cePBQR5L1ISXb4RUMK9ZtqRscBRv8sn7o2 +ixG3wtL0gYF4xLtsQWVxI3iFVrU3WzOH/3c5shVRkWBd+AQRSwCJI4mKH7penJCF +i3/zzlkvOnjV +-----END CERTIFICATE----- diff --git a/vendor/github.com/fsouza/go-dockerclient/testing/data/cert.pem b/vendor/github.com/fsouza/go-dockerclient/testing/data/cert.pem new file mode 100644 index 000000000..5e7244b24 --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/testing/data/cert.pem @@ -0,0 +1,18 @@ +-----BEGIN CERTIFICATE----- +MIIC6DCCAdKgAwIBAgIRANO6ymxQAjp66KmEka1G6b0wCwYJKoZIhvcNAQELMBYx +FDASBgNVBAoTC0Jvb3QyRG9ja2VyMB4XDTE0MTAxNjIwMjE1MloXDTE3MDkzMDIw +MjE1MlowFjEUMBIGA1UEChMLQm9vdDJEb2NrZXIwggEiMA0GCSqGSIb3DQEBAQUA +A4IBDwAwggEKAoIBAQDGA1mAhSOpZspD1dpZ7qVEQrIJw4Xo8252jHaORnEdDiFm +b6brEmr6jw8t4P3IGxbqBc/TqRV+SSXxwYEVvfpeQKH+SmqStoMNtD3Ura161az4 +V0BcxMtSlsUGpoz+//QCAq8qiaxMwgiyc5253mkQm88anj2cNt7xbewiu/KFWuf7 +BVpNK1+ltpJmlukfcj/G+I1bw7j1KxBjDrFqe5cyDuuZcDL2tmUXP/ZWDyXwSv+H +AOckqn44z6aXlBkVvOXDBZJqY76d/vWVDNCuZeXRnqlhP3t1kH4V0RQXo+JD2tgt +JgdU0unzyoFOSWNUBPm73tqmjUGGAmGHBmeegJr/AgMBAAGjNTAzMA4GA1UdDwEB +/wQEAwIAgDATBgNVHSUEDDAKBggrBgEFBQcDAjAMBgNVHRMBAf8EAjAAMAsGCSqG +SIb3DQEBCwOCAQEABVTWl5SmBP+j5He5bQsgnIXjviSKqe40/10V4LJAOmilycRF +zLrzM+YMwfjg6PLIs8CldAMWHw9y9ktZY4MxkgCktaiaN/QmMTMwFWEcN4wy5IpM +U5l93eAg7xsnY430h3QBBADujX4wdF3fs8rSL8zAAQFL0ihurwU124K3yXKsrwpb +CiVUGfIN4sPwjy8Ws9oxHFDC9/P8lgjHZ1nBIf8KSHnMzlxDGj7isQfhtH+7mcCL +cM1qO2NirS2v7uaEPPY+MJstAz+W7EJCW9dfMSmHna2SDC37Xkin7uEY9z+qaKFL +8d/XxOB/L8Ucy8VZhdsv0dsBq5KfJntITM0ksQ== +-----END CERTIFICATE----- diff --git a/vendor/github.com/fsouza/go-dockerclient/testing/data/container.tar b/vendor/github.com/fsouza/go-dockerclient/testing/data/container.tar new file mode 100644 index 0000000000000000000000000000000000000000..e4b066e3b6df8cb78ac445a34234f3780d164cf4 GIT binary patch literal 2048 zcmeH_Q3``F42FH)DgF~kTC`qZ7s*`9%A^%r$Bu89Fp<6NMew1akmheFe?H>)Y5N#5 z`(UT)m>?q4G^iwZ#(XmAwH8Ujv`|_rQd)Ig3sQ!(szArs+5bAH%#&Di1HU}iJx_zp 
z+3uU9k~Zgl)J<3?S%)LS_Hgc7e)t4AX&%Rz>>WAcX2Ec>82D}md=O1Y)p%bo=N_rJ OD+CIGLZA@%gTMmt=q{T8 literal 0 HcmV?d00001 diff --git a/vendor/github.com/fsouza/go-dockerclient/testing/data/dockerfile.tar b/vendor/github.com/fsouza/go-dockerclient/testing/data/dockerfile.tar new file mode 100644 index 0000000000000000000000000000000000000000..32c9ce64704835cd096b85ac44c35b5087b5ccdd GIT binary patch literal 2560 zcmeHGy>8<$49;3V1%d0TNOs}`$a>xT46-c8LTt+?QB8ACf0XPiQll-h0~9$I?_v`_`p)qp;@ z0OJK)JAmosQD=m*-~y?5ASGvD1{zS;L7n!AYz2z}2Y8%Kb25fgK0fDb5l4UE+{yF$ zXs`{{TG^hbn!J);Cl1>2UV0=k!T8hL+GbhfZ2u5L51|SJ2KFb&fyiW3|3Qw(jvC+i zouk4oz*u9Q((Iyric9uLhPZsmgZ8ANMrS_2p5cn+n!M}dU&=mMrdq8|OlgOvF-oFN zh5A!%9Pk(EcxS4q(c~Z~u-BL7!+gIN2&&-GnGy1YRpY|{e@?X?J9}9;KY_$PxYO}H o;5QJT#=q||{Y*ZuNn-Gk-)jtGb|Y`+PV+v2`vmS2xaA4_1I+dVl>h($ literal 0 HcmV?d00001 diff --git a/vendor/github.com/fsouza/go-dockerclient/testing/data/foofile b/vendor/github.com/fsouza/go-dockerclient/testing/data/foofile new file mode 100644 index 000000000..e69de29bb diff --git a/vendor/github.com/fsouza/go-dockerclient/testing/data/key.pem b/vendor/github.com/fsouza/go-dockerclient/testing/data/key.pem new file mode 100644 index 000000000..a9346bcf4 --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/testing/data/key.pem @@ -0,0 +1,27 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIEowIBAAKCAQEAxgNZgIUjqWbKQ9XaWe6lREKyCcOF6PNudox2jkZxHQ4hZm+m +6xJq+o8PLeD9yBsW6gXP06kVfkkl8cGBFb36XkCh/kpqkraDDbQ91K2tetWs+FdA +XMTLUpbFBqaM/v/0AgKvKomsTMIIsnOdud5pEJvPGp49nDbe8W3sIrvyhVrn+wVa +TStfpbaSZpbpH3I/xviNW8O49SsQYw6xanuXMg7rmXAy9rZlFz/2Vg8l8Er/hwDn +JKp+OM+ml5QZFbzlwwWSamO+nf71lQzQrmXl0Z6pYT97dZB+FdEUF6PiQ9rYLSYH +VNLp88qBTkljVAT5u97apo1BhgJhhwZnnoCa/wIDAQABAoIBAQCaGy9EC9pmU95l +DwGh7k5nIrUnTilg1FwLHWSDdCVCZKXv8ENrPelOWZqJrUo1u4eI2L8XTsewgkNq +tJu/DRzWz9yDaO0qg6rZNobMh+K076lvmZA44twOydJLS8H+D7ua+PXU2FLlZjmY +kMyXRJZmW6zCXZc7haTbJx6ZJccoquk/DkS4FcFurJP177u1YrWS9TTw9kensUtU +jQ63uf56UTN1i+0+Rxl7OW1TZlqwlri5I4njg5249+FxwwHzIq8+l7zD7K9pl8c/ +nG1HuulvU2bVlDlRdyslMPAH34vw9Sku1BD8furrJLr1na5lRSLKJODEaIPEsLwv +CdEUwP9JAoGBAO76ZW80RyNB2fA+wbTq70Sr8CwrXxYemXrez5LKDC7SsohKFCPE +IedpO/n+nmymiiJvMm874EExoG6BVrbkWkeb+2vinEfOQNlDMsDx7WLjPekP3t6i +rXHO3CjFooVFq2z3mZa/Nc5NZqu8fNWNCKJxZDJphdoj6sORNJIUvZVjAoGBANQd +++J+ITcu3/+A6JrGcgLunBFQYPqkiItk0J4QKYKuX5ik9rWcQDN8TTtfW2mDuiQ4 +NrCwuVPq1V1kB16JzH017SsYLo9g8I20YjnBZge9pKTeUaLVTb3C50LW8FBylop0 +Bnm597dNbtSjphjoTMg0XyC19o3Esf2YeWG0QNS1AoGAWWDfFRNJU99qIldmXULM +0DM6NVrXSk+ReYnhunXEzrJQwXZrR+EwCPurydk36Uz0NuK9yypquhdUeF/5TZfk +SAoHo5byekyipl9imRUigqyY2BTudvgCxKDoaHtaSFwBPFTyZZYICquaLbrmOXxw +8UhVgCFFRYvPXuts7QHC0h8CgYBWEvy9gfU0kV7wLX02IUTuj6jhFb7ktpN6DSTi +nyhZES1VoctDEu6ydcRZTW6ouH12aSE4Pd5WgTqntQmQgVZrkNB25k8ue2Xh+srJ +KQOgLIJ9LIHwE6KCWG7DnrjRzE3uTPq7to0g4tkQjH/AJ7PQof/gJDayfJjFkXPg +A+cy6QKBgEPbKpiqscm03gT2QanBut5pg4dqPOxp0SlErA3kSFNTRK3oYBQPC+LH +qA5nD5brdkeNBB58Rll8Zpzxiff50bcvLP/7/Sb3NjaXFTEY0gVbdRof3n6N0YP3 +Hu5XDNJ9RNkNzE5RIG1g86KE+aKlcrKMaigqAiuIy2PSnjkQeGk8 +-----END RSA PRIVATE KEY----- diff --git a/vendor/github.com/fsouza/go-dockerclient/testing/data/server.pem b/vendor/github.com/fsouza/go-dockerclient/testing/data/server.pem new file mode 100644 index 000000000..89cc445e1 --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/testing/data/server.pem @@ -0,0 +1,18 @@ +-----BEGIN CERTIFICATE----- +MIIC/DCCAeagAwIBAgIQMUILcXtvmSOK63zEBo0VXzALBgkqhkiG9w0BAQswFjEU +MBIGA1UEChMLQm9vdDJEb2NrZXIwHhcNMTQxMDE2MjAyMTQ2WhcNMTcwOTMwMjAy +MTQ2WjAWMRQwEgYDVQQKEwtCb290MkRvY2tlcjCCASIwDQYJKoZIhvcNAQEBBQAD 
+ggEPADCCAQoCggEBANxUOUhNnqFnrTlLsBYzfFRZWQo268l+4K4lOJCVbfDonP3g
+Mz0vGi9fcyFqEWSA8Y+ShXna625HTnReCwFdsu0861qCIq7v95hFFCyOe0iIxpd0
+AKLnl90d+1vonE7andgFgoobbTiMly4UK4H6z8D148fFNIihoteOG3PIF89TFxP7
+CJ/3wXnx/IKpdlO8PAnub3tBPJHvGDj7KORLy4IBxRX5VBAdfGNybE66fcrehEva
+rLA4m9pgiaR/Nnr9FdKhPyqYdjflLNvzydxNvMIV4M0hFlhXmYvpMjA5/XsTnsyV
+t9JHJa5Upwqsbne08t7rsm7liZNxZlko8xPOTQcCAwEAAaNKMEgwDgYDVR0PAQH/
+BAQDAgCgMAwGA1UdEwEB/wQCMAAwKAYDVR0RBCEwH4ILYm9vdDJkb2NrZXKHBH8A
+AAGHBAoAAg+HBMCoO2cwCwYJKoZIhvcNAQELA4IBAQAYoYcDkDWkl73FZ0WnPmAj
+LiF7HU95Qg3KyEpFsAJeShSLPPbQntmwhdekEzY4tQ3eKQB/+zHFjzsCr/lmDUmH
+Ea/ryQ17C+jyH+Ykg0IWW6L6veZhvRDg6Z9focVtPVBRxPTqC/Qhb54blWRASV+W
+UreMuXQ5+1dQptAM7ixOeLVHjBi/bd9TL3jvwBVCr9QedteMjjK4TCF9Tbcou+MF
+2w3OJJZMDhcD+YwoK9uJDqlKmcTm/vVMbSsp/pTMcnQ7jxCeR8/XyX+VwTZwaHAa
+o92Q/eg3THAiWhvyT/SzyH9dHHBAyXynUwGCggKawHktfvW4QXRPuLxLrJ7iB5cy
+-----END CERTIFICATE-----
diff --git a/vendor/github.com/fsouza/go-dockerclient/testing/data/serverkey.pem b/vendor/github.com/fsouza/go-dockerclient/testing/data/serverkey.pem
new file mode 100644
index 000000000..c897e5da5
--- /dev/null
+++ b/vendor/github.com/fsouza/go-dockerclient/testing/data/serverkey.pem
@@ -0,0 +1,27 @@
+-----BEGIN RSA PRIVATE KEY-----
+MIIEoAIBAAKCAQEA3FQ5SE2eoWetOUuwFjN8VFlZCjbryX7griU4kJVt8Oic/eAz
+PS8aL19zIWoRZIDxj5KFedrrbkdOdF4LAV2y7TzrWoIiru/3mEUULI57SIjGl3QA
+oueX3R37W+icTtqd2AWCihttOIyXLhQrgfrPwPXjx8U0iKGi144bc8gXz1MXE/sI
+n/fBefH8gql2U7w8Ce5ve0E8ke8YOPso5EvLggHFFflUEB18Y3JsTrp9yt6ES9qs
+sDib2mCJpH82ev0V0qE/Kph2N+Us2/PJ3E28whXgzSEWWFeZi+kyMDn9exOezJW3
+0kclrlSnCqxud7Ty3uuybuWJk3FmWSjzE85NBwIDAQABAoIBAG0ak+cW8LeShHf7
+3+2Of0GxoOLrAWWdG5uAuPr31CJYve0FybnBimDtDjD8ujIfm/7xmoEWBEFutA3x
+x9dcU88gvJbsHEqub9gKVQwfXjMz78tt2SbSMiR/xUnk7QorPcCMMfE71aEMFYzu
+1gCed6Rg3vO81t/V0rKVH0j9S7UQz5v/oX15eVDV5LOqyCHwAi6K0eXXbqnbI0TH
+SOQ/nexM2msVXWbO9t6ra6f5V7FXziDK5Xi+rPxRbX9mkrDzxDAevfuRqYBx5vtL
+W2Q2hKjUAHFgXFniNSZBS7dCdAtz0el/3ct+cNmpuTMhhs7M6wC1CuYiZ/DxLiFh
+Si73VckCgYEA+/ceh3+VjtQ0rgEw8sD9bqYEA8IaBiObjneIoFnKBYRG7yZd8JMm
+HD4M/aQ1qhcRLPN7GR03YQULgQJURbKSjJHnhfTXHyeHC3NN4gMVHQXewu2MHCh6
+7FCQ9CfK0KcYLgegVVvL3PrF3hyWGnmTu+G0UkDQRYVnaNrB7snrW6UCgYEA39tq
++MCQdu0moJ5szSZf02undg9EeW6isk9qzi7TId3/MLci2eH7PEnipipPUK3+DERq
+aba0y0TKgBR2EXvXLFJA/+kfdo2loIEHOfox85HVfxgUaFRti63ZI0uF8D0QT2Yy
+oJal+RFghVoSnv4LjhRKEPbIkScTXGjdK+7wFjsCfz79iKRXQQx0ALd/lL0bgkAn
+QNmvrNHcFQeI2p8700WNzC39aX67SsvEt3qxkrjzC1gxhpTAuReIK1gVPPwvqHN8
+BmV20FD5kMlMCix2mNCopwgUWvKvLAvoGFTxncKMA39+aJbuXAjiqJTekKgNvOE7
+i9kEWw0GTNPp3JHV6QECgYAPwb0M11kT1euDIMOdyRazpf86kyaJuZzgGjD1ZFxe
+JOcigbGFTp/FhZnbglzk2+pm6KXo3QBq0mPCki4hWusxZnTGzpz1VlETNCHTFeZQ
+M7KoaIR/N3oie9Et59H8r/+m5xWnMhNqratyl316DX24uXrhKM3DUdHODl+LCR2D
+IwKBgE1MbHuwolUPEw3HeO4R7NMFVTFei7E/fpUsimPfArGg8UydwvloNT1myJos
+N2JzfGGjN2KPVcBk9fOs71mJ6VcK3C3g5JIccplk6h9VNaw55+zdQvKPTzoBoTvy
+A+Fwx2AlF61KeRF87DL2YTRJ6B9MHmWgf7+GVZOxomLgEAcZ
+-----END RSA PRIVATE KEY-----
diff --git a/vendor/github.com/fsouza/go-dockerclient/testing/server.go b/vendor/github.com/fsouza/go-dockerclient/testing/server.go
new file mode 100644
index 000000000..b16e71367
--- /dev/null
+++ b/vendor/github.com/fsouza/go-dockerclient/testing/server.go
@@ -0,0 +1,1246 @@
+// Copyright 2015 go-dockerclient authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package testing provides a fake implementation of the Docker API, useful for
+// testing purposes.
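+//
+// All of the server's state (containers, images, networks, volumes and
+// execs) is kept in memory; no real Docker daemon is involved.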
+package testing
+
+import (
+	"archive/tar"
+	"crypto/rand"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io/ioutil"
+	mathrand "math/rand"
+	"net"
+	"net/http"
+	"regexp"
+	"strconv"
+	"strings"
+	"sync"
+	"time"
+
+	"github.com/fsouza/go-dockerclient"
+	"github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/stdcopy"
+	"github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux"
+)
+
+var nameRegexp = regexp.MustCompile(`^[a-zA-Z0-9][a-zA-Z0-9_.-]+$`)
+
+// DockerServer represents a programmable, concurrent (not much) HTTP server
+// implementing a fake version of the Docker remote API.
+//
+// It can be used in standalone mode, listening for connections, or as an
+// arbitrary HTTP handler.
+//
+// For more details on the remote API, check http://goo.gl/G3plxW.
+type DockerServer struct {
+	containers     []*docker.Container
+	uploadedFiles  map[string]string
+	execs          []*docker.ExecInspect
+	execMut        sync.RWMutex
+	cMut           sync.RWMutex
+	images         []docker.Image
+	iMut           sync.RWMutex
+	imgIDs         map[string]string
+	networks       []*docker.Network
+	netMut         sync.RWMutex
+	listener       net.Listener
+	mux            *mux.Router
+	hook           func(*http.Request)
+	failures       map[string]string
+	multiFailures  []map[string]string
+	execCallbacks  map[string]func()
+	statsCallbacks map[string]func(string) docker.Stats
+	customHandlers map[string]http.Handler
+	handlerMutex   sync.RWMutex
+	cChan          chan<- *docker.Container
+	volStore       map[string]*volumeCounter
+	volMut         sync.RWMutex
+}
+
+type volumeCounter struct {
+	volume docker.Volume
+	count  int
+}
+
+// NewServer returns a new instance of the fake server, in standalone mode. Use
+// the method URL to get the URL of the server.
+//
+// It receives the bind address (use 127.0.0.1:0 for getting an available port
+// on the host), a channel of containers, and a hook function that will be
+// called on every request.
+//
+// The fake server will send containers on the channel whenever a container
+// changes its state via the HTTP API (e.g. create, start and stop). This
+// channel may be nil, which means that the server won't notify on state
+// changes.
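+//
+// A minimal usage sketch (binding to an ephemeral port and pointing a
+// go-dockerclient client at the fake server):
+//
+//	server, err := NewServer("127.0.0.1:0", nil, nil)
+//	if err != nil {
+//		// handle error
+//	}
+//	defer server.Stop()
+//	client, err := docker.NewClient(server.URL())
+//	// any API calls made through client now hit the fake server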
+func NewServer(bind string, containerChan chan<- *docker.Container, hook func(*http.Request)) (*DockerServer, error) { + listener, err := net.Listen("tcp", bind) + if err != nil { + return nil, err + } + server := DockerServer{ + listener: listener, + imgIDs: make(map[string]string), + hook: hook, + failures: make(map[string]string), + execCallbacks: make(map[string]func()), + statsCallbacks: make(map[string]func(string) docker.Stats), + customHandlers: make(map[string]http.Handler), + uploadedFiles: make(map[string]string), + cChan: containerChan, + } + server.buildMuxer() + go http.Serve(listener, &server) + return &server, nil +} + +func (s *DockerServer) notify(container *docker.Container) { + if s.cChan != nil { + s.cChan <- container + } +} + +func (s *DockerServer) buildMuxer() { + s.mux = mux.NewRouter() + s.mux.Path("/commit").Methods("POST").HandlerFunc(s.handlerWrapper(s.commitContainer)) + s.mux.Path("/containers/json").Methods("GET").HandlerFunc(s.handlerWrapper(s.listContainers)) + s.mux.Path("/containers/create").Methods("POST").HandlerFunc(s.handlerWrapper(s.createContainer)) + s.mux.Path("/containers/{id:.*}/json").Methods("GET").HandlerFunc(s.handlerWrapper(s.inspectContainer)) + s.mux.Path("/containers/{id:.*}/rename").Methods("POST").HandlerFunc(s.handlerWrapper(s.renameContainer)) + s.mux.Path("/containers/{id:.*}/top").Methods("GET").HandlerFunc(s.handlerWrapper(s.topContainer)) + s.mux.Path("/containers/{id:.*}/start").Methods("POST").HandlerFunc(s.handlerWrapper(s.startContainer)) + s.mux.Path("/containers/{id:.*}/kill").Methods("POST").HandlerFunc(s.handlerWrapper(s.stopContainer)) + s.mux.Path("/containers/{id:.*}/stop").Methods("POST").HandlerFunc(s.handlerWrapper(s.stopContainer)) + s.mux.Path("/containers/{id:.*}/pause").Methods("POST").HandlerFunc(s.handlerWrapper(s.pauseContainer)) + s.mux.Path("/containers/{id:.*}/unpause").Methods("POST").HandlerFunc(s.handlerWrapper(s.unpauseContainer)) + s.mux.Path("/containers/{id:.*}/wait").Methods("POST").HandlerFunc(s.handlerWrapper(s.waitContainer)) + s.mux.Path("/containers/{id:.*}/attach").Methods("POST").HandlerFunc(s.handlerWrapper(s.attachContainer)) + s.mux.Path("/containers/{id:.*}").Methods("DELETE").HandlerFunc(s.handlerWrapper(s.removeContainer)) + s.mux.Path("/containers/{id:.*}/exec").Methods("POST").HandlerFunc(s.handlerWrapper(s.createExecContainer)) + s.mux.Path("/containers/{id:.*}/stats").Methods("GET").HandlerFunc(s.handlerWrapper(s.statsContainer)) + s.mux.Path("/containers/{id:.*}/archive").Methods("PUT").HandlerFunc(s.handlerWrapper(s.uploadToContainer)) + s.mux.Path("/exec/{id:.*}/resize").Methods("POST").HandlerFunc(s.handlerWrapper(s.resizeExecContainer)) + s.mux.Path("/exec/{id:.*}/start").Methods("POST").HandlerFunc(s.handlerWrapper(s.startExecContainer)) + s.mux.Path("/exec/{id:.*}/json").Methods("GET").HandlerFunc(s.handlerWrapper(s.inspectExecContainer)) + s.mux.Path("/images/create").Methods("POST").HandlerFunc(s.handlerWrapper(s.pullImage)) + s.mux.Path("/build").Methods("POST").HandlerFunc(s.handlerWrapper(s.buildImage)) + s.mux.Path("/images/json").Methods("GET").HandlerFunc(s.handlerWrapper(s.listImages)) + s.mux.Path("/images/{id:.*}").Methods("DELETE").HandlerFunc(s.handlerWrapper(s.removeImage)) + s.mux.Path("/images/{name:.*}/json").Methods("GET").HandlerFunc(s.handlerWrapper(s.inspectImage)) + s.mux.Path("/images/{name:.*}/push").Methods("POST").HandlerFunc(s.handlerWrapper(s.pushImage)) + 
s.mux.Path("/images/{name:.*}/tag").Methods("POST").HandlerFunc(s.handlerWrapper(s.tagImage)) + s.mux.Path("/events").Methods("GET").HandlerFunc(s.listEvents) + s.mux.Path("/_ping").Methods("GET").HandlerFunc(s.handlerWrapper(s.pingDocker)) + s.mux.Path("/images/load").Methods("POST").HandlerFunc(s.handlerWrapper(s.loadImage)) + s.mux.Path("/images/{id:.*}/get").Methods("GET").HandlerFunc(s.handlerWrapper(s.getImage)) + s.mux.Path("/networks").Methods("GET").HandlerFunc(s.handlerWrapper(s.listNetworks)) + s.mux.Path("/networks/{id:.*}").Methods("GET").HandlerFunc(s.handlerWrapper(s.networkInfo)) + s.mux.Path("/networks").Methods("POST").HandlerFunc(s.handlerWrapper(s.createNetwork)) + s.mux.Path("/volumes").Methods("GET").HandlerFunc(s.handlerWrapper(s.listVolumes)) + s.mux.Path("/volumes/create").Methods("POST").HandlerFunc(s.handlerWrapper(s.createVolume)) + s.mux.Path("/volumes/{name:.*}").Methods("GET").HandlerFunc(s.handlerWrapper(s.inspectVolume)) + s.mux.Path("/volumes/{name:.*}").Methods("DELETE").HandlerFunc(s.handlerWrapper(s.removeVolume)) +} + +// SetHook changes the hook function used by the server. +// +// The hook function is a function called on every request. +func (s *DockerServer) SetHook(hook func(*http.Request)) { + s.hook = hook +} + +// PrepareExec adds a callback to a container exec in the fake server. +// +// This function will be called whenever the given exec id is started, and the +// given exec id will remain in the "Running" start while the function is +// running, so it's useful for emulating an exec that runs for two seconds, for +// example: +// +// opts := docker.CreateExecOptions{ +// AttachStdin: true, +// AttachStdout: true, +// AttachStderr: true, +// Tty: true, +// Cmd: []string{"/bin/bash", "-l"}, +// } +// // Client points to a fake server. +// exec, err := client.CreateExec(opts) +// // handle error +// server.PrepareExec(exec.ID, func() {time.Sleep(2 * time.Second)}) +// err = client.StartExec(exec.ID, docker.StartExecOptions{Tty: true}) // will block for 2 seconds +// // handle error +func (s *DockerServer) PrepareExec(id string, callback func()) { + s.execCallbacks[id] = callback +} + +// PrepareStats adds a callback that will be called for each container stats +// call. +// +// This callback function will be called multiple times if stream is set to +// true when stats is called. +func (s *DockerServer) PrepareStats(id string, callback func(string) docker.Stats) { + s.statsCallbacks[id] = callback +} + +// PrepareFailure adds a new expected failure based on a URL regexp it receives +// an id for the failure. +func (s *DockerServer) PrepareFailure(id string, urlRegexp string) { + s.failures[id] = urlRegexp +} + +// PrepareMultiFailures enqueues a new expected failure based on a URL regexp +// it receives an id for the failure. +func (s *DockerServer) PrepareMultiFailures(id string, urlRegexp string) { + s.multiFailures = append(s.multiFailures, map[string]string{"error": id, "url": urlRegexp}) +} + +// ResetFailure removes an expected failure identified by the given id. +func (s *DockerServer) ResetFailure(id string) { + delete(s.failures, id) +} + +// ResetMultiFailures removes all enqueued failures. +func (s *DockerServer) ResetMultiFailures() { + s.multiFailures = []map[string]string{} +} + +// CustomHandler registers a custom handler for a specific path. 
+//
+// For example:
+//
+//	server.CustomHandler("/containers/json", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+//		http.Error(w, "Something wrong is not right", http.StatusInternalServerError)
+//	}))
+func (s *DockerServer) CustomHandler(path string, handler http.Handler) {
+	s.handlerMutex.Lock()
+	s.customHandlers[path] = handler
+	s.handlerMutex.Unlock()
+}
+
+// MutateContainer changes the state of a container, returning an error if the
+// given id does not match any container "running" in the server.
+func (s *DockerServer) MutateContainer(id string, state docker.State) error {
+	s.cMut.Lock()
+	defer s.cMut.Unlock()
+	for _, container := range s.containers {
+		if container.ID == id {
+			container.State = state
+			return nil
+		}
+	}
+	return errors.New("container not found")
+}
+
+// Stop stops the server.
+func (s *DockerServer) Stop() {
+	if s.listener != nil {
+		s.listener.Close()
+	}
+}
+
+// URL returns the HTTP URL of the server.
+func (s *DockerServer) URL() string {
+	if s.listener == nil {
+		return ""
+	}
+	return "http://" + s.listener.Addr().String() + "/"
+}
+
+// ServeHTTP handles HTTP requests sent to the server.
+func (s *DockerServer) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+	s.handlerMutex.RLock()
+	defer s.handlerMutex.RUnlock()
+	for re, handler := range s.customHandlers {
+		if m, _ := regexp.MatchString(re, r.URL.Path); m {
+			handler.ServeHTTP(w, r)
+			return
+		}
+	}
+	s.mux.ServeHTTP(w, r)
+	if s.hook != nil {
+		s.hook(r)
+	}
+}
+
+// DefaultHandler returns the default http.Handler mux; it allows custom
+// handlers to invoke the default behavior when needed.
+func (s *DockerServer) DefaultHandler() http.Handler {
+	return s.mux
+}
+
+func (s *DockerServer) handlerWrapper(f func(http.ResponseWriter, *http.Request)) func(http.ResponseWriter, *http.Request) {
+	return func(w http.ResponseWriter, r *http.Request) {
+		for errorID, urlRegexp := range s.failures {
+			matched, err := regexp.MatchString(urlRegexp, r.URL.Path)
+			if err != nil {
+				http.Error(w, err.Error(), http.StatusBadRequest)
+				return
+			}
+			if !matched {
+				continue
+			}
+			http.Error(w, errorID, http.StatusBadRequest)
+			return
+		}
+		for i, failure := range s.multiFailures {
+			matched, err := regexp.MatchString(failure["url"], r.URL.Path)
+			if err != nil {
+				http.Error(w, err.Error(), http.StatusBadRequest)
+				return
+			}
+			if !matched {
+				continue
+			}
+			http.Error(w, failure["error"], http.StatusBadRequest)
+			s.multiFailures = append(s.multiFailures[:i], s.multiFailures[i+1:]...)
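+			// each enqueued failure fires at most once; it was just removed
+			// from the queue above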
+			return
+		}
+		f(w, r)
+	}
+}
+
+func (s *DockerServer) listContainers(w http.ResponseWriter, r *http.Request) {
+	all := r.URL.Query().Get("all")
+	s.cMut.RLock()
+	result := make([]docker.APIContainers, 0, len(s.containers))
+	for _, container := range s.containers {
+		if all == "1" || container.State.Running {
+			result = append(result, docker.APIContainers{
+				ID:      container.ID,
+				Image:   container.Image,
+				Command: fmt.Sprintf("%s %s", container.Path, strings.Join(container.Args, " ")),
+				Created: container.Created.Unix(),
+				Status:  container.State.String(),
+				Ports:   container.NetworkSettings.PortMappingAPI(),
+				Names:   []string{fmt.Sprintf("/%s", container.Name)},
+			})
+		}
+	}
+	s.cMut.RUnlock()
+	w.Header().Set("Content-Type", "application/json")
+	w.WriteHeader(http.StatusOK)
+	json.NewEncoder(w).Encode(result)
+}
+
+func (s *DockerServer) listImages(w http.ResponseWriter, r *http.Request) {
+	// images and tags are guarded by iMut, not cMut
+	s.iMut.RLock()
+	result := make([]docker.APIImages, len(s.images))
+	for i, image := range s.images {
+		result[i] = docker.APIImages{
+			ID:      image.ID,
+			Created: image.Created.Unix(),
+		}
+		for tag, id := range s.imgIDs {
+			if id == image.ID {
+				result[i].RepoTags = append(result[i].RepoTags, tag)
+			}
+		}
+	}
+	s.iMut.RUnlock()
+	w.Header().Set("Content-Type", "application/json")
+	w.WriteHeader(http.StatusOK)
+	json.NewEncoder(w).Encode(result)
+}
+
+func (s *DockerServer) findImage(id string) (string, error) {
+	// release the read lock before calling findImageByID, which acquires it
+	// again (recursively read-locking a sync.RWMutex can deadlock when a
+	// writer is waiting)
+	s.iMut.RLock()
+	image, ok := s.imgIDs[id]
+	s.iMut.RUnlock()
+	if ok {
+		return image, nil
+	}
+	image, _, err := s.findImageByID(id)
+	return image, err
+}
+
+func (s *DockerServer) findImageByID(id string) (string, int, error) {
+	s.iMut.RLock()
+	defer s.iMut.RUnlock()
+	for i, image := range s.images {
+		if image.ID == id {
+			return image.ID, i, nil
+		}
+	}
+	return "", -1, errors.New("No such image")
+}
+
+func (s *DockerServer) createContainer(w http.ResponseWriter, r *http.Request) {
+	var config struct {
+		*docker.Config
+		HostConfig *docker.HostConfig
+	}
+	defer r.Body.Close()
+	err := json.NewDecoder(r.Body).Decode(&config)
+	if err != nil {
+		http.Error(w, err.Error(), http.StatusBadRequest)
+		return
+	}
+	name := r.URL.Query().Get("name")
+	if name != "" && !nameRegexp.MatchString(name) {
+		http.Error(w, "Invalid container name", http.StatusInternalServerError)
+		return
+	}
+	if _, err := s.findImage(config.Image); err != nil {
+		http.Error(w, err.Error(), http.StatusNotFound)
+		return
+	}
+	ports := map[docker.Port][]docker.PortBinding{}
+	for port := range config.ExposedPorts {
+		ports[port] = []docker.PortBinding{{
+			HostIP:   "0.0.0.0",
+			HostPort: strconv.Itoa(mathrand.Int() % 0xffff),
+		}}
+	}
+
+	// the container may not have a command when built from a Dockerfile
+	var path string
+	var args []string
+	if len(config.Cmd) == 1 {
+		path = config.Cmd[0]
+	} else if len(config.Cmd) > 1 {
+		path = config.Cmd[0]
+		args = config.Cmd[1:]
+	}
+
+	generatedID := s.generateID()
+	config.Config.Hostname = generatedID[:12]
+	container := docker.Container{
+		Name:       name,
+		ID:         generatedID,
+		Created:    time.Now(),
+		Path:       path,
+		Args:       args,
+		Config:     config.Config,
+		HostConfig: config.HostConfig,
+		State: docker.State{
+			Running:   false,
+			Pid:       mathrand.Int() % 50000,
+			ExitCode:  0,
+			StartedAt: time.Now(),
+		},
+		Image: config.Image,
+		NetworkSettings: &docker.NetworkSettings{
+			IPAddress:   fmt.Sprintf("172.16.42.%d", mathrand.Int()%250+2),
+			IPPrefixLen: 24,
+			Gateway:     "172.16.42.1",
+			Bridge:      "docker0",
+			Ports:       ports,
+		},
+	}
+	s.cMut.Lock()
+	if container.Name != "" {
+		for _, c := 
range s.containers {
+			if c.Name == container.Name {
+				defer s.cMut.Unlock()
+				http.Error(w, "there's already a container with this name", http.StatusConflict)
+				return
+			}
+		}
+	}
+	s.containers = append(s.containers, &container)
+	s.cMut.Unlock()
+	w.WriteHeader(http.StatusCreated)
+	s.notify(&container)
+
+	json.NewEncoder(w).Encode(container)
+}
+
+func (s *DockerServer) generateID() string {
+	var buf [16]byte
+	rand.Read(buf[:])
+	return fmt.Sprintf("%x", buf)
+}
+
+func (s *DockerServer) renameContainer(w http.ResponseWriter, r *http.Request) {
+	id := mux.Vars(r)["id"]
+	container, index, err := s.findContainer(id)
+	if err != nil {
+		http.Error(w, err.Error(), http.StatusNotFound)
+		return
+	}
+	copy := *container
+	copy.Name = r.URL.Query().Get("name")
+	s.cMut.Lock()
+	defer s.cMut.Unlock()
+	if s.containers[index].ID == copy.ID {
+		s.containers[index] = &copy
+	}
+	w.WriteHeader(http.StatusNoContent)
+}
+
+func (s *DockerServer) inspectContainer(w http.ResponseWriter, r *http.Request) {
+	id := mux.Vars(r)["id"]
+	container, _, err := s.findContainer(id)
+	if err != nil {
+		http.Error(w, err.Error(), http.StatusNotFound)
+		return
+	}
+	w.Header().Set("Content-Type", "application/json")
+	w.WriteHeader(http.StatusOK)
+	json.NewEncoder(w).Encode(container)
+}
+
+func (s *DockerServer) statsContainer(w http.ResponseWriter, r *http.Request) {
+	id := mux.Vars(r)["id"]
+	_, _, err := s.findContainer(id)
+	if err != nil {
+		http.Error(w, err.Error(), http.StatusNotFound)
+		return
+	}
+	stream, _ := strconv.ParseBool(r.URL.Query().Get("stream"))
+	callback := s.statsCallbacks[id]
+	w.Header().Set("Content-Type", "application/json")
+	w.WriteHeader(http.StatusOK)
+	encoder := json.NewEncoder(w)
+	for {
+		var stats docker.Stats
+		if callback != nil {
+			stats = callback(id)
+		}
+		encoder.Encode(stats)
+		if !stream {
+			break
+		}
+	}
+}
+
+func (s *DockerServer) uploadToContainer(w http.ResponseWriter, r *http.Request) {
+	id := mux.Vars(r)["id"]
+	container, _, err := s.findContainer(id)
+	if err != nil {
+		http.Error(w, err.Error(), http.StatusNotFound)
+		return
+	}
+	if !container.State.Running {
+		w.WriteHeader(http.StatusInternalServerError)
+		fmt.Fprintf(w, "Container %s is not running", id)
+		return
+	}
+	path := r.URL.Query().Get("path")
+	s.uploadedFiles[id] = path
+	w.WriteHeader(http.StatusOK)
+}
+
+func (s *DockerServer) topContainer(w http.ResponseWriter, r *http.Request) {
+	id := mux.Vars(r)["id"]
+	container, _, err := s.findContainer(id)
+	if err != nil {
+		http.Error(w, err.Error(), http.StatusNotFound)
+		return
+	}
+	if !container.State.Running {
+		w.WriteHeader(http.StatusInternalServerError)
+		fmt.Fprintf(w, "Container %s is not running", id)
+		return
+	}
+	w.Header().Set("Content-Type", "application/json")
+	w.WriteHeader(http.StatusOK)
+	result := docker.TopResult{
+		Titles: []string{"UID", "PID", "PPID", "C", "STIME", "TTY", "TIME", "CMD"},
+		Processes: [][]string{
+			{"root", "7535", "7516", "0", "03:20", "?", "00:00:00", container.Path + " " + strings.Join(container.Args, " ")},
+		},
+	}
+	json.NewEncoder(w).Encode(result)
+}
+
+func (s *DockerServer) startContainer(w http.ResponseWriter, r *http.Request) {
+	id := mux.Vars(r)["id"]
+	container, _, err := s.findContainer(id)
+	if err != nil {
+		http.Error(w, err.Error(), http.StatusNotFound)
+		return
+	}
+	s.cMut.Lock()
+	defer s.cMut.Unlock()
+	defer r.Body.Close()
+	var hostConfig docker.HostConfig
+	err = json.NewDecoder(r.Body).Decode(&hostConfig)
+	if err != nil {
+		http.Error(w, err.Error(), 
http.StatusInternalServerError) + return + } + container.HostConfig = &hostConfig + if len(hostConfig.PortBindings) > 0 { + ports := map[docker.Port][]docker.PortBinding{} + for key, items := range hostConfig.PortBindings { + bindings := make([]docker.PortBinding, len(items)) + for i := range items { + binding := docker.PortBinding{ + HostIP: items[i].HostIP, + HostPort: items[i].HostPort, + } + if binding.HostIP == "" { + binding.HostIP = "0.0.0.0" + } + if binding.HostPort == "" { + binding.HostPort = strconv.Itoa(mathrand.Int() % 0xffff) + } + bindings[i] = binding + } + ports[key] = bindings + } + container.NetworkSettings.Ports = ports + } + if container.State.Running { + http.Error(w, "", http.StatusNotModified) + return + } + container.State.Running = true + s.notify(container) +} + +func (s *DockerServer) stopContainer(w http.ResponseWriter, r *http.Request) { + id := mux.Vars(r)["id"] + container, _, err := s.findContainer(id) + if err != nil { + http.Error(w, err.Error(), http.StatusNotFound) + return + } + s.cMut.Lock() + defer s.cMut.Unlock() + if !container.State.Running { + http.Error(w, "Container not running", http.StatusBadRequest) + return + } + w.WriteHeader(http.StatusNoContent) + container.State.Running = false + s.notify(container) +} + +func (s *DockerServer) pauseContainer(w http.ResponseWriter, r *http.Request) { + id := mux.Vars(r)["id"] + container, _, err := s.findContainer(id) + if err != nil { + http.Error(w, err.Error(), http.StatusNotFound) + return + } + s.cMut.Lock() + defer s.cMut.Unlock() + if container.State.Paused { + http.Error(w, "Container already paused", http.StatusBadRequest) + return + } + w.WriteHeader(http.StatusNoContent) + container.State.Paused = true +} + +func (s *DockerServer) unpauseContainer(w http.ResponseWriter, r *http.Request) { + id := mux.Vars(r)["id"] + container, _, err := s.findContainer(id) + if err != nil { + http.Error(w, err.Error(), http.StatusNotFound) + return + } + s.cMut.Lock() + defer s.cMut.Unlock() + if !container.State.Paused { + http.Error(w, "Container not paused", http.StatusBadRequest) + return + } + w.WriteHeader(http.StatusNoContent) + container.State.Paused = false +} + +func (s *DockerServer) attachContainer(w http.ResponseWriter, r *http.Request) { + id := mux.Vars(r)["id"] + container, _, err := s.findContainer(id) + if err != nil { + http.Error(w, err.Error(), http.StatusNotFound) + return + } + hijacker, ok := w.(http.Hijacker) + if !ok { + http.Error(w, "cannot hijack connection", http.StatusInternalServerError) + return + } + w.Header().Set("Content-Type", "application/vnd.docker.raw-stream") + w.WriteHeader(http.StatusOK) + conn, _, err := hijacker.Hijack() + if err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + wg := sync.WaitGroup{} + if r.URL.Query().Get("stdin") == "1" { + wg.Add(1) + go func() { + ioutil.ReadAll(conn) + wg.Done() + }() + } + outStream := stdcopy.NewStdWriter(conn, stdcopy.Stdout) + if container.State.Running { + fmt.Fprintf(outStream, "Container is running\n") + } else { + fmt.Fprintf(outStream, "Container is not running\n") + } + fmt.Fprintln(outStream, "What happened?") + fmt.Fprintln(outStream, "Something happened") + wg.Wait() + if r.URL.Query().Get("stream") == "1" { + for { + time.Sleep(1e6) + s.cMut.RLock() + if !container.State.Running { + s.cMut.RUnlock() + break + } + s.cMut.RUnlock() + } + } + conn.Close() +} + +func (s *DockerServer) waitContainer(w http.ResponseWriter, r *http.Request) { + id := mux.Vars(r)["id"] + container, 
_, err := s.findContainer(id)
+	if err != nil {
+		http.Error(w, err.Error(), http.StatusNotFound)
+		return
+	}
+	for {
+		time.Sleep(1e6)
+		s.cMut.RLock()
+		if !container.State.Running {
+			s.cMut.RUnlock()
+			break
+		}
+		s.cMut.RUnlock()
+	}
+	result := map[string]int{"StatusCode": container.State.ExitCode}
+	json.NewEncoder(w).Encode(result)
+}
+
+func (s *DockerServer) removeContainer(w http.ResponseWriter, r *http.Request) {
+	id := mux.Vars(r)["id"]
+	force := r.URL.Query().Get("force")
+	container, index, err := s.findContainer(id)
+	if err != nil {
+		http.Error(w, err.Error(), http.StatusNotFound)
+		return
+	}
+	if container.State.Running && force != "1" {
+		msg := "Error: API error (406): Impossible to remove a running container, please stop it first"
+		http.Error(w, msg, http.StatusInternalServerError)
+		return
+	}
+	w.WriteHeader(http.StatusNoContent)
+	s.cMut.Lock()
+	defer s.cMut.Unlock()
+	if s.containers[index].ID == id || s.containers[index].Name == id {
+		s.containers[index] = s.containers[len(s.containers)-1]
+		s.containers = s.containers[:len(s.containers)-1]
+	}
+}
+
+func (s *DockerServer) commitContainer(w http.ResponseWriter, r *http.Request) {
+	id := r.URL.Query().Get("container")
+	container, _, err := s.findContainer(id)
+	if err != nil {
+		http.Error(w, err.Error(), http.StatusNotFound)
+		return
+	}
+	var config *docker.Config
+	runConfig := r.URL.Query().Get("run")
+	if runConfig != "" {
+		config = new(docker.Config)
+		err = json.Unmarshal([]byte(runConfig), config)
+		if err != nil {
+			http.Error(w, err.Error(), http.StatusBadRequest)
+			return
+		}
+	}
+	w.WriteHeader(http.StatusOK)
+	image := docker.Image{
+		ID:        "img-" + container.ID,
+		Parent:    container.Image,
+		Container: container.ID,
+		Comment:   r.URL.Query().Get("m"),
+		Author:    r.URL.Query().Get("author"),
+		Config:    config,
+	}
+	repository := r.URL.Query().Get("repo")
+	tag := r.URL.Query().Get("tag")
+	s.iMut.Lock()
+	s.images = append(s.images, image)
+	if repository != "" {
+		if tag != "" {
+			repository += ":" + tag
+		}
+		s.imgIDs[repository] = image.ID
+	}
+	s.iMut.Unlock()
+	fmt.Fprintf(w, `{"ID":%q}`, image.ID)
+}
+
+func (s *DockerServer) findContainer(idOrName string) (*docker.Container, int, error) {
+	s.cMut.RLock()
+	defer s.cMut.RUnlock()
+	for i, container := range s.containers {
+		if container.ID == idOrName || container.Name == idOrName {
+			return container, i, nil
+		}
+	}
+	return nil, -1, errors.New("No such container")
+}
+
+func (s *DockerServer) buildImage(w http.ResponseWriter, r *http.Request) {
+	if ct := r.Header.Get("Content-Type"); ct == "application/tar" {
+		gotDockerFile := false
+		tr := tar.NewReader(r.Body)
+		for {
+			header, err := tr.Next()
+			if err != nil {
+				break
+			}
+			if header.Name == "Dockerfile" {
+				gotDockerFile = true
+			}
+		}
+		if !gotDockerFile {
+			w.WriteHeader(http.StatusBadRequest)
+			w.Write([]byte("miss Dockerfile"))
+			return
+		}
+	}
+	// we don't actually use the Dockerfile to build an image here, since we
+	// are a fake Docker daemon
+	image := docker.Image{
+		ID:      s.generateID(),
+		Created: time.Now(),
+	}
+
+	query := r.URL.Query()
+	repository := image.ID
+	if t := query.Get("t"); t != "" {
+		repository = t
+	}
+	s.iMut.Lock()
+	s.images = append(s.images, image)
+	s.imgIDs[repository] = image.ID
+	s.iMut.Unlock()
+	w.Write([]byte(fmt.Sprintf("Successfully built %s", image.ID)))
+}
+
+func (s *DockerServer) pullImage(w http.ResponseWriter, r *http.Request) {
+	fromImageName := r.URL.Query().Get("fromImage")
+	tag := r.URL.Query().Get("tag")
+	image := docker.Image{
+		ID: 
s.generateID(),
+	}
+	s.iMut.Lock()
+	s.images = append(s.images, image)
+	if fromImageName != "" {
+		if tag != "" {
+			fromImageName = fmt.Sprintf("%s:%s", fromImageName, tag)
+		}
+		s.imgIDs[fromImageName] = image.ID
+	}
+	s.iMut.Unlock()
+}
+
+func (s *DockerServer) pushImage(w http.ResponseWriter, r *http.Request) {
+	name := mux.Vars(r)["name"]
+	tag := r.URL.Query().Get("tag")
+	if tag != "" {
+		name += ":" + tag
+	}
+	s.iMut.RLock()
+	if _, ok := s.imgIDs[name]; !ok {
+		s.iMut.RUnlock()
+		http.Error(w, "No such image", http.StatusNotFound)
+		return
+	}
+	s.iMut.RUnlock()
+	fmt.Fprintln(w, "Pushing...")
+	fmt.Fprintln(w, "Pushed")
+}
+
+func (s *DockerServer) tagImage(w http.ResponseWriter, r *http.Request) {
+	name := mux.Vars(r)["name"]
+	s.iMut.RLock()
+	if _, ok := s.imgIDs[name]; !ok {
+		s.iMut.RUnlock()
+		http.Error(w, "No such image", http.StatusNotFound)
+		return
+	}
+	s.iMut.RUnlock()
+	s.iMut.Lock()
+	defer s.iMut.Unlock()
+	newRepo := r.URL.Query().Get("repo")
+	newTag := r.URL.Query().Get("tag")
+	if newTag != "" {
+		newRepo += ":" + newTag
+	}
+	s.imgIDs[newRepo] = s.imgIDs[name]
+	w.WriteHeader(http.StatusCreated)
+}
+
+func (s *DockerServer) removeImage(w http.ResponseWriter, r *http.Request) {
+	id := mux.Vars(r)["id"]
+	s.iMut.RLock()
+	var tag string
+	if img, ok := s.imgIDs[id]; ok {
+		id, tag = img, id
+	}
+	var tags []string
+	for tag, taggedID := range s.imgIDs {
+		if taggedID == id {
+			tags = append(tags, tag)
+		}
+	}
+	s.iMut.RUnlock()
+	_, index, err := s.findImageByID(id)
+	if err != nil {
+		http.Error(w, err.Error(), http.StatusNotFound)
+		return
+	}
+	w.WriteHeader(http.StatusNoContent)
+	s.iMut.Lock()
+	defer s.iMut.Unlock()
+	if len(tags) < 2 {
+		s.images[index] = s.images[len(s.images)-1]
+		s.images = s.images[:len(s.images)-1]
+	}
+	if tag != "" {
+		delete(s.imgIDs, tag)
+	}
+}
+
+func (s *DockerServer) inspectImage(w http.ResponseWriter, r *http.Request) {
+	name := mux.Vars(r)["name"]
+	s.iMut.RLock()
+	defer s.iMut.RUnlock()
+	if id, ok := s.imgIDs[name]; ok {
+		for _, img := range s.images {
+			if img.ID == id {
+				w.Header().Set("Content-Type", "application/json")
+				w.WriteHeader(http.StatusOK)
+				json.NewEncoder(w).Encode(img)
+				return
+			}
+		}
+	}
+	http.Error(w, "not found", http.StatusNotFound)
+}
+
+func (s *DockerServer) listEvents(w http.ResponseWriter, r *http.Request) {
+	w.Header().Set("Content-Type", "application/json")
+	var events [][]byte
+	count := mathrand.Intn(20)
+	for i := 0; i < count; i++ {
+		data, err := json.Marshal(s.generateEvent())
+		if err != nil {
+			w.WriteHeader(http.StatusInternalServerError)
+			return
+		}
+		events = append(events, data)
+	}
+	w.WriteHeader(http.StatusOK)
+	for _, d := range events {
+		fmt.Fprintln(w, d)
+		time.Sleep(time.Duration(mathrand.Intn(200)) * time.Millisecond)
+	}
+}
+
+func (s *DockerServer) pingDocker(w http.ResponseWriter, r *http.Request) {
+	w.WriteHeader(http.StatusOK)
+}
+
+func (s *DockerServer) generateEvent() *docker.APIEvents {
+	var eventType string
+	switch mathrand.Intn(4) {
+	case 0:
+		eventType = "create"
+	case 1:
+		eventType = "start"
+	case 2:
+		eventType = "stop"
+	case 3:
+		eventType = "destroy"
+	}
+	return &docker.APIEvents{
+		ID:     s.generateID(),
+		Status: eventType,
+		From:   "mybase:latest",
+		Time:   time.Now().Unix(),
+	}
+}
+
+func (s *DockerServer) loadImage(w http.ResponseWriter, r *http.Request) {
+	w.WriteHeader(http.StatusOK)
+}
+
+func (s *DockerServer) getImage(w http.ResponseWriter, r *http.Request) {
+	// headers must be set before WriteHeader, otherwise they are silently
+	// dropped
+	w.Header().Set("Content-Type", "application/tar")
+	w.WriteHeader(http.StatusOK)
+}
+
+func (s *DockerServer) createExecContainer(w http.ResponseWriter, r *http.Request) {
+	id := mux.Vars(r)["id"]
+	container, _, err := s.findContainer(id)
+	if err != nil {
+		http.Error(w, err.Error(), http.StatusNotFound)
+		return
+	}
+
+	execID := s.generateID()
+	container.ExecIDs = append(container.ExecIDs, execID)
+
+	exec := docker.ExecInspect{
+		ID:        execID,
+		Container: *container,
+	}
+
+	var params docker.CreateExecOptions
+	err = json.NewDecoder(r.Body).Decode(&params)
+	if err != nil {
+		http.Error(w, err.Error(), http.StatusInternalServerError)
+		return
+	}
+	if len(params.Cmd) > 0 {
+		exec.ProcessConfig.EntryPoint = params.Cmd[0]
+		if len(params.Cmd) > 1 {
+			exec.ProcessConfig.Arguments = params.Cmd[1:]
+		}
+	}
+
+	exec.ProcessConfig.User = params.User
+	exec.ProcessConfig.Tty = params.Tty
+
+	s.execMut.Lock()
+	s.execs = append(s.execs, &exec)
+	s.execMut.Unlock()
+	// set Content-Type before WriteHeader so it is actually sent
+	w.Header().Set("Content-Type", "application/json")
+	w.WriteHeader(http.StatusOK)
+	json.NewEncoder(w).Encode(map[string]string{"Id": exec.ID})
+}
+
+func (s *DockerServer) startExecContainer(w http.ResponseWriter, r *http.Request) {
+	id := mux.Vars(r)["id"]
+	if exec, err := s.getExec(id, false); err == nil {
+		s.execMut.Lock()
+		exec.Running = true
+		s.execMut.Unlock()
+		if callback, ok := s.execCallbacks[id]; ok {
+			callback()
+			delete(s.execCallbacks, id)
+		} else if callback, ok := s.execCallbacks["*"]; ok {
+			callback()
+			delete(s.execCallbacks, "*")
+		}
+		s.execMut.Lock()
+		exec.Running = false
+		s.execMut.Unlock()
+		w.WriteHeader(http.StatusOK)
+		return
+	}
+	w.WriteHeader(http.StatusNotFound)
+}
+
+func (s *DockerServer) resizeExecContainer(w http.ResponseWriter, r *http.Request) {
+	id := mux.Vars(r)["id"]
+	if _, err := s.getExec(id, false); err == nil {
+		w.WriteHeader(http.StatusOK)
+		return
+	}
+	w.WriteHeader(http.StatusNotFound)
+}
+
+func (s *DockerServer) inspectExecContainer(w http.ResponseWriter, r *http.Request) {
+	id := mux.Vars(r)["id"]
+	if exec, err := s.getExec(id, true); err == nil {
+		w.Header().Set("Content-Type", "application/json")
+		w.WriteHeader(http.StatusOK)
+		json.NewEncoder(w).Encode(exec)
+		return
+	}
+	w.WriteHeader(http.StatusNotFound)
+}
+
+func (s *DockerServer) getExec(id string, copy bool) (*docker.ExecInspect, error) {
+	s.execMut.RLock()
+	defer s.execMut.RUnlock()
+	for _, exec := range s.execs {
+		if exec.ID == id {
+			if copy {
+				cp := *exec
+				exec = &cp
+			}
+			return exec, nil
+		}
+	}
+	return nil, errors.New("exec not found")
+}
+
+func (s *DockerServer) findNetwork(idOrName string) (*docker.Network, int, error) {
+	s.netMut.RLock()
+	defer s.netMut.RUnlock()
+	for i, network := range s.networks {
+		if network.ID == idOrName || network.Name == idOrName {
+			return network, i, nil
+		}
+	}
+	return nil, -1, errors.New("No such network")
+}
+
+func (s *DockerServer) listNetworks(w http.ResponseWriter, r *http.Request) {
+	s.netMut.RLock()
+	result := make([]docker.Network, 0, len(s.networks))
+	for _, network := range s.networks {
+		result = append(result, *network)
+	}
+	s.netMut.RUnlock()
+	w.Header().Set("Content-Type", "application/json")
+	w.WriteHeader(http.StatusOK)
+	json.NewEncoder(w).Encode(result)
+}
+
+func (s *DockerServer) networkInfo(w http.ResponseWriter, r *http.Request) {
+	id := mux.Vars(r)["id"]
+	network, _, err := s.findNetwork(id)
+	if err != nil {
+		http.Error(w, err.Error(), http.StatusNotFound)
+		return
+	}
+	w.Header().Set("Content-Type", "application/json")
+	w.WriteHeader(http.StatusOK)
+	
json.NewEncoder(w).Encode(network)
+}
+
+// isValidName checks whether a name is acceptable for the configuration
+// objects supported by libnetwork: it must be non-empty and contain no dots.
+func isValidName(name string) bool {
+	if name == "" || strings.Contains(name, ".") {
+		return false
+	}
+	return true
+}
+
+func (s *DockerServer) createNetwork(w http.ResponseWriter, r *http.Request) {
+	var config *docker.CreateNetworkOptions
+	defer r.Body.Close()
+	err := json.NewDecoder(r.Body).Decode(&config)
+	if err != nil {
+		http.Error(w, err.Error(), http.StatusBadRequest)
+		return
+	}
+	if !isValidName(config.Name) {
+		http.Error(w, "Invalid network name", http.StatusBadRequest)
+		return
+	}
+	if n, _, _ := s.findNetwork(config.Name); n != nil {
+		http.Error(w, "network already exists", http.StatusForbidden)
+		return
+	}
+
+	generatedID := s.generateID()
+	network := docker.Network{
+		Name:   config.Name,
+		ID:     generatedID,
+		Driver: config.Driver,
+	}
+	s.netMut.Lock()
+	s.networks = append(s.networks, &network)
+	s.netMut.Unlock()
+	w.WriteHeader(http.StatusCreated)
+	c := struct{ ID string }{ID: network.ID}
+	json.NewEncoder(w).Encode(c)
+}
+
+func (s *DockerServer) listVolumes(w http.ResponseWriter, r *http.Request) {
+	s.volMut.RLock()
+	result := make([]docker.Volume, 0, len(s.volStore))
+	for _, volumeCounter := range s.volStore {
+		result = append(result, volumeCounter.volume)
+	}
+	s.volMut.RUnlock()
+	w.Header().Set("Content-Type", "application/json")
+	w.WriteHeader(http.StatusOK)
+	json.NewEncoder(w).Encode(result)
+}
+
+func (s *DockerServer) createVolume(w http.ResponseWriter, r *http.Request) {
+	var data struct {
+		*docker.CreateVolumeOptions
+	}
+	defer r.Body.Close()
+	err := json.NewDecoder(r.Body).Decode(&data)
+	if err != nil {
+		http.Error(w, err.Error(), http.StatusBadRequest)
+		return
+	}
+	volume := &docker.Volume{
+		Name:   data.CreateVolumeOptions.Name,
+		Driver: data.CreateVolumeOptions.Driver,
+	}
+	// if the name is not specified, generate one (just using generateID for now)
+	if len(volume.Name) == 0 {
+		volume.Name = s.generateID()
+	}
+	// if the driver is not specified, default to "local"
+	if len(volume.Driver) == 0 {
+		volume.Driver = "local"
+	}
+	// the mount point defaults to a path derived from the volume name
+	volume.Mountpoint = "/var/lib/docker/volumes/" + volume.Name
+
+	// If the volume already exists, don't re-add it.
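+	// (volStore is initialized lazily, so it may still be nil at this point.)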
+	exists := false
+	s.volMut.Lock()
+	if s.volStore != nil {
+		_, exists = s.volStore[volume.Name]
+	} else {
+		// No volumes yet, create the volStore
+		s.volStore = make(map[string]*volumeCounter)
+	}
+	if !exists {
+		s.volStore[volume.Name] = &volumeCounter{
+			volume: *volume,
+			count:  0,
+		}
+	}
+	s.volMut.Unlock()
+	w.WriteHeader(http.StatusCreated)
+	json.NewEncoder(w).Encode(volume)
+}
+
+func (s *DockerServer) inspectVolume(w http.ResponseWriter, r *http.Request) {
+	s.volMut.RLock()
+	defer s.volMut.RUnlock()
+	name := mux.Vars(r)["name"]
+	vol, err := s.findVolume(name)
+	if err != nil {
+		http.Error(w, err.Error(), http.StatusNotFound)
+		return
+	}
+	w.Header().Set("Content-Type", "application/json")
+	w.WriteHeader(http.StatusOK)
+	json.NewEncoder(w).Encode(vol.volume)
+}
+
+func (s *DockerServer) findVolume(name string) (*volumeCounter, error) {
+	vol, ok := s.volStore[name]
+	if !ok {
+		return nil, errors.New("no such volume")
+	}
+	return vol, nil
+}
+
+func (s *DockerServer) removeVolume(w http.ResponseWriter, r *http.Request) {
+	s.volMut.Lock()
+	defer s.volMut.Unlock()
+	name := mux.Vars(r)["name"]
+	vol, err := s.findVolume(name)
+	if err != nil {
+		http.Error(w, err.Error(), http.StatusNotFound)
+		return
+	}
+	if vol.count != 0 {
+		http.Error(w, "volume in use and cannot be removed", http.StatusConflict)
+		return
+	}
+	// actually drop the entry: assigning nil would keep the key around and
+	// make the volume impossible to recreate or inspect safely
+	delete(s.volStore, vol.volume.Name)
+	w.WriteHeader(http.StatusNoContent)
+}
diff --git a/vendor/github.com/fsouza/go-dockerclient/testing/server_test.go b/vendor/github.com/fsouza/go-dockerclient/testing/server_test.go
new file mode 100644
index 000000000..ddadcaab5
--- /dev/null
+++ b/vendor/github.com/fsouza/go-dockerclient/testing/server_test.go
@@ -0,0 +1,2103 @@
+// Copyright 2015 go-dockerclient authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package testing
+
+import (
+	"bufio"
+	"bytes"
+	"encoding/json"
+	"fmt"
+	"io/ioutil"
+	"math/rand"
+	"net"
+	"net/http"
+	"net/http/httptest"
+	"os"
+	"reflect"
+	"strings"
+	"sync"
+	"testing"
+	"time"
+
+	"github.com/fsouza/go-dockerclient"
+)
+
+func TestNewServer(t *testing.T) {
+	server, err := NewServer("127.0.0.1:0", nil, nil)
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer server.listener.Close()
+	conn, err := net.Dial("tcp", server.listener.Addr().String())
+	if err != nil {
+		t.Fatal(err)
+	}
+	conn.Close()
+}
+
+func TestServerStop(t *testing.T) {
+	server, err := NewServer("127.0.0.1:0", nil, nil)
+	if err != nil {
+		t.Fatal(err)
+	}
+	server.Stop()
+	_, err = net.Dial("tcp", server.listener.Addr().String())
+	if err == nil {
+		t.Error("Expected an error when dialing the stopped server, got nil")
+	}
+}
+
+func TestServerStopNoListener(t *testing.T) {
+	server := DockerServer{}
+	server.Stop()
+}
+
+func TestServerURL(t *testing.T) {
+	server, err := NewServer("127.0.0.1:0", nil, nil)
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer server.Stop()
+	url := server.URL()
+	if expected := "http://" + server.listener.Addr().String() + "/"; url != expected {
+		t.Errorf("DockerServer.URL(): Want %q. 
Got %q.", expected, url) + } +} + +func TestServerURLNoListener(t *testing.T) { + server := DockerServer{} + url := server.URL() + if url != "" { + t.Errorf("DockerServer.URL(): Expected empty URL on handler mode, got %q.", url) + } +} + +func TestHandleWithHook(t *testing.T) { + var called bool + server, _ := NewServer("127.0.0.1:0", nil, func(*http.Request) { called = true }) + defer server.Stop() + recorder := httptest.NewRecorder() + request, _ := http.NewRequest("GET", "/containers/json?all=1", nil) + server.ServeHTTP(recorder, request) + if !called { + t.Error("ServeHTTP did not call the hook function.") + } +} + +func TestSetHook(t *testing.T) { + var called bool + server, _ := NewServer("127.0.0.1:0", nil, nil) + defer server.Stop() + server.SetHook(func(*http.Request) { called = true }) + recorder := httptest.NewRecorder() + request, _ := http.NewRequest("GET", "/containers/json?all=1", nil) + server.ServeHTTP(recorder, request) + if !called { + t.Error("ServeHTTP did not call the hook function.") + } +} + +func TestCustomHandler(t *testing.T) { + var called bool + server, _ := NewServer("127.0.0.1:0", nil, nil) + addContainers(server, 2) + server.CustomHandler("/containers/json", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + called = true + fmt.Fprint(w, "Hello world") + })) + recorder := httptest.NewRecorder() + request, _ := http.NewRequest("GET", "/containers/json?all=1", nil) + server.ServeHTTP(recorder, request) + if !called { + t.Error("Did not call the custom handler") + } + if got := recorder.Body.String(); got != "Hello world" { + t.Errorf("Wrong output for custom handler: want %q. Got %q.", "Hello world", got) + } +} + +func TestCustomHandlerRegexp(t *testing.T) { + var called bool + server, _ := NewServer("127.0.0.1:0", nil, nil) + addContainers(server, 2) + server.CustomHandler("/containers/.*/json", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + called = true + fmt.Fprint(w, "Hello world") + })) + recorder := httptest.NewRecorder() + request, _ := http.NewRequest("GET", "/containers/.*/json?all=1", nil) + server.ServeHTTP(recorder, request) + if !called { + t.Error("Did not call the custom handler") + } + if got := recorder.Body.String(); got != "Hello world" { + t.Errorf("Wrong output for custom handler: want %q. Got %q.", "Hello world", got) + } +} + +func TestListContainers(t *testing.T) { + server := DockerServer{} + addContainers(&server, 2) + server.buildMuxer() + recorder := httptest.NewRecorder() + request, _ := http.NewRequest("GET", "/containers/json?all=1", nil) + server.ServeHTTP(recorder, request) + if recorder.Code != http.StatusOK { + t.Errorf("ListContainers: wrong status. Want %d. Got %d.", http.StatusOK, recorder.Code) + } + expected := make([]docker.APIContainers, 2) + for i, container := range server.containers { + expected[i] = docker.APIContainers{ + ID: container.ID, + Image: container.Image, + Command: strings.Join(container.Config.Cmd, " "), + Created: container.Created.Unix(), + Status: container.State.String(), + Ports: container.NetworkSettings.PortMappingAPI(), + Names: []string{"/" + container.Name}, + } + } + var got []docker.APIContainers + err := json.NewDecoder(recorder.Body).Decode(&got) + if err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(got, expected) { + t.Errorf("ListContainers. Want %#v. 
Got %#v.", expected, got) + } +} + +func TestListRunningContainers(t *testing.T) { + server := DockerServer{} + addContainers(&server, 2) + server.buildMuxer() + recorder := httptest.NewRecorder() + request, _ := http.NewRequest("GET", "/containers/json?all=0", nil) + server.ServeHTTP(recorder, request) + if recorder.Code != http.StatusOK { + t.Errorf("ListRunningContainers: wrong status. Want %d. Got %d.", http.StatusOK, recorder.Code) + } + var got []docker.APIContainers + err := json.NewDecoder(recorder.Body).Decode(&got) + if err != nil { + t.Fatal(err) + } + if len(got) != 0 { + t.Errorf("ListRunningContainers: Want 0. Got %d.", len(got)) + } +} + +func TestCreateContainer(t *testing.T) { + server := DockerServer{} + server.imgIDs = map[string]string{"base": "a1234"} + server.buildMuxer() + recorder := httptest.NewRecorder() + body := `{"Hostname":"", "User":"ubuntu", "Memory":0, "MemorySwap":0, "AttachStdin":false, "AttachStdout":true, "AttachStderr":true, +"PortSpecs":null, "Tty":false, "OpenStdin":false, "StdinOnce":false, "Env":null, "Cmd":["date"], "Image":"base", "Volumes":{}, "VolumesFrom":"","HostConfig":{"Binds":["/var/run/docker.sock:/var/run/docker.sock:rw"]}}` + request, _ := http.NewRequest("POST", "/containers/create", strings.NewReader(body)) + server.ServeHTTP(recorder, request) + if recorder.Code != http.StatusCreated { + t.Errorf("CreateContainer: wrong status. Want %d. Got %d.", http.StatusCreated, recorder.Code) + } + var returned docker.Container + err := json.NewDecoder(recorder.Body).Decode(&returned) + if err != nil { + t.Fatal(err) + } + stored := server.containers[0] + if returned.ID != stored.ID { + t.Errorf("CreateContainer: ID mismatch. Stored: %q. Returned: %q.", stored.ID, returned.ID) + } + if stored.State.Running { + t.Errorf("CreateContainer should not set container to running state.") + } + if stored.Config.User != "ubuntu" { + t.Errorf("CreateContainer: wrong config. Expected: %q. Returned: %q.", "ubuntu", stored.Config.User) + } + if stored.Config.Hostname != returned.ID[:12] { + t.Errorf("CreateContainer: wrong hostname. Expected: %q. Returned: %q.", returned.ID[:12], stored.Config.Hostname) + } + expectedBind := []string{"/var/run/docker.sock:/var/run/docker.sock:rw"} + if !reflect.DeepEqual(stored.HostConfig.Binds, expectedBind) { + t.Errorf("CreateContainer: wrong host config. Expected: %v. Returned %v.", expectedBind, stored.HostConfig.Binds) + } +} + +func TestCreateContainerWithNotifyChannel(t *testing.T) { + ch := make(chan *docker.Container, 1) + server := DockerServer{} + server.imgIDs = map[string]string{"base": "a1234"} + server.cChan = ch + server.buildMuxer() + recorder := httptest.NewRecorder() + body := `{"Hostname":"", "User":"", "Memory":0, "MemorySwap":0, "AttachStdin":false, "AttachStdout":true, "AttachStderr":true, +"PortSpecs":null, "Tty":false, "OpenStdin":false, "StdinOnce":false, "Env":null, "Cmd":["date"], "Image":"base", "Volumes":{}, "VolumesFrom":""}` + request, _ := http.NewRequest("POST", "/containers/create", strings.NewReader(body)) + server.ServeHTTP(recorder, request) + if recorder.Code != http.StatusCreated { + t.Errorf("CreateContainer: wrong status. Want %d. Got %d.", http.StatusCreated, recorder.Code) + } + if notified := <-ch; notified != server.containers[0] { + t.Errorf("CreateContainer: did not notify the proper container. Want %q. 
Got %q.", server.containers[0].ID, notified.ID) + } +} + +func TestCreateContainerInvalidBody(t *testing.T) { + server := DockerServer{} + server.buildMuxer() + recorder := httptest.NewRecorder() + request, _ := http.NewRequest("POST", "/containers/create", strings.NewReader("whaaaaaat---")) + server.ServeHTTP(recorder, request) + if recorder.Code != http.StatusBadRequest { + t.Errorf("CreateContainer: wrong status. Want %d. Got %d.", http.StatusBadRequest, recorder.Code) + } +} + +func TestCreateContainerDuplicateName(t *testing.T) { + server := DockerServer{} + server.buildMuxer() + server.imgIDs = map[string]string{"base": "a1234"} + addContainers(&server, 1) + server.containers[0].Name = "mycontainer" + recorder := httptest.NewRecorder() + body := `{"Hostname":"", "User":"ubuntu", "Memory":0, "MemorySwap":0, "AttachStdin":false, "AttachStdout":true, "AttachStderr":true, +"PortSpecs":null, "Tty":false, "OpenStdin":false, "StdinOnce":false, "Env":null, "Cmd":["date"], "Image":"base", "Volumes":{}, "VolumesFrom":"","HostConfig":{"Binds":["/var/run/docker.sock:/var/run/docker.sock:rw"]}}` + request, _ := http.NewRequest("POST", "/containers/create?name=mycontainer", strings.NewReader(body)) + server.ServeHTTP(recorder, request) + if recorder.Code != http.StatusConflict { + t.Errorf("CreateContainer: wrong status. Want %d. Got %d.", http.StatusConflict, recorder.Code) + } +} + +func TestCreateMultipleContainersEmptyName(t *testing.T) { + server := DockerServer{} + server.buildMuxer() + server.imgIDs = map[string]string{"base": "a1234"} + addContainers(&server, 1) + server.containers[0].Name = "" + recorder := httptest.NewRecorder() + body := `{"Hostname":"", "User":"ubuntu", "Memory":0, "MemorySwap":0, "AttachStdin":false, "AttachStdout":true, "AttachStderr":true, +"PortSpecs":null, "Tty":false, "OpenStdin":false, "StdinOnce":false, "Env":null, "Cmd":["date"], "Image":"base", "Volumes":{}, "VolumesFrom":"","HostConfig":{"Binds":["/var/run/docker.sock:/var/run/docker.sock:rw"]}}` + request, _ := http.NewRequest("POST", "/containers/create", strings.NewReader(body)) + server.ServeHTTP(recorder, request) + if recorder.Code != http.StatusCreated { + t.Errorf("CreateContainer: wrong status. Want %d. Got %d.", http.StatusCreated, recorder.Code) + } + var returned docker.Container + err := json.NewDecoder(recorder.Body).Decode(&returned) + if err != nil { + t.Fatal(err) + } + stored := server.containers[1] + if returned.ID != stored.ID { + t.Errorf("CreateContainer: ID mismatch. Stored: %q. Returned: %q.", stored.ID, returned.ID) + } + if stored.State.Running { + t.Errorf("CreateContainer should not set container to running state.") + } + if stored.Config.User != "ubuntu" { + t.Errorf("CreateContainer: wrong config. Expected: %q. Returned: %q.", "ubuntu", stored.Config.User) + } + expectedBind := []string{"/var/run/docker.sock:/var/run/docker.sock:rw"} + if !reflect.DeepEqual(stored.HostConfig.Binds, expectedBind) { + t.Errorf("CreateContainer: wrong host config. Expected: %v. 
Returned %v.", expectedBind, stored.HostConfig.Binds) + } +} + +func TestCreateContainerInvalidName(t *testing.T) { + server := DockerServer{} + server.buildMuxer() + recorder := httptest.NewRecorder() + body := `{"Hostname":"", "User":"", "Memory":0, "MemorySwap":0, "AttachStdin":false, "AttachStdout":true, "AttachStderr":true, +"PortSpecs":null, "Tty":false, "OpenStdin":false, "StdinOnce":false, "Env":null, "Cmd":["date"], +"Image":"base", "Volumes":{}, "VolumesFrom":""}` + request, _ := http.NewRequest("POST", "/containers/create?name=myapp/container1", strings.NewReader(body)) + server.ServeHTTP(recorder, request) + if recorder.Code != http.StatusInternalServerError { + t.Errorf("CreateContainer: wrong status. Want %d. Got %d.", http.StatusInternalServerError, recorder.Code) + } + expectedBody := "Invalid container name\n" + if got := recorder.Body.String(); got != expectedBody { + t.Errorf("CreateContainer: wrong body. Want %q. Got %q.", expectedBody, got) + } +} + +func TestCreateContainerImageNotFound(t *testing.T) { + server := DockerServer{} + server.buildMuxer() + recorder := httptest.NewRecorder() + body := `{"Hostname":"", "User":"", "Memory":0, "MemorySwap":0, "AttachStdin":false, "AttachStdout":true, "AttachStderr":true, +"PortSpecs":null, "Tty":false, "OpenStdin":false, "StdinOnce":false, "Env":null, "Cmd":["date"], +"Image":"base", "Volumes":{}, "VolumesFrom":""}` + request, _ := http.NewRequest("POST", "/containers/create", strings.NewReader(body)) + server.ServeHTTP(recorder, request) + if recorder.Code != http.StatusNotFound { + t.Errorf("CreateContainer: wrong status. Want %d. Got %d.", http.StatusNotFound, recorder.Code) + } +} + +func TestRenameContainer(t *testing.T) { + server := DockerServer{} + addContainers(&server, 2) + server.buildMuxer() + recorder := httptest.NewRecorder() + newName := server.containers[0].Name + "abc" + path := fmt.Sprintf("/containers/%s/rename?name=%s", server.containers[0].ID, newName) + request, _ := http.NewRequest("POST", path, nil) + server.ServeHTTP(recorder, request) + if recorder.Code != http.StatusNoContent { + t.Errorf("RenameContainer: wrong status. Want %d. Got %d.", http.StatusNoContent, recorder.Code) + } + container := server.containers[0] + if container.Name != newName { + t.Errorf("RenameContainer: did not rename the container. Want %q. Got %q.", newName, container.Name) + } +} + +func TestRenameContainerNotFound(t *testing.T) { + server := DockerServer{} + server.buildMuxer() + recorder := httptest.NewRecorder() + request, _ := http.NewRequest("POST", "/containers/blabla/rename?name=something", nil) + server.ServeHTTP(recorder, request) + if recorder.Code != http.StatusNotFound { + t.Errorf("RenameContainer: wrong status. Want %d. Got %d.", http.StatusNotFound, recorder.Code) + } +} + +func TestCommitContainer(t *testing.T) { + server := DockerServer{} + addContainers(&server, 2) + server.buildMuxer() + recorder := httptest.NewRecorder() + request, _ := http.NewRequest("POST", "/commit?container="+server.containers[0].ID, nil) + server.ServeHTTP(recorder, request) + if recorder.Code != http.StatusOK { + t.Errorf("CommitContainer: wrong status. Want %d. Got %d.", http.StatusOK, recorder.Code) + } + expected := fmt.Sprintf(`{"ID":"%s"}`, server.images[0].ID) + if got := recorder.Body.String(); got != expected { + t.Errorf("CommitContainer: wrong response body. Want %q. 
Got %q.", expected, got) + } +} + +func TestCommitContainerComplete(t *testing.T) { + server := DockerServer{} + server.imgIDs = make(map[string]string) + addContainers(&server, 2) + server.buildMuxer() + recorder := httptest.NewRecorder() + queryString := "container=" + server.containers[0].ID + "&repo=tsuru/python&m=saving&author=developers" + queryString += `&run={"Cmd": ["cat", "/world"],"PortSpecs":["22"]}` + request, _ := http.NewRequest("POST", "/commit?"+queryString, nil) + server.ServeHTTP(recorder, request) + image := server.images[0] + if image.Parent != server.containers[0].Image { + t.Errorf("CommitContainer: wrong parent image. Want %q. Got %q.", server.containers[0].Image, image.Parent) + } + if image.Container != server.containers[0].ID { + t.Errorf("CommitContainer: wrong container. Want %q. Got %q.", server.containers[0].ID, image.Container) + } + message := "saving" + if image.Comment != message { + t.Errorf("CommitContainer: wrong comment (commit message). Want %q. Got %q.", message, image.Comment) + } + author := "developers" + if image.Author != author { + t.Errorf("CommitContainer: wrong author. Want %q. Got %q.", author, image.Author) + } + if id := server.imgIDs["tsuru/python"]; id != image.ID { + t.Errorf("CommitContainer: wrong ID saved for repository. Want %q. Got %q.", image.ID, id) + } + portSpecs := []string{"22"} + if !reflect.DeepEqual(image.Config.PortSpecs, portSpecs) { + t.Errorf("CommitContainer: wrong port spec in config. Want %#v. Got %#v.", portSpecs, image.Config.PortSpecs) + } + cmd := []string{"cat", "/world"} + if !reflect.DeepEqual(image.Config.Cmd, cmd) { + t.Errorf("CommitContainer: wrong cmd in config. Want %#v. Got %#v.", cmd, image.Config.Cmd) + } +} + +func TestCommitContainerWithTag(t *testing.T) { + server := DockerServer{} + server.imgIDs = make(map[string]string) + addContainers(&server, 2) + server.buildMuxer() + recorder := httptest.NewRecorder() + queryString := "container=" + server.containers[0].ID + "&repo=tsuru/python&tag=v1" + request, _ := http.NewRequest("POST", "/commit?"+queryString, nil) + server.ServeHTTP(recorder, request) + image := server.images[0] + if image.Parent != server.containers[0].Image { + t.Errorf("CommitContainer: wrong parent image. Want %q. Got %q.", server.containers[0].Image, image.Parent) + } + if image.Container != server.containers[0].ID { + t.Errorf("CommitContainer: wrong container. Want %q. Got %q.", server.containers[0].ID, image.Container) + } + if id := server.imgIDs["tsuru/python:v1"]; id != image.ID { + t.Errorf("CommitContainer: wrong ID saved for repository. Want %q. Got %q.", image.ID, id) + } +} + +func TestCommitContainerInvalidRun(t *testing.T) { + server := DockerServer{} + addContainers(&server, 1) + server.buildMuxer() + recorder := httptest.NewRecorder() + request, _ := http.NewRequest("POST", "/commit?container="+server.containers[0].ID+"&run=abc---", nil) + server.ServeHTTP(recorder, request) + if recorder.Code != http.StatusBadRequest { + t.Errorf("CommitContainer. Wrong status. Want %d. Got %d.", http.StatusBadRequest, recorder.Code) + } +} + +func TestCommitContainerNotFound(t *testing.T) { + server := DockerServer{} + server.buildMuxer() + recorder := httptest.NewRecorder() + request, _ := http.NewRequest("POST", "/commit?container=abc123", nil) + server.ServeHTTP(recorder, request) + if recorder.Code != http.StatusNotFound { + t.Errorf("CommitContainer. Wrong status. Want %d. 
Got %d.", http.StatusNotFound, recorder.Code) + } +} + +func TestInspectContainer(t *testing.T) { + server := DockerServer{} + addContainers(&server, 2) + server.buildMuxer() + recorder := httptest.NewRecorder() + path := fmt.Sprintf("/containers/%s/json", server.containers[0].ID) + request, _ := http.NewRequest("GET", path, nil) + server.ServeHTTP(recorder, request) + if recorder.Code != http.StatusOK { + t.Errorf("InspectContainer: wrong status. Want %d. Got %d.", http.StatusOK, recorder.Code) + } + expected := server.containers[0] + var got docker.Container + err := json.NewDecoder(recorder.Body).Decode(&got) + if err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(got.Config, expected.Config) { + t.Errorf("InspectContainer: wrong value. Want %#v. Got %#v.", *expected, got) + } + if !reflect.DeepEqual(got.NetworkSettings, expected.NetworkSettings) { + t.Errorf("InspectContainer: wrong value. Want %#v. Got %#v.", *expected, got) + } + got.State.StartedAt = expected.State.StartedAt + got.State.FinishedAt = expected.State.FinishedAt + got.Config = expected.Config + got.Created = expected.Created + got.NetworkSettings = expected.NetworkSettings + if !reflect.DeepEqual(got, *expected) { + t.Errorf("InspectContainer: wrong value. Want %#v. Got %#v.", *expected, got) + } +} + +func TestInspectContainerNotFound(t *testing.T) { + server := DockerServer{} + server.buildMuxer() + recorder := httptest.NewRecorder() + request, _ := http.NewRequest("GET", "/containers/abc123/json", nil) + server.ServeHTTP(recorder, request) + if recorder.Code != http.StatusNotFound { + t.Errorf("InspectContainer: wrong status code. Want %d. Got %d.", http.StatusNotFound, recorder.Code) + } +} + +func TestTopContainer(t *testing.T) { + server := DockerServer{} + addContainers(&server, 1) + server.containers[0].State.Running = true + server.buildMuxer() + recorder := httptest.NewRecorder() + path := fmt.Sprintf("/containers/%s/top", server.containers[0].ID) + request, _ := http.NewRequest("GET", path, nil) + server.ServeHTTP(recorder, request) + if recorder.Code != http.StatusOK { + t.Errorf("TopContainer: wrong status. Want %d. Got %d.", http.StatusOK, recorder.Code) + } + var got docker.TopResult + err := json.NewDecoder(recorder.Body).Decode(&got) + if err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(got.Titles, []string{"UID", "PID", "PPID", "C", "STIME", "TTY", "TIME", "CMD"}) { + t.Fatalf("TopContainer: Unexpected titles, got: %#v", got.Titles) + } + if len(got.Processes) != 1 { + t.Fatalf("TopContainer: Unexpected process len, got: %d", len(got.Processes)) + } + if got.Processes[0][len(got.Processes[0])-1] != "ls -la .." { + t.Fatalf("TopContainer: Unexpected command name, got: %s", got.Processes[0][len(got.Processes[0])-1]) + } +} + +func TestTopContainerNotFound(t *testing.T) { + server := DockerServer{} + server.buildMuxer() + recorder := httptest.NewRecorder() + request, _ := http.NewRequest("GET", "/containers/xyz/top", nil) + server.ServeHTTP(recorder, request) + if recorder.Code != http.StatusNotFound { + t.Errorf("TopContainer: wrong status. Want %d. 
Got %d.", http.StatusNotFound, recorder.Code) + } +} + +func TestTopContainerStopped(t *testing.T) { + server := DockerServer{} + addContainers(&server, 1) + server.buildMuxer() + recorder := httptest.NewRecorder() + path := fmt.Sprintf("/containers/%s/top", server.containers[0].ID) + request, _ := http.NewRequest("GET", path, nil) + server.ServeHTTP(recorder, request) + if recorder.Code != http.StatusInternalServerError { + t.Errorf("TopContainer: wrong status. Want %d. Got %d.", http.StatusInternalServerError, recorder.Code) + } +} + +func TestStartContainer(t *testing.T) { + server := DockerServer{} + addContainers(&server, 1) + server.buildMuxer() + memory := int64(536870912) + hostConfig := docker.HostConfig{Memory: memory} + configBytes, err := json.Marshal(hostConfig) + if err != nil { + t.Fatal(err) + } + recorder := httptest.NewRecorder() + path := fmt.Sprintf("/containers/%s/start", server.containers[0].ID) + request, _ := http.NewRequest("POST", path, bytes.NewBuffer(configBytes)) + server.ServeHTTP(recorder, request) + if recorder.Code != http.StatusOK { + t.Errorf("StartContainer: wrong status code. Want %d. Got %d.", http.StatusOK, recorder.Code) + } + if !server.containers[0].State.Running { + t.Error("StartContainer: did not set the container to running state") + } + if gotMemory := server.containers[0].HostConfig.Memory; gotMemory != memory { + t.Errorf("StartContainer: wrong HostConfig. Wants %d of memory. Got %d", memory, gotMemory) + } +} + +func TestStartContainerChangeNetwork(t *testing.T) { + server := DockerServer{} + addContainers(&server, 1) + server.buildMuxer() + hostConfig := docker.HostConfig{ + PortBindings: map[docker.Port][]docker.PortBinding{ + "8888/tcp": {{HostIP: "", HostPort: "12345"}}, + }, + } + configBytes, err := json.Marshal(hostConfig) + if err != nil { + t.Fatal(err) + } + recorder := httptest.NewRecorder() + path := fmt.Sprintf("/containers/%s/start", server.containers[0].ID) + request, _ := http.NewRequest("POST", path, bytes.NewBuffer(configBytes)) + server.ServeHTTP(recorder, request) + if recorder.Code != http.StatusOK { + t.Errorf("StartContainer: wrong status code. Want %d. Got %d.", http.StatusOK, recorder.Code) + } + if !server.containers[0].State.Running { + t.Error("StartContainer: did not set the container to running state") + } + portMapping := server.containers[0].NetworkSettings.Ports["8888/tcp"] + expected := []docker.PortBinding{{HostIP: "0.0.0.0", HostPort: "12345"}} + if !reflect.DeepEqual(portMapping, expected) { + t.Errorf("StartContainer: network not updated. Wants %#v ports. Got %#v", expected, portMapping) + } +} + +func TestStartContainerWithNotifyChannel(t *testing.T) { + ch := make(chan *docker.Container, 1) + server := DockerServer{} + server.cChan = ch + addContainers(&server, 1) + addContainers(&server, 1) + server.buildMuxer() + recorder := httptest.NewRecorder() + path := fmt.Sprintf("/containers/%s/start", server.containers[1].ID) + request, _ := http.NewRequest("POST", path, bytes.NewBuffer([]byte("{}"))) + server.ServeHTTP(recorder, request) + if recorder.Code != http.StatusOK { + t.Errorf("StartContainer: wrong status code. Want %d. Got %d.", http.StatusOK, recorder.Code) + } + if notified := <-ch; notified != server.containers[1] { + t.Errorf("StartContainer: did not notify the proper container. Want %q. 
Got %q.", server.containers[1].ID, notified.ID) + } +} + +func TestStartContainerNotFound(t *testing.T) { + server := DockerServer{} + server.buildMuxer() + recorder := httptest.NewRecorder() + path := "/containers/abc123/start" + request, _ := http.NewRequest("POST", path, bytes.NewBuffer([]byte("null"))) + server.ServeHTTP(recorder, request) + if recorder.Code != http.StatusNotFound { + t.Errorf("StartContainer: wrong status code. Want %d. Got %d.", http.StatusNotFound, recorder.Code) + } +} + +func TestStartContainerAlreadyRunning(t *testing.T) { + server := DockerServer{} + addContainers(&server, 1) + server.containers[0].State.Running = true + server.buildMuxer() + recorder := httptest.NewRecorder() + path := fmt.Sprintf("/containers/%s/start", server.containers[0].ID) + request, _ := http.NewRequest("POST", path, bytes.NewBuffer([]byte("null"))) + server.ServeHTTP(recorder, request) + if recorder.Code != http.StatusNotModified { + t.Errorf("StartContainer: wrong status code. Want %d. Got %d.", http.StatusNotModified, recorder.Code) + } +} + +func TestStopContainer(t *testing.T) { + server := DockerServer{} + addContainers(&server, 1) + server.containers[0].State.Running = true + server.buildMuxer() + recorder := httptest.NewRecorder() + path := fmt.Sprintf("/containers/%s/stop", server.containers[0].ID) + request, _ := http.NewRequest("POST", path, nil) + server.ServeHTTP(recorder, request) + if recorder.Code != http.StatusNoContent { + t.Errorf("StopContainer: wrong status code. Want %d. Got %d.", http.StatusNoContent, recorder.Code) + } + if server.containers[0].State.Running { + t.Error("StopContainer: did not stop the container") + } +} + +func TestKillContainer(t *testing.T) { + server := DockerServer{} + addContainers(&server, 1) + server.containers[0].State.Running = true + server.buildMuxer() + recorder := httptest.NewRecorder() + path := fmt.Sprintf("/containers/%s/kill", server.containers[0].ID) + request, _ := http.NewRequest("POST", path, nil) + server.ServeHTTP(recorder, request) + if recorder.Code != http.StatusNoContent { + t.Errorf("KillContainer: wrong status code. Want %d. Got %d.", http.StatusNoContent, recorder.Code) + } + if server.containers[0].State.Running { + t.Error("KillContainer: did not stop the container") + } +} + +func TestStopContainerWithNotifyChannel(t *testing.T) { + ch := make(chan *docker.Container, 1) + server := DockerServer{} + server.cChan = ch + addContainers(&server, 1) + addContainers(&server, 1) + server.containers[1].State.Running = true + server.buildMuxer() + recorder := httptest.NewRecorder() + path := fmt.Sprintf("/containers/%s/stop", server.containers[1].ID) + request, _ := http.NewRequest("POST", path, nil) + server.ServeHTTP(recorder, request) + if recorder.Code != http.StatusNoContent { + t.Errorf("StopContainer: wrong status code. Want %d. Got %d.", http.StatusNoContent, recorder.Code) + } + if notified := <-ch; notified != server.containers[1] { + t.Errorf("StopContainer: did not notify the proper container. Want %q. Got %q.", server.containers[1].ID, notified.ID) + } +} + +func TestStopContainerNotFound(t *testing.T) { + server := DockerServer{} + server.buildMuxer() + recorder := httptest.NewRecorder() + path := "/containers/abc123/stop" + request, _ := http.NewRequest("POST", path, nil) + server.ServeHTTP(recorder, request) + if recorder.Code != http.StatusNotFound { + t.Errorf("StopContainer: wrong status code. Want %d. 
Got %d.", http.StatusNotFound, recorder.Code) + } +} + +func TestStopContainerNotRunning(t *testing.T) { + server := DockerServer{} + addContainers(&server, 1) + server.buildMuxer() + recorder := httptest.NewRecorder() + path := fmt.Sprintf("/containers/%s/stop", server.containers[0].ID) + request, _ := http.NewRequest("POST", path, nil) + server.ServeHTTP(recorder, request) + if recorder.Code != http.StatusBadRequest { + t.Errorf("StopContainer: wrong status code. Want %d. Got %d.", http.StatusBadRequest, recorder.Code) + } +} + +func TestPauseContainer(t *testing.T) { + server := DockerServer{} + addContainers(&server, 1) + server.buildMuxer() + recorder := httptest.NewRecorder() + path := fmt.Sprintf("/containers/%s/pause", server.containers[0].ID) + request, _ := http.NewRequest("POST", path, nil) + server.ServeHTTP(recorder, request) + if recorder.Code != http.StatusNoContent { + t.Errorf("PauseContainer: wrong status code. Want %d. Got %d.", http.StatusNoContent, recorder.Code) + } + if !server.containers[0].State.Paused { + t.Error("PauseContainer: did not pause the container") + } +} + +func TestPauseContainerAlreadyPaused(t *testing.T) { + server := DockerServer{} + addContainers(&server, 1) + server.containers[0].State.Paused = true + server.buildMuxer() + recorder := httptest.NewRecorder() + path := fmt.Sprintf("/containers/%s/pause", server.containers[0].ID) + request, _ := http.NewRequest("POST", path, nil) + server.ServeHTTP(recorder, request) + if recorder.Code != http.StatusBadRequest { + t.Errorf("PauseContainer: wrong status code. Want %d. Got %d.", http.StatusBadRequest, recorder.Code) + } +} + +func TestPauseContainerNotFound(t *testing.T) { + server := DockerServer{} + server.buildMuxer() + recorder := httptest.NewRecorder() + path := "/containers/abc123/pause" + request, _ := http.NewRequest("POST", path, nil) + server.ServeHTTP(recorder, request) + if recorder.Code != http.StatusNotFound { + t.Errorf("PauseContainer: wrong status code. Want %d. Got %d.", http.StatusNotFound, recorder.Code) + } +} + +func TestUnpauseContainer(t *testing.T) { + server := DockerServer{} + addContainers(&server, 1) + server.containers[0].State.Paused = true + server.buildMuxer() + recorder := httptest.NewRecorder() + path := fmt.Sprintf("/containers/%s/unpause", server.containers[0].ID) + request, _ := http.NewRequest("POST", path, nil) + server.ServeHTTP(recorder, request) + if recorder.Code != http.StatusNoContent { + t.Errorf("UnpauseContainer: wrong status code. Want %d. Got %d.", http.StatusNoContent, recorder.Code) + } + if server.containers[0].State.Paused { + t.Error("UnpauseContainer: did not unpause the container") + } +} + +func TestUnpauseContainerNotPaused(t *testing.T) { + server := DockerServer{} + addContainers(&server, 1) + server.buildMuxer() + recorder := httptest.NewRecorder() + path := fmt.Sprintf("/containers/%s/unpause", server.containers[0].ID) + request, _ := http.NewRequest("POST", path, nil) + server.ServeHTTP(recorder, request) + if recorder.Code != http.StatusBadRequest { + t.Errorf("UnpauseContainer: wrong status code. Want %d. Got %d.", http.StatusBadRequest, recorder.Code) + } +} + +func TestUnpauseContainerNotFound(t *testing.T) { + server := DockerServer{} + server.buildMuxer() + recorder := httptest.NewRecorder() + path := "/containers/abc123/unpause" + request, _ := http.NewRequest("POST", path, nil) + server.ServeHTTP(recorder, request) + if recorder.Code != http.StatusNotFound { + t.Errorf("UnpauseContainer: wrong status code. Want %d. 
Got %d.", http.StatusNotFound, recorder.Code) + } +} + +func TestWaitContainer(t *testing.T) { + server := DockerServer{} + addContainers(&server, 1) + server.containers[0].State.Running = true + server.buildMuxer() + recorder := httptest.NewRecorder() + path := fmt.Sprintf("/containers/%s/wait", server.containers[0].ID) + request, _ := http.NewRequest("POST", path, nil) + go func() { + server.cMut.Lock() + server.containers[0].State.Running = false + server.cMut.Unlock() + }() + server.ServeHTTP(recorder, request) + if recorder.Code != http.StatusOK { + t.Errorf("WaitContainer: wrong status. Want %d. Got %d.", http.StatusOK, recorder.Code) + } + expected := `{"StatusCode":0}` + "\n" + if body := recorder.Body.String(); body != expected { + t.Errorf("WaitContainer: wrong body. Want %q. Got %q.", expected, body) + } +} + +func TestWaitContainerStatus(t *testing.T) { + server := DockerServer{} + addContainers(&server, 1) + server.buildMuxer() + server.containers[0].State.ExitCode = 63 + recorder := httptest.NewRecorder() + path := fmt.Sprintf("/containers/%s/wait", server.containers[0].ID) + request, _ := http.NewRequest("POST", path, nil) + server.ServeHTTP(recorder, request) + if recorder.Code != http.StatusOK { + t.Errorf("WaitContainer: wrong status. Want %d. Got %d.", http.StatusOK, recorder.Code) + } + expected := `{"StatusCode":63}` + "\n" + if body := recorder.Body.String(); body != expected { + t.Errorf("WaitContainer: wrong body. Want %q. Got %q.", expected, body) + } +} + +func TestWaitContainerNotFound(t *testing.T) { + server := DockerServer{} + server.buildMuxer() + recorder := httptest.NewRecorder() + path := "/containers/abc123/wait" + request, _ := http.NewRequest("POST", path, nil) + server.ServeHTTP(recorder, request) + if recorder.Code != http.StatusNotFound { + t.Errorf("WaitContainer: wrong status code. Want %d. Got %d.", http.StatusNotFound, recorder.Code) + } +} + +type HijackableResponseRecorder struct { + httptest.ResponseRecorder + readCh chan []byte +} + +func (r *HijackableResponseRecorder) Hijack() (net.Conn, *bufio.ReadWriter, error) { + myConn, otherConn := net.Pipe() + r.readCh = make(chan []byte) + go func() { + data, _ := ioutil.ReadAll(myConn) + r.readCh <- data + }() + return otherConn, nil, nil +} + +func (r *HijackableResponseRecorder) HijackBuffer() string { + return string(<-r.readCh) +} + +func TestAttachContainer(t *testing.T) { + server := DockerServer{} + addContainers(&server, 1) + server.containers[0].State.Running = true + server.buildMuxer() + recorder := &HijackableResponseRecorder{} + path := fmt.Sprintf("/containers/%s/attach?logs=1", server.containers[0].ID) + request, _ := http.NewRequest("POST", path, nil) + server.ServeHTTP(recorder, request) + lines := []string{ + "\x01\x00\x00\x00\x00\x00\x00\x15Container is running", + "\x01\x00\x00\x00\x00\x00\x00\x0fWhat happened?", + "\x01\x00\x00\x00\x00\x00\x00\x13Something happened", + } + expected := strings.Join(lines, "\n") + "\n" + if body := recorder.HijackBuffer(); body != expected { + t.Errorf("AttachContainer: wrong body. Want %q. Got %q.", expected, body) + } +} + +func TestAttachContainerNotFound(t *testing.T) { + server := DockerServer{} + server.buildMuxer() + recorder := &HijackableResponseRecorder{} + path := "/containers/abc123/attach?logs=1" + request, _ := http.NewRequest("POST", path, nil) + server.ServeHTTP(recorder, request) + if recorder.Code != http.StatusNotFound { + t.Errorf("AttachContainer: wrong status. Want %d. 
Got %d.", http.StatusNotFound, recorder.Code) + } +} + +func TestAttachContainerWithStreamBlocks(t *testing.T) { + server := DockerServer{} + addContainers(&server, 1) + server.containers[0].State.Running = true + server.buildMuxer() + path := fmt.Sprintf("/containers/%s/attach?logs=1&stdout=1&stream=1", server.containers[0].ID) + request, _ := http.NewRequest("POST", path, nil) + done := make(chan string) + go func() { + recorder := &HijackableResponseRecorder{} + server.ServeHTTP(recorder, request) + done <- recorder.HijackBuffer() + }() + select { + case <-done: + t.Fatalf("attach stream returned before container is stopped") + case <-time.After(500 * time.Millisecond): + } + server.cMut.Lock() + server.containers[0].State.Running = false + server.cMut.Unlock() + var body string + select { + case body = <-done: + case <-time.After(5 * time.Second): + t.Fatalf("timed out waiting for attach to finish") + } + lines := []string{ + "\x01\x00\x00\x00\x00\x00\x00\x15Container is running", + "\x01\x00\x00\x00\x00\x00\x00\x0fWhat happened?", + "\x01\x00\x00\x00\x00\x00\x00\x13Something happened", + } + expected := strings.Join(lines, "\n") + "\n" + if body != expected { + t.Errorf("AttachContainer: wrong body. Want %q. Got %q.", expected, body) + } +} + +func TestRemoveContainer(t *testing.T) { + server := DockerServer{} + addContainers(&server, 1) + server.buildMuxer() + recorder := httptest.NewRecorder() + path := fmt.Sprintf("/containers/%s", server.containers[0].ID) + request, _ := http.NewRequest("DELETE", path, nil) + server.ServeHTTP(recorder, request) + if recorder.Code != http.StatusNoContent { + t.Errorf("RemoveContainer: wrong status. Want %d. Got %d.", http.StatusNoContent, recorder.Code) + } + if len(server.containers) > 0 { + t.Error("RemoveContainer: did not remove the container.") + } +} + +func TestRemoveContainerByName(t *testing.T) { + server := DockerServer{} + addContainers(&server, 1) + server.buildMuxer() + recorder := httptest.NewRecorder() + path := fmt.Sprintf("/containers/%s", server.containers[0].Name) + request, _ := http.NewRequest("DELETE", path, nil) + server.ServeHTTP(recorder, request) + if recorder.Code != http.StatusNoContent { + t.Errorf("RemoveContainer: wrong status. Want %d. Got %d.", http.StatusNoContent, recorder.Code) + } + if len(server.containers) > 0 { + t.Error("RemoveContainer: did not remove the container.") + } +} + +func TestRemoveContainerNotFound(t *testing.T) { + server := DockerServer{} + server.buildMuxer() + recorder := httptest.NewRecorder() + path := fmt.Sprintf("/containers/abc123") + request, _ := http.NewRequest("DELETE", path, nil) + server.ServeHTTP(recorder, request) + if recorder.Code != http.StatusNotFound { + t.Errorf("RemoveContainer: wrong status. Want %d. Got %d.", http.StatusNotFound, recorder.Code) + } +} + +func TestRemoveContainerRunning(t *testing.T) { + server := DockerServer{} + addContainers(&server, 1) + server.containers[0].State.Running = true + server.buildMuxer() + recorder := httptest.NewRecorder() + path := fmt.Sprintf("/containers/%s", server.containers[0].ID) + request, _ := http.NewRequest("DELETE", path, nil) + server.ServeHTTP(recorder, request) + if recorder.Code != http.StatusInternalServerError { + t.Errorf("RemoveContainer: wrong status. Want %d. 
Got %d.", http.StatusInternalServerError, recorder.Code) + } + if len(server.containers) < 1 { + t.Error("RemoveContainer: should not remove the container.") + } +} + +func TestRemoveContainerRunningForce(t *testing.T) { + server := DockerServer{} + addContainers(&server, 1) + server.containers[0].State.Running = true + server.buildMuxer() + recorder := httptest.NewRecorder() + path := fmt.Sprintf("/containers/%s?%s", server.containers[0].ID, "force=1") + request, _ := http.NewRequest("DELETE", path, nil) + server.ServeHTTP(recorder, request) + if recorder.Code != http.StatusNoContent { + t.Errorf("RemoveContainer: wrong status. Want %d. Got %d.", http.StatusNoContent, recorder.Code) + } + if len(server.containers) > 0 { + t.Error("RemoveContainer: did not remove the container.") + } +} + +func TestPullImage(t *testing.T) { + server := DockerServer{imgIDs: make(map[string]string)} + server.buildMuxer() + recorder := httptest.NewRecorder() + request, _ := http.NewRequest("POST", "/images/create?fromImage=base", nil) + server.ServeHTTP(recorder, request) + if recorder.Code != http.StatusOK { + t.Errorf("PullImage: wrong status. Want %d. Got %d.", http.StatusOK, recorder.Code) + } + if len(server.images) != 1 { + t.Errorf("PullImage: Want 1 image. Got %d.", len(server.images)) + } + if _, ok := server.imgIDs["base"]; !ok { + t.Error("PullImage: Repository should not be empty.") + } +} + +func TestPullImageWithTag(t *testing.T) { + server := DockerServer{imgIDs: make(map[string]string)} + server.buildMuxer() + recorder := httptest.NewRecorder() + request, _ := http.NewRequest("POST", "/images/create?fromImage=base&tag=tag", nil) + server.ServeHTTP(recorder, request) + if recorder.Code != http.StatusOK { + t.Errorf("PullImage: wrong status. Want %d. Got %d.", http.StatusOK, recorder.Code) + } + if len(server.images) != 1 { + t.Errorf("PullImage: Want 1 image. Got %d.", len(server.images)) + } + if _, ok := server.imgIDs["base:tag"]; !ok { + t.Error("PullImage: Repository should not be empty.") + } +} + +func TestPushImage(t *testing.T) { + server := DockerServer{imgIDs: map[string]string{"tsuru/python": "a123"}} + server.buildMuxer() + recorder := httptest.NewRecorder() + request, _ := http.NewRequest("POST", "/images/tsuru/python/push", nil) + server.ServeHTTP(recorder, request) + if recorder.Code != http.StatusOK { + t.Errorf("PushImage: wrong status. Want %d. Got %d.", http.StatusOK, recorder.Code) + } +} + +func TestPushImageWithTag(t *testing.T) { + server := DockerServer{imgIDs: map[string]string{"tsuru/python:v1": "a123"}} + server.buildMuxer() + recorder := httptest.NewRecorder() + request, _ := http.NewRequest("POST", "/images/tsuru/python/push?tag=v1", nil) + server.ServeHTTP(recorder, request) + if recorder.Code != http.StatusOK { + t.Errorf("PushImage: wrong status. Want %d. Got %d.", http.StatusOK, recorder.Code) + } +} + +func TestPushImageNotFound(t *testing.T) { + server := DockerServer{} + server.buildMuxer() + recorder := httptest.NewRecorder() + request, _ := http.NewRequest("POST", "/images/tsuru/python/push", nil) + server.ServeHTTP(recorder, request) + if recorder.Code != http.StatusNotFound { + t.Errorf("PushImage: wrong status. Want %d. 
Got %d.", http.StatusNotFound, recorder.Code) + } +} + +func TestTagImage(t *testing.T) { + server := DockerServer{imgIDs: map[string]string{"tsuru/python": "a123"}} + server.buildMuxer() + recorder := httptest.NewRecorder() + request, _ := http.NewRequest("POST", "/images/tsuru/python/tag?repo=tsuru/new-python", nil) + server.ServeHTTP(recorder, request) + if recorder.Code != http.StatusCreated { + t.Errorf("TagImage: wrong status. Want %d. Got %d.", http.StatusCreated, recorder.Code) + } + if server.imgIDs["tsuru/python"] != server.imgIDs["tsuru/new-python"] { + t.Errorf("TagImage: did not tag the image") + } +} + +func TestTagImageWithRepoAndTag(t *testing.T) { + server := DockerServer{imgIDs: map[string]string{"tsuru/python": "a123"}} + server.buildMuxer() + recorder := httptest.NewRecorder() + request, _ := http.NewRequest("POST", "/images/tsuru/python/tag?repo=tsuru/new-python&tag=v1", nil) + server.ServeHTTP(recorder, request) + if recorder.Code != http.StatusCreated { + t.Errorf("TagImage: wrong status. Want %d. Got %d.", http.StatusCreated, recorder.Code) + } + if server.imgIDs["tsuru/python"] != server.imgIDs["tsuru/new-python:v1"] { + t.Errorf("TagImage: did not tag the image") + } +} + +func TestTagImageNotFound(t *testing.T) { + server := DockerServer{} + server.buildMuxer() + recorder := httptest.NewRecorder() + request, _ := http.NewRequest("POST", "/images/tsuru/python/tag", nil) + server.ServeHTTP(recorder, request) + if recorder.Code != http.StatusNotFound { + t.Errorf("TagImage: wrong status. Want %d. Got %d.", http.StatusNotFound, recorder.Code) + } +} + +func addContainers(server *DockerServer, n int) { + server.cMut.Lock() + defer server.cMut.Unlock() + for i := 0; i < n; i++ { + date := time.Now().Add(time.Duration((rand.Int() % (i + 1))) * time.Hour) + container := docker.Container{ + Name: fmt.Sprintf("%x", rand.Int()%10000), + ID: fmt.Sprintf("%x", rand.Int()%10000), + Created: date, + Path: "ls", + Args: []string{"-la", ".."}, + Config: &docker.Config{ + Hostname: fmt.Sprintf("docker-%d", i), + AttachStdout: true, + AttachStderr: true, + Env: []string{"ME=you", fmt.Sprintf("NUMBER=%d", i)}, + Cmd: []string{"ls", "-la", ".."}, + Image: "base", + }, + State: docker.State{ + Running: false, + Pid: 400 + i, + ExitCode: 0, + StartedAt: date, + }, + Image: "b750fe79269d2ec9a3c593ef05b4332b1d1a02a62b4accb2c21d589ff2f5f2dc", + NetworkSettings: &docker.NetworkSettings{ + IPAddress: fmt.Sprintf("10.10.10.%d", i+2), + IPPrefixLen: 24, + Gateway: "10.10.10.1", + Bridge: "docker0", + PortMapping: map[string]docker.PortMapping{ + "Tcp": {"8888": fmt.Sprintf("%d", 49600+i)}, + }, + Ports: map[docker.Port][]docker.PortBinding{ + "8888/tcp": { + {HostIP: "0.0.0.0", HostPort: fmt.Sprintf("%d", 49600+i)}, + }, + }, + }, + ResolvConfPath: "/etc/resolv.conf", + } + server.containers = append(server.containers, &container) + } +} + +func addImages(server *DockerServer, n int, repo bool) { + server.iMut.Lock() + defer server.iMut.Unlock() + if server.imgIDs == nil { + server.imgIDs = make(map[string]string) + } + for i := 0; i < n; i++ { + date := time.Now().Add(time.Duration((rand.Int() % (i + 1))) * time.Hour) + image := docker.Image{ + ID: fmt.Sprintf("%x", rand.Int()%10000), + Created: date, + } + server.images = append(server.images, image) + if repo { + repo := "docker/python-" + image.ID + server.imgIDs[repo] = image.ID + } + } +} + +func TestListImages(t *testing.T) { + server := DockerServer{} + addImages(&server, 2, true) + server.buildMuxer() + recorder := 
httptest.NewRecorder() + request, _ := http.NewRequest("GET", "/images/json?all=1", nil) + server.ServeHTTP(recorder, request) + if recorder.Code != http.StatusOK { + t.Errorf("ListImages: wrong status. Want %d. Got %d.", http.StatusOK, recorder.Code) + } + expected := make([]docker.APIImages, 2) + for i, image := range server.images { + expected[i] = docker.APIImages{ + ID: image.ID, + Created: image.Created.Unix(), + RepoTags: []string{"docker/python-" + image.ID}, + } + } + var got []docker.APIImages + err := json.NewDecoder(recorder.Body).Decode(&got) + if err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(got, expected) { + t.Errorf("ListImages. Want %#v. Got %#v.", expected, got) + } +} + +func TestRemoveImage(t *testing.T) { + server := DockerServer{} + addImages(&server, 1, false) + server.buildMuxer() + recorder := httptest.NewRecorder() + path := fmt.Sprintf("/images/%s", server.images[0].ID) + request, _ := http.NewRequest("DELETE", path, nil) + server.ServeHTTP(recorder, request) + if recorder.Code != http.StatusNoContent { + t.Errorf("RemoveImage: wrong status. Want %d. Got %d.", http.StatusNoContent, recorder.Code) + } + if len(server.images) > 0 { + t.Error("RemoveImage: did not remove the image.") + } +} + +func TestRemoveImageByName(t *testing.T) { + server := DockerServer{} + addImages(&server, 1, true) + server.buildMuxer() + recorder := httptest.NewRecorder() + imgName := "docker/python-" + server.images[0].ID + path := "/images/" + imgName + request, _ := http.NewRequest("DELETE", path, nil) + server.ServeHTTP(recorder, request) + if recorder.Code != http.StatusNoContent { + t.Errorf("RemoveImage: wrong status. Want %d. Got %d.", http.StatusNoContent, recorder.Code) + } + if len(server.images) > 0 { + t.Error("RemoveImage: did not remove the image.") + } + _, ok := server.imgIDs[imgName] + if ok { + t.Error("RemoveImage: did not remove image tag name.") + } +} + +func TestRemoveImageWithMultipleTags(t *testing.T) { + server := DockerServer{} + addImages(&server, 1, true) + server.buildMuxer() + imgID := server.images[0].ID + imgName := "docker/python-" + imgID + server.imgIDs["docker/python-wat"] = imgID + recorder := httptest.NewRecorder() + path := fmt.Sprintf("/images/%s", imgName) + request, _ := http.NewRequest("DELETE", path, nil) + server.ServeHTTP(recorder, request) + _, ok := server.imgIDs[imgName] + if ok { + t.Error("RemoveImage: did not remove image tag name.") + } + id, ok := server.imgIDs["docker/python-wat"] + if !ok { + t.Error("RemoveImage: removed the wrong tag name.") + } + if id != imgID { + t.Error("RemoveImage: disassociated the wrong ID from the tag") + } + if len(server.images) < 1 { + t.Fatal("RemoveImage: removed the image, but should keep it") + } + if server.images[0].ID != imgID { + t.Error("RemoveImage: changed the ID of the image!") + } +} + +func TestPrepareFailure(t *testing.T) { + server := DockerServer{failures: make(map[string]string)} + server.buildMuxer() + errorID := "my_error" + server.PrepareFailure(errorID, "containers/json") + recorder := httptest.NewRecorder() + request, _ := http.NewRequest("GET", "/containers/json?all=1", nil) + server.ServeHTTP(recorder, request) + if recorder.Code != http.StatusBadRequest { + t.Errorf("PrepareFailure: wrong status. Want %d. Got %d.", http.StatusBadRequest, recorder.Code) + } + if recorder.Body.String() != errorID+"\n" { + t.Errorf("PrepareFailure: wrong message. Want %s. 
Got %s.", errorID, recorder.Body.String()) + } +} + +func TestPrepareMultiFailures(t *testing.T) { + server := DockerServer{multiFailures: []map[string]string{}} + server.buildMuxer() + errorID := "multi error" + server.PrepareMultiFailures(errorID, "containers/json") + server.PrepareMultiFailures(errorID, "containers/json") + recorder := httptest.NewRecorder() + request, _ := http.NewRequest("GET", "/containers/json?all=1", nil) + server.ServeHTTP(recorder, request) + if recorder.Code != http.StatusBadRequest { + t.Errorf("PrepareFailure: wrong status. Want %d. Got %d.", http.StatusBadRequest, recorder.Code) + } + if recorder.Body.String() != errorID+"\n" { + t.Errorf("PrepareFailure: wrong message. Want %s. Got %s.", errorID, recorder.Body.String()) + } + recorder = httptest.NewRecorder() + request, _ = http.NewRequest("GET", "/containers/json?all=1", nil) + server.ServeHTTP(recorder, request) + if recorder.Code != http.StatusBadRequest { + t.Errorf("PrepareFailure: wrong status. Want %d. Got %d.", http.StatusBadRequest, recorder.Code) + } + if recorder.Body.String() != errorID+"\n" { + t.Errorf("PrepareFailure: wrong message. Want %s. Got %s.", errorID, recorder.Body.String()) + } + recorder = httptest.NewRecorder() + request, _ = http.NewRequest("GET", "/containers/json?all=1", nil) + server.ServeHTTP(recorder, request) + if recorder.Code != http.StatusOK { + t.Errorf("PrepareFailure: wrong status. Want %d. Got %d.", http.StatusOK, recorder.Code) + } + if recorder.Body.String() == errorID+"\n" { + t.Errorf("PrepareFailure: wrong message. Want %s. Got %s.", errorID, recorder.Body.String()) + } +} + +func TestRemoveFailure(t *testing.T) { + server := DockerServer{failures: make(map[string]string)} + server.buildMuxer() + errorID := "my_error" + server.PrepareFailure(errorID, "containers/json") + recorder := httptest.NewRecorder() + request, _ := http.NewRequest("GET", "/containers/json?all=1", nil) + server.ServeHTTP(recorder, request) + if recorder.Code != http.StatusBadRequest { + t.Errorf("PrepareFailure: wrong status. Want %d. Got %d.", http.StatusBadRequest, recorder.Code) + } + server.ResetFailure(errorID) + recorder = httptest.NewRecorder() + request, _ = http.NewRequest("GET", "/containers/json?all=1", nil) + server.ServeHTTP(recorder, request) + if recorder.Code != http.StatusOK { + t.Errorf("RemoveFailure: wrong status. Want %d. 
Got %d.", http.StatusOK, recorder.Code) + } +} + +func TestResetMultiFailures(t *testing.T) { + server := DockerServer{multiFailures: []map[string]string{}} + server.buildMuxer() + errorID := "multi error" + server.PrepareMultiFailures(errorID, "containers/json") + server.PrepareMultiFailures(errorID, "containers/json") + if len(server.multiFailures) != 2 { + t.Errorf("PrepareMultiFailures: error adding multi failures.") + } + server.ResetMultiFailures() + if len(server.multiFailures) != 0 { + t.Errorf("ResetMultiFailures: error reseting multi failures.") + } +} + +func TestMutateContainer(t *testing.T) { + server := DockerServer{failures: make(map[string]string)} + server.buildMuxer() + server.containers = append(server.containers, &docker.Container{ID: "id123"}) + state := docker.State{Running: false, ExitCode: 1} + err := server.MutateContainer("id123", state) + if err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(server.containers[0].State, state) { + t.Errorf("Wrong state after mutation.\nWant %#v.\nGot %#v.", + state, server.containers[0].State) + } +} + +func TestMutateContainerNotFound(t *testing.T) { + server := DockerServer{failures: make(map[string]string)} + server.buildMuxer() + state := docker.State{Running: false, ExitCode: 1} + err := server.MutateContainer("id123", state) + if err == nil { + t.Error("Unexpected error") + } + if err.Error() != "container not found" { + t.Errorf("wrong error message. Want %q. Got %q.", "container not found", err) + } +} + +func TestBuildImageWithContentTypeTar(t *testing.T) { + server := DockerServer{imgIDs: make(map[string]string)} + imageName := "teste" + recorder := httptest.NewRecorder() + tarFile, err := os.Open("data/dockerfile.tar") + if err != nil { + t.Fatal(err) + } + defer tarFile.Close() + request, _ := http.NewRequest("POST", "/build?t=teste", tarFile) + request.Header.Add("Content-Type", "application/tar") + server.buildImage(recorder, request) + if recorder.Body.String() == "miss Dockerfile" { + t.Errorf("BuildImage: miss Dockerfile") + return + } + if _, ok := server.imgIDs[imageName]; ok == false { + t.Errorf("BuildImage: image %s not builded", imageName) + } +} + +func TestBuildImageWithRemoteDockerfile(t *testing.T) { + server := DockerServer{imgIDs: make(map[string]string)} + imageName := "teste" + recorder := httptest.NewRecorder() + request, _ := http.NewRequest("POST", "/build?t=teste&remote=http://localhost/Dockerfile", nil) + server.buildImage(recorder, request) + if _, ok := server.imgIDs[imageName]; ok == false { + t.Errorf("BuildImage: image %s not builded", imageName) + } +} + +func TestPing(t *testing.T) { + server := DockerServer{} + recorder := httptest.NewRecorder() + request, _ := http.NewRequest("GET", "/_ping", nil) + server.pingDocker(recorder, request) + if recorder.Body.String() != "" { + t.Errorf("Ping: Unexpected body: %s", recorder.Body.String()) + } + if recorder.Code != http.StatusOK { + t.Errorf("Ping: Expected code %d, got: %d", http.StatusOK, recorder.Code) + } +} + +func TestDefaultHandler(t *testing.T) { + server, err := NewServer("127.0.0.1:0", nil, nil) + if err != nil { + t.Fatal(err) + } + defer server.listener.Close() + if server.mux != server.DefaultHandler() { + t.Fatalf("DefaultHandler: Expected to return server.mux, got: %#v", server.DefaultHandler()) + } +} + +func TestCreateExecContainer(t *testing.T) { + server := DockerServer{} + addContainers(&server, 2) + server.buildMuxer() + recorder := httptest.NewRecorder() + body := `{"Cmd": ["bash", "-c", "ls"]}` + path := 
fmt.Sprintf("/containers/%s/exec", server.containers[0].ID) + request, _ := http.NewRequest("POST", path, strings.NewReader(body)) + server.ServeHTTP(recorder, request) + if recorder.Code != http.StatusOK { + t.Fatalf("CreateExec: wrong status. Want %d. Got %d.", http.StatusOK, recorder.Code) + } + serverExec := server.execs[0] + var got docker.Exec + err := json.NewDecoder(recorder.Body).Decode(&got) + if err != nil { + t.Fatal(err) + } + if got.ID != serverExec.ID { + t.Errorf("CreateExec: wrong value. Want %#v. Got %#v.", serverExec.ID, got.ID) + } + + expected := docker.ExecInspect{ + ID: got.ID, + ProcessConfig: docker.ExecProcessConfig{ + EntryPoint: "bash", + Arguments: []string{"-c", "ls"}, + }, + Container: *server.containers[0], + } + + if !reflect.DeepEqual(*serverExec, expected) { + t.Errorf("InspectContainer: wrong value. Want:\n%#v\nGot:\n%#v\n", expected, *serverExec) + } +} + +func TestInspectExecContainer(t *testing.T) { + server := DockerServer{} + addContainers(&server, 1) + server.buildMuxer() + recorder := httptest.NewRecorder() + body := `{"Cmd": ["bash", "-c", "ls"]}` + path := fmt.Sprintf("/containers/%s/exec", server.containers[0].ID) + request, _ := http.NewRequest("POST", path, strings.NewReader(body)) + server.ServeHTTP(recorder, request) + if recorder.Code != http.StatusOK { + t.Fatalf("CreateExec: wrong status. Want %d. Got %d.", http.StatusOK, recorder.Code) + } + var got docker.Exec + err := json.NewDecoder(recorder.Body).Decode(&got) + if err != nil { + t.Fatal(err) + } + path = fmt.Sprintf("/exec/%s/json", got.ID) + request, _ = http.NewRequest("GET", path, nil) + server.ServeHTTP(recorder, request) + if recorder.Code != http.StatusOK { + t.Fatalf("CreateExec: wrong status. Want %d. Got %d.", http.StatusOK, recorder.Code) + } + var got2 docker.ExecInspect + err = json.NewDecoder(recorder.Body).Decode(&got2) + if err != nil { + t.Fatal(err) + } + expected := docker.ExecInspect{ + ID: got.ID, + ProcessConfig: docker.ExecProcessConfig{ + EntryPoint: "bash", + Arguments: []string{"-c", "ls"}, + }, + Container: *server.containers[0], + } + got2.Container.State.StartedAt = expected.Container.State.StartedAt + got2.Container.State.FinishedAt = expected.Container.State.FinishedAt + got2.Container.Config = expected.Container.Config + got2.Container.Created = expected.Container.Created + got2.Container.NetworkSettings = expected.Container.NetworkSettings + got2.Container.ExecIDs = expected.Container.ExecIDs + + if !reflect.DeepEqual(got2, expected) { + t.Errorf("InspectContainer: wrong value. Want:\n%#v\nGot:\n%#v\n", expected, got2) + } +} + +func TestStartExecContainer(t *testing.T) { + server, _ := NewServer("127.0.0.1:0", nil, nil) + addContainers(server, 1) + server.buildMuxer() + recorder := httptest.NewRecorder() + body := `{"Cmd": ["bash", "-c", "ls"]}` + path := fmt.Sprintf("/containers/%s/exec", server.containers[0].ID) + request, _ := http.NewRequest("POST", path, strings.NewReader(body)) + server.ServeHTTP(recorder, request) + if recorder.Code != http.StatusOK { + t.Fatalf("CreateExec: wrong status. Want %d. 
Got %d.", http.StatusOK, recorder.Code) + } + var exec docker.Exec + err := json.NewDecoder(recorder.Body).Decode(&exec) + if err != nil { + t.Fatal(err) + } + unleash := make(chan bool) + server.PrepareExec(exec.ID, func() { + <-unleash + }) + codes := make(chan int, 1) + sent := make(chan bool) + go func() { + recorder := httptest.NewRecorder() + path := fmt.Sprintf("/exec/%s/start", exec.ID) + body := `{"Tty":true}` + request, _ := http.NewRequest("POST", path, strings.NewReader(body)) + close(sent) + server.ServeHTTP(recorder, request) + codes <- recorder.Code + }() + <-sent + execInfo, err := waitExec(server.URL(), exec.ID, true, 5) + if err != nil { + t.Fatal(err) + } + if !execInfo.Running { + t.Error("StartExec: expected exec to be running, but it's not running") + } + close(unleash) + if code := <-codes; code != http.StatusOK { + t.Errorf("StartExec: wrong status. Want %d. Got %d.", http.StatusOK, code) + } + execInfo, err = waitExec(server.URL(), exec.ID, false, 5) + if err != nil { + t.Fatal(err) + } + if execInfo.Running { + t.Error("StartExec: expected exec to be not running after start returns, but it's running") + } +} + +func TestStartExecContainerWildcardCallback(t *testing.T) { + server, _ := NewServer("127.0.0.1:0", nil, nil) + addContainers(server, 1) + server.buildMuxer() + recorder := httptest.NewRecorder() + body := `{"Cmd": ["bash", "-c", "ls"]}` + path := fmt.Sprintf("/containers/%s/exec", server.containers[0].ID) + request, _ := http.NewRequest("POST", path, strings.NewReader(body)) + server.ServeHTTP(recorder, request) + if recorder.Code != http.StatusOK { + t.Fatalf("CreateExec: wrong status. Want %d. Got %d.", http.StatusOK, recorder.Code) + } + unleash := make(chan bool) + server.PrepareExec("*", func() { + <-unleash + }) + var exec docker.Exec + err := json.NewDecoder(recorder.Body).Decode(&exec) + if err != nil { + t.Fatal(err) + } + codes := make(chan int, 1) + sent := make(chan bool) + go func() { + recorder := httptest.NewRecorder() + path := fmt.Sprintf("/exec/%s/start", exec.ID) + body := `{"Tty":true}` + request, _ := http.NewRequest("POST", path, strings.NewReader(body)) + close(sent) + server.ServeHTTP(recorder, request) + codes <- recorder.Code + }() + <-sent + execInfo, err := waitExec(server.URL(), exec.ID, true, 5) + if err != nil { + t.Fatal(err) + } + if !execInfo.Running { + t.Error("StartExec: expected exec to be running, but it's not running") + } + close(unleash) + if code := <-codes; code != http.StatusOK { + t.Errorf("StartExec: wrong status. Want %d. 
Got %d.", http.StatusOK, code) + } + execInfo, err = waitExec(server.URL(), exec.ID, false, 5) + if err != nil { + t.Fatal(err) + } + if execInfo.Running { + t.Error("StartExec: expected exec to be not running after start returns, but it's running") + } +} + +func TestStartExecContainerNotFound(t *testing.T) { + server, _ := NewServer("127.0.0.1:0", nil, nil) + addContainers(server, 1) + server.buildMuxer() + recorder := httptest.NewRecorder() + body := `{"Tty":true}` + request, _ := http.NewRequest("POST", "/exec/something-wat/start", strings.NewReader(body)) + server.ServeHTTP(recorder, request) +} + +func waitExec(url, execID string, running bool, maxTry int) (*docker.ExecInspect, error) { + client, err := docker.NewClient(url) + if err != nil { + return nil, err + } + exec, err := client.InspectExec(execID) + for i := 0; i < maxTry && exec.Running != running && err == nil; i++ { + time.Sleep(100e6) + exec, err = client.InspectExec(exec.ID) + } + return exec, err +} + +func TestStatsContainer(t *testing.T) { + server, err := NewServer("127.0.0.1:0", nil, nil) + if err != nil { + t.Fatal(err) + } + defer server.Stop() + addContainers(server, 2) + server.buildMuxer() + expected := docker.Stats{} + expected.CPUStats.CPUUsage.TotalUsage = 20 + server.PrepareStats(server.containers[0].ID, func(id string) docker.Stats { + return expected + }) + recorder := httptest.NewRecorder() + path := fmt.Sprintf("/containers/%s/stats?stream=false", server.containers[0].ID) + request, _ := http.NewRequest("GET", path, nil) + server.ServeHTTP(recorder, request) + if recorder.Code != http.StatusOK { + t.Errorf("StatsContainer: wrong status. Want %d. Got %d.", http.StatusOK, recorder.Code) + } + body := recorder.Body.Bytes() + var got docker.Stats + err = json.Unmarshal(body, &got) + if err != nil { + t.Fatal(err) + } + got.Read = time.Time{} + if !reflect.DeepEqual(got, expected) { + t.Errorf("StatsContainer: wrong value. Want %#v. Got %#v.", expected, got) + } +} + +type safeWriter struct { + sync.Mutex + *httptest.ResponseRecorder +} + +func (w *safeWriter) Write(buf []byte) (int, error) { + w.Lock() + defer w.Unlock() + return w.ResponseRecorder.Write(buf) +} + +func TestStatsContainerStream(t *testing.T) { + server, err := NewServer("127.0.0.1:0", nil, nil) + if err != nil { + t.Fatal(err) + } + defer server.Stop() + addContainers(server, 2) + server.buildMuxer() + expected := docker.Stats{} + expected.CPUStats.CPUUsage.TotalUsage = 20 + server.PrepareStats(server.containers[0].ID, func(id string) docker.Stats { + time.Sleep(50 * time.Millisecond) + return expected + }) + recorder := &safeWriter{ + ResponseRecorder: httptest.NewRecorder(), + } + path := fmt.Sprintf("/containers/%s/stats?stream=true", server.containers[0].ID) + request, _ := http.NewRequest("GET", path, nil) + go func() { + server.ServeHTTP(recorder, request) + }() + time.Sleep(200 * time.Millisecond) + recorder.Lock() + defer recorder.Unlock() + body := recorder.Body.Bytes() + parts := bytes.Split(body, []byte("\n")) + if len(parts) < 2 { + t.Errorf("StatsContainer: wrong number of parts. Want at least 2. Got %#v.", len(parts)) + } + var got docker.Stats + err = json.Unmarshal(parts[0], &got) + if err != nil { + t.Fatal(err) + } + got.Read = time.Time{} + if !reflect.DeepEqual(got, expected) { + t.Errorf("StatsContainer: wrong value. Want %#v. 
Got %#v.", expected, got) + } +} + +func addNetworks(server *DockerServer, n int) { + server.netMut.Lock() + defer server.netMut.Unlock() + for i := 0; i < n; i++ { + netid := fmt.Sprintf("%x", rand.Int()%10000) + network := docker.Network{ + Name: netid, + ID: fmt.Sprintf("%x", rand.Int()%10000), + Driver: "bridge", + Containers: map[string]docker.Endpoint{ + "blah": { + Name: "blah", + ID: fmt.Sprintf("%x", rand.Int()%10000), + }, + }, + } + server.networks = append(server.networks, &network) + } +} + +func TestListNetworks(t *testing.T) { + server := DockerServer{} + addNetworks(&server, 2) + server.buildMuxer() + recorder := httptest.NewRecorder() + request, _ := http.NewRequest("GET", "/networks", nil) + server.ServeHTTP(recorder, request) + if recorder.Code != http.StatusOK { + t.Errorf("ListNetworks: wrong status. Want %d. Got %d.", http.StatusOK, recorder.Code) + } + expected := make([]docker.Network, 2) + for i, network := range server.networks { + expected[i] = docker.Network{ + ID: network.ID, + Name: network.Name, + Driver: network.Driver, + Containers: network.Containers, + } + } + var got []docker.Network + err := json.NewDecoder(recorder.Body).Decode(&got) + if err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(got, expected) { + t.Errorf("ListNetworks. Want %#v. Got %#v.", expected, got) + } +} + +type createNetworkResponse struct { + ID string `json:"ID"` +} + +func TestCreateNetwork(t *testing.T) { + server := DockerServer{} + server.buildMuxer() + recorder := httptest.NewRecorder() + netid := fmt.Sprintf("%x", rand.Int()%10000) + netname := fmt.Sprintf("%x", rand.Int()%10000) + body := fmt.Sprintf(`{"ID": "%s", "Name": "%s", "Type": "bridge" }`, netid, netname) + request, _ := http.NewRequest("POST", "/networks", strings.NewReader(body)) + server.ServeHTTP(recorder, request) + if recorder.Code != http.StatusCreated { + t.Errorf("CreateNetwork: wrong status. Want %d. Got %d.", http.StatusCreated, recorder.Code) + } + + var returned createNetworkResponse + err := json.NewDecoder(recorder.Body).Decode(&returned) + if err != nil { + t.Fatal(err) + } + stored := server.networks[0] + if returned.ID != stored.ID { + t.Errorf("CreateNetwork: ID mismatch. Stored: %q. Returned: %q.", stored.ID, returned) + } +} + +func TestCreateNetworkInvalidBody(t *testing.T) { + server := DockerServer{} + server.buildMuxer() + recorder := httptest.NewRecorder() + request, _ := http.NewRequest("POST", "/networks", strings.NewReader("whaaaaaat---")) + server.ServeHTTP(recorder, request) + if recorder.Code != http.StatusBadRequest { + t.Errorf("CreateNetwork: wrong status. Want %d. Got %d.", http.StatusBadRequest, recorder.Code) + } +} + +func TestCreateNetworkDuplicateName(t *testing.T) { + server := DockerServer{} + server.buildMuxer() + addNetworks(&server, 1) + server.networks[0].Name = "mynetwork" + recorder := httptest.NewRecorder() + body := fmt.Sprintf(`{"ID": "%s", "Name": "mynetwork", "Type": "bridge" }`, fmt.Sprintf("%x", rand.Int()%10000)) + request, _ := http.NewRequest("POST", "/networks", strings.NewReader(body)) + server.ServeHTTP(recorder, request) + if recorder.Code != http.StatusForbidden { + t.Errorf("CreateNetwork: wrong status. Want %d. 
Got %d.", http.StatusForbidden, recorder.Code) + } +} + +func TestListVolumes(t *testing.T) { + server := DockerServer{} + server.buildMuxer() + expected := []docker.Volume{{ + Name: "test-vol-1", + Driver: "local", + Mountpoint: "/var/lib/docker/volumes/test-vol-1", + }} + server.volStore = make(map[string]*volumeCounter) + for _, vol := range expected { + server.volStore[vol.Name] = &volumeCounter{ + volume: vol, + count: 0, + } + } + recorder := httptest.NewRecorder() + request, _ := http.NewRequest("GET", "/volumes", nil) + server.ServeHTTP(recorder, request) + if recorder.Code != http.StatusOK { + t.Errorf("ListVolumes: wrong status. Want %d. Got %d.", http.StatusCreated, recorder.Code) + } + var got []docker.Volume + err := json.NewDecoder(recorder.Body).Decode(&got) + if err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(got, expected) { + t.Errorf("ListVolumes. Want %#v. Got %#v.", expected, got) + } +} + +func TestCreateVolume(t *testing.T) { + server := DockerServer{} + server.buildMuxer() + recorder := httptest.NewRecorder() + body := `{"Name":"test-volume"}` + request, _ := http.NewRequest("POST", "/volumes/create", strings.NewReader(body)) + server.ServeHTTP(recorder, request) + if recorder.Code != http.StatusCreated { + t.Errorf("CreateVolume: wrong status. Want %d. Got %d.", http.StatusCreated, recorder.Code) + } + var returned docker.Volume + err := json.NewDecoder(recorder.Body).Decode(&returned) + if err != nil { + t.Error(err) + } + if returned.Name != "test-volume" { + t.Errorf("CreateVolume: Name mismatch. Expected: test-volume. Returned %q.", returned.Name) + } + if returned.Driver != "local" { + t.Errorf("CreateVolume: Driver mismatch. Expected: local. Returned: %q", returned.Driver) + } + if returned.Mountpoint != "/var/lib/docker/volumes/test-volume" { + t.Errorf("CreateVolume: Mountpoint mismatch. Expected: /var/lib/docker/volumes/test-volume. Returned: %q.", returned.Mountpoint) + } +} + +func TestCreateVolumeAlreadExists(t *testing.T) { + server := DockerServer{} + server.buildMuxer() + server.volStore = make(map[string]*volumeCounter) + server.volStore["test-volume"] = &volumeCounter{ + volume: docker.Volume{ + Name: "test-volume", + Driver: "local", + Mountpoint: "/var/lib/docker/volumes/test-volume", + }, + count: 0, + } + body := `{"Name":"test-volume"}` + recorder := httptest.NewRecorder() + request, _ := http.NewRequest("POST", "/volumes/create", strings.NewReader(body)) + server.ServeHTTP(recorder, request) + if recorder.Code != http.StatusCreated { + t.Errorf("CreateVolumeAlreadExists: wrong status. Want %d. Got %d.", http.StatusCreated, recorder.Code) + } + var returned docker.Volume + err := json.NewDecoder(recorder.Body).Decode(&returned) + if err != nil { + t.Error(err) + } + if returned.Name != "test-volume" { + t.Errorf("CreateVolumeAlreadExists: Name mismatch. Expected: test-volume. Returned %q.", returned.Name) + } + if returned.Driver != "local" { + t.Errorf("CreateVolumeAlreadExists: Driver mismatch. Expected: local. Returned: %q", returned.Driver) + } + if returned.Mountpoint != "/var/lib/docker/volumes/test-volume" { + t.Errorf("CreateVolumeAlreadExists: Mountpoint mismatch. Expected: /var/lib/docker/volumes/test-volume. 
Returned: %q.", returned.Mountpoint) + } +} + +func TestInspectVolume(t *testing.T) { + server := DockerServer{} + server.buildMuxer() + recorder := httptest.NewRecorder() + expected := docker.Volume{ + Name: "test-volume", + Driver: "local", + Mountpoint: "/var/lib/docker/volumes/test-volume", + } + volC := &volumeCounter{ + volume: expected, + count: 0, + } + volStore := make(map[string]*volumeCounter) + volStore["test-volume"] = volC + server.volStore = volStore + request, _ := http.NewRequest("GET", "/volumes/test-volume", nil) + server.ServeHTTP(recorder, request) + if recorder.Code != http.StatusOK { + t.Errorf("InspectVolume: wrong status. Want %d. God %d.", http.StatusOK, recorder.Code) + } + var returned docker.Volume + err := json.NewDecoder(recorder.Body).Decode(&returned) + if err != nil { + t.Error(err) + } + if returned.Name != "test-volume" { + t.Errorf("InspectVolume: Name mismatch. Expected: test-volume. Returned %q.", returned.Name) + } + if returned.Driver != "local" { + t.Errorf("InspectVolume: Driver mismatch. Expected: local. Returned: %q", returned.Driver) + } + if returned.Mountpoint != "/var/lib/docker/volumes/test-volume" { + t.Errorf("InspectVolume: Mountpoint mismatch. Expected: /var/lib/docker/volumes/test-volume. Returned: %q.", returned.Mountpoint) + } +} + +func TestInspectVolumeNotFound(t *testing.T) { + server := DockerServer{} + server.buildMuxer() + recorder := httptest.NewRecorder() + request, _ := http.NewRequest("GET", "/volumes/test-volume", nil) + server.ServeHTTP(recorder, request) + if recorder.Code != http.StatusNotFound { + t.Errorf("RemoveMissingVolume: wrong status. Want %d. Got %d.", http.StatusNotFound, recorder.Code) + } +} + +func TestRemoveVolume(t *testing.T) { + server := DockerServer{} + server.buildMuxer() + server.volStore = make(map[string]*volumeCounter) + server.volStore["test-volume"] = &volumeCounter{ + volume: docker.Volume{ + Name: "test-volume", + Driver: "local", + Mountpoint: "/var/lib/docker/volumes/test-volume", + }, + count: 0, + } + recorder := httptest.NewRecorder() + request, _ := http.NewRequest("DELETE", "/volumes/test-volume", nil) + server.ServeHTTP(recorder, request) + if recorder.Code != http.StatusNoContent { + t.Errorf("RemoveVolume: wrong status. Want %d. Got %d.", http.StatusNoContent, recorder.Code) + } +} + +func TestRemoveMissingVolume(t *testing.T) { + server := DockerServer{} + server.buildMuxer() + recorder := httptest.NewRecorder() + request, _ := http.NewRequest("DELETE", "/volumes/test-volume", nil) + server.ServeHTTP(recorder, request) + if recorder.Code != http.StatusNotFound { + t.Errorf("RemoveMissingVolume: wrong status. Want %d. Got %d.", http.StatusNotFound, recorder.Code) + } +} + +func TestRemoveVolumeInuse(t *testing.T) { + server := DockerServer{} + server.buildMuxer() + server.volStore = make(map[string]*volumeCounter) + server.volStore["test-volume"] = &volumeCounter{ + volume: docker.Volume{ + Name: "test-volume", + Driver: "local", + Mountpoint: "/var/lib/docker/volumes/test-volume", + }, + count: 1, + } + recorder := httptest.NewRecorder() + request, _ := http.NewRequest("DELETE", "/volumes/test-volume", nil) + server.ServeHTTP(recorder, request) + if recorder.Code != http.StatusConflict { + t.Errorf("RemoveVolume: wrong status. Want %d. 
Got %d.", http.StatusConflict, recorder.Code) + } +} + +func TestUploadToContainer(t *testing.T) { + server := DockerServer{} + server.buildMuxer() + cont := &docker.Container{ + ID: "id123", + State: docker.State{ + Running: true, + ExitCode: 0, + }, + } + server.containers = append(server.containers, cont) + server.uploadedFiles = make(map[string]string) + recorder := httptest.NewRecorder() + request, _ := http.NewRequest("PUT", fmt.Sprintf("/containers/%s/archive?path=abcd", cont.ID), nil) + server.ServeHTTP(recorder, request) + if recorder.Code != http.StatusOK { + t.Errorf("UploadToContainer: wrong status. Want %d. Got %d.", http.StatusOK, recorder.Code) + } +} + +func TestUploadToContainerMissingContainer(t *testing.T) { + server := DockerServer{} + server.buildMuxer() + recorder := httptest.NewRecorder() + request, _ := http.NewRequest("PUT", "/containers/missing-container/archive?path=abcd", nil) + server.ServeHTTP(recorder, request) + if recorder.Code != http.StatusNotFound { + t.Errorf("UploadToContainer: wrong status. Want %d. Got %d.", http.StatusNotFound, recorder.Code) + } +} diff --git a/vendor/github.com/fsouza/go-dockerclient/tls.go b/vendor/github.com/fsouza/go-dockerclient/tls.go new file mode 100644 index 000000000..55f43174b --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/tls.go @@ -0,0 +1,96 @@ +// Copyright 2014 go-dockerclient authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. +// +// The content is borrowed from Docker's own source code to provide a simple +// tls based dialer + +package docker + +import ( + "crypto/tls" + "errors" + "net" + "strings" + "time" +) + +type tlsClientCon struct { + *tls.Conn + rawConn net.Conn +} + +func (c *tlsClientCon) CloseWrite() error { + // Go standard tls.Conn doesn't provide the CloseWrite() method so we do it + // on its underlying connection. + if cwc, ok := c.rawConn.(interface { + CloseWrite() error + }); ok { + return cwc.CloseWrite() + } + return nil +} + +func tlsDialWithDialer(dialer *net.Dialer, network, addr string, config *tls.Config) (net.Conn, error) { + // We want the Timeout and Deadline values from dialer to cover the + // whole process: TCP connection and TLS handshake. This means that we + // also need to start our own timers now. + timeout := dialer.Timeout + + if !dialer.Deadline.IsZero() { + deadlineTimeout := dialer.Deadline.Sub(time.Now()) + if timeout == 0 || deadlineTimeout < timeout { + timeout = deadlineTimeout + } + } + + var errChannel chan error + + if timeout != 0 { + errChannel = make(chan error, 2) + time.AfterFunc(timeout, func() { + errChannel <- errors.New("") + }) + } + + rawConn, err := dialer.Dial(network, addr) + if err != nil { + return nil, err + } + + colonPos := strings.LastIndex(addr, ":") + if colonPos == -1 { + colonPos = len(addr) + } + hostname := addr[:colonPos] + + // If no ServerName is set, infer the ServerName + // from the hostname we're connecting to. + if config.ServerName == "" { + // Make a copy to avoid polluting argument or default. + c := *config + c.ServerName = hostname + config = &c + } + + conn := tls.Client(rawConn, config) + + if timeout == 0 { + err = conn.Handshake() + } else { + go func() { + errChannel <- conn.Handshake() + }() + + err = <-errChannel + } + + if err != nil { + rawConn.Close() + return nil, err + } + + // This is Docker difference with standard's crypto/tls package: returned a + // wrapper which holds both the TLS and raw connections. 
diff --git a/vendor/github.com/fsouza/go-dockerclient/volume.go b/vendor/github.com/fsouza/go-dockerclient/volume.go
new file mode 100644
index 000000000..0e57cb122
--- /dev/null
+++ b/vendor/github.com/fsouza/go-dockerclient/volume.go
@@ -0,0 +1,127 @@
+// Copyright 2015 go-dockerclient authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package docker
+
+import (
+	"encoding/json"
+	"errors"
+	"net/http"
+)
+
+var (
+	// ErrNoSuchVolume is the error returned when the volume does not exist.
+	ErrNoSuchVolume = errors.New("no such volume")
+
+	// ErrVolumeInUse is the error returned when the volume requested to be removed is still in use.
+	ErrVolumeInUse = errors.New("volume in use and cannot be removed")
+)
+
+// Volume represents a volume.
+//
+// See https://goo.gl/FZA4BK for more details.
+type Volume struct {
+	Name       string `json:"Name" yaml:"Name"`
+	Driver     string `json:"Driver,omitempty" yaml:"Driver,omitempty"`
+	Mountpoint string `json:"Mountpoint,omitempty" yaml:"Mountpoint,omitempty"`
+}
+
+// ListVolumesOptions specify parameters to the ListVolumes function.
+//
+// See https://goo.gl/FZA4BK for more details.
+type ListVolumesOptions struct {
+	Filters map[string][]string
+}
+
+// ListVolumes returns a list of available volumes in the server.
+//
+// See https://goo.gl/FZA4BK for more details.
+func (c *Client) ListVolumes(opts ListVolumesOptions) ([]Volume, error) {
+	resp, err := c.do("GET", "/volumes?"+queryString(opts), doOptions{})
+	if err != nil {
+		return nil, err
+	}
+	defer resp.Body.Close()
+	m := make(map[string]interface{})
+	if err := json.NewDecoder(resp.Body).Decode(&m); err != nil {
+		return nil, err
+	}
+	var volumes []Volume
+	volumesJSON, ok := m["Volumes"]
+	if !ok {
+		return volumes, nil
+	}
+	data, err := json.Marshal(volumesJSON)
+	if err != nil {
+		return nil, err
+	}
+	if err := json.Unmarshal(data, &volumes); err != nil {
+		return nil, err
+	}
+	return volumes, nil
+}
+
+// CreateVolumeOptions specify parameters to the CreateVolume function.
+//
+// See https://goo.gl/pBUbZ9 for more details.
+type CreateVolumeOptions struct {
+	Name       string
+	Driver     string
+	DriverOpts map[string]string
+}
+
+// CreateVolume creates a volume on the server.
+//
+// See https://goo.gl/pBUbZ9 for more details.
+func (c *Client) CreateVolume(opts CreateVolumeOptions) (*Volume, error) {
+	resp, err := c.do("POST", "/volumes/create", doOptions{data: opts})
+	if err != nil {
+		return nil, err
+	}
+	defer resp.Body.Close()
+	var volume Volume
+	if err := json.NewDecoder(resp.Body).Decode(&volume); err != nil {
+		return nil, err
+	}
+	return &volume, nil
+}
+
+// InspectVolume returns a volume by its name.
+//
+// See https://goo.gl/0g9A6i for more details.
+func (c *Client) InspectVolume(name string) (*Volume, error) {
+	resp, err := c.do("GET", "/volumes/"+name, doOptions{})
+	if err != nil {
+		if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound {
+			return nil, ErrNoSuchVolume
+		}
+		return nil, err
+	}
+	defer resp.Body.Close()
+	var volume Volume
+	if err := json.NewDecoder(resp.Body).Decode(&volume); err != nil {
+		return nil, err
+	}
+	return &volume, nil
+}
+
+// RemoveVolume removes a volume by its name.
+//
+// See https://goo.gl/79GNQz for more details.
+func (c *Client) RemoveVolume(name string) error {
+	resp, err := c.do("DELETE", "/volumes/"+name, doOptions{})
+	if err != nil {
+		if e, ok := err.(*Error); ok {
+			if e.Status == http.StatusNotFound {
+				return ErrNoSuchVolume
+			}
+			if e.Status == http.StatusConflict {
+				return ErrVolumeInUse
+			}
+		}
+		return err
+	}
+	defer resp.Body.Close()
+	return nil
+}
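Taken together, these endpoints give the client a complete volume lifecycle: create, list, inspect, remove. A hedged usage sketch against a local daemon (the socket path is the Docker default, the volume name is illustrative, and error handling is abbreviated):

```go
package main

import (
	"fmt"
	"log"

	"github.com/fsouza/go-dockerclient"
)

func main() {
	// Assumes a Docker daemon listening on the default local socket.
	client, err := docker.NewClient("unix:///var/run/docker.sock")
	if err != nil {
		log.Fatal(err)
	}
	vol, err := client.CreateVolume(docker.CreateVolumeOptions{Name: "scratch"})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("created:", vol.Name, "at", vol.Mountpoint)

	vols, err := client.ListVolumes(docker.ListVolumesOptions{})
	if err != nil {
		log.Fatal(err)
	}
	for _, v := range vols {
		fmt.Println(v.Name, v.Driver)
	}

	// RemoveVolume maps 404 to ErrNoSuchVolume and 409 to ErrVolumeInUse.
	if err := client.RemoveVolume("scratch"); err != nil {
		log.Fatal(err)
	}
}
```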
diff --git a/vendor/github.com/fsouza/go-dockerclient/volume_test.go b/vendor/github.com/fsouza/go-dockerclient/volume_test.go
new file mode 100644
index 000000000..e6bcca95f
--- /dev/null
+++ b/vendor/github.com/fsouza/go-dockerclient/volume_test.go
@@ -0,0 +1,142 @@
+// Copyright 2015 go-dockerclient authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package docker
+
+import (
+	"encoding/json"
+	"net/http"
+	"net/url"
+	"reflect"
+	"testing"
+)
+
+func TestListVolumes(t *testing.T) {
+	volumesData := `[
+	{
+		"Name": "tardis",
+		"Driver": "local",
+		"Mountpoint": "/var/lib/docker/volumes/tardis"
+	},
+	{
+		"Name": "foo",
+		"Driver": "bar",
+		"Mountpoint": "/var/lib/docker/volumes/bar"
+	}
+]`
+	body := `{ "Volumes": ` + volumesData + ` }`
+	var expected []Volume
+	if err := json.Unmarshal([]byte(volumesData), &expected); err != nil {
+		t.Fatal(err)
+	}
+	client := newTestClient(&FakeRoundTripper{message: body, status: http.StatusOK})
+	volumes, err := client.ListVolumes(ListVolumesOptions{})
+	if err != nil {
+		t.Error(err)
+	}
+	if !reflect.DeepEqual(volumes, expected) {
+		t.Errorf("ListVolumes: Wrong return value. Want %#v. Got %#v.", expected, volumes)
+	}
+}
+
+func TestCreateVolume(t *testing.T) {
+	body := `{
+		"Name": "tardis",
+		"Driver": "local",
+		"Mountpoint": "/var/lib/docker/volumes/tardis"
+	}`
+	var expected Volume
+	if err := json.Unmarshal([]byte(body), &expected); err != nil {
+		t.Fatal(err)
+	}
+	fakeRT := &FakeRoundTripper{message: body, status: http.StatusOK}
+	client := newTestClient(fakeRT)
+	volume, err := client.CreateVolume(
+		CreateVolumeOptions{
+			Name:   "tardis",
+			Driver: "local",
+			DriverOpts: map[string]string{
+				"foo": "bar",
+			},
+		},
+	)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if !reflect.DeepEqual(volume, &expected) {
+		t.Errorf("CreateVolume: Wrong return value. Want %#v. Got %#v.", expected, volume)
+	}
+	req := fakeRT.requests[0]
+	expectedMethod := "POST"
+	if req.Method != expectedMethod {
+		t.Errorf("CreateVolume(): Wrong HTTP method. Want %s. Got %s.", expectedMethod, req.Method)
+	}
+	u, _ := url.Parse(client.getURL("/volumes/create"))
+	if req.URL.Path != u.Path {
+		t.Errorf("CreateVolume(): Wrong request path. Want %q. Got %q.", u.Path, req.URL.Path)
+	}
+}
+
+func TestInspectVolume(t *testing.T) {
+	body := `{
+		"Name": "tardis",
+		"Driver": "local",
+		"Mountpoint": "/var/lib/docker/volumes/tardis"
+	}`
+	var expected Volume
+	if err := json.Unmarshal([]byte(body), &expected); err != nil {
+		t.Fatal(err)
+	}
+	fakeRT := &FakeRoundTripper{message: body, status: http.StatusOK}
+	client := newTestClient(fakeRT)
+	name := "tardis"
+	volume, err := client.InspectVolume(name)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if !reflect.DeepEqual(volume, &expected) {
+		t.Errorf("InspectVolume: Wrong return value. Want %#v. Got %#v.", expected, volume)
+	}
+	req := fakeRT.requests[0]
+	expectedMethod := "GET"
+	if req.Method != expectedMethod {
+		t.Errorf("InspectVolume(%q): Wrong HTTP method. Want %s. Got %s.", name, expectedMethod, req.Method)
+	}
+	u, _ := url.Parse(client.getURL("/volumes/" + name))
+	if req.URL.Path != u.Path {
+		t.Errorf("InspectVolume(%q): Wrong request path. Want %q. Got %q.", name, u.Path, req.URL.Path)
+	}
+}
Got %s.", name, expectedMethod, req.Method) + } + u, _ := url.Parse(client.getURL("/volumes/" + name)) + if req.URL.Path != u.Path { + t.Errorf("CreateVolume(%q): Wrong request path. Want %q. Got %q.", name, u.Path, req.URL.Path) + } +} + +func TestRemoveVolume(t *testing.T) { + name := "test" + fakeRT := &FakeRoundTripper{message: "", status: http.StatusNoContent} + client := newTestClient(fakeRT) + if err := client.RemoveVolume(name); err != nil { + t.Fatal(err) + } + req := fakeRT.requests[0] + expectedMethod := "DELETE" + if req.Method != expectedMethod { + t.Errorf("RemoveVolume(%q): Wrong HTTP method. Want %s. Got %s.", name, expectedMethod, req.Method) + } + u, _ := url.Parse(client.getURL("/volumes/" + name)) + if req.URL.Path != u.Path { + t.Errorf("RemoveVolume(%q): Wrong request path. Want %q. Got %q.", name, u.Path, req.URL.Path) + } +} + +func TestRemoveVolumeNotFound(t *testing.T) { + client := newTestClient(&FakeRoundTripper{message: "no such volume", status: http.StatusNotFound}) + if err := client.RemoveVolume("test:"); err != ErrNoSuchVolume { + t.Errorf("RemoveVolume: wrong error. Want %#v. Got %#v.", ErrNoSuchVolume, err) + } +} + +func TestRemoveVolumeInUse(t *testing.T) { + client := newTestClient(&FakeRoundTripper{message: "volume in use and cannot be removed", status: http.StatusConflict}) + if err := client.RemoveVolume("test:"); err != ErrVolumeInUse { + t.Errorf("RemoveVolume: wrong error. Want %#v. Got %#v.", ErrVolumeInUse, err) + } +} diff --git a/vendor/github.com/go-chef/chef/.gitignore b/vendor/github.com/go-chef/chef/.gitignore new file mode 100644 index 000000000..3aeb2e2a1 --- /dev/null +++ b/vendor/github.com/go-chef/chef/.gitignore @@ -0,0 +1,25 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +coverage +coverage.* +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe +*.test diff --git a/vendor/github.com/go-chef/chef/LICENSE b/vendor/github.com/go-chef/chef/LICENSE new file mode 100644 index 000000000..ad410e113 --- /dev/null +++ b/vendor/github.com/go-chef/chef/LICENSE @@ -0,0 +1,201 @@ +Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. 
+ + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. \ No newline at end of file diff --git a/vendor/github.com/go-chef/chef/README.md b/vendor/github.com/go-chef/chef/README.md new file mode 100644 index 000000000..6ac731a97 --- /dev/null +++ b/vendor/github.com/go-chef/chef/README.md @@ -0,0 +1,95 @@ +[![Stories in Ready](https://badge.waffle.io/go-chef/chef.png?label=ready&title=Ready)](https://waffle.io/go-chef/chef) +[![Build Status](https://app.wercker.com/status/9cfd4b53ea24e0894904067f283e4cf8/s "wercker status")](https://app.wercker.com/project/bykey/9cfd4b53ea24e0894904067f283e4cf8) +[![Coverage Status](https://coveralls.io/repos/go-chef/chef/badge.png?branch=master)](https://coveralls.io/r/go-chef/chef?branch=master) + +# Chef Server API Client Library in Golang +This is a Library that you can use to write tools to interact with the chef server. 
+ +## Install + + go get github.com/go-chef/chef + +## Test + + go get -t github.com/go-chef/chef + go test -v github.com/go-chef/chef + +## SSL + + If you run into an SSL verification problem when trying to connect to a ssl server with self signed certs setup your config object with `SkipSSL: true` + +## Usage +This example is setting up a basic client that you can use to interact with all the service endpoints (clients, nodes, cookbooks, etc.) +More usage examples can be found in the [examples](examples) directory. + + package main + + import ( + "encoding/json" + "fmt" + "io/ioutil" + "log" + "os" + + "github.com/go-chef/chef" + ) + + func main() { + // read a client key + key, err := ioutil.ReadFile("key.pem") + if err != nil { + fmt.Println("Couldn't read key.pem:", err) + os.Exit(1) + } + + // build a client + client, err := chef.NewClient(&chef.Config{ + Name: "foo", + Key: string(key), + // goiardi is on port 4545 by default. chef-zero is 8889 + BaseURL: "http://localhost:4545", + }) + if err != nil { + fmt.Println("Issue setting up client:", err) + } + + // List Cookbooks + cookList, err := client.Cookbooks.List() + if err != nil { + fmt.Println("Issue listing cookbooks:", err) + } + + // Print out the list + fmt.Println(cookList) + } + +## CONTRIBUTING + +If you feel like contributing, great! Just fork the repo, make your +improvements, and submit a pull request. Tests would, of course, be appreciated. +Adding tests where there are no tests currently would be even more appreciated. +At least, though, try and not break anything worse than it is. Test coverage has +improved, but is still an ongoing concern. + +## AUTHORS + +| | | +|:--------------|:-----------------------------------------------| +|Jesse Nelson |[@spheromak](https://github.com/spheromak) +|AJ Christensen |[@fujin](https://github.com/fujin) +|Brad Beam |[@bradbeam](https://github.com/bradbeam) +|Kraig Amador |[@bigkraig](https://github.com/bigkraig) + +## COPYRIGHT + +Copyright 2013-2014, Jesse Nelson + +## LICENSE + +Like many Chef ecosystem programs, go-chef/chef is licensed under the Apache 2.0 +License. See the LICENSE file for details. + +Chef is copyright (c) 2008-2014 Chef, Inc. and its various contributors. + +Thanks go out to the fine folks of Opscode and the Chef community for all their +hard work. diff --git a/vendor/github.com/go-chef/chef/acl.go b/vendor/github.com/go-chef/chef/acl.go new file mode 100644 index 000000000..69e2de139 --- /dev/null +++ b/vendor/github.com/go-chef/chef/acl.go @@ -0,0 +1,52 @@ +package chef + +import "fmt" + +type ACLService struct { + client *Client +} + +// ACL represents the native Go version of the deserialized ACL type +type ACL map[string]ACLitems + +// ACLitems +type ACLitems struct { + Groups ACLitem `json:"groups"` + Actors ACLitem `json:"actors"` +} + +// ACLitem +type ACLitem []string + +func NewACL(acltype string, actors, groups ACLitem) (acl *ACL) { + acl = &ACL{ + acltype: ACLitems{ + Actors: actors, + Groups: groups, + }, + } + return +} + +// Get gets an ACL from the Chef server. +// +// Chef API docs: lol +func (a *ACLService) Get(subkind string, name string) (acl ACL, err error) { + url := fmt.Sprintf("%s/%s/_acl", subkind, name) + err = a.client.magicRequestDecoder("GET", url, nil, &acl) + return +} + +// Put updates an ACL on the Chef server. 
+// +// Chef API docs: rofl +func (a *ACLService) Put(subkind, name string, acltype string, item *ACL) (err error) { + url := fmt.Sprintf("%s/%s/_acl/%s", subkind, name, acltype) + body, err := JSONReader(item) + if err != nil { + return + } + + err = a.client.magicRequestDecoder("PUT", url, body, nil) + return +} diff --git a/vendor/github.com/go-chef/chef/acl_test.go b/vendor/github.com/go-chef/chef/acl_test.go new file mode 100644 index 000000000..0924e43ea --- /dev/null +++ b/vendor/github.com/go-chef/chef/acl_test.go @@ -0,0 +1,102 @@ +package chef + +import ( + "fmt" + "net/http" + "reflect" + "testing" +) + +func TestACLService_Get(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/nodes/hostname/_acl", func(w http.ResponseWriter, r *http.Request) { + fmt.Fprintf(w, `{ + "create": { + "actors": [ + "hostname", + "pivotal" + ], + "groups": [ + "clients", + "users", + "admins" + ] + }, + "read": { + "actors": [ + "hostname", + "pivotal" + ], + "groups": [ + "clients", + "users", + "admins" + ] + }, + "update": { + "actors": [ + "hostname", + "pivotal" + ], + "groups": [ + "users", + "admins" + ] + }, + "delete": { + "actors": [ + "hostname", + "pivotal" + ], + "groups": [ + "users", + "admins" + ] + }, + "grant": { + "actors": [ + "hostname", + "pivotal" + ], + "groups": [ + "admins" + ] + } + } + `) + }) + + acl, err := client.ACLs.Get("nodes", "hostname") + if err != nil { + t.Errorf("ACL.Get returned error: %v", err) + } + + want := ACL{ + "create": ACLitems{Groups: []string{"clients", "users", "admins"}, Actors: []string{"hostname", "pivotal"}}, + "read": ACLitems{Groups: []string{"clients", "users", "admins"}, Actors: []string{"hostname", "pivotal"}}, + "update": ACLitems{Groups: []string{"users", "admins"}, Actors: []string{"hostname", "pivotal"}}, + "delete": ACLitems{Groups: []string{"users", "admins"}, Actors: []string{"hostname", "pivotal"}}, + "grant": ACLitems{Groups: []string{"admins"}, Actors: []string{"hostname", "pivotal"}}, + } + + if !reflect.DeepEqual(acl, want) { + t.Errorf("ACL.Get returned %+v, want %+v", acl, want) + } +} + +func TestACLService_Put(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/nodes/hostname/_acl/create", func(w http.ResponseWriter, r *http.Request) { + fmt.Fprintf(w, ``) + }) + + acl := NewACL("create", []string{"pivotal"}, []string{"admins"}) + err := client.ACLs.Put("nodes", "hostname", "create", acl) + if err != nil { + t.Errorf("ACL.Put returned error: %v", err) + } +} diff --git a/vendor/github.com/go-chef/chef/authentication.go b/vendor/github.com/go-chef/chef/authentication.go new file mode 100644 index 000000000..2005e1980 --- /dev/null +++ b/vendor/github.com/go-chef/chef/authentication.go @@ -0,0 +1,122 @@ +package chef + +import ( + "crypto/rsa" + "crypto/sha1" + "encoding/base64" + "errors" + "io" + "math/big" +) + +// GenerateSignature will generate a signature ( sign ) the given data +func GenerateSignature(priv *rsa.PrivateKey, data string) (enc []byte, err error) { + sig, err := privateEncrypt(priv, []byte(data)) + if err != nil { + return nil, err + } + + return sig, nil +} + +// privateEncrypt implements OpenSSL's RSA_private_encrypt function +func privateEncrypt(key *rsa.PrivateKey, data []byte) (enc []byte, err error) { + k := (key.N.BitLen() + 7) / 8 + tLen := len(data) + + // rfc2313, section 8: + // The length of the data D shall not be more than k-11 octets + if tLen > k-11 { + err = errors.New("Data too long") + return + } + em := make([]byte, k) + em[1] = 1 + for i := 2; i < k-tLen-1; 
i++ { + em[i] = 0xff + } + copy(em[k-tLen:k], data) + c := new(big.Int).SetBytes(em) + if c.Cmp(key.N) > 0 { + err = nil + return + } + var m *big.Int + var ir *big.Int + if key.Precomputed.Dp == nil { + m = new(big.Int).Exp(c, key.D, key.N) + } else { + // We have the precalculated values needed for the CRT. + m = new(big.Int).Exp(c, key.Precomputed.Dp, key.Primes[0]) + m2 := new(big.Int).Exp(c, key.Precomputed.Dq, key.Primes[1]) + m.Sub(m, m2) + if m.Sign() < 0 { + m.Add(m, key.Primes[0]) + } + m.Mul(m, key.Precomputed.Qinv) + m.Mod(m, key.Primes[0]) + m.Mul(m, key.Primes[1]) + m.Add(m, m2) + + for i, values := range key.Precomputed.CRTValues { + prime := key.Primes[2+i] + m2.Exp(c, values.Exp, prime) + m2.Sub(m2, m) + m2.Mul(m2, values.Coeff) + m2.Mod(m2, prime) + if m2.Sign() < 0 { + m2.Add(m2, prime) + } + m2.Mul(m2, values.R) + m.Add(m, m2) + } + } + + if ir != nil { + // Unblind. + m.Mul(m, ir) + m.Mod(m, key.N) + } + enc = m.Bytes() + return +} + +// HashStr returns the base64 encoded SHA1 sum of the toHash string +func HashStr(toHash string) string { + h := sha1.New() + io.WriteString(h, toHash) + hashed := base64.StdEncoding.EncodeToString(h.Sum(nil)) + return hashed +} + +// Base64BlockEncode takes a byte slice and breaks it up into a +// slice of base64 encoded strings +func Base64BlockEncode(content []byte, limit int) []string { + resultString := base64.StdEncoding.EncodeToString(content) + var resultSlice []string + + index := 0 + + var maxLengthPerSlice int + + // No limit + if limit == 0 { + maxLengthPerSlice = len(resultString) + } else { + maxLengthPerSlice = limit + } + + // Iterate through the encoded string storing + // a max of per slice item + for i := 0; i < len(resultString)/maxLengthPerSlice; i++ { + resultSlice = append(resultSlice, resultString[index:index+maxLengthPerSlice]) + index += maxLengthPerSlice + } + + // Add remaining chunk to the end of the slice + if len(resultString)%maxLengthPerSlice != 0 { + resultSlice = append(resultSlice, resultString[index:]) + } + + return resultSlice +} diff --git a/vendor/github.com/go-chef/chef/build.sh b/vendor/github.com/go-chef/chef/build.sh new file mode 100644 index 000000000..0ad18b2df --- /dev/null +++ b/vendor/github.com/go-chef/chef/build.sh @@ -0,0 +1,26 @@ +#!/bin/sh + +# fixes build/test problems +if [ ! 
-d $GOPATH/src/github.com/go-chef/chef ]; then + mkdir -p $GOPATH/src/github.com/go-chef + ln -s ./ $GOPATH/src/github.com/go-chef/chef +fi + +set -ex + +# Grab dependencies for coveralls.io integration +go get -u github.com/axw/gocov/gocov +go get -u github.com/mattn/goveralls +go get -u github.com/ctdk/goiardi/chefcrypto +go get -u github.com/ctdk/goiardi/authentication +go get -u github.com/davecgh/go-spew/spew +go get -u github.com/smartystreets/goconvey/convey + +# go test -coverprofile=coverage dependency +go get -u golang.org/x/tools/cmd/cover + +# Overwrite the coverage file +go test -coverprofile=coverage + +# Goveralls +go tool cover -func=coverage diff --git a/vendor/github.com/go-chef/chef/client.go b/vendor/github.com/go-chef/chef/client.go new file mode 100644 index 000000000..7c6fc9443 --- /dev/null +++ b/vendor/github.com/go-chef/chef/client.go @@ -0,0 +1,88 @@ +package chef + +import "fmt" + +type ApiClientService struct { + client *Client +} + +// Client represents the native Go version of the deserialized Client type +type ApiClient struct { + Name string `json:"name"` + ClientName string `json:"clientname"` + OrgName string `json:"orgname"` + Admin bool `json:"admin"` + Validator bool `json:"validator"` + Certificate string `json:"certificate,omitempty"` + PublicKey string `json:"public_key,omitempty"` + PrivateKey string `json:"private_key,omitempty"` + Uri string `json:"uri,omitempty"` + JsonClass string `json:"json_class"` + ChefType string `json:"chef_type"` +} + +type ApiNewClient struct { + Name string `json:"name"` + Admin bool `json:"admin"` +} + +type ApiClientCreateResult struct { + Uri string `json:"uri,omitempty"` + PrivateKey string `json:"private_key,omitempty"` +} + +type ApiClientListResult map[string]string + +// String makes ApiClientListResult implement the string result +func (c ApiClientListResult) String() (out string) { + for k, v := range c { + out += fmt.Sprintf("%s => %s\n", k, v) + } + return out +} + +// List lists the clients in the Chef server. +// +// Chef API docs: https://docs.chef.io/api_chef_server.html#id13 +func (e *ApiClientService) List() (data ApiClientListResult, err error) { + err = e.client.magicRequestDecoder("GET", "clients", nil, &data) + return +} + +// Get gets a client from the Chef server. +// +// Chef API docs: https://docs.chef.io/api_chef_server.html#id16 +func (e *ApiClientService) Get(name string) (client ApiClient, err error) { + url := fmt.Sprintf("clients/%s", name) + err = e.client.magicRequestDecoder("GET", url, nil, &client) + return +} + +// Create makes a Client on the chef server +// +// Chef API docs: https://docs.chef.io/api_chef_server.html#id14 +func (e *ApiClientService) Create(clientName string, admin bool) (data *ApiClientCreateResult, err error) { + post := ApiNewClient{ + Name: clientName, + Admin: admin, + } + body, err := JSONReader(post) + if err != nil { + return + } + + err = e.client.magicRequestDecoder("POST", "clients", body, &data) + return +} + +// Put updates a client on the Chef server. 
+// +// Chef API docs: https://docs.chef.io/api_chef_server.html#id17 + +// Delete removes a client on the Chef server +// +// Chef API docs: https://docs.chef.io/api_chef_server.html#id15 +func (e *ApiClientService) Delete(name string) (err error) { + err = e.client.magicRequestDecoder("DELETE", "clients/"+name, nil, nil) + return +} diff --git a/vendor/github.com/go-chef/chef/client_test.go b/vendor/github.com/go-chef/chef/client_test.go new file mode 100644 index 000000000..ea6174426 --- /dev/null +++ b/vendor/github.com/go-chef/chef/client_test.go @@ -0,0 +1,102 @@ +package chef + +import ( + "encoding/json" + "fmt" + "io" + "log" + "net/http" + "os" + "reflect" + "testing" +) + +var ( + testClientJSON = "test/client.json" +) + +func TestClientFromJSONDecoder(t *testing.T) { + if file, err := os.Open(testClientJSON); err != nil { + t.Error("unexpected error", err, "during os.Open on", testClientJSON) + } else { + dec := json.NewDecoder(file) + var n Client + if err := dec.Decode(&n); err == io.EOF { + log.Println(n) + } else if err != nil { + log.Fatal(err) + } + } +} + +func TestClientsService_List(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/clients", func(w http.ResponseWriter, r *http.Request) { + fmt.Fprintf(w, `{"client1": "http://localhost/clients/client1", "client2": "http://localhost/clients/client2"}`) + }) + + response, err := client.Clients.List() + if err != nil { + t.Errorf("Clients.List returned error: %v", err) + } + + want := "client1 => http://localhost/clients/client1\nclient2 => http://localhost/clients/client2\n" + if response.String() != want { + t.Errorf("Clients.List returned:\n%+v\nwant:\n%+v\n", response.String(), want) + } +} + +func TestClientsService_Get(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/clients/client1", func(w http.ResponseWriter, r *http.Request) { + fmt.Fprintf(w, `{ + "clientname": "client1", + "orgname": "org_name", + "validator": false, + "certificate": "-----BEGIN CERTIFICATE-----", + "name": "node_name" + }`) + }) + + _, err := client.Clients.Get("client1") + if err != nil { + t.Errorf("Clients.Get returned error: %v", err) + } +} + +func TestClientsService_Create(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/clients", func(w http.ResponseWriter, r *http.Request) { + fmt.Fprintf(w, `{"uri": "http://localhost/clients/client", "private_key": "-----BEGIN PRIVATE KEY-----"}`) + }) + + response, err := client.Clients.Create("client", false) + if err != nil { + t.Errorf("Clients.Create returned error: %v", err) + } + + want := &ApiClientCreateResult{Uri: "http://localhost/clients/client", PrivateKey: "-----BEGIN PRIVATE KEY-----"} + if !reflect.DeepEqual(response, want) { + t.Errorf("Clients.Create returned %+v, want %+v", response, want) + } +} + +func TestClientsService_Delete(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/clients/client1", func(w http.ResponseWriter, r *http.Request) { + fmt.Fprintf(w, `{"name": "client1", "json_class": "Chef::Client", "chef_type": "client"}`) + }) + + err := client.Clients.Delete("client1") + if err != nil { + t.Errorf("Clients.Delete returned error: %v", err) + } +} diff --git a/vendor/github.com/go-chef/chef/cookbook.go b/vendor/github.com/go-chef/chef/cookbook.go new file mode 100644 index 000000000..1a725bfbf --- /dev/null +++ b/vendor/github.com/go-chef/chef/cookbook.go @@ -0,0 +1,144 @@ +package chef + +import "fmt" + +// CookbookService is the service for interacting with chef server cookbooks endpoint +type 
CookbookService struct { + client *Client +} + +// CookbookItem represents a object of cookbook file data +type CookbookItem struct { + Url string `json:"url,omitempty"` + Path string `json:"path,omitempty"` + Name string `json:"name,omitempty"` + Checksum string `json:"checksum,omitempty"` + Specificity string `json:"specificity,omitempty"` +} + +// CookbookListResult is the summary info returned by chef-api when listing +// http://docs.opscode.com/api_chef_server.html#cookbooks +type CookbookListResult map[string]CookbookVersions + +// CookbookVersions is the data container returned from the chef server when listing all cookbooks +type CookbookVersions struct { + Url string `json:"url,omitempty"` + Versions []CookbookVersion `json:"versions,omitempty"` +} + +// CookbookVersion is the data for a specific cookbook version +type CookbookVersion struct { + Url string `json:"url,omitempty"` + Version string `json:"version,omitempty"` +} + +// CookbookMeta represents a Golang version of cookbook metadata +type CookbookMeta struct { + Name string `json:"cookbook_name,omitempty"` + Version string `json:"version,omitempty"` + Description string `json:"description,omitempty"` + LongDescription string `json:"long_description,omitempty"` + Maintainer string `json:"maintainer,omitempty"` + MaintainerEmail string `json:"maintainer_email,omitempty"` + License string `json:"license,omitempty"` + Platforms map[string]string `json:"platforms,omitempty"` + Depends map[string]string `json:"dependencies,omitempty"` + Reccomends map[string]string `json:"recommendations,omitempty"` + Suggests map[string]string `json:"suggestions,omitempty"` + Conflicts map[string]string `json:"conflicting,omitempty"` + Provides map[string]string `json:"providing,omitempty"` + Replaces map[string]string `json:"replacing,omitempty"` + Attributes map[string]interface{} `json:"attributes,omitempty"` // this has a format as well that could be typed, but blargh https://github.com/lob/chef/blob/master/cookbooks/apache2/metadata.json + Groupings map[string]interface{} `json:"groupings,omitempty"` // never actually seen this used.. 
looks like it should be map[string]map[string]string, but not sure http://docs.opscode.com/essentials_cookbook_metadata.html
+	Recipes map[string]string `json:"recipes,omitempty"`
+}
+
+// Cookbook represents the native Go version of the deserialized api cookbook
+type Cookbook struct {
+	CookbookName string         `json:"cookbook_name"`
+	Name         string         `json:"name"`
+	Version      string         `json:"version,omitempty"`
+	ChefType     string         `json:"chef_type,omitempty"`
+	Frozen       bool           `json:"frozen?,omitempty"`
+	JsonClass    string         `json:"json_class,omitempty"`
+	Files        []CookbookItem `json:"files,omitempty"`
+	Templates    []CookbookItem `json:"templates,omitempty"`
+	Attributes   []CookbookItem `json:"attributes,omitempty"`
+	Recipes      []CookbookItem `json:"recipes,omitempty"`
+	Definitions  []CookbookItem `json:"definitions,omitempty"`
+	Libraries    []CookbookItem `json:"libraries,omitempty"`
+	Providers    []CookbookItem `json:"providers,omitempty"`
+	Resources    []CookbookItem `json:"resources,omitempty"`
+	RootFiles    []CookbookItem `json:"root_files,omitempty"`
+	Metadata     CookbookMeta   `json:"metadata,omitempty"`
+}
+
+// String makes CookbookListResult implement the string result
+func (c CookbookListResult) String() (out string) {
+	for k, v := range c {
+		out += fmt.Sprintf("%s => %s\n", k, v.Url)
+		for _, i := range v.Versions {
+			out += fmt.Sprintf(" * %s\n", i.Version)
+		}
+	}
+	return out
+}
+
+// versionParams assembles a querystring for the chef api's num_versions
+// This is used to restrict the number of versions returned in the response
+func versionParams(path, numVersions string) string {
+	if numVersions == "0" {
+		numVersions = "all"
+	}
+
+	// need to optionally add numVersion args to the request
+	if len(numVersions) > 0 {
+		path = fmt.Sprintf("%s?num_versions=%s", path, numVersions)
+	}
+	return path
+}
+
+// Get returns a CookbookVersion for a specific cookbook
+// GET /cookbooks/name
+func (c *CookbookService) Get(name string) (data CookbookVersion, err error) {
+	path := fmt.Sprintf("cookbooks/%s", name)
+	err = c.client.magicRequestDecoder("GET", path, nil, &data)
+	return
+}
+
+// GetAvailableVersions returns the versions of a cookbook available on a server
+func (c *CookbookService) GetAvailableVersions(name, numVersions string) (data CookbookListResult, err error) {
+	path := versionParams(fmt.Sprintf("cookbooks/%s", name), numVersions)
+	err = c.client.magicRequestDecoder("GET", path, nil, &data)
+	return
+}
+
+// GetVersion fetches a specific version of a cookbooks data from the server api
+// GET /cookbook/foo/1.2.3
+// GET /cookbook/foo/_latest
+// Chef API docs: http://docs.opscode.com/api_chef_server.html#id5
+func (c *CookbookService) GetVersion(name, version string) (data Cookbook, err error) {
+	url := fmt.Sprintf("cookbooks/%s/%s", name, version)
+	err = c.client.magicRequestDecoder("GET", url, nil, &data)
+	return
+}
+
+// ListAvailableVersions lists the cookbooks available on the server limited to numVersions
+// Chef API docs: http://docs.opscode.com/api_chef_server.html#id2
+func (c *CookbookService) ListAvailableVersions(numVersions string) (data CookbookListResult, err error) {
+	path := versionParams("cookbooks", numVersions)
+	err = c.client.magicRequestDecoder("GET", path, nil, &data)
+	return
+}
+
+// List returns a CookbookListResult with the latest versions of cookbooks available on the server
+func (c *CookbookService) List() (CookbookListResult, error) {
+	return c.ListAvailableVersions("")
+}
+
+// Delete removes a cookbook from the server (note: the version argument is not used in the request path)
+func (c *CookbookService) Delete(name, version
string) (err error) { + path := fmt.Sprintf("cookbooks/%s", name) + err = c.client.magicRequestDecoder("DELETE", path, nil, nil) + return +} diff --git a/vendor/github.com/go-chef/chef/cookbook_test.go b/vendor/github.com/go-chef/chef/cookbook_test.go new file mode 100644 index 000000000..f0987a06c --- /dev/null +++ b/vendor/github.com/go-chef/chef/cookbook_test.go @@ -0,0 +1,139 @@ +package chef + +import ( + "fmt" + "io/ioutil" + "net/http" + //"os" + "testing" +) + +const cookbookListResponseFile = "test/cookbooks_response.json" +const cookbookTestFile = "test/cookbook.json" + +func TestCookbookList(t *testing.T) { + setup() + defer teardown() + + file, err := ioutil.ReadFile(cookbookListResponseFile) + if err != nil { + t.Error(err) + } + + mux.HandleFunc("/cookbooks", func(w http.ResponseWriter, r *http.Request) { + fmt.Fprintf(w, string(file)) + }) + + data, err := client.Cookbooks.List() + if err != nil { + t.Error(err) + } + + if data == nil { + t.Fatal("WTF we should have some data") + } + fmt.Println(data) + + _, err = client.Cookbooks.ListAvailableVersions("3") + if err != nil { + t.Error(err) + } + + _, err = client.Cookbooks.ListAvailableVersions("0") + if err != nil { + t.Error(err) + } +} + +func TestCookbookListAvailableVersions_0(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/cookbooks", func(w http.ResponseWriter, r *http.Request) { + http.Error(w, "BAD FUCKING REQUEST", 503) + }) + + _, err := client.Cookbooks.ListAvailableVersions("2") + if err == nil { + t.Error("We expected this bad request to error", err) + } +} + +func TestCookBookDelete(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/cookbooks/good", func(w http.ResponseWriter, r *http.Request) { + fmt.Fprintf(w, "") + }) + mux.HandleFunc("/cookbooks/bad", func(w http.ResponseWriter, r *http.Request) { + http.Error(w, "Not Found", 404) + }) + + err := client.Cookbooks.Delete("bad", "1.1.1") + if err == nil { + t.Error("We expected this bad request to error", err) + } + + err = client.Cookbooks.Delete("good", "1.1.1") + if err != nil { + t.Error(err) + } +} + +func TestCookBookGet(t *testing.T) { + setup() + defer teardown() + + cookbookVerionJSON := `{"url": "http://localhost:4000/cookbooks/apache2/5.1.0", "version": "5.1.0"}` + mux.HandleFunc("/cookbooks/good", func(w http.ResponseWriter, r *http.Request) { + fmt.Fprintf(w, cookbookVerionJSON) + }) + mux.HandleFunc("/cookbooks/bad", func(w http.ResponseWriter, r *http.Request) { + http.Error(w, "Not Found", 404) + }) + + data, err := client.Cookbooks.Get("good") + if err != nil { + t.Error(err) + } + + if data.Version != "5.1.0" { + t.Errorf("We expected '5.1.0' and got '%s'\n", data.Version) + } + + _, err = client.Cookbooks.Get("bad") + if err == nil { + t.Error("We expected this bad request to error", err) + } +} + +func TestCookBookGetAvailableVersions(t *testing.T) { + setup() + defer teardown() + + cookbookVerionsJSON := ` + { "apache2": { + "url": "http://localhost:4000/cookbooks/apache2", + "versions": [ + {"url": "http://localhost:4000/cookbooks/apache2/5.1.0", + "version": "5.1.0"}, + {"url": "http://localhost:4000/cookbooks/apache2/4.2.0", + "version": "4.2.0"} + ] + }}` + + mux.HandleFunc("/cookbooks/good", func(w http.ResponseWriter, r *http.Request) { + fmt.Fprintf(w, cookbookVerionsJSON) + }) + mux.HandleFunc("/cookbooks/bad", func(w http.ResponseWriter, r *http.Request) { + http.Error(w, "Not Found", 404) + }) + + data, err := client.Cookbooks.GetAvailableVersions("good", "3") + if err != nil { + t.Error(err) + 
} + fmt.Println(data) + +} diff --git a/vendor/github.com/go-chef/chef/databag.go b/vendor/github.com/go-chef/chef/databag.go new file mode 100644 index 000000000..4ad8e3e15 --- /dev/null +++ b/vendor/github.com/go-chef/chef/databag.go @@ -0,0 +1,108 @@ +package chef + +import "fmt" + +// DataBagService is the service for interacting with the chef server data endpoint +type DataBagService struct { + client *Client +} + +// DataBagItem is a data bag item +type DataBagItem interface{} + +// DataBag is a data bag +type DataBag struct { + Name string `json:"name"` + JsonClass string `json:"json_class"` + ChefType string `json:"chef_type"` +} + +type DataBagCreateResult struct { + URI string `json:"uri"` +} + +// DataBagListResult is the list of data bags returned by chef-api when listing +// http://docs.getchef.com/api_chef_server.html#data +type DataBagListResult map[string]string + +// String makes DataBagListResult implement the string result +func (d DataBagListResult) String() (out string) { + for k, v := range d { + out += fmt.Sprintf("%s => %s\n", k, v) + } + return out +} + +// List returns a list of databags on the server +// Chef API Docs: http://docs.getchef.com/api_chef_server.html#id18 +func (d *DataBagService) List() (data *DataBagListResult, err error) { + path := fmt.Sprintf("data") + err = d.client.magicRequestDecoder("GET", path, nil, &data) + return +} + +// Create adds a data bag to the server +// Chef API Docs: http://docs.getchef.com/api_chef_server.html#id19 +func (d *DataBagService) Create(databag *DataBag) (result *DataBagCreateResult, err error) { + body, err := JSONReader(databag) + if err != nil { + return + } + + err = d.client.magicRequestDecoder("POST", "data", body, &result) + return +} + +// Delete removes a data bag from the server +// Chef API Docs: ???????????????? +func (d *DataBagService) Delete(name string) (result *DataBag, err error) { + path := fmt.Sprintf("data/%s", name) + err = d.client.magicRequestDecoder("DELETE", path, nil, &result) + return +} + +// ListItems gets a list of the items in a data bag. 
+// Chef API Docs: http://docs.getchef.com/api_chef_server.html#id20 +func (d *DataBagService) ListItems(name string) (data *DataBagListResult, err error) { + path := fmt.Sprintf("data/%s", name) + err = d.client.magicRequestDecoder("GET", path, nil, &data) + return +} + +// CreateItem adds an item to a data bag +// Chef API Docs: http://docs.getchef.com/api_chef_server.html#id21 +func (d *DataBagService) CreateItem(databagName string, databagItem DataBagItem) (err error) { + body, err := JSONReader(databagItem) + if err != nil { + return + } + path := fmt.Sprintf("data/%s", databagName) + return d.client.magicRequestDecoder("POST", path, body, nil) +} + +// DeleteItem deletes an item from a data bag +// Chef API Docs: http://docs.getchef.com/api_chef_server.html#id22 +func (d *DataBagService) DeleteItem(databagName, databagItem string) (err error) { + path := fmt.Sprintf("data/%s/%s", databagName, databagItem) + err = d.client.magicRequestDecoder("DELETE", path, nil, nil) + return +} + +// GetItem gets an item from a data bag +// Chef API Docs: http://docs.getchef.com/api_chef_server.html#id23 +func (d *DataBagService) GetItem(databagName, databagItem string) (item DataBagItem, err error) { + path := fmt.Sprintf("data/%s/%s", databagName, databagItem) + err = d.client.magicRequestDecoder("GET", path, nil, &item) + return +} + +// UpdateItem updates an item in a data bag +// Chef API Docs: http://docs.getchef.com/api_chef_server.html#id24 +func (d *DataBagService) UpdateItem(databagName, databagItemId string, databagItem DataBagItem) (err error) { + body, err := JSONReader(databagItem) + if err != nil { + return + } + path := fmt.Sprintf("data/%s/%s", databagName, databagItemId) + return d.client.magicRequestDecoder("PUT", path, body, nil) +} diff --git a/vendor/github.com/go-chef/chef/databag_test.go b/vendor/github.com/go-chef/chef/databag_test.go new file mode 100644 index 000000000..5ea751461 --- /dev/null +++ b/vendor/github.com/go-chef/chef/databag_test.go @@ -0,0 +1,164 @@ +package chef + +import ( + "fmt" + "net/http" + "reflect" + "testing" +) + +func TestDataBagsService_List(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/data", func(w http.ResponseWriter, r *http.Request) { + fmt.Fprintf(w, `{"bag1":"http://localhost/data/bag1", "bag2":"http://localhost/data/bag2"}`) + }) + + databags, err := client.DataBags.List() + if err != nil { + t.Errorf("DataBags.List returned error: %v", err) + } + + want := &DataBagListResult{"bag1": "http://localhost/data/bag1", "bag2": "http://localhost/data/bag2"} + if !reflect.DeepEqual(databags, want) { + t.Errorf("DataBags.List returned %+v, want %+v", databags, want) + } +} + +func TestDataBagsService_Create(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/data", func(w http.ResponseWriter, r *http.Request) { + fmt.Fprintf(w, `{"uri": "http://localhost/data/newdatabag"}`) + }) + + databag := &DataBag{Name: "newdatabag"} + response, err := client.DataBags.Create(databag) + if err != nil { + t.Errorf("DataBags.Create returned error: %v", err) + } + + want := &DataBagCreateResult{URI: "http://localhost/data/newdatabag"} + if !reflect.DeepEqual(response, want) { + t.Errorf("DataBags.Create returned %+v, want %+v", response, want) + } +} + +func TestDataBagsService_Delete(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/data/databag", func(w http.ResponseWriter, r *http.Request) { + fmt.Fprintf(w, `{"name": "databag", "json_class": "Chef::DataBag", "chef_type": "data_bag"}`) + }) + + response, err := 
client.DataBags.Delete("databag") + if err != nil { + t.Errorf("DataBags.Delete returned error: %v", err) + } + + want := &DataBag{ + Name: "databag", + JsonClass: "Chef::DataBag", + ChefType: "data_bag", + } + + if !reflect.DeepEqual(response, want) { + t.Errorf("DataBags.Delete returned %+v, want %+v", response, want) + } +} + +func TestDataBagsService_ListItems(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/data/bag1", func(w http.ResponseWriter, r *http.Request) { + fmt.Fprintf(w, `{"item1":"http://localhost/data/bag1/item1", "item2":"http://localhost/data/bag1/item2"}`) + }) + + databags, err := client.DataBags.ListItems("bag1") + if err != nil { + t.Errorf("DataBags.ListItems returned error: %v", err) + } + + want := &DataBagListResult{"item1": "http://localhost/data/bag1/item1", "item2": "http://localhost/data/bag1/item2"} + if !reflect.DeepEqual(databags, want) { + t.Errorf("DataBags.ListItems returned %+v, want %+v", databags, want) + } +} + +func TestDataBagsService_GetItem(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/data/bag1/item1", func(w http.ResponseWriter, r *http.Request) { + fmt.Fprintf(w, `{"id":"item1", "stuff":"things"}`) + }) + + _, err := client.DataBags.GetItem("bag1", "item1") + if err != nil { + t.Errorf("DataBags.GetItem returned error: %v", err) + } +} + +func TestDataBagsService_CreateItem(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/data/bag1", func(w http.ResponseWriter, r *http.Request) { + fmt.Fprintf(w, ``) + }) + + dbi := map[string]string{ + "id": "item1", + "foo": "test123", + } + + err := client.DataBags.CreateItem("bag1", dbi) + if err != nil { + t.Errorf("DataBags.CreateItem returned error: %v", err) + } +} + +func TestDataBagsService_DeleteItem(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/data/bag1/item1", func(w http.ResponseWriter, r *http.Request) { + fmt.Fprintf(w, ``) + }) + + err := client.DataBags.DeleteItem("bag1", "item1") + if err != nil { + t.Errorf("DataBags.DeleteItem returned error: %v", err) + } +} + +func TestDataBagsService_UpdateItem(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/data/bag1/item1", func(w http.ResponseWriter, r *http.Request) { + fmt.Fprintf(w, ``) + }) + + dbi := map[string]string{ + "id": "item1", + "foo": "test123", + } + + err := client.DataBags.UpdateItem("bag1", "item1", dbi) + if err != nil { + t.Errorf("DataBags.UpdateItem returned error: %v", err) + } +} + +func TestDataBagsService_DataBagListResultString(t *testing.T) { + e := &DataBagListResult{"bag1": "http://localhost/data/bag1", "bag2": "http://localhost/data/bag2"} + want := "bag1 => http://localhost/data/bag1\nbag2 => http://localhost/data/bag2\n" + if e.String() != want { + t.Errorf("DataBagListResult.String returned:\n%+v\nwant:\n%+v\n", e.String(), want) + } +} diff --git a/vendor/github.com/go-chef/chef/debug.go b/vendor/github.com/go-chef/chef/debug.go new file mode 100644 index 000000000..d59297c00 --- /dev/null +++ b/vendor/github.com/go-chef/chef/debug.go @@ -0,0 +1,9 @@ +// +build debug + +package chef + +import "log" + +func debug(fmt string, args ...interface{}) { + log.Printf(fmt, args...) +} diff --git a/vendor/github.com/go-chef/chef/doc.go b/vendor/github.com/go-chef/chef/doc.go new file mode 100644 index 000000000..71c4fe5de --- /dev/null +++ b/vendor/github.com/go-chef/chef/doc.go @@ -0,0 +1,96 @@ +/* +This is a chef server api client. +This Library can be used to write tools to interact with the chef server. 
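+
+The Client type exposes one service object per chef-server endpoint (for
+example Nodes, Environments, DataBags, Roles, Sandboxes and Search); each
+service issues signed requests through the shared plumbing in http.go.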
+
+Tests can be run with `go test`, and the client can be fetched as usual via `go get github.com/go-chef/chef`.
+Documentation can be found on GoDoc at http://godoc.org/github.com/go-chef/chef
+
+The following example creates a new node on the chef-server:
+
+
+	package main
+
+	import (
+		"encoding/json"
+		"fmt"
+		"io/ioutil"
+		"log"
+		"os"
+
+		"github.com/go-chef/chef"
+	)
+
+	func main() {
+		// read a client key
+		key, err := ioutil.ReadFile("key.pem")
+		if err != nil {
+			fmt.Println("Couldn't read key.pem:", err)
+			os.Exit(1)
+		}
+
+		// build a client
+		client, err := chef.NewClient(&chef.Config{
+			Name: "foo",
+			Key:  string(key),
+			// goiardi is on port 4545 by default. chef-zero is 8889
+			BaseURL: "http://localhost:4545",
+		})
+		if err != nil {
+			fmt.Println("Issue setting up client:", err)
+			os.Exit(1)
+		}
+
+		// Create a Node object
+		// TODO: should have a constructor for this
+		ranjib := chef.Node{
+			Name:        "ranjib",
+			Environment: "_default",
+			ChefType:    "node",
+			JsonClass:   "Chef::Node",
+			RunList:     []string{"pwn"},
+		}
+
+		// Create
+		_, err = client.Nodes.Post(ranjib)
+		if err != nil {
+			log.Fatal("Couldn't create node. ", err)
+		}
+
+		// List nodes
+		nodeList, err := client.Nodes.List()
+		if err != nil {
+			log.Fatal("Couldn't list nodes: ", err)
+		}
+
+		// dump the node list in JSON
+		jsonData, err := json.MarshalIndent(nodeList, "", "\t")
+		os.Stdout.Write(jsonData)
+		os.Stdout.WriteString("\n")
+
+		// dump the ranjib node we got from the server in JSON
+		serverNode, err := client.Nodes.Get("ranjib")
+		if err != nil {
+			log.Fatal("Couldn't get node: ", err)
+		}
+		jsonData, err = json.MarshalIndent(serverNode, "", "\t")
+		os.Stdout.Write(jsonData)
+		os.Stdout.WriteString("\n")
+
+		// update node
+		ranjib.RunList = append(ranjib.RunList, "recipe[works]")
+		jsonData, err = json.MarshalIndent(ranjib, "", "\t")
+		os.Stdout.Write(jsonData)
+		os.Stdout.WriteString("\n")
+
+		_, err = client.Nodes.Put(ranjib)
+		if err != nil {
+			log.Fatal("Couldn't update node: ", err)
+		}
+
+		// Delete node ignoring errors :)
+		client.Nodes.Delete(ranjib.Name)
+
+	}
+
+*/
+package chef
diff --git a/vendor/github.com/go-chef/chef/environment.go b/vendor/github.com/go-chef/chef/environment.go
new file mode 100644
index 000000000..b25e9d76a
--- /dev/null
+++ b/vendor/github.com/go-chef/chef/environment.go
@@ -0,0 +1,114 @@
+package chef
+
+import "fmt"
+import "sort"
+
+// EnvironmentService handles communication with the environment related endpoints of the Chef server
+type EnvironmentService struct {
+	client *Client
+}
+
+type EnvironmentResult map[string]string
+
+// Environment represents the native Go version of the deserialized Environment type
+type Environment struct {
+	Name               string            `json:"name"`
+	Description        string            `json:"description"`
+	ChefType           string            `json:"chef_type"`
+	Attributes         interface{}       `json:"attributes,omitempty"`
+	DefaultAttributes  interface{}       `json:"default_attributes,omitempty"`
+	OverrideAttributes interface{}       `json:"override_attributes,omitempty"`
+	JsonClass          string            `json:"json_class,omitempty"`
+	CookbookVersions   map[string]string `json:"cookbook_versions"`
+}
+
+type EnvironmentCookbookResult map[string]CookbookVersions
+
+func strMapToStr(e map[string]string) (out string) {
+	keys := make([]string, 0, len(e))
+	for k := range e {
+		keys = append(keys, k)
+	}
+	sort.Strings(keys)
+	for _, k := range keys {
+		if k == "" {
+			continue
+		}
+		out += fmt.Sprintf("%s => %s\n", k, e[k])
+	}
+	return
+}
+
+// String implements fmt.Stringer for EnvironmentResult
+func (e EnvironmentResult) String() (out string) {
+	return 
strMapToStr(e)
+}
+
+// List lists the environments in the Chef server.
+//
+// Chef API docs: http://docs.getchef.com/api_chef_server.html#id14
+func (e *EnvironmentService) List() (data *EnvironmentResult, err error) {
+	err = e.client.magicRequestDecoder("GET", "environments", nil, &data)
+	return
+}
+
+// Create an environment in the Chef server.
+//
+// Chef API docs: http://docs.getchef.com/api_chef_server.html#id15
+func (e *EnvironmentService) Create(environment *Environment) (data *EnvironmentResult, err error) {
+	body, err := JSONReader(environment)
+	if err != nil {
+		return
+	}
+
+	err = e.client.magicRequestDecoder("POST", "environments", body, &data)
+	return
+}
+
+// Delete an environment from the Chef server (documented here, but not yet implemented by this client).
+//
+// Chef API docs: http://docs.getchef.com/api_chef_server.html#id16
+
+// Get gets an environment from the Chef server.
+//
+// Chef API docs: http://docs.getchef.com/api_chef_server.html#id17
+func (e *EnvironmentService) Get(name string) (data *Environment, err error) {
+	path := fmt.Sprintf("environments/%s", name)
+	err = e.client.magicRequestDecoder("GET", path, nil, &data)
+	return
+}
+
+// Put updates an environment on the Chef server.
+//
+// Chef API docs: http://docs.getchef.com/api_chef_server.html#id18
+func (e *EnvironmentService) Put(environment *Environment) (data *Environment, err error) {
+	path := fmt.Sprintf("environments/%s", environment.Name)
+	body, err := JSONReader(environment)
+	if err != nil {
+		return
+	}
+
+	err = e.client.magicRequestDecoder("PUT", path, body, &data)
+	return
+}
+
+// ListCookbooks gets the cookbook versions available to the given environment from the Chef server.
+//
+// Chef API docs: http://docs.getchef.com/api_chef_server.html#id19
+func (e *EnvironmentService) ListCookbooks(name string, numVersions string) (data EnvironmentCookbookResult, err error) {
+	path := versionParams(fmt.Sprintf("environments/%s/cookbooks", name), numVersions)
+	err = e.client.magicRequestDecoder("GET", path, nil, &data)
+	return
+}
+
+// Get a hash of cookbooks and cookbook versions (including all dependencies) that
+// are required by the run_list array. Version constraints may be specified using
+// the @ symbol after the cookbook name as a delimiter. Version constraints may also
+// be present when the cookbook_versions attribute is specified for an environment
+// or when dependencies are specified by a cookbook.
+//
+// Chef API docs: http://docs.getchef.com/api_chef_server.html#id20
+
+// Get a list of cookbooks and cookbook versions that are available to the specified environment.
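+// (This appears to be the same environments/NAME/cookbooks endpoint served by ListCookbooks above.)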
+// +// Chef API docs: http://docs.getchef.com/api_chef_server.html#id21 diff --git a/vendor/github.com/go-chef/chef/environment_test.go b/vendor/github.com/go-chef/chef/environment_test.go new file mode 100644 index 000000000..1bb74705d --- /dev/null +++ b/vendor/github.com/go-chef/chef/environment_test.go @@ -0,0 +1,158 @@ +package chef + +import ( + "encoding/json" + "fmt" + _ "github.com/davecgh/go-spew/spew" + "io" + "log" + "net/http" + "os" + "reflect" + "testing" +) + +var ( + testEnvironmentJSON = "test/environment.json" +) + +// BUG(fujin): re-do with goconvey +func TestEnvironmentFromJSONDecoder(t *testing.T) { + if file, err := os.Open(testEnvironmentJSON); err != nil { + t.Error("unexpected error", err, "during os.Open on", testEnvironmentJSON) + } else { + dec := json.NewDecoder(file) + var e Environment + if err := dec.Decode(&e); err == io.EOF { + log.Println(e) + } else if err != nil { + log.Fatal(err) + } + } +} + +func TestEnvironmentsService_List(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/environments", func(w http.ResponseWriter, r *http.Request) { + fmt.Fprintf(w, `{"_default":"blah", "development":"blah"}`) + }) + + environments, err := client.Environments.List() + if err != nil { + t.Errorf("Environments.List returned error: %v", err) + } + + want := &EnvironmentResult{"_default": "blah", "development": "blah"} + if !reflect.DeepEqual(environments, want) { + //spew.Dump(environments) + //spew.Dump(want) + t.Errorf("Environments.List returned %+v, want %+v", environments, want) + } +} + +func TestEnvironmentsService_Get(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/environments/testenvironment", func(w http.ResponseWriter, r *http.Request) { + fmt.Fprintf(w, `{ + "name": "testenvironment", + "json_class": "Chef::Environment", + "chef_type": "environment" + }`) + }) + + environments, err := client.Environments.Get("testenvironment") + if err != nil { + t.Errorf("Environments.Get returned error: %v", err) + } + + want := &Environment{ + Name: "testenvironment", + JsonClass: "Chef::Environment", + ChefType: "environment", + } + + if !reflect.DeepEqual(environments, want) { + t.Errorf("Environments.Get returned %+v, want %+v", environments, want) + } +} + +func TestEnvironmentsService_Create(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/environments", func(w http.ResponseWriter, r *http.Request) { + fmt.Fprintf(w, `{ "uri": "http://localhost:4000/environments/dev" }`) + }) + + role := &Environment{ + Name: "dev", + ChefType: "environment", + JsonClass: "Chef::Environment", + Attributes: "", + Description: "", + CookbookVersions: map[string]string{}, + } + + uri, err := client.Environments.Create(role) + if err != nil { + t.Errorf("Environments.Create returned error: %v", err) + } + + want := &EnvironmentResult{"uri": "http://localhost:4000/environments/dev"} + + if !reflect.DeepEqual(uri, want) { + t.Errorf("Environments.Create returned %+v, want %+v", uri, want) + } +} + +func TestEnvironmentsService_Put(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/environments/dev", func(w http.ResponseWriter, r *http.Request) { + fmt.Fprintf(w, `{ + "name": "dev", + "json_class": "Chef::Environment", + "description": "The Dev Environment", + "cookbook_versions": {}, + "chef_type": "environment" + }`) + }) + + environment := &Environment{ + Name: "dev", + ChefType: "environment", + JsonClass: "Chef::Environment", + Description: "The Dev Environment", + CookbookVersions: map[string]string{}, + } + + 
updatedEnvironment, err := client.Environments.Put(environment) + if err != nil { + t.Errorf("Environments.Put returned error: %v", err) + } + + if !reflect.DeepEqual(updatedEnvironment, environment) { + t.Errorf("Environments.Put returned %+v, want %+v", updatedEnvironment, environment) + } +} + +func TestEnvironmentsService_EnvironmentListResultString(t *testing.T) { + e := &EnvironmentResult{"_default": "https://api.opscode.com/organizations/org_name/environments/_default", "webserver": "https://api.opscode.com/organizations/org_name/environments/webserver"} + want := "_default => https://api.opscode.com/organizations/org_name/environments/_default\nwebserver => https://api.opscode.com/organizations/org_name/environments/webserver\n" + if e.String() != want { + t.Errorf("EnvironmentResult.String returned:\n%+v\nwant:\n%+v\n", e.String(), want) + } +} + +func TestEnvironmentsService_EnvironmentCreateResultString(t *testing.T) { + e := &EnvironmentResult{"uri": "http://localhost:4000/environments/dev"} + want := "uri => http://localhost:4000/environments/dev\n" + if e.String() != want { + t.Errorf("EnvironmentResult.String returned %+v, want %+v", e.String(), want) + } +} diff --git a/vendor/github.com/go-chef/chef/examples/cookbooks/cookbooks.go b/vendor/github.com/go-chef/chef/examples/cookbooks/cookbooks.go new file mode 100644 index 000000000..d9cb1ebcc --- /dev/null +++ b/vendor/github.com/go-chef/chef/examples/cookbooks/cookbooks.go @@ -0,0 +1,59 @@ +package main + +import ( + "fmt" + "io/ioutil" + "os" + + "github.com/go-chef/chef" +) + +func main() { + // simple arg parsing + //cookPath := flag.String("cookbook", "c", "Path to cookbook for upload") + // flag.Parse() + + // read a client key + key, err := ioutil.ReadFile("key.pem") + if err != nil { + fmt.Println("Couldn't read key.pem:", err) + os.Exit(1) + } + + // build a client + client, err := chef.NewClient(&chef.Config{ + Name: "foo", + Key: string(key), + // goiardi is on port 4545 by default. chef-zero is 8889 + BaseURL: "http://localhost:8443", + }) + if err != nil { + fmt.Println("Issue setting up client:", err) + os.Exit(1) + } + + // List Cookbooks + cookList, err := client.Cookbooks.List() + if err != nil { + fmt.Println("Issue listing cookbooks:", err) + } + + // Print out the list + fmt.Println(cookList) + /* + *'parse' metadata... + this would prefer .json over .rb + if it's .rb lets maybe try to eval it ? 
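+	  (.json metadata could be unmarshalled directly; .rb would need a ruby eval)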
+ otherwise just extract name/version if they exist + */ + + /* + + + * generate sums + * create sandbox + * upload to sandbox + * + */ + +} diff --git a/vendor/github.com/go-chef/chef/examples/cookbooks/key.pem b/vendor/github.com/go-chef/chef/examples/cookbooks/key.pem new file mode 100644 index 000000000..f44ac76ec --- /dev/null +++ b/vendor/github.com/go-chef/chef/examples/cookbooks/key.pem @@ -0,0 +1,27 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIEpAIBAAKCAQEAx12nDxxOwSPHRSJEDz67a0folBqElzlu2oGMiUTS+dqtj3FU +h5lJc1MjcprRVxcDVwhsSSo9948XEkk39IdblUCLohucqNMzOnIcdZn8zblN7Cnp +W03UwRM0iWX1HuwHnGvm6PKeqKGqplyIXYO0qlDWCzC+VaxFTwOUk31MfOHJQn4y +fTrfuE7h3FTElLBu065SFp3dPICIEmWCl9DadnxbnZ8ASxYQ9xG7hmZduDgjNW5l +3x6/EFkpym+//D6AbWDcVJ1ovCsJL3CfH/NZC3ekeJ/aEeLxP/vaCSH1VYC5VsYK +5Qg7SIa6Nth3+RZz1hYOoBJulEzwljznwoZYRQIDAQABAoIBADPQol+qAsnty5er +PTcdHcbXLJp5feZz1dzSeL0gdxja/erfEJIhg9aGUBs0I55X69VN6h7l7K8PsHZf +MzzJhUL4QJJETOYP5iuVhtIF0I+DTr5Hck/5nYcEv83KAvgjbiL4ZE486IF5awnL +2OE9HtJ5KfhEleNcX7MWgiIHGb8G1jCqu/tH0GI8Z4cNgUrXMbczGwfbN/5Wc0zo +Dtpe0Tec/Fd0DLFwRiAuheakPjlVWb7AGMDX4TyzCXfMpS1ul2jk6nGFk77uQozF +PQUawCRp+mVS4qecgq/WqfTZZbBlW2L18/kpafvsxG8kJ7OREtrb0SloZNFHEc2Q +70GbgKECgYEA6c/eOrI3Uour1gKezEBFmFKFH6YS/NZNpcSG5PcoqF6AVJwXg574 +Qy6RatC47e92be2TT1Oyplntj4vkZ3REv81yfz/tuXmtG0AylH7REbxubxAgYmUT +18wUAL4s3TST2AlK4R29KwBadwUAJeOLNW+Rc4xht1galsqQRb4pUzkCgYEA2kj2 +vUhKAB7QFCPST45/5q+AATut8WeHnI+t1UaiZoK41Jre8TwlYqUgcJ16Q0H6KIbJ +jlEZAu0IsJxjQxkD4oJgv8n5PFXdc14HcSQ512FmgCGNwtDY/AT7SQP3kOj0Rydg +N02uuRb/55NJ07Bh+yTQNGA+M5SSnUyaRPIAMW0CgYBgVU7grDDzB60C/g1jZk/G +VKmYwposJjfTxsc1a0gLJvSE59MgXc04EOXFNr4a+oC3Bh2dn4SJ2Z9xd1fh8Bur +UwCLwVE3DBTwl2C/ogiN4C83/1L4d2DXlrPfInvloBYR+rIpUlFweDLNuve2pKvk +llU9YGeaXOiHnGoY8iKgsQKBgQDZKMOHtZYhHoZlsul0ylCGAEz5bRT0V8n7QJlw +12+TSjN1F4n6Npr+00Y9ov1SUh38GXQFiLq4RXZitYKu6wEJZCm6Q8YXd1jzgDUp +IyAEHNsrV7Y/fSSRPKd9kVvGp2r2Kr825aqQasg16zsERbKEdrBHmwPmrsVZhi7n +rlXw1QKBgQDBOyUJKQOgDE2u9EHybhCIbfowyIE22qn9a3WjQgfxFJ+aAL9Bg124 +fJIEzz43fJ91fe5lTOgyMF5TtU5ClAOPGtlWnXU0e5j3L4LjbcqzEbeyxvP3sn1z +dYkX7NdNQ5E6tcJZuJCGq0HxIAQeKPf3x9DRKzMnLply6BEzyuAC4g== +-----END RSA PRIVATE KEY----- diff --git a/vendor/github.com/go-chef/chef/examples/nodes/key.pem b/vendor/github.com/go-chef/chef/examples/nodes/key.pem new file mode 100644 index 000000000..f44ac76ec --- /dev/null +++ b/vendor/github.com/go-chef/chef/examples/nodes/key.pem @@ -0,0 +1,27 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIEpAIBAAKCAQEAx12nDxxOwSPHRSJEDz67a0folBqElzlu2oGMiUTS+dqtj3FU +h5lJc1MjcprRVxcDVwhsSSo9948XEkk39IdblUCLohucqNMzOnIcdZn8zblN7Cnp +W03UwRM0iWX1HuwHnGvm6PKeqKGqplyIXYO0qlDWCzC+VaxFTwOUk31MfOHJQn4y +fTrfuE7h3FTElLBu065SFp3dPICIEmWCl9DadnxbnZ8ASxYQ9xG7hmZduDgjNW5l +3x6/EFkpym+//D6AbWDcVJ1ovCsJL3CfH/NZC3ekeJ/aEeLxP/vaCSH1VYC5VsYK +5Qg7SIa6Nth3+RZz1hYOoBJulEzwljznwoZYRQIDAQABAoIBADPQol+qAsnty5er +PTcdHcbXLJp5feZz1dzSeL0gdxja/erfEJIhg9aGUBs0I55X69VN6h7l7K8PsHZf +MzzJhUL4QJJETOYP5iuVhtIF0I+DTr5Hck/5nYcEv83KAvgjbiL4ZE486IF5awnL +2OE9HtJ5KfhEleNcX7MWgiIHGb8G1jCqu/tH0GI8Z4cNgUrXMbczGwfbN/5Wc0zo +Dtpe0Tec/Fd0DLFwRiAuheakPjlVWb7AGMDX4TyzCXfMpS1ul2jk6nGFk77uQozF +PQUawCRp+mVS4qecgq/WqfTZZbBlW2L18/kpafvsxG8kJ7OREtrb0SloZNFHEc2Q +70GbgKECgYEA6c/eOrI3Uour1gKezEBFmFKFH6YS/NZNpcSG5PcoqF6AVJwXg574 +Qy6RatC47e92be2TT1Oyplntj4vkZ3REv81yfz/tuXmtG0AylH7REbxubxAgYmUT +18wUAL4s3TST2AlK4R29KwBadwUAJeOLNW+Rc4xht1galsqQRb4pUzkCgYEA2kj2 +vUhKAB7QFCPST45/5q+AATut8WeHnI+t1UaiZoK41Jre8TwlYqUgcJ16Q0H6KIbJ +jlEZAu0IsJxjQxkD4oJgv8n5PFXdc14HcSQ512FmgCGNwtDY/AT7SQP3kOj0Rydg +N02uuRb/55NJ07Bh+yTQNGA+M5SSnUyaRPIAMW0CgYBgVU7grDDzB60C/g1jZk/G 
+VKmYwposJjfTxsc1a0gLJvSE59MgXc04EOXFNr4a+oC3Bh2dn4SJ2Z9xd1fh8Bur +UwCLwVE3DBTwl2C/ogiN4C83/1L4d2DXlrPfInvloBYR+rIpUlFweDLNuve2pKvk +llU9YGeaXOiHnGoY8iKgsQKBgQDZKMOHtZYhHoZlsul0ylCGAEz5bRT0V8n7QJlw +12+TSjN1F4n6Npr+00Y9ov1SUh38GXQFiLq4RXZitYKu6wEJZCm6Q8YXd1jzgDUp +IyAEHNsrV7Y/fSSRPKd9kVvGp2r2Kr825aqQasg16zsERbKEdrBHmwPmrsVZhi7n +rlXw1QKBgQDBOyUJKQOgDE2u9EHybhCIbfowyIE22qn9a3WjQgfxFJ+aAL9Bg124 +fJIEzz43fJ91fe5lTOgyMF5TtU5ClAOPGtlWnXU0e5j3L4LjbcqzEbeyxvP3sn1z +dYkX7NdNQ5E6tcJZuJCGq0HxIAQeKPf3x9DRKzMnLply6BEzyuAC4g== +-----END RSA PRIVATE KEY----- diff --git a/vendor/github.com/go-chef/chef/examples/nodes/nodes.go b/vendor/github.com/go-chef/chef/examples/nodes/nodes.go new file mode 100644 index 000000000..9262bec18 --- /dev/null +++ b/vendor/github.com/go-chef/chef/examples/nodes/nodes.go @@ -0,0 +1,86 @@ +package main + +import ( + "encoding/json" + "fmt" + "io/ioutil" + "log" + "os" + + "github.com/go-chef/chef" +) + +func main() { + // read a client key + key, err := ioutil.ReadFile("key.pem") + if err != nil { + fmt.Println("Couldn't read key.pem:", err) + os.Exit(1) + } + + // build a client + client, err := chef.NewClient(&chef.Config{ + Name: "foo", + Key: string(key), + // goiardi is on port 4545 by default. chef-zero is 8889 + BaseURL: "http://localhost:4545", + }) + if err != nil { + fmt.Println("Issue setting up client:", err) + os.Exit(1) + } + + // Create a Node object + // TOOD: should have a constructor for this + ranjib := chef.Node{ + Name: "ranjib", + Environment: "_default", + ChefType: "node", + JsonClass: "Chef::Node", + RunList: []string{"pwn"}, + } + + // Delete node ignoring errors :) + client.Nodes.Delete(ranjib.Name) + + // Create + _, err = client.Nodes.Post(ranjib) + if err != nil { + log.Fatal("Couldn't create node. ", err) + } + + // List nodes + nodeList, err := client.Nodes.List() + if err != nil { + log.Fatal("Couldn't list nodes: ", err) + } + + // dump the node list in Json + jsonData, err := json.MarshalIndent(nodeList, "", "\t") + os.Stdout.Write(jsonData) + os.Stdout.WriteString("\n") + + // dump the ranjib node we got from server in JSON! 
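+	// (note: Get's error is discarded by the blank identifier, so the err
+	// checked just below still holds the value from the previous call)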
+ serverNode, _ := client.Nodes.Get("ranjib") + if err != nil { + log.Fatal("Couldn't get node: ", err) + } + jsonData, err = json.MarshalIndent(serverNode, "", "\t") + os.Stdout.Write(jsonData) + os.Stdout.WriteString("\n") + + // update node + ranjib.RunList = append(ranjib.RunList, "recipe[works]") + jsonData, err = json.MarshalIndent(ranjib, "", "\t") + os.Stdout.Write(jsonData) + os.Stdout.WriteString("\n") + + _, err = client.Nodes.Put(ranjib) + if err != nil { + log.Fatal("Couldn't update node: ", err) + } + + // Delete node ignoring errors :) + client.Nodes.Delete(ranjib.Name) + +} diff --git a/vendor/github.com/go-chef/chef/examples/sandboxes/key.pem b/vendor/github.com/go-chef/chef/examples/sandboxes/key.pem new file mode 100644 index 000000000..f44ac76ec --- /dev/null +++ b/vendor/github.com/go-chef/chef/examples/sandboxes/key.pem @@ -0,0 +1,27 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIEpAIBAAKCAQEAx12nDxxOwSPHRSJEDz67a0folBqElzlu2oGMiUTS+dqtj3FU +h5lJc1MjcprRVxcDVwhsSSo9948XEkk39IdblUCLohucqNMzOnIcdZn8zblN7Cnp +W03UwRM0iWX1HuwHnGvm6PKeqKGqplyIXYO0qlDWCzC+VaxFTwOUk31MfOHJQn4y +fTrfuE7h3FTElLBu065SFp3dPICIEmWCl9DadnxbnZ8ASxYQ9xG7hmZduDgjNW5l +3x6/EFkpym+//D6AbWDcVJ1ovCsJL3CfH/NZC3ekeJ/aEeLxP/vaCSH1VYC5VsYK +5Qg7SIa6Nth3+RZz1hYOoBJulEzwljznwoZYRQIDAQABAoIBADPQol+qAsnty5er +PTcdHcbXLJp5feZz1dzSeL0gdxja/erfEJIhg9aGUBs0I55X69VN6h7l7K8PsHZf +MzzJhUL4QJJETOYP5iuVhtIF0I+DTr5Hck/5nYcEv83KAvgjbiL4ZE486IF5awnL +2OE9HtJ5KfhEleNcX7MWgiIHGb8G1jCqu/tH0GI8Z4cNgUrXMbczGwfbN/5Wc0zo +Dtpe0Tec/Fd0DLFwRiAuheakPjlVWb7AGMDX4TyzCXfMpS1ul2jk6nGFk77uQozF +PQUawCRp+mVS4qecgq/WqfTZZbBlW2L18/kpafvsxG8kJ7OREtrb0SloZNFHEc2Q +70GbgKECgYEA6c/eOrI3Uour1gKezEBFmFKFH6YS/NZNpcSG5PcoqF6AVJwXg574 +Qy6RatC47e92be2TT1Oyplntj4vkZ3REv81yfz/tuXmtG0AylH7REbxubxAgYmUT +18wUAL4s3TST2AlK4R29KwBadwUAJeOLNW+Rc4xht1galsqQRb4pUzkCgYEA2kj2 +vUhKAB7QFCPST45/5q+AATut8WeHnI+t1UaiZoK41Jre8TwlYqUgcJ16Q0H6KIbJ +jlEZAu0IsJxjQxkD4oJgv8n5PFXdc14HcSQ512FmgCGNwtDY/AT7SQP3kOj0Rydg +N02uuRb/55NJ07Bh+yTQNGA+M5SSnUyaRPIAMW0CgYBgVU7grDDzB60C/g1jZk/G +VKmYwposJjfTxsc1a0gLJvSE59MgXc04EOXFNr4a+oC3Bh2dn4SJ2Z9xd1fh8Bur +UwCLwVE3DBTwl2C/ogiN4C83/1L4d2DXlrPfInvloBYR+rIpUlFweDLNuve2pKvk +llU9YGeaXOiHnGoY8iKgsQKBgQDZKMOHtZYhHoZlsul0ylCGAEz5bRT0V8n7QJlw +12+TSjN1F4n6Npr+00Y9ov1SUh38GXQFiLq4RXZitYKu6wEJZCm6Q8YXd1jzgDUp +IyAEHNsrV7Y/fSSRPKd9kVvGp2r2Kr825aqQasg16zsERbKEdrBHmwPmrsVZhi7n +rlXw1QKBgQDBOyUJKQOgDE2u9EHybhCIbfowyIE22qn9a3WjQgfxFJ+aAL9Bg124 +fJIEzz43fJ91fe5lTOgyMF5TtU5ClAOPGtlWnXU0e5j3L4LjbcqzEbeyxvP3sn1z +dYkX7NdNQ5E6tcJZuJCGq0HxIAQeKPf3x9DRKzMnLply6BEzyuAC4g== +-----END RSA PRIVATE KEY----- diff --git a/vendor/github.com/go-chef/chef/examples/sandboxes/sandboxes.go b/vendor/github.com/go-chef/chef/examples/sandboxes/sandboxes.go new file mode 100644 index 000000000..21d06b5b0 --- /dev/null +++ b/vendor/github.com/go-chef/chef/examples/sandboxes/sandboxes.go @@ -0,0 +1,103 @@ +package main + +import ( + "bytes" + "crypto/md5" + "crypto/rand" + "encoding/json" + "fmt" + "io/ioutil" + "os" + + "github.com/cenkalti/backoff" + "github.com/davecgh/go-spew/spew" + "github.com/go-chef/chef" +) + +//random_data makes random byte slice for building junk sandbox data +func random_data(size int) (b []byte) { + b = make([]byte, size) + rand.Read(b) + return +} + +func main() { + // read a client key + key, err := ioutil.ReadFile("key.pem") + if err != nil { + fmt.Println("Couldn't read key.pem:", err) + os.Exit(1) + } + + // build a client + client, err := chef.NewClient(&chef.Config{ + Name: "foo", + Key: string(key), + // goiardi is on port 4545 by default. 
chef-zero is 8889
+		BaseURL: "http://localhost:4545",
+	})
+	if err != nil {
+		fmt.Println("Issue setting up client:", err)
+		os.Exit(1)
+	}
+
+	// create junk files and sums
+	files := make(map[string][]byte)
+	sums := make([]string, 10)
+	for i := 0; i < 10; i++ {
+		data := random_data(1024)
+		hashstr := fmt.Sprintf("%x", md5.Sum(data))
+		files[hashstr] = data
+		sums[i] = hashstr
+	}
+
+	// post the new sums/files to the sandbox
+	postResp, err := client.Sandboxes.Post(sums)
+	if err != nil {
+		fmt.Println("error making request: ", err)
+		os.Exit(1)
+	}
+
+	// Dump the server response data
+	j, err := json.MarshalIndent(postResp, "", " ")
+	fmt.Printf("%s", j)
+
+	// Let's upload the files that postResp thinks we should upload
+	for hash, item := range postResp.Checksums {
+		if item.Upload {
+			if hash == "" {
+				continue
+			}
+			// If you were writing this in your own tool you could hand the file handle to the Reader interface directly instead of converting here.
+			fmt.Printf("\nUploading: %s ---> %v\n\n", hash, item)
+			req, err := client.NewRequest("PUT", item.Url, bytes.NewReader(files[hash]))
+			if err != nil {
+				fmt.Println("This shouldn't happen:", err.Error())
+				os.Exit(1)
+			}
+
+			// post the files
+			upload := func() error {
+				_, err = client.Do(req, nil)
+				return err
+			}
+
+			// with exponential backoff!
+			err = backoff.Retry(upload, backoff.NewExponentialBackOff())
+			if err != nil {
+				fmt.Println("error posting files to the sandbox: ", err.Error())
+			}
+		}
+	}
+
+	// Now let's tell the server we have uploaded all the things.
+	sandbox, err := client.Sandboxes.Put(postResp.ID)
+	if err != nil {
+		fmt.Println("Error committing sandbox: ", err.Error())
+		os.Exit(1)
+	}
+
+	// and here's your committed sandbox
+	spew.Dump(sandbox)
+
+}
diff --git a/vendor/github.com/go-chef/chef/examples/search/key.pem b/vendor/github.com/go-chef/chef/examples/search/key.pem
new file mode 100644
index 000000000..f44ac76ec
--- /dev/null
+++ b/vendor/github.com/go-chef/chef/examples/search/key.pem
@@ -0,0 +1,27 @@
+-----BEGIN RSA PRIVATE KEY-----
+MIIEpAIBAAKCAQEAx12nDxxOwSPHRSJEDz67a0folBqElzlu2oGMiUTS+dqtj3FU
+h5lJc1MjcprRVxcDVwhsSSo9948XEkk39IdblUCLohucqNMzOnIcdZn8zblN7Cnp
+W03UwRM0iWX1HuwHnGvm6PKeqKGqplyIXYO0qlDWCzC+VaxFTwOUk31MfOHJQn4y
+fTrfuE7h3FTElLBu065SFp3dPICIEmWCl9DadnxbnZ8ASxYQ9xG7hmZduDgjNW5l
+3x6/EFkpym+//D6AbWDcVJ1ovCsJL3CfH/NZC3ekeJ/aEeLxP/vaCSH1VYC5VsYK
+5Qg7SIa6Nth3+RZz1hYOoBJulEzwljznwoZYRQIDAQABAoIBADPQol+qAsnty5er
+PTcdHcbXLJp5feZz1dzSeL0gdxja/erfEJIhg9aGUBs0I55X69VN6h7l7K8PsHZf
+MzzJhUL4QJJETOYP5iuVhtIF0I+DTr5Hck/5nYcEv83KAvgjbiL4ZE486IF5awnL
+2OE9HtJ5KfhEleNcX7MWgiIHGb8G1jCqu/tH0GI8Z4cNgUrXMbczGwfbN/5Wc0zo
+Dtpe0Tec/Fd0DLFwRiAuheakPjlVWb7AGMDX4TyzCXfMpS1ul2jk6nGFk77uQozF
+PQUawCRp+mVS4qecgq/WqfTZZbBlW2L18/kpafvsxG8kJ7OREtrb0SloZNFHEc2Q
+70GbgKECgYEA6c/eOrI3Uour1gKezEBFmFKFH6YS/NZNpcSG5PcoqF6AVJwXg574
+Qy6RatC47e92be2TT1Oyplntj4vkZ3REv81yfz/tuXmtG0AylH7REbxubxAgYmUT
+18wUAL4s3TST2AlK4R29KwBadwUAJeOLNW+Rc4xht1galsqQRb4pUzkCgYEA2kj2
+vUhKAB7QFCPST45/5q+AATut8WeHnI+t1UaiZoK41Jre8TwlYqUgcJ16Q0H6KIbJ
+jlEZAu0IsJxjQxkD4oJgv8n5PFXdc14HcSQ512FmgCGNwtDY/AT7SQP3kOj0Rydg
+N02uuRb/55NJ07Bh+yTQNGA+M5SSnUyaRPIAMW0CgYBgVU7grDDzB60C/g1jZk/G
+VKmYwposJjfTxsc1a0gLJvSE59MgXc04EOXFNr4a+oC3Bh2dn4SJ2Z9xd1fh8Bur
+UwCLwVE3DBTwl2C/ogiN4C83/1L4d2DXlrPfInvloBYR+rIpUlFweDLNuve2pKvk
+llU9YGeaXOiHnGoY8iKgsQKBgQDZKMOHtZYhHoZlsul0ylCGAEz5bRT0V8n7QJlw
+12+TSjN1F4n6Npr+00Y9ov1SUh38GXQFiLq4RXZitYKu6wEJZCm6Q8YXd1jzgDUp
+IyAEHNsrV7Y/fSSRPKd9kVvGp2r2Kr825aqQasg16zsERbKEdrBHmwPmrsVZhi7n
+rlXw1QKBgQDBOyUJKQOgDE2u9EHybhCIbfowyIE22qn9a3WjQgfxFJ+aAL9Bg124 +fJIEzz43fJ91fe5lTOgyMF5TtU5ClAOPGtlWnXU0e5j3L4LjbcqzEbeyxvP3sn1z +dYkX7NdNQ5E6tcJZuJCGq0HxIAQeKPf3x9DRKzMnLply6BEzyuAC4g== +-----END RSA PRIVATE KEY----- diff --git a/vendor/github.com/go-chef/chef/examples/search/search.go b/vendor/github.com/go-chef/chef/examples/search/search.go new file mode 100644 index 000000000..0f1e16ad7 --- /dev/null +++ b/vendor/github.com/go-chef/chef/examples/search/search.go @@ -0,0 +1,89 @@ +package main + +import ( + "encoding/json" + "fmt" + "io/ioutil" + "log" + "os" + + "github.com/davecgh/go-spew/spew" + "github.com/go-chef/chef" +) + +func main() { + // read a client key + key, err := ioutil.ReadFile("key.pem") + if err != nil { + fmt.Println("Couldn't read key.pem:", err) + os.Exit(1) + } + + // build a client + client, err := chef.NewClient(&chef.Config{ + Name: "foo", + Key: string(key), + // goiardi is on port 4545 by default. chef-zero is 8889 + BaseURL: "http://localhost:4545", + }) + if err != nil { + fmt.Println("Issue setting up client:", err) + os.Exit(1) + } + + // List Indexes + indexes, err := client.Search.Indexes() + if err != nil { + log.Fatal("Couldn't list nodes: ", err) + } + + // dump the Index list in Json + jsonData, err := json.MarshalIndent(indexes, "", "\t") + os.Stdout.Write(jsonData) + os.Stdout.WriteString("\n") + + // build a seach query + query, err := client.Search.NewQuery("node", "name:*") + if err != nil { + log.Fatal("Error building query ", err) + } + + // Run the query + res, err := query.Do(client) + if err != nil { + log.Fatal("Error running query ", err) + } + + // <3 spew + spew.Dump(res) + + // dump out results back in json for fun + jsonData, err = json.MarshalIndent(res, "", "\t") + os.Stdout.Write(jsonData) + os.Stdout.WriteString("\n") + + // You can also use the service to run a query + res, err = client.Search.Exec("node", "name:*") + if err != nil { + log.Fatal("Error running Search.Exec() ", err) + } + + // dump out results back in json for fun + jsonData, err = json.MarshalIndent(res, "", "\t") + os.Stdout.Write(jsonData) + os.Stdout.WriteString("\n") + + // Partial search + log.Print("Partial Search") + part := make(map[string]interface{}) + part["name"] = []string{"name"} + pres, err := client.Search.PartialExec("node", "*:*", part) + if err != nil { + log.Fatal("Error running Search.PartialExec()", err) + } + + jsonData, err = json.MarshalIndent(pres, "", "\t") + os.Stdout.Write(jsonData) + os.Stdout.WriteString("\n") + +} diff --git a/vendor/github.com/go-chef/chef/http.go b/vendor/github.com/go-chef/chef/http.go new file mode 100644 index 000000000..f2981a3fc --- /dev/null +++ b/vendor/github.com/go-chef/chef/http.go @@ -0,0 +1,306 @@ +package chef + +import ( + "bytes" + "crypto/rsa" + "crypto/tls" + "crypto/x509" + "encoding/json" + "encoding/pem" + "fmt" + "io" + "io/ioutil" + "log" + "net/http" + "net/url" + "path" + "strings" + "time" +) + +// ChefVersion that we pretend to emulate +const ChefVersion = "11.12.0" + +// Body wraps io.Reader and adds methods for calculating hashes and detecting content +type Body struct { + io.Reader +} + +// AuthConfig representing a client and a private key used for encryption +// This is embedded in the Client type +type AuthConfig struct { + PrivateKey *rsa.PrivateKey + ClientName string +} + +// Client is vessel for public methods used against the chef-server +type Client struct { + Auth *AuthConfig + BaseURL *url.URL + client *http.Client + + ACLs *ACLService + Clients *ApiClientService + 
Cookbooks    *CookbookService
+	DataBags     *DataBagService
+	Environments *EnvironmentService
+	Nodes        *NodeService
+	Roles        *RoleService
+	Sandboxes    *SandboxService
+	Search       *SearchService
+}
+
+// Config contains the configuration options for a chef client. It is used primarily by the NewClient() constructor to set up a working client object
+type Config struct {
+	// This should be the user ID on the chef server
+	Name string
+
+	// This is the plain text private key for the user
+	Key string
+
+	// BaseURL is the chef server URL to connect to. If using orgs you should include your org in the URL
+	BaseURL string
+
+	// When set to false (the default) SSL cert verification is enabled. Set to true to disable cert verification
+	SkipSSL bool
+
+	// Time to wait in seconds before giving up on a request to the server
+	Timeout time.Duration
+}
+
+/*
+An ErrorResponse reports one or more errors caused by an API request.
+Thanks to https://github.com/google/go-github
+*/
+type ErrorResponse struct {
+	Response *http.Response // HTTP response that caused this error
+}
+
+// Buffer copies an io.Reader into a bytes.Buffer and rewinds the reader to offset 0,0
+func (body *Body) Buffer() *bytes.Buffer {
+	var b bytes.Buffer
+	if body.Reader == nil {
+		return &b
+	}
+
+	b.ReadFrom(body.Reader)
+	_, err := body.Reader.(io.Seeker).Seek(0, 0)
+	if err != nil {
+		log.Fatal(err)
+	}
+	return &b
+}
+
+// Hash calculates the body content hash
+func (body *Body) Hash() (h string) {
+	b := body.Buffer()
+	// an empty buffer hashes as the empty string
+	if b.Len() == 0 {
+		return HashStr("")
+	}
+	h = HashStr(b.String())
+	return
+}
+
+// ContentType returns the content-type string of Body as detected by http.DetectContentType()
+func (body *Body) ContentType() string {
+	if json.Unmarshal(body.Buffer().Bytes(), &struct{}{}) == nil {
+		return "application/json"
+	}
+	return http.DetectContentType(body.Buffer().Bytes())
+}
+
+func (r *ErrorResponse) Error() string {
+	return fmt.Sprintf("%v %v: %d",
+		r.Response.Request.Method, r.Response.Request.URL,
+		r.Response.StatusCode)
+}
+
+// NewClient is the client generator used to instantiate a client for talking to a chef-server
+// It is a simple constructor for the Client struct intended as an easy interface for issuing
+// signed requests
+func NewClient(cfg *Config) (*Client, error) {
+	pk, err := PrivateKeyFromString([]byte(cfg.Key))
+	if err != nil {
+		return nil, err
+	}
+
+	baseUrl, _ := url.Parse(cfg.BaseURL)
+
+	tr := &http.Transport{
+		TLSClientConfig: &tls.Config{InsecureSkipVerify: cfg.SkipSSL},
+	}
+
+	c := &Client{
+		Auth: &AuthConfig{
+			PrivateKey: pk,
+			ClientName: cfg.Name,
+		},
+		client: &http.Client{
+			Transport: tr,
+			Timeout:   cfg.Timeout * time.Second,
+		},
+		BaseURL: baseUrl,
+	}
+	c.ACLs = &ACLService{client: c}
+	c.Clients = &ApiClientService{client: c}
+	c.Cookbooks = &CookbookService{client: c}
+	c.DataBags = &DataBagService{client: c}
+	c.Environments = &EnvironmentService{client: c}
+	c.Nodes = &NodeService{client: c}
+	c.Roles = &RoleService{client: c}
+	c.Sandboxes = &SandboxService{client: c}
+	c.Search = &SearchService{client: c}
+	return c, nil
+}
+
+// magicRequestDecoder performs a request on an endpoint, and decodes the response into the passed-in type
+func (c *Client) magicRequestDecoder(method, path string, body io.Reader, v interface{}) error {
+	req, err := c.NewRequest(method, path, body)
+	if err != nil {
+		return err
+	}
+
+	debug("Request: %+v \n", req)
+	res, err := c.Do(req, v)
+	if res != nil {
+		defer 
res.Body.Close()
+	}
+	debug("Response: %+v \n", res)
+	if err != nil {
+		return err
+	}
+	return err
+}
+
+// NewRequest returns a signed request suitable for the chef server
+func (c *Client) NewRequest(method string, requestUrl string, body io.Reader) (*http.Request, error) {
+	relativeUrl, err := url.Parse(requestUrl)
+	if err != nil {
+		return nil, err
+	}
+	u := c.BaseURL.ResolveReference(relativeUrl)
+
+	// http.NewRequest wraps body in its own value object
+	req, err := http.NewRequest(method, u.String(), body)
+	if err != nil {
+		return nil, err
+	}
+
+	// parse and encode Querystring Values
+	values := req.URL.Query()
+	req.URL.RawQuery = values.Encode()
+	debug("Encoded url %+v", u)
+
+	myBody := &Body{body}
+
+	if body != nil {
+		// Detect Content-type
+		req.Header.Set("Content-Type", myBody.ContentType())
+	}
+
+	// Calculate the body hash
+	req.Header.Set("X-Ops-Content-Hash", myBody.Hash())
+
+	// The error is safe to ignore here: SignRequest only fails when the signing
+	// hash is invalid, and the hash was just generated above
+	c.Auth.SignRequest(req)
+	return req, nil
+}
+
+// CheckResponse receives a pointer to an http.Response and generates an Error via unmarshalling
+func CheckResponse(r *http.Response) error {
+	if c := r.StatusCode; 200 <= c && c <= 299 {
+		return nil
+	}
+	errorResponse := &ErrorResponse{Response: r}
+	data, err := ioutil.ReadAll(r.Body)
+	if err == nil && data != nil {
+		json.Unmarshal(data, errorResponse)
+	}
+	return errorResponse
+}
+
+// Do is used either internally via our magic request decoder or directly by a user
+func (c *Client) Do(req *http.Request, v interface{}) (*http.Response, error) {
+	res, err := c.client.Do(req)
+	if err != nil {
+		return nil, err
+	}
+
+	// BUG(fujin) tightly coupled
+	err = CheckResponse(res) // <--
+	if err != nil {
+		return res, err
+	}
+
+	if v != nil {
+		if w, ok := v.(io.Writer); ok {
+			io.Copy(w, res.Body)
+		} else {
+			err = json.NewDecoder(res.Body).Decode(v)
+			if err != nil {
+				return res, err
+			}
+		}
+	}
+	return res, nil
+}
+
+// SignRequest modifies headers of an http.Request
+func (ac AuthConfig) SignRequest(request *http.Request) error {
+	// sanitize the path for the chef-server
+	// chef-server doesn't support '//' in the Hash Path.
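+	// The scheme below follows Chef's request-signing protocol (version 1.0):
+	// hash the cleaned path, build a canonical string of the signing headers,
+	// RSA-sign it, and emit the signature as 60-character
+	// X-Ops-Authorization-N header chunks.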
+	var endpoint string
+	if request.URL.Path != "" {
+		endpoint = path.Clean(request.URL.Path)
+		request.URL.Path = endpoint
+	} else {
+		endpoint = request.URL.Path
+	}
+
+	request.Header.Set("Method", request.Method)
+	request.Header.Set("Hashed Path", HashStr(endpoint))
+	request.Header.Set("Accept", "application/json")
+	request.Header.Set("X-Chef-Version", ChefVersion)
+	request.Header.Set("X-Ops-Timestamp", time.Now().UTC().Format(time.RFC3339))
+	request.Header.Set("X-Ops-UserId", ac.ClientName)
+	request.Header.Set("X-Ops-Sign", "algorithm=sha1;version=1.0")
+
+	// The signature is computed over an exact, ordered set of headers
+	var content string
+	for _, key := range []string{"Method", "Hashed Path", "X-Ops-Content-Hash", "X-Ops-Timestamp", "X-Ops-UserId"} {
+		content += fmt.Sprintf("%s:%s\n", key, request.Header.Get(key))
+	}
+	content = strings.TrimSuffix(content, "\n")
+	// generate signed string of headers
+	// Since we've gone through additional validation steps above,
+	// we shouldn't get an error at this point
+	signature, err := GenerateSignature(ac.PrivateKey, content)
+	if err != nil {
+		return err
+	}
+
+	// TODO: THIS IS CHEF PROTOCOL SPECIFIC
+	// Signature is made up of n 60-character chunks
+	base64sig := Base64BlockEncode(signature, 60)
+
+	// roll over the auth slice and add the appropriate header
+	for index, value := range base64sig {
+		request.Header.Set(fmt.Sprintf("X-Ops-Authorization-%d", index+1), string(value))
+	}
+
+	return nil
+}
+
+// PrivateKeyFromString parses an RSA private key from a string
+func PrivateKeyFromString(key []byte) (*rsa.PrivateKey, error) {
+	block, _ := pem.Decode(key)
+	if block == nil {
+		return nil, fmt.Errorf("block size invalid for '%s'", string(key))
+	}
+	rsaKey, err := x509.ParsePKCS1PrivateKey(block.Bytes)
+	if err != nil {
+		return nil, err
+	}
+	return rsaKey, nil
+}
diff --git a/vendor/github.com/go-chef/chef/http_test.go b/vendor/github.com/go-chef/chef/http_test.go
new file mode 100644
index 000000000..cf9a4395d
--- /dev/null
+++ b/vendor/github.com/go-chef/chef/http_test.go
@@ -0,0 +1,571 @@
+package chef
+
+import (
+	"bytes"
+	"crypto/rsa"
+	"crypto/x509"
+	"encoding/pem"
+	"errors"
+	"fmt"
+	. "github.com/ctdk/goiardi/chefcrypto"
+	. 
"github.com/smartystreets/goconvey/convey" + "io" + "math/big" + "net/http" + "net/http/httptest" + "regexp" + "strconv" + "strings" + "testing" +) + +var ( + testRequiredHeaders = []string{ + "X-Ops-Timestamp", + "X-Ops-UserId", + "X-Ops-Sign", + "X-Ops-Content-Hash", + "X-Ops-Authorization-1", + } + + mux *http.ServeMux + server *httptest.Server + client *Client +) + +const ( + userid = "tester" + requestURL = "http://localhost:80" + // Generated from + // openssl genrsa -out privkey.pem 2048 + // perl -pe 's/\n/\\n/g' privkey.pem + privateKey = ` +-----BEGIN RSA PRIVATE KEY----- +MIIEpAIBAAKCAQEAx12nDxxOwSPHRSJEDz67a0folBqElzlu2oGMiUTS+dqtj3FU +h5lJc1MjcprRVxcDVwhsSSo9948XEkk39IdblUCLohucqNMzOnIcdZn8zblN7Cnp +W03UwRM0iWX1HuwHnGvm6PKeqKGqplyIXYO0qlDWCzC+VaxFTwOUk31MfOHJQn4y +fTrfuE7h3FTElLBu065SFp3dPICIEmWCl9DadnxbnZ8ASxYQ9xG7hmZduDgjNW5l +3x6/EFkpym+//D6AbWDcVJ1ovCsJL3CfH/NZC3ekeJ/aEeLxP/vaCSH1VYC5VsYK +5Qg7SIa6Nth3+RZz1hYOoBJulEzwljznwoZYRQIDAQABAoIBADPQol+qAsnty5er +PTcdHcbXLJp5feZz1dzSeL0gdxja/erfEJIhg9aGUBs0I55X69VN6h7l7K8PsHZf +MzzJhUL4QJJETOYP5iuVhtIF0I+DTr5Hck/5nYcEv83KAvgjbiL4ZE486IF5awnL +2OE9HtJ5KfhEleNcX7MWgiIHGb8G1jCqu/tH0GI8Z4cNgUrXMbczGwfbN/5Wc0zo +Dtpe0Tec/Fd0DLFwRiAuheakPjlVWb7AGMDX4TyzCXfMpS1ul2jk6nGFk77uQozF +PQUawCRp+mVS4qecgq/WqfTZZbBlW2L18/kpafvsxG8kJ7OREtrb0SloZNFHEc2Q +70GbgKECgYEA6c/eOrI3Uour1gKezEBFmFKFH6YS/NZNpcSG5PcoqF6AVJwXg574 +Qy6RatC47e92be2TT1Oyplntj4vkZ3REv81yfz/tuXmtG0AylH7REbxubxAgYmUT +18wUAL4s3TST2AlK4R29KwBadwUAJeOLNW+Rc4xht1galsqQRb4pUzkCgYEA2kj2 +vUhKAB7QFCPST45/5q+AATut8WeHnI+t1UaiZoK41Jre8TwlYqUgcJ16Q0H6KIbJ +jlEZAu0IsJxjQxkD4oJgv8n5PFXdc14HcSQ512FmgCGNwtDY/AT7SQP3kOj0Rydg +N02uuRb/55NJ07Bh+yTQNGA+M5SSnUyaRPIAMW0CgYBgVU7grDDzB60C/g1jZk/G +VKmYwposJjfTxsc1a0gLJvSE59MgXc04EOXFNr4a+oC3Bh2dn4SJ2Z9xd1fh8Bur +UwCLwVE3DBTwl2C/ogiN4C83/1L4d2DXlrPfInvloBYR+rIpUlFweDLNuve2pKvk +llU9YGeaXOiHnGoY8iKgsQKBgQDZKMOHtZYhHoZlsul0ylCGAEz5bRT0V8n7QJlw +12+TSjN1F4n6Npr+00Y9ov1SUh38GXQFiLq4RXZitYKu6wEJZCm6Q8YXd1jzgDUp +IyAEHNsrV7Y/fSSRPKd9kVvGp2r2Kr825aqQasg16zsERbKEdrBHmwPmrsVZhi7n +rlXw1QKBgQDBOyUJKQOgDE2u9EHybhCIbfowyIE22qn9a3WjQgfxFJ+aAL9Bg124 +fJIEzz43fJ91fe5lTOgyMF5TtU5ClAOPGtlWnXU0e5j3L4LjbcqzEbeyxvP3sn1z +dYkX7NdNQ5E6tcJZuJCGq0HxIAQeKPf3x9DRKzMnLply6BEzyuAC4g== +-----END RSA PRIVATE KEY----- +` + // Generated from + // openssl rsa -in privkey.pem -pubout -out pubkey.pem + // perl -pe 's/\n/\\n/g' pubkey.pem + publicKey = ` +-----BEGIN PUBLIC KEY----- +MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAx12nDxxOwSPHRSJEDz67 +a0folBqElzlu2oGMiUTS+dqtj3FUh5lJc1MjcprRVxcDVwhsSSo9948XEkk39Idb +lUCLohucqNMzOnIcdZn8zblN7CnpW03UwRM0iWX1HuwHnGvm6PKeqKGqplyIXYO0 +qlDWCzC+VaxFTwOUk31MfOHJQn4yfTrfuE7h3FTElLBu065SFp3dPICIEmWCl9Da +dnxbnZ8ASxYQ9xG7hmZduDgjNW5l3x6/EFkpym+//D6AbWDcVJ1ovCsJL3CfH/NZ +C3ekeJ/aEeLxP/vaCSH1VYC5VsYK5Qg7SIa6Nth3+RZz1hYOoBJulEzwljznwoZY +RQIDAQAB +-----END PUBLIC KEY----- +` + // Generated from + // openssl dsaparam -out dsaparam.pem 2048 + // openssl gendsa -out privkey.pem dsaparam.pem + // perl -pe 's/\n/\\n/g' privkey.pem + badPrivateKey = ` +-----BEGIN DSA PRIVATE KEY----- +MIIDVgIBAAKCAQEApv0SsaKRWyn0IrbI6i547c/gldLQ3vB5xoSuTkVOvmD3HfuE +EVPKMS+XKlhgHOJy677zYNKUOIR78vfDVr1M89w19NSic81UwGGaOkrjQWOkoHaA +BS4046AzYKWqHWQNn9dm7WdQlbMBcBv9u+J6EqlzstPwWVaRdbAzyPtwQZRF5WfC +OcrQr8XpXbKsPh55FzfvFpu4KEKTY+8ynLz9uDNW2iAxj9NtRlUHQNqKQvjQsr/8 +4pVrEBh+CnzNrmPXQIbyxV0y8WukAo3I3ZXK5nsUcJhFoVCRx4aBlp9W96mYZ7OE +dPCkFsoVhUNFo0jlJhMPODR1NXy77c4v1Kh6xwIhAJwFm6CQBOWJxZdGo2luqExE +acUG9Hkr2qd0yccgs2tFAoIBAQCQJCwASD7X9l7nZyZvJpXMe6YreGaP3VbbHCz8 
+GHs1P5exOausfJXa9gRLx2qDW0sa1ZyFUDnd2Dt810tgAhY143lufNoV3a4IRHpS +Fm8jjDRMyBQ/BrLBBXgpwiZ9LHBuUSeoRKY0BdyRsULmcq2OaBq9J38NUblWSe2R +NjQ45X6SGgUdHy3CrQtLjCA9l8+VPg3l05IBbXIhVSllP5AUmMG4T9x6M7NHEoSr +c7ewKSJNvc1C8+G66Kfz8xcChKcKC2z1YzvxrlcDHF+BBLw1Ppp+yMBfhQDWIZfe +6tpiKEEyWoyi4GkzQ+vooFIriaaL+Nnggh+iJ7BEUByHBaHnAoIBAFUxSB3bpbbp +Vna0HN6b+svuTCFhYi9AcmI1dcyEFKycUvZjP/X07HvX2yrL8aGxMJgF6RzPob/F ++SZar3u9Fd8DUYLxis6/B5d/ih7GnfPdChrDOJM1nwlferTGHXd1TBDzugpAovCe +JAjXiPsGmcCi9RNyoGib/FgniT7IKA7s3yJAzYSeW3wtLToSNGFJHn+TzFDBuWV4 +KH70bpEV84JIzWo0ejKzgMBQ0Zrjcsm4lGBtzaBqGSvOrlIVFuSWFYUxrSTTxthQ +/JYz4ch8+HsQC/0HBuJ48yALDCVKsWq4Y21LRRJIOC25DfjwEYWWaKNGlDDsJA1m +Y5WF0OX+ABcCIEXhrzI1NddyFwLnfDCQ+sy6HT8/xLKXfaipd2rpn3gL +-----END DSA PRIVATE KEY----- +` +) + +// Gave up trying to implement this myself +// nopCloser came from https://groups.google.com/d/msg/golang-nuts/J-Y4LtdGNSw/wDSYbHWIKj0J +// yay for sharing +// nopCloser creates a io.ReadCloser to satisfy the request.Body input +type nopCloser struct { + io.Reader +} + +func (nopCloser) Close() error { return nil } + +func setup() { + mux = http.NewServeMux() + server = httptest.NewServer(mux) + client, _ = NewClient(&Config{ + Name: userid, + Key: privateKey, + BaseURL: server.URL, + }) +} + +func teardown() { + server.Close() +} + +func createServer() *httptest.Server { + return httptest.NewServer(http.HandlerFunc(checkHeader)) +} + +// publicKeyFromString parses an RSA public key from a string +func publicKeyFromString(key []byte) (*rsa.PublicKey, error) { + block, _ := pem.Decode(key) + if block == nil { + return nil, fmt.Errorf("block size invalid for '%s'", string(key)) + } + rsaKey, err := x509.ParsePKIXPublicKey(block.Bytes) + if err != nil { + return nil, err + } + + return rsaKey.(*rsa.PublicKey), nil +} + +func makeAuthConfig() (*AuthConfig, error) { + pk, err := PrivateKeyFromString([]byte(privateKey)) + if err != nil { + return nil, err + } + + ac := &AuthConfig{ + PrivateKey: pk, + ClientName: userid, + } + return ac, nil +} + +func TestAuthConfig(t *testing.T) { + _, err := makeAuthConfig() + if err != nil { + t.Error("Failed to create AuthConfig struct from privatekeys and stuff", err) + } +} + +func TestBase64BlockEncodeNoLimit(t *testing.T) { + ac, _ := makeAuthConfig() + var content string + for _, key := range []string{"header1", "header2", "header3"} { + content += fmt.Sprintf("%s:blahblahblah\n", key) + } + content = strings.TrimSuffix(content, "\n") + + signature, _ := GenerateSignature(ac.PrivateKey, content) + Base64BlockEncode(signature, 0) +} + +func TestSignRequestBadSignature(t *testing.T) { + ac, err := makeAuthConfig() + request, err := http.NewRequest("GET", requestURL, nil) + ac.PrivateKey.PublicKey.N = big.NewInt(23234728432324) + + err = ac.SignRequest(request) + if err == nil { + t.Fatal("failed to generate failed signature") + } +} + +func TestSignRequestNoBody(t *testing.T) { + setup() + defer teardown() + ac, err := makeAuthConfig() + request, err := client.NewRequest("GET", requestURL, nil) + + err = ac.SignRequest(request) + if err != nil { + t.Fatal("failed to generate RequestHeaders") + } + count := 0 + for _, requiredHeader := range testRequiredHeaders { + for header := range request.Header { + if strings.ToLower(requiredHeader) == strings.ToLower(header) { + count++ + break + } + } + } + if count != len(testRequiredHeaders) { + t.Error("apiRequestHeaders didn't return all of testRequiredHeaders") + } +} + +func TestSignRequestBody(t *testing.T) { + ac, err := makeAuthConfig() + if err != nil { + 
t.Fatal(err) + } + setup() + defer teardown() + + // Gave up trying to implement this myself + // nopCloser came from https://groups.google.com/d/msg/golang-nuts/J-Y4LtdGNSw/wDSYbHWIKj0J + // yay for sharing + requestBody := strings.NewReader("somecoolbodytext") + request, err := client.NewRequest("GET", requestURL, requestBody) + + err = ac.SignRequest(request) + if err != nil { + t.Fatal("failed to generate RequestHeaders") + } + count := 0 + for _, requiredHeader := range testRequiredHeaders { + for header := range request.Header { + if strings.ToLower(requiredHeader) == strings.ToLower(header) { + count++ + break + } + } + } + if count != len(testRequiredHeaders) { + t.Error("apiRequestHeaders didn't return all of testRequiredHeaders") + } +} + +// <3 goiardi +// Test our headers as goiardi would +// https://github.com/ctdk/goiardi/blob/master/authentication/authentication.go +// func checkHeader(user_id string, r *http.Request) string { +func checkHeader(rw http.ResponseWriter, req *http.Request) { + user_id := req.Header.Get("X-OPS-USERID") + // Since we don't have a real client or user to check against, + // we'll just verify that input user = output user + // user, err := actor.GetReqUser(user_id) + // if err != nil { + if user_id != userid { + fmt.Fprintf(rw, "Failed to authenticate as %s", user_id) + } + + contentHash := req.Header.Get("X-OPS-CONTENT-HASH") + if contentHash == "" { + fmt.Fprintf(rw, "no content hash provided") + } + + authTimestamp := req.Header.Get("x-ops-timestamp") + if authTimestamp == "" { + fmt.Fprintf(rw, "no timestamp header provided") + } + // TODO: Will want to implement this later + // else { + // // check the time stamp w/ allowed slew + // tok, terr := checkTimeStamp(authTimestamp, config.Config.TimeSlewDur) + // if !tok { + // return terr + // } + // } + + // Eventually this may be put to some sort of use, but for now just + // make sure that it's there. Presumably eventually it would be used to + // use algorithms other than sha1 for hashing the body, or using a + // different version of the header signing algorithm. + xopssign := req.Header.Get("x-ops-sign") + var apiVer string + var hashChk []string + if xopssign == "" { + fmt.Fprintf(rw, "missing X-Ops-Sign header") + } else { + re := regexp.MustCompile(`version=(\d+\.\d+)`) + shaRe := regexp.MustCompile(`algorithm=(\w+)`) + if verChk := re.FindStringSubmatch(xopssign); verChk != nil { + apiVer = verChk[1] + if apiVer != "1.0" && apiVer != "1.1" { + fmt.Fprintf(rw, "Bad version number '%s' in X-Ops-Header", apiVer) + + } + } else { + fmt.Fprintf(rw, "malformed version in X-Ops-Header") + } + + // if algorithm is missing, it uses sha1. Of course, no other + // hashing algorithm is supported yet... 
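+		// (sha1 is also the only algorithm SignRequest itself emits in X-Ops-Sign)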
+ if hashChk = shaRe.FindStringSubmatch(xopssign); hashChk != nil { + if hashChk[1] != "sha1" { + fmt.Fprintf(rw, "Unsupported hashing algorithm '%s' specified in X-Ops-Header", hashChk[1]) + } + } + } + + signedHeaders, sherr := assembleSignedHeader(req) + if sherr != nil { + fmt.Fprintf(rw, sherr.Error()) + } + + _, err := HeaderDecrypt(publicKey, signedHeaders) + if err != nil { + fmt.Fprintf(rw, "unexpected header decryption error '%s'", err) + } +} + +func TestRequest(t *testing.T) { + ac, err := makeAuthConfig() + server := createServer() + defer server.Close() + setup() + defer teardown() + + request, err := client.NewRequest("GET", server.URL, nil) + + err = ac.SignRequest(request) + if err != nil { + t.Fatal("failed to generate RequestHeaders") + } + + client := &http.Client{} + response, err := client.Do(request) + if err != nil { + t.Error(err) + } + + if response.StatusCode != 200 { + t.Error("Non 200 return code: " + response.Status) + } + + buf := new(bytes.Buffer) + buf.ReadFrom(response.Body) + bodyStr := buf.String() + + if bodyStr != "" { + t.Error(bodyStr) + } + +} + +func TestRequestToEndpoint(t *testing.T) { + ac, err := makeAuthConfig() + server := createServer() + defer server.Close() + + requestBody := strings.NewReader("somecoolbodytext") + request, err := client.NewRequest("GET", server.URL+"/clients", requestBody) + + err = ac.SignRequest(request) + if err != nil { + t.Fatal("failed to generate RequestHeaders") + } + + client := &http.Client{} + response, err := client.Do(request) + if err != nil { + t.Error(err) + } + + if response.StatusCode != 200 { + t.Error("Non 200 return code: " + response.Status) + } + + buf := new(bytes.Buffer) + buf.ReadFrom(response.Body) + bodyStr := buf.String() + + if bodyStr != "" { + t.Error(bodyStr) + } +} + +// More Goiardi <3 +func assembleSignedHeader(r *http.Request) (string, error) { + sHeadStore := make(map[int]string) + authHeader := regexp.MustCompile(`(?i)^X-Ops-Authorization-(\d+)`) + for k := range r.Header { + if c := authHeader.FindStringSubmatch(k); c != nil { + /* Have to put it into a map first, then sort, in case + * the headers don't come out in the right order */ + // skipping this error because we shouldn't even be + // able to get here with something that won't be an + // integer. Famous last words, I'm sure. + i, _ := strconv.Atoi(c[1]) + sHeadStore[i] = r.Header.Get(k) + } + } + if len(sHeadStore) == 0 { + return "", errors.New("no authentication headers found") + } + + sH := make([]string, len(sHeadStore)) + sHlimit := len(sH) + for k, v := range sHeadStore { + if k > sHlimit { + return "", errors.New("malformed authentication headers") + } + sH[k-1] = v + } + signedHeaders := strings.Join(sH, "") + + return signedHeaders, nil +} + +func assembleHeaderToCheck(r *http.Request) string { + + // To validate the signature it seems to be very particular + // Would like to use this loop to generate the content + // But it causes validation to fail.. 
so we do it explicitly + + // authHeader := regexp.MustCompile(`(?i)^X-Ops-Authorization-(\d+)`) + // acceptEncoding := regexp.MustCompile(`(?i)^Accept-Encoding`) + // userAgent := regexp.MustCompile(`(?i)^User-Agent`) + // + // var content string + // for key, value := range r.Header { + // if !authHeader.MatchString(key) && !acceptEncoding.MatchString(key) && !userAgent.MatchString(key) { + // content += fmt.Sprintf("%s:%s\n", key, value) + // } + // } + // return content + var content string + content += fmt.Sprintf("%s:%s\n", "Method", r.Header.Get("Method")) + content += fmt.Sprintf("%s:%s\n", "Hashed Path", r.Header.Get("Hashed Path")) + content += fmt.Sprintf("%s:%s\n", "Accept", r.Header.Get("Accept")) + content += fmt.Sprintf("%s:%s\n", "X-Chef-Version", r.Header.Get("X-Chef-Version")) + content += fmt.Sprintf("%s:%s\n", "X-Ops-Timestamp", r.Header.Get("X-Ops-Timestamp")) + content += fmt.Sprintf("%s:%s\n", "X-Ops-Userid", r.Header.Get("X-Ops-Userid")) + content += fmt.Sprintf("%s:%s\n", "X-Ops-Sign", r.Header.Get("X-Ops-Sign")) + content += fmt.Sprintf("%s:%s\n", "X-Ops-Content-Hash", r.Header.Get("X-Ops-Content-Hash")) + return content +} + +func TestGenerateHash(t *testing.T) { + input, output := HashStr("hi"), "witfkXg0JglCjW9RssWvTAveakI=" + + Convey("correctly hashes a given input string", t, func() { + So(input, ShouldEqual, output) + }) +} + +// BUG(fujin): @bradbeam: this doesn't make sense to me. +func TestGenerateSignatureError(t *testing.T) { + ac, _ := makeAuthConfig() + + // BUG(fujin): what about the 'hi' string is not meant to be signable? + sig, err := GenerateSignature(ac.PrivateKey, "hi") + + Convey("sig should be empty?", t, func() { + So(sig, ShouldNotBeEmpty) + }) + + Convey("errors for an unknown reason to fujin", t, func() { + So(err, ShouldBeNil) + }) +} + +func TestRequestError(t *testing.T) { + ac, err := makeAuthConfig() + if err != nil { + t.Fatal(err) + } + + // Gave up trying to implement this myself + // nopCloser came from https://groups.google.com/d/msg/golang-nuts/J-Y4LtdGNSw/wDSYbHWIKj0J + // yay for sharing + requestBody := nopCloser{bytes.NewBufferString("somecoolbodytext")} + request, err := http.NewRequest("GET", requestURL, requestBody) + + err = ac.SignRequest(request) + + // BUG(fujin): This should actually error not bubble nil? + Convey("should not sign a request with missing required information", t, func() { + So(err, ShouldBeNil) + }) +} + +func TestNewClient(t *testing.T) { + cfg := &Config{Name: "testclient", Key: privateKey, SkipSSL: false} + c, err := NewClient(cfg) + if err != nil { + t.Error("Couldn't make a valid client...\n", err) + } + // simple validation on the created client + if c.Auth.ClientName != "testclient" { + t.Error("unexpected client name: ", c.Auth.ClientName) + } + + // Bad PEM should be an error + cfg = &Config{Name: "blah", Key: "not a key", SkipSSL: false} + c, err = NewClient(cfg) + if err == nil { + t.Error("Built a client from a bad key string") + } + + // Not a proper key should be an error + cfg = &Config{Name: "blah", Key: badPrivateKey, SkipSSL: false} + c, err = NewClient(cfg) + if err == nil { + t.Error("Built a client from a bad key string") + } +} + +func TestNewRequest(t *testing.T) { + var err error + server := createServer() + cfg := &Config{Name: "testclient", Key: privateKey, SkipSSL: false} + c, _ := NewClient(cfg) + defer server.Close() + + request, err := c.NewRequest("GET", server.URL, nil) + if err != nil { + t.Error("HRRRM! 
we tried to make a request but it failed :`( ", err) + } + + resp, err := c.Do(request, nil) + if resp.StatusCode != 200 { + t.Error("Non 200 return code: ", resp.Status) + } + + // This should fail because we've got an invalid URI + _, err = c.NewRequest("GET", "%gh&%ij", nil) + if err == nil { + t.Error("This terrible request thing should fail and it didn't") + } + + // This should fail because there is no TOODLES! method :D + request, err = c.NewRequest("TOODLES!", "", nil) + _, err = c.Do(request, nil) + if err == nil { + t.Error("This terrible request thing should fail and it didn't") + } +} + +func TestDo_badjson(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/hashrocket", func(w http.ResponseWriter, r *http.Request) { + fmt.Fprintf(w, " pigthrusters => 100% ") + }) + + stupidData := struct{}{} + request, err := client.NewRequest("GET", "hashrocket", nil) + _, err = client.Do(request, &stupidData) + if err == nil { + t.Error(err) + } + +} diff --git a/vendor/github.com/go-chef/chef/node.go b/vendor/github.com/go-chef/chef/node.go new file mode 100644 index 000000000..53ef1def8 --- /dev/null +++ b/vendor/github.com/go-chef/chef/node.go @@ -0,0 +1,87 @@ +package chef + +import "fmt" + +type NodeService struct { + client *Client +} + +// Node represents the native Go version of the deserialized Node type +type Node struct { + Name string `json:"name"` + Environment string `json:"chef_environment"` + ChefType string `json:"chef_type"` + AutomaticAttributes map[string]interface{} `json:"automatic"` + NormalAttributes map[string]interface{} `json:"normal"` + DefaultAttributes map[string]interface{} `json:"default"` + OverrideAttributes map[string]interface{} `json:"override"` + JsonClass string `json:"json_class"` + RunList []string `json:"run_list"` +} + +type NodeResult struct { + Uri string `json:"uri"` +} + +// NewNode is the Node constructor method +func NewNode(name string) (node Node) { + node = Node{ + Name: name, + Environment: "_default", + ChefType: "node", + JsonClass: "Chef::Node", + } + return +} + +// List lists the nodes in the Chef server. +// +// Chef API docs: http://docs.opscode.com/api_chef_server.html#id25 +func (e *NodeService) List() (data map[string]string, err error) { + err = e.client.magicRequestDecoder("GET", "nodes", nil, &data) + return +} + +// Get gets a node from the Chef server. +// +// Chef API docs: http://docs.opscode.com/api_chef_server.html#id28 +func (e *NodeService) Get(name string) (node Node, err error) { + url := fmt.Sprintf("nodes/%s", name) + err = e.client.magicRequestDecoder("GET", url, nil, &node) + return +} + +// Post creates a Node on the chef server +// +// Chef API docs: https://docs.getchef.com/api_chef_server.html#id39 +func (e *NodeService) Post(node Node) (data *NodeResult, err error) { + body, err := JSONReader(node) + if err != nil { + return + } + + err = e.client.magicRequestDecoder("POST", "nodes", body, &data) + return +} + +// Put updates a node on the Chef server. 
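+//
+// A minimal read-modify-write sketch (assuming a configured client; the
+// node name and recipe are illustrative):
+//
+//	node, err := client.Nodes.Get("node1")
+//	node.RunList = append(node.RunList, "recipe[foo]")
+//	node, err = client.Nodes.Put(node)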
+// +// Chef API docs: http://docs.getchef.com/api_chef_server.html#id42 +func (e *NodeService) Put(n Node) (node Node, err error) { + url := fmt.Sprintf("nodes/%s", n.Name) + body, err := JSONReader(n) + if err != nil { + return + } + + err = e.client.magicRequestDecoder("PUT", url, body, &node) + return +} + +// Delete removes a node on the Chef server +// +// Chef API docs: https://docs.getchef.com/api_chef_server.html#id40 +func (e *NodeService) Delete(name string) (err error) { + err = e.client.magicRequestDecoder("DELETE", "nodes/"+name, nil, nil) + return +} diff --git a/vendor/github.com/go-chef/chef/node_test.go b/vendor/github.com/go-chef/chef/node_test.go new file mode 100644 index 000000000..f372493e7 --- /dev/null +++ b/vendor/github.com/go-chef/chef/node_test.go @@ -0,0 +1,122 @@ +package chef + +import ( + "encoding/json" + "fmt" + "io" + "log" + "net/http" + "os" + "reflect" + "testing" +) + +var ( + testNodeJSON = "test/node.json" +) + +func TestNodeFromJSONDecoder(t *testing.T) { + if file, err := os.Open(testNodeJSON); err != nil { + t.Error("unexpected error", err, "during os.Open on", testNodeJSON) + } else { + dec := json.NewDecoder(file) + var n Node + if err := dec.Decode(&n); err == io.EOF { + log.Println(n) + } else if err != nil { + log.Fatal(err) + } + } +} + +func TestNode_NewNode(t *testing.T) { + n := NewNode("testnode") + expect := Node{ + Name: "testnode", + Environment: "_default", + ChefType: "node", + JsonClass: "Chef::Node", + } + + if !reflect.DeepEqual(n, expect) { + t.Errorf("NewNode returned %+v, want %+v", n, expect) + } +} + +func TestNodesService_Methods(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/nodes", func(w http.ResponseWriter, r *http.Request) { + switch { + case r.Method == "GET": + fmt.Fprintf(w, `{"node1":"https://chef/nodes/node1", "node2":"https://chef/nodes/node2"}`) + case r.Method == "POST": + fmt.Fprintf(w, `{ "uri": "http://localhost:4545/nodes/node1" }`) + } + }) + + mux.HandleFunc("/nodes/node1", func(w http.ResponseWriter, r *http.Request) { + switch { + case r.Method == "GET" || r.Method == "PUT": + fmt.Fprintf(w, `{ + "name": "node1", + "json_class": "Chef::Node", + "chef_type": "node", + "chef_environment": "development" + }`) + case r.Method == "DELETE": + } + }) + + // Test list + nodes, err := client.Nodes.List() + if err != nil { + t.Errorf("Nodes.List returned error: %v", err) + } + + listWant := map[string]string{"node1": "https://chef/nodes/node1", "node2": "https://chef/nodes/node2"} + + if !reflect.DeepEqual(nodes, listWant) { + t.Errorf("Nodes.List returned %+v, want %+v", nodes, listWant) + } + + // test Get + node, err := client.Nodes.Get("node1") + if err != nil { + t.Errorf("Nodes.Get returned error: %v", err) + } + + wantNode := NewNode("node1") + wantNode.Environment = "development" + if !reflect.DeepEqual(node, wantNode) { + t.Errorf("Nodes.Get returned %+v, want %+v", node, wantNode) + } + + // test Post + res, err := client.Nodes.Post(wantNode) + if err != nil { + t.Errorf("Nodes.Post returned error: %s", err.Error()) + } + + postResult := &NodeResult{"http://localhost:4545/nodes/node1"} + if !reflect.DeepEqual(postResult, res) { + t.Errorf("Nodes.Post returned %+v, want %+v", res, postResult) + } + + // test Put + putRes, err := client.Nodes.Put(node) + if err != nil { + t.Errorf("Nodes.Put returned error", err) + } + + if !reflect.DeepEqual(putRes, node) { + t.Errorf("Nodes.Post returned %+v, want %+v", putRes, node) + } + + // test Delete + err = client.Nodes.Delete(node.Name) + if 
err != nil { + t.Errorf("Nodes.Delete returned error", err) + } +} diff --git a/vendor/github.com/go-chef/chef/reader.go b/vendor/github.com/go-chef/chef/reader.go new file mode 100644 index 000000000..0f6e085d9 --- /dev/null +++ b/vendor/github.com/go-chef/chef/reader.go @@ -0,0 +1,15 @@ +package chef + +import ( + "bytes" + "encoding/json" + "io" +) + +// JSONReader handles arbitrary types and synthesizes a streaming encoder for them. +func JSONReader(v interface{}) (r io.Reader, err error) { + buf := new(bytes.Buffer) + err = json.NewEncoder(buf).Encode(v) + r = bytes.NewReader(buf.Bytes()) + return +} diff --git a/vendor/github.com/go-chef/chef/reader_test.go b/vendor/github.com/go-chef/chef/reader_test.go new file mode 100644 index 000000000..986ed4631 --- /dev/null +++ b/vendor/github.com/go-chef/chef/reader_test.go @@ -0,0 +1,46 @@ +package chef + +import ( + "io" + "io/ioutil" + "os" + "testing" +) + +type TestEncoder struct { + Name string + Awesome []string + OtherStuff map[string]string +} + +func TestEncoderJSONReader(t *testing.T) { + f, err := ioutil.TempFile("test/", "reader") + if err != nil { + t.Error(err) + } + + defer f.Close() + defer os.Remove(f.Name()) + + tr := &TestEncoder{ + Name: "Test Reader", + Awesome: []string{"foo", "bar", "baz"}, + OtherStuff: map[string]string{ + "foo": "bar", + "baz": "banana", + }, + } + + // Generate body + body, err := JSONReader(tr) + if err != nil { + t.Error(err) + } + + t.Log(body) + + _, err = io.Copy(f, body) + if err != nil { + t.Error(err) + } +} diff --git a/vendor/github.com/go-chef/chef/release.go b/vendor/github.com/go-chef/chef/release.go new file mode 100644 index 000000000..34ac92db1 --- /dev/null +++ b/vendor/github.com/go-chef/chef/release.go @@ -0,0 +1,5 @@ +// +build !debug + +package chef + +func debug(fmt string, args ...interface{}) {} diff --git a/vendor/github.com/go-chef/chef/role.go b/vendor/github.com/go-chef/chef/role.go new file mode 100644 index 000000000..eaad0efe5 --- /dev/null +++ b/vendor/github.com/go-chef/chef/role.go @@ -0,0 +1,101 @@ +package chef + +import "fmt" + +type RoleService struct { + client *Client +} + +type RoleListResult map[string]string +type RoleCreateResult map[string]string + +// Role represents the native Go version of the deserialized Role type +type Role struct { + Name string `json:"name"` + ChefType string `json:"chef_type"` + Description string `json:"description"` + RunList RunList `json:"run_list"` + DefaultAttributes interface{} `json:"default_attributes,omitempty"` + OverrideAttributes interface{} `json:"override_attributes,omitempty"` + JsonClass string `json:"json_class,omitempty"` +} + +// String makes RoleListResult implement the string result +func (e RoleListResult) String() (out string) { + return strMapToStr(e) +} + +// String makes RoleCreateResult implement the string result +func (e RoleCreateResult) String() (out string) { + return strMapToStr(e) +} + +// List lists the roles in the Chef server. +// +// Chef API docs: http://docs.getchef.com/api_chef_server.html#id31 +func (e *RoleService) List() (data *RoleListResult, err error) { + err = e.client.magicRequestDecoder("GET", "roles", nil, &data) + return +} + +// Create a new role in the Chef server. 
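+//
+// A minimal usage sketch (assumption: a configured *Client named client; the role values are illustrative):
+//
+//	r := &Role{Name: "webserver", ChefType: "role", JsonClass: "Chef::Role", RunList: RunList{"recipe[apache2]"}}
+//	result, err := client.Roles.Create(r)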
+// +// Chef API docs: http://docs.getchef.com/api_chef_server.html#id32 +func (e *RoleService) Create(role *Role) (data *RoleCreateResult, err error) { + // err = e.client.magicRequestDecoder("POST", "roles", role, &data) + body, err := JSONReader(role) + if err != nil { + return + } + + // BUG(fujiN): This is now both a *response* decoder and handles upload.. gettin smelly + err = e.client.magicRequestDecoder( + "POST", + "roles", + body, + &data, + ) + + return +} + +// Delete a role from the Chef server. +// +// Chef API docs: http://docs.getchef.com/api_chef_server.html#id33 + +// Get gets a role from the Chef server. +// +// Chef API docs: http://docs.getchef.com/api_chef_server.html#id34 +func (e *RoleService) Get(name string) (data *Role, err error) { + path := fmt.Sprintf("roles/%s", name) + err = e.client.magicRequestDecoder("GET", path, nil, &data) + return +} + +// Update a role in the Chef server. +// +// Chef API docs: http://docs.getchef.com/api_chef_server.html#id35 +func (e *RoleService) Put(role *Role) (data *Role, err error) { + path := fmt.Sprintf("roles/%s", role.Name) + // err = e.client.magicRequestDecoder("PUT", path, role, nil) + body, err := JSONReader(role) + if err != nil { + return + } + + err = e.client.magicRequestDecoder( + "PUT", + path, + body, + &data, + ) + return +} + +// Get a list of environments have have environment specific run-lists for the given role +// +// Chef API docs: http://docs.getchef.com/api_chef_server.html#id36 + +// Get the environment-specific run-list for a role +// +// Chef API docs: http://docs.getchef.com/api_chef_server.html#id37 diff --git a/vendor/github.com/go-chef/chef/role_test.go b/vendor/github.com/go-chef/chef/role_test.go new file mode 100644 index 000000000..6a3dbf1f7 --- /dev/null +++ b/vendor/github.com/go-chef/chef/role_test.go @@ -0,0 +1,192 @@ +package chef + +import ( + "encoding/json" + "fmt" + "io" + "log" + "net/http" + "os" + "reflect" + "testing" + . 
"github.com/smartystreets/goconvey/convey" +) + +var ( + testRoleJSON = "test/role.json" + // FML + testRole = &Role{ + Name: "test", + ChefType: "role", + Description: "Test Role", + RunList: []string{"recipe[foo]", "recipe[baz]", "role[banana]"}, + JsonClass: "Chef::Role", + DefaultAttributes: struct{}{}, + OverrideAttributes: struct{}{}, + } +) + +func TestRoleName(t *testing.T) { + // BUG(spheromak): Pull these constructors out into a Convey Decorator + n1 := testRole + name := n1.Name + + Convey("Role name is 'test'", t, func() { + So(name, ShouldEqual, "test") + }) +} + +// BUG(fujin): re-do with goconvey +func TestRoleFromJSONDecoder(t *testing.T) { + if file, err := os.Open(testRoleJSON); err != nil { + t.Error("unexpected error", err, "during os.Open on", testRoleJSON) + } else { + dec := json.NewDecoder(file) + var n Role + if err := dec.Decode(&n); err == io.EOF { + log.Println(n) + } else if err != nil { + log.Fatal(err) + } + } +} + +func TestRolesService_List(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/roles", func(w http.ResponseWriter, r *http.Request) { + fmt.Fprintf(w, `{"foo":"http://localhost:4000/roles/foo", "webserver":"http://localhost:4000/roles/webserver"}`) + }) + + roles, err := client.Roles.List() + if err != nil { + t.Errorf("Roles.List returned error: %v", err) + } + + want := &RoleListResult{"foo": "http://localhost:4000/roles/foo", "webserver": "http://localhost:4000/roles/webserver"} + + if !reflect.DeepEqual(roles, want) { + t.Errorf("Roles.List returned %+v, want %+v", roles, want) + } +} + +func TestRolesService_Get(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/roles/webserver", func(w http.ResponseWriter, r *http.Request) { + fmt.Fprintf(w, `{ + "name": "webserver", + "chef_type": "role", + "json_class": "Chef::Role", + "default_attributes": "", + "description": "A webserver", + "run_list": [ + "recipe[unicorn]", + "recipe[apache2]" + ], + "override_attributes": "" + } + `) + }) + + role, err := client.Roles.Get("webserver") + if err != nil { + t.Errorf("Roles.Get returned error: %v", err) + } + + want := &Role{ + Name: "webserver", + ChefType: "role", + JsonClass: "Chef::Role", + DefaultAttributes: "", + Description: "A webserver", + RunList: []string{"recipe[unicorn]", "recipe[apache2]"}, + OverrideAttributes: "", + } + + if !reflect.DeepEqual(role, want) { + t.Errorf("Roles.Get returned %+v, want %+v", role, want) + } +} + +func TestRolesService_Create(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/roles", func(w http.ResponseWriter, r *http.Request) { + fmt.Fprintf(w, `{ "uri": "http://localhost:4000/roles/webserver" }`) + }) + + role := &Role{ + Name: "webserver", + ChefType: "role", + JsonClass: "Chef::Role", + DefaultAttributes: "", + Description: "A webserver", + RunList: []string{"recipe[unicorn]", "recipe[apache2]"}, + OverrideAttributes: "", + } + + uri, err := client.Roles.Create(role) + if err != nil { + t.Errorf("Roles.Create returned error: %v", err) + } + + want := &RoleCreateResult{"uri": "http://localhost:4000/roles/webserver"} + + if !reflect.DeepEqual(uri, want) { + t.Errorf("Roles.Create returned %+v, want %+v", uri, want) + } +} + +func TestRolesService_Put(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/roles/webserver", func(w http.ResponseWriter, r *http.Request) { + fmt.Fprintf(w, `{ + "name": "webserver", + "chef_type": "role", + "json_class": "Chef::Role", + "description": "A webserver", + "run_list": [ + "recipe[apache2]" + ] + }`) + }) + + role 
:= &Role{ + Name: "webserver", + ChefType: "role", + JsonClass: "Chef::Role", + Description: "A webserver", + RunList: []string{"recipe[apache2]"}, + } + + updatedRole, err := client.Roles.Put(role) + if err != nil { + t.Errorf("Roles.Put returned error: %v", err) + } + + if !reflect.DeepEqual(updatedRole, role) { + t.Errorf("Roles.Put returned %+v, want %+v", updatedRole, role) + } +} + +func TestRolesService_RoleListResultString(t *testing.T) { + r := &RoleListResult{"foo": "http://localhost:4000/roles/foo"} + want := "foo => http://localhost:4000/roles/foo\n" + if r.String() != want { + t.Errorf("RoleListResult.String returned %+v, want %+v", r.String(), want) + } +} + +func TestRolesService_RoleCreateResultString(t *testing.T) { + r := &RoleCreateResult{"uri": "http://localhost:4000/roles/webserver"} + want := "uri => http://localhost:4000/roles/webserver\n" + if r.String() != want { + t.Errorf("RoleCreateResult.String returned %+v, want %+v", r.String(), want) + } +} diff --git a/vendor/github.com/go-chef/chef/run_list.go b/vendor/github.com/go-chef/chef/run_list.go new file mode 100644 index 000000000..ac768fad6 --- /dev/null +++ b/vendor/github.com/go-chef/chef/run_list.go @@ -0,0 +1,3 @@ +package chef + +type RunList []string diff --git a/vendor/github.com/go-chef/chef/run_list_test.go b/vendor/github.com/go-chef/chef/run_list_test.go new file mode 100644 index 000000000..fc04ee135 --- /dev/null +++ b/vendor/github.com/go-chef/chef/run_list_test.go @@ -0,0 +1,28 @@ +package chef + +import ( + . "github.com/smartystreets/goconvey/convey" + "testing" +) + +var ( + rl = RunList{"recipe[foo]", "recipe[baz]", "role[banana]"} +) + +func TestNodeRunList(t *testing.T) { + Convey("Node.RunList() should be a RunList", t, func() { + So(rl, ShouldHaveSameTypeAs, RunList{}) + }) + + Convey("Node.RunList() should be populated", t, func() { + So(rl, ShouldContain, "recipe[foo]") + So(rl, ShouldContain, "recipe[baz]") + So(rl, ShouldContain, "role[banana]") + }) + + rl = RunList{} + Convey("Empty RunList should be valid", t, func() { + So(rl, ShouldBeEmpty) + }) + +} diff --git a/vendor/github.com/go-chef/chef/sandbox.go b/vendor/github.com/go-chef/chef/sandbox.go new file mode 100644 index 000000000..43d48ecd7 --- /dev/null +++ b/vendor/github.com/go-chef/chef/sandbox.go @@ -0,0 +1,70 @@ +package chef + +import ( + "fmt" + "time" +) + +// SandboxService is the chef-client Sandbox service used as the entrypoint and caller for Sandbox methods +type SandboxService struct { + client *Client +} + +// SandboxRequest is the desired chef-api structure for a Post body +type SandboxRequest struct { + Checksums map[string]interface{} `json:"checksums"` +} + +// SandboxPostResponse is the struct returned from the chef-server for Post Requests to /sandbox +type SandboxPostResponse struct { + ID string `json:"sandbox_id"` + Uri string `json:"uri"` + Checksums map[string]SandboxItem +} + +// A SandbooxItem is embeddedinto the response from the chef-server and the actual sandbox It is the Url and state for a specific Item. +type SandboxItem struct { + Url string `json:"url"` + Upload bool `json:"needs_upload"` +} + +// Sandbox Is the structure of an actul sandbox that has been created and returned by the final PUT to the sandbox ID +type Sandbox struct { + ID string `json:"guid"` + Name string `json:"name"` + CreationTime time.Time `json:"create_time"` + Completed bool `json:"is_completed"` + Checksums []string +} + +// Post creates a new sandbox on the chef-server. 
Deviates from the Chef-server api in that it takes a []string of sums for the sandbox instead of the IMO rediculous hash of nulls that the API wants. We convert it to the right structure under the hood for the chef-server api. +// http://docs.getchef.com/api_chef_server.html#id38 +func (s SandboxService) Post(sums []string) (data SandboxPostResponse, err error) { + smap := make(map[string]interface{}) + for _, hashstr := range sums { + smap[hashstr] = nil + } + sboxReq := SandboxRequest{Checksums: smap} + + body, err := JSONReader(sboxReq) + if err != nil { + return + } + + err = s.client.magicRequestDecoder("POST", "/sandboxes", body, &data) + return +} + +// Put is used to commit a sandbox ID to the chef server. To singal that the sandox you have Posted is now uploaded. +func (s SandboxService) Put(id string) (box Sandbox, err error) { + answer := make(map[string]bool) + answer["is_completed"] = true + body, err := JSONReader(answer) + + if id == "" { + return box, fmt.Errorf("must supply sandbox id to PUT request.") + } + + err = s.client.magicRequestDecoder("PUT", "/sandboxes/"+id, body, &box) + return +} diff --git a/vendor/github.com/go-chef/chef/sandbox_test.go b/vendor/github.com/go-chef/chef/sandbox_test.go new file mode 100644 index 000000000..8af59da3b --- /dev/null +++ b/vendor/github.com/go-chef/chef/sandbox_test.go @@ -0,0 +1,96 @@ +package chef + +import ( + "crypto/md5" + "crypto/rand" + "fmt" + "net/http" + _ "reflect" + "testing" + . "github.com/smartystreets/goconvey/convey" +) + +// generate random data for sandbox +func random_data(size int) (b []byte) { + b = make([]byte, size) + rand.Read(b) + return +} + +// mux.HandleFunc("/sandboxes/f1c560ccb472448e9cfb31ff98134247", func(w http.ResponseWriter, r *http.Request) { }) +func TestSandboxesPost(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/sandboxes", func(w http.ResponseWriter, r *http.Request) { + fmt.Fprintf(w, `{ + "sandbox_id": "f1c560ccb472448e9cfb31ff98134247", + "uri": "http://trendy.local:4545/sandboxes/f1c560ccb472448e9cfb31ff98134247", + "Checksums": { + "4bd9946774fff1fb53745c645e447c9d20a14cac410b9eea037299247e70aa1e": { + "url": "http://trendy.local:4545/file_store/4bd9946774fff1fb53745c645e447c9d20a14cac410b9eea037299247e70aa1e", + "needs_upload": true + }, + "548c6928e3f5a800a0e9cc146647a31a2353c42950a611cfca646819cdaa54fa": { + "url": "http://trendy.local:4545/file_store/548c6928e3f5a800a0e9cc146647a31a2353c42950a611cfca646819cdaa54fa", + "needs_upload": true + }}}`) + }) + + // create junk files and sums + files := make(map[string][]byte) + // slice of strings for holding our hashes + sums := make([]string, 10) + for i := 0; i <= 10; i++ { + data := random_data(1024) + hashstr := fmt.Sprintf("%x", md5.Sum(data)) + files[hashstr] = data + sums = append(sums, hashstr) + } + + // post the new sums/files to the sandbox + _, err := client.Sandboxes.Post(sums) + if err != nil { + t.Errorf("Snadbox Post error making request: ", err) + } +} + +func TestSandboxesPut(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/sandboxes/f1c560ccb472448e9cfb31ff98134247", func(w http.ResponseWriter, r *http.Request) { + fmt.Fprintf(w, `{ + "guid": "123", + "name": "123", + "CreateionTime": "2014-08-23 18:13:37", + "is_completed": true, + "uri": "https://127.0.0.1/sandboxes/f1c560ccb472448e9cfb31ff98134247", + "checksums": [ + "3124216defe5849089a577ffefb0bb05", + "e06ddfa07ca97c80c368d08e189b928a", + "eb65444f8adeb11c56cf0df201a07cb4" + ] + }`) + }) + + sandbox, err := 
client.Sandboxes.Put("f1c560ccb472448e9cfb31ff98134247")
+	if err != nil {
+		t.Errorf("Sandbox Put error making request: %v", err)
+	}
+
+	expected := Sandbox{
+		ID:        "123",
+		Name:      "123",
+		Completed: true,
+		Checksums: []string{
+			"3124216defe5849089a577ffefb0bb05",
+			"e06ddfa07ca97c80c368d08e189b928a",
+			"eb65444f8adeb11c56cf0df201a07cb4",
+		},
+	}
+
+	Convey("Sandbox Equality", t, func() {
+		So(sandbox, ShouldResemble, expected)
+	})
+} diff --git a/vendor/github.com/go-chef/chef/search.go b/vendor/github.com/go-chef/chef/search.go new file mode 100644 index 000000000..45471a7a8 --- /dev/null +++ b/vendor/github.com/go-chef/chef/search.go @@ -0,0 +1,148 @@
+package chef
+
+import (
+	"errors"
+	"fmt"
+	"strings"
+)
+
+type SearchService struct {
+	client *Client
+}
+
+// SearchQuery is the struct for holding a query request
+type SearchQuery struct {
+	// The index you want to search
+	Index string
+
+	// The query you want to execute. This is the 'chef' query ex: 'chef_environment:prod'
+	Query string
+
+	// Sort order you want the search results returned
+	SortBy string
+
+	// Starting position for search
+	Start int
+
+	// Number of rows to return
+	Rows int
+}
+
+// String implements the Stringer interface for the SearchQuery
+func (q SearchQuery) String() string {
+	return fmt.Sprintf("%s?q=%s&rows=%d&sort=%s&start=%d", q.Index, q.Query, q.Rows, q.SortBy, q.Start)
+}
+
+// SearchResult will return a slice of interface{} of chef-like objects (roles/nodes/etc)
+type SearchResult struct {
+	Total int
+	Start int
+	Rows  []interface{}
+}
+
+// Do will execute the search query on the client
+func (q SearchQuery) Do(client *Client) (res SearchResult, err error) {
+	fullUrl := fmt.Sprintf("search/%s", q)
+	err = client.magicRequestDecoder("GET", fullUrl, nil, &res)
+	return
+}
+
+// DoPartial will execute the search query on the client with partial mapping
+func (q SearchQuery) DoPartial(client *Client, params map[string]interface{}) (res SearchResult, err error) {
+	fullUrl := fmt.Sprintf("search/%s", q)
+
+	body, err := JSONReader(params)
+	if err != nil {
+		debug("Problem encoding params for body: %s", err.Error())
+		return
+	}
+
+	err = client.magicRequestDecoder("POST", fullUrl, body, &res)
+	return
+}
+
+// NewQuery is a constructor for a SearchQuery struct. This is used by other search service methods to perform search requests on the server
+func (e SearchService) NewQuery(idx, statement string) (query SearchQuery, err error) {
+	// validate statement
+	if !strings.Contains(statement, ":") {
+		err = errors.New("statement is malformed")
+		return
+	}
+
+	query = SearchQuery{
+		Index: idx,
+		Query: statement,
+		// These are the defaults in chef: https://github.com/opscode/chef/blob/master/lib/chef/search/query.rb#L102-L105
+		SortBy: "X_CHEF_id_CHEF_X asc",
+		Start:  0,
+		Rows:   1000,
+	}
+
+	return
+}
+
+// Exec runs the query on the index passed in. This is a helper method. If you want more control over the query, use NewQuery and its Do() method.
+// BUG(spheromak): Should we use exec or SearchQuery.Do() or have both ?
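+//
+// A minimal usage sketch (assumption: a configured *Client named client; the index and query string are illustrative):
+//
+//	res, err := client.Search.Exec("node", "chef_environment:prod")
+//	if err == nil {
+//		fmt.Println(res.Total, len(res.Rows))
+//	}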
+func (e SearchService) Exec(idx, statement string) (res SearchResult, err error) { + // Copy-paste here till We decide which way to go with exec vs Do + if !strings.Contains(statement, ":") { + err = errors.New("statement is malformed") + return + } + + query := SearchQuery{ + Index: idx, + Query: statement, + // These are the defaults in chef: https://github.com/opscode/chef/blob/master/lib/chef/search/query.rb#L102-L105 + SortBy: "X_CHEF_id_CHEF_X asc", + Start: 0, + Rows: 1000, + } + + res, err = query.Do(e.client) + start := res.Start + inc := 1000 + total := res.Total + + for start + inc <= total { + query.Start = query.Start + 1000 + start = query.Start + ares, err := query.Do(e.client) + if err != nil { + return res, err + } + res.Rows = append(res.Rows, ares.Rows...) + } + return +} + +// PartialExec Executes a partial search based on passed in params and the query. +func (e SearchService) PartialExec(idx, statement string, params map[string]interface{}) (res SearchResult, err error) { + query := SearchQuery{ + Index: idx, + Query: statement, + // These are the defaults in chef: https://github.com/opscode/chef/blob/master/lib/chef/search/query.rb#L102-L105 + SortBy: "X_CHEF_id_CHEF_X asc", + Start: 0, + Rows: 1000, + } + + fullUrl := fmt.Sprintf("search/%s", query) + + body, err := JSONReader(params) + if err != nil { + debug("Problem encoding params for body") + return + } + + err = e.client.magicRequestDecoder("POST", fullUrl, body, &res) + return +} + +// List lists the nodes in the Chef server. +// +// Chef API docs: http://docs.opscode.com/api_chef_server.html#id25 +func (e SearchService) Indexes() (data map[string]string, err error) { + err = e.client.magicRequestDecoder("GET", "search", nil, &data) + return +} diff --git a/vendor/github.com/go-chef/chef/search_test.go b/vendor/github.com/go-chef/chef/search_test.go new file mode 100644 index 000000000..84b208157 --- /dev/null +++ b/vendor/github.com/go-chef/chef/search_test.go @@ -0,0 +1,89 @@ +package chef + +import ( + "fmt" + "net/http" + "reflect" + "testing" +) + +func TestSearch_Get(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/search", func(w http.ResponseWriter, r *http.Request) { + fmt.Fprintf(w, `{ + "node": "http://localhost:4000/search/node", + "role": "http://localhost:4000/search/role", + "client": "http://localhost:4000/search/client", + "users": "http://localhost:4000/search/users" + }`) + }) + + indexes, err := client.Search.Indexes() + if err != nil { + t.Errorf("Search.Get returned error: %+v", err) + } + wantedIdx := map[string]string{ + "node": "http://localhost:4000/search/node", + "role": "http://localhost:4000/search/role", + "client": "http://localhost:4000/search/client", + "users": "http://localhost:4000/search/users", + } + if !reflect.DeepEqual(indexes, wantedIdx) { + t.Errorf("Search.Get returned %+v, want %+v", indexes, wantedIdx) + } +} + +func TestSearch_ExecDo(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/search/nodes", func(w http.ResponseWriter, r *http.Request) { + fmt.Fprintf(w, `{ + "total": 1, + "start": 0, + "rows": [ + { + "overrides": {"hardware_type": "laptop"}, + "name": "latte", + "chef_type": "node", + "json_class": "Chef::Node", + "attributes": {"hardware_type": "laptop"}, + "run_list": ["recipe[unicorn]"], + "defaults": {} + } + ] + }`) + }) + + // test the fail case + _, err := client.Search.NewQuery("foo", "failsauce") + if err == nil { + t.Errorf("Bad query wasn't caught") + } + + // test the fail case + _, err = 
client.Search.Exec("foo", "failsauce") + if err == nil { + t.Errorf("Bad query wasn't caught") + } + + // test the positive case + query, err := client.Search.NewQuery("nodes", "name:latte") + if err != nil { + t.Errorf("failed to create query") + } + + // for now we aren't testing the result.. + _, err = query.Do(client) + if err != nil { + t.Errorf("Search.Exec failed", err) + } + + _, err = client.Search.Exec("nodes", "name:latte") + if err != nil { + t.Errorf("Search.Exec failed", err) + } + +} diff --git a/vendor/github.com/go-chef/chef/test/client.json b/vendor/github.com/go-chef/chef/test/client.json new file mode 100644 index 000000000..1129917d1 --- /dev/null +++ b/vendor/github.com/go-chef/chef/test/client.json @@ -0,0 +1,7 @@ +{ + "clientname": "client_name", + "orgname": "org_name", + "validator": false, + "certificate": "-----BEGIN CERTIFICATE-----\nMIIDOjCCAqOgAwIBAgIE47eOmDANBgkqhkiG9w0BAQUFADCBnjELMAkGA1UEBhMC\nVVMxEzARBgNVBAgMCldhc2hpbmd0b24xfrsgbg@VBAcMB1NlYXR0bGUxFjAUBgNV\nBAoMDU9wc2NvZGUsIEluYy4xHDAaBgNVBAsME0NlcnRpZmljYXRlIFNlcnZpY2Ux\nMjAwBgNVBAMMKW9wc2NvZGUuY29tL2VtYWlsQWRkcmVzcz1hdXRoQG9wc2NvZGUu\nY29tMCAXDTEyMDkwNzE4MDUwOFo1234g5ghdcDAzMTgwNTA4WjCBnTEQMA4GA1UE\nBxMHU2VhdHRsZTETMBEGA1UECBMKV2FzaGluZ3RvbjELMAkGA1UEBhMCVVMxHDAa\nBgNVBAsTE0NlcnRpZmljYXRlIFNlcnZpY2UxFjAUBgNVBAoTDU9wc2NvZGUsIElu\nYy4xMTAvBgNVBAMUKFVSSTpodHRwOi8vb3BzY29kZS5jb20vR1VJRFMvY2xpZW50\nX2d1aWQwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDJhwXHuq96iHpv\nSDkDdpSAWa3O81o1ZkG1/H1GmRKc3NDp9KfL/eg/itJeVT5BJ2DXenhbP0M8BHQG\narObwx6vXm+H/Q2sdfbbhdFt5y6wQZI4Dqnv7Hk/H95jUb+MJoXpJkB7zYSqrrei\nLBjjPg2p+M9+EUklelLeXDJDdj1xL5RNdDfv3yxyESTdYYVboQwgqbP3yGy7ITQc\nvg11qmDzg7L17q6uQevVap6aECuGS7//CcDcKJwuBYQukJi7QFs5V4ayq6GlgfT/h\nb/yjEAcfcqvBBDOO8K34fExnWTO7fdghh4btddPayHbud8dW0m3KdzhUaIh9BlGz\nxFTdI8/lAgMBAAEwDQYJKoZIhvcNAQEFBQADgYEAZlvDZWmw0u0VS4mapSxuKPAz\n4qhB8yttbZj8H9hUNfMdHMpyE3jJGFcLokRXdfxOsFNoPUj0hn+5BKTqtmdd6T1u\niDVoFfJ3nVPfGJMdPXZyi62pNCwPe/va13dLqTG92AGNDjjOxmMma9/OQ8Ty0eBu\nHWOxaK6VpV8pqZPq/Fg=\n-----END CERTIFICATE-----\n", + "name": "node_name" +} \ No newline at end of file diff --git a/vendor/github.com/go-chef/chef/test/cookbooks_response.json b/vendor/github.com/go-chef/chef/test/cookbooks_response.json new file mode 100644 index 000000000..a9ced6898 --- /dev/null +++ b/vendor/github.com/go-chef/chef/test/cookbooks_response.json @@ -0,0 +1,20 @@ +{ + "apache2": { + "url": "http://localhost:4000/cookbooks/apache2", + "versions": [ + {"url": "http://localhost:4000/cookbooks/apache2/5.1.0", + "version": "5.1.0"}, + {"url": "http://localhost:4000/cookbooks/apache2/4.2.0", + "version": "4.2.0"} + ] + }, + "nginx": { + "url": "http://localhost:4000/cookbooks/nginx", + "versions": [ + {"url": "http://localhost:4000/cookbooks/nginx/1.0.0", + "version": "1.0.0"}, + {"url": "http://localhost:4000/cookbooks/nginx/0.3.0", + "version": "0.3.0"} + ] + } +} diff --git a/vendor/github.com/go-chef/chef/test/environment.json b/vendor/github.com/go-chef/chef/test/environment.json new file mode 100644 index 000000000..939bb60a8 --- /dev/null +++ b/vendor/github.com/go-chef/chef/test/environment.json @@ -0,0 +1,28 @@ +{ + "name": "testenvironment", + "json_class": "Chef::Environment", + "default_attributes": { + "openssh": { + "server": { + "permit_root_login": "no", + "max_auth_tries": "3", + "password_authentication": "no", + "x11_forwarding": "yes", + "subsystem": "sftp /usr/lib/openssh/sftp-server" + }, + "client": { + "forward_x11": "yes", + "forward_agent": "yes" + } + } + }, + "chef_type": "environment", + 
"override_attributes": {}, + "cookbook_versions": { + "couchdb": "= 11.0.0", + "my_rails_app": "~> 1.2.0" + }, + "cookbook": { + "couchdb": "= 11.0.0" + } +} diff --git a/vendor/github.com/go-chef/chef/test/node.json b/vendor/github.com/go-chef/chef/test/node.json new file mode 100644 index 000000000..1456f5b12 --- /dev/null +++ b/vendor/github.com/go-chef/chef/test/node.json @@ -0,0 +1,26 @@ +{ + "name": "testnode", + "chef_environment": "_default", + "json_class": "Chef::Node", + "automatic": {}, + "normal": { + "tags": [], + "openssh": { + "server": { + "permit_root_login": "no", + "max_auth_tries": "3", + "password_authentication": "no", + "x11_forwarding": "yes", + "subsystem": "sftp /usr/lib/openssh/sftp-server" + }, + "client": { + "forward_x11": "yes", + "forward_agent": "yes" + } + } + }, + "chef_type": "node", + "default": {}, + "override": {}, + "run_list": [] +} diff --git a/vendor/github.com/go-chef/chef/test/role.json b/vendor/github.com/go-chef/chef/test/role.json new file mode 100644 index 000000000..d89e1f6b4 --- /dev/null +++ b/vendor/github.com/go-chef/chef/test/role.json @@ -0,0 +1,23 @@ +{ + "name": "testrole", + "chef_environment": "_default", + "json_class": "Chef::Role", + "default_attributes": { + "openssh": { + "server": { + "permit_root_login": "no", + "max_auth_tries": "3", + "password_authentication": "no", + "x11_forwarding": "yes", + "subsystem": "sftp /usr/lib/openssh/sftp-server" + }, + "client": { + "forward_x11": "yes", + "forward_agent": "yes" + } + } + }, + "chef_type": "role", + "override": {}, + "run_list": [] +} diff --git a/vendor/github.com/go-chef/chef/wercker.yml b/vendor/github.com/go-chef/chef/wercker.yml new file mode 100644 index 000000000..f688951a2 --- /dev/null +++ b/vendor/github.com/go-chef/chef/wercker.yml @@ -0,0 +1,56 @@ +box: wercker/golang + +build: + steps: + - script: + name: Populate cache + code: | + if test -d "$WERCKER_CACHE_DIR/go-pkg-cache"; then rsync -avzv --exclude "$WERCKER_SOURCE_DIR" "$WERCKER_CACHE_DIR/go-pkg-cache/" "$GOPATH/" ; fi + + # this forces the GOPATH to include the checked out source instead of some random workdir + - script: + name: FixDirs + code: | + if [ ! 
-d $GOPATH/src/github.com/go-chef ]; then mkdir -p $GOPATH/src/github.com/go-chef ; fi + ln -sf $WERCKER_SOURCE_DIR $GOPATH/src/github.com/go-chef/chef + ls -l $GOPATH/src/github.com/go-chef + + - script: + name: Get dependencies + code: | + go get -u github.com/ctdk/goiardi/chefcrypto + go get -u github.com/ctdk/goiardi/authentication + go get -u github.com/davecgh/go-spew/spew + go get -u github.com/smartystreets/goconvey/convey + go get -u github.com/axw/gocov/gocov + go get -u github.com/matm/gocov-html + go get -u github.com/mattn/goveralls + + # Using the gocov tool to test the exact package we want to test from GOPATH + - script: + name: Test + code: | + gocov test github.com/go-chef/chef > coverage.json + + - script: + name: Coverage + code: | + gocov report coverage.json + gocov-html coverage.json > $WERCKER_REPORT_ARTIFACTS_DIR/coverage.html + + - script: + name: Coveralls.io + code: | + goveralls -service='wercker.com' -repotoken=$COVERALLS_TOKEN -gocovdata=coverage.json + + - script: + name: Store cache + code: | + rsync -avzv --exclude "$WERCKER_SOURCE_DIR" "$GOPATH/" "$WERCKER_CACHE_DIR/go-pkg-cache/" + + after-steps: + - wouter/irc-notify: + server: irc.freenode.net + port: 6667 + nickname: wercker + channel: go-chef diff --git a/vendor/github.com/go-ini/ini/.gitignore b/vendor/github.com/go-ini/ini/.gitignore new file mode 100644 index 000000000..7adca9439 --- /dev/null +++ b/vendor/github.com/go-ini/ini/.gitignore @@ -0,0 +1,4 @@ +testdata/conf_out.ini +ini.sublime-project +ini.sublime-workspace +testdata/conf_reflect.ini diff --git a/vendor/github.com/go-ini/ini/LICENSE b/vendor/github.com/go-ini/ini/LICENSE new file mode 100644 index 000000000..37ec93a14 --- /dev/null +++ b/vendor/github.com/go-ini/ini/LICENSE @@ -0,0 +1,191 @@ +Apache License +Version 2.0, January 2004 +http://www.apache.org/licenses/ + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +1. Definitions. + +"License" shall mean the terms and conditions for use, reproduction, and +distribution as defined by Sections 1 through 9 of this document. + +"Licensor" shall mean the copyright owner or entity authorized by the copyright +owner that is granting the License. + +"Legal Entity" shall mean the union of the acting entity and all other entities +that control, are controlled by, or are under common control with that entity. +For the purposes of this definition, "control" means (i) the power, direct or +indirect, to cause the direction or management of such entity, whether by +contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the +outstanding shares, or (iii) beneficial ownership of such entity. + +"You" (or "Your") shall mean an individual or Legal Entity exercising +permissions granted by this License. + +"Source" form shall mean the preferred form for making modifications, including +but not limited to software source code, documentation source, and configuration +files. + +"Object" form shall mean any form resulting from mechanical transformation or +translation of a Source form, including but not limited to compiled object code, +generated documentation, and conversions to other media types. + +"Work" shall mean the work of authorship, whether in Source or Object form, made +available under the License, as indicated by a copyright notice that is included +in or attached to the work (an example is provided in the Appendix below). 
+ +"Derivative Works" shall mean any work, whether in Source or Object form, that +is based on (or derived from) the Work and for which the editorial revisions, +annotations, elaborations, or other modifications represent, as a whole, an +original work of authorship. For the purposes of this License, Derivative Works +shall not include works that remain separable from, or merely link (or bind by +name) to the interfaces of, the Work and Derivative Works thereof. + +"Contribution" shall mean any work of authorship, including the original version +of the Work and any modifications or additions to that Work or Derivative Works +thereof, that is intentionally submitted to Licensor for inclusion in the Work +by the copyright owner or by an individual or Legal Entity authorized to submit +on behalf of the copyright owner. For the purposes of this definition, +"submitted" means any form of electronic, verbal, or written communication sent +to the Licensor or its representatives, including but not limited to +communication on electronic mailing lists, source code control systems, and +issue tracking systems that are managed by, or on behalf of, the Licensor for +the purpose of discussing and improving the Work, but excluding communication +that is conspicuously marked or otherwise designated in writing by the copyright +owner as "Not a Contribution." + +"Contributor" shall mean Licensor and any individual or Legal Entity on behalf +of whom a Contribution has been received by Licensor and subsequently +incorporated within the Work. + +2. Grant of Copyright License. + +Subject to the terms and conditions of this License, each Contributor hereby +grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, +irrevocable copyright license to reproduce, prepare Derivative Works of, +publicly display, publicly perform, sublicense, and distribute the Work and such +Derivative Works in Source or Object form. + +3. Grant of Patent License. + +Subject to the terms and conditions of this License, each Contributor hereby +grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, +irrevocable (except as stated in this section) patent license to make, have +made, use, offer to sell, sell, import, and otherwise transfer the Work, where +such license applies only to those patent claims licensable by such Contributor +that are necessarily infringed by their Contribution(s) alone or by combination +of their Contribution(s) with the Work to which such Contribution(s) was +submitted. If You institute patent litigation against any entity (including a +cross-claim or counterclaim in a lawsuit) alleging that the Work or a +Contribution incorporated within the Work constitutes direct or contributory +patent infringement, then any patent licenses granted to You under this License +for that Work shall terminate as of the date such litigation is filed. + +4. Redistribution. 
+ +You may reproduce and distribute copies of the Work or Derivative Works thereof +in any medium, with or without modifications, and in Source or Object form, +provided that You meet the following conditions: + +You must give any other recipients of the Work or Derivative Works a copy of +this License; and +You must cause any modified files to carry prominent notices stating that You +changed the files; and +You must retain, in the Source form of any Derivative Works that You distribute, +all copyright, patent, trademark, and attribution notices from the Source form +of the Work, excluding those notices that do not pertain to any part of the +Derivative Works; and +If the Work includes a "NOTICE" text file as part of its distribution, then any +Derivative Works that You distribute must include a readable copy of the +attribution notices contained within such NOTICE file, excluding those notices +that do not pertain to any part of the Derivative Works, in at least one of the +following places: within a NOTICE text file distributed as part of the +Derivative Works; within the Source form or documentation, if provided along +with the Derivative Works; or, within a display generated by the Derivative +Works, if and wherever such third-party notices normally appear. The contents of +the NOTICE file are for informational purposes only and do not modify the +License. You may add Your own attribution notices within Derivative Works that +You distribute, alongside or as an addendum to the NOTICE text from the Work, +provided that such additional attribution notices cannot be construed as +modifying the License. +You may add Your own copyright statement to Your modifications and may provide +additional or different license terms and conditions for use, reproduction, or +distribution of Your modifications, or for any such Derivative Works as a whole, +provided Your use, reproduction, and distribution of the Work otherwise complies +with the conditions stated in this License. + +5. Submission of Contributions. + +Unless You explicitly state otherwise, any Contribution intentionally submitted +for inclusion in the Work by You to the Licensor shall be under the terms and +conditions of this License, without any additional terms or conditions. +Notwithstanding the above, nothing herein shall supersede or modify the terms of +any separate license agreement you may have executed with Licensor regarding +such Contributions. + +6. Trademarks. + +This License does not grant permission to use the trade names, trademarks, +service marks, or product names of the Licensor, except as required for +reasonable and customary use in describing the origin of the Work and +reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. + +Unless required by applicable law or agreed to in writing, Licensor provides the +Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, +including, without limitation, any warranties or conditions of TITLE, +NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are +solely responsible for determining the appropriateness of using or +redistributing the Work and assume any risks associated with Your exercise of +permissions under this License. + +8. Limitation of Liability. 
+ +In no event and under no legal theory, whether in tort (including negligence), +contract, or otherwise, unless required by applicable law (such as deliberate +and grossly negligent acts) or agreed to in writing, shall any Contributor be +liable to You for damages, including any direct, indirect, special, incidental, +or consequential damages of any character arising as a result of this License or +out of the use or inability to use the Work (including but not limited to +damages for loss of goodwill, work stoppage, computer failure or malfunction, or +any and all other commercial damages or losses), even if such Contributor has +been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. + +While redistributing the Work or Derivative Works thereof, You may choose to +offer, and charge a fee for, acceptance of support, warranty, indemnity, or +other liability obligations and/or rights consistent with this License. However, +in accepting such obligations, You may act only on Your own behalf and on Your +sole responsibility, not on behalf of any other Contributor, and only if You +agree to indemnify, defend, and hold each Contributor harmless for any liability +incurred by, or claims asserted against, such Contributor by reason of your +accepting any such warranty or additional liability. + +END OF TERMS AND CONDITIONS + +APPENDIX: How to apply the Apache License to your work + +To apply the Apache License to your work, attach the following boilerplate +notice, with the fields enclosed by brackets "[]" replaced with your own +identifying information. (Don't include the brackets!) The text should be +enclosed in the appropriate comment syntax for the file format. We also +recommend that a file or class name and description of purpose be included on +the same "printed page" as the copyright notice for easier identification within +third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/go-ini/ini/README.md b/vendor/github.com/go-ini/ini/README.md new file mode 100644 index 000000000..c52f63a47 --- /dev/null +++ b/vendor/github.com/go-ini/ini/README.md @@ -0,0 +1,590 @@ +ini [![Build Status](https://drone.io/github.com/go-ini/ini/status.png)](https://drone.io/github.com/go-ini/ini/latest) [![](http://gocover.io/_badge/github.com/go-ini/ini)](http://gocover.io/github.com/go-ini/ini) +=== + +![](https://avatars0.githubusercontent.com/u/10216035?v=3&s=200) + +Package ini provides INI file read and write functionality in Go. + +[简体中文](README_ZH.md) + +## Feature + +- Load multiple data sources(`[]byte` or file) with overwrites. +- Read with recursion values. +- Read with parent-child sections. +- Read with auto-increment key names. +- Read with multiple-line values. +- Read with tons of helper methods. +- Read and convert values to Go types. +- Read and **WRITE** comments of sections and keys. +- Manipulate sections, keys and comments with ease. 
+- Keep sections and keys in order as you parse and save. + +## Installation + +To use a tagged revision: + + go get gopkg.in/ini.v1 + +To use with latest changes: + + go get github.com/go-ini/ini + +### Testing + +If you want to test on your machine, please apply `-t` flag: + + go get -t gopkg.in/ini.v1 + +## Getting Started + +### Loading from data sources + +A **Data Source** is either raw data in type `[]byte` or a file name with type `string` and you can load **as many as** data sources you want. Passing other types will simply return an error. + +```go +cfg, err := ini.Load([]byte("raw data"), "filename") +``` + +Or start with an empty object: + +```go +cfg := ini.Empty() +``` + +When you cannot decide how many data sources to load at the beginning, you still able to **Append()** them later. + +```go +err := cfg.Append("other file", []byte("other raw data")) +``` + +### Working with sections + +To get a section, you would need to: + +```go +section, err := cfg.GetSection("section name") +``` + +For a shortcut for default section, just give an empty string as name: + +```go +section, err := cfg.GetSection("") +``` + +When you're pretty sure the section exists, following code could make your life easier: + +```go +section := cfg.Section("") +``` + +What happens when the section somehow does not exist? Don't panic, it automatically creates and returns a new section to you. + +To create a new section: + +```go +err := cfg.NewSection("new section") +``` + +To get a list of sections or section names: + +```go +sections := cfg.Sections() +names := cfg.SectionStrings() +``` + +### Working with keys + +To get a key under a section: + +```go +key, err := cfg.Section("").GetKey("key name") +``` + +Same rule applies to key operations: + +```go +key := cfg.Section("").Key("key name") +``` + +To check if a key exists: + +```go +yes := cfg.Section("").HasKey("key name") +``` + +To create a new key: + +```go +err := cfg.Section("").NewKey("name", "value") +``` + +To get a list of keys or key names: + +```go +keys := cfg.Section("").Keys() +names := cfg.Section("").KeyStrings() +``` + +To get a clone hash of keys and corresponding values: + +```go +hash := cfg.GetSection("").KeysHash() +``` + +### Working with values + +To get a string value: + +```go +val := cfg.Section("").Key("key name").String() +``` + +To validate key value on the fly: + +```go +val := cfg.Section("").Key("key name").Validate(func(in string) string { + if len(in) == 0 { + return "default" + } + return in +}) +``` + +If you do not want any auto-transformation (such as recursive read) for the values, you can get raw value directly (this way you get much better performance): + +```go +val := cfg.Section("").Key("key name").Value() +``` + +To check if raw value exists: + +```go +yes := cfg.Section("").HasValue("test value") +``` + +To get value with types: + +```go +// For boolean values: +// true when value is: 1, t, T, TRUE, true, True, YES, yes, Yes, y, ON, on, On +// false when value is: 0, f, F, FALSE, false, False, NO, no, No, n, OFF, off, Off +v, err = cfg.Section("").Key("BOOL").Bool() +v, err = cfg.Section("").Key("FLOAT64").Float64() +v, err = cfg.Section("").Key("INT").Int() +v, err = cfg.Section("").Key("INT64").Int64() +v, err = cfg.Section("").Key("UINT").Uint() +v, err = cfg.Section("").Key("UINT64").Uint64() +v, err = cfg.Section("").Key("TIME").TimeFormat(time.RFC3339) +v, err = cfg.Section("").Key("TIME").Time() // RFC3339 + +v = cfg.Section("").Key("BOOL").MustBool() +v = 
cfg.Section("").Key("FLOAT64").MustFloat64() +v = cfg.Section("").Key("INT").MustInt() +v = cfg.Section("").Key("INT64").MustInt64() +v = cfg.Section("").Key("UINT").MustUint() +v = cfg.Section("").Key("UINT64").MustUint64() +v = cfg.Section("").Key("TIME").MustTimeFormat(time.RFC3339) +v = cfg.Section("").Key("TIME").MustTime() // RFC3339 + +// Methods start with Must also accept one argument for default value +// when key not found or fail to parse value to given type. +// Except method MustString, which you have to pass a default value. + +v = cfg.Section("").Key("String").MustString("default") +v = cfg.Section("").Key("BOOL").MustBool(true) +v = cfg.Section("").Key("FLOAT64").MustFloat64(1.25) +v = cfg.Section("").Key("INT").MustInt(10) +v = cfg.Section("").Key("INT64").MustInt64(99) +v = cfg.Section("").Key("UINT").MustUint(3) +v = cfg.Section("").Key("UINT64").MustUint64(6) +v = cfg.Section("").Key("TIME").MustTimeFormat(time.RFC3339, time.Now()) +v = cfg.Section("").Key("TIME").MustTime(time.Now()) // RFC3339 +``` + +What if my value is three-line long? + +```ini +[advance] +ADDRESS = """404 road, +NotFound, State, 5000 +Earth""" +``` + +Not a problem! + +```go +cfg.Section("advance").Key("ADDRESS").String() + +/* --- start --- +404 road, +NotFound, State, 5000 +Earth +------ end --- */ +``` + +That's cool, how about continuation lines? + +```ini +[advance] +two_lines = how about \ + continuation lines? +lots_of_lines = 1 \ + 2 \ + 3 \ + 4 +``` + +Piece of cake! + +```go +cfg.Section("advance").Key("two_lines").String() // how about continuation lines? +cfg.Section("advance").Key("lots_of_lines").String() // 1 2 3 4 +``` + +Note that single quotes around values will be stripped: + +```ini +foo = "some value" // foo: some value +bar = 'some value' // bar: some value +``` + +That's all? Hmm, no. + +#### Helper methods of working with values + +To get value with given candidates: + +```go +v = cfg.Section("").Key("STRING").In("default", []string{"str", "arr", "types"}) +v = cfg.Section("").Key("FLOAT64").InFloat64(1.1, []float64{1.25, 2.5, 3.75}) +v = cfg.Section("").Key("INT").InInt(5, []int{10, 20, 30}) +v = cfg.Section("").Key("INT64").InInt64(10, []int64{10, 20, 30}) +v = cfg.Section("").Key("UINT").InUint(4, []int{3, 6, 9}) +v = cfg.Section("").Key("UINT64").InUint64(8, []int64{3, 6, 9}) +v = cfg.Section("").Key("TIME").InTimeFormat(time.RFC3339, time.Now(), []time.Time{time1, time2, time3}) +v = cfg.Section("").Key("TIME").InTime(time.Now(), []time.Time{time1, time2, time3}) // RFC3339 +``` + +Default value will be presented if value of key is not in candidates you given, and default value does not need be one of candidates. 
+ +To validate value in a given range: + +```go +vals = cfg.Section("").Key("FLOAT64").RangeFloat64(0.0, 1.1, 2.2) +vals = cfg.Section("").Key("INT").RangeInt(0, 10, 20) +vals = cfg.Section("").Key("INT64").RangeInt64(0, 10, 20) +vals = cfg.Section("").Key("UINT").RangeUint(0, 3, 9) +vals = cfg.Section("").Key("UINT64").RangeUint64(0, 3, 9) +vals = cfg.Section("").Key("TIME").RangeTimeFormat(time.RFC3339, time.Now(), minTime, maxTime) +vals = cfg.Section("").Key("TIME").RangeTime(time.Now(), minTime, maxTime) // RFC3339 +``` + +To auto-split value into slice: + +```go +vals = cfg.Section("").Key("STRINGS").Strings(",") +vals = cfg.Section("").Key("FLOAT64S").Float64s(",") +vals = cfg.Section("").Key("INTS").Ints(",") +vals = cfg.Section("").Key("INT64S").Int64s(",") +vals = cfg.Section("").Key("UINTS").Uints(",") +vals = cfg.Section("").Key("UINT64S").Uint64s(",") +vals = cfg.Section("").Key("TIMES").Times(",") +``` + +### Save your configuration + +Finally, it's time to save your configuration to somewhere. + +A typical way to save configuration is writing it to a file: + +```go +// ... +err = cfg.SaveTo("my.ini") +err = cfg.SaveToIndent("my.ini", "\t") +``` + +Another way to save is writing to a `io.Writer` interface: + +```go +// ... +cfg.WriteTo(writer) +cfg.WriteToIndent(writer, "\t") +``` + +## Advanced Usage + +### Recursive Values + +For all value of keys, there is a special syntax `%()s`, where `` is the key name in same section or default section, and `%()s` will be replaced by corresponding value(empty string if key not found). You can use this syntax at most 99 level of recursions. + +```ini +NAME = ini + +[author] +NAME = Unknwon +GITHUB = https://github.com/%(NAME)s + +[package] +FULL_NAME = github.com/go-ini/%(NAME)s +``` + +```go +cfg.Section("author").Key("GITHUB").String() // https://github.com/Unknwon +cfg.Section("package").Key("FULL_NAME").String() // github.com/go-ini/ini +``` + +### Parent-child Sections + +You can use `.` in section name to indicate parent-child relationship between two or more sections. If the key not found in the child section, library will try again on its parent section until there is no parent section. + +```ini +NAME = ini +VERSION = v1 +IMPORT_PATH = gopkg.in/%(NAME)s.%(VERSION)s + +[package] +CLONE_URL = https://%(IMPORT_PATH)s + +[package.sub] +``` + +```go +cfg.Section("package.sub").Key("CLONE_URL").String() // https://gopkg.in/ini.v1 +``` + +### Auto-increment Key Names + +If key name is `-` in data source, then it would be seen as special syntax for auto-increment key name start from 1, and every section is independent on counter. + +```ini +[features] +-: Support read/write comments of keys and sections +-: Support auto-increment of key names +-: Support load multiple files to overwrite key values +``` + +```go +cfg.Section("features").KeyStrings() // []{"#1", "#2", "#3"} +``` + +### Map To Struct + +Want more objective way to play with INI? Cool. + +```ini +Name = Unknwon +age = 21 +Male = true +Born = 1993-01-01T20:17:05Z + +[Note] +Content = Hi is a good man! +Cities = HangZhou, Boston +``` + +```go +type Note struct { + Content string + Cities []string +} + +type Person struct { + Name string + Age int `ini:"age"` + Male bool + Born time.Time + Note + Created time.Time `ini:"-"` +} + +func main() { + cfg, err := ini.Load("path/to/ini") + // ... + p := new(Person) + err = cfg.MapTo(p) + // ... + + // Things can be simpler. + err = ini.MapTo(p, "path/to/ini") + // ... + + // Just map a section? Fine. 
+ n := new(Note) + err = cfg.Section("Note").MapTo(n) + // ... +} +``` + +Can I have default value for field? Absolutely. + +Assign it before you map to struct. It will keep the value as it is if the key is not presented or got wrong type. + +```go +// ... +p := &Person{ + Name: "Joe", +} +// ... +``` + +It's really cool, but what's the point if you can't give me my file back from struct? + +### Reflect From Struct + +Why not? + +```go +type Embeded struct { + Dates []time.Time `delim:"|"` + Places []string + None []int +} + +type Author struct { + Name string `ini:"NAME"` + Male bool + Age int + GPA float64 + NeverMind string `ini:"-"` + *Embeded +} + +func main() { + a := &Author{"Unknwon", true, 21, 2.8, "", + &Embeded{ + []time.Time{time.Now(), time.Now()}, + []string{"HangZhou", "Boston"}, + []int{}, + }} + cfg := ini.Empty() + err = ini.ReflectFrom(cfg, a) + // ... +} +``` + +So, what do I get? + +```ini +NAME = Unknwon +Male = true +Age = 21 +GPA = 2.8 + +[Embeded] +Dates = 2015-08-07T22:14:22+08:00|2015-08-07T22:14:22+08:00 +Places = HangZhou,Boston +None = +``` + +#### Name Mapper + +To save your time and make your code cleaner, this library supports [`NameMapper`](https://gowalker.org/gopkg.in/ini.v1#NameMapper) between struct field and actual section and key name. + +There are 2 built-in name mappers: + +- `AllCapsUnderscore`: it converts to format `ALL_CAPS_UNDERSCORE` then match section or key. +- `TitleUnderscore`: it converts to format `title_underscore` then match section or key. + +To use them: + +```go +type Info struct { + PackageName string +} + +func main() { + err = ini.MapToWithMapper(&Info{}, ini.TitleUnderscore, []byte("package_name=ini")) + // ... + + cfg, err := ini.Load([]byte("PACKAGE_NAME=ini")) + // ... + info := new(Info) + cfg.NameMapper = ini.AllCapsUnderscore + err = cfg.MapTo(info) + // ... +} +``` + +Same rules of name mapper apply to `ini.ReflectFromWithMapper` function. + +#### Other Notes On Map/Reflect + +Any embedded struct is treated as a section by default, and there is no automatic parent-child relations in map/reflect feature: + +```go +type Child struct { + Age string +} + +type Parent struct { + Name string + Child +} + +type Config struct { + City string + Parent +} +``` + +Example configuration: + +```ini +City = Boston + +[Parent] +Name = Unknwon + +[Child] +Age = 21 +``` + +What if, yes, I'm paranoid, I want embedded struct to be in the same section. Well, all roads lead to Rome. + +```go +type Child struct { + Age string +} + +type Parent struct { + Name string + Child `ini:"Parent"` +} + +type Config struct { + City string + Parent +} +``` + +Example configuration: + +```ini +City = Boston + +[Parent] +Name = Unknwon +Age = 21 +``` + +## Getting Help + +- [API Documentation](https://gowalker.org/gopkg.in/ini.v1) +- [File An Issue](https://github.com/go-ini/ini/issues/new) + +## FAQs + +### What does `BlockMode` field do? + +By default, library lets you read and write values so we need a locker to make sure your data is safe. But in cases that you are very sure about only reading data through the library, you can set `cfg.BlockMode = false` to speed up read operations about **50-70%** faster. + +### Why another INI library? + +Many people are using my another INI library [goconfig](https://github.com/Unknwon/goconfig), so the reason for this one is I would like to make more Go style code. Also when you set `cfg.BlockMode = false`, this one is about **10-30%** faster. 
+
+It's really cool, but what's the point if you can't get your file back from the struct?
+
+### Reflect From Struct
+
+Why not?
+
+```go
+type Embeded struct {
+	Dates  []time.Time `delim:"|"`
+	Places []string
+	None   []int
+}
+
+type Author struct {
+	Name      string `ini:"NAME"`
+	Male      bool
+	Age       int
+	GPA       float64
+	NeverMind string `ini:"-"`
+	*Embeded
+}
+
+func main() {
+	a := &Author{"Unknwon", true, 21, 2.8, "",
+		&Embeded{
+			[]time.Time{time.Now(), time.Now()},
+			[]string{"HangZhou", "Boston"},
+			[]int{},
+		}}
+	cfg := ini.Empty()
+	err = ini.ReflectFrom(cfg, a)
+	// ...
+}
+```
+
+So, what do I get?
+
+```ini
+NAME = Unknwon
+Male = true
+Age = 21
+GPA = 2.8
+
+[Embeded]
+Dates = 2015-08-07T22:14:22+08:00|2015-08-07T22:14:22+08:00
+Places = HangZhou,Boston
+None =
+```
+
+#### Name Mapper
+
+To save your time and make your code cleaner, this library supports a [`NameMapper`](https://gowalker.org/gopkg.in/ini.v1#NameMapper) between struct field names and actual section and key names.
+
+There are 2 built-in name mappers:
+
+- `AllCapsUnderscore`: converts the field name to the format `ALL_CAPS_UNDERSCORE` before matching section or key names.
+- `TitleUnderscore`: converts the field name to the format `title_underscore` before matching section or key names.
+
+To use them:
+
+```go
+type Info struct {
+	PackageName string
+}
+
+func main() {
+	err = ini.MapToWithMapper(&Info{}, ini.TitleUnderscore, []byte("package_name=ini"))
+	// ...
+
+	cfg, err := ini.Load([]byte("PACKAGE_NAME=ini"))
+	// ...
+	info := new(Info)
+	cfg.NameMapper = ini.AllCapsUnderscore
+	err = cfg.MapTo(info)
+	// ...
+}
+```
+
+The same name mapper rules apply to the `ini.ReflectFromWithMapper` function.
+
+#### Other Notes On Map/Reflect
+
+Any embedded struct is treated as its own section by default, and there are no automatic parent-child relations in the map/reflect feature:
+
+```go
+type Child struct {
+	Age string
+}
+
+type Parent struct {
+	Name string
+	Child
+}
+
+type Config struct {
+	City string
+	Parent
+}
+```
+
+Example configuration:
+
+```ini
+City = Boston
+
+[Parent]
+Name = Unknwon
+
+[Child]
+Age = 21
+```
+
+What if, yes, I'm paranoid, and I want the embedded struct to live in the same section? Well, all roads lead to Rome.
+
+```go
+type Child struct {
+	Age string
+}
+
+type Parent struct {
+	Name string
+	Child `ini:"Parent"`
+}
+
+type Config struct {
+	City string
+	Parent
+}
+```
+
+Example configuration:
+
+```ini
+City = Boston
+
+[Parent]
+Name = Unknwon
+Age = 21
+```
+
+## Getting Help
+
+- [API Documentation](https://gowalker.org/gopkg.in/ini.v1)
+- [File An Issue](https://github.com/go-ini/ini/issues/new)
+
+## FAQs
+
+### What does the `BlockMode` field do?
+
+By default, the library allows both reads and writes, so it needs a lock to keep your data safe. But if you are certain that you only read data through the library, you can set `cfg.BlockMode = false` to speed up read operations by about **50-70%**.
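+
+As a rough sketch of a read-only setup (the file name `app.ini` and the key `app_mode` are placeholders):
+
+```go
+cfg, err := ini.Load("app.ini")
+if err != nil {
+	panic(err)
+}
+// Safe only if no goroutine ever writes to cfg afterwards.
+cfg.BlockMode = false
+
+mode := cfg.Section("").Key("app_mode").String()
+_ = mode // read without any locking overhead
+```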
+
+### Why another INI library?
+
+Many people use my other INI library, [goconfig](https://github.com/Unknwon/goconfig); the reason for this one is that I wanted to write more Go-style code. Also, when you set `cfg.BlockMode = false`, this library is about **10-30%** faster.
+
+To make those changes I had to break the API, so it was safer to start over in a new repository and use `gopkg.in` to version the package from the start. (PS: the import path is shorter, too.)
+
+## License
+
+This project is under Apache v2 License. See the [LICENSE](LICENSE) file for the full license text.
diff --git a/vendor/github.com/go-ini/ini/README_ZH.md b/vendor/github.com/go-ini/ini/README_ZH.md
new file mode 100644
index 000000000..ce6d8eb45
--- /dev/null
+++ b/vendor/github.com/go-ini/ini/README_ZH.md
@@ -0,0 +1,577 @@
+This package provides read and write functionality for INI files in Go.
+
+## Features
+
+- Load from multiple data sources (`[]byte` or files) with overwriting
+- Read recursive values
+- Read parent-child sections
+- Read auto-increment key names
+- Read multiple-line values
+- Plenty of helper methods
+- Convert values directly to Go types while reading
+- Read and **write** comments of sections and keys
+- Manipulate sections, keys, values and comments with ease
+- Sections and keys keep their original order when the file is saved
+
+## Installation
+
+To use a specific version:
+
+	go get gopkg.in/ini.v1
+
+To use the latest version:
+
+	go get github.com/go-ini/ini
+
+### Testing
+
+If you want to run the tests on your own machine, please apply the `-t` flag:
+
+	go get -t gopkg.in/ini.v1
+
+## Getting Started
+
+### Loading from data sources
+
+A **data source** is either raw data in type `[]byte` or a file path in type `string`, and you can load **any number** of data sources. Passing a data source of any other type returns an error directly.
+
+```go
+cfg, err := ini.Load([]byte("raw data"), "filename")
+```
+
+Or you can start with an empty file:
+
+```go
+cfg := ini.Empty()
+```
+
+When you cannot decide up front which data sources to load, you can still use **Append()** to load them whenever needed:
+
+```go
+err := cfg.Append("other file", []byte("other raw data"))
+```
+
+### Working with sections
+
+To get a specific section:
+
+```go
+section, err := cfg.GetSection("section name")
+```
+
+If you want the default section, use an empty string instead of the section name:
+
+```go
+section, err := cfg.GetSection("")
+```
+
+When you are quite sure a section exists, the following convenience method is available:
+
+```go
+section := cfg.Section("")
+```
+
+What happens if you guessed wrong and the section does not actually exist? Don't worry: a corresponding section object is created and returned to you automatically.
+
+To create a section:
+
+```go
+err := cfg.NewSection("new section")
+```
+
+To get all section objects or names:
+
+```go
+sections := cfg.Sections()
+names := cfg.SectionStrings()
+```
+
+### Working with keys
+
+To get a key under a section:
+
+```go
+key, err := cfg.Section("").GetKey("key name")
+```
+
+Just like with sections, you can also get a key directly and skip the error handling:
+
+```go
+key := cfg.Section("").Key("key name")
+```
+
+To check whether a key exists:
+
+```go
+yes := cfg.Section("").HasKey("key name")
+```
+
+To create a new key:
+
+```go
+err := cfg.Section("").NewKey("name", "value")
+```
+
+To get all keys or key names under a section:
+
+```go
+keys := cfg.Section("").Keys()
+names := cfg.Section("").KeyStrings()
+```
+
+To get a clone of all key-value pairs of a section:
+
+```go
+hash := cfg.Section("").KeysHash()
+```
+
+### Working with values
+
+To get a value of type string:
+
+```go
+val := cfg.Section("").Key("key name").String()
+```
+
+To validate the value with a custom function while reading it:
+
+```go
+val := cfg.Section("").Key("key name").Validate(func(in string) string {
+	if len(in) == 0 {
+		return "default"
+	}
+	return in
+})
+```
+
+If you don't need any automatic transformation of the value (for example, recursive reading), you can get the raw value directly (this performs the best):
+
+```go
+val := cfg.Section("").Key("key name").Value()
+```
+
+To check whether a raw value exists:
+
+```go
+yes := cfg.Section("").HasValue("test value")
+```
+
+To get values of other types:
+
+```go
+// Rules for booleans:
+// true  when the value is: 1, t, T, TRUE, true, True, YES, yes, Yes, y, ON, on, On
+// false when the value is: 0, f, F, FALSE, false, False, NO, no, No, n, OFF, off, Off
+v, err = cfg.Section("").Key("BOOL").Bool()
+v, err = cfg.Section("").Key("FLOAT64").Float64()
+v, err = cfg.Section("").Key("INT").Int()
+v, err = cfg.Section("").Key("INT64").Int64()
+v, err = cfg.Section("").Key("UINT").Uint()
+v, err = cfg.Section("").Key("UINT64").Uint64()
+v, err = cfg.Section("").Key("TIME").TimeFormat(time.RFC3339)
+v, err = cfg.Section("").Key("TIME").Time() // RFC3339
+
+v = cfg.Section("").Key("BOOL").MustBool()
+v = cfg.Section("").Key("FLOAT64").MustFloat64()
+v = cfg.Section("").Key("INT").MustInt()
+v = cfg.Section("").Key("INT64").MustInt64()
+v = cfg.Section("").Key("UINT").MustUint()
+v = cfg.Section("").Key("UINT64").MustUint64()
+v = cfg.Section("").Key("TIME").MustTimeFormat(time.RFC3339)
+v = cfg.Section("").Key("TIME").MustTime() // RFC3339
+
+// Methods whose names start with Must also accept one argument of the same
+// type as a default value; when the key does not exist or the conversion
+// fails, the default value is returned directly.
+// The MustString method, however, must always be given a default value.
+
+v = cfg.Section("").Key("String").MustString("default")
+v = cfg.Section("").Key("BOOL").MustBool(true)
+v = cfg.Section("").Key("FLOAT64").MustFloat64(1.25)
+v = cfg.Section("").Key("INT").MustInt(10)
+v = cfg.Section("").Key("INT64").MustInt64(99)
+v = cfg.Section("").Key("UINT").MustUint(3)
+v = cfg.Section("").Key("UINT64").MustUint64(6)
+v = cfg.Section("").Key("TIME").MustTimeFormat(time.RFC3339, time.Now())
+v = cfg.Section("").Key("TIME").MustTime(time.Now()) // RFC3339
+```
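+
+A quick sketch of the fallback behavior (the key `MISSING` is a made-up placeholder that does not exist in the file):
+
+```go
+sec := cfg.Section("")
+sec.Key("MISSING").MustInt(42)       // 42: key absent, default returned
+sec.Key("MISSING").MustString("n/a") // "n/a": MustString always needs a default
+```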
+
+What if my value spans multiple lines?
+
+```ini
+[advance]
+ADDRESS = """404 road,
+NotFound, State, 5000
+Earth"""
+```
+
+Piece of cake!
+
+```go
+cfg.Section("advance").Key("ADDRESS").String()
+
+/* --- start ---
+404 road,
+NotFound, State, 5000
+Earth
+------  end  --- */
+```
+
+Awesome! But what if my content belongs on one line, just doesn't fit, and I want to continue it on the next line?
+
+```ini
+[advance]
+two_lines = how about \
+	continuation lines?
+lots_of_lines = 1 \
+	2 \
+	3 \
+	4
+```
+
+Easy as pie!
+
+```go
+cfg.Section("advance").Key("two_lines").String() // how about continuation lines?
+cfg.Section("advance").Key("lots_of_lines").String() // 1 2 3 4
+```
+
+Note that quotes around a value are trimmed automatically:
+
+```ini
+foo = "some value" // foo: some value
+bar = 'some value' // bar: some value
+```
+
+Is that all? Ha, of course not.
+
+#### Helpers for working with values
+
+To get a value with candidates:
+
+```go
+v = cfg.Section("").Key("STRING").In("default", []string{"str", "arr", "types"})
+v = cfg.Section("").Key("FLOAT64").InFloat64(1.1, []float64{1.25, 2.5, 3.75})
+v = cfg.Section("").Key("INT").InInt(5, []int{10, 20, 30})
+v = cfg.Section("").Key("INT64").InInt64(10, []int64{10, 20, 30})
+v = cfg.Section("").Key("UINT").InUint(4, []int{3, 6, 9})
+v = cfg.Section("").Key("UINT64").InUint64(8, []int64{3, 6, 9})
+v = cfg.Section("").Key("TIME").InTimeFormat(time.RFC3339, time.Now(), []time.Time{time1, time2, time3})
+v = cfg.Section("").Key("TIME").InTime(time.Now(), []time.Time{time1, time2, time3}) // RFC3339
+```
+
+If the value is not any of the candidates, the default value is returned; the default value does not need to be one of the candidates.
+
+To validate that the value is within a given range:
+
+```go
+vals = cfg.Section("").Key("FLOAT64").RangeFloat64(0.0, 1.1, 2.2)
+vals = cfg.Section("").Key("INT").RangeInt(0, 10, 20)
+vals = cfg.Section("").Key("INT64").RangeInt64(0, 10, 20)
+vals = cfg.Section("").Key("UINT").RangeUint(0, 3, 9)
+vals = cfg.Section("").Key("UINT64").RangeUint64(0, 3, 9)
+vals = cfg.Section("").Key("TIME").RangeTimeFormat(time.RFC3339, time.Now(), minTime, maxTime)
+vals = cfg.Section("").Key("TIME").RangeTime(time.Now(), minTime, maxTime) // RFC3339
+```
+
+To auto-split the value into a slice:
+
+```go
+vals = cfg.Section("").Key("STRINGS").Strings(",")
+vals = cfg.Section("").Key("FLOAT64S").Float64s(",")
+vals = cfg.Section("").Key("INTS").Ints(",")
+vals = cfg.Section("").Key("INT64S").Int64s(",")
+vals = cfg.Section("").Key("UINTS").Uints(",")
+vals = cfg.Section("").Key("UINT64S").Uint64s(",")
+vals = cfg.Section("").Key("TIMES").Times(",")
+```
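+
+For example, a self-contained sketch (the `PORTS` key is illustrative):
+
+```go
+cfg, err := ini.Load([]byte("PORTS = 8080, 8081, 8082"))
+if err != nil {
+	panic(err)
+}
+ports := cfg.Section("").Key("PORTS").Ints(",")
+// ports == []int{8080, 8081, 8082}; spaces around each item are trimmed.
+```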
+
+### Saving your configuration
+
+Finally, it's time to save your configuration somewhere.
+
+The most basic way is to write it out to a file:
+
+```go
+// ...
+err = cfg.SaveTo("my.ini")
+err = cfg.SaveToIndent("my.ini", "\t")
+```
+
+A more advanced way is to write to anything that implements the `io.Writer` interface:
+
+```go
+// ...
+cfg.WriteTo(writer)
+cfg.WriteToIndent(writer, "\t")
+```
+
+### Advanced Usage
+
+#### Recursive values
+
+While reading values, the special syntax `%(<name>)s` is applied, where `<name>` can be the name of a key in the same section or in the default section. The string `%(<name>)s` is replaced by the corresponding value, or by an empty string if the key does not exist. You can use up to 99 levels of recursive nesting.
+
+```ini
+NAME = ini
+
+[author]
+NAME = Unknwon
+GITHUB = https://github.com/%(NAME)s
+
+[package]
+FULL_NAME = github.com/go-ini/%(NAME)s
+```
+
+```go
+cfg.Section("author").Key("GITHUB").String()     // https://github.com/Unknwon
+cfg.Section("package").Key("FULL_NAME").String() // github.com/go-ini/ini
+```
+
+#### Parent-child sections
+
+You can use `.` in section names to indicate a parent-child relationship between two or more sections. If a key does not exist in the child section, the lookup continues in its parent section, until there is no parent section left.
+
+```ini
+NAME = ini
+VERSION = v1
+IMPORT_PATH = gopkg.in/%(NAME)s.%(VERSION)s
+
+[package]
+CLONE_URL = https://%(IMPORT_PATH)s
+
+[package.sub]
+```
+
+```go
+cfg.Section("package.sub").Key("CLONE_URL").String() // https://gopkg.in/ini.v1
+```
+
+#### Auto-increment key names
+
+If the key name in a data source is `-`, it is treated as the special syntax for auto-increment key names. The counter starts from 1 and is independent for each section.
+
+```ini
+[features]
+-: Support read/write comments of keys and sections
+-: Support auto-increment of key names
+-: Support load multiple files to overwrite key values
+```
+
+```go
+cfg.Section("features").KeyStrings() // []{"#1", "#2", "#3"}
+```
+
+### Mapping to a struct
+
+Want to play with INI in a more object-oriented way? Good idea.
+
+```ini
+Name = Unknwon
+age = 21
+Male = true
+Born = 1993-01-01T20:17:05Z
+
+[Note]
+Content = Hi is a good man!
+Cities = HangZhou, Boston
+```
+
+```go
+type Note struct {
+	Content string
+	Cities  []string
+}
+
+type Person struct {
+	Name string
+	Age  int `ini:"age"`
+	Male bool
+	Born time.Time
+	Note
+	Created time.Time `ini:"-"`
+}
+
+func main() {
+	cfg, err := ini.Load("path/to/ini")
+	// ...
+	p := new(Person)
+	err = cfg.MapTo(p)
+	// ...
+
+	// It can be even simpler.
+	err = ini.MapTo(p, "path/to/ini")
+	// ...
+
+	// Hmm? Only need to map one section?
+	n := new(Note)
+	err = cfg.Section("Note").MapTo(n)
+	// ...
+}
+```
+
+How do I set a default value for a struct field? Simple: just assign the field before mapping. If the key is not found or has the wrong type, the value is left unchanged.
+
+```go
+// ...
+p := &Person{
+	Name: "Joe",
+}
+// ...
+```
+
+Playing with INI like this is really cool! But what's the use if it can't give me my original configuration file back?
+
+### Reflecting from a struct
+
+Did I say it can't?
+
+```go
+type Embeded struct {
+	Dates  []time.Time `delim:"|"`
+	Places []string
+	None   []int
+}
+
+type Author struct {
+	Name      string `ini:"NAME"`
+	Male      bool
+	Age       int
+	GPA       float64
+	NeverMind string `ini:"-"`
+	*Embeded
+}
+
+func main() {
+	a := &Author{"Unknwon", true, 21, 2.8, "",
+		&Embeded{
+			[]time.Time{time.Now(), time.Now()},
+			[]string{"HangZhou", "Boston"},
+			[]int{},
+		}}
+	cfg := ini.Empty()
+	err = ini.ReflectFrom(cfg, a)
+	// ...
+}
+```
+
+Look, a miracle happened.
+
+```ini
+NAME = Unknwon
+Male = true
+Age = 21
+GPA = 2.8
+
+[Embeded]
+Dates = 2015-08-07T22:14:22+08:00|2015-08-07T22:14:22+08:00
+Places = HangZhou,Boston
+None =
+```
+
+#### Name Mapper
+
+To save your time and keep your code cleaner, this library supports a [`NameMapper`](https://gowalker.org/gopkg.in/ini.v1#NameMapper), which maps struct field names to actual section and key names.
+
+There are 2 built-in name mappers:
+
+- `AllCapsUnderscore`: converts the field name to the format `ALL_CAPS_UNDERSCORE` before matching section and key names.
+- `TitleUnderscore`: converts the field name to the format `title_underscore` before matching section and key names.
+
+Usage:
+
+```go
+type Info struct{
+	PackageName string
+}
+
+func main() {
+	err = ini.MapToWithMapper(&Info{}, ini.TitleUnderscore, []byte("package_name=ini"))
+	// ...
+
+	cfg, err := ini.Load([]byte("PACKAGE_NAME=ini"))
+	// ...
+	info := new(Info)
+	cfg.NameMapper = ini.AllCapsUnderscore
+	err = cfg.MapTo(info)
+	// ...
+}
+```
+
+The same rules apply when using the `ini.ReflectFromWithMapper` function.
+
+#### Other notes on mapping and reflecting
+
+Any embedded struct is treated as a separate section by default, and no parent-child section relationship is generated automatically:
+
+```go
+type Child struct {
+	Age string
+}
+
+type Parent struct {
+	Name string
+	Child
+}
+
+type Config struct {
+	City string
+	Parent
+}
+```
+
+Example configuration file:
+
+```ini
+City = Boston
+
+[Parent]
+Name = Unknwon
+
+[Child]
+Age = 21
+```
+
+Fine, but I insist that the embedded struct live in the same section. Alright, you're the boss!
+
+```go
+type Child struct {
+	Age string
+}
+
+type Parent struct {
+	Name string
+	Child `ini:"Parent"`
+}
+
+type Config struct {
+	City string
+	Parent
+}
+```
+
+Example configuration file:
+
+```ini
+City = Boston
+
+[Parent]
+Name = Unknwon
+Age = 21
+```
+
+## Getting Help
+
+- [API Documentation](https://gowalker.org/gopkg.in/ini.v1)
+- [File An Issue](https://github.com/go-ini/ini/issues/new)
+
+## FAQs
+
+### What is the `BlockMode` field?
+
+By default, the library uses a lock during read and write operations to keep your data safe. But when you are certain that you only read data through the library, you can set `cfg.BlockMode = false` to speed up read operations by about **50-70%**.
+
+### Why another INI parsing library?
+
+Many people use my [goconfig](https://github.com/Unknwon/goconfig) to work with INI files, but I wanted code that is more Go-style. Also, when you set `cfg.BlockMode = false`, this library is about **10-30%** faster.
+
+To make those changes I had to break the API, so starting a new repository was the safest approach. On top of that, this library uses `gopkg.in` for versioned releases. (The real truth is that the import path is shorter.)
diff --git a/vendor/github.com/go-ini/ini/ini.go b/vendor/github.com/go-ini/ini/ini.go
new file mode 100644
index 000000000..1a27f0680
--- /dev/null
+++ b/vendor/github.com/go-ini/ini/ini.go
@@ -0,0 +1,1027 @@
+// Copyright 2014 Unknwon
+//
+// Licensed under the Apache License, Version 2.0 (the "License"): you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+// Package ini provides INI file read and write functionality in Go.
+package ini
+
+import (
+	"bytes"
+	"errors"
+	"fmt"
+	"io"
+	"os"
+	"regexp"
+	"runtime"
+	"strconv"
+	"strings"
+	"sync"
+	"time"
+)
+
+const (
+	DEFAULT_SECTION = "DEFAULT"
+	// Maximum allowed depth when recursively substituting variable names.
+ _DEPTH_VALUES = 99 + + _VERSION = "1.8.6" +) + +func Version() string { + return _VERSION +} + +var ( + LineBreak = "\n" + + // Variable regexp pattern: %(variable)s + varPattern = regexp.MustCompile(`%\(([^\)]+)\)s`) + + // Write spaces around "=" to look better. + PrettyFormat = true +) + +func init() { + if runtime.GOOS == "windows" { + LineBreak = "\r\n" + } +} + +func inSlice(str string, s []string) bool { + for _, v := range s { + if str == v { + return true + } + } + return false +} + +// dataSource is a interface that returns file content. +type dataSource interface { + ReadCloser() (io.ReadCloser, error) +} + +type sourceFile struct { + name string +} + +func (s sourceFile) ReadCloser() (_ io.ReadCloser, err error) { + return os.Open(s.name) +} + +type bytesReadCloser struct { + reader io.Reader +} + +func (rc *bytesReadCloser) Read(p []byte) (n int, err error) { + return rc.reader.Read(p) +} + +func (rc *bytesReadCloser) Close() error { + return nil +} + +type sourceData struct { + data []byte +} + +func (s *sourceData) ReadCloser() (io.ReadCloser, error) { + return &bytesReadCloser{bytes.NewReader(s.data)}, nil +} + +// ____ __. +// | |/ _|____ ___.__. +// | <_/ __ < | | +// | | \ ___/\___ | +// |____|__ \___ > ____| +// \/ \/\/ + +// Key represents a key under a section. +type Key struct { + s *Section + Comment string + name string + value string + isAutoIncr bool +} + +// Name returns name of key. +func (k *Key) Name() string { + return k.name +} + +// Value returns raw value of key for performance purpose. +func (k *Key) Value() string { + return k.value +} + +// String returns string representation of value. +func (k *Key) String() string { + val := k.value + if strings.Index(val, "%") == -1 { + return val + } + + for i := 0; i < _DEPTH_VALUES; i++ { + vr := varPattern.FindString(val) + if len(vr) == 0 { + break + } + + // Take off leading '%(' and trailing ')s'. + noption := strings.TrimLeft(vr, "%(") + noption = strings.TrimRight(noption, ")s") + + // Search in the same section. + nk, err := k.s.GetKey(noption) + if err != nil { + // Search again in default section. + nk, _ = k.s.f.Section("").GetKey(noption) + } + + // Substitute by new value and take off leading '%(' and trailing ')s'. + val = strings.Replace(val, vr, nk.value, -1) + } + return val +} + +// Validate accepts a validate function which can +// return modifed result as key value. +func (k *Key) Validate(fn func(string) string) string { + return fn(k.String()) +} + +// parseBool returns the boolean value represented by the string. +// +// It accepts 1, t, T, TRUE, true, True, YES, yes, Yes, y, ON, on, On, +// 0, f, F, FALSE, false, False, NO, no, No, n, OFF, off, Off. +// Any other value returns an error. +func parseBool(str string) (value bool, err error) { + switch str { + case "1", "t", "T", "true", "TRUE", "True", "YES", "yes", "Yes", "y", "ON", "on", "On": + return true, nil + case "0", "f", "F", "false", "FALSE", "False", "NO", "no", "No", "n", "OFF", "off", "Off": + return false, nil + } + return false, fmt.Errorf("parsing \"%s\": invalid syntax", str) +} + +// Bool returns bool type value. +func (k *Key) Bool() (bool, error) { + return parseBool(k.String()) +} + +// Float64 returns float64 type value. +func (k *Key) Float64() (float64, error) { + return strconv.ParseFloat(k.String(), 64) +} + +// Int returns int type value. +func (k *Key) Int() (int, error) { + return strconv.Atoi(k.String()) +} + +// Int64 returns int64 type value. 
+func (k *Key) Int64() (int64, error) { + return strconv.ParseInt(k.String(), 10, 64) +} + +// Uint returns uint type valued. +func (k *Key) Uint() (uint, error) { + u, e := strconv.ParseUint(k.String(), 10, 64) + return uint(u), e +} + +// Uint64 returns uint64 type value. +func (k *Key) Uint64() (uint64, error) { + return strconv.ParseUint(k.String(), 10, 64) +} + +// Duration returns time.Duration type value. +func (k *Key) Duration() (time.Duration, error) { + return time.ParseDuration(k.String()) +} + +// TimeFormat parses with given format and returns time.Time type value. +func (k *Key) TimeFormat(format string) (time.Time, error) { + return time.Parse(format, k.String()) +} + +// Time parses with RFC3339 format and returns time.Time type value. +func (k *Key) Time() (time.Time, error) { + return k.TimeFormat(time.RFC3339) +} + +// MustString returns default value if key value is empty. +func (k *Key) MustString(defaultVal string) string { + val := k.String() + if len(val) == 0 { + return defaultVal + } + return val +} + +// MustBool always returns value without error, +// it returns false if error occurs. +func (k *Key) MustBool(defaultVal ...bool) bool { + val, err := k.Bool() + if len(defaultVal) > 0 && err != nil { + return defaultVal[0] + } + return val +} + +// MustFloat64 always returns value without error, +// it returns 0.0 if error occurs. +func (k *Key) MustFloat64(defaultVal ...float64) float64 { + val, err := k.Float64() + if len(defaultVal) > 0 && err != nil { + return defaultVal[0] + } + return val +} + +// MustInt always returns value without error, +// it returns 0 if error occurs. +func (k *Key) MustInt(defaultVal ...int) int { + val, err := k.Int() + if len(defaultVal) > 0 && err != nil { + return defaultVal[0] + } + return val +} + +// MustInt64 always returns value without error, +// it returns 0 if error occurs. +func (k *Key) MustInt64(defaultVal ...int64) int64 { + val, err := k.Int64() + if len(defaultVal) > 0 && err != nil { + return defaultVal[0] + } + return val +} + +// MustUint always returns value without error, +// it returns 0 if error occurs. +func (k *Key) MustUint(defaultVal ...uint) uint { + val, err := k.Uint() + if len(defaultVal) > 0 && err != nil { + return defaultVal[0] + } + return val +} + +// MustUint64 always returns value without error, +// it returns 0 if error occurs. +func (k *Key) MustUint64(defaultVal ...uint64) uint64 { + val, err := k.Uint64() + if len(defaultVal) > 0 && err != nil { + return defaultVal[0] + } + return val +} + +// MustDuration always returns value without error, +// it returns zero value if error occurs. +func (k *Key) MustDuration(defaultVal ...time.Duration) time.Duration { + val, err := k.Duration() + if len(defaultVal) > 0 && err != nil { + return defaultVal[0] + } + return val +} + +// MustTimeFormat always parses with given format and returns value without error, +// it returns zero value if error occurs. +func (k *Key) MustTimeFormat(format string, defaultVal ...time.Time) time.Time { + val, err := k.TimeFormat(format) + if len(defaultVal) > 0 && err != nil { + return defaultVal[0] + } + return val +} + +// MustTime always parses with RFC3339 format and returns value without error, +// it returns zero value if error occurs. +func (k *Key) MustTime(defaultVal ...time.Time) time.Time { + return k.MustTimeFormat(time.RFC3339, defaultVal...) +} + +// In always returns value without error, +// it returns default value if error occurs or doesn't fit into candidates. 
+func (k *Key) In(defaultVal string, candidates []string) string { + val := k.String() + for _, cand := range candidates { + if val == cand { + return val + } + } + return defaultVal +} + +// InFloat64 always returns value without error, +// it returns default value if error occurs or doesn't fit into candidates. +func (k *Key) InFloat64(defaultVal float64, candidates []float64) float64 { + val := k.MustFloat64() + for _, cand := range candidates { + if val == cand { + return val + } + } + return defaultVal +} + +// InInt always returns value without error, +// it returns default value if error occurs or doesn't fit into candidates. +func (k *Key) InInt(defaultVal int, candidates []int) int { + val := k.MustInt() + for _, cand := range candidates { + if val == cand { + return val + } + } + return defaultVal +} + +// InInt64 always returns value without error, +// it returns default value if error occurs or doesn't fit into candidates. +func (k *Key) InInt64(defaultVal int64, candidates []int64) int64 { + val := k.MustInt64() + for _, cand := range candidates { + if val == cand { + return val + } + } + return defaultVal +} + +// InUint always returns value without error, +// it returns default value if error occurs or doesn't fit into candidates. +func (k *Key) InUint(defaultVal uint, candidates []uint) uint { + val := k.MustUint() + for _, cand := range candidates { + if val == cand { + return val + } + } + return defaultVal +} + +// InUint64 always returns value without error, +// it returns default value if error occurs or doesn't fit into candidates. +func (k *Key) InUint64(defaultVal uint64, candidates []uint64) uint64 { + val := k.MustUint64() + for _, cand := range candidates { + if val == cand { + return val + } + } + return defaultVal +} + +// InTimeFormat always parses with given format and returns value without error, +// it returns default value if error occurs or doesn't fit into candidates. +func (k *Key) InTimeFormat(format string, defaultVal time.Time, candidates []time.Time) time.Time { + val := k.MustTimeFormat(format) + for _, cand := range candidates { + if val == cand { + return val + } + } + return defaultVal +} + +// InTime always parses with RFC3339 format and returns value without error, +// it returns default value if error occurs or doesn't fit into candidates. +func (k *Key) InTime(defaultVal time.Time, candidates []time.Time) time.Time { + return k.InTimeFormat(time.RFC3339, defaultVal, candidates) +} + +// RangeFloat64 checks if value is in given range inclusively, +// and returns default value if it's not. +func (k *Key) RangeFloat64(defaultVal, min, max float64) float64 { + val := k.MustFloat64() + if val < min || val > max { + return defaultVal + } + return val +} + +// RangeInt checks if value is in given range inclusively, +// and returns default value if it's not. +func (k *Key) RangeInt(defaultVal, min, max int) int { + val := k.MustInt() + if val < min || val > max { + return defaultVal + } + return val +} + +// RangeInt64 checks if value is in given range inclusively, +// and returns default value if it's not. +func (k *Key) RangeInt64(defaultVal, min, max int64) int64 { + val := k.MustInt64() + if val < min || val > max { + return defaultVal + } + return val +} + +// RangeTimeFormat checks if value with given format is in given range inclusively, +// and returns default value if it's not. 
+func (k *Key) RangeTimeFormat(format string, defaultVal, min, max time.Time) time.Time { + val := k.MustTimeFormat(format) + if val.Unix() < min.Unix() || val.Unix() > max.Unix() { + return defaultVal + } + return val +} + +// RangeTime checks if value with RFC3339 format is in given range inclusively, +// and returns default value if it's not. +func (k *Key) RangeTime(defaultVal, min, max time.Time) time.Time { + return k.RangeTimeFormat(time.RFC3339, defaultVal, min, max) +} + +// Strings returns list of string divided by given delimiter. +func (k *Key) Strings(delim string) []string { + str := k.String() + if len(str) == 0 { + return []string{} + } + + vals := strings.Split(str, delim) + for i := range vals { + vals[i] = strings.TrimSpace(vals[i]) + } + return vals +} + +// Float64s returns list of float64 divided by given delimiter. +func (k *Key) Float64s(delim string) []float64 { + strs := k.Strings(delim) + vals := make([]float64, len(strs)) + for i := range strs { + vals[i], _ = strconv.ParseFloat(strs[i], 64) + } + return vals +} + +// Ints returns list of int divided by given delimiter. +func (k *Key) Ints(delim string) []int { + strs := k.Strings(delim) + vals := make([]int, len(strs)) + for i := range strs { + vals[i], _ = strconv.Atoi(strs[i]) + } + return vals +} + +// Int64s returns list of int64 divided by given delimiter. +func (k *Key) Int64s(delim string) []int64 { + strs := k.Strings(delim) + vals := make([]int64, len(strs)) + for i := range strs { + vals[i], _ = strconv.ParseInt(strs[i], 10, 64) + } + return vals +} + +// Uints returns list of uint divided by given delimiter. +func (k *Key) Uints(delim string) []uint { + strs := k.Strings(delim) + vals := make([]uint, len(strs)) + for i := range strs { + u, _ := strconv.ParseUint(strs[i], 10, 0) + vals[i] = uint(u) + } + return vals +} + +// Uint64s returns list of uint64 divided by given delimiter. +func (k *Key) Uint64s(delim string) []uint64 { + strs := k.Strings(delim) + vals := make([]uint64, len(strs)) + for i := range strs { + vals[i], _ = strconv.ParseUint(strs[i], 10, 64) + } + return vals +} + +// TimesFormat parses with given format and returns list of time.Time divided by given delimiter. +func (k *Key) TimesFormat(format, delim string) []time.Time { + strs := k.Strings(delim) + vals := make([]time.Time, len(strs)) + for i := range strs { + vals[i], _ = time.Parse(format, strs[i]) + } + return vals +} + +// Times parses with RFC3339 format and returns list of time.Time divided by given delimiter. +func (k *Key) Times(delim string) []time.Time { + return k.TimesFormat(time.RFC3339, delim) +} + +// SetValue changes key value. +func (k *Key) SetValue(v string) { + if k.s.f.BlockMode { + k.s.f.lock.Lock() + defer k.s.f.lock.Unlock() + } + + k.value = v + k.s.keysHash[k.name] = v +} + +// _________ __ .__ +// / _____/ ____ _____/ |_|__| ____ ____ +// \_____ \_/ __ \_/ ___\ __\ |/ _ \ / \ +// / \ ___/\ \___| | | ( <_> ) | \ +// /_______ /\___ >\___ >__| |__|\____/|___| / +// \/ \/ \/ \/ + +// Section represents a config section. +type Section struct { + f *File + Comment string + name string + keys map[string]*Key + keyList []string + keysHash map[string]string +} + +func newSection(f *File, name string) *Section { + return &Section{f, "", name, make(map[string]*Key), make([]string, 0, 10), make(map[string]string)} +} + +// Name returns name of Section. +func (s *Section) Name() string { + return s.name +} + +// NewKey creates a new key to given section. 
+func (s *Section) NewKey(name, val string) (*Key, error) { + if len(name) == 0 { + return nil, errors.New("error creating new key: empty key name") + } + + if s.f.BlockMode { + s.f.lock.Lock() + defer s.f.lock.Unlock() + } + + if inSlice(name, s.keyList) { + s.keys[name].value = val + return s.keys[name], nil + } + + s.keyList = append(s.keyList, name) + s.keys[name] = &Key{s, "", name, val, false} + s.keysHash[name] = val + return s.keys[name], nil +} + +// GetKey returns key in section by given name. +func (s *Section) GetKey(name string) (*Key, error) { + // FIXME: change to section level lock? + if s.f.BlockMode { + s.f.lock.RLock() + } + key := s.keys[name] + if s.f.BlockMode { + s.f.lock.RUnlock() + } + + if key == nil { + // Check if it is a child-section. + sname := s.name + for { + if i := strings.LastIndex(sname, "."); i > -1 { + sname = sname[:i] + sec, err := s.f.GetSection(sname) + if err != nil { + continue + } + return sec.GetKey(name) + } else { + break + } + } + return nil, fmt.Errorf("error when getting key of section '%s': key '%s' not exists", s.name, name) + } + return key, nil +} + +// HasKey returns true if section contains a key with given name. +func (s *Section) HasKey(name string) bool { + key, _ := s.GetKey(name) + return key != nil +} + +// Haskey is a backwards-compatible name for HasKey. +func (s *Section) Haskey(name string) bool { + return s.HasKey(name) +} + +// HasValue returns true if section contains given raw value. +func (s *Section) HasValue(value string) bool { + if s.f.BlockMode { + s.f.lock.RLock() + defer s.f.lock.RUnlock() + } + + for _, k := range s.keys { + if value == k.value { + return true + } + } + return false +} + +// Key assumes named Key exists in section and returns a zero-value when not. +func (s *Section) Key(name string) *Key { + key, err := s.GetKey(name) + if err != nil { + // It's OK here because the only possible error is empty key name, + // but if it's empty, this piece of code won't be executed. + key, _ = s.NewKey(name, "") + return key + } + return key +} + +// Keys returns list of keys of section. +func (s *Section) Keys() []*Key { + keys := make([]*Key, len(s.keyList)) + for i := range s.keyList { + keys[i] = s.Key(s.keyList[i]) + } + return keys +} + +// KeyStrings returns list of key names of section. +func (s *Section) KeyStrings() []string { + list := make([]string, len(s.keyList)) + copy(list, s.keyList) + return list +} + +// KeysHash returns keys hash consisting of names and values. +func (s *Section) KeysHash() map[string]string { + if s.f.BlockMode { + s.f.lock.RLock() + defer s.f.lock.RUnlock() + } + + hash := map[string]string{} + for key, value := range s.keysHash { + hash[key] = value + } + return hash +} + +// DeleteKey deletes a key from section. +func (s *Section) DeleteKey(name string) { + if s.f.BlockMode { + s.f.lock.Lock() + defer s.f.lock.Unlock() + } + + for i, k := range s.keyList { + if k == name { + s.keyList = append(s.keyList[:i], s.keyList[i+1:]...) + delete(s.keys, name) + return + } + } +} + +// ___________.__.__ +// \_ _____/|__| | ____ +// | __) | | | _/ __ \ +// | \ | | |_\ ___/ +// \___ / |__|____/\___ > +// \/ \/ + +// File represents a combination of a or more INI file(s) in memory. +type File struct { + // Should make things safe, but sometimes doesn't matter. + BlockMode bool + // Make sure data is safe in multiple goroutines. + lock sync.RWMutex + + // Allow combination of multiple data sources. + dataSources []dataSource + // Actual data is stored here. 
+ sections map[string]*Section + + // To keep data in order. + sectionList []string + + NameMapper +} + +// newFile initializes File object with given data sources. +func newFile(dataSources []dataSource) *File { + return &File{ + BlockMode: true, + dataSources: dataSources, + sections: make(map[string]*Section), + sectionList: make([]string, 0, 10), + } +} + +func parseDataSource(source interface{}) (dataSource, error) { + switch s := source.(type) { + case string: + return sourceFile{s}, nil + case []byte: + return &sourceData{s}, nil + default: + return nil, fmt.Errorf("error parsing data source: unknown type '%s'", s) + } +} + +// Load loads and parses from INI data sources. +// Arguments can be mixed of file name with string type, or raw data in []byte. +func Load(source interface{}, others ...interface{}) (_ *File, err error) { + sources := make([]dataSource, len(others)+1) + sources[0], err = parseDataSource(source) + if err != nil { + return nil, err + } + for i := range others { + sources[i+1], err = parseDataSource(others[i]) + if err != nil { + return nil, err + } + } + f := newFile(sources) + if err = f.Reload(); err != nil { + return nil, err + } + return f, nil +} + +// Empty returns an empty file object. +func Empty() *File { + // Ignore error here, we sure our data is good. + f, _ := Load([]byte("")) + return f +} + +// NewSection creates a new section. +func (f *File) NewSection(name string) (*Section, error) { + if len(name) == 0 { + return nil, errors.New("error creating new section: empty section name") + } + + if f.BlockMode { + f.lock.Lock() + defer f.lock.Unlock() + } + + if inSlice(name, f.sectionList) { + return f.sections[name], nil + } + + f.sectionList = append(f.sectionList, name) + f.sections[name] = newSection(f, name) + return f.sections[name], nil +} + +// NewSections creates a list of sections. +func (f *File) NewSections(names ...string) (err error) { + for _, name := range names { + if _, err = f.NewSection(name); err != nil { + return err + } + } + return nil +} + +// GetSection returns section by given name. +func (f *File) GetSection(name string) (*Section, error) { + if len(name) == 0 { + name = DEFAULT_SECTION + } + + if f.BlockMode { + f.lock.RLock() + defer f.lock.RUnlock() + } + + sec := f.sections[name] + if sec == nil { + return nil, fmt.Errorf("error when getting section: section '%s' not exists", name) + } + return sec, nil +} + +// Section assumes named section exists and returns a zero-value when not. +func (f *File) Section(name string) *Section { + sec, err := f.GetSection(name) + if err != nil { + // Note: It's OK here because the only possible error is empty section name, + // but if it's empty, this piece of code won't be executed. + sec, _ = f.NewSection(name) + return sec + } + return sec +} + +// Section returns list of Section. +func (f *File) Sections() []*Section { + sections := make([]*Section, len(f.sectionList)) + for i := range f.sectionList { + sections[i] = f.Section(f.sectionList[i]) + } + return sections +} + +// SectionStrings returns list of section names. +func (f *File) SectionStrings() []string { + list := make([]string, len(f.sectionList)) + copy(list, f.sectionList) + return list +} + +// DeleteSection deletes a section. +func (f *File) DeleteSection(name string) { + if f.BlockMode { + f.lock.Lock() + defer f.lock.Unlock() + } + + if len(name) == 0 { + name = DEFAULT_SECTION + } + + for i, s := range f.sectionList { + if s == name { + f.sectionList = append(f.sectionList[:i], f.sectionList[i+1:]...) 
+			delete(f.sections, name)
+			return
+		}
+	}
+}
+
+func (f *File) reload(s dataSource) error {
+	r, err := s.ReadCloser()
+	if err != nil {
+		return err
+	}
+	defer r.Close()
+
+	return f.parse(r)
+}
+
+// Reload reloads and parses all data sources.
+func (f *File) Reload() (err error) {
+	for _, s := range f.dataSources {
+		if err = f.reload(s); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// Append appends one or more data sources and reloads automatically.
+func (f *File) Append(source interface{}, others ...interface{}) error {
+	ds, err := parseDataSource(source)
+	if err != nil {
+		return err
+	}
+	f.dataSources = append(f.dataSources, ds)
+	for _, s := range others {
+		ds, err = parseDataSource(s)
+		if err != nil {
+			return err
+		}
+		f.dataSources = append(f.dataSources, ds)
+	}
+	return f.Reload()
+}
+
+// WriteToIndent writes file content into io.Writer with given value indentation.
+func (f *File) WriteToIndent(w io.Writer, indent string) (n int64, err error) {
+	equalSign := "="
+	if PrettyFormat {
+		equalSign = " = "
+	}
+
+	// Use buffer to make sure target is safe until finish encoding.
+	buf := bytes.NewBuffer(nil)
+	for i, sname := range f.sectionList {
+		sec := f.Section(sname)
+		if len(sec.Comment) > 0 {
+			if sec.Comment[0] != '#' && sec.Comment[0] != ';' {
+				sec.Comment = "; " + sec.Comment
+			}
+			if _, err = buf.WriteString(sec.Comment + LineBreak); err != nil {
+				return 0, err
+			}
+		}
+
+		if i > 0 {
+			if _, err = buf.WriteString("[" + sname + "]" + LineBreak); err != nil {
+				return 0, err
+			}
+		} else {
+			// Write nothing if default section is empty.
+			if len(sec.keyList) == 0 {
+				continue
+			}
+		}
+
+		for _, kname := range sec.keyList {
+			key := sec.Key(kname)
+			if len(key.Comment) > 0 {
+				if len(indent) > 0 && sname != DEFAULT_SECTION {
+					buf.WriteString(indent)
+				}
+				if key.Comment[0] != '#' && key.Comment[0] != ';' {
+					key.Comment = "; " + key.Comment
+				}
+				if _, err = buf.WriteString(key.Comment + LineBreak); err != nil {
+					return 0, err
+				}
+			}
+
+			if len(indent) > 0 && sname != DEFAULT_SECTION {
+				buf.WriteString(indent)
+			}
+
+			switch {
+			case key.isAutoIncr:
+				kname = "-"
+			case strings.ContainsAny(kname, "\"=:"):
+				kname = "`" + kname + "`"
+			case strings.Contains(kname, "`"):
+				kname = `"""` + kname + `"""`
+			}
+
+			val := key.value
+			// In case key value contains "\n", "`", "\"", "#" or ";".
+			if strings.ContainsAny(val, "\n`") {
+				val = `"""` + val + `"""`
+			} else if strings.ContainsAny(val, "#;") {
+				val = "`" + val + "`"
+			}
+			if _, err = buf.WriteString(kname + equalSign + val + LineBreak); err != nil {
+				return 0, err
+			}
+		}
+
+		// Put a line between sections.
+		if _, err = buf.WriteString(LineBreak); err != nil {
+			return 0, err
+		}
+	}
+
+	return buf.WriteTo(w)
+}
+
+// WriteTo writes file content into io.Writer.
+func (f *File) WriteTo(w io.Writer) (int64, error) {
+	return f.WriteToIndent(w, "")
+}
+
+// SaveToIndent writes content to file system with given value indentation.
+func (f *File) SaveToIndent(filename, indent string) error {
+	// Note: Because os.Create truncates the target file, it is safer to save
+	// to a temporary location first and rename the result after we are done.
+	tmpPath := filename + "." + strconv.Itoa(time.Now().Nanosecond()) + ".tmp"
+	defer os.Remove(tmpPath)
+
+	fw, err := os.Create(tmpPath)
+	if err != nil {
+		return err
+	}
+
+	if _, err = f.WriteToIndent(fw, indent); err != nil {
+		fw.Close()
+		return err
+	}
+	fw.Close()
+
+	// Remove old file and rename the new one.
+ os.Remove(filename) + return os.Rename(tmpPath, filename) +} + +// SaveTo writes content to file system. +func (f *File) SaveTo(filename string) error { + return f.SaveToIndent(filename, "") +} diff --git a/vendor/github.com/go-ini/ini/ini_test.go b/vendor/github.com/go-ini/ini/ini_test.go new file mode 100644 index 000000000..c0b0cc9ae --- /dev/null +++ b/vendor/github.com/go-ini/ini/ini_test.go @@ -0,0 +1,574 @@ +// Copyright 2014 Unknwon +// +// Licensed under the Apache License, Version 2.0 (the "License"): you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. + +package ini + +import ( + "bytes" + "fmt" + "strings" + "testing" + "time" + + . "github.com/smartystreets/goconvey/convey" +) + +func Test_Version(t *testing.T) { + Convey("Get version", t, func() { + So(Version(), ShouldEqual, _VERSION) + }) +} + +const _CONF_DATA = ` +; Package name +NAME = ini +; Package version +VERSION = v1 +; Package import path +IMPORT_PATH = gopkg.in/%(NAME)s.%(VERSION)s + +# Information about package author +# Bio can be written in multiple lines. +[author] +NAME = Unknwon ; Succeeding comment +E-MAIL = fake@localhost +GITHUB = https://github.com/%(NAME)s +BIO = """Gopher. +Coding addict. +Good man. +""" # Succeeding comment + +[package] +CLONE_URL = https://%(IMPORT_PATH)s + +[package.sub] +UNUSED_KEY = should be deleted + +[features] +-: Support read/write comments of keys and sections +-: Support auto-increment of key names +-: Support load multiple files to overwrite key values + +[types] +STRING = str +BOOL = true +BOOL_FALSE = false +FLOAT64 = 1.25 +INT = 10 +TIME = 2015-01-01T20:17:05Z +DURATION = 2h45m +UINT = 3 + +[array] +STRINGS = en, zh, de +FLOAT64S = 1.1, 2.2, 3.3 +INTS = 1, 2, 3 +UINTS = 1, 2, 3 +TIMES = 2015-01-01T20:17:05Z,2015-01-01T20:17:05Z,2015-01-01T20:17:05Z + +[note] +empty_lines = next line is empty\ + +; Comment before the section +[comments] ; This is a comment for the section too +; Comment before key +key = "value" +key2 = "value2" ; This is a comment for key2 +key3 = "one", "two", "three" + +[advance] +value with quotes = "some value" +value quote2 again = 'some value' +true = 2+3=5 +"1+1=2" = true +"""6+1=7""" = true +"""` + "`" + `5+5` + "`" + `""" = 10 +` + "`" + `"6+6"` + "`" + ` = 12 +` + "`" + `7-2=4` + "`" + ` = false +ADDRESS = ` + "`" + `404 road, +NotFound, State, 50000` + "`" + ` + +two_lines = how about \ + continuation lines? 
+lots_of_lines = 1 \ + 2 \ + 3 \ + 4 \ +` + +func Test_Load(t *testing.T) { + Convey("Load from data sources", t, func() { + + Convey("Load with empty data", func() { + So(Empty(), ShouldNotBeNil) + }) + + Convey("Load with multiple data sources", func() { + cfg, err := Load([]byte(_CONF_DATA), "testdata/conf.ini") + So(err, ShouldBeNil) + So(cfg, ShouldNotBeNil) + }) + }) + + Convey("Bad load process", t, func() { + + Convey("Load from invalid data sources", func() { + _, err := Load(_CONF_DATA) + So(err, ShouldNotBeNil) + + f, err := Load("testdata/404.ini") + So(err, ShouldNotBeNil) + So(f, ShouldBeNil) + + _, err = Load(1) + So(err, ShouldNotBeNil) + + _, err = Load([]byte(""), 1) + So(err, ShouldNotBeNil) + }) + + Convey("Load with bad section name", func() { + _, err := Load([]byte("[]")) + So(err, ShouldNotBeNil) + + _, err = Load([]byte("[")) + So(err, ShouldNotBeNil) + }) + + Convey("Load with bad keys", func() { + _, err := Load([]byte(`"""name`)) + So(err, ShouldNotBeNil) + + _, err = Load([]byte(`"""name"""`)) + So(err, ShouldNotBeNil) + + _, err = Load([]byte(`""=1`)) + So(err, ShouldNotBeNil) + + _, err = Load([]byte(`=`)) + So(err, ShouldNotBeNil) + + _, err = Load([]byte(`name`)) + So(err, ShouldNotBeNil) + }) + + Convey("Load with bad values", func() { + _, err := Load([]byte(`name="""Unknwon`)) + So(err, ShouldNotBeNil) + }) + }) +} + +func Test_Values(t *testing.T) { + Convey("Test getting and setting values", t, func() { + cfg, err := Load([]byte(_CONF_DATA), "testdata/conf.ini") + So(err, ShouldBeNil) + So(cfg, ShouldNotBeNil) + + Convey("Get values in default section", func() { + sec := cfg.Section("") + So(sec, ShouldNotBeNil) + So(sec.Key("NAME").Value(), ShouldEqual, "ini") + So(sec.Key("NAME").String(), ShouldEqual, "ini") + So(sec.Key("NAME").Validate(func(in string) string { + return in + }), ShouldEqual, "ini") + So(sec.Key("NAME").Comment, ShouldEqual, "; Package name") + So(sec.Key("IMPORT_PATH").String(), ShouldEqual, "gopkg.in/ini.v1") + }) + + Convey("Get values in non-default section", func() { + sec := cfg.Section("author") + So(sec, ShouldNotBeNil) + So(sec.Key("NAME").String(), ShouldEqual, "Unknwon") + So(sec.Key("GITHUB").String(), ShouldEqual, "https://github.com/Unknwon") + + sec = cfg.Section("package") + So(sec, ShouldNotBeNil) + So(sec.Key("CLONE_URL").String(), ShouldEqual, "https://gopkg.in/ini.v1") + }) + + Convey("Get auto-increment key names", func() { + keys := cfg.Section("features").Keys() + for i, k := range keys { + So(k.Name(), ShouldEqual, fmt.Sprintf("#%d", i+1)) + } + }) + + Convey("Get overwrite value", func() { + So(cfg.Section("author").Key("E-MAIL").String(), ShouldEqual, "u@gogs.io") + }) + + Convey("Get sections", func() { + sections := cfg.Sections() + for i, name := range []string{DEFAULT_SECTION, "author", "package", "package.sub", "features", "types", "array", "note", "comments", "advance"} { + So(sections[i].Name(), ShouldEqual, name) + } + }) + + Convey("Get parent section value", func() { + So(cfg.Section("package.sub").Key("CLONE_URL").String(), ShouldEqual, "https://gopkg.in/ini.v1") + So(cfg.Section("package.fake.sub").Key("CLONE_URL").String(), ShouldEqual, "https://gopkg.in/ini.v1") + }) + + Convey("Get multiple line value", func() { + So(cfg.Section("author").Key("BIO").String(), ShouldEqual, "Gopher.\nCoding addict.\nGood man.\n") + }) + + Convey("Get values with type", func() { + sec := cfg.Section("types") + v1, err := sec.Key("BOOL").Bool() + So(err, ShouldBeNil) + So(v1, ShouldBeTrue) + + v1, err = 
sec.Key("BOOL_FALSE").Bool() + So(err, ShouldBeNil) + So(v1, ShouldBeFalse) + + v2, err := sec.Key("FLOAT64").Float64() + So(err, ShouldBeNil) + So(v2, ShouldEqual, 1.25) + + v3, err := sec.Key("INT").Int() + So(err, ShouldBeNil) + So(v3, ShouldEqual, 10) + + v4, err := sec.Key("INT").Int64() + So(err, ShouldBeNil) + So(v4, ShouldEqual, 10) + + v5, err := sec.Key("UINT").Uint() + So(err, ShouldBeNil) + So(v5, ShouldEqual, 3) + + v6, err := sec.Key("UINT").Uint64() + So(err, ShouldBeNil) + So(v6, ShouldEqual, 3) + + t, err := time.Parse(time.RFC3339, "2015-01-01T20:17:05Z") + So(err, ShouldBeNil) + v7, err := sec.Key("TIME").Time() + So(err, ShouldBeNil) + So(v7.String(), ShouldEqual, t.String()) + + Convey("Must get values with type", func() { + So(sec.Key("STRING").MustString("404"), ShouldEqual, "str") + So(sec.Key("BOOL").MustBool(), ShouldBeTrue) + So(sec.Key("FLOAT64").MustFloat64(), ShouldEqual, 1.25) + So(sec.Key("INT").MustInt(), ShouldEqual, 10) + So(sec.Key("INT").MustInt64(), ShouldEqual, 10) + So(sec.Key("UINT").MustUint(), ShouldEqual, 3) + So(sec.Key("UINT").MustUint64(), ShouldEqual, 3) + So(sec.Key("TIME").MustTime().String(), ShouldEqual, t.String()) + + dur, err := time.ParseDuration("2h45m") + So(err, ShouldBeNil) + So(sec.Key("DURATION").MustDuration().Seconds(), ShouldEqual, dur.Seconds()) + + Convey("Must get values with default value", func() { + So(sec.Key("STRING_404").MustString("404"), ShouldEqual, "404") + So(sec.Key("BOOL_404").MustBool(true), ShouldBeTrue) + So(sec.Key("FLOAT64_404").MustFloat64(2.5), ShouldEqual, 2.5) + So(sec.Key("INT_404").MustInt(15), ShouldEqual, 15) + So(sec.Key("INT_404").MustInt64(15), ShouldEqual, 15) + So(sec.Key("UINT_404").MustUint(6), ShouldEqual, 6) + So(sec.Key("UINT_404").MustUint64(6), ShouldEqual, 6) + + t, err := time.Parse(time.RFC3339, "2014-01-01T20:17:05Z") + So(err, ShouldBeNil) + So(sec.Key("TIME_404").MustTime(t).String(), ShouldEqual, t.String()) + + So(sec.Key("DURATION_404").MustDuration(dur).Seconds(), ShouldEqual, dur.Seconds()) + }) + }) + }) + + Convey("Get value with candidates", func() { + sec := cfg.Section("types") + So(sec.Key("STRING").In("", []string{"str", "arr", "types"}), ShouldEqual, "str") + So(sec.Key("FLOAT64").InFloat64(0, []float64{1.25, 2.5, 3.75}), ShouldEqual, 1.25) + So(sec.Key("INT").InInt(0, []int{10, 20, 30}), ShouldEqual, 10) + So(sec.Key("INT").InInt64(0, []int64{10, 20, 30}), ShouldEqual, 10) + So(sec.Key("UINT").InUint(0, []uint{3, 6, 9}), ShouldEqual, 3) + So(sec.Key("UINT").InUint64(0, []uint64{3, 6, 9}), ShouldEqual, 3) + + zt, err := time.Parse(time.RFC3339, "0001-01-01T01:00:00Z") + So(err, ShouldBeNil) + t, err := time.Parse(time.RFC3339, "2015-01-01T20:17:05Z") + So(err, ShouldBeNil) + So(sec.Key("TIME").InTime(zt, []time.Time{t, time.Now(), time.Now().Add(1 * time.Second)}).String(), ShouldEqual, t.String()) + + Convey("Get value with candidates and default value", func() { + So(sec.Key("STRING_404").In("str", []string{"str", "arr", "types"}), ShouldEqual, "str") + So(sec.Key("FLOAT64_404").InFloat64(1.25, []float64{1.25, 2.5, 3.75}), ShouldEqual, 1.25) + So(sec.Key("INT_404").InInt(10, []int{10, 20, 30}), ShouldEqual, 10) + So(sec.Key("INT64_404").InInt64(10, []int64{10, 20, 30}), ShouldEqual, 10) + So(sec.Key("UINT_404").InUint(3, []uint{3, 6, 9}), ShouldEqual, 3) + So(sec.Key("UINT_404").InUint64(3, []uint64{3, 6, 9}), ShouldEqual, 3) + So(sec.Key("TIME_404").InTime(t, []time.Time{time.Now(), time.Now(), time.Now().Add(1 * time.Second)}).String(), ShouldEqual, t.String()) + 
}) + }) + + Convey("Get values in range", func() { + sec := cfg.Section("types") + So(sec.Key("FLOAT64").RangeFloat64(0, 1, 2), ShouldEqual, 1.25) + So(sec.Key("INT").RangeInt(0, 10, 20), ShouldEqual, 10) + So(sec.Key("INT").RangeInt64(0, 10, 20), ShouldEqual, 10) + + minT, err := time.Parse(time.RFC3339, "0001-01-01T01:00:00Z") + So(err, ShouldBeNil) + midT, err := time.Parse(time.RFC3339, "2013-01-01T01:00:00Z") + So(err, ShouldBeNil) + maxT, err := time.Parse(time.RFC3339, "9999-01-01T01:00:00Z") + So(err, ShouldBeNil) + t, err := time.Parse(time.RFC3339, "2015-01-01T20:17:05Z") + So(err, ShouldBeNil) + So(sec.Key("TIME").RangeTime(t, minT, maxT).String(), ShouldEqual, t.String()) + + Convey("Get value in range with default value", func() { + So(sec.Key("FLOAT64").RangeFloat64(5, 0, 1), ShouldEqual, 5) + So(sec.Key("INT").RangeInt(7, 0, 5), ShouldEqual, 7) + So(sec.Key("INT").RangeInt64(7, 0, 5), ShouldEqual, 7) + So(sec.Key("TIME").RangeTime(t, minT, midT).String(), ShouldEqual, t.String()) + }) + }) + + Convey("Get values into slice", func() { + sec := cfg.Section("array") + So(strings.Join(sec.Key("STRINGS").Strings(","), ","), ShouldEqual, "en,zh,de") + So(len(sec.Key("STRINGS_404").Strings(",")), ShouldEqual, 0) + + vals1 := sec.Key("FLOAT64S").Float64s(",") + for i, v := range []float64{1.1, 2.2, 3.3} { + So(vals1[i], ShouldEqual, v) + } + + vals2 := sec.Key("INTS").Ints(",") + for i, v := range []int{1, 2, 3} { + So(vals2[i], ShouldEqual, v) + } + + vals3 := sec.Key("INTS").Int64s(",") + for i, v := range []int64{1, 2, 3} { + So(vals3[i], ShouldEqual, v) + } + + vals4 := sec.Key("UINTS").Uints(",") + for i, v := range []uint{1, 2, 3} { + So(vals4[i], ShouldEqual, v) + } + + vals5 := sec.Key("UINTS").Uint64s(",") + for i, v := range []uint64{1, 2, 3} { + So(vals5[i], ShouldEqual, v) + } + + t, err := time.Parse(time.RFC3339, "2015-01-01T20:17:05Z") + So(err, ShouldBeNil) + vals6 := sec.Key("TIMES").Times(",") + for i, v := range []time.Time{t, t, t} { + So(vals6[i].String(), ShouldEqual, v.String()) + } + }) + + Convey("Get key hash", func() { + cfg.Section("").KeysHash() + }) + + Convey("Set key value", func() { + k := cfg.Section("author").Key("NAME") + k.SetValue("æ— é—»") + So(k.String(), ShouldEqual, "æ— é—»") + }) + + Convey("Get key strings", func() { + So(strings.Join(cfg.Section("types").KeyStrings(), ","), ShouldEqual, "STRING,BOOL,BOOL_FALSE,FLOAT64,INT,TIME,DURATION,UINT") + }) + + Convey("Delete a key", func() { + cfg.Section("package.sub").DeleteKey("UNUSED_KEY") + _, err := cfg.Section("package.sub").GetKey("UNUSED_KEY") + So(err, ShouldNotBeNil) + }) + + Convey("Has Key (backwards compatible)", func() { + sec := cfg.Section("package.sub") + haskey1 := sec.Haskey("UNUSED_KEY") + haskey2 := sec.Haskey("CLONE_URL") + haskey3 := sec.Haskey("CLONE_URL_NO") + So(haskey1, ShouldBeTrue) + So(haskey2, ShouldBeTrue) + So(haskey3, ShouldBeFalse) + }) + + Convey("Has Key", func() { + sec := cfg.Section("package.sub") + haskey1 := sec.HasKey("UNUSED_KEY") + haskey2 := sec.HasKey("CLONE_URL") + haskey3 := sec.HasKey("CLONE_URL_NO") + So(haskey1, ShouldBeTrue) + So(haskey2, ShouldBeTrue) + So(haskey3, ShouldBeFalse) + }) + + Convey("Has Value", func() { + sec := cfg.Section("author") + hasvalue1 := sec.HasValue("Unknwon") + hasvalue2 := sec.HasValue("doc") + So(hasvalue1, ShouldBeTrue) + So(hasvalue2, ShouldBeFalse) + }) + + Convey("Get section strings", func() { + So(strings.Join(cfg.SectionStrings(), ","), ShouldEqual, 
"DEFAULT,author,package,package.sub,features,types,array,note,comments,advance") + }) + + Convey("Delete a section", func() { + cfg.DeleteSection("") + So(cfg.SectionStrings()[0], ShouldNotEqual, DEFAULT_SECTION) + }) + + Convey("Create new sections", func() { + cfg.NewSections("test", "test2") + _, err := cfg.GetSection("test") + So(err, ShouldBeNil) + _, err = cfg.GetSection("test2") + So(err, ShouldBeNil) + }) + }) + + Convey("Test getting and setting bad values", t, func() { + cfg, err := Load([]byte(_CONF_DATA), "testdata/conf.ini") + So(err, ShouldBeNil) + So(cfg, ShouldNotBeNil) + + Convey("Create new key with empty name", func() { + k, err := cfg.Section("").NewKey("", "") + So(err, ShouldNotBeNil) + So(k, ShouldBeNil) + }) + + Convey("Create new section with empty name", func() { + s, err := cfg.NewSection("") + So(err, ShouldNotBeNil) + So(s, ShouldBeNil) + }) + + Convey("Create new sections with empty name", func() { + So(cfg.NewSections(""), ShouldNotBeNil) + }) + + Convey("Get section that not exists", func() { + s, err := cfg.GetSection("404") + So(err, ShouldNotBeNil) + So(s, ShouldBeNil) + + s = cfg.Section("404") + So(s, ShouldNotBeNil) + }) + }) + + Convey("Test key hash clone", t, func() { + cfg, err := Load([]byte(strings.Replace("network=tcp,addr=127.0.0.1:6379,db=4,pool_size=100,idle_timeout=180", ",", "\n", -1))) + So(err, ShouldBeNil) + for _, v := range cfg.Section("").KeysHash() { + So(len(v), ShouldBeGreaterThan, 0) + } + }) + + Convey("Key has empty value", t, func() { + _conf := `key1= +key2= ; comment` + cfg, err := Load([]byte(_conf)) + So(err, ShouldBeNil) + So(cfg.Section("").Key("key1").Value(), ShouldBeEmpty) + }) +} + +func Test_File_Append(t *testing.T) { + Convey("Append data sources", t, func() { + cfg, err := Load([]byte("")) + So(err, ShouldBeNil) + So(cfg, ShouldNotBeNil) + + So(cfg.Append([]byte(""), []byte("")), ShouldBeNil) + + Convey("Append bad data sources", func() { + So(cfg.Append(1), ShouldNotBeNil) + So(cfg.Append([]byte(""), 1), ShouldNotBeNil) + }) + }) +} + +func Test_File_WriteTo(t *testing.T) { + Convey("Write to somewhere", t, func() { + var buf bytes.Buffer + cfg := Empty() + cfg.WriteTo(&buf) + }) +} + +func Test_File_SaveTo(t *testing.T) { + Convey("Save file", t, func() { + cfg, err := Load([]byte(_CONF_DATA), "testdata/conf.ini") + So(err, ShouldBeNil) + So(cfg, ShouldNotBeNil) + + cfg.Section("").Key("NAME").Comment = "Package name" + cfg.Section("author").Comment = `Information about package author +# Bio can be written in multiple lines.` + cfg.Section("advanced").Key("val w/ pound").SetValue("my#password") + So(cfg.SaveTo("testdata/conf_out.ini"), ShouldBeNil) + + cfg.Section("author").Key("NAME").Comment = "This is author name" + So(cfg.SaveToIndent("testdata/conf_out.ini", "\t"), ShouldBeNil) + }) +} + +func Benchmark_Key_Value(b *testing.B) { + c, _ := Load([]byte(_CONF_DATA)) + for i := 0; i < b.N; i++ { + c.Section("").Key("NAME").Value() + } +} + +func Benchmark_Key_String(b *testing.B) { + c, _ := Load([]byte(_CONF_DATA)) + for i := 0; i < b.N; i++ { + c.Section("").Key("NAME").String() + } +} + +func Benchmark_Key_Value_NonBlock(b *testing.B) { + c, _ := Load([]byte(_CONF_DATA)) + c.BlockMode = false + for i := 0; i < b.N; i++ { + c.Section("").Key("NAME").Value() + } +} + +func Benchmark_Key_String_NonBlock(b *testing.B) { + c, _ := Load([]byte(_CONF_DATA)) + c.BlockMode = false + for i := 0; i < b.N; i++ { + c.Section("").Key("NAME").String() + } +} + +func Benchmark_Key_SetValue(b *testing.B) { + c, _ := 
Load([]byte(_CONF_DATA)) + for i := 0; i < b.N; i++ { + c.Section("").Key("NAME").SetValue("10") + } +} diff --git a/vendor/github.com/go-ini/ini/parser.go b/vendor/github.com/go-ini/ini/parser.go new file mode 100644 index 000000000..1c1bf91f0 --- /dev/null +++ b/vendor/github.com/go-ini/ini/parser.go @@ -0,0 +1,312 @@ +// Copyright 2015 Unknwon +// +// Licensed under the Apache License, Version 2.0 (the "License"): you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. + +package ini + +import ( + "bufio" + "bytes" + "fmt" + "io" + "strconv" + "strings" + "unicode" +) + +type tokenType int + +const ( + _TOKEN_INVALID tokenType = iota + _TOKEN_COMMENT + _TOKEN_SECTION + _TOKEN_KEY +) + +type parser struct { + buf *bufio.Reader + isEOF bool + count int + comment *bytes.Buffer +} + +func newParser(r io.Reader) *parser { + return &parser{ + buf: bufio.NewReader(r), + count: 1, + comment: &bytes.Buffer{}, + } +} + +// BOM handles header of BOM-UTF8 format. +// http://en.wikipedia.org/wiki/Byte_order_mark#Representations_of_byte_order_marks_by_encoding +func (p *parser) BOM() error { + mask, err := p.buf.Peek(3) + if err != nil && err != io.EOF { + return err + } else if len(mask) < 3 { + return nil + } else if mask[0] == 239 && mask[1] == 187 && mask[2] == 191 { + p.buf.Read(mask) + } + return nil +} + +func (p *parser) readUntil(delim byte) ([]byte, error) { + data, err := p.buf.ReadBytes(delim) + if err != nil { + if err == io.EOF { + p.isEOF = true + } else { + return nil, err + } + } + return data, nil +} + +func cleanComment(in []byte) ([]byte, bool) { + i := bytes.IndexAny(in, "#;") + if i == -1 { + return nil, false + } + return in[i:], true +} + +func readKeyName(in []byte) (string, int, error) { + line := string(in) + + // Check if key name surrounded by quotes. 
+ var keyQuote string + if line[0] == '"' { + if len(line) > 6 && string(line[0:3]) == `"""` { + keyQuote = `"""` + } else { + keyQuote = `"` + } + } else if line[0] == '`' { + keyQuote = "`" + } + + // Get out key name + endIdx := -1 + if len(keyQuote) > 0 { + startIdx := len(keyQuote) + // FIXME: fail case -> """"""name"""=value + pos := strings.Index(line[startIdx:], keyQuote) + if pos == -1 { + return "", -1, fmt.Errorf("missing closing key quote: %s", line) + } + pos += startIdx + + // Find key-value delimiter + i := strings.IndexAny(line[pos+startIdx:], "=:") + if i < 0 { + return "", -1, fmt.Errorf("key-value delimiter not found: %s", line) + } + endIdx = pos + i + return strings.TrimSpace(line[startIdx:pos]), endIdx + startIdx + 1, nil + } + + endIdx = strings.IndexAny(line, "=:") + if endIdx < 0 { + return "", -1, fmt.Errorf("key-value delimiter not found: %s", line) + } + return strings.TrimSpace(line[0:endIdx]), endIdx + 1, nil +} + +func (p *parser) readMultilines(line, val, valQuote string) (string, error) { + for { + data, err := p.readUntil('\n') + if err != nil { + return "", err + } + next := string(data) + + pos := strings.LastIndex(next, valQuote) + if pos > -1 { + val += next[:pos] + + comment, has := cleanComment([]byte(next[pos:])) + if has { + p.comment.Write(bytes.TrimSpace(comment)) + } + break + } + val += next + if p.isEOF { + return "", fmt.Errorf("missing closing key quote from '%s' to '%s'", line, next) + } + } + return val, nil +} + +func (p *parser) readContinuationLines(val string) (string, error) { + for { + data, err := p.readUntil('\n') + if err != nil { + return "", err + } + next := strings.TrimSpace(string(data)) + + if len(next) == 0 { + break + } + val += next + if val[len(val)-1] != '\\' { + break + } + val = val[:len(val)-1] + } + return val, nil +} + +// hasSurroundedQuote check if and only if the first and last characters +// are quotes \" or \'. +// It returns false if any other parts also contain same kind of quotes. +func hasSurroundedQuote(in string, quote byte) bool { + return len(in) > 2 && in[0] == quote && in[len(in)-1] == quote && + strings.IndexByte(in[1:], quote) == len(in)-2 +} + +func (p *parser) readValue(in []byte) (string, error) { + line := strings.TrimLeftFunc(string(in), unicode.IsSpace) + if len(line) == 0 { + return "", nil + } + + var valQuote string + if len(line) > 3 && string(line[0:3]) == `"""` { + valQuote = `"""` + } else if line[0] == '`' { + valQuote = "`" + } + + if len(valQuote) > 0 { + startIdx := len(valQuote) + pos := strings.LastIndex(line[startIdx:], valQuote) + // Check for multi-line value + if pos == -1 { + return p.readMultilines(line, line[startIdx:], valQuote) + } + + return line[startIdx : pos+startIdx], nil + } + + // Won't be able to reach here if value only contains whitespace. + line = strings.TrimSpace(line) + + // Check continuation lines + if line[len(line)-1] == '\\' { + return p.readContinuationLines(line[:len(line)-1]) + } + + i := strings.IndexAny(line, "#;") + if i > -1 { + p.comment.WriteString(line[i:]) + line = strings.TrimSpace(line[:i]) + } + + // Trim single quotes + if hasSurroundedQuote(line, '\'') || + hasSurroundedQuote(line, '"') { + line = line[1 : len(line)-1] + } + return line, nil +} + +// parse parses data through an io.Reader. +func (f *File) parse(reader io.Reader) (err error) { + p := newParser(reader) + if err = p.BOM(); err != nil { + return fmt.Errorf("BOM: %v", err) + } + + // Ignore error because default section name is never empty string. 
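+	// (NewSection rejects only an empty name, which DEFAULT_SECTION never
+	// is, so the error can safely be discarded.) The loop below handles, in
+	// order: blank lines, comment lines, "[section]" headers, and key-value
+	// pairs. An illustrative input (values invented) such as
+	//
+	//	; app settings
+	//	[server]
+	//	port = 8080
+	//
+	// yields one section named "server" holding one key, with the leading
+	// comment attached to the section.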
+	section, _ := f.NewSection(DEFAULT_SECTION)
+
+	var line []byte
+	for !p.isEOF {
+		line, err = p.readUntil('\n')
+		if err != nil {
+			return err
+		}
+
+		line = bytes.TrimLeftFunc(line, unicode.IsSpace)
+		if len(line) == 0 {
+			continue
+		}
+
+		// Comments
+		if line[0] == '#' || line[0] == ';' {
+			// Note: we do not care about the ending line break here;
+			// it is needed for appending a second line, so just clean
+			// it up once at the end when the value is set.
+			p.comment.Write(line)
+			continue
+		}
+
+		// Section
+		if line[0] == '[' {
+			// Read to the next ']' (TODO: support quoted strings)
+			closeIdx := bytes.IndexByte(line, ']')
+			if closeIdx == -1 {
+				return fmt.Errorf("unclosed section: %s", line)
+			}
+
+			section, err = f.NewSection(string(line[1:closeIdx]))
+			if err != nil {
+				return err
+			}
+
+			comment, has := cleanComment(line[closeIdx+1:])
+			if has {
+				p.comment.Write(comment)
+			}
+
+			section.Comment = strings.TrimSpace(p.comment.String())
+
+			// Reset auto-counter and comments
+			p.comment.Reset()
+			p.count = 1
+			continue
+		}
+
+		kname, offset, err := readKeyName(line)
+		if err != nil {
+			return err
+		}
+
+		// Auto increment.
+		isAutoIncr := false
+		if kname == "-" {
+			isAutoIncr = true
+			kname = "#" + strconv.Itoa(p.count)
+			p.count++
+		}
+
+		key, err := section.NewKey(kname, "")
+		if err != nil {
+			return err
+		}
+		key.isAutoIncr = isAutoIncr
+
+		value, err := p.readValue(line[offset:])
+		if err != nil {
+			return err
+		}
+		key.SetValue(value)
+		key.Comment = strings.TrimSpace(p.comment.String())
+		p.comment.Reset()
+	}
+	return nil
+}
diff --git a/vendor/github.com/go-ini/ini/struct.go b/vendor/github.com/go-ini/ini/struct.go
new file mode 100644
index 000000000..3fb92c396
--- /dev/null
+++ b/vendor/github.com/go-ini/ini/struct.go
@@ -0,0 +1,351 @@
+// Copyright 2014 Unknwon
+//
+// Licensed under the Apache License, Version 2.0 (the "License"): you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package ini
+
+import (
+	"bytes"
+	"errors"
+	"fmt"
+	"reflect"
+	"time"
+	"unicode"
+)
+
+// NameMapper represents an ini tag name mapper.
+type NameMapper func(string) string
+
+// Built-in name getters.
+var (
+	// AllCapsUnderscore converts to format ALL_CAPS_UNDERSCORE.
+	AllCapsUnderscore NameMapper = func(raw string) string {
+		newstr := make([]rune, 0, len(raw))
+		for i, chr := range raw {
+			if isUpper := 'A' <= chr && chr <= 'Z'; isUpper {
+				if i > 0 {
+					newstr = append(newstr, '_')
+				}
+			}
+			newstr = append(newstr, unicode.ToUpper(chr))
+		}
+		return string(newstr)
+	}
+	// TitleUnderscore converts to format title_underscore.
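+	// For example (illustrative), "PackageName" becomes "package_name" here,
+	// while AllCapsUnderscore above would render it as "PACKAGE_NAME".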
+	TitleUnderscore NameMapper = func(raw string) string {
+		newstr := make([]rune, 0, len(raw))
+		for i, chr := range raw {
+			if isUpper := 'A' <= chr && chr <= 'Z'; isUpper {
+				if i > 0 {
+					newstr = append(newstr, '_')
+				}
+				chr -= ('A' - 'a')
+			}
+			newstr = append(newstr, chr)
+		}
+		return string(newstr)
+	}
+)
+
+func (s *Section) parseFieldName(raw, actual string) string {
+	if len(actual) > 0 {
+		return actual
+	}
+	if s.f.NameMapper != nil {
+		return s.f.NameMapper(raw)
+	}
+	return raw
+}
+
+func parseDelim(actual string) string {
+	if len(actual) > 0 {
+		return actual
+	}
+	return ","
+}
+
+var reflectTime = reflect.TypeOf(time.Now()).Kind()
+
+// setWithProperType sets proper value to field based on its type,
+// but it does not return an error on failed parsing, because we want
+// to keep the default value that is already assigned to the struct.
+func setWithProperType(t reflect.Type, key *Key, field reflect.Value, delim string) error {
+	switch t.Kind() {
+	case reflect.String:
+		if len(key.String()) == 0 {
+			return nil
+		}
+		field.SetString(key.String())
+	case reflect.Bool:
+		boolVal, err := key.Bool()
+		if err != nil {
+			return nil
+		}
+		field.SetBool(boolVal)
+	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+		durationVal, err := key.Duration()
+		// Skip zero value
+		if err == nil && int(durationVal) > 0 {
+			field.Set(reflect.ValueOf(durationVal))
+			return nil
+		}
+
+		intVal, err := key.Int64()
+		if err != nil || intVal == 0 {
+			return nil
+		}
+		field.SetInt(intVal)
+	// byte is an alias for uint8, so supporting uint8 breaks support for byte
+	case reflect.Uint, reflect.Uint16, reflect.Uint32, reflect.Uint64:
+		durationVal, err := key.Duration()
+		if err == nil {
+			field.Set(reflect.ValueOf(durationVal))
+			return nil
+		}
+
+		uintVal, err := key.Uint64()
+		if err != nil {
+			return nil
+		}
+		field.SetUint(uintVal)
+
+	case reflect.Float64:
+		floatVal, err := key.Float64()
+		if err != nil {
+			return nil
+		}
+		field.SetFloat(floatVal)
+	case reflectTime:
+		timeVal, err := key.Time()
+		if err != nil {
+			return nil
+		}
+		field.Set(reflect.ValueOf(timeVal))
+	case reflect.Slice:
+		vals := key.Strings(delim)
+		numVals := len(vals)
+		if numVals == 0 {
+			return nil
+		}
+
+		sliceOf := field.Type().Elem().Kind()
+
+		var times []time.Time
+		if sliceOf == reflectTime {
+			times = key.Times(delim)
+		}
+
+		slice := reflect.MakeSlice(field.Type(), numVals, numVals)
+		for i := 0; i < numVals; i++ {
+			switch sliceOf {
+			case reflectTime:
+				slice.Index(i).Set(reflect.ValueOf(times[i]))
+			default:
+				slice.Index(i).Set(reflect.ValueOf(vals[i]))
+			}
+		}
+		field.Set(slice)
+	default:
+		return fmt.Errorf("unsupported type '%s'", t)
+	}
+	return nil
+}
+
+func (s *Section) mapTo(val reflect.Value) error {
+	if val.Kind() == reflect.Ptr {
+		val = val.Elem()
+	}
+	typ := val.Type()
+
+	for i := 0; i < typ.NumField(); i++ {
+		field := val.Field(i)
+		tpField := typ.Field(i)
+
+		tag := tpField.Tag.Get("ini")
+		if tag == "-" {
+			continue
+		}
+
+		fieldName := s.parseFieldName(tpField.Name, tag)
+		if len(fieldName) == 0 || !field.CanSet() {
+			continue
+		}
+
+		isAnonymous := tpField.Type.Kind() == reflect.Ptr && tpField.Anonymous
+		isStruct := tpField.Type.Kind() == reflect.Struct
+		if isAnonymous {
+			field.Set(reflect.New(tpField.Type.Elem()))
+		}
+
+		if isAnonymous || isStruct {
+			if sec, err := s.f.GetSection(fieldName); err == nil {
+				if err = sec.mapTo(field); err != nil {
+					return fmt.Errorf("error mapping field(%s): %v", fieldName, err)
+				}
+				continue
+			}
+		}
+
+		if key, err := s.GetKey(fieldName); err == nil {
+			if err = setWithProperType(tpField.Type, key, field, parseDelim(tpField.Tag.Get("delim"))); err != nil {
+				return fmt.Errorf("error mapping field(%s): %v", fieldName, err)
+			}
+		}
+	}
+	return nil
+}
+
+// MapTo maps section to given struct.
+func (s *Section) MapTo(v interface{}) error {
+	typ := reflect.TypeOf(v)
+	val := reflect.ValueOf(v)
+	if typ.Kind() == reflect.Ptr {
+		typ = typ.Elem()
+		val = val.Elem()
+	} else {
+		return errors.New("cannot map to non-pointer struct")
+	}
+
+	return s.mapTo(val)
+}
+
+// MapTo maps file to given struct.
+func (f *File) MapTo(v interface{}) error {
+	return f.Section("").MapTo(v)
+}
+
+// MapToWithMapper maps data sources to given struct with name mapper.
+func MapToWithMapper(v interface{}, mapper NameMapper, source interface{}, others ...interface{}) error {
+	cfg, err := Load(source, others...)
+	if err != nil {
+		return err
+	}
+	cfg.NameMapper = mapper
+	return cfg.MapTo(v)
+}
+
+// MapTo maps data sources to given struct.
+func MapTo(v, source interface{}, others ...interface{}) error {
+	return MapToWithMapper(v, nil, source, others...)
+}
+
+// reflectWithProperType does the opposite of setWithProperType.
+func reflectWithProperType(t reflect.Type, key *Key, field reflect.Value, delim string) error {
+	switch t.Kind() {
+	case reflect.String:
+		key.SetValue(field.String())
+	case reflect.Bool,
+		reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64,
+		reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64,
+		reflect.Float64,
+		reflectTime:
+		key.SetValue(fmt.Sprint(field))
+	case reflect.Slice:
+		vals := field.Slice(0, field.Len())
+		if field.Len() == 0 {
+			return nil
+		}
+
+		var buf bytes.Buffer
+		isTime := fmt.Sprint(field.Type()) == "[]time.Time"
+		for i := 0; i < field.Len(); i++ {
+			if isTime {
+				buf.WriteString(vals.Index(i).Interface().(time.Time).Format(time.RFC3339))
+			} else {
+				buf.WriteString(fmt.Sprint(vals.Index(i)))
+			}
+			buf.WriteString(delim)
+		}
+		key.SetValue(buf.String()[:buf.Len()-1])
+	default:
+		return fmt.Errorf("unsupported type '%s'", t)
+	}
+	return nil
+}
+
+func (s *Section) reflectFrom(val reflect.Value) error {
+	if val.Kind() == reflect.Ptr {
+		val = val.Elem()
+	}
+	typ := val.Type()
+
+	for i := 0; i < typ.NumField(); i++ {
+		field := val.Field(i)
+		tpField := typ.Field(i)
+
+		tag := tpField.Tag.Get("ini")
+		if tag == "-" {
+			continue
+		}
+
+		fieldName := s.parseFieldName(tpField.Name, tag)
+		if len(fieldName) == 0 || !field.CanSet() {
+			continue
+		}
+
+		if (tpField.Type.Kind() == reflect.Ptr && tpField.Anonymous) ||
+			(tpField.Type.Kind() == reflect.Struct) {
+			// Note: The only error here is section doesn't exist.
+			sec, err := s.f.GetSection(fieldName)
+			if err != nil {
+				// Note: fieldName can never be empty here, ignore error.
+				sec, _ = s.f.NewSection(fieldName)
+			}
+			if err = sec.reflectFrom(field); err != nil {
+				return fmt.Errorf("error reflecting field(%s): %v", fieldName, err)
+			}
+			continue
+		}
+
+		// Note: Same reason as section above.
+		key, err := s.GetKey(fieldName)
+		if err != nil {
+			key, _ = s.NewKey(fieldName, "")
+		}
+		if err = reflectWithProperType(tpField.Type, key, field, parseDelim(tpField.Tag.Get("delim"))); err != nil {
+			return fmt.Errorf("error reflecting field(%s): %v", fieldName, err)
+		}
+
+	}
+	return nil
+}
+
+// ReflectFrom reflects section from given struct.
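+// A hedged usage sketch (struct and values invented for illustration):
+//
+//	type Server struct {
+//		Host string
+//		Port int
+//	}
+//	cfg := Empty()
+//	err := cfg.Section("server").ReflectFrom(&Server{Host: "0.0.0.0", Port: 8080})
+//	// On success, cfg holds a [server] section with Host and Port keys.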
+func (s *Section) ReflectFrom(v interface{}) error { + typ := reflect.TypeOf(v) + val := reflect.ValueOf(v) + if typ.Kind() == reflect.Ptr { + typ = typ.Elem() + val = val.Elem() + } else { + return errors.New("cannot reflect from non-pointer struct") + } + + return s.reflectFrom(val) +} + +// ReflectFrom reflects file from given struct. +func (f *File) ReflectFrom(v interface{}) error { + return f.Section("").ReflectFrom(v) +} + +// ReflectFrom reflects data sources from given struct with name mapper. +func ReflectFromWithMapper(cfg *File, v interface{}, mapper NameMapper) error { + cfg.NameMapper = mapper + return cfg.ReflectFrom(v) +} + +// ReflectFrom reflects data sources from given struct. +func ReflectFrom(cfg *File, v interface{}) error { + return ReflectFromWithMapper(cfg, v, nil) +} diff --git a/vendor/github.com/go-ini/ini/struct_test.go b/vendor/github.com/go-ini/ini/struct_test.go new file mode 100644 index 000000000..d865ad78e --- /dev/null +++ b/vendor/github.com/go-ini/ini/struct_test.go @@ -0,0 +1,239 @@ +// Copyright 2014 Unknwon +// +// Licensed under the Apache License, Version 2.0 (the "License"): you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. + +package ini + +import ( + "strings" + "testing" + "time" + + . "github.com/smartystreets/goconvey/convey" +) + +type testNested struct { + Cities []string `delim:"|"` + Visits []time.Time + Note string + Unused int `ini:"-"` +} + +type testEmbeded struct { + GPA float64 +} + +type testStruct struct { + Name string `ini:"NAME"` + Age int + Male bool + Money float64 + Born time.Time + Time time.Duration `ini:"Duration"` + Others testNested + *testEmbeded `ini:"grade"` + Unused int `ini:"-"` + Unsigned uint +} + +const _CONF_DATA_STRUCT = ` +NAME = Unknwon +Age = 21 +Male = true +Money = 1.25 +Born = 1993-10-07T20:17:05Z +Duration = 2h45m +Unsigned = 3 + +[Others] +Cities = HangZhou|Boston +Visits = 1993-10-07T20:17:05Z, 1993-10-07T20:17:05Z +Note = Hello world! 
+ +[grade] +GPA = 2.8 + +[foo.bar] +Here = there +When = then +` + +type unsupport struct { + Byte byte +} + +type unsupport2 struct { + Others struct { + Cities byte + } +} + +type unsupport3 struct { + Cities byte +} + +type unsupport4 struct { + *unsupport3 `ini:"Others"` +} + +type defaultValue struct { + Name string + Age int + Male bool + Money float64 + Born time.Time + Cities []string +} + +type fooBar struct { + Here, When string +} + +const _INVALID_DATA_CONF_STRUCT = ` +Name = +Age = age +Male = 123 +Money = money +Born = nil +Cities = +` + +func Test_Struct(t *testing.T) { + Convey("Map to struct", t, func() { + Convey("Map file to struct", func() { + ts := new(testStruct) + So(MapTo(ts, []byte(_CONF_DATA_STRUCT)), ShouldBeNil) + + So(ts.Name, ShouldEqual, "Unknwon") + So(ts.Age, ShouldEqual, 21) + So(ts.Male, ShouldBeTrue) + So(ts.Money, ShouldEqual, 1.25) + So(ts.Unsigned, ShouldEqual, 3) + + t, err := time.Parse(time.RFC3339, "1993-10-07T20:17:05Z") + So(err, ShouldBeNil) + So(ts.Born.String(), ShouldEqual, t.String()) + + dur, err := time.ParseDuration("2h45m") + So(err, ShouldBeNil) + So(ts.Time.Seconds(), ShouldEqual, dur.Seconds()) + + So(strings.Join(ts.Others.Cities, ","), ShouldEqual, "HangZhou,Boston") + So(ts.Others.Visits[0].String(), ShouldEqual, t.String()) + So(ts.Others.Note, ShouldEqual, "Hello world!") + So(ts.testEmbeded.GPA, ShouldEqual, 2.8) + }) + + Convey("Map section to struct", func() { + foobar := new(fooBar) + f, err := Load([]byte(_CONF_DATA_STRUCT)) + So(err, ShouldBeNil) + + So(f.Section("foo.bar").MapTo(foobar), ShouldBeNil) + So(foobar.Here, ShouldEqual, "there") + So(foobar.When, ShouldEqual, "then") + }) + + Convey("Map to non-pointer struct", func() { + cfg, err := Load([]byte(_CONF_DATA_STRUCT)) + So(err, ShouldBeNil) + So(cfg, ShouldNotBeNil) + + So(cfg.MapTo(testStruct{}), ShouldNotBeNil) + }) + + Convey("Map to unsupported type", func() { + cfg, err := Load([]byte(_CONF_DATA_STRUCT)) + So(err, ShouldBeNil) + So(cfg, ShouldNotBeNil) + + cfg.NameMapper = func(raw string) string { + if raw == "Byte" { + return "NAME" + } + return raw + } + So(cfg.MapTo(&unsupport{}), ShouldNotBeNil) + So(cfg.MapTo(&unsupport2{}), ShouldNotBeNil) + So(cfg.MapTo(&unsupport4{}), ShouldNotBeNil) + }) + + Convey("Map from invalid data source", func() { + So(MapTo(&testStruct{}, "hi"), ShouldNotBeNil) + }) + + Convey("Map to wrong types and gain default values", func() { + cfg, err := Load([]byte(_INVALID_DATA_CONF_STRUCT)) + So(err, ShouldBeNil) + + t, err := time.Parse(time.RFC3339, "1993-10-07T20:17:05Z") + So(err, ShouldBeNil) + dv := &defaultValue{"Joe", 10, true, 1.25, t, []string{"HangZhou", "Boston"}} + So(cfg.MapTo(dv), ShouldBeNil) + So(dv.Name, ShouldEqual, "Joe") + So(dv.Age, ShouldEqual, 10) + So(dv.Male, ShouldBeTrue) + So(dv.Money, ShouldEqual, 1.25) + So(dv.Born.String(), ShouldEqual, t.String()) + So(strings.Join(dv.Cities, ","), ShouldEqual, "HangZhou,Boston") + }) + }) + + Convey("Reflect from struct", t, func() { + type Embeded struct { + Dates []time.Time `delim:"|"` + Places []string + None []int + } + type Author struct { + Name string `ini:"NAME"` + Male bool + Age int + GPA float64 + NeverMind string `ini:"-"` + *Embeded `ini:"infos"` + } + a := &Author{"Unknwon", true, 21, 2.8, "", + &Embeded{ + []time.Time{time.Now(), time.Now()}, + []string{"HangZhou", "Boston"}, + []int{}, + }} + cfg := Empty() + So(ReflectFrom(cfg, a), ShouldBeNil) + cfg.SaveTo("testdata/conf_reflect.ini") + + Convey("Reflect from non-point struct", func() { + 
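+			// Passing a value rather than a pointer must surface an error.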
So(ReflectFrom(cfg, Author{}), ShouldNotBeNil) + }) + }) +} + +type testMapper struct { + PackageName string +} + +func Test_NameGetter(t *testing.T) { + Convey("Test name mappers", t, func() { + So(MapToWithMapper(&testMapper{}, TitleUnderscore, []byte("packag_name=ini")), ShouldBeNil) + + cfg, err := Load([]byte("PACKAGE_NAME=ini")) + So(err, ShouldBeNil) + So(cfg, ShouldNotBeNil) + + cfg.NameMapper = AllCapsUnderscore + tg := new(testMapper) + So(cfg.MapTo(tg), ShouldBeNil) + So(tg.PackageName, ShouldEqual, "ini") + }) +} diff --git a/vendor/github.com/go-ini/ini/testdata/conf.ini b/vendor/github.com/go-ini/ini/testdata/conf.ini new file mode 100644 index 000000000..2ed0ac1d3 --- /dev/null +++ b/vendor/github.com/go-ini/ini/testdata/conf.ini @@ -0,0 +1,2 @@ +[author] +E-MAIL = u@gogs.io \ No newline at end of file diff --git a/vendor/github.com/google/go-querystring/query/encode.go b/vendor/github.com/google/go-querystring/query/encode.go new file mode 100644 index 000000000..90dcabb80 --- /dev/null +++ b/vendor/github.com/google/go-querystring/query/encode.go @@ -0,0 +1,311 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package query implements encoding of structs into URL query parameters. +// +// As a simple example: +// +// type Options struct { +// Query string `url:"q"` +// ShowAll bool `url:"all"` +// Page int `url:"page"` +// } +// +// opt := Options{ "foo", true, 2 } +// v, _ := query.Values(opt) +// fmt.Print(v.Encode()) // will output: "q=foo&all=true&page=2" +// +// The exact mapping between Go values and url.Values is described in the +// documentation for the Values() function. +package query + +import ( + "bytes" + "fmt" + "net/url" + "reflect" + "strconv" + "strings" + "time" +) + +var timeType = reflect.TypeOf(time.Time{}) + +var encoderType = reflect.TypeOf(new(Encoder)).Elem() + +// Encoder is an interface implemented by any type that wishes to encode +// itself into URL values in a non-standard way. +type Encoder interface { + EncodeValues(key string, v *url.Values) error +} + +// Values returns the url.Values encoding of v. +// +// Values expects to be passed a struct, and traverses it recursively using the +// following encoding rules. +// +// Each exported struct field is encoded as a URL parameter unless +// +// - the field's tag is "-", or +// - the field is empty and its tag specifies the "omitempty" option +// +// The empty values are false, 0, any nil pointer or interface value, any array +// slice, map, or string of length zero, and any time.Time that returns true +// for IsZero(). +// +// The URL parameter name defaults to the struct field name but can be +// specified in the struct field's tag value. The "url" key in the struct +// field's tag value is the key name, followed by an optional comma and +// options. For example: +// +// // Field is ignored by this package. +// Field int `url:"-"` +// +// // Field appears as URL parameter "myName". +// Field int `url:"myName"` +// +// // Field appears as URL parameter "myName" and the field is omitted if +// // its value is empty +// Field int `url:"myName,omitempty"` +// +// // Field appears as URL parameter "Field" (the default), but the field +// // is skipped if empty. Note the leading comma. 
+// Field int `url:",omitempty"` +// +// For encoding individual field values, the following type-dependent rules +// apply: +// +// Boolean values default to encoding as the strings "true" or "false". +// Including the "int" option signals that the field should be encoded as the +// strings "1" or "0". +// +// time.Time values default to encoding as RFC3339 timestamps. Including the +// "unix" option signals that the field should be encoded as a Unix time (see +// time.Unix()) +// +// Slice and Array values default to encoding as multiple URL values of the +// same name. Including the "comma" option signals that the field should be +// encoded as a single comma-delimited value. Including the "space" option +// similarly encodes the value as a single space-delimited string. Including +// the "brackets" option signals that the multiple URL values should have "[]" +// appended to the value name. +// +// Anonymous struct fields are usually encoded as if their inner exported +// fields were fields in the outer struct, subject to the standard Go +// visibility rules. An anonymous struct field with a name given in its URL +// tag is treated as having that name, rather than being anonymous. +// +// Non-nil pointer values are encoded as the value pointed to. +// +// Nested structs are encoded including parent fields in value names for +// scoping. e.g: +// +// "user[name]=acme&user[addr][postcode]=1234&user[addr][city]=SFO" +// +// All other values are encoded using their default string representation. +// +// Multiple fields that encode to the same URL parameter name will be included +// as multiple URL values of the same name. +func Values(v interface{}) (url.Values, error) { + values := make(url.Values) + val := reflect.ValueOf(v) + for val.Kind() == reflect.Ptr { + if val.IsNil() { + return values, nil + } + val = val.Elem() + } + + if v == nil { + return values, nil + } + + if val.Kind() != reflect.Struct { + return nil, fmt.Errorf("query: Values() expects struct input. Got %v", val.Kind()) + } + + err := reflectValue(values, val, "") + return values, err +} + +// reflectValue populates the values parameter from the struct fields in val. +// Embedded structs are followed recursively (using the rules defined in the +// Values function documentation) breadth-first. 
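+// Breadth-first means all of a struct's own fields are encoded before any
+// fields promoted from embedded structs, so when names collide the outer
+// value is added first. (TestValues_embeddedStructs in the accompanying
+// tests exercises this.)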
+func reflectValue(values url.Values, val reflect.Value, scope string) error { + var embedded []reflect.Value + + typ := val.Type() + for i := 0; i < typ.NumField(); i++ { + sf := typ.Field(i) + if sf.PkgPath != "" { // unexported + continue + } + + sv := val.Field(i) + tag := sf.Tag.Get("url") + if tag == "-" { + continue + } + name, opts := parseTag(tag) + if name == "" { + if sf.Anonymous && sv.Kind() == reflect.Struct { + // save embedded struct for later processing + embedded = append(embedded, sv) + continue + } + + name = sf.Name + } + + if scope != "" { + name = scope + "[" + name + "]" + } + + if opts.Contains("omitempty") && isEmptyValue(sv) { + continue + } + + if sv.Type().Implements(encoderType) { + if !reflect.Indirect(sv).IsValid() { + sv = reflect.New(sv.Type().Elem()) + } + + m := sv.Interface().(Encoder) + if err := m.EncodeValues(name, &values); err != nil { + return err + } + continue + } + + if sv.Kind() == reflect.Slice || sv.Kind() == reflect.Array { + var del byte + if opts.Contains("comma") { + del = ',' + } else if opts.Contains("space") { + del = ' ' + } else if opts.Contains("brackets") { + name = name + "[]" + } + + if del != 0 { + s := new(bytes.Buffer) + first := true + for i := 0; i < sv.Len(); i++ { + if first { + first = false + } else { + s.WriteByte(del) + } + s.WriteString(valueString(sv.Index(i), opts)) + } + values.Add(name, s.String()) + } else { + for i := 0; i < sv.Len(); i++ { + values.Add(name, valueString(sv.Index(i), opts)) + } + } + continue + } + + if sv.Type() == timeType { + values.Add(name, valueString(sv, opts)) + continue + } + + for sv.Kind() == reflect.Ptr { + if sv.IsNil() { + break + } + sv = sv.Elem() + } + + if sv.Kind() == reflect.Struct { + reflectValue(values, sv, name) + continue + } + + values.Add(name, valueString(sv, opts)) + } + + for _, f := range embedded { + if err := reflectValue(values, f, scope); err != nil { + return err + } + } + + return nil +} + +// valueString returns the string representation of a value. +func valueString(v reflect.Value, opts tagOptions) string { + for v.Kind() == reflect.Ptr { + if v.IsNil() { + return "" + } + v = v.Elem() + } + + if v.Kind() == reflect.Bool && opts.Contains("int") { + if v.Bool() { + return "1" + } + return "0" + } + + if v.Type() == timeType { + t := v.Interface().(time.Time) + if opts.Contains("unix") { + return strconv.FormatInt(t.Unix(), 10) + } + return t.Format(time.RFC3339) + } + + return fmt.Sprint(v.Interface()) +} + +// isEmptyValue checks if a value should be considered empty for the purposes +// of omitting fields with the "omitempty" option. +func isEmptyValue(v reflect.Value) bool { + switch v.Kind() { + case reflect.Array, reflect.Map, reflect.Slice, reflect.String: + return v.Len() == 0 + case reflect.Bool: + return !v.Bool() + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return v.Int() == 0 + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return v.Uint() == 0 + case reflect.Float32, reflect.Float64: + return v.Float() == 0 + case reflect.Interface, reflect.Ptr: + return v.IsNil() + } + + if v.Type() == timeType { + return v.Interface().(time.Time).IsZero() + } + + return false +} + +// tagOptions is the string following a comma in a struct field's "url" tag, or +// the empty string. It does not include the leading comma. +type tagOptions []string + +// parseTag splits a struct field's url tag into its name and comma-separated +// options. 
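+// For example, parseTag("page,omitempty,int") returns
+// ("page", tagOptions{"omitempty", "int"}).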
+func parseTag(tag string) (string, tagOptions) { + s := strings.Split(tag, ",") + return s[0], s[1:] +} + +// Contains checks whether the tagOptions contains the specified option. +func (o tagOptions) Contains(option string) bool { + for _, s := range o { + if s == option { + return true + } + } + return false +} diff --git a/vendor/github.com/google/go-querystring/query/encode_test.go b/vendor/github.com/google/go-querystring/query/encode_test.go new file mode 100644 index 000000000..e0b2a365a --- /dev/null +++ b/vendor/github.com/google/go-querystring/query/encode_test.go @@ -0,0 +1,301 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package query + +import ( + "fmt" + "net/url" + "reflect" + "testing" + "time" +) + +type Nested struct { + A SubNested `url:"a"` + B *SubNested `url:"b"` + Ptr *SubNested `url:"ptr,omitempty"` +} + +type SubNested struct { + Value string `url:"value"` +} + +func TestValues_types(t *testing.T) { + str := "string" + strPtr := &str + + tests := []struct { + in interface{} + want url.Values + }{ + { + // basic primitives + struct { + A string + B int + C uint + D float32 + E bool + }{}, + url.Values{ + "A": {""}, + "B": {"0"}, + "C": {"0"}, + "D": {"0"}, + "E": {"false"}, + }, + }, + { + // pointers + struct { + A *string + B *int + C **string + }{A: strPtr, C: &strPtr}, + url.Values{ + "A": {str}, + "B": {""}, + "C": {str}, + }, + }, + { + // slices and arrays + struct { + A []string + B []string `url:",comma"` + C []string `url:",space"` + D [2]string + E [2]string `url:",comma"` + F [2]string `url:",space"` + G []*string `url:",space"` + H []bool `url:",int,space"` + I []string `url:",brackets"` + }{ + A: []string{"a", "b"}, + B: []string{"a", "b"}, + C: []string{"a", "b"}, + D: [2]string{"a", "b"}, + E: [2]string{"a", "b"}, + F: [2]string{"a", "b"}, + G: []*string{&str, &str}, + H: []bool{true, false}, + I: []string{"a", "b"}, + }, + url.Values{ + "A": {"a", "b"}, + "B": {"a,b"}, + "C": {"a b"}, + "D": {"a", "b"}, + "E": {"a,b"}, + "F": {"a b"}, + "G": {"string string"}, + "H": {"1 0"}, + "I[]": {"a", "b"}, + }, + }, + { + // other types + struct { + A time.Time + B time.Time `url:",unix"` + C bool `url:",int"` + D bool `url:",int"` + }{ + A: time.Date(2000, 1, 1, 12, 34, 56, 0, time.UTC), + B: time.Date(2000, 1, 1, 12, 34, 56, 0, time.UTC), + C: true, + D: false, + }, + url.Values{ + "A": {"2000-01-01T12:34:56Z"}, + "B": {"946730096"}, + "C": {"1"}, + "D": {"0"}, + }, + }, + { + struct { + Nest Nested `url:"nest"` + }{ + Nested{ + A: SubNested{ + Value: "that", + }, + }, + }, + url.Values{ + "nest[a][value]": {"that"}, + "nest[b]": {""}, + }, + }, + { + struct { + Nest Nested `url:"nest"` + }{ + Nested{ + Ptr: &SubNested{ + Value: "that", + }, + }, + }, + url.Values{ + "nest[a][value]": {""}, + "nest[b]": {""}, + "nest[ptr][value]": {"that"}, + }, + }, + { + nil, + url.Values{}, + }, + } + + for i, tt := range tests { + v, err := Values(tt.in) + if err != nil { + t.Errorf("%d. Values(%q) returned error: %v", i, tt.in, err) + } + + if !reflect.DeepEqual(tt.want, v) { + t.Errorf("%d. 
Values(%q) returned %v, want %v", i, tt.in, v, tt.want) + } + } +} + +func TestValues_omitEmpty(t *testing.T) { + str := "" + s := struct { + a string + A string + B string `url:",omitempty"` + C string `url:"-"` + D string `url:"omitempty"` // actually named omitempty, not an option + E *string `url:",omitempty"` + }{E: &str} + + v, err := Values(s) + if err != nil { + t.Errorf("Values(%q) returned error: %v", s, err) + } + + want := url.Values{ + "A": {""}, + "omitempty": {""}, + "E": {""}, // E is included because the pointer is not empty, even though the string being pointed to is + } + if !reflect.DeepEqual(want, v) { + t.Errorf("Values(%q) returned %v, want %v", s, v, want) + } +} + +type A struct { + B +} + +type B struct { + C string +} + +type D struct { + B + C string +} + +func TestValues_embeddedStructs(t *testing.T) { + tests := []struct { + in interface{} + want url.Values + }{ + { + A{B{C: "foo"}}, + url.Values{"C": {"foo"}}, + }, + { + D{B: B{C: "bar"}, C: "foo"}, + url.Values{"C": {"foo", "bar"}}, + }, + } + + for i, tt := range tests { + v, err := Values(tt.in) + if err != nil { + t.Errorf("%d. Values(%q) returned error: %v", i, tt.in, err) + } + + if !reflect.DeepEqual(tt.want, v) { + t.Errorf("%d. Values(%q) returned %v, want %v", i, tt.in, v, tt.want) + } + } +} + +func TestValues_invalidInput(t *testing.T) { + _, err := Values("") + if err == nil { + t.Errorf("expected Values() to return an error on invalid input") + } +} + +type EncodedArgs []string + +func (m EncodedArgs) EncodeValues(key string, v *url.Values) error { + for i, arg := range m { + v.Set(fmt.Sprintf("%s.%d", key, i), arg) + } + return nil +} + +func TestValues_Marshaler(t *testing.T) { + s := struct { + Args EncodedArgs `url:"arg"` + }{[]string{"a", "b", "c"}} + v, err := Values(s) + if err != nil { + t.Errorf("Values(%q) returned error: %v", s, err) + } + + want := url.Values{ + "arg.0": {"a"}, + "arg.1": {"b"}, + "arg.2": {"c"}, + } + if !reflect.DeepEqual(want, v) { + t.Errorf("Values(%q) returned %v, want %v", s, v, want) + } +} + +func TestValues_MarshalerWithNilPointer(t *testing.T) { + s := struct { + Args *EncodedArgs `url:"arg"` + }{} + v, err := Values(s) + if err != nil { + t.Errorf("Values(%q) returned error: %v", s, err) + } + + want := url.Values{} + if !reflect.DeepEqual(want, v) { + t.Errorf("Values(%q) returned %v, want %v", s, v, want) + } +} + +func TestTagParsing(t *testing.T) { + name, opts := parseTag("field,foobar,foo") + if name != "field" { + t.Fatalf("name = %q, want field", name) + } + for _, tt := range []struct { + opt string + want bool + }{ + {"foobar", true}, + {"foo", true}, + {"bar", false}, + {"field", false}, + } { + if opts.Contains(tt.opt) != tt.want { + t.Errorf("Contains(%q) = %v", tt.opt, !tt.want) + } + } +} diff --git a/vendor/github.com/hashicorp/atlas-go/archive/archive.go b/vendor/github.com/hashicorp/atlas-go/archive/archive.go new file mode 100644 index 000000000..d9232b4f0 --- /dev/null +++ b/vendor/github.com/hashicorp/atlas-go/archive/archive.go @@ -0,0 +1,500 @@ +// archive is package that helps create archives in a format that +// Atlas expects with its various upload endpoints. +package archive + +import ( + "archive/tar" + "bufio" + "compress/gzip" + "fmt" + "io" + "io/ioutil" + "log" + "os" + "path/filepath" + "strings" +) + +// Archive is the resulting archive. The archive data is generally streamed +// so the io.ReadCloser can be used to backpressure the archive progress +// and avoid memory pressure. 
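+// A hedged consumption sketch (the upload URL and surrounding HTTP plumbing
+// are invented for illustration):
+//
+//	archive, err := CreateArchive("./app", &ArchiveOpts{VCS: true})
+//	if err != nil {
+//		return err
+//	}
+//	defer archive.Close()
+//	req, _ := http.NewRequest("PUT", uploadURL, archive)
+//	req.ContentLength = archive.Size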
+type Archive struct { + io.ReadCloser + + Size int64 + Metadata map[string]string +} + +// ArchiveOpts are the options for defining how the archive will be built. +type ArchiveOpts struct { + // Exclude and Include are filters of files to include/exclude in + // the archive when creating it from a directory. These filters should + // be relative to the packaging directory and should be basic glob + // patterns. + Exclude []string + Include []string + + // Extra is a mapping of extra files to include within the archive. The + // key should be the path within the archive and the value should be + // an absolute path to the file to put into the archive. These extra + // files will override any other files in the archive. + Extra map[string]string + + // VCS, if true, will detect and use a VCS system to determine what + // files to include the archive. + VCS bool +} + +// IsSet says whether any options were set. +func (o *ArchiveOpts) IsSet() bool { + return len(o.Exclude) > 0 || len(o.Include) > 0 || o.VCS +} + +// CreateArchive takes the given path and ArchiveOpts and archives it. +// +// The archive will be fully completed and put into a temporary file. +// This must be done to retrieve the content length of the archive which +// is needed for almost all operations involving archives with Atlas. Because +// of this, sufficient disk space will be required to buffer the archive. +func CreateArchive(path string, opts *ArchiveOpts) (*Archive, error) { + log.Printf("[INFO] creating archive from %s", path) + + // Dereference any symlinks and determine the real path and info + fi, err := os.Lstat(path) + if err != nil { + return nil, err + } + if fi.Mode()&os.ModeSymlink != 0 { + path, fi, err = readLinkFull(path, fi) + if err != nil { + return nil, err + } + } + + // Windows + path = filepath.ToSlash(path) + + // Direct file paths cannot have archive options + if !fi.IsDir() && opts.IsSet() { + return nil, fmt.Errorf( + "options such as exclude, include, and VCS can't be set when " + + "the path is a file.") + } + + if fi.IsDir() { + return archiveDir(path, opts) + } else { + return archiveFile(path) + } +} + +func archiveFile(path string) (*Archive, error) { + f, err := os.Open(path) + if err != nil { + return nil, err + } + + if _, err := gzip.NewReader(f); err == nil { + // Reset the read offset for future reading + if _, err := f.Seek(0, 0); err != nil { + f.Close() + return nil, err + } + + // Get the file info for the size + fi, err := f.Stat() + if err != nil { + f.Close() + return nil, err + } + + // This is a gzip file, let it through. + return &Archive{ReadCloser: f, Size: fi.Size()}, nil + } + + // Close the file, no use for it anymore + f.Close() + + // We have a single file that is not gzipped. Compress it. + path, err = filepath.Abs(path) + if err != nil { + return nil, err + } + + // Act like we're compressing a directory, but only include this one + // file. 
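+	// (That is, reuse the tar+gzip directory path with an Include pattern
+	// matching only this file's base name.)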
+ return archiveDir(filepath.Dir(path), &ArchiveOpts{ + Include: []string{filepath.Base(path)}, + }) +} + +func archiveDir(root string, opts *ArchiveOpts) (*Archive, error) { + + var vcsInclude []string + var metadata map[string]string + if opts.VCS { + var err error + + if err = vcsPreflight(root); err != nil { + return nil, err + } + + vcsInclude, err = vcsFiles(root) + if err != nil { + return nil, err + } + + metadata, err = vcsMetadata(root) + if err != nil { + return nil, err + } + } + + // Make sure the root path is absolute + root, err := filepath.Abs(root) + if err != nil { + return nil, err + } + + // Create the temporary file that we'll send the archive data to. + archiveF, err := ioutil.TempFile("", "atlas-archive") + if err != nil { + return nil, err + } + + // Create the wrapper for the result which will automatically + // remove the temporary file on close. + archiveWrapper := &readCloseRemover{F: archiveF} + + // Buffer the writer so that we can push as much data to disk at + // a time as possible. 4M should be good. + bufW := bufio.NewWriterSize(archiveF, 4096*1024) + + // Gzip compress all the output data + gzipW := gzip.NewWriter(bufW) + + // Tar the file contents + tarW := tar.NewWriter(gzipW) + + // First, walk the path and do the normal files + werr := filepath.Walk(root, copyDirWalkFn( + tarW, root, "", opts, vcsInclude)) + if werr == nil { + // If that succeeded, handle the extra files + werr = copyExtras(tarW, opts.Extra) + } + + // Attempt to close all the things. If we get an error on the way + // and we haven't had an error yet, then record that as the critical + // error. But we still try to close everything. + + // Close the tar writer + if err := tarW.Close(); err != nil && werr == nil { + werr = err + } + + // Close the gzip writer + if err := gzipW.Close(); err != nil && werr == nil { + werr = err + } + + // Flush the buffer + if err := bufW.Flush(); err != nil && werr == nil { + werr = err + } + + // If we had an error, then close the file (removing it) and + // return the error. + if werr != nil { + archiveWrapper.Close() + return nil, werr + } + + // Seek to the beginning + if _, err := archiveWrapper.F.Seek(0, 0); err != nil { + archiveWrapper.Close() + return nil, err + } + + // Get the file information so we can get the size + fi, err := archiveWrapper.F.Stat() + if err != nil { + archiveWrapper.Close() + return nil, err + } + + return &Archive{ + ReadCloser: archiveWrapper, + Size: fi.Size(), + Metadata: metadata, + }, nil +} + +func copyDirWalkFn( + tarW *tar.Writer, root string, prefix string, + opts *ArchiveOpts, vcsInclude []string) filepath.WalkFunc { + + errFunc := func(err error) filepath.WalkFunc { + return func(string, os.FileInfo, error) error { + return err + } + } + + // Windows + root = filepath.ToSlash(root) + + var includeMap map[string]struct{} + + // If we have an include/exclude pattern set, then setup the lookup + // table to determine what we want to include. 
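+	// Illustrative example: an Include pattern like "build/*.txt" marks each
+	// matching file and every parent directory of it, so the walk below is
+	// not skipped on the way down to the matches.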
+ if opts != nil && len(opts.Include) > 0 { + includeMap = make(map[string]struct{}) + for _, pattern := range opts.Include { + matches, err := filepath.Glob(filepath.Join(root, pattern)) + if err != nil { + return errFunc(fmt.Errorf( + "error checking include glob '%s': %s", + pattern, err)) + } + + for _, path := range matches { + // Windows + path = filepath.ToSlash(path) + subpath, err := filepath.Rel(root, path) + subpath = filepath.ToSlash(subpath) + + if err != nil { + return errFunc(err) + } + + for { + includeMap[subpath] = struct{}{} + subpath = filepath.Dir(subpath) + if subpath == "." { + break + } + } + } + } + } + + return func(path string, info os.FileInfo, err error) error { + path = filepath.ToSlash(path) + + if err != nil { + return err + } + + // Get the relative path from the path since it contains the root + // plus the path. + subpath, err := filepath.Rel(root, path) + if err != nil { + return err + } + if subpath == "." { + return nil + } + if prefix != "" { + subpath = filepath.Join(prefix, subpath) + } + // Windows + subpath = filepath.ToSlash(subpath) + + // If we have a list of VCS files, check that first + skip := false + if len(vcsInclude) > 0 { + skip = true + for _, f := range vcsInclude { + if f == subpath { + skip = false + break + } + + if info.IsDir() && strings.HasPrefix(f, subpath+"/") { + skip = false + break + } + } + } + + // If include is present, we only include what is listed + if len(includeMap) > 0 { + if _, ok := includeMap[subpath]; !ok { + skip = true + } + } + + // If exclude, it is one last gate to excluding files + if opts != nil { + for _, exclude := range opts.Exclude { + match, err := filepath.Match(exclude, subpath) + if err != nil { + return err + } + if match { + skip = true + break + } + } + } + + // If we have to skip this file, then skip it, properly skipping + // children if we're a directory. + if skip { + if info.IsDir() { + return filepath.SkipDir + } + + return nil + } + + // If this is a symlink, then we need to get the symlink target + // rather than the symlink itself. + if info.Mode()&os.ModeSymlink != 0 { + target, info, err := readLinkFull(path, info) + if err != nil { + return err + } + + // Copy the concrete entry for this path. This will either + // be the file itself or just a directory entry. + if err := copyConcreteEntry(tarW, subpath, target, info); err != nil { + return err + } + + if info.IsDir() { + return filepath.Walk(target, copyDirWalkFn( + tarW, target, subpath, opts, vcsInclude)) + } + } + + return copyConcreteEntry(tarW, subpath, path, info) + } +} + +func copyConcreteEntry( + tarW *tar.Writer, entry string, + path string, info os.FileInfo) error { + // Windows + path = filepath.ToSlash(path) + + // Build the file header for the tar entry + header, err := tar.FileInfoHeader(info, path) + if err != nil { + return fmt.Errorf( + "failed creating archive header: %s", path) + } + + // Modify the header to properly be the full entry name + header.Name = entry + if info.IsDir() { + header.Name += "/" + } + + // Write the header first to the archive. 
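+	// (For directories the header is the entire entry; the trailing "/"
+	// appended above is what marks the entry as a directory in the tar
+	// stream.)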
+ if err := tarW.WriteHeader(header); err != nil { + return fmt.Errorf( + "failed writing archive header: %s", path) + } + + // If it is a directory, then we're done (no body to write) + if info.IsDir() { + return nil + } + + // Open the real file to write the data + f, err := os.Open(path) + if err != nil { + return fmt.Errorf( + "failed opening file '%s' to write compressed archive.", path) + } + defer f.Close() + + if _, err = io.Copy(tarW, f); err != nil { + return fmt.Errorf( + "failed copying file to archive: %s", path) + } + + return nil +} + +func copyExtras(w *tar.Writer, extra map[string]string) error { + for entry, path := range extra { + info, err := os.Stat(path) + if err != nil { + return err + } + + // No matter what, write the entry. If this is a directory, + // it'll just write the directory header. + if err := copyConcreteEntry(w, entry, path, info); err != nil { + return err + } + + // If this is a directory, then we walk the internal contents + // and copy those as well. + if info.IsDir() { + err := filepath.Walk(path, copyDirWalkFn( + w, path, entry, nil, nil)) + if err != nil { + return err + } + } + } + + return nil +} + +func readLinkFull(path string, info os.FileInfo) (string, os.FileInfo, error) { + // Read the symlink continously until we reach a concrete file. + target := path + tries := 0 + for info.Mode()&os.ModeSymlink != 0 { + var err error + target, err = os.Readlink(target) + if err != nil { + return "", nil, err + } + if !filepath.IsAbs(target) { + target, err = filepath.Abs(target) + if err != nil { + return "", nil, err + } + } + info, err = os.Lstat(target) + if err != nil { + return "", nil, err + } + + tries++ + if tries > 100 { + return "", nil, fmt.Errorf( + "Symlink for %s is too deep, over 100 levels deep", + path) + } + } + + return target, info, nil +} + +// readCloseRemover is an io.ReadCloser implementation that will remove +// the file on Close(). We use this to clean up our temporary file for +// the archive. +type readCloseRemover struct { + F *os.File +} + +func (r *readCloseRemover) Read(p []byte) (int, error) { + return r.F.Read(p) +} + +func (r *readCloseRemover) Close() error { + // First close the file + err := r.F.Close() + + // Next make sure to remove it, or at least try, regardless of error + // above. 
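+	// Any error from os.Remove is intentionally ignored; the Close error,
+	// if any, is the one reported.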
+ os.Remove(r.F.Name()) + + return err +} diff --git a/vendor/github.com/hashicorp/atlas-go/archive/archive_test.go b/vendor/github.com/hashicorp/atlas-go/archive/archive_test.go new file mode 100644 index 000000000..1c8f2bc1f --- /dev/null +++ b/vendor/github.com/hashicorp/atlas-go/archive/archive_test.go @@ -0,0 +1,554 @@ +package archive + +import ( + "archive/tar" + "bytes" + "compress/gzip" + "io" + "io/ioutil" + "os" + "os/exec" + "path/filepath" + "reflect" + "runtime" + "sort" + "testing" +) + +const fixturesDir = "./test-fixtures" + +var testHasGit bool +var testHasHg bool + +func init() { + if _, err := exec.LookPath("git"); err == nil { + testHasGit = true + } + + if _, err := exec.LookPath("hg"); err == nil { + testHasHg = true + } +} + +func TestArchiveOptsIsSet(t *testing.T) { + cases := []struct { + Opts *ArchiveOpts + Set bool + }{ + { + &ArchiveOpts{}, + false, + }, + { + &ArchiveOpts{VCS: true}, + true, + }, + { + &ArchiveOpts{Exclude: make([]string, 0, 0)}, + false, + }, + { + &ArchiveOpts{Exclude: []string{"foo"}}, + true, + }, + { + &ArchiveOpts{Include: make([]string, 0, 0)}, + false, + }, + { + &ArchiveOpts{Include: []string{"foo"}}, + true, + }, + } + + for i, tc := range cases { + if tc.Opts.IsSet() != tc.Set { + t.Fatalf("%d: expected %#v", i, tc.Set) + } + } +} + +func TestArchive_file(t *testing.T) { + path := filepath.Join(testFixture("archive-file"), "foo.txt") + r, err := CreateArchive(path, new(ArchiveOpts)) + if err != nil { + t.Fatalf("err: %s", err) + } + + expected := []string{ + "foo.txt", + } + + entries := testArchive(t, r, false) + if !reflect.DeepEqual(entries, expected) { + t.Fatalf("bad: %#v", entries) + } +} + +func TestArchive_fileCompressed(t *testing.T) { + path := filepath.Join(testFixture("archive-file-compressed"), "file.tar.gz") + r, err := CreateArchive(path, new(ArchiveOpts)) + if err != nil { + t.Fatalf("err: %s", err) + } + + expected := []string{ + "./foo.txt", + } + + entries := testArchive(t, r, false) + if !reflect.DeepEqual(entries, expected) { + t.Fatalf("bad: %#v", entries) + } +} + +func TestArchive_fileNoExist(t *testing.T) { + tf := tempFile(t) + if err := os.Remove(tf); err != nil { + t.Fatalf("err: %s", err) + } + + r, err := CreateArchive(tf, nil) + if err == nil { + t.Fatal("err should not be nil") + } + if r != nil { + t.Fatal("should be nil") + } +} + +func TestArchive_fileWithOpts(t *testing.T) { + r, err := CreateArchive(tempFile(t), &ArchiveOpts{VCS: true}) + if err == nil { + t.Fatal("err should not be nil") + } + if r != nil { + t.Fatal("should be nil") + } +} + +func TestArchive_dirExtra(t *testing.T) { + opts := &ArchiveOpts{ + Extra: map[string]string{ + "hello.txt": filepath.Join( + testFixture("archive-subdir"), "subdir", "hello.txt"), + }, + } + + r, err := CreateArchive(testFixture("archive-flat"), opts) + if err != nil { + t.Fatalf("err: %s", err) + } + + expected := []string{ + "baz.txt", + "foo.txt", + "hello.txt", + } + + entries := testArchive(t, r, false) + if !reflect.DeepEqual(entries, expected) { + t.Fatalf("bad: %#v", entries) + } +} + +func TestArchive_dirExtraDir(t *testing.T) { + opts := &ArchiveOpts{ + Extra: map[string]string{ + "foo": filepath.Join(testFixture("archive-subdir"), "subdir"), + }, + } + + r, err := CreateArchive(testFixture("archive-flat"), opts) + if err != nil { + t.Fatalf("err: %s", err) + } + + expected := []string{ + "baz.txt", + "foo.txt", + "foo/", + "foo/hello.txt", + } + + entries := testArchive(t, r, false) + if !reflect.DeepEqual(entries, expected) { + t.Fatalf("bad: 
%#v", entries) + } +} + +func TestArchive_dirMode(t *testing.T) { + if runtime.GOOS == "windows" { + t.Skip("modes don't work on Windows") + } + + opts := &ArchiveOpts{} + + r, err := CreateArchive(testFixture("archive-dir-mode"), opts) + if err != nil { + t.Fatalf("err: %s", err) + } + + expected := []string{ + "file.txt-exec", + } + + entries := testArchive(t, r, true) + if !reflect.DeepEqual(entries, expected) { + t.Fatalf("bad: %#v", entries) + } +} +func TestArchive_dirSymlink(t *testing.T) { + if runtime.GOOS == "windows" { + t.Skip("git symlinks don't work on Windows") + } + + path := filepath.Join(testFixture("archive-symlink"), "link", "link") + r, err := CreateArchive(path, new(ArchiveOpts)) + if err != nil { + t.Fatalf("err: %s", err) + } + + expected := []string{ + "foo.txt", + } + + entries := testArchive(t, r, false) + if !reflect.DeepEqual(entries, expected) { + t.Fatalf("bad: %#v", entries) + } +} + +func TestArchive_dirWithSymlink(t *testing.T) { + if runtime.GOOS == "windows" { + t.Skip("git symlinks don't work on Windows") + } + + path := filepath.Join(testFixture("archive-symlink"), "link") + r, err := CreateArchive(path, new(ArchiveOpts)) + if err != nil { + t.Fatalf("err: %s", err) + } + + expected := []string{ + "link/", + "link/foo.txt", + } + + entries := testArchive(t, r, false) + if !reflect.DeepEqual(entries, expected) { + t.Fatalf("bad: %#v", entries) + } +} + +func TestArchive_dirNoVCS(t *testing.T) { + r, err := CreateArchive(testFixture("archive-flat"), new(ArchiveOpts)) + if err != nil { + t.Fatalf("err: %s", err) + } + + expected := []string{ + "baz.txt", + "foo.txt", + } + + entries := testArchive(t, r, false) + if !reflect.DeepEqual(entries, expected) { + t.Fatalf("bad: %#v", entries) + } +} + +func TestArchive_dirSubdirsNoVCS(t *testing.T) { + r, err := CreateArchive(testFixture("archive-subdir"), new(ArchiveOpts)) + if err != nil { + t.Fatalf("err: %s", err) + } + + expected := []string{ + "bar.txt", + "foo.txt", + "subdir/", + "subdir/hello.txt", + } + + entries := testArchive(t, r, false) + if !reflect.DeepEqual(entries, expected) { + t.Fatalf("bad: %#v", entries) + } +} + +func TestArchive_dirExclude(t *testing.T) { + opts := &ArchiveOpts{ + Exclude: []string{"subdir", "subdir/*"}, + } + + r, err := CreateArchive(testFixture("archive-subdir"), opts) + if err != nil { + t.Fatalf("err: %s", err) + } + + expected := []string{ + "bar.txt", + "foo.txt", + } + + entries := testArchive(t, r, false) + if !reflect.DeepEqual(entries, expected) { + t.Fatalf("bad: %#v", entries) + } +} + +func TestArchive_dirInclude(t *testing.T) { + opts := &ArchiveOpts{ + Include: []string{"bar.txt"}, + } + + r, err := CreateArchive(testFixture("archive-subdir"), opts) + if err != nil { + t.Fatalf("err: %s", err) + } + + expected := []string{ + "bar.txt", + } + + entries := testArchive(t, r, false) + if !reflect.DeepEqual(entries, expected) { + t.Fatalf("bad: %#v", entries) + } +} + +func TestArchive_dirIncludeStar(t *testing.T) { + opts := &ArchiveOpts{ + Include: []string{"build/**/*"}, + } + + r, err := CreateArchive(testFixture("archive-subdir-splat"), opts) + if err != nil { + t.Fatalf("err: %s", err) + } + + expected := []string{ + "build/", + "build/darwin-amd64/", + "build/darwin-amd64/build.txt", + "build/linux-amd64/", + "build/linux-amd64/build.txt", + } + + entries := testArchive(t, r, false) + if !reflect.DeepEqual(entries, expected) { + t.Fatalf("bad: %#v", entries) + } +} + +func TestArchive_git(t *testing.T) { + if !testHasGit { + t.Log("git not found, 
skipping") + t.Skip() + } + + // Git doesn't allow nested ".git" directories so we do some hackiness + // here to get around that... + testDir := testFixture("archive-git") + oldName := filepath.ToSlash(filepath.Join(testDir, "DOTgit")) + newName := filepath.ToSlash(filepath.Join(testDir, ".git")) + os.Remove(newName) + if err := os.Rename(oldName, newName); err != nil { + t.Fatalf("err: %s", err) + } + defer os.Rename(newName, oldName) + + // testDir with VCS set to true + r, err := CreateArchive(testDir, &ArchiveOpts{VCS: true}) + if err != nil { + t.Fatalf("err: %s", err) + } + + expected := []string{ + "bar.txt", + "foo.txt", + "subdir/", + "subdir/hello.txt", + } + + entries := testArchive(t, r, false) + if !reflect.DeepEqual(entries, expected) { + t.Fatalf("bad: %#v", entries) + } + + // Test that metadata was added + if r.Metadata == nil { + t.Fatal("expected archive metadata to be set") + } + + expectedMetadata := map[string]string{ + "branch": "master", + "commit": "7525d17cbbb56f3253a20903ffddc07c6c935c76", + "remote.origin": "https://github.com/hashicorp/origin.git", + "remote.upstream": "https://github.com/hashicorp/upstream.git", + } + + if !reflect.DeepEqual(r.Metadata, expectedMetadata) { + t.Fatalf("expected %+v to be %+v", r.Metadata, expectedMetadata) + } +} + +func TestArchive_gitSubdir(t *testing.T) { + if !testHasGit { + t.Log("git not found, skipping") + t.Skip() + } + + // Git doesn't allow nested ".git" directories so we do some hackiness + // here to get around that... + testDir := testFixture("archive-git") + oldName := filepath.ToSlash(filepath.Join(testDir, "DOTgit")) + newName := filepath.ToSlash(filepath.Join(testDir, ".git")) + os.Remove(newName) + if err := os.Rename(oldName, newName); err != nil { + t.Fatalf("err: %s", err) + } + defer os.Rename(newName, oldName) + + // testDir with VCS set to true + r, err := CreateArchive(filepath.Join(testDir, "subdir"), &ArchiveOpts{VCS: true}) + if err != nil { + t.Fatalf("err: %s", err) + } + + expected := []string{ + "hello.txt", + } + + entries := testArchive(t, r, false) + if !reflect.DeepEqual(entries, expected) { + t.Fatalf("bad: %#v", entries) + } +} + +func TestArchive_hg(t *testing.T) { + if !testHasHg { + t.Log("hg not found, skipping") + t.Skip() + } + + // testDir with VCS set to true + testDir := testFixture("archive-hg") + r, err := CreateArchive(testDir, &ArchiveOpts{VCS: true}) + if err != nil { + t.Fatalf("err: %s", err) + } + + expected := []string{ + "bar.txt", + "foo.txt", + "subdir/", + "subdir/hello.txt", + } + + entries := testArchive(t, r, false) + if !reflect.DeepEqual(entries, expected) { + t.Fatalf("\n-- Expected --\n%#v\n-- Found --\n%#v", expected, entries) + } +} + +func TestArchive_hgSubdir(t *testing.T) { + if !testHasHg { + t.Log("hg not found, skipping") + t.Skip() + } + + // testDir with VCS set to true + testDir := filepath.Join(testFixture("archive-hg"), "subdir") + r, err := CreateArchive(testDir, &ArchiveOpts{VCS: true}) + if err != nil { + t.Fatalf("err: %s", err) + } + + expected := []string{ + "hello.txt", + } + + entries := testArchive(t, r, false) + if !reflect.DeepEqual(entries, expected) { + t.Fatalf("\n-- Expected --\n%#v\n-- Found --\n%#v", expected, entries) + } +} + +func TestReadCloseRemover(t *testing.T) { + f, err := ioutil.TempFile("", "atlas-go") + if err != nil { + t.Fatalf("err: %s", err) + } + + r := &readCloseRemover{F: f} + if err := r.Close(); err != nil { + t.Fatalf("err: %s", err) + } + + if _, err := os.Stat(f.Name()); err == nil { + t.Fatal("file should 
not exist anymore") + } +} + +func testArchive(t *testing.T, r *Archive, detailed bool) []string { + // Finish the archiving process in-memory + var buf bytes.Buffer + n, err := io.Copy(&buf, r) + if err != nil { + t.Fatalf("err: %s", err) + } + if n != r.Size { + t.Fatalf("bad size: %d (expected: %d)", n, r.Size) + } + + gzipR, err := gzip.NewReader(&buf) + if err != nil { + t.Fatalf("err: %s", err) + } + tarR := tar.NewReader(gzipR) + + // Read all the entries + result := make([]string, 0, 5) + for { + hdr, err := tarR.Next() + if err == io.EOF { + break + } + if err != nil { + t.Fatalf("err: %s", err) + } + + text := hdr.Name + if detailed { + // Check if the file is executable. We use these stub names + // to compensate for umask differences in test environments + // and limitations in using "git clone". + if hdr.FileInfo().Mode()&0111 != 0 { + text = hdr.Name + "-exec" + } else { + text = hdr.Name + "-reg" + } + } + + result = append(result, text) + } + + sort.Strings(result) + return result +} + +func tempFile(t *testing.T) string { + tf, err := ioutil.TempFile("", "test") + if err != nil { + t.Fatalf("err: %s", err) + } + defer tf.Close() + + return tf.Name() +} + +func testFixture(n string) string { + return filepath.Join(fixturesDir, n) +} diff --git a/vendor/github.com/hashicorp/atlas-go/archive/test-fixtures/archive-dir-mode/file.txt b/vendor/github.com/hashicorp/atlas-go/archive/test-fixtures/archive-dir-mode/file.txt new file mode 100644 index 000000000..dc2507e82 --- /dev/null +++ b/vendor/github.com/hashicorp/atlas-go/archive/test-fixtures/archive-dir-mode/file.txt @@ -0,0 +1 @@ +I should be mode 0777 diff --git a/vendor/github.com/hashicorp/atlas-go/archive/test-fixtures/archive-file-compressed/file.tar.gz b/vendor/github.com/hashicorp/atlas-go/archive/test-fixtures/archive-file-compressed/file.tar.gz new file mode 100644 index 0000000000000000000000000000000000000000..bed30fb489d3440b929dd74eb3ec89a341e8d8ea GIT binary patch literal 144 zcmb2|=3rP86%fL}{Pv4G`|0>{1{6?}&-ggIkDWn-fdK%*`#(ki literal 0 HcmV?d00001 diff --git a/vendor/github.com/hashicorp/atlas-go/archive/test-fixtures/archive-file/foo.txt b/vendor/github.com/hashicorp/atlas-go/archive/test-fixtures/archive-file/foo.txt new file mode 100644 index 000000000..257cc5642 --- /dev/null +++ b/vendor/github.com/hashicorp/atlas-go/archive/test-fixtures/archive-file/foo.txt @@ -0,0 +1 @@ +foo diff --git a/vendor/github.com/hashicorp/atlas-go/archive/test-fixtures/archive-flat/baz.txt b/vendor/github.com/hashicorp/atlas-go/archive/test-fixtures/archive-flat/baz.txt new file mode 100644 index 000000000..76018072e --- /dev/null +++ b/vendor/github.com/hashicorp/atlas-go/archive/test-fixtures/archive-flat/baz.txt @@ -0,0 +1 @@ +baz diff --git a/vendor/github.com/hashicorp/atlas-go/archive/test-fixtures/archive-flat/foo.txt b/vendor/github.com/hashicorp/atlas-go/archive/test-fixtures/archive-flat/foo.txt new file mode 100644 index 000000000..257cc5642 --- /dev/null +++ b/vendor/github.com/hashicorp/atlas-go/archive/test-fixtures/archive-flat/foo.txt @@ -0,0 +1 @@ +foo diff --git a/vendor/github.com/hashicorp/atlas-go/archive/test-fixtures/archive-git/DOTgit/COMMIT_EDITMSG b/vendor/github.com/hashicorp/atlas-go/archive/test-fixtures/archive-git/DOTgit/COMMIT_EDITMSG new file mode 100644 index 000000000..e7e768a4a --- /dev/null +++ b/vendor/github.com/hashicorp/atlas-go/archive/test-fixtures/archive-git/DOTgit/COMMIT_EDITMSG @@ -0,0 +1,12 @@ +Those files tho +# Please enter the commit message for your changes. 
Lines starting +# with '#' will be ignored, and an empty message aborts the commit. +# On branch master +# +# Initial commit +# +# Changes to be committed: +# new file: bar.txt +# new file: foo.txt +# new file: subdir/hello.txt +# diff --git a/vendor/github.com/hashicorp/atlas-go/archive/test-fixtures/archive-git/DOTgit/HEAD b/vendor/github.com/hashicorp/atlas-go/archive/test-fixtures/archive-git/DOTgit/HEAD new file mode 100644 index 000000000..cb089cd89 --- /dev/null +++ b/vendor/github.com/hashicorp/atlas-go/archive/test-fixtures/archive-git/DOTgit/HEAD @@ -0,0 +1 @@ +ref: refs/heads/master diff --git a/vendor/github.com/hashicorp/atlas-go/archive/test-fixtures/archive-git/DOTgit/config b/vendor/github.com/hashicorp/atlas-go/archive/test-fixtures/archive-git/DOTgit/config new file mode 100644 index 000000000..252f2e347 --- /dev/null +++ b/vendor/github.com/hashicorp/atlas-go/archive/test-fixtures/archive-git/DOTgit/config @@ -0,0 +1,13 @@ +[core] + repositoryformatversion = 0 + filemode = true + bare = false + logallrefupdates = true + ignorecase = true + precomposeunicode = true +[remote "origin"] + url = https://github.com/hashicorp/origin.git + fetch = +refs/heads/*:refs/remotes/origin/* +[remote "upstream"] + url = https://github.com/hashicorp/upstream.git + fetch = +refs/heads/*:refs/remotes/upstream/* diff --git a/vendor/github.com/hashicorp/atlas-go/archive/test-fixtures/archive-git/DOTgit/description b/vendor/github.com/hashicorp/atlas-go/archive/test-fixtures/archive-git/DOTgit/description new file mode 100644 index 000000000..498b267a8 --- /dev/null +++ b/vendor/github.com/hashicorp/atlas-go/archive/test-fixtures/archive-git/DOTgit/description @@ -0,0 +1 @@ +Unnamed repository; edit this file 'description' to name the repository. diff --git a/vendor/github.com/hashicorp/atlas-go/archive/test-fixtures/archive-git/DOTgit/hooks/applypatch-msg.sample b/vendor/github.com/hashicorp/atlas-go/archive/test-fixtures/archive-git/DOTgit/hooks/applypatch-msg.sample new file mode 100644 index 000000000..8b2a2fe84 --- /dev/null +++ b/vendor/github.com/hashicorp/atlas-go/archive/test-fixtures/archive-git/DOTgit/hooks/applypatch-msg.sample @@ -0,0 +1,15 @@ +#!/bin/sh +# +# An example hook script to check the commit log message taken by +# applypatch from an e-mail message. +# +# The hook should exit with non-zero status after issuing an +# appropriate message if it wants to stop the commit. The hook is +# allowed to edit the commit message file. +# +# To enable this hook, rename this file to "applypatch-msg". + +. git-sh-setup +test -x "$GIT_DIR/hooks/commit-msg" && + exec "$GIT_DIR/hooks/commit-msg" ${1+"$@"} +: diff --git a/vendor/github.com/hashicorp/atlas-go/archive/test-fixtures/archive-git/DOTgit/hooks/commit-msg.sample b/vendor/github.com/hashicorp/atlas-go/archive/test-fixtures/archive-git/DOTgit/hooks/commit-msg.sample new file mode 100644 index 000000000..b58d1184a --- /dev/null +++ b/vendor/github.com/hashicorp/atlas-go/archive/test-fixtures/archive-git/DOTgit/hooks/commit-msg.sample @@ -0,0 +1,24 @@ +#!/bin/sh +# +# An example hook script to check the commit log message. +# Called by "git commit" with one argument, the name of the file +# that has the commit message. The hook should exit with non-zero +# status after issuing an appropriate message if it wants to stop the +# commit. The hook is allowed to edit the commit message file. +# +# To enable this hook, rename this file to "commit-msg". + +# Uncomment the below to add a Signed-off-by line to the message. 
+# Doing this in a hook is a bad idea in general, but the prepare-commit-msg +# hook is more suited to it. +# +# SOB=$(git var GIT_AUTHOR_IDENT | sed -n 's/^\(.*>\).*$/Signed-off-by: \1/p') +# grep -qs "^$SOB" "$1" || echo "$SOB" >> "$1" + +# This example catches duplicate Signed-off-by lines. + +test "" = "$(grep '^Signed-off-by: ' "$1" | + sort | uniq -c | sed -e '/^[ ]*1[ ]/d')" || { + echo >&2 Duplicate Signed-off-by lines. + exit 1 +} diff --git a/vendor/github.com/hashicorp/atlas-go/archive/test-fixtures/archive-git/DOTgit/hooks/post-update.sample b/vendor/github.com/hashicorp/atlas-go/archive/test-fixtures/archive-git/DOTgit/hooks/post-update.sample new file mode 100644 index 000000000..ec17ec193 --- /dev/null +++ b/vendor/github.com/hashicorp/atlas-go/archive/test-fixtures/archive-git/DOTgit/hooks/post-update.sample @@ -0,0 +1,8 @@ +#!/bin/sh +# +# An example hook script to prepare a packed repository for use over +# dumb transports. +# +# To enable this hook, rename this file to "post-update". + +exec git update-server-info diff --git a/vendor/github.com/hashicorp/atlas-go/archive/test-fixtures/archive-git/DOTgit/hooks/pre-applypatch.sample b/vendor/github.com/hashicorp/atlas-go/archive/test-fixtures/archive-git/DOTgit/hooks/pre-applypatch.sample new file mode 100644 index 000000000..b1f187c2e --- /dev/null +++ b/vendor/github.com/hashicorp/atlas-go/archive/test-fixtures/archive-git/DOTgit/hooks/pre-applypatch.sample @@ -0,0 +1,14 @@ +#!/bin/sh +# +# An example hook script to verify what is about to be committed +# by applypatch from an e-mail message. +# +# The hook should exit with non-zero status after issuing an +# appropriate message if it wants to stop the commit. +# +# To enable this hook, rename this file to "pre-applypatch". + +. git-sh-setup +test -x "$GIT_DIR/hooks/pre-commit" && + exec "$GIT_DIR/hooks/pre-commit" ${1+"$@"} +: diff --git a/vendor/github.com/hashicorp/atlas-go/archive/test-fixtures/archive-git/DOTgit/hooks/pre-commit.sample b/vendor/github.com/hashicorp/atlas-go/archive/test-fixtures/archive-git/DOTgit/hooks/pre-commit.sample new file mode 100644 index 000000000..68d62d544 --- /dev/null +++ b/vendor/github.com/hashicorp/atlas-go/archive/test-fixtures/archive-git/DOTgit/hooks/pre-commit.sample @@ -0,0 +1,49 @@ +#!/bin/sh +# +# An example hook script to verify what is about to be committed. +# Called by "git commit" with no arguments. The hook should +# exit with non-zero status after issuing an appropriate message if +# it wants to stop the commit. +# +# To enable this hook, rename this file to "pre-commit". + +if git rev-parse --verify HEAD >/dev/null 2>&1 +then + against=HEAD +else + # Initial commit: diff against an empty tree object + against=4b825dc642cb6eb9a060e54bf8d69288fbee4904 +fi + +# If you want to allow non-ASCII filenames set this variable to true. +allownonascii=$(git config --bool hooks.allownonascii) + +# Redirect output to stderr. +exec 1>&2 + +# Cross platform projects tend to avoid non-ASCII filenames; prevent +# them from being added to the repository. We exploit the fact that the +# printable range starts at the space character and ends with tilde. +if [ "$allownonascii" != "true" ] && + # Note that the use of brackets around a tr range is ok here, (it's + # even required, for portability to Solaris 10's /usr/bin/tr), since + # the square bracket bytes happen to fall in the designated range. 
+ test $(git diff --cached --name-only --diff-filter=A -z $against | + LC_ALL=C tr -d '[ -~]\0' | wc -c) != 0 +then + cat <<\EOF +Error: Attempt to add a non-ASCII file name. + +This can cause problems if you want to work with people on other platforms. + +To be portable it is advisable to rename the file. + +If you know what you are doing you can disable this check using: + + git config hooks.allownonascii true +EOF + exit 1 +fi + +# If there are whitespace errors, print the offending file names and fail. +exec git diff-index --check --cached $against -- diff --git a/vendor/github.com/hashicorp/atlas-go/archive/test-fixtures/archive-git/DOTgit/hooks/pre-push.sample b/vendor/github.com/hashicorp/atlas-go/archive/test-fixtures/archive-git/DOTgit/hooks/pre-push.sample new file mode 100644 index 000000000..1f3bcebfd --- /dev/null +++ b/vendor/github.com/hashicorp/atlas-go/archive/test-fixtures/archive-git/DOTgit/hooks/pre-push.sample @@ -0,0 +1,54 @@ +#!/bin/sh + +# An example hook script to verify what is about to be pushed. Called by "git +# push" after it has checked the remote status, but before anything has been +# pushed. If this script exits with a non-zero status nothing will be pushed. +# +# This hook is called with the following parameters: +# +# $1 -- Name of the remote to which the push is being done +# $2 -- URL to which the push is being done +# +# If pushing without using a named remote those arguments will be equal. +# +# Information about the commits which are being pushed is supplied as lines to +# the standard input in the form: +# +# <local ref> <local sha1> <remote ref> <remote sha1> +# +# This sample shows how to prevent push of commits where the log message starts +# with "WIP" (work in progress). + +remote="$1" +url="$2" + +z40=0000000000000000000000000000000000000000 + +IFS=' ' +while read local_ref local_sha remote_ref remote_sha +do + if [ "$local_sha" = $z40 ] + then + # Handle delete + : + else + if [ "$remote_sha" = $z40 ] + then + # New branch, examine all commits + range="$local_sha" + else + # Update to existing branch, examine new commits + range="$remote_sha..$local_sha" + fi + + # Check for WIP commit + commit=`git rev-list -n 1 --grep '^WIP' "$range"` + if [ -n "$commit" ] + then + echo "Found WIP commit in $local_ref, not pushing" + exit 1 + fi + fi +done + +exit 0 diff --git a/vendor/github.com/hashicorp/atlas-go/archive/test-fixtures/archive-git/DOTgit/hooks/pre-rebase.sample b/vendor/github.com/hashicorp/atlas-go/archive/test-fixtures/archive-git/DOTgit/hooks/pre-rebase.sample new file mode 100644 index 000000000..9773ed4cb --- /dev/null +++ b/vendor/github.com/hashicorp/atlas-go/archive/test-fixtures/archive-git/DOTgit/hooks/pre-rebase.sample @@ -0,0 +1,169 @@ +#!/bin/sh +# +# Copyright (c) 2006, 2008 Junio C Hamano +# +# The "pre-rebase" hook is run just before "git rebase" starts doing +# its job, and can prevent the command from running by exiting with +# non-zero status. +# +# The hook is called with the following parameters: +# +# $1 -- the upstream the series was forked from. +# $2 -- the branch being rebased (or empty when rebasing the current branch). +# +# This sample shows how to prevent topic branches that are already +# merged to 'next' branch from getting rebased, because allowing it +# would result in rebasing already published history. 
+ +publish=next +basebranch="$1" +if test "$#" = 2 +then + topic="refs/heads/$2" +else + topic=`git symbolic-ref HEAD` || + exit 0 ;# we do not interrupt rebasing detached HEAD +fi + +case "$topic" in +refs/heads/??/*) + ;; +*) + exit 0 ;# we do not interrupt others. + ;; +esac + +# Now we are dealing with a topic branch being rebased +# on top of master. Is it OK to rebase it? + +# Does the topic really exist? +git show-ref -q "$topic" || { + echo >&2 "No such branch $topic" + exit 1 +} + +# Is topic fully merged to master? +not_in_master=`git rev-list --pretty=oneline ^master "$topic"` +if test -z "$not_in_master" +then + echo >&2 "$topic is fully merged to master; better remove it." + exit 1 ;# we could allow it, but there is no point. +fi + +# Is topic ever merged to next? If so you should not be rebasing it. +only_next_1=`git rev-list ^master "^$topic" ${publish} | sort` +only_next_2=`git rev-list ^master ${publish} | sort` +if test "$only_next_1" = "$only_next_2" +then + not_in_topic=`git rev-list "^$topic" master` + if test -z "$not_in_topic" + then + echo >&2 "$topic is already up-to-date with master" + exit 1 ;# we could allow it, but there is no point. + else + exit 0 + fi +else + not_in_next=`git rev-list --pretty=oneline ^${publish} "$topic"` + /usr/bin/perl -e ' + my $topic = $ARGV[0]; + my $msg = "* $topic has commits already merged to public branch:\n"; + my (%not_in_next) = map { + /^([0-9a-f]+) /; + ($1 => 1); + } split(/\n/, $ARGV[1]); + for my $elem (map { + /^([0-9a-f]+) (.*)$/; + [$1 => $2]; + } split(/\n/, $ARGV[2])) { + if (!exists $not_in_next{$elem->[0]}) { + if ($msg) { + print STDERR $msg; + undef $msg; + } + print STDERR " $elem->[1]\n"; + } + } + ' "$topic" "$not_in_next" "$not_in_master" + exit 1 +fi + +exit 0 + +################################################################ + +This sample hook safeguards topic branches that have been +published from being rewound. + +The workflow assumed here is: + + * Once a topic branch forks from "master", "master" is never + merged into it again (either directly or indirectly). + + * Once a topic branch is fully cooked and merged into "master", + it is deleted. If you need to build on top of it to correct + earlier mistakes, a new topic branch is created by forking at + the tip of the "master". This is not strictly necessary, but + it makes it easier to keep your history simple. + + * Whenever you need to test or publish your changes to topic + branches, merge them into "next" branch. + +The script, being an example, hardcodes the publish branch name +to be "next", but it is trivial to make it configurable via +$GIT_DIR/config mechanism. + +With this workflow, you would want to know: + +(1) ... if a topic branch has ever been merged to "next". Young + topic branches can have stupid mistakes you would rather + clean up before publishing, and things that have not been + merged into other branches can be easily rebased without + affecting other people. But once it is published, you would + not want to rewind it. + +(2) ... if a topic branch has been fully merged to "master". + Then you can delete it. More importantly, you should not + build on top of it -- other people may already want to + change things related to the topic as patches against your + "master", so if you need further changes, it is better to + fork the topic (perhaps with the same name) afresh from the + tip of "master". 
+ +Let's look at this example: + + o---o---o---o---o---o---o---o---o---o "next" + / / / / + / a---a---b A / / + / / / / + / / c---c---c---c B / + / / / \ / + / / / b---b C \ / + / / / / \ / + ---o---o---o---o---o---o---o---o---o---o---o "master" + + +A, B and C are topic branches. + + * A has one fix since it was merged up to "next". + + * B has finished. It has been fully merged up to "master" and "next", + and is ready to be deleted. + + * C has not merged to "next" at all. + +We would want to allow C to be rebased, refuse A, and encourage +B to be deleted. + +To compute (1): + + git rev-list ^master ^topic next + git rev-list ^master next + + if these match, topic has not merged in next at all. + +To compute (2): + + git rev-list master..topic + + if this is empty, it is fully merged to "master". diff --git a/vendor/github.com/hashicorp/atlas-go/archive/test-fixtures/archive-git/DOTgit/hooks/prepare-commit-msg.sample b/vendor/github.com/hashicorp/atlas-go/archive/test-fixtures/archive-git/DOTgit/hooks/prepare-commit-msg.sample new file mode 100644 index 000000000..f093a02ec --- /dev/null +++ b/vendor/github.com/hashicorp/atlas-go/archive/test-fixtures/archive-git/DOTgit/hooks/prepare-commit-msg.sample @@ -0,0 +1,36 @@ +#!/bin/sh +# +# An example hook script to prepare the commit log message. +# Called by "git commit" with the name of the file that has the +# commit message, followed by the description of the commit +# message's source. The hook's purpose is to edit the commit +# message file. If the hook fails with a non-zero status, +# the commit is aborted. +# +# To enable this hook, rename this file to "prepare-commit-msg". + +# This hook includes three examples. The first comments out the +# "Conflicts:" part of a merge commit. +# +# The second includes the output of "git diff --name-status -r" +# into the message, just before the "git status" output. It is +# commented because it doesn't cope with --amend or with squashed +# commits. +# +# The third example adds a Signed-off-by line to the message, that can +# still be edited. This is rarely a good idea. + +case "$2,$3" in + merge,) + /usr/bin/perl -i.bak -ne 's/^/# /, s/^# #/#/ if /^Conflicts/ .. /#/; print' "$1" ;; + +# ,|template,) +# /usr/bin/perl -i.bak -pe ' +# print "\n" . `git diff --cached --name-status -r` +# if /^#/ && $first++ == 0' "$1" ;; + + *) ;; +esac + +# SOB=$(git var GIT_AUTHOR_IDENT | sed -n 's/^\(.*>\).*$/Signed-off-by: \1/p') +# grep -qs "^$SOB" "$1" || echo "$SOB" >> "$1" diff --git a/vendor/github.com/hashicorp/atlas-go/archive/test-fixtures/archive-git/DOTgit/hooks/update.sample b/vendor/github.com/hashicorp/atlas-go/archive/test-fixtures/archive-git/DOTgit/hooks/update.sample new file mode 100644 index 000000000..d84758373 --- /dev/null +++ b/vendor/github.com/hashicorp/atlas-go/archive/test-fixtures/archive-git/DOTgit/hooks/update.sample @@ -0,0 +1,128 @@ +#!/bin/sh +# +# An example hook script to blocks unannotated tags from entering. +# Called by "git receive-pack" with arguments: refname sha1-old sha1-new +# +# To enable this hook, rename this file to "update". +# +# Config +# ------ +# hooks.allowunannotated +# This boolean sets whether unannotated tags will be allowed into the +# repository. By default they won't be. +# hooks.allowdeletetag +# This boolean sets whether deleting tags will be allowed in the +# repository. By default they won't be. +# hooks.allowmodifytag +# This boolean sets whether a tag may be modified after creation. By default +# it won't be. 
+# hooks.allowdeletebranch +# This boolean sets whether deleting branches will be allowed in the +# repository. By default they won't be. +# hooks.denycreatebranch +# This boolean sets whether remotely creating branches will be denied +# in the repository. By default this is allowed. +# + +# --- Command line +refname="$1" +oldrev="$2" +newrev="$3" + +# --- Safety check +if [ -z "$GIT_DIR" ]; then + echo "Don't run this script from the command line." >&2 + echo " (if you want, you could supply GIT_DIR then run" >&2 + echo " $0 <ref> <oldrev> <newrev>)" >&2 + exit 1 +fi + +if [ -z "$refname" -o -z "$oldrev" -o -z "$newrev" ]; then + echo "usage: $0 <ref> <oldrev> <newrev>" >&2 + exit 1 +fi + +# --- Config +allowunannotated=$(git config --bool hooks.allowunannotated) +allowdeletebranch=$(git config --bool hooks.allowdeletebranch) +denycreatebranch=$(git config --bool hooks.denycreatebranch) +allowdeletetag=$(git config --bool hooks.allowdeletetag) +allowmodifytag=$(git config --bool hooks.allowmodifytag) + +# check for no description +projectdesc=$(sed -e '1q' "$GIT_DIR/description") +case "$projectdesc" in +"Unnamed repository"* | "") + echo "*** Project description file hasn't been set" >&2 + exit 1 + ;; +esac + +# --- Check types +# if $newrev is 0000...0000, it's a commit to delete a ref. +zero="0000000000000000000000000000000000000000" +if [ "$newrev" = "$zero" ]; then + newrev_type=delete +else + newrev_type=$(git cat-file -t $newrev) +fi + +case "$refname","$newrev_type" in + refs/tags/*,commit) + # un-annotated tag + short_refname=${refname##refs/tags/} + if [ "$allowunannotated" != "true" ]; then + echo "*** The un-annotated tag, $short_refname, is not allowed in this repository" >&2 + echo "*** Use 'git tag [ -a | -s ]' for tags you want to propagate." >&2 + exit 1 + fi + ;; + refs/tags/*,delete) + # delete tag + if [ "$allowdeletetag" != "true" ]; then + echo "*** Deleting a tag is not allowed in this repository" >&2 + exit 1 + fi + ;; + refs/tags/*,tag) + # annotated tag + if [ "$allowmodifytag" != "true" ] && git rev-parse $refname > /dev/null 2>&1 + then + echo "*** Tag '$refname' already exists." >&2 + echo "*** Modifying a tag is not allowed in this repository." >&2 + exit 1 + fi + ;; + refs/heads/*,commit) + # branch + if [ "$oldrev" = "$zero" -a "$denycreatebranch" = "true" ]; then + echo "*** Creating a branch is not allowed in this repository" >&2 + exit 1 + fi + ;; + refs/heads/*,delete) + # delete branch + if [ "$allowdeletebranch" != "true" ]; then + echo "*** Deleting a branch is not allowed in this repository" >&2 + exit 1 + fi + ;; + refs/remotes/*,commit) + # tracking branch + ;; + refs/remotes/*,delete) + # delete tracking branch + if [ "$allowdeletebranch" != "true" ]; then + echo "*** Deleting a tracking branch is not allowed in this repository" >&2 + exit 1 + fi + ;; + *) + # Anything else (is there anything else?) 
+ echo "*** Update hook: unknown type of update to ref $refname of type $newrev_type" >&2 + exit 1 + ;; +esac + +# --- Finished +exit 0 diff --git a/vendor/github.com/hashicorp/atlas-go/archive/test-fixtures/archive-git/DOTgit/index b/vendor/github.com/hashicorp/atlas-go/archive/test-fixtures/archive-git/DOTgit/index new file mode 100644 index 0000000000000000000000000000000000000000..b9699684975201d64c8d308bce4f5cc14180b965 GIT binary patch literal 256 zcmZ?q402{*U|<4b=8)pHZXnG7qZt_(SQy+7MlvuoE@5C`{0fv30b-VLu~U)lr+?OF zZ}n8zw`}b_72Wn{4D3mXMS3L_B}n?w%&A8=N44f?iq6IbAs>29*y~QQ3;S!gZzcnK wT7Eu$a~g> 1414446684 -0700 commit (initial): Those files tho diff --git a/vendor/github.com/hashicorp/atlas-go/archive/test-fixtures/archive-git/DOTgit/logs/refs/heads/master b/vendor/github.com/hashicorp/atlas-go/archive/test-fixtures/archive-git/DOTgit/logs/refs/heads/master new file mode 100644 index 000000000..1c469022b --- /dev/null +++ b/vendor/github.com/hashicorp/atlas-go/archive/test-fixtures/archive-git/DOTgit/logs/refs/heads/master @@ -0,0 +1 @@ +0000000000000000000000000000000000000000 7525d17cbbb56f3253a20903ffddc07c6c935c76 Mitchell Hashimoto 1414446684 -0700 commit (initial): Those files tho diff --git a/vendor/github.com/hashicorp/atlas-go/archive/test-fixtures/archive-git/DOTgit/objects/25/7cc5642cb1a054f08cc83f2d943e56fd3ebe99 b/vendor/github.com/hashicorp/atlas-go/archive/test-fixtures/archive-git/DOTgit/objects/25/7cc5642cb1a054f08cc83f2d943e56fd3ebe99 new file mode 100644 index 0000000000000000000000000000000000000000..bdcf704c9e663f3a11b3146b1b455bc2581b4761 GIT binary patch literal 19 acmb9XD~D{Ff%bx$Vkn}$=55XC}B{oIhvxgaY4w3o)h-EQ|!Y2 M+U=VO08e!hF+vm7FlI0`FfcPQQAkQG(krPbVF(vH71@6JXKnUYPlbKU*4|Um zZGQ$;l9rzjR-#&SG(~6Qf{+hAC+u~n*oFPI+c(q100 https://github.com/foo/bar.git + urlSplit := strings.Split(split[1], " ") + result[remote] = strings.TrimSpace(urlSplit[0]) + } + } + + return result, nil +} + +// gitPreflight is the pre-flight command that runs for Git-based VCSs +func gitPreflight(path string) error { + var stderr, stdout bytes.Buffer + + cmd := exec.Command("git", "--version") + cmd.Dir = path + cmd.Stdout = &stdout + cmd.Stderr = &stderr + if err := cmd.Run(); err != nil { + return fmt.Errorf("error getting git version: %s\nstdout: %s\nstderr: %s", + err, stdout.String(), stderr.String()) + } + + // Check if the output is valid + output := strings.Split(strings.TrimSpace(stdout.String()), " ") + if len(output) < 1 { + log.Printf("[WARN] could not extract version output from Git") + return nil + } + + // Parse the version + gitv, err := version.NewVersion(output[len(output)-1]) + if err != nil { + log.Printf("[WARN] could not parse version output from Git") + return nil + } + + constraint, err := version.NewConstraint("> 1.8") + if err != nil { + log.Printf("[WARN] could not create version constraint to check") + return nil + } + if !constraint.Check(gitv) { + return fmt.Errorf("git version (%s) is too old, please upgrade", gitv.String()) + } + + return nil +} + +// gitMetadata is the function to parse and return Git metadata +func gitMetadata(path string) (map[string]string, error) { + // Future-self note: Git is NOT threadsafe, so we cannot run these + // operations in go routines or else you're going to have a really really + // bad day and Panda.State == "Sad" :( + + branch, err := gitBranch(path) + if err != nil { + return nil, err + } + + commit, err := gitCommit(path) + if err != nil { + return nil, err + } + + remotes, err := gitRemotes(path) + if err != nil { + return 
nil, err + } + + // Make the return result (we already know the size) + result := make(map[string]string, 2+len(remotes)) + + result["branch"] = branch + result["commit"] = commit + for remote, value := range remotes { + result[remote] = value + } + + return result, nil +} diff --git a/vendor/github.com/hashicorp/atlas-go/archive/vcs_test.go b/vendor/github.com/hashicorp/atlas-go/archive/vcs_test.go new file mode 100644 index 000000000..b8f88e498 --- /dev/null +++ b/vendor/github.com/hashicorp/atlas-go/archive/vcs_test.go @@ -0,0 +1,260 @@ +package archive + +import ( + "bytes" + "os" + "os/exec" + "path/filepath" + "reflect" + "runtime" + "testing" +) + +func setupGitFixtures(t *testing.T) (string, func()) { + testDir := testFixture("archive-git") + oldName := filepath.Join(testDir, "DOTgit") + newName := filepath.Join(testDir, ".git") + + cleanup := func() { + os.Rename(newName, oldName) + // Windows leaves an empty folder lying around afterward + if runtime.GOOS == "windows" { + os.Remove(newName) + } + } + + // We call this BEFORE and after each setup for tests that make lower-level + // calls like runCommand + cleanup() + + if err := os.Rename(oldName, newName); err != nil { + t.Fatal(err) + } + + return testDir, cleanup +} + +func TestVCSPreflight(t *testing.T) { + if !testHasGit { + t.Skip("git not found") + } + + testDir, cleanup := setupGitFixtures(t) + defer cleanup() + + if err := vcsPreflight(testDir); err != nil { + t.Fatal(err) + } +} + +func TestGitBranch(t *testing.T) { + if !testHasGit { + t.Skip("git not found") + } + + testDir, cleanup := setupGitFixtures(t) + defer cleanup() + + branch, err := gitBranch(testDir) + if err != nil { + t.Fatal(err) + } + + expected := "master" + if branch != expected { + t.Fatalf("expected %q to be %q", branch, expected) + } +} + +func TestGitBranch_detached(t *testing.T) { + if !testHasGit { + t.Skip("git not found") + } + + testDir := testFixture("archive-git") + oldName := filepath.Join(testDir, "DOTgit") + newName := filepath.Join(testDir, ".git") + pwd, err := os.Getwd() + if err != nil { + t.Fatalf("err: %#v", err) + } + + // Copy and then remove the .git dir instead of moving and replacing like + // other tests, since the checkout below is going to write to the reflog and + // the index + runCommand(t, pwd, "cp", "-r", oldName, newName) + defer runCommand(t, pwd, "rm", "-rf", newName) + + runCommand(t, testDir, "git", "checkout", "--detach") + + branch, err := gitBranch(testDir) + if err != nil { + t.Fatal(err) + } + + if branch != "" { + t.Fatalf("expected branch to be empty, but it was: %s", branch) + } +} + +func TestGitCommit(t *testing.T) { + if !testHasGit { + t.Skip("git not found") + } + + testDir, cleanup := setupGitFixtures(t) + defer cleanup() + + commit, err := gitCommit(testDir) + if err != nil { + t.Fatal(err) + } + + expected := "7525d17cbbb56f3253a20903ffddc07c6c935c76" + if commit != expected { + t.Fatalf("expected %q to be %q", commit, expected) + } +} + +func TestGitRemotes(t *testing.T) { + if !testHasGit { + t.Skip("git not found") + } + + testDir, cleanup := setupGitFixtures(t) + defer cleanup() + + remotes, err := gitRemotes(testDir) + if err != nil { + t.Fatal(err) + } + + expected := map[string]string{ + "remote.origin": "https://github.com/hashicorp/origin.git", + "remote.upstream": "https://github.com/hashicorp/upstream.git", + } + + if !reflect.DeepEqual(remotes, expected) { + t.Fatalf("expected %+v to be %+v", remotes, expected) + } +} + +func TestVCSMetadata_git(t *testing.T) { + if !testHasGit { + 
t.Skip("git not found") + } + + testDir, cleanup := setupGitFixtures(t) + defer cleanup() + + metadata, err := vcsMetadata(testDir) + if err != nil { + t.Fatal(err) + } + + expected := map[string]string{ + "branch": "master", + "commit": "7525d17cbbb56f3253a20903ffddc07c6c935c76", + "remote.origin": "https://github.com/hashicorp/origin.git", + "remote.upstream": "https://github.com/hashicorp/upstream.git", + } + + if !reflect.DeepEqual(metadata, expected) { + t.Fatalf("expected %+v to be %+v", metadata, expected) + } +} + +func TestVCSMetadata_git_detached(t *testing.T) { + if !testHasGit { + t.Skip("git not found") + } + + testDir := testFixture("archive-git") + oldName := filepath.Join(testDir, "DOTgit") + newName := filepath.Join(testDir, ".git") + pwd, err := os.Getwd() + if err != nil { + t.Fatalf("err: %#v", err) + } + + // Copy and then remove the .git dir instead of moving and replacing like + // other tests, since the checkout below is going to write to the reflog and + // the index + runCommand(t, pwd, "cp", "-r", oldName, newName) + defer runCommand(t, pwd, "rm", "-rf", newName) + + runCommand(t, testDir, "git", "checkout", "--detach") + + metadata, err := vcsMetadata(testDir) + if err != nil { + t.Fatal(err) + } + + expected := map[string]string{ + "branch": "", + "commit": "7525d17cbbb56f3253a20903ffddc07c6c935c76", + "remote.origin": "https://github.com/hashicorp/origin.git", + "remote.upstream": "https://github.com/hashicorp/upstream.git", + } + + if !reflect.DeepEqual(metadata, expected) { + t.Fatalf("expected %+v to be %+v", metadata, expected) + } +} + +func TestVCSPathDetect_git(t *testing.T) { + testDir, cleanup := setupGitFixtures(t) + defer cleanup() + + vcs, err := vcsDetect(testDir) + if err != nil { + t.Errorf("VCS detection failed") + } + + if vcs.Name != "git" { + t.Errorf("Expected to find git; found %s", vcs.Name) + } +} + +func TestVCSPathDetect_git_failure(t *testing.T) { + _, err := vcsDetect(testFixture("archive-flat")) + // We expect to get an error because there is no git repo here + if err == nil { + t.Errorf("VCS detection failed") + } +} + +func TestVCSPathDetect_hg(t *testing.T) { + vcs, err := vcsDetect(testFixture("archive-hg")) + if err != nil { + t.Errorf("VCS detection failed") + } + + if vcs.Name != "hg" { + t.Errorf("Expected to find hg; found %s", vcs.Name) + } +} + +func TestVCSPathDetect_hg_absolute(t *testing.T) { + abspath, err := filepath.Abs(testFixture("archive-hg")) + vcs, err := vcsDetect(abspath) + if err != nil { + t.Errorf("VCS detection failed") + } + + if vcs.Name != "hg" { + t.Errorf("Expected to find hg; found %s", vcs.Name) + } +} + +func runCommand(t *testing.T, path, command string, args ...string) { + var stderr, stdout bytes.Buffer + cmd := exec.Command(command, args...) + cmd.Dir = path + cmd.Stdout = &stdout + cmd.Stderr = &stderr + if err := cmd.Run(); err != nil { + t.Fatalf("error running command: %s\nstdout: %s\nstderr: %s", + err, stdout.String(), stderr.String()) + } +} diff --git a/vendor/github.com/hashicorp/atlas-go/v1/application.go b/vendor/github.com/hashicorp/atlas-go/v1/application.go new file mode 100644 index 000000000..42cf88201 --- /dev/null +++ b/vendor/github.com/hashicorp/atlas-go/v1/application.go @@ -0,0 +1,164 @@ +package atlas + +import ( + "bytes" + "encoding/json" + "fmt" + "io" + "log" +) + +// appWrapper is the API wrapper since the server wraps the resulting object. 
+type appWrapper struct { + Application *App `json:"application"` +} + +// App represents a single instance of an application on the Atlas server. +type App struct { + // User is the namespace (username or organization) under which the + // Atlas application resides + User string `json:"username"` + + // Name is the name of the application + Name string `json:"name"` +} + +// Slug returns the slug format for this App (User/Name) +func (a *App) Slug() string { + return fmt.Sprintf("%s/%s", a.User, a.Name) +} + +// App gets the App by the given user space and name. In the event the App is +// not found (404), or for any other non-200 responses, an error is returned. +func (c *Client) App(user, name string) (*App, error) { + log.Printf("[INFO] getting application %s/%s", user, name) + + endpoint := fmt.Sprintf("/api/v1/vagrant/applications/%s/%s", user, name) + request, err := c.Request("GET", endpoint, nil) + if err != nil { + return nil, err + } + + response, err := checkResp(c.HTTPClient.Do(request)) + if err != nil { + return nil, err + } + + var app App + if err := decodeJSON(response, &app); err != nil { + return nil, err + } + + return &app, nil +} + +// CreateApp creates a new App under the given user with the given name. If the +// App is created successfully, it is returned. If the server returns any +// errors, an error is returned. +func (c *Client) CreateApp(user, name string) (*App, error) { + log.Printf("[INFO] creating application %s/%s", user, name) + + body, err := json.Marshal(&appWrapper{&App{ + User: user, + Name: name, + }}) + if err != nil { + return nil, err + } + + endpoint := "/api/v1/vagrant/applications" + request, err := c.Request("POST", endpoint, &RequestOptions{ + Body: bytes.NewReader(body), + Headers: map[string]string{ + "Content-Type": "application/json", + }, + }) + if err != nil { + return nil, err + } + + response, err := checkResp(c.HTTPClient.Do(request)) + if err != nil { + return nil, err + } + + var app App + if err := decodeJSON(response, &app); err != nil { + return nil, err + } + + return &app, nil +} + +// appVersion represents a specific version of an App in Atlas. It is actually +// an upload container/wrapper. +type appVersion struct { + UploadPath string `json:"upload_path"` + Token string `json:"token"` + Version uint64 `json:"version"` +} + +// appMetadataWrapper is a wrapper around a map that prefixes the json key with +// "metadata" when marshalled to format requests to the API properly. +type appMetadataWrapper struct { + Metadata map[string]interface{} `json:"metadata,omitempty"` +} + +// UploadApp creates and uploads a new version for the App. If the server does not +// find the application, an error is returned. If the server does not accept the +// data, an error is returned. +// +// It is the responsibility of the caller to create a properly-formed data +// object; this method blindly passes along the contents of the io.Reader. +func (c *Client) UploadApp(app *App, metadata map[string]interface{}, + data io.Reader, size int64) (uint64, error) { + + log.Printf("[INFO] uploading application %s (%d bytes) with metadata %q", + app.Slug(), size, metadata) + + endpoint := fmt.Sprintf("/api/v1/vagrant/applications/%s/%s/versions", + app.User, app.Name) + + // If metadata was given, set up the RequestOptions to pass in the metadata + // with the request. 
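+ // For instance, metadata of {"testing": true} is wrapped and sent as the + // body {"application": {"metadata": {"testing": true}}}; this is the exact + // shape the test server in atlas_test.go asserts.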
+ var ro *RequestOptions + if metadata != nil { + // wrap the struct into the correct JSON format + wrapper := struct { + Application *appMetadataWrapper `json:"application"` + }{ + &appMetadataWrapper{metadata}, + } + m, err := json.Marshal(wrapper) + if err != nil { + return 0, err + } + + // Create the request options. + ro = &RequestOptions{ + Body: bytes.NewReader(m), + BodyLength: int64(len(m)), + } + } + + request, err := c.Request("POST", endpoint, ro) + if err != nil { + return 0, err + } + + response, err := checkResp(c.HTTPClient.Do(request)) + if err != nil { + return 0, err + } + + var av appVersion + if err := decodeJSON(response, &av); err != nil { + return 0, err + } + + if err := c.putFile(av.UploadPath, data, size); err != nil { + return 0, err + } + + return av.Version, nil +} diff --git a/vendor/github.com/hashicorp/atlas-go/v1/application_test.go b/vendor/github.com/hashicorp/atlas-go/v1/application_test.go new file mode 100644 index 000000000..2144a7032 --- /dev/null +++ b/vendor/github.com/hashicorp/atlas-go/v1/application_test.go @@ -0,0 +1,118 @@ +package atlas + +import ( + "bytes" + "testing" +) + +func TestSlug_returnsSlug(t *testing.T) { + app := &App{ + User: "hashicorp", + Name: "project", + } + + expected := "hashicorp/project" + if app.Slug() != expected { + t.Fatalf("expected %q to be %q", app.Slug(), expected) + } +} + +func TestApp_fetchesApp(t *testing.T) { + server := newTestAtlasServer(t) + defer server.Stop() + + client, err := NewClient(server.URL.String()) + if err != nil { + t.Fatal(err) + } + + app, err := client.App("hashicorp", "existing") + if err != nil { + t.Fatal(err) + } + + if app.User != "hashicorp" { + t.Errorf("expected %q to be %q", app.User, "hashicorp") + } + + if app.Name != "existing" { + t.Errorf("expected %q to be %q", app.Name, "existing") + } +} + +func TestApp_returnsErrorNoApp(t *testing.T) { + server := newTestAtlasServer(t) + defer server.Stop() + + client, err := NewClient(server.URL.String()) + if err != nil { + t.Fatal(err) + } + + _, err = client.App("hashicorp", "newproject") + if err == nil { + t.Fatal("expected error, but nothing was returned") + } +} + +func TestCreateApp_createsAndReturnsApp(t *testing.T) { + server := newTestAtlasServer(t) + defer server.Stop() + + client, err := NewClient(server.URL.String()) + if err != nil { + t.Fatal(err) + } + + app, err := client.CreateApp("hashicorp", "newproject") + if err != nil { + t.Fatal(err) + } + + if app.User != "hashicorp" { + t.Errorf("expected %q to be %q", app.User, "hashicorp") + } + + if app.Name != "newproject" { + t.Errorf("expected %q to be %q", app.Name, "newproject") + } +} + +func TestCreateApp_returnsErrorExistingApp(t *testing.T) { + server := newTestAtlasServer(t) + defer server.Stop() + + client, err := NewClient(server.URL.String()) + if err != nil { + t.Fatal(err) + } + + _, err = client.CreateApp("hashicorp", "existing") + if err == nil { + t.Fatal("expected error, but nothing was returned") + } +} + +func TestUploadApp_createsAndReturnsVersion(t *testing.T) { + server := newTestAtlasServer(t) + defer server.Stop() + + client, err := NewClient(server.URL.String()) + if err != nil { + t.Fatal(err) + } + + app := &App{ + User: "hashicorp", + Name: "existing", + } + metadata := map[string]interface{}{"testing": true} + data := new(bytes.Buffer) + version, err := client.UploadApp(app, metadata, data, int64(data.Len())) + if err != nil { + t.Fatal(err) + } + if version != 125 { + t.Fatalf("bad: %#v", version) + } +} diff --git 
a/vendor/github.com/hashicorp/atlas-go/v1/artifact.go b/vendor/github.com/hashicorp/atlas-go/v1/artifact.go new file mode 100644 index 000000000..28273df28 --- /dev/null +++ b/vendor/github.com/hashicorp/atlas-go/v1/artifact.go @@ -0,0 +1,248 @@ +package atlas + +import ( + "bytes" + "encoding/json" + "fmt" + "io" + "log" + "net/url" +) + +// Artifact represents a single instance of an artifact. +type Artifact struct { + // User and name are self-explanatory. Tag is the combination + // of both into "username/name" + User string `json:"username"` + Name string `json:"name"` + Tag string `json:",omitempty"` +} + +// ArtifactVersion represents a single version of an artifact. +type ArtifactVersion struct { + User string `json:"username"` + Name string `json:"name"` + Tag string `json:",omitempty"` + Type string `json:"artifact_type"` + ID string `json:"id"` + Version int `json:"version"` + Metadata map[string]string `json:"metadata"` + File bool `json:"file"` + Slug string `json:"slug"` + + UploadPath string `json:"upload_path"` + UploadToken string `json:"upload_token"` +} + +// ArtifactSearchOpts are the options used to search for an artifact. +type ArtifactSearchOpts struct { + User string + Name string + Type string + + Build string + Version string + Metadata map[string]string +} + +// UploadArtifactOpts are the options used to upload an artifact. +type UploadArtifactOpts struct { + User string + Name string + Type string + ID string + File io.Reader + FileSize int64 + Metadata map[string]string + BuildID int + CompileID int +} + +// MarshalJSON converts the UploadArtifactOpts into a JSON struct. +func (o *UploadArtifactOpts) MarshalJSON() ([]byte, error) { + return json.Marshal(map[string]interface{}{ + "artifact_version": map[string]interface{}{ + "id": o.ID, + "file": o.File != nil, + "metadata": o.Metadata, + "build_id": o.BuildID, + "compile_id": o.CompileID, + }, + }) +} + +// This is the value that should be used for metadata in ArtifactSearchOpts +// if you don't care what the value is. +const MetadataAnyValue = "943febbf-589f-401b-8f25-58f6d8786848" + +// Artifact finds the Atlas artifact by the given name and returns it. Any +// errors that occur are returned, including ErrAuth and ErrNotFound special +// exceptions which the user may want to handle separately. +func (c *Client) Artifact(user, name string) (*Artifact, error) { + endpoint := fmt.Sprintf("/api/v1/artifacts/%s/%s", user, name) + request, err := c.Request("GET", endpoint, nil) + if err != nil { + return nil, err + } + + response, err := checkResp(c.HTTPClient.Do(request)) + if err != nil { + return nil, err + } + + var aw artifactWrapper + if err := decodeJSON(response, &aw); err != nil { + return nil, err + } + + return aw.Artifact, nil +} + +// ArtifactSearch searches Atlas for the given ArtifactSearchOpts and returns +// a slice of ArtifactVersions. 
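+// +// A hypothetical usage sketch (assumes an already-configured *Client named +// client; the metadata key "region" is illustrative only): +// +// versions, err := client.ArtifactSearch(&ArtifactSearchOpts{ +// User: "hashicorp", +// Name: "existing", +// Type: "amazon-ami", +// Metadata: map[string]string{"region": MetadataAnyValue}, +// }) +// +// Passing MetadataAnyValue (defined below) matches any value for that key.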
+func (c *Client) ArtifactSearch(opts *ArtifactSearchOpts) ([]*ArtifactVersion, error) { + log.Printf("[INFO] searching artifacts: %#v", opts) + + params := make(map[string]string) + if opts.Version != "" { + params["version"] = opts.Version + } + if opts.Build != "" { + params["build"] = opts.Build + } + + i := 1 + for k, v := range opts.Metadata { + prefix := fmt.Sprintf("metadata.%d.", i) + params[prefix+"key"] = k + if v != MetadataAnyValue { + params[prefix+"value"] = v + } + + i++ + } + + endpoint := fmt.Sprintf("/api/v1/artifacts/%s/%s/%s/search", + opts.User, opts.Name, opts.Type) + request, err := c.Request("GET", endpoint, &RequestOptions{ + Params: params, + }) + if err != nil { + return nil, err + } + + response, err := checkResp(c.HTTPClient.Do(request)) + if err != nil { + return nil, err + } + + var w artifactSearchWrapper + if err := decodeJSON(response, &w); err != nil { + return nil, err + } + + return w.Versions, nil +} + +// CreateArtifact creates and returns a new Artifact in Atlas. Any errors that +// occur are returned. +func (c *Client) CreateArtifact(user, name string) (*Artifact, error) { + log.Printf("[INFO] creating artifact: %s/%s", user, name) + body, err := json.Marshal(&artifactWrapper{&Artifact{ + User: user, + Name: name, + }}) + if err != nil { + return nil, err + } + + endpoint := "/api/v1/artifacts" + request, err := c.Request("POST", endpoint, &RequestOptions{ + Body: bytes.NewReader(body), + Headers: map[string]string{ + "Content-Type": "application/json", + }, + }) + if err != nil { + return nil, err + } + + response, err := checkResp(c.HTTPClient.Do(request)) + if err != nil { + return nil, err + } + + var aw artifactWrapper + if err := decodeJSON(response, &aw); err != nil { + return nil, err + } + + return aw.Artifact, nil +} + +// ArtifactFileURL is a helper method for getting the URL for an ArtifactVersion +// from the Client. +func (c *Client) ArtifactFileURL(av *ArtifactVersion) (*url.URL, error) { + if !av.File { + return nil, nil + } + + u := *c.URL + u.Path = fmt.Sprintf("/api/v1/artifacts/%s/%s/%s/file", + av.User, av.Name, av.Type) + return &u, nil +} + +// UploadArtifact streams the upload of a file on disk using the given +// UploadArtifactOpts. Any errors that occur are returned. 
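+// +// A minimal sketch, assuming f is an *os.File to upload and fi is its +// os.FileInfo: +// +// av, err := client.UploadArtifact(&UploadArtifactOpts{ +// User: "hashicorp", +// Name: "existing", +// Type: "amazon-ami", +// File: f, +// FileSize: fi.Size(), +// })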
+func (c *Client) UploadArtifact(opts *UploadArtifactOpts) (*ArtifactVersion, error) { + log.Printf("[INFO] uploading artifact: %s/%s (%s)", opts.User, opts.Name, opts.Type) + + endpoint := fmt.Sprintf("/api/v1/artifacts/%s/%s/%s", + opts.User, opts.Name, opts.Type) + + body, err := json.Marshal(opts) + if err != nil { + return nil, err + } + + request, err := c.Request("POST", endpoint, &RequestOptions{ + Body: bytes.NewReader(body), + Headers: map[string]string{ + "Content-Type": "application/json", + }, + }) + if err != nil { + return nil, err + } + + response, err := checkResp(c.HTTPClient.Do(request)) + if err != nil { + return nil, err + } + + var av ArtifactVersion + if err := decodeJSON(response, &av); err != nil { + return nil, err + } + + if opts.File != nil { + if err := c.putFile(av.UploadPath, opts.File, opts.FileSize); err != nil { + return nil, err + } + } + + return &av, nil +} + +type artifactWrapper struct { + Artifact *Artifact `json:"artifact"` +} + +type artifactSearchWrapper struct { + Versions []*ArtifactVersion +} + +type artifactVersionWrapper struct { + Version *ArtifactVersion +} diff --git a/vendor/github.com/hashicorp/atlas-go/v1/artifact_test.go b/vendor/github.com/hashicorp/atlas-go/v1/artifact_test.go new file mode 100644 index 000000000..fbdf54275 --- /dev/null +++ b/vendor/github.com/hashicorp/atlas-go/v1/artifact_test.go @@ -0,0 +1,167 @@ +package atlas + +import ( + "bytes" + "testing" +) + +func TestArtifact_fetchesArtifact(t *testing.T) { + server := newTestAtlasServer(t) + defer server.Stop() + + client, err := NewClient(server.URL.String()) + if err != nil { + t.Fatal(err) + } + + art, err := client.Artifact("hashicorp", "existing") + if err != nil { + t.Fatal(err) + } + + if art.User != "hashicorp" { + t.Errorf("expected %q to be %q", art.User, "hashicorp") + } + + if art.Name != "existing" { + t.Errorf("expected %q to be %q", art.Name, "existing") + } +} + +func TestArtifact_returnsErrorNoArtifact(t *testing.T) { + server := newTestAtlasServer(t) + defer server.Stop() + + client, err := NewClient(server.URL.String()) + if err != nil { + t.Fatal(err) + } + + _, err = client.App("hashicorp", "newproject") + if err == nil { + t.Fatal("expected error, but nothing was returned") + } +} + +func TestArtifactSearch_fetches(t *testing.T) { + server := newTestAtlasServer(t) + defer server.Stop() + + client, err := NewClient(server.URL.String()) + if err != nil { + t.Fatal(err) + } + + vs, err := client.ArtifactSearch(&ArtifactSearchOpts{ + User: "hashicorp", + Name: "existing1", + Type: "amazon-ami", + }) + if err != nil { + t.Fatal(err) + } + + if len(vs) != 1 { + t.Fatalf("bad: %#v", vs) + } +} + +func TestArtifactSearch_metadata(t *testing.T) { + server := newTestAtlasServer(t) + defer server.Stop() + + client, err := NewClient(server.URL.String()) + if err != nil { + t.Fatal(err) + } + + vs, err := client.ArtifactSearch(&ArtifactSearchOpts{ + User: "hashicorp", + Name: "existing2", + Type: "amazon-ami", + Metadata: map[string]string{ + "foo": "bar", + "bar": MetadataAnyValue, + }, + }) + if err != nil { + t.Fatal(err) + } + + if len(vs) != 1 { + t.Fatalf("bad: %#v", vs) + } +} + +func TestArtifactFileURL(t *testing.T) { + server := newTestAtlasServer(t) + defer server.Stop() + + client, err := NewClient(server.URL.String()) + if err != nil { + t.Fatal(err) + } + + v := &ArtifactVersion{ + User: "foo", + Name: "bar", + Type: "vagrant-box", + File: true, + } + + u, err := client.ArtifactFileURL(v) + if err != nil { + t.Fatal(err) + } + + expected := 
*server.URL + expected.Path = "/api/v1/artifacts/foo/bar/vagrant-box/file" + if u.String() != expected.String() { + t.Fatalf("unexpected: %s\n\nexpected: %s", u, expected.String()) + } +} + +func TestArtifactFileURL_nil(t *testing.T) { + server := newTestAtlasServer(t) + defer server.Stop() + + client, err := NewClient(server.URL.String()) + if err != nil { + t.Fatal(err) + } + + v := &ArtifactVersion{ + User: "foo", + Name: "bar", + Type: "vagrant-box", + } + + u, err := client.ArtifactFileURL(v) + if err != nil { + t.Fatal(err) + } + if u != nil { + t.Fatal("should be nil") + } +} + +func TestUploadArtifact(t *testing.T) { + server := newTestAtlasServer(t) + defer server.Stop() + + client, err := NewClient(server.URL.String()) + if err != nil { + t.Fatal(err) + } + + data := new(bytes.Buffer) + _, err = client.UploadArtifact(&UploadArtifactOpts{ + User: "hashicorp", + Name: "existing", + Type: "amazon-ami", + File: data, + }) + if err != nil { + t.Fatal(err) + } +} diff --git a/vendor/github.com/hashicorp/atlas-go/v1/atlas_test.go b/vendor/github.com/hashicorp/atlas-go/v1/atlas_test.go new file mode 100644 index 000000000..e76b7a94a --- /dev/null +++ b/vendor/github.com/hashicorp/atlas-go/v1/atlas_test.go @@ -0,0 +1,453 @@ +package atlas + +import ( + "bytes" + "encoding/json" + "fmt" + "io" + "net" + "net/http" + "net/url" + "path" + "reflect" + "strconv" + "strings" + "testing" +) + +type atlasServer struct { + URL *url.URL + + t *testing.T + ln net.Listener + server http.Server +} + +func newTestAtlasServer(t *testing.T) *atlasServer { + hs := &atlasServer{t: t} + + ln, err := net.Listen("tcp", ":0") + if err != nil { + t.Fatal(err) + } + hs.ln = ln + + hs.URL = &url.URL{ + Scheme: "http", + Host: ln.Addr().String(), + } + + mux := http.NewServeMux() + hs.setupRoutes(mux) + + var server http.Server + server.Handler = mux + hs.server = server + go server.Serve(ln) + + return hs +} + +func (hs *atlasServer) Stop() { + hs.ln.Close() +} + +func (hs *atlasServer) setupRoutes(mux *http.ServeMux) { + mux.HandleFunc("/_json", hs.jsonHandler) + mux.HandleFunc("/_rails-error", hs.railsHandler) + mux.HandleFunc("/_status/", hs.statusHandler) + + mux.HandleFunc("/_binstore/", hs.binstoreHandler) + + mux.HandleFunc("/api/v1/authenticate", hs.authenticationHandler) + mux.HandleFunc("/api/v1/token", hs.tokenHandler) + + mux.HandleFunc("/api/v1/artifacts/hashicorp/existing", hs.vagrantArtifactExistingHandler) + mux.HandleFunc("/api/v1/artifacts/hashicorp/existing/amazon-ami", hs.vagrantArtifactUploadHandler) + mux.HandleFunc("/api/v1/artifacts/hashicorp/existing1/amazon-ami/search", hs.vagrantArtifactSearchHandler1) + mux.HandleFunc("/api/v1/artifacts/hashicorp/existing2/amazon-ami/search", hs.vagrantArtifactSearchHandler2) + + mux.HandleFunc("/api/v1/vagrant/applications", hs.vagrantCreateAppHandler) + mux.HandleFunc("/api/v1/vagrant/applications/", hs.vagrantCreateAppsHandler) + mux.HandleFunc("/api/v1/vagrant/applications/hashicorp/existing", hs.vagrantAppExistingHandler) + mux.HandleFunc("/api/v1/vagrant/applications/hashicorp/existing/versions", hs.vagrantUploadAppHandler) + + mux.HandleFunc("/api/v1/packer/build-configurations", hs.vagrantBCCreateHandler) + mux.HandleFunc("/api/v1/packer/build-configurations/hashicorp/existing", hs.vagrantBCExistingHandler) + mux.HandleFunc("/api/v1/packer/build-configurations/hashicorp/existing/versions", hs.vagrantBCCreateVersionHandler) + + mux.HandleFunc("/api/v1/terraform/configurations/hashicorp/existing/versions/latest", hs.tfConfigLatest) + 
mux.HandleFunc("/api/v1/terraform/configurations/hashicorp/existing/versions", hs.tfConfigUpload)
+}
+
+func (hs *atlasServer) statusHandler(w http.ResponseWriter, r *http.Request) {
+	slice := strings.Split(r.URL.Path, "/")
+	codeStr := slice[len(slice)-1]
+
+	code, err := strconv.ParseInt(codeStr, 10, 32)
+	if err != nil {
+		hs.t.Fatal(err)
+	}
+
+	w.WriteHeader(int(code))
+}
+
+func (hs *atlasServer) railsHandler(w http.ResponseWriter, r *http.Request) {
+	w.Header().Set("Content-Type", "application/json")
+	w.WriteHeader(422)
+	fmt.Fprintf(w, `{"errors": ["this is an error", "this is another error"]}`)
+}
+
+func (hs *atlasServer) jsonHandler(w http.ResponseWriter, r *http.Request) {
+	w.Header().Set("Content-Type", "application/json")
+	fmt.Fprintf(w, `{"ok": true}`)
+}
+
+func (hs *atlasServer) authenticationHandler(w http.ResponseWriter, r *http.Request) {
+	if err := r.ParseForm(); err != nil {
+		hs.t.Fatal(err)
+	}
+
+	login, password := r.Form["user[login]"][0], r.Form["user[password]"][0]
+
+	if login == "sethloves" && password == "bacon" {
+		w.WriteHeader(http.StatusOK)
+		fmt.Fprintf(w, `
+		{
+			"token": "pX4AQ5vO7T-xJrxsnvlB0cfeF-tGUX-A-280LPxoryhDAbwmox7PKinMgA1F6R3BKaT"
+		}
+		`)
+	} else {
+		w.WriteHeader(http.StatusUnauthorized)
+	}
+}
+
+func (hs *atlasServer) tokenHandler(w http.ResponseWriter, r *http.Request) {
+	if r.Method != "GET" {
+		w.WriteHeader(http.StatusMethodNotAllowed)
+		return
+	}
+
+	token := r.Header.Get(atlasTokenHeader)
+	if token == "a.atlasv1.b" {
+		w.WriteHeader(http.StatusOK)
+	} else {
+		w.WriteHeader(http.StatusUnauthorized)
+	}
+}
+
+func (hs *atlasServer) tfConfigLatest(w http.ResponseWriter, r *http.Request) {
+	if r.Method != "GET" {
+		w.WriteHeader(http.StatusMethodNotAllowed)
+		return
+	}
+
+	fmt.Fprintf(w, `
+	{
+		"version": {
+			"version": 5,
+			"metadata": { "foo": "bar" },
+			"variables": { "foo": "bar" }
+		}
+	}
+	`)
+}
+
+func (hs *atlasServer) tfConfigUpload(w http.ResponseWriter, r *http.Request) {
+	if r.Method != "POST" {
+		w.WriteHeader(http.StatusMethodNotAllowed)
+		return
+	}
+
+	var buf bytes.Buffer
+	if _, err := io.Copy(&buf, r.Body); err != nil {
+		w.WriteHeader(http.StatusMethodNotAllowed)
+		return
+	}
+
+	if buf.Len() == 0 {
+		w.WriteHeader(http.StatusConflict)
+		return
+	}
+
+	uploadPath := hs.URL.String() + "/_binstore/"
+
+	w.WriteHeader(http.StatusOK)
+	fmt.Fprintf(w, `
+	{
+		"version": 5,
+		"upload_path": "%s"
+	}
+	`, uploadPath)
+}
+
+func (hs *atlasServer) vagrantArtifactExistingHandler(w http.ResponseWriter, r *http.Request) {
+	if r.Method != "GET" {
+		w.WriteHeader(http.StatusMethodNotAllowed)
+		return
+	}
+
+	fmt.Fprintf(w, `
+	{
+		"artifact": {
+			"username": "hashicorp",
+			"name": "existing",
+			"tag": "hashicorp/existing"
+		}
+	}
+	`)
+}
+
+func (hs *atlasServer) vagrantArtifactSearchHandler1(w http.ResponseWriter, r *http.Request) {
+	if r.Method != "GET" {
+		w.WriteHeader(http.StatusMethodNotAllowed)
+		return
+	}
+
+	fmt.Fprintf(w, `
+	{
+		"versions": [{
+			"username": "hashicorp",
+			"name": "existing",
+			"tag": "hashicorp/existing"
+		}]
+	}
+	`)
+}
+
+func (hs *atlasServer) vagrantArtifactSearchHandler2(w http.ResponseWriter, r *http.Request) {
+	if r.Method != "GET" {
+		w.WriteHeader(http.StatusMethodNotAllowed)
+		return
+	}
+
+	if err := r.ParseForm(); err != nil {
+		w.WriteHeader(http.StatusMethodNotAllowed)
+		return
+	}
+
+	if r.Form.Get("metadata.1.key") == "" {
+		w.WriteHeader(http.StatusMethodNotAllowed)
+		return
+	}
+	if r.Form.Get("metadata.2.key") == "" {
+		w.WriteHeader(http.StatusMethodNotAllowed)
+		return
+	}
+
+	fmt.Fprintf(w, `
+	{
+		"versions": [{
+			"username": "hashicorp",
+			"name": "existing",
+			"tag": "hashicorp/existing"
+		}]
+	}
+	`)
+}
+
+func (hs *atlasServer) vagrantArtifactUploadHandler(w http.ResponseWriter, r *http.Request) {
+	if r.Method != "POST" {
+		w.WriteHeader(http.StatusMethodNotAllowed)
+		return
+	}
+
+	var buf bytes.Buffer
+	if _, err := io.Copy(&buf, r.Body); err != nil {
+		w.WriteHeader(http.StatusMethodNotAllowed)
+		return
+	}
+
+	if buf.Len() == 0 {
+		w.WriteHeader(http.StatusConflict)
+		return
+	}
+
+	uploadPath := hs.URL.String() + "/_binstore/"
+
+	w.WriteHeader(http.StatusOK)
+	fmt.Fprintf(w, `
+	{
+		"upload_path": "%s"
+	}
+	`, uploadPath)
+}
+
+func (hs *atlasServer) vagrantAppExistingHandler(w http.ResponseWriter, r *http.Request) {
+	if r.Method != "GET" {
+		w.WriteHeader(http.StatusMethodNotAllowed)
+		return
+	}
+
+	fmt.Fprintf(w, `
+	{
+		"username": "hashicorp",
+		"name": "existing",
+		"tag": "hashicorp/existing",
+		"private": true
+	}
+	`)
+}
+
+func (hs *atlasServer) vagrantBCCreateHandler(w http.ResponseWriter, r *http.Request) {
+	if r.Method != "POST" {
+		w.WriteHeader(http.StatusMethodNotAllowed)
+		return
+	}
+
+	var wrapper bcWrapper
+	dec := json.NewDecoder(r.Body)
+	if err := dec.Decode(&wrapper); err != nil && err != io.EOF {
+		hs.t.Fatal(err)
+	}
+	bc := wrapper.BuildConfig
+
+	if bc.User != "hashicorp" {
+		w.WriteHeader(http.StatusConflict)
+		return
+	}
+
+	w.WriteHeader(http.StatusOK)
+	fmt.Fprintf(w, `
+	{
+		"username":"hashicorp",
+		"name":"new",
+		"tag":"hashicorp/new",
+		"private":true
+	}
+	`)
+}
+
+func (hs *atlasServer) vagrantBCCreateVersionHandler(w http.ResponseWriter, r *http.Request) {
+	if r.Method != "POST" {
+		w.WriteHeader(http.StatusMethodNotAllowed)
+		return
+	}
+
+	var wrapper bcCreateWrapper
+	dec := json.NewDecoder(r.Body)
+	if err := dec.Decode(&wrapper); err != nil && err != io.EOF {
+		hs.t.Fatal(err)
+	}
+	builds := wrapper.Version.Builds
+
+	if len(builds) == 0 {
+		w.WriteHeader(http.StatusConflict)
+		return
+	}
+
+	expected := map[string]interface{}{"testing": true}
+	if !reflect.DeepEqual(wrapper.Version.Metadata, expected) {
+		hs.t.Fatalf("expected %q to be %q", wrapper.Version.Metadata, expected)
+	}
+
+	uploadPath := hs.URL.String() + "/_binstore/"
+
+	w.WriteHeader(http.StatusOK)
+	fmt.Fprintf(w, `
+	{
+		"upload_path": "%s"
+	}
+	`, uploadPath)
+}
+
+func (hs *atlasServer) vagrantBCExistingHandler(w http.ResponseWriter, r *http.Request) {
+	if r.Method != "GET" {
+		w.WriteHeader(http.StatusMethodNotAllowed)
+		return
+	}
+
+	fmt.Fprintf(w, `
+	{
+		"username": "hashicorp",
+		"name": "existing"
+	}
+	`)
+}
+
+func (hs *atlasServer) vagrantCreateAppHandler(w http.ResponseWriter, r *http.Request) {
+	if r.Method != "POST" {
+		w.WriteHeader(http.StatusMethodNotAllowed)
+		return
+	}
+
+	var aw appWrapper
+	dec := json.NewDecoder(r.Body)
+	if err := dec.Decode(&aw); err != nil && err != io.EOF {
+		hs.t.Fatal(err)
+	}
+	app := aw.Application
+
+	if app.User == "hashicorp" && app.Name == "existing" {
+		w.WriteHeader(http.StatusConflict)
+	} else {
+		body, err := json.Marshal(app)
+		if err != nil {
+			hs.t.Fatal(err)
+		}
+
+		w.WriteHeader(http.StatusOK)
+		w.Write(body)
+	}
+}
+
+func (hs *atlasServer) vagrantCreateAppsHandler(w http.ResponseWriter, r *http.Request) {
+	if r.Method != "GET" {
+		w.WriteHeader(http.StatusMethodNotAllowed)
+		return
+	}
+
+	split := strings.Split(r.RequestURI, "/")
+	parts := split[len(split)-2:]
+	user, name := parts[0], parts[1]
+
+	if user == "hashicorp" && name == "existing" {
+		body, err := json.Marshal(&App{
+			User: "hashicorp",
+			Name: "existing",
+		})
+		if err != nil {
+			hs.t.Fatal(err)
+		}
+
+		w.WriteHeader(http.StatusOK)
+		w.Write(body)
+	} else {
+		w.WriteHeader(http.StatusNotFound)
+	}
+}
+
+func (hs *atlasServer) vagrantUploadAppHandler(w http.ResponseWriter, r *http.Request) {
+	u := *hs.URL
+	u.Path = path.Join(u.Path, "_binstore/630e42d9-2364-2412-4121-18266770468e")
+
+	var buf bytes.Buffer
+	if _, err := io.Copy(&buf, r.Body); err != nil {
+		hs.t.Fatal(err)
+	}
+	expected := `{"application":{"metadata":{"testing":true}}}`
+	if buf.String() != expected {
+		hs.t.Fatalf("expected metadata to be %q, but was %q", expected, buf.String())
+	}
+
+	body, err := json.Marshal(&appVersion{
+		UploadPath: u.String(),
+		Token:      "630e42d9-2364-2412-4121-18266770468e",
+		Version:    125,
+	})
+	if err != nil {
+		hs.t.Fatal(err)
+	}
+
+	w.WriteHeader(http.StatusOK)
+	w.Write(body)
+}
+
+func (hs *atlasServer) binstoreHandler(w http.ResponseWriter, r *http.Request) {
+	if r.Method != "PUT" {
+		w.WriteHeader(http.StatusMethodNotAllowed)
+		return
+	}
+
+	w.WriteHeader(http.StatusOK)
+}
diff --git a/vendor/github.com/hashicorp/atlas-go/v1/authentication.go b/vendor/github.com/hashicorp/atlas-go/v1/authentication.go
new file mode 100644
index 000000000..613eaefcc
--- /dev/null
+++ b/vendor/github.com/hashicorp/atlas-go/v1/authentication.go
@@ -0,0 +1,88 @@
+package atlas
+
+import (
+	"fmt"
+	"log"
+	"net/url"
+	"strings"
+)
+
+// Login accepts a username and password as string arguments. Both username and
+// password must be non-empty values. Atlas does not permit
+// passwordless authentication.
+//
+// If authentication is unsuccessful, an error is returned with the body of the
+// error containing the server's response.
+//
+// If authentication is successful, this method sets the Token value on the
+// Client and returns the Token as a string.
+func (c *Client) Login(username, password string) (string, error) {
+	log.Printf("[INFO] logging in user %s", username)
+
+	if len(username) == 0 {
+		return "", fmt.Errorf("client: missing username")
+	}
+
+	if len(password) == 0 {
+		return "", fmt.Errorf("client: missing password")
+	}
+
+	// Build the request
+	request, err := c.Request("POST", "/api/v1/authenticate", &RequestOptions{
+		Body: strings.NewReader(url.Values{
+			"user[login]":       []string{username},
+			"user[password]":    []string{password},
+			"user[description]": []string{"Created by the Atlas Go Client"},
+		}.Encode()),
+		Headers: map[string]string{
+			"Content-Type": "application/x-www-form-urlencoded",
+		},
+	})
+	if err != nil {
+		return "", err
+	}
+
+	// Make the request
+	response, err := checkResp(c.HTTPClient.Do(request))
+	if err != nil {
+		return "", err
+	}
+
+	// Decode the body
+	var tResponse struct{ Token string }
+	if err := decodeJSON(response, &tResponse); err != nil {
+		return "", err
+	}
+
+	// Set the token
+	log.Printf("[DEBUG] setting atlas token (%s)", maskString(tResponse.Token))
+	c.Token = tResponse.Token
+
+	// Return the token
+	return c.Token, nil
+}
+
+// Verify verifies that authentication and communication with Atlas
+// is properly functioning.
+func (c *Client) Verify() error {
+	log.Printf("[INFO] verifying authentication")
+
+	request, err := c.Request("GET", "/api/v1/authenticate", nil)
+	if err != nil {
+		return err
+	}
+
+	_, err = checkResp(c.HTTPClient.Do(request))
+	return err
+}
+
+// maskString masks all but the first few characters of a string for display
+// output.
This is useful for tokens so we can display them to the user without +// showing the full output. +func maskString(s string) string { + if len(s) <= 3 { + return "*** (masked)" + } + + return s[0:3] + "*** (masked)" +} diff --git a/vendor/github.com/hashicorp/atlas-go/v1/authentication_test.go b/vendor/github.com/hashicorp/atlas-go/v1/authentication_test.go new file mode 100644 index 000000000..f6f40761a --- /dev/null +++ b/vendor/github.com/hashicorp/atlas-go/v1/authentication_test.go @@ -0,0 +1,30 @@ +package atlas + +import "testing" + +func TestMaskString_emptyString(t *testing.T) { + result := maskString("") + expected := "*** (masked)" + + if result != expected { + t.Errorf("expected %s to be %s", result, expected) + } +} + +func TestMaskString_threeString(t *testing.T) { + result := maskString("123") + expected := "*** (masked)" + + if result != expected { + t.Errorf("expected %s to be %s", result, expected) + } +} + +func TestMaskString_longerString(t *testing.T) { + result := maskString("ABCD1234") + expected := "ABC*** (masked)" + + if result != expected { + t.Errorf("expected %s to be %s", result, expected) + } +} diff --git a/vendor/github.com/hashicorp/atlas-go/v1/build_config.go b/vendor/github.com/hashicorp/atlas-go/v1/build_config.go new file mode 100644 index 000000000..53b6c1125 --- /dev/null +++ b/vendor/github.com/hashicorp/atlas-go/v1/build_config.go @@ -0,0 +1,183 @@ +package atlas + +import ( + "bytes" + "encoding/json" + "fmt" + "io" + "log" +) + +// bcWrapper is the API wrapper since the server wraps the resulting object. +type bcWrapper struct { + BuildConfig *BuildConfig `json:"build_configuration"` +} + +// BuildConfig represents a Packer build configuration. +type BuildConfig struct { + // User is the namespace under which the build config lives + User string `json:"username"` + + // Name is the actual name of the build config, unique in the scope + // of the username. + Name string `json:"name"` +} + +// Slug returns the slug format for this BuildConfig (User/Name) +func (b *BuildConfig) Slug() string { + return fmt.Sprintf("%s/%s", b.User, b.Name) +} + +// BuildConfigVersion represents a single uploaded (or uploadable) version +// of a build configuration. +type BuildConfigVersion struct { + // The fields below are the username/name combo to uniquely identify + // a build config. + User string `json:"username"` + Name string `json:"name"` + + // Builds is the list of builds that this version supports. + Builds []BuildConfigBuild +} + +// Slug returns the slug format for this BuildConfigVersion (User/Name) +func (bv *BuildConfigVersion) Slug() string { + return fmt.Sprintf("%s/%s", bv.User, bv.Name) +} + +// BuildConfigBuild is a single build that is present in an uploaded +// build configuration. +type BuildConfigBuild struct { + // Name is a unique name for this build + Name string `json:"name"` + + // Type is the type of builder that this build needs to run on, + // such as "amazon-ebs" or "qemu". + Type string `json:"type"` + + // Artifact is true if this build results in one or more artifacts + // being sent to Atlas + Artifact bool `json:"artifact"` +} + +// BuildConfig gets a single build configuration by user and name. 
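+//
+// A minimal usage sketch (the "hashicorp/existing" slug mirrors the test
+// fixtures elsewhere in this package; any user/name pair works the same way):
+//
+//	bc, err := client.BuildConfig("hashicorp", "existing")
+//	if err != nil {
+//		return err // ErrNotFound is returned for an unknown configuration
+//	}
+//	fmt.Println(bc.Slug())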
+func (c *Client) BuildConfig(user, name string) (*BuildConfig, error) { + log.Printf("[INFO] getting build configuration %s/%s", user, name) + + endpoint := fmt.Sprintf("/api/v1/packer/build-configurations/%s/%s", user, name) + request, err := c.Request("GET", endpoint, nil) + if err != nil { + return nil, err + } + + response, err := checkResp(c.HTTPClient.Do(request)) + if err != nil { + return nil, err + } + + var bc BuildConfig + if err := decodeJSON(response, &bc); err != nil { + return nil, err + } + + return &bc, nil +} + +// CreateBuildConfig creates a new build configuration. +func (c *Client) CreateBuildConfig(user, name string) (*BuildConfig, error) { + log.Printf("[INFO] creating build configuration %s/%s", user, name) + + endpoint := "/api/v1/packer/build-configurations" + body, err := json.Marshal(&bcWrapper{ + BuildConfig: &BuildConfig{ + User: user, + Name: name, + }, + }) + if err != nil { + return nil, err + } + + request, err := c.Request("POST", endpoint, &RequestOptions{ + Body: bytes.NewReader(body), + Headers: map[string]string{ + "Content-Type": "application/json", + }, + }) + if err != nil { + return nil, err + } + + response, err := checkResp(c.HTTPClient.Do(request)) + if err != nil { + return nil, err + } + + var bc BuildConfig + if err := decodeJSON(response, &bc); err != nil { + return nil, err + } + + return &bc, nil +} + +// UploadBuildConfigVersion creates a single build configuration version +// and uploads the template associated with it. +// +// Actual API: "Create Build Config Version" +func (c *Client) UploadBuildConfigVersion(v *BuildConfigVersion, metadata map[string]interface{}, + data io.Reader, size int64) error { + + log.Printf("[INFO] uploading build configuration version %s (%d bytes), with metadata %q", + v.Slug(), size, metadata) + + endpoint := fmt.Sprintf("/api/v1/packer/build-configurations/%s/%s/versions", + v.User, v.Name) + + var bodyData bcCreateWrapper + bodyData.Version.Builds = v.Builds + bodyData.Version.Metadata = metadata + body, err := json.Marshal(bodyData) + if err != nil { + return err + } + + request, err := c.Request("POST", endpoint, &RequestOptions{ + Body: bytes.NewReader(body), + Headers: map[string]string{ + "Content-Type": "application/json", + }, + }) + if err != nil { + return err + } + + response, err := checkResp(c.HTTPClient.Do(request)) + if err != nil { + return err + } + + var bv bcCreate + if err := decodeJSON(response, &bv); err != nil { + return err + } + + if err := c.putFile(bv.UploadPath, data, size); err != nil { + return err + } + + return nil +} + +// bcCreate is the struct returned when creating a build configuration. +type bcCreate struct { + UploadPath string `json:"upload_path"` +} + +// bcCreateWrapper is the wrapper for creating a build config. 
+type bcCreateWrapper struct {
+	Version struct {
+		Metadata map[string]interface{} `json:"metadata,omitempty"`
+		Builds   []BuildConfigBuild     `json:"builds"`
+	} `json:"version"`
+}
diff --git a/vendor/github.com/hashicorp/atlas-go/v1/build_config_test.go b/vendor/github.com/hashicorp/atlas-go/v1/build_config_test.go
new file mode 100644
index 000000000..54a2affcd
--- /dev/null
+++ b/vendor/github.com/hashicorp/atlas-go/v1/build_config_test.go
@@ -0,0 +1,95 @@
+package atlas
+
+import (
+	"bytes"
+	"reflect"
+	"testing"
+)
+
+func TestBuildConfig_slug(t *testing.T) {
+	bc := &BuildConfig{User: "sethvargo", Name: "bacon"}
+	expected := "sethvargo/bacon"
+	if bc.Slug() != expected {
+		t.Errorf("expected %q to be %q", bc.Slug(), expected)
+	}
+}
+
+func TestBuildConfigVersion_slug(t *testing.T) {
+	bc := &BuildConfigVersion{User: "sethvargo", Name: "bacon"}
+	expected := "sethvargo/bacon"
+	if bc.Slug() != expected {
+		t.Errorf("expected %q to be %q", bc.Slug(), expected)
+	}
+}
+
+func TestBuildConfig_fetches(t *testing.T) {
+	server := newTestAtlasServer(t)
+	defer server.Stop()
+
+	client, err := NewClient(server.URL.String())
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	actual, err := client.BuildConfig("hashicorp", "existing")
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	expected := &BuildConfig{
+		User: "hashicorp",
+		Name: "existing",
+	}
+
+	if !reflect.DeepEqual(actual, expected) {
+		t.Fatalf("%#v", actual)
+	}
+}
+
+func TestCreateBuildConfig(t *testing.T) {
+	server := newTestAtlasServer(t)
+	defer server.Stop()
+
+	client, err := NewClient(server.URL.String())
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	user, name := "hashicorp", "new"
+	bc, err := client.CreateBuildConfig(user, name)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if bc.User != user {
+		t.Errorf("expected %q to be %q", bc.User, user)
+	}
+
+	if bc.Name != name {
+		t.Errorf("expected %q to be %q", bc.Name, name)
+	}
+}
+
+func TestUploadBuildConfigVersion(t *testing.T) {
+	server := newTestAtlasServer(t)
+	defer server.Stop()
+
+	client, err := NewClient(server.URL.String())
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	bc := &BuildConfigVersion{
+		User: "hashicorp",
+		Name: "existing",
+		Builds: []BuildConfigBuild{
+			BuildConfigBuild{Name: "foo", Type: "ami"},
+		},
+	}
+	metadata := map[string]interface{}{"testing": true}
+	data := new(bytes.Buffer)
+	err = client.UploadBuildConfigVersion(bc, metadata, data, int64(data.Len()))
+	if err != nil {
+		t.Fatal(err)
+	}
+}
diff --git a/vendor/github.com/hashicorp/atlas-go/v1/client.go b/vendor/github.com/hashicorp/atlas-go/v1/client.go
new file mode 100644
index 000000000..abae4fe56
--- /dev/null
+++ b/vendor/github.com/hashicorp/atlas-go/v1/client.go
@@ -0,0 +1,301 @@
+package atlas
+
+import (
+	"bytes"
+	"encoding/json"
+	"fmt"
+	"io"
+	"log"
+	"net/http"
+	"net/url"
+	"os"
+	"path"
+	"runtime"
+	"strings"
+
+	"github.com/hashicorp/go-cleanhttp"
+)
+
+const (
+	// atlasDefaultEndpoint is the default base URL for connecting to Atlas.
+	atlasDefaultEndpoint = "https://atlas.hashicorp.com"
+
+	// atlasEndpointEnvVar is the environment variable that overrides the
+	// default Atlas address.
+	atlasEndpointEnvVar = "ATLAS_ADDRESS"
+
+	// atlasTokenHeader is the header key used for authenticating with Atlas
+	atlasTokenHeader = "X-Atlas-Token"
+)
+
+var projectURL = "https://github.com/hashicorp/atlas-go"
+var userAgent = fmt.Sprintf("AtlasGo/1.0 (+%s; %s)",
+	projectURL, runtime.Version())
+
+// ErrAuth is the error returned if a 401 is returned by an API request.
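+//
+// Both ErrAuth and ErrNotFound are sentinel values, so callers can compare
+// against them directly. For illustration (a sketch, assuming an existing
+// *http.Request named req):
+//
+//	if _, err := checkResp(c.HTTPClient.Do(req)); err == ErrAuth {
+//		// credentials were rejected
+//	}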
+var ErrAuth = fmt.Errorf("authentication failed")
+
+// ErrNotFound is the error returned if a 404 is returned by an API request.
+var ErrNotFound = fmt.Errorf("resource not found")
+
+// RailsError represents an error that was returned from the Rails server.
+type RailsError struct {
+	Errors []string `json:"errors"`
+}
+
+// Error collects all of the errors in the RailsError and returns a comma-
+// separated list of the errors that were returned from the server.
+func (re *RailsError) Error() string {
+	return strings.Join(re.Errors, ", ")
+}
+
+// Client represents a single connection to an Atlas API endpoint.
+type Client struct {
+	// URL is the full endpoint address to the Atlas server including the
+	// protocol, port, and path.
+	URL *url.URL
+
+	// Token is the Atlas authentication token
+	Token string
+
+	// HTTPClient is the underlying http client with which to make requests.
+	HTTPClient *http.Client
+}
+
+// DefaultClient returns a client that connects to the Atlas API.
+func DefaultClient() *Client {
+	atlasEndpoint := os.Getenv(atlasEndpointEnvVar)
+	if atlasEndpoint == "" {
+		atlasEndpoint = atlasDefaultEndpoint
+	}
+
+	client, err := NewClient(atlasEndpoint)
+	if err != nil {
+		panic(err)
+	}
+
+	return client
+}
+
+// NewClient creates a new Atlas Client from the given URL (as a string). If
+// the URL cannot be parsed, an error is returned. The HTTPClient is set to
+// a default HTTP client, but this can be changed programmatically by setting
+// client.HTTPClient. The user can also programmatically set the URL as a
+// *url.URL.
+func NewClient(urlString string) (*Client, error) {
+	if len(urlString) == 0 {
+		return nil, fmt.Errorf("client: missing url")
+	}
+
+	parsedURL, err := url.Parse(urlString)
+	if err != nil {
+		return nil, err
+	}
+
+	token := os.Getenv("ATLAS_TOKEN")
+	if token != "" {
+		log.Printf("[DEBUG] using ATLAS_TOKEN (%s)", maskString(token))
+	}
+
+	client := &Client{
+		URL:   parsedURL,
+		Token: token,
+	}
+
+	if err := client.init(); err != nil {
+		return nil, err
+	}
+
+	return client, nil
+}
+
+// init() sets defaults on the client.
+func (c *Client) init() error {
+	c.HTTPClient = cleanhttp.DefaultClient()
+	return nil
+}
+
+// RequestOptions is the list of options to pass to the request.
+type RequestOptions struct {
+	// Params is a map of key-value pairs that will be added to the Request.
+	Params map[string]string
+
+	// Headers is a map of key-value pairs that will be added to the Request.
+	Headers map[string]string
+
+	// Body is an io.Reader object that will be streamed or uploaded with the
+	// Request. BodyLength is the final size of the Body.
+	Body       io.Reader
+	BodyLength int64
+}
+
+// Request creates a new HTTP request using the given verb and sub path.
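+//
+// A sketch of how it is typically paired with checkResp (the endpoint here
+// is only an example):
+//
+//	req, err := c.Request("GET", "/api/v1/authenticate", nil)
+//	if err != nil {
+//		return err
+//	}
+//	resp, err := checkResp(c.HTTPClient.Do(req))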
+func (c *Client) Request(verb, spath string, ro *RequestOptions) (*http.Request, error) {
+	log.Printf("[INFO] request: %s %s", verb, spath)
+
+	// Ensure we have a RequestOptions struct (passing nil is acceptable)
+	if ro == nil {
+		ro = new(RequestOptions)
+	}
+
+	// Create a new URL with the appended path
+	u := *c.URL
+	u.Path = path.Join(c.URL.Path, spath)
+
+	// Add the token and other params
+	if c.Token != "" {
+		log.Printf("[DEBUG] request: appending token (%s)", maskString(c.Token))
+		if ro.Headers == nil {
+			ro.Headers = make(map[string]string)
+		}
+
+		ro.Headers[atlasTokenHeader] = c.Token
+	}
+
+	return c.rawRequest(verb, &u, ro)
+}
+
+func (c *Client) putFile(rawURL string, r io.Reader, size int64) error {
+	log.Printf("[INFO] putting file: %s", rawURL)
+
+	url, err := url.Parse(rawURL)
+	if err != nil {
+		return err
+	}
+
+	request, err := c.rawRequest("PUT", url, &RequestOptions{
+		Body:       r,
+		BodyLength: size,
+	})
+	if err != nil {
+		return err
+	}
+
+	if _, err := checkResp(c.HTTPClient.Do(request)); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+// rawRequest accepts a verb, URL, and RequestOptions struct and returns the
+// constructed http.Request and any errors that occurred
+func (c *Client) rawRequest(verb string, u *url.URL, ro *RequestOptions) (*http.Request, error) {
+	if verb == "" {
+		return nil, fmt.Errorf("client: missing verb")
+	}
+
+	if u == nil {
+		return nil, fmt.Errorf("client: missing URL")
+	}
+
+	if ro == nil {
+		return nil, fmt.Errorf("client: missing RequestOptions")
+	}
+
+	// Add the token and other params
+	var params = make(url.Values)
+	for k, v := range ro.Params {
+		params.Add(k, v)
+	}
+	u.RawQuery = params.Encode()
+
+	// Create the request object
+	request, err := http.NewRequest(verb, u.String(), ro.Body)
+	if err != nil {
+		return nil, err
+	}
+
+	// Set the User-Agent
+	request.Header.Set("User-Agent", userAgent)
+
+	// Add any headers (auth will be here if set)
+	for k, v := range ro.Headers {
+		request.Header.Add(k, v)
+	}
+
+	// Add content-length if we have it
+	if ro.BodyLength > 0 {
+		request.ContentLength = ro.BodyLength
+	}
+
+	log.Printf("[DEBUG] raw request: %#v", request)
+
+	return request, nil
+}
+
+// checkResp wraps http.Client.Do() and verifies that the request was
+// successful. A non-2xx response returns an error formatted to include any
+// validation problems reported by the server.
+func checkResp(resp *http.Response, err error) (*http.Response, error) {
+	// If the err is already there, there was an error higher up the chain, so
+	// just return that
+	if err != nil {
+		return resp, err
+	}
+
+	log.Printf("[INFO] response: %d (%s)", resp.StatusCode, resp.Status)
+	var buf bytes.Buffer
+	if _, err := io.Copy(&buf, resp.Body); err != nil {
+		log.Printf("[ERR] response: error copying response body")
+	} else {
+		log.Printf("[DEBUG] response: %s", buf.String())
+
+		// We are going to reset the response body, so we need to close the old
+		// one or else it will leak.
+		resp.Body.Close()
+		resp.Body = &bytesReadCloser{&buf}
+	}
+
+	switch resp.StatusCode {
+	case 200, 201, 202, 204:
+		return resp, nil
+	case 400, 422:
+		return nil, parseErr(resp)
+	case 401:
+		return nil, ErrAuth
+	case 404:
+		return nil, ErrNotFound
+	default:
+		return nil, fmt.Errorf("client: %s", resp.Status)
+	}
+}
+
+// parseErr is used to take an error JSON response and return a single string
+// for use in error messages.
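+//
+// For example, a 422 body such as
+//
+//	{"errors": ["this is an error", "this is another error"]}
+//
+// decodes into a RailsError whose Error() reads
+// "this is an error, this is another error".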
+func parseErr(r *http.Response) error { + re := &RailsError{} + + if err := decodeJSON(r, &re); err != nil { + return fmt.Errorf("error decoding JSON body: %s", err) + } + + return re +} + +// decodeJSON is used to JSON decode a body into an interface. +func decodeJSON(resp *http.Response, out interface{}) error { + defer resp.Body.Close() + dec := json.NewDecoder(resp.Body) + return dec.Decode(out) +} + +// bytesReadCloser is a simple wrapper around a bytes buffer that implements +// Close as a noop. +type bytesReadCloser struct { + *bytes.Buffer +} + +func (nrc *bytesReadCloser) Close() error { + // we don't actually have to do anything here, since the buffer is just some + // data in memory and the error is initialized to no-error + return nil +} diff --git a/vendor/github.com/hashicorp/atlas-go/v1/client_test.go b/vendor/github.com/hashicorp/atlas-go/v1/client_test.go new file mode 100644 index 000000000..c13ee3b50 --- /dev/null +++ b/vendor/github.com/hashicorp/atlas-go/v1/client_test.go @@ -0,0 +1,268 @@ +package atlas + +import ( + "net/url" + "os" + "reflect" + "strings" + "testing" +) + +func TestDefaultClient_url(t *testing.T) { + client := DefaultClient() + + if client.URL.String() != atlasDefaultEndpoint { + t.Fatalf("expected %q to be %q", client.URL.String(), atlasDefaultEndpoint) + } +} + +func TestDefaultClient_urlFromEnvVar(t *testing.T) { + defer os.Setenv(atlasEndpointEnvVar, os.Getenv(atlasEndpointEnvVar)) + otherEndpoint := "http://127.0.0.1:1234" + + err := os.Setenv(atlasEndpointEnvVar, otherEndpoint) + if err != nil { + t.Fatal(err) + } + + client := DefaultClient() + + if client.URL.String() != otherEndpoint { + t.Fatalf("expected %q to be %q", client.URL.String(), otherEndpoint) + } +} + +func TestNewClient_badURL(t *testing.T) { + _, err := NewClient("") + if err == nil { + t.Fatal("expected error, but nothing was returned") + } + + expected := "client: missing url" + if !strings.Contains(err.Error(), expected) { + t.Fatalf("expected %q to contain %q", err.Error(), expected) + } +} + +func TestNewClient_parsesURL(t *testing.T) { + client, err := NewClient("https://example.com/foo/bar") + if err != nil { + t.Fatal(err) + } + + expected := &url.URL{ + Scheme: "https", + Host: "example.com", + Path: "/foo/bar", + } + if !reflect.DeepEqual(client.URL, expected) { + t.Fatalf("expected %q to equal %q", client.URL, expected) + } +} + +func TestNewClient_setsDefaultHTTPClient(t *testing.T) { + _, err := NewClient("https://example.com/foo/bar") + if err != nil { + t.Fatal(err) + } +} + +func TestLogin_missingUsername(t *testing.T) { + client, err := NewClient("https://example.com/foo/bar") + if err != nil { + t.Fatal(err) + } + + _, err = client.Login("", "") + if err == nil { + t.Fatal("expected error, but nothing was returned") + } + + expected := "client: missing username" + if !strings.Contains(err.Error(), expected) { + t.Fatalf("expected %q to contain %q", err.Error(), expected) + } +} + +func TestLogin_missingPassword(t *testing.T) { + client, err := NewClient("https://example.com/foo/bar") + if err != nil { + t.Fatal(err) + } + + _, err = client.Login("username", "") + if err == nil { + t.Fatal("expected error, but nothing was returned") + } + + expected := "client: missing password" + if !strings.Contains(err.Error(), expected) { + t.Fatalf("expected %q to contain %q", err.Error(), expected) + } +} + +func TestLogin_serverErrorMessage(t *testing.T) { + server := newTestAtlasServer(t) + defer server.Stop() + + client, err := NewClient(server.URL.String()) + if 
err != nil { + t.Fatal(err) + } + + _, err = client.Login("username", "password") + if err == nil { + t.Fatal("expected error, but nothing was returned") + } + + if err != ErrAuth { + t.Fatalf("bad: %s", err) + } +} + +func TestLogin_success(t *testing.T) { + server := newTestAtlasServer(t) + defer server.Stop() + + client, err := NewClient(server.URL.String()) + if err != nil { + t.Fatal(err) + } + + token, err := client.Login("sethloves", "bacon") + if err != nil { + t.Fatal(err) + } + + if client.Token == "" { + t.Fatal("expected client token to be set") + } + + if token == "" { + t.Fatal("expected token to be returned") + } +} + +func TestRequest_tokenAuth(t *testing.T) { + server := newTestAtlasServer(t) + defer server.Stop() + + client, err := NewClient(server.URL.String()) + if err != nil { + t.Fatal(err) + } + client.Token = "a.atlasv1.b" + + request, err := client.Request("GET", "/api/v1/token", nil) + if err != nil { + t.Fatal(err) + } + + _, err = checkResp(client.HTTPClient.Do(request)) + if err != nil { + t.Fatal(err) + } +} + +func TestRequest_getsData(t *testing.T) { + server := newTestAtlasServer(t) + defer server.Stop() + + client, err := NewClient(server.URL.String()) + if err != nil { + t.Fatal(err) + } + + request, err := client.Request("GET", "/_status/200", nil) + if err != nil { + t.Fatal(err) + } + + if _, err := checkResp(client.HTTPClient.Do(request)); err != nil { + t.Fatal(err) + } +} + +func TestRequest_railsError(t *testing.T) { + server := newTestAtlasServer(t) + defer server.Stop() + + client, err := NewClient(server.URL.String()) + if err != nil { + t.Fatal(err) + } + + request, err := client.Request("GET", "/_rails-error", nil) + if err != nil { + t.Fatal(err) + } + + _, err = checkResp(client.HTTPClient.Do(request)) + if err == nil { + t.Fatal("expected error, but nothing was returned") + } + + expected := &RailsError{ + Errors: []string{ + "this is an error", + "this is another error", + }, + } + + if !reflect.DeepEqual(err, expected) { + t.Fatalf("expected %+v to be %+v", err, expected) + } +} + +func TestRequest_notFoundError(t *testing.T) { + server := newTestAtlasServer(t) + defer server.Stop() + + client, err := NewClient(server.URL.String()) + if err != nil { + t.Fatal(err) + } + + request, err := client.Request("GET", "/_status/404", nil) + if err != nil { + t.Fatal(err) + } + + _, err = checkResp(client.HTTPClient.Do(request)) + if err == nil { + t.Fatal("expected error, but nothing was returned") + } + + if err != ErrNotFound { + t.Fatalf("bad error: %#v", err) + } +} + +func TestRequestJSON_decodesData(t *testing.T) { + server := newTestAtlasServer(t) + defer server.Stop() + + client, err := NewClient(server.URL.String()) + if err != nil { + t.Fatal(err) + } + + request, err := client.Request("GET", "/_json", nil) + if err != nil { + t.Fatal(err) + } + + response, err := checkResp(client.HTTPClient.Do(request)) + if err != nil { + t.Fatal(err) + } + + var decoded struct{ Ok bool } + if err := decodeJSON(response, &decoded); err != nil { + t.Fatal(err) + } + + if !decoded.Ok { + t.Fatal("expected decoded response to be Ok, but was not") + } +} diff --git a/vendor/github.com/hashicorp/atlas-go/v1/terraform.go b/vendor/github.com/hashicorp/atlas-go/v1/terraform.go new file mode 100644 index 000000000..adeba2a11 --- /dev/null +++ b/vendor/github.com/hashicorp/atlas-go/v1/terraform.go @@ -0,0 +1,97 @@ +package atlas + +import ( + "bytes" + "encoding/json" + "fmt" + "io" + "log" +) + +// TerraformConfigVersion represents a single uploaded version of 
a
+// Terraform configuration.
+type TerraformConfigVersion struct {
+	Version   int
+	Remotes   []string          `json:"remotes"`
+	Metadata  map[string]string `json:"metadata"`
+	Variables map[string]string `json:"variables"`
+}
+
+// TerraformConfigLatest returns the latest Terraform configuration version.
+func (c *Client) TerraformConfigLatest(user, name string) (*TerraformConfigVersion, error) {
+	log.Printf("[INFO] getting terraform configuration %s/%s", user, name)
+
+	endpoint := fmt.Sprintf("/api/v1/terraform/configurations/%s/%s/versions/latest", user, name)
+	request, err := c.Request("GET", endpoint, nil)
+	if err != nil {
+		return nil, err
+	}
+
+	response, err := checkResp(c.HTTPClient.Do(request))
+	if err == ErrNotFound {
+		return nil, nil
+	}
+	if err != nil {
+		return nil, err
+	}
+
+	var wrapper tfConfigVersionWrapper
+	if err := decodeJSON(response, &wrapper); err != nil {
+		return nil, err
+	}
+
+	return wrapper.Version, nil
+}
+
+// CreateTerraformConfigVersion creates a new Terraform configuration
+// version and uploads a slug with it.
+func (c *Client) CreateTerraformConfigVersion(
+	user string, name string,
+	version *TerraformConfigVersion,
+	data io.Reader, size int64) (int, error) {
+	log.Printf("[INFO] creating terraform configuration %s/%s", user, name)
+
+	endpoint := fmt.Sprintf(
+		"/api/v1/terraform/configurations/%s/%s/versions", user, name)
+	body, err := json.Marshal(&tfConfigVersionWrapper{
+		Version: version,
+	})
+	if err != nil {
+		return 0, err
+	}
+
+	request, err := c.Request("POST", endpoint, &RequestOptions{
+		Body: bytes.NewReader(body),
+		Headers: map[string]string{
+			"Content-Type": "application/json",
+		},
+	})
+	if err != nil {
+		return 0, err
+	}
+
+	response, err := checkResp(c.HTTPClient.Do(request))
+	if err != nil {
+		return 0, err
+	}
+
+	var result tfConfigVersionCreate
+	if err := decodeJSON(response, &result); err != nil {
+		return 0, err
+	}
+
+	if err := c.putFile(result.UploadPath, data, size); err != nil {
+		return 0, err
+	}
+
+	return result.Version, nil
+}
+
+type tfConfigVersionCreate struct {
+	UploadPath string `json:"upload_path"`
+	Version    int
+}
+
+type tfConfigVersionWrapper struct {
+	Version *TerraformConfigVersion `json:"version"`
+}
diff --git a/vendor/github.com/hashicorp/atlas-go/v1/terraform_test.go b/vendor/github.com/hashicorp/atlas-go/v1/terraform_test.go
new file mode 100644
index 000000000..02aa255b5
--- /dev/null
+++ b/vendor/github.com/hashicorp/atlas-go/v1/terraform_test.go
@@ -0,0 +1,57 @@
+package atlas
+
+import (
+	"bytes"
+	"reflect"
+	"testing"
+)
+
+func TestTerraformConfigLatest(t *testing.T) {
+	server := newTestAtlasServer(t)
+	defer server.Stop()
+
+	client, err := NewClient(server.URL.String())
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	actual, err := client.TerraformConfigLatest("hashicorp", "existing")
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	expected := &TerraformConfigVersion{
+		Version:   5,
+		Metadata:  map[string]string{"foo": "bar"},
+		Variables: map[string]string{"foo": "bar"},
+	}
+
+	if !reflect.DeepEqual(actual, expected) {
+		t.Fatalf("%#v", actual)
+	}
+}
+
+func TestCreateTerraformConfigVersion(t *testing.T) {
+	server := newTestAtlasServer(t)
+	defer server.Stop()
+
+	client, err := NewClient(server.URL.String())
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	v := &TerraformConfigVersion{
+		Version:  5,
+		Metadata: map[string]string{"foo": "bar"},
+	}
+
+	data := new(bytes.Buffer)
+	vsn, err := client.CreateTerraformConfigVersion(
+		"hashicorp", "existing", v, data, int64(data.Len()))
+	if err !=
nil { + t.Fatal(err) + } + if vsn != 5 { + t.Fatalf("bad: %v", vsn) + } +} diff --git a/vendor/github.com/hashicorp/atlas-go/v1/util.go b/vendor/github.com/hashicorp/atlas-go/v1/util.go new file mode 100644 index 000000000..9aa0d2886 --- /dev/null +++ b/vendor/github.com/hashicorp/atlas-go/v1/util.go @@ -0,0 +1,22 @@ +package atlas + +import ( + "fmt" + "strings" +) + +// ParseSlug parses a slug of the format (x/y) into the x and y components. It +// accepts a string of the format "x/y" ("user/name" for example). If an empty +// string is given, an error is returned. If the given string is not a valid +// slug format, an error is returned. +func ParseSlug(slug string) (string, string, error) { + if slug == "" { + return "", "", fmt.Errorf("missing slug") + } + + parts := strings.Split(slug, "/") + if len(parts) != 2 { + return "", "", fmt.Errorf("malformed slug %q", slug) + } + return parts[0], parts[1], nil +} diff --git a/vendor/github.com/hashicorp/atlas-go/v1/util_test.go b/vendor/github.com/hashicorp/atlas-go/v1/util_test.go new file mode 100644 index 000000000..d41a3e988 --- /dev/null +++ b/vendor/github.com/hashicorp/atlas-go/v1/util_test.go @@ -0,0 +1,57 @@ +package atlas + +import ( + "strings" + "testing" +) + +func TestParseSlug_emptyString(t *testing.T) { + _, _, err := ParseSlug("") + if err == nil { + t.Fatal("expected error, but nothing was returned") + } + + expected := "missing slug" + if !strings.Contains(err.Error(), expected) { + t.Fatalf("expected %q to contain %q", err.Error(), expected) + } +} + +func TestParseSlug_noSlashes(t *testing.T) { + _, _, err := ParseSlug("bacon") + if err == nil { + t.Fatal("expected error, but nothing was returned") + } + + expected := "malformed slug" + if !strings.Contains(err.Error(), expected) { + t.Fatalf("expected %q to contain %q", err.Error(), expected) + } +} + +func TestParseSlug_multipleSlashes(t *testing.T) { + _, _, err := ParseSlug("bacon/is/delicious/but/this/is/not/valid") + if err == nil { + t.Fatal("expected error, but nothing was returned") + } + + expected := "malformed slug" + if !strings.Contains(err.Error(), expected) { + t.Fatalf("expected %q to contain %q", err.Error(), expected) + } +} + +func TestParseSlug_goodString(t *testing.T) { + user, name, err := ParseSlug("hashicorp/project") + if err != nil { + t.Fatal(err) + } + + if user != "hashicorp" { + t.Fatalf("expected %q to be %q", user, "hashicorp") + } + + if name != "project" { + t.Fatalf("expected %q to be %q", name, "project") + } +} diff --git a/vendor/github.com/hashicorp/consul/api/README.md b/vendor/github.com/hashicorp/consul/api/README.md new file mode 100644 index 000000000..7e64988f4 --- /dev/null +++ b/vendor/github.com/hashicorp/consul/api/README.md @@ -0,0 +1,43 @@ +Consul API client +================= + +This package provides the `api` package which attempts to +provide programmatic access to the full Consul API. + +Currently, all of the Consul APIs included in version 0.6.0 are supported. 
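+
+By default the client talks to a local agent at 127.0.0.1:8500 (see
+DefaultConfig). A sketch of pointing it elsewhere (the address below is only
+an example):
+
+```go
+config := api.DefaultConfig()
+config.Address = "10.0.0.1:8500"
+client, err := api.NewClient(config)
+if err != nil {
+	panic(err)
+}
+```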
+
+Documentation
+=============
+
+The full documentation is available on [Godoc](https://godoc.org/github.com/hashicorp/consul/api)
+
+Usage
+=====
+
+Below is an example of using the Consul client:
+
+```go
+// Get a new client
+client, err := api.NewClient(api.DefaultConfig())
+if err != nil {
+	panic(err)
+}
+
+// Get a handle to the KV API
+kv := client.KV()
+
+// PUT a new KV pair
+p := &api.KVPair{Key: "foo", Value: []byte("test")}
+_, err = kv.Put(p, nil)
+if err != nil {
+	panic(err)
+}
+
+// Lookup the pair
+pair, _, err := kv.Get("foo", nil)
+if err != nil {
+	panic(err)
+}
+fmt.Printf("KV: %v", pair)
+
+```
diff --git a/vendor/github.com/hashicorp/consul/api/acl.go b/vendor/github.com/hashicorp/consul/api/acl.go
new file mode 100644
index 000000000..c3fb0d53a
--- /dev/null
+++ b/vendor/github.com/hashicorp/consul/api/acl.go
@@ -0,0 +1,140 @@
+package api
+
+const (
+	// ACLClientType is the client type token
+	ACLClientType = "client"
+
+	// ACLManagementType is the management type token
+	ACLManagementType = "management"
+)
+
+// ACLEntry is used to represent an ACL entry
+type ACLEntry struct {
+	CreateIndex uint64
+	ModifyIndex uint64
+	ID          string
+	Name        string
+	Type        string
+	Rules       string
+}
+
+// ACL can be used to query the ACL endpoints
+type ACL struct {
+	c *Client
+}
+
+// ACL returns a handle to the ACL endpoints
+func (c *Client) ACL() *ACL {
+	return &ACL{c}
+}
+
+// Create is used to generate a new token with the given parameters
+func (a *ACL) Create(acl *ACLEntry, q *WriteOptions) (string, *WriteMeta, error) {
+	r := a.c.newRequest("PUT", "/v1/acl/create")
+	r.setWriteOptions(q)
+	r.obj = acl
+	rtt, resp, err := requireOK(a.c.doRequest(r))
+	if err != nil {
+		return "", nil, err
+	}
+	defer resp.Body.Close()
+
+	wm := &WriteMeta{RequestTime: rtt}
+	var out struct{ ID string }
+	if err := decodeBody(resp, &out); err != nil {
+		return "", nil, err
+	}
+	return out.ID, wm, nil
+}
+
+// Update is used to update the rules of an existing token
+func (a *ACL) Update(acl *ACLEntry, q *WriteOptions) (*WriteMeta, error) {
+	r := a.c.newRequest("PUT", "/v1/acl/update")
+	r.setWriteOptions(q)
+	r.obj = acl
+	rtt, resp, err := requireOK(a.c.doRequest(r))
+	if err != nil {
+		return nil, err
+	}
+	defer resp.Body.Close()
+
+	wm := &WriteMeta{RequestTime: rtt}
+	return wm, nil
+}
+
+// Destroy is used to destroy a given ACL token ID
+func (a *ACL) Destroy(id string, q *WriteOptions) (*WriteMeta, error) {
+	r := a.c.newRequest("PUT", "/v1/acl/destroy/"+id)
+	r.setWriteOptions(q)
+	rtt, resp, err := requireOK(a.c.doRequest(r))
+	if err != nil {
+		return nil, err
+	}
+	resp.Body.Close()
+
+	wm := &WriteMeta{RequestTime: rtt}
+	return wm, nil
+}
+
+// Clone is used to return a new token cloned from an existing one
+func (a *ACL) Clone(id string, q *WriteOptions) (string, *WriteMeta, error) {
+	r := a.c.newRequest("PUT", "/v1/acl/clone/"+id)
+	r.setWriteOptions(q)
+	rtt, resp, err := requireOK(a.c.doRequest(r))
+	if err != nil {
+		return "", nil, err
+	}
+	defer resp.Body.Close()
+
+	wm := &WriteMeta{RequestTime: rtt}
+	var out struct{ ID string }
+	if err := decodeBody(resp, &out); err != nil {
+		return "", nil, err
+	}
+	return out.ID, wm, nil
+}
+
+// Info is used to query for information about an ACL token
+func (a *ACL) Info(id string, q *QueryOptions) (*ACLEntry, *QueryMeta, error) {
+	r := a.c.newRequest("GET", "/v1/acl/info/"+id)
+	r.setQueryOptions(q)
+	rtt, resp, err := requireOK(a.c.doRequest(r))
+	if err != nil {
+		return nil, nil, err
+	}
+	defer resp.Body.Close()
+
+	qm
:= &QueryMeta{} + parseQueryMeta(resp, qm) + qm.RequestTime = rtt + + var entries []*ACLEntry + if err := decodeBody(resp, &entries); err != nil { + return nil, nil, err + } + if len(entries) > 0 { + return entries[0], qm, nil + } + return nil, qm, nil +} + +// List is used to get all the ACL tokens +func (a *ACL) List(q *QueryOptions) ([]*ACLEntry, *QueryMeta, error) { + r := a.c.newRequest("GET", "/v1/acl/list") + r.setQueryOptions(q) + rtt, resp, err := requireOK(a.c.doRequest(r)) + if err != nil { + return nil, nil, err + } + defer resp.Body.Close() + + qm := &QueryMeta{} + parseQueryMeta(resp, qm) + qm.RequestTime = rtt + + var entries []*ACLEntry + if err := decodeBody(resp, &entries); err != nil { + return nil, nil, err + } + return entries, qm, nil +} diff --git a/vendor/github.com/hashicorp/consul/api/acl_test.go b/vendor/github.com/hashicorp/consul/api/acl_test.go new file mode 100644 index 000000000..2a5207a6e --- /dev/null +++ b/vendor/github.com/hashicorp/consul/api/acl_test.go @@ -0,0 +1,128 @@ +package api + +import ( + "testing" +) + +func TestACL_CreateDestroy(t *testing.T) { + t.Parallel() + c, s := makeACLClient(t) + defer s.Stop() + + acl := c.ACL() + + ae := ACLEntry{ + Name: "API test", + Type: ACLClientType, + Rules: `key "" { policy = "deny" }`, + } + + id, wm, err := acl.Create(&ae, nil) + if err != nil { + t.Fatalf("err: %v", err) + } + + if wm.RequestTime == 0 { + t.Fatalf("bad: %v", wm) + } + + if id == "" { + t.Fatalf("invalid: %v", id) + } + + ae2, _, err := acl.Info(id, nil) + if err != nil { + t.Fatalf("err: %v", err) + } + + if ae2.Name != ae.Name || ae2.Type != ae.Type || ae2.Rules != ae.Rules { + t.Fatalf("Bad: %#v", ae2) + } + + wm, err = acl.Destroy(id, nil) + if err != nil { + t.Fatalf("err: %v", err) + } + + if wm.RequestTime == 0 { + t.Fatalf("bad: %v", wm) + } +} + +func TestACL_CloneDestroy(t *testing.T) { + t.Parallel() + c, s := makeACLClient(t) + defer s.Stop() + + acl := c.ACL() + + id, wm, err := acl.Clone(c.config.Token, nil) + if err != nil { + t.Fatalf("err: %v", err) + } + + if wm.RequestTime == 0 { + t.Fatalf("bad: %v", wm) + } + + if id == "" { + t.Fatalf("invalid: %v", id) + } + + wm, err = acl.Destroy(id, nil) + if err != nil { + t.Fatalf("err: %v", err) + } + + if wm.RequestTime == 0 { + t.Fatalf("bad: %v", wm) + } +} + +func TestACL_Info(t *testing.T) { + t.Parallel() + c, s := makeACLClient(t) + defer s.Stop() + + acl := c.ACL() + + ae, qm, err := acl.Info(c.config.Token, nil) + if err != nil { + t.Fatalf("err: %v", err) + } + + if qm.LastIndex == 0 { + t.Fatalf("bad: %v", qm) + } + if !qm.KnownLeader { + t.Fatalf("bad: %v", qm) + } + + if ae == nil || ae.ID != c.config.Token || ae.Type != ACLManagementType { + t.Fatalf("bad: %#v", ae) + } +} + +func TestACL_List(t *testing.T) { + t.Parallel() + c, s := makeACLClient(t) + defer s.Stop() + + acl := c.ACL() + + acls, qm, err := acl.List(nil) + if err != nil { + t.Fatalf("err: %v", err) + } + + if len(acls) < 2 { + t.Fatalf("bad: %v", acls) + } + + if qm.LastIndex == 0 { + t.Fatalf("bad: %v", qm) + } + if !qm.KnownLeader { + t.Fatalf("bad: %v", qm) + } +} diff --git a/vendor/github.com/hashicorp/consul/api/agent.go b/vendor/github.com/hashicorp/consul/api/agent.go new file mode 100644 index 000000000..e4466a651 --- /dev/null +++ b/vendor/github.com/hashicorp/consul/api/agent.go @@ -0,0 +1,337 @@ +package api + +import ( + "fmt" +) + +// AgentCheck represents a check known to the agent +type AgentCheck struct { + Node string + CheckID string + Name string + Status string + Notes string 
+ Output string + ServiceID string + ServiceName string +} + +// AgentService represents a service known to the agent +type AgentService struct { + ID string + Service string + Tags []string + Port int + Address string +} + +// AgentMember represents a cluster member known to the agent +type AgentMember struct { + Name string + Addr string + Port uint16 + Tags map[string]string + Status int + ProtocolMin uint8 + ProtocolMax uint8 + ProtocolCur uint8 + DelegateMin uint8 + DelegateMax uint8 + DelegateCur uint8 +} + +// AgentServiceRegistration is used to register a new service +type AgentServiceRegistration struct { + ID string `json:",omitempty"` + Name string `json:",omitempty"` + Tags []string `json:",omitempty"` + Port int `json:",omitempty"` + Address string `json:",omitempty"` + Check *AgentServiceCheck + Checks AgentServiceChecks +} + +// AgentCheckRegistration is used to register a new check +type AgentCheckRegistration struct { + ID string `json:",omitempty"` + Name string `json:",omitempty"` + Notes string `json:",omitempty"` + ServiceID string `json:",omitempty"` + AgentServiceCheck +} + +// AgentServiceCheck is used to create an associated +// check for a service +type AgentServiceCheck struct { + Script string `json:",omitempty"` + DockerContainerID string `json:",omitempty"` + Shell string `json:",omitempty"` // Only supported for Docker. + Interval string `json:",omitempty"` + Timeout string `json:",omitempty"` + TTL string `json:",omitempty"` + HTTP string `json:",omitempty"` + TCP string `json:",omitempty"` + Status string `json:",omitempty"` +} +type AgentServiceChecks []*AgentServiceCheck + +// Agent can be used to query the Agent endpoints +type Agent struct { + c *Client + + // cache the node name + nodeName string +} + +// Agent returns a handle to the agent endpoints +func (c *Client) Agent() *Agent { + return &Agent{c: c} +} + +// Self is used to query the agent we are speaking to for +// information about itself +func (a *Agent) Self() (map[string]map[string]interface{}, error) { + r := a.c.newRequest("GET", "/v1/agent/self") + _, resp, err := requireOK(a.c.doRequest(r)) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + var out map[string]map[string]interface{} + if err := decodeBody(resp, &out); err != nil { + return nil, err + } + return out, nil +} + +// NodeName is used to get the node name of the agent +func (a *Agent) NodeName() (string, error) { + if a.nodeName != "" { + return a.nodeName, nil + } + info, err := a.Self() + if err != nil { + return "", err + } + name := info["Config"]["NodeName"].(string) + a.nodeName = name + return name, nil +} + +// Checks returns the locally registered checks +func (a *Agent) Checks() (map[string]*AgentCheck, error) { + r := a.c.newRequest("GET", "/v1/agent/checks") + _, resp, err := requireOK(a.c.doRequest(r)) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + var out map[string]*AgentCheck + if err := decodeBody(resp, &out); err != nil { + return nil, err + } + return out, nil +} + +// Services returns the locally registered services +func (a *Agent) Services() (map[string]*AgentService, error) { + r := a.c.newRequest("GET", "/v1/agent/services") + _, resp, err := requireOK(a.c.doRequest(r)) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + var out map[string]*AgentService + if err := decodeBody(resp, &out); err != nil { + return nil, err + } + return out, nil +} + +// Members returns the known gossip members. 
The WAN +// flag can be used to query a server for WAN members. +func (a *Agent) Members(wan bool) ([]*AgentMember, error) { + r := a.c.newRequest("GET", "/v1/agent/members") + if wan { + r.params.Set("wan", "1") + } + _, resp, err := requireOK(a.c.doRequest(r)) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + var out []*AgentMember + if err := decodeBody(resp, &out); err != nil { + return nil, err + } + return out, nil +} + +// ServiceRegister is used to register a new service with +// the local agent +func (a *Agent) ServiceRegister(service *AgentServiceRegistration) error { + r := a.c.newRequest("PUT", "/v1/agent/service/register") + r.obj = service + _, resp, err := requireOK(a.c.doRequest(r)) + if err != nil { + return err + } + resp.Body.Close() + return nil +} + +// ServiceDeregister is used to deregister a service with +// the local agent +func (a *Agent) ServiceDeregister(serviceID string) error { + r := a.c.newRequest("PUT", "/v1/agent/service/deregister/"+serviceID) + _, resp, err := requireOK(a.c.doRequest(r)) + if err != nil { + return err + } + resp.Body.Close() + return nil +} + +// PassTTL is used to set a TTL check to the passing state +func (a *Agent) PassTTL(checkID, note string) error { + return a.UpdateTTL(checkID, note, "pass") +} + +// WarnTTL is used to set a TTL check to the warning state +func (a *Agent) WarnTTL(checkID, note string) error { + return a.UpdateTTL(checkID, note, "warn") +} + +// FailTTL is used to set a TTL check to the failing state +func (a *Agent) FailTTL(checkID, note string) error { + return a.UpdateTTL(checkID, note, "fail") +} + +// UpdateTTL is used to update the TTL of a check +func (a *Agent) UpdateTTL(checkID, note, status string) error { + switch status { + case "pass": + case "warn": + case "fail": + default: + return fmt.Errorf("Invalid status: %s", status) + } + endpoint := fmt.Sprintf("/v1/agent/check/%s/%s", status, checkID) + r := a.c.newRequest("PUT", endpoint) + r.params.Set("note", note) + _, resp, err := requireOK(a.c.doRequest(r)) + if err != nil { + return err + } + resp.Body.Close() + return nil +} + +// CheckRegister is used to register a new check with +// the local agent +func (a *Agent) CheckRegister(check *AgentCheckRegistration) error { + r := a.c.newRequest("PUT", "/v1/agent/check/register") + r.obj = check + _, resp, err := requireOK(a.c.doRequest(r)) + if err != nil { + return err + } + resp.Body.Close() + return nil +} + +// CheckDeregister is used to deregister a check with +// the local agent +func (a *Agent) CheckDeregister(checkID string) error { + r := a.c.newRequest("PUT", "/v1/agent/check/deregister/"+checkID) + _, resp, err := requireOK(a.c.doRequest(r)) + if err != nil { + return err + } + resp.Body.Close() + return nil +} + +// Join is used to instruct the agent to attempt a join to +// another cluster member +func (a *Agent) Join(addr string, wan bool) error { + r := a.c.newRequest("PUT", "/v1/agent/join/"+addr) + if wan { + r.params.Set("wan", "1") + } + _, resp, err := requireOK(a.c.doRequest(r)) + if err != nil { + return err + } + resp.Body.Close() + return nil +} + +// ForceLeave is used to have the agent eject a failed node +func (a *Agent) ForceLeave(node string) error { + r := a.c.newRequest("PUT", "/v1/agent/force-leave/"+node) + _, resp, err := requireOK(a.c.doRequest(r)) + if err != nil { + return err + } + resp.Body.Close() + return nil +} + +// EnableServiceMaintenance toggles service maintenance mode on +// for the given service ID. 
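+//
+// For illustration (the service ID and reason are placeholders):
+//
+//	if err := agent.EnableServiceMaintenance("redis", "upgrading"); err != nil {
+//		// handle error
+//	}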
+func (a *Agent) EnableServiceMaintenance(serviceID, reason string) error { + r := a.c.newRequest("PUT", "/v1/agent/service/maintenance/"+serviceID) + r.params.Set("enable", "true") + r.params.Set("reason", reason) + _, resp, err := requireOK(a.c.doRequest(r)) + if err != nil { + return err + } + resp.Body.Close() + return nil +} + +// DisableServiceMaintenance toggles service maintenance mode off +// for the given service ID. +func (a *Agent) DisableServiceMaintenance(serviceID string) error { + r := a.c.newRequest("PUT", "/v1/agent/service/maintenance/"+serviceID) + r.params.Set("enable", "false") + _, resp, err := requireOK(a.c.doRequest(r)) + if err != nil { + return err + } + resp.Body.Close() + return nil +} + +// EnableNodeMaintenance toggles node maintenance mode on for the +// agent we are connected to. +func (a *Agent) EnableNodeMaintenance(reason string) error { + r := a.c.newRequest("PUT", "/v1/agent/maintenance") + r.params.Set("enable", "true") + r.params.Set("reason", reason) + _, resp, err := requireOK(a.c.doRequest(r)) + if err != nil { + return err + } + resp.Body.Close() + return nil +} + +// DisableNodeMaintenance toggles node maintenance mode off for the +// agent we are connected to. +func (a *Agent) DisableNodeMaintenance() error { + r := a.c.newRequest("PUT", "/v1/agent/maintenance") + r.params.Set("enable", "false") + _, resp, err := requireOK(a.c.doRequest(r)) + if err != nil { + return err + } + resp.Body.Close() + return nil +} diff --git a/vendor/github.com/hashicorp/consul/api/agent_test.go b/vendor/github.com/hashicorp/consul/api/agent_test.go new file mode 100644 index 000000000..c49696aeb --- /dev/null +++ b/vendor/github.com/hashicorp/consul/api/agent_test.go @@ -0,0 +1,568 @@ +package api + +import ( + "strings" + "testing" +) + +func TestAgent_Self(t *testing.T) { + t.Parallel() + c, s := makeClient(t) + defer s.Stop() + + agent := c.Agent() + + info, err := agent.Self() + if err != nil { + t.Fatalf("err: %v", err) + } + + name := info["Config"]["NodeName"] + if name == "" { + t.Fatalf("bad: %v", info) + } +} + +func TestAgent_Members(t *testing.T) { + t.Parallel() + c, s := makeClient(t) + defer s.Stop() + + agent := c.Agent() + + members, err := agent.Members(false) + if err != nil { + t.Fatalf("err: %v", err) + } + + if len(members) != 1 { + t.Fatalf("bad: %v", members) + } +} + +func TestAgent_Services(t *testing.T) { + t.Parallel() + c, s := makeClient(t) + defer s.Stop() + + agent := c.Agent() + + reg := &AgentServiceRegistration{ + Name: "foo", + Tags: []string{"bar", "baz"}, + Port: 8000, + Check: &AgentServiceCheck{ + TTL: "15s", + }, + } + if err := agent.ServiceRegister(reg); err != nil { + t.Fatalf("err: %v", err) + } + + services, err := agent.Services() + if err != nil { + t.Fatalf("err: %v", err) + } + if _, ok := services["foo"]; !ok { + t.Fatalf("missing service: %v", services) + } + checks, err := agent.Checks() + if err != nil { + t.Fatalf("err: %v", err) + } + chk, ok := checks["service:foo"] + if !ok { + t.Fatalf("missing check: %v", checks) + } + + // Checks should default to critical + if chk.Status != "critical" { + t.Fatalf("Bad: %#v", chk) + } + + if err := agent.ServiceDeregister("foo"); err != nil { + t.Fatalf("err: %v", err) + } +} + +func TestAgent_Services_CheckPassing(t *testing.T) { + t.Parallel() + c, s := makeClient(t) + defer s.Stop() + + agent := c.Agent() + reg := &AgentServiceRegistration{ + Name: "foo", + Tags: []string{"bar", "baz"}, + Port: 8000, + Check: &AgentServiceCheck{ + TTL: "15s", + Status: "passing", + 
}, + } + if err := agent.ServiceRegister(reg); err != nil { + t.Fatalf("err: %v", err) + } + + services, err := agent.Services() + if err != nil { + t.Fatalf("err: %v", err) + } + if _, ok := services["foo"]; !ok { + t.Fatalf("missing service: %v", services) + } + + checks, err := agent.Checks() + if err != nil { + t.Fatalf("err: %v", err) + } + chk, ok := checks["service:foo"] + if !ok { + t.Fatalf("missing check: %v", checks) + } + + if chk.Status != "passing" { + t.Fatalf("Bad: %#v", chk) + } + if err := agent.ServiceDeregister("foo"); err != nil { + t.Fatalf("err: %v", err) + } +} + +func TestAgent_Services_CheckBadStatus(t *testing.T) { + t.Parallel() + c, s := makeClient(t) + defer s.Stop() + + agent := c.Agent() + reg := &AgentServiceRegistration{ + Name: "foo", + Tags: []string{"bar", "baz"}, + Port: 8000, + Check: &AgentServiceCheck{ + TTL: "15s", + Status: "fluffy", + }, + } + if err := agent.ServiceRegister(reg); err == nil { + t.Fatalf("bad status accepted") + } +} + +func TestAgent_ServiceAddress(t *testing.T) { + t.Parallel() + c, s := makeClient(t) + defer s.Stop() + + agent := c.Agent() + + reg1 := &AgentServiceRegistration{ + Name: "foo1", + Port: 8000, + Address: "192.168.0.42", + } + reg2 := &AgentServiceRegistration{ + Name: "foo2", + Port: 8000, + } + if err := agent.ServiceRegister(reg1); err != nil { + t.Fatalf("err: %v", err) + } + if err := agent.ServiceRegister(reg2); err != nil { + t.Fatalf("err: %v", err) + } + + services, err := agent.Services() + if err != nil { + t.Fatalf("err: %v", err) + } + + if _, ok := services["foo1"]; !ok { + t.Fatalf("missing service: %v", services) + } + if _, ok := services["foo2"]; !ok { + t.Fatalf("missing service: %v", services) + } + + if services["foo1"].Address != "192.168.0.42" { + t.Fatalf("missing Address field in service foo1: %v", services) + } + if services["foo2"].Address != "" { + t.Fatalf("missing Address field in service foo2: %v", services) + } + + if err := agent.ServiceDeregister("foo"); err != nil { + t.Fatalf("err: %v", err) + } +} + +func TestAgent_Services_MultipleChecks(t *testing.T) { + t.Parallel() + c, s := makeClient(t) + defer s.Stop() + + agent := c.Agent() + + reg := &AgentServiceRegistration{ + Name: "foo", + Tags: []string{"bar", "baz"}, + Port: 8000, + Checks: AgentServiceChecks{ + &AgentServiceCheck{ + TTL: "15s", + }, + &AgentServiceCheck{ + TTL: "30s", + }, + }, + } + if err := agent.ServiceRegister(reg); err != nil { + t.Fatalf("err: %v", err) + } + + services, err := agent.Services() + if err != nil { + t.Fatalf("err: %v", err) + } + if _, ok := services["foo"]; !ok { + t.Fatalf("missing service: %v", services) + } + + checks, err := agent.Checks() + if err != nil { + t.Fatalf("err: %v", err) + } + if _, ok := checks["service:foo:1"]; !ok { + t.Fatalf("missing check: %v", checks) + } + if _, ok := checks["service:foo:2"]; !ok { + t.Fatalf("missing check: %v", checks) + } +} + +func TestAgent_SetTTLStatus(t *testing.T) { + t.Parallel() + c, s := makeClient(t) + defer s.Stop() + + agent := c.Agent() + + reg := &AgentServiceRegistration{ + Name: "foo", + Check: &AgentServiceCheck{ + TTL: "15s", + }, + } + if err := agent.ServiceRegister(reg); err != nil { + t.Fatalf("err: %v", err) + } + + if err := agent.WarnTTL("service:foo", "test"); err != nil { + t.Fatalf("err: %v", err) + } + + checks, err := agent.Checks() + if err != nil { + t.Fatalf("err: %v", err) + } + chk, ok := checks["service:foo"] + if !ok { + t.Fatalf("missing check: %v", checks) + } + if chk.Status != "warning" { + t.Fatalf("Bad: 
%#v", chk) + } + if chk.Output != "test" { + t.Fatalf("Bad: %#v", chk) + } + + if err := agent.ServiceDeregister("foo"); err != nil { + t.Fatalf("err: %v", err) + } +} + +func TestAgent_Checks(t *testing.T) { + t.Parallel() + c, s := makeClient(t) + defer s.Stop() + + agent := c.Agent() + + reg := &AgentCheckRegistration{ + Name: "foo", + } + reg.TTL = "15s" + if err := agent.CheckRegister(reg); err != nil { + t.Fatalf("err: %v", err) + } + + checks, err := agent.Checks() + if err != nil { + t.Fatalf("err: %v", err) + } + chk, ok := checks["foo"] + if !ok { + t.Fatalf("missing check: %v", checks) + } + if chk.Status != "critical" { + t.Fatalf("check not critical: %v", chk) + } + + if err := agent.CheckDeregister("foo"); err != nil { + t.Fatalf("err: %v", err) + } +} + +func TestAgent_CheckStartPassing(t *testing.T) { + t.Parallel() + c, s := makeClient(t) + defer s.Stop() + + agent := c.Agent() + + reg := &AgentCheckRegistration{ + Name: "foo", + AgentServiceCheck: AgentServiceCheck{ + Status: "passing", + }, + } + reg.TTL = "15s" + if err := agent.CheckRegister(reg); err != nil { + t.Fatalf("err: %v", err) + } + + checks, err := agent.Checks() + if err != nil { + t.Fatalf("err: %v", err) + } + chk, ok := checks["foo"] + if !ok { + t.Fatalf("missing check: %v", checks) + } + if chk.Status != "passing" { + t.Fatalf("check not passing: %v", chk) + } + + if err := agent.CheckDeregister("foo"); err != nil { + t.Fatalf("err: %v", err) + } +} + +func TestAgent_Checks_serviceBound(t *testing.T) { + t.Parallel() + c, s := makeClient(t) + defer s.Stop() + + agent := c.Agent() + + // First register a service + serviceReg := &AgentServiceRegistration{ + Name: "redis", + } + if err := agent.ServiceRegister(serviceReg); err != nil { + t.Fatalf("err: %v", err) + } + + // Register a check bound to the service + reg := &AgentCheckRegistration{ + Name: "redischeck", + ServiceID: "redis", + } + reg.TTL = "15s" + if err := agent.CheckRegister(reg); err != nil { + t.Fatalf("err: %v", err) + } + + checks, err := agent.Checks() + if err != nil { + t.Fatalf("err: %v", err) + } + + check, ok := checks["redischeck"] + if !ok { + t.Fatalf("missing check: %v", checks) + } + if check.ServiceID != "redis" { + t.Fatalf("missing service association for check: %v", check) + } +} + +func TestAgent_Checks_Docker(t *testing.T) { + t.Parallel() + c, s := makeClient(t) + defer s.Stop() + + agent := c.Agent() + + // First register a service + serviceReg := &AgentServiceRegistration{ + Name: "redis", + } + if err := agent.ServiceRegister(serviceReg); err != nil { + t.Fatalf("err: %v", err) + } + + // Register a check bound to the service + reg := &AgentCheckRegistration{ + Name: "redischeck", + ServiceID: "redis", + AgentServiceCheck: AgentServiceCheck{ + DockerContainerID: "f972c95ebf0e", + Script: "/bin/true", + Shell: "/bin/bash", + Interval: "10s", + }, + } + if err := agent.CheckRegister(reg); err != nil { + t.Fatalf("err: %v", err) + } + + checks, err := agent.Checks() + if err != nil { + t.Fatalf("err: %v", err) + } + + check, ok := checks["redischeck"] + if !ok { + t.Fatalf("missing check: %v", checks) + } + if check.ServiceID != "redis" { + t.Fatalf("missing service association for check: %v", check) + } +} + +func TestAgent_Join(t *testing.T) { + t.Parallel() + c, s := makeClient(t) + defer s.Stop() + + agent := c.Agent() + + info, err := agent.Self() + if err != nil { + t.Fatalf("err: %v", err) + } + + // Join ourself + addr := info["Config"]["AdvertiseAddr"].(string) + err = agent.Join(addr, false) + if err != nil { 
+ t.Fatalf("err: %v", err) + } +} + +func TestAgent_ForceLeave(t *testing.T) { + t.Parallel() + c, s := makeClient(t) + defer s.Stop() + + agent := c.Agent() + + // Eject somebody + err := agent.ForceLeave("foo") + if err != nil { + t.Fatalf("err: %v", err) + } +} + +func TestServiceMaintenance(t *testing.T) { + t.Parallel() + c, s := makeClient(t) + defer s.Stop() + + agent := c.Agent() + + // First register a service + serviceReg := &AgentServiceRegistration{ + Name: "redis", + } + if err := agent.ServiceRegister(serviceReg); err != nil { + t.Fatalf("err: %v", err) + } + + // Enable maintenance mode + if err := agent.EnableServiceMaintenance("redis", "broken"); err != nil { + t.Fatalf("err: %s", err) + } + + // Ensure a critical check was added + checks, err := agent.Checks() + if err != nil { + t.Fatalf("err: %v", err) + } + found := false + for _, check := range checks { + if strings.Contains(check.CheckID, "maintenance") { + found = true + if check.Status != "critical" || check.Notes != "broken" { + t.Fatalf("bad: %#v", checks) + } + } + } + if !found { + t.Fatalf("bad: %#v", checks) + } + + // Disable maintenance mode + if err := agent.DisableServiceMaintenance("redis"); err != nil { + t.Fatalf("err: %s", err) + } + + // Ensure the critical health check was removed + checks, err = agent.Checks() + if err != nil { + t.Fatalf("err: %s", err) + } + for _, check := range checks { + if strings.Contains(check.CheckID, "maintenance") { + t.Fatalf("should have removed health check") + } + } +} + +func TestNodeMaintenance(t *testing.T) { + t.Parallel() + c, s := makeClient(t) + defer s.Stop() + + agent := c.Agent() + + // Enable maintenance mode + if err := agent.EnableNodeMaintenance("broken"); err != nil { + t.Fatalf("err: %s", err) + } + + // Check that a critical check was added + checks, err := agent.Checks() + if err != nil { + t.Fatalf("err: %s", err) + } + found := false + for _, check := range checks { + if strings.Contains(check.CheckID, "maintenance") { + found = true + if check.Status != "critical" || check.Notes != "broken" { + t.Fatalf("bad: %#v", checks) + } + } + } + if !found { + t.Fatalf("bad: %#v", checks) + } + + // Disable maintenance mode + if err := agent.DisableNodeMaintenance(); err != nil { + t.Fatalf("err: %s", err) + } + + // Ensure the check was removed + checks, err = agent.Checks() + if err != nil { + t.Fatalf("err: %s", err) + } + for _, check := range checks { + if strings.Contains(check.CheckID, "maintenance") { + t.Fatalf("should have removed health check") + } + } +} diff --git a/vendor/github.com/hashicorp/consul/api/api.go b/vendor/github.com/hashicorp/consul/api/api.go new file mode 100644 index 000000000..0a2a76e5d --- /dev/null +++ b/vendor/github.com/hashicorp/consul/api/api.go @@ -0,0 +1,475 @@ +package api + +import ( + "bytes" + "crypto/tls" + "encoding/json" + "fmt" + "io" + "log" + "net" + "net/http" + "net/url" + "os" + "strconv" + "strings" + "time" + + "github.com/hashicorp/go-cleanhttp" +) + +// QueryOptions are used to parameterize a query +type QueryOptions struct { + // Providing a datacenter overwrites the DC provided + // by the Config + Datacenter string + + // AllowStale allows any Consul server (non-leader) to service + // a read. This allows for lower latency and higher throughput + AllowStale bool + + // RequireConsistent forces the read to be fully consistent. + // This is more expensive but prevents ever performing a stale + // read. + RequireConsistent bool + + // WaitIndex is used to enable a blocking query. 
Waits + // until the timeout or the next index is reached + WaitIndex uint64 + + // WaitTime is used to bound the duration of a wait. + // Defaults to that of the Config, but can be overridden. + WaitTime time.Duration + + // Token is used to provide a per-request ACL token + // which overrides the agent's default token. + Token string + + // Near is used to provide a node name that will sort the results + // in ascending order based on the estimated round trip time from + // that node. Setting this to "_agent" will use the agent's node + // for the sort. + Near string +} + +// WriteOptions are used to parameterize a write +type WriteOptions struct { + // Providing a datacenter overwrites the DC provided + // by the Config + Datacenter string + + // Token is used to provide a per-request ACL token + // which overrides the agent's default token. + Token string +} + +// QueryMeta is used to return meta data about a query +type QueryMeta struct { + // LastIndex. This can be used as a WaitIndex to perform + // a blocking query + LastIndex uint64 + + // Time of last contact from the leader for the + // server servicing the request + LastContact time.Duration + + // Is there a known leader + KnownLeader bool + + // How long did the request take + RequestTime time.Duration +} + +// WriteMeta is used to return meta data about a write +type WriteMeta struct { + // How long did the request take + RequestTime time.Duration +} + +// HttpBasicAuth is used to authenticate http client with HTTP Basic Authentication +type HttpBasicAuth struct { + // Username to use for HTTP Basic Authentication + Username string + + // Password to use for HTTP Basic Authentication + Password string +} + +// Config is used to configure the creation of a client +type Config struct { + // Address is the address of the Consul server + Address string + + // Scheme is the URI scheme for the Consul server + Scheme string + + // Datacenter to use. If not provided, the default agent datacenter is used. + Datacenter string + + // HttpClient is the client to use. Default will be + // used if not provided. + HttpClient *http.Client + + // HttpAuth is the auth info to use for http access. + HttpAuth *HttpBasicAuth + + // WaitTime limits how long a Watch will block. If not provided, + // the agent default values will be used. + WaitTime time.Duration + + // Token is used to provide a per-request ACL token + // which overrides the agent's default token. 
+ Token string +} + +// DefaultConfig returns a default configuration for the client +func DefaultConfig() *Config { + config := &Config{ + Address: "127.0.0.1:8500", + Scheme: "http", + HttpClient: cleanhttp.DefaultClient(), + } + + if addr := os.Getenv("CONSUL_HTTP_ADDR"); addr != "" { + config.Address = addr + } + + if token := os.Getenv("CONSUL_HTTP_TOKEN"); token != "" { + config.Token = token + } + + if auth := os.Getenv("CONSUL_HTTP_AUTH"); auth != "" { + var username, password string + if strings.Contains(auth, ":") { + split := strings.SplitN(auth, ":", 2) + username = split[0] + password = split[1] + } else { + username = auth + } + + config.HttpAuth = &HttpBasicAuth{ + Username: username, + Password: password, + } + } + + if ssl := os.Getenv("CONSUL_HTTP_SSL"); ssl != "" { + enabled, err := strconv.ParseBool(ssl) + if err != nil { + log.Printf("[WARN] client: could not parse CONSUL_HTTP_SSL: %s", err) + } + + if enabled { + config.Scheme = "https" + } + } + + if verify := os.Getenv("CONSUL_HTTP_SSL_VERIFY"); verify != "" { + doVerify, err := strconv.ParseBool(verify) + if err != nil { + log.Printf("[WARN] client: could not parse CONSUL_HTTP_SSL_VERIFY: %s", err) + } + + if !doVerify { + transport := cleanhttp.DefaultTransport() + transport.TLSClientConfig = &tls.Config{ + InsecureSkipVerify: true, + } + config.HttpClient.Transport = transport + } + } + + return config +} + +// Client provides a client to the Consul API +type Client struct { + config Config +} + +// NewClient returns a new client +func NewClient(config *Config) (*Client, error) { + // bootstrap the config + defConfig := DefaultConfig() + + if len(config.Address) == 0 { + config.Address = defConfig.Address + } + + if len(config.Scheme) == 0 { + config.Scheme = defConfig.Scheme + } + + if config.HttpClient == nil { + config.HttpClient = defConfig.HttpClient + } + + if parts := strings.SplitN(config.Address, "unix://", 2); len(parts) == 2 { + trans := cleanhttp.DefaultTransport() + trans.Dial = func(_, _ string) (net.Conn, error) { + return net.Dial("unix", parts[1]) + } + config.HttpClient = &http.Client{ + Transport: trans, + } + config.Address = parts[1] + } + + client := &Client{ + config: *config, + } + return client, nil +} + +// request is used to help build up a request +type request struct { + config *Config + method string + url *url.URL + params url.Values + body io.Reader + obj interface{} +} + +// setQueryOptions is used to annotate the request with +// additional query options +func (r *request) setQueryOptions(q *QueryOptions) { + if q == nil { + return + } + if q.Datacenter != "" { + r.params.Set("dc", q.Datacenter) + } + if q.AllowStale { + r.params.Set("stale", "") + } + if q.RequireConsistent { + r.params.Set("consistent", "") + } + if q.WaitIndex != 0 { + r.params.Set("index", strconv.FormatUint(q.WaitIndex, 10)) + } + if q.WaitTime != 0 { + r.params.Set("wait", durToMsec(q.WaitTime)) + } + if q.Token != "" { + r.params.Set("token", q.Token) + } + if q.Near != "" { + r.params.Set("near", q.Near) + } +} + +// durToMsec converts a duration to a millisecond specified string. If the +// user selected a positive value that rounds to 0 ms, then we will use 1 ms +// so they get a short delay, otherwise Consul will translate the 0 ms into +// a huge default delay. +func durToMsec(dur time.Duration) string { + ms := dur / time.Millisecond + if dur > 0 && ms == 0 { + ms = 1 + } + return fmt.Sprintf("%dms", ms) +} + +// serverError is a string we look for to detect 500 errors. 
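+// Callers can use IsServerError (below) to decide whether a failed request
+// is worth retrying. A minimal sketch; the one-shot retry policy and the
+// "web/config" key are illustrative, not part of this package:
+//
+//	kv := client.KV()
+//	pair, _, err := kv.Get("web/config", nil)
+//	if err != nil && IsServerError(err) {
+//		time.Sleep(time.Second) // 500s are usually transient; retry once
+//		pair, _, err = kv.Get("web/config", nil)
+//	}
+//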
+const serverError = "Unexpected response code: 500" + +// IsServerError returns true for 500 errors from the Consul servers, these are +// usually retryable at a later time. +func IsServerError(err error) bool { + if err == nil { + return false + } + + // TODO (slackpad) - Make a real error type here instead of using + // a string check. + return strings.Contains(err.Error(), serverError) +} + +// setWriteOptions is used to annotate the request with +// additional write options +func (r *request) setWriteOptions(q *WriteOptions) { + if q == nil { + return + } + if q.Datacenter != "" { + r.params.Set("dc", q.Datacenter) + } + if q.Token != "" { + r.params.Set("token", q.Token) + } +} + +// toHTTP converts the request to an HTTP request +func (r *request) toHTTP() (*http.Request, error) { + // Encode the query parameters + r.url.RawQuery = r.params.Encode() + + // Check if we should encode the body + if r.body == nil && r.obj != nil { + if b, err := encodeBody(r.obj); err != nil { + return nil, err + } else { + r.body = b + } + } + + // Create the HTTP request + req, err := http.NewRequest(r.method, r.url.RequestURI(), r.body) + if err != nil { + return nil, err + } + + req.URL.Host = r.url.Host + req.URL.Scheme = r.url.Scheme + req.Host = r.url.Host + + // Setup auth + if r.config.HttpAuth != nil { + req.SetBasicAuth(r.config.HttpAuth.Username, r.config.HttpAuth.Password) + } + + return req, nil +} + +// newRequest is used to create a new request +func (c *Client) newRequest(method, path string) *request { + r := &request{ + config: &c.config, + method: method, + url: &url.URL{ + Scheme: c.config.Scheme, + Host: c.config.Address, + Path: path, + }, + params: make(map[string][]string), + } + if c.config.Datacenter != "" { + r.params.Set("dc", c.config.Datacenter) + } + if c.config.WaitTime != 0 { + r.params.Set("wait", durToMsec(r.config.WaitTime)) + } + if c.config.Token != "" { + r.params.Set("token", r.config.Token) + } + return r +} + +// doRequest runs a request with our client +func (c *Client) doRequest(r *request) (time.Duration, *http.Response, error) { + req, err := r.toHTTP() + if err != nil { + return 0, nil, err + } + start := time.Now() + resp, err := c.config.HttpClient.Do(req) + diff := time.Now().Sub(start) + return diff, resp, err +} + +// Query is used to do a GET request against an endpoint +// and deserialize the response into an interface using +// standard Consul conventions. +func (c *Client) query(endpoint string, out interface{}, q *QueryOptions) (*QueryMeta, error) { + r := c.newRequest("GET", endpoint) + r.setQueryOptions(q) + rtt, resp, err := requireOK(c.doRequest(r)) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + qm := &QueryMeta{} + parseQueryMeta(resp, qm) + qm.RequestTime = rtt + + if err := decodeBody(resp, out); err != nil { + return nil, err + } + return qm, nil +} + +// write is used to do a PUT request against an endpoint +// and serialize/deserialized using the standard Consul conventions. 
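+// A hypothetical endpoint wrapper would call this helper in the following
+// shape (the path and the payload names here are illustrative only):
+//
+//	func (c *Client) exampleRegister(payload, out interface{}) (*WriteMeta, error) {
+//		return c.write("/v1/example/register", payload, out, nil)
+//	}
+//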
+func (c *Client) write(endpoint string, in, out interface{}, q *WriteOptions) (*WriteMeta, error) { + r := c.newRequest("PUT", endpoint) + r.setWriteOptions(q) + r.obj = in + rtt, resp, err := requireOK(c.doRequest(r)) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + wm := &WriteMeta{RequestTime: rtt} + if out != nil { + if err := decodeBody(resp, &out); err != nil { + return nil, err + } + } + return wm, nil +} + +// parseQueryMeta is used to help parse query meta-data +func parseQueryMeta(resp *http.Response, q *QueryMeta) error { + header := resp.Header + + // Parse the X-Consul-Index + index, err := strconv.ParseUint(header.Get("X-Consul-Index"), 10, 64) + if err != nil { + return fmt.Errorf("Failed to parse X-Consul-Index: %v", err) + } + q.LastIndex = index + + // Parse the X-Consul-LastContact + last, err := strconv.ParseUint(header.Get("X-Consul-LastContact"), 10, 64) + if err != nil { + return fmt.Errorf("Failed to parse X-Consul-LastContact: %v", err) + } + q.LastContact = time.Duration(last) * time.Millisecond + + // Parse the X-Consul-KnownLeader + switch header.Get("X-Consul-KnownLeader") { + case "true": + q.KnownLeader = true + default: + q.KnownLeader = false + } + return nil +} + +// decodeBody is used to JSON decode a body +func decodeBody(resp *http.Response, out interface{}) error { + dec := json.NewDecoder(resp.Body) + return dec.Decode(out) +} + +// encodeBody is used to encode a request body +func encodeBody(obj interface{}) (io.Reader, error) { + buf := bytes.NewBuffer(nil) + enc := json.NewEncoder(buf) + if err := enc.Encode(obj); err != nil { + return nil, err + } + return buf, nil +} + +// requireOK is used to wrap doRequest and check for a 200 +func requireOK(d time.Duration, resp *http.Response, e error) (time.Duration, *http.Response, error) { + if e != nil { + if resp != nil { + resp.Body.Close() + } + return d, nil, e + } + if resp.StatusCode != 200 { + var buf bytes.Buffer + io.Copy(&buf, resp.Body) + resp.Body.Close() + return d, nil, fmt.Errorf("Unexpected response code: %d (%s)", resp.StatusCode, buf.Bytes()) + } + return d, resp, nil +} diff --git a/vendor/github.com/hashicorp/consul/api/api_test.go b/vendor/github.com/hashicorp/consul/api/api_test.go new file mode 100644 index 000000000..c2d178dde --- /dev/null +++ b/vendor/github.com/hashicorp/consul/api/api_test.go @@ -0,0 +1,288 @@ +package api + +import ( + crand "crypto/rand" + "fmt" + "io/ioutil" + "net/http" + "os" + "path/filepath" + "runtime" + "testing" + "time" + + "github.com/hashicorp/consul/testutil" +) + +type configCallback func(c *Config) + +func makeClient(t *testing.T) (*Client, *testutil.TestServer) { + return makeClientWithConfig(t, nil, nil) +} + +func makeACLClient(t *testing.T) (*Client, *testutil.TestServer) { + return makeClientWithConfig(t, func(clientConfig *Config) { + clientConfig.Token = "root" + }, func(serverConfig *testutil.TestServerConfig) { + serverConfig.ACLMasterToken = "root" + serverConfig.ACLDatacenter = "dc1" + serverConfig.ACLDefaultPolicy = "deny" + }) +} + +func makeClientWithConfig( + t *testing.T, + cb1 configCallback, + cb2 testutil.ServerConfigCallback) (*Client, *testutil.TestServer) { + + // Make client config + conf := DefaultConfig() + if cb1 != nil { + cb1(conf) + } + + // Create server + server := testutil.NewTestServerConfig(t, cb2) + conf.Address = server.HTTPAddr + + // Create client + client, err := NewClient(conf) + if err != nil { + t.Fatalf("err: %v", err) + } + + return client, server +} + +func testKey() string { + 
buf := make([]byte, 16) + if _, err := crand.Read(buf); err != nil { + panic(fmt.Errorf("Failed to read random bytes: %v", err)) + } + + return fmt.Sprintf("%08x-%04x-%04x-%04x-%12x", + buf[0:4], + buf[4:6], + buf[6:8], + buf[8:10], + buf[10:16]) +} + +func TestDefaultConfig_env(t *testing.T) { + t.Parallel() + addr := "1.2.3.4:5678" + token := "abcd1234" + auth := "username:password" + + os.Setenv("CONSUL_HTTP_ADDR", addr) + defer os.Setenv("CONSUL_HTTP_ADDR", "") + os.Setenv("CONSUL_HTTP_TOKEN", token) + defer os.Setenv("CONSUL_HTTP_TOKEN", "") + os.Setenv("CONSUL_HTTP_AUTH", auth) + defer os.Setenv("CONSUL_HTTP_AUTH", "") + os.Setenv("CONSUL_HTTP_SSL", "1") + defer os.Setenv("CONSUL_HTTP_SSL", "") + os.Setenv("CONSUL_HTTP_SSL_VERIFY", "0") + defer os.Setenv("CONSUL_HTTP_SSL_VERIFY", "") + + config := DefaultConfig() + + if config.Address != addr { + t.Errorf("expected %q to be %q", config.Address, addr) + } + + if config.Token != token { + t.Errorf("expected %q to be %q", config.Token, token) + } + + if config.HttpAuth == nil { + t.Fatalf("expected HttpAuth to be enabled") + } + if config.HttpAuth.Username != "username" { + t.Errorf("expected %q to be %q", config.HttpAuth.Username, "username") + } + if config.HttpAuth.Password != "password" { + t.Errorf("expected %q to be %q", config.HttpAuth.Password, "password") + } + + if config.Scheme != "https" { + t.Errorf("expected %q to be %q", config.Scheme, "https") + } + + if !config.HttpClient.Transport.(*http.Transport).TLSClientConfig.InsecureSkipVerify { + t.Errorf("expected SSL verification to be off") + } +} + +func TestSetQueryOptions(t *testing.T) { + t.Parallel() + c, s := makeClient(t) + defer s.Stop() + + r := c.newRequest("GET", "/v1/kv/foo") + q := &QueryOptions{ + Datacenter: "foo", + AllowStale: true, + RequireConsistent: true, + WaitIndex: 1000, + WaitTime: 100 * time.Second, + Token: "12345", + Near: "nodex", + } + r.setQueryOptions(q) + + if r.params.Get("dc") != "foo" { + t.Fatalf("bad: %v", r.params) + } + if _, ok := r.params["stale"]; !ok { + t.Fatalf("bad: %v", r.params) + } + if _, ok := r.params["consistent"]; !ok { + t.Fatalf("bad: %v", r.params) + } + if r.params.Get("index") != "1000" { + t.Fatalf("bad: %v", r.params) + } + if r.params.Get("wait") != "100000ms" { + t.Fatalf("bad: %v", r.params) + } + if r.params.Get("token") != "12345" { + t.Fatalf("bad: %v", r.params) + } + if r.params.Get("near") != "nodex" { + t.Fatalf("bad: %v", r.params) + } +} + +func TestSetWriteOptions(t *testing.T) { + t.Parallel() + c, s := makeClient(t) + defer s.Stop() + + r := c.newRequest("GET", "/v1/kv/foo") + q := &WriteOptions{ + Datacenter: "foo", + Token: "23456", + } + r.setWriteOptions(q) + + if r.params.Get("dc") != "foo" { + t.Fatalf("bad: %v", r.params) + } + if r.params.Get("token") != "23456" { + t.Fatalf("bad: %v", r.params) + } +} + +func TestRequestToHTTP(t *testing.T) { + t.Parallel() + c, s := makeClient(t) + defer s.Stop() + + r := c.newRequest("DELETE", "/v1/kv/foo") + q := &QueryOptions{ + Datacenter: "foo", + } + r.setQueryOptions(q) + req, err := r.toHTTP() + if err != nil { + t.Fatalf("err: %v", err) + } + + if req.Method != "DELETE" { + t.Fatalf("bad: %v", req) + } + if req.URL.RequestURI() != "/v1/kv/foo?dc=foo" { + t.Fatalf("bad: %v", req) + } +} + +func TestParseQueryMeta(t *testing.T) { + t.Parallel() + resp := &http.Response{ + Header: make(map[string][]string), + } + resp.Header.Set("X-Consul-Index", "12345") + resp.Header.Set("X-Consul-LastContact", "80") + resp.Header.Set("X-Consul-KnownLeader", "true") 
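+	// parseQueryMeta (api.go) should map these headers onto QueryMeta:
+	// X-Consul-Index -> LastIndex, X-Consul-LastContact (milliseconds)
+	// -> LastContact, and X-Consul-KnownLeader -> KnownLeader.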
+ + qm := &QueryMeta{} + if err := parseQueryMeta(resp, qm); err != nil { + t.Fatalf("err: %v", err) + } + + if qm.LastIndex != 12345 { + t.Fatalf("Bad: %v", qm) + } + if qm.LastContact != 80*time.Millisecond { + t.Fatalf("Bad: %v", qm) + } + if !qm.KnownLeader { + t.Fatalf("Bad: %v", qm) + } +} + +func TestAPI_UnixSocket(t *testing.T) { + t.Parallel() + if runtime.GOOS == "windows" { + t.SkipNow() + } + + tempDir, err := ioutil.TempDir("", "consul") + if err != nil { + t.Fatalf("err: %s", err) + } + defer os.RemoveAll(tempDir) + socket := filepath.Join(tempDir, "test.sock") + + c, s := makeClientWithConfig(t, func(c *Config) { + c.Address = "unix://" + socket + }, func(c *testutil.TestServerConfig) { + c.Addresses = &testutil.TestAddressConfig{ + HTTP: "unix://" + socket, + } + }) + defer s.Stop() + + agent := c.Agent() + + info, err := agent.Self() + if err != nil { + t.Fatalf("err: %s", err) + } + if info["Config"]["NodeName"] == "" { + t.Fatalf("bad: %v", info) + } +} + +func TestAPI_durToMsec(t *testing.T) { + if ms := durToMsec(0); ms != "0ms" { + t.Fatalf("bad: %s", ms) + } + + if ms := durToMsec(time.Millisecond); ms != "1ms" { + t.Fatalf("bad: %s", ms) + } + + if ms := durToMsec(time.Microsecond); ms != "1ms" { + t.Fatalf("bad: %s", ms) + } + + if ms := durToMsec(5 * time.Millisecond); ms != "5ms" { + t.Fatalf("bad: %s", ms) + } +} + +func TestAPI_IsServerError(t *testing.T) { + if IsServerError(nil) { + t.Fatalf("should not be a server error") + } + + if IsServerError(fmt.Errorf("not the error you are looking for")) { + t.Fatalf("should not be a server error") + } + + if !IsServerError(fmt.Errorf(serverError)) { + t.Fatalf("should be a server error") + } +} diff --git a/vendor/github.com/hashicorp/consul/api/catalog.go b/vendor/github.com/hashicorp/consul/api/catalog.go new file mode 100644 index 000000000..cf64bd909 --- /dev/null +++ b/vendor/github.com/hashicorp/consul/api/catalog.go @@ -0,0 +1,182 @@ +package api + +type Node struct { + Node string + Address string +} + +type CatalogService struct { + Node string + Address string + ServiceID string + ServiceName string + ServiceAddress string + ServiceTags []string + ServicePort int +} + +type CatalogNode struct { + Node *Node + Services map[string]*AgentService +} + +type CatalogRegistration struct { + Node string + Address string + Datacenter string + Service *AgentService + Check *AgentCheck +} + +type CatalogDeregistration struct { + Node string + Address string + Datacenter string + ServiceID string + CheckID string +} + +// Catalog can be used to query the Catalog endpoints +type Catalog struct { + c *Client +} + +// Catalog returns a handle to the catalog endpoints +func (c *Client) Catalog() *Catalog { + return &Catalog{c} +} + +func (c *Catalog) Register(reg *CatalogRegistration, q *WriteOptions) (*WriteMeta, error) { + r := c.c.newRequest("PUT", "/v1/catalog/register") + r.setWriteOptions(q) + r.obj = reg + rtt, resp, err := requireOK(c.c.doRequest(r)) + if err != nil { + return nil, err + } + resp.Body.Close() + + wm := &WriteMeta{} + wm.RequestTime = rtt + + return wm, nil +} + +func (c *Catalog) Deregister(dereg *CatalogDeregistration, q *WriteOptions) (*WriteMeta, error) { + r := c.c.newRequest("PUT", "/v1/catalog/deregister") + r.setWriteOptions(q) + r.obj = dereg + rtt, resp, err := requireOK(c.c.doRequest(r)) + if err != nil { + return nil, err + } + resp.Body.Close() + + wm := &WriteMeta{} + wm.RequestTime = rtt + + return wm, nil +} + +// Datacenters is used to query for all the known datacenters +func (c 
*Catalog) Datacenters() ([]string, error) { + r := c.c.newRequest("GET", "/v1/catalog/datacenters") + _, resp, err := requireOK(c.c.doRequest(r)) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + var out []string + if err := decodeBody(resp, &out); err != nil { + return nil, err + } + return out, nil +} + +// Nodes is used to query all the known nodes +func (c *Catalog) Nodes(q *QueryOptions) ([]*Node, *QueryMeta, error) { + r := c.c.newRequest("GET", "/v1/catalog/nodes") + r.setQueryOptions(q) + rtt, resp, err := requireOK(c.c.doRequest(r)) + if err != nil { + return nil, nil, err + } + defer resp.Body.Close() + + qm := &QueryMeta{} + parseQueryMeta(resp, qm) + qm.RequestTime = rtt + + var out []*Node + if err := decodeBody(resp, &out); err != nil { + return nil, nil, err + } + return out, qm, nil +} + +// Services is used to query for all known services +func (c *Catalog) Services(q *QueryOptions) (map[string][]string, *QueryMeta, error) { + r := c.c.newRequest("GET", "/v1/catalog/services") + r.setQueryOptions(q) + rtt, resp, err := requireOK(c.c.doRequest(r)) + if err != nil { + return nil, nil, err + } + defer resp.Body.Close() + + qm := &QueryMeta{} + parseQueryMeta(resp, qm) + qm.RequestTime = rtt + + var out map[string][]string + if err := decodeBody(resp, &out); err != nil { + return nil, nil, err + } + return out, qm, nil +} + +// Service is used to query catalog entries for a given service +func (c *Catalog) Service(service, tag string, q *QueryOptions) ([]*CatalogService, *QueryMeta, error) { + r := c.c.newRequest("GET", "/v1/catalog/service/"+service) + r.setQueryOptions(q) + if tag != "" { + r.params.Set("tag", tag) + } + rtt, resp, err := requireOK(c.c.doRequest(r)) + if err != nil { + return nil, nil, err + } + defer resp.Body.Close() + + qm := &QueryMeta{} + parseQueryMeta(resp, qm) + qm.RequestTime = rtt + + var out []*CatalogService + if err := decodeBody(resp, &out); err != nil { + return nil, nil, err + } + return out, qm, nil +} + +// Node is used to query for service information about a single node +func (c *Catalog) Node(node string, q *QueryOptions) (*CatalogNode, *QueryMeta, error) { + r := c.c.newRequest("GET", "/v1/catalog/node/"+node) + r.setQueryOptions(q) + rtt, resp, err := requireOK(c.c.doRequest(r)) + if err != nil { + return nil, nil, err + } + defer resp.Body.Close() + + qm := &QueryMeta{} + parseQueryMeta(resp, qm) + qm.RequestTime = rtt + + var out *CatalogNode + if err := decodeBody(resp, &out); err != nil { + return nil, nil, err + } + return out, qm, nil +} diff --git a/vendor/github.com/hashicorp/consul/api/catalog_test.go b/vendor/github.com/hashicorp/consul/api/catalog_test.go new file mode 100644 index 000000000..bb8be25b0 --- /dev/null +++ b/vendor/github.com/hashicorp/consul/api/catalog_test.go @@ -0,0 +1,279 @@ +package api + +import ( + "fmt" + "testing" + + "github.com/hashicorp/consul/testutil" +) + +func TestCatalog_Datacenters(t *testing.T) { + t.Parallel() + c, s := makeClient(t) + defer s.Stop() + + catalog := c.Catalog() + + testutil.WaitForResult(func() (bool, error) { + datacenters, err := catalog.Datacenters() + if err != nil { + return false, err + } + + if len(datacenters) == 0 { + return false, fmt.Errorf("Bad: %v", datacenters) + } + + return true, nil + }, func(err error) { + t.Fatalf("err: %s", err) + }) +} + +func TestCatalog_Nodes(t *testing.T) { + t.Parallel() + c, s := makeClient(t) + defer s.Stop() + + catalog := c.Catalog() + + testutil.WaitForResult(func() (bool, error) { + nodes, meta, err := 
catalog.Nodes(nil) + if err != nil { + return false, err + } + + if meta.LastIndex == 0 { + return false, fmt.Errorf("Bad: %v", meta) + } + + if len(nodes) == 0 { + return false, fmt.Errorf("Bad: %v", nodes) + } + + return true, nil + }, func(err error) { + t.Fatalf("err: %s", err) + }) +} + +func TestCatalog_Services(t *testing.T) { + t.Parallel() + c, s := makeClient(t) + defer s.Stop() + + catalog := c.Catalog() + + testutil.WaitForResult(func() (bool, error) { + services, meta, err := catalog.Services(nil) + if err != nil { + return false, err + } + + if meta.LastIndex == 0 { + return false, fmt.Errorf("Bad: %v", meta) + } + + if len(services) == 0 { + return false, fmt.Errorf("Bad: %v", services) + } + + return true, nil + }, func(err error) { + t.Fatalf("err: %s", err) + }) +} + +func TestCatalog_Service(t *testing.T) { + t.Parallel() + c, s := makeClient(t) + defer s.Stop() + + catalog := c.Catalog() + + testutil.WaitForResult(func() (bool, error) { + services, meta, err := catalog.Service("consul", "", nil) + if err != nil { + return false, err + } + + if meta.LastIndex == 0 { + return false, fmt.Errorf("Bad: %v", meta) + } + + if len(services) == 0 { + return false, fmt.Errorf("Bad: %v", services) + } + + return true, nil + }, func(err error) { + t.Fatalf("err: %s", err) + }) +} + +func TestCatalog_Node(t *testing.T) { + t.Parallel() + c, s := makeClient(t) + defer s.Stop() + + catalog := c.Catalog() + name, _ := c.Agent().NodeName() + + testutil.WaitForResult(func() (bool, error) { + info, meta, err := catalog.Node(name, nil) + if err != nil { + return false, err + } + + if meta.LastIndex == 0 { + return false, fmt.Errorf("Bad: %v", meta) + } + if len(info.Services) == 0 { + return false, fmt.Errorf("Bad: %v", info) + } + + return true, nil + }, func(err error) { + t.Fatalf("err: %s", err) + }) +} + +func TestCatalog_Registration(t *testing.T) { + t.Parallel() + c, s := makeClient(t) + defer s.Stop() + + catalog := c.Catalog() + + service := &AgentService{ + ID: "redis1", + Service: "redis", + Tags: []string{"master", "v1"}, + Port: 8000, + } + + check := &AgentCheck{ + Node: "foobar", + CheckID: "service:redis1", + Name: "Redis health check", + Notes: "Script based health check", + Status: "passing", + ServiceID: "redis1", + } + + reg := &CatalogRegistration{ + Datacenter: "dc1", + Node: "foobar", + Address: "192.168.10.10", + Service: service, + Check: check, + } + + testutil.WaitForResult(func() (bool, error) { + if _, err := catalog.Register(reg, nil); err != nil { + return false, err + } + + node, _, err := catalog.Node("foobar", nil) + if err != nil { + return false, err + } + + if _, ok := node.Services["redis1"]; !ok { + return false, fmt.Errorf("missing service: redis1") + } + + health, _, err := c.Health().Node("foobar", nil) + if err != nil { + return false, err + } + + if health[0].CheckID != "service:redis1" { + return false, fmt.Errorf("missing checkid service:redis1") + } + + return true, nil + }, func(err error) { + t.Fatalf("err: %s", err) + }) + + // Test catalog deregistration of the previously registered service + dereg := &CatalogDeregistration{ + Datacenter: "dc1", + Node: "foobar", + Address: "192.168.10.10", + ServiceID: "redis1", + } + + if _, err := catalog.Deregister(dereg, nil); err != nil { + t.Fatalf("err: %v", err) + } + + testutil.WaitForResult(func() (bool, error) { + node, _, err := catalog.Node("foobar", nil) + if err != nil { + return false, err + } + + if _, ok := node.Services["redis1"]; ok { + return false, fmt.Errorf("ServiceID:redis1 is 
not deregistered") + } + + return true, nil + }, func(err error) { + t.Fatalf("err: %s", err) + }) + + // Test deregistration of the previously registered check + dereg = &CatalogDeregistration{ + Datacenter: "dc1", + Node: "foobar", + Address: "192.168.10.10", + CheckID: "service:redis1", + } + + if _, err := catalog.Deregister(dereg, nil); err != nil { + t.Fatalf("err: %v", err) + } + + testutil.WaitForResult(func() (bool, error) { + health, _, err := c.Health().Node("foobar", nil) + if err != nil { + return false, err + } + + if len(health) != 0 { + return false, fmt.Errorf("CheckID:service:redis1 is not deregistered") + } + + return true, nil + }, func(err error) { + t.Fatalf("err: %s", err) + }) + + // Test node deregistration of the previously registered node + dereg = &CatalogDeregistration{ + Datacenter: "dc1", + Node: "foobar", + Address: "192.168.10.10", + } + + if _, err := catalog.Deregister(dereg, nil); err != nil { + t.Fatalf("err: %v", err) + } + + testutil.WaitForResult(func() (bool, error) { + node, _, err := catalog.Node("foobar", nil) + if err != nil { + return false, err + } + + if node != nil { + return false, fmt.Errorf("node is not deregistered: %v", node) + } + + return true, nil + }, func(err error) { + t.Fatalf("err: %s", err) + }) +} diff --git a/vendor/github.com/hashicorp/consul/api/coordinate.go b/vendor/github.com/hashicorp/consul/api/coordinate.go new file mode 100644 index 000000000..fdff2075c --- /dev/null +++ b/vendor/github.com/hashicorp/consul/api/coordinate.go @@ -0,0 +1,66 @@ +package api + +import ( + "github.com/hashicorp/serf/coordinate" +) + +// CoordinateEntry represents a node and its associated network coordinate. +type CoordinateEntry struct { + Node string + Coord *coordinate.Coordinate +} + +// CoordinateDatacenterMap represents a datacenter and its associated WAN +// nodes and their associates coordinates. +type CoordinateDatacenterMap struct { + Datacenter string + Coordinates []CoordinateEntry +} + +// Coordinate can be used to query the coordinate endpoints +type Coordinate struct { + c *Client +} + +// Coordinate returns a handle to the coordinate endpoints +func (c *Client) Coordinate() *Coordinate { + return &Coordinate{c} +} + +// Datacenters is used to return the coordinates of all the servers in the WAN +// pool. +func (c *Coordinate) Datacenters() ([]*CoordinateDatacenterMap, error) { + r := c.c.newRequest("GET", "/v1/coordinate/datacenters") + _, resp, err := requireOK(c.c.doRequest(r)) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + var out []*CoordinateDatacenterMap + if err := decodeBody(resp, &out); err != nil { + return nil, err + } + return out, nil +} + +// Nodes is used to return the coordinates of all the nodes in the LAN pool. 
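+// For example (client is assumed to come from NewClient; entries can be
+// empty until coordinates have been computed and gossiped):
+//
+//	entries, _, err := client.Coordinate().Nodes(nil)
+//	if err != nil {
+//		// handle error
+//	}
+//	for _, entry := range entries {
+//		fmt.Printf("%s: %+v\n", entry.Node, entry.Coord)
+//	}
+//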
+func (c *Coordinate) Nodes(q *QueryOptions) ([]*CoordinateEntry, *QueryMeta, error) { + r := c.c.newRequest("GET", "/v1/coordinate/nodes") + r.setQueryOptions(q) + rtt, resp, err := requireOK(c.c.doRequest(r)) + if err != nil { + return nil, nil, err + } + defer resp.Body.Close() + + qm := &QueryMeta{} + parseQueryMeta(resp, qm) + qm.RequestTime = rtt + + var out []*CoordinateEntry + if err := decodeBody(resp, &out); err != nil { + return nil, nil, err + } + return out, qm, nil +} diff --git a/vendor/github.com/hashicorp/consul/api/coordinate_test.go b/vendor/github.com/hashicorp/consul/api/coordinate_test.go new file mode 100644 index 000000000..9d13d1c39 --- /dev/null +++ b/vendor/github.com/hashicorp/consul/api/coordinate_test.go @@ -0,0 +1,54 @@ +package api + +import ( + "fmt" + "testing" + + "github.com/hashicorp/consul/testutil" +) + +func TestCoordinate_Datacenters(t *testing.T) { + t.Parallel() + c, s := makeClient(t) + defer s.Stop() + + coordinate := c.Coordinate() + + testutil.WaitForResult(func() (bool, error) { + datacenters, err := coordinate.Datacenters() + if err != nil { + return false, err + } + + if len(datacenters) == 0 { + return false, fmt.Errorf("Bad: %v", datacenters) + } + + return true, nil + }, func(err error) { + t.Fatalf("err: %s", err) + }) +} + +func TestCoordinate_Nodes(t *testing.T) { + t.Parallel() + c, s := makeClient(t) + defer s.Stop() + + coordinate := c.Coordinate() + + testutil.WaitForResult(func() (bool, error) { + _, _, err := coordinate.Nodes(nil) + if err != nil { + return false, err + } + + // There's not a good way to populate coordinates without + // waiting for them to calculate and update, so the best + // we can do is call the endpoint and make sure we don't + // get an error. + return true, nil + }, func(err error) { + t.Fatalf("err: %s", err) + }) +} diff --git a/vendor/github.com/hashicorp/consul/api/event.go b/vendor/github.com/hashicorp/consul/api/event.go new file mode 100644 index 000000000..85b5b069b --- /dev/null +++ b/vendor/github.com/hashicorp/consul/api/event.go @@ -0,0 +1,104 @@ +package api + +import ( + "bytes" + "strconv" +) + +// Event can be used to query the Event endpoints +type Event struct { + c *Client +} + +// UserEvent represents an event that was fired by the user +type UserEvent struct { + ID string + Name string + Payload []byte + NodeFilter string + ServiceFilter string + TagFilter string + Version int + LTime uint64 +} + +// Event returns a handle to the event endpoints +func (c *Client) Event() *Event { + return &Event{c} +} + +// Fire is used to fire a new user event. Only the Name, Payload and Filters +// are respected. This returns the ID or an associated error. Cross DC requests +// are supported. 
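+// For example (the event name and payload are illustrative):
+//
+//	id, _, err := client.Event().Fire(&UserEvent{
+//		Name:    "deploy",
+//		Payload: []byte("v1.2.3"),
+//	}, nil)
+//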
+func (e *Event) Fire(params *UserEvent, q *WriteOptions) (string, *WriteMeta, error) { + r := e.c.newRequest("PUT", "/v1/event/fire/"+params.Name) + r.setWriteOptions(q) + if params.NodeFilter != "" { + r.params.Set("node", params.NodeFilter) + } + if params.ServiceFilter != "" { + r.params.Set("service", params.ServiceFilter) + } + if params.TagFilter != "" { + r.params.Set("tag", params.TagFilter) + } + if params.Payload != nil { + r.body = bytes.NewReader(params.Payload) + } + + rtt, resp, err := requireOK(e.c.doRequest(r)) + if err != nil { + return "", nil, err + } + defer resp.Body.Close() + + wm := &WriteMeta{RequestTime: rtt} + var out UserEvent + if err := decodeBody(resp, &out); err != nil { + return "", nil, err + } + return out.ID, wm, nil +} + +// List is used to get the most recent events an agent has received. +// This list can be optionally filtered by the name. This endpoint supports +// quasi-blocking queries. The index is not monotonic, nor does it provide provide +// LastContact or KnownLeader. +func (e *Event) List(name string, q *QueryOptions) ([]*UserEvent, *QueryMeta, error) { + r := e.c.newRequest("GET", "/v1/event/list") + r.setQueryOptions(q) + if name != "" { + r.params.Set("name", name) + } + rtt, resp, err := requireOK(e.c.doRequest(r)) + if err != nil { + return nil, nil, err + } + defer resp.Body.Close() + + qm := &QueryMeta{} + parseQueryMeta(resp, qm) + qm.RequestTime = rtt + + var entries []*UserEvent + if err := decodeBody(resp, &entries); err != nil { + return nil, nil, err + } + return entries, qm, nil +} + +// IDToIndex is a bit of a hack. This simulates the index generation to +// convert an event ID into a WaitIndex. +func (e *Event) IDToIndex(uuid string) uint64 { + lower := uuid[0:8] + uuid[9:13] + uuid[14:18] + upper := uuid[19:23] + uuid[24:36] + lowVal, err := strconv.ParseUint(lower, 16, 64) + if err != nil { + panic("Failed to convert " + lower) + } + highVal, err := strconv.ParseUint(upper, 16, 64) + if err != nil { + panic("Failed to convert " + upper) + } + return lowVal ^ highVal +} diff --git a/vendor/github.com/hashicorp/consul/api/event_test.go b/vendor/github.com/hashicorp/consul/api/event_test.go new file mode 100644 index 000000000..1ca92e233 --- /dev/null +++ b/vendor/github.com/hashicorp/consul/api/event_test.go @@ -0,0 +1,49 @@ +package api + +import ( + "testing" + + "github.com/hashicorp/consul/testutil" +) + +func TestEvent_FireList(t *testing.T) { + t.Parallel() + c, s := makeClient(t) + defer s.Stop() + + event := c.Event() + + params := &UserEvent{Name: "foo"} + id, meta, err := event.Fire(params, nil) + if err != nil { + t.Fatalf("err: %v", err) + } + + if meta.RequestTime == 0 { + t.Fatalf("bad: %v", meta) + } + + if id == "" { + t.Fatalf("invalid: %v", id) + } + + var events []*UserEvent + var qm *QueryMeta + testutil.WaitForResult(func() (bool, error) { + events, qm, err = event.List("", nil) + if err != nil { + t.Fatalf("err: %v", err) + } + return len(events) > 0, err + }, func(err error) { + t.Fatalf("err: %#v", err) + }) + + if events[len(events)-1].ID != id { + t.Fatalf("bad: %#v", events) + } + + if qm.LastIndex != event.IDToIndex(id) { + t.Fatalf("Bad: %#v", qm) + } +} diff --git a/vendor/github.com/hashicorp/consul/api/health.go b/vendor/github.com/hashicorp/consul/api/health.go new file mode 100644 index 000000000..1a273e087 --- /dev/null +++ b/vendor/github.com/hashicorp/consul/api/health.go @@ -0,0 +1,136 @@ +package api + +import ( + "fmt" +) + +// HealthCheck is used to represent a single check +type 
HealthCheck struct { + Node string + CheckID string + Name string + Status string + Notes string + Output string + ServiceID string + ServiceName string +} + +// ServiceEntry is used for the health service endpoint +type ServiceEntry struct { + Node *Node + Service *AgentService + Checks []*HealthCheck +} + +// Health can be used to query the Health endpoints +type Health struct { + c *Client +} + +// Health returns a handle to the health endpoints +func (c *Client) Health() *Health { + return &Health{c} +} + +// Node is used to query for checks belonging to a given node +func (h *Health) Node(node string, q *QueryOptions) ([]*HealthCheck, *QueryMeta, error) { + r := h.c.newRequest("GET", "/v1/health/node/"+node) + r.setQueryOptions(q) + rtt, resp, err := requireOK(h.c.doRequest(r)) + if err != nil { + return nil, nil, err + } + defer resp.Body.Close() + + qm := &QueryMeta{} + parseQueryMeta(resp, qm) + qm.RequestTime = rtt + + var out []*HealthCheck + if err := decodeBody(resp, &out); err != nil { + return nil, nil, err + } + return out, qm, nil +} + +// Checks is used to return the checks associated with a service +func (h *Health) Checks(service string, q *QueryOptions) ([]*HealthCheck, *QueryMeta, error) { + r := h.c.newRequest("GET", "/v1/health/checks/"+service) + r.setQueryOptions(q) + rtt, resp, err := requireOK(h.c.doRequest(r)) + if err != nil { + return nil, nil, err + } + defer resp.Body.Close() + + qm := &QueryMeta{} + parseQueryMeta(resp, qm) + qm.RequestTime = rtt + + var out []*HealthCheck + if err := decodeBody(resp, &out); err != nil { + return nil, nil, err + } + return out, qm, nil +} + +// Service is used to query health information along with service info +// for a given service. It can optionally do server-side filtering on a tag +// or nodes with passing health checks only. +func (h *Health) Service(service, tag string, passingOnly bool, q *QueryOptions) ([]*ServiceEntry, *QueryMeta, error) { + r := h.c.newRequest("GET", "/v1/health/service/"+service) + r.setQueryOptions(q) + if tag != "" { + r.params.Set("tag", tag) + } + if passingOnly { + r.params.Set("passing", "1") + } + rtt, resp, err := requireOK(h.c.doRequest(r)) + if err != nil { + return nil, nil, err + } + defer resp.Body.Close() + + qm := &QueryMeta{} + parseQueryMeta(resp, qm) + qm.RequestTime = rtt + + var out []*ServiceEntry + if err := decodeBody(resp, &out); err != nil { + return nil, nil, err + } + return out, qm, nil +} + +// State is used to retrieve all the checks in a given state. +// The wildcard "any" state can also be used for all checks. 
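+// For example, to list every check that is currently failing (client is
+// assumed to come from NewClient):
+//
+//	checks, _, err := client.Health().State("critical", nil)
+//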
+func (h *Health) State(state string, q *QueryOptions) ([]*HealthCheck, *QueryMeta, error) { + switch state { + case "any": + case "warning": + case "critical": + case "passing": + case "unknown": + default: + return nil, nil, fmt.Errorf("Unsupported state: %v", state) + } + r := h.c.newRequest("GET", "/v1/health/state/"+state) + r.setQueryOptions(q) + rtt, resp, err := requireOK(h.c.doRequest(r)) + if err != nil { + return nil, nil, err + } + defer resp.Body.Close() + + qm := &QueryMeta{} + parseQueryMeta(resp, qm) + qm.RequestTime = rtt + + var out []*HealthCheck + if err := decodeBody(resp, &out); err != nil { + return nil, nil, err + } + return out, qm, nil +} diff --git a/vendor/github.com/hashicorp/consul/api/health_test.go b/vendor/github.com/hashicorp/consul/api/health_test.go new file mode 100644 index 000000000..d80a4693a --- /dev/null +++ b/vendor/github.com/hashicorp/consul/api/health_test.go @@ -0,0 +1,125 @@ +package api + +import ( + "fmt" + "testing" + + "github.com/hashicorp/consul/testutil" +) + +func TestHealth_Node(t *testing.T) { + t.Parallel() + c, s := makeClient(t) + defer s.Stop() + + agent := c.Agent() + health := c.Health() + + info, err := agent.Self() + if err != nil { + t.Fatalf("err: %v", err) + } + name := info["Config"]["NodeName"].(string) + + testutil.WaitForResult(func() (bool, error) { + checks, meta, err := health.Node(name, nil) + if err != nil { + return false, err + } + if meta.LastIndex == 0 { + return false, fmt.Errorf("bad: %v", meta) + } + if len(checks) == 0 { + return false, fmt.Errorf("bad: %v", checks) + } + return true, nil + }, func(err error) { + t.Fatalf("err: %s", err) + }) +} + +func TestHealth_Checks(t *testing.T) { + t.Parallel() + c, s := makeClient(t) + defer s.Stop() + + agent := c.Agent() + health := c.Health() + + // Make a service with a check + reg := &AgentServiceRegistration{ + Name: "foo", + Check: &AgentServiceCheck{ + TTL: "15s", + }, + } + if err := agent.ServiceRegister(reg); err != nil { + t.Fatalf("err: %v", err) + } + defer agent.ServiceDeregister("foo") + + testutil.WaitForResult(func() (bool, error) { + checks, meta, err := health.Checks("foo", nil) + if err != nil { + return false, err + } + if meta.LastIndex == 0 { + return false, fmt.Errorf("bad: %v", meta) + } + if len(checks) == 0 { + return false, fmt.Errorf("Bad: %v", checks) + } + return true, nil + }, func(err error) { + t.Fatalf("err: %s", err) + }) +} + +func TestHealth_Service(t *testing.T) { + t.Parallel() + c, s := makeClient(t) + defer s.Stop() + + health := c.Health() + + testutil.WaitForResult(func() (bool, error) { + // consul service should always exist... 
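+		// passingOnly=true restricts the result to entries whose checks
+		// are all passing, which the agent's self-registered consul
+		// service satisfies once the cluster is healthy.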
+ checks, meta, err := health.Service("consul", "", true, nil) + if err != nil { + return false, err + } + if meta.LastIndex == 0 { + return false, fmt.Errorf("bad: %v", meta) + } + if len(checks) == 0 { + return false, fmt.Errorf("Bad: %v", checks) + } + return true, nil + }, func(err error) { + t.Fatalf("err: %s", err) + }) +} + +func TestHealth_State(t *testing.T) { + t.Parallel() + c, s := makeClient(t) + defer s.Stop() + + health := c.Health() + + testutil.WaitForResult(func() (bool, error) { + checks, meta, err := health.State("any", nil) + if err != nil { + return false, err + } + if meta.LastIndex == 0 { + return false, fmt.Errorf("bad: %v", meta) + } + if len(checks) == 0 { + return false, fmt.Errorf("Bad: %v", checks) + } + return true, nil + }, func(err error) { + t.Fatalf("err: %s", err) + }) +} diff --git a/vendor/github.com/hashicorp/consul/api/kv.go b/vendor/github.com/hashicorp/consul/api/kv.go new file mode 100644 index 000000000..688b3a09d --- /dev/null +++ b/vendor/github.com/hashicorp/consul/api/kv.go @@ -0,0 +1,240 @@ +package api + +import ( + "bytes" + "fmt" + "io" + "net/http" + "strconv" + "strings" +) + +// KVPair is used to represent a single K/V entry +type KVPair struct { + Key string + CreateIndex uint64 + ModifyIndex uint64 + LockIndex uint64 + Flags uint64 + Value []byte + Session string +} + +// KVPairs is a list of KVPair objects +type KVPairs []*KVPair + +// KV is used to manipulate the K/V API +type KV struct { + c *Client +} + +// KV is used to return a handle to the K/V apis +func (c *Client) KV() *KV { + return &KV{c} +} + +// Get is used to lookup a single key +func (k *KV) Get(key string, q *QueryOptions) (*KVPair, *QueryMeta, error) { + resp, qm, err := k.getInternal(key, nil, q) + if err != nil { + return nil, nil, err + } + if resp == nil { + return nil, qm, nil + } + defer resp.Body.Close() + + var entries []*KVPair + if err := decodeBody(resp, &entries); err != nil { + return nil, nil, err + } + if len(entries) > 0 { + return entries[0], qm, nil + } + return nil, qm, nil +} + +// List is used to lookup all keys under a prefix +func (k *KV) List(prefix string, q *QueryOptions) (KVPairs, *QueryMeta, error) { + resp, qm, err := k.getInternal(prefix, map[string]string{"recurse": ""}, q) + if err != nil { + return nil, nil, err + } + if resp == nil { + return nil, qm, nil + } + defer resp.Body.Close() + + var entries []*KVPair + if err := decodeBody(resp, &entries); err != nil { + return nil, nil, err + } + return entries, qm, nil +} + +// Keys is used to list all the keys under a prefix. Optionally, +// a separator can be used to limit the responses. 
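+// For example, to list the immediate children of "web/" (the prefix and
+// separator here are illustrative):
+//
+//	keys, _, err := client.KV().Keys("web/", "/", nil)
+//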
+func (k *KV) Keys(prefix, separator string, q *QueryOptions) ([]string, *QueryMeta, error) { + params := map[string]string{"keys": ""} + if separator != "" { + params["separator"] = separator + } + resp, qm, err := k.getInternal(prefix, params, q) + if err != nil { + return nil, nil, err + } + if resp == nil { + return nil, qm, nil + } + defer resp.Body.Close() + + var entries []string + if err := decodeBody(resp, &entries); err != nil { + return nil, nil, err + } + return entries, qm, nil +} + +func (k *KV) getInternal(key string, params map[string]string, q *QueryOptions) (*http.Response, *QueryMeta, error) { + r := k.c.newRequest("GET", "/v1/kv/"+key) + r.setQueryOptions(q) + for param, val := range params { + r.params.Set(param, val) + } + rtt, resp, err := k.c.doRequest(r) + if err != nil { + return nil, nil, err + } + + qm := &QueryMeta{} + parseQueryMeta(resp, qm) + qm.RequestTime = rtt + + if resp.StatusCode == 404 { + resp.Body.Close() + return nil, qm, nil + } else if resp.StatusCode != 200 { + resp.Body.Close() + return nil, nil, fmt.Errorf("Unexpected response code: %d", resp.StatusCode) + } + return resp, qm, nil +} + +// Put is used to write a new value. Only the +// Key, Flags and Value is respected. +func (k *KV) Put(p *KVPair, q *WriteOptions) (*WriteMeta, error) { + params := make(map[string]string, 1) + if p.Flags != 0 { + params["flags"] = strconv.FormatUint(p.Flags, 10) + } + _, wm, err := k.put(p.Key, params, p.Value, q) + return wm, err +} + +// CAS is used for a Check-And-Set operation. The Key, +// ModifyIndex, Flags and Value are respected. Returns true +// on success or false on failures. +func (k *KV) CAS(p *KVPair, q *WriteOptions) (bool, *WriteMeta, error) { + params := make(map[string]string, 2) + if p.Flags != 0 { + params["flags"] = strconv.FormatUint(p.Flags, 10) + } + params["cas"] = strconv.FormatUint(p.ModifyIndex, 10) + return k.put(p.Key, params, p.Value, q) +} + +// Acquire is used for a lock acquisition operation. The Key, +// Flags, Value and Session are respected. Returns true +// on success or false on failures. +func (k *KV) Acquire(p *KVPair, q *WriteOptions) (bool, *WriteMeta, error) { + params := make(map[string]string, 2) + if p.Flags != 0 { + params["flags"] = strconv.FormatUint(p.Flags, 10) + } + params["acquire"] = p.Session + return k.put(p.Key, params, p.Value, q) +} + +// Release is used for a lock release operation. The Key, +// Flags, Value and Session are respected. Returns true +// on success or false on failures. +func (k *KV) Release(p *KVPair, q *WriteOptions) (bool, *WriteMeta, error) { + params := make(map[string]string, 2) + if p.Flags != 0 { + params["flags"] = strconv.FormatUint(p.Flags, 10) + } + params["release"] = p.Session + return k.put(p.Key, params, p.Value, q) +} + +func (k *KV) put(key string, params map[string]string, body []byte, q *WriteOptions) (bool, *WriteMeta, error) { + if len(key) > 0 && key[0] == '/' { + return false, nil, fmt.Errorf("Invalid key. 
Key must not begin with a '/': %s", key) + } + + r := k.c.newRequest("PUT", "/v1/kv/"+key) + r.setWriteOptions(q) + for param, val := range params { + r.params.Set(param, val) + } + r.body = bytes.NewReader(body) + rtt, resp, err := requireOK(k.c.doRequest(r)) + if err != nil { + return false, nil, err + } + defer resp.Body.Close() + + qm := &WriteMeta{} + qm.RequestTime = rtt + + var buf bytes.Buffer + if _, err := io.Copy(&buf, resp.Body); err != nil { + return false, nil, fmt.Errorf("Failed to read response: %v", err) + } + res := strings.Contains(string(buf.Bytes()), "true") + return res, qm, nil +} + +// Delete is used to delete a single key +func (k *KV) Delete(key string, w *WriteOptions) (*WriteMeta, error) { + _, qm, err := k.deleteInternal(key, nil, w) + return qm, err +} + +// DeleteCAS is used for a Delete Check-And-Set operation. The Key +// and ModifyIndex are respected. Returns true on success or false on failures. +func (k *KV) DeleteCAS(p *KVPair, q *WriteOptions) (bool, *WriteMeta, error) { + params := map[string]string{ + "cas": strconv.FormatUint(p.ModifyIndex, 10), + } + return k.deleteInternal(p.Key, params, q) +} + +// DeleteTree is used to delete all keys under a prefix +func (k *KV) DeleteTree(prefix string, w *WriteOptions) (*WriteMeta, error) { + _, qm, err := k.deleteInternal(prefix, map[string]string{"recurse": ""}, w) + return qm, err +} + +func (k *KV) deleteInternal(key string, params map[string]string, q *WriteOptions) (bool, *WriteMeta, error) { + r := k.c.newRequest("DELETE", "/v1/kv/"+key) + r.setWriteOptions(q) + for param, val := range params { + r.params.Set(param, val) + } + rtt, resp, err := requireOK(k.c.doRequest(r)) + if err != nil { + return false, nil, err + } + defer resp.Body.Close() + + qm := &WriteMeta{} + qm.RequestTime = rtt + + var buf bytes.Buffer + if _, err := io.Copy(&buf, resp.Body); err != nil { + return false, nil, fmt.Errorf("Failed to read response: %v", err) + } + res := strings.Contains(string(buf.Bytes()), "true") + return res, qm, nil +} diff --git a/vendor/github.com/hashicorp/consul/api/kv_test.go b/vendor/github.com/hashicorp/consul/api/kv_test.go new file mode 100644 index 000000000..758595d89 --- /dev/null +++ b/vendor/github.com/hashicorp/consul/api/kv_test.go @@ -0,0 +1,447 @@ +package api + +import ( + "bytes" + "path" + "testing" + "time" +) + +func TestClientPutGetDelete(t *testing.T) { + t.Parallel() + c, s := makeClient(t) + defer s.Stop() + + kv := c.KV() + + // Get a get without a key + key := testKey() + pair, _, err := kv.Get(key, nil) + if err != nil { + t.Fatalf("err: %v", err) + } + if pair != nil { + t.Fatalf("unexpected value: %#v", pair) + } + + value := []byte("test") + + // Put a key that begins with a '/', this should fail + invalidKey := "/test" + p := &KVPair{Key: invalidKey, Flags: 42, Value: value} + if _, err := kv.Put(p, nil); err == nil { + t.Fatalf("Invalid key not detected: %s", invalidKey) + } + + // Put the key + p = &KVPair{Key: key, Flags: 42, Value: value} + if _, err := kv.Put(p, nil); err != nil { + t.Fatalf("err: %v", err) + } + + // Get should work + pair, meta, err := kv.Get(key, nil) + if err != nil { + t.Fatalf("err: %v", err) + } + if pair == nil { + t.Fatalf("expected value: %#v", pair) + } + if !bytes.Equal(pair.Value, value) { + t.Fatalf("unexpected value: %#v", pair) + } + if pair.Flags != 42 { + t.Fatalf("unexpected value: %#v", pair) + } + if meta.LastIndex == 0 { + t.Fatalf("unexpected value: %#v", meta) + } + + // Delete + if _, err := kv.Delete(key, nil); err != nil { + 
t.Fatalf("err: %v", err) + } + + // Get should fail + pair, _, err = kv.Get(key, nil) + if err != nil { + t.Fatalf("err: %v", err) + } + if pair != nil { + t.Fatalf("unexpected value: %#v", pair) + } +} + +func TestClient_List_DeleteRecurse(t *testing.T) { + t.Parallel() + c, s := makeClient(t) + defer s.Stop() + + kv := c.KV() + + // Generate some test keys + prefix := testKey() + var keys []string + for i := 0; i < 100; i++ { + keys = append(keys, path.Join(prefix, testKey())) + } + + // Set values + value := []byte("test") + for _, key := range keys { + p := &KVPair{Key: key, Value: value} + if _, err := kv.Put(p, nil); err != nil { + t.Fatalf("err: %v", err) + } + } + + // List the values + pairs, meta, err := kv.List(prefix, nil) + if err != nil { + t.Fatalf("err: %v", err) + } + if len(pairs) != len(keys) { + t.Fatalf("got %d keys", len(pairs)) + } + for _, pair := range pairs { + if !bytes.Equal(pair.Value, value) { + t.Fatalf("unexpected value: %#v", pair) + } + } + if meta.LastIndex == 0 { + t.Fatalf("unexpected value: %#v", meta) + } + + // Delete all + if _, err := kv.DeleteTree(prefix, nil); err != nil { + t.Fatalf("err: %v", err) + } + + // List the values + pairs, _, err = kv.List(prefix, nil) + if err != nil { + t.Fatalf("err: %v", err) + } + if len(pairs) != 0 { + t.Fatalf("got %d keys", len(pairs)) + } +} + +func TestClient_DeleteCAS(t *testing.T) { + t.Parallel() + c, s := makeClient(t) + defer s.Stop() + + kv := c.KV() + + // Put the key + key := testKey() + value := []byte("test") + p := &KVPair{Key: key, Value: value} + if work, _, err := kv.CAS(p, nil); err != nil { + t.Fatalf("err: %v", err) + } else if !work { + t.Fatalf("CAS failure") + } + + // Get should work + pair, meta, err := kv.Get(key, nil) + if err != nil { + t.Fatalf("err: %v", err) + } + if pair == nil { + t.Fatalf("expected value: %#v", pair) + } + if meta.LastIndex == 0 { + t.Fatalf("unexpected value: %#v", meta) + } + + // CAS update with bad index + p.ModifyIndex = 1 + if work, _, err := kv.DeleteCAS(p, nil); err != nil { + t.Fatalf("err: %v", err) + } else if work { + t.Fatalf("unexpected CAS") + } + + // CAS update with valid index + p.ModifyIndex = meta.LastIndex + if work, _, err := kv.DeleteCAS(p, nil); err != nil { + t.Fatalf("err: %v", err) + } else if !work { + t.Fatalf("unexpected CAS failure") + } +} + +func TestClient_CAS(t *testing.T) { + t.Parallel() + c, s := makeClient(t) + defer s.Stop() + + kv := c.KV() + + // Put the key + key := testKey() + value := []byte("test") + p := &KVPair{Key: key, Value: value} + if work, _, err := kv.CAS(p, nil); err != nil { + t.Fatalf("err: %v", err) + } else if !work { + t.Fatalf("CAS failure") + } + + // Get should work + pair, meta, err := kv.Get(key, nil) + if err != nil { + t.Fatalf("err: %v", err) + } + if pair == nil { + t.Fatalf("expected value: %#v", pair) + } + if meta.LastIndex == 0 { + t.Fatalf("unexpected value: %#v", meta) + } + + // CAS update with bad index + newVal := []byte("foo") + p.Value = newVal + p.ModifyIndex = 1 + if work, _, err := kv.CAS(p, nil); err != nil { + t.Fatalf("err: %v", err) + } else if work { + t.Fatalf("unexpected CAS") + } + + // CAS update with valid index + p.ModifyIndex = meta.LastIndex + if work, _, err := kv.CAS(p, nil); err != nil { + t.Fatalf("err: %v", err) + } else if !work { + t.Fatalf("unexpected CAS failure") + } +} + +func TestClient_WatchGet(t *testing.T) { + t.Parallel() + c, s := makeClient(t) + defer s.Stop() + + kv := c.KV() + + // Get a get without a key + key := testKey() + pair, meta, err := 
kv.Get(key, nil) + if err != nil { + t.Fatalf("err: %v", err) + } + if pair != nil { + t.Fatalf("unexpected value: %#v", pair) + } + if meta.LastIndex == 0 { + t.Fatalf("unexpected value: %#v", meta) + } + + // Put the key + value := []byte("test") + go func() { + kv := c.KV() + + time.Sleep(100 * time.Millisecond) + p := &KVPair{Key: key, Flags: 42, Value: value} + if _, err := kv.Put(p, nil); err != nil { + t.Fatalf("err: %v", err) + } + }() + + // Get should work + options := &QueryOptions{WaitIndex: meta.LastIndex} + pair, meta2, err := kv.Get(key, options) + if err != nil { + t.Fatalf("err: %v", err) + } + if pair == nil { + t.Fatalf("expected value: %#v", pair) + } + if !bytes.Equal(pair.Value, value) { + t.Fatalf("unexpected value: %#v", pair) + } + if pair.Flags != 42 { + t.Fatalf("unexpected value: %#v", pair) + } + if meta2.LastIndex <= meta.LastIndex { + t.Fatalf("unexpected value: %#v", meta2) + } +} + +func TestClient_WatchList(t *testing.T) { + t.Parallel() + c, s := makeClient(t) + defer s.Stop() + + kv := c.KV() + + // Get a get without a key + prefix := testKey() + key := path.Join(prefix, testKey()) + pairs, meta, err := kv.List(prefix, nil) + if err != nil { + t.Fatalf("err: %v", err) + } + if len(pairs) != 0 { + t.Fatalf("unexpected value: %#v", pairs) + } + if meta.LastIndex == 0 { + t.Fatalf("unexpected value: %#v", meta) + } + + // Put the key + value := []byte("test") + go func() { + kv := c.KV() + + time.Sleep(100 * time.Millisecond) + p := &KVPair{Key: key, Flags: 42, Value: value} + if _, err := kv.Put(p, nil); err != nil { + t.Fatalf("err: %v", err) + } + }() + + // Get should work + options := &QueryOptions{WaitIndex: meta.LastIndex} + pairs, meta2, err := kv.List(prefix, options) + if err != nil { + t.Fatalf("err: %v", err) + } + if len(pairs) != 1 { + t.Fatalf("expected value: %#v", pairs) + } + if !bytes.Equal(pairs[0].Value, value) { + t.Fatalf("unexpected value: %#v", pairs) + } + if pairs[0].Flags != 42 { + t.Fatalf("unexpected value: %#v", pairs) + } + if meta2.LastIndex <= meta.LastIndex { + t.Fatalf("unexpected value: %#v", meta2) + } + +} + +func TestClient_Keys_DeleteRecurse(t *testing.T) { + t.Parallel() + c, s := makeClient(t) + defer s.Stop() + + kv := c.KV() + + // Generate some test keys + prefix := testKey() + var keys []string + for i := 0; i < 100; i++ { + keys = append(keys, path.Join(prefix, testKey())) + } + + // Set values + value := []byte("test") + for _, key := range keys { + p := &KVPair{Key: key, Value: value} + if _, err := kv.Put(p, nil); err != nil { + t.Fatalf("err: %v", err) + } + } + + // List the values + out, meta, err := kv.Keys(prefix, "", nil) + if err != nil { + t.Fatalf("err: %v", err) + } + if len(out) != len(keys) { + t.Fatalf("got %d keys", len(out)) + } + if meta.LastIndex == 0 { + t.Fatalf("unexpected value: %#v", meta) + } + + // Delete all + if _, err := kv.DeleteTree(prefix, nil); err != nil { + t.Fatalf("err: %v", err) + } + + // List the values + out, _, err = kv.Keys(prefix, "", nil) + if err != nil { + t.Fatalf("err: %v", err) + } + if len(out) != 0 { + t.Fatalf("got %d keys", len(out)) + } +} + +func TestClient_AcquireRelease(t *testing.T) { + t.Parallel() + c, s := makeClient(t) + defer s.Stop() + + session := c.Session() + kv := c.KV() + + // Make a session + id, _, err := session.CreateNoChecks(nil, nil) + if err != nil { + t.Fatalf("err: %v", err) + } + defer session.Destroy(id, nil) + + // Acquire the key + key := testKey() + value := []byte("test") + p := &KVPair{Key: key, Value: value, Session: id} + 
if work, _, err := kv.Acquire(p, nil); err != nil {
+		t.Fatalf("err: %v", err)
+	} else if !work {
+		t.Fatalf("Lock failure")
+	}
+
+	// Get should work
+	pair, meta, err := kv.Get(key, nil)
+	if err != nil {
+		t.Fatalf("err: %v", err)
+	}
+	if pair == nil {
+		t.Fatalf("expected value: %#v", pair)
+	}
+	if pair.LockIndex != 1 {
+		t.Fatalf("Expected lock: %v", pair)
+	}
+	if pair.Session != id {
+		t.Fatalf("Expected lock: %v", pair)
+	}
+	if meta.LastIndex == 0 {
+		t.Fatalf("unexpected value: %#v", meta)
+	}
+
+	// Release
+	if work, _, err := kv.Release(p, nil); err != nil {
+		t.Fatalf("err: %v", err)
+	} else if !work {
+		t.Fatalf("Release failed")
+	}
+
+	// Get should work
+	pair, meta, err = kv.Get(key, nil)
+	if err != nil {
+		t.Fatalf("err: %v", err)
+	}
+	if pair == nil {
+		t.Fatalf("expected value: %#v", pair)
+	}
+	if pair.LockIndex != 1 {
+		t.Fatalf("Expected lock: %v", pair)
+	}
+	if pair.Session != "" {
+		t.Fatalf("Expected unlock: %v", pair)
+	}
+	if meta.LastIndex == 0 {
+		t.Fatalf("unexpected value: %#v", meta)
+	}
+}
diff --git a/vendor/github.com/hashicorp/consul/api/lock.go b/vendor/github.com/hashicorp/consul/api/lock.go
new file mode 100644
index 000000000..08e8e7931
--- /dev/null
+++ b/vendor/github.com/hashicorp/consul/api/lock.go
@@ -0,0 +1,380 @@
+package api
+
+import (
+	"fmt"
+	"sync"
+	"time"
+)
+
+const (
+	// DefaultLockSessionName is the Session Name we assign if none is provided
+	DefaultLockSessionName = "Consul API Lock"
+
+	// DefaultLockSessionTTL is the default session TTL if no Session is provided
+	// when creating a new Lock. This is used because we do not have any
+	// other check to depend upon.
+	DefaultLockSessionTTL = "15s"
+
+	// DefaultLockWaitTime is how long we block at a time to check if lock
+	// acquisition is possible. This affects the minimum time it takes to cancel
+	// a Lock acquisition.
+	DefaultLockWaitTime = 15 * time.Second
+
+	// DefaultLockRetryTime is how long we wait after a failed lock acquisition
+	// before attempting to acquire the lock again. This is so that once a lock-delay
+	// is in effect, we do not hot loop retrying the acquisition.
+	DefaultLockRetryTime = 5 * time.Second
+
+	// DefaultMonitorRetryTime is how long we wait after a failed monitor check
+	// of a lock (500 response code). This allows the monitor to ride out brief
+	// periods of unavailability, subject to the MonitorRetries setting in the
+	// lock options which is by default set to 0, disabling this feature. This
+	// affects locks and semaphores.
+	DefaultMonitorRetryTime = 2 * time.Second
+
+	// LockFlagValue is a magic flag we set to indicate a key
+	// is being used for a lock. It is used to detect a potential
+	// conflict with a semaphore.
+	LockFlagValue = 0x2ddccbc058a50c18
+)
+
+var (
+	// ErrLockHeld is returned if we attempt to double lock
+	ErrLockHeld = fmt.Errorf("Lock already held")
+
+	// ErrLockNotHeld is returned if we attempt to unlock a lock
+	// that we do not hold.
+	ErrLockNotHeld = fmt.Errorf("Lock not held")
+
+	// ErrLockInUse is returned if we attempt to destroy a lock
+	// that is in use.
+	ErrLockInUse = fmt.Errorf("Lock in use")
+
+	// ErrLockConflict is returned if the flags on a key
+	// used for a lock do not match expectation
+	ErrLockConflict = fmt.Errorf("Existing key does not match lock use")
+)
+
+// Lock is used to implement client-side leader election. It follows the
+// algorithm described here: https://www.consul.io/docs/guides/leader-election.html.
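+//
+// A minimal usage sketch (illustrative only; assumes a *Client named
+// client from NewClient, with error handling elided):
+//
+//	lock, _ := client.LockKey("service/my-app/leader")
+//	leaderCh, err := lock.Lock(nil)
+//	if err == nil && leaderCh != nil {
+//		defer lock.Unlock()
+//		// do leader-only work; leaderCh is closed if leadership is lost
+//	}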
+type Lock struct { + c *Client + opts *LockOptions + + isHeld bool + sessionRenew chan struct{} + lockSession string + l sync.Mutex +} + +// LockOptions is used to parameterize the Lock behavior. +type LockOptions struct { + Key string // Must be set and have write permissions + Value []byte // Optional, value to associate with the lock + Session string // Optional, created if not specified + SessionName string // Optional, defaults to DefaultLockSessionName + SessionTTL string // Optional, defaults to DefaultLockSessionTTL + MonitorRetries int // Optional, defaults to 0 which means no retries + MonitorRetryTime time.Duration // Optional, defaults to DefaultMonitorRetryTime + LockWaitTime time.Duration // Optional, defaults to DefaultLockWaitTime + LockTryOnce bool // Optional, defaults to false which means try forever +} + +// LockKey returns a handle to a lock struct which can be used +// to acquire and release the mutex. The key used must have +// write permissions. +func (c *Client) LockKey(key string) (*Lock, error) { + opts := &LockOptions{ + Key: key, + } + return c.LockOpts(opts) +} + +// LockOpts returns a handle to a lock struct which can be used +// to acquire and release the mutex. The key used must have +// write permissions. +func (c *Client) LockOpts(opts *LockOptions) (*Lock, error) { + if opts.Key == "" { + return nil, fmt.Errorf("missing key") + } + if opts.SessionName == "" { + opts.SessionName = DefaultLockSessionName + } + if opts.SessionTTL == "" { + opts.SessionTTL = DefaultLockSessionTTL + } else { + if _, err := time.ParseDuration(opts.SessionTTL); err != nil { + return nil, fmt.Errorf("invalid SessionTTL: %v", err) + } + } + if opts.MonitorRetryTime == 0 { + opts.MonitorRetryTime = DefaultMonitorRetryTime + } + if opts.LockWaitTime == 0 { + opts.LockWaitTime = DefaultLockWaitTime + } + l := &Lock{ + c: c, + opts: opts, + } + return l, nil +} + +// Lock attempts to acquire the lock and blocks while doing so. +// Providing a non-nil stopCh can be used to abort the lock attempt. +// Returns a channel that is closed if our lock is lost or an error. +// This channel could be closed at any time due to session invalidation, +// communication errors, operator intervention, etc. It is NOT safe to +// assume that the lock is held until Unlock() unless the Session is specifically +// created without any associated health checks. By default Consul sessions +// prefer liveness over safety and an application must be able to handle +// the lock being lost. 
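+//
+// As a sketch (names illustrative), an acquisition that gives up after
+// five seconds could look like:
+//
+//	stopCh := make(chan struct{})
+//	go func() { time.Sleep(5 * time.Second); close(stopCh) }()
+//	leaderCh, err := lock.Lock(stopCh)
+//	if err == nil && leaderCh == nil {
+//		// gave up via stopCh (or one-shot wait expired) without error
+//	}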
+func (l *Lock) Lock(stopCh <-chan struct{}) (<-chan struct{}, error) { + // Hold the lock as we try to acquire + l.l.Lock() + defer l.l.Unlock() + + // Check if we already hold the lock + if l.isHeld { + return nil, ErrLockHeld + } + + // Check if we need to create a session first + l.lockSession = l.opts.Session + if l.lockSession == "" { + if s, err := l.createSession(); err != nil { + return nil, fmt.Errorf("failed to create session: %v", err) + } else { + l.sessionRenew = make(chan struct{}) + l.lockSession = s + session := l.c.Session() + go session.RenewPeriodic(l.opts.SessionTTL, s, nil, l.sessionRenew) + + // If we fail to acquire the lock, cleanup the session + defer func() { + if !l.isHeld { + close(l.sessionRenew) + l.sessionRenew = nil + } + }() + } + } + + // Setup the query options + kv := l.c.KV() + qOpts := &QueryOptions{ + WaitTime: l.opts.LockWaitTime, + } + + start := time.Now() + attempts := 0 +WAIT: + // Check if we should quit + select { + case <-stopCh: + return nil, nil + default: + } + + // Handle the one-shot mode. + if l.opts.LockTryOnce && attempts > 0 { + elapsed := time.Now().Sub(start) + if elapsed > qOpts.WaitTime { + return nil, nil + } + + qOpts.WaitTime -= elapsed + } + attempts++ + + // Look for an existing lock, blocking until not taken + pair, meta, err := kv.Get(l.opts.Key, qOpts) + if err != nil { + return nil, fmt.Errorf("failed to read lock: %v", err) + } + if pair != nil && pair.Flags != LockFlagValue { + return nil, ErrLockConflict + } + locked := false + if pair != nil && pair.Session == l.lockSession { + goto HELD + } + if pair != nil && pair.Session != "" { + qOpts.WaitIndex = meta.LastIndex + goto WAIT + } + + // Try to acquire the lock + pair = l.lockEntry(l.lockSession) + locked, _, err = kv.Acquire(pair, nil) + if err != nil { + return nil, fmt.Errorf("failed to acquire lock: %v", err) + } + + // Handle the case of not getting the lock + if !locked { + // Determine why the lock failed + qOpts.WaitIndex = 0 + pair, meta, err = kv.Get(l.opts.Key, qOpts) + if pair != nil && pair.Session != "" { + //If the session is not null, this means that a wait can safely happen + //using a long poll + qOpts.WaitIndex = meta.LastIndex + goto WAIT + } else { + // If the session is empty and the lock failed to acquire, then it means + // a lock-delay is in effect and a timed wait must be used + select { + case <-time.After(DefaultLockRetryTime): + goto WAIT + case <-stopCh: + return nil, nil + } + } + } + +HELD: + // Watch to ensure we maintain leadership + leaderCh := make(chan struct{}) + go l.monitorLock(l.lockSession, leaderCh) + + // Set that we own the lock + l.isHeld = true + + // Locked! All done + return leaderCh, nil +} + +// Unlock released the lock. It is an error to call this +// if the lock is not currently held. +func (l *Lock) Unlock() error { + // Hold the lock as we try to release + l.l.Lock() + defer l.l.Unlock() + + // Ensure the lock is actually held + if !l.isHeld { + return ErrLockNotHeld + } + + // Set that we no longer own the lock + l.isHeld = false + + // Stop the session renew + if l.sessionRenew != nil { + defer func() { + close(l.sessionRenew) + l.sessionRenew = nil + }() + } + + // Get the lock entry, and clear the lock session + lockEnt := l.lockEntry(l.lockSession) + l.lockSession = "" + + // Release the lock explicitly + kv := l.c.KV() + _, _, err := kv.Release(lockEnt, nil) + if err != nil { + return fmt.Errorf("failed to release lock: %v", err) + } + return nil +} + +// Destroy is used to cleanup the lock entry. 
It is not necessary
+// to invoke it. It will fail if the lock is in use.
+func (l *Lock) Destroy() error {
+	// Hold the lock as we try to release
+	l.l.Lock()
+	defer l.l.Unlock()
+
+	// Check if we already hold the lock
+	if l.isHeld {
+		return ErrLockHeld
+	}
+
+	// Look for an existing lock
+	kv := l.c.KV()
+	pair, _, err := kv.Get(l.opts.Key, nil)
+	if err != nil {
+		return fmt.Errorf("failed to read lock: %v", err)
+	}
+
+	// Nothing to do if the lock does not exist
+	if pair == nil {
+		return nil
+	}
+
+	// Check for possible flag conflict
+	if pair.Flags != LockFlagValue {
+		return ErrLockConflict
+	}
+
+	// Check if it is in use
+	if pair.Session != "" {
+		return ErrLockInUse
+	}
+
+	// Attempt the delete
+	didRemove, _, err := kv.DeleteCAS(pair, nil)
+	if err != nil {
+		return fmt.Errorf("failed to remove lock: %v", err)
+	}
+	if !didRemove {
+		return ErrLockInUse
+	}
+	return nil
+}
+
+// createSession is used to create a new managed session
+func (l *Lock) createSession() (string, error) {
+	session := l.c.Session()
+	se := &SessionEntry{
+		Name: l.opts.SessionName,
+		TTL:  l.opts.SessionTTL,
+	}
+	id, _, err := session.Create(se, nil)
+	if err != nil {
+		return "", err
+	}
+	return id, nil
+}
+
+// lockEntry returns a formatted KVPair for the lock
+func (l *Lock) lockEntry(session string) *KVPair {
+	return &KVPair{
+		Key:     l.opts.Key,
+		Value:   l.opts.Value,
+		Session: session,
+		Flags:   LockFlagValue,
+	}
+}
+
+// monitorLock is a long-running routine that monitors lock ownership.
+// It closes the stopCh if we lose our leadership.
+func (l *Lock) monitorLock(session string, stopCh chan struct{}) {
+	defer close(stopCh)
+	kv := l.c.KV()
+	opts := &QueryOptions{RequireConsistent: true}
+WAIT:
+	retries := l.opts.MonitorRetries
+RETRY:
+	pair, meta, err := kv.Get(l.opts.Key, opts)
+	if err != nil {
+		// If configured we can try to ride out a brief Consul unavailability
+		// by doing retries. Note that we have to attempt the retry in a non-
+		// blocking fashion so that we have a clean place to reset the retry
+		// counter if service is restored.
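+		// For example, with MonitorRetries set to 3 and the default
+		// MonitorRetryTime of 2s, the monitor rides out roughly 6s of
+		// consecutive 500 responses before reporting the lock as lost.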
+ if retries > 0 && IsServerError(err) { + time.Sleep(l.opts.MonitorRetryTime) + retries-- + opts.WaitIndex = 0 + goto RETRY + } + return + } + if pair != nil && pair.Session == session { + opts.WaitIndex = meta.LastIndex + goto WAIT + } +} diff --git a/vendor/github.com/hashicorp/consul/api/lock_test.go b/vendor/github.com/hashicorp/consul/api/lock_test.go new file mode 100644 index 000000000..5cc74e19c --- /dev/null +++ b/vendor/github.com/hashicorp/consul/api/lock_test.go @@ -0,0 +1,560 @@ +package api + +import ( + "log" + "net/http" + "net/http/httptest" + "net/http/httputil" + "strings" + "sync" + "testing" + "time" +) + +func TestLock_LockUnlock(t *testing.T) { + t.Parallel() + c, s := makeClient(t) + defer s.Stop() + + lock, err := c.LockKey("test/lock") + if err != nil { + t.Fatalf("err: %v", err) + } + + // Initial unlock should fail + err = lock.Unlock() + if err != ErrLockNotHeld { + t.Fatalf("err: %v", err) + } + + // Should work + leaderCh, err := lock.Lock(nil) + if err != nil { + t.Fatalf("err: %v", err) + } + if leaderCh == nil { + t.Fatalf("not leader") + } + + // Double lock should fail + _, err = lock.Lock(nil) + if err != ErrLockHeld { + t.Fatalf("err: %v", err) + } + + // Should be leader + select { + case <-leaderCh: + t.Fatalf("should be leader") + default: + } + + // Initial unlock should work + err = lock.Unlock() + if err != nil { + t.Fatalf("err: %v", err) + } + + // Double unlock should fail + err = lock.Unlock() + if err != ErrLockNotHeld { + t.Fatalf("err: %v", err) + } + + // Should lose leadership + select { + case <-leaderCh: + case <-time.After(time.Second): + t.Fatalf("should not be leader") + } +} + +func TestLock_ForceInvalidate(t *testing.T) { + t.Parallel() + c, s := makeClient(t) + defer s.Stop() + + lock, err := c.LockKey("test/lock") + if err != nil { + t.Fatalf("err: %v", err) + } + + // Should work + leaderCh, err := lock.Lock(nil) + if err != nil { + t.Fatalf("err: %v", err) + } + if leaderCh == nil { + t.Fatalf("not leader") + } + defer lock.Unlock() + + go func() { + // Nuke the session, simulator an operator invalidation + // or a health check failure + session := c.Session() + session.Destroy(lock.lockSession, nil) + }() + + // Should loose leadership + select { + case <-leaderCh: + case <-time.After(time.Second): + t.Fatalf("should not be leader") + } +} + +func TestLock_DeleteKey(t *testing.T) { + t.Parallel() + c, s := makeClient(t) + defer s.Stop() + + // This uncovered some issues around special-case handling of low index + // numbers where it would work with a low number but fail for higher + // ones, so we loop this a bit to sweep the index up out of that + // territory. 
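+	// Each iteration acquires the lock, deletes the key out from under it,
+	// and expects leadership to be lost.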
+ for i := 0; i < 10; i++ { + func() { + lock, err := c.LockKey("test/lock") + if err != nil { + t.Fatalf("err: %v", err) + } + + // Should work + leaderCh, err := lock.Lock(nil) + if err != nil { + t.Fatalf("err: %v", err) + } + if leaderCh == nil { + t.Fatalf("not leader") + } + defer lock.Unlock() + + go func() { + // Nuke the key, simulate an operator intervention + kv := c.KV() + kv.Delete("test/lock", nil) + }() + + // Should loose leadership + select { + case <-leaderCh: + case <-time.After(time.Second): + t.Fatalf("should not be leader") + } + }() + } +} + +func TestLock_Contend(t *testing.T) { + t.Parallel() + c, s := makeClient(t) + defer s.Stop() + + wg := &sync.WaitGroup{} + acquired := make([]bool, 3) + for idx := range acquired { + wg.Add(1) + go func(idx int) { + defer wg.Done() + lock, err := c.LockKey("test/lock") + if err != nil { + t.Fatalf("err: %v", err) + } + + // Should work eventually, will contend + leaderCh, err := lock.Lock(nil) + if err != nil { + t.Fatalf("err: %v", err) + } + if leaderCh == nil { + t.Fatalf("not leader") + } + defer lock.Unlock() + log.Printf("Contender %d acquired", idx) + + // Set acquired and then leave + acquired[idx] = true + }(idx) + } + + // Wait for termination + doneCh := make(chan struct{}) + go func() { + wg.Wait() + close(doneCh) + }() + + // Wait for everybody to get a turn + select { + case <-doneCh: + case <-time.After(3 * DefaultLockRetryTime): + t.Fatalf("timeout") + } + + for idx, did := range acquired { + if !did { + t.Fatalf("contender %d never acquired", idx) + } + } +} + +func TestLock_Destroy(t *testing.T) { + t.Parallel() + c, s := makeClient(t) + defer s.Stop() + + lock, err := c.LockKey("test/lock") + if err != nil { + t.Fatalf("err: %v", err) + } + + // Should work + leaderCh, err := lock.Lock(nil) + if err != nil { + t.Fatalf("err: %v", err) + } + if leaderCh == nil { + t.Fatalf("not leader") + } + + // Destroy should fail + if err := lock.Destroy(); err != ErrLockHeld { + t.Fatalf("err: %v", err) + } + + // Should be able to release + err = lock.Unlock() + if err != nil { + t.Fatalf("err: %v", err) + } + + // Acquire with a different lock + l2, err := c.LockKey("test/lock") + if err != nil { + t.Fatalf("err: %v", err) + } + + // Should work + leaderCh, err = l2.Lock(nil) + if err != nil { + t.Fatalf("err: %v", err) + } + if leaderCh == nil { + t.Fatalf("not leader") + } + + // Destroy should still fail + if err := lock.Destroy(); err != ErrLockInUse { + t.Fatalf("err: %v", err) + } + + // Should release + err = l2.Unlock() + if err != nil { + t.Fatalf("err: %v", err) + } + + // Destroy should work + err = lock.Destroy() + if err != nil { + t.Fatalf("err: %v", err) + } + + // Double destroy should work + err = l2.Destroy() + if err != nil { + t.Fatalf("err: %v", err) + } +} + +func TestLock_Conflict(t *testing.T) { + t.Parallel() + c, s := makeClient(t) + defer s.Stop() + + sema, err := c.SemaphorePrefix("test/lock/", 2) + if err != nil { + t.Fatalf("err: %v", err) + } + + // Should work + lockCh, err := sema.Acquire(nil) + if err != nil { + t.Fatalf("err: %v", err) + } + if lockCh == nil { + t.Fatalf("not hold") + } + defer sema.Release() + + lock, err := c.LockKey("test/lock/.lock") + if err != nil { + t.Fatalf("err: %v", err) + } + + // Should conflict with semaphore + _, err = lock.Lock(nil) + if err != ErrLockConflict { + t.Fatalf("err: %v", err) + } + + // Should conflict with semaphore + err = lock.Destroy() + if err != ErrLockConflict { + t.Fatalf("err: %v", err) + } +} + +func TestLock_ReclaimLock(t 
*testing.T) { + t.Parallel() + c, s := makeClient(t) + defer s.Stop() + + session, _, err := c.Session().Create(&SessionEntry{}, nil) + if err != nil { + t.Fatalf("err: %v", err) + } + + lock, err := c.LockOpts(&LockOptions{Key: "test/lock", Session: session}) + if err != nil { + t.Fatalf("err: %v", err) + } + + // Should work + leaderCh, err := lock.Lock(nil) + if err != nil { + t.Fatalf("err: %v", err) + } + if leaderCh == nil { + t.Fatalf("not leader") + } + defer lock.Unlock() + + l2, err := c.LockOpts(&LockOptions{Key: "test/lock", Session: session}) + if err != nil { + t.Fatalf("err: %v", err) + } + + reclaimed := make(chan (<-chan struct{}), 1) + go func() { + l2Ch, err := l2.Lock(nil) + if err != nil { + t.Fatalf("not locked: %v", err) + } + reclaimed <- l2Ch + }() + + // Should reclaim the lock + var leader2Ch <-chan struct{} + + select { + case leader2Ch = <-reclaimed: + case <-time.After(time.Second): + t.Fatalf("should have locked") + } + + // unlock should work + err = l2.Unlock() + if err != nil { + t.Fatalf("err: %v", err) + } + + //Both locks should see the unlock + select { + case <-leader2Ch: + case <-time.After(time.Second): + t.Fatalf("should not be leader") + } + + select { + case <-leaderCh: + case <-time.After(time.Second): + t.Fatalf("should not be leader") + } +} + +func TestLock_MonitorRetry(t *testing.T) { + t.Parallel() + raw, s := makeClient(t) + defer s.Stop() + + // Set up a server that always responds with 500 errors. + failer := func(w http.ResponseWriter, req *http.Request) { + w.WriteHeader(500) + } + outage := httptest.NewServer(http.HandlerFunc(failer)) + defer outage.Close() + + // Set up a reverse proxy that will send some requests to the + // 500 server and pass everything else through to the real Consul + // server. + var mutex sync.Mutex + errors := 0 + director := func(req *http.Request) { + mutex.Lock() + defer mutex.Unlock() + + req.URL.Scheme = "http" + if errors > 0 && req.Method == "GET" && strings.Contains(req.URL.Path, "/v1/kv/test/lock") { + req.URL.Host = outage.URL[7:] // Strip off "http://". + errors-- + } else { + req.URL.Host = raw.config.Address + } + } + proxy := httptest.NewServer(&httputil.ReverseProxy{Director: director}) + defer proxy.Close() + + // Make another client that points at the proxy instead of the real + // Consul server. + config := raw.config + config.Address = proxy.URL[7:] // Strip off "http://". + c, err := NewClient(&config) + if err != nil { + t.Fatalf("err: %v", err) + } + + // Set up a lock with retries enabled. + opts := &LockOptions{ + Key: "test/lock", + SessionTTL: "60s", + MonitorRetries: 3, + } + lock, err := c.LockOpts(opts) + if err != nil { + t.Fatalf("err: %v", err) + } + + // Make sure the default got set. + if lock.opts.MonitorRetryTime != DefaultMonitorRetryTime { + t.Fatalf("bad: %d", lock.opts.MonitorRetryTime) + } + + // Now set a custom time for the test. + opts.MonitorRetryTime = 250 * time.Millisecond + lock, err = c.LockOpts(opts) + if err != nil { + t.Fatalf("err: %v", err) + } + if lock.opts.MonitorRetryTime != 250*time.Millisecond { + t.Fatalf("bad: %d", lock.opts.MonitorRetryTime) + } + + // Should get the lock. + leaderCh, err := lock.Lock(nil) + if err != nil { + t.Fatalf("err: %v", err) + } + if leaderCh == nil { + t.Fatalf("not leader") + } + + // Poke the key using the raw client to force the monitor to wake up + // and check the lock again. This time we will return errors for some + // of the responses. 
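+	// Two injected errors stay below the configured MonitorRetries of 3,
+	// so the monitor is expected to ride out this outage and keep the lock.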
+ mutex.Lock() + errors = 2 + mutex.Unlock() + pair, _, err := raw.KV().Get("test/lock", &QueryOptions{}) + if err != nil { + t.Fatalf("err: %v", err) + } + if _, err := raw.KV().Put(pair, &WriteOptions{}); err != nil { + t.Fatalf("err: %v", err) + } + time.Sleep(5 * opts.MonitorRetryTime) + + // Should still be the leader. + select { + case <-leaderCh: + t.Fatalf("should be leader") + default: + } + + // Now return an overwhelming number of errors. + mutex.Lock() + errors = 10 + mutex.Unlock() + if _, err := raw.KV().Put(pair, &WriteOptions{}); err != nil { + t.Fatalf("err: %v", err) + } + time.Sleep(5 * opts.MonitorRetryTime) + + // Should lose leadership. + select { + case <-leaderCh: + case <-time.After(time.Second): + t.Fatalf("should not be leader") + } +} + +func TestLock_OneShot(t *testing.T) { + t.Parallel() + c, s := makeClient(t) + defer s.Stop() + + // Set up a lock as a one-shot. + opts := &LockOptions{ + Key: "test/lock", + LockTryOnce: true, + } + lock, err := c.LockOpts(opts) + if err != nil { + t.Fatalf("err: %v", err) + } + + // Make sure the default got set. + if lock.opts.LockWaitTime != DefaultLockWaitTime { + t.Fatalf("bad: %d", lock.opts.LockWaitTime) + } + + // Now set a custom time for the test. + opts.LockWaitTime = 250 * time.Millisecond + lock, err = c.LockOpts(opts) + if err != nil { + t.Fatalf("err: %v", err) + } + if lock.opts.LockWaitTime != 250*time.Millisecond { + t.Fatalf("bad: %d", lock.opts.LockWaitTime) + } + + // Should get the lock. + ch, err := lock.Lock(nil) + if err != nil { + t.Fatalf("err: %v", err) + } + if ch == nil { + t.Fatalf("not leader") + } + + // Now try with another session. + contender, err := c.LockOpts(opts) + if err != nil { + t.Fatalf("err: %v", err) + } + start := time.Now() + ch, err = contender.Lock(nil) + if err != nil { + t.Fatalf("err: %v", err) + } + if ch != nil { + t.Fatalf("should not be leader") + } + diff := time.Now().Sub(start) + if diff < contender.opts.LockWaitTime || diff > 2*contender.opts.LockWaitTime { + t.Fatalf("time out of bounds: %9.6f", diff.Seconds()) + } + + // Unlock and then make sure the contender can get it. + if err := lock.Unlock(); err != nil { + t.Fatalf("err: %v", err) + } + ch, err = contender.Lock(nil) + if err != nil { + t.Fatalf("err: %v", err) + } + if ch == nil { + t.Fatalf("should be leader") + } +} diff --git a/vendor/github.com/hashicorp/consul/api/prepared_query.go b/vendor/github.com/hashicorp/consul/api/prepared_query.go new file mode 100644 index 000000000..c8141887c --- /dev/null +++ b/vendor/github.com/hashicorp/consul/api/prepared_query.go @@ -0,0 +1,173 @@ +package api + +// QueryDatacenterOptions sets options about how we fail over if there are no +// healthy nodes in the local datacenter. +type QueryDatacenterOptions struct { + // NearestN is set to the number of remote datacenters to try, based on + // network coordinates. + NearestN int + + // Datacenters is a fixed list of datacenters to try after NearestN. We + // never try a datacenter multiple times, so those are subtracted from + // this list before proceeding. + Datacenters []string +} + +// QueryDNSOptions controls settings when query results are served over DNS. +type QueryDNSOptions struct { + // TTL is the time to live for the served DNS results. + TTL string +} + +// ServiceQuery is used to query for a set of healthy nodes offering a specific +// service. +type ServiceQuery struct { + // Service is the service to query. 
+	Service string
+
+	// Failover controls what we do if there are no healthy nodes in the
+	// local datacenter.
+	Failover QueryDatacenterOptions
+
+	// If OnlyPassing is true then we will only include nodes with passing
+	// health checks (critical AND warning checks will cause a node to be
+	// discarded).
+	OnlyPassing bool
+
+	// Tags are a set of required and/or disallowed tags. If a tag is in
+	// this list it must be present. If the tag is preceded with "!" then
+	// it is disallowed.
+	Tags []string
+}
+
+// PreparedQueryDefinition defines a complete prepared query.
+type PreparedQueryDefinition struct {
+	// ID is the UUID-based ID for the query, always generated by Consul.
+	ID string
+
+	// Name is an optional friendly name for the query supplied by the
+	// user. NOTE - if this feature is used then it will reduce the security
+	// of any read ACL associated with this query/service since this name
+	// can be used to locate nodes without supplying any ACL.
+	Name string
+
+	// Session is an optional session to tie this query's lifetime to. If
+	// this is omitted then the query will not expire.
+	Session string
+
+	// Token is the ACL token used when the query was created, and it is
+	// used when a query is subsequently executed. This token, or a token
+	// with management privileges, must be used to change the query later.
+	Token string
+
+	// Service defines a service query (leaving things open for other types
+	// later).
+	Service ServiceQuery
+
+	// DNS has options that control how the results of this query are
+	// served over DNS.
+	DNS QueryDNSOptions
+}
+
+// PreparedQueryExecuteResponse has the results of executing a query.
+type PreparedQueryExecuteResponse struct {
+	// Service is the service that was queried.
+	Service string
+
+	// Nodes has the nodes that were output by the query.
+	Nodes []ServiceEntry
+
+	// DNS has the options for serving these results over DNS.
+	DNS QueryDNSOptions
+
+	// Datacenter is the datacenter that these results came from.
+	Datacenter string
+
+	// Failovers is a count of how many times we had to query a remote
+	// datacenter.
+	Failovers int
+}
+
+// PreparedQuery can be used to query the prepared query endpoints.
+type PreparedQuery struct {
+	c *Client
+}
+
+// PreparedQuery returns a handle to the prepared query endpoints.
+func (c *Client) PreparedQuery() *PreparedQuery {
+	return &PreparedQuery{c}
+}
+
+// Create makes a new prepared query. The ID of the new query is returned.
+func (c *PreparedQuery) Create(query *PreparedQueryDefinition, q *WriteOptions) (string, *WriteMeta, error) {
+	r := c.c.newRequest("POST", "/v1/query")
+	r.setWriteOptions(q)
+	r.obj = query
+	rtt, resp, err := requireOK(c.c.doRequest(r))
+	if err != nil {
+		return "", nil, err
+	}
+	defer resp.Body.Close()
+
+	wm := &WriteMeta{}
+	wm.RequestTime = rtt
+
+	var out struct{ ID string }
+	if err := decodeBody(resp, &out); err != nil {
+		return "", nil, err
+	}
+	return out.ID, wm, nil
+}
+
+// Update makes updates to an existing prepared query.
+func (c *PreparedQuery) Update(query *PreparedQueryDefinition, q *WriteOptions) (*WriteMeta, error) {
+	return c.c.write("/v1/query/"+query.ID, query, nil, q)
+}
+
+// List is used to fetch all the prepared queries (always requires a management
+// token).
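+//
+// A short sketch (illustrative; assumes client is a *Client whose token
+// has management privileges):
+//
+//	pq := client.PreparedQuery()
+//	defs, _, err := pq.List(nil)
+//	for _, def := range defs {
+//		fmt.Println(def.ID, def.Name)
+//	}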
+func (c *PreparedQuery) List(q *QueryOptions) ([]*PreparedQueryDefinition, *QueryMeta, error) { + var out []*PreparedQueryDefinition + qm, err := c.c.query("/v1/query", &out, q) + if err != nil { + return nil, nil, err + } + return out, qm, nil +} + +// Get is used to fetch a specific prepared query. +func (c *PreparedQuery) Get(queryID string, q *QueryOptions) ([]*PreparedQueryDefinition, *QueryMeta, error) { + var out []*PreparedQueryDefinition + qm, err := c.c.query("/v1/query/"+queryID, &out, q) + if err != nil { + return nil, nil, err + } + return out, qm, nil +} + +// Delete is used to delete a specific prepared query. +func (c *PreparedQuery) Delete(queryID string, q *QueryOptions) (*QueryMeta, error) { + r := c.c.newRequest("DELETE", "/v1/query/"+queryID) + r.setQueryOptions(q) + rtt, resp, err := requireOK(c.c.doRequest(r)) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + qm := &QueryMeta{} + parseQueryMeta(resp, qm) + qm.RequestTime = rtt + return qm, nil +} + +// Execute is used to execute a specific prepared query. You can execute using +// a query ID or name. +func (c *PreparedQuery) Execute(queryIDOrName string, q *QueryOptions) (*PreparedQueryExecuteResponse, *QueryMeta, error) { + var out *PreparedQueryExecuteResponse + qm, err := c.c.query("/v1/query/"+queryIDOrName+"/execute", &out, q) + if err != nil { + return nil, nil, err + } + return out, qm, nil +} diff --git a/vendor/github.com/hashicorp/consul/api/prepared_query_test.go b/vendor/github.com/hashicorp/consul/api/prepared_query_test.go new file mode 100644 index 000000000..011bfb195 --- /dev/null +++ b/vendor/github.com/hashicorp/consul/api/prepared_query_test.go @@ -0,0 +1,123 @@ +package api + +import ( + "reflect" + "testing" + + "github.com/hashicorp/consul/testutil" +) + +func TestPreparedQuery(t *testing.T) { + t.Parallel() + c, s := makeClient(t) + defer s.Stop() + + // Set up a node and a service. + reg := &CatalogRegistration{ + Datacenter: "dc1", + Node: "foobar", + Address: "192.168.10.10", + Service: &AgentService{ + ID: "redis1", + Service: "redis", + Tags: []string{"master", "v1"}, + Port: 8000, + }, + } + + catalog := c.Catalog() + testutil.WaitForResult(func() (bool, error) { + if _, err := catalog.Register(reg, nil); err != nil { + return false, err + } + + if _, _, err := catalog.Node("foobar", nil); err != nil { + return false, err + } + + return true, nil + }, func(err error) { + t.Fatalf("err: %s", err) + }) + + // Create a simple prepared query. + def := &PreparedQueryDefinition{ + Service: ServiceQuery{ + Service: "redis", + }, + } + + query := c.PreparedQuery() + var err error + def.ID, _, err = query.Create(def, nil) + if err != nil { + t.Fatalf("err: %s", err) + } + + // Read it back. + defs, _, err := query.Get(def.ID, nil) + if err != nil { + t.Fatalf("err: %s", err) + } + if len(defs) != 1 || !reflect.DeepEqual(defs[0], def) { + t.Fatalf("bad: %v", defs) + } + + // List them all. + defs, _, err = query.List(nil) + if err != nil { + t.Fatalf("err: %s", err) + } + if len(defs) != 1 || !reflect.DeepEqual(defs[0], def) { + t.Fatalf("bad: %v", defs) + } + + // Make an update. + def.Name = "my-query" + _, err = query.Update(def, nil) + if err != nil { + t.Fatalf("err: %s", err) + } + + // Read it back again to verify the update worked. + defs, _, err = query.Get(def.ID, nil) + if err != nil { + t.Fatalf("err: %s", err) + } + if len(defs) != 1 || !reflect.DeepEqual(defs[0], def) { + t.Fatalf("bad: %v", defs) + } + + // Execute by ID. 
+	results, _, err := query.Execute(def.ID, nil)
+	if err != nil {
+		t.Fatalf("err: %s", err)
+	}
+	if len(results.Nodes) != 1 || results.Nodes[0].Node.Node != "foobar" {
+		t.Fatalf("bad: %v", results)
+	}
+
+	// Execute by name.
+	results, _, err = query.Execute("my-query", nil)
+	if err != nil {
+		t.Fatalf("err: %s", err)
+	}
+	if len(results.Nodes) != 1 || results.Nodes[0].Node.Node != "foobar" {
+		t.Fatalf("bad: %v", results)
+	}
+
+	// Delete it.
+	_, err = query.Delete(def.ID, nil)
+	if err != nil {
+		t.Fatalf("err: %s", err)
+	}
+
+	// Make sure there are no longer any queries.
+	defs, _, err = query.List(nil)
+	if err != nil {
+		t.Fatalf("err: %s", err)
+	}
+	if len(defs) != 0 {
+		t.Fatalf("bad: %v", defs)
+	}
+}
diff --git a/vendor/github.com/hashicorp/consul/api/raw.go b/vendor/github.com/hashicorp/consul/api/raw.go
new file mode 100644
index 000000000..745a208c9
--- /dev/null
+++ b/vendor/github.com/hashicorp/consul/api/raw.go
@@ -0,0 +1,24 @@
+package api
+
+// Raw can be used to do raw queries against custom endpoints
+type Raw struct {
+	c *Client
+}
+
+// Raw returns a handle to query endpoints
+func (c *Client) Raw() *Raw {
+	return &Raw{c}
+}
+
+// Query is used to do a GET request against an endpoint
+// and deserialize the response into an interface using
+// standard Consul conventions.
+func (raw *Raw) Query(endpoint string, out interface{}, q *QueryOptions) (*QueryMeta, error) {
+	return raw.c.query(endpoint, out, q)
+}
+
+// Write is used to do a PUT request against an endpoint
+// and serialize/deserialize using the standard Consul conventions.
+func (raw *Raw) Write(endpoint string, in, out interface{}, q *WriteOptions) (*WriteMeta, error) {
+	return raw.c.write(endpoint, in, out, q)
+}
diff --git a/vendor/github.com/hashicorp/consul/api/semaphore.go b/vendor/github.com/hashicorp/consul/api/semaphore.go
new file mode 100644
index 000000000..e6645ac1d
--- /dev/null
+++ b/vendor/github.com/hashicorp/consul/api/semaphore.go
@@ -0,0 +1,512 @@
+package api
+
+import (
+	"encoding/json"
+	"fmt"
+	"path"
+	"sync"
+	"time"
+)
+
+const (
+	// DefaultSemaphoreSessionName is the Session Name we assign if none is provided
+	DefaultSemaphoreSessionName = "Consul API Semaphore"
+
+	// DefaultSemaphoreSessionTTL is the default session TTL if no Session is provided
+	// when creating a new Semaphore. This is used because we do not have any
+	// other check to depend upon.
+	DefaultSemaphoreSessionTTL = "15s"
+
+	// DefaultSemaphoreWaitTime is how long we block at a time to check if semaphore
+	// acquisition is possible. This affects the minimum time it takes to cancel
+	// a Semaphore acquisition.
+	DefaultSemaphoreWaitTime = 15 * time.Second
+
+	// DefaultSemaphoreKey is the key used within the prefix for
+	// coordination between all the contenders.
+	DefaultSemaphoreKey = ".lock"
+
+	// SemaphoreFlagValue is a magic flag we set to indicate a key
+	// is being used for a semaphore. It is used to detect a potential
+	// conflict with a lock.
+	SemaphoreFlagValue = 0xe0f69a2baa414de0
+)
+
+var (
+	// ErrSemaphoreHeld is returned if we attempt to double lock
+	ErrSemaphoreHeld = fmt.Errorf("Semaphore already held")
+
+	// ErrSemaphoreNotHeld is returned if we attempt to unlock a semaphore
+	// that we do not hold.
+	ErrSemaphoreNotHeld = fmt.Errorf("Semaphore not held")
+
+	// ErrSemaphoreInUse is returned if we attempt to destroy a semaphore
+	// that is in use.
+	ErrSemaphoreInUse = fmt.Errorf("Semaphore in use")
+
+	// ErrSemaphoreConflict is returned if the flags on a key
+	// used for a semaphore do not match expectation
+	ErrSemaphoreConflict = fmt.Errorf("Existing key does not match semaphore use")
+)
+
+// Semaphore is used to implement a distributed semaphore
+// using the Consul KV primitives.
+type Semaphore struct {
+	c    *Client
+	opts *SemaphoreOptions
+
+	isHeld       bool
+	sessionRenew chan struct{}
+	lockSession  string
+	l            sync.Mutex
+}
+
+// SemaphoreOptions is used to parameterize the Semaphore
+type SemaphoreOptions struct {
+	Prefix            string        // Must be set and have write permissions
+	Limit             int           // Must be set, and be positive
+	Value             []byte        // Optional, value to associate with the contender entry
+	Session           string        // Optional, created if not specified
+	SessionName       string        // Optional, defaults to DefaultSemaphoreSessionName
+	SessionTTL        string        // Optional, defaults to DefaultSemaphoreSessionTTL
+	MonitorRetries    int           // Optional, defaults to 0 which means no retries
+	MonitorRetryTime  time.Duration // Optional, defaults to DefaultMonitorRetryTime
+	SemaphoreWaitTime time.Duration // Optional, defaults to DefaultSemaphoreWaitTime
+	SemaphoreTryOnce  bool          // Optional, defaults to false which means try forever
+}
+
+// semaphoreLock is written under the DefaultSemaphoreKey and
+// is used to coordinate between all the contenders.
+type semaphoreLock struct {
+	// Limit is the integer limit of holders. This is used to
+	// verify that all the holders agree on the value.
+	Limit int
+
+	// Holders is a list of all the semaphore holders.
+	// It maps each session ID to true and is effectively used as a set.
+	Holders map[string]bool
+}
+
+// SemaphorePrefix is used to create a Semaphore which will operate
+// at the given KV prefix and use the given limit for the semaphore.
+// The prefix must have write privileges, and the limit must be agreed
+// upon by all contenders.
+func (c *Client) SemaphorePrefix(prefix string, limit int) (*Semaphore, error) {
+	opts := &SemaphoreOptions{
+		Prefix: prefix,
+		Limit:  limit,
+	}
+	return c.SemaphoreOpts(opts)
+}
+
+// SemaphoreOpts is used to create a Semaphore with the given options.
+// The prefix must have write privileges, and the limit must be agreed
+// upon by all contenders. If a Session is not provided, one will be created.
+func (c *Client) SemaphoreOpts(opts *SemaphoreOptions) (*Semaphore, error) {
+	if opts.Prefix == "" {
+		return nil, fmt.Errorf("missing prefix")
+	}
+	if opts.Limit <= 0 {
+		return nil, fmt.Errorf("semaphore limit must be positive")
+	}
+	if opts.SessionName == "" {
+		opts.SessionName = DefaultSemaphoreSessionName
+	}
+	if opts.SessionTTL == "" {
+		opts.SessionTTL = DefaultSemaphoreSessionTTL
+	} else {
+		if _, err := time.ParseDuration(opts.SessionTTL); err != nil {
+			return nil, fmt.Errorf("invalid SessionTTL: %v", err)
+		}
+	}
+	if opts.MonitorRetryTime == 0 {
+		opts.MonitorRetryTime = DefaultMonitorRetryTime
+	}
+	if opts.SemaphoreWaitTime == 0 {
+		opts.SemaphoreWaitTime = DefaultSemaphoreWaitTime
+	}
+	s := &Semaphore{
+		c:    c,
+		opts: opts,
+	}
+	return s, nil
+}
+
+// Acquire attempts to reserve a slot in the semaphore, blocking until a
+// slot is reserved, the stopCh is closed, or an error is encountered.
+// Providing a non-nil stopCh can be used to abort the attempt.
+// On success, a channel is returned that represents our slot.
+// This channel could be closed at any time due to session invalidation,
+// communication errors, operator intervention, etc.
It is NOT safe to +// assume that the slot is held until Release() unless the Session is specifically +// created without any associated health checks. By default Consul sessions +// prefer liveness over safety and an application must be able to handle +// the session being lost. +func (s *Semaphore) Acquire(stopCh <-chan struct{}) (<-chan struct{}, error) { + // Hold the lock as we try to acquire + s.l.Lock() + defer s.l.Unlock() + + // Check if we already hold the semaphore + if s.isHeld { + return nil, ErrSemaphoreHeld + } + + // Check if we need to create a session first + s.lockSession = s.opts.Session + if s.lockSession == "" { + if sess, err := s.createSession(); err != nil { + return nil, fmt.Errorf("failed to create session: %v", err) + } else { + s.sessionRenew = make(chan struct{}) + s.lockSession = sess + session := s.c.Session() + go session.RenewPeriodic(s.opts.SessionTTL, sess, nil, s.sessionRenew) + + // If we fail to acquire the lock, cleanup the session + defer func() { + if !s.isHeld { + close(s.sessionRenew) + s.sessionRenew = nil + } + }() + } + } + + // Create the contender entry + kv := s.c.KV() + made, _, err := kv.Acquire(s.contenderEntry(s.lockSession), nil) + if err != nil || !made { + return nil, fmt.Errorf("failed to make contender entry: %v", err) + } + + // Setup the query options + qOpts := &QueryOptions{ + WaitTime: s.opts.SemaphoreWaitTime, + } + + start := time.Now() + attempts := 0 +WAIT: + // Check if we should quit + select { + case <-stopCh: + return nil, nil + default: + } + + // Handle the one-shot mode. + if s.opts.SemaphoreTryOnce && attempts > 0 { + elapsed := time.Now().Sub(start) + if elapsed > qOpts.WaitTime { + return nil, nil + } + + qOpts.WaitTime -= elapsed + } + attempts++ + + // Read the prefix + pairs, meta, err := kv.List(s.opts.Prefix, qOpts) + if err != nil { + return nil, fmt.Errorf("failed to read prefix: %v", err) + } + + // Decode the lock + lockPair := s.findLock(pairs) + if lockPair.Flags != SemaphoreFlagValue { + return nil, ErrSemaphoreConflict + } + lock, err := s.decodeLock(lockPair) + if err != nil { + return nil, err + } + + // Verify we agree with the limit + if lock.Limit != s.opts.Limit { + return nil, fmt.Errorf("semaphore limit conflict (lock: %d, local: %d)", + lock.Limit, s.opts.Limit) + } + + // Prune the dead holders + s.pruneDeadHolders(lock, pairs) + + // Check if the lock is held + if len(lock.Holders) >= lock.Limit { + qOpts.WaitIndex = meta.LastIndex + goto WAIT + } + + // Create a new lock with us as a holder + lock.Holders[s.lockSession] = true + newLock, err := s.encodeLock(lock, lockPair.ModifyIndex) + if err != nil { + return nil, err + } + + // Attempt the acquisition + didSet, _, err := kv.CAS(newLock, nil) + if err != nil { + return nil, fmt.Errorf("failed to update lock: %v", err) + } + if !didSet { + // Update failed, could have been a race with another contender, + // retry the operation + goto WAIT + } + + // Watch to ensure we maintain ownership of the slot + lockCh := make(chan struct{}) + go s.monitorLock(s.lockSession, lockCh) + + // Set that we own the lock + s.isHeld = true + + // Acquired! All done + return lockCh, nil +} + +// Release is used to voluntarily give up our semaphore slot. It is +// an error to call this if the semaphore has not been acquired. 
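+//
+// A minimal acquire/release sketch (illustrative; assumes client is a
+// *Client, error handling elided):
+//
+//	sema, _ := client.SemaphorePrefix("service/workers/", 3)
+//	slotCh, err := sema.Acquire(nil)
+//	if err == nil && slotCh != nil {
+//		defer sema.Release()
+//		// hold the slot; slotCh is closed if the slot is lost
+//	}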
+func (s *Semaphore) Release() error { + // Hold the lock as we try to release + s.l.Lock() + defer s.l.Unlock() + + // Ensure the lock is actually held + if !s.isHeld { + return ErrSemaphoreNotHeld + } + + // Set that we no longer own the lock + s.isHeld = false + + // Stop the session renew + if s.sessionRenew != nil { + defer func() { + close(s.sessionRenew) + s.sessionRenew = nil + }() + } + + // Get and clear the lock session + lockSession := s.lockSession + s.lockSession = "" + + // Remove ourselves as a lock holder + kv := s.c.KV() + key := path.Join(s.opts.Prefix, DefaultSemaphoreKey) +READ: + pair, _, err := kv.Get(key, nil) + if err != nil { + return err + } + if pair == nil { + pair = &KVPair{} + } + lock, err := s.decodeLock(pair) + if err != nil { + return err + } + + // Create a new lock without us as a holder + if _, ok := lock.Holders[lockSession]; ok { + delete(lock.Holders, lockSession) + newLock, err := s.encodeLock(lock, pair.ModifyIndex) + if err != nil { + return err + } + + // Swap the locks + didSet, _, err := kv.CAS(newLock, nil) + if err != nil { + return fmt.Errorf("failed to update lock: %v", err) + } + if !didSet { + goto READ + } + } + + // Destroy the contender entry + contenderKey := path.Join(s.opts.Prefix, lockSession) + if _, err := kv.Delete(contenderKey, nil); err != nil { + return err + } + return nil +} + +// Destroy is used to cleanup the semaphore entry. It is not necessary +// to invoke. It will fail if the semaphore is in use. +func (s *Semaphore) Destroy() error { + // Hold the lock as we try to acquire + s.l.Lock() + defer s.l.Unlock() + + // Check if we already hold the semaphore + if s.isHeld { + return ErrSemaphoreHeld + } + + // List for the semaphore + kv := s.c.KV() + pairs, _, err := kv.List(s.opts.Prefix, nil) + if err != nil { + return fmt.Errorf("failed to read prefix: %v", err) + } + + // Find the lock pair, bail if it doesn't exist + lockPair := s.findLock(pairs) + if lockPair.ModifyIndex == 0 { + return nil + } + if lockPair.Flags != SemaphoreFlagValue { + return ErrSemaphoreConflict + } + + // Decode the lock + lock, err := s.decodeLock(lockPair) + if err != nil { + return err + } + + // Prune the dead holders + s.pruneDeadHolders(lock, pairs) + + // Check if there are any holders + if len(lock.Holders) > 0 { + return ErrSemaphoreInUse + } + + // Attempt the delete + didRemove, _, err := kv.DeleteCAS(lockPair, nil) + if err != nil { + return fmt.Errorf("failed to remove semaphore: %v", err) + } + if !didRemove { + return ErrSemaphoreInUse + } + return nil +} + +// createSession is used to create a new managed session +func (s *Semaphore) createSession() (string, error) { + session := s.c.Session() + se := &SessionEntry{ + Name: s.opts.SessionName, + TTL: s.opts.SessionTTL, + Behavior: SessionBehaviorDelete, + } + id, _, err := session.Create(se, nil) + if err != nil { + return "", err + } + return id, nil +} + +// contenderEntry returns a formatted KVPair for the contender +func (s *Semaphore) contenderEntry(session string) *KVPair { + return &KVPair{ + Key: path.Join(s.opts.Prefix, session), + Value: s.opts.Value, + Session: session, + Flags: SemaphoreFlagValue, + } +} + +// findLock is used to find the KV Pair which is used for coordination +func (s *Semaphore) findLock(pairs KVPairs) *KVPair { + key := path.Join(s.opts.Prefix, DefaultSemaphoreKey) + for _, pair := range pairs { + if pair.Key == key { + return pair + } + } + return &KVPair{Flags: SemaphoreFlagValue} +} + +// decodeLock is used to decode a semaphoreLock from an 
+// entry in Consul +func (s *Semaphore) decodeLock(pair *KVPair) (*semaphoreLock, error) { + // Handle if there is no lock + if pair == nil || pair.Value == nil { + return &semaphoreLock{ + Limit: s.opts.Limit, + Holders: make(map[string]bool), + }, nil + } + + l := &semaphoreLock{} + if err := json.Unmarshal(pair.Value, l); err != nil { + return nil, fmt.Errorf("lock decoding failed: %v", err) + } + return l, nil +} + +// encodeLock is used to encode a semaphoreLock into a KVPair +// that can be PUT +func (s *Semaphore) encodeLock(l *semaphoreLock, oldIndex uint64) (*KVPair, error) { + enc, err := json.Marshal(l) + if err != nil { + return nil, fmt.Errorf("lock encoding failed: %v", err) + } + pair := &KVPair{ + Key: path.Join(s.opts.Prefix, DefaultSemaphoreKey), + Value: enc, + Flags: SemaphoreFlagValue, + ModifyIndex: oldIndex, + } + return pair, nil +} + +// pruneDeadHolders is used to remove all the dead lock holders +func (s *Semaphore) pruneDeadHolders(lock *semaphoreLock, pairs KVPairs) { + // Gather all the live holders + alive := make(map[string]struct{}, len(pairs)) + for _, pair := range pairs { + if pair.Session != "" { + alive[pair.Session] = struct{}{} + } + } + + // Remove any holders that are dead + for holder := range lock.Holders { + if _, ok := alive[holder]; !ok { + delete(lock.Holders, holder) + } + } +} + +// monitorLock is a long running routine to monitor a semaphore ownership +// It closes the stopCh if we lose our slot. +func (s *Semaphore) monitorLock(session string, stopCh chan struct{}) { + defer close(stopCh) + kv := s.c.KV() + opts := &QueryOptions{RequireConsistent: true} +WAIT: + retries := s.opts.MonitorRetries +RETRY: + pairs, meta, err := kv.List(s.opts.Prefix, opts) + if err != nil { + // If configured we can try to ride out a brief Consul unavailability + // by doing retries. Note that we have to attempt the retry in a non- + // blocking fashion so that we have a clean place to reset the retry + // counter if service is restored. 
+ if retries > 0 && IsServerError(err) { + time.Sleep(s.opts.MonitorRetryTime) + retries-- + opts.WaitIndex = 0 + goto RETRY + } + return + } + lockPair := s.findLock(pairs) + lock, err := s.decodeLock(lockPair) + if err != nil { + return + } + s.pruneDeadHolders(lock, pairs) + if _, ok := lock.Holders[session]; ok { + opts.WaitIndex = meta.LastIndex + goto WAIT + } +} diff --git a/vendor/github.com/hashicorp/consul/api/semaphore_test.go b/vendor/github.com/hashicorp/consul/api/semaphore_test.go new file mode 100644 index 000000000..4cec164f4 --- /dev/null +++ b/vendor/github.com/hashicorp/consul/api/semaphore_test.go @@ -0,0 +1,518 @@ +package api + +import ( + "log" + "net/http" + "net/http/httptest" + "net/http/httputil" + "strings" + "sync" + "testing" + "time" +) + +func TestSemaphore_AcquireRelease(t *testing.T) { + t.Parallel() + c, s := makeClient(t) + defer s.Stop() + + sema, err := c.SemaphorePrefix("test/semaphore", 2) + if err != nil { + t.Fatalf("err: %v", err) + } + + // Initial release should fail + err = sema.Release() + if err != ErrSemaphoreNotHeld { + t.Fatalf("err: %v", err) + } + + // Should work + lockCh, err := sema.Acquire(nil) + if err != nil { + t.Fatalf("err: %v", err) + } + if lockCh == nil { + t.Fatalf("not hold") + } + + // Double lock should fail + _, err = sema.Acquire(nil) + if err != ErrSemaphoreHeld { + t.Fatalf("err: %v", err) + } + + // Should be held + select { + case <-lockCh: + t.Fatalf("should be held") + default: + } + + // Initial release should work + err = sema.Release() + if err != nil { + t.Fatalf("err: %v", err) + } + + // Double unlock should fail + err = sema.Release() + if err != ErrSemaphoreNotHeld { + t.Fatalf("err: %v", err) + } + + // Should lose resource + select { + case <-lockCh: + case <-time.After(time.Second): + t.Fatalf("should not be held") + } +} + +func TestSemaphore_ForceInvalidate(t *testing.T) { + t.Parallel() + c, s := makeClient(t) + defer s.Stop() + + sema, err := c.SemaphorePrefix("test/semaphore", 2) + if err != nil { + t.Fatalf("err: %v", err) + } + + // Should work + lockCh, err := sema.Acquire(nil) + if err != nil { + t.Fatalf("err: %v", err) + } + if lockCh == nil { + t.Fatalf("not acquired") + } + defer sema.Release() + + go func() { + // Nuke the session, simulator an operator invalidation + // or a health check failure + session := c.Session() + session.Destroy(sema.lockSession, nil) + }() + + // Should loose slot + select { + case <-lockCh: + case <-time.After(time.Second): + t.Fatalf("should not be locked") + } +} + +func TestSemaphore_DeleteKey(t *testing.T) { + t.Parallel() + c, s := makeClient(t) + defer s.Stop() + + sema, err := c.SemaphorePrefix("test/semaphore", 2) + if err != nil { + t.Fatalf("err: %v", err) + } + + // Should work + lockCh, err := sema.Acquire(nil) + if err != nil { + t.Fatalf("err: %v", err) + } + if lockCh == nil { + t.Fatalf("not locked") + } + defer sema.Release() + + go func() { + // Nuke the key, simulate an operator intervention + kv := c.KV() + kv.DeleteTree("test/semaphore", nil) + }() + + // Should loose leadership + select { + case <-lockCh: + case <-time.After(time.Second): + t.Fatalf("should not be locked") + } +} + +func TestSemaphore_Contend(t *testing.T) { + t.Parallel() + c, s := makeClient(t) + defer s.Stop() + + wg := &sync.WaitGroup{} + acquired := make([]bool, 4) + for idx := range acquired { + wg.Add(1) + go func(idx int) { + defer wg.Done() + sema, err := c.SemaphorePrefix("test/semaphore", 2) + if err != nil { + t.Fatalf("err: %v", err) + } + + // Should work 
eventually, will contend + lockCh, err := sema.Acquire(nil) + if err != nil { + t.Fatalf("err: %v", err) + } + if lockCh == nil { + t.Fatalf("not locked") + } + defer sema.Release() + log.Printf("Contender %d acquired", idx) + + // Set acquired and then leave + acquired[idx] = true + }(idx) + } + + // Wait for termination + doneCh := make(chan struct{}) + go func() { + wg.Wait() + close(doneCh) + }() + + // Wait for everybody to get a turn + select { + case <-doneCh: + case <-time.After(3 * DefaultLockRetryTime): + t.Fatalf("timeout") + } + + for idx, did := range acquired { + if !did { + t.Fatalf("contender %d never acquired", idx) + } + } +} + +func TestSemaphore_BadLimit(t *testing.T) { + t.Parallel() + c, s := makeClient(t) + defer s.Stop() + + sema, err := c.SemaphorePrefix("test/semaphore", 0) + if err == nil { + t.Fatalf("should error") + } + + sema, err = c.SemaphorePrefix("test/semaphore", 1) + if err != nil { + t.Fatalf("err: %v", err) + } + + _, err = sema.Acquire(nil) + if err != nil { + t.Fatalf("err: %v", err) + } + + sema2, err := c.SemaphorePrefix("test/semaphore", 2) + if err != nil { + t.Fatalf("err: %v", err) + } + + _, err = sema2.Acquire(nil) + if err.Error() != "semaphore limit conflict (lock: 1, local: 2)" { + t.Fatalf("err: %v", err) + } +} + +func TestSemaphore_Destroy(t *testing.T) { + t.Parallel() + c, s := makeClient(t) + defer s.Stop() + + sema, err := c.SemaphorePrefix("test/semaphore", 2) + if err != nil { + t.Fatalf("err: %v", err) + } + + sema2, err := c.SemaphorePrefix("test/semaphore", 2) + if err != nil { + t.Fatalf("err: %v", err) + } + + _, err = sema.Acquire(nil) + if err != nil { + t.Fatalf("err: %v", err) + } + + _, err = sema2.Acquire(nil) + if err != nil { + t.Fatalf("err: %v", err) + } + + // Destroy should fail, still held + if err := sema.Destroy(); err != ErrSemaphoreHeld { + t.Fatalf("err: %v", err) + } + + err = sema.Release() + if err != nil { + t.Fatalf("err: %v", err) + } + + // Destroy should fail, still in use + if err := sema.Destroy(); err != ErrSemaphoreInUse { + t.Fatalf("err: %v", err) + } + + err = sema2.Release() + if err != nil { + t.Fatalf("err: %v", err) + } + + // Destroy should work + if err := sema.Destroy(); err != nil { + t.Fatalf("err: %v", err) + } + + // Destroy should work + if err := sema2.Destroy(); err != nil { + t.Fatalf("err: %v", err) + } +} + +func TestSemaphore_Conflict(t *testing.T) { + t.Parallel() + c, s := makeClient(t) + defer s.Stop() + + lock, err := c.LockKey("test/sema/.lock") + if err != nil { + t.Fatalf("err: %v", err) + } + + // Should work + leaderCh, err := lock.Lock(nil) + if err != nil { + t.Fatalf("err: %v", err) + } + if leaderCh == nil { + t.Fatalf("not leader") + } + defer lock.Unlock() + + sema, err := c.SemaphorePrefix("test/sema/", 2) + if err != nil { + t.Fatalf("err: %v", err) + } + + // Should conflict with lock + _, err = sema.Acquire(nil) + if err != ErrSemaphoreConflict { + t.Fatalf("err: %v", err) + } + + // Should conflict with lock + err = sema.Destroy() + if err != ErrSemaphoreConflict { + t.Fatalf("err: %v", err) + } +} + +func TestSemaphore_MonitorRetry(t *testing.T) { + t.Parallel() + raw, s := makeClient(t) + defer s.Stop() + + // Set up a server that always responds with 500 errors. 
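+	// Together with the reverse proxy below, this lets the test inject a
+	// controlled number of 500s into KV reads without touching the real
+	// Consul server.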
+ failer := func(w http.ResponseWriter, req *http.Request) { + w.WriteHeader(500) + } + outage := httptest.NewServer(http.HandlerFunc(failer)) + defer outage.Close() + + // Set up a reverse proxy that will send some requests to the + // 500 server and pass everything else through to the real Consul + // server. + var mutex sync.Mutex + errors := 0 + director := func(req *http.Request) { + mutex.Lock() + defer mutex.Unlock() + + req.URL.Scheme = "http" + if errors > 0 && req.Method == "GET" && strings.Contains(req.URL.Path, "/v1/kv/test/sema/.lock") { + req.URL.Host = outage.URL[7:] // Strip off "http://". + errors-- + } else { + req.URL.Host = raw.config.Address + } + } + proxy := httptest.NewServer(&httputil.ReverseProxy{Director: director}) + defer proxy.Close() + + // Make another client that points at the proxy instead of the real + // Consul server. + config := raw.config + config.Address = proxy.URL[7:] // Strip off "http://". + c, err := NewClient(&config) + if err != nil { + t.Fatalf("err: %v", err) + } + + // Set up a lock with retries enabled. + opts := &SemaphoreOptions{ + Prefix: "test/sema/.lock", + Limit: 2, + SessionTTL: "60s", + MonitorRetries: 3, + } + sema, err := c.SemaphoreOpts(opts) + if err != nil { + t.Fatalf("err: %v", err) + } + + // Make sure the default got set. + if sema.opts.MonitorRetryTime != DefaultMonitorRetryTime { + t.Fatalf("bad: %d", sema.opts.MonitorRetryTime) + } + + // Now set a custom time for the test. + opts.MonitorRetryTime = 250 * time.Millisecond + sema, err = c.SemaphoreOpts(opts) + if err != nil { + t.Fatalf("err: %v", err) + } + if sema.opts.MonitorRetryTime != 250*time.Millisecond { + t.Fatalf("bad: %d", sema.opts.MonitorRetryTime) + } + + // Should get the lock. + ch, err := sema.Acquire(nil) + if err != nil { + t.Fatalf("err: %v", err) + } + if ch == nil { + t.Fatalf("didn't acquire") + } + + // Take the semaphore using the raw client to force the monitor to wake + // up and check the lock again. This time we will return errors for some + // of the responses. + mutex.Lock() + errors = 2 + mutex.Unlock() + another, err := raw.SemaphoreOpts(opts) + if err != nil { + t.Fatalf("err: %v", err) + } + if _, err := another.Acquire(nil); err != nil { + t.Fatalf("err: %v", err) + } + time.Sleep(5 * opts.MonitorRetryTime) + + // Should still have the semaphore. + select { + case <-ch: + t.Fatalf("lost the semaphore") + default: + } + + // Now return an overwhelming number of errors, using the raw client to + // poke the key and get the monitor to run again. + mutex.Lock() + errors = 10 + mutex.Unlock() + if err := another.Release(); err != nil { + t.Fatalf("err: %v", err) + } + time.Sleep(5 * opts.MonitorRetryTime) + + // Should lose the semaphore. + select { + case <-ch: + case <-time.After(time.Second): + t.Fatalf("should not have the semaphore") + } +} + +func TestSemaphore_OneShot(t *testing.T) { + t.Parallel() + c, s := makeClient(t) + defer s.Stop() + + // Set up a semaphore as a one-shot. + opts := &SemaphoreOptions{ + Prefix: "test/sema/.lock", + Limit: 2, + SemaphoreTryOnce: true, + } + sema, err := c.SemaphoreOpts(opts) + if err != nil { + t.Fatalf("err: %v", err) + } + + // Make sure the default got set. + if sema.opts.SemaphoreWaitTime != DefaultSemaphoreWaitTime { + t.Fatalf("bad: %d", sema.opts.SemaphoreWaitTime) + } + + // Now set a custom time for the test. 
+ opts.SemaphoreWaitTime = 250 * time.Millisecond + sema, err = c.SemaphoreOpts(opts) + if err != nil { + t.Fatalf("err: %v", err) + } + if sema.opts.SemaphoreWaitTime != 250*time.Millisecond { + t.Fatalf("bad: %d", sema.opts.SemaphoreWaitTime) + } + + // Should acquire the semaphore. + ch, err := sema.Acquire(nil) + if err != nil { + t.Fatalf("err: %v", err) + } + if ch == nil { + t.Fatalf("should have acquired the semaphore") + } + + // Try with another session. + another, err := c.SemaphoreOpts(opts) + if err != nil { + t.Fatalf("err: %v", err) + } + ch, err = another.Acquire(nil) + if err != nil { + t.Fatalf("err: %v", err) + } + if ch == nil { + t.Fatalf("should have acquired the semaphore") + } + + // Try with a third one that shouldn't get it. + contender, err := c.SemaphoreOpts(opts) + if err != nil { + t.Fatalf("err: %v", err) + } + start := time.Now() + ch, err = contender.Acquire(nil) + if err != nil { + t.Fatalf("err: %v", err) + } + if ch != nil { + t.Fatalf("should not have acquired the semaphore") + } + diff := time.Now().Sub(start) + if diff < contender.opts.SemaphoreWaitTime || diff > 2*contender.opts.SemaphoreWaitTime { + t.Fatalf("time out of bounds: %9.6f", diff.Seconds()) + } + + // Give up a slot and make sure the third one can get it. + if err := another.Release(); err != nil { + t.Fatalf("err: %v", err) + } + ch, err = contender.Acquire(nil) + if err != nil { + t.Fatalf("err: %v", err) + } + if ch == nil { + t.Fatalf("should have acquired the semaphore") + } +} diff --git a/vendor/github.com/hashicorp/consul/api/session.go b/vendor/github.com/hashicorp/consul/api/session.go new file mode 100644 index 000000000..36e99a389 --- /dev/null +++ b/vendor/github.com/hashicorp/consul/api/session.go @@ -0,0 +1,217 @@ +package api + +import ( + "errors" + "fmt" + "time" +) + +const ( + // SessionBehaviorRelease is the default behavior and causes + // all associated locks to be released on session invalidation. + SessionBehaviorRelease = "release" + + // SessionBehaviorDelete is new in Consul 0.5 and changes the + // behavior to delete all associated locks on session invalidation. + // It can be used in a way similar to Ephemeral Nodes in ZooKeeper. + SessionBehaviorDelete = "delete" +) + +var ErrSessionExpired = errors.New("session expired") + +// SessionEntry represents a session in consul +type SessionEntry struct { + CreateIndex uint64 + ID string + Name string + Node string + Checks []string + LockDelay time.Duration + Behavior string + TTL string +} + +// Session can be used to query the Session endpoints +type Session struct { + c *Client +} + +// Session returns a handle to the session endpoints +func (c *Client) Session() *Session { + return &Session{c} +} + +// CreateNoChecks is like Create but is used specifically to create +// a session with no associated health checks. +func (s *Session) CreateNoChecks(se *SessionEntry, q *WriteOptions) (string, *WriteMeta, error) { + body := make(map[string]interface{}) + body["Checks"] = []string{} + if se != nil { + if se.Name != "" { + body["Name"] = se.Name + } + if se.Node != "" { + body["Node"] = se.Node + } + if se.LockDelay != 0 { + body["LockDelay"] = durToMsec(se.LockDelay) + } + if se.Behavior != "" { + body["Behavior"] = se.Behavior + } + if se.TTL != "" { + body["TTL"] = se.TTL + } + } + return s.create(body, q) + +} + +// Create makes a new session. Providing a session entry can +// customize the session. It can also be nil to use defaults. 
+func (s *Session) Create(se *SessionEntry, q *WriteOptions) (string, *WriteMeta, error) { + var obj interface{} + if se != nil { + body := make(map[string]interface{}) + obj = body + if se.Name != "" { + body["Name"] = se.Name + } + if se.Node != "" { + body["Node"] = se.Node + } + if se.LockDelay != 0 { + body["LockDelay"] = durToMsec(se.LockDelay) + } + if len(se.Checks) > 0 { + body["Checks"] = se.Checks + } + if se.Behavior != "" { + body["Behavior"] = se.Behavior + } + if se.TTL != "" { + body["TTL"] = se.TTL + } + } + return s.create(obj, q) +} + +func (s *Session) create(obj interface{}, q *WriteOptions) (string, *WriteMeta, error) { + var out struct{ ID string } + wm, err := s.c.write("/v1/session/create", obj, &out, q) + if err != nil { + return "", nil, err + } + return out.ID, wm, nil +} + +// Destroy invalidates a given session +func (s *Session) Destroy(id string, q *WriteOptions) (*WriteMeta, error) { + wm, err := s.c.write("/v1/session/destroy/"+id, nil, nil, q) + if err != nil { + return nil, err + } + return wm, nil +} + +// Renew renews the TTL on a given session +func (s *Session) Renew(id string, q *WriteOptions) (*SessionEntry, *WriteMeta, error) { + r := s.c.newRequest("PUT", "/v1/session/renew/"+id) + r.setWriteOptions(q) + rtt, resp, err := s.c.doRequest(r) + if err != nil { + return nil, nil, err + } + defer resp.Body.Close() + + wm := &WriteMeta{RequestTime: rtt} + + if resp.StatusCode == 404 { + return nil, wm, nil + } else if resp.StatusCode != 200 { + return nil, nil, fmt.Errorf("Unexpected response code: %d", resp.StatusCode) + } + + var entries []*SessionEntry + if err := decodeBody(resp, &entries); err != nil { + return nil, nil, fmt.Errorf("Failed to read response: %v", err) + } + if len(entries) > 0 { + return entries[0], wm, nil + } + return nil, wm, nil +} + +// RenewPeriodic is used to periodically invoke Session.Renew on a +// session until a doneCh is closed. This is meant to be used in a long running +// goroutine to ensure a session stays valid. 
+func (s *Session) RenewPeriodic(initialTTL string, id string, q *WriteOptions, doneCh chan struct{}) error {
+	ttl, err := time.ParseDuration(initialTTL)
+	if err != nil {
+		return err
+	}
+
+	waitDur := ttl / 2
+	lastRenewTime := time.Now()
+	var lastErr error
+	for {
+		if time.Since(lastRenewTime) > ttl {
+			return lastErr
+		}
+		select {
+		case <-time.After(waitDur):
+			entry, _, err := s.Renew(id, q)
+			if err != nil {
+				waitDur = time.Second
+				lastErr = err
+				continue
+			}
+			if entry == nil {
+				return ErrSessionExpired
+			}
+
+			// Handle the server updating the TTL
+			ttl, _ = time.ParseDuration(entry.TTL)
+			waitDur = ttl / 2
+			lastRenewTime = time.Now()
+
+		case <-doneCh:
+			// Attempt a session destroy
+			s.Destroy(id, q)
+			return nil
+		}
+	}
+}
+
+// Info looks up a single session
+func (s *Session) Info(id string, q *QueryOptions) (*SessionEntry, *QueryMeta, error) {
+	var entries []*SessionEntry
+	qm, err := s.c.query("/v1/session/info/"+id, &entries, q)
+	if err != nil {
+		return nil, nil, err
+	}
+	if len(entries) > 0 {
+		return entries[0], qm, nil
+	}
+	return nil, qm, nil
+}
+
+// Node gets the sessions for a node
+func (s *Session) Node(node string, q *QueryOptions) ([]*SessionEntry, *QueryMeta, error) {
+	var entries []*SessionEntry
+	qm, err := s.c.query("/v1/session/node/"+node, &entries, q)
+	if err != nil {
+		return nil, nil, err
+	}
+	return entries, qm, nil
+}
+
+// List gets all active sessions
+func (s *Session) List(q *QueryOptions) ([]*SessionEntry, *QueryMeta, error) {
+	var entries []*SessionEntry
+	qm, err := s.c.query("/v1/session/list", &entries, q)
+	if err != nil {
+		return nil, nil, err
+	}
+	return entries, qm, nil
+}
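Since `RenewPeriodic` is the piece most callers wire up alongside `Create`, here is a minimal sketch of the intended pattern. It is illustrative only: it assumes a Consul agent reachable at the default local address, and `DefaultConfig`/`NewClient` come from elsewhere in this vendored package.

```go
package main

import (
	"log"
	"time"

	"github.com/hashicorp/consul/api"
)

func main() {
	// Assumes a Consul agent on the default local address.
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}
	session := client.Session()

	// Create a TTL session; it expires unless it is renewed.
	id, _, err := session.Create(&api.SessionEntry{
		Behavior: api.SessionBehaviorDelete,
		TTL:      "15s",
	}, nil)
	if err != nil {
		log.Fatal(err)
	}

	// Keep the session alive from a background goroutine until doneCh
	// is closed; RenewPeriodic destroys the session on the way out.
	doneCh := make(chan struct{})
	go func() {
		if err := session.RenewPeriodic("15s", id, nil, doneCh); err != nil {
			log.Printf("session lost: %v", err)
		}
	}()

	time.Sleep(3 * time.Second) // stand-in for real work tied to the session
	close(doneCh)               // triggers the Destroy inside RenewPeriodic
}
```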
diff --git a/vendor/github.com/hashicorp/consul/api/session_test.go b/vendor/github.com/hashicorp/consul/api/session_test.go
new file mode 100644
index 000000000..85bea228e
--- /dev/null
+++ b/vendor/github.com/hashicorp/consul/api/session_test.go
@@ -0,0 +1,314 @@
+package api
+
+import (
+	"testing"
+	"time"
+)
+
+func TestSession_CreateDestroy(t *testing.T) {
+	t.Parallel()
+	c, s := makeClient(t)
+	defer s.Stop()
+
+	session := c.Session()
+
+	id, meta, err := session.Create(nil, nil)
+	if err != nil {
+		t.Fatalf("err: %v", err)
+	}
+
+	if meta.RequestTime == 0 {
+		t.Fatalf("bad: %v", meta)
+	}
+
+	if id == "" {
+		t.Fatalf("invalid: %v", id)
+	}
+
+	meta, err = session.Destroy(id, nil)
+	if err != nil {
+		t.Fatalf("err: %v", err)
+	}
+
+	if meta.RequestTime == 0 {
+		t.Fatalf("bad: %v", meta)
+	}
+}
+
+func TestSession_CreateRenewDestroy(t *testing.T) {
+	t.Parallel()
+	c, s := makeClient(t)
+	defer s.Stop()
+
+	session := c.Session()
+
+	se := &SessionEntry{
+		TTL: "10s",
+	}
+
+	id, meta, err := session.Create(se, nil)
+	if err != nil {
+		t.Fatalf("err: %v", err)
+	}
+	defer session.Destroy(id, nil)
+
+	if meta.RequestTime == 0 {
+		t.Fatalf("bad: %v", meta)
+	}
+
+	if id == "" {
+		t.Fatalf("invalid: %v", id)
+	}
+
+	if meta.RequestTime == 0 {
+		t.Fatalf("bad: %v", meta)
+	}
+
+	renew, meta, err := session.Renew(id, nil)
+
+	if err != nil {
+		t.Fatalf("err: %v", err)
+	}
+	if meta.RequestTime == 0 {
+		t.Fatalf("bad: %v", meta)
+	}
+
+	if renew == nil {
+		t.Fatalf("should get session")
+	}
+
+	if renew.ID != id {
+		t.Fatalf("should have matching id")
+	}
+
+	if renew.TTL != "10s" {
+		t.Fatalf("should get session with TTL")
+	}
+}
+
+func TestSession_CreateRenewDestroyRenew(t *testing.T) {
+	t.Parallel()
+	c, s := makeClient(t)
+	defer s.Stop()
+
+	session := c.Session()
+
+	entry := &SessionEntry{
+		Behavior: SessionBehaviorDelete,
+		TTL:      "500s", // long TTL so the session does not expire mid-test
+	}
+
+	id, meta, err := session.Create(entry, nil)
+	if err != nil {
+		t.Fatalf("err: %v", err)
+	}
+
+	if meta.RequestTime == 0 {
+		t.Fatalf("bad: %v", meta)
+	}
+
+	if id == "" {
+		t.Fatalf("invalid: %v", id)
+	}
+
+	// Extend right after create. Everything should be fine.
+	entry, _, err = session.Renew(id, nil)
+	if err != nil {
+		t.Fatalf("err: %v", err)
+	}
+	if entry == nil {
+		t.Fatal("session unexpectedly vanished")
+	}
+
+	// Simulate TTL loss by manually destroying the session.
+	meta, err = session.Destroy(id, nil)
+	if err != nil {
+		t.Fatalf("err: %v", err)
+	}
+
+	if meta.RequestTime == 0 {
+		t.Fatalf("bad: %v", meta)
+	}
+
+	// Extend right after delete. The 404 should proxy as a nil.
+	entry, _, err = session.Renew(id, nil)
+	if err != nil {
+		t.Fatalf("err: %v", err)
+	}
+	if entry != nil {
+		t.Fatal("session still exists")
+	}
+}
+
+func TestSession_CreateDestroyRenewPeriodic(t *testing.T) {
+	t.Parallel()
+	c, s := makeClient(t)
+	defer s.Stop()
+
+	session := c.Session()
+
+	entry := &SessionEntry{
+		Behavior: SessionBehaviorDelete,
+		TTL:      "500s", // long TTL so the session does not expire mid-test
+	}
+
+	id, meta, err := session.Create(entry, nil)
+	if err != nil {
+		t.Fatalf("err: %v", err)
+	}
+
+	if meta.RequestTime == 0 {
+		t.Fatalf("bad: %v", meta)
+	}
+
+	if id == "" {
+		t.Fatalf("invalid: %v", id)
+	}
+
+	// This only tests Create/Destroy/RenewPeriodic to avoid the more
+	// difficult case of testing all of the timing code.
+
+	// Simulate TTL loss by manually destroying the session.
+	meta, err = session.Destroy(id, nil)
+	if err != nil {
+		t.Fatalf("err: %v", err)
+	}
+
+	if meta.RequestTime == 0 {
+		t.Fatalf("bad: %v", meta)
+	}
+
+	// Extend right after delete. The 404 should terminate the loop quickly and return ErrSessionExpired.
+	errCh := make(chan error, 1)
+	doneCh := make(chan struct{})
+	go func() { errCh <- session.RenewPeriodic("1s", id, nil, doneCh) }()
+	defer close(doneCh)
+
+	select {
+	case <-time.After(1 * time.Second):
+		t.Fatal("timed out: missing session did not terminate renewal loop")
+	case err = <-errCh:
+		if err != ErrSessionExpired {
+			t.Fatalf("err: %v", err)
+		}
+	}
+}
+
+func TestSession_Info(t *testing.T) {
+	t.Parallel()
+	c, s := makeClient(t)
+	defer s.Stop()
+
+	session := c.Session()
+
+	id, _, err := session.Create(nil, nil)
+	if err != nil {
+		t.Fatalf("err: %v", err)
+	}
+	defer session.Destroy(id, nil)
+
+	info, qm, err := session.Info(id, nil)
+	if err != nil {
+		t.Fatalf("err: %v", err)
+	}
+
+	if qm.LastIndex == 0 {
+		t.Fatalf("bad: %v", qm)
+	}
+	if !qm.KnownLeader {
+		t.Fatalf("bad: %v", qm)
+	}
+
+	if info == nil {
+		t.Fatalf("should get session")
+	}
+	if info.CreateIndex == 0 {
+		t.Fatalf("bad: %v", info)
+	}
+	if info.ID != id {
+		t.Fatalf("bad: %v", info)
+	}
+	if info.Name != "" {
+		t.Fatalf("bad: %v", info)
+	}
+	if info.Node == "" {
+		t.Fatalf("bad: %v", info)
+	}
+	if len(info.Checks) == 0 {
+		t.Fatalf("bad: %v", info)
+	}
+	if info.LockDelay == 0 {
+		t.Fatalf("bad: %v", info)
+	}
+	if info.Behavior != "release" {
+		t.Fatalf("bad: %v", info)
+	}
+	if info.TTL != "" {
+		t.Fatalf("bad: %v", info)
+	}
+}
+
+func TestSession_Node(t *testing.T) {
+	t.Parallel()
+	c, s := makeClient(t)
+	defer s.Stop()
+
+	session := c.Session()
+
+	id, _, err := session.Create(nil, nil)
+	if err != nil {
+		t.Fatalf("err: %v", err)
+	}
+	defer session.Destroy(id, nil)
+
+	info, qm, err := session.Info(id, nil)
+	if err != nil {
+		t.Fatalf("err: %v", err)
+	}
+
+	sessions, qm, err := session.Node(info.Node, nil)
+	if err != nil {
+		t.Fatalf("err: %v", err)
+	}
+
+	if len(sessions) != 1 {
+		t.Fatalf("bad: %v", sessions)
+	}
+
+	if qm.LastIndex == 0 {
+		t.Fatalf("bad: %v", qm)
+	}
+	if !qm.KnownLeader {
+		t.Fatalf("bad: %v", qm)
+	}
+}
+
+func TestSession_List(t *testing.T) {
+	t.Parallel()
+	c, s := makeClient(t)
+	defer s.Stop()
+
+	session := c.Session()
+
+	id, _, err := session.Create(nil, nil)
+	if err != nil {
+		t.Fatalf("err: %v", err)
+	}
+	defer session.Destroy(id, nil)
+
+	sessions, qm, err := session.List(nil)
+	if err != nil {
+		t.Fatalf("err: %v", err)
+	}
+
+	if len(sessions) != 1 {
+		t.Fatalf("bad: %v", sessions)
+	}
+
+	if qm.LastIndex == 0 {
+		t.Fatalf("bad: %v", qm)
+	}
+	if !qm.KnownLeader {
+		t.Fatalf("bad: %v", qm)
+	}
+}
diff --git a/vendor/github.com/hashicorp/consul/api/status.go b/vendor/github.com/hashicorp/consul/api/status.go
new file mode 100644
index 000000000..74ef61a67
--- /dev/null
+++ b/vendor/github.com/hashicorp/consul/api/status.go
@@ -0,0 +1,43 @@
+package api
+
+// Status can be used to query the Status endpoints
+type Status struct {
+	c *Client
+}
+
+// Status returns a handle to the status endpoints
+func (c *Client) Status() *Status {
+	return &Status{c}
+}
+
+// Leader is used to query for a known leader
+func (s *Status) Leader() (string, error) {
+	r := s.c.newRequest("GET", "/v1/status/leader")
+	_, resp, err := requireOK(s.c.doRequest(r))
+	if err != nil {
+		return "", err
+	}
+	defer resp.Body.Close()
+
+	var leader string
+	if err := decodeBody(resp, &leader); err != nil {
+		return "", err
+	}
+	return leader, nil
+}
+
+// Peers is used to query for the known raft peers
+func (s *Status) Peers() ([]string, error) {
+	r := s.c.newRequest("GET", "/v1/status/peers")
+	_, resp, err := requireOK(s.c.doRequest(r))
+	if err != nil {
+		return nil, err
+	}
+	defer resp.Body.Close()
+
+	var peers []string
+	if err := decodeBody(resp, &peers); err != nil {
+		return nil, err
+	}
+	return peers, nil
+}
diff --git a/vendor/github.com/hashicorp/consul/api/status_test.go b/vendor/github.com/hashicorp/consul/api/status_test.go
new file mode 100644
index 000000000..62dc1550f
--- /dev/null
+++ b/vendor/github.com/hashicorp/consul/api/status_test.go
@@ -0,0 +1,37 @@
+package api
+
+import (
+	"testing"
+)
+
+func TestStatusLeader(t *testing.T) {
+	t.Parallel()
+	c, s := makeClient(t)
+	defer s.Stop()
+
+	status := c.Status()
+
+	leader, err := status.Leader()
+	if err != nil {
+		t.Fatalf("err: %v", err)
+	}
+	if leader == "" {
+		t.Fatalf("Expected leader")
+	}
+}
+
+func TestStatusPeers(t *testing.T) {
+	t.Parallel()
+	c, s := makeClient(t)
+	defer s.Stop()
+
+	status := c.Status()
+
+	peers, err := status.Peers()
+	if err != nil {
+		t.Fatalf("err: %v", err)
+	}
+	if len(peers) == 0 {
+		t.Fatalf("Expected peers")
+	}
+}
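The tests above double as usage documentation; a minimal standalone sketch of the Status endpoints, under the same local-agent and `DefaultConfig` assumptions as before:

```go
package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/consul/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	status := client.Status()

	// Leader returns the address of the current raft leader,
	// e.g. "127.0.0.1:8300".
	leader, err := status.Leader()
	if err != nil {
		log.Fatal(err)
	}

	// Peers returns the addresses of the known raft peers.
	peers, err := status.Peers()
	if err != nil {
		log.Fatal(err)
	}

	fmt.Printf("leader: %s, peers: %v\n", leader, peers)
}
```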
diff --git a/vendor/github.com/hashicorp/errwrap/LICENSE b/vendor/github.com/hashicorp/errwrap/LICENSE
new file mode 100644
index 000000000..c33dcc7c9
--- /dev/null
+++ b/vendor/github.com/hashicorp/errwrap/LICENSE
@@ -0,0 +1,354 @@
+Mozilla Public License, version 2.0
+
+1. Definitions
+
+1.1. “Contributor”
+
+   means each individual or legal entity that creates, contributes to the
+   creation of, or owns Covered Software.
+
+1.2. “Contributor Version”
+
+   means the combination of the Contributions of others (if any) used by a
+   Contributor and that particular Contributor’s Contribution.
+
+1.3. “Contribution”
+
+   means Covered Software of a particular Contributor.
+
+1.4. “Covered Software”
+
+   means Source Code Form to which the initial Contributor has attached the
+   notice in Exhibit A, the Executable Form of such Source Code Form, and
+   Modifications of such Source Code Form, in each case including portions
+   thereof.
+
+1.5. “Incompatible With Secondary Licenses”
+   means
+
+   a. that the initial Contributor has attached the notice described in
+      Exhibit B to the Covered Software; or
+
+   b. that the Covered Software was made available under the terms of version
+      1.1 or earlier of the License, but not also under the terms of a
+      Secondary License.
+
+1.6. “Executable Form”
+
+   means any form of the work other than Source Code Form.
+
+1.7. “Larger Work”
+
+   means a work that combines Covered Software with other material, in a separate
+   file or files, that is not Covered Software.
+
+1.8. “License”
+
+   means this document.
+
+1.9. “Licensable”
+
+   means having the right to grant, to the maximum extent possible, whether at the
+   time of the initial grant or subsequently, any and all of the rights conveyed by
+   this License.
+
+1.10. “Modifications”
+
+   means any of the following:
+
+   a. any file in Source Code Form that results from an addition to, deletion
+      from, or modification of the contents of Covered Software; or
+
+   b. any new file in Source Code Form that contains any Covered Software.
+
+1.11. “Patent Claims” of a Contributor
+
+   means any patent claim(s), including without limitation, method, process,
+   and apparatus claims, in any patent Licensable by such Contributor that
+   would be infringed, but for the grant of the License, by the making,
+   using, selling, offering for sale, having made, import, or transfer of
+   either its Contributions or its Contributor Version.
+
+1.12. “Secondary License”
+
+   means either the GNU General Public License, Version 2.0, the GNU Lesser
+   General Public License, Version 2.1, the GNU Affero General Public
+   License, Version 3.0, or any later versions of those licenses.
+
+1.13. “Source Code Form”
+
+   means the form of the work preferred for making modifications.
+
+1.14. “You” (or “Your”)
+
+   means an individual or a legal entity exercising rights under this
+   License. For legal entities, “You” includes any entity that controls, is
+   controlled by, or is under common control with You. For purposes of this
+   definition, “control” means (a) the power, direct or indirect, to cause
+   the direction or management of such entity, whether by contract or
+   otherwise, or (b) ownership of more than fifty percent (50%) of the
+   outstanding shares or beneficial ownership of such entity.
+
+
+2. License Grants and Conditions
+
+2.1. Grants
+
+   Each Contributor hereby grants You a world-wide, royalty-free,
+   non-exclusive license:
+
+   a. under intellectual property rights (other than patent or trademark)
+      Licensable by such Contributor to use, reproduce, make available,
+      modify, display, perform, distribute, and otherwise exploit its
+      Contributions, either on an unmodified basis, with Modifications, or as
+      part of a Larger Work; and
+
+   b. under Patent Claims of such Contributor to make, use, sell, offer for
+      sale, have made, import, and otherwise transfer either its Contributions
+      or its Contributor Version.
+
+2.2. Effective Date
+
+   The licenses granted in Section 2.1 with respect to any Contribution become
+   effective for each Contribution on the date the Contributor first distributes
+   such Contribution.
+
+2.3. Limitations on Grant Scope
+
+   The licenses granted in this Section 2 are the only rights granted under this
+   License. No additional rights or licenses will be implied from the distribution
+   or licensing of Covered Software under this License. Notwithstanding Section
+   2.1(b) above, no patent license is granted by a Contributor:
+
+   a. for any code that a Contributor has removed from Covered Software; or
+
+   b. for infringements caused by: (i) Your and any other third party’s
+      modifications of Covered Software, or (ii) the combination of its
+      Contributions with other software (except as part of its Contributor
+      Version); or
+
+   c. under Patent Claims infringed by Covered Software in the absence of its
+      Contributions.
+
+   This License does not grant any rights in the trademarks, service marks, or
+   logos of any Contributor (except as may be necessary to comply with the
+   notice requirements in Section 3.4).
+
+2.4. Subsequent Licenses
+
+   No Contributor makes additional grants as a result of Your choice to
+   distribute the Covered Software under a subsequent version of this License
+   (see Section 10.2) or under the terms of a Secondary License (if permitted
+   under the terms of Section 3.3).
+
+2.5. Representation
+
+   Each Contributor represents that the Contributor believes its Contributions
+   are its original creation(s) or it has sufficient rights to grant the
+   rights to its Contributions conveyed by this License.
+
+2.6. Fair Use
+
+   This License is not intended to limit any rights You have under applicable
+   copyright doctrines of fair use, fair dealing, or other equivalents.
+
+2.7. Conditions
+
+   Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in
+   Section 2.1.
+
+
+3. Responsibilities
+
+3.1. Distribution of Source Form
+
+   All distribution of Covered Software in Source Code Form, including any
+   Modifications that You create or to which You contribute, must be under the
+   terms of this License. You must inform recipients that the Source Code Form
+   of the Covered Software is governed by the terms of this License, and how
+   they can obtain a copy of this License. You may not attempt to alter or
+   restrict the recipients’ rights in the Source Code Form.
+
+3.2. Distribution of Executable Form
+
+   If You distribute Covered Software in Executable Form then:
+
+   a. such Covered Software must also be made available in Source Code Form,
+      as described in Section 3.1, and You must inform recipients of the
+      Executable Form how they can obtain a copy of such Source Code Form by
+      reasonable means in a timely manner, at a charge no more than the cost
+      of distribution to the recipient; and
+
+   b. You may distribute such Executable Form under the terms of this License,
+      or sublicense it under different terms, provided that the license for
+      the Executable Form does not attempt to limit or alter the recipients’
+      rights in the Source Code Form under this License.
+
+3.3. Distribution of a Larger Work
+
+   You may create and distribute a Larger Work under terms of Your choice,
+   provided that You also comply with the requirements of this License for the
+   Covered Software. If the Larger Work is a combination of Covered Software
+   with a work governed by one or more Secondary Licenses, and the Covered
+   Software is not Incompatible With Secondary Licenses, this License permits
+   You to additionally distribute such Covered Software under the terms of
+   such Secondary License(s), so that the recipient of the Larger Work may, at
+   their option, further distribute the Covered Software under the terms of
+   either this License or such Secondary License(s).
+
+3.4. Notices
+
+   You may not remove or alter the substance of any license notices (including
+   copyright notices, patent notices, disclaimers of warranty, or limitations
+   of liability) contained within the Source Code Form of the Covered
+   Software, except that You may alter any license notices to the extent
+   required to remedy known factual inaccuracies.
+
+3.5. Application of Additional Terms
+
+   You may choose to offer, and to charge a fee for, warranty, support,
+   indemnity or liability obligations to one or more recipients of Covered
+   Software. However, You may do so only on Your own behalf, and not on behalf
+   of any Contributor. You must make it absolutely clear that any such
+   warranty, support, indemnity, or liability obligation is offered by You
+   alone, and You hereby agree to indemnify every Contributor for any
+   liability incurred by such Contributor as a result of warranty, support,
+   indemnity or liability terms You offer. You may include additional
+   disclaimers of warranty and limitations of liability specific to any
+   jurisdiction.
+
+4. Inability to Comply Due to Statute or Regulation
+
+   If it is impossible for You to comply with any of the terms of this License
+   with respect to some or all of the Covered Software due to statute, judicial
+   order, or regulation then You must: (a) comply with the terms of this License
+   to the maximum extent possible; and (b) describe the limitations and the code
+   they affect. Such description must be placed in a text file included with all
+   distributions of the Covered Software under this License. Except to the
+   extent prohibited by statute or regulation, such description must be
+   sufficiently detailed for a recipient of ordinary skill to be able to
+   understand it.
+
+5. Termination
+
+5.1. The rights granted under this License will terminate automatically if You
+   fail to comply with any of its terms. However, if You become compliant,
+   then the rights granted under this License from a particular Contributor
+   are reinstated (a) provisionally, unless and until such Contributor
+   explicitly and finally terminates Your grants, and (b) on an ongoing basis,
+   if such Contributor fails to notify You of the non-compliance by some
+   reasonable means prior to 60 days after You have come back into compliance.
+   Moreover, Your grants from a particular Contributor are reinstated on an
+   ongoing basis if such Contributor notifies You of the non-compliance by
+   some reasonable means, this is the first time You have received notice of
+   non-compliance with this License from such Contributor, and You become
+   compliant prior to 30 days after Your receipt of the notice.
+
+5.2. If You initiate litigation against any entity by asserting a patent
+   infringement claim (excluding declaratory judgment actions, counter-claims,
+   and cross-claims) alleging that a Contributor Version directly or
+   indirectly infringes any patent, then the rights granted to You by any and
+   all Contributors for the Covered Software under Section 2.1 of this License
+   shall terminate.
+
+5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user
+   license agreements (excluding distributors and resellers) which have been
+   validly granted by You or Your distributors under this License prior to
+   termination shall survive termination.
+
+6. Disclaimer of Warranty
+
+   Covered Software is provided under this License on an “as is” basis, without
+   warranty of any kind, either expressed, implied, or statutory, including,
+   without limitation, warranties that the Covered Software is free of defects,
+   merchantable, fit for a particular purpose or non-infringing. The entire
+   risk as to the quality and performance of the Covered Software is with You.
+   Should any Covered Software prove defective in any respect, You (not any
+   Contributor) assume the cost of any necessary servicing, repair, or
+   correction. This disclaimer of warranty constitutes an essential part of this
+   License. No use of any Covered Software is authorized under this License
+   except under this disclaimer.
+
+7. Limitation of Liability
+
+   Under no circumstances and under no legal theory, whether tort (including
+   negligence), contract, or otherwise, shall any Contributor, or anyone who
+   distributes Covered Software as permitted above, be liable to You for any
+   direct, indirect, special, incidental, or consequential damages of any
+   character including, without limitation, damages for lost profits, loss of
+   goodwill, work stoppage, computer failure or malfunction, or any and all
+   other commercial damages or losses, even if such party shall have been
+   informed of the possibility of such damages. This limitation of liability
+   shall not apply to liability for death or personal injury resulting from such
+   party’s negligence to the extent applicable law prohibits such limitation.
+   Some jurisdictions do not allow the exclusion or limitation of incidental or
+   consequential damages, so this exclusion and limitation may not apply to You.
+
+8. Litigation
+
+   Any litigation relating to this License may be brought only in the courts of
+   a jurisdiction where the defendant maintains its principal place of business
+   and such litigation shall be governed by laws of that jurisdiction, without
+   reference to its conflict-of-law provisions. Nothing in this Section shall
+   prevent a party’s ability to bring cross-claims or counter-claims.
+
+9. Miscellaneous
+
+   This License represents the complete agreement concerning the subject matter
+   hereof. If any provision of this License is held to be unenforceable, such
+   provision shall be reformed only to the extent necessary to make it
+   enforceable. Any law or regulation which provides that the language of a
+   contract shall be construed against the drafter shall not be used to construe
+   this License against a Contributor.
+
+
+10. Versions of the License
+
+10.1. New Versions
+
+   Mozilla Foundation is the license steward. Except as provided in Section
+   10.3, no one other than the license steward has the right to modify or
+   publish new versions of this License. Each version will be given a
+   distinguishing version number.
+
+10.2. Effect of New Versions
+
+   You may distribute the Covered Software under the terms of the version of
+   the License under which You originally received the Covered Software, or
+   under the terms of any subsequent version published by the license
+   steward.
+
+10.3. Modified Versions
+
+   If you create software not governed by this License, and you want to
+   create a new license for such software, you may create and use a modified
+   version of this License if you rename the license and remove any
+   references to the name of the license steward (except to note that such
+   modified license differs from this License).
+
+10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses
+   If You choose to distribute Source Code Form that is Incompatible With
+   Secondary Licenses under the terms of this version of the License, the
+   notice described in Exhibit B of this License must be attached.
+
+Exhibit A - Source Code Form License Notice
+
+   This Source Code Form is subject to the
+   terms of the Mozilla Public License, v.
+   2.0. If a copy of the MPL was not
+   distributed with this file, You can
+   obtain one at
+   http://mozilla.org/MPL/2.0/.
+
+If it is not possible or desirable to put the notice in a particular file, then
+You may include the notice in a location (such as a LICENSE file in a relevant
+directory) where a recipient would be likely to look for such a notice.
+
+You may add additional accurate notices of copyright ownership.
+
+Exhibit B - “Incompatible With Secondary Licenses” Notice
+
+   This Source Code Form is “Incompatible
+   With Secondary Licenses”, as defined by
+   the Mozilla Public License, v. 2.0.
+
diff --git a/vendor/github.com/hashicorp/errwrap/README.md b/vendor/github.com/hashicorp/errwrap/README.md
new file mode 100644
index 000000000..1c95f5978
--- /dev/null
+++ b/vendor/github.com/hashicorp/errwrap/README.md
@@ -0,0 +1,89 @@
+# errwrap
+
+`errwrap` is a package for Go that formalizes the pattern of wrapping errors
+and checking if an error contains another error.
+
+There is a common pattern in Go of taking a returned `error` value and
+then wrapping it (such as with `fmt.Errorf`) before returning it. The problem
+with this pattern is that you completely lose the original `error` structure.
+
+Arguably the _correct_ approach is that you should make a custom structure
+implementing the `error` interface, and have the original error as a field
+on that structure, such [as this example](http://golang.org/pkg/os/#PathError).
+This is a good approach, but you have to know the entire chain of possible
+rewrapping that happens, when you might just care about one.
+
+`errwrap` formalizes this pattern (it doesn't matter what approach you use
+above) by giving a single interface for wrapping errors, checking if a specific
+error is wrapped, and extracting that error.
+
+## Installation and Docs
+
+Install using `go get github.com/hashicorp/errwrap`.
+
+Full documentation is available at
+http://godoc.org/github.com/hashicorp/errwrap
+
+## Usage
+
+#### Basic Usage
+
+Below is a very basic example of its usage:
+
+```go
+// A function that always returns an error, but wraps it, like a real
+// function might.
+func tryOpen() error {
+	_, err := os.Open("/i/dont/exist")
+	if err != nil {
+		return errwrap.Wrapf("Doesn't exist: {{err}}", err)
+	}
+
+	return nil
+}
+
+func main() {
+	err := tryOpen()
+
+	// We can use the Contains helpers to check if an error contains
+	// another error. It is safe to do this with a nil error, or with
+	// an error that doesn't even use the errwrap package.
+	if errwrap.Contains(err, "does not exist") {
+		// Do something
+	}
+	if errwrap.ContainsType(err, new(os.PathError)) {
+		// Do something
+	}
+
+	// Or we can use the associated `Get` functions to just extract
+	// a specific error.
+	// This would return nil if that specific error doesn't exist.
+	perr := errwrap.GetType(err, new(os.PathError))
+	if perr != nil {
+		// Do something
+	}
+}
+```
+
+#### Custom Types
+
+If you're already making custom types that properly wrap errors, then
+you can get all the functionality of `errwrap.Contains` and such by
+implementing the `Wrapper` interface with just one function. Example:
+
+```go
+type AppError struct {
+	Code ErrorCode
+	Err  error
+}
+
+func (e *AppError) WrappedErrors() []error {
+	return []error{e.Err}
+}
+```
+
+Now this works:
+
+```go
+err := &AppError{Err: fmt.Errorf("an error")}
+if errwrap.ContainsType(err, fmt.Errorf("")) {
+	// This will work!
+}
+```
diff --git a/vendor/github.com/hashicorp/errwrap/errwrap.go b/vendor/github.com/hashicorp/errwrap/errwrap.go
new file mode 100644
index 000000000..a733bef18
--- /dev/null
+++ b/vendor/github.com/hashicorp/errwrap/errwrap.go
@@ -0,0 +1,169 @@
+// Package errwrap implements methods to formalize error wrapping in Go.
+//
+// All of the top-level functions that take an `error` are built to be able
+// to take any error, not just wrapped errors. This allows you to use errwrap
+// without having to type-check and type-cast everywhere.
+package errwrap
+
+import (
+	"errors"
+	"reflect"
+	"strings"
+)
+
+// WalkFunc is the callback called for Walk.
+type WalkFunc func(error)
+
+// Wrapper is an interface that can be implemented by custom types to
+// have all the Contains, Get, etc. functions in errwrap work.
+//
+// When Walk reaches a Wrapper, it will call the callback for every
+// wrapped error in addition to the wrapper itself. Since all the top-level
+// functions in errwrap use Walk, this means that all those functions work
+// with your custom type.
+type Wrapper interface {
+	WrappedErrors() []error
+}
+
+// Wrap defines that outer wraps inner, returning an error type that
+// can be cleanly used with the other methods in this package, such as
+// Contains, GetAll, etc.
+//
+// This function won't modify the error message at all (the outer message
+// will be used).
+func Wrap(outer, inner error) error {
+	return &wrappedError{
+		Outer: outer,
+		Inner: inner,
+	}
+}
+
+// Wrapf wraps an error with a formatting message. This is similar to using
+// `fmt.Errorf` to wrap an error. If you're using `fmt.Errorf` to wrap
+// errors, you should replace it with this.
+//
+// format is the format of the error message. The string '{{err}}' will
+// be replaced with the original error message.
+func Wrapf(format string, err error) error {
+	outerMsg := ""
+	if err != nil {
+		outerMsg = err.Error()
+	}
+
+	outer := errors.New(strings.Replace(
+		format, "{{err}}", outerMsg, -1))
+
+	return Wrap(outer, err)
+}
+
+// Contains checks if the given error contains an error with the
+// message msg. If err is not a wrapped error, this will always return
+// false unless the error itself happens to match this msg.
+func Contains(err error, msg string) bool {
+	return len(GetAll(err, msg)) > 0
+}
+
+// ContainsType checks if the given error contains an error with
+// the same concrete type as v. If err is not a wrapped error, this will
+// check the err itself.
+func ContainsType(err error, v interface{}) bool {
+	return len(GetAllType(err, v)) > 0
+}
+
+// Get is the same as GetAll but returns the deepest matching error.
+func Get(err error, msg string) error {
+	es := GetAll(err, msg)
+	if len(es) > 0 {
+		return es[len(es)-1]
+	}
+
+	return nil
+}
+
+// GetType is the same as GetAllType but returns the deepest matching error.
+func GetType(err error, v interface{}) error { + es := GetAllType(err, v) + if len(es) > 0 { + return es[len(es)-1] + } + + return nil +} + +// GetAll gets all the errors that might be wrapped in err with the +// given message. The order of the errors is such that the outermost +// matching error (the most recent wrap) is index zero, and so on. +func GetAll(err error, msg string) []error { + var result []error + + Walk(err, func(err error) { + if err.Error() == msg { + result = append(result, err) + } + }) + + return result +} + +// GetAllType gets all the errors that are the same type as v. +// +// The order of the return value is the same as described in GetAll. +func GetAllType(err error, v interface{}) []error { + var result []error + + var search string + if v != nil { + search = reflect.TypeOf(v).String() + } + Walk(err, func(err error) { + var needle string + if err != nil { + needle = reflect.TypeOf(err).String() + } + + if needle == search { + result = append(result, err) + } + }) + + return result +} + +// Walk walks all the wrapped errors in err and calls the callback. If +// err isn't a wrapped error, this will be called once for err. If err +// is a wrapped error, the callback will be called for both the wrapper +// that implements error as well as the wrapped error itself. +func Walk(err error, cb WalkFunc) { + if err == nil { + return + } + + switch e := err.(type) { + case *wrappedError: + cb(e.Outer) + Walk(e.Inner, cb) + case Wrapper: + cb(err) + + for _, err := range e.WrappedErrors() { + Walk(err, cb) + } + default: + cb(err) + } +} + +// wrappedError is an implementation of error that has both the +// outer and inner errors. +type wrappedError struct { + Outer error + Inner error +} + +func (w *wrappedError) Error() string { + return w.Outer.Error() +} + +func (w *wrappedError) WrappedErrors() []error { + return []error{w.Outer, w.Inner} +} diff --git a/vendor/github.com/hashicorp/errwrap/errwrap_test.go b/vendor/github.com/hashicorp/errwrap/errwrap_test.go new file mode 100644 index 000000000..5ae5f8e3c --- /dev/null +++ b/vendor/github.com/hashicorp/errwrap/errwrap_test.go @@ -0,0 +1,94 @@ +package errwrap + +import ( + "fmt" + "testing" +) + +func TestWrappedError_impl(t *testing.T) { + var _ error = new(wrappedError) +} + +func TestGetAll(t *testing.T) { + cases := []struct { + Err error + Msg string + Len int + }{ + {}, + { + fmt.Errorf("foo"), + "foo", + 1, + }, + { + fmt.Errorf("bar"), + "foo", + 0, + }, + { + Wrapf("bar", fmt.Errorf("foo")), + "foo", + 1, + }, + { + Wrapf("{{err}}", fmt.Errorf("foo")), + "foo", + 2, + }, + { + Wrapf("bar", Wrapf("baz", fmt.Errorf("foo"))), + "foo", + 1, + }, + } + + for i, tc := range cases { + actual := GetAll(tc.Err, tc.Msg) + if len(actual) != tc.Len { + t.Fatalf("%d: bad: %#v", i, actual) + } + for _, v := range actual { + if v.Error() != tc.Msg { + t.Fatalf("%d: bad: %#v", i, actual) + } + } + } +} + +func TestGetAllType(t *testing.T) { + cases := []struct { + Err error + Type interface{} + Len int + }{ + {}, + { + fmt.Errorf("foo"), + "foo", + 0, + }, + { + fmt.Errorf("bar"), + fmt.Errorf("foo"), + 1, + }, + { + Wrapf("bar", fmt.Errorf("foo")), + fmt.Errorf("baz"), + 2, + }, + { + Wrapf("bar", Wrapf("baz", fmt.Errorf("foo"))), + Wrapf("", nil), + 0, + }, + } + + for i, tc := range cases { + actual := GetAllType(tc.Err, tc.Type) + if len(actual) != tc.Len { + t.Fatalf("%d: bad: %#v", i, actual) + } + } +} diff --git a/vendor/github.com/hashicorp/go-checkpoint/LICENSE b/vendor/github.com/hashicorp/go-checkpoint/LICENSE new 
file mode 100644
index 000000000..c33dcc7c9
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-checkpoint/LICENSE
@@ -0,0 +1,354 @@
+Mozilla Public License, version 2.0
+
+1. Definitions
+
+1.1. “Contributor”
+
+   means each individual or legal entity that creates, contributes to the
+   creation of, or owns Covered Software.
+
+1.2. “Contributor Version”
+
+   means the combination of the Contributions of others (if any) used by a
+   Contributor and that particular Contributor’s Contribution.
+
+1.3. “Contribution”
+
+   means Covered Software of a particular Contributor.
+
+1.4. “Covered Software”
+
+   means Source Code Form to which the initial Contributor has attached the
+   notice in Exhibit A, the Executable Form of such Source Code Form, and
+   Modifications of such Source Code Form, in each case including portions
+   thereof.
+
+1.5. “Incompatible With Secondary Licenses”
+   means
+
+   a. that the initial Contributor has attached the notice described in
+      Exhibit B to the Covered Software; or
+
+   b. that the Covered Software was made available under the terms of version
+      1.1 or earlier of the License, but not also under the terms of a
+      Secondary License.
+
+1.6. “Executable Form”
+
+   means any form of the work other than Source Code Form.
+
+1.7. “Larger Work”
+
+   means a work that combines Covered Software with other material, in a separate
+   file or files, that is not Covered Software.
+
+1.8. “License”
+
+   means this document.
+
+1.9. “Licensable”
+
+   means having the right to grant, to the maximum extent possible, whether at the
+   time of the initial grant or subsequently, any and all of the rights conveyed by
+   this License.
+
+1.10. “Modifications”
+
+   means any of the following:
+
+   a. any file in Source Code Form that results from an addition to, deletion
+      from, or modification of the contents of Covered Software; or
+
+   b. any new file in Source Code Form that contains any Covered Software.
+
+1.11. “Patent Claims” of a Contributor
+
+   means any patent claim(s), including without limitation, method, process,
+   and apparatus claims, in any patent Licensable by such Contributor that
+   would be infringed, but for the grant of the License, by the making,
+   using, selling, offering for sale, having made, import, or transfer of
+   either its Contributions or its Contributor Version.
+
+1.12. “Secondary License”
+
+   means either the GNU General Public License, Version 2.0, the GNU Lesser
+   General Public License, Version 2.1, the GNU Affero General Public
+   License, Version 3.0, or any later versions of those licenses.
+
+1.13. “Source Code Form”
+
+   means the form of the work preferred for making modifications.
+
+1.14. “You” (or “Your”)
+
+   means an individual or a legal entity exercising rights under this
+   License. For legal entities, “You” includes any entity that controls, is
+   controlled by, or is under common control with You. For purposes of this
+   definition, “control” means (a) the power, direct or indirect, to cause
+   the direction or management of such entity, whether by contract or
+   otherwise, or (b) ownership of more than fifty percent (50%) of the
+   outstanding shares or beneficial ownership of such entity.
+
+
+2. License Grants and Conditions
+
+2.1. Grants
+
+   Each Contributor hereby grants You a world-wide, royalty-free,
+   non-exclusive license:
+
+   a. under intellectual property rights (other than patent or trademark)
+      Licensable by such Contributor to use, reproduce, make available,
+      modify, display, perform, distribute, and otherwise exploit its
+      Contributions, either on an unmodified basis, with Modifications, or as
+      part of a Larger Work; and
+
+   b. under Patent Claims of such Contributor to make, use, sell, offer for
+      sale, have made, import, and otherwise transfer either its Contributions
+      or its Contributor Version.
+
+2.2. Effective Date
+
+   The licenses granted in Section 2.1 with respect to any Contribution become
+   effective for each Contribution on the date the Contributor first distributes
+   such Contribution.
+
+2.3. Limitations on Grant Scope
+
+   The licenses granted in this Section 2 are the only rights granted under this
+   License. No additional rights or licenses will be implied from the distribution
+   or licensing of Covered Software under this License. Notwithstanding Section
+   2.1(b) above, no patent license is granted by a Contributor:
+
+   a. for any code that a Contributor has removed from Covered Software; or
+
+   b. for infringements caused by: (i) Your and any other third party’s
+      modifications of Covered Software, or (ii) the combination of its
+      Contributions with other software (except as part of its Contributor
+      Version); or
+
+   c. under Patent Claims infringed by Covered Software in the absence of its
+      Contributions.
+
+   This License does not grant any rights in the trademarks, service marks, or
+   logos of any Contributor (except as may be necessary to comply with the
+   notice requirements in Section 3.4).
+
+2.4. Subsequent Licenses
+
+   No Contributor makes additional grants as a result of Your choice to
+   distribute the Covered Software under a subsequent version of this License
+   (see Section 10.2) or under the terms of a Secondary License (if permitted
+   under the terms of Section 3.3).
+
+2.5. Representation
+
+   Each Contributor represents that the Contributor believes its Contributions
+   are its original creation(s) or it has sufficient rights to grant the
+   rights to its Contributions conveyed by this License.
+
+2.6. Fair Use
+
+   This License is not intended to limit any rights You have under applicable
+   copyright doctrines of fair use, fair dealing, or other equivalents.
+
+2.7. Conditions
+
+   Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in
+   Section 2.1.
+
+
+3. Responsibilities
+
+3.1. Distribution of Source Form
+
+   All distribution of Covered Software in Source Code Form, including any
+   Modifications that You create or to which You contribute, must be under the
+   terms of this License. You must inform recipients that the Source Code Form
+   of the Covered Software is governed by the terms of this License, and how
+   they can obtain a copy of this License. You may not attempt to alter or
+   restrict the recipients’ rights in the Source Code Form.
+
+3.2. Distribution of Executable Form
+
+   If You distribute Covered Software in Executable Form then:
+
+   a. such Covered Software must also be made available in Source Code Form,
+      as described in Section 3.1, and You must inform recipients of the
+      Executable Form how they can obtain a copy of such Source Code Form by
+      reasonable means in a timely manner, at a charge no more than the cost
+      of distribution to the recipient; and
+
+   b. You may distribute such Executable Form under the terms of this License,
+      or sublicense it under different terms, provided that the license for
+      the Executable Form does not attempt to limit or alter the recipients’
+      rights in the Source Code Form under this License.
+
+3.3. Distribution of a Larger Work
+
+   You may create and distribute a Larger Work under terms of Your choice,
+   provided that You also comply with the requirements of this License for the
+   Covered Software. If the Larger Work is a combination of Covered Software
+   with a work governed by one or more Secondary Licenses, and the Covered
+   Software is not Incompatible With Secondary Licenses, this License permits
+   You to additionally distribute such Covered Software under the terms of
+   such Secondary License(s), so that the recipient of the Larger Work may, at
+   their option, further distribute the Covered Software under the terms of
+   either this License or such Secondary License(s).
+
+3.4. Notices
+
+   You may not remove or alter the substance of any license notices (including
+   copyright notices, patent notices, disclaimers of warranty, or limitations
+   of liability) contained within the Source Code Form of the Covered
+   Software, except that You may alter any license notices to the extent
+   required to remedy known factual inaccuracies.
+
+3.5. Application of Additional Terms
+
+   You may choose to offer, and to charge a fee for, warranty, support,
+   indemnity or liability obligations to one or more recipients of Covered
+   Software. However, You may do so only on Your own behalf, and not on behalf
+   of any Contributor. You must make it absolutely clear that any such
+   warranty, support, indemnity, or liability obligation is offered by You
+   alone, and You hereby agree to indemnify every Contributor for any
+   liability incurred by such Contributor as a result of warranty, support,
+   indemnity or liability terms You offer. You may include additional
+   disclaimers of warranty and limitations of liability specific to any
+   jurisdiction.
+
+4. Inability to Comply Due to Statute or Regulation
+
+   If it is impossible for You to comply with any of the terms of this License
+   with respect to some or all of the Covered Software due to statute, judicial
+   order, or regulation then You must: (a) comply with the terms of this License
+   to the maximum extent possible; and (b) describe the limitations and the code
+   they affect. Such description must be placed in a text file included with all
+   distributions of the Covered Software under this License. Except to the
+   extent prohibited by statute or regulation, such description must be
+   sufficiently detailed for a recipient of ordinary skill to be able to
+   understand it.
+
+5. Termination
+
+5.1. The rights granted under this License will terminate automatically if You
+   fail to comply with any of its terms. However, if You become compliant,
+   then the rights granted under this License from a particular Contributor
+   are reinstated (a) provisionally, unless and until such Contributor
+   explicitly and finally terminates Your grants, and (b) on an ongoing basis,
+   if such Contributor fails to notify You of the non-compliance by some
+   reasonable means prior to 60 days after You have come back into compliance.
+   Moreover, Your grants from a particular Contributor are reinstated on an
+   ongoing basis if such Contributor notifies You of the non-compliance by
+   some reasonable means, this is the first time You have received notice of
+   non-compliance with this License from such Contributor, and You become
+   compliant prior to 30 days after Your receipt of the notice.
+
+5.2. If You initiate litigation against any entity by asserting a patent
+   infringement claim (excluding declaratory judgment actions, counter-claims,
+   and cross-claims) alleging that a Contributor Version directly or
+   indirectly infringes any patent, then the rights granted to You by any and
+   all Contributors for the Covered Software under Section 2.1 of this License
+   shall terminate.
+
+5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user
+   license agreements (excluding distributors and resellers) which have been
+   validly granted by You or Your distributors under this License prior to
+   termination shall survive termination.
+
+6. Disclaimer of Warranty
+
+   Covered Software is provided under this License on an “as is” basis, without
+   warranty of any kind, either expressed, implied, or statutory, including,
+   without limitation, warranties that the Covered Software is free of defects,
+   merchantable, fit for a particular purpose or non-infringing. The entire
+   risk as to the quality and performance of the Covered Software is with You.
+   Should any Covered Software prove defective in any respect, You (not any
+   Contributor) assume the cost of any necessary servicing, repair, or
+   correction. This disclaimer of warranty constitutes an essential part of this
+   License. No use of any Covered Software is authorized under this License
+   except under this disclaimer.
+
+7. Limitation of Liability
+
+   Under no circumstances and under no legal theory, whether tort (including
+   negligence), contract, or otherwise, shall any Contributor, or anyone who
+   distributes Covered Software as permitted above, be liable to You for any
+   direct, indirect, special, incidental, or consequential damages of any
+   character including, without limitation, damages for lost profits, loss of
+   goodwill, work stoppage, computer failure or malfunction, or any and all
+   other commercial damages or losses, even if such party shall have been
+   informed of the possibility of such damages. This limitation of liability
+   shall not apply to liability for death or personal injury resulting from such
+   party’s negligence to the extent applicable law prohibits such limitation.
+   Some jurisdictions do not allow the exclusion or limitation of incidental or
+   consequential damages, so this exclusion and limitation may not apply to You.
+
+8. Litigation
+
+   Any litigation relating to this License may be brought only in the courts of
+   a jurisdiction where the defendant maintains its principal place of business
+   and such litigation shall be governed by laws of that jurisdiction, without
+   reference to its conflict-of-law provisions. Nothing in this Section shall
+   prevent a party’s ability to bring cross-claims or counter-claims.
+
+9. Miscellaneous
+
+   This License represents the complete agreement concerning the subject matter
+   hereof. If any provision of this License is held to be unenforceable, such
+   provision shall be reformed only to the extent necessary to make it
Any law or regulation which provides that the language of a
+ contract shall be construed against the drafter shall not be used to construe
+ this License against a Contributor.
+
+
+10. Versions of the License
+
+10.1. New Versions
+
+ Mozilla Foundation is the license steward. Except as provided in Section
+ 10.3, no one other than the license steward has the right to modify or
+ publish new versions of this License. Each version will be given a
+ distinguishing version number.
+
+10.2. Effect of New Versions
+
+ You may distribute the Covered Software under the terms of the version of
+ the License under which You originally received the Covered Software, or
+ under the terms of any subsequent version published by the license
+ steward.
+
+10.3. Modified Versions
+
+ If you create software not governed by this License, and you want to
+ create a new license for such software, you may create and use a modified
+ version of this License if you rename the license and remove any
+ references to the name of the license steward (except to note that such
+ modified license differs from this License).
+
+10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses
+ If You choose to distribute Source Code Form that is Incompatible With
+ Secondary Licenses under the terms of this version of the License, the
+ notice described in Exhibit B of this License must be attached.
+
+Exhibit A - Source Code Form License Notice
+
+ This Source Code Form is subject to the
+ terms of the Mozilla Public License, v.
+ 2.0. If a copy of the MPL was not
+ distributed with this file, You can
+ obtain one at
+ http://mozilla.org/MPL/2.0/.
+
+If it is not possible or desirable to put the notice in a particular file, then
+You may include the notice in a location (such as a LICENSE file in a relevant
+directory) where a recipient would be likely to look for such a notice.
+
+You may add additional accurate notices of copyright ownership.
+
+Exhibit B - "Incompatible With Secondary Licenses" Notice
+
+ This Source Code Form is "Incompatible
+ With Secondary Licenses", as defined by
+ the Mozilla Public License, v. 2.0.
+
diff --git a/vendor/github.com/hashicorp/go-checkpoint/README.md b/vendor/github.com/hashicorp/go-checkpoint/README.md
new file mode 100644
index 000000000..ab8ebc0d3
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-checkpoint/README.md
@@ -0,0 +1,22 @@
+# Go Checkpoint Client
+
+[Checkpoint](http://checkpoint.hashicorp.com) is an internal service at
+HashiCorp that we use to check version information, broadcast security
+bulletins, etc.
+
+We understand that software making remote calls over the internet
+for any reason can be undesirable. Because of this, Checkpoint can be
+disabled in all of our software that includes it. You can view the source
+of this client to see that we're not sending any private information.
+
+Each HashiCorp application has its own specific configuration option
+to disable checkpoint calls, but setting the `CHECKPOINT_DISABLE`
+environment variable disables the underlying checkpoint component
+itself. For example, in the case of Packer:
+```
+CHECKPOINT_DISABLE=1 packer build
+```
+
+**Note:** This repository is probably useless outside of internal HashiCorp
+use. It is open source for disclosure and because our open source projects
+must be able to link to it. 
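+
+As a rough usage sketch (the product name and version below are
+illustrative placeholders, not real values), an application embeds the
+client like this, using the `Check` API from `checkpoint.go` in this
+same package:
+
+```
+package main
+
+import (
+	"fmt"
+	"log"
+
+	"github.com/hashicorp/go-checkpoint"
+)
+
+func main() {
+	// Check respects CHECKPOINT_DISABLE unless Force is set.
+	resp, err := checkpoint.Check(&checkpoint.CheckParams{
+		Product: "packer", // hypothetical product slug
+		Version: "0.8.6",  // hypothetical version string
+	})
+	if err != nil {
+		log.Fatal(err)
+	}
+	if resp.Outdated {
+		fmt.Println("newer version available:", resp.CurrentVersion)
+	}
+}
+```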
diff --git a/vendor/github.com/hashicorp/go-checkpoint/checkpoint.go b/vendor/github.com/hashicorp/go-checkpoint/checkpoint.go
new file mode 100644
index 000000000..abea934bf
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-checkpoint/checkpoint.go
@@ -0,0 +1,359 @@
+// Package checkpoint checks version information and alerts
+// for a HashiCorp product.
+package checkpoint
+
+import (
+ "crypto/rand"
+ "encoding/binary"
+ "encoding/json"
+ "fmt"
+ "io"
+ "io/ioutil"
+ mrand "math/rand"
+ "net/http"
+ "net/url"
+ "os"
+ "path/filepath"
+ "reflect"
+ "runtime"
+ "strings"
+ "time"
+
+ "github.com/hashicorp/go-cleanhttp"
+)
+
+var magicBytes = [4]byte{0x35, 0x77, 0x69, 0xFB}
+
+// CheckParams are the parameters for configuring a check request.
+type CheckParams struct {
+ // Product and version are used to look up the correct product and
+ // alerts for the proper version. The version is also used to perform
+ // a version check.
+ Product string
+ Version string
+
+ // Arch and OS are used to filter alerts potentially only to things
+ // affecting a specific os/arch combination. If these aren't specified,
+ // they'll be automatically filled in.
+ Arch string
+ OS string
+
+ // Signature is some random signature that should be stored and used
+ // as a cookie-like value. This ensures that alerts aren't repeated.
+ // If the signature is changed, repeat alerts may be sent down. The
+ // signature should NOT be anything identifiable to a user (such as
+ // a MAC address). It should be random.
+ //
+ // If SignatureFile is given, then the signature will be read from this
+ // file. If the file doesn't exist, then a random signature will
+ // automatically be generated and stored here. SignatureFile will be
+ // ignored if Signature is given.
+ Signature string
+ SignatureFile string
+
+ // CacheFile, if specified, will cache the result of a check. The
+ // duration of the cache is specified by CacheDuration, and defaults
+ // to 48 hours if not specified. If the CacheFile is newer than the
+ // CacheDuration, then the Check will short-circuit and use those
+ // results.
+ //
+ // If the CacheFile directory doesn't exist, it will be created with
+ // permissions 0755.
+ CacheFile string
+ CacheDuration time.Duration
+
+ // Force, if true, will force the check even if CHECKPOINT_DISABLE
+ // is set. Within HashiCorp products, this is ONLY USED when the user
+ // specifically requests it. This is never automatically done without
+ // the user's consent.
+ Force bool
+}
+
+// CheckResponse is the response for a check request.
+type CheckResponse struct {
+ Product string
+ CurrentVersion string `json:"current_version"`
+ CurrentReleaseDate int `json:"current_release_date"`
+ CurrentDownloadURL string `json:"current_download_url"`
+ CurrentChangelogURL string `json:"current_changelog_url"`
+ ProjectWebsite string `json:"project_website"`
+ Outdated bool `json:"outdated"`
+ Alerts []*CheckAlert
+}
+
+// CheckAlert is a single alert message from a check request.
+//
+// These never have to be manually constructed, and are typically populated
+// into a CheckResponse as a result of the Check request.
+type CheckAlert struct {
+ ID int
+ Date int
+ Message string
+ URL string
+ Level string
+}
+
+// Check checks for alerts and new version information. 
+func Check(p *CheckParams) (*CheckResponse, error) { + if disabled := os.Getenv("CHECKPOINT_DISABLE"); disabled != "" && !p.Force { + return &CheckResponse{}, nil + } + + // If we have a cached result, then use that + if r, err := checkCache(p.Version, p.CacheFile, p.CacheDuration); err != nil { + return nil, err + } else if r != nil { + defer r.Close() + return checkResult(r) + } + + var u url.URL + + if p.Arch == "" { + p.Arch = runtime.GOARCH + } + if p.OS == "" { + p.OS = runtime.GOOS + } + + // If we're given a SignatureFile, then attempt to read that. + signature := p.Signature + if p.Signature == "" && p.SignatureFile != "" { + var err error + signature, err = checkSignature(p.SignatureFile) + if err != nil { + return nil, err + } + } + + v := u.Query() + v.Set("version", p.Version) + v.Set("arch", p.Arch) + v.Set("os", p.OS) + v.Set("signature", signature) + + u.Scheme = "https" + u.Host = "checkpoint-api.hashicorp.com" + u.Path = fmt.Sprintf("/v1/check/%s", p.Product) + u.RawQuery = v.Encode() + + req, err := http.NewRequest("GET", u.String(), nil) + if err != nil { + return nil, err + } + req.Header.Add("Accept", "application/json") + req.Header.Add("User-Agent", "HashiCorp/go-checkpoint") + + client := cleanhttp.DefaultClient() + resp, err := client.Do(req) + if err != nil { + return nil, err + } + if resp.StatusCode != 200 { + return nil, fmt.Errorf("Unknown status: %d", resp.StatusCode) + } + + var r io.Reader = resp.Body + if p.CacheFile != "" { + // Make sure the directory holding our cache exists. + if err := os.MkdirAll(filepath.Dir(p.CacheFile), 0755); err != nil { + return nil, err + } + + // We have to cache the result, so write the response to the + // file as we read it. + f, err := os.Create(p.CacheFile) + if err != nil { + return nil, err + } + + // Write the cache header + if err := writeCacheHeader(f, p.Version); err != nil { + f.Close() + os.Remove(p.CacheFile) + return nil, err + } + + defer f.Close() + r = io.TeeReader(r, f) + } + + return checkResult(r) +} + +// CheckInterval is used to check for a response on a given interval duration. +// The interval is not exact, and checks are randomized to prevent a thundering +// herd. However, it is expected that on average one check is performed per +// interval. The returned channel may be closed to stop background checks. +func CheckInterval(p *CheckParams, interval time.Duration, cb func(*CheckResponse, error)) chan struct{} { + doneCh := make(chan struct{}) + + if disabled := os.Getenv("CHECKPOINT_DISABLE"); disabled != "" { + return doneCh + } + + go func() { + for { + select { + case <-time.After(randomStagger(interval)): + resp, err := Check(p) + cb(resp, err) + case <-doneCh: + return + } + } + }() + + return doneCh +} + +// randomStagger returns an interval that is between 3/4 and 5/4 of +// the given interval. The expected value is the interval. +func randomStagger(interval time.Duration) time.Duration { + stagger := time.Duration(mrand.Int63()) % (interval / 2) + return 3*(interval/4) + stagger +} + +func checkCache(current string, path string, d time.Duration) (io.ReadCloser, error) { + fi, err := os.Stat(path) + if err != nil { + if os.IsNotExist(err) { + // File doesn't exist, not a problem + return nil, nil + } + + return nil, err + } + + if d == 0 { + d = 48 * time.Hour + } + + if fi.ModTime().Add(d).Before(time.Now()) { + // Cache is busted, delete the old file and re-request. We ignore + // errors here because re-creating the file is fine too. 
+ os.Remove(path)
+ return nil, nil
+ }
+
+ // File looks good so far, open it up so we can inspect the contents.
+ f, err := os.Open(path)
+ if err != nil {
+ return nil, err
+ }
+
+ // Check the signature of the file
+ var sig [4]byte
+ if err := binary.Read(f, binary.LittleEndian, sig[:]); err != nil {
+ f.Close()
+ return nil, err
+ }
+ if !reflect.DeepEqual(sig, magicBytes) {
+ // Signatures don't match. Reset.
+ f.Close()
+ return nil, nil
+ }
+
+ // Check the version. If it changed, then rewrite
+ var length uint32
+ if err := binary.Read(f, binary.LittleEndian, &length); err != nil {
+ f.Close()
+ return nil, err
+ }
+ data := make([]byte, length)
+ if _, err := io.ReadFull(f, data); err != nil {
+ f.Close()
+ return nil, err
+ }
+ if string(data) != current {
+ // Version changed, reset
+ f.Close()
+ return nil, nil
+ }
+
+ return f, nil
+}
+
+func checkResult(r io.Reader) (*CheckResponse, error) {
+ var result CheckResponse
+ dec := json.NewDecoder(r)
+ if err := dec.Decode(&result); err != nil {
+ return nil, err
+ }
+
+ return &result, nil
+}
+
+func checkSignature(path string) (string, error) {
+ _, err := os.Stat(path)
+ if err == nil {
+ // The file exists, read it out
+ sigBytes, err := ioutil.ReadFile(path)
+ if err != nil {
+ return "", err
+ }
+
+ // Split the file into lines
+ lines := strings.SplitN(string(sigBytes), "\n", 2)
+ if len(lines) > 0 {
+ return strings.TrimSpace(lines[0]), nil
+ }
+ }
+
+ // If the error is anything other than the file not existing, return it.
+ if !os.IsNotExist(err) {
+ return "", err
+ }
+
+ // The file doesn't exist, so create a signature.
+ var b [16]byte
+ n := 0
+ for n < 16 {
+ n2, err := rand.Read(b[n:])
+ if err != nil {
+ return "", err
+ }
+
+ n += n2
+ }
+ signature := fmt.Sprintf(
+ "%x-%x-%x-%x-%x", b[0:4], b[4:6], b[6:8], b[8:10], b[10:])
+
+ // Make sure the directory holding our signature exists.
+ if err := os.MkdirAll(filepath.Dir(path), 0755); err != nil {
+ return "", err
+ }
+
+ // Write the signature
+ if err := ioutil.WriteFile(path, []byte(signature+"\n\n"+userMessage+"\n"), 0644); err != nil {
+ return "", err
+ }
+
+ return signature, nil
+}
+
+func writeCacheHeader(f io.Writer, v string) error {
+ // Write our signature first
+ if err := binary.Write(f, binary.LittleEndian, magicBytes); err != nil {
+ return err
+ }
+
+ // Write out our current version length
+ length := uint32(len(v))
+ if err := binary.Write(f, binary.LittleEndian, length); err != nil {
+ return err
+ }
+
+ _, err := f.Write([]byte(v))
+ return err
+}
+
+// userMessage is suffixed to the signature file to provide feedback.
+var userMessage = `
+This signature is a randomly generated UUID used to de-duplicate
+alerts and version information. This signature is random; it is
+not based on any personally identifiable information. To create
+a new signature, you can simply delete this file at any time.
+See the documentation for the software using Checkpoint for more
+information on how to disable it. 
+` diff --git a/vendor/github.com/hashicorp/go-checkpoint/checkpoint_test.go b/vendor/github.com/hashicorp/go-checkpoint/checkpoint_test.go new file mode 100644 index 000000000..6d0a5addc --- /dev/null +++ b/vendor/github.com/hashicorp/go-checkpoint/checkpoint_test.go @@ -0,0 +1,201 @@ +package checkpoint + +import ( + "io/ioutil" + "os" + "path/filepath" + "reflect" + "testing" + "time" +) + +func TestCheck(t *testing.T) { + expected := &CheckResponse{ + Product: "test", + CurrentVersion: "1.0", + CurrentReleaseDate: 0, + CurrentDownloadURL: "http://www.hashicorp.com", + CurrentChangelogURL: "http://www.hashicorp.com", + ProjectWebsite: "http://www.hashicorp.com", + Outdated: false, + Alerts: []*CheckAlert{}, + } + + actual, err := Check(&CheckParams{ + Product: "test", + Version: "1.0", + }) + + if err != nil { + t.Fatalf("err: %s", err) + } + + if !reflect.DeepEqual(actual, expected) { + t.Fatalf("bad: %#v", actual) + } +} + +func TestCheck_disabled(t *testing.T) { + os.Setenv("CHECKPOINT_DISABLE", "1") + defer os.Setenv("CHECKPOINT_DISABLE", "") + + expected := &CheckResponse{} + + actual, err := Check(&CheckParams{ + Product: "test", + Version: "1.0", + }) + + if err != nil { + t.Fatal(err) + } + + if !reflect.DeepEqual(actual, expected) { + t.Fatalf("expected %+v to equal %+v", actual, expected) + } +} + +func TestCheck_cache(t *testing.T) { + dir, err := ioutil.TempDir("", "checkpoint") + if err != nil { + t.Fatalf("err: %s", err) + } + + expected := &CheckResponse{ + Product: "test", + CurrentVersion: "1.0", + CurrentReleaseDate: 0, + CurrentDownloadURL: "http://www.hashicorp.com", + CurrentChangelogURL: "http://www.hashicorp.com", + ProjectWebsite: "http://www.hashicorp.com", + Outdated: false, + Alerts: []*CheckAlert{}, + } + + var actual *CheckResponse + for i := 0; i < 5; i++ { + var err error + actual, err = Check(&CheckParams{ + Product: "test", + Version: "1.0", + CacheFile: filepath.Join(dir, "cache"), + }) + if err != nil { + t.Fatalf("err: %s", err) + } + } + + if !reflect.DeepEqual(actual, expected) { + t.Fatalf("bad: %#v", actual) + } +} + +func TestCheck_cacheNested(t *testing.T) { + dir, err := ioutil.TempDir("", "checkpoint") + if err != nil { + t.Fatalf("err: %s", err) + } + + expected := &CheckResponse{ + Product: "test", + CurrentVersion: "1.0", + CurrentReleaseDate: 0, + CurrentDownloadURL: "http://www.hashicorp.com", + CurrentChangelogURL: "http://www.hashicorp.com", + ProjectWebsite: "http://www.hashicorp.com", + Outdated: false, + Alerts: []*CheckAlert{}, + } + + var actual *CheckResponse + for i := 0; i < 5; i++ { + var err error + actual, err = Check(&CheckParams{ + Product: "test", + Version: "1.0", + CacheFile: filepath.Join(dir, "nested", "cache"), + }) + if err != nil { + t.Fatalf("err: %s", err) + } + } + + if !reflect.DeepEqual(actual, expected) { + t.Fatalf("bad: %#v", actual) + } +} + +func TestCheckInterval(t *testing.T) { + expected := &CheckResponse{ + Product: "test", + CurrentVersion: "1.0", + CurrentReleaseDate: 0, + CurrentDownloadURL: "http://www.hashicorp.com", + CurrentChangelogURL: "http://www.hashicorp.com", + ProjectWebsite: "http://www.hashicorp.com", + Outdated: false, + Alerts: []*CheckAlert{}, + } + + params := &CheckParams{ + Product: "test", + Version: "1.0", + } + + calledCh := make(chan struct{}) + checkFn := func(actual *CheckResponse, err error) { + defer close(calledCh) + if err != nil { + t.Fatalf("err: %s", err) + } + + if !reflect.DeepEqual(actual, expected) { + t.Fatalf("bad: %#v", actual) + } + } + + doneCh := 
CheckInterval(params, 500*time.Millisecond, checkFn) + defer close(doneCh) + + select { + case <-calledCh: + case <-time.After(time.Second): + t.Fatalf("timeout") + } +} + +func TestCheckInterval_disabled(t *testing.T) { + os.Setenv("CHECKPOINT_DISABLE", "1") + defer os.Setenv("CHECKPOINT_DISABLE", "") + + params := &CheckParams{ + Product: "test", + Version: "1.0", + } + + calledCh := make(chan struct{}) + checkFn := func(actual *CheckResponse, err error) { + defer close(calledCh) + } + + doneCh := CheckInterval(params, 500*time.Millisecond, checkFn) + defer close(doneCh) + + select { + case <-calledCh: + t.Fatal("expected callback to not invoke") + case <-time.After(time.Second): + } +} + +func TestRandomStagger(t *testing.T) { + intv := 24 * time.Hour + min := 18 * time.Hour + max := 30 * time.Hour + for i := 0; i < 1000; i++ { + out := randomStagger(intv) + if out < min || out > max { + t.Fatalf("bad: %v", out) + } + } +} diff --git a/vendor/github.com/hashicorp/go-cleanhttp/LICENSE b/vendor/github.com/hashicorp/go-cleanhttp/LICENSE new file mode 100644 index 000000000..e87a115e4 --- /dev/null +++ b/vendor/github.com/hashicorp/go-cleanhttp/LICENSE @@ -0,0 +1,363 @@ +Mozilla Public License, version 2.0 + +1. Definitions + +1.1. "Contributor" + + means each individual or legal entity that creates, contributes to the + creation of, or owns Covered Software. + +1.2. "Contributor Version" + + means the combination of the Contributions of others (if any) used by a + Contributor and that particular Contributor's Contribution. + +1.3. "Contribution" + + means Covered Software of a particular Contributor. + +1.4. "Covered Software" + + means Source Code Form to which the initial Contributor has attached the + notice in Exhibit A, the Executable Form of such Source Code Form, and + Modifications of such Source Code Form, in each case including portions + thereof. + +1.5. "Incompatible With Secondary Licenses" + means + + a. that the initial Contributor has attached the notice described in + Exhibit B to the Covered Software; or + + b. that the Covered Software was made available under the terms of + version 1.1 or earlier of the License, but not also under the terms of + a Secondary License. + +1.6. "Executable Form" + + means any form of the work other than Source Code Form. + +1.7. "Larger Work" + + means a work that combines Covered Software with other material, in a + separate file or files, that is not Covered Software. + +1.8. "License" + + means this document. + +1.9. "Licensable" + + means having the right to grant, to the maximum extent possible, whether + at the time of the initial grant or subsequently, any and all of the + rights conveyed by this License. + +1.10. "Modifications" + + means any of the following: + + a. any file in Source Code Form that results from an addition to, + deletion from, or modification of the contents of Covered Software; or + + b. any new file in Source Code Form that contains any Covered Software. + +1.11. "Patent Claims" of a Contributor + + means any patent claim(s), including without limitation, method, + process, and apparatus claims, in any patent Licensable by such + Contributor that would be infringed, but for the grant of the License, + by the making, using, selling, offering for sale, having made, import, + or transfer of either its Contributions or its Contributor Version. + +1.12. 
"Secondary License" + + means either the GNU General Public License, Version 2.0, the GNU Lesser + General Public License, Version 2.1, the GNU Affero General Public + License, Version 3.0, or any later versions of those licenses. + +1.13. "Source Code Form" + + means the form of the work preferred for making modifications. + +1.14. "You" (or "Your") + + means an individual or a legal entity exercising rights under this + License. For legal entities, "You" includes any entity that controls, is + controlled by, or is under common control with You. For purposes of this + definition, "control" means (a) the power, direct or indirect, to cause + the direction or management of such entity, whether by contract or + otherwise, or (b) ownership of more than fifty percent (50%) of the + outstanding shares or beneficial ownership of such entity. + + +2. License Grants and Conditions + +2.1. Grants + + Each Contributor hereby grants You a world-wide, royalty-free, + non-exclusive license: + + a. under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or + as part of a Larger Work; and + + b. under Patent Claims of such Contributor to make, use, sell, offer for + sale, have made, import, and otherwise transfer either its + Contributions or its Contributor Version. + +2.2. Effective Date + + The licenses granted in Section 2.1 with respect to any Contribution + become effective for each Contribution on the date the Contributor first + distributes such Contribution. + +2.3. Limitations on Grant Scope + + The licenses granted in this Section 2 are the only rights granted under + this License. No additional rights or licenses will be implied from the + distribution or licensing of Covered Software under this License. + Notwithstanding Section 2.1(b) above, no patent license is granted by a + Contributor: + + a. for any code that a Contributor has removed from Covered Software; or + + b. for infringements caused by: (i) Your and any other third party's + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + + c. under Patent Claims infringed by Covered Software in the absence of + its Contributions. + + This License does not grant any rights in the trademarks, service marks, + or logos of any Contributor (except as may be necessary to comply with + the notice requirements in Section 3.4). + +2.4. Subsequent Licenses + + No Contributor makes additional grants as a result of Your choice to + distribute the Covered Software under a subsequent version of this + License (see Section 10.2) or under the terms of a Secondary License (if + permitted under the terms of Section 3.3). + +2.5. Representation + + Each Contributor represents that the Contributor believes its + Contributions are its original creation(s) or it has sufficient rights to + grant the rights to its Contributions conveyed by this License. + +2.6. Fair Use + + This License is not intended to limit any rights You have under + applicable copyright doctrines of fair use, fair dealing, or other + equivalents. + +2.7. Conditions + + Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in + Section 2.1. + + +3. Responsibilities + +3.1. 
Distribution of Source Form + + All distribution of Covered Software in Source Code Form, including any + Modifications that You create or to which You contribute, must be under + the terms of this License. You must inform recipients that the Source + Code Form of the Covered Software is governed by the terms of this + License, and how they can obtain a copy of this License. You may not + attempt to alter or restrict the recipients' rights in the Source Code + Form. + +3.2. Distribution of Executable Form + + If You distribute Covered Software in Executable Form then: + + a. such Covered Software must also be made available in Source Code Form, + as described in Section 3.1, and You must inform recipients of the + Executable Form how they can obtain a copy of such Source Code Form by + reasonable means in a timely manner, at a charge no more than the cost + of distribution to the recipient; and + + b. You may distribute such Executable Form under the terms of this + License, or sublicense it under different terms, provided that the + license for the Executable Form does not attempt to limit or alter the + recipients' rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + + You may create and distribute a Larger Work under terms of Your choice, + provided that You also comply with the requirements of this License for + the Covered Software. If the Larger Work is a combination of Covered + Software with a work governed by one or more Secondary Licenses, and the + Covered Software is not Incompatible With Secondary Licenses, this + License permits You to additionally distribute such Covered Software + under the terms of such Secondary License(s), so that the recipient of + the Larger Work may, at their option, further distribute the Covered + Software under the terms of either this License or such Secondary + License(s). + +3.4. Notices + + You may not remove or alter the substance of any license notices + (including copyright notices, patent notices, disclaimers of warranty, or + limitations of liability) contained within the Source Code Form of the + Covered Software, except that You may alter any license notices to the + extent required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + + You may choose to offer, and to charge a fee for, warranty, support, + indemnity or liability obligations to one or more recipients of Covered + Software. However, You may do so only on Your own behalf, and not on + behalf of any Contributor. You must make it absolutely clear that any + such warranty, support, indemnity, or liability obligation is offered by + You alone, and You hereby agree to indemnify every Contributor for any + liability incurred by such Contributor as a result of warranty, support, + indemnity or liability terms You offer. You may include additional + disclaimers of warranty and limitations of liability specific to any + jurisdiction. + +4. Inability to Comply Due to Statute or Regulation + + If it is impossible for You to comply with any of the terms of this License + with respect to some or all of the Covered Software due to statute, + judicial order, or regulation then You must: (a) comply with the terms of + this License to the maximum extent possible; and (b) describe the + limitations and the code they affect. Such description must be placed in a + text file included with all distributions of the Covered Software under + this License. 
Except to the extent prohibited by statute or regulation, + such description must be sufficiently detailed for a recipient of ordinary + skill to be able to understand it. + +5. Termination + +5.1. The rights granted under this License will terminate automatically if You + fail to comply with any of its terms. However, if You become compliant, + then the rights granted under this License from a particular Contributor + are reinstated (a) provisionally, unless and until such Contributor + explicitly and finally terminates Your grants, and (b) on an ongoing + basis, if such Contributor fails to notify You of the non-compliance by + some reasonable means prior to 60 days after You have come back into + compliance. Moreover, Your grants from a particular Contributor are + reinstated on an ongoing basis if such Contributor notifies You of the + non-compliance by some reasonable means, this is the first time You have + received notice of non-compliance with this License from such + Contributor, and You become compliant prior to 30 days after Your receipt + of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent + infringement claim (excluding declaratory judgment actions, + counter-claims, and cross-claims) alleging that a Contributor Version + directly or indirectly infringes any patent, then the rights granted to + You by any and all Contributors for the Covered Software under Section + 2.1 of this License shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user + license agreements (excluding distributors and resellers) which have been + validly granted by You or Your distributors under this License prior to + termination shall survive termination. + +6. Disclaimer of Warranty + + Covered Software is provided under this License on an "as is" basis, + without warranty of any kind, either expressed, implied, or statutory, + including, without limitation, warranties that the Covered Software is free + of defects, merchantable, fit for a particular purpose or non-infringing. + The entire risk as to the quality and performance of the Covered Software + is with You. Should any Covered Software prove defective in any respect, + You (not any Contributor) assume the cost of any necessary servicing, + repair, or correction. This disclaimer of warranty constitutes an essential + part of this License. No use of any Covered Software is authorized under + this License except under this disclaimer. + +7. Limitation of Liability + + Under no circumstances and under no legal theory, whether tort (including + negligence), contract, or otherwise, shall any Contributor, or anyone who + distributes Covered Software as permitted above, be liable to You for any + direct, indirect, special, incidental, or consequential damages of any + character including, without limitation, damages for lost profits, loss of + goodwill, work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses, even if such party shall have been + informed of the possibility of such damages. This limitation of liability + shall not apply to liability for death or personal injury resulting from + such party's negligence to the extent applicable law prohibits such + limitation. Some jurisdictions do not allow the exclusion or limitation of + incidental or consequential damages, so this exclusion and limitation may + not apply to You. + +8. 
Litigation + + Any litigation relating to this License may be brought only in the courts + of a jurisdiction where the defendant maintains its principal place of + business and such litigation shall be governed by laws of that + jurisdiction, without reference to its conflict-of-law provisions. Nothing + in this Section shall prevent a party's ability to bring cross-claims or + counter-claims. + +9. Miscellaneous + + This License represents the complete agreement concerning the subject + matter hereof. If any provision of this License is held to be + unenforceable, such provision shall be reformed only to the extent + necessary to make it enforceable. Any law or regulation which provides that + the language of a contract shall be construed against the drafter shall not + be used to construe this License against a Contributor. + + +10. Versions of the License + +10.1. New Versions + + Mozilla Foundation is the license steward. Except as provided in Section + 10.3, no one other than the license steward has the right to modify or + publish new versions of this License. Each version will be given a + distinguishing version number. + +10.2. Effect of New Versions + + You may distribute the Covered Software under the terms of the version + of the License under which You originally received the Covered Software, + or under the terms of any subsequent version published by the license + steward. + +10.3. Modified Versions + + If you create software not governed by this License, and you want to + create a new license for such software, you may create and use a + modified version of this License if you rename the license and remove + any references to the name of the license steward (except to note that + such modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary + Licenses If You choose to distribute Source Code Form that is + Incompatible With Secondary Licenses under the terms of this version of + the License, the notice described in Exhibit B of this License must be + attached. + +Exhibit A - Source Code Form License Notice + + This Source Code Form is subject to the + terms of the Mozilla Public License, v. + 2.0. If a copy of the MPL was not + distributed with this file, You can + obtain one at + http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular file, +then You may include the notice in a location (such as a LICENSE file in a +relevant directory) where a recipient would be likely to look for such a +notice. + +You may add additional accurate notices of copyright ownership. + +Exhibit B - "Incompatible With Secondary Licenses" Notice + + This Source Code Form is "Incompatible + With Secondary Licenses", as defined by + the Mozilla Public License, v. 2.0. + diff --git a/vendor/github.com/hashicorp/go-cleanhttp/README.md b/vendor/github.com/hashicorp/go-cleanhttp/README.md new file mode 100644 index 000000000..036e5313f --- /dev/null +++ b/vendor/github.com/hashicorp/go-cleanhttp/README.md @@ -0,0 +1,30 @@ +# cleanhttp + +Functions for accessing "clean" Go http.Client values + +------------- + +The Go standard library contains a default `http.Client` called +`http.DefaultClient`. 
It is a common idiom in Go code to start with
+`http.DefaultClient` and tweak it as necessary, and in fact, this is
+encouraged; from the `http` package documentation:
+
+> The Client's Transport typically has internal state (cached TCP connections),
+so Clients should be reused instead of created as needed. Clients are safe for
+concurrent use by multiple goroutines.
+
+Unfortunately, this is a shared value, and it is not uncommon for libraries to
+assume that they are free to modify it at will. With enough dependencies, it
+can be very easy to encounter strange problems and race conditions due to
+manipulation of this shared value across libraries and goroutines (clients are
+safe for concurrent use, but writing values to the client struct itself is not
+protected).
+
+Making things worse is the fact that a bare `http.Client` will use a default
+`http.Transport` called `http.DefaultTransport`, which is another global value
+that behaves the same way. So it is not simply enough to replace
+`http.DefaultClient` with `&http.Client{}`.
+
+This repository provides some simple functions to get a "clean" `http.Client`
+-- one that uses the same default values as the Go standard library, but
+returns a client that does not share any state with other clients.
diff --git a/vendor/github.com/hashicorp/go-cleanhttp/cleanhttp.go b/vendor/github.com/hashicorp/go-cleanhttp/cleanhttp.go
new file mode 100644
index 000000000..c692e23f4
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-cleanhttp/cleanhttp.go
@@ -0,0 +1,40 @@
+package cleanhttp
+
+import (
+ "net"
+ "net/http"
+ "runtime"
+ "time"
+)
+
+// DefaultTransport returns a new http.Transport with the same default values
+// as http.DefaultTransport
+func DefaultTransport() *http.Transport {
+ transport := &http.Transport{
+ Proxy: http.ProxyFromEnvironment,
+ Dial: (&net.Dialer{
+ Timeout: 30 * time.Second,
+ KeepAlive: 30 * time.Second,
+ }).Dial,
+ TLSHandshakeTimeout: 10 * time.Second,
+ }
+ SetTransportFinalizer(transport)
+ return transport
+}
+
+// DefaultClient returns a new http.Client with the same default values as
+// http.Client, but with a non-shared Transport
+func DefaultClient() *http.Client {
+ return &http.Client{
+ Transport: DefaultTransport(),
+ }
+}
+
+// SetTransportFinalizer sets a finalizer on the transport to ensure that
+// idle connections are closed prior to garbage collection; otherwise
+// these may leak
+func SetTransportFinalizer(transport *http.Transport) {
+ runtime.SetFinalizer(&transport, func(t **http.Transport) {
+ (*t).CloseIdleConnections()
+ })
+}
diff --git a/vendor/github.com/hashicorp/go-getter/.travis.yml b/vendor/github.com/hashicorp/go-getter/.travis.yml
new file mode 100644
index 000000000..59bf16855
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-getter/.travis.yml
@@ -0,0 +1,10 @@
+sudo: false
+
+language: go
+
+go:
+ - 1.5
+
+branches:
+ only:
+ - master
diff --git a/vendor/github.com/hashicorp/go-getter/LICENSE b/vendor/github.com/hashicorp/go-getter/LICENSE
new file mode 100644
index 000000000..c33dcc7c9
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-getter/LICENSE
@@ -0,0 +1,354 @@
+Mozilla Public License, version 2.0
+
+1. Definitions
+
+1.1. "Contributor"
+
+ means each individual or legal entity that creates, contributes to the
+ creation of, or owns Covered Software.
+
+1.2. "Contributor Version"
+
+ means the combination of the Contributions of others (if any) used by a
+ Contributor and that particular Contributor's Contribution.
+
+1.3. 
"Contribution"
+
+ means Covered Software of a particular Contributor.
+
+1.4. "Covered Software"
+
+ means Source Code Form to which the initial Contributor has attached the
+ notice in Exhibit A, the Executable Form of such Source Code Form, and
+ Modifications of such Source Code Form, in each case including portions
+ thereof.
+
+1.5. "Incompatible With Secondary Licenses"
+ means
+
+ a. that the initial Contributor has attached the notice described in
+ Exhibit B to the Covered Software; or
+
+ b. that the Covered Software was made available under the terms of version
+ 1.1 or earlier of the License, but not also under the terms of a
+ Secondary License.
+
+1.6. "Executable Form"
+
+ means any form of the work other than Source Code Form.
+
+1.7. "Larger Work"
+
+ means a work that combines Covered Software with other material, in a separate
+ file or files, that is not Covered Software.
+
+1.8. "License"
+
+ means this document.
+
+1.9. "Licensable"
+
+ means having the right to grant, to the maximum extent possible, whether at the
+ time of the initial grant or subsequently, any and all of the rights conveyed by
+ this License.
+
+1.10. "Modifications"
+
+ means any of the following:
+
+ a. any file in Source Code Form that results from an addition to, deletion
+ from, or modification of the contents of Covered Software; or
+
+ b. any new file in Source Code Form that contains any Covered Software.
+
+1.11. "Patent Claims" of a Contributor
+
+ means any patent claim(s), including without limitation, method, process,
+ and apparatus claims, in any patent Licensable by such Contributor that
+ would be infringed, but for the grant of the License, by the making,
+ using, selling, offering for sale, having made, import, or transfer of
+ either its Contributions or its Contributor Version.
+
+1.12. "Secondary License"
+
+ means either the GNU General Public License, Version 2.0, the GNU Lesser
+ General Public License, Version 2.1, the GNU Affero General Public
+ License, Version 3.0, or any later versions of those licenses.
+
+1.13. "Source Code Form"
+
+ means the form of the work preferred for making modifications.
+
+1.14. "You" (or "Your")
+
+ means an individual or a legal entity exercising rights under this
+ License. For legal entities, "You" includes any entity that controls, is
+ controlled by, or is under common control with You. For purposes of this
+ definition, "control" means (a) the power, direct or indirect, to cause
+ the direction or management of such entity, whether by contract or
+ otherwise, or (b) ownership of more than fifty percent (50%) of the
+ outstanding shares or beneficial ownership of such entity.
+
+
+2. License Grants and Conditions
+
+2.1. Grants
+
+ Each Contributor hereby grants You a world-wide, royalty-free,
+ non-exclusive license:
+
+ a. under intellectual property rights (other than patent or trademark)
+ Licensable by such Contributor to use, reproduce, make available,
+ modify, display, perform, distribute, and otherwise exploit its
+ Contributions, either on an unmodified basis, with Modifications, or as
+ part of a Larger Work; and
+
+ b. under Patent Claims of such Contributor to make, use, sell, offer for
+ sale, have made, import, and otherwise transfer either its Contributions
+ or its Contributor Version.
+
+2.2. Effective Date
+
+ The licenses granted in Section 2.1 with respect to any Contribution become
+ effective for each Contribution on the date the Contributor first distributes
+ such Contribution.
+
+2.3. 
Limitations on Grant Scope
+
+ The licenses granted in this Section 2 are the only rights granted under this
+ License. No additional rights or licenses will be implied from the distribution
+ or licensing of Covered Software under this License. Notwithstanding Section
+ 2.1(b) above, no patent license is granted by a Contributor:
+
+ a. for any code that a Contributor has removed from Covered Software; or
+
+ b. for infringements caused by: (i) Your and any other third party's
+ modifications of Covered Software, or (ii) the combination of its
+ Contributions with other software (except as part of its Contributor
+ Version); or
+
+ c. under Patent Claims infringed by Covered Software in the absence of its
+ Contributions.
+
+ This License does not grant any rights in the trademarks, service marks, or
+ logos of any Contributor (except as may be necessary to comply with the
+ notice requirements in Section 3.4).
+
+2.4. Subsequent Licenses
+
+ No Contributor makes additional grants as a result of Your choice to
+ distribute the Covered Software under a subsequent version of this License
+ (see Section 10.2) or under the terms of a Secondary License (if permitted
+ under the terms of Section 3.3).
+
+2.5. Representation
+
+ Each Contributor represents that the Contributor believes its Contributions
+ are its original creation(s) or it has sufficient rights to grant the
+ rights to its Contributions conveyed by this License.
+
+2.6. Fair Use
+
+ This License is not intended to limit any rights You have under applicable
+ copyright doctrines of fair use, fair dealing, or other equivalents.
+
+2.7. Conditions
+
+ Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in
+ Section 2.1.
+
+
+3. Responsibilities
+
+3.1. Distribution of Source Form
+
+ All distribution of Covered Software in Source Code Form, including any
+ Modifications that You create or to which You contribute, must be under the
+ terms of this License. You must inform recipients that the Source Code Form
+ of the Covered Software is governed by the terms of this License, and how
+ they can obtain a copy of this License. You may not attempt to alter or
+ restrict the recipients' rights in the Source Code Form.
+
+3.2. Distribution of Executable Form
+
+ If You distribute Covered Software in Executable Form then:
+
+ a. such Covered Software must also be made available in Source Code Form,
+ as described in Section 3.1, and You must inform recipients of the
+ Executable Form how they can obtain a copy of such Source Code Form by
+ reasonable means in a timely manner, at a charge no more than the cost
+ of distribution to the recipient; and
+
+ b. You may distribute such Executable Form under the terms of this License,
+ or sublicense it under different terms, provided that the license for
+ the Executable Form does not attempt to limit or alter the recipients'
+ rights in the Source Code Form under this License.
+
+3.3. Distribution of a Larger Work
+
+ You may create and distribute a Larger Work under terms of Your choice,
+ provided that You also comply with the requirements of this License for the
+ Covered Software. 
If the Larger Work is a combination of Covered Software
+ with a work governed by one or more Secondary Licenses, and the Covered
+ Software is not Incompatible With Secondary Licenses, this License permits
+ You to additionally distribute such Covered Software under the terms of
+ such Secondary License(s), so that the recipient of the Larger Work may, at
+ their option, further distribute the Covered Software under the terms of
+ either this License or such Secondary License(s).
+
+3.4. Notices
+
+ You may not remove or alter the substance of any license notices (including
+ copyright notices, patent notices, disclaimers of warranty, or limitations
+ of liability) contained within the Source Code Form of the Covered
+ Software, except that You may alter any license notices to the extent
+ required to remedy known factual inaccuracies.
+
+3.5. Application of Additional Terms
+
+ You may choose to offer, and to charge a fee for, warranty, support,
+ indemnity or liability obligations to one or more recipients of Covered
+ Software. However, You may do so only on Your own behalf, and not on behalf
+ of any Contributor. You must make it absolutely clear that any such
+ warranty, support, indemnity, or liability obligation is offered by You
+ alone, and You hereby agree to indemnify every Contributor for any
+ liability incurred by such Contributor as a result of warranty, support,
+ indemnity or liability terms You offer. You may include additional
+ disclaimers of warranty and limitations of liability specific to any
+ jurisdiction.
+
+4. Inability to Comply Due to Statute or Regulation
+
+ If it is impossible for You to comply with any of the terms of this License
+ with respect to some or all of the Covered Software due to statute, judicial
+ order, or regulation then You must: (a) comply with the terms of this License
+ to the maximum extent possible; and (b) describe the limitations and the code
+ they affect. Such description must be placed in a text file included with all
+ distributions of the Covered Software under this License. Except to the
+ extent prohibited by statute or regulation, such description must be
+ sufficiently detailed for a recipient of ordinary skill to be able to
+ understand it.
+
+5. Termination
+
+5.1. The rights granted under this License will terminate automatically if You
+ fail to comply with any of its terms. However, if You become compliant,
+ then the rights granted under this License from a particular Contributor
+ are reinstated (a) provisionally, unless and until such Contributor
+ explicitly and finally terminates Your grants, and (b) on an ongoing basis,
+ if such Contributor fails to notify You of the non-compliance by some
+ reasonable means prior to 60 days after You have come back into compliance.
+ Moreover, Your grants from a particular Contributor are reinstated on an
+ ongoing basis if such Contributor notifies You of the non-compliance by
+ some reasonable means, this is the first time You have received notice of
+ non-compliance with this License from such Contributor, and You become
+ compliant prior to 30 days after Your receipt of the notice.
+
+5.2. If You initiate litigation against any entity by asserting a patent
+ infringement claim (excluding declaratory judgment actions, counter-claims,
+ and cross-claims) alleging that a Contributor Version directly or
+ indirectly infringes any patent, then the rights granted to You by any and
+ all Contributors for the Covered Software under Section 2.1 of this License
+ shall terminate. 
+
+5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user
+ license agreements (excluding distributors and resellers) which have been
+ validly granted by You or Your distributors under this License prior to
+ termination shall survive termination.
+
+6. Disclaimer of Warranty
+
+ Covered Software is provided under this License on an "as is" basis, without
+ warranty of any kind, either expressed, implied, or statutory, including,
+ without limitation, warranties that the Covered Software is free of defects,
+ merchantable, fit for a particular purpose or non-infringing. The entire
+ risk as to the quality and performance of the Covered Software is with You.
+ Should any Covered Software prove defective in any respect, You (not any
+ Contributor) assume the cost of any necessary servicing, repair, or
+ correction. This disclaimer of warranty constitutes an essential part of this
+ License. No use of any Covered Software is authorized under this License
+ except under this disclaimer.
+
+7. Limitation of Liability
+
+ Under no circumstances and under no legal theory, whether tort (including
+ negligence), contract, or otherwise, shall any Contributor, or anyone who
+ distributes Covered Software as permitted above, be liable to You for any
+ direct, indirect, special, incidental, or consequential damages of any
+ character including, without limitation, damages for lost profits, loss of
+ goodwill, work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses, even if such party shall have been
+ informed of the possibility of such damages. This limitation of liability
+ shall not apply to liability for death or personal injury resulting from such
+ party's negligence to the extent applicable law prohibits such limitation.
+ Some jurisdictions do not allow the exclusion or limitation of incidental or
+ consequential damages, so this exclusion and limitation may not apply to You.
+
+8. Litigation
+
+ Any litigation relating to this License may be brought only in the courts of
+ a jurisdiction where the defendant maintains its principal place of business
+ and such litigation shall be governed by laws of that jurisdiction, without
+ reference to its conflict-of-law provisions. Nothing in this Section shall
+ prevent a party's ability to bring cross-claims or counter-claims.
+
+9. Miscellaneous
+
+ This License represents the complete agreement concerning the subject matter
+ hereof. If any provision of this License is held to be unenforceable, such
+ provision shall be reformed only to the extent necessary to make it
+ enforceable. Any law or regulation which provides that the language of a
+ contract shall be construed against the drafter shall not be used to construe
+ this License against a Contributor.
+
+
+10. Versions of the License
+
+10.1. New Versions
+
+ Mozilla Foundation is the license steward. Except as provided in Section
+ 10.3, no one other than the license steward has the right to modify or
+ publish new versions of this License. Each version will be given a
+ distinguishing version number.
+
+10.2. Effect of New Versions
+
+ You may distribute the Covered Software under the terms of the version of
+ the License under which You originally received the Covered Software, or
+ under the terms of any subsequent version published by the license
+ steward.
+
+10.3. 
Modified Versions
+
+ If you create software not governed by this License, and you want to
+ create a new license for such software, you may create and use a modified
+ version of this License if you rename the license and remove any
+ references to the name of the license steward (except to note that such
+ modified license differs from this License).
+
+10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses
+ If You choose to distribute Source Code Form that is Incompatible With
+ Secondary Licenses under the terms of this version of the License, the
+ notice described in Exhibit B of this License must be attached.
+
+Exhibit A - Source Code Form License Notice
+
+ This Source Code Form is subject to the
+ terms of the Mozilla Public License, v.
+ 2.0. If a copy of the MPL was not
+ distributed with this file, You can
+ obtain one at
+ http://mozilla.org/MPL/2.0/.
+
+If it is not possible or desirable to put the notice in a particular file, then
+You may include the notice in a location (such as a LICENSE file in a relevant
+directory) where a recipient would be likely to look for such a notice.
+
+You may add additional accurate notices of copyright ownership.
+
+Exhibit B - "Incompatible With Secondary Licenses" Notice
+
+ This Source Code Form is "Incompatible
+ With Secondary Licenses", as defined by
+ the Mozilla Public License, v. 2.0.
+
diff --git a/vendor/github.com/hashicorp/go-getter/README.md b/vendor/github.com/hashicorp/go-getter/README.md
new file mode 100644
index 000000000..c3cf4c226
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-getter/README.md
@@ -0,0 +1,151 @@
+# go-getter
+
+[![Build Status](http://img.shields.io/travis/hashicorp/go-getter.svg?style=flat-square)][travis]
+[![Go Documentation](http://img.shields.io/badge/go-documentation-blue.svg?style=flat-square)][godocs]
+
+[travis]: http://travis-ci.org/hashicorp/go-getter
+[godocs]: http://godoc.org/github.com/hashicorp/go-getter
+
+go-getter is a library for Go (golang) for downloading files or directories
+from various sources using a URL as the primary form of input.
+
+The power of this library is its flexibility in downloading
+from a number of different sources (file paths, Git, HTTP, Mercurial, etc.)
+using a single string as input. This removes the burden of knowing how to
+download from a variety of sources from the implementer.
+
+The concept of a _detector_ automatically turns invalid URLs into proper
+URLs. For example: "github.com/hashicorp/go-getter" would turn into a
+Git URL. Or "./foo" would turn into a file URL. These are extensible.
+
+This library is used by [Terraform](https://terraform.io) for
+downloading modules, [Otto](https://ottoproject.io) for dependencies and
+Appfile imports, and [Nomad](https://nomadproject.io) for downloading
+binaries.
+
+## Installation and Usage
+
+Package documentation can be found on
+[GoDoc](http://godoc.org/github.com/hashicorp/go-getter).
+
+Installation can be done with a normal `go get`:
+
+```
+$ go get github.com/hashicorp/go-getter
+```
+
+## URL Format
+
+go-getter uses a single string URL as input to download from a variety of
+protocols. go-getter has various "tricks" with this URL to do certain things.
+This section documents the URL format.
+
+### Supported Protocols and Detectors
+
+**Protocols** are used to download files/directories using a specific
+mechanism. Example protocols are Git and HTTP. 
+
+**Detectors** are used to transform a valid or invalid URL into another
+URL if it matches a certain pattern. Example: "github.com/user/repo" is
+automatically transformed into a fully valid Git URL. This allows go-getter
+to be very user friendly.
+
+go-getter out of the box supports the following protocols. Additional protocols
+can be augmented at runtime by implementing the `Getter` interface.
+
+ * Local files
+ * Git
+ * Mercurial
+ * HTTP
+ * Amazon S3
+
+In addition to the above protocols, go-getter has what are called "detectors."
+These take a URL and attempt to automatically choose the best protocol for
+it, which might involve even changing the protocol. The following detection
+is built-in by default:
+
+ * File paths such as "./foo" are automatically changed to absolute
+ file URLs.
+ * GitHub URLs, such as "github.com/mitchellh/vagrant" are automatically
+ changed to Git protocol over HTTP.
+ * BitBucket URLs, such as "bitbucket.org/mitchellh/vagrant" are automatically
+ changed to a Git or Mercurial protocol using the BitBucket API.
+
+### Forced Protocol
+
+In some cases, the protocol to use is ambiguous depending on the source
+URL. For example, "http://github.com/mitchellh/vagrant.git" could reference
+an HTTP URL or a Git URL. Forced protocol syntax is used to disambiguate this
+URL.
+
+Forced protocol can be done by prefixing the URL with the protocol followed
+by double colons. For example: `git::http://github.com/mitchellh/vagrant.git`
+would download the given HTTP URL using the Git protocol.
+
+Forced protocols will also override any detectors.
+
+In the absence of a forced protocol, detectors may be run on the URL, transforming
+the protocol anyway. The above example would've used the Git protocol either
+way since the Git detector would've detected it was a GitHub URL.
+
+### Checksumming
+
+For file downloads of any protocol, go-getter can automatically verify
+a checksum for you. Note that checksumming only works for downloading files,
+not directories, but checksumming will work for any protocol.
+
+To checksum a file, append a `checksum` query parameter to the URL.
+The parameter value should be in the format of `type:value`, where
+type is "md5", "sha1", "sha256", or "sha512". The "value" should be
+the actual checksum value. go-getter will parse out this query parameter
+automatically and use it to verify the checksum. An example URL
+is shown below:
+
+```
+./foo.txt?checksum=md5:b7d96c89d09d9e204f5fedc4d5d55b21
+```
+
+The checksum query parameter is never sent to the backend protocol
+implementation. It is used at a higher level by go-getter itself.
+
+### Unarchiving
+
+go-getter will automatically unarchive files into a file or directory
+based on the extension of the file being requested (over any protocol).
+This works for both file and directory downloads.
+
+go-getter looks for an `archive` query parameter to specify the format of
+the archive. If this isn't specified, go-getter will use the extension of
+the path to see if it appears archived. Unarchiving can be explicitly
+disabled by setting the `archive` query parameter to `false`.
+
+The following archive formats are supported:
+
+ * `tar.gz` and `tgz`
+ * `tar.bz2` and `tbz2`
+ * `zip`
+ * `gz`
+ * `bz2`
+
+For example:
+
+```
+./foo.zip
+```
+
+This will automatically be inferred to be a ZIP file and will be extracted. 
+You can also be explicit about the archive type: + +``` +./some/other/path?archive=zip +``` + +And finally, you can disable archiving completely: + +``` +./some/path?archive=false +``` + +You can combine unarchiving with the other features of go-getter such +as checksumming. The special `archive` query parameter will be removed +from the URL before going to the final protocol downloader. diff --git a/vendor/github.com/hashicorp/go-getter/Vagrantfile b/vendor/github.com/hashicorp/go-getter/Vagrantfile new file mode 100644 index 000000000..74f2ae14f --- /dev/null +++ b/vendor/github.com/hashicorp/go-getter/Vagrantfile @@ -0,0 +1,18 @@ +# -*- mode: ruby -*- +# vi: set ft=ruby : + +# All Vagrant configuration is done below. The "2" in Vagrant.configure +# configures the configuration version (we support older styles for +# backwards compatibility). Please don't change it unless you know what +# you're doing. +Vagrant.configure(2) do |config| + config.vm.box = "hashicorp/windows_81" + config.vm.provider "vmware_fusion" do |v| + v.gui = true + v.cpus = 4 + v.memory = 4096 + + # Prevents problems with incorrect ip lookup when using `vagrant ssh` + v.enable_vmrun_ip_lookup = false + end +end diff --git a/vendor/github.com/hashicorp/go-getter/client.go b/vendor/github.com/hashicorp/go-getter/client.go new file mode 100644 index 000000000..0622b324c --- /dev/null +++ b/vendor/github.com/hashicorp/go-getter/client.go @@ -0,0 +1,304 @@ +package getter + +import ( + "bytes" + "crypto/md5" + "crypto/sha1" + "crypto/sha256" + "crypto/sha512" + "encoding/hex" + "fmt" + "hash" + "io" + "io/ioutil" + "os" + "path/filepath" + "strconv" + "strings" + + urlhelper "github.com/hashicorp/go-getter/helper/url" +) + +// Client is a client for downloading things. +// +// Top-level functions such as Get are shortcuts for interacting with a client. +// Using a client directly allows more fine-grained control over how downloading +// is done, as well as customizing the protocols supported. +type Client struct { + // Src is the source URL to get. + // + // Dst is the path to save the downloaded thing as. If Dir is set to + // true, then this should be a directory. If the directory doesn't exist, + // it will be created for you. + // + // Pwd is the working directory for detection. If this isn't set, some + // detection may fail. Client will not default pwd to the current + // working directory for security reasons. + Src string + Dst string + Pwd string + + // Dir, if true, tells the Client it is downloading a directory (versus + // a single file). This distinction is necessary since filenames and + // directory names follow the same format so disambiguating is impossible + // without knowing ahead of time. + Dir bool + + // Detectors is the list of detectors that are tried on the source. + // If this is nil, then the default Detectors will be used. + Detectors []Detector + + // Decompressors is the map of decompressors supported by this client. + // If this is nil, then the default value is the Decompressors global. + Decompressors map[string]Decompressor + + // Getters is the map of protocols supported by this client. If this + // is nil, then the default Getters variable will be used. + Getters map[string]Getter +} + +// Get downloads the configured source to the destination. 
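+//
+// A rough outline of what Get does, based on the implementation below:
+// it runs detection on Src, splits off any forced getter ("git::...") and
+// any //subdir component, honors the special "archive" and "checksum"
+// query parameters, and dispatches to the Getter registered for the
+// resulting URL scheme.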
+func (c *Client) Get() error { + // Store this locally since there are cases we swap this + dir := c.Dir + + // Default decompressor value + decompressors := c.Decompressors + if decompressors == nil { + decompressors = Decompressors + } + + // Detect the URL. This is safe if it is already detected. + detectors := c.Detectors + if detectors == nil { + detectors = Detectors + } + src, err := Detect(c.Src, c.Pwd, detectors) + if err != nil { + return err + } + + // Determine if we have a forced protocol, i.e. "git::http://..." + force, src := getForcedGetter(src) + + // If there is a subdir component, then we download the root separately + // and then copy over the proper subdir. + var realDst string + dst := c.Dst + src, subDir := SourceDirSubdir(src) + if subDir != "" { + tmpDir, err := ioutil.TempDir("", "tf") + if err != nil { + return err + } + if err := os.RemoveAll(tmpDir); err != nil { + return err + } + defer os.RemoveAll(tmpDir) + + realDst = dst + dst = tmpDir + } + + u, err := urlhelper.Parse(src) + if err != nil { + return err + } + if force == "" { + force = u.Scheme + } + + getters := c.Getters + if getters == nil { + getters = Getters + } + + g, ok := getters[force] + if !ok { + return fmt.Errorf( + "download not supported for scheme '%s'", force) + } + + // We have magic query parameters that we use to signal different features + q := u.Query() + + // Determine if we have an archive type + archiveV := q.Get("archive") + if archiveV != "" { + // Delete the paramter since it is a magic parameter we don't + // want to pass on to the Getter + q.Del("archive") + u.RawQuery = q.Encode() + + // If we can parse the value as a bool and it is false, then + // set the archive to "-" which should never map to a decompressor + if b, err := strconv.ParseBool(archiveV); err == nil && !b { + archiveV = "-" + } + } + if archiveV == "" { + // We don't appear to... but is it part of the filename? + matchingLen := 0 + for k, _ := range decompressors { + if strings.HasSuffix(u.Path, k) && len(k) > matchingLen { + archiveV = k + matchingLen = len(k) + } + } + } + + // If we have a decompressor, then we need to change the destination + // to download to a temporary path. We unarchive this into the final, + // real path. + var decompressDst string + var decompressDir bool + decompressor := decompressors[archiveV] + if decompressor != nil { + // Create a temporary directory to store our archive. We delete + // this at the end of everything. + td, err := ioutil.TempDir("", "getter") + if err != nil { + return fmt.Errorf( + "Error creating temporary directory for archive: %s", err) + } + defer os.RemoveAll(td) + + // Swap the download directory to be our temporary path and + // store the old values. + decompressDst = dst + decompressDir = dir + dst = filepath.Join(td, "archive") + dir = false + } + + // Determine if we have a checksum + var checksumHash hash.Hash + var checksumValue []byte + if v := q.Get("checksum"); v != "" { + // Delete the query parameter if we have it. + q.Del("checksum") + u.RawQuery = q.Encode() + + // If we're getting a directory, then this is an error. You cannot + // checksum a directory. 
TODO: test + if dir { + return fmt.Errorf( + "checksum cannot be specified for directory download") + } + + // Determine the checksum hash type + checksumType := "" + idx := strings.Index(v, ":") + if idx > -1 { + checksumType = v[:idx] + } + switch checksumType { + case "md5": + checksumHash = md5.New() + case "sha1": + checksumHash = sha1.New() + case "sha256": + checksumHash = sha256.New() + case "sha512": + checksumHash = sha512.New() + default: + return fmt.Errorf( + "unsupported checksum type: %s", checksumType) + } + + // Get the remainder of the value and parse it into bytes + b, err := hex.DecodeString(v[idx+1:]) + if err != nil { + return fmt.Errorf("invalid checksum: %s", err) + } + + // Set our value + checksumValue = b + } + + // If we're not downloading a directory, then just download the file + // and return. + if !dir { + err := g.GetFile(dst, u) + if err != nil { + return err + } + + if checksumHash != nil { + if err := checksum(dst, checksumHash, checksumValue); err != nil { + return err + } + } + + if decompressor != nil { + // We have a decompressor, so decompress the current destination + // into the final destination with the proper mode. + err := decompressor.Decompress(decompressDst, dst, decompressDir) + if err != nil { + return err + } + + // Swap the information back + dst = decompressDst + dir = decompressDir + } + + // We check the dir value again because it can be switched back + // if we were unarchiving. If we're still only Get-ing a file, then + // we're done. + if !dir { + return nil + } + } + + // If we're at this point we're either downloading a directory or we've + // downloaded and unarchived a directory and we're just checking subdir. + // In the case we have a decompressor we don't Get because it was Get + // above. + if decompressor == nil { + // We're downloading a directory, which might require a bit more work + // if we're specifying a subdir. + err := g.Get(dst, u) + if err != nil { + err = fmt.Errorf("error downloading '%s': %s", src, err) + return err + } + } + + // If we have a subdir, copy that over + if subDir != "" { + if err := os.RemoveAll(realDst); err != nil { + return err + } + if err := os.MkdirAll(realDst, 0755); err != nil { + return err + } + + return copyDir(realDst, filepath.Join(dst, subDir), false) + } + + return nil +} + +// checksum is a simple method to compute the checksum of a source file +// and compare it to the given expected value. +func checksum(source string, h hash.Hash, v []byte) error { + f, err := os.Open(source) + if err != nil { + return fmt.Errorf("Failed to open file for checksum: %s", err) + } + defer f.Close() + + if _, err := io.Copy(h, f); err != nil { + return fmt.Errorf("Failed to hash: %s", err) + } + + if actual := h.Sum(nil); !bytes.Equal(actual, v) { + return fmt.Errorf( + "Checksums did not match.\nExpected: %s\nGot: %s", + hex.EncodeToString(v), + hex.EncodeToString(actual)) + } + + return nil +} diff --git a/vendor/github.com/hashicorp/go-getter/copy_dir.go b/vendor/github.com/hashicorp/go-getter/copy_dir.go new file mode 100644 index 000000000..2f58e8aeb --- /dev/null +++ b/vendor/github.com/hashicorp/go-getter/copy_dir.go @@ -0,0 +1,78 @@ +package getter + +import ( + "io" + "os" + "path/filepath" + "strings" +) + +// copyDir copies the src directory contents into dst. Both directories +// should already exist. +// +// If ignoreDot is set to true, then dot-prefixed files/folders are ignored. 
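+//
+// Symlinks in src are resolved before walking, and copied files keep the
+// permission bits of their source files.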
+func copyDir(dst string, src string, ignoreDot bool) error { + src, err := filepath.EvalSymlinks(src) + if err != nil { + return err + } + + walkFn := func(path string, info os.FileInfo, err error) error { + if err != nil { + return err + } + if path == src { + return nil + } + + if ignoreDot && strings.HasPrefix(filepath.Base(path), ".") { + // Skip any dot files + if info.IsDir() { + return filepath.SkipDir + } else { + return nil + } + } + + // The "path" has the src prefixed to it. We need to join our + // destination with the path without the src on it. + dstPath := filepath.Join(dst, path[len(src):]) + + // If we have a directory, make that subdirectory, then continue + // the walk. + if info.IsDir() { + if path == filepath.Join(src, dst) { + // dst is in src; don't walk it. + return nil + } + + if err := os.MkdirAll(dstPath, 0755); err != nil { + return err + } + + return nil + } + + // If we have a file, copy the contents. + srcF, err := os.Open(path) + if err != nil { + return err + } + defer srcF.Close() + + dstF, err := os.Create(dstPath) + if err != nil { + return err + } + defer dstF.Close() + + if _, err := io.Copy(dstF, srcF); err != nil { + return err + } + + // Chmod it + return os.Chmod(dstPath, info.Mode()) + } + + return filepath.Walk(src, walkFn) +} diff --git a/vendor/github.com/hashicorp/go-getter/decompress.go b/vendor/github.com/hashicorp/go-getter/decompress.go new file mode 100644 index 000000000..d18174cc3 --- /dev/null +++ b/vendor/github.com/hashicorp/go-getter/decompress.go @@ -0,0 +1,29 @@ +package getter + +// Decompressor defines the interface that must be implemented to add +// support for decompressing a type. +type Decompressor interface { + // Decompress should decompress src to dst. dir specifies whether dst + // is a directory or single file. src is guaranteed to be a single file + // that exists. dst is not guaranteed to exist already. + Decompress(dst, src string, dir bool) error +} + +// Decompressors is the mapping of extension to the Decompressor implementation +// that will decompress that extension/type. +var Decompressors map[string]Decompressor + +func init() { + tbzDecompressor := new(TarBzip2Decompressor) + tgzDecompressor := new(TarGzipDecompressor) + + Decompressors = map[string]Decompressor{ + "bz2": new(Bzip2Decompressor), + "gz": new(GzipDecompressor), + "tar.bz2": tbzDecompressor, + "tar.gz": tgzDecompressor, + "tbz2": tbzDecompressor, + "tgz": tgzDecompressor, + "zip": new(ZipDecompressor), + } +} diff --git a/vendor/github.com/hashicorp/go-getter/decompress_bzip2.go b/vendor/github.com/hashicorp/go-getter/decompress_bzip2.go new file mode 100644 index 000000000..339f4cf7a --- /dev/null +++ b/vendor/github.com/hashicorp/go-getter/decompress_bzip2.go @@ -0,0 +1,45 @@ +package getter + +import ( + "compress/bzip2" + "fmt" + "io" + "os" + "path/filepath" +) + +// Bzip2Decompressor is an implementation of Decompressor that can +// decompress bz2 files. 
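+//
+// Since bzip2 is stream compression with no archive framing, the result is
+// always a single file; directory mode is rejected.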
+type Bzip2Decompressor struct{} + +func (d *Bzip2Decompressor) Decompress(dst, src string, dir bool) error { + // Directory isn't supported at all + if dir { + return fmt.Errorf("bzip2-compressed files can only unarchive to a single file") + } + + // If we're going into a directory we should make that first + if err := os.MkdirAll(filepath.Dir(dst), 0755); err != nil { + return err + } + + // File first + f, err := os.Open(src) + if err != nil { + return err + } + defer f.Close() + + // Bzip2 compression is second + bzipR := bzip2.NewReader(f) + + // Copy it out + dstF, err := os.Create(dst) + if err != nil { + return err + } + defer dstF.Close() + + _, err = io.Copy(dstF, bzipR) + return err +} diff --git a/vendor/github.com/hashicorp/go-getter/decompress_bzip2_test.go b/vendor/github.com/hashicorp/go-getter/decompress_bzip2_test.go new file mode 100644 index 000000000..9d49c47ae --- /dev/null +++ b/vendor/github.com/hashicorp/go-getter/decompress_bzip2_test.go @@ -0,0 +1,32 @@ +package getter + +import ( + "path/filepath" + "testing" +) + +func TestBzip2Decompressor(t *testing.T) { + cases := []TestDecompressCase{ + { + "single.bz2", + false, + false, + nil, + "d3b07384d113edec49eaa6238ad5ff00", + }, + + { + "single.bz2", + true, + true, + nil, + "", + }, + } + + for i, tc := range cases { + cases[i].Input = filepath.Join("./test-fixtures", "decompress-bz2", tc.Input) + } + + TestDecompressor(t, new(Bzip2Decompressor), cases) +} diff --git a/vendor/github.com/hashicorp/go-getter/decompress_gzip.go b/vendor/github.com/hashicorp/go-getter/decompress_gzip.go new file mode 100644 index 000000000..20010540e --- /dev/null +++ b/vendor/github.com/hashicorp/go-getter/decompress_gzip.go @@ -0,0 +1,49 @@ +package getter + +import ( + "compress/gzip" + "fmt" + "io" + "os" + "path/filepath" +) + +// GzipDecompressor is an implementation of Decompressor that can +// decompress bz2 files. 
+type GzipDecompressor struct{} + +func (d *GzipDecompressor) Decompress(dst, src string, dir bool) error { + // Directory isn't supported at all + if dir { + return fmt.Errorf("gzip-compressed files can only unarchive to a single file") + } + + // If we're going into a directory we should make that first + if err := os.MkdirAll(filepath.Dir(dst), 0755); err != nil { + return err + } + + // File first + f, err := os.Open(src) + if err != nil { + return err + } + defer f.Close() + + // gzip compression is second + gzipR, err := gzip.NewReader(f) + if err != nil { + return err + } + defer gzipR.Close() + + // Copy it out + dstF, err := os.Create(dst) + if err != nil { + return err + } + defer dstF.Close() + + _, err = io.Copy(dstF, gzipR) + return err +} diff --git a/vendor/github.com/hashicorp/go-getter/decompress_gzip_test.go b/vendor/github.com/hashicorp/go-getter/decompress_gzip_test.go new file mode 100644 index 000000000..0aa9e27d8 --- /dev/null +++ b/vendor/github.com/hashicorp/go-getter/decompress_gzip_test.go @@ -0,0 +1,32 @@ +package getter + +import ( + "path/filepath" + "testing" +) + +func TestGzipDecompressor(t *testing.T) { + cases := []TestDecompressCase{ + { + "single.gz", + false, + false, + nil, + "d3b07384d113edec49eaa6238ad5ff00", + }, + + { + "single.gz", + true, + true, + nil, + "", + }, + } + + for i, tc := range cases { + cases[i].Input = filepath.Join("./test-fixtures", "decompress-gz", tc.Input) + } + + TestDecompressor(t, new(GzipDecompressor), cases) +} diff --git a/vendor/github.com/hashicorp/go-getter/decompress_tbz2.go b/vendor/github.com/hashicorp/go-getter/decompress_tbz2.go new file mode 100644 index 000000000..c46ed4453 --- /dev/null +++ b/vendor/github.com/hashicorp/go-getter/decompress_tbz2.go @@ -0,0 +1,95 @@ +package getter + +import ( + "archive/tar" + "compress/bzip2" + "fmt" + "io" + "os" + "path/filepath" +) + +// TarBzip2Decompressor is an implementation of Decompressor that can +// decompress tar.bz2 files. +type TarBzip2Decompressor struct{} + +func (d *TarBzip2Decompressor) Decompress(dst, src string, dir bool) error { + // If we're going into a directory we should make that first + mkdir := dst + if !dir { + mkdir = filepath.Dir(dst) + } + if err := os.MkdirAll(mkdir, 0755); err != nil { + return err + } + + // File first + f, err := os.Open(src) + if err != nil { + return err + } + defer f.Close() + + // Bzip2 compression is second + bzipR := bzip2.NewReader(f) + + // Once bzip decompressed we have a tar format + tarR := tar.NewReader(bzipR) + done := false + for { + hdr, err := tarR.Next() + if err == io.EOF { + if !done { + // Empty archive + return fmt.Errorf("empty archive: %s", src) + } + + return nil + } + if err != nil { + return err + } + + path := dst + if dir { + path = filepath.Join(path, hdr.Name) + } + + if hdr.FileInfo().IsDir() { + if dir { + return fmt.Errorf("expected a single file: %s", src) + } + + // A directory, just make the directory and continue unarchiving... + if err := os.MkdirAll(path, 0755); err != nil { + return err + } + + continue + } + + // We have a file. 
If we already decoded, then it is an error + if !dir && done { + return fmt.Errorf("expected a single file, got multiple: %s", src) + } + + // Mark that we're done so future in single file mode errors + done = true + + // Open the file for writing + dstF, err := os.Create(path) + if err != nil { + return err + } + _, err = io.Copy(dstF, tarR) + dstF.Close() + if err != nil { + return err + } + + // Chmod the file + if err := os.Chmod(path, hdr.FileInfo().Mode()); err != nil { + return err + } + } +} diff --git a/vendor/github.com/hashicorp/go-getter/decompress_tbz2_test.go b/vendor/github.com/hashicorp/go-getter/decompress_tbz2_test.go new file mode 100644 index 000000000..c6fbc5276 --- /dev/null +++ b/vendor/github.com/hashicorp/go-getter/decompress_tbz2_test.go @@ -0,0 +1,56 @@ +package getter + +import ( + "path/filepath" + "testing" +) + +func TestTarBzip2Decompressor(t *testing.T) { + cases := []TestDecompressCase{ + { + "empty.tar.bz2", + false, + true, + nil, + "", + }, + + { + "single.tar.bz2", + false, + false, + nil, + "d3b07384d113edec49eaa6238ad5ff00", + }, + + { + "single.tar.bz2", + true, + false, + []string{"file"}, + "", + }, + + { + "multiple.tar.bz2", + true, + false, + []string{"file1", "file2"}, + "", + }, + + { + "multiple.tar.bz2", + false, + true, + nil, + "", + }, + } + + for i, tc := range cases { + cases[i].Input = filepath.Join("./test-fixtures", "decompress-tbz2", tc.Input) + } + + TestDecompressor(t, new(TarBzip2Decompressor), cases) +} diff --git a/vendor/github.com/hashicorp/go-getter/decompress_testing.go b/vendor/github.com/hashicorp/go-getter/decompress_testing.go new file mode 100644 index 000000000..5821ed66e --- /dev/null +++ b/vendor/github.com/hashicorp/go-getter/decompress_testing.go @@ -0,0 +1,121 @@ +package getter + +import ( + "crypto/md5" + "encoding/hex" + "io" + "io/ioutil" + "os" + "path/filepath" + "reflect" + "sort" + "strings" + "testing" +) + +// TestDecompressCase is a single test case for testing decompressors +type TestDecompressCase struct { + Input string // Input is the complete path to the input file + Dir bool // Dir is whether or not we're testing directory mode + Err bool // Err is whether we expect an error or not + DirList []string // DirList is the list of files for Dir mode + FileMD5 string // FileMD5 is the expected MD5 for a single file +} + +// TestDecompressor is a helper function for testing generic decompressors. 
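+//
+// For each case it decompresses Input into a fresh temporary directory,
+// expects an error exactly when Err is set, checks FileMD5 in single-file
+// mode, and compares a sorted directory listing against DirList in
+// directory mode.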
+func TestDecompressor(t *testing.T, d Decompressor, cases []TestDecompressCase) { + for _, tc := range cases { + t.Logf("Testing: %s", tc.Input) + + // Temporary dir to store stuff + td, err := ioutil.TempDir("", "getter") + if err != nil { + t.Fatalf("err: %s", err) + } + + // Destination is always joining result so that we have a new path + dst := filepath.Join(td, "subdir", "result") + + // We use a function so defers work + func() { + defer os.RemoveAll(td) + + // Decompress + err := d.Decompress(dst, tc.Input, tc.Dir) + if (err != nil) != tc.Err { + t.Fatalf("err %s: %s", tc.Input, err) + } + if tc.Err { + return + } + + // If it isn't a directory, then check for a single file + if !tc.Dir { + fi, err := os.Stat(dst) + if err != nil { + t.Fatalf("err %s: %s", tc.Input, err) + } + if fi.IsDir() { + t.Fatalf("err %s: expected file, got directory", tc.Input) + } + if tc.FileMD5 != "" { + actual := testMD5(t, dst) + expected := tc.FileMD5 + if actual != expected { + t.Fatalf("err %s: expected MD5 %s, got %s", tc.Input, expected, actual) + } + } + + return + } + + // Directory, check for the correct contents + actual := testListDir(t, dst) + expected := tc.DirList + if !reflect.DeepEqual(actual, expected) { + t.Fatalf("bad %s\n\n%#v\n\n%#v", tc.Input, actual, expected) + } + }() + } +} + +func testListDir(t *testing.T, path string) []string { + var result []string + err := filepath.Walk(path, func(sub string, info os.FileInfo, err error) error { + if err != nil { + return err + } + + sub = strings.TrimPrefix(sub, path) + if sub == "" { + return nil + } + sub = sub[1:] // Trim the leading path sep. + + result = append(result, sub) + return nil + }) + if err != nil { + t.Fatalf("err: %s", err) + } + + sort.Strings(result) + return result +} + +func testMD5(t *testing.T, path string) string { + f, err := os.Open(path) + if err != nil { + t.Fatalf("err: %s", err) + } + defer f.Close() + + h := md5.New() + _, err = io.Copy(h, f) + if err != nil { + t.Fatalf("err: %s", err) + } + + result := h.Sum(nil) + return hex.EncodeToString(result) +} diff --git a/vendor/github.com/hashicorp/go-getter/decompress_tgz.go b/vendor/github.com/hashicorp/go-getter/decompress_tgz.go new file mode 100644 index 000000000..391bbc0ad --- /dev/null +++ b/vendor/github.com/hashicorp/go-getter/decompress_tgz.go @@ -0,0 +1,99 @@ +package getter + +import ( + "archive/tar" + "compress/gzip" + "fmt" + "io" + "os" + "path/filepath" +) + +// TarGzipDecompressor is an implementation of Decompressor that can +// decompress tar.gzip files. 
+type TarGzipDecompressor struct{} + +func (d *TarGzipDecompressor) Decompress(dst, src string, dir bool) error { + // If we're going into a directory we should make that first + mkdir := dst + if !dir { + mkdir = filepath.Dir(dst) + } + if err := os.MkdirAll(mkdir, 0755); err != nil { + return err + } + + // File first + f, err := os.Open(src) + if err != nil { + return err + } + defer f.Close() + + // Gzip compression is second + gzipR, err := gzip.NewReader(f) + if err != nil { + return fmt.Errorf("Error opening a gzip reader for %s: %s", src, err) + } + defer gzipR.Close() + + // Once gzip decompressed we have a tar format + tarR := tar.NewReader(gzipR) + done := false + for { + hdr, err := tarR.Next() + if err == io.EOF { + if !done { + // Empty archive + return fmt.Errorf("empty archive: %s", src) + } + + return nil + } + if err != nil { + return err + } + + path := dst + if dir { + path = filepath.Join(path, hdr.Name) + } + + if hdr.FileInfo().IsDir() { + if dir { + return fmt.Errorf("expected a single file: %s", src) + } + + // A directory, just make the directory and continue unarchiving... + if err := os.MkdirAll(path, 0755); err != nil { + return err + } + + continue + } + + // We have a file. If we already decoded, then it is an error + if !dir && done { + return fmt.Errorf("expected a single file, got multiple: %s", src) + } + + // Mark that we're done so future in single file mode errors + done = true + + // Open the file for writing + dstF, err := os.Create(path) + if err != nil { + return err + } + _, err = io.Copy(dstF, tarR) + dstF.Close() + if err != nil { + return err + } + + // Chmod the file + if err := os.Chmod(path, hdr.FileInfo().Mode()); err != nil { + return err + } + } +} diff --git a/vendor/github.com/hashicorp/go-getter/decompress_tgz_test.go b/vendor/github.com/hashicorp/go-getter/decompress_tgz_test.go new file mode 100644 index 000000000..ce36580b2 --- /dev/null +++ b/vendor/github.com/hashicorp/go-getter/decompress_tgz_test.go @@ -0,0 +1,56 @@ +package getter + +import ( + "path/filepath" + "testing" +) + +func TestTarGzipDecompressor(t *testing.T) { + cases := []TestDecompressCase{ + { + "empty.tar.gz", + false, + true, + nil, + "", + }, + + { + "single.tar.gz", + false, + false, + nil, + "d3b07384d113edec49eaa6238ad5ff00", + }, + + { + "single.tar.gz", + true, + false, + []string{"file"}, + "", + }, + + { + "multiple.tar.gz", + true, + false, + []string{"file1", "file2"}, + "", + }, + + { + "multiple.tar.gz", + false, + true, + nil, + "", + }, + } + + for i, tc := range cases { + cases[i].Input = filepath.Join("./test-fixtures", "decompress-tgz", tc.Input) + } + + TestDecompressor(t, new(TarGzipDecompressor), cases) +} diff --git a/vendor/github.com/hashicorp/go-getter/decompress_zip.go b/vendor/github.com/hashicorp/go-getter/decompress_zip.go new file mode 100644 index 000000000..0e4f6d739 --- /dev/null +++ b/vendor/github.com/hashicorp/go-getter/decompress_zip.go @@ -0,0 +1,87 @@ +package getter + +import ( + "archive/zip" + "fmt" + "io" + "os" + "path/filepath" +) + +// ZipDecompressor is an implementation of Decompressor that can +// decompress tar.gzip files. 
+type ZipDecompressor struct{} + +func (d *ZipDecompressor) Decompress(dst, src string, dir bool) error { + // If we're going into a directory we should make that first + mkdir := dst + if !dir { + mkdir = filepath.Dir(dst) + } + if err := os.MkdirAll(mkdir, 0755); err != nil { + return err + } + + // Open the zip + zipR, err := zip.OpenReader(src) + if err != nil { + return err + } + defer zipR.Close() + + // Check the zip integrity + if len(zipR.File) == 0 { + // Empty archive + return fmt.Errorf("empty archive: %s", src) + } + if !dir && len(zipR.File) > 1 { + return fmt.Errorf("expected a single file: %s", src) + } + + // Go through and unarchive + for _, f := range zipR.File { + path := dst + if dir { + path = filepath.Join(path, f.Name) + } + + if f.FileInfo().IsDir() { + if dir { + return fmt.Errorf("expected a single file: %s", src) + } + + // A directory, just make the directory and continue unarchiving... + if err := os.MkdirAll(path, 0755); err != nil { + return err + } + + continue + } + + // Open the file for reading + srcF, err := f.Open() + if err != nil { + return err + } + + // Open the file for writing + dstF, err := os.Create(path) + if err != nil { + srcF.Close() + return err + } + _, err = io.Copy(dstF, srcF) + srcF.Close() + dstF.Close() + if err != nil { + return err + } + + // Chmod the file + if err := os.Chmod(path, f.Mode()); err != nil { + return err + } + } + + return nil +} diff --git a/vendor/github.com/hashicorp/go-getter/decompress_zip_test.go b/vendor/github.com/hashicorp/go-getter/decompress_zip_test.go new file mode 100644 index 000000000..fb227cf34 --- /dev/null +++ b/vendor/github.com/hashicorp/go-getter/decompress_zip_test.go @@ -0,0 +1,56 @@ +package getter + +import ( + "path/filepath" + "testing" +) + +func TestZipDecompressor(t *testing.T) { + cases := []TestDecompressCase{ + { + "empty.zip", + false, + true, + nil, + "", + }, + + { + "single.zip", + false, + false, + nil, + "d3b07384d113edec49eaa6238ad5ff00", + }, + + { + "single.zip", + true, + false, + []string{"file"}, + "", + }, + + { + "multiple.zip", + true, + false, + []string{"file1", "file2"}, + "", + }, + + { + "multiple.zip", + false, + true, + nil, + "", + }, + } + + for i, tc := range cases { + cases[i].Input = filepath.Join("./test-fixtures", "decompress-zip", tc.Input) + } + + TestDecompressor(t, new(ZipDecompressor), cases) +} diff --git a/vendor/github.com/hashicorp/go-getter/detect.go b/vendor/github.com/hashicorp/go-getter/detect.go new file mode 100644 index 000000000..481b737c6 --- /dev/null +++ b/vendor/github.com/hashicorp/go-getter/detect.go @@ -0,0 +1,97 @@ +package getter + +import ( + "fmt" + "path/filepath" + + "github.com/hashicorp/go-getter/helper/url" +) + +// Detector defines the interface that an invalid URL or a URL with a blank +// scheme is passed through in order to determine if its shorthand for +// something else well-known. +type Detector interface { + // Detect will detect whether the string matches a known pattern to + // turn it into a proper URL. + Detect(string, string) (string, bool, error) +} + +// Detectors is the list of detectors that are tried on an invalid URL. +// This is also the order they're tried (index 0 is first). +var Detectors []Detector + +func init() { + Detectors = []Detector{ + new(GitHubDetector), + new(BitBucketDetector), + new(S3Detector), + new(FileDetector), + } +} + +// Detect turns a source string into another source string if it is +// detected to be of a known pattern. 
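+//
+// Any forced getter prefix ("git::...") and any //subdir component are
+// split off before detection runs and re-applied to the detected result.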
+// +// The third parameter should be the list of detectors to use in the +// order to try them. If you don't want to configure this, just use +// the global Detectors variable. +// +// This is safe to be called with an already valid source string: Detect +// will just return it. +func Detect(src string, pwd string, ds []Detector) (string, error) { + getForce, getSrc := getForcedGetter(src) + + // Separate out the subdir if there is one, we don't pass that to detect + getSrc, subDir := SourceDirSubdir(getSrc) + + u, err := url.Parse(getSrc) + if err == nil && u.Scheme != "" { + // Valid URL + return src, nil + } + + for _, d := range ds { + result, ok, err := d.Detect(getSrc, pwd) + if err != nil { + return "", err + } + if !ok { + continue + } + + var detectForce string + detectForce, result = getForcedGetter(result) + result, detectSubdir := SourceDirSubdir(result) + + // If we have a subdir from the detection, then prepend it to our + // requested subdir. + if detectSubdir != "" { + if subDir != "" { + subDir = filepath.Join(detectSubdir, subDir) + } else { + subDir = detectSubdir + } + } + if subDir != "" { + u, err := url.Parse(result) + if err != nil { + return "", fmt.Errorf("Error parsing URL: %s", err) + } + u.Path += "//" + subDir + result = u.String() + } + + // Preserve the forced getter if it exists. We try to use the + // original set force first, followed by any force set by the + // detector. + if getForce != "" { + result = fmt.Sprintf("%s::%s", getForce, result) + } else if detectForce != "" { + result = fmt.Sprintf("%s::%s", detectForce, result) + } + + return result, nil + } + + return "", fmt.Errorf("invalid source string: %s", src) +} diff --git a/vendor/github.com/hashicorp/go-getter/detect_bitbucket.go b/vendor/github.com/hashicorp/go-getter/detect_bitbucket.go new file mode 100644 index 000000000..a183a17df --- /dev/null +++ b/vendor/github.com/hashicorp/go-getter/detect_bitbucket.go @@ -0,0 +1,66 @@ +package getter + +import ( + "encoding/json" + "fmt" + "net/http" + "net/url" + "strings" +) + +// BitBucketDetector implements Detector to detect BitBucket URLs and turn +// them into URLs that the Git or Hg Getter can understand. +type BitBucketDetector struct{} + +func (d *BitBucketDetector) Detect(src, _ string) (string, bool, error) { + if len(src) == 0 { + return "", false, nil + } + + if strings.HasPrefix(src, "bitbucket.org/") { + return d.detectHTTP(src) + } + + return "", false, nil +} + +func (d *BitBucketDetector) detectHTTP(src string) (string, bool, error) { + u, err := url.Parse("https://" + src) + if err != nil { + return "", true, fmt.Errorf("error parsing BitBucket URL: %s", err) + } + + // We need to get info on this BitBucket repository to determine whether + // it is Git or Hg. 
+ var info struct { + SCM string `json:"scm"` + } + infoUrl := "https://api.bitbucket.org/1.0/repositories" + u.Path + resp, err := http.Get(infoUrl) + if err != nil { + return "", true, fmt.Errorf("error looking up BitBucket URL: %s", err) + } + if resp.StatusCode == 403 { + // A private repo + return "", true, fmt.Errorf( + "shorthand BitBucket URL can't be used for private repos, " + + "please use a full URL") + } + dec := json.NewDecoder(resp.Body) + if err := dec.Decode(&info); err != nil { + return "", true, fmt.Errorf("error looking up BitBucket URL: %s", err) + } + + switch info.SCM { + case "git": + if !strings.HasSuffix(u.Path, ".git") { + u.Path += ".git" + } + + return "git::" + u.String(), true, nil + case "hg": + return "hg::" + u.String(), true, nil + default: + return "", true, fmt.Errorf("unknown BitBucket SCM type: %s", info.SCM) + } +} diff --git a/vendor/github.com/hashicorp/go-getter/detect_bitbucket_test.go b/vendor/github.com/hashicorp/go-getter/detect_bitbucket_test.go new file mode 100644 index 000000000..202c93256 --- /dev/null +++ b/vendor/github.com/hashicorp/go-getter/detect_bitbucket_test.go @@ -0,0 +1,67 @@ +package getter + +import ( + "net/http" + "strings" + "testing" +) + +const testBBUrl = "https://bitbucket.org/hashicorp/tf-test-git" + +func TestBitBucketDetector(t *testing.T) { + t.Parallel() + + if _, err := http.Get(testBBUrl); err != nil { + t.Log("internet may not be working, skipping BB tests") + t.Skip() + } + + cases := []struct { + Input string + Output string + }{ + // HTTP + { + "bitbucket.org/hashicorp/tf-test-git", + "git::https://bitbucket.org/hashicorp/tf-test-git.git", + }, + { + "bitbucket.org/hashicorp/tf-test-git.git", + "git::https://bitbucket.org/hashicorp/tf-test-git.git", + }, + { + "bitbucket.org/hashicorp/tf-test-hg", + "hg::https://bitbucket.org/hashicorp/tf-test-hg", + }, + } + + pwd := "/pwd" + f := new(BitBucketDetector) + for i, tc := range cases { + var err error + for i := 0; i < 3; i++ { + var output string + var ok bool + output, ok, err = f.Detect(tc.Input, pwd) + if err != nil { + if strings.Contains(err.Error(), "invalid character") { + continue + } + + t.Fatalf("err: %s", err) + } + if !ok { + t.Fatal("not ok") + } + + if output != tc.Output { + t.Fatalf("%d: bad: %#v", i, output) + } + + break + } + if i >= 3 { + t.Fatalf("failure from bitbucket: %s", err) + } + } +} diff --git a/vendor/github.com/hashicorp/go-getter/detect_file.go b/vendor/github.com/hashicorp/go-getter/detect_file.go new file mode 100644 index 000000000..ddeedc1dd --- /dev/null +++ b/vendor/github.com/hashicorp/go-getter/detect_file.go @@ -0,0 +1,60 @@ +package getter + +import ( + "fmt" + "os" + "path/filepath" + "runtime" +) + +// FileDetector implements Detector to detect file paths. +type FileDetector struct{} + +func (d *FileDetector) Detect(src, pwd string) (string, bool, error) { + if len(src) == 0 { + return "", false, nil + } + + if !filepath.IsAbs(src) { + if pwd == "" { + return "", true, fmt.Errorf( + "relative paths require a module with a pwd") + } + + // Stat the pwd to determine if its a symbolic link. If it is, + // then the pwd becomes the original directory. Otherwise, + // `filepath.Join` below does some weird stuff. + // + // We just ignore if the pwd doesn't exist. That error will be + // caught later when we try to use the URL. 
+ if fi, err := os.Lstat(pwd); !os.IsNotExist(err) { + if err != nil { + return "", true, err + } + if fi.Mode()&os.ModeSymlink != 0 { + pwd, err = os.Readlink(pwd) + if err != nil { + return "", true, err + } + } + } + + src = filepath.Join(pwd, src) + } + + return fmtFileURL(src), true, nil +} + +func fmtFileURL(path string) string { + if runtime.GOOS == "windows" { + // Make sure we're using "/" on Windows. URLs are "/"-based. + path = filepath.ToSlash(path) + return fmt.Sprintf("file://%s", path) + } + + // Make sure that we don't start with "/" since we add that below. + if path[0] == '/' { + path = path[1:] + } + return fmt.Sprintf("file:///%s", path) +} diff --git a/vendor/github.com/hashicorp/go-getter/detect_file_test.go b/vendor/github.com/hashicorp/go-getter/detect_file_test.go new file mode 100644 index 000000000..aa4ff2aaf --- /dev/null +++ b/vendor/github.com/hashicorp/go-getter/detect_file_test.go @@ -0,0 +1,88 @@ +package getter + +import ( + "runtime" + "testing" +) + +type fileTest struct { + in, pwd, out string + err bool +} + +var fileTests = []fileTest{ + {"./foo", "/pwd", "file:///pwd/foo", false}, + {"./foo?foo=bar", "/pwd", "file:///pwd/foo?foo=bar", false}, + {"foo", "/pwd", "file:///pwd/foo", false}, +} + +var unixFileTests = []fileTest{ + {"/foo", "/pwd", "file:///foo", false}, + {"/foo?bar=baz", "/pwd", "file:///foo?bar=baz", false}, +} + +var winFileTests = []fileTest{ + {"/foo", "/pwd", "file:///pwd/foo", false}, + {`C:\`, `/pwd`, `file://C:/`, false}, + {`C:\?bar=baz`, `/pwd`, `file://C:/?bar=baz`, false}, +} + +func TestFileDetector(t *testing.T) { + if runtime.GOOS == "windows" { + fileTests = append(fileTests, winFileTests...) + } else { + fileTests = append(fileTests, unixFileTests...) + } + + f := new(FileDetector) + for i, tc := range fileTests { + out, ok, err := f.Detect(tc.in, tc.pwd) + if err != nil { + t.Fatalf("err: %s", err) + } + if !ok { + t.Fatal("not ok") + } + + if out != tc.out { + t.Fatalf("%d: bad: %#v", i, out) + } + } +} + +var noPwdFileTests = []fileTest{ + {in: "./foo", pwd: "", out: "", err: true}, + {in: "foo", pwd: "", out: "", err: true}, +} + +var noPwdUnixFileTests = []fileTest{ + {in: "/foo", pwd: "", out: "file:///foo", err: false}, +} + +var noPwdWinFileTests = []fileTest{ + {in: "/foo", pwd: "", out: "", err: true}, + {in: `C:\`, pwd: ``, out: `file://C:/`, err: false}, +} + +func TestFileDetector_noPwd(t *testing.T) { + if runtime.GOOS == "windows" { + noPwdFileTests = append(noPwdFileTests, noPwdWinFileTests...) + } else { + noPwdFileTests = append(noPwdFileTests, noPwdUnixFileTests...) + } + + f := new(FileDetector) + for i, tc := range noPwdFileTests { + out, ok, err := f.Detect(tc.in, tc.pwd) + if err != nil != tc.err { + t.Fatalf("%d: err: %s", i, err) + } + if !ok { + t.Fatal("not ok") + } + + if out != tc.out { + t.Fatalf("%d: bad: %#v", i, out) + } + } +} diff --git a/vendor/github.com/hashicorp/go-getter/detect_github.go b/vendor/github.com/hashicorp/go-getter/detect_github.go new file mode 100644 index 000000000..c084ad9ac --- /dev/null +++ b/vendor/github.com/hashicorp/go-getter/detect_github.go @@ -0,0 +1,73 @@ +package getter + +import ( + "fmt" + "net/url" + "strings" +) + +// GitHubDetector implements Detector to detect GitHub URLs and turn +// them into URLs that the Git Getter can understand. 
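+//
+// For example, "github.com/hashicorp/foo/bar" becomes
+// "git::https://github.com/hashicorp/foo.git//bar", and
+// "git@github.com:hashicorp/foo.git" becomes
+// "git::ssh://git@github.com/hashicorp/foo.git".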
+type GitHubDetector struct{} + +func (d *GitHubDetector) Detect(src, _ string) (string, bool, error) { + if len(src) == 0 { + return "", false, nil + } + + if strings.HasPrefix(src, "github.com/") { + return d.detectHTTP(src) + } else if strings.HasPrefix(src, "git@github.com:") { + return d.detectSSH(src) + } + + return "", false, nil +} + +func (d *GitHubDetector) detectHTTP(src string) (string, bool, error) { + parts := strings.Split(src, "/") + if len(parts) < 3 { + return "", false, fmt.Errorf( + "GitHub URLs should be github.com/username/repo") + } + + urlStr := fmt.Sprintf("https://%s", strings.Join(parts[:3], "/")) + url, err := url.Parse(urlStr) + if err != nil { + return "", true, fmt.Errorf("error parsing GitHub URL: %s", err) + } + + if !strings.HasSuffix(url.Path, ".git") { + url.Path += ".git" + } + + if len(parts) > 3 { + url.Path += "//" + strings.Join(parts[3:], "/") + } + + return "git::" + url.String(), true, nil +} + +func (d *GitHubDetector) detectSSH(src string) (string, bool, error) { + idx := strings.Index(src, ":") + qidx := strings.Index(src, "?") + if qidx == -1 { + qidx = len(src) + } + + var u url.URL + u.Scheme = "ssh" + u.User = url.User("git") + u.Host = "github.com" + u.Path = src[idx+1 : qidx] + if qidx < len(src) { + q, err := url.ParseQuery(src[qidx+1:]) + if err != nil { + return "", true, fmt.Errorf("error parsing GitHub SSH URL: %s", err) + } + + u.RawQuery = q.Encode() + } + + return "git::" + u.String(), true, nil +} diff --git a/vendor/github.com/hashicorp/go-getter/detect_github_test.go b/vendor/github.com/hashicorp/go-getter/detect_github_test.go new file mode 100644 index 000000000..43ed9fcc6 --- /dev/null +++ b/vendor/github.com/hashicorp/go-getter/detect_github_test.go @@ -0,0 +1,55 @@ +package getter + +import ( + "testing" +) + +func TestGitHubDetector(t *testing.T) { + cases := []struct { + Input string + Output string + }{ + // HTTP + {"github.com/hashicorp/foo", "git::https://github.com/hashicorp/foo.git"}, + {"github.com/hashicorp/foo.git", "git::https://github.com/hashicorp/foo.git"}, + { + "github.com/hashicorp/foo/bar", + "git::https://github.com/hashicorp/foo.git//bar", + }, + { + "github.com/hashicorp/foo?foo=bar", + "git::https://github.com/hashicorp/foo.git?foo=bar", + }, + { + "github.com/hashicorp/foo.git?foo=bar", + "git::https://github.com/hashicorp/foo.git?foo=bar", + }, + + // SSH + {"git@github.com:hashicorp/foo.git", "git::ssh://git@github.com/hashicorp/foo.git"}, + { + "git@github.com:hashicorp/foo.git//bar", + "git::ssh://git@github.com/hashicorp/foo.git//bar", + }, + { + "git@github.com:hashicorp/foo.git?foo=bar", + "git::ssh://git@github.com/hashicorp/foo.git?foo=bar", + }, + } + + pwd := "/pwd" + f := new(GitHubDetector) + for i, tc := range cases { + output, ok, err := f.Detect(tc.Input, pwd) + if err != nil { + t.Fatalf("err: %s", err) + } + if !ok { + t.Fatal("not ok") + } + + if output != tc.Output { + t.Fatalf("%d: bad: %#v", i, output) + } + } +} diff --git a/vendor/github.com/hashicorp/go-getter/detect_s3.go b/vendor/github.com/hashicorp/go-getter/detect_s3.go new file mode 100644 index 000000000..8e0f4a03b --- /dev/null +++ b/vendor/github.com/hashicorp/go-getter/detect_s3.go @@ -0,0 +1,61 @@ +package getter + +import ( + "fmt" + "net/url" + "strings" +) + +// S3Detector implements Detector to detect S3 URLs and turn +// them into URLs that the S3 getter can understand. 
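+//
+// Both path-style URLs ("s3.amazonaws.com/bucket/foo") and virtual-hosted
+// style URLs ("bucket.s3.amazonaws.com/foo") are recognized, including
+// regional endpoints such as "s3-eu-west-1.amazonaws.com".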
+type S3Detector struct{} + +func (d *S3Detector) Detect(src, _ string) (string, bool, error) { + if len(src) == 0 { + return "", false, nil + } + + if strings.Contains(src, ".amazonaws.com/") { + return d.detectHTTP(src) + } + + return "", false, nil +} + +func (d *S3Detector) detectHTTP(src string) (string, bool, error) { + parts := strings.Split(src, "/") + if len(parts) < 2 { + return "", false, fmt.Errorf( + "URL is not a valid S3 URL") + } + + hostParts := strings.Split(parts[0], ".") + if len(hostParts) == 3 { + return d.detectPathStyle(hostParts[0], parts[1:]) + } else if len(hostParts) == 4 { + return d.detectVhostStyle(hostParts[1], hostParts[0], parts[1:]) + } else { + return "", false, fmt.Errorf( + "URL is not a valid S3 URL") + } +} + +func (d *S3Detector) detectPathStyle(region string, parts []string) (string, bool, error) { + urlStr := fmt.Sprintf("https://%s.amazonaws.com/%s", region, strings.Join(parts, "/")) + url, err := url.Parse(urlStr) + if err != nil { + return "", false, fmt.Errorf("error parsing S3 URL: %s", err) + } + + return "s3::" + url.String(), true, nil +} + +func (d *S3Detector) detectVhostStyle(region, bucket string, parts []string) (string, bool, error) { + urlStr := fmt.Sprintf("https://%s.amazonaws.com/%s/%s", region, bucket, strings.Join(parts, "/")) + url, err := url.Parse(urlStr) + if err != nil { + return "", false, fmt.Errorf("error parsing S3 URL: %s", err) + } + + return "s3::" + url.String(), true, nil +} diff --git a/vendor/github.com/hashicorp/go-getter/detect_s3_test.go b/vendor/github.com/hashicorp/go-getter/detect_s3_test.go new file mode 100644 index 000000000..f410c785c --- /dev/null +++ b/vendor/github.com/hashicorp/go-getter/detect_s3_test.go @@ -0,0 +1,84 @@ +package getter + +import ( + "testing" +) + +func TestS3Detector(t *testing.T) { + cases := []struct { + Input string + Output string + }{ + // Virtual hosted style + { + "bucket.s3.amazonaws.com/foo", + "s3::https://s3.amazonaws.com/bucket/foo", + }, + { + "bucket.s3.amazonaws.com/foo/bar", + "s3::https://s3.amazonaws.com/bucket/foo/bar", + }, + { + "bucket.s3.amazonaws.com/foo/bar.baz", + "s3::https://s3.amazonaws.com/bucket/foo/bar.baz", + }, + { + "bucket.s3-eu-west-1.amazonaws.com/foo", + "s3::https://s3-eu-west-1.amazonaws.com/bucket/foo", + }, + { + "bucket.s3-eu-west-1.amazonaws.com/foo/bar", + "s3::https://s3-eu-west-1.amazonaws.com/bucket/foo/bar", + }, + { + "bucket.s3-eu-west-1.amazonaws.com/foo/bar.baz", + "s3::https://s3-eu-west-1.amazonaws.com/bucket/foo/bar.baz", + }, + // Path style + { + "s3.amazonaws.com/bucket/foo", + "s3::https://s3.amazonaws.com/bucket/foo", + }, + { + "s3.amazonaws.com/bucket/foo/bar", + "s3::https://s3.amazonaws.com/bucket/foo/bar", + }, + { + "s3.amazonaws.com/bucket/foo/bar.baz", + "s3::https://s3.amazonaws.com/bucket/foo/bar.baz", + }, + { + "s3-eu-west-1.amazonaws.com/bucket/foo", + "s3::https://s3-eu-west-1.amazonaws.com/bucket/foo", + }, + { + "s3-eu-west-1.amazonaws.com/bucket/foo/bar", + "s3::https://s3-eu-west-1.amazonaws.com/bucket/foo/bar", + }, + { + "s3-eu-west-1.amazonaws.com/bucket/foo/bar.baz", + "s3::https://s3-eu-west-1.amazonaws.com/bucket/foo/bar.baz", + }, + // Misc tests + { + "s3-eu-west-1.amazonaws.com/bucket/foo/bar.baz?version=1234", + "s3::https://s3-eu-west-1.amazonaws.com/bucket/foo/bar.baz?version=1234", + }, + } + + pwd := "/pwd" + f := new(S3Detector) + for i, tc := range cases { + output, ok, err := f.Detect(tc.Input, pwd) + if err != nil { + t.Fatalf("err: %s", err) + } + if !ok { + t.Fatal("not ok") + } + 
+ if output != tc.Output { + t.Fatalf("%d: bad: %#v", i, output) + } + } +} diff --git a/vendor/github.com/hashicorp/go-getter/detect_test.go b/vendor/github.com/hashicorp/go-getter/detect_test.go new file mode 100644 index 000000000..a339f2888 --- /dev/null +++ b/vendor/github.com/hashicorp/go-getter/detect_test.go @@ -0,0 +1,51 @@ +package getter + +import ( + "testing" +) + +func TestDetect(t *testing.T) { + cases := []struct { + Input string + Pwd string + Output string + Err bool + }{ + {"./foo", "/foo", "file:///foo/foo", false}, + {"git::./foo", "/foo", "git::file:///foo/foo", false}, + { + "git::github.com/hashicorp/foo", + "", + "git::https://github.com/hashicorp/foo.git", + false, + }, + { + "./foo//bar", + "/foo", + "file:///foo/foo//bar", + false, + }, + { + "git::github.com/hashicorp/foo//bar", + "", + "git::https://github.com/hashicorp/foo.git//bar", + false, + }, + { + "git::https://github.com/hashicorp/consul.git", + "", + "git::https://github.com/hashicorp/consul.git", + false, + }, + } + + for i, tc := range cases { + output, err := Detect(tc.Input, tc.Pwd, Detectors) + if err != nil != tc.Err { + t.Fatalf("%d: bad err: %s", i, err) + } + if output != tc.Output { + t.Fatalf("%d: bad output: %s\nexpected: %s", i, output, tc.Output) + } + } +} diff --git a/vendor/github.com/hashicorp/go-getter/folder_storage.go b/vendor/github.com/hashicorp/go-getter/folder_storage.go new file mode 100644 index 000000000..647ccf459 --- /dev/null +++ b/vendor/github.com/hashicorp/go-getter/folder_storage.go @@ -0,0 +1,65 @@ +package getter + +import ( + "crypto/md5" + "encoding/hex" + "fmt" + "os" + "path/filepath" +) + +// FolderStorage is an implementation of the Storage interface that manages +// modules on the disk. +type FolderStorage struct { + // StorageDir is the directory where the modules will be stored. + StorageDir string +} + +// Dir implements Storage.Dir +func (s *FolderStorage) Dir(key string) (d string, e bool, err error) { + d = s.dir(key) + _, err = os.Stat(d) + if err == nil { + // Directory exists + e = true + return + } + if os.IsNotExist(err) { + // Directory doesn't exist + d = "" + e = false + err = nil + return + } + + // An error + d = "" + e = false + return +} + +// Get implements Storage.Get +func (s *FolderStorage) Get(key string, source string, update bool) error { + dir := s.dir(key) + if !update { + if _, err := os.Stat(dir); err == nil { + // If the directory already exists, then we're done since + // we're not updating. + return nil + } else if !os.IsNotExist(err) { + // If the error we got wasn't a file-not-exist error, then + // something went wrong and we should report it. + return fmt.Errorf("Error reading module directory: %s", err) + } + } + + // Get the source. This always forces an update. + return Get(dir, source) +} + +// dir returns the directory name internally that we'll use to map to +// internally. 
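+//
+// The key is hashed with MD5 so that arbitrary source strings map to
+// filesystem-safe directory names under StorageDir.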
+func (s *FolderStorage) dir(key string) string { + sum := md5.Sum([]byte(key)) + return filepath.Join(s.StorageDir, hex.EncodeToString(sum[:])) +} diff --git a/vendor/github.com/hashicorp/go-getter/folder_storage_test.go b/vendor/github.com/hashicorp/go-getter/folder_storage_test.go new file mode 100644 index 000000000..feb8d3425 --- /dev/null +++ b/vendor/github.com/hashicorp/go-getter/folder_storage_test.go @@ -0,0 +1,48 @@ +package getter + +import ( + "os" + "path/filepath" + "testing" +) + +func TestFolderStorage_impl(t *testing.T) { + var _ Storage = new(FolderStorage) +} + +func TestFolderStorage(t *testing.T) { + s := &FolderStorage{StorageDir: tempDir(t)} + + module := testModule("basic") + + // A module shouldn't exist at first... + _, ok, err := s.Dir(module) + if err != nil { + t.Fatalf("err: %s", err) + } + if ok { + t.Fatal("should not exist") + } + + key := "foo" + + // We can get it + err = s.Get(key, module, false) + if err != nil { + t.Fatalf("err: %s", err) + } + + // Now the module exists + dir, ok, err := s.Dir(key) + if err != nil { + t.Fatalf("err: %s", err) + } + if !ok { + t.Fatal("should exist") + } + + mainPath := filepath.Join(dir, "main.tf") + if _, err := os.Stat(mainPath); err != nil { + t.Fatalf("err: %s", err) + } +} diff --git a/vendor/github.com/hashicorp/go-getter/get.go b/vendor/github.com/hashicorp/go-getter/get.go new file mode 100644 index 000000000..cfdabaed6 --- /dev/null +++ b/vendor/github.com/hashicorp/go-getter/get.go @@ -0,0 +1,120 @@ +// getter is a package for downloading files or directories from a variety of +// protocols. +// +// getter is unique in its ability to download both directories and files. +// It also detects certain source strings to be protocol-specific URLs. For +// example, "github.com/hashicorp/go-getter" would turn into a Git URL and +// use the Git protocol. +// +// Protocols and detectors are extensible. +// +// To get started, see Client. +package getter + +import ( + "bytes" + "fmt" + "net/url" + "os/exec" + "regexp" + "syscall" +) + +// Getter defines the interface that schemes must implement to download +// things. +type Getter interface { + // Get downloads the given URL into the given directory. This always + // assumes that we're updating and gets the latest version that it can. + // + // The directory may already exist (if we're updating). If it is in a + // format that isn't understood, an error should be returned. Get shouldn't + // simply nuke the directory. + Get(string, *url.URL) error + + // GetFile downloads the give URL into the given path. The URL must + // reference a single file. If possible, the Getter should check if + // the remote end contains the same file and no-op this operation. + GetFile(string, *url.URL) error +} + +// Getters is the mapping of scheme to the Getter implementation that will +// be used to get a dependency. +var Getters map[string]Getter + +// forcedRegexp is the regular expression that finds forced getters. This +// syntax is schema::url, example: git::https://foo.com +var forcedRegexp = regexp.MustCompile(`^([A-Za-z0-9]+)::(.+)$`) + +func init() { + httpGetter := new(HttpGetter) + + Getters = map[string]Getter{ + "file": new(FileGetter), + "git": new(GitGetter), + "hg": new(HgGetter), + "s3": new(S3Getter), + "http": httpGetter, + "https": httpGetter, + } +} + +// Get downloads the directory specified by src into the folder specified by +// dst. If dst already exists, Get will attempt to update it. +// +// src is a URL, whereas dst is always just a file path to a folder. 
This +// folder doesn't need to exist. It will be created if it doesn't exist. +func Get(dst, src string) error { + return (&Client{ + Src: src, + Dst: dst, + Dir: true, + Getters: Getters, + }).Get() +} + +// GetFile downloads the file specified by src into the path specified by +// dst. +func GetFile(dst, src string) error { + return (&Client{ + Src: src, + Dst: dst, + Dir: false, + Getters: Getters, + }).Get() +} + +// getRunCommand is a helper that will run a command and capture the output +// in the case an error happens. +func getRunCommand(cmd *exec.Cmd) error { + var buf bytes.Buffer + cmd.Stdout = &buf + cmd.Stderr = &buf + err := cmd.Run() + if err == nil { + return nil + } + if exiterr, ok := err.(*exec.ExitError); ok { + // The program has exited with an exit code != 0 + if status, ok := exiterr.Sys().(syscall.WaitStatus); ok { + return fmt.Errorf( + "%s exited with %d: %s", + cmd.Path, + status.ExitStatus(), + buf.String()) + } + } + + return fmt.Errorf("error running %s: %s", cmd.Path, buf.String()) +} + +// getForcedGetter takes a source and returns the tuple of the forced +// getter and the raw URL (without the force syntax). +func getForcedGetter(src string) (string, string) { + var forced string + if ms := forcedRegexp.FindStringSubmatch(src); ms != nil { + forced = ms[1] + src = ms[2] + } + + return forced, src +} diff --git a/vendor/github.com/hashicorp/go-getter/get_file.go b/vendor/github.com/hashicorp/go-getter/get_file.go new file mode 100644 index 000000000..9564c9f48 --- /dev/null +++ b/vendor/github.com/hashicorp/go-getter/get_file.go @@ -0,0 +1,98 @@ +package getter + +import ( + "fmt" + "io" + "net/url" + "os" + "path/filepath" +) + +// FileGetter is a Getter implementation that will download a module from +// a file scheme. +type FileGetter struct { + // Copy, if set to true, will copy data instead of using a symlink + Copy bool +} + +func (g *FileGetter) Get(dst string, u *url.URL) error { + // The source path must exist and be a directory to be usable. + if fi, err := os.Stat(u.Path); err != nil { + return fmt.Errorf("source path error: %s", err) + } else if !fi.IsDir() { + return fmt.Errorf("source path must be a directory") + } + + fi, err := os.Lstat(dst) + if err != nil && !os.IsNotExist(err) { + return err + } + + // If the destination already exists, it must be a symlink + if err == nil { + mode := fi.Mode() + if mode&os.ModeSymlink == 0 { + return fmt.Errorf("destination exists and is not a symlink") + } + + // Remove the destination + if err := os.Remove(dst); err != nil { + return err + } + } + + // Create all the parent directories + if err := os.MkdirAll(filepath.Dir(dst), 0755); err != nil { + return err + } + + return os.Symlink(u.Path, dst) +} + +func (g *FileGetter) GetFile(dst string, u *url.URL) error { + // The source path must exist and be a directory to be usable. 
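+	// (For GetFile specifically, the source must be a regular file; the
+	// IsDir check below rejects directories.)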
+ if fi, err := os.Stat(u.Path); err != nil { + return fmt.Errorf("source path error: %s", err) + } else if fi.IsDir() { + return fmt.Errorf("source path must be a file") + } + + _, err := os.Lstat(dst) + if err != nil && !os.IsNotExist(err) { + return err + } + + // If the destination already exists, it must be a symlink + if err == nil { + // Remove the destination + if err := os.Remove(dst); err != nil { + return err + } + } + + // Create all the parent directories + if err := os.MkdirAll(filepath.Dir(dst), 0755); err != nil { + return err + } + + // If we're not copying, just symlink and we're done + if !g.Copy { + return os.Symlink(u.Path, dst) + } + + // Copy + srcF, err := os.Open(u.Path) + if err != nil { + return err + } + defer srcF.Close() + + dstF, err := os.Create(dst) + if err != nil { + return err + } + defer dstF.Close() + + _, err = io.Copy(dstF, srcF) + return err +} diff --git a/vendor/github.com/hashicorp/go-getter/get_file_test.go b/vendor/github.com/hashicorp/go-getter/get_file_test.go new file mode 100644 index 000000000..0c136d2cd --- /dev/null +++ b/vendor/github.com/hashicorp/go-getter/get_file_test.go @@ -0,0 +1,150 @@ +package getter + +import ( + "os" + "path/filepath" + "testing" +) + +func TestFileGetter_impl(t *testing.T) { + var _ Getter = new(FileGetter) +} + +func TestFileGetter(t *testing.T) { + g := new(FileGetter) + dst := tempDir(t) + + // With a dir that doesn't exist + if err := g.Get(dst, testModuleURL("basic")); err != nil { + t.Fatalf("err: %s", err) + } + + // Verify the destination folder is a symlink + fi, err := os.Lstat(dst) + if err != nil { + t.Fatalf("err: %s", err) + } + if fi.Mode()&os.ModeSymlink == 0 { + t.Fatal("destination is not a symlink") + } + + // Verify the main file exists + mainPath := filepath.Join(dst, "main.tf") + if _, err := os.Stat(mainPath); err != nil { + t.Fatalf("err: %s", err) + } +} + +func TestFileGetter_sourceFile(t *testing.T) { + g := new(FileGetter) + dst := tempDir(t) + + // With a source URL that is a path to a file + u := testModuleURL("basic") + u.Path += "/main.tf" + if err := g.Get(dst, u); err == nil { + t.Fatal("should error") + } +} + +func TestFileGetter_sourceNoExist(t *testing.T) { + g := new(FileGetter) + dst := tempDir(t) + + // With a source URL that doesn't exist + u := testModuleURL("basic") + u.Path += "/main" + if err := g.Get(dst, u); err == nil { + t.Fatal("should error") + } +} + +func TestFileGetter_dir(t *testing.T) { + g := new(FileGetter) + dst := tempDir(t) + + if err := os.MkdirAll(dst, 0755); err != nil { + t.Fatalf("err: %s", err) + } + + // With a dir that exists that isn't a symlink + if err := g.Get(dst, testModuleURL("basic")); err == nil { + t.Fatal("should error") + } +} + +func TestFileGetter_dirSymlink(t *testing.T) { + g := new(FileGetter) + dst := tempDir(t) + dst2 := tempDir(t) + + // Make parents + if err := os.MkdirAll(filepath.Dir(dst), 0755); err != nil { + t.Fatalf("err: %s", err) + } + if err := os.MkdirAll(dst2, 0755); err != nil { + t.Fatalf("err: %s", err) + } + + // Make a symlink + if err := os.Symlink(dst2, dst); err != nil { + t.Fatalf("err: %s", err) + } + + // With a dir that exists that isn't a symlink + if err := g.Get(dst, testModuleURL("basic")); err != nil { + t.Fatalf("err: %s", err) + } + + // Verify the main file exists + mainPath := filepath.Join(dst, "main.tf") + if _, err := os.Stat(mainPath); err != nil { + t.Fatalf("err: %s", err) + } +} + +func TestFileGetter_GetFile(t *testing.T) { + g := new(FileGetter) + dst := tempFile(t) + + // With a 
+	// With a destination file that doesn't exist
+	if err := g.GetFile(dst, testModuleURL("basic-file/foo.txt")); err != nil {
+		t.Fatalf("err: %s", err)
+	}
+
+	// Verify the destination file is a symlink
+	fi, err := os.Lstat(dst)
+	if err != nil {
+		t.Fatalf("err: %s", err)
+	}
+	if fi.Mode()&os.ModeSymlink == 0 {
+		t.Fatal("destination is not a symlink")
+	}
+
+	// Verify the file contents
+	assertContents(t, dst, "Hello\n")
+}
+
+func TestFileGetter_GetFile_Copy(t *testing.T) {
+	g := new(FileGetter)
+	g.Copy = true
+
+	dst := tempFile(t)
+
+	// With a destination file that doesn't exist
+	if err := g.GetFile(dst, testModuleURL("basic-file/foo.txt")); err != nil {
+		t.Fatalf("err: %s", err)
+	}
+
+	// Verify the destination file is a copy, not a symlink
+	fi, err := os.Lstat(dst)
+	if err != nil {
+		t.Fatalf("err: %s", err)
+	}
+	if fi.Mode()&os.ModeSymlink != 0 {
+		t.Fatal("destination is a symlink")
+	}
+
+	// Verify the file contents
+	assertContents(t, dst, "Hello\n")
}
diff --git a/vendor/github.com/hashicorp/go-getter/get_git.go b/vendor/github.com/hashicorp/go-getter/get_git.go
new file mode 100644
index 000000000..1bf0dc7e6
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-getter/get_git.go
@@ -0,0 +1,120 @@
+package getter
+
+import (
+	"fmt"
+	"io/ioutil"
+	"net/url"
+	"os"
+	"os/exec"
+	"path/filepath"
+
+	urlhelper "github.com/hashicorp/go-getter/helper/url"
+)
+
+// GitGetter is a Getter implementation that will download a module from
+// a git repository.
+type GitGetter struct{}
+
+func (g *GitGetter) Get(dst string, u *url.URL) error {
+	if _, err := exec.LookPath("git"); err != nil {
+		return fmt.Errorf("git must be available and on the PATH")
+	}
+
+	// Extract some query parameters we use
+	var ref string
+	q := u.Query()
+	if len(q) > 0 {
+		ref = q.Get("ref")
+		q.Del("ref")
+
+		// Copy the URL
+		var newU url.URL = *u
+		u = &newU
+		u.RawQuery = q.Encode()
+	}
+
+	// First: clone or update the repository
+	_, err := os.Stat(dst)
+	if err != nil && !os.IsNotExist(err) {
+		return err
+	}
+	if err == nil {
+		err = g.update(dst, ref)
+	} else {
+		err = g.clone(dst, u)
+	}
+	if err != nil {
+		return err
+	}
+
+	// Next: check out the proper tag/branch if one is specified
+	if ref == "" {
+		return nil
+	}
+
+	return g.checkout(dst, ref)
+}
+
+// GetFile for Git doesn't support updating at this time. It will download
+// the file every time.
+func (g *GitGetter) GetFile(dst string, u *url.URL) error {
+	td, err := ioutil.TempDir("", "getter-git")
+	if err != nil {
+		return err
+	}
+	if err := os.RemoveAll(td); err != nil {
+		return err
+	}
+
+	// Get the filename, and strip the filename from the URL so we can
+	// just get the repository directly.
+	filename := filepath.Base(u.Path)
+	u.Path = filepath.Dir(u.Path)
+
+	// Get the full repository
+	if err := g.Get(td, u); err != nil {
+		return err
+	}
+
+	// Copy the single file
+	u, err = urlhelper.Parse(fmtFileURL(filepath.Join(td, filename)))
+	if err != nil {
+		return err
+	}
+
+	fg := &FileGetter{Copy: true}
+	return fg.GetFile(dst, u)
+}
+
+func (g *GitGetter) checkout(dst string, ref string) error {
+	cmd := exec.Command("git", "checkout", ref)
+	cmd.Dir = dst
+	return getRunCommand(cmd)
+}
+
+func (g *GitGetter) clone(dst string, u *url.URL) error {
+	cmd := exec.Command("git", "clone", u.String(), dst)
+	return getRunCommand(cmd)
+}
+
+func (g *GitGetter) update(dst string, ref string) error {
+	// Determine if we're a branch.
If we're NOT a branch, then we just + // switch to master prior to checking out + cmd := exec.Command("git", "show-ref", "-q", "--verify", "refs/heads/"+ref) + cmd.Dir = dst + if getRunCommand(cmd) != nil { + // Not a branch, switch to master. This will also catch non-existent + // branches, in which case we want to switch to master and then + // checkout the proper branch later. + ref = "master" + } + + // We have to be on a branch to pull + if err := g.checkout(dst, ref); err != nil { + return err + } + + cmd = exec.Command("git", "pull", "--ff-only") + cmd.Dir = dst + return getRunCommand(cmd) +} diff --git a/vendor/github.com/hashicorp/go-getter/get_git_test.go b/vendor/github.com/hashicorp/go-getter/get_git_test.go new file mode 100644 index 000000000..08298cb2e --- /dev/null +++ b/vendor/github.com/hashicorp/go-getter/get_git_test.go @@ -0,0 +1,231 @@ +package getter + +import ( + "os" + "os/exec" + "path/filepath" + "testing" +) + +var testHasGit bool + +func init() { + if _, err := exec.LookPath("git"); err == nil { + testHasGit = true + } +} + +func TestGitGetter_impl(t *testing.T) { + var _ Getter = new(GitGetter) +} + +func TestGitGetter(t *testing.T) { + if !testHasGit { + t.Log("git not found, skipping") + t.Skip() + } + + g := new(GitGetter) + dst := tempDir(t) + + // Git doesn't allow nested ".git" directories so we do some hackiness + // here to get around that... + moduleDir := filepath.Join(fixtureDir, "basic-git") + oldName := filepath.Join(moduleDir, "DOTgit") + newName := filepath.Join(moduleDir, ".git") + if err := os.Rename(oldName, newName); err != nil { + t.Fatalf("err: %s", err) + } + defer os.Rename(newName, oldName) + + // With a dir that doesn't exist + if err := g.Get(dst, testModuleURL("basic-git")); err != nil { + t.Fatalf("err: %s", err) + } + + // Verify the main file exists + mainPath := filepath.Join(dst, "main.tf") + if _, err := os.Stat(mainPath); err != nil { + t.Fatalf("err: %s", err) + } +} + +func TestGitGetter_branch(t *testing.T) { + if !testHasGit { + t.Log("git not found, skipping") + t.Skip() + } + + g := new(GitGetter) + dst := tempDir(t) + + // Git doesn't allow nested ".git" directories so we do some hackiness + // here to get around that... 
+ moduleDir := filepath.Join(fixtureDir, "basic-git") + oldName := filepath.Join(moduleDir, "DOTgit") + newName := filepath.Join(moduleDir, ".git") + if err := os.Rename(oldName, newName); err != nil { + t.Fatalf("err: %s", err) + } + defer os.Rename(newName, oldName) + + url := testModuleURL("basic-git") + q := url.Query() + q.Add("ref", "test-branch") + url.RawQuery = q.Encode() + + if err := g.Get(dst, url); err != nil { + t.Fatalf("err: %s", err) + } + + // Verify the main file exists + mainPath := filepath.Join(dst, "main_branch.tf") + if _, err := os.Stat(mainPath); err != nil { + t.Fatalf("err: %s", err) + } + + // Get again should work + if err := g.Get(dst, url); err != nil { + t.Fatalf("err: %s", err) + } + + // Verify the main file exists + mainPath = filepath.Join(dst, "main_branch.tf") + if _, err := os.Stat(mainPath); err != nil { + t.Fatalf("err: %s", err) + } +} + +func TestGitGetter_branchUpdate(t *testing.T) { + if !testHasGit { + t.Log("git not found, skipping") + t.Skip() + } + + g := new(GitGetter) + dst := tempDir(t) + + // First setup the state with a fresh branch + moduleDir := filepath.Join(fixtureDir, "git-branch-update") + oldName := filepath.Join(moduleDir, "DOTgit-1") + newName := filepath.Join(moduleDir, ".git") + if err := os.Rename(oldName, newName); err != nil { + t.Fatalf("err: %s", err) + } + defer os.Rename(newName, oldName) + + // Get the "test-branch" branch + url := testModuleURL("git-branch-update") + q := url.Query() + q.Add("ref", "test-branch") + url.RawQuery = q.Encode() + if err := g.Get(dst, url); err != nil { + t.Fatalf("err: %s", err) + } + + // Verify the main file exists + mainPath := filepath.Join(dst, "main_branch.tf") + if _, err := os.Stat(mainPath); err != nil { + t.Fatalf("err: %s", err) + } + + // Swap the data to have a branch update + if err := os.Rename(newName, oldName); err != nil { + t.Fatalf("err: %s", err) + } + defer os.Rename(oldName, newName) + oldName = filepath.Join(moduleDir, "DOTgit-2") + newName = filepath.Join(moduleDir, ".git") + if err := os.Rename(oldName, newName); err != nil { + t.Fatalf("err: %s", err) + } + defer os.Rename(newName, oldName) + + // Get again should work + if err := g.Get(dst, url); err != nil { + t.Fatalf("err: %s", err) + } + + // Verify the main file exists + mainPath = filepath.Join(dst, "main_branch_update.tf") + if _, err := os.Stat(mainPath); err != nil { + t.Fatalf("err: %s", err) + } +} + +func TestGitGetter_tag(t *testing.T) { + if !testHasGit { + t.Log("git not found, skipping") + t.Skip() + } + + g := new(GitGetter) + dst := tempDir(t) + + // Git doesn't allow nested ".git" directories so we do some hackiness + // here to get around that... 
+ moduleDir := filepath.Join(fixtureDir, "basic-git") + oldName := filepath.Join(moduleDir, "DOTgit") + newName := filepath.Join(moduleDir, ".git") + if err := os.Rename(oldName, newName); err != nil { + t.Fatalf("err: %s", err) + } + defer os.Rename(newName, oldName) + + url := testModuleURL("basic-git") + q := url.Query() + q.Add("ref", "v1.0") + url.RawQuery = q.Encode() + + if err := g.Get(dst, url); err != nil { + t.Fatalf("err: %s", err) + } + + // Verify the main file exists + mainPath := filepath.Join(dst, "main_tag1.tf") + if _, err := os.Stat(mainPath); err != nil { + t.Fatalf("err: %s", err) + } + + // Get again should work + if err := g.Get(dst, url); err != nil { + t.Fatalf("err: %s", err) + } + + // Verify the main file exists + mainPath = filepath.Join(dst, "main_tag1.tf") + if _, err := os.Stat(mainPath); err != nil { + t.Fatalf("err: %s", err) + } +} + +func TestGitGetter_GetFile(t *testing.T) { + if !testHasGit { + t.Log("git not found, skipping") + t.Skip() + } + + g := new(GitGetter) + dst := tempFile(t) + + // Git doesn't allow nested ".git" directories so we do some hackiness + // here to get around that... + moduleDir := filepath.Join(fixtureDir, "basic-git") + oldName := filepath.Join(moduleDir, "DOTgit") + newName := filepath.Join(moduleDir, ".git") + if err := os.Rename(oldName, newName); err != nil { + t.Fatalf("err: %s", err) + } + defer os.Rename(newName, oldName) + + // Download + if err := g.GetFile(dst, testModuleURL("basic-git/foo.txt")); err != nil { + t.Fatalf("err: %s", err) + } + + // Verify the main file exists + if _, err := os.Stat(dst); err != nil { + t.Fatalf("err: %s", err) + } + assertContents(t, dst, "Hello\n") +} diff --git a/vendor/github.com/hashicorp/go-getter/get_hg.go b/vendor/github.com/hashicorp/go-getter/get_hg.go new file mode 100644 index 000000000..eb170d100 --- /dev/null +++ b/vendor/github.com/hashicorp/go-getter/get_hg.go @@ -0,0 +1,122 @@ +package getter + +import ( + "fmt" + "io/ioutil" + "net/url" + "os" + "os/exec" + "path/filepath" + "runtime" + + urlhelper "github.com/hashicorp/go-getter/helper/url" +) + +// HgGetter is a Getter implementation that will download a module from +// a Mercurial repository. +type HgGetter struct{} + +func (g *HgGetter) Get(dst string, u *url.URL) error { + if _, err := exec.LookPath("hg"); err != nil { + return fmt.Errorf("hg must be available and on the PATH") + } + + newURL, err := urlhelper.Parse(u.String()) + if err != nil { + return err + } + if fixWindowsDrivePath(newURL) { + // See valid file path form on http://www.selenic.com/hg/help/urls + newURL.Path = fmt.Sprintf("/%s", newURL.Path) + } + + // Extract some query parameters we use + var rev string + q := newURL.Query() + if len(q) > 0 { + rev = q.Get("rev") + q.Del("rev") + + newURL.RawQuery = q.Encode() + } + + _, err = os.Stat(dst) + if err != nil && !os.IsNotExist(err) { + return err + } + if err != nil { + if err := g.clone(dst, newURL); err != nil { + return err + } + } + + if err := g.pull(dst, newURL); err != nil { + return err + } + + return g.update(dst, newURL, rev) +} + +// GetFile for Hg doesn't support updating at this time. It will download +// the file every time. +func (g *HgGetter) GetFile(dst string, u *url.URL) error { + td, err := ioutil.TempDir("", "getter-hg") + if err != nil { + return err + } + if err := os.RemoveAll(td); err != nil { + return err + } + + // Get the filename, and strip the filename from the URL so we can + // just get the repository directly. 
+ filename := filepath.Base(u.Path) + u.Path = filepath.Dir(u.Path) + + // Get the full repository + if err := g.Get(td, u); err != nil { + return err + } + + // Copy the single file + u, err = urlhelper.Parse(fmtFileURL(filepath.Join(td, filename))) + if err != nil { + return err + } + + fg := &FileGetter{Copy: true} + return fg.GetFile(dst, u) +} + +func (g *HgGetter) clone(dst string, u *url.URL) error { + cmd := exec.Command("hg", "clone", "-U", u.String(), dst) + return getRunCommand(cmd) +} + +func (g *HgGetter) pull(dst string, u *url.URL) error { + cmd := exec.Command("hg", "pull") + cmd.Dir = dst + return getRunCommand(cmd) +} + +func (g *HgGetter) update(dst string, u *url.URL, rev string) error { + args := []string{"update"} + if rev != "" { + args = append(args, rev) + } + + cmd := exec.Command("hg", args...) + cmd.Dir = dst + return getRunCommand(cmd) +} + +func fixWindowsDrivePath(u *url.URL) bool { + // hg assumes a file:/// prefix for Windows drive letter file paths. + // (e.g. file:///c:/foo/bar) + // If the URL Path does not begin with a '/' character, the resulting URL + // path will have a file:// prefix. (e.g. file://c:/foo/bar) + // See http://www.selenic.com/hg/help/urls and the examples listed in + // http://selenic.com/repo/hg-stable/file/1265a3a71d75/mercurial/util.py#l1936 + return runtime.GOOS == "windows" && u.Scheme == "file" && + len(u.Path) > 1 && u.Path[0] != '/' && u.Path[1] == ':' +} diff --git a/vendor/github.com/hashicorp/go-getter/get_hg_test.go b/vendor/github.com/hashicorp/go-getter/get_hg_test.go new file mode 100644 index 000000000..9a5ce5db7 --- /dev/null +++ b/vendor/github.com/hashicorp/go-getter/get_hg_test.go @@ -0,0 +1,98 @@ +package getter + +import ( + "os" + "os/exec" + "path/filepath" + "testing" +) + +var testHasHg bool + +func init() { + if _, err := exec.LookPath("hg"); err == nil { + testHasHg = true + } +} + +func TestHgGetter_impl(t *testing.T) { + var _ Getter = new(HgGetter) +} + +func TestHgGetter(t *testing.T) { + if !testHasHg { + t.Log("hg not found, skipping") + t.Skip() + } + + g := new(HgGetter) + dst := tempDir(t) + + // With a dir that doesn't exist + if err := g.Get(dst, testModuleURL("basic-hg")); err != nil { + t.Fatalf("err: %s", err) + } + + // Verify the main file exists + mainPath := filepath.Join(dst, "main.tf") + if _, err := os.Stat(mainPath); err != nil { + t.Fatalf("err: %s", err) + } +} + +func TestHgGetter_branch(t *testing.T) { + if !testHasHg { + t.Log("hg not found, skipping") + t.Skip() + } + + g := new(HgGetter) + dst := tempDir(t) + + url := testModuleURL("basic-hg") + q := url.Query() + q.Add("rev", "test-branch") + url.RawQuery = q.Encode() + + if err := g.Get(dst, url); err != nil { + t.Fatalf("err: %s", err) + } + + // Verify the main file exists + mainPath := filepath.Join(dst, "main_branch.tf") + if _, err := os.Stat(mainPath); err != nil { + t.Fatalf("err: %s", err) + } + + // Get again should work + if err := g.Get(dst, url); err != nil { + t.Fatalf("err: %s", err) + } + + // Verify the main file exists + mainPath = filepath.Join(dst, "main_branch.tf") + if _, err := os.Stat(mainPath); err != nil { + t.Fatalf("err: %s", err) + } +} + +func TestHgGetter_GetFile(t *testing.T) { + if !testHasHg { + t.Log("hg not found, skipping") + t.Skip() + } + + g := new(HgGetter) + dst := tempFile(t) + + // Download + if err := g.GetFile(dst, testModuleURL("basic-hg/foo.txt")); err != nil { + t.Fatalf("err: %s", err) + } + + // Verify the main file exists + if _, err := os.Stat(dst); err != nil { + 
t.Fatalf("err: %s", err)
+	}
+	assertContents(t, dst, "Hello\n")
+}
diff --git a/vendor/github.com/hashicorp/go-getter/get_http.go b/vendor/github.com/hashicorp/go-getter/get_http.go
new file mode 100644
index 000000000..47b3e7e42
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-getter/get_http.go
@@ -0,0 +1,201 @@
+package getter
+
+import (
+	"encoding/xml"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"net/http"
+	"net/url"
+	"os"
+	"path/filepath"
+	"strings"
+)
+
+// HttpGetter is a Getter implementation that will download from an HTTP
+// endpoint.
+//
+// For file downloads, HTTP is used directly.
+//
+// The protocol for downloading a directory from an HTTP endpoint is as follows:
+//
+// An HTTP GET request is made to the URL with the additional GET parameter
+// "terraform-get=1". This lets you handle that scenario specially if you
+// wish. The response must be a 2xx.
+//
+// First, a header is looked for "X-Terraform-Get" which should contain
+// a source URL to download.
+//
+// If the header is not present, then a meta tag is searched for named
+// "terraform-get" and the content should be a source URL.
+//
+// The source URL, whether from the header or meta tag, must be a fully
+// formed URL. The shorthand syntax of "github.com/foo/bar" or relative
+// paths are not allowed.
+type HttpGetter struct{}
+
+func (g *HttpGetter) Get(dst string, u *url.URL) error {
+	// Copy the URL so we can modify it
+	var newU url.URL = *u
+	u = &newU
+
+	// Add terraform-get to the parameter.
+	q := u.Query()
+	q.Add("terraform-get", "1")
+	u.RawQuery = q.Encode()
+
+	// Get the URL
+	resp, err := http.Get(u.String())
+	if err != nil {
+		return err
+	}
+	defer resp.Body.Close()
+	if resp.StatusCode < 200 || resp.StatusCode >= 300 {
+		return fmt.Errorf("bad response code: %d", resp.StatusCode)
+	}
+
+	// Extract the source URL
+	var source string
+	if v := resp.Header.Get("X-Terraform-Get"); v != "" {
+		source = v
+	} else {
+		source, err = g.parseMeta(resp.Body)
+		if err != nil {
+			return err
+		}
+	}
+	if source == "" {
+		return fmt.Errorf("no source URL was returned")
+	}
+
+	// If there is a subdir component, then we download the root separately
+	// into a temporary directory, then copy over the proper subdir.
+	source, subDir := SourceDirSubdir(source)
+	if subDir == "" {
+		return Get(dst, source)
+	}
+
+	// We have a subdir, time to jump some hoops
+	return g.getSubdir(dst, source, subDir)
+}
+
+func (g *HttpGetter) GetFile(dst string, u *url.URL) error {
+	resp, err := http.Get(u.String())
+	if err != nil {
+		return err
+	}
+	defer resp.Body.Close()
+	if resp.StatusCode != 200 {
+		return fmt.Errorf("bad response code: %d", resp.StatusCode)
+	}
+
+	// Create all the parent directories
+	if err := os.MkdirAll(filepath.Dir(dst), 0755); err != nil {
+		return err
+	}
+
+	f, err := os.Create(dst)
+	if err != nil {
+		return err
+	}
+	defer f.Close()
+
+	_, err = io.Copy(f, resp.Body)
+	return err
+}
+
+// getSubdir downloads the source into the destination, but with
+// the proper subdir.
+func (g *HttpGetter) getSubdir(dst, source, subDir string) error { + // Create a temporary directory to store the full source + td, err := ioutil.TempDir("", "tf") + if err != nil { + return err + } + defer os.RemoveAll(td) + + // Download that into the given directory + if err := Get(td, source); err != nil { + return err + } + + // Make sure the subdir path actually exists + sourcePath := filepath.Join(td, subDir) + if _, err := os.Stat(sourcePath); err != nil { + return fmt.Errorf( + "Error downloading %s: %s", source, err) + } + + // Copy the subdirectory into our actual destination. + if err := os.RemoveAll(dst); err != nil { + return err + } + + // Make the final destination + if err := os.MkdirAll(dst, 0755); err != nil { + return err + } + + return copyDir(dst, sourcePath, false) +} + +// parseMeta looks for the first meta tag in the given reader that +// will give us the source URL. +func (g *HttpGetter) parseMeta(r io.Reader) (string, error) { + d := xml.NewDecoder(r) + d.CharsetReader = charsetReader + d.Strict = false + var err error + var t xml.Token + for { + t, err = d.Token() + if err != nil { + if err == io.EOF { + err = nil + } + return "", err + } + if e, ok := t.(xml.StartElement); ok && strings.EqualFold(e.Name.Local, "body") { + return "", nil + } + if e, ok := t.(xml.EndElement); ok && strings.EqualFold(e.Name.Local, "head") { + return "", nil + } + e, ok := t.(xml.StartElement) + if !ok || !strings.EqualFold(e.Name.Local, "meta") { + continue + } + if attrValue(e.Attr, "name") != "terraform-get" { + continue + } + if f := attrValue(e.Attr, "content"); f != "" { + return f, nil + } + } +} + +// attrValue returns the attribute value for the case-insensitive key +// `name', or the empty string if nothing is found. +func attrValue(attrs []xml.Attr, name string) string { + for _, a := range attrs { + if strings.EqualFold(a.Name.Local, name) { + return a.Value + } + } + return "" +} + +// charsetReader returns a reader for the given charset. Currently +// it only supports UTF-8 and ASCII. Otherwise, it returns a meaningful +// error which is printed by go get, so the user can find why the package +// wasn't downloaded if the encoding is not supported. Note that, in +// order to reduce potential errors, ASCII is treated as UTF-8 (i.e. characters +// greater than 0x7f are not rejected). +func charsetReader(charset string, input io.Reader) (io.Reader, error) { + switch strings.ToLower(charset) { + case "ascii": + return input, nil + default: + return nil, fmt.Errorf("can't decode XML document using charset %q", charset) + } +} diff --git a/vendor/github.com/hashicorp/go-getter/get_http_test.go b/vendor/github.com/hashicorp/go-getter/get_http_test.go new file mode 100644 index 000000000..417523164 --- /dev/null +++ b/vendor/github.com/hashicorp/go-getter/get_http_test.go @@ -0,0 +1,184 @@ +package getter + +import ( + "fmt" + "net" + "net/http" + "net/url" + "os" + "path/filepath" + "testing" +) + +func TestHttpGetter_impl(t *testing.T) { + var _ Getter = new(HttpGetter) +} + +func TestHttpGetter_header(t *testing.T) { + ln := testHttpServer(t) + defer ln.Close() + + g := new(HttpGetter) + dst := tempDir(t) + + var u url.URL + u.Scheme = "http" + u.Host = ln.Addr().String() + u.Path = "/header" + + // Get it! 
+	if err := g.Get(dst, &u); err != nil {
+		t.Fatalf("err: %s", err)
+	}
+
+	// Verify the main file exists
+	mainPath := filepath.Join(dst, "main.tf")
+	if _, err := os.Stat(mainPath); err != nil {
+		t.Fatalf("err: %s", err)
+	}
+}
+
+func TestHttpGetter_meta(t *testing.T) {
+	ln := testHttpServer(t)
+	defer ln.Close()
+
+	g := new(HttpGetter)
+	dst := tempDir(t)
+
+	var u url.URL
+	u.Scheme = "http"
+	u.Host = ln.Addr().String()
+	u.Path = "/meta"
+
+	// Get it!
+	if err := g.Get(dst, &u); err != nil {
+		t.Fatalf("err: %s", err)
+	}
+
+	// Verify the main file exists
+	mainPath := filepath.Join(dst, "main.tf")
+	if _, err := os.Stat(mainPath); err != nil {
+		t.Fatalf("err: %s", err)
+	}
+}
+
+func TestHttpGetter_metaSubdir(t *testing.T) {
+	ln := testHttpServer(t)
+	defer ln.Close()
+
+	g := new(HttpGetter)
+	dst := tempDir(t)
+
+	var u url.URL
+	u.Scheme = "http"
+	u.Host = ln.Addr().String()
+	u.Path = "/meta-subdir"
+
+	// Get it!
+	if err := g.Get(dst, &u); err != nil {
+		t.Fatalf("err: %s", err)
+	}
+
+	// Verify the main file exists
+	mainPath := filepath.Join(dst, "sub.tf")
+	if _, err := os.Stat(mainPath); err != nil {
+		t.Fatalf("err: %s", err)
+	}
+}
+
+func TestHttpGetter_none(t *testing.T) {
+	ln := testHttpServer(t)
+	defer ln.Close()
+
+	g := new(HttpGetter)
+	dst := tempDir(t)
+
+	var u url.URL
+	u.Scheme = "http"
+	u.Host = ln.Addr().String()
+	u.Path = "/none"
+
+	// Get it!
+	if err := g.Get(dst, &u); err == nil {
+		t.Fatal("should error")
+	}
+}
+
+func TestHttpGetter_file(t *testing.T) {
+	ln := testHttpServer(t)
+	defer ln.Close()
+
+	g := new(HttpGetter)
+	dst := tempFile(t)
+
+	var u url.URL
+	u.Scheme = "http"
+	u.Host = ln.Addr().String()
+	u.Path = "/file"
+
+	// Get it!
+	if err := g.GetFile(dst, &u); err != nil {
+		t.Fatalf("err: %s", err)
+	}
+
+	// Verify the main file exists
+	if _, err := os.Stat(dst); err != nil {
+		t.Fatalf("err: %s", err)
+	}
+	assertContents(t, dst, "Hello\n")
+}
+
+func testHttpServer(t *testing.T) net.Listener {
+	ln, err := net.Listen("tcp", "127.0.0.1:0")
+	if err != nil {
+		t.Fatalf("err: %s", err)
+	}
+
+	mux := http.NewServeMux()
+	mux.HandleFunc("/file", testHttpHandlerFile)
+	mux.HandleFunc("/header", testHttpHandlerHeader)
+	mux.HandleFunc("/meta", testHttpHandlerMeta)
+	mux.HandleFunc("/meta-subdir", testHttpHandlerMetaSubdir)
+
+	var server http.Server
+	server.Handler = mux
+	go server.Serve(ln)
+
+	return ln
+}
+
+func testHttpHandlerFile(w http.ResponseWriter, r *http.Request) {
+	w.Write([]byte("Hello\n"))
+}
+
+func testHttpHandlerHeader(w http.ResponseWriter, r *http.Request) {
+	w.Header().Add("X-Terraform-Get", testModuleURL("basic").String())
+	w.WriteHeader(200)
+}
+
+func testHttpHandlerMeta(w http.ResponseWriter, r *http.Request) {
+	w.Write([]byte(fmt.Sprintf(testHttpMetaStr, testModuleURL("basic").String())))
+}
+
+func testHttpHandlerMetaSubdir(w http.ResponseWriter, r *http.Request) {
+	w.Write([]byte(fmt.Sprintf(testHttpMetaStr, testModuleURL("basic//subdir").String())))
+}
+
+func testHttpHandlerNone(w http.ResponseWriter, r *http.Request) {
+	w.Write([]byte(testHttpNoneStr))
+}
+
+const testHttpMetaStr = `
+<html>
+<head>
+<meta name="terraform-get" content="%s">
+</head>
+</html>
+`
+
+const testHttpNoneStr = `
+<html>
+<head>
+</head>
+</html>
+`
diff --git a/vendor/github.com/hashicorp/go-getter/get_mock.go b/vendor/github.com/hashicorp/go-getter/get_mock.go
new file mode 100644
index 000000000..a7d3d3052
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-getter/get_mock.go
@@ -0,0 +1,45 @@
+package getter
+
+import (
+	"net/url"
+)
+
+// MockGetter is an implementation of Getter that can be used for tests.
+type MockGetter struct {
+	// Proxy, if set, will be called after recording the calls below.
+	// If it isn't set, then the *Err values will be returned.
+	Proxy Getter
+
+	GetCalled bool
+	GetDst    string
+	GetURL    *url.URL
+	GetErr    error
+
+	GetFileCalled bool
+	GetFileDst    string
+	GetFileURL    *url.URL
+	GetFileErr    error
+}
+
+func (g *MockGetter) Get(dst string, u *url.URL) error {
+	g.GetCalled = true
+	g.GetDst = dst
+	g.GetURL = u
+
+	if g.Proxy != nil {
+		return g.Proxy.Get(dst, u)
+	}
+
+	return g.GetErr
+}
+
+func (g *MockGetter) GetFile(dst string, u *url.URL) error {
+	g.GetFileCalled = true
+	g.GetFileDst = dst
+	g.GetFileURL = u
+
+	if g.Proxy != nil {
+		return g.Proxy.GetFile(dst, u)
+	}
+	return g.GetFileErr
+}
diff --git a/vendor/github.com/hashicorp/go-getter/get_s3.go b/vendor/github.com/hashicorp/go-getter/get_s3.go
new file mode 100644
index 000000000..6014fc797
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-getter/get_s3.go
@@ -0,0 +1,194 @@
+package getter
+
+import (
+	"fmt"
+	"io"
+	"net/url"
+	"os"
+	"path/filepath"
+	"strings"
+	"time"
+
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/aws/credentials"
+	"github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds"
+	"github.com/aws/aws-sdk-go/aws/session"
+	"github.com/aws/aws-sdk-go/service/s3"
+)
+
+// S3Getter is a Getter implementation that will download a module from
+// an S3 bucket.
+type S3Getter struct{}
+
+func (g *S3Getter) Get(dst string, u *url.URL) error {
+	// Parse URL
+	region, bucket, path, _, creds, err := g.parseUrl(u)
+	if err != nil {
+		return err
+	}
+
+	// Remove destination if it already exists
+	_, err = os.Stat(dst)
+	if err != nil && !os.IsNotExist(err) {
+		return err
+	}
+
+	if err == nil {
+		// Remove the destination
+		if err := os.RemoveAll(dst); err != nil {
+			return err
+		}
+	}
+
+	// Create all the parent directories
+	if err := os.MkdirAll(filepath.Dir(dst), 0755); err != nil {
+		return err
+	}
+
+	config := g.getAWSConfig(region, creds)
+	sess := session.New(config)
+	client := s3.New(sess)
+
+	// List files in path, keep listing until no more objects are found
+	lastMarker := ""
+	hasMore := true
+	for hasMore {
+		req := &s3.ListObjectsInput{
+			Bucket: aws.String(bucket),
+			Prefix: aws.String(path),
+		}
+		if lastMarker != "" {
+			req.Marker = aws.String(lastMarker)
+		}
+
+		resp, err := client.ListObjects(req)
+		if err != nil {
+			return err
+		}
+
+		hasMore = aws.BoolValue(resp.IsTruncated)
+
+		// Get each object storing each file relative to the destination path
+		for _, object := range resp.Contents {
+			lastMarker = aws.StringValue(object.Key)
+			objPath := aws.StringValue(object.Key)
+
+			// If the key ends with a slash, assume it is a directory and skip it
+			if strings.HasSuffix(objPath, "/") {
+				continue
+			}
+
+			// Get the object destination path
+			objDst, err := filepath.Rel(path, objPath)
+			if err != nil {
+				return err
+			}
+			objDst = filepath.Join(dst, objDst)
+
+			if err := g.getObject(client, objDst, bucket, objPath, ""); err != nil {
+				return err
+			}
+		}
+	}
+
+	return nil
+}
+
+func (g *S3Getter) GetFile(dst string, u *url.URL) error {
+	region, bucket, path, version, creds, err := g.parseUrl(u)
+	if err != nil {
+		return err
+	}
+
+	config := g.getAWSConfig(region, creds)
+	sess := session.New(config)
+	client := s3.New(sess)
+	return g.getObject(client, dst, bucket, path, version)
+}
+
+func (g *S3Getter) getObject(client *s3.S3, dst, bucket, key, version string) error {
+	req := &s3.GetObjectInput{
+		Bucket: aws.String(bucket),
+		Key:
aws.String(key), + } + if version != "" { + req.VersionId = aws.String(version) + } + + resp, err := client.GetObject(req) + if err != nil { + return err + } + + // Create all the parent directories + if err := os.MkdirAll(filepath.Dir(dst), 0755); err != nil { + return err + } + + f, err := os.Create(dst) + if err != nil { + return err + } + defer f.Close() + + _, err = io.Copy(f, resp.Body) + return err +} + +func (g *S3Getter) getAWSConfig(region string, creds *credentials.Credentials) *aws.Config { + conf := &aws.Config{} + if creds == nil { + creds = credentials.NewChainCredentials( + []credentials.Provider{ + &credentials.EnvProvider{}, + &credentials.SharedCredentialsProvider{Filename: "", Profile: ""}, + &ec2rolecreds.EC2RoleProvider{ExpiryWindow: 5 * time.Minute}, + }) + } + + conf.Credentials = creds + if region != "" { + conf.Region = aws.String(region) + } + + return conf +} + +func (g *S3Getter) parseUrl(u *url.URL) (region, bucket, path, version string, creds *credentials.Credentials, err error) { + // Expected host style: s3.amazonaws.com. They always have 3 parts, + // although the first may differ if we're accessing a specific region. + hostParts := strings.Split(u.Host, ".") + if len(hostParts) != 3 { + err = fmt.Errorf("URL is not a valid S3 URL") + return + } + + // Parse the region out of the first part of the host + region = strings.TrimPrefix(strings.TrimPrefix(hostParts[0], "s3-"), "s3") + if region == "" { + region = "us-east-1" + } + + pathParts := strings.SplitN(u.Path, "/", 3) + if len(pathParts) != 3 { + err = fmt.Errorf("URL is not a valid S3 URL") + return + } + + bucket = pathParts[1] + path = pathParts[2] + version = u.Query().Get("version") + + _, hasAwsId := u.Query()["aws_access_key_id"] + _, hasAwsSecret := u.Query()["aws_access_key_secret"] + _, hasAwsToken := u.Query()["aws_access_token"] + if hasAwsId || hasAwsSecret || hasAwsToken { + creds = credentials.NewStaticCredentials( + u.Query().Get("aws_access_key_id"), + u.Query().Get("aws_access_key_secret"), + u.Query().Get("aws_access_token"), + ) + } + + return +} diff --git a/vendor/github.com/hashicorp/go-getter/get_s3_test.go b/vendor/github.com/hashicorp/go-getter/get_s3_test.go new file mode 100644 index 000000000..dd8b30351 --- /dev/null +++ b/vendor/github.com/hashicorp/go-getter/get_s3_test.go @@ -0,0 +1,108 @@ +package getter + +import ( + "os" + "path/filepath" + "testing" + + "github.com/aws/aws-sdk-go/aws/awserr" +) + +func init() { + // These are well known restricted IAM keys to a HashiCorp-managed bucket + // in a private AWS account that only has access to the open source test + // resources. + // + // We do the string concat below to avoid AWS autodetection of a key. This + // key is locked down an IAM policy that is read-only so we're purposely + // exposing it. 
+ os.Setenv("AWS_ACCESS_KEY", "AKIAJCTNQ" + "IOBWAYXKGZA") + os.Setenv("AWS_SECRET_KEY", "jcQOTYdXNzU5MO" + "5ExqbE1U995dIfKCKQtiVobMvr") +} + +func TestS3Getter_impl(t *testing.T) { + var _ Getter = new(S3Getter) +} + +func TestS3Getter(t *testing.T) { + g := new(S3Getter) + dst := tempDir(t) + + // With a dir that doesn't exist + err := g.Get( + dst, testURL("https://s3.amazonaws.com/hc-oss-test/go-getter/folder")) + if err != nil { + t.Fatalf("err: %s", err) + } + + // Verify the main file exists + mainPath := filepath.Join(dst, "main.tf") + if _, err := os.Stat(mainPath); err != nil { + t.Fatalf("err: %s", err) + } +} + +func TestS3Getter_subdir(t *testing.T) { + g := new(S3Getter) + dst := tempDir(t) + + // With a dir that doesn't exist + err := g.Get( + dst, testURL("https://s3.amazonaws.com/hc-oss-test/go-getter/folder/subfolder")) + if err != nil { + t.Fatalf("err: %s", err) + } + + // Verify the main file exists + subPath := filepath.Join(dst, "sub.tf") + if _, err := os.Stat(subPath); err != nil { + t.Fatalf("err: %s", err) + } +} + +func TestS3Getter_GetFile(t *testing.T) { + g := new(S3Getter) + dst := tempFile(t) + + // Download + err := g.GetFile( + dst, testURL("https://s3.amazonaws.com/hc-oss-test/go-getter/folder/main.tf")) + if err != nil { + t.Fatalf("err: %s", err) + } + + // Verify the main file exists + if _, err := os.Stat(dst); err != nil { + t.Fatalf("err: %s", err) + } + assertContents(t, dst, "# Main\n") +} + +func TestS3Getter_GetFile_badParams(t *testing.T) { + g := new(S3Getter) + dst := tempFile(t) + + // Download + err := g.GetFile( + dst, + testURL("https://s3.amazonaws.com/hc-oss-test/go-getter/folder/main.tf?aws_access_key_id=foo&aws_access_key_secret=bar&aws_access_token=baz")) + if err == nil { + t.Fatalf("expected error, got none") + } + + if reqerr, ok := err.(awserr.RequestFailure); !ok || reqerr.StatusCode() != 403 { + t.Fatalf("expected InvalidAccessKeyId error") + } +} + +func TestS3Getter_GetFile_notfound(t *testing.T) { + g := new(S3Getter) + dst := tempFile(t) + + // Download + err := g.GetFile( + dst, testURL("https://s3.amazonaws.com/hc-oss-test/go-getter/folder/404.tf")) + if err == nil { + t.Fatalf("expected error, got none") + } +} diff --git a/vendor/github.com/hashicorp/go-getter/get_test.go b/vendor/github.com/hashicorp/go-getter/get_test.go new file mode 100644 index 000000000..fc758df54 --- /dev/null +++ b/vendor/github.com/hashicorp/go-getter/get_test.go @@ -0,0 +1,245 @@ +package getter + +import ( + "os" + "path/filepath" + "strings" + "testing" +) + +func TestGet_badSchema(t *testing.T) { + dst := tempDir(t) + u := testModule("basic") + u = strings.Replace(u, "file", "nope", -1) + + if err := Get(dst, u); err == nil { + t.Fatal("should error") + } +} + +func TestGet_file(t *testing.T) { + dst := tempDir(t) + u := testModule("basic") + + if err := Get(dst, u); err != nil { + t.Fatalf("err: %s", err) + } + + mainPath := filepath.Join(dst, "main.tf") + if _, err := os.Stat(mainPath); err != nil { + t.Fatalf("err: %s", err) + } +} + +func TestGet_fileDetect(t *testing.T) { + dst := tempDir(t) + u := filepath.Join("./test-fixtures", "basic") + pwd, err := os.Getwd() + if err != nil { + t.Fatalf("err: %s", err) + } + + client := &Client{ + Src: u, + Dst: dst, + Pwd: pwd, + Dir: true, + } + + if err := client.Get(); err != nil { + t.Fatalf("err: %s", err) + } + + mainPath := filepath.Join(dst, "main.tf") + if _, err := os.Stat(mainPath); err != nil { + t.Fatalf("err: %s", err) + } +} + +func TestGet_fileForced(t *testing.T) { + dst := 
tempDir(t) + u := testModule("basic") + u = "file::" + u + + if err := Get(dst, u); err != nil { + t.Fatalf("err: %s", err) + } + + mainPath := filepath.Join(dst, "main.tf") + if _, err := os.Stat(mainPath); err != nil { + t.Fatalf("err: %s", err) + } +} + +func TestGet_fileSubdir(t *testing.T) { + dst := tempDir(t) + u := testModule("basic//subdir") + + if err := Get(dst, u); err != nil { + t.Fatalf("err: %s", err) + } + + mainPath := filepath.Join(dst, "sub.tf") + if _, err := os.Stat(mainPath); err != nil { + t.Fatalf("err: %s", err) + } +} + +func TestGet_archive(t *testing.T) { + dst := tempDir(t) + u := filepath.Join("./test-fixtures", "archive.tar.gz") + u, _ = filepath.Abs(u) + + if err := Get(dst, u); err != nil { + t.Fatalf("err: %s", err) + } + + mainPath := filepath.Join(dst, "main.tf") + if _, err := os.Stat(mainPath); err != nil { + t.Fatalf("err: %s", err) + } +} + +func TestGetFile(t *testing.T) { + dst := tempFile(t) + u := testModule("basic-file/foo.txt") + + if err := GetFile(dst, u); err != nil { + t.Fatalf("err: %s", err) + } + + // Verify the main file exists + assertContents(t, dst, "Hello\n") +} + +func TestGetFile_archive(t *testing.T) { + dst := tempFile(t) + u := testModule("basic-file-archive/archive.tar.gz") + + if err := GetFile(dst, u); err != nil { + t.Fatalf("err: %s", err) + } + + // Verify the main file exists + assertContents(t, dst, "Hello\n") +} + +func TestGetFile_archiveChecksum(t *testing.T) { + dst := tempFile(t) + u := testModule( + "basic-file-archive/archive.tar.gz?checksum=md5:fbd90037dacc4b1ab40811d610dde2f0") + + if err := GetFile(dst, u); err != nil { + t.Fatalf("err: %s", err) + } + + // Verify the main file exists + assertContents(t, dst, "Hello\n") +} + +func TestGetFile_archiveNoUnarchive(t *testing.T) { + dst := tempFile(t) + u := testModule("basic-file-archive/archive.tar.gz") + u += "?archive=false" + + if err := GetFile(dst, u); err != nil { + t.Fatalf("err: %s", err) + } + + // Verify the main file exists + actual := testMD5(t, dst) + expected := "fbd90037dacc4b1ab40811d610dde2f0" + if actual != expected { + t.Fatalf("bad: %s", actual) + } +} + +func TestGetFile_checksum(t *testing.T) { + cases := []struct { + Append string + Err bool + }{ + { + "", + false, + }, + + // MD5 + { + "?checksum=md5:09f7e02f1290be211da707a266f153b3", + false, + }, + { + "?checksum=md5:09f7e02f1290be211da707a266f153b4", + true, + }, + + // SHA1 + { + "?checksum=sha1:1d229271928d3f9e2bb0375bd6ce5db6c6d348d9", + false, + }, + { + "?checksum=sha1:1d229271928d3f9e2bb0375bd6ce5db6c6d348d0", + true, + }, + + // SHA256 + { + "?checksum=sha256:66a045b452102c59d840ec097d59d9467e13a3f34f6494e539ffd32c1bb35f18", + false, + }, + { + "?checksum=sha256:66a045b452102c59d840ec097d59d9467e13a3f34f6494e539ffd32c1bb35f19", + true, + }, + + // SHA512 + { + "?checksum=sha512:c2bad2223811194582af4d1508ac02cd69eeeeedeeb98d54fcae4dcefb13cc882e7640328206603d3fb9cd5f949a9be0db054dd34fbfa190c498a5fe09750cef", + false, + }, + { + "?checksum=sha512:c2bad2223811194582af4d1508ac02cd69eeeeedeeb98d54fcae4dcefb13cc882e7640328206603d3fb9cd5f949a9be0db054dd34fbfa190c498a5fe09750ced", + true, + }, + } + + for _, tc := range cases { + u := testModule("basic-file/foo.txt") + tc.Append + + func() { + dst := tempFile(t) + defer os.Remove(dst) + if err := GetFile(dst, u); (err != nil) != tc.Err { + t.Fatalf("append: %s\n\nerr: %s", tc.Append, err) + } + + // Verify the main file exists + assertContents(t, dst, "Hello\n") + }() + } +} + +func TestGetFile_checksumURL(t *testing.T) { + dst := 
tempFile(t) + u := testModule("basic-file/foo.txt") + "?checksum=md5:09f7e02f1290be211da707a266f153b3" + + getter := &MockGetter{Proxy: new(FileGetter)} + client := &Client{ + Src: u, + Dst: dst, + Dir: false, + Getters: map[string]Getter{ + "file": getter, + }, + } + + if err := client.Get(); err != nil { + t.Fatalf("err: %s", err) + } + + if v := getter.GetFileURL.Query().Get("checksum"); v != "" { + t.Fatalf("bad: %s", v) + } +} diff --git a/vendor/github.com/hashicorp/go-getter/helper/url/url.go b/vendor/github.com/hashicorp/go-getter/helper/url/url.go new file mode 100644 index 000000000..02497c254 --- /dev/null +++ b/vendor/github.com/hashicorp/go-getter/helper/url/url.go @@ -0,0 +1,14 @@ +package url + +import ( + "net/url" +) + +// Parse parses rawURL into a URL structure. +// The rawURL may be relative or absolute. +// +// Parse is a wrapper for the Go stdlib net/url Parse function, but returns +// Windows "safe" URLs on Windows platforms. +func Parse(rawURL string) (*url.URL, error) { + return parse(rawURL) +} diff --git a/vendor/github.com/hashicorp/go-getter/helper/url/url_test.go b/vendor/github.com/hashicorp/go-getter/helper/url/url_test.go new file mode 100644 index 000000000..ce3226b4b --- /dev/null +++ b/vendor/github.com/hashicorp/go-getter/helper/url/url_test.go @@ -0,0 +1,88 @@ +package url + +import ( + "runtime" + "testing" +) + +type parseTest struct { + rawURL string + scheme string + host string + path string + str string + err bool +} + +var parseTests = []parseTest{ + { + rawURL: "/foo/bar", + scheme: "", + host: "", + path: "/foo/bar", + str: "/foo/bar", + err: false, + }, + { + rawURL: "file:///dir/", + scheme: "file", + host: "", + path: "/dir/", + str: "file:///dir/", + err: false, + }, +} + +var winParseTests = []parseTest{ + { + rawURL: `C:\`, + scheme: ``, + host: ``, + path: `C:/`, + str: `C:/`, + err: false, + }, + { + rawURL: `file://C:\`, + scheme: `file`, + host: ``, + path: `C:/`, + str: `file://C:/`, + err: false, + }, + { + rawURL: `file:///C:\`, + scheme: `file`, + host: ``, + path: `C:/`, + str: `file://C:/`, + err: false, + }, +} + +func TestParse(t *testing.T) { + if runtime.GOOS == "windows" { + parseTests = append(parseTests, winParseTests...) 
+ } + for i, pt := range parseTests { + url, err := Parse(pt.rawURL) + if err != nil && !pt.err { + t.Errorf("test %d: unexpected error: %s", i, err) + } + if err == nil && pt.err { + t.Errorf("test %d: expected an error", i) + } + if url.Scheme != pt.scheme { + t.Errorf("test %d: expected Scheme = %q, got %q", i, pt.scheme, url.Scheme) + } + if url.Host != pt.host { + t.Errorf("test %d: expected Host = %q, got %q", i, pt.host, url.Host) + } + if url.Path != pt.path { + t.Errorf("test %d: expected Path = %q, got %q", i, pt.path, url.Path) + } + if url.String() != pt.str { + t.Errorf("test %d: expected url.String() = %q, got %q", i, pt.str, url.String()) + } + } +} diff --git a/vendor/github.com/hashicorp/go-getter/helper/url/url_unix.go b/vendor/github.com/hashicorp/go-getter/helper/url/url_unix.go new file mode 100644 index 000000000..ed1352a91 --- /dev/null +++ b/vendor/github.com/hashicorp/go-getter/helper/url/url_unix.go @@ -0,0 +1,11 @@ +// +build !windows + +package url + +import ( + "net/url" +) + +func parse(rawURL string) (*url.URL, error) { + return url.Parse(rawURL) +} diff --git a/vendor/github.com/hashicorp/go-getter/helper/url/url_windows.go b/vendor/github.com/hashicorp/go-getter/helper/url/url_windows.go new file mode 100644 index 000000000..4655226f6 --- /dev/null +++ b/vendor/github.com/hashicorp/go-getter/helper/url/url_windows.go @@ -0,0 +1,40 @@ +package url + +import ( + "fmt" + "net/url" + "path/filepath" + "strings" +) + +func parse(rawURL string) (*url.URL, error) { + // Make sure we're using "/" since URLs are "/"-based. + rawURL = filepath.ToSlash(rawURL) + + u, err := url.Parse(rawURL) + if err != nil { + return nil, err + } + + if len(rawURL) > 1 && rawURL[1] == ':' { + // Assume we're dealing with a drive letter file path where the drive + // letter has been parsed into the URL Scheme, and the rest of the path + // has been parsed into the URL Path without the leading ':' character. + u.Path = fmt.Sprintf("%s:%s", string(rawURL[0]), u.Path) + u.Scheme = "" + } + + if len(u.Host) > 1 && u.Host[1] == ':' && strings.HasPrefix(rawURL, "file://") { + // Assume we're dealing with a drive letter file path where the drive + // letter has been parsed into the URL Host. + u.Path = fmt.Sprintf("%s%s", u.Host, u.Path) + u.Host = "" + } + + // Remove leading slash for absolute file paths. 
+	if len(u.Path) > 2 && u.Path[0] == '/' && u.Path[2] == ':' {
+		u.Path = u.Path[1:]
+	}
+
+	return u, err
+}
diff --git a/vendor/github.com/hashicorp/go-getter/module_test.go b/vendor/github.com/hashicorp/go-getter/module_test.go
new file mode 100644
index 000000000..332f34ef0
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-getter/module_test.go
@@ -0,0 +1,83 @@
+package getter
+
+import (
+	"io/ioutil"
+	"net/url"
+	"os"
+	"path/filepath"
+	"reflect"
+	"testing"
+
+	urlhelper "github.com/hashicorp/go-getter/helper/url"
+	"github.com/hashicorp/terraform/config"
+)
+
+const fixtureDir = "./test-fixtures"
+
+func tempDir(t *testing.T) string {
+	dir, err := ioutil.TempDir("", "tf")
+	if err != nil {
+		t.Fatalf("err: %s", err)
+	}
+	if err := os.RemoveAll(dir); err != nil {
+		t.Fatalf("err: %s", err)
+	}
+
+	return dir
+}
+
+func tempFile(t *testing.T) string {
+	dir := tempDir(t)
+	return filepath.Join(dir, "foo")
+}
+
+func testConfig(t *testing.T, n string) *config.Config {
+	c, err := config.LoadDir(filepath.Join(fixtureDir, n))
+	if err != nil {
+		t.Fatalf("err: %s", err)
+	}
+
+	return c
+}
+
+func testModule(n string) string {
+	p := filepath.Join(fixtureDir, n)
+	p, err := filepath.Abs(p)
+	if err != nil {
+		panic(err)
+	}
+	return fmtFileURL(p)
+}
+
+func testModuleURL(n string) *url.URL {
+	u, err := urlhelper.Parse(testModule(n))
+	if err != nil {
+		panic(err)
+	}
+
+	return u
+}
+
+func testURL(s string) *url.URL {
+	u, err := urlhelper.Parse(s)
+	if err != nil {
+		panic(err)
+	}
+
+	return u
+}
+
+func testStorage(t *testing.T) Storage {
+	return &FolderStorage{StorageDir: tempDir(t)}
+}
+
+func assertContents(t *testing.T, path string, contents string) {
+	data, err := ioutil.ReadFile(path)
+	if err != nil {
+		t.Fatalf("err: %s", err)
+	}
+
+	if !reflect.DeepEqual(data, []byte(contents)) {
+		t.Fatalf("bad. expected:\n\n%s\n\nGot:\n\n%s", contents, string(data))
+	}
+}
diff --git a/vendor/github.com/hashicorp/go-getter/source.go b/vendor/github.com/hashicorp/go-getter/source.go
new file mode 100644
index 000000000..4d5ee3cce
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-getter/source.go
@@ -0,0 +1,36 @@
+package getter
+
+import (
+	"strings"
+)
+
+// SourceDirSubdir takes a source and returns a tuple of the URL without
+// the subdir and the subdir itself.
+func SourceDirSubdir(src string) (string, string) {
+	// Calculate an offset to avoid accidentally marking the scheme
+	// as the dir.
+	var offset int
+	if idx := strings.Index(src, "://"); idx > -1 {
+		offset = idx + 3
+	}
+
+	// First see if we even have an explicit subdir
+	idx := strings.Index(src[offset:], "//")
+	if idx == -1 {
+		return src, ""
+	}
+
+	idx += offset
+	subdir := src[idx+2:]
+	src = src[:idx]
+
+	// Next, check if we have query parameters and push them onto the
+	// URL.
+ if idx = strings.Index(subdir, "?"); idx > -1 { + query := subdir[idx:] + subdir = subdir[:idx] + src += query + } + + return src, subdir +} diff --git a/vendor/github.com/hashicorp/go-getter/source_test.go b/vendor/github.com/hashicorp/go-getter/source_test.go new file mode 100644 index 000000000..ca97ab533 --- /dev/null +++ b/vendor/github.com/hashicorp/go-getter/source_test.go @@ -0,0 +1,39 @@ +package getter + +import ( + "testing" +) + +func TestSourceDirSubdir(t *testing.T) { + cases := []struct { + Input string + Dir, Sub string + }{ + { + "hashicorp.com", + "hashicorp.com", "", + }, + { + "hashicorp.com//foo", + "hashicorp.com", "foo", + }, + { + "hashicorp.com//foo?bar=baz", + "hashicorp.com?bar=baz", "foo", + }, + { + "file://foo//bar", + "file://foo", "bar", + }, + } + + for i, tc := range cases { + adir, asub := SourceDirSubdir(tc.Input) + if adir != tc.Dir { + t.Fatalf("%d: bad dir: %#v", i, adir) + } + if asub != tc.Sub { + t.Fatalf("%d: bad sub: %#v", i, asub) + } + } +} diff --git a/vendor/github.com/hashicorp/go-getter/storage.go b/vendor/github.com/hashicorp/go-getter/storage.go new file mode 100644 index 000000000..2bc6b9ec3 --- /dev/null +++ b/vendor/github.com/hashicorp/go-getter/storage.go @@ -0,0 +1,13 @@ +package getter + +// Storage is an interface that knows how to lookup downloaded directories +// as well as download and update directories from their sources into the +// proper location. +type Storage interface { + // Dir returns the directory on local disk where the directory source + // can be loaded from. + Dir(string) (string, bool, error) + + // Get will download and optionally update the given directory. + Get(string, string, bool) error +} diff --git a/vendor/github.com/hashicorp/go-getter/test-fixtures/archive.tar.gz b/vendor/github.com/hashicorp/go-getter/test-fixtures/archive.tar.gz new file mode 100644 index 0000000000000000000000000000000000000000..999e5fc9d6974cb108aa94abadac6e2aecfac0dc GIT binary patch literal 141 zcmb2|=3qGQG&PKY`RxTqu4V_G*2L?!u6_YV$#X3GJ=j^-Ug-NCzIsRYLT=60|F>wZ`SUo>_I@-\).*$/Signed-off-by: \1/p') +# grep -qs "^$SOB" "$1" || echo "$SOB" >> "$1" + +# This example catches duplicate Signed-off-by lines. + +test "" = "$(grep '^Signed-off-by: ' "$1" | + sort | uniq -c | sed -e '/^[ ]*1[ ]/d')" || { + echo >&2 Duplicate Signed-off-by lines. + exit 1 +} diff --git a/vendor/github.com/hashicorp/go-getter/test-fixtures/basic-git/DOTgit/hooks/post-update.sample b/vendor/github.com/hashicorp/go-getter/test-fixtures/basic-git/DOTgit/hooks/post-update.sample new file mode 100644 index 000000000..ec17ec193 --- /dev/null +++ b/vendor/github.com/hashicorp/go-getter/test-fixtures/basic-git/DOTgit/hooks/post-update.sample @@ -0,0 +1,8 @@ +#!/bin/sh +# +# An example hook script to prepare a packed repository for use over +# dumb transports. +# +# To enable this hook, rename this file to "post-update". + +exec git update-server-info diff --git a/vendor/github.com/hashicorp/go-getter/test-fixtures/basic-git/DOTgit/hooks/pre-applypatch.sample b/vendor/github.com/hashicorp/go-getter/test-fixtures/basic-git/DOTgit/hooks/pre-applypatch.sample new file mode 100644 index 000000000..b1f187c2e --- /dev/null +++ b/vendor/github.com/hashicorp/go-getter/test-fixtures/basic-git/DOTgit/hooks/pre-applypatch.sample @@ -0,0 +1,14 @@ +#!/bin/sh +# +# An example hook script to verify what is about to be committed +# by applypatch from an e-mail message. 
+#
+# The hook should exit with non-zero status after issuing an
+# appropriate message if it wants to stop the commit.
+#
+# To enable this hook, rename this file to "pre-applypatch".
+
+. git-sh-setup
+test -x "$GIT_DIR/hooks/pre-commit" &&
+	exec "$GIT_DIR/hooks/pre-commit" ${1+"$@"}
+:
diff --git a/vendor/github.com/hashicorp/go-getter/test-fixtures/basic-git/DOTgit/hooks/pre-commit.sample b/vendor/github.com/hashicorp/go-getter/test-fixtures/basic-git/DOTgit/hooks/pre-commit.sample
new file mode 100644
index 000000000..68d62d544
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-getter/test-fixtures/basic-git/DOTgit/hooks/pre-commit.sample
@@ -0,0 +1,49 @@
+#!/bin/sh
+#
+# An example hook script to verify what is about to be committed.
+# Called by "git commit" with no arguments.  The hook should
+# exit with non-zero status after issuing an appropriate message if
+# it wants to stop the commit.
+#
+# To enable this hook, rename this file to "pre-commit".
+
+if git rev-parse --verify HEAD >/dev/null 2>&1
+then
+	against=HEAD
+else
+	# Initial commit: diff against an empty tree object
+	against=4b825dc642cb6eb9a060e54bf8d69288fbee4904
+fi
+
+# If you want to allow non-ASCII filenames set this variable to true.
+allownonascii=$(git config --bool hooks.allownonascii)
+
+# Redirect output to stderr.
+exec 1>&2
+
+# Cross platform projects tend to avoid non-ASCII filenames; prevent
+# them from being added to the repository. We exploit the fact that the
+# printable range starts at the space character and ends with tilde.
+if [ "$allownonascii" != "true" ] &&
+	# Note that the use of brackets around a tr range is ok here, (it's
+	# even required, for portability to Solaris 10's /usr/bin/tr), since
+	# the square bracket bytes happen to fall in the designated range.
+	test $(git diff --cached --name-only --diff-filter=A -z $against |
+	  LC_ALL=C tr -d '[ -~]\0' | wc -c) != 0
+then
+	cat <<\EOF
+Error: Attempt to add a non-ASCII file name.
+
+This can cause problems if you want to work with people on other platforms.
+
+To be portable it is advisable to rename the file.
+
+If you know what you are doing you can disable this check using:
+
+  git config hooks.allownonascii true
+EOF
+	exit 1
+fi
+
+# If there are whitespace errors, print the offending file names and fail.
+exec git diff-index --check --cached $against --
diff --git a/vendor/github.com/hashicorp/go-getter/test-fixtures/basic-git/DOTgit/hooks/pre-push.sample b/vendor/github.com/hashicorp/go-getter/test-fixtures/basic-git/DOTgit/hooks/pre-push.sample
new file mode 100644
index 000000000..1f3bcebfd
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-getter/test-fixtures/basic-git/DOTgit/hooks/pre-push.sample
@@ -0,0 +1,54 @@
+#!/bin/sh
+
+# An example hook script to verify what is about to be pushed.  Called by "git
+# push" after it has checked the remote status, but before anything has been
+# pushed.  If this script exits with a non-zero status nothing will be pushed.
+#
+# This hook is called with the following parameters:
+#
+# $1 -- Name of the remote to which the push is being done
+# $2 -- URL to which the push is being done
+#
+# If pushing without using a named remote those arguments will be equal.
+#
+# Information about the commits which are being pushed is supplied as lines to
+# the standard input in the form:
+#
+#   <local ref> <local sha1> <remote ref> <remote sha1>
+#
+# This sample shows how to prevent push of commits where the log message starts
+# with "WIP" (work in progress).
+ +remote="$1" +url="$2" + +z40=0000000000000000000000000000000000000000 + +IFS=' ' +while read local_ref local_sha remote_ref remote_sha +do + if [ "$local_sha" = $z40 ] + then + # Handle delete + : + else + if [ "$remote_sha" = $z40 ] + then + # New branch, examine all commits + range="$local_sha" + else + # Update to existing branch, examine new commits + range="$remote_sha..$local_sha" + fi + + # Check for WIP commit + commit=`git rev-list -n 1 --grep '^WIP' "$range"` + if [ -n "$commit" ] + then + echo "Found WIP commit in $local_ref, not pushing" + exit 1 + fi + fi +done + +exit 0 diff --git a/vendor/github.com/hashicorp/go-getter/test-fixtures/basic-git/DOTgit/hooks/pre-rebase.sample b/vendor/github.com/hashicorp/go-getter/test-fixtures/basic-git/DOTgit/hooks/pre-rebase.sample new file mode 100644 index 000000000..9773ed4cb --- /dev/null +++ b/vendor/github.com/hashicorp/go-getter/test-fixtures/basic-git/DOTgit/hooks/pre-rebase.sample @@ -0,0 +1,169 @@ +#!/bin/sh +# +# Copyright (c) 2006, 2008 Junio C Hamano +# +# The "pre-rebase" hook is run just before "git rebase" starts doing +# its job, and can prevent the command from running by exiting with +# non-zero status. +# +# The hook is called with the following parameters: +# +# $1 -- the upstream the series was forked from. +# $2 -- the branch being rebased (or empty when rebasing the current branch). +# +# This sample shows how to prevent topic branches that are already +# merged to 'next' branch from getting rebased, because allowing it +# would result in rebasing already published history. + +publish=next +basebranch="$1" +if test "$#" = 2 +then + topic="refs/heads/$2" +else + topic=`git symbolic-ref HEAD` || + exit 0 ;# we do not interrupt rebasing detached HEAD +fi + +case "$topic" in +refs/heads/??/*) + ;; +*) + exit 0 ;# we do not interrupt others. + ;; +esac + +# Now we are dealing with a topic branch being rebased +# on top of master. Is it OK to rebase it? + +# Does the topic really exist? +git show-ref -q "$topic" || { + echo >&2 "No such branch $topic" + exit 1 +} + +# Is topic fully merged to master? +not_in_master=`git rev-list --pretty=oneline ^master "$topic"` +if test -z "$not_in_master" +then + echo >&2 "$topic is fully merged to master; better remove it." + exit 1 ;# we could allow it, but there is no point. +fi + +# Is topic ever merged to next? If so you should not be rebasing it. +only_next_1=`git rev-list ^master "^$topic" ${publish} | sort` +only_next_2=`git rev-list ^master ${publish} | sort` +if test "$only_next_1" = "$only_next_2" +then + not_in_topic=`git rev-list "^$topic" master` + if test -z "$not_in_topic" + then + echo >&2 "$topic is already up-to-date with master" + exit 1 ;# we could allow it, but there is no point. + else + exit 0 + fi +else + not_in_next=`git rev-list --pretty=oneline ^${publish} "$topic"` + /usr/bin/perl -e ' + my $topic = $ARGV[0]; + my $msg = "* $topic has commits already merged to public branch:\n"; + my (%not_in_next) = map { + /^([0-9a-f]+) /; + ($1 => 1); + } split(/\n/, $ARGV[1]); + for my $elem (map { + /^([0-9a-f]+) (.*)$/; + [$1 => $2]; + } split(/\n/, $ARGV[2])) { + if (!exists $not_in_next{$elem->[0]}) { + if ($msg) { + print STDERR $msg; + undef $msg; + } + print STDERR " $elem->[1]\n"; + } + } + ' "$topic" "$not_in_next" "$not_in_master" + exit 1 +fi + +exit 0 + +################################################################ + +This sample hook safeguards topic branches that have been +published from being rewound. 
+ +The workflow assumed here is: + + * Once a topic branch forks from "master", "master" is never + merged into it again (either directly or indirectly). + + * Once a topic branch is fully cooked and merged into "master", + it is deleted. If you need to build on top of it to correct + earlier mistakes, a new topic branch is created by forking at + the tip of the "master". This is not strictly necessary, but + it makes it easier to keep your history simple. + + * Whenever you need to test or publish your changes to topic + branches, merge them into "next" branch. + +The script, being an example, hardcodes the publish branch name +to be "next", but it is trivial to make it configurable via +$GIT_DIR/config mechanism. + +With this workflow, you would want to know: + +(1) ... if a topic branch has ever been merged to "next". Young + topic branches can have stupid mistakes you would rather + clean up before publishing, and things that have not been + merged into other branches can be easily rebased without + affecting other people. But once it is published, you would + not want to rewind it. + +(2) ... if a topic branch has been fully merged to "master". + Then you can delete it. More importantly, you should not + build on top of it -- other people may already want to + change things related to the topic as patches against your + "master", so if you need further changes, it is better to + fork the topic (perhaps with the same name) afresh from the + tip of "master". + +Let's look at this example: + + o---o---o---o---o---o---o---o---o---o "next" + / / / / + / a---a---b A / / + / / / / + / / c---c---c---c B / + / / / \ / + / / / b---b C \ / + / / / / \ / + ---o---o---o---o---o---o---o---o---o---o---o "master" + + +A, B and C are topic branches. + + * A has one fix since it was merged up to "next". + + * B has finished. It has been fully merged up to "master" and "next", + and is ready to be deleted. + + * C has not merged to "next" at all. + +We would want to allow C to be rebased, refuse A, and encourage +B to be deleted. + +To compute (1): + + git rev-list ^master ^topic next + git rev-list ^master next + + if these match, topic has not merged in next at all. + +To compute (2): + + git rev-list master..topic + + if this is empty, it is fully merged to "master". diff --git a/vendor/github.com/hashicorp/go-getter/test-fixtures/basic-git/DOTgit/hooks/prepare-commit-msg.sample b/vendor/github.com/hashicorp/go-getter/test-fixtures/basic-git/DOTgit/hooks/prepare-commit-msg.sample new file mode 100644 index 000000000..f093a02ec --- /dev/null +++ b/vendor/github.com/hashicorp/go-getter/test-fixtures/basic-git/DOTgit/hooks/prepare-commit-msg.sample @@ -0,0 +1,36 @@ +#!/bin/sh +# +# An example hook script to prepare the commit log message. +# Called by "git commit" with the name of the file that has the +# commit message, followed by the description of the commit +# message's source. The hook's purpose is to edit the commit +# message file. If the hook fails with a non-zero status, +# the commit is aborted. +# +# To enable this hook, rename this file to "prepare-commit-msg". + +# This hook includes three examples. The first comments out the +# "Conflicts:" part of a merge commit. +# +# The second includes the output of "git diff --name-status -r" +# into the message, just before the "git status" output. It is +# commented because it doesn't cope with --amend or with squashed +# commits. +# +# The third example adds a Signed-off-by line to the message, that can +# still be edited. 
This is rarely a good idea.
+
+case "$2,$3" in
+ merge,)
+ /usr/bin/perl -i.bak -ne 's/^/# /, s/^# #/#/ if /^Conflicts/ .. /#/; print' "$1" ;;
+
+# ,|template,)
+# /usr/bin/perl -i.bak -pe '
+# print "\n" . `git diff --cached --name-status -r`
+# if /^#/ && $first++ == 0' "$1" ;;
+
+ *) ;;
+esac
+
+# SOB=$(git var GIT_AUTHOR_IDENT | sed -n 's/^\(.*>\).*$/Signed-off-by: \1/p')
+# grep -qs "^$SOB" "$1" || echo "$SOB" >> "$1"
diff --git a/vendor/github.com/hashicorp/go-getter/test-fixtures/basic-git/DOTgit/hooks/update.sample b/vendor/github.com/hashicorp/go-getter/test-fixtures/basic-git/DOTgit/hooks/update.sample
new file mode 100644
index 000000000..d84758373
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-getter/test-fixtures/basic-git/DOTgit/hooks/update.sample
@@ -0,0 +1,128 @@
+#!/bin/sh
+#
+# An example hook script to block unannotated tags from entering.
+# Called by "git receive-pack" with arguments: refname sha1-old sha1-new
+#
+# To enable this hook, rename this file to "update".
+#
+# Config
+# ------
+# hooks.allowunannotated
+# This boolean sets whether unannotated tags will be allowed into the
+# repository. By default they won't be.
+# hooks.allowdeletetag
+# This boolean sets whether deleting tags will be allowed in the
+# repository. By default they won't be.
+# hooks.allowmodifytag
+# This boolean sets whether a tag may be modified after creation. By default
+# it won't be.
+# hooks.allowdeletebranch
+# This boolean sets whether deleting branches will be allowed in the
+# repository. By default they won't be.
+# hooks.denycreatebranch
+# This boolean sets whether remotely creating branches will be denied
+# in the repository. By default this is allowed.
+#
+
+# --- Command line
+refname="$1"
+oldrev="$2"
+newrev="$3"
+
+# --- Safety check
+if [ -z "$GIT_DIR" ]; then
+ echo "Don't run this script from the command line." >&2
+ echo " (if you want, you could supply GIT_DIR then run" >&2
+ echo " $0 <ref> <oldrev> <newrev>)" >&2
+ exit 1
+fi
+
+if [ -z "$refname" -o -z "$oldrev" -o -z "$newrev" ]; then
+ echo "usage: $0 <ref> <oldrev> <newrev>" >&2
+ exit 1
+fi
+
+# --- Config
+allowunannotated=$(git config --bool hooks.allowunannotated)
+allowdeletebranch=$(git config --bool hooks.allowdeletebranch)
+denycreatebranch=$(git config --bool hooks.denycreatebranch)
+allowdeletetag=$(git config --bool hooks.allowdeletetag)
+allowmodifytag=$(git config --bool hooks.allowmodifytag)
+
+# check for no description
+projectdesc=$(sed -e '1q' "$GIT_DIR/description")
+case "$projectdesc" in
+"Unnamed repository"* | "")
+ echo "*** Project description file hasn't been set" >&2
+ exit 1
+ ;;
+esac
+
+# --- Check types
+# if $newrev is 0000...0000, it's a commit to delete a ref.
+zero="0000000000000000000000000000000000000000"
+if [ "$newrev" = "$zero" ]; then
+ newrev_type=delete
+else
+ newrev_type=$(git cat-file -t $newrev)
+fi
+
+case "$refname","$newrev_type" in
+ refs/tags/*,commit)
+ # un-annotated tag
+ short_refname=${refname##refs/tags/}
+ if [ "$allowunannotated" != "true" ]; then
+ echo "*** The un-annotated tag, $short_refname, is not allowed in this repository" >&2
+ echo "*** Use 'git tag [ -a | -s ]' for tags you want to propagate." >&2
+ exit 1
+ fi
+ ;;
+ refs/tags/*,delete)
+ # delete tag
+ if [ "$allowdeletetag" != "true" ]; then
+ echo "*** Deleting a tag is not allowed in this repository" >&2
+ exit 1
+ fi
+ ;;
+ refs/tags/*,tag)
+ # annotated tag
+ if [ "$allowmodifytag" != "true" ] && git rev-parse $refname > /dev/null 2>&1
+ then
+ echo "*** Tag '$refname' already exists."
>&2 + echo "*** Modifying a tag is not allowed in this repository." >&2 + exit 1 + fi + ;; + refs/heads/*,commit) + # branch + if [ "$oldrev" = "$zero" -a "$denycreatebranch" = "true" ]; then + echo "*** Creating a branch is not allowed in this repository" >&2 + exit 1 + fi + ;; + refs/heads/*,delete) + # delete branch + if [ "$allowdeletebranch" != "true" ]; then + echo "*** Deleting a branch is not allowed in this repository" >&2 + exit 1 + fi + ;; + refs/remotes/*,commit) + # tracking branch + ;; + refs/remotes/*,delete) + # delete tracking branch + if [ "$allowdeletebranch" != "true" ]; then + echo "*** Deleting a tracking branch is not allowed in this repository" >&2 + exit 1 + fi + ;; + *) + # Anything else (is there anything else?) + echo "*** Update hook: unknown type of update to ref $refname of type $newrev_type" >&2 + exit 1 + ;; +esac + +# --- Finished +exit 0 diff --git a/vendor/github.com/hashicorp/go-getter/test-fixtures/basic-git/DOTgit/index b/vendor/github.com/hashicorp/go-getter/test-fixtures/basic-git/DOTgit/index new file mode 100644 index 0000000000000000000000000000000000000000..9586d960fc3943b669e8356b989d09f4f0166a34 GIT binary patch literal 320 zcmZ?q402{*U|<4b<}le-b|B3FqZt_(SQw;__A)RuE@5C`{0fv30b;h7sVr63k5;5M z+0@R|mSxFXmUq8`fjuohU$3O11f*ZaE(l05gb7w12GL-1q0U@%rNLeE 1410850637 -0700 commit (initial): A commit +497bc37401eb3c9b11865b1768725b64066eccee 243f0fc5c4e586d1a3daa54c981b6f34e9ab1085 Mitchell Hashimoto 1410886526 -0700 commit: tag1 +243f0fc5c4e586d1a3daa54c981b6f34e9ab1085 1f31e97f053caeb5d6b7bffa3faf82941c99efa2 Mitchell Hashimoto 1410886536 -0700 commit: remove tag1 +1f31e97f053caeb5d6b7bffa3faf82941c99efa2 1f31e97f053caeb5d6b7bffa3faf82941c99efa2 Mitchell Hashimoto 1410886909 -0700 checkout: moving from master to test-branch +1f31e97f053caeb5d6b7bffa3faf82941c99efa2 7b7614f8759ac8b5e4b02be65ad8e2667be6dd87 Mitchell Hashimoto 1410886913 -0700 commit: Branch +7b7614f8759ac8b5e4b02be65ad8e2667be6dd87 1f31e97f053caeb5d6b7bffa3faf82941c99efa2 Mitchell Hashimoto 1410886916 -0700 checkout: moving from test-branch to master +1f31e97f053caeb5d6b7bffa3faf82941c99efa2 146492b04efe0aae2b8288c5c0aef6a951030fde Mitchell Hashimoto 1411767116 -0700 commit: add subdir +146492b04efe0aae2b8288c5c0aef6a951030fde 25851303ab930fdacfdb7df91adbf626a483df48 Mitchell Hashimoto 1444775178 -0700 commit: add a file diff --git a/vendor/github.com/hashicorp/go-getter/test-fixtures/basic-git/DOTgit/logs/refs/heads/master b/vendor/github.com/hashicorp/go-getter/test-fixtures/basic-git/DOTgit/logs/refs/heads/master new file mode 100644 index 000000000..ca4c77dd8 --- /dev/null +++ b/vendor/github.com/hashicorp/go-getter/test-fixtures/basic-git/DOTgit/logs/refs/heads/master @@ -0,0 +1,5 @@ +0000000000000000000000000000000000000000 497bc37401eb3c9b11865b1768725b64066eccee Mitchell Hashimoto 1410850637 -0700 commit (initial): A commit +497bc37401eb3c9b11865b1768725b64066eccee 243f0fc5c4e586d1a3daa54c981b6f34e9ab1085 Mitchell Hashimoto 1410886526 -0700 commit: tag1 +243f0fc5c4e586d1a3daa54c981b6f34e9ab1085 1f31e97f053caeb5d6b7bffa3faf82941c99efa2 Mitchell Hashimoto 1410886536 -0700 commit: remove tag1 +1f31e97f053caeb5d6b7bffa3faf82941c99efa2 146492b04efe0aae2b8288c5c0aef6a951030fde Mitchell Hashimoto 1411767116 -0700 commit: add subdir +146492b04efe0aae2b8288c5c0aef6a951030fde 25851303ab930fdacfdb7df91adbf626a483df48 Mitchell Hashimoto 1444775178 -0700 commit: add a file diff --git a/vendor/github.com/hashicorp/go-getter/test-fixtures/basic-git/DOTgit/logs/refs/heads/test-branch 
b/vendor/github.com/hashicorp/go-getter/test-fixtures/basic-git/DOTgit/logs/refs/heads/test-branch new file mode 100644 index 000000000..937067a2a --- /dev/null +++ b/vendor/github.com/hashicorp/go-getter/test-fixtures/basic-git/DOTgit/logs/refs/heads/test-branch @@ -0,0 +1,2 @@ +0000000000000000000000000000000000000000 1f31e97f053caeb5d6b7bffa3faf82941c99efa2 Mitchell Hashimoto 1410886909 -0700 branch: Created from HEAD +1f31e97f053caeb5d6b7bffa3faf82941c99efa2 7b7614f8759ac8b5e4b02be65ad8e2667be6dd87 Mitchell Hashimoto 1410886913 -0700 commit: Branch diff --git a/vendor/github.com/hashicorp/go-getter/test-fixtures/basic-git/DOTgit/objects/14/6492b04efe0aae2b8288c5c0aef6a951030fde b/vendor/github.com/hashicorp/go-getter/test-fixtures/basic-git/DOTgit/objects/14/6492b04efe0aae2b8288c5c0aef6a951030fde new file mode 100644 index 0000000000000000000000000000000000000000..2a713ec7cf4938e12e84fc7f0aa6231c45c9a946 GIT binary patch literal 170 zcmV;b09F5Z0j^8 zP`O@wiXbUzWpDU7*Rk7Mo3iS**>8TVt&chE=W*a|=k?F7_s(rQ%jjPgA^;%)p#5;< YoH_I;rvA&A;Zp45nM>Qv3xnxYMYiKtt^fc4 literal 0 HcmV?d00001 diff --git a/vendor/github.com/hashicorp/go-getter/test-fixtures/basic-git/DOTgit/objects/1d/3d6744266642cb7623e2c678c33c77b075c49f b/vendor/github.com/hashicorp/go-getter/test-fixtures/basic-git/DOTgit/objects/1d/3d6744266642cb7623e2c678c33c77b075c49f new file mode 100644 index 0000000000000000000000000000000000000000..2518fd6ac51a5a3b76ebf290ea8866b81746de0c GIT binary patch literal 84 zcmV-a0IUCa0V^p=O;s?nU@$Z=Ff%bx$W6@5(<@11urNq2jQC!%i0{sU{W8An8}_#! qu{ALO0)^tzq?F7eh90TPQ}Q#mUEq)YdhE=+UK>u`%ew&du^oyiI4LLq literal 0 HcmV?d00001 diff --git a/vendor/github.com/hashicorp/go-getter/test-fixtures/basic-git/DOTgit/objects/1f/31e97f053caeb5d6b7bffa3faf82941c99efa2 b/vendor/github.com/hashicorp/go-getter/test-fixtures/basic-git/DOTgit/objects/1f/31e97f053caeb5d6b7bffa3faf82941c99efa2 new file mode 100644 index 0000000000000000000000000000000000000000..5793a840b76235dafcb144ed66d1b8d274ed76bf GIT binary patch literal 167 zcmV;Y09gNc0jde)V4 V5S{)q#u}CSCf;!s>;oX7Q=N++Phyo>KhY@))4ka?%IV3*@oJ1_}!Me>)YD$BHN~~{94vhTh*t2;A_M3v&+5kx(&IOvjz`l>`{yQXvi4V SwJ)0dC8iqRL45!N?@<9%)ls?t literal 0 HcmV?d00001 diff --git a/vendor/github.com/hashicorp/go-getter/test-fixtures/basic-git/DOTgit/objects/25/851303ab930fdacfdb7df91adbf626a483df48 b/vendor/github.com/hashicorp/go-getter/test-fixtures/basic-git/DOTgit/objects/25/851303ab930fdacfdb7df91adbf626a483df48 new file mode 100644 index 000000000..80de4b622 --- /dev/null +++ b/vendor/github.com/hashicorp/go-getter/test-fixtures/basic-git/DOTgit/objects/25/851303ab930fdacfdb7df91adbf626a483df48 @@ -0,0 +1 @@ +x­ÎK `לb.`3P(ãÒ‡`°$ELÅûKL¼Û/ù±ÕZ:(c}g†¤æÅ‘EíX²—ž9%Ež”5Aº}ÔÒIñ¤¤^´W5gF"VÁ)碉HœòFâŒ9± w_Û·ÒãÊÛWz­¥¶Þà4|mZv¹W*Û[=­­5Ò:8¢ECG¢ó_Ê¥¹l,>ÝÖT2 \ No newline at end of file diff --git a/vendor/github.com/hashicorp/go-getter/test-fixtures/basic-git/DOTgit/objects/38/30637158f774a20edcc0bf1c4d07b0bf87c43d b/vendor/github.com/hashicorp/go-getter/test-fixtures/basic-git/DOTgit/objects/38/30637158f774a20edcc0bf1c4d07b0bf87c43d new file mode 100644 index 0000000000000000000000000000000000000000..ef8ebf7282975b2da2da3cfe6cef1477fcca395c GIT binary patch literal 59 zcmV-B0L1@z0ZYosPf{>3XHZt~NX^N~=iAXD~D{Ff%bx$W6@5(<@11urNq2jQC!%i0{sU{W8An8}_#! 
qu|-l6pH!5Xmz)7o`E2f^*_$q2bN24mTvr}-x_ê­Uƒ´“-gˆ³³‚&—çx»G EBöyA >ÅÅÅ/)}ƒk•TòºÂ…Ÿ¥¶.ü´©üÚéѸ®SêíÚéàl öHˆjÔqHþ ¦ÎðõÔ%øD‡ \ No newline at end of file diff --git a/vendor/github.com/hashicorp/go-getter/test-fixtures/basic-git/DOTgit/objects/7b/7614f8759ac8b5e4b02be65ad8e2667be6dd87 b/vendor/github.com/hashicorp/go-getter/test-fixtures/basic-git/DOTgit/objects/7b/7614f8759ac8b5e4b02be65ad8e2667be6dd87 new file mode 100644 index 000000000..abe281a74 --- /dev/null +++ b/vendor/github.com/hashicorp/go-getter/test-fixtures/basic-git/DOTgit/objects/7b/7614f8759ac8b5e4b02be65ad8e2667be6dd87 @@ -0,0 +1,2 @@ +x­ÎK +Â0…aÇYE6`Iš7ˆˆ#'.â&¹×šFbÜ¿EpN?ø'µZ—Ág££#r-´•>…l&`²²&éQdŠÑ€ò.YØ:nƒKRƒ#aTŒ&Ûè"(òsÐ2…€3ƒ÷(­óû2RÁuå7x•¥¶Ñøi?ðµ©üìò¨°¬SjõÌ¥–Â{¤âGá„`»îÅÀ¿Œ±k‡-öÛS„ \ No newline at end of file diff --git a/vendor/github.com/hashicorp/go-getter/test-fixtures/basic-git/DOTgit/objects/8c/1a79ca1f98b6d00f5bf5c6cc9e8d3c092dd3ba b/vendor/github.com/hashicorp/go-getter/test-fixtures/basic-git/DOTgit/objects/8c/1a79ca1f98b6d00f5bf5c6cc9e8d3c092dd3ba new file mode 100644 index 0000000000000000000000000000000000000000..656ae8e33e7cb6dd2220cec705ebf707caad03c0 GIT binary patch literal 51 zcmbc HOYk88vc(gk literal 0 HcmV?d00001 diff --git a/vendor/github.com/hashicorp/go-getter/test-fixtures/basic-git/DOTgit/objects/96/43088174e25a9bd91c27970a580af0085c9f32 b/vendor/github.com/hashicorp/go-getter/test-fixtures/basic-git/DOTgit/objects/96/43088174e25a9bd91c27970a580af0085c9f32 new file mode 100644 index 0000000000000000000000000000000000000000..387943288da570c5373bc4f0cb7b8005536653e7 GIT binary patch literal 52 zcmV-40L%Y)0V^p=O;s>9WiT`_Ff%bx$W6@5(<@11urNq2jQC!%i0{sU{W8An8}_#! Ku>}Anzz?PeB^DI` literal 0 HcmV?d00001 diff --git a/vendor/github.com/hashicorp/go-getter/test-fixtures/basic-git/DOTgit/objects/b7/757b6a3696ad036e9aa2f5b4856d09e7f17993 b/vendor/github.com/hashicorp/go-getter/test-fixtures/basic-git/DOTgit/objects/b7/757b6a3696ad036e9aa2f5b4856d09e7f17993 new file mode 100644 index 0000000000000000000000000000000000000000..10192566594b6bfa1e9d04c558444006bb2f27fe GIT binary patch literal 82 zcmV-Y0ImOc0V^p=O;s>AWiT`_Ff%bx$W6@5(<@11urNq2jQC!%i0{sU{W8An8}_#! 
ou|-l6Uy_(^2vYZK?xWe8E?#r??$%sa9(Ci;lb+y-0LsiEdb9E;RsaA1 literal 0 HcmV?d00001 diff --git a/vendor/github.com/hashicorp/go-getter/test-fixtures/basic-git/DOTgit/objects/d2/368a7048e1e919eebaad2a9a275b18cc9c4181 b/vendor/github.com/hashicorp/go-getter/test-fixtures/basic-git/DOTgit/objects/d2/368a7048e1e919eebaad2a9a275b18cc9c4181 new file mode 100644 index 0000000000000000000000000000000000000000..10e19bed9bc0327ea3eea6891ed406b4c89da2b7 GIT binary patch literal 116 zcmV-)0E_>40V^p=O;s>7FlI0`FfcPQQAo?r*DI+gVR)I!Qg!`kMQW2x?M!W1mb_(o z_bZ@EauYN2^h(kgEDVwhBfgg`;=6NTzl<;YhW+hFY)uS+K%uxaDJ8Rrp+~Cnl>Cfs W7x<&U9y>Fy*M?K~@-6_k2`djTDKrrP literal 0 HcmV?d00001 diff --git a/vendor/github.com/hashicorp/go-getter/test-fixtures/basic-git/DOTgit/objects/e6/9de29bb2d1d6434b8b29ae775ad8c2e48c5391 b/vendor/github.com/hashicorp/go-getter/test-fixtures/basic-git/DOTgit/objects/e6/9de29bb2d1d6434b8b29ae775ad8c2e48c5391 new file mode 100644 index 0000000000000000000000000000000000000000..711223894375fe1186ac5bfffdc48fb1fa1e65cc GIT binary patch literal 15 WcmbY%CIzaj8qGbEc>?kDFXw;K?Vi^Mur6piZdizBw|mBJalrree3)b00(*wbpQYW literal 0 HcmV?d00001 diff --git a/vendor/github.com/hashicorp/go-getter/test-fixtures/decompress-gz/single.gz b/vendor/github.com/hashicorp/go-getter/test-fixtures/decompress-gz/single.gz new file mode 100644 index 0000000000000000000000000000000000000000..00754d02d1b766c6fe83562c04e4a1a3c389596d GIT binary patch literal 29 kcmb2|=HL+Yn;OQzoR*oB%HVza{BtIT6{$vbEDQ_`0DQX$Hvj+t literal 0 HcmV?d00001 diff --git a/vendor/github.com/hashicorp/go-getter/test-fixtures/decompress-tbz2/empty.tar.bz2 b/vendor/github.com/hashicorp/go-getter/test-fixtures/decompress-tbz2/empty.tar.bz2 new file mode 100644 index 0000000000000000000000000000000000000000..a70463740ad1149f75a68a13fc3c0fc7b22c68fd GIT binary patch literal 46 zcmZ>Y%CIzaj8qGbjI7@Agn>aMfZ+fG3j+g(0)xRBR?TI?N$TA@mF8%8=kY5~ literal 0 HcmV?d00001 diff --git a/vendor/github.com/hashicorp/go-getter/test-fixtures/decompress-tbz2/multiple.tar.bz2 b/vendor/github.com/hashicorp/go-getter/test-fixtures/decompress-tbz2/multiple.tar.bz2 new file mode 100644 index 0000000000000000000000000000000000000000..1d59e6ad2ce760bf7ff834cee8570812ae501c24 GIT binary patch literal 166 zcmV;X09pS+T4*^jL0KkKS*=**^Z)>``;gHP2#`Pke}Di8Yi6DR1ONaiFaWqlG{j*V zU;qHdn2aM#000G2H8h%;lru)7C#jS5O`9*CnlxbJKvO7z7UyBRg2+OM-B@PTMx>`H zWZ)vlH64^mMhc_9WTiTM;^eF)#j7-}RthNzu1dCASsqclX3q}yyBF&hr=||A9vHbi UuDR^Hq9EA2k}1N3g=)typwQSw`2YX_ literal 0 HcmV?d00001 diff --git a/vendor/github.com/hashicorp/go-getter/test-fixtures/decompress-tbz2/single.tar.bz2 b/vendor/github.com/hashicorp/go-getter/test-fixtures/decompress-tbz2/single.tar.bz2 new file mode 100644 index 0000000000000000000000000000000000000000..115f9a4aeb2ce15195187c8952191b17e5155fbb GIT binary patch literal 135 zcmV;20C@jGT4*^jL0KkKSq`FV*Z=^5dyvr(2!TKVe}Di4Yi6DR1ONaCAOLhAjWq$F zH1!%Z)6^1BKr{x800~R&VFw`vR3s!?@alJNC#0hYyhCR~w{Pwah*C7LewukF%UHc4vzO8hz+9IHtv3LJQ!E(`&M))_)b(-tu-EZ2c*ZQKSU{X&+{@V9FCik+x?$Cc89r@HscY5K8 z$~!Z(lUK#RTPbfi--cuE^UL?F6YQgYy_eemG5Y`STmL=&|8M-q%zywgw}qcRy!Dnr HgMk46B&J9$ literal 0 HcmV?d00001 diff --git a/vendor/github.com/hashicorp/go-getter/test-fixtures/decompress-tgz/single.tar.gz b/vendor/github.com/hashicorp/go-getter/test-fixtures/decompress-tgz/single.tar.gz new file mode 100644 index 0000000000000000000000000000000000000000..921bdacc6f6c00c099f7f64a3bfd1dc5d4859cbe GIT binary patch literal 137 zcmb2|=3q!Sn;OQz{Pw&fSF?dYYvOg=qcVv;NmX(aSz1LFZvAd9Rk-|tlh?!lX8nzc z6~gTzt7flURqi*f=2x1pY3e8 
jr&}fb-%W8(Id3m`{Nu;hd<@8-;dPo|NzQQw4F(1PJX<=r literal 0 HcmV?d00001 diff --git a/vendor/github.com/hashicorp/go-getter/test-fixtures/decompress-zip/empty.zip b/vendor/github.com/hashicorp/go-getter/test-fixtures/decompress-zip/empty.zip new file mode 100644 index 0000000000000000000000000000000000000000..15cb0ecb3e219d1701294bfdf0fe3f5cb5d208e7 GIT binary patch literal 22 NcmWIWW@Tf*000g10H*)| literal 0 HcmV?d00001 diff --git a/vendor/github.com/hashicorp/go-getter/test-fixtures/decompress-zip/multiple.zip b/vendor/github.com/hashicorp/go-getter/test-fixtures/decompress-zip/multiple.zip new file mode 100644 index 0000000000000000000000000000000000000000..a032181d43b89865241dd9e1d05dc5712706267d GIT binary patch literal 306 zcmWIWW@h1H0D%o5Ivy)hjp|r{Y!GH;kYPy6%tzw+5H!Mwun~+*a?H5gApx|AfdObG!;(f23+gIXh^x?C6yQy)QO;1KfUd)3 S6e}CZJ|-ZX2Bh0S90mY1;x;}2 literal 0 HcmV?d00001 diff --git a/vendor/github.com/hashicorp/go-getter/test-fixtures/decompress-zip/single.zip b/vendor/github.com/hashicorp/go-getter/test-fixtures/decompress-zip/single.zip new file mode 100644 index 0000000000000000000000000000000000000000..90687fde18f0470ec513362f7cc06ae17b0abf62 GIT binary patch literal 162 zcmWIWW@h1H00Ey69gh{MMs+MeHU!Hsq-Ex$hK6u5FdMi}4KoJf(h6<{MwYLPKqVqT zb!qweTmjyUOmfV)%#Z*X%>Y!-u%r>hf|$q(F%iwQ0B=?{kQzoH^a9dOAPxfna#|Ph literal 0 HcmV?d00001 diff --git a/vendor/github.com/hashicorp/go-getter/test-fixtures/dup/foo/main.tf b/vendor/github.com/hashicorp/go-getter/test-fixtures/dup/foo/main.tf new file mode 100644 index 000000000..e69de29bb diff --git a/vendor/github.com/hashicorp/go-getter/test-fixtures/dup/main.tf b/vendor/github.com/hashicorp/go-getter/test-fixtures/dup/main.tf new file mode 100644 index 000000000..98efd6e4f --- /dev/null +++ b/vendor/github.com/hashicorp/go-getter/test-fixtures/dup/main.tf @@ -0,0 +1,7 @@ +module "foo" { + source = "./foo" +} + +module "foo" { + source = "./foo" +} diff --git a/vendor/github.com/hashicorp/go-getter/test-fixtures/git-branch-update/DOTgit-1/COMMIT_EDITMSG b/vendor/github.com/hashicorp/go-getter/test-fixtures/git-branch-update/DOTgit-1/COMMIT_EDITMSG new file mode 100644 index 000000000..d5297f6c9 --- /dev/null +++ b/vendor/github.com/hashicorp/go-getter/test-fixtures/git-branch-update/DOTgit-1/COMMIT_EDITMSG @@ -0,0 +1,7 @@ +Initial branch +# Please enter the commit message for your changes. Lines starting +# with '#' will be ignored, and an empty message aborts the commit. 
+# On branch test-branch +# Changes to be committed: +# new file: main_branch.tf +# diff --git a/vendor/github.com/hashicorp/go-getter/test-fixtures/git-branch-update/DOTgit-1/HEAD b/vendor/github.com/hashicorp/go-getter/test-fixtures/git-branch-update/DOTgit-1/HEAD new file mode 100644 index 000000000..b04b97d48 --- /dev/null +++ b/vendor/github.com/hashicorp/go-getter/test-fixtures/git-branch-update/DOTgit-1/HEAD @@ -0,0 +1 @@ +ref: refs/heads/test-branch diff --git a/vendor/github.com/hashicorp/go-getter/test-fixtures/git-branch-update/DOTgit-1/config b/vendor/github.com/hashicorp/go-getter/test-fixtures/git-branch-update/DOTgit-1/config new file mode 100644 index 000000000..6c9406b7d --- /dev/null +++ b/vendor/github.com/hashicorp/go-getter/test-fixtures/git-branch-update/DOTgit-1/config @@ -0,0 +1,7 @@ +[core] + repositoryformatversion = 0 + filemode = true + bare = false + logallrefupdates = true + ignorecase = true + precomposeunicode = true diff --git a/vendor/github.com/hashicorp/go-getter/test-fixtures/git-branch-update/DOTgit-1/description b/vendor/github.com/hashicorp/go-getter/test-fixtures/git-branch-update/DOTgit-1/description new file mode 100644 index 000000000..498b267a8 --- /dev/null +++ b/vendor/github.com/hashicorp/go-getter/test-fixtures/git-branch-update/DOTgit-1/description @@ -0,0 +1 @@ +Unnamed repository; edit this file 'description' to name the repository. diff --git a/vendor/github.com/hashicorp/go-getter/test-fixtures/git-branch-update/DOTgit-1/hooks/applypatch-msg.sample b/vendor/github.com/hashicorp/go-getter/test-fixtures/git-branch-update/DOTgit-1/hooks/applypatch-msg.sample new file mode 100644 index 000000000..8b2a2fe84 --- /dev/null +++ b/vendor/github.com/hashicorp/go-getter/test-fixtures/git-branch-update/DOTgit-1/hooks/applypatch-msg.sample @@ -0,0 +1,15 @@ +#!/bin/sh +# +# An example hook script to check the commit log message taken by +# applypatch from an e-mail message. +# +# The hook should exit with non-zero status after issuing an +# appropriate message if it wants to stop the commit. The hook is +# allowed to edit the commit message file. +# +# To enable this hook, rename this file to "applypatch-msg". + +. git-sh-setup +test -x "$GIT_DIR/hooks/commit-msg" && + exec "$GIT_DIR/hooks/commit-msg" ${1+"$@"} +: diff --git a/vendor/github.com/hashicorp/go-getter/test-fixtures/git-branch-update/DOTgit-1/hooks/commit-msg.sample b/vendor/github.com/hashicorp/go-getter/test-fixtures/git-branch-update/DOTgit-1/hooks/commit-msg.sample new file mode 100644 index 000000000..b58d1184a --- /dev/null +++ b/vendor/github.com/hashicorp/go-getter/test-fixtures/git-branch-update/DOTgit-1/hooks/commit-msg.sample @@ -0,0 +1,24 @@ +#!/bin/sh +# +# An example hook script to check the commit log message. +# Called by "git commit" with one argument, the name of the file +# that has the commit message. The hook should exit with non-zero +# status after issuing an appropriate message if it wants to stop the +# commit. The hook is allowed to edit the commit message file. +# +# To enable this hook, rename this file to "commit-msg". + +# Uncomment the below to add a Signed-off-by line to the message. +# Doing this in a hook is a bad idea in general, but the prepare-commit-msg +# hook is more suited to it. +# +# SOB=$(git var GIT_AUTHOR_IDENT | sed -n 's/^\(.*>\).*$/Signed-off-by: \1/p') +# grep -qs "^$SOB" "$1" || echo "$SOB" >> "$1" + +# This example catches duplicate Signed-off-by lines. 
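+#
+# For instance, a message ending in the two lines below (hypothetical
+# input) is rejected: "sort | uniq -c" gives the repeated line a count
+# of 2, the sed filter only deletes lines counted exactly once, and the
+# surviving output makes the command substitution non-empty:
+#
+#   Signed-off-by: A U Thor <author@example.com>
+#   Signed-off-by: A U Thor <author@example.com>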
+ +test "" = "$(grep '^Signed-off-by: ' "$1" | + sort | uniq -c | sed -e '/^[ ]*1[ ]/d')" || { + echo >&2 Duplicate Signed-off-by lines. + exit 1 +} diff --git a/vendor/github.com/hashicorp/go-getter/test-fixtures/git-branch-update/DOTgit-1/hooks/post-update.sample b/vendor/github.com/hashicorp/go-getter/test-fixtures/git-branch-update/DOTgit-1/hooks/post-update.sample new file mode 100644 index 000000000..ec17ec193 --- /dev/null +++ b/vendor/github.com/hashicorp/go-getter/test-fixtures/git-branch-update/DOTgit-1/hooks/post-update.sample @@ -0,0 +1,8 @@ +#!/bin/sh +# +# An example hook script to prepare a packed repository for use over +# dumb transports. +# +# To enable this hook, rename this file to "post-update". + +exec git update-server-info diff --git a/vendor/github.com/hashicorp/go-getter/test-fixtures/git-branch-update/DOTgit-1/hooks/pre-applypatch.sample b/vendor/github.com/hashicorp/go-getter/test-fixtures/git-branch-update/DOTgit-1/hooks/pre-applypatch.sample new file mode 100644 index 000000000..b1f187c2e --- /dev/null +++ b/vendor/github.com/hashicorp/go-getter/test-fixtures/git-branch-update/DOTgit-1/hooks/pre-applypatch.sample @@ -0,0 +1,14 @@ +#!/bin/sh +# +# An example hook script to verify what is about to be committed +# by applypatch from an e-mail message. +# +# The hook should exit with non-zero status after issuing an +# appropriate message if it wants to stop the commit. +# +# To enable this hook, rename this file to "pre-applypatch". + +. git-sh-setup +test -x "$GIT_DIR/hooks/pre-commit" && + exec "$GIT_DIR/hooks/pre-commit" ${1+"$@"} +: diff --git a/vendor/github.com/hashicorp/go-getter/test-fixtures/git-branch-update/DOTgit-1/hooks/pre-commit.sample b/vendor/github.com/hashicorp/go-getter/test-fixtures/git-branch-update/DOTgit-1/hooks/pre-commit.sample new file mode 100644 index 000000000..68d62d544 --- /dev/null +++ b/vendor/github.com/hashicorp/go-getter/test-fixtures/git-branch-update/DOTgit-1/hooks/pre-commit.sample @@ -0,0 +1,49 @@ +#!/bin/sh +# +# An example hook script to verify what is about to be committed. +# Called by "git commit" with no arguments. The hook should +# exit with non-zero status after issuing an appropriate message if +# it wants to stop the commit. +# +# To enable this hook, rename this file to "pre-commit". + +if git rev-parse --verify HEAD >/dev/null 2>&1 +then + against=HEAD +else + # Initial commit: diff against an empty tree object + against=4b825dc642cb6eb9a060e54bf8d69288fbee4904 +fi + +# If you want to allow non-ASCII filenames set this variable to true. +allownonascii=$(git config --bool hooks.allownonascii) + +# Redirect output to stderr. +exec 1>&2 + +# Cross platform projects tend to avoid non-ASCII filenames; prevent +# them from being added to the repository. We exploit the fact that the +# printable range starts at the space character and ends with tilde. +if [ "$allownonascii" != "true" ] && + # Note that the use of brackets around a tr range is ok here, (it's + # even required, for portability to Solaris 10's /usr/bin/tr), since + # the square bracket bytes happen to fall in the designated range. + test $(git diff --cached --name-only --diff-filter=A -z $against | + LC_ALL=C tr -d '[ -~]\0' | wc -c) != 0 +then + cat <<\EOF +Error: Attempt to add a non-ASCII file name. + +This can cause problems if you want to work with people on other platforms. + +To be portable it is advisable to rename the file. 
+ +If you know what you are doing you can disable this check using: + + git config hooks.allownonascii true +EOF + exit 1 +fi + +# If there are whitespace errors, print the offending file names and fail. +exec git diff-index --check --cached $against -- diff --git a/vendor/github.com/hashicorp/go-getter/test-fixtures/git-branch-update/DOTgit-1/hooks/pre-push.sample b/vendor/github.com/hashicorp/go-getter/test-fixtures/git-branch-update/DOTgit-1/hooks/pre-push.sample new file mode 100644 index 000000000..6187dbf43 --- /dev/null +++ b/vendor/github.com/hashicorp/go-getter/test-fixtures/git-branch-update/DOTgit-1/hooks/pre-push.sample @@ -0,0 +1,53 @@ +#!/bin/sh + +# An example hook script to verify what is about to be pushed. Called by "git +# push" after it has checked the remote status, but before anything has been +# pushed. If this script exits with a non-zero status nothing will be pushed. +# +# This hook is called with the following parameters: +# +# $1 -- Name of the remote to which the push is being done +# $2 -- URL to which the push is being done +# +# If pushing without using a named remote those arguments will be equal. +# +# Information about the commits which are being pushed is supplied as lines to +# the standard input in the form: +# +# +# +# This sample shows how to prevent push of commits where the log message starts +# with "WIP" (work in progress). + +remote="$1" +url="$2" + +z40=0000000000000000000000000000000000000000 + +while read local_ref local_sha remote_ref remote_sha +do + if [ "$local_sha" = $z40 ] + then + # Handle delete + : + else + if [ "$remote_sha" = $z40 ] + then + # New branch, examine all commits + range="$local_sha" + else + # Update to existing branch, examine new commits + range="$remote_sha..$local_sha" + fi + + # Check for WIP commit + commit=`git rev-list -n 1 --grep '^WIP' "$range"` + if [ -n "$commit" ] + then + echo >&2 "Found WIP commit in $local_ref, not pushing" + exit 1 + fi + fi +done + +exit 0 diff --git a/vendor/github.com/hashicorp/go-getter/test-fixtures/git-branch-update/DOTgit-1/hooks/pre-rebase.sample b/vendor/github.com/hashicorp/go-getter/test-fixtures/git-branch-update/DOTgit-1/hooks/pre-rebase.sample new file mode 100644 index 000000000..9773ed4cb --- /dev/null +++ b/vendor/github.com/hashicorp/go-getter/test-fixtures/git-branch-update/DOTgit-1/hooks/pre-rebase.sample @@ -0,0 +1,169 @@ +#!/bin/sh +# +# Copyright (c) 2006, 2008 Junio C Hamano +# +# The "pre-rebase" hook is run just before "git rebase" starts doing +# its job, and can prevent the command from running by exiting with +# non-zero status. +# +# The hook is called with the following parameters: +# +# $1 -- the upstream the series was forked from. +# $2 -- the branch being rebased (or empty when rebasing the current branch). +# +# This sample shows how to prevent topic branches that are already +# merged to 'next' branch from getting rebased, because allowing it +# would result in rebasing already published history. + +publish=next +basebranch="$1" +if test "$#" = 2 +then + topic="refs/heads/$2" +else + topic=`git symbolic-ref HEAD` || + exit 0 ;# we do not interrupt rebasing detached HEAD +fi + +case "$topic" in +refs/heads/??/*) + ;; +*) + exit 0 ;# we do not interrupt others. + ;; +esac + +# Now we are dealing with a topic branch being rebased +# on top of master. Is it OK to rebase it? + +# Does the topic really exist? +git show-ref -q "$topic" || { + echo >&2 "No such branch $topic" + exit 1 +} + +# Is topic fully merged to master? 
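+# (Empty output from "git rev-list ^master $topic" means every commit
+# on the topic is already reachable from master, i.e. fully merged.)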
+not_in_master=`git rev-list --pretty=oneline ^master "$topic"` +if test -z "$not_in_master" +then + echo >&2 "$topic is fully merged to master; better remove it." + exit 1 ;# we could allow it, but there is no point. +fi + +# Is topic ever merged to next? If so you should not be rebasing it. +only_next_1=`git rev-list ^master "^$topic" ${publish} | sort` +only_next_2=`git rev-list ^master ${publish} | sort` +if test "$only_next_1" = "$only_next_2" +then + not_in_topic=`git rev-list "^$topic" master` + if test -z "$not_in_topic" + then + echo >&2 "$topic is already up-to-date with master" + exit 1 ;# we could allow it, but there is no point. + else + exit 0 + fi +else + not_in_next=`git rev-list --pretty=oneline ^${publish} "$topic"` + /usr/bin/perl -e ' + my $topic = $ARGV[0]; + my $msg = "* $topic has commits already merged to public branch:\n"; + my (%not_in_next) = map { + /^([0-9a-f]+) /; + ($1 => 1); + } split(/\n/, $ARGV[1]); + for my $elem (map { + /^([0-9a-f]+) (.*)$/; + [$1 => $2]; + } split(/\n/, $ARGV[2])) { + if (!exists $not_in_next{$elem->[0]}) { + if ($msg) { + print STDERR $msg; + undef $msg; + } + print STDERR " $elem->[1]\n"; + } + } + ' "$topic" "$not_in_next" "$not_in_master" + exit 1 +fi + +exit 0 + +################################################################ + +This sample hook safeguards topic branches that have been +published from being rewound. + +The workflow assumed here is: + + * Once a topic branch forks from "master", "master" is never + merged into it again (either directly or indirectly). + + * Once a topic branch is fully cooked and merged into "master", + it is deleted. If you need to build on top of it to correct + earlier mistakes, a new topic branch is created by forking at + the tip of the "master". This is not strictly necessary, but + it makes it easier to keep your history simple. + + * Whenever you need to test or publish your changes to topic + branches, merge them into "next" branch. + +The script, being an example, hardcodes the publish branch name +to be "next", but it is trivial to make it configurable via +$GIT_DIR/config mechanism. + +With this workflow, you would want to know: + +(1) ... if a topic branch has ever been merged to "next". Young + topic branches can have stupid mistakes you would rather + clean up before publishing, and things that have not been + merged into other branches can be easily rebased without + affecting other people. But once it is published, you would + not want to rewind it. + +(2) ... if a topic branch has been fully merged to "master". + Then you can delete it. More importantly, you should not + build on top of it -- other people may already want to + change things related to the topic as patches against your + "master", so if you need further changes, it is better to + fork the topic (perhaps with the same name) afresh from the + tip of "master". + +Let's look at this example: + + o---o---o---o---o---o---o---o---o---o "next" + / / / / + / a---a---b A / / + / / / / + / / c---c---c---c B / + / / / \ / + / / / b---b C \ / + / / / / \ / + ---o---o---o---o---o---o---o---o---o---o---o "master" + + +A, B and C are topic branches. + + * A has one fix since it was merged up to "next". + + * B has finished. It has been fully merged up to "master" and "next", + and is ready to be deleted. + + * C has not merged to "next" at all. + +We would want to allow C to be rebased, refuse A, and encourage +B to be deleted. 
+ +To compute (1): + + git rev-list ^master ^topic next + git rev-list ^master next + + if these match, topic has not merged in next at all. + +To compute (2): + + git rev-list master..topic + + if this is empty, it is fully merged to "master". diff --git a/vendor/github.com/hashicorp/go-getter/test-fixtures/git-branch-update/DOTgit-1/hooks/prepare-commit-msg.sample b/vendor/github.com/hashicorp/go-getter/test-fixtures/git-branch-update/DOTgit-1/hooks/prepare-commit-msg.sample new file mode 100644 index 000000000..f093a02ec --- /dev/null +++ b/vendor/github.com/hashicorp/go-getter/test-fixtures/git-branch-update/DOTgit-1/hooks/prepare-commit-msg.sample @@ -0,0 +1,36 @@ +#!/bin/sh +# +# An example hook script to prepare the commit log message. +# Called by "git commit" with the name of the file that has the +# commit message, followed by the description of the commit +# message's source. The hook's purpose is to edit the commit +# message file. If the hook fails with a non-zero status, +# the commit is aborted. +# +# To enable this hook, rename this file to "prepare-commit-msg". + +# This hook includes three examples. The first comments out the +# "Conflicts:" part of a merge commit. +# +# The second includes the output of "git diff --name-status -r" +# into the message, just before the "git status" output. It is +# commented because it doesn't cope with --amend or with squashed +# commits. +# +# The third example adds a Signed-off-by line to the message, that can +# still be edited. This is rarely a good idea. + +case "$2,$3" in + merge,) + /usr/bin/perl -i.bak -ne 's/^/# /, s/^# #/#/ if /^Conflicts/ .. /#/; print' "$1" ;; + +# ,|template,) +# /usr/bin/perl -i.bak -pe ' +# print "\n" . `git diff --cached --name-status -r` +# if /^#/ && $first++ == 0' "$1" ;; + + *) ;; +esac + +# SOB=$(git var GIT_AUTHOR_IDENT | sed -n 's/^\(.*>\).*$/Signed-off-by: \1/p') +# grep -qs "^$SOB" "$1" || echo "$SOB" >> "$1" diff --git a/vendor/github.com/hashicorp/go-getter/test-fixtures/git-branch-update/DOTgit-1/hooks/update.sample b/vendor/github.com/hashicorp/go-getter/test-fixtures/git-branch-update/DOTgit-1/hooks/update.sample new file mode 100644 index 000000000..d84758373 --- /dev/null +++ b/vendor/github.com/hashicorp/go-getter/test-fixtures/git-branch-update/DOTgit-1/hooks/update.sample @@ -0,0 +1,128 @@ +#!/bin/sh +# +# An example hook script to blocks unannotated tags from entering. +# Called by "git receive-pack" with arguments: refname sha1-old sha1-new +# +# To enable this hook, rename this file to "update". +# +# Config +# ------ +# hooks.allowunannotated +# This boolean sets whether unannotated tags will be allowed into the +# repository. By default they won't be. +# hooks.allowdeletetag +# This boolean sets whether deleting tags will be allowed in the +# repository. By default they won't be. +# hooks.allowmodifytag +# This boolean sets whether a tag may be modified after creation. By default +# it won't be. +# hooks.allowdeletebranch +# This boolean sets whether deleting branches will be allowed in the +# repository. By default they won't be. +# hooks.denycreatebranch +# This boolean sets whether remotely creating branches will be denied +# in the repository. By default this is allowed. +# + +# --- Command line +refname="$1" +oldrev="$2" +newrev="$3" + +# --- Safety check +if [ -z "$GIT_DIR" ]; then + echo "Don't run this script from the command line." 
>&2
+ echo " (if you want, you could supply GIT_DIR then run" >&2
+ echo " $0 <ref> <oldrev> <newrev>)" >&2
+ exit 1
+fi
+
+if [ -z "$refname" -o -z "$oldrev" -o -z "$newrev" ]; then
+ echo "usage: $0 <ref> <oldrev> <newrev>" >&2
+ exit 1
+fi
+
+# --- Config
+allowunannotated=$(git config --bool hooks.allowunannotated)
+allowdeletebranch=$(git config --bool hooks.allowdeletebranch)
+denycreatebranch=$(git config --bool hooks.denycreatebranch)
+allowdeletetag=$(git config --bool hooks.allowdeletetag)
+allowmodifytag=$(git config --bool hooks.allowmodifytag)
+
+# check for no description
+projectdesc=$(sed -e '1q' "$GIT_DIR/description")
+case "$projectdesc" in
+"Unnamed repository"* | "")
+ echo "*** Project description file hasn't been set" >&2
+ exit 1
+ ;;
+esac
+
+# --- Check types
+# if $newrev is 0000...0000, it's a commit to delete a ref.
+zero="0000000000000000000000000000000000000000"
+if [ "$newrev" = "$zero" ]; then
+ newrev_type=delete
+else
+ newrev_type=$(git cat-file -t $newrev)
+fi
+
+case "$refname","$newrev_type" in
+ refs/tags/*,commit)
+ # un-annotated tag
+ short_refname=${refname##refs/tags/}
+ if [ "$allowunannotated" != "true" ]; then
+ echo "*** The un-annotated tag, $short_refname, is not allowed in this repository" >&2
+ echo "*** Use 'git tag [ -a | -s ]' for tags you want to propagate." >&2
+ exit 1
+ fi
+ ;;
+ refs/tags/*,delete)
+ # delete tag
+ if [ "$allowdeletetag" != "true" ]; then
+ echo "*** Deleting a tag is not allowed in this repository" >&2
+ exit 1
+ fi
+ ;;
+ refs/tags/*,tag)
+ # annotated tag
+ if [ "$allowmodifytag" != "true" ] && git rev-parse $refname > /dev/null 2>&1
+ then
+ echo "*** Tag '$refname' already exists." >&2
+ echo "*** Modifying a tag is not allowed in this repository." >&2
+ exit 1
+ fi
+ ;;
+ refs/heads/*,commit)
+ # branch
+ if [ "$oldrev" = "$zero" -a "$denycreatebranch" = "true" ]; then
+ echo "*** Creating a branch is not allowed in this repository" >&2
+ exit 1
+ fi
+ ;;
+ refs/heads/*,delete)
+ # delete branch
+ if [ "$allowdeletebranch" != "true" ]; then
+ echo "*** Deleting a branch is not allowed in this repository" >&2
+ exit 1
+ fi
+ ;;
+ refs/remotes/*,commit)
+ # tracking branch
+ ;;
+ refs/remotes/*,delete)
+ # delete tracking branch
+ if [ "$allowdeletebranch" != "true" ]; then
+ echo "*** Deleting a tracking branch is not allowed in this repository" >&2
+ exit 1
+ fi
+ ;;
+ *)
+ # Anything else (is there anything else?)
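+ # (For example a ref outside refs/heads, refs/tags and refs/remotes,
+ # such as refs/notes, or an update whose new object is a tree or blob.)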
+ echo "*** Update hook: unknown type of update to ref $refname of type $newrev_type" >&2 + exit 1 + ;; +esac + +# --- Finished +exit 0 diff --git a/vendor/github.com/hashicorp/go-getter/test-fixtures/git-branch-update/DOTgit-1/index b/vendor/github.com/hashicorp/go-getter/test-fixtures/git-branch-update/DOTgit-1/index new file mode 100644 index 0000000000000000000000000000000000000000..a295ccf1b3de8b8e21dcb43ff29ee4a628367329 GIT binary patch literal 217 zcmZ?q402{*U|<5_FdcDeAk6@y85tN@81zc+F)%bPVPIhV3X~E7V)lLi;-4qwO9VV( z`fa+#JSOqsr~0W3?74}Vd3q&j5dD@w3SVrR$1CT!mHm!qk>E Z&VTWyv{B>FVJ 1445730077 -0700 commit (initial): Initial +9a311b181c4ec0fe4d8856b9fc935a191855742a 9a311b181c4ec0fe4d8856b9fc935a191855742a Mitchell Hashimoto 1445730083 -0700 checkout: moving from master to test-branch +9a311b181c4ec0fe4d8856b9fc935a191855742a ae4fc53ed172182cf8ab07d2f087994950a2ccdf Mitchell Hashimoto 1445730106 -0700 commit: Initial branch diff --git a/vendor/github.com/hashicorp/go-getter/test-fixtures/git-branch-update/DOTgit-1/logs/refs/heads/master b/vendor/github.com/hashicorp/go-getter/test-fixtures/git-branch-update/DOTgit-1/logs/refs/heads/master new file mode 100644 index 000000000..c6181dfa6 --- /dev/null +++ b/vendor/github.com/hashicorp/go-getter/test-fixtures/git-branch-update/DOTgit-1/logs/refs/heads/master @@ -0,0 +1 @@ +0000000000000000000000000000000000000000 9a311b181c4ec0fe4d8856b9fc935a191855742a Mitchell Hashimoto 1445730077 -0700 commit (initial): Initial diff --git a/vendor/github.com/hashicorp/go-getter/test-fixtures/git-branch-update/DOTgit-1/logs/refs/heads/test-branch b/vendor/github.com/hashicorp/go-getter/test-fixtures/git-branch-update/DOTgit-1/logs/refs/heads/test-branch new file mode 100644 index 000000000..8c2a94efa --- /dev/null +++ b/vendor/github.com/hashicorp/go-getter/test-fixtures/git-branch-update/DOTgit-1/logs/refs/heads/test-branch @@ -0,0 +1,2 @@ +0000000000000000000000000000000000000000 9a311b181c4ec0fe4d8856b9fc935a191855742a Mitchell Hashimoto 1445730083 -0700 branch: Created from HEAD +9a311b181c4ec0fe4d8856b9fc935a191855742a ae4fc53ed172182cf8ab07d2f087994950a2ccdf Mitchell Hashimoto 1445730106 -0700 commit: Initial branch diff --git a/vendor/github.com/hashicorp/go-getter/test-fixtures/git-branch-update/DOTgit-1/objects/29/5c5449283c0fe38c75ae73144513240e5626a6 b/vendor/github.com/hashicorp/go-getter/test-fixtures/git-branch-update/DOTgit-1/objects/29/5c5449283c0fe38c75ae73144513240e5626a6 new file mode 100644 index 0000000000000000000000000000000000000000..e6653c083a78b119890b30ddffb3a936bc5abdef GIT binary patch literal 84 zcmV-a0IUCa0V^p=O;s>AXD~D{Ff%bx$W6@5(<@11*!M5~c~ZVaz$2#LrfbY&5+8o5 qpNga;KB*`%FF6CG(k?Y>X+gqf23DP=Gj>UAE!FSgi~s;CHXntTV literal 0 HcmV?d00001 diff --git a/vendor/github.com/hashicorp/go-getter/test-fixtures/git-branch-update/DOTgit-1/objects/9a/311b181c4ec0fe4d8856b9fc935a191855742a b/vendor/github.com/hashicorp/go-getter/test-fixtures/git-branch-update/DOTgit-1/objects/9a/311b181c4ec0fe4d8856b9fc935a191855742a new file mode 100644 index 000000000..e116abba7 --- /dev/null +++ b/vendor/github.com/hashicorp/go-getter/test-fixtures/git-branch-update/DOTgit-1/objects/9a/311b181c4ec0fe4d8856b9fc935a191855742a @@ -0,0 +1,2 @@ +x­ÌA +Â0@Q×9Å\À2t:D\êÂC¤cj‰:Þß"x·þ×Vk6 w¶Æaê'ýp ‘˜ˆãTÑË"BʤóøÎ.¼-µnÙ4ÅRà^)×f ŽÛðk]úÙùQC.¶z‚~X<¢ìQݦ[añ/3w}fË¡¸XÙE \ No newline at end of file diff --git a/vendor/github.com/hashicorp/go-getter/test-fixtures/git-branch-update/DOTgit-1/objects/a9/193e348262522550ebacc037f772c52cbfa5d5 
b/vendor/github.com/hashicorp/go-getter/test-fixtures/git-branch-update/DOTgit-1/objects/a9/193e348262522550ebacc037f772c52cbfa5d5 new file mode 100644 index 0000000000000000000000000000000000000000..a6230cdf304482575f86dc72c75a29fb32bfa466 GIT binary patch literal 52 zcmV-40L%Y)0V^p=O;s>9WiT`_Ff%bx$W6@5(<@11*!M5~c~ZVaz$2#LrfbY&5+8o5 Kp9%mvd=U!?XB3(M literal 0 HcmV?d00001 diff --git a/vendor/github.com/hashicorp/go-getter/test-fixtures/git-branch-update/DOTgit-1/objects/ae/4fc53ed172182cf8ab07d2f087994950a2ccdf b/vendor/github.com/hashicorp/go-getter/test-fixtures/git-branch-update/DOTgit-1/objects/ae/4fc53ed172182cf8ab07d2f087994950a2ccdf new file mode 100644 index 0000000000000000000000000000000000000000..57fd1016c4788afe94bd8619c91f864097e7e27f GIT binary patch literal 172 zcmV;d08{^X0j-WpP6IIz1zGzPeE}l3?e<5C5KCC(5Ivm^wlp52!Q7t-Qf`oKN%iW< z=)FyF5ntx63a*e)2*qVD_NG2dBG%+F1VZnERf;ZhG~c}IHleUbTri^qy$j)*GsUIc zqA$UTw10 a9P}`zK4r{&Zf!PRVcB_;YW@I+eN+M*ic_lq literal 0 HcmV?d00001 diff --git a/vendor/github.com/hashicorp/go-getter/test-fixtures/git-branch-update/DOTgit-1/objects/be/fe5fe7626f1850e202fb35ac375c61e1f27f95 b/vendor/github.com/hashicorp/go-getter/test-fixtures/git-branch-update/DOTgit-1/objects/be/fe5fe7626f1850e202fb35ac375c61e1f27f95 new file mode 100644 index 0000000000000000000000000000000000000000..61c8d6ad72e92558b452d07dd00db7158385ab86 GIT binary patch literal 22 dcmb\).*$/Signed-off-by: \1/p') +# grep -qs "^$SOB" "$1" || echo "$SOB" >> "$1" + +# This example catches duplicate Signed-off-by lines. + +test "" = "$(grep '^Signed-off-by: ' "$1" | + sort | uniq -c | sed -e '/^[ ]*1[ ]/d')" || { + echo >&2 Duplicate Signed-off-by lines. + exit 1 +} diff --git a/vendor/github.com/hashicorp/go-getter/test-fixtures/git-branch-update/DOTgit-2/hooks/post-update.sample b/vendor/github.com/hashicorp/go-getter/test-fixtures/git-branch-update/DOTgit-2/hooks/post-update.sample new file mode 100644 index 000000000..ec17ec193 --- /dev/null +++ b/vendor/github.com/hashicorp/go-getter/test-fixtures/git-branch-update/DOTgit-2/hooks/post-update.sample @@ -0,0 +1,8 @@ +#!/bin/sh +# +# An example hook script to prepare a packed repository for use over +# dumb transports. +# +# To enable this hook, rename this file to "post-update". + +exec git update-server-info diff --git a/vendor/github.com/hashicorp/go-getter/test-fixtures/git-branch-update/DOTgit-2/hooks/pre-applypatch.sample b/vendor/github.com/hashicorp/go-getter/test-fixtures/git-branch-update/DOTgit-2/hooks/pre-applypatch.sample new file mode 100644 index 000000000..b1f187c2e --- /dev/null +++ b/vendor/github.com/hashicorp/go-getter/test-fixtures/git-branch-update/DOTgit-2/hooks/pre-applypatch.sample @@ -0,0 +1,14 @@ +#!/bin/sh +# +# An example hook script to verify what is about to be committed +# by applypatch from an e-mail message. +# +# The hook should exit with non-zero status after issuing an +# appropriate message if it wants to stop the commit. +# +# To enable this hook, rename this file to "pre-applypatch". + +. 
git-sh-setup +test -x "$GIT_DIR/hooks/pre-commit" && + exec "$GIT_DIR/hooks/pre-commit" ${1+"$@"} +: diff --git a/vendor/github.com/hashicorp/go-getter/test-fixtures/git-branch-update/DOTgit-2/hooks/pre-commit.sample b/vendor/github.com/hashicorp/go-getter/test-fixtures/git-branch-update/DOTgit-2/hooks/pre-commit.sample new file mode 100644 index 000000000..68d62d544 --- /dev/null +++ b/vendor/github.com/hashicorp/go-getter/test-fixtures/git-branch-update/DOTgit-2/hooks/pre-commit.sample @@ -0,0 +1,49 @@ +#!/bin/sh +# +# An example hook script to verify what is about to be committed. +# Called by "git commit" with no arguments. The hook should +# exit with non-zero status after issuing an appropriate message if +# it wants to stop the commit. +# +# To enable this hook, rename this file to "pre-commit". + +if git rev-parse --verify HEAD >/dev/null 2>&1 +then + against=HEAD +else + # Initial commit: diff against an empty tree object + against=4b825dc642cb6eb9a060e54bf8d69288fbee4904 +fi + +# If you want to allow non-ASCII filenames set this variable to true. +allownonascii=$(git config --bool hooks.allownonascii) + +# Redirect output to stderr. +exec 1>&2 + +# Cross platform projects tend to avoid non-ASCII filenames; prevent +# them from being added to the repository. We exploit the fact that the +# printable range starts at the space character and ends with tilde. +if [ "$allownonascii" != "true" ] && + # Note that the use of brackets around a tr range is ok here, (it's + # even required, for portability to Solaris 10's /usr/bin/tr), since + # the square bracket bytes happen to fall in the designated range. + test $(git diff --cached --name-only --diff-filter=A -z $against | + LC_ALL=C tr -d '[ -~]\0' | wc -c) != 0 +then + cat <<\EOF +Error: Attempt to add a non-ASCII file name. + +This can cause problems if you want to work with people on other platforms. + +To be portable it is advisable to rename the file. + +If you know what you are doing you can disable this check using: + + git config hooks.allownonascii true +EOF + exit 1 +fi + +# If there are whitespace errors, print the offending file names and fail. +exec git diff-index --check --cached $against -- diff --git a/vendor/github.com/hashicorp/go-getter/test-fixtures/git-branch-update/DOTgit-2/hooks/pre-push.sample b/vendor/github.com/hashicorp/go-getter/test-fixtures/git-branch-update/DOTgit-2/hooks/pre-push.sample new file mode 100644 index 000000000..6187dbf43 --- /dev/null +++ b/vendor/github.com/hashicorp/go-getter/test-fixtures/git-branch-update/DOTgit-2/hooks/pre-push.sample @@ -0,0 +1,53 @@ +#!/bin/sh + +# An example hook script to verify what is about to be pushed. Called by "git +# push" after it has checked the remote status, but before anything has been +# pushed. If this script exits with a non-zero status nothing will be pushed. +# +# This hook is called with the following parameters: +# +# $1 -- Name of the remote to which the push is being done +# $2 -- URL to which the push is being done +# +# If pushing without using a named remote those arguments will be equal. +# +# Information about the commits which are being pushed is supplied as lines to +# the standard input in the form: +# +# +# +# This sample shows how to prevent push of commits where the log message starts +# with "WIP" (work in progress). 
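+#
+# Each input line has the form (these are the SP-separated fields read
+# by the loop below):
+#
+#   <local ref> SP <local sha1> SP <remote ref> SP <remote sha1> LF
+#
+# For an update to an existing branch the checked range is
+# remote_sha..local_sha; for a newly created branch every commit
+# reachable from local_sha is examined.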
+ +remote="$1" +url="$2" + +z40=0000000000000000000000000000000000000000 + +while read local_ref local_sha remote_ref remote_sha +do + if [ "$local_sha" = $z40 ] + then + # Handle delete + : + else + if [ "$remote_sha" = $z40 ] + then + # New branch, examine all commits + range="$local_sha" + else + # Update to existing branch, examine new commits + range="$remote_sha..$local_sha" + fi + + # Check for WIP commit + commit=`git rev-list -n 1 --grep '^WIP' "$range"` + if [ -n "$commit" ] + then + echo >&2 "Found WIP commit in $local_ref, not pushing" + exit 1 + fi + fi +done + +exit 0 diff --git a/vendor/github.com/hashicorp/go-getter/test-fixtures/git-branch-update/DOTgit-2/hooks/pre-rebase.sample b/vendor/github.com/hashicorp/go-getter/test-fixtures/git-branch-update/DOTgit-2/hooks/pre-rebase.sample new file mode 100644 index 000000000..9773ed4cb --- /dev/null +++ b/vendor/github.com/hashicorp/go-getter/test-fixtures/git-branch-update/DOTgit-2/hooks/pre-rebase.sample @@ -0,0 +1,169 @@ +#!/bin/sh +# +# Copyright (c) 2006, 2008 Junio C Hamano +# +# The "pre-rebase" hook is run just before "git rebase" starts doing +# its job, and can prevent the command from running by exiting with +# non-zero status. +# +# The hook is called with the following parameters: +# +# $1 -- the upstream the series was forked from. +# $2 -- the branch being rebased (or empty when rebasing the current branch). +# +# This sample shows how to prevent topic branches that are already +# merged to 'next' branch from getting rebased, because allowing it +# would result in rebasing already published history. + +publish=next +basebranch="$1" +if test "$#" = 2 +then + topic="refs/heads/$2" +else + topic=`git symbolic-ref HEAD` || + exit 0 ;# we do not interrupt rebasing detached HEAD +fi + +case "$topic" in +refs/heads/??/*) + ;; +*) + exit 0 ;# we do not interrupt others. + ;; +esac + +# Now we are dealing with a topic branch being rebased +# on top of master. Is it OK to rebase it? + +# Does the topic really exist? +git show-ref -q "$topic" || { + echo >&2 "No such branch $topic" + exit 1 +} + +# Is topic fully merged to master? +not_in_master=`git rev-list --pretty=oneline ^master "$topic"` +if test -z "$not_in_master" +then + echo >&2 "$topic is fully merged to master; better remove it." + exit 1 ;# we could allow it, but there is no point. +fi + +# Is topic ever merged to next? If so you should not be rebasing it. +only_next_1=`git rev-list ^master "^$topic" ${publish} | sort` +only_next_2=`git rev-list ^master ${publish} | sort` +if test "$only_next_1" = "$only_next_2" +then + not_in_topic=`git rev-list "^$topic" master` + if test -z "$not_in_topic" + then + echo >&2 "$topic is already up-to-date with master" + exit 1 ;# we could allow it, but there is no point. + else + exit 0 + fi +else + not_in_next=`git rev-list --pretty=oneline ^${publish} "$topic"` + /usr/bin/perl -e ' + my $topic = $ARGV[0]; + my $msg = "* $topic has commits already merged to public branch:\n"; + my (%not_in_next) = map { + /^([0-9a-f]+) /; + ($1 => 1); + } split(/\n/, $ARGV[1]); + for my $elem (map { + /^([0-9a-f]+) (.*)$/; + [$1 => $2]; + } split(/\n/, $ARGV[2])) { + if (!exists $not_in_next{$elem->[0]}) { + if ($msg) { + print STDERR $msg; + undef $msg; + } + print STDERR " $elem->[1]\n"; + } + } + ' "$topic" "$not_in_next" "$not_in_master" + exit 1 +fi + +exit 0 + +################################################################ + +This sample hook safeguards topic branches that have been +published from being rewound. 
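+
+Every check below builds on one rev-list primitive: "git rev-list
+^excluded included" lists commits reachable from "included" but not
+from "excluded", so empty output proves containment. For instance
+(hypothetical branch names):
+
+    git rev-list ^master topic     # commits on topic not yet in master
+    git rev-list master..topic     # the same set, in range syntax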
+ +The workflow assumed here is: + + * Once a topic branch forks from "master", "master" is never + merged into it again (either directly or indirectly). + + * Once a topic branch is fully cooked and merged into "master", + it is deleted. If you need to build on top of it to correct + earlier mistakes, a new topic branch is created by forking at + the tip of the "master". This is not strictly necessary, but + it makes it easier to keep your history simple. + + * Whenever you need to test or publish your changes to topic + branches, merge them into "next" branch. + +The script, being an example, hardcodes the publish branch name +to be "next", but it is trivial to make it configurable via +$GIT_DIR/config mechanism. + +With this workflow, you would want to know: + +(1) ... if a topic branch has ever been merged to "next". Young + topic branches can have stupid mistakes you would rather + clean up before publishing, and things that have not been + merged into other branches can be easily rebased without + affecting other people. But once it is published, you would + not want to rewind it. + +(2) ... if a topic branch has been fully merged to "master". + Then you can delete it. More importantly, you should not + build on top of it -- other people may already want to + change things related to the topic as patches against your + "master", so if you need further changes, it is better to + fork the topic (perhaps with the same name) afresh from the + tip of "master". + +Let's look at this example: + + o---o---o---o---o---o---o---o---o---o "next" + / / / / + / a---a---b A / / + / / / / + / / c---c---c---c B / + / / / \ / + / / / b---b C \ / + / / / / \ / + ---o---o---o---o---o---o---o---o---o---o---o "master" + + +A, B and C are topic branches. + + * A has one fix since it was merged up to "next". + + * B has finished. It has been fully merged up to "master" and "next", + and is ready to be deleted. + + * C has not merged to "next" at all. + +We would want to allow C to be rebased, refuse A, and encourage +B to be deleted. + +To compute (1): + + git rev-list ^master ^topic next + git rev-list ^master next + + if these match, topic has not merged in next at all. + +To compute (2): + + git rev-list master..topic + + if this is empty, it is fully merged to "master". diff --git a/vendor/github.com/hashicorp/go-getter/test-fixtures/git-branch-update/DOTgit-2/hooks/prepare-commit-msg.sample b/vendor/github.com/hashicorp/go-getter/test-fixtures/git-branch-update/DOTgit-2/hooks/prepare-commit-msg.sample new file mode 100644 index 000000000..f093a02ec --- /dev/null +++ b/vendor/github.com/hashicorp/go-getter/test-fixtures/git-branch-update/DOTgit-2/hooks/prepare-commit-msg.sample @@ -0,0 +1,36 @@ +#!/bin/sh +# +# An example hook script to prepare the commit log message. +# Called by "git commit" with the name of the file that has the +# commit message, followed by the description of the commit +# message's source. The hook's purpose is to edit the commit +# message file. If the hook fails with a non-zero status, +# the commit is aborted. +# +# To enable this hook, rename this file to "prepare-commit-msg". + +# This hook includes three examples. The first comments out the +# "Conflicts:" part of a merge commit. +# +# The second includes the output of "git diff --name-status -r" +# into the message, just before the "git status" output. It is +# commented because it doesn't cope with --amend or with squashed +# commits. 
+#
+# The third example adds a Signed-off-by line to the message, that can
+# still be edited. This is rarely a good idea.
+
+case "$2,$3" in
+ merge,)
+ /usr/bin/perl -i.bak -ne 's/^/# /, s/^# #/#/ if /^Conflicts/ .. /#/; print' "$1" ;;
+
+# ,|template,)
+# /usr/bin/perl -i.bak -pe '
+# print "\n" . `git diff --cached --name-status -r`
+# if /^#/ && $first++ == 0' "$1" ;;
+
+ *) ;;
+esac
+
+# SOB=$(git var GIT_AUTHOR_IDENT | sed -n 's/^\(.*>\).*$/Signed-off-by: \1/p')
+# grep -qs "^$SOB" "$1" || echo "$SOB" >> "$1"
diff --git a/vendor/github.com/hashicorp/go-getter/test-fixtures/git-branch-update/DOTgit-2/hooks/update.sample b/vendor/github.com/hashicorp/go-getter/test-fixtures/git-branch-update/DOTgit-2/hooks/update.sample
new file mode 100644
index 000000000..d84758373
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-getter/test-fixtures/git-branch-update/DOTgit-2/hooks/update.sample
@@ -0,0 +1,128 @@
+#!/bin/sh
+#
+# An example hook script to block unannotated tags from entering.
+# Called by "git receive-pack" with arguments: refname sha1-old sha1-new
+#
+# To enable this hook, rename this file to "update".
+#
+# Config
+# ------
+# hooks.allowunannotated
+# This boolean sets whether unannotated tags will be allowed into the
+# repository. By default they won't be.
+# hooks.allowdeletetag
+# This boolean sets whether deleting tags will be allowed in the
+# repository. By default they won't be.
+# hooks.allowmodifytag
+# This boolean sets whether a tag may be modified after creation. By default
+# it won't be.
+# hooks.allowdeletebranch
+# This boolean sets whether deleting branches will be allowed in the
+# repository. By default they won't be.
+# hooks.denycreatebranch
+# This boolean sets whether remotely creating branches will be denied
+# in the repository. By default this is allowed.
+#
+
+# --- Command line
+refname="$1"
+oldrev="$2"
+newrev="$3"
+
+# --- Safety check
+if [ -z "$GIT_DIR" ]; then
+ echo "Don't run this script from the command line." >&2
+ echo " (if you want, you could supply GIT_DIR then run" >&2
+ echo " $0 <ref> <oldrev> <newrev>)" >&2
+ exit 1
+fi
+
+if [ -z "$refname" -o -z "$oldrev" -o -z "$newrev" ]; then
+ echo "usage: $0 <ref> <oldrev> <newrev>" >&2
+ exit 1
+fi
+
+# --- Config
+allowunannotated=$(git config --bool hooks.allowunannotated)
+allowdeletebranch=$(git config --bool hooks.allowdeletebranch)
+denycreatebranch=$(git config --bool hooks.denycreatebranch)
+allowdeletetag=$(git config --bool hooks.allowdeletetag)
+allowmodifytag=$(git config --bool hooks.allowmodifytag)
+
+# check for no description
+projectdesc=$(sed -e '1q' "$GIT_DIR/description")
+case "$projectdesc" in
+"Unnamed repository"* | "")
+ echo "*** Project description file hasn't been set" >&2
+ exit 1
+ ;;
+esac
+
+# --- Check types
+# if $newrev is 0000...0000, it's a commit to delete a ref.
+zero="0000000000000000000000000000000000000000"
+if [ "$newrev" = "$zero" ]; then
+ newrev_type=delete
+else
+ newrev_type=$(git cat-file -t $newrev)
+fi
+
+case "$refname","$newrev_type" in
+ refs/tags/*,commit)
+ # un-annotated tag
+ short_refname=${refname##refs/tags/}
+ if [ "$allowunannotated" != "true" ]; then
+ echo "*** The un-annotated tag, $short_refname, is not allowed in this repository" >&2
+ echo "*** Use 'git tag [ -a | -s ]' for tags you want to propagate."
>&2 + exit 1 + fi + ;; + refs/tags/*,delete) + # delete tag + if [ "$allowdeletetag" != "true" ]; then + echo "*** Deleting a tag is not allowed in this repository" >&2 + exit 1 + fi + ;; + refs/tags/*,tag) + # annotated tag + if [ "$allowmodifytag" != "true" ] && git rev-parse $refname > /dev/null 2>&1 + then + echo "*** Tag '$refname' already exists." >&2 + echo "*** Modifying a tag is not allowed in this repository." >&2 + exit 1 + fi + ;; + refs/heads/*,commit) + # branch + if [ "$oldrev" = "$zero" -a "$denycreatebranch" = "true" ]; then + echo "*** Creating a branch is not allowed in this repository" >&2 + exit 1 + fi + ;; + refs/heads/*,delete) + # delete branch + if [ "$allowdeletebranch" != "true" ]; then + echo "*** Deleting a branch is not allowed in this repository" >&2 + exit 1 + fi + ;; + refs/remotes/*,commit) + # tracking branch + ;; + refs/remotes/*,delete) + # delete tracking branch + if [ "$allowdeletebranch" != "true" ]; then + echo "*** Deleting a tracking branch is not allowed in this repository" >&2 + exit 1 + fi + ;; + *) + # Anything else (is there anything else?) + echo "*** Update hook: unknown type of update to ref $refname of type $newrev_type" >&2 + exit 1 + ;; +esac + +# --- Finished +exit 0 diff --git a/vendor/github.com/hashicorp/go-getter/test-fixtures/git-branch-update/DOTgit-2/index b/vendor/github.com/hashicorp/go-getter/test-fixtures/git-branch-update/DOTgit-2/index new file mode 100644 index 0000000000000000000000000000000000000000..44211b1a689f795f15c17cc7f396feae4a418a95 GIT binary patch literal 305 zcmZ?q402{*U|<4b<}e*`X&}u2qZt_(SQzw5?lCYlE@5C`{0fv30b=%j|Kgt~a&(JDPa{UAq6=uDqTd@^uxTUvRS9-u3TH z(iudN%!@BANJ%V7g&NBc66ER%G*^ 1445730077 -0700 commit (initial): Initial +9a311b181c4ec0fe4d8856b9fc935a191855742a 9a311b181c4ec0fe4d8856b9fc935a191855742a Mitchell Hashimoto 1445730083 -0700 checkout: moving from master to test-branch +9a311b181c4ec0fe4d8856b9fc935a191855742a ae4fc53ed172182cf8ab07d2f087994950a2ccdf Mitchell Hashimoto 1445730106 -0700 commit: Initial branch +ae4fc53ed172182cf8ab07d2f087994950a2ccdf 34fa5e1fb706dc77cc0b0cf622c82e67ba84e297 Mitchell Hashimoto 1445730148 -0700 commit: Branch update diff --git a/vendor/github.com/hashicorp/go-getter/test-fixtures/git-branch-update/DOTgit-2/logs/refs/heads/master b/vendor/github.com/hashicorp/go-getter/test-fixtures/git-branch-update/DOTgit-2/logs/refs/heads/master new file mode 100644 index 000000000..c6181dfa6 --- /dev/null +++ b/vendor/github.com/hashicorp/go-getter/test-fixtures/git-branch-update/DOTgit-2/logs/refs/heads/master @@ -0,0 +1 @@ +0000000000000000000000000000000000000000 9a311b181c4ec0fe4d8856b9fc935a191855742a Mitchell Hashimoto 1445730077 -0700 commit (initial): Initial diff --git a/vendor/github.com/hashicorp/go-getter/test-fixtures/git-branch-update/DOTgit-2/logs/refs/heads/test-branch b/vendor/github.com/hashicorp/go-getter/test-fixtures/git-branch-update/DOTgit-2/logs/refs/heads/test-branch new file mode 100644 index 000000000..349f98deb --- /dev/null +++ b/vendor/github.com/hashicorp/go-getter/test-fixtures/git-branch-update/DOTgit-2/logs/refs/heads/test-branch @@ -0,0 +1,3 @@ +0000000000000000000000000000000000000000 9a311b181c4ec0fe4d8856b9fc935a191855742a Mitchell Hashimoto 1445730083 -0700 branch: Created from HEAD +9a311b181c4ec0fe4d8856b9fc935a191855742a ae4fc53ed172182cf8ab07d2f087994950a2ccdf Mitchell Hashimoto 1445730106 -0700 commit: Initial branch +ae4fc53ed172182cf8ab07d2f087994950a2ccdf 34fa5e1fb706dc77cc0b0cf622c82e67ba84e297 Mitchell Hashimoto 1445730148 -0700 
commit: Branch update diff --git a/vendor/github.com/hashicorp/go-getter/test-fixtures/git-branch-update/DOTgit-2/objects/20/9cbd800810a2a1559ba33c48bc75860e967e07 b/vendor/github.com/hashicorp/go-getter/test-fixtures/git-branch-update/DOTgit-2/objects/20/9cbd800810a2a1559ba33c48bc75860e967e07 new file mode 100644 index 0000000000000000000000000000000000000000..23b6ef27247970bf610a752437daf5061a9d4fb6 GIT binary patch literal 117 zcmV-*0E+*30V^p=O;s>7GGj0_FfcPQQOHfq%+o7LW7zjE{&`ZqM8G4a-==HKV-g>J zs-KFaBtEGqF)uj-q|z=mYH2~jW(HQBr89O(Y%SIA;f%mk8DCnEl30=o($J;*&+W?V X*&$z7@%aTOyX{^7&LkZG2U9cfW4$$9 literal 0 HcmV?d00001 diff --git a/vendor/github.com/hashicorp/go-getter/test-fixtures/git-branch-update/DOTgit-2/objects/29/5c5449283c0fe38c75ae73144513240e5626a6 b/vendor/github.com/hashicorp/go-getter/test-fixtures/git-branch-update/DOTgit-2/objects/29/5c5449283c0fe38c75ae73144513240e5626a6 new file mode 100644 index 0000000000000000000000000000000000000000..e6653c083a78b119890b30ddffb3a936bc5abdef GIT binary patch literal 84 zcmV-a0IUCa0V^p=O;s>AXD~D{Ff%bx$W6@5(<@11*!M5~c~ZVaz$2#LrfbY&5+8o5 qpNga;KB*`%FF6CG(k?Y>X+gqf23DP=Gj>UAE!FSgi~s;CHXntTVQd@=7e&B0e{@LYGXkI$5 literal 0 HcmV?d00001 diff --git a/vendor/github.com/hashicorp/go-getter/test-fixtures/git-branch-update/DOTgit-2/objects/8a/2dfe46d4eb9b54f5aa0e4e536346bdafee3467 b/vendor/github.com/hashicorp/go-getter/test-fixtures/git-branch-update/DOTgit-2/objects/8a/2dfe46d4eb9b54f5aa0e4e536346bdafee3467 new file mode 100644 index 0000000000000000000000000000000000000000..5123c4b7454955157c2439781749635788d27d3e GIT binary patch literal 32 ncmbr%ViyHteYYz;_D5 literal 0 HcmV?d00001 diff --git a/vendor/github.com/hashicorp/go-getter/test-fixtures/git-branch-update/DOTgit-2/objects/9a/311b181c4ec0fe4d8856b9fc935a191855742a b/vendor/github.com/hashicorp/go-getter/test-fixtures/git-branch-update/DOTgit-2/objects/9a/311b181c4ec0fe4d8856b9fc935a191855742a new file mode 100644 index 000000000..e116abba7 --- /dev/null +++ b/vendor/github.com/hashicorp/go-getter/test-fixtures/git-branch-update/DOTgit-2/objects/9a/311b181c4ec0fe4d8856b9fc935a191855742a @@ -0,0 +1,2 @@ +x­ÌA +Â0@Q×9Å\À2t:D\êÂC¤cj‰:Þß"x·þ×Vk6 w¶Æaê'ýp ‘˜ˆãTÑË"BʤóøÎ.¼-µnÙ4ÅRà^)×f ŽÛðk]úÙùQC.¶z‚~X<¢ìQݦ[añ/3w}fË¡¸XÙE \ No newline at end of file diff --git a/vendor/github.com/hashicorp/go-getter/test-fixtures/git-branch-update/DOTgit-2/objects/a9/193e348262522550ebacc037f772c52cbfa5d5 b/vendor/github.com/hashicorp/go-getter/test-fixtures/git-branch-update/DOTgit-2/objects/a9/193e348262522550ebacc037f772c52cbfa5d5 new file mode 100644 index 0000000000000000000000000000000000000000..a6230cdf304482575f86dc72c75a29fb32bfa466 GIT binary patch literal 52 zcmV-40L%Y)0V^p=O;s>9WiT`_Ff%bx$W6@5(<@11*!M5~c~ZVaz$2#LrfbY&5+8o5 Kp9%mvd=U!?XB3(M literal 0 HcmV?d00001 diff --git a/vendor/github.com/hashicorp/go-getter/test-fixtures/git-branch-update/DOTgit-2/objects/ae/4fc53ed172182cf8ab07d2f087994950a2ccdf b/vendor/github.com/hashicorp/go-getter/test-fixtures/git-branch-update/DOTgit-2/objects/ae/4fc53ed172182cf8ab07d2f087994950a2ccdf new file mode 100644 index 0000000000000000000000000000000000000000..57fd1016c4788afe94bd8619c91f864097e7e27f GIT binary patch literal 172 zcmV;d08{^X0j-WpP6IIz1zGzPeE}l3?e<5C5KCC(5Ivm^wlp52!Q7t-Qf`oKN%iW< z=)FyF5ntx63a*e)2*qVD_NG2dBG%+F1VZnERf;ZhG~c}IHleUbTri^qy$j)*GsUIc zqA$UTw10 a9P}`zK4r{&Zf!PRVcB_;YW@I+eN+M*ic_lq literal 0 HcmV?d00001 diff --git a/vendor/github.com/hashicorp/go-getter/test-fixtures/git-branch-update/DOTgit-2/objects/be/fe5fe7626f1850e202fb35ac375c61e1f27f95 
b/vendor/github.com/hashicorp/go-getter/test-fixtures/git-branch-update/DOTgit-2/objects/be/fe5fe7626f1850e202fb35ac375c61e1f27f95 new file mode 100644 index 0000000000000000000000000000000000000000..61c8d6ad72e92558b452d07dd00db7158385ab86 GIT binary patch literal 22 dcmb 0 { + desc = fmt.Sprintf("%s (status: %d)", desc, code) + } + c.Logger.Printf("[DEBUG] %s: retrying in %s", desc, wait) + time.Sleep(wait) + } + + // Return an error if we fall out of the retry loop + return nil, fmt.Errorf("%s %s giving up after %d attempts", + req.Method, req.URL, c.RetryMax+1) +} + +// Get is a shortcut for doing a GET request without making a new client. +func Get(url string) (*http.Response, error) { + return defaultClient.Get(url) +} + +// Get is a convenience helper for doing simple GET requests. +func (c *Client) Get(url string) (*http.Response, error) { + req, err := NewRequest("GET", url, nil) + if err != nil { + return nil, err + } + return c.Do(req) +} + +// Head is a shortcut for doing a HEAD request without making a new client. +func Head(url string) (*http.Response, error) { + return defaultClient.Head(url) +} + +// Head is a convenience method for doing simple HEAD requests. +func (c *Client) Head(url string) (*http.Response, error) { + req, err := NewRequest("HEAD", url, nil) + if err != nil { + return nil, err + } + return c.Do(req) +} + +// Post is a shortcut for doing a POST request without making a new client. +func Post(url, bodyType string, body io.ReadSeeker) (*http.Response, error) { + return defaultClient.Post(url, bodyType, body) +} + +// Post is a convenience method for doing simple POST requests. +func (c *Client) Post(url, bodyType string, body io.ReadSeeker) (*http.Response, error) { + req, err := NewRequest("POST", url, body) + if err != nil { + return nil, err + } + req.Header.Set("Content-Type", bodyType) + return c.Do(req) +} + +// PostForm is a shortcut to perform a POST with form data without creating +// a new client. +func PostForm(url string, data url.Values) (*http.Response, error) { + return defaultClient.PostForm(url, data) +} + +// PostForm is a convenience method for doing simple POST operations using +// pre-filled url.Values form data. +func (c *Client) PostForm(url string, data url.Values) (*http.Response, error) { + return c.Post(url, "application/x-www-form-urlencoded", strings.NewReader(data.Encode())) +} + +// backoff is used to calculate how long to sleep before retrying +// after observing failures. It takes the minimum/maximum wait time and +// iteration, and returns the duration to wait. 
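+// For example, with min = 1 * time.Second and max = 5 * time.Minute, +// iterations 0, 1, 2, 3 wait 1s, 2s, 4s, 8s, and any iteration whose +// doubled value overflows or exceeds the ceiling is clamped to 5m (the +// TestBackoff cases in client_test.go below exercise exactly these values).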
+func backoff(min, max time.Duration, iter int) time.Duration { + mult := math.Pow(2, float64(iter)) * float64(min) + sleep := time.Duration(mult) + if float64(sleep) != mult || sleep > max { + sleep = max + } + return sleep +} diff --git a/vendor/github.com/hashicorp/go-retryablehttp/client_test.go b/vendor/github.com/hashicorp/go-retryablehttp/client_test.go new file mode 100644 index 000000000..36a829a16 --- /dev/null +++ b/vendor/github.com/hashicorp/go-retryablehttp/client_test.go @@ -0,0 +1,342 @@ +package retryablehttp + +import ( + "bytes" + "io/ioutil" + "net" + "net/http" + "net/http/httptest" + "net/url" + "strings" + "sync/atomic" + "testing" + "time" +) + +func TestRequest(t *testing.T) { + // Fails on invalid request + _, err := NewRequest("GET", "://foo", nil) + if err == nil { + t.Fatalf("should error") + } + + // Works with no request body + _, err = NewRequest("GET", "http://foo", nil) + if err != nil { + t.Fatalf("err: %v", err) + } + + // Works with request body + body := bytes.NewReader([]byte("yo")) + req, err := NewRequest("GET", "/", body) + if err != nil { + t.Fatalf("err: %v", err) + } + + // Request allows typical HTTP request forming methods + req.Header.Set("X-Test", "foo") + if v, ok := req.Header["X-Test"]; !ok || len(v) != 1 || v[0] != "foo" { + t.Fatalf("bad headers: %v", req.Header) + } + + // Sets the Content-Length automatically for LenReaders + if req.ContentLength != 2 { + t.Fatalf("bad ContentLength: %d", req.ContentLength) + } +} + +func TestClient_Do(t *testing.T) { + // Create a request + body := bytes.NewReader([]byte("hello")) + req, err := NewRequest("PUT", "http://127.0.0.1:28934/v1/foo", body) + if err != nil { + t.Fatalf("err: %v", err) + } + req.Header.Set("foo", "bar") + + // Create the client. Use short retry windows. + client := NewClient() + client.RetryWaitMin = 10 * time.Millisecond + client.RetryWaitMax = 50 * time.Millisecond + client.RetryMax = 50 + + // Send the request + var resp *http.Response + doneCh := make(chan struct{}) + go func() { + defer close(doneCh) + var err error + resp, err = client.Do(req) + if err != nil { + t.Fatalf("err: %v", err) + } + }() + + select { + case <-doneCh: + t.Fatalf("should retry on error") + case <-time.After(200 * time.Millisecond): + // Client should still be retrying due to connection failure. + } + + // Create the mock handler. First we return a 500-range response to ensure + // that we power through and keep retrying in the face of recoverable + // errors. + code := int64(500) + handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + // Check the request details + if r.Method != "PUT" { + t.Fatalf("bad method: %s", r.Method) + } + if r.RequestURI != "/v1/foo" { + t.Fatalf("bad uri: %s", r.RequestURI) + } + + // Check the headers + if v := r.Header.Get("foo"); v != "bar" { + t.Fatalf("bad header: expect foo=bar, got foo=%v", v) + } + + // Check the payload + body, err := ioutil.ReadAll(r.Body) + if err != nil { + t.Fatalf("err: %s", err) + } + expected := []byte("hello") + if !bytes.Equal(body, expected) { + t.Fatalf("bad: %v", body) + } + + w.WriteHeader(int(atomic.LoadInt64(&code))) + }) + + // Create a test server + list, err := net.Listen("tcp", ":28934") + if err != nil { + t.Fatalf("err: %v", err) + } + defer list.Close() + go http.Serve(list, handler) + + // Wait again + select { + case <-doneCh: + t.Fatalf("should retry on 500-range") + case <-time.After(200 * time.Millisecond): + // Client should still be retrying due to 500's. 
+ } + + // Start returning 200's + atomic.StoreInt64(&code, 200) + + // Wait again + select { + case <-doneCh: + case <-time.After(time.Second): + t.Fatalf("timed out") + } + + if resp.StatusCode != 200 { + t.Fatalf("expected 200, got: %d", resp.StatusCode) + } +} + +func TestClient_Do_fails(t *testing.T) { + // Mock server which always responds 500. + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(500) + })) + defer ts.Close() + + // Create the client. Use short retry windows so we fail faster. + client := NewClient() + client.RetryWaitMin = 10 * time.Millisecond + client.RetryWaitMax = 10 * time.Millisecond + client.RetryMax = 2 + + // Create the request + req, err := NewRequest("POST", ts.URL, nil) + if err != nil { + t.Fatalf("err: %v", err) + } + + // Send the request. + _, err = client.Do(req) + if err == nil || !strings.Contains(err.Error(), "giving up") { + t.Fatalf("expected giving up error, got: %#v", err) + } +} + +func TestClient_Get(t *testing.T) { + // Mock server which always responds 200. + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if r.Method != "GET" { + t.Fatalf("bad method: %s", r.Method) + } + if r.RequestURI != "/foo/bar" { + t.Fatalf("bad uri: %s", r.RequestURI) + } + w.WriteHeader(200) + })) + defer ts.Close() + + // Make the request. + resp, err := NewClient().Get(ts.URL + "/foo/bar") + if err != nil { + t.Fatalf("err: %v", err) + } + resp.Body.Close() +} + +func TestClient_Head(t *testing.T) { + // Mock server which always responds 200. + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if r.Method != "HEAD" { + t.Fatalf("bad method: %s", r.Method) + } + if r.RequestURI != "/foo/bar" { + t.Fatalf("bad uri: %s", r.RequestURI) + } + w.WriteHeader(200) + })) + defer ts.Close() + + // Make the request. + resp, err := NewClient().Head(ts.URL + "/foo/bar") + if err != nil { + t.Fatalf("err: %v", err) + } + resp.Body.Close() +} + +func TestClient_Post(t *testing.T) { + // Mock server which always responds 200. + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if r.Method != "POST" { + t.Fatalf("bad method: %s", r.Method) + } + if r.RequestURI != "/foo/bar" { + t.Fatalf("bad uri: %s", r.RequestURI) + } + if ct := r.Header.Get("Content-Type"); ct != "application/json" { + t.Fatalf("bad content-type: %s", ct) + } + + // Check the payload + body, err := ioutil.ReadAll(r.Body) + if err != nil { + t.Fatalf("err: %s", err) + } + expected := []byte(`{"hello":"world"}`) + if !bytes.Equal(body, expected) { + t.Fatalf("bad: %v", body) + } + + w.WriteHeader(200) + })) + defer ts.Close() + + // Make the request. + resp, err := NewClient().Post( + ts.URL+"/foo/bar", + "application/json", + strings.NewReader(`{"hello":"world"}`)) + if err != nil { + t.Fatalf("err: %v", err) + } + resp.Body.Close() +} + +func TestClient_PostForm(t *testing.T) { + // Mock server which always responds 200.
+ ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if r.Method != "POST" { + t.Fatalf("bad method: %s", r.Method) + } + if r.RequestURI != "/foo/bar" { + t.Fatalf("bad uri: %s", r.RequestURI) + } + if ct := r.Header.Get("Content-Type"); ct != "application/x-www-form-urlencoded" { + t.Fatalf("bad content-type: %s", ct) + } + + // Check the payload + body, err := ioutil.ReadAll(r.Body) + if err != nil { + t.Fatalf("err: %s", err) + } + expected := []byte(`hello=world`) + if !bytes.Equal(body, expected) { + t.Fatalf("bad: %v", body) + } + + w.WriteHeader(200) + })) + defer ts.Close() + + // Create the form data. + form, err := url.ParseQuery("hello=world") + if err != nil { + t.Fatalf("err: %v", err) + } + + // Make the request. + resp, err := NewClient().PostForm(ts.URL+"/foo/bar", form) + if err != nil { + t.Fatalf("err: %v", err) + } + resp.Body.Close() +} + +func TestBackoff(t *testing.T) { + type tcase struct { + min time.Duration + max time.Duration + i int + expect time.Duration + } + cases := []tcase{ + { + time.Second, + 5 * time.Minute, + 0, + time.Second, + }, + { + time.Second, + 5 * time.Minute, + 1, + 2 * time.Second, + }, + { + time.Second, + 5 * time.Minute, + 2, + 4 * time.Second, + }, + { + time.Second, + 5 * time.Minute, + 3, + 8 * time.Second, + }, + { + time.Second, + 5 * time.Minute, + 63, + 5 * time.Minute, + }, + { + time.Second, + 5 * time.Minute, + 128, + 5 * time.Minute, + }, + } + + for _, tc := range cases { + if v := backoff(tc.min, tc.max, tc.i); v != tc.expect { + t.Fatalf("bad: %#v -> %s", tc, v) + } + } +} diff --git a/vendor/github.com/hashicorp/go-version/.travis.yml b/vendor/github.com/hashicorp/go-version/.travis.yml new file mode 100644 index 000000000..9f30eecd7 --- /dev/null +++ b/vendor/github.com/hashicorp/go-version/.travis.yml @@ -0,0 +1,11 @@ +language: go + +go: + - 1.0 + - 1.1 + - 1.2 + - 1.3 + - 1.4 + +script: + - go test diff --git a/vendor/github.com/hashicorp/go-version/LICENSE b/vendor/github.com/hashicorp/go-version/LICENSE new file mode 100644 index 000000000..c33dcc7c9 --- /dev/null +++ b/vendor/github.com/hashicorp/go-version/LICENSE @@ -0,0 +1,354 @@ +Mozilla Public License, version 2.0 + +1. Definitions + +1.1. “Contributor” + + means each individual or legal entity that creates, contributes to the + creation of, or owns Covered Software. + +1.2. “Contributor Version” + + means the combination of the Contributions of others (if any) used by a + Contributor and that particular Contributor’s Contribution. + +1.3. “Contribution” + + means Covered Software of a particular Contributor. + +1.4. “Covered Software” + + means Source Code Form to which the initial Contributor has attached the + notice in Exhibit A, the Executable Form of such Source Code Form, and + Modifications of such Source Code Form, in each case including portions + thereof. + +1.5. “Incompatible With Secondary Licenses” + means + + a. that the initial Contributor has attached the notice described in + Exhibit B to the Covered Software; or + + b. that the Covered Software was made available under the terms of version + 1.1 or earlier of the License, but not also under the terms of a + Secondary License. + +1.6. “Executable Form” + + means any form of the work other than Source Code Form. + +1.7. “Larger Work” + + means a work that combines Covered Software with other material, in a separate + file or files, that is not Covered Software. + +1.8. “License” + + means this document. + +1.9.
“Licensable” + + means having the right to grant, to the maximum extent possible, whether at the + time of the initial grant or subsequently, any and all of the rights conveyed by + this License. + +1.10. “Modifications” + + means any of the following: + + a. any file in Source Code Form that results from an addition to, deletion + from, or modification of the contents of Covered Software; or + + b. any new file in Source Code Form that contains any Covered Software. + +1.11. “Patent Claims” of a Contributor + + means any patent claim(s), including without limitation, method, process, + and apparatus claims, in any patent Licensable by such Contributor that + would be infringed, but for the grant of the License, by the making, + using, selling, offering for sale, having made, import, or transfer of + either its Contributions or its Contributor Version. + +1.12. “Secondary License” + + means either the GNU General Public License, Version 2.0, the GNU Lesser + General Public License, Version 2.1, the GNU Affero General Public + License, Version 3.0, or any later versions of those licenses. + +1.13. “Source Code Form” + + means the form of the work preferred for making modifications. + +1.14. “You” (or “Your”) + + means an individual or a legal entity exercising rights under this + License. For legal entities, “You” includes any entity that controls, is + controlled by, or is under common control with You. For purposes of this + definition, “control” means (a) the power, direct or indirect, to cause + the direction or management of such entity, whether by contract or + otherwise, or (b) ownership of more than fifty percent (50%) of the + outstanding shares or beneficial ownership of such entity. + + +2. License Grants and Conditions + +2.1. Grants + + Each Contributor hereby grants You a world-wide, royalty-free, + non-exclusive license: + + a. under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or as + part of a Larger Work; and + + b. under Patent Claims of such Contributor to make, use, sell, offer for + sale, have made, import, and otherwise transfer either its Contributions + or its Contributor Version. + +2.2. Effective Date + + The licenses granted in Section 2.1 with respect to any Contribution become + effective for each Contribution on the date the Contributor first distributes + such Contribution. + +2.3. Limitations on Grant Scope + + The licenses granted in this Section 2 are the only rights granted under this + License. No additional rights or licenses will be implied from the distribution + or licensing of Covered Software under this License. Notwithstanding Section + 2.1(b) above, no patent license is granted by a Contributor: + + a. for any code that a Contributor has removed from Covered Software; or + + b. for infringements caused by: (i) Your and any other third party’s + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + + c. under Patent Claims infringed by Covered Software in the absence of its + Contributions. + + This License does not grant any rights in the trademarks, service marks, or + logos of any Contributor (except as may be necessary to comply with the + notice requirements in Section 3.4). + +2.4.
Subsequent Licenses + + No Contributor makes additional grants as a result of Your choice to + distribute the Covered Software under a subsequent version of this License + (see Section 10.2) or under the terms of a Secondary License (if permitted + under the terms of Section 3.3). + +2.5. Representation + + Each Contributor represents that the Contributor believes its Contributions + are its original creation(s) or it has sufficient rights to grant the + rights to its Contributions conveyed by this License. + +2.6. Fair Use + + This License is not intended to limit any rights You have under applicable + copyright doctrines of fair use, fair dealing, or other equivalents. + +2.7. Conditions + + Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in + Section 2.1. + + +3. Responsibilities + +3.1. Distribution of Source Form + + All distribution of Covered Software in Source Code Form, including any + Modifications that You create or to which You contribute, must be under the + terms of this License. You must inform recipients that the Source Code Form + of the Covered Software is governed by the terms of this License, and how + they can obtain a copy of this License. You may not attempt to alter or + restrict the recipients’ rights in the Source Code Form. + +3.2. Distribution of Executable Form + + If You distribute Covered Software in Executable Form then: + + a. such Covered Software must also be made available in Source Code Form, + as described in Section 3.1, and You must inform recipients of the + Executable Form how they can obtain a copy of such Source Code Form by + reasonable means in a timely manner, at a charge no more than the cost + of distribution to the recipient; and + + b. You may distribute such Executable Form under the terms of this License, + or sublicense it under different terms, provided that the license for + the Executable Form does not attempt to limit or alter the recipients’ + rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + + You may create and distribute a Larger Work under terms of Your choice, + provided that You also comply with the requirements of this License for the + Covered Software. If the Larger Work is a combination of Covered Software + with a work governed by one or more Secondary Licenses, and the Covered + Software is not Incompatible With Secondary Licenses, this License permits + You to additionally distribute such Covered Software under the terms of + such Secondary License(s), so that the recipient of the Larger Work may, at + their option, further distribute the Covered Software under the terms of + either this License or such Secondary License(s). + +3.4. Notices + + You may not remove or alter the substance of any license notices (including + copyright notices, patent notices, disclaimers of warranty, or limitations + of liability) contained within the Source Code Form of the Covered + Software, except that You may alter any license notices to the extent + required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + + You may choose to offer, and to charge a fee for, warranty, support, + indemnity or liability obligations to one or more recipients of Covered + Software. However, You may do so only on Your own behalf, and not on behalf + of any Contributor.
You must make it absolutely clear that any such + warranty, support, indemnity, or liability obligation is offered by You + alone, and You hereby agree to indemnify every Contributor for any + liability incurred by such Contributor as a result of warranty, support, + indemnity or liability terms You offer. You may include additional + disclaimers of warranty and limitations of liability specific to any + jurisdiction. + +4. Inability to Comply Due to Statute or Regulation + + If it is impossible for You to comply with any of the terms of this License + with respect to some or all of the Covered Software due to statute, judicial + order, or regulation then You must: (a) comply with the terms of this License + to the maximum extent possible; and (b) describe the limitations and the code + they affect. Such description must be placed in a text file included with all + distributions of the Covered Software under this License. Except to the + extent prohibited by statute or regulation, such description must be + sufficiently detailed for a recipient of ordinary skill to be able to + understand it. + +5. Termination + +5.1. The rights granted under this License will terminate automatically if You + fail to comply with any of its terms. However, if You become compliant, + then the rights granted under this License from a particular Contributor + are reinstated (a) provisionally, unless and until such Contributor + explicitly and finally terminates Your grants, and (b) on an ongoing basis, + if such Contributor fails to notify You of the non-compliance by some + reasonable means prior to 60 days after You have come back into compliance. + Moreover, Your grants from a particular Contributor are reinstated on an + ongoing basis if such Contributor notifies You of the non-compliance by + some reasonable means, this is the first time You have received notice of + non-compliance with this License from such Contributor, and You become + compliant prior to 30 days after Your receipt of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent + infringement claim (excluding declaratory judgment actions, counter-claims, + and cross-claims) alleging that a Contributor Version directly or + indirectly infringes any patent, then the rights granted to You by any and + all Contributors for the Covered Software under Section 2.1 of this License + shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user + license agreements (excluding distributors and resellers) which have been + validly granted by You or Your distributors under this License prior to + termination shall survive termination. + +6. Disclaimer of Warranty + + Covered Software is provided under this License on an “as is” basis, without + warranty of any kind, either expressed, implied, or statutory, including, + without limitation, warranties that the Covered Software is free of defects, + merchantable, fit for a particular purpose or non-infringing. The entire + risk as to the quality and performance of the Covered Software is with You. + Should any Covered Software prove defective in any respect, You (not any + Contributor) assume the cost of any necessary servicing, repair, or + correction. This disclaimer of warranty constitutes an essential part of this + License. No use of any Covered Software is authorized under this License + except under this disclaimer. + +7.
Limitation of Liability + + Under no circumstances and under no legal theory, whether tort (including + negligence), contract, or otherwise, shall any Contributor, or anyone who + distributes Covered Software as permitted above, be liable to You for any + direct, indirect, special, incidental, or consequential damages of any + character including, without limitation, damages for lost profits, loss of + goodwill, work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses, even if such party shall have been + informed of the possibility of such damages. This limitation of liability + shall not apply to liability for death or personal injury resulting from such + party’s negligence to the extent applicable law prohibits such limitation. + Some jurisdictions do not allow the exclusion or limitation of incidental or + consequential damages, so this exclusion and limitation may not apply to You. + +8. Litigation + + Any litigation relating to this License may be brought only in the courts of + a jurisdiction where the defendant maintains its principal place of business + and such litigation shall be governed by laws of that jurisdiction, without + reference to its conflict-of-law provisions. Nothing in this Section shall + prevent a party’s ability to bring cross-claims or counter-claims. + +9. Miscellaneous + + This License represents the complete agreement concerning the subject matter + hereof. If any provision of this License is held to be unenforceable, such + provision shall be reformed only to the extent necessary to make it + enforceable. Any law or regulation which provides that the language of a + contract shall be construed against the drafter shall not be used to construe + this License against a Contributor. + + +10. Versions of the License + +10.1. New Versions + + Mozilla Foundation is the license steward. Except as provided in Section + 10.3, no one other than the license steward has the right to modify or + publish new versions of this License. Each version will be given a + distinguishing version number. + +10.2. Effect of New Versions + + You may distribute the Covered Software under the terms of the version of + the License under which You originally received the Covered Software, or + under the terms of any subsequent version published by the license + steward. + +10.3. Modified Versions + + If you create software not governed by this License, and you want to + create a new license for such software, you may create and use a modified + version of this License if you rename the license and remove any + references to the name of the license steward (except to note that such + modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses + If You choose to distribute Source Code Form that is Incompatible With + Secondary Licenses under the terms of this version of the License, the + notice described in Exhibit B of this License must be attached. + +Exhibit A - Source Code Form License Notice + + This Source Code Form is subject to the + terms of the Mozilla Public License, v. + 2.0. If a copy of the MPL was not + distributed with this file, You can + obtain one at + http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular file, then +You may include the notice in a location (such as a LICENSE file in a relevant +directory) where a recipient would be likely to look for such a notice.
+ +You may add additional accurate notices of copyright ownership. + +Exhibit B - “Incompatible With Secondary Licenses” Notice + + This Source Code Form is “Incompatible + With Secondary Licenses”, as defined by + the Mozilla Public License, v. 2.0. + diff --git a/vendor/github.com/hashicorp/go-version/README.md b/vendor/github.com/hashicorp/go-version/README.md new file mode 100644 index 000000000..1d50070f1 --- /dev/null +++ b/vendor/github.com/hashicorp/go-version/README.md @@ -0,0 +1,65 @@ +# Versioning Library for Go +[![Build Status](https://travis-ci.org/hashicorp/go-version.svg?branch=master)](https://travis-ci.org/hashicorp/go-version) + +go-version is a library for parsing versions and version constraints, +and verifying versions against a set of constraints. go-version +can sort a collection of versions properly, handles prerelease/beta +versions, can increment versions, etc. + +Versions used with go-version must follow [SemVer](http://semver.org/). + +## Installation and Usage + +Package documentation can be found on +[GoDoc](http://godoc.org/github.com/hashicorp/go-version). + +Installation can be done with a normal `go get`: + +``` +$ go get github.com/hashicorp/go-version +``` + +#### Version Parsing and Comparison + +```go +v1, err := version.NewVersion("1.2") +v2, err := version.NewVersion("1.5+metadata") + +// Comparison example. There is also GreaterThan, Equal, and just +// a simple Compare that returns an int allowing easy >=, <=, etc. +if v1.LessThan(v2) { + fmt.Printf("%s is less than %s", v1, v2) +} +``` + +#### Version Constraints + +```go +v1, err := version.NewVersion("1.2") + +// Constraints example. +constraints, err := version.NewConstraint(">= 1.0, < 1.4") +if constraints.Check(v1) { + fmt.Printf("%s satisfies constraints %s", v1, constraints) +} +``` + +#### Version Sorting + +```go +versionsRaw := []string{"1.1", "0.7.1", "1.4-beta", "1.4", "2"} +versions := make([]*version.Version, len(versionsRaw)) +for i, raw := range versionsRaw { + v, _ := version.NewVersion(raw) + versions[i] = v +} + +// After this, the versions are properly sorted +sort.Sort(version.Collection(versions)) +``` + +## Issues and Contributing + +If you find an issue with this library, please report an issue. If you'd +like, we welcome any contributions. Fork this library and submit a pull +request. diff --git a/vendor/github.com/hashicorp/go-version/constraint.go b/vendor/github.com/hashicorp/go-version/constraint.go new file mode 100644 index 000000000..091cfab38 --- /dev/null +++ b/vendor/github.com/hashicorp/go-version/constraint.go @@ -0,0 +1,156 @@ +package version + +import ( + "fmt" + "regexp" + "strings" +) + +// Constraint represents a single constraint for a version, such as +// ">= 1.0". +type Constraint struct { + f constraintFunc + check *Version + original string +} + +// Constraints is a slice of constraints. We make a custom type so that +// we can add methods to it.
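+// For example: +// +// c, _ := NewConstraint(">= 1.0, < 1.4") +// c.Check(Must(NewVersion("1.2"))) // true: every entry in the list holds +// +// Constraints in a comma-separated list are ANDed together by Check.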
+type Constraints []*Constraint + +type constraintFunc func(v, c *Version) bool + +var constraintOperators map[string]constraintFunc + +var constraintRegexp *regexp.Regexp + +func init() { + constraintOperators = map[string]constraintFunc{ + "": constraintEqual, + "=": constraintEqual, + "!=": constraintNotEqual, + ">": constraintGreaterThan, + "<": constraintLessThan, + ">=": constraintGreaterThanEqual, + "<=": constraintLessThanEqual, + "~>": constraintPessimistic, + } + + ops := make([]string, 0, len(constraintOperators)) + for k, _ := range constraintOperators { + ops = append(ops, regexp.QuoteMeta(k)) + } + + constraintRegexp = regexp.MustCompile(fmt.Sprintf( + `^\s*(%s)\s*(%s)\s*$`, + strings.Join(ops, "|"), + VersionRegexpRaw)) +} + +// NewConstraint will parse one or more constraints from the given +// constraint string. The string must be a comma-separated list of +// constraints. +func NewConstraint(v string) (Constraints, error) { + vs := strings.Split(v, ",") + result := make([]*Constraint, len(vs)) + for i, single := range vs { + c, err := parseSingle(single) + if err != nil { + return nil, err + } + + result[i] = c + } + + return Constraints(result), nil +} + +// Check tests if a version satisfies all the constraints. +func (cs Constraints) Check(v *Version) bool { + for _, c := range cs { + if !c.Check(v) { + return false + } + } + + return true +} + +// Returns the string format of the constraints +func (cs Constraints) String() string { + csStr := make([]string, len(cs)) + for i, c := range cs { + csStr[i] = c.String() + } + + return strings.Join(csStr, ",") +} + +// Check tests if a constraint is validated by the given version. +func (c *Constraint) Check(v *Version) bool { + return c.f(v, c.check) +} + +func (c *Constraint) String() string { + return c.original +} + +func parseSingle(v string) (*Constraint, error) { + matches := constraintRegexp.FindStringSubmatch(v) + if matches == nil { + return nil, fmt.Errorf("Malformed constraint: %s", v) + } + + check, err := NewVersion(matches[2]) + if err != nil { + return nil, err + } + + return &Constraint{ + f: constraintOperators[matches[1]], + check: check, + original: v, + }, nil +} + +//------------------------------------------------------------------- +// Constraint functions +//------------------------------------------------------------------- + +func constraintEqual(v, c *Version) bool { + return v.Equal(c) +} + +func constraintNotEqual(v, c *Version) bool { + return !v.Equal(c) +} + +func constraintGreaterThan(v, c *Version) bool { + return v.Compare(c) == 1 +} + +func constraintLessThan(v, c *Version) bool { + return v.Compare(c) == -1 +} + +func constraintGreaterThanEqual(v, c *Version) bool { + return v.Compare(c) >= 0 +} + +func constraintLessThanEqual(v, c *Version) bool { + return v.Compare(c) <= 0 +} + +func constraintPessimistic(v, c *Version) bool { + if v.LessThan(c) { + return false + } + + for i := 0; i < c.si-1; i++ { + if v.segments[i] != c.segments[i] { + return false + } + } + + return true +} diff --git a/vendor/github.com/hashicorp/go-version/constraint_test.go b/vendor/github.com/hashicorp/go-version/constraint_test.go new file mode 100644 index 000000000..3abf70bbb --- /dev/null +++ b/vendor/github.com/hashicorp/go-version/constraint_test.go @@ -0,0 +1,103 @@ +package version + +import ( + "testing" +) + +func TestNewConstraint(t *testing.T) { + cases := []struct { + input string + count int + err bool + }{ + {">= 1.2", 1, false}, + {"1.0", 1, false}, + {">= 1.x", 0, true}, + {">= 1.2, < 1.0", 2, 
false}, + + // Out of bounds + {"11387778780781445675529500000000000000000", 0, true}, + } + + for _, tc := range cases { + v, err := NewConstraint(tc.input) + if tc.err && err == nil { + t.Fatalf("expected error for input: %s", tc.input) + } else if !tc.err && err != nil { + t.Fatalf("error for input %s: %s", tc.input, err) + } + + if len(v) != tc.count { + t.Fatalf("input: %s\nexpected len: %d\nactual: %d", + tc.input, tc.count, len(v)) + } + } +} + +func TestConstraintCheck(t *testing.T) { + cases := []struct { + constraint string + version string + check bool + }{ + {">= 1.0, < 1.2", "1.1.5", true}, + {"< 1.0, < 1.2", "1.1.5", false}, + {"= 1.0", "1.1.5", false}, + {"= 1.0", "1.0.0", true}, + {"1.0", "1.0.0", true}, + {"~> 1.0", "2.0", false}, + {"~> 1.0", "1.1", true}, + {"~> 1.0", "1.2.3", true}, + {"~> 1.0.0", "1.2.3", false}, + {"~> 1.0.0", "1.0.7", true}, + {"~> 1.0.0", "1.1.0", false}, + {"~> 1.0.7", "1.0.4", false}, + } + + for _, tc := range cases { + c, err := NewConstraint(tc.constraint) + if err != nil { + t.Fatalf("err: %s", err) + } + + v, err := NewVersion(tc.version) + if err != nil { + t.Fatalf("err: %s", err) + } + + actual := c.Check(v) + expected := tc.check + if actual != expected { + t.Fatalf("Version: %s\nConstraint: %s\nExpected: %#v", + tc.version, tc.constraint, expected) + } + } +} + +func TestConstraintsString(t *testing.T) { + cases := []struct { + constraint string + result string + }{ + {">= 1.0, < 1.2", ""}, + {"~> 1.0.7", ""}, + } + + for _, tc := range cases { + c, err := NewConstraint(tc.constraint) + if err != nil { + t.Fatalf("err: %s", err) + } + + actual := c.String() + expected := tc.result + if expected == "" { + expected = tc.constraint + } + + if actual != expected { + t.Fatalf("Constraint: %s\nExpected: %#v\nActual: %s", + tc.constraint, expected, actual) + } + } +} diff --git a/vendor/github.com/hashicorp/go-version/version.go b/vendor/github.com/hashicorp/go-version/version.go new file mode 100644 index 000000000..d0e0b0c8f --- /dev/null +++ b/vendor/github.com/hashicorp/go-version/version.go @@ -0,0 +1,251 @@ +package version + +import ( + "bytes" + "fmt" + "reflect" + "regexp" + "strconv" + "strings" +) + +// The compiled regular expression used to test the validity of a version. +var versionRegexp *regexp.Regexp + +// The raw regular expression string used for testing the validity +// of a version. +const VersionRegexpRaw string = `([0-9]+(\.[0-9]+){0,2})` + + `(-([0-9A-Za-z\-]+(\.[0-9A-Za-z\-]+)*))?` + + `(\+([0-9A-Za-z\-]+(\.[0-9A-Za-z\-]+)*))?` + + `?` + +// Version represents a single version. +type Version struct { + metadata string + pre string + segments []int + si int +} + +func init() { + versionRegexp = regexp.MustCompile("^" + VersionRegexpRaw + "$") +} + +// NewVersion parses the given version and returns a new +// Version. 
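+// Missing segments are zero-filled, so "1.2" parses with Segments() of +// [1 2 0] and prints back as "1.2.0". For example: +// +// v, _ := NewVersion("1.2-beta+meta") +// v.Segments() // [1 2 0] +// v.Prerelease() // "beta" +// v.Metadata() // "meta"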
+func NewVersion(v string) (*Version, error) { + matches := versionRegexp.FindStringSubmatch(v) + if matches == nil { + return nil, fmt.Errorf("Malformed version: %s", v) + } + + segmentsStr := strings.Split(matches[1], ".") + segments := make([]int, len(segmentsStr), 3) + si := 0 + for i, str := range segmentsStr { + val, err := strconv.ParseInt(str, 10, 32) + if err != nil { + return nil, fmt.Errorf( + "Error parsing version: %s", err) + } + + segments[i] = int(val) + si += 1 + } + for i := len(segments); i < 3; i++ { + segments = append(segments, 0) + } + + return &Version{ + metadata: matches[7], + pre: matches[4], + segments: segments, + si: si, + }, nil +} + +// Must is a helper that wraps a call to a function returning (*Version, error) +// and panics if error is non-nil. +func Must(v *Version, err error) *Version { + if err != nil { + panic(err) + } + + return v +} + +// Compare compares this version to another version. This +// returns -1, 0, or 1 if this version is smaller, equal, +// or larger than the other version, respectively. +// +// If you want boolean results, use the LessThan, Equal, +// or GreaterThan methods. +func (v *Version) Compare(other *Version) int { + // A quick, efficient equality check + if v.String() == other.String() { + return 0 + } + + segmentsSelf := v.Segments() + segmentsOther := other.Segments() + + // If the segments are the same, we must compare on prerelease info + if reflect.DeepEqual(segmentsSelf, segmentsOther) { + preSelf := v.Prerelease() + preOther := other.Prerelease() + if preSelf == "" && preOther == "" { + return 0 + } + if preSelf == "" { + return 1 + } + if preOther == "" { + return -1 + } + + return comparePrereleases(preSelf, preOther) + } + + // Compare the segments + for i := 0; i < len(segmentsSelf); i++ { + lhs := segmentsSelf[i] + rhs := segmentsOther[i] + + if lhs == rhs { + continue + } else if lhs < rhs { + return -1 + } else { + return 1 + } + } + + panic("should not be reached") +} + +func comparePart(preSelf string, preOther string) int { + if preSelf == preOther { + return 0 + } + + // if a part is empty, we use the other to decide + if preSelf == "" { + _, notIsNumeric := strconv.ParseInt(preOther, 10, 64) + if notIsNumeric == nil { + return -1 + } + return 1 + } + + if preOther == "" { + _, notIsNumeric := strconv.ParseInt(preSelf, 10, 64) + if notIsNumeric == nil { + return 1 + } + return -1 + } + + if preSelf > preOther { + return 1 + } + + return -1 +} + +func comparePrereleases(v string, other string) int { + // the same prerelease! + if v == other { + return 0 + } + + // split both prereleases to analyse their parts + selfPreReleaseMeta := strings.Split(v, ".") + otherPreReleaseMeta := strings.Split(other, ".") + + selfPreReleaseLen := len(selfPreReleaseMeta) + otherPreReleaseLen := len(otherPreReleaseMeta) + + biggestLen := otherPreReleaseLen + if selfPreReleaseLen > otherPreReleaseLen { + biggestLen = selfPreReleaseLen + } + + // loop over the parts to find the first difference + for i := 0; i < biggestLen; i = i + 1 { + partSelfPre := "" + if i < selfPreReleaseLen { + partSelfPre = selfPreReleaseMeta[i] + } + + partOtherPre := "" + if i < otherPreReleaseLen { + partOtherPre = otherPreReleaseMeta[i] + } + + compare := comparePart(partSelfPre, partOtherPre) + // if the parts are equal, continue the loop + if compare != 0 { + return compare + } + } + + return 0 +} + +// Equal tests if two versions are equal.
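+// Build metadata is ignored when comparing, so "1.2+foo" and "1.2+beta" +// are considered equal (see the corresponding TestVersionCompare case).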
+func (v *Version) Equal(o *Version) bool { + return v.Compare(o) == 0 +} + +// GreaterThan tests if this version is greater than another version. +func (v *Version) GreaterThan(o *Version) bool { + return v.Compare(o) > 0 +} + +// LessThan tests if this version is less than another version. +func (v *Version) LessThan(o *Version) bool { + return v.Compare(o) < 0 +} + +// Metadata returns any metadata that was part of the version +// string. +// +// Metadata is anything that comes after the "+" in the version. +// For example, with "1.2.3+beta", the metadata is "beta". +func (v *Version) Metadata() string { + return v.metadata +} + +// Prerelease returns any prerelease data that is part of the version, +// or blank if there is no prerelease data. +// +// Prerelease information is anything that comes after the "-" in the +// version (but before any metadata). For example, with "1.2.3-beta", +// the prerelease information is "beta". +func (v *Version) Prerelease() string { + return v.pre +} + +// Segments returns the numeric segments of the version as a slice. +// +// This excludes any metadata or pre-release information. For example, +// for a version "1.2.3-beta", segments will return a slice of +// 1, 2, 3. +func (v *Version) Segments() []int { + return v.segments +} + +// String returns the full version string, including pre-release +// and metadata information. +func (v *Version) String() string { + var buf bytes.Buffer + fmt.Fprintf(&buf, "%d.%d.%d", v.segments[0], v.segments[1], v.segments[2]) + if v.pre != "" { + fmt.Fprintf(&buf, "-%s", v.pre) + } + if v.metadata != "" { + fmt.Fprintf(&buf, "+%s", v.metadata) + } + + return buf.String() +} diff --git a/vendor/github.com/hashicorp/go-version/version_collection.go b/vendor/github.com/hashicorp/go-version/version_collection.go new file mode 100644 index 000000000..cc888d43e --- /dev/null +++ b/vendor/github.com/hashicorp/go-version/version_collection.go @@ -0,0 +1,17 @@ +package version + +// Collection is a type that implements the sort.Interface interface +// so that versions can be sorted.
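+// For example, sort.Sort(Collection(versions)) orders versions semantically, +// so "0.7.1" < "1.0.0" < "1.1.1" < "1.2.0" < "2.0.0" (TestCollection in +// version_collection_test.go exercises exactly this ordering).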
+type Collection []*Version + +func (v Collection) Len() int { + return len(v) +} + +func (v Collection) Less(i, j int) bool { + return v[i].LessThan(v[j]) +} + +func (v Collection) Swap(i, j int) { + v[i], v[j] = v[j], v[i] +} diff --git a/vendor/github.com/hashicorp/go-version/version_collection_test.go b/vendor/github.com/hashicorp/go-version/version_collection_test.go new file mode 100644 index 000000000..14783d7e7 --- /dev/null +++ b/vendor/github.com/hashicorp/go-version/version_collection_test.go @@ -0,0 +1,46 @@ +package version + +import ( + "reflect" + "sort" + "testing" +) + +func TestCollection(t *testing.T) { + versionsRaw := []string{ + "1.1.1", + "1.0", + "1.2", + "2", + "0.7.1", + } + + versions := make([]*Version, len(versionsRaw)) + for i, raw := range versionsRaw { + v, err := NewVersion(raw) + if err != nil { + t.Fatalf("err: %s", err) + } + + versions[i] = v + } + + sort.Sort(Collection(versions)) + + actual := make([]string, len(versions)) + for i, v := range versions { + actual[i] = v.String() + } + + expected := []string{ + "0.7.1", + "1.0.0", + "1.1.1", + "1.2.0", + "2.0.0", + } + + if !reflect.DeepEqual(actual, expected) { + t.Fatalf("bad: %#v", actual) + } +} diff --git a/vendor/github.com/hashicorp/go-version/version_test.go b/vendor/github.com/hashicorp/go-version/version_test.go new file mode 100644 index 000000000..cc96a048e --- /dev/null +++ b/vendor/github.com/hashicorp/go-version/version_test.go @@ -0,0 +1,208 @@ +package version + +import ( + "reflect" + "testing" +) + +func TestNewVersion(t *testing.T) { + cases := []struct { + version string + err bool + }{ + {"1.2.3", false}, + {"1.0", false}, + {"1", false}, + {"1.2.beta", true}, + {"foo", true}, + {"1.2-5", false}, + {"1.2-beta.5", false}, + {"\n1.2", true}, + {"1.2.0-x.Y.0+metadata", false}, + {"1.2.0-x.Y.0+metadata-width-hypen", false}, + {"1.2.3-rc1-with-hypen", false}, + {"1.2.3.4", true}, + } + + for _, tc := range cases { + _, err := NewVersion(tc.version) + if tc.err && err == nil { + t.Fatalf("expected error for version: %s", tc.version) + } else if !tc.err && err != nil { + t.Fatalf("error for version %s: %s", tc.version, err) + } + } +} + +func TestVersionCompare(t *testing.T) { + cases := []struct { + v1 string + v2 string + expected int + }{ + {"1.2.3", "1.4.5", -1}, + {"1.2-beta", "1.2-beta", 0}, + {"1.2", "1.1.4", 1}, + {"1.2", "1.2-beta", 1}, + {"1.2+foo", "1.2+beta", 0}, + } + + for _, tc := range cases { + v1, err := NewVersion(tc.v1) + if err != nil { + t.Fatalf("err: %s", err) + } + + v2, err := NewVersion(tc.v2) + if err != nil { + t.Fatalf("err: %s", err) + } + + actual := v1.Compare(v2) + expected := tc.expected + if actual != expected { + t.Fatalf( + "%s <=> %s\nexpected: %d\nactual: %d", + tc.v1, tc.v2, + expected, actual) + } + } +} + +func TestComparePreReleases(t *testing.T) { + cases := []struct { + v1 string + v2 string + expected int + }{ + {"1.2-beta.2", "1.2-beta.2", 0}, + {"1.2-beta.1", "1.2-beta.2", -1}, + {"3.2-alpha.1", "3.2-alpha", 1}, + {"1.2-beta.2", "1.2-beta.1", 1}, + {"1.2-beta", "1.2-beta.3", -1}, + {"1.2-alpha", "1.2-beta.3", -1}, + {"1.2-beta", "1.2-alpha.3", 1}, + {"3.0-alpha.3", "3.0-rc.1", -1}, + {"3.0-alpha3", "3.0-rc1", -1}, + {"3.0-alpha.1", "3.0-alpha.beta", -1}, + {"5.4-alpha", "5.4-alpha.beta", 1}, + } + + for _, tc := range cases { + v1, err := NewVersion(tc.v1) + if err != nil { + t.Fatalf("err: %s", err) + } + + v2, err := NewVersion(tc.v2) + if err != nil { + t.Fatalf("err: %s", err) + } + + actual := v1.Compare(v2) + expected := tc.expected + 
if actual != expected { + t.Fatalf( + "%s <=> %s\nexpected: %d\nactual: %d", + tc.v1, tc.v2, + expected, actual) + } + } +} + +func TestVersionMetadata(t *testing.T) { + cases := []struct { + version string + expected string + }{ + {"1.2.3", ""}, + {"1.2-beta", ""}, + {"1.2.0-x.Y.0", ""}, + {"1.2.0-x.Y.0+metadata", "metadata"}, + } + + for _, tc := range cases { + v, err := NewVersion(tc.version) + if err != nil { + t.Fatalf("err: %s", err) + } + + actual := v.Metadata() + expected := tc.expected + if actual != expected { + t.Fatalf("expected: %s\nactual: %s", expected, actual) + } + } +} + +func TestVersionPrerelease(t *testing.T) { + cases := []struct { + version string + expected string + }{ + {"1.2.3", ""}, + {"1.2-beta", "beta"}, + {"1.2.0-x.Y.0", "x.Y.0"}, + {"1.2.0-x.Y.0+metadata", "x.Y.0"}, + } + + for _, tc := range cases { + v, err := NewVersion(tc.version) + if err != nil { + t.Fatalf("err: %s", err) + } + + actual := v.Prerelease() + expected := tc.expected + if actual != expected { + t.Fatalf("expected: %s\nactual: %s", expected, actual) + } + } +} + +func TestVersionSegments(t *testing.T) { + cases := []struct { + version string + expected []int + }{ + {"1.2.3", []int{1, 2, 3}}, + {"1.2-beta", []int{1, 2, 0}}, + {"1-x.Y.0", []int{1, 0, 0}}, + {"1.2.0-x.Y.0+metadata", []int{1, 2, 0}}, + } + + for _, tc := range cases { + v, err := NewVersion(tc.version) + if err != nil { + t.Fatalf("err: %s", err) + } + + actual := v.Segments() + expected := tc.expected + if !reflect.DeepEqual(actual, expected) { + t.Fatalf("expected: %#v\nactual: %#v", expected, actual) + } + } +} + +func TestVersionString(t *testing.T) { + cases := [][]string{ + {"1.2.3", "1.2.3"}, + {"1.2-beta", "1.2.0-beta"}, + {"1.2.0-x.Y.0", "1.2.0-x.Y.0"}, + {"1.2.0-x.Y.0+metadata", "1.2.0-x.Y.0+metadata"}, + } + + for _, tc := range cases { + v, err := NewVersion(tc[0]) + if err != nil { + t.Fatalf("err: %s", err) + } + + actual := v.String() + expected := tc[1] + if actual != expected { + t.Fatalf("expected: %s\nactual: %s", expected, actual) + } + } +} diff --git a/vendor/github.com/hashicorp/hcl/.gitignore b/vendor/github.com/hashicorp/hcl/.gitignore new file mode 100644 index 000000000..8ed84fe01 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/.gitignore @@ -0,0 +1,7 @@ +y.output + +# ignore intellij files +.idea +*.iml +*.ipr +*.iws diff --git a/vendor/github.com/hashicorp/hcl/.travis.yml b/vendor/github.com/hashicorp/hcl/.travis.yml new file mode 100644 index 000000000..83dc540ef --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/.travis.yml @@ -0,0 +1,3 @@ +sudo: false +language: go +go: 1.5 diff --git a/vendor/github.com/hashicorp/hcl/LICENSE b/vendor/github.com/hashicorp/hcl/LICENSE new file mode 100644 index 000000000..c33dcc7c9 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/LICENSE @@ -0,0 +1,354 @@ +Mozilla Public License, version 2.0 + +1. Definitions + +1.1. “Contributor” + + means each individual or legal entity that creates, contributes to the + creation of, or owns Covered Software. + +1.2. “Contributor Version” + + means the combination of the Contributions of others (if any) used by a + Contributor and that particular Contributor’s Contribution. + +1.3. “Contribution” + + means Covered Software of a particular Contributor. + +1.4. “Covered Software” + + means Source Code Form to which the initial Contributor has attached the + notice in Exhibit A, the Executable Form of such Source Code Form, and + Modifications of such Source Code Form, in each case including portions + thereof. + +1.5.
“Incompatible With Secondary Licenses” + means + + a. that the initial Contributor has attached the notice described in + Exhibit B to the Covered Software; or + + b. that the Covered Software was made available under the terms of version + 1.1 or earlier of the License, but not also under the terms of a + Secondary License. + +1.6. “Executable Form” + + means any form of the work other than Source Code Form. + +1.7. “Larger Work” + + means a work that combines Covered Software with other material, in a separate + file or files, that is not Covered Software. + +1.8. “License” + + means this document. + +1.9. “Licensable” + + means having the right to grant, to the maximum extent possible, whether at the + time of the initial grant or subsequently, any and all of the rights conveyed by + this License. + +1.10. “Modifications” + + means any of the following: + + a. any file in Source Code Form that results from an addition to, deletion + from, or modification of the contents of Covered Software; or + + b. any new file in Source Code Form that contains any Covered Software. + +1.11. “Patent Claims” of a Contributor + + means any patent claim(s), including without limitation, method, process, + and apparatus claims, in any patent Licensable by such Contributor that + would be infringed, but for the grant of the License, by the making, + using, selling, offering for sale, having made, import, or transfer of + either its Contributions or its Contributor Version. + +1.12. “Secondary License” + + means either the GNU General Public License, Version 2.0, the GNU Lesser + General Public License, Version 2.1, the GNU Affero General Public + License, Version 3.0, or any later versions of those licenses. + +1.13. “Source Code Form” + + means the form of the work preferred for making modifications. + +1.14. “You” (or “Your”) + + means an individual or a legal entity exercising rights under this + License. For legal entities, “You” includes any entity that controls, is + controlled by, or is under common control with You. For purposes of this + definition, “control” means (a) the power, direct or indirect, to cause + the direction or management of such entity, whether by contract or + otherwise, or (b) ownership of more than fifty percent (50%) of the + outstanding shares or beneficial ownership of such entity. + + +2. License Grants and Conditions + +2.1. Grants + + Each Contributor hereby grants You a world-wide, royalty-free, + non-exclusive license: + + a. under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or as + part of a Larger Work; and + + b. under Patent Claims of such Contributor to make, use, sell, offer for + sale, have made, import, and otherwise transfer either its Contributions + or its Contributor Version. + +2.2. Effective Date + + The licenses granted in Section 2.1 with respect to any Contribution become + effective for each Contribution on the date the Contributor first distributes + such Contribution. + +2.3. Limitations on Grant Scope + + The licenses granted in this Section 2 are the only rights granted under this + License. No additional rights or licenses will be implied from the distribution + or licensing of Covered Software under this License. Notwithstanding Section + 2.1(b) above, no patent license is granted by a Contributor: + + a.
for any code that a Contributor has removed from Covered Software; or + + b. for infringements caused by: (i) Your and any other third party’s + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + + c. under Patent Claims infringed by Covered Software in the absence of its + Contributions. + + This License does not grant any rights in the trademarks, service marks, or + logos of any Contributor (except as may be necessary to comply with the + notice requirements in Section 3.4). + +2.4. Subsequent Licenses + + No Contributor makes additional grants as a result of Your choice to + distribute the Covered Software under a subsequent version of this License + (see Section 10.2) or under the terms of a Secondary License (if permitted + under the terms of Section 3.3). + +2.5. Representation + + Each Contributor represents that the Contributor believes its Contributions + are its original creation(s) or it has sufficient rights to grant the + rights to its Contributions conveyed by this License. + +2.6. Fair Use + + This License is not intended to limit any rights You have under applicable + copyright doctrines of fair use, fair dealing, or other equivalents. + +2.7. Conditions + + Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in + Section 2.1. + + +3. Responsibilities + +3.1. Distribution of Source Form + + All distribution of Covered Software in Source Code Form, including any + Modifications that You create or to which You contribute, must be under the + terms of this License. You must inform recipients that the Source Code Form + of the Covered Software is governed by the terms of this License, and how + they can obtain a copy of this License. You may not attempt to alter or + restrict the recipients’ rights in the Source Code Form. + +3.2. Distribution of Executable Form + + If You distribute Covered Software in Executable Form then: + + a. such Covered Software must also be made available in Source Code Form, + as described in Section 3.1, and You must inform recipients of the + Executable Form how they can obtain a copy of such Source Code Form by + reasonable means in a timely manner, at a charge no more than the cost + of distribution to the recipient; and + + b. You may distribute such Executable Form under the terms of this License, + or sublicense it under different terms, provided that the license for + the Executable Form does not attempt to limit or alter the recipients’ + rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + + You may create and distribute a Larger Work under terms of Your choice, + provided that You also comply with the requirements of this License for the + Covered Software. If the Larger Work is a combination of Covered Software + with a work governed by one or more Secondary Licenses, and the Covered + Software is not Incompatible With Secondary Licenses, this License permits + You to additionally distribute such Covered Software under the terms of + such Secondary License(s), so that the recipient of the Larger Work may, at + their option, further distribute the Covered Software under the terms of + either this License or such Secondary License(s). + +3.4. 
Notices + + You may not remove or alter the substance of any license notices (including + copyright notices, patent notices, disclaimers of warranty, or limitations + of liability) contained within the Source Code Form of the Covered + Software, except that You may alter any license notices to the extent + required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + + You may choose to offer, and to charge a fee for, warranty, support, + indemnity or liability obligations to one or more recipients of Covered + Software. However, You may do so only on Your own behalf, and not on behalf + of any Contributor. You must make it absolutely clear that any such + warranty, support, indemnity, or liability obligation is offered by You + alone, and You hereby agree to indemnify every Contributor for any + liability incurred by such Contributor as a result of warranty, support, + indemnity or liability terms You offer. You may include additional + disclaimers of warranty and limitations of liability specific to any + jurisdiction. + +4. Inability to Comply Due to Statute or Regulation + + If it is impossible for You to comply with any of the terms of this License + with respect to some or all of the Covered Software due to statute, judicial + order, or regulation then You must: (a) comply with the terms of this License + to the maximum extent possible; and (b) describe the limitations and the code + they affect. Such description must be placed in a text file included with all + distributions of the Covered Software under this License. Except to the + extent prohibited by statute or regulation, such description must be + sufficiently detailed for a recipient of ordinary skill to be able to + understand it. + +5. Termination + +5.1. The rights granted under this License will terminate automatically if You + fail to comply with any of its terms. However, if You become compliant, + then the rights granted under this License from a particular Contributor + are reinstated (a) provisionally, unless and until such Contributor + explicitly and finally terminates Your grants, and (b) on an ongoing basis, + if such Contributor fails to notify You of the non-compliance by some + reasonable means prior to 60 days after You have come back into compliance. + Moreover, Your grants from a particular Contributor are reinstated on an + ongoing basis if such Contributor notifies You of the non-compliance by + some reasonable means, this is the first time You have received notice of + non-compliance with this License from such Contributor, and You become + compliant prior to 30 days after Your receipt of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent + infringement claim (excluding declaratory judgment actions, counter-claims, + and cross-claims) alleging that a Contributor Version directly or + indirectly infringes any patent, then the rights granted to You by any and + all Contributors for the Covered Software under Section 2.1 of this License + shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user + license agreements (excluding distributors and resellers) which have been + validly granted by You or Your distributors under this License prior to + termination shall survive termination. + +6. 
Disclaimer of Warranty
+
+   Covered Software is provided under this License on an "as is" basis, without
+   warranty of any kind, either expressed, implied, or statutory, including,
+   without limitation, warranties that the Covered Software is free of defects,
+   merchantable, fit for a particular purpose or non-infringing. The entire
+   risk as to the quality and performance of the Covered Software is with You.
+   Should any Covered Software prove defective in any respect, You (not any
+   Contributor) assume the cost of any necessary servicing, repair, or
+   correction. This disclaimer of warranty constitutes an essential part of this
+   License. No use of any Covered Software is authorized under this License
+   except under this disclaimer.
+
+7. Limitation of Liability
+
+   Under no circumstances and under no legal theory, whether tort (including
+   negligence), contract, or otherwise, shall any Contributor, or anyone who
+   distributes Covered Software as permitted above, be liable to You for any
+   direct, indirect, special, incidental, or consequential damages of any
+   character including, without limitation, damages for lost profits, loss of
+   goodwill, work stoppage, computer failure or malfunction, or any and all
+   other commercial damages or losses, even if such party shall have been
+   informed of the possibility of such damages. This limitation of liability
+   shall not apply to liability for death or personal injury resulting from such
+   party's negligence to the extent applicable law prohibits such limitation.
+   Some jurisdictions do not allow the exclusion or limitation of incidental or
+   consequential damages, so this exclusion and limitation may not apply to You.
+
+8. Litigation
+
+   Any litigation relating to this License may be brought only in the courts of
+   a jurisdiction where the defendant maintains its principal place of business
+   and such litigation shall be governed by laws of that jurisdiction, without
+   reference to its conflict-of-law provisions. Nothing in this Section shall
+   prevent a party's ability to bring cross-claims or counter-claims.
+
+9. Miscellaneous
+
+   This License represents the complete agreement concerning the subject matter
+   hereof. If any provision of this License is held to be unenforceable, such
+   provision shall be reformed only to the extent necessary to make it
+   enforceable. Any law or regulation which provides that the language of a
+   contract shall be construed against the drafter shall not be used to construe
+   this License against a Contributor.
+
+
+10. Versions of the License
+
+10.1. New Versions
+
+      Mozilla Foundation is the license steward. Except as provided in Section
+      10.3, no one other than the license steward has the right to modify or
+      publish new versions of this License. Each version will be given a
+      distinguishing version number.
+
+10.2. Effect of New Versions
+
+      You may distribute the Covered Software under the terms of the version of
+      the License under which You originally received the Covered Software, or
+      under the terms of any subsequent version published by the license
+      steward.
+
+10.3. Modified Versions
+
+      If you create software not governed by this License, and you want to
+      create a new license for such software, you may create and use a modified
+      version of this License if you rename the license and remove any
+      references to the name of the license steward (except to note that such
+      modified license differs from this License).
+
+10.4. 
Distributing Source Code Form that is Incompatible With Secondary Licenses
+      If You choose to distribute Source Code Form that is Incompatible With
+      Secondary Licenses under the terms of this version of the License, the
+      notice described in Exhibit B of this License must be attached.
+
+Exhibit A - Source Code Form License Notice
+
+      This Source Code Form is subject to the
+      terms of the Mozilla Public License, v.
+      2.0. If a copy of the MPL was not
+      distributed with this file, You can
+      obtain one at
+      http://mozilla.org/MPL/2.0/.
+
+If it is not possible or desirable to put the notice in a particular file, then
+You may include the notice in a location (such as a LICENSE file in a relevant
+directory) where a recipient would be likely to look for such a notice.
+
+You may add additional accurate notices of copyright ownership.
+
+Exhibit B - "Incompatible With Secondary Licenses" Notice
+
+      This Source Code Form is "Incompatible
+      With Secondary Licenses", as defined by
+      the Mozilla Public License, v. 2.0.
+
diff --git a/vendor/github.com/hashicorp/hcl/Makefile b/vendor/github.com/hashicorp/hcl/Makefile
new file mode 100644
index 000000000..ad404a811
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/Makefile
@@ -0,0 +1,17 @@
+TEST?=./...
+
+default: test
+
+fmt: generate
+	go fmt ./...
+
+test: generate
+	go test $(TEST) $(TESTARGS)
+
+generate:
+	go generate ./...
+
+updatedeps:
+	go get -u golang.org/x/tools/cmd/stringer
+
+.PHONY: default generate test updatedeps
diff --git a/vendor/github.com/hashicorp/hcl/README.md b/vendor/github.com/hashicorp/hcl/README.md
new file mode 100644
index 000000000..fafa6ad03
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/README.md
@@ -0,0 +1,94 @@
+# HCL
+
+[![GoDoc](https://godoc.org/github.com/hashicorp/hcl?status.png)](https://godoc.org/github.com/hashicorp/hcl) [![Build Status](https://travis-ci.org/hashicorp/hcl.svg?branch=master)](https://travis-ci.org/hashicorp/hcl)
+
+HCL (HashiCorp Configuration Language) is a configuration language built
+by HashiCorp. The goal of HCL is to build a structured configuration language
+that is both human and machine friendly for use with command-line tools,
+specifically targeted towards DevOps tools, servers, etc.
+
+HCL is also fully JSON compatible. That is, JSON can be used as completely
+valid input to a system expecting HCL. This helps make systems
+interoperable with other systems.
+
+HCL is heavily inspired by
+[libucl](https://github.com/vstakhov/libucl),
+nginx configuration, and other similar languages.
+
+## Why?
+
+A common question when viewing HCL is: why not
+JSON, YAML, etc.?
+
+Prior to HCL, the tools we built at [HashiCorp](http://www.hashicorp.com)
+used a variety of configuration languages, from full programming languages
+such as Ruby to complete data structure languages such as JSON. What we
+learned is that some people wanted human-friendly configuration languages
+and some people wanted machine-friendly languages.
+
+JSON fits a nice balance in this, but is fairly verbose and most
+importantly doesn't support comments. With YAML, we found that beginners
+had a really hard time determining what the actual structure was, and
+ended up guessing more often than not whether to use a hyphen, colon, etc.
+in order to represent some configuration key.
+
+Full programming languages such as Ruby enable complex behavior
+a configuration language shouldn't usually allow, and also force
+people to learn some amount of Ruby.
+
+Because of this, we decided to create our own configuration language
+that is JSON-compatible. Our configuration language (HCL) is designed
+to be written and modified by humans. The API for HCL allows JSON
+as an input so that it is also machine-friendly (machines can generate
+JSON instead of trying to generate HCL).
+
+Our goal with HCL is not to alienate other configuration languages.
+It is instead to provide HCL as a specialized language for our tools,
+and JSON as the interoperability layer.
+
+## Syntax
+
+For a complete grammar, please see the parser itself. A high-level overview
+of the syntax and grammar is listed here.
+
+  * Single line comments start with `#` or `//`
+
+  * Multi-line comments are wrapped in `/*` and `*/`. Nested block comments
+    are not allowed. A multi-line comment (also known as a block comment)
+    terminates at the first `*/` found.
+
+  * Values are assigned with the syntax `key = value` (whitespace doesn't
+    matter). The value can be any primitive: a string, number, boolean,
+    object, or list.
+
+  * Strings are double-quoted and can contain any UTF-8 characters.
+    Example: `"Hello, World"`
+
+  * Numbers are assumed to be base 10. If you prefix a number with 0x,
+    it is treated as hexadecimal. If it is prefixed with 0, it is
+    treated as octal. Numbers can be in scientific notation: "1e10".
+
+  * Boolean values: `true`, `false`
+
+  * Arrays can be made by wrapping values in `[]`. Example:
+    `["foo", "bar", 42]`. Arrays can contain primitives
+    and other arrays, but cannot contain objects. Objects must
+    use the block syntax shown below.
+
+Objects and nested objects are created using the structure shown below:
+
+```
+variable "ami" {
+    description = "the AMI to use"
+}
+```
+
+## Thanks
+
+Thanks to:
+
+  * [@vstakhov](https://github.com/vstakhov) - The original libucl parser
+    and syntax that HCL was based off of.
+
+  * [@fatih](https://github.com/fatih) - The rewritten HCL parser
+    in pure Go (no goyacc) and support for a printer.
diff --git a/vendor/github.com/hashicorp/hcl/decoder.go b/vendor/github.com/hashicorp/hcl/decoder.go
new file mode 100644
index 000000000..b0c3fc6de
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/decoder.go
@@ -0,0 +1,627 @@
+package hcl
+
+import (
+	"errors"
+	"fmt"
+	"reflect"
+	"sort"
+	"strconv"
+	"strings"
+
+	"github.com/hashicorp/hcl/hcl/ast"
+	"github.com/hashicorp/hcl/hcl/parser"
+	"github.com/hashicorp/hcl/hcl/token"
+)
+
+// tagName is the struct tag key used to configure HCL decoding of struct fields.
+const tagName = "hcl"
+
+var (
+	// nodeType holds a reference to the type of ast.Node
+	nodeType reflect.Type = findNodeType()
+)
+
+// Decode reads the given input and decodes it into the structure
+// given by `out`.
+func Decode(out interface{}, in string) error {
+	obj, err := Parse(in)
+	if err != nil {
+		return err
+	}
+
+	return DecodeObject(out, obj)
+}
+
+// DecodeObject is a lower-level version of Decode. It decodes a
+// raw Object into the given output.
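As a quick illustration of the `Decode` API above, here is a minimal sketch that decodes the README's `variable` block into Go types. The `Config` and `Variable` types and their field tags are assumptions for the sketch (the `,key` and `,expand` tags follow the patterns exercised by the vendored tests below), not part of the vendored source:

```go
package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/hcl"
)

// Variable mirrors the README's `variable "ami" { ... }` block.
type Variable struct {
	Name        string `hcl:",key"` // filled with the block key, e.g. "ami"
	Description string `hcl:"description"`
}

type Config struct {
	Variables []Variable `hcl:"variable,expand"`
}

func main() {
	input := `
variable "ami" {
    description = "the AMI to use"
}
`
	var config Config
	// Decode parses the HCL (or JSON) input and fills in the struct.
	if err := hcl.Decode(&config, input); err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%+v\n", config.Variables)
}
```

If the decoder behaves as in the tests further down, this should print one `Variable` with `Name: "ami"` and the description text.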
+func DecodeObject(out interface{}, n ast.Node) error { + val := reflect.ValueOf(out) + if val.Kind() != reflect.Ptr { + return errors.New("result must be a pointer") + } + + // If we have the file, we really decode the root node + if f, ok := n.(*ast.File); ok { + n = f.Node + } + + var d decoder + return d.decode("root", n, val.Elem()) +} + +type decoder struct { + stack []reflect.Kind +} + +func (d *decoder) decode(name string, node ast.Node, result reflect.Value) error { + k := result + + // If we have an interface with a valid value, we use that + // for the check. + if result.Kind() == reflect.Interface { + elem := result.Elem() + if elem.IsValid() { + k = elem + } + } + + // Push current onto stack unless it is an interface. + if k.Kind() != reflect.Interface { + d.stack = append(d.stack, k.Kind()) + + // Schedule a pop + defer func() { + d.stack = d.stack[:len(d.stack)-1] + }() + } + + switch k.Kind() { + case reflect.Bool: + return d.decodeBool(name, node, result) + case reflect.Float64: + return d.decodeFloat(name, node, result) + case reflect.Int: + return d.decodeInt(name, node, result) + case reflect.Interface: + // When we see an interface, we make our own thing + return d.decodeInterface(name, node, result) + case reflect.Map: + return d.decodeMap(name, node, result) + case reflect.Ptr: + return d.decodePtr(name, node, result) + case reflect.Slice: + return d.decodeSlice(name, node, result) + case reflect.String: + return d.decodeString(name, node, result) + case reflect.Struct: + return d.decodeStruct(name, node, result) + default: + return &parser.PosError{ + Pos: node.Pos(), + Err: fmt.Errorf("%s: unknown kind to decode into: %s", name, k.Kind()), + } + } +} + +func (d *decoder) decodeBool(name string, node ast.Node, result reflect.Value) error { + switch n := node.(type) { + case *ast.LiteralType: + if n.Token.Type == token.BOOL { + v, err := strconv.ParseBool(n.Token.Text) + if err != nil { + return err + } + + result.Set(reflect.ValueOf(v)) + return nil + } + } + + return &parser.PosError{ + Pos: node.Pos(), + Err: fmt.Errorf("%s: unknown type %T", name, node), + } +} + +func (d *decoder) decodeFloat(name string, node ast.Node, result reflect.Value) error { + switch n := node.(type) { + case *ast.LiteralType: + if n.Token.Type == token.FLOAT { + v, err := strconv.ParseFloat(n.Token.Text, 64) + if err != nil { + return err + } + + result.Set(reflect.ValueOf(v)) + return nil + } + } + + return &parser.PosError{ + Pos: node.Pos(), + Err: fmt.Errorf("%s: unknown type %T", name, node), + } +} + +func (d *decoder) decodeInt(name string, node ast.Node, result reflect.Value) error { + switch n := node.(type) { + case *ast.LiteralType: + switch n.Token.Type { + case token.NUMBER: + v, err := strconv.ParseInt(n.Token.Text, 0, 0) + if err != nil { + return err + } + + result.Set(reflect.ValueOf(int(v))) + return nil + case token.STRING: + v, err := strconv.ParseInt(n.Token.Value().(string), 0, 0) + if err != nil { + return err + } + + result.Set(reflect.ValueOf(int(v))) + return nil + } + } + + return &parser.PosError{ + Pos: node.Pos(), + Err: fmt.Errorf("%s: unknown type %T", name, node), + } +} + +func (d *decoder) decodeInterface(name string, node ast.Node, result reflect.Value) error { + // When we see an ast.Node, we retain the value to enable deferred decoding. 
+ // Very useful in situations where we want to preserve ast.Node information + // like Pos + if result.Type() == nodeType && result.CanSet() { + result.Set(reflect.ValueOf(node)) + return nil + } + + var set reflect.Value + redecode := true + + // For testing types, ObjectType should just be treated as a list. We + // set this to a temporary var because we want to pass in the real node. + testNode := node + if ot, ok := node.(*ast.ObjectType); ok { + testNode = ot.List + } + + switch n := testNode.(type) { + case *ast.ObjectList: + // If we're at the root or we're directly within a slice, then we + // decode objects into map[string]interface{}, otherwise we decode + // them into lists. + if len(d.stack) == 0 || d.stack[len(d.stack)-1] == reflect.Slice { + var temp map[string]interface{} + tempVal := reflect.ValueOf(temp) + result := reflect.MakeMap( + reflect.MapOf( + reflect.TypeOf(""), + tempVal.Type().Elem())) + + set = result + } else { + var temp []map[string]interface{} + tempVal := reflect.ValueOf(temp) + result := reflect.MakeSlice( + reflect.SliceOf(tempVal.Type().Elem()), 0, len(n.Items)) + set = result + } + case *ast.ObjectType: + // If we're at the root or we're directly within a slice, then we + // decode objects into map[string]interface{}, otherwise we decode + // them into lists. + if len(d.stack) == 0 || d.stack[len(d.stack)-1] == reflect.Slice { + var temp map[string]interface{} + tempVal := reflect.ValueOf(temp) + result := reflect.MakeMap( + reflect.MapOf( + reflect.TypeOf(""), + tempVal.Type().Elem())) + + set = result + } else { + var temp []map[string]interface{} + tempVal := reflect.ValueOf(temp) + result := reflect.MakeSlice( + reflect.SliceOf(tempVal.Type().Elem()), 0, 1) + set = result + } + case *ast.ListType: + var temp []interface{} + tempVal := reflect.ValueOf(temp) + result := reflect.MakeSlice( + reflect.SliceOf(tempVal.Type().Elem()), 0, 0) + set = result + case *ast.LiteralType: + switch n.Token.Type { + case token.BOOL: + var result bool + set = reflect.Indirect(reflect.New(reflect.TypeOf(result))) + case token.FLOAT: + var result float64 + set = reflect.Indirect(reflect.New(reflect.TypeOf(result))) + case token.NUMBER: + var result int + set = reflect.Indirect(reflect.New(reflect.TypeOf(result))) + case token.STRING, token.HEREDOC: + set = reflect.Indirect(reflect.New(reflect.TypeOf(""))) + default: + return &parser.PosError{ + Pos: node.Pos(), + Err: fmt.Errorf("%s: cannot decode into interface: %T", name, node), + } + } + default: + return fmt.Errorf( + "%s: cannot decode into interface: %T", + name, node) + } + + // Set the result to what its supposed to be, then reset + // result so we don't reflect into this method anymore. + result.Set(set) + + if redecode { + // Revisit the node so that we can use the newly instantiated + // thing and populate it. 
+ if err := d.decode(name, node, result); err != nil { + return err + } + } + + return nil +} + +func (d *decoder) decodeMap(name string, node ast.Node, result reflect.Value) error { + if item, ok := node.(*ast.ObjectItem); ok { + node = &ast.ObjectList{Items: []*ast.ObjectItem{item}} + } + + if ot, ok := node.(*ast.ObjectType); ok { + node = ot.List + } + + n, ok := node.(*ast.ObjectList) + if !ok { + return &parser.PosError{ + Pos: node.Pos(), + Err: fmt.Errorf("%s: not an object type for map (%T)", name, node), + } + } + + // If we have an interface, then we can address the interface, + // but not the slice itself, so get the element but set the interface + set := result + if result.Kind() == reflect.Interface { + result = result.Elem() + } + + resultType := result.Type() + resultElemType := resultType.Elem() + resultKeyType := resultType.Key() + if resultKeyType.Kind() != reflect.String { + return &parser.PosError{ + Pos: node.Pos(), + Err: fmt.Errorf("%s: map must have string keys", name), + } + } + + // Make a map if it is nil + resultMap := result + if result.IsNil() { + resultMap = reflect.MakeMap( + reflect.MapOf(resultKeyType, resultElemType)) + } + + // Go through each element and decode it. + done := make(map[string]struct{}) + for _, item := range n.Items { + if item.Val == nil { + continue + } + + // Get the key we're dealing with, which is the first item + keyStr := item.Keys[0].Token.Value().(string) + + // If we've already processed this key, then ignore it + if _, ok := done[keyStr]; ok { + continue + } + + // Determine the value. If we have more than one key, then we + // get the objectlist of only these keys. + itemVal := item.Val + if len(item.Keys) > 1 { + itemVal = n.Filter(keyStr) + done[keyStr] = struct{}{} + } + + // Make the field name + fieldName := fmt.Sprintf("%s.%s", name, keyStr) + + // Get the key/value as reflection values + key := reflect.ValueOf(keyStr) + val := reflect.Indirect(reflect.New(resultElemType)) + + // If we have a pre-existing value in the map, use that + oldVal := resultMap.MapIndex(key) + if oldVal.IsValid() { + val.Set(oldVal) + } + + // Decode! + if err := d.decode(fieldName, itemVal, val); err != nil { + return err + } + + // Set the value on the map + resultMap.SetMapIndex(key, val) + } + + // Set the final map if we can + set.Set(resultMap) + return nil +} + +func (d *decoder) decodePtr(name string, node ast.Node, result reflect.Value) error { + // Create an element of the concrete (non pointer) type and decode + // into that. Then set the value of the pointer to this type. 
+ resultType := result.Type() + resultElemType := resultType.Elem() + val := reflect.New(resultElemType) + if err := d.decode(name, node, reflect.Indirect(val)); err != nil { + return err + } + + result.Set(val) + return nil +} + +func (d *decoder) decodeSlice(name string, node ast.Node, result reflect.Value) error { + // If we have an interface, then we can address the interface, + // but not the slice itself, so get the element but set the interface + set := result + if result.Kind() == reflect.Interface { + result = result.Elem() + } + + // Create the slice if it isn't nil + resultType := result.Type() + resultElemType := resultType.Elem() + if result.IsNil() { + resultSliceType := reflect.SliceOf(resultElemType) + result = reflect.MakeSlice( + resultSliceType, 0, 0) + } + + // Figure out the items we'll be copying into the slice + var items []ast.Node + switch n := node.(type) { + case *ast.ObjectList: + items = make([]ast.Node, len(n.Items)) + for i, item := range n.Items { + items[i] = item + } + case *ast.ObjectType: + items = []ast.Node{n} + case *ast.ListType: + items = n.List + default: + return &parser.PosError{ + Pos: node.Pos(), + Err: fmt.Errorf("unknown slice type: %T", node), + } + } + + for i, item := range items { + fieldName := fmt.Sprintf("%s[%d]", name, i) + + // Decode + val := reflect.Indirect(reflect.New(resultElemType)) + if err := d.decode(fieldName, item, val); err != nil { + return err + } + + // Append it onto the slice + result = reflect.Append(result, val) + } + + set.Set(result) + return nil +} + +func (d *decoder) decodeString(name string, node ast.Node, result reflect.Value) error { + switch n := node.(type) { + case *ast.LiteralType: + switch n.Token.Type { + case token.NUMBER: + result.Set(reflect.ValueOf(n.Token.Text).Convert(result.Type())) + return nil + case token.STRING, token.HEREDOC: + result.Set(reflect.ValueOf(n.Token.Value()).Convert(result.Type())) + return nil + } + } + + return &parser.PosError{ + Pos: node.Pos(), + Err: fmt.Errorf("%s: unknown type for string %T", name, node), + } +} + +func (d *decoder) decodeStruct(name string, node ast.Node, result reflect.Value) error { + var item *ast.ObjectItem + if it, ok := node.(*ast.ObjectItem); ok { + item = it + node = it.Val + } + + if ot, ok := node.(*ast.ObjectType); ok { + node = ot.List + } + + list, ok := node.(*ast.ObjectList) + if !ok { + return &parser.PosError{ + Pos: node.Pos(), + Err: fmt.Errorf("%s: not an object type for struct (%T)", name, node), + } + } + + // This slice will keep track of all the structs we'll be decoding. + // There can be more than one struct if there are embedded structs + // that are squashed. + structs := make([]reflect.Value, 1, 5) + structs[0] = result + + // Compile the list of all the fields that we're going to be decoding + // from all the structs. + fields := make(map[*reflect.StructField]reflect.Value) + for len(structs) > 0 { + structVal := structs[0] + structs = structs[1:] + + structType := structVal.Type() + for i := 0; i < structType.NumField(); i++ { + fieldType := structType.Field(i) + + if fieldType.Anonymous { + fieldKind := fieldType.Type.Kind() + if fieldKind != reflect.Struct { + return &parser.PosError{ + Pos: node.Pos(), + Err: fmt.Errorf("%s: unsupported type to struct: %s", + fieldType.Name, fieldKind), + } + } + + // We have an embedded field. We "squash" the fields down + // if specified in the tag. 
+ squash := false + tagParts := strings.Split(fieldType.Tag.Get(tagName), ",") + for _, tag := range tagParts[1:] { + if tag == "squash" { + squash = true + break + } + } + + if squash { + structs = append( + structs, result.FieldByName(fieldType.Name)) + continue + } + } + + // Normal struct field, store it away + fields[&fieldType] = structVal.Field(i) + } + } + + usedKeys := make(map[string]struct{}) + decodedFields := make([]string, 0, len(fields)) + decodedFieldsVal := make([]reflect.Value, 0) + unusedKeysVal := make([]reflect.Value, 0) + for fieldType, field := range fields { + if !field.IsValid() { + // This should never happen + panic("field is not valid") + } + + // If we can't set the field, then it is unexported or something, + // and we just continue onwards. + if !field.CanSet() { + continue + } + + fieldName := fieldType.Name + + tagValue := fieldType.Tag.Get(tagName) + tagParts := strings.SplitN(tagValue, ",", 2) + if len(tagParts) >= 2 { + switch tagParts[1] { + case "decodedFields": + decodedFieldsVal = append(decodedFieldsVal, field) + continue + case "key": + if item == nil { + return &parser.PosError{ + Pos: node.Pos(), + Err: fmt.Errorf("%s: %s asked for 'key', impossible", + name, fieldName), + } + } + + field.SetString(item.Keys[0].Token.Value().(string)) + continue + case "unusedKeys": + unusedKeysVal = append(unusedKeysVal, field) + continue + } + } + + if tagParts[0] != "" { + fieldName = tagParts[0] + } + + // Determine the element we'll use to decode. If it is a single + // match (only object with the field), then we decode it exactly. + // If it is a prefix match, then we decode the matches. + filter := list.Filter(fieldName) + prefixMatches := filter.Children() + matches := filter.Elem() + if len(matches.Items) == 0 && len(prefixMatches.Items) == 0 { + continue + } + + // Track the used key + usedKeys[fieldName] = struct{}{} + + // Create the field name and decode. We range over the elements + // because we actually want the value. 
+ fieldName = fmt.Sprintf("%s.%s", name, fieldName) + if len(prefixMatches.Items) > 0 { + if err := d.decode(fieldName, prefixMatches, field); err != nil { + return err + } + } + for _, match := range matches.Items { + var decodeNode ast.Node = match.Val + if ot, ok := decodeNode.(*ast.ObjectType); ok { + decodeNode = &ast.ObjectList{Items: ot.List.Items} + } + + if err := d.decode(fieldName, decodeNode, field); err != nil { + return err + } + } + + decodedFields = append(decodedFields, fieldType.Name) + } + + if len(decodedFieldsVal) > 0 { + // Sort it so that it is deterministic + sort.Strings(decodedFields) + + for _, v := range decodedFieldsVal { + v.Set(reflect.ValueOf(decodedFields)) + } + } + + return nil +} + +// findNodeType returns the type of ast.Node +func findNodeType() reflect.Type { + var nodeContainer struct { + Node ast.Node + } + value := reflect.ValueOf(nodeContainer).FieldByName("Node") + return value.Type() +} diff --git a/vendor/github.com/hashicorp/hcl/decoder_test.go b/vendor/github.com/hashicorp/hcl/decoder_test.go new file mode 100644 index 000000000..c02bb57fb --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/decoder_test.go @@ -0,0 +1,664 @@ +package hcl + +import ( + "io/ioutil" + "path/filepath" + "reflect" + "testing" + + "github.com/hashicorp/hcl/hcl/ast" +) + +func TestDecode_interface(t *testing.T) { + cases := []struct { + File string + Err bool + Out interface{} + }{ + { + "basic.hcl", + false, + map[string]interface{}{ + "foo": "bar", + "bar": "${file(\"bing/bong.txt\")}", + }, + }, + { + "basic_squish.hcl", + false, + map[string]interface{}{ + "foo": "bar", + "bar": "${file(\"bing/bong.txt\")}", + "foo-bar": "baz", + }, + }, + { + "empty.hcl", + false, + map[string]interface{}{ + "resource": []map[string]interface{}{ + map[string]interface{}{ + "foo": []map[string]interface{}{ + map[string]interface{}{}, + }, + }, + }, + }, + }, + { + "tfvars.hcl", + false, + map[string]interface{}{ + "regularvar": "Should work", + "map.key1": "Value", + "map.key2": "Other value", + }, + }, + { + "escape.hcl", + false, + map[string]interface{}{ + "foo": "bar\"baz\\n", + }, + }, + { + "interpolate_escape.hcl", + false, + map[string]interface{}{ + "foo": "${file(\"bing/bong.txt\")}", + }, + }, + { + "float.hcl", + false, + map[string]interface{}{ + "a": 1.02, + }, + }, + { + "multiline_bad.hcl", + true, + nil, + }, + { + "multiline_no_marker.hcl", + true, + nil, + }, + { + "multiline.hcl", + false, + map[string]interface{}{"foo": "bar\nbaz\n"}, + }, + { + "multiline_no_eof.hcl", + false, + map[string]interface{}{"foo": "bar\nbaz\n", "key": "value"}, + }, + { + "multiline.json", + false, + map[string]interface{}{"foo": "bar\nbaz"}, + }, + { + "scientific.json", + false, + map[string]interface{}{ + "a": 1e-10, + "b": 1e+10, + "c": 1e10, + "d": 1.2e-10, + "e": 1.2e+10, + "f": 1.2e10, + }, + }, + { + "scientific.hcl", + false, + map[string]interface{}{ + "a": 1e-10, + "b": 1e+10, + "c": 1e10, + "d": 1.2e-10, + "e": 1.2e+10, + "f": 1.2e10, + }, + }, + { + "terraform_heroku.hcl", + false, + map[string]interface{}{ + "name": "terraform-test-app", + "config_vars": []map[string]interface{}{ + map[string]interface{}{ + "FOO": "bar", + }, + }, + }, + }, + { + "structure_multi.hcl", + false, + map[string]interface{}{ + "foo": []map[string]interface{}{ + map[string]interface{}{ + "baz": []map[string]interface{}{ + map[string]interface{}{"key": 7}, + }, + }, + map[string]interface{}{ + "bar": []map[string]interface{}{ + map[string]interface{}{"key": 12}, + }, + }, + }, + }, + }, + { 
+ "structure_multi.json", + false, + map[string]interface{}{ + "foo": []map[string]interface{}{ + map[string]interface{}{ + "baz": []map[string]interface{}{ + map[string]interface{}{"key": 7}, + }, + }, + map[string]interface{}{ + "bar": []map[string]interface{}{ + map[string]interface{}{"key": 12}, + }, + }, + }, + }, + }, + { + "structure_list.hcl", + false, + map[string]interface{}{ + "foo": []map[string]interface{}{ + map[string]interface{}{ + "key": 7, + }, + map[string]interface{}{ + "key": 12, + }, + }, + }, + }, + { + "structure_list.json", + false, + map[string]interface{}{ + "foo": []map[string]interface{}{ + map[string]interface{}{ + "key": 7, + }, + map[string]interface{}{ + "key": 12, + }, + }, + }, + }, + { + "structure_list_deep.json", + false, + map[string]interface{}{ + "bar": []map[string]interface{}{ + map[string]interface{}{ + "foo": []map[string]interface{}{ + map[string]interface{}{ + "name": "terraform_example", + "ingress": []map[string]interface{}{ + map[string]interface{}{ + "from_port": 22, + }, + map[string]interface{}{ + "from_port": 80, + }, + }, + }, + }, + }, + }, + }, + }, + + { + "nested_block_comment.hcl", + false, + map[string]interface{}{ + "bar": "value", + }, + }, + + { + "unterminated_block_comment.hcl", + true, + nil, + }, + + { + "object_list.json", + false, + map[string]interface{}{ + "resource": []map[string]interface{}{ + map[string]interface{}{ + "aws_instance": []map[string]interface{}{ + map[string]interface{}{ + "db": []map[string]interface{}{ + map[string]interface{}{ + "vpc": "foo", + "provisioner": []map[string]interface{}{ + map[string]interface{}{ + "file": []map[string]interface{}{ + map[string]interface{}{ + "source": "foo", + "destination": "bar", + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + } + + for _, tc := range cases { + t.Logf("Testing: %s", tc.File) + d, err := ioutil.ReadFile(filepath.Join(fixtureDir, tc.File)) + if err != nil { + t.Fatalf("err: %s", err) + } + + var out interface{} + err = Decode(&out, string(d)) + if (err != nil) != tc.Err { + t.Fatalf("Input: %s\n\nError: %s", tc.File, err) + } + + if !reflect.DeepEqual(out, tc.Out) { + t.Fatalf("Input: %s. 
Actual, Expected.\n\n%#v\n\n%#v", tc.File, out, tc.Out) + } + } +} + +func TestDecode_equal(t *testing.T) { + cases := []struct { + One, Two string + }{ + { + "basic.hcl", + "basic.json", + }, + { + "float.hcl", + "float.json", + }, + /* + { + "structure.hcl", + "structure.json", + }, + */ + { + "structure.hcl", + "structure_flat.json", + }, + { + "terraform_heroku.hcl", + "terraform_heroku.json", + }, + } + + for _, tc := range cases { + p1 := filepath.Join(fixtureDir, tc.One) + p2 := filepath.Join(fixtureDir, tc.Two) + + d1, err := ioutil.ReadFile(p1) + if err != nil { + t.Fatalf("err: %s", err) + } + + d2, err := ioutil.ReadFile(p2) + if err != nil { + t.Fatalf("err: %s", err) + } + + var i1, i2 interface{} + err = Decode(&i1, string(d1)) + if err != nil { + t.Fatalf("err: %s", err) + } + + err = Decode(&i2, string(d2)) + if err != nil { + t.Fatalf("err: %s", err) + } + + if !reflect.DeepEqual(i1, i2) { + t.Fatalf( + "%s != %s\n\n%#v\n\n%#v", + tc.One, tc.Two, + i1, i2) + } + } +} + +func TestDecode_flatMap(t *testing.T) { + var val map[string]map[string]string + + err := Decode(&val, testReadFile(t, "structure_flatmap.hcl")) + if err != nil { + t.Fatalf("err: %s", err) + } + + expected := map[string]map[string]string{ + "foo": map[string]string{ + "foo": "bar", + "key": "7", + }, + } + + if !reflect.DeepEqual(val, expected) { + t.Fatalf("Actual: %#v\n\nExpected: %#v", val, expected) + } +} + +func TestDecode_structure(t *testing.T) { + type V struct { + Key int + Foo string + } + + var actual V + + err := Decode(&actual, testReadFile(t, "flat.hcl")) + if err != nil { + t.Fatalf("err: %s", err) + } + + expected := V{ + Key: 7, + Foo: "bar", + } + + if !reflect.DeepEqual(actual, expected) { + t.Fatalf("Actual: %#v\n\nExpected: %#v", actual, expected) + } +} + +func TestDecode_structurePtr(t *testing.T) { + type V struct { + Key int + Foo string + } + + var actual *V + + err := Decode(&actual, testReadFile(t, "flat.hcl")) + if err != nil { + t.Fatalf("err: %s", err) + } + + expected := &V{ + Key: 7, + Foo: "bar", + } + + if !reflect.DeepEqual(actual, expected) { + t.Fatalf("Actual: %#v\n\nExpected: %#v", actual, expected) + } +} + +func TestDecode_structureArray(t *testing.T) { + // This test is extracted from a failure in Consul (consul.io), + // hence the interesting structure naming. 
+ + type KeyPolicyType string + + type KeyPolicy struct { + Prefix string `hcl:",key"` + Policy KeyPolicyType + } + + type Policy struct { + Keys []KeyPolicy `hcl:"key,expand"` + } + + expected := Policy{ + Keys: []KeyPolicy{ + KeyPolicy{ + Prefix: "", + Policy: "read", + }, + KeyPolicy{ + Prefix: "foo/", + Policy: "write", + }, + KeyPolicy{ + Prefix: "foo/bar/", + Policy: "read", + }, + KeyPolicy{ + Prefix: "foo/bar/baz", + Policy: "deny", + }, + }, + } + + files := []string{ + "decode_policy.hcl", + "decode_policy.json", + } + + for _, f := range files { + var actual Policy + + err := Decode(&actual, testReadFile(t, f)) + if err != nil { + t.Fatalf("Input: %s\n\nerr: %s", f, err) + } + + if !reflect.DeepEqual(actual, expected) { + t.Fatalf("Input: %s\n\nActual: %#v\n\nExpected: %#v", f, actual, expected) + } + } +} + +func TestDecode_sliceExpand(t *testing.T) { + type testInner struct { + Name string `hcl:",key"` + Key string + } + + type testStruct struct { + Services []testInner `hcl:"service,expand"` + } + + expected := testStruct{ + Services: []testInner{ + testInner{ + Name: "my-service-0", + Key: "value", + }, + testInner{ + Name: "my-service-1", + Key: "value", + }, + }, + } + + files := []string{ + "slice_expand.hcl", + } + + for _, f := range files { + t.Logf("Testing: %s", f) + + var actual testStruct + err := Decode(&actual, testReadFile(t, f)) + if err != nil { + t.Fatalf("Input: %s\n\nerr: %s", f, err) + } + + if !reflect.DeepEqual(actual, expected) { + t.Fatalf("Input: %s\n\nActual: %#v\n\nExpected: %#v", f, actual, expected) + } + } +} + +func TestDecode_structureMap(t *testing.T) { + // This test is extracted from a failure in Terraform (terraform.io), + // hence the interesting structure naming. + + type hclVariable struct { + Default interface{} + Description string + Fields []string `hcl:",decodedFields"` + } + + type rawConfig struct { + Variable map[string]hclVariable + } + + expected := rawConfig{ + Variable: map[string]hclVariable{ + "foo": hclVariable{ + Default: "bar", + Description: "bar", + Fields: []string{"Default", "Description"}, + }, + + "amis": hclVariable{ + Default: []map[string]interface{}{ + map[string]interface{}{ + "east": "foo", + }, + }, + Fields: []string{"Default"}, + }, + }, + } + + files := []string{ + "decode_tf_variable.hcl", + "decode_tf_variable.json", + } + + for _, f := range files { + t.Logf("Testing: %s", f) + + var actual rawConfig + err := Decode(&actual, testReadFile(t, f)) + if err != nil { + t.Fatalf("Input: %s\n\nerr: %s", f, err) + } + + if !reflect.DeepEqual(actual, expected) { + t.Fatalf("Input: %s\n\nActual: %#v\n\nExpected: %#v", f, actual, expected) + } + } +} + +func TestDecode_interfaceNonPointer(t *testing.T) { + var value interface{} + err := Decode(value, testReadFile(t, "basic_int_string.hcl")) + if err == nil { + t.Fatal("should error") + } +} + +func TestDecode_intString(t *testing.T) { + var value struct { + Count int + } + + err := Decode(&value, testReadFile(t, "basic_int_string.hcl")) + if err != nil { + t.Fatalf("err: %s", err) + } + + if value.Count != 3 { + t.Fatalf("bad: %#v", value.Count) + } +} + +func TestDecode_Node(t *testing.T) { + // given + var value struct { + Content ast.Node + Nested struct { + Content ast.Node + } + } + + content := ` +content { + hello = "world" +} +` + + // when + err := Decode(&value, content) + + // then + if err != nil { + t.Errorf("unable to decode content, %v", err) + return + } + + // verify ast.Node can be decoded later + var v map[string]interface{} + err = 
DecodeObject(&v, value.Content) + if err != nil { + t.Errorf("unable to decode content, %v", err) + return + } + + if v["hello"] != "world" { + t.Errorf("expected mapping to be returned") + } +} + +func TestDecode_NestedNode(t *testing.T) { + // given + var value struct { + Nested struct { + Content ast.Node + } + } + + content := ` +nested "content" { + hello = "world" +} +` + + // when + err := Decode(&value, content) + + // then + if err != nil { + t.Errorf("unable to decode content, %v", err) + return + } + + // verify ast.Node can be decoded later + var v map[string]interface{} + err = DecodeObject(&v, value.Nested.Content) + if err != nil { + t.Errorf("unable to decode content, %v", err) + return + } + + if v["hello"] != "world" { + t.Errorf("expected mapping to be returned") + } +} + diff --git a/vendor/github.com/hashicorp/hcl/hcl.go b/vendor/github.com/hashicorp/hcl/hcl.go new file mode 100644 index 000000000..575a20b50 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/hcl.go @@ -0,0 +1,11 @@ +// Package hcl decodes HCL into usable Go structures. +// +// hcl input can come in either pure HCL format or JSON format. +// It can be parsed into an AST, and then decoded into a structure, +// or it can be decoded directly from a string into a structure. +// +// If you choose to parse HCL into a raw AST, the benefit is that you +// can write custom visitor implementations to implement custom +// semantic checks. By default, HCL does not perform any semantic +// checks. +package hcl diff --git a/vendor/github.com/hashicorp/hcl/hcl/ast/ast.go b/vendor/github.com/hashicorp/hcl/hcl/ast/ast.go new file mode 100644 index 000000000..f8bb71a04 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/hcl/ast/ast.go @@ -0,0 +1,211 @@ +// Package ast declares the types used to represent syntax trees for HCL +// (HashiCorp Configuration Language) +package ast + +import ( + "fmt" + "strings" + + "github.com/hashicorp/hcl/hcl/token" +) + +// Node is an element in the abstract syntax tree. +type Node interface { + node() + Pos() token.Pos +} + +func (File) node() {} +func (ObjectList) node() {} +func (ObjectKey) node() {} +func (ObjectItem) node() {} +func (Comment) node() {} +func (CommentGroup) node() {} +func (ObjectType) node() {} +func (LiteralType) node() {} +func (ListType) node() {} + +// File represents a single HCL file +type File struct { + Node Node // usually a *ObjectList + Comments []*CommentGroup // list of all comments in the source +} + +func (f *File) Pos() token.Pos { + return f.Node.Pos() +} + +// ObjectList represents a list of ObjectItems. An HCL file itself is an +// ObjectList. +type ObjectList struct { + Items []*ObjectItem +} + +func (o *ObjectList) Add(item *ObjectItem) { + o.Items = append(o.Items, item) +} + +// Filter filters out the objects with the given key list as a prefix. +// +// The returned list of objects contain ObjectItems where the keys have +// this prefix already stripped off. This might result in objects with +// zero-length key lists if they have no children. +// +// If no matches are found, an empty ObjectList (non-nil) is returned. 
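To make the prefix-stripping semantics of `Filter` concrete, here is a small sketch (illustrative only; the `resource` snippet and its key layout are assumptions, not vendored code) that parses a snippet and filters its object list:

```go
package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/hcl/hcl/ast"
	"github.com/hashicorp/hcl/hcl/parser"
)

func main() {
	src := `
resource "aws_instance" "web" {
    ami = "ami-123456"
}
resource "aws_instance" "db" {
    ami = "ami-654321"
}
`
	file, err := parser.Parse([]byte(src))
	if err != nil {
		log.Fatal(err)
	}

	list := file.Node.(*ast.ObjectList)
	matches := list.Filter("resource", "aws_instance")
	for _, item := range matches.Items {
		// The matched prefix has been stripped, so the remaining
		// key should be the instance name: "web", then "db".
		fmt.Println(item.Keys[0].Token.Value())
	}
}
```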
+func (o *ObjectList) Filter(keys ...string) *ObjectList {
+	var result ObjectList
+	for _, item := range o.Items {
+		// If there aren't enough keys, then ignore this
+		if len(item.Keys) < len(keys) {
+			continue
+		}
+
+		match := true
+		for i, key := range item.Keys[:len(keys)] {
+			key := key.Token.Value().(string)
+			if key != keys[i] && !strings.EqualFold(key, keys[i]) {
+				match = false
+				break
+			}
+		}
+		if !match {
+			continue
+		}
+
+		// Strip off the prefix from the children
+		newItem := *item
+		newItem.Keys = newItem.Keys[len(keys):]
+		result.Add(&newItem)
+	}
+
+	return &result
+}
+
+// Children returns further nested objects (key length > 0) within this
+// ObjectList. This should be used with Filter to get at child items.
+func (o *ObjectList) Children() *ObjectList {
+	var result ObjectList
+	for _, item := range o.Items {
+		if len(item.Keys) > 0 {
+			result.Add(item)
+		}
+	}
+
+	return &result
+}
+
+// Elem returns items in the list that are direct element assignments
+// (key length == 0). This should be used with Filter to get at elements.
+func (o *ObjectList) Elem() *ObjectList {
+	var result ObjectList
+	for _, item := range o.Items {
+		if len(item.Keys) == 0 {
+			result.Add(item)
+		}
+	}
+
+	return &result
+}
+
+func (o *ObjectList) Pos() token.Pos {
+	// returns the position of the first item
+	return o.Items[0].Pos()
+}
+
+// ObjectItem represents a HCL Object Item. An item is represented with a key
+// (or keys). It can be an assignment or an object (both normal and nested).
+type ObjectItem struct {
+	// Keys is only one element long if the item is an assignment. If it's a
+	// nested object it can be larger than one. In that case "assign" is
+	// invalid as there are no assignments for a nested object.
+	Keys []*ObjectKey
+
+	// assign contains the position of "=", if any
+	Assign token.Pos
+
+	// val is the item itself. It can be an object, list, number, bool or a
+	// string. If key length is larger than one, val can be only of type
+	// Object.
+	Val Node
+
+	LeadComment *CommentGroup // associated lead comment
+	LineComment *CommentGroup // associated line comment
+}
+
+func (o *ObjectItem) Pos() token.Pos {
+	return o.Keys[0].Pos()
+}
+
+// ObjectKey is either an identifier or a string key.
+type ObjectKey struct {
+	Token token.Token
+}
+
+func (o *ObjectKey) Pos() token.Pos {
+	return o.Token.Pos
+}
+
+// LiteralType represents a literal of basic type. Valid types are:
+// token.NUMBER, token.FLOAT, token.BOOL and token.STRING
+type LiteralType struct {
+	Token token.Token
+
+	// associated line comment, only when used in a list
+	LineComment *CommentGroup
+}
+
+func (l *LiteralType) Pos() token.Pos {
+	return l.Token.Pos
+}
+
+// ListType represents a HCL List type
+type ListType struct {
+	Lbrack token.Pos // position of "["
+	Rbrack token.Pos // position of "]"
+	List   []Node    // the elements in lexical order
+}
+
+func (l *ListType) Pos() token.Pos {
+	return l.Lbrack
+}
+
+func (l *ListType) Add(node Node) {
+	l.List = append(l.List, node)
+}
+
+// ObjectType represents a HCL Object Type
+type ObjectType struct {
+	Lbrace token.Pos   // position of "{"
+	Rbrace token.Pos   // position of "}"
+	List   *ObjectList // the nodes in lexical order
+}
+
+func (o *ObjectType) Pos() token.Pos {
+	return o.Lbrace
+}
+
+// Comment node represents a single //, # style or /* */ style comment
+type Comment struct {
+	Start token.Pos // position of / or #
+	Text  string
+}
+
+func (c *Comment) Pos() token.Pos {
+	return c.Start
+}
+
+// CommentGroup node represents a sequence of comments with no other tokens and
+// no empty lines between.
+type CommentGroup struct {
+	List []*Comment // len(List) > 0
+}
+
+func (c *CommentGroup) Pos() token.Pos {
+	return c.List[0].Pos()
+}
+
+//-------------------------------------------------------------------
+// GoStringer
+//-------------------------------------------------------------------
+
+func (o *ObjectKey) GoString() string { return fmt.Sprintf("*%#v", *o) }
diff --git a/vendor/github.com/hashicorp/hcl/hcl/ast/ast_test.go b/vendor/github.com/hashicorp/hcl/hcl/ast/ast_test.go
new file mode 100644
index 000000000..942256cad
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/hcl/ast/ast_test.go
@@ -0,0 +1,200 @@
+package ast
+
+import (
+	"reflect"
+	"strings"
+	"testing"
+
+	"github.com/hashicorp/hcl/hcl/token"
+)
+
+func TestObjectListFilter(t *testing.T) {
+	var cases = []struct {
+		Filter []string
+		Input  []*ObjectItem
+		Output []*ObjectItem
+	}{
+		{
+			[]string{"foo"},
+			[]*ObjectItem{
+				&ObjectItem{
+					Keys: []*ObjectKey{
+						&ObjectKey{
+							Token: token.Token{Type: token.STRING, Text: `"foo"`},
+						},
+					},
+				},
+			},
+			[]*ObjectItem{
+				&ObjectItem{
+					Keys: []*ObjectKey{},
+				},
+			},
+		},
+
+		{
+			[]string{"foo"},
+			[]*ObjectItem{
+				&ObjectItem{
+					Keys: []*ObjectKey{
+						&ObjectKey{Token: token.Token{Type: token.STRING, Text: `"foo"`}},
+						&ObjectKey{Token: token.Token{Type: token.STRING, Text: `"bar"`}},
+					},
+				},
+				&ObjectItem{
+					Keys: []*ObjectKey{
+						&ObjectKey{Token: token.Token{Type: token.STRING, Text: `"baz"`}},
+					},
+				},
+			},
+			[]*ObjectItem{
+				&ObjectItem{
+					Keys: []*ObjectKey{
+						&ObjectKey{Token: token.Token{Type: token.STRING, Text: `"bar"`}},
+					},
+				},
+			},
+		},
+	}
+
+	for _, tc := range cases {
+		input := &ObjectList{Items: tc.Input}
+		expected := &ObjectList{Items: tc.Output}
+		if actual := input.Filter(tc.Filter...); !reflect.DeepEqual(actual, expected) {
+			t.Fatalf("in order: input, expected, actual\n\n%#v\n\n%#v\n\n%#v", input, expected, actual)
+		}
+	}
+}
+
+func TestWalk(t *testing.T) {
+	items := []*ObjectItem{
+		&ObjectItem{
+			Keys: []*ObjectKey{
+				&ObjectKey{Token: token.Token{Type: token.STRING, Text: `"foo"`}},
+				&ObjectKey{Token: token.Token{Type: token.STRING, Text: `"bar"`}},
+			},
+			Val: &LiteralType{Token: token.Token{Type: token.STRING, Text: `"example"`}},
+		},
+		&ObjectItem{
+			Keys: []*ObjectKey{
+				&ObjectKey{Token: token.Token{Type: token.STRING, Text: `"baz"`}},
+			},
+		},
+	}
+
+	node := &ObjectList{Items: items}
+
+	order := []string{
+		"*ast.ObjectList",
+		"*ast.ObjectItem",
+		"*ast.ObjectKey",
+		"*ast.ObjectKey",
+		"*ast.LiteralType",
+		"*ast.ObjectItem",
+		"*ast.ObjectKey",
+	}
+	count := 0
+
+	Walk(node, func(n Node) (Node, bool) {
+		if n == nil {
+			return n, false
+		}
+
+		typeName := reflect.TypeOf(n).String()
+		if order[count] != typeName {
+			t.Errorf("expected '%s' got: '%s'", order[count], typeName)
+		}
+		count++
+		return n, true
+	})
+}
+
+func TestWalkEquality(t *testing.T) {
+	items := []*ObjectItem{
+		&ObjectItem{
+			Keys: []*ObjectKey{
+				&ObjectKey{Token: token.Token{Type: token.STRING, Text: `"foo"`}},
+			},
+		},
+		&ObjectItem{
+			Keys: []*ObjectKey{
+				&ObjectKey{Token: token.Token{Type: token.STRING, Text: `"bar"`}},
+			},
+		},
+	}
+
+	node := &ObjectList{Items: items}
+
+	rewritten := Walk(node, func(n Node) (Node, bool) { return n, true })
+
+	newNode, ok := rewritten.(*ObjectList)
+	if !ok {
+		t.Fatalf("expected *ObjectList, got %T", rewritten)
+	}
+
+	if !reflect.DeepEqual(node, newNode) {
+		t.Fatal("rewritten node is not equal to the given node")
+	}
+
+	if len(newNode.Items) != 2 {
+		t.Errorf("expected newNode length 2, got: %d", len(newNode.Items))
+	}
+
+	expected := []string{
+		`"foo"`,
+		`"bar"`,
+	}
+
+	for i, item := range newNode.Items {
+		if len(item.Keys) != 1 {
+			t.Errorf("expected keys newNode length 1, got: %d", len(item.Keys))
+		}
+
+		if item.Keys[0].Token.Text != expected[i] {
+			t.Errorf("expected key %s, got %s", expected[i], item.Keys[0].Token.Text)
+		}
+
+		if item.Val != nil {
+			t.Errorf("expected item value to be nil")
+		}
+	}
+}
+
+func TestWalkRewrite(t *testing.T) {
+	items := []*ObjectItem{
+		&ObjectItem{
+			Keys: []*ObjectKey{
+				&ObjectKey{Token: token.Token{Type: token.STRING, Text: `"foo"`}},
+				&ObjectKey{Token: token.Token{Type: token.STRING, Text: `"bar"`}},
+			},
+		},
+		&ObjectItem{
+			Keys: []*ObjectKey{
+				&ObjectKey{Token: token.Token{Type: token.STRING, Text: `"baz"`}},
+			},
+		},
+	}
+
+	node := &ObjectList{Items: items}
+
+	suffix := "_example"
+	node = Walk(node, func(n Node) (Node, bool) {
+		switch i := n.(type) {
+		case *ObjectKey:
+			i.Token.Text = i.Token.Text + suffix
+			n = i
+		}
+		return n, true
+	}).(*ObjectList)
+
+	Walk(node, func(n Node) (Node, bool) {
+		switch i := n.(type) {
+		case *ObjectKey:
+			if !strings.HasSuffix(i.Token.Text, suffix) {
+				t.Errorf("Token '%s' should have suffix: %s", i.Token.Text, suffix)
+			}
+		}
+		return n, true
+	})
+
+}
diff --git a/vendor/github.com/hashicorp/hcl/hcl/ast/walk.go b/vendor/github.com/hashicorp/hcl/hcl/ast/walk.go
new file mode 100644
index 000000000..ba07ad42b
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/hcl/ast/walk.go
@@ -0,0 +1,52 @@
+package ast
+
+import "fmt"
+
+// WalkFunc describes a function to be called for each node during a Walk. The
+// returned node can be used to rewrite the AST. Walking stops if the returned
+// bool is false.
+type WalkFunc func(Node) (Node, bool)
+
+// Walk traverses an AST in depth-first order: It starts by calling fn(node);
+// node must not be nil. If fn returns true, Walk invokes fn recursively for
+// each of the non-nil children of node, followed by a call of fn(nil). The
+// node returned by fn can be used to rewrite the node that was passed in.
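A hedged sketch of the rewrite behavior just described, along the lines of the `TestWalkRewrite` test above (the input snippet is an assumption; the callback must tolerate the trailing `fn(nil)` calls):

```go
package main

import (
	"fmt"
	"log"
	"strings"

	"github.com/hashicorp/hcl/hcl/ast"
	"github.com/hashicorp/hcl/hcl/parser"
)

func main() {
	file, err := parser.Parse([]byte(`foo = "bar"`))
	if err != nil {
		log.Fatal(err)
	}

	// Upper-case every object key; returning true keeps walking.
	// A nil n (the trailing fn(nil) call) fails the type assertion
	// and simply falls through.
	ast.Walk(file.Node, func(n ast.Node) (ast.Node, bool) {
		if key, ok := n.(*ast.ObjectKey); ok {
			key.Token.Text = strings.ToUpper(key.Token.Text)
			return key, true
		}
		return n, true
	})

	list := file.Node.(*ast.ObjectList)
	fmt.Println(list.Items[0].Keys[0].Token.Text) // expected: FOO
}
```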
+func Walk(node Node, fn WalkFunc) Node { + rewritten, ok := fn(node) + if !ok { + return rewritten + } + + switch n := node.(type) { + case *File: + n.Node = Walk(n.Node, fn) + case *ObjectList: + for i, item := range n.Items { + n.Items[i] = Walk(item, fn).(*ObjectItem) + } + case *ObjectKey: + // nothing to do + case *ObjectItem: + for i, k := range n.Keys { + n.Keys[i] = Walk(k, fn).(*ObjectKey) + } + + if n.Val != nil { + n.Val = Walk(n.Val, fn) + } + case *LiteralType: + // nothing to do + case *ListType: + for i, l := range n.List { + n.List[i] = Walk(l, fn) + } + case *ObjectType: + n.List = Walk(n.List, fn).(*ObjectList) + default: + // should we panic here? + fmt.Printf("unknown type: %T\n", n) + } + + fn(nil) + return rewritten +} diff --git a/vendor/github.com/hashicorp/hcl/hcl/parser/error.go b/vendor/github.com/hashicorp/hcl/hcl/parser/error.go new file mode 100644 index 000000000..5c99381df --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/hcl/parser/error.go @@ -0,0 +1,17 @@ +package parser + +import ( + "fmt" + + "github.com/hashicorp/hcl/hcl/token" +) + +// PosError is a parse error that contains a position. +type PosError struct { + Pos token.Pos + Err error +} + +func (e *PosError) Error() string { + return fmt.Sprintf("At %s: %s", e.Pos, e.Err) +} diff --git a/vendor/github.com/hashicorp/hcl/hcl/parser/error_test.go b/vendor/github.com/hashicorp/hcl/hcl/parser/error_test.go new file mode 100644 index 000000000..32399fec5 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/hcl/parser/error_test.go @@ -0,0 +1,9 @@ +package parser + +import ( + "testing" +) + +func TestPosError_impl(t *testing.T) { + var _ error = new(PosError) +} diff --git a/vendor/github.com/hashicorp/hcl/hcl/parser/parser.go b/vendor/github.com/hashicorp/hcl/hcl/parser/parser.go new file mode 100644 index 000000000..c951bf322 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/hcl/parser/parser.go @@ -0,0 +1,417 @@ +// Package parser implements a parser for HCL (HashiCorp Configuration +// Language) +package parser + +import ( + "errors" + "fmt" + + "github.com/hashicorp/hcl/hcl/ast" + "github.com/hashicorp/hcl/hcl/scanner" + "github.com/hashicorp/hcl/hcl/token" +) + +type Parser struct { + sc *scanner.Scanner + + // Last read token + tok token.Token + commaPrev token.Token + + comments []*ast.CommentGroup + leadComment *ast.CommentGroup // last lead comment + lineComment *ast.CommentGroup // last line comment + + enableTrace bool + indent int + n int // buffer size (max = 1) +} + +func newParser(src []byte) *Parser { + return &Parser{ + sc: scanner.New(src), + } +} + +// Parse returns the fully parsed source and returns the abstract syntax tree. +func Parse(src []byte) (*ast.File, error) { + p := newParser(src) + return p.Parse() +} + +var errEofToken = errors.New("EOF token found") + +// Parse returns the fully parsed source and returns the abstract syntax tree. 
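Since parser and scanner failures surface as `*PosError` (defined in error.go above), callers can recover the offending position. A minimal sketch, assuming the deliberately malformed input below trips the "no object keys found" path:

```go
package main

import (
	"fmt"

	"github.com/hashicorp/hcl/hcl/parser"
)

func main() {
	// Malformed on purpose: an assignment with no key before it.
	_, err := parser.Parse([]byte(`= "value"`))
	if err != nil {
		// PosError carries the token.Pos of the offending token.
		if perr, ok := err.(*parser.PosError); ok {
			fmt.Printf("parse failed at %s: %s\n", perr.Pos, perr.Err)
			return
		}
		fmt.Println("parse failed:", err)
	}
}
```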
+func (p *Parser) Parse() (*ast.File, error) { + f := &ast.File{} + var err, scerr error + p.sc.Error = func(pos token.Pos, msg string) { + scerr = &PosError{Pos: pos, Err: errors.New(msg)} + } + + f.Node, err = p.objectList() + if scerr != nil { + return nil, scerr + } + if err != nil { + return nil, err + } + + f.Comments = p.comments + return f, nil +} + +func (p *Parser) objectList() (*ast.ObjectList, error) { + defer un(trace(p, "ParseObjectList")) + node := &ast.ObjectList{} + + for { + n, err := p.objectItem() + if err == errEofToken { + break // we are finished + } + + // we don't return a nil node, because might want to use already + // collected items. + if err != nil { + return node, err + } + + node.Add(n) + } + return node, nil +} + +func (p *Parser) consumeComment() (comment *ast.Comment, endline int) { + endline = p.tok.Pos.Line + + // count the endline if it's multiline comment, ie starting with /* + if len(p.tok.Text) > 1 && p.tok.Text[1] == '*' { + // don't use range here - no need to decode Unicode code points + for i := 0; i < len(p.tok.Text); i++ { + if p.tok.Text[i] == '\n' { + endline++ + } + } + } + + comment = &ast.Comment{Start: p.tok.Pos, Text: p.tok.Text} + p.tok = p.sc.Scan() + return +} + +func (p *Parser) consumeCommentGroup(n int) (comments *ast.CommentGroup, endline int) { + var list []*ast.Comment + endline = p.tok.Pos.Line + + for p.tok.Type == token.COMMENT && p.tok.Pos.Line <= endline+n { + var comment *ast.Comment + comment, endline = p.consumeComment() + list = append(list, comment) + } + + // add comment group to the comments list + comments = &ast.CommentGroup{List: list} + p.comments = append(p.comments, comments) + + return +} + +// objectItem parses a single object item +func (p *Parser) objectItem() (*ast.ObjectItem, error) { + defer un(trace(p, "ParseObjectItem")) + + keys, err := p.objectKey() + if err != nil { + return nil, err + } + + o := &ast.ObjectItem{ + Keys: keys, + } + + if p.leadComment != nil { + o.LeadComment = p.leadComment + p.leadComment = nil + } + + switch p.tok.Type { + case token.ASSIGN: + o.Assign = p.tok.Pos + o.Val, err = p.object() + if err != nil { + return nil, err + } + case token.LBRACE: + o.Val, err = p.objectType() + if err != nil { + return nil, err + } + } + + // do a look-ahead for line comment + p.scan() + if len(keys) > 0 && o.Val.Pos().Line == keys[0].Pos().Line && p.lineComment != nil { + o.LineComment = p.lineComment + p.lineComment = nil + } + p.unscan() + return o, nil +} + +// objectKey parses an object key and returns a ObjectKey AST +func (p *Parser) objectKey() ([]*ast.ObjectKey, error) { + keyCount := 0 + keys := make([]*ast.ObjectKey, 0) + + for { + tok := p.scan() + switch tok.Type { + case token.EOF: + return nil, errEofToken + case token.ASSIGN: + // assignment or object only, but not nested objects. 
this is not + // allowed: `foo bar = {}` + if keyCount > 1 { + return nil, &PosError{ + Pos: p.tok.Pos, + Err: fmt.Errorf("nested object expected: LBRACE got: %s", p.tok.Type), + } + } + + if keyCount == 0 { + return nil, &PosError{ + Pos: p.tok.Pos, + Err: errors.New("no object keys found!"), + } + } + + return keys, nil + case token.LBRACE: + // object + return keys, nil + case token.IDENT, token.STRING: + keyCount++ + keys = append(keys, &ast.ObjectKey{Token: p.tok}) + case token.ILLEGAL: + fmt.Println("illegal") + default: + return nil, &PosError{ + Pos: p.tok.Pos, + Err: fmt.Errorf("expected: IDENT | STRING | ASSIGN | LBRACE got: %s", p.tok.Type), + } + } + } +} + +// object parses any type of object, such as number, bool, string, object or +// list. +func (p *Parser) object() (ast.Node, error) { + defer un(trace(p, "ParseType")) + tok := p.scan() + + switch tok.Type { + case token.NUMBER, token.FLOAT, token.BOOL, token.STRING, token.HEREDOC: + return p.literalType() + case token.LBRACE: + return p.objectType() + case token.LBRACK: + return p.listType() + case token.COMMENT: + // implement comment + case token.EOF: + return nil, errEofToken + } + + return nil, &PosError{ + Pos: tok.Pos, + Err: fmt.Errorf("Unknown token: %+v", tok), + } +} + +// objectType parses an object type and returns a ObjectType AST +func (p *Parser) objectType() (*ast.ObjectType, error) { + defer un(trace(p, "ParseObjectType")) + + // we assume that the currently scanned token is a LBRACE + o := &ast.ObjectType{ + Lbrace: p.tok.Pos, + } + + l, err := p.objectList() + + // if we hit RBRACE, we are good to go (means we parsed all Items), if it's + // not a RBRACE, it's an syntax error and we just return it. + if err != nil && p.tok.Type != token.RBRACE { + return nil, err + } + + o.List = l + o.Rbrace = p.tok.Pos // advanced via parseObjectList + return o, nil +} + +// listType parses a list type and returns a ListType AST +func (p *Parser) listType() (*ast.ListType, error) { + defer un(trace(p, "ParseListType")) + + // we assume that the currently scanned token is a LBRACK + l := &ast.ListType{ + Lbrack: p.tok.Pos, + } + + needComma := false + for { + tok := p.scan() + switch tok.Type { + case token.NUMBER, token.FLOAT, token.STRING, token.HEREDOC: + if needComma { + return nil, &PosError{ + Pos: tok.Pos, + Err: fmt.Errorf("unexpected token: %s. Expecting %s", tok.Type, token.COMMA), + } + } + + node, err := p.literalType() + if err != nil { + return nil, err + } + + l.Add(node) + needComma = true + case token.COMMA: + // get next list item or we are at the end + // do a look-ahead for line comment + p.scan() + if p.lineComment != nil { + lit, ok := l.List[len(l.List)-1].(*ast.LiteralType) + if ok { + lit.LineComment = p.lineComment + l.List[len(l.List)-1] = lit + p.lineComment = nil + } + } + p.unscan() + + needComma = false + continue + case token.BOOL: + // TODO(arslan) should we support? not supported by HCL yet + case token.LBRACK: + // TODO(arslan) should we support nested lists? 
Even though it's + // written in README of HCL, it's not a part of the grammar + // (not defined in parse.y) + case token.RBRACK: + // finished + l.Rbrack = p.tok.Pos + return l, nil + default: + return nil, &PosError{ + Pos: tok.Pos, + Err: fmt.Errorf("unexpected token while parsing list: %s", tok.Type), + } + } + } +} + +// literalType parses a literal type and returns a LiteralType AST +func (p *Parser) literalType() (*ast.LiteralType, error) { + defer un(trace(p, "ParseLiteral")) + + return &ast.LiteralType{ + Token: p.tok, + }, nil +} + +// scan returns the next token from the underlying scanner. If a token has +// been unscanned then read that instead. In the process, it collects any +// comment groups encountered, and remembers the last lead and line comments. +func (p *Parser) scan() token.Token { + // If we have a token on the buffer, then return it. + if p.n != 0 { + p.n = 0 + return p.tok + } + + // Otherwise read the next token from the scanner and Save it to the buffer + // in case we unscan later. + prev := p.tok + p.tok = p.sc.Scan() + + if p.tok.Type == token.COMMENT { + var comment *ast.CommentGroup + var endline int + + // fmt.Printf("p.tok.Pos.Line = %+v prev: %d endline %d \n", + // p.tok.Pos.Line, prev.Pos.Line, endline) + if p.tok.Pos.Line == prev.Pos.Line { + // The comment is on same line as the previous token; it + // cannot be a lead comment but may be a line comment. + comment, endline = p.consumeCommentGroup(0) + if p.tok.Pos.Line != endline { + // The next token is on a different line, thus + // the last comment group is a line comment. + p.lineComment = comment + } + } + + // consume successor comments, if any + endline = -1 + for p.tok.Type == token.COMMENT { + comment, endline = p.consumeCommentGroup(1) + } + + if endline+1 == p.tok.Pos.Line && p.tok.Type != token.RBRACE { + switch p.tok.Type { + case token.RBRACE, token.RBRACK: + // Do not count for these cases + default: + // The next token is following on the line immediately after the + // comment group, thus the last comment group is a lead comment. + p.leadComment = comment + } + } + + } + + return p.tok +} + +// unscan pushes the previously read token back onto the buffer. +func (p *Parser) unscan() { + p.n = 1 +} + +// ---------------------------------------------------------------------------- +// Parsing support + +func (p *Parser) printTrace(a ...interface{}) { + if !p.enableTrace { + return + } + + const dots = ". . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . " + const n = len(dots) + fmt.Printf("%5d:%3d: ", p.tok.Pos.Line, p.tok.Pos.Column) + + i := 2 * p.indent + for i > n { + fmt.Print(dots) + i -= n + } + // i <= n + fmt.Print(dots[0:i]) + fmt.Println(a...) 
+} + +func trace(p *Parser, msg string) *Parser { + p.printTrace(msg, "(") + p.indent++ + return p +} + +// Usage pattern: defer un(trace(p, "...")) +func un(p *Parser) { + p.indent-- + p.printTrace(")") +} diff --git a/vendor/github.com/hashicorp/hcl/hcl/parser/parser_test.go b/vendor/github.com/hashicorp/hcl/hcl/parser/parser_test.go new file mode 100644 index 000000000..f5cca64d0 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/hcl/parser/parser_test.go @@ -0,0 +1,323 @@ +package parser + +import ( + "fmt" + "io/ioutil" + "path/filepath" + "reflect" + "runtime" + "testing" + + "github.com/hashicorp/hcl/hcl/ast" + "github.com/hashicorp/hcl/hcl/token" +) + +func TestType(t *testing.T) { + var literals = []struct { + typ token.Type + src string + }{ + {token.STRING, `foo = "foo"`}, + {token.NUMBER, `foo = 123`}, + {token.FLOAT, `foo = 123.12`}, + {token.FLOAT, `foo = -123.12`}, + {token.BOOL, `foo = true`}, + {token.HEREDOC, "foo = < 0 { + commented = true + buf.WriteByte(newline) + } + + buf.Write(p.indent([]byte(comment.Text))) + buf.WriteByte(newline) + if index != len(o.List.Items) { + buf.WriteByte(newline) // do not print on the end + } + } + } + } + + if index == len(o.List.Items) { + p.prev = o.Rbrace + break + } + + // check if we have adjacent one liner items. If yes we'll going to align + // the comments. + var aligned []*ast.ObjectItem + for _, item := range o.List.Items[index:] { + // we don't group one line lists + if len(o.List.Items) == 1 { + break + } + + // one means a oneliner with out any lead comment + // two means a oneliner with lead comment + // anything else might be something else + cur := lines(string(p.objectItem(item))) + if cur > 2 { + break + } + + curPos := item.Pos() + + nextPos := token.Pos{} + if index != len(o.List.Items)-1 { + nextPos = o.List.Items[index+1].Pos() + } + + prevPos := token.Pos{} + if index != 0 { + prevPos = o.List.Items[index-1].Pos() + } + + // fmt.Println("DEBUG ----------------") + // fmt.Printf("prev = %+v prevPos: %s\n", prev, prevPos) + // fmt.Printf("cur = %+v curPos: %s\n", cur, curPos) + // fmt.Printf("next = %+v nextPos: %s\n", next, nextPos) + + if curPos.Line+1 == nextPos.Line { + aligned = append(aligned, item) + index++ + continue + } + + if curPos.Line-1 == prevPos.Line { + aligned = append(aligned, item) + index++ + + // finish if we have a new line or comment next. This happens + // if the next item is not adjacent + if curPos.Line+1 != nextPos.Line { + break + } + continue + } + + break + } + + // put newlines if the items are between other non aligned items. 
+ // newlines are also added if there is a standalone comment already, so + // check it too + if !commented && index != len(aligned) { + buf.WriteByte(newline) + } + + if len(aligned) >= 1 { + p.prev = aligned[len(aligned)-1].Pos() + + items := p.alignedItems(aligned) + buf.Write(p.indent(items)) + } else { + p.prev = o.List.Items[index].Pos() + + buf.Write(p.indent(p.objectItem(o.List.Items[index]))) + index++ + } + + buf.WriteByte(newline) + } + + buf.WriteString("}") + return buf.Bytes() +} + +func (p *printer) alignedItems(items []*ast.ObjectItem) []byte { + var buf bytes.Buffer + + // find the longest key and value length, needed for alignment + var longestKeyLen int // longest key length + var longestValLen int // longest value length + for _, item := range items { + key := len(item.Keys[0].Token.Text) + val := len(p.output(item.Val)) + + if key > longestKeyLen { + longestKeyLen = key + } + + if val > longestValLen { + longestValLen = val + } + } + + for i, item := range items { + if item.LeadComment != nil { + for _, comment := range item.LeadComment.List { + buf.WriteString(comment.Text) + buf.WriteByte(newline) + } + } + + for i, k := range item.Keys { + keyLen := len(k.Token.Text) + buf.WriteString(k.Token.Text) + for i := 0; i < longestKeyLen-keyLen+1; i++ { + buf.WriteByte(blank) + } + + // reach end of key + if i == len(item.Keys)-1 && len(item.Keys) == 1 { + buf.WriteString("=") + buf.WriteByte(blank) + } + } + + val := p.output(item.Val) + valLen := len(val) + buf.Write(val) + + if item.Val.Pos().Line == item.Keys[0].Pos().Line && item.LineComment != nil { + for i := 0; i < longestValLen-valLen+1; i++ { + buf.WriteByte(blank) + } + + for _, comment := range item.LineComment.List { + buf.WriteString(comment.Text) + } + } + + // do not print for the last item + if i != len(items)-1 { + buf.WriteByte(newline) + } + } + + return buf.Bytes() +} + +// list returns the printable HCL form of an list type. 
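+// Items that share the opening bracket's line stay on a single line, e.g.
+//
+//	foo = [1, 2, "foo"]
+//
+// while items that start on later lines are printed one per line with
+// trailing commas (a reading of the code below, not a guarantee).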
+func (p *printer) list(l *ast.ListType) []byte { + var buf bytes.Buffer + buf.WriteString("[") + + var longestLine int + for _, item := range l.List { + // for now we assume that the list only contains literal types + if lit, ok := item.(*ast.LiteralType); ok { + lineLen := len(lit.Token.Text) + if lineLen > longestLine { + longestLine = lineLen + } + } + } + + for i, item := range l.List { + if item.Pos().Line != l.Lbrack.Line { + // multiline list, add newline before we add each item + buf.WriteByte(newline) + // also indent each line + val := p.output(item) + curLen := len(val) + buf.Write(p.indent(val)) + buf.WriteString(",") + + if lit, ok := item.(*ast.LiteralType); ok && lit.LineComment != nil { + // if the next item doesn't have any comments, do not align + buf.WriteByte(blank) // align one space + if i != len(l.List)-1 { + if lit, ok := l.List[i+1].(*ast.LiteralType); ok && lit.LineComment != nil { + for i := 0; i < longestLine-curLen; i++ { + buf.WriteByte(blank) + } + } + } + + for _, comment := range lit.LineComment.List { + buf.WriteString(comment.Text) + } + } + + if i == len(l.List)-1 { + buf.WriteByte(newline) + } + } else { + buf.Write(p.output(item)) + if i != len(l.List)-1 { + buf.WriteString(",") + buf.WriteByte(blank) + } + } + + } + + buf.WriteString("]") + return buf.Bytes() +} + +// indent indents the lines of the given buffer for each non-empty line +func (p *printer) indent(buf []byte) []byte { + var prefix []byte + if p.cfg.SpacesWidth != 0 { + for i := 0; i < p.cfg.SpacesWidth; i++ { + prefix = append(prefix, blank) + } + } else { + prefix = []byte{tab} + } + + var res []byte + bol := true + for _, c := range buf { + if bol && c != '\n' { + res = append(res, prefix...) + } + + res = append(res, c) + bol = c == '\n' + } + return res +} + +// unindent removes all the indentation from the tombstoned lines +func (p *printer) unindent(buf []byte) []byte { + var res []byte + for i := 0; i < len(buf); i++ { + skip := len(buf)-i <= len(unindent) + if !skip { + skip = !bytes.Equal(unindent, buf[i:i+len(unindent)]) + } + if skip { + res = append(res, buf[i]) + continue + } + + // We have a marker. we have to backtrace here and clean out + // any whitespace ahead of our tombstone up to a \n + for j := len(res) - 1; j >= 0; j-- { + if res[j] == '\n' { + break + } + + res = res[:j] + } + + // Skip the entire unindent marker + i += len(unindent) - 1 + } + + return res +} + +// heredocIndent marks all the 2nd and further lines as unindentable +func (p *printer) heredocIndent(buf []byte) []byte { + var res []byte + bol := false + for _, c := range buf { + if bol && c != '\n' { + res = append(res, unindent...) + } + res = append(res, c) + bol = c == '\n' + } + return res +} + +func lines(txt string) int { + endline := 1 + for i := 0; i < len(txt); i++ { + if txt[i] == '\n' { + endline++ + } + } + return endline +} + +// ---------------------------------------------------------------------------- +// Tracing support + +func (p *printer) printTrace(a ...interface{}) { + if !p.enableTrace { + return + } + + const dots = ". . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . " + const n = len(dots) + i := 2 * p.indentTrace + for i > n { + fmt.Print(dots) + i -= n + } + // i <= n + fmt.Print(dots[0:i]) + fmt.Println(a...) 
+} + +func trace(p *printer, msg string) *printer { + p.printTrace(msg, "(") + p.indentTrace++ + return p +} + +// Usage pattern: defer un(trace(p, "...")) +func un(p *printer) { + p.indentTrace-- + p.printTrace(")") +} diff --git a/vendor/github.com/hashicorp/hcl/hcl/printer/printer.go b/vendor/github.com/hashicorp/hcl/hcl/printer/printer.go new file mode 100644 index 000000000..fb9df58d4 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/hcl/printer/printer.go @@ -0,0 +1,64 @@ +// Package printer implements printing of AST nodes to HCL format. +package printer + +import ( + "bytes" + "io" + "text/tabwriter" + + "github.com/hashicorp/hcl/hcl/ast" + "github.com/hashicorp/hcl/hcl/parser" +) + +var DefaultConfig = Config{ + SpacesWidth: 2, +} + +// A Config node controls the output of Fprint. +type Config struct { + SpacesWidth int // if set, it will use spaces instead of tabs for alignment +} + +func (c *Config) Fprint(output io.Writer, node ast.Node) error { + p := &printer{ + cfg: *c, + comments: make([]*ast.CommentGroup, 0), + standaloneComments: make([]*ast.CommentGroup, 0), + // enableTrace: true, + } + + p.collectComments(node) + + if _, err := output.Write(p.unindent(p.output(node))); err != nil { + return err + } + + // flush tabwriter, if any + var err error + if tw, _ := output.(*tabwriter.Writer); tw != nil { + err = tw.Flush() + } + + return err +} + +// Fprint "pretty-prints" an HCL node to output +// It calls Config.Fprint with default settings. +func Fprint(output io.Writer, node ast.Node) error { + return DefaultConfig.Fprint(output, node) +} + +// Format formats src HCL and returns the result. +func Format(src []byte) ([]byte, error) { + node, err := parser.Parse(src) + if err != nil { + return nil, err + } + + var buf bytes.Buffer + if err := DefaultConfig.Fprint(&buf, node); err != nil { + return nil, err + } + + return buf.Bytes(), nil +} diff --git a/vendor/github.com/hashicorp/hcl/hcl/printer/printer_test.go b/vendor/github.com/hashicorp/hcl/hcl/printer/printer_test.go new file mode 100644 index 000000000..86aa946b2 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/hcl/printer/printer_test.go @@ -0,0 +1,143 @@ +package printer + +import ( + "bytes" + "errors" + "flag" + "fmt" + "io/ioutil" + "path/filepath" + "testing" + + "github.com/hashicorp/hcl/hcl/parser" +) + +var update = flag.Bool("update", false, "update golden files") + +const ( + dataDir = "testdata" +) + +type entry struct { + source, golden string +} + +// Use go test -update to create/update the respective golden files. 
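+//
+// For example, running the following from this package's directory
+// regenerates every .golden file under testdata/ from the current printer
+// output:
+//
+//	go test -update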
+var data = []entry{
+	{"complexhcl.input", "complexhcl.golden"},
+	{"list.input", "list.golden"},
+	{"comment.input", "comment.golden"},
+	{"comment_aligned.input", "comment_aligned.golden"},
+	{"comment_standalone.input", "comment_standalone.golden"},
+}
+
+func TestFiles(t *testing.T) {
+	for _, e := range data {
+		source := filepath.Join(dataDir, e.source)
+		golden := filepath.Join(dataDir, e.golden)
+		check(t, source, golden)
+	}
+}
+
+func check(t *testing.T, source, golden string) {
+	src, err := ioutil.ReadFile(source)
+	if err != nil {
+		t.Error(err)
+		return
+	}
+
+	res, err := format(src)
+	if err != nil {
+		t.Error(err)
+		return
+	}
+
+	// update golden files if necessary
+	if *update {
+		if err := ioutil.WriteFile(golden, res, 0644); err != nil {
+			t.Error(err)
+		}
+		return
+	}
+
+	// get golden
+	gld, err := ioutil.ReadFile(golden)
+	if err != nil {
+		t.Error(err)
+		return
+	}
+
+	// formatted source and golden must be the same
+	if err := diff(source, golden, res, gld); err != nil {
+		t.Error(err)
+		return
+	}
+}
+
+// diff compares a and b.
+func diff(aname, bname string, a, b []byte) error {
+	var buf bytes.Buffer // holding long error message
+
+	// compare lengths
+	if len(a) != len(b) {
+		fmt.Fprintf(&buf, "\nlength changed: len(%s) = %d, len(%s) = %d", aname, len(a), bname, len(b))
+	}
+
+	// compare contents
+	line := 1
+	offs := 1
+	for i := 0; i < len(a) && i < len(b); i++ {
+		ch := a[i]
+		if ch != b[i] {
+			fmt.Fprintf(&buf, "\n%s:%d:%d: %s", aname, line, i-offs+1, lineAt(a, offs))
+			fmt.Fprintf(&buf, "\n%s:%d:%d: %s", bname, line, i-offs+1, lineAt(b, offs))
+			fmt.Fprintf(&buf, "\n\n")
+			break
+		}
+		if ch == '\n' {
+			line++
+			offs = i + 1
+		}
+	}
+
+	if buf.Len() > 0 {
+		return errors.New(buf.String())
+	}
+	return nil
+}
+
+// format parses src, prints the corresponding AST, verifies that the printed
+// result is still syntactically correct, and returns the formatted source or
+// an error if any.
+func format(src []byte) ([]byte, error) {
+	// parse src
+	node, err := parser.Parse(src)
+	if err != nil {
+		return nil, fmt.Errorf("parse: %s\n%s", err, src)
+	}
+
+	var buf bytes.Buffer
+
+	cfg := &Config{}
+	if err := cfg.Fprint(&buf, node); err != nil {
+		return nil, fmt.Errorf("print: %s", err)
+	}
+
+	// make sure formatted output is syntactically correct
+	res := buf.Bytes()
+
+	if _, err := parser.Parse(res); err != nil {
+		return nil, fmt.Errorf("re-parse: %s\n%s", err, res)
+	}
+
+	return res, nil
+}
+
+// lineAt returns the line in text starting at offset offs.
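+// The line excludes the trailing '\n' (or runs to the end of text); for
+// example, lineAt([]byte("ab\ncd"), 0) returns "ab" and
+// lineAt([]byte("ab\ncd"), 3) returns "cd".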
+func lineAt(text []byte, offs int) []byte { + i := offs + for i < len(text) && text[i] != '\n' { + i++ + } + return text[offs:i] +} diff --git a/vendor/github.com/hashicorp/hcl/hcl/printer/testdata/comment.golden b/vendor/github.com/hashicorp/hcl/hcl/printer/testdata/comment.golden new file mode 100644 index 000000000..e86215f53 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/hcl/printer/testdata/comment.golden @@ -0,0 +1,36 @@ +// A standalone comment is a comment which is not attached to any kind of node + +// This comes from Terraform, as a test +variable "foo" { + # Standalone comment should be still here + + default = "bar" + description = "bar" # yooo +} + +/* This is a multi line standalone +comment*/ + +// fatih arslan +/* This is a developer test +account and a multine comment */ +developer = ["fatih", "arslan"] // fatih arslan + +# One line here +numbers = [1, 2] // another line here + +# Another comment +variable = { + description = "bar" # another yooo + + foo { + # Nested standalone + + bar = "fatih" + } +} + +// lead comment +foo { + bar = "fatih" // line comment 2 +} // line comment 3 \ No newline at end of file diff --git a/vendor/github.com/hashicorp/hcl/hcl/printer/testdata/comment.input b/vendor/github.com/hashicorp/hcl/hcl/printer/testdata/comment.input new file mode 100644 index 000000000..57c37ac1d --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/hcl/printer/testdata/comment.input @@ -0,0 +1,37 @@ +// A standalone comment is a comment which is not attached to any kind of node + + // This comes from Terraform, as a test +variable "foo" { + # Standalone comment should be still here + + default = "bar" + description = "bar" # yooo +} + +/* This is a multi line standalone +comment*/ + + +// fatih arslan +/* This is a developer test +account and a multine comment */ +developer = [ "fatih", "arslan"] // fatih arslan + +# One line here +numbers = [1,2] // another line here + + # Another comment +variable = { + description = "bar" # another yooo + foo { + # Nested standalone + + bar = "fatih" + } +} + + // lead comment +foo { + bar = "fatih" // line comment 2 +} // line comment 3 + diff --git a/vendor/github.com/hashicorp/hcl/hcl/printer/testdata/comment_aligned.golden b/vendor/github.com/hashicorp/hcl/hcl/printer/testdata/comment_aligned.golden new file mode 100644 index 000000000..5d0e8d695 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/hcl/printer/testdata/comment_aligned.golden @@ -0,0 +1,25 @@ +aligned { + # We have some aligned items below + foo = "fatih" # yoo1 + default = "bar" # yoo2 + bar = "bar and foo" # yoo3 + + default = { + bar = "example" + } + + #deneme arslan + fatih = ["fatih"] # yoo4 + + #fatih arslan + fatiharslan = ["arslan"] // yoo5 + + default = { + bar = "example" + } + + security_groups = [ + "foo", # kenya 1 + "${aws_security_group.firewall.foo}", # kenya 2 + ] +} \ No newline at end of file diff --git a/vendor/github.com/hashicorp/hcl/hcl/printer/testdata/comment_aligned.input b/vendor/github.com/hashicorp/hcl/hcl/printer/testdata/comment_aligned.input new file mode 100644 index 000000000..1c6ba0de7 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/hcl/printer/testdata/comment_aligned.input @@ -0,0 +1,21 @@ +aligned { +# We have some aligned items below + foo = "fatih" # yoo1 + default = "bar" # yoo2 + bar = "bar and foo" # yoo3 + default = { + bar = "example" + } + #deneme arslan + fatih = ["fatih"] # yoo4 + #fatih arslan + fatiharslan = ["arslan"] // yoo5 + default = { + bar = "example" + } + +security_groups = [ + "foo", # kenya 1 
+ "${aws_security_group.firewall.foo}", # kenya 2 +] +} diff --git a/vendor/github.com/hashicorp/hcl/hcl/printer/testdata/comment_standalone.golden b/vendor/github.com/hashicorp/hcl/hcl/printer/testdata/comment_standalone.golden new file mode 100644 index 000000000..962dbf2b3 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/hcl/printer/testdata/comment_standalone.golden @@ -0,0 +1,16 @@ +// A standalone comment + +aligned { + # Standalone 1 + + a = "bar" # yoo1 + default = "bar" # yoo2 + + # Standalone 2 +} + +# Standalone 3 + +numbers = [1, 2] // another line here + +# Standalone 4 diff --git a/vendor/github.com/hashicorp/hcl/hcl/printer/testdata/comment_standalone.input b/vendor/github.com/hashicorp/hcl/hcl/printer/testdata/comment_standalone.input new file mode 100644 index 000000000..4436cb16c --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/hcl/printer/testdata/comment_standalone.input @@ -0,0 +1,16 @@ +// A standalone comment + +aligned { + # Standalone 1 + + a = "bar" # yoo1 + default = "bar" # yoo2 + + # Standalone 2 +} + + # Standalone 3 + +numbers = [1,2] // another line here + + # Standalone 4 diff --git a/vendor/github.com/hashicorp/hcl/hcl/printer/testdata/complexhcl.golden b/vendor/github.com/hashicorp/hcl/hcl/printer/testdata/complexhcl.golden new file mode 100644 index 000000000..b733a27e4 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/hcl/printer/testdata/complexhcl.golden @@ -0,0 +1,54 @@ +variable "foo" { + default = "bar" + description = "bar" +} + +developer = ["fatih", "arslan"] + +provider "aws" { + access_key = "foo" + secret_key = "bar" +} + +provider "do" { + api_key = "${var.foo}" +} + +resource "aws_security_group" "firewall" { + count = 5 +} + +resource aws_instance "web" { + ami = "${var.foo}" + + security_groups = [ + "foo", + "${aws_security_group.firewall.foo}", + ] + + network_interface { + device_index = 0 + description = "Main network interface" + } + + network_interface = { + device_index = 1 + + description = < 0 { + // common case: last character was not a '\n' + s.tokPos.Line = s.srcPos.Line + s.tokPos.Column = s.srcPos.Column + } else { + // last character was a '\n' + // (we cannot be at the beginning of the source + // since we have called next() at least once) + s.tokPos.Line = s.srcPos.Line - 1 + s.tokPos.Column = s.lastLineLen + } + + switch { + case isLetter(ch): + tok = token.IDENT + lit := s.scanIdentifier() + if lit == "true" || lit == "false" { + tok = token.BOOL + } + case isDecimal(ch): + tok = s.scanNumber(ch) + default: + switch ch { + case eof: + tok = token.EOF + case '"': + tok = token.STRING + s.scanString() + case '#', '/': + tok = token.COMMENT + s.scanComment(ch) + case '.': + tok = token.PERIOD + ch = s.peek() + if isDecimal(ch) { + tok = token.FLOAT + ch = s.scanMantissa(ch) + ch = s.scanExponent(ch) + } + case '<': + tok = token.HEREDOC + s.scanHeredoc() + case '[': + tok = token.LBRACK + case ']': + tok = token.RBRACK + case '{': + tok = token.LBRACE + case '}': + tok = token.RBRACE + case ',': + tok = token.COMMA + case '=': + tok = token.ASSIGN + case '+': + tok = token.ADD + case '-': + if isDecimal(s.peek()) { + ch := s.next() + tok = s.scanNumber(ch) + } else { + tok = token.SUB + } + default: + s.err("illegal char") + } + } + + // finish token ending + s.tokEnd = s.srcPos.Offset + + // create token literal + var tokenText string + if s.tokStart >= 0 { + tokenText = string(s.src[s.tokStart:s.tokEnd]) + } + s.tokStart = s.tokEnd // ensure idempotency of tokenText() call + + return token.Token{ + Type: tok, + 
Pos: s.tokPos, + Text: tokenText, + } +} + +func (s *Scanner) scanComment(ch rune) { + // single line comments + if ch == '#' || (ch == '/' && s.peek() != '*') { + ch = s.next() + for ch != '\n' && ch >= 0 && ch != eof { + ch = s.next() + } + if ch != eof && ch >= 0 { + s.unread() + } + return + } + + // be sure we get the character after /* This allows us to find comment's + // that are not erminated + if ch == '/' { + s.next() + ch = s.next() // read character after "/*" + } + + // look for /* - style comments + for { + if ch < 0 || ch == eof { + s.err("comment not terminated") + break + } + + ch0 := ch + ch = s.next() + if ch0 == '*' && ch == '/' { + break + } + } +} + +// scanNumber scans a HCL number definition starting with the given rune +func (s *Scanner) scanNumber(ch rune) token.Type { + if ch == '0' { + // check for hexadecimal, octal or float + ch = s.next() + if ch == 'x' || ch == 'X' { + // hexadecimal + ch = s.next() + found := false + for isHexadecimal(ch) { + ch = s.next() + found = true + } + + if !found { + s.err("illegal hexadecimal number") + } + + if ch != eof { + s.unread() + } + + return token.NUMBER + } + + // now it's either something like: 0421(octal) or 0.1231(float) + illegalOctal := false + for isDecimal(ch) { + ch = s.next() + if ch == '8' || ch == '9' { + // this is just a possibility. For example 0159 is illegal, but + // 0159.23 is valid. So we mark a possible illegal octal. If + // the next character is not a period, we'll print the error. + illegalOctal = true + } + } + + if ch == 'e' || ch == 'E' { + ch = s.scanExponent(ch) + return token.FLOAT + } + + if ch == '.' { + ch = s.scanFraction(ch) + + if ch == 'e' || ch == 'E' { + ch = s.next() + ch = s.scanExponent(ch) + } + return token.FLOAT + } + + if illegalOctal { + s.err("illegal octal number") + } + + if ch != eof { + s.unread() + } + return token.NUMBER + } + + s.scanMantissa(ch) + ch = s.next() // seek forward + if ch == 'e' || ch == 'E' { + ch = s.scanExponent(ch) + return token.FLOAT + } + + if ch == '.' { + ch = s.scanFraction(ch) + if ch == 'e' || ch == 'E' { + ch = s.next() + ch = s.scanExponent(ch) + } + return token.FLOAT + } + + if ch != eof { + s.unread() + } + return token.NUMBER +} + +// scanMantissa scans the mantissa begining from the rune. It returns the next +// non decimal rune. It's used to determine wheter it's a fraction or exponent. +func (s *Scanner) scanMantissa(ch rune) rune { + scanned := false + for isDecimal(ch) { + ch = s.next() + scanned = true + } + + if scanned && ch != eof { + s.unread() + } + return ch +} + +// scanFraction scans the fraction after the '.' rune +func (s *Scanner) scanFraction(ch rune) rune { + if ch == '.' { + ch = s.peek() // we peek just to see if we can move forward + ch = s.scanMantissa(ch) + } + return ch +} + +// scanExponent scans the remaining parts of an exponent after the 'e' or 'E' +// rune. +func (s *Scanner) scanExponent(ch rune) rune { + if ch == 'e' || ch == 'E' { + ch = s.next() + if ch == '-' || ch == '+' { + ch = s.next() + } + ch = s.scanMantissa(ch) + } + return ch +} + +// scanHeredoc scans a heredoc string. 
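+// A heredoc has the form (the anchor name, EOF here, is arbitrary):
+//
+//	foo = <<EOF
+//	some multi-line
+//	text
+//	EOF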
+func (s *Scanner) scanHeredoc() { + // Scan the second '<' in example: '< 0 && ch == '{' { + braces++ + } + if braces > 0 && ch == '}' { + braces-- + } + + if ch == '\\' { + s.scanEscape() + } + } + + return +} + +// scanEscape scans an escape sequence +func (s *Scanner) scanEscape() rune { + // http://en.cppreference.com/w/cpp/language/escape + ch := s.next() // read character after '/' + switch ch { + case 'a', 'b', 'f', 'n', 'r', 't', 'v', '\\', '"': + // nothing to do + case '0', '1', '2', '3', '4', '5', '6', '7': + // octal notation + ch = s.scanDigits(ch, 8, 3) + case 'x': + // hexademical notation + ch = s.scanDigits(s.next(), 16, 2) + case 'u': + // universal character name + ch = s.scanDigits(s.next(), 16, 4) + case 'U': + // universal character name + ch = s.scanDigits(s.next(), 16, 8) + default: + s.err("illegal char escape") + } + return ch +} + +// scanDigits scans a rune with the given base for n times. For example an +// octal notation \184 would yield in scanDigits(ch, 8, 3) +func (s *Scanner) scanDigits(ch rune, base, n int) rune { + for n > 0 && digitVal(ch) < base { + ch = s.next() + n-- + } + if n > 0 { + s.err("illegal char escape") + } + + // we scanned all digits, put the last non digit char back + s.unread() + return ch +} + +// scanIdentifier scans an identifier and returns the literal string +func (s *Scanner) scanIdentifier() string { + offs := s.srcPos.Offset - s.lastCharLen + ch := s.next() + for isLetter(ch) || isDigit(ch) || ch == '-' || ch == '.' { + ch = s.next() + } + + if ch != eof { + s.unread() // we got identifier, put back latest char + } + + return string(s.src[offs:s.srcPos.Offset]) +} + +// recentPosition returns the position of the character immediately after the +// character or token returned by the last call to Scan. +func (s *Scanner) recentPosition() (pos token.Pos) { + pos.Offset = s.srcPos.Offset - s.lastCharLen + switch { + case s.srcPos.Column > 0: + // common case: last character was not a '\n' + pos.Line = s.srcPos.Line + pos.Column = s.srcPos.Column + case s.lastLineLen > 0: + // last character was a '\n' + // (we cannot be at the beginning of the source + // since we have called next() at least once) + pos.Line = s.srcPos.Line - 1 + pos.Column = s.lastLineLen + default: + // at the beginning of the source + pos.Line = 1 + pos.Column = 1 + } + return +} + +// err prints the error of any scanning to s.Error function. 
If the function is
+// not defined, by default it prints them to os.Stderr.
+func (s *Scanner) err(msg string) {
+	s.ErrorCount++
+	pos := s.recentPosition()
+
+	if s.Error != nil {
+		s.Error(pos, msg)
+		return
+	}
+
+	fmt.Fprintf(os.Stderr, "%s: %s\n", pos, msg)
+}
+
+// isLetter returns true if the given rune is a letter
+func isLetter(ch rune) bool {
+	return 'a' <= ch && ch <= 'z' || 'A' <= ch && ch <= 'Z' || ch == '_' || ch >= 0x80 && unicode.IsLetter(ch)
+}
+
+// isDigit returns true if the given rune is a decimal digit
+func isDigit(ch rune) bool {
+	return '0' <= ch && ch <= '9' || ch >= 0x80 && unicode.IsDigit(ch)
+}
+
+// isDecimal returns true if the given rune is a decimal number
+func isDecimal(ch rune) bool {
+	return '0' <= ch && ch <= '9'
+}
+
+// isHexadecimal returns true if the given rune is a hexadecimal digit
+func isHexadecimal(ch rune) bool {
+	return '0' <= ch && ch <= '9' || 'a' <= ch && ch <= 'f' || 'A' <= ch && ch <= 'F'
+}
+
+// isWhitespace returns true if the rune is a space, tab, newline or carriage return
+func isWhitespace(ch rune) bool {
+	return ch == ' ' || ch == '\t' || ch == '\n' || ch == '\r'
+}
+
+// digitVal returns the integer value of a given octal, decimal or hexadecimal rune
+func digitVal(ch rune) int {
+	switch {
+	case '0' <= ch && ch <= '9':
+		return int(ch - '0')
+	case 'a' <= ch && ch <= 'f':
+		return int(ch - 'a' + 10)
+	case 'A' <= ch && ch <= 'F':
+		return int(ch - 'A' + 10)
+	}
+	return 16 // larger than any legal digit val
+}
diff --git a/vendor/github.com/hashicorp/hcl/hcl/scanner/scanner_test.go b/vendor/github.com/hashicorp/hcl/hcl/scanner/scanner_test.go
new file mode 100644
index 000000000..b497a9fa1
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/hcl/scanner/scanner_test.go
@@ -0,0 +1,535 @@
+package scanner
+
+import (
+	"bytes"
+	"fmt"
+	"strings"
+	"testing"
+
+	"github.com/hashicorp/hcl/hcl/token"
+)
+
+var f100 = "ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"
+
+type tokenPair struct {
+	tok  token.Type
+	text string
+}
+
+var tokenLists = map[string][]tokenPair{
+	"comment": []tokenPair{
+		{token.COMMENT, "//"},
+		{token.COMMENT, "////"},
+		{token.COMMENT, "// comment"},
+		{token.COMMENT, "// /* comment */"},
+		{token.COMMENT, "// // comment //"},
+		{token.COMMENT, "//" + f100},
+		{token.COMMENT, "#"},
+		{token.COMMENT, "##"},
+		{token.COMMENT, "# comment"},
+		{token.COMMENT, "# /* comment */"},
+		{token.COMMENT, "# # comment #"},
+		{token.COMMENT, "#" + f100},
+		{token.COMMENT, "/**/"},
+		{token.COMMENT, "/***/"},
+		{token.COMMENT, "/* comment */"},
+		{token.COMMENT, "/* // comment */"},
+		{token.COMMENT, "/* /* comment */"},
+		{token.COMMENT, "/*\n comment\n*/"},
+		{token.COMMENT, "/*" + f100 + "*/"},
+	},
+	"operator": []tokenPair{
+		{token.LBRACK, "["},
+		{token.LBRACE, "{"},
+		{token.COMMA, ","},
+		{token.PERIOD, "."},
+		{token.RBRACK, "]"},
+		{token.RBRACE, "}"},
+		{token.ASSIGN, "="},
+		{token.ADD, "+"},
+		{token.SUB, "-"},
+	},
+	"bool": []tokenPair{
+		{token.BOOL, "true"},
+		{token.BOOL, "false"},
+	},
+	"ident": []tokenPair{
+		{token.IDENT, "a"},
+		{token.IDENT, "a0"},
+		{token.IDENT, "foobar"},
+		{token.IDENT, "foo-bar"},
+		{token.IDENT, "abc123"},
+		{token.IDENT, "LGTM"},
+		{token.IDENT, "_"},
+		{token.IDENT, "_abc123"},
+		{token.IDENT, "abc123_"},
+		{token.IDENT, "_abc_123_"},
+		{token.IDENT, "_äöü"},
+		{token.IDENT, "_本"},
+		{token.IDENT, "äöü"},
+		{token.IDENT, "本"},
+		{token.IDENT, "a۰۱۸"},
+		{token.IDENT, "foo६४"},
+		{token.IDENT,
"bar9876"}, + }, + "heredoc": []tokenPair{ + {token.HEREDOC, "< 0 for %q", s.ErrorCount, src) + } +} + +func testTokenList(t *testing.T, tokenList []tokenPair) { + // create artifical source code + buf := new(bytes.Buffer) + for _, ident := range tokenList { + fmt.Fprintf(buf, "%s\n", ident.text) + } + + s := New(buf.Bytes()) + for _, ident := range tokenList { + tok := s.Scan() + if tok.Type != ident.tok { + t.Errorf("tok = %q want %q for %q\n", tok, ident.tok, ident.text) + } + + if tok.Text != ident.text { + t.Errorf("text = %q want %q", tok.String(), ident.text) + } + + } +} + +func countNewlines(s string) int { + n := 0 + for _, ch := range s { + if ch == '\n' { + n++ + } + } + return n +} diff --git a/vendor/github.com/hashicorp/hcl/hcl/strconv/quote.go b/vendor/github.com/hashicorp/hcl/hcl/strconv/quote.go new file mode 100644 index 000000000..e87ac6356 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/hcl/strconv/quote.go @@ -0,0 +1,245 @@ +package strconv + +import ( + "errors" + "unicode/utf8" +) + +// ErrSyntax indicates that a value does not have the right syntax for the target type. +var ErrSyntax = errors.New("invalid syntax") + +// Unquote interprets s as a single-quoted, double-quoted, +// or backquoted Go string literal, returning the string value +// that s quotes. (If s is single-quoted, it would be a Go +// character literal; Unquote returns the corresponding +// one-character string.) +func Unquote(s string) (t string, err error) { + n := len(s) + if n < 2 { + return "", ErrSyntax + } + quote := s[0] + if quote != s[n-1] { + return "", ErrSyntax + } + s = s[1 : n-1] + + if quote != '"' { + return "", ErrSyntax + } + if contains(s, '\n') { + return "", ErrSyntax + } + + // Is it trivial? Avoid allocation. + if !contains(s, '\\') && !contains(s, quote) && !contains(s, '$') { + switch quote { + case '"': + return s, nil + case '\'': + r, size := utf8.DecodeRuneInString(s) + if size == len(s) && (r != utf8.RuneError || size != 1) { + return s, nil + } + } + } + + var runeTmp [utf8.UTFMax]byte + buf := make([]byte, 0, 3*len(s)/2) // Try to avoid more allocations. + for len(s) > 0 { + // If we're starting a '${}' then let it through un-unquoted. + // Specifically: we don't unquote any characters within the `${}` + // section, except for escaped quotes, which we handle specifically. + if s[0] == '$' && len(s) > 1 && s[1] == '{' { + buf = append(buf, '$', '{') + s = s[2:] + + // Continue reading until we find the closing brace, copying as-is + braces := 1 + for len(s) > 0 && braces > 0 { + r, size := utf8.DecodeRuneInString(s) + if r == utf8.RuneError { + return "", ErrSyntax + } + + s = s[size:] + + // We special case escaped double quotes in interpolations, converting + // them to straight double quotes. + if r == '\\' { + if q, _ := utf8.DecodeRuneInString(s); q == '"' { + continue + } + } + + n := utf8.EncodeRune(runeTmp[:], r) + buf = append(buf, runeTmp[:n]...) + + switch r { + case '{': + braces++ + case '}': + braces-- + } + } + if braces != 0 { + return "", ErrSyntax + } + if len(s) == 0 { + // If there's no string left, we're done! + break + } else { + // If there's more left, we need to pop back up to the top of the loop + // in case there's another interpolation in this string. + continue + } + } + + c, multibyte, ss, err := unquoteChar(s, quote) + if err != nil { + return "", err + } + s = ss + if c < utf8.RuneSelf || !multibyte { + buf = append(buf, byte(c)) + } else { + n := utf8.EncodeRune(runeTmp[:], c) + buf = append(buf, runeTmp[:n]...) 
+ } + if quote == '\'' && len(s) != 0 { + // single-quoted must be single character + return "", ErrSyntax + } + } + return string(buf), nil +} + +// contains reports whether the string contains the byte c. +func contains(s string, c byte) bool { + for i := 0; i < len(s); i++ { + if s[i] == c { + return true + } + } + return false +} + +func unhex(b byte) (v rune, ok bool) { + c := rune(b) + switch { + case '0' <= c && c <= '9': + return c - '0', true + case 'a' <= c && c <= 'f': + return c - 'a' + 10, true + case 'A' <= c && c <= 'F': + return c - 'A' + 10, true + } + return +} + +func unquoteChar(s string, quote byte) (value rune, multibyte bool, tail string, err error) { + // easy cases + switch c := s[0]; { + case c == quote && (quote == '\'' || quote == '"'): + err = ErrSyntax + return + case c >= utf8.RuneSelf: + r, size := utf8.DecodeRuneInString(s) + return r, true, s[size:], nil + case c != '\\': + return rune(s[0]), false, s[1:], nil + } + + // hard case: c is backslash + if len(s) <= 1 { + err = ErrSyntax + return + } + c := s[1] + s = s[2:] + + switch c { + case 'a': + value = '\a' + case 'b': + value = '\b' + case 'f': + value = '\f' + case 'n': + value = '\n' + case 'r': + value = '\r' + case 't': + value = '\t' + case 'v': + value = '\v' + case 'x', 'u', 'U': + n := 0 + switch c { + case 'x': + n = 2 + case 'u': + n = 4 + case 'U': + n = 8 + } + var v rune + if len(s) < n { + err = ErrSyntax + return + } + for j := 0; j < n; j++ { + x, ok := unhex(s[j]) + if !ok { + err = ErrSyntax + return + } + v = v<<4 | x + } + s = s[n:] + if c == 'x' { + // single-byte string, possibly not UTF-8 + value = v + break + } + if v > utf8.MaxRune { + err = ErrSyntax + return + } + value = v + multibyte = true + case '0', '1', '2', '3', '4', '5', '6', '7': + v := rune(c) - '0' + if len(s) < 2 { + err = ErrSyntax + return + } + for j := 0; j < 2; j++ { // one digit already; two more + x := rune(s[j]) - '0' + if x < 0 || x > 7 { + err = ErrSyntax + return + } + v = (v << 3) | x + } + s = s[2:] + if v > 255 { + err = ErrSyntax + return + } + value = v + case '\\': + value = '\\' + case '\'', '"': + if c != quote { + err = ErrSyntax + return + } + value = rune(c) + default: + err = ErrSyntax + return + } + tail = s + return +} diff --git a/vendor/github.com/hashicorp/hcl/hcl/strconv/quote_test.go b/vendor/github.com/hashicorp/hcl/hcl/strconv/quote_test.go new file mode 100644 index 000000000..4a810aa38 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/hcl/strconv/quote_test.go @@ -0,0 +1,93 @@ +package strconv + +import "testing" + +type quoteTest struct { + in string + out string + ascii string +} + +var quotetests = []quoteTest{ + {"\a\b\f\r\n\t\v", `"\a\b\f\r\n\t\v"`, `"\a\b\f\r\n\t\v"`}, + {"\\", `"\\"`, `"\\"`}, + {"abc\xffdef", `"abc\xffdef"`, `"abc\xffdef"`}, + {"\u263a", `"☺"`, `"\u263a"`}, + {"\U0010ffff", `"\U0010ffff"`, `"\U0010ffff"`}, + {"\x04", `"\x04"`, `"\x04"`}, +} + +type unQuoteTest struct { + in string + out string +} + +var unquotetests = []unQuoteTest{ + {`""`, ""}, + {`"a"`, "a"}, + {`"abc"`, "abc"}, + {`"☺"`, "☺"}, + {`"hello world"`, "hello world"}, + {`"\xFF"`, "\xFF"}, + {`"\377"`, "\377"}, + {`"\u1234"`, "\u1234"}, + {`"\U00010111"`, "\U00010111"}, + {`"\U0001011111"`, "\U0001011111"}, + {`"\a\b\f\n\r\t\v\\\""`, "\a\b\f\n\r\t\v\\\""}, + {`"'"`, "'"}, + {`"${file("foo")}"`, `${file("foo")}`}, + {`"${file(\"foo\")}"`, `${file("foo")}`}, + {`"echo ${var.region}${element(split(",",var.zones),0)}"`, + `echo ${var.region}${element(split(",",var.zones),0)}`}, +} + +var 
misquoted = []string{ + ``, + `"`, + `"a`, + `"'`, + `b"`, + `"\"`, + `"\9"`, + `"\19"`, + `"\129"`, + `'\'`, + `'\9'`, + `'\19'`, + `'\129'`, + `'ab'`, + `"\x1!"`, + `"\U12345678"`, + `"\z"`, + "`", + "`xxx", + "`\"", + `"\'"`, + `'\"'`, + "\"\n\"", + "\"\\n\n\"", + "'\n'", + `"${"`, + `"${foo{}"`, +} + +func TestUnquote(t *testing.T) { + for _, tt := range unquotetests { + if out, err := Unquote(tt.in); err != nil || out != tt.out { + t.Errorf("Unquote(%#q) = %q, %v want %q, nil", tt.in, out, err, tt.out) + } + } + + // run the quote tests too, backward + for _, tt := range quotetests { + if in, err := Unquote(tt.out); in != tt.in { + t.Errorf("Unquote(%#q) = %q, %v, want %q, nil", tt.out, in, err, tt.in) + } + } + + for _, s := range misquoted { + if out, err := Unquote(s); out != "" || err != ErrSyntax { + t.Errorf("Unquote(%#q) = %q, %v want %q, %v", s, out, err, "", ErrSyntax) + } + } +} diff --git a/vendor/github.com/hashicorp/hcl/hcl/test-fixtures/array_comment.hcl b/vendor/github.com/hashicorp/hcl/hcl/test-fixtures/array_comment.hcl new file mode 100644 index 000000000..78c267582 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/hcl/test-fixtures/array_comment.hcl @@ -0,0 +1,4 @@ +foo = [ + "1", + "2", # comment +] diff --git a/vendor/github.com/hashicorp/hcl/hcl/test-fixtures/assign_colon.hcl b/vendor/github.com/hashicorp/hcl/hcl/test-fixtures/assign_colon.hcl new file mode 100644 index 000000000..eb5a99a69 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/hcl/test-fixtures/assign_colon.hcl @@ -0,0 +1,6 @@ +resource = [{ + "foo": { + "bar": {}, + "baz": [1, 2, "foo"], + } +}] diff --git a/vendor/github.com/hashicorp/hcl/hcl/test-fixtures/assign_deep.hcl b/vendor/github.com/hashicorp/hcl/hcl/test-fixtures/assign_deep.hcl new file mode 100644 index 000000000..dd3151cb7 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/hcl/test-fixtures/assign_deep.hcl @@ -0,0 +1,5 @@ +resource = [{ + foo = [{ + bar = {} + }] +}] diff --git a/vendor/github.com/hashicorp/hcl/hcl/test-fixtures/comment.hcl b/vendor/github.com/hashicorp/hcl/hcl/test-fixtures/comment.hcl new file mode 100644 index 000000000..1ff7f29fd --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/hcl/test-fixtures/comment.hcl @@ -0,0 +1,15 @@ +// Foo + +/* Bar */ + +/* +/* +Baz +*/ + +# Another + +# Multiple +# Lines + +foo = "bar" diff --git a/vendor/github.com/hashicorp/hcl/hcl/test-fixtures/comment_single.hcl b/vendor/github.com/hashicorp/hcl/hcl/test-fixtures/comment_single.hcl new file mode 100644 index 000000000..fec56017d --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/hcl/test-fixtures/comment_single.hcl @@ -0,0 +1 @@ +# Hello diff --git a/vendor/github.com/hashicorp/hcl/hcl/test-fixtures/complex.hcl b/vendor/github.com/hashicorp/hcl/hcl/test-fixtures/complex.hcl new file mode 100644 index 000000000..cccb5b06f --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/hcl/test-fixtures/complex.hcl @@ -0,0 +1,42 @@ +// This comes from Terraform, as a test +variable "foo" { + default = "bar" + description = "bar" +} + +provider "aws" { + access_key = "foo" + secret_key = "bar" +} + +provider "do" { + api_key = "${var.foo}" +} + +resource "aws_security_group" "firewall" { + count = 5 +} + +resource aws_instance "web" { + ami = "${var.foo}" + security_groups = [ + "foo", + "${aws_security_group.firewall.foo}" + ] + + network_interface { + device_index = 0 + description = "Main network interface" + } +} + +resource "aws_instance" "db" { + security_groups = "${aws_security_group.firewall.*.id}" + VPC = "foo" + + depends_on = 
["aws_instance.web"] +} + +output "web_ip" { + value = "${aws_instance.web.private_ip}" +} diff --git a/vendor/github.com/hashicorp/hcl/hcl/test-fixtures/complex_key.hcl b/vendor/github.com/hashicorp/hcl/hcl/test-fixtures/complex_key.hcl new file mode 100644 index 000000000..0007aaf5f --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/hcl/test-fixtures/complex_key.hcl @@ -0,0 +1 @@ +foo.bar = "baz" diff --git a/vendor/github.com/hashicorp/hcl/hcl/test-fixtures/empty.hcl b/vendor/github.com/hashicorp/hcl/hcl/test-fixtures/empty.hcl new file mode 100644 index 000000000..e69de29bb diff --git a/vendor/github.com/hashicorp/hcl/hcl/test-fixtures/list.hcl b/vendor/github.com/hashicorp/hcl/hcl/test-fixtures/list.hcl new file mode 100644 index 000000000..059d4ce65 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/hcl/test-fixtures/list.hcl @@ -0,0 +1 @@ +foo = [1, 2, "foo"] diff --git a/vendor/github.com/hashicorp/hcl/hcl/test-fixtures/list_comma.hcl b/vendor/github.com/hashicorp/hcl/hcl/test-fixtures/list_comma.hcl new file mode 100644 index 000000000..50f4218ac --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/hcl/test-fixtures/list_comma.hcl @@ -0,0 +1 @@ +foo = [1, 2, "foo",] diff --git a/vendor/github.com/hashicorp/hcl/hcl/test-fixtures/multiple.hcl b/vendor/github.com/hashicorp/hcl/hcl/test-fixtures/multiple.hcl new file mode 100644 index 000000000..029c54b0c --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/hcl/test-fixtures/multiple.hcl @@ -0,0 +1,2 @@ +foo = "bar" +key = 7 diff --git a/vendor/github.com/hashicorp/hcl/hcl/test-fixtures/old.hcl b/vendor/github.com/hashicorp/hcl/hcl/test-fixtures/old.hcl new file mode 100644 index 000000000..e9f77cae9 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/hcl/test-fixtures/old.hcl @@ -0,0 +1,3 @@ +default = { + "eu-west-1": "ami-b1cf19c6", +} diff --git a/vendor/github.com/hashicorp/hcl/hcl/test-fixtures/structure.hcl b/vendor/github.com/hashicorp/hcl/hcl/test-fixtures/structure.hcl new file mode 100644 index 000000000..92592fbb3 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/hcl/test-fixtures/structure.hcl @@ -0,0 +1,5 @@ +// This is a test structure for the lexer +foo bar "baz" { + key = 7 + foo = "bar" +} diff --git a/vendor/github.com/hashicorp/hcl/hcl/test-fixtures/structure_basic.hcl b/vendor/github.com/hashicorp/hcl/hcl/test-fixtures/structure_basic.hcl new file mode 100644 index 000000000..7229a1f01 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/hcl/test-fixtures/structure_basic.hcl @@ -0,0 +1,5 @@ +foo { + value = 7 + "value" = 8 + "complex::value" = 9 +} diff --git a/vendor/github.com/hashicorp/hcl/hcl/test-fixtures/structure_empty.hcl b/vendor/github.com/hashicorp/hcl/hcl/test-fixtures/structure_empty.hcl new file mode 100644 index 000000000..4d156ddea --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/hcl/test-fixtures/structure_empty.hcl @@ -0,0 +1 @@ +resource "foo" "bar" {} diff --git a/vendor/github.com/hashicorp/hcl/hcl/test-fixtures/types.hcl b/vendor/github.com/hashicorp/hcl/hcl/test-fixtures/types.hcl new file mode 100644 index 000000000..cf2747ea1 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/hcl/test-fixtures/types.hcl @@ -0,0 +1,7 @@ +foo = "bar" +bar = 7 +baz = [1,2,3] +foo = -12 +bar = 3.14159 +foo = true +bar = false diff --git a/vendor/github.com/hashicorp/hcl/hcl/token/position.go b/vendor/github.com/hashicorp/hcl/hcl/token/position.go new file mode 100644 index 000000000..59c1bb72d --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/hcl/token/position.go @@ -0,0 +1,46 @@ +package token + 
+import "fmt" + +// Pos describes an arbitrary source position +// including the file, line, and column location. +// A Position is valid if the line number is > 0. +type Pos struct { + Filename string // filename, if any + Offset int // offset, starting at 0 + Line int // line number, starting at 1 + Column int // column number, starting at 1 (character count) +} + +// IsValid returns true if the position is valid. +func (p *Pos) IsValid() bool { return p.Line > 0 } + +// String returns a string in one of several forms: +// +// file:line:column valid position with file name +// line:column valid position without file name +// file invalid position with file name +// - invalid position without file name +func (p Pos) String() string { + s := p.Filename + if p.IsValid() { + if s != "" { + s += ":" + } + s += fmt.Sprintf("%d:%d", p.Line, p.Column) + } + if s == "" { + s = "-" + } + return s +} + +// Before reports whether the position p is before u. +func (p Pos) Before(u Pos) bool { + return u.Offset > p.Offset || u.Line > p.Line +} + +// After reports whether the position p is after u. +func (p Pos) After(u Pos) bool { + return u.Offset < p.Offset || u.Line < p.Line +} diff --git a/vendor/github.com/hashicorp/hcl/hcl/token/token.go b/vendor/github.com/hashicorp/hcl/hcl/token/token.go new file mode 100644 index 000000000..696ee8da4 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/hcl/token/token.go @@ -0,0 +1,170 @@ +// Package token defines constants representing the lexical tokens for HCL +// (HashiCorp Configuration Language) +package token + +import ( + "fmt" + "strconv" + "strings" + + hclstrconv "github.com/hashicorp/hcl/hcl/strconv" +) + +// Token defines a single HCL token which can be obtained via the Scanner +type Token struct { + Type Type + Pos Pos + Text string + JSON bool +} + +// Type is the set of lexical tokens of the HCL (HashiCorp Configuration Language) +type Type int + +const ( + // Special tokens + ILLEGAL Type = iota + EOF + COMMENT + + identifier_beg + IDENT // literals + literal_beg + NUMBER // 12345 + FLOAT // 123.45 + BOOL // true,false + STRING // "abc" + HEREDOC // < 0 { + // Pop the current item + n := len(frontier) + item := frontier[n-1] + frontier = frontier[:n-1] + + switch v := item.Val.(type) { + case *ast.ObjectType: + items, frontier = flattenObjectType(v, item, items, frontier) + case *ast.ListType: + items, frontier = flattenListType(v, item, items, frontier) + default: + items = append(items, item) + } + } + + // Reverse the list since the frontier model runs things backwards + for i := len(items)/2 - 1; i >= 0; i-- { + opp := len(items) - 1 - i + items[i], items[opp] = items[opp], items[i] + } + + // Done! Set the original items + list.Items = items + return n, true + }) +} + +func flattenListType( + ot *ast.ListType, + item *ast.ObjectItem, + items []*ast.ObjectItem, + frontier []*ast.ObjectItem) ([]*ast.ObjectItem, []*ast.ObjectItem) { + // All the elements of this object must also be objects! + for _, subitem := range ot.List { + if _, ok := subitem.(*ast.ObjectType); !ok { + items = append(items, item) + return items, frontier + } + } + + // Great! 
We have a match go through all the items and flatten + for _, elem := range ot.List { + // Add it to the frontier so that we can recurse + frontier = append(frontier, &ast.ObjectItem{ + Keys: item.Keys, + Assign: item.Assign, + Val: elem, + LeadComment: item.LeadComment, + LineComment: item.LineComment, + }) + } + + return items, frontier +} + +func flattenObjectType( + ot *ast.ObjectType, + item *ast.ObjectItem, + items []*ast.ObjectItem, + frontier []*ast.ObjectItem) ([]*ast.ObjectItem, []*ast.ObjectItem) { + // If the list has no items we do not have to flatten anything + if ot.List.Items == nil { + items = append(items, item) + return items, frontier + } + + // All the elements of this object must also be objects! + for _, subitem := range ot.List.Items { + if _, ok := subitem.Val.(*ast.ObjectType); !ok { + items = append(items, item) + return items, frontier + } + } + + // Great! We have a match go through all the items and flatten + for _, subitem := range ot.List.Items { + // Copy the new key + keys := make([]*ast.ObjectKey, len(item.Keys)+len(subitem.Keys)) + copy(keys, item.Keys) + copy(keys[len(item.Keys):], subitem.Keys) + + // Add it to the frontier so that we can recurse + frontier = append(frontier, &ast.ObjectItem{ + Keys: keys, + Assign: item.Assign, + Val: subitem.Val, + LeadComment: item.LeadComment, + LineComment: item.LineComment, + }) + } + + return items, frontier +} diff --git a/vendor/github.com/hashicorp/hcl/json/parser/parser.go b/vendor/github.com/hashicorp/hcl/json/parser/parser.go new file mode 100644 index 000000000..65d56c9b8 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/json/parser/parser.go @@ -0,0 +1,297 @@ +package parser + +import ( + "errors" + "fmt" + + "github.com/hashicorp/hcl/hcl/ast" + "github.com/hashicorp/hcl/json/scanner" + "github.com/hashicorp/hcl/json/token" +) + +type Parser struct { + sc *scanner.Scanner + + // Last read token + tok token.Token + commaPrev token.Token + + enableTrace bool + indent int + n int // buffer size (max = 1) +} + +func newParser(src []byte) *Parser { + return &Parser{ + sc: scanner.New(src), + } +} + +// Parse returns the fully parsed source and returns the abstract syntax tree. +func Parse(src []byte) (*ast.File, error) { + p := newParser(src) + return p.Parse() +} + +var errEofToken = errors.New("EOF token found") + +// Parse returns the fully parsed source and returns the abstract syntax tree. +func (p *Parser) Parse() (*ast.File, error) { + f := &ast.File{} + var err, scerr error + p.sc.Error = func(pos token.Pos, msg string) { + scerr = fmt.Errorf("%s: %s", pos, msg) + } + + // The root must be an object in JSON + object, err := p.object() + if scerr != nil { + return nil, scerr + } + if err != nil { + return nil, err + } + + // We make our final node an object list so it is more HCL compatible + f.Node = object.List + + // Flatten it, which finds patterns and turns them into more HCL-like + // AST trees. + flattenObjects(f.Node) + + return f, nil +} + +func (p *Parser) objectList() (*ast.ObjectList, error) { + defer un(trace(p, "ParseObjectList")) + node := &ast.ObjectList{} + + for { + n, err := p.objectItem() + if err == errEofToken { + break // we are finished + } + + // we don't return a nil node, because might want to use already + // collected items. + if err != nil { + return node, err + } + + node.Add(n) + + // Check for a followup comma. 
If it isn't a comma, then we're done
+		if tok := p.scan(); tok.Type != token.COMMA {
+			break
+		}
+	}
+	return node, nil
+}
+
+// objectItem parses a single object item
+func (p *Parser) objectItem() (*ast.ObjectItem, error) {
+	defer un(trace(p, "ParseObjectItem"))
+
+	keys, err := p.objectKey()
+	if err != nil {
+		return nil, err
+	}
+
+	o := &ast.ObjectItem{
+		Keys: keys,
+	}
+
+	switch p.tok.Type {
+	case token.COLON:
+		o.Val, err = p.objectValue()
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	return o, nil
+}
+
+// objectKey parses an object key and returns an ObjectKey AST
+func (p *Parser) objectKey() ([]*ast.ObjectKey, error) {
+	keyCount := 0
+	keys := make([]*ast.ObjectKey, 0)
+
+	for {
+		tok := p.scan()
+		switch tok.Type {
+		case token.EOF:
+			return nil, errEofToken
+		case token.STRING:
+			keyCount++
+			keys = append(keys, &ast.ObjectKey{
+				Token: p.tok.HCLToken(),
+			})
+		case token.COLON:
+			// Done
+			return keys, nil
+		case token.ILLEGAL:
+			fmt.Println("illegal")
+		default:
+			return nil, fmt.Errorf("expected: STRING got: %s", p.tok.Type)
+		}
+	}
+}
+
+// objectValue parses any type of value, such as number, bool, string, object,
+// or list.
+func (p *Parser) objectValue() (ast.Node, error) {
+	defer un(trace(p, "ParseObjectValue"))
+	tok := p.scan()
+
+	switch tok.Type {
+	case token.NUMBER, token.FLOAT, token.BOOL, token.NULL, token.STRING:
+		return p.literalType()
+	case token.LBRACE:
+		return p.objectType()
+	case token.LBRACK:
+		return p.listType()
+	case token.EOF:
+		return nil, errEofToken
+	}
+
+	return nil, fmt.Errorf("Expected object value, got unknown token: %+v", tok)
+}
+
+// object parses the root object, which in JSON must start with an opening
+// brace.
+func (p *Parser) object() (*ast.ObjectType, error) {
+	defer un(trace(p, "ParseType"))
+	tok := p.scan()
+
+	switch tok.Type {
+	case token.LBRACE:
+		return p.objectType()
+	case token.EOF:
+		return nil, errEofToken
+	}
+
+	return nil, fmt.Errorf("Expected object, got unknown token: %+v", tok)
+}
+
+// objectType parses an object type and returns an ObjectType AST
+func (p *Parser) objectType() (*ast.ObjectType, error) {
+	defer un(trace(p, "ParseObjectType"))
+
+	// we assume that the currently scanned token is a LBRACE
+	o := &ast.ObjectType{}
+
+	l, err := p.objectList()
+
+	// if we hit RBRACE, we are good to go (it means we parsed all items); if
+	// it's not an RBRACE, it's a syntax error and we just return it.
+	if err != nil && p.tok.Type != token.RBRACE {
+		return nil, err
+	}
+
+	o.List = l
+	return o, nil
+}
+
+// listType parses a list type and returns a ListType AST
+func (p *Parser) listType() (*ast.ListType, error) {
+	defer un(trace(p, "ParseListType"))
+
+	// we assume that the currently scanned token is a LBRACK
+	l := &ast.ListType{}
+
+	for {
+		tok := p.scan()
+		switch tok.Type {
+		case token.NUMBER, token.FLOAT, token.STRING:
+			node, err := p.literalType()
+			if err != nil {
+				return nil, err
+			}
+
+			l.Add(node)
+		case token.COMMA:
+			continue
+		case token.LBRACE:
+			node, err := p.objectType()
+			if err != nil {
+				return nil, err
+			}
+
+			l.Add(node)
+		case token.BOOL:
+			// TODO(arslan) should we support? not supported by HCL yet
+		case token.LBRACK:
+			// TODO(arslan) should we support nested lists?
Even though it's + // written in README of HCL, it's not a part of the grammar + // (not defined in parse.y) + case token.RBRACK: + // finished + return l, nil + default: + return nil, fmt.Errorf("unexpected token while parsing list: %s", tok.Type) + } + + } +} + +// literalType parses a literal type and returns a LiteralType AST +func (p *Parser) literalType() (*ast.LiteralType, error) { + defer un(trace(p, "ParseLiteral")) + + return &ast.LiteralType{ + Token: p.tok.HCLToken(), + }, nil +} + +// scan returns the next token from the underlying scanner. If a token has +// been unscanned then read that instead. +func (p *Parser) scan() token.Token { + // If we have a token on the buffer, then return it. + if p.n != 0 { + p.n = 0 + return p.tok + } + + p.tok = p.sc.Scan() + return p.tok +} + +// unscan pushes the previously read token back onto the buffer. +func (p *Parser) unscan() { + p.n = 1 +} + +// ---------------------------------------------------------------------------- +// Parsing support + +func (p *Parser) printTrace(a ...interface{}) { + if !p.enableTrace { + return + } + + const dots = ". . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . " + const n = len(dots) + fmt.Printf("%5d:%3d: ", p.tok.Pos.Line, p.tok.Pos.Column) + + i := 2 * p.indent + for i > n { + fmt.Print(dots) + i -= n + } + // i <= n + fmt.Print(dots[0:i]) + fmt.Println(a...) +} + +func trace(p *Parser, msg string) *Parser { + p.printTrace(msg, "(") + p.indent++ + return p +} + +// Usage pattern: defer un(trace(p, "...")) +func un(p *Parser) { + p.indent-- + p.printTrace(")") +} diff --git a/vendor/github.com/hashicorp/hcl/json/parser/parser_test.go b/vendor/github.com/hashicorp/hcl/json/parser/parser_test.go new file mode 100644 index 000000000..8c66fb9ca --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/json/parser/parser_test.go @@ -0,0 +1,338 @@ +package parser + +import ( + "fmt" + "io/ioutil" + "path/filepath" + "reflect" + "runtime" + "testing" + + "github.com/hashicorp/hcl/hcl/ast" + "github.com/hashicorp/hcl/hcl/token" +) + +func TestType(t *testing.T) { + var literals = []struct { + typ token.Type + src string + }{ + {token.STRING, `"foo": "bar"`}, + {token.NUMBER, `"foo": 123`}, + {token.FLOAT, `"foo": 123.12`}, + {token.FLOAT, `"foo": -123.12`}, + {token.BOOL, `"foo": true`}, + {token.STRING, `"foo": null`}, + } + + for _, l := range literals { + t.Logf("Testing: %s", l.src) + + p := newParser([]byte(l.src)) + item, err := p.objectItem() + if err != nil { + t.Error(err) + } + + lit, ok := item.Val.(*ast.LiteralType) + if !ok { + t.Errorf("node should be of type LiteralType, got: %T", item.Val) + } + + if lit.Token.Type != l.typ { + t.Errorf("want: %s, got: %s", l.typ, lit.Token.Type) + } + } +} + +func TestListType(t *testing.T) { + var literals = []struct { + src string + tokens []token.Type + }{ + { + `"foo": ["123", 123]`, + []token.Type{token.STRING, token.NUMBER}, + }, + { + `"foo": [123, "123",]`, + []token.Type{token.NUMBER, token.STRING}, + }, + { + `"foo": []`, + []token.Type{}, + }, + { + `"foo": ["123", 123]`, + []token.Type{token.STRING, token.NUMBER}, + }, + { + `"foo": ["123", {}]`, + []token.Type{token.STRING, token.LBRACE}, + }, + } + + for _, l := range literals { + t.Logf("Testing: %s", l.src) + + p := newParser([]byte(l.src)) + item, err := p.objectItem() + if err != nil { + t.Error(err) + } + + list, ok := item.Val.(*ast.ListType) + if !ok { + t.Errorf("node should be of type LiteralType, got: %T", item.Val) + } + + tokens := []token.Type{} + for _, li := range 
list.List { + switch v := li.(type) { + case *ast.LiteralType: + tokens = append(tokens, v.Token.Type) + case *ast.ObjectType: + tokens = append(tokens, token.LBRACE) + } + } + + equals(t, l.tokens, tokens) + } +} + +func TestObjectType(t *testing.T) { + var literals = []struct { + src string + nodeType []ast.Node + itemLen int + }{ + { + `"foo": {}`, + nil, + 0, + }, + { + `"foo": { + "bar": "fatih" + }`, + []ast.Node{&ast.LiteralType{}}, + 1, + }, + { + `"foo": { + "bar": "fatih", + "baz": ["arslan"] + }`, + []ast.Node{ + &ast.LiteralType{}, + &ast.ListType{}, + }, + 2, + }, + { + `"foo": { + "bar": {} + }`, + []ast.Node{ + &ast.ObjectType{}, + }, + 1, + }, + { + `"foo": { + "bar": {}, + "foo": true + }`, + []ast.Node{ + &ast.ObjectType{}, + &ast.LiteralType{}, + }, + 2, + }, + } + + for _, l := range literals { + t.Logf("Testing:\n%s\n", l.src) + + p := newParser([]byte(l.src)) + // p.enableTrace = true + item, err := p.objectItem() + if err != nil { + t.Error(err) + } + + // we know that the ObjectKey name is foo for all cases, what matters + // is the object + obj, ok := item.Val.(*ast.ObjectType) + if !ok { + t.Errorf("node should be of type LiteralType, got: %T", item.Val) + } + + // check if the total length of items are correct + equals(t, l.itemLen, len(obj.List.Items)) + + // check if the types are correct + for i, item := range obj.List.Items { + equals(t, reflect.TypeOf(l.nodeType[i]), reflect.TypeOf(item.Val)) + } + } +} + +func TestFlattenObjects(t *testing.T) { + var literals = []struct { + src string + nodeType []ast.Node + itemLen int + }{ + { + `{ + "foo": [ + { + "foo": "svh", + "bar": "fatih" + } + ] + }`, + []ast.Node{ + &ast.ObjectType{}, + &ast.LiteralType{}, + &ast.LiteralType{}, + }, + 3, + }, + { + `{ + "variable": { + "foo": {} + } + }`, + []ast.Node{ + &ast.ObjectType{}, + }, + 1, + }, + } + + for _, l := range literals { + t.Logf("Testing:\n%s\n", l.src) + + f, err := Parse([]byte(l.src)) + if err != nil { + t.Error(err) + } + + // the first object is always an ObjectList so just assert that one + // so we can use it as such + obj, ok := f.Node.(*ast.ObjectList) + if !ok { + t.Errorf("node should be *ast.ObjectList, got: %T", f.Node) + } + + // check if the types are correct + var i int + for _, item := range obj.Items { + equals(t, reflect.TypeOf(l.nodeType[i]), reflect.TypeOf(item.Val)) + i++ + + if obj, ok := item.Val.(*ast.ObjectType); ok { + for _, item := range obj.List.Items { + equals(t, reflect.TypeOf(l.nodeType[i]), reflect.TypeOf(item.Val)) + i++ + } + } + } + + // check if the number of items is correct + equals(t, l.itemLen, i) + + } +} + +func TestObjectKey(t *testing.T) { + keys := []struct { + exp []token.Type + src string + }{ + {[]token.Type{token.STRING}, `"foo": {}`}, + } + + for _, k := range keys { + p := newParser([]byte(k.src)) + keys, err := p.objectKey() + if err != nil { + t.Fatal(err) + } + + tokens := []token.Type{} + for _, o := range keys { + tokens = append(tokens, o.Token.Type) + } + + equals(t, k.exp, tokens) + } + + errKeys := []struct { + src string + }{ + {`foo 12 {}`}, + {`foo bar = {}`}, + {`foo []`}, + {`12 {}`}, + } + + for _, k := range errKeys { + p := newParser([]byte(k.src)) + _, err := p.objectKey() + if err == nil { + t.Errorf("case '%s' should give an error", k.src) + } + } +} + +// Official HCL tests +func TestParse(t *testing.T) { + cases := []struct { + Name string + Err bool + }{ + { + "array.json", + false, + }, + { + "basic.json", + false, + }, + { + "object.json", + false, + }, + { + "types.json", + false, 
+ }, + } + + const fixtureDir = "./test-fixtures" + + for _, tc := range cases { + d, err := ioutil.ReadFile(filepath.Join(fixtureDir, tc.Name)) + if err != nil { + t.Fatalf("err: %s", err) + } + + _, err = Parse(d) + if (err != nil) != tc.Err { + t.Fatalf("Input: %s\n\nError: %s", tc.Name, err) + } + } +} + +// equals fails the test if exp is not equal to act. +func equals(tb testing.TB, exp, act interface{}) { + if !reflect.DeepEqual(exp, act) { + _, file, line, _ := runtime.Caller(1) + fmt.Printf("\033[31m%s:%d:\n\n\texp: %#v\n\n\tgot: %#v\033[39m\n\n", filepath.Base(file), line, exp, act) + tb.FailNow() + } +} diff --git a/vendor/github.com/hashicorp/hcl/json/parser/test-fixtures/array.json b/vendor/github.com/hashicorp/hcl/json/parser/test-fixtures/array.json new file mode 100644 index 000000000..e320f17ab --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/json/parser/test-fixtures/array.json @@ -0,0 +1,4 @@ +{ + "foo": [1, 2, "bar"], + "bar": "baz" +} diff --git a/vendor/github.com/hashicorp/hcl/json/parser/test-fixtures/basic.json b/vendor/github.com/hashicorp/hcl/json/parser/test-fixtures/basic.json new file mode 100644 index 000000000..b54bde96c --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/json/parser/test-fixtures/basic.json @@ -0,0 +1,3 @@ +{ + "foo": "bar" +} diff --git a/vendor/github.com/hashicorp/hcl/json/parser/test-fixtures/object.json b/vendor/github.com/hashicorp/hcl/json/parser/test-fixtures/object.json new file mode 100644 index 000000000..72168a3cc --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/json/parser/test-fixtures/object.json @@ -0,0 +1,5 @@ +{ + "foo": { + "bar": [1,2] + } +} diff --git a/vendor/github.com/hashicorp/hcl/json/parser/test-fixtures/types.json b/vendor/github.com/hashicorp/hcl/json/parser/test-fixtures/types.json new file mode 100644 index 000000000..9a142a6ca --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/json/parser/test-fixtures/types.json @@ -0,0 +1,10 @@ +{ + "foo": "bar", + "bar": 7, + "baz": [1,2,3], + "foo": -12, + "bar": 3.14159, + "foo": true, + "bar": false, + "foo": null +} diff --git a/vendor/github.com/hashicorp/hcl/json/scanner/scanner.go b/vendor/github.com/hashicorp/hcl/json/scanner/scanner.go new file mode 100644 index 000000000..477f71ff3 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/json/scanner/scanner.go @@ -0,0 +1,451 @@ +package scanner + +import ( + "bytes" + "fmt" + "os" + "unicode" + "unicode/utf8" + + "github.com/hashicorp/hcl/json/token" +) + +// eof represents a marker rune for the end of the reader. +const eof = rune(0) + +// Scanner defines a lexical scanner +type Scanner struct { + buf *bytes.Buffer // Source buffer for advancing and scanning + src []byte // Source buffer for immutable access + + // Source Position + srcPos token.Pos // current position + prevPos token.Pos // previous position, used for peek() method + + lastCharLen int // length of last character in bytes + lastLineLen int // length of last line in characters (for correct column reporting) + + tokStart int // token text start position + tokEnd int // token text end position + + // Error is called for each error encountered. If no Error + // function is set, the error is reported to os.Stderr. + Error func(pos token.Pos, msg string) + + // ErrorCount is incremented by one for each error encountered. + ErrorCount int + + // tokPos is the start position of most recently scanned token; set by + // Scan. The Filename field is always left untouched by the Scanner. 
If
+	// an error is reported (via Error) and Position is invalid, the scanner is
+	// not inside a token.
+	tokPos token.Pos
+}
+
+// New creates and initializes a new instance of Scanner using src as
+// its source content.
+func New(src []byte) *Scanner {
+	// even though we accept a src, we read from an io.Reader-compatible type
+	// (*bytes.Buffer). So in the future we might easily change it to a
+	// streaming read.
+	b := bytes.NewBuffer(src)
+	s := &Scanner{
+		buf: b,
+		src: src,
+	}
+
+	// the source position always starts at line 1
+	s.srcPos.Line = 1
+	return s
+}
+
+// next reads the next rune from the buffered reader. Returns rune(0) if
+// an error occurs (or io.EOF is returned).
+func (s *Scanner) next() rune {
+	ch, size, err := s.buf.ReadRune()
+	if err != nil {
+		// advance for error reporting
+		s.srcPos.Column++
+		s.srcPos.Offset += size
+		s.lastCharLen = size
+		return eof
+	}
+
+	if ch == utf8.RuneError && size == 1 {
+		s.srcPos.Column++
+		s.srcPos.Offset += size
+		s.lastCharLen = size
+		s.err("illegal UTF-8 encoding")
+		return ch
+	}
+
+	// remember last position
+	s.prevPos = s.srcPos
+
+	s.srcPos.Column++
+	s.lastCharLen = size
+	s.srcPos.Offset += size
+
+	if ch == '\n' {
+		s.srcPos.Line++
+		s.lastLineLen = s.srcPos.Column
+		s.srcPos.Column = 0
+	}
+
+	// debug
+	// fmt.Printf("ch: %q, offset:column: %d:%d\n", ch, s.srcPos.Offset, s.srcPos.Column)
+	return ch
+}
+
+// unread unreads the previously read rune and updates the source position
+func (s *Scanner) unread() {
+	if err := s.buf.UnreadRune(); err != nil {
+		panic(err) // this is user fault, we should catch it
+	}
+	s.srcPos = s.prevPos // put back last position
+}
+
+// peek returns the next rune without advancing the reader.
+func (s *Scanner) peek() rune {
+	peek, _, err := s.buf.ReadRune()
+	if err != nil {
+		return eof
+	}
+
+	s.buf.UnreadRune()
+	return peek
+}
+
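As a quick orientation before the Scan implementation below, a minimal usage sketch of this scanner's public surface (the JSON literal is invented for illustration):

    package main

    import (
    	"fmt"

    	"github.com/hashicorp/hcl/json/scanner"
    	"github.com/hashicorp/hcl/json/token"
    )

    func main() {
    	s := scanner.New([]byte(`{"foo": 42}`))
    	for {
    		tok := s.Scan()
    		if tok.Type == token.EOF {
    			break
    		}
    		// Each token carries its position, type, and raw text.
    		fmt.Println(tok.String())
    	}
    }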
+// Scan scans the next token and returns it.
+func (s *Scanner) Scan() token.Token {
+	ch := s.next()
+
+	// skip white space
+	for isWhitespace(ch) {
+		ch = s.next()
+	}
+
+	var tok token.Type
+
+	// token text markings
+	s.tokStart = s.srcPos.Offset - s.lastCharLen
+
+	// token position: the initial next() moves the offset by one (the size of
+	// the rune, actually), though we are interested in the starting point
+	s.tokPos.Offset = s.srcPos.Offset - s.lastCharLen
+	if s.srcPos.Column > 0 {
+		// common case: last character was not a '\n'
+		s.tokPos.Line = s.srcPos.Line
+		s.tokPos.Column = s.srcPos.Column
+	} else {
+		// last character was a '\n'
+		// (we cannot be at the beginning of the source
+		// since we have called next() at least once)
+		s.tokPos.Line = s.srcPos.Line - 1
+		s.tokPos.Column = s.lastLineLen
+	}
+
+	switch {
+	case isLetter(ch):
+		lit := s.scanIdentifier()
+		if lit == "true" || lit == "false" {
+			tok = token.BOOL
+		} else if lit == "null" {
+			tok = token.NULL
+		} else {
+			s.err("illegal char")
+		}
+	case isDecimal(ch):
+		tok = s.scanNumber(ch)
+	default:
+		switch ch {
+		case eof:
+			tok = token.EOF
+		case '"':
+			tok = token.STRING
+			s.scanString()
+		case '.':
+			tok = token.PERIOD
+			ch = s.peek()
+			if isDecimal(ch) {
+				tok = token.FLOAT
+				ch = s.scanMantissa(ch)
+				ch = s.scanExponent(ch)
+			}
+		case '[':
+			tok = token.LBRACK
+		case ']':
+			tok = token.RBRACK
+		case '{':
+			tok = token.LBRACE
+		case '}':
+			tok = token.RBRACE
+		case ',':
+			tok = token.COMMA
+		case ':':
+			tok = token.COLON
+		case '-':
+			if isDecimal(s.peek()) {
+				ch := s.next()
+				tok = s.scanNumber(ch)
+			} else {
+				s.err("illegal char")
+			}
+		default:
+			s.err("illegal char: " + string(ch))
+		}
+	}
+
+	// finish token ending
+	s.tokEnd = s.srcPos.Offset
+
+	// create token literal
+	var tokenText string
+	if s.tokStart >= 0 {
+		tokenText = string(s.src[s.tokStart:s.tokEnd])
+	}
+	s.tokStart = s.tokEnd // ensure idempotency of tokenText() call
+
+	return token.Token{
+		Type: tok,
+		Pos:  s.tokPos,
+		Text: tokenText,
+	}
+}
+
+// scanNumber scans an HCL number definition starting with the given rune
+func (s *Scanner) scanNumber(ch rune) token.Type {
+	zero := ch == '0'
+	pos := s.srcPos
+
+	s.scanMantissa(ch)
+	ch = s.next() // seek forward
+	if ch == 'e' || ch == 'E' {
+		ch = s.scanExponent(ch)
+		return token.FLOAT
+	}
+
+	if ch == '.' {
+		ch = s.scanFraction(ch)
+		if ch == 'e' || ch == 'E' {
+			ch = s.next()
+			ch = s.scanExponent(ch)
+		}
+		return token.FLOAT
+	}
+
+	if ch != eof {
+		s.unread()
+	}
+
+	// If we have a larger number and this is zero, error
+	if zero && pos != s.srcPos {
+		s.err("numbers cannot start with 0")
+	}
+
+	return token.NUMBER
+}
+
+// scanMantissa scans the mantissa beginning from the given rune. It returns
+// the next non-decimal rune. It's used to determine whether it's a fraction
+// or an exponent.
+func (s *Scanner) scanMantissa(ch rune) rune {
+	scanned := false
+	for isDecimal(ch) {
+		ch = s.next()
+		scanned = true
+	}
+
+	if scanned && ch != eof {
+		s.unread()
+	}
+	return ch
+}
+
+// scanFraction scans the fraction after the '.' rune
+func (s *Scanner) scanFraction(ch rune) rune {
+	if ch == '.' {
+		ch = s.peek() // we peek just to see if we can move forward
+		ch = s.scanMantissa(ch)
+	}
+	return ch
+}
+
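To make the NUMBER versus FLOAT classification concrete, a test-style sketch that would sit in this package (inputs invented; expected output reasoned from scanNumber, scanFraction, and scanExponent above):

    package scanner

    import "fmt"

    // ExampleNumberScanning is an illustrative sketch, not part of the
    // vendored tests: it shows how a few invented inputs are classified.
    func ExampleNumberScanning() {
    	for _, in := range []string{"42", "-0.5", "4e2", "0.2e+10"} {
    		s := New([]byte(in))
    		fmt.Printf("%s -> %s\n", in, s.Scan().Type)
    	}
    	// Output:
    	// 42 -> NUMBER
    	// -0.5 -> FLOAT
    	// 4e2 -> FLOAT
    	// 0.2e+10 -> FLOAT
    }

+// scanExponent scans the remaining parts of an exponent after the 'e' or 'E'
+// rune.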
+func (s *Scanner) scanExponent(ch rune) rune { + if ch == 'e' || ch == 'E' { + ch = s.next() + if ch == '-' || ch == '+' { + ch = s.next() + } + ch = s.scanMantissa(ch) + } + return ch +} + +// scanString scans a quoted string +func (s *Scanner) scanString() { + braces := 0 + for { + // '"' opening already consumed + // read character after quote + ch := s.next() + + if ch == '\n' || ch < 0 || ch == eof { + s.err("literal not terminated") + return + } + + if ch == '"' && braces == 0 { + break + } + + // If we're going into a ${} then we can ignore quotes for awhile + if braces == 0 && ch == '$' && s.peek() == '{' { + braces++ + s.next() + } else if braces > 0 && ch == '{' { + braces++ + } + if braces > 0 && ch == '}' { + braces-- + } + + if ch == '\\' { + s.scanEscape() + } + } + + return +} + +// scanEscape scans an escape sequence +func (s *Scanner) scanEscape() rune { + // http://en.cppreference.com/w/cpp/language/escape + ch := s.next() // read character after '/' + switch ch { + case 'a', 'b', 'f', 'n', 'r', 't', 'v', '\\', '"': + // nothing to do + case '0', '1', '2', '3', '4', '5', '6', '7': + // octal notation + ch = s.scanDigits(ch, 8, 3) + case 'x': + // hexademical notation + ch = s.scanDigits(s.next(), 16, 2) + case 'u': + // universal character name + ch = s.scanDigits(s.next(), 16, 4) + case 'U': + // universal character name + ch = s.scanDigits(s.next(), 16, 8) + default: + s.err("illegal char escape") + } + return ch +} + +// scanDigits scans a rune with the given base for n times. For example an +// octal notation \184 would yield in scanDigits(ch, 8, 3) +func (s *Scanner) scanDigits(ch rune, base, n int) rune { + for n > 0 && digitVal(ch) < base { + ch = s.next() + n-- + } + if n > 0 { + s.err("illegal char escape") + } + + // we scanned all digits, put the last non digit char back + s.unread() + return ch +} + +// scanIdentifier scans an identifier and returns the literal string +func (s *Scanner) scanIdentifier() string { + offs := s.srcPos.Offset - s.lastCharLen + ch := s.next() + for isLetter(ch) || isDigit(ch) || ch == '-' { + ch = s.next() + } + + if ch != eof { + s.unread() // we got identifier, put back latest char + } + + return string(s.src[offs:s.srcPos.Offset]) +} + +// recentPosition returns the position of the character immediately after the +// character or token returned by the last call to Scan. +func (s *Scanner) recentPosition() (pos token.Pos) { + pos.Offset = s.srcPos.Offset - s.lastCharLen + switch { + case s.srcPos.Column > 0: + // common case: last character was not a '\n' + pos.Line = s.srcPos.Line + pos.Column = s.srcPos.Column + case s.lastLineLen > 0: + // last character was a '\n' + // (we cannot be at the beginning of the source + // since we have called next() at least once) + pos.Line = s.srcPos.Line - 1 + pos.Column = s.lastLineLen + default: + // at the beginning of the source + pos.Line = 1 + pos.Column = 1 + } + return +} + +// err prints the error of any scanning to s.Error function. 
If the function is
+// not set, the error is printed to os.Stderr by default
+func (s *Scanner) err(msg string) {
+	s.ErrorCount++
+	pos := s.recentPosition()
+
+	if s.Error != nil {
+		s.Error(pos, msg)
+		return
+	}
+
+	fmt.Fprintf(os.Stderr, "%s: %s\n", pos, msg)
+}
+
+// isLetter returns true if the given rune is a letter
+func isLetter(ch rune) bool {
+	return 'a' <= ch && ch <= 'z' || 'A' <= ch && ch <= 'Z' || ch == '_' || ch >= 0x80 && unicode.IsLetter(ch)
+}
+
+// isDigit returns true if the given rune is a digit
+func isDigit(ch rune) bool {
+	return '0' <= ch && ch <= '9' || ch >= 0x80 && unicode.IsDigit(ch)
+}
+
+// isDecimal returns true if the given rune is a decimal digit
+func isDecimal(ch rune) bool {
+	return '0' <= ch && ch <= '9'
+}
+
+// isHexadecimal returns true if the given rune is a hexadecimal digit
+func isHexadecimal(ch rune) bool {
+	return '0' <= ch && ch <= '9' || 'a' <= ch && ch <= 'f' || 'A' <= ch && ch <= 'F'
+}
+
+// isWhitespace returns true if the rune is a space, tab, newline or carriage return
+func isWhitespace(ch rune) bool {
+	return ch == ' ' || ch == '\t' || ch == '\n' || ch == '\r'
+}
+
+// digitVal returns the integer value of a given octal, decimal, or hexadecimal rune
+func digitVal(ch rune) int {
+	switch {
+	case '0' <= ch && ch <= '9':
+		return int(ch - '0')
+	case 'a' <= ch && ch <= 'f':
+		return int(ch - 'a' + 10)
+	case 'A' <= ch && ch <= 'F':
+		return int(ch - 'A' + 10)
+	}
+	return 16 // larger than any legal digit val
+}
diff --git a/vendor/github.com/hashicorp/hcl/json/scanner/scanner_test.go b/vendor/github.com/hashicorp/hcl/json/scanner/scanner_test.go
new file mode 100644
index 000000000..fe2d75524
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/json/scanner/scanner_test.go
@@ -0,0 +1,363 @@
+package scanner
+
+import (
+	"bytes"
+	"fmt"
+	"testing"
+
+	"github.com/hashicorp/hcl/json/token"
+)
+
+var f100 = "ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"
+
+type tokenPair struct {
+	tok  token.Type
+	text string
+}
+
+var tokenLists = map[string][]tokenPair{
+	"operator": []tokenPair{
+		{token.LBRACK, "["},
+		{token.LBRACE, "{"},
+		{token.COMMA, ","},
+		{token.PERIOD, "."},
+		{token.RBRACK, "]"},
+		{token.RBRACE, "}"},
+	},
+	"bool": []tokenPair{
+		{token.BOOL, "true"},
+		{token.BOOL, "false"},
+	},
+	"string": []tokenPair{
+		{token.STRING, `" "`},
+		{token.STRING, `"a"`},
+		{token.STRING, `"本"`},
+		{token.STRING, `"${file("foo")}"`},
+		{token.STRING, `"${file(\"foo\")}"`},
+		{token.STRING, `"\a"`},
+		{token.STRING, `"\b"`},
+		{token.STRING, `"\f"`},
+		{token.STRING, `"\n"`},
+		{token.STRING, `"\r"`},
+		{token.STRING, `"\t"`},
+		{token.STRING, `"\v"`},
+		{token.STRING, `"\""`},
+		{token.STRING, `"\000"`},
+		{token.STRING, `"\777"`},
+		{token.STRING, `"\x00"`},
+		{token.STRING, `"\xff"`},
+		{token.STRING, `"\u0000"`},
+		{token.STRING, `"\ufA16"`},
+		{token.STRING, `"\U00000000"`},
+		{token.STRING, `"\U0000ffAB"`},
+		{token.STRING, `"` + f100 + `"`},
+	},
+	"number": []tokenPair{
+		{token.NUMBER, "0"},
+		{token.NUMBER, "1"},
+		{token.NUMBER, "9"},
+		{token.NUMBER, "42"},
+		{token.NUMBER, "1234567890"},
+		{token.NUMBER, "-0"},
+		{token.NUMBER, "-1"},
+		{token.NUMBER, "-9"},
+		{token.NUMBER, "-42"},
+		{token.NUMBER, "-1234567890"},
+	},
+	"float": []tokenPair{
+		{token.FLOAT, "0."},
+		{token.FLOAT, "1."},
+		{token.FLOAT, "42."},
+		{token.FLOAT, "01234567890."},
+		{token.FLOAT, ".0"},
+		{token.FLOAT, ".1"},
+		{token.FLOAT, ".42"},
+		{token.FLOAT,
".0123456789"}, + {token.FLOAT, "0.0"}, + {token.FLOAT, "1.0"}, + {token.FLOAT, "42.0"}, + {token.FLOAT, "01234567890.0"}, + {token.FLOAT, "0e0"}, + {token.FLOAT, "1e0"}, + {token.FLOAT, "42e0"}, + {token.FLOAT, "01234567890e0"}, + {token.FLOAT, "0E0"}, + {token.FLOAT, "1E0"}, + {token.FLOAT, "42E0"}, + {token.FLOAT, "01234567890E0"}, + {token.FLOAT, "0e+10"}, + {token.FLOAT, "1e-10"}, + {token.FLOAT, "42e+10"}, + {token.FLOAT, "01234567890e-10"}, + {token.FLOAT, "0E+10"}, + {token.FLOAT, "1E-10"}, + {token.FLOAT, "42E+10"}, + {token.FLOAT, "01234567890E-10"}, + {token.FLOAT, "01.8e0"}, + {token.FLOAT, "1.4e0"}, + {token.FLOAT, "42.2e0"}, + {token.FLOAT, "01234567890.12e0"}, + {token.FLOAT, "0.E0"}, + {token.FLOAT, "1.12E0"}, + {token.FLOAT, "42.123E0"}, + {token.FLOAT, "01234567890.213E0"}, + {token.FLOAT, "0.2e+10"}, + {token.FLOAT, "1.2e-10"}, + {token.FLOAT, "42.54e+10"}, + {token.FLOAT, "01234567890.98e-10"}, + {token.FLOAT, "0.1E+10"}, + {token.FLOAT, "1.1E-10"}, + {token.FLOAT, "42.1E+10"}, + {token.FLOAT, "01234567890.1E-10"}, + {token.FLOAT, "-0.0"}, + {token.FLOAT, "-1.0"}, + {token.FLOAT, "-42.0"}, + {token.FLOAT, "-01234567890.0"}, + {token.FLOAT, "-0e0"}, + {token.FLOAT, "-1e0"}, + {token.FLOAT, "-42e0"}, + {token.FLOAT, "-01234567890e0"}, + {token.FLOAT, "-0E0"}, + {token.FLOAT, "-1E0"}, + {token.FLOAT, "-42E0"}, + {token.FLOAT, "-01234567890E0"}, + {token.FLOAT, "-0e+10"}, + {token.FLOAT, "-1e-10"}, + {token.FLOAT, "-42e+10"}, + {token.FLOAT, "-01234567890e-10"}, + {token.FLOAT, "-0E+10"}, + {token.FLOAT, "-1E-10"}, + {token.FLOAT, "-42E+10"}, + {token.FLOAT, "-01234567890E-10"}, + {token.FLOAT, "-01.8e0"}, + {token.FLOAT, "-1.4e0"}, + {token.FLOAT, "-42.2e0"}, + {token.FLOAT, "-01234567890.12e0"}, + {token.FLOAT, "-0.E0"}, + {token.FLOAT, "-1.12E0"}, + {token.FLOAT, "-42.123E0"}, + {token.FLOAT, "-01234567890.213E0"}, + {token.FLOAT, "-0.2e+10"}, + {token.FLOAT, "-1.2e-10"}, + {token.FLOAT, "-42.54e+10"}, + {token.FLOAT, "-01234567890.98e-10"}, + {token.FLOAT, "-0.1E+10"}, + {token.FLOAT, "-1.1E-10"}, + {token.FLOAT, "-42.1E+10"}, + {token.FLOAT, "-01234567890.1E-10"}, + }, +} + +var orderedTokenLists = []string{ + "comment", + "operator", + "bool", + "string", + "number", + "float", +} + +func TestPosition(t *testing.T) { + // create artifical source code + buf := new(bytes.Buffer) + + for _, listName := range orderedTokenLists { + for _, ident := range tokenLists[listName] { + fmt.Fprintf(buf, "\t\t\t\t%s\n", ident.text) + } + } + + s := New(buf.Bytes()) + + pos := token.Pos{"", 4, 1, 5} + s.Scan() + for _, listName := range orderedTokenLists { + + for _, k := range tokenLists[listName] { + curPos := s.tokPos + // fmt.Printf("[%q] s = %+v:%+v\n", k.text, curPos.Offset, curPos.Column) + + if curPos.Offset != pos.Offset { + t.Fatalf("offset = %d, want %d for %q", curPos.Offset, pos.Offset, k.text) + } + if curPos.Line != pos.Line { + t.Fatalf("line = %d, want %d for %q", curPos.Line, pos.Line, k.text) + } + if curPos.Column != pos.Column { + t.Fatalf("column = %d, want %d for %q", curPos.Column, pos.Column, k.text) + } + pos.Offset += 4 + len(k.text) + 1 // 4 tabs + token bytes + newline + pos.Line += countNewlines(k.text) + 1 // each token is on a new line + + s.Error = func(pos token.Pos, msg string) { + t.Errorf("error %q for %q", msg, k.text) + } + + s.Scan() + } + } + // make sure there were no token-internal errors reported by scanner + if s.ErrorCount != 0 { + t.Errorf("%d errors", s.ErrorCount) + } +} + +func TestComment(t *testing.T) { + testTokenList(t, 
tokenLists["comment"]) +} + +func TestOperator(t *testing.T) { + testTokenList(t, tokenLists["operator"]) +} + +func TestBool(t *testing.T) { + testTokenList(t, tokenLists["bool"]) +} + +func TestIdent(t *testing.T) { + testTokenList(t, tokenLists["ident"]) +} + +func TestString(t *testing.T) { + testTokenList(t, tokenLists["string"]) +} + +func TestNumber(t *testing.T) { + testTokenList(t, tokenLists["number"]) +} + +func TestFloat(t *testing.T) { + testTokenList(t, tokenLists["float"]) +} + +func TestRealExample(t *testing.T) { + complexReal := ` +{ + "variable": { + "foo": { + "default": "bar", + "description": "bar", + "depends_on": ["something"] + } + } +}` + + literals := []struct { + tokenType token.Type + literal string + }{ + {token.LBRACE, `{`}, + {token.STRING, `"variable"`}, + {token.COLON, `:`}, + {token.LBRACE, `{`}, + {token.STRING, `"foo"`}, + {token.COLON, `:`}, + {token.LBRACE, `{`}, + {token.STRING, `"default"`}, + {token.COLON, `:`}, + {token.STRING, `"bar"`}, + {token.COMMA, `,`}, + {token.STRING, `"description"`}, + {token.COLON, `:`}, + {token.STRING, `"bar"`}, + {token.COMMA, `,`}, + {token.STRING, `"depends_on"`}, + {token.COLON, `:`}, + {token.LBRACK, `[`}, + {token.STRING, `"something"`}, + {token.RBRACK, `]`}, + {token.RBRACE, `}`}, + {token.RBRACE, `}`}, + {token.RBRACE, `}`}, + {token.EOF, ``}, + } + + s := New([]byte(complexReal)) + for _, l := range literals { + tok := s.Scan() + if l.tokenType != tok.Type { + t.Errorf("got: %s want %s for %s\n", tok, l.tokenType, tok.String()) + } + + if l.literal != tok.Text { + t.Errorf("got: %s want %s\n", tok, l.literal) + } + } + +} + +func TestError(t *testing.T) { + testError(t, "\x80", "1:1", "illegal UTF-8 encoding", token.ILLEGAL) + testError(t, "\xff", "1:1", "illegal UTF-8 encoding", token.ILLEGAL) + + testError(t, `"ab`+"\x80", "1:4", "illegal UTF-8 encoding", token.STRING) + testError(t, `"abc`+"\xff", "1:5", "illegal UTF-8 encoding", token.STRING) + + testError(t, `01238`, "1:7", "numbers cannot start with 0", token.NUMBER) + testError(t, `01238123`, "1:10", "numbers cannot start with 0", token.NUMBER) + testError(t, `'aa'`, "1:1", "illegal char: '", token.ILLEGAL) + + testError(t, `"`, "1:2", "literal not terminated", token.STRING) + testError(t, `"abc`, "1:5", "literal not terminated", token.STRING) + testError(t, `"abc`+"\n", "1:5", "literal not terminated", token.STRING) +} + +func testError(t *testing.T, src, pos, msg string, tok token.Type) { + s := New([]byte(src)) + + errorCalled := false + s.Error = func(p token.Pos, m string) { + if !errorCalled { + if pos != p.String() { + t.Errorf("pos = %q, want %q for %q", p, pos, src) + } + + if m != msg { + t.Errorf("msg = %q, want %q for %q", m, msg, src) + } + errorCalled = true + } + } + + tk := s.Scan() + if tk.Type != tok { + t.Errorf("tok = %s, want %s for %q", tk, tok, src) + } + if !errorCalled { + t.Errorf("error handler not called for %q", src) + } + if s.ErrorCount == 0 { + t.Errorf("count = %d, want > 0 for %q", s.ErrorCount, src) + } +} + +func testTokenList(t *testing.T, tokenList []tokenPair) { + // create artifical source code + buf := new(bytes.Buffer) + for _, ident := range tokenList { + fmt.Fprintf(buf, "%s\n", ident.text) + } + + s := New(buf.Bytes()) + for _, ident := range tokenList { + tok := s.Scan() + if tok.Type != ident.tok { + t.Errorf("tok = %q want %q for %q\n", tok, ident.tok, ident.text) + } + + if tok.Text != ident.text { + t.Errorf("text = %q want %q", tok.String(), ident.text) + } + + } +} + +func countNewlines(s string) int 
{ + n := 0 + for _, ch := range s { + if ch == '\n' { + n++ + } + } + return n +} diff --git a/vendor/github.com/hashicorp/hcl/json/test-fixtures/array.json b/vendor/github.com/hashicorp/hcl/json/test-fixtures/array.json new file mode 100644 index 000000000..e320f17ab --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/json/test-fixtures/array.json @@ -0,0 +1,4 @@ +{ + "foo": [1, 2, "bar"], + "bar": "baz" +} diff --git a/vendor/github.com/hashicorp/hcl/json/test-fixtures/basic.json b/vendor/github.com/hashicorp/hcl/json/test-fixtures/basic.json new file mode 100644 index 000000000..b54bde96c --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/json/test-fixtures/basic.json @@ -0,0 +1,3 @@ +{ + "foo": "bar" +} diff --git a/vendor/github.com/hashicorp/hcl/json/test-fixtures/object.json b/vendor/github.com/hashicorp/hcl/json/test-fixtures/object.json new file mode 100644 index 000000000..72168a3cc --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/json/test-fixtures/object.json @@ -0,0 +1,5 @@ +{ + "foo": { + "bar": [1,2] + } +} diff --git a/vendor/github.com/hashicorp/hcl/json/test-fixtures/types.json b/vendor/github.com/hashicorp/hcl/json/test-fixtures/types.json new file mode 100644 index 000000000..9a142a6ca --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/json/test-fixtures/types.json @@ -0,0 +1,10 @@ +{ + "foo": "bar", + "bar": 7, + "baz": [1,2,3], + "foo": -12, + "bar": 3.14159, + "foo": true, + "bar": false, + "foo": null +} diff --git a/vendor/github.com/hashicorp/hcl/json/token/position.go b/vendor/github.com/hashicorp/hcl/json/token/position.go new file mode 100644 index 000000000..59c1bb72d --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/json/token/position.go @@ -0,0 +1,46 @@ +package token + +import "fmt" + +// Pos describes an arbitrary source position +// including the file, line, and column location. +// A Position is valid if the line number is > 0. +type Pos struct { + Filename string // filename, if any + Offset int // offset, starting at 0 + Line int // line number, starting at 1 + Column int // column number, starting at 1 (character count) +} + +// IsValid returns true if the position is valid. +func (p *Pos) IsValid() bool { return p.Line > 0 } + +// String returns a string in one of several forms: +// +// file:line:column valid position with file name +// line:column valid position without file name +// file invalid position with file name +// - invalid position without file name +func (p Pos) String() string { + s := p.Filename + if p.IsValid() { + if s != "" { + s += ":" + } + s += fmt.Sprintf("%d:%d", p.Line, p.Column) + } + if s == "" { + s = "-" + } + return s +} + +// Before reports whether the position p is before u. +func (p Pos) Before(u Pos) bool { + return u.Offset > p.Offset || u.Line > p.Line +} + +// After reports whether the position p is after u. 
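For orientation, a small sketch of these Pos helpers in action (all values are invented):

    package main

    import (
    	"fmt"

    	"github.com/hashicorp/hcl/json/token"
    )

    func main() {
    	a := token.Pos{Filename: "main.tf.json", Offset: 10, Line: 2, Column: 3}
    	b := token.Pos{Offset: 25, Line: 4, Column: 1}

    	fmt.Println(a.String()) // "main.tf.json:2:3" (valid position with file name)
    	fmt.Println(b.String()) // "4:1" (valid position without file name)

    	// Comparisons look at offset and line, per Before/After below.
    	fmt.Println(a.Before(b), a.After(b)) // true false
    }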
+func (p Pos) After(u Pos) bool { + return u.Offset < p.Offset || u.Line < p.Line +} diff --git a/vendor/github.com/hashicorp/hcl/json/token/token.go b/vendor/github.com/hashicorp/hcl/json/token/token.go new file mode 100644 index 000000000..95a0c3eee --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/json/token/token.go @@ -0,0 +1,118 @@ +package token + +import ( + "fmt" + "strconv" + + hcltoken "github.com/hashicorp/hcl/hcl/token" +) + +// Token defines a single HCL token which can be obtained via the Scanner +type Token struct { + Type Type + Pos Pos + Text string +} + +// Type is the set of lexical tokens of the HCL (HashiCorp Configuration Language) +type Type int + +const ( + // Special tokens + ILLEGAL Type = iota + EOF + + identifier_beg + literal_beg + NUMBER // 12345 + FLOAT // 123.45 + BOOL // true,false + STRING // "abc" + NULL // null + literal_end + identifier_end + + operator_beg + LBRACK // [ + LBRACE // { + COMMA // , + PERIOD // . + COLON // : + + RBRACK // ] + RBRACE // } + + operator_end +) + +var tokens = [...]string{ + ILLEGAL: "ILLEGAL", + + EOF: "EOF", + + NUMBER: "NUMBER", + FLOAT: "FLOAT", + BOOL: "BOOL", + STRING: "STRING", + NULL: "NULL", + + LBRACK: "LBRACK", + LBRACE: "LBRACE", + COMMA: "COMMA", + PERIOD: "PERIOD", + COLON: "COLON", + + RBRACK: "RBRACK", + RBRACE: "RBRACE", +} + +// String returns the string corresponding to the token tok. +func (t Type) String() string { + s := "" + if 0 <= t && t < Type(len(tokens)) { + s = tokens[t] + } + if s == "" { + s = "token(" + strconv.Itoa(int(t)) + ")" + } + return s +} + +// IsIdentifier returns true for tokens corresponding to identifiers and basic +// type literals; it returns false otherwise. +func (t Type) IsIdentifier() bool { return identifier_beg < t && t < identifier_end } + +// IsLiteral returns true for tokens corresponding to basic type literals; it +// returns false otherwise. +func (t Type) IsLiteral() bool { return literal_beg < t && t < literal_end } + +// IsOperator returns true for tokens corresponding to operators and +// delimiters; it returns false otherwise. +func (t Type) IsOperator() bool { return operator_beg < t && t < operator_end } + +// String returns the token's literal text. Note that this is only +// applicable for certain token types, such as token.IDENT, +// token.STRING, etc.. +func (t Token) String() string { + return fmt.Sprintf("%s %s %s", t.Pos.String(), t.Type.String(), t.Text) +} + +// HCLToken converts this token to an HCL token. +// +// The token type must be a literal type or this will panic. 
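A short sketch of the conversion in use (the input is invented); note in the switch below that a JSON null maps to an empty HCL string rather than to a distinct type:

    package main

    import (
    	"fmt"

    	"github.com/hashicorp/hcl/json/scanner"
    )

    func main() {
    	s := scanner.New([]byte(`"name"`))
    	jsonTok := s.Scan()

    	// Convert the JSON token into its HCL equivalent; STRING tokens keep
    	// their text and are flagged with JSON: true.
    	hclTok := jsonTok.HCLToken()
    	fmt.Println(hclTok.Type, hclTok.JSON) // STRING true
    }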
+func (t Token) HCLToken() hcltoken.Token { + switch t.Type { + case BOOL: + return hcltoken.Token{Type: hcltoken.BOOL, Text: t.Text} + case FLOAT: + return hcltoken.Token{Type: hcltoken.FLOAT, Text: t.Text} + case NULL: + return hcltoken.Token{Type: hcltoken.STRING, Text: ""} + case NUMBER: + return hcltoken.Token{Type: hcltoken.NUMBER, Text: t.Text} + case STRING: + return hcltoken.Token{Type: hcltoken.STRING, Text: t.Text, JSON: true} + default: + panic(fmt.Sprintf("unimplemented HCLToken for type: %s", t.Type)) + } +} diff --git a/vendor/github.com/hashicorp/hcl/json/token/token_test.go b/vendor/github.com/hashicorp/hcl/json/token/token_test.go new file mode 100644 index 000000000..a83fdd55b --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/json/token/token_test.go @@ -0,0 +1,34 @@ +package token + +import ( + "testing" +) + +func TestTypeString(t *testing.T) { + var tokens = []struct { + tt Type + str string + }{ + {ILLEGAL, "ILLEGAL"}, + {EOF, "EOF"}, + {NUMBER, "NUMBER"}, + {FLOAT, "FLOAT"}, + {BOOL, "BOOL"}, + {STRING, "STRING"}, + {NULL, "NULL"}, + {LBRACK, "LBRACK"}, + {LBRACE, "LBRACE"}, + {COMMA, "COMMA"}, + {PERIOD, "PERIOD"}, + {RBRACK, "RBRACK"}, + {RBRACE, "RBRACE"}, + } + + for _, token := range tokens { + if token.tt.String() != token.str { + t.Errorf("want: %q got:%q\n", token.str, token.tt) + + } + } + +} diff --git a/vendor/github.com/hashicorp/hcl/lex.go b/vendor/github.com/hashicorp/hcl/lex.go new file mode 100644 index 000000000..2e38ecb0c --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/lex.go @@ -0,0 +1,31 @@ +package hcl + +import ( + "unicode" +) + +type lexModeValue byte + +const ( + lexModeUnknown lexModeValue = iota + lexModeHcl + lexModeJson +) + +// lexMode returns whether we're going to be parsing in JSON +// mode or HCL mode. +func lexMode(v string) lexModeValue { + for _, r := range v { + if unicode.IsSpace(r) { + continue + } + + if r == '{' { + return lexModeJson + } else { + return lexModeHcl + } + } + + return lexModeHcl +} diff --git a/vendor/github.com/hashicorp/hcl/lex_test.go b/vendor/github.com/hashicorp/hcl/lex_test.go new file mode 100644 index 000000000..f7ee37886 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/lex_test.go @@ -0,0 +1,37 @@ +package hcl + +import ( + "testing" +) + +func TestLexMode(t *testing.T) { + cases := []struct { + Input string + Mode lexModeValue + }{ + { + "", + lexModeHcl, + }, + { + "foo", + lexModeHcl, + }, + { + "{}", + lexModeJson, + }, + { + " {}", + lexModeJson, + }, + } + + for i, tc := range cases { + actual := lexMode(tc.Input) + + if actual != tc.Mode { + t.Fatalf("%d: %#v", i, actual) + } + } +} diff --git a/vendor/github.com/hashicorp/hcl/parse.go b/vendor/github.com/hashicorp/hcl/parse.go new file mode 100644 index 000000000..d0719c2ab --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/parse.go @@ -0,0 +1,23 @@ +package hcl + +import ( + "fmt" + + "github.com/hashicorp/hcl/hcl/ast" + hclParser "github.com/hashicorp/hcl/hcl/parser" + jsonParser "github.com/hashicorp/hcl/json/parser" +) + +// Parse parses the given input and returns the root object. +// +// The input format can be either HCL or JSON. 
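A minimal sketch of the dispatch end to end (the config snippets are invented): a leading '{' selects the JSON parser via lexMode above, and anything else is treated as native HCL:

    package main

    import (
    	"fmt"

    	"github.com/hashicorp/hcl"
    )

    func main() {
    	for _, src := range []string{`foo = "bar"`, `{"foo": "bar"}`} {
    		file, err := hcl.Parse(src)
    		if err != nil {
    			panic(err)
    		}
    		// Both formats produce the same kind of root node.
    		fmt.Printf("parsed %q into a %T\n", src, file.Node)
    	}
    }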
+func Parse(input string) (*ast.File, error) { + switch lexMode(input) { + case lexModeHcl: + return hclParser.Parse([]byte(input)) + case lexModeJson: + return jsonParser.Parse([]byte(input)) + } + + return nil, fmt.Errorf("unknown config format") +} diff --git a/vendor/github.com/hashicorp/hcl/test-fixtures/basic.hcl b/vendor/github.com/hashicorp/hcl/test-fixtures/basic.hcl new file mode 100644 index 000000000..949994487 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/test-fixtures/basic.hcl @@ -0,0 +1,2 @@ +foo = "bar" +bar = "${file("bing/bong.txt")}" diff --git a/vendor/github.com/hashicorp/hcl/test-fixtures/basic.json b/vendor/github.com/hashicorp/hcl/test-fixtures/basic.json new file mode 100644 index 000000000..7bdddc84b --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/test-fixtures/basic.json @@ -0,0 +1,4 @@ +{ + "foo": "bar", + "bar": "${file(\"bing/bong.txt\")}" +} diff --git a/vendor/github.com/hashicorp/hcl/test-fixtures/basic_int_string.hcl b/vendor/github.com/hashicorp/hcl/test-fixtures/basic_int_string.hcl new file mode 100644 index 000000000..4e415da20 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/test-fixtures/basic_int_string.hcl @@ -0,0 +1 @@ +count = "3" diff --git a/vendor/github.com/hashicorp/hcl/test-fixtures/basic_squish.hcl b/vendor/github.com/hashicorp/hcl/test-fixtures/basic_squish.hcl new file mode 100644 index 000000000..363697b49 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/test-fixtures/basic_squish.hcl @@ -0,0 +1,3 @@ +foo="bar" +bar="${file("bing/bong.txt")}" +foo-bar="baz" diff --git a/vendor/github.com/hashicorp/hcl/test-fixtures/decode_policy.hcl b/vendor/github.com/hashicorp/hcl/test-fixtures/decode_policy.hcl new file mode 100644 index 000000000..5b185cc91 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/test-fixtures/decode_policy.hcl @@ -0,0 +1,15 @@ +key "" { + policy = "read" +} + +key "foo/" { + policy = "write" +} + +key "foo/bar/" { + policy = "read" +} + +key "foo/bar/baz" { + policy = "deny" +} diff --git a/vendor/github.com/hashicorp/hcl/test-fixtures/decode_policy.json b/vendor/github.com/hashicorp/hcl/test-fixtures/decode_policy.json new file mode 100644 index 000000000..151864ee8 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/test-fixtures/decode_policy.json @@ -0,0 +1,19 @@ +{ + "key": { + "": { + "policy": "read" + }, + + "foo/": { + "policy": "write" + }, + + "foo/bar/": { + "policy": "read" + }, + + "foo/bar/baz": { + "policy": "deny" + } + } +} diff --git a/vendor/github.com/hashicorp/hcl/test-fixtures/decode_tf_variable.hcl b/vendor/github.com/hashicorp/hcl/test-fixtures/decode_tf_variable.hcl new file mode 100644 index 000000000..52dcaa1bc --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/test-fixtures/decode_tf_variable.hcl @@ -0,0 +1,10 @@ +variable "foo" { + default = "bar" + description = "bar" +} + +variable "amis" { + default = { + east = "foo" + } +} diff --git a/vendor/github.com/hashicorp/hcl/test-fixtures/decode_tf_variable.json b/vendor/github.com/hashicorp/hcl/test-fixtures/decode_tf_variable.json new file mode 100644 index 000000000..49f921ed0 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/test-fixtures/decode_tf_variable.json @@ -0,0 +1,14 @@ +{ + "variable": { + "foo": { + "default": "bar", + "description": "bar" + }, + + "amis": { + "default": { + "east": "foo" + } + } + } +} diff --git a/vendor/github.com/hashicorp/hcl/test-fixtures/empty.hcl b/vendor/github.com/hashicorp/hcl/test-fixtures/empty.hcl new file mode 100644 index 000000000..5be1b2315 --- /dev/null +++ 
b/vendor/github.com/hashicorp/hcl/test-fixtures/empty.hcl @@ -0,0 +1 @@ +resource "foo" {} diff --git a/vendor/github.com/hashicorp/hcl/test-fixtures/escape.hcl b/vendor/github.com/hashicorp/hcl/test-fixtures/escape.hcl new file mode 100644 index 000000000..ead1b8b99 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/test-fixtures/escape.hcl @@ -0,0 +1 @@ +foo = "bar\"baz\\n" diff --git a/vendor/github.com/hashicorp/hcl/test-fixtures/flat.hcl b/vendor/github.com/hashicorp/hcl/test-fixtures/flat.hcl new file mode 100644 index 000000000..9bca551f8 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/test-fixtures/flat.hcl @@ -0,0 +1,2 @@ +foo = "bar" +Key = 7 diff --git a/vendor/github.com/hashicorp/hcl/test-fixtures/float.hcl b/vendor/github.com/hashicorp/hcl/test-fixtures/float.hcl new file mode 100644 index 000000000..eed44e542 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/test-fixtures/float.hcl @@ -0,0 +1 @@ +a = 1.02 diff --git a/vendor/github.com/hashicorp/hcl/test-fixtures/float.json b/vendor/github.com/hashicorp/hcl/test-fixtures/float.json new file mode 100644 index 000000000..a9d1ab4b0 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/test-fixtures/float.json @@ -0,0 +1,3 @@ +{ + "a": 1.02 +} diff --git a/vendor/github.com/hashicorp/hcl/test-fixtures/interpolate_escape.hcl b/vendor/github.com/hashicorp/hcl/test-fixtures/interpolate_escape.hcl new file mode 100644 index 000000000..7b9539138 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/test-fixtures/interpolate_escape.hcl @@ -0,0 +1 @@ +foo="${file(\"bing/bong.txt\")}" diff --git a/vendor/github.com/hashicorp/hcl/test-fixtures/multiline.hcl b/vendor/github.com/hashicorp/hcl/test-fixtures/multiline.hcl new file mode 100644 index 000000000..f883bd707 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/test-fixtures/multiline.hcl @@ -0,0 +1,4 @@ +foo = <= 0 { + y := bytes.IndexByte(line[x:], ']') + if y >= 0 { + level = LogLevel(line[x+1 : x+y]) + } + } + + _, ok := f.badLevels[level] + return !ok +} + +func (f *LevelFilter) Write(p []byte) (n int, err error) { + // Note in general that io.Writer can receive any byte sequence + // to write, but the "log" package always guarantees that we only + // get a single line. We use that as a slight optimization within + // this method, assuming we're dealing with a single, complete line + // of log data. 
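+	// As an illustrative example (these lines are not in the original
+	// source): with MinLevel set to "WARN", a line like "[DEBUG] created vpc"
+	// is swallowed here while "[WARN] approaching limit" is passed through to
+	// f.Writer. Swallowed lines still report len(p) as written so the log
+	// package never sees filtering as an error.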
+ + if !f.Check(p) { + return len(p), nil + } + + return f.Writer.Write(p) +} + +// SetMinLevel is used to update the minimum log level +func (f *LevelFilter) SetMinLevel(min LogLevel) { + f.MinLevel = min + f.init() +} + +func (f *LevelFilter) init() { + badLevels := make(map[LogLevel]struct{}) + for _, level := range f.Levels { + if level == f.MinLevel { + break + } + badLevels[level] = struct{}{} + } + f.badLevels = badLevels +} diff --git a/vendor/github.com/hashicorp/logutils/level_benchmark_test.go b/vendor/github.com/hashicorp/logutils/level_benchmark_test.go new file mode 100644 index 000000000..3c2caf70e --- /dev/null +++ b/vendor/github.com/hashicorp/logutils/level_benchmark_test.go @@ -0,0 +1,37 @@ +package logutils + +import ( + "io/ioutil" + "testing" +) + +var messages [][]byte + +func init() { + messages = [][]byte{ + []byte("[TRACE] foo"), + []byte("[DEBUG] foo"), + []byte("[INFO] foo"), + []byte("[WARN] foo"), + []byte("[ERROR] foo"), + } +} + +func BenchmarkDiscard(b *testing.B) { + for i := 0; i < b.N; i++ { + ioutil.Discard.Write(messages[i%len(messages)]) + } +} + +func BenchmarkLevelFilter(b *testing.B) { + filter := &LevelFilter{ + Levels: []LogLevel{"TRACE", "DEBUG", "INFO", "WARN", "ERROR"}, + MinLevel: "WARN", + Writer: ioutil.Discard, + } + + b.ResetTimer() + for i := 0; i < b.N; i++ { + filter.Write(messages[i%len(messages)]) + } +} diff --git a/vendor/github.com/hashicorp/logutils/level_test.go b/vendor/github.com/hashicorp/logutils/level_test.go new file mode 100644 index 000000000..f6b6ac3c3 --- /dev/null +++ b/vendor/github.com/hashicorp/logutils/level_test.go @@ -0,0 +1,94 @@ +package logutils + +import ( + "bytes" + "io" + "log" + "testing" +) + +func TestLevelFilter_impl(t *testing.T) { + var _ io.Writer = new(LevelFilter) +} + +func TestLevelFilter(t *testing.T) { + buf := new(bytes.Buffer) + filter := &LevelFilter{ + Levels: []LogLevel{"DEBUG", "WARN", "ERROR"}, + MinLevel: "WARN", + Writer: buf, + } + + logger := log.New(filter, "", 0) + logger.Print("[WARN] foo") + logger.Println("[ERROR] bar") + logger.Println("[DEBUG] baz") + logger.Println("[WARN] buzz") + + result := buf.String() + expected := "[WARN] foo\n[ERROR] bar\n[WARN] buzz\n" + if result != expected { + t.Fatalf("bad: %#v", result) + } +} + +func TestLevelFilterCheck(t *testing.T) { + filter := &LevelFilter{ + Levels: []LogLevel{"DEBUG", "WARN", "ERROR"}, + MinLevel: "WARN", + Writer: nil, + } + + testCases := []struct { + line string + check bool + }{ + {"[WARN] foo\n", true}, + {"[ERROR] bar\n", true}, + {"[DEBUG] baz\n", false}, + {"[WARN] buzz\n", true}, + } + + for _, testCase := range testCases { + result := filter.Check([]byte(testCase.line)) + if result != testCase.check { + t.Errorf("Fail: %s", testCase.line) + } + } +} + +func TestLevelFilter_SetMinLevel(t *testing.T) { + filter := &LevelFilter{ + Levels: []LogLevel{"DEBUG", "WARN", "ERROR"}, + MinLevel: "ERROR", + Writer: nil, + } + + testCases := []struct { + line string + checkBefore bool + checkAfter bool + }{ + {"[WARN] foo\n", false, true}, + {"[ERROR] bar\n", true, true}, + {"[DEBUG] baz\n", false, false}, + {"[WARN] buzz\n", false, true}, + } + + for _, testCase := range testCases { + result := filter.Check([]byte(testCase.line)) + if result != testCase.checkBefore { + t.Errorf("Fail: %s", testCase.line) + } + } + + // Update the minimum level to WARN + filter.SetMinLevel("WARN") + + for _, testCase := range testCases { + result := filter.Check([]byte(testCase.line)) + if result != testCase.checkAfter { + t.Errorf("Fail: 
%s", testCase.line) + } + } +} diff --git a/vendor/github.com/hashicorp/serf/coordinate/README.md b/vendor/github.com/hashicorp/serf/coordinate/README.md new file mode 100644 index 000000000..0a96fd3eb --- /dev/null +++ b/vendor/github.com/hashicorp/serf/coordinate/README.md @@ -0,0 +1 @@ +# TODO - I'll beef this up as I implement each of the enhancements. \ No newline at end of file diff --git a/vendor/github.com/hashicorp/serf/coordinate/client.go b/vendor/github.com/hashicorp/serf/coordinate/client.go new file mode 100644 index 000000000..613bfff89 --- /dev/null +++ b/vendor/github.com/hashicorp/serf/coordinate/client.go @@ -0,0 +1,180 @@ +package coordinate + +import ( + "fmt" + "math" + "sort" + "sync" + "time" +) + +// Client manages the estimated network coordinate for a given node, and adjusts +// it as the node observes round trip times and estimated coordinates from other +// nodes. The core algorithm is based on Vivaldi, see the documentation for Config +// for more details. +type Client struct { + // coord is the current estimate of the client's network coordinate. + coord *Coordinate + + // origin is a coordinate sitting at the origin. + origin *Coordinate + + // config contains the tuning parameters that govern the performance of + // the algorithm. + config *Config + + // adjustmentIndex is the current index into the adjustmentSamples slice. + adjustmentIndex uint + + // adjustment is used to store samples for the adjustment calculation. + adjustmentSamples []float64 + + // latencyFilterSamples is used to store the last several RTT samples, + // keyed by node name. We will use the config's LatencyFilterSamples + // value to determine how many samples we keep, per node. + latencyFilterSamples map[string][]float64 + + // mutex enables safe concurrent access to the client. + mutex sync.RWMutex +} + +// NewClient creates a new Client and verifies the configuration is valid. +func NewClient(config *Config) (*Client, error) { + if !(config.Dimensionality > 0) { + return nil, fmt.Errorf("dimensionality must be >0") + } + + return &Client{ + coord: NewCoordinate(config), + origin: NewCoordinate(config), + config: config, + adjustmentIndex: 0, + adjustmentSamples: make([]float64, config.AdjustmentWindowSize), + latencyFilterSamples: make(map[string][]float64), + }, nil +} + +// GetCoordinate returns a copy of the coordinate for this client. +func (c *Client) GetCoordinate() *Coordinate { + c.mutex.RLock() + defer c.mutex.RUnlock() + + return c.coord.Clone() +} + +// SetCoordinate forces the client's coordinate to a known state. +func (c *Client) SetCoordinate(coord *Coordinate) { + c.mutex.Lock() + defer c.mutex.Unlock() + + c.coord = coord.Clone() +} + +// ForgetNode removes any client state for the given node. +func (c *Client) ForgetNode(node string) { + c.mutex.Lock() + defer c.mutex.Unlock() + + delete(c.latencyFilterSamples, node) +} + +// latencyFilter applies a simple moving median filter with a new sample for +// a node. This assumes that the mutex has been locked already. +func (c *Client) latencyFilter(node string, rttSeconds float64) float64 { + samples, ok := c.latencyFilterSamples[node] + if !ok { + samples = make([]float64, 0, c.config.LatencyFilterSize) + } + + // Add the new sample and trim the list, if needed. + samples = append(samples, rttSeconds) + if len(samples) > int(c.config.LatencyFilterSize) { + samples = samples[1:] + } + c.latencyFilterSamples[node] = samples + + // Sort a copy of the samples and return the median. 
+	sorted := make([]float64, len(samples))
+	copy(sorted, samples)
+	sort.Float64s(sorted)
+	return sorted[len(sorted)/2]
+}
+
+// updateVivaldi updates the Vivaldi portion of the client's coordinate. This
+// assumes that the mutex has been locked already.
+func (c *Client) updateVivaldi(other *Coordinate, rttSeconds float64) {
+	const zeroThreshold = 1.0e-6
+
+	dist := c.coord.DistanceTo(other).Seconds()
+	if rttSeconds < zeroThreshold {
+		rttSeconds = zeroThreshold
+	}
+	wrongness := math.Abs(dist-rttSeconds) / rttSeconds
+
+	totalError := c.coord.Error + other.Error
+	if totalError < zeroThreshold {
+		totalError = zeroThreshold
+	}
+	weight := c.coord.Error / totalError
+
+	c.coord.Error = c.config.VivaldiCE*weight*wrongness + c.coord.Error*(1.0-c.config.VivaldiCE*weight)
+	if c.coord.Error > c.config.VivaldiErrorMax {
+		c.coord.Error = c.config.VivaldiErrorMax
+	}
+
+	delta := c.config.VivaldiCC * weight
+	force := delta * (rttSeconds - dist)
+	c.coord = c.coord.ApplyForce(c.config, force, other)
+}
+
+// updateAdjustment updates the adjustment portion of the client's coordinate, if
+// the feature is enabled. This assumes that the mutex has been locked already.
+func (c *Client) updateAdjustment(other *Coordinate, rttSeconds float64) {
+	if c.config.AdjustmentWindowSize == 0 {
+		return
+	}
+
+	// Note that the existing adjustment factors don't figure into this
+	// calculation so we use the raw distance here.
+	dist := c.coord.rawDistanceTo(other)
+	c.adjustmentSamples[c.adjustmentIndex] = rttSeconds - dist
+	c.adjustmentIndex = (c.adjustmentIndex + 1) % c.config.AdjustmentWindowSize
+
+	sum := 0.0
+	for _, sample := range c.adjustmentSamples {
+		sum += sample
+	}
+	c.coord.Adjustment = sum / (2.0 * float64(c.config.AdjustmentWindowSize))
+}
+
+// updateGravity applies a small amount of gravity to pull coordinates towards
+// the center of the coordinate system to combat drift. This assumes that the
+// mutex is locked already.
+func (c *Client) updateGravity() {
+	dist := c.origin.DistanceTo(c.coord).Seconds()
+	force := -1.0 * math.Pow(dist/c.config.GravityRho, 2.0)
+	c.coord = c.coord.ApplyForce(c.config, force, c.origin)
+}
+
+// Update takes other, a coordinate for another node, and rtt, a round trip
+// time observation for a ping to that node, and updates the estimated position of
+// the client's coordinate. Returns the updated coordinate.
+func (c *Client) Update(node string, other *Coordinate, rtt time.Duration) *Coordinate {
+	c.mutex.Lock()
+	defer c.mutex.Unlock()
+
+	rttSeconds := c.latencyFilter(node, rtt.Seconds())
+	c.updateVivaldi(other, rttSeconds)
+	c.updateAdjustment(other, rttSeconds)
+	c.updateGravity()
+	return c.coord.Clone()
+}
+
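Putting the pieces together, a sketch of the observe-then-estimate loop (the node name and RTT are invented for illustration):

    package main

    import (
    	"fmt"
    	"time"

    	"github.com/hashicorp/serf/coordinate"
    )

    func main() {
    	client, err := coordinate.NewClient(coordinate.DefaultConfig())
    	if err != nil {
    		panic(err)
    	}

    	// Pretend we pinged "node-a" and it told us its coordinate.
    	other := coordinate.NewCoordinate(coordinate.DefaultConfig())
    	client.Update("node-a", other, 35*time.Millisecond)

    	// Later, estimate the RTT to that coordinate without pinging again.
    	fmt.Println(client.DistanceTo(other))
    }

+// DistanceTo returns the estimated RTT from the client's coordinate to other, the
+// coordinate for another node.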
+func (c *Client) DistanceTo(other *Coordinate) time.Duration { + c.mutex.RLock() + defer c.mutex.RUnlock() + + return c.coord.DistanceTo(other) +} diff --git a/vendor/github.com/hashicorp/serf/coordinate/client_test.go b/vendor/github.com/hashicorp/serf/coordinate/client_test.go new file mode 100644 index 000000000..ae7d58c0d --- /dev/null +++ b/vendor/github.com/hashicorp/serf/coordinate/client_test.go @@ -0,0 +1,109 @@ +package coordinate + +import ( + "reflect" + "strings" + "testing" + "time" +) + +func TestClient_NewClient(t *testing.T) { + config := DefaultConfig() + + config.Dimensionality = 0 + client, err := NewClient(config) + if err == nil || !strings.Contains(err.Error(), "dimensionality") { + t.Fatal(err) + } + + config.Dimensionality = 7 + client, err = NewClient(config) + if err != nil { + t.Fatal(err) + } + + origin := NewCoordinate(config) + if !reflect.DeepEqual(client.GetCoordinate(), origin) { + t.Fatalf("fresh client should be located at the origin") + } +} + +func TestClient_Update(t *testing.T) { + config := DefaultConfig() + config.Dimensionality = 3 + + client, err := NewClient(config) + if err != nil { + t.Fatal(err) + } + + // Make sure the Euclidean part of our coordinate is what we expect. + c := client.GetCoordinate() + verifyEqualVectors(t, c.Vec, []float64{0.0, 0.0, 0.0}) + + // Place a node right above the client and observe an RTT longer than the + // client expects, given its distance. + other := NewCoordinate(config) + other.Vec[2] = 0.001 + rtt := time.Duration(2.0 * other.Vec[2] * secondsToNanoseconds) + c = client.Update("node", other, rtt) + + // The client should have scooted down to get away from it. + if !(c.Vec[2] < 0.0) { + t.Fatalf("client z coordinate %9.6f should be < 0.0", c.Vec[2]) + } + + // Set the coordinate to a known state. + c.Vec[2] = 99.0 + client.SetCoordinate(c) + c = client.GetCoordinate() + verifyEqualFloats(t, c.Vec[2], 99.0) +} + +func TestClient_DistanceTo(t *testing.T) { + config := DefaultConfig() + config.Dimensionality = 3 + config.HeightMin = 0 + + client, err := NewClient(config) + if err != nil { + t.Fatal(err) + } + + // Fiddle a raw coordinate to put it a specific number of seconds away. + other := NewCoordinate(config) + other.Vec[2] = 12.345 + expected := time.Duration(other.Vec[2] * secondsToNanoseconds) + dist := client.DistanceTo(other) + if dist != expected { + t.Fatalf("distance doesn't match %9.6f != %9.6f", dist.Seconds(), expected.Seconds()) + } +} + +func TestClient_latencyFilter(t *testing.T) { + config := DefaultConfig() + config.LatencyFilterSize = 3 + + client, err := NewClient(config) + if err != nil { + t.Fatal(err) + } + + // Make sure we get the median, and that things age properly. + verifyEqualFloats(t, client.latencyFilter("alice", 0.201), 0.201) + verifyEqualFloats(t, client.latencyFilter("alice", 0.200), 0.201) + verifyEqualFloats(t, client.latencyFilter("alice", 0.207), 0.201) + + // This glitch will get median-ed out and never seen by Vivaldi. + verifyEqualFloats(t, client.latencyFilter("alice", 1.9), 0.207) + verifyEqualFloats(t, client.latencyFilter("alice", 0.203), 0.207) + verifyEqualFloats(t, client.latencyFilter("alice", 0.199), 0.203) + verifyEqualFloats(t, client.latencyFilter("alice", 0.211), 0.203) + + // Make sure different nodes are not coupled. + verifyEqualFloats(t, client.latencyFilter("bob", 0.310), 0.310) + + // Make sure we don't leak coordinates for nodes that leave. 
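+ // After ForgetNode, the node's next sample seeds a brand new filter
+ // window, so it comes back as its own median.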
+ client.ForgetNode("alice")
+ verifyEqualFloats(t, client.latencyFilter("alice", 0.888), 0.888)
+}
diff --git a/vendor/github.com/hashicorp/serf/coordinate/config.go b/vendor/github.com/hashicorp/serf/coordinate/config.go
new file mode 100644
index 000000000..a5b3aadfe
--- /dev/null
+++ b/vendor/github.com/hashicorp/serf/coordinate/config.go
@@ -0,0 +1,70 @@
+package coordinate
+
+// Config is used to set the parameters of the Vivaldi-based coordinate mapping
+// algorithm.
+//
+// The following references are called out at various points in the documentation
+// here:
+//
+// [1] Dabek, Frank, et al. "Vivaldi: A decentralized network coordinate system."
+// ACM SIGCOMM Computer Communication Review. Vol. 34. No. 4. ACM, 2004.
+// [2] Ledlie, Jonathan, Paul Gardner, and Margo I. Seltzer. "Network Coordinates
+// in the Wild." NSDI. Vol. 7. 2007.
+// [3] Lee, Sanghwan, et al. "On suitability of Euclidean embedding for
+// host-based network coordinate systems." Networking, IEEE/ACM Transactions
+// on 18.1 (2010): 27-40.
type Config struct {
+ // The dimensionality of the coordinate system. As discussed in [2], more
+ // dimensions improve the accuracy of the estimates up to a point. Per [2]
+ // we chose 4 dimensions plus a non-Euclidean height.
+ Dimensionality uint
+
+ // VivaldiErrorMax is the default error value when a node hasn't yet made
+ // any observations. It also serves as an upper limit on the error value in
+ // case observations cause the error value to increase without bound.
+ VivaldiErrorMax float64
+
+ // VivaldiCE is a tuning factor that controls the maximum impact an
+ // observation can have on a node's confidence. See [1] for more details.
+ VivaldiCE float64
+
+ // VivaldiCC is a tuning factor that controls the maximum impact an
+ // observation can have on a node's coordinate. See [1] for more details.
+ VivaldiCC float64
+
+ // AdjustmentWindowSize is a tuning factor that determines how many samples
+ // we retain to calculate the adjustment factor as discussed in [3]. Setting
+ // this to zero disables this feature.
+ AdjustmentWindowSize uint
+
+ // HeightMin is the minimum value of the height parameter. Since this
+ // always must be positive, it will introduce a small amount of error, so
+ // the chosen value should be relatively small compared to "normal"
+ // coordinates.
+ HeightMin float64
+
+ // LatencyFilterSize is the maximum number of samples that are retained
+ // per node, in order to compute a median. The intent is to ride out blips
+ // but still keep the delay low, since our time to probe any given node is
+ // pretty infrequent. See [2] for more details.
+ LatencyFilterSize uint
+
+ // GravityRho is a tuning factor that sets how strongly gravity acts to
+ // re-center coordinates. See [2] for more details.
+ GravityRho float64
+}
+
+// DefaultConfig returns a Config that has some default values suitable for
+// basic testing of the algorithm, but not tuned to any particular type of cluster.
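+// Callers typically adjust individual fields before constructing a client,
+// as the tests in this package do, e.g.:
+//
+//	config := DefaultConfig()
+//	config.Dimensionality = 3
+//	client, err := NewClient(config)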
+func DefaultConfig() *Config {
+ return &Config{
+ Dimensionality: 8,
+ VivaldiErrorMax: 1.5,
+ VivaldiCE: 0.25,
+ VivaldiCC: 0.25,
+ AdjustmentWindowSize: 20,
+ HeightMin: 10.0e-6,
+ LatencyFilterSize: 3,
+ GravityRho: 150.0,
+ }
+}
diff --git a/vendor/github.com/hashicorp/serf/coordinate/coordinate.go b/vendor/github.com/hashicorp/serf/coordinate/coordinate.go
new file mode 100644
index 000000000..c9194e048
--- /dev/null
+++ b/vendor/github.com/hashicorp/serf/coordinate/coordinate.go
@@ -0,0 +1,183 @@
+package coordinate
+
+import (
+ "math"
+ "math/rand"
+ "time"
+)
+
+// Coordinate is a specialized structure for holding network coordinates for the
+// Vivaldi-based coordinate mapping algorithm. All of the fields should be public
+// to enable this to be serialized. All values in here are in units of seconds.
+type Coordinate struct {
+ // Vec is the Euclidean portion of the coordinate. This is used along
+ // with the other fields to provide an overall distance estimate. The
+ // units here are seconds.
+ Vec []float64
+
+ // Error reflects the confidence in the given coordinate and is updated
+ // dynamically by the Vivaldi Client. This is dimensionless.
+ Error float64
+
+ // Adjustment is a distance offset computed based on a calculation over
+ // observations from all other nodes over a fixed window and is updated
+ // dynamically by the Vivaldi Client. The units here are seconds.
+ Adjustment float64
+
+ // Height is a distance offset that accounts for non-Euclidean effects
+ // which model the access links from nodes to the core Internet. The access
+ // links are usually set by bandwidth and congestion, and the core links
+ // usually follow distance based on geography.
+ Height float64
+}
+
+const (
+ // secondsToNanoseconds is used to convert float seconds to nanoseconds.
+ secondsToNanoseconds = 1.0e9
+
+ // zeroThreshold is used to decide if two coordinates are on top of each
+ // other.
+ zeroThreshold = 1.0e-6
+)
+
+// DimensionalityConflictError is the panic value used when you try to perform
+// operations with incompatible dimensions.
+type DimensionalityConflictError struct{}
+
+// Error implements the error interface.
+func (e DimensionalityConflictError) Error() string {
+ return "coordinate dimensionality does not match"
+}
+
+// NewCoordinate creates a new coordinate at the origin, using the given config
+// to supply key initial values.
+func NewCoordinate(config *Config) *Coordinate {
+ return &Coordinate{
+ Vec: make([]float64, config.Dimensionality),
+ Error: config.VivaldiErrorMax,
+ Adjustment: 0.0,
+ Height: config.HeightMin,
+ }
+}
+
+// Clone creates an independent copy of this coordinate.
+func (c *Coordinate) Clone() *Coordinate {
+ vec := make([]float64, len(c.Vec))
+ copy(vec, c.Vec)
+ return &Coordinate{
+ Vec: vec,
+ Error: c.Error,
+ Adjustment: c.Adjustment,
+ Height: c.Height,
+ }
+}
+
+// IsCompatibleWith checks to see if the two coordinates are compatible
+// dimensionally. If this returns true then you are guaranteed to not get
+// any runtime errors operating on them.
+func (c *Coordinate) IsCompatibleWith(other *Coordinate) bool {
+ return len(c.Vec) == len(other.Vec)
+}
+
+// ApplyForce returns the result of applying the force from the direction of the
+// other coordinate.
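+// The force is expressed in seconds. A positive force pushes this coordinate
+// away from other, and a negative force pulls it closer. The receiver is not
+// mutated; an updated copy is returned.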
+func (c *Coordinate) ApplyForce(config *Config, force float64, other *Coordinate) *Coordinate {
+ if !c.IsCompatibleWith(other) {
+ panic(DimensionalityConflictError{})
+ }
+
+ ret := c.Clone()
+ unit, mag := unitVectorAt(c.Vec, other.Vec)
+ ret.Vec = add(ret.Vec, mul(unit, force))
+ if mag > zeroThreshold {
+ ret.Height = (ret.Height+other.Height)*force/mag + ret.Height
+ ret.Height = math.Max(ret.Height, config.HeightMin)
+ }
+ return ret
+}
+
+// DistanceTo returns the distance between this coordinate and the other
+// coordinate, including adjustments.
+func (c *Coordinate) DistanceTo(other *Coordinate) time.Duration {
+ if !c.IsCompatibleWith(other) {
+ panic(DimensionalityConflictError{})
+ }
+
+ dist := c.rawDistanceTo(other)
+ adjustedDist := dist + c.Adjustment + other.Adjustment
+ if adjustedDist > 0.0 {
+ dist = adjustedDist
+ }
+ return time.Duration(dist * secondsToNanoseconds)
+}
+
+// rawDistanceTo returns the Vivaldi distance between this coordinate and the
+// other coordinate in seconds, not including adjustments. This assumes the
+// dimensions have already been checked to be compatible.
+func (c *Coordinate) rawDistanceTo(other *Coordinate) float64 {
+ return magnitude(diff(c.Vec, other.Vec)) + c.Height + other.Height
+}
+
+// add returns the sum of vec1 and vec2. This assumes the dimensions have
+// already been checked to be compatible.
+func add(vec1 []float64, vec2 []float64) []float64 {
+ ret := make([]float64, len(vec1))
+ for i, _ := range ret {
+ ret[i] = vec1[i] + vec2[i]
+ }
+ return ret
+}
+
+// diff returns the difference between vec1 and vec2. This assumes the
+// dimensions have already been checked to be compatible.
+func diff(vec1 []float64, vec2 []float64) []float64 {
+ ret := make([]float64, len(vec1))
+ for i, _ := range ret {
+ ret[i] = vec1[i] - vec2[i]
+ }
+ return ret
+}
+
+// mul returns vec multiplied by a scalar factor.
+func mul(vec []float64, factor float64) []float64 {
+ ret := make([]float64, len(vec))
+ for i, _ := range vec {
+ ret[i] = vec[i] * factor
+ }
+ return ret
+}
+
+// magnitude computes the magnitude of the vec.
+func magnitude(vec []float64) float64 {
+ sum := 0.0
+ for i, _ := range vec {
+ sum += vec[i] * vec[i]
+ }
+ return math.Sqrt(sum)
+}
+
+// unitVectorAt returns a unit vector pointing at vec1 from vec2. If the two
+// positions are the same then a random unit vector is returned. We also return
+// the distance between the points for use in the later height calculation.
+func unitVectorAt(vec1 []float64, vec2 []float64) ([]float64, float64) {
+ ret := diff(vec1, vec2)
+
+ // If the coordinates aren't on top of each other we can normalize.
+ if mag := magnitude(ret); mag > zeroThreshold {
+ return mul(ret, 1.0/mag), mag
+ }
+
+ // Otherwise, just return a random unit vector.
+ for i, _ := range ret {
+ ret[i] = rand.Float64() - 0.5
+ }
+ if mag := magnitude(ret); mag > zeroThreshold {
+ return mul(ret, 1.0/mag), 0.0
+ }
+
+ // And finally just give up and make a unit vector along the first
+ // dimension. This should be exceedingly rare.
+ ret = make([]float64, len(ret)) + ret[0] = 1.0 + return ret, 0.0 +} diff --git a/vendor/github.com/hashicorp/serf/coordinate/coordinate_test.go b/vendor/github.com/hashicorp/serf/coordinate/coordinate_test.go new file mode 100644 index 000000000..17f68ac51 --- /dev/null +++ b/vendor/github.com/hashicorp/serf/coordinate/coordinate_test.go @@ -0,0 +1,260 @@ +package coordinate + +import ( + "math" + "reflect" + "testing" + "time" +) + +// verifyDimensionPanic will run the supplied func and make sure it panics with +// the expected error type. +func verifyDimensionPanic(t *testing.T, f func()) { + defer func() { + if r := recover(); r != nil { + if _, ok := r.(DimensionalityConflictError); !ok { + t.Fatalf("panic isn't the right type") + } + } else { + t.Fatalf("didn't get expected panic") + } + }() + f() +} + +func TestCoordinate_NewCoordinate(t *testing.T) { + config := DefaultConfig() + c := NewCoordinate(config) + if uint(len(c.Vec)) != config.Dimensionality { + t.Fatalf("dimensionality not set correctly %d != %d", + len(c.Vec), config.Dimensionality) + } +} + +func TestCoordinate_Clone(t *testing.T) { + c := NewCoordinate(DefaultConfig()) + c.Vec[0], c.Vec[1], c.Vec[2] = 1.0, 2.0, 3.0 + c.Error = 5.0 + c.Adjustment = 10.0 + c.Height = 4.2 + + other := c.Clone() + if !reflect.DeepEqual(c, other) { + t.Fatalf("coordinate clone didn't make a proper copy") + } + + other.Vec[0] = c.Vec[0] + 0.5 + if reflect.DeepEqual(c, other) { + t.Fatalf("cloned coordinate is still pointing at its ancestor") + } +} + +func TestCoordinate_IsCompatibleWith(t *testing.T) { + config := DefaultConfig() + + config.Dimensionality = 3 + c1 := NewCoordinate(config) + c2 := NewCoordinate(config) + + config.Dimensionality = 2 + alien := NewCoordinate(config) + + if !c1.IsCompatibleWith(c1) || !c2.IsCompatibleWith(c2) || + !alien.IsCompatibleWith(alien) { + t.Fatalf("coordinates should be compatible with themselves") + } + + if !c1.IsCompatibleWith(c2) || !c2.IsCompatibleWith(c1) { + t.Fatalf("coordinates should be compatible with each other") + } + + if c1.IsCompatibleWith(alien) || c2.IsCompatibleWith(alien) || + alien.IsCompatibleWith(c1) || alien.IsCompatibleWith(c2) { + t.Fatalf("alien should not be compatible with the other coordinates") + } +} + +func TestCoordinate_ApplyForce(t *testing.T) { + config := DefaultConfig() + config.Dimensionality = 3 + config.HeightMin = 0 + + origin := NewCoordinate(config) + + // This proves that we normalize, get the direction right, and apply the + // force multiplier correctly. + above := NewCoordinate(config) + above.Vec = []float64{0.0, 0.0, 2.9} + c := origin.ApplyForce(config, 5.3, above) + verifyEqualVectors(t, c.Vec, []float64{0.0, 0.0, -5.3}) + + // Scoot a point not starting at the origin to make sure there's nothing + // special there. + right := NewCoordinate(config) + right.Vec = []float64{3.4, 0.0, -5.3} + c = c.ApplyForce(config, 2.0, right) + verifyEqualVectors(t, c.Vec, []float64{-2.0, 0.0, -5.3}) + + // If the points are right on top of each other, then we should end up + // in a random direction, one unit away. This makes sure the unit vector + // build up doesn't divide by zero. + c = origin.ApplyForce(config, 1.0, origin) + verifyEqualFloats(t, origin.DistanceTo(c).Seconds(), 1.0) + + // Enable a minimum height and make sure that gets factored in properly. 
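+ // (ApplyForce clamps the resulting height to at least config.HeightMin.)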
+ config.HeightMin = 10.0e-6 + origin = NewCoordinate(config) + c = origin.ApplyForce(config, 5.3, above) + verifyEqualVectors(t, c.Vec, []float64{0.0, 0.0, -5.3}) + verifyEqualFloats(t, c.Height, config.HeightMin+5.3*config.HeightMin/2.9) + + // Make sure the height minimum is enforced. + c = origin.ApplyForce(config, -5.3, above) + verifyEqualVectors(t, c.Vec, []float64{0.0, 0.0, 5.3}) + verifyEqualFloats(t, c.Height, config.HeightMin) + + // Shenanigans should get called if the dimensions don't match. + bad := c.Clone() + bad.Vec = make([]float64, len(bad.Vec)+1) + verifyDimensionPanic(t, func() { c.ApplyForce(config, 1.0, bad) }) +} + +func TestCoordinate_DistanceTo(t *testing.T) { + config := DefaultConfig() + config.Dimensionality = 3 + config.HeightMin = 0 + + c1, c2 := NewCoordinate(config), NewCoordinate(config) + c1.Vec = []float64{-0.5, 1.3, 2.4} + c2.Vec = []float64{1.2, -2.3, 3.4} + + verifyEqualFloats(t, c1.DistanceTo(c1).Seconds(), 0.0) + verifyEqualFloats(t, c1.DistanceTo(c2).Seconds(), c2.DistanceTo(c1).Seconds()) + verifyEqualFloats(t, c1.DistanceTo(c2).Seconds(), 4.104875150354758) + + // Make sure negative adjustment factors are ignored. + c1.Adjustment = -1.0e6 + verifyEqualFloats(t, c1.DistanceTo(c2).Seconds(), 4.104875150354758) + + // Make sure positive adjustment factors affect the distance. + c1.Adjustment = 0.1 + c2.Adjustment = 0.2 + verifyEqualFloats(t, c1.DistanceTo(c2).Seconds(), 4.104875150354758+0.3) + + // Make sure the heights affect the distance. + c1.Height = 0.7 + c2.Height = 0.1 + verifyEqualFloats(t, c1.DistanceTo(c2).Seconds(), 4.104875150354758+0.3+0.8) + + // Shenanigans should get called if the dimensions don't match. + bad := c1.Clone() + bad.Vec = make([]float64, len(bad.Vec)+1) + verifyDimensionPanic(t, func() { _ = c1.DistanceTo(bad) }) +} + +// dist is a self-contained example that appears in documentation. +func dist(a *Coordinate, b *Coordinate) time.Duration { + // Coordinates will always have the same dimensionality, so this is + // just a sanity check. + if len(a.Vec) != len(b.Vec) { + panic("dimensions aren't compatible") + } + + // Calculate the Euclidean distance plus the heights. + sumsq := 0.0 + for i := 0; i < len(a.Vec); i++ { + diff := a.Vec[i] - b.Vec[i] + sumsq += diff * diff + } + rtt := math.Sqrt(sumsq) + a.Height + b.Height + + // Apply the adjustment components, guarding against negatives. + adjusted := rtt + a.Adjustment + b.Adjustment + if adjusted > 0.0 { + rtt = adjusted + } + + // Go's times are natively nanoseconds, so we convert from seconds. + const secondsToNanoseconds = 1.0e9 + return time.Duration(rtt * secondsToNanoseconds) +} + +func TestCoordinate_dist_Example(t *testing.T) { + config := DefaultConfig() + c1, c2 := NewCoordinate(config), NewCoordinate(config) + c1.Vec = []float64{-0.5, 1.3, 2.4} + c2.Vec = []float64{1.2, -2.3, 3.4} + c1.Adjustment = 0.1 + c2.Adjustment = 0.2 + c1.Height = 0.7 + c2.Height = 0.1 + verifyEqualFloats(t, c1.DistanceTo(c2).Seconds(), dist(c1, c2).Seconds()) +} + +func TestCoordinate_rawDistanceTo(t *testing.T) { + config := DefaultConfig() + config.Dimensionality = 3 + config.HeightMin = 0 + + c1, c2 := NewCoordinate(config), NewCoordinate(config) + c1.Vec = []float64{-0.5, 1.3, 2.4} + c2.Vec = []float64{1.2, -2.3, 3.4} + + verifyEqualFloats(t, c1.rawDistanceTo(c1), 0.0) + verifyEqualFloats(t, c1.rawDistanceTo(c2), c2.rawDistanceTo(c1)) + verifyEqualFloats(t, c1.rawDistanceTo(c2), 4.104875150354758) + + // Make sure that the adjustment doesn't factor into the raw + // distance. 
+ c1.Adjustment = 1.0e6 + verifyEqualFloats(t, c1.rawDistanceTo(c2), 4.104875150354758) + + // Make sure the heights affect the distance. + c1.Height = 0.7 + c2.Height = 0.1 + verifyEqualFloats(t, c1.rawDistanceTo(c2), 4.104875150354758+0.8) +} + +func TestCoordinate_add(t *testing.T) { + vec1 := []float64{1.0, -3.0, 3.0} + vec2 := []float64{-4.0, 5.0, 6.0} + verifyEqualVectors(t, add(vec1, vec2), []float64{-3.0, 2.0, 9.0}) + + zero := []float64{0.0, 0.0, 0.0} + verifyEqualVectors(t, add(vec1, zero), vec1) +} + +func TestCoordinate_diff(t *testing.T) { + vec1 := []float64{1.0, -3.0, 3.0} + vec2 := []float64{-4.0, 5.0, 6.0} + verifyEqualVectors(t, diff(vec1, vec2), []float64{5.0, -8.0, -3.0}) + + zero := []float64{0.0, 0.0, 0.0} + verifyEqualVectors(t, diff(vec1, zero), vec1) +} + +func TestCoordinate_magnitude(t *testing.T) { + zero := []float64{0.0, 0.0, 0.0} + verifyEqualFloats(t, magnitude(zero), 0.0) + + vec := []float64{1.0, -2.0, 3.0} + verifyEqualFloats(t, magnitude(vec), 3.7416573867739413) +} + +func TestCoordinate_unitVectorAt(t *testing.T) { + vec1 := []float64{1.0, 2.0, 3.0} + vec2 := []float64{0.5, 0.6, 0.7} + u, mag := unitVectorAt(vec1, vec2) + verifyEqualVectors(t, u, []float64{0.18257418583505536, 0.511207720338155, 0.8398412548412546}) + verifyEqualFloats(t, magnitude(u), 1.0) + verifyEqualFloats(t, mag, magnitude(diff(vec1, vec2))) + + // If we give positions that are equal we should get a random unit vector + // returned to us, rather than a divide by zero. + u, mag = unitVectorAt(vec1, vec1) + verifyEqualFloats(t, magnitude(u), 1.0) + verifyEqualFloats(t, mag, 0.0) + + // We can't hit the final clause without heroics so I manually forced it + // there to verify it works. +} diff --git a/vendor/github.com/hashicorp/serf/coordinate/performance_test.go b/vendor/github.com/hashicorp/serf/coordinate/performance_test.go new file mode 100644 index 000000000..fc676e20f --- /dev/null +++ b/vendor/github.com/hashicorp/serf/coordinate/performance_test.go @@ -0,0 +1,182 @@ +package coordinate + +import ( + "math" + "testing" + "time" +) + +func TestPerformance_Line(t *testing.T) { + const spacing = 10 * time.Millisecond + const nodes, cycles = 10, 1000 + config := DefaultConfig() + clients, err := GenerateClients(nodes, config) + if err != nil { + t.Fatal(err) + } + truth := GenerateLine(nodes, spacing) + Simulate(clients, truth, cycles) + stats := Evaluate(clients, truth) + if stats.ErrorAvg > 0.0018 || stats.ErrorMax > 0.0092 { + t.Fatalf("performance stats are out of spec: %v", stats) + } +} + +func TestPerformance_Grid(t *testing.T) { + const spacing = 10 * time.Millisecond + const nodes, cycles = 25, 1000 + config := DefaultConfig() + clients, err := GenerateClients(nodes, config) + if err != nil { + t.Fatal(err) + } + truth := GenerateGrid(nodes, spacing) + Simulate(clients, truth, cycles) + stats := Evaluate(clients, truth) + if stats.ErrorAvg > 0.0015 || stats.ErrorMax > 0.022 { + t.Fatalf("performance stats are out of spec: %v", stats) + } +} + +func TestPerformance_Split(t *testing.T) { + const lan, wan = 1 * time.Millisecond, 10 * time.Millisecond + const nodes, cycles = 25, 1000 + config := DefaultConfig() + clients, err := GenerateClients(nodes, config) + if err != nil { + t.Fatal(err) + } + truth := GenerateSplit(nodes, lan, wan) + Simulate(clients, truth, cycles) + stats := Evaluate(clients, truth) + if stats.ErrorAvg > 0.000060 || stats.ErrorMax > 0.00048 { + t.Fatalf("performance stats are out of spec: %v", stats) + } +} + +func TestPerformance_Height(t 
*testing.T) {
+ const radius = 100 * time.Millisecond
+ const nodes, cycles = 25, 1000
+
+ // Constrain us to two dimensions so that we can just exactly represent
+ // the circle.
+ config := DefaultConfig()
+ config.Dimensionality = 2
+ clients, err := GenerateClients(nodes, config)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // Generate truth where the first coordinate is in the "middle" because
+ // it's equidistant from all the nodes, but it will have an extra radius
+ // added to the distance, so it should come out above all the others.
+ truth := GenerateCircle(nodes, radius)
+ Simulate(clients, truth, cycles)
+
+ // Make sure the height looks reasonable with the regular nodes all in a
+ // plane, and the center node up above.
+ for i, _ := range clients {
+ coord := clients[i].GetCoordinate()
+ if i == 0 {
+ if coord.Height < 0.97*radius.Seconds() {
+ t.Fatalf("height is out of spec: %9.6f", coord.Height)
+ }
+ } else {
+ if coord.Height > 0.03*radius.Seconds() {
+ t.Fatalf("height is out of spec: %9.6f", coord.Height)
+ }
+ }
+ }
+ stats := Evaluate(clients, truth)
+ if stats.ErrorAvg > 0.0025 || stats.ErrorMax > 0.064 {
+ t.Fatalf("performance stats are out of spec: %v", stats)
+ }
+}
+
+func TestPerformance_Drift(t *testing.T) {
+ const dist = 500 * time.Millisecond
+ const nodes = 4
+ config := DefaultConfig()
+ config.Dimensionality = 2
+ clients, err := GenerateClients(nodes, config)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // Do some icky surgery on the clients to put them into a square, up in
+ // the first quadrant.
+ clients[0].coord.Vec = []float64{0.0, 0.0}
+ clients[1].coord.Vec = []float64{0.0, dist.Seconds()}
+ clients[2].coord.Vec = []float64{dist.Seconds(), dist.Seconds()}
+ clients[3].coord.Vec = []float64{dist.Seconds(), 0.0}
+
+ // Make a corresponding truth matrix. The nodes are laid out like this
+ // so the distances are all equal, except for the diagonal:
+ //
+ // (1) <- dist -> (2)
+ //
+ // | <- dist |
+ // | |
+ // | dist -> |
+ //
+ // (0) <- dist -> (3)
+ //
+ truth := make([][]time.Duration, nodes)
+ for i := range truth {
+ truth[i] = make([]time.Duration, nodes)
+ }
+ for i := 0; i < nodes; i++ {
+ for j := i + 1; j < nodes; j++ {
+ rtt := dist
+ if (i%2 == 0) && (j%2 == 0) {
+ rtt = time.Duration(math.Sqrt2 * float64(rtt))
+ }
+ truth[i][j], truth[j][i] = rtt, rtt
+ }
+ }
+
+ calcCenterError := func() float64 {
+ min, max := clients[0].GetCoordinate(), clients[0].GetCoordinate()
+ for i := 1; i < nodes; i++ {
+ coord := clients[i].GetCoordinate()
+ for j, v := range coord.Vec {
+ min.Vec[j] = math.Min(min.Vec[j], v)
+ max.Vec[j] = math.Max(max.Vec[j], v)
+ }
+ }
+
+ mid := make([]float64, config.Dimensionality)
+ for i, _ := range mid {
+ mid[i] = min.Vec[i] + (max.Vec[i]-min.Vec[i])/2
+ }
+ return magnitude(mid)
+ }
+
+ // Let the simulation run for a while to stabilize, then snap a baseline
+ // for the center error.
+ Simulate(clients, truth, 1000)
+ baseline := calcCenterError()
+
+ // Now run for a bunch more cycles and see if gravity pulls the center
+ // in the right direction.
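+ // (updateGravity applies a force of -(dist/GravityRho)^2 toward the
+ // origin on every observation, which is what recenters the cluster.)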
+ Simulate(clients, truth, 10000)
+ if error := calcCenterError(); error > 0.8*baseline {
+ t.Fatalf("drift performance out of spec: %9.6f -> %9.6f", baseline, error)
+ }
+}
+
+func TestPerformance_Random(t *testing.T) {
+ const mean, deviation = 100 * time.Millisecond, 10 * time.Millisecond
+ const nodes, cycles = 25, 1000
+ config := DefaultConfig()
+ clients, err := GenerateClients(nodes, config)
+ if err != nil {
+ t.Fatal(err)
+ }
+ truth := GenerateRandom(nodes, mean, deviation)
+ Simulate(clients, truth, cycles)
+ stats := Evaluate(clients, truth)
+ if stats.ErrorAvg > 0.075 || stats.ErrorMax > 0.33 {
+ t.Fatalf("performance stats are out of spec: %v", stats)
+ }
+}
diff --git a/vendor/github.com/hashicorp/serf/coordinate/phantom.go b/vendor/github.com/hashicorp/serf/coordinate/phantom.go
new file mode 100644
index 000000000..6fb033c0c
--- /dev/null
+++ b/vendor/github.com/hashicorp/serf/coordinate/phantom.go
@@ -0,0 +1,187 @@
+package coordinate
+
+import (
+ "fmt"
+ "math"
+ "math/rand"
+ "time"
+)
+
+// GenerateClients returns a slice containing the given number of clients, all
+// created with the given config.
+func GenerateClients(nodes int, config *Config) ([]*Client, error) {
+ clients := make([]*Client, nodes)
+ for i, _ := range clients {
+ client, err := NewClient(config)
+ if err != nil {
+ return nil, err
+ }
+
+ clients[i] = client
+ }
+ return clients, nil
+}
+
+// GenerateLine returns a truth matrix as if all the nodes are in a straight line
+// with the given spacing between them.
+func GenerateLine(nodes int, spacing time.Duration) [][]time.Duration {
+ truth := make([][]time.Duration, nodes)
+ for i := range truth {
+ truth[i] = make([]time.Duration, nodes)
+ }
+
+ for i := 0; i < nodes; i++ {
+ for j := i + 1; j < nodes; j++ {
+ rtt := time.Duration(j-i) * spacing
+ truth[i][j], truth[j][i] = rtt, rtt
+ }
+ }
+ return truth
+}
+
+// GenerateGrid returns a truth matrix as if all the nodes are in a two dimensional
+// grid with the given spacing between them.
+func GenerateGrid(nodes int, spacing time.Duration) [][]time.Duration {
+ truth := make([][]time.Duration, nodes)
+ for i := range truth {
+ truth[i] = make([]time.Duration, nodes)
+ }
+
+ n := int(math.Sqrt(float64(nodes)))
+ for i := 0; i < nodes; i++ {
+ for j := i + 1; j < nodes; j++ {
+ x1, y1 := float64(i%n), float64(i/n)
+ x2, y2 := float64(j%n), float64(j/n)
+ dx, dy := x2-x1, y2-y1
+ dist := math.Sqrt(dx*dx + dy*dy)
+ rtt := time.Duration(dist * float64(spacing))
+ truth[i][j], truth[j][i] = rtt, rtt
+ }
+ }
+ return truth
+}
+
+// GenerateSplit returns a truth matrix as if half the nodes are close together in
+// one location and half the nodes are close together in another. The lan factor
+// is used to separate the nodes locally and the wan factor represents the split
+// between the two sides.
+func GenerateSplit(nodes int, lan time.Duration, wan time.Duration) [][]time.Duration {
+ truth := make([][]time.Duration, nodes)
+ for i := range truth {
+ truth[i] = make([]time.Duration, nodes)
+ }
+
+ split := nodes / 2
+ for i := 0; i < nodes; i++ {
+ for j := i + 1; j < nodes; j++ {
+ rtt := lan
+ if (i <= split && j > split) || (i > split && j <= split) {
+ rtt += wan
+ }
+ truth[i][j], truth[j][i] = rtt, rtt
+ }
+ }
+ return truth
+}
+
+// GenerateCircle returns a truth matrix for a set of nodes, evenly distributed
+// around a circle with the given radius.
The first node is at the "center" of the +// circle because it's equidistant from all the other nodes, but we place it at +// double the radius, so it should show up above all the other nodes in height. +func GenerateCircle(nodes int, radius time.Duration) [][]time.Duration { + truth := make([][]time.Duration, nodes) + for i := range truth { + truth[i] = make([]time.Duration, nodes) + } + + for i := 0; i < nodes; i++ { + for j := i + 1; j < nodes; j++ { + var rtt time.Duration + if i == 0 { + rtt = 2 * radius + } else { + t1 := 2.0 * math.Pi * float64(i) / float64(nodes) + x1, y1 := math.Cos(t1), math.Sin(t1) + t2 := 2.0 * math.Pi * float64(j) / float64(nodes) + x2, y2 := math.Cos(t2), math.Sin(t2) + dx, dy := x2-x1, y2-y1 + dist := math.Sqrt(dx*dx + dy*dy) + rtt = time.Duration(dist * float64(radius)) + } + truth[i][j], truth[j][i] = rtt, rtt + } + } + return truth +} + +// GenerateRandom returns a truth matrix for a set of nodes with normally +// distributed delays, with the given mean and deviation. The RNG is re-seeded +// so you always get the same matrix for a given size. +func GenerateRandom(nodes int, mean time.Duration, deviation time.Duration) [][]time.Duration { + rand.Seed(1) + + truth := make([][]time.Duration, nodes) + for i := range truth { + truth[i] = make([]time.Duration, nodes) + } + + for i := 0; i < nodes; i++ { + for j := i + 1; j < nodes; j++ { + rttSeconds := rand.NormFloat64()*deviation.Seconds() + mean.Seconds() + rtt := time.Duration(rttSeconds * secondsToNanoseconds) + truth[i][j], truth[j][i] = rtt, rtt + } + } + return truth +} + +// Simulate runs the given number of cycles using the given list of clients and +// truth matrix. On each cycle, each client will pick a random node and observe +// the truth RTT, updating its coordinate estimate. The RNG is re-seeded for +// each simulation run to get deterministic results (for this algorithm and the +// underlying algorithm which will use random numbers for position vectors when +// starting out with everything at the origin). +func Simulate(clients []*Client, truth [][]time.Duration, cycles int) { + rand.Seed(1) + + nodes := len(clients) + for cycle := 0; cycle < cycles; cycle++ { + for i, _ := range clients { + if j := rand.Intn(nodes); j != i { + c := clients[j].GetCoordinate() + rtt := truth[i][j] + node := fmt.Sprintf("node_%d", j) + clients[i].Update(node, c, rtt) + } + } + } +} + +// Stats is returned from the Evaluate function with a summary of the algorithm +// performance. +type Stats struct { + ErrorMax float64 + ErrorAvg float64 +} + +// Evaluate uses the coordinates of the given clients to calculate estimated +// distances and compares them with the given truth matrix, returning summary +// stats. 
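+// The per-pair metric is the relative error |estimated-actual|/actual;
+// ErrorAvg is its mean over all pairs and ErrorMax is the worst pair.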
+func Evaluate(clients []*Client, truth [][]time.Duration) (stats Stats) { + nodes := len(clients) + count := 0 + for i := 0; i < nodes; i++ { + for j := i + 1; j < nodes; j++ { + est := clients[i].DistanceTo(clients[j].GetCoordinate()).Seconds() + actual := truth[i][j].Seconds() + error := math.Abs(est-actual) / actual + stats.ErrorMax = math.Max(stats.ErrorMax, error) + stats.ErrorAvg += error + count += 1 + } + } + + stats.ErrorAvg /= float64(count) + fmt.Printf("Error avg=%9.6f max=%9.6f\n", stats.ErrorAvg, stats.ErrorMax) + return +} diff --git a/vendor/github.com/hashicorp/serf/coordinate/test_util.go b/vendor/github.com/hashicorp/serf/coordinate/test_util.go new file mode 100644 index 000000000..116e94933 --- /dev/null +++ b/vendor/github.com/hashicorp/serf/coordinate/test_util.go @@ -0,0 +1,27 @@ +package coordinate + +import ( + "math" + "testing" +) + +// verifyEqualFloats will compare f1 and f2 and fail if they are not +// "equal" within a threshold. +func verifyEqualFloats(t *testing.T, f1 float64, f2 float64) { + const zeroThreshold = 1.0e-6 + if math.Abs(f1-f2) > zeroThreshold { + t.Fatalf("equal assertion fail, %9.6f != %9.6f", f1, f2) + } +} + +// verifyEqualVectors will compare vec1 and vec2 and fail if they are not +// "equal" within a threshold. +func verifyEqualVectors(t *testing.T, vec1 []float64, vec2 []float64) { + if len(vec1) != len(vec2) { + t.Fatalf("vector length mismatch, %d != %d", len(vec1), len(vec2)) + } + + for i, _ := range vec1 { + verifyEqualFloats(t, vec1[i], vec2[i]) + } +} diff --git a/vendor/github.com/hashicorp/yamux/.gitignore b/vendor/github.com/hashicorp/yamux/.gitignore new file mode 100644 index 000000000..836562412 --- /dev/null +++ b/vendor/github.com/hashicorp/yamux/.gitignore @@ -0,0 +1,23 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe +*.test diff --git a/vendor/github.com/hashicorp/yamux/LICENSE b/vendor/github.com/hashicorp/yamux/LICENSE new file mode 100644 index 000000000..f0e5c79e1 --- /dev/null +++ b/vendor/github.com/hashicorp/yamux/LICENSE @@ -0,0 +1,362 @@ +Mozilla Public License, version 2.0 + +1. Definitions + +1.1. "Contributor" + + means each individual or legal entity that creates, contributes to the + creation of, or owns Covered Software. + +1.2. "Contributor Version" + + means the combination of the Contributions of others (if any) used by a + Contributor and that particular Contributor's Contribution. + +1.3. "Contribution" + + means Covered Software of a particular Contributor. + +1.4. "Covered Software" + + means Source Code Form to which the initial Contributor has attached the + notice in Exhibit A, the Executable Form of such Source Code Form, and + Modifications of such Source Code Form, in each case including portions + thereof. + +1.5. "Incompatible With Secondary Licenses" + means + + a. that the initial Contributor has attached the notice described in + Exhibit B to the Covered Software; or + + b. that the Covered Software was made available under the terms of + version 1.1 or earlier of the License, but not also under the terms of + a Secondary License. + +1.6. "Executable Form" + + means any form of the work other than Source Code Form. + +1.7. 
"Larger Work" + + means a work that combines Covered Software with other material, in a + separate file or files, that is not Covered Software. + +1.8. "License" + + means this document. + +1.9. "Licensable" + + means having the right to grant, to the maximum extent possible, whether + at the time of the initial grant or subsequently, any and all of the + rights conveyed by this License. + +1.10. "Modifications" + + means any of the following: + + a. any file in Source Code Form that results from an addition to, + deletion from, or modification of the contents of Covered Software; or + + b. any new file in Source Code Form that contains any Covered Software. + +1.11. "Patent Claims" of a Contributor + + means any patent claim(s), including without limitation, method, + process, and apparatus claims, in any patent Licensable by such + Contributor that would be infringed, but for the grant of the License, + by the making, using, selling, offering for sale, having made, import, + or transfer of either its Contributions or its Contributor Version. + +1.12. "Secondary License" + + means either the GNU General Public License, Version 2.0, the GNU Lesser + General Public License, Version 2.1, the GNU Affero General Public + License, Version 3.0, or any later versions of those licenses. + +1.13. "Source Code Form" + + means the form of the work preferred for making modifications. + +1.14. "You" (or "Your") + + means an individual or a legal entity exercising rights under this + License. For legal entities, "You" includes any entity that controls, is + controlled by, or is under common control with You. For purposes of this + definition, "control" means (a) the power, direct or indirect, to cause + the direction or management of such entity, whether by contract or + otherwise, or (b) ownership of more than fifty percent (50%) of the + outstanding shares or beneficial ownership of such entity. + + +2. License Grants and Conditions + +2.1. Grants + + Each Contributor hereby grants You a world-wide, royalty-free, + non-exclusive license: + + a. under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or + as part of a Larger Work; and + + b. under Patent Claims of such Contributor to make, use, sell, offer for + sale, have made, import, and otherwise transfer either its + Contributions or its Contributor Version. + +2.2. Effective Date + + The licenses granted in Section 2.1 with respect to any Contribution + become effective for each Contribution on the date the Contributor first + distributes such Contribution. + +2.3. Limitations on Grant Scope + + The licenses granted in this Section 2 are the only rights granted under + this License. No additional rights or licenses will be implied from the + distribution or licensing of Covered Software under this License. + Notwithstanding Section 2.1(b) above, no patent license is granted by a + Contributor: + + a. for any code that a Contributor has removed from Covered Software; or + + b. for infringements caused by: (i) Your and any other third party's + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + + c. under Patent Claims infringed by Covered Software in the absence of + its Contributions. 
+ + This License does not grant any rights in the trademarks, service marks, + or logos of any Contributor (except as may be necessary to comply with + the notice requirements in Section 3.4). + +2.4. Subsequent Licenses + + No Contributor makes additional grants as a result of Your choice to + distribute the Covered Software under a subsequent version of this + License (see Section 10.2) or under the terms of a Secondary License (if + permitted under the terms of Section 3.3). + +2.5. Representation + + Each Contributor represents that the Contributor believes its + Contributions are its original creation(s) or it has sufficient rights to + grant the rights to its Contributions conveyed by this License. + +2.6. Fair Use + + This License is not intended to limit any rights You have under + applicable copyright doctrines of fair use, fair dealing, or other + equivalents. + +2.7. Conditions + + Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in + Section 2.1. + + +3. Responsibilities + +3.1. Distribution of Source Form + + All distribution of Covered Software in Source Code Form, including any + Modifications that You create or to which You contribute, must be under + the terms of this License. You must inform recipients that the Source + Code Form of the Covered Software is governed by the terms of this + License, and how they can obtain a copy of this License. You may not + attempt to alter or restrict the recipients' rights in the Source Code + Form. + +3.2. Distribution of Executable Form + + If You distribute Covered Software in Executable Form then: + + a. such Covered Software must also be made available in Source Code Form, + as described in Section 3.1, and You must inform recipients of the + Executable Form how they can obtain a copy of such Source Code Form by + reasonable means in a timely manner, at a charge no more than the cost + of distribution to the recipient; and + + b. You may distribute such Executable Form under the terms of this + License, or sublicense it under different terms, provided that the + license for the Executable Form does not attempt to limit or alter the + recipients' rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + + You may create and distribute a Larger Work under terms of Your choice, + provided that You also comply with the requirements of this License for + the Covered Software. If the Larger Work is a combination of Covered + Software with a work governed by one or more Secondary Licenses, and the + Covered Software is not Incompatible With Secondary Licenses, this + License permits You to additionally distribute such Covered Software + under the terms of such Secondary License(s), so that the recipient of + the Larger Work may, at their option, further distribute the Covered + Software under the terms of either this License or such Secondary + License(s). + +3.4. Notices + + You may not remove or alter the substance of any license notices + (including copyright notices, patent notices, disclaimers of warranty, or + limitations of liability) contained within the Source Code Form of the + Covered Software, except that You may alter any license notices to the + extent required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + + You may choose to offer, and to charge a fee for, warranty, support, + indemnity or liability obligations to one or more recipients of Covered + Software. 
However, You may do so only on Your own behalf, and not on + behalf of any Contributor. You must make it absolutely clear that any + such warranty, support, indemnity, or liability obligation is offered by + You alone, and You hereby agree to indemnify every Contributor for any + liability incurred by such Contributor as a result of warranty, support, + indemnity or liability terms You offer. You may include additional + disclaimers of warranty and limitations of liability specific to any + jurisdiction. + +4. Inability to Comply Due to Statute or Regulation + + If it is impossible for You to comply with any of the terms of this License + with respect to some or all of the Covered Software due to statute, + judicial order, or regulation then You must: (a) comply with the terms of + this License to the maximum extent possible; and (b) describe the + limitations and the code they affect. Such description must be placed in a + text file included with all distributions of the Covered Software under + this License. Except to the extent prohibited by statute or regulation, + such description must be sufficiently detailed for a recipient of ordinary + skill to be able to understand it. + +5. Termination + +5.1. The rights granted under this License will terminate automatically if You + fail to comply with any of its terms. However, if You become compliant, + then the rights granted under this License from a particular Contributor + are reinstated (a) provisionally, unless and until such Contributor + explicitly and finally terminates Your grants, and (b) on an ongoing + basis, if such Contributor fails to notify You of the non-compliance by + some reasonable means prior to 60 days after You have come back into + compliance. Moreover, Your grants from a particular Contributor are + reinstated on an ongoing basis if such Contributor notifies You of the + non-compliance by some reasonable means, this is the first time You have + received notice of non-compliance with this License from such + Contributor, and You become compliant prior to 30 days after Your receipt + of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent + infringement claim (excluding declaratory judgment actions, + counter-claims, and cross-claims) alleging that a Contributor Version + directly or indirectly infringes any patent, then the rights granted to + You by any and all Contributors for the Covered Software under Section + 2.1 of this License shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user + license agreements (excluding distributors and resellers) which have been + validly granted by You or Your distributors under this License prior to + termination shall survive termination. + +6. Disclaimer of Warranty + + Covered Software is provided under this License on an "as is" basis, + without warranty of any kind, either expressed, implied, or statutory, + including, without limitation, warranties that the Covered Software is free + of defects, merchantable, fit for a particular purpose or non-infringing. + The entire risk as to the quality and performance of the Covered Software + is with You. Should any Covered Software prove defective in any respect, + You (not any Contributor) assume the cost of any necessary servicing, + repair, or correction. This disclaimer of warranty constitutes an essential + part of this License. No use of any Covered Software is authorized under + this License except under this disclaimer. + +7. 
Limitation of Liability + + Under no circumstances and under no legal theory, whether tort (including + negligence), contract, or otherwise, shall any Contributor, or anyone who + distributes Covered Software as permitted above, be liable to You for any + direct, indirect, special, incidental, or consequential damages of any + character including, without limitation, damages for lost profits, loss of + goodwill, work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses, even if such party shall have been + informed of the possibility of such damages. This limitation of liability + shall not apply to liability for death or personal injury resulting from + such party's negligence to the extent applicable law prohibits such + limitation. Some jurisdictions do not allow the exclusion or limitation of + incidental or consequential damages, so this exclusion and limitation may + not apply to You. + +8. Litigation + + Any litigation relating to this License may be brought only in the courts + of a jurisdiction where the defendant maintains its principal place of + business and such litigation shall be governed by laws of that + jurisdiction, without reference to its conflict-of-law provisions. Nothing + in this Section shall prevent a party's ability to bring cross-claims or + counter-claims. + +9. Miscellaneous + + This License represents the complete agreement concerning the subject + matter hereof. If any provision of this License is held to be + unenforceable, such provision shall be reformed only to the extent + necessary to make it enforceable. Any law or regulation which provides that + the language of a contract shall be construed against the drafter shall not + be used to construe this License against a Contributor. + + +10. Versions of the License + +10.1. New Versions + + Mozilla Foundation is the license steward. Except as provided in Section + 10.3, no one other than the license steward has the right to modify or + publish new versions of this License. Each version will be given a + distinguishing version number. + +10.2. Effect of New Versions + + You may distribute the Covered Software under the terms of the version + of the License under which You originally received the Covered Software, + or under the terms of any subsequent version published by the license + steward. + +10.3. Modified Versions + + If you create software not governed by this License, and you want to + create a new license for such software, you may create and use a + modified version of this License if you rename the license and remove + any references to the name of the license steward (except to note that + such modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary + Licenses If You choose to distribute Source Code Form that is + Incompatible With Secondary Licenses under the terms of this version of + the License, the notice described in Exhibit B of this License must be + attached. + +Exhibit A - Source Code Form License Notice + + This Source Code Form is subject to the + terms of the Mozilla Public License, v. + 2.0. If a copy of the MPL was not + distributed with this file, You can + obtain one at + http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular file, +then You may include the notice in a location (such as a LICENSE file in a +relevant directory) where a recipient would be likely to look for such a +notice. 
+ +You may add additional accurate notices of copyright ownership. + +Exhibit B - "Incompatible With Secondary Licenses" Notice + + This Source Code Form is "Incompatible + With Secondary Licenses", as defined by + the Mozilla Public License, v. 2.0. \ No newline at end of file diff --git a/vendor/github.com/hashicorp/yamux/README.md b/vendor/github.com/hashicorp/yamux/README.md new file mode 100644 index 000000000..d4db7fc99 --- /dev/null +++ b/vendor/github.com/hashicorp/yamux/README.md @@ -0,0 +1,86 @@ +# Yamux + +Yamux (Yet another Multiplexer) is a multiplexing library for Golang. +It relies on an underlying connection to provide reliability +and ordering, such as TCP or Unix domain sockets, and provides +stream-oriented multiplexing. It is inspired by SPDY but is not +interoperable with it. + +Yamux features include: + +* Bi-directional streams + * Streams can be opened by either client or server + * Useful for NAT traversal + * Server-side push support +* Flow control + * Avoid starvation + * Back-pressure to prevent overwhelming a receiver +* Keep Alives + * Enables persistent connections over a load balancer +* Efficient + * Enables thousands of logical streams with low overhead + +## Documentation + +For complete documentation, see the associated [Godoc](http://godoc.org/github.com/hashicorp/yamux). + +## Specification + +The full specification for Yamux is provided in the `spec.md` file. +It can be used as a guide to implementors of interoperable libraries. + +## Usage + +Using Yamux is remarkably simple: + +```go + +func client() { + // Get a TCP connection + conn, err := net.Dial(...) + if err != nil { + panic(err) + } + + // Setup client side of yamux + session, err := yamux.Client(conn, nil) + if err != nil { + panic(err) + } + + // Open a new stream + stream, err := session.Open() + if err != nil { + panic(err) + } + + // Stream implements net.Conn + stream.Write([]byte("ping")) +} + +func server() { + // Accept a TCP connection + conn, err := listener.Accept() + if err != nil { + panic(err) + } + + // Setup server side of yamux + session, err := yamux.Server(conn, nil) + if err != nil { + panic(err) + } + + // Accept a stream + stream, err := session.Accept() + if err != nil { + panic(err) + } + + // Listen for a message + buf := make([]byte, 4) + stream.Read(buf) +} + +``` + diff --git a/vendor/github.com/hashicorp/yamux/addr.go b/vendor/github.com/hashicorp/yamux/addr.go new file mode 100644 index 000000000..be6ebca9c --- /dev/null +++ b/vendor/github.com/hashicorp/yamux/addr.go @@ -0,0 +1,60 @@ +package yamux + +import ( + "fmt" + "net" +) + +// hasAddr is used to get the address from the underlying connection +type hasAddr interface { + LocalAddr() net.Addr + RemoteAddr() net.Addr +} + +// yamuxAddr is used when we cannot get the underlying address +type yamuxAddr struct { + Addr string +} + +func (*yamuxAddr) Network() string { + return "yamux" +} + +func (y *yamuxAddr) String() string { + return fmt.Sprintf("yamux:%s", y.Addr) +} + +// Addr is used to get the address of the listener. +func (s *Session) Addr() net.Addr { + return s.LocalAddr() +} + +// LocalAddr is used to get the local address of the +// underlying connection. 
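+// If the underlying connection does not expose addresses, a placeholder
+// yamux address is returned instead.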
+func (s *Session) LocalAddr() net.Addr {
+ addr, ok := s.conn.(hasAddr)
+ if !ok {
+ return &yamuxAddr{"local"}
+ }
+ return addr.LocalAddr()
+}
+
+// RemoteAddr is used to get the address of the remote end
+// of the underlying connection.
+func (s *Session) RemoteAddr() net.Addr {
+ addr, ok := s.conn.(hasAddr)
+ if !ok {
+ return &yamuxAddr{"remote"}
+ }
+ return addr.RemoteAddr()
+}
+
+// LocalAddr returns the local address
+func (s *Stream) LocalAddr() net.Addr {
+ return s.session.LocalAddr()
+}
+
+// RemoteAddr returns the remote address
+func (s *Stream) RemoteAddr() net.Addr {
+ return s.session.RemoteAddr()
+}
diff --git a/vendor/github.com/hashicorp/yamux/bench_test.go b/vendor/github.com/hashicorp/yamux/bench_test.go
new file mode 100644
index 000000000..4377b839c
--- /dev/null
+++ b/vendor/github.com/hashicorp/yamux/bench_test.go
@@ -0,0 +1,85 @@
+package yamux
+
+import (
+ "testing"
+)
+
+func BenchmarkPing(b *testing.B) {
+ client, server := testClientServer()
+ defer client.Close()
+ defer server.Close()
+
+ for i := 0; i < b.N; i++ {
+ rtt, err := client.Ping()
+ if err != nil {
+ b.Fatalf("err: %v", err)
+ }
+ if rtt == 0 {
+ b.Fatalf("bad: %v", rtt)
+ }
+ }
+}
+
+func BenchmarkAccept(b *testing.B) {
+ client, server := testClientServer()
+ defer client.Close()
+ defer server.Close()
+
+ go func() {
+ for i := 0; i < b.N; i++ {
+ stream, err := server.AcceptStream()
+ if err != nil {
+ return
+ }
+ stream.Close()
+ }
+ }()
+
+ for i := 0; i < b.N; i++ {
+ stream, err := client.Open()
+ if err != nil {
+ b.Fatalf("err: %v", err)
+ }
+ stream.Close()
+ }
+}
+
+func BenchmarkSendRecv(b *testing.B) {
+ client, server := testClientServer()
+ defer client.Close()
+ defer server.Close()
+
+ sendBuf := make([]byte, 512)
+ recvBuf := make([]byte, 512)
+
+ doneCh := make(chan struct{})
+ go func() {
+ // Close doneCh on all exit paths so the sender cannot block forever.
+ defer close(doneCh)
+ stream, err := server.AcceptStream()
+ if err != nil {
+ return
+ }
+ defer stream.Close()
+ for i := 0; i < b.N; i++ {
+ if _, err := stream.Read(recvBuf); err != nil {
+ // Fatalf must only be called from the benchmark goroutine,
+ // so report the error and bail out instead.
+ b.Errorf("err: %v", err)
+ return
+ }
+ }
+ }()
+
+ stream, err := client.Open()
+ if err != nil {
+ b.Fatalf("err: %v", err)
+ }
+ defer stream.Close()
+ for i := 0; i < b.N; i++ {
+ if _, err := stream.Write(sendBuf); err != nil {
+ b.Fatalf("err: %v", err)
+ }
+ }
+ <-doneCh
+}
diff --git a/vendor/github.com/hashicorp/yamux/const.go b/vendor/github.com/hashicorp/yamux/const.go
new file mode 100644
index 000000000..4f5293828
--- /dev/null
+++ b/vendor/github.com/hashicorp/yamux/const.go
@@ -0,0 +1,157 @@
+package yamux
+
+import (
+ "encoding/binary"
+ "fmt"
+)
+
+var (
+ // ErrInvalidVersion means we received a frame with an
+ // invalid version
+ ErrInvalidVersion = fmt.Errorf("invalid protocol version")
+
+ // ErrInvalidMsgType means we received a frame with an
+ // invalid message type
+ ErrInvalidMsgType = fmt.Errorf("invalid msg type")
+
+ // ErrSessionShutdown is used if there is a shutdown during
+ // an operation
+ ErrSessionShutdown = fmt.Errorf("session shutdown")
+
+ // ErrStreamsExhausted is returned if we have no more
+ // stream ids to issue
+ ErrStreamsExhausted = fmt.Errorf("streams exhausted")
+
+ // ErrDuplicateStream is used if a duplicate stream is
+ // opened inbound
+ ErrDuplicateStream = fmt.Errorf("duplicate stream initiated")
+
+ // ErrRecvWindowExceeded indicates the window was exceeded
+ ErrRecvWindowExceeded = fmt.Errorf("recv window exceeded")
+
+ // ErrTimeout is used when we reach an IO deadline
+ ErrTimeout = fmt.Errorf("i/o deadline reached")
+
+ // ErrStreamClosed is
returned when using a closed stream + ErrStreamClosed = fmt.Errorf("stream closed") + + // ErrUnexpectedFlag is set when we get an unexpected flag + ErrUnexpectedFlag = fmt.Errorf("unexpected flag") + + // ErrRemoteGoAway is used when we get a go away from the other side + ErrRemoteGoAway = fmt.Errorf("remote end is not accepting connections") + + // ErrConnectionReset is sent if a stream is reset. This can happen + // if the backlog is exceeded, or if there was a remote GoAway. + ErrConnectionReset = fmt.Errorf("connection reset") + + // ErrConnectionWriteTimeout indicates that we hit the "safety valve" + // timeout writing to the underlying stream connection. + ErrConnectionWriteTimeout = fmt.Errorf("connection write timeout") + + // ErrKeepAliveTimeout is sent if a missed keepalive caused the stream close + ErrKeepAliveTimeout = fmt.Errorf("keepalive timeout") +) + +const ( + // protoVersion is the only version we support + protoVersion uint8 = 0 +) + +const ( + // Data is used for data frames. They are followed + // by length bytes worth of payload. + typeData uint8 = iota + + // WindowUpdate is used to change the window of + // a given stream. The length indicates the delta + // update to the window. + typeWindowUpdate + + // Ping is sent as a keep-alive or to measure + // the RTT. The StreamID and Length value are echoed + // back in the response. + typePing + + // GoAway is sent to terminate a session. The StreamID + // should be 0 and the length is an error code. + typeGoAway +) + +const ( + // SYN is sent to signal a new stream. May + // be sent with a data payload + flagSYN uint16 = 1 << iota + + // ACK is sent to acknowledge a new stream. May + // be sent with a data payload + flagACK + + // FIN is sent to half-close the given stream. + // May be sent with a data payload. + flagFIN + + // RST is used to hard close a given stream. 
+	flagRST
+)
+
+const (
+	// initialStreamWindow is the initial stream window size
+	initialStreamWindow uint32 = 256 * 1024
+)
+
+const (
+	// goAwayNormal is sent on a normal termination
+	goAwayNormal uint32 = iota
+
+	// goAwayProtoErr sent on a protocol error
+	goAwayProtoErr
+
+	// goAwayInternalErr sent on an internal error
+	goAwayInternalErr
+)
+
+const (
+	sizeOfVersion  = 1
+	sizeOfType     = 1
+	sizeOfFlags    = 2
+	sizeOfStreamID = 4
+	sizeOfLength   = 4
+	headerSize     = sizeOfVersion + sizeOfType + sizeOfFlags +
+		sizeOfStreamID + sizeOfLength
+)
+
+type header []byte
+
+func (h header) Version() uint8 {
+	return h[0]
+}
+
+func (h header) MsgType() uint8 {
+	return h[1]
+}
+
+func (h header) Flags() uint16 {
+	return binary.BigEndian.Uint16(h[2:4])
+}
+
+func (h header) StreamID() uint32 {
+	return binary.BigEndian.Uint32(h[4:8])
+}
+
+func (h header) Length() uint32 {
+	return binary.BigEndian.Uint32(h[8:12])
+}
+
+func (h header) String() string {
+	return fmt.Sprintf("Vsn:%d Type:%d Flags:%d StreamID:%d Length:%d",
+		h.Version(), h.MsgType(), h.Flags(), h.StreamID(), h.Length())
+}
+
+func (h header) encode(msgType uint8, flags uint16, streamID uint32, length uint32) {
+	h[0] = protoVersion
+	h[1] = msgType
+	binary.BigEndian.PutUint16(h[2:4], flags)
+	binary.BigEndian.PutUint32(h[4:8], streamID)
+	binary.BigEndian.PutUint32(h[8:12], length)
+}
diff --git a/vendor/github.com/hashicorp/yamux/const_test.go b/vendor/github.com/hashicorp/yamux/const_test.go
new file mode 100644
index 000000000..153da18b9
--- /dev/null
+++ b/vendor/github.com/hashicorp/yamux/const_test.go
@@ -0,0 +1,72 @@
+package yamux
+
+import (
+	"testing"
+)
+
+func TestConst(t *testing.T) {
+	if protoVersion != 0 {
+		t.Fatalf("bad: %v", protoVersion)
+	}
+
+	if typeData != 0 {
+		t.Fatalf("bad: %v", typeData)
+	}
+	if typeWindowUpdate != 1 {
+		t.Fatalf("bad: %v", typeWindowUpdate)
+	}
+	if typePing != 2 {
+		t.Fatalf("bad: %v", typePing)
+	}
+	if typeGoAway != 3 {
+		t.Fatalf("bad: %v", typeGoAway)
+	}
+
+	if flagSYN != 1 {
+		t.Fatalf("bad: %v", flagSYN)
+	}
+	if flagACK != 2 {
+		t.Fatalf("bad: %v", flagACK)
+	}
+	if flagFIN != 4 {
+		t.Fatalf("bad: %v", flagFIN)
+	}
+	if flagRST != 8 {
+		t.Fatalf("bad: %v", flagRST)
+	}
+
+	if goAwayNormal != 0 {
+		t.Fatalf("bad: %v", goAwayNormal)
+	}
+	if goAwayProtoErr != 1 {
+		t.Fatalf("bad: %v", goAwayProtoErr)
+	}
+	if goAwayInternalErr != 2 {
+		t.Fatalf("bad: %v", goAwayInternalErr)
+	}
+
+	if headerSize != 12 {
+		t.Fatalf("bad header size")
+	}
+}
+
+func TestEncodeDecode(t *testing.T) {
+	hdr := header(make([]byte, headerSize))
+	hdr.encode(typeWindowUpdate, flagACK|flagRST, 1234, 4321)
+
+	if hdr.Version() != protoVersion {
+		t.Fatalf("bad: %v", hdr)
+	}
+	if hdr.MsgType() != typeWindowUpdate {
+		t.Fatalf("bad: %v", hdr)
+	}
+	if hdr.Flags() != flagACK|flagRST {
+		t.Fatalf("bad: %v", hdr)
+	}
+	if hdr.StreamID() != 1234 {
+		t.Fatalf("bad: %v", hdr)
+	}
+	if hdr.Length() != 4321 {
+		t.Fatalf("bad: %v", hdr)
+	}
+}
diff --git a/vendor/github.com/hashicorp/yamux/mux.go b/vendor/github.com/hashicorp/yamux/mux.go
new file mode 100644
index 000000000..7abc7c744
--- /dev/null
+++ b/vendor/github.com/hashicorp/yamux/mux.go
@@ -0,0 +1,87 @@
+package yamux
+
+import (
+	"fmt"
+	"io"
+	"os"
+	"time"
+)
+
+// Config is used to tune the Yamux session
+type Config struct {
+	// AcceptBacklog is used to limit how many streams may be
+	// waiting for an accept.
+	AcceptBacklog int
+
+	// EnableKeepAlive is used to send periodic keep-alive
+	// messages using pings.
+	EnableKeepAlive bool
+
+	// KeepAliveInterval is how often to perform the keep alive
+	KeepAliveInterval time.Duration
+
+	// ConnectionWriteTimeout is meant to be a "safety valve" timeout after
+	// which we will suspect a problem with the underlying connection and
+	// close it. This is only applied to writes, where there's generally
+	// an expectation that things will move along quickly.
+	ConnectionWriteTimeout time.Duration
+
+	// MaxStreamWindowSize is used to control the maximum
+	// window size that we allow for a stream.
+	MaxStreamWindowSize uint32
+
+	// LogOutput is used to control the log destination
+	LogOutput io.Writer
+}
+
+// DefaultConfig is used to return a default configuration
+func DefaultConfig() *Config {
+	return &Config{
+		AcceptBacklog:          256,
+		EnableKeepAlive:        true,
+		KeepAliveInterval:      30 * time.Second,
+		ConnectionWriteTimeout: 10 * time.Second,
+		MaxStreamWindowSize:    initialStreamWindow,
+		LogOutput:              os.Stderr,
+	}
+}
+
+// VerifyConfig is used to verify the sanity of the configuration
+func VerifyConfig(config *Config) error {
+	if config.AcceptBacklog <= 0 {
+		return fmt.Errorf("backlog must be positive")
+	}
+	if config.KeepAliveInterval == 0 {
+		return fmt.Errorf("keep-alive interval must be positive")
+	}
+	if config.MaxStreamWindowSize < initialStreamWindow {
+		return fmt.Errorf("MaxStreamWindowSize must be larger than %d", initialStreamWindow)
+	}
+	return nil
+}
+
+// Server is used to initialize a new server-side connection.
+// There must be at most one server-side connection. If a nil config is
+// provided, DefaultConfig will be used.
+func Server(conn io.ReadWriteCloser, config *Config) (*Session, error) {
+	if config == nil {
+		config = DefaultConfig()
+	}
+	if err := VerifyConfig(config); err != nil {
+		return nil, err
+	}
+	return newSession(config, conn, false), nil
+}
+
+// Client is used to initialize a new client-side connection.
+// There must be at most one client-side connection.
+func Client(conn io.ReadWriteCloser, config *Config) (*Session, error) {
+	if config == nil {
+		config = DefaultConfig()
+	}
+
+	if err := VerifyConfig(config); err != nil {
+		return nil, err
+	}
+	return newSession(config, conn, true), nil
+}
diff --git a/vendor/github.com/hashicorp/yamux/session.go b/vendor/github.com/hashicorp/yamux/session.go
new file mode 100644
index 000000000..20369bdd9
--- /dev/null
+++ b/vendor/github.com/hashicorp/yamux/session.go
@@ -0,0 +1,598 @@
+package yamux
+
+import (
+	"bufio"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"log"
+	"math"
+	"net"
+	"strings"
+	"sync"
+	"sync/atomic"
+	"time"
+)
+
+// Session is used to wrap a reliable ordered connection and to
+// multiplex it into multiple streams.
+type Session struct {
+	// remoteGoAway indicates the remote side does
+	// not want further connections. Must be first for alignment.
+	remoteGoAway int32
+
+	// localGoAway indicates that we should stop
+	// accepting further connections. Must be first for alignment.
+	localGoAway int32
+
+	// nextStreamID is the next stream we should
+	// send. This depends on whether we are a client or server.
+	nextStreamID uint32
+
+	// config holds our configuration
+	config *Config
+
+	// logger is used for our logs
+	logger *log.Logger
+
+	// conn is the underlying connection
+	conn io.ReadWriteCloser
+
+	// bufRead is a buffered reader
+	bufRead *bufio.Reader
+
+	// pings is used to track inflight pings
+	pings    map[uint32]chan struct{}
+	pingID   uint32
+	pingLock sync.Mutex
+
+	// streams maps a stream id to a stream
+	streams    map[uint32]*Stream
+	streamLock sync.Mutex
+
+	// synCh acts like a semaphore. It is sized to the AcceptBacklog which
+	// is assumed to be symmetric between the client and server. This allows
+	// the client to avoid exceeding the backlog and instead blocks the open.
+	synCh chan struct{}
+
+	// acceptCh is used to pass ready streams to the client
+	acceptCh chan *Stream
+
+	// sendCh is used to mark a stream as ready to send,
+	// or to send a header out directly.
+	sendCh chan sendReady
+
+	// recvDoneCh is closed when recv() exits to avoid a race
+	// between stream registration and stream shutdown
+	recvDoneCh chan struct{}
+
+	// shutdown is used to safely close a session
+	shutdown     bool
+	shutdownErr  error
+	shutdownCh   chan struct{}
+	shutdownLock sync.Mutex
+}
+
+// sendReady is used to either mark a stream as ready
+// or to directly send a header
+type sendReady struct {
+	Hdr  []byte
+	Body io.Reader
+	Err  chan error
+}
+
+// newSession is used to construct a new session
+func newSession(config *Config, conn io.ReadWriteCloser, client bool) *Session {
+	s := &Session{
+		config:     config,
+		logger:     log.New(config.LogOutput, "", log.LstdFlags),
+		conn:       conn,
+		bufRead:    bufio.NewReader(conn),
+		pings:      make(map[uint32]chan struct{}),
+		streams:    make(map[uint32]*Stream),
+		synCh:      make(chan struct{}, config.AcceptBacklog),
+		acceptCh:   make(chan *Stream, config.AcceptBacklog),
+		sendCh:     make(chan sendReady, 64),
+		recvDoneCh: make(chan struct{}),
+		shutdownCh: make(chan struct{}),
+	}
+	if client {
+		s.nextStreamID = 1
+	} else {
+		s.nextStreamID = 2
+	}
+	go s.recv()
+	go s.send()
+	if config.EnableKeepAlive {
+		go s.keepalive()
+	}
+	return s
+}
+
+// IsClosed does a safe check to see if we have shutdown
+func (s *Session) IsClosed() bool {
+	select {
+	case <-s.shutdownCh:
+		return true
+	default:
+		return false
+	}
+}
+
+// NumStreams returns the number of currently open streams
+func (s *Session) NumStreams() int {
+	s.streamLock.Lock()
+	num := len(s.streams)
+	s.streamLock.Unlock()
+	return num
+}
+
+// Open is used to create a new stream as a net.Conn
+func (s *Session) Open() (net.Conn, error) {
+	conn, err := s.OpenStream()
+	if err != nil {
+		return nil, err
+	}
+	return conn, nil
+}
+
+// OpenStream is used to create a new stream
+func (s *Session) OpenStream() (*Stream, error) {
+	if s.IsClosed() {
+		return nil, ErrSessionShutdown
+	}
+	if atomic.LoadInt32(&s.remoteGoAway) == 1 {
+		return nil, ErrRemoteGoAway
+	}
+
+	// Block if we have too many inflight SYNs
+	select {
+	case s.synCh <- struct{}{}:
+	case <-s.shutdownCh:
+		return nil, ErrSessionShutdown
+	}
+
+GET_ID:
+	// Get an ID, and check for stream exhaustion
+	id := atomic.LoadUint32(&s.nextStreamID)
+	if id >= math.MaxUint32-1 {
+		return nil, ErrStreamsExhausted
+	}
+	if !atomic.CompareAndSwapUint32(&s.nextStreamID, id, id+2) {
+		goto GET_ID
+	}
+
+	// Register the stream
+	stream := newStream(s, id, streamInit)
+	s.streamLock.Lock()
+	s.streams[id] = stream
+	s.streamLock.Unlock()
+
+	// Send the window update to create
+	if err := stream.sendWindowUpdate(); err != nil {
+		return nil, err
+	}
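+
+	// Note: the window update above is sent with the SYN flag set (see
+	// sendFlags), which is what announces this stream to the remote side.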
+	return stream, nil
+}
+
+// Accept is used to block until the next available stream
+// is ready to be accepted.
+func (s *Session) Accept() (net.Conn, error) {
+	conn, err := s.AcceptStream()
+	if err != nil {
+		return nil, err
+	}
+	return conn, nil
+}
+
+// AcceptStream is used to block until the next available stream
+// is ready to be accepted.
+func (s *Session) AcceptStream() (*Stream, error) {
+	select {
+	case stream := <-s.acceptCh:
+		if err := stream.sendWindowUpdate(); err != nil {
+			return nil, err
+		}
+		return stream, nil
+	case <-s.shutdownCh:
+		return nil, s.shutdownErr
+	}
+}
+
+// Close is used to close the session and all streams.
+// Attempts to send a GoAway before closing the connection.
+func (s *Session) Close() error {
+	s.shutdownLock.Lock()
+	defer s.shutdownLock.Unlock()
+
+	if s.shutdown {
+		return nil
+	}
+	s.shutdown = true
+	if s.shutdownErr == nil {
+		s.shutdownErr = ErrSessionShutdown
+	}
+	close(s.shutdownCh)
+	s.conn.Close()
+	<-s.recvDoneCh
+
+	s.streamLock.Lock()
+	defer s.streamLock.Unlock()
+	for _, stream := range s.streams {
+		stream.forceClose()
+	}
+	return nil
+}
+
+// exitErr is used to handle an error that is causing the
+// session to terminate.
+func (s *Session) exitErr(err error) {
+	s.shutdownLock.Lock()
+	if s.shutdownErr == nil {
+		s.shutdownErr = err
+	}
+	s.shutdownLock.Unlock()
+	s.Close()
+}
+
+// GoAway can be used to prevent accepting further
+// connections. It does not close the underlying conn.
+func (s *Session) GoAway() error {
+	return s.waitForSend(s.goAway(goAwayNormal), nil)
+}
+
+// goAway is used to send a goAway message
+func (s *Session) goAway(reason uint32) header {
+	atomic.SwapInt32(&s.localGoAway, 1)
+	hdr := header(make([]byte, headerSize))
+	hdr.encode(typeGoAway, 0, 0, reason)
+	return hdr
+}
+
+// Ping is used to measure the round trip time (RTT)
+func (s *Session) Ping() (time.Duration, error) {
+	// Get a channel for the ping
+	ch := make(chan struct{})
+
+	// Get a new ping id, mark as pending
+	s.pingLock.Lock()
+	id := s.pingID
+	s.pingID++
+	s.pings[id] = ch
+	s.pingLock.Unlock()
+
+	// Send the ping request
+	hdr := header(make([]byte, headerSize))
+	hdr.encode(typePing, flagSYN, 0, id)
+	if err := s.waitForSend(hdr, nil); err != nil {
+		return 0, err
+	}
+
+	// Wait for a response
+	start := time.Now()
+	select {
+	case <-ch:
+	case <-time.After(s.config.ConnectionWriteTimeout):
+		s.pingLock.Lock()
+		delete(s.pings, id) // Ignore it if a response comes later.
+		s.pingLock.Unlock()
+		return 0, ErrTimeout
+	case <-s.shutdownCh:
+		return 0, ErrSessionShutdown
+	}
+
+	// Compute the RTT
+	return time.Now().Sub(start), nil
+}
+
+// keepalive is a long running goroutine that periodically does
+// a ping to keep the connection alive.
+func (s *Session) keepalive() {
+	for {
+		select {
+		case <-time.After(s.config.KeepAliveInterval):
+			_, err := s.Ping()
+			if err != nil {
+				s.logger.Printf("[ERR] yamux: keepalive failed: %v", err)
+				s.exitErr(ErrKeepAliveTimeout)
+				return
+			}
+		case <-s.shutdownCh:
+			return
+		}
+	}
+}
+
+// waitForSend waits to send a header, checking for a potential shutdown
+func (s *Session) waitForSend(hdr header, body io.Reader) error {
+	errCh := make(chan error, 1)
+	return s.waitForSendErr(hdr, body, errCh)
+}
+
+// waitForSendErr waits to send a header with optional data, checking for a
+// potential shutdown. Since there's the expectation that sends can happen
+// in a timely manner, we enforce the connection write timeout here.
+func (s *Session) waitForSendErr(hdr header, body io.Reader, errCh chan error) error { + timer := time.NewTimer(s.config.ConnectionWriteTimeout) + defer timer.Stop() + + ready := sendReady{Hdr: hdr, Body: body, Err: errCh} + select { + case s.sendCh <- ready: + case <-s.shutdownCh: + return ErrSessionShutdown + case <-timer.C: + return ErrConnectionWriteTimeout + } + + select { + case err := <-errCh: + return err + case <-s.shutdownCh: + return ErrSessionShutdown + case <-timer.C: + return ErrConnectionWriteTimeout + } +} + +// sendNoWait does a send without waiting. Since there's the expectation that +// the send happens right here, we enforce the connection write timeout if we +// can't queue the header to be sent. +func (s *Session) sendNoWait(hdr header) error { + timer := time.NewTimer(s.config.ConnectionWriteTimeout) + defer timer.Stop() + + select { + case s.sendCh <- sendReady{Hdr: hdr}: + return nil + case <-s.shutdownCh: + return ErrSessionShutdown + case <-timer.C: + return ErrConnectionWriteTimeout + } +} + +// send is a long running goroutine that sends data +func (s *Session) send() { + for { + select { + case ready := <-s.sendCh: + // Send a header if ready + if ready.Hdr != nil { + sent := 0 + for sent < len(ready.Hdr) { + n, err := s.conn.Write(ready.Hdr[sent:]) + if err != nil { + s.logger.Printf("[ERR] yamux: Failed to write header: %v", err) + asyncSendErr(ready.Err, err) + s.exitErr(err) + return + } + sent += n + } + } + + // Send data from a body if given + if ready.Body != nil { + _, err := io.Copy(s.conn, ready.Body) + if err != nil { + s.logger.Printf("[ERR] yamux: Failed to write body: %v", err) + asyncSendErr(ready.Err, err) + s.exitErr(err) + return + } + } + + // No error, successful send + asyncSendErr(ready.Err, nil) + case <-s.shutdownCh: + return + } + } +} + +// recv is a long running goroutine that accepts new data +func (s *Session) recv() { + if err := s.recvLoop(); err != nil { + s.exitErr(err) + } +} + +// recvLoop continues to receive data until a fatal error is encountered +func (s *Session) recvLoop() error { + defer close(s.recvDoneCh) + hdr := header(make([]byte, headerSize)) + var handler func(header) error + for { + // Read the header + if _, err := io.ReadFull(s.bufRead, hdr); err != nil { + if err != io.EOF && !strings.Contains(err.Error(), "closed") && !strings.Contains(err.Error(), "reset by peer") { + s.logger.Printf("[ERR] yamux: Failed to read header: %v", err) + } + return err + } + + // Verify the version + if hdr.Version() != protoVersion { + s.logger.Printf("[ERR] yamux: Invalid protocol version: %d", hdr.Version()) + return ErrInvalidVersion + } + + // Switch on the type + switch hdr.MsgType() { + case typeData: + handler = s.handleStreamMessage + case typeWindowUpdate: + handler = s.handleStreamMessage + case typeGoAway: + handler = s.handleGoAway + case typePing: + handler = s.handlePing + default: + return ErrInvalidMsgType + } + + // Invoke the handler + if err := handler(hdr); err != nil { + return err + } + } +} + +// handleStreamMessage handles either a data or window update frame +func (s *Session) handleStreamMessage(hdr header) error { + // Check for a new stream creation + id := hdr.StreamID() + flags := hdr.Flags() + if flags&flagSYN == flagSYN { + if err := s.incomingStream(id); err != nil { + return err + } + } + + // Get the stream + s.streamLock.Lock() + stream := s.streams[id] + s.streamLock.Unlock() + + // If we do not have a stream, likely we sent a RST + if stream == nil { + // Drain any data on the wire + if 
hdr.MsgType() == typeData && hdr.Length() > 0 {
+			s.logger.Printf("[WARN] yamux: Discarding data for stream: %d", id)
+			if _, err := io.CopyN(ioutil.Discard, s.bufRead, int64(hdr.Length())); err != nil {
+				s.logger.Printf("[ERR] yamux: Failed to discard data: %v", err)
+				return nil
+			}
+		} else {
+			s.logger.Printf("[WARN] yamux: frame for missing stream: %v", hdr)
+		}
+		return nil
+	}
+
+	// Check if this is a window update
+	if hdr.MsgType() == typeWindowUpdate {
+		if err := stream.incrSendWindow(hdr, flags); err != nil {
+			if sendErr := s.sendNoWait(s.goAway(goAwayProtoErr)); sendErr != nil {
+				s.logger.Printf("[WARN] yamux: failed to send go away: %v", sendErr)
+			}
+			return err
+		}
+		return nil
+	}
+
+	// Read the new data
+	if err := stream.readData(hdr, flags, s.bufRead); err != nil {
+		if sendErr := s.sendNoWait(s.goAway(goAwayProtoErr)); sendErr != nil {
+			s.logger.Printf("[WARN] yamux: failed to send go away: %v", sendErr)
+		}
+		return err
+	}
+	return nil
+}
+
+// handlePing is invoked for a typePing frame
+func (s *Session) handlePing(hdr header) error {
+	flags := hdr.Flags()
+	pingID := hdr.Length()
+
+	// Check if this is a query, respond back in a separate context so we
+	// don't interfere with the receiving thread blocking for the write.
+	if flags&flagSYN == flagSYN {
+		go func() {
+			hdr := header(make([]byte, headerSize))
+			hdr.encode(typePing, flagACK, 0, pingID)
+			if err := s.sendNoWait(hdr); err != nil {
+				s.logger.Printf("[WARN] yamux: failed to send ping reply: %v", err)
+			}
+		}()
+		return nil
+	}
+
+	// Handle a response
+	s.pingLock.Lock()
+	ch := s.pings[pingID]
+	if ch != nil {
+		delete(s.pings, pingID)
+		close(ch)
+	}
+	s.pingLock.Unlock()
+	return nil
+}
+
+// handleGoAway is invoked for a typeGoAway frame
+func (s *Session) handleGoAway(hdr header) error {
+	code := hdr.Length()
+	switch code {
+	case goAwayNormal:
+		atomic.SwapInt32(&s.remoteGoAway, 1)
+	case goAwayProtoErr:
+		s.logger.Printf("[ERR] yamux: received protocol error go away")
+		return fmt.Errorf("yamux protocol error")
+	case goAwayInternalErr:
+		s.logger.Printf("[ERR] yamux: received internal error go away")
+		return fmt.Errorf("remote yamux internal error")
+	default:
+		s.logger.Printf("[ERR] yamux: received unexpected go away")
+		return fmt.Errorf("unexpected go away received")
+	}
+	return nil
+}
+
+// incomingStream is used to create a new incoming stream
+func (s *Session) incomingStream(id uint32) error {
+	// Reject immediately if we are doing a go away
+	if atomic.LoadInt32(&s.localGoAway) == 1 {
+		hdr := header(make([]byte, headerSize))
+		hdr.encode(typeWindowUpdate, flagRST, id, 0)
+		return s.sendNoWait(hdr)
+	}
+
+	// Allocate a new stream
+	stream := newStream(s, id, streamSYNReceived)
+
+	s.streamLock.Lock()
+	defer s.streamLock.Unlock()
+
+	// Check if stream already exists
+	if _, ok := s.streams[id]; ok {
+		s.logger.Printf("[ERR] yamux: duplicate stream declared")
+		if sendErr := s.sendNoWait(s.goAway(goAwayProtoErr)); sendErr != nil {
+			s.logger.Printf("[WARN] yamux: failed to send go away: %v", sendErr)
+		}
+		return ErrDuplicateStream
+	}
+
+	// Register the stream
+	s.streams[id] = stream
+
+	// Check if we've exceeded the backlog
+	select {
+	case s.acceptCh <- stream:
+		return nil
+	default:
+		// Backlog exceeded!
RST the stream + s.logger.Printf("[WARN] yamux: backlog exceeded, forcing connection reset") + delete(s.streams, id) + stream.sendHdr.encode(typeWindowUpdate, flagRST, id, 0) + return s.sendNoWait(stream.sendHdr) + } +} + +// closeStream is used to close a stream once both sides have +// issued a close. +func (s *Session) closeStream(id uint32) { + s.streamLock.Lock() + delete(s.streams, id) + s.streamLock.Unlock() +} + +// establishStream is used to mark a stream that was in the +// SYN Sent state as established. +func (s *Session) establishStream() { + select { + case <-s.synCh: + default: + panic("established stream without inflight syn") + } +} diff --git a/vendor/github.com/hashicorp/yamux/session_test.go b/vendor/github.com/hashicorp/yamux/session_test.go new file mode 100644 index 000000000..8a1b7d5de --- /dev/null +++ b/vendor/github.com/hashicorp/yamux/session_test.go @@ -0,0 +1,1153 @@ +package yamux + +import ( + "bytes" + "fmt" + "io" + "io/ioutil" + "log" + "reflect" + "runtime" + "strings" + "sync" + "testing" + "time" +) + +type logCapture struct{ bytes.Buffer } + +func (l *logCapture) logs() []string { + return strings.Split(strings.TrimSpace(l.String()), "\n") +} + +func (l *logCapture) match(expect []string) bool { + return reflect.DeepEqual(l.logs(), expect) +} + +func captureLogs(s *Session) *logCapture { + buf := new(logCapture) + s.logger = log.New(buf, "", 0) + return buf +} + +type pipeConn struct { + reader *io.PipeReader + writer *io.PipeWriter + writeBlocker sync.Mutex +} + +func (p *pipeConn) Read(b []byte) (int, error) { + return p.reader.Read(b) +} + +func (p *pipeConn) Write(b []byte) (int, error) { + p.writeBlocker.Lock() + defer p.writeBlocker.Unlock() + return p.writer.Write(b) +} + +func (p *pipeConn) Close() error { + p.reader.Close() + return p.writer.Close() +} + +func testConn() (io.ReadWriteCloser, io.ReadWriteCloser) { + read1, write1 := io.Pipe() + read2, write2 := io.Pipe() + conn1 := &pipeConn{reader: read1, writer: write2} + conn2 := &pipeConn{reader: read2, writer: write1} + return conn1, conn2 +} + +func testConf() *Config { + conf := DefaultConfig() + conf.AcceptBacklog = 64 + conf.KeepAliveInterval = 100 * time.Millisecond + conf.ConnectionWriteTimeout = 250 * time.Millisecond + return conf +} + +func testConfNoKeepAlive() *Config { + conf := testConf() + conf.EnableKeepAlive = false + return conf +} + +func testClientServer() (*Session, *Session) { + return testClientServerConfig(testConf()) +} + +func testClientServerConfig(conf *Config) (*Session, *Session) { + conn1, conn2 := testConn() + client, _ := Client(conn1, conf) + server, _ := Server(conn2, conf) + return client, server +} + +func TestPing(t *testing.T) { + client, server := testClientServer() + defer client.Close() + defer server.Close() + + rtt, err := client.Ping() + if err != nil { + t.Fatalf("err: %v", err) + } + if rtt == 0 { + t.Fatalf("bad: %v", rtt) + } + + rtt, err = server.Ping() + if err != nil { + t.Fatalf("err: %v", err) + } + if rtt == 0 { + t.Fatalf("bad: %v", rtt) + } +} + +func TestPing_Timeout(t *testing.T) { + client, server := testClientServerConfig(testConfNoKeepAlive()) + defer client.Close() + defer server.Close() + + // Prevent the client from responding + clientConn := client.conn.(*pipeConn) + clientConn.writeBlocker.Lock() + + errCh := make(chan error, 1) + go func() { + _, err := server.Ping() // Ping via the server session + errCh <- err + }() + + select { + case err := <-errCh: + if err != ErrTimeout { + t.Fatalf("err: %v", err) + } + case 
<-time.After(client.config.ConnectionWriteTimeout * 2): + t.Fatalf("failed to timeout within expected %v", client.config.ConnectionWriteTimeout) + } + + // Verify that we recover, even if we gave up + clientConn.writeBlocker.Unlock() + + go func() { + _, err := server.Ping() // Ping via the server session + errCh <- err + }() + + select { + case err := <-errCh: + if err != nil { + t.Fatalf("err: %v", err) + } + case <-time.After(client.config.ConnectionWriteTimeout): + t.Fatalf("timeout") + } +} + +func TestAccept(t *testing.T) { + client, server := testClientServer() + defer client.Close() + defer server.Close() + + if client.NumStreams() != 0 { + t.Fatalf("bad") + } + if server.NumStreams() != 0 { + t.Fatalf("bad") + } + + wg := &sync.WaitGroup{} + wg.Add(4) + + go func() { + defer wg.Done() + stream, err := server.AcceptStream() + if err != nil { + t.Fatalf("err: %v", err) + } + if id := stream.StreamID(); id != 1 { + t.Fatalf("bad: %v", id) + } + if err := stream.Close(); err != nil { + t.Fatalf("err: %v", err) + } + }() + + go func() { + defer wg.Done() + stream, err := client.AcceptStream() + if err != nil { + t.Fatalf("err: %v", err) + } + if id := stream.StreamID(); id != 2 { + t.Fatalf("bad: %v", id) + } + if err := stream.Close(); err != nil { + t.Fatalf("err: %v", err) + } + }() + + go func() { + defer wg.Done() + stream, err := server.OpenStream() + if err != nil { + t.Fatalf("err: %v", err) + } + if id := stream.StreamID(); id != 2 { + t.Fatalf("bad: %v", id) + } + if err := stream.Close(); err != nil { + t.Fatalf("err: %v", err) + } + }() + + go func() { + defer wg.Done() + stream, err := client.OpenStream() + if err != nil { + t.Fatalf("err: %v", err) + } + if id := stream.StreamID(); id != 1 { + t.Fatalf("bad: %v", id) + } + if err := stream.Close(); err != nil { + t.Fatalf("err: %v", err) + } + }() + + doneCh := make(chan struct{}) + go func() { + wg.Wait() + close(doneCh) + }() + + select { + case <-doneCh: + case <-time.After(time.Second): + panic("timeout") + } +} + +func TestNonNilInterface(t *testing.T) { + _, server := testClientServer() + server.Close() + + conn, err := server.Accept() + if err != nil && conn != nil { + t.Error("bad: accept should return a connection of nil value") + } + + conn, err = server.Open() + if err != nil && conn != nil { + t.Error("bad: open should return a connection of nil value") + } +} + +func TestSendData_Small(t *testing.T) { + client, server := testClientServer() + defer client.Close() + defer server.Close() + + wg := &sync.WaitGroup{} + wg.Add(2) + + go func() { + defer wg.Done() + stream, err := server.AcceptStream() + if err != nil { + t.Fatalf("err: %v", err) + } + + if server.NumStreams() != 1 { + t.Fatalf("bad") + } + + buf := make([]byte, 4) + for i := 0; i < 1000; i++ { + n, err := stream.Read(buf) + if err != nil { + t.Fatalf("err: %v", err) + } + if n != 4 { + t.Fatalf("short read: %d", n) + } + if string(buf) != "test" { + t.Fatalf("bad: %s", buf) + } + } + + if err := stream.Close(); err != nil { + t.Fatalf("err: %v", err) + } + }() + + go func() { + defer wg.Done() + stream, err := client.Open() + if err != nil { + t.Fatalf("err: %v", err) + } + + if client.NumStreams() != 1 { + t.Fatalf("bad") + } + + for i := 0; i < 1000; i++ { + n, err := stream.Write([]byte("test")) + if err != nil { + t.Fatalf("err: %v", err) + } + if n != 4 { + t.Fatalf("short write %d", n) + } + } + + if err := stream.Close(); err != nil { + t.Fatalf("err: %v", err) + } + }() + + doneCh := make(chan struct{}) + go func() { + wg.Wait() + 
close(doneCh) + }() + select { + case <-doneCh: + case <-time.After(time.Second): + panic("timeout") + } + + if client.NumStreams() != 0 { + t.Fatalf("bad") + } + if server.NumStreams() != 0 { + t.Fatalf("bad") + } +} + +func TestSendData_Large(t *testing.T) { + client, server := testClientServer() + defer client.Close() + defer server.Close() + + data := make([]byte, 512*1024) + for idx := range data { + data[idx] = byte(idx % 256) + } + + wg := &sync.WaitGroup{} + wg.Add(2) + + go func() { + defer wg.Done() + stream, err := server.AcceptStream() + if err != nil { + t.Fatalf("err: %v", err) + } + + buf := make([]byte, 4*1024) + for i := 0; i < 128; i++ { + n, err := stream.Read(buf) + if err != nil { + t.Fatalf("err: %v", err) + } + if n != 4*1024 { + t.Fatalf("short read: %d", n) + } + for idx := range buf { + if buf[idx] != byte(idx%256) { + t.Fatalf("bad: %v %v %v", i, idx, buf[idx]) + } + } + } + + if err := stream.Close(); err != nil { + t.Fatalf("err: %v", err) + } + }() + + go func() { + defer wg.Done() + stream, err := client.Open() + if err != nil { + t.Fatalf("err: %v", err) + } + + n, err := stream.Write(data) + if err != nil { + t.Fatalf("err: %v", err) + } + if n != len(data) { + t.Fatalf("short write %d", n) + } + + if err := stream.Close(); err != nil { + t.Fatalf("err: %v", err) + } + }() + + doneCh := make(chan struct{}) + go func() { + wg.Wait() + close(doneCh) + }() + select { + case <-doneCh: + case <-time.After(time.Second): + panic("timeout") + } +} + +func TestGoAway(t *testing.T) { + client, server := testClientServer() + defer client.Close() + defer server.Close() + + if err := server.GoAway(); err != nil { + t.Fatalf("err: %v", err) + } + + _, err := client.Open() + if err != ErrRemoteGoAway { + t.Fatalf("err: %v", err) + } +} + +func TestManyStreams(t *testing.T) { + client, server := testClientServer() + defer client.Close() + defer server.Close() + + wg := &sync.WaitGroup{} + + acceptor := func(i int) { + defer wg.Done() + stream, err := server.AcceptStream() + if err != nil { + t.Fatalf("err: %v", err) + } + defer stream.Close() + + buf := make([]byte, 512) + for { + n, err := stream.Read(buf) + if err == io.EOF { + return + } + if err != nil { + t.Fatalf("err: %v", err) + } + if n == 0 { + t.Fatalf("err: %v", err) + } + } + } + sender := func(i int) { + defer wg.Done() + stream, err := client.Open() + if err != nil { + t.Fatalf("err: %v", err) + } + defer stream.Close() + + msg := fmt.Sprintf("%08d", i) + for i := 0; i < 1000; i++ { + n, err := stream.Write([]byte(msg)) + if err != nil { + t.Fatalf("err: %v", err) + } + if n != len(msg) { + t.Fatalf("short write %d", n) + } + } + } + + for i := 0; i < 50; i++ { + wg.Add(2) + go acceptor(i) + go sender(i) + } + + wg.Wait() +} + +func TestManyStreams_PingPong(t *testing.T) { + client, server := testClientServer() + defer client.Close() + defer server.Close() + + wg := &sync.WaitGroup{} + + ping := []byte("ping") + pong := []byte("pong") + + acceptor := func(i int) { + defer wg.Done() + stream, err := server.AcceptStream() + if err != nil { + t.Fatalf("err: %v", err) + } + defer stream.Close() + + buf := make([]byte, 4) + for { + // Read the 'ping' + n, err := stream.Read(buf) + if err == io.EOF { + return + } + if err != nil { + t.Fatalf("err: %v", err) + } + if n != 4 { + t.Fatalf("err: %v", err) + } + if !bytes.Equal(buf, ping) { + t.Fatalf("bad: %s", buf) + } + + // Shrink the internal buffer! 
+ stream.Shrink() + + // Write out the 'pong' + n, err = stream.Write(pong) + if err != nil { + t.Fatalf("err: %v", err) + } + if n != 4 { + t.Fatalf("err: %v", err) + } + } + } + sender := func(i int) { + defer wg.Done() + stream, err := client.OpenStream() + if err != nil { + t.Fatalf("err: %v", err) + } + defer stream.Close() + + buf := make([]byte, 4) + for i := 0; i < 1000; i++ { + // Send the 'ping' + n, err := stream.Write(ping) + if err != nil { + t.Fatalf("err: %v", err) + } + if n != 4 { + t.Fatalf("short write %d", n) + } + + // Read the 'pong' + n, err = stream.Read(buf) + if err != nil { + t.Fatalf("err: %v", err) + } + if n != 4 { + t.Fatalf("err: %v", err) + } + if !bytes.Equal(buf, pong) { + t.Fatalf("bad: %s", buf) + } + + // Shrink the buffer + stream.Shrink() + } + } + + for i := 0; i < 50; i++ { + wg.Add(2) + go acceptor(i) + go sender(i) + } + + wg.Wait() +} + +func TestHalfClose(t *testing.T) { + client, server := testClientServer() + defer client.Close() + defer server.Close() + + stream, err := client.Open() + if err != nil { + t.Fatalf("err: %v", err) + } + if _, err := stream.Write([]byte("a")); err != nil { + t.Fatalf("err: %v", err) + } + + stream2, err := server.Accept() + if err != nil { + t.Fatalf("err: %v", err) + } + stream2.Close() // Half close + + buf := make([]byte, 4) + n, err := stream2.Read(buf) + if err != nil { + t.Fatalf("err: %v", err) + } + if n != 1 { + t.Fatalf("bad: %v", n) + } + + // Send more + if _, err := stream.Write([]byte("bcd")); err != nil { + t.Fatalf("err: %v", err) + } + stream.Close() + + // Read after close + n, err = stream2.Read(buf) + if err != nil { + t.Fatalf("err: %v", err) + } + if n != 3 { + t.Fatalf("bad: %v", n) + } + + // EOF after close + n, err = stream2.Read(buf) + if err != io.EOF { + t.Fatalf("err: %v", err) + } + if n != 0 { + t.Fatalf("bad: %v", n) + } +} + +func TestReadDeadline(t *testing.T) { + client, server := testClientServer() + defer client.Close() + defer server.Close() + + stream, err := client.Open() + if err != nil { + t.Fatalf("err: %v", err) + } + defer stream.Close() + + stream2, err := server.Accept() + if err != nil { + t.Fatalf("err: %v", err) + } + defer stream2.Close() + + if err := stream.SetReadDeadline(time.Now().Add(5 * time.Millisecond)); err != nil { + t.Fatalf("err: %v", err) + } + + buf := make([]byte, 4) + if _, err := stream.Read(buf); err != ErrTimeout { + t.Fatalf("err: %v", err) + } +} + +func TestWriteDeadline(t *testing.T) { + client, server := testClientServer() + defer client.Close() + defer server.Close() + + stream, err := client.Open() + if err != nil { + t.Fatalf("err: %v", err) + } + defer stream.Close() + + stream2, err := server.Accept() + if err != nil { + t.Fatalf("err: %v", err) + } + defer stream2.Close() + + if err := stream.SetWriteDeadline(time.Now().Add(50 * time.Millisecond)); err != nil { + t.Fatalf("err: %v", err) + } + + buf := make([]byte, 512) + for i := 0; i < int(initialStreamWindow); i++ { + _, err := stream.Write(buf) + if err != nil && err == ErrTimeout { + return + } else if err != nil { + t.Fatalf("err: %v", err) + } + } + t.Fatalf("Expected timeout") +} + +func TestBacklogExceeded(t *testing.T) { + client, server := testClientServer() + defer client.Close() + defer server.Close() + + // Fill the backlog + max := client.config.AcceptBacklog + for i := 0; i < max; i++ { + stream, err := client.Open() + if err != nil { + t.Fatalf("err: %v", err) + } + defer stream.Close() + + if _, err := stream.Write([]byte("foo")); err != nil { + t.Fatalf("err: 
%v", err) + } + } + + // Attempt to open a new stream + errCh := make(chan error, 1) + go func() { + _, err := client.Open() + errCh <- err + }() + + // Shutdown the server + go func() { + time.Sleep(10 * time.Millisecond) + server.Close() + }() + + select { + case err := <-errCh: + if err == nil { + t.Fatalf("open should fail") + } + case <-time.After(time.Second): + t.Fatalf("timeout") + } +} + +func TestKeepAlive(t *testing.T) { + client, server := testClientServer() + defer client.Close() + defer server.Close() + + time.Sleep(200 * time.Millisecond) + + // Ping value should increase + client.pingLock.Lock() + defer client.pingLock.Unlock() + if client.pingID == 0 { + t.Fatalf("should ping") + } + + server.pingLock.Lock() + defer server.pingLock.Unlock() + if server.pingID == 0 { + t.Fatalf("should ping") + } +} + +func TestKeepAlive_Timeout(t *testing.T) { + conn1, conn2 := testConn() + + clientConf := testConf() + clientConf.ConnectionWriteTimeout = time.Hour // We're testing keep alives, not connection writes + clientConf.EnableKeepAlive = false // Just test one direction, so it's deterministic who hangs up on whom + client, _ := Client(conn1, clientConf) + defer client.Close() + + server, _ := Server(conn2, testConf()) + defer server.Close() + + _ = captureLogs(client) // Client logs aren't part of the test + serverLogs := captureLogs(server) + + errCh := make(chan error, 1) + go func() { + _, err := server.Accept() // Wait until server closes + errCh <- err + }() + + // Prevent the client from responding + clientConn := client.conn.(*pipeConn) + clientConn.writeBlocker.Lock() + + select { + case err := <-errCh: + if err != ErrKeepAliveTimeout { + t.Fatalf("unexpected error: %v", err) + } + case <-time.After(1 * time.Second): + t.Fatalf("timeout waiting for timeout") + } + + if !server.IsClosed() { + t.Fatalf("server should have closed") + } + + if !serverLogs.match([]string{"[ERR] yamux: keepalive failed: i/o deadline reached"}) { + t.Fatalf("server log incorect: %v", serverLogs.logs()) + } +} + +func TestLargeWindow(t *testing.T) { + conf := DefaultConfig() + conf.MaxStreamWindowSize *= 2 + + client, server := testClientServerConfig(conf) + defer client.Close() + defer server.Close() + + stream, err := client.Open() + if err != nil { + t.Fatalf("err: %v", err) + } + defer stream.Close() + + stream2, err := server.Accept() + if err != nil { + t.Fatalf("err: %v", err) + } + defer stream2.Close() + + stream.SetWriteDeadline(time.Now().Add(10 * time.Millisecond)) + buf := make([]byte, conf.MaxStreamWindowSize) + n, err := stream.Write(buf) + if err != nil { + t.Fatalf("err: %v", err) + } + if n != len(buf) { + t.Fatalf("short write: %d", n) + } +} + +type UnlimitedReader struct{} + +func (u *UnlimitedReader) Read(p []byte) (int, error) { + runtime.Gosched() + return len(p), nil +} + +func TestSendData_VeryLarge(t *testing.T) { + client, server := testClientServer() + defer client.Close() + defer server.Close() + + var n int64 = 1 * 1024 * 1024 * 1024 + var workers int = 16 + + wg := &sync.WaitGroup{} + wg.Add(workers * 2) + + for i := 0; i < workers; i++ { + go func() { + defer wg.Done() + stream, err := server.AcceptStream() + if err != nil { + t.Fatalf("err: %v", err) + } + defer stream.Close() + + buf := make([]byte, 4) + _, err = stream.Read(buf) + if err != nil { + t.Fatalf("err: %v", err) + } + if !bytes.Equal(buf, []byte{0, 1, 2, 3}) { + t.Fatalf("bad header") + } + + recv, err := io.Copy(ioutil.Discard, stream) + if err != nil { + t.Fatalf("err: %v", err) + } + if recv != n { 
+ t.Fatalf("bad: %v", recv) + } + }() + } + for i := 0; i < workers; i++ { + go func() { + defer wg.Done() + stream, err := client.Open() + if err != nil { + t.Fatalf("err: %v", err) + } + defer stream.Close() + + _, err = stream.Write([]byte{0, 1, 2, 3}) + if err != nil { + t.Fatalf("err: %v", err) + } + + unlimited := &UnlimitedReader{} + sent, err := io.Copy(stream, io.LimitReader(unlimited, n)) + if err != nil { + t.Fatalf("err: %v", err) + } + if sent != n { + t.Fatalf("bad: %v", sent) + } + }() + } + + doneCh := make(chan struct{}) + go func() { + wg.Wait() + close(doneCh) + }() + select { + case <-doneCh: + case <-time.After(20 * time.Second): + panic("timeout") + } +} + +func TestBacklogExceeded_Accept(t *testing.T) { + client, server := testClientServer() + defer client.Close() + defer server.Close() + + max := 5 * client.config.AcceptBacklog + go func() { + for i := 0; i < max; i++ { + stream, err := server.Accept() + if err != nil { + t.Fatalf("err: %v", err) + } + defer stream.Close() + } + }() + + // Fill the backlog + for i := 0; i < max; i++ { + stream, err := client.Open() + if err != nil { + t.Fatalf("err: %v", err) + } + defer stream.Close() + + if _, err := stream.Write([]byte("foo")); err != nil { + t.Fatalf("err: %v", err) + } + } +} + +func TestSession_WindowUpdateWriteDuringRead(t *testing.T) { + client, server := testClientServerConfig(testConfNoKeepAlive()) + defer client.Close() + defer server.Close() + + var wg sync.WaitGroup + wg.Add(2) + + // Choose a huge flood size that we know will result in a window update. + flood := int64(client.config.MaxStreamWindowSize) - 1 + + // The server will accept a new stream and then flood data to it. + go func() { + defer wg.Done() + + stream, err := server.AcceptStream() + if err != nil { + t.Fatalf("err: %v", err) + } + defer stream.Close() + + n, err := stream.Write(make([]byte, flood)) + if err != nil { + t.Fatalf("err: %v", err) + } + if int64(n) != flood { + t.Fatalf("short write: %d", n) + } + }() + + // The client will open a stream, block outbound writes, and then + // listen to the flood from the server, which should time out since + // it won't be able to send the window update. + go func() { + defer wg.Done() + + stream, err := client.OpenStream() + if err != nil { + t.Fatalf("err: %v", err) + } + defer stream.Close() + + conn := client.conn.(*pipeConn) + conn.writeBlocker.Lock() + + _, err = stream.Read(make([]byte, flood)) + if err != ErrConnectionWriteTimeout { + t.Fatalf("err: %v", err) + } + }() + + wg.Wait() +} + +func TestSession_sendNoWait_Timeout(t *testing.T) { + client, server := testClientServerConfig(testConfNoKeepAlive()) + defer client.Close() + defer server.Close() + + var wg sync.WaitGroup + wg.Add(2) + + go func() { + defer wg.Done() + + stream, err := server.AcceptStream() + if err != nil { + t.Fatalf("err: %v", err) + } + defer stream.Close() + }() + + // The client will open the stream and then block outbound writes, we'll + // probe sendNoWait once it gets into that state. 
+ go func() { + defer wg.Done() + + stream, err := client.OpenStream() + if err != nil { + t.Fatalf("err: %v", err) + } + defer stream.Close() + + conn := client.conn.(*pipeConn) + conn.writeBlocker.Lock() + + hdr := header(make([]byte, headerSize)) + hdr.encode(typePing, flagACK, 0, 0) + for { + err = client.sendNoWait(hdr) + if err == nil { + continue + } else if err == ErrConnectionWriteTimeout { + break + } else { + t.Fatalf("err: %v", err) + } + } + }() + + wg.Wait() +} + +func TestSession_PingOfDeath(t *testing.T) { + client, server := testClientServerConfig(testConfNoKeepAlive()) + defer client.Close() + defer server.Close() + + var wg sync.WaitGroup + wg.Add(2) + + var doPingOfDeath sync.Mutex + doPingOfDeath.Lock() + + // This is used later to block outbound writes. + conn := server.conn.(*pipeConn) + + // The server will accept a stream, block outbound writes, and then + // flood its send channel so that no more headers can be queued. + go func() { + defer wg.Done() + + stream, err := server.AcceptStream() + if err != nil { + t.Fatalf("err: %v", err) + } + defer stream.Close() + + conn.writeBlocker.Lock() + for { + hdr := header(make([]byte, headerSize)) + hdr.encode(typePing, 0, 0, 0) + err = server.sendNoWait(hdr) + if err == nil { + continue + } else if err == ErrConnectionWriteTimeout { + break + } else { + t.Fatalf("err: %v", err) + } + } + + doPingOfDeath.Unlock() + }() + + // The client will open a stream and then send the server a ping once it + // can no longer write. This makes sure the server doesn't deadlock reads + // while trying to reply to the ping with no ability to write. + go func() { + defer wg.Done() + + stream, err := client.OpenStream() + if err != nil { + t.Fatalf("err: %v", err) + } + defer stream.Close() + + // This ping will never unblock because the ping id will never + // show up in a response. + doPingOfDeath.Lock() + go func() { client.Ping() }() + + // Wait for a while to make sure the previous ping times out, + // then turn writes back on and make sure a ping works again. + time.Sleep(2 * server.config.ConnectionWriteTimeout) + conn.writeBlocker.Unlock() + if _, err = client.Ping(); err != nil { + t.Fatalf("err: %v", err) + } + }() + + wg.Wait() +} + +func TestSession_ConnectionWriteTimeout(t *testing.T) { + client, server := testClientServerConfig(testConfNoKeepAlive()) + defer client.Close() + defer server.Close() + + var wg sync.WaitGroup + wg.Add(2) + + go func() { + defer wg.Done() + + stream, err := server.AcceptStream() + if err != nil { + t.Fatalf("err: %v", err) + } + defer stream.Close() + }() + + // The client will open the stream and then block outbound writes, we'll + // tee up a write and make sure it eventually times out. + go func() { + defer wg.Done() + + stream, err := client.OpenStream() + if err != nil { + t.Fatalf("err: %v", err) + } + defer stream.Close() + + conn := client.conn.(*pipeConn) + conn.writeBlocker.Lock() + + // Since the write goroutine is blocked then this will return a + // timeout since it can't get feedback about whether the write + // worked. 
+		n, err := stream.Write([]byte("hello"))
+		if err != ErrConnectionWriteTimeout {
+			t.Fatalf("err: %v", err)
+		}
+		if n != 0 {
+			t.Fatalf("lied about writes: %d", n)
+		}
+	}()
+
+	wg.Wait()
+}
diff --git a/vendor/github.com/hashicorp/yamux/spec.md b/vendor/github.com/hashicorp/yamux/spec.md
new file mode 100644
index 000000000..419470b7f
--- /dev/null
+++ b/vendor/github.com/hashicorp/yamux/spec.md
@@ -0,0 +1,141 @@
+# Specification
+
+We use this document to detail the internal specification of Yamux.
+It serves both as a guide for implementing Yamux and as a reference
+for building alternative interoperable libraries.
+
+# Framing
+
+Yamux uses a streaming connection underneath, but imposes a message
+framing so that it can be shared between many logical streams. Each
+frame contains a header like:
+
+* Version (8 bits)
+* Type (8 bits)
+* Flags (16 bits)
+* StreamID (32 bits)
+* Length (32 bits)
+
+This means that each header has a 12 byte overhead.
+All fields are encoded in network order (big endian).
+Each field is described below:
+
+## Version Field
+
+The version field is used for future backwards compatibility. At the
+current time, the field is always set to 0, to indicate the initial
+version.
+
+## Type Field
+
+The type field is used to switch the frame message type. The following
+message types are supported:
+
+* 0x0 Data - Used to transmit data. May transmit zero length payloads
+  depending on the flags.
+
+* 0x1 Window Update - Used to update the sender's receive window size.
+  This is used to implement per-session flow control.
+
+* 0x2 Ping - Used to measure RTT. It can also be used to heart-beat
+  and do keep-alives over TCP.
+
+* 0x3 Go Away - Used to close a session.
+
+## Flag Field
+
+The flags field is used to provide additional information related
+to the message type. The following flags are supported:
+
+* 0x1 SYN - Signals the start of a new stream. May be sent with a data or
+  window update message. Also sent with a ping to indicate outbound.
+
+* 0x2 ACK - Acknowledges the start of a new stream. May be sent with a data
+  or window update message. Also sent with a ping to indicate response.
+
+* 0x4 FIN - Performs a half-close of a stream. May be sent with a data
+  message or window update.
+
+* 0x8 RST - Reset a stream immediately. May be sent with a data or
+  window update message.
+
+## StreamID Field
+
+The StreamID field is used to identify the logical stream the frame
+is addressing. The client side should use odd IDs, and the server even.
+This prevents any collisions. Additionally, the 0 ID is reserved to represent
+the session.
+
+Both Ping and Go Away messages should always use the 0 StreamID.
+
+## Length Field
+
+The meaning of the length field depends on the message type:
+
+* Data - provides the length of bytes following the header
+* Window update - provides a delta update to the window size
+* Ping - Contains an opaque value, echoed back
+* Go Away - Contains an error code
+
+# Message Flow
+
+There is no explicit connection setup, as Yamux relies on an underlying
+transport to be provided. However, there is a distinction between client
+and server side of the connection.
+
+## Opening a stream
+
+To open a stream, an initial data or window update frame is sent
+with a new StreamID. The SYN flag should be set to signal a new stream.
+
+The receiver must then reply with either a data or window update frame
+with the StreamID along with the ACK flag to accept the stream or with
+the RST flag to reject the stream.
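+
+As a non-normative sketch (illustrative only, and using raw values
+rather than any particular library's constants), a client
+implementation might encode the initial SYN frame for its first
+stream like so, following the header layout from the Framing section:
+
+```go
+package main
+
+import (
+	"encoding/binary"
+	"fmt"
+)
+
+func main() {
+	// Window update frame (type 0x1) carrying the SYN flag (0x1) to
+	// open stream 1. All fields are big endian, per the Framing section.
+	hdr := make([]byte, 12)
+	hdr[0] = 0                                // version 0
+	hdr[1] = 0x1                              // type: window update
+	binary.BigEndian.PutUint16(hdr[2:4], 0x1) // flags: SYN
+	binary.BigEndian.PutUint32(hdr[4:8], 1)   // stream ID 1 (client IDs are odd)
+	binary.BigEndian.PutUint32(hdr[8:12], 0)  // length: window delta of 0
+	fmt.Printf("% x\n", hdr)                  // 00 01 00 01 00 00 00 01 00 00 00 00
+}
+```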
+
+Because we are relying on the reliable stream underneath, a connection
+can begin sending data once the SYN flag is sent. The corresponding
+ACK does not need to be received. This is particularly well suited
+for an RPC system where a client wants to open a stream and immediately
+fire a request without waiting for the RTT of the ACK.
+
+This does introduce the possibility of a connection being rejected
+after data has been sent already. This is a slight semantic difference
+from TCP, where the connection cannot be refused after it is opened.
+Clients should be prepared to handle this by checking for an error
+that indicates a RST was received.
+
+## Closing a stream
+
+To close a stream, either side sends a data or window update frame
+along with the FIN flag. This does a half-close indicating the sender
+will send no further data.
+
+Once both sides have closed the connection, the stream is closed.
+
+Alternatively, if an error occurs, the RST flag can be used to
+hard close a stream immediately.
+
+## Flow Control
+
+Yamux initially starts each stream with a 256KB window size.
+There is no window size for the session.
+
+To prevent the streams from stalling, window update frames should be
+sent regularly. Yamux can be configured to provide a larger limit for
+window sizes. Both sides assume the initial 256KB window, but can
+immediately send a window update as part of the SYN/ACK indicating a
+larger window.
+
+Both sides should track the number of bytes sent in Data frames
+only, as only those bytes are counted against the window size.
+
+## Session termination
+
+When a session is being terminated, the Go Away message should
+be sent. The Length should be set to one of the following to
+provide an error code:
+
+* 0x0 Normal termination
+* 0x1 Protocol error
+* 0x2 Internal error
+
diff --git a/vendor/github.com/hashicorp/yamux/stream.go b/vendor/github.com/hashicorp/yamux/stream.go
new file mode 100644
index 000000000..4c3242d3e
--- /dev/null
+++ b/vendor/github.com/hashicorp/yamux/stream.go
@@ -0,0 +1,452 @@
+package yamux
+
+import (
+	"bytes"
+	"io"
+	"sync"
+	"sync/atomic"
+	"time"
+)
+
+type streamState int
+
+const (
+	streamInit streamState = iota
+	streamSYNSent
+	streamSYNReceived
+	streamEstablished
+	streamLocalClose
+	streamRemoteClose
+	streamClosed
+	streamReset
+)
+
+// Stream is used to represent a logical stream
+// within a session.
+type Stream struct { + recvWindow uint32 + sendWindow uint32 + + id uint32 + session *Session + + state streamState + stateLock sync.Mutex + + recvBuf *bytes.Buffer + recvLock sync.Mutex + + controlHdr header + controlErr chan error + controlHdrLock sync.Mutex + + sendHdr header + sendErr chan error + sendLock sync.Mutex + + recvNotifyCh chan struct{} + sendNotifyCh chan struct{} + + readDeadline time.Time + writeDeadline time.Time +} + +// newStream is used to construct a new stream within +// a given session for an ID +func newStream(session *Session, id uint32, state streamState) *Stream { + s := &Stream{ + id: id, + session: session, + state: state, + controlHdr: header(make([]byte, headerSize)), + controlErr: make(chan error, 1), + sendHdr: header(make([]byte, headerSize)), + sendErr: make(chan error, 1), + recvWindow: initialStreamWindow, + sendWindow: initialStreamWindow, + recvNotifyCh: make(chan struct{}, 1), + sendNotifyCh: make(chan struct{}, 1), + } + return s +} + +// Session returns the associated stream session +func (s *Stream) Session() *Session { + return s.session +} + +// StreamID returns the ID of this stream +func (s *Stream) StreamID() uint32 { + return s.id +} + +// Read is used to read from the stream +func (s *Stream) Read(b []byte) (n int, err error) { + defer asyncNotify(s.recvNotifyCh) +START: + s.stateLock.Lock() + switch s.state { + case streamLocalClose: + fallthrough + case streamRemoteClose: + fallthrough + case streamClosed: + if s.recvBuf == nil || s.recvBuf.Len() == 0 { + s.stateLock.Unlock() + return 0, io.EOF + } + case streamReset: + s.stateLock.Unlock() + return 0, ErrConnectionReset + } + s.stateLock.Unlock() + + // If there is no data available, block + s.recvLock.Lock() + if s.recvBuf == nil || s.recvBuf.Len() == 0 { + s.recvLock.Unlock() + goto WAIT + } + + // Read any bytes + n, _ = s.recvBuf.Read(b) + s.recvLock.Unlock() + + // Send a window update potentially + err = s.sendWindowUpdate() + return n, err + +WAIT: + var timeout <-chan time.Time + if !s.readDeadline.IsZero() { + delay := s.readDeadline.Sub(time.Now()) + timeout = time.After(delay) + } + select { + case <-s.recvNotifyCh: + goto START + case <-timeout: + return 0, ErrTimeout + } +} + +// Write is used to write to the stream +func (s *Stream) Write(b []byte) (n int, err error) { + s.sendLock.Lock() + defer s.sendLock.Unlock() + total := 0 + for total < len(b) { + n, err := s.write(b[total:]) + total += n + if err != nil { + return total, err + } + } + return total, nil +} + +// write is used to write to the stream, may return on +// a short write. 
+func (s *Stream) write(b []byte) (n int, err error) {
+	var flags uint16
+	var max uint32
+	var body io.Reader
+START:
+	s.stateLock.Lock()
+	switch s.state {
+	case streamLocalClose:
+		fallthrough
+	case streamClosed:
+		s.stateLock.Unlock()
+		return 0, ErrStreamClosed
+	case streamReset:
+		s.stateLock.Unlock()
+		return 0, ErrConnectionReset
+	}
+	s.stateLock.Unlock()
+
+	// If there is no room in the send window, block
+	window := atomic.LoadUint32(&s.sendWindow)
+	if window == 0 {
+		goto WAIT
+	}
+
+	// Determine the flags if any
+	flags = s.sendFlags()
+
+	// Send up to our send window
+	max = min(window, uint32(len(b)))
+	body = bytes.NewReader(b[:max])
+
+	// Send the header
+	s.sendHdr.encode(typeData, flags, s.id, max)
+	if err := s.session.waitForSendErr(s.sendHdr, body, s.sendErr); err != nil {
+		return 0, err
+	}
+
+	// Reduce our send window
+	atomic.AddUint32(&s.sendWindow, ^uint32(max-1))
+
+	return int(max), err
+
+WAIT:
+	var timeout <-chan time.Time
+	if !s.writeDeadline.IsZero() {
+		delay := s.writeDeadline.Sub(time.Now())
+		timeout = time.After(delay)
+	}
+	select {
+	case <-s.sendNotifyCh:
+		goto START
+	case <-timeout:
+		return 0, ErrTimeout
+	}
+	return 0, nil
+}
+
+// sendFlags determines any flags that are appropriate
+// based on the current stream state
+func (s *Stream) sendFlags() uint16 {
+	s.stateLock.Lock()
+	defer s.stateLock.Unlock()
+	var flags uint16
+	switch s.state {
+	case streamInit:
+		flags |= flagSYN
+		s.state = streamSYNSent
+	case streamSYNReceived:
+		flags |= flagACK
+		s.state = streamEstablished
+	}
+	return flags
+}
+
+// sendWindowUpdate potentially sends a window update enabling
+// further writes to take place.
+func (s *Stream) sendWindowUpdate() error {
+	s.controlHdrLock.Lock()
+	defer s.controlHdrLock.Unlock()
+
+	// Determine the delta update
+	max := s.session.config.MaxStreamWindowSize
+	delta := max - atomic.LoadUint32(&s.recvWindow)
+
+	// Determine the flags if any
+	flags := s.sendFlags()
+
+	// Check if we can omit the update
+	if delta < (max/2) && flags == 0 {
+		return nil
+	}
+
+	// Update our window
+	atomic.AddUint32(&s.recvWindow, delta)
+
+	// Send the header
+	s.controlHdr.encode(typeWindowUpdate, flags, s.id, delta)
+	if err := s.session.waitForSendErr(s.controlHdr, nil, s.controlErr); err != nil {
+		return err
+	}
+	return nil
+}
+
+// sendClose is used to send a FIN
+func (s *Stream) sendClose() error {
+	s.controlHdrLock.Lock()
+	defer s.controlHdrLock.Unlock()
+
+	flags := s.sendFlags()
+	flags |= flagFIN
+	s.controlHdr.encode(typeWindowUpdate, flags, s.id, 0)
+	if err := s.session.waitForSendErr(s.controlHdr, nil, s.controlErr); err != nil {
+		return err
+	}
+	return nil
+}
+
+// Close is used to close the stream
+func (s *Stream) Close() error {
+	closeStream := false
+	s.stateLock.Lock()
+	switch s.state {
+	// Opened means we need to signal a close
+	case streamSYNSent:
+		fallthrough
+	case streamSYNReceived:
+		fallthrough
+	case streamEstablished:
+		s.state = streamLocalClose
+		goto SEND_CLOSE
+
+	case streamLocalClose:
+	case streamRemoteClose:
+		s.state = streamClosed
+		closeStream = true
+		goto SEND_CLOSE
+
+	case streamClosed:
+	case streamReset:
+	default:
+		panic("unhandled state")
+	}
+	s.stateLock.Unlock()
+	return nil
+SEND_CLOSE:
+	s.stateLock.Unlock()
+	s.sendClose()
+	s.notifyWaiting()
+	if closeStream {
+		s.session.closeStream(s.id)
+	}
+	return nil
+}
+
+// forceClose is used for when the session is exiting
+func (s *Stream) forceClose() {
+	s.stateLock.Lock()
+	s.state = streamClosed
+	s.stateLock.Unlock()
+	s.notifyWaiting()
+}
+
+// processFlags is used to update the state of the stream
+// based on set flags, if any.
+func (s *Stream) processFlags(flags uint16) error {
+	// Close the stream without holding the state lock
+	closeStream := false
+	defer func() {
+		if closeStream {
+			s.session.closeStream(s.id)
+		}
+	}()
+
+	s.stateLock.Lock()
+	defer s.stateLock.Unlock()
+	if flags&flagACK == flagACK {
+		if s.state == streamSYNSent {
+			s.state = streamEstablished
+		}
+		s.session.establishStream()
+	}
+	if flags&flagFIN == flagFIN {
+		switch s.state {
+		case streamSYNSent:
+			fallthrough
+		case streamSYNReceived:
+			fallthrough
+		case streamEstablished:
+			s.state = streamRemoteClose
+			s.notifyWaiting()
+		case streamLocalClose:
+			s.state = streamClosed
+			closeStream = true
+			s.notifyWaiting()
+		default:
+			s.session.logger.Printf("[ERR] yamux: unexpected FIN flag in state %d", s.state)
+			return ErrUnexpectedFlag
+		}
+	}
+	if flags&flagRST == flagRST {
+		if s.state == streamSYNSent {
+			s.session.establishStream()
+		}
+		s.state = streamReset
+		closeStream = true
+		s.notifyWaiting()
+	}
+	return nil
+}
+
+// notifyWaiting notifies all the waiting channels
+func (s *Stream) notifyWaiting() {
+	asyncNotify(s.recvNotifyCh)
+	asyncNotify(s.sendNotifyCh)
+}
+
+// incrSendWindow updates the size of our send window
+func (s *Stream) incrSendWindow(hdr header, flags uint16) error {
+	if err := s.processFlags(flags); err != nil {
+		return err
+	}
+
+	// Increase window, unblock a sender
+	atomic.AddUint32(&s.sendWindow, hdr.Length())
+	asyncNotify(s.sendNotifyCh)
+	return nil
+}
+
+// readData is used to handle a data frame
+func (s *Stream) readData(hdr header, flags uint16, conn io.Reader) error {
+	if err := s.processFlags(flags); err != nil {
+		return err
+	}
+
+	// Check that our recv window is not exceeded
+	length := hdr.Length()
+	if length == 0 {
+		return nil
+	}
+	if remain := atomic.LoadUint32(&s.recvWindow); length > remain {
+		s.session.logger.Printf("[ERR] yamux: receive window exceeded (stream: %d, remain: %d, recv: %d)", s.id, remain, length)
+		return ErrRecvWindowExceeded
+	}
+
+	// Wrap in a limited reader
+	conn = &io.LimitedReader{R: conn, N: int64(length)}
+
+	// Copy into buffer
+	s.recvLock.Lock()
+	if s.recvBuf == nil {
+		// Allocate the receive buffer just-in-time to fit the full data frame.
+		// This way we can read in the whole packet without further allocations.
+		s.recvBuf = bytes.NewBuffer(make([]byte, 0, length))
+	}
+	if _, err := io.Copy(s.recvBuf, conn); err != nil {
+		s.session.logger.Printf("[ERR] yamux: Failed to read stream data: %v", err)
+		s.recvLock.Unlock()
+		return err
+	}
+
+	// Decrement the receive window
+	atomic.AddUint32(&s.recvWindow, ^uint32(length-1))
+	s.recvLock.Unlock()
+
+	// Unblock any readers
+	asyncNotify(s.recvNotifyCh)
+	return nil
+}
+
+// SetDeadline sets the read and write deadlines
+func (s *Stream) SetDeadline(t time.Time) error {
+	if err := s.SetReadDeadline(t); err != nil {
+		return err
+	}
+	if err := s.SetWriteDeadline(t); err != nil {
+		return err
+	}
+	return nil
+}
+
+// SetReadDeadline sets the deadline for future Read calls.
+func (s *Stream) SetReadDeadline(t time.Time) error { + s.readDeadline = t + return nil +} + +// SetWriteDeadline sets the deadline for future Write calls +func (s *Stream) SetWriteDeadline(t time.Time) error { + s.writeDeadline = t + return nil +} + +// Shrink is used to compact the amount of buffers utilized +// This is useful when using Yamux in a connection pool to reduce +// the idle memory utilization. +func (s *Stream) Shrink() { + s.recvLock.Lock() + if s.recvBuf != nil && s.recvBuf.Len() == 0 { + s.recvBuf = nil + } + s.recvLock.Unlock() +} diff --git a/vendor/github.com/hashicorp/yamux/util.go b/vendor/github.com/hashicorp/yamux/util.go new file mode 100644 index 000000000..5fe45afcd --- /dev/null +++ b/vendor/github.com/hashicorp/yamux/util.go @@ -0,0 +1,28 @@ +package yamux + +// asyncSendErr is used to try an async send of an error +func asyncSendErr(ch chan error, err error) { + if ch == nil { + return + } + select { + case ch <- err: + default: + } +} + +// asyncNotify is used to signal a waiting goroutine +func asyncNotify(ch chan struct{}) { + select { + case ch <- struct{}{}: + default: + } +} + +// min computes the minimum of two values +func min(a, b uint32) uint32 { + if a < b { + return a + } + return b +} diff --git a/vendor/github.com/hashicorp/yamux/util_test.go b/vendor/github.com/hashicorp/yamux/util_test.go new file mode 100644 index 000000000..dd14623af --- /dev/null +++ b/vendor/github.com/hashicorp/yamux/util_test.go @@ -0,0 +1,50 @@ +package yamux + +import ( + "testing" +) + +func TestAsyncSendErr(t *testing.T) { + ch := make(chan error) + asyncSendErr(ch, ErrTimeout) + select { + case <-ch: + t.Fatalf("should not get") + default: + } + + ch = make(chan error, 1) + asyncSendErr(ch, ErrTimeout) + select { + case <-ch: + default: + t.Fatalf("should get") + } +} + +func TestAsyncNotify(t *testing.T) { + ch := make(chan struct{}) + asyncNotify(ch) + select { + case <-ch: + t.Fatalf("should not get") + default: + } + + ch = make(chan struct{}, 1) + asyncNotify(ch) + select { + case <-ch: + default: + t.Fatalf("should get") + } +} + +func TestMin(t *testing.T) { + if min(1, 2) != 1 { + t.Fatalf("bad") + } + if min(2, 1) != 1 { + t.Fatalf("bad") + } +} diff --git a/vendor/github.com/hmrc/vmware-govcd/.editorconfig b/vendor/github.com/hmrc/vmware-govcd/.editorconfig new file mode 100644 index 000000000..3152da69a --- /dev/null +++ b/vendor/github.com/hmrc/vmware-govcd/.editorconfig @@ -0,0 +1,26 @@ +# top-most EditorConfig file +root = true + +# Unix-style newlines with a newline ending every file +[*] +end_of_line = lf +insert_final_newline = true +indent_style = space +indent_size = 2 +trim_trailing_whitespace = true + +# Set default charset +[*.{js,py,go,scala,rb,java,html,css,less,sass,md}] +charset = utf-8 + +# Tab indentation (no size specified) +[*.go] +indent_style = tab + +[*.md] +trim_trailing_whitespace = false + +# Matches the exact files either package.json or .travis.yml +[{package.json,.travis.yml}] +indent_style = space +indent_size = 2 diff --git a/vendor/github.com/hmrc/vmware-govcd/.gitignore b/vendor/github.com/hmrc/vmware-govcd/.gitignore new file mode 100644 index 000000000..975162838 --- /dev/null +++ b/vendor/github.com/hmrc/vmware-govcd/.gitignore @@ -0,0 +1,25 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test +.cover + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + 
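+# Compiled binaries, test binaries and profiling output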
+*.exe +*.test +*.prof \ No newline at end of file diff --git a/vendor/github.com/hmrc/vmware-govcd/.travis.yml b/vendor/github.com/hmrc/vmware-govcd/.travis.yml new file mode 100644 index 000000000..d179553b3 --- /dev/null +++ b/vendor/github.com/hmrc/vmware-govcd/.travis.yml @@ -0,0 +1,16 @@ +before_install: + - sudo apt-get update -qq + - sudo apt-get install -y figlet cowsay +sudo: required +language: go +install: +- go get -t ./... +- go get golang.org/x/tools/cmd/cover +- go get github.com/mattn/goveralls +script: +- PATH="$HOME/gopath/bin:$PATH" +- script/coverage --coveralls +after_success: +- figlet "Build Successful!" | /usr/games/cowsay -n -e "^^" +after_failure: +- figlet "Build Failed!" | /usr/games/cowsay -n -s diff --git a/vendor/github.com/hmrc/vmware-govcd/LICENSE b/vendor/github.com/hmrc/vmware-govcd/LICENSE new file mode 100644 index 000000000..6a5b84c73 --- /dev/null +++ b/vendor/github.com/hmrc/vmware-govcd/LICENSE @@ -0,0 +1,569 @@ +govcloudair is distributed under the Apache License 2.0 + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2014 Fabio Rapposelli + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + + +testutil/http.go is distributed with a Mozilla Public License 2.0 + + Mozilla Public License, version 2.0 + + 1. Definitions + + 1.1. "Contributor" + + means each individual or legal entity that creates, contributes to the + creation of, or owns Covered Software. + + 1.2. "Contributor Version" + + means the combination of the Contributions of others (if any) used by a + Contributor and that particular Contributor's Contribution. + + 1.3. "Contribution" + + means Covered Software of a particular Contributor. + + 1.4. "Covered Software" + + means Source Code Form to which the initial Contributor has attached the + notice in Exhibit A, the Executable Form of such Source Code Form, and + Modifications of such Source Code Form, in each case including portions + thereof. + + 1.5. "Incompatible With Secondary Licenses" + means + + a. that the initial Contributor has attached the notice described in + Exhibit B to the Covered Software; or + + b. that the Covered Software was made available under the terms of + version 1.1 or earlier of the License, but not also under the terms of + a Secondary License. + + 1.6. "Executable Form" + + means any form of the work other than Source Code Form. + + 1.7. "Larger Work" + + means a work that combines Covered Software with other material, in a + separate file or files, that is not Covered Software. + + 1.8. "License" + + means this document. + + 1.9. "Licensable" + + means having the right to grant, to the maximum extent possible, whether + at the time of the initial grant or subsequently, any and all of the + rights conveyed by this License. + + 1.10. "Modifications" + + means any of the following: + + a. any file in Source Code Form that results from an addition to, + deletion from, or modification of the contents of Covered Software; or + + b. any new file in Source Code Form that contains any Covered Software. + + 1.11. "Patent Claims" of a Contributor + + means any patent claim(s), including without limitation, method, + process, and apparatus claims, in any patent Licensable by such + Contributor that would be infringed, but for the grant of the License, + by the making, using, selling, offering for sale, having made, import, + or transfer of either its Contributions or its Contributor Version. + + 1.12. 
"Secondary License" + + means either the GNU General Public License, Version 2.0, the GNU Lesser + General Public License, Version 2.1, the GNU Affero General Public + License, Version 3.0, or any later versions of those licenses. + + 1.13. "Source Code Form" + + means the form of the work preferred for making modifications. + + 1.14. "You" (or "Your") + + means an individual or a legal entity exercising rights under this + License. For legal entities, "You" includes any entity that controls, is + controlled by, or is under common control with You. For purposes of this + definition, "control" means (a) the power, direct or indirect, to cause + the direction or management of such entity, whether by contract or + otherwise, or (b) ownership of more than fifty percent (50%) of the + outstanding shares or beneficial ownership of such entity. + + + 2. License Grants and Conditions + + 2.1. Grants + + Each Contributor hereby grants You a world-wide, royalty-free, + non-exclusive license: + + a. under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or + as part of a Larger Work; and + + b. under Patent Claims of such Contributor to make, use, sell, offer for + sale, have made, import, and otherwise transfer either its + Contributions or its Contributor Version. + + 2.2. Effective Date + + The licenses granted in Section 2.1 with respect to any Contribution + become effective for each Contribution on the date the Contributor first + distributes such Contribution. + + 2.3. Limitations on Grant Scope + + The licenses granted in this Section 2 are the only rights granted under + this License. No additional rights or licenses will be implied from the + distribution or licensing of Covered Software under this License. + Notwithstanding Section 2.1(b) above, no patent license is granted by a + Contributor: + + a. for any code that a Contributor has removed from Covered Software; or + + b. for infringements caused by: (i) Your and any other third party's + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + + c. under Patent Claims infringed by Covered Software in the absence of + its Contributions. + + This License does not grant any rights in the trademarks, service marks, + or logos of any Contributor (except as may be necessary to comply with + the notice requirements in Section 3.4). + + 2.4. Subsequent Licenses + + No Contributor makes additional grants as a result of Your choice to + distribute the Covered Software under a subsequent version of this + License (see Section 10.2) or under the terms of a Secondary License (if + permitted under the terms of Section 3.3). + + 2.5. Representation + + Each Contributor represents that the Contributor believes its + Contributions are its original creation(s) or it has sufficient rights to + grant the rights to its Contributions conveyed by this License. + + 2.6. Fair Use + + This License is not intended to limit any rights You have under + applicable copyright doctrines of fair use, fair dealing, or other + equivalents. + + 2.7. Conditions + + Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in + Section 2.1. + + + 3. Responsibilities + + 3.1. 
Distribution of Source Form + + All distribution of Covered Software in Source Code Form, including any + Modifications that You create or to which You contribute, must be under + the terms of this License. You must inform recipients that the Source + Code Form of the Covered Software is governed by the terms of this + License, and how they can obtain a copy of this License. You may not + attempt to alter or restrict the recipients' rights in the Source Code + Form. + + 3.2. Distribution of Executable Form + + If You distribute Covered Software in Executable Form then: + + a. such Covered Software must also be made available in Source Code Form, + as described in Section 3.1, and You must inform recipients of the + Executable Form how they can obtain a copy of such Source Code Form by + reasonable means in a timely manner, at a charge no more than the cost + of distribution to the recipient; and + + b. You may distribute such Executable Form under the terms of this + License, or sublicense it under different terms, provided that the + license for the Executable Form does not attempt to limit or alter the + recipients' rights in the Source Code Form under this License. + + 3.3. Distribution of a Larger Work + + You may create and distribute a Larger Work under terms of Your choice, + provided that You also comply with the requirements of this License for + the Covered Software. If the Larger Work is a combination of Covered + Software with a work governed by one or more Secondary Licenses, and the + Covered Software is not Incompatible With Secondary Licenses, this + License permits You to additionally distribute such Covered Software + under the terms of such Secondary License(s), so that the recipient of + the Larger Work may, at their option, further distribute the Covered + Software under the terms of either this License or such Secondary + License(s). + + 3.4. Notices + + You may not remove or alter the substance of any license notices + (including copyright notices, patent notices, disclaimers of warranty, or + limitations of liability) contained within the Source Code Form of the + Covered Software, except that You may alter any license notices to the + extent required to remedy known factual inaccuracies. + + 3.5. Application of Additional Terms + + You may choose to offer, and to charge a fee for, warranty, support, + indemnity or liability obligations to one or more recipients of Covered + Software. However, You may do so only on Your own behalf, and not on + behalf of any Contributor. You must make it absolutely clear that any + such warranty, support, indemnity, or liability obligation is offered by + You alone, and You hereby agree to indemnify every Contributor for any + liability incurred by such Contributor as a result of warranty, support, + indemnity or liability terms You offer. You may include additional + disclaimers of warranty and limitations of liability specific to any + jurisdiction. + + 4. Inability to Comply Due to Statute or Regulation + + If it is impossible for You to comply with any of the terms of this License + with respect to some or all of the Covered Software due to statute, + judicial order, or regulation then You must: (a) comply with the terms of + this License to the maximum extent possible; and (b) describe the + limitations and the code they affect. Such description must be placed in a + text file included with all distributions of the Covered Software under + this License. 
Except to the extent prohibited by statute or regulation, + such description must be sufficiently detailed for a recipient of ordinary + skill to be able to understand it. + + 5. Termination + + 5.1. The rights granted under this License will terminate automatically if You + fail to comply with any of its terms. However, if You become compliant, + then the rights granted under this License from a particular Contributor + are reinstated (a) provisionally, unless and until such Contributor + explicitly and finally terminates Your grants, and (b) on an ongoing + basis, if such Contributor fails to notify You of the non-compliance by + some reasonable means prior to 60 days after You have come back into + compliance. Moreover, Your grants from a particular Contributor are + reinstated on an ongoing basis if such Contributor notifies You of the + non-compliance by some reasonable means, this is the first time You have + received notice of non-compliance with this License from such + Contributor, and You become compliant prior to 30 days after Your receipt + of the notice. + + 5.2. If You initiate litigation against any entity by asserting a patent + infringement claim (excluding declaratory judgment actions, + counter-claims, and cross-claims) alleging that a Contributor Version + directly or indirectly infringes any patent, then the rights granted to + You by any and all Contributors for the Covered Software under Section + 2.1 of this License shall terminate. + + 5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user + license agreements (excluding distributors and resellers) which have been + validly granted by You or Your distributors under this License prior to + termination shall survive termination. + + 6. Disclaimer of Warranty + + Covered Software is provided under this License on an "as is" basis, + without warranty of any kind, either expressed, implied, or statutory, + including, without limitation, warranties that the Covered Software is free + of defects, merchantable, fit for a particular purpose or non-infringing. + The entire risk as to the quality and performance of the Covered Software + is with You. Should any Covered Software prove defective in any respect, + You (not any Contributor) assume the cost of any necessary servicing, + repair, or correction. This disclaimer of warranty constitutes an essential + part of this License. No use of any Covered Software is authorized under + this License except under this disclaimer. + + 7. Limitation of Liability + + Under no circumstances and under no legal theory, whether tort (including + negligence), contract, or otherwise, shall any Contributor, or anyone who + distributes Covered Software as permitted above, be liable to You for any + direct, indirect, special, incidental, or consequential damages of any + character including, without limitation, damages for lost profits, loss of + goodwill, work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses, even if such party shall have been + informed of the possibility of such damages. This limitation of liability + shall not apply to liability for death or personal injury resulting from + such party's negligence to the extent applicable law prohibits such + limitation. Some jurisdictions do not allow the exclusion or limitation of + incidental or consequential damages, so this exclusion and limitation may + not apply to You. + + 8. 
Litigation + + Any litigation relating to this License may be brought only in the courts + of a jurisdiction where the defendant maintains its principal place of + business and such litigation shall be governed by laws of that + jurisdiction, without reference to its conflict-of-law provisions. Nothing + in this Section shall prevent a party's ability to bring cross-claims or + counter-claims. + + 9. Miscellaneous + + This License represents the complete agreement concerning the subject + matter hereof. If any provision of this License is held to be + unenforceable, such provision shall be reformed only to the extent + necessary to make it enforceable. Any law or regulation which provides that + the language of a contract shall be construed against the drafter shall not + be used to construe this License against a Contributor. + + + 10. Versions of the License + + 10.1. New Versions + + Mozilla Foundation is the license steward. Except as provided in Section + 10.3, no one other than the license steward has the right to modify or + publish new versions of this License. Each version will be given a + distinguishing version number. + + 10.2. Effect of New Versions + + You may distribute the Covered Software under the terms of the version + of the License under which You originally received the Covered Software, + or under the terms of any subsequent version published by the license + steward. + + 10.3. Modified Versions + + If you create software not governed by this License, and you want to + create a new license for such software, you may create and use a + modified version of this License if you rename the license and remove + any references to the name of the license steward (except to note that + such modified license differs from this License). + + 10.4. Distributing Source Code Form that is Incompatible With Secondary + Licenses If You choose to distribute Source Code Form that is + Incompatible With Secondary Licenses under the terms of this version of + the License, the notice described in Exhibit B of this License must be + attached. + + Exhibit A - Source Code Form License Notice + + This Source Code Form is subject to the + terms of the Mozilla Public License, v. + 2.0. If a copy of the MPL was not + distributed with this file, You can + obtain one at + http://mozilla.org/MPL/2.0/. + + If it is not possible or desirable to put the notice in a particular file, + then You may include the notice in a location (such as a LICENSE file in a + relevant directory) where a recipient would be likely to look for such a + notice. + + You may add additional accurate notices of copyright ownership. + + Exhibit B - "Incompatible With Secondary Licenses" Notice + + This Source Code Form is "Incompatible + With Secondary Licenses", as defined by + the Mozilla Public License, v. 2.0. diff --git a/vendor/github.com/hmrc/vmware-govcd/README.md b/vendor/github.com/hmrc/vmware-govcd/README.md new file mode 100644 index 000000000..4a78fd2e9 --- /dev/null +++ b/vendor/github.com/hmrc/vmware-govcd/README.md @@ -0,0 +1,59 @@ +## vmware-govcd + +This package was originally forked from [github.com/vmware/govcloudair](https://github.com/vmware/govcloudair) before pulling in [rickard-von-essen's](https://github.com/rickard-von-essen) +great changes to allow using a [vCloud Director API](https://github.com/rickard-von-essen/govcloudair/tree/vcd-5.5). 
On top of this I have added features as needed for a Terraform provider for vCloud Director. + +### Example ### + +```go +package main + +import ( + "fmt" + "net/url" + "os" + + "github.com/hmrc/vmware-govcd" +) + +type Config struct { + User string + Password string + Org string + Href string + VDC string +} + +func (c *Config) Client() (*govcd.VCDClient, error) { + u, err := url.ParseRequestURI(c.Href) + if err != nil { + return nil, fmt.Errorf("Unable to parse url: %s", err) + } + + vcdclient := govcd.NewVCDClient(*u) + org, vcd, err := vcdclient.Authenticate(c.User, c.Password, c.Org, c.VDC) + if err != nil { + return nil, fmt.Errorf("Unable to authenticate: %s", err) + } + vcdclient.Org = org + vcdclient.OrgVdc = vcd + return vcdclient, nil +} + +func main() { + config := Config{ + User: "Username", + Password: "password", + Org: "vcd org", + Href: "vcd api url", + VDC: "vcd virtual datacenter name", + } + + client, err := config.Client() // We now have a client + if err != nil { + fmt.Println(err) + os.Exit(1) + } + fmt.Printf("Org URL: %s\n", client.OrgHREF.String()) +} +``` diff --git a/vendor/github.com/hmrc/vmware-govcd/api.go b/vendor/github.com/hmrc/vmware-govcd/api.go new file mode 100644 index 000000000..eda7f5a10 --- /dev/null +++ b/vendor/github.com/hmrc/vmware-govcd/api.go @@ -0,0 +1,107 @@ +/* + * Copyright 2014 VMware, Inc. All rights reserved. Licensed under the Apache v2 License. + */ + +// Package govcd provides a simple binding for vCloud Air / vCloud Director REST APIs. +package govcd + +import ( + "encoding/xml" + "fmt" + "io" + "io/ioutil" + "net/http" + "net/url" + + types "github.com/hmrc/vmware-govcd/types/v56" +) + +// Client provides a client to vCloud Air, values can be populated automatically using the Authenticate method. +type Client struct { + APIVersion string // The API version required + VCDToken string // Access Token (authorization header) + VCDAuthHeader string // Authorization header + VCDVDCHREF url.URL // HREF of the backend VDC you're using + Http http.Client // HttpClient is the client to use. Default will be used if not provided. +} + +// NewRequest creates a new HTTP request and applies necessary auth headers if +// set. +func (c *Client) NewRequest(params map[string]string, method string, u url.URL, body io.Reader) *http.Request { + + p := url.Values{} + + // Build up our request parameters + for k, v := range params { + p.Add(k, v) + } + + // Add the params to our URL + u.RawQuery = p.Encode() + + // Build the request, no point in checking for errors here as we're just + // passing a string version of an url.URL struct and http.NewRequest returns + // an error only if it can't process an url.ParseRequestURI(). + req, _ := http.NewRequest(method, u.String(), body) + + if c.VCDAuthHeader != "" && c.VCDToken != "" { + // Add the authorization header + req.Header.Add(c.VCDAuthHeader, c.VCDToken) + // Add the Accept header for VCD + req.Header.Add("Accept", "application/*+xml;version="+c.APIVersion) + } + + return req + +} + +// parseErr takes an error XML resp and returns a single string for use in error messages.
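+// The body is decoded into a types.Error, whose MajorErrorCode and Message fields are surfaced to the caller.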
+func parseErr(resp *http.Response) error { + + errBody := new(types.Error) + + // if there was an error decoding the body, just return that + if err := decodeBody(resp, errBody); err != nil { + return fmt.Errorf("error parsing error body for non-200 request: %s", err) + } + + return fmt.Errorf("API Error: %d: %s", errBody.MajorErrorCode, errBody.Message) +} + +// decodeBody is used to XML decode a response body +func decodeBody(resp *http.Response, out interface{}) error { + + body, err := ioutil.ReadAll(resp.Body) + if err != nil { + return err + } + + // Unmarshal the XML. + if err = xml.Unmarshal(body, &out); err != nil { + return err + } + + return nil +} + +// checkResp wraps http.Client.Do() and verifies the request, if status code +// is 2XX it passes back the response, if it's a known invalid status code it +// parses the resultant XML error and returns a descriptive error, if the +// status code is not handled it returns a generic error with the status code. +func checkResp(resp *http.Response, err error) (*http.Response, error) { + if err != nil { + return resp, err + } + + switch i := resp.StatusCode; { + // Valid request, return the response. + case i == 200 || i == 201 || i == 202 || i == 204: + return resp, nil + // Invalid request, parse the XML error returned and return it. + case i == 400 || i == 401 || i == 403 || i == 404 || i == 405 || i == 406 || i == 409 || i == 415 || i == 500 || i == 503 || i == 504: + return nil, parseErr(resp) + // Unhandled response. + default: + return nil, fmt.Errorf("unhandled API response, please report this issue, status code: %s", resp.Status) + } +} diff --git a/vendor/github.com/hmrc/vmware-govcd/api_test.go b/vendor/github.com/hmrc/vmware-govcd/api_test.go new file mode 100644 index 000000000..d7e7a5208 --- /dev/null +++ b/vendor/github.com/hmrc/vmware-govcd/api_test.go @@ -0,0 +1,3 @@ +package govcd + +import () diff --git a/vendor/github.com/hmrc/vmware-govcd/api_vca.go b/vendor/github.com/hmrc/vmware-govcd/api_vca.go new file mode 100644 index 000000000..16c5d8efd --- /dev/null +++ b/vendor/github.com/hmrc/vmware-govcd/api_vca.go @@ -0,0 +1,324 @@ +/* + * Copyright 2014 VMware, Inc. All rights reserved. Licensed under the Apache v2 License. + */ + +package govcd + +import ( + "fmt" + "net/http" + "net/url" + "os" + "time" + + types "github.com/hmrc/vmware-govcd/types/v56" +) + +// Client provides a client to vCloud Air, values can be populated automatically using the Authenticate method. +type VAClient struct { + VAToken string // vCloud Air authorization token + VAEndpoint url.URL // vCloud Air API endpoint + Region string // Region where the compute resource lives. 
+ Client Client // Client for the underlying vCD instance +} + +// VCHS API + +type services struct { + Service []struct { + Region string `xml:"region,attr"` + ServiceID string `xml:"serviceId,attr"` + ServiceType string `xml:"serviceType,attr"` + Type string `xml:"type,attr"` + HREF string `xml:"href,attr"` + } `xml:"Service"` +} + +type session struct { + Link []*types.Link `xml:"Link"` +} + +type computeResources struct { + VdcRef []struct { + Status string `xml:"status,attr"` + Name string `xml:"name,attr"` + Type string `xml:"type,attr"` + HREF string `xml:"href,attr"` + Link []*types.Link `xml:"Link"` + } `xml:"VdcRef"` +} + +type vCloudSession struct { + VdcLink []struct { + AuthorizationToken string `xml:"authorizationToken,attr"` + AuthorizationHeader string `xml:"authorizationHeader,attr"` + Name string `xml:"name,attr"` + HREF string `xml:"href,attr"` + } `xml:"VdcLink"` +} + +// + +func (c *VAClient) vaauthorize(user, pass string) (u url.URL, err error) { + + if user == "" { + user = os.Getenv("VCLOUDAIR_USERNAME") + } + + if pass == "" { + pass = os.Getenv("VCLOUDAIR_PASSWORD") + } + + s := c.VAEndpoint + s.Path += "/vchs/sessions" + + // No point in checking for errors here + req := c.Client.NewRequest(map[string]string{}, "POST", s, nil) + + // Set Basic Authentication Header + req.SetBasicAuth(user, pass) + + // Add the Accept header for vCA + req.Header.Add("Accept", "application/xml;version=5.6") + + resp, err := checkResp(c.Client.Http.Do(req)) + if err != nil { + return url.URL{}, err + } + defer resp.Body.Close() + + // Store the authentication header + c.VAToken = resp.Header.Get("X-Vchs-Authorization") + + session := new(session) + + if err = decodeBody(resp, session); err != nil { + return url.URL{}, fmt.Errorf("error decoding session response: %s", err) + } + + // Loop in the session struct to find right service and compute resource. + for _, s := range session.Link { + if s.Type == "application/xml;class=vnd.vmware.vchs.servicelist" && s.Rel == "down" { + u, err := url.ParseRequestURI(s.HREF) + return *u, err + } + } + return url.URL{}, fmt.Errorf("couldn't find a Service List in current session") +} + +func (c *VAClient) vaacquireservice(s url.URL, cid string) (u url.URL, err error) { + + if cid == "" { + cid = os.Getenv("VCLOUDAIR_COMPUTEID") + } + + req := c.Client.NewRequest(map[string]string{}, "GET", s, nil) + + // Add the Accept header for vCA + req.Header.Add("Accept", "application/xml;version=5.6") + + // Set Authorization Header for vCA + req.Header.Add("x-vchs-authorization", c.VAToken) + + resp, err := checkResp(c.Client.Http.Do(req)) + if err != nil { + return url.URL{}, fmt.Errorf("error processing compute action: %s", err) + } + + services := new(services) + + if err = decodeBody(resp, services); err != nil { + return url.URL{}, fmt.Errorf("error decoding services response: %s", err) + } + + // Loop in the Services struct to find right service and compute resource. 
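+ // A service matches on its exact ServiceID; the matching service's Region is also recorded on the client.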
+ for _, s := range services.Service { + if s.ServiceID == cid { + c.Region = s.Region + u, err := url.ParseRequestURI(s.HREF) + return *u, err + } + } + return url.URL{}, fmt.Errorf("couldn't find a Compute Resource in current service list") +} + +func (c *VAClient) vaacquirecompute(s url.URL, vid string) (u url.URL, err error) { + + if vid == "" { + vid = os.Getenv("VCLOUDAIR_VDCID") + } + + req := c.Client.NewRequest(map[string]string{}, "GET", s, nil) + + // Add the Accept header for vCA + req.Header.Add("Accept", "application/xml;version=5.6") + + // Set Authorization Header + req.Header.Add("x-vchs-authorization", c.VAToken) + + // TODO: wrap into checkresp to parse error + resp, err := checkResp(c.Client.Http.Do(req)) + if err != nil { + return url.URL{}, fmt.Errorf("error processing compute action: %s", err) + } + + computeresources := new(computeResources) + + if err = decodeBody(resp, computeresources); err != nil { + return url.URL{}, fmt.Errorf("error decoding computeresources response: %s", err) + } + + // Iterate through the ComputeResources struct searching for the right + // backend server + for _, s := range computeresources.VdcRef { + if s.Name == vid { + for _, t := range s.Link { + if t.Name == vid { + u, err := url.ParseRequestURI(t.HREF) + return *u, err + } + } + } + } + return url.URL{}, fmt.Errorf("couldn't find a VDC Resource in current Compute list") +} + +func (c *VAClient) vagetbackendauth(s url.URL, cid string) error { + + if cid == "" { + cid = os.Getenv("VCLOUDAIR_COMPUTEID") + } + + req := c.Client.NewRequest(map[string]string{}, "POST", s, nil) + + // Add the Accept header for vCA + req.Header.Add("Accept", "application/xml;version=5.6") + + // Set Authorization Header + req.Header.Add("x-vchs-authorization", c.VAToken) + + // TODO: wrap into checkresp to parse error + resp, err := checkResp(c.Client.Http.Do(req)) + if err != nil { + return fmt.Errorf("error processing backend url action: %s", err) + } + defer resp.Body.Close() + + vcloudsession := new(vCloudSession) + + if err = decodeBody(resp, vcloudsession); err != nil { + return fmt.Errorf("error decoding vcloudsession response: %s", err) + } + + // Get the backend session information + for _, s := range vcloudsession.VdcLink { + if s.Name == cid { + // Fetch the authorization token + c.Client.VCDToken = s.AuthorizationToken + + // Fetch the authorization header + c.Client.VCDAuthHeader = s.AuthorizationHeader + + u, err := url.ParseRequestURI(s.HREF) + if err != nil { + return fmt.Errorf("error decoding href: %s", err) + } + c.Client.VCDVDCHREF = *u + return nil + } + } + return fmt.Errorf("error finding the right backend resource") +} + +// NewVAClient returns a new empty client to authenticate against the vCloud Air +// service, the vCloud Air endpoint can be overridden by setting the +// VCLOUDAIR_ENDPOINT environment variable. +func NewVAClient() (*VAClient, error) { + + var u *url.URL + var err error + + if os.Getenv("VCLOUDAIR_ENDPOINT") != "" { + u, err = url.ParseRequestURI(os.Getenv("VCLOUDAIR_ENDPOINT")) + if err != nil { + return &VAClient{}, fmt.Errorf("cannot parse endpoint coming from VCLOUDAIR_ENDPOINT") + } + } else { + // Implicitly trust this URL parse. + u, _ = url.ParseRequestURI("https://vchs.vmware.com/api") + } + + VAClient := VAClient{ + VAEndpoint: *u, + Client: Client{ + APIVersion: "5.6", + // Patching things up as we're hitting several TLS timeouts. 
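+ // The generous 120-second TLS handshake timeout below exists for that reason.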
+ Http: http.Client{ + Transport: &http.Transport{ + Proxy: http.ProxyFromEnvironment, + TLSHandshakeTimeout: 120 * time.Second, + }, + }, + }, + } + return &VAClient, nil +} + +// Authenticate is a helper function that performs a complete login in vCloud +// Air and in the backend vCloud Director instance. +func (c *VAClient) Authenticate(username, password, computeid, vdcid string) (Vdc, error) { + // Authorize + vaservicehref, err := c.vaauthorize(username, password) + if err != nil { + return Vdc{}, fmt.Errorf("error Authorizing: %s", err) + } + + // Get Service + vacomputehref, err := c.vaacquireservice(vaservicehref, computeid) + if err != nil { + return Vdc{}, fmt.Errorf("error Acquiring Service: %s", err) + } + + // Get Compute + vavdchref, err := c.vaacquirecompute(vacomputehref, vdcid) + if err != nil { + return Vdc{}, fmt.Errorf("error Acquiring Compute: %s", err) + } + + // Get Backend Authorization + if err = c.vagetbackendauth(vavdchref, computeid); err != nil { + return Vdc{}, fmt.Errorf("error Acquiring Backend Authorization: %s", err) + } + + v, err := c.Client.retrieveVDC() + if err != nil { + return Vdc{}, fmt.Errorf("error Acquiring VDC: %s", err) + } + + return v, nil + +} + +// Disconnect performs a disconnection from the vCloud Air API endpoint. +func (c *VAClient) Disconnect() error { + if c.Client.VCDToken == "" && c.Client.VCDAuthHeader == "" && c.VAToken == "" { + return fmt.Errorf("cannot disconnect, client is not authenticated") + } + + s := c.VAEndpoint + s.Path += "/vchs/session" + + req := c.Client.NewRequest(map[string]string{}, "DELETE", s, nil) + + // Add the Accept header for vCA + req.Header.Add("Accept", "application/xml;version=5.6") + + // Set Authorization Header + req.Header.Add("x-vchs-authorization", c.VAToken) + + if _, err := checkResp(c.Client.Http.Do(req)); err != nil { + return fmt.Errorf("error processing session delete for vchs: %s", err) + } + + return nil +} diff --git a/vendor/github.com/hmrc/vmware-govcd/api_vca_test.go b/vendor/github.com/hmrc/vmware-govcd/api_vca_test.go new file mode 100644 index 000000000..9f3a5cf9c --- /dev/null +++ b/vendor/github.com/hmrc/vmware-govcd/api_vca_test.go @@ -0,0 +1,749 @@ +/* + * @Author: frapposelli + * @Project: govcloudair + * @Filename: api_test.go + * @Last Modified by: frapposelli + */ + +package govcd + +import ( + "net/url" + "os" + "testing" + + "github.com/hmrc/vmware-govcd/testutil" + . 
"gopkg.in/check.v1" +) + +var au, _ = url.ParseRequestURI("http://localhost:4444/api") +var aus, _ = url.ParseRequestURI("http://localhost:4444/api/vchs/services") +var auc, _ = url.ParseRequestURI("http://localhost:4444/api/vchs/compute/00000000-0000-0000-0000-000000000000") +var aucs, _ = url.ParseRequestURI("http://localhost:4444/api/vchs/compute/00000000-0000-0000-0000-000000000000/vdc/00000000-0000-0000-0000-000000000000/vcloudsession") + +func Test(t *testing.T) { TestingT(t) } + +type S struct { + client *VAClient + vdc Vdc + vapp *VApp +} + +var _ = Suite(&S{}) + +var testServer = testutil.NewHTTPServer() + +var authheader = map[string]string{"x-vchs-authorization": "012345678901234567890123456789"} + +func (s *S) SetUpSuite(c *C) { + testServer.Start() + var err error + + os.Setenv("VCLOUDAIR_ENDPOINT", "http://localhost:4444/api") + + s.client, err = NewVAClient() + if err != nil { + panic(err) + } + + testServer.ResponseMap(5, testutil.ResponseMap{ + "/api/vchs/sessions": testutil.Response{201, authheader, vaauthorization}, + "/api/vchs/services": testutil.Response{200, nil, vaservices}, + "/api/vchs/compute/00000000-0000-0000-0000-000000000000": testutil.Response{200, nil, vacompute}, + "/api/vchs/compute/00000000-0000-0000-0000-000000000000/vdc/00000000-0000-0000-0000-000000000000/vcloudsession": testutil.Response{201, nil, vabackend}, + "/api/vdc/00000000-0000-0000-0000-000000000000": testutil.Response{200, nil, vdcExample}, + }) + + s.vdc, err = s.client.Authenticate("username", "password", "CI123456-789", "VDC12345-6789") + s.vapp = NewVApp(&s.client.Client) + _ = testServer.WaitRequests(5) + testServer.Flush() + if err != nil { + panic(err) + } +} + +func (s *S) TearDownTest(c *C) { + testServer.Flush() +} + +func TestClient_vaauthorize(t *testing.T) { + testServer.Start() + var err error + + // Set up a working client + os.Setenv("VCLOUDAIR_ENDPOINT", "http://localhost:4444/api") + + client, err := NewVAClient() + if err != nil { + t.Fatalf("err: %v", err) + } + + // Set up a correct conversation + testServer.Response(201, authheader, vaauthorization) + _, err = client.vaauthorize("username", "password") + _ = testServer.WaitRequest() + if err != nil { + t.Fatalf("err: %v", err) + } + + // Test if token is correctly set on client. + if client.VAToken != "012345678901234567890123456789" { + t.Fatalf("VAtoken not set on client: %s", client.VAToken) + } + + // Test client errors + + // Test a correct response with a wrong status code + testServer.Response(404, authheader, notfoundErr) + _, err = client.vaauthorize("username", "password") + _ = testServer.WaitRequest() + if err == nil { + t.Fatalf("Request error not caught: %v", err) + } + + // Test an API error + testServer.Response(500, authheader, vcdError) + _, err = client.vaauthorize("username", "password") + _ = testServer.WaitRequest() + if err == nil { + t.Fatalf("Request error not caught: %v", err) + } + + // Test an API response that doesn't contain the param we're looking for. + testServer.Response(200, authheader, vaauthorizationErr) + _, err = client.vaauthorize("username", "password") + _ = testServer.WaitRequest() + if err == nil { + t.Fatalf("Request error not caught: %v", err) + } + + // Test an un-parsable response. 
+ testServer.Response(200, authheader, notfoundErr) + _, err = client.vaauthorize("username", "password") + _ = testServer.WaitRequest() + if err == nil { + t.Fatalf("Request error not caught: %v", err) + } + +} + +func TestClient_vaacquireservice(t *testing.T) { + testServer.Start() + var err error + + // Set up a working client + os.Setenv("VCLOUDAIR_ENDPOINT", "http://localhost:4444/api") + client, err := NewVAClient() + if err != nil { + t.Fatalf("err: %v", err) + } + client.VAToken = "012345678901234567890123456789" + + // Test a correct conversation + testServer.Response(200, nil, vaservices) + vacomputehref, err := client.vaacquireservice(*aus, "CI123456-789") + _ = testServer.WaitRequest() + if err != nil { + t.Fatalf("err: %v", err) + } + + if vacomputehref.String() != "http://localhost:4444/api/vchs/compute/00000000-0000-0000-0000-000000000000" { + t.Fatalf("VAComputeHREF not set on client: %s", vacomputehref) + } + + if client.Region != "US - Anywhere" { + t.Fatalf("Region not set on client: %s", client.Region) + } + + // Test client errors + + // Test a 404 + testServer.Response(404, nil, notfoundErr) + _, err = client.vaacquireservice(*aus, "CI123456-789") + _ = testServer.WaitRequest() + if err == nil { + t.Fatalf("Request error not caught: %v", err) + } + + // Test an API error + testServer.Response(500, nil, vcdError) + _, err = client.vaacquireservice(*aus, "CI123456-789") + _ = testServer.WaitRequest() + if err == nil { + t.Fatalf("Request error not caught: %v", err) + } + + // Test an unknown Compute ID + testServer.Response(200, nil, vaservices) + _, err = client.vaacquireservice(*aus, "NOTVALID-789") + _ = testServer.WaitRequest() + if err == nil { + t.Fatalf("Request error not caught: %v", err) + } + + // Test an un-parsable response + testServer.Response(200, nil, notfoundErr) + _, err = client.vaacquireservice(*aus, "CI123456-789") + _ = testServer.WaitRequest() + if err == nil { + t.Fatalf("Request error not caught: %v", err) + } + +} + +func TestClient_vaacquirecompute(t *testing.T) { + testServer.Start() + var err error + + os.Setenv("VCLOUDAIR_ENDPOINT", "http://localhost:4444/api") + client, err := NewVAClient() + if err != nil { + t.Fatalf("err: %v", err) + } + client.VAToken = "012345678901234567890123456789" + client.Region = "US - Anywhere" + + testServer.Response(200, nil, vacompute) + vavdchref, err := client.vaacquirecompute(*auc, "VDC12345-6789") + _ = testServer.WaitRequest() + if err != nil { + t.Fatalf("err: %v", err) + } + + if vavdchref.String() != "http://localhost:4444/api/vchs/compute/00000000-0000-0000-0000-000000000000/vdc/00000000-0000-0000-0000-000000000000/vcloudsession" { + t.Fatalf("VAVDCHREF not set on client: %s", vavdchref) + } + + // Test client errors + + // Test a 404 + testServer.Response(404, nil, notfoundErr) + _, err = client.vaacquirecompute(*auc, "VDC12345-6789") + _ = testServer.WaitRequest() + if err == nil { + t.Fatalf("Request error not caught: %v", err) + } + // Test an API error + testServer.Response(500, nil, vcdError) + _, err = client.vaacquirecompute(*auc, "VDC12345-6789") + _ = testServer.WaitRequest() + if err == nil { + t.Fatalf("Request error not caught: %v", err) + } + + // Test an unknown VDC ID + testServer.Response(200, nil, vacompute) + _, err = client.vaacquirecompute(*auc, "INVALID-6789") + _ = testServer.WaitRequest() + if err == nil { + t.Fatalf("Request error not caught: %v", err) + } + + // Test an un-parsable response + testServer.Response(200, nil, notfoundErr) + _, err = 
client.vaacquirecompute(*auc, "VDC12345-6789") + _ = testServer.WaitRequest() + if err == nil { + t.Fatalf("Request error not caught: %v", err) + } + +} + +func TestClient_vagetbackendauth(t *testing.T) { + testServer.Start() + var err error + + os.Setenv("VCLOUDAIR_ENDPOINT", "http://localhost:4444/api") + client, err := NewVAClient() + if err != nil { + t.Fatalf("err: %v", err) + } + client.VAToken = "012345678901234567890123456789" + client.Region = "US - Anywhere" + + testServer.Response(201, nil, vabackend) + err = client.vagetbackendauth(*aucs, "CI123456-789") + _ = testServer.WaitRequest() + if err != nil { + t.Fatalf("err: %v", err) + } + + if client.Client.VCDToken != "01234567890123456789012345678901" { + t.Fatalf("VCDToken not set on client: %s", client.Client.VCDToken) + } + if client.Client.VCDAuthHeader != "x-vcloud-authorization" { + t.Fatalf("VCDAuthHeader not set on client: %s", client.Client.VCDAuthHeader) + } + if client.Client.VCDVDCHREF.String() != "http://localhost:4444/api/vdc/00000000-0000-0000-0000-000000000000" { + t.Fatalf("VDC not set on client: %s", client.Client.VCDVDCHREF) + } + + // Test client errors + + // Test a 404 + testServer.Response(404, nil, notfoundErr) + err = client.vagetbackendauth(*aucs, "VDC12345-6789") + _ = testServer.WaitRequest() + if err == nil { + t.Fatalf("Request error not caught: %v", err) + } + // Test an API error + testServer.Response(500, nil, vcdError) + err = client.vagetbackendauth(*aucs, "VDC12345-6789") + _ = testServer.WaitRequest() + if err == nil { + t.Fatalf("Request error not caught: %v", err) + } + // Test an unknown backend VDC ID + testServer.Response(201, nil, vabackend) + err = client.vagetbackendauth(*aucs, "INVALID-6789") + _ = testServer.WaitRequest() + if err == nil { + t.Fatalf("Request error not caught: %v", err) + } + // Test an un-parsable response + testServer.Response(201, nil, notfoundErr) + err = client.vagetbackendauth(*aucs, "VDC12345-6789") + _ = testServer.WaitRequest() + if err == nil { + t.Fatalf("Request error not caught: %v", err) + } + // Test a botched backend VDC ID + testServer.Response(201, nil, vabackendErr) + err = client.vagetbackendauth(*aucs, "VDC12345-6789") + _ = testServer.WaitRequest() + if err == nil { + t.Fatalf("Request error not caught: %v", err) + } + +} + +// Env variable tests + +func TestClient_vaauthorize_env(t *testing.T) { + + os.Setenv("VCLOUDAIR_USERNAME", "username") + os.Setenv("VCLOUDAIR_PASSWORD", "password") + + testServer.Start() + var err error + os.Setenv("VCLOUDAIR_ENDPOINT", "http://localhost:4444/api") + client, err := NewVAClient() + if err != nil { + t.Fatalf("err: %v", err) + } + + testServer.Response(201, authheader, vaauthorization) + _, err = client.vaauthorize("", "") + _ = testServer.WaitRequest() + if err != nil { + t.Fatalf("err: %v", err) + } + + if client.VAToken != "012345678901234567890123456789" { + t.Fatalf("VAtoken not set on client: %s", client.VAToken) + } + +} + +func TestClient_vaacquireservice_env(t *testing.T) { + + os.Setenv("VCLOUDAIR_COMPUTEID", "CI123456-789") + + testServer.Start() + var err error + os.Setenv("VCLOUDAIR_ENDPOINT", "http://localhost:4444/api") + client, err := NewVAClient() + if err != nil { + t.Fatalf("err: %v", err) + } + client.VAToken = "012345678901234567890123456789" + + testServer.Response(200, nil, vaservices) + vacomputehref, err := client.vaacquireservice(*aus, "") + _ = testServer.WaitRequest() + if err != nil { + t.Fatalf("err: %v", err) + } + + if vacomputehref.String() != 
"http://localhost:4444/api/vchs/compute/00000000-0000-0000-0000-000000000000" { + t.Fatalf("VAComputeHREF not set on client: %s", vacomputehref) + } + + if client.Region != "US - Anywhere" { + t.Fatalf("Region not set on client: %s", client.Region) + } + +} + +func TestClient_vaacquirecompute_env(t *testing.T) { + + os.Setenv("VCLOUDAIR_VDCID", "VDC12345-6789") + + testServer.Start() + var err error + os.Setenv("VCLOUDAIR_ENDPOINT", "http://localhost:4444/api") + client, err := NewVAClient() + if err != nil { + t.Fatalf("err: %v", err) + } + client.VAToken = "012345678901234567890123456789" + client.Region = "US - Anywhere" + + testServer.Response(200, nil, vacompute) + vavdchref, err := client.vaacquirecompute(*auc, "") + _ = testServer.WaitRequest() + if err != nil { + t.Fatalf("err: %v", err) + } + + if vavdchref.String() != "http://localhost:4444/api/vchs/compute/00000000-0000-0000-0000-000000000000/vdc/00000000-0000-0000-0000-000000000000/vcloudsession" { + t.Fatalf("VAVDCHREF not set on client: %s", vavdchref) + } + +} + +func TestClient_vagetbackendauth_env(t *testing.T) { + + os.Setenv("VCLOUDAIR_VDCID", "VDC12345-6789") + + testServer.Start() + var err error + os.Setenv("VCLOUDAIR_ENDPOINT", "http://localhost:4444/api") + client, err := NewVAClient() + if err != nil { + t.Fatalf("err: %v", err) + } + client.VAToken = "012345678901234567890123456789" + client.Region = "US - Anywhere" + + testServer.Response(201, nil, vabackend) + err = client.vagetbackendauth(*aucs, "") + _ = testServer.WaitRequest() + if err != nil { + t.Fatalf("err: %v", err) + } + + if client.Client.VCDToken != "01234567890123456789012345678901" { + t.Fatalf("VCDToken not set on client: %s", client.Client.VCDToken) + } + if client.Client.VCDAuthHeader != "x-vcloud-authorization" { + t.Fatalf("VCDAuthHeader not set on client: %s", client.Client.VCDAuthHeader) + } + if client.Client.VCDVDCHREF.String() != "http://localhost:4444/api/vdc/00000000-0000-0000-0000-000000000000" { + t.Fatalf("VDC not set on client: %s", client.Client.VCDVDCHREF) + } + +} + +func TestClient_NewClient(t *testing.T) { + + var err error + os.Setenv("VCLOUDAIR_ENDPOINT", "") + if _, err = NewVAClient(); err != nil { + t.Fatalf("err: %v", err) + } + + os.Setenv("VCLOUDAIR_ENDPOINT", "💩") + if _, err = NewVAClient(); err == nil { + t.Fatalf("err: %v", err) + } + +} + +func TestClient_Disconnect(t *testing.T) { + + testServer.Start() + var err error + + // Test an authenticated client + c := makeClient(t) + testServer.Response(201, nil, "") + err = c.Disconnect() + if err != nil { + t.Fatalf("err: %v", err) + } + + // Test an empty client + client, err := NewVAClient() + if err != nil { + t.Fatalf("err: %v", err) + } + err = client.Disconnect() + if err == nil { + t.Fatalf("err: %v", err) + } + +} + +func TestVAClient_Authenticate(t *testing.T) { + + testServer.Start() + var err error + os.Setenv("VCLOUDAIR_ENDPOINT", "http://localhost:4444/api") + client, err := NewVAClient() + if err != nil { + t.Fatalf("err: %v", err) + } + + // Botched auth + testServer.ResponseMap(5, testutil.ResponseMap{ + "/api/vchs/sessions": testutil.Response{401, nil, vcdError}, + "/api/vchs/services": testutil.Response{200, nil, vaservices}, + "/api/vchs/compute/00000000-0000-0000-0000-000000000000": testutil.Response{200, nil, vacompute}, + "/api/vchs/compute/00000000-0000-0000-0000-000000000000/vdc/00000000-0000-0000-0000-000000000000/vcloudsession": testutil.Response{201, nil, vabackend}, + "/api/vdc/00000000-0000-0000-0000-000000000000": testutil.Response{200, 
nil, vdcExample},
+	})
+
+	_, err = client.Authenticate("username", "password", "CI123456-789", "VDC12345-6789")
+	_ = testServer.WaitRequests(1)
+	testServer.Flush()
+	if err == nil {
+		t.Fatalf("Uncaught error: %v", err)
+	}
+
+	// Botched services
+	testServer.ResponseMap(5, testutil.ResponseMap{
+		"/api/vchs/sessions": testutil.Response{201, authheader, vaauthorization},
+		"/api/vchs/services": testutil.Response{500, nil, vcdError},
+		"/api/vchs/compute/00000000-0000-0000-0000-000000000000": testutil.Response{200, nil, vacompute},
+		"/api/vchs/compute/00000000-0000-0000-0000-000000000000/vdc/00000000-0000-0000-0000-000000000000/vcloudsession": testutil.Response{201, nil, vabackend},
+		"/api/vdc/00000000-0000-0000-0000-000000000000": testutil.Response{200, nil, vdcExample},
+	})
+
+	_, err = client.Authenticate("username", "password", "CI123456-789", "VDC12345-6789")
+	_ = testServer.WaitRequests(2)
+	testServer.Flush()
+	if err == nil {
+		t.Fatalf("Uncaught error: %v", err)
+	}
+
+	// Botched compute
+	testServer.ResponseMap(5, testutil.ResponseMap{
+		"/api/vchs/sessions": testutil.Response{201, authheader, vaauthorization},
+		"/api/vchs/services": testutil.Response{200, nil, vaservices},
+		"/api/vchs/compute/00000000-0000-0000-0000-000000000000": testutil.Response{500, nil, vcdError},
+		"/api/vchs/compute/00000000-0000-0000-0000-000000000000/vdc/00000000-0000-0000-0000-000000000000/vcloudsession": testutil.Response{201, nil, vabackend},
+		"/api/vdc/00000000-0000-0000-0000-000000000000": testutil.Response{200, nil, vdcExample},
+	})
+
+	_, err = client.Authenticate("username", "password", "CI123456-789", "VDC12345-6789")
+	_ = testServer.WaitRequests(3)
+	testServer.Flush()
+	if err == nil {
+		t.Fatalf("Uncaught error: %v", err)
+	}
+
+	// Botched backend
+	testServer.ResponseMap(5, testutil.ResponseMap{
+		"/api/vchs/sessions": testutil.Response{201, authheader, vaauthorization},
+		"/api/vchs/services": testutil.Response{200, nil, vaservices},
+		"/api/vchs/compute/00000000-0000-0000-0000-000000000000": testutil.Response{200, nil, vacompute},
+		"/api/vchs/compute/00000000-0000-0000-0000-000000000000/vdc/00000000-0000-0000-0000-000000000000/vcloudsession": testutil.Response{500, nil, vcdError},
+		"/api/vdc/00000000-0000-0000-0000-000000000000": testutil.Response{200, nil, vdcExample},
+	})
+
+	_, err = client.Authenticate("username", "password", "CI123456-789", "VDC12345-6789")
+	_ = testServer.WaitRequests(4)
+	testServer.Flush()
+	if err == nil {
+		t.Fatalf("Uncaught error: %v", err)
+	}
+
+	// Botched vdc
+	testServer.ResponseMap(5, testutil.ResponseMap{
+		"/api/vchs/sessions": testutil.Response{201, authheader, vaauthorization},
+		"/api/vchs/services": testutil.Response{200, nil, vaservices},
+		"/api/vchs/compute/00000000-0000-0000-0000-000000000000": testutil.Response{200, nil, vacompute},
+		"/api/vchs/compute/00000000-0000-0000-0000-000000000000/vdc/00000000-0000-0000-0000-000000000000/vcloudsession": testutil.Response{201, nil, vabackend},
+		"/api/vdc/00000000-0000-0000-0000-000000000000": testutil.Response{500, nil, vcdError},
+	})
+
+	_, err = client.Authenticate("username", "password", "CI123456-789", "VDC12345-6789")
+	_ = testServer.WaitRequests(5)
+	testServer.Flush()
+	if err == nil {
+		t.Fatalf("Uncaught error: %v", err)
+	}
+
+}
+
+func makeClient(t *testing.T) VAClient {
+
+	testServer.Start()
+	var err error
+	os.Setenv("VCLOUDAIR_ENDPOINT", "http://localhost:4444/api")
+	client, err := NewVAClient()
+	if err != nil {
+		t.Fatalf("err: %v", err)
+	}
+
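+	// Queue canned responses, keyed by request path, for the five HTTP
+	// calls a full Authenticate round trip performs.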
+	testServer.ResponseMap(5, testutil.ResponseMap{
+		"/api/vchs/sessions": testutil.Response{201, authheader, vaauthorization},
+		"/api/vchs/services": testutil.Response{200, nil, vaservices},
+		"/api/vchs/compute/00000000-0000-0000-0000-000000000000": testutil.Response{200, nil, vacompute},
+		"/api/vchs/compute/00000000-0000-0000-0000-000000000000/vdc/00000000-0000-0000-0000-000000000000/vcloudsession": testutil.Response{201, nil, vabackend},
+		"/api/vdc/00000000-0000-0000-0000-000000000000": testutil.Response{200, nil, vdcExample},
+	})
+
+	_, err = client.Authenticate("username", "password", "CI123456-789", "VDC12345-6789")
+
+	_ = testServer.WaitRequests(5)
+	testServer.Flush()
+
+	if err != nil {
+		t.Fatalf("err: %v", err)
+	}
+
+	if client.VAToken != "012345678901234567890123456789" {
+		t.Fatalf("VAToken not set on client: %s", client.VAToken)
+	}
+
+	if client.Region != "US - Anywhere" {
+		t.Fatalf("Region not set on client: %s", client.Region)
+	}
+
+	if client.Client.VCDToken != "01234567890123456789012345678901" {
+		t.Fatalf("VCDToken not set on client: %s", client.Client.VCDToken)
+	}
+	if client.Client.VCDAuthHeader != "x-vcloud-authorization" {
+		t.Fatalf("VCDAuthHeader not set on client: %s", client.Client.VCDAuthHeader)
+	}
+	if client.Client.VCDVDCHREF.String() != "http://localhost:4444/api/vdc/00000000-0000-0000-0000-000000000000" {
+		t.Fatalf("VDC not set on client: %s", client.Client.VCDVDCHREF)
+	}
+
+	return *client
+}
+
+func TestClient_parseErr(t *testing.T) {
+	testServer.Start()
+	var err error
+	os.Setenv("VCLOUDAIR_ENDPOINT", "http://localhost:4444/api")
+	client, err := NewVAClient()
+	if err != nil {
+		t.Fatalf("err: %v", err)
+	}
+
+	// I'M A TEAPOT!
+	testServer.ResponseMap(5, testutil.ResponseMap{
+		"/api/vchs/sessions": testutil.Response{201, authheader, vaauthorization},
+		"/api/vchs/services": testutil.Response{200, nil, vaservices},
+		"/api/vchs/compute/00000000-0000-0000-0000-000000000000": testutil.Response{200, nil, vacompute},
+		"/api/vchs/compute/00000000-0000-0000-0000-000000000000/vdc/00000000-0000-0000-0000-000000000000/vcloudsession": testutil.Response{418, nil, notfoundErr},
+		"/api/vdc/00000000-0000-0000-0000-000000000000": testutil.Response{200, nil, vdcExample},
+	})
+
+	_, err = client.Authenticate("username", "password", "CI123456-789", "VDC12345-6789")
+
+	_ = testServer.WaitRequests(4)
+	testServer.Flush()
+
+	if err == nil {
+		t.Fatalf("Uncaught error: %v", err)
+	}
+
+}
+
+func TestClient_NewRequest(t *testing.T) {
+	c := makeClient(t)
+
+	params := map[string]string{
+		"foo": "bar",
+		"baz": "bar",
+	}
+
+	uri, _ := url.ParseRequestURI("http://localhost:4444/api/bar")
+
+	req := c.Client.NewRequest(params, "POST", *uri, nil)
+
+	encoded := req.URL.Query()
+	if encoded.Get("foo") != "bar" {
+		t.Fatalf("bad: %v", encoded)
+	}
+
+	if encoded.Get("baz") != "bar" {
+		t.Fatalf("bad: %v", encoded)
+	}
+
+	if req.URL.String() != "http://localhost:4444/api/bar?baz=bar&foo=bar" {
+		t.Fatalf("bad base url: %v", req.URL.String())
+	}
+
+	if req.Header.Get("x-vcloud-authorization") != "01234567890123456789012345678901" {
+		t.Fatalf("bad auth header: %v", req.Header)
+	}
+
+	if req.Method != "POST" {
+		t.Fatalf("bad method: %v", req.Method)
+	}
+
+}
+
+// status: 404
+var notfoundErr = `
+
+ 404 Not Found
+

404 Not Found

+
nginx/1.4.6 (Ubuntu)
+
+
+`
+
+// status: 201
+var vaauthorization = `
+
+
+
+
+
+`
+var vaauthorizationErr = `
+
+
+
+
+`
+
+// status: 200
+var vaservices = `
+
+
+
+
+`
+
+// status: 200
+var vacompute = `
+
+
+
+
+
+
+
+`
+
+// status: 201
+var vabackend = `
+
+
+
+
+
+`
+
+// status: 201
+var vabackendErr = `
+
+
+
+
+
+`
+
+var vcdError = `
+
+`
diff --git a/vendor/github.com/hmrc/vmware-govcd/api_vcd.go b/vendor/github.com/hmrc/vmware-govcd/api_vcd.go
new file mode 100644
index 000000000..9649118ba
--- /dev/null
+++ b/vendor/github.com/hmrc/vmware-govcd/api_vcd.go
@@ -0,0 +1,237 @@
+package govcd
+
+import (
+	"crypto/tls"
+	"fmt"
+	"net/http"
+	"net/url"
+	"os"
+	"sync"
+	"time"
+)
+
+type VCDClient struct {
+	OrgHREF     url.URL // vCloud Director OrgRef
+	Org         Org     // Org
+	OrgVdc      Vdc     // Org vDC
+	Client      Client  // Client for the underlying VCD instance
+	sessionHREF url.URL // HREF for the session API
+	Mutex       sync.Mutex
+}
+
+type supportedVersions struct {
+	VersionInfo struct {
+		Version  string `xml:"Version"`
+		LoginUrl string `xml:"LoginUrl"`
+	} `xml:"VersionInfo"`
+}
+
+func (c *VCDClient) vcdloginurl() error {
+
+	s := c.Client.VCDVDCHREF
+	s.Path += "/versions"
+
+	// No point in checking for errors here
+	req := c.Client.NewRequest(map[string]string{}, "GET", s, nil)
+
+	resp, err := checkResp(c.Client.Http.Do(req))
+	if err != nil {
+		return err
+	}
+	defer resp.Body.Close()
+
+	supportedVersions := new(supportedVersions)
+
+	err = decodeBody(resp, supportedVersions)
+	if err != nil {
+		return fmt.Errorf("error decoding versions response: %s", err)
+	}
+
+	u, err := url.Parse(supportedVersions.VersionInfo.LoginUrl)
+	if err != nil {
+		return fmt.Errorf("couldn't find a LoginUrl in versions")
+	}
+	c.sessionHREF = *u
+	return nil
+}
+
+func (c *VCDClient) vcdauthorize(user, pass, org string) error {
+
+	if user == "" {
+		user = os.Getenv("VCLOUD_USERNAME")
+	}
+
+	if pass == "" {
+		pass = os.Getenv("VCLOUD_PASSWORD")
+	}
+
+	if org == "" {
+		org = os.Getenv("VCLOUD_ORG")
+	}
+
+	// No point in checking for errors here
+	req := c.Client.NewRequest(map[string]string{}, "POST", c.sessionHREF, nil)
+
+	// Set Basic Authentication Header
+	req.SetBasicAuth(user+"@"+org, pass)
+
+	// Add the Accept header for vCA
+	req.Header.Add("Accept", "application/*+xml;version=5.5")
+
+	resp, err := checkResp(c.Client.Http.Do(req))
+	if err != nil {
+		return err
+	}
+	defer resp.Body.Close()
+
+	// Store the authentication header
+	c.Client.VCDToken = resp.Header.Get("x-vcloud-authorization")
+	c.Client.VCDAuthHeader = "x-vcloud-authorization"
+
+	session := new(session)
+	err = decodeBody(resp, session)
+	if err != nil {
+		return fmt.Errorf("error decoding session response: %s", err)
+	}
+
+	orgFound := false
+	// Loop over the session links to find the organization.
+	for _, s := range session.Link {
+		if s.Type == "application/vnd.vmware.vcloud.org+xml" && s.Rel == "down" {
+			u, err := url.Parse(s.HREF)
+			if err != nil {
+				return fmt.Errorf("couldn't find an Organization in current session, %v", err)
+			}
+			c.OrgHREF = *u
+			orgFound = true
+		}
+	}
+	if !orgFound {
+		return fmt.Errorf("couldn't find an Organization in current session")
+	}
+
+	// Loop over the session links to find the session URL.
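+	// The link with rel="remove" doubles as the logout URL; Disconnect
+	// later issues its DELETE against this HREF.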
+	sessionFound := false
+	for _, s := range session.Link {
+		if s.Rel == "remove" {
+			u, err := url.Parse(s.HREF)
+			if err != nil {
+				return fmt.Errorf("couldn't find a logout HREF in current session, %v", err)
+			}
+			c.sessionHREF = *u
+			sessionFound = true
+		}
+	}
+	if !sessionFound {
+		return fmt.Errorf("couldn't find a logout HREF in current session")
+	}
+	return nil
+}
+
+func (c *VCDClient) RetrieveOrg(vcdname string) (Org, error) {
+
+	req := c.Client.NewRequest(map[string]string{}, "GET", c.OrgHREF, nil)
+	req.Header.Add("Accept", "vnd.vmware.vcloud.org+xml;version=5.5")
+
+	// TODO: wrap into checkresp to parse error
+	resp, err := checkResp(c.Client.Http.Do(req))
+	if err != nil {
+		return Org{}, fmt.Errorf("error retrieving org: %s", err)
+	}
+
+	org := NewOrg(&c.Client)
+
+	if err = decodeBody(resp, org.Org); err != nil {
+		return Org{}, fmt.Errorf("error decoding org response: %s", err)
+	}
+
+	// Get the VDC ref from the Org
+	vdcFound := false
+	for _, s := range org.Org.Link {
+		if s.Type == "application/vnd.vmware.vcloud.vdc+xml" && s.Rel == "down" {
+			if vcdname != "" && s.Name != vcdname {
+				continue
+			}
+			u, err := url.Parse(s.HREF)
+			if err != nil {
+				return Org{}, err
+			}
+			c.Client.VCDVDCHREF = *u
+			vdcFound = true
+		}
+	}
+
+	if !vdcFound {
+		return Org{}, fmt.Errorf("error finding the organization VDC HREF")
+	}
+
+	return *org, nil
+}
+
+func NewVCDClient(vcdEndpoint url.URL, insecure bool) *VCDClient {
+
+	return &VCDClient{
+		Client: Client{
+			APIVersion: "5.5",
+			VCDVDCHREF: vcdEndpoint,
+			Http: http.Client{
+				Transport: &http.Transport{
+					TLSClientConfig: &tls.Config{
+						InsecureSkipVerify: insecure,
+					},
+					Proxy:               http.ProxyFromEnvironment,
+					TLSHandshakeTimeout: 120 * time.Second,
+				},
+			},
+		},
+	}
+}
+
+// Authenticate is a helper function that performs a login to vCloud Director.
+func (c *VCDClient) Authenticate(username, password, org, vdcname string) (Org, Vdc, error) {
+
+	// LoginUrl
+	err := c.vcdloginurl()
+	if err != nil {
+		return Org{}, Vdc{}, fmt.Errorf("error finding LoginUrl: %s", err)
+	}
+	// Authorize
+	err = c.vcdauthorize(username, password, org)
+	if err != nil {
+		return Org{}, Vdc{}, fmt.Errorf("error authorizing: %s", err)
+	}
+
+	// Get Org
+	o, err := c.RetrieveOrg(vdcname)
+	if err != nil {
+		return Org{}, Vdc{}, fmt.Errorf("error acquiring Org: %s", err)
+	}
+
+	vdc, err := c.Client.retrieveVDC()
+	if err != nil {
+		return Org{}, Vdc{}, fmt.Errorf("error retrieving the organization VDC: %s", err)
+	}
+
+	return o, vdc, nil
+}
+
+// Disconnect performs a disconnection from the vCloud Director API endpoint.
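+// A minimal usage sketch (assuming an already-authenticated *VCDClient):
+//
+//	if err := client.Disconnect(); err != nil {
+//		log.Printf("vCloud Director logout failed: %s", err)
+//	}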
+func (c *VCDClient) Disconnect() error {
+	if c.Client.VCDToken == "" && c.Client.VCDAuthHeader == "" {
+		return fmt.Errorf("cannot disconnect, client is not authenticated")
+	}
+
+	req := c.Client.NewRequest(map[string]string{}, "DELETE", c.sessionHREF, nil)
+
+	// Add the Accept header for vCA
+	req.Header.Add("Accept", "application/xml;version=5.5")
+
+	// Set Authorization Header
+	req.Header.Add(c.Client.VCDAuthHeader, c.Client.VCDToken)
+
+	if _, err := checkResp(c.Client.Http.Do(req)); err != nil {
+		return fmt.Errorf("error processing session delete for vCloud Director: %s", err)
+	}
+	return nil
+}
diff --git a/vendor/github.com/hmrc/vmware-govcd/api_vcd_test.go b/vendor/github.com/hmrc/vmware-govcd/api_vcd_test.go
new file mode 100644
index 000000000..905e15af4
--- /dev/null
+++ b/vendor/github.com/hmrc/vmware-govcd/api_vcd_test.go
@@ -0,0 +1,208 @@
+package govcd
+
+import (
+	"net/url"
+	"testing"
+
+	"github.com/hmrc/vmware-govcd/testutil"
+	. "gopkg.in/check.v1"
+)
+
+type K struct {
+	client *VCDClient
+	org    Org
+	vdc    Vdc
+}
+
+var vcdu_api, _ = url.Parse("http://localhost:4444/api")
+var vcdu_v, _ = url.Parse("http://localhost:4444/api/versions")
+var vcdu_s, _ = url.Parse("http://localhost:4444/api/vchs/services")
+
+var _ = Suite(&S{})
+
+var vcdauthheader = map[string]string{"x-vcloud-authorization": "012345678901234567890123456789"}
+
+func (s *K) SetUpSuite(c *C) {
+	testServer.Start()
+	var err error
+	s.client = NewVCDClient(*vcdu_api, false)
+	if err != nil {
+		panic(err)
+	}
+
+	testServer.ResponseMap(5, testutil.ResponseMap{
+		"/api/versions": testutil.Response{200, map[string]string{}, vcdversions},
+	})
+
+	s.org, s.vdc, err = s.client.Authenticate("username", "password", "organization", "VDC")
+	if err != nil {
+		panic(err)
+	}
+}
+
+func (s *K) TearDownTest(c *C) {
+	testServer.Flush()
+}
+
+func TestClient_getloginurl(t *testing.T) {
+	testServer.Start()
+	var err error
+
+	// Set up a working client
+	client := NewVCDClient(*vcdu_api, false)
+	if err != nil {
+		t.Fatalf("err: %v", err)
+	}
+
+	// Set up a correct conversation
+	testServer.ResponseMap(200, testutil.ResponseMap{
+		"/api/versions": testutil.Response{200, nil, vcdversions},
+	})
+
+	err = client.vcdloginurl()
+	testServer.Flush()
+	if err != nil {
+		t.Fatalf("err: %v", err)
+	}
+
+	// Check that the LoginUrl was parsed into the session HREF.
+ if client.sessionHREF.Path != "/api/sessions" { + t.Fatalf("Getting LoginUrl failed, url: %s", client.sessionHREF.Path) + } +} + +func TestVCDClient_Authenticate(t *testing.T) { + + testServer.Start() + var err error + + client := NewVCDClient(*vcdu_api, false) + if err != nil { + t.Fatalf("err: %v", err) + } + + // OK auth + testServer.ResponseMap(5, testutil.ResponseMap{ + "/api/versions": testutil.Response{200, nil, vcdversions}, + "/api/sessions": testutil.Response{201, vcdauthheader, vcdsessions}, + "/api/org/00000000-0000-0000-0000-000000000000": testutil.Response{201, vcdauthheader, vcdorg}, + "/api/vdc/00000000-0000-0000-0000-000000000000": testutil.Response{201, vcdauthheader, vcdorg}, + }) + + org, _, err := client.Authenticate("username", "password", "organization", "organization vDC") + testServer.Flush() + if err != nil { + t.Fatalf("Error authenticating: %v", err) + } + + if org.Org.FullName != "Organization (full)" { + t.Fatalf("Orgname not parsed, got: %s", org.Org.FullName) + } +} + +// status: 200 +var vcdversions = ` + + + + 1.5 + http://localhost:4444/api/sessions + + application/vnd.vmware.vcloud.instantiateVAppTemplateParams+xml + InstantiateVAppTemplateParamsType + http://localhost:4444/api/v1.5/schema/master.xsd + + + application/vnd.vmware.admin.vmwProviderVdcReferences+xml + VMWProviderVdcReferencesType + http://localhost:4444/api/v1.5/schema/vmwextensions.xsd + + + +` + +var vcdsessions = ` + + + + + + + + + +` + +var vcdorg = ` + + + + + + + Organization (full) + +` + +var vcdvdc = ` + + + + + + + + + + + + + organization vDC + AllocationVApp + + + MHz + 0 + 0 + 0 + 0 + 0 + + + MB + 0 + 0 + 0 + 0 + 0 + + + + + + + + + + + + + vmx-04 + vmx-07 + vmx-08 + vmx-09 + vmx-10 + + + 0 + 100 + 0 + 0 + true + + + + + + +` diff --git a/vendor/github.com/hmrc/vmware-govcd/catalog.go b/vendor/github.com/hmrc/vmware-govcd/catalog.go new file mode 100644 index 000000000..1839af497 --- /dev/null +++ b/vendor/github.com/hmrc/vmware-govcd/catalog.go @@ -0,0 +1,57 @@ +/* + * Copyright 2014 VMware, Inc. All rights reserved. Licensed under the Apache v2 License. 
+ */
+
+package govcd
+
+import (
+	"fmt"
+	"net/url"
+
+	types "github.com/hmrc/vmware-govcd/types/v56"
+)
+
+type Catalog struct {
+	Catalog *types.Catalog
+	c       *Client
+}
+
+func NewCatalog(c *Client) *Catalog {
+	return &Catalog{
+		Catalog: new(types.Catalog),
+		c:       c,
+	}
+}
+
+func (c *Catalog) FindCatalogItem(catalogitem string) (CatalogItem, error) {
+
+	for _, cis := range c.Catalog.CatalogItems {
+		for _, ci := range cis.CatalogItem {
+			if ci.Name == catalogitem && ci.Type == "application/vnd.vmware.vcloud.catalogItem+xml" {
+				u, err := url.ParseRequestURI(ci.HREF)
+				if err != nil {
+					return CatalogItem{}, fmt.Errorf("error decoding catalog response: %s", err)
+				}
+
+				req := c.c.NewRequest(map[string]string{}, "GET", *u, nil)
+
+				resp, err := checkResp(c.c.Http.Do(req))
+				if err != nil {
+					return CatalogItem{}, fmt.Errorf("error retrieving catalog: %s", err)
+				}
+
+				cat := NewCatalogItem(c.c)
+
+				if err = decodeBody(resp, cat.CatalogItem); err != nil {
+					return CatalogItem{}, fmt.Errorf("error decoding catalog response: %s", err)
+				}
+
+				// The request was successful
+				return *cat, nil
+			}
+		}
+	}
+
+	return CatalogItem{}, fmt.Errorf("can't find catalog item: %s", catalogitem)
+}
diff --git a/vendor/github.com/hmrc/vmware-govcd/catalog_test.go b/vendor/github.com/hmrc/vmware-govcd/catalog_test.go
new file mode 100644
index 000000000..e6ee677cf
--- /dev/null
+++ b/vendor/github.com/hmrc/vmware-govcd/catalog_test.go
@@ -0,0 +1,68 @@
+/*
+ * Copyright 2014 VMware, Inc. All rights reserved. Licensed under the Apache v2 License.
+ */
+
+package govcd
+
+import (
+	. "gopkg.in/check.v1"
+)
+
+func (s *S) Test_FindCatalogItem(c *C) {
+
+	// Get the Org populated
+	testServer.Response(200, nil, orgExample)
+	org, err := s.vdc.GetVDCOrg()
+	_ = testServer.WaitRequest()
+	testServer.Flush()
+	c.Assert(err, IsNil)
+
+	// Populate Catalog
+	testServer.Response(200, nil, catalogExample)
+	cat, err := org.FindCatalog("Public Catalog")
+	_ = testServer.WaitRequest()
+	testServer.Flush()
+
+	// Find Catalog Item
+	testServer.Response(200, nil, catalogitemExample)
+	catitem, err := cat.FindCatalogItem("CentOS64-32bit")
+	_ = testServer.WaitRequest()
+	testServer.Flush()
+
+	c.Assert(err, IsNil)
+	c.Assert(catitem.CatalogItem.HREF, Equals, "http://localhost:4444/api/catalogItem/1176e485-8858-4e15-94e5-ae4face605ae")
+	c.Assert(catitem.CatalogItem.Description, Equals, "id: cts-6.4-32bit")
+
+	// Test a non-existent catalog item
+	catitem, err = cat.FindCatalogItem("INVALID")
+	c.Assert(err, NotNil)
+
+}
+
+var catalogExample = `
+
+
+
+ vCHS service catalog
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ true
+ 2013-10-15T01:14:22.370Z
+ 60
+
+`
diff --git a/vendor/github.com/hmrc/vmware-govcd/catalogitem.go b/vendor/github.com/hmrc/vmware-govcd/catalogitem.go
new file mode 100644
index 000000000..bffa22ffb
--- /dev/null
+++ b/vendor/github.com/hmrc/vmware-govcd/catalogitem.go
@@ -0,0 +1,49 @@
+/*
+ * Copyright 2014 VMware, Inc. All rights reserved. Licensed under the Apache v2 License.
+ */
+
+package govcd
+
+import (
+	"fmt"
+	"net/url"
+
+	types "github.com/hmrc/vmware-govcd/types/v56"
+)
+
+type CatalogItem struct {
+	CatalogItem *types.CatalogItem
+	c           *Client
+}
+
+func NewCatalogItem(c *Client) *CatalogItem {
+	return &CatalogItem{
+		CatalogItem: new(types.CatalogItem),
+		c:           c,
+	}
+}
+
+func (ci *CatalogItem) GetVAppTemplate() (VAppTemplate, error) {
+	// Use a local name that does not shadow the net/url package.
+	u, err := url.ParseRequestURI(ci.CatalogItem.Entity.HREF)
+	if err != nil {
+		return VAppTemplate{}, fmt.Errorf("error decoding catalogitem response: %s", err)
+	}
+
+	req := ci.c.NewRequest(map[string]string{}, "GET", *u, nil)
+
+	resp, err := checkResp(ci.c.Http.Do(req))
+	if err != nil {
+		return VAppTemplate{}, fmt.Errorf("error retrieving vapptemplate: %s", err)
+	}
+
+	cat := NewVAppTemplate(ci.c)
+
+	if err = decodeBody(resp, cat.VAppTemplate); err != nil {
+		return VAppTemplate{}, fmt.Errorf("error decoding vapptemplate response: %s", err)
+	}
+
+	// The request was successful
+	return *cat, nil
+}
diff --git a/vendor/github.com/hmrc/vmware-govcd/catalogitem_test.go b/vendor/github.com/hmrc/vmware-govcd/catalogitem_test.go
new file mode 100644
index 000000000..f8db37fcf
--- /dev/null
+++ b/vendor/github.com/hmrc/vmware-govcd/catalogitem_test.go
@@ -0,0 +1,55 @@
+/*
+ * Copyright 2014 VMware, Inc. All rights reserved. Licensed under the Apache v2 License.
+ */
+
+package govcd
+
+import (
+	. "gopkg.in/check.v1"
+)
+
+func (s *S) Test_GetVAppTemplate(c *C) {
+
+	// Get the Org populated
+	testServer.Response(200, nil, orgExample)
+	org, err := s.vdc.GetVDCOrg()
+	_ = testServer.WaitRequest()
+	testServer.Flush()
+	c.Assert(err, IsNil)
+
+	// Populate Catalog
+	testServer.Response(200, nil, catalogExample)
+	cat, err := org.FindCatalog("Public Catalog")
+	_ = testServer.WaitRequest()
+	testServer.Flush()
+
+	// Populate Catalog Item
+	testServer.Response(200, nil, catalogitemExample)
+	catitem, err := cat.FindCatalogItem("CentOS64-32bit")
+	_ = testServer.WaitRequest()
+	testServer.Flush()
+
+	// Get VAppTemplate
+	testServer.Response(200, nil, vapptemplateExample)
+	vapptemplate, err := catitem.GetVAppTemplate()
+	_ = testServer.WaitRequest()
+	testServer.Flush()
+
+	c.Assert(err, IsNil)
+	c.Assert(vapptemplate.VAppTemplate.HREF, Equals, "http://localhost:4444/api/vAppTemplate/vappTemplate-40cb9721-5f1a-44f9-b5c3-98c5f518c4f5")
+	c.Assert(vapptemplate.VAppTemplate.Name, Equals, "CentOS64-32bit")
+	c.Assert(vapptemplate.VAppTemplate.Description, Equals, "id: cts-6.4-32bit")
+
+}
+
+var catalogitemExample = `
+
+
+
+
+ id: cts-6.4-32bit
+
+ 2014-06-04T21:06:43.750Z
+ 4
+
+`
diff --git a/vendor/github.com/hmrc/vmware-govcd/edgegateway.go b/vendor/github.com/hmrc/vmware-govcd/edgegateway.go
new file mode 100644
index 000000000..3bd16bd1f
--- /dev/null
+++ b/vendor/github.com/hmrc/vmware-govcd/edgegateway.go
@@ -0,0 +1,625 @@
+/*
+ * Copyright 2014 VMware, Inc. All rights reserved. Licensed under the Apache v2 License.
+ */
+
+package govcd
+
+import (
+	"bytes"
+	"encoding/xml"
+	"fmt"
+	"log"
+	"net/http"
+	"net/url"
+	"os"
+	"regexp"
+	"time"
+
+	types "github.com/hmrc/vmware-govcd/types/v56"
+)
+
+type EdgeGateway struct {
+	EdgeGateway *types.EdgeGateway
+	c           *Client
+}
+
+func NewEdgeGateway(c *Client) *EdgeGateway {
+	return &EdgeGateway{
+		EdgeGateway: new(types.EdgeGateway),
+		c:           c,
+	}
+}
+
+func (e *EdgeGateway) AddDhcpPool(network *types.OrgVDCNetwork, dhcppool []interface{}) (Task, error) {
+	newedgeconfig := e.EdgeGateway.Configuration.EdgeGatewayServiceConfiguration
+	log.Printf("[DEBUG] EDGE GATEWAY: %#v", newedgeconfig)
+	log.Printf("[DEBUG] EDGE GATEWAY SERVICE: %#v", newedgeconfig.GatewayDhcpService)
+	newDhcpService := &types.GatewayDhcpService{}
+	if newedgeconfig.GatewayDhcpService == nil {
+		newDhcpService.IsEnabled = true
+	} else {
+		newDhcpService.IsEnabled = newedgeconfig.GatewayDhcpService.IsEnabled
+
+		for _, v := range newedgeconfig.GatewayDhcpService.Pool {
+
+			// Kludgy IF to avoid deleting DHCP pools not created by us.
+			// If it matches, skip it and continue the loop
+			if v.Network.HREF == network.HREF {
+				continue
+			}
+
+			newDhcpService.Pool = append(newDhcpService.Pool, v)
+		}
+	}
+
+	for _, v := range dhcppool {
+		data := v.(map[string]interface{})
+
+		dhcprule := &types.DhcpPoolService{
+			IsEnabled: true,
+			Network: &types.Reference{
+				HREF: network.HREF,
+				Name: network.Name,
+			},
+			DefaultLeaseTime: 3600,
+			MaxLeaseTime:     7200,
+			LowIPAddress:     data["start_address"].(string),
+			HighIPAddress:    data["end_address"].(string),
+		}
+		newDhcpService.Pool = append(newDhcpService.Pool, dhcprule)
+	}
+
+	newRules := &types.EdgeGatewayServiceConfiguration{
+		Xmlns:              "http://www.vmware.com/vcloud/v1.5",
+		GatewayDhcpService: newDhcpService,
+	}
+
+	output, err := xml.MarshalIndent(newRules, " ", " ")
+	if err != nil {
+		return Task{}, fmt.Errorf("error reconfiguring Edge Gateway: %s", err)
+	}
+
+	var resp *http.Response
+	for {
+		b := bytes.NewBufferString(xml.Header + string(output))
+
+		s, _ := url.ParseRequestURI(e.EdgeGateway.HREF)
+		s.Path += "/action/configureServices"
+
+		req := e.c.NewRequest(map[string]string{}, "POST", *s, b)
+		log.Printf("[DEBUG] POSTING TO URL: %s", s.Path)
+		log.Printf("[DEBUG] XML TO SEND:\n%s", b)
+
+		req.Header.Add("Content-Type", "application/vnd.vmware.admin.edgeGatewayServiceConfiguration+xml")
+
+		resp, err = checkResp(e.c.Http.Do(req))
+		if err != nil {
+			if v, _ := regexp.MatchString("is busy completing an operation.$", err.Error()); v {
+				time.Sleep(3 * time.Second)
+				continue
+			}
+			return Task{}, fmt.Errorf("error reconfiguring Edge Gateway: %s", err)
+		}
+		break
+	}
+
+	task := NewTask(e.c)
+
+	if err = decodeBody(resp, task.Task); err != nil {
+		return Task{}, fmt.Errorf("error decoding Task response: %s", err)
+	}
+
+	// The request was successful
+	return *task, nil
+}
+
+func (e *EdgeGateway) RemoveNATMapping(nattype, externalIP, internalIP, port string) (Task, error) {
+	// Find uplink interface
+	var uplink types.Reference
+	for _, gi := range e.EdgeGateway.Configuration.GatewayInterfaces.GatewayInterface {
+		if gi.InterfaceType != "uplink" {
+			continue
+		}
+		uplink = *gi.Network
+	}
+
+	newedgeconfig := e.EdgeGateway.Configuration.EdgeGatewayServiceConfiguration
+
+	// Take care of the NAT service
+	newnatservice := &types.NatService{}
+
+	newnatservice.IsEnabled = newedgeconfig.NatService.IsEnabled
+	newnatservice.NatType = newedgeconfig.NatService.NatType
+	newnatservice.Policy = newedgeconfig.NatService.Policy
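+	// Carry the remaining global NAT settings over unchanged; only the
+	// rule list is filtered below.
+	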
newnatservice.ExternalIP = newedgeconfig.NatService.ExternalIP + + for _, v := range newedgeconfig.NatService.NatRule { + + // Kludgy IF to avoid deleting DNAT rules not created by us. + // If matches, let's skip it and continue the loop + if v.RuleType == nattype && + v.GatewayNatRule.OriginalIP == externalIP && + v.GatewayNatRule.OriginalPort == port && + v.GatewayNatRule.Interface.HREF == uplink.HREF { + log.Printf("[DEBUG] REMOVING %s Rule: %#v", v.RuleType, v.GatewayNatRule) + continue + } + log.Printf("[DEBUG] KEEPING %s Rule: %#v", v.RuleType, v.GatewayNatRule) + newnatservice.NatRule = append(newnatservice.NatRule, v) + } + + newedgeconfig.NatService = newnatservice + + newRules := &types.EdgeGatewayServiceConfiguration{ + Xmlns: "http://www.vmware.com/vcloud/v1.5", + NatService: newnatservice, + } + + output, err := xml.MarshalIndent(newRules, " ", " ") + if err != nil { + return Task{}, fmt.Errorf("error reconfiguring Edge Gateway: %s", err) + } + + b := bytes.NewBufferString(xml.Header + string(output)) + + s, _ := url.ParseRequestURI(e.EdgeGateway.HREF) + s.Path += "/action/configureServices" + + req := e.c.NewRequest(map[string]string{}, "POST", *s, b) + log.Printf("[DEBUG] POSTING TO URL: %s", s.Path) + log.Printf("[DEBUG] XML TO SEND:\n%s", b) + + req.Header.Add("Content-Type", "application/vnd.vmware.admin.edgeGatewayServiceConfiguration+xml") + + resp, err := checkResp(e.c.Http.Do(req)) + if err != nil { + log.Printf("[DEBUG] Error is: %#v", err) + return Task{}, fmt.Errorf("error reconfiguring Edge Gateway: %s", err) + } + + task := NewTask(e.c) + + if err = decodeBody(resp, task.Task); err != nil { + return Task{}, fmt.Errorf("error decoding Task response: %s", err) + } + + // The request was successful + return *task, nil + +} + +func (e *EdgeGateway) AddNATMapping(nattype, externalIP, internalIP, port string) (Task, error) { + // Find uplink interface + var uplink types.Reference + for _, gi := range e.EdgeGateway.Configuration.GatewayInterfaces.GatewayInterface { + if gi.InterfaceType != "uplink" { + continue + } + uplink = *gi.Network + } + + newedgeconfig := e.EdgeGateway.Configuration.EdgeGatewayServiceConfiguration + + // Take care of the NAT service + newnatservice := &types.NatService{} + + if newedgeconfig.NatService == nil { + newnatservice.IsEnabled = true + } else { + newnatservice.IsEnabled = newedgeconfig.NatService.IsEnabled + newnatservice.NatType = newedgeconfig.NatService.NatType + newnatservice.Policy = newedgeconfig.NatService.Policy + newnatservice.ExternalIP = newedgeconfig.NatService.ExternalIP + + for _, v := range newedgeconfig.NatService.NatRule { + + // Kludgy IF to avoid deleting DNAT rules not created by us. 
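+			// (A pre-existing rule identical to the one being added is skipped
+			// here and re-created below, so the add stays idempotent.)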
+ // If matches, let's skip it and continue the loop + if v.RuleType == nattype && + v.GatewayNatRule.OriginalIP == externalIP && + v.GatewayNatRule.OriginalPort == port && + v.GatewayNatRule.TranslatedIP == internalIP && + v.GatewayNatRule.Interface.HREF == uplink.HREF { + continue + } + + newnatservice.NatRule = append(newnatservice.NatRule, v) + } + } + + //add rule + natRule := &types.NatRule{ + RuleType: nattype, + IsEnabled: true, + GatewayNatRule: &types.GatewayNatRule{ + Interface: &types.Reference{ + HREF: uplink.HREF, + }, + OriginalIP: externalIP, + OriginalPort: port, + TranslatedIP: internalIP, + TranslatedPort: port, + Protocol: "tcp", + }, + } + newnatservice.NatRule = append(newnatservice.NatRule, natRule) + + newedgeconfig.NatService = newnatservice + + newRules := &types.EdgeGatewayServiceConfiguration{ + Xmlns: "http://www.vmware.com/vcloud/v1.5", + NatService: newnatservice, + } + + output, err := xml.MarshalIndent(newRules, " ", " ") + if err != nil { + return Task{}, fmt.Errorf("error reconfiguring Edge Gateway: %s", err) + } + + b := bytes.NewBufferString(xml.Header + string(output)) + + s, _ := url.ParseRequestURI(e.EdgeGateway.HREF) + s.Path += "/action/configureServices" + + req := e.c.NewRequest(map[string]string{}, "POST", *s, b) + log.Printf("[DEBUG] POSTING TO URL: %s", s.Path) + log.Printf("[DEBUG] XML TO SEND:\n%s", b) + + req.Header.Add("Content-Type", "application/vnd.vmware.admin.edgeGatewayServiceConfiguration+xml") + + resp, err := checkResp(e.c.Http.Do(req)) + if err != nil { + log.Printf("[DEBUG] Error is: %#v", err) + return Task{}, fmt.Errorf("error reconfiguring Edge Gateway: %s", err) + } + + task := NewTask(e.c) + + if err = decodeBody(resp, task.Task); err != nil { + return Task{}, fmt.Errorf("error decoding Task response: %s", err) + } + + // The request was successful + return *task, nil + +} + +func (e *EdgeGateway) CreateFirewallRules(defaultAction string, rules []*types.FirewallRule) (Task, error) { + err := e.Refresh() + if err != nil { + return Task{}, fmt.Errorf("error: %v\n", err) + } + + newRules := &types.EdgeGatewayServiceConfiguration{ + Xmlns: "http://www.vmware.com/vcloud/v1.5", + FirewallService: &types.FirewallService{ + IsEnabled: true, + DefaultAction: defaultAction, + LogDefaultAction: true, + FirewallRule: rules, + }, + } + + output, err := xml.MarshalIndent(newRules, " ", " ") + if err != nil { + return Task{}, fmt.Errorf("error: %v\n", err) + } + + var resp *http.Response + for { + b := bytes.NewBufferString(xml.Header + string(output)) + + s, _ := url.ParseRequestURI(e.EdgeGateway.HREF) + s.Path += "/action/configureServices" + + req := e.c.NewRequest(map[string]string{}, "POST", *s, b) + log.Printf("[DEBUG] POSTING TO URL: %s", s.Path) + log.Printf("[DEBUG] XML TO SEND:\n%s", b) + + req.Header.Add("Content-Type", "application/vnd.vmware.admin.edgeGatewayServiceConfiguration+xml") + + resp, err = checkResp(e.c.Http.Do(req)) + if err != nil { + if v, _ := regexp.MatchString("is busy completing an operation.$", err.Error()); v { + time.Sleep(3 * time.Second) + continue + } + return Task{}, fmt.Errorf("error reconfiguring Edge Gateway: %s", err) + } + break + } + + task := NewTask(e.c) + + if err = decodeBody(resp, task.Task); err != nil { + return Task{}, fmt.Errorf("error decoding Task response: %s", err) + } + + // The request was successful + return *task, nil +} + +func (e *EdgeGateway) Refresh() error { + + if e.EdgeGateway == nil { + return fmt.Errorf("cannot refresh, Object is empty") + } + + u, _ := 
url.ParseRequestURI(e.EdgeGateway.HREF)
+
+	req := e.c.NewRequest(map[string]string{}, "GET", *u, nil)
+
+	resp, err := checkResp(e.c.Http.Do(req))
+	if err != nil {
+		return fmt.Errorf("error retrieving Edge Gateway: %s", err)
+	}
+
+	// Empty struct before a new unmarshal, otherwise we end up with duplicate
+	// elements in slices.
+	e.EdgeGateway = &types.EdgeGateway{}
+
+	if err = decodeBody(resp, e.EdgeGateway); err != nil {
+		return fmt.Errorf("error decoding Edge Gateway response: %s", err)
+	}
+
+	// The request was successful
+	return nil
+}
+
+func (e *EdgeGateway) Remove1to1Mapping(internal, external string) (Task, error) {
+
+	// Refresh EdgeGateway rules
+	err := e.Refresh()
+	if err != nil {
+		return Task{}, fmt.Errorf("error refreshing Edge Gateway: %s", err)
+	}
+
+	var uplinkif string
+	for _, gifs := range e.EdgeGateway.Configuration.GatewayInterfaces.GatewayInterface {
+		if gifs.InterfaceType == "uplink" {
+			uplinkif = gifs.Network.HREF
+		}
+	}
+
+	newedgeconfig := e.EdgeGateway.Configuration.EdgeGatewayServiceConfiguration
+
+	// Take care of the NAT service
+	newnatservice := &types.NatService{}
+
+	// Copy over the NAT configuration
+	newnatservice.IsEnabled = newedgeconfig.NatService.IsEnabled
+	newnatservice.NatType = newedgeconfig.NatService.NatType
+	newnatservice.Policy = newedgeconfig.NatService.Policy
+	newnatservice.ExternalIP = newedgeconfig.NatService.ExternalIP
+
+	for k, v := range newedgeconfig.NatService.NatRule {
+
+		// Kludgy IF to avoid deleting DNAT rules not created by us.
+		// If matches, let's skip it and continue the loop
+		if v.RuleType == "DNAT" &&
+			v.GatewayNatRule.OriginalIP == external &&
+			v.GatewayNatRule.TranslatedIP == internal &&
+			v.GatewayNatRule.OriginalPort == "any" &&
+			v.GatewayNatRule.TranslatedPort == "any" &&
+			v.GatewayNatRule.Protocol == "any" &&
+			v.GatewayNatRule.Interface.HREF == uplinkif {
+			continue
+		}
+
+		// Kludgy IF to avoid deleting SNAT rules not created by us.
+		// If matches, let's skip it and continue the loop
+		if v.RuleType == "SNAT" &&
+			v.GatewayNatRule.OriginalIP == internal &&
+			v.GatewayNatRule.TranslatedIP == external &&
+			v.GatewayNatRule.Interface.HREF == uplinkif {
+			continue
+		}
+
+		// If doesn't match the above IFs, it's something we need to preserve,
+		// let's add it to the new NatService struct
+		newnatservice.NatRule = append(newnatservice.NatRule, newedgeconfig.NatService.NatRule[k])
+	}
+
+	// Fill the new NatService Section
+	newedgeconfig.NatService = newnatservice
+
+	// Take care of the Firewall service
+	newfwservice := &types.FirewallService{}
+
+	// Copy over the firewall configuration
+	newfwservice.IsEnabled = newedgeconfig.FirewallService.IsEnabled
+	newfwservice.DefaultAction = newedgeconfig.FirewallService.DefaultAction
+	newfwservice.LogDefaultAction = newedgeconfig.FirewallService.LogDefaultAction
+
+	for k, v := range newedgeconfig.FirewallService.FirewallRule {
+
+		// Kludgy IF to avoid deleting inbound FW rules not created by us.
+		// If matches, let's skip it and continue the loop
+		if v.Policy == "allow" &&
+			v.Protocols.Any == true &&
+			v.DestinationPortRange == "Any" &&
+			v.SourcePortRange == "Any" &&
+			v.SourceIP == "Any" &&
+			v.DestinationIP == external {
+			continue
+		}
+
+		// Kludgy IF to avoid deleting outbound FW rules not created by us.
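+		// (This is the outbound allow rule that Create1to1Mapping adds for
+		// the internal address.)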
+ // If matches, let's skip it and continue the loop + if v.Policy == "allow" && + v.Protocols.Any == true && + v.DestinationPortRange == "Any" && + v.SourcePortRange == "Any" && + v.SourceIP == internal && + v.DestinationIP == "Any" { + continue + } + + // If doesn't match the above IFs, it's something we need to preserve, + // let's add it to the new FirewallService struct + newfwservice.FirewallRule = append(newfwservice.FirewallRule, newedgeconfig.FirewallService.FirewallRule[k]) + + } + + // Fill the new FirewallService Section + newedgeconfig.FirewallService = newfwservice + + // Fix + newedgeconfig.NatService.IsEnabled = true + + output, err := xml.MarshalIndent(newedgeconfig, " ", " ") + if err != nil { + fmt.Printf("error: %v\n", err) + } + + debug := os.Getenv("GOVCLOUDAIR_DEBUG") + + if debug == "true" { + fmt.Printf("\n\nXML DEBUG: %s\n\n", string(output)) + } + + b := bytes.NewBufferString(xml.Header + string(output)) + + s, _ := url.ParseRequestURI(e.EdgeGateway.HREF) + s.Path += "/action/configureServices" + + req := e.c.NewRequest(map[string]string{}, "POST", *s, b) + + req.Header.Add("Content-Type", "application/vnd.vmware.admin.edgeGatewayServiceConfiguration+xml") + + resp, err := checkResp(e.c.Http.Do(req)) + if err != nil { + return Task{}, fmt.Errorf("error reconfiguring Edge Gateway: %s", err) + } + + task := NewTask(e.c) + + if err = decodeBody(resp, task.Task); err != nil { + return Task{}, fmt.Errorf("error decoding Task response: %s", err) + } + + // The request was successful + return *task, nil + +} + +func (e *EdgeGateway) Create1to1Mapping(internal, external, description string) (Task, error) { + + // Refresh EdgeGateway rules + err := e.Refresh() + if err != nil { + fmt.Printf("error: %v\n", err) + } + + var uplinkif string + for _, gifs := range e.EdgeGateway.Configuration.GatewayInterfaces.GatewayInterface { + if gifs.InterfaceType == "uplink" { + uplinkif = gifs.Network.HREF + } + } + + newedgeconfig := e.EdgeGateway.Configuration.EdgeGatewayServiceConfiguration + + snat := &types.NatRule{ + Description: description, + RuleType: "SNAT", + IsEnabled: true, + GatewayNatRule: &types.GatewayNatRule{ + Interface: &types.Reference{ + HREF: uplinkif, + }, + OriginalIP: internal, + TranslatedIP: external, + Protocol: "any", + }, + } + + newedgeconfig.NatService.NatRule = append(newedgeconfig.NatService.NatRule, snat) + + dnat := &types.NatRule{ + Description: description, + RuleType: "DNAT", + IsEnabled: true, + GatewayNatRule: &types.GatewayNatRule{ + Interface: &types.Reference{ + HREF: uplinkif, + }, + OriginalIP: external, + OriginalPort: "any", + TranslatedIP: internal, + TranslatedPort: "any", + Protocol: "any", + }, + } + + newedgeconfig.NatService.NatRule = append(newedgeconfig.NatService.NatRule, dnat) + + fwin := &types.FirewallRule{ + Description: description, + IsEnabled: true, + Policy: "allow", + Protocols: &types.FirewallRuleProtocols{ + Any: true, + }, + DestinationPortRange: "Any", + DestinationIP: external, + SourcePortRange: "Any", + SourceIP: "Any", + EnableLogging: false, + } + + newedgeconfig.FirewallService.FirewallRule = append(newedgeconfig.FirewallService.FirewallRule, fwin) + + fwout := &types.FirewallRule{ + Description: description, + IsEnabled: true, + Policy: "allow", + Protocols: &types.FirewallRuleProtocols{ + Any: true, + }, + DestinationPortRange: "Any", + DestinationIP: "Any", + SourcePortRange: "Any", + SourceIP: internal, + EnableLogging: false, + } + + newedgeconfig.FirewallService.FirewallRule = 
append(newedgeconfig.FirewallService.FirewallRule, fwout) + + output, err := xml.MarshalIndent(newedgeconfig, " ", " ") + if err != nil { + fmt.Printf("error: %v\n", err) + } + + debug := os.Getenv("GOVCLOUDAIR_DEBUG") + + if debug == "true" { + fmt.Printf("\n\nXML DEBUG: %s\n\n", string(output)) + } + + b := bytes.NewBufferString(xml.Header + string(output)) + + s, _ := url.ParseRequestURI(e.EdgeGateway.HREF) + s.Path += "/action/configureServices" + + req := e.c.NewRequest(map[string]string{}, "POST", *s, b) + + req.Header.Add("Content-Type", "application/vnd.vmware.admin.edgeGatewayServiceConfiguration+xml") + + resp, err := checkResp(e.c.Http.Do(req)) + if err != nil { + return Task{}, fmt.Errorf("error reconfiguring Edge Gateway: %s", err) + } + + task := NewTask(e.c) + + if err = decodeBody(resp, task.Task); err != nil { + return Task{}, fmt.Errorf("error decoding Task response: %s", err) + } + + // The request was successful + return *task, nil + +} diff --git a/vendor/github.com/hmrc/vmware-govcd/edgegateway_test.go b/vendor/github.com/hmrc/vmware-govcd/edgegateway_test.go new file mode 100644 index 000000000..6aebd6d78 --- /dev/null +++ b/vendor/github.com/hmrc/vmware-govcd/edgegateway_test.go @@ -0,0 +1,409 @@ +/* + * Copyright 2014 VMware, Inc. All rights reserved. Licensed under the Apache v2 License. + */ + +package govcd + +import ( + "github.com/hmrc/vmware-govcd/testutil" + + . "gopkg.in/check.v1" +) + +func (s *S) Test_Refresh(c *C) { + + // Get the Org populated + testServer.ResponseMap(2, testutil.ResponseMap{ + "/api/vdc/00000000-0000-0000-0000-000000000000/edgeGateways": testutil.Response{200, nil, edgegatewayqueryresultsExample}, + "/api/admin/edgeGateway/00000000-0000-0000-0000-000000000000": testutil.Response{200, nil, edgegatewayExample}, + }) + + edge, err := s.vdc.FindEdgeGateway("M916272752-5793") + _ = testServer.WaitRequests(2) + testServer.Flush() + + c.Assert(err, IsNil) + c.Assert(edge.EdgeGateway.Name, Equals, "M916272752-5793") + + testServer.Response(200, nil, edgegatewayExample) + err = edge.Refresh() + _ = testServer.WaitRequest() + testServer.Flush() + + c.Assert(err, IsNil) + c.Assert(edge.EdgeGateway.Name, Equals, "M916272752-5793") + +} + +func (s *S) Test_NATMapping(c *C) { + testServer.ResponseMap(2, testutil.ResponseMap{ + "/api/vdc/00000000-0000-0000-0000-000000000000/edgeGateways": testutil.Response{200, nil, edgegatewayqueryresultsExample}, + "/api/admin/edgeGateway/00000000-0000-0000-0000-000000000000": testutil.Response{200, nil, edgegatewayExample}, + }) + + edge, err := s.vdc.FindEdgeGateway("M916272752-5793") + _ = testServer.WaitRequests(2) + testServer.Flush() + + c.Assert(err, IsNil) + c.Assert(edge.EdgeGateway.Name, Equals, "M916272752-5793") + + testServer.ResponseMap(2, testutil.ResponseMap{ + "/api/admin/edgeGateway/00000000-0000-0000-0000-000000000000": testutil.Response{200, nil, edgegatewayExample}, + "/api/admin/edgeGateway/00000000-0000-0000-0000-000000000000/action/configureServices": testutil.Response{200, nil, taskExample}, + }) + + _, err = edge.AddNATMapping("DNAT", "10.0.0.1", "20.0.0.2", "77") + _ = testServer.WaitRequest() + + c.Assert(err, IsNil) + + testServer.ResponseMap(2, testutil.ResponseMap{ + "/api/admin/edgeGateway/00000000-0000-0000-0000-000000000000": testutil.Response{200, nil, edgegatewayExample}, + "/api/admin/edgeGateway/00000000-0000-0000-0000-000000000000/action/configureServices": testutil.Response{200, nil, taskExample}, + }) + + _, err = edge.RemoveNATMapping("DNAT", "10.0.0.1", "20.0.0.2", "77") + 
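+	// Add/RemoveNATMapping each issue a single configureServices POST,
+	// hence the single WaitRequest here.
+	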
_ = testServer.WaitRequest() + + c.Assert(err, IsNil) + +} + +func (s *S) Test_1to1Mappings(c *C) { + + testServer.ResponseMap(2, testutil.ResponseMap{ + "/api/vdc/00000000-0000-0000-0000-000000000000/edgeGateways": testutil.Response{200, nil, edgegatewayqueryresultsExample}, + "/api/admin/edgeGateway/00000000-0000-0000-0000-000000000000": testutil.Response{200, nil, edgegatewayExample}, + }) + + edge, err := s.vdc.FindEdgeGateway("M916272752-5793") + _ = testServer.WaitRequests(2) + testServer.Flush() + + c.Assert(err, IsNil) + c.Assert(edge.EdgeGateway.Name, Equals, "M916272752-5793") + + testServer.ResponseMap(2, testutil.ResponseMap{ + "/api/admin/edgeGateway/00000000-0000-0000-0000-000000000000": testutil.Response{200, nil, edgegatewayExample}, + "/api/admin/edgeGateway/00000000-0000-0000-0000-000000000000/action/configureServices": testutil.Response{200, nil, taskExample}, + }) + + _, err = edge.Create1to1Mapping("10.0.0.1", "20.0.0.2", "description") + _ = testServer.WaitRequests(2) + + c.Assert(err, IsNil) + + testServer.ResponseMap(2, testutil.ResponseMap{ + "/api/admin/edgeGateway/00000000-0000-0000-0000-000000000000": testutil.Response{200, nil, edgegatewayExample}, + "/api/admin/edgeGateway/00000000-0000-0000-0000-000000000000/action/configureServices": testutil.Response{200, nil, taskExample}, + }) + + _, err = edge.Remove1to1Mapping("10.0.0.1", "20.0.0.2") + _ = testServer.WaitRequests(2) + + c.Assert(err, IsNil) + +} + +var edgegatewayqueryresultsExample = ` + + + + + +` + +var edgegatewayExample = ` + + + + + + + + compact + + + JGray Network + JGray Network + + internal + + 192.168.108.1 + 255.255.255.0 + 192.168.108.1 + + false + false + + + M916272752-5793-default-routed + M916272752-5793-default-routed + + internal + + 192.168.109.1 + 255.255.255.0 + 192.168.109.1 + + false + false + + + d2p3-ext + d2p3-ext + + uplink + + 23.92.224.1 + 255.255.254.0 + 23.92.225.51 + + + 23.92.225.73 + 23.92.225.75 + + + 23.92.225.27 + 23.92.225.27 + + + 23.92.225.11 + 23.92.225.11 + + + 23.92.225.54 + 23.92.225.54 + + + 23.92.224.34 + 23.92.224.34 + + + 23.92.225.5 + 23.92.225.5 + + + 23.92.225.34 + 23.92.225.34 + + + 23.92.224.255 + 23.92.224.255 + + + 23.92.225.49 + 23.92.225.51 + + + 23.92.225.62 + 23.92.225.62 + + + 23.92.225.43 + 23.92.225.43 + + + 23.92.225.30 + 23.92.225.30 + + + + true + 1024.0 + 1024.0 + true + + + + + true + drop + false + + 1 + true + false + ssh prd-001 + allow + + true + + 999 + 999 + 23.92.225.51 + -1 + Any + 60.241.110.111 + false + + + 2 + true + false + inet prd-001 + allow + + true + + -1 + Any + Any + -1 + Any + 192.168.109.8 + false + + + 3 + true + false + suppah megah rulez creati0nz + allow + + true + + -1 + Any + 23.92.224.255 + -1 + Any + Any + false + + + 4 + true + false + suppah megah rulez creati0nz + allow + + true + + -1 + Any + Any + -1 + Any + 192.168.109.3 + false + + + + false + + DNAT + true + 65537 + + + 23.92.225.51 + 999 + 192.168.109.8 + 22 + Tcp + + + + SNAT + true + 65538 + + + 192.168.109.8 + 23.92.225.51 + + + + SNAT + true + 65539 + + + 192.168.109.3 + 23.92.224.255 + + + + DNAT + true + 65540 + + + 23.92.224.255 + any + 192.168.109.3 + any + any + + + + + true + + Test VPN Alpha + For OC Pod Testing + + 192.168.110.99 + + 64.184.133.62 + 192.168.110.99 + 23.92.225.51 + 23.92.225.51 + + M916272752-5793-default-routed + 192.168.109.1 + 255.255.255.0 + + + 192.168.120.0/24 + 192.168.120.0 + 255.255.255.0 + + Blah1Blah2Blah3Blah1Blah2Blah3Blah1Blah2Blah3 + false + AES256 + 1500 + true + false + + + JGray VPN + + 67.172.148.74 + 
+ 67.172.148.74 + 67.172.148.74 + 23.92.225.51 + 23.92.225.51 + + JGray Network + 192.168.108.1 + 255.255.255.0 + + + 192.168.1.0/24 + 192.168.1.0 + 255.255.255.0 + + fM7puHkGbg6tqpd4jKNhBo45mbs8fw3yapgt3H7f97a2jrkLyYjP9eSH4oGwps7u + false + AES256 + 1500 + false + false + + + + false + + + false + + + true + true + + + ` diff --git a/vendor/github.com/hmrc/vmware-govcd/org.go b/vendor/github.com/hmrc/vmware-govcd/org.go new file mode 100644 index 000000000..574cf5392 --- /dev/null +++ b/vendor/github.com/hmrc/vmware-govcd/org.go @@ -0,0 +1,56 @@ +/* + * Copyright 2014 VMware, Inc. All rights reserved. Licensed under the Apache v2 License. + */ + +package govcd + +import ( + "fmt" + "net/url" + + types "github.com/hmrc/vmware-govcd/types/v56" +) + +type Org struct { + Org *types.Org + c *Client +} + +func NewOrg(c *Client) *Org { + return &Org{ + Org: new(types.Org), + c: c, + } +} + +func (o *Org) FindCatalog(catalog string) (Catalog, error) { + + for _, av := range o.Org.Link { + if av.Rel == "down" && av.Type == "application/vnd.vmware.vcloud.catalog+xml" && av.Name == catalog { + u, err := url.ParseRequestURI(av.HREF) + + if err != nil { + return Catalog{}, fmt.Errorf("error decoding org response: %s", err) + } + + req := o.c.NewRequest(map[string]string{}, "GET", *u, nil) + + resp, err := checkResp(o.c.Http.Do(req)) + if err != nil { + return Catalog{}, fmt.Errorf("error retreiving catalog: %s", err) + } + + cat := NewCatalog(o.c) + + if err = decodeBody(resp, cat.Catalog); err != nil { + return Catalog{}, fmt.Errorf("error decoding catalog response: %s", err) + } + + // The request was successful + return *cat, nil + + } + } + + return Catalog{}, fmt.Errorf("can't find catalog: %s", catalog) +} diff --git a/vendor/github.com/hmrc/vmware-govcd/org_test.go b/vendor/github.com/hmrc/vmware-govcd/org_test.go new file mode 100644 index 000000000..1440c7b4d --- /dev/null +++ b/vendor/github.com/hmrc/vmware-govcd/org_test.go @@ -0,0 +1,55 @@ +/* + * Copyright 2014 VMware, Inc. All rights reserved. Licensed under the Apache v2 License. + */ + +package govcd + +import ( + . "gopkg.in/check.v1" +) + +func (s *S) Test_FindCatalog(c *C) { + + // Get the Org populated + testServer.Response(200, nil, orgExample) + org, err := s.vdc.GetVDCOrg() + _ = testServer.WaitRequest() + testServer.Flush() + c.Assert(err, IsNil) + + // Find Catalog + testServer.Response(200, nil, catalogExample) + cat, err := org.FindCatalog("Public Catalog") + _ = testServer.WaitRequest() + testServer.Flush() + c.Assert(err, IsNil) + c.Assert(cat.Catalog.Description, Equals, "vCHS service catalog") + +} + +var orgExample = ` + + + + + + + + + + + + + + + + + + + + + + + OrganizationName + + ` diff --git a/vendor/github.com/hmrc/vmware-govcd/orgvdcnetwork.go b/vendor/github.com/hmrc/vmware-govcd/orgvdcnetwork.go new file mode 100644 index 000000000..1c15ebb96 --- /dev/null +++ b/vendor/github.com/hmrc/vmware-govcd/orgvdcnetwork.go @@ -0,0 +1,141 @@ +/* + * Copyright 2014 VMware, Inc. All rights reserved. Licensed under the Apache v2 License. 
+ */ + +package govcd + +import ( + "bytes" + "encoding/xml" + "fmt" + types "github.com/hmrc/vmware-govcd/types/v56" + "log" + "net/http" + "net/url" + "regexp" + "strings" + "time" +) + +// OrgVDCNetwork an org vdc network client +type OrgVDCNetwork struct { + OrgVDCNetwork *types.OrgVDCNetwork + c *Client +} + +// NewOrgVDCNetwork creates an org vdc network client +func NewOrgVDCNetwork(c *Client) *OrgVDCNetwork { + return &OrgVDCNetwork{ + OrgVDCNetwork: new(types.OrgVDCNetwork), + c: c, + } +} + +func (o *OrgVDCNetwork) Refresh() error { + if o.OrgVDCNetwork.HREF == "" { + return fmt.Errorf("cannot refresh, Object is empty") + } + + u, _ := url.ParseRequestURI(o.OrgVDCNetwork.HREF) + + req := o.c.NewRequest(map[string]string{}, "GET", *u, nil) + + resp, err := checkResp(o.c.Http.Do(req)) + if err != nil { + return fmt.Errorf("error retrieving task: %s", err) + } + + // Empty struct before a new unmarshal, otherwise we end up with duplicate + // elements in slices. + o.OrgVDCNetwork = &types.OrgVDCNetwork{} + + if err = decodeBody(resp, o.OrgVDCNetwork); err != nil { + return fmt.Errorf("error decoding task response: %s", err) + } + + // The request was successful + return nil +} + +func (o *OrgVDCNetwork) Delete() (Task, error) { + err := o.Refresh() + if err != nil { + return Task{}, fmt.Errorf("Error refreshing network: %s", err) + } + pathArr := strings.Split(o.OrgVDCNetwork.HREF, "/") + s, _ := url.ParseRequestURI(o.OrgVDCNetwork.HREF) + s.Path = "/api/admin/network/" + pathArr[len(pathArr)-1] + + var resp *http.Response + for { + req := o.c.NewRequest(map[string]string{}, "DELETE", *s, nil) + resp, err = checkResp(o.c.Http.Do(req)) + if err != nil { + if v, _ := regexp.MatchString("is busy, cannot proceed with the operation.$", err.Error()); v { + time.Sleep(3 * time.Second) + continue + } + return Task{}, fmt.Errorf("error deleting Network: %s", err) + } + break + } + + task := NewTask(o.c) + + if err = decodeBody(resp, task.Task); err != nil { + return Task{}, fmt.Errorf("error decoding Task response: %s", err) + } + + // The request was successful + return *task, nil +} + +func (v *Vdc) CreateOrgVDCNetwork(networkConfig *types.OrgVDCNetwork) error { + for _, av := range v.Vdc.Link { + if av.Rel == "add" && av.Type == "application/vnd.vmware.vcloud.orgVdcNetwork+xml" { + u, err := url.ParseRequestURI(av.HREF) + //return fmt.Errorf("Test output: %#v") + + if err != nil { + return fmt.Errorf("error decoding vdc response: %s", err) + } + + output, err := xml.MarshalIndent(networkConfig, " ", " ") + if err != nil { + return fmt.Errorf("error marshaling OrgVDCNetwork compose: %s", err) + } + + //return fmt.Errorf("Test output: %s\n%#v", b, v.c) + + var resp *http.Response + for { + b := bytes.NewBufferString(xml.Header + string(output)) + log.Printf("[DEBUG] VCD Client configuration: %s", b) + req := v.c.NewRequest(map[string]string{}, "POST", *u, b) + req.Header.Add("Content-Type", av.Type) + resp, err = checkResp(v.c.Http.Do(req)) + if err != nil { + if v, _ := regexp.MatchString("is busy, cannot proceed with the operation.$", err.Error()); v { + time.Sleep(3 * time.Second) + continue + } + return fmt.Errorf("error instantiating a new OrgVDCNetwork: %s", err) + } + break + } + newstuff := NewOrgVDCNetwork(v.c) + if err = decodeBody(resp, newstuff.OrgVDCNetwork); err != nil { + return fmt.Errorf("error decoding orgvdcnetwork response: %s", err) + } + task := NewTask(v.c) + for _, t := range newstuff.OrgVDCNetwork.Tasks.Task { + task.Task = t + err = task.WaitTaskCompletion() + if 
err != nil { + return fmt.Errorf("Error performing task: %#v", err) + } + } + } + } + return nil +} diff --git a/vendor/github.com/hmrc/vmware-govcd/orgvdcnetwork_test.go b/vendor/github.com/hmrc/vmware-govcd/orgvdcnetwork_test.go new file mode 100644 index 000000000..0e7421412 --- /dev/null +++ b/vendor/github.com/hmrc/vmware-govcd/orgvdcnetwork_test.go @@ -0,0 +1,64 @@ +/* + * Copyright 2014 VMware, Inc. All rights reserved. Licensed under the Apache v2 License. + */ + +package govcd + +import ( + "github.com/hmrc/vmware-govcd/testutil" + + . "gopkg.in/check.v1" +) + +func (s *S) Test_NetRefresh(c *C) { + + // Get the Org populated + testServer.ResponseMap(1, testutil.ResponseMap{ + "/api/network/44444444-4444-4444-4444-4444444444444": testutil.Response{200, nil, orgvdcnetExample}, + }) + + network, err := s.vdc.FindVDCNetwork("networkName") + _ = testServer.WaitRequest() + testServer.Flush() + + c.Assert(err, IsNil) + c.Assert(network.OrgVDCNetwork.Name, Equals, "networkName") + + testServer.Response(200, nil, orgvdcnetExample) + err = network.Refresh() + _ = testServer.WaitRequest() + testServer.Flush() + + c.Assert(err, IsNil) + c.Assert(network.OrgVDCNetwork.Name, Equals, "networkName") + +} + +var orgvdcnetExample = ` + + + + + + This routed network was created with Create VDC. + + + + false + 192.168.109.1 + 255.255.255.0 + true + + + 192.168.109.2 + 192.168.109.100 + + + + + natRouted + false + + false + + ` diff --git a/vendor/github.com/hmrc/vmware-govcd/script/coverage b/vendor/github.com/hmrc/vmware-govcd/script/coverage new file mode 100644 index 000000000..343a166d2 --- /dev/null +++ b/vendor/github.com/hmrc/vmware-govcd/script/coverage @@ -0,0 +1,51 @@ +#!/bin/sh +# Generate test coverage statistics for Go packages. +# +# Works around the fact that `go test -coverprofile` currently does not work +# with multiple packages, see https://code.google.com/p/go/issues/detail?id=6909 +# +# Usage: script/coverage [--html|--coveralls] +# +# --html Additionally create HTML report and open it in browser +# --coveralls Push coverage statistics to coveralls.io +# + +set -e + +workdir=.cover +profile="$workdir/cover.out" +mode=count + +generate_cover_data() { + rm -rf "$workdir" + mkdir "$workdir" + + for pkg in "$@"; do + f="$workdir/$(echo $pkg | tr / -).cover" + go test -covermode="$mode" -coverprofile="$f" "$pkg" + done + + echo "mode: $mode" >"$profile" + grep -h -v "^mode:" "$workdir"/*.cover >>"$profile" +} + +show_cover_report() { + go tool cover -${1}="$profile" +} + +push_to_coveralls() { + echo "Pushing coverage statistics to coveralls.io" +} + +generate_cover_data $(go list ./...) +show_cover_report func +case "$1" in +"") + ;; +--html) + show_cover_report html ;; +--coveralls) + push_to_coveralls ;; +*) + echo >&2 "error: invalid option: $1"; exit 1 ;; +esac diff --git a/vendor/github.com/hmrc/vmware-govcd/task.go b/vendor/github.com/hmrc/vmware-govcd/task.go new file mode 100644 index 000000000..3a191e99f --- /dev/null +++ b/vendor/github.com/hmrc/vmware-govcd/task.go @@ -0,0 +1,77 @@ +/* + * Copyright 2014 VMware, Inc. All rights reserved. Licensed under the Apache v2 License. 
+ */
+
+package govcd
+
+import (
+	"fmt"
+	"net/url"
+	"time"
+
+	types "github.com/hmrc/vmware-govcd/types/v56"
+)
+
+type Task struct {
+	Task *types.Task
+	c    *Client
+}
+
+func NewTask(c *Client) *Task {
+	return &Task{
+		Task: new(types.Task),
+		c:    c,
+	}
+}
+
+func (t *Task) Refresh() error {
+
+	if t.Task == nil {
+		return fmt.Errorf("cannot refresh, Object is empty")
+	}
+
+	u, _ := url.ParseRequestURI(t.Task.HREF)
+
+	req := t.c.NewRequest(map[string]string{}, "GET", *u, nil)
+
+	resp, err := checkResp(t.c.Http.Do(req))
+	if err != nil {
+		return fmt.Errorf("error retrieving task: %s", err)
+	}
+
+	// Empty struct before a new unmarshal, otherwise we end up with duplicate
+	// elements in slices.
+	t.Task = &types.Task{}
+
+	if err = decodeBody(resp, t.Task); err != nil {
+		return fmt.Errorf("error decoding task response: %s", err)
+	}
+
+	// The request was successful
+	return nil
+}
+
+func (t *Task) WaitTaskCompletion() error {
+
+	if t.Task == nil {
+		return fmt.Errorf("cannot refresh, Object is empty")
+	}
+
+	for {
+		err := t.Refresh()
+		if err != nil {
+			return fmt.Errorf("error retrieving task: %s", err)
+		}
+
+		// If the task is no longer in a waiting status we're done; check for
+		// an error status and report it.
+		if t.Task.Status != "queued" && t.Task.Status != "preRunning" && t.Task.Status != "running" {
+			if t.Task.Status == "error" {
+				return fmt.Errorf("task did not complete successfully: %s", t.Task.Description)
+			}
+			return nil
+		}
+
+		// Sleep for 3 seconds and try again.
+		time.Sleep(3 * time.Second)
+	}
+}
diff --git a/vendor/github.com/hmrc/vmware-govcd/task_test.go b/vendor/github.com/hmrc/vmware-govcd/task_test.go
new file mode 100644
index 000000000..4b94b9c6d
--- /dev/null
+++ b/vendor/github.com/hmrc/vmware-govcd/task_test.go
@@ -0,0 +1,35 @@
+/*
+ * Copyright 2014 VMware, Inc. All rights reserved. Licensed under the Apache v2 License.
+ */
+
+package govcd
+
+import (
+	. "gopkg.in/check.v1"
+)
+
+func (s *S) Test_WaitTaskCompletion(c *C) {
+
+	testServer.Response(200, nil, taskExample)
+	task, err := s.vapp.Deploy()
+	_ = testServer.WaitRequest()
+	testServer.Flush()
+	c.Assert(err, IsNil)
+
+	testServer.Response(200, nil, taskExample)
+	err = task.WaitTaskCompletion()
+	_ = testServer.WaitRequest()
+	testServer.Flush()
+	c.Assert(err, IsNil)
+
+}
+
+var taskExample = `
+
+
+
+
+ 100
+
+ + ` diff --git a/vendor/github.com/hmrc/vmware-govcd/testutil/http.go b/vendor/github.com/hmrc/vmware-govcd/testutil/http.go new file mode 100644 index 000000000..5ce9e66c5 --- /dev/null +++ b/vendor/github.com/hmrc/vmware-govcd/testutil/http.go @@ -0,0 +1,186 @@ +// +// The software in this package is published under the terms of the Mozilla +// Public License, version 2.0 a copy of which has been included with this +// distribution in the LICENSE file. +// + +package testutil + +import ( + "bytes" + "fmt" + "io/ioutil" + "net" + "net/http" + "net/url" + "os" + "time" +) + +type HTTPServer struct { + URL string + Timeout time.Duration + started bool + request chan *http.Request + response chan ResponseFunc +} + +type Response struct { + Status int + Headers map[string]string + Body string +} + +var DefaultClient = &http.Client{ + Transport: &http.Transport{ + Proxy: http.ProxyFromEnvironment, + }, +} + +func NewHTTPServer() *HTTPServer { + return &HTTPServer{URL: "http://localhost:4444", Timeout: 5 * time.Second} +} + +type ResponseFunc func(path string) Response + +func (s *HTTPServer) Start() { + if s.started { + return + } + s.started = true + s.request = make(chan *http.Request, 1024) + s.response = make(chan ResponseFunc, 1024) + u, err := url.Parse(s.URL) + if err != nil { + panic(err) + } + l, err := net.Listen("tcp", u.Host) + if err != nil { + panic(err) + } + go http.Serve(l, s) + + s.Response(203, nil, "") + for { + // Wait for it to be up. + resp, err := http.Get(s.URL) + if err == nil && resp.StatusCode == 203 { + break + } + time.Sleep(1e8) + } + s.WaitRequest() // Consume dummy request. +} + +// Flush discards all pending requests and responses. +func (s *HTTPServer) Flush() { + for { + select { + case <-s.request: + case <-s.response: + default: + return + } + } +} + +func body(req *http.Request) string { + data, err := ioutil.ReadAll(req.Body) + if err != nil { + panic(err) + } + return string(data) +} + +func (s *HTTPServer) ServeHTTP(w http.ResponseWriter, req *http.Request) { + req.ParseMultipartForm(1e6) + data, err := ioutil.ReadAll(req.Body) + if err != nil { + panic(err) + } + req.Body = ioutil.NopCloser(bytes.NewBuffer(data)) + s.request <- req + var resp Response + select { + case respFunc := <-s.response: + resp = respFunc(req.URL.Path) + case <-time.After(s.Timeout): + const msg = "ERROR: Timeout waiting for test to prepare a response\n" + fmt.Fprintf(os.Stderr, msg) + resp = Response{500, nil, msg} + } + if resp.Headers != nil { + h := w.Header() + for k, v := range resp.Headers { + h.Set(k, v) + } + } + if resp.Status != 0 { + w.WriteHeader(resp.Status) + } + w.Write([]byte(resp.Body)) +} + +// WaitRequests returns the next n requests made to the http server from +// the queue. If not enough requests were previously made, it waits until +// the timeout value for them to be made. +func (s *HTTPServer) WaitRequests(n int) []*http.Request { + reqs := make([]*http.Request, 0, n) + for i := 0; i < n; i++ { + select { + case req := <-s.request: + reqs = append(reqs, req) + case <-time.After(s.Timeout): + panic("Timeout waiting for request") + } + } + return reqs +} + +// WaitRequest returns the next request made to the http server from +// the queue. If no requests were previously made, it waits until the +// timeout value for one to be made. +func (s *HTTPServer) WaitRequest() *http.Request { + return s.WaitRequests(1)[0] +} + +// ResponseFunc prepares the test server to respond the following n +// requests using f to build each response. 
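+// +// A minimal usage sketch (hypothetical test code; only identifiers defined in +// this file are used): +// +//	srv := NewHTTPServer() +//	srv.Start() +//	srv.ResponseFunc(1, func(path string) Response { +//		return Response{Status: 200, Body: "hello from " + path} +//	}) +//	resp, err := http.Get(srv.URL + "/ping") // served with the prepared response +//	req := srv.WaitRequest()                 // pops the recorded *http.Request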
+func (s *HTTPServer) ResponseFunc(n int, f ResponseFunc) { + for i := 0; i < n; i++ { + s.response <- f + } +} + +// ResponseMap maps request paths to responses. +type ResponseMap map[string]Response + +// ResponseMap prepares the test server to respond the following n +// requests using the m to obtain the responses. +func (s *HTTPServer) ResponseMap(n int, m ResponseMap) { + f := func(path string) Response { + for rpath, resp := range m { + if rpath == path { + return resp + } + } + body := "Path not found in response map: " + path + return Response{Status: 500, Body: body} + } + s.ResponseFunc(n, f) +} + +// Responses prepares the test server to respond the following n requests +// using the provided response parameters. +func (s *HTTPServer) Responses(n int, status int, headers map[string]string, body string) { + f := func(path string) Response { + return Response{status, headers, body} + } + s.ResponseFunc(n, f) +} + +// Response prepares the test server to respond the following request +// using the provided response parameters. +func (s *HTTPServer) Response(status int, headers map[string]string, body string) { + s.Responses(1, status, headers, body) +} diff --git a/vendor/github.com/hmrc/vmware-govcd/types/v56/types.go b/vendor/github.com/hmrc/vmware-govcd/types/v56/types.go new file mode 100644 index 000000000..7a345a30e --- /dev/null +++ b/vendor/github.com/hmrc/vmware-govcd/types/v56/types.go @@ -0,0 +1,1508 @@ +/* + * Copyright 2014 VMware, Inc. All rights reserved. Licensed under the Apache v2 License. + */ + +package types + +import ( + "encoding/xml" +) + +// Maps status Attribute Values for VAppTemplate, VApp, Vm, and Media Objects +var VAppStatuses = map[int]string{ + -1: "FAILED_CREATION", + 0: "UNRESOLVED", + 1: "RESOLVED", + 2: "DEPLOYED", + 3: "SUSPENDED", + 4: "POWERED_ON", + 5: "WAITING_FOR_INPUT", + 6: "UNKNOWN", + 7: "UNRECOGNIZED", + 8: "POWERED_OFF", + 9: "INCONSISTENT_STATE", + 10: "MIXED", + 11: "DESCRIPTOR_PENDING", + 12: "COPYING_CONTENTS", + 13: "DISK_CONTENTS_PENDING", + 14: "QUARANTINED", + 15: "QUARANTINE_EXPIRED", + 16: "REJECTED", + 17: "TRANSFER_TIMEOUT", + 18: "VAPP_UNDEPLOYED", + 19: "VAPP_PARTIALLY_DEPLOYED", +} + +// Maps status Attribute Values for VDC Objects +var VDCStatuses = map[int]string{ + -1: "FAILED_CREATION", + 0: "NOT_READY", + 1: "READY", + 2: "UNKNOWN", + 3: "UNRECOGNIZED", +} + +// VCD API + +// DefaultStorageProfileSection is the name of the storage profile that will be specified for this virtual machine. The named storage profile must exist in the organization vDC that contains the virtual machine. If not specified, the default storage profile for the vDC is used. +// Type: DefaultStorageProfileSection_Type +// Namespace: http://www.vmware.com/vcloud/v1.5 +// Description: Name of the storage profile that will be specified for this virtual machine. The named storage profile must exist in the organization vDC that contains the virtual machine. If not specified, the default storage profile for the vDC is used. +// Since: 5.1 +type DefaultStorageProfileSection struct { + StorageProfile string `xml:"StorageProfile,omitempty"` +} + +// CustomizationSection represents a vApp template customization settings. +// Type: CustomizationSectionType +// Namespace: http://www.vmware.com/vcloud/v1.5 +// Description: Represents a vApp template customization settings. 
+// Since: 1.0 +type CustomizationSection struct { + // FIXME: OVF Section needs to be laid down correctly + Info string `xml:"ovf:Info"` + // + GoldMaster bool `xml:"goldMaster,attr,omitempty"` + HREF string `xml:"href,attr,omitempty"` + Type string `xml:"type,attr,omitempty"` + CustomizeOnInstantiate bool `xml:"CustomizeOnInstantiate"` + Link []*Link `xml:"Link,omitempty"` +} + +// LeaseSettingsSection represents vApp lease settings. +// Type: LeaseSettingsSectionType +// Namespace: http://www.vmware.com/vcloud/v1.5 +// Description: Represents vApp lease settings. +// Since: 0.9 +type LeaseSettingsSection struct { + HREF string `xml:"href,attr,omitempty"` + Type string `xml:"type,attr,omitempty"` + DeploymentLeaseExpiration string `xml:"DeploymentLeaseExpiration,omitempty"` + DeploymentLeaseInSeconds int `xml:"DeploymentLeaseInSeconds,omitempty"` + Link *Link `xml:"Link,omitempty"` + StorageLeaseExpiration string `xml:"StorageLeaseExpiration,omitempty"` + StorageLeaseInSeconds int `xml:"StorageLeaseInSeconds,omitempty"` +} + +// IPRange represents a range of IP addresses, start and end inclusive. +// Type: IpRangeType +// Namespace: http://www.vmware.com/vcloud/v1.5 +// Description: Represents a range of IP addresses, start and end inclusive. +// Since: 0.9 +type IPRange struct { + StartAddress string `xml:"StartAddress"` // Start address of the IP range. + EndAddress string `xml:"EndAddress"` // End address of the IP range. +} + +// DhcpService represents a DHCP network service. +// Type: DhcpServiceType +// Namespace: http://www.vmware.com/vcloud/v1.5 +// Description: Represents a DHCP network service. +// Since: +type DhcpService struct { + DefaultLeaseTime int `xml:"DefaultLeaseTime,omitempty"` // Default lease in seconds for DHCP addresses. + DomainName string `xml:"DomainName,omitempty"` // The domain name. + IPRange *IPRange `xml:"IpRange"` // IP range for DHCP addresses. + IsEnabled bool `xml:"IsEnabled"` // Enable or disable the service using this flag + MaxLeaseTime int `xml:"MaxLeaseTime"` // Max lease in seconds for DHCP addresses. + PrimaryNameServer string `xml:"PrimaryNameServer,omitempty"` // The primary name server. + RouterIP string `xml:"RouterIp,omitempty"` // Router IP. + SecondaryNameServer string `xml:"SecondaryNameServer,omitempty"` // The secondary name server. + SubMask string `xml:"SubMask,omitempty"` // The subnet mask. +} + +// NetworkFeatures represents features of a network. +// Type: NetworkFeaturesType +// Namespace: http://www.vmware.com/vcloud/v1.5 +// Description: Represents features of a network. +// Since: +type NetworkFeatures struct { + DhcpService *DhcpService `xml:"DhcpService,omitempty"` // Substitute for NetworkService. DHCP service settings + FirewallService *FirewallService `xml:"FirewallService,omitempty"` // Substitute for NetworkService. Firewall service settings + NatService *NatService `xml:"NatService,omitempty"` // Substitute for NetworkService. NAT service settings + LoadBalancerService *LoadBalancerService `xml:"LoadBalancerService,omitempty"` // Substitute for NetworkService. Load Balancer service settings + StaticRoutingService *StaticRoutingService `xml:"StaticRoutingService,omitempty"` // Substitute for NetworkService. Static Routing service settings + // TODO: Not Implemented + // IpsecVpnService IpsecVpnService `xml:"IpsecVpnService,omitempty"` // Substitute for NetworkService. 
Ipsec Vpn service settings +} + +// IPAddresses is a list of IP addresses. +// Type: IpAddressesType +// Namespace: http://www.vmware.com/vcloud/v1.5 +// Description: A list of IP addresses. +// Since: 0.9 +type IPAddresses struct { + IPAddress string `xml:"IpAddress,omitempty"` // An IP address. +} + +// IPRanges represents a list of IP ranges. +// Type: IpRangesType +// Namespace: http://www.vmware.com/vcloud/v1.5 +// Description: Represents a list of IP ranges. +// Since: 0.9 +type IPRanges struct { + IPRange []*IPRange `xml:"IpRange,omitempty"` // IP range. +} + +// IPScope specifies network settings like gateway, network mask, DNS servers, IP ranges, etc. +// Type: IpScopeType +// Namespace: http://www.vmware.com/vcloud/v1.5 +// Description: Specify network settings like gateway, network mask, DNS servers, IP ranges, etc. +// Since: 0.9 +type IPScope struct { + IsInherited bool `xml:"IsInherited"` // True if the IP scope is inherited from the parent network. + Gateway string `xml:"Gateway,omitempty"` // Gateway of the network. + Netmask string `xml:"Netmask,omitempty"` // Network mask. + DNS1 string `xml:"Dns1,omitempty"` // Primary DNS server. + DNS2 string `xml:"Dns2,omitempty"` // Secondary DNS server. + DNSSuffix string `xml:"DnsSuffix,omitempty"` // DNS suffix. + IsEnabled bool `xml:"IsEnabled"` // Indicates if subnet is enabled or not. Default value is True. + IPRanges *IPRanges `xml:"IpRanges,omitempty"` // IP ranges used for static pool allocation in the network. + AllocatedIPAddresses *IPAddresses `xml:"AllocatedIpAddresses,omitempty"` // Read-only list of allocated IP addresses in the network. + SubAllocations *SubAllocations `xml:"SubAllocations,omitempty"` // Read-only list of IP addresses that are sub allocated to edge gateways. +} + +// SubAllocations is a list of IP addresses that are sub allocated to edge gateways. +// Type: SubAllocationsType +// Namespace: http://www.vmware.com/vcloud/v1.5 +// Description: A list of IP addresses that are sub allocated to edge gateways. +// Since: 5.1 +type SubAllocations struct { + // Attributes + HREF string `xml:"href,attr,omitempty"` // The URI of the entity. + Type string `xml:"type,attr,omitempty"` // The MIME type of the entity. + // Elements + Link []*Link `xml:"Link,omitempty"` // A reference to an entity or operation associated with this object. + SubAllocation *SubAllocation `xml:"SubAllocation,omitempty"` // IP Range sub allocated to an edge gateway. +} + +// SubAllocation is an IP range sub allocated to an edge gateway. +// Type: SubAllocationType +// Namespace: http://www.vmware.com/vcloud/v1.5 +// Description: IP range sub allocated to an edge gateway. +// Since: 5.1 +type SubAllocation struct { + EdgeGateway *Reference `xml:"EdgeGateway,omitempty"` // Edge gateway that uses this sub allocation. + IPRanges *IPRanges `xml:"IpRanges,omitempty"` // IP range sub allocated to the edge gateway. +} + +// IPScopes represents a list of IP scopes. +// Type: IpScopesType +// Namespace: http://www.vmware.com/vcloud/v1.5 +// Description: Represents a list of IP scopes. +// Since: 5.1 +type IPScopes struct { + IPScope IPScope `xml:"IpScope"` // IP scope. +} + +// NetworkConfiguration is the configuration applied to a network. This is an abstract base type. The concrete types include those for vApp and Organization wide networks. +// Type: NetworkConfigurationType +// Namespace: http://www.vmware.com/vcloud/v1.5 +// Description: The configurations applied to a network. This is an abstract base type.
The concrete types include those for vApp and Organization wide networks. +// Since: 0.9 +type NetworkConfiguration struct { + BackwardCompatibilityMode bool `xml:"BackwardCompatibilityMode"` + Features *NetworkFeatures `xml:"Features,omitempty"` + ParentNetwork *Reference `xml:"ParentNetwork,omitempty"` + IPScopes *IPScopes `xml:"IpScopes,omitempty"` + FenceMode string `xml:"FenceMode"` + RetainNetInfoAcrossDeployments bool `xml:"RetainNetInfoAcrossDeployments"` + // TODO: Not Implemented + // RouterInfo RouterInfo `xml:"RouterInfo,omitempty"` + // SyslogServerSettings SyslogServerSettings `xml:"SyslogServerSettings,omitempty"` +} + +// VAppNetworkConfiguration represents a vApp network configuration. +// Type: VAppNetworkConfigurationType +// Namespace: http://www.vmware.com/vcloud/v1.5 +// Description: Represents a vApp network configuration. +// Since: 0.9 +type VAppNetworkConfiguration struct { + HREF string `xml:"href,attr,omitempty"` + Type string `xml:"type,attr,omitempty"` + NetworkName string `xml:"networkName,attr"` + + Configuration *NetworkConfiguration `xml:"Configuration"` + Description string `xml:"Description,omitempty"` + IsDeployed bool `xml:"IsDeployed"` + Link *Link `xml:"Link,omitempty"` +} + +// NetworkConfigSection is a container for vApp networks. +// Type: NetworkConfigSectionType +// Namespace: http://www.vmware.com/vcloud/v1.5 +// Description: Container for vApp networks. +// Since: 0.9 +type NetworkConfigSection struct { + // Extends OVF Section_Type + // FIXME: Fix the OVF section + Info string `xml:"ovf:Info"` + // + HREF string `xml:"href,attr,omitempty"` + Type string `xml:"type,attr,omitempty"` + Link *Link `xml:"Link,omitempty"` + NetworkConfig *VAppNetworkConfiguration `xml:"NetworkConfig,omitempty"` +} + +// NetworkConnection represents a network connection in the virtual machine. +// Type: NetworkConnectionType +// Namespace: http://www.vmware.com/vcloud/v1.5 +// Description: Represents a network connection in the virtual machine. +// Since: 0.9 +type NetworkConnection struct { + Network string `xml:"network,attr"` // Name of the network to which this NIC is connected. + NetworkConnectionIndex int `xml:"NetworkConnectionIndex"` // Virtual slot number associated with this NIC. First slot number is 0. + NeedsCustomization bool `xml:"needsCustomization,attr,omitempty"` // True if this NIC needs customization. + ExternalIPAddress string `xml:"ExternalIpAddress,omitempty"` // If the network to which this NIC connects provides NAT services, the external address assigned to this NIC appears here. + IPAddress string `xml:"IpAddress,omitempty"` // IP address assigned to this NIC. + IsConnected bool `xml:"IsConnected"` // If the virtual machine is undeployed, this value specifies whether the NIC should be connected upon deployment. If the virtual machine is deployed, this value reports the current status of this NIC's connection, and can be updated to change that connection status. + IPAddressAllocationMode string `xml:"IpAddressAllocationMode"` // IP address allocation mode for this connection. One of: POOL (A static IP address is allocated automatically from a pool of addresses.) DHCP (The IP address is obtained from a DHCP service.) MANUAL (The IP address is assigned manually in the IpAddress element.) NONE (No IP addressing mode specified.) + MACAddress string `xml:"MACAddress,omitempty"` // MAC address associated with the NIC. +} + +// NetworkConnectionSection is the container for the network connections of this virtual machine.
+// Type: NetworkConnectionSectionType +// Namespace: http://www.vmware.com/vcloud/v1.5 +// Description: Container for the network connections of this virtual machine. +// Since: 0.9 +type NetworkConnectionSection struct { + // Extends OVF Section_Type + // FIXME: Fix the OVF section + XMLName xml.Name `xml:"NetworkConnectionSection"` + Xmlns string `xml:"xmlns,attr,omitempty"` + Ovf string `xml:"xmlns:ovf,attr,omitempty"` + + Info string `xml:"ovf:Info"` + // + HREF string `xml:"href,attr,omitempty"` + Type string `xml:"type,attr,omitempty"` + Link *Link `xml:"Link,omitempty"` + PrimaryNetworkConnectionIndex int `xml:"PrimaryNetworkConnectionIndex"` + NetworkConnection *NetworkConnection `xml:"NetworkConnection,omitempty"` +} + +// InstantiationParams is a container for ovf:Section_Type elements that specify vApp configuration on instantiate, compose, or recompose. +// Type: InstantiationParamsType +// Namespace: http://www.vmware.com/vcloud/v1.5 +// Description: Container for ovf:Section_Type elements that specify vApp configuration on instantiate, compose, or recompose. +// Since: 0.9 +type InstantiationParams struct { + CustomizationSection *CustomizationSection `xml:"CustomizationSection,omitempty"` + DefaultStorageProfileSection *DefaultStorageProfileSection `xml:"DefaultStorageProfileSection,omitempty"` + GuestCustomizationSection *GuestCustomizationSection `xml:"GuestCustomizationSection,omitempty"` + LeaseSettingsSection *LeaseSettingsSection `xml:"LeaseSettingsSection,omitempty"` + NetworkConfigSection *NetworkConfigSection `xml:"NetworkConfigSection,omitempty"` + NetworkConnectionSection *NetworkConnectionSection `xml:"NetworkConnectionSection,omitempty"` + // TODO: Not Implemented + // SnapshotSection SnapshotSection `xml:"SnapshotSection,omitempty"` +} + +// OrgVDCNetwork represents an Org vDC network in the vCloud model. +// Type: OrgVdcNetworkType +// Namespace: http://www.vmware.com/vcloud/v1.5 +// Description: Represents an Org vDC network in the vCloud model. +// Since: 5.1 +type OrgVDCNetwork struct { + XMLName xml.Name `xml:"OrgVdcNetwork"` + Xmlns string `xml:"xmlns,attr,omitempty"` + HREF string `xml:"href,attr,omitempty"` + Type string `xml:"type,attr,omitempty"` + ID string `xml:"id,attr,omitempty"` + OperationKey string `xml:"operationKey,attr,omitempty"` + Name string `xml:"name,attr"` + Status string `xml:"status,attr,omitempty"` + Configuration *NetworkConfiguration `xml:"Configuration,omitempty"` + Description string `xml:"Description,omitempty"` + EdgeGateway *Reference `xml:"EdgeGateway,omitempty"` + IsShared bool `xml:"IsShared"` + Link []Link `xml:"Link,omitempty"` + ServiceConfig *GatewayFeatures `xml:"ServiceConfig,omitempty"` // Specifies the service configuration for an isolated Org vDC network + Tasks *TasksInProgress `xml:"Tasks,omitempty"` +} + +// SupportedHardwareVersions contains a list of VMware virtual hardware versions supported in this vDC. +// Type: SupportedHardwareVersionsType +// Namespace: http://www.vmware.com/vcloud/v1.5 +// Description: Contains a list of VMware virtual hardware versions supported in this vDC. +// Since: 1.5 +type SupportedHardwareVersions struct { + SupportedHardwareVersion []string `xml:"SupportedHardwareVersion,omitempty"` // A virtual hardware version supported in this vDC. +} + +// Capabilities is a collection of supported hardware capabilities. +// Type: CapabilitiesType +// Namespace: http://www.vmware.com/vcloud/v1.5 +// Description: Collection of supported hardware capabilities.
+// Since: 1.5 +type Capabilities struct { + SupportedHardwareVersions *SupportedHardwareVersions `xml:"SupportedHardwareVersions,omitempty"` // Read-only list of virtual hardware versions supported by this vDC. +} + +// Vdc represents the user view of an organization vDC. +// Type: VdcType +// Namespace: http://www.vmware.com/vcloud/v1.5 +// Description: Represents the user view of an organization vDC. +// Since: 0.9 +type Vdc struct { + HREF string `xml:"href,attr,omitempty"` + Type string `xml:"type,attr,omitempty"` + ID string `xml:"id,attr,omitempty"` + OperationKey string `xml:"operationKey,attr,omitempty"` + Name string `xml:"name,attr"` + Status string `xml:"status,attr,omitempty"` + + AllocationModel string `xml:"AllocationModel"` + AvailableNetworks []*AvailableNetworks `xml:"AvailableNetworks,omitempty"` + Capabilities []*Capabilities `xml:"Capabilities,omitempty"` + ComputeCapacity []*ComputeCapacity `xml:"ComputeCapacity"` + Description string `xml:"Description,omitempty"` + IsEnabled bool `xml:"IsEnabled"` + Link []*Link `xml:"Link,omitempty"` + NetworkQuota int `xml:"NetworkQuota"` + NicQuota int `xml:"NicQuota"` + ResourceEntities []*ResourceEntities `xml:"ResourceEntities,omitempty"` + Tasks *TasksInProgress `xml:"Tasks,omitempty"` + UsedNetworkCount int `xml:"UsedNetworkCount,omitempty"` + VdcStorageProfiles []*VdcStorageProfiles `xml:"VdcStorageProfiles"` + VMQuota int `xml:"VmQuota"` +} + +// Task represents an asynchronous operation in vCloud Director. +// Type: TaskType +// Namespace: http://www.vmware.com/vcloud/v1.5 +// Description: Represents an asynchronous operation in vCloud Director. +// Since: 0.9 +type Task struct { + HREF string `xml:"href,attr,omitempty"` + Type string `xml:"type,attr,omitempty"` + ID string `xml:"id,attr,omitempty"` + OperationKey string `xml:"operationKey,attr,omitempty"` + Name string `xml:"name,attr"` + Status string `xml:"status,attr"` + Operation string `xml:"operation,attr,omitempty"` + OperationName string `xml:"operationName,attr,omitempty"` + ServiceNamespace string `xml:"serviceNamespace,attr,omitempty"` + StartTime string `xml:"startTime,attr,omitempty"` + EndTime string `xml:"endTime,attr,omitempty"` + ExpiryTime string `xml:"expiryTime,attr,omitempty"` + CancelRequested bool `xml:"cancelRequested,attr,omitempty"` + Description string `xml:"Description,omitempty"` + Details string `xml:"Details,omitempty"` + Error *Error `xml:"Error,omitempty"` + Link *Link `xml:"Link,omitempty"` + Organization *Reference `xml:"Organization,omitempty"` + Owner *Reference `xml:"Owner,omitempty"` + Progress int `xml:"Progress,omitempty"` + Tasks *TasksInProgress `xml:"Tasks,omitempty"` + User *Reference `xml:"User,omitempty"` +} + +// CapacityWithUsage represents a capacity and usage of a given resource. +// Type: CapacityWithUsageType +// Namespace: http://www.vmware.com/vcloud/v1.5 +// Description: Represents a capacity and usage of a given resource. +// Since: 0.9 +type CapacityWithUsage struct { + Allocated int64 `xml:"Allocated,omitempty"` + Limit int64 `xml:"Limit,omitempty"` + Overhead int64 `xml:"Overhead,omitempty"` + Reserved int64 `xml:"Reserved,omitempty"` + Units string `xml:"Units"` + Used int64 `xml:"Used,omitempty"` +} + +// ComputeCapacity represents vDC compute capacity. +// Type: ComputeCapacityType +// Namespace: http://www.vmware.com/vcloud/v1.5 +// Description: Represents vDC compute capacity. 
+// Since: 0.9 +type ComputeCapacity struct { + CPU *CapacityWithUsage `xml:"Cpu"` + Memory *CapacityWithUsage `xml:"Memory"` +} + +// Reference is a reference to a resource. Contains an href attribute and optional name and type attributes. +// Type: ReferenceType +// Namespace: http://www.vmware.com/vcloud/v1.5 +// Description: A reference to a resource. Contains an href attribute and optional name and type attributes. +// Since: 0.9 +type Reference struct { + HREF string `xml:"href,attr"` + ID string `xml:"id,attr,omitempty"` + Type string `xml:"type,attr,omitempty"` + Name string `xml:"name,attr,omitempty"` +} + +// ResourceReference represents a reference to a resource. Contains an href attribute, a resource status attribute, and optional name and type attributes. +// Type: ResourceReferenceType +// Namespace: http://www.vmware.com/vcloud/v1.5 +// Description: Represents a reference to a resource. Contains an href attribute, a resource status attribute, and optional name and type attributes. +// Since: 0.9 +type ResourceReference struct { + HREF string `xml:"href,attr"` + ID string `xml:"id,attr,omitempty"` + Type string `xml:"type,attr,omitempty"` + Name string `xml:"name,attr,omitempty"` + Status string `xml:"status,attr,omitempty"` +} + +// VdcStorageProfiles is a container for references to storage profiles associated with a vDC. +// Element: VdcStorageProfiles +// Type: VdcStorageProfilesType +// Namespace: http://www.vmware.com/vcloud/v1.5 +// Description: Container for references to storage profiles associated with a vDC. +// Since: 5.1 +type VdcStorageProfiles struct { + VdcStorageProfile []*Reference `xml:"VdcStorageProfile,omitempty"` +} + +// ResourceEntities is a container for references to ResourceEntity objects in this vDC. +// Type: ResourceEntitiesType +// Namespace: http://www.vmware.com/vcloud/v1.5 +// Description: Container for references to ResourceEntity objects in this vDC. +// Since: 0.9 +type ResourceEntities struct { + ResourceEntity []*ResourceReference `xml:"ResourceEntity,omitempty"` +} + +// AvailableNetworks is a container for references to available organization vDC networks. +// Type: AvailableNetworksType +// Namespace: http://www.vmware.com/vcloud/v1.5 +// Description: Container for references to available organization vDC networks. +// Since: 0.9 +type AvailableNetworks struct { + Network []*Reference `xml:"Network,omitempty"` +} + +// Link extends reference type by adding relation attribute. Defines a hyper-link with a relationship, hyper-link reference, and an optional MIME type. +// Type: LinkType +// Namespace: http://www.vmware.com/vcloud/v1.5 +// Description: Extends reference type by adding relation attribute. Defines a hyper-link with a relationship, hyper-link reference, and an optional MIME type. +// Since: 0.9 +type Link struct { + HREF string `xml:"href,attr"` + ID string `xml:"id,attr,omitempty"` + Type string `xml:"type,attr,omitempty"` + Name string `xml:"name,attr,omitempty"` + Rel string `xml:"rel,attr"` +} + +// Org represents the user view of a vCloud Director organization. +// Type: OrgType +// Namespace: http://www.vmware.com/vcloud/v1.5 +// Description: Represents the user view of a vCloud Director organization. 
+// Since: 0.9 +type Org struct { + HREF string `xml:"href,attr,omitempty"` + Type string `xml:"type,attr,omitempty"` + ID string `xml:"id,attr,omitempty"` + OperationKey string `xml:"operationKey,attr,omitempty"` + Name string `xml:"name,attr"` + Description string `xml:"Description,omitempty"` + FullName string `xml:"FullName"` + IsEnabled bool `xml:"IsEnabled,omitempty"` + Link []*Link `xml:"Link,omitempty"` + Tasks *TasksInProgress `xml:"Tasks,omitempty"` +} + +// CatalogItem contains a reference to a VappTemplate or Media object and related metadata. +// Type: CatalogItemType +// Namespace: http://www.vmware.com/vcloud/v1.5 +// Description: Contains a reference to a VappTemplate or Media object and related metadata. +// Since: 0.9 +type CatalogItem struct { + HREF string `xml:"href,attr,omitempty"` + Type string `xml:"type,attr,omitempty"` + ID string `xml:"id,attr,omitempty"` + OperationKey string `xml:"operationKey,attr,omitempty"` + Name string `xml:"name,attr"` + Size int64 `xml:"size,attr,omitempty"` + DateCreated string `xml:"DateCreated,omitempty"` + Description string `xml:"Description,omitempty"` + Entity *Entity `xml:"Entity"` + Link []*Link `xml:"Link,omitempty"` + Tasks *TasksInProgress `xml:"Tasks,omitempty"` + VersionNumber int64 `xml:"VersionNumber,omitempty"` +} + +// Entity is a basic entity type in the vCloud object model. Includes a name, an optional description, and an optional list of links. +// Type: EntityType +// Namespace: http://www.vmware.com/vcloud/v1.5 +// Description: Basic entity type in the vCloud object model. Includes a name, an optional description, and an optional list of links. +// Since: 0.9 +type Entity struct { + HREF string `xml:"href,attr,omitempty"` + Type string `xml:"type,attr,omitempty"` + ID string `xml:"id,attr,omitempty"` + OperationKey string `xml:"operationKey,attr,omitempty"` + Name string `xml:"name,attr"` + Description string `xml:"Description,omitempty"` + Link []*Link `xml:"Link,omitempty"` + Tasks *TasksInProgress `xml:"Tasks,omitempty"` +} + +// CatalogItems is a container for references to catalog items. +// Type: CatalogItemsType +// Namespace: http://www.vmware.com/vcloud/v1.5 +// Description: Container for references to catalog items. +// Since: 0.9 +type CatalogItems struct { + CatalogItem []*Reference `xml:"CatalogItem"` +} + +// Catalog represents the user view of a Catalog object. +// Type: CatalogType +// Namespace: http://www.vmware.com/vcloud/v1.5 +// Description: Represents the user view of a Catalog object. +// Since: 0.9 +type Catalog struct { + HREF string `xml:"href,attr,omitempty"` + Type string `xml:"type,attr,omitempty"` + ID string `xml:"id,attr,omitempty"` + OperationKey string `xml:"operationKey,attr,omitempty"` + Name string `xml:"name,attr"` + CatalogItems []*CatalogItems `xml:"CatalogItems"` + DateCreated string `xml:"DateCreated"` + Description string `xml:"Description"` + IsPublished bool `xml:"IsPublished"` + Link []*Link `xml:"Link"` + Owner *Owner `xml:"Owner,omitempty"` + Tasks *TasksInProgress `xml:"Tasks,omitempty"` + VersionNumber int64 `xml:"VersionNumber"` +} + +// Owner represents the owner of this entity. +// Type: OwnerType +// Namespace: http://www.vmware.com/vcloud/v1.5 +// Description: Represents the owner of this entity. +// Since: 1.5 +type Owner struct { + HREF string `xml:"href,attr,omitempty"` + Type string `xml:"type,attr,omitempty"` + Link []*Link `xml:"Link,omitempty"` + User *Reference `xml:"User"` +} + +// Error is the standard error message type used in the vCloud REST API. 
+// Type: ErrorType +// Namespace: http://www.vmware.com/vcloud/v1.5 +// Description: The standard error message type used in the vCloud REST API. +// Since: 0.9 +type Error struct { + Message string `xml:"message,attr"` + MajorErrorCode int `xml:"majorErrorCode,attr"` + MinorErrorCode string `xml:"minorErrorCode,attr"` + VendorSpecificErrorCode string `xml:"vendorSpecificErrorCode,attr,omitempty"` + StackTrace string `xml:"stackTrace,attr,omitempty"` +} + +// File represents a file to be transferred (uploaded or downloaded). +// Type: FileType +// Namespace: http://www.vmware.com/vcloud/v1.5 +// Description: Represents a file to be transferred (uploaded or downloaded). +// Since: 0.9 +type File struct { + HREF string `xml:"href,attr,omitempty"` + Type string `xml:"type,attr,omitempty"` + ID string `xml:"id,attr,omitempty"` + OperationKey string `xml:"operationKey,attr,omitempty"` + Name string `xml:"name,attr"` + Size int64 `xml:"size,attr,omitempty"` + BytesTransferred int64 `xml:"bytesTransferred,attr,omitempty"` + Checksum string `xml:"checksum,attr,omitempty"` + Description string `xml:"Description,omitempty"` + Link []*Link `xml:"Link,omitempty"` + Tasks *TasksInProgress `xml:"Tasks,omitempty"` +} + +// FilesList represents a list of files to be transferred (uploaded or downloaded). +// Type: FilesListType +// Namespace: http://www.vmware.com/vcloud/v1.5 +// Description: Represents a list of files to be transferred (uploaded or downloaded). +// Since: 0.9 +type FilesList struct { + File []*File `xml:"File"` +} + +// UndeployVAppParams are the parameters to an undeploy vApp request. +// Type: UndeployVAppParamsType +// Namespace: http://www.vmware.com/vcloud/v1.5 +// Description: Parameters to an undeploy vApp request. +// Since: 0.9 +type UndeployVAppParams struct { + Xmlns string `xml:"xmlns,attr"` + UndeployPowerAction string `xml:"UndeployPowerAction,omitempty"` +} + +// VMCapabilities allows you to specify certain capabilities of this virtual machine. +// Type: VmCapabilitiesType +// Namespace: http://www.vmware.com/vcloud/v1.5 +// Description: Allows you to specify certain capabilities of this virtual machine. +// Since: 5.1 +type VMCapabilities struct { + HREF string `xml:"href,attr,omitempty"` + Type string `xml:"type,attr,omitempty"` + CPUHotAddEnabled bool `xml:"CpuHotAddEnabled,omitempty"` + Link []*Link `xml:"Link,omitempty"` + MemoryHotAddEnabled bool `xml:"MemoryHotAddEnabled,omitempty"` +} + +// VMs represents a list of virtual machines. +// Type: VmsType +// Namespace: http://www.vmware.com/vcloud/v1.5 +// Description: Represents a list of virtual machines. +// Since: 5.1 +type VMs struct { + HREF string `xml:"href,attr,omitempty"` + Type string `xml:"type,attr,omitempty"` + Link []*Link `xml:"Link,omitempty"` + VMReference []*Reference `xml:"VmReference,omitempty"` +} + +/* + * Types that are completely valid (position, comment, coverage complete) + */ + +// ComposeVAppParams represents vApp composition parameters. +// Type: ComposeVAppParamsType +// Namespace: http://www.vmware.com/vcloud/v1.5 +// Description: Represents vApp composition parameters. +// Since: 0.9 +type ComposeVAppParams struct { + XMLName xml.Name `xml:"ComposeVAppParams"` + Ovf string `xml:"xmlns:ovf,attr"` + Xsi string `xml:"xmlns:xsi,attr"` + Xmlns string `xml:"xmlns,attr"` + // Attributes + Name string `xml:"name,attr,omitempty"` // Typically used to name or identify the subject of the request. For example, the name of the object being created or modified.
+ Deploy bool `xml:"deploy,attr"` // True if the vApp should be deployed at instantiation. Defaults to true. + PowerOn bool `xml:"powerOn,attr"` // True if the vApp should be powered-on at instantiation. Defaults to true. + LinkedClone bool `xml:"linkedClone,attr,omitempty"` // Reserved. Unimplemented. + // Elements + Description string `xml:"Description,omitempty"` // Optional description. + VAppParent *Reference `xml:"VAppParent,omitempty"` // Reserved. Unimplemented. + InstantiationParams *InstantiationParams `xml:"InstantiationParams,omitempty"` // Instantiation parameters for the composed vApp. + SourcedItem *SourcedCompositionItemParam `xml:"SourcedItem,omitempty"` // Composition item. One of: vApp vAppTemplate Vm. + AllEULAsAccepted bool `xml:"AllEULAsAccepted,omitempty"` // True confirms acceptance of all EULAs in a vApp template. Instantiation fails if this element is missing, empty, or set to false and one or more EulaSection elements are present. +} + +// SourcedCompositionItemParam represents a vApp, vApp template or Vm to include in a composed vApp. +// Type: SourcedCompositionItemParamType +// Namespace: http://www.vmware.com/vcloud/v1.5 +// Description: Represents a vApp, vApp template or Vm to include in a composed vApp. +// Since: 0.9 +type SourcedCompositionItemParam struct { + // Attributes + SourceDelete bool `xml:"sourceDelete,attr,omitempty"` // True if the source item should be deleted after composition is complete. + // Elements + Source *Reference `xml:"Source"` // Reference to a vApp, vApp template or virtual machine to include in the composition. Changing the name of the newly created VM by specifying name attribute is deprecated. Include VmGeneralParams element instead. + VMGeneralParams *VMGeneralParams `xml:"VmGeneralParams,omitempty"` // Specify name, description, and other properties of a VM during instantiation. + VAppScopedLocalID string `xml:"VAppScopedLocalId,omitempty"` // If Source references a Vm, this value provides a unique identifier for the Vm in the scope of the composed vApp. + InstantiationParams *InstantiationParams `xml:"InstantiationParams,omitempty"` // If Source references a Vm, this can include any of the following OVF sections: VirtualHardwareSection OperatingSystemSection NetworkConnectionSection GuestCustomizationSection. + NetworkAssignment *NetworkAssignment `xml:"NetworkAssignment,omitempty"` // If Source references a Vm, this element maps a network name specified in the Vm to the network name of a vApp network defined in the composed vApp. + StorageProfile *Reference `xml:"StorageProfile,omitempty"` // If Source references a Vm, this element contains a reference to a storage profile to be used for the Vm. The specified storage profile must exist in the organization vDC that contains the composed vApp. If not specified, the default storage profile for the vDC is used. + LocalityParams *LocalityParams `xml:"LocalityParams,omitempty"` // Represents locality parameters. Locality parameters provide a hint that may help the placement engine optimize placement of a VM and an independent disk so that the VM can make efficient use of the disk. +} + +// LocalityParams represents locality parameters. Locality parameters provide a hint that may help the placement engine optimize placement of a VM with respect to another VM or an independent disk. +// Type: LocalityParamsType +// Namespace: http://www.vmware.com/vcloud/v1.5 +// Description: Represents locality parameters.
Locality parameters provide a hint that may help the placement engine optimize placement of a VM with respect to another VM or an independent disk. +// Since: 5.1 +type LocalityParams struct { + // Elements + ResourceEntity *Reference `xml:"ResourceEntity,omitempty"` // Reference to a Disk, or a VM. +} + +// NetworkAssignment maps a network name specified in a Vm to the network name of a vApp network defined in the VApp that contains the Vm. +// Type: NetworkAssignmentType +// Namespace: http://www.vmware.com/vcloud/v1.5 +// Description: Maps a network name specified in a Vm to the network name of a vApp network defined in the VApp that contains the Vm. +// Since: 0.9 +type NetworkAssignment struct { + // Attributes + InnerNetwork string `xml:"innerNetwork,attr"` // Name of the network as specified in the Vm. + ContainerNetwork string `xml:"containerNetwork,attr"` // Name of the vApp network to map to. +} + +// VMGeneralParams is a set of overrides to source VM properties to apply to target VM during copying. +// Type: VmGeneralParamsType +// Namespace: http://www.vmware.com/vcloud/v1.5 +// Description: A set of overrides to source VM properties to apply to target VM during copying. +// Since: 5.6 +type VMGeneralParams struct { + // Elements + Name string `xml:"Name,omitempty"` // Name of VM + Description string `xml:"Description,omitempty"` // VM description + NeedsCustomization bool `xml:"NeedsCustomization,omitempty"` // True if this VM needs guest customization +} + +// VApp represents a vApp. +// Type: VAppType +// Namespace: http://www.vmware.com/vcloud/v1.5 +// Description: Represents a vApp. +// Since: 0.9 +type VApp struct { + // Attributes + HREF string `xml:"href,attr,omitempty"` // The URI of the entity. + Type string `xml:"type,attr,omitempty"` // The MIME type of the entity. + ID string `xml:"id,attr,omitempty"` // The entity identifier, expressed in URN format. The value of this attribute uniquely identifies the entity, persists for the life of the entity, and is never reused. + OperationKey string `xml:"operationKey,attr,omitempty"` // Optional unique identifier to support idempotent semantics for create and delete operations. + Name string `xml:"name,attr"` // The name of the entity. + Status int `xml:"status,attr,omitempty"` // Creation status of the resource entity. + Deployed bool `xml:"deployed,attr,omitempty"` // True if the virtual machine is deployed. + OvfDescriptorUploaded bool `xml:"ovfDescriptorUploaded,attr,omitempty"` // Read-only indicator that the OVF descriptor for this vApp has been uploaded. + // Elements + Link []*Link `xml:"Link,omitempty"` // A reference to an entity or operation associated with this object. + Description string `xml:"Description,omitempty"` // Optional description. + Tasks *TasksInProgress `xml:"Tasks,omitempty"` // A list of queued, running, or recently completed tasks associated with this entity. + Files *FilesList `xml:"Files,omitempty"` // Represents a list of files to be transferred (uploaded or downloaded). Each File in the list is part of the ResourceEntity. + VAppParent *Reference `xml:"VAppParent,omitempty"` // Reserved. Unimplemented. + // TODO: OVF Sections to be implemented + // Section OVF_Section `xml:"Section"` + DateCreated string `xml:"DateCreated,omitempty"` // Creation date/time of the vApp. + Owner *Owner `xml:"Owner,omitempty"` // vApp owner. + InMaintenanceMode bool `xml:"InMaintenanceMode,omitempty"` // True if this vApp is in maintenance mode. Prevents users from changing vApp metadata.
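+ + // Note (illustrative, not upstream documentation): the numeric Status + // attribute above maps to a label via the VAppStatuses map defined at the + // top of this file, e.g. VAppStatuses[4] == "POWERED_ON".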
+ Children *VAppChildren `xml:"Children,omitempty"` // Container for virtual machines included in this vApp. +} + +// MetadataValue wraps a single typed metadata value. +type MetadataValue struct { + XMLName xml.Name `xml:"MetadataValue"` + Xsi string `xml:"xmlns:xsi,attr"` + Xmlns string `xml:"xmlns,attr"` + TypedValue *TypedValue `xml:"TypedValue"` +} + +// TypedValue is a metadata value with an explicit xsi:type. +type TypedValue struct { + XsiType string `xml:"xsi:type,attr"` + Value string `xml:"Value"` +} + +// VAppChildren is a container for virtual machines included in this vApp. +// Type: VAppChildrenType +// Namespace: http://www.vmware.com/vcloud/v1.5 +// Description: Container for virtual machines included in this vApp. +// Since: 0.9 +type VAppChildren struct { + VM []*VM `xml:"Vm,omitempty"` // Represents a virtual machine. +} + +// TasksInProgress is a list of queued, running, or recently completed tasks. +// Type: TasksInProgressType +// Namespace: http://www.vmware.com/vcloud/v1.5 +// Description: A list of queued, running, or recently completed tasks. +// Since: 0.9 +type TasksInProgress struct { + // Elements + Task []*Task `xml:"Task"` // A task. +} + +// VAppTemplateChildren is a container for virtual machines included in this vApp template. +// Type: VAppTemplateChildrenType +// Namespace: http://www.vmware.com/vcloud/v1.5 +// Description: Container for virtual machines included in this vApp template. +// Since: 0.9 +type VAppTemplateChildren struct { + // Elements + VM []*VAppTemplate `xml:"Vm"` // Represents a virtual machine in this vApp template. +} + +// VAppTemplate represents a vApp template. +// Type: VAppTemplateType +// Namespace: http://www.vmware.com/vcloud/v1.5 +// Description: Represents a vApp template. +// Since: 0.9 +type VAppTemplate struct { + // Attributes + HREF string `xml:"href,attr,omitempty"` // The URI of the entity. + Type string `xml:"type,attr,omitempty"` // The MIME type of the entity. + ID string `xml:"id,attr,omitempty"` // The entity identifier, expressed in URN format. The value of this attribute uniquely identifies the entity, persists for the life of the entity, and is never reused. + OperationKey string `xml:"operationKey,attr,omitempty"` // Optional unique identifier to support idempotent semantics for create and delete operations. + Name string `xml:"name,attr"` // The name of the entity. + Status int `xml:"status,attr,omitempty"` // Creation status of the resource entity. + OvfDescriptorUploaded string `xml:"ovfDescriptorUploaded,attr,omitempty"` // True if the OVF descriptor for this template has been uploaded. + GoldMaster bool `xml:"goldMaster,attr,omitempty"` // True if this template is a gold master. + // Elements + Link []*Link `xml:"Link,omitempty"` // A reference to an entity or operation associated with this object. + Description string `xml:"Description,omitempty"` // Optional description. + Tasks *TasksInProgress `xml:"Tasks,omitempty"` // A list of queued, running, or recently completed tasks associated with this entity. + Files *FilesList `xml:"Files,omitempty"` // Represents a list of files to be transferred (uploaded or downloaded). Each File in the list is part of the ResourceEntity. + Owner *Owner `xml:"Owner,omitempty"` // vAppTemplate owner. + Children *VAppTemplateChildren `xml:"Children,omitempty"` // Container for virtual machines included in this vApp template. + VAppScopedLocalID string `xml:"VAppScopedLocalId"` // A unique identifier for the Vm in the scope of the vApp template. + DefaultStorageProfile string `xml:"DefaultStorageProfile,omitempty"` // The name of the storage profile to be used for this object.
The named storage profile must exist in the organization vDC that contains the object. If not specified, the default storage profile for the vDC is used. + DateCreated string `xml:"DateCreated,omitempty"` // Creation date/time of the template. + // FIXME: Upstream bug? Missing NetworkConfigSection, LeaseSettingSection and + // CustomizationSection at least, NetworkConnectionSection is required when + // using ComposeVApp action in the context of a Children VM (still + // referenced by VAppTemplateType). + NetworkConfigSection *NetworkConfigSection `xml:"NetworkConfigSection,omitempty"` + NetworkConnectionSection *NetworkConnectionSection `xml:"NetworkConnectionSection,omitempty"` + LeaseSettingsSection *LeaseSettingsSection `xml:"LeaseSettingsSection,omitempty"` + CustomizationSection *CustomizationSection `xml:"CustomizationSection,omitempty"` + // OVF Section needs to be added + // Section Section `xml:"Section,omitempty"` +} + +// VM represents a virtual machine +// Type: VmType +// Namespace: http://www.vmware.com/vcloud/v1.5 +// Description: Represents a virtual machine. +// Since: 0.9 +type VM struct { + // Attributes + XMLName xml.Name `xml:"Vm"` + Ovf string `xml:"xmlns:ovf,attr,omitempty"` + Xsi string `xml:"xmlns:xsi,attr,omitempty"` + Xmlns string `xml:"xmlns,attr,omitempty"` + + HREF string `xml:"href,attr,omitempty"` // The URI of the entity. + Type string `xml:"type,attr,omitempty"` // The MIME type of the entity. + ID string `xml:"id,attr,omitempty"` // The entity identifier, expressed in URN format. The value of this attribute uniquely identifies the entity, persists for the life of the entity, and is never reused + OperationKey string `xml:"operationKey,attr,omitempty"` // Optional unique identifier to support idempotent semantics for create and delete operations. + Name string `xml:"name,attr"` // The name of the entity. + Status int `xml:"status,attr,omitempty"` // Creation status of the resource entity. + Deployed bool `xml:"deployed,attr,omitempty"` // True if the virtual machine is deployed. + NeedsCustomization bool `xml:"needsCustomization,attr,omitempty"` // True if this virtual machine needs customization. + NestedHypervisorEnabled bool `xml:"nestedHypervisorEnabled,attr,omitempty"` // True if hardware-assisted CPU virtualization capabilities in the host should be exposed to the guest operating system. + // Elements + Link []*Link `xml:"Link,omitempty"` // A reference to an entity or operation associated with this object. + Description string `xml:"Description,omitempty"` // Optional description. + Tasks *TasksInProgress `xml:"Tasks,omitempty"` // A list of queued, running, or recently completed tasks associated with this entity. + Files *FilesList `xml:"FilesList,omitempty"` // Represents a list of files to be transferred (uploaded or downloaded). Each File in the list is part of the ResourceEntity. + VAppParent *Reference `xml:"VAppParent,omitempty"` // Reserved. Unimplemented. + // TODO: OVF Sections to be implemented + // Section OVF_Section `xml:"Section,omitempty" + DateCreated string `xml:"DateCreated,omitempty"` // Creation date/time of the vApp. + + // FIXME: Upstream bug? Missing NetworkConnectionSection + NetworkConnectionSection *NetworkConnectionSection `xml:"NetworkConnectionSection,omitempty"` + + VAppScopedLocalID string `xml:"VAppScopedLocalId,omitempty"` // A unique identifier for the virtual machine in the scope of the vApp. 
+ + // TODO: OVF Sections to be implemented + // Environment OVF_Environment `xml:"Environment,omitempty" + + VMCapabilities *VMCapabilities `xml:"VmCapabilities,omitempty"` // Allows you to specify certain capabilities of this virtual machine. + StorageProfile *Reference `xml:"StorageProfile,omitempty"` // A reference to a storage profile to be used for this object. The specified storage profile must exist in the organization vDC that contains the object. If not specified, the default storage profile for the vDC is used. +} + +// OVFItem is a horrible kludge to process OVF, needs to be fixed with proper types. +type OVFItem struct { + XMLName xml.Name `xml:"vcloud:Item"` + XmlnsRasd string `xml:"xmlns:rasd,attr"` + XmlnsVCloud string `xml:"xmlns:vcloud,attr"` + XmlnsXsi string `xml:"xmlns:xsi,attr"` + VCloudHREF string `xml:"vcloud:href,attr"` + VCloudType string `xml:"vcloud:type,attr"` + AllocationUnits string `xml:"rasd:AllocationUnits"` + Description string `xml:"rasd:Description"` + ElementName string `xml:"rasd:ElementName"` + InstanceID int `xml:"rasd:InstanceID"` + Reservation int `xml:"rasd:Reservation"` + ResourceType int `xml:"rasd:ResourceType"` + VirtualQuantity int `xml:"rasd:VirtualQuantity"` + Weight int `xml:"rasd:Weight"` + Link *Link `xml:"vcloud:Link"` +} + +// DeployVAppParams are the parameters to a deploy vApp request +// Type: DeployVAppParamsType +// Namespace: http://www.vmware.com/vcloud/v1.5 +// Description: Parameters to a deploy vApp request. +// Since: 0.9 +type DeployVAppParams struct { + XMLName xml.Name `xml:"DeployVAppParams"` + Xmlns string `xml:"xmlns,attr"` + // Attributes + PowerOn bool `xml:"powerOn,attr"` // Used to specify whether to power on vapp on deployment, if not set default value is true. + DeploymentLeaseSeconds int `xml:"deploymentLeaseSeconds,attr,omitempty"` // Lease in seconds for deployment. A value of 0 is replaced by the organization default deploymentLeaseSeconds value. + ForceCustomization bool `xml:"forceCustomization,attr,omitempty"` // Used to specify whether to force customization on deployment, if not set default value is false +} + +// GuestCustomizationSection represents guest customization settings +// Type: GuestCustomizationSectionType +// Namespace: http://www.vmware.com/vcloud/v1.5 +// Description: Represents a guest customization settings. +// Since: 1.0 +type GuestCustomizationSection struct { + // Extends OVF Section_Type + // Attributes + Ovf string `xml:"xmlns:ovf,attr,omitempty"` + Xsi string `xml:"xmlns:xsi,attr,omitempty"` + Xmlns string `xml:"xmlns,attr,omitempty"` + + HREF string `xml:"href,attr,omitempty"` // A reference to the section in URL format. + Type string `xml:"type,attr,omitempty"` // The MIME type of the section. + // FIXME: Fix the OVF section + Info string `xml:"ovf:Info"` + // Elements + Enabled bool `xml:"Enabled,omitempty"` // True if guest customization is enabled. + ChangeSid bool `xml:"ChangeSid,omitempty"` // True if customization can change the Windows SID of this virtual machine. + VirtualMachineID string `xml:"VirtualMachineId,omitempty"` // Virtual machine ID to apply. + JoinDomainEnabled bool `xml:"JoinDomainEnabled,omitempty"` // True if this virtual machine can join a Windows Domain. + UseOrgSettings bool `xml:"UseOrgSettings,omitempty"` // True if customization should use organization settings (OrgGuestPersonalizationSettings) when joining a Windows Domain. + DomainName string `xml:"DomainName,omitempty"` // The name of the Windows Domain to join. 
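+ + // Illustrative sketch (hypothetical values, not upstream documentation): + //   gc := &GuestCustomizationSection{Enabled: true, ComputerName: "web-01"} + // enables guest customization and assigns the guest's computer name. +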
+ DomainUserName string `xml:"DomainUserName,omitempty"` // User name to specify when joining a Windows Domain. + DomainUserPassword string `xml:"DomainUserPassword,omitempty"` // Password to use with DomainUserName. + MachineObjectOU string `xml:"MachineObjectOU,omitempty"` // The name of the Windows Domain Organizational Unit (OU) in which the computer account for this virtual machine will be created. + AdminPasswordEnabled bool `xml:"AdminPasswordEnabled,omitempty"` // True if guest customization can modify administrator password settings for this virtual machine. + AdminPasswordAuto bool `xml:"AdminPasswordAuto,omitempty"` // True if the administrator password for this virtual machine should be automatically generated. + AdminPassword string `xml:"AdminPassword,omitempty"` // True if the administrator password for this virtual machine should be set to this string. (AdminPasswordAuto must be false.) + AdminAutoLogonEnabled bool `xml:"AdminAutoLogonEnabled,omitempty"` // True if guest administrator should automatically log into this virtual machine. + AdminAutoLogonCount int `xml:"AdminAutoLogonCount,omitempty"` // Number of times administrator can automatically log into this virtual machine. In case AdminAutoLogon is set to True, this value should be between 1 and 100. Otherwise, it should be 0. + ResetPasswordRequired bool `xml:"ResetPasswordRequired,omitempty"` // True if the administrator password for this virtual machine must be reset after first use. + CustomizationScript string `xml:"CustomizationScript,omitempty"` // Script to run on guest customization. The entire script must appear in this element. Use the XML entity &#13; to represent a newline. Unicode characters can be represented in the form &#xxxx; where xxxx is the character number. + ComputerName string `xml:"ComputerName,omitempty"` // Computer name to assign to this virtual machine. + Link []*Link `xml:"Link,omitempty"` // A link to an operation on this section. +} + +// InstantiateVAppTemplateParams represents vApp template instantiation parameters. +// Type: InstantiateVAppTemplateParamsType +// Namespace: http://www.vmware.com/vcloud/v1.5 +// Description: Represents vApp template instantiation parameters. +// Since: 0.9 +type InstantiateVAppTemplateParams struct { + XMLName xml.Name `xml:"InstantiateVAppTemplateParams"` + Ovf string `xml:"xmlns:ovf,attr"` + Xsi string `xml:"xmlns:xsi,attr,omitempty"` + Xmlns string `xml:"xmlns,attr"` + // Attributes + Name string `xml:"name,attr,omitempty"` // Typically used to name or identify the subject of the request. For example, the name of the object being created or modified. + Deploy bool `xml:"deploy,attr"` // True if the vApp should be deployed at instantiation. Defaults to true. + PowerOn bool `xml:"powerOn,attr"` // True if the vApp should be powered-on at instantiation. Defaults to true. + LinkedClone bool `xml:"linkedClone,attr,omitempty"` // Reserved. Unimplemented. + // Elements + Description string `xml:"Description,omitempty"` // Optional description. + VAppParent *Reference `xml:"VAppParent,omitempty"` // Reserved. Unimplemented. + InstantiationParams *InstantiationParams `xml:"InstantiationParams,omitempty"` // Instantiation parameters for the composed vApp. + Source *Reference `xml:"Source"` // A reference to a source object such as a vApp or vApp template. + IsSourceDelete bool `xml:"IsSourceDelete,omitempty"` // Set to true to delete the source object after the operation completes.
+ SourcedItem *SourcedCompositionItemParam `xml:"SourcedItem,omitempty"` // Composition item. One of: vApp vAppTemplate Vm. + AllEULAsAccepted bool `xml:"AllEULAsAccepted,omitempty"` // True confirms acceptance of all EULAs in a vApp template. Instantiation fails if this element is missing, empty, or set to false and one or more EulaSection elements are present. +} + +// EdgeGateway represents a gateway. +// Element: EdgeGateway +// Type: GatewayType +// Namespace: http://www.vmware.com/vcloud/v1.5 +// Description: Represents a gateway. +// Since: 5.1 +type EdgeGateway struct { + // Attributes + HREF string `xml:"href,attr,omitempty"` // The URI of the entity. + Type string `xml:"type,attr,omitempty"` // The MIME type of the entity. + ID string `xml:"id,attr,omitempty"` // The entity identifier, expressed in URN format. The value of this attribute uniquely identifies the entity, persists for the life of the entity, and is never reused. + OperationKey string `xml:"operationKey,attr,omitempty"` // Optional unique identifier to support idempotent semantics for create and delete operations. + Name string `xml:"name,attr"` // The name of the entity. + Status int `xml:"status,attr,omitempty"` // Creation status of the gateway. One of: 0 (The gateway is still being created) 1 (The gateway is ready) -1 (There was an error while creating the gateway). + // Elements + Link []*Link `xml:"Link,omitempty"` // A link to an operation on this section. + Description string `xml:"Description,omitempty"` // Optional description. + Tasks *TasksInProgress `xml:"Tasks,omitempty"` // A list of queued, running, or recently completed tasks associated with this entity. + Configuration *GatewayConfiguration `xml:"Configuration"` // Gateway configuration. +} + +// GatewayConfiguration is the gateway configuration. +// Type: GatewayConfigurationType +// Namespace: http://www.vmware.com/vcloud/v1.5 +// Description: Gateway Configuration. +// Since: 5.1 +type GatewayConfiguration struct { + Xmlns string `xml:"xmlns,attr,omitempty"` + // Elements + BackwardCompatibilityMode bool `xml:"BackwardCompatibilityMode,omitempty"` // Compatibility mode. Default is false. If set to true, allows users to write firewall rules in the old 1.5 format. The new format does not require the use of direction in firewall rules. Also, for firewall rules to allow NAT traffic, the filter is applied on the original IP addresses. Once set to true, it cannot be reverted to false. + GatewayBackingConfig string `xml:"GatewayBackingConfig"` // Configuration of the vShield edge VM for this gateway. One of: compact, full. + GatewayInterfaces *GatewayInterfaces `xml:"GatewayInterfaces"` // List of Gateway interfaces. + EdgeGatewayServiceConfiguration *GatewayFeatures `xml:"EdgeGatewayServiceConfiguration,omitempty"` // Represents Gateway Features. + HaEnabled bool `xml:"HaEnabled,omitempty"` // True if this gateway is highly available. (Requires two vShield edge VMs.) + UseDefaultRouteForDNSRelay bool `xml:"UseDefaultRouteForDnsRelay,omitempty"` // True if the default gateway on the external network selected for default route should be used as the DNS relay. +} + +// GatewayInterfaces is a list of Gateway Interfaces. +// Type: GatewayInterfacesType +// Namespace: http://www.vmware.com/vcloud/v1.5 +// Description: A list of Gateway Interfaces. +// Since: 5.1 +type GatewayInterfaces struct { + GatewayInterface []*GatewayInterface `xml:"GatewayInterface"` // Gateway Interface. +} + +// GatewayInterface is a gateway interface configuration.
+// Type: GatewayInterfaceType
+// Namespace: http://www.vmware.com/vcloud/v1.5
+// Description: Gateway Interface configuration.
+// Since: 5.1
+type GatewayInterface struct {
+	Name string `xml:"Name,omitempty"` // Internally generated name for the Gateway Interface.
+	DisplayName string `xml:"DisplayName,omitempty"` // Gateway Interface display name.
+	Network *Reference `xml:"Network"` // A reference to the network connected to the gateway interface.
+	InterfaceType string `xml:"InterfaceType"` // The type of interface. One of: Uplink, Internal.
+	SubnetParticipation *SubnetParticipation `xml:"SubnetParticipation,omitempty"` // IP allocation per subnet.
+	ApplyRateLimit bool `xml:"ApplyRateLimit,omitempty"` // True if rate limiting is applied on this interface.
+	InRateLimit float64 `xml:"InRateLimit,omitempty"` // Incoming rate limit expressed as Gbps.
+	OutRateLimit float64 `xml:"OutRateLimit,omitempty"` // Outgoing rate limit expressed as Gbps.
+	UseForDefaultRoute bool `xml:"UseForDefaultRoute,omitempty"` // True if this network is the default route for the gateway.
+}
+
+// SubnetParticipation allows choosing which subnets a gateway can be part of.
+// Type: SubnetParticipationType
+// Namespace: http://www.vmware.com/vcloud/v1.5
+// Description: Allows choosing which subnets a gateway can be part of.
+// Since: 5.1
+type SubnetParticipation struct {
+	Gateway string `xml:"Gateway"` // Gateway for subnet.
+	IPAddress string `xml:"IpAddress,omitempty"` // IP address to be assigned. Keep empty or omit the element for auto assignment.
+	IPRanges *IPRanges `xml:"IpRanges,omitempty"` // Range of IP addresses available for external interfaces.
+	Netmask string `xml:"Netmask"` // Netmask for the subnet.
+}
+
+type EdgeGatewayServiceConfiguration struct {
+	XMLName xml.Name `xml:"EdgeGatewayServiceConfiguration"`
+	Xmlns string `xml:"xmlns,attr,omitempty"`
+	GatewayDhcpService *GatewayDhcpService `xml:"GatewayDhcpService,omitempty"`
+	FirewallService *FirewallService `xml:"FirewallService,omitempty"`
+	NatService *NatService `xml:"NatService,omitempty"`
+}
+
+// GatewayFeatures represents edge gateway services.
+// Element: EdgeGatewayServiceConfiguration
+// Type: GatewayFeaturesType
+// Namespace: http://www.vmware.com/vcloud/v1.5
+// Description: Represents edge gateway services.
+// Since: 5.1
+type GatewayFeatures struct {
+	XMLName xml.Name
+	Xmlns string `xml:"xmlns,attr,omitempty"`
+	FirewallService *FirewallService `xml:"FirewallService,omitempty"` // Substitute for NetworkService. Firewall service settings.
+	NatService *NatService `xml:"NatService,omitempty"` // Substitute for NetworkService. NAT service settings.
+	GatewayDhcpService *GatewayDhcpService `xml:"GatewayDhcpService,omitempty"` // Substitute for NetworkService. Gateway DHCP service settings.
+	GatewayIpsecVpnService *GatewayIpsecVpnService `xml:"GatewayIpsecVpnService,omitempty"` // Substitute for NetworkService. Gateway IPsec VPN service settings.
+	LoadBalancerService *LoadBalancerService `xml:"LoadBalancerService,omitempty"` // Substitute for NetworkService. Load Balancer service settings.
+	StaticRoutingService *StaticRoutingService `xml:"StaticRoutingService,omitempty"` // Substitute for NetworkService. Static Routing service settings.
+}
+
+// StaticRoutingService represents Static Routing network service.
+// Type: StaticRoutingServiceType
+// Namespace: http://www.vmware.com/vcloud/v1.5
+// Description: Represents Static Routing network service.
+// Since: 1.5 +type StaticRoutingService struct { + IsEnabled bool `xml:"IsEnabled"` // Enable or disable the service using this flag + StaticRoute *StaticRoute `xml:"StaticRoute,omitempty"` // Details of each Static Route. +} + +// StaticRoute represents a static route entry +// Type: StaticRouteType +// Namespace: http://www.vmware.com/vcloud/v1.5 +// Description: +// Since: +type StaticRoute struct { + Name string `xml:"Name"` // Name for the static route. + Network string `xml:"Network"` // Network specification in CIDR. + NextHopIP string `xml:"NextHopIp"` // IP Address of Next Hop router/gateway. + Interface string `xml:"Interface,omitempty"` // Interface to use for static routing. Internal and External are the supported values. + GatewayInterface *Reference `xml:"GatewayInterface,omitempty"` // Gateway interface to which static route is bound. +} + +// LoadBalancerService represents gateway load balancer service. +// Type: LoadBalancerServiceType +// Namespace: http://www.vmware.com/vcloud/v1.5 +// Description: Represents gateway load balancer service. +// Since: 5.1 +type LoadBalancerService struct { + IsEnabled bool `xml:"IsEnabled"` // Enable or disable the service using this flag + Pool *LoadBalancerPool `xml:"Pool,omitempty"` // List of load balancer pools. + VirtualServer *LoadBalancerVirtualServer `xml:"VirtualServer,omitempty"` // List of load balancer virtual servers. +} + +// LoadBalancerPool represents a load balancer pool. +// Type: LoadBalancerPoolType +// Namespace: http://www.vmware.com/vcloud/v1.5 +// Description: Represents a load balancer pool. +// Since: 5.1 +type LoadBalancerPool struct { + ID string `xml:"Id,omitempty"` // Load balancer pool id. + Name string `xml:"Name"` // Load balancer pool name. + Description string `xml:"Description,omitempty"` // Load balancer pool description. + ServicePort *LBPoolServicePort `xml:"ServicePort"` // Load balancer pool service port. + Member *LBPoolMember `xml:"Member"` // Load balancer pool member. + Operational bool `xml:"Operational,omitempty"` // True if the load balancer pool is operational. + ErrorDetails string `xml:"ErrorDetails,omitempty"` // Error details for this pool. +} + +// LBPoolServicePort represents a service port in a load balancer pool. +// Type: LBPoolServicePortType +// Namespace: http://www.vmware.com/vcloud/v1.5 +// Description: Represents a service port in a load balancer pool. +// Since: 5.1 +type LBPoolServicePort struct { + IsEnabled bool `xml:"IsEnabled,omitempty"` // True if this service port is enabled. + Protocol string `xml:"Protocol"` // Load balancer protocol type. One of: HTTP, HTTPS, TCP. + Algorithm string `xml:"Algorithm"` // Load Balancer algorithm type. One of: IP_HASH, ROUND_ROBIN, URI, LEAST_CONN. + Port string `xml:"Port"` // Port for this service profile. + HealthCheckPort string `xml:"HealthCheckPort,omitempty"` // Health check port for this profile. + HealthCheck *LBPoolHealthCheck `xml:"HealthCheck,omitempty"` // Health check list. +} + +// LBPoolHealthCheck represents a service port health check list. +// Type: LBPoolHealthCheckType +// Namespace: http://www.vmware.com/vcloud/v1.5 +// Description: Represents a service port health check list. +// Since: 5.1 +type LBPoolHealthCheck struct { + Mode string `xml:"Mode"` // Load balancer service port health check mode. One of: TCP, HTTP, SSL. + URI string `xml:"Uri,omitempty"` // Load balancer service port health check URI. + HealthThreshold string `xml:"HealthThreshold,omitempty"` // Health threshold for this service port. 
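+	// The threshold semantics here are an assumption based on common vShield
+	// load balancer documentation, not stated in this file: HealthThreshold is
+	// the number of consecutive successful checks before a member is marked
+	// healthy, and UnhealthThreshold below is the number of failures before it
+	// is marked down. An illustrative HTTP health check:
+	//
+	//	hc := &LBPoolHealthCheck{
+	//		Mode:            "HTTP",
+	//		URI:             "/healthz", // hypothetical probe path
+	//		HealthThreshold: "2",
+	//		Interval:        "5",
+	//		Timeout:         "15",
+	//	}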
+	UnhealthThreshold string `xml:"UnhealthThreshold,omitempty"` // Unhealthy threshold for this service port.
+	Interval string `xml:"Interval,omitempty"` // Interval between health checks.
+	Timeout string `xml:"Timeout,omitempty"` // Health check timeout.
+}
+
+// LBPoolMember represents a member in a load balancer pool.
+// Type: LBPoolMemberType
+// Namespace: http://www.vmware.com/vcloud/v1.5
+// Description: Represents a member in a load balancer pool.
+// Since: 5.1
+type LBPoolMember struct {
+	IPAddress string `xml:"IpAddress"` // IP address of the load balancer member.
+	Weight string `xml:"Weight"` // Weight of this member.
+	ServicePort *LBPoolServicePort `xml:"ServicePort,omitempty"` // Load balancer member service port.
+}
+
+// LoadBalancerVirtualServer represents a load balancer virtual server.
+// Type: LoadBalancerVirtualServerType
+// Namespace: http://www.vmware.com/vcloud/v1.5
+// Description: Represents a load balancer virtual server.
+// Since: 5.1
+type LoadBalancerVirtualServer struct {
+	IsEnabled bool `xml:"IsEnabled,omitempty"` // True if this virtual server is enabled.
+	Name string `xml:"Name"` // Load balancer virtual server name.
+	Description string `xml:"Description,omitempty"` // Load balancer virtual server description.
+	Interface *Reference `xml:"Interface"` // Gateway Interface to which Load Balancer Virtual Server is bound.
+	IPAddress string `xml:"IpAddress"` // Load balancer virtual server IP address.
+	ServiceProfile *LBVirtualServerServiceProfile `xml:"ServiceProfile"` // Load balancer virtual server service profiles.
+	Logging bool `xml:"Logging,omitempty"` // Enable logging for this virtual server.
+	Pool string `xml:"Pool"` // Name of the load balancer pool associated with this virtual server.
+	LoadBalancerTemplates *VendorTemplate `xml:"LoadBalancerTemplates,omitempty"` // Service template related attributes.
+}
+
+// LBVirtualServerServiceProfile represents a service profile for a load balancing virtual server.
+// Type: LBVirtualServerServiceProfileType
+// Namespace: http://www.vmware.com/vcloud/v1.5
+// Description: Represents service profile for a load balancing virtual server.
+// Since: 5.1
+type LBVirtualServerServiceProfile struct {
+	IsEnabled bool `xml:"IsEnabled,omitempty"` // True if this service profile is enabled.
+	Protocol string `xml:"Protocol"` // Load balancer protocol type. One of: HTTP, HTTPS, TCP.
+	Port string `xml:"Port"` // Port for this service profile.
+	Persistence *LBPersistence `xml:"Persistence,omitempty"` // Persistence type for service profile.
+}
+
+// LBPersistence represents the persistence type for a load balancer service profile.
+// Type: LBPersistenceType
+// Namespace: http://www.vmware.com/vcloud/v1.5
+// Description: Represents persistence type for a load balancer service profile.
+// Since: 5.1
+type LBPersistence struct {
+	Method string `xml:"Method"` // Persistence method. One of: COOKIE, SSL_SESSION_ID.
+	CookieName string `xml:"CookieName,omitempty"` // Cookie name when persistence method is COOKIE.
+	CookieMode string `xml:"CookieMode,omitempty"` // Cookie mode. One of: INSERT, PREFIX, APP.
+}
+
+// VendorTemplate is information about a vendor service template. This is optional.
+// Type: VendorTemplateType
+// Namespace: http://www.vmware.com/vcloud/v1.5
+// Description: Information about a vendor service template. This is optional.
+// Since: 5.1
+type VendorTemplate struct {
+	Name string `xml:"Name"` // Name of the vendor template. This is required.
+	ID string `xml:"Id"` // ID of the vendor template.
This is required. +} + +// GatewayIpsecVpnService represents gateway IPsec VPN service. +// Type: GatewayIpsecVpnServiceType +// Namespace: http://www.vmware.com/vcloud/v1.5 +// Description: Represents gateway IPsec VPN service. +// Since: 5.1 +type GatewayIpsecVpnService struct { + IsEnabled bool `xml:"IsEnabled"` // Enable or disable the service using this flag + Endpoint *GatewayIpsecVpnEndpoint `xml:"Endpoint,omitempty"` // List of IPSec VPN Service Endpoints. + Tunnel []*GatewayIpsecVpnTunnel `xml:"Tunnel"` // List of IPSec VPN tunnels. +} + +// GatewayIpsecVpnEndpoint represents an IPSec VPN endpoint. +// Type: GatewayIpsecVpnEndpointType +// Namespace: http://www.vmware.com/vcloud/v1.5 +// Description: Represents an IPSec VPN endpoint. +// Since: 5.1 +type GatewayIpsecVpnEndpoint struct { + Network *Reference `xml:"Network"` // External network reference. + PublicIP string `xml:"PublicIp,omitempty"` // Public IP for IPSec endpoint. +} + +// GatewayIpsecVpnTunnel represents an IPSec VPN tunnel. +// Type: GatewayIpsecVpnTunnelType +// Namespace: http://www.vmware.com/vcloud/v1.5 +// Description: Represents an IPSec VPN tunnel. +// Since: 5.1 +type GatewayIpsecVpnTunnel struct { + Name string `xml:"Name"` // The name of the tunnel. + Description string `xml:"Description,omitempty"` // A description of the tunnel. + // TODO: Fix this in a better way + IpsecVpnThirdPartyPeer *IpsecVpnThirdPartyPeer `xml:"IpsecVpnThirdPartyPeer,omitempty"` // Details about the peer network. + PeerIPAddress string `xml:"PeerIpAddress"` // IP address of the peer endpoint. + PeerID string `xml:"PeerId"` // Id for the peer end point + LocalIPAddress string `xml:"LocalIpAddress"` // Address of the local network. + LocalID string `xml:"LocalId"` // Id for local end point + LocalSubnet *IpsecVpnSubnet `xml:"LocalSubnet"` // List of local subnets in the tunnel. + PeerSubnet *IpsecVpnSubnet `xml:"PeerSubnet"` // List of peer subnets in the tunnel. + SharedSecret string `xml:"SharedSecret"` // Shared secret used for authentication. + SharedSecretEncrypted bool `xml:"SharedSecretEncrypted,omitempty"` // True if shared secret is encrypted. + EncryptionProtocol string `xml:"EncryptionProtocol"` // Encryption protocol to be used. One of: AES, AES256, TRIPLEDES + Mtu int `xml:"Mtu"` // MTU for the tunnel. + IsEnabled bool `xml:"IsEnabled,omitempty"` // True if the tunnel is enabled. + IsOperational bool `xml:"IsOperational,omitempty"` // True if the tunnel is operational. + ErrorDetails string `xml:"ErrorDetails,omitempty"` // Error details of the tunnel. +} + +// IpsecVpnThirdPartyPeer represents details about a peer network +type IpsecVpnThirdPartyPeer struct { + PeerID string `xml:"PeerId,omitempty"` // Id for the peer end point +} + +// IpsecVpnSubnet represents subnet details. +// Type: IpsecVpnSubnetType +// Namespace: http://www.vmware.com/vcloud/v1.5 +// Description: Represents subnet details. +// Since: 5.1 +type IpsecVpnSubnet struct { + Name string `xml:"Name"` // Gateway Name. + Gateway string `xml:"Gateway"` // Subnet Gateway. + Netmask string `xml:"Netmask"` // Subnet Netmask. +} + +// GatewayDhcpService represents Gateway DHCP service. +// Type: GatewayDhcpServiceType +// Namespace: http://www.vmware.com/vcloud/v1.5 +// Description: Represents Gateway DHCP service. +// Since: 5.1 +type GatewayDhcpService struct { + IsEnabled bool `xml:"IsEnabled,omitempty"` // Enable or disable the service using this flag + Pool []*DhcpPoolService `xml:"Pool,omitempty"` // A DHCP pool. 
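+	// A gateway DHCP configuration assembled from these types might look like
+	// the sketch below; every value is illustrative, and the Reference field
+	// is an assumed placeholder:
+	//
+	//	dhcp := &GatewayDhcpService{
+	//		IsEnabled: true,
+	//		Pool: []*DhcpPoolService{{
+	//			IsEnabled:        true,
+	//			Network:          &Reference{HREF: orgNetHREF}, // hypothetical org vDC network
+	//			DefaultLeaseTime: 3600,
+	//			MaxLeaseTime:     7200,
+	//			LowIPAddress:     "192.168.99.2",
+	//			HighIPAddress:    "192.168.99.100",
+	//		}},
+	//	}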
+}
+
+// DhcpPoolService represents DHCP pool service.
+// Type: DhcpPoolServiceType
+// Namespace: http://www.vmware.com/vcloud/v1.5
+// Description: Represents DHCP pool service.
+// Since: 5.1
+type DhcpPoolService struct {
+	IsEnabled bool `xml:"IsEnabled,omitempty"` // True if this DHCP Pool is enabled.
+	Network *Reference `xml:"Network"` // Org vDC network to which the DHCP range is applicable.
+	DefaultLeaseTime int `xml:"DefaultLeaseTime,omitempty"` // Default lease period for DHCP range.
+	MaxLeaseTime int `xml:"MaxLeaseTime"` // Maximum lease period for DHCP range.
+	LowIPAddress string `xml:"LowIpAddress"` // Low IP address in DHCP range.
+	HighIPAddress string `xml:"HighIpAddress"` // High IP address in DHCP range.
+}
+
+// VMSelection represents details of a vm+nic+iptype selection.
+// Type: VmSelectionType
+// Namespace: http://www.vmware.com/vcloud/v1.5
+// Description: Represents details of a vm+nic+iptype selection.
+// Since: 5.1
+type VMSelection struct {
+	VAppScopedVMID string `xml:"VAppScopedVmId"` // VAppScopedVmId of VM to which this rule applies.
+	VMNicID int `xml:"VmNicId"` // VM NIC ID to which this rule applies.
+	IPType string `xml:"IpType"` // One of: assigned (the assigned internal IP will be automatically chosen), NAT (the NATed external IP will be automatically chosen).
+}
+
+// FirewallRuleProtocols flags for a network protocol in a firewall rule
+// Type: FirewallRuleType/Protocols
+// Namespace: http://www.vmware.com/vcloud/v1.5
+// Description:
+// Since:
+type FirewallRuleProtocols struct {
+	ICMP bool `xml:"Icmp,omitempty"` // True if the rule applies to the ICMP protocol.
+	Any bool `xml:"Any,omitempty"` // True if the rule applies to any protocol.
+	TCP bool `xml:"Tcp,omitempty"` // True if the rule applies to the TCP protocol.
+	UDP bool `xml:"Udp,omitempty"` // True if the rule applies to the UDP protocol.
+	// FIXME: this is supposed to extend protocol support to all the VSM supported protocols
+	// Other string `xml:"Other,omitempty"` // Any other protocol supported by vShield Manager
+}
+
+// FirewallRule represents a firewall rule
+// Type: FirewallRuleType
+// Namespace: http://www.vmware.com/vcloud/v1.5
+// Description: Represents a firewall rule.
+// Since: 0.9
+type FirewallRule struct {
+	ID string `xml:"Id,omitempty"` // Firewall rule identifier.
+	IsEnabled bool `xml:"IsEnabled"` // Used to enable or disable the firewall rule. Default value is true.
+	MatchOnTranslate bool `xml:"MatchOnTranslate"` // For DNATed traffic, match the firewall rules only after the destination IP is translated.
+	Description string `xml:"Description,omitempty"` // A description of the rule.
+	Policy string `xml:"Policy,omitempty"` // One of: drop (drop packets that match the rule), allow (allow packets that match the rule to pass through the firewall)
+	Protocols *FirewallRuleProtocols `xml:"Protocols,omitempty"` // Specify the protocols to which the rule should be applied.
+	IcmpSubType string `xml:"IcmpSubType,omitempty"` // ICMP subtype. One of: address-mask-request, address-mask-reply, destination-unreachable, echo-request, echo-reply, parameter-problem, redirect, router-advertisement, router-solicitation, source-quench, time-exceeded, timestamp-request, timestamp-reply, any.
+	Port int `xml:"Port,omitempty"` // The port to which this rule applies. A value of -1 matches any port.
+	DestinationPortRange string `xml:"DestinationPortRange,omitempty"` // Destination port range to which this rule applies.
+	DestinationIP string `xml:"DestinationIp,omitempty"` // Destination IP address to which the rule applies. A value of Any matches any IP address.
+	DestinationVM *VMSelection `xml:"DestinationVm,omitempty"` // Details of the destination VM.
+	SourcePort int `xml:"SourcePort,omitempty"` // Source port to which this rule applies. A value of -1 matches any port.
+	SourcePortRange string `xml:"SourcePortRange,omitempty"` // Source port range to which this rule applies.
+	SourceIP string `xml:"SourceIp,omitempty"` // Source IP address to which the rule applies. A value of Any matches any IP address.
+	SourceVM *VMSelection `xml:"SourceVm,omitempty"` // Details of the source VM.
+	Direction string `xml:"Direction,omitempty"` // Direction of traffic to which rule applies. One of: in (rule applies to incoming traffic. This is the default value), out (rule applies to outgoing traffic).
+	EnableLogging bool `xml:"EnableLogging"` // Used to enable or disable firewall rule logging. Default value is false.
+}
+
+// FirewallService represents a network firewall service.
+// Type: FirewallServiceType
+// Namespace: http://www.vmware.com/vcloud/v1.5
+// Description: Represents a network firewall service.
+// Since:
+type FirewallService struct {
+	IsEnabled bool `xml:"IsEnabled"` // Enable or disable the service using this flag
+	DefaultAction string `xml:"DefaultAction,omitempty"` // Default action of the firewall. One of: drop (Default. Drop packets that match the rule.), allow (Allow packets that match the rule to pass through the firewall)
+	LogDefaultAction bool `xml:"LogDefaultAction"` // Flag to enable logging for default action. Default value is false.
+	FirewallRule []*FirewallRule `xml:"FirewallRule,omitempty"` // A firewall rule.
+}
+
+// NatService represents a NAT network service.
+// Type: NatServiceType
+// Namespace: http://www.vmware.com/vcloud/v1.5
+// Description: Represents a NAT network service.
+// Since:
+type NatService struct {
+	Xmlns string `xml:"xmlns,attr,omitempty"`
+	// Elements
+	IsEnabled bool `xml:"IsEnabled"` // Enable or disable the service using this flag
+	NatType string `xml:"NatType,omitempty"` // One of: ipTranslation (use IP translation), portForwarding (use port forwarding)
+	Policy string `xml:"Policy,omitempty"` // One of: allowTraffic (Allow all traffic), allowTrafficIn (Allow inbound traffic only)
+	NatRule []*NatRule `xml:"NatRule,omitempty"` // A NAT rule.
+	ExternalIP string `xml:"ExternalIp,omitempty"` // External IP address for rule.
+}
+
+// NatRule represents a NAT rule.
+// Type: NatRuleType
+// Namespace: http://www.vmware.com/vcloud/v1.5
+// Description: Represents a NAT rule.
+// Since: 0.9
+type NatRule struct {
+	Xmlns string `xml:"xmlns,attr,omitempty"`
+	// Elements
+	Description string `xml:"Description,omitempty"` // A description of the rule.
+	RuleType string `xml:"RuleType,omitempty"` // Type of NAT rule. One of: SNAT (source NAT), DNAT (destination NAT)
+	IsEnabled bool `xml:"IsEnabled"` // Used to enable or disable the NAT rule. Default value is true.
+	ID string `xml:"Id,omitempty"` // NAT rule identifier.
+	GatewayNatRule *GatewayNatRule `xml:"GatewayNatRule,omitempty"` // Defines SNAT and DNAT types.
+	OneToOneBasicRule *NatOneToOneBasicRule `xml:"OneToOneBasicRule,omitempty"` // Maps one internal IP address to one external IP address.
+	OneToOneVMRule *NatOneToOneVMRule `xml:"OneToOneVmRule,omitempty"` // Maps one VM NIC to one external IP address.
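+	// The rule bodies here are alternatives: a NatRule appears to carry
+	// exactly one of GatewayNatRule, OneToOneBasicRule, OneToOneVmRule,
+	// PortForwardingRule, or VmRule, matching its RuleType. An illustrative
+	// DNAT sketch (addresses and ports are assumptions):
+	//
+	//	rule := &NatRule{
+	//		RuleType:  "DNAT",
+	//		IsEnabled: true,
+	//		GatewayNatRule: &GatewayNatRule{
+	//			OriginalIP:     "203.0.113.10",
+	//			OriginalPort:   "80",
+	//			TranslatedIP:   "192.168.99.10",
+	//			TranslatedPort: "8080",
+	//			Protocol:       "tcp",
+	//		},
+	//	}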
+	PortForwardingRule *NatPortForwardingRule `xml:"PortForwardingRule,omitempty"` // Port forwarding internal to external IP addresses.
+	VMRule *NatVMRule `xml:"VmRule,omitempty"` // Port forwarding VM NIC to external IP addresses.
+}
+
+// GatewayNatRule represents the SNAT and DNAT rules.
+// Type: GatewayNatRuleType
+// Namespace: http://www.vmware.com/vcloud/v1.5
+// Description: Represents the SNAT and DNAT rules.
+// Since: 5.1
+type GatewayNatRule struct {
+	Xmlns string `xml:"xmlns,attr,omitempty"`
+	// Elements
+	Interface *Reference `xml:"Interface,omitempty"` // Interface to which rule is applied.
+	OriginalIP string `xml:"OriginalIp"` // Original IP for rule.
+	OriginalPort string `xml:"OriginalPort,omitempty"` // Original port for rule.
+	TranslatedIP string `xml:"TranslatedIp"` // Translated IP for rule.
+	TranslatedPort string `xml:"TranslatedPort,omitempty"` // Translated port for rule.
+	Protocol string `xml:"Protocol,omitempty"` // Protocol for rule.
+	IcmpSubType string `xml:"IcmpSubType,omitempty"` // ICMP subtype. One of: address-mask-request, address-mask-reply, destination-unreachable, echo-request, echo-reply, parameter-problem, redirect, router-advertisement, router-solicitation, source-quench, time-exceeded, timestamp-request, timestamp-reply, any.
+}
+
+// NatOneToOneBasicRule represents the NAT basic rule for one to one mapping of internal and external IP addresses from a network.
+// Type: NatOneToOneBasicRuleType
+// Namespace: http://www.vmware.com/vcloud/v1.5
+// Description: Represents the NAT basic rule for one to one mapping of internal and external IP addresses from a network.
+// Since: 0.9
+type NatOneToOneBasicRule struct {
+	Xmlns string `xml:"xmlns,attr,omitempty"`
+	// Elements
+	MappingMode string `xml:"MappingMode"` // One of: automatic (map IP addresses automatically), manual (map IP addresses manually using ExternalIpAddress and InternalIpAddress)
+	ExternalIPAddress string `xml:"ExternalIpAddress"` // External IP address to map.
+	InternalIPAddress string `xml:"InternalIpAddress"` // Internal IP address to map.
+}
+
+// NatOneToOneVMRule represents the NAT rule for one to one mapping of VM NIC and external IP addresses from a network.
+// Type: NatOneToOneVmRuleType
+// Namespace: http://www.vmware.com/vcloud/v1.5
+// Description: Represents the NAT rule for one to one mapping of VM NIC and external IP addresses from a network.
+// Since: 0.9
+type NatOneToOneVMRule struct {
+	Xmlns string `xml:"xmlns,attr,omitempty"`
+	// Elements
+	MappingMode string `xml:"MappingMode"` // Mapping mode.
+	ExternalIPAddress string `xml:"ExternalIpAddress"` // External IP address to map.
+	VAppScopedVMID string `xml:"VAppScopedVmId"` // VAppScopedVmId of VM to which this rule applies.
+	VMNicID int `xml:"VmNicId"` // VM NIC ID to which this rule applies.
+}
+
+// NatPortForwardingRule represents the NAT rule for port forwarding between internal IP/port and external IP/port.
+// Type: NatPortForwardingRuleType
+// Namespace: http://www.vmware.com/vcloud/v1.5
+// Description: Represents the NAT rule for port forwarding between internal IP/port and external IP/port.
+// Since: 0.9
+type NatPortForwardingRule struct {
+	ExternalIPAddress string `xml:"ExternalIpAddress"` // External IP address to map.
+	ExternalPort int `xml:"ExternalPort"` // External port to forward to.
+	InternalIPAddress string `xml:"InternalIpAddress"` // Internal IP address to map.
+	InternalPort int `xml:"InternalPort"` // Internal port to forward to.
+	Protocol string `xml:"Protocol,omitempty"` // Protocol to forward. One of: TCP (forward TCP packets), UDP (forward UDP packets), TCP_UDP (forward TCP and UDP packets).
+}
+
+// NatVMRule represents the NAT rule for port forwarding between VM NIC/port and external IP/port.
+// Type: NatVmRuleType
+// Namespace: http://www.vmware.com/vcloud/v1.5
+// Description: Represents the NAT rule for port forwarding between VM NIC/port and external IP/port.
+// Since: 0.9
+type NatVMRule struct {
+	ExternalIPAddress string `xml:"ExternalIpAddress,omitempty"` // External IP address to map.
+	ExternalPort int `xml:"ExternalPort"` // External port to forward to.
+	VAppScopedVMID string `xml:"VAppScopedVmId"` // VAppScopedVmId of VM to which this rule applies.
+	VMNicID int `xml:"VmNicId"` // VM NIC ID to which this rule applies.
+	InternalPort int `xml:"InternalPort"` // Internal port to forward to.
+	Protocol string `xml:"Protocol,omitempty"` // Protocol to forward. One of: TCP (forward TCP packets), UDP (forward UDP packets), TCP_UDP (forward TCP and UDP packets).
+}
+
+// QueryResultEdgeGatewayRecordsType is a container for query results in records format.
+// Type: QueryResultRecordsType
+// Namespace: http://www.vmware.com/vcloud/v1.5
+// Description: Container for query results in records format.
+// Since: 1.5
+type QueryResultEdgeGatewayRecordsType struct {
+	// Attributes
+	HREF string `xml:"href,attr,omitempty"` // The URI of the entity.
+	Type string `xml:"type,attr,omitempty"` // The MIME type of the entity.
+	Name string `xml:"name,attr,omitempty"` // The name of the entity.
+	Page int `xml:"page,attr,omitempty"` // Page of the result set that this container holds. The first page is page number 1.
+	PageSize int `xml:"pageSize,attr,omitempty"` // Page size, as a number of records or references.
+	Total float64 `xml:"total,attr,omitempty"` // Total number of records or references in the container.
+	// Elements
+	Link []*Link `xml:"Link,omitempty"` // A reference to an entity or operation associated with this object.
+	EdgeGatewayRecord *QueryResultEdgeGatewayRecordType `xml:"EdgeGatewayRecord"` // A record representing a query result.
+}
+
+// QueryResultEdgeGatewayRecordType represents an edge gateway record as a query result.
+type QueryResultEdgeGatewayRecordType struct {
+	// Attributes
+	HREF string `xml:"href,attr,omitempty"` // The URI of the entity.
+	Type string `xml:"type,attr,omitempty"` // The MIME type of the entity.
+	Name string `xml:"name,attr,omitempty"` // EdgeGateway name.
+	Vdc string `xml:"vdc,attr,omitempty"` // VDC reference or ID.
+	NumberOfExtNetworks int `xml:"numberOfExtNetworks,attr,omitempty"` // Number of external networks connected to the edgeGateway.
+	NumberOfOrgNetworks int `xml:"numberOfOrgNetworks,attr,omitempty"` // Number of org VDC networks connected to the edgeGateway.
+	IsBusy bool `xml:"isBusy,attr"` // True if this Edge Gateway is busy.
+	GatewayStatus string `xml:"gatewayStatus,attr,omitempty"` //
+	HaStatus string `xml:"haStatus,attr,omitempty"` // High Availability status of the edgeGateway.
+}
diff --git a/vendor/github.com/hmrc/vmware-govcd/vapp.go b/vendor/github.com/hmrc/vmware-govcd/vapp.go
new file mode 100644
index 000000000..96a6aedc4
--- /dev/null
+++ b/vendor/github.com/hmrc/vmware-govcd/vapp.go
@@ -0,0 +1,807 @@
+/*
+ * Copyright 2014 VMware, Inc. All rights reserved. Licensed under the Apache v2 License.
+ */
+
+package govcd
+
+import (
+	"bytes"
+	"encoding/xml"
+	"fmt"
+	"log"
+	"net/url"
+	"os"
+	"strconv"
+
+	types "github.com/hmrc/vmware-govcd/types/v56"
+)
+
+type VApp struct {
+	VApp *types.VApp
+	c    *Client
+}
+
+func NewVApp(c *Client) *VApp {
+	return &VApp{
+		VApp: new(types.VApp),
+		c:    c,
+	}
+}
+
+func (v *VApp) Refresh() error {
+
+	if v.VApp.HREF == "" {
+		return fmt.Errorf("cannot refresh, object is empty")
+	}
+
+	u, _ := url.ParseRequestURI(v.VApp.HREF)
+
+	req := v.c.NewRequest(map[string]string{}, "GET", *u, nil)
+
+	resp, err := checkResp(v.c.Http.Do(req))
+	if err != nil {
+		return fmt.Errorf("error retrieving vApp: %s", err)
+	}
+
+	// Empty struct before a new unmarshal, otherwise we end up with duplicate
+	// elements in slices.
+	v.VApp = &types.VApp{}
+
+	if err = decodeBody(resp, v.VApp); err != nil {
+		return fmt.Errorf("error decoding vApp response: %s", err)
+	}
+
+	// The request was successful
+	return nil
+}
+
+func (v *VApp) ComposeVApp(orgvdcnetwork OrgVDCNetwork, vapptemplate VAppTemplate, name string, description string) (Task, error) {
+
+	if vapptemplate.VAppTemplate.Children == nil || orgvdcnetwork.OrgVDCNetwork == nil {
+		return Task{}, fmt.Errorf("can't compose a new vApp, objects passed are not valid")
+	}
+
+	// Build request XML
+	vcomp := &types.ComposeVAppParams{
+		Ovf:         "http://schemas.dmtf.org/ovf/envelope/1",
+		Xsi:         "http://www.w3.org/2001/XMLSchema-instance",
+		Xmlns:       "http://www.vmware.com/vcloud/v1.5",
+		Deploy:      false,
+		Name:        name,
+		PowerOn:     false,
+		Description: description,
+		InstantiationParams: &types.InstantiationParams{
+			NetworkConfigSection: &types.NetworkConfigSection{
+				Info: "Configuration parameters for logical networks",
+				NetworkConfig: &types.VAppNetworkConfiguration{
+					NetworkName: orgvdcnetwork.OrgVDCNetwork.Name,
+					Configuration: &types.NetworkConfiguration{
+						FenceMode: "bridged",
+						ParentNetwork: &types.Reference{
+							HREF: orgvdcnetwork.OrgVDCNetwork.HREF,
+							Name: orgvdcnetwork.OrgVDCNetwork.Name,
+							Type: orgvdcnetwork.OrgVDCNetwork.Type,
+						},
+					},
+				},
+			},
+		},
+		SourcedItem: &types.SourcedCompositionItemParam{
+			Source: &types.Reference{
+				HREF: vapptemplate.VAppTemplate.Children.VM[0].HREF,
+				Name: vapptemplate.VAppTemplate.Children.VM[0].Name,
+			},
+			InstantiationParams: &types.InstantiationParams{
+				NetworkConnectionSection: &types.NetworkConnectionSection{
+					Type: vapptemplate.VAppTemplate.Children.VM[0].NetworkConnectionSection.Type,
+					HREF: vapptemplate.VAppTemplate.Children.VM[0].NetworkConnectionSection.HREF,
+					Info: "Network config for sourced item",
+					PrimaryNetworkConnectionIndex: vapptemplate.VAppTemplate.Children.VM[0].NetworkConnectionSection.PrimaryNetworkConnectionIndex,
+					NetworkConnection: &types.NetworkConnection{
+						Network:                 orgvdcnetwork.OrgVDCNetwork.Name,
+						NetworkConnectionIndex:  vapptemplate.VAppTemplate.Children.VM[0].NetworkConnectionSection.NetworkConnection.NetworkConnectionIndex,
+						IsConnected:             true,
+						IPAddressAllocationMode: "POOL",
+					},
+				},
+			},
+			NetworkAssignment: &types.NetworkAssignment{
+				InnerNetwork:     orgvdcnetwork.OrgVDCNetwork.Name,
+				ContainerNetwork: orgvdcnetwork.OrgVDCNetwork.Name,
+			},
+		},
+	}
+
+	output, err := xml.MarshalIndent(vcomp, "  ", "    ")
+	if err != nil {
+		return Task{}, fmt.Errorf("error marshaling vapp compose: %s", err)
+	}
+
+	debug := os.Getenv("GOVCLOUDAIR_DEBUG")
+
+	if debug == "true" {
+		fmt.Printf("\n\nXML DEBUG: %s\n\n", string(output))
+	}
+
+	b := bytes.NewBufferString(xml.Header + string(output))
+
+	s := v.c.VCDVDCHREF
+	s.Path +=
"/action/composeVApp" + + req := v.c.NewRequest(map[string]string{}, "POST", s, b) + + req.Header.Add("Content-Type", "application/vnd.vmware.vcloud.composeVAppParams+xml") + + resp, err := checkResp(v.c.Http.Do(req)) + if err != nil { + return Task{}, fmt.Errorf("error instantiating a new vApp: %s", err) + } + + if err = decodeBody(resp, v.VApp); err != nil { + return Task{}, fmt.Errorf("error decoding vApp response: %s", err) + } + + task := NewTask(v.c) + task.Task = v.VApp.Tasks.Task[0] + + // The request was successful + return *task, nil + +} + +func (v *VApp) PowerOn() (Task, error) { + + s, _ := url.ParseRequestURI(v.VApp.HREF) + s.Path += "/power/action/powerOn" + + req := v.c.NewRequest(map[string]string{}, "POST", *s, nil) + + resp, err := checkResp(v.c.Http.Do(req)) + if err != nil { + return Task{}, fmt.Errorf("error powering on vApp: %s", err) + } + + task := NewTask(v.c) + + if err = decodeBody(resp, task.Task); err != nil { + return Task{}, fmt.Errorf("error decoding Task response: %s", err) + } + + // The request was successful + return *task, nil + +} + +func (v *VApp) PowerOff() (Task, error) { + + s, _ := url.ParseRequestURI(v.VApp.HREF) + s.Path += "/power/action/powerOff" + + req := v.c.NewRequest(map[string]string{}, "POST", *s, nil) + + resp, err := checkResp(v.c.Http.Do(req)) + if err != nil { + return Task{}, fmt.Errorf("error powering off vApp: %s", err) + } + + task := NewTask(v.c) + + if err = decodeBody(resp, task.Task); err != nil { + return Task{}, fmt.Errorf("error decoding Task response: %s", err) + } + + // The request was successful + return *task, nil + +} + +func (v *VApp) Reboot() (Task, error) { + + s, _ := url.ParseRequestURI(v.VApp.HREF) + s.Path += "/power/action/reboot" + + req := v.c.NewRequest(map[string]string{}, "POST", *s, nil) + + resp, err := checkResp(v.c.Http.Do(req)) + if err != nil { + return Task{}, fmt.Errorf("error rebooting vApp: %s", err) + } + + task := NewTask(v.c) + + if err = decodeBody(resp, task.Task); err != nil { + return Task{}, fmt.Errorf("error decoding Task response: %s", err) + } + + // The request was successful + return *task, nil + +} + +func (v *VApp) Reset() (Task, error) { + + s, _ := url.ParseRequestURI(v.VApp.HREF) + s.Path += "/power/action/reset" + + req := v.c.NewRequest(map[string]string{}, "POST", *s, nil) + + resp, err := checkResp(v.c.Http.Do(req)) + if err != nil { + return Task{}, fmt.Errorf("error resetting vApp: %s", err) + } + + task := NewTask(v.c) + + if err = decodeBody(resp, task.Task); err != nil { + return Task{}, fmt.Errorf("error decoding Task response: %s", err) + } + + // The request was successful + return *task, nil + +} + +func (v *VApp) Suspend() (Task, error) { + + s, _ := url.ParseRequestURI(v.VApp.HREF) + s.Path += "/power/action/suspend" + + req := v.c.NewRequest(map[string]string{}, "POST", *s, nil) + + resp, err := checkResp(v.c.Http.Do(req)) + if err != nil { + return Task{}, fmt.Errorf("error suspending vApp: %s", err) + } + + task := NewTask(v.c) + + if err = decodeBody(resp, task.Task); err != nil { + return Task{}, fmt.Errorf("error decoding Task response: %s", err) + } + + // The request was successful + return *task, nil + +} + +func (v *VApp) Shutdown() (Task, error) { + + s, _ := url.ParseRequestURI(v.VApp.HREF) + s.Path += "/power/action/shutdown" + + req := v.c.NewRequest(map[string]string{}, "POST", *s, nil) + + resp, err := checkResp(v.c.Http.Do(req)) + if err != nil { + return Task{}, fmt.Errorf("error shutting down vApp: %s", err) + } + + task := NewTask(v.c) + + 
if err = decodeBody(resp, task.Task); err != nil {
+		return Task{}, fmt.Errorf("error decoding Task response: %s", err)
+	}
+
+	// The request was successful
+	return *task, nil
+
+}
+
+func (v *VApp) Undeploy() (Task, error) {
+
+	vu := &types.UndeployVAppParams{
+		Xmlns:               "http://www.vmware.com/vcloud/v1.5",
+		UndeployPowerAction: "powerOff",
+	}
+
+	output, err := xml.MarshalIndent(vu, "  ", "    ")
+	if err != nil {
+		fmt.Printf("error: %v\n", err)
+	}
+
+	debug := os.Getenv("GOVCLOUDAIR_DEBUG")
+
+	if debug == "true" {
+		fmt.Printf("\n\nXML DEBUG: %s\n\n", string(output))
+	}
+
+	b := bytes.NewBufferString(xml.Header + string(output))
+
+	s, _ := url.ParseRequestURI(v.VApp.HREF)
+	s.Path += "/action/undeploy"
+
+	req := v.c.NewRequest(map[string]string{}, "POST", *s, b)
+
+	req.Header.Add("Content-Type", "application/vnd.vmware.vcloud.undeployVAppParams+xml")
+
+	resp, err := checkResp(v.c.Http.Do(req))
+	if err != nil {
+		return Task{}, fmt.Errorf("error undeploying vApp: %s", err)
+	}
+
+	task := NewTask(v.c)
+
+	if err = decodeBody(resp, task.Task); err != nil {
+		return Task{}, fmt.Errorf("error decoding Task response: %s", err)
+	}
+
+	// The request was successful
+	return *task, nil
+
+}
+
+func (v *VApp) Deploy() (Task, error) {
+
+	vu := &types.DeployVAppParams{
+		Xmlns:   "http://www.vmware.com/vcloud/v1.5",
+		PowerOn: false,
+	}
+
+	output, err := xml.MarshalIndent(vu, "  ", "    ")
+	if err != nil {
+		fmt.Printf("error: %v\n", err)
+	}
+
+	debug := os.Getenv("GOVCLOUDAIR_DEBUG")
+
+	if debug == "true" {
+		fmt.Printf("\n\nXML DEBUG: %s\n\n", string(output))
+	}
+
+	b := bytes.NewBufferString(xml.Header + string(output))
+
+	s, _ := url.ParseRequestURI(v.VApp.HREF)
+	s.Path += "/action/deploy"
+
+	req := v.c.NewRequest(map[string]string{}, "POST", *s, b)
+
+	req.Header.Add("Content-Type", "application/vnd.vmware.vcloud.deployVAppParams+xml")
+
+	resp, err := checkResp(v.c.Http.Do(req))
+	if err != nil {
+		return Task{}, fmt.Errorf("error deploying vApp: %s", err)
+	}
+
+	task := NewTask(v.c)
+
+	if err = decodeBody(resp, task.Task); err != nil {
+		return Task{}, fmt.Errorf("error decoding Task response: %s", err)
+	}
+
+	// The request was successful
+	return *task, nil
+
+}
+
+func (v *VApp) Delete() (Task, error) {
+
+	s, _ := url.ParseRequestURI(v.VApp.HREF)
+
+	req := v.c.NewRequest(map[string]string{}, "DELETE", *s, nil)
+
+	resp, err := checkResp(v.c.Http.Do(req))
+	if err != nil {
+		return Task{}, fmt.Errorf("error deleting vApp: %s", err)
+	}
+
+	task := NewTask(v.c)
+
+	if err = decodeBody(resp, task.Task); err != nil {
+		return Task{}, fmt.Errorf("error decoding Task response: %s", err)
+	}
+
+	// The request was successful
+	return *task, nil
+
+}
+
+func (v *VApp) RunCustomizationScript(computername, script string) (Task, error) {
+
+	err := v.Refresh()
+	if err != nil {
+		return Task{}, fmt.Errorf("error refreshing vapp before running customization: %v", err)
+	}
+
+	// Check if VApp Children is populated
+	if v.VApp.Children == nil {
+		return Task{}, fmt.Errorf("vApp doesn't contain any children, aborting customization")
+	}
+
+	vu := &types.GuestCustomizationSection{
+		Ovf:   "http://schemas.dmtf.org/ovf/envelope/1",
+		Xsi:   "http://www.w3.org/2001/XMLSchema-instance",
+		Xmlns: "http://www.vmware.com/vcloud/v1.5",
+
+		HREF:                v.VApp.Children.VM[0].HREF,
+		Type:                "application/vnd.vmware.vcloud.guestCustomizationSection+xml",
+		Info:                "Specifies Guest OS Customization Settings",
+		Enabled:             true,
+		ComputerName:        computername,
+		CustomizationScript: script,
+	}
+
+	output, err :=
xml.MarshalIndent(vu, " ", " ") + if err != nil { + fmt.Printf("error: %v\n", err) + } + + log.Printf("[DEBUG] VCD Client configuration: %s", output) + + debug := os.Getenv("GOVCLOUDAIR_DEBUG") + + if debug == "true" { + fmt.Printf("\n\nXML DEBUG: %s\n\n", string(output)) + } + + b := bytes.NewBufferString(xml.Header + string(output)) + + s, _ := url.ParseRequestURI(v.VApp.Children.VM[0].HREF) + s.Path += "/guestCustomizationSection/" + + req := v.c.NewRequest(map[string]string{}, "PUT", *s, b) + + req.Header.Add("Content-Type", "application/vnd.vmware.vcloud.guestCustomizationSection+xml") + + resp, err := checkResp(v.c.Http.Do(req)) + if err != nil { + return Task{}, fmt.Errorf("error customizing VM: %s", err) + } + + task := NewTask(v.c) + + if err = decodeBody(resp, task.Task); err != nil { + return Task{}, fmt.Errorf("error decoding Task response: %s", err) + } + + // The request was successful + return *task, nil + +} + +func (v *VApp) GetStatus() (string, error) { + err := v.Refresh() + if err != nil { + return "", fmt.Errorf("error refreshing vapp: %v", err) + } + return types.VAppStatuses[v.VApp.Status], nil +} + +func (v *VApp) ChangeCPUcount(size int) (Task, error) { + + err := v.Refresh() + if err != nil { + return Task{}, fmt.Errorf("error refreshing vapp before running customization: %v", err) + } + + // Check if VApp Children is populated + if v.VApp.Children == nil { + return Task{}, fmt.Errorf("vApp doesn't contain any children, aborting customization") + } + + newcpu := &types.OVFItem{ + XmlnsRasd: "http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_ResourceAllocationSettingData", + XmlnsVCloud: "http://www.vmware.com/vcloud/v1.5", + XmlnsXsi: "http://www.w3.org/2001/XMLSchema-instance", + VCloudHREF: v.VApp.Children.VM[0].HREF + "/virtualHardwareSection/cpu", + VCloudType: "application/vnd.vmware.vcloud.rasdItem+xml", + AllocationUnits: "hertz * 10^6", + Description: "Number of Virtual CPUs", + ElementName: strconv.Itoa(size) + " virtual CPU(s)", + InstanceID: 4, + Reservation: 0, + ResourceType: 3, + VirtualQuantity: size, + Weight: 0, + Link: &types.Link{ + HREF: v.VApp.Children.VM[0].HREF + "/virtualHardwareSection/cpu", + Rel: "edit", + Type: "application/vnd.vmware.vcloud.rasdItem+xml", + }, + } + + output, err := xml.MarshalIndent(newcpu, " ", " ") + if err != nil { + fmt.Printf("error: %v\n", err) + } + + debug := os.Getenv("GOVCLOUDAIR_DEBUG") + + if debug == "true" { + fmt.Printf("\n\nXML DEBUG: %s\n\n", string(output)) + } + + b := bytes.NewBufferString(xml.Header + string(output)) + + s, _ := url.ParseRequestURI(v.VApp.Children.VM[0].HREF) + s.Path += "/virtualHardwareSection/cpu" + + req := v.c.NewRequest(map[string]string{}, "PUT", *s, b) + + req.Header.Add("Content-Type", "application/vnd.vmware.vcloud.rasdItem+xml") + + resp, err := checkResp(v.c.Http.Do(req)) + if err != nil { + return Task{}, fmt.Errorf("error customizing VM: %s", err) + } + + task := NewTask(v.c) + + if err = decodeBody(resp, task.Task); err != nil { + return Task{}, fmt.Errorf("error decoding Task response: %s", err) + } + + // The request was successful + return *task, nil + +} + +func (v *VApp) ChangeVMName(name string) (Task, error) { + err := v.Refresh() + if err != nil { + return Task{}, fmt.Errorf("error refreshing vapp before running customization: %v", err) + } + + if v.VApp.Children == nil { + return Task{}, fmt.Errorf("vApp doesn't contain any children, aborting customization") + } + + newname := &types.VM{ + Name: name, + Xmlns: "http://www.vmware.com/vcloud/v1.5", + } + 
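+	// The sparse types.VM literal above is marshaled as-is and PUT to the
+	// first child VM's HREF, so only the name attribute is sent. A usage
+	// sketch, assuming vapp is a composed and refreshed *VApp:
+	//
+	//	task, err := vapp.ChangeVMName("web-01")
+	//	if err != nil {
+	//		// handle rename failure
+	//	}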
+	output, err := xml.MarshalIndent(newname, "  ", "    ")
+	if err != nil {
+		fmt.Printf("error: %v\n", err)
+	}
+
+	log.Printf("[DEBUG] VCD Client configuration: %s", output)
+
+	b := bytes.NewBufferString(xml.Header + string(output))
+
+	s, _ := url.ParseRequestURI(v.VApp.Children.VM[0].HREF)
+
+	req := v.c.NewRequest(map[string]string{}, "PUT", *s, b)
+
+	req.Header.Add("Content-Type", "application/vnd.vmware.vcloud.vm+xml")
+
+	resp, err := checkResp(v.c.Http.Do(req))
+	if err != nil {
+		return Task{}, fmt.Errorf("error customizing VM: %s", err)
+	}
+
+	task := NewTask(v.c)
+
+	if err = decodeBody(resp, task.Task); err != nil {
+		return Task{}, fmt.Errorf("error decoding Task response: %s", err)
+	}
+
+	// The request was successful
+	return *task, nil
+
+}
+
+func (v *VApp) DeleteMetadata(key string) (Task, error) {
+	err := v.Refresh()
+	if err != nil {
+		return Task{}, fmt.Errorf("error refreshing vapp before running customization: %v", err)
+	}
+
+	if v.VApp.Children == nil {
+		return Task{}, fmt.Errorf("vApp doesn't contain any children, aborting customization")
+	}
+
+	s, _ := url.ParseRequestURI(v.VApp.Children.VM[0].HREF)
+	s.Path += "/metadata/" + key
+
+	req := v.c.NewRequest(map[string]string{}, "DELETE", *s, nil)
+
+	resp, err := checkResp(v.c.Http.Do(req))
+	if err != nil {
+		return Task{}, fmt.Errorf("error deleting metadata: %s", err)
+	}
+
+	task := NewTask(v.c)
+
+	if err = decodeBody(resp, task.Task); err != nil {
+		return Task{}, fmt.Errorf("error decoding Task response: %s", err)
+	}
+
+	// The request was successful
+	return *task, nil
+}
+
+func (v *VApp) AddMetadata(key, value string) (Task, error) {
+	err := v.Refresh()
+	if err != nil {
+		return Task{}, fmt.Errorf("error refreshing vapp before running customization: %v", err)
+	}
+
+	if v.VApp.Children == nil {
+		return Task{}, fmt.Errorf("vApp doesn't contain any children, aborting customization")
+	}
+
+	newmetadata := &types.MetadataValue{
+		Xmlns: "http://www.vmware.com/vcloud/v1.5",
+		Xsi:   "http://www.w3.org/2001/XMLSchema-instance",
+		TypedValue: &types.TypedValue{
+			XsiType: "MetadataStringValue",
+			Value:   value,
+		},
+	}
+
+	output, err := xml.MarshalIndent(newmetadata, "  ", "    ")
+	if err != nil {
+		fmt.Printf("error: %v\n", err)
+	}
+
+	log.Printf("[DEBUG] MetadataXML: %s", output)
+
+	b := bytes.NewBufferString(xml.Header + string(output))
+
+	s, _ := url.ParseRequestURI(v.VApp.Children.VM[0].HREF)
+	s.Path += "/metadata/" + key
+
+	req := v.c.NewRequest(map[string]string{}, "PUT", *s, b)
+
+	req.Header.Add("Content-Type", "application/vnd.vmware.vcloud.metadata.value+xml")
+
+	resp, err := checkResp(v.c.Http.Do(req))
+	if err != nil {
+		return Task{}, fmt.Errorf("error adding metadata: %s", err)
+	}
+
+	task := NewTask(v.c)
+
+	if err = decodeBody(resp, task.Task); err != nil {
+		return Task{}, fmt.Errorf("error decoding Task response: %s", err)
+	}
+
+	// The request was successful
+	return *task, nil
+
+}
+
+func (v *VApp) ChangeNetworkConfig(network, ip string) (Task, error) {
+	err := v.Refresh()
+	if err != nil {
+		return Task{}, fmt.Errorf("error refreshing vapp before running customization: %v", err)
+	}
+
+	if v.VApp.Children == nil {
+		return Task{}, fmt.Errorf("vApp doesn't contain any children, aborting customization")
+	}
+
+	// Determine what type of address is requested for the vApp
+	ipAllocationMode := "NONE"
+	ipAddress := "Any"
+
+	// TODO: Review current behaviour of using DHCP when left blank
+	if ip == "" || ip == "dhcp" {
+		ipAllocationMode = "DHCP"
+	} else if ip ==
"allocated" { + ipAllocationMode = "POOL" + } else if ip == "none" { + ipAllocationMode = "NONE" + } else if ip != "" { + ipAllocationMode = "MANUAL" + // TODO: Check a valid IP has been given + ipAddress = ip + } + + networkConnection := &types.NetworkConnection{ + Network: network, + NeedsCustomization: true, + NetworkConnectionIndex: 0, + IPAddress: ipAddress, + IsConnected: true, + IPAddressAllocationMode: ipAllocationMode, + } + + newnetwork := &types.NetworkConnectionSection{ + Xmlns: "http://www.vmware.com/vcloud/v1.5", + Ovf: "http://schemas.dmtf.org/ovf/envelope/1", + Info: "Specifies the available VM network connections", + PrimaryNetworkConnectionIndex: 0, + NetworkConnection: networkConnection, + } + + output, err := xml.MarshalIndent(newnetwork, " ", " ") + if err != nil { + fmt.Printf("error: %v\n", err) + } + + log.Printf("[DEBUG] NetworkXML: %s", output) + + b := bytes.NewBufferString(xml.Header + string(output)) + + s, _ := url.ParseRequestURI(v.VApp.Children.VM[0].HREF) + s.Path += "/networkConnectionSection/" + + req := v.c.NewRequest(map[string]string{}, "PUT", *s, b) + + req.Header.Add("Content-Type", "application/vnd.vmware.vcloud.networkConnectionSection+xml") + + resp, err := checkResp(v.c.Http.Do(req)) + if err != nil { + return Task{}, fmt.Errorf("error customizing VM Network: %s", err) + } + + task := NewTask(v.c) + + if err = decodeBody(resp, task.Task); err != nil { + return Task{}, fmt.Errorf("error decoding Task response: %s", err) + } + + // The request was successful + return *task, nil +} + +func (v *VApp) ChangeMemorySize(size int) (Task, error) { + + err := v.Refresh() + if err != nil { + return Task{}, fmt.Errorf("error refreshing vapp before running customization: %v", err) + } + + // Check if VApp Children is populated + if v.VApp.Children == nil { + return Task{}, fmt.Errorf("vApp doesn't contain any children, aborting customization") + } + + newmem := &types.OVFItem{ + XmlnsRasd: "http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_ResourceAllocationSettingData", + XmlnsVCloud: "http://www.vmware.com/vcloud/v1.5", + XmlnsXsi: "http://www.w3.org/2001/XMLSchema-instance", + VCloudHREF: v.VApp.Children.VM[0].HREF + "/virtualHardwareSection/memory", + VCloudType: "application/vnd.vmware.vcloud.rasdItem+xml", + AllocationUnits: "byte * 2^20", + Description: "Memory Size", + ElementName: strconv.Itoa(size) + " MB of memory", + InstanceID: 5, + Reservation: 0, + ResourceType: 4, + VirtualQuantity: size, + Weight: 0, + Link: &types.Link{ + HREF: v.VApp.Children.VM[0].HREF + "/virtualHardwareSection/memory", + Rel: "edit", + Type: "application/vnd.vmware.vcloud.rasdItem+xml", + }, + } + + output, err := xml.MarshalIndent(newmem, " ", " ") + if err != nil { + fmt.Printf("error: %v\n", err) + } + + debug := os.Getenv("GOVCLOUDAIR_DEBUG") + + if debug == "true" { + fmt.Printf("\n\nXML DEBUG: %s\n\n", string(output)) + } + + b := bytes.NewBufferString(xml.Header + string(output)) + + s, _ := url.ParseRequestURI(v.VApp.Children.VM[0].HREF) + s.Path += "/virtualHardwareSection/memory" + + req := v.c.NewRequest(map[string]string{}, "PUT", *s, b) + + req.Header.Add("Content-Type", "application/vnd.vmware.vcloud.rasdItem+xml") + + resp, err := checkResp(v.c.Http.Do(req)) + if err != nil { + return Task{}, fmt.Errorf("error customizing VM: %s", err) + } + + task := NewTask(v.c) + + if err = decodeBody(resp, task.Task); err != nil { + return Task{}, fmt.Errorf("error decoding Task response: %s", err) + } + + // The request was successful + return *task, nil + +} diff 
--git a/vendor/github.com/hmrc/vmware-govcd/vapp_test.go b/vendor/github.com/hmrc/vmware-govcd/vapp_test.go new file mode 100644 index 000000000..475cc21ee --- /dev/null +++ b/vendor/github.com/hmrc/vmware-govcd/vapp_test.go @@ -0,0 +1,692 @@ +/* + * Copyright 2014 VMware, Inc. All rights reserved. Licensed under the Apache v2 License. + */ + +package govcd + +import ( + "github.com/hmrc/vmware-govcd/testutil" + + . "gopkg.in/check.v1" +) + +func (s *S) Test_ComposeVApp(c *C) { + + testServer.ResponseMap(7, testutil.ResponseMap{ + "/api/org/11111111-1111-1111-1111-111111111111": testutil.Response{200, nil, orgExample}, + "/api/network/44444444-4444-4444-4444-4444444444444": testutil.Response{200, nil, orgvdcnetExample}, + "/api/catalog/e8a20fdf-8a78-440c-ac71-0420db59f854": testutil.Response{200, nil, catalogExample}, + "/api/catalogItem/1176e485-8858-4e15-94e5-ae4face605ae": testutil.Response{200, nil, catalogitemExample}, + "/api/vAppTemplate/vappTemplate-40cb9721-5f1a-44f9-b5c3-98c5f518c4f5": testutil.Response{200, nil, vapptemplateExample}, + "/api/vdc/00000000-0000-0000-0000-000000000000/action/composeVApp": testutil.Response{200, nil, instantiatedvappExample}, + "/api/vApp/vapp-00000000-0000-0000-0000-000000000000": testutil.Response{200, nil, vappExample}, + }) + + // Get the Org populated + org, err := s.vdc.GetVDCOrg() + c.Assert(err, IsNil) + + // Populate OrgVDCNetwork + net, err := s.vdc.FindVDCNetwork("networkName") + c.Assert(err, IsNil) + + // Populate Catalog + cat, err := org.FindCatalog("Public Catalog") + c.Assert(err, IsNil) + + // Populate Catalog Item + catitem, err := cat.FindCatalogItem("CentOS64-32bit") + c.Assert(err, IsNil) + + // Get VAppTemplate + vapptemplate, err := catitem.GetVAppTemplate() + c.Assert(err, IsNil) + + // Compose VApp + task, err := s.vapp.ComposeVApp(net, vapptemplate, "name", "description") + c.Assert(err, IsNil) + c.Assert(task.Task.OperationName, Equals, "vdcInstantiateVapp") + c.Assert(s.vapp.VApp.HREF, Equals, "http://localhost:4444/api/vApp/vapp-00000000-0000-0000-0000-000000000000") + + status, err := s.vapp.GetStatus() + + c.Assert(err, IsNil) + c.Assert(status, Equals, "POWERED_OFF") + + _ = testServer.WaitRequests(7) + +} + +func (s *S) Test_PowerOn(c *C) { + + testServer.Response(200, nil, taskExample) + task, err := s.vapp.PowerOn() + _ = testServer.WaitRequest() + + c.Assert(err, IsNil) + c.Assert(task.Task.Status, Equals, "success") + +} + +func (s *S) Test_PowerOff(c *C) { + + testServer.Response(200, nil, taskExample) + task, err := s.vapp.PowerOff() + _ = testServer.WaitRequest() + + c.Assert(err, IsNil) + c.Assert(task.Task.Status, Equals, "success") + +} + +func (s *S) Test_Reboot(c *C) { + + testServer.Response(200, nil, taskExample) + task, err := s.vapp.Reboot() + _ = testServer.WaitRequest() + + c.Assert(err, IsNil) + c.Assert(task.Task.Status, Equals, "success") + +} + +func (s *S) Test_AddMetadata(c *C) { + testServer.ResponseMap(8, testutil.ResponseMap{ + "/api/org/11111111-1111-1111-1111-111111111111": testutil.Response{200, nil, orgExample}, + "/api/network/44444444-4444-4444-4444-4444444444444": testutil.Response{200, nil, orgvdcnetExample}, + "/api/catalog/e8a20fdf-8a78-440c-ac71-0420db59f854": testutil.Response{200, nil, catalogExample}, + "/api/catalogItem/1176e485-8858-4e15-94e5-ae4face605ae": testutil.Response{200, nil, catalogitemExample}, + "/api/vAppTemplate/vappTemplate-40cb9721-5f1a-44f9-b5c3-98c5f518c4f5": testutil.Response{200, nil, vapptemplateExample}, + 
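+		// The routes above satisfy the fixture chain used to compose a vApp
+		// (org, network, catalog, catalog item, template); the routes below
+		// serve the compose call itself, the composed vApp, and the metadata
+		// endpoint actually under test.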
"/api/vdc/00000000-0000-0000-0000-000000000000/action/composeVApp": testutil.Response{200, nil, instantiatedvappExample}, + "/api/vApp/vapp-00000000-0000-0000-0000-000000000000": testutil.Response{200, nil, vappExample}, + "/api/vApp/vm-00000000-0000-0000-0000-000000000000/metadata/key": testutil.Response{200, nil, taskExample}, + }) + + // Get the Org populated + org, err := s.vdc.GetVDCOrg() + c.Assert(err, IsNil) + + // Populate OrgVDCNetwork + net, err := s.vdc.FindVDCNetwork("networkName") + c.Assert(err, IsNil) + + // Populate Catalog + cat, err := org.FindCatalog("Public Catalog") + c.Assert(err, IsNil) + + // Populate Catalog Item + catitem, err := cat.FindCatalogItem("CentOS64-32bit") + c.Assert(err, IsNil) + + // Get VAppTemplate + vapptemplate, err := catitem.GetVAppTemplate() + c.Assert(err, IsNil) + + // Compose VApp + task, err := s.vapp.ComposeVApp(net, vapptemplate, "name", "description") + c.Assert(err, IsNil) + c.Assert(task.Task.OperationName, Equals, "vdcInstantiateVapp") + c.Assert(s.vapp.VApp.HREF, Equals, "http://localhost:4444/api/vApp/vapp-00000000-0000-0000-0000-000000000000") + + task, err = s.vapp.AddMetadata("key", "value") + + c.Assert(err, IsNil) + c.Assert(task.Task.Status, Equals, "success") + + _ = testServer.WaitRequests(8) + +} + +func (s *S) Test_ChangeVMName(c *C) { + + testServer.ResponseMap(8, testutil.ResponseMap{ + "/api/org/11111111-1111-1111-1111-111111111111": testutil.Response{200, nil, orgExample}, + "/api/network/44444444-4444-4444-4444-4444444444444": testutil.Response{200, nil, orgvdcnetExample}, + "/api/catalog/e8a20fdf-8a78-440c-ac71-0420db59f854": testutil.Response{200, nil, catalogExample}, + "/api/catalogItem/1176e485-8858-4e15-94e5-ae4face605ae": testutil.Response{200, nil, catalogitemExample}, + "/api/vAppTemplate/vappTemplate-40cb9721-5f1a-44f9-b5c3-98c5f518c4f5": testutil.Response{200, nil, vapptemplateExample}, + "/api/vdc/00000000-0000-0000-0000-000000000000/action/composeVApp": testutil.Response{200, nil, instantiatedvappExample}, + "/api/vApp/vapp-00000000-0000-0000-0000-000000000000": testutil.Response{200, nil, vappExample}, + "/api/vApp/vm-00000000-0000-0000-0000-000000000000": testutil.Response{200, nil, taskExample}, + }) + + // Get the Org populated + org, err := s.vdc.GetVDCOrg() + c.Assert(err, IsNil) + + // Populate OrgVDCNetwork + net, err := s.vdc.FindVDCNetwork("networkName") + c.Assert(err, IsNil) + + // Populate Catalog + cat, err := org.FindCatalog("Public Catalog") + c.Assert(err, IsNil) + + // Populate Catalog Item + catitem, err := cat.FindCatalogItem("CentOS64-32bit") + c.Assert(err, IsNil) + + // Get VAppTemplate + vapptemplate, err := catitem.GetVAppTemplate() + c.Assert(err, IsNil) + + // Compose VApp + task, err := s.vapp.ComposeVApp(net, vapptemplate, "name", "description") + c.Assert(err, IsNil) + c.Assert(task.Task.OperationName, Equals, "vdcInstantiateVapp") + c.Assert(s.vapp.VApp.HREF, Equals, "http://localhost:4444/api/vApp/vapp-00000000-0000-0000-0000-000000000000") + + task, err = s.vapp.ChangeVMName("My-vm") + + c.Assert(err, IsNil) + c.Assert(task.Task.Status, Equals, "success") + + _ = testServer.WaitRequests(8) + +} + +func (s *S) Test_Reset(c *C) { + + testServer.Response(200, nil, taskExample) + task, err := s.vapp.Reset() + _ = testServer.WaitRequest() + + c.Assert(err, IsNil) + c.Assert(task.Task.Status, Equals, "success") + +} + +func (s *S) Test_Suspend(c *C) { + + testServer.Response(200, nil, taskExample) + task, err := s.vapp.Suspend() + _ = testServer.WaitRequest() + + c.Assert(err, 
IsNil) + c.Assert(task.Task.Status, Equals, "success") + +} + +func (s *S) Test_Shutdown(c *C) { + + testServer.Response(200, nil, taskExample) + task, err := s.vapp.Shutdown() + _ = testServer.WaitRequest() + + c.Assert(err, IsNil) + c.Assert(task.Task.Status, Equals, "success") + +} + +func (s *S) Test_Undeploy(c *C) { + + testServer.Response(200, nil, taskExample) + task, err := s.vapp.Undeploy() + _ = testServer.WaitRequest() + + c.Assert(err, IsNil) + c.Assert(task.Task.Status, Equals, "success") + +} + +func (s *S) Test_Deploy(c *C) { + + testServer.Response(200, nil, taskExample) + task, err := s.vapp.Deploy() + _ = testServer.WaitRequest() + + c.Assert(err, IsNil) + c.Assert(task.Task.Status, Equals, "success") + +} + +func (s *S) Test_Delete(c *C) { + + testServer.Response(200, nil, taskExample) + task, err := s.vapp.Delete() + _ = testServer.WaitRequest() + + c.Assert(err, IsNil) + c.Assert(task.Task.Status, Equals, "success") + +} + +func (s *S) Test_RunCustomizationScript(c *C) { + + testServer.ResponseMap(8, testutil.ResponseMap{ + "/api/org/11111111-1111-1111-1111-111111111111": testutil.Response{200, nil, orgExample}, + "/api/network/44444444-4444-4444-4444-4444444444444": testutil.Response{200, nil, orgvdcnetExample}, + "/api/catalog/e8a20fdf-8a78-440c-ac71-0420db59f854": testutil.Response{200, nil, catalogExample}, + "/api/catalogItem/1176e485-8858-4e15-94e5-ae4face605ae": testutil.Response{200, nil, catalogitemExample}, + "/api/vAppTemplate/vappTemplate-40cb9721-5f1a-44f9-b5c3-98c5f518c4f5": testutil.Response{200, nil, vapptemplateExample}, + "/api/vdc/00000000-0000-0000-0000-000000000000/action/composeVApp": testutil.Response{200, nil, instantiatedvappExample}, + "/api/vApp/vapp-00000000-0000-0000-0000-000000000000": testutil.Response{200, nil, vappExample}, + "/api/vApp/vm-00000000-0000-0000-0000-000000000000/guestCustomizationSection/": testutil.Response{200, nil, taskExample}, + }) + + // Get the Org populated + org, err := s.vdc.GetVDCOrg() + c.Assert(err, IsNil) + + // Populate OrgVDCNetwork + net, err := s.vdc.FindVDCNetwork("networkName") + c.Assert(err, IsNil) + + // Populate Catalog + cat, err := org.FindCatalog("Public Catalog") + c.Assert(err, IsNil) + + // Populate Catalog Item + catitem, err := cat.FindCatalogItem("CentOS64-32bit") + c.Assert(err, IsNil) + + // Get VAppTemplate + vapptemplate, err := catitem.GetVAppTemplate() + c.Assert(err, IsNil) + + // Compose VApp + task, err := s.vapp.ComposeVApp(net, vapptemplate, "name", "description") + c.Assert(err, IsNil) + c.Assert(task.Task.OperationName, Equals, "vdcInstantiateVapp") + c.Assert(s.vapp.VApp.HREF, Equals, "http://localhost:4444/api/vApp/vapp-00000000-0000-0000-0000-000000000000") + + task, err = s.vapp.RunCustomizationScript("computername", "this is my script") + + c.Assert(err, IsNil) + c.Assert(task.Task.Status, Equals, "success") + + _ = testServer.WaitRequests(8) + +} + +func (s *S) Test_ChangeCPUcount(c *C) { + + testServer.ResponseMap(8, testutil.ResponseMap{ + "/api/org/11111111-1111-1111-1111-111111111111": testutil.Response{200, nil, orgExample}, + "/api/network/44444444-4444-4444-4444-4444444444444": testutil.Response{200, nil, orgvdcnetExample}, + "/api/catalog/e8a20fdf-8a78-440c-ac71-0420db59f854": testutil.Response{200, nil, catalogExample}, + "/api/catalogItem/1176e485-8858-4e15-94e5-ae4face605ae": testutil.Response{200, nil, catalogitemExample}, + "/api/vAppTemplate/vappTemplate-40cb9721-5f1a-44f9-b5c3-98c5f518c4f5": testutil.Response{200, nil, vapptemplateExample}, + 
"/api/vdc/00000000-0000-0000-0000-000000000000/action/composeVApp": testutil.Response{200, nil, instantiatedvappExample}, + "/api/vApp/vapp-00000000-0000-0000-0000-000000000000": testutil.Response{200, nil, vappExample}, + "/api/vApp/vm-00000000-0000-0000-0000-000000000000/virtualHardwareSection/cpu": testutil.Response{200, nil, taskExample}, + }) + + // Get the Org populated + org, err := s.vdc.GetVDCOrg() + c.Assert(err, IsNil) + + // Populate OrgVDCNetwork + net, err := s.vdc.FindVDCNetwork("networkName") + c.Assert(err, IsNil) + + // Populate Catalog + cat, err := org.FindCatalog("Public Catalog") + c.Assert(err, IsNil) + + // Populate Catalog Item + catitem, err := cat.FindCatalogItem("CentOS64-32bit") + c.Assert(err, IsNil) + + // Get VAppTemplate + vapptemplate, err := catitem.GetVAppTemplate() + c.Assert(err, IsNil) + + // Compose VApp + task, err := s.vapp.ComposeVApp(net, vapptemplate, "name", "description") + c.Assert(err, IsNil) + c.Assert(task.Task.OperationName, Equals, "vdcInstantiateVapp") + c.Assert(s.vapp.VApp.HREF, Equals, "http://localhost:4444/api/vApp/vapp-00000000-0000-0000-0000-000000000000") + + task, err = s.vapp.ChangeCPUcount(2) + + c.Assert(err, IsNil) + c.Assert(task.Task.Status, Equals, "success") + + _ = testServer.WaitRequests(8) + +} + +func (s *S) Test_ChangeMemorySize(c *C) { + + testServer.ResponseMap(8, testutil.ResponseMap{ + "/api/org/11111111-1111-1111-1111-111111111111": testutil.Response{200, nil, orgExample}, + "/api/network/44444444-4444-4444-4444-4444444444444": testutil.Response{200, nil, orgvdcnetExample}, + "/api/catalog/e8a20fdf-8a78-440c-ac71-0420db59f854": testutil.Response{200, nil, catalogExample}, + "/api/catalogItem/1176e485-8858-4e15-94e5-ae4face605ae": testutil.Response{200, nil, catalogitemExample}, + "/api/vAppTemplate/vappTemplate-40cb9721-5f1a-44f9-b5c3-98c5f518c4f5": testutil.Response{200, nil, vapptemplateExample}, + "/api/vdc/00000000-0000-0000-0000-000000000000/action/composeVApp": testutil.Response{200, nil, instantiatedvappExample}, + "/api/vApp/vapp-00000000-0000-0000-0000-000000000000": testutil.Response{200, nil, vappExample}, + "/api/vApp/vm-00000000-0000-0000-0000-000000000000/virtualHardwareSection/memory": testutil.Response{200, nil, taskExample}, + }) + + // Get the Org populated + org, err := s.vdc.GetVDCOrg() + c.Assert(err, IsNil) + + // Populate OrgVDCNetwork + net, err := s.vdc.FindVDCNetwork("networkName") + c.Assert(err, IsNil) + + // Populate Catalog + cat, err := org.FindCatalog("Public Catalog") + c.Assert(err, IsNil) + + // Populate Catalog Item + catitem, err := cat.FindCatalogItem("CentOS64-32bit") + c.Assert(err, IsNil) + + // Get VAppTemplate + vapptemplate, err := catitem.GetVAppTemplate() + c.Assert(err, IsNil) + + // Compose VApp + task, err := s.vapp.ComposeVApp(net, vapptemplate, "name", "description") + c.Assert(err, IsNil) + c.Assert(task.Task.OperationName, Equals, "vdcInstantiateVapp") + c.Assert(s.vapp.VApp.HREF, Equals, "http://localhost:4444/api/vApp/vapp-00000000-0000-0000-0000-000000000000") + + task, err = s.vapp.ChangeMemorySize(4096) + + c.Assert(err, IsNil) + c.Assert(task.Task.Status, Equals, "success") + + _ = testServer.WaitRequests(8) + +} + +var instantiatedvappExample = ` + + + + + + + + + + + + + My vApp to be deployed + + + + + + + 1 +
+ + + 2014-10-24T15:26:59.067Z + + + + false + + ` + +var vappExample = ` + + + + + + + + + + + + + + + + + + + + + + Test API GO4444! + + Lease settings section + + 0 + 0 + + + VApp startup section + + + + + The list of logical networks + + This isolated network was created with Create VDC. + + + This is a special place-holder used for disconnected network interfaces. + + + + The configuration parameters for logical networks + + + + This isolated network was created with Create VDC. + + + + true + 192.168.99.1 + 255.255.255.0 + true + + + 192.168.99.2 + 192.168.99.100 + + + + + + bridged + false + + false + + + This is a special place-holder used for disconnected network interfaces. + + + + false + 196.254.254.254 + 255.255.0.0 + 196.254.254.254 + + + isolated + + false + + + + Snapshot information section + + 2014-11-06T22:24:43.913Z + + + + false + + + + + + + + + + + + + + + + + + + + + + id: cts-6.4-32bit + + Virtual hardware requirements + + Virtual Hardware Family + 0 + CentOS64-32bit + vmx-09 + + + 00:50:56:02:0b:36 + 0 + false + none + E1000 ethernet adapter on "none" + Network adapter 0 + 1 + E1000 + 10 + + + 0 + SCSI Controller + SCSI Controller 0 + 2 + lsilogic + 6 + + + 0 + Hard disk + Hard disk 1 + + 2000 + 2 + 17 + + + 1 + IDE Controller + IDE Controller 1 + 3 + 5 + + + 0 + false + CD/DVD Drive + CD/DVD Drive 1 + + 3002 + 3 + 15 + + + 0 + false + Floppy Drive + Floppy Drive 1 + + 8000 + 14 + + + hertz * 10^6 + Number of Virtual CPUs + 1 virtual CPU(s) + 4 + 0 + 3 + 1 + 0 + 1 + + + + byte * 2^20 + Memory Size + 1024 MB of memory + 5 + 0 + 4 + 1024 + 0 + + + + + + + + + + + + + + + + + Specifies the operating system installed + CentOS 4/5/6 (32-bit) + + + + Specifies the available VM network connections + 0 + + 0 + false + 00:50:56:02:0b:36 + NONE + + + + + Specifies Guest OS Customization Settings + true + false + 00000000-0000-0000-0000-000000000000 + false + false + true + true + false + 0 + true + cts-6.4-32bit + + + + Specifies Runtime info + + + + Snapshot information section + + CentOS64-32bit + + + false + false + + + + + + ` diff --git a/vendor/github.com/hmrc/vmware-govcd/vapptemplate.go b/vendor/github.com/hmrc/vmware-govcd/vapptemplate.go new file mode 100644 index 000000000..088d4bd0d --- /dev/null +++ b/vendor/github.com/hmrc/vmware-govcd/vapptemplate.go @@ -0,0 +1,57 @@ +/* + * Copyright 2014 VMware, Inc. All rights reserved. Licensed under the Apache v2 License. 
+ */ + +package govcd + +import ( + "bytes" + "encoding/xml" + "fmt" + types "github.com/hmrc/vmware-govcd/types/v56" +) + +type VAppTemplate struct { + VAppTemplate *types.VAppTemplate + c *Client +} + +func NewVAppTemplate(c *Client) *VAppTemplate { + return &VAppTemplate{ + VAppTemplate: new(types.VAppTemplate), + c: c, + } +} + +func (v *Vdc) InstantiateVAppTemplate(template *types.InstantiateVAppTemplateParams) error { + output, err := xml.MarshalIndent(template, "", " ") + if err != nil { + return fmt.Errorf("Error finding VAppTemplate: %#v", err) + } + b := bytes.NewBufferString(xml.Header + string(output)) + + s := v.c.VCDVDCHREF + s.Path += "/action/instantiateVAppTemplate" + + req := v.c.NewRequest(map[string]string{}, "POST", s, b) + req.Header.Add("Content-Type", "application/vnd.vmware.vcloud.instantiateVAppTemplateParams+xml") + + resp, err := checkResp(v.c.Http.Do(req)) + if err != nil { + return fmt.Errorf("error instantiating a new template: %s", err) + } + + vapptemplate := NewVAppTemplate(v.c) + if err = decodeBody(resp, vapptemplate.VAppTemplate); err != nil { + return fmt.Errorf("error decoding orgvdcnetwork response: %s", err) + } + task := NewTask(v.c) + for _, t := range vapptemplate.VAppTemplate.Tasks.Task { + task.Task = t + err = task.WaitTaskCompletion() + if err != nil { + return fmt.Errorf("Error performing task: %#v", err) + } + } + return nil +} diff --git a/vendor/github.com/hmrc/vmware-govcd/vapptemplate_test.go b/vendor/github.com/hmrc/vmware-govcd/vapptemplate_test.go new file mode 100644 index 000000000..48b10aceb --- /dev/null +++ b/vendor/github.com/hmrc/vmware-govcd/vapptemplate_test.go @@ -0,0 +1,183 @@ +/* + * Copyright 2014 VMware, Inc. All rights reserved. Licensed under the Apache v2 License. + */ + +package govcd + +var vapptemplateExample = ` + + + + + + + + + + + id: cts-6.4-32bit + + + + + + + + + + id: cts-6.4-32bit + + Specifies the available VM network connections + 0 + + 0 + false + 00:50:56:02:00:39 + NONE + + + + Specifies Guest OS Customization Settings + true + false + 3a3934d2-1f3f-4782-a911-f143da27e88c + false + false + true + true + false + 0 + true + cts-6.4-32bit + + + Virtual hardware requirements + + Virtual Hardware Family + 0 + CentOS64-32bit + vmx-09 + + + 00:50:56:02:00:39 + 0 + false + none + E1000 ethernet adapter on "none" + Network adapter 0 + 1 + E1000 + 10 + + + 0 + SCSI Controller + SCSI Controller 0 + 2 + lsilogic + 6 + + + 0 + Hard disk + Hard disk 1 + + 2000 + 2 + 17 + + + 1 + IDE Controller + IDE Controller 1 + 3 + 5 + + + 0 + false + CD/DVD Drive + CD/DVD Drive 1 + + 3002 + 3 + 15 + + + 0 + false + Floppy Drive + Floppy Drive 1 + + 8000 + 14 + + + hertz * 10^6 + Number of Virtual CPUs + 1 virtual CPU(s) + 4 + 0 + 3 + 1 + 0 + 1 + + + byte * 2^20 + Memory Size + 1024 MB of memory + 5 + 0 + 4 + 1024 + 0 + + + + + + + + + CentOS64-32bit + 2014-06-04T21:06:43.547Z + + + + The list of logical networks + + This is a special place-holder used for disconnected network interfaces. + + + + The configuration parameters for logical networks + + This is a special place-holder used for disconnected network interfaces. 
+ + + + false + 196.254.254.254 + 255.255.0.0 + 196.254.254.254 + + + isolated + + false + + + + Lease settings section + + 0 + + + VApp template customization section + true + + 2014-06-04T21:06:43.547Z + + + ` diff --git a/vendor/github.com/hmrc/vmware-govcd/vdc.go b/vendor/github.com/hmrc/vmware-govcd/vdc.go new file mode 100644 index 000000000..58da1b80a --- /dev/null +++ b/vendor/github.com/hmrc/vmware-govcd/vdc.go @@ -0,0 +1,278 @@ +/* + * Copyright 2014 VMware, Inc. All rights reserved. Licensed under the Apache v2 License. + */ + +package govcd + +import ( + "fmt" + "net/url" + "strings" + + types "github.com/hmrc/vmware-govcd/types/v56" +) + +type Vdc struct { + Vdc *types.Vdc + c *Client +} + +func NewVdc(c *Client) *Vdc { + return &Vdc{ + Vdc: new(types.Vdc), + c: c, + } +} + +func (c *Client) retrieveVDC() (Vdc, error) { + + req := c.NewRequest(map[string]string{}, "GET", c.VCDVDCHREF, nil) + + resp, err := checkResp(c.Http.Do(req)) + if err != nil { + return Vdc{}, fmt.Errorf("error retreiving vdc: %s", err) + } + + vdc := NewVdc(c) + + if err = decodeBody(resp, vdc.Vdc); err != nil { + return Vdc{}, fmt.Errorf("error decoding vdc response: %s", err) + } + + // The request was successful + return *vdc, nil +} + +func (v *Vdc) Refresh() error { + + if v.Vdc.HREF == "" { + return fmt.Errorf("cannot refresh, Object is empty") + } + + u, _ := url.ParseRequestURI(v.Vdc.HREF) + + req := v.c.NewRequest(map[string]string{}, "GET", *u, nil) + + resp, err := checkResp(v.c.Http.Do(req)) + if err != nil { + return fmt.Errorf("error retreiving Edge Gateway: %s", err) + } + + // Empty struct before a new unmarshal, otherwise we end up with duplicate + // elements in slices. + v.Vdc = &types.Vdc{} + + if err = decodeBody(resp, v.Vdc); err != nil { + return fmt.Errorf("error decoding Edge Gateway response: %s", err) + } + + // The request was successful + return nil +} + +func (v *Vdc) FindVDCNetwork(network string) (OrgVDCNetwork, error) { + + for _, an := range v.Vdc.AvailableNetworks { + for _, n := range an.Network { + if n.Name == network { + u, err := url.ParseRequestURI(n.HREF) + if err != nil { + return OrgVDCNetwork{}, fmt.Errorf("error decoding vdc response: %s", err) + } + + req := v.c.NewRequest(map[string]string{}, "GET", *u, nil) + + resp, err := checkResp(v.c.Http.Do(req)) + if err != nil { + return OrgVDCNetwork{}, fmt.Errorf("error retreiving orgvdcnetwork: %s", err) + } + + orgnet := NewOrgVDCNetwork(v.c) + + if err = decodeBody(resp, orgnet.OrgVDCNetwork); err != nil { + return OrgVDCNetwork{}, fmt.Errorf("error decoding orgvdcnetwork response: %s", err) + } + + // The request was successful + return *orgnet, nil + + } + } + } + + return OrgVDCNetwork{}, fmt.Errorf("can't find VDC Network: %s", network) +} + +// Doesn't work with vCloud API 5.5, only vCloud Air +func (v *Vdc) GetVDCOrg() (Org, error) { + + for _, av := range v.Vdc.Link { + if av.Rel == "up" && av.Type == "application/vnd.vmware.vcloud.org+xml" { + u, err := url.ParseRequestURI(av.HREF) + + if err != nil { + return Org{}, fmt.Errorf("error decoding vdc response: %s", err) + } + + req := v.c.NewRequest(map[string]string{}, "GET", *u, nil) + + resp, err := checkResp(v.c.Http.Do(req)) + if err != nil { + return Org{}, fmt.Errorf("error retreiving org: %s", err) + } + + org := NewOrg(v.c) + + if err = decodeBody(resp, org.Org); err != nil { + return Org{}, fmt.Errorf("error decoding org response: %s", err) + } + + // The request was successful + return *org, nil + + } + } + return Org{}, fmt.Errorf("can't find 
VDC Org") +} + +func (v *Vdc) FindEdgeGateway(edgegateway string) (EdgeGateway, error) { + + for _, av := range v.Vdc.Link { + if av.Rel == "edgeGateways" && av.Type == "application/vnd.vmware.vcloud.query.records+xml" { + u, err := url.ParseRequestURI(av.HREF) + + if err != nil { + return EdgeGateway{}, fmt.Errorf("error decoding vdc response: %s", err) + } + + // Querying the Result list + req := v.c.NewRequest(map[string]string{}, "GET", *u, nil) + + resp, err := checkResp(v.c.Http.Do(req)) + if err != nil { + return EdgeGateway{}, fmt.Errorf("error retrieving edge gateway records: %s", err) + } + + query := new(types.QueryResultEdgeGatewayRecordsType) + + if err = decodeBody(resp, query); err != nil { + return EdgeGateway{}, fmt.Errorf("error decoding edge gateway query response: %s", err) + } + + u, err = url.ParseRequestURI(query.EdgeGatewayRecord.HREF) + if err != nil { + return EdgeGateway{}, fmt.Errorf("error decoding edge gateway query response: %s", err) + } + + // Querying the Result list + req = v.c.NewRequest(map[string]string{}, "GET", *u, nil) + + resp, err = checkResp(v.c.Http.Do(req)) + if err != nil { + return EdgeGateway{}, fmt.Errorf("error retrieving edge gateway: %s", err) + } + + edge := NewEdgeGateway(v.c) + + if err = decodeBody(resp, edge.EdgeGateway); err != nil { + return EdgeGateway{}, fmt.Errorf("error decoding edge gateway response: %s", err) + } + + return *edge, nil + + } + } + return EdgeGateway{}, fmt.Errorf("can't find Edge Gateway") + +} + +func (v *Vdc) FindVAppByName(vapp string) (VApp, error) { + + err := v.Refresh() + if err != nil { + return VApp{}, fmt.Errorf("error refreshing vdc: %s", err) + } + + for _, resents := range v.Vdc.ResourceEntities { + for _, resent := range resents.ResourceEntity { + + if resent.Name == vapp && resent.Type == "application/vnd.vmware.vcloud.vApp+xml" { + + u, err := url.ParseRequestURI(resent.HREF) + + if err != nil { + return VApp{}, fmt.Errorf("error decoding vdc response: %s", err) + } + + // Querying the VApp + req := v.c.NewRequest(map[string]string{}, "GET", *u, nil) + + resp, err := checkResp(v.c.Http.Do(req)) + if err != nil { + return VApp{}, fmt.Errorf("error retrieving vApp: %s", err) + } + + newvapp := NewVApp(v.c) + + if err = decodeBody(resp, newvapp.VApp); err != nil { + return VApp{}, fmt.Errorf("error decoding vApp response: %s", err) + } + + return *newvapp, nil + + } + } + } + return VApp{}, fmt.Errorf("can't find vApp: %s", vapp) +} + +func (v *Vdc) FindVAppByID(vappid string) (VApp, error) { + + // Horrible hack to fetch a vapp with its id. 
+ // urn:vcloud:vapp:00000000-0000-0000-0000-000000000000 + + err := v.Refresh() + if err != nil { + return VApp{}, fmt.Errorf("error refreshing vdc: %s", err) + } + + urnslice := strings.SplitAfter(vappid, ":") + urnid := urnslice[len(urnslice)-1] + + for _, resents := range v.Vdc.ResourceEntities { + for _, resent := range resents.ResourceEntity { + + hrefslice := strings.SplitAfter(resent.HREF, "/") + hrefslice = strings.SplitAfter(hrefslice[len(hrefslice)-1], "-") + res := strings.Join(hrefslice[1:], "") + + if res == urnid && resent.Type == "application/vnd.vmware.vcloud.vApp+xml" { + + u, err := url.ParseRequestURI(resent.HREF) + + if err != nil { + return VApp{}, fmt.Errorf("error decoding vdc response: %s", err) + } + + // Querying the VApp + req := v.c.NewRequest(map[string]string{}, "GET", *u, nil) + + resp, err := checkResp(v.c.Http.Do(req)) + if err != nil { + return VApp{}, fmt.Errorf("error retrieving vApp: %s", err) + } + + newvapp := NewVApp(v.c) + + if err = decodeBody(resp, newvapp.VApp); err != nil { + return VApp{}, fmt.Errorf("error decoding vApp response: %s", err) + } + + return *newvapp, nil + + } + } + } + return VApp{}, fmt.Errorf("can't find vApp") + +} diff --git a/vendor/github.com/hmrc/vmware-govcd/vdc_test.go b/vendor/github.com/hmrc/vmware-govcd/vdc_test.go new file mode 100644 index 000000000..ad30afb8e --- /dev/null +++ b/vendor/github.com/hmrc/vmware-govcd/vdc_test.go @@ -0,0 +1,178 @@ +/* + * Copyright 2014 VMware, Inc. All rights reserved. Licensed under the Apache v2 License. + */ + +package govcd + +import ( + "github.com/hmrc/vmware-govcd/testutil" + + . "gopkg.in/check.v1" +) + +func (s *S) Test_FindVDCNetwork(c *C) { + + testServer.Response(200, nil, orgvdcnetExample) + + net, err := s.vdc.FindVDCNetwork("networkName") + + _ = testServer.WaitRequest() + + c.Assert(err, IsNil) + c.Assert(net, NotNil) + c.Assert(net.OrgVDCNetwork.HREF, Equals, "http://localhost:4444/api/network/cb0f4c9e-1a46-49d4-9fcb-d228000a6bc1") + + // find Invalid Network + net, err = s.vdc.FindVDCNetwork("INVALID") + c.Assert(err, NotNil) +} + +func (s *S) Test_GetVDCOrg(c *C) { + + testServer.Response(200, nil, orgExample) + + org, err := s.vdc.GetVDCOrg() + + _ = testServer.WaitRequest() + + c.Assert(err, IsNil) + c.Assert(org, NotNil) + c.Assert(org.Org.HREF, Equals, "http://localhost:4444/api/org/23bd2339-c55f-403c-baf3-13109e8c8d57") +} + +func (s *S) Test_NewVdc(c *C) { + + testServer.Response(200, nil, vdcExample) + err := s.vdc.Refresh() + _ = testServer.WaitRequest() + c.Assert(err, IsNil) + + c.Assert(s.vdc.Vdc.Link[0].Rel, Equals, "up") + c.Assert(s.vdc.Vdc.Link[0].Type, Equals, "application/vnd.vmware.vcloud.org+xml") + c.Assert(s.vdc.Vdc.Link[0].HREF, Equals, "http://localhost:4444/api/org/11111111-1111-1111-1111-111111111111") + + c.Assert(s.vdc.Vdc.AllocationModel, Equals, "AllocationPool") + + for _, v := range s.vdc.Vdc.ComputeCapacity { + c.Assert(v.CPU.Units, Equals, "MHz") + c.Assert(v.CPU.Allocated, Equals, int64(30000)) + c.Assert(v.CPU.Limit, Equals, int64(30000)) + c.Assert(v.CPU.Reserved, Equals, int64(15000)) + c.Assert(v.CPU.Used, Equals, int64(0)) + c.Assert(v.CPU.Overhead, Equals, int64(0)) + c.Assert(v.Memory.Units, Equals, "MB") + c.Assert(v.Memory.Allocated, Equals, int64(61440)) + c.Assert(v.Memory.Limit, Equals, int64(61440)) + c.Assert(v.Memory.Reserved, Equals, int64(61440)) + c.Assert(v.Memory.Used, Equals, int64(6144)) + c.Assert(v.Memory.Overhead, Equals, int64(95)) + } + + c.Assert(s.vdc.Vdc.ResourceEntities[0].ResourceEntity[0].Name, 
Equals, "vAppTemplate") + c.Assert(s.vdc.Vdc.ResourceEntities[0].ResourceEntity[0].Type, Equals, "application/vnd.vmware.vcloud.vAppTemplate+xml") + c.Assert(s.vdc.Vdc.ResourceEntities[0].ResourceEntity[0].HREF, Equals, "http://localhost:4444/api/vAppTemplate/vappTemplate-22222222-2222-2222-2222-222222222222") + + for _, v := range s.vdc.Vdc.AvailableNetworks { + for _, v2 := range v.Network { + c.Assert(v2.Name, Equals, "networkName") + c.Assert(v2.Type, Equals, "application/vnd.vmware.vcloud.network+xml") + c.Assert(v2.HREF, Equals, "http://localhost:4444/api/network/44444444-4444-4444-4444-4444444444444") + } + } + + c.Assert(s.vdc.Vdc.NicQuota, Equals, 0) + c.Assert(s.vdc.Vdc.NetworkQuota, Equals, 20) + c.Assert(s.vdc.Vdc.UsedNetworkCount, Equals, 0) + c.Assert(s.vdc.Vdc.VMQuota, Equals, 0) + c.Assert(s.vdc.Vdc.IsEnabled, Equals, true) + + for _, v := range s.vdc.Vdc.VdcStorageProfiles { + for _, v2 := range v.VdcStorageProfile { + c.Assert(v2.Name, Equals, "storageProfile") + c.Assert(v2.Type, Equals, "application/vnd.vmware.vcloud.vdcStorageProfile+xml") + c.Assert(v2.HREF, Equals, "http://localhost:4444/api/vdcStorageProfile/88888888-8888-8888-8888-888888888888") + } + } + +} + +func (s *S) Test_FindVApp(c *C) { + + // testServer.Response(200, nil, vappExample) + + // vapp, err := s.vdc.FindVAppByID("") + + // _ = testServer.WaitRequest() + // testServer.Flush() + // c.Assert(err, IsNil) + + testServer.ResponseMap(2, testutil.ResponseMap{ + "/api/vdc/00000000-0000-0000-0000-000000000000": testutil.Response{200, nil, vdcExample}, + "/api/vApp/vapp-00000000-0000-0000-0000-000000000000": testutil.Response{200, nil, vappExample}, + }) + + _, err := s.vdc.FindVAppByName("myVApp") + + _ = testServer.WaitRequests(2) + + c.Assert(err, IsNil) + + testServer.ResponseMap(2, testutil.ResponseMap{ + "/api/vdc/00000000-0000-0000-0000-000000000000": testutil.Response{200, nil, vdcExample}, + "/api/vApp/vapp-00000000-0000-0000-0000-000000000000": testutil.Response{200, nil, vappExample}, + }) + + _, err = s.vdc.FindVAppByID("urn:vcloud:vapp:00000000-0000-0000-0000-000000000000") + + _ = testServer.WaitRequests(2) + + c.Assert(err, IsNil) + +} + +var vdcExample = ` + + + + + AllocationPool + + + MHz + 30000 + 30000 + 15000 + 0 + 0 + + + MB + 61440 + 61440 + 61440 + 6144 + 95 + + + + + + + + + + + + vmx-10 + + + 0 + 20 + 0 + 0 + true + + + + + ` diff --git a/vendor/github.com/imdario/mergo/.travis.yml b/vendor/github.com/imdario/mergo/.travis.yml new file mode 100644 index 000000000..9d91c6339 --- /dev/null +++ b/vendor/github.com/imdario/mergo/.travis.yml @@ -0,0 +1,2 @@ +language: go +install: go get -t diff --git a/vendor/github.com/imdario/mergo/LICENSE b/vendor/github.com/imdario/mergo/LICENSE new file mode 100644 index 000000000..686680298 --- /dev/null +++ b/vendor/github.com/imdario/mergo/LICENSE @@ -0,0 +1,28 @@ +Copyright (c) 2013 Dario Castañé. All rights reserved. +Copyright (c) 2012 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. 
nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/imdario/mergo/README.md b/vendor/github.com/imdario/mergo/README.md new file mode 100644 index 000000000..4f0f990fc --- /dev/null +++ b/vendor/github.com/imdario/mergo/README.md @@ -0,0 +1,122 @@ +# Mergo + +A helper to merge structs and maps in Golang. Useful for configuration default values, avoiding messy if-statements. + +Also a lovely [comune](http://en.wikipedia.org/wiki/Mergo) (municipality) in the Province of Ancona in the Italian region Marche. + +![Mergo dall'alto](http://www.comune.mergo.an.it/Siti/Mergo/Immagini/Foto/mergo_dall_alto.jpg) + +## Status + +It is ready for production use. It works fine after extensive use in the wild. + +[![Build Status][1]][2] +[![GoDoc](https://godoc.org/github.com/imdario/mergo?status.svg)](https://godoc.org/github.com/imdario/mergo) + +[1]: https://travis-ci.org/imdario/mergo.png +[2]: https://travis-ci.org/imdario/mergo + +### Important note + +Mergo is intended to assign **only** zero value fields on destination with source value. Since April 6th it works like this. Before it didn't work properly, causing some random overwrites. After some issues and PRs I found it didn't merge as I designed it. Thanks to [imdario/mergo#8](https://github.com/imdario/mergo/pull/8) overwriting functions were added and the wrong behavior was clearly detected. + +If you were using Mergo **before** April 6th 2015, please check your project works as intended after updating your local copy with ```go get -u github.com/imdario/mergo```. I apologize for any issue caused by its previous behavior and any future bug that Mergo could cause (I hope it won't!) in existing projects after the change (release 0.2.0). 
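+
+For illustration, here is a minimal sketch of these semantics (the `Config` struct is made up for this example, not part of Mergo), contrasting `Merge` with `MergeWithOverwrite`:
+
+```go
+package main
+
+import (
+	"fmt"
+
+	"github.com/imdario/mergo"
+)
+
+// Config is a hypothetical struct used only for this example.
+type Config struct {
+	Host string
+	Port int
+}
+
+func main() {
+	dst := Config{Host: "localhost"} // Port is zero, so it may be filled in
+	src := Config{Host: "example.com", Port: 8080}
+
+	// Merge only assigns zero-value destination fields: Host is kept.
+	if err := mergo.Merge(&dst, src); err != nil {
+		fmt.Println(err)
+		return
+	}
+	fmt.Println(dst) // {localhost 8080}
+
+	// MergeWithOverwrite also replaces non-empty destination fields.
+	if err := mergo.MergeWithOverwrite(&dst, src); err != nil {
+		fmt.Println(err)
+		return
+	}
+	fmt.Println(dst) // {example.com 8080}
+}
+```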
+ +### Mergo in the wild + +- [imdario/zas](https://github.com/imdario/zas) +- [GoogleCloudPlatform/kubernetes](https://github.com/GoogleCloudPlatform/kubernetes) +- [soniah/dnsmadeeasy](https://github.com/soniah/dnsmadeeasy) +- [EagerIO/Stout](https://github.com/EagerIO/Stout) +- [lynndylanhurley/defsynth-api](https://github.com/lynndylanhurley/defsynth-api) +- [russross/canvasassignments](https://github.com/russross/canvasassignments) +- [rdegges/cryptly-api](https://github.com/rdegges/cryptly-api) +- [casualjim/exeggutor](https://github.com/casualjim/exeggutor) +- [divshot/gitling](https://github.com/divshot/gitling) +- [RWJMurphy/gorl](https://github.com/RWJMurphy/gorl) +- [andrerocker/deploy42](https://github.com/andrerocker/deploy42) +- [elwinar/rambler](https://github.com/elwinar/rambler) +- [tmaiaroto/gopartman](https://github.com/tmaiaroto/gopartman) +- [jfbus/impressionist](https://github.com/jfbus/impressionist) +- [Jmeyering/zealot](https://github.com/Jmeyering/zealot) +- [godep-migrator/rigger-host](https://github.com/godep-migrator/rigger-host) +- [Dronevery/MultiwaySwitch-Go](https://github.com/Dronevery/MultiwaySwitch-Go) +- [thoas/picfit](https://github.com/thoas/picfit) +- [mantasmatelis/whooplist-server](https://github.com/mantasmatelis/whooplist-server) +- [jnuthong/item_search](https://github.com/jnuthong/item_search) + +## Installation + + go get github.com/imdario/mergo + + // use in your .go code + import ( + "github.com/imdario/mergo" + ) + +## Usage + +You can only merge same-type structs with exported fields initialized as zero value of their type and same-types maps. Mergo won't merge unexported (private) fields but will do recursively any exported one. Also maps will be merged recursively except for structs inside maps (because they are not addressable using Go reflection). + + if err := mergo.Merge(&dst, src); err != nil { + // ... + } + +Additionally, you can map a map[string]interface{} to a struct (and otherwise, from struct to map), following the same restrictions as in Merge(). Keys are capitalized to find each corresponding exported field. + + if err := mergo.Map(&dst, srcMap); err != nil { + // ... + } + +Warning: if you map a struct to map, it won't do it recursively. Don't expect Mergo to map struct members of your struct as map[string]interface{}. They will be just assigned as values. + +More information and examples in [godoc documentation](http://godoc.org/github.com/imdario/mergo). + +### Nice example + +```go +package main + +import ( + "fmt" + "github.com/imdario/mergo" +) + +type Foo struct { + A string + B int64 +} + +func main() { + src := Foo{ + A: "one", + } + + dest := Foo{ + A: "two", + B: 2, + } + + mergo.Merge(&dest, src) + + fmt.Println(dest) + // Will print + // {two 2} +} +``` + +Note: if test are failing due missing package, please execute: + + go get gopkg.in/yaml.v1 + +## Contact me + +If I can help you, you have an idea or you are using Mergo in your projects, don't hesitate to drop me a line (or a pull request): [@im_dario](https://twitter.com/im_dario) + +## About + +Written by [Dario Castañé](http://dario.im). + +## License + +[BSD 3-Clause](http://opensource.org/licenses/BSD-3-Clause) license, as [Go language](http://golang.org/LICENSE). diff --git a/vendor/github.com/imdario/mergo/doc.go b/vendor/github.com/imdario/mergo/doc.go new file mode 100644 index 000000000..6e9aa7baf --- /dev/null +++ b/vendor/github.com/imdario/mergo/doc.go @@ -0,0 +1,44 @@ +// Copyright 2013 Dario Castañé. All rights reserved. 
+// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +/* +Package mergo merges same-type structs and maps by setting default values in zero-value fields. + +Mergo won't merge unexported (private) fields but will do recursively any exported one. It also won't merge structs inside maps (because they are not addressable using Go reflection). + +Usage + +From my own work-in-progress project: + + type networkConfig struct { + Protocol string + Address string + ServerType string `json: "server_type"` + Port uint16 + } + + type FssnConfig struct { + Network networkConfig + } + + var fssnDefault = FssnConfig { + networkConfig { + "tcp", + "127.0.0.1", + "http", + 31560, + }, + } + + // Inside a function [...] + + if err := mergo.Merge(&config, fssnDefault); err != nil { + log.Fatal(err) + } + + // More code [...] + +*/ +package mergo diff --git a/vendor/github.com/imdario/mergo/issue17_test.go b/vendor/github.com/imdario/mergo/issue17_test.go new file mode 100644 index 000000000..0ee96f377 --- /dev/null +++ b/vendor/github.com/imdario/mergo/issue17_test.go @@ -0,0 +1,25 @@ +package mergo + +import ( + "encoding/json" + "testing" +) + +var ( + request = `{"timestamp":null, "name": "foo"}` + maprequest = map[string]interface{}{ + "timestamp": nil, + "name": "foo", + "newStuff": "foo", + } +) + +func TestIssue17MergeWithOverwrite(t *testing.T) { + var something map[string]interface{} + if err := json.Unmarshal([]byte(request), &something); err != nil { + t.Errorf("Error while Unmarshalling maprequest %s", err) + } + if err := MergeWithOverwrite(&something, maprequest); err != nil { + t.Errorf("Error while merging %s", err) + } +} diff --git a/vendor/github.com/imdario/mergo/map.go b/vendor/github.com/imdario/mergo/map.go new file mode 100644 index 000000000..1ed3d716a --- /dev/null +++ b/vendor/github.com/imdario/mergo/map.go @@ -0,0 +1,154 @@ +// Copyright 2014 Dario Castañé. All rights reserved. +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Based on src/pkg/reflect/deepequal.go from official +// golang's stdlib. + +package mergo + +import ( + "fmt" + "reflect" + "unicode" + "unicode/utf8" +) + +func changeInitialCase(s string, mapper func(rune) rune) string { + if s == "" { + return s + } + r, n := utf8.DecodeRuneInString(s) + return string(mapper(r)) + s[n:] +} + +func isExported(field reflect.StructField) bool { + r, _ := utf8.DecodeRuneInString(field.Name) + return r >= 'A' && r <= 'Z' +} + +// Traverses recursively both values, assigning src's fields values to dst. +// The map argument tracks comparisons that have already been seen, which allows +// short circuiting on recursive types. +func deepMap(dst, src reflect.Value, visited map[uintptr]*visit, depth int, overwrite bool) (err error) { + if dst.CanAddr() { + addr := dst.UnsafeAddr() + h := 17 * addr + seen := visited[h] + typ := dst.Type() + for p := seen; p != nil; p = p.next { + if p.ptr == addr && p.typ == typ { + return nil + } + } + // Remember, remember... 
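+		// (record this address/type pair so recursive structures terminate)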
+ visited[h] = &visit{addr, typ, seen} + } + zeroValue := reflect.Value{} + switch dst.Kind() { + case reflect.Map: + dstMap := dst.Interface().(map[string]interface{}) + for i, n := 0, src.NumField(); i < n; i++ { + srcType := src.Type() + field := srcType.Field(i) + if !isExported(field) { + continue + } + fieldName := field.Name + fieldName = changeInitialCase(fieldName, unicode.ToLower) + if v, ok := dstMap[fieldName]; !ok || (isEmptyValue(reflect.ValueOf(v)) || overwrite) { + dstMap[fieldName] = src.Field(i).Interface() + } + } + case reflect.Struct: + srcMap := src.Interface().(map[string]interface{}) + for key := range srcMap { + srcValue := srcMap[key] + fieldName := changeInitialCase(key, unicode.ToUpper) + dstElement := dst.FieldByName(fieldName) + if dstElement == zeroValue { + // We discard it because the field doesn't exist. + continue + } + srcElement := reflect.ValueOf(srcValue) + dstKind := dstElement.Kind() + srcKind := srcElement.Kind() + if srcKind == reflect.Ptr && dstKind != reflect.Ptr { + srcElement = srcElement.Elem() + srcKind = reflect.TypeOf(srcElement.Interface()).Kind() + } else if dstKind == reflect.Ptr { + // Can this work? I guess it can't. + if srcKind != reflect.Ptr && srcElement.CanAddr() { + srcPtr := srcElement.Addr() + srcElement = reflect.ValueOf(srcPtr) + srcKind = reflect.Ptr + } + } + if !srcElement.IsValid() { + continue + } + if srcKind == dstKind { + if err = deepMerge(dstElement, srcElement, visited, depth+1, overwrite); err != nil { + return + } + } else { + if srcKind == reflect.Map { + if err = deepMap(dstElement, srcElement, visited, depth+1, overwrite); err != nil { + return + } + } else { + return fmt.Errorf("type mismatch on %s field: found %v, expected %v", fieldName, srcKind, dstKind) + } + } + } + } + return +} + +// Map sets fields' values in dst from src. +// src can be a map with string keys or a struct. dst must be the opposite: +// if src is a map, dst must be a valid pointer to struct. If src is a struct, +// dst must be map[string]interface{}. +// It won't merge unexported (private) fields and will do recursively +// any exported field. +// If dst is a map, keys will be src fields' names in lower camel case. +// Missing key in src that doesn't match a field in dst will be skipped. This +// doesn't apply if dst is a map. +// This is separated method from Merge because it is cleaner and it keeps sane +// semantics: merging equal types, mapping different (restricted) types. +func Map(dst, src interface{}) error { + return _map(dst, src, false) +} + +func MapWithOverwrite(dst, src interface{}) error { + return _map(dst, src, true) +} + +func _map(dst, src interface{}, overwrite bool) error { + var ( + vDst, vSrc reflect.Value + err error + ) + if vDst, vSrc, err = resolveValues(dst, src); err != nil { + return err + } + // To be friction-less, we redirect equal-type arguments + // to deepMerge. Only because arguments can be anything. 
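+	// (e.g. struct into struct, or map into map, which Map treats as a plain merge)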
+ if vSrc.Kind() == vDst.Kind() { + return deepMerge(vDst, vSrc, make(map[uintptr]*visit), 0, overwrite) + } + switch vSrc.Kind() { + case reflect.Struct: + if vDst.Kind() != reflect.Map { + return ErrExpectedMapAsDestination + } + case reflect.Map: + if vDst.Kind() != reflect.Struct { + return ErrExpectedStructAsDestination + } + default: + return ErrNotSupported + } + return deepMap(vDst, vSrc, make(map[uintptr]*visit), 0, overwrite) +} diff --git a/vendor/github.com/imdario/mergo/merge.go b/vendor/github.com/imdario/mergo/merge.go new file mode 100644 index 000000000..2a90adbc0 --- /dev/null +++ b/vendor/github.com/imdario/mergo/merge.go @@ -0,0 +1,120 @@ +// Copyright 2013 Dario Castañé. All rights reserved. +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Based on src/pkg/reflect/deepequal.go from official +// golang's stdlib. + +package mergo + +import ( + "reflect" +) + +// Traverses recursively both values, assigning src's fields values to dst. +// The map argument tracks comparisons that have already been seen, which allows +// short circuiting on recursive types. +func deepMerge(dst, src reflect.Value, visited map[uintptr]*visit, depth int, overwrite bool) (err error) { + if !src.IsValid() { + return + } + if dst.CanAddr() { + addr := dst.UnsafeAddr() + h := 17 * addr + seen := visited[h] + typ := dst.Type() + for p := seen; p != nil; p = p.next { + if p.ptr == addr && p.typ == typ { + return nil + } + } + // Remember, remember... + visited[h] = &visit{addr, typ, seen} + } + switch dst.Kind() { + case reflect.Struct: + for i, n := 0, dst.NumField(); i < n; i++ { + if err = deepMerge(dst.Field(i), src.Field(i), visited, depth+1, overwrite); err != nil { + return + } + } + case reflect.Map: + for _, key := range src.MapKeys() { + srcElement := src.MapIndex(key) + if !srcElement.IsValid() { + continue + } + dstElement := dst.MapIndex(key) + switch srcElement.Kind() { + case reflect.Chan, reflect.Func, reflect.Map, reflect.Ptr, reflect.Interface, reflect.Slice: + if srcElement.IsNil() { + continue + } + fallthrough + default: + switch reflect.TypeOf(srcElement.Interface()).Kind() { + case reflect.Struct: + fallthrough + case reflect.Ptr: + fallthrough + case reflect.Map: + if err = deepMerge(dstElement, srcElement, visited, depth+1, overwrite); err != nil { + return + } + } + } + if !isEmptyValue(srcElement) && (overwrite || (!dstElement.IsValid() || isEmptyValue(dst))) { + if dst.IsNil() { + dst.Set(reflect.MakeMap(dst.Type())) + } + dst.SetMapIndex(key, srcElement) + } + } + case reflect.Ptr: + fallthrough + case reflect.Interface: + if src.IsNil() { + break + } else if dst.IsNil() { + if dst.CanSet() && (overwrite || isEmptyValue(dst)) { + dst.Set(src) + } + } else if err = deepMerge(dst.Elem(), src.Elem(), visited, depth+1, overwrite); err != nil { + return + } + default: + if dst.CanSet() && !isEmptyValue(src) && (overwrite || isEmptyValue(dst)) { + dst.Set(src) + } + } + return +} + +// Merge will fill any empty for value type attributes on the dst struct using corresponding +// src attributes if they themselves are not empty. dst and src must be valid same-type structs +// and dst must be a pointer to struct. +// It won't merge unexported (private) fields and will do recursively any exported field. 
+func Merge(dst, src interface{}) error { + return merge(dst, src, false) +} + +// The same as Merge except that non-empty dst attributes will be overriden by +// non-empty src attribute values. +func MergeWithOverwrite(dst, src interface{}) error { + return merge(dst, src, true) +} + +func merge(dst, src interface{}, overwrite bool) error { + var ( + vDst, vSrc reflect.Value + err error + ) + if vDst, vSrc, err = resolveValues(dst, src); err != nil { + return err + } + if vDst.Type() != vSrc.Type() { + return ErrDifferentArgumentsTypes + } + return deepMerge(vDst, vSrc, make(map[uintptr]*visit), 0, overwrite) +} diff --git a/vendor/github.com/imdario/mergo/mergo.go b/vendor/github.com/imdario/mergo/mergo.go new file mode 100644 index 000000000..f8a0991ec --- /dev/null +++ b/vendor/github.com/imdario/mergo/mergo.go @@ -0,0 +1,90 @@ +// Copyright 2013 Dario Castañé. All rights reserved. +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Based on src/pkg/reflect/deepequal.go from official +// golang's stdlib. + +package mergo + +import ( + "errors" + "reflect" +) + +// Errors reported by Mergo when it finds invalid arguments. +var ( + ErrNilArguments = errors.New("src and dst must not be nil") + ErrDifferentArgumentsTypes = errors.New("src and dst must be of same type") + ErrNotSupported = errors.New("only structs and maps are supported") + ErrExpectedMapAsDestination = errors.New("dst was expected to be a map") + ErrExpectedStructAsDestination = errors.New("dst was expected to be a struct") +) + +// During deepMerge, must keep track of checks that are +// in progress. The comparison algorithm assumes that all +// checks in progress are true when it reencounters them. +// Visited are stored in a map indexed by 17 * a1 + a2; +type visit struct { + ptr uintptr + typ reflect.Type + next *visit +} + +// From src/pkg/encoding/json. +func isEmptyValue(v reflect.Value) bool { + switch v.Kind() { + case reflect.Array, reflect.Map, reflect.Slice, reflect.String: + return v.Len() == 0 + case reflect.Bool: + return !v.Bool() + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return v.Int() == 0 + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return v.Uint() == 0 + case reflect.Float32, reflect.Float64: + return v.Float() == 0 + case reflect.Interface, reflect.Ptr: + return v.IsNil() + } + return false +} + +func resolveValues(dst, src interface{}) (vDst, vSrc reflect.Value, err error) { + if dst == nil || src == nil { + err = ErrNilArguments + return + } + vDst = reflect.ValueOf(dst).Elem() + if vDst.Kind() != reflect.Struct && vDst.Kind() != reflect.Map { + err = ErrNotSupported + return + } + vSrc = reflect.ValueOf(src) + // We check if vSrc is a pointer to dereference it. + if vSrc.Kind() == reflect.Ptr { + vSrc = vSrc.Elem() + } + return +} + +// Traverses recursively both values, assigning src's fields values to dst. +// The map argument tracks comparisons that have already been seen, which allows +// short circuiting on recursive types. +func deeper(dst, src reflect.Value, visited map[uintptr]*visit, depth int) (err error) { + if dst.CanAddr() { + addr := dst.UnsafeAddr() + h := 17 * addr + seen := visited[h] + typ := dst.Type() + for p := seen; p != nil; p = p.next { + if p.ptr == addr && p.typ == typ { + return nil + } + } + // Remember, remember... 
+ visited[h] = &visit{addr, typ, seen} + } + return // TODO refactor +} diff --git a/vendor/github.com/imdario/mergo/mergo_test.go b/vendor/github.com/imdario/mergo/mergo_test.go new file mode 100644 index 000000000..cabf993a3 --- /dev/null +++ b/vendor/github.com/imdario/mergo/mergo_test.go @@ -0,0 +1,502 @@ +// Copyright 2013 Dario Castañé. All rights reserved. +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package mergo + +import ( + "gopkg.in/yaml.v1" + "io/ioutil" + "reflect" + "testing" + "time" +) + +type simpleTest struct { + Value int +} + +type complexTest struct { + St simpleTest + sz int + Id string +} + +type moreComplextText struct { + Ct complexTest + St simpleTest + Nt simpleTest +} + +type pointerTest struct { + C *simpleTest +} + +type sliceTest struct { + S []int +} + +func TestKb(t *testing.T) { + type testStruct struct { + Name string + KeyValue map[string]interface{} + } + + akv := make(map[string]interface{}) + akv["Key1"] = "not value 1" + akv["Key2"] = "value2" + a := testStruct{} + a.Name = "A" + a.KeyValue = akv + + bkv := make(map[string]interface{}) + bkv["Key1"] = "value1" + bkv["Key3"] = "value3" + b := testStruct{} + b.Name = "B" + b.KeyValue = bkv + + ekv := make(map[string]interface{}) + ekv["Key1"] = "value1" + ekv["Key2"] = "value2" + ekv["Key3"] = "value3" + expected := testStruct{} + expected.Name = "B" + expected.KeyValue = ekv + + Merge(&b, a) + + if !reflect.DeepEqual(b, expected) { + t.Errorf("Actual: %#v did not match \nExpected: %#v", b, expected) + } +} + +func TestNil(t *testing.T) { + if err := Merge(nil, nil); err != ErrNilArguments { + t.Fail() + } +} + +func TestDifferentTypes(t *testing.T) { + a := simpleTest{42} + b := 42 + if err := Merge(&a, b); err != ErrDifferentArgumentsTypes { + t.Fail() + } +} + +func TestSimpleStruct(t *testing.T) { + a := simpleTest{} + b := simpleTest{42} + if err := Merge(&a, b); err != nil { + t.FailNow() + } + if a.Value != 42 { + t.Fatalf("b not merged in properly: a.Value(%d) != b.Value(%d)", a.Value, b.Value) + } + if !reflect.DeepEqual(a, b) { + t.FailNow() + } +} + +func TestComplexStruct(t *testing.T) { + a := complexTest{} + a.Id = "athing" + b := complexTest{simpleTest{42}, 1, "bthing"} + if err := Merge(&a, b); err != nil { + t.FailNow() + } + if a.St.Value != 42 { + t.Fatalf("b not merged in properly: a.St.Value(%d) != b.St.Value(%d)", a.St.Value, b.St.Value) + } + if a.sz == 1 { + t.Fatalf("a's private field sz not preserved from merge: a.sz(%d) == b.sz(%d)", a.sz, b.sz) + } + if a.Id == b.Id { + t.Fatalf("a's field Id merged unexpectedly: a.Id(%s) == b.Id(%s)", a.Id, b.Id) + } +} + +func TestComplexStructWithOverwrite(t *testing.T) { + a := complexTest{simpleTest{1}, 1, "do-not-overwrite-with-empty-value"} + b := complexTest{simpleTest{42}, 2, ""} + + expect := complexTest{simpleTest{42}, 1, "do-not-overwrite-with-empty-value"} + if err := MergeWithOverwrite(&a, b); err != nil { + t.FailNow() + } + + if !reflect.DeepEqual(a, expect) { + t.Fatalf("Test failed:\ngot :\n%#v\n\nwant :\n%#v\n\n", a, expect) + } +} + +func TestPointerStruct(t *testing.T) { + s1 := simpleTest{} + s2 := simpleTest{19} + a := pointerTest{&s1} + b := pointerTest{&s2} + if err := Merge(&a, b); err != nil { + t.FailNow() + } + if a.C.Value != b.C.Value { + t.Fatalf("b not merged in properly: a.C.Value(%d) != b.C.Value(%d)", a.C.Value, b.C.Value) + } +} + +type embeddingStruct struct { + embeddedStruct +} + 
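+// embeddedStruct is promoted into embeddingStruct above; Merge recurses
+// into promoted fields just like any other exported field.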
+type embeddedStruct struct { + A string +} + +func TestEmbeddedStruct(t *testing.T) { + tests := []struct { + src embeddingStruct + dst embeddingStruct + expected embeddingStruct + }{ + { + src: embeddingStruct{ + embeddedStruct{"foo"}, + }, + dst: embeddingStruct{ + embeddedStruct{""}, + }, + expected: embeddingStruct{ + embeddedStruct{"foo"}, + }, + }, + { + src: embeddingStruct{ + embeddedStruct{""}, + }, + dst: embeddingStruct{ + embeddedStruct{"bar"}, + }, + expected: embeddingStruct{ + embeddedStruct{"bar"}, + }, + }, + { + src: embeddingStruct{ + embeddedStruct{"foo"}, + }, + dst: embeddingStruct{ + embeddedStruct{"bar"}, + }, + expected: embeddingStruct{ + embeddedStruct{"bar"}, + }, + }, + } + + for _, test := range tests { + err := Merge(&test.dst, test.src) + if err != nil { + t.Errorf("unexpected error: %v", err) + continue + } + if !reflect.DeepEqual(test.dst, test.expected) { + t.Errorf("unexpected output\nexpected:\n%+v\nsaw:\n%+v\n", test.expected, test.dst) + } + } +} + +func TestPointerStructNil(t *testing.T) { + a := pointerTest{nil} + b := pointerTest{&simpleTest{19}} + if err := Merge(&a, b); err != nil { + t.FailNow() + } + if a.C.Value != b.C.Value { + t.Fatalf("b not merged in a properly: a.C.Value(%d) != b.C.Value(%d)", a.C.Value, b.C.Value) + } +} + +func TestSliceStruct(t *testing.T) { + a := sliceTest{} + b := sliceTest{[]int{1, 2, 3}} + if err := Merge(&a, b); err != nil { + t.FailNow() + } + if len(b.S) != 3 { + t.FailNow() + } + if len(a.S) != len(b.S) { + t.Fatalf("b not merged in a proper way %d != %d", len(a.S), len(b.S)) + } + + a = sliceTest{[]int{1}} + b = sliceTest{[]int{1, 2, 3}} + if err := Merge(&a, b); err != nil { + t.FailNow() + } + if len(a.S) != 1 { + t.FailNow() + } + if len(a.S) == len(b.S) { + t.Fatalf("b merged unexpectedly %d != %d", len(a.S), len(b.S)) + } +} + +func TestMapsWithOverwrite(t *testing.T) { + m := map[string]simpleTest{ + "a": simpleTest{}, // overwritten by 16 + "b": simpleTest{42}, // not overwritten by empty value + "c": simpleTest{13}, // overwritten by 12 + "d": simpleTest{61}, + } + n := map[string]simpleTest{ + "a": simpleTest{16}, + "b": simpleTest{}, + "c": simpleTest{12}, + "e": simpleTest{14}, + } + expect := map[string]simpleTest{ + "a": simpleTest{16}, + "b": simpleTest{}, + "c": simpleTest{12}, + "d": simpleTest{61}, + "e": simpleTest{14}, + } + + if err := MergeWithOverwrite(&m, n); err != nil { + t.Fatalf(err.Error()) + } + + if !reflect.DeepEqual(m, expect) { + t.Fatalf("Test failed:\ngot :\n%#v\n\nwant :\n%#v\n\n", m, expect) + } +} + +func TestMaps(t *testing.T) { + m := map[string]simpleTest{ + "a": simpleTest{}, + "b": simpleTest{42}, + "c": simpleTest{13}, + "d": simpleTest{61}, + } + n := map[string]simpleTest{ + "a": simpleTest{16}, + "b": simpleTest{}, + "c": simpleTest{12}, + "e": simpleTest{14}, + } + expect := map[string]simpleTest{ + "a": simpleTest{0}, + "b": simpleTest{42}, + "c": simpleTest{13}, + "d": simpleTest{61}, + "e": simpleTest{14}, + } + + if err := Merge(&m, n); err != nil { + t.Fatalf(err.Error()) + } + + if !reflect.DeepEqual(m, expect) { + t.Fatalf("Test failed:\ngot :\n%#v\n\nwant :\n%#v\n\n", m, expect) + } + if m["a"].Value != 0 { + t.Fatalf(`n merged in m because I solved non-addressable map values TODO: m["a"].Value(%d) != n["a"].Value(%d)`, m["a"].Value, n["a"].Value) + } + if m["b"].Value != 42 { + t.Fatalf(`n wrongly merged in m: m["b"].Value(%d) != n["b"].Value(%d)`, m["b"].Value, n["b"].Value) + } + if m["c"].Value != 13 { + t.Fatalf(`n overwritten in m: 
m["c"].Value(%d) != n["c"].Value(%d)`, m["c"].Value, n["c"].Value) + } +} + +func TestYAMLMaps(t *testing.T) { + thing := loadYAML("testdata/thing.yml") + license := loadYAML("testdata/license.yml") + ft := thing["fields"].(map[interface{}]interface{}) + fl := license["fields"].(map[interface{}]interface{}) + expectedLength := len(ft) + len(fl) + if err := Merge(&license, thing); err != nil { + t.Fatal(err.Error()) + } + currentLength := len(license["fields"].(map[interface{}]interface{})) + if currentLength != expectedLength { + t.Fatalf(`thing not merged in license properly, license must have %d elements instead of %d`, expectedLength, currentLength) + } + fields := license["fields"].(map[interface{}]interface{}) + if _, ok := fields["id"]; !ok { + t.Fatalf(`thing not merged in license properly, license must have a new id field from thing`) + } +} + +func TestTwoPointerValues(t *testing.T) { + a := &simpleTest{} + b := &simpleTest{42} + if err := Merge(a, b); err != nil { + t.Fatalf(`Boom. You crossed the streams: %s`, err) + } +} + +func TestMap(t *testing.T) { + a := complexTest{} + a.Id = "athing" + c := moreComplextText{a, simpleTest{}, simpleTest{}} + b := map[string]interface{}{ + "ct": map[string]interface{}{ + "st": map[string]interface{}{ + "value": 42, + }, + "sz": 1, + "id": "bthing", + }, + "st": &simpleTest{144}, // Mapping a reference + "zt": simpleTest{299}, // Mapping a missing field (zt doesn't exist) + "nt": simpleTest{3}, + } + if err := Map(&c, b); err != nil { + t.FailNow() + } + m := b["ct"].(map[string]interface{}) + n := m["st"].(map[string]interface{}) + o := b["st"].(*simpleTest) + p := b["nt"].(simpleTest) + if c.Ct.St.Value != 42 { + t.Fatalf("b not merged in properly: c.Ct.St.Value(%d) != b.Ct.St.Value(%d)", c.Ct.St.Value, n["value"]) + } + if c.St.Value != 144 { + t.Fatalf("b not merged in properly: c.St.Value(%d) != b.St.Value(%d)", c.St.Value, o.Value) + } + if c.Nt.Value != 3 { + t.Fatalf("b not merged in properly: c.Nt.Value(%d) != b.Nt.Value(%d)", c.St.Value, p.Value) + } + if c.Ct.sz == 1 { + t.Fatalf("a's private field sz not preserved from merge: c.Ct.sz(%d) == b.Ct.sz(%d)", c.Ct.sz, m["sz"]) + } + if c.Ct.Id == m["id"] { + t.Fatalf("a's field Id merged unexpectedly: c.Ct.Id(%s) == b.Ct.Id(%s)", c.Ct.Id, m["id"]) + } +} + +func TestSimpleMap(t *testing.T) { + a := simpleTest{} + b := map[string]interface{}{ + "value": 42, + } + if err := Map(&a, b); err != nil { + t.FailNow() + } + if a.Value != 42 { + t.Fatalf("b not merged in properly: a.Value(%d) != b.Value(%v)", a.Value, b["value"]) + } +} + +type pointerMapTest struct { + A int + hidden int + B *simpleTest +} + +func TestBackAndForth(t *testing.T) { + pt := pointerMapTest{42, 1, &simpleTest{66}} + m := make(map[string]interface{}) + if err := Map(&m, pt); err != nil { + t.FailNow() + } + var ( + v interface{} + ok bool + ) + if v, ok = m["a"]; v.(int) != pt.A || !ok { + t.Fatalf("pt not merged in properly: m[`a`](%d) != pt.A(%d)", v, pt.A) + } + if v, ok = m["b"]; !ok { + t.Fatalf("pt not merged in properly: B is missing in m") + } + var st *simpleTest + if st = v.(*simpleTest); st.Value != 66 { + t.Fatalf("something went wrong while mapping pt on m, B wasn't copied") + } + bpt := pointerMapTest{} + if err := Map(&bpt, m); err != nil { + t.Fatal(err) + } + if bpt.A != pt.A { + t.Fatalf("pt not merged in properly: bpt.A(%d) != pt.A(%d)", bpt.A, pt.A) + } + if bpt.hidden == pt.hidden { + t.Fatalf("pt unexpectedly merged: bpt.hidden(%d) == pt.hidden(%d)", bpt.hidden, pt.hidden) + } + if bpt.B.Value 
!= pt.B.Value { + t.Fatalf("pt not merged in properly: bpt.B.Value(%d) != pt.B.Value(%d)", bpt.B.Value, pt.B.Value) + } +} + +type structWithTimePointer struct { + Birth *time.Time +} + +func TestTime(t *testing.T) { + now := time.Now() + dataStruct := structWithTimePointer{ + Birth: &now, + } + dataMap := map[string]interface{}{ + "Birth": &now, + } + b := structWithTimePointer{} + if err := Merge(&b, dataStruct); err != nil { + t.FailNow() + } + if b.Birth.IsZero() { + t.Fatalf("time.Time not merged in properly: b.Birth(%v) != dataStruct['Birth'](%v)", b.Birth, dataStruct.Birth) + } + if b.Birth != dataStruct.Birth { + t.Fatalf("time.Time not merged in properly: b.Birth(%v) != dataStruct['Birth'](%v)", b.Birth, dataStruct.Birth) + } + b = structWithTimePointer{} + if err := Map(&b, dataMap); err != nil { + t.FailNow() + } + if b.Birth.IsZero() { + t.Fatalf("time.Time not merged in properly: b.Birth(%v) != dataMap['Birth'](%v)", b.Birth, dataMap["Birth"]) + } +} + +type simpleNested struct { + A int +} + +type structWithNestedPtrValueMap struct { + NestedPtrValue map[string]*simpleNested +} + +func TestNestedPtrValueInMap(t *testing.T) { + src := &structWithNestedPtrValueMap{ + NestedPtrValue: map[string]*simpleNested{ + "x": &simpleNested{ + A: 1, + }, + }, + } + dst := &structWithNestedPtrValueMap{ + NestedPtrValue: map[string]*simpleNested{ + "x": &simpleNested{ + }, + }, + } + if err := Map(dst, src); err != nil { + t.FailNow() + } + if dst.NestedPtrValue["x"].A == 0 { + t.Fatalf("Nested Ptr value not merged in properly: dst.NestedPtrValue[\"x\"].A(%v) != src.NestedPtrValue[\"x\"].A(%v)", dst.NestedPtrValue["x"].A, src.NestedPtrValue["x"].A) + } +} + +func loadYAML(path string) (m map[string]interface{}) { + m = make(map[string]interface{}) + raw, _ := ioutil.ReadFile(path) + _ = yaml.Unmarshal(raw, &m) + return +} diff --git a/vendor/github.com/imdario/mergo/testdata/license.yml b/vendor/github.com/imdario/mergo/testdata/license.yml new file mode 100644 index 000000000..62fdb61ec --- /dev/null +++ b/vendor/github.com/imdario/mergo/testdata/license.yml @@ -0,0 +1,3 @@ +import: ../../../../fossene/db/schema/thing.yml +fields: + site: string diff --git a/vendor/github.com/imdario/mergo/testdata/thing.yml b/vendor/github.com/imdario/mergo/testdata/thing.yml new file mode 100644 index 000000000..c28eab0d0 --- /dev/null +++ b/vendor/github.com/imdario/mergo/testdata/thing.yml @@ -0,0 +1,5 @@ +fields: + id: int + name: string + parent: ref "datu:thing" + status: enum(draft, public, private) diff --git a/vendor/github.com/jmespath/go-jmespath/.gitignore b/vendor/github.com/jmespath/go-jmespath/.gitignore new file mode 100644 index 000000000..531fcc11c --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/.gitignore @@ -0,0 +1,4 @@ +jpgo +jmespath-fuzz.zip +cpu.out +go-jmespath.test diff --git a/vendor/github.com/jmespath/go-jmespath/.travis.yml b/vendor/github.com/jmespath/go-jmespath/.travis.yml new file mode 100644 index 000000000..1f9807757 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/.travis.yml @@ -0,0 +1,9 @@ +language: go + +sudo: false + +go: + - 1.4 + +install: go get -v -t ./... 
+script: make test diff --git a/vendor/github.com/jmespath/go-jmespath/LICENSE b/vendor/github.com/jmespath/go-jmespath/LICENSE new file mode 100644 index 000000000..b03310a91 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/LICENSE @@ -0,0 +1,13 @@ +Copyright 2015 James Saryerwinnie + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. diff --git a/vendor/github.com/jmespath/go-jmespath/Makefile b/vendor/github.com/jmespath/go-jmespath/Makefile new file mode 100644 index 000000000..ad17bf001 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/Makefile @@ -0,0 +1,44 @@ + +CMD = jpgo + +help: + @echo "Please use \`make ' where is one of" + @echo " test to run all the tests" + @echo " build to build the library and jp executable" + @echo " generate to run codegen" + + +generate: + go generate ./... + +build: + rm -f $(CMD) + go build ./... + rm -f cmd/$(CMD)/$(CMD) && cd cmd/$(CMD)/ && go build ./... + mv cmd/$(CMD)/$(CMD) . + +test: + go test -v ./... + +check: + go vet ./... + @echo "golint ./..." + @lint=`golint ./...`; \ + lint=`echo "$$lint" | grep -v "astnodetype_string.go" | grep -v "toktype_string.go"`; \ + echo "$$lint"; \ + if [ "$$lint" != "" ]; then exit 1; fi + +htmlc: + go test -coverprofile="/tmp/jpcov" && go tool cover -html="/tmp/jpcov" && unlink /tmp/jpcov + +buildfuzz: + go-fuzz-build github.com/jmespath/go-jmespath/fuzz + +fuzz: buildfuzz + go-fuzz -bin=./jmespath-fuzz.zip -workdir=fuzz/corpus + +bench: + go test -bench . -cpuprofile cpu.out + +pprof-cpu: + go tool pprof ./go-jmespath.test ./cpu.out diff --git a/vendor/github.com/jmespath/go-jmespath/README.md b/vendor/github.com/jmespath/go-jmespath/README.md new file mode 100644 index 000000000..187ef676d --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/README.md @@ -0,0 +1,7 @@ +# go-jmespath - A JMESPath implementation in Go + +[![Build Status](https://img.shields.io/travis/jmespath/go-jmespath.svg)](https://travis-ci.org/jmespath/go-jmespath) + + + +See http://jmespath.org for more info. diff --git a/vendor/github.com/jmespath/go-jmespath/api.go b/vendor/github.com/jmespath/go-jmespath/api.go new file mode 100644 index 000000000..9cfa988bc --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/api.go @@ -0,0 +1,49 @@ +package jmespath + +import "strconv" + +// JmesPath is the epresentation of a compiled JMES path query. A JmesPath is +// safe for concurrent use by multiple goroutines. +type JMESPath struct { + ast ASTNode + intr *treeInterpreter +} + +// Compile parses a JMESPath expression and returns, if successful, a JMESPath +// object that can be used to match against data. +func Compile(expression string) (*JMESPath, error) { + parser := NewParser() + ast, err := parser.Parse(expression) + if err != nil { + return nil, err + } + jmespath := &JMESPath{ast: ast, intr: newInterpreter()} + return jmespath, nil +} + +// MustCompile is like Compile but panics if the expression cannot be parsed. +// It simplifies safe initialization of global variables holding compiled +// JMESPaths. 
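+// For example:
+//
+//	var getFoo = MustCompile("foo.bar")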
+func MustCompile(expression string) *JMESPath { + jmespath, err := Compile(expression) + if err != nil { + panic(`jmespath: Compile(` + strconv.Quote(expression) + `): ` + err.Error()) + } + return jmespath +} + +// Search evaluates a JMESPath expression against input data and returns the result. +func (jp *JMESPath) Search(data interface{}) (interface{}, error) { + return jp.intr.Execute(jp.ast, data) +} + +// Search evaluates a JMESPath expression against input data and returns the result. +func Search(expression string, data interface{}) (interface{}, error) { + intr := newInterpreter() + parser := NewParser() + ast, err := parser.Parse(expression) + if err != nil { + return nil, err + } + return intr.Execute(ast, data) +} diff --git a/vendor/github.com/jmespath/go-jmespath/api_test.go b/vendor/github.com/jmespath/go-jmespath/api_test.go new file mode 100644 index 000000000..b0b106d3d --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/api_test.go @@ -0,0 +1,32 @@ +package jmespath + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestValidPrecompiledExpressionSearches(t *testing.T) { + assert := assert.New(t) + data := make(map[string]interface{}) + data["foo"] = "bar" + precompiled, err := Compile("foo") + assert.Nil(err) + result, err := precompiled.Search(data) + assert.Nil(err) + assert.Equal("bar", result) +} + +func TestInvalidPrecompileErrors(t *testing.T) { + assert := assert.New(t) + _, err := Compile("not a valid expression") + assert.NotNil(err) +} + +func TestInvalidMustCompilePanics(t *testing.T) { + defer func() { + r := recover() + assert.NotNil(t, r) + }() + MustCompile("not a valid expression") +} diff --git a/vendor/github.com/jmespath/go-jmespath/astnodetype_string.go b/vendor/github.com/jmespath/go-jmespath/astnodetype_string.go new file mode 100644 index 000000000..1cd2d239c --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/astnodetype_string.go @@ -0,0 +1,16 @@ +// generated by stringer -type astNodeType; DO NOT EDIT + +package jmespath + +import "fmt" + +const _astNodeType_name = "ASTEmptyASTComparatorASTCurrentNodeASTExpRefASTFunctionExpressionASTFieldASTFilterProjectionASTFlattenASTIdentityASTIndexASTIndexExpressionASTKeyValPairASTLiteralASTMultiSelectHashASTMultiSelectListASTOrExpressionASTAndExpressionASTNotExpressionASTPipeASTProjectionASTSubexpressionASTSliceASTValueProjection" + +var _astNodeType_index = [...]uint16{0, 8, 21, 35, 44, 65, 73, 92, 102, 113, 121, 139, 152, 162, 180, 198, 213, 229, 245, 252, 265, 281, 289, 307} + +func (i astNodeType) String() string { + if i < 0 || i >= astNodeType(len(_astNodeType_index)-1) { + return fmt.Sprintf("astNodeType(%d)", i) + } + return _astNodeType_name[_astNodeType_index[i]:_astNodeType_index[i+1]] +} diff --git a/vendor/github.com/jmespath/go-jmespath/cmd/jpgo/main.go b/vendor/github.com/jmespath/go-jmespath/cmd/jpgo/main.go new file mode 100644 index 000000000..1c53cfc86 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/cmd/jpgo/main.go @@ -0,0 +1,96 @@ +/*Basic command line interface for debug and testing purposes. + +Examples: + +Only print the AST for the expression: + + jp.go -ast "foo.bar.baz" + +Evaluate the JMESPath expression against JSON data from a file: + + jp.go -input /tmp/data.json "foo.bar.baz" + +This program can also be used as an executable to the jp-compliance +runner (github.com/jmespath/jmespath.test). 
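+ +For orientation, here is a minimal, self-contained sketch of driving the +vendored library directly instead of through this CLI (hypothetical example +data; error handling abbreviated). It uses only the Search, MustCompile, and +(*JMESPath).Search entry points defined in api.go above: + + package main + + import ( + "fmt" + + "github.com/jmespath/go-jmespath" + ) + + func main() { + data := map[string]interface{}{"foo": map[string]interface{}{"bar": "baz"}} + // One-shot: parse and evaluate in a single call. + result, err := jmespath.Search("foo.bar", data) + if err != nil { + panic(err) + } + fmt.Println(result) // prints: baz + + // Reusable: compile once, then evaluate against many inputs. + expr := jmespath.MustCompile("foo.bar") + result, _ = expr.Search(data) + fmt.Println(result) // prints: baz + }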
+ +*/ +package main + +import ( + "flag" + "fmt" + "io/ioutil" + "os" +) + +import ( + "encoding/json" + + "github.com/jmespath/go-jmespath" +) + +func errMsg(msg string, a ...interface{}) int { + fmt.Fprintf(os.Stderr, msg, a...) + fmt.Fprintln(os.Stderr) + return 1 +} + +func run() int { + + astOnly := flag.Bool("ast", false, "Print the AST for the input expression and exit.") + inputFile := flag.String("input", "", "Filename containing JSON data to search. If not provided, data is read from stdin.") + + flag.Parse() + args := flag.Args() + if len(args) != 1 { + fmt.Fprintf(os.Stderr, "Usage:\n\n") + flag.PrintDefaults() + return errMsg("\nError: expected a single argument (the JMESPath expression).") + } + + expression := args[0] + parser := jmespath.NewParser() + parsed, err := parser.Parse(expression) + if err != nil { + if syntaxError, ok := err.(jmespath.SyntaxError); ok { + return errMsg("%s\n%s\n", syntaxError, syntaxError.HighlightLocation()) + } + return errMsg("%s", err) + } + if *astOnly { + fmt.Println("") + fmt.Printf("%s\n", parsed) + return 0 + } + + var inputData []byte + if *inputFile != "" { + inputData, err = ioutil.ReadFile(*inputFile) + if err != nil { + return errMsg("Error loading file %s: %s", *inputFile, err) + } + } else { + // If an input data file is not provided then we read the + // data from stdin. + inputData, err = ioutil.ReadAll(os.Stdin) + if err != nil { + return errMsg("Error reading from stdin: %s", err) + } + } + var data interface{} + json.Unmarshal(inputData, &data) + result, err := jmespath.Search(expression, data) + if err != nil { + return errMsg("Error executing expression: %s", err) + } + toJSON, err := json.MarshalIndent(result, "", " ") + if err != nil { + return errMsg("Error serializing result to JSON: %s", err) + } + fmt.Println(string(toJSON)) + return 0 +} + +func main() { + os.Exit(run()) +} diff --git a/vendor/github.com/jmespath/go-jmespath/compliance/basic.json b/vendor/github.com/jmespath/go-jmespath/compliance/basic.json new file mode 100644 index 000000000..d550e9695 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/compliance/basic.json @@ -0,0 +1,96 @@ +[{ + "given": + {"foo": {"bar": {"baz": "correct"}}}, + "cases": [ + { + "expression": "foo", + "result": {"bar": {"baz": "correct"}} + }, + { + "expression": "foo.bar", + "result": {"baz": "correct"} + }, + { + "expression": "foo.bar.baz", + "result": "correct" + }, + { + "expression": "foo\n.\nbar\n.baz", + "result": "correct" + }, + { + "expression": "foo.bar.baz.bad", + "result": null + }, + { + "expression": "foo.bar.bad", + "result": null + }, + { + "expression": "foo.bad", + "result": null + }, + { + "expression": "bad", + "result": null + }, + { + "expression": "bad.morebad.morebad", + "result": null + } + ] +}, +{ + "given": + {"foo": {"bar": ["one", "two", "three"]}}, + "cases": [ + { + "expression": "foo", + "result": {"bar": ["one", "two", "three"]} + }, + { + "expression": "foo.bar", + "result": ["one", "two", "three"] + } + ] +}, +{ + "given": ["one", "two", "three"], + "cases": [ + { + "expression": "one", + "result": null + }, + { + "expression": "two", + "result": null + }, + { + "expression": "three", + "result": null + }, + { + "expression": "one.two", + "result": null + } + ] +}, +{ + "given": + {"foo": {"1": ["one", "two", "three"], "-1": "bar"}}, + "cases": [ + { + "expression": "foo.\"1\"", + "result": ["one", "two", "three"] + }, + { + "expression": "foo.\"1\"[0]", + "result": "one" + }, + { + "expression": "foo.\"-1\"", + "result": "bar" + } + 
] +} +] diff --git a/vendor/github.com/jmespath/go-jmespath/compliance/boolean.json b/vendor/github.com/jmespath/go-jmespath/compliance/boolean.json new file mode 100644 index 000000000..e3fa196b1 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/compliance/boolean.json @@ -0,0 +1,257 @@ +[ + { + "given": { + "outer": { + "foo": "foo", + "bar": "bar", + "baz": "baz" + } + }, + "cases": [ + { + "expression": "outer.foo || outer.bar", + "result": "foo" + }, + { + "expression": "outer.foo||outer.bar", + "result": "foo" + }, + { + "expression": "outer.bar || outer.baz", + "result": "bar" + }, + { + "expression": "outer.bar||outer.baz", + "result": "bar" + }, + { + "expression": "outer.bad || outer.foo", + "result": "foo" + }, + { + "expression": "outer.bad||outer.foo", + "result": "foo" + }, + { + "expression": "outer.foo || outer.bad", + "result": "foo" + }, + { + "expression": "outer.foo||outer.bad", + "result": "foo" + }, + { + "expression": "outer.bad || outer.alsobad", + "result": null + }, + { + "expression": "outer.bad||outer.alsobad", + "result": null + } + ] + }, + { + "given": { + "outer": { + "foo": "foo", + "bool": false, + "empty_list": [], + "empty_string": "" + } + }, + "cases": [ + { + "expression": "outer.empty_string || outer.foo", + "result": "foo" + }, + { + "expression": "outer.nokey || outer.bool || outer.empty_list || outer.empty_string || outer.foo", + "result": "foo" + } + ] + }, + { + "given": { + "True": true, + "False": false, + "Number": 5, + "EmptyList": [], + "Zero": 0 + }, + "cases": [ + { + "expression": "True && False", + "result": false + }, + { + "expression": "False && True", + "result": false + }, + { + "expression": "True && True", + "result": true + }, + { + "expression": "False && False", + "result": false + }, + { + "expression": "True && Number", + "result": 5 + }, + { + "expression": "Number && True", + "result": true + }, + { + "expression": "Number && False", + "result": false + }, + { + "expression": "Number && EmptyList", + "result": [] + }, + { + "expression": "Number && True", + "result": true + }, + { + "expression": "EmptyList && True", + "result": [] + }, + { + "expression": "EmptyList && False", + "result": [] + }, + { + "expression": "True || False", + "result": true + }, + { + "expression": "True || True", + "result": true + }, + { + "expression": "False || True", + "result": true + }, + { + "expression": "False || False", + "result": false + }, + { + "expression": "Number || EmptyList", + "result": 5 + }, + { + "expression": "Number || True", + "result": 5 + }, + { + "expression": "Number || True && False", + "result": 5 + }, + { + "expression": "(Number || True) && False", + "result": false + }, + { + "expression": "Number || (True && False)", + "result": 5 + }, + { + "expression": "!True", + "result": false + }, + { + "expression": "!False", + "result": true + }, + { + "expression": "!Number", + "result": false + }, + { + "expression": "!EmptyList", + "result": true + }, + { + "expression": "True && !False", + "result": true + }, + { + "expression": "True && !EmptyList", + "result": true + }, + { + "expression": "!False && !EmptyList", + "result": true + }, + { + "expression": "!(True && False)", + "result": true + }, + { + "expression": "!Zero", + "result": false + }, + { + "expression": "!!Zero", + "result": true + } + ] + }, + { + "given": { + "one": 1, + "two": 2, + "three": 3 + }, + "cases": [ + { + "expression": "one < two", + "result": true + }, + { + "expression": "one <= two", + "result": true + }, + { + "expression": 
"one == one", + "result": true + }, + { + "expression": "one == two", + "result": false + }, + { + "expression": "one > two", + "result": false + }, + { + "expression": "one >= two", + "result": false + }, + { + "expression": "one != two", + "result": true + }, + { + "expression": "one < two && three > one", + "result": true + }, + { + "expression": "one < two || three > one", + "result": true + }, + { + "expression": "one < two || three < one", + "result": true + }, + { + "expression": "two < one || three < one", + "result": false + } + ] + } +] diff --git a/vendor/github.com/jmespath/go-jmespath/compliance/current.json b/vendor/github.com/jmespath/go-jmespath/compliance/current.json new file mode 100644 index 000000000..0c26248d0 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/compliance/current.json @@ -0,0 +1,25 @@ +[ + { + "given": { + "foo": [{"name": "a"}, {"name": "b"}], + "bar": {"baz": "qux"} + }, + "cases": [ + { + "expression": "@", + "result": { + "foo": [{"name": "a"}, {"name": "b"}], + "bar": {"baz": "qux"} + } + }, + { + "expression": "@.bar", + "result": {"baz": "qux"} + }, + { + "expression": "@.foo[0]", + "result": {"name": "a"} + } + ] + } +] diff --git a/vendor/github.com/jmespath/go-jmespath/compliance/escape.json b/vendor/github.com/jmespath/go-jmespath/compliance/escape.json new file mode 100644 index 000000000..4a62d951a --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/compliance/escape.json @@ -0,0 +1,46 @@ +[{ + "given": { + "foo.bar": "dot", + "foo bar": "space", + "foo\nbar": "newline", + "foo\"bar": "doublequote", + "c:\\\\windows\\path": "windows", + "/unix/path": "unix", + "\"\"\"": "threequotes", + "bar": {"baz": "qux"} + }, + "cases": [ + { + "expression": "\"foo.bar\"", + "result": "dot" + }, + { + "expression": "\"foo bar\"", + "result": "space" + }, + { + "expression": "\"foo\\nbar\"", + "result": "newline" + }, + { + "expression": "\"foo\\\"bar\"", + "result": "doublequote" + }, + { + "expression": "\"c:\\\\\\\\windows\\\\path\"", + "result": "windows" + }, + { + "expression": "\"/unix/path\"", + "result": "unix" + }, + { + "expression": "\"\\\"\\\"\\\"\"", + "result": "threequotes" + }, + { + "expression": "\"bar\".\"baz\"", + "result": "qux" + } + ] +}] diff --git a/vendor/github.com/jmespath/go-jmespath/compliance/filters.json b/vendor/github.com/jmespath/go-jmespath/compliance/filters.json new file mode 100644 index 000000000..5b9f52b11 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/compliance/filters.json @@ -0,0 +1,468 @@ +[ + { + "given": {"foo": [{"name": "a"}, {"name": "b"}]}, + "cases": [ + { + "comment": "Matching a literal", + "expression": "foo[?name == 'a']", + "result": [{"name": "a"}] + } + ] + }, + { + "given": {"foo": [0, 1], "bar": [2, 3]}, + "cases": [ + { + "comment": "Matching a literal", + "expression": "*[?[0] == `0`]", + "result": [[], []] + } + ] + }, + { + "given": {"foo": [{"first": "foo", "last": "bar"}, + {"first": "foo", "last": "foo"}, + {"first": "foo", "last": "baz"}]}, + "cases": [ + { + "comment": "Matching an expression", + "expression": "foo[?first == last]", + "result": [{"first": "foo", "last": "foo"}] + }, + { + "comment": "Verify projection created from filter", + "expression": "foo[?first == last].first", + "result": ["foo"] + } + ] + }, + { + "given": {"foo": [{"age": 20}, + {"age": 25}, + {"age": 30}]}, + "cases": [ + { + "comment": "Greater than with a number", + "expression": "foo[?age > `25`]", + "result": [{"age": 30}] + }, + { + "expression": "foo[?age >= `25`]", + 
"result": [{"age": 25}, {"age": 30}] + }, + { + "comment": "Greater than with a number", + "expression": "foo[?age > `30`]", + "result": [] + }, + { + "comment": "Greater than with a number", + "expression": "foo[?age < `25`]", + "result": [{"age": 20}] + }, + { + "comment": "Greater than with a number", + "expression": "foo[?age <= `25`]", + "result": [{"age": 20}, {"age": 25}] + }, + { + "comment": "Greater than with a number", + "expression": "foo[?age < `20`]", + "result": [] + }, + { + "expression": "foo[?age == `20`]", + "result": [{"age": 20}] + }, + { + "expression": "foo[?age != `20`]", + "result": [{"age": 25}, {"age": 30}] + } + ] + }, + { + "given": {"foo": [{"top": {"name": "a"}}, + {"top": {"name": "b"}}]}, + "cases": [ + { + "comment": "Filter with subexpression", + "expression": "foo[?top.name == 'a']", + "result": [{"top": {"name": "a"}}] + } + ] + }, + { + "given": {"foo": [{"top": {"first": "foo", "last": "bar"}}, + {"top": {"first": "foo", "last": "foo"}}, + {"top": {"first": "foo", "last": "baz"}}]}, + "cases": [ + { + "comment": "Matching an expression", + "expression": "foo[?top.first == top.last]", + "result": [{"top": {"first": "foo", "last": "foo"}}] + }, + { + "comment": "Matching a JSON array", + "expression": "foo[?top == `{\"first\": \"foo\", \"last\": \"bar\"}`]", + "result": [{"top": {"first": "foo", "last": "bar"}}] + } + ] + }, + { + "given": {"foo": [ + {"key": true}, + {"key": false}, + {"key": 0}, + {"key": 1}, + {"key": [0]}, + {"key": {"bar": [0]}}, + {"key": null}, + {"key": [1]}, + {"key": {"a":2}} + ]}, + "cases": [ + { + "expression": "foo[?key == `true`]", + "result": [{"key": true}] + }, + { + "expression": "foo[?key == `false`]", + "result": [{"key": false}] + }, + { + "expression": "foo[?key == `0`]", + "result": [{"key": 0}] + }, + { + "expression": "foo[?key == `1`]", + "result": [{"key": 1}] + }, + { + "expression": "foo[?key == `[0]`]", + "result": [{"key": [0]}] + }, + { + "expression": "foo[?key == `{\"bar\": [0]}`]", + "result": [{"key": {"bar": [0]}}] + }, + { + "expression": "foo[?key == `null`]", + "result": [{"key": null}] + }, + { + "expression": "foo[?key == `[1]`]", + "result": [{"key": [1]}] + }, + { + "expression": "foo[?key == `{\"a\":2}`]", + "result": [{"key": {"a":2}}] + }, + { + "expression": "foo[?`true` == key]", + "result": [{"key": true}] + }, + { + "expression": "foo[?`false` == key]", + "result": [{"key": false}] + }, + { + "expression": "foo[?`0` == key]", + "result": [{"key": 0}] + }, + { + "expression": "foo[?`1` == key]", + "result": [{"key": 1}] + }, + { + "expression": "foo[?`[0]` == key]", + "result": [{"key": [0]}] + }, + { + "expression": "foo[?`{\"bar\": [0]}` == key]", + "result": [{"key": {"bar": [0]}}] + }, + { + "expression": "foo[?`null` == key]", + "result": [{"key": null}] + }, + { + "expression": "foo[?`[1]` == key]", + "result": [{"key": [1]}] + }, + { + "expression": "foo[?`{\"a\":2}` == key]", + "result": [{"key": {"a":2}}] + }, + { + "expression": "foo[?key != `true`]", + "result": [{"key": false}, {"key": 0}, {"key": 1}, {"key": [0]}, + {"key": {"bar": [0]}}, {"key": null}, {"key": [1]}, {"key": {"a":2}}] + }, + { + "expression": "foo[?key != `false`]", + "result": [{"key": true}, {"key": 0}, {"key": 1}, {"key": [0]}, + {"key": {"bar": [0]}}, {"key": null}, {"key": [1]}, {"key": {"a":2}}] + }, + { + "expression": "foo[?key != `0`]", + "result": [{"key": true}, {"key": false}, {"key": 1}, {"key": [0]}, + {"key": {"bar": [0]}}, {"key": null}, {"key": [1]}, {"key": {"a":2}}] + }, + { + 
"expression": "foo[?key != `1`]", + "result": [{"key": true}, {"key": false}, {"key": 0}, {"key": [0]}, + {"key": {"bar": [0]}}, {"key": null}, {"key": [1]}, {"key": {"a":2}}] + }, + { + "expression": "foo[?key != `null`]", + "result": [{"key": true}, {"key": false}, {"key": 0}, {"key": 1}, {"key": [0]}, + {"key": {"bar": [0]}}, {"key": [1]}, {"key": {"a":2}}] + }, + { + "expression": "foo[?key != `[1]`]", + "result": [{"key": true}, {"key": false}, {"key": 0}, {"key": 1}, {"key": [0]}, + {"key": {"bar": [0]}}, {"key": null}, {"key": {"a":2}}] + }, + { + "expression": "foo[?key != `{\"a\":2}`]", + "result": [{"key": true}, {"key": false}, {"key": 0}, {"key": 1}, {"key": [0]}, + {"key": {"bar": [0]}}, {"key": null}, {"key": [1]}] + }, + { + "expression": "foo[?`true` != key]", + "result": [{"key": false}, {"key": 0}, {"key": 1}, {"key": [0]}, + {"key": {"bar": [0]}}, {"key": null}, {"key": [1]}, {"key": {"a":2}}] + }, + { + "expression": "foo[?`false` != key]", + "result": [{"key": true}, {"key": 0}, {"key": 1}, {"key": [0]}, + {"key": {"bar": [0]}}, {"key": null}, {"key": [1]}, {"key": {"a":2}}] + }, + { + "expression": "foo[?`0` != key]", + "result": [{"key": true}, {"key": false}, {"key": 1}, {"key": [0]}, + {"key": {"bar": [0]}}, {"key": null}, {"key": [1]}, {"key": {"a":2}}] + }, + { + "expression": "foo[?`1` != key]", + "result": [{"key": true}, {"key": false}, {"key": 0}, {"key": [0]}, + {"key": {"bar": [0]}}, {"key": null}, {"key": [1]}, {"key": {"a":2}}] + }, + { + "expression": "foo[?`null` != key]", + "result": [{"key": true}, {"key": false}, {"key": 0}, {"key": 1}, {"key": [0]}, + {"key": {"bar": [0]}}, {"key": [1]}, {"key": {"a":2}}] + }, + { + "expression": "foo[?`[1]` != key]", + "result": [{"key": true}, {"key": false}, {"key": 0}, {"key": 1}, {"key": [0]}, + {"key": {"bar": [0]}}, {"key": null}, {"key": {"a":2}}] + }, + { + "expression": "foo[?`{\"a\":2}` != key]", + "result": [{"key": true}, {"key": false}, {"key": 0}, {"key": 1}, {"key": [0]}, + {"key": {"bar": [0]}}, {"key": null}, {"key": [1]}] + } + ] + }, + { + "given": {"reservations": [ + {"instances": [ + {"foo": 1, "bar": 2}, {"foo": 1, "bar": 3}, + {"foo": 1, "bar": 2}, {"foo": 2, "bar": 1}]}]}, + "cases": [ + { + "expression": "reservations[].instances[?bar==`1`]", + "result": [[{"foo": 2, "bar": 1}]] + }, + { + "expression": "reservations[*].instances[?bar==`1`]", + "result": [[{"foo": 2, "bar": 1}]] + }, + { + "expression": "reservations[].instances[?bar==`1`][]", + "result": [{"foo": 2, "bar": 1}] + } + ] + }, + { + "given": { + "baz": "other", + "foo": [ + {"bar": 1}, {"bar": 2}, {"bar": 3}, {"bar": 4}, {"bar": 1, "baz": 2} + ] + }, + "cases": [ + { + "expression": "foo[?bar==`1`].bar[0]", + "result": [] + } + ] + }, + { + "given": { + "foo": [ + {"a": 1, "b": {"c": "x"}}, + {"a": 1, "b": {"c": "y"}}, + {"a": 1, "b": {"c": "z"}}, + {"a": 2, "b": {"c": "z"}}, + {"a": 1, "baz": 2} + ] + }, + "cases": [ + { + "expression": "foo[?a==`1`].b.c", + "result": ["x", "y", "z"] + } + ] + }, + { + "given": {"foo": [{"name": "a"}, {"name": "b"}, {"name": "c"}]}, + "cases": [ + { + "comment": "Filter with or expression", + "expression": "foo[?name == 'a' || name == 'b']", + "result": [{"name": "a"}, {"name": "b"}] + }, + { + "expression": "foo[?name == 'a' || name == 'e']", + "result": [{"name": "a"}] + }, + { + "expression": "foo[?name == 'a' || name == 'b' || name == 'c']", + "result": [{"name": "a"}, {"name": "b"}, {"name": "c"}] + } + ] + }, + { + "given": {"foo": [{"a": 1, "b": 2}, {"a": 1, "b": 3}]}, + "cases": [ + 
{ + "comment": "Filter with and expression", + "expression": "foo[?a == `1` && b == `2`]", + "result": [{"a": 1, "b": 2}] + }, + { + "expression": "foo[?a == `1` && b == `4`]", + "result": [] + } + ] + }, + { + "given": {"foo": [{"a": 1, "b": 2, "c": 3}, {"a": 3, "b": 4}]}, + "cases": [ + { + "comment": "Filter with Or and And expressions", + "expression": "foo[?c == `3` || a == `1` && b == `4`]", + "result": [{"a": 1, "b": 2, "c": 3}] + }, + { + "expression": "foo[?b == `2` || a == `3` && b == `4`]", + "result": [{"a": 1, "b": 2, "c": 3}, {"a": 3, "b": 4}] + }, + { + "expression": "foo[?a == `3` && b == `4` || b == `2`]", + "result": [{"a": 1, "b": 2, "c": 3}, {"a": 3, "b": 4}] + }, + { + "expression": "foo[?(a == `3` && b == `4`) || b == `2`]", + "result": [{"a": 1, "b": 2, "c": 3}, {"a": 3, "b": 4}] + }, + { + "expression": "foo[?((a == `3` && b == `4`)) || b == `2`]", + "result": [{"a": 1, "b": 2, "c": 3}, {"a": 3, "b": 4}] + }, + { + "expression": "foo[?a == `3` && (b == `4` || b == `2`)]", + "result": [{"a": 3, "b": 4}] + }, + { + "expression": "foo[?a == `3` && ((b == `4` || b == `2`))]", + "result": [{"a": 3, "b": 4}] + } + ] + }, + { + "given": {"foo": [{"a": 1, "b": 2, "c": 3}, {"a": 3, "b": 4}]}, + "cases": [ + { + "comment": "Verify precedence of or/and expressions", + "expression": "foo[?a == `1` || b ==`2` && c == `5`]", + "result": [{"a": 1, "b": 2, "c": 3}] + }, + { + "comment": "Parentheses can alter precedence", + "expression": "foo[?(a == `1` || b ==`2`) && c == `5`]", + "result": [] + }, + { + "comment": "Not expressions combined with and/or", + "expression": "foo[?!(a == `1` || b ==`2`)]", + "result": [{"a": 3, "b": 4}] + } + ] + }, + { + "given": { + "foo": [ + {"key": true}, + {"key": false}, + {"key": []}, + {"key": {}}, + {"key": [0]}, + {"key": {"a": "b"}}, + {"key": 0}, + {"key": 1}, + {"key": null}, + {"notkey": true} + ] + }, + "cases": [ + { + "comment": "Unary filter expression", + "expression": "foo[?key]", + "result": [ + {"key": true}, {"key": [0]}, {"key": {"a": "b"}}, + {"key": 0}, {"key": 1} + ] + }, + { + "comment": "Unary not filter expression", + "expression": "foo[?!key]", + "result": [ + {"key": false}, {"key": []}, {"key": {}}, + {"key": null}, {"notkey": true} + ] + }, + { + "comment": "Equality with null RHS", + "expression": "foo[?key == `null`]", + "result": [ + {"key": null}, {"notkey": true} + ] + } + ] + }, + { + "given": { + "foo": [0, 1, 2, 3, 4, 5, 6, 7, 8, 9] + }, + "cases": [ + { + "comment": "Using @ in a filter expression", + "expression": "foo[?@ < `5`]", + "result": [0, 1, 2, 3, 4] + }, + { + "comment": "Using @ in a filter expression", + "expression": "foo[?`5` > @]", + "result": [0, 1, 2, 3, 4] + }, + { + "comment": "Using @ in a filter expression", + "expression": "foo[?@ == @]", + "result": [0, 1, 2, 3, 4, 5, 6, 7, 8, 9] + } + ] + } +] diff --git a/vendor/github.com/jmespath/go-jmespath/compliance/functions.json b/vendor/github.com/jmespath/go-jmespath/compliance/functions.json new file mode 100644 index 000000000..8b8db363a --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/compliance/functions.json @@ -0,0 +1,825 @@ +[{ + "given": + { + "foo": -1, + "zero": 0, + "numbers": [-1, 3, 4, 5], + "array": [-1, 3, 4, 5, "a", "100"], + "strings": ["a", "b", "c"], + "decimals": [1.01, 1.2, -1.5], + "str": "Str", + "false": false, + "empty_list": [], + "empty_hash": {}, + "objects": {"foo": "bar", "bar": "baz"}, + "null_key": null + }, + "cases": [ + { + "expression": "abs(foo)", + "result": 1 + }, + { + "expression": 
"abs(foo)", + "result": 1 + }, + { + "expression": "abs(str)", + "error": "invalid-type" + }, + { + "expression": "abs(array[1])", + "result": 3 + }, + { + "expression": "abs(array[1])", + "result": 3 + }, + { + "expression": "abs(`false`)", + "error": "invalid-type" + }, + { + "expression": "abs(`-24`)", + "result": 24 + }, + { + "expression": "abs(`-24`)", + "result": 24 + }, + { + "expression": "abs(`1`, `2`)", + "error": "invalid-arity" + }, + { + "expression": "abs()", + "error": "invalid-arity" + }, + { + "expression": "unknown_function(`1`, `2`)", + "error": "unknown-function" + }, + { + "expression": "avg(numbers)", + "result": 2.75 + }, + { + "expression": "avg(array)", + "error": "invalid-type" + }, + { + "expression": "avg('abc')", + "error": "invalid-type" + }, + { + "expression": "avg(foo)", + "error": "invalid-type" + }, + { + "expression": "avg(@)", + "error": "invalid-type" + }, + { + "expression": "avg(strings)", + "error": "invalid-type" + }, + { + "expression": "ceil(`1.2`)", + "result": 2 + }, + { + "expression": "ceil(decimals[0])", + "result": 2 + }, + { + "expression": "ceil(decimals[1])", + "result": 2 + }, + { + "expression": "ceil(decimals[2])", + "result": -1 + }, + { + "expression": "ceil('string')", + "error": "invalid-type" + }, + { + "expression": "contains('abc', 'a')", + "result": true + }, + { + "expression": "contains('abc', 'd')", + "result": false + }, + { + "expression": "contains(`false`, 'd')", + "error": "invalid-type" + }, + { + "expression": "contains(strings, 'a')", + "result": true + }, + { + "expression": "contains(decimals, `1.2`)", + "result": true + }, + { + "expression": "contains(decimals, `false`)", + "result": false + }, + { + "expression": "ends_with(str, 'r')", + "result": true + }, + { + "expression": "ends_with(str, 'tr')", + "result": true + }, + { + "expression": "ends_with(str, 'Str')", + "result": true + }, + { + "expression": "ends_with(str, 'SStr')", + "result": false + }, + { + "expression": "ends_with(str, 'foo')", + "result": false + }, + { + "expression": "ends_with(str, `0`)", + "error": "invalid-type" + }, + { + "expression": "floor(`1.2`)", + "result": 1 + }, + { + "expression": "floor('string')", + "error": "invalid-type" + }, + { + "expression": "floor(decimals[0])", + "result": 1 + }, + { + "expression": "floor(foo)", + "result": -1 + }, + { + "expression": "floor(str)", + "error": "invalid-type" + }, + { + "expression": "length('abc')", + "result": 3 + }, + { + "expression": "length('✓foo')", + "result": 4 + }, + { + "expression": "length('')", + "result": 0 + }, + { + "expression": "length(@)", + "result": 12 + }, + { + "expression": "length(strings[0])", + "result": 1 + }, + { + "expression": "length(str)", + "result": 3 + }, + { + "expression": "length(array)", + "result": 6 + }, + { + "expression": "length(objects)", + "result": 2 + }, + { + "expression": "length(`false`)", + "error": "invalid-type" + }, + { + "expression": "length(foo)", + "error": "invalid-type" + }, + { + "expression": "length(strings[0])", + "result": 1 + }, + { + "expression": "max(numbers)", + "result": 5 + }, + { + "expression": "max(decimals)", + "result": 1.2 + }, + { + "expression": "max(strings)", + "result": "c" + }, + { + "expression": "max(abc)", + "error": "invalid-type" + }, + { + "expression": "max(array)", + "error": "invalid-type" + }, + { + "expression": "max(decimals)", + "result": 1.2 + }, + { + "expression": "max(empty_list)", + "result": null + }, + { + "expression": "merge(`{}`)", + "result": {} + }, + { + "expression": 
"merge(`{}`, `{}`)", + "result": {} + }, + { + "expression": "merge(`{\"a\": 1}`, `{\"b\": 2}`)", + "result": {"a": 1, "b": 2} + }, + { + "expression": "merge(`{\"a\": 1}`, `{\"a\": 2}`)", + "result": {"a": 2} + }, + { + "expression": "merge(`{\"a\": 1, \"b\": 2}`, `{\"a\": 2, \"c\": 3}`, `{\"d\": 4}`)", + "result": {"a": 2, "b": 2, "c": 3, "d": 4} + }, + { + "expression": "min(numbers)", + "result": -1 + }, + { + "expression": "min(decimals)", + "result": -1.5 + }, + { + "expression": "min(abc)", + "error": "invalid-type" + }, + { + "expression": "min(array)", + "error": "invalid-type" + }, + { + "expression": "min(empty_list)", + "result": null + }, + { + "expression": "min(decimals)", + "result": -1.5 + }, + { + "expression": "min(strings)", + "result": "a" + }, + { + "expression": "type('abc')", + "result": "string" + }, + { + "expression": "type(`1.0`)", + "result": "number" + }, + { + "expression": "type(`2`)", + "result": "number" + }, + { + "expression": "type(`true`)", + "result": "boolean" + }, + { + "expression": "type(`false`)", + "result": "boolean" + }, + { + "expression": "type(`null`)", + "result": "null" + }, + { + "expression": "type(`[0]`)", + "result": "array" + }, + { + "expression": "type(`{\"a\": \"b\"}`)", + "result": "object" + }, + { + "expression": "type(@)", + "result": "object" + }, + { + "expression": "sort(keys(objects))", + "result": ["bar", "foo"] + }, + { + "expression": "keys(foo)", + "error": "invalid-type" + }, + { + "expression": "keys(strings)", + "error": "invalid-type" + }, + { + "expression": "keys(`false`)", + "error": "invalid-type" + }, + { + "expression": "sort(values(objects))", + "result": ["bar", "baz"] + }, + { + "expression": "keys(empty_hash)", + "result": [] + }, + { + "expression": "values(foo)", + "error": "invalid-type" + }, + { + "expression": "join(', ', strings)", + "result": "a, b, c" + }, + { + "expression": "join(', ', strings)", + "result": "a, b, c" + }, + { + "expression": "join(',', `[\"a\", \"b\"]`)", + "result": "a,b" + }, + { + "expression": "join(',', `[\"a\", 0]`)", + "error": "invalid-type" + }, + { + "expression": "join(', ', str)", + "error": "invalid-type" + }, + { + "expression": "join('|', strings)", + "result": "a|b|c" + }, + { + "expression": "join(`2`, strings)", + "error": "invalid-type" + }, + { + "expression": "join('|', decimals)", + "error": "invalid-type" + }, + { + "expression": "join('|', decimals[].to_string(@))", + "result": "1.01|1.2|-1.5" + }, + { + "expression": "join('|', empty_list)", + "result": "" + }, + { + "expression": "reverse(numbers)", + "result": [5, 4, 3, -1] + }, + { + "expression": "reverse(array)", + "result": ["100", "a", 5, 4, 3, -1] + }, + { + "expression": "reverse(`[]`)", + "result": [] + }, + { + "expression": "reverse('')", + "result": "" + }, + { + "expression": "reverse('hello world')", + "result": "dlrow olleh" + }, + { + "expression": "starts_with(str, 'S')", + "result": true + }, + { + "expression": "starts_with(str, 'St')", + "result": true + }, + { + "expression": "starts_with(str, 'Str')", + "result": true + }, + { + "expression": "starts_with(str, 'String')", + "result": false + }, + { + "expression": "starts_with(str, `0`)", + "error": "invalid-type" + }, + { + "expression": "sum(numbers)", + "result": 11 + }, + { + "expression": "sum(decimals)", + "result": 0.71 + }, + { + "expression": "sum(array)", + "error": "invalid-type" + }, + { + "expression": "sum(array[].to_number(@))", + "result": 111 + }, + { + "expression": "sum(`[]`)", + "result": 0 + }, + { + 
"expression": "to_array('foo')", + "result": ["foo"] + }, + { + "expression": "to_array(`0`)", + "result": [0] + }, + { + "expression": "to_array(objects)", + "result": [{"foo": "bar", "bar": "baz"}] + }, + { + "expression": "to_array(`[1, 2, 3]`)", + "result": [1, 2, 3] + }, + { + "expression": "to_array(false)", + "result": [false] + }, + { + "expression": "to_string('foo')", + "result": "foo" + }, + { + "expression": "to_string(`1.2`)", + "result": "1.2" + }, + { + "expression": "to_string(`[0, 1]`)", + "result": "[0,1]" + }, + { + "expression": "to_number('1.0')", + "result": 1.0 + }, + { + "expression": "to_number('1.1')", + "result": 1.1 + }, + { + "expression": "to_number('4')", + "result": 4 + }, + { + "expression": "to_number('notanumber')", + "result": null + }, + { + "expression": "to_number(`false`)", + "result": null + }, + { + "expression": "to_number(`null`)", + "result": null + }, + { + "expression": "to_number(`[0]`)", + "result": null + }, + { + "expression": "to_number(`{\"foo\": 0}`)", + "result": null + }, + { + "expression": "\"to_string\"(`1.0`)", + "error": "syntax" + }, + { + "expression": "sort(numbers)", + "result": [-1, 3, 4, 5] + }, + { + "expression": "sort(strings)", + "result": ["a", "b", "c"] + }, + { + "expression": "sort(decimals)", + "result": [-1.5, 1.01, 1.2] + }, + { + "expression": "sort(array)", + "error": "invalid-type" + }, + { + "expression": "sort(abc)", + "error": "invalid-type" + }, + { + "expression": "sort(empty_list)", + "result": [] + }, + { + "expression": "sort(@)", + "error": "invalid-type" + }, + { + "expression": "not_null(unknown_key, str)", + "result": "Str" + }, + { + "expression": "not_null(unknown_key, foo.bar, empty_list, str)", + "result": [] + }, + { + "expression": "not_null(unknown_key, null_key, empty_list, str)", + "result": [] + }, + { + "expression": "not_null(all, expressions, are_null)", + "result": null + }, + { + "expression": "not_null()", + "error": "invalid-arity" + }, + { + "description": "function projection on single arg function", + "expression": "numbers[].to_string(@)", + "result": ["-1", "3", "4", "5"] + }, + { + "description": "function projection on single arg function", + "expression": "array[].to_number(@)", + "result": [-1, 3, 4, 5, 100] + } + ] +}, { + "given": + { + "foo": [ + {"b": "b", "a": "a"}, + {"c": "c", "b": "b"}, + {"d": "d", "c": "c"}, + {"e": "e", "d": "d"}, + {"f": "f", "e": "e"} + ] + }, + "cases": [ + { + "description": "function projection on variadic function", + "expression": "foo[].not_null(f, e, d, c, b, a)", + "result": ["b", "c", "d", "e", "f"] + } + ] +}, { + "given": + { + "people": [ + {"age": 20, "age_str": "20", "bool": true, "name": "a", "extra": "foo"}, + {"age": 40, "age_str": "40", "bool": false, "name": "b", "extra": "bar"}, + {"age": 30, "age_str": "30", "bool": true, "name": "c"}, + {"age": 50, "age_str": "50", "bool": false, "name": "d"}, + {"age": 10, "age_str": "10", "bool": true, "name": 3} + ] + }, + "cases": [ + { + "description": "sort by field expression", + "expression": "sort_by(people, &age)", + "result": [ + {"age": 10, "age_str": "10", "bool": true, "name": 3}, + {"age": 20, "age_str": "20", "bool": true, "name": "a", "extra": "foo"}, + {"age": 30, "age_str": "30", "bool": true, "name": "c"}, + {"age": 40, "age_str": "40", "bool": false, "name": "b", "extra": "bar"}, + {"age": 50, "age_str": "50", "bool": false, "name": "d"} + ] + }, + { + "expression": "sort_by(people, &age_str)", + "result": [ + {"age": 10, "age_str": "10", "bool": true, "name": 3}, + 
{"age": 20, "age_str": "20", "bool": true, "name": "a", "extra": "foo"}, + {"age": 30, "age_str": "30", "bool": true, "name": "c"}, + {"age": 40, "age_str": "40", "bool": false, "name": "b", "extra": "bar"}, + {"age": 50, "age_str": "50", "bool": false, "name": "d"} + ] + }, + { + "description": "sort by function expression", + "expression": "sort_by(people, &to_number(age_str))", + "result": [ + {"age": 10, "age_str": "10", "bool": true, "name": 3}, + {"age": 20, "age_str": "20", "bool": true, "name": "a", "extra": "foo"}, + {"age": 30, "age_str": "30", "bool": true, "name": "c"}, + {"age": 40, "age_str": "40", "bool": false, "name": "b", "extra": "bar"}, + {"age": 50, "age_str": "50", "bool": false, "name": "d"} + ] + }, + { + "description": "function projection on sort_by function", + "expression": "sort_by(people, &age)[].name", + "result": [3, "a", "c", "b", "d"] + }, + { + "expression": "sort_by(people, &extra)", + "error": "invalid-type" + }, + { + "expression": "sort_by(people, &bool)", + "error": "invalid-type" + }, + { + "expression": "sort_by(people, &name)", + "error": "invalid-type" + }, + { + "expression": "sort_by(people, name)", + "error": "invalid-type" + }, + { + "expression": "sort_by(people, &age)[].extra", + "result": ["foo", "bar"] + }, + { + "expression": "sort_by(`[]`, &age)", + "result": [] + }, + { + "expression": "max_by(people, &age)", + "result": {"age": 50, "age_str": "50", "bool": false, "name": "d"} + }, + { + "expression": "max_by(people, &age_str)", + "result": {"age": 50, "age_str": "50", "bool": false, "name": "d"} + }, + { + "expression": "max_by(people, &bool)", + "error": "invalid-type" + }, + { + "expression": "max_by(people, &extra)", + "error": "invalid-type" + }, + { + "expression": "max_by(people, &to_number(age_str))", + "result": {"age": 50, "age_str": "50", "bool": false, "name": "d"} + }, + { + "expression": "min_by(people, &age)", + "result": {"age": 10, "age_str": "10", "bool": true, "name": 3} + }, + { + "expression": "min_by(people, &age_str)", + "result": {"age": 10, "age_str": "10", "bool": true, "name": 3} + }, + { + "expression": "min_by(people, &bool)", + "error": "invalid-type" + }, + { + "expression": "min_by(people, &extra)", + "error": "invalid-type" + }, + { + "expression": "min_by(people, &to_number(age_str))", + "result": {"age": 10, "age_str": "10", "bool": true, "name": 3} + } + ] +}, { + "given": + { + "people": [ + {"age": 10, "order": "1"}, + {"age": 10, "order": "2"}, + {"age": 10, "order": "3"}, + {"age": 10, "order": "4"}, + {"age": 10, "order": "5"}, + {"age": 10, "order": "6"}, + {"age": 10, "order": "7"}, + {"age": 10, "order": "8"}, + {"age": 10, "order": "9"}, + {"age": 10, "order": "10"}, + {"age": 10, "order": "11"} + ] + }, + "cases": [ + { + "description": "stable sort order", + "expression": "sort_by(people, &age)", + "result": [ + {"age": 10, "order": "1"}, + {"age": 10, "order": "2"}, + {"age": 10, "order": "3"}, + {"age": 10, "order": "4"}, + {"age": 10, "order": "5"}, + {"age": 10, "order": "6"}, + {"age": 10, "order": "7"}, + {"age": 10, "order": "8"}, + {"age": 10, "order": "9"}, + {"age": 10, "order": "10"}, + {"age": 10, "order": "11"} + ] + } + ] +}, { + "given": + { + "people": [ + {"a": 10, "b": 1, "c": "z"}, + {"a": 10, "b": 2, "c": null}, + {"a": 10, "b": 3}, + {"a": 10, "b": 4, "c": "z"}, + {"a": 10, "b": 5, "c": null}, + {"a": 10, "b": 6}, + {"a": 10, "b": 7, "c": "z"}, + {"a": 10, "b": 8, "c": null}, + {"a": 10, "b": 9} + ], + "empty": [] + }, + "cases": [ + { + "expression": "map(&a, 
people)", + "result": [10, 10, 10, 10, 10, 10, 10, 10, 10] + }, + { + "expression": "map(&c, people)", + "result": ["z", null, null, "z", null, null, "z", null, null] + }, + { + "expression": "map(&a, badkey)", + "error": "invalid-type" + }, + { + "expression": "map(&foo, empty)", + "result": [] + } + ] +}, { + "given": { + "array": [ + { + "foo": {"bar": "yes1"} + }, + { + "foo": {"bar": "yes2"} + }, + { + "foo1": {"bar": "no"} + } + ]}, + "cases": [ + { + "expression": "map(&foo.bar, array)", + "result": ["yes1", "yes2", null] + }, + { + "expression": "map(&foo1.bar, array)", + "result": [null, null, "no"] + }, + { + "expression": "map(&foo.bar.baz, array)", + "result": [null, null, null] + } + ] +}, { + "given": { + "array": [[1, 2, 3, [4]], [5, 6, 7, [8, 9]]] + }, + "cases": [ + { + "expression": "map(&[], array)", + "result": [[1, 2, 3, 4], [5, 6, 7, 8, 9]] + } + ] +} +] diff --git a/vendor/github.com/jmespath/go-jmespath/compliance/identifiers.json b/vendor/github.com/jmespath/go-jmespath/compliance/identifiers.json new file mode 100644 index 000000000..7998a41ac --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/compliance/identifiers.json @@ -0,0 +1,1377 @@ +[ + { + "given": { + "__L": true + }, + "cases": [ + { + "expression": "__L", + "result": true + } + ] + }, + { + "given": { + "!\r": true + }, + "cases": [ + { + "expression": "\"!\\r\"", + "result": true + } + ] + }, + { + "given": { + "Y_1623": true + }, + "cases": [ + { + "expression": "Y_1623", + "result": true + } + ] + }, + { + "given": { + "x": true + }, + "cases": [ + { + "expression": "x", + "result": true + } + ] + }, + { + "given": { + "\tF\uCebb": true + }, + "cases": [ + { + "expression": "\"\\tF\\uCebb\"", + "result": true + } + ] + }, + { + "given": { + " \t": true + }, + "cases": [ + { + "expression": "\" \\t\"", + "result": true + } + ] + }, + { + "given": { + " ": true + }, + "cases": [ + { + "expression": "\" \"", + "result": true + } + ] + }, + { + "given": { + "v2": true + }, + "cases": [ + { + "expression": "v2", + "result": true + } + ] + }, + { + "given": { + "\t": true + }, + "cases": [ + { + "expression": "\"\\t\"", + "result": true + } + ] + }, + { + "given": { + "_X": true + }, + "cases": [ + { + "expression": "_X", + "result": true + } + ] + }, + { + "given": { + "\t4\ud9da\udd15": true + }, + "cases": [ + { + "expression": "\"\\t4\\ud9da\\udd15\"", + "result": true + } + ] + }, + { + "given": { + "v24_W": true + }, + "cases": [ + { + "expression": "v24_W", + "result": true + } + ] + }, + { + "given": { + "H": true + }, + "cases": [ + { + "expression": "\"H\"", + "result": true + } + ] + }, + { + "given": { + "\f": true + }, + "cases": [ + { + "expression": "\"\\f\"", + "result": true + } + ] + }, + { + "given": { + "E4": true + }, + "cases": [ + { + "expression": "\"E4\"", + "result": true + } + ] + }, + { + "given": { + "!": true + }, + "cases": [ + { + "expression": "\"!\"", + "result": true + } + ] + }, + { + "given": { + "tM": true + }, + "cases": [ + { + "expression": "tM", + "result": true + } + ] + }, + { + "given": { + " [": true + }, + "cases": [ + { + "expression": "\" [\"", + "result": true + } + ] + }, + { + "given": { + "R!": true + }, + "cases": [ + { + "expression": "\"R!\"", + "result": true + } + ] + }, + { + "given": { + "_6W": true + }, + "cases": [ + { + "expression": "_6W", + "result": true + } + ] + }, + { + "given": { + "\uaBA1\r": true + }, + "cases": [ + { + "expression": "\"\\uaBA1\\r\"", + "result": true + } + ] + }, + { + "given": { + "tL7": true + }, + "cases": 
[ + { + "expression": "tL7", + "result": true + } + ] + }, + { + "given": { + ">": true + }, + "cases": [ + { + "expression": "\">\"", + "result": true + } + ] + }, + { + "given": { + "hvu": true + }, + "cases": [ + { + "expression": "hvu", + "result": true + } + ] + }, + { + "given": { + "; !": true + }, + "cases": [ + { + "expression": "\"; !\"", + "result": true + } + ] + }, + { + "given": { + "hU": true + }, + "cases": [ + { + "expression": "hU", + "result": true + } + ] + }, + { + "given": { + "!I\n\/": true + }, + "cases": [ + { + "expression": "\"!I\\n\\/\"", + "result": true + } + ] + }, + { + "given": { + "\uEEbF": true + }, + "cases": [ + { + "expression": "\"\\uEEbF\"", + "result": true + } + ] + }, + { + "given": { + "U)\t": true + }, + "cases": [ + { + "expression": "\"U)\\t\"", + "result": true + } + ] + }, + { + "given": { + "fa0_9": true + }, + "cases": [ + { + "expression": "fa0_9", + "result": true + } + ] + }, + { + "given": { + "/": true + }, + "cases": [ + { + "expression": "\"/\"", + "result": true + } + ] + }, + { + "given": { + "Gy": true + }, + "cases": [ + { + "expression": "Gy", + "result": true + } + ] + }, + { + "given": { + "\b": true + }, + "cases": [ + { + "expression": "\"\\b\"", + "result": true + } + ] + }, + { + "given": { + "<": true + }, + "cases": [ + { + "expression": "\"<\"", + "result": true + } + ] + }, + { + "given": { + "\t": true + }, + "cases": [ + { + "expression": "\"\\t\"", + "result": true + } + ] + }, + { + "given": { + "\t&\\\r": true + }, + "cases": [ + { + "expression": "\"\\t&\\\\\\r\"", + "result": true + } + ] + }, + { + "given": { + "#": true + }, + "cases": [ + { + "expression": "\"#\"", + "result": true + } + ] + }, + { + "given": { + "B__": true + }, + "cases": [ + { + "expression": "B__", + "result": true + } + ] + }, + { + "given": { + "\nS \n": true + }, + "cases": [ + { + "expression": "\"\\nS \\n\"", + "result": true + } + ] + }, + { + "given": { + "Bp": true + }, + "cases": [ + { + "expression": "Bp", + "result": true + } + ] + }, + { + "given": { + ",\t;": true + }, + "cases": [ + { + "expression": "\",\\t;\"", + "result": true + } + ] + }, + { + "given": { + "B_q": true + }, + "cases": [ + { + "expression": "B_q", + "result": true + } + ] + }, + { + "given": { + "\/+\t\n\b!Z": true + }, + "cases": [ + { + "expression": "\"\\/+\\t\\n\\b!Z\"", + "result": true + } + ] + }, + { + "given": { + "\udadd\udfc7\\ueFAc": true + }, + "cases": [ + { + "expression": "\"\udadd\udfc7\\\\ueFAc\"", + "result": true + } + ] + }, + { + "given": { + ":\f": true + }, + "cases": [ + { + "expression": "\":\\f\"", + "result": true + } + ] + }, + { + "given": { + "\/": true + }, + "cases": [ + { + "expression": "\"\\/\"", + "result": true + } + ] + }, + { + "given": { + "_BW_6Hg_Gl": true + }, + "cases": [ + { + "expression": "_BW_6Hg_Gl", + "result": true + } + ] + }, + { + "given": { + "\udbcf\udc02": true + }, + "cases": [ + { + "expression": "\"\udbcf\udc02\"", + "result": true + } + ] + }, + { + "given": { + "zs1DC": true + }, + "cases": [ + { + "expression": "zs1DC", + "result": true + } + ] + }, + { + "given": { + "__434": true + }, + "cases": [ + { + "expression": "__434", + "result": true + } + ] + }, + { + "given": { + "\udb94\udd41": true + }, + "cases": [ + { + "expression": "\"\udb94\udd41\"", + "result": true + } + ] + }, + { + "given": { + "Z_5": true + }, + "cases": [ + { + "expression": "Z_5", + "result": true + } + ] + }, + { + "given": { + "z_M_": true + }, + "cases": [ + { + "expression": "z_M_", + "result": true + } + ] + 
}, + { + "given": { + "YU_2": true + }, + "cases": [ + { + "expression": "YU_2", + "result": true + } + ] + }, + { + "given": { + "_0": true + }, + "cases": [ + { + "expression": "_0", + "result": true + } + ] + }, + { + "given": { + "\b+": true + }, + "cases": [ + { + "expression": "\"\\b+\"", + "result": true + } + ] + }, + { + "given": { + "\"": true + }, + "cases": [ + { + "expression": "\"\\\"\"", + "result": true + } + ] + }, + { + "given": { + "D7": true + }, + "cases": [ + { + "expression": "D7", + "result": true + } + ] + }, + { + "given": { + "_62L": true + }, + "cases": [ + { + "expression": "_62L", + "result": true + } + ] + }, + { + "given": { + "\tK\t": true + }, + "cases": [ + { + "expression": "\"\\tK\\t\"", + "result": true + } + ] + }, + { + "given": { + "\n\\\f": true + }, + "cases": [ + { + "expression": "\"\\n\\\\\\f\"", + "result": true + } + ] + }, + { + "given": { + "I_": true + }, + "cases": [ + { + "expression": "I_", + "result": true + } + ] + }, + { + "given": { + "W_a0_": true + }, + "cases": [ + { + "expression": "W_a0_", + "result": true + } + ] + }, + { + "given": { + "BQ": true + }, + "cases": [ + { + "expression": "BQ", + "result": true + } + ] + }, + { + "given": { + "\tX$\uABBb": true + }, + "cases": [ + { + "expression": "\"\\tX$\\uABBb\"", + "result": true + } + ] + }, + { + "given": { + "Z9": true + }, + "cases": [ + { + "expression": "Z9", + "result": true + } + ] + }, + { + "given": { + "\b%\"\uda38\udd0f": true + }, + "cases": [ + { + "expression": "\"\\b%\\\"\uda38\udd0f\"", + "result": true + } + ] + }, + { + "given": { + "_F": true + }, + "cases": [ + { + "expression": "_F", + "result": true + } + ] + }, + { + "given": { + "!,": true + }, + "cases": [ + { + "expression": "\"!,\"", + "result": true + } + ] + }, + { + "given": { + "\"!": true + }, + "cases": [ + { + "expression": "\"\\\"!\"", + "result": true + } + ] + }, + { + "given": { + "Hh": true + }, + "cases": [ + { + "expression": "Hh", + "result": true + } + ] + }, + { + "given": { + "&": true + }, + "cases": [ + { + "expression": "\"&\"", + "result": true + } + ] + }, + { + "given": { + "9\r\\R": true + }, + "cases": [ + { + "expression": "\"9\\r\\\\R\"", + "result": true + } + ] + }, + { + "given": { + "M_k": true + }, + "cases": [ + { + "expression": "M_k", + "result": true + } + ] + }, + { + "given": { + "!\b\n\udb06\ude52\"\"": true + }, + "cases": [ + { + "expression": "\"!\\b\\n\udb06\ude52\\\"\\\"\"", + "result": true + } + ] + }, + { + "given": { + "6": true + }, + "cases": [ + { + "expression": "\"6\"", + "result": true + } + ] + }, + { + "given": { + "_7": true + }, + "cases": [ + { + "expression": "_7", + "result": true + } + ] + }, + { + "given": { + "0": true + }, + "cases": [ + { + "expression": "\"0\"", + "result": true + } + ] + }, + { + "given": { + "\\8\\": true + }, + "cases": [ + { + "expression": "\"\\\\8\\\\\"", + "result": true + } + ] + }, + { + "given": { + "b7eo": true + }, + "cases": [ + { + "expression": "b7eo", + "result": true + } + ] + }, + { + "given": { + "xIUo9": true + }, + "cases": [ + { + "expression": "xIUo9", + "result": true + } + ] + }, + { + "given": { + "5": true + }, + "cases": [ + { + "expression": "\"5\"", + "result": true + } + ] + }, + { + "given": { + "?": true + }, + "cases": [ + { + "expression": "\"?\"", + "result": true + } + ] + }, + { + "given": { + "sU": true + }, + "cases": [ + { + "expression": "sU", + "result": true + } + ] + }, + { + "given": { + "VH2&H\\\/": true + }, + "cases": [ + { + "expression": "\"VH2&H\\\\\\/\"", + 
"result": true + } + ] + }, + { + "given": { + "_C": true + }, + "cases": [ + { + "expression": "_C", + "result": true + } + ] + }, + { + "given": { + "_": true + }, + "cases": [ + { + "expression": "_", + "result": true + } + ] + }, + { + "given": { + "<\t": true + }, + "cases": [ + { + "expression": "\"<\\t\"", + "result": true + } + ] + }, + { + "given": { + "\uD834\uDD1E": true + }, + "cases": [ + { + "expression": "\"\\uD834\\uDD1E\"", + "result": true + } + ] + } +] diff --git a/vendor/github.com/jmespath/go-jmespath/compliance/indices.json b/vendor/github.com/jmespath/go-jmespath/compliance/indices.json new file mode 100644 index 000000000..aa03b35dd --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/compliance/indices.json @@ -0,0 +1,346 @@ +[{ + "given": + {"foo": {"bar": ["zero", "one", "two"]}}, + "cases": [ + { + "expression": "foo.bar[0]", + "result": "zero" + }, + { + "expression": "foo.bar[1]", + "result": "one" + }, + { + "expression": "foo.bar[2]", + "result": "two" + }, + { + "expression": "foo.bar[3]", + "result": null + }, + { + "expression": "foo.bar[-1]", + "result": "two" + }, + { + "expression": "foo.bar[-2]", + "result": "one" + }, + { + "expression": "foo.bar[-3]", + "result": "zero" + }, + { + "expression": "foo.bar[-4]", + "result": null + } + ] +}, +{ + "given": + {"foo": [{"bar": "one"}, {"bar": "two"}, {"bar": "three"}, {"notbar": "four"}]}, + "cases": [ + { + "expression": "foo.bar", + "result": null + }, + { + "expression": "foo[0].bar", + "result": "one" + }, + { + "expression": "foo[1].bar", + "result": "two" + }, + { + "expression": "foo[2].bar", + "result": "three" + }, + { + "expression": "foo[3].notbar", + "result": "four" + }, + { + "expression": "foo[3].bar", + "result": null + }, + { + "expression": "foo[0]", + "result": {"bar": "one"} + }, + { + "expression": "foo[1]", + "result": {"bar": "two"} + }, + { + "expression": "foo[2]", + "result": {"bar": "three"} + }, + { + "expression": "foo[3]", + "result": {"notbar": "four"} + }, + { + "expression": "foo[4]", + "result": null + } + ] +}, +{ + "given": [ + "one", "two", "three" + ], + "cases": [ + { + "expression": "[0]", + "result": "one" + }, + { + "expression": "[1]", + "result": "two" + }, + { + "expression": "[2]", + "result": "three" + }, + { + "expression": "[-1]", + "result": "three" + }, + { + "expression": "[-2]", + "result": "two" + }, + { + "expression": "[-3]", + "result": "one" + } + ] +}, +{ + "given": {"reservations": [ + {"instances": [{"foo": 1}, {"foo": 2}]} + ]}, + "cases": [ + { + "expression": "reservations[].instances[].foo", + "result": [1, 2] + }, + { + "expression": "reservations[].instances[].bar", + "result": [] + }, + { + "expression": "reservations[].notinstances[].foo", + "result": [] + }, + { + "expression": "reservations[].notinstances[].foo", + "result": [] + } + ] +}, +{ + "given": {"reservations": [{ + "instances": [ + {"foo": [{"bar": 1}, {"bar": 2}, {"notbar": 3}, {"bar": 4}]}, + {"foo": [{"bar": 5}, {"bar": 6}, {"notbar": [7]}, {"bar": 8}]}, + {"foo": "bar"}, + {"notfoo": [{"bar": 20}, {"bar": 21}, {"notbar": [7]}, {"bar": 22}]}, + {"bar": [{"baz": [1]}, {"baz": [2]}, {"baz": [3]}, {"baz": [4]}]}, + {"baz": [{"baz": [1, 2]}, {"baz": []}, {"baz": []}, {"baz": [3, 4]}]}, + {"qux": [{"baz": []}, {"baz": [1, 2, 3]}, {"baz": [4]}, {"baz": []}]} + ], + "otherkey": {"foo": [{"bar": 1}, {"bar": 2}, {"notbar": 3}, {"bar": 4}]} + }, { + "instances": [ + {"a": [{"bar": 1}, {"bar": 2}, {"notbar": 3}, {"bar": 4}]}, + {"b": [{"bar": 5}, {"bar": 6}, {"notbar": [7]}, 
{"bar": 8}]}, + {"c": "bar"}, + {"notfoo": [{"bar": 23}, {"bar": 24}, {"notbar": [7]}, {"bar": 25}]}, + {"qux": [{"baz": []}, {"baz": [1, 2, 3]}, {"baz": [4]}, {"baz": []}]} + ], + "otherkey": {"foo": [{"bar": 1}, {"bar": 2}, {"notbar": 3}, {"bar": 4}]} + } + ]}, + "cases": [ + { + "expression": "reservations[].instances[].foo[].bar", + "result": [1, 2, 4, 5, 6, 8] + }, + { + "expression": "reservations[].instances[].foo[].baz", + "result": [] + }, + { + "expression": "reservations[].instances[].notfoo[].bar", + "result": [20, 21, 22, 23, 24, 25] + }, + { + "expression": "reservations[].instances[].notfoo[].notbar", + "result": [[7], [7]] + }, + { + "expression": "reservations[].notinstances[].foo", + "result": [] + }, + { + "expression": "reservations[].instances[].foo[].notbar", + "result": [3, [7]] + }, + { + "expression": "reservations[].instances[].bar[].baz", + "result": [[1], [2], [3], [4]] + }, + { + "expression": "reservations[].instances[].baz[].baz", + "result": [[1, 2], [], [], [3, 4]] + }, + { + "expression": "reservations[].instances[].qux[].baz", + "result": [[], [1, 2, 3], [4], [], [], [1, 2, 3], [4], []] + }, + { + "expression": "reservations[].instances[].qux[].baz[]", + "result": [1, 2, 3, 4, 1, 2, 3, 4] + } + ] +}, +{ + "given": { + "foo": [ + [["one", "two"], ["three", "four"]], + [["five", "six"], ["seven", "eight"]], + [["nine"], ["ten"]] + ] + }, + "cases": [ + { + "expression": "foo[]", + "result": [["one", "two"], ["three", "four"], ["five", "six"], + ["seven", "eight"], ["nine"], ["ten"]] + }, + { + "expression": "foo[][0]", + "result": ["one", "three", "five", "seven", "nine", "ten"] + }, + { + "expression": "foo[][1]", + "result": ["two", "four", "six", "eight"] + }, + { + "expression": "foo[][0][0]", + "result": [] + }, + { + "expression": "foo[][2][2]", + "result": [] + }, + { + "expression": "foo[][0][0][100]", + "result": [] + } + ] +}, +{ + "given": { + "foo": [{ + "bar": [ + { + "qux": 2, + "baz": 1 + }, + { + "qux": 4, + "baz": 3 + } + ] + }, + { + "bar": [ + { + "qux": 6, + "baz": 5 + }, + { + "qux": 8, + "baz": 7 + } + ] + } + ] + }, + "cases": [ + { + "expression": "foo", + "result": [{"bar": [{"qux": 2, "baz": 1}, {"qux": 4, "baz": 3}]}, + {"bar": [{"qux": 6, "baz": 5}, {"qux": 8, "baz": 7}]}] + }, + { + "expression": "foo[]", + "result": [{"bar": [{"qux": 2, "baz": 1}, {"qux": 4, "baz": 3}]}, + {"bar": [{"qux": 6, "baz": 5}, {"qux": 8, "baz": 7}]}] + }, + { + "expression": "foo[].bar", + "result": [[{"qux": 2, "baz": 1}, {"qux": 4, "baz": 3}], + [{"qux": 6, "baz": 5}, {"qux": 8, "baz": 7}]] + }, + { + "expression": "foo[].bar[]", + "result": [{"qux": 2, "baz": 1}, {"qux": 4, "baz": 3}, + {"qux": 6, "baz": 5}, {"qux": 8, "baz": 7}] + }, + { + "expression": "foo[].bar[].baz", + "result": [1, 3, 5, 7] + } + ] +}, +{ + "given": { + "string": "string", + "hash": {"foo": "bar", "bar": "baz"}, + "number": 23, + "nullvalue": null + }, + "cases": [ + { + "expression": "string[]", + "result": null + }, + { + "expression": "hash[]", + "result": null + }, + { + "expression": "number[]", + "result": null + }, + { + "expression": "nullvalue[]", + "result": null + }, + { + "expression": "string[].foo", + "result": null + }, + { + "expression": "hash[].foo", + "result": null + }, + { + "expression": "number[].foo", + "result": null + }, + { + "expression": "nullvalue[].foo", + "result": null + }, + { + "expression": "nullvalue[].foo[].bar", + "result": null + } + ] +} +] diff --git a/vendor/github.com/jmespath/go-jmespath/compliance/literal.json 
b/vendor/github.com/jmespath/go-jmespath/compliance/literal.json new file mode 100644 index 000000000..c6706b971 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/compliance/literal.json @@ -0,0 +1,185 @@ +[ + { + "given": { + "foo": [{"name": "a"}, {"name": "b"}], + "bar": {"baz": "qux"} + }, + "cases": [ + { + "expression": "`\"foo\"`", + "result": "foo" + }, + { + "comment": "Interpret escaped unicode.", + "expression": "`\"\\u03a6\"`", + "result": "Φ" + }, + { + "expression": "`\"✓\"`", + "result": "✓" + }, + { + "expression": "`[1, 2, 3]`", + "result": [1, 2, 3] + }, + { + "expression": "`{\"a\": \"b\"}`", + "result": {"a": "b"} + }, + { + "expression": "`true`", + "result": true + }, + { + "expression": "`false`", + "result": false + }, + { + "expression": "`null`", + "result": null + }, + { + "expression": "`0`", + "result": 0 + }, + { + "expression": "`1`", + "result": 1 + }, + { + "expression": "`2`", + "result": 2 + }, + { + "expression": "`3`", + "result": 3 + }, + { + "expression": "`4`", + "result": 4 + }, + { + "expression": "`5`", + "result": 5 + }, + { + "expression": "`6`", + "result": 6 + }, + { + "expression": "`7`", + "result": 7 + }, + { + "expression": "`8`", + "result": 8 + }, + { + "expression": "`9`", + "result": 9 + }, + { + "comment": "Escaping a backtick in quotes", + "expression": "`\"foo\\`bar\"`", + "result": "foo`bar" + }, + { + "comment": "Double quote in literal", + "expression": "`\"foo\\\"bar\"`", + "result": "foo\"bar" + }, + { + "expression": "`\"1\\`\"`", + "result": "1`" + }, + { + "comment": "Multiple literal expressions with escapes", + "expression": "`\"\\\\\"`.{a:`\"b\"`}", + "result": {"a": "b"} + }, + { + "comment": "literal . identifier", + "expression": "`{\"a\": \"b\"}`.a", + "result": "b" + }, + { + "comment": "literal . identifier . identifier", + "expression": "`{\"a\": {\"b\": \"c\"}}`.a.b", + "result": "c" + }, + { + "comment": "literal . 
identifier bracket-expr", + "expression": "`[0, 1, 2]`[1]", + "result": 1 + } + ] + }, + { + "comment": "Literals", + "given": {"type": "object"}, + "cases": [ + { + "comment": "Literal with leading whitespace", + "expression": "` {\"foo\": true}`", + "result": {"foo": true} + }, + { + "comment": "Literal with trailing whitespace", + "expression": "`{\"foo\": true} `", + "result": {"foo": true} + }, + { + "comment": "Literal on RHS of subexpr not allowed", + "expression": "foo.`\"bar\"`", + "error": "syntax" + } + ] + }, + { + "comment": "Raw String Literals", + "given": {}, + "cases": [ + { + "expression": "'foo'", + "result": "foo" + }, + { + "expression": "' foo '", + "result": " foo " + }, + { + "expression": "'0'", + "result": "0" + }, + { + "expression": "'newline\n'", + "result": "newline\n" + }, + { + "expression": "'\n'", + "result": "\n" + }, + { + "expression": "'✓'", + "result": "✓" + }, + { + "expression": "'ð„ž'", + "result": "ð„ž" + }, + { + "expression": "' [foo] '", + "result": " [foo] " + }, + { + "expression": "'[foo]'", + "result": "[foo]" + }, + { + "comment": "Do not interpret escaped unicode.", + "expression": "'\\u03a6'", + "result": "\\u03a6" + } + ] + } +] diff --git a/vendor/github.com/jmespath/go-jmespath/compliance/multiselect.json b/vendor/github.com/jmespath/go-jmespath/compliance/multiselect.json new file mode 100644 index 000000000..8f2a481ed --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/compliance/multiselect.json @@ -0,0 +1,393 @@ +[{ + "given": { + "foo": { + "bar": "bar", + "baz": "baz", + "qux": "qux", + "nested": { + "one": { + "a": "first", + "b": "second", + "c": "third" + }, + "two": { + "a": "first", + "b": "second", + "c": "third" + }, + "three": { + "a": "first", + "b": "second", + "c": {"inner": "third"} + } + } + }, + "bar": 1, + "baz": 2, + "qux\"": 3 + }, + "cases": [ + { + "expression": "foo.{bar: bar}", + "result": {"bar": "bar"} + }, + { + "expression": "foo.{\"bar\": bar}", + "result": {"bar": "bar"} + }, + { + "expression": "foo.{\"foo.bar\": bar}", + "result": {"foo.bar": "bar"} + }, + { + "expression": "foo.{bar: bar, baz: baz}", + "result": {"bar": "bar", "baz": "baz"} + }, + { + "expression": "foo.{\"bar\": bar, \"baz\": baz}", + "result": {"bar": "bar", "baz": "baz"} + }, + { + "expression": "{\"baz\": baz, \"qux\\\"\": \"qux\\\"\"}", + "result": {"baz": 2, "qux\"": 3} + }, + { + "expression": "foo.{bar:bar,baz:baz}", + "result": {"bar": "bar", "baz": "baz"} + }, + { + "expression": "foo.{bar: bar,qux: qux}", + "result": {"bar": "bar", "qux": "qux"} + }, + { + "expression": "foo.{bar: bar, noexist: noexist}", + "result": {"bar": "bar", "noexist": null} + }, + { + "expression": "foo.{noexist: noexist, alsonoexist: alsonoexist}", + "result": {"noexist": null, "alsonoexist": null} + }, + { + "expression": "foo.badkey.{nokey: nokey, alsonokey: alsonokey}", + "result": null + }, + { + "expression": "foo.nested.*.{a: a,b: b}", + "result": [{"a": "first", "b": "second"}, + {"a": "first", "b": "second"}, + {"a": "first", "b": "second"}] + }, + { + "expression": "foo.nested.three.{a: a, cinner: c.inner}", + "result": {"a": "first", "cinner": "third"} + }, + { + "expression": "foo.nested.three.{a: a, c: c.inner.bad.key}", + "result": {"a": "first", "c": null} + }, + { + "expression": "foo.{a: nested.one.a, b: nested.two.b}", + "result": {"a": "first", "b": "second"} + }, + { + "expression": "{bar: bar, baz: baz}", + "result": {"bar": 1, "baz": 2} + }, + { + "expression": "{bar: bar}", + "result": {"bar": 1} + }, + { + 
"expression": "{otherkey: bar}", + "result": {"otherkey": 1} + }, + { + "expression": "{no: no, exist: exist}", + "result": {"no": null, "exist": null} + }, + { + "expression": "foo.[bar]", + "result": ["bar"] + }, + { + "expression": "foo.[bar,baz]", + "result": ["bar", "baz"] + }, + { + "expression": "foo.[bar,qux]", + "result": ["bar", "qux"] + }, + { + "expression": "foo.[bar,noexist]", + "result": ["bar", null] + }, + { + "expression": "foo.[noexist,alsonoexist]", + "result": [null, null] + } + ] +}, { + "given": { + "foo": {"bar": 1, "baz": [2, 3, 4]} + }, + "cases": [ + { + "expression": "foo.{bar:bar,baz:baz}", + "result": {"bar": 1, "baz": [2, 3, 4]} + }, + { + "expression": "foo.[bar,baz[0]]", + "result": [1, 2] + }, + { + "expression": "foo.[bar,baz[1]]", + "result": [1, 3] + }, + { + "expression": "foo.[bar,baz[2]]", + "result": [1, 4] + }, + { + "expression": "foo.[bar,baz[3]]", + "result": [1, null] + }, + { + "expression": "foo.[bar[0],baz[3]]", + "result": [null, null] + } + ] +}, { + "given": { + "foo": {"bar": 1, "baz": 2} + }, + "cases": [ + { + "expression": "foo.{bar: bar, baz: baz}", + "result": {"bar": 1, "baz": 2} + }, + { + "expression": "foo.[bar,baz]", + "result": [1, 2] + } + ] +}, { + "given": { + "foo": { + "bar": {"baz": [{"common": "first", "one": 1}, + {"common": "second", "two": 2}]}, + "ignoreme": 1, + "includeme": true + } + }, + "cases": [ + { + "expression": "foo.{bar: bar.baz[1],includeme: includeme}", + "result": {"bar": {"common": "second", "two": 2}, "includeme": true} + }, + { + "expression": "foo.{\"bar.baz.two\": bar.baz[1].two, includeme: includeme}", + "result": {"bar.baz.two": 2, "includeme": true} + }, + { + "expression": "foo.[includeme, bar.baz[*].common]", + "result": [true, ["first", "second"]] + }, + { + "expression": "foo.[includeme, bar.baz[*].none]", + "result": [true, []] + }, + { + "expression": "foo.[includeme, bar.baz[].common]", + "result": [true, ["first", "second"]] + } + ] +}, { + "given": { + "reservations": [{ + "instances": [ + {"id": "id1", + "name": "first"}, + {"id": "id2", + "name": "second"} + ]}, { + "instances": [ + {"id": "id3", + "name": "third"}, + {"id": "id4", + "name": "fourth"} + ]} + ]}, + "cases": [ + { + "expression": "reservations[*].instances[*].{id: id, name: name}", + "result": [[{"id": "id1", "name": "first"}, {"id": "id2", "name": "second"}], + [{"id": "id3", "name": "third"}, {"id": "id4", "name": "fourth"}]] + }, + { + "expression": "reservations[].instances[].{id: id, name: name}", + "result": [{"id": "id1", "name": "first"}, + {"id": "id2", "name": "second"}, + {"id": "id3", "name": "third"}, + {"id": "id4", "name": "fourth"}] + }, + { + "expression": "reservations[].instances[].[id, name]", + "result": [["id1", "first"], + ["id2", "second"], + ["id3", "third"], + ["id4", "fourth"]] + } + ] +}, +{ + "given": { + "foo": [{ + "bar": [ + { + "qux": 2, + "baz": 1 + }, + { + "qux": 4, + "baz": 3 + } + ] + }, + { + "bar": [ + { + "qux": 6, + "baz": 5 + }, + { + "qux": 8, + "baz": 7 + } + ] + } + ] + }, + "cases": [ + { + "expression": "foo", + "result": [{"bar": [{"qux": 2, "baz": 1}, {"qux": 4, "baz": 3}]}, + {"bar": [{"qux": 6, "baz": 5}, {"qux": 8, "baz": 7}]}] + }, + { + "expression": "foo[]", + "result": [{"bar": [{"qux": 2, "baz": 1}, {"qux": 4, "baz": 3}]}, + {"bar": [{"qux": 6, "baz": 5}, {"qux": 8, "baz": 7}]}] + }, + { + "expression": "foo[].bar", + "result": [[{"qux": 2, "baz": 1}, {"qux": 4, "baz": 3}], + [{"qux": 6, "baz": 5}, {"qux": 8, "baz": 7}]] + }, + { + "expression": "foo[].bar[]", 
+ "result": [{"qux": 2, "baz": 1}, {"qux": 4, "baz": 3}, + {"qux": 6, "baz": 5}, {"qux": 8, "baz": 7}] + }, + { + "expression": "foo[].bar[].[baz, qux]", + "result": [[1, 2], [3, 4], [5, 6], [7, 8]] + }, + { + "expression": "foo[].bar[].[baz]", + "result": [[1], [3], [5], [7]] + }, + { + "expression": "foo[].bar[].[baz, qux][]", + "result": [1, 2, 3, 4, 5, 6, 7, 8] + } + ] +}, +{ + "given": { + "foo": { + "baz": [ + { + "bar": "abc" + }, { + "bar": "def" + } + ], + "qux": ["zero"] + } + }, + "cases": [ + { + "expression": "foo.[baz[*].bar, qux[0]]", + "result": [["abc", "def"], "zero"] + } + ] +}, +{ + "given": { + "foo": { + "baz": [ + { + "bar": "a", + "bam": "b", + "boo": "c" + }, { + "bar": "d", + "bam": "e", + "boo": "f" + } + ], + "qux": ["zero"] + } + }, + "cases": [ + { + "expression": "foo.[baz[*].[bar, boo], qux[0]]", + "result": [[["a", "c" ], ["d", "f" ]], "zero"] + } + ] +}, +{ + "given": { + "foo": { + "baz": [ + { + "bar": "a", + "bam": "b", + "boo": "c" + }, { + "bar": "d", + "bam": "e", + "boo": "f" + } + ], + "qux": ["zero"] + } + }, + "cases": [ + { + "expression": "foo.[baz[*].not_there || baz[*].bar, qux[0]]", + "result": [["a", "d"], "zero"] + } + ] +}, +{ + "given": {"type": "object"}, + "cases": [ + { + "comment": "Nested multiselect", + "expression": "[[*],*]", + "result": [null, ["object"]] + } + ] +}, +{ + "given": [], + "cases": [ + { + "comment": "Nested multiselect", + "expression": "[[*]]", + "result": [[]] + } + ] +} +] diff --git a/vendor/github.com/jmespath/go-jmespath/compliance/ormatch.json b/vendor/github.com/jmespath/go-jmespath/compliance/ormatch.json new file mode 100644 index 000000000..2127cf441 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/compliance/ormatch.json @@ -0,0 +1,59 @@ +[{ + "given": + {"outer": {"foo": "foo", "bar": "bar", "baz": "baz"}}, + "cases": [ + { + "expression": "outer.foo || outer.bar", + "result": "foo" + }, + { + "expression": "outer.foo||outer.bar", + "result": "foo" + }, + { + "expression": "outer.bar || outer.baz", + "result": "bar" + }, + { + "expression": "outer.bar||outer.baz", + "result": "bar" + }, + { + "expression": "outer.bad || outer.foo", + "result": "foo" + }, + { + "expression": "outer.bad||outer.foo", + "result": "foo" + }, + { + "expression": "outer.foo || outer.bad", + "result": "foo" + }, + { + "expression": "outer.foo||outer.bad", + "result": "foo" + }, + { + "expression": "outer.bad || outer.alsobad", + "result": null + }, + { + "expression": "outer.bad||outer.alsobad", + "result": null + } + ] +}, { + "given": + {"outer": {"foo": "foo", "bool": false, "empty_list": [], "empty_string": ""}}, + "cases": [ + { + "expression": "outer.empty_string || outer.foo", + "result": "foo" + }, + { + "expression": "outer.nokey || outer.bool || outer.empty_list || outer.empty_string || outer.foo", + "result": "foo" + } + ] +}] diff --git a/vendor/github.com/jmespath/go-jmespath/compliance/pipe.json b/vendor/github.com/jmespath/go-jmespath/compliance/pipe.json new file mode 100644 index 000000000..b10c0a496 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/compliance/pipe.json @@ -0,0 +1,131 @@ +[{ + "given": { + "foo": { + "bar": { + "baz": "subkey" + }, + "other": { + "baz": "subkey" + }, + "other2": { + "baz": "subkey" + }, + "other3": { + "notbaz": ["a", "b", "c"] + }, + "other4": { + "notbaz": ["a", "b", "c"] + } + } + }, + "cases": [ + { + "expression": "foo.*.baz | [0]", + "result": "subkey" + }, + { + "expression": "foo.*.baz | [1]", + "result": "subkey" + }, + { + "expression": "foo.*.baz 
| [2]", + "result": "subkey" + }, + { + "expression": "foo.bar.* | [0]", + "result": "subkey" + }, + { + "expression": "foo.*.notbaz | [*]", + "result": [["a", "b", "c"], ["a", "b", "c"]] + }, + { + "expression": "{\"a\": foo.bar, \"b\": foo.other} | *.baz", + "result": ["subkey", "subkey"] + } + ] +}, { + "given": { + "foo": { + "bar": { + "baz": "one" + }, + "other": { + "baz": "two" + }, + "other2": { + "baz": "three" + }, + "other3": { + "notbaz": ["a", "b", "c"] + }, + "other4": { + "notbaz": ["d", "e", "f"] + } + } + }, + "cases": [ + { + "expression": "foo | bar", + "result": {"baz": "one"} + }, + { + "expression": "foo | bar | baz", + "result": "one" + }, + { + "expression": "foo|bar| baz", + "result": "one" + }, + { + "expression": "not_there | [0]", + "result": null + }, + { + "expression": "not_there | [0]", + "result": null + }, + { + "expression": "[foo.bar, foo.other] | [0]", + "result": {"baz": "one"} + }, + { + "expression": "{\"a\": foo.bar, \"b\": foo.other} | a", + "result": {"baz": "one"} + }, + { + "expression": "{\"a\": foo.bar, \"b\": foo.other} | b", + "result": {"baz": "two"} + }, + { + "expression": "foo.bam || foo.bar | baz", + "result": "one" + }, + { + "expression": "foo | not_there || bar", + "result": {"baz": "one"} + } + ] +}, { + "given": { + "foo": [{ + "bar": [{ + "baz": "one" + }, { + "baz": "two" + }] + }, { + "bar": [{ + "baz": "three" + }, { + "baz": "four" + }] + }] + }, + "cases": [ + { + "expression": "foo[*].bar[*] | [0][0]", + "result": {"baz": "one"} + } + ] +}] diff --git a/vendor/github.com/jmespath/go-jmespath/compliance/slice.json b/vendor/github.com/jmespath/go-jmespath/compliance/slice.json new file mode 100644 index 000000000..359477278 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/compliance/slice.json @@ -0,0 +1,187 @@ +[{ + "given": { + "foo": [0, 1, 2, 3, 4, 5, 6, 7, 8, 9], + "bar": { + "baz": 1 + } + }, + "cases": [ + { + "expression": "bar[0:10]", + "result": null + }, + { + "expression": "foo[0:10:1]", + "result": [0, 1, 2, 3, 4, 5, 6, 7, 8, 9] + }, + { + "expression": "foo[0:10]", + "result": [0, 1, 2, 3, 4, 5, 6, 7, 8, 9] + }, + { + "expression": "foo[0:10:]", + "result": [0, 1, 2, 3, 4, 5, 6, 7, 8, 9] + }, + { + "expression": "foo[0::1]", + "result": [0, 1, 2, 3, 4, 5, 6, 7, 8, 9] + }, + { + "expression": "foo[0::]", + "result": [0, 1, 2, 3, 4, 5, 6, 7, 8, 9] + }, + { + "expression": "foo[0:]", + "result": [0, 1, 2, 3, 4, 5, 6, 7, 8, 9] + }, + { + "expression": "foo[:10:1]", + "result": [0, 1, 2, 3, 4, 5, 6, 7, 8, 9] + }, + { + "expression": "foo[::1]", + "result": [0, 1, 2, 3, 4, 5, 6, 7, 8, 9] + }, + { + "expression": "foo[:10:]", + "result": [0, 1, 2, 3, 4, 5, 6, 7, 8, 9] + }, + { + "expression": "foo[::]", + "result": [0, 1, 2, 3, 4, 5, 6, 7, 8, 9] + }, + { + "expression": "foo[:]", + "result": [0, 1, 2, 3, 4, 5, 6, 7, 8, 9] + }, + { + "expression": "foo[1:9]", + "result": [1, 2, 3, 4, 5, 6, 7, 8] + }, + { + "expression": "foo[0:10:2]", + "result": [0, 2, 4, 6, 8] + }, + { + "expression": "foo[5:]", + "result": [5, 6, 7, 8, 9] + }, + { + "expression": "foo[5::2]", + "result": [5, 7, 9] + }, + { + "expression": "foo[::2]", + "result": [0, 2, 4, 6, 8] + }, + { + "expression": "foo[::-1]", + "result": [9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + }, + { + "expression": "foo[1::2]", + "result": [1, 3, 5, 7, 9] + }, + { + "expression": "foo[10:0:-1]", + "result": [9, 8, 7, 6, 5, 4, 3, 2, 1] + }, + { + "expression": "foo[10:5:-1]", + "result": [9, 8, 7, 6] + }, + { + "expression": "foo[8:2:-2]", + "result": [8, 6, 4] + }, + { + 
"expression": "foo[0:20]", + "result": [0, 1, 2, 3, 4, 5, 6, 7, 8, 9] + }, + { + "expression": "foo[10:-20:-1]", + "result": [9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + }, + { + "expression": "foo[10:-20]", + "result": [] + }, + { + "expression": "foo[-4:-1]", + "result": [6, 7, 8] + }, + { + "expression": "foo[:-5:-1]", + "result": [9, 8, 7, 6] + }, + { + "expression": "foo[8:2:0]", + "error": "invalid-value" + }, + { + "expression": "foo[8:2:0:1]", + "error": "syntax" + }, + { + "expression": "foo[8:2&]", + "error": "syntax" + }, + { + "expression": "foo[2:a:3]", + "error": "syntax" + } + ] +}, { + "given": { + "foo": [{"a": 1}, {"a": 2}, {"a": 3}], + "bar": [{"a": {"b": 1}}, {"a": {"b": 2}}, + {"a": {"b": 3}}], + "baz": 50 + }, + "cases": [ + { + "expression": "foo[:2].a", + "result": [1, 2] + }, + { + "expression": "foo[:2].b", + "result": [] + }, + { + "expression": "foo[:2].a.b", + "result": [] + }, + { + "expression": "bar[::-1].a.b", + "result": [3, 2, 1] + }, + { + "expression": "bar[:2].a.b", + "result": [1, 2] + }, + { + "expression": "baz[:2].a", + "result": null + } + ] +}, { + "given": [{"a": 1}, {"a": 2}, {"a": 3}], + "cases": [ + { + "expression": "[:]", + "result": [{"a": 1}, {"a": 2}, {"a": 3}] + }, + { + "expression": "[:2].a", + "result": [1, 2] + }, + { + "expression": "[::-1].a", + "result": [3, 2, 1] + }, + { + "expression": "[:2].b", + "result": [] + } + ] +}] diff --git a/vendor/github.com/jmespath/go-jmespath/compliance/syntax.json b/vendor/github.com/jmespath/go-jmespath/compliance/syntax.json new file mode 100644 index 000000000..003c29458 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/compliance/syntax.json @@ -0,0 +1,616 @@ +[{ + "comment": "Dot syntax", + "given": {"type": "object"}, + "cases": [ + { + "expression": "foo.bar", + "result": null + }, + { + "expression": "foo.1", + "error": "syntax" + }, + { + "expression": "foo.-11", + "error": "syntax" + }, + { + "expression": "foo", + "result": null + }, + { + "expression": "foo.", + "error": "syntax" + }, + { + "expression": "foo.", + "error": "syntax" + }, + { + "expression": ".foo", + "error": "syntax" + }, + { + "expression": "foo..bar", + "error": "syntax" + }, + { + "expression": "foo.bar.", + "error": "syntax" + }, + { + "expression": "foo[.]", + "error": "syntax" + } + ] +}, + { + "comment": "Simple token errors", + "given": {"type": "object"}, + "cases": [ + { + "expression": ".", + "error": "syntax" + }, + { + "expression": ":", + "error": "syntax" + }, + { + "expression": ",", + "error": "syntax" + }, + { + "expression": "]", + "error": "syntax" + }, + { + "expression": "[", + "error": "syntax" + }, + { + "expression": "}", + "error": "syntax" + }, + { + "expression": "{", + "error": "syntax" + }, + { + "expression": ")", + "error": "syntax" + }, + { + "expression": "(", + "error": "syntax" + }, + { + "expression": "((&", + "error": "syntax" + }, + { + "expression": "a[", + "error": "syntax" + }, + { + "expression": "a]", + "error": "syntax" + }, + { + "expression": "a][", + "error": "syntax" + }, + { + "expression": "!", + "error": "syntax" + } + ] + }, + { + "comment": "Boolean syntax errors", + "given": {"type": "object"}, + "cases": [ + { + "expression": "![!(!", + "error": "syntax" + } + ] + }, + { + "comment": "Wildcard syntax", + "given": {"type": "object"}, + "cases": [ + { + "expression": "*", + "result": ["object"] + }, + { + "expression": "*.*", + "result": [] + }, + { + "expression": "*.foo", + "result": [] + }, + { + "expression": "*[0]", + "result": [] + }, + { + "expression": ".*", 
+ "error": "syntax" + }, + { + "expression": "*foo", + "error": "syntax" + }, + { + "expression": "*0", + "error": "syntax" + }, + { + "expression": "foo[*]bar", + "error": "syntax" + }, + { + "expression": "foo[*]*", + "error": "syntax" + } + ] + }, + { + "comment": "Flatten syntax", + "given": {"type": "object"}, + "cases": [ + { + "expression": "[]", + "result": null + } + ] + }, + { + "comment": "Simple bracket syntax", + "given": {"type": "object"}, + "cases": [ + { + "expression": "[0]", + "result": null + }, + { + "expression": "[*]", + "result": null + }, + { + "expression": "*.[0]", + "error": "syntax" + }, + { + "expression": "*.[\"0\"]", + "result": [[null]] + }, + { + "expression": "[*].bar", + "result": null + }, + { + "expression": "[*][0]", + "result": null + }, + { + "expression": "foo[#]", + "error": "syntax" + } + ] + }, + { + "comment": "Multi-select list syntax", + "given": {"type": "object"}, + "cases": [ + { + "expression": "foo[0]", + "result": null + }, + { + "comment": "Valid multi-select of a list", + "expression": "foo[0, 1]", + "error": "syntax" + }, + { + "expression": "foo.[0]", + "error": "syntax" + }, + { + "expression": "foo.[*]", + "result": null + }, + { + "comment": "Multi-select of a list with trailing comma", + "expression": "foo[0, ]", + "error": "syntax" + }, + { + "comment": "Multi-select of a list with trailing comma and no close", + "expression": "foo[0,", + "error": "syntax" + }, + { + "comment": "Multi-select of a list with trailing comma and no close", + "expression": "foo.[a", + "error": "syntax" + }, + { + "comment": "Multi-select of a list with extra comma", + "expression": "foo[0,, 1]", + "error": "syntax" + }, + { + "comment": "Multi-select of a list using an identifier index", + "expression": "foo[abc]", + "error": "syntax" + }, + { + "comment": "Multi-select of a list using identifier indices", + "expression": "foo[abc, def]", + "error": "syntax" + }, + { + "comment": "Multi-select of a list using an identifier index", + "expression": "foo[abc, 1]", + "error": "syntax" + }, + { + "comment": "Multi-select of a list using an identifier index with trailing comma", + "expression": "foo[abc, ]", + "error": "syntax" + }, + { + "comment": "Valid multi-select of a hash using an identifier index", + "expression": "foo.[abc]", + "result": null + }, + { + "comment": "Valid multi-select of a hash", + "expression": "foo.[abc, def]", + "result": null + }, + { + "comment": "Multi-select of a hash using a numeric index", + "expression": "foo.[abc, 1]", + "error": "syntax" + }, + { + "comment": "Multi-select of a hash with a trailing comma", + "expression": "foo.[abc, ]", + "error": "syntax" + }, + { + "comment": "Multi-select of a hash with extra commas", + "expression": "foo.[abc,, def]", + "error": "syntax" + }, + { + "comment": "Multi-select of a hash using number indices", + "expression": "foo.[0, 1]", + "error": "syntax" + } + ] + }, + { + "comment": "Multi-select hash syntax", + "given": {"type": "object"}, + "cases": [ + { + "comment": "No key or value", + "expression": "a{}", + "error": "syntax" + }, + { + "comment": "No closing token", + "expression": "a{", + "error": "syntax" + }, + { + "comment": "Not a key value pair", + "expression": "a{foo}", + "error": "syntax" + }, + { + "comment": "Missing value and closing character", + "expression": "a{foo:", + "error": "syntax" + }, + { + "comment": "Missing closing character", + "expression": "a{foo: 0", + "error": "syntax" + }, + { + "comment": "Missing value", + "expression": "a{foo:}", + "error": 
"syntax" + }, + { + "comment": "Trailing comma and no closing character", + "expression": "a{foo: 0, ", + "error": "syntax" + }, + { + "comment": "Missing value with trailing comma", + "expression": "a{foo: ,}", + "error": "syntax" + }, + { + "comment": "Accessing Array using an identifier", + "expression": "a{foo: bar}", + "error": "syntax" + }, + { + "expression": "a{foo: 0}", + "error": "syntax" + }, + { + "comment": "Missing key-value pair", + "expression": "a.{}", + "error": "syntax" + }, + { + "comment": "Not a key-value pair", + "expression": "a.{foo}", + "error": "syntax" + }, + { + "comment": "Missing value", + "expression": "a.{foo:}", + "error": "syntax" + }, + { + "comment": "Missing value with trailing comma", + "expression": "a.{foo: ,}", + "error": "syntax" + }, + { + "comment": "Valid multi-select hash extraction", + "expression": "a.{foo: bar}", + "result": null + }, + { + "comment": "Valid multi-select hash extraction", + "expression": "a.{foo: bar, baz: bam}", + "result": null + }, + { + "comment": "Trailing comma", + "expression": "a.{foo: bar, }", + "error": "syntax" + }, + { + "comment": "Missing key in second key-value pair", + "expression": "a.{foo: bar, baz}", + "error": "syntax" + }, + { + "comment": "Missing value in second key-value pair", + "expression": "a.{foo: bar, baz:}", + "error": "syntax" + }, + { + "comment": "Trailing comma", + "expression": "a.{foo: bar, baz: bam, }", + "error": "syntax" + }, + { + "comment": "Nested multi select", + "expression": "{\"\\\\\":{\" \":*}}", + "result": {"\\": {" ": ["object"]}} + } + ] + }, + { + "comment": "Or expressions", + "given": {"type": "object"}, + "cases": [ + { + "expression": "foo || bar", + "result": null + }, + { + "expression": "foo ||", + "error": "syntax" + }, + { + "expression": "foo.|| bar", + "error": "syntax" + }, + { + "expression": " || foo", + "error": "syntax" + }, + { + "expression": "foo || || foo", + "error": "syntax" + }, + { + "expression": "foo.[a || b]", + "result": null + }, + { + "expression": "foo.[a ||]", + "error": "syntax" + }, + { + "expression": "\"foo", + "error": "syntax" + } + ] + }, + { + "comment": "Filter expressions", + "given": {"type": "object"}, + "cases": [ + { + "expression": "foo[?bar==`\"baz\"`]", + "result": null + }, + { + "expression": "foo[? 
bar == `\"baz\"` ]", + "result": null + }, + { + "expression": "foo[ ?bar==`\"baz\"`]", + "error": "syntax" + }, + { + "expression": "foo[?bar==]", + "error": "syntax" + }, + { + "expression": "foo[?==]", + "error": "syntax" + }, + { + "expression": "foo[?==bar]", + "error": "syntax" + }, + { + "expression": "foo[?bar==baz?]", + "error": "syntax" + }, + { + "expression": "foo[?a.b.c==d.e.f]", + "result": null + }, + { + "expression": "foo[?bar==`[0, 1, 2]`]", + "result": null + }, + { + "expression": "foo[?bar==`[\"a\", \"b\", \"c\"]`]", + "result": null + }, + { + "comment": "Literal char not escaped", + "expression": "foo[?bar==`[\"foo`bar\"]`]", + "error": "syntax" + }, + { + "comment": "Literal char escaped", + "expression": "foo[?bar==`[\"foo\\`bar\"]`]", + "result": null + }, + { + "comment": "Unknown comparator", + "expression": "foo[?bar<>baz]", + "error": "syntax" + }, + { + "comment": "Unknown comparator", + "expression": "foo[?bar^baz]", + "error": "syntax" + }, + { + "expression": "foo[bar==baz]", + "error": "syntax" + }, + { + "comment": "Quoted identifier in filter expression no spaces", + "expression": "[?\"\\\\\">`\"foo\"`]", + "result": null + }, + { + "comment": "Quoted identifier in filter expression with spaces", + "expression": "[?\"\\\\\" > `\"foo\"`]", + "result": null + } + ] + }, + { + "comment": "Filter expression errors", + "given": {"type": "object"}, + "cases": [ + { + "expression": "bar.`\"anything\"`", + "error": "syntax" + }, + { + "expression": "bar.baz.noexists.`\"literal\"`", + "error": "syntax" + }, + { + "comment": "Literal wildcard projection", + "expression": "foo[*].`\"literal\"`", + "error": "syntax" + }, + { + "expression": "foo[*].name.`\"literal\"`", + "error": "syntax" + }, + { + "expression": "foo[].name.`\"literal\"`", + "error": "syntax" + }, + { + "expression": "foo[].name.`\"literal\"`.`\"subliteral\"`", + "error": "syntax" + }, + { + "comment": "Projecting a literal onto an empty list", + "expression": "foo[*].name.noexist.`\"literal\"`", + "error": "syntax" + }, + { + "expression": "foo[].name.noexist.`\"literal\"`", + "error": "syntax" + }, + { + "expression": "twolen[*].`\"foo\"`", + "error": "syntax" + }, + { + "comment": "Two level projection of a literal", + "expression": "twolen[*].threelen[*].`\"bar\"`", + "error": "syntax" + }, + { + "comment": "Two level flattened projection of a literal", + "expression": "twolen[].threelen[].`\"bar\"`", + "error": "syntax" + } + ] + }, + { + "comment": "Identifiers", + "given": {"type": "object"}, + "cases": [ + { + "expression": "foo", + "result": null + }, + { + "expression": "\"foo\"", + "result": null + }, + { + "expression": "\"\\\\\"", + "result": null + } + ] + }, + { + "comment": "Combined syntax", + "given": [], + "cases": [ + { + "expression": "*||*|*|*", + "result": null + }, + { + "expression": "*[]||[*]", + "result": [] + }, + { + "expression": "[*.*]", + "result": [null] + } + ] + } +] diff --git a/vendor/github.com/jmespath/go-jmespath/compliance/unicode.json b/vendor/github.com/jmespath/go-jmespath/compliance/unicode.json new file mode 100644 index 000000000..6b07b0b6d --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/compliance/unicode.json @@ -0,0 +1,38 @@ +[ + { + "given": {"foo": [{"✓": "✓"}, {"✓": "✗"}]}, + "cases": [ + { + "expression": "foo[].\"✓\"", + "result": ["✓", "✗"] + } + ] + }, + { + "given": {"☯": true}, + "cases": [ + { + "expression": "\"☯\"", + "result": true + } + ] + }, + { + "given": {"♪♫•*¨*•.¸¸â¤Â¸Â¸.•*¨*•♫♪": true}, + "cases": [ + { + 
"expression": "\"♪♫•*¨*•.¸¸â¤Â¸Â¸.•*¨*•♫♪\"", + "result": true + } + ] + }, + { + "given": {"☃": true}, + "cases": [ + { + "expression": "\"☃\"", + "result": true + } + ] + } +] diff --git a/vendor/github.com/jmespath/go-jmespath/compliance/wildcard.json b/vendor/github.com/jmespath/go-jmespath/compliance/wildcard.json new file mode 100644 index 000000000..3bcec3028 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/compliance/wildcard.json @@ -0,0 +1,460 @@ +[{ + "given": { + "foo": { + "bar": { + "baz": "val" + }, + "other": { + "baz": "val" + }, + "other2": { + "baz": "val" + }, + "other3": { + "notbaz": ["a", "b", "c"] + }, + "other4": { + "notbaz": ["a", "b", "c"] + }, + "other5": { + "other": { + "a": 1, + "b": 1, + "c": 1 + } + } + } + }, + "cases": [ + { + "expression": "foo.*.baz", + "result": ["val", "val", "val"] + }, + { + "expression": "foo.bar.*", + "result": ["val"] + }, + { + "expression": "foo.*.notbaz", + "result": [["a", "b", "c"], ["a", "b", "c"]] + }, + { + "expression": "foo.*.notbaz[0]", + "result": ["a", "a"] + }, + { + "expression": "foo.*.notbaz[-1]", + "result": ["c", "c"] + } + ] +}, { + "given": { + "foo": { + "first-1": { + "second-1": "val" + }, + "first-2": { + "second-1": "val" + }, + "first-3": { + "second-1": "val" + } + } + }, + "cases": [ + { + "expression": "foo.*", + "result": [{"second-1": "val"}, {"second-1": "val"}, + {"second-1": "val"}] + }, + { + "expression": "foo.*.*", + "result": [["val"], ["val"], ["val"]] + }, + { + "expression": "foo.*.*.*", + "result": [[], [], []] + }, + { + "expression": "foo.*.*.*.*", + "result": [[], [], []] + } + ] +}, { + "given": { + "foo": { + "bar": "one" + }, + "other": { + "bar": "one" + }, + "nomatch": { + "notbar": "three" + } + }, + "cases": [ + { + "expression": "*.bar", + "result": ["one", "one"] + } + ] +}, { + "given": { + "top1": { + "sub1": {"foo": "one"} + }, + "top2": { + "sub1": {"foo": "one"} + } + }, + "cases": [ + { + "expression": "*", + "result": [{"sub1": {"foo": "one"}}, + {"sub1": {"foo": "one"}}] + }, + { + "expression": "*.sub1", + "result": [{"foo": "one"}, + {"foo": "one"}] + }, + { + "expression": "*.*", + "result": [[{"foo": "one"}], + [{"foo": "one"}]] + }, + { + "expression": "*.*.foo[]", + "result": ["one", "one"] + }, + { + "expression": "*.sub1.foo", + "result": ["one", "one"] + } + ] +}, +{ + "given": + {"foo": [{"bar": "one"}, {"bar": "two"}, {"bar": "three"}, {"notbar": "four"}]}, + "cases": [ + { + "expression": "foo[*].bar", + "result": ["one", "two", "three"] + }, + { + "expression": "foo[*].notbar", + "result": ["four"] + } + ] +}, +{ + "given": + [{"bar": "one"}, {"bar": "two"}, {"bar": "three"}, {"notbar": "four"}], + "cases": [ + { + "expression": "[*]", + "result": [{"bar": "one"}, {"bar": "two"}, {"bar": "three"}, {"notbar": "four"}] + }, + { + "expression": "[*].bar", + "result": ["one", "two", "three"] + }, + { + "expression": "[*].notbar", + "result": ["four"] + } + ] +}, +{ + "given": { + "foo": { + "bar": [ + {"baz": ["one", "two", "three"]}, + {"baz": ["four", "five", "six"]}, + {"baz": ["seven", "eight", "nine"]} + ] + } + }, + "cases": [ + { + "expression": "foo.bar[*].baz", + "result": [["one", "two", "three"], ["four", "five", "six"], ["seven", "eight", "nine"]] + }, + { + "expression": "foo.bar[*].baz[0]", + "result": ["one", "four", "seven"] + }, + { + "expression": "foo.bar[*].baz[1]", + "result": ["two", "five", "eight"] + }, + { + "expression": "foo.bar[*].baz[2]", + "result": ["three", "six", "nine"] + }, + { + "expression": "foo.bar[*].baz[3]", 
+ "result": [] + } + ] +}, +{ + "given": { + "foo": { + "bar": [["one", "two"], ["three", "four"]] + } + }, + "cases": [ + { + "expression": "foo.bar[*]", + "result": [["one", "two"], ["three", "four"]] + }, + { + "expression": "foo.bar[0]", + "result": ["one", "two"] + }, + { + "expression": "foo.bar[0][0]", + "result": "one" + }, + { + "expression": "foo.bar[0][0][0]", + "result": null + }, + { + "expression": "foo.bar[0][0][0][0]", + "result": null + }, + { + "expression": "foo[0][0]", + "result": null + } + ] +}, +{ + "given": { + "foo": [ + {"bar": [{"kind": "basic"}, {"kind": "intermediate"}]}, + {"bar": [{"kind": "advanced"}, {"kind": "expert"}]}, + {"bar": "string"} + ] + + }, + "cases": [ + { + "expression": "foo[*].bar[*].kind", + "result": [["basic", "intermediate"], ["advanced", "expert"]] + }, + { + "expression": "foo[*].bar[0].kind", + "result": ["basic", "advanced"] + } + ] +}, +{ + "given": { + "foo": [ + {"bar": {"kind": "basic"}}, + {"bar": {"kind": "intermediate"}}, + {"bar": {"kind": "advanced"}}, + {"bar": {"kind": "expert"}}, + {"bar": "string"} + ] + }, + "cases": [ + { + "expression": "foo[*].bar.kind", + "result": ["basic", "intermediate", "advanced", "expert"] + } + ] +}, +{ + "given": { + "foo": [{"bar": ["one", "two"]}, {"bar": ["three", "four"]}, {"bar": ["five"]}] + }, + "cases": [ + { + "expression": "foo[*].bar[0]", + "result": ["one", "three", "five"] + }, + { + "expression": "foo[*].bar[1]", + "result": ["two", "four"] + }, + { + "expression": "foo[*].bar[2]", + "result": [] + } + ] +}, +{ + "given": { + "foo": [{"bar": []}, {"bar": []}, {"bar": []}] + }, + "cases": [ + { + "expression": "foo[*].bar[0]", + "result": [] + } + ] +}, +{ + "given": { + "foo": [["one", "two"], ["three", "four"], ["five"]] + }, + "cases": [ + { + "expression": "foo[*][0]", + "result": ["one", "three", "five"] + }, + { + "expression": "foo[*][1]", + "result": ["two", "four"] + } + ] +}, +{ + "given": { + "foo": [ + [ + ["one", "two"], ["three", "four"] + ], [ + ["five", "six"], ["seven", "eight"] + ], [ + ["nine"], ["ten"] + ] + ] + }, + "cases": [ + { + "expression": "foo[*][0]", + "result": [["one", "two"], ["five", "six"], ["nine"]] + }, + { + "expression": "foo[*][1]", + "result": [["three", "four"], ["seven", "eight"], ["ten"]] + }, + { + "expression": "foo[*][0][0]", + "result": ["one", "five", "nine"] + }, + { + "expression": "foo[*][1][0]", + "result": ["three", "seven", "ten"] + }, + { + "expression": "foo[*][0][1]", + "result": ["two", "six"] + }, + { + "expression": "foo[*][1][1]", + "result": ["four", "eight"] + }, + { + "expression": "foo[*][2]", + "result": [] + }, + { + "expression": "foo[*][2][2]", + "result": [] + }, + { + "expression": "bar[*]", + "result": null + }, + { + "expression": "bar[*].baz[*]", + "result": null + } + ] +}, +{ + "given": { + "string": "string", + "hash": {"foo": "bar", "bar": "baz"}, + "number": 23, + "nullvalue": null + }, + "cases": [ + { + "expression": "string[*]", + "result": null + }, + { + "expression": "hash[*]", + "result": null + }, + { + "expression": "number[*]", + "result": null + }, + { + "expression": "nullvalue[*]", + "result": null + }, + { + "expression": "string[*].foo", + "result": null + }, + { + "expression": "hash[*].foo", + "result": null + }, + { + "expression": "number[*].foo", + "result": null + }, + { + "expression": "nullvalue[*].foo", + "result": null + }, + { + "expression": "nullvalue[*].foo[*].bar", + "result": null + } + ] +}, +{ + "given": { + "string": "string", + "hash": {"foo": "val", "bar": "val"}, 
+ "number": 23, + "array": [1, 2, 3], + "nullvalue": null + }, + "cases": [ + { + "expression": "string.*", + "result": null + }, + { + "expression": "hash.*", + "result": ["val", "val"] + }, + { + "expression": "number.*", + "result": null + }, + { + "expression": "array.*", + "result": null + }, + { + "expression": "nullvalue.*", + "result": null + } + ] +}, +{ + "given": { + "a": [0, 1, 2], + "b": [0, 1, 2] + }, + "cases": [ + { + "expression": "*[0]", + "result": [0, 0] + } + ] +} +] diff --git a/vendor/github.com/jmespath/go-jmespath/compliance_test.go b/vendor/github.com/jmespath/go-jmespath/compliance_test.go new file mode 100644 index 000000000..4ee9c959d --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/compliance_test.go @@ -0,0 +1,123 @@ +package jmespath + +import ( + "encoding/json" + "fmt" + "io/ioutil" + "os" + "path/filepath" + "testing" + + "github.com/stretchr/testify/assert" +) + +type TestSuite struct { + Given interface{} + TestCases []TestCase `json:"cases"` + Comment string +} +type TestCase struct { + Comment string + Expression string + Result interface{} + Error string +} + +var whiteListed = []string{ + "compliance/basic.json", + "compliance/current.json", + "compliance/escape.json", + "compliance/filters.json", + "compliance/functions.json", + "compliance/identifiers.json", + "compliance/indices.json", + "compliance/literal.json", + "compliance/multiselect.json", + "compliance/ormatch.json", + "compliance/pipe.json", + "compliance/slice.json", + "compliance/syntax.json", + "compliance/unicode.json", + "compliance/wildcard.json", + "compliance/boolean.json", +} + +func allowed(path string) bool { + for _, el := range whiteListed { + if el == path { + return true + } + } + return false +} + +func TestCompliance(t *testing.T) { + assert := assert.New(t) + + var complianceFiles []string + err := filepath.Walk("compliance", func(path string, _ os.FileInfo, _ error) error { + //if strings.HasSuffix(path, ".json") { + if allowed(path) { + complianceFiles = append(complianceFiles, path) + } + return nil + }) + if assert.Nil(err) { + for _, filename := range complianceFiles { + runComplianceTest(assert, filename) + } + } +} + +func runComplianceTest(assert *assert.Assertions, filename string) { + var testSuites []TestSuite + data, err := ioutil.ReadFile(filename) + if assert.Nil(err) { + err := json.Unmarshal(data, &testSuites) + if assert.Nil(err) { + for _, testsuite := range testSuites { + runTestSuite(assert, testsuite, filename) + } + } + } +} + +func runTestSuite(assert *assert.Assertions, testsuite TestSuite, filename string) { + for _, testcase := range testsuite.TestCases { + if testcase.Error != "" { + // This is a test case that verifies we error out properly. + runSyntaxTestCase(assert, testsuite.Given, testcase, filename) + } else { + runTestCase(assert, testsuite.Given, testcase, filename) + } + } +} + +func runSyntaxTestCase(assert *assert.Assertions, given interface{}, testcase TestCase, filename string) { + // Anything with an .Error means that we expect that JMESPath should return + // an error when we try to evaluate the expression. 
+ _, err := Search(testcase.Expression, given) + assert.NotNil(err, fmt.Sprintf("Expression: %s", testcase.Expression)) +} + +func runTestCase(assert *assert.Assertions, given interface{}, testcase TestCase, filename string) { + lexer := NewLexer() + var err error + _, err = lexer.tokenize(testcase.Expression) + if err != nil { + errMsg := fmt.Sprintf("(%s) Could not lex expression: %s -- %s", filename, testcase.Expression, err.Error()) + assert.Fail(errMsg) + return + } + parser := NewParser() + _, err = parser.Parse(testcase.Expression) + if err != nil { + errMsg := fmt.Sprintf("(%s) Could not parse expression: %s -- %s", filename, testcase.Expression, err.Error()) + assert.Fail(errMsg) + return + } + actual, err := Search(testcase.Expression, given) + if assert.Nil(err, fmt.Sprintf("Expression: %s", testcase.Expression)) { + assert.Equal(testcase.Result, actual, fmt.Sprintf("Expression: %s", testcase.Expression)) + } +} diff --git a/vendor/github.com/jmespath/go-jmespath/functions.go b/vendor/github.com/jmespath/go-jmespath/functions.go new file mode 100644 index 000000000..8a3f2ef0d --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/functions.go @@ -0,0 +1,840 @@ +package jmespath + +import ( + "encoding/json" + "errors" + "fmt" + "math" + "sort" + "strconv" + "strings" + "unicode/utf8" +) + +type jpFunction func(arguments []interface{}) (interface{}, error) + +type jpType string + +const ( + jpUnknown jpType = "unknown" + jpNumber jpType = "number" + jpString jpType = "string" + jpArray jpType = "array" + jpObject jpType = "object" + jpArrayNumber jpType = "array[number]" + jpArrayString jpType = "array[string]" + jpExpref jpType = "expref" + jpAny jpType = "any" +) + +type functionEntry struct { + name string + arguments []argSpec + handler jpFunction + hasExpRef bool +} + +type argSpec struct { + types []jpType + variadic bool +} + +type byExprString struct { + intr *treeInterpreter + node ASTNode + items []interface{} + hasError bool +} + +func (a *byExprString) Len() int { + return len(a.items) +} +func (a *byExprString) Swap(i, j int) { + a.items[i], a.items[j] = a.items[j], a.items[i] +} +func (a *byExprString) Less(i, j int) bool { + first, err := a.intr.Execute(a.node, a.items[i]) + if err != nil { + a.hasError = true + // Return a dummy value. + return true + } + ith, ok := first.(string) + if !ok { + a.hasError = true + return true + } + second, err := a.intr.Execute(a.node, a.items[j]) + if err != nil { + a.hasError = true + // Return a dummy value. + return true + } + jth, ok := second.(string) + if !ok { + a.hasError = true + return true + } + return ith < jth +} + +type byExprFloat struct { + intr *treeInterpreter + node ASTNode + items []interface{} + hasError bool +} + +func (a *byExprFloat) Len() int { + return len(a.items) +} +func (a *byExprFloat) Swap(i, j int) { + a.items[i], a.items[j] = a.items[j], a.items[i] +} +func (a *byExprFloat) Less(i, j int) bool { + first, err := a.intr.Execute(a.node, a.items[i]) + if err != nil { + a.hasError = true + // Return a dummy value. + return true + } + ith, ok := first.(float64) + if !ok { + a.hasError = true + return true + } + second, err := a.intr.Execute(a.node, a.items[j]) + if err != nil { + a.hasError = true + // Return a dummy value. 
+ return true + } + jth, ok := second.(float64) + if !ok { + a.hasError = true + return true + } + return ith < jth +} + +type functionCaller struct { + functionTable map[string]functionEntry +} + +func newFunctionCaller() *functionCaller { + caller := &functionCaller{} + caller.functionTable = map[string]functionEntry{ + "length": functionEntry{ + name: "length", + arguments: []argSpec{ + argSpec{types: []jpType{jpString, jpArray, jpObject}}, + }, + handler: jpfLength, + }, + "starts_with": functionEntry{ + name: "starts_with", + arguments: []argSpec{ + argSpec{types: []jpType{jpString}}, + argSpec{types: []jpType{jpString}}, + }, + handler: jpfStartsWith, + }, + "abs": functionEntry{ + name: "abs", + arguments: []argSpec{ + argSpec{types: []jpType{jpNumber}}, + }, + handler: jpfAbs, + }, + "avg": functionEntry{ + name: "avg", + arguments: []argSpec{ + argSpec{types: []jpType{jpArrayNumber}}, + }, + handler: jpfAvg, + }, + "ceil": functionEntry{ + name: "ceil", + arguments: []argSpec{ + argSpec{types: []jpType{jpNumber}}, + }, + handler: jpfCeil, + }, + "contains": functionEntry{ + name: "contains", + arguments: []argSpec{ + argSpec{types: []jpType{jpArray, jpString}}, + argSpec{types: []jpType{jpAny}}, + }, + handler: jpfContains, + }, + "ends_with": functionEntry{ + name: "ends_with", + arguments: []argSpec{ + argSpec{types: []jpType{jpString}}, + argSpec{types: []jpType{jpString}}, + }, + handler: jpfEndsWith, + }, + "floor": functionEntry{ + name: "floor", + arguments: []argSpec{ + argSpec{types: []jpType{jpNumber}}, + }, + handler: jpfFloor, + }, + "map": functionEntry{ + name: "map", + arguments: []argSpec{ + argSpec{types: []jpType{jpExpref}}, + argSpec{types: []jpType{jpArray}}, + }, + handler: jpfMap, + hasExpRef: true, + }, + "max": functionEntry{ + name: "max", + arguments: []argSpec{ + argSpec{types: []jpType{jpArrayNumber, jpArrayString}}, + }, + handler: jpfMax, + }, + "merge": functionEntry{ + name: "merge", + arguments: []argSpec{ + argSpec{types: []jpType{jpObject}, variadic: true}, + }, + handler: jpfMerge, + }, + "max_by": functionEntry{ + name: "max_by", + arguments: []argSpec{ + argSpec{types: []jpType{jpArray}}, + argSpec{types: []jpType{jpExpref}}, + }, + handler: jpfMaxBy, + hasExpRef: true, + }, + "sum": functionEntry{ + name: "sum", + arguments: []argSpec{ + argSpec{types: []jpType{jpArrayNumber}}, + }, + handler: jpfSum, + }, + "min": functionEntry{ + name: "min", + arguments: []argSpec{ + argSpec{types: []jpType{jpArrayNumber, jpArrayString}}, + }, + handler: jpfMin, + }, + "min_by": functionEntry{ + name: "min_by", + arguments: []argSpec{ + argSpec{types: []jpType{jpArray}}, + argSpec{types: []jpType{jpExpref}}, + }, + handler: jpfMinBy, + hasExpRef: true, + }, + "type": functionEntry{ + name: "type", + arguments: []argSpec{ + argSpec{types: []jpType{jpAny}}, + }, + handler: jpfType, + }, + "keys": functionEntry{ + name: "keys", + arguments: []argSpec{ + argSpec{types: []jpType{jpObject}}, + }, + handler: jpfKeys, + }, + "values": functionEntry{ + name: "values", + arguments: []argSpec{ + argSpec{types: []jpType{jpObject}}, + }, + handler: jpfValues, + }, + "sort": functionEntry{ + name: "sort", + arguments: []argSpec{ + argSpec{types: []jpType{jpArrayString, jpArrayNumber}}, + }, + handler: jpfSort, + }, + "sort_by": functionEntry{ + name: "sort_by", + arguments: []argSpec{ + argSpec{types: []jpType{jpArray}}, + argSpec{types: []jpType{jpExpref}}, + }, + handler: jpfSortBy, + hasExpRef: true, + }, + "join": functionEntry{ + name: "join", + arguments:
[]argSpec{ + argSpec{types: []jpType{jpString}}, + argSpec{types: []jpType{jpArrayString}}, + }, + handler: jpfJoin, + }, + "reverse": functionEntry{ + name: "reverse", + arguments: []argSpec{ + argSpec{types: []jpType{jpArray, jpString}}, + }, + handler: jpfReverse, + }, + "to_array": functionEntry{ + name: "to_array", + arguments: []argSpec{ + argSpec{types: []jpType{jpAny}}, + }, + handler: jpfToArray, + }, + "to_string": functionEntry{ + name: "to_string", + arguments: []argSpec{ + argSpec{types: []jpType{jpAny}}, + }, + handler: jpfToString, + }, + "to_number": functionEntry{ + name: "to_number", + arguments: []argSpec{ + argSpec{types: []jpType{jpAny}}, + }, + handler: jpfToNumber, + }, + "not_null": functionEntry{ + name: "not_null", + arguments: []argSpec{ + argSpec{types: []jpType{jpAny}, variadic: true}, + }, + handler: jpfNotNull, + }, + } + return caller +} + +func (e *functionEntry) resolveArgs(arguments []interface{}) ([]interface{}, error) { + if len(e.arguments) == 0 { + return arguments, nil + } + if !e.arguments[len(e.arguments)-1].variadic { + if len(e.arguments) != len(arguments) { + return nil, errors.New("incorrect number of args") + } + for i, spec := range e.arguments { + userArg := arguments[i] + err := spec.typeCheck(userArg) + if err != nil { + return nil, err + } + } + return arguments, nil + } + if len(arguments) < len(e.arguments) { + return nil, errors.New("Invalid arity.") + } + return arguments, nil +} + +func (a *argSpec) typeCheck(arg interface{}) error { + for _, t := range a.types { + switch t { + case jpNumber: + if _, ok := arg.(float64); ok { + return nil + } + case jpString: + if _, ok := arg.(string); ok { + return nil + } + case jpArray: + if _, ok := arg.([]interface{}); ok { + return nil + } + case jpObject: + if _, ok := arg.(map[string]interface{}); ok { + return nil + } + case jpArrayNumber: + if _, ok := toArrayNum(arg); ok { + return nil + } + case jpArrayString: + if _, ok := toArrayStr(arg); ok { + return nil + } + case jpAny: + return nil + case jpExpref: + if _, ok := arg.(expRef); ok { + return nil + } + } + } + return fmt.Errorf("Invalid type for: %v, expected: %#v", arg, a.types) +} + +func (f *functionCaller) CallFunction(name string, arguments []interface{}, intr *treeInterpreter) (interface{}, error) { + entry, ok := f.functionTable[name] + if !ok { + return nil, errors.New("unknown function: " + name) + } + resolvedArgs, err := entry.resolveArgs(arguments) + if err != nil { + return nil, err + } + if entry.hasExpRef { + var extra []interface{} + extra = append(extra, intr) + resolvedArgs = append(extra, resolvedArgs...) + } + return entry.handler(resolvedArgs) +} + +func jpfAbs(arguments []interface{}) (interface{}, error) { + num := arguments[0].(float64) + return math.Abs(num), nil +} + +func jpfLength(arguments []interface{}) (interface{}, error) { + arg := arguments[0] + if c, ok := arg.(string); ok { + return float64(utf8.RuneCountInString(c)), nil + } else if c, ok := arg.([]interface{}); ok { + return float64(len(c)), nil + } else if c, ok := arg.(map[string]interface{}); ok { + return float64(len(c)), nil + } + return nil, errors.New("could not compute length()") +} + +func jpfStartsWith(arguments []interface{}) (interface{}, error) { + search := arguments[0].(string) + prefix := arguments[1].(string) + return strings.HasPrefix(search, prefix), nil +} + +func jpfAvg(arguments []interface{}) (interface{}, error) { + // We've already type checked the value so we can safely use + // type assertions. 
+ args := arguments[0].([]interface{}) + length := float64(len(args)) + numerator := 0.0 + for _, n := range args { + numerator += n.(float64) + } + return numerator / length, nil +} +func jpfCeil(arguments []interface{}) (interface{}, error) { + val := arguments[0].(float64) + return math.Ceil(val), nil +} +func jpfContains(arguments []interface{}) (interface{}, error) { + search := arguments[0] + el := arguments[1] + if searchStr, ok := search.(string); ok { + if elStr, ok := el.(string); ok { + return strings.Index(searchStr, elStr) != -1, nil + } + return false, nil + } + // Otherwise this is a generic contains for []interface{} + general := search.([]interface{}) + for _, item := range general { + if item == el { + return true, nil + } + } + return false, nil +} +func jpfEndsWith(arguments []interface{}) (interface{}, error) { + search := arguments[0].(string) + suffix := arguments[1].(string) + return strings.HasSuffix(search, suffix), nil +} +func jpfFloor(arguments []interface{}) (interface{}, error) { + val := arguments[0].(float64) + return math.Floor(val), nil +} +func jpfMap(arguments []interface{}) (interface{}, error) { + intr := arguments[0].(*treeInterpreter) + exp := arguments[1].(expRef) + node := exp.ref + arr := arguments[2].([]interface{}) + mapped := make([]interface{}, 0, len(arr)) + for _, value := range arr { + current, err := intr.Execute(node, value) + if err != nil { + return nil, err + } + mapped = append(mapped, current) + } + return mapped, nil +} +func jpfMax(arguments []interface{}) (interface{}, error) { + if items, ok := toArrayNum(arguments[0]); ok { + if len(items) == 0 { + return nil, nil + } + if len(items) == 1 { + return items[0], nil + } + best := items[0] + for _, item := range items[1:] { + if item > best { + best = item + } + } + return best, nil + } + // Otherwise we're dealing with a max() of strings. 
+ items, _ := toArrayStr(arguments[0]) + if len(items) == 0 { + return nil, nil + } + if len(items) == 1 { + return items[0], nil + } + best := items[0] + for _, item := range items[1:] { + if item > best { + best = item + } + } + return best, nil +} +func jpfMerge(arguments []interface{}) (interface{}, error) { + final := make(map[string]interface{}) + for _, m := range arguments { + mapped := m.(map[string]interface{}) + for key, value := range mapped { + final[key] = value + } + } + return final, nil +} +func jpfMaxBy(arguments []interface{}) (interface{}, error) { + intr := arguments[0].(*treeInterpreter) + arr := arguments[1].([]interface{}) + exp := arguments[2].(expRef) + node := exp.ref + if len(arr) == 0 { + return nil, nil + } else if len(arr) == 1 { + return arr[0], nil + } + start, err := intr.Execute(node, arr[0]) + if err != nil { + return nil, err + } + switch t := start.(type) { + case float64: + bestVal := t + bestItem := arr[0] + for _, item := range arr[1:] { + result, err := intr.Execute(node, item) + if err != nil { + return nil, err + } + current, ok := result.(float64) + if !ok { + return nil, errors.New("invalid type, must be number") + } + if current > bestVal { + bestVal = current + bestItem = item + } + } + return bestItem, nil + case string: + bestVal := t + bestItem := arr[0] + for _, item := range arr[1:] { + result, err := intr.Execute(node, item) + if err != nil { + return nil, err + } + current, ok := result.(string) + if !ok { + return nil, errors.New("invalid type, must be string") + } + if current > bestVal { + bestVal = current + bestItem = item + } + } + return bestItem, nil + default: + return nil, errors.New("invalid type, must be number or string") + } +} +func jpfSum(arguments []interface{}) (interface{}, error) { + items, _ := toArrayNum(arguments[0]) + sum := 0.0 + for _, item := range items { + sum += item + } + return sum, nil +} + +func jpfMin(arguments []interface{}) (interface{}, error) { + if items, ok := toArrayNum(arguments[0]); ok { + if len(items) == 0 { + return nil, nil + } + if len(items) == 1 { + return items[0], nil + } + best := items[0] + for _, item := range items[1:] { + if item < best { + best = item + } + } + return best, nil + } + items, _ := toArrayStr(arguments[0]) + if len(items) == 0 { + return nil, nil + } + if len(items) == 1 { + return items[0], nil + } + best := items[0] + for _, item := range items[1:] { + if item < best { + best = item + } + } + return best, nil +} + +func jpfMinBy(arguments []interface{}) (interface{}, error) { + intr := arguments[0].(*treeInterpreter) + arr := arguments[1].([]interface{}) + exp := arguments[2].(expRef) + node := exp.ref + if len(arr) == 0 { + return nil, nil + } else if len(arr) == 1 { + return arr[0], nil + } + start, err := intr.Execute(node, arr[0]) + if err != nil { + return nil, err + } + if t, ok := start.(float64); ok { + bestVal := t + bestItem := arr[0] + for _, item := range arr[1:] { + result, err := intr.Execute(node, item) + if err != nil { + return nil, err + } + current, ok := result.(float64) + if !ok { + return nil, errors.New("invalid type, must be number") + } + if current < bestVal { + bestVal = current + bestItem = item + } + } + return bestItem, nil + } else if t, ok := start.(string); ok { + bestVal := t + bestItem := arr[0] + for _, item := range arr[1:] { + result, err := intr.Execute(node, item) + if err != nil { + return nil, err + } + current, ok := result.(string) + if !ok { + return nil, errors.New("invalid type, must be string") + } + if current <
bestVal { + bestVal = current + bestItem = item + } + } + return bestItem, nil + } else { + return nil, errors.New("invalid type, must be number or string") + } +} +func jpfType(arguments []interface{}) (interface{}, error) { + arg := arguments[0] + if _, ok := arg.(float64); ok { + return "number", nil + } + if _, ok := arg.(string); ok { + return "string", nil + } + if _, ok := arg.([]interface{}); ok { + return "array", nil + } + if _, ok := arg.(map[string]interface{}); ok { + return "object", nil + } + if arg == nil { + return "null", nil + } + if arg == true || arg == false { + return "boolean", nil + } + return nil, errors.New("unknown type") +} +func jpfKeys(arguments []interface{}) (interface{}, error) { + arg := arguments[0].(map[string]interface{}) + collected := make([]interface{}, 0, len(arg)) + for key := range arg { + collected = append(collected, key) + } + return collected, nil +} +func jpfValues(arguments []interface{}) (interface{}, error) { + arg := arguments[0].(map[string]interface{}) + collected := make([]interface{}, 0, len(arg)) + for _, value := range arg { + collected = append(collected, value) + } + return collected, nil +} +func jpfSort(arguments []interface{}) (interface{}, error) { + if items, ok := toArrayNum(arguments[0]); ok { + d := sort.Float64Slice(items) + sort.Stable(d) + final := make([]interface{}, len(d)) + for i, val := range d { + final[i] = val + } + return final, nil + } + // Otherwise we're dealing with sort()'ing strings. + items, _ := toArrayStr(arguments[0]) + d := sort.StringSlice(items) + sort.Stable(d) + final := make([]interface{}, len(d)) + for i, val := range d { + final[i] = val + } + return final, nil +} +func jpfSortBy(arguments []interface{}) (interface{}, error) { + intr := arguments[0].(*treeInterpreter) + arr := arguments[1].([]interface{}) + exp := arguments[2].(expRef) + node := exp.ref + if len(arr) == 0 { + return arr, nil + } else if len(arr) == 1 { + return arr, nil + } + start, err := intr.Execute(node, arr[0]) + if err != nil { + return nil, err + } + if _, ok := start.(float64); ok { + sortable := &byExprFloat{intr, node, arr, false} + sort.Stable(sortable) + if sortable.hasError { + return nil, errors.New("error in sort_by comparison") + } + return arr, nil + } else if _, ok := start.(string); ok { + sortable := &byExprString{intr, node, arr, false} + sort.Stable(sortable) + if sortable.hasError { + return nil, errors.New("error in sort_by comparison") + } + return arr, nil + } else { + return nil, errors.New("invalid type, must be number or string") + } +} +func jpfJoin(arguments []interface{}) (interface{}, error) { + sep := arguments[0].(string) + // We can't just do arguments[1].([]string), we have to + // manually convert each item to a string.
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-1 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-1
new file mode 100644
index 000000000..191028156
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-1
@@ -0,0 +1 @@
+foo
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-10 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-10
new file mode 100644
index 000000000..4d5f9756e
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-10
@@ -0,0 +1 @@
+foo.bar
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-100 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-100
new file mode 100644
index 000000000..bc4f6a3f4
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-100
@@ -0,0 +1 @@
+ends_with(str, 'SStr')
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-101 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-101
new file mode 100644
index 000000000..81bf07a7a
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-101
@@ -0,0 +1 @@
+ends_with(str, 'foo')
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-102 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-102
new file mode 100644
index 000000000..3225de913
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-102
@@ -0,0 +1 @@
+floor(`1.2`)
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-103 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-103
new file mode 100644
index 000000000..8cac95958
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-103
@@ -0,0 +1 @@
+floor(decimals[0])
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-104 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-104
new file mode 100644
index 000000000..bd76f47e2
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-104
@@ -0,0 +1 @@
+floor(foo)
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-105 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-105
new file mode 100644
index 000000000..c719add3d
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-105
@@ -0,0 +1 @@
+length('abc')
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-106 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-106
new file mode 100644
index 000000000..ff12f04f1
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-106
@@ -0,0 +1 @@
+length('')
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-107 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-107
new file mode 100644
index 000000000..0eccba1d3
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-107
@@ -0,0 +1 @@
+length(@)
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-108 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-108
new file mode 100644
index 000000000..ab14b0fa8
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-108
@@ -0,0 +1 @@
+length(strings[0])
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-109 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-109
new file mode 100644
index 000000000..f1514bb74
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-109
@@ -0,0 +1 @@
+length(str)
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-110 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-110
new file mode 100644
index 000000000..09276059a
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-110
@@ -0,0 +1 @@
+length(array)
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-112 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-112
new file mode 100644
index 000000000..ab14b0fa8
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-112
@@ -0,0 +1 @@
+length(strings[0])
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-115 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-115
new file mode 100644
index 000000000..bfb41ae98
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-115
@@ -0,0 +1 @@
+max(strings)
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-118 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-118
new file mode 100644
index 000000000..915ec172a
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-118
@@ -0,0 +1 @@
+merge(`{}`)
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-119 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-119
new file mode 100644
index 000000000..5b74e9b59
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-119
@@ -0,0 +1 @@
+merge(`{}`, `{}`)
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-12 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-12
new file mode 100644
index 000000000..64c5e5885
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-12
@@ -0,0 +1 @@
+two
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-120 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-120
new file mode 100644
index 000000000..f34dcd8fa
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-120
@@ -0,0 +1 @@
+merge(`{"a": 1}`, `{"b": 2}`)
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-121 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-121
new file mode 100644
index 000000000..e335dc96f
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-121
@@ -0,0 +1 @@
+merge(`{"a": 1}`, `{"a": 2}`)
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-122 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-122
new file mode 100644
index 000000000..aac28fffe
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-122
@@ -0,0 +1 @@
+merge(`{"a": 1, "b": 2}`, `{"a": 2, "c": 3}`, `{"d": 4}`)
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-123 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-123
new file mode 100644
index 000000000..1c6fd6719
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-123
@@ -0,0 +1 @@
+min(numbers)
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-126 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-126
new file mode 100644
index 000000000..93e68db77
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-126
@@ -0,0 +1 @@
+min(decimals)
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-128 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-128
new file mode 100644
index 000000000..554601ea4
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-128
@@ -0,0 +1 @@
+type('abc')
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-129 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-129
new file mode 100644
index 000000000..1ab2d9834
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-129
@@ -0,0 +1 @@
+type(`1.0`)
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-13 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-13
new file mode 100644
index 000000000..1d19714ff
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-13
@@ -0,0 +1 @@
+three
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-130 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-130
new file mode 100644
index 000000000..3cee2f56f
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-130
@@ -0,0 +1 @@
+type(`2`)
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-131 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-131
new file mode 100644
index 000000000..4821f9aef
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-131
@@ -0,0 +1 @@
+type(`true`)
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-132 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-132
new file mode 100644
index 000000000..40b6913a6
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-132
@@ -0,0 +1 @@
+type(`false`)
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-133 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-133
new file mode 100644
index 000000000..c711252be
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-133
@@ -0,0 +1 @@
+type(`null`)
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-134 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-134
new file mode 100644
index 000000000..ec5d07e95
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-134
@@ -0,0 +1 @@
+type(`[0]`)
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-135 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-135
new file mode 100644
index 000000000..2080401e1
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-135
@@ -0,0 +1 @@
+type(`{"a": "b"}`)
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-136 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-136
new file mode 100644
index 000000000..c5ee2ba5c
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-136
@@ -0,0 +1 @@
+type(@)
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-137 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-137
new file mode 100644
index 000000000..1814ca17b
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-137
@@ -0,0 +1 @@
+keys(objects)
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-138 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-138
new file mode 100644
index 000000000..e03cdb0d6
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-138
@@ -0,0 +1 @@
+values(objects)
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-139 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-139
new file mode 100644
index 000000000..7fea8d2ce
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-139
@@ -0,0 +1 @@
+keys(empty_hash)
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-14 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-14
new file mode 100644
index 000000000..a17c92f59
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-14
@@ -0,0 +1 @@
+one.two
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-140 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-140
new file mode 100644
index 000000000..4f1d882a4
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-140
@@ -0,0 +1 @@
+join(', ', strings)
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-141 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-141
new file mode 100644
index 000000000..4f1d882a4
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-141
@@ -0,0 +1 @@
+join(', ', strings)
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-142 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-142
new file mode 100644
index 000000000..19ec1fe09
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-142
@@ -0,0 +1 @@
+join(',', `["a", "b"]`)
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-143 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-143
new file mode 100644
index 000000000..761c68a6b
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-143
@@ -0,0 +1 @@
+join('|', strings)
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-144 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-144
new file mode 100644
index 000000000..a0dd68eaa
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-144
@@ -0,0 +1 @@
+join('|', decimals[].to_string(@))
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-145 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-145
new file mode 100644
index 000000000..a4190b2ba
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-145
@@ -0,0 +1 @@
+join('|', empty_list)
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-146 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-146
new file mode 100644
index 000000000..f5033c302
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-146
@@ -0,0 +1 @@
+reverse(numbers)
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-147 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-147
new file mode 100644
index 000000000..822f054d5
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-147
@@ -0,0 +1 @@
+reverse(array)
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-148 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-148
new file mode 100644
index 000000000..a584adcc0
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-148
@@ -0,0 +1 @@
+reverse(`[]`)
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-149 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-149
new file mode 100644
index 000000000..fb4cc5dc4
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-149
@@ -0,0 +1 @@
+reverse('')
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-15 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-15
new file mode 100644
index 000000000..693f95496
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-15
@@ -0,0 +1 @@
+foo."1"
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-150 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-150
new file mode 100644
index 000000000..aa260fabc
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-150
@@ -0,0 +1 @@
+reverse('hello world')
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-151 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-151
new file mode 100644
index 000000000..d8c58826a
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-151
@@ -0,0 +1 @@
+starts_with(str, 'S')
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-152 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-152
new file mode 100644
index 000000000..32e16b7bb
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-152
@@ -0,0 +1 @@
+starts_with(str, 'St')
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-153 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-153
new file mode 100644
index 000000000..5f575ae7f
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-153
@@ -0,0 +1 @@
+starts_with(str, 'Str')
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-155 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-155
new file mode 100644
index 000000000..f31551c62
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-155
@@ -0,0 +1 @@
+sum(numbers)
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-156 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-156
new file mode 100644
index 000000000..18b90446c
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-156
@@ -0,0 +1 @@
+sum(decimals)
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-157 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-157
new file mode 100644
index 000000000..def4d0bc1
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-157
@@ -0,0 +1 @@
+sum(array[].to_number(@))
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-158 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-158
new file mode 100644
index 000000000..48e4a7707
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-158
@@ -0,0 +1 @@
+sum(`[]`)
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-159 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-159
new file mode 100644
index 000000000..9fb939a0b
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-159
@@ -0,0 +1 @@
+to_array('foo')
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-16 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-16
new file mode 100644
index 000000000..86155ed75
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-16
@@ -0,0 +1 @@
+foo."1"[0]
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-160 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-160
new file mode 100644
index 000000000..74ba7cc67
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-160
@@ -0,0 +1 @@
+to_array(`0`)
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-161 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-161
new file mode 100644
index 000000000..57f8b983f
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-161
@@ -0,0 +1 @@
+to_array(objects)
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-162 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-162
new file mode 100644
index 000000000..d17c7345f
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-162
@@ -0,0 +1 @@
+to_array(`[1, 2, 3]`)
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-163 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-163
new file mode 100644
index 000000000..15f70f783
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-163
@@ -0,0 +1 @@
+to_array(false)
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-164 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-164
new file mode 100644
index 000000000..9b227529b
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-164
@@ -0,0 +1 @@
+to_string('foo')
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-165 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-165
new file mode 100644
index 000000000..489a42935
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-165
@@ -0,0 +1 @@
+to_string(`1.2`)
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-166 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-166
new file mode 100644
index 000000000..d17106a00
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-166
@@ -0,0 +1 @@
+to_string(`[0, 1]`)
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-167 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-167
new file mode 100644
index 000000000..4f4ae9e68
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-167
@@ -0,0 +1 @@
+to_number('1.0')
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-168 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-168
new file mode 100644
index 000000000..ce932e2e6
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-168
@@ -0,0 +1 @@
+to_number('1.1')
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-169 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-169
new file mode 100644
index 000000000..e246fa4db
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-169
@@ -0,0 +1 @@
+to_number('4')
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-17 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-17
new file mode 100644
index 000000000..de0b4c39d
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-17
@@ -0,0 +1 @@
+foo."-1"
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-170 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-170
new file mode 100644
index 000000000..f8c264747
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-170
@@ -0,0 +1 @@
+to_number('notanumber')
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-171 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-171
new file mode 100644
index 000000000..7d423b1cd
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-171
@@ -0,0 +1 @@
+to_number(`false`)
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-172 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-172
new file mode 100644
index 000000000..503716b68
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-172
@@ -0,0 +1 @@
+to_number(`null`)
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-173 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-173
new file mode 100644
index 000000000..7f61dfa15
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-173
@@ -0,0 +1 @@
+to_number(`[0]`)
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-174 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-174
new file mode 100644
index 000000000..ee72a8c01
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-174
@@ -0,0 +1 @@
+to_number(`{"foo": 0}`)
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-175 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-175
new file mode 100644
index 000000000..8d8f1f759
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-175
@@ -0,0 +1 @@
+sort(numbers)
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-178 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-178
new file mode 100644
index 000000000..8cb54ba47
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-178
@@ -0,0 +1 @@
+sort(empty_list)
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-179 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-179
new file mode 100644
index 000000000..cf2c9b1db
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-179
@@ -0,0 +1 @@
+not_null(unknown_key, str)
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-18 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-18
new file mode 100644
index 000000000..b516b2c48
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-18
@@ -0,0 +1 @@
+@
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-180 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-180
new file mode 100644
index 000000000..e047d4866
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-180
@@ -0,0 +1 @@
+not_null(unknown_key, foo.bar, empty_list, str)
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-181 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-181
new file mode 100644
index 000000000..c4cc87b9c
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-181
@@ -0,0 +1 @@
+not_null(unknown_key, null_key, empty_list, str)
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-182 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-182
new file mode 100644
index 000000000..2c7fa0a9c
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-182
@@ -0,0 +1 @@
+not_null(all, expressions, are_null)
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-183 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-183
new file mode 100644
index 000000000..eb096e61c
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-183
@@ -0,0 +1 @@
+numbers[].to_string(@)
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-184 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-184
new file mode 100644
index 000000000..4958abaec
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-184
@@ -0,0 +1 @@
+array[].to_number(@)
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-185 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-185
new file mode 100644
index 000000000..102708472
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-185
@@ -0,0 +1 @@
+foo[].not_null(f, e, d, c, b, a)
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-186 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-186
new file mode 100644
index 000000000..83cb91612
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-186
@@ -0,0 +1 @@
+sort_by(people, &age)
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-187 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-187
new file mode 100644
index 000000000..a494d6c4b
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-187
@@ -0,0 +1 @@
+sort_by(people, &to_number(age_str))
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-188 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-188
new file mode 100644
index 000000000..2294fc54d
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-188
@@ -0,0 +1 @@
+sort_by(people, &age)[].name
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-189 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-189
new file mode 100644
index 000000000..bb8c2b46d
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-189
@@ -0,0 +1 @@
+sort_by(people, &age)[].extra
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-19 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-19
new file mode 100644
index 000000000..e3ed49ac6
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-19
@@ -0,0 +1 @@
+@.bar
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-190 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-190
new file mode 100644
index 000000000..3ab029034
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-190
@@ -0,0 +1 @@
+sort_by(`[]`, &age)
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-191 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-191
new file mode 100644
index 000000000..97db56f7b
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-191
@@ -0,0 +1 @@
+max_by(people, &age)
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-192 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-192
new file mode 100644
index 000000000..a7e648de9
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-192
@@ -0,0 +1 @@
+max_by(people, &age_str)
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-193 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-193
new file mode 100644
index 000000000..be4348d0c
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-193
@@ -0,0 +1 @@
+max_by(people, &to_number(age_str))
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-194 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-194
new file mode 100644
index 000000000..a707283d4
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-194
@@ -0,0 +1 @@
+min_by(people, &age)
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-195 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-195
new file mode 100644
index 000000000..2cd6618d8
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-195
@@ -0,0 +1 @@
+min_by(people, &age_str)
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-196 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-196
new file mode 100644
index 000000000..833e68373
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-196
@@ -0,0 +1 @@
+min_by(people, &to_number(age_str))
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-198 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-198
new file mode 100644
index 000000000..706dbda89
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-198
@@ -0,0 +1 @@
+__L
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-199 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-199
new file mode 100644
index 000000000..ca593ca93
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-199
@@ -0,0 +1 @@
+"!\r"
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-2 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-2
new file mode 100644
index 000000000..4d5f9756e
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-2
@@ -0,0 +1 @@
+foo.bar
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-20 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-20
new file mode 100644
index 000000000..f300ab917
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-20
@@ -0,0 +1 @@
+@.foo[0]
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-200 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-200
new file mode 100644
index 000000000..9c9384354
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-200
@@ -0,0 +1 @@
+Y_1623
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-201 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-201
new file mode 100644
index 000000000..c1b0730e0
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-201
@@ -0,0 +1 @@
+x
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-202 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-202
new file mode 100644
index 000000000..1552ec63a
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-202
@@ -0,0 +1 @@
+"\tF\uCebb"
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-203 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-203
new file mode 100644
index 000000000..047041273
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-203
@@ -0,0 +1 @@
+" \t"
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-204 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-204
new file mode 100644
index 000000000..efd782cc3
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-204
@@ -0,0 +1 @@
+" "
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-205 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-205
new file mode 100644
index 000000000..8494ac270
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-205
@@ -0,0 +1 @@
+v2
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-206 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-206
new file mode 100644
index 000000000..c61f7f7eb
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-206
@@ -0,0 +1 @@
+"\t"
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-207 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-207
new file mode 100644
index 000000000..f6055f189
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-207
@@ -0,0 +1 @@
+_X
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-208 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-208
new file mode 100644
index 000000000..4f58e0e7b
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-208
@@ -0,0 +1 @@
+"\t4\ud9da\udd15"
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-209 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-209
new file mode 100644
index 000000000..f536bfbf6
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-209
@@ -0,0 +1 @@
+v24_W
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-21 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-21
new file mode 100644
index 000000000..ef47ff2c0
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-21
@@ -0,0 +1 @@
+"foo.bar"
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-210 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-210
new file mode 100644
index 000000000..69759281c
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-210
@@ -0,0 +1 @@
+"H"
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-211 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-211
new file mode 100644
index 000000000..c3e8b5927
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-211
@@ -0,0 +1 @@
+"\f"
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-212 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-212
new file mode 100644
index 000000000..24ecc222c
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-212
@@ -0,0 +1 @@
+"E4"
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-213 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-213
new file mode 100644
index 000000000..5693009d2
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-213
@@ -0,0 +1 @@
+"!"
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-214 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-214
new file mode 100644
index 000000000..62dd220e7
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-214
@@ -0,0 +1 @@
+tM
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-215 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-215
new file mode 100644
index 000000000..3c1e81f55
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-215
@@ -0,0 +1 @@
+" ["
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-216 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-216
new file mode 100644
index 000000000..493daa673
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-216
@@ -0,0 +1 @@
+"R!"
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-217 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-217
new file mode 100644
index 000000000..116b50ab3
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-217
@@ -0,0 +1 @@
+_6W
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-218 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-218
new file mode 100644
index 000000000..0073fac45
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-218
@@ -0,0 +1 @@
+"\uaBA1\r"
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-219 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-219
new file mode 100644
index 000000000..00d8fa37e
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-219
@@ -0,0 +1 @@
+tL7
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-22 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-22
new file mode 100644
index 000000000..661ebcfa3
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-22
@@ -0,0 +1 @@
+"foo bar"
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-220 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-220
new file mode 100644
index 000000000..c14f16e02
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-220
@@ -0,0 +1 @@
+"<"
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-257 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-257
new file mode 100644
index 000000000..8a2443e6e
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-257
@@ -0,0 +1 @@
+hvu
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-258 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-258
new file mode 100644
index 000000000..c9ddacbb6
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-258
@@ -0,0 +1 @@
+"; !"
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-259 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-259
new file mode 100644
index 000000000..d0209c6df
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-259
@@ -0,0 +1 @@
+hU
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-26 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-26
new file mode 100644
index 000000000..82649bd24
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-26
@@ -0,0 +1 @@
+"/unix/path"
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-260 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-260
new file mode 100644
index 000000000..c07242aa4
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-260
@@ -0,0 +1 @@
+"!I\n\/"
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-261 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-261
new file mode 100644
index 000000000..7aae4effc
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-261
@@ -0,0 +1 @@
+"\uEEbF"
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-262 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-262
new file mode 100644
index 000000000..c1574f35f
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-262
@@ -0,0 +1 @@
+"U)\t"
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-263 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-263
new file mode 100644
index 000000000..5197e3a2b
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-263
@@ -0,0 +1 @@
+fa0_9
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-264 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-264
new file mode 100644
index 000000000..320558b00
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-264
@@ -0,0 +1 @@
+"/"
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-265 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-265
new file mode 100644
index 000000000..4a2cb0865
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-265
@@ -0,0 +1 @@
+Gy
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-266 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-266
new file mode 100644
index 000000000..9524c8381
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-266
@@ -0,0 +1 @@
+"\b"
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-267 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-267
new file mode 100644
index 000000000..066b8d98b
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-267
@@ -0,0 +1 @@
+"<"
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-268 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-268
new file mode 100644
index 000000000..c61f7f7eb
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-268
@@ -0,0 +1 @@
+"\t"
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-269 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-269
new file mode 100644
index 000000000..a582f62d2
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-269
@@ -0,0 +1 @@
+"\t&\\\r"
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-27 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-27
new file mode 100644
index 000000000..a1d50731c
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-27
@@ -0,0 +1 @@
+"\"\"\""
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-270 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-270
new file mode 100644
index 000000000..e3c5eedeb
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-270
@@ -0,0 +1 @@
+"#"
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-271 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-271
new file mode 100644
index 000000000..e75309a52
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-271
@@ -0,0 +1 @@
+B__
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-272 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-272
new file mode 100644
index 000000000..027177272
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-272
@@ -0,0 +1 @@
+"\nS \n"
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-273 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-273
new file mode 100644
index 000000000..99432276e
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-273
@@ -0,0 +1 @@
+Bp
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-274 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-274
new file mode 100644
index 000000000..d4f8a788b
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-274
@@ -0,0 +1 @@
+",\t;"
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-275 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-275
new file mode 100644
index 000000000..56c384f75
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-275
@@ -0,0 +1 @@
+B_q
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-276 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-276
new file mode 100644
index 000000000..f093d2aa3
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-276
@@ -0,0 +1 @@
+"\/+\t\n\b!Z"
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-277 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-277
new file mode 100644
index 000000000..11e1229d9
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-277
@@ -0,0 +1 @@
+"󇟇\\ueFAc"
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-278 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-278
new file mode 100644
index 000000000..90dbfcfcd
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-278
@@ -0,0 +1 @@
+":\f"
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-279 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-279
new file mode 100644
index 000000000..b06b83025
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-279
@@ -0,0 +1 @@
+"\/"
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-28 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-28
new file mode 100644
index 000000000..5f55d73af
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-28
@@ -0,0 +1 @@
+"bar"."baz"
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-280 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-280
new file mode 100644
index 000000000..0e4bf7c11
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-280
@@ -0,0 +1 @@
+_BW_6Hg_Gl
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-281 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-281
new file mode 100644
index 000000000..81bb45f80
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-281
@@ -0,0 +1 @@
+"ôƒ°‚"
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-282 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-282
new file mode 100644
index 000000000..d0b4de146
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-282
@@ -0,0 +1 @@
+zs1DC
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-283 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-283
new file mode 100644
index 000000000..68797580c
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-283
@@ -0,0 +1 @@
+__434
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-284 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-284
new file mode 100644
index 000000000..e61be91c4
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-284
@@ -0,0 +1 @@
+"óµ…"
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-285 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-285
new file mode 100644
index 000000000..026cb9cbb
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-285
@@ -0,0 +1 @@
+Z_5
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-286 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-286
new file mode 100644
index 000000000..ca9587d06
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-286
@@ -0,0 +1 @@
+z_M_
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-287 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-287
new file mode 100644
index 000000000..67f6d9c42
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-287
@@ -0,0 +1 @@
+YU_2
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-288 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-288
new file mode 100644
index 000000000..927ab653a
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-288
@@ -0,0 +1 @@
+_0
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-289 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-289
new file mode 100644
index 000000000..39307ab93
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-289
@@ -0,0 +1 @@
+"\b+"
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-29 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-29
new file mode 100644
index 000000000..8b0c5b41b
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-29
@@ -0,0 +1 @@
+foo[?name == 'a']
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-290 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-290
new file mode 100644
index 000000000..a3ec2ed7a
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-290
@@ -0,0 +1 @@
+"\""
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-291 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-291
new file mode 100644
index 000000000..26bf7e122
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-291
@@ -0,0 +1 @@
+D7
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-292 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-292
new file mode 100644
index 000000000..d595c9f43
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-292
@@ -0,0 +1 @@
+_62L
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-293 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-293
new file mode 100644
index 000000000..f68696949
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-293
@@ -0,0 +1 @@
+"\tK\t"
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-294 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-294
new file mode 100644
index 000000000..f3a9b7edb
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-294
@@ -0,0 +1 @@
+"\n\\\f"
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-295 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-295
new file mode 100644
index 000000000..455f00ffc
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-295
@@ -0,0 +1 @@
+I_
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-296 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-296
new file mode 100644
index 000000000..ccd5968f9
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-296
@@ -0,0 +1 @@
+W_a0_
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-297 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-297
new file mode 100644
index 000000000..ee55c16fc
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-297
@@ -0,0 +1 @@
+BQ
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-298 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-298
new file mode 100644
index 000000000..0d1a169a6
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-298
@@ -0,0 +1 @@
+"\tX$\uABBb"
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-299 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-299
new file mode 100644
index 000000000..0573cfd73
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-299
@@ -0,0 +1 @@
+Z9
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-3 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-3
new file mode 100644
index 000000000..f0fcbd8ea
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-3
@@ -0,0 +1 @@
+foo.bar.baz
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-30 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-30
new file mode 100644
index 000000000..4f8e6a17a
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-30
@@ -0,0 +1 @@
+*[?[0] == `0`]
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-300 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-300
new file mode 100644
index 000000000..a0db02beb
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-300
@@ -0,0 +1 @@
+"\b%\"òž„"
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-301 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-301
new file mode 100644
index 000000000..56032f7a2
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-301
@@ -0,0 +1 @@
+_F
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-302 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-302
new file mode 100644
index 000000000..4a8a3cff3
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-302
@@ -0,0 +1 @@
+"!,"
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-303 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-303
new file mode 100644
index 000000000..7c1efac00
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-303
@@ -0,0 +1 @@
+"\"!"
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-304 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-304
new file mode 100644
index 000000000..a0f489d53
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-304
@@ -0,0 +1 @@
+Hh
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-305 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-305
new file mode 100644
index 000000000..c64e8d5ac
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-305
@@ -0,0 +1 @@
+"&"
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-306 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-306
new file mode 100644
index 000000000..0567e992f
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-306
@@ -0,0 +1 @@
+"9\r\\R"
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-307 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-307
new file mode 100644
index 000000000..ce8245c5b
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-307
@@ -0,0 +1 @@
+M_k
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-308 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-308
new file mode 100644
index 000000000..8f16a5ac0
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-308
@@ -0,0 +1 @@
+"!\b\nó‘©’\"\""
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-309 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-309
new file mode 100644
index 000000000..504ff5ae3
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-309
@@ -0,0 +1 @@
+"6"
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-31 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-31
new file mode 100644
index 000000000..07fb57234
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-31
@@ -0,0 +1 @@
+foo[?first == last]
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-310 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-310
new file mode 100644
index 000000000..533dd8e54
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-310
@@ -0,0 +1 @@
+_7
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-311 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-311
new file mode 100644
index 000000000..1e4a3a341
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-311
@@ -0,0 +1 @@
+"0"
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-312 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-312
new file mode 100644
index 000000000..37961f6ca
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-312
@@ -0,0 +1 @@
+"\\8\\"
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-313 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-313
new file mode 100644
index 000000000..23480cff1
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-313
@@ -0,0 +1 @@
+b7eo
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-314 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-314
new file mode 100644
index 000000000..e609f81a3
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-314
@@ -0,0 +1 @@
+xIUo9
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-315 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-315
new file mode 100644
index 000000000..d89a25f0b
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-315
@@ -0,0 +1 @@
+"5"
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-316 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-316
new file mode 100644
index 000000000..5adcf5e7d
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-316
@@ -0,0 +1 @@
+"?"
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-317 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-317
new file mode 100644
index 000000000..ace4a897d
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-317
@@ -0,0 +1 @@
+sU
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-318 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-318
new file mode 100644
index 000000000..feffb7061
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-318
@@ -0,0 +1 @@
+"VH2&H\\\/"
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-319 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-319
new file mode 100644
index 000000000..8223f1e51
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-319
@@ -0,0 +1 @@
+_C
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-32 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-32
new file mode 100644
index 000000000..7e85c4bdf
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-32
@@ -0,0 +1 @@
+foo[?first == last].first
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-320 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-320
new file mode 100644
index 000000000..c9cdc63b0
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-320
@@ -0,0 +1 @@
+_
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-321 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-321
new file mode 100644
index 000000000..c82f7982e
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-321
@@ -0,0 +1 @@
+"<\t"
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-322 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-322
new file mode 100644
index 000000000..dae65c515
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-322
@@ -0,0 +1 @@
+"\uD834\uDD1E"
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-323 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-323
new file mode 100644
index 000000000..b6b369543
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-323
@@ -0,0 +1 @@
+foo.bar[0]
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-324 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-324
new file mode 100644
index 000000000..bf06e678c
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-324
@@ -0,0 +1 @@
+foo.bar[1]
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-325 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-325
new file mode 100644
index 000000000..5d48e0205
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-325
@@ -0,0 +1 @@
+foo.bar[2]
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-326 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-326
new file mode 100644
index 000000000..de3af7230
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-326
@@ -0,0 +1 @@
+foo.bar[3]
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-327 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-327
new file mode 100644
index 000000000..a1c333508
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-327
@@ -0,0 +1 @@
+foo.bar[-1]
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-328 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-328
new file mode 100644
index 000000000..ad0fef91c
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-328
@@ -0,0 +1 @@
+foo.bar[-2]
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-329 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-329
new file mode 100644
index 000000000..3e83c6f73
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-329
@@ -0,0 +1 @@
+foo.bar[-3]
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-33 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-33
new file mode 100644
index 000000000..72fc0a53e
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-33
@@ -0,0 +1 @@
+foo[?age > `25`]
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-330 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-330
new file mode 100644
index 000000000..433a737d6
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-330
@@ -0,0 +1 @@
+foo.bar[-4]
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-331 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-331
new file mode 100644
index 000000000..4d5f9756e
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-331
@@ -0,0 +1 @@
+foo.bar
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-332 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-332
new file mode 100644
index 000000000..5e0d9b717
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-332
@@ -0,0 +1 @@
+foo[0].bar
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-333 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-333
new file mode 100644
index 000000000..3cd7e9460
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-333
@@ -0,0 +1 @@
+foo[1].bar
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-334 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-334
new file mode 100644
index 000000000..74cb17655
--- /dev/null
+++
b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-334 @@ -0,0 +1 @@ +foo[2].bar \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-335 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-335 new file mode 100644 index 000000000..3cf2007f7 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-335 @@ -0,0 +1 @@ +foo[3].notbar \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-336 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-336 new file mode 100644 index 000000000..9674d8803 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-336 @@ -0,0 +1 @@ +foo[3].bar \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-337 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-337 new file mode 100644 index 000000000..9b0b2f818 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-337 @@ -0,0 +1 @@ +foo[0] \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-338 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-338 new file mode 100644 index 000000000..83c639a18 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-338 @@ -0,0 +1 @@ +foo[1] \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-339 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-339 new file mode 100644 index 000000000..3b76c9f64 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-339 @@ -0,0 +1 @@ +foo[2] \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-34 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-34 new file mode 100644 index 000000000..9a2b0184e --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-34 @@ -0,0 +1 @@ +foo[?age >= `25`] \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-340 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-340 new file mode 100644 index 000000000..ff99e045d --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-340 @@ -0,0 +1 @@ +foo[3] \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-341 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-341 new file mode 100644 index 000000000..040ecb240 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-341 @@ -0,0 +1 @@ +foo[4] \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-342 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-342 new file mode 100644 index 000000000..6e7ea636e --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-342 @@ -0,0 +1 @@ +[0] \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-343 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-343 new file mode 100644 index 000000000..bace2a0be --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-343 @@ -0,0 +1 @@ +[1] \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-344 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-344 new file mode 100644 index 000000000..5d50c80c0 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-344 @@ -0,0 +1 @@ +[2] \ No newline 
at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-345 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-345 new file mode 100644 index 000000000..99d21a2a0 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-345 @@ -0,0 +1 @@ +[-1] \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-346 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-346 new file mode 100644 index 000000000..133a9c627 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-346 @@ -0,0 +1 @@ +[-2] \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-347 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-347 new file mode 100644 index 000000000..b7f78c5dc --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-347 @@ -0,0 +1 @@ +[-3] \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-348 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-348 new file mode 100644 index 000000000..bd9de815f --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-348 @@ -0,0 +1 @@ +reservations[].instances[].foo \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-349 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-349 new file mode 100644 index 000000000..55e625735 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-349 @@ -0,0 +1 @@ +reservations[].instances[].bar \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-35 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-35 new file mode 100644 index 000000000..fa83f1da3 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-35 @@ -0,0 +1 @@ +foo[?age > `30`] \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-350 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-350 new file mode 100644 index 000000000..1661747c0 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-350 @@ -0,0 +1 @@ +reservations[].notinstances[].foo \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-351 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-351 new file mode 100644 index 000000000..1661747c0 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-351 @@ -0,0 +1 @@ +reservations[].notinstances[].foo \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-352 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-352 new file mode 100644 index 000000000..3debc70f8 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-352 @@ -0,0 +1 @@ +reservations[].instances[].foo[].bar \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-353 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-353 new file mode 100644 index 000000000..75af2fda0 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-353 @@ -0,0 +1 @@ +reservations[].instances[].foo[].baz \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-354 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-354 new file mode 100644 index 000000000..4a70cd8a0 --- /dev/null +++ 
b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-354 @@ -0,0 +1 @@ +reservations[].instances[].notfoo[].bar \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-355 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-355 new file mode 100644 index 000000000..987985b00 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-355 @@ -0,0 +1 @@ +reservations[].instances[].notfoo[].notbar \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-356 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-356 new file mode 100644 index 000000000..1661747c0 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-356 @@ -0,0 +1 @@ +reservations[].notinstances[].foo \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-357 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-357 new file mode 100644 index 000000000..634f937e5 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-357 @@ -0,0 +1 @@ +reservations[].instances[].foo[].notbar \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-358 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-358 new file mode 100644 index 000000000..09cb7b8bb --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-358 @@ -0,0 +1 @@ +reservations[].instances[].bar[].baz \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-359 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-359 new file mode 100644 index 000000000..f5d9ac5b7 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-359 @@ -0,0 +1 @@ +reservations[].instances[].baz[].baz \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-36 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-36 new file mode 100644 index 000000000..463a2a542 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-36 @@ -0,0 +1 @@ +foo[?age < `25`] \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-360 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-360 new file mode 100644 index 000000000..d1016d6e7 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-360 @@ -0,0 +1 @@ +reservations[].instances[].qux[].baz \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-361 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-361 new file mode 100644 index 000000000..ef54cf52d --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-361 @@ -0,0 +1 @@ +reservations[].instances[].qux[].baz[] \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-362 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-362 new file mode 100644 index 000000000..bea506ff2 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-362 @@ -0,0 +1 @@ +foo[] \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-363 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-363 new file mode 100644 index 000000000..20dd081e0 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-363 @@ -0,0 +1 @@ +foo[][0] \ No newline at end of file diff --git 
a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-364 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-364 new file mode 100644 index 000000000..4803734b0 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-364 @@ -0,0 +1 @@ +foo[][1] \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-365 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-365 new file mode 100644 index 000000000..1be565985 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-365 @@ -0,0 +1 @@ +foo[][0][0] \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-366 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-366 new file mode 100644 index 000000000..d2cf6da59 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-366 @@ -0,0 +1 @@ +foo[][2][2] \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-367 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-367 new file mode 100644 index 000000000..c609ca64b --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-367 @@ -0,0 +1 @@ +foo[][0][0][100] \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-368 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-368 new file mode 100644 index 000000000..191028156 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-368 @@ -0,0 +1 @@ +foo \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-369 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-369 new file mode 100644 index 000000000..bea506ff2 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-369 @@ -0,0 +1 @@ +foo[] \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-37 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-37 new file mode 100644 index 000000000..10ed5d3f6 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-37 @@ -0,0 +1 @@ +foo[?age <= `25`] \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-370 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-370 new file mode 100644 index 000000000..13f2c4a0b --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-370 @@ -0,0 +1 @@ +foo[].bar \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-371 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-371 new file mode 100644 index 000000000..edf3d9277 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-371 @@ -0,0 +1 @@ +foo[].bar[] \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-372 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-372 new file mode 100644 index 000000000..2a3b993af --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-372 @@ -0,0 +1 @@ +foo[].bar[].baz \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-373 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-373 new file mode 100644 index 000000000..d5ca878a1 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-373 @@ -0,0 +1 @@ +string[] \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-374 
b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-374 new file mode 100644 index 000000000..fcd255f5d --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-374 @@ -0,0 +1 @@ +hash[] \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-375 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-375 new file mode 100644 index 000000000..2d53bd7cd --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-375 @@ -0,0 +1 @@ +number[] \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-376 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-376 new file mode 100644 index 000000000..cb10d2497 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-376 @@ -0,0 +1 @@ +nullvalue[] \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-377 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-377 new file mode 100644 index 000000000..f6c79ca84 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-377 @@ -0,0 +1 @@ +string[].foo \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-378 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-378 new file mode 100644 index 000000000..09bf36e8a --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-378 @@ -0,0 +1 @@ +hash[].foo \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-379 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-379 new file mode 100644 index 000000000..4c3578189 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-379 @@ -0,0 +1 @@ +number[].foo \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-38 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-38 new file mode 100644 index 000000000..16a4c36ac --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-38 @@ -0,0 +1 @@ +foo[?age < `20`] \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-380 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-380 new file mode 100644 index 000000000..2dd8ae218 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-380 @@ -0,0 +1 @@ +nullvalue[].foo \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-381 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-381 new file mode 100644 index 000000000..dfed81603 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-381 @@ -0,0 +1 @@ +nullvalue[].foo[].bar \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-382 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-382 new file mode 100644 index 000000000..d7628e646 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-382 @@ -0,0 +1 @@ +`"foo"` \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-383 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-383 new file mode 100644 index 000000000..49c5269b1 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-383 @@ -0,0 +1 @@ +`"\u03a6"` \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-384 
b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-384 new file mode 100644 index 000000000..d5db721d0 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-384 @@ -0,0 +1 @@ +`"✓"` \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-385 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-385 new file mode 100644 index 000000000..a2b6e4ec8 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-385 @@ -0,0 +1 @@ +`[1, 2, 3]` \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-386 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-386 new file mode 100644 index 000000000..f5801bdd6 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-386 @@ -0,0 +1 @@ +`{"a": "b"}` \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-387 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-387 new file mode 100644 index 000000000..f87db59a8 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-387 @@ -0,0 +1 @@ +`true` \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-388 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-388 new file mode 100644 index 000000000..3b20d905f --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-388 @@ -0,0 +1 @@ +`false` \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-389 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-389 new file mode 100644 index 000000000..70bcd29a7 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-389 @@ -0,0 +1 @@ +`null` \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-39 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-39 new file mode 100644 index 000000000..351054d3e --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-39 @@ -0,0 +1 @@ +foo[?age == `20`] \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-390 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-390 new file mode 100644 index 000000000..0918d4155 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-390 @@ -0,0 +1 @@ +`0` \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-391 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-391 new file mode 100644 index 000000000..ef70c4c11 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-391 @@ -0,0 +1 @@ +`1` \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-392 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-392 new file mode 100644 index 000000000..b39a922f4 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-392 @@ -0,0 +1 @@ +`2` \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-393 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-393 new file mode 100644 index 000000000..7e65687db --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-393 @@ -0,0 +1 @@ +`3` \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-394 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-394 new file mode 100644 index 
000000000..770d1ece7 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-394 @@ -0,0 +1 @@ +`4` \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-395 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-395 new file mode 100644 index 000000000..a8b81985c --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-395 @@ -0,0 +1 @@ +`5` \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-396 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-396 new file mode 100644 index 000000000..7f0861065 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-396 @@ -0,0 +1 @@ +`6` \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-397 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-397 new file mode 100644 index 000000000..495114d91 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-397 @@ -0,0 +1 @@ +`7` \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-398 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-398 new file mode 100644 index 000000000..94f355c46 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-398 @@ -0,0 +1 @@ +`8` \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-399 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-399 new file mode 100644 index 000000000..600d2aa3f --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-399 @@ -0,0 +1 @@ +`9` \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-4 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-4 new file mode 100644 index 000000000..314852235 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-4 @@ -0,0 +1 @@ +foo.bar.baz.bad \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-40 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-40 new file mode 100644 index 000000000..99d9258a6 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-40 @@ -0,0 +1 @@ +foo[?age != `20`] \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-400 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-400 new file mode 100644 index 000000000..637015b5f --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-400 @@ -0,0 +1 @@ +`"foo\`bar"` \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-401 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-401 new file mode 100644 index 000000000..6fa7557b8 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-401 @@ -0,0 +1 @@ +`"foo\"bar"` \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-402 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-402 new file mode 100644 index 000000000..5aabeec34 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-402 @@ -0,0 +1 @@ +`"1\`"` \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-403 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-403 new file mode 100644 index 000000000..8302ea198 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-403 @@ 
-0,0 +1 @@ +`"\\"`.{a:`"b"`} \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-404 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-404 new file mode 100644 index 000000000..d88d014a9 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-404 @@ -0,0 +1 @@ +`{"a": "b"}`.a \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-405 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-405 new file mode 100644 index 000000000..47152dddb --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-405 @@ -0,0 +1 @@ +`{"a": {"b": "c"}}`.a.b \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-406 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-406 new file mode 100644 index 000000000..895d42938 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-406 @@ -0,0 +1 @@ +`[0, 1, 2]`[1] \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-407 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-407 new file mode 100644 index 000000000..42500a368 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-407 @@ -0,0 +1 @@ +` {"foo": true}` \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-408 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-408 new file mode 100644 index 000000000..08b944dad --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-408 @@ -0,0 +1 @@ +`{"foo": true} ` \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-409 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-409 new file mode 100644 index 000000000..6de163f80 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-409 @@ -0,0 +1 @@ +'foo' \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-41 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-41 new file mode 100644 index 000000000..5bc357d9f --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-41 @@ -0,0 +1 @@ +foo[?top.name == 'a'] \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-410 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-410 new file mode 100644 index 000000000..b84bbdb29 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-410 @@ -0,0 +1 @@ +' foo ' \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-411 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-411 new file mode 100644 index 000000000..bf6a07ace --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-411 @@ -0,0 +1 @@ +'0' \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-412 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-412 new file mode 100644 index 000000000..c742f5b0c --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-412 @@ -0,0 +1,2 @@ +'newline +' \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-413 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-413 new file mode 100644 index 000000000..04e9b3ade --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-413 @@ -0,0 +1,2 @@ +' +' \ No newline 
at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-414 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-414 new file mode 100644 index 000000000..ebdaf120d --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-414 @@ -0,0 +1 @@ +'✓' \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-415 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-415 new file mode 100644 index 000000000..d0ba5d7fa --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-415 @@ -0,0 +1 @@ +'𝄞' \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-416 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-416 new file mode 100644 index 000000000..19c2e2ef4 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-416 @@ -0,0 +1 @@ +' [foo] ' \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-417 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-417 new file mode 100644 index 000000000..5faa483b1 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-417 @@ -0,0 +1 @@ +'[foo]' \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-418 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-418 new file mode 100644 index 000000000..e3c05c163 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-418 @@ -0,0 +1 @@ +'\u03a6' \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-419 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-419 new file mode 100644 index 000000000..7c13861ac --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-419 @@ -0,0 +1 @@ +foo.{bar: bar} \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-42 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-42 new file mode 100644 index 000000000..d037a0a4d --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-42 @@ -0,0 +1 @@ +foo[?top.first == top.last] \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-420 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-420 new file mode 100644 index 000000000..f795c2552 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-420 @@ -0,0 +1 @@ +foo.{"bar": bar} \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-421 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-421 new file mode 100644 index 000000000..772c45639 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-421 @@ -0,0 +1 @@ +foo.{"foo.bar": bar} \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-422 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-422 new file mode 100644 index 000000000..8808e92bf --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-422 @@ -0,0 +1 @@ +foo.{bar: bar, baz: baz} \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-423 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-423 new file mode 100644 index 000000000..3f13757a1 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-423 @@ -0,0 +1 @@ +foo.{"bar": bar, "baz": baz} \ No newline at end of file diff
--git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-424 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-424 new file mode 100644 index 000000000..23cd8903e --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-424 @@ -0,0 +1 @@ +{"baz": baz, "qux\"": "qux\""} \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-425 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-425 new file mode 100644 index 000000000..fabb6da4f --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-425 @@ -0,0 +1 @@ +foo.{bar:bar,baz:baz} \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-426 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-426 new file mode 100644 index 000000000..4c3f615b1 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-426 @@ -0,0 +1 @@ +foo.{bar: bar,qux: qux} \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-427 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-427 new file mode 100644 index 000000000..8bc46535a --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-427 @@ -0,0 +1 @@ +foo.{bar: bar, noexist: noexist} \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-428 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-428 new file mode 100644 index 000000000..2024b6f11 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-428 @@ -0,0 +1 @@ +foo.{noexist: noexist, alsonoexist: alsonoexist} \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-429 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-429 new file mode 100644 index 000000000..b52191d10 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-429 @@ -0,0 +1 @@ +foo.badkey.{nokey: nokey, alsonokey: alsonokey} \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-43 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-43 new file mode 100644 index 000000000..8534a5cae --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-43 @@ -0,0 +1 @@ +foo[?top == `{"first": "foo", "last": "bar"}`] \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-430 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-430 new file mode 100644 index 000000000..5cd310b6d --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-430 @@ -0,0 +1 @@ +foo.nested.*.{a: a,b: b} \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-431 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-431 new file mode 100644 index 000000000..0b24ef535 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-431 @@ -0,0 +1 @@ +foo.nested.three.{a: a, cinner: c.inner} \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-432 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-432 new file mode 100644 index 000000000..473c1c351 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-432 @@ -0,0 +1 @@ +foo.nested.three.{a: a, c: c.inner.bad.key} \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-433 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-433 new 
file mode 100644 index 000000000..44ba735ab --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-433 @@ -0,0 +1 @@ +foo.{a: nested.one.a, b: nested.two.b} \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-434 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-434 new file mode 100644 index 000000000..f5f89b12b --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-434 @@ -0,0 +1 @@ +{bar: bar, baz: baz} \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-435 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-435 new file mode 100644 index 000000000..697764cb3 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-435 @@ -0,0 +1 @@ +{bar: bar} \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-436 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-436 new file mode 100644 index 000000000..20447fb10 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-436 @@ -0,0 +1 @@ +{otherkey: bar} \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-437 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-437 new file mode 100644 index 000000000..310b9b1dd --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-437 @@ -0,0 +1 @@ +{no: no, exist: exist} \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-438 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-438 new file mode 100644 index 000000000..c79b2e240 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-438 @@ -0,0 +1 @@ +foo.[bar] \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-439 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-439 new file mode 100644 index 000000000..ab498ef65 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-439 @@ -0,0 +1 @@ +foo.[bar,baz] \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-44 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-44 new file mode 100644 index 000000000..71307c409 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-44 @@ -0,0 +1 @@ +foo[?key == `true`] \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-440 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-440 new file mode 100644 index 000000000..4b8f39a46 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-440 @@ -0,0 +1 @@ +foo.[bar,qux] \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-441 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-441 new file mode 100644 index 000000000..b8f9020f8 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-441 @@ -0,0 +1 @@ +foo.[bar,noexist] \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-442 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-442 new file mode 100644 index 000000000..b7c7b3f65 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-442 @@ -0,0 +1 @@ +foo.[noexist,alsonoexist] \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-443 
b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-443 new file mode 100644 index 000000000..fabb6da4f --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-443 @@ -0,0 +1 @@ +foo.{bar:bar,baz:baz} \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-444 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-444 new file mode 100644 index 000000000..c15c39f82 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-444 @@ -0,0 +1 @@ +foo.[bar,baz[0]] \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-445 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-445 new file mode 100644 index 000000000..9cebd8984 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-445 @@ -0,0 +1 @@ +foo.[bar,baz[1]] \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-446 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-446 new file mode 100644 index 000000000..c5bbfbf84 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-446 @@ -0,0 +1 @@ +foo.[bar,baz[2]] \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-447 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-447 new file mode 100644 index 000000000..d81cb2b90 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-447 @@ -0,0 +1 @@ +foo.[bar,baz[3]] \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-448 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-448 new file mode 100644 index 000000000..3a65aa7d6 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-448 @@ -0,0 +1 @@ +foo.[bar[0],baz[3]] \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-449 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-449 new file mode 100644 index 000000000..8808e92bf --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-449 @@ -0,0 +1 @@ +foo.{bar: bar, baz: baz} \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-45 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-45 new file mode 100644 index 000000000..e142b22a2 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-45 @@ -0,0 +1 @@ +foo[?key == `false`] \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-450 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-450 new file mode 100644 index 000000000..ab498ef65 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-450 @@ -0,0 +1 @@ +foo.[bar,baz] \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-451 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-451 new file mode 100644 index 000000000..8e3d22dc5 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-451 @@ -0,0 +1 @@ +foo.{bar: bar.baz[1],includeme: includeme} \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-452 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-452 new file mode 100644 index 000000000..398c7f8b0 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-452 @@ -0,0 +1 @@ +foo.{"bar.baz.two": bar.baz[1].two, includeme: includeme} \ No newline at end 
of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-453 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-453 new file mode 100644 index 000000000..a17644487 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-453 @@ -0,0 +1 @@ +foo.[includeme, bar.baz[*].common] \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-454 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-454 new file mode 100644 index 000000000..da5225ddc --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-454 @@ -0,0 +1 @@ +foo.[includeme, bar.baz[*].none] \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-455 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-455 new file mode 100644 index 000000000..a8870b22b --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-455 @@ -0,0 +1 @@ +foo.[includeme, bar.baz[].common] \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-456 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-456 new file mode 100644 index 000000000..420b1a57c --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-456 @@ -0,0 +1 @@ +reservations[*].instances[*].{id: id, name: name} \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-457 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-457 new file mode 100644 index 000000000..0761ee16d --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-457 @@ -0,0 +1 @@ +reservations[].instances[].{id: id, name: name} \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-458 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-458 new file mode 100644 index 000000000..aa1191a48 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-458 @@ -0,0 +1 @@ +reservations[].instances[].[id, name] \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-459 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-459 new file mode 100644 index 000000000..191028156 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-459 @@ -0,0 +1 @@ +foo \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-46 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-46 new file mode 100644 index 000000000..9a24a464e --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-46 @@ -0,0 +1 @@ +foo[?key == `0`] \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-460 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-460 new file mode 100644 index 000000000..bea506ff2 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-460 @@ -0,0 +1 @@ +foo[] \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-461 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-461 new file mode 100644 index 000000000..13f2c4a0b --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-461 @@ -0,0 +1 @@ +foo[].bar \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-462 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-462 new file mode 100644 index 000000000..edf3d9277 --- /dev/null +++ 
b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-462 @@ -0,0 +1 @@ +foo[].bar[] \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-463 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-463 new file mode 100644 index 000000000..d965466e9 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-463 @@ -0,0 +1 @@ +foo[].bar[].[baz, qux] \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-464 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-464 new file mode 100644 index 000000000..f1822a174 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-464 @@ -0,0 +1 @@ +foo[].bar[].[baz] \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-465 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-465 new file mode 100644 index 000000000..c6f77b80c --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-465 @@ -0,0 +1 @@ +foo[].bar[].[baz, qux][] \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-466 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-466 new file mode 100644 index 000000000..db56262a4 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-466 @@ -0,0 +1 @@ +foo.[baz[*].bar, qux[0]] \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-467 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-467 new file mode 100644 index 000000000..b901067d2 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-467 @@ -0,0 +1 @@ +foo.[baz[*].[bar, boo], qux[0]] \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-468 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-468 new file mode 100644 index 000000000..738479fa6 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-468 @@ -0,0 +1 @@ +foo.[baz[*].not_there || baz[*].bar, qux[0]] \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-469 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-469 new file mode 100644 index 000000000..6926996a7 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-469 @@ -0,0 +1 @@ +[[*],*] \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-47 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-47 new file mode 100644 index 000000000..6d33cc72c --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-47 @@ -0,0 +1 @@ +foo[?key == `1`] \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-470 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-470 new file mode 100644 index 000000000..736be0a31 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-470 @@ -0,0 +1 @@ +[[*]] \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-471 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-471 new file mode 100644 index 000000000..29e1fb20a --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-471 @@ -0,0 +1 @@ +outer.foo || outer.bar \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-472 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-472 new file mode 100644 
index 000000000..c0070ba78 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-472 @@ -0,0 +1 @@ +outer.foo||outer.bar \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-473 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-473 new file mode 100644 index 000000000..661b0bec5 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-473 @@ -0,0 +1 @@ +outer.bar || outer.baz \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-474 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-474 new file mode 100644 index 000000000..296d5aeee --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-474 @@ -0,0 +1 @@ +outer.bar||outer.baz \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-475 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-475 new file mode 100644 index 000000000..ca140f8aa --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-475 @@ -0,0 +1 @@ +outer.bad || outer.foo \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-476 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-476 new file mode 100644 index 000000000..15d309242 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-476 @@ -0,0 +1 @@ +outer.bad||outer.foo \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-477 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-477 new file mode 100644 index 000000000..56148d957 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-477 @@ -0,0 +1 @@ +outer.foo || outer.bad \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-478 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-478 new file mode 100644 index 000000000..6d3cf6d90 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-478 @@ -0,0 +1 @@ +outer.foo||outer.bad \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-479 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-479 new file mode 100644 index 000000000..100fa8339 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-479 @@ -0,0 +1 @@ +outer.bad || outer.alsobad \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-48 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-48 new file mode 100644 index 000000000..de56fc042 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-48 @@ -0,0 +1 @@ +foo[?key == `[0]`] \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-480 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-480 new file mode 100644 index 000000000..64490352b --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-480 @@ -0,0 +1 @@ +outer.bad||outer.alsobad \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-481 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-481 new file mode 100644 index 000000000..af901bde1 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-481 @@ -0,0 +1 @@ +outer.empty_string || outer.foo \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-482 
b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-482 new file mode 100644 index 000000000..36b63e462 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-482 @@ -0,0 +1 @@ +outer.nokey || outer.bool || outer.empty_list || outer.empty_string || outer.foo \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-483 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-483 new file mode 100644 index 000000000..aba584f99 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-483 @@ -0,0 +1 @@ +foo.*.baz | [0] \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-484 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-484 new file mode 100644 index 000000000..4234ac019 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-484 @@ -0,0 +1 @@ +foo.*.baz | [1] \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-485 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-485 new file mode 100644 index 000000000..12330d990 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-485 @@ -0,0 +1 @@ +foo.*.baz | [2] \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-486 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-486 new file mode 100644 index 000000000..1b2d93e19 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-486 @@ -0,0 +1 @@ +foo.bar.* | [0] \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-487 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-487 new file mode 100644 index 000000000..c371fc645 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-487 @@ -0,0 +1 @@ +foo.*.notbaz | [*] \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-488 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-488 new file mode 100644 index 000000000..3c835642e --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-488 @@ -0,0 +1 @@ +foo | bar \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-489 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-489 new file mode 100644 index 000000000..decaa0421 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-489 @@ -0,0 +1 @@ +foo | bar | baz \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-49 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-49 new file mode 100644 index 000000000..49d9c63a3 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-49 @@ -0,0 +1 @@ +foo[?key == `{"bar": [0]}`] \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-490 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-490 new file mode 100644 index 000000000..b91068037 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-490 @@ -0,0 +1 @@ +foo|bar| baz \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-491 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-491 new file mode 100644 index 000000000..11df74d8b --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-491 @@ -0,0 +1 @@ +not_there | [0] \ No newline at end of file diff --git 
a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-492 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-492 new file mode 100644 index 000000000..11df74d8b --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-492 @@ -0,0 +1 @@ +not_there | [0] \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-493 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-493 new file mode 100644 index 000000000..37da9fc0b --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-493 @@ -0,0 +1 @@ +[foo.bar, foo.other] | [0] \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-494 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-494 new file mode 100644 index 000000000..1f4fc943d --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-494 @@ -0,0 +1 @@ +{"a": foo.bar, "b": foo.other} | a \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-495 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-495 new file mode 100644 index 000000000..67c7ea9cf --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-495 @@ -0,0 +1 @@ +{"a": foo.bar, "b": foo.other} | b \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-496 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-496 new file mode 100644 index 000000000..d87f9bba4 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-496 @@ -0,0 +1 @@ +{"a": foo.bar, "b": foo.other} | *.baz \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-497 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-497 new file mode 100644 index 000000000..ebf8e2711 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-497 @@ -0,0 +1 @@ +foo.bam || foo.bar | baz \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-498 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-498 new file mode 100644 index 000000000..f32bc6db5 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-498 @@ -0,0 +1 @@ +foo | not_there || bar \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-499 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-499 new file mode 100644 index 000000000..d04459d90 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-499 @@ -0,0 +1 @@ +foo[*].bar[*] | [0][0] \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-5 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-5 new file mode 100644 index 000000000..b537264a1 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-5 @@ -0,0 +1 @@ +foo.bar.bad \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-50 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-50 new file mode 100644 index 000000000..c17c1df17 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-50 @@ -0,0 +1 @@ +foo[?key == `null`] \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-500 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-500 new file mode 100644 index 000000000..3eb869f43 --- /dev/null +++ 
b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-500 @@ -0,0 +1 @@ +bar[0:10] \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-501 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-501 new file mode 100644 index 000000000..aa5d6be52 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-501 @@ -0,0 +1 @@ +foo[0:10:1] \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-502 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-502 new file mode 100644 index 000000000..1a4d1682d --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-502 @@ -0,0 +1 @@ +foo[0:10] \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-503 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-503 new file mode 100644 index 000000000..5925a578b --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-503 @@ -0,0 +1 @@ +foo[0:10:] \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-504 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-504 new file mode 100644 index 000000000..081e93abd --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-504 @@ -0,0 +1 @@ +foo[0::1] \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-505 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-505 new file mode 100644 index 000000000..922700149 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-505 @@ -0,0 +1 @@ +foo[0::] \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-506 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-506 new file mode 100644 index 000000000..fd2294d66 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-506 @@ -0,0 +1 @@ +foo[0:] \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-507 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-507 new file mode 100644 index 000000000..c6b551d5e --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-507 @@ -0,0 +1 @@ +foo[:10:1] \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-508 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-508 new file mode 100644 index 000000000..503f58da6 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-508 @@ -0,0 +1 @@ +foo[::1] \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-509 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-509 new file mode 100644 index 000000000..f78bb770c --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-509 @@ -0,0 +1 @@ +foo[:10:] \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-51 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-51 new file mode 100644 index 000000000..589a214f4 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-51 @@ -0,0 +1 @@ +foo[?key == `[1]`] \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-510 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-510 new file mode 100644 index 000000000..eb9d2ba88 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-510 @@ -0,0 +1 
@@ +foo[::] \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-511 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-511 new file mode 100644 index 000000000..1921a3d98 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-511 @@ -0,0 +1 @@ +foo[:] \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-512 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-512 new file mode 100644 index 000000000..a87afcb1b --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-512 @@ -0,0 +1 @@ +foo[1:9] \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-513 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-513 new file mode 100644 index 000000000..dbf51d8cd --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-513 @@ -0,0 +1 @@ +foo[0:10:2] \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-514 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-514 new file mode 100644 index 000000000..f7288763a --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-514 @@ -0,0 +1 @@ +foo[5:] \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-515 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-515 new file mode 100644 index 000000000..64395761d --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-515 @@ -0,0 +1 @@ +foo[5::2] \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-516 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-516 new file mode 100644 index 000000000..706bb14dd --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-516 @@ -0,0 +1 @@ +foo[::2] \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-517 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-517 new file mode 100644 index 000000000..8fcfaee95 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-517 @@ -0,0 +1 @@ +foo[::-1] \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-518 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-518 new file mode 100644 index 000000000..f6a00bf9b --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-518 @@ -0,0 +1 @@ +foo[1::2] \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-519 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-519 new file mode 100644 index 000000000..ea068ee06 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-519 @@ -0,0 +1 @@ +foo[10:0:-1] \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-52 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-52 new file mode 100644 index 000000000..214917ac0 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-52 @@ -0,0 +1 @@ +foo[?key == `{"a":2}`] \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-520 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-520 new file mode 100644 index 000000000..1fe14258e --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-520 @@ -0,0 +1 @@ +foo[10:5:-1] \ No newline at end of file diff --git 
a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-521 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-521 new file mode 100644 index 000000000..4ba0e1302 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-521 @@ -0,0 +1 @@ +foo[8:2:-2] \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-522 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-522 new file mode 100644 index 000000000..25db439ff --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-522 @@ -0,0 +1 @@ +foo[0:20] \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-523 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-523 new file mode 100644 index 000000000..8a965920a --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-523 @@ -0,0 +1 @@ +foo[10:-20:-1] \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-524 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-524 new file mode 100644 index 000000000..b1e5ba373 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-524 @@ -0,0 +1 @@ +foo[10:-20] \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-525 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-525 new file mode 100644 index 000000000..06253112e --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-525 @@ -0,0 +1 @@ +foo[-4:-1] \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-526 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-526 new file mode 100644 index 000000000..1e14a6a4c --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-526 @@ -0,0 +1 @@ +foo[:-5:-1] \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-527 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-527 new file mode 100644 index 000000000..aef5c2747 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-527 @@ -0,0 +1 @@ +foo[:2].a \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-528 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-528 new file mode 100644 index 000000000..93c95fcf6 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-528 @@ -0,0 +1 @@ +foo[:2].b \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-529 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-529 new file mode 100644 index 000000000..7e0733e59 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-529 @@ -0,0 +1 @@ +foo[:2].a.b \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-53 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-53 new file mode 100644 index 000000000..4c002ed80 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-53 @@ -0,0 +1 @@ +foo[?`true` == key] \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-530 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-530 new file mode 100644 index 000000000..2438b2576 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-530 @@ -0,0 +1 @@ +bar[::-1].a.b \ No newline at end of file diff --git 
a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-531 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-531 new file mode 100644 index 000000000..549994b6b --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-531 @@ -0,0 +1 @@ +bar[:2].a.b \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-532 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-532 new file mode 100644 index 000000000..ab98292b4 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-532 @@ -0,0 +1 @@ +baz[:2].a \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-533 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-533 new file mode 100644 index 000000000..65fca9687 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-533 @@ -0,0 +1 @@ +[:] \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-534 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-534 new file mode 100644 index 000000000..18c5daf7b --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-534 @@ -0,0 +1 @@ +[:2].a \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-535 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-535 new file mode 100644 index 000000000..1bb84f7d4 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-535 @@ -0,0 +1 @@ +[::-1].a \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-536 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-536 new file mode 100644 index 000000000..7a0416f05 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-536 @@ -0,0 +1 @@ +[:2].b \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-537 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-537 new file mode 100644 index 000000000..4d5f9756e --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-537 @@ -0,0 +1 @@ +foo.bar \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-538 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-538 new file mode 100644 index 000000000..191028156 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-538 @@ -0,0 +1 @@ +foo \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-539 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-539 new file mode 100644 index 000000000..f59ec20aa --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-539 @@ -0,0 +1 @@ +* \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-54 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-54 new file mode 100644 index 000000000..23d27073e --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-54 @@ -0,0 +1 @@ +foo[?`false` == key] \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-540 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-540 new file mode 100644 index 000000000..dee569574 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-540 @@ -0,0 +1 @@ +*.* \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-541 
b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-541 new file mode 100644 index 000000000..1a16f7418 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-541 @@ -0,0 +1 @@ +*.foo \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-542 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-542 new file mode 100644 index 000000000..7e8066d39 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-542 @@ -0,0 +1 @@ +*[0] \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-543 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-543 new file mode 100644 index 000000000..0637a088a --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-543 @@ -0,0 +1 @@ +[] \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-544 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-544 new file mode 100644 index 000000000..6e7ea636e --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-544 @@ -0,0 +1 @@ +[0] \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-545 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-545 new file mode 100644 index 000000000..5a5194647 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-545 @@ -0,0 +1 @@ +[*] \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-546 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-546 new file mode 100644 index 000000000..416127425 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-546 @@ -0,0 +1 @@ +*.["0"] \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-547 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-547 new file mode 100644 index 000000000..cd9fb6ba7 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-547 @@ -0,0 +1 @@ +[*].bar \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-548 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-548 new file mode 100644 index 000000000..9f3ada480 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-548 @@ -0,0 +1 @@ +[*][0] \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-549 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-549 new file mode 100644 index 000000000..9b0b2f818 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-549 @@ -0,0 +1 @@ +foo[0] \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-55 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-55 new file mode 100644 index 000000000..6d840ee56 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-55 @@ -0,0 +1 @@ +foo[?`0` == key] \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-550 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-550 new file mode 100644 index 000000000..b23413b92 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-550 @@ -0,0 +1 @@ +foo.[*] \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-551 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-551 new file mode 100644 index 000000000..08ab2e1c4 --- 
/dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-551 @@ -0,0 +1 @@ +foo.[abc] \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-552 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-552 new file mode 100644 index 000000000..78b05a5c6 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-552 @@ -0,0 +1 @@ +foo.[abc, def] \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-553 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-553 new file mode 100644 index 000000000..1e7b886e7 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-553 @@ -0,0 +1 @@ +a.{foo: bar} \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-554 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-554 new file mode 100644 index 000000000..91b4c9896 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-554 @@ -0,0 +1 @@ +a.{foo: bar, baz: bam} \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-555 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-555 new file mode 100644 index 000000000..8301ef981 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-555 @@ -0,0 +1 @@ +{"\\":{" ":*}} \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-556 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-556 new file mode 100644 index 000000000..8f75cc913 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-556 @@ -0,0 +1 @@ +foo || bar \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-557 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-557 new file mode 100644 index 000000000..e5f122c56 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-557 @@ -0,0 +1 @@ +foo.[a || b] \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-558 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-558 new file mode 100644 index 000000000..39d191432 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-558 @@ -0,0 +1 @@ +foo[?bar==`"baz"`] \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-559 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-559 new file mode 100644 index 000000000..d08bbe250 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-559 @@ -0,0 +1 @@ +foo[? 
bar == `"baz"` ] \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-56 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-56 new file mode 100644 index 000000000..addaf204c --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-56 @@ -0,0 +1 @@ +foo[?`1` == key] \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-560 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-560 new file mode 100644 index 000000000..a77f35581 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-560 @@ -0,0 +1 @@ +foo[?a.b.c==d.e.f] \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-561 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-561 new file mode 100644 index 000000000..c9697aa48 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-561 @@ -0,0 +1 @@ +foo[?bar==`[0, 1, 2]`] \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-562 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-562 new file mode 100644 index 000000000..fd7064a08 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-562 @@ -0,0 +1 @@ +foo[?bar==`["a", "b", "c"]`] \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-563 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-563 new file mode 100644 index 000000000..61e5e1b8f --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-563 @@ -0,0 +1 @@ +foo[?bar==`["foo\`bar"]`] \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-564 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-564 new file mode 100644 index 000000000..bc9d8af1d --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-564 @@ -0,0 +1 @@ +[?"\\">`"foo"`] \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-565 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-565 new file mode 100644 index 000000000..2dd54dc39 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-565 @@ -0,0 +1 @@ +[?"\\" > `"foo"`] \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-566 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-566 new file mode 100644 index 000000000..191028156 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-566 @@ -0,0 +1 @@ +foo \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-567 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-567 new file mode 100644 index 000000000..7e9668e78 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-567 @@ -0,0 +1 @@ +"foo" \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-568 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-568 new file mode 100644 index 000000000..d58ac16bf --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-568 @@ -0,0 +1 @@ +"\\" \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-569 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-569 new file mode 100644 index 000000000..33ac9fba6 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-569 @@ -0,0 +1 @@ +*||*|*|* \ 
No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-57 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-57 new file mode 100644 index 000000000..acf2435c7 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-57 @@ -0,0 +1 @@ +foo[?`[0]` == key] \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-570 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-570 new file mode 100644 index 000000000..99e19638c --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-570 @@ -0,0 +1 @@ +*[]||[*] \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-571 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-571 new file mode 100644 index 000000000..be0845011 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-571 @@ -0,0 +1 @@ +[*.*] \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-572 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-572 new file mode 100644 index 000000000..a84b51e1c --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-572 @@ -0,0 +1 @@ +foo[]."✓" \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-573 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-573 new file mode 100644 index 000000000..c2de55815 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-573 @@ -0,0 +1 @@ +"☯" \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-574 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-574 new file mode 100644 index 000000000..dc2dda0bb --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-574 @@ -0,0 +1 @@ +"♪♫•*¨*•.¸¸❤¸¸.•*¨*•♫♪" \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-575 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-575 new file mode 100644 index 000000000..a2d3d5f6a --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-575 @@ -0,0 +1 @@ +"☃" \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-576 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-576 new file mode 100644 index 000000000..0971c37ea --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-576 @@ -0,0 +1 @@ +foo.*.baz \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-577 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-577 new file mode 100644 index 000000000..0e39dfd69 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-577 @@ -0,0 +1 @@ +foo.bar.* \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-578 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-578 new file mode 100644 index 000000000..89c1ce22d --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-578 @@ -0,0 +1 @@ +foo.*.notbaz \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-579 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-579 new file mode 100644 index 000000000..5199b9f95 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-579 @@ -0,0 +1 @@ +foo.*.notbaz[0] \ No newline at end of file diff --git
a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-58 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-58 new file mode 100644 index 000000000..99fe382c6 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-58 @@ -0,0 +1 @@ +foo[?`{"bar": [0]}` == key] \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-580 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-580 new file mode 100644 index 000000000..5bb6d4ae7 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-580 @@ -0,0 +1 @@ +foo.*.notbaz[-1] \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-581 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-581 new file mode 100644 index 000000000..edac73189 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-581 @@ -0,0 +1 @@ +foo.* \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-582 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-582 new file mode 100644 index 000000000..458d0a6dd --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-582 @@ -0,0 +1 @@ +foo.*.* \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-583 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-583 new file mode 100644 index 000000000..f757fd534 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-583 @@ -0,0 +1 @@ +foo.*.*.* \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-584 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-584 new file mode 100644 index 000000000..670049d96 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-584 @@ -0,0 +1 @@ +foo.*.*.*.* \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-585 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-585 new file mode 100644 index 000000000..3c88caafe --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-585 @@ -0,0 +1 @@ +*.bar \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-586 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-586 new file mode 100644 index 000000000..f59ec20aa --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-586 @@ -0,0 +1 @@ +* \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-587 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-587 new file mode 100644 index 000000000..0852fcc78 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-587 @@ -0,0 +1 @@ +*.sub1 \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-588 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-588 new file mode 100644 index 000000000..dee569574 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-588 @@ -0,0 +1 @@ +*.* \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-589 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-589 new file mode 100644 index 000000000..66781bba4 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-589 @@ -0,0 +1 @@ +*.*.foo \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-59 
b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-59 new file mode 100644 index 000000000..4aad20ae6 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-59 @@ -0,0 +1 @@ +foo[?`null` == key] \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-590 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-590 new file mode 100644 index 000000000..0db15d97e --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-590 @@ -0,0 +1 @@ +*.sub1.foo \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-591 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-591 new file mode 100644 index 000000000..b24be9d7d --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-591 @@ -0,0 +1 @@ +foo[*].bar \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-592 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-592 new file mode 100644 index 000000000..e6efe133f --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-592 @@ -0,0 +1 @@ +foo[*].notbar \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-593 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-593 new file mode 100644 index 000000000..5a5194647 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-593 @@ -0,0 +1 @@ +[*] \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-594 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-594 new file mode 100644 index 000000000..cd9fb6ba7 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-594 @@ -0,0 +1 @@ +[*].bar \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-595 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-595 new file mode 100644 index 000000000..cbf1a5d59 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-595 @@ -0,0 +1 @@ +[*].notbar \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-596 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-596 new file mode 100644 index 000000000..8bd13b7eb --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-596 @@ -0,0 +1 @@ +foo.bar[*].baz \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-597 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-597 new file mode 100644 index 000000000..7239f3e88 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-597 @@ -0,0 +1 @@ +foo.bar[*].baz[0] \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-598 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-598 new file mode 100644 index 000000000..f5e431d9e --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-598 @@ -0,0 +1 @@ +foo.bar[*].baz[1] \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-599 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-599 new file mode 100644 index 000000000..d0c259539 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-599 @@ -0,0 +1 @@ +foo.bar[*].baz[2] \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-6 
b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-6 new file mode 100644 index 000000000..b9749b748 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-6 @@ -0,0 +1 @@ +foo.bad \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-60 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-60 new file mode 100644 index 000000000..dac67509b --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-60 @@ -0,0 +1 @@ +foo[?`[1]` == key] \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-600 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-600 new file mode 100644 index 000000000..a6388271e --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-600 @@ -0,0 +1 @@ +foo.bar[*].baz[3] \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-601 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-601 new file mode 100644 index 000000000..2a66ffe93 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-601 @@ -0,0 +1 @@ +foo.bar[*] \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-602 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-602 new file mode 100644 index 000000000..b6b369543 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-602 @@ -0,0 +1 @@ +foo.bar[0] \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-603 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-603 new file mode 100644 index 000000000..7e57f9e74 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-603 @@ -0,0 +1 @@ +foo.bar[0][0] \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-604 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-604 new file mode 100644 index 000000000..c5f8bef0b --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-604 @@ -0,0 +1 @@ +foo.bar[0][0][0] \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-605 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-605 new file mode 100644 index 000000000..3decf0803 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-605 @@ -0,0 +1 @@ +foo.bar[0][0][0][0] \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-606 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-606 new file mode 100644 index 000000000..655e2959b --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-606 @@ -0,0 +1 @@ +foo[0][0] \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-607 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-607 new file mode 100644 index 000000000..2aa159718 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-607 @@ -0,0 +1 @@ +foo[*].bar[*].kind \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-608 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-608 new file mode 100644 index 000000000..556b380ba --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-608 @@ -0,0 +1 @@ +foo[*].bar[0].kind \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-609 
b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-609 new file mode 100644 index 000000000..0de3229b8 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-609 @@ -0,0 +1 @@ +foo[*].bar.kind \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-61 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-61 new file mode 100644 index 000000000..130ed3b37 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-61 @@ -0,0 +1 @@ +foo[?`{"a":2}` == key] \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-610 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-610 new file mode 100644 index 000000000..3b511f133 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-610 @@ -0,0 +1 @@ +foo[*].bar[0] \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-611 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-611 new file mode 100644 index 000000000..c8dfa16e6 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-611 @@ -0,0 +1 @@ +foo[*].bar[1] \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-612 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-612 new file mode 100644 index 000000000..69f04ee23 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-612 @@ -0,0 +1 @@ +foo[*].bar[2] \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-613 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-613 new file mode 100644 index 000000000..3b511f133 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-613 @@ -0,0 +1 @@ +foo[*].bar[0] \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-614 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-614 new file mode 100644 index 000000000..03e0c0cb9 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-614 @@ -0,0 +1 @@ +foo[*][0] \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-615 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-615 new file mode 100644 index 000000000..ac1c89668 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-615 @@ -0,0 +1 @@ +foo[*][1] \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-616 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-616 new file mode 100644 index 000000000..03e0c0cb9 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-616 @@ -0,0 +1 @@ +foo[*][0] \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-617 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-617 new file mode 100644 index 000000000..ac1c89668 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-617 @@ -0,0 +1 @@ +foo[*][1] \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-618 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-618 new file mode 100644 index 000000000..6494cf1c6 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-618 @@ -0,0 +1 @@ +foo[*][0][0] \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-619 
b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-619 new file mode 100644 index 000000000..1406be572 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-619 @@ -0,0 +1 @@ +foo[*][1][0] \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-62 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-62 new file mode 100644 index 000000000..3d15fcc16 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-62 @@ -0,0 +1 @@ +foo[?key != `true`] \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-620 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-620 new file mode 100644 index 000000000..72b5aa281 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-620 @@ -0,0 +1 @@ +foo[*][0][1] \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-621 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-621 new file mode 100644 index 000000000..02a26491a --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-621 @@ -0,0 +1 @@ +foo[*][1][1] \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-622 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-622 new file mode 100644 index 000000000..cb08037e2 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-622 @@ -0,0 +1 @@ +foo[*][2] \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-623 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-623 new file mode 100644 index 000000000..91d695995 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-623 @@ -0,0 +1 @@ +foo[*][2][2] \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-624 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-624 new file mode 100644 index 000000000..f40f261ad --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-624 @@ -0,0 +1 @@ +bar[*] \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-625 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-625 new file mode 100644 index 000000000..03904b1de --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-625 @@ -0,0 +1 @@ +bar[*].baz[*] \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-626 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-626 new file mode 100644 index 000000000..fd7c21c34 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-626 @@ -0,0 +1 @@ +string[*] \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-627 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-627 new file mode 100644 index 000000000..d7ca4719a --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-627 @@ -0,0 +1 @@ +hash[*] \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-628 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-628 new file mode 100644 index 000000000..b3ddffe3c --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-628 @@ -0,0 +1 @@ +number[*] \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-629 
b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-629 new file mode 100644 index 000000000..c03cd39eb --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-629 @@ -0,0 +1 @@ +nullvalue[*] \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-63 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-63 new file mode 100644 index 000000000..08731af69 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-63 @@ -0,0 +1 @@ +foo[?key != `false`] \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-630 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-630 new file mode 100644 index 000000000..b3c40cd53 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-630 @@ -0,0 +1 @@ +string[*].foo \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-631 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-631 new file mode 100644 index 000000000..c5930d543 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-631 @@ -0,0 +1 @@ +hash[*].foo \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-632 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-632 new file mode 100644 index 000000000..cc0b1a489 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-632 @@ -0,0 +1 @@ +number[*].foo \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-633 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-633 new file mode 100644 index 000000000..d677b9658 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-633 @@ -0,0 +1 @@ +nullvalue[*].foo \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-634 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-634 new file mode 100644 index 000000000..c11666401 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-634 @@ -0,0 +1 @@ +nullvalue[*].foo[*].bar \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-635 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-635 new file mode 100644 index 000000000..e33997710 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-635 @@ -0,0 +1 @@ +string.* \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-636 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-636 new file mode 100644 index 000000000..76f53453a --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-636 @@ -0,0 +1 @@ +hash.* \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-637 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-637 new file mode 100644 index 000000000..dd485072f --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-637 @@ -0,0 +1 @@ +number.* \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-638 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-638 new file mode 100644 index 000000000..16000c003 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-638 @@ -0,0 +1 @@ +array.* \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-639 
b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-639 new file mode 100644 index 000000000..1d0d03ed3 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-639 @@ -0,0 +1 @@ +nullvalue.* \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-64 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-64 new file mode 100644 index 000000000..b67aebe98 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-64 @@ -0,0 +1 @@ +foo[?key != `0`] \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-640 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-640 new file mode 100644 index 000000000..7e8066d39 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-640 @@ -0,0 +1 @@ +*[0] \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-641 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-641 new file mode 100644 index 000000000..41ebe5ba9 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-641 @@ -0,0 +1 @@ +`foo` \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-642 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-642 new file mode 100644 index 000000000..fe0397993 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-642 @@ -0,0 +1 @@ +`foo\"quote` \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-643 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-643 new file mode 100644 index 000000000..1a27fd80c --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-643 @@ -0,0 +1 @@ +`✓` \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-644 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-644 new file mode 100644 index 000000000..559a13456 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-644 @@ -0,0 +1 @@ +`foo\"bar` \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-645 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-645 new file mode 100644 index 000000000..e31621b43 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-645 @@ -0,0 +1 @@ +`1\`` \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-646 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-646 new file mode 100644 index 000000000..6bf7a1036 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-646 @@ -0,0 +1 @@ +`\\`.{a:`b`} \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-647 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-647 new file mode 100644 index 000000000..41ebe5ba9 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-647 @@ -0,0 +1 @@ +`foo` \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-648 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-648 new file mode 100644 index 000000000..28b9bcbbb --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-648 @@ -0,0 +1 @@ +` foo` \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-649 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-649 new file mode 100644 index 
000000000..41ebe5ba9 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-649 @@ -0,0 +1 @@ +`foo` \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-65 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-65 new file mode 100644 index 000000000..d3ac793bb --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-65 @@ -0,0 +1 @@ +foo[?key != `1`] \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-650 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-650 new file mode 100644 index 000000000..fe0397993 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-650 @@ -0,0 +1 @@ +`foo\"quote` \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-651 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-651 new file mode 100644 index 000000000..1a27fd80c --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-651 @@ -0,0 +1 @@ +`✓` \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-652 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-652 new file mode 100644 index 000000000..559a13456 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-652 @@ -0,0 +1 @@ +`foo\"bar` \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-653 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-653 new file mode 100644 index 000000000..e31621b43 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-653 @@ -0,0 +1 @@ +`1\`` \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-654 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-654 new file mode 100644 index 000000000..6bf7a1036 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-654 @@ -0,0 +1 @@ +`\\`.{a:`b`} \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-655 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-655 new file mode 100644 index 000000000..41ebe5ba9 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-655 @@ -0,0 +1 @@ +`foo` \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-656 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-656 new file mode 100644 index 000000000..28b9bcbbb --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-656 @@ -0,0 +1 @@ +` foo` \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-66 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-66 new file mode 100644 index 000000000..065295bc1 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-66 @@ -0,0 +1 @@ +foo[?key != `null`] \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-67 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-67 new file mode 100644 index 000000000..43d164927 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-67 @@ -0,0 +1 @@ +foo[?key != `[1]`] \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-68 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-68 new file mode 100644 index 000000000..6b884fa86 --- /dev/null +++ 
b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-68 @@ -0,0 +1 @@ +foo[?key != `{"a":2}`] \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-69 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-69 new file mode 100644 index 000000000..d85c779d0 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-69 @@ -0,0 +1 @@ +foo[?`true` != key] \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-7 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-7 new file mode 100644 index 000000000..44d6628cd --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-7 @@ -0,0 +1 @@ +bad \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-70 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-70 new file mode 100644 index 000000000..3e6dcf304 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-70 @@ -0,0 +1 @@ +foo[?`false` != key] \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-71 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-71 new file mode 100644 index 000000000..bdb820b30 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-71 @@ -0,0 +1 @@ +foo[?`0` != key] \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-72 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-72 new file mode 100644 index 000000000..3f3048a00 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-72 @@ -0,0 +1 @@ +foo[?`1` != key] \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-73 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-73 new file mode 100644 index 000000000..dacc25724 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-73 @@ -0,0 +1 @@ +foo[?`null` != key] \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-74 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-74 new file mode 100644 index 000000000..32ebae880 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-74 @@ -0,0 +1 @@ +foo[?`[1]` != key] \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-75 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-75 new file mode 100644 index 000000000..dcd023e0f --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-75 @@ -0,0 +1 @@ +foo[?`{"a":2}` != key] \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-76 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-76 new file mode 100644 index 000000000..e08cc13cb --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-76 @@ -0,0 +1 @@ +reservations[].instances[?bar==`1`] \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-77 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-77 new file mode 100644 index 000000000..1ec43f45f --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-77 @@ -0,0 +1 @@ +reservations[*].instances[?bar==`1`] \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-78 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-78 new file mode 100644 index 000000000..303871163 --- 
/dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-78 @@ -0,0 +1 @@ +reservations[].instances[?bar==`1`][] \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-79 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-79 new file mode 100644 index 000000000..e3875746b --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-79 @@ -0,0 +1 @@ +foo[?bar==`1`].bar[0] \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-8 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-8 new file mode 100644 index 000000000..da7bc1ccf --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-8 @@ -0,0 +1 @@ +bad.morebad.morebad \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-80 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-80 new file mode 100644 index 000000000..5c3d68356 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-80 @@ -0,0 +1 @@ +foo[?a==`1`].b.c \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-81 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-81 new file mode 100644 index 000000000..6232808f0 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-81 @@ -0,0 +1 @@ +abs(foo) \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-82 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-82 new file mode 100644 index 000000000..6232808f0 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-82 @@ -0,0 +1 @@ +abs(foo) \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-83 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-83 new file mode 100644 index 000000000..29497f4ff --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-83 @@ -0,0 +1 @@ +abs(array[1]) \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-84 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-84 new file mode 100644 index 000000000..29497f4ff --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-84 @@ -0,0 +1 @@ +abs(array[1]) \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-85 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-85 new file mode 100644 index 000000000..346696563 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-85 @@ -0,0 +1 @@ +abs(`-24`) \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-86 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-86 new file mode 100644 index 000000000..346696563 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-86 @@ -0,0 +1 @@ +abs(`-24`) \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-87 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-87 new file mode 100644 index 000000000..c6268f847 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-87 @@ -0,0 +1 @@ +avg(numbers) \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-88 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-88 new file mode 100644 index 000000000..7ce703695 --- /dev/null +++ 
b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-88 @@ -0,0 +1 @@ +ceil(`1.2`) \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-89 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-89 new file mode 100644 index 000000000..0561bc26d --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-89 @@ -0,0 +1 @@ +ceil(decimals[0]) \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-9 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-9 new file mode 100644 index 000000000..191028156 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-9 @@ -0,0 +1 @@ +foo \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-90 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-90 new file mode 100644 index 000000000..c78c1fc30 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-90 @@ -0,0 +1 @@ +ceil(decimals[1]) \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-91 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-91 new file mode 100644 index 000000000..ebcb4bbdb --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-91 @@ -0,0 +1 @@ +ceil(decimals[2]) \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-92 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-92 new file mode 100644 index 000000000..6edbf1afe --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-92 @@ -0,0 +1 @@ +contains('abc', 'a') \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-93 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-93 new file mode 100644 index 000000000..d2b2f070d --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-93 @@ -0,0 +1 @@ +contains('abc', 'd') \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-94 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-94 new file mode 100644 index 000000000..3535da2ec --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-94 @@ -0,0 +1 @@ +contains(strings, 'a') \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-95 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-95 new file mode 100644 index 000000000..ba839fe60 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-95 @@ -0,0 +1 @@ +contains(decimals, `1.2`) \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-96 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-96 new file mode 100644 index 000000000..f43581869 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-96 @@ -0,0 +1 @@ +contains(decimals, `false`) \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-97 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-97 new file mode 100644 index 000000000..adb65fc01 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-97 @@ -0,0 +1 @@ +ends_with(str, 'r') \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-98 b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-98 new file mode 100644 index 000000000..93d6901be --- /dev/null +++ 
b/vendor/github.com/jmespath/go-jmespath/fuzz/corpus/expr-98 @@ -0,0 +1 @@ +ends_with(str, 'tr') \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/jmespath.go b/vendor/github.com/jmespath/go-jmespath/fuzz/jmespath.go new file mode 100644 index 000000000..c7df08782 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/jmespath.go @@ -0,0 +1,13 @@ +package jmespath + +import "github.com/jmespath/go-jmespath" + +// Fuzz will fuzz test the JMESPath parser. +func Fuzz(data []byte) int { + p := jmespath.NewParser() + _, err := p.Parse(string(data)) + if err != nil { + return 1 + } + return 0 +} diff --git a/vendor/github.com/jmespath/go-jmespath/interpreter.go b/vendor/github.com/jmespath/go-jmespath/interpreter.go new file mode 100644 index 000000000..13c74604c --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/interpreter.go @@ -0,0 +1,418 @@ +package jmespath + +import ( + "errors" + "reflect" + "unicode" + "unicode/utf8" +) + +/* This is a tree based interpreter. It walks the AST and directly + interprets the AST to search through a JSON document. +*/ + +type treeInterpreter struct { + fCall *functionCaller +} + +func newInterpreter() *treeInterpreter { + interpreter := treeInterpreter{} + interpreter.fCall = newFunctionCaller() + return &interpreter +} + +type expRef struct { + ref ASTNode +} + +// Execute takes an ASTNode and input data and interprets the AST directly. +// It will produce the result of applying the JMESPath expression associated +// with the ASTNode to the input data "value". +func (intr *treeInterpreter) Execute(node ASTNode, value interface{}) (interface{}, error) { + switch node.nodeType { + case ASTComparator: + left, err := intr.Execute(node.children[0], value) + if err != nil { + return nil, err + } + right, err := intr.Execute(node.children[1], value) + if err != nil { + return nil, err + } + switch node.value { + case tEQ: + return objsEqual(left, right), nil + case tNE: + return !objsEqual(left, right), nil + } + leftNum, ok := left.(float64) + if !ok { + return nil, nil + } + rightNum, ok := right.(float64) + if !ok { + return nil, nil + } + switch node.value { + case tGT: + return leftNum > rightNum, nil + case tGTE: + return leftNum >= rightNum, nil + case tLT: + return leftNum < rightNum, nil + case tLTE: + return leftNum <= rightNum, nil + } + case ASTExpRef: + return expRef{ref: node.children[0]}, nil + case ASTFunctionExpression: + resolvedArgs := []interface{}{} + for _, arg := range node.children { + current, err := intr.Execute(arg, value) + if err != nil { + return nil, err + } + resolvedArgs = append(resolvedArgs, current) + } + return intr.fCall.CallFunction(node.value.(string), resolvedArgs, intr) + case ASTField: + if m, ok := value.(map[string]interface{}); ok { + key := node.value.(string) + return m[key], nil + } + return intr.fieldFromStruct(node.value.(string), value) + case ASTFilterProjection: + left, err := intr.Execute(node.children[0], value) + if err != nil { + return nil, nil + } + sliceType, ok := left.([]interface{}) + if !ok { + if isSliceType(left) { + return intr.filterProjectionWithReflection(node, left) + } + return nil, nil + } + compareNode := node.children[2] + collected := []interface{}{} + for _, element := range sliceType { + result, err := intr.Execute(compareNode, element) + if err != nil { + return nil, err + } + if !isFalse(result) { + current, err := intr.Execute(node.children[1], element) + if err != nil { + return nil, err + } + if current != nil { + collected = 
append(collected, current) + } + } + } + return collected, nil + case ASTFlatten: + left, err := intr.Execute(node.children[0], value) + if err != nil { + return nil, nil + } + sliceType, ok := left.([]interface{}) + if !ok { + // If we can't type convert to []interface{}, there's + // a chance this could still work via reflection if we're + // dealing with user provided types. + if isSliceType(left) { + return intr.flattenWithReflection(left) + } + return nil, nil + } + flattened := []interface{}{} + for _, element := range sliceType { + if elementSlice, ok := element.([]interface{}); ok { + flattened = append(flattened, elementSlice...) + } else if isSliceType(element) { + reflectFlat := []interface{}{} + v := reflect.ValueOf(element) + for i := 0; i < v.Len(); i++ { + reflectFlat = append(reflectFlat, v.Index(i).Interface()) + } + flattened = append(flattened, reflectFlat...) + } else { + flattened = append(flattened, element) + } + } + return flattened, nil + case ASTIdentity, ASTCurrentNode: + return value, nil + case ASTIndex: + if sliceType, ok := value.([]interface{}); ok { + index := node.value.(int) + if index < 0 { + index += len(sliceType) + } + if index < len(sliceType) && index >= 0 { + return sliceType[index], nil + } + return nil, nil + } + // Otherwise try via reflection. + rv := reflect.ValueOf(value) + if rv.Kind() == reflect.Slice { + index := node.value.(int) + if index < 0 { + index += rv.Len() + } + if index < rv.Len() && index >= 0 { + v := rv.Index(index) + return v.Interface(), nil + } + } + return nil, nil + case ASTKeyValPair: + return intr.Execute(node.children[0], value) + case ASTLiteral: + return node.value, nil + case ASTMultiSelectHash: + if value == nil { + return nil, nil + } + collected := make(map[string]interface{}) + for _, child := range node.children { + current, err := intr.Execute(child, value) + if err != nil { + return nil, err + } + key := child.value.(string) + collected[key] = current + } + return collected, nil + case ASTMultiSelectList: + if value == nil { + return nil, nil + } + collected := []interface{}{} + for _, child := range node.children { + current, err := intr.Execute(child, value) + if err != nil { + return nil, err + } + collected = append(collected, current) + } + return collected, nil + case ASTOrExpression: + matched, err := intr.Execute(node.children[0], value) + if err != nil { + return nil, err + } + if isFalse(matched) { + matched, err = intr.Execute(node.children[1], value) + if err != nil { + return nil, err + } + } + return matched, nil + case ASTAndExpression: + matched, err := intr.Execute(node.children[0], value) + if err != nil { + return nil, err + } + if isFalse(matched) { + return matched, nil + } + return intr.Execute(node.children[1], value) + case ASTNotExpression: + matched, err := intr.Execute(node.children[0], value) + if err != nil { + return nil, err + } + if isFalse(matched) { + return true, nil + } + return false, nil + case ASTPipe: + result := value + var err error + for _, child := range node.children { + result, err = intr.Execute(child, result) + if err != nil { + return nil, err + } + } + return result, nil + case ASTProjection: + left, err := intr.Execute(node.children[0], value) + if err != nil { + return nil, err + } + sliceType, ok := left.([]interface{}) + if !ok { + if isSliceType(left) { + return intr.projectWithReflection(node, left) + } + return nil, nil + } + collected := []interface{}{} + var current interface{} + for _, element := range sliceType { + current, err = 
intr.Execute(node.children[1], element) + if err != nil { + return nil, err + } + if current != nil { + collected = append(collected, current) + } + } + return collected, nil + case ASTSubexpression, ASTIndexExpression: + left, err := intr.Execute(node.children[0], value) + if err != nil { + return nil, err + } + return intr.Execute(node.children[1], left) + case ASTSlice: + sliceType, ok := value.([]interface{}) + if !ok { + if isSliceType(value) { + return intr.sliceWithReflection(node, value) + } + return nil, nil + } + parts := node.value.([]*int) + sliceParams := make([]sliceParam, 3) + for i, part := range parts { + if part != nil { + sliceParams[i].Specified = true + sliceParams[i].N = *part + } + } + return slice(sliceType, sliceParams) + case ASTValueProjection: + left, err := intr.Execute(node.children[0], value) + if err != nil { + return nil, nil + } + mapType, ok := left.(map[string]interface{}) + if !ok { + return nil, nil + } + values := make([]interface{}, 0, len(mapType)) + for _, value := range mapType { + values = append(values, value) + } + collected := []interface{}{} + for _, element := range values { + current, err := intr.Execute(node.children[1], element) + if err != nil { + return nil, err + } + if current != nil { + collected = append(collected, current) + } + } + return collected, nil + } + return nil, errors.New("Unknown AST node: " + node.nodeType.String()) +} + +func (intr *treeInterpreter) fieldFromStruct(key string, value interface{}) (interface{}, error) { + rv := reflect.ValueOf(value) + first, n := utf8.DecodeRuneInString(key) + fieldName := string(unicode.ToUpper(first)) + key[n:] + if rv.Kind() == reflect.Struct { + v := rv.FieldByName(fieldName) + if !v.IsValid() { + return nil, nil + } + return v.Interface(), nil + } else if rv.Kind() == reflect.Ptr { + // Handle a single level of pointer indirection; deeper nesting is not followed. + if rv.IsNil() { + return nil, nil + } + rv = rv.Elem() + v := rv.FieldByName(fieldName) + if !v.IsValid() { + return nil, nil + } + return v.Interface(), nil + } + return nil, nil +} + +func (intr *treeInterpreter) flattenWithReflection(value interface{}) (interface{}, error) { + v := reflect.ValueOf(value) + flattened := []interface{}{} + for i := 0; i < v.Len(); i++ { + element := v.Index(i).Interface() + if reflect.TypeOf(element).Kind() == reflect.Slice { + // Then insert the contents of the element + // slice into the flattened slice, + // i.e. flattened = append(flattened, mySlice...)
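+ // Editor-added illustration (not upstream code): given the value
+ // [][]int{{1, 2}, {3}}, each element reports Kind() == reflect.Slice,
+ // so its contents are copied out one at a time and the final result
+ // is []interface{}{1, 2, 3}.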
+ elementV := reflect.ValueOf(element) + for j := 0; j < elementV.Len(); j++ { + flattened = append( + flattened, elementV.Index(j).Interface()) + } + } else { + flattened = append(flattened, element) + } + } + return flattened, nil +} + +func (intr *treeInterpreter) sliceWithReflection(node ASTNode, value interface{}) (interface{}, error) { + v := reflect.ValueOf(value) + parts := node.value.([]*int) + sliceParams := make([]sliceParam, 3) + for i, part := range parts { + if part != nil { + sliceParams[i].Specified = true + sliceParams[i].N = *part + } + } + final := []interface{}{} + for i := 0; i < v.Len(); i++ { + element := v.Index(i).Interface() + final = append(final, element) + } + return slice(final, sliceParams) +} + +func (intr *treeInterpreter) filterProjectionWithReflection(node ASTNode, value interface{}) (interface{}, error) { + compareNode := node.children[2] + collected := []interface{}{} + v := reflect.ValueOf(value) + for i := 0; i < v.Len(); i++ { + element := v.Index(i).Interface() + result, err := intr.Execute(compareNode, element) + if err != nil { + return nil, err + } + if !isFalse(result) { + current, err := intr.Execute(node.children[1], element) + if err != nil { + return nil, err + } + if current != nil { + collected = append(collected, current) + } + } + } + return collected, nil +} + +func (intr *treeInterpreter) projectWithReflection(node ASTNode, value interface{}) (interface{}, error) { + collected := []interface{}{} + v := reflect.ValueOf(value) + for i := 0; i < v.Len(); i++ { + element := v.Index(i).Interface() + result, err := intr.Execute(node.children[1], element) + if err != nil { + return nil, err + } + if result != nil { + collected = append(collected, result) + } + } + return collected, nil +} diff --git a/vendor/github.com/jmespath/go-jmespath/interpreter_test.go b/vendor/github.com/jmespath/go-jmespath/interpreter_test.go new file mode 100644 index 000000000..5b529c4f3 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/interpreter_test.go @@ -0,0 +1,213 @@ +package jmespath + +import ( + "encoding/json" + "testing" + + "github.com/stretchr/testify/assert" +) + +type scalars struct { + Foo string + Bar string +} + +type sliceType struct { + A string + B []scalars + C []*scalars +} + +type benchmarkStruct struct { + Fooasdfasdfasdfasdf string +} + +type benchmarkNested struct { + Fooasdfasdfasdfasdf nestedA +} + +type nestedA struct { + Fooasdfasdfasdfasdf nestedB +} + +type nestedB struct { + Fooasdfasdfasdfasdf nestedC +} + +type nestedC struct { + Fooasdfasdfasdfasdf string +} + +type nestedSlice struct { + A []sliceType +} + +func TestCanSupportEmptyInterface(t *testing.T) { + assert := assert.New(t) + data := make(map[string]interface{}) + data["foo"] = "bar" + result, err := Search("foo", data) + assert.Nil(err) + assert.Equal("bar", result) +} + +func TestCanSupportUserDefinedStructsValue(t *testing.T) { + assert := assert.New(t) + s := scalars{Foo: "one", Bar: "bar"} + result, err := Search("Foo", s) + assert.Nil(err) + assert.Equal("one", result) +} + +func TestCanSupportUserDefinedStructsRef(t *testing.T) { + assert := assert.New(t) + s := scalars{Foo: "one", Bar: "bar"} + result, err := Search("Foo", &s) + assert.Nil(err) + assert.Equal("one", result) +} + +func TestCanSupportStructWithSliceAll(t *testing.T) { + assert := assert.New(t) + data := sliceType{A: "foo", B: []scalars{scalars{"f1", "b1"}, scalars{"correct", "b2"}}} + result, err := Search("B[].Foo", data) + assert.Nil(err) + assert.Equal([]interface{}{"f1", "correct"}, 
result) +} + +func TestCanSupportStructWithSlicingExpression(t *testing.T) { + assert := assert.New(t) + data := sliceType{A: "foo", B: []scalars{scalars{"f1", "b1"}, scalars{"correct", "b2"}}} + result, err := Search("B[:].Foo", data) + assert.Nil(err) + assert.Equal([]interface{}{"f1", "correct"}, result) +} + +func TestCanSupportStructWithFilterProjection(t *testing.T) { + assert := assert.New(t) + data := sliceType{A: "foo", B: []scalars{scalars{"f1", "b1"}, scalars{"correct", "b2"}}} + result, err := Search("B[? `true` ].Foo", data) + assert.Nil(err) + assert.Equal([]interface{}{"f1", "correct"}, result) +} + +func TestCanSupportStructWithSlice(t *testing.T) { + assert := assert.New(t) + data := sliceType{A: "foo", B: []scalars{scalars{"f1", "b1"}, scalars{"correct", "b2"}}} + result, err := Search("B[-1].Foo", data) + assert.Nil(err) + assert.Equal("correct", result) +} + +func TestCanSupportStructWithOrExpressions(t *testing.T) { + assert := assert.New(t) + data := sliceType{A: "foo", C: nil} + result, err := Search("C || A", data) + assert.Nil(err) + assert.Equal("foo", result) +} + +func TestCanSupportStructWithSlicePointer(t *testing.T) { + assert := assert.New(t) + data := sliceType{A: "foo", C: []*scalars{&scalars{"f1", "b1"}, &scalars{"correct", "b2"}}} + result, err := Search("C[-1].Foo", data) + assert.Nil(err) + assert.Equal("correct", result) +} + +func TestWillAutomaticallyCapitalizeFieldNames(t *testing.T) { + assert := assert.New(t) + s := scalars{Foo: "one", Bar: "bar"} + // Note that there's a lower cased "foo" instead of "Foo", + // but it should still correspond to the Foo field in the + // scalars struct + result, err := Search("foo", &s) + assert.Nil(err) + assert.Equal("one", result) +} + +func TestCanSupportStructWithSliceLowerCased(t *testing.T) { + assert := assert.New(t) + data := sliceType{A: "foo", B: []scalars{scalars{"f1", "b1"}, scalars{"correct", "b2"}}} + result, err := Search("b[-1].foo", data) + assert.Nil(err) + assert.Equal("correct", result) +} + +func TestCanSupportStructWithNestedPointers(t *testing.T) { + assert := assert.New(t) + data := struct{ A *struct{ B int } }{} + result, err := Search("A.B", data) + assert.Nil(err) + assert.Nil(result) +} + +func TestCanSupportFlattenNestedSlice(t *testing.T) { + assert := assert.New(t) + data := nestedSlice{A: []sliceType{ + {B: []scalars{{Foo: "f1a"}, {Foo: "f1b"}}}, + {B: []scalars{{Foo: "f2a"}, {Foo: "f2b"}}}, + }} + result, err := Search("A[].B[].Foo", data) + assert.Nil(err) + assert.Equal([]interface{}{"f1a", "f1b", "f2a", "f2b"}, result) +} + +func TestCanSupportFlattenNestedEmptySlice(t *testing.T) { + assert := assert.New(t) + data := nestedSlice{A: []sliceType{ + {}, {B: []scalars{{Foo: "a"}}}, + }} + result, err := Search("A[].B[].Foo", data) + assert.Nil(err) + assert.Equal([]interface{}{"a"}, result) +} + +func TestCanSupportProjectionsWithStructs(t *testing.T) { + assert := assert.New(t) + data := nestedSlice{A: []sliceType{ + {A: "first"}, {A: "second"}, {A: "third"}, + }} + result, err := Search("A[*].A", data) + assert.Nil(err) + assert.Equal([]interface{}{"first", "second", "third"}, result) +} + +func BenchmarkInterpretSingleFieldStruct(b *testing.B) { + intr := newInterpreter() + parser := NewParser() + ast, _ := parser.Parse("fooasdfasdfasdfasdf") + data := benchmarkStruct{"foobarbazqux"} + for i := 0; i < b.N; i++ { + intr.Execute(ast, &data) + } +} + +func BenchmarkInterpretNestedStruct(b *testing.B) { + intr := newInterpreter() + parser := NewParser() + ast, _ := 
parser.Parse("fooasdfasdfasdfasdf.fooasdfasdfasdfasdf.fooasdfasdfasdfasdf.fooasdfasdfasdfasdf") + data := benchmarkNested{ + nestedA{ + nestedB{ + nestedC{"foobarbazqux"}, + }, + }, + } + for i := 0; i < b.N; i++ { + intr.Execute(ast, &data) + } +} + +func BenchmarkInterpretNestedMaps(b *testing.B) { + jsonData := []byte(`{"fooasdfasdfasdfasdf": {"fooasdfasdfasdfasdf": {"fooasdfasdfasdfasdf": {"fooasdfasdfasdfasdf": "foobarbazqux"}}}}`) + var data interface{} + json.Unmarshal(jsonData, &data) + + intr := newInterpreter() + parser := NewParser() + ast, _ := parser.Parse("fooasdfasdfasdfasdf.fooasdfasdfasdfasdf.fooasdfasdfasdfasdf.fooasdfasdfasdfasdf") + for i := 0; i < b.N; i++ { + intr.Execute(ast, data) + } +} diff --git a/vendor/github.com/jmespath/go-jmespath/lexer.go b/vendor/github.com/jmespath/go-jmespath/lexer.go new file mode 100644 index 000000000..817900c8f --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/lexer.go @@ -0,0 +1,420 @@ +package jmespath + +import ( + "bytes" + "encoding/json" + "fmt" + "strconv" + "strings" + "unicode/utf8" +) + +type token struct { + tokenType tokType + value string + position int + length int +} + +type tokType int + +const eof = -1 + +// Lexer contains information about the expression being tokenized. +type Lexer struct { + expression string // The expression provided by the user. + currentPos int // The current position in the string. + lastWidth int // The width of the current rune. + buf bytes.Buffer // Internal buffer used for building up values. +} + +// SyntaxError is the main error used whenever a lexing or parsing error occurs. +type SyntaxError struct { + msg string // Error message displayed to user + Expression string // Expression that generated a SyntaxError + Offset int // The location in the string where the error occurred +} + +func (e SyntaxError) Error() string { + // In the future, it would be good to underline the specific + // location where the error occurred. + return "SyntaxError: " + e.msg +} + +// HighlightLocation will show where the syntax error occurred. +// It will place a "^" character on a line below the expression +// at the point where the syntax error occurred. +func (e SyntaxError) HighlightLocation() string { + return e.Expression + "\n" + strings.Repeat(" ", e.Offset) + "^" +} + +//go:generate stringer -type=tokType +const ( + tUnknown tokType = iota + tStar + tDot + tFilter + tFlatten + tLparen + tRparen + tLbracket + tRbracket + tLbrace + tRbrace + tOr + tPipe + tNumber + tUnquotedIdentifier + tQuotedIdentifier + tComma + tColon + tLT + tLTE + tGT + tGTE + tEQ + tNE + tJSONLiteral + tStringLiteral + tCurrent + tExpref + tAnd + tNot + tEOF +) + +var basicTokens = map[rune]tokType{ + '.': tDot, + '*': tStar, + ',': tComma, + ':': tColon, + '{': tLbrace, + '}': tRbrace, + ']': tRbracket, // tLbracket not included because it could be "[]" + '(': tLparen, + ')': tRparen, + '@': tCurrent, +} + +// Bit mask for [a-zA-Z_] shifted down 64 bits to fit in a single uint64. +// When using this bitmask just be sure to shift the rune down 64 bits +// before checking against identifierStartBits. +const identifierStartBits uint64 = 576460745995190270 + +// Bit mask for [0-9a-zA-Z_], 128 bits -> 2 uint64s.
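+// Editor-added worked example (not upstream documentation): for r == 'a'
+// (0x61), uint64(r)/64 == 1 selects the second word and uint64(r)%64 == 33
+// selects the bit, so identifierTrailingBits[1]&(1<<33) is non-zero and 'a'
+// is accepted as a trailing identifier rune; '.' (0x2E) has no bit set and
+// therefore ends the identifier.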
+var identifierTrailingBits = [2]uint64{287948901175001088, 576460745995190270} + +var whiteSpace = map[rune]bool{ + ' ': true, '\t': true, '\n': true, '\r': true, +} + +func (t token) String() string { + return fmt.Sprintf("Token{%+v, %s, %d, %d}", + t.tokenType, t.value, t.position, t.length) +} + +// NewLexer creates a new JMESPath lexer. +func NewLexer() *Lexer { + lexer := Lexer{} + return &lexer +} + +func (lexer *Lexer) next() rune { + if lexer.currentPos >= len(lexer.expression) { + lexer.lastWidth = 0 + return eof + } + r, w := utf8.DecodeRuneInString(lexer.expression[lexer.currentPos:]) + lexer.lastWidth = w + lexer.currentPos += w + return r +} + +func (lexer *Lexer) back() { + lexer.currentPos -= lexer.lastWidth +} + +func (lexer *Lexer) peek() rune { + t := lexer.next() + lexer.back() + return t +} + +// tokenize takes an expression and returns corresponding tokens. +func (lexer *Lexer) tokenize(expression string) ([]token, error) { + var tokens []token + lexer.expression = expression + lexer.currentPos = 0 + lexer.lastWidth = 0 +loop: + for { + r := lexer.next() + if identifierStartBits&(1<<(uint64(r)-64)) > 0 { + t := lexer.consumeUnquotedIdentifier() + tokens = append(tokens, t) + } else if val, ok := basicTokens[r]; ok { + // Basic single char token. + t := token{ + tokenType: val, + value: string(r), + position: lexer.currentPos - lexer.lastWidth, + length: 1, + } + tokens = append(tokens, t) + } else if r == '-' || (r >= '0' && r <= '9') { + t := lexer.consumeNumber() + tokens = append(tokens, t) + } else if r == '[' { + t := lexer.consumeLBracket() + tokens = append(tokens, t) + } else if r == '"' { + t, err := lexer.consumeQuotedIdentifier() + if err != nil { + return tokens, err + } + tokens = append(tokens, t) + } else if r == '\'' { + t, err := lexer.consumeRawStringLiteral() + if err != nil { + return tokens, err + } + tokens = append(tokens, t) + } else if r == '`' { + t, err := lexer.consumeLiteral() + if err != nil { + return tokens, err + } + tokens = append(tokens, t) + } else if r == '|' { + t := lexer.matchOrElse(r, '|', tOr, tPipe) + tokens = append(tokens, t) + } else if r == '<' { + t := lexer.matchOrElse(r, '=', tLTE, tLT) + tokens = append(tokens, t) + } else if r == '>' { + t := lexer.matchOrElse(r, '=', tGTE, tGT) + tokens = append(tokens, t) + } else if r == '!' { + t := lexer.matchOrElse(r, '=', tNE, tNot) + tokens = append(tokens, t) + } else if r == '=' { + t := lexer.matchOrElse(r, '=', tEQ, tUnknown) + tokens = append(tokens, t) + } else if r == '&' { + t := lexer.matchOrElse(r, '&', tAnd, tExpref) + tokens = append(tokens, t) + } else if r == eof { + break loop + } else if _, ok := whiteSpace[r]; ok { + // Ignore whitespace + } else { + return tokens, lexer.syntaxError(fmt.Sprintf("Unknown char: %s", strconv.QuoteRuneToASCII(r))) + } + } + tokens = append(tokens, token{tEOF, "", len(lexer.expression), 0}) + return tokens, nil +} + +// Consume characters until the ending rune "r" is reached. +// If the end of the expression is reached before seeing the +// terminating rune "r", then an error is returned. +// If no error occurs then the matching substring is returned. +// The returned string will not include the ending rune. 
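+// Editor-added illustration (not upstream documentation): with the
+// expression "`foo`" and currentPos just past the opening backtick,
+// consumeUntil('`') returns "foo" and leaves currentPos just past the
+// closing backtick, which is how consumeLiteral below extracts JSON
+// literal bodies.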
+func (lexer *Lexer) consumeUntil(end rune) (string, error) { + start := lexer.currentPos + current := lexer.next() + for current != end && current != eof { + if current == '\\' && lexer.peek() != eof { + lexer.next() + } + current = lexer.next() + } + if lexer.lastWidth == 0 { + // Then we hit an EOF so we never reached the closing + // delimiter. + return "", SyntaxError{ + msg: "Unclosed delimiter: " + string(end), + Expression: lexer.expression, + Offset: len(lexer.expression), + } + } + return lexer.expression[start : lexer.currentPos-lexer.lastWidth], nil +} + +func (lexer *Lexer) consumeLiteral() (token, error) { + start := lexer.currentPos + value, err := lexer.consumeUntil('`') + if err != nil { + return token{}, err + } + value = strings.Replace(value, "\\`", "`", -1) + return token{ + tokenType: tJSONLiteral, + value: value, + position: start, + length: len(value), + }, nil +} + +func (lexer *Lexer) consumeRawStringLiteral() (token, error) { + start := lexer.currentPos + currentIndex := start + current := lexer.next() + for current != '\'' && lexer.peek() != eof { + if current == '\\' && lexer.peek() == '\'' { + chunk := lexer.expression[currentIndex : lexer.currentPos-1] + lexer.buf.WriteString(chunk) + lexer.buf.WriteString("'") + lexer.next() + currentIndex = lexer.currentPos + } + current = lexer.next() + } + if lexer.lastWidth == 0 { + // Then we hit an EOF so we never reached the closing + // delimiter. + return token{}, SyntaxError{ + msg: "Unclosed delimiter: '", + Expression: lexer.expression, + Offset: len(lexer.expression), + } + } + if currentIndex < lexer.currentPos { + lexer.buf.WriteString(lexer.expression[currentIndex : lexer.currentPos-1]) + } + value := lexer.buf.String() + // Reset the buffer so it can be reused again. + lexer.buf.Reset() + return token{ + tokenType: tStringLiteral, + value: value, + position: start, + length: len(value), + }, nil +} + +func (lexer *Lexer) syntaxError(msg string) SyntaxError { + return SyntaxError{ + msg: msg, + Expression: lexer.expression, + Offset: lexer.currentPos - 1, + } +} + +// Checks for a two char token, otherwise matches a single character +// token. This is used whenever a two char token overlaps a single +// char token, e.g. "||" -> tOr, "|" -> tPipe. +func (lexer *Lexer) matchOrElse(first rune, second rune, matchedType tokType, singleCharType tokType) token { + start := lexer.currentPos - lexer.lastWidth + nextRune := lexer.next() + var t token + if nextRune == second { + t = token{ + tokenType: matchedType, + value: string(first) + string(second), + position: start, + length: 2, + } + } else { + lexer.back() + t = token{ + tokenType: singleCharType, + value: string(first), + position: start, + length: 1, + } + } + return t +} + +func (lexer *Lexer) consumeLBracket() token { + // There are three options here: + // 1. A filter expression "[?" + // 2. A flatten operator "[]" + // 3. A bare lbracket "[" + start := lexer.currentPos - lexer.lastWidth + nextRune := lexer.next() + var t token + if nextRune == '?'
{ + t = token{ + tokenType: tFilter, + value: "[?", + position: start, + length: 2, + } + } else if nextRune == ']' { + t = token{ + tokenType: tFlatten, + value: "[]", + position: start, + length: 2, + } + } else { + t = token{ + tokenType: tLbracket, + value: "[", + position: start, + length: 1, + } + lexer.back() + } + return t +} + +func (lexer *Lexer) consumeQuotedIdentifier() (token, error) { + start := lexer.currentPos + value, err := lexer.consumeUntil('"') + if err != nil { + return token{}, err + } + var decoded string + asJSON := []byte("\"" + value + "\"") + if err := json.Unmarshal(asJSON, &decoded); err != nil { + return token{}, err + } + return token{ + tokenType: tQuotedIdentifier, + value: decoded, + position: start - 1, + length: len(decoded), + }, nil +} + +func (lexer *Lexer) consumeUnquotedIdentifier() token { + // Consume runes until we reach the end of an unquoted + // identifier. + start := lexer.currentPos - lexer.lastWidth + for { + r := lexer.next() + if r < 0 || r >= 128 || identifierTrailingBits[uint64(r)/64]&(1<<(uint64(r)%64)) == 0 { + lexer.back() + break + } + } + value := lexer.expression[start:lexer.currentPos] + return token{ + tokenType: tUnquotedIdentifier, + value: value, + position: start, + length: lexer.currentPos - start, + } +} + +func (lexer *Lexer) consumeNumber() token { + // Consume runes until we reach something that's not a number. + start := lexer.currentPos - lexer.lastWidth + for { + r := lexer.next() + if r < '0' || r > '9' { + lexer.back() + break + } + } + value := lexer.expression[start:lexer.currentPos] + return token{ + tokenType: tNumber, + value: value, + position: start, + length: lexer.currentPos - start, + } +} diff --git a/vendor/github.com/jmespath/go-jmespath/lexer_test.go b/vendor/github.com/jmespath/go-jmespath/lexer_test.go new file mode 100644 index 000000000..7a9a9ee24 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/lexer_test.go @@ -0,0 +1,161 @@ +package jmespath + +import ( + "fmt" + "testing" + + "github.com/stretchr/testify/assert" +) + +var lexingTests = []struct { + expression string + expected []token +}{ + {"*", []token{token{tStar, "*", 0, 1}}}, + {".", []token{token{tDot, ".", 0, 1}}}, + {"[?", []token{token{tFilter, "[?", 0, 2}}}, + {"[]", []token{token{tFlatten, "[]", 0, 2}}}, + {"(", []token{token{tLparen, "(", 0, 1}}}, + {")", []token{token{tRparen, ")", 0, 1}}}, + {"[", []token{token{tLbracket, "[", 0, 1}}}, + {"]", []token{token{tRbracket, "]", 0, 1}}}, + {"{", []token{token{tLbrace, "{", 0, 1}}}, + {"}", []token{token{tRbrace, "}", 0, 1}}}, + {"||", []token{token{tOr, "||", 0, 2}}}, + {"|", []token{token{tPipe, "|", 0, 1}}}, + {"29", []token{token{tNumber, "29", 0, 2}}}, + {"2", []token{token{tNumber, "2", 0, 1}}}, + {"0", []token{token{tNumber, "0", 0, 1}}}, + {"-20", []token{token{tNumber, "-20", 0, 3}}}, + {"foo", []token{token{tUnquotedIdentifier, "foo", 0, 3}}}, + {`"bar"`, []token{token{tQuotedIdentifier, "bar", 0, 3}}}, + // Escaping the delimiter + {`"bar\"baz"`, []token{token{tQuotedIdentifier, `bar"baz`, 0, 7}}}, + {",", []token{token{tComma, ",", 0, 1}}}, + {":", []token{token{tColon, ":", 0, 1}}}, + {"<", []token{token{tLT, "<", 0, 1}}}, + {"<=", []token{token{tLTE, "<=", 0, 2}}}, + {">", []token{token{tGT, ">", 0, 1}}}, + {">=", []token{token{tGTE, ">=", 0, 2}}}, + {"==", []token{token{tEQ, "==", 0, 2}}}, + {"!=", []token{token{tNE, "!=", 0, 2}}}, + {"`[0, 1, 2]`", []token{token{tJSONLiteral, "[0, 1, 2]", 1, 9}}}, + {"'foo'", []token{token{tStringLiteral, "foo", 1,
3}}}, + {"'a'", []token{token{tStringLiteral, "a", 1, 1}}}, + {`'foo\'bar'`, []token{token{tStringLiteral, "foo'bar", 1, 7}}}, + {"@", []token{token{tCurrent, "@", 0, 1}}}, + {"&", []token{token{tExpref, "&", 0, 1}}}, + // Quoted identifier unicode escape sequences + {`"\u2713"`, []token{token{tQuotedIdentifier, "✓", 0, 3}}}, + {`"\\"`, []token{token{tQuotedIdentifier, `\`, 0, 1}}}, + {"`\"foo\"`", []token{token{tJSONLiteral, "\"foo\"", 1, 5}}}, + // Combinations of tokens. + {"foo.bar", []token{ + token{tUnquotedIdentifier, "foo", 0, 3}, + token{tDot, ".", 3, 1}, + token{tUnquotedIdentifier, "bar", 4, 3}, + }}, + {"foo[0]", []token{ + token{tUnquotedIdentifier, "foo", 0, 3}, + token{tLbracket, "[", 3, 1}, + token{tNumber, "0", 4, 1}, + token{tRbracket, "]", 5, 1}, + }}, + {"foo[?a 0 { + output += fmt.Sprintf("%schildren: {\n", strings.Repeat(" ", nextIndent)) + childIndent := nextIndent + 2 + for _, elem := range node.children { + output += elem.PrettyPrint(childIndent) + } + } + output += fmt.Sprintf("%s}\n", spaces) + return output +} + +var bindingPowers = map[tokType]int{ + tEOF: 0, + tUnquotedIdentifier: 0, + tQuotedIdentifier: 0, + tRbracket: 0, + tRparen: 0, + tComma: 0, + tRbrace: 0, + tNumber: 0, + tCurrent: 0, + tExpref: 0, + tColon: 0, + tPipe: 1, + tOr: 2, + tAnd: 3, + tEQ: 5, + tLT: 5, + tLTE: 5, + tGT: 5, + tGTE: 5, + tNE: 5, + tFlatten: 9, + tStar: 20, + tFilter: 21, + tDot: 40, + tNot: 45, + tLbrace: 50, + tLbracket: 55, + tLparen: 60, +} + +// Parser holds state about the current expression being parsed. +type Parser struct { + expression string + tokens []token + index int +} + +// NewParser creates a new JMESPath parser. +func NewParser() *Parser { + p := Parser{} + return &p +} + +// Parse will compile a JMESPath expression. +func (p *Parser) Parse(expression string) (ASTNode, error) { + lexer := NewLexer() + p.expression = expression + p.index = 0 + tokens, err := lexer.tokenize(expression) + if err != nil { + return ASTNode{}, err + } + p.tokens = tokens + parsed, err := p.parseExpression(0) + if err != nil { + return ASTNode{}, err + } + if p.current() != tEOF { + return ASTNode{}, p.syntaxError(fmt.Sprintf( + "Unexpected token at the end of the expression: %s", p.current())) + } + return parsed, nil +} + +func (p *Parser) parseExpression(bindingPower int) (ASTNode, error) { + var err error + leftToken := p.lookaheadToken(0) + p.advance() + leftNode, err := p.nud(leftToken) + if err != nil { + return ASTNode{}, err + } + currentToken := p.current() + for bindingPower < bindingPowers[currentToken] { + p.advance() + leftNode, err = p.led(currentToken, leftNode) + if err != nil { + return ASTNode{}, err + } + currentToken = p.current() + } + return leftNode, nil +} + +func (p *Parser) parseIndexExpression() (ASTNode, error) { + if p.lookahead(0) == tColon || p.lookahead(1) == tColon { + return p.parseSliceExpression() + } + indexStr := p.lookaheadToken(0).value + parsedInt, err := strconv.Atoi(indexStr) + if err != nil { + return ASTNode{}, err + } + indexNode := ASTNode{nodeType: ASTIndex, value: parsedInt} + p.advance() + if err := p.match(tRbracket); err != nil { + return ASTNode{}, err + } + return indexNode, nil +} + +func (p *Parser) parseSliceExpression() (ASTNode, error) { + parts := []*int{nil, nil, nil} + index := 0 + current := p.current() + for current != tRbracket && index < 3 { + if current == tColon { + index++ + p.advance() + } else if current == tNumber { + parsedInt, err := strconv.Atoi(p.lookaheadToken(0).value) + if err != nil { + return ASTNode{},
err + } + parts[index] = &parsedInt + p.advance() + } else { + return ASTNode{}, p.syntaxError( + "Expected tColon or tNumber" + ", received: " + p.current().String()) + } + current = p.current() + } + if err := p.match(tRbracket); err != nil { + return ASTNode{}, err + } + return ASTNode{ + nodeType: ASTSlice, + value: parts, + }, nil +} + +func (p *Parser) match(tokenType tokType) error { + if p.current() == tokenType { + p.advance() + return nil + } + return p.syntaxError("Expected " + tokenType.String() + ", received: " + p.current().String()) +} + +func (p *Parser) led(tokenType tokType, node ASTNode) (ASTNode, error) { + switch tokenType { + case tDot: + if p.current() != tStar { + right, err := p.parseDotRHS(bindingPowers[tDot]) + return ASTNode{ + nodeType: ASTSubexpression, + children: []ASTNode{node, right}, + }, err + } + p.advance() + right, err := p.parseProjectionRHS(bindingPowers[tDot]) + return ASTNode{ + nodeType: ASTValueProjection, + children: []ASTNode{node, right}, + }, err + case tPipe: + right, err := p.parseExpression(bindingPowers[tPipe]) + return ASTNode{nodeType: ASTPipe, children: []ASTNode{node, right}}, err + case tOr: + right, err := p.parseExpression(bindingPowers[tOr]) + return ASTNode{nodeType: ASTOrExpression, children: []ASTNode{node, right}}, err + case tAnd: + right, err := p.parseExpression(bindingPowers[tAnd]) + return ASTNode{nodeType: ASTAndExpression, children: []ASTNode{node, right}}, err + case tLparen: + name := node.value + var args []ASTNode + for p.current() != tRparen { + expression, err := p.parseExpression(0) + if err != nil { + return ASTNode{}, err + } + if p.current() == tComma { + if err := p.match(tComma); err != nil { + return ASTNode{}, err + } + } + args = append(args, expression) + } + if err := p.match(tRparen); err != nil { + return ASTNode{}, err + } + return ASTNode{ + nodeType: ASTFunctionExpression, + value: name, + children: args, + }, nil + case tFilter: + return p.parseFilter(node) + case tFlatten: + left := ASTNode{nodeType: ASTFlatten, children: []ASTNode{node}} + right, err := p.parseProjectionRHS(bindingPowers[tFlatten]) + return ASTNode{ + nodeType: ASTProjection, + children: []ASTNode{left, right}, + }, err + case tEQ, tNE, tGT, tGTE, tLT, tLTE: + right, err := p.parseExpression(bindingPowers[tokenType]) + if err != nil { + return ASTNode{}, err + } + return ASTNode{ + nodeType: ASTComparator, + value: tokenType, + children: []ASTNode{node, right}, + }, nil + case tLbracket: + tokenType := p.current() + var right ASTNode + var err error + if tokenType == tNumber || tokenType == tColon { + right, err = p.parseIndexExpression() + if err != nil { + return ASTNode{}, err + } + return p.projectIfSlice(node, right) + } + // Otherwise this is a projection. 
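+ // Editor-added illustration (not upstream documentation): for an
+ // expression like "foo[*].bar", this branch consumes the '*' and ']'
+ // of "[*]" and then parses ".bar" as the projection's right-hand
+ // side.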
+ if err := p.match(tStar); err != nil { + return ASTNode{}, err + } + if err := p.match(tRbracket); err != nil { + return ASTNode{}, err + } + right, err = p.parseProjectionRHS(bindingPowers[tStar]) + if err != nil { + return ASTNode{}, err + } + return ASTNode{ + nodeType: ASTProjection, + children: []ASTNode{node, right}, + }, nil + } + return ASTNode{}, p.syntaxError("Unexpected token: " + tokenType.String()) +} + +func (p *Parser) nud(token token) (ASTNode, error) { + switch token.tokenType { + case tJSONLiteral: + var parsed interface{} + err := json.Unmarshal([]byte(token.value), &parsed) + if err != nil { + return ASTNode{}, err + } + return ASTNode{nodeType: ASTLiteral, value: parsed}, nil + case tStringLiteral: + return ASTNode{nodeType: ASTLiteral, value: token.value}, nil + case tUnquotedIdentifier: + return ASTNode{ + nodeType: ASTField, + value: token.value, + }, nil + case tQuotedIdentifier: + node := ASTNode{nodeType: ASTField, value: token.value} + if p.current() == tLparen { + return ASTNode{}, p.syntaxErrorToken("Can't have quoted identifier as function name.", token) + } + return node, nil + case tStar: + left := ASTNode{nodeType: ASTIdentity} + var right ASTNode + var err error + if p.current() == tRbracket { + right = ASTNode{nodeType: ASTIdentity} + } else { + right, err = p.parseProjectionRHS(bindingPowers[tStar]) + } + return ASTNode{nodeType: ASTValueProjection, children: []ASTNode{left, right}}, err + case tFilter: + return p.parseFilter(ASTNode{nodeType: ASTIdentity}) + case tLbrace: + return p.parseMultiSelectHash() + case tFlatten: + left := ASTNode{ + nodeType: ASTFlatten, + children: []ASTNode{ASTNode{nodeType: ASTIdentity}}, + } + right, err := p.parseProjectionRHS(bindingPowers[tFlatten]) + if err != nil { + return ASTNode{}, err + } + return ASTNode{nodeType: ASTProjection, children: []ASTNode{left, right}}, nil + case tLbracket: + tokenType := p.current() + if tokenType == tNumber || tokenType == tColon { + right, err := p.parseIndexExpression() + if err != nil { + return ASTNode{}, err + } + return p.projectIfSlice(ASTNode{nodeType: ASTIdentity}, right) + } else if tokenType == tStar && p.lookahead(1) == tRbracket { + p.advance() + p.advance() + right, err := p.parseProjectionRHS(bindingPowers[tStar]) + if err != nil { + return ASTNode{}, err + } + return ASTNode{ + nodeType: ASTProjection, + children: []ASTNode{ASTNode{nodeType: ASTIdentity}, right}, + }, nil + } else { + return p.parseMultiSelectList() + } + case tCurrent: + return ASTNode{nodeType: ASTCurrentNode}, nil + case tExpref: + expression, err := p.parseExpression(bindingPowers[tExpref]) + if err != nil { + return ASTNode{}, err + } + return ASTNode{nodeType: ASTExpRef, children: []ASTNode{expression}}, nil + case tNot: + expression, err := p.parseExpression(bindingPowers[tNot]) + if err != nil { + return ASTNode{}, err + } + return ASTNode{nodeType: ASTNotExpression, children: []ASTNode{expression}}, nil + case tLparen: + expression, err := p.parseExpression(0) + if err != nil { + return ASTNode{}, err + } + if err := p.match(tRparen); err != nil { + return ASTNode{}, err + } + return expression, nil + case tEOF: + return ASTNode{}, p.syntaxErrorToken("Incomplete expression", token) + } + + return ASTNode{}, p.syntaxErrorToken("Invalid token: "+token.tokenType.String(), token) +} + +func (p *Parser) parseMultiSelectList() (ASTNode, error) { + var expressions []ASTNode + for { + expression, err := p.parseExpression(0) + if err != nil { + return ASTNode{}, err + } +
expressions = append(expressions, expression) + if p.current() == tRbracket { + break + } + err = p.match(tComma) + if err != nil { + return ASTNode{}, err + } + } + err := p.match(tRbracket) + if err != nil { + return ASTNode{}, err + } + return ASTNode{ + nodeType: ASTMultiSelectList, + children: expressions, + }, nil +} + +func (p *Parser) parseMultiSelectHash() (ASTNode, error) { + var children []ASTNode + for { + keyToken := p.lookaheadToken(0) + if err := p.match(tUnquotedIdentifier); err != nil { + if err := p.match(tQuotedIdentifier); err != nil { + return ASTNode{}, p.syntaxError("Expected tQuotedIdentifier or tUnquotedIdentifier") + } + } + keyName := keyToken.value + err := p.match(tColon) + if err != nil { + return ASTNode{}, err + } + value, err := p.parseExpression(0) + if err != nil { + return ASTNode{}, err + } + node := ASTNode{ + nodeType: ASTKeyValPair, + value: keyName, + children: []ASTNode{value}, + } + children = append(children, node) + if p.current() == tComma { + err := p.match(tComma) + if err != nil { + return ASTNode{}, err + } + } else if p.current() == tRbrace { + err := p.match(tRbrace) + if err != nil { + return ASTNode{}, err + } + break + } + } + return ASTNode{ + nodeType: ASTMultiSelectHash, + children: children, + }, nil +} + +func (p *Parser) projectIfSlice(left ASTNode, right ASTNode) (ASTNode, error) { + indexExpr := ASTNode{ + nodeType: ASTIndexExpression, + children: []ASTNode{left, right}, + } + if right.nodeType == ASTSlice { + right, err := p.parseProjectionRHS(bindingPowers[tStar]) + return ASTNode{ + nodeType: ASTProjection, + children: []ASTNode{indexExpr, right}, + }, err + } + return indexExpr, nil +} + +func (p *Parser) parseFilter(node ASTNode) (ASTNode, error) { + var right, condition ASTNode + var err error + condition, err = p.parseExpression(0) + if err != nil { + return ASTNode{}, err + } + if err := p.match(tRbracket); err != nil { + return ASTNode{}, err + } + if p.current() == tFlatten { + right = ASTNode{nodeType: ASTIdentity} + } else { + right, err = p.parseProjectionRHS(bindingPowers[tFilter]) + if err != nil { + return ASTNode{}, err + } + } + + return ASTNode{ + nodeType: ASTFilterProjection, + children: []ASTNode{node, right, condition}, + }, nil +} + +func (p *Parser) parseDotRHS(bindingPower int) (ASTNode, error) { + lookahead := p.current() + if tokensOneOf([]tokType{tQuotedIdentifier, tUnquotedIdentifier, tStar}, lookahead) { + return p.parseExpression(bindingPower) + } else if lookahead == tLbracket { + if err := p.match(tLbracket); err != nil { + return ASTNode{}, err + } + return p.parseMultiSelectList() + } else if lookahead == tLbrace { + if err := p.match(tLbrace); err != nil { + return ASTNode{}, err + } + return p.parseMultiSelectHash() + } + return ASTNode{}, p.syntaxError("Expected identifier, lbracket, or lbrace") +} + +func (p *Parser) parseProjectionRHS(bindingPower int) (ASTNode, error) { + current := p.current() + if bindingPowers[current] < 10 { + return ASTNode{nodeType: ASTIdentity}, nil + } else if current == tLbracket { + return p.parseExpression(bindingPower) + } else if current == tFilter { + return p.parseExpression(bindingPower) + } else if current == tDot { + err := p.match(tDot) + if err != nil { + return ASTNode{}, err + } + return p.parseDotRHS(bindingPower) + } else { + return ASTNode{}, p.syntaxError("Unexpected token: " + current.String()) + } +} + +func (p *Parser) lookahead(number int) tokType { + return p.lookaheadToken(number).tokenType +} + +func (p *Parser) current() tokType { + return p.lookahead(0) +} + +func (p
*Parser) lookaheadToken(number int) token { + return p.tokens[p.index+number] +} + +func (p *Parser) advance() { + p.index++ +} + +func tokensOneOf(elements []tokType, token tokType) bool { + for _, elem := range elements { + if elem == token { + return true + } + } + return false +} + +func (p *Parser) syntaxError(msg string) SyntaxError { + return SyntaxError{ + msg: msg, + Expression: p.expression, + Offset: p.lookaheadToken(0).position, + } +} + +// syntaxErrorToken creates a SyntaxError based on the provided token. +// This differs from syntaxError(), which creates a SyntaxError +// based on the current lookahead token. +func (p *Parser) syntaxErrorToken(msg string, t token) SyntaxError { + return SyntaxError{ + msg: msg, + Expression: p.expression, + Offset: t.position, + } +} diff --git a/vendor/github.com/jmespath/go-jmespath/parser_test.go b/vendor/github.com/jmespath/go-jmespath/parser_test.go new file mode 100644 index 000000000..997a0f4d7 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/parser_test.go @@ -0,0 +1,136 @@ +package jmespath + +import ( + "fmt" + "testing" + + "github.com/stretchr/testify/assert" +) + +var parsingErrorTests = []struct { + expression string + msg string +}{ + {"foo.", "Incomplete expression"}, + {"[foo", "Incomplete expression"}, + {"]", "Invalid"}, + {")", "Invalid"}, + {"}", "Invalid"}, + {"foo..bar", "Invalid"}, + {`foo."bar`, "Forwards lexer errors"}, + {`{foo: bar`, "Incomplete expression"}, + {`{foo bar}`, "Invalid"}, + {`[foo bar]`, "Invalid"}, + {`foo@`, "Invalid"}, + {`&&&&&&&&&&&&t(`, "Invalid"}, + {`[*][`, "Invalid"}, +} + +func TestParsingErrors(t *testing.T) { + assert := assert.New(t) + parser := NewParser() + for _, tt := range parsingErrorTests { + _, err := parser.Parse(tt.expression) + assert.NotNil(err, fmt.Sprintf("Expected parsing error: %s, for expression: %s", tt.msg, tt.expression)) + } +} + +var prettyPrinted = `ASTProjection { + children: { + ASTField { + value: "foo" + } + ASTSubexpression { + children: { + ASTSubexpression { + children: { + ASTField { + value: "bar" + } + ASTField { + value: "baz" + } + } + ASTField { + value: "qux" + } + } +} +` + +var prettyPrintedCompNode = `ASTFilterProjection { + children: { + ASTField { + value: "a" + } + ASTIdentity { + } + ASTComparator { + value: tLTE + children: { + ASTField { + value: "b" + } + ASTField { + value: "c" + } + } +} +` + +func TestPrettyPrintedAST(t *testing.T) { + assert := assert.New(t) + parser := NewParser() + parsed, _ := parser.Parse("foo[*].bar.baz.qux") + assert.Equal(parsed.PrettyPrint(0), prettyPrinted) +} + +func TestPrettyPrintedCompNode(t *testing.T) { + assert := assert.New(t) + parser := NewParser() + parsed, _ := parser.Parse("a[?b<=c]") + assert.Equal(parsed.PrettyPrint(0), prettyPrintedCompNode) +} + +func BenchmarkParseIdentifier(b *testing.B) { + runParseBenchmark(b, exprIdentifier) +} + +func BenchmarkParseSubexpression(b *testing.B) { + runParseBenchmark(b, exprSubexpr) +} + +func BenchmarkParseDeeplyNested50(b *testing.B) { + runParseBenchmark(b, deeplyNested50) +} + +func BenchmarkParseDeepNested50Pipe(b *testing.B) { + runParseBenchmark(b, deeplyNested50Pipe) +} + +func BenchmarkParseDeepNested50Index(b *testing.B) { + runParseBenchmark(b, deeplyNested50Index) +} + +func BenchmarkParseQuotedIdentifier(b *testing.B) { + runParseBenchmark(b, exprQuotedIdentifier) +} + +func BenchmarkParseQuotedIdentifierEscapes(b *testing.B) { + runParseBenchmark(b, quotedIdentifierEscapes) +} + +func BenchmarkParseRawStringLiteral(b *testing.B) {
+	runParseBenchmark(b, rawStringLiteral)
+}
+
+func BenchmarkParseDeepProjection104(b *testing.B) {
+	runParseBenchmark(b, deepProjection104)
+}
+
+func runParseBenchmark(b *testing.B, expression string) {
+	parser := NewParser()
+	for i := 0; i < b.N; i++ {
+		parser.Parse(expression)
+	}
+}
diff --git a/vendor/github.com/jmespath/go-jmespath/toktype_string.go b/vendor/github.com/jmespath/go-jmespath/toktype_string.go
new file mode 100644
index 000000000..dae79cbdf
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/toktype_string.go
@@ -0,0 +1,16 @@
+// generated by stringer -type=tokType; DO NOT EDIT
+
+package jmespath
+
+import "fmt"
+
+const _tokType_name = "tUnknowntStartDottFiltertFlattentLparentRparentLbrackettRbrackettLbracetRbracetOrtPipetNumbertUnquotedIdentifiertQuotedIdentifiertCommatColontLTtLTEtGTtGTEtEQtNEtJSONLiteraltStringLiteraltCurrenttExpreftAndtNottEOF"
+
+var _tokType_index = [...]uint8{0, 8, 13, 17, 24, 32, 39, 46, 55, 64, 71, 78, 81, 86, 93, 112, 129, 135, 141, 144, 148, 151, 155, 158, 161, 173, 187, 195, 202, 206, 210, 214}
+
+func (i tokType) String() string {
+	if i < 0 || i >= tokType(len(_tokType_index)-1) {
+		return fmt.Sprintf("tokType(%d)", i)
+	}
+	return _tokType_name[_tokType_index[i]:_tokType_index[i+1]]
+}
diff --git a/vendor/github.com/jmespath/go-jmespath/util.go b/vendor/github.com/jmespath/go-jmespath/util.go
new file mode 100644
index 000000000..ddc1b7d7d
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/util.go
@@ -0,0 +1,185 @@
+package jmespath
+
+import (
+	"errors"
+	"reflect"
+)
+
+// isFalse determines if an object is false based on the JMESPath spec.
+// JMESPath defines false values to be any of:
+// - An empty string, array, or hash.
+// - The boolean value false.
+// - nil
+func isFalse(value interface{}) bool {
+	switch v := value.(type) {
+	case bool:
+		return !v
+	case []interface{}:
+		return len(v) == 0
+	case map[string]interface{}:
+		return len(v) == 0
+	case string:
+		return len(v) == 0
+	case nil:
+		return true
+	}
+	// Try the reflection cases before returning false.
+	rv := reflect.ValueOf(value)
+	switch rv.Kind() {
+	case reflect.Struct:
+		// A struct type will never be false, even if
+		// all of its values are the zero type.
+		return false
+	case reflect.Slice, reflect.Map:
+		return rv.Len() == 0
+	case reflect.Ptr:
+		if rv.IsNil() {
+			return true
+		}
+		// If it's a pointer type, we'll try to deref the pointer
+		// and evaluate the pointer value for isFalse.
+		element := rv.Elem()
+		return isFalse(element.Interface())
+	}
+	return false
+}
+
+// objsEqual is a generic object equality check.
+// It will take two arbitrary objects and recursively determine
+// if they are equal.
+func objsEqual(left interface{}, right interface{}) bool {
+	return reflect.DeepEqual(left, right)
+}
+
+// sliceParam refers to a single part of a slice.
+// A slice consists of a start, a stop, and a step, similar to
+// python slices.
+type sliceParam struct {
+	N         int
+	Specified bool
+}
+
+// slice supports [start:stop:step] style slicing that's supported in JMESPath.
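+// Negative start or stop values count from the end of the slice, as in
+// Python.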
+func slice(slice []interface{}, parts []sliceParam) ([]interface{}, error) {
+	computed, err := computeSliceParams(len(slice), parts)
+	if err != nil {
+		return nil, err
+	}
+	start, stop, step := computed[0], computed[1], computed[2]
+	result := []interface{}{}
+	if step > 0 {
+		for i := start; i < stop; i += step {
+			result = append(result, slice[i])
+		}
+	} else {
+		for i := start; i > stop; i += step {
+			result = append(result, slice[i])
+		}
+	}
+	return result, nil
+}
+
+func computeSliceParams(length int, parts []sliceParam) ([]int, error) {
+	var start, stop, step int
+	if !parts[2].Specified {
+		step = 1
+	} else if parts[2].N == 0 {
+		return nil, errors.New("Invalid slice, step cannot be 0")
+	} else {
+		step = parts[2].N
+	}
+	stepValueNegative := step < 0
+
+	if !parts[0].Specified {
+		if stepValueNegative {
+			start = length - 1
+		} else {
+			start = 0
+		}
+	} else {
+		start = capSlice(length, parts[0].N, step)
+	}
+
+	if !parts[1].Specified {
+		if stepValueNegative {
+			stop = -1
+		} else {
+			stop = length
+		}
+	} else {
+		stop = capSlice(length, parts[1].N, step)
+	}
+	return []int{start, stop, step}, nil
+}
+
+func capSlice(length int, actual int, step int) int {
+	if actual < 0 {
+		actual += length
+		if actual < 0 {
+			if step < 0 {
+				actual = -1
+			} else {
+				actual = 0
+			}
+		}
+	} else if actual >= length {
+		if step < 0 {
+			actual = length - 1
+		} else {
+			actual = length
+		}
+	}
+	return actual
+}
+
+// toArrayNum converts an empty interface type to a slice of float64.
+// If any element in the array cannot be converted, then nil is returned
+// along with a second value of false.
+func toArrayNum(data interface{}) ([]float64, bool) {
+	// Is there a better way to do this with reflect?
+	if d, ok := data.([]interface{}); ok {
+		result := make([]float64, len(d))
+		for i, el := range d {
+			item, ok := el.(float64)
+			if !ok {
+				return nil, false
+			}
+			result[i] = item
+		}
+		return result, true
+	}
+	return nil, false
+}
+
+// toArrayStr converts an empty interface type to a slice of strings.
+// If any element in the array cannot be converted, then nil is returned
+// along with a second value of false. If the input data could be entirely
+// converted, then the converted data, along with a second value of true,
+// will be returned.
+func toArrayStr(data interface{}) ([]string, bool) {
+	// Is there a better way to do this with reflect?
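+	// A type switch on []interface{} covers data decoded by encoding/json,
+	// which is the common input here.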
+	if d, ok := data.([]interface{}); ok {
+		result := make([]string, len(d))
+		for i, el := range d {
+			item, ok := el.(string)
+			if !ok {
+				return nil, false
+			}
+			result[i] = item
+		}
+		return result, true
+	}
+	return nil, false
+}
+
+func isSliceType(v interface{}) bool {
+	if v == nil {
+		return false
+	}
+	return reflect.TypeOf(v).Kind() == reflect.Slice
+}
diff --git a/vendor/github.com/jmespath/go-jmespath/util_test.go b/vendor/github.com/jmespath/go-jmespath/util_test.go
new file mode 100644
index 000000000..1e2cd9352
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/util_test.go
@@ -0,0 +1,73 @@
+package jmespath
+
+import (
+	"github.com/stretchr/testify/assert"
+	"testing"
+)
+
+func TestSlicePositiveStep(t *testing.T) {
+	assert := assert.New(t)
+	input := make([]interface{}, 5)
+	input[0] = 0
+	input[1] = 1
+	input[2] = 2
+	input[3] = 3
+	input[4] = 4
+	result, err := slice(input, []sliceParam{{0, true}, {3, true}, {1, true}})
+	assert.Nil(err)
+	assert.Equal(input[:3], result)
+}
+
+func TestIsFalseJSONTypes(t *testing.T) {
+	assert := assert.New(t)
+	assert.True(isFalse(false))
+	assert.True(isFalse(""))
+	var empty []interface{}
+	assert.True(isFalse(empty))
+	m := make(map[string]interface{})
+	assert.True(isFalse(m))
+	assert.True(isFalse(nil))
+}
+
+func TestIsFalseWithUserDefinedStructs(t *testing.T) {
+	assert := assert.New(t)
+	type nilStructType struct {
+		SliceOfPointers []*string
+	}
+	nilStruct := nilStructType{SliceOfPointers: nil}
+	assert.True(isFalse(nilStruct.SliceOfPointers))
+
+	// A user defined struct will never be false though,
+	// even if its fields are the zero type.
+	assert.False(isFalse(nilStruct))
+}
+
+func TestIsFalseWithNilInterface(t *testing.T) {
+	assert := assert.New(t)
+	var a *int = nil
+	var nilInterface interface{}
+	nilInterface = a
+	assert.True(isFalse(nilInterface))
+}
+
+func TestIsFalseWithMapOfUserStructs(t *testing.T) {
+	assert := assert.New(t)
+	type foo struct {
+		Bar string
+		Baz string
+	}
+	m := make(map[int]foo)
+	assert.True(isFalse(m))
+}
+
+func TestObjsEqual(t *testing.T) {
+	assert := assert.New(t)
+	assert.True(objsEqual("foo", "foo"))
+	assert.True(objsEqual(20, 20))
+	assert.True(objsEqual([]int{1, 2, 3}, []int{1, 2, 3}))
+	assert.True(objsEqual(nil, nil))
+	assert.True(!objsEqual(nil, "foo"))
+	assert.True(objsEqual([]int{}, []int{}))
+	assert.True(!objsEqual([]int{}, nil))
+}
diff --git a/vendor/github.com/kardianos/osext/LICENSE b/vendor/github.com/kardianos/osext/LICENSE
new file mode 100644
index 000000000..744875676
--- /dev/null
+++ b/vendor/github.com/kardianos/osext/LICENSE
@@ -0,0 +1,27 @@
+Copyright (c) 2012 The Go Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+   * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+   * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+   * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/kardianos/osext/README.md b/vendor/github.com/kardianos/osext/README.md
new file mode 100644
index 000000000..61350baba
--- /dev/null
+++ b/vendor/github.com/kardianos/osext/README.md
@@ -0,0 +1,16 @@
+### Extensions to the "os" package.
+
+## Find the current Executable and ExecutableFolder.
+
+There is sometimes utility in finding the current executable file
+that is running. This can be used for upgrading the current executable
+or finding resources located relative to the executable file. Both
+the working directory and the os.Args[0] value are arbitrary and cannot
+be relied on; os.Args[0] can be "faked".
+
+Multi-platform and supports:
+ * Linux
+ * OS X
+ * Windows
+ * Plan 9
+ * BSDs.
diff --git a/vendor/github.com/kardianos/osext/osext.go b/vendor/github.com/kardianos/osext/osext.go
new file mode 100644
index 000000000..c0de8b7f5
--- /dev/null
+++ b/vendor/github.com/kardianos/osext/osext.go
@@ -0,0 +1,33 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package osext provides extensions to the standard "os" package.
+package osext
+
+import "path/filepath"
+
+var cx, ce = executableClean()
+
+func executableClean() (string, error) {
+	p, err := executable()
+	return filepath.Clean(p), err
+}
+
+// Executable returns an absolute path that can be used to
+// re-invoke the current program.
+// It may not be valid after the current program exits.
+func Executable() (string, error) {
+	return cx, ce
+}
+
+// ExecutableFolder returns the same path as Executable but without
+// the executable name and any trailing slash.
+func ExecutableFolder() (string, error) {
+	p, err := Executable()
+	if err != nil {
+		return "", err
+	}
+
+	return filepath.Dir(p), nil
+}
diff --git a/vendor/github.com/kardianos/osext/osext_plan9.go b/vendor/github.com/kardianos/osext/osext_plan9.go
new file mode 100644
index 000000000..655750c54
--- /dev/null
+++ b/vendor/github.com/kardianos/osext/osext_plan9.go
@@ -0,0 +1,20 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package osext
+
+import (
+	"os"
+	"strconv"
+	"syscall"
+)
+
+func executable() (string, error) {
+	f, err := os.Open("/proc/" + strconv.Itoa(os.Getpid()) + "/text")
+	if err != nil {
+		return "", err
+	}
+	defer f.Close()
+	return syscall.Fd2path(int(f.Fd()))
+}
diff --git a/vendor/github.com/kardianos/osext/osext_procfs.go b/vendor/github.com/kardianos/osext/osext_procfs.go
new file mode 100644
index 000000000..d59847ee5
--- /dev/null
+++ b/vendor/github.com/kardianos/osext/osext_procfs.go
@@ -0,0 +1,36 @@
+// Copyright 2012 The Go Authors.
All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build linux netbsd solaris dragonfly + +package osext + +import ( + "errors" + "fmt" + "os" + "runtime" + "strings" +) + +func executable() (string, error) { + switch runtime.GOOS { + case "linux": + const deletedTag = " (deleted)" + execpath, err := os.Readlink("/proc/self/exe") + if err != nil { + return execpath, err + } + execpath = strings.TrimSuffix(execpath, deletedTag) + execpath = strings.TrimPrefix(execpath, deletedTag) + return execpath, nil + case "netbsd": + return os.Readlink("/proc/curproc/exe") + case "dragonfly": + return os.Readlink("/proc/curproc/file") + case "solaris": + return os.Readlink(fmt.Sprintf("/proc/%d/path/a.out", os.Getpid())) + } + return "", errors.New("ExecPath not implemented for " + runtime.GOOS) +} diff --git a/vendor/github.com/kardianos/osext/osext_sysctl.go b/vendor/github.com/kardianos/osext/osext_sysctl.go new file mode 100644 index 000000000..66da0bcf9 --- /dev/null +++ b/vendor/github.com/kardianos/osext/osext_sysctl.go @@ -0,0 +1,126 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build darwin freebsd openbsd + +package osext + +import ( + "os" + "os/exec" + "path/filepath" + "runtime" + "syscall" + "unsafe" +) + +var initCwd, initCwdErr = os.Getwd() + +func executable() (string, error) { + var mib [4]int32 + switch runtime.GOOS { + case "freebsd": + mib = [4]int32{1 /* CTL_KERN */, 14 /* KERN_PROC */, 12 /* KERN_PROC_PATHNAME */, -1} + case "darwin": + mib = [4]int32{1 /* CTL_KERN */, 38 /* KERN_PROCARGS */, int32(os.Getpid()), -1} + case "openbsd": + mib = [4]int32{1 /* CTL_KERN */, 55 /* KERN_PROC_ARGS */, int32(os.Getpid()), 1 /* KERN_PROC_ARGV */} + } + + n := uintptr(0) + // Get length. + _, _, errNum := syscall.Syscall6(syscall.SYS___SYSCTL, uintptr(unsafe.Pointer(&mib[0])), 4, 0, uintptr(unsafe.Pointer(&n)), 0, 0) + if errNum != 0 { + return "", errNum + } + if n == 0 { // This shouldn't happen. + return "", nil + } + buf := make([]byte, n) + _, _, errNum = syscall.Syscall6(syscall.SYS___SYSCTL, uintptr(unsafe.Pointer(&mib[0])), 4, uintptr(unsafe.Pointer(&buf[0])), uintptr(unsafe.Pointer(&n)), 0, 0) + if errNum != 0 { + return "", errNum + } + if n == 0 { // This shouldn't happen. + return "", nil + } + + var execPath string + switch runtime.GOOS { + case "openbsd": + // buf now contains **argv, with pointers to each of the C-style + // NULL terminated arguments. + var args []string + argv := uintptr(unsafe.Pointer(&buf[0])) + Loop: + for { + argp := *(**[1 << 20]byte)(unsafe.Pointer(argv)) + if argp == nil { + break + } + for i := 0; uintptr(i) < n; i++ { + // we don't want the full arguments list + if string(argp[i]) == " " { + break Loop + } + if argp[i] != 0 { + continue + } + args = append(args, string(argp[:i])) + n -= uintptr(i) + break + } + if n < unsafe.Sizeof(argv) { + break + } + argv += unsafe.Sizeof(argv) + n -= unsafe.Sizeof(argv) + } + execPath = args[0] + // There is no canonical way to get an executable path on + // OpenBSD, so check PATH in case we are called directly + if execPath[0] != '/' && execPath[0] != '.' 
{ + execIsInPath, err := exec.LookPath(execPath) + if err == nil { + execPath = execIsInPath + } + } + default: + for i, v := range buf { + if v == 0 { + buf = buf[:i] + break + } + } + execPath = string(buf) + } + + var err error + // execPath will not be empty due to above checks. + // Try to get the absolute path if the execPath is not rooted. + if execPath[0] != '/' { + execPath, err = getAbs(execPath) + if err != nil { + return execPath, err + } + } + // For darwin KERN_PROCARGS may return the path to a symlink rather than the + // actual executable. + if runtime.GOOS == "darwin" { + if execPath, err = filepath.EvalSymlinks(execPath); err != nil { + return execPath, err + } + } + return execPath, nil +} + +func getAbs(execPath string) (string, error) { + if initCwdErr != nil { + return execPath, initCwdErr + } + // The execPath may begin with a "../" or a "./" so clean it first. + // Join the two paths, trailing and starting slashes undetermined, so use + // the generic Join function. + return filepath.Join(initCwd, filepath.Clean(execPath)), nil +} diff --git a/vendor/github.com/kardianos/osext/osext_test.go b/vendor/github.com/kardianos/osext/osext_test.go new file mode 100644 index 000000000..eb18236c0 --- /dev/null +++ b/vendor/github.com/kardianos/osext/osext_test.go @@ -0,0 +1,203 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build darwin linux freebsd netbsd windows openbsd + +package osext + +import ( + "bytes" + "fmt" + "io" + "os" + "os/exec" + "path/filepath" + "runtime" + "testing" +) + +const ( + executableEnvVar = "OSTEST_OUTPUT_EXECUTABLE" + + executableEnvValueMatch = "match" + executableEnvValueDelete = "delete" +) + +func TestPrintExecutable(t *testing.T) { + ef, err := Executable() + if err != nil { + t.Fatalf("Executable failed: %v", err) + } + t.Log("Executable:", ef) +} +func TestPrintExecutableFolder(t *testing.T) { + ef, err := ExecutableFolder() + if err != nil { + t.Fatalf("ExecutableFolder failed: %v", err) + } + t.Log("Executable Folder:", ef) +} +func TestExecutableFolder(t *testing.T) { + ef, err := ExecutableFolder() + if err != nil { + t.Fatalf("ExecutableFolder failed: %v", err) + } + if ef[len(ef)-1] == filepath.Separator { + t.Fatal("ExecutableFolder ends with a trailing slash.") + } +} +func TestExecutableMatch(t *testing.T) { + ep, err := Executable() + if err != nil { + t.Fatalf("Executable failed: %v", err) + } + + // fullpath to be of the form "dir/prog". + dir := filepath.Dir(filepath.Dir(ep)) + fullpath, err := filepath.Rel(dir, ep) + if err != nil { + t.Fatalf("filepath.Rel: %v", err) + } + // Make child start with a relative program path. + // Alter argv[0] for child to verify getting real path without argv[0]. 
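+	// The child is this same test binary; TestMain detects the env var and
+	// prints its own Executable() result for comparison.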
+ cmd := &exec.Cmd{ + Dir: dir, + Path: fullpath, + Env: []string{fmt.Sprintf("%s=%s", executableEnvVar, executableEnvValueMatch)}, + } + out, err := cmd.CombinedOutput() + if err != nil { + t.Fatalf("exec(self) failed: %v", err) + } + outs := string(out) + if !filepath.IsAbs(outs) { + t.Fatalf("Child returned %q, want an absolute path", out) + } + if !sameFile(outs, ep) { + t.Fatalf("Child returned %q, not the same file as %q", out, ep) + } +} + +func TestExecutableDelete(t *testing.T) { + if runtime.GOOS != "linux" { + t.Skip() + } + fpath, err := Executable() + if err != nil { + t.Fatalf("Executable failed: %v", err) + } + + r, w := io.Pipe() + stderrBuff := &bytes.Buffer{} + stdoutBuff := &bytes.Buffer{} + cmd := &exec.Cmd{ + Path: fpath, + Env: []string{fmt.Sprintf("%s=%s", executableEnvVar, executableEnvValueDelete)}, + Stdin: r, + Stderr: stderrBuff, + Stdout: stdoutBuff, + } + err = cmd.Start() + if err != nil { + t.Fatalf("exec(self) start failed: %v", err) + } + + tempPath := fpath + "_copy" + _ = os.Remove(tempPath) + + err = copyFile(tempPath, fpath) + if err != nil { + t.Fatalf("copy file failed: %v", err) + } + err = os.Remove(fpath) + if err != nil { + t.Fatalf("remove running test file failed: %v", err) + } + err = os.Rename(tempPath, fpath) + if err != nil { + t.Fatalf("rename copy to previous name failed: %v", err) + } + + w.Write([]byte{0}) + w.Close() + + err = cmd.Wait() + if err != nil { + t.Fatalf("exec wait failed: %v", err) + } + + childPath := stderrBuff.String() + if !filepath.IsAbs(childPath) { + t.Fatalf("Child returned %q, want an absolute path", childPath) + } + if !sameFile(childPath, fpath) { + t.Fatalf("Child returned %q, not the same file as %q", childPath, fpath) + } +} + +func sameFile(fn1, fn2 string) bool { + fi1, err := os.Stat(fn1) + if err != nil { + return false + } + fi2, err := os.Stat(fn2) + if err != nil { + return false + } + return os.SameFile(fi1, fi2) +} +func copyFile(dest, src string) error { + df, err := os.Create(dest) + if err != nil { + return err + } + defer df.Close() + + sf, err := os.Open(src) + if err != nil { + return err + } + defer sf.Close() + + _, err = io.Copy(df, sf) + return err +} + +func TestMain(m *testing.M) { + env := os.Getenv(executableEnvVar) + switch env { + case "": + os.Exit(m.Run()) + case executableEnvValueMatch: + // First chdir to another path. + dir := "/" + if runtime.GOOS == "windows" { + dir = filepath.VolumeName(".") + } + os.Chdir(dir) + if ep, err := Executable(); err != nil { + fmt.Fprint(os.Stderr, "ERROR: ", err) + } else { + fmt.Fprint(os.Stderr, ep) + } + case executableEnvValueDelete: + bb := make([]byte, 1) + var err error + n, err := os.Stdin.Read(bb) + if err != nil { + fmt.Fprint(os.Stderr, "ERROR: ", err) + os.Exit(2) + } + if n != 1 { + fmt.Fprint(os.Stderr, "ERROR: n != 1, n == ", n) + os.Exit(2) + } + if ep, err := Executable(); err != nil { + fmt.Fprint(os.Stderr, "ERROR: ", err) + } else { + fmt.Fprint(os.Stderr, ep) + } + } + os.Exit(0) +} diff --git a/vendor/github.com/kardianos/osext/osext_windows.go b/vendor/github.com/kardianos/osext/osext_windows.go new file mode 100644 index 000000000..72d282cf8 --- /dev/null +++ b/vendor/github.com/kardianos/osext/osext_windows.go @@ -0,0 +1,34 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package osext + +import ( + "syscall" + "unicode/utf16" + "unsafe" +) + +var ( + kernel = syscall.MustLoadDLL("kernel32.dll") + getModuleFileNameProc = kernel.MustFindProc("GetModuleFileNameW") +) + +// GetModuleFileName() with hModule = NULL +func executable() (exePath string, err error) { + return getModuleFileName() +} + +func getModuleFileName() (string, error) { + var n uint32 + b := make([]uint16, syscall.MAX_PATH) + size := uint32(len(b)) + + r0, _, e1 := getModuleFileNameProc.Call(0, uintptr(unsafe.Pointer(&b[0])), uintptr(size)) + n = uint32(r0) + if n == 0 { + return "", e1 + } + return string(utf16.Decode(b[0:n])), nil +} diff --git a/vendor/github.com/lib/pq/.gitignore b/vendor/github.com/lib/pq/.gitignore new file mode 100644 index 000000000..0f1d00e11 --- /dev/null +++ b/vendor/github.com/lib/pq/.gitignore @@ -0,0 +1,4 @@ +.db +*.test +*~ +*.swp diff --git a/vendor/github.com/lib/pq/.travis.yml b/vendor/github.com/lib/pq/.travis.yml new file mode 100644 index 000000000..c94a8718e --- /dev/null +++ b/vendor/github.com/lib/pq/.travis.yml @@ -0,0 +1,71 @@ +language: go + +go: + - 1.1 + - 1.2 + - 1.3 + - 1.4 + - 1.5 + - tip + +before_install: + - psql --version + - sudo /etc/init.d/postgresql stop + - sudo apt-get -y --purge remove postgresql libpq-dev libpq5 postgresql-client-common postgresql-common + - sudo rm -rf /var/lib/postgresql + - wget --quiet -O - https://www.postgresql.org/media/keys/ACCC4CF8.asc | sudo apt-key add - + - sudo sh -c "echo deb http://apt.postgresql.org/pub/repos/apt/ $(lsb_release -cs)-pgdg main $PGVERSION >> /etc/apt/sources.list.d/postgresql.list" + - sudo apt-get update -qq + - sudo apt-get -y -o Dpkg::Options::=--force-confdef -o Dpkg::Options::="--force-confnew" install postgresql-$PGVERSION postgresql-server-dev-$PGVERSION postgresql-contrib-$PGVERSION + - sudo chmod 777 /etc/postgresql/$PGVERSION/main/pg_hba.conf + - echo "local all postgres trust" > /etc/postgresql/$PGVERSION/main/pg_hba.conf + - echo "local all all trust" >> /etc/postgresql/$PGVERSION/main/pg_hba.conf + - echo "hostnossl all pqgossltest 127.0.0.1/32 reject" >> /etc/postgresql/$PGVERSION/main/pg_hba.conf + - echo "hostnossl all pqgosslcert 127.0.0.1/32 reject" >> /etc/postgresql/$PGVERSION/main/pg_hba.conf + - echo "hostssl all pqgossltest 127.0.0.1/32 trust" >> /etc/postgresql/$PGVERSION/main/pg_hba.conf + - echo "hostssl all pqgosslcert 127.0.0.1/32 cert" >> /etc/postgresql/$PGVERSION/main/pg_hba.conf + - echo "host all all 127.0.0.1/32 trust" >> /etc/postgresql/$PGVERSION/main/pg_hba.conf + - echo "hostnossl all pqgossltest ::1/128 reject" >> /etc/postgresql/$PGVERSION/main/pg_hba.conf + - echo "hostnossl all pqgosslcert ::1/128 reject" >> /etc/postgresql/$PGVERSION/main/pg_hba.conf + - echo "hostssl all pqgossltest ::1/128 trust" >> /etc/postgresql/$PGVERSION/main/pg_hba.conf + - echo "hostssl all pqgosslcert ::1/128 cert" >> /etc/postgresql/$PGVERSION/main/pg_hba.conf + - echo "host all all ::1/128 trust" >> /etc/postgresql/$PGVERSION/main/pg_hba.conf + - sudo install -o postgres -g postgres -m 600 -t /var/lib/postgresql/$PGVERSION/main/ certs/server.key certs/server.crt certs/root.crt + - sudo bash -c "[[ '${PGVERSION}' < '9.2' ]] || (echo \"ssl_cert_file = 'server.crt'\" >> /etc/postgresql/$PGVERSION/main/postgresql.conf)" + - sudo bash -c "[[ '${PGVERSION}' < '9.2' ]] || (echo \"ssl_key_file = 'server.key'\" >> /etc/postgresql/$PGVERSION/main/postgresql.conf)" + - sudo bash -c "[[ '${PGVERSION}' < '9.2' ]] || (echo \"ssl_ca_file = 'root.crt'\" >> 
/etc/postgresql/$PGVERSION/main/postgresql.conf)"
+  - sudo sh -c "echo 127.0.0.1 postgres >> /etc/hosts"
+  - sudo ls -l /var/lib/postgresql/$PGVERSION/main/
+  - sudo cat /etc/postgresql/$PGVERSION/main/postgresql.conf
+  - sudo chmod 600 $PQSSLCERTTEST_PATH/postgresql.key
+  - sudo /etc/init.d/postgresql restart
+
+env:
+  global:
+    - PGUSER=postgres
+    - PQGOSSLTESTS=1
+    - PQSSLCERTTEST_PATH=$PWD/certs
+    - PGHOST=127.0.0.1
+  matrix:
+    - PGVERSION=9.5 PQTEST_BINARY_PARAMETERS=yes
+    - PGVERSION=9.4 PQTEST_BINARY_PARAMETERS=yes
+    - PGVERSION=9.3 PQTEST_BINARY_PARAMETERS=yes
+    - PGVERSION=9.2 PQTEST_BINARY_PARAMETERS=yes
+    - PGVERSION=9.1 PQTEST_BINARY_PARAMETERS=yes
+    - PGVERSION=9.0 PQTEST_BINARY_PARAMETERS=yes
+    - PGVERSION=8.4 PQTEST_BINARY_PARAMETERS=yes
+    - PGVERSION=9.5 PQTEST_BINARY_PARAMETERS=no
+    - PGVERSION=9.4 PQTEST_BINARY_PARAMETERS=no
+    - PGVERSION=9.3 PQTEST_BINARY_PARAMETERS=no
+    - PGVERSION=9.2 PQTEST_BINARY_PARAMETERS=no
+    - PGVERSION=9.1 PQTEST_BINARY_PARAMETERS=no
+    - PGVERSION=9.0 PQTEST_BINARY_PARAMETERS=no
+    - PGVERSION=8.4 PQTEST_BINARY_PARAMETERS=no
+
+script:
+  - go test -v ./...
+
+before_script:
+  - psql -c 'create database pqgotest' -U postgres
+  - psql -c 'create user pqgossltest' -U postgres
+  - psql -c 'create user pqgosslcert' -U postgres
diff --git a/vendor/github.com/lib/pq/CONTRIBUTING.md b/vendor/github.com/lib/pq/CONTRIBUTING.md
new file mode 100644
index 000000000..84c937f15
--- /dev/null
+++ b/vendor/github.com/lib/pq/CONTRIBUTING.md
@@ -0,0 +1,29 @@
+## Contributing to pq
+
+`pq` has a backlog of pull requests, but contributions are still very
+much welcome. You can help with patch review, submitting bug reports,
+or adding new functionality. There is no formal style guide, but
+please conform to the style of existing code and general Go formatting
+conventions when submitting patches.
+
+### Patch review
+
+Help review existing open pull requests by commenting on the code or
+proposed functionality.
+
+### Bug reports
+
+We appreciate any bug reports, but especially ones with self-contained
+(doesn't depend on code outside of pq), minimal (can't be simplified
+further) test cases. It's especially helpful if you can submit a pull
+request with just the failing test case (you'll probably want to
+pattern it after the tests in
+[conn_test.go](https://github.com/lib/pq/blob/master/conn_test.go));
+a sketch of what such a test might look like appears at the end of
+this document.
+
+### New functionality
+
+There are a number of pending patches for new functionality, so
+additional feature patches will take a while to merge. Still, patches
+are generally reviewed based on usefulness and complexity in addition
+to time-in-queue, so if you have a knockout idea, take a shot. Feel
+free to open an issue discussing your proposed patch beforehand.
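+
+For illustration only, a minimal self-contained test case might look
+something like this (the test name and query are hypothetical; it
+assumes the openTestConn helper used throughout this package's tests):
+
+    func TestExampleFailingCase(t *testing.T) {
+        db := openTestConn(t) // shared test helper
+        defer db.Close()
+
+        var got string
+        if err := db.QueryRow("SELECT 'hello'").Scan(&got); err != nil {
+            t.Fatal(err)
+        }
+        if got != "hello" {
+            t.Fatalf("got %q, want %q", got, "hello")
+        }
+    }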
diff --git a/vendor/github.com/lib/pq/LICENSE.md b/vendor/github.com/lib/pq/LICENSE.md
new file mode 100644
index 000000000..5773904a3
--- /dev/null
+++ b/vendor/github.com/lib/pq/LICENSE.md
@@ -0,0 +1,8 @@
+Copyright (c) 2011-2013, 'pq' Contributors
+Portions Copyright (C) 2011 Blake Mizerany
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/vendor/github.com/lib/pq/README.md b/vendor/github.com/lib/pq/README.md
new file mode 100644
index 000000000..b4e3f45cb
--- /dev/null
+++ b/vendor/github.com/lib/pq/README.md
@@ -0,0 +1,105 @@
+# pq - A pure Go postgres driver for Go's database/sql package
+
+[![Build Status](https://travis-ci.org/lib/pq.png?branch=master)](https://travis-ci.org/lib/pq)
+
+## Install
+
+	go get github.com/lib/pq
+
+## Docs
+
+For detailed documentation and basic usage examples, please see the package
+documentation at <http://godoc.org/github.com/lib/pq>.
+
+## Tests
+
+`go test` is used for testing. A running PostgreSQL server is
+required, with the ability to log in. The default database to connect
+to test with is "pqgotest", but it can be overridden using environment
+variables.
+
+Example:
+
+	PGHOST=/var/run/postgresql go test github.com/lib/pq
+
+Optionally, a benchmark suite can be run as part of the tests:
+
+	PGHOST=/var/run/postgresql go test -bench .
+
+## Features
+
+* SSL
+* Handles bad connections for `database/sql`
+* Scan `time.Time` correctly (i.e. `timestamp[tz]`, `time[tz]`, `date`)
+* Scan binary blobs correctly (i.e. `bytea`)
+* Package for `hstore` support
+* COPY FROM support
+* pq.ParseURL for converting URLs to connection strings for sql.Open.
+* Many libpq compatible environment variables
+* Unix socket support
+* Notifications: `LISTEN`/`NOTIFY`
+* pgpass support
+
+## Future / Things you can help with
+
+* Better COPY FROM / COPY TO (see discussion in #181)
+
+## Thank you (alphabetical)
+
+Some of these contributors are from the original library `bmizerany/pq.go` whose
+code still exists in here.
+ +* Andy Balholm (andybalholm) +* Ben Berkert (benburkert) +* Benjamin Heatwole (bheatwole) +* Bill Mill (llimllib) +* Bjørn Madsen (aeons) +* Blake Gentry (bgentry) +* Brad Fitzpatrick (bradfitz) +* Charlie Melbye (cmelbye) +* Chris Bandy (cbandy) +* Chris Gilling (cgilling) +* Chris Walsh (cwds) +* Dan Sosedoff (sosedoff) +* Daniel Farina (fdr) +* Eric Chlebek (echlebek) +* Eric Garrido (minusnine) +* Eric Urban (hydrogen18) +* Everyone at The Go Team +* Evan Shaw (edsrzf) +* Ewan Chou (coocood) +* Fazal Majid (fazalmajid) +* Federico Romero (federomero) +* Fumin (fumin) +* Gary Burd (garyburd) +* Heroku (heroku) +* James Pozdena (jpoz) +* Jason McVetta (jmcvetta) +* Jeremy Jay (pbnjay) +* Joakim Sernbrant (serbaut) +* John Gallagher (jgallagher) +* Jonathan Rudenberg (titanous) +* Joël Stemmer (jstemmer) +* Kamil Kisiel (kisielk) +* Kelly Dunn (kellydunn) +* Keith Rarick (kr) +* Kir Shatrov (kirs) +* Lann Martin (lann) +* Maciek Sakrejda (deafbybeheading) +* Marc Brinkmann (mbr) +* Marko Tiikkaja (johto) +* Matt Newberry (MattNewberry) +* Matt Robenolt (mattrobenolt) +* Martin Olsen (martinolsen) +* Mike Lewis (mikelikespie) +* Nicolas Patry (Narsil) +* Oliver Tonnhofer (olt) +* Patrick Hayes (phayes) +* Paul Hammond (paulhammond) +* Ryan Smith (ryandotsmith) +* Samuel Stauffer (samuel) +* Timothée Peignier (cyberdelia) +* Travis Cline (tmc) +* TruongSinh Tran-Nguyen (truongsinh) +* Yaismel Miranda (ympons) +* notedit (notedit) diff --git a/vendor/github.com/lib/pq/bench_test.go b/vendor/github.com/lib/pq/bench_test.go new file mode 100644 index 000000000..e71f41d06 --- /dev/null +++ b/vendor/github.com/lib/pq/bench_test.go @@ -0,0 +1,435 @@ +// +build go1.1 + +package pq + +import ( + "bufio" + "bytes" + "database/sql" + "database/sql/driver" + "io" + "math/rand" + "net" + "runtime" + "strconv" + "strings" + "sync" + "testing" + "time" + + "github.com/lib/pq/oid" +) + +var ( + selectStringQuery = "SELECT '" + strings.Repeat("0123456789", 10) + "'" + selectSeriesQuery = "SELECT generate_series(1, 100)" +) + +func BenchmarkSelectString(b *testing.B) { + var result string + benchQuery(b, selectStringQuery, &result) +} + +func BenchmarkSelectSeries(b *testing.B) { + var result int + benchQuery(b, selectSeriesQuery, &result) +} + +func benchQuery(b *testing.B, query string, result interface{}) { + b.StopTimer() + db := openTestConn(b) + defer db.Close() + b.StartTimer() + + for i := 0; i < b.N; i++ { + benchQueryLoop(b, db, query, result) + } +} + +func benchQueryLoop(b *testing.B, db *sql.DB, query string, result interface{}) { + rows, err := db.Query(query) + if err != nil { + b.Fatal(err) + } + defer rows.Close() + for rows.Next() { + err = rows.Scan(result) + if err != nil { + b.Fatal("failed to scan", err) + } + } +} + +// reading from circularConn yields content[:prefixLen] once, followed by +// content[prefixLen:] over and over again. It never returns EOF. 
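+// fakeConn (defined below) wraps a circularConn in a conn so the mock
+// benchmarks can replay a canned server response without real network I/O.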
+type circularConn struct { + content string + prefixLen int + pos int + net.Conn // for all other net.Conn methods that will never be called +} + +func (r *circularConn) Read(b []byte) (n int, err error) { + n = copy(b, r.content[r.pos:]) + r.pos += n + if r.pos >= len(r.content) { + r.pos = r.prefixLen + } + return +} + +func (r *circularConn) Write(b []byte) (n int, err error) { return len(b), nil } + +func (r *circularConn) Close() error { return nil } + +func fakeConn(content string, prefixLen int) *conn { + c := &circularConn{content: content, prefixLen: prefixLen} + return &conn{buf: bufio.NewReader(c), c: c} +} + +// This benchmark is meant to be the same as BenchmarkSelectString, but takes +// out some of the factors this package can't control. The numbers are less noisy, +// but also the costs of network communication aren't accurately represented. +func BenchmarkMockSelectString(b *testing.B) { + b.StopTimer() + // taken from a recorded run of BenchmarkSelectString + // See: http://www.postgresql.org/docs/current/static/protocol-message-formats.html + const response = "1\x00\x00\x00\x04" + + "t\x00\x00\x00\x06\x00\x00" + + "T\x00\x00\x00!\x00\x01?column?\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\xc1\xff\xfe\xff\xff\xff\xff\x00\x00" + + "Z\x00\x00\x00\x05I" + + "2\x00\x00\x00\x04" + + "D\x00\x00\x00n\x00\x01\x00\x00\x00d0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789" + + "C\x00\x00\x00\rSELECT 1\x00" + + "Z\x00\x00\x00\x05I" + + "3\x00\x00\x00\x04" + + "Z\x00\x00\x00\x05I" + c := fakeConn(response, 0) + b.StartTimer() + + for i := 0; i < b.N; i++ { + benchMockQuery(b, c, selectStringQuery) + } +} + +var seriesRowData = func() string { + var buf bytes.Buffer + for i := 1; i <= 100; i++ { + digits := byte(2) + if i >= 100 { + digits = 3 + } else if i < 10 { + digits = 1 + } + buf.WriteString("D\x00\x00\x00") + buf.WriteByte(10 + digits) + buf.WriteString("\x00\x01\x00\x00\x00") + buf.WriteByte(digits) + buf.WriteString(strconv.Itoa(i)) + } + return buf.String() +}() + +func BenchmarkMockSelectSeries(b *testing.B) { + b.StopTimer() + var response = "1\x00\x00\x00\x04" + + "t\x00\x00\x00\x06\x00\x00" + + "T\x00\x00\x00!\x00\x01?column?\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\xc1\xff\xfe\xff\xff\xff\xff\x00\x00" + + "Z\x00\x00\x00\x05I" + + "2\x00\x00\x00\x04" + + seriesRowData + + "C\x00\x00\x00\x0fSELECT 100\x00" + + "Z\x00\x00\x00\x05I" + + "3\x00\x00\x00\x04" + + "Z\x00\x00\x00\x05I" + c := fakeConn(response, 0) + b.StartTimer() + + for i := 0; i < b.N; i++ { + benchMockQuery(b, c, selectSeriesQuery) + } +} + +func benchMockQuery(b *testing.B, c *conn, query string) { + stmt, err := c.Prepare(query) + if err != nil { + b.Fatal(err) + } + defer stmt.Close() + rows, err := stmt.Query(nil) + if err != nil { + b.Fatal(err) + } + defer rows.Close() + var dest [1]driver.Value + for { + if err := rows.Next(dest[:]); err != nil { + if err == io.EOF { + break + } + b.Fatal(err) + } + } +} + +func BenchmarkPreparedSelectString(b *testing.B) { + var result string + benchPreparedQuery(b, selectStringQuery, &result) +} + +func BenchmarkPreparedSelectSeries(b *testing.B) { + var result int + benchPreparedQuery(b, selectSeriesQuery, &result) +} + +func benchPreparedQuery(b *testing.B, query string, result interface{}) { + b.StopTimer() + db := openTestConn(b) + defer db.Close() + stmt, err := db.Prepare(query) + if err != nil { + b.Fatal(err) + } + defer stmt.Close() + b.StartTimer() + + for i := 0; i < b.N; i++ { + benchPreparedQueryLoop(b, 
db, stmt, result)
+	}
+}
+
+func benchPreparedQueryLoop(b *testing.B, db *sql.DB, stmt *sql.Stmt, result interface{}) {
+	rows, err := stmt.Query()
+	if err != nil {
+		b.Fatal(err)
+	}
+	if !rows.Next() {
+		rows.Close()
+		b.Fatal("no rows")
+	}
+	defer rows.Close()
+	for rows.Next() {
+		err = rows.Scan(&result)
+		if err != nil {
+			b.Fatal("failed to scan")
+		}
+	}
+}
+
+// See the comment for BenchmarkMockSelectString.
+func BenchmarkMockPreparedSelectString(b *testing.B) {
+	b.StopTimer()
+	const parseResponse = "1\x00\x00\x00\x04" +
+		"t\x00\x00\x00\x06\x00\x00" +
+		"T\x00\x00\x00!\x00\x01?column?\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\xc1\xff\xfe\xff\xff\xff\xff\x00\x00" +
+		"Z\x00\x00\x00\x05I"
+	const responses = parseResponse +
+		"2\x00\x00\x00\x04" +
+		"D\x00\x00\x00n\x00\x01\x00\x00\x00d0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789" +
+		"C\x00\x00\x00\rSELECT 1\x00" +
+		"Z\x00\x00\x00\x05I"
+	c := fakeConn(responses, len(parseResponse))
+
+	stmt, err := c.Prepare(selectStringQuery)
+	if err != nil {
+		b.Fatal(err)
+	}
+	b.StartTimer()
+
+	for i := 0; i < b.N; i++ {
+		benchPreparedMockQuery(b, c, stmt)
+	}
+}
+
+func BenchmarkMockPreparedSelectSeries(b *testing.B) {
+	b.StopTimer()
+	const parseResponse = "1\x00\x00\x00\x04" +
+		"t\x00\x00\x00\x06\x00\x00" +
+		"T\x00\x00\x00!\x00\x01?column?\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\xc1\xff\xfe\xff\xff\xff\xff\x00\x00" +
+		"Z\x00\x00\x00\x05I"
+	var responses = parseResponse +
+		"2\x00\x00\x00\x04" +
+		seriesRowData +
+		"C\x00\x00\x00\x0fSELECT 100\x00" +
+		"Z\x00\x00\x00\x05I"
+	c := fakeConn(responses, len(parseResponse))
+
+	stmt, err := c.Prepare(selectSeriesQuery)
+	if err != nil {
+		b.Fatal(err)
+	}
+	b.StartTimer()
+
+	for i := 0; i < b.N; i++ {
+		benchPreparedMockQuery(b, c, stmt)
+	}
+}
+
+func benchPreparedMockQuery(b *testing.B, c *conn, stmt driver.Stmt) {
+	rows, err := stmt.Query(nil)
+	if err != nil {
+		b.Fatal(err)
+	}
+	defer rows.Close()
+	var dest [1]driver.Value
+	for {
+		if err := rows.Next(dest[:]); err != nil {
+			if err == io.EOF {
+				break
+			}
+			b.Fatal(err)
+		}
+	}
+}
+
+func BenchmarkEncodeInt64(b *testing.B) {
+	for i := 0; i < b.N; i++ {
+		encode(&parameterStatus{}, int64(1234), oid.T_int8)
+	}
+}
+
+func BenchmarkEncodeFloat64(b *testing.B) {
+	for i := 0; i < b.N; i++ {
+		encode(&parameterStatus{}, 3.14159, oid.T_float8)
+	}
+}
+
+var testByteString = []byte("abcdefghijklmnopqrstuvwxyz")
+
+func BenchmarkEncodeByteaHex(b *testing.B) {
+	for i := 0; i < b.N; i++ {
+		encode(&parameterStatus{serverVersion: 90000}, testByteString, oid.T_bytea)
+	}
+}
+func BenchmarkEncodeByteaEscape(b *testing.B) {
+	for i := 0; i < b.N; i++ {
+		encode(&parameterStatus{serverVersion: 84000}, testByteString, oid.T_bytea)
+	}
+}
+
+func BenchmarkEncodeBool(b *testing.B) {
+	for i := 0; i < b.N; i++ {
+		encode(&parameterStatus{}, true, oid.T_bool)
+	}
+}
+
+var testTimestamptz = time.Date(2001, time.January, 1, 0, 0, 0, 0, time.Local)
+
+func BenchmarkEncodeTimestamptz(b *testing.B) {
+	for i := 0; i < b.N; i++ {
+		encode(&parameterStatus{}, testTimestamptz, oid.T_timestamptz)
+	}
+}
+
+var testIntBytes = []byte("1234")
+
+func BenchmarkDecodeInt64(b *testing.B) {
+	for i := 0; i < b.N; i++ {
+		decode(&parameterStatus{}, testIntBytes, oid.T_int8, formatText)
+	}
+}
+
+var testFloatBytes = []byte("3.14159")
+
+func BenchmarkDecodeFloat64(b *testing.B) {
+	for i := 0; i < b.N; i++ {
+		decode(&parameterStatus{}, testFloatBytes, oid.T_float8, formatText)
+	}
+}
+
+var testBoolBytes = []byte{'t'}
+
+func BenchmarkDecodeBool(b *testing.B) {
+	for i := 0; i < b.N; i++ {
+		decode(&parameterStatus{}, testBoolBytes, oid.T_bool, formatText)
+	}
+}
+
+func TestDecodeBool(t *testing.T) {
+	db := openTestConn(t)
+	rows, err := db.Query("select true")
+	if err != nil {
+		t.Fatal(err)
+	}
+	rows.Close()
+}
+
+var testTimestamptzBytes = []byte("2013-09-17 22:15:32.360754-07")
+
+func BenchmarkDecodeTimestamptz(b *testing.B) {
+	for i := 0; i < b.N; i++ {
+		decode(&parameterStatus{}, testTimestamptzBytes, oid.T_timestamptz, formatText)
+	}
+}
+
+func BenchmarkDecodeTimestamptzMultiThread(b *testing.B) {
+	oldProcs := runtime.GOMAXPROCS(0)
+	defer runtime.GOMAXPROCS(oldProcs)
+	runtime.GOMAXPROCS(runtime.NumCPU())
+	globalLocationCache = newLocationCache()
+
+	f := func(wg *sync.WaitGroup, loops int) {
+		defer wg.Done()
+		for i := 0; i < loops; i++ {
+			decode(&parameterStatus{}, testTimestamptzBytes, oid.T_timestamptz, formatText)
+		}
+	}
+
+	wg := &sync.WaitGroup{}
+	b.ResetTimer()
+	for j := 0; j < 10; j++ {
+		wg.Add(1)
+		go f(wg, b.N/10)
+	}
+	wg.Wait()
+}
+
+func BenchmarkLocationCache(b *testing.B) {
+	globalLocationCache = newLocationCache()
+	for i := 0; i < b.N; i++ {
+		globalLocationCache.getLocation(rand.Intn(10000))
+	}
+}
+
+func BenchmarkLocationCacheMultiThread(b *testing.B) {
+	oldProcs := runtime.GOMAXPROCS(0)
+	defer runtime.GOMAXPROCS(oldProcs)
+	runtime.GOMAXPROCS(runtime.NumCPU())
+	globalLocationCache = newLocationCache()
+
+	f := func(wg *sync.WaitGroup, loops int) {
+		defer wg.Done()
+		for i := 0; i < loops; i++ {
+			globalLocationCache.getLocation(rand.Intn(10000))
+		}
+	}
+
+	wg := &sync.WaitGroup{}
+	b.ResetTimer()
+	for j := 0; j < 10; j++ {
+		wg.Add(1)
+		go f(wg, b.N/10)
+	}
+	wg.Wait()
+}
+
+// Stress test the performance of parsing results from the wire.
+func BenchmarkResultParsing(b *testing.B) {
+	b.StopTimer()
+
+	db := openTestConn(b)
+	defer db.Close()
+	_, err := db.Exec("BEGIN")
+	if err != nil {
+		b.Fatal(err)
+	}
+
+	b.StartTimer()
+	for i := 0; i < b.N; i++ {
+		res, err := db.Query("SELECT generate_series(1, 50000)")
+		if err != nil {
+			b.Fatal(err)
+		}
+		res.Close()
+	}
+}
diff --git a/vendor/github.com/lib/pq/buf.go b/vendor/github.com/lib/pq/buf.go
new file mode 100644
index 000000000..666b0012a
--- /dev/null
+++ b/vendor/github.com/lib/pq/buf.go
@@ -0,0 +1,91 @@
+package pq
+
+import (
+	"bytes"
+	"encoding/binary"
+
+	"github.com/lib/pq/oid"
+)
+
+type readBuf []byte
+
+func (b *readBuf) int32() (n int) {
+	n = int(int32(binary.BigEndian.Uint32(*b)))
+	*b = (*b)[4:]
+	return
+}
+
+func (b *readBuf) oid() (n oid.Oid) {
+	n = oid.Oid(binary.BigEndian.Uint32(*b))
+	*b = (*b)[4:]
+	return
+}
+
+// N.B.: this is actually an unsigned 16-bit integer, unlike int32
+func (b *readBuf) int16() (n int) {
+	n = int(binary.BigEndian.Uint16(*b))
+	*b = (*b)[2:]
+	return
+}
+
+func (b *readBuf) string() string {
+	i := bytes.IndexByte(*b, 0)
+	if i < 0 {
+		errorf("invalid message format; expected string terminator")
+	}
+	s := (*b)[:i]
+	*b = (*b)[i+1:]
+	return string(s)
+}
+
+func (b *readBuf) next(n int) (v []byte) {
+	v = (*b)[:n]
+	*b = (*b)[n:]
+	return
+}
+
+func (b *readBuf) byte() byte {
+	return b.next(1)[0]
+}
+
+type writeBuf struct {
+	buf []byte
+	pos int
+}
+
+func (b *writeBuf) int32(n int) {
+	x := make([]byte, 4)
+	binary.BigEndian.PutUint32(x, uint32(n))
+	b.buf = append(b.buf, x...)
+}
+
+func (b *writeBuf) int16(n int) {
+	x := make([]byte, 2)
+	binary.BigEndian.PutUint16(x, uint16(n))
+	b.buf = append(b.buf, x...)
+} + +func (b *writeBuf) string(s string) { + b.buf = append(b.buf, (s + "\000")...) +} + +func (b *writeBuf) byte(c byte) { + b.buf = append(b.buf, c) +} + +func (b *writeBuf) bytes(v []byte) { + b.buf = append(b.buf, v...) +} + +func (b *writeBuf) wrap() []byte { + p := b.buf[b.pos:] + binary.BigEndian.PutUint32(p, uint32(len(p))) + return b.buf +} + +func (b *writeBuf) next(c byte) { + p := b.buf[b.pos:] + binary.BigEndian.PutUint32(p, uint32(len(p))) + b.pos = len(b.buf) + 1 + b.buf = append(b.buf, c, 0, 0, 0, 0) +} diff --git a/vendor/github.com/lib/pq/certs/README b/vendor/github.com/lib/pq/certs/README new file mode 100644 index 000000000..24ab7b256 --- /dev/null +++ b/vendor/github.com/lib/pq/certs/README @@ -0,0 +1,3 @@ +This directory contains certificates and private keys for testing some +SSL-related functionality in Travis. Do NOT use these certificates for +anything other than testing. diff --git a/vendor/github.com/lib/pq/certs/postgresql.crt b/vendor/github.com/lib/pq/certs/postgresql.crt new file mode 100644 index 000000000..6e6b4284a --- /dev/null +++ b/vendor/github.com/lib/pq/certs/postgresql.crt @@ -0,0 +1,69 @@ +Certificate: + Data: + Version: 3 (0x2) + Serial Number: 2 (0x2) + Signature Algorithm: sha256WithRSAEncryption + Issuer: C=US, ST=Nevada, L=Las Vegas, O=github.com/lib/pq, CN=pq CA + Validity + Not Before: Oct 11 15:10:11 2014 GMT + Not After : Oct 8 15:10:11 2024 GMT + Subject: C=US, ST=Nevada, L=Las Vegas, O=github.com/lib/pq, CN=pqgosslcert + Subject Public Key Info: + Public Key Algorithm: rsaEncryption + RSA Public Key: (1024 bit) + Modulus (1024 bit): + 00:e3:8c:06:9a:70:54:51:d1:34:34:83:39:cd:a2: + 59:0f:05:ed:8d:d8:0e:34:d0:92:f4:09:4d:ee:8c: + 78:55:49:24:f8:3c:e0:34:58:02:b2:e7:94:58:c1: + e8:e5:bb:d1:af:f6:54:c1:40:b1:90:70:79:0d:35: + 54:9c:8f:16:e9:c2:f0:92:e6:64:49:38:c1:76:f8: + 47:66:c4:5b:4a:b6:a9:43:ce:c8:be:6c:4d:2b:94: + 97:3c:55:bc:d1:d0:6e:b7:53:ae:89:5c:4b:6b:86: + 40:be:c1:ae:1e:64:ce:9c:ae:87:0a:69:e5:c8:21: + 12:be:ae:1d:f6:45:df:16:a7 + Exponent: 65537 (0x10001) + X509v3 extensions: + X509v3 Subject Key Identifier: + 9B:25:31:63:A2:D8:06:FF:CB:E3:E9:96:FF:0D:BA:DC:12:7D:04:CF + X509v3 Authority Key Identifier: + keyid:52:93:ED:1E:76:0A:9F:65:4F:DE:19:66:C1:D5:22:40:35:CB:A0:72 + + X509v3 Basic Constraints: + CA:FALSE + X509v3 Key Usage: + Digital Signature, Non Repudiation, Key Encipherment + Signature Algorithm: sha256WithRSAEncryption + 3e:f5:f8:0b:4e:11:bd:00:86:1f:ce:dc:97:02:98:91:11:f5: + 65:f6:f2:8a:b2:3e:47:92:05:69:28:c9:e9:b4:f7:cf:93:d1: + 2d:81:5d:00:3c:23:be:da:70:ea:59:e1:2c:d3:25:49:ae:a6: + 95:54:c1:10:df:23:e3:fe:d6:e4:76:c7:6b:73:ad:1b:34:7c: + e2:56:cc:c0:37:ae:c5:7a:11:20:6c:3d:05:0e:99:cd:22:6c: + cf:59:a1:da:28:d4:65:ba:7d:2f:2b:3d:69:6d:a6:c1:ae:57: + bf:56:64:13:79:f8:48:46:65:eb:81:67:28:0b:7b:de:47:10: + b3:80:3c:31:d1:58:94:01:51:4a:c7:c8:1a:01:a8:af:c4:cd: + bb:84:a5:d9:8b:b4:b9:a1:64:3e:95:d9:90:1d:d5:3f:67:cc: + 3b:ba:f5:b4:d1:33:77:ee:c2:d2:3e:7e:c5:66:6e:b7:35:4c: + 60:57:b0:b8:be:36:c8:f3:d3:95:8c:28:4a:c9:f7:27:a4:0d: + e5:96:99:eb:f5:c8:bd:f3:84:6d:ef:02:f9:8a:36:7d:6b:5f: + 36:68:37:41:d9:74:ae:c6:78:2e:44:86:a1:ad:43:ca:fb:b5: + 3e:ba:10:23:09:02:ac:62:d1:d0:83:c8:95:b9:e3:5e:30:ff: + 5b:2b:38:fa +-----BEGIN CERTIFICATE----- +MIIDEzCCAfugAwIBAgIBAjANBgkqhkiG9w0BAQsFADBeMQswCQYDVQQGEwJVUzEP +MA0GA1UECBMGTmV2YWRhMRIwEAYDVQQHEwlMYXMgVmVnYXMxGjAYBgNVBAoTEWdp +dGh1Yi5jb20vbGliL3BxMQ4wDAYDVQQDEwVwcSBDQTAeFw0xNDEwMTExNTEwMTFa +Fw0yNDEwMDgxNTEwMTFaMGQxCzAJBgNVBAYTAlVTMQ8wDQYDVQQIEwZOZXZhZGEx 
+EjAQBgNVBAcTCUxhcyBWZWdhczEaMBgGA1UEChMRZ2l0aHViLmNvbS9saWIvcHEx +FDASBgNVBAMTC3BxZ29zc2xjZXJ0MIGfMA0GCSqGSIb3DQEBAQUAA4GNADCBiQKB +gQDjjAaacFRR0TQ0gznNolkPBe2N2A400JL0CU3ujHhVSST4POA0WAKy55RYwejl +u9Gv9lTBQLGQcHkNNVScjxbpwvCS5mRJOMF2+EdmxFtKtqlDzsi+bE0rlJc8VbzR +0G63U66JXEtrhkC+wa4eZM6crocKaeXIIRK+rh32Rd8WpwIDAQABo1owWDAdBgNV +HQ4EFgQUmyUxY6LYBv/L4+mW/w263BJ9BM8wHwYDVR0jBBgwFoAUUpPtHnYKn2VP +3hlmwdUiQDXLoHIwCQYDVR0TBAIwADALBgNVHQ8EBAMCBeAwDQYJKoZIhvcNAQEL +BQADggEBAD71+AtOEb0Ahh/O3JcCmJER9WX28oqyPkeSBWkoyem098+T0S2BXQA8 +I77acOpZ4SzTJUmuppVUwRDfI+P+1uR2x2tzrRs0fOJWzMA3rsV6ESBsPQUOmc0i +bM9Zodoo1GW6fS8rPWltpsGuV79WZBN5+EhGZeuBZygLe95HELOAPDHRWJQBUUrH +yBoBqK/EzbuEpdmLtLmhZD6V2ZAd1T9nzDu69bTRM3fuwtI+fsVmbrc1TGBXsLi+ +Nsjz05WMKErJ9yekDeWWmev1yL3zhG3vAvmKNn1rXzZoN0HZdK7GeC5EhqGtQ8r7 +tT66ECMJAqxi0dCDyJW5414w/1srOPo= +-----END CERTIFICATE----- diff --git a/vendor/github.com/lib/pq/certs/postgresql.key b/vendor/github.com/lib/pq/certs/postgresql.key new file mode 100644 index 000000000..eb8b20be9 --- /dev/null +++ b/vendor/github.com/lib/pq/certs/postgresql.key @@ -0,0 +1,15 @@ +-----BEGIN RSA PRIVATE KEY----- +MIICWwIBAAKBgQDjjAaacFRR0TQ0gznNolkPBe2N2A400JL0CU3ujHhVSST4POA0 +WAKy55RYwejlu9Gv9lTBQLGQcHkNNVScjxbpwvCS5mRJOMF2+EdmxFtKtqlDzsi+ +bE0rlJc8VbzR0G63U66JXEtrhkC+wa4eZM6crocKaeXIIRK+rh32Rd8WpwIDAQAB +AoGAM5dM6/kp9P700i8qjOgRPym96Zoh5nGfz/rIE5z/r36NBkdvIg8OVZfR96nH +b0b9TOMR5lsPp0sI9yivTWvX6qyvLJRWy2vvx17hXK9NxXUNTAm0PYZUTvCtcPeX +RnJpzQKNZQPkFzF0uXBc4CtPK2Vz0+FGvAelrhYAxnw1dIkCQQD+9qaW5QhXjsjb +Nl85CmXgxPmGROcgLQCO+omfrjf9UXrituU9Dz6auym5lDGEdMFnkzfr+wpasEy9 +mf5ZZOhDAkEA5HjXfVGaCtpydOt6hDon/uZsyssCK2lQ7NSuE3vP+sUsYMzIpEoy +t3VWXqKbo+g9KNDTP4WEliqp1aiSIylzzQJANPeqzihQnlgEdD4MdD4rwhFJwVIp +Le8Lcais1KaN7StzOwxB/XhgSibd2TbnPpw+3bSg5n5lvUdo+e62/31OHwJAU1jS +I+F09KikQIr28u3UUWT2IzTT4cpVv1AHAQyV3sG3YsjSGT0IK20eyP9BEBZU2WL0 +7aNjrvR5aHxKc5FXsQJABsFtyGpgI5X4xufkJZVZ+Mklz2n7iXa+XPatMAHFxAtb +EEMt60rngwMjXAzBSC6OYuYogRRAY3UCacNC5VhLYQ== +-----END RSA PRIVATE KEY----- diff --git a/vendor/github.com/lib/pq/certs/root.crt b/vendor/github.com/lib/pq/certs/root.crt new file mode 100644 index 000000000..aecf8f621 --- /dev/null +++ b/vendor/github.com/lib/pq/certs/root.crt @@ -0,0 +1,24 @@ +-----BEGIN CERTIFICATE----- +MIIEAzCCAuugAwIBAgIJANmheROCdW1NMA0GCSqGSIb3DQEBBQUAMF4xCzAJBgNV +BAYTAlVTMQ8wDQYDVQQIEwZOZXZhZGExEjAQBgNVBAcTCUxhcyBWZWdhczEaMBgG +A1UEChMRZ2l0aHViLmNvbS9saWIvcHExDjAMBgNVBAMTBXBxIENBMB4XDTE0MTAx +MTE1MDQyOVoXDTI0MTAwODE1MDQyOVowXjELMAkGA1UEBhMCVVMxDzANBgNVBAgT +Bk5ldmFkYTESMBAGA1UEBxMJTGFzIFZlZ2FzMRowGAYDVQQKExFnaXRodWIuY29t +L2xpYi9wcTEOMAwGA1UEAxMFcHEgQ0EwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAw +ggEKAoIBAQCV4PxP7ShzWBzUCThcKk3qZtOLtHmszQVtbqhvgTpm1kTRtKBdVMu0 +pLAHQ3JgJCnAYgH0iZxVGoMP16T3irdgsdC48+nNTFM2T0cCdkfDURGIhSFN47cb +Pgy306BcDUD2q7ucW33+dlFSRuGVewocoh4BWM/vMtMvvWzdi4Ag/L/jhb+5wZxZ +sWymsadOVSDePEMKOvlCa3EdVwVFV40TVyDb+iWBUivDAYsS2a3KajuJrO6MbZiE +Sp2RCIkZS2zFmzWxVRi9ZhzIZhh7EVF9JAaNC3T52jhGUdlRq3YpBTMnd89iOh74 +6jWXG7wSuPj3haFzyNhmJ0ZUh+2Ynoh1AgMBAAGjgcMwgcAwHQYDVR0OBBYEFFKT +7R52Cp9lT94ZZsHVIkA1y6ByMIGQBgNVHSMEgYgwgYWAFFKT7R52Cp9lT94ZZsHV +IkA1y6ByoWKkYDBeMQswCQYDVQQGEwJVUzEPMA0GA1UECBMGTmV2YWRhMRIwEAYD +VQQHEwlMYXMgVmVnYXMxGjAYBgNVBAoTEWdpdGh1Yi5jb20vbGliL3BxMQ4wDAYD +VQQDEwVwcSBDQYIJANmheROCdW1NMAwGA1UdEwQFMAMBAf8wDQYJKoZIhvcNAQEF +BQADggEBAAEhCLWkqJNMI8b4gkbmj5fqQ/4+oO83bZ3w2Oqf6eZ8I8BC4f2NOyE6 +tRUlq5+aU7eqC1cOAvGjO+YHN/bF/DFpwLlzvUSXt+JP/pYcUjL7v+pIvwqec9hD +ndvM4iIbkD/H/OYQ3L+N3W+G1x7AcFIX+bGCb3PzYVQAjxreV6//wgKBosMGFbZo 
+HPxT9RPMun61SViF04H5TNs0derVn1+5eiiYENeAhJzQNyZoOOUuX1X/Inx9bEPh +C5vFBtSMgIytPgieRJVWAiMLYsfpIAStrHztRAbBs2DU01LmMgRvHdxgFEKinC/d +UHZZQDP+6pT+zADrGhQGXe4eThaO6f0= +-----END CERTIFICATE----- diff --git a/vendor/github.com/lib/pq/certs/server.crt b/vendor/github.com/lib/pq/certs/server.crt new file mode 100644 index 000000000..ddc995a6d --- /dev/null +++ b/vendor/github.com/lib/pq/certs/server.crt @@ -0,0 +1,81 @@ +Certificate: + Data: + Version: 3 (0x2) + Serial Number: 1 (0x1) + Signature Algorithm: sha256WithRSAEncryption + Issuer: C=US, ST=Nevada, L=Las Vegas, O=github.com/lib/pq, CN=pq CA + Validity + Not Before: Oct 11 15:05:15 2014 GMT + Not After : Oct 8 15:05:15 2024 GMT + Subject: C=US, ST=Nevada, L=Las Vegas, O=github.com/lib/pq, CN=postgres + Subject Public Key Info: + Public Key Algorithm: rsaEncryption + RSA Public Key: (2048 bit) + Modulus (2048 bit): + 00:d7:8a:4c:85:fb:17:a5:3c:8f:e0:72:11:29:ce: + 3f:b0:1f:3f:7d:c6:ee:7f:a7:fc:02:2b:35:47:08: + a6:3d:90:df:5c:56:14:94:00:c7:6d:d1:d2:e2:61: + 95:77:b8:e3:a6:66:31:f9:1f:21:7d:62:e1:27:da: + 94:37:61:4a:ea:63:53:a0:61:b8:9c:bb:a5:e2:e7: + b7:a6:d8:0f:05:04:c7:29:e2:ea:49:2b:7f:de:15: + 00:a6:18:70:50:c7:0c:de:9a:f9:5a:96:b0:e1:94: + 06:c6:6d:4a:21:3b:b4:0f:a5:6d:92:86:34:b2:4e: + d7:0e:a7:19:c0:77:0b:7b:87:c8:92:de:42:ff:86: + d2:b7:9a:a4:d4:15:23:ca:ad:a5:69:21:b8:ce:7e: + 66:cb:85:5d:b9:ed:8b:2d:09:8d:94:e4:04:1e:72: + ec:ef:d0:76:90:15:5a:a4:f7:91:4b:e9:ce:4e:9d: + 5d:9a:70:17:9c:d8:e9:73:83:ea:3d:61:99:a6:cd: + ac:91:40:5a:88:77:e5:4e:2a:8e:3d:13:f3:f9:38: + 6f:81:6b:8a:95:ca:0e:07:ab:6f:da:b4:8c:d9:ff: + aa:78:03:aa:c7:c2:cf:6f:64:92:d3:d8:83:d5:af: + f1:23:18:a7:2e:7b:17:0b:e7:7d:f1:fa:a8:41:a3: + 04:57 + Exponent: 65537 (0x10001) + X509v3 extensions: + X509v3 Subject Key Identifier: + EE:F0:B3:46:DC:C7:09:EB:0E:B6:2F:E5:FE:62:60:45:44:9F:59:CC + X509v3 Authority Key Identifier: + keyid:52:93:ED:1E:76:0A:9F:65:4F:DE:19:66:C1:D5:22:40:35:CB:A0:72 + + X509v3 Basic Constraints: + CA:FALSE + X509v3 Key Usage: + Digital Signature, Non Repudiation, Key Encipherment + Signature Algorithm: sha256WithRSAEncryption + 7e:5a:6e:be:bf:d2:6c:c1:d6:fa:b6:fb:3f:06:53:36:08:87: + 9d:95:b1:39:af:9e:f6:47:38:17:39:da:25:7c:f2:ad:0c:e3: + ab:74:19:ca:fb:8c:a0:50:c0:1d:19:8a:9c:21:ed:0f:3a:d1: + 96:54:2e:10:09:4f:b8:70:f7:2b:99:43:d2:c6:15:bc:3f:24: + 7d:28:39:32:3f:8d:a4:4f:40:75:7f:3e:0d:1c:d1:69:f2:4e: + 98:83:47:97:d2:25:ac:c9:36:86:2f:04:a6:c4:86:c7:c4:00: + 5f:7f:b9:ad:fc:bf:e9:f5:78:d7:82:1a:51:0d:fc:ab:9e:92: + 1d:5f:0c:18:d1:82:e0:14:c9:ce:91:89:71:ff:49:49:ff:35: + bf:7b:44:78:42:c1:d0:66:65:bb:28:2e:60:ca:9b:20:12:a9: + 90:61:b1:96:ec:15:46:c9:37:f7:07:90:8a:89:45:2a:3f:37: + ec:dc:e3:e5:8f:c3:3a:57:80:a5:54:60:0c:e1:b2:26:99:2b: + 40:7e:36:d1:9a:70:02:ec:63:f4:3b:72:ae:81:fb:30:20:6d: + cb:48:46:c6:b5:8f:39:b1:84:05:25:55:8d:f5:62:f6:1b:46: + 2e:da:a3:4c:26:12:44:d7:56:b6:b8:a9:ca:d3:ab:71:45:7c: + 9f:48:6d:1e +-----BEGIN CERTIFICATE----- +MIIDlDCCAnygAwIBAgIBATANBgkqhkiG9w0BAQsFADBeMQswCQYDVQQGEwJVUzEP +MA0GA1UECBMGTmV2YWRhMRIwEAYDVQQHEwlMYXMgVmVnYXMxGjAYBgNVBAoTEWdp +dGh1Yi5jb20vbGliL3BxMQ4wDAYDVQQDEwVwcSBDQTAeFw0xNDEwMTExNTA1MTVa +Fw0yNDEwMDgxNTA1MTVaMGExCzAJBgNVBAYTAlVTMQ8wDQYDVQQIEwZOZXZhZGEx +EjAQBgNVBAcTCUxhcyBWZWdhczEaMBgGA1UEChMRZ2l0aHViLmNvbS9saWIvcHEx +ETAPBgNVBAMTCHBvc3RncmVzMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKC +AQEA14pMhfsXpTyP4HIRKc4/sB8/fcbuf6f8Ais1RwimPZDfXFYUlADHbdHS4mGV +d7jjpmYx+R8hfWLhJ9qUN2FK6mNToGG4nLul4ue3ptgPBQTHKeLqSSt/3hUAphhw +UMcM3pr5Wpaw4ZQGxm1KITu0D6VtkoY0sk7XDqcZwHcLe4fIkt5C/4bSt5qk1BUj 
+yq2laSG4zn5my4Vdue2LLQmNlOQEHnLs79B2kBVapPeRS+nOTp1dmnAXnNjpc4Pq +PWGZps2skUBaiHflTiqOPRPz+ThvgWuKlcoOB6tv2rSM2f+qeAOqx8LPb2SS09iD +1a/xIxinLnsXC+d98fqoQaMEVwIDAQABo1owWDAdBgNVHQ4EFgQU7vCzRtzHCesO +ti/l/mJgRUSfWcwwHwYDVR0jBBgwFoAUUpPtHnYKn2VP3hlmwdUiQDXLoHIwCQYD +VR0TBAIwADALBgNVHQ8EBAMCBeAwDQYJKoZIhvcNAQELBQADggEBAH5abr6/0mzB +1vq2+z8GUzYIh52VsTmvnvZHOBc52iV88q0M46t0Gcr7jKBQwB0Zipwh7Q860ZZU +LhAJT7hw9yuZQ9LGFbw/JH0oOTI/jaRPQHV/Pg0c0WnyTpiDR5fSJazJNoYvBKbE +hsfEAF9/ua38v+n1eNeCGlEN/Kuekh1fDBjRguAUyc6RiXH/SUn/Nb97RHhCwdBm +ZbsoLmDKmyASqZBhsZbsFUbJN/cHkIqJRSo/N+zc4+WPwzpXgKVUYAzhsiaZK0B+ +NtGacALsY/Q7cq6B+zAgbctIRsa1jzmxhAUlVY31YvYbRi7ao0wmEkTXVra4qcrT +q3FFfJ9IbR4= +-----END CERTIFICATE----- diff --git a/vendor/github.com/lib/pq/certs/server.key b/vendor/github.com/lib/pq/certs/server.key new file mode 100644 index 000000000..bd7b019b6 --- /dev/null +++ b/vendor/github.com/lib/pq/certs/server.key @@ -0,0 +1,27 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIEogIBAAKCAQEA14pMhfsXpTyP4HIRKc4/sB8/fcbuf6f8Ais1RwimPZDfXFYU +lADHbdHS4mGVd7jjpmYx+R8hfWLhJ9qUN2FK6mNToGG4nLul4ue3ptgPBQTHKeLq +SSt/3hUAphhwUMcM3pr5Wpaw4ZQGxm1KITu0D6VtkoY0sk7XDqcZwHcLe4fIkt5C +/4bSt5qk1BUjyq2laSG4zn5my4Vdue2LLQmNlOQEHnLs79B2kBVapPeRS+nOTp1d +mnAXnNjpc4PqPWGZps2skUBaiHflTiqOPRPz+ThvgWuKlcoOB6tv2rSM2f+qeAOq +x8LPb2SS09iD1a/xIxinLnsXC+d98fqoQaMEVwIDAQABAoIBAF3ZoihUhJ82F4+r +Gz4QyDpv4L1reT2sb1aiabhcU8ZK5nbWJG+tRyjSS/i2dNaEcttpdCj9HR/zhgZM +bm0OuAgG58rVwgS80CZUruq++Qs+YVojq8/gWPTiQD4SNhV2Fmx3HkwLgUk3oxuT +SsvdqzGE3okGVrutCIcgy126eA147VPMoej1Bb3fO6npqK0pFPhZfAc0YoqJuM+k +obRm5pAnGUipyLCFXjA9HYPKwYZw2RtfdA3CiImHeanSdqS+ctrC9y8BV40Th7gZ +haXdKUNdjmIxV695QQ1mkGqpKLZFqhzKioGQ2/Ly2d1iaKN9fZltTusu8unepWJ2 +tlT9qMECgYEA9uHaF1t2CqE+AJvWTihHhPIIuLxoOQXYea1qvxfcH/UMtaLKzCNm +lQ5pqCGsPvp+10f36yttO1ZehIvlVNXuJsjt0zJmPtIolNuJY76yeussfQ9jHheB +5uPEzCFlHzxYbBUyqgWaF6W74okRGzEGJXjYSP0yHPPdU4ep2q3bGiUCgYEA34Af +wBSuQSK7uLxArWHvQhyuvi43ZGXls6oRGl+Ysj54s8BP6XGkq9hEJ6G4yxgyV+BR +DUOs5X8/TLT8POuIMYvKTQthQyCk0eLv2FLdESDuuKx0kBVY3s8lK3/z5HhrdOiN +VMNZU+xDKgKc3hN9ypkk8vcZe6EtH7Y14e0rVcsCgYBTgxi8F/M5K0wG9rAqphNz +VFBA9XKn/2M33cKjO5X5tXIEKzpAjaUQvNxexG04rJGljzG8+mar0M6ONahw5yD1 +O7i/XWgazgpuOEkkVYiYbd8RutfDgR4vFVMn3hAP3eDnRtBplRWH9Ec3HTiNIys6 +F8PKBOQjyRZQQC7jyzW3hQKBgACe5HeuFwXLSOYsb6mLmhR+6+VPT4wR1F95W27N +USk9jyxAnngxfpmTkiziABdgS9N+pfr5cyN4BP77ia/Jn6kzkC5Cl9SN5KdIkA3z +vPVtN/x/ThuQU5zaymmig1ThGLtMYggYOslG4LDfLPxY5YKIhle+Y+259twdr2yf +Mf2dAoGAaGv3tWMgnIdGRk6EQL/yb9PKHo7ShN+tKNlGaK7WwzBdKs+Fe8jkgcr7 +pz4Ne887CmxejdISzOCcdT+Zm9Bx6I/uZwWOtDvWpIgIxVX9a9URj/+D1MxTE/y4 +d6H+c89yDY62I2+drMpdjCd3EtCaTlxpTbRS+s1eAHMH7aEkcCE= +-----END RSA PRIVATE KEY----- diff --git a/vendor/github.com/lib/pq/conn.go b/vendor/github.com/lib/pq/conn.go new file mode 100644 index 000000000..394e659df --- /dev/null +++ b/vendor/github.com/lib/pq/conn.go @@ -0,0 +1,1852 @@ +package pq + +import ( + "bufio" + "crypto/md5" + "crypto/tls" + "crypto/x509" + "database/sql" + "database/sql/driver" + "encoding/binary" + "errors" + "fmt" + "io" + "io/ioutil" + "net" + "os" + "os/user" + "path" + "path/filepath" + "strconv" + "strings" + "time" + "unicode" + + "github.com/lib/pq/oid" +) + +// Common error types +var ( + ErrNotSupported = errors.New("pq: Unsupported command") + ErrInFailedTransaction = errors.New("pq: Could not complete operation in a failed transaction") + ErrSSLNotSupported = errors.New("pq: SSL is not enabled on the server") + ErrSSLKeyHasWorldPermissions = errors.New("pq: Private key file has group or world access. 
Permissions should be u=rw (0600) or less.") + ErrCouldNotDetectUsername = errors.New("pq: Could not detect default username. Please provide one explicitly.") +) + +type drv struct{} + +func (d *drv) Open(name string) (driver.Conn, error) { + return Open(name) +} + +func init() { + sql.Register("postgres", &drv{}) +} + +type parameterStatus struct { + // server version in the same format as server_version_num, or 0 if + // unavailable + serverVersion int + + // the current location based on the TimeZone value of the session, if + // available + currentLocation *time.Location +} + +type transactionStatus byte + +const ( + txnStatusIdle transactionStatus = 'I' + txnStatusIdleInTransaction transactionStatus = 'T' + txnStatusInFailedTransaction transactionStatus = 'E' +) + +func (s transactionStatus) String() string { + switch s { + case txnStatusIdle: + return "idle" + case txnStatusIdleInTransaction: + return "idle in transaction" + case txnStatusInFailedTransaction: + return "in a failed transaction" + default: + errorf("unknown transactionStatus %d", s) + } + + panic("not reached") +} + +type Dialer interface { + Dial(network, address string) (net.Conn, error) + DialTimeout(network, address string, timeout time.Duration) (net.Conn, error) +} + +type defaultDialer struct{} + +func (d defaultDialer) Dial(ntw, addr string) (net.Conn, error) { + return net.Dial(ntw, addr) +} +func (d defaultDialer) DialTimeout(ntw, addr string, timeout time.Duration) (net.Conn, error) { + return net.DialTimeout(ntw, addr, timeout) +} + +type conn struct { + c net.Conn + buf *bufio.Reader + namei int + scratch [512]byte + txnStatus transactionStatus + + parameterStatus parameterStatus + + saveMessageType byte + saveMessageBuffer []byte + + // If true, this connection is bad and all public-facing functions should + // return ErrBadConn. + bad bool + + // If set, this connection should never use the binary format when + // receiving query results from prepared statements. Only provided for + // debugging. + disablePreparedBinaryResult bool + + // Whether to always send []byte parameters over as binary. Enables single + // round-trip mode for non-prepared Query calls. + binaryParameters bool +} + +// Handle driver-side settings in parsed connection string. 
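The Dialer interface defined above is the seam for custom connection establishment: DialOpen accepts any implementation, and defaultDialer simply delegates to the net package. A minimal sketch of plugging in your own dialer (the type name timeoutDialer is illustrative, not part of the vendored code):

    package example

    import (
        "database/sql/driver"
        "net"
        "time"

        "github.com/lib/pq"
    )

    // timeoutDialer satisfies pq.Dialer using the standard net package.
    type timeoutDialer struct{}

    func (timeoutDialer) Dial(network, address string) (net.Conn, error) {
        return net.Dial(network, address)
    }

    func (timeoutDialer) DialTimeout(network, address string, timeout time.Duration) (net.Conn, error) {
        return net.DialTimeout(network, address, timeout)
    }

    func openWithDialer(dsn string) (driver.Conn, error) {
        // DialOpen is the vendored entry point that accepts a custom Dialer.
        return pq.DialOpen(timeoutDialer{}, dsn)
    }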
+func (c *conn) handleDriverSettings(o values) (err error) { + boolSetting := func(key string, val *bool) error { + if value := o.Get(key); value != "" { + if value == "yes" { + *val = true + } else if value == "no" { + *val = false + } else { + return fmt.Errorf("unrecognized value %q for %s", value, key) + } + } + return nil + } + + err = boolSetting("disable_prepared_binary_result", &c.disablePreparedBinaryResult) + if err != nil { + return err + } + err = boolSetting("binary_parameters", &c.binaryParameters) + if err != nil { + return err + } + return nil +} + +func (c *conn) handlePgpass(o values) { + // if a password was supplied, do not process .pgpass + _, ok := o["password"] + if ok { + return + } + filename := os.Getenv("PGPASSFILE") + if filename == "" { + // XXX this code doesn't work on Windows where the default filename is + // XXX %APPDATA%\postgresql\pgpass.conf + user, err := user.Current() + if err != nil { + return + } + filename = filepath.Join(user.HomeDir, ".pgpass") + } + fileinfo, err := os.Stat(filename) + if err != nil { + return + } + mode := fileinfo.Mode() + if mode & (0x77) != 0 { + // XXX should warn about incorrect .pgpass permissions as psql does + return + } + file, err := os.Open(filename) + if err != nil { + return + } + defer file.Close() + scanner := bufio.NewScanner(io.Reader(file)) + hostname := o.Get("host") + ntw, _ := network(o) + port := o.Get("port") + db := o.Get("dbname") + username := o.Get("user") + // From: https://github.com/tg/pgpass/blob/master/reader.go + getFields := func (s string) []string { + fs := make([]string, 0, 5) + f := make([]rune, 0, len(s)) + + var esc bool + for _, c := range s { + switch { + case esc: + f = append(f, c) + esc = false + case c == '\\': + esc = true + case c == ':': + fs = append(fs, string(f)) + f = f[:0] + default: + f = append(f, c) + } + } + return append(fs, string(f)) + } + for scanner.Scan() { + line := scanner.Text() + if len(line) == 0 || line[0] == '#' { + continue + } + split := getFields(line) + if len(split) != 5 { + continue + } + if (split[0] == "*" || split[0] == hostname || (split[0] == "localhost" && (hostname == "" || ntw == "unix"))) && (split[1] == "*" || split[1] == port) && (split[2] == "*" || split[2] == db) && (split[3] == "*" || split[3] == username) { + o["password"] = split[4] + return + } + } +} + +func (c *conn) writeBuf(b byte) *writeBuf { + c.scratch[0] = b + return &writeBuf{ + buf: c.scratch[:5], + pos: 1, + } +} + +func Open(name string) (_ driver.Conn, err error) { + return DialOpen(defaultDialer{}, name) +} + +func DialOpen(d Dialer, name string) (_ driver.Conn, err error) { + // Handle any panics during connection initialization. Note that we + // specifically do *not* want to use errRecover(), as that would turn any + // connection errors into ErrBadConns, hiding the real error message from + // the user. + defer errRecoverNoErrBadConn(&err) + + o := make(values) + + // A number of defaults are applied here, in this order: + // + // * Very low precedence defaults applied in every situation + // * Environment variables + // * Explicitly passed connection information + o.Set("host", "localhost") + o.Set("port", "5432") + // N.B.: Extra float digits should be set to 3, but that breaks + // Postgres 8.4 and older, where the max is 2. 
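handlePgpass above implements libpq's password-file lookup: each line is hostname:port:database:username:password, * matches anything, a backslash escapes : or \ inside a field, the first matching line wins, and the file is skipped entirely unless its permissions exclude group and world access (u=rw, 0600). An illustrative ~/.pgpass (all values invented):

    # comment lines and malformed lines are skipped
    db.example.com:5432:mydb:alice:s3cret
    localhost:*:*:alice:local\:pass
    *:*:*:*:fallback

Note that per the matching code above, a "localhost" entry also matches UNIX-socket connections and an empty host.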
+ o.Set("extra_float_digits", "2") + for k, v := range parseEnviron(os.Environ()) { + o.Set(k, v) + } + + if strings.HasPrefix(name, "postgres://") || strings.HasPrefix(name, "postgresql://") { + name, err = ParseURL(name) + if err != nil { + return nil, err + } + } + + if err := parseOpts(name, o); err != nil { + return nil, err + } + + // Use the "fallback" application name if necessary + if fallback := o.Get("fallback_application_name"); fallback != "" { + if !o.Isset("application_name") { + o.Set("application_name", fallback) + } + } + + // We can't work with any client_encoding other than UTF-8 currently. + // However, we have historically allowed the user to set it to UTF-8 + // explicitly, and there's no reason to break such programs, so allow that. + // Note that the "options" setting could also set client_encoding, but + // parsing its value is not worth it. Instead, we always explicitly send + // client_encoding as a separate run-time parameter, which should override + // anything set in options. + if enc := o.Get("client_encoding"); enc != "" && !isUTF8(enc) { + return nil, errors.New("client_encoding must be absent or 'UTF8'") + } + o.Set("client_encoding", "UTF8") + // DateStyle needs a similar treatment. + if datestyle := o.Get("datestyle"); datestyle != "" { + if datestyle != "ISO, MDY" { + panic(fmt.Sprintf("setting datestyle must be absent or %v; got %v", + "ISO, MDY", datestyle)) + } + } else { + o.Set("datestyle", "ISO, MDY") + } + + // If a user is not provided by any other means, the last + // resort is to use the current operating system provided user + // name. + if o.Get("user") == "" { + u, err := userCurrent() + if err != nil { + return nil, err + } else { + o.Set("user", u) + } + } + + cn := &conn{} + err = cn.handleDriverSettings(o) + if err != nil { + return nil, err + } + cn.handlePgpass(o) + + cn.c, err = dial(d, o) + if err != nil { + return nil, err + } + cn.ssl(o) + cn.buf = bufio.NewReader(cn.c) + cn.startup(o) + + // reset the deadline, in case one was set (see dial) + if timeout := o.Get("connect_timeout"); timeout != "" && timeout != "0" { + err = cn.c.SetDeadline(time.Time{}) + } + return cn, err +} + +func dial(d Dialer, o values) (net.Conn, error) { + ntw, addr := network(o) + // SSL is not necessary or supported over UNIX domain sockets + if ntw == "unix" { + o["sslmode"] = "disable" + } + + // Zero or not specified means wait indefinitely. + if timeout := o.Get("connect_timeout"); timeout != "" && timeout != "0" { + seconds, err := strconv.ParseInt(timeout, 10, 0) + if err != nil { + return nil, fmt.Errorf("invalid value for parameter connect_timeout: %s", err) + } + duration := time.Duration(seconds) * time.Second + // connect_timeout should apply to the entire connection establishment + // procedure, so we both use a timeout for the TCP connection + // establishment and set a deadline for doing the initial handshake. + // The deadline is then reset after startup() is done. 
+ deadline := time.Now().Add(duration) + conn, err := d.DialTimeout(ntw, addr, duration) + if err != nil { + return nil, err + } + err = conn.SetDeadline(deadline) + return conn, err + } + return d.Dial(ntw, addr) +} + +func network(o values) (string, string) { + host := o.Get("host") + + if strings.HasPrefix(host, "/") { + sockPath := path.Join(host, ".s.PGSQL."+o.Get("port")) + return "unix", sockPath + } + + return "tcp", host + ":" + o.Get("port") +} + +type values map[string]string + +func (vs values) Set(k, v string) { + vs[k] = v +} + +func (vs values) Get(k string) (v string) { + return vs[k] +} + +func (vs values) Isset(k string) bool { + _, ok := vs[k] + return ok +} + +// scanner implements a tokenizer for libpq-style option strings. +type scanner struct { + s []rune + i int +} + +// newScanner returns a new scanner initialized with the option string s. +func newScanner(s string) *scanner { + return &scanner{[]rune(s), 0} +} + +// Next returns the next rune. +// It returns 0, false if the end of the text has been reached. +func (s *scanner) Next() (rune, bool) { + if s.i >= len(s.s) { + return 0, false + } + r := s.s[s.i] + s.i++ + return r, true +} + +// SkipSpaces returns the next non-whitespace rune. +// It returns 0, false if the end of the text has been reached. +func (s *scanner) SkipSpaces() (rune, bool) { + r, ok := s.Next() + for unicode.IsSpace(r) && ok { + r, ok = s.Next() + } + return r, ok +} + +// parseOpts parses the options from name and adds them to the values. +// +// The parsing code is based on conninfo_parse from libpq's fe-connect.c +func parseOpts(name string, o values) error { + s := newScanner(name) + + for { + var ( + keyRunes, valRunes []rune + r rune + ok bool + ) + + if r, ok = s.SkipSpaces(); !ok { + break + } + + // Scan the key + for !unicode.IsSpace(r) && r != '=' { + keyRunes = append(keyRunes, r) + if r, ok = s.Next(); !ok { + break + } + } + + // Skip any whitespace if we're not at the = yet + if r != '=' { + r, ok = s.SkipSpaces() + } + + // The current character should be = + if r != '=' || !ok { + return fmt.Errorf(`missing "=" after %q in connection info string"`, string(keyRunes)) + } + + // Skip any whitespace after the = + if r, ok = s.SkipSpaces(); !ok { + // If we reach the end here, the last value is just an empty string as per libpq. 
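parseOpts follows the libpq conninfo grammar described in the comments above: settings are whitespace-separated key=value pairs, values may be single-quoted, and a backslash escapes the next character both inside and outside quotes. For example, these two fragments parse to the same password (a sketch; the values are invented):

    package example

    // Both DSNs set password to: it's secret
    // (quoted with an escaped quote, or unquoted with backslash-escaped
    // quote and space).
    const (
        dsnQuoted   = `host=localhost password='it\'s secret'`
        dsnUnquoted = `host=localhost password=it\'s\ secret`
    )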
+ o.Set(string(keyRunes), "") + break + } + + if r != '\'' { + for !unicode.IsSpace(r) { + if r == '\\' { + if r, ok = s.Next(); !ok { + return fmt.Errorf(`missing character after backslash`) + } + } + valRunes = append(valRunes, r) + + if r, ok = s.Next(); !ok { + break + } + } + } else { + quote: + for { + if r, ok = s.Next(); !ok { + return fmt.Errorf(`unterminated quoted string literal in connection string`) + } + switch r { + case '\'': + break quote + case '\\': + r, _ = s.Next() + fallthrough + default: + valRunes = append(valRunes, r) + } + } + } + + o.Set(string(keyRunes), string(valRunes)) + } + + return nil +} + +func (cn *conn) isInTransaction() bool { + return cn.txnStatus == txnStatusIdleInTransaction || + cn.txnStatus == txnStatusInFailedTransaction +} + +func (cn *conn) checkIsInTransaction(intxn bool) { + if cn.isInTransaction() != intxn { + cn.bad = true + errorf("unexpected transaction status %v", cn.txnStatus) + } +} + +func (cn *conn) Begin() (_ driver.Tx, err error) { + if cn.bad { + return nil, driver.ErrBadConn + } + defer cn.errRecover(&err) + + cn.checkIsInTransaction(false) + _, commandTag, err := cn.simpleExec("BEGIN") + if err != nil { + return nil, err + } + if commandTag != "BEGIN" { + cn.bad = true + return nil, fmt.Errorf("unexpected command tag %s", commandTag) + } + if cn.txnStatus != txnStatusIdleInTransaction { + cn.bad = true + return nil, fmt.Errorf("unexpected transaction status %v", cn.txnStatus) + } + return cn, nil +} + +func (cn *conn) Commit() (err error) { + if cn.bad { + return driver.ErrBadConn + } + defer cn.errRecover(&err) + + cn.checkIsInTransaction(true) + // We don't want the client to think that everything is okay if it tries + // to commit a failed transaction. However, no matter what we return, + // database/sql will release this connection back into the free connection + // pool so we have to abort the current transaction here. Note that you + // would get the same behaviour if you issued a COMMIT in a failed + // transaction, so it's also the least surprising thing to do here. 
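The Commit comment above has a user-visible consequence: committing a transaction that has already failed rolls it back and reports ErrInFailedTransaction rather than pretending success. A minimal sketch of observing that through database/sql:

    package example

    import (
        "database/sql"

        "github.com/lib/pq"
    )

    func commitAfterError(db *sql.DB) error {
        tx, err := db.Begin()
        if err != nil {
            return err
        }
        // Force the transaction into the failed state.
        if _, err := tx.Exec("SELECT no_such_column"); err == nil {
            panic("expected the statement to fail")
        }
        // COMMIT on a failed transaction surfaces pq.ErrInFailedTransaction,
        // and the driver has already issued the ROLLBACK for us.
        if err := tx.Commit(); err == pq.ErrInFailedTransaction {
            return err // expected path
        }
        return nil
    }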
+ if cn.txnStatus == txnStatusInFailedTransaction { + if err := cn.Rollback(); err != nil { + return err + } + return ErrInFailedTransaction + } + + _, commandTag, err := cn.simpleExec("COMMIT") + if err != nil { + if cn.isInTransaction() { + cn.bad = true + } + return err + } + if commandTag != "COMMIT" { + cn.bad = true + return fmt.Errorf("unexpected command tag %s", commandTag) + } + cn.checkIsInTransaction(false) + return nil +} + +func (cn *conn) Rollback() (err error) { + if cn.bad { + return driver.ErrBadConn + } + defer cn.errRecover(&err) + + cn.checkIsInTransaction(true) + _, commandTag, err := cn.simpleExec("ROLLBACK") + if err != nil { + if cn.isInTransaction() { + cn.bad = true + } + return err + } + if commandTag != "ROLLBACK" { + return fmt.Errorf("unexpected command tag %s", commandTag) + } + cn.checkIsInTransaction(false) + return nil +} + +func (cn *conn) gname() string { + cn.namei++ + return strconv.FormatInt(int64(cn.namei), 10) +} + +func (cn *conn) simpleExec(q string) (res driver.Result, commandTag string, err error) { + b := cn.writeBuf('Q') + b.string(q) + cn.send(b) + + for { + t, r := cn.recv1() + switch t { + case 'C': + res, commandTag = cn.parseComplete(r.string()) + case 'Z': + cn.processReadyForQuery(r) + // done + return + case 'E': + err = parseError(r) + case 'T', 'D', 'I': + // ignore any results + default: + cn.bad = true + errorf("unknown response for simple query: %q", t) + } + } +} + +func (cn *conn) simpleQuery(q string) (res *rows, err error) { + defer cn.errRecover(&err) + + st := &stmt{cn: cn, name: ""} + + b := cn.writeBuf('Q') + b.string(q) + cn.send(b) + + for { + t, r := cn.recv1() + switch t { + case 'C', 'I': + // We allow queries which don't return any results through Query as + // well as Exec. We still have to give database/sql a rows object + // the user can close, though, to avoid connections from being + // leaked. A "rows" with done=true works fine for that purpose. + if err != nil { + cn.bad = true + errorf("unexpected message %q in simple query execution", t) + } + if res == nil { + res = &rows{ + cn: cn, + colNames: st.colNames, + colTyps: st.colTyps, + colFmts: st.colFmts, + } + } + res.done = true + case 'Z': + cn.processReadyForQuery(r) + // done + return + case 'E': + res = nil + err = parseError(r) + case 'D': + if res == nil { + cn.bad = true + errorf("unexpected DataRow in simple query execution") + } + // the query didn't fail; kick off to Next + cn.saveMessage(t, r) + return + case 'T': + // res might be non-nil here if we received a previous + // CommandComplete, but that's fine; just overwrite it + res = &rows{cn: cn} + res.colNames, res.colFmts, res.colTyps = parsePortalRowDescribe(r) + + // To work around a bug in QueryRow in Go 1.2 and earlier, wait + // until the first DataRow has been received. + default: + cn.bad = true + errorf("unknown response for simple query: %q", t) + } + } +} + +// Decides which column formats to use for a prepared statement. The input is +// an array of type oids, one element per result column. +func decideColumnFormats(colTyps []oid.Oid, forceText bool) (colFmts []format, colFmtData []byte) { + if len(colTyps) == 0 { + return nil, colFmtDataAllText + } + + colFmts = make([]format, len(colTyps)) + if forceText { + return colFmts, colFmtDataAllText + } + + allBinary := true + allText := true + for i, o := range colTyps { + switch o { + // This is the list of types to use binary mode for when receiving them + // through a prepared statement. 
If a type appears in this list, it + // must also be implemented in binaryDecode in encode.go. + case oid.T_bytea: + fallthrough + case oid.T_int8: + fallthrough + case oid.T_int4: + fallthrough + case oid.T_int2: + colFmts[i] = formatBinary + allText = false + + default: + allBinary = false + } + } + + if allBinary { + return colFmts, colFmtDataAllBinary + } else if allText { + return colFmts, colFmtDataAllText + } else { + colFmtData = make([]byte, 2+len(colFmts)*2) + binary.BigEndian.PutUint16(colFmtData, uint16(len(colFmts))) + for i, v := range colFmts { + binary.BigEndian.PutUint16(colFmtData[2+i*2:], uint16(v)) + } + return colFmts, colFmtData + } +} + +func (cn *conn) prepareTo(q, stmtName string) *stmt { + st := &stmt{cn: cn, name: stmtName} + + b := cn.writeBuf('P') + b.string(st.name) + b.string(q) + b.int16(0) + + b.next('D') + b.byte('S') + b.string(st.name) + + b.next('S') + cn.send(b) + + cn.readParseResponse() + st.paramTyps, st.colNames, st.colTyps = cn.readStatementDescribeResponse() + st.colFmts, st.colFmtData = decideColumnFormats(st.colTyps, cn.disablePreparedBinaryResult) + cn.readReadyForQuery() + return st +} + +func (cn *conn) Prepare(q string) (_ driver.Stmt, err error) { + if cn.bad { + return nil, driver.ErrBadConn + } + defer cn.errRecover(&err) + + if len(q) >= 4 && strings.EqualFold(q[:4], "COPY") { + return cn.prepareCopyIn(q) + } + return cn.prepareTo(q, cn.gname()), nil +} + +func (cn *conn) Close() (err error) { + if cn.bad { + return driver.ErrBadConn + } + defer cn.errRecover(&err) + + // Don't go through send(); ListenerConn relies on us not scribbling on the + // scratch buffer of this connection. + err = cn.sendSimpleMessage('X') + if err != nil { + return err + } + + return cn.c.Close() +} + +// Implement the "Queryer" interface +func (cn *conn) Query(query string, args []driver.Value) (_ driver.Rows, err error) { + if cn.bad { + return nil, driver.ErrBadConn + } + defer cn.errRecover(&err) + + // Check to see if we can use the "simpleQuery" interface, which is + // *much* faster than going through prepare/exec + if len(args) == 0 { + return cn.simpleQuery(query) + } + + if cn.binaryParameters { + cn.sendBinaryModeQuery(query, args) + + cn.readParseResponse() + cn.readBindResponse() + rows := &rows{cn: cn} + rows.colNames, rows.colFmts, rows.colTyps = cn.readPortalDescribeResponse() + cn.postExecuteWorkaround() + return rows, nil + } else { + st := cn.prepareTo(query, "") + st.exec(args) + return &rows{ + cn: cn, + colNames: st.colNames, + colTyps: st.colTyps, + colFmts: st.colFmts, + }, nil + } +} + +// Implement the optional "Execer" interface for one-shot queries +func (cn *conn) Exec(query string, args []driver.Value) (res driver.Result, err error) { + if cn.bad { + return nil, driver.ErrBadConn + } + defer cn.errRecover(&err) + + // Check to see if we can use the "simpleExec" interface, which is + // *much* faster than going through prepare/exec + if len(args) == 0 { + // ignore commandTag, our caller doesn't care + r, _, err := cn.simpleExec(query) + return r, err + } + + if cn.binaryParameters { + cn.sendBinaryModeQuery(query, args) + + cn.readParseResponse() + cn.readBindResponse() + cn.readPortalDescribeResponse() + cn.postExecuteWorkaround() + res, _, err = cn.readExecuteResponse("Execute") + return res, err + } else { + // Use the unnamed statement to defer planning until bind + // time, or else value-based selectivity estimates cannot be + // used. 
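The Exec path above deliberately uses the unnamed statement so the server plans at bind time with the actual parameter values, whereas conn.Prepare (earlier in this file) allocates a named server-side statement for reuse. A small sketch of the prepared route from database/sql:

    package example

    import "database/sql"

    func preparedReuse(db *sql.DB) error {
        // db.Prepare reaches conn.Prepare above and creates a named
        // server-side statement that can be executed repeatedly.
        st, err := db.Prepare("SELECT $1::int + 1")
        if err != nil {
            return err
        }
        defer st.Close()
        var out int
        return st.QueryRow(41).Scan(&out)
    }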
+ st := cn.prepareTo(query, "") + r, err := st.Exec(args) + if err != nil { + panic(err) + } + return r, err + } +} + +func (cn *conn) send(m *writeBuf) { + _, err := cn.c.Write(m.wrap()) + if err != nil { + panic(err) + } +} + +func (cn *conn) sendStartupPacket(m *writeBuf) { + // sanity check + if m.buf[0] != 0 { + panic("oops") + } + + _, err := cn.c.Write((m.wrap())[1:]) + if err != nil { + panic(err) + } +} + +// Send a message of type typ to the server on the other end of cn. The +// message should have no payload. This method does not use the scratch +// buffer. +func (cn *conn) sendSimpleMessage(typ byte) (err error) { + _, err = cn.c.Write([]byte{typ, '\x00', '\x00', '\x00', '\x04'}) + return err +} + +// saveMessage memorizes a message and its buffer in the conn struct. +// recvMessage will then return these values on the next call to it. This +// method is useful in cases where you have to see what the next message is +// going to be (e.g. to see whether it's an error or not) but you can't handle +// the message yourself. +func (cn *conn) saveMessage(typ byte, buf *readBuf) { + if cn.saveMessageType != 0 { + cn.bad = true + errorf("unexpected saveMessageType %d", cn.saveMessageType) + } + cn.saveMessageType = typ + cn.saveMessageBuffer = *buf +} + +// recvMessage receives any message from the backend, or returns an error if +// a problem occurred while reading the message. +func (cn *conn) recvMessage(r *readBuf) (byte, error) { + // workaround for a QueryRow bug, see exec + if cn.saveMessageType != 0 { + t := cn.saveMessageType + *r = cn.saveMessageBuffer + cn.saveMessageType = 0 + cn.saveMessageBuffer = nil + return t, nil + } + + x := cn.scratch[:5] + _, err := io.ReadFull(cn.buf, x) + if err != nil { + return 0, err + } + + // read the type and length of the message that follows + t := x[0] + n := int(binary.BigEndian.Uint32(x[1:])) - 4 + var y []byte + if n <= len(cn.scratch) { + y = cn.scratch[:n] + } else { + y = make([]byte, n) + } + _, err = io.ReadFull(cn.buf, y) + if err != nil { + return 0, err + } + *r = y + return t, nil +} + +// recv receives a message from the backend, but if an error happened while +// reading the message or the received message was an ErrorResponse, it panics. +// NoticeResponses are ignored. This function should generally be used only +// during the startup sequence. +func (cn *conn) recv() (t byte, r *readBuf) { + for { + var err error + r = &readBuf{} + t, err = cn.recvMessage(r) + if err != nil { + panic(err) + } + + switch t { + case 'E': + panic(parseError(r)) + case 'N': + // ignore + default: + return + } + } +} + +// recv1Buf is exactly equivalent to recv1, except it uses a buffer supplied by +// the caller to avoid an allocation. +func (cn *conn) recv1Buf(r *readBuf) byte { + for { + t, err := cn.recvMessage(r) + if err != nil { + panic(err) + } + + switch t { + case 'A', 'N': + // ignore + case 'S': + cn.processParameterStatus(r) + default: + return t + } + } +} + +// recv1 receives a message from the backend, panicking if an error occurs +// while attempting to read it. All asynchronous messages are ignored, with +// the exception of ErrorResponse. 
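recvMessage, shown above, depends on the fixed framing of backend messages: one type byte followed by a big-endian int32 length that counts itself but not the type byte. The same header decode as a standalone sketch (illustrative; like the vendored loop, it does not validate the length):

    package example

    import (
        "encoding/binary"
        "io"
    )

    // readMessage decodes one backend message: type byte, int32 length
    // (including the 4 length bytes), then length-4 bytes of payload.
    func readMessage(r io.Reader) (typ byte, payload []byte, err error) {
        var hdr [5]byte
        if _, err = io.ReadFull(r, hdr[:]); err != nil {
            return 0, nil, err
        }
        typ = hdr[0]
        n := int(binary.BigEndian.Uint32(hdr[1:])) - 4
        payload = make([]byte, n)
        _, err = io.ReadFull(r, payload)
        return typ, payload, err
    }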
+func (cn *conn) recv1() (t byte, r *readBuf) { + r = &readBuf{} + t = cn.recv1Buf(r) + return t, r +} + +func (cn *conn) ssl(o values) { + verifyCaOnly := false + tlsConf := tls.Config{} + switch mode := o.Get("sslmode"); mode { + case "require", "": + tlsConf.InsecureSkipVerify = true + case "verify-ca": + // We must skip TLS's own verification since it requires full + // verification since Go 1.3. + tlsConf.InsecureSkipVerify = true + verifyCaOnly = true + case "verify-full": + tlsConf.ServerName = o.Get("host") + case "disable": + return + default: + errorf(`unsupported sslmode %q; only "require" (default), "verify-full", and "disable" supported`, mode) + } + + cn.setupSSLClientCertificates(&tlsConf, o) + cn.setupSSLCA(&tlsConf, o) + + w := cn.writeBuf(0) + w.int32(80877103) + cn.sendStartupPacket(w) + + b := cn.scratch[:1] + _, err := io.ReadFull(cn.c, b) + if err != nil { + panic(err) + } + + if b[0] != 'S' { + panic(ErrSSLNotSupported) + } + + client := tls.Client(cn.c, &tlsConf) + if verifyCaOnly { + cn.verifyCA(client, &tlsConf) + } + cn.c = client +} + +// verifyCA carries out a TLS handshake to the server and verifies the +// presented certificate against the effective CA, i.e. the one specified in +// sslrootcert or the system CA if sslrootcert was not specified. +func (cn *conn) verifyCA(client *tls.Conn, tlsConf *tls.Config) { + err := client.Handshake() + if err != nil { + panic(err) + } + certs := client.ConnectionState().PeerCertificates + opts := x509.VerifyOptions{ + DNSName: client.ConnectionState().ServerName, + Intermediates: x509.NewCertPool(), + Roots: tlsConf.RootCAs, + } + for i, cert := range certs { + if i == 0 { + continue + } + opts.Intermediates.AddCert(cert) + } + _, err = certs[0].Verify(opts) + if err != nil { + panic(err) + } +} + +// This function sets up SSL client certificates based on either the "sslkey" +// and "sslcert" settings (possibly set via the environment variables PGSSLKEY +// and PGSSLCERT, respectively), or if they aren't set, from the .postgresql +// directory in the user's home directory. If the file paths are set +// explicitly, the files must exist. The key file must also not be +// world-readable, or this function will panic with +// ErrSSLKeyHasWorldPermissions. +func (cn *conn) setupSSLClientCertificates(tlsConf *tls.Config, o values) { + var missingOk bool + + sslkey := o.Get("sslkey") + sslcert := o.Get("sslcert") + if sslkey != "" && sslcert != "" { + // If the user has set an sslkey and sslcert, they *must* exist. + missingOk = false + } else { + // Automatically load certificates from ~/.postgresql. + user, err := user.Current() + if err != nil { + // user.Current() might fail when cross-compiling. We have to + // ignore the error and continue without client certificates, since + // we wouldn't know where to load them from. + return + } + + sslkey = filepath.Join(user.HomeDir, ".postgresql", "postgresql.key") + sslcert = filepath.Join(user.HomeDir, ".postgresql", "postgresql.crt") + missingOk = true + } + + // Check that both files exist, and report the error or stop, depending on + // which behaviour we want. Note that we don't do any more extensive + // checks than this (such as checking that the paths aren't directories); + // LoadX509KeyPair() will take care of the rest. 
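ssl above accepts the sslmode values require (the default, no certificate verification), verify-ca, verify-full, and disable, with client certificates taken from sslcert/sslkey or falling back to ~/.postgresql. Illustrative connection strings (all file paths are placeholders):

    package example

    // sslmode variants understood by the vendored driver; the sslrootcert,
    // sslcert and sslkey paths below are placeholders.
    const (
        dsnRequire    = "host=db.example.com sslmode=require"
        dsnVerifyCA   = "host=db.example.com sslmode=verify-ca sslrootcert=/etc/pki/root.crt"
        dsnVerifyFull = "host=db.example.com sslmode=verify-full sslrootcert=/etc/pki/root.crt sslcert=/etc/pki/client.crt sslkey=/etc/pki/client.key"
    )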
+ keyfinfo, err := os.Stat(sslkey) + if err != nil && missingOk { + return + } else if err != nil { + panic(err) + } + _, err = os.Stat(sslcert) + if err != nil && missingOk { + return + } else if err != nil { + panic(err) + } + + // If we got this far, the key file must also have the correct permissions + kmode := keyfinfo.Mode() + if kmode != kmode&0600 { + panic(ErrSSLKeyHasWorldPermissions) + } + + cert, err := tls.LoadX509KeyPair(sslcert, sslkey) + if err != nil { + panic(err) + } + tlsConf.Certificates = []tls.Certificate{cert} +} + +// Sets up RootCAs in the TLS configuration if sslrootcert is set. +func (cn *conn) setupSSLCA(tlsConf *tls.Config, o values) { + if sslrootcert := o.Get("sslrootcert"); sslrootcert != "" { + tlsConf.RootCAs = x509.NewCertPool() + + cert, err := ioutil.ReadFile(sslrootcert) + if err != nil { + panic(err) + } + + ok := tlsConf.RootCAs.AppendCertsFromPEM(cert) + if !ok { + errorf("couldn't parse pem in sslrootcert") + } + } +} + +// isDriverSetting returns true iff a setting is purely for configuring the +// driver's options and should not be sent to the server in the connection +// startup packet. +func isDriverSetting(key string) bool { + switch key { + case "host", "port": + return true + case "password": + return true + case "sslmode", "sslcert", "sslkey", "sslrootcert": + return true + case "fallback_application_name": + return true + case "connect_timeout": + return true + case "disable_prepared_binary_result": + return true + case "binary_parameters": + return true + + default: + return false + } +} + +func (cn *conn) startup(o values) { + w := cn.writeBuf(0) + w.int32(196608) + // Send the backend the name of the database we want to connect to, and the + // user we want to connect as. Additionally, we send over any run-time + // parameters potentially included in the connection string. If the server + // doesn't recognize any of them, it will reply with an error. + for k, v := range o { + if isDriverSetting(k) { + // skip options which can't be run-time parameters + continue + } + // The protocol requires us to supply the database name as "database" + // instead of "dbname". + if k == "dbname" { + k = "database" + } + w.string(k) + w.string(v) + } + w.string("") + cn.sendStartupPacket(w) + + for { + t, r := cn.recv() + switch t { + case 'K': + case 'S': + cn.processParameterStatus(r) + case 'R': + cn.auth(r, o) + case 'Z': + cn.processReadyForQuery(r) + return + default: + errorf("unknown response for startup: %q", t) + } + } +} + +func (cn *conn) auth(r *readBuf, o values) { + switch code := r.int32(); code { + case 0: + // OK + case 3: + w := cn.writeBuf('p') + w.string(o.Get("password")) + cn.send(w) + + t, r := cn.recv() + if t != 'R' { + errorf("unexpected password response: %q", t) + } + + if r.int32() != 0 { + errorf("unexpected authentication response: %q", t) + } + case 5: + s := string(r.next(4)) + w := cn.writeBuf('p') + w.string("md5" + md5s(md5s(o.Get("password")+o.Get("user"))+s)) + cn.send(w) + + t, r := cn.recv() + if t != 'R' { + errorf("unexpected password response: %q", t) + } + + if r.int32() != 0 { + errorf("unexpected authentication response: %q", t) + } + default: + errorf("unknown authentication response: %d", code) + } +} + +type format int + +const formatText format = 0 +const formatBinary format = 1 + +// One result-column format code with the value 1 (i.e. all binary). +var colFmtDataAllBinary []byte = []byte{0, 1, 0, 1} + +// No result-column format codes (i.e. all text). 
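The auth handler in the code above answers authentication request code 5 with PostgreSQL's MD5 scheme: "md5" + md5hex(md5hex(password+user) + salt). The same computation as a standalone sketch:

    package example

    import (
        "crypto/md5"
        "fmt"
    )

    func md5hex(s string) string {
        return fmt.Sprintf("%x", md5.Sum([]byte(s)))
    }

    // md5Password reproduces the response sent for authentication code 5,
    // where salt is the 4 bytes supplied by the server.
    func md5Password(user, password string, salt [4]byte) string {
        return "md5" + md5hex(md5hex(password+user)+string(salt[:]))
    }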
+var colFmtDataAllText []byte = []byte{0, 0} + +type stmt struct { + cn *conn + name string + colNames []string + colFmts []format + colFmtData []byte + colTyps []oid.Oid + paramTyps []oid.Oid + closed bool +} + +func (st *stmt) Close() (err error) { + if st.closed { + return nil + } + if st.cn.bad { + return driver.ErrBadConn + } + defer st.cn.errRecover(&err) + + w := st.cn.writeBuf('C') + w.byte('S') + w.string(st.name) + st.cn.send(w) + + st.cn.send(st.cn.writeBuf('S')) + + t, _ := st.cn.recv1() + if t != '3' { + st.cn.bad = true + errorf("unexpected close response: %q", t) + } + st.closed = true + + t, r := st.cn.recv1() + if t != 'Z' { + st.cn.bad = true + errorf("expected ready for query, but got: %q", t) + } + st.cn.processReadyForQuery(r) + + return nil +} + +func (st *stmt) Query(v []driver.Value) (r driver.Rows, err error) { + if st.cn.bad { + return nil, driver.ErrBadConn + } + defer st.cn.errRecover(&err) + + st.exec(v) + return &rows{ + cn: st.cn, + colNames: st.colNames, + colTyps: st.colTyps, + colFmts: st.colFmts, + }, nil +} + +func (st *stmt) Exec(v []driver.Value) (res driver.Result, err error) { + if st.cn.bad { + return nil, driver.ErrBadConn + } + defer st.cn.errRecover(&err) + + st.exec(v) + res, _, err = st.cn.readExecuteResponse("simple query") + return res, err +} + +func (st *stmt) exec(v []driver.Value) { + if len(v) >= 65536 { + errorf("got %d parameters but PostgreSQL only supports 65535 parameters", len(v)) + } + if len(v) != len(st.paramTyps) { + errorf("got %d parameters but the statement requires %d", len(v), len(st.paramTyps)) + } + + cn := st.cn + w := cn.writeBuf('B') + w.byte(0) // unnamed portal + w.string(st.name) + + if cn.binaryParameters { + cn.sendBinaryParameters(w, v) + } else { + w.int16(0) + w.int16(len(v)) + for i, x := range v { + if x == nil { + w.int32(-1) + } else { + b := encode(&cn.parameterStatus, x, st.paramTyps[i]) + w.int32(len(b)) + w.bytes(b) + } + } + } + w.bytes(st.colFmtData) + + w.next('E') + w.byte(0) + w.int32(0) + + w.next('S') + cn.send(w) + + cn.readBindResponse() + cn.postExecuteWorkaround() + +} + +func (st *stmt) NumInput() int { + return len(st.paramTyps) +} + +// parseComplete parses the "command tag" from a CommandComplete message, and +// returns the number of rows affected (if applicable) and a string +// identifying only the command that was executed, e.g. "ALTER TABLE". If the +// command tag could not be parsed, parseComplete panics. +func (cn *conn) parseComplete(commandTag string) (driver.Result, string) { + commandsWithAffectedRows := []string{ + "SELECT ", + // INSERT is handled below + "UPDATE ", + "DELETE ", + "FETCH ", + "MOVE ", + "COPY ", + } + + var affectedRows *string + for _, tag := range commandsWithAffectedRows { + if strings.HasPrefix(commandTag, tag) { + t := commandTag[len(tag):] + affectedRows = &t + commandTag = tag[:len(tag)-1] + break + } + } + // INSERT also includes the oid of the inserted row in its command tag. + // Oids in user tables are deprecated, and the oid is only returned when + // exactly one row is inserted, so it's unlikely to be of value to any + // real-world application and we can ignore it. 
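parseComplete, which begins above, turns CommandComplete tags such as "UPDATE 3" or "INSERT 0 1" (the 0 being the legacy oid) into a driver.Result. From database/sql this surfaces through RowsAffected; a sketch (the table name is a placeholder):

    package example

    import "database/sql"

    func affected(db *sql.DB) (int64, error) {
        // The driver derives RowsAffected from the command tag returned
        // by the server, e.g. "UPDATE 3" or "INSERT 0 1".
        res, err := db.Exec("UPDATE t SET a = 1")
        if err != nil {
            return 0, err
        }
        return res.RowsAffected()
    }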
+ if affectedRows == nil && strings.HasPrefix(commandTag, "INSERT ") { + parts := strings.Split(commandTag, " ") + if len(parts) != 3 { + cn.bad = true + errorf("unexpected INSERT command tag %s", commandTag) + } + affectedRows = &parts[len(parts)-1] + commandTag = "INSERT" + } + // There should be no affected rows attached to the tag, just return it + if affectedRows == nil { + return driver.RowsAffected(0), commandTag + } + n, err := strconv.ParseInt(*affectedRows, 10, 64) + if err != nil { + cn.bad = true + errorf("could not parse commandTag: %s", err) + } + return driver.RowsAffected(n), commandTag +} + +type rows struct { + cn *conn + colNames []string + colTyps []oid.Oid + colFmts []format + done bool + rb readBuf +} + +func (rs *rows) Close() error { + // no need to look at cn.bad as Next() will + for { + err := rs.Next(nil) + switch err { + case nil: + case io.EOF: + return nil + default: + return err + } + } +} + +func (rs *rows) Columns() []string { + return rs.colNames +} + +func (rs *rows) Next(dest []driver.Value) (err error) { + if rs.done { + return io.EOF + } + + conn := rs.cn + if conn.bad { + return driver.ErrBadConn + } + defer conn.errRecover(&err) + + for { + t := conn.recv1Buf(&rs.rb) + switch t { + case 'E': + err = parseError(&rs.rb) + case 'C', 'I': + continue + case 'Z': + conn.processReadyForQuery(&rs.rb) + rs.done = true + if err != nil { + return err + } + return io.EOF + case 'D': + n := rs.rb.int16() + if err != nil { + conn.bad = true + errorf("unexpected DataRow after error %s", err) + } + if n < len(dest) { + dest = dest[:n] + } + for i := range dest { + l := rs.rb.int32() + if l == -1 { + dest[i] = nil + continue + } + dest[i] = decode(&conn.parameterStatus, rs.rb.next(l), rs.colTyps[i], rs.colFmts[i]) + } + return + default: + errorf("unexpected message after execute: %q", t) + } + } +} + +// QuoteIdentifier quotes an "identifier" (e.g. a table or a column name) to be +// used as part of an SQL statement. For example: +// +// tblname := "my_table" +// data := "my_data" +// err = db.Exec(fmt.Sprintf("INSERT INTO %s VALUES ($1)", pq.QuoteIdentifier(tblname)), data) +// +// Any double quotes in name will be escaped. The quoted identifier will be +// case sensitive when used in a query. If the input string contains a zero +// byte, the result will be truncated immediately before it. +func QuoteIdentifier(name string) string { + end := strings.IndexRune(name, 0) + if end > -1 { + name = name[:end] + } + return `"` + strings.Replace(name, `"`, `""`, -1) + `"` +} + +func md5s(s string) string { + h := md5.New() + h.Write([]byte(s)) + return fmt.Sprintf("%x", h.Sum(nil)) +} + +func (cn *conn) sendBinaryParameters(b *writeBuf, args []driver.Value) { + // Do one pass over the parameters to see if we're going to send any of + // them over in binary. If we are, create a paramFormats array at the + // same time. 
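sendBinaryParameters, started above, only flags []byte arguments as binary; whether it is used at all for plain Query/Exec calls is controlled by the binary_parameters=yes option parsed in handleDriverSettings. A usage sketch:

    package example

    import (
        "database/sql"

        _ "github.com/lib/pq"
    )

    func openBinary() (*sql.DB, error) {
        // binary_parameters=yes sends []byte arguments in binary format
        // and enables single round-trip mode for non-prepared queries.
        return sql.Open("postgres", "host=localhost dbname=mydb binary_parameters=yes")
    }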
+ var paramFormats []int + for i, x := range args { + _, ok := x.([]byte) + if ok { + if paramFormats == nil { + paramFormats = make([]int, len(args)) + } + paramFormats[i] = 1 + } + } + if paramFormats == nil { + b.int16(0) + } else { + b.int16(len(paramFormats)) + for _, x := range paramFormats { + b.int16(x) + } + } + + b.int16(len(args)) + for _, x := range args { + if x == nil { + b.int32(-1) + } else { + datum := binaryEncode(&cn.parameterStatus, x) + b.int32(len(datum)) + b.bytes(datum) + } + } +} + +func (cn *conn) sendBinaryModeQuery(query string, args []driver.Value) { + if len(args) >= 65536 { + errorf("got %d parameters but PostgreSQL only supports 65535 parameters", len(args)) + } + + b := cn.writeBuf('P') + b.byte(0) // unnamed statement + b.string(query) + b.int16(0) + + b.next('B') + b.int16(0) // unnamed portal and statement + cn.sendBinaryParameters(b, args) + b.bytes(colFmtDataAllText) + + b.next('D') + b.byte('P') + b.byte(0) // unnamed portal + + b.next('E') + b.byte(0) + b.int32(0) + + b.next('S') + cn.send(b) +} + +func (c *conn) processParameterStatus(r *readBuf) { + var err error + + param := r.string() + switch param { + case "server_version": + var major1 int + var major2 int + var minor int + _, err = fmt.Sscanf(r.string(), "%d.%d.%d", &major1, &major2, &minor) + if err == nil { + c.parameterStatus.serverVersion = major1*10000 + major2*100 + minor + } + + case "TimeZone": + c.parameterStatus.currentLocation, err = time.LoadLocation(r.string()) + if err != nil { + c.parameterStatus.currentLocation = nil + } + + default: + // ignore + } +} + +func (c *conn) processReadyForQuery(r *readBuf) { + c.txnStatus = transactionStatus(r.byte()) +} + +func (cn *conn) readReadyForQuery() { + t, r := cn.recv1() + switch t { + case 'Z': + cn.processReadyForQuery(r) + return + default: + cn.bad = true + errorf("unexpected message %q; expected ReadyForQuery", t) + } +} + +func (cn *conn) readParseResponse() { + t, r := cn.recv1() + switch t { + case '1': + return + case 'E': + err := parseError(r) + cn.readReadyForQuery() + panic(err) + default: + cn.bad = true + errorf("unexpected Parse response %q", t) + } +} + +func (cn *conn) readStatementDescribeResponse() (paramTyps []oid.Oid, colNames []string, colTyps []oid.Oid) { + for { + t, r := cn.recv1() + switch t { + case 't': + nparams := r.int16() + paramTyps = make([]oid.Oid, nparams) + for i := range paramTyps { + paramTyps[i] = r.oid() + } + case 'n': + return paramTyps, nil, nil + case 'T': + colNames, colTyps = parseStatementRowDescribe(r) + return paramTyps, colNames, colTyps + case 'E': + err := parseError(r) + cn.readReadyForQuery() + panic(err) + default: + cn.bad = true + errorf("unexpected Describe statement response %q", t) + } + } +} + +func (cn *conn) readPortalDescribeResponse() (colNames []string, colFmts []format, colTyps []oid.Oid) { + t, r := cn.recv1() + switch t { + case 'T': + return parsePortalRowDescribe(r) + case 'n': + return nil, nil, nil + case 'E': + err := parseError(r) + cn.readReadyForQuery() + panic(err) + default: + cn.bad = true + errorf("unexpected Describe response %q", t) + } + panic("not reached") +} + +func (cn *conn) readBindResponse() { + t, r := cn.recv1() + switch t { + case '2': + return + case 'E': + err := parseError(r) + cn.readReadyForQuery() + panic(err) + default: + cn.bad = true + errorf("unexpected Bind response %q", t) + } +} + +func (cn *conn) postExecuteWorkaround() { + // Work around a bug in sql.DB.QueryRow: in Go 1.2 and earlier it ignores + // any errors from rows.Next, 
which masks errors that happened during the + // execution of the query. To avoid the problem in common cases, we wait + // here for one more message from the database. If it's not an error the + // query will likely succeed (or perhaps has already, if it's a + // CommandComplete), so we push the message into the conn struct; recv1 + // will return it as the next message for rows.Next or rows.Close. + // However, if it's an error, we wait until ReadyForQuery and then return + // the error to our caller. + for { + t, r := cn.recv1() + switch t { + case 'E': + err := parseError(r) + cn.readReadyForQuery() + panic(err) + case 'C', 'D', 'I': + // the query didn't fail, but we can't process this message + cn.saveMessage(t, r) + return + default: + cn.bad = true + errorf("unexpected message during extended query execution: %q", t) + } + } +} + +// Only for Exec(), since we ignore the returned data +func (cn *conn) readExecuteResponse(protocolState string) (res driver.Result, commandTag string, err error) { + for { + t, r := cn.recv1() + switch t { + case 'C': + if err != nil { + cn.bad = true + errorf("unexpected CommandComplete after error %s", err) + } + res, commandTag = cn.parseComplete(r.string()) + case 'Z': + cn.processReadyForQuery(r) + return res, commandTag, err + case 'E': + err = parseError(r) + case 'T', 'D', 'I': + if err != nil { + cn.bad = true + errorf("unexpected %q after error %s", t, err) + } + // ignore any results + default: + cn.bad = true + errorf("unknown %s response: %q", protocolState, t) + } + } +} + +func parseStatementRowDescribe(r *readBuf) (colNames []string, colTyps []oid.Oid) { + n := r.int16() + colNames = make([]string, n) + colTyps = make([]oid.Oid, n) + for i := range colNames { + colNames[i] = r.string() + r.next(6) + colTyps[i] = r.oid() + r.next(6) + // format code not known when describing a statement; always 0 + r.next(2) + } + return +} + +func parsePortalRowDescribe(r *readBuf) (colNames []string, colFmts []format, colTyps []oid.Oid) { + n := r.int16() + colNames = make([]string, n) + colFmts = make([]format, n) + colTyps = make([]oid.Oid, n) + for i := range colNames { + colNames[i] = r.string() + r.next(6) + colTyps[i] = r.oid() + r.next(6) + colFmts[i] = format(r.int16()) + } + return +} + +// parseEnviron tries to mimic some of libpq's environment handling +// +// To ease testing, it does not directly reference os.Environ, but is +// designed to accept its output. +// +// Environment-set connection information is intended to have a higher +// precedence than a library default but lower than any explicitly +// passed information (such as in the URL or connection string). +func parseEnviron(env []string) (out map[string]string) { + out = make(map[string]string) + + for _, v := range env { + parts := strings.SplitN(v, "=", 2) + + accrue := func(keyname string) { + out[keyname] = parts[1] + } + unsupported := func() { + panic(fmt.Sprintf("setting %v not supported", parts[0])) + } + + // The order of these is the same as is seen in the + // PostgreSQL 9.1 manual. Unsupported but well-defined + // keys cause a panic; these should be unset prior to + // execution. Options which pq expects to be set to a + // certain value are allowed, but must be set to that + // value if present (they can, of course, be absent). 
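As the comment above says, parseEnviron gives the PG* environment variables precedence over library defaults while still losing to anything passed explicitly in the connection string. A sketch (host and names invented):

    package example

    import (
        "database/sql"
        "os"

        _ "github.com/lib/pq"
    )

    func openFromEnv() (*sql.DB, error) {
        // Environment beats the built-in defaults; an explicit dbname in
        // the DSN would still override PGDATABASE.
        os.Setenv("PGHOST", "db.example.com")
        os.Setenv("PGUSER", "alice")
        os.Setenv("PGDATABASE", "mydb")
        return sql.Open("postgres", "sslmode=disable")
    }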
+ switch parts[0] { + case "PGHOST": + accrue("host") + case "PGHOSTADDR": + unsupported() + case "PGPORT": + accrue("port") + case "PGDATABASE": + accrue("dbname") + case "PGUSER": + accrue("user") + case "PGPASSWORD": + accrue("password") + case "PGSERVICE", "PGSERVICEFILE", "PGREALM": + unsupported() + case "PGOPTIONS": + accrue("options") + case "PGAPPNAME": + accrue("application_name") + case "PGSSLMODE": + accrue("sslmode") + case "PGSSLCERT": + accrue("sslcert") + case "PGSSLKEY": + accrue("sslkey") + case "PGSSLROOTCERT": + accrue("sslrootcert") + case "PGREQUIRESSL", "PGSSLCRL": + unsupported() + case "PGREQUIREPEER": + unsupported() + case "PGKRBSRVNAME", "PGGSSLIB": + unsupported() + case "PGCONNECT_TIMEOUT": + accrue("connect_timeout") + case "PGCLIENTENCODING": + accrue("client_encoding") + case "PGDATESTYLE": + accrue("datestyle") + case "PGTZ": + accrue("timezone") + case "PGGEQO": + accrue("geqo") + case "PGSYSCONFDIR", "PGLOCALEDIR": + unsupported() + } + } + + return out +} + +// isUTF8 returns whether name is a fuzzy variation of the string "UTF-8". +func isUTF8(name string) bool { + // Recognize all sorts of silly things as "UTF-8", like Postgres does + s := strings.Map(alnumLowerASCII, name) + return s == "utf8" || s == "unicode" +} + +func alnumLowerASCII(ch rune) rune { + if 'A' <= ch && ch <= 'Z' { + return ch + ('a' - 'A') + } + if 'a' <= ch && ch <= 'z' || '0' <= ch && ch <= '9' { + return ch + } + return -1 // discard +} diff --git a/vendor/github.com/lib/pq/conn_test.go b/vendor/github.com/lib/pq/conn_test.go new file mode 100644 index 000000000..2639c8efd --- /dev/null +++ b/vendor/github.com/lib/pq/conn_test.go @@ -0,0 +1,1433 @@ +package pq + +import ( + "database/sql" + "database/sql/driver" + "fmt" + "io" + "os" + "reflect" + "strings" + "testing" + "time" +) + +type Fatalistic interface { + Fatal(args ...interface{}) +} + +func forceBinaryParameters() bool { + bp := os.Getenv("PQTEST_BINARY_PARAMETERS") + if bp == "yes" { + return true + } else if bp == "" || bp == "no" { + return false + } else { + panic("unexpected value for PQTEST_BINARY_PARAMETERS") + } +} + +func openTestConnConninfo(conninfo string) (*sql.DB, error) { + defaultTo := func(envvar string, value string) { + if os.Getenv(envvar) == "" { + os.Setenv(envvar, value) + } + } + defaultTo("PGDATABASE", "pqgotest") + defaultTo("PGSSLMODE", "disable") + defaultTo("PGCONNECT_TIMEOUT", "20") + + if forceBinaryParameters() && + !strings.HasPrefix(conninfo, "postgres://") && + !strings.HasPrefix(conninfo, "postgresql://") { + conninfo = conninfo + " binary_parameters=yes" + } + + return sql.Open("postgres", conninfo) +} + +func openTestConn(t Fatalistic) *sql.DB { + conn, err := openTestConnConninfo("") + if err != nil { + t.Fatal(err) + } + + return conn +} + +func getServerVersion(t *testing.T, db *sql.DB) int { + var version int + err := db.QueryRow("SHOW server_version_num").Scan(&version) + if err != nil { + t.Fatal(err) + } + return version +} + +func TestReconnect(t *testing.T) { + db1 := openTestConn(t) + defer db1.Close() + tx, err := db1.Begin() + if err != nil { + t.Fatal(err) + } + var pid1 int + err = tx.QueryRow("SELECT pg_backend_pid()").Scan(&pid1) + if err != nil { + t.Fatal(err) + } + db2 := openTestConn(t) + defer db2.Close() + _, err = db2.Exec("SELECT pg_terminate_backend($1)", pid1) + if err != nil { + t.Fatal(err) + } + // The rollback will probably "fail" because we just killed + // its connection above + _ = tx.Rollback() + + const expected int = 42 + var result int + err = 
db1.QueryRow(fmt.Sprintf("SELECT %d", expected)).Scan(&result) + if err != nil { + t.Fatal(err) + } + if result != expected { + t.Errorf("got %v; expected %v", result, expected) + } +} + +func TestCommitInFailedTransaction(t *testing.T) { + db := openTestConn(t) + defer db.Close() + + txn, err := db.Begin() + if err != nil { + t.Fatal(err) + } + rows, err := txn.Query("SELECT error") + if err == nil { + rows.Close() + t.Fatal("expected failure") + } + err = txn.Commit() + if err != ErrInFailedTransaction { + t.Fatalf("expected ErrInFailedTransaction; got %#v", err) + } +} + +func TestOpenURL(t *testing.T) { + testURL := func(url string) { + db, err := openTestConnConninfo(url) + if err != nil { + t.Fatal(err) + } + defer db.Close() + // database/sql might not call our Open at all unless we do something with + // the connection + txn, err := db.Begin() + if err != nil { + t.Fatal(err) + } + txn.Rollback() + } + testURL("postgres://") + testURL("postgresql://") +} + +const pgpass_file = "/tmp/pqgotest_pgpass" +func TestPgpass(t *testing.T) { + testAssert := func(conninfo string, expected string, reason string) { + conn, err := openTestConnConninfo(conninfo) + if err != nil { + t.Fatal(err) + } + defer conn.Close() + + txn, err := conn.Begin() + if err != nil { + if expected != "fail" { + t.Fatalf(reason, err) + } + return + } + rows, err := txn.Query("SELECT USER") + if err != nil { + txn.Rollback() + rows.Close() + if expected != "fail" { + t.Fatalf(reason, err) + } + } else { + if expected != "ok" { + t.Fatalf(reason, err) + } + } + txn.Rollback() + } + testAssert("", "ok", "missing .pgpass, unexpected error %#v") + os.Setenv("PGPASSFILE", pgpass_file) + testAssert("host=/tmp", "fail", ", unexpected error %#v") + os.Remove(pgpass_file) + pgpass, err := os.OpenFile(pgpass_file, os.O_RDWR|os.O_CREATE, 0644) + if err != nil { + t.Fatalf("Unexpected error writing pgpass file %#v", err) + } + _, err = pgpass.WriteString(`# comment +server:5432:some_db:some_user:pass_A +*:5432:some_db:some_user:pass_B +localhost:*:*:*:pass_C +*:*:*:*:pass_fallback +`) + if err != nil { + t.Fatalf("Unexpected error writing pgpass file %#v", err) + } + pgpass.Close() + + assertPassword := func(extra values, expected string) { + o := &values{"host": "localhost", "sslmode": "disable", "connect_timeout": "20", "user": "majid", "port": "5432", "extra_float_digits": "2", "dbname": "pqgotest", "client_encoding": "UTF8", "datestyle": "ISO, MDY"} + for k, v := range extra { + (*o)[k] = v + } + (&conn{}).handlePgpass(*o) + if o.Get("password") != expected { + t.Fatalf("For %v expected %s got %s", extra, expected, o.Get("password")) + } + } + // wrong permissions for the pgpass file means it should be ignored + assertPassword(values{"host": "example.com", "user": "foo"}, "") + // fix the permissions and check if it has taken effect + os.Chmod(pgpass_file, 0600) + assertPassword(values{"host": "server", "dbname": "some_db", "user": "some_user"}, "pass_A") + assertPassword(values{"host": "example.com", "user": "foo"}, "pass_fallback") + assertPassword(values{"host": "example.com", "dbname": "some_db", "user": "some_user"}, "pass_B") + // localhost also matches the default "" and UNIX sockets + assertPassword(values{"host": "", "user": "some_user"}, "pass_C") + assertPassword(values{"host": "/tmp", "user": "some_user"}, "pass_C") + // cleanup + os.Remove(pgpass_file) + os.Setenv("PGPASSFILE", "") +} + +func TestExec(t *testing.T) { + db := openTestConn(t) + defer db.Close() + + _, err := db.Exec("CREATE TEMP TABLE temp (a 
int)") + if err != nil { + t.Fatal(err) + } + + r, err := db.Exec("INSERT INTO temp VALUES (1)") + if err != nil { + t.Fatal(err) + } + + if n, _ := r.RowsAffected(); n != 1 { + t.Fatalf("expected 1 row affected, not %d", n) + } + + r, err = db.Exec("INSERT INTO temp VALUES ($1), ($2), ($3)", 1, 2, 3) + if err != nil { + t.Fatal(err) + } + + if n, _ := r.RowsAffected(); n != 3 { + t.Fatalf("expected 3 rows affected, not %d", n) + } + + // SELECT doesn't send the number of returned rows in the command tag + // before 9.0 + if getServerVersion(t, db) >= 90000 { + r, err = db.Exec("SELECT g FROM generate_series(1, 2) g") + if err != nil { + t.Fatal(err) + } + if n, _ := r.RowsAffected(); n != 2 { + t.Fatalf("expected 2 rows affected, not %d", n) + } + + r, err = db.Exec("SELECT g FROM generate_series(1, $1) g", 3) + if err != nil { + t.Fatal(err) + } + if n, _ := r.RowsAffected(); n != 3 { + t.Fatalf("expected 3 rows affected, not %d", n) + } + } +} + +func TestStatment(t *testing.T) { + db := openTestConn(t) + defer db.Close() + + st, err := db.Prepare("SELECT 1") + if err != nil { + t.Fatal(err) + } + + st1, err := db.Prepare("SELECT 2") + if err != nil { + t.Fatal(err) + } + + r, err := st.Query() + if err != nil { + t.Fatal(err) + } + defer r.Close() + + if !r.Next() { + t.Fatal("expected row") + } + + var i int + err = r.Scan(&i) + if err != nil { + t.Fatal(err) + } + + if i != 1 { + t.Fatalf("expected 1, got %d", i) + } + + // st1 + + r1, err := st1.Query() + if err != nil { + t.Fatal(err) + } + defer r1.Close() + + if !r1.Next() { + if r.Err() != nil { + t.Fatal(r1.Err()) + } + t.Fatal("expected row") + } + + err = r1.Scan(&i) + if err != nil { + t.Fatal(err) + } + + if i != 2 { + t.Fatalf("expected 2, got %d", i) + } +} + +func TestRowsCloseBeforeDone(t *testing.T) { + db := openTestConn(t) + defer db.Close() + + r, err := db.Query("SELECT 1") + if err != nil { + t.Fatal(err) + } + + err = r.Close() + if err != nil { + t.Fatal(err) + } + + if r.Next() { + t.Fatal("unexpected row") + } + + if r.Err() != nil { + t.Fatal(r.Err()) + } +} + +func TestParameterCountMismatch(t *testing.T) { + db := openTestConn(t) + defer db.Close() + + var notused int + err := db.QueryRow("SELECT false", 1).Scan(¬used) + if err == nil { + t.Fatal("expected err") + } + // make sure we clean up correctly + err = db.QueryRow("SELECT 1").Scan(¬used) + if err != nil { + t.Fatal(err) + } + + err = db.QueryRow("SELECT $1").Scan(¬used) + if err == nil { + t.Fatal("expected err") + } + // make sure we clean up correctly + err = db.QueryRow("SELECT 1").Scan(¬used) + if err != nil { + t.Fatal(err) + } +} + +// Test that EmptyQueryResponses are handled correctly. 
+func TestEmptyQuery(t *testing.T) { + db := openTestConn(t) + defer db.Close() + + _, err := db.Exec("") + if err != nil { + t.Fatal(err) + } + rows, err := db.Query("") + if err != nil { + t.Fatal(err) + } + cols, err := rows.Columns() + if err != nil { + t.Fatal(err) + } + if len(cols) != 0 { + t.Fatalf("unexpected number of columns %d in response to an empty query", len(cols)) + } + if rows.Next() { + t.Fatal("unexpected row") + } + if rows.Err() != nil { + t.Fatal(rows.Err()) + } + + stmt, err := db.Prepare("") + if err != nil { + t.Fatal(err) + } + _, err = stmt.Exec() + if err != nil { + t.Fatal(err) + } + rows, err = stmt.Query() + if err != nil { + t.Fatal(err) + } + cols, err = rows.Columns() + if err != nil { + t.Fatal(err) + } + if len(cols) != 0 { + t.Fatalf("unexpected number of columns %d in response to an empty query", len(cols)) + } + if rows.Next() { + t.Fatal("unexpected row") + } + if rows.Err() != nil { + t.Fatal(rows.Err()) + } +} + +// Test that rows.Columns() is correct even if there are no result rows. +func TestEmptyResultSetColumns(t *testing.T) { + db := openTestConn(t) + defer db.Close() + + rows, err := db.Query("SELECT 1 AS a, text 'bar' AS bar WHERE FALSE") + if err != nil { + t.Fatal(err) + } + cols, err := rows.Columns() + if err != nil { + t.Fatal(err) + } + if len(cols) != 2 { + t.Fatalf("unexpected number of columns %d in response to an empty query", len(cols)) + } + if rows.Next() { + t.Fatal("unexpected row") + } + if rows.Err() != nil { + t.Fatal(rows.Err()) + } + if cols[0] != "a" || cols[1] != "bar" { + t.Fatalf("unexpected Columns result %v", cols) + } + + stmt, err := db.Prepare("SELECT $1::int AS a, text 'bar' AS bar WHERE FALSE") + if err != nil { + t.Fatal(err) + } + rows, err = stmt.Query(1) + if err != nil { + t.Fatal(err) + } + cols, err = rows.Columns() + if err != nil { + t.Fatal(err) + } + if len(cols) != 2 { + t.Fatalf("unexpected number of columns %d in response to an empty query", len(cols)) + } + if rows.Next() { + t.Fatal("unexpected row") + } + if rows.Err() != nil { + t.Fatal(rows.Err()) + } + if cols[0] != "a" || cols[1] != "bar" { + t.Fatalf("unexpected Columns result %v", cols) + } + +} + +func TestEncodeDecode(t *testing.T) { + db := openTestConn(t) + defer db.Close() + + q := ` + SELECT + E'\\000\\001\\002'::bytea, + 'foobar'::text, + NULL::integer, + '2000-1-1 01:02:03.04-7'::timestamptz, + 0::boolean, + 123, + -321, + 3.14::float8 + WHERE + E'\\000\\001\\002'::bytea = $1 + AND 'foobar'::text = $2 + AND $3::integer is NULL + ` + // AND '2000-1-1 12:00:00.000000-7'::timestamp = $3 + + exp1 := []byte{0, 1, 2} + exp2 := "foobar" + + r, err := db.Query(q, exp1, exp2, nil) + if err != nil { + t.Fatal(err) + } + defer r.Close() + + if !r.Next() { + if r.Err() != nil { + t.Fatal(r.Err()) + } + t.Fatal("expected row") + } + + var got1 []byte + var got2 string + var got3 = sql.NullInt64{Valid: true} + var got4 time.Time + var got5, got6, got7, got8 interface{} + + err = r.Scan(&got1, &got2, &got3, &got4, &got5, &got6, &got7, &got8) + if err != nil { + t.Fatal(err) + } + + if !reflect.DeepEqual(exp1, got1) { + t.Errorf("expected %q byte: %q", exp1, got1) + } + + if !reflect.DeepEqual(exp2, got2) { + t.Errorf("expected %q byte: %q", exp2, got2) + } + + if got3.Valid { + t.Fatal("expected invalid") + } + + if got4.Year() != 2000 { + t.Fatal("wrong year") + } + + if got5 != false { + t.Fatalf("expected false, got %q", got5) + } + + if got6 != int64(123) { + t.Fatalf("expected 123, got %d", got6) + } + + if got7 != int64(-321) { + 
t.Fatalf("expected -321, got %d", got7) + } + + if got8 != float64(3.14) { + t.Fatalf("expected 3.14, got %f", got8) + } +} + +func TestNoData(t *testing.T) { + db := openTestConn(t) + defer db.Close() + + st, err := db.Prepare("SELECT 1 WHERE true = false") + if err != nil { + t.Fatal(err) + } + defer st.Close() + + r, err := st.Query() + if err != nil { + t.Fatal(err) + } + defer r.Close() + + if r.Next() { + if r.Err() != nil { + t.Fatal(r.Err()) + } + t.Fatal("unexpected row") + } + + _, err = db.Query("SELECT * FROM nonexistenttable WHERE age=$1", 20) + if err == nil { + t.Fatal("Should have raised an error on non existent table") + } + + _, err = db.Query("SELECT * FROM nonexistenttable") + if err == nil { + t.Fatal("Should have raised an error on non existent table") + } +} + +func TestErrorDuringStartup(t *testing.T) { + // Don't use the normal connection setup, this is intended to + // blow up in the startup packet from a non-existent user. + db, err := openTestConnConninfo("user=thisuserreallydoesntexist") + if err != nil { + t.Fatal(err) + } + defer db.Close() + + _, err = db.Begin() + if err == nil { + t.Fatal("expected error") + } + + e, ok := err.(*Error) + if !ok { + t.Fatalf("expected Error, got %#v", err) + } else if e.Code.Name() != "invalid_authorization_specification" && e.Code.Name() != "invalid_password" { + t.Fatalf("expected invalid_authorization_specification or invalid_password, got %s (%+v)", e.Code.Name(), err) + } +} + +func TestBadConn(t *testing.T) { + var err error + + cn := conn{} + func() { + defer cn.errRecover(&err) + panic(io.EOF) + }() + if err != driver.ErrBadConn { + t.Fatalf("expected driver.ErrBadConn, got: %#v", err) + } + if !cn.bad { + t.Fatalf("expected cn.bad") + } + + cn = conn{} + func() { + defer cn.errRecover(&err) + e := &Error{Severity: Efatal} + panic(e) + }() + if err != driver.ErrBadConn { + t.Fatalf("expected driver.ErrBadConn, got: %#v", err) + } + if !cn.bad { + t.Fatalf("expected cn.bad") + } +} + +func TestErrorOnExec(t *testing.T) { + db := openTestConn(t) + defer db.Close() + + txn, err := db.Begin() + if err != nil { + t.Fatal(err) + } + defer txn.Rollback() + + _, err = txn.Exec("CREATE TEMPORARY TABLE foo(f1 int PRIMARY KEY)") + if err != nil { + t.Fatal(err) + } + + _, err = txn.Exec("INSERT INTO foo VALUES (0), (0)") + if err == nil { + t.Fatal("Should have raised error") + } + + e, ok := err.(*Error) + if !ok { + t.Fatalf("expected Error, got %#v", err) + } else if e.Code.Name() != "unique_violation" { + t.Fatalf("expected unique_violation, got %s (%+v)", e.Code.Name(), err) + } +} + +func TestErrorOnQuery(t *testing.T) { + db := openTestConn(t) + defer db.Close() + + txn, err := db.Begin() + if err != nil { + t.Fatal(err) + } + defer txn.Rollback() + + _, err = txn.Exec("CREATE TEMPORARY TABLE foo(f1 int PRIMARY KEY)") + if err != nil { + t.Fatal(err) + } + + _, err = txn.Query("INSERT INTO foo VALUES (0), (0)") + if err == nil { + t.Fatal("Should have raised error") + } + + e, ok := err.(*Error) + if !ok { + t.Fatalf("expected Error, got %#v", err) + } else if e.Code.Name() != "unique_violation" { + t.Fatalf("expected unique_violation, got %s (%+v)", e.Code.Name(), err) + } +} + +func TestErrorOnQueryRowSimpleQuery(t *testing.T) { + db := openTestConn(t) + defer db.Close() + + txn, err := db.Begin() + if err != nil { + t.Fatal(err) + } + defer txn.Rollback() + + _, err = txn.Exec("CREATE TEMPORARY TABLE foo(f1 int PRIMARY KEY)") + if err != nil { + t.Fatal(err) + } + + var v int + err = txn.QueryRow("INSERT INTO foo 
VALUES (0), (0)").Scan(&v) + if err == nil { + t.Fatal("Should have raised error") + } + + e, ok := err.(*Error) + if !ok { + t.Fatalf("expected Error, got %#v", err) + } else if e.Code.Name() != "unique_violation" { + t.Fatalf("expected unique_violation, got %s (%+v)", e.Code.Name(), err) + } +} + +// Test the QueryRow bug workarounds in stmt.exec() and simpleQuery() +func TestQueryRowBugWorkaround(t *testing.T) { + db := openTestConn(t) + defer db.Close() + + // stmt.exec() + _, err := db.Exec("CREATE TEMP TABLE notnulltemp (a varchar(10) not null)") + if err != nil { + t.Fatal(err) + } + + var a string + err = db.QueryRow("INSERT INTO notnulltemp(a) values($1) RETURNING a", nil).Scan(&a) + if err == sql.ErrNoRows { + t.Fatalf("expected constraint violation error; got: %v", err) + } + pge, ok := err.(*Error) + if !ok { + t.Fatalf("expected *Error; got: %#v", err) + } + if pge.Code.Name() != "not_null_violation" { + t.Fatalf("expected not_null_violation; got: %s (%+v)", pge.Code.Name(), err) + } + + // Test workaround in simpleQuery() + tx, err := db.Begin() + if err != nil { + t.Fatalf("unexpected error %s in Begin", err) + } + defer tx.Rollback() + + _, err = tx.Exec("SET LOCAL check_function_bodies TO FALSE") + if err != nil { + t.Fatalf("could not disable check_function_bodies: %s", err) + } + _, err = tx.Exec(` +CREATE OR REPLACE FUNCTION bad_function() +RETURNS integer +-- hack to prevent the function from being inlined +SET check_function_bodies TO TRUE +AS $$ + SELECT text 'bad' +$$ LANGUAGE sql`) + if err != nil { + t.Fatalf("could not create function: %s", err) + } + + err = tx.QueryRow("SELECT * FROM bad_function()").Scan(&a) + if err == nil { + t.Fatalf("expected error") + } + pge, ok = err.(*Error) + if !ok { + t.Fatalf("expected *Error; got: %#v", err) + } + if pge.Code.Name() != "invalid_function_definition" { + t.Fatalf("expected invalid_function_definition; got: %s (%+v)", pge.Code.Name(), err) + } + + err = tx.Rollback() + if err != nil { + t.Fatalf("unexpected error %s in Rollback", err) + } + + // Also test that simpleQuery()'s workaround works when the query fails + // after a row has been received. 
+ rows, err := db.Query(` +select + (select generate_series(1, ss.i)) +from (select gs.i + from generate_series(1, 2) gs(i) + order by gs.i limit 2) ss`) + if err != nil { + t.Fatalf("query failed: %s", err) + } + if !rows.Next() { + t.Fatalf("expected at least one result row; got %s", rows.Err()) + } + var i int + err = rows.Scan(&i) + if err != nil { + t.Fatalf("rows.Scan() failed: %s", err) + } + if i != 1 { + t.Fatalf("unexpected value for i: %d", i) + } + if rows.Next() { + t.Fatalf("unexpected row") + } + pge, ok = rows.Err().(*Error) + if !ok { + t.Fatalf("expected *Error; got: %#v", rows.Err()) + } + if pge.Code.Name() != "cardinality_violation" { + t.Fatalf("expected cardinality_violation; got: %s (%+v)", pge.Code.Name(), rows.Err()) + } +} + +func TestSimpleQuery(t *testing.T) { + db := openTestConn(t) + defer db.Close() + + r, err := db.Query("select 1") + if err != nil { + t.Fatal(err) + } + defer r.Close() + + if !r.Next() { + t.Fatal("expected row") + } +} + +func TestBindError(t *testing.T) { + db := openTestConn(t) + defer db.Close() + + _, err := db.Exec("create temp table test (i integer)") + if err != nil { + t.Fatal(err) + } + + _, err = db.Query("select * from test where i=$1", "hhh") + if err == nil { + t.Fatal("expected an error") + } + + // Should not get error here + r, err := db.Query("select * from test where i=$1", 1) + if err != nil { + t.Fatal(err) + } + defer r.Close() +} + +func TestParseErrorInExtendedQuery(t *testing.T) { + db := openTestConn(t) + defer db.Close() + + rows, err := db.Query("PARSE_ERROR $1", 1) + if err == nil { + t.Fatal("expected error") + } + + rows, err = db.Query("SELECT 1") + if err != nil { + t.Fatal(err) + } + rows.Close() +} + +// TestReturning tests that an INSERT query using the RETURNING clause returns a row.
+func TestReturning(t *testing.T) { + db := openTestConn(t) + defer db.Close() + + _, err := db.Exec("CREATE TEMP TABLE distributors (did integer default 0, dname text)") + if err != nil { + t.Fatal(err) + } + + rows, err := db.Query("INSERT INTO distributors (did, dname) VALUES (DEFAULT, 'XYZ Widgets') " + + "RETURNING did;") + if err != nil { + t.Fatal(err) + } + if !rows.Next() { + t.Fatal("no rows") + } + var did int + err = rows.Scan(&did) + if err != nil { + t.Fatal(err) + } + if did != 0 { + t.Fatalf("bad value for did: got %d, want %d", did, 0) + } + + if rows.Next() { + t.Fatal("unexpected next row") + } + err = rows.Err() + if err != nil { + t.Fatal(err) + } +} + +func TestIssue186(t *testing.T) { + db := openTestConn(t) + defer db.Close() + + // Exec() a query which returns results + _, err := db.Exec("VALUES (1), (2), (3)") + if err != nil { + t.Fatal(err) + } + + _, err = db.Exec("VALUES ($1), ($2), ($3)", 1, 2, 3) + if err != nil { + t.Fatal(err) + } + + // Query() a query which doesn't return any results + txn, err := db.Begin() + if err != nil { + t.Fatal(err) + } + defer txn.Rollback() + + rows, err := txn.Query("CREATE TEMP TABLE foo(f1 int)") + if err != nil { + t.Fatal(err) + } + if err = rows.Close(); err != nil { + t.Fatal(err) + } + + // small trick to get NoData from a parameterized query + _, err = txn.Exec("CREATE RULE nodata AS ON INSERT TO foo DO INSTEAD NOTHING") + if err != nil { + t.Fatal(err) + } + rows, err = txn.Query("INSERT INTO foo VALUES ($1)", 1) + if err != nil { + t.Fatal(err) + } + if err = rows.Close(); err != nil { + t.Fatal(err) + } +} + +func TestIssue196(t *testing.T) { + db := openTestConn(t) + defer db.Close() + + row := db.QueryRow("SELECT float4 '0.10000122' = $1, float8 '35.03554004971999' = $2", + float32(0.10000122), float64(35.03554004971999)) + + var float4match, float8match bool + err := row.Scan(&float4match, &float8match) + if err != nil { + t.Fatal(err) + } + if !float4match { + t.Errorf("Expected float4 fidelity to be maintained; got no match") + } + if !float8match { + t.Errorf("Expected float8 fidelity to be maintained; got no match") + } +} + +// Test that any CommandComplete messages sent before the query results are +// ignored. 
+func TestIssue282(t *testing.T) { + db := openTestConn(t) + defer db.Close() + + var search_path string + err := db.QueryRow(` + SET LOCAL search_path TO pg_catalog; + SET LOCAL search_path TO pg_catalog; + SHOW search_path`).Scan(&search_path) + if err != nil { + t.Fatal(err) + } + if search_path != "pg_catalog" { + t.Fatalf("unexpected search_path %s", search_path) + } +} + +func TestReadFloatPrecision(t *testing.T) { + db := openTestConn(t) + defer db.Close() + + row := db.QueryRow("SELECT float4 '0.10000122', float8 '35.03554004971999'") + var float4val float32 + var float8val float64 + err := row.Scan(&float4val, &float8val) + if err != nil { + t.Fatal(err) + } + if float4val != float32(0.10000122) { + t.Errorf("Expected float4 fidelity to be maintained; got no match") + } + if float8val != float64(35.03554004971999) { + t.Errorf("Expected float8 fidelity to be maintained; got no match") + } +} + +func TestXactMultiStmt(t *testing.T) { + // minified test case based on bug reports from + // pico303@gmail.com and rangelspam@gmail.com + t.Skip("Skipping failing test") + db := openTestConn(t) + defer db.Close() + + tx, err := db.Begin() + if err != nil { + t.Fatal(err) + } + defer tx.Commit() + + rows, err := tx.Query("select 1") + if err != nil { + t.Fatal(err) + } + + if rows.Next() { + var val int32 + if err = rows.Scan(&val); err != nil { + t.Fatal(err) + } + } else { + t.Fatal("Expected at least one row in first query in xact") + } + + rows2, err := tx.Query("select 2") + if err != nil { + t.Fatal(err) + } + + if rows2.Next() { + var val2 int32 + if err := rows2.Scan(&val2); err != nil { + t.Fatal(err) + } + } else { + t.Fatal("Expected at least one row in second query in xact") + } + + if err = rows.Err(); err != nil { + t.Fatal(err) + } + + if err = rows2.Err(); err != nil { + t.Fatal(err) + } + + if err = tx.Commit(); err != nil { + t.Fatal(err) + } +} + +var envParseTests = []struct { + Expected map[string]string + Env []string +}{ + { + Env: []string{"PGDATABASE=hello", "PGUSER=goodbye"}, + Expected: map[string]string{"dbname": "hello", "user": "goodbye"}, + }, + { + Env: []string{"PGDATESTYLE=ISO, MDY"}, + Expected: map[string]string{"datestyle": "ISO, MDY"}, + }, + { + Env: []string{"PGCONNECT_TIMEOUT=30"}, + Expected: map[string]string{"connect_timeout": "30"}, + }, +} + +func TestParseEnviron(t *testing.T) { + for i, tt := range envParseTests { + results := parseEnviron(tt.Env) + if !reflect.DeepEqual(tt.Expected, results) { + t.Errorf("%d: Expected: %#v Got: %#v", i, tt.Expected, results) + } + } +} + +func TestParseComplete(t *testing.T) { + tpc := func(commandTag string, command string, affectedRows int64, shouldFail bool) { + defer func() { + if p := recover(); p != nil { + if !shouldFail { + t.Error(p) + } + } + }() + cn := &conn{} + res, c := cn.parseComplete(commandTag) + if c != command { + t.Errorf("Expected %v, got %v", command, c) + } + n, err := res.RowsAffected() + if err != nil { + t.Fatal(err) + } + if n != affectedRows { + t.Errorf("Expected %d, got %d", affectedRows, n) + } + } + + tpc("ALTER TABLE", "ALTER TABLE", 0, false) + tpc("INSERT 0 1", "INSERT", 1, false) + tpc("UPDATE 100", "UPDATE", 100, false) + tpc("SELECT 100", "SELECT", 100, false) + tpc("FETCH 100", "FETCH", 100, false) + // allow COPY (and others) without row count + tpc("COPY", "COPY", 0, false) + // don't fail on command tags we don't recognize + tpc("UNKNOWNCOMMANDTAG", "UNKNOWNCOMMANDTAG", 0, false) + + // failure cases + tpc("INSERT 1", "", 0, true) // missing oid + tpc("UPDATE 0 1", 
"", 0, true) // too many numbers + tpc("SELECT foo", "", 0, true) // invalid row count +} + +func TestExecerInterface(t *testing.T) { + // Gin up a straw man private struct just for the type check + cn := &conn{c: nil} + var cni interface{} = cn + + _, ok := cni.(driver.Execer) + if !ok { + t.Fatal("Driver doesn't implement Execer") + } +} + +func TestNullAfterNonNull(t *testing.T) { + db := openTestConn(t) + defer db.Close() + + r, err := db.Query("SELECT 9::integer UNION SELECT NULL::integer") + if err != nil { + t.Fatal(err) + } + + var n sql.NullInt64 + + if !r.Next() { + if r.Err() != nil { + t.Fatal(err) + } + t.Fatal("expected row") + } + + if err := r.Scan(&n); err != nil { + t.Fatal(err) + } + + if n.Int64 != 9 { + t.Fatalf("expected 2, not %d", n.Int64) + } + + if !r.Next() { + if r.Err() != nil { + t.Fatal(err) + } + t.Fatal("expected row") + } + + if err := r.Scan(&n); err != nil { + t.Fatal(err) + } + + if n.Valid { + t.Fatal("expected n to be invalid") + } + + if n.Int64 != 0 { + t.Fatalf("expected n to 2, not %d", n.Int64) + } +} + +func Test64BitErrorChecking(t *testing.T) { + defer func() { + if err := recover(); err != nil { + t.Fatal("panic due to 0xFFFFFFFF != -1 " + + "when int is 64 bits") + } + }() + + db := openTestConn(t) + defer db.Close() + + r, err := db.Query(`SELECT * +FROM (VALUES (0::integer, NULL::text), (1, 'test string')) AS t;`) + + if err != nil { + t.Fatal(err) + } + + defer r.Close() + + for r.Next() { + } +} + +func TestCommit(t *testing.T) { + db := openTestConn(t) + defer db.Close() + + _, err := db.Exec("CREATE TEMP TABLE temp (a int)") + if err != nil { + t.Fatal(err) + } + sqlInsert := "INSERT INTO temp VALUES (1)" + sqlSelect := "SELECT * FROM temp" + tx, err := db.Begin() + if err != nil { + t.Fatal(err) + } + _, err = tx.Exec(sqlInsert) + if err != nil { + t.Fatal(err) + } + err = tx.Commit() + if err != nil { + t.Fatal(err) + } + var i int + err = db.QueryRow(sqlSelect).Scan(&i) + if err != nil { + t.Fatal(err) + } + if i != 1 { + t.Fatalf("expected 1, got %d", i) + } +} + +func TestErrorClass(t *testing.T) { + db := openTestConn(t) + defer db.Close() + + _, err := db.Query("SELECT int 'notint'") + if err == nil { + t.Fatal("expected error") + } + pge, ok := err.(*Error) + if !ok { + t.Fatalf("expected *pq.Error, got %#+v", err) + } + if pge.Code.Class() != "22" { + t.Fatalf("expected class 28, got %v", pge.Code.Class()) + } + if pge.Code.Class().Name() != "data_exception" { + t.Fatalf("expected data_exception, got %v", pge.Code.Class().Name()) + } +} + +func TestParseOpts(t *testing.T) { + tests := []struct { + in string + expected values + valid bool + }{ + {"dbname=hello user=goodbye", values{"dbname": "hello", "user": "goodbye"}, true}, + {"dbname=hello user=goodbye ", values{"dbname": "hello", "user": "goodbye"}, true}, + {"dbname = hello user=goodbye", values{"dbname": "hello", "user": "goodbye"}, true}, + {"dbname=hello user =goodbye", values{"dbname": "hello", "user": "goodbye"}, true}, + {"dbname=hello user= goodbye", values{"dbname": "hello", "user": "goodbye"}, true}, + {"host=localhost password='correct horse battery staple'", values{"host": "localhost", "password": "correct horse battery staple"}, true}, + {"dbname=データベース password=パスワード", values{"dbname": "データベース", "password": "パスワード"}, true}, + {"dbname=hello user=''", values{"dbname": "hello", "user": ""}, true}, + {"user='' dbname=hello", values{"dbname": "hello", "user": ""}, true}, + // The last option value is an empty string if there's no non-whitespace after its = + 
{"dbname=hello user= ", values{"dbname": "hello", "user": ""}, true}, + + // The parser ignores spaces after = and interprets the next set of non-whitespace characters as the value. + {"user= password=foo", values{"user": "password=foo"}, true}, + + // Backslash escapes next char + {`user=a\ \'\\b`, values{"user": `a '\b`}, true}, + {`user='a \'b'`, values{"user": `a 'b`}, true}, + + // Incomplete escape + {`user=x\`, values{}, false}, + + // No '=' after the key + {"postgre://marko@internet", values{}, false}, + {"dbname user=goodbye", values{}, false}, + {"user=foo blah", values{}, false}, + {"user=foo blah ", values{}, false}, + + // Unterminated quoted value + {"dbname=hello user='unterminated", values{}, false}, + } + + for _, test := range tests { + o := make(values) + err := parseOpts(test.in, o) + + switch { + case err != nil && test.valid: + t.Errorf("%q got unexpected error: %s", test.in, err) + case err == nil && test.valid && !reflect.DeepEqual(test.expected, o): + t.Errorf("%q got: %#v want: %#v", test.in, o, test.expected) + case err == nil && !test.valid: + t.Errorf("%q expected an error", test.in) + } + } +} + +func TestRuntimeParameters(t *testing.T) { + type RuntimeTestResult int + const ( + ResultUnknown RuntimeTestResult = iota + ResultSuccess + ResultError // other error + ) + + tests := []struct { + conninfo string + param string + expected string + expectedOutcome RuntimeTestResult + }{ + // invalid parameter + {"DOESNOTEXIST=foo", "", "", ResultError}, + // we can only work with a specific value for these two + {"client_encoding=SQL_ASCII", "", "", ResultError}, + {"datestyle='ISO, YDM'", "", "", ResultError}, + // "options" should work exactly as it does in libpq + {"options='-c search_path=pqgotest'", "search_path", "pqgotest", ResultSuccess}, + // pq should override client_encoding in this case + {"options='-c client_encoding=SQL_ASCII'", "client_encoding", "UTF8", ResultSuccess}, + // allow client_encoding to be set explicitly + {"client_encoding=UTF8", "client_encoding", "UTF8", ResultSuccess}, + // test a runtime parameter not supported by libpq + {"work_mem='139kB'", "work_mem", "139kB", ResultSuccess}, + // test fallback_application_name + {"application_name=foo fallback_application_name=bar", "application_name", "foo", ResultSuccess}, + {"application_name='' fallback_application_name=bar", "application_name", "", ResultSuccess}, + {"fallback_application_name=bar", "application_name", "bar", ResultSuccess}, + } + + for _, test := range tests { + db, err := openTestConnConninfo(test.conninfo) + if err != nil { + t.Fatal(err) + } + + // application_name didn't exist before 9.0 + if test.param == "application_name" && getServerVersion(t, db) < 90000 { + db.Close() + continue + } + + tryGetParameterValue := func() (value string, outcome RuntimeTestResult) { + defer db.Close() + row := db.QueryRow("SELECT current_setting($1)", test.param) + err = row.Scan(&value) + if err != nil { + return "", ResultError + } + return value, ResultSuccess + } + + value, outcome := tryGetParameterValue() + if outcome != test.expectedOutcome && outcome == ResultError { + t.Fatalf("%v: unexpected error: %v", test.conninfo, err) + } + if outcome != test.expectedOutcome { + t.Fatalf("unexpected outcome %v (was expecting %v) for conninfo \"%s\"", + outcome, test.expectedOutcome, test.conninfo) + } + if value != test.expected { + t.Fatalf("bad value for %s: got %s, want %s with conninfo \"%s\"", + test.param, value, test.expected, test.conninfo) + } + } +} + +func TestIsUTF8(t 
*testing.T) { + var cases = []struct { + name string + want bool + }{ + {"unicode", true}, + {"utf-8", true}, + {"utf_8", true}, + {"UTF-8", true}, + {"UTF8", true}, + {"utf8", true}, + {"u n ic_ode", true}, + {"ut_f%8", true}, + {"ubf8", false}, + {"punycode", false}, + } + + for _, test := range cases { + if g := isUTF8(test.name); g != test.want { + t.Errorf("isUTF8(%q) = %v want %v", test.name, g, test.want) + } + } +} + +func TestQuoteIdentifier(t *testing.T) { + var cases = []struct { + input string + want string + }{ + {`foo`, `"foo"`}, + {`foo bar baz`, `"foo bar baz"`}, + {`foo"bar`, `"foo""bar"`}, + {"foo\x00bar", `"foo"`}, + {"\x00foo", `""`}, + } + + for _, test := range cases { + got := QuoteIdentifier(test.input) + if got != test.want { + t.Errorf("QuoteIdentifier(%q) = %v want %v", test.input, got, test.want) + } + } +} diff --git a/vendor/github.com/lib/pq/copy.go b/vendor/github.com/lib/pq/copy.go new file mode 100644 index 000000000..101f11133 --- /dev/null +++ b/vendor/github.com/lib/pq/copy.go @@ -0,0 +1,267 @@ +package pq + +import ( + "database/sql/driver" + "encoding/binary" + "errors" + "fmt" + "sync" +) + +var ( + errCopyInClosed = errors.New("pq: copyin statement has already been closed") + errBinaryCopyNotSupported = errors.New("pq: only text format supported for COPY") + errCopyToNotSupported = errors.New("pq: COPY TO is not supported") + errCopyNotSupportedOutsideTxn = errors.New("pq: COPY is only allowed inside a transaction") +) + +// CopyIn creates a COPY FROM statement which can be prepared with +// Tx.Prepare(). The target table should be visible in search_path. +func CopyIn(table string, columns ...string) string { + stmt := "COPY " + QuoteIdentifier(table) + " (" + for i, col := range columns { + if i != 0 { + stmt += ", " + } + stmt += QuoteIdentifier(col) + } + stmt += ") FROM STDIN" + return stmt +} + +// CopyInSchema creates a COPY FROM statement which can be prepared with +// Tx.Prepare(). +func CopyInSchema(schema, table string, columns ...string) string { + stmt := "COPY " + QuoteIdentifier(schema) + "." 
+ QuoteIdentifier(table) + " (" + for i, col := range columns { + if i != 0 { + stmt += ", " + } + stmt += QuoteIdentifier(col) + } + stmt += ") FROM STDIN" + return stmt +} + +type copyin struct { + cn *conn + buffer []byte + rowData chan []byte + done chan bool + + closed bool + + sync.Mutex // guards err + err error +} + +const ciBufferSize = 64 * 1024 + +// flush buffer before the buffer is filled up and needs reallocation +const ciBufferFlushSize = 63 * 1024 + +func (cn *conn) prepareCopyIn(q string) (_ driver.Stmt, err error) { + if !cn.isInTransaction() { + return nil, errCopyNotSupportedOutsideTxn + } + + ci := &copyin{ + cn: cn, + buffer: make([]byte, 0, ciBufferSize), + rowData: make(chan []byte), + done: make(chan bool, 1), + } + // add CopyData identifier + 4 bytes for message length + ci.buffer = append(ci.buffer, 'd', 0, 0, 0, 0) + + b := cn.writeBuf('Q') + b.string(q) + cn.send(b) + +awaitCopyInResponse: + for { + t, r := cn.recv1() + switch t { + case 'G': + if r.byte() != 0 { + err = errBinaryCopyNotSupported + break awaitCopyInResponse + } + go ci.resploop() + return ci, nil + case 'H': + err = errCopyToNotSupported + break awaitCopyInResponse + case 'E': + err = parseError(r) + case 'Z': + if err == nil { + cn.bad = true + errorf("unexpected ReadyForQuery in response to COPY") + } + cn.processReadyForQuery(r) + return nil, err + default: + cn.bad = true + errorf("unknown response for copy query: %q", t) + } + } + + // something went wrong, abort COPY before we return + b = cn.writeBuf('f') + b.string(err.Error()) + cn.send(b) + + for { + t, r := cn.recv1() + switch t { + case 'c', 'C', 'E': + case 'Z': + // correctly aborted, we're done + cn.processReadyForQuery(r) + return nil, err + default: + cn.bad = true + errorf("unknown response for CopyFail: %q", t) + } + } +} + +func (ci *copyin) flush(buf []byte) { + // set message length (without message identifier) + binary.BigEndian.PutUint32(buf[1:], uint32(len(buf)-1)) + + _, err := ci.cn.c.Write(buf) + if err != nil { + panic(err) + } +} + +func (ci *copyin) resploop() { + for { + var r readBuf + t, err := ci.cn.recvMessage(&r) + if err != nil { + ci.cn.bad = true + ci.setError(err) + ci.done <- true + return + } + switch t { + case 'C': + // complete + case 'N': + // NoticeResponse + case 'Z': + ci.cn.processReadyForQuery(&r) + ci.done <- true + return + case 'E': + err := parseError(&r) + ci.setError(err) + default: + ci.cn.bad = true + ci.setError(fmt.Errorf("unknown response during CopyIn: %q", t)) + ci.done <- true + return + } + } +} + +func (ci *copyin) isErrorSet() bool { + ci.Lock() + isSet := (ci.err != nil) + ci.Unlock() + return isSet +} + +// setError() sets ci.err if one has not been set already. Caller must not be +// holding ci.Mutex. +func (ci *copyin) setError(err error) { + ci.Lock() + if ci.err == nil { + ci.err = err + } + ci.Unlock() +} + +func (ci *copyin) NumInput() int { + return -1 +} + +func (ci *copyin) Query(v []driver.Value) (r driver.Rows, err error) { + return nil, ErrNotSupported +} + +// Exec inserts values into the COPY stream. The insert is asynchronous +// and Exec can return errors from previous Exec calls to the same +// COPY stmt. +// +// You need to call Exec(nil) to sync the COPY stream and to get any +// errors from pending data, since Stmt.Close() doesn't return errors +// to the user.
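+// +// A minimal sketch of the pattern at the database/sql level (the table and +// column names are illustrative): +// +// stmt, err := txn.Prepare(CopyIn("t", "a")) +// _, err = stmt.Exec(int64(1)) // buffered; an error may surface later +// _, err = stmt.Exec() // no arguments: sync and collect errors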
+func (ci *copyin) Exec(v []driver.Value) (r driver.Result, err error) { + if ci.closed { + return nil, errCopyInClosed + } + + if ci.cn.bad { + return nil, driver.ErrBadConn + } + defer ci.cn.errRecover(&err) + + if ci.isErrorSet() { + return nil, ci.err + } + + if len(v) == 0 { + return nil, ci.Close() + } + + numValues := len(v) + for i, value := range v { + ci.buffer = appendEncodedText(&ci.cn.parameterStatus, ci.buffer, value) + if i < numValues-1 { + ci.buffer = append(ci.buffer, '\t') + } + } + + ci.buffer = append(ci.buffer, '\n') + + if len(ci.buffer) > ciBufferFlushSize { + ci.flush(ci.buffer) + // reset buffer, keep bytes for message identifier and length + ci.buffer = ci.buffer[:5] + } + + return driver.RowsAffected(0), nil +} + +func (ci *copyin) Close() (err error) { + if ci.closed { // Don't do anything, we're already closed + return nil + } + ci.closed = true + + if ci.cn.bad { + return driver.ErrBadConn + } + defer ci.cn.errRecover(&err) + + if len(ci.buffer) > 0 { + ci.flush(ci.buffer) + } + // Avoid touching the scratch buffer as resploop could be using it. + err = ci.cn.sendSimpleMessage('c') + if err != nil { + return err + } + + <-ci.done + + if ci.isErrorSet() { + err = ci.err + return err + } + return nil +} diff --git a/vendor/github.com/lib/pq/copy_test.go b/vendor/github.com/lib/pq/copy_test.go new file mode 100644 index 000000000..86745b38f --- /dev/null +++ b/vendor/github.com/lib/pq/copy_test.go @@ -0,0 +1,465 @@ +package pq + +import ( + "bytes" + "database/sql" + "database/sql/driver" + "strings" + "testing" +) + +func TestCopyInStmt(t *testing.T) { + var stmt string + stmt = CopyIn("table name") + if stmt != `COPY "table name" () FROM STDIN` { + t.Fatal(stmt) + } + + stmt = CopyIn("table name", "column 1", "column 2") + if stmt != `COPY "table name" ("column 1", "column 2") FROM STDIN` { + t.Fatal(stmt) + } + + stmt = CopyIn(`table " name """`, `co"lumn""`) + if stmt != `COPY "table "" name """"""" ("co""lumn""""") FROM STDIN` { + t.Fatal(stmt) + } +} + +func TestCopyInSchemaStmt(t *testing.T) { + var stmt string + stmt = CopyInSchema("schema name", "table name") + if stmt != `COPY "schema name"."table name" () FROM STDIN` { + t.Fatal(stmt) + } + + stmt = CopyInSchema("schema name", "table name", "column 1", "column 2") + if stmt != `COPY "schema name"."table name" ("column 1", "column 2") FROM STDIN` { + t.Fatal(stmt) + } + + stmt = CopyInSchema(`schema " name """`, `table " name """`, `co"lumn""`) + if stmt != `COPY "schema "" name """"""".`+ + `"table "" name """"""" ("co""lumn""""") FROM STDIN` { + t.Fatal(stmt) + } +} + +func TestCopyInMultipleValues(t *testing.T) { + db := openTestConn(t) + defer db.Close() + + txn, err := db.Begin() + if err != nil { + t.Fatal(err) + } + defer txn.Rollback() + + _, err = txn.Exec("CREATE TEMP TABLE temp (a int, b varchar)") + if err != nil { + t.Fatal(err) + } + + stmt, err := txn.Prepare(CopyIn("temp", "a", "b")) + if err != nil { + t.Fatal(err) + } + + longString := strings.Repeat("#", 500) + + for i := 0; i < 500; i++ { + _, err = stmt.Exec(int64(i), longString) + if err != nil { + t.Fatal(err) + } + } + + _, err = stmt.Exec() + if err != nil { + t.Fatal(err) + } + + err = stmt.Close() + if err != nil { + t.Fatal(err) + } + + var num int + err = txn.QueryRow("SELECT COUNT(*) FROM temp").Scan(&num) + if err != nil { + t.Fatal(err) + } + + if num != 500 { + t.Fatalf("expected 500 items, not %d", num) + } +} + +func TestCopyInRaiseStmtTrigger(t *testing.T) { + db := openTestConn(t) + defer db.Close() + + if 
getServerVersion(t, db) < 90000 { + var exists int + err := db.QueryRow("SELECT 1 FROM pg_language WHERE lanname = 'plpgsql'").Scan(&exists) + if err == sql.ErrNoRows { + t.Skip("language PL/PgSQL does not exist; skipping TestCopyInRaiseStmtTrigger") + } else if err != nil { + t.Fatal(err) + } + } + + txn, err := db.Begin() + if err != nil { + t.Fatal(err) + } + defer txn.Rollback() + + _, err = txn.Exec("CREATE TEMP TABLE temp (a int, b varchar)") + if err != nil { + t.Fatal(err) + } + + _, err = txn.Exec(` + CREATE OR REPLACE FUNCTION pg_temp.temptest() + RETURNS trigger AS + $BODY$ begin + raise notice 'Hello world'; + return new; + end $BODY$ + LANGUAGE plpgsql`) + if err != nil { + t.Fatal(err) + } + + _, err = txn.Exec(` + CREATE TRIGGER temptest_trigger + BEFORE INSERT + ON temp + FOR EACH ROW + EXECUTE PROCEDURE pg_temp.temptest()`) + if err != nil { + t.Fatal(err) + } + + stmt, err := txn.Prepare(CopyIn("temp", "a", "b")) + if err != nil { + t.Fatal(err) + } + + longString := strings.Repeat("#", 500) + + _, err = stmt.Exec(int64(1), longString) + if err != nil { + t.Fatal(err) + } + + _, err = stmt.Exec() + if err != nil { + t.Fatal(err) + } + + err = stmt.Close() + if err != nil { + t.Fatal(err) + } + + var num int + err = txn.QueryRow("SELECT COUNT(*) FROM temp").Scan(&num) + if err != nil { + t.Fatal(err) + } + + if num != 1 { + t.Fatalf("expected 1 item, not %d", num) + } +} + +func TestCopyInTypes(t *testing.T) { + db := openTestConn(t) + defer db.Close() + + txn, err := db.Begin() + if err != nil { + t.Fatal(err) + } + defer txn.Rollback() + + _, err = txn.Exec("CREATE TEMP TABLE temp (num INTEGER, text VARCHAR, blob BYTEA, nothing VARCHAR)") + if err != nil { + t.Fatal(err) + } + + stmt, err := txn.Prepare(CopyIn("temp", "num", "text", "blob", "nothing")) + if err != nil { + t.Fatal(err) + } + + _, err = stmt.Exec(int64(1234567890), "Héllö\n ☃!\r\t\\", []byte{0, 255, 9, 10, 13}, nil) + if err != nil { + t.Fatal(err) + } + + _, err = stmt.Exec() + if err != nil { + t.Fatal(err) + } + + err = stmt.Close() + if err != nil { + t.Fatal(err) + } + + var num int + var text string + var blob []byte + var nothing sql.NullString + + err = txn.QueryRow("SELECT * FROM temp").Scan(&num, &text, &blob, &nothing) + if err != nil { + t.Fatal(err) + } + + if num != 1234567890 { + t.Fatal("unexpected result", num) + } + if text != "Héllö\n ☃!\r\t\\" { + t.Fatal("unexpected result", text) + } + if !bytes.Equal(blob, []byte{0, 255, 9, 10, 13}) { + t.Fatal("unexpected result", blob) + } + if nothing.Valid { + t.Fatal("unexpected result", nothing.String) + } +} + +func TestCopyInWrongType(t *testing.T) { + db := openTestConn(t) + defer db.Close() + + txn, err := db.Begin() + if err != nil { + t.Fatal(err) + } + defer txn.Rollback() + + _, err = txn.Exec("CREATE TEMP TABLE temp (num INTEGER)") + if err != nil { + t.Fatal(err) + } + + stmt, err := txn.Prepare(CopyIn("temp", "num")) + if err != nil { + t.Fatal(err) + } + defer stmt.Close() + + _, err = stmt.Exec("Héllö\n ☃!\r\t\\") + if err != nil { + t.Fatal(err) + } + + _, err = stmt.Exec() + if err == nil { + t.Fatal("expected error") + } + if pge := err.(*Error); pge.Code.Name() != "invalid_text_representation" { + t.Fatalf("expected 'invalid input syntax for integer' error, got %s (%+v)", pge.Code.Name(), pge) + } +} + +func TestCopyOutsideOfTxnError(t *testing.T) { + db := openTestConn(t) + defer db.Close() + + _, err := db.Prepare(CopyIn("temp", "num")) + if err == nil { + t.Fatal("COPY outside of transaction did not return an error") + }
+ if err != errCopyNotSupportedOutsideTxn { + t.Fatalf("expected %s, got %s", errCopyNotSupportedOutsideTxn, err) + } +} + +func TestCopyInBinaryError(t *testing.T) { + db := openTestConn(t) + defer db.Close() + + txn, err := db.Begin() + if err != nil { + t.Fatal(err) + } + defer txn.Rollback() + + _, err = txn.Exec("CREATE TEMP TABLE temp (num INTEGER)") + if err != nil { + t.Fatal(err) + } + _, err = txn.Prepare("COPY temp (num) FROM STDIN WITH binary") + if err != errBinaryCopyNotSupported { + t.Fatalf("expected %s, got %+v", errBinaryCopyNotSupported, err) + } + // check that the protocol is in a valid state + err = txn.Rollback() + if err != nil { + t.Fatal(err) + } +} + +func TestCopyFromError(t *testing.T) { + db := openTestConn(t) + defer db.Close() + + txn, err := db.Begin() + if err != nil { + t.Fatal(err) + } + defer txn.Rollback() + + _, err = txn.Exec("CREATE TEMP TABLE temp (num INTEGER)") + if err != nil { + t.Fatal(err) + } + _, err = txn.Prepare("COPY temp (num) TO STDOUT") + if err != errCopyToNotSupported { + t.Fatalf("expected %s, got %+v", errCopyToNotSupported, err) + } + // check that the protocol is in a valid state + err = txn.Rollback() + if err != nil { + t.Fatal(err) + } +} + +func TestCopySyntaxError(t *testing.T) { + db := openTestConn(t) + defer db.Close() + + txn, err := db.Begin() + if err != nil { + t.Fatal(err) + } + defer txn.Rollback() + + _, err = txn.Prepare("COPY ") + if err == nil { + t.Fatal("expected error") + } + if pge := err.(*Error); pge.Code.Name() != "syntax_error" { + t.Fatalf("expected syntax error, got %s (%+v)", pge.Code.Name(), pge) + } + // check that the protocol is in a valid state + err = txn.Rollback() + if err != nil { + t.Fatal(err) + } +} + +// Tests for connection errors in copyin.resploop() +func TestCopyRespLoopConnectionError(t *testing.T) { + db := openTestConn(t) + defer db.Close() + + txn, err := db.Begin() + if err != nil { + t.Fatal(err) + } + defer txn.Rollback() + + var pid int + err = txn.QueryRow("SELECT pg_backend_pid()").Scan(&pid) + if err != nil { + t.Fatal(err) + } + + _, err = txn.Exec("CREATE TEMP TABLE temp (a int)") + if err != nil { + t.Fatal(err) + } + + stmt, err := txn.Prepare(CopyIn("temp", "a")) + if err != nil { + t.Fatal(err) + } + defer stmt.Close() + + _, err = db.Exec("SELECT pg_terminate_backend($1)", pid) + if err != nil { + t.Fatal(err) + } + + if getServerVersion(t, db) < 90500 { + // We have to try and send something over, since postgres before + // version 9.5 won't process SIGTERMs while it's waiting for + // CopyData/CopyEnd messages; see tcop/postgres.c.
+ _, err = stmt.Exec(1) + if err != nil { + t.Fatal(err) + } + } + _, err = stmt.Exec() + if err == nil { + t.Fatalf("expected error") + } + pge, ok := err.(*Error) + if !ok { + if err == driver.ErrBadConn { + // likely an EPIPE + } else { + t.Fatalf("expected *pq.Error or driver.ErrBadConn, got %+#v", err) + } + } else if pge.Code.Name() != "admin_shutdown" { + t.Fatalf("expected admin_shutdown, got %s", pge.Code.Name()) + } + + _ = stmt.Close() +} + +func BenchmarkCopyIn(b *testing.B) { + db := openTestConn(b) + defer db.Close() + + txn, err := db.Begin() + if err != nil { + b.Fatal(err) + } + defer txn.Rollback() + + _, err = txn.Exec("CREATE TEMP TABLE temp (a int, b varchar)") + if err != nil { + b.Fatal(err) + } + + stmt, err := txn.Prepare(CopyIn("temp", "a", "b")) + if err != nil { + b.Fatal(err) + } + + for i := 0; i < b.N; i++ { + _, err = stmt.Exec(int64(i), "hello world!") + if err != nil { + b.Fatal(err) + } + } + + _, err = stmt.Exec() + if err != nil { + b.Fatal(err) + } + + err = stmt.Close() + if err != nil { + b.Fatal(err) + } + + var num int + err = txn.QueryRow("SELECT COUNT(*) FROM temp").Scan(&num) + if err != nil { + b.Fatal(err) + } + + if num != b.N { + b.Fatalf("expected %d items, not %d", b.N, num) + } +} diff --git a/vendor/github.com/lib/pq/doc.go b/vendor/github.com/lib/pq/doc.go new file mode 100644 index 000000000..19798dfc9 --- /dev/null +++ b/vendor/github.com/lib/pq/doc.go @@ -0,0 +1,212 @@ +/* +Package pq is a pure Go Postgres driver for the database/sql package. + +In most cases clients will use the database/sql package instead of +using this package directly. For example: + + import ( + "database/sql" + + _ "github.com/lib/pq" + ) + + func main() { + db, err := sql.Open("postgres", "user=pqgotest dbname=pqgotest sslmode=verify-full") + if err != nil { + log.Fatal(err) + } + + age := 21 + rows, err := db.Query("SELECT name FROM users WHERE age = $1", age) + … + } + +You can also connect to a database using a URL. For example: + + db, err := sql.Open("postgres", "postgres://pqgotest:password@localhost/pqgotest?sslmode=verify-full") + + +Connection String Parameters + + +Similarly to libpq, when establishing a connection using pq you are expected to +supply a connection string containing zero or more parameters. +A subset of the connection parameters supported by libpq are also supported by pq. +Additionally, pq also lets you specify run-time parameters (such as search_path or work_mem) +directly in the connection string. This is different from libpq, which does not allow +run-time parameters in the connection string, instead requiring you to supply +them in the options parameter. + +For compatibility with libpq, the following special connection parameters are +supported: + + * dbname - The name of the database to connect to + * user - The user to sign in as + * password - The user's password + * host - The host to connect to. Values that start with / are for unix domain sockets. (default is localhost) + * port - The port to bind to. (default is 5432) + * sslmode - Whether or not to use SSL (default is require, this is not the default for libpq) + * fallback_application_name - An application_name to fall back to if one isn't provided. + * connect_timeout - Maximum wait for connection, in seconds. Zero or not specified means wait indefinitely. + * sslcert - Cert file location. The file must contain PEM encoded data. + * sslkey - Key file location. The file must contain PEM encoded data. + * sslrootcert - The location of the root certificate file. 
The file must contain PEM encoded data. + +Valid values for sslmode are: + + * disable - No SSL + * require - Always SSL (skip verification) + * verify-ca - Always SSL (verify that the certificate presented by the server was signed by a trusted CA) + * verify-full - Always SSL (verify that the certificate presented by the server was signed by a trusted CA and the server host name matches the one in the certificate) + +See http://www.postgresql.org/docs/current/static/libpq-connect.html#LIBPQ-CONNSTRING +for more information about connection string parameters. + +Use single quotes for values that contain whitespace: + + "user=pqgotest password='with spaces'" + +A backslash will escape the next character in values: + + "user=space\ man password='it\'s valid'" + +Note that the connection parameter client_encoding (which sets the +text encoding for the connection) may be set but must be "UTF8", +matching with the same rules as Postgres. It is an error to provide +any other value. + +In addition to the parameters listed above, any run-time parameter that can be +set at backend start time can be set in the connection string. For more +information, see +http://www.postgresql.org/docs/current/static/runtime-config.html. + +Most environment variables as specified at http://www.postgresql.org/docs/current/static/libpq-envars.html +supported by libpq are also supported by pq. If any of the environment +variables not supported by pq are set, pq will panic during connection +establishment. Environment variables have a lower precedence than explicitly +provided connection parameters. + +The pgpass mechanism as described in http://www.postgresql.org/docs/current/static/libpq-pgpass.html +is supported, but on Windows PGPASSFILE must be specified explicitly. + +Queries + +database/sql does not dictate any specific format for parameter +markers in query strings, and pq uses the Postgres-native ordinal markers, +as shown above. The same marker can be reused for the same parameter: + + rows, err := db.Query(`SELECT name FROM users WHERE favorite_fruit = $1 + OR age BETWEEN $2 AND $2 + 3`, "orange", 64) + +pq does not support the LastInsertId() method of the Result type in database/sql. +To return the identifier of an INSERT (or UPDATE or DELETE), use the Postgres +RETURNING clause with a standard Query or QueryRow call: + + var userid int + err := db.QueryRow(`INSERT INTO users(name, favorite_fruit, age) + VALUES('beatrice', 'starfruit', 93) RETURNING id`).Scan(&userid) + +For more details on RETURNING, see the Postgres documentation: + + http://www.postgresql.org/docs/current/static/sql-insert.html + http://www.postgresql.org/docs/current/static/sql-update.html + http://www.postgresql.org/docs/current/static/sql-delete.html + +For additional instructions on querying see the documentation for the database/sql package. + +Errors + +pq may return errors of type *pq.Error which can be interrogated for error details: + + if err, ok := err.(*pq.Error); ok { + fmt.Println("pq error:", err.Code.Name()) + } + +See the pq.Error type for details. + + +Bulk imports + +You can perform bulk imports by preparing a statement returned by pq.CopyIn (or +pq.CopyInSchema) in an explicit transaction (sql.Tx). The returned statement +handle can then be repeatedly "executed" to copy data into the target table. +After all data has been processed you should call Exec() once with no arguments +to flush all buffered data.
Any call to Exec() might return an error which +should be handled appropriately, but because of the internal buffering an error +returned by Exec() might not be related to the data passed in the call that +failed. + +CopyIn uses COPY FROM internally. It is not possible to COPY outside of an +explicit transaction in pq. + +Usage example: + + txn, err := db.Begin() + if err != nil { + log.Fatal(err) + } + + stmt, err := txn.Prepare(pq.CopyIn("users", "name", "age")) + if err != nil { + log.Fatal(err) + } + + for _, user := range users { + _, err = stmt.Exec(user.Name, int64(user.Age)) + if err != nil { + log.Fatal(err) + } + } + + _, err = stmt.Exec() + if err != nil { + log.Fatal(err) + } + + err = stmt.Close() + if err != nil { + log.Fatal(err) + } + + err = txn.Commit() + if err != nil { + log.Fatal(err) + } + + +Notifications + + +PostgreSQL supports a simple publish/subscribe model over database +connections. See http://www.postgresql.org/docs/current/static/sql-notify.html +for more information about the general mechanism. + +To start listening for notifications, you first have to open a new connection +to the database by calling NewListener. This connection can not be used for +anything other than LISTEN / NOTIFY. Calling Listen will open a "notification +channel"; once a notification channel is open, a notification generated on that +channel will effect a send on the Listener.Notify channel. A notification +channel will remain open until Unlisten is called, though connection loss might +result in some notifications being lost. To solve this problem, Listener sends +a nil pointer over the Notify channel any time the connection is re-established +following a connection loss. The application can get information about the +state of the underlying connection by setting an event callback in the call to +NewListener. + +A single Listener can safely be used from concurrent goroutines, which means +that there is often no need to create more than one Listener in your +application. However, a Listener is always connected to a single database, so +you will need to create a new Listener instance for every database you want to +receive notifications in. + +The channel name in both Listen and Unlisten is case sensitive, and can contain +any characters legal in an identifier (see +http://www.postgresql.org/docs/current/static/sql-syntax-lexical.html#SQL-SYNTAX-IDENTIFIERS +for more information). Note that the channel name will be truncated to 63 +bytes by the PostgreSQL server. + +You can find a complete, working example of Listener usage at +http://godoc.org/github.com/lib/pq/listen_example. 
+ +*/ +package pq diff --git a/vendor/github.com/lib/pq/encode.go b/vendor/github.com/lib/pq/encode.go new file mode 100644 index 000000000..88422eb3d --- /dev/null +++ b/vendor/github.com/lib/pq/encode.go @@ -0,0 +1,538 @@ +package pq + +import ( + "bytes" + "database/sql/driver" + "encoding/binary" + "encoding/hex" + "fmt" + "math" + "strconv" + "strings" + "sync" + "time" + + "github.com/lib/pq/oid" +) + +func binaryEncode(parameterStatus *parameterStatus, x interface{}) []byte { + switch v := x.(type) { + case []byte: + return v + default: + return encode(parameterStatus, x, oid.T_unknown) + } + panic("not reached") +} + +func encode(parameterStatus *parameterStatus, x interface{}, pgtypOid oid.Oid) []byte { + switch v := x.(type) { + case int64: + return strconv.AppendInt(nil, v, 10) + case float64: + return strconv.AppendFloat(nil, v, 'f', -1, 64) + case []byte: + if pgtypOid == oid.T_bytea { + return encodeBytea(parameterStatus.serverVersion, v) + } + + return v + case string: + if pgtypOid == oid.T_bytea { + return encodeBytea(parameterStatus.serverVersion, []byte(v)) + } + + return []byte(v) + case bool: + return strconv.AppendBool(nil, v) + case time.Time: + return formatTs(v) + + default: + errorf("encode: unknown type for %T", v) + } + + panic("not reached") +} + +func decode(parameterStatus *parameterStatus, s []byte, typ oid.Oid, f format) interface{} { + if f == formatBinary { + return binaryDecode(parameterStatus, s, typ) + } else { + return textDecode(parameterStatus, s, typ) + } +} + +func binaryDecode(parameterStatus *parameterStatus, s []byte, typ oid.Oid) interface{} { + switch typ { + case oid.T_bytea: + return s + case oid.T_int8: + return int64(binary.BigEndian.Uint64(s)) + case oid.T_int4: + return int64(int32(binary.BigEndian.Uint32(s))) + case oid.T_int2: + return int64(int16(binary.BigEndian.Uint16(s))) + + default: + errorf("don't know how to decode binary parameter of type %d", uint32(typ)) + } + + panic("not reached") +} + +func textDecode(parameterStatus *parameterStatus, s []byte, typ oid.Oid) interface{} { + switch typ { + case oid.T_bytea: + return parseBytea(s) + case oid.T_timestamptz: + return parseTs(parameterStatus.currentLocation, string(s)) + case oid.T_timestamp, oid.T_date: + return parseTs(nil, string(s)) + case oid.T_time: + return mustParse("15:04:05", typ, s) + case oid.T_timetz: + return mustParse("15:04:05-07", typ, s) + case oid.T_bool: + return s[0] == 't' + case oid.T_int8, oid.T_int4, oid.T_int2: + i, err := strconv.ParseInt(string(s), 10, 64) + if err != nil { + errorf("%s", err) + } + return i + case oid.T_float4, oid.T_float8: + bits := 64 + if typ == oid.T_float4 { + bits = 32 + } + f, err := strconv.ParseFloat(string(s), bits) + if err != nil { + errorf("%s", err) + } + return f + } + + return s +} + +// appendEncodedText encodes item in text format as required by COPY +// and appends to buf +func appendEncodedText(parameterStatus *parameterStatus, buf []byte, x interface{}) []byte { + switch v := x.(type) { + case int64: + return strconv.AppendInt(buf, v, 10) + case float64: + return strconv.AppendFloat(buf, v, 'f', -1, 64) + case []byte: + encodedBytea := encodeBytea(parameterStatus.serverVersion, v) + return appendEscapedText(buf, string(encodedBytea)) + case string: + return appendEscapedText(buf, v) + case bool: + return strconv.AppendBool(buf, v) + case time.Time: + return append(buf, formatTs(v)...) + case nil: + return append(buf, "\\N"...)
+ default: + errorf("encode: unknown type for %T", v) + } + + panic("not reached") +} + +func appendEscapedText(buf []byte, text string) []byte { + escapeNeeded := false + startPos := 0 + var c byte + + // check if we need to escape + for i := 0; i < len(text); i++ { + c = text[i] + if c == '\\' || c == '\n' || c == '\r' || c == '\t' { + escapeNeeded = true + startPos = i + break + } + } + if !escapeNeeded { + return append(buf, text...) + } + + // copy till first char to escape, iterate the rest + result := append(buf, text[:startPos]...) + for i := startPos; i < len(text); i++ { + c = text[i] + switch c { + case '\\': + result = append(result, '\\', '\\') + case '\n': + result = append(result, '\\', 'n') + case '\r': + result = append(result, '\\', 'r') + case '\t': + result = append(result, '\\', 't') + default: + result = append(result, c) + } + } + return result +} + +func mustParse(f string, typ oid.Oid, s []byte) time.Time { + str := string(s) + + // check for a minute offset in the timezone (e.g. +05:30) + if (typ == oid.T_timestamptz || typ == oid.T_timetz) && + str[len(str)-3] == ':' { + f += ":00" + } + t, err := time.Parse(f, str) + if err != nil { + errorf("decode: %s", err) + } + return t +} + +func expect(str, char string, pos int) { + if c := str[pos : pos+1]; c != char { + errorf("expected '%v' at position %v; got '%v'", char, pos, c) + } +} + +func mustAtoi(str string) int { + result, err := strconv.Atoi(str) + if err != nil { + errorf("expected number; got '%v'", str) + } + return result +} + +// The location cache caches the time zones typically used by the client. +type locationCache struct { + cache map[int]*time.Location + lock sync.Mutex +} + +// All connections share the same list of timezones. Benchmarking shows that +// about 5% speed could be gained by putting the cache in the connection and +// losing the mutex, at the cost of a small amount of memory and a somewhat +// significant increase in code complexity. +var globalLocationCache *locationCache = newLocationCache() + +func newLocationCache() *locationCache { + return &locationCache{cache: make(map[int]*time.Location)} +} + +// Returns the cached timezone for the specified offset, creating and caching +// it if necessary. +func (c *locationCache) getLocation(offset int) *time.Location { + c.lock.Lock() + defer c.lock.Unlock() + + location, ok := c.cache[offset] + if !ok { + location = time.FixedZone("", offset) + c.cache[offset] = location + } + + return location +} + +var infinityTsEnabled = false +var infinityTsNegative time.Time +var infinityTsPositive time.Time + +const ( + infinityTsEnabledAlready = "pq: infinity timestamp enabled already" + infinityTsNegativeMustBeSmaller = "pq: infinity timestamp: negative value must be smaller (before) than positive" +) + +/* + * If EnableInfinityTs is not called, "-infinity" and "infinity" will return + * []byte("-infinity") and []byte("infinity") respectively, and potentially + * cause error "sql: Scan error on column index 0: unsupported driver -> Scan pair: []uint8 -> *time.Time", + * when scanning into a time.Time value. + * + * Once EnableInfinityTs has been called, all connections created using this + * driver will decode Postgres' "-infinity" and "infinity" for "timestamp", + * "timestamp with time zone" and "date" types to the predefined minimum and + * maximum times, respectively. When encoding time.Time values, any time which + * equals or precedes the predefined minimum time will be encoded to + * "-infinity".
Any values at or past the maximum time will similarly be + * encoded to "infinity". + * + * + * If EnableInfinityTs is called with negative >= positive, it will panic. + * Calling EnableInfinityTs after a connection has been established results in + * undefined behavior. If EnableInfinityTs is called more than once, it will + * panic. + */ +func EnableInfinityTs(negative time.Time, positive time.Time) { + if infinityTsEnabled { + panic(infinityTsEnabledAlready) + } + if !negative.Before(positive) { + panic(infinityTsNegativeMustBeSmaller) + } + infinityTsEnabled = true + infinityTsNegative = negative + infinityTsPositive = positive +} + +/* + * Testing might want to toggle infinityTsEnabled + */ +func disableInfinityTs() { + infinityTsEnabled = false +} + +// This is a time function specific to the Postgres default DateStyle +// setting ("ISO, MDY"), the only one we currently support. This +// accounts for the discrepancies between the parsing available with +// time.Parse and the Postgres date formatting quirks. +func parseTs(currentLocation *time.Location, str string) interface{} { + switch str { + case "-infinity": + if infinityTsEnabled { + return infinityTsNegative + } + return []byte(str) + case "infinity": + if infinityTsEnabled { + return infinityTsPositive + } + return []byte(str) + } + + monSep := strings.IndexRune(str, '-') + // this is Gregorian year, not ISO Year + // In Gregorian system, the year 1 BC is followed by AD 1 + year := mustAtoi(str[:monSep]) + daySep := monSep + 3 + month := mustAtoi(str[monSep+1 : daySep]) + expect(str, "-", daySep) + timeSep := daySep + 3 + day := mustAtoi(str[daySep+1 : timeSep]) + + var hour, minute, second int + if len(str) > monSep+len("01-01")+1 { + expect(str, " ", timeSep) + minSep := timeSep + 3 + expect(str, ":", minSep) + hour = mustAtoi(str[timeSep+1 : minSep]) + secSep := minSep + 3 + expect(str, ":", secSep) + minute = mustAtoi(str[minSep+1 : secSep]) + secEnd := secSep + 3 + second = mustAtoi(str[secSep+1 : secEnd]) + } + remainderIdx := monSep + len("01-01 00:00:00") + 1 + // Three optional (but ordered) sections follow: the + // fractional seconds, the time zone offset, and the BC + // designation. We set them up here and adjust the other + // offsets if the preceding sections exist. + + nanoSec := 0 + tzOff := 0 + + if remainderIdx < len(str) && str[remainderIdx:remainderIdx+1] == "." 
{ + fracStart := remainderIdx + 1 + fracOff := strings.IndexAny(str[fracStart:], "-+ ") + if fracOff < 0 { + fracOff = len(str) - fracStart + } + fracSec := mustAtoi(str[fracStart : fracStart+fracOff]) + nanoSec = fracSec * (1000000000 / int(math.Pow(10, float64(fracOff)))) + + remainderIdx += fracOff + 1 + } + if tzStart := remainderIdx; tzStart < len(str) && (str[tzStart:tzStart+1] == "-" || str[tzStart:tzStart+1] == "+") { + // time zone separator is always '-' or '+' (UTC is +00) + var tzSign int + if c := str[tzStart : tzStart+1]; c == "-" { + tzSign = -1 + } else if c == "+" { + tzSign = +1 + } else { + errorf("expected '-' or '+' at position %v; got %v", tzStart, c) + } + tzHours := mustAtoi(str[tzStart+1 : tzStart+3]) + remainderIdx += 3 + var tzMin, tzSec int + if tzStart+3 < len(str) && str[tzStart+3:tzStart+4] == ":" { + tzMin = mustAtoi(str[tzStart+4 : tzStart+6]) + remainderIdx += 3 + } + if tzStart+6 < len(str) && str[tzStart+6:tzStart+7] == ":" { + tzSec = mustAtoi(str[tzStart+7 : tzStart+9]) + remainderIdx += 3 + } + tzOff = tzSign * ((tzHours * 60 * 60) + (tzMin * 60) + tzSec) + } + var isoYear int + if remainderIdx < len(str) && str[remainderIdx:remainderIdx+3] == " BC" { + isoYear = 1 - year + remainderIdx += 3 + } else { + isoYear = year + } + if remainderIdx < len(str) { + errorf("expected end of input, got %v", str[remainderIdx:]) + } + t := time.Date(isoYear, time.Month(month), day, + hour, minute, second, nanoSec, + globalLocationCache.getLocation(tzOff)) + + if currentLocation != nil { + // Set the location of the returned Time based on the session's + // TimeZone value, but only if the local time zone database agrees with + // the remote database on the offset. + lt := t.In(currentLocation) + _, newOff := lt.Zone() + if newOff == tzOff { + t = lt + } + } + + return t +} + +// formatTs formats t into a format postgres understands. +func formatTs(t time.Time) (b []byte) { + if infinityTsEnabled { + // t <= -infinity : ! (t > -infinity) + if !t.After(infinityTsNegative) { + return []byte("-infinity") + } + // t >= infinity : ! (!t < infinity) + if !t.Before(infinityTsPositive) { + return []byte("infinity") + } + } + // Need to send dates before 0001 A.D. with " BC" suffix, instead of the + // minus sign preferred by Go. + // Beware, "0000" in ISO is "1 BC", "-0001" is "2 BC" and so on + bc := false + if t.Year() <= 0 { + // flip year sign, and add 1, e.g: "0" will be "1", and "-10" will be "11" + t = t.AddDate((-t.Year())*2+1, 0, 0) + bc = true + } + b = []byte(t.Format(time.RFC3339Nano)) + + _, offset := t.Zone() + offset = offset % 60 + if offset != 0 { + // RFC3339Nano already printed the minus sign + if offset < 0 { + offset = -offset + } + + b = append(b, ':') + if offset < 10 { + b = append(b, '0') + } + b = strconv.AppendInt(b, int64(offset), 10) + } + + if bc { + b = append(b, " BC"...) + } + return b +} + +// Parse a bytea value received from the server. Both "hex" and the legacy +// "escape" format are supported. 
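+// For example, the bytes {0x48, 0x69, 0x0a} arrive as "\x48690a" in the hex +// format and as "Hi\012" in the escape format (example values only).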
+func parseBytea(s []byte) (result []byte) { + if len(s) >= 2 && bytes.Equal(s[:2], []byte("\\x")) { + // bytea_output = hex + s = s[2:] // trim off leading "\\x" + result = make([]byte, hex.DecodedLen(len(s))) + _, err := hex.Decode(result, s) + if err != nil { + errorf("%s", err) + } + } else { + // bytea_output = escape + for len(s) > 0 { + if s[0] == '\\' { + // escaped '\\' + if len(s) >= 2 && s[1] == '\\' { + result = append(result, '\\') + s = s[2:] + continue + } + + // '\\' followed by an octal number + if len(s) < 4 { + errorf("invalid bytea sequence %v", s) + } + r, err := strconv.ParseInt(string(s[1:4]), 8, 9) + if err != nil { + errorf("could not parse bytea value: %s", err.Error()) + } + result = append(result, byte(r)) + s = s[4:] + } else { + // We hit an unescaped, raw byte. Try to read in as many as + // possible in one go. + i := bytes.IndexByte(s, '\\') + if i == -1 { + result = append(result, s...) + break + } + result = append(result, s[:i]...) + s = s[i:] + } + } + } + + return result +} + +func encodeBytea(serverVersion int, v []byte) (result []byte) { + if serverVersion >= 90000 { + // Use the hex format if we know that the server supports it + result = make([]byte, 2+hex.EncodedLen(len(v))) + result[0] = '\\' + result[1] = 'x' + hex.Encode(result[2:], v) + } else { + // .. or resort to "escape" + for _, b := range v { + if b == '\\' { + result = append(result, '\\', '\\') + } else if b < 0x20 || b > 0x7e { + result = append(result, []byte(fmt.Sprintf("\\%03o", b))...) + } else { + result = append(result, b) + } + } + } + + return result +} + +// NullTime represents a time.Time that may be null. NullTime implements the +// sql.Scanner interface so it can be used as a scan destination, similar to +// sql.NullString. +type NullTime struct { + Time time.Time + Valid bool // Valid is true if Time is not NULL +} + +// Scan implements the Scanner interface. +func (nt *NullTime) Scan(value interface{}) error { + nt.Time, nt.Valid = value.(time.Time) + return nil +} + +// Value implements the driver Valuer interface. 
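+//
+// A minimal NullTime sketch (the "events" table and "created_at" column
+// here are hypothetical):
+//
+//	var nt pq.NullTime
+//	err := db.QueryRow("SELECT created_at FROM events WHERE id = $1", 1).Scan(&nt)
+//	if err == nil && nt.Valid {
+//		fmt.Println("created at", nt.Time)
+//	}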
+func (nt NullTime) Value() (driver.Value, error) {
+	if !nt.Valid {
+		return nil, nil
+	}
+	return nt.Time, nil
+}
diff --git a/vendor/github.com/lib/pq/encode_test.go b/vendor/github.com/lib/pq/encode_test.go
new file mode 100644
index 000000000..97b663886
--- /dev/null
+++ b/vendor/github.com/lib/pq/encode_test.go
@@ -0,0 +1,719 @@
+package pq
+
+import (
+	"bytes"
+	"database/sql"
+	"fmt"
+	"testing"
+	"time"
+
+	"github.com/lib/pq/oid"
+)
+
+func TestScanTimestamp(t *testing.T) {
+	var nt NullTime
+	tn := time.Now()
+	nt.Scan(tn)
+	if !nt.Valid {
+		t.Errorf("Expected Valid=true")
+	}
+	if nt.Time != tn {
+		t.Errorf("Time value mismatch")
+	}
+}
+
+func TestScanNilTimestamp(t *testing.T) {
+	var nt NullTime
+	nt.Scan(nil)
+	if nt.Valid {
+		t.Errorf("Expected Valid=false")
+	}
+}
+
+var timeTests = []struct {
+	str     string
+	timeval time.Time
+}{
+	{"22001-02-03", time.Date(22001, time.February, 3, 0, 0, 0, 0, time.FixedZone("", 0))},
+	{"2001-02-03", time.Date(2001, time.February, 3, 0, 0, 0, 0, time.FixedZone("", 0))},
+	{"2001-02-03 04:05:06", time.Date(2001, time.February, 3, 4, 5, 6, 0, time.FixedZone("", 0))},
+	{"2001-02-03 04:05:06.000001", time.Date(2001, time.February, 3, 4, 5, 6, 1000, time.FixedZone("", 0))},
+	{"2001-02-03 04:05:06.00001", time.Date(2001, time.February, 3, 4, 5, 6, 10000, time.FixedZone("", 0))},
+	{"2001-02-03 04:05:06.0001", time.Date(2001, time.February, 3, 4, 5, 6, 100000, time.FixedZone("", 0))},
+	{"2001-02-03 04:05:06.001", time.Date(2001, time.February, 3, 4, 5, 6, 1000000, time.FixedZone("", 0))},
+	{"2001-02-03 04:05:06.01", time.Date(2001, time.February, 3, 4, 5, 6, 10000000, time.FixedZone("", 0))},
+	{"2001-02-03 04:05:06.1", time.Date(2001, time.February, 3, 4, 5, 6, 100000000, time.FixedZone("", 0))},
+	{"2001-02-03 04:05:06.12", time.Date(2001, time.February, 3, 4, 5, 6, 120000000, time.FixedZone("", 0))},
+	{"2001-02-03 04:05:06.123", time.Date(2001, time.February, 3, 4, 5, 6, 123000000, time.FixedZone("", 0))},
+	{"2001-02-03 04:05:06.1234", time.Date(2001, time.February, 3, 4, 5, 6, 123400000, time.FixedZone("", 0))},
+	{"2001-02-03 04:05:06.12345", time.Date(2001, time.February, 3, 4, 5, 6, 123450000, time.FixedZone("", 0))},
+	{"2001-02-03 04:05:06.123456", time.Date(2001, time.February, 3, 4, 5, 6, 123456000, time.FixedZone("", 0))},
+	{"2001-02-03 04:05:06.123-07", time.Date(2001, time.February, 3, 4, 5, 6, 123000000,
+		time.FixedZone("", -7*60*60))},
+	{"2001-02-03 04:05:06-07", time.Date(2001, time.February, 3, 4, 5, 6, 0,
+		time.FixedZone("", -7*60*60))},
+	{"2001-02-03 04:05:06-07:42", time.Date(2001, time.February, 3, 4, 5, 6, 0,
+		time.FixedZone("", -(7*60*60+42*60)))},
+	{"2001-02-03 04:05:06-07:30:09", time.Date(2001, time.February, 3, 4, 5, 6, 0,
+		time.FixedZone("", -(7*60*60+30*60+9)))},
+	{"2001-02-03 04:05:06+07", time.Date(2001, time.February, 3, 4, 5, 6, 0,
+		time.FixedZone("", 7*60*60))},
+	{"0011-02-03 04:05:06 BC", time.Date(-10, time.February, 3, 4, 5, 6, 0, time.FixedZone("", 0))},
+	{"0011-02-03 04:05:06.123 BC", time.Date(-10, time.February, 3, 4, 5, 6, 123000000, time.FixedZone("", 0))},
+	{"0011-02-03 04:05:06.123-07 BC", time.Date(-10, time.February, 3, 4, 5, 6, 123000000,
+		time.FixedZone("", -7*60*60))},
+	{"0001-02-03 04:05:06.123", time.Date(1, time.February, 3, 4, 5, 6, 123000000, time.FixedZone("", 0))},
+	{"0001-02-03 04:05:06.123 BC", time.Date(1, time.February, 3, 4, 5, 6, 123000000, time.FixedZone("", 0)).AddDate(-1, 0, 0)},
+	{"0001-02-03 04:05:06.123 BC", time.Date(0, time.February, 3, 4, 5, 6,
123000000, time.FixedZone("", 0))}, + {"0002-02-03 04:05:06.123 BC", time.Date(0, time.February, 3, 4, 5, 6, 123000000, time.FixedZone("", 0)).AddDate(-1, 0, 0)}, + {"0002-02-03 04:05:06.123 BC", time.Date(-1, time.February, 3, 4, 5, 6, 123000000, time.FixedZone("", 0))}, + {"12345-02-03 04:05:06.1", time.Date(12345, time.February, 3, 4, 5, 6, 100000000, time.FixedZone("", 0))}, + {"123456-02-03 04:05:06.1", time.Date(123456, time.February, 3, 4, 5, 6, 100000000, time.FixedZone("", 0))}, +} + +// Helper function for the two tests below +func tryParse(str string) (t time.Time, err error) { + defer func() { + if p := recover(); p != nil { + err = fmt.Errorf("%v", p) + return + } + }() + i := parseTs(nil, str) + t, ok := i.(time.Time) + if !ok { + err = fmt.Errorf("Not a time.Time type, got %#v", i) + } + return +} + +// Test that parsing the string results in the expected value. +func TestParseTs(t *testing.T) { + for i, tt := range timeTests { + val, err := tryParse(tt.str) + if err != nil { + t.Errorf("%d: got error: %v", i, err) + } else if val.String() != tt.timeval.String() { + t.Errorf("%d: expected to parse %q into %q; got %q", + i, tt.str, tt.timeval, val) + } + } +} + +// Now test that sending the value into the database and parsing it back +// returns the same time.Time value. +func TestEncodeAndParseTs(t *testing.T) { + db, err := openTestConnConninfo("timezone='Etc/UTC'") + if err != nil { + t.Fatal(err) + } + defer db.Close() + + for i, tt := range timeTests { + var dbstr string + err = db.QueryRow("SELECT ($1::timestamptz)::text", tt.timeval).Scan(&dbstr) + if err != nil { + t.Errorf("%d: could not send value %q to the database: %s", i, tt.timeval, err) + continue + } + + val, err := tryParse(dbstr) + if err != nil { + t.Errorf("%d: could not parse value %q: %s", i, dbstr, err) + continue + } + val = val.In(tt.timeval.Location()) + if val.String() != tt.timeval.String() { + t.Errorf("%d: expected to parse %q into %q; got %q", i, dbstr, tt.timeval, val) + } + } +} + +var formatTimeTests = []struct { + time time.Time + expected string +}{ + {time.Time{}, "0001-01-01T00:00:00Z"}, + {time.Date(2001, time.February, 3, 4, 5, 6, 123456789, time.FixedZone("", 0)), "2001-02-03T04:05:06.123456789Z"}, + {time.Date(2001, time.February, 3, 4, 5, 6, 123456789, time.FixedZone("", 2*60*60)), "2001-02-03T04:05:06.123456789+02:00"}, + {time.Date(2001, time.February, 3, 4, 5, 6, 123456789, time.FixedZone("", -6*60*60)), "2001-02-03T04:05:06.123456789-06:00"}, + {time.Date(2001, time.February, 3, 4, 5, 6, 0, time.FixedZone("", -(7*60*60+30*60+9))), "2001-02-03T04:05:06-07:30:09"}, + + {time.Date(1, time.February, 3, 4, 5, 6, 123456789, time.FixedZone("", 0)), "0001-02-03T04:05:06.123456789Z"}, + {time.Date(1, time.February, 3, 4, 5, 6, 123456789, time.FixedZone("", 2*60*60)), "0001-02-03T04:05:06.123456789+02:00"}, + {time.Date(1, time.February, 3, 4, 5, 6, 123456789, time.FixedZone("", -6*60*60)), "0001-02-03T04:05:06.123456789-06:00"}, + + {time.Date(0, time.February, 3, 4, 5, 6, 123456789, time.FixedZone("", 0)), "0001-02-03T04:05:06.123456789Z BC"}, + {time.Date(0, time.February, 3, 4, 5, 6, 123456789, time.FixedZone("", 2*60*60)), "0001-02-03T04:05:06.123456789+02:00 BC"}, + {time.Date(0, time.February, 3, 4, 5, 6, 123456789, time.FixedZone("", -6*60*60)), "0001-02-03T04:05:06.123456789-06:00 BC"}, + + {time.Date(1, time.February, 3, 4, 5, 6, 0, time.FixedZone("", -(7*60*60+30*60+9))), "0001-02-03T04:05:06-07:30:09"}, + {time.Date(0, time.February, 3, 4, 5, 6, 0, time.FixedZone("", 
-(7*60*60+30*60+9))), "0001-02-03T04:05:06-07:30:09 BC"}, +} + +func TestFormatTs(t *testing.T) { + for i, tt := range formatTimeTests { + val := string(formatTs(tt.time)) + if val != tt.expected { + t.Errorf("%d: incorrect time format %q, want %q", i, val, tt.expected) + } + } +} + +func TestTimestampWithTimeZone(t *testing.T) { + db := openTestConn(t) + defer db.Close() + + tx, err := db.Begin() + if err != nil { + t.Fatal(err) + } + defer tx.Rollback() + + // try several different locations, all included in Go's zoneinfo.zip + for _, locName := range []string{ + "UTC", + "America/Chicago", + "America/New_York", + "Australia/Darwin", + "Australia/Perth", + } { + loc, err := time.LoadLocation(locName) + if err != nil { + t.Logf("Could not load time zone %s - skipping", locName) + continue + } + + // Postgres timestamps have a resolution of 1 microsecond, so don't + // use the full range of the Nanosecond argument + refTime := time.Date(2012, 11, 6, 10, 23, 42, 123456000, loc) + + for _, pgTimeZone := range []string{"US/Eastern", "Australia/Darwin"} { + // Switch Postgres's timezone to test different output timestamp formats + _, err = tx.Exec(fmt.Sprintf("set time zone '%s'", pgTimeZone)) + if err != nil { + t.Fatal(err) + } + + var gotTime time.Time + row := tx.QueryRow("select $1::timestamp with time zone", refTime) + err = row.Scan(&gotTime) + if err != nil { + t.Fatal(err) + } + + if !refTime.Equal(gotTime) { + t.Errorf("timestamps not equal: %s != %s", refTime, gotTime) + } + + // check that the time zone is set correctly based on TimeZone + pgLoc, err := time.LoadLocation(pgTimeZone) + if err != nil { + t.Logf("Could not load time zone %s - skipping", pgLoc) + continue + } + translated := refTime.In(pgLoc) + if translated.String() != gotTime.String() { + t.Errorf("timestamps not equal: %s != %s", translated, gotTime) + } + } + } +} + +func TestTimestampWithOutTimezone(t *testing.T) { + db := openTestConn(t) + defer db.Close() + + test := func(ts, pgts string) { + r, err := db.Query("SELECT $1::timestamp", pgts) + if err != nil { + t.Fatalf("Could not run query: %v", err) + } + + n := r.Next() + + if n != true { + t.Fatal("Expected at least one row") + } + + var result time.Time + err = r.Scan(&result) + if err != nil { + t.Fatalf("Did not expect error scanning row: %v", err) + } + + expected, err := time.Parse(time.RFC3339, ts) + if err != nil { + t.Fatalf("Could not parse test time literal: %v", err) + } + + if !result.Equal(expected) { + t.Fatalf("Expected time to match %v: got mismatch %v", + expected, result) + } + + n = r.Next() + if n != false { + t.Fatal("Expected only one row") + } + } + + test("2000-01-01T00:00:00Z", "2000-01-01T00:00:00") + + // Test higher precision time + test("2013-01-04T20:14:58.80033Z", "2013-01-04 20:14:58.80033") +} + +func TestInfinityTimestamp(t *testing.T) { + db := openTestConn(t) + defer db.Close() + var err error + var resultT time.Time + + expectedError := fmt.Errorf(`sql: Scan error on column index 0: unsupported driver -> Scan pair: []uint8 -> *time.Time`) + type testCases []struct { + Query string + Param string + ExpectedErr error + ExpectedVal interface{} + } + tc := testCases{ + {"SELECT $1::timestamp", "-infinity", expectedError, "-infinity"}, + {"SELECT $1::timestamptz", "-infinity", expectedError, "-infinity"}, + {"SELECT $1::timestamp", "infinity", expectedError, "infinity"}, + {"SELECT $1::timestamptz", "infinity", expectedError, "infinity"}, + } + // try to assert []byte to time.Time + for _, q := range tc { + err = 
db.QueryRow(q.Query, q.Param).Scan(&resultT) + if err.Error() != q.ExpectedErr.Error() { + t.Errorf("Scanning -/+infinity, expected error, %q, got %q", q.ExpectedErr, err) + } + } + // yield []byte + for _, q := range tc { + var resultI interface{} + err = db.QueryRow(q.Query, q.Param).Scan(&resultI) + if err != nil { + t.Errorf("Scanning -/+infinity, expected no error, got %q", err) + } + result, ok := resultI.([]byte) + if !ok { + t.Errorf("Scanning -/+infinity, expected []byte, got %#v", resultI) + } + if string(result) != q.ExpectedVal { + t.Errorf("Scanning -/+infinity, expected %q, got %q", q.ExpectedVal, result) + } + } + + y1500 := time.Date(1500, time.January, 1, 0, 0, 0, 0, time.UTC) + y2500 := time.Date(2500, time.January, 1, 0, 0, 0, 0, time.UTC) + EnableInfinityTs(y1500, y2500) + + err = db.QueryRow("SELECT $1::timestamp", "infinity").Scan(&resultT) + if err != nil { + t.Errorf("Scanning infinity, expected no error, got %q", err) + } + if !resultT.Equal(y2500) { + t.Errorf("Scanning infinity, expected %q, got %q", y2500, resultT) + } + + err = db.QueryRow("SELECT $1::timestamptz", "infinity").Scan(&resultT) + if err != nil { + t.Errorf("Scanning infinity, expected no error, got %q", err) + } + if !resultT.Equal(y2500) { + t.Errorf("Scanning Infinity, expected time %q, got %q", y2500, resultT.String()) + } + + err = db.QueryRow("SELECT $1::timestamp", "-infinity").Scan(&resultT) + if err != nil { + t.Errorf("Scanning -infinity, expected no error, got %q", err) + } + if !resultT.Equal(y1500) { + t.Errorf("Scanning -infinity, expected time %q, got %q", y1500, resultT.String()) + } + + err = db.QueryRow("SELECT $1::timestamptz", "-infinity").Scan(&resultT) + if err != nil { + t.Errorf("Scanning -infinity, expected no error, got %q", err) + } + if !resultT.Equal(y1500) { + t.Errorf("Scanning -infinity, expected time %q, got %q", y1500, resultT.String()) + } + + y_1500 := time.Date(-1500, time.January, 1, 0, 0, 0, 0, time.UTC) + y11500 := time.Date(11500, time.January, 1, 0, 0, 0, 0, time.UTC) + var s string + err = db.QueryRow("SELECT $1::timestamp::text", y_1500).Scan(&s) + if err != nil { + t.Errorf("Encoding -infinity, expected no error, got %q", err) + } + if s != "-infinity" { + t.Errorf("Encoding -infinity, expected %q, got %q", "-infinity", s) + } + err = db.QueryRow("SELECT $1::timestamptz::text", y_1500).Scan(&s) + if err != nil { + t.Errorf("Encoding -infinity, expected no error, got %q", err) + } + if s != "-infinity" { + t.Errorf("Encoding -infinity, expected %q, got %q", "-infinity", s) + } + + err = db.QueryRow("SELECT $1::timestamp::text", y11500).Scan(&s) + if err != nil { + t.Errorf("Encoding infinity, expected no error, got %q", err) + } + if s != "infinity" { + t.Errorf("Encoding infinity, expected %q, got %q", "infinity", s) + } + err = db.QueryRow("SELECT $1::timestamptz::text", y11500).Scan(&s) + if err != nil { + t.Errorf("Encoding infinity, expected no error, got %q", err) + } + if s != "infinity" { + t.Errorf("Encoding infinity, expected %q, got %q", "infinity", s) + } + + disableInfinityTs() + + var panicErrorString string + func() { + defer func() { + panicErrorString, _ = recover().(string) + }() + EnableInfinityTs(y2500, y1500) + }() + if panicErrorString != infinityTsNegativeMustBeSmaller { + t.Errorf("Expected error, %q, got %q", infinityTsNegativeMustBeSmaller, panicErrorString) + } +} + +func TestStringWithNul(t *testing.T) { + db := openTestConn(t) + defer db.Close() + + hello0world := string("hello\x00world") + _, err := db.Query("SELECT 
$1::text", &hello0world) + if err == nil { + t.Fatal("Postgres accepts a string with nul in it; " + + "injection attacks may be plausible") + } +} + +func TestByteSliceToText(t *testing.T) { + db := openTestConn(t) + defer db.Close() + + b := []byte("hello world") + row := db.QueryRow("SELECT $1::text", b) + + var result []byte + err := row.Scan(&result) + if err != nil { + t.Fatal(err) + } + + if string(result) != string(b) { + t.Fatalf("expected %v but got %v", b, result) + } +} + +func TestStringToBytea(t *testing.T) { + db := openTestConn(t) + defer db.Close() + + b := "hello world" + row := db.QueryRow("SELECT $1::bytea", b) + + var result []byte + err := row.Scan(&result) + if err != nil { + t.Fatal(err) + } + + if !bytes.Equal(result, []byte(b)) { + t.Fatalf("expected %v but got %v", b, result) + } +} + +func TestTextByteSliceToUUID(t *testing.T) { + db := openTestConn(t) + defer db.Close() + + b := []byte("a0eebc99-9c0b-4ef8-bb6d-6bb9bd380a11") + row := db.QueryRow("SELECT $1::uuid", b) + + var result string + err := row.Scan(&result) + if forceBinaryParameters() { + pqErr := err.(*Error) + if pqErr == nil { + t.Errorf("Expected to get error") + } else if pqErr.Code != "22P03" { + t.Fatalf("Expected to get invalid binary encoding error (22P03), got %s", pqErr.Code) + } + } else { + if err != nil { + t.Fatal(err) + } + + if result != string(b) { + t.Fatalf("expected %v but got %v", b, result) + } + } +} + +func TestBinaryByteSlicetoUUID(t *testing.T) { + db := openTestConn(t) + defer db.Close() + + b := []byte{'\xa0','\xee','\xbc','\x99', + '\x9c', '\x0b', + '\x4e', '\xf8', + '\xbb', '\x00', '\x6b', + '\xb9', '\xbd', '\x38', '\x0a', '\x11'} + row := db.QueryRow("SELECT $1::uuid", b) + + var result string + err := row.Scan(&result) + if forceBinaryParameters() { + if err != nil { + t.Fatal(err) + } + + if result != string("a0eebc99-9c0b-4ef8-bb00-6bb9bd380a11") { + t.Fatalf("expected %v but got %v", b, result) + } + } else { + pqErr := err.(*Error) + if pqErr == nil { + t.Errorf("Expected to get error") + } else if pqErr.Code != "22021" { + t.Fatalf("Expected to get invalid byte sequence for encoding error (22021), got %s", pqErr.Code) + } + } +} + +func TestStringToUUID(t *testing.T) { + db := openTestConn(t) + defer db.Close() + + s := "a0eebc99-9c0b-4ef8-bb00-6bb9bd380a11" + row := db.QueryRow("SELECT $1::uuid", s) + + var result string + err := row.Scan(&result) + if err != nil { + t.Fatal(err) + } + + if result != s { + t.Fatalf("expected %v but got %v", s, result) + } +} + +func TestTextByteSliceToInt(t *testing.T) { + db := openTestConn(t) + defer db.Close() + + expected := 12345678 + b := []byte(fmt.Sprintf("%d", expected)) + row := db.QueryRow("SELECT $1::int", b) + + var result int + err := row.Scan(&result) + if forceBinaryParameters() { + pqErr := err.(*Error) + if pqErr == nil { + t.Errorf("Expected to get error") + } else if pqErr.Code != "22P03" { + t.Fatalf("Expected to get invalid binary encoding error (22P03), got %s", pqErr.Code) + } + } else { + if err != nil { + t.Fatal(err) + } + if result != expected { + t.Fatalf("expected %v but got %v", expected, result) + } + } +} + +func TestBinaryByteSliceToInt(t *testing.T) { + db := openTestConn(t) + defer db.Close() + + expected := 12345678 + b := []byte{'\x00', '\xbc', '\x61', '\x4e'} + row := db.QueryRow("SELECT $1::int", b) + + var result int + err := row.Scan(&result) + if forceBinaryParameters() { + if err != nil { + t.Fatal(err) + } + if result != expected { + t.Fatalf("expected %v but got %v", expected, result) + 
}
+	} else {
+		pqErr := err.(*Error)
+		if pqErr == nil {
+			t.Errorf("Expected to get error")
+		} else if pqErr.Code != "22021" {
+			t.Fatalf("Expected to get invalid byte sequence for encoding error (22021), got %s", pqErr.Code)
+		}
+	}
+}
+
+func TestByteaOutputFormatEncoding(t *testing.T) {
+	input := []byte("\\x\x00\x01\x02\xFF\xFEabcdefg0123")
+	want := []byte("\\x5c78000102fffe6162636465666730313233")
+	got := encode(&parameterStatus{serverVersion: 90000}, input, oid.T_bytea)
+	if !bytes.Equal(want, got) {
+		t.Errorf("invalid hex bytea output, got %v but expected %v", got, want)
+	}
+
+	want = []byte("\\\\x\\000\\001\\002\\377\\376abcdefg0123")
+	got = encode(&parameterStatus{serverVersion: 84000}, input, oid.T_bytea)
+	if !bytes.Equal(want, got) {
+		t.Errorf("invalid escape bytea output, got %v but expected %v", got, want)
+	}
+}
+
+func TestByteaOutputFormats(t *testing.T) {
+	db := openTestConn(t)
+	defer db.Close()
+
+	if getServerVersion(t, db) < 90000 {
+		// skip
+		return
+	}
+
+	testByteaOutputFormat := func(f string, usePrepared bool) {
+		expectedData := []byte("\x5c\x78\x00\xff\x61\x62\x63\x01\x08")
+		sqlQuery := "SELECT decode('5c7800ff6162630108', 'hex')"
+
+		var data []byte
+
+		// use a txn to avoid relying on getting the same connection
+		txn, err := db.Begin()
+		if err != nil {
+			t.Fatal(err)
+		}
+		defer txn.Rollback()
+
+		_, err = txn.Exec("SET LOCAL bytea_output TO " + f)
+		if err != nil {
+			t.Fatal(err)
+		}
+		var rows *sql.Rows
+		var stmt *sql.Stmt
+		if usePrepared {
+			stmt, err = txn.Prepare(sqlQuery)
+			if err != nil {
+				t.Fatal(err)
+			}
+			rows, err = stmt.Query()
+		} else {
+			// use Query; QueryRow would hide the actual error
+			rows, err = txn.Query(sqlQuery)
+		}
+		if err != nil {
+			t.Fatal(err)
+		}
+		if !rows.Next() {
+			if rows.Err() != nil {
+				t.Fatal(rows.Err())
+			}
+			t.Fatal("shouldn't happen")
+		}
+		err = rows.Scan(&data)
+		if err != nil {
+			t.Fatal(err)
+		}
+		err = rows.Close()
+		if err != nil {
+			t.Fatal(err)
+		}
+		if stmt != nil {
+			err = stmt.Close()
+			if err != nil {
+				t.Fatal(err)
+			}
+		}
+		if !bytes.Equal(data, expectedData) {
+			t.Errorf("unexpected bytea value %v for format %s; expected %v", data, f, expectedData)
+		}
+	}
+
+	testByteaOutputFormat("hex", false)
+	testByteaOutputFormat("escape", false)
+	testByteaOutputFormat("hex", true)
+	testByteaOutputFormat("escape", true)
+}
+
+func TestAppendEncodedText(t *testing.T) {
+	var buf []byte
+
+	buf = appendEncodedText(&parameterStatus{serverVersion: 90000}, buf, int64(10))
+	buf = append(buf, '\t')
+	buf = appendEncodedText(&parameterStatus{serverVersion: 90000}, buf, 42.0000000001)
+	buf = append(buf, '\t')
+	buf = appendEncodedText(&parameterStatus{serverVersion: 90000}, buf, "hello\tworld")
+	buf = append(buf, '\t')
+	buf = appendEncodedText(&parameterStatus{serverVersion: 90000}, buf, []byte{0, 128, 255})
+
+	if string(buf) != "10\t42.0000000001\thello\\tworld\t\\\\x0080ff" {
+		t.Fatal(string(buf))
+	}
+}
+
+func TestAppendEscapedText(t *testing.T) {
+	if esc := appendEscapedText(nil, "hallo\tescape"); string(esc) != "hallo\\tescape" {
+		t.Fatal(string(esc))
+	}
+	if esc := appendEscapedText(nil, "hallo\\tescape\n"); string(esc) != "hallo\\\\tescape\\n" {
+		t.Fatal(string(esc))
+	}
+	if esc := appendEscapedText(nil, "\n\r\t\f"); string(esc) != "\\n\\r\\t\f" {
+		t.Fatal(string(esc))
+	}
+}
+
+func TestAppendEscapedTextExistingBuffer(t *testing.T) {
+	var buf []byte
+	buf = []byte("123\t")
+	if esc := appendEscapedText(buf, "hallo\tescape"); string(esc) != "123\thallo\\tescape" {
+		t.Fatal(string(esc))
+	}
+	buf =
[]byte("123\t") + if esc := appendEscapedText(buf, "hallo\\tescape\n"); string(esc) != "123\thallo\\\\tescape\\n" { + t.Fatal(string(esc)) + } + buf = []byte("123\t") + if esc := appendEscapedText(buf, "\n\r\t\f"); string(esc) != "123\t\\n\\r\\t\f" { + t.Fatal(string(esc)) + } +} + +func BenchmarkAppendEscapedText(b *testing.B) { + longString := "" + for i := 0; i < 100; i++ { + longString += "123456789\n" + } + for i := 0; i < b.N; i++ { + appendEscapedText(nil, longString) + } +} + +func BenchmarkAppendEscapedTextNoEscape(b *testing.B) { + longString := "" + for i := 0; i < 100; i++ { + longString += "1234567890" + } + for i := 0; i < b.N; i++ { + appendEscapedText(nil, longString) + } +} diff --git a/vendor/github.com/lib/pq/error.go b/vendor/github.com/lib/pq/error.go new file mode 100644 index 000000000..b4bb44cee --- /dev/null +++ b/vendor/github.com/lib/pq/error.go @@ -0,0 +1,508 @@ +package pq + +import ( + "database/sql/driver" + "fmt" + "io" + "net" + "runtime" +) + +// Error severities +const ( + Efatal = "FATAL" + Epanic = "PANIC" + Ewarning = "WARNING" + Enotice = "NOTICE" + Edebug = "DEBUG" + Einfo = "INFO" + Elog = "LOG" +) + +// Error represents an error communicating with the server. +// +// See http://www.postgresql.org/docs/current/static/protocol-error-fields.html for details of the fields +type Error struct { + Severity string + Code ErrorCode + Message string + Detail string + Hint string + Position string + InternalPosition string + InternalQuery string + Where string + Schema string + Table string + Column string + DataTypeName string + Constraint string + File string + Line string + Routine string +} + +// ErrorCode is a five-character error code. +type ErrorCode string + +// Name returns a more human friendly rendering of the error code, namely the +// "condition name". +// +// See http://www.postgresql.org/docs/9.3/static/errcodes-appendix.html for +// details. +func (ec ErrorCode) Name() string { + return errorCodeNames[ec] +} + +// ErrorClass is only the class part of an error code. +type ErrorClass string + +// Name returns the condition name of an error class. It is equivalent to the +// condition name of the "standard" error code (i.e. the one having the last +// three characters "000"). +func (ec ErrorClass) Name() string { + return errorCodeNames[ErrorCode(ec+"000")] +} + +// Class returns the error class, e.g. "28". +// +// See http://www.postgresql.org/docs/9.3/static/errcodes-appendix.html for +// details. +func (ec ErrorCode) Class() ErrorClass { + return ErrorClass(ec[0:2]) +} + +// errorCodeNames is a mapping between the five-character error codes and the +// human readable "condition names". 
It is derived from the list at +// http://www.postgresql.org/docs/9.3/static/errcodes-appendix.html +var errorCodeNames = map[ErrorCode]string{ + // Class 00 - Successful Completion + "00000": "successful_completion", + // Class 01 - Warning + "01000": "warning", + "0100C": "dynamic_result_sets_returned", + "01008": "implicit_zero_bit_padding", + "01003": "null_value_eliminated_in_set_function", + "01007": "privilege_not_granted", + "01006": "privilege_not_revoked", + "01004": "string_data_right_truncation", + "01P01": "deprecated_feature", + // Class 02 - No Data (this is also a warning class per the SQL standard) + "02000": "no_data", + "02001": "no_additional_dynamic_result_sets_returned", + // Class 03 - SQL Statement Not Yet Complete + "03000": "sql_statement_not_yet_complete", + // Class 08 - Connection Exception + "08000": "connection_exception", + "08003": "connection_does_not_exist", + "08006": "connection_failure", + "08001": "sqlclient_unable_to_establish_sqlconnection", + "08004": "sqlserver_rejected_establishment_of_sqlconnection", + "08007": "transaction_resolution_unknown", + "08P01": "protocol_violation", + // Class 09 - Triggered Action Exception + "09000": "triggered_action_exception", + // Class 0A - Feature Not Supported + "0A000": "feature_not_supported", + // Class 0B - Invalid Transaction Initiation + "0B000": "invalid_transaction_initiation", + // Class 0F - Locator Exception + "0F000": "locator_exception", + "0F001": "invalid_locator_specification", + // Class 0L - Invalid Grantor + "0L000": "invalid_grantor", + "0LP01": "invalid_grant_operation", + // Class 0P - Invalid Role Specification + "0P000": "invalid_role_specification", + // Class 0Z - Diagnostics Exception + "0Z000": "diagnostics_exception", + "0Z002": "stacked_diagnostics_accessed_without_active_handler", + // Class 20 - Case Not Found + "20000": "case_not_found", + // Class 21 - Cardinality Violation + "21000": "cardinality_violation", + // Class 22 - Data Exception + "22000": "data_exception", + "2202E": "array_subscript_error", + "22021": "character_not_in_repertoire", + "22008": "datetime_field_overflow", + "22012": "division_by_zero", + "22005": "error_in_assignment", + "2200B": "escape_character_conflict", + "22022": "indicator_overflow", + "22015": "interval_field_overflow", + "2201E": "invalid_argument_for_logarithm", + "22014": "invalid_argument_for_ntile_function", + "22016": "invalid_argument_for_nth_value_function", + "2201F": "invalid_argument_for_power_function", + "2201G": "invalid_argument_for_width_bucket_function", + "22018": "invalid_character_value_for_cast", + "22007": "invalid_datetime_format", + "22019": "invalid_escape_character", + "2200D": "invalid_escape_octet", + "22025": "invalid_escape_sequence", + "22P06": "nonstandard_use_of_escape_character", + "22010": "invalid_indicator_parameter_value", + "22023": "invalid_parameter_value", + "2201B": "invalid_regular_expression", + "2201W": "invalid_row_count_in_limit_clause", + "2201X": "invalid_row_count_in_result_offset_clause", + "22009": "invalid_time_zone_displacement_value", + "2200C": "invalid_use_of_escape_character", + "2200G": "most_specific_type_mismatch", + "22004": "null_value_not_allowed", + "22002": "null_value_no_indicator_parameter", + "22003": "numeric_value_out_of_range", + "22026": "string_data_length_mismatch", + "22001": "string_data_right_truncation", + "22011": "substring_error", + "22027": "trim_error", + "22024": "unterminated_c_string", + "2200F": "zero_length_character_string", + "22P01": 
"floating_point_exception", + "22P02": "invalid_text_representation", + "22P03": "invalid_binary_representation", + "22P04": "bad_copy_file_format", + "22P05": "untranslatable_character", + "2200L": "not_an_xml_document", + "2200M": "invalid_xml_document", + "2200N": "invalid_xml_content", + "2200S": "invalid_xml_comment", + "2200T": "invalid_xml_processing_instruction", + // Class 23 - Integrity Constraint Violation + "23000": "integrity_constraint_violation", + "23001": "restrict_violation", + "23502": "not_null_violation", + "23503": "foreign_key_violation", + "23505": "unique_violation", + "23514": "check_violation", + "23P01": "exclusion_violation", + // Class 24 - Invalid Cursor State + "24000": "invalid_cursor_state", + // Class 25 - Invalid Transaction State + "25000": "invalid_transaction_state", + "25001": "active_sql_transaction", + "25002": "branch_transaction_already_active", + "25008": "held_cursor_requires_same_isolation_level", + "25003": "inappropriate_access_mode_for_branch_transaction", + "25004": "inappropriate_isolation_level_for_branch_transaction", + "25005": "no_active_sql_transaction_for_branch_transaction", + "25006": "read_only_sql_transaction", + "25007": "schema_and_data_statement_mixing_not_supported", + "25P01": "no_active_sql_transaction", + "25P02": "in_failed_sql_transaction", + // Class 26 - Invalid SQL Statement Name + "26000": "invalid_sql_statement_name", + // Class 27 - Triggered Data Change Violation + "27000": "triggered_data_change_violation", + // Class 28 - Invalid Authorization Specification + "28000": "invalid_authorization_specification", + "28P01": "invalid_password", + // Class 2B - Dependent Privilege Descriptors Still Exist + "2B000": "dependent_privilege_descriptors_still_exist", + "2BP01": "dependent_objects_still_exist", + // Class 2D - Invalid Transaction Termination + "2D000": "invalid_transaction_termination", + // Class 2F - SQL Routine Exception + "2F000": "sql_routine_exception", + "2F005": "function_executed_no_return_statement", + "2F002": "modifying_sql_data_not_permitted", + "2F003": "prohibited_sql_statement_attempted", + "2F004": "reading_sql_data_not_permitted", + // Class 34 - Invalid Cursor Name + "34000": "invalid_cursor_name", + // Class 38 - External Routine Exception + "38000": "external_routine_exception", + "38001": "containing_sql_not_permitted", + "38002": "modifying_sql_data_not_permitted", + "38003": "prohibited_sql_statement_attempted", + "38004": "reading_sql_data_not_permitted", + // Class 39 - External Routine Invocation Exception + "39000": "external_routine_invocation_exception", + "39001": "invalid_sqlstate_returned", + "39004": "null_value_not_allowed", + "39P01": "trigger_protocol_violated", + "39P02": "srf_protocol_violated", + // Class 3B - Savepoint Exception + "3B000": "savepoint_exception", + "3B001": "invalid_savepoint_specification", + // Class 3D - Invalid Catalog Name + "3D000": "invalid_catalog_name", + // Class 3F - Invalid Schema Name + "3F000": "invalid_schema_name", + // Class 40 - Transaction Rollback + "40000": "transaction_rollback", + "40002": "transaction_integrity_constraint_violation", + "40001": "serialization_failure", + "40003": "statement_completion_unknown", + "40P01": "deadlock_detected", + // Class 42 - Syntax Error or Access Rule Violation + "42000": "syntax_error_or_access_rule_violation", + "42601": "syntax_error", + "42501": "insufficient_privilege", + "42846": "cannot_coerce", + "42803": "grouping_error", + "42P20": "windowing_error", + "42P19": "invalid_recursion", + 
"42830": "invalid_foreign_key", + "42602": "invalid_name", + "42622": "name_too_long", + "42939": "reserved_name", + "42804": "datatype_mismatch", + "42P18": "indeterminate_datatype", + "42P21": "collation_mismatch", + "42P22": "indeterminate_collation", + "42809": "wrong_object_type", + "42703": "undefined_column", + "42883": "undefined_function", + "42P01": "undefined_table", + "42P02": "undefined_parameter", + "42704": "undefined_object", + "42701": "duplicate_column", + "42P03": "duplicate_cursor", + "42P04": "duplicate_database", + "42723": "duplicate_function", + "42P05": "duplicate_prepared_statement", + "42P06": "duplicate_schema", + "42P07": "duplicate_table", + "42712": "duplicate_alias", + "42710": "duplicate_object", + "42702": "ambiguous_column", + "42725": "ambiguous_function", + "42P08": "ambiguous_parameter", + "42P09": "ambiguous_alias", + "42P10": "invalid_column_reference", + "42611": "invalid_column_definition", + "42P11": "invalid_cursor_definition", + "42P12": "invalid_database_definition", + "42P13": "invalid_function_definition", + "42P14": "invalid_prepared_statement_definition", + "42P15": "invalid_schema_definition", + "42P16": "invalid_table_definition", + "42P17": "invalid_object_definition", + // Class 44 - WITH CHECK OPTION Violation + "44000": "with_check_option_violation", + // Class 53 - Insufficient Resources + "53000": "insufficient_resources", + "53100": "disk_full", + "53200": "out_of_memory", + "53300": "too_many_connections", + "53400": "configuration_limit_exceeded", + // Class 54 - Program Limit Exceeded + "54000": "program_limit_exceeded", + "54001": "statement_too_complex", + "54011": "too_many_columns", + "54023": "too_many_arguments", + // Class 55 - Object Not In Prerequisite State + "55000": "object_not_in_prerequisite_state", + "55006": "object_in_use", + "55P02": "cant_change_runtime_param", + "55P03": "lock_not_available", + // Class 57 - Operator Intervention + "57000": "operator_intervention", + "57014": "query_canceled", + "57P01": "admin_shutdown", + "57P02": "crash_shutdown", + "57P03": "cannot_connect_now", + "57P04": "database_dropped", + // Class 58 - System Error (errors external to PostgreSQL itself) + "58000": "system_error", + "58030": "io_error", + "58P01": "undefined_file", + "58P02": "duplicate_file", + // Class F0 - Configuration File Error + "F0000": "config_file_error", + "F0001": "lock_file_exists", + // Class HV - Foreign Data Wrapper Error (SQL/MED) + "HV000": "fdw_error", + "HV005": "fdw_column_name_not_found", + "HV002": "fdw_dynamic_parameter_value_needed", + "HV010": "fdw_function_sequence_error", + "HV021": "fdw_inconsistent_descriptor_information", + "HV024": "fdw_invalid_attribute_value", + "HV007": "fdw_invalid_column_name", + "HV008": "fdw_invalid_column_number", + "HV004": "fdw_invalid_data_type", + "HV006": "fdw_invalid_data_type_descriptors", + "HV091": "fdw_invalid_descriptor_field_identifier", + "HV00B": "fdw_invalid_handle", + "HV00C": "fdw_invalid_option_index", + "HV00D": "fdw_invalid_option_name", + "HV090": "fdw_invalid_string_length_or_buffer_length", + "HV00A": "fdw_invalid_string_format", + "HV009": "fdw_invalid_use_of_null_pointer", + "HV014": "fdw_too_many_handles", + "HV001": "fdw_out_of_memory", + "HV00P": "fdw_no_schemas", + "HV00J": "fdw_option_name_not_found", + "HV00K": "fdw_reply_handle", + "HV00Q": "fdw_schema_not_found", + "HV00R": "fdw_table_not_found", + "HV00L": "fdw_unable_to_create_execution", + "HV00M": "fdw_unable_to_create_reply", + "HV00N": "fdw_unable_to_establish_connection", 
+ // Class P0 - PL/pgSQL Error + "P0000": "plpgsql_error", + "P0001": "raise_exception", + "P0002": "no_data_found", + "P0003": "too_many_rows", + // Class XX - Internal Error + "XX000": "internal_error", + "XX001": "data_corrupted", + "XX002": "index_corrupted", +} + +func parseError(r *readBuf) *Error { + err := new(Error) + for t := r.byte(); t != 0; t = r.byte() { + msg := r.string() + switch t { + case 'S': + err.Severity = msg + case 'C': + err.Code = ErrorCode(msg) + case 'M': + err.Message = msg + case 'D': + err.Detail = msg + case 'H': + err.Hint = msg + case 'P': + err.Position = msg + case 'p': + err.InternalPosition = msg + case 'q': + err.InternalQuery = msg + case 'W': + err.Where = msg + case 's': + err.Schema = msg + case 't': + err.Table = msg + case 'c': + err.Column = msg + case 'd': + err.DataTypeName = msg + case 'n': + err.Constraint = msg + case 'F': + err.File = msg + case 'L': + err.Line = msg + case 'R': + err.Routine = msg + } + } + return err +} + +// Fatal returns true if the Error Severity is fatal. +func (err *Error) Fatal() bool { + return err.Severity == Efatal +} + +// Get implements the legacy PGError interface. New code should use the fields +// of the Error struct directly. +func (err *Error) Get(k byte) (v string) { + switch k { + case 'S': + return err.Severity + case 'C': + return string(err.Code) + case 'M': + return err.Message + case 'D': + return err.Detail + case 'H': + return err.Hint + case 'P': + return err.Position + case 'p': + return err.InternalPosition + case 'q': + return err.InternalQuery + case 'W': + return err.Where + case 's': + return err.Schema + case 't': + return err.Table + case 'c': + return err.Column + case 'd': + return err.DataTypeName + case 'n': + return err.Constraint + case 'F': + return err.File + case 'L': + return err.Line + case 'R': + return err.Routine + } + return "" +} + +func (err Error) Error() string { + return "pq: " + err.Message +} + +// PGError is an interface used by previous versions of pq. It is provided +// only to support legacy code. New code should use the Error type. +type PGError interface { + Error() string + Fatal() bool + Get(k byte) (v string) +} + +func errorf(s string, args ...interface{}) { + panic(fmt.Errorf("pq: %s", fmt.Sprintf(s, args...))) +} + +func errRecoverNoErrBadConn(err *error) { + e := recover() + if e == nil { + // Do nothing + return + } + var ok bool + *err, ok = e.(error) + if !ok { + *err = fmt.Errorf("pq: unexpected error: %#v", e) + } +} + +func (c *conn) errRecover(err *error) { + e := recover() + switch v := e.(type) { + case nil: + // Do nothing + case runtime.Error: + c.bad = true + panic(v) + case *Error: + if v.Fatal() { + *err = driver.ErrBadConn + } else { + *err = v + } + case *net.OpError: + *err = driver.ErrBadConn + case error: + if v == io.EOF || v.(error).Error() == "remote error: handshake failure" { + *err = driver.ErrBadConn + } else { + *err = v + } + + default: + c.bad = true + panic(fmt.Sprintf("unknown error: %#v", e)) + } + + // Any time we return ErrBadConn, we need to remember it since *Tx doesn't + // mark the connection bad in database/sql. 
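+	// (database/sql pins a *Tx to a single driver connection, so without
+	// this flag a broken connection could keep being handed back to the Tx
+	// until it finishes.)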
+ if *err == driver.ErrBadConn { + c.bad = true + } +} diff --git a/vendor/github.com/lib/pq/hstore/hstore.go b/vendor/github.com/lib/pq/hstore/hstore.go new file mode 100644 index 000000000..72d5abf51 --- /dev/null +++ b/vendor/github.com/lib/pq/hstore/hstore.go @@ -0,0 +1,118 @@ +package hstore + +import ( + "database/sql" + "database/sql/driver" + "strings" +) + +// A wrapper for transferring Hstore values back and forth easily. +type Hstore struct { + Map map[string]sql.NullString +} + +// escapes and quotes hstore keys/values +// s should be a sql.NullString or string +func hQuote(s interface{}) string { + var str string + switch v := s.(type) { + case sql.NullString: + if !v.Valid { + return "NULL" + } + str = v.String + case string: + str = v + default: + panic("not a string or sql.NullString") + } + + str = strings.Replace(str, "\\", "\\\\", -1) + return `"` + strings.Replace(str, "\"", "\\\"", -1) + `"` +} + +// Scan implements the Scanner interface. +// +// Note h.Map is reallocated before the scan to clear existing values. If the +// hstore column's database value is NULL, then h.Map is set to nil instead. +func (h *Hstore) Scan(value interface{}) error { + if value == nil { + h.Map = nil + return nil + } + h.Map = make(map[string]sql.NullString) + var b byte + pair := [][]byte{{}, {}} + pi := 0 + inQuote := false + didQuote := false + sawSlash := false + bindex := 0 + for bindex, b = range value.([]byte) { + if sawSlash { + pair[pi] = append(pair[pi], b) + sawSlash = false + continue + } + + switch b { + case '\\': + sawSlash = true + continue + case '"': + inQuote = !inQuote + if !didQuote { + didQuote = true + } + continue + default: + if !inQuote { + switch b { + case ' ', '\t', '\n', '\r': + continue + case '=': + continue + case '>': + pi = 1 + didQuote = false + continue + case ',': + s := string(pair[1]) + if !didQuote && len(s) == 4 && strings.ToLower(s) == "null" { + h.Map[string(pair[0])] = sql.NullString{String: "", Valid: false} + } else { + h.Map[string(pair[0])] = sql.NullString{String: string(pair[1]), Valid: true} + } + pair[0] = []byte{} + pair[1] = []byte{} + pi = 0 + continue + } + } + } + pair[pi] = append(pair[pi], b) + } + if bindex > 0 { + s := string(pair[1]) + if !didQuote && len(s) == 4 && strings.ToLower(s) == "null" { + h.Map[string(pair[0])] = sql.NullString{String: "", Valid: false} + } else { + h.Map[string(pair[0])] = sql.NullString{String: string(pair[1]), Valid: true} + } + } + return nil +} + +// Value implements the driver Valuer interface. Note if h.Map is nil, the +// database column value will be set to NULL. 
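+//
+// An illustrative round-trip, assuming a table with a hypothetical hstore
+// column named "tags":
+//
+//	h := Hstore{Map: map[string]sql.NullString{
+//		"color": {String: "blue", Valid: true},
+//	}}
+//	_, err := db.Exec("UPDATE items SET tags = $1 WHERE id = $2", h, 1)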
+func (h Hstore) Value() (driver.Value, error) {
+	if h.Map == nil {
+		return nil, nil
+	}
+	parts := []string{}
+	for key, val := range h.Map {
+		thispart := hQuote(key) + "=>" + hQuote(val)
+		parts = append(parts, thispart)
+	}
+	return []byte(strings.Join(parts, ",")), nil
+}
diff --git a/vendor/github.com/lib/pq/hstore/hstore_test.go b/vendor/github.com/lib/pq/hstore/hstore_test.go
new file mode 100644
index 000000000..c9c108fc3
--- /dev/null
+++ b/vendor/github.com/lib/pq/hstore/hstore_test.go
@@ -0,0 +1,148 @@
+package hstore
+
+import (
+	"database/sql"
+	"os"
+	"testing"
+
+	_ "github.com/lib/pq"
+)
+
+type Fatalistic interface {
+	Fatal(args ...interface{})
+}
+
+func openTestConn(t Fatalistic) *sql.DB {
+	datname := os.Getenv("PGDATABASE")
+	sslmode := os.Getenv("PGSSLMODE")
+
+	if datname == "" {
+		os.Setenv("PGDATABASE", "pqgotest")
+	}
+
+	if sslmode == "" {
+		os.Setenv("PGSSLMODE", "disable")
+	}
+
+	conn, err := sql.Open("postgres", "")
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	return conn
+}
+
+func TestHstore(t *testing.T) {
+	db := openTestConn(t)
+	defer db.Close()
+
+	// quietly create hstore if it doesn't exist
+	_, err := db.Exec("CREATE EXTENSION IF NOT EXISTS hstore")
+	if err != nil {
+		t.Skipf("Skipping hstore tests - hstore extension create failed: %s", err.Error())
+	}
+
+	hs := Hstore{}
+
+	// test for null-valued hstores
+	err = db.QueryRow("SELECT NULL::hstore").Scan(&hs)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if hs.Map != nil {
+		t.Fatalf("expected null map")
+	}
+
+	err = db.QueryRow("SELECT $1::hstore", hs).Scan(&hs)
+	if err != nil {
+		t.Fatalf("re-query null map failed: %s", err.Error())
+	}
+	if hs.Map != nil {
+		t.Fatalf("expected null map")
+	}
+
+	// test for empty hstores
+	err = db.QueryRow("SELECT ''::hstore").Scan(&hs)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if hs.Map == nil {
+		t.Fatalf("expected empty map, got null map")
+	}
+	if len(hs.Map) != 0 {
+		t.Fatalf("expected empty map, got len(map)=%d", len(hs.Map))
+	}
+
+	err = db.QueryRow("SELECT $1::hstore", hs).Scan(&hs)
+	if err != nil {
+		t.Fatalf("re-query empty map failed: %s", err.Error())
+	}
+	if hs.Map == nil {
+		t.Fatalf("expected empty map, got null map")
+	}
+	if len(hs.Map) != 0 {
+		t.Fatalf("expected empty map, got len(map)=%d", len(hs.Map))
+	}
+
+	// a few example maps to test out
+	hsOnePair := Hstore{
+		Map: map[string]sql.NullString{
+			"key1": {"value1", true},
+		},
+	}
+
+	hsThreePairs := Hstore{
+		Map: map[string]sql.NullString{
+			"key1": {"value1", true},
+			"key2": {"value2", true},
+			"key3": {"value3", true},
+		},
+	}
+
+	hsSmorgasbord := Hstore{
+		Map: map[string]sql.NullString{
+			"nullstring":     {"NULL", true},
+			"actuallynull":   {"", false},
+			"NULL":           {"NULL string key", true},
+			"withbracket":    {"value>42", true},
+			"withequal":      {"value=42", true},
+			`"withquotes1"`:  {`this "should" be fine`, true},
+			`"withquotes"2"`: {`this "should\" also be fine`, true},
+			"embedded1":      {"value1=>x1", true},
+			"embedded2":      {`"value2"=>x2`, true},
+			"withnewlines":   {"\n\nvalue\t=>2", true},
+			"<>":             {`this, "should,\" also, => be fine`, true},
+		},
+	}
+
+	// test encoding in query params, then decoding during Scan
+	testBidirectional := func(h Hstore) {
+		err = db.QueryRow("SELECT $1::hstore", h).Scan(&hs)
+		if err != nil {
+			t.Fatalf("re-query %d-pair map failed: %s", len(h.Map), err.Error())
+		}
+		if hs.Map == nil {
+			t.Fatalf("expected %d-pair map, got null map", len(h.Map))
+		}
+		if len(hs.Map) != len(h.Map) {
+			t.Fatalf("expected %d-pair map, got len(map)=%d", len(h.Map), len(hs.Map))
+
} + + for key, val := range hs.Map { + otherval, found := h.Map[key] + if !found { + t.Fatalf(" key '%v' not found in %d-pair map", key, len(h.Map)) + } + if otherval.Valid != val.Valid { + t.Fatalf(" value %v <> %v in %d-pair map", otherval, val, len(h.Map)) + } + if otherval.String != val.String { + t.Fatalf(" value '%v' <> '%v' in %d-pair map", otherval.String, val.String, len(h.Map)) + } + } + } + + testBidirectional(hsOnePair) + testBidirectional(hsThreePairs) + testBidirectional(hsSmorgasbord) +} diff --git a/vendor/github.com/lib/pq/listen_example/doc.go b/vendor/github.com/lib/pq/listen_example/doc.go new file mode 100644 index 000000000..5bc99f5c1 --- /dev/null +++ b/vendor/github.com/lib/pq/listen_example/doc.go @@ -0,0 +1,102 @@ +/* + +Below you will find a self-contained Go program which uses the LISTEN / NOTIFY +mechanism to avoid polling the database while waiting for more work to arrive. + + // + // You can see the program in action by defining a function similar to + // the following: + // + // CREATE OR REPLACE FUNCTION public.get_work() + // RETURNS bigint + // LANGUAGE sql + // AS $$ + // SELECT CASE WHEN random() >= 0.2 THEN int8 '1' END + // $$ + // ; + + package main + + import ( + "database/sql" + "fmt" + "time" + + "github.com/lib/pq" + ) + + func doWork(db *sql.DB, work int64) { + // work here + } + + func getWork(db *sql.DB) { + for { + // get work from the database here + var work sql.NullInt64 + err := db.QueryRow("SELECT get_work()").Scan(&work) + if err != nil { + fmt.Println("call to get_work() failed: ", err) + time.Sleep(10 * time.Second) + continue + } + if !work.Valid { + // no more work to do + fmt.Println("ran out of work") + return + } + + fmt.Println("starting work on ", work.Int64) + go doWork(db, work.Int64) + } + } + + func waitForNotification(l *pq.Listener) { + for { + select { + case <-l.Notify: + fmt.Println("received notification, new work available") + return + case <-time.After(90 * time.Second): + go func() { + l.Ping() + }() + // Check if there's more work available, just in case it takes + // a while for the Listener to notice connection loss and + // reconnect. + fmt.Println("received no work for 90 seconds, checking for new work") + return + } + } + } + + func main() { + var conninfo string = "" + + db, err := sql.Open("postgres", conninfo) + if err != nil { + panic(err) + } + + reportProblem := func(ev pq.ListenerEventType, err error) { + if err != nil { + fmt.Println(err.Error()) + } + } + + listener := pq.NewListener(conninfo, 10 * time.Second, time.Minute, reportProblem) + err = listener.Listen("getwork") + if err != nil { + panic(err) + } + + fmt.Println("entering main loop") + for { + // process all available work before waiting for notifications + getWork(db) + waitForNotification(listener) + } + } + + +*/ +package listen_example diff --git a/vendor/github.com/lib/pq/notify.go b/vendor/github.com/lib/pq/notify.go new file mode 100644 index 000000000..8cad57815 --- /dev/null +++ b/vendor/github.com/lib/pq/notify.go @@ -0,0 +1,766 @@ +package pq + +// Package pq is a pure Go Postgres driver for the database/sql package. +// This module contains support for Postgres LISTEN/NOTIFY. + +import ( + "errors" + "fmt" + "sync" + "sync/atomic" + "time" +) + +// Notification represents a single notification from the database. +type Notification struct { + // Process ID (PID) of the notifying postgres backend. + BePid int + // Name of the channel the notification was sent on. + Channel string + // Payload, or the empty string if unspecified. 
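+	// (Notification payloads require PostgreSQL 9.0 or later; older
+	// servers always deliver an empty string here.)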
+ Extra string +} + +func recvNotification(r *readBuf) *Notification { + bePid := r.int32() + channel := r.string() + extra := r.string() + + return &Notification{bePid, channel, extra} +} + +const ( + connStateIdle int32 = iota + connStateExpectResponse + connStateExpectReadyForQuery +) + +type message struct { + typ byte + err error +} + +var errListenerConnClosed = errors.New("pq: ListenerConn has been closed") + +// ListenerConn is a low-level interface for waiting for notifications. You +// should use Listener instead. +type ListenerConn struct { + // guards cn and err + connectionLock sync.Mutex + cn *conn + err error + + connState int32 + + // the sending goroutine will be holding this lock + senderLock sync.Mutex + + notificationChan chan<- *Notification + + replyChan chan message +} + +// Creates a new ListenerConn. Use NewListener instead. +func NewListenerConn(name string, notificationChan chan<- *Notification) (*ListenerConn, error) { + cn, err := Open(name) + if err != nil { + return nil, err + } + + l := &ListenerConn{ + cn: cn.(*conn), + notificationChan: notificationChan, + connState: connStateIdle, + replyChan: make(chan message, 2), + } + + go l.listenerConnMain() + + return l, nil +} + +// We can only allow one goroutine at a time to be running a query on the +// connection for various reasons, so the goroutine sending on the connection +// must be holding senderLock. +// +// Returns an error if an unrecoverable error has occurred and the ListenerConn +// should be abandoned. +func (l *ListenerConn) acquireSenderLock() error { + // we must acquire senderLock first to avoid deadlocks; see ExecSimpleQuery + l.senderLock.Lock() + + l.connectionLock.Lock() + err := l.err + l.connectionLock.Unlock() + if err != nil { + l.senderLock.Unlock() + return err + } + return nil +} + +func (l *ListenerConn) releaseSenderLock() { + l.senderLock.Unlock() +} + +// setState advances the protocol state to newState. Returns false if moving +// to that state from the current state is not allowed. +func (l *ListenerConn) setState(newState int32) bool { + var expectedState int32 + + switch newState { + case connStateIdle: + expectedState = connStateExpectReadyForQuery + case connStateExpectResponse: + expectedState = connStateIdle + case connStateExpectReadyForQuery: + expectedState = connStateExpectResponse + default: + panic(fmt.Sprintf("unexpected listenerConnState %d", newState)) + } + + return atomic.CompareAndSwapInt32(&l.connState, expectedState, newState) +} + +// Main logic is here: receive messages from the postgres backend, forward +// notifications and query replies and keep the internal state in sync with the +// protocol state. Returns when the connection has been lost, is about to go +// away or should be discarded because we couldn't agree on the state with the +// server backend. +func (l *ListenerConn) listenerConnLoop() (err error) { + defer errRecoverNoErrBadConn(&err) + + r := &readBuf{} + for { + t, err := l.cn.recvMessage(r) + if err != nil { + return err + } + + switch t { + case 'A': + // recvNotification copies all the data so we don't need to worry + // about the scratch buffer being overwritten. + l.notificationChan <- recvNotification(r) + + case 'T', 'D': + // only used by tests; ignore + + case 'E': + // We might receive an ErrorResponse even when not in a query; it + // is expected that the server will close the connection after + // that, but we should make sure that the error we display is the + // one from the stray ErrorResponse, not io.ErrUnexpectedEOF. 
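+			// If no query is in flight, the state transition below fails and
+			// the error terminates the loop (and with it the connection);
+			// otherwise the error is forwarded as the reply to the query.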
+ if !l.setState(connStateExpectReadyForQuery) { + return parseError(r) + } + l.replyChan <- message{t, parseError(r)} + + case 'C', 'I': + if !l.setState(connStateExpectReadyForQuery) { + // protocol out of sync + return fmt.Errorf("unexpected CommandComplete") + } + // ExecSimpleQuery doesn't need to know about this message + + case 'Z': + if !l.setState(connStateIdle) { + // protocol out of sync + return fmt.Errorf("unexpected ReadyForQuery") + } + l.replyChan <- message{t, nil} + + case 'N', 'S': + // ignore + default: + return fmt.Errorf("unexpected message %q from server in listenerConnLoop", t) + } + } +} + +// This is the main routine for the goroutine receiving on the database +// connection. Most of the main logic is in listenerConnLoop. +func (l *ListenerConn) listenerConnMain() { + err := l.listenerConnLoop() + + // listenerConnLoop terminated; we're done, but we still have to clean up. + // Make sure nobody tries to start any new queries by making sure the err + // pointer is set. It is important that we do not overwrite its value; a + // connection could be closed by either this goroutine or one sending on + // the connection -- whoever closes the connection is assumed to have the + // more meaningful error message (as the other one will probably get + // net.errClosed), so that goroutine sets the error we expose while the + // other error is discarded. If the connection is lost while two + // goroutines are operating on the socket, it probably doesn't matter which + // error we expose so we don't try to do anything more complex. + l.connectionLock.Lock() + if l.err == nil { + l.err = err + } + l.cn.Close() + l.connectionLock.Unlock() + + // There might be a query in-flight; make sure nobody's waiting for a + // response to it, since there's not going to be one. + close(l.replyChan) + + // let the listener know we're done + close(l.notificationChan) + + // this ListenerConn is done +} + +// Send a LISTEN query to the server. See ExecSimpleQuery. +func (l *ListenerConn) Listen(channel string) (bool, error) { + return l.ExecSimpleQuery("LISTEN " + QuoteIdentifier(channel)) +} + +// Send an UNLISTEN query to the server. See ExecSimpleQuery. +func (l *ListenerConn) Unlisten(channel string) (bool, error) { + return l.ExecSimpleQuery("UNLISTEN " + QuoteIdentifier(channel)) +} + +// Send `UNLISTEN *` to the server. See ExecSimpleQuery. +func (l *ListenerConn) UnlistenAll() (bool, error) { + return l.ExecSimpleQuery("UNLISTEN *") +} + +// Ping the remote server to make sure it's alive. Non-nil error means the +// connection has failed and should be abandoned. +func (l *ListenerConn) Ping() error { + sent, err := l.ExecSimpleQuery("") + if !sent { + return err + } + if err != nil { + // shouldn't happen + panic(err) + } + return nil +} + +// Attempt to send a query on the connection. Returns an error if sending the +// query failed, and the caller should initiate closure of this connection. +// The caller must be holding senderLock (see acquireSenderLock and +// releaseSenderLock). +func (l *ListenerConn) sendSimpleQuery(q string) (err error) { + defer errRecoverNoErrBadConn(&err) + + // must set connection state before sending the query + if !l.setState(connStateExpectResponse) { + panic("two queries running at the same time") + } + + // Can't use l.cn.writeBuf here because it uses the scratch buffer which + // might get overwritten by listenerConnLoop. 
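+	// The 'Q' byte is the frontend protocol's simple-query message type and
+	// the four zero bytes are a length placeholder (pos marks where the
+	// length goes); the placeholder is filled in when the buffer is sent.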
+	b := &writeBuf{
+		buf: []byte("Q\x00\x00\x00\x00"),
+		pos: 1,
+	}
+	b.string(q)
+	l.cn.send(b)
+
+	return nil
+}
+
+// Execute a "simple query" (i.e. one with no bindable parameters) on the
+// connection. The possible return values are:
+//   1) "executed" is true; the query was executed to completion on the
+//      database server. If the query failed, err will be set to the error
+//      returned by the database, otherwise err will be nil.
+//   2) If "executed" is false, the query could not be executed on the remote
+//      server. err will be non-nil.
+//
+// After a call to ExecSimpleQuery has returned an executed=false value, the
+// connection has either been closed or will be closed shortly thereafter, and
+// all subsequently executed queries will return an error.
+func (l *ListenerConn) ExecSimpleQuery(q string) (executed bool, err error) {
+	if err = l.acquireSenderLock(); err != nil {
+		return false, err
+	}
+	defer l.releaseSenderLock()
+
+	err = l.sendSimpleQuery(q)
+	if err != nil {
+		// We can't know what state the protocol is in, so we need to abandon
+		// this connection.
+		l.connectionLock.Lock()
+		// Set the error pointer if it hasn't been set already; see
+		// listenerConnMain.
+		if l.err == nil {
+			l.err = err
+		}
+		l.connectionLock.Unlock()
+		l.cn.c.Close()
+		return false, err
+	}
+
+	// now we just wait for a reply..
+	for {
+		m, ok := <-l.replyChan
+		if !ok {
+			// We lost the connection to the server, so don't bother waiting
+			// for a response. err should have been set already.
+			l.connectionLock.Lock()
+			err := l.err
+			l.connectionLock.Unlock()
+			return false, err
+		}
+		switch m.typ {
+		case 'Z':
+			// sanity check
+			if m.err != nil {
+				panic("m.err != nil")
+			}
+			// done; err might or might not be set
+			return true, err
+
+		case 'E':
+			// sanity check
+			if m.err == nil {
+				panic("m.err == nil")
+			}
+			// server responded with an error; ReadyForQuery to follow
+			err = m.err
+
+		default:
+			return false, fmt.Errorf("unknown response for simple query: %q", m.typ)
+		}
+	}
+}
+
+func (l *ListenerConn) Close() error {
+	l.connectionLock.Lock()
+	if l.err != nil {
+		l.connectionLock.Unlock()
+		return errListenerConnClosed
+	}
+	l.err = errListenerConnClosed
+	l.connectionLock.Unlock()
+	// We can't send anything on the connection without holding senderLock.
+	// Simply close the net.Conn to wake up everyone operating on it.
+	return l.cn.c.Close()
+}
+
+// Err() returns the reason the connection was closed. It is not safe to call
+// this function until l.Notify has been closed.
+func (l *ListenerConn) Err() error {
+	return l.err
+}
+
+var errListenerClosed = errors.New("pq: Listener has been closed")
+
+var ErrChannelAlreadyOpen = errors.New("pq: channel is already open")
+var ErrChannelNotOpen = errors.New("pq: channel is not open")
+
+type ListenerEventType int
+
+const (
+	// Emitted only when the database connection has been initially
+	// initialized. err will always be nil.
+	ListenerEventConnected ListenerEventType = iota
+
+	// Emitted after a database connection has been lost, either because of an
+	// error or because Close has been called. err will be set to the reason
+	// the database connection was lost.
+	ListenerEventDisconnected
+
+	// Emitted after a database connection has been re-established after
+	// connection loss. err will always be nil. After this event has been
+	// emitted, a nil pq.Notification is sent on the Listener.Notify channel.
+	ListenerEventReconnected
+
+	// Emitted after a connection to the database was attempted, but failed.
+	// err will be set to an error describing why the connection attempt did
+	// not succeed.
+	ListenerEventConnectionAttemptFailed
+)
+
+type EventCallbackType func(event ListenerEventType, err error)
+
+// Listener provides an interface for listening to notifications from a
+// PostgreSQL database. For general usage information, see section
+// "Notifications".
+//
+// Listener can safely be used from concurrently running goroutines.
+type Listener struct {
+	// Channel for receiving notifications from the database. In some cases a
+	// nil value will be sent. See section "Notifications" above.
+	Notify chan *Notification
+
+	name                 string
+	minReconnectInterval time.Duration
+	maxReconnectInterval time.Duration
+	eventCallback        EventCallbackType
+
+	lock                 sync.Mutex
+	isClosed             bool
+	reconnectCond        *sync.Cond
+	cn                   *ListenerConn
+	connNotificationChan <-chan *Notification
+	channels             map[string]struct{}
+}
+
+// NewListener creates a new database connection dedicated to LISTEN / NOTIFY.
+//
+// name should be set to a connection string to be used to establish the
+// database connection (see section "Connection String Parameters" above).
+//
+// minReconnectInterval controls the duration to wait before trying to
+// re-establish the database connection after connection loss. After each
+// consecutive failure this interval is doubled, until maxReconnectInterval is
+// reached. Successfully completing the connection establishment procedure
+// resets the interval back to minReconnectInterval.
+//
+// The last parameter eventCallback can be set to a function which will be
+// called by the Listener when the state of the underlying database connection
+// changes. This callback will be called by the goroutine which dispatches the
+// notifications over the Notify channel, so you should try to avoid doing
+// potentially time-consuming operations from the callback.
+func NewListener(name string,
+	minReconnectInterval time.Duration,
+	maxReconnectInterval time.Duration,
+	eventCallback EventCallbackType) *Listener {
+	l := &Listener{
+		name:                 name,
+		minReconnectInterval: minReconnectInterval,
+		maxReconnectInterval: maxReconnectInterval,
+		eventCallback:        eventCallback,
+
+		channels: make(map[string]struct{}),
+
+		Notify: make(chan *Notification, 32),
+	}
+	l.reconnectCond = sync.NewCond(&l.lock)
+
+	go l.listenerMain()
+
+	return l
+}
+
+// Returns the notification channel for this listener. This is the same
+// channel as Notify, and will not be recreated during the lifetime of the
+// Listener.
+func (l *Listener) NotificationChannel() <-chan *Notification {
+	return l.Notify
+}
+
+// Listen starts listening for notifications on a channel. Calls to this
+// function will block until an acknowledgement has been received from the
+// server. Note that Listener automatically re-establishes the connection
+// after connection loss, so this function may block indefinitely if the
+// connection cannot be re-established.
+//
+// Listen will only fail in three conditions:
+//   1) The channel is already open. The returned error will be
+//      ErrChannelAlreadyOpen.
+//   2) The query was executed on the remote server, but PostgreSQL returned an
+//      error message in response to the query. The returned error will be a
+//      pq.Error containing the information the server supplied.
+//   3) Close is called on the Listener before the request could be completed.
+//
+// The channel name is case-sensitive.
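+//
+// A rough usage sketch (the connection string, intervals, and channel name
+// here are illustrative, not part of the API):
+//
+//	listener := NewListener("dbname=mydb sslmode=disable",
+//		time.Second, time.Minute, nil)
+//	if err := listener.Listen("jobs"); err != nil {
+//		log.Fatal(err)
+//	}
+//	for n := range listener.Notify {
+//		// n is nil right after a reconnect; treat that as "state unknown"
+//		fmt.Println(n)
+//	}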
+func (l *Listener) Listen(channel string) error {
+	l.lock.Lock()
+	defer l.lock.Unlock()
+
+	if l.isClosed {
+		return errListenerClosed
+	}
+
+	// The server allows you to issue a LISTEN on a channel which is already
+	// open, but it seems useful to be able to detect this case and spot
+	// mistakes in application logic. If the application genuinely doesn't
+	// care, it can check the exported error and ignore it.
+	_, exists := l.channels[channel]
+	if exists {
+		return ErrChannelAlreadyOpen
+	}
+
+	if l.cn != nil {
+		// If gotResponse is true but error is set, the query was executed on
+		// the remote server, but resulted in an error. This should be
+		// relatively rare, so it's fine if we just pass the error to our
+		// caller. However, if gotResponse is false, we could not complete the
+		// query on the remote server and our underlying connection is about
+		// to go away, so we only add relname to l.channels, and wait for
+		// resync() to take care of the rest.
+		gotResponse, err := l.cn.Listen(channel)
+		if gotResponse && err != nil {
+			return err
+		}
+	}
+
+	l.channels[channel] = struct{}{}
+	for l.cn == nil {
+		l.reconnectCond.Wait()
+		// we let go of the mutex for a while
+		if l.isClosed {
+			return errListenerClosed
+		}
+	}
+
+	return nil
+}
+
+// Unlisten removes a channel from the Listener's channel list. Returns
+// ErrChannelNotOpen if the Listener is not listening on the specified channel.
+// Returns immediately with no error if there is no connection. Note that you
+// might still get notifications for this channel even after Unlisten has
+// returned.
+//
+// The channel name is case-sensitive.
+func (l *Listener) Unlisten(channel string) error {
+	l.lock.Lock()
+	defer l.lock.Unlock()
+
+	if l.isClosed {
+		return errListenerClosed
+	}
+
+	// Similarly to LISTEN, this is not an error in Postgres, but it seems
+	// useful to distinguish it from the normal conditions.
+	_, exists := l.channels[channel]
+	if !exists {
+		return ErrChannelNotOpen
+	}
+
+	if l.cn != nil {
+		// Similarly to Listen (see comment in that function), the caller
+		// should only be bothered with an error if it came from the backend as
+		// a response to our query.
+		gotResponse, err := l.cn.Unlisten(channel)
+		if gotResponse && err != nil {
+			return err
+		}
+	}
+
+	// Don't bother waiting for resync if there's no connection.
+	delete(l.channels, channel)
+	return nil
+}
+
+// UnlistenAll removes all channels from the Listener's channel list. Returns
+// immediately with no error if there is no connection. Note that you might
+// still get notifications for any of the deleted channels even after
+// UnlistenAll has returned.
+func (l *Listener) UnlistenAll() error {
+	l.lock.Lock()
+	defer l.lock.Unlock()
+
+	if l.isClosed {
+		return errListenerClosed
+	}
+
+	if l.cn != nil {
+		// Similarly to Listen (see comment in that function), the caller
+		// should only be bothered with an error if it came from the backend as
+		// a response to our query.
+		gotResponse, err := l.cn.UnlistenAll()
+		if gotResponse && err != nil {
+			return err
+		}
+	}
+
+	// Don't bother waiting for resync if there's no connection.
+	l.channels = make(map[string]struct{})
+	return nil
+}
+
+// Ping the remote server to make sure it's alive. Non-nil return value means
+// that there is no active connection.
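+// (Under the hood this delegates to ListenerConn.Ping, which sends an empty
+// simple query; the server answers with EmptyQueryResponse followed by
+// ReadyForQuery, proving the link is still alive.)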
+func (l *Listener) Ping() error { + l.lock.Lock() + defer l.lock.Unlock() + + if l.isClosed { + return errListenerClosed + } + if l.cn == nil { + return errors.New("no connection") + } + + return l.cn.Ping() +} + +// Clean up after losing the server connection. Returns l.cn.Err(), which +// should have the reason the connection was lost. +func (l *Listener) disconnectCleanup() error { + l.lock.Lock() + defer l.lock.Unlock() + + // sanity check; can't look at Err() until the channel has been closed + select { + case _, ok := <-l.connNotificationChan: + if ok { + panic("connNotificationChan not closed") + } + default: + panic("connNotificationChan not closed") + } + + err := l.cn.Err() + l.cn.Close() + l.cn = nil + return err +} + +// Synchronize the list of channels we want to be listening on with the server +// after the connection has been established. +func (l *Listener) resync(cn *ListenerConn, notificationChan <-chan *Notification) error { + doneChan := make(chan error) + go func() { + for channel := range l.channels { + // If we got a response, return that error to our caller as it's + // going to be more descriptive than cn.Err(). + gotResponse, err := cn.Listen(channel) + if gotResponse && err != nil { + doneChan <- err + return + } + + // If we couldn't reach the server, wait for notificationChan to + // close and then return the error message from the connection, as + // per ListenerConn's interface. + if err != nil { + for _ = range notificationChan { + } + doneChan <- cn.Err() + return + } + } + doneChan <- nil + }() + + // Ignore notifications while synchronization is going on to avoid + // deadlocks. We have to send a nil notification over Notify anyway as + // we can't possibly know which notifications (if any) were lost while + // the connection was down, so there's no reason to try and process + // these messages at all. + for { + select { + case _, ok := <-notificationChan: + if !ok { + notificationChan = nil + } + + case err := <-doneChan: + return err + } + } +} + +// caller should NOT be holding l.lock +func (l *Listener) closed() bool { + l.lock.Lock() + defer l.lock.Unlock() + + return l.isClosed +} + +func (l *Listener) connect() error { + notificationChan := make(chan *Notification, 32) + cn, err := NewListenerConn(l.name, notificationChan) + if err != nil { + return err + } + + l.lock.Lock() + defer l.lock.Unlock() + + err = l.resync(cn, notificationChan) + if err != nil { + cn.Close() + return err + } + + l.cn = cn + l.connNotificationChan = notificationChan + l.reconnectCond.Broadcast() + + return nil +} + +// Close disconnects the Listener from the database and shuts it down. +// Subsequent calls to its methods will return an error. Close returns an +// error if the connection has already been closed. +func (l *Listener) Close() error { + l.lock.Lock() + defer l.lock.Unlock() + + if l.isClosed { + return errListenerClosed + } + + if l.cn != nil { + l.cn.Close() + } + l.isClosed = true + + return nil +} + +func (l *Listener) emitEvent(event ListenerEventType, err error) { + if l.eventCallback != nil { + l.eventCallback(event, err) + } +} + +// Main logic here: maintain a connection to the server when possible, wait +// for notifications and emit events. 
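+//
+// A sketch of the backoff behavior implemented below: after each failed
+// connection attempt the wait doubles (minReconnectInterval, 2x, 4x, ...)
+// until it reaches maxReconnectInterval; a successful connect resets it back
+// to minReconnectInterval.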
+func (l *Listener) listenerConnLoop() { + var nextReconnect time.Time + + reconnectInterval := l.minReconnectInterval + for { + for { + err := l.connect() + if err == nil { + break + } + + if l.closed() { + return + } + l.emitEvent(ListenerEventConnectionAttemptFailed, err) + + time.Sleep(reconnectInterval) + reconnectInterval *= 2 + if reconnectInterval > l.maxReconnectInterval { + reconnectInterval = l.maxReconnectInterval + } + } + + if nextReconnect.IsZero() { + l.emitEvent(ListenerEventConnected, nil) + } else { + l.emitEvent(ListenerEventReconnected, nil) + l.Notify <- nil + } + + reconnectInterval = l.minReconnectInterval + nextReconnect = time.Now().Add(reconnectInterval) + + for { + notification, ok := <-l.connNotificationChan + if !ok { + // lost connection, loop again + break + } + l.Notify <- notification + } + + err := l.disconnectCleanup() + if l.closed() { + return + } + l.emitEvent(ListenerEventDisconnected, err) + + time.Sleep(nextReconnect.Sub(time.Now())) + } +} + +func (l *Listener) listenerMain() { + l.listenerConnLoop() + close(l.Notify) +} diff --git a/vendor/github.com/lib/pq/notify_test.go b/vendor/github.com/lib/pq/notify_test.go new file mode 100644 index 000000000..fe8941a4e --- /dev/null +++ b/vendor/github.com/lib/pq/notify_test.go @@ -0,0 +1,574 @@ +package pq + +import ( + "errors" + "fmt" + "io" + "os" + "runtime" + "sync" + "sync/atomic" + "testing" + "time" +) + +var errNilNotification = errors.New("nil notification") + +func expectNotification(t *testing.T, ch <-chan *Notification, relname string, extra string) error { + select { + case n := <-ch: + if n == nil { + return errNilNotification + } + if n.Channel != relname || n.Extra != extra { + return fmt.Errorf("unexpected notification %v", n) + } + return nil + case <-time.After(1500 * time.Millisecond): + return fmt.Errorf("timeout") + } +} + +func expectNoNotification(t *testing.T, ch <-chan *Notification) error { + select { + case n := <-ch: + return fmt.Errorf("unexpected notification %v", n) + case <-time.After(100 * time.Millisecond): + return nil + } +} + +func expectEvent(t *testing.T, eventch <-chan ListenerEventType, et ListenerEventType) error { + select { + case e := <-eventch: + if e != et { + return fmt.Errorf("unexpected event %v", e) + } + return nil + case <-time.After(1500 * time.Millisecond): + panic("expectEvent timeout") + } +} + +func expectNoEvent(t *testing.T, eventch <-chan ListenerEventType) error { + select { + case e := <-eventch: + return fmt.Errorf("unexpected event %v", e) + case <-time.After(100 * time.Millisecond): + return nil + } +} + +func newTestListenerConn(t *testing.T) (*ListenerConn, <-chan *Notification) { + datname := os.Getenv("PGDATABASE") + sslmode := os.Getenv("PGSSLMODE") + + if datname == "" { + os.Setenv("PGDATABASE", "pqgotest") + } + + if sslmode == "" { + os.Setenv("PGSSLMODE", "disable") + } + + notificationChan := make(chan *Notification) + l, err := NewListenerConn("", notificationChan) + if err != nil { + t.Fatal(err) + } + + return l, notificationChan +} + +func TestNewListenerConn(t *testing.T) { + l, _ := newTestListenerConn(t) + + defer l.Close() +} + +func TestConnListen(t *testing.T) { + l, channel := newTestListenerConn(t) + + defer l.Close() + + db := openTestConn(t) + defer db.Close() + + ok, err := l.Listen("notify_test") + if !ok || err != nil { + t.Fatal(err) + } + + _, err = db.Exec("NOTIFY notify_test") + if err != nil { + t.Fatal(err) + } + + err = expectNotification(t, channel, "notify_test", "") + if err != nil { + t.Fatal(err) + 
} +} + +func TestConnUnlisten(t *testing.T) { + l, channel := newTestListenerConn(t) + + defer l.Close() + + db := openTestConn(t) + defer db.Close() + + ok, err := l.Listen("notify_test") + if !ok || err != nil { + t.Fatal(err) + } + + _, err = db.Exec("NOTIFY notify_test") + + err = expectNotification(t, channel, "notify_test", "") + if err != nil { + t.Fatal(err) + } + + ok, err = l.Unlisten("notify_test") + if !ok || err != nil { + t.Fatal(err) + } + + _, err = db.Exec("NOTIFY notify_test") + if err != nil { + t.Fatal(err) + } + + err = expectNoNotification(t, channel) + if err != nil { + t.Fatal(err) + } +} + +func TestConnUnlistenAll(t *testing.T) { + l, channel := newTestListenerConn(t) + + defer l.Close() + + db := openTestConn(t) + defer db.Close() + + ok, err := l.Listen("notify_test") + if !ok || err != nil { + t.Fatal(err) + } + + _, err = db.Exec("NOTIFY notify_test") + + err = expectNotification(t, channel, "notify_test", "") + if err != nil { + t.Fatal(err) + } + + ok, err = l.UnlistenAll() + if !ok || err != nil { + t.Fatal(err) + } + + _, err = db.Exec("NOTIFY notify_test") + if err != nil { + t.Fatal(err) + } + + err = expectNoNotification(t, channel) + if err != nil { + t.Fatal(err) + } +} + +func TestConnClose(t *testing.T) { + l, _ := newTestListenerConn(t) + defer l.Close() + + err := l.Close() + if err != nil { + t.Fatal(err) + } + err = l.Close() + if err != errListenerConnClosed { + t.Fatalf("expected errListenerConnClosed; got %v", err) + } +} + +func TestConnPing(t *testing.T) { + l, _ := newTestListenerConn(t) + defer l.Close() + err := l.Ping() + if err != nil { + t.Fatal(err) + } + err = l.Close() + if err != nil { + t.Fatal(err) + } + err = l.Ping() + if err != errListenerConnClosed { + t.Fatalf("expected errListenerConnClosed; got %v", err) + } +} + +// Test for deadlock where a query fails while another one is queued +func TestConnExecDeadlock(t *testing.T) { + l, _ := newTestListenerConn(t) + defer l.Close() + + var wg sync.WaitGroup + wg.Add(2) + + go func() { + l.ExecSimpleQuery("SELECT pg_sleep(60)") + wg.Done() + }() + runtime.Gosched() + go func() { + l.ExecSimpleQuery("SELECT 1") + wg.Done() + }() + // give the two goroutines some time to get into position + runtime.Gosched() + // calls Close on the net.Conn; equivalent to a network failure + l.Close() + + var done int32 = 0 + go func() { + time.Sleep(10 * time.Second) + if atomic.LoadInt32(&done) != 1 { + panic("timed out") + } + }() + wg.Wait() + atomic.StoreInt32(&done, 1) +} + +// Test for ListenerConn being closed while a slow query is executing +func TestListenerConnCloseWhileQueryIsExecuting(t *testing.T) { + l, _ := newTestListenerConn(t) + defer l.Close() + + var wg sync.WaitGroup + wg.Add(1) + + go func() { + sent, err := l.ExecSimpleQuery("SELECT pg_sleep(60)") + if sent { + panic("expected sent=false") + } + // could be any of a number of errors + if err == nil { + panic("expected error") + } + wg.Done() + }() + // give the above goroutine some time to get into position + runtime.Gosched() + err := l.Close() + if err != nil { + t.Fatal(err) + } + var done int32 = 0 + go func() { + time.Sleep(10 * time.Second) + if atomic.LoadInt32(&done) != 1 { + panic("timed out") + } + }() + wg.Wait() + atomic.StoreInt32(&done, 1) +} + +func TestNotifyExtra(t *testing.T) { + db := openTestConn(t) + defer db.Close() + + if getServerVersion(t, db) < 90000 { + t.Skip("skipping NOTIFY payload test since the server does not appear to support it") + } + + l, channel := newTestListenerConn(t) + defer l.Close() 
+ + ok, err := l.Listen("notify_test") + if !ok || err != nil { + t.Fatal(err) + } + + _, err = db.Exec("NOTIFY notify_test, 'something'") + if err != nil { + t.Fatal(err) + } + + err = expectNotification(t, channel, "notify_test", "something") + if err != nil { + t.Fatal(err) + } +} + +// create a new test listener and also set the timeouts +func newTestListenerTimeout(t *testing.T, min time.Duration, max time.Duration) (*Listener, <-chan ListenerEventType) { + datname := os.Getenv("PGDATABASE") + sslmode := os.Getenv("PGSSLMODE") + + if datname == "" { + os.Setenv("PGDATABASE", "pqgotest") + } + + if sslmode == "" { + os.Setenv("PGSSLMODE", "disable") + } + + eventch := make(chan ListenerEventType, 16) + l := NewListener("", min, max, func(t ListenerEventType, err error) { eventch <- t }) + err := expectEvent(t, eventch, ListenerEventConnected) + if err != nil { + t.Fatal(err) + } + return l, eventch +} + +func newTestListener(t *testing.T) (*Listener, <-chan ListenerEventType) { + return newTestListenerTimeout(t, time.Hour, time.Hour) +} + +func TestListenerListen(t *testing.T) { + l, _ := newTestListener(t) + defer l.Close() + + db := openTestConn(t) + defer db.Close() + + err := l.Listen("notify_listen_test") + if err != nil { + t.Fatal(err) + } + + _, err = db.Exec("NOTIFY notify_listen_test") + if err != nil { + t.Fatal(err) + } + + err = expectNotification(t, l.Notify, "notify_listen_test", "") + if err != nil { + t.Fatal(err) + } +} + +func TestListenerUnlisten(t *testing.T) { + l, _ := newTestListener(t) + defer l.Close() + + db := openTestConn(t) + defer db.Close() + + err := l.Listen("notify_listen_test") + if err != nil { + t.Fatal(err) + } + + _, err = db.Exec("NOTIFY notify_listen_test") + if err != nil { + t.Fatal(err) + } + + err = l.Unlisten("notify_listen_test") + if err != nil { + t.Fatal(err) + } + + err = expectNotification(t, l.Notify, "notify_listen_test", "") + if err != nil { + t.Fatal(err) + } + + _, err = db.Exec("NOTIFY notify_listen_test") + if err != nil { + t.Fatal(err) + } + + err = expectNoNotification(t, l.Notify) + if err != nil { + t.Fatal(err) + } +} + +func TestListenerUnlistenAll(t *testing.T) { + l, _ := newTestListener(t) + defer l.Close() + + db := openTestConn(t) + defer db.Close() + + err := l.Listen("notify_listen_test") + if err != nil { + t.Fatal(err) + } + + _, err = db.Exec("NOTIFY notify_listen_test") + if err != nil { + t.Fatal(err) + } + + err = l.UnlistenAll() + if err != nil { + t.Fatal(err) + } + + err = expectNotification(t, l.Notify, "notify_listen_test", "") + if err != nil { + t.Fatal(err) + } + + _, err = db.Exec("NOTIFY notify_listen_test") + if err != nil { + t.Fatal(err) + } + + err = expectNoNotification(t, l.Notify) + if err != nil { + t.Fatal(err) + } +} + +func TestListenerFailedQuery(t *testing.T) { + l, eventch := newTestListener(t) + defer l.Close() + + db := openTestConn(t) + defer db.Close() + + err := l.Listen("notify_listen_test") + if err != nil { + t.Fatal(err) + } + + _, err = db.Exec("NOTIFY notify_listen_test") + if err != nil { + t.Fatal(err) + } + + err = expectNotification(t, l.Notify, "notify_listen_test", "") + if err != nil { + t.Fatal(err) + } + + // shouldn't cause a disconnect + ok, err := l.cn.ExecSimpleQuery("SELECT error") + if !ok { + t.Fatalf("could not send query to server: %v", err) + } + _, ok = err.(PGError) + if !ok { + t.Fatalf("unexpected error %v", err) + } + err = expectNoEvent(t, eventch) + if err != nil { + t.Fatal(err) + } + + // should still work + _, err = db.Exec("NOTIFY 
notify_listen_test") + if err != nil { + t.Fatal(err) + } + + err = expectNotification(t, l.Notify, "notify_listen_test", "") + if err != nil { + t.Fatal(err) + } +} + +func TestListenerReconnect(t *testing.T) { + l, eventch := newTestListenerTimeout(t, 20*time.Millisecond, time.Hour) + defer l.Close() + + db := openTestConn(t) + defer db.Close() + + err := l.Listen("notify_listen_test") + if err != nil { + t.Fatal(err) + } + + _, err = db.Exec("NOTIFY notify_listen_test") + if err != nil { + t.Fatal(err) + } + + err = expectNotification(t, l.Notify, "notify_listen_test", "") + if err != nil { + t.Fatal(err) + } + + // kill the connection and make sure it comes back up + ok, err := l.cn.ExecSimpleQuery("SELECT pg_terminate_backend(pg_backend_pid())") + if ok { + t.Fatalf("could not kill the connection: %v", err) + } + if err != io.EOF { + t.Fatalf("unexpected error %v", err) + } + err = expectEvent(t, eventch, ListenerEventDisconnected) + if err != nil { + t.Fatal(err) + } + err = expectEvent(t, eventch, ListenerEventReconnected) + if err != nil { + t.Fatal(err) + } + + // should still work + _, err = db.Exec("NOTIFY notify_listen_test") + if err != nil { + t.Fatal(err) + } + + // should get nil after Reconnected + err = expectNotification(t, l.Notify, "", "") + if err != errNilNotification { + t.Fatal(err) + } + + err = expectNotification(t, l.Notify, "notify_listen_test", "") + if err != nil { + t.Fatal(err) + } +} + +func TestListenerClose(t *testing.T) { + l, _ := newTestListenerTimeout(t, 20*time.Millisecond, time.Hour) + defer l.Close() + + err := l.Close() + if err != nil { + t.Fatal(err) + } + err = l.Close() + if err != errListenerClosed { + t.Fatalf("expected errListenerClosed; got %v", err) + } +} + +func TestListenerPing(t *testing.T) { + l, _ := newTestListenerTimeout(t, 20*time.Millisecond, time.Hour) + defer l.Close() + + err := l.Ping() + if err != nil { + t.Fatal(err) + } + + err = l.Close() + if err != nil { + t.Fatal(err) + } + + err = l.Ping() + if err != errListenerClosed { + t.Fatalf("expected errListenerClosed; got %v", err) + } +} diff --git a/vendor/github.com/lib/pq/oid/doc.go b/vendor/github.com/lib/pq/oid/doc.go new file mode 100644 index 000000000..caaede248 --- /dev/null +++ b/vendor/github.com/lib/pq/oid/doc.go @@ -0,0 +1,6 @@ +// Package oid contains OID constants +// as defined by the Postgres server. +package oid + +// Oid is a Postgres Object ID. +type Oid uint32 diff --git a/vendor/github.com/lib/pq/oid/gen.go b/vendor/github.com/lib/pq/oid/gen.go new file mode 100644 index 000000000..cd4aea808 --- /dev/null +++ b/vendor/github.com/lib/pq/oid/gen.go @@ -0,0 +1,74 @@ +// +build ignore + +// Generate the table of OID values +// Run with 'go run gen.go'. 
+package main + +import ( + "database/sql" + "fmt" + "log" + "os" + "os/exec" + + _ "github.com/lib/pq" +) + +func main() { + datname := os.Getenv("PGDATABASE") + sslmode := os.Getenv("PGSSLMODE") + + if datname == "" { + os.Setenv("PGDATABASE", "pqgotest") + } + + if sslmode == "" { + os.Setenv("PGSSLMODE", "disable") + } + + db, err := sql.Open("postgres", "") + if err != nil { + log.Fatal(err) + } + cmd := exec.Command("gofmt") + cmd.Stderr = os.Stderr + w, err := cmd.StdinPipe() + if err != nil { + log.Fatal(err) + } + f, err := os.Create("types.go") + if err != nil { + log.Fatal(err) + } + cmd.Stdout = f + err = cmd.Start() + if err != nil { + log.Fatal(err) + } + fmt.Fprintln(w, "// generated by 'go run gen.go'; do not edit") + fmt.Fprintln(w, "\npackage oid") + fmt.Fprintln(w, "const (") + rows, err := db.Query(` + SELECT typname, oid + FROM pg_type WHERE oid < 10000 + ORDER BY oid; + `) + if err != nil { + log.Fatal(err) + } + var name string + var oid int + for rows.Next() { + err = rows.Scan(&name, &oid) + if err != nil { + log.Fatal(err) + } + fmt.Fprintf(w, "T_%s Oid = %d\n", name, oid) + } + if err = rows.Err(); err != nil { + log.Fatal(err) + } + fmt.Fprintln(w, ")") + w.Close() + cmd.Wait() +} diff --git a/vendor/github.com/lib/pq/oid/types.go b/vendor/github.com/lib/pq/oid/types.go new file mode 100644 index 000000000..03df05a61 --- /dev/null +++ b/vendor/github.com/lib/pq/oid/types.go @@ -0,0 +1,161 @@ +// generated by 'go run gen.go'; do not edit + +package oid + +const ( + T_bool Oid = 16 + T_bytea Oid = 17 + T_char Oid = 18 + T_name Oid = 19 + T_int8 Oid = 20 + T_int2 Oid = 21 + T_int2vector Oid = 22 + T_int4 Oid = 23 + T_regproc Oid = 24 + T_text Oid = 25 + T_oid Oid = 26 + T_tid Oid = 27 + T_xid Oid = 28 + T_cid Oid = 29 + T_oidvector Oid = 30 + T_pg_type Oid = 71 + T_pg_attribute Oid = 75 + T_pg_proc Oid = 81 + T_pg_class Oid = 83 + T_json Oid = 114 + T_xml Oid = 142 + T__xml Oid = 143 + T_pg_node_tree Oid = 194 + T__json Oid = 199 + T_smgr Oid = 210 + T_point Oid = 600 + T_lseg Oid = 601 + T_path Oid = 602 + T_box Oid = 603 + T_polygon Oid = 604 + T_line Oid = 628 + T__line Oid = 629 + T_cidr Oid = 650 + T__cidr Oid = 651 + T_float4 Oid = 700 + T_float8 Oid = 701 + T_abstime Oid = 702 + T_reltime Oid = 703 + T_tinterval Oid = 704 + T_unknown Oid = 705 + T_circle Oid = 718 + T__circle Oid = 719 + T_money Oid = 790 + T__money Oid = 791 + T_macaddr Oid = 829 + T_inet Oid = 869 + T__bool Oid = 1000 + T__bytea Oid = 1001 + T__char Oid = 1002 + T__name Oid = 1003 + T__int2 Oid = 1005 + T__int2vector Oid = 1006 + T__int4 Oid = 1007 + T__regproc Oid = 1008 + T__text Oid = 1009 + T__tid Oid = 1010 + T__xid Oid = 1011 + T__cid Oid = 1012 + T__oidvector Oid = 1013 + T__bpchar Oid = 1014 + T__varchar Oid = 1015 + T__int8 Oid = 1016 + T__point Oid = 1017 + T__lseg Oid = 1018 + T__path Oid = 1019 + T__box Oid = 1020 + T__float4 Oid = 1021 + T__float8 Oid = 1022 + T__abstime Oid = 1023 + T__reltime Oid = 1024 + T__tinterval Oid = 1025 + T__polygon Oid = 1027 + T__oid Oid = 1028 + T_aclitem Oid = 1033 + T__aclitem Oid = 1034 + T__macaddr Oid = 1040 + T__inet Oid = 1041 + T_bpchar Oid = 1042 + T_varchar Oid = 1043 + T_date Oid = 1082 + T_time Oid = 1083 + T_timestamp Oid = 1114 + T__timestamp Oid = 1115 + T__date Oid = 1182 + T__time Oid = 1183 + T_timestamptz Oid = 1184 + T__timestamptz Oid = 1185 + T_interval Oid = 1186 + T__interval Oid = 1187 + T__numeric Oid = 1231 + T_pg_database Oid = 1248 + T__cstring Oid = 1263 + T_timetz Oid = 1266 + T__timetz Oid = 1270 + T_bit Oid = 1560 
+ T__bit Oid = 1561 + T_varbit Oid = 1562 + T__varbit Oid = 1563 + T_numeric Oid = 1700 + T_refcursor Oid = 1790 + T__refcursor Oid = 2201 + T_regprocedure Oid = 2202 + T_regoper Oid = 2203 + T_regoperator Oid = 2204 + T_regclass Oid = 2205 + T_regtype Oid = 2206 + T__regprocedure Oid = 2207 + T__regoper Oid = 2208 + T__regoperator Oid = 2209 + T__regclass Oid = 2210 + T__regtype Oid = 2211 + T_record Oid = 2249 + T_cstring Oid = 2275 + T_any Oid = 2276 + T_anyarray Oid = 2277 + T_void Oid = 2278 + T_trigger Oid = 2279 + T_language_handler Oid = 2280 + T_internal Oid = 2281 + T_opaque Oid = 2282 + T_anyelement Oid = 2283 + T__record Oid = 2287 + T_anynonarray Oid = 2776 + T_pg_authid Oid = 2842 + T_pg_auth_members Oid = 2843 + T__txid_snapshot Oid = 2949 + T_uuid Oid = 2950 + T__uuid Oid = 2951 + T_txid_snapshot Oid = 2970 + T_fdw_handler Oid = 3115 + T_anyenum Oid = 3500 + T_tsvector Oid = 3614 + T_tsquery Oid = 3615 + T_gtsvector Oid = 3642 + T__tsvector Oid = 3643 + T__gtsvector Oid = 3644 + T__tsquery Oid = 3645 + T_regconfig Oid = 3734 + T__regconfig Oid = 3735 + T_regdictionary Oid = 3769 + T__regdictionary Oid = 3770 + T_anyrange Oid = 3831 + T_event_trigger Oid = 3838 + T_int4range Oid = 3904 + T__int4range Oid = 3905 + T_numrange Oid = 3906 + T__numrange Oid = 3907 + T_tsrange Oid = 3908 + T__tsrange Oid = 3909 + T_tstzrange Oid = 3910 + T__tstzrange Oid = 3911 + T_daterange Oid = 3912 + T__daterange Oid = 3913 + T_int8range Oid = 3926 + T__int8range Oid = 3927 +) diff --git a/vendor/github.com/lib/pq/ssl_test.go b/vendor/github.com/lib/pq/ssl_test.go new file mode 100644 index 000000000..932b336f5 --- /dev/null +++ b/vendor/github.com/lib/pq/ssl_test.go @@ -0,0 +1,226 @@ +package pq + +// This file contains SSL tests + +import ( + _ "crypto/sha256" + "crypto/x509" + "database/sql" + "fmt" + "os" + "path/filepath" + "testing" +) + +func maybeSkipSSLTests(t *testing.T) { + // Require some special variables for testing certificates + if os.Getenv("PQSSLCERTTEST_PATH") == "" { + t.Skip("PQSSLCERTTEST_PATH not set, skipping SSL tests") + } + + value := os.Getenv("PQGOSSLTESTS") + if value == "" || value == "0" { + t.Skip("PQGOSSLTESTS not enabled, skipping SSL tests") + } else if value != "1" { + t.Fatalf("unexpected value %q for PQGOSSLTESTS", value) + } +} + +func openSSLConn(t *testing.T, conninfo string) (*sql.DB, error) { + db, err := openTestConnConninfo(conninfo) + if err != nil { + // should never fail + t.Fatal(err) + } + // Do something with the connection to see whether it's working or not. 
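+	// (Begin/Rollback is used here as a cheap round trip: it touches no data
+	// but forces the lazily-opened database/sql connection to actually reach
+	// the server.)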
+ tx, err := db.Begin() + if err == nil { + return db, tx.Rollback() + } + _ = db.Close() + return nil, err +} + +func checkSSLSetup(t *testing.T, conninfo string) { + db, err := openSSLConn(t, conninfo) + if err == nil { + db.Close() + t.Fatalf("expected error with conninfo=%q", conninfo) + } +} + +// Connect over SSL and run a simple query to test the basics +func TestSSLConnection(t *testing.T) { + maybeSkipSSLTests(t) + // Environment sanity check: should fail without SSL + checkSSLSetup(t, "sslmode=disable user=pqgossltest") + + db, err := openSSLConn(t, "sslmode=require user=pqgossltest") + if err != nil { + t.Fatal(err) + } + rows, err := db.Query("SELECT 1") + if err != nil { + t.Fatal(err) + } + rows.Close() +} + +// Test sslmode=verify-full +func TestSSLVerifyFull(t *testing.T) { + maybeSkipSSLTests(t) + // Environment sanity check: should fail without SSL + checkSSLSetup(t, "sslmode=disable user=pqgossltest") + + // Not OK according to the system CA + _, err := openSSLConn(t, "host=postgres sslmode=verify-full user=pqgossltest") + if err == nil { + t.Fatal("expected error") + } + _, ok := err.(x509.UnknownAuthorityError) + if !ok { + t.Fatalf("expected x509.UnknownAuthorityError, got %#+v", err) + } + + rootCertPath := filepath.Join(os.Getenv("PQSSLCERTTEST_PATH"), "root.crt") + rootCert := "sslrootcert=" + rootCertPath + " " + // No match on Common Name + _, err = openSSLConn(t, rootCert+"host=127.0.0.1 sslmode=verify-full user=pqgossltest") + if err == nil { + t.Fatal("expected error") + } + _, ok = err.(x509.HostnameError) + if !ok { + t.Fatalf("expected x509.HostnameError, got %#+v", err) + } + // OK + _, err = openSSLConn(t, rootCert+"host=postgres sslmode=verify-full user=pqgossltest") + if err != nil { + t.Fatal(err) + } +} + +// Test sslmode=verify-ca +func TestSSLVerifyCA(t *testing.T) { + maybeSkipSSLTests(t) + // Environment sanity check: should fail without SSL + checkSSLSetup(t, "sslmode=disable user=pqgossltest") + + // Not OK according to the system CA + _, err := openSSLConn(t, "host=postgres sslmode=verify-ca user=pqgossltest") + if err == nil { + t.Fatal("expected error") + } + _, ok := err.(x509.UnknownAuthorityError) + if !ok { + t.Fatalf("expected x509.UnknownAuthorityError, got %#+v", err) + } + + rootCertPath := filepath.Join(os.Getenv("PQSSLCERTTEST_PATH"), "root.crt") + rootCert := "sslrootcert=" + rootCertPath + " " + // No match on Common Name, but that's OK + _, err = openSSLConn(t, rootCert+"host=127.0.0.1 sslmode=verify-ca user=pqgossltest") + if err != nil { + t.Fatal(err) + } + // Everything OK + _, err = openSSLConn(t, rootCert+"host=postgres sslmode=verify-ca user=pqgossltest") + if err != nil { + t.Fatal(err) + } +} + +func getCertConninfo(t *testing.T, source string) string { + var sslkey string + var sslcert string + + certpath := os.Getenv("PQSSLCERTTEST_PATH") + + switch source { + case "missingkey": + sslkey = "/tmp/filedoesnotexist" + sslcert = filepath.Join(certpath, "postgresql.crt") + case "missingcert": + sslkey = filepath.Join(certpath, "postgresql.key") + sslcert = "/tmp/filedoesnotexist" + case "certtwice": + sslkey = filepath.Join(certpath, "postgresql.crt") + sslcert = filepath.Join(certpath, "postgresql.crt") + case "valid": + sslkey = filepath.Join(certpath, "postgresql.key") + sslcert = filepath.Join(certpath, "postgresql.crt") + default: + t.Fatalf("invalid source %q", source) + } + return fmt.Sprintf("sslmode=require user=pqgosslcert sslkey=%s sslcert=%s", sslkey, sslcert) +} + +// Authenticate over SSL using client 
certificates +func TestSSLClientCertificates(t *testing.T) { + maybeSkipSSLTests(t) + // Environment sanity check: should fail without SSL + checkSSLSetup(t, "sslmode=disable user=pqgossltest") + + // Should also fail without a valid certificate + db, err := openSSLConn(t, "sslmode=require user=pqgosslcert") + if err == nil { + db.Close() + t.Fatal("expected error") + } + pge, ok := err.(*Error) + if !ok { + t.Fatal("expected pq.Error") + } + if pge.Code.Name() != "invalid_authorization_specification" { + t.Fatalf("unexpected error code %q", pge.Code.Name()) + } + + // Should work + db, err = openSSLConn(t, getCertConninfo(t, "valid")) + if err != nil { + t.Fatal(err) + } + rows, err := db.Query("SELECT 1") + if err != nil { + t.Fatal(err) + } + rows.Close() +} + +// Test errors with ssl certificates +func TestSSLClientCertificatesMissingFiles(t *testing.T) { + maybeSkipSSLTests(t) + // Environment sanity check: should fail without SSL + checkSSLSetup(t, "sslmode=disable user=pqgossltest") + + // Key missing, should fail + _, err := openSSLConn(t, getCertConninfo(t, "missingkey")) + if err == nil { + t.Fatal("expected error") + } + // should be a PathError + _, ok := err.(*os.PathError) + if !ok { + t.Fatalf("expected PathError, got %#+v", err) + } + + // Cert missing, should fail + _, err = openSSLConn(t, getCertConninfo(t, "missingcert")) + if err == nil { + t.Fatal("expected error") + } + // should be a PathError + _, ok = err.(*os.PathError) + if !ok { + t.Fatalf("expected PathError, got %#+v", err) + } + + // Key has wrong permissions, should fail + _, err = openSSLConn(t, getCertConninfo(t, "certtwice")) + if err == nil { + t.Fatal("expected error") + } + if err != ErrSSLKeyHasWorldPermissions { + t.Fatalf("expected ErrSSLKeyHasWorldPermissions, got %#+v", err) + } +} diff --git a/vendor/github.com/lib/pq/url.go b/vendor/github.com/lib/pq/url.go new file mode 100644 index 000000000..9bac95c48 --- /dev/null +++ b/vendor/github.com/lib/pq/url.go @@ -0,0 +1,76 @@ +package pq + +import ( + "fmt" + nurl "net/url" + "sort" + "strings" +) + +// ParseURL no longer needs to be used by clients of this library since supplying a URL as a +// connection string to sql.Open() is now supported: +// +// sql.Open("postgres", "postgres://bob:secret@1.2.3.4:5432/mydb?sslmode=verify-full") +// +// It remains exported here for backwards-compatibility. +// +// ParseURL converts a url to a connection string for driver.Open. 
+// Example: +// +// "postgres://bob:secret@1.2.3.4:5432/mydb?sslmode=verify-full" +// +// converts to: +// +// "user=bob password=secret host=1.2.3.4 port=5432 dbname=mydb sslmode=verify-full" +// +// A minimal example: +// +// "postgres://" +// +// This will be blank, causing driver.Open to use all of the defaults +func ParseURL(url string) (string, error) { + u, err := nurl.Parse(url) + if err != nil { + return "", err + } + + if u.Scheme != "postgres" && u.Scheme != "postgresql" { + return "", fmt.Errorf("invalid connection protocol: %s", u.Scheme) + } + + var kvs []string + escaper := strings.NewReplacer(` `, `\ `, `'`, `\'`, `\`, `\\`) + accrue := func(k, v string) { + if v != "" { + kvs = append(kvs, k+"="+escaper.Replace(v)) + } + } + + if u.User != nil { + v := u.User.Username() + accrue("user", v) + + v, _ = u.User.Password() + accrue("password", v) + } + + i := strings.Index(u.Host, ":") + if i < 0 { + accrue("host", u.Host) + } else { + accrue("host", u.Host[:i]) + accrue("port", u.Host[i+1:]) + } + + if u.Path != "" { + accrue("dbname", u.Path[1:]) + } + + q := u.Query() + for k := range q { + accrue(k, q.Get(k)) + } + + sort.Strings(kvs) // Makes testing easier (not a performance concern) + return strings.Join(kvs, " "), nil +} diff --git a/vendor/github.com/lib/pq/url_test.go b/vendor/github.com/lib/pq/url_test.go new file mode 100644 index 000000000..29f4a7c75 --- /dev/null +++ b/vendor/github.com/lib/pq/url_test.go @@ -0,0 +1,54 @@ +package pq + +import ( + "testing" +) + +func TestSimpleParseURL(t *testing.T) { + expected := "host=hostname.remote" + str, err := ParseURL("postgres://hostname.remote") + if err != nil { + t.Fatal(err) + } + + if str != expected { + t.Fatalf("unexpected result from ParseURL:\n+ %v\n- %v", str, expected) + } +} + +func TestFullParseURL(t *testing.T) { + expected := `dbname=database host=hostname.remote password=top\ secret port=1234 user=username` + str, err := ParseURL("postgres://username:top%20secret@hostname.remote:1234/database") + if err != nil { + t.Fatal(err) + } + + if str != expected { + t.Fatalf("unexpected result from ParseURL:\n+ %s\n- %s", str, expected) + } +} + +func TestInvalidProtocolParseURL(t *testing.T) { + _, err := ParseURL("http://hostname.remote") + switch err { + case nil: + t.Fatal("Expected an error from parsing invalid protocol") + default: + msg := "invalid connection protocol: http" + if err.Error() != msg { + t.Fatalf("Unexpected error message:\n+ %s\n- %s", + err.Error(), msg) + } + } +} + +func TestMinimalURL(t *testing.T) { + cs, err := ParseURL("postgres://") + if err != nil { + t.Fatal(err) + } + + if cs != "" { + t.Fatalf("expected blank connection string, got: %q", cs) + } +} diff --git a/vendor/github.com/lib/pq/user_posix.go b/vendor/github.com/lib/pq/user_posix.go new file mode 100644 index 000000000..e937d7d08 --- /dev/null +++ b/vendor/github.com/lib/pq/user_posix.go @@ -0,0 +1,24 @@ +// Package pq is a pure Go Postgres driver for the database/sql package. 
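+//
+// This file holds the POSIX implementation of userCurrent, which tries
+// os/user first and falls back to the USER environment variable.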
+ +// +build darwin dragonfly freebsd linux nacl netbsd openbsd solaris + +package pq + +import ( + "os" + "os/user" +) + +func userCurrent() (string, error) { + u, err := user.Current() + if err == nil { + return u.Username, nil + } + + name := os.Getenv("USER") + if name != "" { + return name, nil + } + + return "", ErrCouldNotDetectUsername +} diff --git a/vendor/github.com/lib/pq/user_windows.go b/vendor/github.com/lib/pq/user_windows.go new file mode 100644 index 000000000..2b691267b --- /dev/null +++ b/vendor/github.com/lib/pq/user_windows.go @@ -0,0 +1,27 @@ +// Package pq is a pure Go Postgres driver for the database/sql package. +package pq + +import ( + "path/filepath" + "syscall" +) + +// Perform Windows user name lookup identically to libpq. +// +// The PostgreSQL code makes use of the legacy Win32 function +// GetUserName, and that function has not been imported into stock Go. +// GetUserNameEx is available though, the difference being that a +// wider range of names are available. To get the output to be the +// same as GetUserName, only the base (or last) component of the +// result is returned. +func userCurrent() (string, error) { + pw_name := make([]uint16, 128) + pwname_size := uint32(len(pw_name)) - 1 + err := syscall.GetUserNameEx(syscall.NameSamCompatible, &pw_name[0], &pwname_size) + if err != nil { + return "", ErrCouldNotDetectUsername + } + s := syscall.UTF16ToString(pw_name) + u := filepath.Base(s) + return u, nil +} diff --git a/vendor/github.com/lusis/go-artifactory/src/artifactory.v401/api.go b/vendor/github.com/lusis/go-artifactory/src/artifactory.v401/api.go new file mode 100644 index 000000000..a60755a8b --- /dev/null +++ b/vendor/github.com/lusis/go-artifactory/src/artifactory.v401/api.go @@ -0,0 +1 @@ +package artifactory diff --git a/vendor/github.com/lusis/go-artifactory/src/artifactory.v401/api_test.go b/vendor/github.com/lusis/go-artifactory/src/artifactory.v401/api_test.go new file mode 100644 index 000000000..a60755a8b --- /dev/null +++ b/vendor/github.com/lusis/go-artifactory/src/artifactory.v401/api_test.go @@ -0,0 +1 @@ +package artifactory diff --git a/vendor/github.com/lusis/go-artifactory/src/artifactory.v401/archive.go b/vendor/github.com/lusis/go-artifactory/src/artifactory.v401/archive.go new file mode 100644 index 000000000..a60755a8b --- /dev/null +++ b/vendor/github.com/lusis/go-artifactory/src/artifactory.v401/archive.go @@ -0,0 +1 @@ +package artifactory diff --git a/vendor/github.com/lusis/go-artifactory/src/artifactory.v401/archive_test.go b/vendor/github.com/lusis/go-artifactory/src/artifactory.v401/archive_test.go new file mode 100644 index 000000000..a60755a8b --- /dev/null +++ b/vendor/github.com/lusis/go-artifactory/src/artifactory.v401/archive_test.go @@ -0,0 +1 @@ +package artifactory diff --git a/vendor/github.com/lusis/go-artifactory/src/artifactory.v401/artifact.go b/vendor/github.com/lusis/go-artifactory/src/artifactory.v401/artifact.go new file mode 100644 index 000000000..67ae1fda5 --- /dev/null +++ b/vendor/github.com/lusis/go-artifactory/src/artifactory.v401/artifact.go @@ -0,0 +1,67 @@ +package artifactory + +import ( + "encoding/json" + "io/ioutil" + "os" + "path/filepath" + "strings" +) + +type FileInfo struct { + Uri string `json:"uri"` + DownloadUri string `json:"downloadUri"` + Repo string `json:"repo"` + Path string `json:"path"` + RemoteUrl string `json:"remoteUrl,omitempty"` + Created string `json:"created"` + CreatedBy string `json:"createdBy"` + LastModified string `json:"lastModified"` + ModifiedBy string 
`json:"modifiedBy"` + MimeType string `json:"mimeType"` + Size string `json:"size"` + Checksums struct { + SHA1 string `json:"sha1"` + MD5 string `json:"md5"` + } `json:"checksums"` + OriginalChecksums struct { + SHA1 string `json:"sha1"` + MD5 string `json:"md5"` + } `json:"originalChecksums,omitempty"` +} + +func (c *ArtifactoryClient) DeployArtifact(repoKey string, filename string, path string, properties map[string]string) (CreatedStorageItem, error) { + var res CreatedStorageItem + var fileProps []string + var finalUrl string + finalUrl = "/" + repoKey + "/" + if &path != nil { + finalUrl = finalUrl + path + } + baseFile := filepath.Base(filename) + finalUrl = finalUrl + "/" + baseFile + if len(properties) > 0 { + finalUrl = finalUrl + ";" + for k, v := range properties { + fileProps = append(fileProps, k+"="+v) + } + finalUrl = finalUrl + strings.Join(fileProps, ";") + } + data, err := os.Open(filename) + if err != nil { + return res, err + } + defer data.Close() + b, _ := ioutil.ReadAll(data) + d, err := c.Put(finalUrl, string(b), make(map[string]string)) + if err != nil { + return res, err + } else { + e := json.Unmarshal(d, &res) + if e != nil { + return res, e + } else { + return res, nil + } + } +} diff --git a/vendor/github.com/lusis/go-artifactory/src/artifactory.v401/artifact_test.go b/vendor/github.com/lusis/go-artifactory/src/artifactory.v401/artifact_test.go new file mode 100644 index 000000000..a60755a8b --- /dev/null +++ b/vendor/github.com/lusis/go-artifactory/src/artifactory.v401/artifact_test.go @@ -0,0 +1 @@ +package artifactory diff --git a/vendor/github.com/lusis/go-artifactory/src/artifactory.v401/assets/test/error.json b/vendor/github.com/lusis/go-artifactory/src/artifactory.v401/assets/test/error.json new file mode 100644 index 000000000..11eb6e692 --- /dev/null +++ b/vendor/github.com/lusis/go-artifactory/src/artifactory.v401/assets/test/error.json @@ -0,0 +1,6 @@ +{ + "errors" : [ { + "status" : 404, + "message" : "Not Found" + } ] +} diff --git a/vendor/github.com/lusis/go-artifactory/src/artifactory.v401/assets/test/groups.json b/vendor/github.com/lusis/go-artifactory/src/artifactory.v401/assets/test/groups.json new file mode 100644 index 000000000..101c855af --- /dev/null +++ b/vendor/github.com/lusis/go-artifactory/src/artifactory.v401/assets/test/groups.json @@ -0,0 +1,16 @@ +[ { + "name" : "administrators", + "uri" : "https://artifactory/artifactory/api/security/groups/administrators" +}, { + "name" : "docker-readers", + "uri" : "https://artifactory/artifactory/api/security/groups/docker-readers" +}, { + "name" : "remote-readers", + "uri" : "https://artifactory/artifactory/api/security/groups/remote-readers" +}, { + "name" : "developers", + "uri" : "https://artifactory/artifactory/api/security/groups/developers" +}, { + "name" : "java-committers", + "uri" : "https://artifactory/artifactory/api/security/groups/java-committers" +} ] diff --git a/vendor/github.com/lusis/go-artifactory/src/artifactory.v401/assets/test/license_information.json b/vendor/github.com/lusis/go-artifactory/src/artifactory.v401/assets/test/license_information.json new file mode 100644 index 000000000..48e7d8d95 --- /dev/null +++ b/vendor/github.com/lusis/go-artifactory/src/artifactory.v401/assets/test/license_information.json @@ -0,0 +1,5 @@ +{ + "type" : "Trial", + "validThrough" : "Sep 30, 2015", + "licensedTo" : "User Bob" +} diff --git a/vendor/github.com/lusis/go-artifactory/src/artifactory.v401/assets/test/local_repository_config.json 
b/vendor/github.com/lusis/go-artifactory/src/artifactory.v401/assets/test/local_repository_config.json new file mode 100644 index 000000000..035bc3154 --- /dev/null +++ b/vendor/github.com/lusis/go-artifactory/src/artifactory.v401/assets/test/local_repository_config.json @@ -0,0 +1,33 @@ +{ + "key" : "libs-release-local", + "packageType" : "maven", + "description" : "Local repository for in-house libraries", + "notes" : "", + "includesPattern" : "**/*", + "excludesPattern" : "", + "repoLayoutRef" : "maven-2-default", + "enableNuGetSupport" : false, + "enableGemsSupport" : false, + "enableNpmSupport" : false, + "enableBowerSupport" : false, + "enableDebianSupport" : false, + "debianTrivialLayout" : false, + "enablePypiSupport" : false, + "enableDockerSupport" : false, + "dockerApiVersion" : "V1", + "forceDockerAuthentication" : false, + "enableVagrantSupport" : false, + "enableGitLfsSupport" : false, + "checksumPolicyType" : "client-checksums", + "handleReleases" : true, + "handleSnapshots" : false, + "maxUniqueSnapshots" : 0, + "snapshotVersionBehavior" : "unique", + "suppressPomConsistencyChecks" : true, + "blackedOut" : false, + "propertySets" : [ "artifactory" ], + "archiveBrowsingEnabled" : false, + "calculateYumMetadata" : false, + "yumRootDepth" : 0, + "rclass" : "local" +} diff --git a/vendor/github.com/lusis/go-artifactory/src/artifactory.v401/assets/test/permissions.json b/vendor/github.com/lusis/go-artifactory/src/artifactory.v401/assets/test/permissions.json new file mode 100644 index 000000000..57ea2bc55 --- /dev/null +++ b/vendor/github.com/lusis/go-artifactory/src/artifactory.v401/assets/test/permissions.json @@ -0,0 +1,16 @@ +[ { + "name" : "snapshot-write", + "uri" : "https://artifactory/artifactory/api/security/permissions/snapshot-write" +}, { + "name" : "Any Remote", + "uri" : "https://artifactory/artifactory/api/security/permissions/Any%20Remote" +}, { + "name" : "Anything", + "uri" : "https://artifactory/artifactory/api/security/permissions/Anything" +}, { + "name" : "release-commiter", + "uri" : "https://artifactory/artifactory/api/security/permissions/release-commiter" +}, { + "name" : "read-docker", + "uri" : "https://artifactory/artifactory/api/security/permissions/read-docker" +} ] diff --git a/vendor/github.com/lusis/go-artifactory/src/artifactory.v401/assets/test/permissions_details.json b/vendor/github.com/lusis/go-artifactory/src/artifactory.v401/assets/test/permissions_details.json new file mode 100644 index 000000000..c4f5079b5 --- /dev/null +++ b/vendor/github.com/lusis/go-artifactory/src/artifactory.v401/assets/test/permissions_details.json @@ -0,0 +1,11 @@ +{ + "name" : "release-commiter", + "includesPattern" : "**", + "excludesPattern" : "", + "repositories" : [ "docker-local-v2", "libs-release-local", "plugins-release-local" ], + "principals" : { + "groups" : { + "java-committers" : [ "r", "d", "w", "n" ] + } + } +} \ No newline at end of file diff --git a/vendor/github.com/lusis/go-artifactory/src/artifactory.v401/assets/test/remote_repository_config.json b/vendor/github.com/lusis/go-artifactory/src/artifactory.v401/assets/test/remote_repository_config.json new file mode 100644 index 000000000..f078ffab6 --- /dev/null +++ b/vendor/github.com/lusis/go-artifactory/src/artifactory.v401/assets/test/remote_repository_config.json @@ -0,0 +1,51 @@ +{ + "key" : "jcenter", + "packageType" : "maven", + "description" : "Bintray Central Java repository", + "notes" : "", + "includesPattern" : "**/*", + "excludesPattern" : "", + "repoLayoutRef" : "maven-2-default", 
+ "enableNuGetSupport" : false, + "enableGemsSupport" : false, + "enableNpmSupport" : false, + "enableBowerSupport" : false, + "enableDebianSupport" : false, + "debianTrivialLayout" : false, + "enablePypiSupport" : false, + "enableDockerSupport" : false, + "dockerApiVersion" : "V1", + "forceDockerAuthentication" : false, + "enableVagrantSupport" : false, + "enableGitLfsSupport" : false, + "url" : "http://jcenter.bintray.com", + "username" : "", + "password" : "", + "handleReleases" : true, + "handleSnapshots" : false, + "suppressPomConsistencyChecks" : true, + "remoteRepoChecksumPolicyType" : "", + "hardFail" : false, + "offline" : false, + "blackedOut" : false, + "storeArtifactsLocally" : true, + "socketTimeoutMillis" : 15000, + "localAddress" : "", + "retrievalCachePeriodSecs" : 43200, + "assumedOfflinePeriodSecs" : 300, + "missedRetrievalCachePeriodSecs" : 7200, + "unusedArtifactsCleanupPeriodHours" : 0, + "fetchJarsEagerly" : false, + "fetchSourcesEagerly" : false, + "shareConfiguration" : false, + "synchronizeProperties" : false, + "maxUniqueSnapshots" : 0, + "propertySets" : [ "artifactory" ], + "archiveBrowsingEnabled" : false, + "listRemoteFolderItems" : true, + "rejectInvalidJars" : false, + "allowAnyHostAuth" : false, + "enableCookieManagement" : false, + "enableTokenAuthentication" : false, + "rclass" : "remote" +} \ No newline at end of file diff --git a/vendor/github.com/lusis/go-artifactory/src/artifactory.v401/assets/test/repositories.json b/vendor/github.com/lusis/go-artifactory/src/artifactory.v401/assets/test/repositories.json new file mode 100644 index 000000000..503d79595 --- /dev/null +++ b/vendor/github.com/lusis/go-artifactory/src/artifactory.v401/assets/test/repositories.json @@ -0,0 +1,94 @@ +[ { + "key" : "dev-docker-local", + "description" : "Development Docker Registry", + "type" : "LOCAL", + "url" : "https://artifactory.domain/artifactory/dev-docker-local" +}, { + "key" : "docker-local-v2", + "description" : "Production Docker Repository", + "type" : "LOCAL", + "url" : "https://artifactory.domain/artifactory/docker-local-v2" +}, { + "key" : "ext-release-local", + "description" : "Local repository for third party libraries", + "type" : "LOCAL", + "url" : "https://artifactory.domain/artifactory/ext-release-local" +}, { + "key" : "ext-snapshot-local", + "description" : "Local repository for third party snapshots", + "type" : "LOCAL", + "url" : "https://artifactory.domain/artifactory/ext-snapshot-local" +}, { + "key" : "libs-release-local", + "description" : "Local repository for in-house libraries", + "type" : "LOCAL", + "url" : "https://artifactory.domain/artifactory/libs-release-local" +}, { + "key" : "libs-snapshot-local", + "description" : "Local repository for in-house snapshots", + "type" : "LOCAL", + "url" : "https://artifactory.domain/artifactory/libs-snapshot-local" +}, { + "key" : "plugins-release-local", + "description" : "Local repository for plugins", + "type" : "LOCAL", + "url" : "https://artifactory.domain/artifactory/plugins-release-local" +}, { + "key" : "plugins-snapshot-local", + "description" : "Local repository for plugins snapshots", + "type" : "LOCAL", + "url" : "https://artifactory.domain/artifactory/plugins-snapshot-local" +}, { + "key" : "bower-remote", + "type" : "REMOTE", + "url" : "https://github.com/" +}, { + "key" : "docker-remote", + "type" : "REMOTE", + "url" : "https://registry-1.docker.io/" +}, { + "key" : "jcenter", + "description" : "Bintray Central Java repository", + "type" : "REMOTE", + "url" : "http://jcenter.bintray.com" 
+}, { + "key" : "npm-remote", + "type" : "REMOTE", + "url" : "https://registry.npmjs.org" +}, { + "key" : "nuget-remote", + "type" : "REMOTE", + "url" : "https://www.nuget.org/" +}, { + "key" : "pypi-remote", + "type" : "REMOTE", + "url" : "https://pypi.python.org" +}, { + "key" : "rubygems-remote", + "type" : "REMOTE", + "url" : "https://rubygems.org/" +}, { + "key" : "nexus-releases-remote", + "type" : "REMOTE", + "url" : "https://nexus.domain/content/repositories/releases/" +}, { + "key" : "libs-release", + "type" : "VIRTUAL", + "url" : "https://artifactory.domain/artifactory/libs-release" +}, { + "key" : "libs-snapshot", + "type" : "VIRTUAL", + "url" : "https://artifactory.domain/artifactory/libs-snapshot" +}, { + "key" : "plugins-release", + "type" : "VIRTUAL", + "url" : "https://artifactory.domain/artifactory/plugins-release" +}, { + "key" : "plugins-snapshot", + "type" : "VIRTUAL", + "url" : "https://artifactory.domain/artifactory/plugins-snapshot" +}, { + "key" : "remote-repos", + "type" : "VIRTUAL", + "url" : "https://artifactory.domain/artifactory/remote-repos" +} ] diff --git a/vendor/github.com/lusis/go-artifactory/src/artifactory.v401/assets/test/single_group.json b/vendor/github.com/lusis/go-artifactory/src/artifactory.v401/assets/test/single_group.json new file mode 100644 index 000000000..33f121db0 --- /dev/null +++ b/vendor/github.com/lusis/go-artifactory/src/artifactory.v401/assets/test/single_group.json @@ -0,0 +1,6 @@ +{ + "name" : "docker-readers", + "description" : "Can read from Docker repositories", + "autoJoin" : false, + "realm" : "artifactory" +} \ No newline at end of file diff --git a/vendor/github.com/lusis/go-artifactory/src/artifactory.v401/assets/test/single_user.json b/vendor/github.com/lusis/go-artifactory/src/artifactory.v401/assets/test/single_user.json new file mode 100644 index 000000000..c77a41d1e --- /dev/null +++ b/vendor/github.com/lusis/go-artifactory/src/artifactory.v401/assets/test/single_user.json @@ -0,0 +1,11 @@ +{ + "name" : "admin", + "email" : "admin@admin.com", + "admin" : true, + "profileUpdatable" : true, + "internalPasswordDisabled" : false, + "groups" : [ "administrators" ], + "lastLoggedIn" : "2015-08-11T14:04:11.472Z", + "realm" : "internal", + "offlineMode" : false +} diff --git a/vendor/github.com/lusis/go-artifactory/src/artifactory.v401/assets/test/users.json b/vendor/github.com/lusis/go-artifactory/src/artifactory.v401/assets/test/users.json new file mode 100644 index 000000000..235153f88 --- /dev/null +++ b/vendor/github.com/lusis/go-artifactory/src/artifactory.v401/assets/test/users.json @@ -0,0 +1,9 @@ +[ + { + "name": "davids", + "uri" : "http://localhost:8080/artifactory/api/security/users/davids" + }, { + "name": "danl", + "uri" : "http://localhost:8080/artifactory/api/security/users/danl" + } +] diff --git a/vendor/github.com/lusis/go-artifactory/src/artifactory.v401/assets/test/virtual_repository_config.json b/vendor/github.com/lusis/go-artifactory/src/artifactory.v401/assets/test/virtual_repository_config.json new file mode 100644 index 000000000..224239f2b --- /dev/null +++ b/vendor/github.com/lusis/go-artifactory/src/artifactory.v401/assets/test/virtual_repository_config.json @@ -0,0 +1,25 @@ +{ + "key" : "libs-release", + "packageType" : "maven", + "description" : "", + "notes" : "", + "includesPattern" : "**/*", + "excludesPattern" : "", + "enableNuGetSupport" : false, + "enableGemsSupport" : false, + "enableNpmSupport" : false, + "enableBowerSupport" : false, + "enableDebianSupport" : false, + 
"debianTrivialLayout" : false, + "enablePypiSupport" : false, + "enableDockerSupport" : false, + "dockerApiVersion" : "V1", + "forceDockerAuthentication" : false, + "enableVagrantSupport" : false, + "enableGitLfsSupport" : false, + "repositories" : [ "libs-release-local", "ext-release-local", "remote-repos" ], + "artifactoryRequestsCanRetrieveRemoteArtifacts" : false, + "keyPair" : "", + "pomRepositoryReferencesCleanupPolicy" : "discard_active_reference", + "rclass" : "virtual" +} diff --git a/vendor/github.com/lusis/go-artifactory/src/artifactory.v401/bintray.go b/vendor/github.com/lusis/go-artifactory/src/artifactory.v401/bintray.go new file mode 100644 index 000000000..a60755a8b --- /dev/null +++ b/vendor/github.com/lusis/go-artifactory/src/artifactory.v401/bintray.go @@ -0,0 +1 @@ +package artifactory diff --git a/vendor/github.com/lusis/go-artifactory/src/artifactory.v401/bintray_test.go b/vendor/github.com/lusis/go-artifactory/src/artifactory.v401/bintray_test.go new file mode 100644 index 000000000..a60755a8b --- /dev/null +++ b/vendor/github.com/lusis/go-artifactory/src/artifactory.v401/bintray_test.go @@ -0,0 +1 @@ +package artifactory diff --git a/vendor/github.com/lusis/go-artifactory/src/artifactory.v401/build.go b/vendor/github.com/lusis/go-artifactory/src/artifactory.v401/build.go new file mode 100644 index 000000000..a60755a8b --- /dev/null +++ b/vendor/github.com/lusis/go-artifactory/src/artifactory.v401/build.go @@ -0,0 +1 @@ +package artifactory diff --git a/vendor/github.com/lusis/go-artifactory/src/artifactory.v401/build_test.go b/vendor/github.com/lusis/go-artifactory/src/artifactory.v401/build_test.go new file mode 100644 index 000000000..a60755a8b --- /dev/null +++ b/vendor/github.com/lusis/go-artifactory/src/artifactory.v401/build_test.go @@ -0,0 +1 @@ +package artifactory diff --git a/vendor/github.com/lusis/go-artifactory/src/artifactory.v401/client.go b/vendor/github.com/lusis/go-artifactory/src/artifactory.v401/client.go new file mode 100644 index 000000000..056858f30 --- /dev/null +++ b/vendor/github.com/lusis/go-artifactory/src/artifactory.v401/client.go @@ -0,0 +1,64 @@ +package artifactory + +import ( + "crypto/tls" + "fmt" + "net/http" + "os" +) + +type ClientConfig struct { + BaseURL string + Username string + Password string + VerifySSL bool + Client *http.Client + Transport *http.Transport +} + +type ArtifactoryClient struct { + Client *http.Client + Config *ClientConfig + Transport *http.Transport +} + +func NewClient(config *ClientConfig) (c ArtifactoryClient) { + verifySSL := func() bool { + if config.VerifySSL != true { + return false + } else { + return true + } + } + if config.Transport == nil { + config.Transport = new(http.Transport) + } + config.Transport.TLSClientConfig = &tls.Config{InsecureSkipVerify: verifySSL()} + if config.Client == nil { + config.Client = new(http.Client) + } + config.Client.Transport = config.Transport + return ArtifactoryClient{Client: config.Client, Config: config} +} + +func clientConfigFrom(from string) (c *ClientConfig) { + switch from { + case "environment": + if os.Getenv("ARTIFACTORY_URL") == "" || os.Getenv("ARTIFACTORY_USERNAME") == "" || os.Getenv("ARTIFACTORY_PASSWORD") == "" { + fmt.Printf("You must set the environment variables ARTIFACTORY_URL/ARTIFACTORY_USERNAME/ARTIFACTORY_PASSWORD\n") + os.Exit(1) + } + } + conf := ClientConfig{ + BaseURL: os.Getenv("ARTIFACTORY_URL"), + Username: os.Getenv("ARTIFACTORY_USERNAME"), + Password: os.Getenv("ARTIFACTORY_PASSWORD"), + } + return &conf +} + +func 
NewClientFromEnv() (c ArtifactoryClient) { + config := clientConfigFrom("environment") + client := NewClient(config) + return client +} diff --git a/vendor/github.com/lusis/go-artifactory/src/artifactory.v401/client_test.go b/vendor/github.com/lusis/go-artifactory/src/artifactory.v401/client_test.go new file mode 100644 index 000000000..cfe421c76 --- /dev/null +++ b/vendor/github.com/lusis/go-artifactory/src/artifactory.v401/client_test.go @@ -0,0 +1,38 @@ +package artifactory + +import ( + "fmt" + "github.com/stretchr/testify/assert" + _ "io/ioutil" + "net/http" + "net/http/httptest" + "net/url" + _ "os" + "testing" +) + +func TestNewClientCustomTransport(t *testing.T) { + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(200) + w.Header().Set("Content-Type", "application/json") + fmt.Fprint(w, "pong") + })) + transport := &http.Transport{ + Proxy: func(req *http.Request) (*url.URL, error) { + return url.Parse(server.URL) + }, + } + defer server.Close() + conf := &ClientConfig{ + BaseURL: "http://127.0.0.1:8080/", + Username: "username", + Password: "password", + VerifySSL: false, + Transport: transport, + } + + client := NewClient(conf) + res, err := client.Get("/ping", make(map[string]string)) + assert.Nil(t, err, "should not return an error") + assert.Equal(t, "pong", string(res), "should return the testmsg") +} diff --git a/vendor/github.com/lusis/go-artifactory/src/artifactory.v401/compliance.go b/vendor/github.com/lusis/go-artifactory/src/artifactory.v401/compliance.go new file mode 100644 index 000000000..a60755a8b --- /dev/null +++ b/vendor/github.com/lusis/go-artifactory/src/artifactory.v401/compliance.go @@ -0,0 +1 @@ +package artifactory diff --git a/vendor/github.com/lusis/go-artifactory/src/artifactory.v401/compliance_test.go b/vendor/github.com/lusis/go-artifactory/src/artifactory.v401/compliance_test.go new file mode 100644 index 000000000..a60755a8b --- /dev/null +++ b/vendor/github.com/lusis/go-artifactory/src/artifactory.v401/compliance_test.go @@ -0,0 +1 @@ +package artifactory diff --git a/vendor/github.com/lusis/go-artifactory/src/artifactory.v401/errors.go b/vendor/github.com/lusis/go-artifactory/src/artifactory.v401/errors.go new file mode 100644 index 000000000..70551aad9 --- /dev/null +++ b/vendor/github.com/lusis/go-artifactory/src/artifactory.v401/errors.go @@ -0,0 +1,10 @@ +package artifactory + +type ErrorsJson struct { + Errors []ErrorJson `json:"errors"` +} + +type ErrorJson struct { + Status string `json:"status"` + Message string `json:"message"` +} diff --git a/vendor/github.com/lusis/go-artifactory/src/artifactory.v401/groups.go b/vendor/github.com/lusis/go-artifactory/src/artifactory.v401/groups.go new file mode 100644 index 000000000..a60c11229 --- /dev/null +++ b/vendor/github.com/lusis/go-artifactory/src/artifactory.v401/groups.go @@ -0,0 +1,61 @@ +package artifactory + +import ( + "encoding/json" +) + +type Group struct { + Name string `json:"name"` + Uri string `json:"uri"` +} + +type GroupDetails struct { + Name string `json:"name,omitempty"` + Description string `json:"description,omitempty"` + AutoJoin bool `json:"autoJoin,omitempty"` + Realm string `json:"realm,omitempty"` + RealmAttributes string `json:"realmAttributes,omitempty"` +} + +func (c *ArtifactoryClient) GetGroups() ([]Group, error) { + var res []Group + d, e := c.Get("/api/security/groups", make(map[string]string)) + if e != nil { + return res, e + } else { + err := json.Unmarshal(d, &res) + if err != nil { + return res, err 
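// Unmarshal succeeded: e is already known to be nil here, so the else branch below returns (res, nil).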
+ } else { + return res, e + } + } +} + +func (c *ArtifactoryClient) GetGroupDetails(u string) (GroupDetails, error) { + var res GroupDetails + d, e := c.Get("/api/security/groups/"+u, make(map[string]string)) + if e != nil { + return res, e + } else { + err := json.Unmarshal(d, &res) + if err != nil { + return res, err + } else { + return res, e + } + } +} + +func (c *ArtifactoryClient) CreateGroup(gname string, g GroupDetails) error { + j, jerr := json.Marshal(g) + if jerr != nil { + return jerr + } + o := make(map[string]string) + _, err := c.Put("/api/security/groups/"+gname, string(j), o) + if err != nil { + return err + } + return nil +} diff --git a/vendor/github.com/lusis/go-artifactory/src/artifactory.v401/groups_test.go b/vendor/github.com/lusis/go-artifactory/src/artifactory.v401/groups_test.go new file mode 100644 index 000000000..0b3868e05 --- /dev/null +++ b/vendor/github.com/lusis/go-artifactory/src/artifactory.v401/groups_test.go @@ -0,0 +1,158 @@ +package artifactory + +import ( + "bytes" + "fmt" + "github.com/stretchr/testify/assert" + "io/ioutil" + "net/http" + "net/http/httptest" + "net/url" + "os" + "testing" +) + +func TestGetGroups(t *testing.T) { + responseFile, err := os.Open("assets/test/groups.json") + if err != nil { + t.Fatalf("Unable to read test data: %s", err.Error()) + } + defer responseFile.Close() + responseBody, _ := ioutil.ReadAll(responseFile) + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(200) + w.Header().Set("Content-Type", "application/json") + fmt.Fprintf(w, string(responseBody)) + })) + defer server.Close() + + transport := &http.Transport{ + Proxy: func(req *http.Request) (*url.URL, error) { + return url.Parse(server.URL) + }, + } + + conf := &ClientConfig{ + BaseURL: "http://127.0.0.1:8080/", + Username: "username", + Password: "password", + VerifySSL: false, + Transport: transport, + } + + client := NewClient(conf) + groups, err := client.GetGroups() + assert.NoError(t, err, "should not return an error") + assert.Len(t, groups, 5, "should have five groups") + assert.Equal(t, groups[0].Name, "administrators", "Should have the administrators group") + assert.Equal(t, groups[0].Uri, "https://artifactory/artifactory/api/security/groups/administrators", "should have a uri") + for _, g := range groups { + assert.NotNil(t, g.Name, "Name should not be empty") + assert.NotNil(t, g.Uri, "Uri should not be empty") + } +} + +func TestGetGroupDetails(t *testing.T) { + responseFile, err := os.Open("assets/test/single_group.json") + if err != nil { + t.Fatalf("Unable to read test data: %s", err.Error()) + } + defer responseFile.Close() + responseBody, _ := ioutil.ReadAll(responseFile) + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(200) + w.Header().Set("Content-Type", "application/json") + fmt.Fprintf(w, string(responseBody)) + })) + defer server.Close() + + transport := &http.Transport{ + Proxy: func(req *http.Request) (*url.URL, error) { + return url.Parse(server.URL) + }, + } + + conf := &ClientConfig{ + BaseURL: "http://127.0.0.1:8080/", + Username: "username", + Password: "password", + VerifySSL: false, + Transport: transport, + } + + client := NewClient(conf) + group, err := client.GetGroupDetails("docker-readers") + assert.NoError(t, err, "should not return an error") + assert.Equal(t, group.Name, "docker-readers", "name should be docker-readers") + assert.Equal(t, group.Description, "Can read from Docker repositories", 
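/* fixture value comes from assets/test/single_group.json above */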
"description should match") + assert.False(t, group.AutoJoin, "autojoin should be false") + assert.Equal(t, group.Realm, "artifactory", "realm should be artifactory") +} + +func TestCreateGroupNoDetails(t *testing.T) { + var buf bytes.Buffer + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(200) + w.Header().Set("Content-Type", "application/json") + req, _ := ioutil.ReadAll(r.Body) + buf.Write(req) + fmt.Fprintf(w, "") + })) + defer server.Close() + + transport := &http.Transport{ + Proxy: func(req *http.Request) (*url.URL, error) { + return url.Parse(server.URL) + }, + } + + conf := &ClientConfig{ + BaseURL: "http://127.0.0.1:8080/", + Username: "username", + Password: "password", + VerifySSL: false, + Transport: transport, + } + + client := NewClient(conf) + var details GroupDetails = GroupDetails{} + err := client.CreateGroup("testgroup", details) + assert.NoError(t, err, "should not return an error") + assert.Equal(t, "{}", string(buf.Bytes()), "should send empty json") +} + +func TestCreateGroupDetails(t *testing.T) { + var buf bytes.Buffer + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(200) + w.Header().Set("Content-Type", "application/json") + req, _ := ioutil.ReadAll(r.Body) + buf.Write(req) + fmt.Fprintf(w, "") + })) + defer server.Close() + + transport := &http.Transport{ + Proxy: func(req *http.Request) (*url.URL, error) { + return url.Parse(server.URL) + }, + } + + conf := &ClientConfig{ + BaseURL: "http://127.0.0.1:8080/", + Username: "username", + Password: "password", + VerifySSL: false, + Transport: transport, + } + + client := NewClient(conf) + var details GroupDetails = GroupDetails{ + Description: "test group desc", + AutoJoin: true, + } + expectedJson := `{"description":"test group desc","autoJoin":true}` + err := client.CreateGroup("testgroup", details) + assert.NoError(t, err, "should not return an error") + assert.Equal(t, expectedJson, string(buf.Bytes()), "should send empty json") +} diff --git a/vendor/github.com/lusis/go-artifactory/src/artifactory.v401/http.go b/vendor/github.com/lusis/go-artifactory/src/artifactory.v401/http.go new file mode 100644 index 000000000..0fb2f6649 --- /dev/null +++ b/vendor/github.com/lusis/go-artifactory/src/artifactory.v401/http.go @@ -0,0 +1,96 @@ +package artifactory + +import ( + "bytes" + "crypto/sha1" + "encoding/json" + "errors" + "fmt" + "io" + "io/ioutil" + "net/http" + "net/url" + "strings" +) + +func (c *ArtifactoryClient) Get(path string, options map[string]string) ([]byte, error) { + return c.makeRequest("GET", path, options, nil) +} + +func (c *ArtifactoryClient) Post(path string, data string, options map[string]string) ([]byte, error) { + body := strings.NewReader(data) + return c.makeRequest("POST", path, options, body) +} + +func (c *ArtifactoryClient) Put(path string, data string, options map[string]string) ([]byte, error) { + body := strings.NewReader(data) + return c.makeRequest("PUT", path, options, body) +} + +func (c *ArtifactoryClient) Delete(path string) error { + _, err := c.makeRequest("DELETE", path, make(map[string]string), nil) + if err != nil { + return err + } else { + return nil + } +} + +func (c *ArtifactoryClient) makeRequest(method string, path string, options map[string]string, body io.Reader) ([]byte, error) { + qs := url.Values{} + for q, p := range options { + qs.Add(q, p) + } + var base_req_path string + if c.Config.BaseURL[:len(c.Config.BaseURL)-1] == "/" { + 
base_req_path = c.Config.BaseURL + path + } else { + base_req_path = c.Config.BaseURL + "/" + path + } + u, err := url.Parse(base_req_path) + if err != nil { + var data bytes.Buffer + return data.Bytes(), err + } + if len(options) != 0 { + u.RawQuery = qs.Encode() + } + buf := new(bytes.Buffer) + if body != nil { + buf.ReadFrom(body) + } + req, _ := http.NewRequest(method, u.String(), bytes.NewReader(buf.Bytes())) + if body != nil { + h := sha1.New() + h.Write(buf.Bytes()) + chkSum := h.Sum(nil) + req.Header.Add("X-Checksum-Sha1", fmt.Sprintf("%x", chkSum)) + } + req.Header.Add("user-agent", "artifactory-go."+VERSION) + req.Header.Add("X-Result-Detail", "info, properties") + req.SetBasicAuth(c.Config.Username, c.Config.Password) + r, err := c.Client.Do(req) + if err != nil { + var data bytes.Buffer + return data.Bytes(), err + } else { + defer r.Body.Close() + data, err := ioutil.ReadAll(r.Body) + if r.StatusCode < 200 || r.StatusCode > 299 { + var ej ErrorsJson + uerr := json.Unmarshal(data, &ej) + if uerr != nil { + emsg := fmt.Sprintf("Non-2xx code returned: %d. Message follows:\n%s", r.StatusCode, string(data)) + return data, errors.New(emsg) + } else { + var emsgs []string + for _, i := range ej.Errors { + emsgs = append(emsgs, i.Message) + } + return data, errors.New(strings.Join(emsgs, "\n")) + } + } else { + return data, err + } + } +} diff --git a/vendor/github.com/lusis/go-artifactory/src/artifactory.v401/http_test.go b/vendor/github.com/lusis/go-artifactory/src/artifactory.v401/http_test.go new file mode 100644 index 000000000..a60755a8b --- /dev/null +++ b/vendor/github.com/lusis/go-artifactory/src/artifactory.v401/http_test.go @@ -0,0 +1 @@ +package artifactory diff --git a/vendor/github.com/lusis/go-artifactory/src/artifactory.v401/license.go b/vendor/github.com/lusis/go-artifactory/src/artifactory.v401/license.go new file mode 100644 index 000000000..7b9cdb588 --- /dev/null +++ b/vendor/github.com/lusis/go-artifactory/src/artifactory.v401/license.go @@ -0,0 +1,23 @@ +package artifactory + +import ( + "encoding/json" +) + +type LicenseInformation struct { + LicenseType string `json:"type"` + ValidThrough string `json:"validThrough"` + LicensedTo string `json:"licensedTo"` +} + +func (c *ArtifactoryClient) GetLicenseInformation() (LicenseInformation, error) { + o := make(map[string]string, 0) + var l LicenseInformation + d, e := c.Get("/api/system/license", o) + if e != nil { + return l, e + } else { + err := json.Unmarshal(d, &l) + return l, err + } +} diff --git a/vendor/github.com/lusis/go-artifactory/src/artifactory.v401/mimetypes.go b/vendor/github.com/lusis/go-artifactory/src/artifactory.v401/mimetypes.go new file mode 100644 index 000000000..2ad1f5042 --- /dev/null +++ b/vendor/github.com/lusis/go-artifactory/src/artifactory.v401/mimetypes.go @@ -0,0 +1,11 @@ +package artifactory + +const LOCAL_REPO_MIMETYPE string = "application/vnd.org.jfrog.artifactory.repositories.LocalRepositoryConfiguration+json" +const REMOTE_REPO_MIMETYPE string = "application/vnd.org.jfrog.artifactory.repositories.RemoteRepositoryConfiguration+json" +const VIRTUAL_REPO_MIMETYPE string = "application/vnd.org.jfrog.artifactory.repositories.VirtualRepositoryConfiguration+json" +const USER_MIMETYPE string = "application/vnd.org.jfrog.artifactory.security.User+json" +const GROUP_MIMETYPE string = "application/vnd.org.jfrog.artifactory.security.Group+json" +const PERMISSION_TARGET_MIMETYPE string = "application/vnd.org.jfrog.artifactory.security.PermissionTarget+json" +const IMPORT_SETTINGS_MIMETYPE 
string = "application/vnd.org.jfrog.artifactory.system.ImportSettings+json" +const EXPORT_SETTIGNS_MIMETYPE string = "application/vnd.org.jfrog.artifactory.system.ExportSettings+json" +const SYSTEM_VERSION_MIMETYPE string = "application/vnd.org.jfrog.artifactory.system.Version+json" diff --git a/vendor/github.com/lusis/go-artifactory/src/artifactory.v401/mimetypes_test.go b/vendor/github.com/lusis/go-artifactory/src/artifactory.v401/mimetypes_test.go new file mode 100644 index 000000000..a60755a8b --- /dev/null +++ b/vendor/github.com/lusis/go-artifactory/src/artifactory.v401/mimetypes_test.go @@ -0,0 +1 @@ +package artifactory diff --git a/vendor/github.com/lusis/go-artifactory/src/artifactory.v401/permissions_targets.go b/vendor/github.com/lusis/go-artifactory/src/artifactory.v401/permissions_targets.go new file mode 100644 index 000000000..71ff71edc --- /dev/null +++ b/vendor/github.com/lusis/go-artifactory/src/artifactory.v401/permissions_targets.go @@ -0,0 +1,53 @@ +package artifactory + +import ( + "encoding/json" +) + +type PermissionTarget struct { + Name string `json:"name"` + Uri string `json:"uri"` +} + +type PermissionTargetDetails struct { + Name string `json:"name,omitempty"` + IncludesPattern string `json:"includesPattern,omitempty"` + ExcludesPattern string `json:"excludesPattern,omitempty"` + Repositories []string `json:"repositories,omitempty"` + Principals Principals `json:"principals,omitempty"` +} + +type Principals struct { + Users map[string][]string `json:"users"` + Groups map[string][]string `json:"groups"` +} + +func (c *ArtifactoryClient) GetPermissionTargets() ([]PermissionTarget, error) { + var res []PermissionTarget + d, e := c.Get("/api/security/permissions", make(map[string]string)) + if e != nil { + return res, e + } else { + err := json.Unmarshal(d, &res) + if err != nil { + return res, err + } else { + return res, e + } + } +} + +func (c *ArtifactoryClient) GetPermissionTargetDetails(u string) (PermissionTargetDetails, error) { + var res PermissionTargetDetails + d, e := c.Get("/api/security/permissions/"+u, make(map[string]string)) + if e != nil { + return res, e + } else { + err := json.Unmarshal(d, &res) + if err != nil { + return res, err + } else { + return res, e + } + } +} diff --git a/vendor/github.com/lusis/go-artifactory/src/artifactory.v401/permissions_targets_test.go b/vendor/github.com/lusis/go-artifactory/src/artifactory.v401/permissions_targets_test.go new file mode 100644 index 000000000..c880f86ba --- /dev/null +++ b/vendor/github.com/lusis/go-artifactory/src/artifactory.v401/permissions_targets_test.go @@ -0,0 +1,99 @@ +package artifactory + +import ( + _ "bytes" + "fmt" + "github.com/stretchr/testify/assert" + "io/ioutil" + "net/http" + "net/http/httptest" + "net/url" + "os" + "testing" +) + +func TestGetPermissions(t *testing.T) { + responseFile, err := os.Open("assets/test/permissions.json") + if err != nil { + t.Fatalf("Unable to read test data: %s", err.Error()) + } + defer responseFile.Close() + responseBody, _ := ioutil.ReadAll(responseFile) + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(200) + w.Header().Set("Content-Type", "application/json") + fmt.Fprintf(w, string(responseBody)) + })) + defer server.Close() + + transport := &http.Transport{ + Proxy: func(req *http.Request) (*url.URL, error) { + return url.Parse(server.URL) + }, + } + + conf := &ClientConfig{ + BaseURL: "http://127.0.0.1:8080/", + Username: "username", + Password: "password", + VerifySSL: false, + 
Transport: transport, + } + + client := NewClient(conf) + perms, err := client.GetPermissionTargets() + assert.NoError(t, err, "should not return an error") + assert.Len(t, perms, 5, "should have five targets") + assert.Equal(t, perms[0].Name, "snapshot-write", "Should have the snapshot-write target") + assert.Equal(t, perms[0].Uri, "https://artifactory/artifactory/api/security/permissions/snapshot-write", "should have a uri") + for _, p := range perms { + assert.NotNil(t, p.Name, "Name should not be empty") + assert.NotNil(t, p.Uri, "Uri should not be empty") + } +} + +func TestGetPermissionDetails(t *testing.T) { + responseFile, err := os.Open("assets/test/permissions_details.json") + if err != nil { + t.Fatalf("Unable to read test data: %s", err.Error()) + } + defer responseFile.Close() + responseBody, _ := ioutil.ReadAll(responseFile) + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(200) + w.Header().Set("Content-Type", "application/json") + fmt.Fprintf(w, string(responseBody)) + })) + defer server.Close() + + transport := &http.Transport{ + Proxy: func(req *http.Request) (*url.URL, error) { + return url.Parse(server.URL) + }, + } + + conf := &ClientConfig{ + BaseURL: "http://127.0.0.1:8080/", + Username: "username", + Password: "password", + VerifySSL: false, + Transport: transport, + } + + client := NewClient(conf) + perms, err := client.GetPermissionTargetDetails("release-commiter") + assert.NoError(t, err, "should not return an error") + assert.Equal(t, perms.Name, "release-commiter", "Should be release-commiter") + assert.Equal(t, perms.IncludesPattern, "**", "Includes should be **") + assert.Equal(t, perms.ExcludesPattern, "", "Excludes should be nil") + assert.Len(t, perms.Repositories, 3, "Should have 3 repositories") + assert.Contains(t, perms.Repositories, "docker-local-v2", "Should have repos") + assert.NotNil(t, perms.Principals.Groups, "should have a group principal") + groups := []string{} + for g := range perms.Principals.Groups { + groups = append(groups, g) + } + assert.Contains(t, groups, "java-committers", "Should have the java committers group") + assert.Len(t, perms.Principals.Groups["java-committers"], 4, "should have 4 permissions") + assert.Contains(t, perms.Principals.Groups["java-committers"], "r", "Should have the r permission") +} diff --git a/vendor/github.com/lusis/go-artifactory/src/artifactory.v401/repos.go b/vendor/github.com/lusis/go-artifactory/src/artifactory.v401/repos.go new file mode 100644 index 000000000..5a3ed8cd6 --- /dev/null +++ b/vendor/github.com/lusis/go-artifactory/src/artifactory.v401/repos.go @@ -0,0 +1,151 @@ +package artifactory + +import ( + "encoding/json" + "fmt" +) + +type Repo struct { + Key string `json:"key"` + Rtype string `json:"type"` + Description string `json:"description,omitempty"` + Url string `json:"url,omitempty"` +} + +type RepoConfig interface { + MimeType() string +} + +type GenericRepoConfig struct { + Key string `json:"key,omitempty"` + RClass string `json:"rclass"` + PackageType string `json:"packageType,omitempty"` + Description string `json:"description,omitempty"` + Notes string `json:"notes,omitempty"` + IncludesPattern string `json:"includesPattern,omitempty"` + ExcludesPattern string `json:"excludesPattern,omitempty"` + HandleReleases bool `json:"handleReleases,omitempty"` + HandleSnapshots bool `json:"handleSnapshots,omitempty"` + MaxUniqueSnapshots int `json:"maxUniqueSnapshots,omitempty"` + SuppressPomConsistencyChecks bool 
`json:"supressPomConsistencyChecks,omitempty"` + BlackedOut bool `json:"blackedOut,omitempty"` + PropertySets []string `json:"propertySets,omitempty"` +} + +func (r GenericRepoConfig) MimeType() string { + return "" +} + +type LocalRepoConfig struct { + GenericRepoConfig + + LayoutRef string `json:"repoLayoutRef,omitempty"` + DebianTrivialLayout bool `json:"debianTrivialLayout,omitempty"` + ChecksumPolicyType string `json:"checksumPolicyType,omitempty"` + SnapshotVersionBehavior string `json:"snapshotVersionBehavior,omitempty"` + ArchiveBrowsingEnabled bool `json:"archiveBrowsingEnabled,omitempty"` + CalculateYumMetadata bool `json:"calculateYumMetadata,omitempty"` + YumRootDepth int `json:"yumRootDepth,omitempty"` +} + +func (r LocalRepoConfig) MimeType() string { + return LOCAL_REPO_MIMETYPE +} + +type RemoteRepoConfig struct { + GenericRepoConfig + + Url string `json:"url"` + Username string `json:"username,omitempty"` + Password string `json:"password,omitempty"` + Proxy string `json:"proxy,omitempty"` + RemoteRepoChecksumPolicyType string `json:"remoteRepoChecksumPolicyType,omitempty"` + HardFail bool `json:"hardFail,omitempty"` + Offline bool `json:"offline,omitempty"` + StoreArtifactsLocally bool `json:"storeArtifactsLocally,omitempty"` + SocketTimeoutMillis int `json:"socketTimeoutMillis,omitempty"` + LocalAddress string `json:"localAddress,omitempty"` + RetrivialCachePeriodSecs int `json:"retrievalCachePeriodSecs,omitempty"` + FailedRetrievalCachePeriodSecs int `json:"failedRetrievalCachePeriodSecs,omitempty"` + MissedRetrievalCachePeriodSecs int `json:"missedRetrievalCachePeriodSecs,omitempty"` + UnusedArtifactsCleanupEnabled bool `json:"unusedArtifactCleanupEnabled,omitempty"` + UnusedArtifactsCleanupPeriodHours int `json:"unusedArtifactCleanupPeriodHours,omitempty"` + FetchJarsEagerly bool `json:"fetchJarsEagerly,omitempty"` + ShareConfiguration bool `json:"shareConfiguration,omitempty"` + SynchronizeProperties bool `json:"synchronizeProperties,omitempty"` + AllowAnyHostAuth bool `json:"allowAnyHostAuth,omitempty"` + EnableCookieManagement bool `json:"enableCookieManagement,omitempty"` + BowerRegistryUrl string `json:"bowerRegistryUrl,omitempty"` + VcsType string `json:"vcsType,omitempty"` + VcsGitProvider string `json:"vcsGitProvider,omitempty"` + VcsGitDownloader string `json:"vcsGitDownloader,omitempty"` +} + +func (r RemoteRepoConfig) MimeType() string { + return REMOTE_REPO_MIMETYPE +} + +type VirtualRepoConfig struct { + GenericRepoConfig + + Repositories []string `json:"repositories"` + DebianTrivialLayout bool `json:"debianTrivialLayout,omitempty"` + ArtifactoryRequestsCanRetrieveRemoteArtifacts bool `json:artifactoryRequestsCanRetrieveRemoteArtifacts,omitempty"` + KeyPair string `json:"keyPair,omitempty"` + PomRepositoryReferenceCleanupPolicy string `json:"pomRepositoryReferenceCleanupPolicy,omitempty"` +} + +func (r VirtualRepoConfig) MimeType() string { + return VIRTUAL_REPO_MIMETYPE +} + +func (client *ArtifactoryClient) GetRepos(rtype string) ([]Repo, error) { + o := make(map[string]string, 0) + if rtype != "all" { + o["type"] = rtype + } + var dat []Repo + d, e := client.Get("/api/repositories", o) + if e != nil { + return dat, e + } else { + err := json.Unmarshal(d, &dat) + if err != nil { + return dat, err + } else { + return dat, e + } + } +} + +func (client *ArtifactoryClient) GetRepo(key string) (RepoConfig, error) { + o := make(map[string]string, 0) + dat := new(GenericRepoConfig) + d, e := client.Get("/api/repositories/"+key, o) + if e != nil { + return 
*dat, e + } else { + err := json.Unmarshal(d, &dat) + if err != nil { + return *dat, err + } else { + switch dat.RClass { + case "local": + var cdat LocalRepoConfig + _ = json.Unmarshal(d, &cdat) + return cdat, nil + case "remote": + var cdat RemoteRepoConfig + _ = json.Unmarshal(d, &cdat) + return cdat, nil + case "virtual": + var cdat VirtualRepoConfig + _ = json.Unmarshal(d, &cdat) + return cdat, nil + default: + fmt.Printf("fallthrough to default\n") + return dat, nil + } + } + } +} diff --git a/vendor/github.com/lusis/go-artifactory/src/artifactory.v401/repos_test.go b/vendor/github.com/lusis/go-artifactory/src/artifactory.v401/repos_test.go new file mode 100644 index 000000000..a60755a8b --- /dev/null +++ b/vendor/github.com/lusis/go-artifactory/src/artifactory.v401/repos_test.go @@ -0,0 +1 @@ +package artifactory diff --git a/vendor/github.com/lusis/go-artifactory/src/artifactory.v401/responses.go b/vendor/github.com/lusis/go-artifactory/src/artifactory.v401/responses.go new file mode 100644 index 000000000..20a411107 --- /dev/null +++ b/vendor/github.com/lusis/go-artifactory/src/artifactory.v401/responses.go @@ -0,0 +1,9 @@ +package artifactory + +type GavcSearchResults struct { + Results []FileInfo `json:"results"` +} + +type Uri struct { + Uri string `json:"uri,omitempty"` +} diff --git a/vendor/github.com/lusis/go-artifactory/src/artifactory.v401/responses_test.go b/vendor/github.com/lusis/go-artifactory/src/artifactory.v401/responses_test.go new file mode 100644 index 000000000..a60755a8b --- /dev/null +++ b/vendor/github.com/lusis/go-artifactory/src/artifactory.v401/responses_test.go @@ -0,0 +1 @@ +package artifactory diff --git a/vendor/github.com/lusis/go-artifactory/src/artifactory.v401/search.go b/vendor/github.com/lusis/go-artifactory/src/artifactory.v401/search.go new file mode 100644 index 000000000..8c986d78f --- /dev/null +++ b/vendor/github.com/lusis/go-artifactory/src/artifactory.v401/search.go @@ -0,0 +1,47 @@ +package artifactory + +import ( + "encoding/json" + "strings" +) + +type Gavc struct { + GroupID string + ArtifactID string + Version string + Classifier string + Repos []string +} + +func (c *ArtifactoryClient) GAVCSearch(coords *Gavc) (files []FileInfo, e error) { + url := "/api/search/gavc" + params := make(map[string]string) + if coords.GroupID != "" { + params["g"] = coords.GroupID + } + if coords.ArtifactID != "" { + params["a"] = coords.ArtifactID + } + if coords.Version != "" { + params["v"] = coords.Version + } + if coords.Classifier != "" { + params["c"] = coords.Classifier + } + if len(coords.Repos) > 0 { + params["repos"] = strings.Join(coords.Repos, ",") + } + d, err := c.Get(url, params) + if err != nil { + return files, err + } else { + var dat GavcSearchResults + err := json.Unmarshal(d, &dat) + if err != nil { + return files, err + } else { + files = dat.Results + return files, nil + } + } +} diff --git a/vendor/github.com/lusis/go-artifactory/src/artifactory.v401/search_test.go b/vendor/github.com/lusis/go-artifactory/src/artifactory.v401/search_test.go new file mode 100644 index 000000000..a60755a8b --- /dev/null +++ b/vendor/github.com/lusis/go-artifactory/src/artifactory.v401/search_test.go @@ -0,0 +1 @@ +package artifactory diff --git a/vendor/github.com/lusis/go-artifactory/src/artifactory.v401/security.go b/vendor/github.com/lusis/go-artifactory/src/artifactory.v401/security.go new file mode 100644 index 000000000..f74130961 --- /dev/null +++ b/vendor/github.com/lusis/go-artifactory/src/artifactory.v401/security.go @@ -0,0 +1,6 
@@ +package artifactory + +func (c *ArtifactoryClient) GetSystemSecurityConfiguration() (s string, e error) { + d, e := c.Get("/api/system/security", make(map[string]string)) + return string(d), e +} diff --git a/vendor/github.com/lusis/go-artifactory/src/artifactory.v401/security_test.go b/vendor/github.com/lusis/go-artifactory/src/artifactory.v401/security_test.go new file mode 100644 index 000000000..a60755a8b --- /dev/null +++ b/vendor/github.com/lusis/go-artifactory/src/artifactory.v401/security_test.go @@ -0,0 +1 @@ +package artifactory diff --git a/vendor/github.com/lusis/go-artifactory/src/artifactory.v401/storage.go b/vendor/github.com/lusis/go-artifactory/src/artifactory.v401/storage.go new file mode 100644 index 000000000..b7967e11f --- /dev/null +++ b/vendor/github.com/lusis/go-artifactory/src/artifactory.v401/storage.go @@ -0,0 +1,18 @@ +package artifactory + +type CreatedStorageItem struct { + URI string `json:"uri"` + DownloadURI string `json:"downloadUri"` + Repo string `json:"repo"` + Created string `json:"created"` + CreatedBy string `json:"createdBy"` + Size string `json:"size"` + MimeType string `json:"mimeType"` + Checksums ArtifactChecksums `json:"checksums"` + OriginalChecksums ArtifactChecksums `json:"originalChecksums"` +} + +type ArtifactChecksums struct { + MD5 string `json:"md5"` + SHA1 string `json:"sha1"` +} diff --git a/vendor/github.com/lusis/go-artifactory/src/artifactory.v401/storage_test.go b/vendor/github.com/lusis/go-artifactory/src/artifactory.v401/storage_test.go new file mode 100644 index 000000000..a60755a8b --- /dev/null +++ b/vendor/github.com/lusis/go-artifactory/src/artifactory.v401/storage_test.go @@ -0,0 +1 @@ +package artifactory diff --git a/vendor/github.com/lusis/go-artifactory/src/artifactory.v401/system.go b/vendor/github.com/lusis/go-artifactory/src/artifactory.v401/system.go new file mode 100644 index 000000000..d33c29c65 --- /dev/null +++ b/vendor/github.com/lusis/go-artifactory/src/artifactory.v401/system.go @@ -0,0 +1,6 @@ +package artifactory + +func (c *ArtifactoryClient) GetGeneralConfiguration() (s string, e error) { + d, e := c.Get("/api/system/configuration", make(map[string]string)) + return string(d), e +} diff --git a/vendor/github.com/lusis/go-artifactory/src/artifactory.v401/system_test.go b/vendor/github.com/lusis/go-artifactory/src/artifactory.v401/system_test.go new file mode 100644 index 000000000..a60755a8b --- /dev/null +++ b/vendor/github.com/lusis/go-artifactory/src/artifactory.v401/system_test.go @@ -0,0 +1 @@ +package artifactory diff --git a/vendor/github.com/lusis/go-artifactory/src/artifactory.v401/users.go b/vendor/github.com/lusis/go-artifactory/src/artifactory.v401/users.go new file mode 100644 index 000000000..9caf6adf7 --- /dev/null +++ b/vendor/github.com/lusis/go-artifactory/src/artifactory.v401/users.go @@ -0,0 +1,83 @@ +package artifactory + +import ( + "encoding/json" + "errors" +) + +type User struct { + Name string `json:"name"` + Uri string `json:"uri"` +} + +type UserDetails struct { + Name string `json:"name,omitempty"` + Email string `json:"email"` + Password string `json:"password"` + Admin bool `json:"admin,omitempty"` + ProfileUpdatable bool `json:"profileUpdatable,omitempty"` + InternalPasswordDisabled bool `json:"internalPasswordDisabled,omitempty"` + LastLoggedIn string `json:"lastLoggedIn,omitempty"` + Realm string `json:"realm,omitempty"` + Groups []string `json:"groups,omitempty"` +} + +func (c *ArtifactoryClient) GetUsers() ([]User, error) { + var res []User + d, e := 
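/* GET /api/security/users returns the summary list of users (name and uri only) */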
c.Get("/api/security/users", make(map[string]string)) + if e != nil { + return res, e + } else { + err := json.Unmarshal(d, &res) + if err != nil { + return res, err + } else { + return res, e + } + } +} + +func (c *ArtifactoryClient) GetUserDetails(u string) (UserDetails, error) { + var res UserDetails + d, e := c.Get("/api/security/users/"+u, make(map[string]string)) + if e != nil { + return res, e + } else { + err := json.Unmarshal(d, &res) + if err != nil { + return res, err + } else { + return res, e + } + } +} + +func (c *ArtifactoryClient) CreateUser(uname string, u UserDetails) error { + if &u.Email == nil || &u.Password == nil { + return errors.New("Email and password are required to create users") + } + j, jerr := json.Marshal(u) + if jerr != nil { + return jerr + } + o := make(map[string]string) + _, err := c.Put("/api/security/users/"+uname, string(j), o) + if err != nil { + return err + } + return nil +} + +func (c *ArtifactoryClient) DeleteUser(uname string) error { + err := c.Delete("/api/security/users/" + uname) + if err != nil { + return err + } else { + return nil + } +} + +func (c *ArtifactoryClient) GetUserEncryptedPassword() (s string, err error) { + d, err := c.Get("/api/security/encryptedPassword", make(map[string]string)) + return string(d), err +} diff --git a/vendor/github.com/lusis/go-artifactory/src/artifactory.v401/users_test.go b/vendor/github.com/lusis/go-artifactory/src/artifactory.v401/users_test.go new file mode 100644 index 000000000..9cc6a3560 --- /dev/null +++ b/vendor/github.com/lusis/go-artifactory/src/artifactory.v401/users_test.go @@ -0,0 +1,188 @@ +package artifactory + +import ( + "fmt" + "github.com/stretchr/testify/assert" + "io/ioutil" + "net/http" + "net/http/httptest" + "net/url" + "os" + "testing" +) + +func TestCreateUserFailure(t *testing.T) { + conf := &ClientConfig{ + BaseURL: "http://127.0.0.1:8080/", + Username: "username", + Password: "password", + VerifySSL: false, + } + + client := NewClient(conf) + var details UserDetails = UserDetails{} + err := client.CreateUser("testuser", details) + assert.Error(t, err, "should return an error") +} + +func TestDeleteUser(t *testing.T) { + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(200) + w.Header().Set("Content-Type", "application/json") + fmt.Fprintf(w, "") + })) + defer server.Close() + + transport := &http.Transport{ + Proxy: func(req *http.Request) (*url.URL, error) { + return url.Parse(server.URL) + }, + } + + conf := &ClientConfig{ + BaseURL: "http://127.0.0.1:8080/", + Username: "username", + Password: "password", + VerifySSL: false, + Transport: transport, + } + + client := NewClient(conf) + err := client.DeleteUser("testuser") + assert.NoError(t, err, "should not return an error") +} + +func TestCreateUser(t *testing.T) { + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(200) + w.Header().Set("Content-Type", "application/json") + fmt.Fprintf(w, "user created") + })) + defer server.Close() + + transport := &http.Transport{ + Proxy: func(req *http.Request) (*url.URL, error) { + return url.Parse(server.URL) + }, + } + + conf := &ClientConfig{ + BaseURL: "http://127.0.0.1:8080/", + Username: "username", + Password: "password", + VerifySSL: false, + Transport: transport, + } + + client := NewClient(conf) + var details UserDetails = UserDetails{ + Email: "test@test.com", + Password: "somepass", + } + err := client.CreateUser("testuser", details) + assert.NoError(t, 
err, "should not return an error") +} + +func TestGetUsers(t *testing.T) { + responseFile, err := os.Open("assets/test/users.json") + if err != nil { + t.Fatalf("Unable to read test data: %s", err.Error()) + } + defer responseFile.Close() + responseBody, _ := ioutil.ReadAll(responseFile) + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(200) + w.Header().Set("Content-Type", "application/json") + fmt.Fprintf(w, string(responseBody)) + })) + defer server.Close() + + transport := &http.Transport{ + Proxy: func(req *http.Request) (*url.URL, error) { + return url.Parse(server.URL) + }, + } + + conf := &ClientConfig{ + BaseURL: "http://127.0.0.1:8080/", + Username: "username", + Password: "password", + VerifySSL: false, + Transport: transport, + } + + client := NewClient(conf) + users, err := client.GetUsers() + assert.NoError(t, err, "should not return an error") + assert.Len(t, users, 2, "should have two users") +} + +func TestGetUserDetails(t *testing.T) { + responseFile, err := os.Open("assets/test/single_user.json") + if err != nil { + t.Fatalf("Unable to read test data: %s", err.Error()) + } + defer responseFile.Close() + responseBody, _ := ioutil.ReadAll(responseFile) + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(200) + w.Header().Set("Content-Type", "application/json") + fmt.Fprintf(w, string(responseBody)) + })) + defer server.Close() + + transport := &http.Transport{ + Proxy: func(req *http.Request) (*url.URL, error) { + return url.Parse(server.URL) + }, + } + + conf := &ClientConfig{ + BaseURL: "http://127.0.0.1:8080/", + Username: "username", + Password: "password", + VerifySSL: false, + Transport: transport, + } + + client := NewClient(conf) + user, err := client.GetUserDetails("admin") + assert.NoError(t, err, "should not return an error") + assert.Equal(t, user.Name, "admin", "name should be admin") + assert.Equal(t, user.Email, "admin@admin.com", "should have email of admin@admin.com") + assert.True(t, user.Admin, "user should be an admin") + assert.True(t, user.ProfileUpdatable, "profile updatable should be true") + assert.False(t, user.InternalPasswordDisabled, "Internal password should not be disabled") + assert.Len(t, user.Groups, 1, "User should be in one group") + assert.Equal(t, user.Groups[0], "administrators", "user should be in the administrators group") + assert.Equal(t, user.Realm, "internal", "user realm should be internal") + assert.NotNil(t, user.LastLoggedIn, "lastLoggedIn should not be empty") +} + +func TestGetUserEncryptedPassword(t *testing.T) { + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(200) + w.Header().Set("Content-Type", "text/plain") + fmt.Fprintf(w, "ABCDEFGH") + })) + defer server.Close() + + transport := &http.Transport{ + Proxy: func(req *http.Request) (*url.URL, error) { + return url.Parse(server.URL) + }, + } + + conf := &ClientConfig{ + BaseURL: "http://127.0.0.1:8080/", + Username: "username", + Password: "password", + VerifySSL: false, + Transport: transport, + } + + client := NewClient(conf) + d, err := client.GetUserEncryptedPassword() + assert.NoError(t, err, "should not return an error") + assert.Equal(t, d, "ABCDEFGH", "encrypted password should be returned") +} diff --git a/vendor/github.com/lusis/go-artifactory/src/artifactory.v401/version.go b/vendor/github.com/lusis/go-artifactory/src/artifactory.v401/version.go new file mode 100644 index 
000000000..bca14077e --- /dev/null +++ b/vendor/github.com/lusis/go-artifactory/src/artifactory.v401/version.go @@ -0,0 +1,3 @@ +package artifactory + +const VERSION = "4.0.1" diff --git a/vendor/github.com/lusis/go-artifactory/src/artifactory.v401/version_test.go b/vendor/github.com/lusis/go-artifactory/src/artifactory.v401/version_test.go new file mode 100644 index 000000000..a60755a8b --- /dev/null +++ b/vendor/github.com/lusis/go-artifactory/src/artifactory.v401/version_test.go @@ -0,0 +1 @@ +package artifactory diff --git a/vendor/github.com/masterzen/simplexml/dom/document.go b/vendor/github.com/masterzen/simplexml/dom/document.go new file mode 100644 index 000000000..d73693c3f --- /dev/null +++ b/vendor/github.com/masterzen/simplexml/dom/document.go @@ -0,0 +1,35 @@ +package dom + +import ( + "bytes" + "fmt" +) + +type Document struct { + root *Element + PrettyPrint bool + Indentation string + DocType bool +} + +func CreateDocument() *Document { + return &Document{ PrettyPrint: false, Indentation: "  ", DocType: true } +} + +func (doc *Document) SetRoot(node *Element) { + node.parent = nil + doc.root = node +} + +func (doc *Document) String() string { + var b bytes.Buffer + if doc.DocType { + fmt.Fprintln(&b, `<?xml version="1.0" encoding="UTF-8"?>`) + } + + if doc.root != nil { + doc.root.Bytes(&b, doc.PrettyPrint, doc.Indentation, 0) + } + + return string(b.Bytes()) +} diff --git a/vendor/github.com/masterzen/simplexml/dom/dom_test.go b/vendor/github.com/masterzen/simplexml/dom/dom_test.go new file mode 100644 index 000000000..057efc84e --- /dev/null +++ b/vendor/github.com/masterzen/simplexml/dom/dom_test.go @@ -0,0 +1,105 @@ +package dom + +import ( + . "launchpad.net/gocheck" + "testing" +) + +// Hook up gocheck into the "go test" runner. +func Test(t *testing.T) { TestingT(t) } + +type DomSuite struct{} + +var _ = Suite(&DomSuite{}) + +func (s *DomSuite) TestEmptyDocument(c *C) { + doc := CreateDocument() + doc.PrettyPrint = true + c.Assert(doc.String(), Equals, "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n") +} + +func (s *DomSuite) TestOneEmptyNode(c *C) { + doc := CreateDocument() + doc.PrettyPrint = true + root := CreateElement("root") + doc.SetRoot(root) + c.Assert(doc.String(), Equals, "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n<root/>\n") +} + +func (s *DomSuite) TestMoreNodes(c *C) { + doc := CreateDocument() + doc.PrettyPrint = true + root := CreateElement("root") + node1 := CreateElement("node1") + root.AddChild(node1) + subnode := CreateElement("sub") + node1.AddChild(subnode) + node2 := CreateElement("node2") + root.AddChild(node2) + doc.SetRoot(root) + + expected := `<?xml version="1.0" encoding="UTF-8"?> +<root> + <node1> + <sub/> + </node1> + <node2/> +</root> +` + + c.Assert(doc.String(), Equals, expected) +} + +func (s *DomSuite) TestAttributes(c *C) { + doc := CreateDocument() + doc.PrettyPrint = true + root := CreateElement("root") + node1 := CreateElement("node1") + node1.SetAttr("attr1", "pouet") + root.AddChild(node1) + doc.SetRoot(root) + + expected := `<?xml version="1.0" encoding="UTF-8"?> +<root> + <node1 attr1="pouet"/> +</root> +` + c.Assert(doc.String(), Equals, expected) +} + +func (s *DomSuite) TestContent(c *C) { + doc := CreateDocument() + doc.PrettyPrint = true + root := CreateElement("root") + node1 := CreateElement("node1") + node1.SetContent("this is a text content") + root.AddChild(node1) + doc.SetRoot(root) + + expected := `<?xml version="1.0" encoding="UTF-8"?> +<root> + <node1>this is a text content</node1> +</root> +` + c.Assert(doc.String(), Equals, expected) +} + +func (s *DomSuite) TestNamespace(c *C) { + doc := CreateDocument() + doc.PrettyPrint = true + root := CreateElement("root") + root.DeclareNamespace(Namespace { Prefix: "a", Uri: "http://schemas.xmlsoap.org/ws/2004/08/addressing"}) + node1 := CreateElement("node1") + root.AddChild(node1) + node1.SetNamespace("a", 
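/* resolves to the prefix already declared on root, so no duplicate xmlns declaration is emitted */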
"http://schemas.xmlsoap.org/ws/2004/08/addressing") + node1.SetContent("this is a text content") + doc.SetRoot(root) + + expected := ` + + this is a text content + +` + c.Assert(doc.String(), Equals, expected) +} + diff --git a/vendor/github.com/masterzen/simplexml/dom/element.go b/vendor/github.com/masterzen/simplexml/dom/element.go new file mode 100644 index 000000000..8e2795960 --- /dev/null +++ b/vendor/github.com/masterzen/simplexml/dom/element.go @@ -0,0 +1,200 @@ +package dom + +import ( + "encoding/xml" + "fmt" + "bytes" +) + +type Attr struct { + Name xml.Name // Attribute namespace and name. + Value string // Attribute value. +} + +type Element struct { + name xml.Name + children []*Element + parent *Element + content string + attributes []*Attr + namespaces []*Namespace + document *Document +} + +func CreateElement(n string) *Element { + element := &Element { name: xml.Name { Local: n } } + element.children = make([]*Element, 0, 5) + element.attributes = make([]*Attr, 0, 10) + element.namespaces = make([]*Namespace, 0, 10) + return element +} + +func (node *Element) AddChild(child *Element) *Element { + if child.parent != nil { + child.parent.RemoveChild(child) + } + child.SetParent(node) + node.children = append(node.children, child) + return node +} + +func (node *Element) RemoveChild(child *Element) *Element { + p := -1 + for i, v := range node.children { + if v == child { + p = i + break + } + } + + if p == -1 { + return node + } + + copy(node.children[p:], node.children[p+1:]) + node.children = node.children[0 : len(node.children)-1] + child.parent = nil + return node +} + +func (node *Element) SetAttr(name string, value string) *Element { + // namespaces? + attr := &Attr{ Name: xml.Name { Local: name }, Value: value } + node.attributes = append(node.attributes, attr) + return node +} + +func (node *Element) SetParent(parent *Element) *Element { + node.parent = parent + return node +} + +func (node *Element) SetContent(content string) *Element { + node.content = content + return node +} + +// Add a namespace declaration to this node +func (node *Element) DeclareNamespace(ns Namespace) *Element { + // check if we already have it + prefix := node.namespacePrefix(ns.Uri) + if prefix == ns.Prefix { + return node + } + // add it + node.namespaces = append(node.namespaces, &ns) + return node +} + +func (node *Element) DeclaredNamespaces() []*Namespace { + return node.namespaces +} + +func (node *Element) SetNamespace(prefix string, uri string) { + resolved := node.namespacePrefix(uri) + if resolved == "" { + // we couldn't find the namespace, let's declare it at this node + node.namespaces = append(node.namespaces, &Namespace { Prefix: prefix, Uri: uri }) + } + node.name.Space = uri +} + +func (node *Element) Bytes(out *bytes.Buffer, indent bool, indentType string, level int) { + empty := len(node.children) == 0 && node.content == "" + content := node.content != "" +// children := len(node.children) > 0 +// ns := len(node.namespaces) > 0 +// attrs := len(node.attributes) > 0 + + indentStr := "" + nextLine := "" + if indent { + nextLine = "\n" + for i := 0; i < level; i++ { + indentStr += indentType + } + } + + if node.name.Local != "" { + if len(node.name.Space) > 0 { + // first find if ns has been declared, otherwise + prefix := node.namespacePrefix(node.name.Space) + fmt.Fprintf(out, "%s<%s:%s", indentStr, prefix, node.name.Local) + } else { + fmt.Fprintf(out, "%s<%s", indentStr, node.name.Local) + } + } + + // declared namespaces + for _, v := range node.namespaces { + prefix 
:= node.namespacePrefix(v.Uri) + fmt.Fprintf(out, ` xmlns:%s="%s"`, prefix, v.Uri) + } + + // attributes + for _, v := range node.attributes { + if len(v.Name.Space) > 0 { + prefix := node.namespacePrefix(v.Name.Space) + fmt.Fprintf(out, ` %s:%s="%s"`, prefix, v.Name.Local, v.Value) + } else { + fmt.Fprintf(out, ` %s="%s"`, v.Name.Local, v.Value) + } + } + + // close tag + if empty { + fmt.Fprintf(out, "/>%s", nextLine) + } else { + if content { + out.WriteRune('>') + } else { + fmt.Fprintf(out, ">%s", nextLine) + } + } + + if len(node.children) > 0 { + for _, child := range node.children { + child.Bytes(out, indent, indentType, level + 1) + } + } else if node.content != "" { + //val := []byte(node.content) + //xml.EscapeText(out, val) + out.WriteString(node.content) + } + + if !empty && len(node.name.Local) > 0 { + var indentation string + if content { + indentation = "" + } else { + indentation = indentStr + } + if len(node.name.Space) > 0 { + prefix := node.namespacePrefix(node.name.Space) + fmt.Fprintf(out, "%s</%s:%s>\n", indentation, prefix, node.name.Local) + } else { + fmt.Fprintf(out, "%s</%s>\n", indentation, node.name.Local) + } + } +} + +// Finds the prefix of the given namespace if it has been declared +// in this node or in one of its parents +func (node *Element) namespacePrefix(uri string) string { + for _, ns := range node.namespaces { + if ns.Uri == uri { + return ns.Prefix + } + } + if node.parent == nil { + return "" + } + return node.parent.namespacePrefix(uri) +} + + +func (node *Element) String() string { + var b bytes.Buffer + node.Bytes(&b, false, "", 0) + return string(b.Bytes()) +} diff --git a/vendor/github.com/masterzen/simplexml/dom/namespace.go b/vendor/github.com/masterzen/simplexml/dom/namespace.go new file mode 100644 index 000000000..3961e654d --- /dev/null +++ b/vendor/github.com/masterzen/simplexml/dom/namespace.go @@ -0,0 +1,10 @@ +package dom + +type Namespace struct { + Prefix string + Uri string +} + +func (ns *Namespace) SetTo(node *Element) { + node.SetNamespace(ns.Prefix, ns.Uri) +} diff --git a/vendor/github.com/masterzen/winrm/soap/header.go b/vendor/github.com/masterzen/winrm/soap/header.go new file mode 100644 index 000000000..1a2ac0881 --- /dev/null +++ b/vendor/github.com/masterzen/winrm/soap/header.go @@ -0,0 +1,181 @@ +package soap + +import ( + "github.com/masterzen/simplexml/dom" + "strconv" +) + +type HeaderOption struct { + key string + value string +} + +func NewHeaderOption(name string, value string) *HeaderOption { + return &HeaderOption{key: name, value: value} +} + +type SoapHeader struct { + to string + replyTo string + maxEnvelopeSize string + timeout string + locale string + id string + action string + shellId string + resourceURI string + options []HeaderOption + message *SoapMessage +} + +type HeaderBuilder interface { + To(string) *SoapHeader + ReplyTo(string) *SoapHeader + MaxEnvelopeSize(int) *SoapHeader + Timeout(string) *SoapHeader + Locale(string) *SoapHeader + Id(string) *SoapHeader + Action(string) *SoapHeader + ShellId(string) *SoapHeader + resourceURI(string) *SoapHeader + AddOption(*HeaderOption) *SoapHeader + Options([]HeaderOption) *SoapHeader + Build(*SoapMessage) *SoapMessage +} + +func (self *SoapHeader) To(uri string) *SoapHeader { + self.to = uri + return self +} + +func (self *SoapHeader) ReplyTo(uri string) *SoapHeader { + self.replyTo = uri + return self +} + +func (self *SoapHeader) MaxEnvelopeSize(size int) *SoapHeader { + self.maxEnvelopeSize = strconv.Itoa(size) + return self +} + +func (self *SoapHeader) 
Timeout(timeout string) *SoapHeader { + self.timeout = timeout + return self +} + +func (self *SoapHeader) Id(id string) *SoapHeader { + self.id = id + return self +} + +func (self *SoapHeader) Action(action string) *SoapHeader { + self.action = action + return self +} + +func (self *SoapHeader) Locale(locale string) *SoapHeader { + self.locale = locale + return self +} + +func (self *SoapHeader) ShellId(shellId string) *SoapHeader { + self.shellId = shellId + return self +} + +func (self *SoapHeader) ResourceURI(resourceURI string) *SoapHeader { + self.resourceURI = resourceURI + return self +} + +func (self *SoapHeader) AddOption(option *HeaderOption) *SoapHeader { + self.options = append(self.options, *option) + return self +} + +func (self *SoapHeader) Options(options []HeaderOption) *SoapHeader { + self.options = options + return self +} + +func (self *SoapHeader) Build() *SoapMessage { + header := self.createElement(self.message.envelope, "Header", NS_SOAP_ENV) + + if self.to != "" { + to := self.createElement(header, "To", NS_ADDRESSING) + to.SetContent(self.to) + } + + if self.replyTo != "" { + replyTo := self.createElement(header, "ReplyTo", NS_ADDRESSING) + a := self.createMUElement(replyTo, "Address", NS_ADDRESSING, true) + a.SetContent(self.replyTo) + } + + if self.maxEnvelopeSize != "" { + envelope := self.createMUElement(header, "MaxEnvelopeSize", NS_WSMAN_DMTF, true) + envelope.SetContent(self.maxEnvelopeSize) + } + + if self.timeout != "" { + timeout := self.createElement(header, "OperationTimeout", NS_WSMAN_DMTF) + timeout.SetContent(self.timeout) + } + + if self.id != "" { + id := self.createElement(header, "MessageID", NS_ADDRESSING) + id.SetContent(self.id) + } + + if self.locale != "" { + locale := self.createMUElement(header, "Locale", NS_WSMAN_DMTF, false) + locale.SetAttr("xml:lang", self.locale) + datalocale := self.createMUElement(header, "DataLocale", NS_WSMAN_MSFT, false) + datalocale.SetAttr("xml:lang", self.locale) + } + + if self.action != "" { + action := self.createMUElement(header, "Action", NS_ADDRESSING, true) + action.SetContent(self.action) + } + + if self.shellId != "" { + selectorSet := self.createElement(header, "SelectorSet", NS_WSMAN_DMTF) + selector := self.createElement(selectorSet, "Selector", NS_WSMAN_DMTF) + selector.SetAttr("Name", "ShellId") + selector.SetContent(self.shellId) + } + + if self.resourceURI != "" { + resource := self.createMUElement(header, "ResourceURI", NS_WSMAN_DMTF, true) + resource.SetContent(self.resourceURI) + } + + if len(self.options) > 0 { + set := self.createElement(header, "OptionSet", NS_WSMAN_DMTF) + for _, option := range self.options { + e := self.createElement(set, "Option", NS_WSMAN_DMTF) + e.SetAttr("Name", option.key) + e.SetContent(option.value) + } + } + + return self.message +} + +func (self *SoapHeader) createElement(parent *dom.Element, name string, ns dom.Namespace) (element *dom.Element) { + element = dom.CreateElement(name) + parent.AddChild(element) + ns.SetTo(element) + return +} + +func (self *SoapHeader) createMUElement(parent *dom.Element, name string, ns dom.Namespace, mustUnderstand bool) (element *dom.Element) { + element = self.createElement(parent, name, ns) + value := "false" + if mustUnderstand { + value = "true" + } + element.SetAttr("mustUnderstand", value) + return +} diff --git a/vendor/github.com/masterzen/winrm/soap/header_test.go b/vendor/github.com/masterzen/winrm/soap/header_test.go new file mode 100644 index 000000000..82ce93870 --- /dev/null +++ 
b/vendor/github.com/masterzen/winrm/soap/header_test.go @@ -0,0 +1,48 @@ +package soap + +import ( + "github.com/masterzen/simplexml/dom" + . "gopkg.in/check.v1" + "testing" +) + +// Hook up gocheck into the "go test" runner. +func Test(t *testing.T) { TestingT(t) } + +type MySuite struct{} + +var _ = Suite(&MySuite{}) + +func initDocument() (h *SoapHeader) { + doc := dom.CreateDocument() + doc.PrettyPrint = true + e := dom.CreateElement("Envelope") + doc.SetRoot(e) + AddUsualNamespaces(e) + NS_SOAP_ENV.SetTo(e) + h = &SoapHeader{message: &SoapMessage{document: doc, envelope: e}} + return +} + +func (s *MySuite) TestHeaderBuild(c *C) { + h := initDocument() + msg := h.To("http://winrm:5985/wsman").ReplyTo("http://schemas.xmlsoap.org/ws/2004/08/addressing/role/anonymous").MaxEnvelopeSize(153600).Id("1-2-3-4").Locale("en_US").Timeout("PT60S").Build() + + expected := `<?xml version="1.0" encoding="UTF-8"?> +<env:Envelope xmlns:env="http://www.w3.org/2003/05/soap-envelope" xmlns:a="http://schemas.xmlsoap.org/ws/2004/08/addressing" xmlns:rsp="http://schemas.microsoft.com/wbem/wsman/1/windows/shell" xmlns:w="http://schemas.dmtf.org/wbem/wsman/1/wsman.xsd" xmlns:p="http://schemas.microsoft.com/wbem/wsman/1/wsman.xsd"> + <env:Header> + <a:To>http://winrm:5985/wsman</a:To> + <a:ReplyTo> + <a:Address mustUnderstand="true">http://schemas.xmlsoap.org/ws/2004/08/addressing/role/anonymous</a:Address> + </a:ReplyTo> + <w:MaxEnvelopeSize mustUnderstand="true">153600</w:MaxEnvelopeSize> + <w:OperationTimeout>PT60S</w:OperationTimeout> + <a:MessageID>1-2-3-4</a:MessageID> + <w:Locale mustUnderstand="false" xml:lang="en_US"/> + <p:DataLocale mustUnderstand="false" xml:lang="en_US"/> + </env:Header> +</env:Envelope> +` + + c.Check(msg.String(), Equals, expected) +} diff --git a/vendor/github.com/masterzen/winrm/soap/message.go b/vendor/github.com/masterzen/winrm/soap/message.go new file mode 100644 index 000000000..d06c3085f --- /dev/null +++ b/vendor/github.com/masterzen/winrm/soap/message.go @@ -0,0 +1,74 @@ +package soap + +import ( + "github.com/masterzen/simplexml/dom" +) + +type SoapMessage struct { + document *dom.Document + envelope *dom.Element + header *SoapHeader + body *dom.Element +} + +type MessageBuilder interface { + SetBody(*dom.Element) + NewBody() *dom.Element + CreateElement(*dom.Element, string, dom.Namespace) *dom.Element + CreateBodyElement(string, dom.Namespace) *dom.Element + Header() *SoapHeader + Doc() *dom.Document + Free() + + String() string +} + +func NewMessage() (message *SoapMessage) { + doc := dom.CreateDocument() + e := dom.CreateElement("Envelope") + doc.SetRoot(e) + AddUsualNamespaces(e) + NS_SOAP_ENV.SetTo(e) + + message = &SoapMessage{document: doc, envelope: e} + return +} + +func (message *SoapMessage) NewBody() (body *dom.Element) { + body = dom.CreateElement("Body") + message.envelope.AddChild(body) + NS_SOAP_ENV.SetTo(body) + return +} + +func (message *SoapMessage) String() string { + return message.document.String() +} + +func (message *SoapMessage) Doc() *dom.Document { + return message.document +} + +func (message *SoapMessage) Free() { +} + +func (message *SoapMessage) CreateElement(parent *dom.Element, name string, ns dom.Namespace) (element *dom.Element) { + element = dom.CreateElement(name) + parent.AddChild(element) + ns.SetTo(element) + return +} + +func (message *SoapMessage) CreateBodyElement(name string, ns dom.Namespace) (element *dom.Element) { + if message.body == nil { + message.body = message.NewBody() + } + return message.CreateElement(message.body, name, ns) +} + +func (message *SoapMessage) Header() *SoapHeader { + if message.header == nil { + message.header = &SoapHeader{message: message} + } + return message.header +} diff --git a/vendor/github.com/masterzen/winrm/soap/namespaces.go b/vendor/github.com/masterzen/winrm/soap/namespaces.go new file mode 100644 index 000000000..fda49f49e --- /dev/null +++ b/vendor/github.com/masterzen/winrm/soap/namespaces.go @@ -0,0 +1,37 @@ +package soap + +import ( + "github.com/masterzen/simplexml/dom" + "github.com/masterzen/xmlpath" +) + +var ( + NS_SOAP_ENV = dom.Namespace{"env", "http://www.w3.org/2003/05/soap-envelope"} + NS_ADDRESSING = dom.Namespace{"a", 
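/* WS-Addressing */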
"http://schemas.xmlsoap.org/ws/2004/08/addressing"} + NS_CIMBINDING = dom.Namespace{"b", "http://schemas.dmtf.org/wbem/wsman/1/cimbinding.xsd"} + NS_ENUM = dom.Namespace{"n", "http://schemas.xmlsoap.org/ws/2004/09/enumeration"} + NS_TRANSFER = dom.Namespace{"x", "http://schemas.xmlsoap.org/ws/2004/09/transfer"} + NS_WSMAN_DMTF = dom.Namespace{"w", "http://schemas.dmtf.org/wbem/wsman/1/wsman.xsd"} + NS_WSMAN_MSFT = dom.Namespace{"p", "http://schemas.microsoft.com/wbem/wsman/1/wsman.xsd"} + NS_SCHEMA_INST = dom.Namespace{"xsi", "http://www.w3.org/2001/XMLSchema-instance"} + NS_WIN_SHELL = dom.Namespace{"rsp", "http://schemas.microsoft.com/wbem/wsman/1/windows/shell"} + NS_WSMAN_FAULT = dom.Namespace{"f", "http://schemas.microsoft.com/wbem/wsman/1/wsmanfault"} +) + +var MostUsed = [...]dom.Namespace{NS_SOAP_ENV, NS_ADDRESSING, NS_WIN_SHELL, NS_WSMAN_DMTF, NS_WSMAN_MSFT} + +func AddUsualNamespaces(node *dom.Element) { + for _, ns := range MostUsed { + node.DeclareNamespace(ns) + } +} + +func GetAllNamespaces() []xmlpath.Namespace { + var ns = []dom.Namespace{NS_WIN_SHELL, NS_ADDRESSING, NS_WSMAN_DMTF, NS_WSMAN_MSFT, NS_SOAP_ENV} + + var xmlpathNs = make([]xmlpath.Namespace, 0, 4) + for _, namespace := range ns { + xmlpathNs = append(xmlpathNs, xmlpath.Namespace{Prefix: namespace.Prefix, Uri: namespace.Uri}) + } + return xmlpathNs +} diff --git a/vendor/github.com/masterzen/winrm/soap/namespaces_test.go b/vendor/github.com/masterzen/winrm/soap/namespaces_test.go new file mode 100644 index 000000000..f299de741 --- /dev/null +++ b/vendor/github.com/masterzen/winrm/soap/namespaces_test.go @@ -0,0 +1,37 @@ +package soap + +import ( + "github.com/masterzen/simplexml/dom" + "testing" +) + +func TestAddUsualNamespaces(t *testing.T) { + doc := dom.CreateDocument() + root := dom.CreateElement("root") + doc.SetRoot(root) + AddUsualNamespaces(root) + + for ns := range root.DeclaredNamespaces() { + found := false + for ns2 := range MostUsed { + if ns2 == ns { + found = true + } + } + if !found { + t.Errorf("Test failed - Namespace %s not found", ns) + } + } + +} + +func TestSetTo(t *testing.T) { + doc := dom.CreateDocument() + root := dom.CreateElement("root") + doc.SetRoot(root) + NS_SOAP_ENV.SetTo(root) + + if root.String() != `` { + t.Errorf("Test failed - root has not the correct NS: %s", root.String()) + } +} diff --git a/vendor/github.com/masterzen/winrm/winrm/client.go b/vendor/github.com/masterzen/winrm/winrm/client.go new file mode 100644 index 000000000..a195b6ea6 --- /dev/null +++ b/vendor/github.com/masterzen/winrm/winrm/client.go @@ -0,0 +1,166 @@ +package winrm + +import ( + "bytes" + "crypto/tls" + "crypto/x509" + "fmt" + "io" + "net/http" + + "github.com/masterzen/winrm/soap" +) + +type Client struct { + Parameters + username string + password string + useHTTPS bool + url string + http HttpPost + transport http.RoundTripper +} + +// NewClient will create a new remote client on url, connecting with user and password +// This function doesn't connect (connection happens only when CreateShell is called) +func NewClient(endpoint *Endpoint, user, password string) (client *Client, err error) { + params := DefaultParameters() + client, err = NewClientWithParameters(endpoint, user, password, params) + return +} + +// NewClient will create a new remote client on url, connecting with user and password +// This function doesn't connect (connection happens only when CreateShell is called) +func NewClientWithParameters(endpoint *Endpoint, user, password string, params *Parameters) (client *Client, err 
error) { + transport, err := newTransport(endpoint) + + client = &Client{ + Parameters: *params, + username: user, + password: password, + url: endpoint.url(), + http: Http_post, + useHTTPS: endpoint.HTTPS, + transport: transport, + } + + if params.TransportDecorator != nil { + client.transport = params.TransportDecorator(transport) + } + return +} + +// newTransport will create a new HTTP Transport, with options specified within the endpoint configuration +func newTransport(endpoint *Endpoint) (*http.Transport, error) { + transport := &http.Transport{ + TLSClientConfig: &tls.Config{ + InsecureSkipVerify: endpoint.Insecure, + }, + } + + if endpoint.CACert != nil && len(*endpoint.CACert) > 0 { + certPool, err := readCACerts(endpoint.CACert) + if err != nil { + return nil, err + } + + transport.TLSClientConfig.RootCAs = certPool + } + + return transport, nil +} + +func readCACerts(certs *[]byte) (*x509.CertPool, error) { + certPool := x509.NewCertPool() + + if !certPool.AppendCertsFromPEM(*certs) { + return nil, fmt.Errorf("Unable to read certificates") + } + + return certPool, nil +} + +// CreateShell will create a WinRM Shell, which is the prealable for running +// commands. +func (client *Client) CreateShell() (shell *Shell, err error) { + request := NewOpenShellRequest(client.url, &client.Parameters) + defer request.Free() + + response, err := client.sendRequest(request) + if err == nil { + var shellId string + if shellId, err = ParseOpenShellResponse(response); err == nil { + shell = &Shell{client: client, ShellId: shellId} + } + } + return +} + +func (client *Client) sendRequest(request *soap.SoapMessage) (response string, err error) { + return client.http(client, request) +} + +// Run will run command on the the remote host, writing the process stdout and stderr to +// the given writers. Note with this method it isn't possible to inject stdin. +func (client *Client) Run(command string, stdout io.Writer, stderr io.Writer) (exitCode int, err error) { + shell, err := client.CreateShell() + if err != nil { + return 0, err + } + var cmd *Command + cmd, err = shell.Execute(command) + if err != nil { + return 0, err + } + go io.Copy(stdout, cmd.Stdout) + go io.Copy(stderr, cmd.Stderr) + cmd.Wait() + shell.Close() + return cmd.ExitCode(), cmd.err +} + +// Run will run command on the the remote host, returning the process stdout and stderr +// as strings, and using the input stdin string as the process input +func (client *Client) RunWithString(command string, stdin string) (stdout string, stderr string, exitCode int, err error) { + shell, err := client.CreateShell() + if err != nil { + return "", "", 0, err + } + defer shell.Close() + var cmd *Command + cmd, err = shell.Execute(command) + if err != nil { + return "", "", 0, err + } + if len(stdin) > 0 { + cmd.Stdin.Write([]byte(stdin)) + } + var outWriter, errWriter bytes.Buffer + go io.Copy(&outWriter, cmd.Stdout) + go io.Copy(&errWriter, cmd.Stderr) + cmd.Wait() + return outWriter.String(), errWriter.String(), cmd.ExitCode(), cmd.err +} + +// Run will run command on the the remote host, writing the process stdout and stderr to +// the given writers, and injecting the process stdin with the stdin reader. +// Warning stdin (not stdout/stderr) are bufferized, which means reading only one byte in stdin will +// send a winrm http packet to the remote host. If stdin is a pipe, it might be better for +// performance reasons to buffer it. 
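+//
+// A minimal sketch of buffering a piped stdin (illustrative only: the
+// endpoint, credentials and command below are placeholders, and bufio/os
+// are assumed to be imported by the caller):
+//
+//	client, err := NewClient(&Endpoint{Host: "win.example.com", Port: 5985}, "Administrator", "secret")
+//	if err != nil {
+//		// handle error
+//	}
+//	exitCode, err := client.RunWithInput("cmd.exe", os.Stdout, os.Stderr, bufio.NewReader(os.Stdin))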
+func (client *Client) RunWithInput(command string, stdout io.Writer, stderr io.Writer, stdin io.Reader) (exitCode int, err error) { + shell, err := client.CreateShell() + if err != nil { + return 0, err + } + defer shell.Close() + var cmd *Command + cmd, err = shell.Execute(command) + if err != nil { + return 0, err + } + go io.Copy(cmd.Stdin, stdin) + go io.Copy(stdout, cmd.Stdout) + go io.Copy(stderr, cmd.Stderr) + cmd.Wait() + return cmd.ExitCode(), cmd.err +} diff --git a/vendor/github.com/masterzen/winrm/winrm/client_test.go b/vendor/github.com/masterzen/winrm/winrm/client_test.go new file mode 100644 index 000000000..c0203aca6 --- /dev/null +++ b/vendor/github.com/masterzen/winrm/winrm/client_test.go @@ -0,0 +1,51 @@ +package winrm + +import ( + "github.com/masterzen/winrm/soap" + . "gopkg.in/check.v1" + "io/ioutil" + "net/http" + "strings" +) + +func (s *WinRMSuite) TestNewClient(c *C) { + client, err := NewClient(&Endpoint{Host: "localhost", Port: 5985}, "Administrator", "v3r1S3cre7") + + c.Assert(err, IsNil) + c.Assert(client.url, Equals, "http://localhost:5985/wsman") + c.Assert(client.username, Equals, "Administrator") + c.Assert(client.password, Equals, "v3r1S3cre7") +} + +func (s *WinRMSuite) TestClientCreateShell(c *C) { + client, err := NewClient(&Endpoint{Host: "localhost", Port: 5985}, "Administrator", "v3r1S3cre7") + c.Assert(err, IsNil) + client.http = func(client *Client, message *soap.SoapMessage) (string, error) { + c.Assert(message.String(), Contains, "http://schemas.xmlsoap.org/ws/2004/09/transfer/Create") + return createShellResponse, nil + } + + shell, _ := client.CreateShell() + c.Assert(shell.ShellId, Equals, "67A74734-DD32-4F10-89DE-49A060483810") +} + +func (s *WinRMSuite) TestReplaceTransportWithDecorator(c *C) { + var myrt rtfunc = func(req *http.Request) (*http.Response, error) { + req.Body.Close() + return &http.Response{StatusCode: 500, Body: ioutil.NopCloser(strings.NewReader("yeehaw"))}, nil + } + + params := DefaultParameters() + params.TransportDecorator = func(*http.Transport) http.RoundTripper { return myrt } + + client, err := NewClientWithParameters(&Endpoint{Host: "localhost", Port: 5985}, "Administrator", "password", params) + c.Assert(err, IsNil) + _, err = client.http(client, soap.NewMessage()) + c.Assert(err.Error(), Equals, "http error: 500 - yeehaw") +} + +type rtfunc func(*http.Request) (*http.Response, error) + +func (f rtfunc) RoundTrip(req *http.Request) (*http.Response, error) { + return f(req) +} diff --git a/vendor/github.com/masterzen/winrm/winrm/command.go b/vendor/github.com/masterzen/winrm/winrm/command.go new file mode 100644 index 000000000..d8c4a7d31 --- /dev/null +++ b/vendor/github.com/masterzen/winrm/winrm/command.go @@ -0,0 +1,210 @@ +package winrm + +import ( + "bytes" + "errors" + "io" + "strings" +) + +type commandWriter struct { + *Command + eof bool +} + +type commandReader struct { + *Command + write *io.PipeWriter + read *io.PipeReader + stream string +} + +// Command represents a given command running on a Shell. This structure allows to get access +// to the various stdout, stderr and stdin pipes. 
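+//
+// A typical flow, sketched assuming a shell obtained from Client.CreateShell
+// (error handling elided for brevity):
+//
+//	cmd, _ := shell.Execute("ipconfig /all")
+//	go io.Copy(os.Stdout, cmd.Stdout)
+//	go io.Copy(os.Stderr, cmd.Stderr)
+//	cmd.Wait()
+//	fmt.Println(cmd.ExitCode())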
+type Command struct {
+	client    *Client
+	shell     *Shell
+	commandId string
+	exitCode  int
+	finished  bool
+	err       error
+
+	Stdin  *commandWriter
+	Stdout *commandReader
+	Stderr *commandReader
+
+	done   chan struct{}
+	cancel chan struct{}
+}
+
+func newCommand(shell *Shell, commandId string) *Command {
+	command := &Command{shell: shell, client: shell.client, commandId: commandId, exitCode: 1, err: nil, done: make(chan struct{}), cancel: make(chan struct{})}
+	command.Stdin = &commandWriter{Command: command, eof: false}
+	command.Stdout = newCommandReader("stdout", command)
+	command.Stderr = newCommandReader("stderr", command)
+
+	go fetchOutput(command)
+
+	return command
+}
+
+func newCommandReader(stream string, command *Command) *commandReader {
+	read, write := io.Pipe()
+	return &commandReader{Command: command, stream: stream, write: write, read: read}
+}
+
+func fetchOutput(command *Command) {
+	for {
+		select {
+		case <-command.cancel:
+			close(command.done)
+			return
+		default:
+			finished, err := command.slurpAllOutput()
+			if finished {
+				command.err = err
+				close(command.done)
+				return
+			}
+		}
+	}
+}
+
+func (command *Command) check() (err error) {
+	if command.commandId == "" {
+		return errors.New("Command has already been closed")
+	}
+	if command.shell == nil {
+		return errors.New("Command has no associated shell")
+	}
+	if command.client == nil {
+		return errors.New("Command has no associated client")
+	}
+	return
+}
+
+// Close will terminate the running command
+func (command *Command) Close() (err error) {
+	if err = command.check(); err != nil {
+		return err
+	}
+
+	select { // close cancel channel if it's still open
+	case <-command.cancel:
+	default:
+		close(command.cancel)
+	}
+
+	request := NewSignalRequest(command.client.url, command.shell.ShellId, command.commandId, &command.client.Parameters)
+	defer request.Free()
+
+	_, err = command.client.sendRequest(request)
+	return err
+}
+
+func (command *Command) slurpAllOutput() (finished bool, err error) {
+	if err = command.check(); err != nil {
+		command.Stderr.write.CloseWithError(err)
+		command.Stdout.write.CloseWithError(err)
+		return true, err
+	}
+
+	request := NewGetOutputRequest(command.client.url, command.shell.ShellId, command.commandId, "stdout stderr", &command.client.Parameters)
+	defer request.Free()
+
+	response, err := command.client.sendRequest(request)
+	if err != nil {
+		if strings.Contains(err.Error(), "OperationTimeout") {
+			// Operation timeout because there was no command output
+			return false, nil
+		}
+
+		command.Stderr.write.CloseWithError(err)
+		command.Stdout.write.CloseWithError(err)
+		return true, err
+	}
+
+	var exitCode int
+	var stdout, stderr bytes.Buffer
+	finished, exitCode, err = ParseSlurpOutputErrResponse(response, &stdout, &stderr)
+	if err != nil {
+		command.Stderr.write.CloseWithError(err)
+		command.Stdout.write.CloseWithError(err)
+		return true, err
+	}
+	if stdout.Len() > 0 {
+		command.Stdout.write.Write(stdout.Bytes())
+	}
+	if stderr.Len() > 0 {
+		command.Stderr.write.Write(stderr.Bytes())
+	}
+	if finished {
+		command.exitCode = exitCode
+		command.Stderr.write.Close()
+		command.Stdout.write.Close()
+	}
+
+	return
+}
+
+func (command *Command) sendInput(data []byte) (err error) {
+	if err = command.check(); err != nil {
+		return err
+	}
+
+	request := NewSendInputRequest(command.client.url, command.shell.ShellId, command.commandId, data, &command.client.Parameters)
+	defer request.Free()
+
+	_, err = command.client.sendRequest(request)
+	return
+}
+
+// ExitCode returns the command exit code once the command has finished;
+// before completion it returns the initial placeholder value 1.
+func (command *Command) ExitCode() int {
+	return command.exitCode
+}
+
+// Wait blocks the current goroutine until the remote command terminates.
+func (command *Command) Wait() {
+	// block until finished
+	<-command.done
+}
+
+// Write data to this Pipe
+func (w *commandWriter) Write(data []byte) (written int, err error) {
+	for len(data) > 0 {
+		if w.eof {
+			err = io.EOF
+			return
+		}
+		// never send more data than our EnvelopeSize.
+		n := min(w.client.Parameters.EnvelopeSize-1000, len(data))
+		if err = w.sendInput(data[:n]); err != nil {
+			break
+		}
+		data = data[n:]
+		written += n
+	}
+	return
+}
+
+func min(a int, b int) int {
+	if a < b {
+		return a
+	}
+	return b
+}
+
+func (w *commandWriter) Close() error {
+	w.eof = true
+	return nil
+}
+
+// Read data from this Pipe
+func (r *commandReader) Read(buf []byte) (int, error) {
+	n, err := r.read.Read(buf)
+	if err != nil && err != io.EOF {
+		return 0, err
+	}
+	return n, err
+}
diff --git a/vendor/github.com/masterzen/winrm/winrm/command_test.go b/vendor/github.com/masterzen/winrm/winrm/command_test.go
new file mode 100644
index 000000000..3ee9efc9e
--- /dev/null
+++ b/vendor/github.com/masterzen/winrm/winrm/command_test.go
@@ -0,0 +1,164 @@
+package winrm
+
+import (
+	"bytes"
+	"io"
+	"io/ioutil"
+	"strings"
+	"sync"
+	"time"
+
+	"github.com/masterzen/winrm/soap"
+	. "gopkg.in/check.v1"
+)
+
+func (s *WinRMSuite) TestExecuteCommand(c *C) {
+	client, err := NewClient(&Endpoint{Host: "localhost", Port: 5985}, "Administrator", "v3r1S3cre7")
+	c.Assert(err, IsNil)
+
+	shell := &Shell{client: client, ShellId: "67A74734-DD32-4F10-89DE-49A060483810"}
+	count := 0
+	client.http = func(client *Client, message *soap.SoapMessage) (string, error) {
+		switch count {
+		case 0:
+			{
+				c.Assert(message.String(), Contains, "http://schemas.microsoft.com/wbem/wsman/1/windows/shell/Command")
+				count = 1
+				return executeCommandResponse, nil
+			}
+		case 1:
+			{
+				c.Assert(message.String(), Contains, "http://schemas.microsoft.com/wbem/wsman/1/windows/shell/Receive")
+				count = 2
+				return outputResponse, nil
+			}
+		default:
+			{
+				return doneCommandResponse, nil
+			}
+		}
+	}
+
+	command, _ := shell.Execute("ipconfig /all")
+	var stdout, stderr bytes.Buffer
+	var wg sync.WaitGroup
+	f := func(b *bytes.Buffer, r *commandReader) {
+		defer wg.Done()
+		io.Copy(b, r)
+	}
+	// register with the WaitGroup before launching the goroutines, so
+	// wg.Wait() cannot run ahead of the Add calls
+	wg.Add(2)
+	go f(&stdout, command.Stdout)
+	go f(&stderr, command.Stderr)
+	command.Wait()
+	wg.Wait()
+	c.Assert(stdout.String(), Equals, "That's all folks!!!")
+	c.Assert(stderr.String(), Equals, "This is stderr, I'm pretty sure!")
+}
+
+func (s *WinRMSuite) TestStdinCommand(c *C) {
+	client, err := NewClient(&Endpoint{Host: "localhost", Port: 5985}, "Administrator", "v3r1S3cre7")
+	c.Assert(err, IsNil)
+
+	shell := &Shell{client: client, ShellId: "67A74734-DD32-4F10-89DE-49A060483810"}
+	count := 0
+	client.http = func(client *Client, message *soap.SoapMessage) (string, error) {
+		if strings.Contains(message.String(), "http://schemas.microsoft.com/wbem/wsman/1/windows/shell/Send") {
+			c.Assert(message.String(), Contains, "c3RhbmRhcmQgaW5wdXQ=")
+			return "", nil
+		} else {
+			if strings.Contains(message.String(), "http://schemas.microsoft.com/wbem/wsman/1/windows/shell/Command") {
+				return executeCommandResponse, nil
+			} else if count != 1 && strings.Contains(message.String(), "http://schemas.microsoft.com/wbem/wsman/1/windows/shell/Receive") {
+				count = 1
+				return outputResponse, nil
+			} else {
+				return doneCommandResponse, nil
+			}
+		}
+	}
+
+	command, _ := shell.Execute("ipconfig /all")
+	command.Stdin.Write([]byte("standard input"))
+	// slurp output from command
+	var outWriter, errWriter bytes.Buffer
+	go io.Copy(&outWriter, command.Stdout)
+	go io.Copy(&errWriter, command.Stderr)
+	command.Wait()
+}
+
+func (s *WinRMSuite) TestCommandExitCode(c *C) {
+	client, err := NewClient(&Endpoint{Host: "localhost", Port: 5985}, "Administrator", "v3r1S3cre7")
+	c.Assert(err, IsNil)
+
+	shell := &Shell{client: client, ShellId: "67A74734-DD32-4F10-89DE-49A060483810"}
+	count := 0
+	client.http = func(client *Client, message *soap.SoapMessage) (string, error) {
+		defer func() { count += 1 }()
+		switch count {
+		case 0:
+			return executeCommandResponse, nil
+		case 1:
+			return doneCommandResponse, nil
+		default:
+			c.Log("Mimicking some observed Windows behavior where only the first 'done' response has the actual exit code and 0 afterwards")
+			return doneCommandExitCode0Response, nil
+		}
+	}
+
+	command, _ := shell.Execute("ipconfig /all")
+
+	command.Wait()
+	<-time.After(time.Second) // to make the test fail if fetchOutput races to re-set the exit code
+
+	c.Assert(command.ExitCode(), Equals, 123)
+}
+
+func (s *WinRMSuite) TestCloseCommandStopsFetch(c *C) {
+	client, err := NewClient(&Endpoint{Host: "localhost", Port: 5985}, "Administrator", "v3r1S3cre7")
+	c.Assert(err, IsNil)
+
+	shell := &Shell{client: client, ShellId: "67A74734-DD32-4F10-89DE-49A060483810"}
+
+	http := make(chan string)
+	client.http = func(client *Client, message *soap.SoapMessage) (string, error) {
+		switch {
+		case strings.Contains(message.String(), "http://schemas.microsoft.com/wbem/wsman/1/windows/shell/Receive"):
+			c.Log("Request for command output received by server")
+			r := <-http
+			c.Log("Returning command output")
+			return r, nil
+		case strings.Contains(message.String(), "http://schemas.microsoft.com/wbem/wsman/1/windows/shell/Command"):
+			return executeCommandResponse, nil
+		case strings.Contains(message.String(), "http://schemas.microsoft.com/wbem/wsman/1/windows/shell/Signal"):
+			c.Log("Signal message received by server")
+			return "", nil // response is not used
+		default:
+			c.Logf("Unexpected message: %s", message)
+			return "", nil
+		}
+	}
+
+	command, _ := shell.Execute("ipconfig /all")
+	// we need to be reading Stdout/Stderr, otherwise the writes to them block...
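+	// (each commandReader wraps an io.Pipe, so slurpAllOutput's writes to
+	// Stdout/Stderr only complete once a reader drains them)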
+	go ioutil.ReadAll(command.Stdout)
+	go ioutil.ReadAll(command.Stderr)
+
+	http <- outputResponse // wait for command to enter fetch/slurp
+
+	command.Close()
+
+	select {
+	case http <- outputResponse: // return to fetch from slurp
+		c.Log("Fetch loop 'drained' one last response before realizing that the command is now closed")
+	case <-time.After(1 * time.Second):
+		c.Log("no poll within one second, fetch may have stopped")
+	}
+
+	select {
+	case http <- outputResponse:
+		c.Log("Fetch loop is still polling after command.Close()")
+		c.FailNow()
+	case <-time.After(1 * time.Second):
+		c.Log("no poll within one second, assuming fetch has stopped")
+	}
+}
diff --git a/vendor/github.com/masterzen/winrm/winrm/endpoint.go b/vendor/github.com/masterzen/winrm/winrm/endpoint.go
new file mode 100644
index 000000000..93e255d5e
--- /dev/null
+++ b/vendor/github.com/masterzen/winrm/winrm/endpoint.go
@@ -0,0 +1,22 @@
+package winrm
+
+import "fmt"
+
+// Endpoint describes how to reach a remote WinRM listener
+type Endpoint struct {
+	Host     string
+	Port     int
+	HTTPS    bool
+	Insecure bool
+	CACert   *[]byte
+}
+
+// url builds the WS-Management endpoint URL from the scheme, host and port
+func (ep *Endpoint) url() string {
+	var scheme string
+	if ep.HTTPS {
+		scheme = "https"
+	} else {
+		scheme = "http"
+	}
+
+	return fmt.Sprintf("%s://%s:%d/wsman", scheme, ep.Host, ep.Port)
+}
diff --git a/vendor/github.com/masterzen/winrm/winrm/endpoint_test.go b/vendor/github.com/masterzen/winrm/winrm/endpoint_test.go
new file mode 100644
index 000000000..4b5aa8bcc
--- /dev/null
+++ b/vendor/github.com/masterzen/winrm/winrm/endpoint_test.go
@@ -0,0 +1,15 @@
+package winrm
+
+import (
+	. "gopkg.in/check.v1"
+)
+
+func (s *WinRMSuite) TestEndpointUrlHttp(c *C) {
+	endpoint := &Endpoint{Host: "abc", Port: 123}
+	c.Assert(endpoint.url(), Equals, "http://abc:123/wsman")
+}
+
+func (s *WinRMSuite) TestEndpointUrlHttps(c *C) {
+	endpoint := &Endpoint{Host: "abc", Port: 123, HTTPS: true}
+	c.Assert(endpoint.url(), Equals, "https://abc:123/wsman")
+}
diff --git a/vendor/github.com/masterzen/winrm/winrm/fixture_test.go b/vendor/github.com/masterzen/winrm/winrm/fixture_test.go
new file mode 100644
index 000000000..95b18e4dd
--- /dev/null
+++ b/vendor/github.com/masterzen/winrm/winrm/fixture_test.go
@@ -0,0 +1,87 @@
+package winrm
+
+import (
+	"fmt"
+	.
"gopkg.in/check.v1" + "strings" +) + +var ( + createShellResponse = ` + + http://schemas.xmlsoap.org/ws/2004/09/transfer/CreateResponse + uuid:195078CF-804B-41F7-A246-9CB3C1A41A9A + http://schemas.xmlsoap.org/ws/2004/08/addressing/role/anonymous + uuid:D00059E8-57D6-4035-AD8D-3EDC495DA163 + + + + http://107.20.128.235:5985/wsman + + http://schemas.microsoft.com/wbem/wsman/1/windows/shell/cmd + + 67A74734-DD32-4F10-89DE-49A060483810 + + + + + 67A74734-DD32-4F10-89DE-49A060483810 + http://schemas.microsoft.com/wbem/wsman/1/windows/shell/cmd + Administrator + 213.41.177.193 + PT7200.000S + stdin + stdout + stderr + P0DT0H0M1S + P0DT0H0M1S + + + ` + + executeCommandResponse = `http://schemas.microsoft.com/wbem/wsman/1/windows/shell/CommandResponseuuid:D9E108AA-E32B-45E3-8601-E9C70999D3BAhttp://schemas.xmlsoap.org/ws/2004/08/addressing/role/anonymousuuid:F530804C-6D02-4FA9-AE78-1997750594BA1A6DEE6B-EC68-4DD6-87E9-030C0048ECC4` + + outputResponse = `http://schemas.microsoft.com/wbem/wsman/1/windows/shell/ReceiveResponseuuid:AAD46BD4-6315-4C3C-93D4-94A55773287Dhttp://schemas.xmlsoap.org/ws/2004/08/addressing/role/anonymousuuid:18A52A06-9027-41DC-8850-3F244595AF62VGhhdCdzIGFsbCBmb2xrcyEhIQ==VGhpcyBpcyBzdGRlcnIsIEknbSBwcmV0dHkgc3VyZSE=` + + singleOutputResponse = `http://schemas.microsoft.com/wbem/wsman/1/windows/shell/ReceiveResponseuuid:AAD46BD4-6315-4C3C-93D4-94A55773287Dhttp://schemas.xmlsoap.org/ws/2004/08/addressing/role/anonymousuuid:18A52A06-9027-41DC-8850-3F244595AF62VGhhdCdzIGFsbCBmb2xrcyEhIQ==` + + doneCommandResponse = `http://schemas.microsoft.com/wbem/wsman/1/windows/shell/ReceiveResponseuuid:206F8145-683D-4987-949B-E099F999F088http://schemas.xmlsoap.org/ws/2004/08/addressing/role/anonymousuuid:6c68191c-8385-4816-506a-0769cb9f3f4e123 + ` + doneCommandExitCode0Response = `http://schemas.microsoft.com/wbem/wsman/1/windows/shell/ReceiveResponseuuid:206F8145-683D-4987-949B-E099F999F088http://schemas.xmlsoap.org/ws/2004/08/addressing/role/anonymousuuid:6c68191c-8385-4816-506a-0769cb9f3f4e0` +) + +type containsChecker struct { + *CheckerInfo +} + +// The Contains checker verifies that the obtained value contains +// the expected value, according to usual Go semantics for strings.Contains. 
+// +// For example: +// +// c.Assert(haystack, Contains, "needle") +// +var Contains Checker = &containsChecker{ + &CheckerInfo{Name: "contains", Params: []string{"obtained", "expected"}}, +} + +func (checker *containsChecker) Check(params []interface{}, names []string) (result bool, error string) { + return matches(params[0], params[1]) +} + +func matches(haystack, needle interface{}) (result bool, error string) { + neStr, ok := needle.(string) + if !ok { + return false, "Expected value must be a string" + } + valueStr, valueIsStr := haystack.(string) + if !valueIsStr { + if valueWithStr, valueHasStr := haystack.(fmt.Stringer); valueHasStr { + valueStr, valueIsStr = valueWithStr.String(), true + } + } + if valueIsStr { + return strings.Contains(valueStr, neStr), "" + } + return false, "Obtained value is not a string and has no .String()" +} diff --git a/vendor/github.com/masterzen/winrm/winrm/http.go b/vendor/github.com/masterzen/winrm/winrm/http.go new file mode 100644 index 000000000..7e191e364 --- /dev/null +++ b/vendor/github.com/masterzen/winrm/winrm/http.go @@ -0,0 +1,60 @@ +package winrm + +import ( + "fmt" + "io/ioutil" + "net/http" + "strings" + + "github.com/masterzen/winrm/soap" +) + +var soapXML string = "application/soap+xml" + +type HttpPost func(*Client, *soap.SoapMessage) (string, error) + +func body(response *http.Response) (content string, err error) { + contentType := response.Header.Get("Content-Type") + if strings.HasPrefix(contentType, soapXML) { + var body []byte + body, err = ioutil.ReadAll(response.Body) + response.Body.Close() + if err != nil { + err = fmt.Errorf("error while reading request body %s", err) + return + } + + content = string(body) + return + } else { + err = fmt.Errorf("invalid content-type: %s", contentType) + return + } + return +} + +func Http_post(client *Client, request *soap.SoapMessage) (response string, err error) { + httpClient := &http.Client{Transport: client.transport} + + req, err := http.NewRequest("POST", client.url, strings.NewReader(request.String())) + if err != nil { + err = fmt.Errorf("impossible to create http request %s", err) + return + } + req.Header.Set("Content-Type", soapXML+";charset=UTF-8") + req.SetBasicAuth(client.username, client.password) + resp, err := httpClient.Do(req) + if err != nil { + err = fmt.Errorf("unknown error %s", err) + return + } + + if resp.StatusCode == 200 { + response, err = body(resp) + } else { + body, _ := ioutil.ReadAll(resp.Body) + err = fmt.Errorf("http error: %d - %s", resp.StatusCode, body) + } + + return +} diff --git a/vendor/github.com/masterzen/winrm/winrm/http_test.go b/vendor/github.com/masterzen/winrm/winrm/http_test.go new file mode 100644 index 000000000..6636051b4 --- /dev/null +++ b/vendor/github.com/masterzen/winrm/winrm/http_test.go @@ -0,0 +1,63 @@ +package winrm + +import ( + "net" + "net/http" + "net/http/httptest" + + . 
"gopkg.in/check.v1" +) + +var response = ` + + http://schemas.xmlsoap.org/ws/2004/09/transfer/CreateResponse + uuid:195078CF-804B-41F7-A246-9CB3C1A41A9A + http://schemas.xmlsoap.org/ws/2004/08/addressing/role/anonymous + uuid:D00059E8-57D6-4035-AD8D-3EDC495DA163 + + + + http://107.20.128.235:15985/wsman + + http://schemas.microsoft.com/wbem/wsman/1/windows/shell/cmd + + 67A74734-DD32-4F10-89DE-49A060483810 + + + + + 67A74734-DD32-4F10-89DE-49A060483810 + http://schemas.microsoft.com/wbem/wsman/1/windows/shell/cmd + Administrator + 213.41.177.193 + PT7200.000S + stdin + stdout +stderr + P0DT0H0M1S + P0DT0H0M1S + + +` + +func (s *WinRMSuite) TestHttpRequest(c *C) { + ts := httptest.NewUnstartedServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/soap+xml") + w.Write([]byte(response)) + })) + l, err := net.Listen("tcp", "127.0.0.1:15985") + if err != nil { + c.Fatalf("Can't listen %s", err) + } + ts.Listener = l + ts.Start() + defer ts.Close() + + client, err := NewClient(&Endpoint{Host: "localhost", Port: 15985}, "test", "test") + c.Assert(err, IsNil) + shell, err := client.CreateShell() + if err != nil { + c.Fatalf("Can't create shell %s", err) + } + c.Assert(shell.ShellId, Equals, "67A74734-DD32-4F10-89DE-49A060483810") +} diff --git a/vendor/github.com/masterzen/winrm/winrm/parameters.go b/vendor/github.com/masterzen/winrm/winrm/parameters.go new file mode 100644 index 000000000..099863b40 --- /dev/null +++ b/vendor/github.com/masterzen/winrm/winrm/parameters.go @@ -0,0 +1,20 @@ +package winrm + +import ( + "net/http" +) + +type Parameters struct { + Timeout string + Locale string + EnvelopeSize int + TransportDecorator func(*http.Transport) http.RoundTripper +} + +func DefaultParameters() *Parameters { + return NewParameters("PT60S", "en-US", 153600) +} + +func NewParameters(timeout string, locale string, envelopeSize int) *Parameters { + return &Parameters{Timeout: timeout, Locale: locale, EnvelopeSize: envelopeSize} +} diff --git a/vendor/github.com/masterzen/winrm/winrm/parameters_test.go b/vendor/github.com/masterzen/winrm/winrm/parameters_test.go new file mode 100644 index 000000000..9b42c4eb2 --- /dev/null +++ b/vendor/github.com/masterzen/winrm/winrm/parameters_test.go @@ -0,0 +1,19 @@ +package winrm + +import ( + . 
"gopkg.in/check.v1" +) + +func (s *WinRMSuite) TestDefaultParameters(c *C) { + params := DefaultParameters() + c.Assert(params.Locale, Equals, "en-US") + c.Assert(params.Timeout, Equals, "PT60S") + c.Assert(params.EnvelopeSize, Equals, 153600) +} + +func (s *WinRMSuite) TestParameters(c *C) { + params := NewParameters("PT120S", "fr-FR", 128) + c.Assert(params.Locale, Equals, "fr-FR") + c.Assert(params.Timeout, Equals, "PT120S") + c.Assert(params.EnvelopeSize, Equals, 128) +} diff --git a/vendor/github.com/masterzen/winrm/winrm/powershell.go b/vendor/github.com/masterzen/winrm/winrm/powershell.go new file mode 100644 index 000000000..5544bc69d --- /dev/null +++ b/vendor/github.com/masterzen/winrm/winrm/powershell.go @@ -0,0 +1,22 @@ +package winrm + +import ( + "encoding/base64" + "fmt" +) + +// Wraps a PowerShell script and prepares it for execution by the winrm client +func Powershell(psCmd string) string { + // 2 byte chars to make PowerShell happy + wideCmd := "" + for _, b := range []byte(psCmd) { + wideCmd += string(b) + "\x00" + } + + // Base64 encode the command + input := []uint8(wideCmd) + encodedCmd := base64.StdEncoding.EncodeToString(input) + + // Create the powershell.exe command line to execute the script + return fmt.Sprintf("powershell.exe -EncodedCommand %s", encodedCmd) +} diff --git a/vendor/github.com/masterzen/winrm/winrm/powershell_test.go b/vendor/github.com/masterzen/winrm/winrm/powershell_test.go new file mode 100644 index 000000000..cb98b93fc --- /dev/null +++ b/vendor/github.com/masterzen/winrm/winrm/powershell_test.go @@ -0,0 +1,10 @@ +package winrm + +import ( + . "gopkg.in/check.v1" +) + +func (s *WinRMSuite) TestPowershell(c *C) { + psCmd := Powershell("dir") + c.Assert(psCmd, Equals, "powershell.exe -EncodedCommand ZABpAHIA") +} diff --git a/vendor/github.com/masterzen/winrm/winrm/request.go b/vendor/github.com/masterzen/winrm/winrm/request.go new file mode 100644 index 000000000..f9ad8285b --- /dev/null +++ b/vendor/github.com/masterzen/winrm/winrm/request.go @@ -0,0 +1,116 @@ +package winrm + +import ( + "encoding/base64" + + "github.com/masterzen/winrm/soap" + "github.com/nu7hatch/gouuid" +) + +func genUUID() string { + uuid, _ := uuid.NewV4() + return "uuid:" + uuid.String() +} + +func defaultHeaders(message *soap.SoapMessage, url string, params *Parameters) (h *soap.SoapHeader) { + h = message.Header() + h.To(url).ReplyTo("http://schemas.xmlsoap.org/ws/2004/08/addressing/role/anonymous").MaxEnvelopeSize(params.EnvelopeSize).Id(genUUID()).Locale(params.Locale).Timeout(params.Timeout) + return +} + +func NewOpenShellRequest(uri string, params *Parameters) (message *soap.SoapMessage) { + if params == nil { + params = DefaultParameters() + } + message = soap.NewMessage() + defaultHeaders(message, uri, params).Action("http://schemas.xmlsoap.org/ws/2004/09/transfer/Create").ResourceURI("http://schemas.microsoft.com/wbem/wsman/1/windows/shell/cmd").AddOption(soap.NewHeaderOption("WINRS_NOPROFILE", "FALSE")).AddOption(soap.NewHeaderOption("WINRS_CODEPAGE", "65001")).Build() + + body := message.CreateBodyElement("Shell", soap.NS_WIN_SHELL) + input := message.CreateElement(body, "InputStreams", soap.NS_WIN_SHELL) + input.SetContent("stdin") + output := message.CreateElement(body, "OutputStreams", soap.NS_WIN_SHELL) + output.SetContent("stdout stderr") + return +} + +func NewDeleteShellRequest(uri string, shellId string, params *Parameters) (message *soap.SoapMessage) { + if params == nil { + params = DefaultParameters() + } + message = soap.NewMessage() + 
defaultHeaders(message, uri, params).Action("http://schemas.xmlsoap.org/ws/2004/09/transfer/Delete").ShellId(shellId).ResourceURI("http://schemas.microsoft.com/wbem/wsman/1/windows/shell/cmd").Build()
+
+	message.NewBody()
+
+	return
+}
+
+func NewExecuteCommandRequest(uri, shellId, command string, arguments []string, params *Parameters) (message *soap.SoapMessage) {
+	if params == nil {
+		params = DefaultParameters()
+	}
+	message = soap.NewMessage()
+	defaultHeaders(message, uri, params).Action("http://schemas.microsoft.com/wbem/wsman/1/windows/shell/Command").ResourceURI("http://schemas.microsoft.com/wbem/wsman/1/windows/shell/cmd").ShellId(shellId).AddOption(soap.NewHeaderOption("WINRS_CONSOLEMODE_STDIN", "TRUE")).AddOption(soap.NewHeaderOption("WINRS_SKIP_CMD_SHELL", "FALSE")).Build()
+	body := message.CreateBodyElement("CommandLine", soap.NS_WIN_SHELL)
+
+	// ensure special characters like & don't mangle the request XML
+	command = "<![CDATA[" + command + "]]>"
+	commandElement := message.CreateElement(body, "Command", soap.NS_WIN_SHELL)
+	commandElement.SetContent(command)
+
+	for _, arg := range arguments {
+		arg = "<![CDATA[" + arg + "]]>"
+		argumentsElement := message.CreateElement(body, "Arguments", soap.NS_WIN_SHELL)
+		argumentsElement.SetContent(arg)
+	}
+
+	return
+}
+
+func NewGetOutputRequest(uri string, shellId string, commandId string, streams string, params *Parameters) (message *soap.SoapMessage) {
+	if params == nil {
+		params = DefaultParameters()
+	}
+	message = soap.NewMessage()
+	defaultHeaders(message, uri, params).Action("http://schemas.microsoft.com/wbem/wsman/1/windows/shell/Receive").ResourceURI("http://schemas.microsoft.com/wbem/wsman/1/windows/shell/cmd").ShellId(shellId).Build()
+
+	receive := message.CreateBodyElement("Receive", soap.NS_WIN_SHELL)
+	desiredStreams := message.CreateElement(receive, "DesiredStream", soap.NS_WIN_SHELL)
+	desiredStreams.SetAttr("CommandId", commandId)
+	desiredStreams.SetContent(streams)
+	return
+}
+
+func NewSendInputRequest(uri string, shellId string, commandId string, input []byte, params *Parameters) (message *soap.SoapMessage) {
+	if params == nil {
+		params = DefaultParameters()
+	}
+	message = soap.NewMessage()
+
+	defaultHeaders(message, uri, params).Action("http://schemas.microsoft.com/wbem/wsman/1/windows/shell/Send").ResourceURI("http://schemas.microsoft.com/wbem/wsman/1/windows/shell/cmd").ShellId(shellId).Build()
+
+	content := base64.StdEncoding.EncodeToString(input)
+
+	send := message.CreateBodyElement("Send", soap.NS_WIN_SHELL)
+	streams := message.CreateElement(send, "Stream", soap.NS_WIN_SHELL)
+	streams.SetAttr("Name", "stdin")
+	streams.SetAttr("CommandId", commandId)
+	streams.SetContent(content)
+	return
+}
+
+func NewSignalRequest(uri string, shellId string, commandId string, params *Parameters) (message *soap.SoapMessage) {
+	if params == nil {
+		params = DefaultParameters()
+	}
+	message = soap.NewMessage()
+
+	defaultHeaders(message, uri, params).Action("http://schemas.microsoft.com/wbem/wsman/1/windows/shell/Signal").ResourceURI("http://schemas.microsoft.com/wbem/wsman/1/windows/shell/cmd").ShellId(shellId).Build()
+
+	signal := message.CreateBodyElement("Signal", soap.NS_WIN_SHELL)
+	signal.SetAttr("CommandId", commandId)
+	code := message.CreateElement(signal, "Code", soap.NS_WIN_SHELL)
+	code.SetContent("http://schemas.microsoft.com/wbem/wsman/1/windows/shell/signal/terminate")
+
+	return
+}
diff --git a/vendor/github.com/masterzen/winrm/winrm/request_test.go b/vendor/github.com/masterzen/winrm/winrm/request_test.go
new file mode 100644
index
000000000..545ff253e --- /dev/null +++ b/vendor/github.com/masterzen/winrm/winrm/request_test.go @@ -0,0 +1,143 @@ +package winrm + +import ( + "strings" + "testing" + + "github.com/masterzen/simplexml/dom" + "github.com/masterzen/winrm/soap" + "github.com/masterzen/xmlpath" + . "gopkg.in/check.v1" +) + +// Hook up gocheck into the "go test" runner. +func Test(t *testing.T) { TestingT(t) } + +type WinRMSuite struct{} + +var _ = Suite(&WinRMSuite{}) + +func (s *WinRMSuite) TestOpenShellRequest(c *C) { + openShell := NewOpenShellRequest("http://localhost", nil) + defer openShell.Free() + + assertXPath(c, openShell.Doc(), "//a:Action", "http://schemas.xmlsoap.org/ws/2004/09/transfer/Create") + assertXPath(c, openShell.Doc(), "//a:To", "http://localhost") + assertXPath(c, openShell.Doc(), "//env:Body/rsp:Shell/rsp:InputStreams", "stdin") + assertXPath(c, openShell.Doc(), "//env:Body/rsp:Shell/rsp:OutputStreams", "stdout stderr") +} + +func (s *WinRMSuite) TestDeleteShellRequest(c *C) { + request := NewDeleteShellRequest("http://localhost", "SHELLID", nil) + defer request.Free() + + assertXPath(c, request.Doc(), "//a:Action", "http://schemas.xmlsoap.org/ws/2004/09/transfer/Delete") + assertXPath(c, request.Doc(), "//a:To", "http://localhost") + assertXPath(c, request.Doc(), "//w:Selector[@Name=\"ShellId\"]", "SHELLID") +} + +func (s *WinRMSuite) TestExecuteCommandRequest(c *C) { + request := NewExecuteCommandRequest("http://localhost", "SHELLID", "ipconfig /all", []string{}, nil) + defer request.Free() + + assertXPath(c, request.Doc(), "//a:Action", "http://schemas.microsoft.com/wbem/wsman/1/windows/shell/Command") + assertXPath(c, request.Doc(), "//a:To", "http://localhost") + assertXPath(c, request.Doc(), "//w:Selector[@Name=\"ShellId\"]", "SHELLID") + assertXPath(c, request.Doc(), "//w:Option[@Name=\"WINRS_CONSOLEMODE_STDIN\"]", "TRUE") + assertXPath(c, request.Doc(), "//rsp:CommandLine/rsp:Command", "ipconfig /all") + assertXPathNil(c, request.Doc(), "//rsp:CommandLine/rsp:Arguments") +} + +func (s *WinRMSuite) TestExecuteCommandWithArgumentsRequest(c *C) { + args := []string{"/p", "C:\\test.txt"} + request := NewExecuteCommandRequest("http://localhost", "SHELLID", "del", args, nil) + defer request.Free() + + assertXPath(c, request.Doc(), "//a:Action", "http://schemas.microsoft.com/wbem/wsman/1/windows/shell/Command") + assertXPath(c, request.Doc(), "//a:To", "http://localhost") + assertXPath(c, request.Doc(), "//w:Selector[@Name=\"ShellId\"]", "SHELLID") + assertXPath(c, request.Doc(), "//w:Option[@Name=\"WINRS_CONSOLEMODE_STDIN\"]", "TRUE") + assertXPath(c, request.Doc(), "//rsp:CommandLine/rsp:Command", "del") + assertXPath(c, request.Doc(), "//rsp:CommandLine/rsp:Arguments", "/p") + assertXPath(c, request.Doc(), "//rsp:CommandLine/rsp:Arguments", "C:\\test.txt") +} + +func (s *WinRMSuite) TestGetOutputRequest(c *C) { + request := NewGetOutputRequest("http://localhost", "SHELLID", "COMMANDID", "stdout stderr", nil) + defer request.Free() + + assertXPath(c, request.Doc(), "//a:Action", "http://schemas.microsoft.com/wbem/wsman/1/windows/shell/Receive") + assertXPath(c, request.Doc(), "//a:To", "http://localhost") + assertXPath(c, request.Doc(), "//w:Selector[@Name=\"ShellId\"]", "SHELLID") + assertXPath(c, request.Doc(), "//rsp:Receive/rsp:DesiredStream[@CommandId=\"COMMANDID\"]", "stdout stderr") +} + +func (s *WinRMSuite) TestSendInputRequest(c *C) { + request := NewSendInputRequest("http://localhost", "SHELLID", "COMMANDID", []byte{31, 32}, nil) + defer request.Free() + + assertXPath(c, 
request.Doc(), "//a:Action", "http://schemas.microsoft.com/wbem/wsman/1/windows/shell/Send") + assertXPath(c, request.Doc(), "//a:To", "http://localhost") + assertXPath(c, request.Doc(), "//w:Selector[@Name=\"ShellId\"]", "SHELLID") + assertXPath(c, request.Doc(), "//rsp:Send/rsp:Stream[@CommandId=\"COMMANDID\"]", "HyA=") +} + +func (s *WinRMSuite) TestSignalRequest(c *C) { + request := NewSignalRequest("http://localhost", "SHELLID", "COMMANDID", nil) + defer request.Free() + + assertXPath(c, request.Doc(), "//a:Action", "http://schemas.microsoft.com/wbem/wsman/1/windows/shell/Signal") + assertXPath(c, request.Doc(), "//a:To", "http://localhost") + assertXPath(c, request.Doc(), "//w:Selector[@Name=\"ShellId\"]", "SHELLID") + assertXPath(c, request.Doc(), "//rsp:Signal[@CommandId=\"COMMANDID\"]/rsp:Code", "http://schemas.microsoft.com/wbem/wsman/1/windows/shell/signal/terminate") +} + +func assertXPath(c *C, doc *dom.Document, request string, expected string) { + root, path, err := parseXPath(doc, request) + + if err != nil { + c.Fatalf("Xpath %s gives error %s", request, err) + } + + ok := path.Exists(root) + c.Assert(ok, Equals, true) + + var foundValue string + iter := path.Iter(root) + for iter.Next() { + foundValue = iter.Node().String() + if foundValue == expected { + break + } + } + + if foundValue != expected { + c.Errorf("Should have found '%s', but found '%s' instead", expected, foundValue) + } +} + +func assertXPathNil(c *C, doc *dom.Document, request string) { + root, path, err := parseXPath(doc, request) + + if err != nil { + c.Fatalf("Xpath %s gives error %s", request, err) + } + + ok := path.Exists(root) + c.Assert(ok, Equals, false) +} + +func parseXPath(doc *dom.Document, request string) (*xmlpath.Node, *xmlpath.Path, error) { + content := strings.NewReader(doc.String()) + node, err := xmlpath.Parse(content) + if err != nil { + return nil, nil, err + } + + path, err := xmlpath.CompileWithNamespace(request, soap.GetAllNamespaces()) + if err != nil { + return nil, nil, err + } + + return node, path, nil +} diff --git a/vendor/github.com/masterzen/winrm/winrm/response.go b/vendor/github.com/masterzen/winrm/winrm/response.go new file mode 100644 index 000000000..ea02a87ae --- /dev/null +++ b/vendor/github.com/masterzen/winrm/winrm/response.go @@ -0,0 +1,111 @@ +package winrm + +import ( + "encoding/base64" + "fmt" + "github.com/masterzen/winrm/soap" + "github.com/masterzen/xmlpath" + "io" + "strconv" + "strings" +) + +func first(node *xmlpath.Node, xpath string) (content string, err error) { + path, err := xmlpath.CompileWithNamespace(xpath, soap.GetAllNamespaces()) + if err != nil { + return + } + content, _ = path.String(node) + return +} + +func any(node *xmlpath.Node, xpath string) (found bool, err error) { + path, err := xmlpath.CompileWithNamespace(xpath, soap.GetAllNamespaces()) + if err != nil { + return + } + + found = path.Exists(node) + return +} + +func xpath(node *xmlpath.Node, xpath string) (nodes []xmlpath.Node, err error) { + path, err := xmlpath.CompileWithNamespace(xpath, soap.GetAllNamespaces()) + if err != nil { + return + } + + nodes = make([]xmlpath.Node, 0, 1) + iter := path.Iter(node) + for iter.Next() { + nodes = append(nodes, *(iter.Node())) + } + return +} + +func ParseOpenShellResponse(response string) (shellId string, err error) { + doc, err := xmlpath.Parse(strings.NewReader(response)) + + shellId, err = first(doc, "//w:Selector[@Name='ShellId']") + return +} + +func ParseExecuteCommandResponse(response string) (commandId string, err error) { + 
doc, err := xmlpath.Parse(strings.NewReader(response)) + + commandId, err = first(doc, "//rsp:CommandId") + return +} + +func ParseSlurpOutputErrResponse(response string, stdout io.Writer, stderr io.Writer) (finished bool, exitCode int, err error) { + doc, err := xmlpath.Parse(strings.NewReader(response)) + + stdouts, _ := xpath(doc, "//rsp:Stream[@Name='stdout']") + for _, node := range stdouts { + content, _ := base64.StdEncoding.DecodeString(node.String()) + stdout.Write(content) + } + stderrs, _ := xpath(doc, "//rsp:Stream[@Name='stderr']") + for _, node := range stderrs { + content, _ := base64.StdEncoding.DecodeString(node.String()) + stderr.Write(content) + } + + ended, _ := any(doc, "//*[@State='http://schemas.microsoft.com/wbem/wsman/1/windows/shell/CommandState/Done']") + + if ended { + finished = ended + if exitBool, _ := any(doc, "//rsp:ExitCode"); exitBool { + exit, _ := first(doc, "//rsp:ExitCode") + exitCode, _ = strconv.Atoi(exit) + } + } else { + finished = false + } + + return +} + +func ParseSlurpOutputResponse(response string, stream io.Writer, streamType string) (finished bool, exitCode int, err error) { + doc, err := xmlpath.Parse(strings.NewReader(response)) + + nodes, _ := xpath(doc, fmt.Sprintf("//rsp:Stream[@Name='%s']", streamType)) + for _, node := range nodes { + content, _ := base64.StdEncoding.DecodeString(node.String()) + stream.Write(content) + } + + ended, _ := any(doc, "//*[@State='http://schemas.microsoft.com/wbem/wsman/1/windows/shell/CommandState/Done']") + + if ended { + finished = ended + if exitBool, _ := any(doc, "//rsp:ExitCode"); exitBool { + exit, _ := first(doc, "//rsp:ExitCode") + exitCode, _ = strconv.Atoi(exit) + } + } else { + finished = false + } + + return +} diff --git a/vendor/github.com/masterzen/winrm/winrm/response_test.go b/vendor/github.com/masterzen/winrm/winrm/response_test.go new file mode 100644 index 000000000..e80cb06b5 --- /dev/null +++ b/vendor/github.com/masterzen/winrm/winrm/response_test.go @@ -0,0 +1,70 @@ +package winrm + +import ( + "bytes" + . 
"gopkg.in/check.v1" +) + +func (s *WinRMSuite) TestOpenShellResponse(c *C) { + response := createShellResponse + shellId, err := ParseOpenShellResponse(response) + if err != nil { + c.Fatalf("response didn't parse: %s", err) + } + + c.Assert("67A74734-DD32-4F10-89DE-49A060483810", Equals, shellId) +} + +func (s *WinRMSuite) TestExecuteCommandResponse(c *C) { + response := executeCommandResponse + + commandId, err := ParseExecuteCommandResponse(response) + if err != nil { + c.Fatalf("response didn't parse: %s", err) + } + + c.Assert("1A6DEE6B-EC68-4DD6-87E9-030C0048ECC4", Equals, commandId) + +} + +func (s *WinRMSuite) TestSlurpOutputResponse(c *C) { + response := outputResponse + + var stdout, stderr bytes.Buffer + finished, _, err := ParseSlurpOutputErrResponse(response, &stdout, &stderr) + if err != nil { + c.Fatalf("response didn't parse: %s", err) + } + + c.Assert(finished, Equals, false) + c.Assert("That's all folks!!!", Equals, stdout.String()) + c.Assert("This is stderr, I'm pretty sure!", Equals, stderr.String()) +} + +func (s *WinRMSuite) TestSlurpOutputSingleResponse(c *C) { + response := singleOutputResponse + + var stream bytes.Buffer + finished, _, err := ParseSlurpOutputResponse(response, &stream, "stdout") + if err != nil { + c.Fatalf("response didn't parse: %s", err) + } + + c.Assert(finished, Equals, false) + c.Assert("That's all folks!!!", Equals, stream.String()) +} + +func (s *WinRMSuite) TestDoneSlurpOutputResponse(c *C) { + response := doneCommandResponse + + var stdout, stderr bytes.Buffer + finished, code, err := ParseSlurpOutputErrResponse(response, &stdout, &stderr) + if err != nil { + c.Fatalf("response didn't parse: %s", err) + } + + c.Assert(finished, Equals, true) + c.Assert(code, Equals, 123) + c.Assert("", Equals, stdout.String()) + c.Assert("", Equals, stderr.String()) +} diff --git a/vendor/github.com/masterzen/winrm/winrm/shell.go b/vendor/github.com/masterzen/winrm/winrm/shell.go new file mode 100644 index 000000000..a14518280 --- /dev/null +++ b/vendor/github.com/masterzen/winrm/winrm/shell.go @@ -0,0 +1,31 @@ +package winrm + +// Shell is the local view of a WinRM Shell of a given Client +type Shell struct { + client *Client + ShellId string +} + +// Execute command on the given Shell, returning either an error or a Command +func (shell *Shell) Execute(command string, arguments ...string) (cmd *Command, err error) { + request := NewExecuteCommandRequest(shell.client.url, shell.ShellId, command, arguments, &shell.client.Parameters) + defer request.Free() + + response, err := shell.client.sendRequest(request) + if err == nil { + var commandId string + if commandId, err = ParseExecuteCommandResponse(response); err == nil { + cmd = newCommand(shell, commandId) + } + } + return +} + +// Close will terminate this shell. No commands can be issued once the shell is closed. +func (shell *Shell) Close() (err error) { + request := NewDeleteShellRequest(shell.client.url, shell.ShellId, &shell.client.Parameters) + defer request.Free() + + _, err = shell.client.sendRequest(request) + return +} diff --git a/vendor/github.com/masterzen/winrm/winrm/shell_test.go b/vendor/github.com/masterzen/winrm/winrm/shell_test.go new file mode 100644 index 000000000..cbe85cc96 --- /dev/null +++ b/vendor/github.com/masterzen/winrm/winrm/shell_test.go @@ -0,0 +1,40 @@ +package winrm + +import ( + "github.com/masterzen/winrm/soap" + . 
"gopkg.in/check.v1" +) + +func (s *WinRMSuite) TestShellExecuteResponse(c *C) { + client, err := NewClient(&Endpoint{Host: "localhost", Port: 5985}, "Administrator", "v3r1S3cre7") + c.Assert(err, IsNil) + + shell := &Shell{client: client, ShellId: "67A74734-DD32-4F10-89DE-49A060483810"} + first := true + client.http = func(client *Client, message *soap.SoapMessage) (string, error) { + if first { + c.Assert(message.String(), Contains, "http://schemas.microsoft.com/wbem/wsman/1/windows/shell/Command") + first = false + return executeCommandResponse, nil + } else { + c.Assert(message.String(), Contains, "http://schemas.microsoft.com/wbem/wsman/1/windows/shell/Receive") + return outputResponse, nil + } + } + + command, _ := shell.Execute("ipconfig /all") + c.Assert(command.commandId, Equals, "1A6DEE6B-EC68-4DD6-87E9-030C0048ECC4") +} + +func (s *WinRMSuite) TestShellCloseResponse(c *C) { + client, err := NewClient(&Endpoint{Host: "localhost", Port: 5985}, "Administrator", "v3r1S3cre7") + c.Assert(err, IsNil) + + shell := &Shell{client: client, ShellId: "67A74734-DD32-4F10-89DE-49A060483810"} + client.http = func(client *Client, message *soap.SoapMessage) (string, error) { + c.Assert(message.String(), Contains, "http://schemas.xmlsoap.org/ws/2004/09/transfer/Delete") + return "", nil + } + + shell.Close() +} diff --git a/vendor/github.com/masterzen/xmlpath/LICENSE b/vendor/github.com/masterzen/xmlpath/LICENSE new file mode 100644 index 000000000..53320c352 --- /dev/null +++ b/vendor/github.com/masterzen/xmlpath/LICENSE @@ -0,0 +1,185 @@ +This software is licensed under the LGPLv3, included below. + +As a special exception to the GNU Lesser General Public License version 3 +("LGPL3"), the copyright holders of this Library give you permission to +convey to a third party a Combined Work that links statically or dynamically +to this Library without providing any Minimal Corresponding Source or +Minimal Application Code as set out in 4d or providing the installation +information set out in section 4e, provided that you comply with the other +provisions of LGPL3 and provided that you meet, for the Application the +terms and conditions of the license(s) which apply to the Application. + +Except as stated in this special exception, the provisions of LGPL3 will +continue to comply in full to this Library. If you modify this Library, you +may apply this exception to your version of this Library, but you are not +obliged to do so. If you do not wish to do so, delete this exception +statement from your version. This exception does not (and cannot) modify any +license terms which apply to the Application, with which you must still +comply. + + + GNU LESSER GENERAL PUBLIC LICENSE + Version 3, 29 June 2007 + + Copyright (C) 2007 Free Software Foundation, Inc. + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + + This version of the GNU Lesser General Public License incorporates +the terms and conditions of version 3 of the GNU General Public +License, supplemented by the additional permissions listed below. + + 0. Additional Definitions. + + As used herein, "this License" refers to version 3 of the GNU Lesser +General Public License, and the "GNU GPL" refers to version 3 of the GNU +General Public License. + + "The Library" refers to a covered work governed by this License, +other than an Application or a Combined Work as defined below. 
+ + An "Application" is any work that makes use of an interface provided +by the Library, but which is not otherwise based on the Library. +Defining a subclass of a class defined by the Library is deemed a mode +of using an interface provided by the Library. + + A "Combined Work" is a work produced by combining or linking an +Application with the Library. The particular version of the Library +with which the Combined Work was made is also called the "Linked +Version". + + The "Minimal Corresponding Source" for a Combined Work means the +Corresponding Source for the Combined Work, excluding any source code +for portions of the Combined Work that, considered in isolation, are +based on the Application, and not on the Linked Version. + + The "Corresponding Application Code" for a Combined Work means the +object code and/or source code for the Application, including any data +and utility programs needed for reproducing the Combined Work from the +Application, but excluding the System Libraries of the Combined Work. + + 1. Exception to Section 3 of the GNU GPL. + + You may convey a covered work under sections 3 and 4 of this License +without being bound by section 3 of the GNU GPL. + + 2. Conveying Modified Versions. + + If you modify a copy of the Library, and, in your modifications, a +facility refers to a function or data to be supplied by an Application +that uses the facility (other than as an argument passed when the +facility is invoked), then you may convey a copy of the modified +version: + + a) under this License, provided that you make a good faith effort to + ensure that, in the event an Application does not supply the + function or data, the facility still operates, and performs + whatever part of its purpose remains meaningful, or + + b) under the GNU GPL, with none of the additional permissions of + this License applicable to that copy. + + 3. Object Code Incorporating Material from Library Header Files. + + The object code form of an Application may incorporate material from +a header file that is part of the Library. You may convey such object +code under terms of your choice, provided that, if the incorporated +material is not limited to numerical parameters, data structure +layouts and accessors, or small macros, inline functions and templates +(ten or fewer lines in length), you do both of the following: + + a) Give prominent notice with each copy of the object code that the + Library is used in it and that the Library and its use are + covered by this License. + + b) Accompany the object code with a copy of the GNU GPL and this license + document. + + 4. Combined Works. + + You may convey a Combined Work under terms of your choice that, +taken together, effectively do not restrict modification of the +portions of the Library contained in the Combined Work and reverse +engineering for debugging such modifications, if you also do each of +the following: + + a) Give prominent notice with each copy of the Combined Work that + the Library is used in it and that the Library and its use are + covered by this License. + + b) Accompany the Combined Work with a copy of the GNU GPL and this license + document. + + c) For a Combined Work that displays copyright notices during + execution, include the copyright notice for the Library among + these notices, as well as a reference directing the user to the + copies of the GNU GPL and this license document. 
+ + d) Do one of the following: + + 0) Convey the Minimal Corresponding Source under the terms of this + License, and the Corresponding Application Code in a form + suitable for, and under terms that permit, the user to + recombine or relink the Application with a modified version of + the Linked Version to produce a modified Combined Work, in the + manner specified by section 6 of the GNU GPL for conveying + Corresponding Source. + + 1) Use a suitable shared library mechanism for linking with the + Library. A suitable mechanism is one that (a) uses at run time + a copy of the Library already present on the user's computer + system, and (b) will operate properly with a modified version + of the Library that is interface-compatible with the Linked + Version. + + e) Provide Installation Information, but only if you would otherwise + be required to provide such information under section 6 of the + GNU GPL, and only to the extent that such information is + necessary to install and execute a modified version of the + Combined Work produced by recombining or relinking the + Application with a modified version of the Linked Version. (If + you use option 4d0, the Installation Information must accompany + the Minimal Corresponding Source and Corresponding Application + Code. If you use option 4d1, you must provide the Installation + Information in the manner specified by section 6 of the GNU GPL + for conveying Corresponding Source.) + + 5. Combined Libraries. + + You may place library facilities that are a work based on the +Library side by side in a single library together with other library +facilities that are not Applications and are not covered by this +License, and convey such a combined library under terms of your +choice, if you do both of the following: + + a) Accompany the combined library with a copy of the same work based + on the Library, uncombined with any other library facilities, + conveyed under the terms of this License. + + b) Give prominent notice with the combined library that part of it + is a work based on the Library, and explaining where to find the + accompanying uncombined form of the same work. + + 6. Revised Versions of the GNU Lesser General Public License. + + The Free Software Foundation may publish revised and/or new versions +of the GNU Lesser General Public License from time to time. Such new +versions will be similar in spirit to the present version, but may +differ in detail to address new problems or concerns. + + Each version is given a distinguishing version number. If the +Library as you received it specifies that a certain numbered version +of the GNU Lesser General Public License "or any later version" +applies to it, you have the option of following the terms and +conditions either of that published version or of any later version +published by the Free Software Foundation. If the Library as you +received it does not specify a version number of the GNU Lesser +General Public License, you may choose any version of the GNU Lesser +General Public License ever published by the Free Software Foundation. + + If the Library as you received it specifies that a proxy can decide +whether future versions of the GNU Lesser General Public License shall +apply, that proxy's public statement of acceptance of any version is +permanent authorization for you to choose that version for the +Library. 
diff --git a/vendor/github.com/masterzen/xmlpath/all_test.go b/vendor/github.com/masterzen/xmlpath/all_test.go new file mode 100644 index 000000000..a487d8a1a --- /dev/null +++ b/vendor/github.com/masterzen/xmlpath/all_test.go @@ -0,0 +1,536 @@ +package xmlpath_test + +import ( + "bytes" + "encoding/xml" + . "launchpad.net/gocheck" + "launchpad.net/xmlpath" + "testing" +) + +func Test(t *testing.T) { + TestingT(t) +} + +var _ = Suite(&BasicSuite{}) + +type BasicSuite struct{} + +var trivialXml = []byte(`abcdefg`) + +func (s *BasicSuite) TestRootText(c *C) { + node, err := xmlpath.Parse(bytes.NewBuffer(trivialXml)) + c.Assert(err, IsNil) + path := xmlpath.MustCompile("/") + result, ok := path.String(node) + c.Assert(ok, Equals, true) + c.Assert(result, Equals, "abcdefg") +} + +var trivialHtml = []byte(`<a>`) + +func (s *BasicSuite) TestHTML(c *C) { + node, err := xmlpath.ParseHTML(bytes.NewBuffer(trivialHtml)) + c.Assert(err, IsNil) + path := xmlpath.MustCompile("/root/foo") + result, ok := path.String(node) + c.Assert(ok, Equals, true) + c.Assert(result, Equals, "") +} + +func (s *BasicSuite) TestLibraryTable(c *C) { + node, err := xmlpath.Parse(bytes.NewBuffer(libraryXml)) + c.Assert(err, IsNil) + for _, test := range libraryTable { + cmt := Commentf("xml path: %s", test.path) + path, err := xmlpath.Compile(test.path) + if want, ok := test.result.(cerror); ok { + c.Assert(err, ErrorMatches, string(want), cmt) + c.Assert(path, IsNil, cmt) + continue + } + c.Assert(err, IsNil) + switch want := test.result.(type) { + case string: + got, ok := path.String(node) + c.Assert(ok, Equals, true, cmt) + c.Assert(got, Equals, want, cmt) + c.Assert(path.Exists(node), Equals, true, cmt) + iter := path.Iter(node) + iter.Next() + node := iter.Node() + c.Assert(node.String(), Equals, want, cmt) + c.Assert(string(node.Bytes()), Equals, want, cmt) + case []string: + var alls []string + var allb []string + iter := path.Iter(node) + for iter.Next() { + alls = append(alls, iter.Node().String()) + allb = append(allb, string(iter.Node().Bytes())) + } + c.Assert(alls, DeepEquals, want, cmt) + c.Assert(allb, DeepEquals, want, cmt) + s, sok := path.String(node) + b, bok := path.Bytes(node) + if len(want) == 0 { + c.Assert(sok, Equals, false, cmt) + c.Assert(bok, Equals, false, cmt) + c.Assert(s, Equals, "") + c.Assert(b, IsNil) + } else { + c.Assert(sok, Equals, true, cmt) + c.Assert(bok, Equals, true, cmt) + c.Assert(s, Equals, alls[0], cmt) + c.Assert(string(b), Equals, alls[0], cmt) + c.Assert(path.Exists(node), Equals, true, cmt) + } + case exists: + wantb := bool(want) + ok := path.Exists(node) + c.Assert(ok, Equals, wantb, cmt) + _, ok = path.String(node) + c.Assert(ok, Equals, wantb, cmt) + } + } +} + +type cerror string +type exists bool + +var libraryTable = []struct{ path string; result interface{} }{ + // These are the examples in the package documentation: + {"/library/book/isbn", "0836217462"}, + {"library/*/isbn", "0836217462"}, + {"/library/book/../book/./isbn", "0836217462"}, + {"/library/book/character[2]/name", "Snoopy"}, + {"/library/book/character[born='1950-10-04']/name", "Snoopy"}, + {"/library/book//node()[@id='PP']/name", "Peppermint Patty"}, + {"//book[author/@id='CMS']/title", "Being a Dog Is a Full-Time Job"}, + {"/library/book/preceding::comment()", " Great book. 
"}, + + // A few simple + {"/library/book/isbn", exists(true)}, + {"/library/isbn", exists(false)}, + {"/library/book/isbn/bad", exists(false)}, + {"/library/book/bad", exists(false)}, + {"/library/bad/isbn", exists(false)}, + {"/bad/book/isbn", exists(false)}, + + // Simple paths. + {"/library/book/isbn", "0836217462"}, + {"/library/book/author/name", "Charles M Schulz"}, + {"/library/book/author/born", "1922-11-26"}, + {"/library/book/character/name", "Peppermint Patty"}, + {"/library/book/character/qualification", "bold, brash and tomboyish"}, + + // Unrooted path with root node as context. + {"library/book/isbn", "0836217462"}, + + // Multiple entries from simple paths. + {"/library/book/isbn", []string{"0836217462", "0883556316"}}, + {"/library/book/character/name", []string{"Peppermint Patty", "Snoopy", "Schroeder", "Lucy", "Barney Google", "Spark Plug", "Snuffy Smith"}}, + + // Handling of wildcards. + {"/library/book/author/*", []string{"Charles M Schulz", "1922-11-26", "2000-02-12", "Charles M Schulz", "1922-11-26", "2000-02-12"}}, + + // Unsupported axis and note test. + {"/foo()", cerror(`compiling xml path "/foo\(\)":5: unsupported expression: foo\(\)`)}, + {"/foo::node()", cerror(`compiling xml path "/foo::node\(\)":6: unsupported axis: "foo"`)}, + + // The attribute axis. + {"/library/book/title/attribute::lang", "en"}, + {"/library/book/title/@lang", "en"}, + {"/library/book/@available/parent::node()/@id", "b0836217462"}, + {"/library/book/attribute::*", []string{"b0836217462", "true", "b0883556316", "true"}}, + {"/library/book/attribute::text()", cerror(`.*: text\(\) cannot succeed on axis "attribute"`)}, + + // The self axis. + {"/library/book/isbn/./self::node()", "0836217462"}, + + // The descendant axis. + {"/library/book/isbn/descendant::isbn", exists(false)}, + {"/library/descendant::isbn", []string{"0836217462", "0883556316"}}, + {"/descendant::*/isbn", []string{"0836217462", "0883556316"}}, + {"/descendant::isbn", []string{"0836217462", "0883556316"}}, + + // The descendant-or-self axis. + {"/library/book/isbn/descendant-or-self::isbn", "0836217462"}, + {"/library//isbn", []string{"0836217462", "0883556316"}}, + {"//isbn", []string{"0836217462", "0883556316"}}, + {"/descendant-or-self::node()/child::book/child::*", "0836217462"}, + + // The parent axis. + {"/library/book/isbn/../isbn/parent::node()//title", "Being a Dog Is a Full-Time Job"}, + + // The ancestor axis. + {"/library/book/isbn/ancestor::book/title", "Being a Dog Is a Full-Time Job"}, + {"/library/book/ancestor::book/title", exists(false)}, + + // The ancestor-or-self axis. + {"/library/book/isbn/ancestor-or-self::book/title", "Being a Dog Is a Full-Time Job"}, + {"/library/book/ancestor-or-self::book/title", "Being a Dog Is a Full-Time Job"}, + + // The following axis. + // The first author name must not be included, as it's within the context + // node (author) rather than following it. These queries exercise de-duping + // of nodes, since the following axis runs to the end multiple times. + {"/library/book/author/following::name", []string{"Peppermint Patty", "Snoopy", "Schroeder", "Lucy", "Charles M Schulz", "Barney Google", "Spark Plug", "Snuffy Smith"}}, + {"//following::book/author/name", []string{"Charles M Schulz", "Charles M Schulz"}}, + + // The following-sibling axis. + {"/library/book/quote/following-sibling::node()/name", []string{"Charles M Schulz", "Peppermint Patty", "Snoopy", "Schroeder", "Lucy"}}, + + // The preceding axis. 
+ {"/library/book/author/born/preceding::name", []string{"Charles M Schulz", "Charles M Schulz", "Lucy", "Schroeder", "Snoopy", "Peppermint Patty"}}, + {"/library/book/author/born/preceding::author/name", []string{"Charles M Schulz"}}, + {"/library/book/author/born/preceding::library", exists(false)}, + + // The preceding-sibling axis. + {"/library/book/author/born/preceding-sibling::name", []string{"Charles M Schulz", "Charles M Schulz"}}, + {"/library/book/author/born/preceding::author/name", []string{"Charles M Schulz"}}, + + // Comments. + {"/library/comment()", []string{" Great book. ", " Another great book. "}}, + {"//self::comment()", []string{" Great book. ", " Another great book. "}}, + {`comment("")`, cerror(`.*: comment\(\) has no arguments`)}, + + + // Processing instructions. + {`/library/book/author/processing-instruction()`, `"go rocks"`}, + {`/library/book/author/processing-instruction("echo")`, `"go rocks"`}, + {`/library//processing-instruction("echo")`, `"go rocks"`}, + {`/library/book/author/processing-instruction("foo")`, exists(false)}, + {`/library/book/author/processing-instruction(")`, cerror(`.*: missing '"'`)}, + + // Predicates. + {"library/book[@id='b0883556316']/isbn", []string{"0883556316"}}, + {"library/book[isbn='0836217462']/character[born='1950-10-04']/name", []string{"Snoopy"}}, + {"library/book[quote]/@id", []string{"b0836217462"}}, + {"library/book[./character/born='1922-07-17']/@id", []string{"b0883556316"}}, + {"library/book[2]/isbn", []string{"0883556316"}}, + {"library/book[0]/isbn", cerror(".*: positions start at 1")}, + {"library/book[-1]/isbn", cerror(".*: positions must be positive")}, + + // Bogus expressions. + {"/foo)", cerror(`compiling xml path "/foo\)":4: unexpected '\)'`)}, +} + +var libraryXml = []byte( +` + + + + 0836217462 + Being a Dog Is a Full-Time Job + I'd dog paddle the deepest ocean. 
+ + + Charles M Schulz + 1922-11-26 + 2000-02-12 + + + Peppermint Patty + 1966-08-22 + bold, brash and tomboyish + + + Snoopy + 1950-10-04 + extroverted beagle + + + Schroeder + 1951-05-30 + brought classical music to the Peanuts strip + + + Lucy + 1952-03-03 + bossy, crabby and selfish + + + + + 0883556316 + Barney Google and Snuffy Smith + + Charles M Schulz + 1922-11-26 + 2000-02-12 + + + Barney Google + 1919-01-01 + goggle-eyed, moustached, gloved and top-hatted, bulbous-nosed, cigar-chomping shrimp + + + Spark Plug + 1922-07-17 + brown-eyed, bow-legged nag, seldom races, patched blanket + + + Snuffy Smith + 1934-01-01 + volatile and diminutive moonshiner, ornery little cuss, sawed-off and shiftless + + + +`) + +func (s *BasicSuite) TestNamespace(c *C) { + node, err := xmlpath.Parse(bytes.NewBuffer(namespaceXml)) + c.Assert(err, IsNil) + for _, test := range namespaceTable { + cmt := Commentf("xml path: %s", test.path) + path, err := xmlpath.CompileWithNamespace(test.path, namespaces) + if want, ok := test.result.(cerror); ok { + c.Assert(err, ErrorMatches, string(want), cmt) + c.Assert(path, IsNil, cmt) + continue + } + c.Assert(err, IsNil) + switch want := test.result.(type) { + case string: + got, ok := path.String(node) + c.Assert(ok, Equals, true, cmt) + c.Assert(got, Equals, want, cmt) + c.Assert(path.Exists(node), Equals, true, cmt) + iter := path.Iter(node) + iter.Next() + node := iter.Node() + c.Assert(node.String(), Equals, want, cmt) + c.Assert(string(node.Bytes()), Equals, want, cmt) + case []string: + var alls []string + var allb []string + iter := path.Iter(node) + for iter.Next() { + alls = append(alls, iter.Node().String()) + allb = append(allb, string(iter.Node().Bytes())) + } + c.Assert(alls, DeepEquals, want, cmt) + c.Assert(allb, DeepEquals, want, cmt) + s, sok := path.String(node) + b, bok := path.Bytes(node) + if len(want) == 0 { + c.Assert(sok, Equals, false, cmt) + c.Assert(bok, Equals, false, cmt) + c.Assert(s, Equals, "") + c.Assert(b, IsNil) + } else { + c.Assert(sok, Equals, true, cmt) + c.Assert(bok, Equals, true, cmt) + c.Assert(s, Equals, alls[0], cmt) + c.Assert(string(b), Equals, alls[0], cmt) + c.Assert(path.Exists(node), Equals, true, cmt) + } + case exists: + wantb := bool(want) + ok := path.Exists(node) + c.Assert(ok, Equals, wantb, cmt) + _, ok = path.String(node) + c.Assert(ok, Equals, wantb, cmt) + } + } +} + +var namespaceXml = []byte(`http://schemas.microsoft.com/wbem/wsman/1/windows/shell/ReceiveResponseuuid:AAD46BD4-6315-4C3C-93D4-94A55773287Dhttp://schemas.xmlsoap.org/ws/2004/08/addressing/role/anonymousuuid:18A52A06-9027-41DC-8850-3F244595AF62VGhhdCdzIGFsbCBmb2xrcyEhIQ==VGhpcyBpcyBzdGRlcnIsIEknbSBwcmV0dHkgc3VyZSE=`) + +var namespaces = []xmlpath.Namespace { + { "a", "http://schemas.xmlsoap.org/ws/2004/08/addressing" }, + { "rsp", "http://schemas.microsoft.com/wbem/wsman/1/windows/shell" }, +} + +var namespaceTable = []struct{ path string; result interface{} }{ + { "//a:To", "http://schemas.xmlsoap.org/ws/2004/08/addressing/role/anonymous" }, + { "//rsp:Stream[@Name='stdout']", "VGhhdCdzIGFsbCBmb2xrcyEhIQ==" }, + { "//rsp:CommandState/@CommandId", "1A6DEE6B-EC68-4DD6-87E9-030C0048ECC4" }, + { "//*[@State='http://schemas.microsoft.com/wbem/wsman/1/windows/shell/CommandState/Done']", exists(false) }, + { "//rsp:Stream", []string{ "VGhhdCdzIGFsbCBmb2xrcyEhIQ==", "VGhpcyBpcyBzdGRlcnIsIEknbSBwcmV0dHkgc3VyZSE=" }}, + { "//s:Header", cerror(`.*: unknown namespace prefix: s`) }, +} + +func (s *BasicSuite) BenchmarkParse(c *C) { + for i := 0; i < 
c.N; i++ { + _, err := xmlpath.Parse(bytes.NewBuffer(instancesXml)) + c.Assert(err, IsNil) + } +} + +func (s *BasicSuite) BenchmarkSimplePathCompile(c *C) { + var err error + c.ResetTimer() + for i := 0; i < c.N; i++ { + _, err = xmlpath.Compile("/DescribeInstancesResponse/reservationSet/item/groupSet/item/groupId") + } + c.StopTimer() + c.Assert(err, IsNil) +} + +func (s *BasicSuite) BenchmarkSimplePathString(c *C) { + node, err := xmlpath.Parse(bytes.NewBuffer(instancesXml)) + c.Assert(err, IsNil) + path := xmlpath.MustCompile("/DescribeInstancesResponse/reservationSet/item/instancesSet/item/instanceType") + var str string + c.ResetTimer() + for i := 0; i < c.N; i++ { + str, _ = path.String(node) + } + c.StopTimer() + c.Assert(str, Equals, "m1.small") +} + +func (s *BasicSuite) BenchmarkSimplePathStringUnmarshal(c *C) { + // For a vague comparison. + var result struct{ Str string `xml:"reservationSet>item>instancesSet>item>instanceType"` } + for i := 0; i < c.N; i++ { + xml.Unmarshal(instancesXml, &result) + } + c.StopTimer() + c.Assert(result.Str, Equals, "m1.large") +} + +func (s *BasicSuite) BenchmarkSimplePathExists(c *C) { + node, err := xmlpath.Parse(bytes.NewBuffer(instancesXml)) + c.Assert(err, IsNil) + path := xmlpath.MustCompile("/DescribeInstancesResponse/reservationSet/item/instancesSet/item/instanceType") + var exists bool + c.ResetTimer() + for i := 0; i < c.N; i++ { + exists = path.Exists(node) + } + c.StopTimer() + c.Assert(exists, Equals, true) +} + + + +var instancesXml = []byte( +` + 98e3c9a4-848c-4d6d-8e8a-b1bdEXAMPLE + + + r-b27e30d9 + 999988887777 + + + sg-67ad940e + default + + + + + i-c5cd56af + ami-1a2b3c4d + + 16 + running + + domU-12-31-39-10-56-34.compute-1.internal + ec2-174-129-165-232.compute-1.amazonaws.com + + GSG_Keypair + 0 + + m1.small + 2010-08-17T01:15:18.000Z + + us-east-1b + + + aki-94c527fd + ari-96c527ff + + disabled + + 10.198.85.190 + 174.129.165.232 + i386 + ebs + /dev/sda1 + + + /dev/sda1 + + vol-a082c1c9 + attached + 2010-08-17T01:15:21.000Z + false + + + + spot + sir-7a688402 + paravirtual + + + xen + + + 854251627541 + + + r-b67e30dd + 999988887777 + + + sg-67ad940e + default + + + + + i-d9cd56b3 + ami-1a2b3c4d + + 16 + running + + domU-12-31-39-10-54-E5.compute-1.internal + ec2-184-73-58-78.compute-1.amazonaws.com + + GSG_Keypair + 0 + + m1.large + 2010-08-17T01:15:19.000Z + + us-east-1b + + + aki-94c527fd + ari-96c527ff + + disabled + + 10.198.87.19 + 184.73.58.78 + i386 + ebs + /dev/sda1 + + + /dev/sda1 + + vol-a282c1cb + attached + 2010-08-17T01:15:23.000Z + false + + + + spot + sir-55a3aa02 + paravirtual + + + xen + + + 854251627541 + + + +`) diff --git a/vendor/github.com/masterzen/xmlpath/doc.go b/vendor/github.com/masterzen/xmlpath/doc.go new file mode 100644 index 000000000..d1acf6c07 --- /dev/null +++ b/vendor/github.com/masterzen/xmlpath/doc.go @@ -0,0 +1,95 @@ +// Package xmlpath implements a strict subset of the XPath specification for the Go language. 
+// +// The XPath specification is available at: +// +// http://www.w3.org/TR/xpath +// +// Path expressions supported by this package are in the following format, +// with all components being optional: +// +// /axis-name::node-test[predicate]/axis-name::node-test[predicate] +// +// At the moment, xmlpath is compatible with the XPath specification +// to the following extent: +// +// - All axes are supported ("child", "following-sibling", etc) +// - All abbreviated forms are supported (".", "//", etc) +// - All node types except for namespace are supported +// - Predicates are restricted to [N], [path], and [path=literal] forms +// - Only a single predicate is supported per path step +// - Namespaces are experimentally supported +// - Richer expressions +// +// For example, assuming the following document: +// +// +// +// +// 0836217462 +// Being a Dog Is a Full-Time Job +// I'd dog paddle the deepest ocean. +// +// +// Charles M Schulz +// 1922-11-26 +// 2000-02-12 +// +// +// Peppermint Patty +// 1966-08-22 +// bold, brash and tomboyish +// +// +// Snoopy +// 1950-10-04 +// extroverted beagle +// +// +// +// +// The following examples are valid path expressions, and the first +// match has the indicated value: +// +// /library/book/isbn => "0836217462" +// library/*/isbn => "0836217462" +// /library/book/../book/./isbn => "0836217462" +// /library/book/character[2]/name => "Snoopy" +// /library/book/character[born='1950-10-04']/name => "Snoopy" +// /library/book//node()[@id='PP']/name => "Peppermint Patty" +// //book[author/@id='CMS']/title => "Being a Dog Is a Full-Time Job" +// /library/book/preceding::comment() => " Great book. " +// +// To run an expression, compile it, and then apply the compiled path to any +// number of context nodes, from one or more parsed xml documents: +// +// path := xmlpath.MustCompile("/library/book/isbn") +// root, err := xmlpath.Parse(file) +// if err != nil { +// log.Fatal(err) +// } +// if value, ok := path.String(root); ok { +// fmt.Println("Found:", value) +// } +// +// To use xmlpath with namespaces, it is required to give the supported set of namespaces +// when compiling: +// +// +// var namespaces = []xmlpath.Namespace { +// { "s", "http://www.w3.org/2003/05/soap-envelope" }, +// { "a", "http://schemas.xmlsoap.org/ws/2004/08/addressing" }, +// } +// path, err := xmlpath.CompileWithNamespace("/s:Header/a:To", namespaces) +// if err != nil { +// log.Fatal(err) +// } +// root, err := xmlpath.Parse(file) +// if err != nil { +// log.Fatal(err) +// } +// if value, ok := path.String(root); ok { +// fmt.Println("Found:", value) +// } +// + +package xmlpath diff --git a/vendor/github.com/masterzen/xmlpath/parser.go b/vendor/github.com/masterzen/xmlpath/parser.go new file mode 100644 index 000000000..88c1523a3 --- /dev/null +++ b/vendor/github.com/masterzen/xmlpath/parser.go @@ -0,0 +1,233 @@ +package xmlpath + +import ( + "encoding/xml" + "io" +) + +// Node is an item in an xml tree that was compiled to +// be processed via xml paths.
A node may represent: +// +// - An element in the xml document (<body>) +// - An attribute of an element in the xml document (href="...") +// - A comment in the xml document (<!--...-->) +// - A processing instruction in the xml document (<?...?>) +// - Some text within the xml document +// +type Node struct { + kind nodeKind + name xml.Name + attr string + text []byte + + nodes []Node + pos int + end int + + up *Node + down []*Node +} + +type nodeKind int + +const ( + anyNode nodeKind = iota + startNode + endNode + attrNode + textNode + commentNode + procInstNode +) + +// String returns the string value of node. +// +// The string value of a node is: +// +// - For element nodes, the concatenation of all text nodes within the element. +// - For text nodes, the text itself. +// - For attribute nodes, the attribute value. +// - For comment nodes, the text within the comment delimiters. +// - For processing instruction nodes, the content of the instruction. +// +func (node *Node) String() string { + if node.kind == attrNode { + return node.attr + } + return string(node.Bytes()) +} + +// Bytes returns the string value of node as a byte slice. +// See Node.String for a description of what the string value of a node is. +func (node *Node) Bytes() []byte { + if node.kind == attrNode { + return []byte(node.attr) + } + if node.kind != startNode { + return node.text + } + var text []byte + for i := node.pos; i < node.end; i++ { + if node.nodes[i].kind == textNode { + text = append(text, node.nodes[i].text...) + } + } + return text +} + +// equals returns whether the string value of node is equal to s, +// without allocating memory. +func (node *Node) equals(s string) bool { + if node.kind == attrNode { + return s == node.attr + } + if node.kind != startNode { + if len(s) != len(node.text) { + return false + } + for i := range s { + if s[i] != node.text[i] { + return false + } + } + return true + } + si := 0 + for i := node.pos; i < node.end; i++ { + if node.nodes[i].kind == textNode { + for _, c := range node.nodes[i].text { + if si >= len(s) { + return false + } + if s[si] != c { + return false + } + si++ + } + } + } + return si == len(s) +} + +// Parse reads an xml document from r, parses it, and returns its root node. +func Parse(r io.Reader) (*Node, error) { + return ParseDecoder(xml.NewDecoder(r)) +} + +// ParseHTML reads an HTML-like document from r, parses it, and returns +// its root node. +func ParseHTML(r io.Reader) (*Node, error) { + d := xml.NewDecoder(r) + d.Strict = false + d.AutoClose = xml.HTMLAutoClose + d.Entity = xml.HTMLEntity + return ParseDecoder(d) +} + +// ParseDecoder parses the xml document being decoded by d and returns +// its root node. +func ParseDecoder(d *xml.Decoder) (*Node, error) { + var nodes []Node + var text []byte + + // The root node. + nodes = append(nodes, Node{kind: startNode}) + + for { + t, err := d.Token() + if err == io.EOF { + break + } + if err != nil { + return nil, err + } + switch t := t.(type) { + case xml.EndElement: + nodes = append(nodes, Node{ + kind: endNode, + }) + case xml.StartElement: + nodes = append(nodes, Node{ + kind: startNode, + name: t.Name, + }) + for _, attr := range t.Attr { + nodes = append(nodes, Node{ + kind: attrNode, + name: attr.Name, + attr: attr.Value, + }) + } + case xml.CharData: + texti := len(text) + text = append(text, t...) + nodes = append(nodes, Node{ + kind: textNode, + text: text[texti : texti+len(t)], + }) + case xml.Comment: + texti := len(text) + text = append(text, t...)
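+ // As with the CharData case above, the comment's bytes are appended to the shared text buffer and the node created below keeps a sub-slice of it, so all node text lives in one contiguous allocation. (Descriptive comment; not in the upstream source.)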
+ nodes = append(nodes, Node{ + kind: commentNode, + text: text[texti : texti+len(t)], + }) + case xml.ProcInst: + texti := len(text) + text = append(text, t.Inst...) + nodes = append(nodes, Node{ + kind: procInstNode, + name: xml.Name{Local: t.Target}, + text: text[texti : texti+len(t.Inst)], + }) + } + } + + // Close the root node. + nodes = append(nodes, Node{kind: endNode}) + + stack := make([]*Node, 0, len(nodes)) + downs := make([]*Node, len(nodes)) + downCount := 0 + + for pos := range nodes { + + switch nodes[pos].kind { + + case startNode, attrNode, textNode, commentNode, procInstNode: + node := &nodes[pos] + node.nodes = nodes + node.pos = pos + if len(stack) > 0 { + node.up = stack[len(stack)-1] + } + if node.kind == startNode { + stack = append(stack, node) + } else { + node.end = pos + 1 + } + + case endNode: + node := stack[len(stack)-1] + node.end = pos + stack = stack[:len(stack)-1] + + // Compute downs. Doing that here is what enables the + // use of a slice of a contiguous pre-allocated block. + node.down = downs[downCount:downCount] + for i := node.pos + 1; i < node.end; i++ { + if nodes[i].up == node { + switch nodes[i].kind { + case startNode, textNode, commentNode, procInstNode: + node.down = append(node.down, &nodes[i]) + downCount++ + } + } + } + if len(stack) == 0 { + return node, nil + } + } + } + return nil, io.EOF +} diff --git a/vendor/github.com/masterzen/xmlpath/path.go b/vendor/github.com/masterzen/xmlpath/path.go new file mode 100644 index 000000000..f35af70ae --- /dev/null +++ b/vendor/github.com/masterzen/xmlpath/path.go @@ -0,0 +1,642 @@ +package xmlpath + +import ( + "fmt" + "strconv" + "unicode/utf8" +) + +// Namespace represents a given XML Namespace +type Namespace struct { + Prefix string + Uri string +} + +// Path is a compiled path that can be applied to a context +// node to obtain a matching node set. +// A single Path can be applied concurrently to any number +// of context nodes. +type Path struct { + path string + steps []pathStep +} + +// Iter returns an iterator that goes over the list of nodes +// that p matches on the given context. +func (p *Path) Iter(context *Node) *Iter { + iter := Iter{ + make([]pathStepState, len(p.steps)), + make([]bool, len(context.nodes)), + } + for i := range p.steps { + iter.state[i].step = &p.steps[i] + } + iter.state[0].init(context) + return &iter +} + +// Exists returns whether any nodes match p on the given context. +func (p *Path) Exists(context *Node) bool { + return p.Iter(context).Next() +} + +// String returns the string value of the first node matched +// by p on the given context. +// +// See the documentation of Node.String. +func (p *Path) String(context *Node) (s string, ok bool) { + iter := p.Iter(context) + if iter.Next() { + return iter.Node().String(), true + } + return "", false +} + +// Bytes returns as a byte slice the string value of the first +// node matched by p on the given context. +// +// See the documentation of Node.String. +func (p *Path) Bytes(node *Node) (b []byte, ok bool) { + iter := p.Iter(node) + if iter.Next() { + return iter.Node().Bytes(), true + } + return nil, false +} + +// Iter iterates over node sets. +type Iter struct { + state []pathStepState + seen []bool +} + +// Node returns the current node. +// Must only be called after Iter.Next returns true. 
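+// +// For example, to walk every node matched by a compiled path (an illustrative sketch, not upstream documentation; root is a document parsed with Parse and path a compiled Path): +// +// iter := path.Iter(root) +// for iter.Next() { +// fmt.Println(iter.Node().String()) +// }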
+func (iter *Iter) Node() *Node { + state := iter.state[len(iter.state)-1] + if state.pos == 0 { + panic("Iter.Node called before Iter.Next") + } + if state.node == nil { + panic("Iter.Node called after Iter.Next false") + } + return state.node +} + +// Next iterates to the next node in the set, if any, and +// returns whether there is a node available. +func (iter *Iter) Next() bool { + tip := len(iter.state) - 1 +outer: + for { + for !iter.state[tip].next() { + tip-- + if tip == -1 { + return false + } + } + for tip < len(iter.state)-1 { + tip++ + iter.state[tip].init(iter.state[tip-1].node) + if !iter.state[tip].next() { + tip-- + continue outer + } + } + if iter.seen[iter.state[tip].node.pos] { + continue + } + iter.seen[iter.state[tip].node.pos] = true + return true + } + panic("unreachable") +} + +type pathStepState struct { + step *pathStep + node *Node + pos int + idx int + aux int +} + +func (s *pathStepState) init(node *Node) { + s.node = node + s.pos = 0 + s.idx = 0 + s.aux = 0 +} + +func (s *pathStepState) next() bool { + for s._next() { + s.pos++ + if s.step.pred == nil { + return true + } + if s.step.pred.bval { + if s.step.pred.path.Exists(s.node) { + return true + } + } else if s.step.pred.path != nil { + iter := s.step.pred.path.Iter(s.node) + for iter.Next() { + if iter.Node().equals(s.step.pred.sval) { + return true + } + } + } else { + if s.step.pred.ival == s.pos { + return true + } + } + } + return false +} + +func (s *pathStepState) _next() bool { + if s.node == nil { + return false + } + if s.step.root && s.idx == 0 { + for s.node.up != nil { + s.node = s.node.up + } + } + + switch s.step.axis { + + case "self": + if s.idx == 0 && s.step.match(s.node) { + s.idx++ + return true + } + + case "parent": + if s.idx == 0 && s.node.up != nil && s.step.match(s.node.up) { + s.idx++ + s.node = s.node.up + return true + } + + case "ancestor", "ancestor-or-self": + if s.idx == 0 && s.step.axis == "ancestor-or-self" { + s.idx++ + if s.step.match(s.node) { + return true + } + } + for s.node.up != nil { + s.node = s.node.up + s.idx++ + if s.step.match(s.node) { + return true + } + } + + case "child": + var down []*Node + if s.idx == 0 { + down = s.node.down + } else { + down = s.node.up.down + } + for s.idx < len(down) { + node := down[s.idx] + s.idx++ + if s.step.match(node) { + s.node = node + return true + } + } + + case "descendant", "descendant-or-self": + if s.idx == 0 { + s.idx = s.node.pos + s.aux = s.node.end + if s.step.axis == "descendant" { + s.idx++ + } + } + for s.idx < s.aux { + node := &s.node.nodes[s.idx] + s.idx++ + if node.kind == attrNode { + continue + } + if s.step.match(node) { + s.node = node + return true + } + } + + case "following": + if s.idx == 0 { + s.idx = s.node.end + } + for s.idx < len(s.node.nodes) { + node := &s.node.nodes[s.idx] + s.idx++ + if node.kind == attrNode { + continue + } + if s.step.match(node) { + s.node = node + return true + } + } + + case "following-sibling": + var down []*Node + if s.node.up != nil { + down = s.node.up.down + if s.idx == 0 { + for s.idx < len(down) { + node := down[s.idx] + s.idx++ + if node == s.node { + break + } + } + } + } + for s.idx < len(down) { + node := down[s.idx] + s.idx++ + if s.step.match(node) { + s.node = node + return true + } + } + + case "preceding": + if s.idx == 0 { + s.aux = s.node.pos // Detect ancestors. 
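+ // s.aux is advanced up the ancestor chain by the loop below each time the backward scan reaches the current ancestor's parent, so ancestors of the context node, which the preceding axis excludes by definition, are recognized and skipped. (Descriptive comment; not in the upstream source.)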
+ s.idx = s.node.pos - 1 + } + for s.idx >= 0 { + node := &s.node.nodes[s.idx] + s.idx-- + if node.kind == attrNode { + continue + } + if node == s.node.nodes[s.aux].up { + s.aux = s.node.nodes[s.aux].up.pos + continue + } + if s.step.match(node) { + s.node = node + return true + } + } + + case "preceding-sibling": + var down []*Node + if s.node.up != nil { + down = s.node.up.down + if s.aux == 0 { + s.aux = 1 + for s.idx < len(down) { + node := down[s.idx] + s.idx++ + if node == s.node { + s.idx-- + break + } + } + } + } + for s.idx >= 0 { + node := down[s.idx] + s.idx-- + if s.step.match(node) { + s.node = node + return true + } + } + + case "attribute": + if s.idx == 0 { + s.idx = s.node.pos + 1 + s.aux = s.node.end + } + for s.idx < s.aux { + node := &s.node.nodes[s.idx] + s.idx++ + if node.kind != attrNode { + break + } + if s.step.match(node) { + s.node = node + return true + } + } + + } + + s.node = nil + return false +} + +type pathPredicate struct { + path *Path + sval string + ival int + bval bool +} + +type pathStep struct { + root bool + axis string + name string + prefix string + uri string + kind nodeKind + pred *pathPredicate +} + +func (step *pathStep) match(node *Node) bool { + return node.kind != endNode && + (step.kind == anyNode || step.kind == node.kind) && + (step.name == "*" || (node.name.Local == step.name && (node.name.Space != "" && node.name.Space == step.uri || node.name.Space == ""))) +} + +// MustCompile returns the compiled path, and panics if +// there are any errors. +func MustCompile(path string) *Path { + e, err := Compile(path) + if err != nil { + panic(err) + } + return e +} + +// Compile returns the compiled path. +func Compile(path string) (*Path, error) { + c := pathCompiler{path, 0, []Namespace{} } + if path == "" { + return nil, c.errorf("empty path") + } + p, err := c.parsePath() + if err != nil { + return nil, err + } + return p, nil +} + +// Compile the path with the knowledge of the given namespaces +func CompileWithNamespace(path string, ns []Namespace) (*Path, error) { + c := pathCompiler{path, 0, ns} + if path == "" { + return nil, c.errorf("empty path") + } + p, err := c.parsePath() + if err != nil { + return nil, err + } + return p, nil +} + +type pathCompiler struct { + path string + i int + ns []Namespace +} + +func (c *pathCompiler) errorf(format string, args ...interface{}) error { + return fmt.Errorf("compiling xml path %q:%d: %s", c.path, c.i, fmt.Sprintf(format, args...)) +} + +func (c *pathCompiler) parsePath() (path *Path, err error) { + var steps []pathStep + var start = c.i + for { + step := pathStep{axis: "child"} + + if c.i == 0 && c.skipByte('/') { + step.root = true + if len(c.path) == 1 { + step.name = "*" + } + } + if c.peekByte('/') { + step.axis = "descendant-or-self" + step.name = "*" + } else if c.skipByte('@') { + mark := c.i + if !c.skipName() { + return nil, c.errorf("missing name after @") + } + step.axis = "attribute" + step.name = c.path[mark:c.i] + step.kind = attrNode + } else { + mark := c.i + if c.skipName() { + step.name = c.path[mark:c.i] + } + if step.name == "" { + return nil, c.errorf("missing name") + } else if step.name == "*" { + step.kind = startNode + } else if step.name == "." { + step.axis = "self" + step.name = "*" + } else if step.name == ".." 
{ + step.axis = "parent" + step.name = "*" + } else { + if c.skipByte(':') { + if !c.skipByte(':') { + mark = c.i + if c.skipName() { + step.prefix = step.name + step.name = c.path[mark:c.i] + // check prefix + found := false + for _, ns := range c.ns { + if ns.Prefix == step.prefix { + step.uri = ns.Uri + found = true + break + } + } + if !found { + return nil, c.errorf("unknown namespace prefix: %s", step.prefix) + } + } else { + return nil, c.errorf("missing name after namespace prefix") + } + } else { + switch step.name { + case "attribute": + step.kind = attrNode + case "self", "child", "parent": + case "descendant", "descendant-or-self": + case "ancestor", "ancestor-or-self": + case "following", "following-sibling": + case "preceding", "preceding-sibling": + default: + return nil, c.errorf("unsupported axis: %q", step.name) + } + step.axis = step.name + + mark = c.i + if !c.skipName() { + return nil, c.errorf("missing name") + } + step.name = c.path[mark:c.i] + } + } + if c.skipByte('(') { + conflict := step.kind != anyNode + switch step.name { + case "node": + // must be anyNode + case "text": + step.kind = textNode + case "comment": + step.kind = commentNode + case "processing-instruction": + step.kind = procInstNode + default: + return nil, c.errorf("unsupported expression: %s()", step.name) + } + if conflict { + return nil, c.errorf("%s() cannot succeed on axis %q", step.name, step.axis) + } + + literal, err := c.parseLiteral() + if err == errNoLiteral { + step.name = "*" + } else if err != nil { + return nil, c.errorf("%v", err) + } else if step.kind == procInstNode { + step.name = literal + } else { + return nil, c.errorf("%s() has no arguments", step.name) + } + if !c.skipByte(')') { + return nil, c.errorf("missing )") + } + } else if step.name == "*" && step.kind == anyNode { + step.kind = startNode + } + } + } + if c.skipByte('[') { + step.pred = &pathPredicate{} + if ival, ok := c.parseInt(); ok { + if ival == 0 { + return nil, c.errorf("positions start at 1") + } + step.pred.ival = ival + } else { + path, err := c.parsePath() + if err != nil { + return nil, err + } + if path.path[0] == '-' { + if _, err = strconv.Atoi(path.path); err == nil { + return nil, c.errorf("positions must be positive") + } + } + step.pred.path = path + if c.skipByte('=') { + sval, err := c.parseLiteral() + if err != nil { + return nil, c.errorf("%v", err) + } + step.pred.sval = sval + } else { + step.pred.bval = true + } + } + if !c.skipByte(']') { + return nil, c.errorf("expected ']'") + } + } + steps = append(steps, step) + //fmt.Printf("step: %#v\n", step) + if !c.skipByte('/') { + if (start == 0 || start == c.i) && c.i < len(c.path) { + return nil, c.errorf("unexpected %q", c.path[c.i]) + } + return &Path{steps: steps, path: c.path[start:c.i]}, nil + } + } + panic("unreachable") +} + +var errNoLiteral = fmt.Errorf("expected a literal string") + +func (c *pathCompiler) parseLiteral() (string, error) { + if c.skipByte('"') { + mark := c.i + if !c.skipByteFind('"') { + return "", fmt.Errorf(`missing '"'`) + } + return c.path[mark:c.i-1], nil + } + if c.skipByte('\'') { + mark := c.i + if !c.skipByteFind('\'') { + return "", fmt.Errorf(`missing "'"`) + } + return c.path[mark:c.i-1], nil + } + return "", errNoLiteral +} + +func (c *pathCompiler) parseInt() (v int, ok bool) { + mark := c.i + for c.i < len(c.path) && c.path[c.i] >= '0' && c.path[c.i] <= '9' { + v *= 10 + v += int(c.path[c.i]) - '0' + c.i++ + } + if c.i == mark { + return 0, false + } + return v, true +} + +func (c *pathCompiler) 
skipByte(b byte) bool { + if c.i < len(c.path) && c.path[c.i] == b { + c.i++ + return true + } + return false +} + +func (c *pathCompiler) skipByteFind(b byte) bool { + for i := c.i; i < len(c.path); i++ { + if c.path[i] == b { + c.i = i+1 + return true + } + } + return false +} + +func (c *pathCompiler) peekByte(b byte) bool { + return c.i < len(c.path) && c.path[c.i] == b +} + +func (c *pathCompiler) skipName() bool { + if c.i >= len(c.path) { + return false + } + if c.path[c.i] == '*' { + c.i++ + return true + } + start := c.i + for c.i < len(c.path) && (c.path[c.i] >= utf8.RuneSelf || isNameByte(c.path[c.i])) { + c.i++ + } + return c.i > start +} + +func isNameByte(c byte) bool { + return 'A' <= c && c <= 'Z' || 'a' <= c && c <= 'z' || '0' <= c && c <= '9' || c == '_' || c == '.' || c == '-' +} diff --git a/vendor/github.com/mattn/go-isatty/LICENSE b/vendor/github.com/mattn/go-isatty/LICENSE new file mode 100644 index 000000000..65dc692b6 --- /dev/null +++ b/vendor/github.com/mattn/go-isatty/LICENSE @@ -0,0 +1,9 @@ +Copyright (c) Yasuhiro MATSUMOTO + +MIT License (Expat) + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/mattn/go-isatty/README.md b/vendor/github.com/mattn/go-isatty/README.md new file mode 100644 index 000000000..74845de4a --- /dev/null +++ b/vendor/github.com/mattn/go-isatty/README.md @@ -0,0 +1,37 @@ +# go-isatty + +isatty for golang + +## Usage + +```go +package main + +import ( + "fmt" + "github.com/mattn/go-isatty" + "os" +) + +func main() { + if isatty.IsTerminal(os.Stdout.Fd()) { + fmt.Println("Is Terminal") + } else { + fmt.Println("Is Not Terminal") + } +} +``` + +## Installation + +``` +$ go get github.com/mattn/go-isatty +``` + +# License + +MIT + +# Author + +Yasuhiro Matsumoto (a.k.a mattn) diff --git a/vendor/github.com/mattn/go-isatty/doc.go b/vendor/github.com/mattn/go-isatty/doc.go new file mode 100644 index 000000000..17d4f90eb --- /dev/null +++ b/vendor/github.com/mattn/go-isatty/doc.go @@ -0,0 +1,2 @@ +// Package isatty implements interface to isatty +package isatty diff --git a/vendor/github.com/mattn/go-isatty/isatty_appengine.go b/vendor/github.com/mattn/go-isatty/isatty_appengine.go new file mode 100644 index 000000000..83c588773 --- /dev/null +++ b/vendor/github.com/mattn/go-isatty/isatty_appengine.go @@ -0,0 +1,9 @@ +// +build appengine + +package isatty + +// IsTerminal returns true if the file descriptor is terminal, which +// is always false on appengine classic, which is a sandboxed PaaS.
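+// +// The build tags above and in the sibling files select exactly one IsTerminal implementation per platform, so callers can use the same isatty.IsTerminal(os.Stdout.Fd()) call everywhere. (Descriptive comment; not in the upstream source.)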
+func IsTerminal(fd uintptr) bool { + return false +} diff --git a/vendor/github.com/mattn/go-isatty/isatty_bsd.go b/vendor/github.com/mattn/go-isatty/isatty_bsd.go new file mode 100644 index 000000000..98ffe86a4 --- /dev/null +++ b/vendor/github.com/mattn/go-isatty/isatty_bsd.go @@ -0,0 +1,18 @@ +// +build darwin freebsd openbsd netbsd +// +build !appengine + +package isatty + +import ( + "syscall" + "unsafe" +) + +const ioctlReadTermios = syscall.TIOCGETA + +// IsTerminal returns true if the file descriptor is a terminal. +func IsTerminal(fd uintptr) bool { + var termios syscall.Termios + _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, fd, ioctlReadTermios, uintptr(unsafe.Pointer(&termios)), 0, 0, 0) + return err == 0 +} diff --git a/vendor/github.com/mattn/go-isatty/isatty_linux.go b/vendor/github.com/mattn/go-isatty/isatty_linux.go new file mode 100644 index 000000000..9d24bac1d --- /dev/null +++ b/vendor/github.com/mattn/go-isatty/isatty_linux.go @@ -0,0 +1,18 @@ +// +build linux +// +build !appengine + +package isatty + +import ( + "syscall" + "unsafe" +) + +const ioctlReadTermios = syscall.TCGETS + +// IsTerminal returns true if the file descriptor is a terminal. +func IsTerminal(fd uintptr) bool { + var termios syscall.Termios + _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, fd, ioctlReadTermios, uintptr(unsafe.Pointer(&termios)), 0, 0, 0) + return err == 0 +} diff --git a/vendor/github.com/mattn/go-isatty/isatty_solaris.go b/vendor/github.com/mattn/go-isatty/isatty_solaris.go new file mode 100644 index 000000000..1f0c6bf53 --- /dev/null +++ b/vendor/github.com/mattn/go-isatty/isatty_solaris.go @@ -0,0 +1,16 @@ +// +build solaris +// +build !appengine + +package isatty + +import ( + "golang.org/x/sys/unix" +) + +// IsTerminal returns true if the given file descriptor is a terminal. +// see: http://src.illumos.org/source/xref/illumos-gate/usr/src/lib/libbc/libc/gen/common/isatty.c +func IsTerminal(fd uintptr) bool { + var termio unix.Termio + err := unix.IoctlSetTermio(int(fd), unix.TCGETA, &termio) + return err == nil +} diff --git a/vendor/github.com/mattn/go-isatty/isatty_windows.go b/vendor/github.com/mattn/go-isatty/isatty_windows.go new file mode 100644 index 000000000..83c398b16 --- /dev/null +++ b/vendor/github.com/mattn/go-isatty/isatty_windows.go @@ -0,0 +1,19 @@ +// +build windows +// +build !appengine + +package isatty + +import ( + "syscall" + "unsafe" +) + +var kernel32 = syscall.NewLazyDLL("kernel32.dll") +var procGetConsoleMode = kernel32.NewProc("GetConsoleMode") + +// IsTerminal returns true if the file descriptor is a terminal. +func IsTerminal(fd uintptr) bool { + var st uint32 + r, _, e := syscall.Syscall(procGetConsoleMode.Addr(), 2, fd, uintptr(unsafe.Pointer(&st)), 0) + return r != 0 && e == 0 +} diff --git a/vendor/github.com/mitchellh/cli/.travis.yml b/vendor/github.com/mitchellh/cli/.travis.yml new file mode 100644 index 000000000..c2f4ab263 --- /dev/null +++ b/vendor/github.com/mitchellh/cli/.travis.yml @@ -0,0 +1,14 @@ +sudo: false + +language: go + +go: + - 1.2 + - 1.3 + - 1.4 + - 1.5 + - tip + +matrix: + allow_failures: + - go: tip diff --git a/vendor/github.com/mitchellh/cli/LICENSE b/vendor/github.com/mitchellh/cli/LICENSE new file mode 100644 index 000000000..c33dcc7c9 --- /dev/null +++ b/vendor/github.com/mitchellh/cli/LICENSE @@ -0,0 +1,354 @@ +Mozilla Public License, version 2.0 + +1. Definitions + +1.1. “Contributor” + + means each individual or legal entity that creates, contributes to the + creation of, or owns Covered Software. + +1.2.
“Contributor Version” + + means the combination of the Contributions of others (if any) used by a + Contributor and that particular Contributor’s Contribution. + +1.3. “Contribution” + + means Covered Software of a particular Contributor. + +1.4. “Covered Software” + + means Source Code Form to which the initial Contributor has attached the + notice in Exhibit A, the Executable Form of such Source Code Form, and + Modifications of such Source Code Form, in each case including portions + thereof. + +1.5. “Incompatible With Secondary Licenses” + means + + a. that the initial Contributor has attached the notice described in + Exhibit B to the Covered Software; or + + b. that the Covered Software was made available under the terms of version + 1.1 or earlier of the License, but not also under the terms of a + Secondary License. + +1.6. “Executable Form” + + means any form of the work other than Source Code Form. + +1.7. “Larger Work” + + means a work that combines Covered Software with other material, in a separate + file or files, that is not Covered Software. + +1.8. “License” + + means this document. + +1.9. “Licensable” + + means having the right to grant, to the maximum extent possible, whether at the + time of the initial grant or subsequently, any and all of the rights conveyed by + this License. + +1.10. “Modifications” + + means any of the following: + + a. any file in Source Code Form that results from an addition to, deletion + from, or modification of the contents of Covered Software; or + + b. any new file in Source Code Form that contains any Covered Software. + +1.11. “Patent Claims” of a Contributor + + means any patent claim(s), including without limitation, method, process, + and apparatus claims, in any patent Licensable by such Contributor that + would be infringed, but for the grant of the License, by the making, + using, selling, offering for sale, having made, import, or transfer of + either its Contributions or its Contributor Version. + +1.12. “Secondary License” + + means either the GNU General Public License, Version 2.0, the GNU Lesser + General Public License, Version 2.1, the GNU Affero General Public + License, Version 3.0, or any later versions of those licenses. + +1.13. “Source Code Form” + + means the form of the work preferred for making modifications. + +1.14. “You” (or “Your”) + + means an individual or a legal entity exercising rights under this + License. For legal entities, “You” includes any entity that controls, is + controlled by, or is under common control with You. For purposes of this + definition, “control” means (a) the power, direct or indirect, to cause + the direction or management of such entity, whether by contract or + otherwise, or (b) ownership of more than fifty percent (50%) of the + outstanding shares or beneficial ownership of such entity. + + +2. License Grants and Conditions + +2.1. Grants + + Each Contributor hereby grants You a world-wide, royalty-free, + non-exclusive license: + + a. under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or as + part of a Larger Work; and + + b. under Patent Claims of such Contributor to make, use, sell, offer for + sale, have made, import, and otherwise transfer either its Contributions + or its Contributor Version. + +2.2.
Effective Date + + The licenses granted in Section 2.1 with respect to any Contribution become + effective for each Contribution on the date the Contributor first distributes + such Contribution. + +2.3. Limitations on Grant Scope + + The licenses granted in this Section 2 are the only rights granted under this + License. No additional rights or licenses will be implied from the distribution + or licensing of Covered Software under this License. Notwithstanding Section + 2.1(b) above, no patent license is granted by a Contributor: + + a. for any code that a Contributor has removed from Covered Software; or + + b. for infringements caused by: (i) Your and any other third party’s + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + + c. under Patent Claims infringed by Covered Software in the absence of its + Contributions. + + This License does not grant any rights in the trademarks, service marks, or + logos of any Contributor (except as may be necessary to comply with the + notice requirements in Section 3.4). + +2.4. Subsequent Licenses + + No Contributor makes additional grants as a result of Your choice to + distribute the Covered Software under a subsequent version of this License + (see Section 10.2) or under the terms of a Secondary License (if permitted + under the terms of Section 3.3). + +2.5. Representation + + Each Contributor represents that the Contributor believes its Contributions + are its original creation(s) or it has sufficient rights to grant the + rights to its Contributions conveyed by this License. + +2.6. Fair Use + + This License is not intended to limit any rights You have under applicable + copyright doctrines of fair use, fair dealing, or other equivalents. + +2.7. Conditions + + Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in + Section 2.1. + + +3. Responsibilities + +3.1. Distribution of Source Form + + All distribution of Covered Software in Source Code Form, including any + Modifications that You create or to which You contribute, must be under the + terms of this License. You must inform recipients that the Source Code Form + of the Covered Software is governed by the terms of this License, and how + they can obtain a copy of this License. You may not attempt to alter or + restrict the recipients’ rights in the Source Code Form. + +3.2. Distribution of Executable Form + + If You distribute Covered Software in Executable Form then: + + a. such Covered Software must also be made available in Source Code Form, + as described in Section 3.1, and You must inform recipients of the + Executable Form how they can obtain a copy of such Source Code Form by + reasonable means in a timely manner, at a charge no more than the cost + of distribution to the recipient; and + + b. You may distribute such Executable Form under the terms of this License, + or sublicense it under different terms, provided that the license for + the Executable Form does not attempt to limit or alter the recipients’ + rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + + You may create and distribute a Larger Work under terms of Your choice, + provided that You also comply with the requirements of this License for the + Covered Software.
If the Larger Work is a combination of Covered Software + with a work governed by one or more Secondary Licenses, and the Covered + Software is not Incompatible With Secondary Licenses, this License permits + You to additionally distribute such Covered Software under the terms of + such Secondary License(s), so that the recipient of the Larger Work may, at + their option, further distribute the Covered Software under the terms of + either this License or such Secondary License(s). + +3.4. Notices + + You may not remove or alter the substance of any license notices (including + copyright notices, patent notices, disclaimers of warranty, or limitations + of liability) contained within the Source Code Form of the Covered + Software, except that You may alter any license notices to the extent + required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + + You may choose to offer, and to charge a fee for, warranty, support, + indemnity or liability obligations to one or more recipients of Covered + Software. However, You may do so only on Your own behalf, and not on behalf + of any Contributor. You must make it absolutely clear that any such + warranty, support, indemnity, or liability obligation is offered by You + alone, and You hereby agree to indemnify every Contributor for any + liability incurred by such Contributor as a result of warranty, support, + indemnity or liability terms You offer. You may include additional + disclaimers of warranty and limitations of liability specific to any + jurisdiction. + +4. Inability to Comply Due to Statute or Regulation + + If it is impossible for You to comply with any of the terms of this License + with respect to some or all of the Covered Software due to statute, judicial + order, or regulation then You must: (a) comply with the terms of this License + to the maximum extent possible; and (b) describe the limitations and the code + they affect. Such description must be placed in a text file included with all + distributions of the Covered Software under this License. Except to the + extent prohibited by statute or regulation, such description must be + sufficiently detailed for a recipient of ordinary skill to be able to + understand it. + +5. Termination + +5.1. The rights granted under this License will terminate automatically if You + fail to comply with any of its terms. However, if You become compliant, + then the rights granted under this License from a particular Contributor + are reinstated (a) provisionally, unless and until such Contributor + explicitly and finally terminates Your grants, and (b) on an ongoing basis, + if such Contributor fails to notify You of the non-compliance by some + reasonable means prior to 60 days after You have come back into compliance. + Moreover, Your grants from a particular Contributor are reinstated on an + ongoing basis if such Contributor notifies You of the non-compliance by + some reasonable means, this is the first time You have received notice of + non-compliance with this License from such Contributor, and You become + compliant prior to 30 days after Your receipt of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent + infringement claim (excluding declaratory judgment actions, counter-claims, + and cross-claims) alleging that a Contributor Version directly or + indirectly infringes any patent, then the rights granted to You by any and + all Contributors for the Covered Software under Section 2.1 of this License + shall terminate. 
+ +5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user + license agreements (excluding distributors and resellers) which have been + validly granted by You or Your distributors under this License prior to + termination shall survive termination. + +6. Disclaimer of Warranty + + Covered Software is provided under this License on an “as is” basis, without + warranty of any kind, either expressed, implied, or statutory, including, + without limitation, warranties that the Covered Software is free of defects, + merchantable, fit for a particular purpose or non-infringing. The entire + risk as to the quality and performance of the Covered Software is with You. + Should any Covered Software prove defective in any respect, You (not any + Contributor) assume the cost of any necessary servicing, repair, or + correction. This disclaimer of warranty constitutes an essential part of this + License. No use of any Covered Software is authorized under this License + except under this disclaimer. + +7. Limitation of Liability + + Under no circumstances and under no legal theory, whether tort (including + negligence), contract, or otherwise, shall any Contributor, or anyone who + distributes Covered Software as permitted above, be liable to You for any + direct, indirect, special, incidental, or consequential damages of any + character including, without limitation, damages for lost profits, loss of + goodwill, work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses, even if such party shall have been + informed of the possibility of such damages. This limitation of liability + shall not apply to liability for death or personal injury resulting from such + party’s negligence to the extent applicable law prohibits such limitation. + Some jurisdictions do not allow the exclusion or limitation of incidental or + consequential damages, so this exclusion and limitation may not apply to You. + +8. Litigation + + Any litigation relating to this License may be brought only in the courts of + a jurisdiction where the defendant maintains its principal place of business + and such litigation shall be governed by laws of that jurisdiction, without + reference to its conflict-of-law provisions. Nothing in this Section shall + prevent a party’s ability to bring cross-claims or counter-claims. + +9. Miscellaneous + + This License represents the complete agreement concerning the subject matter + hereof. If any provision of this License is held to be unenforceable, such + provision shall be reformed only to the extent necessary to make it + enforceable. Any law or regulation which provides that the language of a + contract shall be construed against the drafter shall not be used to construe + this License against a Contributor. + + +10. Versions of the License + +10.1. New Versions + + Mozilla Foundation is the license steward. Except as provided in Section + 10.3, no one other than the license steward has the right to modify or + publish new versions of this License. Each version will be given a + distinguishing version number. + +10.2. Effect of New Versions + + You may distribute the Covered Software under the terms of the version of + the License under which You originally received the Covered Software, or + under the terms of any subsequent version published by the license + steward. + +10.3.
Modified Versions + + If you create software not governed by this License, and you want to + create a new license for such software, you may create and use a modified + version of this License if you rename the license and remove any + references to the name of the license steward (except to note that such + modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses + If You choose to distribute Source Code Form that is Incompatible With + Secondary Licenses under the terms of this version of the License, the + notice described in Exhibit B of this License must be attached. + +Exhibit A - Source Code Form License Notice + + This Source Code Form is subject to the + terms of the Mozilla Public License, v. + 2.0. If a copy of the MPL was not + distributed with this file, You can + obtain one at + http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular file, then +You may include the notice in a location (such as a LICENSE file in a relevant +directory) where a recipient would be likely to look for such a notice. + +You may add additional accurate notices of copyright ownership. + +Exhibit B - “Incompatible With Secondary Licenses” Notice + + This Source Code Form is “Incompatible + With Secondary Licenses”, as defined by + the Mozilla Public License, v. 2.0. + diff --git a/vendor/github.com/mitchellh/cli/README.md b/vendor/github.com/mitchellh/cli/README.md new file mode 100644 index 000000000..287ecb246 --- /dev/null +++ b/vendor/github.com/mitchellh/cli/README.md @@ -0,0 +1,61 @@ +# Go CLI Library [![GoDoc](https://godoc.org/github.com/mitchellh/cli?status.png)](https://godoc.org/github.com/mitchellh/cli) + +cli is a library for implementing powerful command-line interfaces in Go. +cli is the library that powers the CLI for +[Packer](https://github.com/mitchellh/packer), +[Serf](https://github.com/hashicorp/serf), and +[Consul](https://github.com/hashicorp/consul). + +## Features + +* Easy sub-command based CLIs: `cli foo`, `cli bar`, etc. + +* Support for nested subcommands such as `cli foo bar`. + +* Optional support for default subcommands so `cli` does something + other than error. + +* Automatic help generation for listing subcommands + +* Automatic help flag recognition of `-h`, `--help`, etc. + +* Automatic version flag recognition of `-v`, `--version`. + +* Helpers for interacting with the terminal, such as outputting information, + asking for input, etc. These are optional; you can always interact with the + terminal however you choose. + +* Use of Go interfaces/types makes augmenting various parts of the library a + piece of cake.
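+ +Commands are implementations of the library's `Command` interface (`Help`, `Run`, and `Synopsis` methods), created through `CommandFactory` functions. A minimal sketch of the `fooCommandFactory` referenced in the example below might look like this (the `fooCommand` type and its strings are illustrative, not part of the library): + +```go +type fooCommand struct{} + +func (c *fooCommand) Help() string { return "Usage: app foo [options]" } + +func (c *fooCommand) Synopsis() string { return "Runs the foo operation" } + +// Run executes the command and returns the process exit status. +func (c *fooCommand) Run(args []string) int { + return 0 +} + +func fooCommandFactory() (cli.Command, error) { + return &fooCommand{}, nil +} +```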
+ +## Example + +Below is a simple example of creating and running a CLI. + +```go +package main + +import ( + "log" + "os" + + "github.com/mitchellh/cli" +) + +func main() { + c := cli.NewCLI("app", "1.0.0") + c.Args = os.Args[1:] + c.Commands = map[string]cli.CommandFactory{ + "foo": fooCommandFactory, + "bar": barCommandFactory, + } + + exitStatus, err := c.Run() + if err != nil { + log.Println(err) + } + + os.Exit(exitStatus) +} +``` + diff --git a/vendor/github.com/mitchellh/cli/cli.go b/vendor/github.com/mitchellh/cli/cli.go new file mode 100644 index 000000000..e871e6136 --- /dev/null +++ b/vendor/github.com/mitchellh/cli/cli.go @@ -0,0 +1,390 @@ +package cli + +import ( + "fmt" + "io" + "os" + "sort" + "strings" + "sync" + "text/template" + + "github.com/armon/go-radix" +) + +// CLI contains the state necessary to run subcommands and parse the +// command line arguments. +// +// CLI also supports nested subcommands, such as "cli foo bar". To use +// nested subcommands, the key in the Commands mapping below contains the +// full subcommand. In this example, it would be "foo bar". +// +// If you use a CLI with nested subcommands, some semantics change due to +// ambiguities: +// +// * We use longest prefix matching to find a matching subcommand. This +// means if you register "foo bar" and the user executes "cli foo qux", +// the "foo" command will be executed with the arg "qux". It is up to +// you to handle these args. One option is to just return the special +// help return code `RunResultHelp` to display help and exit. +// +// * The help flag "-h" or "-help" will look at all args to determine +// the help function. For example: "otto apps list -h" will show the +// help for "apps list" but "otto apps -h" will show it for "apps". +// In the normal CLI, only the first subcommand is used. +// +// * The help flag will list any subcommands that a command takes +// as well as the command's help itself. If there are no subcommands, +// it will note this. If the CLI itself has no subcommands, this entire +// section is omitted. +// +// * Any parent commands that don't exist are automatically created as +// no-op commands that just show help for other subcommands. For example, +// if you only register "foo bar", then "foo" is automatically created. +// +type CLI struct { + // Args is the list of command-line arguments received excluding + // the name of the app. For example, if the command "./cli foo bar" + // was invoked, then Args should be []string{"foo", "bar"}. + Args []string + + // Commands is a mapping of subcommand names to a factory function + // for creating that Command implementation. If there is a command + // with a blank string "", then it will be used as the default command + // if no subcommand is specified. + // + // If the key has a space in it, this will create a nested subcommand. + // For example, if the key is "foo bar", then to access it our CLI + // must be accessed with "./cli foo bar". See the docs for CLI for + // notes on how this changes some other behavior of the CLI as well. + Commands map[string]CommandFactory + + // Name defines the name of the CLI. + Name string + + // Version of the CLI. + Version string + + // HelpFunc and HelpWriter are used to output help information, if + // requested. + // + // HelpFunc is the function called to generate the generic help + // text that is shown if help must be shown for the CLI that doesn't + // pertain to a specific command. + // + // HelpWriter is the Writer where the help text is outputted to.
+	// not specified, it will default to Stderr.
+	HelpFunc   HelpFunc
+	HelpWriter io.Writer
+
+	once           sync.Once
+	commandTree    *radix.Tree
+	commandNested  bool
+	isHelp         bool
+	subcommand     string
+	subcommandArgs []string
+	topFlags       []string
+
+	isVersion bool
+}
+
+// NewCLI returns a new CLI instance with sensible defaults.
+func NewCLI(app, version string) *CLI {
+	return &CLI{
+		Name:     app,
+		Version:  version,
+		HelpFunc: BasicHelpFunc(app),
+	}
+}
+
+// IsHelp returns whether or not the help flag is present within the
+// arguments.
+func (c *CLI) IsHelp() bool {
+	c.once.Do(c.init)
+	return c.isHelp
+}
+
+// IsVersion returns whether or not the version flag is present within the
+// arguments.
+func (c *CLI) IsVersion() bool {
+	c.once.Do(c.init)
+	return c.isVersion
+}
+
+// Run runs the actual CLI based on the arguments given.
+func (c *CLI) Run() (int, error) {
+	c.once.Do(c.init)
+
+	// Just show the version and exit if instructed.
+	if c.IsVersion() && c.Version != "" {
+		c.HelpWriter.Write([]byte(c.Version + "\n"))
+		return 1, nil
+	}
+
+	// If there is an invalid flag, then error
+	if len(c.topFlags) > 0 {
+		c.HelpWriter.Write([]byte(
+			"Invalid flags before the subcommand. If these flags are for\n" +
+				"the subcommand, please put them after the subcommand.\n\n"))
+		c.HelpWriter.Write([]byte(c.HelpFunc(c.Commands) + "\n"))
+		return 1, nil
+	}
+
+	// Attempt to get the factory function for creating the command
+	// implementation. If the command is invalid or blank, it is an error.
+	raw, ok := c.commandTree.Get(c.Subcommand())
+	if !ok {
+		c.HelpWriter.Write([]byte(c.HelpFunc(c.Commands) + "\n"))
+		return 1, nil
+	}
+
+	command, err := raw.(CommandFactory)()
+	if err != nil {
+		return 0, err
+	}
+
+	// If we've been instructed to just print the help, then print it
+	if c.IsHelp() {
+		c.commandHelp(command)
+		return 1, nil
+	}
+
+	code := command.Run(c.SubcommandArgs())
+	if code == RunResultHelp {
+		// Requesting help
+		c.commandHelp(command)
+		return 1, nil
+	}
+
+	return code, nil
+}
+
+// Subcommand returns the subcommand that the CLI would execute. For
+// example, a CLI from "--version version --help" would return a Subcommand
+// of "version".
+func (c *CLI) Subcommand() string {
+	c.once.Do(c.init)
+	return c.subcommand
+}
+
+// SubcommandArgs returns the arguments that will be passed to the
+// subcommand.
+func (c *CLI) SubcommandArgs() []string {
+	c.once.Do(c.init)
+	return c.subcommandArgs
+}
+
+func (c *CLI) init() {
+	if c.HelpFunc == nil {
+		c.HelpFunc = BasicHelpFunc("app")
+
+		if c.Name != "" {
+			c.HelpFunc = BasicHelpFunc(c.Name)
+		}
+	}
+
+	if c.HelpWriter == nil {
+		c.HelpWriter = os.Stderr
+	}
+
+	// Build our command tree
+	c.commandTree = radix.New()
+	c.commandNested = false
+	for k, v := range c.Commands {
+		k = strings.TrimSpace(k)
+		c.commandTree.Insert(k, v)
+		if strings.ContainsRune(k, ' ') {
+			c.commandNested = true
+		}
+	}
+
+	// Go through the keys and fill in any missing parent commands
+	if c.commandNested {
+		var walkFn radix.WalkFn
+		toInsert := make(map[string]struct{})
+		walkFn = func(k string, raw interface{}) bool {
+			idx := strings.LastIndex(k, " ")
+			if idx == -1 {
+				// If there is no space, just ignore top level commands
+				return false
+			}
+
+			// Trim up to that space so we can get the expected parent
+			k = k[:idx]
+			if _, ok := c.commandTree.Get(k); ok {
+				// Yay we have the parent!
+				return false
+			}
+
+			// We're missing the parent, so let's insert this
+			toInsert[k] = struct{}{}
+
+			// Call the walk function recursively so we check this one too
+			return walkFn(k, nil)
+		}
+
+		// Walk!
+		c.commandTree.Walk(walkFn)
+
+		// Insert any that we're missing
+		for k, _ := range toInsert {
+			var f CommandFactory = func() (Command, error) {
+				return &MockCommand{
+					HelpText:  "This command is accessed by using one of the subcommands below.",
+					RunResult: RunResultHelp,
+				}, nil
+			}
+
+			c.commandTree.Insert(k, f)
+		}
+	}
+
+	// Process the args
+	c.processArgs()
+}
+
+func (c *CLI) commandHelp(command Command) {
+	// Get the template to use
+	tpl := strings.TrimSpace(defaultHelpTemplate)
+	if t, ok := command.(CommandHelpTemplate); ok {
+		tpl = t.HelpTemplate()
+	}
+	if !strings.HasSuffix(tpl, "\n") {
+		tpl += "\n"
+	}
+
+	// Parse it
+	t, err := template.New("root").Parse(tpl)
+	if err != nil {
+		t = template.Must(template.New("root").Parse(fmt.Sprintf(
+			"Internal error! Failed to parse command help template: %s\n", err)))
+	}
+
+	// Template data
+	data := map[string]interface{}{
+		"Name": c.Name,
+		"Help": command.Help(),
+	}
+
+	// Build subcommand list if we have it
+	var subcommands []map[string]interface{}
+	if c.commandNested {
+		// Get the matching keys
+		var keys []string
+		prefix := c.Subcommand() + " "
+		c.commandTree.WalkPrefix(prefix, func(k string, raw interface{}) bool {
+			keys = append(keys, k)
+			return false
+		})
+
+		// Sort the keys
+		sort.Strings(keys)
+
+		// Figure out the padding length
+		var longest int
+		for _, k := range keys {
+			if v := len(k); v > longest {
+				longest = v
+			}
+		}
+
+		// Go through and create their structures
+		subcommands = make([]map[string]interface{}, len(keys))
+		for i, k := range keys {
+			raw, ok := c.commandTree.Get(k)
+			if !ok {
+				// We just checked that it should be here above. If it
+				// isn't, there are serious problems.
+				panic("value is missing")
+			}
+
+			// Get the command
+			sub, err := raw.(CommandFactory)()
+			if err != nil {
+				c.HelpWriter.Write([]byte(fmt.Sprintf(
+					"Error instantiating %q: %s", k, err)))
+			}
+
+			// Determine some info
+			name := strings.TrimPrefix(k, prefix)
+
+			subcommands[i] = map[string]interface{}{
+				"Name":        name,
+				"NameAligned": name + strings.Repeat(" ", longest-len(k)),
+				"Help":        sub.Help(),
+				"Synopsis":    sub.Synopsis(),
+			}
+		}
+	}
+	data["Subcommands"] = subcommands
+
+	// Write
+	err = t.Execute(c.HelpWriter, data)
+	if err == nil {
+		return
+	}
+
+	// An error, just output...
+	c.HelpWriter.Write([]byte(fmt.Sprintf(
+		"Internal error rendering help: %s", err)))
+}
+
+func (c *CLI) processArgs() {
+	for i, arg := range c.Args {
+		if c.subcommand == "" {
+			// Check for version and help flags if not in a subcommand
+			if arg == "-v" || arg == "-version" || arg == "--version" {
+				c.isVersion = true
+				continue
+			}
+			if arg == "-h" || arg == "-help" || arg == "--help" {
+				c.isHelp = true
+				continue
+			}
+
+			if arg != "" && arg[0] == '-' {
+				// Record the arg...
+				c.topFlags = append(c.topFlags, arg)
+			}
+		}
+
+		// If we didn't find a subcommand yet and this is the first non-flag
+		// argument, then this is our subcommand.
+		if c.subcommand == "" && arg != "" && arg[0] != '-' {
+			c.subcommand = arg
+			if c.commandNested {
+				// Nested CLI, the subcommand is actually the entire
+				// arg list up to a flag that is still a valid subcommand.
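+				// For example, with registered commands "foo" and
+				// "foo bar", the args ["foo", "bar", "-baz"] resolve
+				// to the subcommand "foo bar" with args ["-baz"].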
+				k, _, ok := c.commandTree.LongestPrefix(strings.Join(c.Args[i:], " "))
+				if ok {
+					c.subcommand = k
+					i += strings.Count(k, " ")
+				}
+			}
+
+			// The remaining args are the subcommand arguments
+			c.subcommandArgs = c.Args[i+1:]
+		}
+	}
+
+	// If we never found a subcommand and support a default command, then
+	// switch to using that.
+	if c.subcommand == "" {
+		if _, ok := c.Commands[""]; ok {
+			args := c.topFlags
+			args = append(args, c.subcommandArgs...)
+			c.topFlags = nil
+			c.subcommandArgs = args
+		}
+	}
+}
+
+const defaultHelpTemplate = `
+{{.Help}}{{if gt (len .Subcommands) 0}}
+
+Subcommands:
+{{ range $value := .Subcommands }}
+    {{ $value.NameAligned }}    {{ $value.Synopsis }}{{ end }}
+{{ end }}
+`
diff --git a/vendor/github.com/mitchellh/cli/cli_test.go b/vendor/github.com/mitchellh/cli/cli_test.go
new file mode 100644
index 000000000..8007fca17
--- /dev/null
+++ b/vendor/github.com/mitchellh/cli/cli_test.go
@@ -0,0 +1,443 @@
+package cli
+
+import (
+	"bytes"
+	"reflect"
+	"strings"
+	"testing"
+)
+
+func TestCLIIsHelp(t *testing.T) {
+	testCases := []struct {
+		args   []string
+		isHelp bool
+	}{
+		{[]string{"-h"}, true},
+		{[]string{"-help"}, true},
+		{[]string{"--help"}, true},
+		{[]string{"-h", "foo"}, true},
+		{[]string{"foo", "bar"}, false},
+		{[]string{"-v", "bar"}, false},
+		{[]string{"foo", "-h"}, false},
+		{[]string{"foo", "-help"}, false},
+		{[]string{"foo", "--help"}, false},
+	}
+
+	for _, testCase := range testCases {
+		cli := &CLI{Args: testCase.args}
+		result := cli.IsHelp()
+
+		if result != testCase.isHelp {
+			t.Errorf("Expected '%#v'. Args: %#v", testCase.isHelp, testCase.args)
+		}
+	}
+}
+
+func TestCLIIsVersion(t *testing.T) {
+	testCases := []struct {
+		args      []string
+		isVersion bool
+	}{
+		{[]string{"-v"}, true},
+		{[]string{"-version"}, true},
+		{[]string{"--version"}, true},
+		{[]string{"-v", "foo"}, true},
+		{[]string{"foo", "bar"}, false},
+		{[]string{"-h", "bar"}, false},
+		{[]string{"foo", "-v"}, false},
+		{[]string{"foo", "-version"}, false},
+		{[]string{"foo", "--version"}, false},
+	}
+
+	for _, testCase := range testCases {
+		cli := &CLI{Args: testCase.args}
+		result := cli.IsVersion()
+
+		if result != testCase.isVersion {
+			t.Errorf("Expected '%#v'. 
Args: %#v", testCase.isVersion, testCase.args) + } + } +} + +func TestCLIRun(t *testing.T) { + command := new(MockCommand) + cli := &CLI{ + Args: []string{"foo", "-bar", "-baz"}, + Commands: map[string]CommandFactory{ + "foo": func() (Command, error) { + return command, nil + }, + }, + } + + exitCode, err := cli.Run() + if err != nil { + t.Fatalf("err: %s", err) + } + + if exitCode != command.RunResult { + t.Fatalf("bad: %d", exitCode) + } + + if !command.RunCalled { + t.Fatalf("run should be called") + } + + if !reflect.DeepEqual(command.RunArgs, []string{"-bar", "-baz"}) { + t.Fatalf("bad args: %#v", command.RunArgs) + } +} + +func TestCLIRun_blank(t *testing.T) { + command := new(MockCommand) + cli := &CLI{ + Args: []string{"", "foo", "-bar", "-baz"}, + Commands: map[string]CommandFactory{ + "foo": func() (Command, error) { + return command, nil + }, + }, + } + + exitCode, err := cli.Run() + if err != nil { + t.Fatalf("err: %s", err) + } + + if exitCode != command.RunResult { + t.Fatalf("bad: %d", exitCode) + } + + if !command.RunCalled { + t.Fatalf("run should be called") + } + + if !reflect.DeepEqual(command.RunArgs, []string{"-bar", "-baz"}) { + t.Fatalf("bad args: %#v", command.RunArgs) + } +} + +func TestCLIRun_default(t *testing.T) { + commandBar := new(MockCommand) + commandBar.RunResult = 42 + + cli := &CLI{ + Args: []string{"-bar", "-baz"}, + Commands: map[string]CommandFactory{ + "": func() (Command, error) { + return commandBar, nil + }, + "foo": func() (Command, error) { + return new(MockCommand), nil + }, + }, + } + + exitCode, err := cli.Run() + if err != nil { + t.Fatalf("err: %s", err) + } + + if exitCode != commandBar.RunResult { + t.Fatalf("bad: %d", exitCode) + } + + if !commandBar.RunCalled { + t.Fatalf("run should be called") + } + + if !reflect.DeepEqual(commandBar.RunArgs, []string{"-bar", "-baz"}) { + t.Fatalf("bad args: %#v", commandBar.RunArgs) + } +} + +func TestCLIRun_nested(t *testing.T) { + command := new(MockCommand) + cli := &CLI{ + Args: []string{"foo", "bar", "-bar", "-baz"}, + Commands: map[string]CommandFactory{ + "foo": func() (Command, error) { + return new(MockCommand), nil + }, + "foo bar": func() (Command, error) { + return command, nil + }, + }, + } + + exitCode, err := cli.Run() + if err != nil { + t.Fatalf("err: %s", err) + } + + if exitCode != command.RunResult { + t.Fatalf("bad: %d", exitCode) + } + + if !command.RunCalled { + t.Fatalf("run should be called") + } + + if !reflect.DeepEqual(command.RunArgs, []string{"-bar", "-baz"}) { + t.Fatalf("bad args: %#v", command.RunArgs) + } +} + +func TestCLIRun_nestedMissingParent(t *testing.T) { + buf := new(bytes.Buffer) + cli := &CLI{ + Args: []string{"foo"}, + Commands: map[string]CommandFactory{ + "foo bar": func() (Command, error) { + return &MockCommand{SynopsisText: "hi!"}, nil + }, + }, + HelpWriter: buf, + } + + exitCode, err := cli.Run() + if err != nil { + t.Fatalf("err: %s", err) + } + + if exitCode != 1 { + t.Fatalf("bad exit code: %d", exitCode) + } + + if buf.String() != testCommandNestedMissingParent { + t.Fatalf("bad: %#v", buf.String()) + } +} + +func TestCLIRun_printHelp(t *testing.T) { + testCases := [][]string{ + {}, + {"-h"}, + {"i-dont-exist"}, + {"-bad-flag", "foo"}, + } + + for _, testCase := range testCases { + buf := new(bytes.Buffer) + helpText := "foo" + + cli := &CLI{ + Args: testCase, + Commands: map[string]CommandFactory{ + "foo": func() (Command, error) { + return new(MockCommand), nil + }, + }, + HelpFunc: func(map[string]CommandFactory) string { + return helpText + 
}, + HelpWriter: buf, + } + + code, err := cli.Run() + if err != nil { + t.Errorf("Args: %#v. Error: %s", testCase, err) + continue + } + + if code != 1 { + t.Errorf("Args: %#v. Code: %d", testCase, code) + continue + } + + if !strings.Contains(buf.String(), helpText) { + t.Errorf("Args: %#v. Text: %v", testCase, buf.String()) + } + } +} + +func TestCLIRun_printCommandHelp(t *testing.T) { + testCases := [][]string{ + {"--help", "foo"}, + {"-h", "foo"}, + } + + for _, args := range testCases { + command := &MockCommand{ + HelpText: "donuts", + } + + buf := new(bytes.Buffer) + cli := &CLI{ + Args: args, + Commands: map[string]CommandFactory{ + "foo": func() (Command, error) { + return command, nil + }, + }, + HelpWriter: buf, + } + + exitCode, err := cli.Run() + if err != nil { + t.Fatalf("err: %s", err) + } + + if exitCode != 1 { + t.Fatalf("bad exit code: %d", exitCode) + } + + if buf.String() != (command.HelpText + "\n") { + t.Fatalf("bad: %#v", buf.String()) + } + } +} + +func TestCLIRun_printCommandHelpSubcommands(t *testing.T) { + testCases := [][]string{ + {"--help", "foo"}, + {"-h", "foo"}, + } + + for _, args := range testCases { + command := &MockCommand{ + HelpText: "donuts", + } + + buf := new(bytes.Buffer) + cli := &CLI{ + Args: args, + Commands: map[string]CommandFactory{ + "foo": func() (Command, error) { + return command, nil + }, + "foo bar": func() (Command, error) { + return &MockCommand{SynopsisText: "hi!"}, nil + }, + "foo longer": func() (Command, error) { + return &MockCommand{SynopsisText: "hi!"}, nil + }, + }, + HelpWriter: buf, + } + + exitCode, err := cli.Run() + if err != nil { + t.Fatalf("err: %s", err) + } + + if exitCode != 1 { + t.Fatalf("bad exit code: %d", exitCode) + } + + if buf.String() != testCommandHelpSubcommandsOutput { + t.Fatalf("bad: %#v", buf.String()) + } + } +} + +func TestCLIRun_printCommandHelpTemplate(t *testing.T) { + testCases := [][]string{ + {"--help", "foo"}, + {"-h", "foo"}, + } + + for _, args := range testCases { + command := &MockCommandHelpTemplate{ + MockCommand: MockCommand{ + HelpText: "donuts", + }, + + HelpTemplateText: "hello {{.Help}}", + } + + buf := new(bytes.Buffer) + cli := &CLI{ + Args: args, + Commands: map[string]CommandFactory{ + "foo": func() (Command, error) { + return command, nil + }, + }, + HelpWriter: buf, + } + + exitCode, err := cli.Run() + if err != nil { + t.Fatalf("err: %s", err) + } + + if exitCode != 1 { + t.Fatalf("bad exit code: %d", exitCode) + } + + if buf.String() != "hello "+command.HelpText+"\n" { + t.Fatalf("bad: %#v", buf.String()) + } + } +} + +func TestCLISubcommand(t *testing.T) { + testCases := []struct { + args []string + subcommand string + }{ + {[]string{"bar"}, "bar"}, + {[]string{"foo", "-h"}, "foo"}, + {[]string{"-h", "bar"}, "bar"}, + {[]string{"foo", "bar", "-h"}, "foo"}, + } + + for _, testCase := range testCases { + cli := &CLI{Args: testCase.args} + result := cli.Subcommand() + + if result != testCase.subcommand { + t.Errorf("Expected %#v, got %#v. 
Args: %#v", + testCase.subcommand, result, testCase.args) + } + } +} + +func TestCLISubcommand_nested(t *testing.T) { + testCases := []struct { + args []string + subcommand string + }{ + {[]string{"bar"}, "bar"}, + {[]string{"foo", "-h"}, "foo"}, + {[]string{"-h", "bar"}, "bar"}, + {[]string{"foo", "bar", "-h"}, "foo bar"}, + {[]string{"foo", "bar", "baz", "-h"}, "foo bar"}, + {[]string{"foo", "bar", "-h", "baz"}, "foo bar"}, + } + + for _, testCase := range testCases { + cli := &CLI{ + Args: testCase.args, + Commands: map[string]CommandFactory{ + "foo bar": func() (Command, error) { + return new(MockCommand), nil + }, + }, + } + result := cli.Subcommand() + + if result != testCase.subcommand { + t.Errorf("Expected %#v, got %#v. Args: %#v", + testCase.subcommand, result, testCase.args) + } + } +} + +const testCommandNestedMissingParent = `This command is accessed by using one of the subcommands below. + +Subcommands: + + bar hi! + +` + +const testCommandHelpSubcommandsOutput = `donuts + +Subcommands: + + bar hi! + longer hi! + +` diff --git a/vendor/github.com/mitchellh/cli/command.go b/vendor/github.com/mitchellh/cli/command.go new file mode 100644 index 000000000..b4924eb00 --- /dev/null +++ b/vendor/github.com/mitchellh/cli/command.go @@ -0,0 +1,47 @@ +package cli + +const ( + // RunResultHelp is a value that can be returned from Run to signal + // to the CLI to render the help output. + RunResultHelp = -18511 +) + +// A command is a runnable sub-command of a CLI. +type Command interface { + // Help should return long-form help text that includes the command-line + // usage, a brief few sentences explaining the function of the command, + // and the complete list of flags the command accepts. + Help() string + + // Run should run the actual command with the given CLI instance and + // command-line arguments. It should return the exit status when it is + // finished. + // + // There are a handful of special exit codes this can return documented + // above that change behavior. + Run(args []string) int + + // Synopsis should return a one-line, short synopsis of the command. + // This should be less than 50 characters ideally. + Synopsis() string +} + +// CommandHelpTemplate is an extension of Command that also has a function +// for returning a template for the help rather than the help itself. In +// this scenario, both Help and HelpTemplate should be implemented. +// +// If CommandHelpTemplate isn't implemented, the Help is output as-is. +type CommandHelpTemplate interface { + // HelpTemplate is the template in text/template format to use for + // displaying the Help. The keys available are: + // + // * ".Help" - The help text itself + // * ".Subcommands" + // + HelpTemplate() string +} + +// CommandFactory is a type of function that is a factory for commands. +// We need a factory because we may need to setup some state on the +// struct that implements the command itself. +type CommandFactory func() (Command, error) diff --git a/vendor/github.com/mitchellh/cli/command_mock.go b/vendor/github.com/mitchellh/cli/command_mock.go new file mode 100644 index 000000000..6371e573b --- /dev/null +++ b/vendor/github.com/mitchellh/cli/command_mock.go @@ -0,0 +1,42 @@ +package cli + +// MockCommand is an implementation of Command that can be used for tests. +// It is publicly exported from this package in case you want to use it +// externally. 
+type MockCommand struct {
+	// Settable
+	HelpText     string
+	RunResult    int
+	SynopsisText string
+
+	// Set by the command
+	RunCalled bool
+	RunArgs   []string
+}
+
+func (c *MockCommand) Help() string {
+	return c.HelpText
+}
+
+func (c *MockCommand) Run(args []string) int {
+	c.RunCalled = true
+	c.RunArgs = args
+
+	return c.RunResult
+}
+
+func (c *MockCommand) Synopsis() string {
+	return c.SynopsisText
+}
+
+// MockCommandHelpTemplate is an implementation of CommandHelpTemplate.
+type MockCommandHelpTemplate struct {
+	MockCommand
+
+	// Settable
+	HelpTemplateText string
+}
+
+func (c *MockCommandHelpTemplate) HelpTemplate() string {
+	return c.HelpTemplateText
+}
diff --git a/vendor/github.com/mitchellh/cli/command_mock_test.go b/vendor/github.com/mitchellh/cli/command_mock_test.go
new file mode 100644
index 000000000..241f33939
--- /dev/null
+++ b/vendor/github.com/mitchellh/cli/command_mock_test.go
@@ -0,0 +1,9 @@
+package cli
+
+import (
+	"testing"
+)
+
+func TestMockCommand_implements(t *testing.T) {
+	var _ Command = new(MockCommand)
+}
diff --git a/vendor/github.com/mitchellh/cli/help.go b/vendor/github.com/mitchellh/cli/help.go
new file mode 100644
index 000000000..67ea8c824
--- /dev/null
+++ b/vendor/github.com/mitchellh/cli/help.go
@@ -0,0 +1,79 @@
+package cli
+
+import (
+	"bytes"
+	"fmt"
+	"log"
+	"sort"
+	"strings"
+)
+
+// HelpFunc is the type of the function that is responsible for generating
+// the help output when the CLI must show the general help text.
+type HelpFunc func(map[string]CommandFactory) string
+
+// BasicHelpFunc generates some basic help output that is usually good enough
+// for most CLI applications.
+func BasicHelpFunc(app string) HelpFunc {
+	return func(commands map[string]CommandFactory) string {
+		var buf bytes.Buffer
+		buf.WriteString(fmt.Sprintf(
+			"usage: %s [--version] [--help] <command> [<args>]\n\n",
+			app))
+		buf.WriteString("Available commands are:\n")
+
+		// Get the list of keys so we can sort them, and also get the maximum
+		// key length so they can be aligned properly.
+		keys := make([]string, 0, len(commands))
+		maxKeyLen := 0
+		for key, _ := range commands {
+			if len(key) > maxKeyLen {
+				maxKeyLen = len(key)
+			}
+
+			keys = append(keys, key)
+		}
+		sort.Strings(keys)
+
+		for _, key := range keys {
+			commandFunc, ok := commands[key]
+			if !ok {
+				// This should never happen since we JUST built the list of
+				// keys.
+				panic("command not found: " + key)
+			}
+
+			command, err := commandFunc()
+			if err != nil {
+				log.Printf("[ERR] cli: Command '%s' failed to load: %s",
+					key, err)
+				continue
+			}
+
+			key = fmt.Sprintf("%s%s", key, strings.Repeat(" ", maxKeyLen-len(key)))
+			buf.WriteString(fmt.Sprintf("    %s    %s\n", key, command.Synopsis()))
+		}
+
+		return buf.String()
+	}
+}
+
+// FilteredHelpFunc will filter the commands to only include the keys
+// in the include parameter.
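+//
+// For example (illustrative):
+//
+//	help := FilteredHelpFunc([]string{"foo"}, BasicHelpFunc("app"))
+//	// help renders only the "foo" entry of any command map it is given.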
+func FilteredHelpFunc(include []string, f HelpFunc) HelpFunc { + return func(commands map[string]CommandFactory) string { + set := make(map[string]struct{}) + for _, k := range include { + set[k] = struct{}{} + } + + filtered := make(map[string]CommandFactory) + for k, f := range commands { + if _, ok := set[k]; ok { + filtered[k] = f + } + } + + return f(filtered) + } +} diff --git a/vendor/github.com/mitchellh/cli/ui.go b/vendor/github.com/mitchellh/cli/ui.go new file mode 100644 index 000000000..a2d6f94f4 --- /dev/null +++ b/vendor/github.com/mitchellh/cli/ui.go @@ -0,0 +1,187 @@ +package cli + +import ( + "bufio" + "errors" + "fmt" + "io" + "os" + "os/signal" + "strings" + + "github.com/bgentry/speakeasy" + "github.com/mattn/go-isatty" +) + +// Ui is an interface for interacting with the terminal, or "interface" +// of a CLI. This abstraction doesn't have to be used, but helps provide +// a simple, layerable way to manage user interactions. +type Ui interface { + // Ask asks the user for input using the given query. The response is + // returned as the given string, or an error. + Ask(string) (string, error) + + // AskSecret asks the user for input using the given query, but does not echo + // the keystrokes to the terminal. + AskSecret(string) (string, error) + + // Output is called for normal standard output. + Output(string) + + // Info is called for information related to the previous output. + // In general this may be the exact same as Output, but this gives + // Ui implementors some flexibility with output formats. + Info(string) + + // Error is used for any error messages that might appear on standard + // error. + Error(string) + + // Warn is used for any warning messages that might appear on standard + // error. + Warn(string) +} + +// BasicUi is an implementation of Ui that just outputs to the given +// writer. This UI is not threadsafe by default, but you can wrap it +// in a ConcurrentUi to make it safe. +type BasicUi struct { + Reader io.Reader + Writer io.Writer + ErrorWriter io.Writer +} + +func (u *BasicUi) Ask(query string) (string, error) { + return u.ask(query, false) +} + +func (u *BasicUi) AskSecret(query string) (string, error) { + return u.ask(query, true) +} + +func (u *BasicUi) ask(query string, secret bool) (string, error) { + if _, err := fmt.Fprint(u.Writer, query+" "); err != nil { + return "", err + } + + // Register for interrupts so that we can catch it and immediately + // return... + sigCh := make(chan os.Signal, 1) + signal.Notify(sigCh, os.Interrupt) + defer signal.Stop(sigCh) + + // Ask for input in a go-routine so that we can ignore it. + errCh := make(chan error, 1) + lineCh := make(chan string, 1) + go func() { + var line string + var err error + if secret && isatty.IsTerminal(os.Stdin.Fd()) { + line, err = speakeasy.Ask("") + } else { + r := bufio.NewReader(u.Reader) + line, err = r.ReadString('\n') + } + if err != nil { + errCh <- err + return + } + + lineCh <- strings.TrimRight(line, "\r\n") + }() + + select { + case err := <-errCh: + return "", err + case line := <-lineCh: + return line, nil + case <-sigCh: + // Print a newline so that any further output starts properly + // on a new line. 
+ fmt.Fprintln(u.Writer) + + return "", errors.New("interrupted") + } +} + +func (u *BasicUi) Error(message string) { + w := u.Writer + if u.ErrorWriter != nil { + w = u.ErrorWriter + } + + fmt.Fprint(w, message) + fmt.Fprint(w, "\n") +} + +func (u *BasicUi) Info(message string) { + u.Output(message) +} + +func (u *BasicUi) Output(message string) { + fmt.Fprint(u.Writer, message) + fmt.Fprint(u.Writer, "\n") +} + +func (u *BasicUi) Warn(message string) { + u.Error(message) +} + +// PrefixedUi is an implementation of Ui that prefixes messages. +type PrefixedUi struct { + AskPrefix string + AskSecretPrefix string + OutputPrefix string + InfoPrefix string + ErrorPrefix string + WarnPrefix string + Ui Ui +} + +func (u *PrefixedUi) Ask(query string) (string, error) { + if query != "" { + query = fmt.Sprintf("%s%s", u.AskPrefix, query) + } + + return u.Ui.Ask(query) +} + +func (u *PrefixedUi) AskSecret(query string) (string, error) { + if query != "" { + query = fmt.Sprintf("%s%s", u.AskSecretPrefix, query) + } + + return u.Ui.AskSecret(query) +} + +func (u *PrefixedUi) Error(message string) { + if message != "" { + message = fmt.Sprintf("%s%s", u.ErrorPrefix, message) + } + + u.Ui.Error(message) +} + +func (u *PrefixedUi) Info(message string) { + if message != "" { + message = fmt.Sprintf("%s%s", u.InfoPrefix, message) + } + + u.Ui.Info(message) +} + +func (u *PrefixedUi) Output(message string) { + if message != "" { + message = fmt.Sprintf("%s%s", u.OutputPrefix, message) + } + + u.Ui.Output(message) +} + +func (u *PrefixedUi) Warn(message string) { + if message != "" { + message = fmt.Sprintf("%s%s", u.WarnPrefix, message) + } + + u.Ui.Warn(message) +} diff --git a/vendor/github.com/mitchellh/cli/ui_colored.go b/vendor/github.com/mitchellh/cli/ui_colored.go new file mode 100644 index 000000000..e3d5131d1 --- /dev/null +++ b/vendor/github.com/mitchellh/cli/ui_colored.go @@ -0,0 +1,69 @@ +package cli + +import ( + "fmt" +) + +// UiColor is a posix shell color code to use. +type UiColor struct { + Code int + Bold bool +} + +// A list of colors that are useful. These are all non-bolded by default. +var ( + UiColorNone UiColor = UiColor{-1, false} + UiColorRed = UiColor{31, false} + UiColorGreen = UiColor{32, false} + UiColorYellow = UiColor{33, false} + UiColorBlue = UiColor{34, false} + UiColorMagenta = UiColor{35, false} + UiColorCyan = UiColor{36, false} +) + +// ColoredUi is a Ui implementation that colors its output according +// to the given color schemes for the given type of output. 
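+//
+// A small illustrative sketch (the UiColor values are defined above):
+//
+//	ui := &ColoredUi{
+//		ErrorColor: UiColorRed,
+//		Ui:         &BasicUi{Writer: os.Stdout},
+//	}
+//	ui.Error("something went wrong") // written in red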
+type ColoredUi struct {
+	OutputColor UiColor
+	InfoColor   UiColor
+	ErrorColor  UiColor
+	WarnColor   UiColor
+	Ui          Ui
+}
+
+func (u *ColoredUi) Ask(query string) (string, error) {
+	return u.Ui.Ask(u.colorize(query, u.OutputColor))
+}
+
+func (u *ColoredUi) AskSecret(query string) (string, error) {
+	return u.Ui.AskSecret(u.colorize(query, u.OutputColor))
+}
+
+func (u *ColoredUi) Output(message string) {
+	u.Ui.Output(u.colorize(message, u.OutputColor))
+}
+
+func (u *ColoredUi) Info(message string) {
+	u.Ui.Info(u.colorize(message, u.InfoColor))
+}
+
+func (u *ColoredUi) Error(message string) {
+	u.Ui.Error(u.colorize(message, u.ErrorColor))
+}
+
+func (u *ColoredUi) Warn(message string) {
+	u.Ui.Warn(u.colorize(message, u.WarnColor))
+}
+
+func (u *ColoredUi) colorize(message string, color UiColor) string {
+	if color.Code == -1 {
+		return message
+	}
+
+	attr := 0
+	if color.Bold {
+		attr = 1
+	}
+
+	return fmt.Sprintf("\033[%d;%dm%s\033[0m", attr, color.Code, message)
+}
diff --git a/vendor/github.com/mitchellh/cli/ui_colored_test.go b/vendor/github.com/mitchellh/cli/ui_colored_test.go
new file mode 100644
index 000000000..35bbbf589
--- /dev/null
+++ b/vendor/github.com/mitchellh/cli/ui_colored_test.go
@@ -0,0 +1,74 @@
+package cli
+
+import (
+	"testing"
+)
+
+func TestColoredUi_impl(t *testing.T) {
+	var _ Ui = new(ColoredUi)
+}
+
+func TestColoredUi_noColor(t *testing.T) {
+	mock := new(MockUi)
+	ui := &ColoredUi{
+		ErrorColor: UiColorNone,
+		Ui:         mock,
+	}
+	ui.Error("foo")
+
+	if mock.ErrorWriter.String() != "foo\n" {
+		t.Fatalf("bad: %#v", mock.ErrorWriter.String())
+	}
+}
+
+func TestColoredUi_Error(t *testing.T) {
+	mock := new(MockUi)
+	ui := &ColoredUi{
+		ErrorColor: UiColor{Code: 33},
+		Ui:         mock,
+	}
+	ui.Error("foo")
+
+	if mock.ErrorWriter.String() != "\033[0;33mfoo\033[0m\n" {
+		t.Fatalf("bad: %#v", mock.ErrorWriter.String())
+	}
+}
+
+func TestColoredUi_Info(t *testing.T) {
+	mock := new(MockUi)
+	ui := &ColoredUi{
+		InfoColor: UiColor{Code: 33},
+		Ui:        mock,
+	}
+	ui.Info("foo")
+
+	if mock.OutputWriter.String() != "\033[0;33mfoo\033[0m\n" {
+		t.Fatalf("bad: %#v", mock.OutputWriter.String())
+	}
+}
+
+func TestColoredUi_Output(t *testing.T) {
+	mock := new(MockUi)
+	ui := &ColoredUi{
+		OutputColor: UiColor{Code: 33},
+		Ui:          mock,
+	}
+	ui.Output("foo")
+
+	if mock.OutputWriter.String() != "\033[0;33mfoo\033[0m\n" {
+		t.Fatalf("bad: %#v", mock.OutputWriter.String())
+	}
+}
+
+func TestColoredUi_Warn(t *testing.T) {
+	mock := new(MockUi)
+	ui := &ColoredUi{
+		WarnColor: UiColor{Code: 33},
+		Ui:        mock,
+	}
+	ui.Warn("foo")
+
+	if mock.ErrorWriter.String() != "\033[0;33mfoo\033[0m\n" {
+		t.Fatalf("bad: %#v", mock.ErrorWriter.String())
+	}
+}
diff --git a/vendor/github.com/mitchellh/cli/ui_concurrent.go b/vendor/github.com/mitchellh/cli/ui_concurrent.go
new file mode 100644
index 000000000..b4f4dbfaa
--- /dev/null
+++ b/vendor/github.com/mitchellh/cli/ui_concurrent.go
@@ -0,0 +1,54 @@
+package cli
+
+import (
+	"sync"
+)
+
+// ConcurrentUi is a wrapper around a Ui interface (and implements that
+// interface) making the underlying Ui concurrency safe.
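+//
+// For example (an illustrative sketch):
+//
+//	ui := &ConcurrentUi{Ui: &BasicUi{Writer: os.Stdout}}
+//	go ui.Output("from goroutine A")
+//	go ui.Output("from goroutine B") // calls are serialized by the mutex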
+type ConcurrentUi struct { + Ui Ui + l sync.Mutex +} + +func (u *ConcurrentUi) Ask(query string) (string, error) { + u.l.Lock() + defer u.l.Unlock() + + return u.Ui.Ask(query) +} + +func (u *ConcurrentUi) AskSecret(query string) (string, error) { + u.l.Lock() + defer u.l.Unlock() + + return u.Ui.AskSecret(query) +} + +func (u *ConcurrentUi) Error(message string) { + u.l.Lock() + defer u.l.Unlock() + + u.Ui.Error(message) +} + +func (u *ConcurrentUi) Info(message string) { + u.l.Lock() + defer u.l.Unlock() + + u.Ui.Info(message) +} + +func (u *ConcurrentUi) Output(message string) { + u.l.Lock() + defer u.l.Unlock() + + u.Ui.Output(message) +} + +func (u *ConcurrentUi) Warn(message string) { + u.l.Lock() + defer u.l.Unlock() + + u.Ui.Warn(message) +} diff --git a/vendor/github.com/mitchellh/cli/ui_concurrent_test.go b/vendor/github.com/mitchellh/cli/ui_concurrent_test.go new file mode 100644 index 000000000..d03e49809 --- /dev/null +++ b/vendor/github.com/mitchellh/cli/ui_concurrent_test.go @@ -0,0 +1,9 @@ +package cli + +import ( + "testing" +) + +func TestConcurrentUi_impl(t *testing.T) { + var _ Ui = new(ConcurrentUi) +} diff --git a/vendor/github.com/mitchellh/cli/ui_mock.go b/vendor/github.com/mitchellh/cli/ui_mock.go new file mode 100644 index 000000000..c46772855 --- /dev/null +++ b/vendor/github.com/mitchellh/cli/ui_mock.go @@ -0,0 +1,64 @@ +package cli + +import ( + "bytes" + "fmt" + "io" + "sync" +) + +// MockUi is a mock UI that is used for tests and is exported publicly for +// use in external tests if needed as well. +type MockUi struct { + InputReader io.Reader + ErrorWriter *bytes.Buffer + OutputWriter *bytes.Buffer + + once sync.Once +} + +func (u *MockUi) Ask(query string) (string, error) { + u.once.Do(u.init) + + var result string + fmt.Fprint(u.OutputWriter, query) + if _, err := fmt.Fscanln(u.InputReader, &result); err != nil { + return "", err + } + + return result, nil +} + +func (u *MockUi) AskSecret(query string) (string, error) { + return u.Ask(query) +} + +func (u *MockUi) Error(message string) { + u.once.Do(u.init) + + fmt.Fprint(u.ErrorWriter, message) + fmt.Fprint(u.ErrorWriter, "\n") +} + +func (u *MockUi) Info(message string) { + u.Output(message) +} + +func (u *MockUi) Output(message string) { + u.once.Do(u.init) + + fmt.Fprint(u.OutputWriter, message) + fmt.Fprint(u.OutputWriter, "\n") +} + +func (u *MockUi) Warn(message string) { + u.once.Do(u.init) + + fmt.Fprint(u.ErrorWriter, message) + fmt.Fprint(u.ErrorWriter, "\n") +} + +func (u *MockUi) init() { + u.ErrorWriter = new(bytes.Buffer) + u.OutputWriter = new(bytes.Buffer) +} diff --git a/vendor/github.com/mitchellh/cli/ui_mock_test.go b/vendor/github.com/mitchellh/cli/ui_mock_test.go new file mode 100644 index 000000000..4cce0bef4 --- /dev/null +++ b/vendor/github.com/mitchellh/cli/ui_mock_test.go @@ -0,0 +1,9 @@ +package cli + +import ( + "testing" +) + +func TestMockUi_implements(t *testing.T) { + var _ Ui = new(MockUi) +} diff --git a/vendor/github.com/mitchellh/cli/ui_test.go b/vendor/github.com/mitchellh/cli/ui_test.go new file mode 100644 index 000000000..ac795ba84 --- /dev/null +++ b/vendor/github.com/mitchellh/cli/ui_test.go @@ -0,0 +1,162 @@ +package cli + +import ( + "bytes" + "io" + "testing" +) + +func TestBasicUi_implements(t *testing.T) { + var _ Ui = new(BasicUi) +} + +func TestBasicUi_Ask(t *testing.T) { + in_r, in_w := io.Pipe() + defer in_r.Close() + defer in_w.Close() + + writer := new(bytes.Buffer) + ui := &BasicUi{ + Reader: in_r, + Writer: writer, + } + + go in_w.Write([]byte("foo 
bar\nbaz\n")) + + result, err := ui.Ask("Name?") + if err != nil { + t.Fatalf("err: %s", err) + } + + if writer.String() != "Name? " { + t.Fatalf("bad: %#v", writer.String()) + } + + if result != "foo bar" { + t.Fatalf("bad: %#v", result) + } +} + +func TestBasicUi_AskSecret(t *testing.T) { + in_r, in_w := io.Pipe() + defer in_r.Close() + defer in_w.Close() + + writer := new(bytes.Buffer) + ui := &BasicUi{ + Reader: in_r, + Writer: writer, + } + + go in_w.Write([]byte("foo bar\nbaz\n")) + + result, err := ui.AskSecret("Name?") + if err != nil { + t.Fatalf("err: %s", err) + } + + if writer.String() != "Name? " { + t.Fatalf("bad: %#v", writer.String()) + } + + if result != "foo bar" { + t.Fatalf("bad: %#v", result) + } +} + +func TestBasicUi_Error(t *testing.T) { + writer := new(bytes.Buffer) + ui := &BasicUi{Writer: writer} + ui.Error("HELLO") + + if writer.String() != "HELLO\n" { + t.Fatalf("bad: %s", writer.String()) + } +} + +func TestBasicUi_Error_ErrorWriter(t *testing.T) { + writer := new(bytes.Buffer) + ewriter := new(bytes.Buffer) + ui := &BasicUi{Writer: writer, ErrorWriter: ewriter} + ui.Error("HELLO") + + if ewriter.String() != "HELLO\n" { + t.Fatalf("bad: %s", ewriter.String()) + } +} + +func TestBasicUi_Output(t *testing.T) { + writer := new(bytes.Buffer) + ui := &BasicUi{Writer: writer} + ui.Output("HELLO") + + if writer.String() != "HELLO\n" { + t.Fatalf("bad: %s", writer.String()) + } +} + +func TestBasicUi_Warn(t *testing.T) { + writer := new(bytes.Buffer) + ui := &BasicUi{Writer: writer} + ui.Warn("HELLO") + + if writer.String() != "HELLO\n" { + t.Fatalf("bad: %s", writer.String()) + } +} + +func TestPrefixedUi_implements(t *testing.T) { + var _ Ui = new(PrefixedUi) +} + +func TestPrefixedUiError(t *testing.T) { + ui := new(MockUi) + p := &PrefixedUi{ + ErrorPrefix: "foo", + Ui: ui, + } + + p.Error("bar") + if ui.ErrorWriter.String() != "foobar\n" { + t.Fatalf("bad: %s", ui.ErrorWriter.String()) + } +} + +func TestPrefixedUiInfo(t *testing.T) { + ui := new(MockUi) + p := &PrefixedUi{ + InfoPrefix: "foo", + Ui: ui, + } + + p.Info("bar") + if ui.OutputWriter.String() != "foobar\n" { + t.Fatalf("bad: %s", ui.OutputWriter.String()) + } +} + +func TestPrefixedUiOutput(t *testing.T) { + ui := new(MockUi) + p := &PrefixedUi{ + OutputPrefix: "foo", + Ui: ui, + } + + p.Output("bar") + if ui.OutputWriter.String() != "foobar\n" { + t.Fatalf("bad: %s", ui.OutputWriter.String()) + } +} + +func TestPrefixedUiWarn(t *testing.T) { + ui := new(MockUi) + p := &PrefixedUi{ + WarnPrefix: "foo", + Ui: ui, + } + + p.Warn("bar") + if ui.ErrorWriter.String() != "foobar\n" { + t.Fatalf("bad: %s", ui.ErrorWriter.String()) + } +} diff --git a/vendor/github.com/mitchellh/cli/ui_writer.go b/vendor/github.com/mitchellh/cli/ui_writer.go new file mode 100644 index 000000000..1e1db3cf6 --- /dev/null +++ b/vendor/github.com/mitchellh/cli/ui_writer.go @@ -0,0 +1,18 @@ +package cli + +// UiWriter is an io.Writer implementation that can be used with +// loggers that writes every line of log output data to a Ui at the +// Info level. 
+type UiWriter struct { + Ui Ui +} + +func (w *UiWriter) Write(p []byte) (n int, err error) { + n = len(p) + if n > 0 && p[n-1] == '\n' { + p = p[:n-1] + } + + w.Ui.Info(string(p)) + return n, nil +} diff --git a/vendor/github.com/mitchellh/cli/ui_writer_test.go b/vendor/github.com/mitchellh/cli/ui_writer_test.go new file mode 100644 index 000000000..b742e9314 --- /dev/null +++ b/vendor/github.com/mitchellh/cli/ui_writer_test.go @@ -0,0 +1,37 @@ +package cli + +import ( + "io" + "testing" +) + +func TestUiWriter_impl(t *testing.T) { + var _ io.Writer = new(UiWriter) +} + +func TestUiWriter(t *testing.T) { + ui := new(MockUi) + w := &UiWriter{ + Ui: ui, + } + + w.Write([]byte("foo\n")) + w.Write([]byte("bar\n")) + + if ui.OutputWriter.String() != "foo\nbar\n" { + t.Fatalf("bad: %s", ui.OutputWriter.String()) + } +} + +func TestUiWriter_empty(t *testing.T) { + ui := new(MockUi) + w := &UiWriter{ + Ui: ui, + } + + w.Write([]byte("")) + + if ui.OutputWriter.String() != "\n" { + t.Fatalf("bad: %s", ui.OutputWriter.String()) + } +} diff --git a/vendor/github.com/mitchellh/colorstring/.travis.yml b/vendor/github.com/mitchellh/colorstring/.travis.yml new file mode 100644 index 000000000..74e286ae1 --- /dev/null +++ b/vendor/github.com/mitchellh/colorstring/.travis.yml @@ -0,0 +1,15 @@ +language: go + +go: + - 1.0 + - 1.1 + - 1.2 + - 1.3 + - tip + +script: + - go test + +matrix: + allow_failures: + - go: tip diff --git a/vendor/github.com/mitchellh/colorstring/LICENSE b/vendor/github.com/mitchellh/colorstring/LICENSE new file mode 100644 index 000000000..229851590 --- /dev/null +++ b/vendor/github.com/mitchellh/colorstring/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2014 Mitchell Hashimoto + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/vendor/github.com/mitchellh/colorstring/README.md b/vendor/github.com/mitchellh/colorstring/README.md new file mode 100644 index 000000000..0654d454d --- /dev/null +++ b/vendor/github.com/mitchellh/colorstring/README.md @@ -0,0 +1,30 @@ +# colorstring [![Build Status](https://travis-ci.org/mitchellh/colorstring.svg)](https://travis-ci.org/mitchellh/colorstring) + +colorstring is a [Go](http://www.golang.org) library for outputting colored +strings to a console using a simple inline syntax in your string to specify +the color to print as. + +For example, the string `[blue]hello [red]world` would output the text +"hello world" in two colors. The API of colorstring allows for easily disabling +colors, adding aliases, etc. 
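+
+As a quick illustration, `Color` rewrites those inline tags into ANSI escape
+codes (the exact codes shown assume the default color table):
+
+```go
+s := colorstring.Color("[blue]hello [red]world")
+// s == "\033[34mhello \033[31mworld\033[0m"
+fmt.Println(s)
+```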
+ +## Installation + +Standard `go get`: + +``` +$ go get github.com/mitchellh/colorstring +``` + +## Usage & Example + +For usage and examples see the [Godoc](http://godoc.org/github.com/mitchellh/colorstring). + +Usage is easy enough: + +```go +colorstring.Println("[blue]Hello [red]World!") +``` + +Additionally, the `Colorize` struct can be used to set options such as +custom colors, color disabling, etc. diff --git a/vendor/github.com/mitchellh/colorstring/colorstring.go b/vendor/github.com/mitchellh/colorstring/colorstring.go new file mode 100644 index 000000000..3de5b241d --- /dev/null +++ b/vendor/github.com/mitchellh/colorstring/colorstring.go @@ -0,0 +1,244 @@ +// colorstring provides functions for colorizing strings for terminal +// output. +package colorstring + +import ( + "bytes" + "fmt" + "io" + "regexp" + "strings" +) + +// Color colorizes your strings using the default settings. +// +// Strings given to Color should use the syntax `[color]` to specify the +// color for text following. For example: `[blue]Hello` will return "Hello" +// in blue. See DefaultColors for all the supported colors and attributes. +// +// If an unrecognized color is given, it is ignored and assumed to be part +// of the string. For example: `[hi]world` will result in "[hi]world". +// +// A color reset is appended to the end of every string. This will reset +// the color of following strings when you output this text to the same +// terminal session. +// +// If you want to customize any of this behavior, use the Colorize struct. +func Color(v string) string { + return def.Color(v) +} + +// ColorPrefix returns the color sequence that prefixes the given text. +// +// This is useful when wrapping text if you want to inherit the color +// of the wrapped text. For example, "[green]foo" will return "[green]". +// If there is no color sequence, then this will return "". +func ColorPrefix(v string) string { + return def.ColorPrefix(v) +} + +// Colorize colorizes your strings, giving you the ability to customize +// some of the colorization process. +// +// The options in Colorize can be set to customize colorization. If you're +// only interested in the defaults, just use the top Color function directly, +// which creates a default Colorize. +type Colorize struct { + // Colors maps a color string to the code for that color. The code + // is a string so that you can use more complex colors to set foreground, + // background, attributes, etc. For example, "boldblue" might be + // "1;34" + Colors map[string]string + + // If true, color attributes will be ignored. This is useful if you're + // outputting to a location that doesn't support colors and you just + // want the strings returned. + Disable bool + + // Reset, if true, will reset the color after each colorization by + // adding a reset code at the end. + Reset bool +} + +// Color colorizes a string according to the settings setup in the struct. +// +// For more details on the syntax, see the top-level Color function. 
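+//
+// For example, with the default color table (illustrative):
+//
+//	c := Colorize{Colors: DefaultColors, Reset: true}
+//	c.Color("[blue]hi") // "\033[34mhi\033[0m"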
+func (c *Colorize) Color(v string) string { + matches := parseRe.FindAllStringIndex(v, -1) + if len(matches) == 0 { + return v + } + + result := new(bytes.Buffer) + colored := false + m := []int{0, 0} + for _, nm := range matches { + // Write the text in between this match and the last + result.WriteString(v[m[1]:nm[0]]) + m = nm + + var replace string + if code, ok := c.Colors[v[m[0]+1:m[1]-1]]; ok { + colored = true + + if !c.Disable { + replace = fmt.Sprintf("\033[%sm", code) + } + } else { + replace = v[m[0]:m[1]] + } + + result.WriteString(replace) + } + result.WriteString(v[m[1]:]) + + if colored && c.Reset && !c.Disable { + // Write the clear byte at the end + result.WriteString("\033[0m") + } + + return result.String() +} + +// ColorPrefix returns the first color sequence that exists in this string. +// +// For example: "[green]foo" would return "[green]". If no color sequence +// exists, then "" is returned. This is especially useful when wrapping +// colored texts to inherit the color of the wrapped text. +func (c *Colorize) ColorPrefix(v string) string { + return prefixRe.FindString(strings.TrimSpace(v)) +} + +// DefaultColors are the default colors used when colorizing. +// +// If the color is surrounded in underscores, such as "_blue_", then that +// color will be used for the background color. +var DefaultColors map[string]string + +func init() { + DefaultColors = map[string]string{ + // Default foreground/background colors + "default": "39", + "_default_": "49", + + // Foreground colors + "black": "30", + "red": "31", + "green": "32", + "yellow": "33", + "blue": "34", + "magenta": "35", + "cyan": "36", + "light_gray": "37", + "dark_gray": "90", + "light_red": "91", + "light_green": "92", + "light_yellow": "93", + "light_blue": "94", + "light_magenta": "95", + "light_cyan": "96", + "white": "97", + + // Background colors + "_black_": "40", + "_red_": "41", + "_green_": "42", + "_yellow_": "43", + "_blue_": "44", + "_magenta_": "45", + "_cyan_": "46", + "_light_gray_": "47", + "_dark_gray_": "100", + "_light_red_": "101", + "_light_green_": "102", + "_light_yellow_": "103", + "_light_blue_": "104", + "_light_magenta_": "105", + "_light_cyan_": "106", + "_white_": "107", + + // Attributes + "bold": "1", + "dim": "2", + "underline": "4", + "blink_slow": "5", + "blink_fast": "6", + "invert": "7", + "hidden": "8", + + // Reset to reset everything to their defaults + "reset": "0", + "reset_bold": "21", + } + + def = Colorize{ + Colors: DefaultColors, + Reset: true, + } +} + +var def Colorize +var parseReRaw = `\[[a-z0-9_-]+\]` +var parseRe = regexp.MustCompile(`(?i)` + parseReRaw) +var prefixRe = regexp.MustCompile(`^(?i)(` + parseReRaw + `)+`) + +// Print is a convenience wrapper for fmt.Print with support for color codes. +// +// Print formats using the default formats for its operands and writes to +// standard output with support for color codes. Spaces are added between +// operands when neither is a string. It returns the number of bytes written +// and any write error encountered. +func Print(a string) (n int, err error) { + return fmt.Print(Color(a)) +} + +// Println is a convenience wrapper for fmt.Println with support for color +// codes. +// +// Println formats using the default formats for its operands and writes to +// standard output with support for color codes. Spaces are always added +// between operands and a newline is appended. It returns the number of bytes +// written and any write error encountered. 
+func Println(a string) (n int, err error) { + return fmt.Println(Color(a)) +} + +// Printf is a convenience wrapper for fmt.Printf with support for color codes. +// +// Printf formats according to a format specifier and writes to standard output +// with support for color codes. It returns the number of bytes written and any +// write error encountered. +func Printf(format string, a ...interface{}) (n int, err error) { + return fmt.Printf(Color(format), a...) +} + +// Fprint is a convenience wrapper for fmt.Fprint with support for color codes. +// +// Fprint formats using the default formats for its operands and writes to w +// with support for color codes. Spaces are added between operands when neither +// is a string. It returns the number of bytes written and any write error +// encountered. +func Fprint(w io.Writer, a string) (n int, err error) { + return fmt.Fprint(w, Color(a)) +} + +// Fprintln is a convenience wrapper for fmt.Fprintln with support for color +// codes. +// +// Fprintln formats using the default formats for its operands and writes to w +// with support for color codes. Spaces are always added between operands and a +// newline is appended. It returns the number of bytes written and any write +// error encountered. +func Fprintln(w io.Writer, a string) (n int, err error) { + return fmt.Fprintln(w, Color(a)) +} + +// Fprintf is a convenience wrapper for fmt.Fprintf with support for color +// codes. +// +// Fprintf formats according to a format specifier and writes to w with support +// for color codes. It returns the number of bytes written and any write error +// encountered. +func Fprintf(w io.Writer, format string, a ...interface{}) (n int, err error) { + return fmt.Fprintf(w, Color(format), a...) +} diff --git a/vendor/github.com/mitchellh/colorstring/colorstring_test.go b/vendor/github.com/mitchellh/colorstring/colorstring_test.go new file mode 100644 index 000000000..3e76abef6 --- /dev/null +++ b/vendor/github.com/mitchellh/colorstring/colorstring_test.go @@ -0,0 +1,185 @@ +package colorstring + +import ( + "os" + "testing" +) + +func TestColor(t *testing.T) { + cases := []struct { + Input, Output string + }{ + { + Input: "foo", + Output: "foo", + }, + + { + Input: "[blue]foo", + Output: "\033[34mfoo\033[0m", + }, + + { + Input: "foo[blue]foo", + Output: "foo\033[34mfoo\033[0m", + }, + + { + Input: "foo[what]foo", + Output: "foo[what]foo", + }, + { + Input: "foo[_blue_]foo", + Output: "foo\033[44mfoo\033[0m", + }, + { + Input: "foo[bold]foo", + Output: "foo\033[1mfoo\033[0m", + }, + { + Input: "[blue]foo[bold]bar", + Output: "\033[34mfoo\033[1mbar\033[0m", + }, + { + Input: "[underline]foo[reset]bar", + Output: "\033[4mfoo\033[0mbar\033[0m", + }, + } + + for _, tc := range cases { + actual := Color(tc.Input) + if actual != tc.Output { + t.Errorf( + "Input: %#v\n\nOutput: %#v\n\nExpected: %#v", + tc.Input, + actual, + tc.Output) + } + } +} + +func TestColorPrefix(t *testing.T) { + cases := []struct { + Input, Output string + }{ + { + Input: "foo", + Output: "", + }, + + { + Input: "[blue]foo", + Output: "[blue]", + }, + + { + Input: "[bold][blue]foo", + Output: "[bold][blue]", + }, + + { + Input: " [bold][blue]foo", + Output: "[bold][blue]", + }, + } + + for _, tc := range cases { + actual := ColorPrefix(tc.Input) + if actual != tc.Output { + t.Errorf( + "Input: %#v\n\nOutput: %#v\n\nExpected: %#v", + tc.Input, + actual, + tc.Output) + } + } +} + +func TestColorizeColor_disable(t *testing.T) { + c := def + c.Disable = true + + cases := []struct { + Input, Output 
string + }{ + { + "[blue]foo", + "foo", + }, + + { + "[foo]bar", + "[foo]bar", + }, + } + + for _, tc := range cases { + actual := c.Color(tc.Input) + if actual != tc.Output { + t.Errorf( + "Input: %#v\n\nOutput: %#v\n\nExpected: %#v", + tc.Input, + actual, + tc.Output) + } + } +} + +func TestColorizeColor_noReset(t *testing.T) { + c := def + c.Reset = false + + input := "[blue]foo" + output := "\033[34mfoo" + actual := c.Color(input) + if actual != output { + t.Errorf( + "Input: %#v\n\nOutput: %#v\n\nExpected: %#v", + input, + actual, + output) + } +} + +func TestConvenienceWrappers(t *testing.T) { + var length int + printInput := "[bold]Print:\t\t[default][red]R[green]G[blue]B[cyan]C[magenta]M[yellow]Y\n" + printlnInput := "[bold]Println:\t[default][red]R[green]G[blue]B[cyan]C[magenta]M[yellow]Y" + printfInput := "[bold]Printf:\t\t[default][red]R[green]G[blue]B[cyan]C[magenta]M[yellow]Y\n" + fprintInput := "[bold]Fprint:\t\t[default][red]R[green]G[blue]B[cyan]C[magenta]M[yellow]Y\n" + fprintlnInput := "[bold]Fprintln:\t[default][red]R[green]G[blue]B[cyan]C[magenta]M[yellow]Y" + fprintfInput := "[bold]Fprintf:\t[default][red]R[green]G[blue]B[cyan]C[magenta]M[yellow]Y\n" + + // colorstring.Print + length, _ = Print(printInput) + assertOutputLength(t, printInput, 58, length) + + // colorstring.Println + length, _ = Println(printlnInput) + assertOutputLength(t, printlnInput, 59, length) + + // colorstring.Printf + length, _ = Printf(printfInput) + assertOutputLength(t, printfInput, 59, length) + + // colorstring.Fprint + length, _ = Fprint(os.Stdout, fprintInput) + assertOutputLength(t, fprintInput, 59, length) + + // colorstring.Fprintln + length, _ = Fprintln(os.Stdout, fprintlnInput) + assertOutputLength(t, fprintlnInput, 60, length) + + // colorstring.Fprintf + length, _ = Fprintf(os.Stdout, fprintfInput) + assertOutputLength(t, fprintfInput, 59, length) +} + +func assertOutputLength(t *testing.T, input string, expectedLength int, actualLength int) { + if actualLength != expectedLength { + t.Errorf("Input: %#v\n\n Output length: %d\n\n Expected: %d", + input, + actualLength, + expectedLength) + } +} diff --git a/vendor/github.com/mitchellh/copystructure/LICENSE b/vendor/github.com/mitchellh/copystructure/LICENSE new file mode 100644 index 000000000..229851590 --- /dev/null +++ b/vendor/github.com/mitchellh/copystructure/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2014 Mitchell Hashimoto + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. 
diff --git a/vendor/github.com/mitchellh/copystructure/README.md b/vendor/github.com/mitchellh/copystructure/README.md new file mode 100644 index 000000000..bcb8c8d2c --- /dev/null +++ b/vendor/github.com/mitchellh/copystructure/README.md @@ -0,0 +1,21 @@ +# copystructure + +copystructure is a Go library for deep copying values in Go. + +This allows you to copy Go values that may contain reference values +such as maps, slices, or pointers, and copy their data as well instead +of just their references. + +## Installation + +Standard `go get`: + +``` +$ go get github.com/mitchellh/copystructure +``` + +## Usage & Example + +For usage and examples see the [Godoc](http://godoc.org/github.com/mitchellh/copystructure). + +The `Copy` function has examples associated with it there. diff --git a/vendor/github.com/mitchellh/copystructure/copier_time.go b/vendor/github.com/mitchellh/copystructure/copier_time.go new file mode 100644 index 000000000..db6a6aa1a --- /dev/null +++ b/vendor/github.com/mitchellh/copystructure/copier_time.go @@ -0,0 +1,15 @@ +package copystructure + +import ( + "reflect" + "time" +) + +func init() { + Copiers[reflect.TypeOf(time.Time{})] = timeCopier +} + +func timeCopier(v interface{}) (interface{}, error) { + // Just... copy it. + return v.(time.Time), nil +} diff --git a/vendor/github.com/mitchellh/copystructure/copier_time_test.go b/vendor/github.com/mitchellh/copystructure/copier_time_test.go new file mode 100644 index 000000000..5506a0ff1 --- /dev/null +++ b/vendor/github.com/mitchellh/copystructure/copier_time_test.go @@ -0,0 +1,17 @@ +package copystructure + +import ( + "testing" + "time" +) + +func TestTimeCopier(t *testing.T) { + v := time.Now().UTC() + result, err := timeCopier(v) + if err != nil { + t.Fatalf("err: %s", err) + } + if result.(time.Time) != v { + t.Fatalf("bad: %#v\n\n%#v", v, result) + } +} diff --git a/vendor/github.com/mitchellh/copystructure/copystructure.go b/vendor/github.com/mitchellh/copystructure/copystructure.go new file mode 100644 index 000000000..7ef83d8aa --- /dev/null +++ b/vendor/github.com/mitchellh/copystructure/copystructure.go @@ -0,0 +1,281 @@ +package copystructure + +import ( + "reflect" + + "github.com/mitchellh/reflectwalk" +) + +// Copy returns a deep copy of v. +func Copy(v interface{}) (interface{}, error) { + w := new(walker) + err := reflectwalk.Walk(v, w) + if err != nil { + return nil, err + } + + // Get the result. If the result is nil, then we want to turn it + // into a typed nil if we can. + result := w.Result + if result == nil { + val := reflect.ValueOf(v) + result = reflect.Indirect(reflect.New(val.Type())).Interface() + } + + return result, nil +} + +// CopierFunc is a function that knows how to deep copy a specific type. +// Register these globally with the Copiers variable. +type CopierFunc func(interface{}) (interface{}, error) + +// Copiers is a map of types that behave specially when they are copied. +// If a type is found in this map while deep copying, this function +// will be called to copy it instead of attempting to copy all fields. +// +// The key should be the type, obtained using: reflect.TypeOf(value with type). +// +// It is unsafe to write to this map after Copies have started. If you +// are writing to this map while also copying, wrap all modifications to +// this map as well as to Copy in a mutex. 
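+//
+// For example, registering a copier for a hypothetical type T (this
+// mirrors the time.Time copier registered in copier_time.go):
+//
+//	Copiers[reflect.TypeOf(T{})] = func(v interface{}) (interface{}, error) {
+//		return v.(T), nil // a plain value copy is enough for T
+//	}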
+var Copiers map[reflect.Type]CopierFunc = make(map[reflect.Type]CopierFunc) + +type walker struct { + Result interface{} + + depth int + ignoreDepth int + vals []reflect.Value + cs []reflect.Value + ps []bool +} + +func (w *walker) Enter(l reflectwalk.Location) error { + w.depth++ + return nil +} + +func (w *walker) Exit(l reflectwalk.Location) error { + w.depth-- + if w.ignoreDepth > w.depth { + w.ignoreDepth = 0 + } + + if w.ignoring() { + return nil + } + + switch l { + case reflectwalk.Map: + fallthrough + case reflectwalk.Slice: + // Pop map off our container + w.cs = w.cs[:len(w.cs)-1] + case reflectwalk.MapValue: + // Pop off the key and value + mv := w.valPop() + mk := w.valPop() + m := w.cs[len(w.cs)-1] + m.SetMapIndex(mk, mv) + case reflectwalk.SliceElem: + // Pop off the value and the index and set it on the slice + v := w.valPop() + i := w.valPop().Interface().(int) + s := w.cs[len(w.cs)-1] + s.Index(i).Set(v) + case reflectwalk.Struct: + w.replacePointerMaybe() + + // Remove the struct from the container stack + w.cs = w.cs[:len(w.cs)-1] + case reflectwalk.StructField: + // Pop off the value and the field + v := w.valPop() + f := w.valPop().Interface().(reflect.StructField) + if v.IsValid() { + s := w.cs[len(w.cs)-1] + sf := reflect.Indirect(s).FieldByName(f.Name) + sf.Set(v) + } + case reflectwalk.WalkLoc: + // Clear out the slices for GC + w.cs = nil + w.vals = nil + } + + return nil +} + +func (w *walker) Map(m reflect.Value) error { + if w.ignoring() { + return nil + } + + // Get the type for the map + t := m.Type() + mapType := reflect.MapOf(t.Key(), t.Elem()) + + // Create the map. If the map itself is nil, then just make a nil map + var newMap reflect.Value + if m.IsNil() { + newMap = reflect.Indirect(reflect.New(mapType)) + } else { + newMap = reflect.MakeMap(reflect.MapOf(t.Key(), t.Elem())) + } + + w.cs = append(w.cs, newMap) + w.valPush(newMap) + return nil +} + +func (w *walker) MapElem(m, k, v reflect.Value) error { + return nil +} + +func (w *walker) PointerEnter(v bool) error { + if w.ignoring() { + return nil + } + + w.ps = append(w.ps, v) + return nil +} + +func (w *walker) PointerExit(bool) error { + if w.ignoring() { + return nil + } + + w.ps = w.ps[:len(w.ps)-1] + return nil +} + +func (w *walker) Primitive(v reflect.Value) error { + if w.ignoring() { + return nil + } + + // IsValid verifies the v is non-zero and CanInterface verifies + // that we're allowed to read this value (unexported fields). + var newV reflect.Value + if v.IsValid() && v.CanInterface() { + newV = reflect.New(v.Type()) + reflect.Indirect(newV).Set(v) + } + + w.valPush(newV) + w.replacePointerMaybe() + return nil +} + +func (w *walker) Slice(s reflect.Value) error { + if w.ignoring() { + return nil + } + + var newS reflect.Value + if s.IsNil() { + newS = reflect.Indirect(reflect.New(s.Type())) + } else { + newS = reflect.MakeSlice(s.Type(), s.Len(), s.Cap()) + } + + w.cs = append(w.cs, newS) + w.valPush(newS) + return nil +} + +func (w *walker) SliceElem(i int, elem reflect.Value) error { + if w.ignoring() { + return nil + } + + // We don't write the slice here because elem might still be + // arbitrarily complex. Just record the index and continue on. + w.valPush(reflect.ValueOf(i)) + + return nil +} + +func (w *walker) Struct(s reflect.Value) error { + if w.ignoring() { + return nil + } + + var v reflect.Value + if c, ok := Copiers[s.Type()]; ok { + // We have a Copier for this struct, so we use that copier to + // get the copy, and we ignore anything deeper than this. 
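+		// (Recording the current depth makes ignoring() return true for
+		// everything at or below this level, so the walker skips the
+		// struct's fields entirely.)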
+ w.ignoreDepth = w.depth + + dup, err := c(s.Interface()) + if err != nil { + return err + } + + v = reflect.ValueOf(dup) + } else { + // No copier, we copy ourselves and allow reflectwalk to guide + // us deeper into the structure for copying. + v = reflect.New(s.Type()) + } + + // Push the value onto the value stack for setting the struct field, + // and add the struct itself to the containers stack in case we walk + // deeper so that its own fields can be modified. + w.valPush(v) + w.cs = append(w.cs, v) + + return nil +} + +func (w *walker) StructField(f reflect.StructField, v reflect.Value) error { + if w.ignoring() { + return nil + } + + // Push the field onto the stack, we'll handle it when we exit + // the struct field in Exit... + w.valPush(reflect.ValueOf(f)) + return nil +} + +func (w *walker) ignoring() bool { + return w.ignoreDepth > 0 && w.depth >= w.ignoreDepth +} + +func (w *walker) pointerPeek() bool { + return w.ps[len(w.ps)-1] +} + +func (w *walker) valPop() reflect.Value { + result := w.vals[len(w.vals)-1] + w.vals = w.vals[:len(w.vals)-1] + + // If we're out of values, that means we popped everything off. In + // this case, we reset the result so the next pushed value becomes + // the result. + if len(w.vals) == 0 { + w.Result = nil + } + + return result +} + +func (w *walker) valPush(v reflect.Value) { + w.vals = append(w.vals, v) + + // If we haven't set the result yet, then this is the result since + // it is the first (outermost) value we're seeing. + if w.Result == nil && v.IsValid() { + w.Result = v.Interface() + } +} + +func (w *walker) replacePointerMaybe() { + // Determine the last pointer value. If it is NOT a pointer, then + // we need to push that onto the stack. + if !w.pointerPeek() { + w.valPush(reflect.Indirect(w.valPop())) + } +} diff --git a/vendor/github.com/mitchellh/copystructure/copystructure_examples_test.go b/vendor/github.com/mitchellh/copystructure/copystructure_examples_test.go new file mode 100644 index 000000000..e094b8626 --- /dev/null +++ b/vendor/github.com/mitchellh/copystructure/copystructure_examples_test.go @@ -0,0 +1,22 @@ +package copystructure + +import ( + "fmt" +) + +func ExampleCopy() { + input := map[string]interface{}{ + "bob": map[string]interface{}{ + "emails": []string{"a", "b"}, + }, + } + + dup, err := Copy(input) + if err != nil { + panic(err) + } + + fmt.Printf("%#v", dup) + // Output: + // map[string]interface {}{"bob":map[string]interface {}{"emails":[]string{"a", "b"}}} +} diff --git a/vendor/github.com/mitchellh/copystructure/copystructure_test.go b/vendor/github.com/mitchellh/copystructure/copystructure_test.go new file mode 100644 index 000000000..c2c39e836 --- /dev/null +++ b/vendor/github.com/mitchellh/copystructure/copystructure_test.go @@ -0,0 +1,194 @@ +package copystructure + +import ( + "reflect" + "testing" + "time" +) + +func TestCopy_complex(t *testing.T) { + v := map[string]interface{}{ + "foo": []string{"a", "b"}, + "bar": "baz", + } + + result, err := Copy(v) + if err != nil { + t.Fatalf("err: %s", err) + } + + if !reflect.DeepEqual(result, v) { + t.Fatalf("bad: %#v", result) + } +} + +func TestCopy_primitive(t *testing.T) { + cases := []interface{}{ + 42, + "foo", + 1.2, + } + + for _, tc := range cases { + result, err := Copy(tc) + if err != nil { + t.Fatalf("err: %s", err) + } + if result != tc { + t.Fatalf("bad: %#v", result) + } + } +} + +func TestCopy_primitivePtr(t *testing.T) { + cases := []interface{}{ + 42, + "foo", + 1.2, + } + + for _, tc := range cases { + result, err := Copy(&tc) + if 
err != nil { + t.Fatalf("err: %s", err) + } + + if !reflect.DeepEqual(result, &tc) { + t.Fatalf("bad: %#v", result) + } + } +} + +func TestCopy_map(t *testing.T) { + v := map[string]interface{}{ + "bar": "baz", + } + + result, err := Copy(v) + if err != nil { + t.Fatalf("err: %s", err) + } + + if !reflect.DeepEqual(result, v) { + t.Fatalf("bad: %#v", result) + } +} + +func TestCopy_slice(t *testing.T) { + v := []string{"bar", "baz"} + + result, err := Copy(v) + if err != nil { + t.Fatalf("err: %s", err) + } + + if !reflect.DeepEqual(result, v) { + t.Fatalf("bad: %#v", result) + } +} + +func TestCopy_struct(t *testing.T) { + type test struct { + Value string + } + + v := test{Value: "foo"} + + result, err := Copy(v) + if err != nil { + t.Fatalf("err: %s", err) + } + + if !reflect.DeepEqual(result, v) { + t.Fatalf("bad: %#v", result) + } +} + +func TestCopy_structPtr(t *testing.T) { + type test struct { + Value string + } + + v := &test{Value: "foo"} + + result, err := Copy(v) + if err != nil { + t.Fatalf("err: %s", err) + } + + if !reflect.DeepEqual(result, v) { + t.Fatalf("bad: %#v", result) + } +} + +func TestCopy_structNil(t *testing.T) { + type test struct { + Value string + } + + var v *test + result, err := Copy(v) + if err != nil { + t.Fatalf("err: %s", err) + } + if v, ok := result.(*test); !ok { + t.Fatalf("bad: %#v", result) + } else if v != nil { + t.Fatalf("bad: %#v", v) + } +} + +func TestCopy_structNested(t *testing.T) { + type TestInner struct{} + + type Test struct { + Test *TestInner + } + + v := Test{} + + result, err := Copy(v) + if err != nil { + t.Fatalf("err: %s", err) + } + + if !reflect.DeepEqual(result, v) { + t.Fatalf("bad: %#v", result) + } +} + +func TestCopy_structUnexported(t *testing.T) { + type test struct { + Value string + + private string + } + + v := test{Value: "foo"} + + result, err := Copy(v) + if err != nil { + t.Fatalf("err: %s", err) + } + + if !reflect.DeepEqual(result, v) { + t.Fatalf("bad: %#v", result) + } +} + +func TestCopy_time(t *testing.T) { + type test struct { + Value time.Time + } + + v := test{Value: time.Now().UTC()} + + result, err := Copy(v) + if err != nil { + t.Fatalf("err: %s", err) + } + + if !reflect.DeepEqual(result, v) { + t.Fatalf("bad: %#v", result) + } +} diff --git a/vendor/github.com/mitchellh/go-homedir/LICENSE b/vendor/github.com/mitchellh/go-homedir/LICENSE new file mode 100644 index 000000000..f9c841a51 --- /dev/null +++ b/vendor/github.com/mitchellh/go-homedir/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2013 Mitchell Hashimoto + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
diff --git a/vendor/github.com/mitchellh/go-homedir/README.md b/vendor/github.com/mitchellh/go-homedir/README.md
new file mode 100644
index 000000000..d70706d5b
--- /dev/null
+++ b/vendor/github.com/mitchellh/go-homedir/README.md
@@ -0,0 +1,14 @@
+# go-homedir
+
+This is a Go library for detecting the user's home directory without
+the use of cgo, so the library can be used in cross-compilation environments.
+
+Usage is simple: call `homedir.Dir()` to get the home directory
+for a user, and `homedir.Expand()` to expand the `~` in a path to the home
+directory.
+
+**Why not just use `os/user`?** The built-in `os/user` package requires
+cgo on Darwin systems, which means any Go code that uses that package
+cannot be cross-compiled. But 99% of the time the use for `os/user` is just to
+retrieve the home directory, which we can do for the current user without
+cgo. This library does that, enabling cross-compilation.
diff --git a/vendor/github.com/mitchellh/go-homedir/homedir.go b/vendor/github.com/mitchellh/go-homedir/homedir.go
new file mode 100644
index 000000000..6944957d5
--- /dev/null
+++ b/vendor/github.com/mitchellh/go-homedir/homedir.go
@@ -0,0 +1,132 @@
+package homedir
+
+import (
+	"bytes"
+	"errors"
+	"os"
+	"os/exec"
+	"path/filepath"
+	"runtime"
+	"strconv"
+	"strings"
+	"sync"
+)
+
+// DisableCache will disable caching of the home directory. Caching is enabled
+// by default.
+var DisableCache bool
+
+var homedirCache string
+var cacheLock sync.RWMutex
+
+// Dir returns the home directory for the executing user.
+//
+// This uses an OS-specific method for discovering the home directory.
+// An error is returned if a home directory cannot be detected.
+func Dir() (string, error) {
+	if !DisableCache {
+		cacheLock.RLock()
+		cached := homedirCache
+		cacheLock.RUnlock()
+		if cached != "" {
+			return cached, nil
+		}
+	}
+
+	cacheLock.Lock()
+	defer cacheLock.Unlock()
+
+	var result string
+	var err error
+	if runtime.GOOS == "windows" {
+		result, err = dirWindows()
+	} else {
+		// Unix-like system, so just assume Unix
+		result, err = dirUnix()
+	}
+
+	if err != nil {
+		return "", err
+	}
+	homedirCache = result
+	return result, nil
+}
+
+// Expand expands the path to include the home directory if the path
+// is prefixed with `~`. If it isn't prefixed with `~`, the path is
+// returned as-is.
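+//
+// For example (illustrative; the result depends on the current user's
+// home directory):
+//
+//	expanded, err := homedir.Expand("~/foo")
+//	// expanded is now something like "/home/user/foo"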
+func Expand(path string) (string, error) {
+	if len(path) == 0 {
+		return path, nil
+	}
+
+	if path[0] != '~' {
+		return path, nil
+	}
+
+	if len(path) > 1 && path[1] != '/' && path[1] != '\\' {
+		return "", errors.New("cannot expand user-specific home dir")
+	}
+
+	dir, err := Dir()
+	if err != nil {
+		return "", err
+	}
+
+	return filepath.Join(dir, path[1:]), nil
+}
+
+func dirUnix() (string, error) {
+	// First prefer the HOME environment variable
+	if home := os.Getenv("HOME"); home != "" {
+		return home, nil
+	}
+
+	// If that fails, try getent
+	var stdout bytes.Buffer
+	cmd := exec.Command("getent", "passwd", strconv.Itoa(os.Getuid()))
+	cmd.Stdout = &stdout
+	if err := cmd.Run(); err != nil {
+		// If "getent" is missing, ignore it
+		if err != exec.ErrNotFound {
+			return "", err
+		}
+	} else {
+		if passwd := strings.TrimSpace(stdout.String()); passwd != "" {
+			// username:password:uid:gid:gecos:home:shell
+			passwdParts := strings.SplitN(passwd, ":", 7)
+			if len(passwdParts) > 5 {
+				return passwdParts[5], nil
+			}
+		}
+	}
+
+	// If all else fails, try the shell
+	stdout.Reset()
+	cmd = exec.Command("sh", "-c", "cd && pwd")
+	cmd.Stdout = &stdout
+	if err := cmd.Run(); err != nil {
+		return "", err
+	}
+
+	result := strings.TrimSpace(stdout.String())
+	if result == "" {
+		return "", errors.New("blank output when reading home directory")
+	}
+
+	return result, nil
+}
+
+func dirWindows() (string, error) {
+	drive := os.Getenv("HOMEDRIVE")
+	path := os.Getenv("HOMEPATH")
+	home := drive + path
+	if drive == "" || path == "" {
+		home = os.Getenv("USERPROFILE")
+	}
+	if home == "" {
+		return "", errors.New("HOMEDRIVE, HOMEPATH, and USERPROFILE are blank")
+	}
+
+	return home, nil
+}
diff --git a/vendor/github.com/mitchellh/go-homedir/homedir_test.go b/vendor/github.com/mitchellh/go-homedir/homedir_test.go
new file mode 100644
index 000000000..c34dbc7f2
--- /dev/null
+++ b/vendor/github.com/mitchellh/go-homedir/homedir_test.go
@@ -0,0 +1,112 @@
+package homedir
+
+import (
+	"fmt"
+	"os"
+	"os/user"
+	"testing"
+)
+
+func patchEnv(key, value string) func() {
+	bck := os.Getenv(key)
+	deferFunc := func() {
+		os.Setenv(key, bck)
+	}
+
+	os.Setenv(key, value)
+	return deferFunc
+}
+
+func BenchmarkDir(b *testing.B) {
+	// We do this for any "warmups"
+	for i := 0; i < 10; i++ {
+		Dir()
+	}
+
+	b.ResetTimer()
+	for i := 0; i < b.N; i++ {
+		Dir()
+	}
+}
+
+func TestDir(t *testing.T) {
+	u, err := user.Current()
+	if err != nil {
+		t.Fatalf("err: %s", err)
+	}
+
+	dir, err := Dir()
+	if err != nil {
+		t.Fatalf("err: %s", err)
+	}
+
+	if u.HomeDir != dir {
+		t.Fatalf("%#v != %#v", u.HomeDir, dir)
+	}
+}
+
+func TestExpand(t *testing.T) {
+	u, err := user.Current()
+	if err != nil {
+		t.Fatalf("err: %s", err)
+	}
+
+	cases := []struct {
+		Input  string
+		Output string
+		Err    bool
+	}{
+		{
+			"/foo",
+			"/foo",
+			false,
+		},
+
+		{
+			"~/foo",
+			fmt.Sprintf("%s/foo", u.HomeDir),
+			false,
+		},
+
+		{
+			"",
+			"",
+			false,
+		},
+
+		{
+			"~",
+			u.HomeDir,
+			false,
+		},
+
+		{
+			"~foo/foo",
+			"",
+			true,
+		},
+	}
+
+	for _, tc := range cases {
+		actual, err := Expand(tc.Input)
+		if (err != nil) != tc.Err {
+			t.Fatalf("Input: %#v\n\nErr: %s", tc.Input, err)
+		}
+
+		if actual != tc.Output {
+			t.Fatalf("Input: %#v\n\nOutput: %#v", tc.Input, actual)
+		}
+	}
+
+	DisableCache = true
+	defer func() { DisableCache = false }()
+	defer patchEnv("HOME", "/custom/path/")()
+	expected := "/custom/path/foo/bar"
+	actual, err := Expand("~/foo/bar")
+
+	if err != nil {
+		t.Errorf("No error is expected, got: %v", err)
+	} else if actual != expected {
+		t.Errorf("Expected: %v; actual: %v", expected, actual)
+	}
+}
diff --git a/vendor/github.com/mitchellh/go-linereader/LICENSE.md b/vendor/github.com/mitchellh/go-linereader/LICENSE.md
new file mode 100644
index 000000000..229851590
--- /dev/null
+++ b/vendor/github.com/mitchellh/go-linereader/LICENSE.md
@@ -0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright (c) 2014 Mitchell Hashimoto
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
diff --git a/vendor/github.com/mitchellh/go-linereader/README.md b/vendor/github.com/mitchellh/go-linereader/README.md
new file mode 100644
index 000000000..cee8b16e7
--- /dev/null
+++ b/vendor/github.com/mitchellh/go-linereader/README.md
@@ -0,0 +1,30 @@
+# go-linereader
+
+`go-linereader` (Golang package: `linereader`) is a package for Go that
+breaks up the input from an io.Reader into multiple lines. It is
+a lot like `bufio.Scanner`, except you can specify timeouts that will push
+"lines" through after a certain amount of time. This lets you read lines,
+but still receive buffered data if a line isn't completed for some time.
+
+## Installation and Usage
+
+Install using `go get github.com/mitchellh/go-linereader`.
+
+Full documentation is available at
+http://godoc.org/github.com/mitchellh/go-linereader
+
+Below is an example of its usage, ignoring errors:
+
+```go
+// Assume r is some io.Reader. Perhaps a file, network, anything.
+var r io.Reader
+
+// Initialize the line reader
+lr := linereader.New(r)
+
+// Get all the lines
+for line := range lr.Ch {
+	// Do something with the line. This line will have the line separator
+	// removed.
+}
+```
diff --git a/vendor/github.com/mitchellh/go-linereader/linereader.go b/vendor/github.com/mitchellh/go-linereader/linereader.go
new file mode 100644
index 000000000..6cf9b9b97
--- /dev/null
+++ b/vendor/github.com/mitchellh/go-linereader/linereader.go
@@ -0,0 +1,118 @@
+package linereader
+
+import (
+	"bufio"
+	"io"
+	"sync/atomic"
+	"time"
+)
+
+// Reader takes an io.Reader and pushes the lines out onto the channel.
+type Reader struct {
+	Reader  io.Reader
+	Timeout time.Duration
+
+	// Ch is the output channel. This will be closed when there are no
+	// more lines (io.EOF).
+	Ch chan string
+
+	started uint32
+}
+
+// New creates a new Reader that reads lines from the io.Reader.
+//
+// The Reader is already started when returned, so it is unsafe to modify
+// any struct fields.
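+//
+// A minimal usage sketch (r is any io.Reader):
+//
+//	lr := linereader.New(r)
+//	for line := range lr.Ch {
+//		// line arrives with the separator removed; Ch is closed at io.EOF
+//	}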
+func New(r io.Reader) *Reader {
+	result := &Reader{
+		Reader:  r,
+		Timeout: 100 * time.Millisecond,
+		Ch:      make(chan string),
+	}
+
+	go result.Run()
+	return result
+}
+
+// Run reads from the Reader and dispatches lines on the Ch channel.
+//
+// This blocks and is usually called with `go` prefixed to dispatch onto
+// a goroutine. It is safe to call this function multiple times; subsequent
+// calls to Run will exit without running.
+func (r *Reader) Run() {
+	if !atomic.CompareAndSwapUint32(&r.started, 0, 1) {
+		return
+	}
+
+	// When we're done, close the channel
+	defer close(r.Ch)
+
+	// Listen for bytes in a goroutine. We do this so that if we're blocking
+	// we can flush the bytes we have after some configured time.
+	//
+	// NOTE: This isn't particularly performant; there is probably a better
+	// way to do this than sending single bytes on a channel, but it works
+	// fine.
+	buf := bufio.NewReader(r.Reader)
+	byteCh := make(chan byte)
+	doneCh := make(chan error)
+	go func() {
+		defer close(doneCh)
+		for {
+			b, err := buf.ReadByte()
+			if err != nil {
+				doneCh <- err
+				return
+			}
+
+			byteCh <- b
+		}
+	}()
+
+	lineBuf := make([]byte, 0, 80)
+	for {
+		var err error
+		line := lineBuf[0:0]
+		for {
+			brk := false
+
+			select {
+			case b := <-byteCh:
+				brk = b == '\n'
+				if !brk {
+					line = append(line, b)
+				}
+			case err = <-doneCh:
+				brk = true
+			case <-time.After(r.Timeout):
+				if len(line) > 0 {
+					brk = true
+				}
+			}
+
+			if brk {
+				break
+			}
+		}
+
+		// If an error occurred and it's not an EOF, then stop reading
+		// and exit.
+		if err != nil && err != io.EOF {
+			break
+		}
+
+		// If we're at the end and the line is empty, then we're done.
+		if err == io.EOF && len(line) == 0 {
+			break
+		}
+
+		// Write out the line
+		r.Ch <- string(line)
+
+		// If we hit the end, we're done
+		if err == io.EOF {
+			break
+		}
+	}
+}
diff --git a/vendor/github.com/mitchellh/go-linereader/linereader_test.go b/vendor/github.com/mitchellh/go-linereader/linereader_test.go
new file mode 100644
index 000000000..9c4fcd1a5
--- /dev/null
+++ b/vendor/github.com/mitchellh/go-linereader/linereader_test.go
@@ -0,0 +1,48 @@
+package linereader
+
+import (
+	"bytes"
+	"io"
+	"reflect"
+	"testing"
+	"time"
+)
+
+func TestReader(t *testing.T) {
+	var buf bytes.Buffer
+	buf.WriteString("foo\nbar\n")
+
+	var result []string
+	r := New(&buf)
+	for line := range r.Ch {
+		result = append(result, line)
+	}
+
+	expected := []string{"foo", "bar"}
+	if !reflect.DeepEqual(result, expected) {
+		t.Fatalf("bad: %#v", result)
+	}
+}
+
+func TestReader_pause(t *testing.T) {
+	pr, pw := io.Pipe()
+
+	go func() {
+		defer pw.Close()
+		pw.Write([]byte("foo\n"))
+		pw.Write([]byte("bar"))
+		time.Sleep(200 * time.Millisecond)
+		pw.Write([]byte("baz\n"))
+	}()
+
+	var result []string
+	r := New(pr)
+	for line := range r.Ch {
+		result = append(result, line)
+	}
+
+	expected := []string{"foo", "bar", "baz"}
+	if !reflect.DeepEqual(result, expected) {
+		t.Fatalf("bad: %#v", result)
+	}
+}
diff --git a/vendor/github.com/mitchellh/mapstructure/.travis.yml b/vendor/github.com/mitchellh/mapstructure/.travis.yml
new file mode 100644
index 000000000..7f3fe9a96
--- /dev/null
+++ b/vendor/github.com/mitchellh/mapstructure/.travis.yml
@@ -0,0 +1,7 @@
+language: go
+
+go:
+  - 1.4
+
+script:
+  - go test
diff --git a/vendor/github.com/mitchellh/mapstructure/LICENSE b/vendor/github.com/mitchellh/mapstructure/LICENSE
new file mode 100644
index 000000000..f9c841a51
--- /dev/null +++ b/vendor/github.com/mitchellh/mapstructure/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2013 Mitchell Hashimoto + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/vendor/github.com/mitchellh/mapstructure/README.md b/vendor/github.com/mitchellh/mapstructure/README.md new file mode 100644 index 000000000..659d6885f --- /dev/null +++ b/vendor/github.com/mitchellh/mapstructure/README.md @@ -0,0 +1,46 @@ +# mapstructure + +mapstructure is a Go library for decoding generic map values to structures +and vice versa, while providing helpful error handling. + +This library is most useful when decoding values from some data stream (JSON, +Gob, etc.) where you don't _quite_ know the structure of the underlying data +until you read a part of it. You can therefore read a `map[string]interface{}` +and use this library to decode it into the proper underlying native Go +structure. + +## Installation + +Standard `go get`: + +``` +$ go get github.com/mitchellh/mapstructure +``` + +## Usage & Example + +For usage and examples see the [Godoc](http://godoc.org/github.com/mitchellh/mapstructure). + +The `Decode` function has examples associated with it there. + +## But Why?! + +Go offers fantastic standard libraries for decoding formats such as JSON. +The standard method is to have a struct pre-created, and populate that struct +from the bytes of the encoded format. This is great, but the problem is if +you have configuration or an encoding that changes slightly depending on +specific fields. For example, consider this JSON: + +```json +{ + "type": "person", + "name": "Mitchell" +} +``` + +Perhaps we can't populate a specific structure without first reading +the "type" field from the JSON. We could always do two passes over the +decoding of the JSON (reading the "type" first, and the rest later). +However, it is much simpler to just decode this into a `map[string]interface{}` +structure, read the "type" key, then use something like this library +to decode it into the proper structure. diff --git a/vendor/github.com/mitchellh/mapstructure/decode_hooks.go b/vendor/github.com/mitchellh/mapstructure/decode_hooks.go new file mode 100644 index 000000000..aa91f76ce --- /dev/null +++ b/vendor/github.com/mitchellh/mapstructure/decode_hooks.go @@ -0,0 +1,151 @@ +package mapstructure + +import ( + "errors" + "reflect" + "strconv" + "strings" + "time" +) + +// typedDecodeHook takes a raw DecodeHookFunc (an interface{}) and turns +// it into the proper DecodeHookFunc type, such as DecodeHookFuncType. 
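+// (DecodeHookFunc is an interface{} for backwards-compatibility reasons,
+// so the concrete signature has to be recovered via reflection; nil is
+// returned when the value matches neither known signature.)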
+func typedDecodeHook(h DecodeHookFunc) DecodeHookFunc { + // Create variables here so we can reference them with the reflect pkg + var f1 DecodeHookFuncType + var f2 DecodeHookFuncKind + + // Fill in the variables into this interface and the rest is done + // automatically using the reflect package. + potential := []interface{}{f1, f2} + + v := reflect.ValueOf(h) + vt := v.Type() + for _, raw := range potential { + pt := reflect.ValueOf(raw).Type() + if vt.ConvertibleTo(pt) { + return v.Convert(pt).Interface() + } + } + + return nil +} + +// DecodeHookExec executes the given decode hook. This should be used +// since it'll naturally degrade to the older backwards compatible DecodeHookFunc +// that took reflect.Kind instead of reflect.Type. +func DecodeHookExec( + raw DecodeHookFunc, + from reflect.Type, to reflect.Type, + data interface{}) (interface{}, error) { + // Build our arguments that reflect expects + argVals := make([]reflect.Value, 3) + argVals[0] = reflect.ValueOf(from) + argVals[1] = reflect.ValueOf(to) + argVals[2] = reflect.ValueOf(data) + + switch f := typedDecodeHook(raw).(type) { + case DecodeHookFuncType: + return f(from, to, data) + case DecodeHookFuncKind: + return f(from.Kind(), to.Kind(), data) + default: + return nil, errors.New("invalid decode hook signature") + } +} + +// ComposeDecodeHookFunc creates a single DecodeHookFunc that +// automatically composes multiple DecodeHookFuncs. +// +// The composed funcs are called in order, with the result of the +// previous transformation. +func ComposeDecodeHookFunc(fs ...DecodeHookFunc) DecodeHookFunc { + return func( + f reflect.Type, + t reflect.Type, + data interface{}) (interface{}, error) { + var err error + for _, f1 := range fs { + data, err = DecodeHookExec(f1, f, t, data) + if err != nil { + return nil, err + } + + // Modify the from kind to be correct with the new data + f = reflect.ValueOf(data).Type() + } + + return data, nil + } +} + +// StringToSliceHookFunc returns a DecodeHookFunc that converts +// string to []string by splitting on the given sep. +func StringToSliceHookFunc(sep string) DecodeHookFunc { + return func( + f reflect.Kind, + t reflect.Kind, + data interface{}) (interface{}, error) { + if f != reflect.String || t != reflect.Slice { + return data, nil + } + + raw := data.(string) + if raw == "" { + return []string{}, nil + } + + return strings.Split(raw, sep), nil + } +} + +// StringToTimeDurationHookFunc returns a DecodeHookFunc that converts +// strings to time.Duration. 
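+//
+// For example (an illustrative sketch), it would typically be wired into
+// a decoder through DecoderConfig:
+//
+//	config := &DecoderConfig{
+//		DecodeHook: StringToTimeDurationHookFunc(),
+//		Result:     &result,
+//	}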
+func StringToTimeDurationHookFunc() DecodeHookFunc { + return func( + f reflect.Type, + t reflect.Type, + data interface{}) (interface{}, error) { + if f.Kind() != reflect.String { + return data, nil + } + if t != reflect.TypeOf(time.Duration(5)) { + return data, nil + } + + // Convert it by parsing + return time.ParseDuration(data.(string)) + } +} + +func WeaklyTypedHook( + f reflect.Kind, + t reflect.Kind, + data interface{}) (interface{}, error) { + dataVal := reflect.ValueOf(data) + switch t { + case reflect.String: + switch f { + case reflect.Bool: + if dataVal.Bool() { + return "1", nil + } else { + return "0", nil + } + case reflect.Float32: + return strconv.FormatFloat(dataVal.Float(), 'f', -1, 64), nil + case reflect.Int: + return strconv.FormatInt(dataVal.Int(), 10), nil + case reflect.Slice: + dataType := dataVal.Type() + elemKind := dataType.Elem().Kind() + if elemKind == reflect.Uint8 { + return string(dataVal.Interface().([]uint8)), nil + } + case reflect.Uint: + return strconv.FormatUint(dataVal.Uint(), 10), nil + } + } + + return data, nil +} diff --git a/vendor/github.com/mitchellh/mapstructure/decode_hooks_test.go b/vendor/github.com/mitchellh/mapstructure/decode_hooks_test.go new file mode 100644 index 000000000..53289afcf --- /dev/null +++ b/vendor/github.com/mitchellh/mapstructure/decode_hooks_test.go @@ -0,0 +1,229 @@ +package mapstructure + +import ( + "errors" + "reflect" + "testing" + "time" +) + +func TestComposeDecodeHookFunc(t *testing.T) { + f1 := func( + f reflect.Kind, + t reflect.Kind, + data interface{}) (interface{}, error) { + return data.(string) + "foo", nil + } + + f2 := func( + f reflect.Kind, + t reflect.Kind, + data interface{}) (interface{}, error) { + return data.(string) + "bar", nil + } + + f := ComposeDecodeHookFunc(f1, f2) + + result, err := DecodeHookExec( + f, reflect.TypeOf(""), reflect.TypeOf([]byte("")), "") + if err != nil { + t.Fatalf("bad: %s", err) + } + if result.(string) != "foobar" { + t.Fatalf("bad: %#v", result) + } +} + +func TestComposeDecodeHookFunc_err(t *testing.T) { + f1 := func(reflect.Kind, reflect.Kind, interface{}) (interface{}, error) { + return nil, errors.New("foo") + } + + f2 := func(reflect.Kind, reflect.Kind, interface{}) (interface{}, error) { + panic("NOPE") + } + + f := ComposeDecodeHookFunc(f1, f2) + + _, err := DecodeHookExec( + f, reflect.TypeOf(""), reflect.TypeOf([]byte("")), 42) + if err.Error() != "foo" { + t.Fatalf("bad: %s", err) + } +} + +func TestComposeDecodeHookFunc_kinds(t *testing.T) { + var f2From reflect.Kind + + f1 := func( + f reflect.Kind, + t reflect.Kind, + data interface{}) (interface{}, error) { + return int(42), nil + } + + f2 := func( + f reflect.Kind, + t reflect.Kind, + data interface{}) (interface{}, error) { + f2From = f + return data, nil + } + + f := ComposeDecodeHookFunc(f1, f2) + + _, err := DecodeHookExec( + f, reflect.TypeOf(""), reflect.TypeOf([]byte("")), "") + if err != nil { + t.Fatalf("bad: %s", err) + } + if f2From != reflect.Int { + t.Fatalf("bad: %#v", f2From) + } +} + +func TestStringToSliceHookFunc(t *testing.T) { + f := StringToSliceHookFunc(",") + + strType := reflect.TypeOf("") + sliceType := reflect.TypeOf([]byte("")) + cases := []struct { + f, t reflect.Type + data interface{} + result interface{} + err bool + }{ + {sliceType, sliceType, 42, 42, false}, + {strType, strType, 42, 42, false}, + { + strType, + sliceType, + "foo,bar,baz", + []string{"foo", "bar", "baz"}, + false, + }, + { + strType, + sliceType, + "", + []string{}, + false, + }, + } + + for i, tc 
:= range cases {
+		actual, err := DecodeHookExec(f, tc.f, tc.t, tc.data)
+		if tc.err != (err != nil) {
+			t.Fatalf("case %d: expected err %#v", i, tc.err)
+		}
+		if !reflect.DeepEqual(actual, tc.result) {
+			t.Fatalf(
+				"case %d: expected %#v, got %#v",
+				i, tc.result, actual)
+		}
+	}
+}
+
+func TestStringToTimeDurationHookFunc(t *testing.T) {
+	f := StringToTimeDurationHookFunc()
+
+	strType := reflect.TypeOf("")
+	timeType := reflect.TypeOf(time.Duration(5))
+	cases := []struct {
+		f, t   reflect.Type
+		data   interface{}
+		result interface{}
+		err    bool
+	}{
+		{strType, timeType, "5s", 5 * time.Second, false},
+		{strType, timeType, "5", time.Duration(0), true},
+		{strType, strType, "5", "5", false},
+	}
+
+	for i, tc := range cases {
+		actual, err := DecodeHookExec(f, tc.f, tc.t, tc.data)
+		if tc.err != (err != nil) {
+			t.Fatalf("case %d: expected err %#v", i, tc.err)
+		}
+		if !reflect.DeepEqual(actual, tc.result) {
+			t.Fatalf(
+				"case %d: expected %#v, got %#v",
+				i, tc.result, actual)
+		}
+	}
+}
+
+func TestWeaklyTypedHook(t *testing.T) {
+	var f DecodeHookFunc = WeaklyTypedHook
+
+	boolType := reflect.TypeOf(true)
+	strType := reflect.TypeOf("")
+	sliceType := reflect.TypeOf([]byte(""))
+	cases := []struct {
+		f, t   reflect.Type
+		data   interface{}
+		result interface{}
+		err    bool
+	}{
+		// TO STRING
+		{
+			boolType,
+			strType,
+			false,
+			"0",
+			false,
+		},
+
+		{
+			boolType,
+			strType,
+			true,
+			"1",
+			false,
+		},
+
+		{
+			reflect.TypeOf(float32(1)),
+			strType,
+			float32(7),
+			"7",
+			false,
+		},
+
+		{
+			reflect.TypeOf(int(1)),
+			strType,
+			int(7),
+			"7",
+			false,
+		},
+
+		{
+			sliceType,
+			strType,
+			[]uint8("foo"),
+			"foo",
+			false,
+		},
+
+		{
+			reflect.TypeOf(uint(1)),
+			strType,
+			uint(7),
+			"7",
+			false,
+		},
+	}
+
+	for i, tc := range cases {
+		actual, err := DecodeHookExec(f, tc.f, tc.t, tc.data)
+		if tc.err != (err != nil) {
+			t.Fatalf("case %d: expected err %#v", i, tc.err)
+		}
+		if !reflect.DeepEqual(actual, tc.result) {
+			t.Fatalf(
+				"case %d: expected %#v, got %#v",
+				i, tc.result, actual)
+		}
+	}
+}
diff --git a/vendor/github.com/mitchellh/mapstructure/error.go b/vendor/github.com/mitchellh/mapstructure/error.go
new file mode 100644
index 000000000..47a99e5af
--- /dev/null
+++ b/vendor/github.com/mitchellh/mapstructure/error.go
@@ -0,0 +1,50 @@
+package mapstructure
+
+import (
+	"errors"
+	"fmt"
+	"sort"
+	"strings"
+)
+
+// Error implements the error interface and can represent multiple
+// errors that occur in the course of a single decode.
+type Error struct {
+	Errors []string
+}
+
+func (e *Error) Error() string {
+	points := make([]string, len(e.Errors))
+	for i, err := range e.Errors {
+		points[i] = fmt.Sprintf("* %s", err)
+	}
+
+	sort.Strings(points)
+	return fmt.Sprintf(
+		"%d error(s) decoding:\n\n%s",
+		len(e.Errors), strings.Join(points, "\n"))
+}
+
+// WrappedErrors implements the errwrap.Wrapper interface to make this
+// return value more useful with the errwrap and go-multierror libraries.
+func (e *Error) WrappedErrors() []error {
+	if e == nil {
+		return nil
+	}
+
+	result := make([]error, len(e.Errors))
+	for i, e := range e.Errors {
+		result[i] = errors.New(e)
+	}
+
+	return result
+}
+
+func appendErrors(errors []string, err error) []string {
+	switch e := err.(type) {
+	case *Error:
+		return append(errors, e.Errors...)
+	default:
+		return append(errors, e.Error())
+	}
+}
diff --git a/vendor/github.com/mitchellh/mapstructure/mapstructure.go b/vendor/github.com/mitchellh/mapstructure/mapstructure.go
new file mode 100644
index 000000000..40be5116d
--- /dev/null
+++ b/vendor/github.com/mitchellh/mapstructure/mapstructure.go
@@ -0,0 +1,745 @@
+// The mapstructure package exposes functionality to convert an
+// arbitrary map[string]interface{} into a native Go structure.
+//
+// The Go structure can be arbitrarily complex, containing slices,
+// other structs, etc. and the decoder will properly decode nested
+// maps and so on into the proper structures in the native Go struct.
+// See the examples to see what the decoder is capable of.
+package mapstructure
+
+import (
+	"errors"
+	"fmt"
+	"reflect"
+	"sort"
+	"strconv"
+	"strings"
+)
+
+// DecodeHookFunc is the callback function that can be used for
+// data transformations. See "DecodeHook" in the DecoderConfig
+// struct.
+//
+// The type should be DecodeHookFuncType or DecodeHookFuncKind.
+// Either is accepted. Types are a superset of Kinds (Types can return
+// Kinds) and are generally a richer thing to use, but Kinds are simpler
+// if you only need those.
+//
+// The reason DecodeHookFunc is multi-typed is for backwards compatibility:
+// we started with Kinds and then realized Types were the better solution,
+// but have a promise to not break backwards compat so we now support
+// both.
+type DecodeHookFunc interface{}
+
+type DecodeHookFuncType func(reflect.Type, reflect.Type, interface{}) (interface{}, error)
+type DecodeHookFuncKind func(reflect.Kind, reflect.Kind, interface{}) (interface{}, error)
+
+// DecoderConfig is the configuration that is used to create a new decoder
+// and allows customization of various aspects of decoding.
+type DecoderConfig struct {
+	// DecodeHook, if set, will be called before any decoding and any
+	// type conversion (if WeaklyTypedInput is on). This lets you modify
+	// the values before they're set down onto the resulting struct.
+	//
+	// If an error is returned, the entire decode will fail with that
+	// error.
+	DecodeHook DecodeHookFunc
+
+	// If ErrorUnused is true, then it is an error for there to exist
+	// keys in the original map that were unused in the decoding process
+	// (extra keys).
+	ErrorUnused bool
+
+	// ZeroFields, if set to true, will zero fields before writing them.
+	// For example, a map will be emptied before decoded values are put in
+	// it. If this is false, a map will be merged.
+	ZeroFields bool
+
+	// If WeaklyTypedInput is true, the decoder will make the following
+	// "weak" conversions:
+	//
+	// - bools to string (true = "1", false = "0")
+	// - numbers to string (base 10)
+	// - bools to int/uint (true = 1, false = 0)
+	// - strings to int/uint (base implied by prefix)
+	// - int to bool (true if value != 0)
+	// - string to bool (accepts: 1, t, T, TRUE, true, True, 0, f, F,
+	//   FALSE, false, False. Anything else is an error)
+	// - empty array = empty map and vice versa
+	// - negative numbers to overflowed uint values (base 10)
+	//
+	WeaklyTypedInput bool
+
+	// Metadata is the struct that will contain extra metadata about
+	// the decoding. If this is nil, then no metadata will be tracked.
+	Metadata *Metadata
+
+	// Result is a pointer to the struct that will contain the decoded
+	// value.
+	Result interface{}
+
+	// The tag name that mapstructure reads for field names.
This + // defaults to "mapstructure" + TagName string +} + +// A Decoder takes a raw interface value and turns it into structured +// data, keeping track of rich error information along the way in case +// anything goes wrong. Unlike the basic top-level Decode method, you can +// more finely control how the Decoder behaves using the DecoderConfig +// structure. The top-level Decode method is just a convenience that sets +// up the most basic Decoder. +type Decoder struct { + config *DecoderConfig +} + +// Metadata contains information about decoding a structure that +// is tedious or difficult to get otherwise. +type Metadata struct { + // Keys are the keys of the structure which were successfully decoded + Keys []string + + // Unused is a slice of keys that were found in the raw value but + // weren't decoded since there was no matching field in the result interface + Unused []string +} + +// Decode takes a map and uses reflection to convert it into the +// given Go native structure. val must be a pointer to a struct. +func Decode(m interface{}, rawVal interface{}) error { + config := &DecoderConfig{ + Metadata: nil, + Result: rawVal, + } + + decoder, err := NewDecoder(config) + if err != nil { + return err + } + + return decoder.Decode(m) +} + +// WeakDecode is the same as Decode but is shorthand to enable +// WeaklyTypedInput. See DecoderConfig for more info. +func WeakDecode(input, output interface{}) error { + config := &DecoderConfig{ + Metadata: nil, + Result: output, + WeaklyTypedInput: true, + } + + decoder, err := NewDecoder(config) + if err != nil { + return err + } + + return decoder.Decode(input) +} + +// NewDecoder returns a new decoder for the given configuration. Once +// a decoder has been returned, the same configuration must not be used +// again. +func NewDecoder(config *DecoderConfig) (*Decoder, error) { + val := reflect.ValueOf(config.Result) + if val.Kind() != reflect.Ptr { + return nil, errors.New("result must be a pointer") + } + + val = val.Elem() + if !val.CanAddr() { + return nil, errors.New("result must be addressable (a pointer)") + } + + if config.Metadata != nil { + if config.Metadata.Keys == nil { + config.Metadata.Keys = make([]string, 0) + } + + if config.Metadata.Unused == nil { + config.Metadata.Unused = make([]string, 0) + } + } + + if config.TagName == "" { + config.TagName = "mapstructure" + } + + result := &Decoder{ + config: config, + } + + return result, nil +} + +// Decode decodes the given raw interface to the target pointer specified +// by the configuration. +func (d *Decoder) Decode(raw interface{}) error { + return d.decode("", raw, reflect.ValueOf(d.config.Result).Elem()) +} + +// Decodes an unknown data type into a specific reflection value. +func (d *Decoder) decode(name string, data interface{}, val reflect.Value) error { + if data == nil { + // If the data is nil, then we don't set anything. + return nil + } + + dataVal := reflect.ValueOf(data) + if !dataVal.IsValid() { + // If the data value is invalid, then we just set the value + // to be the zero value. + val.Set(reflect.Zero(val.Type())) + return nil + } + + if d.config.DecodeHook != nil { + // We have a DecodeHook, so let's pre-process the data. 
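+		// (Any replacement value the hook returns is decoded below in place
+		// of the original data; an error from the hook aborts the decode.)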
+ var err error + data, err = DecodeHookExec( + d.config.DecodeHook, + dataVal.Type(), val.Type(), data) + if err != nil { + return err + } + } + + var err error + dataKind := getKind(val) + switch dataKind { + case reflect.Bool: + err = d.decodeBool(name, data, val) + case reflect.Interface: + err = d.decodeBasic(name, data, val) + case reflect.String: + err = d.decodeString(name, data, val) + case reflect.Int: + err = d.decodeInt(name, data, val) + case reflect.Uint: + err = d.decodeUint(name, data, val) + case reflect.Float32: + err = d.decodeFloat(name, data, val) + case reflect.Struct: + err = d.decodeStruct(name, data, val) + case reflect.Map: + err = d.decodeMap(name, data, val) + case reflect.Ptr: + err = d.decodePtr(name, data, val) + case reflect.Slice: + err = d.decodeSlice(name, data, val) + default: + // If we reached this point then we weren't able to decode it + return fmt.Errorf("%s: unsupported type: %s", name, dataKind) + } + + // If we reached here, then we successfully decoded SOMETHING, so + // mark the key as used if we're tracking metadata. + if d.config.Metadata != nil && name != "" { + d.config.Metadata.Keys = append(d.config.Metadata.Keys, name) + } + + return err +} + +// This decodes a basic type (bool, int, string, etc.) and sets the +// value to "data" of that type. +func (d *Decoder) decodeBasic(name string, data interface{}, val reflect.Value) error { + dataVal := reflect.ValueOf(data) + dataValType := dataVal.Type() + if !dataValType.AssignableTo(val.Type()) { + return fmt.Errorf( + "'%s' expected type '%s', got '%s'", + name, val.Type(), dataValType) + } + + val.Set(dataVal) + return nil +} + +func (d *Decoder) decodeString(name string, data interface{}, val reflect.Value) error { + dataVal := reflect.ValueOf(data) + dataKind := getKind(dataVal) + + converted := true + switch { + case dataKind == reflect.String: + val.SetString(dataVal.String()) + case dataKind == reflect.Bool && d.config.WeaklyTypedInput: + if dataVal.Bool() { + val.SetString("1") + } else { + val.SetString("0") + } + case dataKind == reflect.Int && d.config.WeaklyTypedInput: + val.SetString(strconv.FormatInt(dataVal.Int(), 10)) + case dataKind == reflect.Uint && d.config.WeaklyTypedInput: + val.SetString(strconv.FormatUint(dataVal.Uint(), 10)) + case dataKind == reflect.Float32 && d.config.WeaklyTypedInput: + val.SetString(strconv.FormatFloat(dataVal.Float(), 'f', -1, 64)) + case dataKind == reflect.Slice && d.config.WeaklyTypedInput: + dataType := dataVal.Type() + elemKind := dataType.Elem().Kind() + switch { + case elemKind == reflect.Uint8: + val.SetString(string(dataVal.Interface().([]uint8))) + default: + converted = false + } + default: + converted = false + } + + if !converted { + return fmt.Errorf( + "'%s' expected type '%s', got unconvertible type '%s'", + name, val.Type(), dataVal.Type()) + } + + return nil +} + +func (d *Decoder) decodeInt(name string, data interface{}, val reflect.Value) error { + dataVal := reflect.ValueOf(data) + dataKind := getKind(dataVal) + + switch { + case dataKind == reflect.Int: + val.SetInt(dataVal.Int()) + case dataKind == reflect.Uint: + val.SetInt(int64(dataVal.Uint())) + case dataKind == reflect.Float32: + val.SetInt(int64(dataVal.Float())) + case dataKind == reflect.Bool && d.config.WeaklyTypedInput: + if dataVal.Bool() { + val.SetInt(1) + } else { + val.SetInt(0) + } + case dataKind == reflect.String && d.config.WeaklyTypedInput: + i, err := strconv.ParseInt(dataVal.String(), 0, val.Type().Bits()) + if err == nil { + val.SetInt(i) + } else { + 
return fmt.Errorf("cannot parse '%s' as int: %s", name, err) + } + default: + return fmt.Errorf( + "'%s' expected type '%s', got unconvertible type '%s'", + name, val.Type(), dataVal.Type()) + } + + return nil +} + +func (d *Decoder) decodeUint(name string, data interface{}, val reflect.Value) error { + dataVal := reflect.ValueOf(data) + dataKind := getKind(dataVal) + + switch { + case dataKind == reflect.Int: + i := dataVal.Int() + if i < 0 && !d.config.WeaklyTypedInput { + return fmt.Errorf("cannot parse '%s', %d overflows uint", + name, i) + } + val.SetUint(uint64(i)) + case dataKind == reflect.Uint: + val.SetUint(dataVal.Uint()) + case dataKind == reflect.Float32: + f := dataVal.Float() + if f < 0 && !d.config.WeaklyTypedInput { + return fmt.Errorf("cannot parse '%s', %f overflows uint", + name, f) + } + val.SetUint(uint64(f)) + case dataKind == reflect.Bool && d.config.WeaklyTypedInput: + if dataVal.Bool() { + val.SetUint(1) + } else { + val.SetUint(0) + } + case dataKind == reflect.String && d.config.WeaklyTypedInput: + i, err := strconv.ParseUint(dataVal.String(), 0, val.Type().Bits()) + if err == nil { + val.SetUint(i) + } else { + return fmt.Errorf("cannot parse '%s' as uint: %s", name, err) + } + default: + return fmt.Errorf( + "'%s' expected type '%s', got unconvertible type '%s'", + name, val.Type(), dataVal.Type()) + } + + return nil +} + +func (d *Decoder) decodeBool(name string, data interface{}, val reflect.Value) error { + dataVal := reflect.ValueOf(data) + dataKind := getKind(dataVal) + + switch { + case dataKind == reflect.Bool: + val.SetBool(dataVal.Bool()) + case dataKind == reflect.Int && d.config.WeaklyTypedInput: + val.SetBool(dataVal.Int() != 0) + case dataKind == reflect.Uint && d.config.WeaklyTypedInput: + val.SetBool(dataVal.Uint() != 0) + case dataKind == reflect.Float32 && d.config.WeaklyTypedInput: + val.SetBool(dataVal.Float() != 0) + case dataKind == reflect.String && d.config.WeaklyTypedInput: + b, err := strconv.ParseBool(dataVal.String()) + if err == nil { + val.SetBool(b) + } else if dataVal.String() == "" { + val.SetBool(false) + } else { + return fmt.Errorf("cannot parse '%s' as bool: %s", name, err) + } + default: + return fmt.Errorf( + "'%s' expected type '%s', got unconvertible type '%s'", + name, val.Type(), dataVal.Type()) + } + + return nil +} + +func (d *Decoder) decodeFloat(name string, data interface{}, val reflect.Value) error { + dataVal := reflect.ValueOf(data) + dataKind := getKind(dataVal) + + switch { + case dataKind == reflect.Int: + val.SetFloat(float64(dataVal.Int())) + case dataKind == reflect.Uint: + val.SetFloat(float64(dataVal.Uint())) + case dataKind == reflect.Float32: + val.SetFloat(float64(dataVal.Float())) + case dataKind == reflect.Bool && d.config.WeaklyTypedInput: + if dataVal.Bool() { + val.SetFloat(1) + } else { + val.SetFloat(0) + } + case dataKind == reflect.String && d.config.WeaklyTypedInput: + f, err := strconv.ParseFloat(dataVal.String(), val.Type().Bits()) + if err == nil { + val.SetFloat(f) + } else { + return fmt.Errorf("cannot parse '%s' as float: %s", name, err) + } + default: + return fmt.Errorf( + "'%s' expected type '%s', got unconvertible type '%s'", + name, val.Type(), dataVal.Type()) + } + + return nil +} + +func (d *Decoder) decodeMap(name string, data interface{}, val reflect.Value) error { + valType := val.Type() + valKeyType := valType.Key() + valElemType := valType.Elem() + + // By default we overwrite keys in the current map + valMap := val + + // If the map is nil or we're purposely zeroing 
fields, make a new map + if valMap.IsNil() || d.config.ZeroFields { + // Make a new map to hold our result + mapType := reflect.MapOf(valKeyType, valElemType) + valMap = reflect.MakeMap(mapType) + } + + // Check input type + dataVal := reflect.Indirect(reflect.ValueOf(data)) + if dataVal.Kind() != reflect.Map { + // Accept empty array/slice instead of an empty map in weakly typed mode + if d.config.WeaklyTypedInput && + (dataVal.Kind() == reflect.Slice || dataVal.Kind() == reflect.Array) && + dataVal.Len() == 0 { + val.Set(valMap) + return nil + } else { + return fmt.Errorf("'%s' expected a map, got '%s'", name, dataVal.Kind()) + } + } + + // Accumulate errors + errors := make([]string, 0) + + for _, k := range dataVal.MapKeys() { + fieldName := fmt.Sprintf("%s[%s]", name, k) + + // First decode the key into the proper type + currentKey := reflect.Indirect(reflect.New(valKeyType)) + if err := d.decode(fieldName, k.Interface(), currentKey); err != nil { + errors = appendErrors(errors, err) + continue + } + + // Next decode the data into the proper type + v := dataVal.MapIndex(k).Interface() + currentVal := reflect.Indirect(reflect.New(valElemType)) + if err := d.decode(fieldName, v, currentVal); err != nil { + errors = appendErrors(errors, err) + continue + } + + valMap.SetMapIndex(currentKey, currentVal) + } + + // Set the built up map to the value + val.Set(valMap) + + // If we had errors, return those + if len(errors) > 0 { + return &Error{errors} + } + + return nil +} + +func (d *Decoder) decodePtr(name string, data interface{}, val reflect.Value) error { + // Create an element of the concrete (non pointer) type and decode + // into that. Then set the value of the pointer to this type. + valType := val.Type() + valElemType := valType.Elem() + realVal := reflect.New(valElemType) + if err := d.decode(name, data, reflect.Indirect(realVal)); err != nil { + return err + } + + val.Set(realVal) + return nil +} + +func (d *Decoder) decodeSlice(name string, data interface{}, val reflect.Value) error { + dataVal := reflect.Indirect(reflect.ValueOf(data)) + dataValKind := dataVal.Kind() + valType := val.Type() + valElemType := valType.Elem() + sliceType := reflect.SliceOf(valElemType) + + // Check input type + if dataValKind != reflect.Array && dataValKind != reflect.Slice { + // Accept empty map instead of array/slice in weakly typed mode + if d.config.WeaklyTypedInput && dataVal.Kind() == reflect.Map && dataVal.Len() == 0 { + val.Set(reflect.MakeSlice(sliceType, 0, 0)) + return nil + } else { + return fmt.Errorf( + "'%s': source data must be an array or slice, got %s", name, dataValKind) + } + } + + // Make a new slice to hold our result, same size as the original data. 
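+	// (Each element is then decoded recursively below, so nested maps,
+	// structs, and slices inside the elements are converted as well.)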
+ valSlice := reflect.MakeSlice(sliceType, dataVal.Len(), dataVal.Len()) + + // Accumulate any errors + errors := make([]string, 0) + + for i := 0; i < dataVal.Len(); i++ { + currentData := dataVal.Index(i).Interface() + currentField := valSlice.Index(i) + + fieldName := fmt.Sprintf("%s[%d]", name, i) + if err := d.decode(fieldName, currentData, currentField); err != nil { + errors = appendErrors(errors, err) + } + } + + // Finally, set the value to the slice we built up + val.Set(valSlice) + + // If there were errors, we return those + if len(errors) > 0 { + return &Error{errors} + } + + return nil +} + +func (d *Decoder) decodeStruct(name string, data interface{}, val reflect.Value) error { + dataVal := reflect.Indirect(reflect.ValueOf(data)) + + // If the type of the value to write to and the data match directly, + // then we just set it directly instead of recursing into the structure. + if dataVal.Type() == val.Type() { + val.Set(dataVal) + return nil + } + + dataValKind := dataVal.Kind() + if dataValKind != reflect.Map { + return fmt.Errorf("'%s' expected a map, got '%s'", name, dataValKind) + } + + dataValType := dataVal.Type() + if kind := dataValType.Key().Kind(); kind != reflect.String && kind != reflect.Interface { + return fmt.Errorf( + "'%s' needs a map with string keys, has '%s' keys", + name, dataValType.Key().Kind()) + } + + dataValKeys := make(map[reflect.Value]struct{}) + dataValKeysUnused := make(map[interface{}]struct{}) + for _, dataValKey := range dataVal.MapKeys() { + dataValKeys[dataValKey] = struct{}{} + dataValKeysUnused[dataValKey.Interface()] = struct{}{} + } + + errors := make([]string, 0) + + // This slice will keep track of all the structs we'll be decoding. + // There can be more than one struct if there are embedded structs + // that are squashed. + structs := make([]reflect.Value, 1, 5) + structs[0] = val + + // Compile the list of all the fields that we're going to be decoding + // from all the structs. + fields := make(map[*reflect.StructField]reflect.Value) + for len(structs) > 0 { + structVal := structs[0] + structs = structs[1:] + + structType := structVal.Type() + for i := 0; i < structType.NumField(); i++ { + fieldType := structType.Field(i) + + if fieldType.Anonymous { + fieldKind := fieldType.Type.Kind() + if fieldKind != reflect.Struct { + errors = appendErrors(errors, + fmt.Errorf("%s: unsupported type: %s", fieldType.Name, fieldKind)) + continue + } + } + + // If "squash" is specified in the tag, we squash the field down. + squash := false + tagParts := strings.Split(fieldType.Tag.Get(d.config.TagName), ",") + for _, tag := range tagParts[1:] { + if tag == "squash" { + squash = true + break + } + } + + if squash { + structs = append(structs, val.FieldByName(fieldType.Name)) + continue + } + + // Normal struct field, store it away + fields[&fieldType] = structVal.Field(i) + } + } + + for fieldType, field := range fields { + fieldName := fieldType.Name + + tagValue := fieldType.Tag.Get(d.config.TagName) + tagValue = strings.SplitN(tagValue, ",", 2)[0] + if tagValue != "" { + fieldName = tagValue + } + + rawMapKey := reflect.ValueOf(fieldName) + rawMapVal := dataVal.MapIndex(rawMapKey) + if !rawMapVal.IsValid() { + // Do a slower search by iterating over each key and + // doing case-insensitive search. 
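+			// (strings.EqualFold lets a lowercase map key such as "name"
+			// populate an exported struct field such as "Name".)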
+ for dataValKey, _ := range dataValKeys { + mK, ok := dataValKey.Interface().(string) + if !ok { + // Not a string key + continue + } + + if strings.EqualFold(mK, fieldName) { + rawMapKey = dataValKey + rawMapVal = dataVal.MapIndex(dataValKey) + break + } + } + + if !rawMapVal.IsValid() { + // There was no matching key in the map for the value in + // the struct. Just ignore. + continue + } + } + + // Delete the key we're using from the unused map so we stop tracking + delete(dataValKeysUnused, rawMapKey.Interface()) + + if !field.IsValid() { + // This should never happen + panic("field is not valid") + } + + // If we can't set the field, then it is unexported or something, + // and we just continue onwards. + if !field.CanSet() { + continue + } + + // If the name is empty string, then we're at the root, and we + // don't dot-join the fields. + if name != "" { + fieldName = fmt.Sprintf("%s.%s", name, fieldName) + } + + if err := d.decode(fieldName, rawMapVal.Interface(), field); err != nil { + errors = appendErrors(errors, err) + } + } + + if d.config.ErrorUnused && len(dataValKeysUnused) > 0 { + keys := make([]string, 0, len(dataValKeysUnused)) + for rawKey, _ := range dataValKeysUnused { + keys = append(keys, rawKey.(string)) + } + sort.Strings(keys) + + err := fmt.Errorf("'%s' has invalid keys: %s", name, strings.Join(keys, ", ")) + errors = appendErrors(errors, err) + } + + if len(errors) > 0 { + return &Error{errors} + } + + // Add the unused keys to the list of unused keys if we're tracking metadata + if d.config.Metadata != nil { + for rawKey, _ := range dataValKeysUnused { + key := rawKey.(string) + if name != "" { + key = fmt.Sprintf("%s.%s", name, key) + } + + d.config.Metadata.Unused = append(d.config.Metadata.Unused, key) + } + } + + return nil +} + +func getKind(val reflect.Value) reflect.Kind { + kind := val.Kind() + + switch { + case kind >= reflect.Int && kind <= reflect.Int64: + return reflect.Int + case kind >= reflect.Uint && kind <= reflect.Uint64: + return reflect.Uint + case kind >= reflect.Float32 && kind <= reflect.Float64: + return reflect.Float32 + default: + return kind + } +} diff --git a/vendor/github.com/mitchellh/mapstructure/mapstructure_benchmark_test.go b/vendor/github.com/mitchellh/mapstructure/mapstructure_benchmark_test.go new file mode 100644 index 000000000..41d2a41f7 --- /dev/null +++ b/vendor/github.com/mitchellh/mapstructure/mapstructure_benchmark_test.go @@ -0,0 +1,279 @@ +package mapstructure + +import ( + "encoding/json" + "testing" +) + +func Benchmark_Decode(b *testing.B) { + type Person struct { + Name string + Age int + Emails []string + Extra map[string]string + } + + input := map[string]interface{}{ + "name": "Mitchell", + "age": 91, + "emails": []string{"one", "two", "three"}, + "extra": map[string]string{ + "twitter": "mitchellh", + }, + } + + var result Person + for i := 0; i < b.N; i++ { + Decode(input, &result) + } +} + +// decodeViaJSON takes the map data and passes it through encoding/json to convert it into the +// given Go native structure pointed to by v. v must be a pointer to a struct. +func decodeViaJSON(data interface{}, v interface{}) error { + // Perform the task by simply marshalling the input into JSON, + // then unmarshalling it into target native Go struct. 
+ b, err := json.Marshal(data) + if err != nil { + return err + } + return json.Unmarshal(b, v) +} + +func Benchmark_DecodeViaJSON(b *testing.B) { + type Person struct { + Name string + Age int + Emails []string + Extra map[string]string + } + + input := map[string]interface{}{ + "name": "Mitchell", + "age": 91, + "emails": []string{"one", "two", "three"}, + "extra": map[string]string{ + "twitter": "mitchellh", + }, + } + + var result Person + for i := 0; i < b.N; i++ { + decodeViaJSON(input, &result) + } +} + +func Benchmark_DecodeBasic(b *testing.B) { + input := map[string]interface{}{ + "vstring": "foo", + "vint": 42, + "Vuint": 42, + "vbool": true, + "Vfloat": 42.42, + "vsilent": true, + "vdata": 42, + } + + var result Basic + for i := 0; i < b.N; i++ { + Decode(input, &result) + } +} + +func Benchmark_DecodeEmbedded(b *testing.B) { + input := map[string]interface{}{ + "vstring": "foo", + "Basic": map[string]interface{}{ + "vstring": "innerfoo", + }, + "vunique": "bar", + } + + var result Embedded + for i := 0; i < b.N; i++ { + Decode(input, &result) + } +} + +func Benchmark_DecodeTypeConversion(b *testing.B) { + input := map[string]interface{}{ + "IntToFloat": 42, + "IntToUint": 42, + "IntToBool": 1, + "IntToString": 42, + "UintToInt": 42, + "UintToFloat": 42, + "UintToBool": 42, + "UintToString": 42, + "BoolToInt": true, + "BoolToUint": true, + "BoolToFloat": true, + "BoolToString": true, + "FloatToInt": 42.42, + "FloatToUint": 42.42, + "FloatToBool": 42.42, + "FloatToString": 42.42, + "StringToInt": "42", + "StringToUint": "42", + "StringToBool": "1", + "StringToFloat": "42.42", + "SliceToMap": []interface{}{}, + "MapToSlice": map[string]interface{}{}, + } + + var resultStrict TypeConversionResult + for i := 0; i < b.N; i++ { + Decode(input, &resultStrict) + } +} + +func Benchmark_DecodeMap(b *testing.B) { + input := map[string]interface{}{ + "vfoo": "foo", + "vother": map[interface{}]interface{}{ + "foo": "foo", + "bar": "bar", + }, + } + + var result Map + for i := 0; i < b.N; i++ { + Decode(input, &result) + } +} + +func Benchmark_DecodeMapOfStruct(b *testing.B) { + input := map[string]interface{}{ + "value": map[string]interface{}{ + "foo": map[string]string{"vstring": "one"}, + "bar": map[string]string{"vstring": "two"}, + }, + } + + var result MapOfStruct + for i := 0; i < b.N; i++ { + Decode(input, &result) + } +} + +func Benchmark_DecodeSlice(b *testing.B) { + input := map[string]interface{}{ + "vfoo": "foo", + "vbar": []string{"foo", "bar", "baz"}, + } + + var result Slice + for i := 0; i < b.N; i++ { + Decode(input, &result) + } +} + +func Benchmark_DecodeSliceOfStruct(b *testing.B) { + input := map[string]interface{}{ + "value": []map[string]interface{}{ + {"vstring": "one"}, + {"vstring": "two"}, + }, + } + + var result SliceOfStruct + for i := 0; i < b.N; i++ { + Decode(input, &result) + } +} + +func Benchmark_DecodeWeaklyTypedInput(b *testing.B) { + type Person struct { + Name string + Age int + Emails []string + } + + // This input can come from anywhere, but typically comes from + // something like decoding JSON, generated by a weakly typed language + // such as PHP. 
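+	// (With WeaklyTypedInput set below, each mismatched type annotated here
+	// is coerced rather than returned as a decode error.)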
+ input := map[string]interface{}{ + "name": 123, // number => string + "age": "42", // string => number + "emails": map[string]interface{}{}, // empty map => empty array + } + + var result Person + config := &DecoderConfig{ + WeaklyTypedInput: true, + Result: &result, + } + + decoder, err := NewDecoder(config) + if err != nil { + panic(err) + } + + for i := 0; i < b.N; i++ { + decoder.Decode(input) + } +} + +func Benchmark_DecodeMetadata(b *testing.B) { + type Person struct { + Name string + Age int + } + + input := map[string]interface{}{ + "name": "Mitchell", + "age": 91, + "email": "foo@bar.com", + } + + var md Metadata + var result Person + config := &DecoderConfig{ + Metadata: &md, + Result: &result, + } + + decoder, err := NewDecoder(config) + if err != nil { + panic(err) + } + + for i := 0; i < b.N; i++ { + decoder.Decode(input) + } +} + +func Benchmark_DecodeMetadataEmbedded(b *testing.B) { + input := map[string]interface{}{ + "vstring": "foo", + "vunique": "bar", + } + + var md Metadata + var result EmbeddedSquash + config := &DecoderConfig{ + Metadata: &md, + Result: &result, + } + + decoder, err := NewDecoder(config) + if err != nil { + b.Fatalf("err: %s", err) + } + + for i := 0; i < b.N; i++ { + decoder.Decode(input) + } +} + +func Benchmark_DecodeTagged(b *testing.B) { + input := map[string]interface{}{ + "foo": "bar", + "bar": "value", + } + + var result Tagged + for i := 0; i < b.N; i++ { + Decode(input, &result) + } +} diff --git a/vendor/github.com/mitchellh/mapstructure/mapstructure_bugs_test.go b/vendor/github.com/mitchellh/mapstructure/mapstructure_bugs_test.go new file mode 100644 index 000000000..7054f1ac9 --- /dev/null +++ b/vendor/github.com/mitchellh/mapstructure/mapstructure_bugs_test.go @@ -0,0 +1,47 @@ +package mapstructure + +import "testing" + +// GH-1 +func TestDecode_NilValue(t *testing.T) { + input := map[string]interface{}{ + "vfoo": nil, + "vother": nil, + } + + var result Map + err := Decode(input, &result) + if err != nil { + t.Fatalf("should not error: %s", err) + } + + if result.Vfoo != "" { + t.Fatalf("value should be default: %s", result.Vfoo) + } + + if result.Vother != nil { + t.Fatalf("Vother should be nil: %s", result.Vother) + } +} + +// GH-10 +func TestDecode_mapInterfaceInterface(t *testing.T) { + input := map[interface{}]interface{}{ + "vfoo": nil, + "vother": nil, + } + + var result Map + err := Decode(input, &result) + if err != nil { + t.Fatalf("should not error: %s", err) + } + + if result.Vfoo != "" { + t.Fatalf("value should be default: %s", result.Vfoo) + } + + if result.Vother != nil { + t.Fatalf("Vother should be nil: %s", result.Vother) + } +} diff --git a/vendor/github.com/mitchellh/mapstructure/mapstructure_examples_test.go b/vendor/github.com/mitchellh/mapstructure/mapstructure_examples_test.go new file mode 100644 index 000000000..f17c214a8 --- /dev/null +++ b/vendor/github.com/mitchellh/mapstructure/mapstructure_examples_test.go @@ -0,0 +1,203 @@ +package mapstructure + +import ( + "fmt" +) + +func ExampleDecode() { + type Person struct { + Name string + Age int + Emails []string + Extra map[string]string + } + + // This input can come from anywhere, but typically comes from + // something like decoding JSON where we're not quite sure of the + // struct initially. 
+ input := map[string]interface{}{ + "name": "Mitchell", + "age": 91, + "emails": []string{"one", "two", "three"}, + "extra": map[string]string{ + "twitter": "mitchellh", + }, + } + + var result Person + err := Decode(input, &result) + if err != nil { + panic(err) + } + + fmt.Printf("%#v", result) + // Output: + // mapstructure.Person{Name:"Mitchell", Age:91, Emails:[]string{"one", "two", "three"}, Extra:map[string]string{"twitter":"mitchellh"}} +} + +func ExampleDecode_errors() { + type Person struct { + Name string + Age int + Emails []string + Extra map[string]string + } + + // This input can come from anywhere, but typically comes from + // something like decoding JSON where we're not quite sure of the + // struct initially. + input := map[string]interface{}{ + "name": 123, + "age": "bad value", + "emails": []int{1, 2, 3}, + } + + var result Person + err := Decode(input, &result) + if err == nil { + panic("should have an error") + } + + fmt.Println(err.Error()) + // Output: + // 5 error(s) decoding: + // + // * 'Age' expected type 'int', got unconvertible type 'string' + // * 'Emails[0]' expected type 'string', got unconvertible type 'int' + // * 'Emails[1]' expected type 'string', got unconvertible type 'int' + // * 'Emails[2]' expected type 'string', got unconvertible type 'int' + // * 'Name' expected type 'string', got unconvertible type 'int' +} + +func ExampleDecode_metadata() { + type Person struct { + Name string + Age int + } + + // This input can come from anywhere, but typically comes from + // something like decoding JSON where we're not quite sure of the + // struct initially. + input := map[string]interface{}{ + "name": "Mitchell", + "age": 91, + "email": "foo@bar.com", + } + + // For metadata, we make a more advanced DecoderConfig so we can + // more finely configure the decoder that is used. In this case, we + // just tell the decoder we want to track metadata. + var md Metadata + var result Person + config := &DecoderConfig{ + Metadata: &md, + Result: &result, + } + + decoder, err := NewDecoder(config) + if err != nil { + panic(err) + } + + if err := decoder.Decode(input); err != nil { + panic(err) + } + + fmt.Printf("Unused keys: %#v", md.Unused) + // Output: + // Unused keys: []string{"email"} +} + +func ExampleDecode_weaklyTypedInput() { + type Person struct { + Name string + Age int + Emails []string + } + + // This input can come from anywhere, but typically comes from + // something like decoding JSON, generated by a weakly typed language + // such as PHP. + input := map[string]interface{}{ + "name": 123, // number => string + "age": "42", // string => number + "emails": map[string]interface{}{}, // empty map => empty array + } + + var result Person + config := &DecoderConfig{ + WeaklyTypedInput: true, + Result: &result, + } + + decoder, err := NewDecoder(config) + if err != nil { + panic(err) + } + + err = decoder.Decode(input) + if err != nil { + panic(err) + } + + fmt.Printf("%#v", result) + // Output: mapstructure.Person{Name:"123", Age:42, Emails:[]string{}} +} + +func ExampleDecode_tags() { + // Note that the mapstructure tags defined in the struct type + // can indicate which fields the values are mapped to. 
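+	// (The text before the first comma names the map key to read from;
+	// anything after it, such as ",squash", is treated as a decoder flag.)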
+ type Person struct { + Name string `mapstructure:"person_name"` + Age int `mapstructure:"person_age"` + } + + input := map[string]interface{}{ + "person_name": "Mitchell", + "person_age": 91, + } + + var result Person + err := Decode(input, &result) + if err != nil { + panic(err) + } + + fmt.Printf("%#v", result) + // Output: + // mapstructure.Person{Name:"Mitchell", Age:91} +} + +func ExampleDecode_embeddedStruct() { + // Squashing multiple embedded structs is allowed using the squash tag. + // This is demonstrated by creating a composite struct of multiple types + // and decoding into it. In this case, a person can carry with it both + // a Family and a Location, as well as their own FirstName. + type Family struct { + LastName string + } + type Location struct { + City string + } + type Person struct { + Family `mapstructure:",squash"` + Location `mapstructure:",squash"` + FirstName string + } + + input := map[string]interface{}{ + "FirstName": "Mitchell", + "LastName": "Hashimoto", + "City": "San Francisco", + } + + var result Person + err := Decode(input, &result) + if err != nil { + panic(err) + } + + fmt.Printf("%s %s, %s", result.FirstName, result.LastName, result.City) + // Output: + // Mitchell Hashimoto, San Francisco +} diff --git a/vendor/github.com/mitchellh/mapstructure/mapstructure_test.go b/vendor/github.com/mitchellh/mapstructure/mapstructure_test.go new file mode 100644 index 000000000..8a27647b5 --- /dev/null +++ b/vendor/github.com/mitchellh/mapstructure/mapstructure_test.go @@ -0,0 +1,999 @@ +package mapstructure + +import ( + "reflect" + "sort" + "testing" +) + +type Basic struct { + Vstring string + Vint int + Vuint uint + Vbool bool + Vfloat float64 + Vextra string + vsilent bool + Vdata interface{} +} + +type BasicSquash struct { + Test Basic `mapstructure:",squash"` +} + +type Embedded struct { + Basic + Vunique string +} + +type EmbeddedPointer struct { + *Basic + Vunique string +} + +type EmbeddedSquash struct { + Basic `mapstructure:",squash"` + Vunique string +} + +type Map struct { + Vfoo string + Vother map[string]string +} + +type MapOfStruct struct { + Value map[string]Basic +} + +type Nested struct { + Vfoo string + Vbar Basic +} + +type NestedPointer struct { + Vfoo string + Vbar *Basic +} + +type Slice struct { + Vfoo string + Vbar []string +} + +type SliceOfStruct struct { + Value []Basic +} + +type Tagged struct { + Extra string `mapstructure:"bar,what,what"` + Value string `mapstructure:"foo"` +} + +type TypeConversionResult struct { + IntToFloat float32 + IntToUint uint + IntToBool bool + IntToString string + UintToInt int + UintToFloat float32 + UintToBool bool + UintToString string + BoolToInt int + BoolToUint uint + BoolToFloat float32 + BoolToString string + FloatToInt int + FloatToUint uint + FloatToBool bool + FloatToString string + SliceUint8ToString string + StringToInt int + StringToUint uint + StringToBool bool + StringToFloat float32 + SliceToMap map[string]interface{} + MapToSlice []interface{} +} + +func TestBasicTypes(t *testing.T) { + t.Parallel() + + input := map[string]interface{}{ + "vstring": "foo", + "vint": 42, + "Vuint": 42, + "vbool": true, + "Vfloat": 42.42, + "vsilent": true, + "vdata": 42, + } + + var result Basic + err := Decode(input, &result) + if err != nil { + t.Errorf("got an err: %s", err.Error()) + t.FailNow() + } + + if result.Vstring != "foo" { + t.Errorf("vstring value should be 'foo': %#v", result.Vstring) + } + + if result.Vint != 42 { + t.Errorf("vint value should be 42: %#v", result.Vint) + } + + if 
result.Vuint != 42 { + t.Errorf("vuint value should be 42: %#v", result.Vuint) + } + + if result.Vbool != true { + t.Errorf("vbool value should be true: %#v", result.Vbool) + } + + if result.Vfloat != 42.42 { + t.Errorf("vfloat value should be 42.42: %#v", result.Vfloat) + } + + if result.Vextra != "" { + t.Errorf("vextra value should be empty: %#v", result.Vextra) + } + + if result.vsilent != false { + t.Error("vsilent should not be set, it is unexported") + } + + if result.Vdata != 42 { + t.Error("vdata should be valid") + } +} + +func TestBasic_IntWithFloat(t *testing.T) { + t.Parallel() + + input := map[string]interface{}{ + "vint": float64(42), + } + + var result Basic + err := Decode(input, &result) + if err != nil { + t.Fatalf("got an err: %s", err) + } +} + +func TestBasic_Merge(t *testing.T) { + t.Parallel() + + input := map[string]interface{}{ + "vint": 42, + } + + var result Basic + result.Vuint = 100 + err := Decode(input, &result) + if err != nil { + t.Fatalf("got an err: %s", err) + } + + expected := Basic{ + Vint: 42, + Vuint: 100, + } + if !reflect.DeepEqual(result, expected) { + t.Fatalf("bad: %#v", result) + } +} + +func TestDecode_BasicSquash(t *testing.T) { + t.Parallel() + + input := map[string]interface{}{ + "vstring": "foo", + } + + var result BasicSquash + err := Decode(input, &result) + if err != nil { + t.Fatalf("got an err: %s", err.Error()) + } + + if result.Test.Vstring != "foo" { + t.Errorf("vstring value should be 'foo': %#v", result.Test.Vstring) + } +} + +func TestDecode_Embedded(t *testing.T) { + t.Parallel() + + input := map[string]interface{}{ + "vstring": "foo", + "Basic": map[string]interface{}{ + "vstring": "innerfoo", + }, + "vunique": "bar", + } + + var result Embedded + err := Decode(input, &result) + if err != nil { + t.Fatalf("got an err: %s", err.Error()) + } + + if result.Vstring != "innerfoo" { + t.Errorf("vstring value should be 'innerfoo': %#v", result.Vstring) + } + + if result.Vunique != "bar" { + t.Errorf("vunique value should be 'bar': %#v", result.Vunique) + } +} + +func TestDecode_EmbeddedPointer(t *testing.T) { + t.Parallel() + + input := map[string]interface{}{ + "vstring": "foo", + "Basic": map[string]interface{}{ + "vstring": "innerfoo", + }, + "vunique": "bar", + } + + var result EmbeddedPointer + err := Decode(input, &result) + if err == nil { + t.Fatal("should get error") + } +} + +func TestDecode_EmbeddedSquash(t *testing.T) { + t.Parallel() + + input := map[string]interface{}{ + "vstring": "foo", + "vunique": "bar", + } + + var result EmbeddedSquash + err := Decode(input, &result) + if err != nil { + t.Fatalf("got an err: %s", err.Error()) + } + + if result.Vstring != "foo" { + t.Errorf("vstring value should be 'foo': %#v", result.Vstring) + } + + if result.Vunique != "bar" { + t.Errorf("vunique value should be 'bar': %#v", result.Vunique) + } +} + +func TestDecode_DecodeHook(t *testing.T) { + t.Parallel() + + input := map[string]interface{}{ + "vint": "WHAT", + } + + decodeHook := func(from reflect.Kind, to reflect.Kind, v interface{}) (interface{}, error) { + if from == reflect.String && to != reflect.String { + return 5, nil + } + + return v, nil + } + + var result Basic + config := &DecoderConfig{ + DecodeHook: decodeHook, + Result: &result, + } + + decoder, err := NewDecoder(config) + if err != nil { + t.Fatalf("err: %s", err) + } + + err = decoder.Decode(input) + if err != nil { + t.Fatalf("got an err: %s", err) + } + + if result.Vint != 5 { + t.Errorf("vint should be 5: %#v", result.Vint) + } +} + +func 
TestDecode_DecodeHookType(t *testing.T) { + t.Parallel() + + input := map[string]interface{}{ + "vint": "WHAT", + } + + decodeHook := func(from reflect.Type, to reflect.Type, v interface{}) (interface{}, error) { + if from.Kind() == reflect.String && + to.Kind() != reflect.String { + return 5, nil + } + + return v, nil + } + + var result Basic + config := &DecoderConfig{ + DecodeHook: decodeHook, + Result: &result, + } + + decoder, err := NewDecoder(config) + if err != nil { + t.Fatalf("err: %s", err) + } + + err = decoder.Decode(input) + if err != nil { + t.Fatalf("got an err: %s", err) + } + + if result.Vint != 5 { + t.Errorf("vint should be 5: %#v", result.Vint) + } +} + +func TestDecode_Nil(t *testing.T) { + t.Parallel() + + var input interface{} = nil + result := Basic{ + Vstring: "foo", + } + + err := Decode(input, &result) + if err != nil { + t.Fatalf("err: %s", err) + } + + if result.Vstring != "foo" { + t.Fatalf("bad: %#v", result.Vstring) + } +} + +func TestDecode_NonStruct(t *testing.T) { + t.Parallel() + + input := map[string]interface{}{ + "foo": "bar", + "bar": "baz", + } + + var result map[string]string + err := Decode(input, &result) + if err != nil { + t.Fatalf("err: %s", err) + } + + if result["foo"] != "bar" { + t.Fatal("foo is not bar") + } +} + +func TestDecode_StructMatch(t *testing.T) { + t.Parallel() + + input := map[string]interface{}{ + "vbar": Basic{ + Vstring: "foo", + }, + } + + var result Nested + err := Decode(input, &result) + if err != nil { + t.Fatalf("got an err: %s", err.Error()) + } + + if result.Vbar.Vstring != "foo" { + t.Errorf("bad: %#v", result) + } +} + +func TestDecode_TypeConversion(t *testing.T) { + input := map[string]interface{}{ + "IntToFloat": 42, + "IntToUint": 42, + "IntToBool": 1, + "IntToString": 42, + "UintToInt": 42, + "UintToFloat": 42, + "UintToBool": 42, + "UintToString": 42, + "BoolToInt": true, + "BoolToUint": true, + "BoolToFloat": true, + "BoolToString": true, + "FloatToInt": 42.42, + "FloatToUint": 42.42, + "FloatToBool": 42.42, + "FloatToString": 42.42, + "SliceUint8ToString": []uint8("foo"), + "StringToInt": "42", + "StringToUint": "42", + "StringToBool": "1", + "StringToFloat": "42.42", + "SliceToMap": []interface{}{}, + "MapToSlice": map[string]interface{}{}, + } + + expectedResultStrict := TypeConversionResult{ + IntToFloat: 42.0, + IntToUint: 42, + UintToInt: 42, + UintToFloat: 42, + BoolToInt: 0, + BoolToUint: 0, + BoolToFloat: 0, + FloatToInt: 42, + FloatToUint: 42, + } + + expectedResultWeak := TypeConversionResult{ + IntToFloat: 42.0, + IntToUint: 42, + IntToBool: true, + IntToString: "42", + UintToInt: 42, + UintToFloat: 42, + UintToBool: true, + UintToString: "42", + BoolToInt: 1, + BoolToUint: 1, + BoolToFloat: 1, + BoolToString: "1", + FloatToInt: 42, + FloatToUint: 42, + FloatToBool: true, + FloatToString: "42.42", + SliceUint8ToString: "foo", + StringToInt: 42, + StringToUint: 42, + StringToBool: true, + StringToFloat: 42.42, + SliceToMap: map[string]interface{}{}, + MapToSlice: []interface{}{}, + } + + // Test strict type conversion + var resultStrict TypeConversionResult + err := Decode(input, &resultStrict) + if err == nil { + t.Errorf("should return an error") + } + if !reflect.DeepEqual(resultStrict, expectedResultStrict) { + t.Errorf("expected %v, got: %v", expectedResultStrict, resultStrict) + } + + // Test weak type conversion + var decoder *Decoder + var resultWeak TypeConversionResult + + config := &DecoderConfig{ + WeaklyTypedInput: true, + Result: &resultWeak, + } + + decoder, err = 
NewDecoder(config) + if err != nil { + t.Fatalf("err: %s", err) + } + + err = decoder.Decode(input) + if err != nil { + t.Fatalf("got an err: %s", err) + } + + if !reflect.DeepEqual(resultWeak, expectedResultWeak) { + t.Errorf("expected \n%#v, got: \n%#v", expectedResultWeak, resultWeak) + } +} + +func TestDecoder_ErrorUnused(t *testing.T) { + t.Parallel() + + input := map[string]interface{}{ + "vstring": "hello", + "foo": "bar", + } + + var result Basic + config := &DecoderConfig{ + ErrorUnused: true, + Result: &result, + } + + decoder, err := NewDecoder(config) + if err != nil { + t.Fatalf("err: %s", err) + } + + err = decoder.Decode(input) + if err == nil { + t.Fatal("expected error") + } +} + +func TestMap(t *testing.T) { + t.Parallel() + + input := map[string]interface{}{ + "vfoo": "foo", + "vother": map[interface{}]interface{}{ + "foo": "foo", + "bar": "bar", + }, + } + + var result Map + err := Decode(input, &result) + if err != nil { + t.Fatalf("got an error: %s", err) + } + + if result.Vfoo != "foo" { + t.Errorf("vfoo value should be 'foo': %#v", result.Vfoo) + } + + if result.Vother == nil { + t.Fatal("vother should not be nil") + } + + if len(result.Vother) != 2 { + t.Error("vother should have two items") + } + + if result.Vother["foo"] != "foo" { + t.Errorf("'foo' key should be foo, got: %#v", result.Vother["foo"]) + } + + if result.Vother["bar"] != "bar" { + t.Errorf("'bar' key should be bar, got: %#v", result.Vother["bar"]) + } +} + +func TestMapMerge(t *testing.T) { + t.Parallel() + + input := map[string]interface{}{ + "vfoo": "foo", + "vother": map[interface{}]interface{}{ + "foo": "foo", + "bar": "bar", + }, + } + + var result Map + result.Vother = map[string]string{"hello": "world"} + err := Decode(input, &result) + if err != nil { + t.Fatalf("got an error: %s", err) + } + + if result.Vfoo != "foo" { + t.Errorf("vfoo value should be 'foo': %#v", result.Vfoo) + } + + expected := map[string]string{ + "foo": "foo", + "bar": "bar", + "hello": "world", + } + if !reflect.DeepEqual(result.Vother, expected) { + t.Errorf("bad: %#v", result.Vother) + } +} + +func TestMapOfStruct(t *testing.T) { + t.Parallel() + + input := map[string]interface{}{ + "value": map[string]interface{}{ + "foo": map[string]string{"vstring": "one"}, + "bar": map[string]string{"vstring": "two"}, + }, + } + + var result MapOfStruct + err := Decode(input, &result) + if err != nil { + t.Fatalf("got an err: %s", err) + } + + if result.Value == nil { + t.Fatal("value should not be nil") + } + + if len(result.Value) != 2 { + t.Error("value should have two items") + } + + if result.Value["foo"].Vstring != "one" { + t.Errorf("foo value should be 'one', got: %s", result.Value["foo"].Vstring) + } + + if result.Value["bar"].Vstring != "two" { + t.Errorf("bar value should be 'two', got: %s", result.Value["bar"].Vstring) + } +} + +func TestNestedType(t *testing.T) { + t.Parallel() + + input := map[string]interface{}{ + "vfoo": "foo", + "vbar": map[string]interface{}{ + "vstring": "foo", + "vint": 42, + "vbool": true, + }, + } + + var result Nested + err := Decode(input, &result) + if err != nil { + t.Fatalf("got an err: %s", err.Error()) + } + + if result.Vfoo != "foo" { + t.Errorf("vfoo value should be 'foo': %#v", result.Vfoo) + } + + if result.Vbar.Vstring != "foo" { + t.Errorf("vstring value should be 'foo': %#v", result.Vbar.Vstring) + } + + if result.Vbar.Vint != 42 { + t.Errorf("vint value should be 42: %#v", result.Vbar.Vint) + } + + if result.Vbar.Vbool != true { + t.Errorf("vbool value should be true: %#v", 
result.Vbar.Vbool) + } + + if result.Vbar.Vextra != "" { + t.Errorf("vextra value should be empty: %#v", result.Vbar.Vextra) + } +} + +func TestNestedTypePointer(t *testing.T) { + t.Parallel() + + input := map[string]interface{}{ + "vfoo": "foo", + "vbar": &map[string]interface{}{ + "vstring": "foo", + "vint": 42, + "vbool": true, + }, + } + + var result NestedPointer + err := Decode(input, &result) + if err != nil { + t.Fatalf("got an err: %s", err.Error()) + } + + if result.Vfoo != "foo" { + t.Errorf("vfoo value should be 'foo': %#v", result.Vfoo) + } + + if result.Vbar.Vstring != "foo" { + t.Errorf("vstring value should be 'foo': %#v", result.Vbar.Vstring) + } + + if result.Vbar.Vint != 42 { + t.Errorf("vint value should be 42: %#v", result.Vbar.Vint) + } + + if result.Vbar.Vbool != true { + t.Errorf("vbool value should be true: %#v", result.Vbar.Vbool) + } + + if result.Vbar.Vextra != "" { + t.Errorf("vextra value should be empty: %#v", result.Vbar.Vextra) + } +} + +func TestSlice(t *testing.T) { + t.Parallel() + + inputStringSlice := map[string]interface{}{ + "vfoo": "foo", + "vbar": []string{"foo", "bar", "baz"}, + } + + inputStringSlicePointer := map[string]interface{}{ + "vfoo": "foo", + "vbar": &[]string{"foo", "bar", "baz"}, + } + + outputStringSlice := &Slice{ + "foo", + []string{"foo", "bar", "baz"}, + } + + testSliceInput(t, inputStringSlice, outputStringSlice) + testSliceInput(t, inputStringSlicePointer, outputStringSlice) +} + +func TestInvalidSlice(t *testing.T) { + t.Parallel() + + input := map[string]interface{}{ + "vfoo": "foo", + "vbar": 42, + } + + result := Slice{} + err := Decode(input, &result) + if err == nil { + t.Errorf("expected failure") + } +} + +func TestSliceOfStruct(t *testing.T) { + t.Parallel() + + input := map[string]interface{}{ + "value": []map[string]interface{}{ + {"vstring": "one"}, + {"vstring": "two"}, + }, + } + + var result SliceOfStruct + err := Decode(input, &result) + if err != nil { + t.Fatalf("got unexpected error: %s", err) + } + + if len(result.Value) != 2 { + t.Fatalf("expected two values, got %d", len(result.Value)) + } + + if result.Value[0].Vstring != "one" { + t.Errorf("first value should be 'one', got: %s", result.Value[0].Vstring) + } + + if result.Value[1].Vstring != "two" { + t.Errorf("second value should be 'two', got: %s", result.Value[1].Vstring) + } +} + +func TestInvalidType(t *testing.T) { + t.Parallel() + + input := map[string]interface{}{ + "vstring": 42, + } + + var result Basic + err := Decode(input, &result) + if err == nil { + t.Fatal("error should exist") + } + + derr, ok := err.(*Error) + if !ok { + t.Fatalf("error should be kind of Error, instead: %#v", err) + } + + if derr.Errors[0] != "'Vstring' expected type 'string', got unconvertible type 'int'" { + t.Errorf("got unexpected error: %s", err) + } + + inputNegIntUint := map[string]interface{}{ + "vuint": -42, + } + + err = Decode(inputNegIntUint, &result) + if err == nil { + t.Fatal("error should exist") + } + + derr, ok = err.(*Error) + if !ok { + t.Fatalf("error should be kind of Error, instead: %#v", err) + } + + if derr.Errors[0] != "cannot parse 'Vuint', -42 overflows uint" { + t.Errorf("got unexpected error: %s", err) + } + + inputNegFloatUint := map[string]interface{}{ + "vuint": -42.0, + } + + err = Decode(inputNegFloatUint, &result) + if err == nil { + t.Fatal("error should exist") + } + + derr, ok = err.(*Error) + if !ok { + t.Fatalf("error should be kind of Error, instead: %#v", err) + } + + if derr.Errors[0] != "cannot parse 'Vuint', -42.000000 
overflows uint" { + t.Errorf("got unexpected error: %s", err) + } +} + +func TestMetadata(t *testing.T) { + t.Parallel() + + input := map[string]interface{}{ + "vfoo": "foo", + "vbar": map[string]interface{}{ + "vstring": "foo", + "Vuint": 42, + "foo": "bar", + }, + "bar": "nil", + } + + var md Metadata + var result Nested + config := &DecoderConfig{ + Metadata: &md, + Result: &result, + } + + decoder, err := NewDecoder(config) + if err != nil { + t.Fatalf("err: %s", err) + } + + err = decoder.Decode(input) + if err != nil { + t.Fatalf("err: %s", err.Error()) + } + + expectedKeys := []string{"Vbar", "Vbar.Vstring", "Vbar.Vuint", "Vfoo"} + sort.Strings(md.Keys) + if !reflect.DeepEqual(md.Keys, expectedKeys) { + t.Fatalf("bad keys: %#v", md.Keys) + } + + expectedUnused := []string{"Vbar.foo", "bar"} + if !reflect.DeepEqual(md.Unused, expectedUnused) { + t.Fatalf("bad unused: %#v", md.Unused) + } +} + +func TestMetadata_Embedded(t *testing.T) { + t.Parallel() + + input := map[string]interface{}{ + "vstring": "foo", + "vunique": "bar", + } + + var md Metadata + var result EmbeddedSquash + config := &DecoderConfig{ + Metadata: &md, + Result: &result, + } + + decoder, err := NewDecoder(config) + if err != nil { + t.Fatalf("err: %s", err) + } + + err = decoder.Decode(input) + if err != nil { + t.Fatalf("err: %s", err.Error()) + } + + expectedKeys := []string{"Vstring", "Vunique"} + + sort.Strings(md.Keys) + if !reflect.DeepEqual(md.Keys, expectedKeys) { + t.Fatalf("bad keys: %#v", md.Keys) + } + + expectedUnused := []string{} + if !reflect.DeepEqual(md.Unused, expectedUnused) { + t.Fatalf("bad unused: %#v", md.Unused) + } +} + +func TestNonPtrValue(t *testing.T) { + t.Parallel() + + err := Decode(map[string]interface{}{}, Basic{}) + if err == nil { + t.Fatal("error should exist") + } + + if err.Error() != "result must be a pointer" { + t.Errorf("got unexpected error: %s", err) + } +} + +func TestTagged(t *testing.T) { + t.Parallel() + + input := map[string]interface{}{ + "foo": "bar", + "bar": "value", + } + + var result Tagged + err := Decode(input, &result) + if err != nil { + t.Fatalf("unexpected error: %s", err) + } + + if result.Value != "bar" { + t.Errorf("value should be 'bar', got: %#v", result.Value) + } + + if result.Extra != "value" { + t.Errorf("extra should be 'value', got: %#v", result.Extra) + } +} + +func TestWeakDecode(t *testing.T) { + t.Parallel() + + input := map[string]interface{}{ + "foo": "4", + "bar": "value", + } + + var result struct { + Foo int + Bar string + } + + if err := WeakDecode(input, &result); err != nil { + t.Fatalf("err: %s", err) + } + if result.Foo != 4 { + t.Fatalf("bad: %#v", result) + } + if result.Bar != "value" { + t.Fatalf("bad: %#v", result) + } +} + +func testSliceInput(t *testing.T, input map[string]interface{}, expected *Slice) { + var result Slice + err := Decode(input, &result) + if err != nil { + t.Fatalf("got error: %s", err) + } + + if result.Vfoo != expected.Vfoo { + t.Errorf("Vfoo expected '%s', got '%s'", expected.Vfoo, result.Vfoo) + } + + if result.Vbar == nil { + t.Fatalf("Vbar a slice, got '%#v'", result.Vbar) + } + + if len(result.Vbar) != len(expected.Vbar) { + t.Errorf("Vbar length should be %d, got %d", len(expected.Vbar), len(result.Vbar)) + } + + for i, v := range result.Vbar { + if v != expected.Vbar[i] { + t.Errorf( + "Vbar[%d] should be '%#v', got '%#v'", + i, expected.Vbar[i], v) + } + } +} diff --git a/vendor/github.com/mitchellh/packer/common/uuid/uuid.go b/vendor/github.com/mitchellh/packer/common/uuid/uuid.go new file 
mode 100644 index 000000000..d8b9830be --- /dev/null +++ b/vendor/github.com/mitchellh/packer/common/uuid/uuid.go @@ -0,0 +1,24 @@ +package uuid + +import ( + "crypto/rand" + "fmt" + "time" +) + +// Generates a time ordered UUID. Top 32 bits are a timestamp, +// bottom 96 are random. +func TimeOrderedUUID() string { + unix := uint32(time.Now().UTC().Unix()) + + b := make([]byte, 12) + n, err := rand.Read(b) + if n != len(b) { + err = fmt.Errorf("Not enough entropy available") + } + if err != nil { + panic(err) + } + return fmt.Sprintf("%08x-%04x-%04x-%04x-%04x%08x", + unix, b[0:2], b[2:4], b[4:6], b[6:8], b[8:]) +} diff --git a/vendor/github.com/mitchellh/packer/common/uuid/uuid_test.go b/vendor/github.com/mitchellh/packer/common/uuid/uuid_test.go new file mode 100644 index 000000000..8a853f1be --- /dev/null +++ b/vendor/github.com/mitchellh/packer/common/uuid/uuid_test.go @@ -0,0 +1,12 @@ +package uuid + +import ( + "testing" +) + +func TestTimeOrderedUuid(t *testing.T) { + uuid := TimeOrderedUUID() + if len(uuid) != 36 { + t.Fatalf("bad: %s", uuid) + } +} diff --git a/vendor/github.com/mitchellh/panicwrap/LICENSE b/vendor/github.com/mitchellh/panicwrap/LICENSE new file mode 100644 index 000000000..f9c841a51 --- /dev/null +++ b/vendor/github.com/mitchellh/panicwrap/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2013 Mitchell Hashimoto + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/vendor/github.com/mitchellh/panicwrap/README.md b/vendor/github.com/mitchellh/panicwrap/README.md new file mode 100644 index 000000000..b48feae87 --- /dev/null +++ b/vendor/github.com/mitchellh/panicwrap/README.md @@ -0,0 +1,108 @@ +# panicwrap + +panicwrap is a Go library that re-executes a Go binary and monitors stderr +output from the binary for a panic. When it finds a panic, it executes a +user-defined handler function. Stdout, stderr, stdin, signals, and exit +codes continue to work as normal, making the existence of panicwrap mostly +invisible to the end user until a panic actually occurs. + +Since a panic is truly a bug in the program meant to crash the runtime, +globally catching panics within Go applications is not supposed to be possible. +Despite this, it is often useful to have a way to know when panics occur. +panicwrap allows you to do something with these panics, such as writing them +to a file, so that you can track when panics occur. + +panicwrap is ***not a panic recovery system***. Panics indicate serious +problems with your application and _should_ crash the runtime. 
panicwrap
+is just meant as a way to monitor for panics. If you still think this is
+the worst idea ever, read the section below on why.
+
+## Features
+
+* **SIMPLE!**
+* Works with all Go applications on all platforms Go supports
+* Custom behavior when a panic occurs
+* Stdout, stderr, stdin, exit codes, and signals continue to work as
+  expected.
+
+## Usage
+
+Using panicwrap is simple. It behaves a lot like `fork`, if you know
+how that works. A basic example is shown below.
+
+Because it would be sad to panic while capturing a panic, it is recommended
+that the handler functions for panicwrap remain relatively simple and well
+tested. panicwrap itself contains many tests.
+
+```go
+package main
+
+import (
+	"fmt"
+	"github.com/mitchellh/panicwrap"
+	"os"
+)
+
+func main() {
+	exitStatus, err := panicwrap.BasicWrap(panicHandler)
+	if err != nil {
+		// Something went wrong setting up the panic wrapper. Unlikely,
+		// but possible.
+		panic(err)
+	}
+
+	// If exitStatus >= 0, then we're the parent process and the panicwrap
+	// re-executed ourselves and completed. Just exit with the proper status.
+	if exitStatus >= 0 {
+		os.Exit(exitStatus)
+	}
+
+	// Otherwise, exitStatus < 0 means we're the child. Continue executing as
+	// normal...
+
+	// Let's say we panic
+	panic("oh shucks")
+}
+
+func panicHandler(output string) {
+	// output contains the full output (including stack traces) of the
+	// panic. Put it in a file or something.
+	fmt.Printf("The child panicked:\n\n%s\n", output)
+	os.Exit(1)
+}
+```
+
+## How Does it Work?
+
+panicwrap works by re-executing the running program (retaining arguments,
+environment variables, etc.) and monitoring the stderr of the program.
+Since Go always outputs panics in a predictable way with a predictable
+exit code, panicwrap is able to reliably detect panics and allow the parent
+process to handle them.
+
+## WHY?! Panics should CRASH!
+
+Yes, panics _should_ crash. They are 100% always indicative of bugs, and having
+information on a production server or application as to what caused the panic is critical.
+
+### User Facing
+
+In user-facing programs (programs like
+[Packer](http://github.com/mitchellh/packer) or
+[Docker](http://github.com/dotcloud/docker)), it is up to the user to
+report such panics. This is unreliable at best, and it would be better if the
+program could automatically report panics. panicwrap provides
+a way to do this.
+
+### Server
+
+For backend applications, crashes are easier to detect (since the application
+exits), but knowing why the crash occurred is equally important,
+particularly on a production server.
+
+At [HashiCorp](http://www.hashicorp.com),
+we use panicwrap to log panics to timestamped files with some additional
+data (configuration settings at the time, environment variables, etc.).
+
+The goal of panicwrap is _not_ to hide panics. It is instead to provide
+a clean mechanism for capturing them and ultimately crashing.
diff --git a/vendor/github.com/mitchellh/panicwrap/panicwrap.go b/vendor/github.com/mitchellh/panicwrap/panicwrap.go
new file mode 100644
index 000000000..11eafe712
--- /dev/null
+++ b/vendor/github.com/mitchellh/panicwrap/panicwrap.go
@@ -0,0 +1,309 @@
+// Package panicwrap provides functions for capturing and handling
+// panics in your application. It does this by re-executing the running
+// application and monitoring stderr for any panics. At the same time,
+// stdout/stderr/etc.
are set to the same values so that data is shuttled +// through properly, making the existence of panicwrap mostly transparent. +// +// Panics are only detected when the subprocess exits with a non-zero +// exit status, since this is the only time panics are real. Otherwise, +// "panic-like" output is ignored. +package panicwrap + +import ( + "bytes" + "errors" + "github.com/kardianos/osext" + "io" + "os" + "os/exec" + "os/signal" + "syscall" + "time" +) + +const ( + DEFAULT_COOKIE_KEY = "cccf35992f8f3cd8d1d28f0109dd953e26664531" + DEFAULT_COOKIE_VAL = "7c28215aca87789f95b406b8dd91aa5198406750" +) + +// HandlerFunc is the type called when a panic is detected. +type HandlerFunc func(string) + +// WrapConfig is the configuration for panicwrap when wrapping an existing +// binary. To get started, in general, you only need the BasicWrap function +// that will set this up for you. However, for more customizability, +// WrapConfig and Wrap can be used. +type WrapConfig struct { + // Handler is the function called when a panic occurs. + Handler HandlerFunc + + // The cookie key and value are used within environmental variables + // to tell the child process that it is already executing so that + // wrap doesn't re-wrap itself. + CookieKey string + CookieValue string + + // If true, the panic will not be mirrored to the configured writer + // and will instead ONLY go to the handler. This lets you effectively + // hide panics from the end user. This is not recommended because if + // your handler fails, the panic is effectively lost. + HidePanic bool + + // The amount of time that a process must exit within after detecting + // a panic header for panicwrap to assume it is a panic. Defaults to + // 300 milliseconds. + DetectDuration time.Duration + + // The writer to send the stderr to. If this is nil, then it defaults + // to os.Stderr. + Writer io.Writer + + // The writer to send stdout to. If this is nil, then it defaults to + // os.Stdout. + Stdout io.Writer +} + +// BasicWrap calls Wrap with the given handler function, using defaults +// for everything else. See Wrap and WrapConfig for more information on +// functionality and return values. +func BasicWrap(f HandlerFunc) (int, error) { + return Wrap(&WrapConfig{ + Handler: f, + }) +} + +// Wrap wraps the current executable in a handler to catch panics. It +// returns an error if there was an error during the wrapping process. +// If the error is nil, then the int result indicates the exit status of the +// child process. If the exit status is -1, then this is the child process, +// and execution should continue as normal. Otherwise, this is the parent +// process and the child successfully ran already, and you should exit the +// process with the returned exit status. +// +// This function should be called very very early in your program's execution. +// Ideally, this runs as the first line of code of main. +// +// Once this is called, the given WrapConfig shouldn't be modified or used +// any further. +func Wrap(c *WrapConfig) (int, error) { + if c.Handler == nil { + return -1, errors.New("Handler must be set") + } + + if c.DetectDuration == 0 { + c.DetectDuration = 300 * time.Millisecond + } + + if c.Writer == nil { + c.Writer = os.Stderr + } + + // If we're already wrapped, exit out. 
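+	// Returning -1 with a nil error tells the caller that this is the
+	// child process and that normal execution should continue.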
+ if Wrapped(c) { + return -1, nil + } + + // Get the path to our current executable + exePath, err := osext.Executable() + if err != nil { + return -1, err + } + + // Pipe the stderr so we can read all the data as we look for panics + stderr_r, stderr_w := io.Pipe() + + // doneCh is closed when we're done, signaling any other goroutines + // to end immediately. + doneCh := make(chan struct{}) + + // panicCh is the channel on which the panic text will actually be + // sent. + panicCh := make(chan string) + + // On close, make sure to finish off the copying of data to stderr + defer func() { + defer close(doneCh) + stderr_w.Close() + <-panicCh + }() + + // Start the goroutine that will watch stderr for any panics + go trackPanic(stderr_r, c.Writer, c.DetectDuration, panicCh) + + // Create the writer for stdout that we're going to use + var stdout_w io.Writer = os.Stdout + if c.Stdout != nil { + stdout_w = c.Stdout + } + + // Build a subcommand to re-execute ourselves. We make sure to + // set the environmental variable to include our cookie. We also + // set stdin/stdout to match the config. Finally, we pipe stderr + // through ourselves in order to watch for panics. + cmd := exec.Command(exePath, os.Args[1:]...) + cmd.Env = append(os.Environ(), c.CookieKey+"="+c.CookieValue) + cmd.Stdin = os.Stdin + cmd.Stdout = stdout_w + cmd.Stderr = stderr_w + if err := cmd.Start(); err != nil { + return 1, err + } + + // Listen to signals and capture them forever. We allow the child + // process to handle them in some way. + sigCh := make(chan os.Signal) + signal.Notify(sigCh, os.Interrupt) + go func() { + defer signal.Stop(sigCh) + for { + select { + case <-doneCh: + return + case <-sigCh: + } + } + }() + + if err := cmd.Wait(); err != nil { + exitErr, ok := err.(*exec.ExitError) + if !ok { + // This is some other kind of subprocessing error. + return 1, err + } + + exitStatus := 1 + if status, ok := exitErr.Sys().(syscall.WaitStatus); ok { + exitStatus = status.ExitStatus() + } + + // Close the writer end so that the tracker goroutine ends at some point + stderr_w.Close() + + // Wait on the panic data + panicTxt := <-panicCh + if panicTxt != "" { + if !c.HidePanic { + c.Writer.Write([]byte(panicTxt)) + } + + c.Handler(panicTxt) + } + + return exitStatus, nil + } + + return 0, nil +} + +// Wrapped checks if we're already wrapped according to the configuration +// given. +// +// Wrapped is very cheap and can be used early to short-circuit some pre-wrap +// logic your application may have. +func Wrapped(c *WrapConfig) bool { + if c.CookieKey == "" { + c.CookieKey = DEFAULT_COOKIE_KEY + } + + if c.CookieValue == "" { + c.CookieValue = DEFAULT_COOKIE_VAL + } + + // If the cookie key/value match our environment, then we are the + // child, so just exit now and tell the caller that we're the child + return os.Getenv(c.CookieKey) == c.CookieValue +} + +// trackPanic monitors the given reader for a panic. If a panic is detected, +// it is outputted on the result channel. This will close the channel once +// it is complete. +func trackPanic(r io.Reader, w io.Writer, dur time.Duration, result chan<- string) { + defer close(result) + + var panicTimer <-chan time.Time + panicBuf := new(bytes.Buffer) + panicHeaders := [][]byte{ + []byte("panic:"), + []byte("fatal error: fault"), + } + panicType := -1 + + tempBuf := make([]byte, 2048) + for { + var buf []byte + var n int + + if panicTimer == nil && panicBuf.Len() > 0 { + // We're not tracking a panic but the buffer length is + // greater than 0. 
We need to clear out that buffer, but
+			// look for another panic along the way.
+
+			// First, remove the previous panic header so we don't loop
+			w.Write(panicBuf.Next(len(panicHeaders[panicType])))
+
+			// Next, assume that this is our new buffer to inspect
+			n = panicBuf.Len()
+			buf = make([]byte, n)
+			copy(buf, panicBuf.Bytes())
+			panicBuf.Reset()
+		} else {
+			var err error
+			buf = tempBuf
+			n, err = r.Read(buf)
+			if n <= 0 && err == io.EOF {
+				if panicBuf.Len() > 0 {
+					// We were tracking a panic, assume it was a panic
+					// and return that as the result.
+					result <- panicBuf.String()
+				}
+
+				return
+			}
+		}
+
+		if panicTimer != nil {
+			// We're tracking what we think is a panic right now.
+			// If the timer ended, then it is not a panic.
+			isPanic := true
+			select {
+			case <-panicTimer:
+				isPanic = false
+			default:
+			}
+
+			// No matter what, buffer the text some more.
+			panicBuf.Write(buf[0:n])
+
+			if !isPanic {
+				// It isn't a panic, stop tracking. Clean-up will happen
+				// on the next iteration.
+				panicTimer = nil
+			}
+
+			continue
+		}
+
+		panicType = -1
+		flushIdx := n
+		for i, header := range panicHeaders {
+			idx := bytes.Index(buf[0:n], header)
+			if idx >= 0 {
+				panicType = i
+				flushIdx = idx
+				break
+			}
+		}
+
+		// Flush to stderr what isn't a panic
+		w.Write(buf[0:flushIdx])
+
+		if panicType == -1 {
+			// Not a panic so just continue along
+			continue
+		}
+
+		// We have a panic header. Write what we assume is the panic so far.
+		panicBuf.Write(buf[flushIdx:n])
+		panicTimer = time.After(dur)
+	}
+}
diff --git a/vendor/github.com/mitchellh/panicwrap/panicwrap_test.go b/vendor/github.com/mitchellh/panicwrap/panicwrap_test.go
new file mode 100644
index 000000000..d8804b3e9
--- /dev/null
+++ b/vendor/github.com/mitchellh/panicwrap/panicwrap_test.go
@@ -0,0 +1,325 @@
+package panicwrap
+
+import (
+	"bytes"
+	"fmt"
+	"os"
+	"os/exec"
+	"regexp"
+	"strings"
+	"testing"
+	"time"
+)
+
+var wrapRe = regexp.MustCompile(`wrapped: \d{3,4}`)
+
+func helperProcess(s ...string) *exec.Cmd {
+	cs := []string{"-test.run=TestHelperProcess", "--"}
+	cs = append(cs, s...)
+	env := []string{
+		"GO_WANT_HELPER_PROCESS=1",
+	}
+
+	cmd := exec.Command(os.Args[0], cs...)
+	cmd.Env = append(env, os.Environ()...)
+	cmd.Stdin = os.Stdin
+	cmd.Stderr = os.Stderr
+	cmd.Stdout = os.Stdout
+	return cmd
+}
+
+// This is executed by `helperProcess` in a separate process in order to
+// provide a proper sub-process environment to test some of our functionality.
+func TestHelperProcess(*testing.T) {
+	if os.Getenv("GO_WANT_HELPER_PROCESS") != "1" {
+		return
+	}
+
+	// Find the arguments to our helper, which are the arguments past
+	// the "--" in the command line.
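+	// (This mirrors the GO_WANT_HELPER_PROCESS pattern used by the standard
+	// library's os/exec tests to run test code in a real subprocess.)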
+ args := os.Args + for len(args) > 0 { + if args[0] == "--" { + args = args[1:] + break + } + + args = args[1:] + } + + if len(args) == 0 { + fmt.Fprintf(os.Stderr, "No command\n") + os.Exit(2) + } + + panicHandler := func(s string) { + fmt.Fprintf(os.Stdout, "wrapped: %d", len(s)) + os.Exit(0) + } + + cmd, args := args[0], args[1:] + switch cmd { + case "no-panic-ordered-output": + exitStatus, err := BasicWrap(panicHandler) + if err != nil { + fmt.Fprintf(os.Stderr, "wrap error: %s", err) + os.Exit(1) + } + + if exitStatus < 0 { + for i := 0; i < 1000; i++ { + os.Stdout.Write([]byte("a")) + os.Stderr.Write([]byte("b")) + } + os.Exit(0) + } + + os.Exit(exitStatus) + case "no-panic-output": + fmt.Fprint(os.Stdout, "i am output") + fmt.Fprint(os.Stderr, "stderr out") + os.Exit(0) + case "panic-boundary": + exitStatus, err := BasicWrap(panicHandler) + + if err != nil { + fmt.Fprintf(os.Stderr, "wrap error: %s", err) + os.Exit(1) + } + + if exitStatus < 0 { + // Simulate a panic but on two boundaries... + fmt.Fprint(os.Stderr, "pan") + os.Stderr.Sync() + time.Sleep(100 * time.Millisecond) + fmt.Fprint(os.Stderr, "ic: oh crap") + os.Exit(2) + } + + os.Exit(exitStatus) + case "panic-long": + exitStatus, err := BasicWrap(panicHandler) + + if err != nil { + fmt.Fprintf(os.Stderr, "wrap error: %s", err) + os.Exit(1) + } + + if exitStatus < 0 { + // Make a fake panic by faking the header and adding a + // bunch of garbage. + fmt.Fprint(os.Stderr, "panic: foo\n\n") + for i := 0; i < 1024; i++ { + fmt.Fprint(os.Stderr, "foobarbaz") + } + + // Sleep so that it dumps the previous data + //time.Sleep(1 * time.Millisecond) + time.Sleep(500 * time.Millisecond) + + // Make a real panic + panic("I AM REAL!") + } + + os.Exit(exitStatus) + case "panic": + hidePanic := false + if args[0] == "hide" { + hidePanic = true + } + + config := &WrapConfig{ + Handler: panicHandler, + HidePanic: hidePanic, + } + + exitStatus, err := Wrap(config) + + if err != nil { + fmt.Fprintf(os.Stderr, "wrap error: %s", err) + os.Exit(1) + } + + if exitStatus < 0 { + panic("uh oh") + } + + os.Exit(exitStatus) + case "wrapped": + child := false + if len(args) > 0 && args[0] == "child" { + child = true + } + config := &WrapConfig{ + Handler: panicHandler, + } + + exitStatus, err := Wrap(config) + if err != nil { + fmt.Fprintf(os.Stderr, "wrap error: %s", err) + os.Exit(1) + } + + if exitStatus < 0 { + if child { + fmt.Printf("%v", Wrapped(config)) + } + os.Exit(0) + } + + if !child { + fmt.Printf("%v", Wrapped(config)) + } + os.Exit(exitStatus) + default: + fmt.Fprintf(os.Stderr, "Unknown command: %q\n", cmd) + os.Exit(2) + } +} + +func TestPanicWrap_Output(t *testing.T) { + stderr := new(bytes.Buffer) + stdout := new(bytes.Buffer) + + p := helperProcess("no-panic-output") + p.Stdout = stdout + p.Stderr = stderr + if err := p.Run(); err != nil { + t.Fatalf("err: %s", err) + } + + if !strings.Contains(stdout.String(), "i am output") { + t.Fatalf("didn't forward: %#v", stdout.String()) + } + + if !strings.Contains(stderr.String(), "stderr out") { + t.Fatalf("didn't forward: %#v", stderr.String()) + } +} + +/* +TODO(mitchellh): This property would be nice to gain. 
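+Ordering is lost because stdout is passed straight through to the parent's
+stdout while stderr is piped through the parent so it can be scanned for
+panics, so the two streams are forwarded independently.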
+func TestPanicWrap_Output_Order(t *testing.T) { + output := new(bytes.Buffer) + + p := helperProcess("no-panic-ordered-output") + p.Stdout = output + p.Stderr = output + if err := p.Run(); err != nil { + t.Fatalf("err: %s", err) + } + + expectedBuf := new(bytes.Buffer) + for i := 0; i < 1000; i++ { + expectedBuf.WriteString("ab") + } + + actual := strings.TrimSpace(output.String()) + expected := strings.TrimSpace(expectedBuf.String()) + + if actual != expected { + t.Fatalf("bad: %#v", actual) + } +} +*/ + +func TestPanicWrap_panicHide(t *testing.T) { + stdout := new(bytes.Buffer) + stderr := new(bytes.Buffer) + + p := helperProcess("panic", "hide") + p.Stdout = stdout + p.Stderr = stderr + if err := p.Run(); err != nil { + t.Fatalf("err: %s", err) + } + + if wrapRe.FindString(stdout.String()) == "" { + t.Fatalf("didn't wrap: %#v", stdout.String()) + } + + if strings.Contains(stderr.String(), "panic:") { + t.Fatalf("shouldn't have panic: %#v", stderr.String()) + } +} + +func TestPanicWrap_panicShow(t *testing.T) { + stdout := new(bytes.Buffer) + stderr := new(bytes.Buffer) + + p := helperProcess("panic", "show") + p.Stdout = stdout + p.Stderr = stderr + if err := p.Run(); err != nil { + t.Fatalf("err: %s", err) + } + + if wrapRe.FindString(stdout.String()) == "" { + t.Fatalf("didn't wrap: %#v", stdout.String()) + } + + if !strings.Contains(stderr.String(), "panic:") { + t.Fatalf("should have panic: %#v", stderr.String()) + } +} + +func TestPanicWrap_panicLong(t *testing.T) { + stdout := new(bytes.Buffer) + + p := helperProcess("panic-long") + p.Stdout = stdout + p.Stderr = new(bytes.Buffer) + if err := p.Run(); err != nil { + t.Fatalf("err: %s", err) + } + + if wrapRe.FindString(stdout.String()) == "" { + t.Fatalf("didn't wrap: %#v", stdout.String()) + } +} + +func TestPanicWrap_panicBoundary(t *testing.T) { + // TODO(mitchellh): panics are currently lost on boundaries + t.SkipNow() + + stdout := new(bytes.Buffer) + + p := helperProcess("panic-boundary") + p.Stdout = stdout + //p.Stderr = new(bytes.Buffer) + if err := p.Run(); err != nil { + t.Fatalf("err: %s", err) + } + + if wrapRe.FindString(stdout.String()) == "" { + t.Fatalf("didn't wrap: %#v", stdout.String()) + } +} + +func TestWrapped(t *testing.T) { + stdout := new(bytes.Buffer) + + p := helperProcess("wrapped", "child") + p.Stdout = stdout + if err := p.Run(); err != nil { + t.Fatalf("err: %s", err) + } + + if !strings.Contains(stdout.String(), "true") { + t.Fatalf("bad: %#v", stdout.String()) + } +} + +func TestWrapped_parent(t *testing.T) { + stdout := new(bytes.Buffer) + + p := helperProcess("wrapped") + p.Stdout = stdout + if err := p.Run(); err != nil { + t.Fatalf("err: %s", err) + } + + if !strings.Contains(stdout.String(), "false") { + t.Fatalf("bad: %#v", stdout.String()) + } +} diff --git a/vendor/github.com/mitchellh/prefixedio/LICENSE b/vendor/github.com/mitchellh/prefixedio/LICENSE new file mode 100644 index 000000000..35827a58c --- /dev/null +++ b/vendor/github.com/mitchellh/prefixedio/LICENSE @@ -0,0 +1,19 @@ +Copyright (c) 2015 Mitchell Hashimoto + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and 
this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/vendor/github.com/mitchellh/prefixedio/README.md b/vendor/github.com/mitchellh/prefixedio/README.md new file mode 100644 index 000000000..1c8636020 --- /dev/null +++ b/vendor/github.com/mitchellh/prefixedio/README.md @@ -0,0 +1,30 @@ +# prefixedio + +`prefixedio` (Golang package: `prefixedio`) is a package for Go +that takes an `io.Reader` and de-multiplexes line-oriented data based +on a line prefix to a set of readers. + +## Installation and Usage + +Install using `go get github.com/mitchellh/prefixedio`. + +Full documentation is available at +http://godoc.org/github.com/mitchellh/prefixedio + +Below is an example of its usage ignoring errors: + +```go +// Assume r is some set io.Reader. Perhaps a file, network, anything. +var r io.Reader + +// Initialize the prefixed reader +pr, _ := prefixedio.NewReader(r) + +// Grab readers for a couple prefixes +errR, _ := pr.Prefix("err: ") +outR, _ := pr.Prefix("out: ") + +// Copy the data to different places based on the prefix +go io.Copy(os.Stderr, errR) +go io.Copy(os.Stdout, outR) +``` diff --git a/vendor/github.com/mitchellh/prefixedio/reader.go b/vendor/github.com/mitchellh/prefixedio/reader.go new file mode 100644 index 000000000..82266a9cf --- /dev/null +++ b/vendor/github.com/mitchellh/prefixedio/reader.go @@ -0,0 +1,220 @@ +package prefixedio + +import ( + "bufio" + "bytes" + "errors" + "fmt" + "io" + "sync" + "time" +) + +// Reader reads from another io.Reader and de-multiplexes line-oriented +// data into different io.Reader streams. +// +// Lines are delimited with the '\n' character. +// +// When `Read` is called, any data that doesn't currently have a prefix +// registered will be discarded. Data won't start being discarded until +// the first Read is called on a prefix. Once the first Read is called, +// data is read until EOF. Therefore, be sure to request all prefix +// readers before issuing any Read calls on any prefixes. +// +// Reads will block if all the readers aren't routinely draining their +// buffers. Therefore, be sure to be actively reading from all registered +// prefixes, otherwise you can encounter deadlock scenarios. +type Reader struct { + FlushTimeout time.Duration + + done bool + prefixes map[string]*io.PipeWriter + r io.Reader + + l sync.Mutex + once sync.Once +} + +// NewReader creates a new Reader with the given io.Reader. +func NewReader(r io.Reader) (*Reader, error) { + if r == nil { + return nil, errors.New("Reader must not be nil") + } + + return &Reader{r: r}, nil +} + +// Prefix returns a new io.Reader that will read data that +// is prefixed with the given prefix. +// +// The read data is line-oriented so calling Read will result +// in a full line of output (including the line separator), +// but is exposed as an io.Reader for useful utility interoperating +// with other Go libraries. +// +// The data read has the prefix stripped, but contains the line +// delimiter. 
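+// For example, with a registered prefix of "err: ", the input line
+// "err: file not found\n" is delivered on that prefix's reader as
+// "file not found\n".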
+// +// An empty prefix "" will read the data before any other prefix match is +// found, allowing you to have a default reader before a prefix is matched. +func (r *Reader) Prefix(p string) (io.Reader, error) { + r.l.Lock() + defer r.l.Unlock() + + if r.prefixes == nil { + r.prefixes = make(map[string]*io.PipeWriter) + } + + if _, ok := r.prefixes[p]; ok { + return nil, fmt.Errorf("Prefix already registered: %s", p) + } + + pr, pw := io.Pipe() + r.prefixes[p] = pw + + if r.done { + pw.Close() + } + + return &prefixReader{ + r: r, + pr: pr, + }, nil +} + +// init starts the goroutine that reads from the underlying reader +// and sends data to the proper place. +// +// This is safe to call multiple times. +func (r *Reader) init() { + r.once.Do(func() { + go r.read() + }) +} + +// read runs in a goroutine and performs a read on the reader, +// dispatching lines to prefixes where necessary. +func (r *Reader) read() { + var err error + var lastPrefix string + buf := bufio.NewReader(r.r) + + // Listen for bytes in a goroutine. We do this so that if we're blocking + // we can flush the bytes we have after some configured time. There is + // probably a way to make this a lot faster but this works for now. + byteCh := make(chan byte) + doneCh := make(chan error) + go func() { + defer close(doneCh) + for { + b, err := buf.ReadByte() + if err != nil { + doneCh <- err + return + } + + byteCh <- b + } + }() + + // Figure out the timeout we wait until we flush if we see no data + ft := r.FlushTimeout + if ft == 0 { + ft = 100 * time.Millisecond + } + + lineBuf := make([]byte, 0, 80) + for { + line := lineBuf[0:0] + for { + brk := false + + select { + case b := <-byteCh: + line = append(line, b) + brk = b == '\n' + case err = <-doneCh: + brk = true + case <-time.After(ft): + brk = true + } + + if brk { + break + } + } + + // If an error occurred and its not an EOF, then report that + // error to all pipes and exit. + if err != nil && err != io.EOF { + break + } + + // Go through each prefix and write if the line matches. + // If no lines match, the data is lost. + var prefix string + r.l.Lock() + for p, _ := range r.prefixes { + if p == "" { + continue + } + + if bytes.HasPrefix(line, []byte(p)) { + prefix = p + line = line[len(p):] + break + } + } + + if prefix == "" { + prefix = lastPrefix + } + + pw, ok := r.prefixes[prefix] + if ok { + lastPrefix = prefix + + // Make sure we write all the data before we exit. 
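+			// A short write would otherwise drop the tail of the
+			// line, so loop until every byte has been written or
+			// the pipe reports an error.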
+ n := 0 + for n < len(line) { + ni, err := pw.Write(line[n:]) + if err != nil { + break + } + + n += ni + } + } + r.l.Unlock() + + if err == io.EOF { + break + } + } + + r.l.Lock() + defer r.l.Unlock() + + // Mark us done so that we don't create anymore readers + r.done = true + + // All previous writers should be closed so that the readers + // properly return an EOF (or another error if we had one) + for _, pw := range r.prefixes { + if err != nil && err != io.EOF { + pw.CloseWithError(err) + } else { + pw.Close() + } + } +} + +type prefixReader struct { + r *Reader + pr io.Reader +} + +func (r *prefixReader) Read(p []byte) (int, error) { + r.r.init() + return r.pr.Read(p) +} diff --git a/vendor/github.com/mitchellh/prefixedio/reader_test.go b/vendor/github.com/mitchellh/prefixedio/reader_test.go new file mode 100644 index 000000000..1489d5a77 --- /dev/null +++ b/vendor/github.com/mitchellh/prefixedio/reader_test.go @@ -0,0 +1,96 @@ +package prefixedio + +import ( + "bytes" + "io" + "strings" + "testing" +) + +func TestReader(t *testing.T) { + var fooBuf, barBuf, defBuf bytes.Buffer + + original := bytes.NewReader([]byte(strings.TrimSpace(testInput))) + r, err := NewReader(original) + if err != nil { + t.Fatalf("err: %s", err) + } + + pFoo, err := r.Prefix("foo: ") + if err != nil { + t.Fatalf("err: %s", err) + } + _, err = r.Prefix("foo: ") + if err == nil { + t.Fatalf("expected prefix already registered error") + } + pBar, err := r.Prefix("bar: ") + if err != nil { + t.Fatalf("err: %s", err) + } + pDefault, err := r.Prefix("") + if err != nil { + t.Fatalf("err: %s", err) + } + + doneCh := make(chan struct{}, 3) + go func() { + if _, err := io.Copy(&fooBuf, pFoo); err != nil { + t.Fatalf("err: %s", err) + } + doneCh <- struct{}{} + }() + go func() { + if _, err := io.Copy(&barBuf, pBar); err != nil { + t.Fatalf("err: %s", err) + } + doneCh <- struct{}{} + }() + go func() { + if _, err := io.Copy(&defBuf, pDefault); err != nil { + t.Fatalf("err: %s", err) + } + doneCh <- struct{}{} + }() + + // Wait for all the reads to be done + <-doneCh + <-doneCh + <-doneCh + + if fooBuf.String() != testInputFoo { + t.Fatalf("bad: %s", fooBuf.String()) + } + if barBuf.String() != testInputBar { + t.Fatalf("bad: %s", barBuf.String()) + } + if defBuf.String() != testInputDef { + t.Fatalf("bad: %s", defBuf.String()) + } +} + +const testInput = ` +what +hello +foo: 1 +bar: 2 +bar: 3 +42 +foo: 4 +foo: 5 +bar: 6 +` + +const testInputFoo = `1 +4 +5 +` + +const testInputBar = `2 +3 +42 +6` + +const testInputDef = `what +hello +` diff --git a/vendor/github.com/mitchellh/reflectwalk/LICENSE b/vendor/github.com/mitchellh/reflectwalk/LICENSE new file mode 100644 index 000000000..f9c841a51 --- /dev/null +++ b/vendor/github.com/mitchellh/reflectwalk/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2013 Mitchell Hashimoto + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. 
+ +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/vendor/github.com/mitchellh/reflectwalk/README.md b/vendor/github.com/mitchellh/reflectwalk/README.md new file mode 100644 index 000000000..ac82cd2e1 --- /dev/null +++ b/vendor/github.com/mitchellh/reflectwalk/README.md @@ -0,0 +1,6 @@ +# reflectwalk + +reflectwalk is a Go library for "walking" a value in Go using reflection, +in the same way a directory tree can be "walked" on the filesystem. Walking +a complex structure can allow you to do manipulations on unknown structures +such as those decoded from JSON. diff --git a/vendor/github.com/mitchellh/reflectwalk/location.go b/vendor/github.com/mitchellh/reflectwalk/location.go new file mode 100644 index 000000000..7c59d764c --- /dev/null +++ b/vendor/github.com/mitchellh/reflectwalk/location.go @@ -0,0 +1,17 @@ +package reflectwalk + +//go:generate stringer -type=Location location.go + +type Location uint + +const ( + None Location = iota + Map + MapKey + MapValue + Slice + SliceElem + Struct + StructField + WalkLoc +) diff --git a/vendor/github.com/mitchellh/reflectwalk/location_string.go b/vendor/github.com/mitchellh/reflectwalk/location_string.go new file mode 100644 index 000000000..d3cfe8545 --- /dev/null +++ b/vendor/github.com/mitchellh/reflectwalk/location_string.go @@ -0,0 +1,16 @@ +// generated by stringer -type=Location location.go; DO NOT EDIT + +package reflectwalk + +import "fmt" + +const _Location_name = "NoneMapMapKeyMapValueSliceSliceElemStructStructFieldWalkLoc" + +var _Location_index = [...]uint8{0, 4, 7, 13, 21, 26, 35, 41, 52, 59} + +func (i Location) String() string { + if i+1 >= Location(len(_Location_index)) { + return fmt.Sprintf("Location(%d)", i) + } + return _Location_name[_Location_index[i]:_Location_index[i+1]] +} diff --git a/vendor/github.com/mitchellh/reflectwalk/reflectwalk.go b/vendor/github.com/mitchellh/reflectwalk/reflectwalk.go new file mode 100644 index 000000000..1f2066598 --- /dev/null +++ b/vendor/github.com/mitchellh/reflectwalk/reflectwalk.go @@ -0,0 +1,279 @@ +// reflectwalk is a package that allows you to "walk" complex structures +// similar to how you may "walk" a filesystem: visiting every element one +// by one and calling callback functions allowing you to handle and manipulate +// those elements. +package reflectwalk + +import ( + "reflect" +) + +// PrimitiveWalker implementations are able to handle primitive values +// within complex structures. Primitive values are numbers, strings, +// booleans, funcs, chans. +// +// These primitive values are often members of more complex +// structures (slices, maps, etc.) that are walkable by other interfaces. +type PrimitiveWalker interface { + Primitive(reflect.Value) error +} + +// MapWalker implementations are able to handle individual elements +// found within a map structure. +type MapWalker interface { + Map(m reflect.Value) error + MapElem(m, k, v reflect.Value) error +} + +// SliceWalker implementations are able to handle slice elements found +// within complex structures. 
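+//
+// SliceElem is called once per element and receives the element's index
+// along with its value.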
+type SliceWalker interface { + Slice(reflect.Value) error + SliceElem(int, reflect.Value) error +} + +// StructWalker is an interface that has methods that are called for +// structs when a Walk is done. +type StructWalker interface { + Struct(reflect.Value) error + StructField(reflect.StructField, reflect.Value) error +} + +// EnterExitWalker implementations are notified before and after +// they walk deeper into complex structures (into struct fields, +// into slice elements, etc.) +type EnterExitWalker interface { + Enter(Location) error + Exit(Location) error +} + +// PointerWalker implementations are notified when the value they're +// walking is a pointer or not. Pointer is called for _every_ value whether +// it is a pointer or not. +type PointerWalker interface { + PointerEnter(bool) error + PointerExit(bool) error +} + +// Walk takes an arbitrary value and an interface and traverses the +// value, calling callbacks on the interface if they are supported. +// The interface should implement one or more of the walker interfaces +// in this package, such as PrimitiveWalker, StructWalker, etc. +func Walk(data, walker interface{}) (err error) { + v := reflect.ValueOf(data) + ew, ok := walker.(EnterExitWalker) + if ok { + err = ew.Enter(WalkLoc) + } + + if err == nil { + err = walk(v, walker) + } + + if ok && err == nil { + err = ew.Exit(WalkLoc) + } + + return +} + +func walk(v reflect.Value, w interface{}) (err error) { + // Determine if we're receiving a pointer and if so notify the walker. + pointer := false + if v.Kind() == reflect.Ptr { + pointer = true + v = reflect.Indirect(v) + } + if pw, ok := w.(PointerWalker); ok { + if err = pw.PointerEnter(pointer); err != nil { + return + } + + defer func() { + if err != nil { + return + } + + err = pw.PointerExit(pointer) + }() + } + + // We preserve the original value here because if it is an interface + // type, we want to pass that directly into the walkPrimitive, so that + // we can set it. 
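+	// (Settability is what allows a PrimitiveWalker to replace
+	// interface-typed elements in place, as the package's
+	// TestWalk_Basic_Replace test demonstrates.)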
+ originalV := v + if v.Kind() == reflect.Interface { + v = v.Elem() + } + + k := v.Kind() + if k >= reflect.Int && k <= reflect.Complex128 { + k = reflect.Int + } + + switch k { + // Primitives + case reflect.Bool, reflect.Chan, reflect.Func, reflect.Int, reflect.String, reflect.Invalid: + err = walkPrimitive(originalV, w) + return + case reflect.Map: + err = walkMap(v, w) + return + case reflect.Slice: + err = walkSlice(v, w) + return + case reflect.Struct: + err = walkStruct(v, w) + return + default: + panic("unsupported type: " + k.String()) + } +} + +func walkMap(v reflect.Value, w interface{}) error { + ew, ewok := w.(EnterExitWalker) + if ewok { + ew.Enter(Map) + } + + if mw, ok := w.(MapWalker); ok { + if err := mw.Map(v); err != nil { + return err + } + } + + for _, k := range v.MapKeys() { + kv := v.MapIndex(k) + + if mw, ok := w.(MapWalker); ok { + if err := mw.MapElem(v, k, kv); err != nil { + return err + } + } + + ew, ok := w.(EnterExitWalker) + if ok { + ew.Enter(MapKey) + } + + if err := walk(k, w); err != nil { + return err + } + + if ok { + ew.Exit(MapKey) + ew.Enter(MapValue) + } + + if err := walk(kv, w); err != nil { + return err + } + + if ok { + ew.Exit(MapValue) + } + } + + if ewok { + ew.Exit(Map) + } + + return nil +} + +func walkPrimitive(v reflect.Value, w interface{}) error { + if pw, ok := w.(PrimitiveWalker); ok { + return pw.Primitive(v) + } + + return nil +} + +func walkSlice(v reflect.Value, w interface{}) (err error) { + ew, ok := w.(EnterExitWalker) + if ok { + ew.Enter(Slice) + } + + if sw, ok := w.(SliceWalker); ok { + if err := sw.Slice(v); err != nil { + return err + } + } + + for i := 0; i < v.Len(); i++ { + elem := v.Index(i) + + if sw, ok := w.(SliceWalker); ok { + if err := sw.SliceElem(i, elem); err != nil { + return err + } + } + + ew, ok := w.(EnterExitWalker) + if ok { + ew.Enter(SliceElem) + } + + if err := walk(elem, w); err != nil { + return err + } + + if ok { + ew.Exit(SliceElem) + } + } + + ew, ok = w.(EnterExitWalker) + if ok { + ew.Exit(Slice) + } + + return nil +} + +func walkStruct(v reflect.Value, w interface{}) (err error) { + ew, ewok := w.(EnterExitWalker) + if ewok { + ew.Enter(Struct) + } + + if sw, ok := w.(StructWalker); ok { + if err = sw.Struct(v); err != nil { + return + } + } + + vt := v.Type() + for i := 0; i < vt.NumField(); i++ { + sf := vt.Field(i) + f := v.FieldByIndex([]int{i}) + + if sw, ok := w.(StructWalker); ok { + err = sw.StructField(sf, f) + if err != nil { + return + } + } + + ew, ok := w.(EnterExitWalker) + if ok { + ew.Enter(StructField) + } + + err = walk(f, w) + if err != nil { + return + } + + if ok { + ew.Exit(StructField) + } + } + + if ewok { + ew.Exit(Struct) + } + + return nil +} diff --git a/vendor/github.com/mitchellh/reflectwalk/reflectwalk_test.go b/vendor/github.com/mitchellh/reflectwalk/reflectwalk_test.go new file mode 100644 index 000000000..4ec1066e7 --- /dev/null +++ b/vendor/github.com/mitchellh/reflectwalk/reflectwalk_test.go @@ -0,0 +1,377 @@ +package reflectwalk + +import ( + "reflect" + "testing" +) + +type TestEnterExitWalker struct { + Locs []Location +} + +func (t *TestEnterExitWalker) Enter(l Location) error { + if t.Locs == nil { + t.Locs = make([]Location, 0, 5) + } + + t.Locs = append(t.Locs, l) + return nil +} + +func (t *TestEnterExitWalker) Exit(l Location) error { + t.Locs = append(t.Locs, l) + return nil +} + +type TestPointerWalker struct { + Ps []bool +} + +func (t *TestPointerWalker) PointerEnter(v bool) error { + t.Ps = append(t.Ps, v) + return nil +} + +func (t 
*TestPointerWalker) PointerExit(v bool) error { + return nil +} + +type TestPrimitiveWalker struct { + Value reflect.Value +} + +func (t *TestPrimitiveWalker) Primitive(v reflect.Value) error { + t.Value = v + return nil +} + +type TestPrimitiveCountWalker struct { + Count int +} + +func (t *TestPrimitiveCountWalker) Primitive(v reflect.Value) error { + t.Count += 1 + return nil +} + +type TestPrimitiveReplaceWalker struct { + Value reflect.Value +} + +func (t *TestPrimitiveReplaceWalker) Primitive(v reflect.Value) error { + v.Set(reflect.ValueOf("bar")) + return nil +} + +type TestMapWalker struct { + MapVal reflect.Value + Keys []string + Values []string +} + +func (t *TestMapWalker) Map(m reflect.Value) error { + t.MapVal = m + return nil +} + +func (t *TestMapWalker) MapElem(m, k, v reflect.Value) error { + if t.Keys == nil { + t.Keys = make([]string, 0, 1) + t.Values = make([]string, 0, 1) + } + + t.Keys = append(t.Keys, k.Interface().(string)) + t.Values = append(t.Values, v.Interface().(string)) + return nil +} + +type TestSliceWalker struct { + Count int + SliceVal reflect.Value +} + +func (t *TestSliceWalker) Slice(v reflect.Value) error { + t.SliceVal = v + return nil +} + +func (t *TestSliceWalker) SliceElem(int, reflect.Value) error { + t.Count++ + return nil +} + +type TestStructWalker struct { + Fields []string +} + +func (t *TestStructWalker) Struct(v reflect.Value) error { + return nil +} + +func (t *TestStructWalker) StructField(sf reflect.StructField, v reflect.Value) error { + if t.Fields == nil { + t.Fields = make([]string, 0, 1) + } + + t.Fields = append(t.Fields, sf.Name) + return nil +} + +func TestTestStructs(t *testing.T) { + var raw interface{} + raw = new(TestEnterExitWalker) + if _, ok := raw.(EnterExitWalker); !ok { + t.Fatal("EnterExitWalker is bad") + } + + raw = new(TestPrimitiveWalker) + if _, ok := raw.(PrimitiveWalker); !ok { + t.Fatal("PrimitiveWalker is bad") + } + + raw = new(TestMapWalker) + if _, ok := raw.(MapWalker); !ok { + t.Fatal("MapWalker is bad") + } + + raw = new(TestSliceWalker) + if _, ok := raw.(SliceWalker); !ok { + t.Fatal("SliceWalker is bad") + } + + raw = new(TestStructWalker) + if _, ok := raw.(StructWalker); !ok { + t.Fatal("StructWalker is bad") + } +} + +func TestWalk_Basic(t *testing.T) { + w := new(TestPrimitiveWalker) + + type S struct { + Foo string + } + + data := &S{ + Foo: "foo", + } + + err := Walk(data, w) + if err != nil { + t.Fatalf("err: %s", err) + } + + if w.Value.Kind() != reflect.String { + t.Fatalf("bad: %#v", w.Value) + } +} + +func TestWalk_Basic_Replace(t *testing.T) { + w := new(TestPrimitiveReplaceWalker) + + type S struct { + Foo string + Bar []interface{} + } + + data := &S{ + Foo: "foo", + Bar: []interface{}{[]string{"what"}}, + } + + err := Walk(data, w) + if err != nil { + t.Fatalf("err: %s", err) + } + + if data.Foo != "bar" { + t.Fatalf("bad: %#v", data.Foo) + } + if data.Bar[0].([]string)[0] != "bar" { + t.Fatalf("bad: %#v", data.Bar) + } +} + +func TestWalk_EnterExit(t *testing.T) { + w := new(TestEnterExitWalker) + + type S struct { + A string + M map[string]string + } + + data := &S{ + A: "foo", + M: map[string]string{ + "a": "b", + }, + } + + err := Walk(data, w) + if err != nil { + t.Fatalf("err: %s", err) + } + + expected := []Location{ + WalkLoc, + Struct, + StructField, + StructField, + StructField, + Map, + MapKey, + MapKey, + MapValue, + MapValue, + Map, + StructField, + Struct, + WalkLoc, + } + if !reflect.DeepEqual(w.Locs, expected) { + t.Fatalf("Bad: %#v", w.Locs) + } +} + +func 
TestWalk_Interface(t *testing.T) { + w := new(TestPrimitiveCountWalker) + + type S struct { + Foo string + Bar []interface{} + } + + var data interface{} = &S{ + Foo: "foo", + Bar: []interface{}{[]string{"bar", "what"}, "baz"}, + } + + err := Walk(data, w) + if err != nil { + t.Fatalf("err: %s", err) + } + + if w.Count != 4 { + t.Fatalf("bad: %#v", w.Count) + } +} + +func TestWalk_Interface_nil(t *testing.T) { + w := new(TestPrimitiveCountWalker) + + type S struct { + Bar interface{} + } + + var data interface{} = &S{} + + err := Walk(data, w) + if err != nil { + t.Fatalf("err: %s", err) + } +} + +func TestWalk_Map(t *testing.T) { + w := new(TestMapWalker) + + type S struct { + Foo map[string]string + } + + data := &S{ + Foo: map[string]string{ + "foo": "foov", + "bar": "barv", + }, + } + + err := Walk(data, w) + if err != nil { + t.Fatalf("err: %s", err) + } + + if !reflect.DeepEqual(w.MapVal.Interface(), data.Foo) { + t.Fatalf("Bad: %#v", w.MapVal.Interface()) + } + + expectedK := []string{"foo", "bar"} + if !reflect.DeepEqual(w.Keys, expectedK) { + t.Fatalf("Bad keys: %#v", w.Keys) + } + + expectedV := []string{"foov", "barv"} + if !reflect.DeepEqual(w.Values, expectedV) { + t.Fatalf("Bad values: %#v", w.Values) + } +} + +func TestWalk_Pointer(t *testing.T) { + w := new(TestPointerWalker) + + type S struct { + Foo string + } + + data := &S{ + Foo: "foo", + } + + err := Walk(data, w) + if err != nil { + t.Fatalf("err: %s", err) + } + + expected := []bool{true, false} + if !reflect.DeepEqual(w.Ps, expected) { + t.Fatalf("bad: %#v", w.Ps) + } +} + +func TestWalk_Slice(t *testing.T) { + w := new(TestSliceWalker) + + type S struct { + Foo []string + } + + data := &S{ + Foo: []string{"a", "b", "c"}, + } + + err := Walk(data, w) + if err != nil { + t.Fatalf("err: %s", err) + } + + if !reflect.DeepEqual(w.SliceVal.Interface(), data.Foo) { + t.Fatalf("bad: %#v", w.SliceVal.Interface()) + } + + if w.Count != 3 { + t.Fatalf("Bad count: %d", w.Count) + } +} + +func TestWalk_Struct(t *testing.T) { + w := new(TestStructWalker) + + type S struct { + Foo string + Bar string + } + + data := &S{ + Foo: "foo", + Bar: "bar", + } + + err := Walk(data, w) + if err != nil { + t.Fatalf("err: %s", err) + } + + expected := []string{"Foo", "Bar"} + if !reflect.DeepEqual(w.Fields, expected) { + t.Fatalf("bad: %#v", w.Fields) + } +} diff --git a/vendor/github.com/nesv/go-dynect/dynect/client.go b/vendor/github.com/nesv/go-dynect/dynect/client.go new file mode 100644 index 000000000..b8f8f8b42 --- /dev/null +++ b/vendor/github.com/nesv/go-dynect/dynect/client.go @@ -0,0 +1,153 @@ +package dynect + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io/ioutil" + "log" + "net/http" +) + +const ( + DynAPIPrefix = "https://api.dynect.net/REST" +) + +func init() { + // Set the logging prefix. + log.SetPrefix("go-dynect ") +} + +// A client for use with DynECT's REST API. +type Client struct { + Token string + CustomerName string + httpclient *http.Client + verbose bool +} + +// Creates a new Httpclient. +func NewClient(customerName string) *Client { + return &Client{ + CustomerName: customerName, + httpclient: &http.Client{}} +} + +// Enable, or disable verbose output from the client. +// +// This will enable (or disable) logging messages that explain what the client +// is about to do, like the endpoint it is about to make a request to. If the +// request fails with an unexpected HTTP response code, then the response body +// will be logged out, as well. 
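+//
+// Output goes through the standard library's log package, so it honors any
+// log flags or output destination configured by the caller.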
+func (c *Client) Verbose(p bool) {
+	c.verbose = p
+}
+
+// Login establishes a new session with the DynECT API.
+func (c *Client) Login(username, password string) error {
+	var req = LoginBlock{
+		Username:     username,
+		Password:     password,
+		CustomerName: c.CustomerName}
+
+	var resp LoginResponse
+
+	err := c.Do("POST", "Session", req, &resp)
+	if err != nil {
+		return err
+	}
+
+	c.Token = resp.Data.Token
+	return nil
+}
+
+// LoggedIn reports whether the client currently holds a session token.
+func (c *Client) LoggedIn() bool {
+	return len(c.Token) > 0
+}
+
+// Logout tears down the current session.
+func (c *Client) Logout() error {
+	return c.Do("DELETE", "Session", nil, nil)
+}
+
+// Do performs a request against the DynECT API, marshaling requestData to
+// JSON and unmarshaling the response into responseData.
+func (c *Client) Do(method, endpoint string, requestData, responseData interface{}) error {
+	// Refuse to make a request if the client is logged out/unauthenticated,
+	// with an exemption for when the caller is trying to log in.
+	if !c.LoggedIn() && !(method == "POST" && endpoint == "Session") {
+		return errors.New("Will not perform request; client is not logged in")
+	}
+
+	var err error
+
+	// Marshal the request data into a byte slice.
+	if c.verbose {
+		log.Println("Marshaling request data")
+	}
+	var js []byte
+	if requestData != nil {
+		js, err = json.Marshal(requestData)
+	} else {
+		js = []byte("")
+	}
+	if err != nil {
+		return err
+	}
+
+	// Create a new http.Request object, and set the necessary headers to
+	// authorize the request, and specify the content type.
+	url := fmt.Sprintf("%s/%s", DynAPIPrefix, endpoint)
+	var req *http.Request
+	req, err = http.NewRequest(method, url, bytes.NewReader(js))
+	if err != nil {
+		return err
+	}
+	req.Header.Set("Auth-Token", c.Token)
+	req.Header.Set("Content-Type", "application/json")
+
+	if c.verbose {
+		log.Printf("Making %s request to %q", method, url)
+	}
+
+	var resp *http.Response
+	resp, err = c.httpclient.Do(req)
+	if err != nil {
+		// resp is nil when the round trip itself fails, so there is no
+		// response body to log here.
+		return err
+	} else if resp.StatusCode != 200 {
+		if c.verbose {
+			// Print out the response body.
+			respBody, _ := ioutil.ReadAll(resp.Body)
+			log.Printf("%s", string(respBody))
+		}
+		return fmt.Errorf("Bad response, got %q", resp.Status)
+	}
+
+	// Unmarshal the response data into the provided struct.
+ if c.verbose { + log.Println("Reading in response data") + } + var respBody []byte + respBody, err = ioutil.ReadAll(resp.Body) + if err != nil { + return err + } + + if c.verbose { + log.Println("Unmarshaling response data") + } + err = json.Unmarshal(respBody, &responseData) + if err != nil { + respBody, _ := ioutil.ReadAll(resp.Body) + if resp.ContentLength == 0 || resp.ContentLength == -1 { + log.Println("Zero-length content body") + } else { + log.Printf("%s", string(respBody)) + } + } + + return err +} diff --git a/vendor/github.com/nesv/go-dynect/dynect/client_test.go b/vendor/github.com/nesv/go-dynect/dynect/client_test.go new file mode 100644 index 000000000..8916d9981 --- /dev/null +++ b/vendor/github.com/nesv/go-dynect/dynect/client_test.go @@ -0,0 +1,114 @@ +package dynect + +import ( + "os" + "testing" + "strings" +) + +var ( + DynCustomerName string + DynUsername string + DynPassword string + testZone string +) + +func init() { + DynCustomerName = os.Getenv("DYNECT_CUSTOMER_NAME") + DynUsername = os.Getenv("DYNECT_USER_NAME") + DynPassword = os.Getenv("DYNECT_PASSWORD") + testZone = os.Getenv("DYNECT_TEST_ZONE") +} + +func TestSetup(t *testing.T) { + if len(DynCustomerName) == 0 { + t.Fatal("DYNECT_CUSTOMER_NAME not set") + } + + if len(DynUsername) == 0 { + t.Fatal("DYNECT_USER_NAME not set") + } + + if len(DynPassword) == 0 { + t.Fatal("DYNECT_PASSWORD not set") + } + + if len(testZone) == 0 { + t.Fatal("DYNECT_TEST_ZONE not specified") + } +} + +func TestLoginLogout(t *testing.T) { + client := NewClient(DynCustomerName) + client.Verbose(true) + err := client.Login(DynUsername, DynPassword) + if err != nil { + t.Error(err) + } + + err = client.Logout() + if err != nil { + t.Error(err) + } +} + +func TestZonesRequest(t *testing.T) { + client := NewClient(DynCustomerName) + client.Verbose(true) + err := client.Login(DynUsername, DynPassword) + if err != nil { + t.Error(err) + } + defer func() { + err = client.Logout() + if err != nil { + t.Error(err) + } + }() + + var resp ZonesResponse + err = client.Do("GET", "Zone", nil, &resp) + if err != nil { + t.Error(err) + } + + nresults := len(resp.Data) + for i, zone := range resp.Data { + parts := strings.Split(zone, "/") + t.Logf("(%d/%d) %q", i+1, nresults, parts[len(parts)-2]) + } +} + +func TestFetchingAllZoneRecords(t *testing.T) { + client := NewClient(DynCustomerName) + client.Verbose(true) + err := client.Login(DynUsername, DynPassword) + if err != nil { + t.Error(err) + } + defer func() { + err = client.Logout() + if err != nil { + t.Error(err) + } + }() + + var resp AllRecordsResponse + err = client.Do("GET", "AllRecord/" + testZone, nil, &resp) + if err != nil { + t.Error(err) + } + for _, zr := range resp.Data { + parts := strings.Split(zr, "/") + uri := strings.Join(parts[2:], "/") + t.Log(uri) + + var record RecordResponse + err := client.Do("GET", uri, nil, &record) + if err != nil { + t.Fatal(err) + } + + t.Log("OK") + } +} diff --git a/vendor/github.com/nesv/go-dynect/dynect/convenient_client.go b/vendor/github.com/nesv/go-dynect/dynect/convenient_client.go new file mode 100644 index 000000000..609210a6f --- /dev/null +++ b/vendor/github.com/nesv/go-dynect/dynect/convenient_client.go @@ -0,0 +1,154 @@ +package dynect + +import ( + "fmt" + "log" + "net/http" + "strconv" + "strings" +) + +// ConvenientClient A client with extra helper methods for common actions +type ConvenientClient struct { + Client +} + +// NewConvenientClient Creates a new ConvenientClient +func NewConvenientClient(customerName string) 
*ConvenientClient { + return &ConvenientClient{ + Client{ + CustomerName: customerName, + httpclient: &http.Client{}, + }} +} + +// PublishZone Publish a specific zone and the changes for the current session +func (c *ConvenientClient) PublishZone(zone string) error { + data := &PublishZoneBlock{ + Publish: true, + } + return c.Do("PUT", "Zone/"+zone, data, nil) +} + +// GetRecordID finds the dns record ID by fetching all records for a FQDN +func (c *ConvenientClient) GetRecordID(record *Record) error { + finalID := "" + url := fmt.Sprintf("AllRecord/%s/%s", record.Zone, record.FQDN) + var records AllRecordsResponse + err := c.Do("GET", url, nil, &records) + if err != nil { + return fmt.Errorf("Failed to find Dyn record id: %s", err) + } + for _, recordURL := range records.Data { + id := strings.TrimPrefix(recordURL, fmt.Sprintf("/REST/%sRecord/%s/%s/", record.Type, record.Zone, record.FQDN)) + if !strings.Contains(id, "/") && id != "" { + finalID = id + log.Printf("[INFO] Found Dyn record ID: %s", id) + } + } + if finalID == "" { + return fmt.Errorf("Failed to find Dyn record id!") + } + + record.ID = finalID + return nil +} + +// CreateRecord Method to create a DNS record +func (c *ConvenientClient) CreateRecord(record *Record) error { + if record.FQDN == "" { + record.FQDN = fmt.Sprintf("%s.%s", record.Name, record.Zone) + } + rdata, err := buildRData(record) + if err != nil { + return fmt.Errorf("Failed to create Dyn RData: %s", err) + } + url := fmt.Sprintf("%sRecord/%s/%s", record.Type, record.Zone, record.FQDN) + data := &RecordRequest{ + RData: rdata, + TTL: record.TTL, + } + return c.Do("POST", url, data, nil) +} + +// UpdateRecord Method to update a DNS record +func (c *ConvenientClient) UpdateRecord(record *Record) error { + if record.FQDN == "" { + record.FQDN = fmt.Sprintf("%s.%s", record.Name, record.Zone) + } + rdata, err := buildRData(record) + if err != nil { + return fmt.Errorf("Failed to create Dyn RData: %s", err) + } + url := fmt.Sprintf("%sRecord/%s/%s/%s", record.Type, record.Zone, record.FQDN, record.ID) + data := &RecordRequest{ + RData: rdata, + TTL: record.TTL, + } + return c.Do("PUT", url, data, nil) +} + +// DeleteRecord Method to delete a DNS record +func (c *ConvenientClient) DeleteRecord(record *Record) error { + if record.FQDN == "" { + record.FQDN = fmt.Sprintf("%s.%s", record.Name, record.Zone) + } + // safety check that we have an ID, otherwise we could accidentally delete everything + if record.ID == "" { + return fmt.Errorf("No ID found! 
We can't continue!") + } + url := fmt.Sprintf("%sRecord/%s/%s/%s", record.Type, record.Zone, record.FQDN, record.ID) + return c.Do("DELETE", url, nil, nil) +} + +// GetRecord Method to get record details +func (c *ConvenientClient) GetRecord(record *Record) error { + url := fmt.Sprintf("%sRecord/%s/%s/%s", record.Type, record.Zone, record.FQDN, record.ID) + var rec RecordResponse + err := c.Do("GET", url, nil, &rec) + if err != nil { + return err + } + + record.Zone = rec.Data.Zone + record.FQDN = rec.Data.FQDN + record.Name = strings.TrimSuffix(rec.Data.FQDN, "."+rec.Data.Zone) + record.Type = rec.Data.RecordType + record.TTL = strconv.Itoa(rec.Data.TTL) + + switch rec.Data.RecordType { + case "A", "AAAA": + record.Value = rec.Data.RData.Address + case "CNAME": + record.Value = rec.Data.RData.CName + case "TXT", "SPF": + record.Value = rec.Data.RData.TxtData + default: + return fmt.Errorf("Invalid Dyn record type: %s", rec.Data.RecordType) + } + + return nil +} + +func buildRData(r *Record) (DataBlock, error) { + var rdata DataBlock + + switch r.Type { + case "A", "AAAA": + rdata = DataBlock{ + Address: r.Value, + } + case "CNAME": + rdata = DataBlock{ + CName: r.Value, + } + case "TXT", "SPF": + rdata = DataBlock{ + TxtData: r.Value, + } + default: + return rdata, fmt.Errorf("Invalid Dyn record type: %s", r.Type) + } + + return rdata, nil +} diff --git a/vendor/github.com/nesv/go-dynect/dynect/json.go b/vendor/github.com/nesv/go-dynect/dynect/json.go new file mode 100644 index 000000000..68287ff84 --- /dev/null +++ b/vendor/github.com/nesv/go-dynect/dynect/json.go @@ -0,0 +1,58 @@ +package dynect + +/* +This struct represents the request body that would be sent to the DynECT API +for logging in and getting a session token for future requests. +*/ +type LoginBlock struct { + Username string `json:"user_name"` + Password string `json:"password"` + CustomerName string `json:"customer_name"` +} + +// Type ResponseBlock holds the "header" information returned by any call to +// the DynECT API. +// +// All response-type structs should include this as an anonymous/embedded field. +type ResponseBlock struct { + Status string `json:"string"` + JobId int `json:"job_id,omitempty"` + Messages []MessageBlock `json:"msgs,omitempty"` +} + +// Type MessageBlock holds the message information from the server, and is +// nested within the ResponseBlock type. +type MessageBlock struct { + Info string `json:"INFO"` + Source string `json:"SOURCE"` + ErrorCode string `json:"ERR_CD"` + Level string `json:"LVL"` +} + +// Type LoginResponse holds the data returned by an HTTP POST call to +// https://api.dynect.net/REST/Session/. +type LoginResponse struct { + ResponseBlock + Data LoginDataBlock `json:"data"` +} + +// Type LoginDataBlock holds the token and API version information from an HTTP +// POST call to https://api.dynect.net/REST/Session/. +// +// It is nested within the LoginResponse struct. 
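+//
+// The Token value is what Client.Do sends in the Auth-Token header to
+// authenticate every subsequent request.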
+type LoginDataBlock struct {
+	Token   string `json:"token"`
+	Version string `json:"version"`
+}
+
+// RecordRequest holds the request body for a record create/update.
+type RecordRequest struct {
+	RData DataBlock `json:"rdata"`
+	TTL   string    `json:"ttl,omitempty"`
+}
+
+// PublishZoneBlock holds the request body for a publish-zone request.
+// See https://help.dyn.com/update-zone-api/
+type PublishZoneBlock struct {
+	Publish bool `json:"publish"`
+}
diff --git a/vendor/github.com/nesv/go-dynect/dynect/record.go b/vendor/github.com/nesv/go-dynect/dynect/record.go
new file mode 100644
index 000000000..f77f5abde
--- /dev/null
+++ b/vendor/github.com/nesv/go-dynect/dynect/record.go
@@ -0,0 +1,12 @@
+package dynect
+
+// Record is a simple struct that holds the details of a single DNS record.
+type Record struct {
+	ID    string
+	Zone  string
+	Name  string
+	Value string
+	Type  string
+	FQDN  string
+	TTL   string
+}
diff --git a/vendor/github.com/nesv/go-dynect/dynect/records.go b/vendor/github.com/nesv/go-dynect/dynect/records.go
new file mode 100644
index 000000000..26033c745
--- /dev/null
+++ b/vendor/github.com/nesv/go-dynect/dynect/records.go
@@ -0,0 +1,168 @@
+package dynect
+
+// Type AllRecordsResponse is a struct for holding the list of all record URIs
+// returned from an HTTP GET call to either
+// https://api.dynect.net/REST/AllRecord/<zone> or
+// https://api.dynect.net/REST/AllRecord/<zone>/<fqdn>/.
+type AllRecordsResponse struct {
+	ResponseBlock
+	Data []string `json:"data"`
+}
+
+// Type RecordResponse is used to hold the information for a single DNS record
+// returned from Dyn's DynECT API.
+type RecordResponse struct {
+	ResponseBlock
+	Data BaseRecord `json:"data"`
+}
+
+/*
+BaseRecord is the base struct for record data returned from the Dyn REST API.
+
+It should never be directly passed to the *Client.Do() function for marshaling
+response data to. Instead, it should aid in the composition of a more-specific
+response struct.
+*/
+type BaseRecord struct {
+	FQDN       string    `json:"fqdn"`
+	RecordId   int       `json:"record_id"`
+	RecordType string    `json:"record_type"`
+	TTL        int       `json:"ttl"`
+	Zone       string    `json:"zone"`
+	RData      DataBlock `json:"rdata"`
+}
+
+// Type DataBlock is nested within the BaseRecord struct, and is used for
+// holding record information.
+//
+// The comment above each field names the record types for which that field
+// is populated.
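+//
+// For example, an A or AAAA record populates only Address, while an MX
+// record populates Exchange and Preference.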
+type DataBlock struct { + // A, AAAA + Address string `json:"address,omitempty" bson:"address,omitempty"` + + // CERT, DNSKEY, DS, IPSECKEY, KEY, SSHFP + Algorithm string `json:"algorithm,omitempty" bson:"algorithm,omitempty"` + + // LOC + Altitude string `json:"altitude,omitempty" bson:"altitude,omitempty"` + + // CNAME + CName string `json:"cname,omitempty" bson:"cname,omitempty"` + + // CERT + Certificate string `json:"certificate,omitempty" bson:"algorithm,omitempty"` + + // DNAME + DName string `json:"dname,omitempty" bson:"dname,omitempty"` + + // DHCID, DS + Digest string `json:"digest,omitempty" bson:"digest,omitempty"` + + // DS + DigestType string `json:"digtype,omitempty" bson:"digest_type,omitempty"` + + // KX, MX + Exchange string `json:"exchange,omitempty" bson:"exchange,omitempty"` + + // SSHFP + FPType string `json:"fptype,omitempty" bson:"fp_type,omitempty"` + + // SSHFP + Fingerprint string `json:"fingerprint,omitempty" bson:"fingerprint,omitempty"` + + // DNSKEY, KEY, NAPTR + Flags string `json:"flags,omitempty" bson:"flags,omitempty"` + + // CERT + Format string `json:"format,omitempty" bson:"format,omitempty"` + + // IPSECKEY + GatewayType string `json:"gatetype,omitempty" bson:"gateway_type,omitempty"` + + // LOC + HorizPre string `json:"horiz_pre,omitempty" bson:"horiz_pre,omitempty"` + + // DS + KeyTag string `json:"keytag,omitempty" bson:"keytag,omitempty"` + + // LOC + Latitude string `json:"latitude,omitempty" bson:"latitude,omitempty"` + + // LOC + Longitude string `json:"longitude,omitempty" bson:"longitude,omitempty"` + + // PX + Map822 string `json:"map822,omitempty" bson:"map_822,omitempty"` + + // PX + MapX400 string `json:"mapx400,omitempty" bson:"map_x400,omitempty"` + + // RP + Mbox string `json:"mbox,omitempty" bson:"mbox,omitempty"` + + // NS + NSDName string `json:"nsdname,omitempty" bson:"nsdname,omitempty"` + + // NSAP + NSAP string `json:"nsap,omitempty" bson:"nsap,omitempty"` + + // NAPTR + Order string `json:"order,omitempty" bson:"order,omitempty"` + + // SRV + Port string `json:"port,omitempty" bson:"port,omitempty"` + + // IPSECKEY + Precendence string `json:"precendence,omitempty" bson:"precendence,omitempty"` + + // KX, MX, NAPTR, PX + Preference int `json:"preference,omitempty" bson:"preference,omitempty"` + + // SRV + Priority int `json:"priority,omitempty" bson:"priority,omitempty"` + + // DNSKEY, KEY + Protocol string `json:"protocol,omitempty" bson:"protocol,omitempty"` + + // PTR + PTRDname string `json:"ptrdname,omitempty" bson:"ptrdname,omitempty"` + + // DNSKEY, IPSECKEY, KEY + PublicKey string `json:"public_key,omitempty" bson:"public_key,omitempty"` + + // NAPTR + Regexp string `json:"regexp,omitempty" bson:"regexp,omitempty"` + + // NAPTR + Replacement string `json:"replacement,omitempty" bson:"replacement,omitempty"` + + // SOA + RName string `json:"rname,omitempty" bson:"rname,omitempty"` + + // NAPTR + Services string `json:"services,omitempty" bson:"services,omitempty"` + + // LOC + Size string `json:"size,omitempty" bson:"size,omitempty"` + + // CERT + Tag string `json:"tag,omitempty" bson:"tag,omitempty"` + + // SRV + Target string `json:"target,omitempty" bson:"target,omitempty"` + + // RP + TxtDName string `json:"txtdname,omitempty" bson:"txtdname,omitempty"` + + // SPF, TXT + TxtData string `json:"txtdata,omitempty" bson:"txtdata,omitempty"` + + // LOC + Version string `json:"version,omitempty" bson:"version,omitempty"` + + // LOC + VertPre string `json:"vert_pre,omitempty" bson:"vert_pre,omitempty"` + + // SRV + Weight 
string `json:"weight,omitempty" bson:"weight,omitempty"` +} diff --git a/vendor/github.com/nesv/go-dynect/dynect/zones.go b/vendor/github.com/nesv/go-dynect/dynect/zones.go new file mode 100644 index 000000000..0852c6cf0 --- /dev/null +++ b/vendor/github.com/nesv/go-dynect/dynect/zones.go @@ -0,0 +1,24 @@ +package dynect + +// ZonesResponse is used for holding the data returned by a call to +// "https://api.dynect.net/REST/Zone/". +type ZonesResponse struct { + ResponseBlock + Data []string `json:"data"` +} + +// ZoneResponse is used for holding the data returned by a call to +// "https://api.dynect.net/REST/Zone/ZONE_NAME". +type ZoneResponse struct { + ResponseBlock + Data ZoneDataBlock `json:"data"` +} + +// Type ZoneDataBlock is used as a nested struct, which holds the data for a +// zone returned by a call to "https://api.dynect.net/REST/Zone/ZONE_NAME". +type ZoneDataBlock struct { + Serial int `json:"serial"` + SerialStyle string `json:"serial_style"` + Zone string `json:"zone"` + ZoneType string `json:"zone_type"` +} diff --git a/vendor/github.com/nu7hatch/gouuid/.gitignore b/vendor/github.com/nu7hatch/gouuid/.gitignore new file mode 100644 index 000000000..f9d9cd8ab --- /dev/null +++ b/vendor/github.com/nu7hatch/gouuid/.gitignore @@ -0,0 +1,11 @@ +_obj +_test +*.6 +*.out +_testmain.go +\#* +.\#* +*.log +_cgo* +*.o +*.a diff --git a/vendor/github.com/nu7hatch/gouuid/COPYING b/vendor/github.com/nu7hatch/gouuid/COPYING new file mode 100644 index 000000000..d7849fd8f --- /dev/null +++ b/vendor/github.com/nu7hatch/gouuid/COPYING @@ -0,0 +1,19 @@ +Copyright (C) 2011 by Krzysztof Kowalik + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +of the Software, and to permit persons to whom the Software is furnished to do +so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. \ No newline at end of file diff --git a/vendor/github.com/nu7hatch/gouuid/README.md b/vendor/github.com/nu7hatch/gouuid/README.md new file mode 100644 index 000000000..e3d025d5e --- /dev/null +++ b/vendor/github.com/nu7hatch/gouuid/README.md @@ -0,0 +1,21 @@ +# Pure Go UUID implementation + +This package provides immutable UUID structs and the functions +NewV3, NewV4, NewV5 and Parse() for generating versions 3, 4 +and 5 UUIDs as specified in [RFC 4122](http://www.ietf.org/rfc/rfc4122.txt). + +## Installation + +Use the `go` tool: + + $ go get github.com/nu7hatch/gouuid + +## Usage + +See [documentation and examples](http://godoc.org/github.com/nu7hatch/gouuid) +for more information. + +## Copyright + +Copyright (C) 2011 by Krzysztof Kowalik . See [COPYING](https://github.com/nu7hatch/gouuid/tree/master/COPYING) +file for details. 
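+
+## Example
+
+A minimal usage sketch of generating a version 4 UUID (error handling
+mirrors the package's own example_test.go):
+
+```go
+package main
+
+import (
+	"fmt"
+
+	"github.com/nu7hatch/gouuid"
+)
+
+func main() {
+	// NewV4 fills the UUID with crypto/rand bytes and stamps the
+	// version and RFC 4122 variant bits.
+	u4, err := uuid.NewV4()
+	if err != nil {
+		fmt.Println("error:", err)
+		return
+	}
+	fmt.Println(u4)
+}
+```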
diff --git a/vendor/github.com/nu7hatch/gouuid/example_test.go b/vendor/github.com/nu7hatch/gouuid/example_test.go new file mode 100644 index 000000000..9e86fdc3d --- /dev/null +++ b/vendor/github.com/nu7hatch/gouuid/example_test.go @@ -0,0 +1,33 @@ +package uuid_test + +import ( + "fmt" + "github.com/nu7hatch/gouuid" +) + +func ExampleNewV4() { + u4, err := uuid.NewV4() + if err != nil { + fmt.Println("error:", err) + return + } + fmt.Println(u4) +} + +func ExampleNewV5() { + u5, err := uuid.NewV5(uuid.NamespaceURL, []byte("nu7hat.ch")) + if err != nil { + fmt.Println("error:", err) + return + } + fmt.Println(u5) +} + +func ExampleParseHex() { + u, err := uuid.ParseHex("6ba7b810-9dad-11d1-80b4-00c04fd430c8") + if err != nil { + fmt.Println("error:", err) + return + } + fmt.Println(u) +} diff --git a/vendor/github.com/nu7hatch/gouuid/uuid.go b/vendor/github.com/nu7hatch/gouuid/uuid.go new file mode 100644 index 000000000..ac9623b72 --- /dev/null +++ b/vendor/github.com/nu7hatch/gouuid/uuid.go @@ -0,0 +1,173 @@ +// This package provides immutable UUID structs and the functions +// NewV3, NewV4, NewV5 and Parse() for generating versions 3, 4 +// and 5 UUIDs as specified in RFC 4122. +// +// Copyright (C) 2011 by Krzysztof Kowalik +package uuid + +import ( + "crypto/md5" + "crypto/rand" + "crypto/sha1" + "encoding/hex" + "errors" + "fmt" + "hash" + "regexp" +) + +// The UUID reserved variants. +const ( + ReservedNCS byte = 0x80 + ReservedRFC4122 byte = 0x40 + ReservedMicrosoft byte = 0x20 + ReservedFuture byte = 0x00 +) + +// The following standard UUIDs are for use with NewV3() or NewV5(). +var ( + NamespaceDNS, _ = ParseHex("6ba7b810-9dad-11d1-80b4-00c04fd430c8") + NamespaceURL, _ = ParseHex("6ba7b811-9dad-11d1-80b4-00c04fd430c8") + NamespaceOID, _ = ParseHex("6ba7b812-9dad-11d1-80b4-00c04fd430c8") + NamespaceX500, _ = ParseHex("6ba7b814-9dad-11d1-80b4-00c04fd430c8") +) + +// Pattern used to parse hex string representation of the UUID. +// FIXME: do something to consider both brackets at one time, +// current one allows to parse string with only one opening +// or closing bracket. +const hexPattern = "^(urn\\:uuid\\:)?\\{?([a-z0-9]{8})-([a-z0-9]{4})-" + + "([1-5][a-z0-9]{3})-([a-z0-9]{4})-([a-z0-9]{12})\\}?$" + +var re = regexp.MustCompile(hexPattern) + +// A UUID representation compliant with specification in +// RFC 4122 document. +type UUID [16]byte + +// ParseHex creates a UUID object from given hex string +// representation. Function accepts UUID string in following +// formats: +// +// uuid.ParseHex("6ba7b814-9dad-11d1-80b4-00c04fd430c8") +// uuid.ParseHex("{6ba7b814-9dad-11d1-80b4-00c04fd430c8}") +// uuid.ParseHex("urn:uuid:6ba7b814-9dad-11d1-80b4-00c04fd430c8") +// +func ParseHex(s string) (u *UUID, err error) { + md := re.FindStringSubmatch(s) + if md == nil { + err = errors.New("Invalid UUID string") + return + } + hash := md[2] + md[3] + md[4] + md[5] + md[6] + b, err := hex.DecodeString(hash) + if err != nil { + return + } + u = new(UUID) + copy(u[:], b) + return +} + +// Parse creates a UUID object from given bytes slice. +func Parse(b []byte) (u *UUID, err error) { + if len(b) != 16 { + err = errors.New("Given slice is not valid UUID sequence") + return + } + u = new(UUID) + copy(u[:], b) + return +} + +// Generate a UUID based on the MD5 hash of a namespace identifier +// and a name. 
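+// The result is deterministic: the same namespace and name always yield
+// the same UUID.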
+func NewV3(ns *UUID, name []byte) (u *UUID, err error) { + if ns == nil { + err = errors.New("Invalid namespace UUID") + return + } + u = new(UUID) + // Set all bits to MD5 hash generated from namespace and name. + u.setBytesFromHash(md5.New(), ns[:], name) + u.setVariant(ReservedRFC4122) + u.setVersion(3) + return +} + +// Generate a random UUID. +func NewV4() (u *UUID, err error) { + u = new(UUID) + // Set all bits to randomly (or pseudo-randomly) chosen values. + _, err = rand.Read(u[:]) + if err != nil { + return + } + u.setVariant(ReservedRFC4122) + u.setVersion(4) + return +} + +// Generate a UUID based on the SHA-1 hash of a namespace identifier +// and a name. +func NewV5(ns *UUID, name []byte) (u *UUID, err error) { + u = new(UUID) + // Set all bits to truncated SHA1 hash generated from namespace + // and name. + u.setBytesFromHash(sha1.New(), ns[:], name) + u.setVariant(ReservedRFC4122) + u.setVersion(5) + return +} + +// Generate a MD5 hash of a namespace and a name, and copy it to the +// UUID slice. +func (u *UUID) setBytesFromHash(hash hash.Hash, ns, name []byte) { + hash.Write(ns[:]) + hash.Write(name) + copy(u[:], hash.Sum([]byte{})[:16]) +} + +// Set the two most significant bits (bits 6 and 7) of the +// clock_seq_hi_and_reserved to zero and one, respectively. +func (u *UUID) setVariant(v byte) { + switch v { + case ReservedNCS: + u[8] = (u[8] | ReservedNCS) & 0xBF + case ReservedRFC4122: + u[8] = (u[8] | ReservedRFC4122) & 0x7F + case ReservedMicrosoft: + u[8] = (u[8] | ReservedMicrosoft) & 0x3F + } +} + +// Variant returns the UUID Variant, which determines the internal +// layout of the UUID. This will be one of the constants: RESERVED_NCS, +// RFC_4122, RESERVED_MICROSOFT, RESERVED_FUTURE. +func (u *UUID) Variant() byte { + if u[8]&ReservedNCS == ReservedNCS { + return ReservedNCS + } else if u[8]&ReservedRFC4122 == ReservedRFC4122 { + return ReservedRFC4122 + } else if u[8]&ReservedMicrosoft == ReservedMicrosoft { + return ReservedMicrosoft + } + return ReservedFuture +} + +// Set the four most significant bits (bits 12 through 15) of the +// time_hi_and_version field to the 4-bit version number. +func (u *UUID) setVersion(v byte) { + u[6] = (u[6] & 0xF) | (v << 4) +} + +// Version returns a version number of the algorithm used to +// generate the UUID sequence. +func (u *UUID) Version() uint { + return uint(u[6] >> 4) +} + +// Returns unparsed version of the generated UUID sequence. +func (u *UUID) String() string { + return fmt.Sprintf("%x-%x-%x-%x-%x", u[0:4], u[4:6], u[6:8], u[8:10], u[10:]) +} diff --git a/vendor/github.com/nu7hatch/gouuid/uuid_test.go b/vendor/github.com/nu7hatch/gouuid/uuid_test.go new file mode 100644 index 000000000..70ed346f0 --- /dev/null +++ b/vendor/github.com/nu7hatch/gouuid/uuid_test.go @@ -0,0 +1,135 @@ +// This package provides immutable UUID structs and the functions +// NewV3, NewV4, NewV5 and Parse() for generating versions 3, 4 +// and 5 UUIDs as specified in RFC 4122. 
+// +// Copyright (C) 2011 by Krzysztof Kowalik +package uuid + +import ( + "regexp" + "testing" +) + +const format = "^[a-z0-9]{8}-[a-z0-9]{4}-[1-5][a-z0-9]{3}-[a-z0-9]{4}-[a-z0-9]{12}$" + +func TestParse(t *testing.T) { + _, err := Parse([]byte{1, 2, 3, 4, 5}) + if err == nil { + t.Errorf("Expected error due to invalid UUID sequence") + } + base, _ := NewV4() + u, err := Parse(base[:]) + if err != nil { + t.Errorf("Expected to parse UUID sequence without problems") + return + } + if u.String() != base.String() { + t.Errorf("Expected parsed UUID to be the same as base, %s != %s", u.String(), base.String()) + } +} + +func TestParseString(t *testing.T) { + _, err := ParseHex("foo") + if err == nil { + t.Errorf("Expected error due to invalid UUID string") + } + base, _ := NewV4() + u, err := ParseHex(base.String()) + if err != nil { + t.Errorf("Expected to parse UUID sequence without problems") + return + } + if u.String() != base.String() { + t.Errorf("Expected parsed UUID to be the same as base, %s != %s", u.String(), base.String()) + } +} + +func TestNewV3(t *testing.T) { + u, err := NewV3(NamespaceURL, []byte("golang.org")) + if err != nil { + t.Errorf("Expected to generate UUID without problems, error thrown: %d", err.Error()) + return + } + if u.Version() != 3 { + t.Errorf("Expected to generate UUIDv3, given %d", u.Version()) + } + if u.Variant() != ReservedRFC4122 { + t.Errorf("Expected to generate UUIDv3 RFC4122 variant, given %x", u.Variant()) + } + re := regexp.MustCompile(format) + if !re.MatchString(u.String()) { + t.Errorf("Expected string representation to be valid, given %s", u.String()) + } + u2, _ := NewV3(NamespaceURL, []byte("golang.org")) + if u2.String() != u.String() { + t.Errorf("Expected UUIDs generated of the same namespace and name to be the same") + } + u3, _ := NewV3(NamespaceDNS, []byte("golang.org")) + if u3.String() == u.String() { + t.Errorf("Expected UUIDs generated of different namespace and the same name to be different") + } + u4, _ := NewV3(NamespaceURL, []byte("code.google.com")) + if u4.String() == u.String() { + t.Errorf("Expected UUIDs generated of the same namespace and different names to be different") + } +} + +func TestNewV4(t *testing.T) { + u, err := NewV4() + if err != nil { + t.Errorf("Expected to generate UUID without problems, error thrown: %s", err.Error()) + return + } + if u.Version() != 4 { + t.Errorf("Expected to generate UUIDv4, given %d", u.Version()) + } + if u.Variant() != ReservedRFC4122 { + t.Errorf("Expected to generate UUIDv4 RFC4122 variant, given %x", u.Variant()) + } + re := regexp.MustCompile(format) + if !re.MatchString(u.String()) { + t.Errorf("Expected string representation to be valid, given %s", u.String()) + } +} + +func TestNewV5(t *testing.T) { + u, err := NewV5(NamespaceURL, []byte("golang.org")) + if err != nil { + t.Errorf("Expected to generate UUID without problems, error thrown: %d", err.Error()) + return + } + if u.Version() != 5 { + t.Errorf("Expected to generate UUIDv5, given %d", u.Version()) + } + if u.Variant() != ReservedRFC4122 { + t.Errorf("Expected to generate UUIDv5 RFC4122 variant, given %x", u.Variant()) + } + re := regexp.MustCompile(format) + if !re.MatchString(u.String()) { + t.Errorf("Expected string representation to be valid, given %s", u.String()) + } + u2, _ := NewV5(NamespaceURL, []byte("golang.org")) + if u2.String() != u.String() { + t.Errorf("Expected UUIDs generated of the same namespace and name to be the same") + } + u3, _ := NewV5(NamespaceDNS, []byte("golang.org")) + if u3.String() 
== u.String() { + t.Errorf("Expected UUIDs generated of different namespace and the same name to be different") + } + u4, _ := NewV5(NamespaceURL, []byte("code.google.com")) + if u4.String() == u.String() { + t.Errorf("Expected UUIDs generated of the same namespace and different names to be different") + } +} + +func BenchmarkParseHex(b *testing.B) { + s := "f3593cff-ee92-40df-4086-87825b523f13" + for i := 0; i < b.N; i++ { + _, err := ParseHex(s) + if err != nil { + b.Fatal(err) + } + } + b.StopTimer() + b.ReportAllocs() +} diff --git a/vendor/github.com/packer-community/winrmcp/winrmcp/cp.go b/vendor/github.com/packer-community/winrmcp/winrmcp/cp.go new file mode 100644 index 000000000..1065e4529 --- /dev/null +++ b/vendor/github.com/packer-community/winrmcp/winrmcp/cp.go @@ -0,0 +1,200 @@ +package winrmcp + +import ( + "encoding/base64" + "errors" + "fmt" + "io" + "log" + "os" + "sync" + + "github.com/masterzen/winrm/winrm" + "github.com/mitchellh/packer/common/uuid" +) + +func doCopy(client *winrm.Client, config *Config, in io.Reader, toPath string) error { + tempFile := fmt.Sprintf("winrmcp-%s.tmp", uuid.TimeOrderedUUID()) + tempPath := "$env:TEMP\\" + tempFile + + if os.Getenv("WINRMCP_DEBUG") != "" { + log.Printf("Copying file to %s\n", tempPath) + } + + err := uploadContent(client, config.MaxOperationsPerShell, "%TEMP%\\"+tempFile, in) + if err != nil { + return errors.New(fmt.Sprintf("Error uploading file to %s: %v", tempPath, err)) + } + + if os.Getenv("WINRMCP_DEBUG") != "" { + log.Printf("Moving file from %s to %s", tempPath, toPath) + } + + err = restoreContent(client, tempPath, toPath) + if err != nil { + return errors.New(fmt.Sprintf("Error restoring file from %s to %s: %v", tempPath, toPath, err)) + } + + if os.Getenv("WINRMCP_DEBUG") != "" { + log.Printf("Removing temporary file %s", tempPath) + } + + err = cleanupContent(client, tempPath) + if err != nil { + return errors.New(fmt.Sprintf("Error removing temporary file %s: %v", tempPath, err)) + } + + return nil +} + +func uploadContent(client *winrm.Client, maxChunks int, filePath string, reader io.Reader) error { + var err error + done := false + for !done { + done, err = uploadChunks(client, filePath, maxChunks, reader) + if err != nil { + return err + } + } + + return nil +} + +func uploadChunks(client *winrm.Client, filePath string, maxChunks int, reader io.Reader) (bool, error) { + shell, err := client.CreateShell() + if err != nil { + return false, errors.New(fmt.Sprintf("Couldn't create shell: %v", err)) + } + defer shell.Close() + + // Upload the file in chunks to get around the Windows command line size limit. + // Base64 encodes each set of three bytes into four bytes. In addition the output + // is padded to always be a multiple of four. + // + // ceil(n / 3) * 4 = m1 - m2 + // + // where: + // n = bytes + // m1 = max (8192 character command limit.) 
+ // m2 = len(filePath) + + chunkSize := ((8000 - len(filePath)) / 4) * 3 + chunk := make([]byte, chunkSize) + + if maxChunks == 0 { + maxChunks = 1 + } + + for i := 0; i < maxChunks; i++ { + n, err := reader.Read(chunk) + + if err != nil && err != io.EOF { + return false, err + } + if n == 0 { + return true, nil + } + + content := base64.StdEncoding.EncodeToString(chunk[:n]) + if err = appendContent(shell, filePath, content); err != nil { + return false, err + } + } + + return false, nil +} + +func restoreContent(client *winrm.Client, fromPath, toPath string) error { + shell, err := client.CreateShell() + if err != nil { + return err + } + + defer shell.Close() + script := fmt.Sprintf(` + $tmp_file_path = [System.IO.Path]::GetFullPath("%s") + $dest_file_path = [System.IO.Path]::GetFullPath("%s") + if (Test-Path $dest_file_path) { + rm $dest_file_path + } + else { + $dest_dir = ([System.IO.Path]::GetDirectoryName($dest_file_path)) + New-Item -ItemType directory -Force -ErrorAction SilentlyContinue -Path $dest_dir | Out-Null + } + + if (Test-Path $tmp_file_path) { + $base64_lines = Get-Content $tmp_file_path + $base64_string = [string]::join("",$base64_lines) + $bytes = [System.Convert]::FromBase64String($base64_string) + [System.IO.File]::WriteAllBytes($dest_file_path, $bytes) + } else { + echo $null > $dest_file_path + } + `, fromPath, toPath) + + cmd, err := shell.Execute(winrm.Powershell(script)) + if err != nil { + return err + } + defer cmd.Close() + + var wg sync.WaitGroup + copyFunc := func(w io.Writer, r io.Reader) { + defer wg.Done() + io.Copy(w, r) + } + + wg.Add(2) + go copyFunc(os.Stdout, cmd.Stdout) + go copyFunc(os.Stderr, cmd.Stderr) + + cmd.Wait() + wg.Wait() + + if cmd.ExitCode() != 0 { + return errors.New(fmt.Sprintf("restore operation returned code=%d", cmd.ExitCode())) + } + return nil +} + +func cleanupContent(client *winrm.Client, filePath string) error { + shell, err := client.CreateShell() + if err != nil { + return err + } + + defer shell.Close() + cmd, _ := shell.Execute("powershell", "Remove-Item", filePath, "-ErrorAction SilentlyContinue") + + cmd.Wait() + cmd.Close() + return nil +} + +func appendContent(shell *winrm.Shell, filePath, content string) error { + cmd, err := shell.Execute(fmt.Sprintf("echo %s >> \"%s\"", content, filePath)) + + if err != nil { + return err + } + + defer cmd.Close() + var wg sync.WaitGroup + copyFunc := func(w io.Writer, r io.Reader) { + defer wg.Done() + io.Copy(w, r) + } + + wg.Add(2) + go copyFunc(os.Stdout, cmd.Stdout) + go copyFunc(os.Stderr, cmd.Stderr) + + cmd.Wait() + wg.Wait() + + if cmd.ExitCode() != 0 { + return errors.New(fmt.Sprintf("upload operation returned code=%d", cmd.ExitCode())) + } + + return nil +} diff --git a/vendor/github.com/packer-community/winrmcp/winrmcp/endpoint.go b/vendor/github.com/packer-community/winrmcp/winrmcp/endpoint.go new file mode 100644 index 000000000..ccd7d0d12 --- /dev/null +++ b/vendor/github.com/packer-community/winrmcp/winrmcp/endpoint.go @@ -0,0 +1,42 @@ +package winrmcp + +import ( + "errors" + "fmt" + "net" + "strconv" + "strings" + + "github.com/masterzen/winrm/winrm" +) + +func parseEndpoint(addr string, https bool, insecure bool, caCert []byte) (*winrm.Endpoint, error) { + var host string + var port int + + if addr == "" { + return nil, errors.New("Couldn't convert \"\" to an address.") + } + if !strings.Contains(addr, ":") { + host = addr + port = 5985 + } else { + shost, sport, err := net.SplitHostPort(addr) + if err != nil { + return nil, errors.New(fmt.Sprintf("Couldn't 
convert \"%s\" to an address.", addr)) + } + host = shost + port, err = strconv.Atoi(sport) + if err != nil { + return nil, errors.New("Couldn't convert \"%s\" to a port number.") + } + } + + return &winrm.Endpoint{ + Host: host, + Port: port, + HTTPS: https, + Insecure: insecure, + CACert: &caCert, + }, nil +} diff --git a/vendor/github.com/packer-community/winrmcp/winrmcp/endpoint_test.go b/vendor/github.com/packer-community/winrmcp/winrmcp/endpoint_test.go new file mode 100644 index 000000000..b9143cf27 --- /dev/null +++ b/vendor/github.com/packer-community/winrmcp/winrmcp/endpoint_test.go @@ -0,0 +1,81 @@ +package winrmcp + +import "testing" + +func Test_parsing_an_addr_to_a_winrm_endpoint(t *testing.T) { + endpoint, err := parseEndpoint("1.2.3.4:1234", false, false, nil) + + if err != nil { + t.Fatalf("Should not have been an error: %v", err) + } + if endpoint == nil { + t.Error("Endpoint should not be nil") + } + if endpoint.Host != "1.2.3.4" { + t.Error("Host should be 1.2.3.4") + } + if endpoint.Port != 1234 { + t.Error("Port should be 1234") + } + if endpoint.Insecure { + t.Error("Endpoint should be insecure") + } + if endpoint.HTTPS { + t.Error("Endpoint should be HTTP not HTTPS") + } +} + +func Test_parsing_an_addr_without_a_port_to_a_winrm_endpoint(t *testing.T) { + certBytes := []byte{1, 2, 3, 4, 5, 6} + endpoint, err := parseEndpoint("1.2.3.4", true, true, certBytes) + + if err != nil { + t.Fatalf("Should not have been an error: %v", err) + } + if endpoint == nil { + t.Error("Endpoint should not be nil") + } + if endpoint.Host != "1.2.3.4" { + t.Error("Host should be 1.2.3.4") + } + if endpoint.Port != 5985 { + t.Error("Port should be 5985") + } + if endpoint.Insecure != true { + t.Error("Endpoint should be insecure") + } + if endpoint.HTTPS != true { + t.Error("Endpoint should be HTTPS") + } + + if len(*endpoint.CACert) != len(certBytes) { + t.Error("Length of CACert is wrong") + } + for i := 0; i < len(certBytes); i++ { + if (*endpoint.CACert)[i] != certBytes[i] { + t.Error("CACert is not set correctly") + } + } +} + +func Test_parsing_an_empty_addr_to_a_winrm_endpoint(t *testing.T) { + endpoint, err := parseEndpoint("", false, false, nil) + + if endpoint != nil { + t.Error("Endpoint should be nil") + } + if err == nil { + t.Error("Expected an error") + } +} + +func Test_parsing_an_addr_with_a_bad_port(t *testing.T) { + endpoint, err := parseEndpoint("1.2.3.4:ABCD", false, false, nil) + + if endpoint != nil { + t.Error("Endpoint should be nil") + } + if err == nil { + t.Error("Expected an error") + } +} diff --git a/vendor/github.com/packer-community/winrmcp/winrmcp/ls.go b/vendor/github.com/packer-community/winrmcp/winrmcp/ls.go new file mode 100644 index 000000000..1a6cbaf5c --- /dev/null +++ b/vendor/github.com/packer-community/winrmcp/winrmcp/ls.go @@ -0,0 +1,71 @@ +package winrmcp + +import ( + "encoding/xml" + "errors" + "fmt" + "log" + "os" + "strconv" + + "github.com/masterzen/winrm/winrm" +) + +type FileItem struct { + Name string + Path string + Mode string + LastWriteTime string + Length int +} + +func fetchList(client *winrm.Client, remotePath string) ([]FileItem, error) { + script := fmt.Sprintf("Get-ChildItem %s", remotePath) + stdout, stderr, _, err := client.RunWithString("powershell -Command \""+script+" | ConvertTo-Xml -NoTypeInformation -As String\"", "") + if err != nil { + return nil, errors.New(fmt.Sprintf("Couldn't execute script %s: %v", script, err)) + } + + if stderr != "" { + if os.Getenv("WINRMCP_DEBUG") != "" { + log.Printf("STDERR returned: 
%s\n", stderr) + } + } + + if stdout != "" { + doc := pslist{} + err := xml.Unmarshal([]byte(stdout), &doc) + if err != nil { + return nil, errors.New(fmt.Sprintf("Couldn't parse results: %v", err)) + } + + return convertFileItems(doc.Objects), nil + } + + return []FileItem{}, nil +} + +func convertFileItems(objects []psobject) []FileItem { + items := make([]FileItem, len(objects)) + + for i, object := range objects { + for _, property := range object.Properties { + switch property.Name { + case "Name": + items[i].Name = property.Value + case "Mode": + items[i].Mode = property.Value + case "FullName": + items[i].Path = property.Value + case "Length": + if n, err := strconv.Atoi(property.Value); err == nil { + items[i].Length = n + } + case "LastWriteTime": + items[i].LastWriteTime = property.Value + } + } + } + + return items +} diff --git a/vendor/github.com/packer-community/winrmcp/winrmcp/path.go b/vendor/github.com/packer-community/winrmcp/winrmcp/path.go new file mode 100644 index 000000000..c35cc0728 --- /dev/null +++ b/vendor/github.com/packer-community/winrmcp/winrmcp/path.go @@ -0,0 +1,18 @@ +package winrmcp + +import ( + "fmt" + "strings" +) + +func winPath(path string) string { + if len(path) == 0 { + return path + } + + if strings.Contains(path, " ") { + path = fmt.Sprintf("'%s'", strings.Trim(path, "'\"")) + } + + return strings.Replace(path, "/", "\\", -1) +} diff --git a/vendor/github.com/packer-community/winrmcp/winrmcp/psobject.go b/vendor/github.com/packer-community/winrmcp/winrmcp/psobject.go new file mode 100644 index 000000000..5374fdba5 --- /dev/null +++ b/vendor/github.com/packer-community/winrmcp/winrmcp/psobject.go @@ -0,0 +1,17 @@ +package winrmcp + +type pslist struct { + Objects []psobject `xml:"Object"` +} + +type psobject struct { + Properties []psproperty `xml:"Property"` + Value string `xml:",innerxml"` +} + +type psproperty struct { + Name string `xml:"Name,attr"` + Value string `xml:",innerxml"` +} + + diff --git a/vendor/github.com/packer-community/winrmcp/winrmcp/winrmcp.go b/vendor/github.com/packer-community/winrmcp/winrmcp/winrmcp.go new file mode 100644 index 000000000..844c5a2f6 --- /dev/null +++ b/vendor/github.com/packer-community/winrmcp/winrmcp/winrmcp.go @@ -0,0 +1,117 @@ +package winrmcp + +import ( + "errors" + "fmt" + "io" + "os" + "path/filepath" + "time" + + "github.com/dylanmei/iso8601" + "github.com/masterzen/winrm/winrm" +) + +type Winrmcp struct { + client *winrm.Client + config *Config +} + +type Config struct { + Auth Auth + Https bool + Insecure bool + CACertBytes []byte + OperationTimeout time.Duration + MaxOperationsPerShell int +} + +type Auth struct { + User string + Password string +} + +func New(addr string, config *Config) (*Winrmcp, error) { + endpoint, err := parseEndpoint(addr, config.Https, config.Insecure, config.CACertBytes) + if err != nil { + return nil, err + } + if config == nil { + config = &Config{} + } + + params := winrm.DefaultParameters() + if config.OperationTimeout.Seconds() > 0 { + params.Timeout = iso8601.FormatDuration(config.OperationTimeout) + } + client, err := winrm.NewClientWithParameters( + endpoint, config.Auth.User, config.Auth.Password, params) + return &Winrmcp{client, config}, err +} + +func (fs *Winrmcp) Copy(fromPath, toPath string) error { + f, err := os.Open(fromPath) + if err != nil { + return errors.New(fmt.Sprintf("Couldn't read file %s: %v", fromPath, err)) + } + + defer f.Close() + fi, err := f.Stat() + if err != nil { + return errors.New(fmt.Sprintf("Couldn't stat file %s: %v", 
fromPath, err)) + } + + if !fi.IsDir() { + return fs.Write(toPath, f) + } else { + fw := fileWalker{ + client: fs.client, + config: fs.config, + toDir: toPath, + fromDir: fromPath, + } + return filepath.Walk(fromPath, fw.copyFile) + } +} + +func (fs *Winrmcp) Write(toPath string, src io.Reader) error { + return doCopy(fs.client, fs.config, src, winPath(toPath)) +} + +func (fs *Winrmcp) List(remotePath string) ([]FileItem, error) { + return fetchList(fs.client, winPath(remotePath)) +} + +type fileWalker struct { + client *winrm.Client + config *Config + toDir string + fromDir string +} + +func (fw *fileWalker) copyFile(fromPath string, fi os.FileInfo, err error) error { + if err != nil { + return err + } + + if !shouldUploadFile(fi) { + return nil + } + + hostPath, _ := filepath.Abs(fromPath) + fromDir, _ := filepath.Abs(fw.fromDir) + relPath, _ := filepath.Rel(fromDir, hostPath) + toPath := filepath.Join(fw.toDir, relPath) + + f, err := os.Open(hostPath) + if err != nil { + return errors.New(fmt.Sprintf("Couldn't read file %s: %v", fromPath, err)) + } + + return doCopy(fw.client, fw.config, f, winPath(toPath)) +} + +func shouldUploadFile(fi os.FileInfo) bool { + // Ignore dir entries and OS X special hidden file + return !fi.IsDir() && ".DS_Store" != fi.Name() +} diff --git a/vendor/github.com/packethost/packngo/LICENSE.txt b/vendor/github.com/packethost/packngo/LICENSE.txt new file mode 100644 index 000000000..57c50110c --- /dev/null +++ b/vendor/github.com/packethost/packngo/LICENSE.txt @@ -0,0 +1,56 @@ +Copyright (c) 2014 The packngo AUTHORS. All rights reserved. + +MIT License + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + +====================== +Portions of the client are based on code at: +https://github.com/google/go-github/ and +https://github.com/digitalocean/godo + +Copyright (c) 2013 The go-github AUTHORS. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. 
+ +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + diff --git a/vendor/github.com/packethost/packngo/devices.go b/vendor/github.com/packethost/packngo/devices.go new file mode 100644 index 000000000..7607c1250 --- /dev/null +++ b/vendor/github.com/packethost/packngo/devices.go @@ -0,0 +1,236 @@ +package packngo + +import "fmt" + +const deviceBasePath = "/devices" + +// DeviceService interface defines available device methods +type DeviceService interface { + List(ProjectID string) ([]Device, *Response, error) + Get(string) (*Device, *Response, error) + Create(*DeviceCreateRequest) (*Device, *Response, error) + Delete(string) (*Response, error) + Reboot(string) (*Response, error) + PowerOff(string) (*Response, error) + PowerOn(string) (*Response, error) + Lock(string) (*Response, error) + Unlock(string) (*Response, error) +} + +type devicesRoot struct { + Devices []Device `json:"devices"` +} + +// Device represents a Packet device +type Device struct { + ID string `json:"id"` + Href string `json:"href,omitempty"` + Hostname string `json:"hostname,omitempty"` + State string `json:"state,omitempty"` + Created string `json:"created_at,omitempty"` + Updated string `json:"updated_at,omitempty"` + Locked bool `json:"locked,omitempty"` + BillingCycle string `json:"billing_cycle,omitempty"` + Tags []string `json:"tags,omitempty"` + Network []*IPAddress `json:"ip_addresses"` + OS *OS `json:"operating_system,omitempty"` + Plan *Plan `json:"plan,omitempty"` + Facility *Facility `json:"facility,omitempty"` + Project *Project `json:"project,omitempty"` + ProvisionPer float32 `json:"provisioning_percentage,omitempty"` +} + +func (d Device) String() string { + return Stringify(d) +} + +// DeviceCreateRequest type used to create a Packet device +type DeviceCreateRequest struct { + HostName string `json:"hostname"` + Plan string `json:"plan"` + Facility string `json:"facility"` + OS string `json:"operating_system"` + BillingCycle string `json:"billing_cycle"` + ProjectID string `json:"project_id"` + UserData string `json:"userdata"` + Tags []string `json:"tags"` +} + +func (d DeviceCreateRequest) String() string { + return Stringify(d) +} + +// DeviceActionRequest type used to execute actions on devices +type DeviceActionRequest struct { + Type string `json:"type"` +} + +func (d DeviceActionRequest) String() string { + return Stringify(d) +} + +// IPAddress used to execute actions on devices +type IPAddress struct { + Family int `json:"address_family"` + Cidr int `json:"cidr"` + Address string `json:"address"` + Gateway string `json:"gateway"` + Public bool `json:"public"` +} + +func (n IPAddress) String() string { + return Stringify(n) +} + +// DeviceServiceOp implements DeviceService +type DeviceServiceOp struct { + client *Client +} + +// List returns devices on a project +func (s *DeviceServiceOp) 
List(projectID string) ([]Device, *Response, error) { + path := fmt.Sprintf("%s/%s/devices?include=facility", projectBasePath, projectID) + + req, err := s.client.NewRequest("GET", path, nil) + if err != nil { + return nil, nil, err + } + + root := new(devicesRoot) + resp, err := s.client.Do(req, root) + if err != nil { + return nil, resp, err + } + + return root.Devices, resp, err +} + +// Get returns a device by id +func (s *DeviceServiceOp) Get(deviceID string) (*Device, *Response, error) { + path := fmt.Sprintf("%s/%s?include=facility", deviceBasePath, deviceID) + + req, err := s.client.NewRequest("GET", path, nil) + if err != nil { + return nil, nil, err + } + + device := new(Device) + resp, err := s.client.Do(req, device) + if err != nil { + return nil, resp, err + } + + return device, resp, err +} + +// Create creates a new device +func (s *DeviceServiceOp) Create(createRequest *DeviceCreateRequest) (*Device, *Response, error) { + path := fmt.Sprintf("%s/%s/devices", projectBasePath, createRequest.ProjectID) + + req, err := s.client.NewRequest("POST", path, createRequest) + if err != nil { + return nil, nil, err + } + + device := new(Device) + resp, err := s.client.Do(req, device) + if err != nil { + return nil, resp, err + } + + return device, resp, err +} + +// Delete deletes a device +func (s *DeviceServiceOp) Delete(deviceID string) (*Response, error) { + path := fmt.Sprintf("%s/%s", deviceBasePath, deviceID) + + req, err := s.client.NewRequest("DELETE", path, nil) + if err != nil { + return nil, err + } + + resp, err := s.client.Do(req, nil) + + return resp, err +} + +// Reboot reboots on a device +func (s *DeviceServiceOp) Reboot(deviceID string) (*Response, error) { + path := fmt.Sprintf("%s/%s/actions", deviceBasePath, deviceID) + + action := &DeviceActionRequest{Type: "reboot"} + req, err := s.client.NewRequest("POST", path, action) + if err != nil { + return nil, err + } + + resp, err := s.client.Do(req, nil) + + return resp, err +} + +// PowerOff powers on a device +func (s *DeviceServiceOp) PowerOff(deviceID string) (*Response, error) { + path := fmt.Sprintf("%s/%s/actions", deviceBasePath, deviceID) + + action := &DeviceActionRequest{Type: "power_off"} + req, err := s.client.NewRequest("POST", path, action) + if err != nil { + return nil, err + } + + resp, err := s.client.Do(req, nil) + + return resp, err +} + +// PowerOn powers on a device +func (s *DeviceServiceOp) PowerOn(deviceID string) (*Response, error) { + path := fmt.Sprintf("%s/%s/actions", deviceBasePath, deviceID) + + action := &DeviceActionRequest{Type: "power_on"} + req, err := s.client.NewRequest("POST", path, action) + if err != nil { + return nil, err + } + + resp, err := s.client.Do(req, nil) + + return resp, err +} + +type lockDeviceType struct { + Locked bool `json:"locked"` +} + +// Lock sets a device to "locked" +func (s *DeviceServiceOp) Lock(deviceID string) (*Response, error) { + path := fmt.Sprintf("%s/%s", deviceBasePath, deviceID) + + action := lockDeviceType{Locked: true} + req, err := s.client.NewRequest("PATCH", path, action) + if err != nil { + return nil, err + } + + resp, err := s.client.Do(req, nil) + + return resp, err + +} + +// Unlock sets a device to "locked" +func (s *DeviceServiceOp) Unlock(deviceID string) (*Response, error) { + path := fmt.Sprintf("%s/%s", deviceBasePath, deviceID) + + action := lockDeviceType{Locked: false} + req, err := s.client.NewRequest("PATCH", path, action) + if err != nil { + return nil, err + } + + resp, err := s.client.Do(req, nil) + + return resp, 
err +} diff --git a/vendor/github.com/packethost/packngo/email.go b/vendor/github.com/packethost/packngo/email.go new file mode 100644 index 000000000..98ed87147 --- /dev/null +++ b/vendor/github.com/packethost/packngo/email.go @@ -0,0 +1,40 @@ +package packngo + +const emailBasePath = "/emails" + +// EmailService interface defines available email methods +type EmailService interface { + Get(string) (*Email, *Response, error) +} + +// Email represents a user's email address +type Email struct { + ID string `json:"id"` + Address string `json:"address"` + Default bool `json:"default,omitempty"` + URL string `json:"href,omitempty"` +} +func (e Email) String() string { + return Stringify(e) +} + +// EmailServiceOp implements EmailService +type EmailServiceOp struct { + client *Client +} + +// Get retrieves an email by id +func (s *EmailServiceOp) Get(emailID string) (*Email, *Response, error) { + req, err := s.client.NewRequest("GET", emailBasePath, nil) + if err != nil { + return nil, nil, err + } + + email := new(Email) + resp, err := s.client.Do(req, email) + if err != nil { + return nil, resp, err + } + + return email, resp, err +} diff --git a/vendor/github.com/packethost/packngo/facilities.go b/vendor/github.com/packethost/packngo/facilities.go new file mode 100644 index 000000000..a6deab895 --- /dev/null +++ b/vendor/github.com/packethost/packngo/facilities.go @@ -0,0 +1,55 @@ +package packngo + +const facilityBasePath = "/facilities" + +// FacilityService interface defines available facility methods +type FacilityService interface { + List() ([]Facility, *Response, error) +} + +type facilityRoot struct { + Facilities []Facility `json:"facilities"` +} + +// Facility represents a Packet facility +type Facility struct { + ID string `json:"id"` + Name string `json:"name,omitempty"` + Code string `json:"code,omitempty"` + Features []string `json:"features,omitempty"` + Address *Address `json:"address,omitempty"` + URL string `json:"href,omitempty"` +} +func (f Facility) String() string { + return Stringify(f) +} + +// Address - the physical address of the facility +type Address struct { + ID string `json:"id,omitempty"` +} +func (a Address) String() string { + return Stringify(a) +} + + +// FacilityServiceOp implements FacilityService +type FacilityServiceOp struct { + client *Client +} + +// List returns all available Packet facilities +func (s *FacilityServiceOp) List() ([]Facility, *Response, error) { + req, err := s.client.NewRequest("GET", facilityBasePath, nil) + if err != nil { + return nil, nil, err + } + + root := new(facilityRoot) + resp, err := s.client.Do(req, root) + if err != nil { + return nil, resp, err + } + + return root.Facilities, resp, err +} diff --git a/vendor/github.com/packethost/packngo/operatingsystems.go b/vendor/github.com/packethost/packngo/operatingsystems.go new file mode 100644 index 000000000..bad59e86c --- /dev/null +++ b/vendor/github.com/packethost/packngo/operatingsystems.go @@ -0,0 +1,44 @@ +package packngo + +const osBasePath = "/operating-systems" + +// OSService interface defines available operating_systems methods +type OSService interface { + List() ([]OS, *Response, error) +} + +type osRoot struct { + OperatingSystems []OS `json:"operating_systems"` +} + +// OS represents a Packet operating system +type OS struct { + Name string `json:"name"` + Slug string `json:"slug"` + Distro string `json:"distro"` + Version string `json:"version"` +} +func (o OS) String() string { + return Stringify(o) +} + +// OSServiceOp implements OSService +type 
OSServiceOp struct { + client *Client +} + +// List returns all available operating systems +func (s *OSServiceOp) List() ([]OS, *Response, error) { + req, err := s.client.NewRequest("GET", osBasePath, nil) + if err != nil { + return nil, nil, err + } + + root := new(osRoot) + resp, err := s.client.Do(req, root) + if err != nil { + return nil, resp, err + } + + return root.OperatingSystems, resp, err +} diff --git a/vendor/github.com/packethost/packngo/packngo.go b/vendor/github.com/packethost/packngo/packngo.go new file mode 100644 index 000000000..c3d70a582 --- /dev/null +++ b/vendor/github.com/packethost/packngo/packngo.go @@ -0,0 +1,205 @@ +package packngo + +import ( + "bytes" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "net/http" + "net/url" + "strconv" + "strings" + "time" +) + +const ( + libraryVersion = "0.1.0" + baseURL = "https://api.packet.net/" + userAgent = "packngo/" + libraryVersion + mediaType = "application/json" + + headerRateLimit = "X-RateLimit-Limit" + headerRateRemaining = "X-RateLimit-Remaining" + headerRateReset = "X-RateLimit-Reset" +) + +// ListOptions specifies optional global API parameters +type ListOptions struct { + // for paginated result sets, page of results to retrieve + Page int `url:"page,omitempty"` + + // for paginated result sets, the number of results to return per page + PerPage int `url:"per_page,omitempty"` + + // specify which resources you want to return as collections instead of references + Includes string +} + +// Response is the http response from api calls +type Response struct { + *http.Response + Rate +} + +func (r *Response) populateRate() { + // parse the rate limit headers and populate Response.Rate + if limit := r.Header.Get(headerRateLimit); limit != "" { + r.Rate.RequestLimit, _ = strconv.Atoi(limit) + } + if remaining := r.Header.Get(headerRateRemaining); remaining != "" { + r.Rate.RequestsRemaining, _ = strconv.Atoi(remaining) + } + if reset := r.Header.Get(headerRateReset); reset != "" { + if v, _ := strconv.ParseInt(reset, 10, 64); v != 0 { + r.Rate.Reset = Timestamp{time.Unix(v, 0)} + } + } +} + +// ErrorResponse is the http response used on errrors +type ErrorResponse struct { + Response *http.Response + Errors []string `json:"errors"` +} + +func (r *ErrorResponse) Error() string { + return fmt.Sprintf("%v %v: %d %v", + r.Response.Request.Method, r.Response.Request.URL, r.Response.StatusCode, strings.Join(r.Errors, ", ")) +} + +// Client is the base API Client +type Client struct { + client *http.Client + + BaseURL *url.URL + + UserAgent string + ConsumerToken string + APIKey string + + RateLimit Rate + + // Packet Api Objects + Plans PlanService + Users UserService + Emails EmailService + SSHKeys SSHKeyService + Devices DeviceService + Projects ProjectService + Facilities FacilityService + OperatingSystems OSService +} + +// NewRequest inits a new http request with the proper headers +func (c *Client) NewRequest(method, path string, body interface{}) (*http.Request, error) { + // relative path to append to the endpoint url, no leading slash please + rel, err := url.Parse(path) + if err != nil { + return nil, err + } + + u := c.BaseURL.ResolveReference(rel) + + // json encode the request body, if any + buf := new(bytes.Buffer) + if body != nil { + err := json.NewEncoder(buf).Encode(body) + if err != nil { + return nil, err + } + } + + req, err := http.NewRequest(method, u.String(), buf) + if err != nil { + return nil, err + } + + req.Close = true + + req.Header.Add("X-Auth-Token", c.APIKey) + 
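+	// (the consumer token below accompanies the API key on every request)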
req.Header.Add("X-Consumer-Token", c.ConsumerToken) + + req.Header.Add("Content-Type", mediaType) + req.Header.Add("Accept", mediaType) + req.Header.Add("User-Agent", userAgent) + return req, nil +} + +// Do executes the http request +func (c *Client) Do(req *http.Request, v interface{}) (*Response, error) { + resp, err := c.client.Do(req) + if err != nil { + return nil, err + } + + defer resp.Body.Close() + + response := Response{Response: resp} + response.populateRate() + c.RateLimit = response.Rate + + err = checkResponse(resp) + // if the response is an error, return the ErrorResponse + if err != nil { + return &response, err + } + + if v != nil { + // if v implements the io.Writer interface, return the raw response + if w, ok := v.(io.Writer); ok { + io.Copy(w, resp.Body) + } else { + err = json.NewDecoder(resp.Body).Decode(v) + if err != nil { + return &response, err + } + } + } + + return &response, err +} + +// NewClient initializes and returns a Client, use this to get an API Client to operate on +// N.B.: Packet's API certificate requires Go 1.5+ to successfully parse. If you are using +// an older version of Go, pass in a custom http.Client with a custom TLS configuration +// that sets "InsecureSkipVerify" to "true" +func NewClient(consumerToken string, apiKey string, httpClient *http.Client) *Client { + if httpClient == nil { + // Don't fall back on http.DefaultClient as it's not nice to adjust state + // implicitly. If the client wants to use http.DefaultClient, they can + // pass it in explicitly. + httpClient = &http.Client{} + } + + BaseURL, _ := url.Parse(baseURL) + + c := &Client{client: httpClient, BaseURL: BaseURL, UserAgent: userAgent, ConsumerToken: consumerToken, APIKey: apiKey} + c.Plans = &PlanServiceOp{client: c} + c.Users = &UserServiceOp{client: c} + c.Emails = &EmailServiceOp{client: c} + c.SSHKeys = &SSHKeyServiceOp{client: c} + c.Devices = &DeviceServiceOp{client: c} + c.Projects = &ProjectServiceOp{client: c} + c.Facilities = &FacilityServiceOp{client: c} + c.OperatingSystems = &OSServiceOp{client: c} + + return c +} + +func checkResponse(r *http.Response) error { + // return if http status code is within 200 range + if c := r.StatusCode; c >= 200 && c <= 299 { + // response is good, return + return nil + } + + errorResponse := &ErrorResponse{Response: r} + data, err := ioutil.ReadAll(r.Body) + // if the response has a body, populate the message in errorResponse + if err == nil && len(data) > 0 { + json.Unmarshal(data, errorResponse) + } + + return errorResponse +} diff --git a/vendor/github.com/packethost/packngo/plans.go b/vendor/github.com/packethost/packngo/plans.go new file mode 100644 index 000000000..6350443e7 --- /dev/null +++ b/vendor/github.com/packethost/packngo/plans.go @@ -0,0 +1,114 @@ +package packngo + +const planBasePath = "/plans" + +// PlanService interface defines available plan methods +type PlanService interface { + List() ([]Plan, *Response, error) +} + +type planRoot struct { + Plans []Plan `json:"plans"` +} + +// Plan represents a Packet service plan +type Plan struct { + ID string `json:"id"` + Slug string `json:"slug,omitempty"` + Name string `json:"name,omitempty"` + Description string `json:"description,omitempty"` + Line string `json:"line,omitempty"` + Specs *Specs `json:"specs,omitempty"` + Pricing *Pricing `json:"pricing,omitempty"` +} +func (p Plan) String() string { + return Stringify(p) +} + +// Specs - the server specs for a plan +type Specs struct { + Cpus []*Cpus `json:"cpus,omitempty"` + Memory *Memory 
`json:"memory,omitempty"` + Drives []*Drives `json:"drives,omitempty"` + Nics []*Nics `json:"nics,omitempty"` + Features *Features `json:"features,omitempty"` +} +func (s Specs) String() string { + return Stringify(s) +} + +// Cpus - the CPU config details for specs on a plan +type Cpus struct { + Count int `json:"count,omitempty"` + Type string `json:"type,omitempty"` +} +func (c Cpus) String() string { + return Stringify(c) +} + +// Memory - the RAM config details for specs on a plan +type Memory struct { + Total string `json:"total,omitempty"` +} +func (m Memory) String() string { + return Stringify(m) +} + +// Drives - the storage config details for specs on a plan +type Drives struct { + Count int `json:"count,omitempty"` + Size string `json:"size,omitempty"` + Type string `json:"type,omitempty"` +} +func (d Drives) String() string { + return Stringify(d) +} + +// Nics - the network hardware details for specs on a plan +type Nics struct { + Count int `json:"count,omitempty"` + Type string `json:"type,omitempty"` +} +func (n Nics) String() string { + return Stringify(n) +} + +// Features - other features in the specs for a plan +type Features struct { + Raid bool `json:"raid,omitempty"` + Txt bool `json:"txt,omitempty"` +} +func (f Features) String() string { + return Stringify(f) +} + +// Pricing - the pricing options on a plan +type Pricing struct { + Hourly float32 `json:"hourly,omitempty"` + Monthly float32 `json:"monthly,omitempty"` +} +func (p Pricing) String() string { + return Stringify(p) +} + +// PlanServiceOp implements PlanService +type PlanServiceOp struct { + client *Client +} + +// List method returns all available plans +func (s *PlanServiceOp) List() ([]Plan, *Response, error) { + req, err := s.client.NewRequest("GET", planBasePath, nil) + + if err != nil { + return nil, nil, err + } + + root := new(planRoot) + resp, err := s.client.Do(req, root) + if err != nil { + return nil, resp, err + } + + return root.Plans, resp, err +} diff --git a/vendor/github.com/packethost/packngo/projects.go b/vendor/github.com/packethost/packngo/projects.go new file mode 100644 index 000000000..391a4b3ce --- /dev/null +++ b/vendor/github.com/packethost/packngo/projects.go @@ -0,0 +1,136 @@ +package packngo + +import "fmt" + +const projectBasePath = "/projects" + +// ProjectService interface defines available project methods +type ProjectService interface { + List() ([]Project, *Response, error) + Get(string) (*Project, *Response, error) + Create(*ProjectCreateRequest) (*Project, *Response, error) + Update(*ProjectUpdateRequest) (*Project, *Response, error) + Delete(string) (*Response, error) +} + +type projectsRoot struct { + Projects []Project `json:"projects"` +} +// Project represents a Packet project +type Project struct { + ID string `json:"id"` + Name string `json:"name,omitempty"` + Created string `json:"created_at,omitempty"` + Updated string `json:"updated_at,omitempty"` + Users []User `json:"members,omitempty"` + Devices []Device `json:"devices,omitempty"` + SSHKeys []SSHKey `json:"ssh_keys,omitempty"` + URL string `json:"href,omitempty"` +} +func (p Project) String() string { + return Stringify(p) +} + +// ProjectCreateRequest type used to create a Packet project +type ProjectCreateRequest struct { + Name string `json:"name"` + PaymentMethod string `json:"payment_method,omitempty"` +} +func (p ProjectCreateRequest) String() string { + return Stringify(p) +} + +// ProjectUpdateRequest type used to update a Packet project +type ProjectUpdateRequest struct { + ID string 
`json:"id"` + Name string `json:"name,omitempty"` + PaymentMethod string `json:"payment_method,omitempty"` +} +func (p ProjectUpdateRequest) String() string { + return Stringify(p) +} + +// ProjectServiceOp implements ProjectService +type ProjectServiceOp struct { + client *Client +} + +// List returns the user's projects +func (s *ProjectServiceOp) List() ([]Project, *Response, error) { + req, err := s.client.NewRequest("GET", projectBasePath, nil) + if err != nil { + return nil, nil, err + } + + root := new(projectsRoot) + resp, err := s.client.Do(req, root) + if err != nil { + return nil, resp, err + } + + return root.Projects, resp, err +} + +// Get returns a project by id +func (s *ProjectServiceOp) Get(projectID string) (*Project, *Response, error) { + path := fmt.Sprintf("%s/%s", projectBasePath, projectID) + req, err := s.client.NewRequest("GET", path, nil) + if err != nil { + return nil, nil, err + } + + project := new(Project) + resp, err := s.client.Do(req, project) + if err != nil { + return nil, resp, err + } + + return project, resp, err +} + +// Create creates a new project +func (s *ProjectServiceOp) Create(createRequest *ProjectCreateRequest) (*Project, *Response, error) { + req, err := s.client.NewRequest("POST", projectBasePath, createRequest) + if err != nil { + return nil, nil, err + } + + project := new(Project) + resp, err := s.client.Do(req, project) + if err != nil { + return nil, resp, err + } + + return project, resp, err +} + +// Update updates a project +func (s *ProjectServiceOp) Update(updateRequest *ProjectUpdateRequest) (*Project, *Response, error) { + path := fmt.Sprintf("%s/%s", projectBasePath, updateRequest.ID) + req, err := s.client.NewRequest("PATCH", path, updateRequest) + if err != nil { + return nil, nil, err + } + + project := new(Project) + resp, err := s.client.Do(req, project) + if err != nil { + return nil, resp, err + } + + return project, resp, err +} + +// Delete deletes a project +func (s *ProjectServiceOp) Delete(projectID string) (*Response, error) { + path := fmt.Sprintf("%s/%s", projectBasePath, projectID) + + req, err := s.client.NewRequest("DELETE", path, nil) + if err != nil { + return nil, err + } + + resp, err := s.client.Do(req, nil) + + return resp, err +} diff --git a/vendor/github.com/packethost/packngo/rate.go b/vendor/github.com/packethost/packngo/rate.go new file mode 100644 index 000000000..4dc55acaa --- /dev/null +++ b/vendor/github.com/packethost/packngo/rate.go @@ -0,0 +1,11 @@ +package packngo + +// Rate provides the API request rate limit details +type Rate struct { + RequestLimit int `json:"request_limit"` + RequestsRemaining int `json:"requests_remaining"` + Reset Timestamp `json:"rate_reset"` +} +func (r Rate) String() string { + return Stringify(r) +} diff --git a/vendor/github.com/packethost/packngo/sshkeys.go b/vendor/github.com/packethost/packngo/sshkeys.go new file mode 100644 index 000000000..33c45e169 --- /dev/null +++ b/vendor/github.com/packethost/packngo/sshkeys.go @@ -0,0 +1,138 @@ +package packngo + +import "fmt" + +const sshKeyBasePath = "/ssh-keys" + +// SSHKeyService interface defines available device methods +type SSHKeyService interface { + List() ([]SSHKey, *Response, error) + Get(string) (*SSHKey, *Response, error) + Create(*SSHKeyCreateRequest) (*SSHKey, *Response, error) + Update(*SSHKeyUpdateRequest) (*SSHKey, *Response, error) + Delete(string) (*Response, error) +} + +type sshKeyRoot struct { + SSHKeys []SSHKey `json:"ssh_keys"` +} + +// SSHKey represents a user's ssh key +type SSHKey struct 
{ + ID string `json:"id"` + Label string `json:"label"` + Key string `json:"key"` + FingerPrint string `json:"fingerprint"` + Created string `json:"created_at"` + Updated string `json:"updated_at"` + User User `json:"user,omitempty"` + URL string `json:"href,omitempty"` +} +func (s SSHKey) String() string { + return Stringify(s) +} + +// SSHKeyCreateRequest type used to create an ssh key +type SSHKeyCreateRequest struct { + Label string `json:"label"` + Key string `json:"key"` +} +func (s SSHKeyCreateRequest) String() string { + return Stringify(s) +} + +// SSHKeyUpdateRequest type used to update an ssh key +type SSHKeyUpdateRequest struct { + ID string `json:"id"` + Label string `json:"label"` + Key string `json:"key"` +} +func (s SSHKeyUpdateRequest) String() string { + return Stringify(s) +} + +// SSHKeyServiceOp implements SSHKeyService +type SSHKeyServiceOp struct { + client *Client +} + +// List returns a user's ssh keys +func (s *SSHKeyServiceOp) List() ([]SSHKey, *Response, error) { + req, err := s.client.NewRequest("GET", sshKeyBasePath, nil) + if err != nil { + return nil, nil, err + } + + root := new(sshKeyRoot) + resp, err := s.client.Do(req, root) + if err != nil { + return nil, resp, err + } + + return root.SSHKeys, resp, err +} + +// Get returns an ssh key by id +func (s *SSHKeyServiceOp) Get(sshKeyID string) (*SSHKey, *Response, error) { + path := fmt.Sprintf("%s/%s", sshKeyBasePath, sshKeyID) + + req, err := s.client.NewRequest("GET", path, nil) + if err != nil { + return nil, nil, err + } + + sshKey := new(SSHKey) + resp, err := s.client.Do(req, sshKey) + if err != nil { + return nil, resp, err + } + + return sshKey, resp, err +} + +// Create creates a new ssh key +func (s *SSHKeyServiceOp) Create(createRequest *SSHKeyCreateRequest) (*SSHKey, *Response, error) { + req, err := s.client.NewRequest("POST", sshKeyBasePath, createRequest) + if err != nil { + return nil, nil, err + } + + sshKey := new(SSHKey) + resp, err := s.client.Do(req, sshKey) + if err != nil { + return nil, resp, err + } + + return sshKey, resp, err +} + +// Update updates an ssh key +func (s *SSHKeyServiceOp) Update(updateRequest *SSHKeyUpdateRequest) (*SSHKey, *Response, error) { + path := fmt.Sprintf("%s/%s", sshKeyBasePath, updateRequest.ID) + req, err := s.client.NewRequest("PATCH", path, updateRequest) + if err != nil { + return nil, nil, err + } + + sshKey := new(SSHKey) + resp, err := s.client.Do(req, sshKey) + if err != nil { + return nil, resp, err + } + + return sshKey, resp, err +} + +// Delete deletes an ssh key +func (s *SSHKeyServiceOp) Delete(sshKeyID string) (*Response, error) { + path := fmt.Sprintf("%s/%s", sshKeyBasePath, sshKeyID) + + req, err := s.client.NewRequest("DELETE", path, nil) + if err != nil { + return nil, err + } + + resp, err := s.client.Do(req, nil) + + return resp, err +} diff --git a/vendor/github.com/packethost/packngo/timestamp.go b/vendor/github.com/packethost/packngo/timestamp.go new file mode 100644 index 000000000..c3320ed62 --- /dev/null +++ b/vendor/github.com/packethost/packngo/timestamp.go @@ -0,0 +1,35 @@ +package packngo + +import ( + "strconv" + "time" +) + +// Timestamp represents a time that can be unmarshalled from a JSON string +// formatted as either an RFC3339 or Unix timestamp. All +// exported methods of time.Time can be called on Timestamp. +type Timestamp struct { + time.Time +} + +func (t Timestamp) String() string { + return t.Time.String() +} + +// UnmarshalJSON implements the json.Unmarshaler interface. 
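+// encoding/json calls this automatically whenever a Timestamp is decoded.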
+// Time is expected in RFC3339 or Unix format.
+func (t *Timestamp) UnmarshalJSON(data []byte) (err error) {
+	str := string(data)
+	i, err := strconv.ParseInt(str, 10, 64)
+	if err == nil {
+		t.Time = time.Unix(i, 0)
+	} else {
+		t.Time, err = time.Parse(`"`+time.RFC3339+`"`, str)
+	}
+	return
+}
+
+// Equal reports whether t and u are equal based on time.Equal
+func (t Timestamp) Equal(u Timestamp) bool {
+	return t.Time.Equal(u.Time)
+}
diff --git a/vendor/github.com/packethost/packngo/user.go b/vendor/github.com/packethost/packngo/user.go
new file mode 100644
index 000000000..5bd2d11fa
--- /dev/null
+++ b/vendor/github.com/packethost/packngo/user.go
@@ -0,0 +1,52 @@
+package packngo
+
+import "fmt"
+
+const userBasePath = "/users"
+
+// UserService interface defines available user methods
+type UserService interface {
+	Get(string) (*User, *Response, error)
+}
+
+// User represents a Packet user
+type User struct {
+	ID          string  `json:"id"`
+	FirstName   string  `json:"first_name,omitempty"`
+	LastName    string  `json:"last_name,omitempty"`
+	FullName    string  `json:"full_name,omitempty"`
+	Email       string  `json:"email,omitempty"`
+	TwoFactor   string  `json:"two_factor_auth,omitempty"`
+	AvatarURL   string  `json:"avatar_url,omitempty"`
+	Facebook    string  `json:"facebook,omitempty"`
+	Twitter     string  `json:"twitter,omitempty"`
+	LinkedIn    string  `json:"linkedin,omitempty"`
+	Created     string  `json:"created_at,omitempty"`
+	Updated     string  `json:"updated_at,omitempty"`
+	TimeZone    string  `json:"timezone,omitempty"`
+	Emails      []Email `json:"emails,omitempty"`
+	PhoneNumber string  `json:"phone_number,omitempty"`
+	URL         string  `json:"href,omitempty"`
+}
+func (u User) String() string {
+	return Stringify(u)
+}
+
+// UserServiceOp implements UserService
+type UserServiceOp struct {
+	client *Client
+}
+
+// Get method gets a user by userID
+func (s *UserServiceOp) Get(userID string) (*User, *Response, error) {
+	path := fmt.Sprintf("%s/%s", userBasePath, userID)
+
+	req, err := s.client.NewRequest("GET", path, nil)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	user := new(User)
+	resp, err := s.client.Do(req, user)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return user, resp, err
+}
diff --git a/vendor/github.com/packethost/packngo/utils.go b/vendor/github.com/packethost/packngo/utils.go
new file mode 100644
index 000000000..a77d7b79e
--- /dev/null
+++ b/vendor/github.com/packethost/packngo/utils.go
@@ -0,0 +1,112 @@
+package packngo
+
+import (
+	"bytes"
+	"fmt"
+	"io"
+	"reflect"
+)
+
+var timestampType = reflect.TypeOf(Timestamp{})
+
+// Stringify creates a string representation of the provided message
+func Stringify(message interface{}) string {
+	var buf bytes.Buffer
+	v := reflect.ValueOf(message)
+	stringifyValue(&buf, v)
+	return buf.String()
+}
+
+// String allocates a new string value to store v and returns a pointer to it
+func String(v string) *string {
+	p := new(string)
+	*p = v
+	return p
+}
+
+// Int allocates a new int value to store v and returns a pointer to it.
+func Int(v int) *int {
+	p := new(int)
+	*p = v
+	return p
+}
+
+// Bool allocates a new bool value to store v and returns a pointer to it.
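+// Like String and Int above, it lets callers take the address of a literal
+// value; the pattern comes from go-github, on which this client is based.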
+func Bool(v bool) *bool {
+	p := new(bool)
+	*p = v
+	return p
+}
+
+// StreamToString converts a reader to a string
+func StreamToString(stream io.Reader) string {
+	buf := new(bytes.Buffer)
+	buf.ReadFrom(stream)
+	return buf.String()
+}
+
+// stringifyValue was graciously cargoculted from the goprotobuf library
+func stringifyValue(w io.Writer, val reflect.Value) {
+	if val.Kind() == reflect.Ptr && val.IsNil() {
+		w.Write([]byte("<nil>"))
+		return
+	}
+
+	v := reflect.Indirect(val)
+
+	switch v.Kind() {
+	case reflect.String:
+		fmt.Fprintf(w, `"%s"`, v)
+	case reflect.Slice:
+		w.Write([]byte{'['})
+		for i := 0; i < v.Len(); i++ {
+			if i > 0 {
+				w.Write([]byte{' '})
+			}
+
+			stringifyValue(w, v.Index(i))
+		}
+
+		w.Write([]byte{']'})
+		return
+	case reflect.Struct:
+		if v.Type().Name() != "" {
+			w.Write([]byte(v.Type().String()))
+		}
+
+		// special handling of Timestamp values
+		if v.Type() == timestampType {
+			fmt.Fprintf(w, "{%s}", v.Interface())
+			return
+		}
+
+		w.Write([]byte{'{'})
+
+		var sep bool
+		for i := 0; i < v.NumField(); i++ {
+			fv := v.Field(i)
+			if fv.Kind() == reflect.Ptr && fv.IsNil() {
+				continue
+			}
+			if fv.Kind() == reflect.Slice && fv.IsNil() {
+				continue
+			}
+
+			if sep {
+				w.Write([]byte(", "))
+			} else {
+				sep = true
+			}
+
+			w.Write([]byte(v.Type().Field(i).Name))
+			w.Write([]byte{':'})
+			stringifyValue(w, fv)
+		}
+
+		w.Write([]byte{'}'})
+	default:
+		if v.CanInterface() {
+			fmt.Fprint(w, v.Interface())
+		}
+	}
+}
diff --git a/vendor/github.com/pborman/uuid/CONTRIBUTORS b/vendor/github.com/pborman/uuid/CONTRIBUTORS
new file mode 100644
index 000000000..b382a04ed
--- /dev/null
+++ b/vendor/github.com/pborman/uuid/CONTRIBUTORS
@@ -0,0 +1 @@
+Paul Borman
diff --git a/vendor/github.com/pborman/uuid/LICENSE b/vendor/github.com/pborman/uuid/LICENSE
new file mode 100644
index 000000000..5dc68268d
--- /dev/null
+++ b/vendor/github.com/pborman/uuid/LICENSE
@@ -0,0 +1,27 @@
+Copyright (c) 2009,2014 Google Inc. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+   * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+   * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+   * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
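Aside, for reviewers unfamiliar with packngo: the client vendored above is small, and its call pattern mirrors go-github/godo. A minimal, illustrative sketch of driving it follows; the credentials are placeholders, and this snippet is not part of the vendored source:

	package main

	import (
		"fmt"
		"log"

		"github.com/packethost/packngo"
	)

	func main() {
		// NewClient takes a consumer token, an API key, and an optional *http.Client.
		c := packngo.NewClient("example-consumer-token", "example-api-key", nil)

		// Plans.List issues GET /plans and decodes the {"plans": [...]} envelope.
		plans, _, err := c.Plans.List()
		if err != nil {
			log.Fatal(err)
		}
		for _, p := range plans {
			fmt.Printf("%s: %s\n", p.Slug, p.Name)
		}
	}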
diff --git a/vendor/github.com/pborman/uuid/dce.go b/vendor/github.com/pborman/uuid/dce.go new file mode 100644 index 000000000..50a0f2d09 --- /dev/null +++ b/vendor/github.com/pborman/uuid/dce.go @@ -0,0 +1,84 @@ +// Copyright 2011 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uuid + +import ( + "encoding/binary" + "fmt" + "os" +) + +// A Domain represents a Version 2 domain +type Domain byte + +// Domain constants for DCE Security (Version 2) UUIDs. +const ( + Person = Domain(0) + Group = Domain(1) + Org = Domain(2) +) + +// NewDCESecurity returns a DCE Security (Version 2) UUID. +// +// The domain should be one of Person, Group or Org. +// On a POSIX system the id should be the users UID for the Person +// domain and the users GID for the Group. The meaning of id for +// the domain Org or on non-POSIX systems is site defined. +// +// For a given domain/id pair the same token may be returned for up to +// 7 minutes and 10 seconds. +func NewDCESecurity(domain Domain, id uint32) UUID { + uuid := NewUUID() + if uuid != nil { + uuid[6] = (uuid[6] & 0x0f) | 0x20 // Version 2 + uuid[9] = byte(domain) + binary.BigEndian.PutUint32(uuid[0:], id) + } + return uuid +} + +// NewDCEPerson returns a DCE Security (Version 2) UUID in the person +// domain with the id returned by os.Getuid. +// +// NewDCEPerson(Person, uint32(os.Getuid())) +func NewDCEPerson() UUID { + return NewDCESecurity(Person, uint32(os.Getuid())) +} + +// NewDCEGroup returns a DCE Security (Version 2) UUID in the group +// domain with the id returned by os.Getgid. +// +// NewDCEGroup(Group, uint32(os.Getgid())) +func NewDCEGroup() UUID { + return NewDCESecurity(Group, uint32(os.Getgid())) +} + +// Domain returns the domain for a Version 2 UUID or false. +func (uuid UUID) Domain() (Domain, bool) { + if v, _ := uuid.Version(); v != 2 { + return 0, false + } + return Domain(uuid[9]), true +} + +// Id returns the id for a Version 2 UUID or false. +func (uuid UUID) Id() (uint32, bool) { + if v, _ := uuid.Version(); v != 2 { + return 0, false + } + return binary.BigEndian.Uint32(uuid[0:4]), true +} + +func (d Domain) String() string { + switch d { + case Person: + return "Person" + case Group: + return "Group" + case Org: + return "Org" + } + return fmt.Sprintf("Domain%d", int(d)) +} diff --git a/vendor/github.com/pborman/uuid/doc.go b/vendor/github.com/pborman/uuid/doc.go new file mode 100644 index 000000000..d8bd013e6 --- /dev/null +++ b/vendor/github.com/pborman/uuid/doc.go @@ -0,0 +1,8 @@ +// Copyright 2011 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// The uuid package generates and inspects UUIDs. +// +// UUIDs are based on RFC 4122 and DCE 1.1: Authentication and Security Services. +package uuid diff --git a/vendor/github.com/pborman/uuid/hash.go b/vendor/github.com/pborman/uuid/hash.go new file mode 100644 index 000000000..cdd4192fd --- /dev/null +++ b/vendor/github.com/pborman/uuid/hash.go @@ -0,0 +1,53 @@ +// Copyright 2011 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+
+package uuid
+
+import (
+	"crypto/md5"
+	"crypto/sha1"
+	"hash"
+)
+
+// Well known Name Space IDs and UUIDs
+var (
+	NameSpace_DNS  = Parse("6ba7b810-9dad-11d1-80b4-00c04fd430c8")
+	NameSpace_URL  = Parse("6ba7b811-9dad-11d1-80b4-00c04fd430c8")
+	NameSpace_OID  = Parse("6ba7b812-9dad-11d1-80b4-00c04fd430c8")
+	NameSpace_X500 = Parse("6ba7b814-9dad-11d1-80b4-00c04fd430c8")
+	NIL            = Parse("00000000-0000-0000-0000-000000000000")
+)
+
+// NewHash returns a new UUID derived from the hash of space concatenated with
+// data generated by h. The hash should be at least 16 bytes in length. The
+// first 16 bytes of the hash are used to form the UUID. The version of the
+// UUID will be the lower 4 bits of version. NewHash is used to implement
+// NewMD5 and NewSHA1.
+func NewHash(h hash.Hash, space UUID, data []byte, version int) UUID {
+	h.Reset()
+	h.Write(space)
+	h.Write([]byte(data))
+	s := h.Sum(nil)
+	uuid := make([]byte, 16)
+	copy(uuid, s)
+	uuid[6] = (uuid[6] & 0x0f) | uint8((version&0xf)<<4)
+	uuid[8] = (uuid[8] & 0x3f) | 0x80 // RFC 4122 variant
+	return uuid
+}
+
+// NewMD5 returns a new MD5 (Version 3) UUID based on the
+// supplied name space and data.
+//
+//  NewHash(md5.New(), space, data, 3)
+func NewMD5(space UUID, data []byte) UUID {
+	return NewHash(md5.New(), space, data, 3)
+}
+
+// NewSHA1 returns a new SHA1 (Version 5) UUID based on the
+// supplied name space and data.
+//
+//  NewHash(sha1.New(), space, data, 5)
+func NewSHA1(space UUID, data []byte) UUID {
+	return NewHash(sha1.New(), space, data, 5)
+}
diff --git a/vendor/github.com/pborman/uuid/json.go b/vendor/github.com/pborman/uuid/json.go
new file mode 100644
index 000000000..760580a50
--- /dev/null
+++ b/vendor/github.com/pborman/uuid/json.go
@@ -0,0 +1,30 @@
+// Copyright 2014 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package uuid
+
+import "errors"
+
+func (u UUID) MarshalJSON() ([]byte, error) {
+	if len(u) == 0 {
+		return []byte(`""`), nil
+	}
+	return []byte(`"` + u.String() + `"`), nil
+}
+
+func (u *UUID) UnmarshalJSON(data []byte) error {
+	if len(data) == 0 || string(data) == `""` {
+		return nil
+	}
+	if len(data) < 2 || data[0] != '"' || data[len(data)-1] != '"' {
+		return errors.New("invalid UUID format")
+	}
+	data = data[1 : len(data)-1]
+	uu := Parse(string(data))
+	if uu == nil {
+		return errors.New("invalid UUID format")
+	}
+	*u = uu
+	return nil
+}
diff --git a/vendor/github.com/pborman/uuid/json_test.go b/vendor/github.com/pborman/uuid/json_test.go
new file mode 100644
index 000000000..b5eae0924
--- /dev/null
+++ b/vendor/github.com/pborman/uuid/json_test.go
@@ -0,0 +1,32 @@
+// Copyright 2014 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+ +package uuid + +import ( + "encoding/json" + "reflect" + "testing" +) + +var testUUID = Parse("f47ac10b-58cc-0372-8567-0e02b2c3d479") + +func TestJSON(t *testing.T) { + type S struct { + ID1 UUID + ID2 UUID + } + s1 := S{ID1: testUUID} + data, err := json.Marshal(&s1) + if err != nil { + t.Fatal(err) + } + var s2 S + if err := json.Unmarshal(data, &s2); err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(&s1, &s2) { + t.Errorf("got %#v, want %#v", s2, s1) + } +} diff --git a/vendor/github.com/pborman/uuid/node.go b/vendor/github.com/pborman/uuid/node.go new file mode 100644 index 000000000..dd0a8ac18 --- /dev/null +++ b/vendor/github.com/pborman/uuid/node.go @@ -0,0 +1,101 @@ +// Copyright 2011 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uuid + +import "net" + +var ( + interfaces []net.Interface // cached list of interfaces + ifname string // name of interface being used + nodeID []byte // hardware for version 1 UUIDs +) + +// NodeInterface returns the name of the interface from which the NodeID was +// derived. The interface "user" is returned if the NodeID was set by +// SetNodeID. +func NodeInterface() string { + return ifname +} + +// SetNodeInterface selects the hardware address to be used for Version 1 UUIDs. +// If name is "" then the first usable interface found will be used or a random +// Node ID will be generated. If a named interface cannot be found then false +// is returned. +// +// SetNodeInterface never fails when name is "". +func SetNodeInterface(name string) bool { + if interfaces == nil { + var err error + interfaces, err = net.Interfaces() + if err != nil && name != "" { + return false + } + } + + for _, ifs := range interfaces { + if len(ifs.HardwareAddr) >= 6 && (name == "" || name == ifs.Name) { + if setNodeID(ifs.HardwareAddr) { + ifname = ifs.Name + return true + } + } + } + + // We found no interfaces with a valid hardware address. If name + // does not specify a specific interface generate a random Node ID + // (section 4.1.6) + if name == "" { + if nodeID == nil { + nodeID = make([]byte, 6) + } + randomBits(nodeID) + return true + } + return false +} + +// NodeID returns a slice of a copy of the current Node ID, setting the Node ID +// if not already set. +func NodeID() []byte { + if nodeID == nil { + SetNodeInterface("") + } + nid := make([]byte, 6) + copy(nid, nodeID) + return nid +} + +// SetNodeID sets the Node ID to be used for Version 1 UUIDs. The first 6 bytes +// of id are used. If id is less than 6 bytes then false is returned and the +// Node ID is not set. +func SetNodeID(id []byte) bool { + if setNodeID(id) { + ifname = "user" + return true + } + return false +} + +func setNodeID(id []byte) bool { + if len(id) < 6 { + return false + } + if nodeID == nil { + nodeID = make([]byte, 6) + } + copy(nodeID, id) + return true +} + +// NodeID returns the 6 byte node id encoded in uuid. It returns nil if uuid is +// not valid. The NodeID is only well defined for version 1 and 2 UUIDs. +func (uuid UUID) NodeID() []byte { + if len(uuid) != 16 { + return nil + } + node := make([]byte, 6) + copy(node, uuid[10:]) + return node +} diff --git a/vendor/github.com/pborman/uuid/seq_test.go b/vendor/github.com/pborman/uuid/seq_test.go new file mode 100644 index 000000000..3b3d1430d --- /dev/null +++ b/vendor/github.com/pborman/uuid/seq_test.go @@ -0,0 +1,66 @@ +// Copyright 2014 Google Inc. All rights reserved. 
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package uuid
+
+import (
+	"flag"
+	"runtime"
+	"testing"
+	"time"
+)
+
+// This test is only run when --regressions is passed on the go test line.
+var regressions = flag.Bool("regressions", false, "run uuid regression tests")
+
+// TestClockSeqRace tests for a particular race condition of returning two
+// identical Version1 UUIDs. The duration of 1 minute was chosen because the
+// race condition, before being fixed, nearly always occurred in under 30
+// seconds.
+func TestClockSeqRace(t *testing.T) {
+	if !*regressions {
+		t.Skip("skipping regression tests")
+	}
+	duration := time.Minute
+
+	done := make(chan struct{})
+	defer close(done)
+
+	ch := make(chan UUID, 10000)
+	ncpu := runtime.NumCPU()
+	switch ncpu {
+	case 0, 1:
+		// We can't run the test effectively.
+		t.Skip("skipping race test, only one CPU detected")
+		return
+	default:
+		runtime.GOMAXPROCS(ncpu)
+	}
+	for i := 0; i < ncpu; i++ {
+		go func() {
+			for {
+				select {
+				case <-done:
+					return
+				case ch <- NewUUID():
+				}
+			}
+		}()
+	}
+
+	uuids := make(map[string]bool)
+	cnt := 0
+	start := time.Now()
+	for u := range ch {
+		s := u.String()
+		if uuids[s] {
+			t.Errorf("duplicate uuid after %d in %v: %s", cnt, time.Since(start), s)
+			return
+		}
+		uuids[s] = true
+		if time.Since(start) > duration {
+			return
+		}
+		cnt++
+	}
+}
diff --git a/vendor/github.com/pborman/uuid/sql.go b/vendor/github.com/pborman/uuid/sql.go
new file mode 100644
index 000000000..98b23aa15
--- /dev/null
+++ b/vendor/github.com/pborman/uuid/sql.go
@@ -0,0 +1,48 @@
+// Copyright 2015 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package uuid
+
+import (
+	"errors"
+	"fmt"
+)
+
+// Scan implements sql.Scanner so UUIDs can be read from databases transparently
+// Currently, database types that map to string and []byte are supported. Please
+// consult database-specific driver documentation for matching types.
+func (uuid *UUID) Scan(src interface{}) error {
+	switch src.(type) {
+	case string:
+		// see uuid.Parse for required string format
+		parsed := Parse(src.(string))
+
+		if parsed == nil {
+			return errors.New("Scan: invalid UUID format")
+		}
+
+		*uuid = parsed
+	case []byte:
+		b := src.([]byte)
+
+		// assumes a simple slice of bytes if 16 bytes
+		// otherwise attempts to parse
+		if len(b) == 16 {
+			*uuid = UUID(b)
+		} else {
+			u := Parse(string(b))
+
+			if u == nil {
+				return errors.New("Scan: invalid UUID format")
+			}
+
+			*uuid = u
+		}
+
+	default:
+		return fmt.Errorf("Scan: unable to scan type %T into UUID", src)
+	}
+
+	return nil
+}
diff --git a/vendor/github.com/pborman/uuid/sql_test.go b/vendor/github.com/pborman/uuid/sql_test.go
new file mode 100644
index 000000000..83bac8cac
--- /dev/null
+++ b/vendor/github.com/pborman/uuid/sql_test.go
@@ -0,0 +1,58 @@
+// Copyright 2015 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
diff --git a/vendor/github.com/pborman/uuid/sql_test.go b/vendor/github.com/pborman/uuid/sql_test.go
new file mode 100644
index 000000000..83bac8cac
--- /dev/null
+++ b/vendor/github.com/pborman/uuid/sql_test.go
@@ -0,0 +1,58 @@
+// Copyright 2015 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package uuid
+
+import (
+	"strings"
+	"testing"
+)
+
+func TestScan(t *testing.T) {
+	var stringTest string = "f47ac10b-58cc-0372-8567-0e02b2c3d479"
+	var byteTest []byte = Parse(stringTest)
+	var badTypeTest int = 6
+	var invalidTest string = "f47ac10b-58cc-0372-8567-0e02b2c3d4"
+	var invalidByteTest []byte = Parse(invalidTest)
+
+	var uuid UUID
+	err := (&uuid).Scan(stringTest)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	err = (&uuid).Scan([]byte(stringTest))
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	err = (&uuid).Scan(byteTest)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	err = (&uuid).Scan(badTypeTest)
+	if err == nil {
+		t.Error("int correctly parsed and shouldn't have")
+	}
+	if !strings.Contains(err.Error(), "unable to scan type") {
+		t.Error("attempting to parse an int returned an incorrect error message")
+	}
+
+	err = (&uuid).Scan(invalidTest)
+	if err == nil {
+		t.Error("invalid uuid was parsed without error")
+	}
+	if !strings.Contains(err.Error(), "invalid UUID") {
+		t.Error("attempting to parse an invalid UUID returned an incorrect error message")
+	}
+
+	err = (&uuid).Scan(invalidByteTest)
+	if err == nil {
+		t.Error("invalid byte uuid was parsed without error")
+	}
+	if !strings.Contains(err.Error(), "invalid UUID") {
+		t.Error("attempting to parse an invalid byte UUID returned an incorrect error message")
+	}
+}
diff --git a/vendor/github.com/pborman/uuid/time.go b/vendor/github.com/pborman/uuid/time.go
new file mode 100644
index 000000000..7ebc9bef1
--- /dev/null
+++ b/vendor/github.com/pborman/uuid/time.go
@@ -0,0 +1,132 @@
+// Copyright 2014 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package uuid
+
+import (
+	"encoding/binary"
+	"sync"
+	"time"
+)
+
+// A Time represents a time as the number of 100's of nanoseconds since 15 Oct
+// 1582.
+type Time int64
+
+const (
+	lillian    = 2299160          // Julian day of 15 Oct 1582
+	unix       = 2440587          // Julian day of 1 Jan 1970
+	epoch      = unix - lillian   // Days between epochs
+	g1582      = epoch * 86400    // seconds between epochs
+	g1582ns100 = g1582 * 10000000 // 100s of nanoseconds between epochs
+)
+
+var (
+	mu        sync.Mutex
+	lasttime  uint64 // last time we returned
+	clock_seq uint16 // clock sequence for this run
+
+	timeNow = time.Now // for testing
+)
+
+// UnixTime converts t to the number of seconds and nanoseconds since the Unix
+// epoch of 1 Jan 1970.
+func (t Time) UnixTime() (sec, nsec int64) {
+	sec = int64(t - g1582ns100)
+	nsec = (sec % 10000000) * 100
+	sec /= 10000000
+	return sec, nsec
+}
+
+// GetTime returns the current Time (100s of nanoseconds since 15 Oct 1582) and
+// clock sequence as well as adjusting the clock sequence as needed. An error
+// is returned if the current time cannot be determined.
+func GetTime() (Time, uint16, error) {
+	defer mu.Unlock()
+	mu.Lock()
+	return getTime()
+}
+
+func getTime() (Time, uint16, error) {
+	t := timeNow()
+
+	// If we don't have a clock sequence already, set one.
+	if clock_seq == 0 {
+		setClockSequence(-1)
+	}
+	now := uint64(t.UnixNano()/100) + g1582ns100
+
+	// If time has gone backwards with this clock sequence then we
+	// increment the clock sequence
+	if now <= lasttime {
+		clock_seq = ((clock_seq + 1) & 0x3fff) | 0x8000
+	}
+	lasttime = now
+	return Time(now), clock_seq, nil
+}
+
+// ClockSequence returns the current clock sequence, generating one if not
+// already set. The clock sequence is only used for Version 1 UUIDs.
+//
+// The uuid package does not use global static storage for the clock sequence or
+// the last time a UUID was generated. Unless SetClockSequence is used, a new
+// random clock sequence is generated the first time a clock sequence is
+// requested by ClockSequence, GetTime, or NewUUID. (section 4.2.1.1)
+func ClockSequence() int {
+	defer mu.Unlock()
+	mu.Lock()
+	return clockSequence()
+}
+
+func clockSequence() int {
+	if clock_seq == 0 {
+		setClockSequence(-1)
+	}
+	return int(clock_seq & 0x3fff)
+}
+
+// SetClockSequence sets the clock sequence to the lower 14 bits of seq.
+// Setting to -1 causes a new sequence to be generated.
+func SetClockSequence(seq int) {
+	defer mu.Unlock()
+	mu.Lock()
+	setClockSequence(seq)
+}
+
+func setClockSequence(seq int) {
+	if seq == -1 {
+		var b [2]byte
+		randomBits(b[:]) // clock sequence
+		seq = int(b[0])<<8 | int(b[1])
+	}
+	old_seq := clock_seq
+	clock_seq = uint16(seq&0x3fff) | 0x8000 // Set our variant
+	if old_seq != clock_seq {
+		lasttime = 0
+	}
+}
+
+// Time returns the time in 100s of nanoseconds since 15 Oct 1582 encoded in
+// uuid. It returns false if uuid is not valid. The time is only well defined
+// for version 1 and 2 UUIDs.
+func (uuid UUID) Time() (Time, bool) {
+	if len(uuid) != 16 {
+		return 0, false
+	}
+	time := int64(binary.BigEndian.Uint32(uuid[0:4]))
+	time |= int64(binary.BigEndian.Uint16(uuid[4:6])) << 32
+	time |= int64(binary.BigEndian.Uint16(uuid[6:8])&0xfff) << 48
+	return Time(time), true
+}
+
+// ClockSequence returns the clock sequence encoded in uuid. It returns false
+// if uuid is not valid. The clock sequence is only well defined for version 1
+// and 2 UUIDs.
+func (uuid UUID) ClockSequence() (int, bool) {
+	if len(uuid) != 16 {
+		return 0, false
+	}
+	return int(binary.BigEndian.Uint16(uuid[8:10])) & 0x3fff, true
+}
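The Time helpers above count 100-nanosecond intervals from the RFC 4122 epoch (15 Oct 1582) and rebase them onto the Unix epoch via UnixTime. A small sketch, using only the vendored API, of recovering the timestamp and clock sequence embedded in a version 1 UUID:

```go
package main

import (
	"fmt"
	"time"

	"github.com/pborman/uuid"
)

func main() {
	u := uuid.NewUUID() // version 1: encodes time, clock sequence, node
	if ts, ok := u.Time(); ok {
		sec, nsec := ts.UnixTime() // rebase the 1582 epoch onto the Unix epoch
		fmt.Println(time.Unix(sec, nsec).UTC())
	}
	if seq, ok := u.ClockSequence(); ok {
		fmt.Printf("clock sequence: %#04x\n", seq)
	}
}
```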
diff --git a/vendor/github.com/pborman/uuid/util.go b/vendor/github.com/pborman/uuid/util.go
new file mode 100644
index 000000000..de40b102c
--- /dev/null
+++ b/vendor/github.com/pborman/uuid/util.go
@@ -0,0 +1,43 @@
+// Copyright 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package uuid
+
+import (
+	"io"
+)
+
+// randomBits completely fills slice b with random data.
+func randomBits(b []byte) {
+	if _, err := io.ReadFull(rander, b); err != nil {
+		panic(err.Error()) // rand should never fail
+	}
+}
+
+// xvalues gives the value of a byte as a hexadecimal digit, or 255 if the
+// byte is not a valid hex digit.
+var xvalues = []byte{
+	255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+	255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+	255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+	0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 255, 255, 255, 255, 255, 255,
+	255, 10, 11, 12, 13, 14, 15, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+	255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+	255, 10, 11, 12, 13, 14, 15, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+	255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+	255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+	255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+	255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+	255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+	255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+	255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+	255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+	255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+}
+
+// xtob converts the first two hex bytes of x into a byte.
+func xtob(x string) (byte, bool) {
+	b1 := xvalues[x[0]]
+	b2 := xvalues[x[1]]
+	return (b1 << 4) | b2, b1 != 255 && b2 != 255
+}
diff --git a/vendor/github.com/pborman/uuid/uuid.go b/vendor/github.com/pborman/uuid/uuid.go
new file mode 100644
index 000000000..2920fae63
--- /dev/null
+++ b/vendor/github.com/pborman/uuid/uuid.go
@@ -0,0 +1,163 @@
+// Copyright 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package uuid
+
+import (
+	"bytes"
+	"crypto/rand"
+	"fmt"
+	"io"
+	"strings"
+)
+
+// A UUID is a 128 bit (16 byte) Universal Unique IDentifier as defined in RFC
+// 4122.
+type UUID []byte
+
+// A Version represents a UUID's version.
+type Version byte
+
+// A Variant represents a UUID's variant.
+type Variant byte
+
+// Constants returned by Variant.
+const (
+	Invalid   = Variant(iota) // Invalid UUID
+	RFC4122                   // The variant specified in RFC4122
+	Reserved                  // Reserved, NCS backward compatibility.
+	Microsoft                 // Reserved, Microsoft Corporation backward compatibility.
+	Future                    // Reserved for future definition.
+)
+
+var rander = rand.Reader // random function
+
+// New returns a new random (version 4) UUID as a string. It is a convenience
+// function for NewRandom().String().
+func New() string {
+	return NewRandom().String()
+}
+
+// Parse decodes s into a UUID or returns nil. Both the UUID form of
+// xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx and
+// urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx are decoded.
+func Parse(s string) UUID {
+	if len(s) == 36+9 {
+		if strings.ToLower(s[:9]) != "urn:uuid:" {
+			return nil
+		}
+		s = s[9:]
+	} else if len(s) != 36 {
+		return nil
+	}
+	if s[8] != '-' || s[13] != '-' || s[18] != '-' || s[23] != '-' {
+		return nil
+	}
+	uuid := make([]byte, 16)
+	for i, x := range []int{
+		0, 2, 4, 6,
+		9, 11,
+		14, 16,
+		19, 21,
+		24, 26, 28, 30, 32, 34} {
+		if v, ok := xtob(s[x:]); !ok {
+			return nil
+		} else {
+			uuid[i] = v
+		}
+	}
+	return uuid
+}
+
+// Equal returns true if uuid1 and uuid2 are equal.
+func Equal(uuid1, uuid2 UUID) bool {
+	return bytes.Equal(uuid1, uuid2)
+}
+
+// String returns the string form of uuid, xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx,
+// or "" if uuid is invalid.
+func (uuid UUID) String() string {
+	if uuid == nil || len(uuid) != 16 {
+		return ""
+	}
+	b := []byte(uuid)
+	return fmt.Sprintf("%08x-%04x-%04x-%04x-%012x",
+		b[:4], b[4:6], b[6:8], b[8:10], b[10:])
+}
+
+// URN returns the RFC 2141 URN form of uuid,
+// urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx, or "" if uuid is invalid.
+func (uuid UUID) URN() string {
+	if uuid == nil || len(uuid) != 16 {
+		return ""
+	}
+	b := []byte(uuid)
+	return fmt.Sprintf("urn:uuid:%08x-%04x-%04x-%04x-%012x",
+		b[:4], b[4:6], b[6:8], b[8:10], b[10:])
+}
+
+// Variant returns the variant encoded in uuid. It returns Invalid if
+// uuid is invalid.
+func (uuid UUID) Variant() Variant {
+	if len(uuid) != 16 {
+		return Invalid
+	}
+	switch {
+	case (uuid[8] & 0xc0) == 0x80:
+		return RFC4122
+	case (uuid[8] & 0xe0) == 0xc0:
+		return Microsoft
+	case (uuid[8] & 0xe0) == 0xe0:
+		return Future
+	default:
+		return Reserved
+	}
+}
+
+// Version returns the version of uuid. It returns false if uuid is not
+// valid.
+func (uuid UUID) Version() (Version, bool) {
+	if len(uuid) != 16 {
+		return 0, false
+	}
+	return Version(uuid[6] >> 4), true
+}
+
+func (v Version) String() string {
+	if v > 15 {
+		return fmt.Sprintf("BAD_VERSION_%d", v)
+	}
+	return fmt.Sprintf("VERSION_%d", v)
+}
+
+func (v Variant) String() string {
+	switch v {
+	case RFC4122:
+		return "RFC4122"
+	case Reserved:
+		return "Reserved"
+	case Microsoft:
+		return "Microsoft"
+	case Future:
+		return "Future"
+	case Invalid:
+		return "Invalid"
+	}
+	return fmt.Sprintf("BadVariant%d", int(v))
+}
+
+// SetRand sets the random number generator to r, which implements io.Reader.
+// If r.Read returns an error when the package requests random data then
+// a panic will be issued.
+//
+// Calling SetRand with nil sets the random number generator to the default
+// generator.
+func SetRand(r io.Reader) {
+	if r == nil {
+		rander = rand.Reader
+		return
+	}
+	rander = r
+}
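A short sketch of the core API defined above: Parse accepts both the bare and urn:uuid: forms, Equal compares byte-wise, and New is shorthand for NewRandom().String():

```go
package main

import (
	"fmt"

	"github.com/pborman/uuid"
)

func main() {
	// Both forms decode to the same 16 bytes; Parse returns nil on failure.
	a := uuid.Parse("f47ac10b-58cc-4372-a567-0e02b2c3d479")
	b := uuid.Parse("urn:uuid:f47ac10b-58cc-4372-a567-0e02b2c3d479")
	fmt.Println(uuid.Equal(a, b)) // true

	if v, ok := a.Version(); ok {
		fmt.Println(v, a.Variant()) // VERSION_4 RFC4122
	}

	fmt.Println(uuid.New()) // fresh random (version 4) UUID string
}
```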
diff --git a/vendor/github.com/pborman/uuid/uuid_test.go b/vendor/github.com/pborman/uuid/uuid_test.go
new file mode 100644
index 000000000..417ebeb26
--- /dev/null
+++ b/vendor/github.com/pborman/uuid/uuid_test.go
@@ -0,0 +1,390 @@
+// Copyright 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package uuid
+
+import (
+	"bytes"
+	"fmt"
+	"os"
+	"strings"
+	"testing"
+	"time"
+)
+
+type test struct {
+	in      string
+	version Version
+	variant Variant
+	isuuid  bool
+}
+
+var tests = []test{
+	{"f47ac10b-58cc-0372-8567-0e02b2c3d479", 0, RFC4122, true},
+	{"f47ac10b-58cc-1372-8567-0e02b2c3d479", 1, RFC4122, true},
+	{"f47ac10b-58cc-2372-8567-0e02b2c3d479", 2, RFC4122, true},
+	{"f47ac10b-58cc-3372-8567-0e02b2c3d479", 3, RFC4122, true},
+	{"f47ac10b-58cc-4372-8567-0e02b2c3d479", 4, RFC4122, true},
+	{"f47ac10b-58cc-5372-8567-0e02b2c3d479", 5, RFC4122, true},
+	{"f47ac10b-58cc-6372-8567-0e02b2c3d479", 6, RFC4122, true},
+	{"f47ac10b-58cc-7372-8567-0e02b2c3d479", 7, RFC4122, true},
+	{"f47ac10b-58cc-8372-8567-0e02b2c3d479", 8, RFC4122, true},
+	{"f47ac10b-58cc-9372-8567-0e02b2c3d479", 9, RFC4122, true},
+	{"f47ac10b-58cc-a372-8567-0e02b2c3d479", 10, RFC4122, true},
+	{"f47ac10b-58cc-b372-8567-0e02b2c3d479", 11, RFC4122, true},
+	{"f47ac10b-58cc-c372-8567-0e02b2c3d479", 12, RFC4122, true},
+	{"f47ac10b-58cc-d372-8567-0e02b2c3d479", 13, RFC4122, true},
+	{"f47ac10b-58cc-e372-8567-0e02b2c3d479", 14, RFC4122, true},
+	{"f47ac10b-58cc-f372-8567-0e02b2c3d479", 15, RFC4122, true},
+
+	{"urn:uuid:f47ac10b-58cc-4372-0567-0e02b2c3d479", 4, Reserved, true},
+	{"URN:UUID:f47ac10b-58cc-4372-0567-0e02b2c3d479", 4, Reserved, true},
+	{"f47ac10b-58cc-4372-0567-0e02b2c3d479", 4, Reserved, true},
+	{"f47ac10b-58cc-4372-1567-0e02b2c3d479", 4, Reserved, true},
+	{"f47ac10b-58cc-4372-2567-0e02b2c3d479", 4, Reserved, true},
+	{"f47ac10b-58cc-4372-3567-0e02b2c3d479", 4, Reserved, true},
+	{"f47ac10b-58cc-4372-4567-0e02b2c3d479", 4, Reserved, true},
+	{"f47ac10b-58cc-4372-5567-0e02b2c3d479", 4, Reserved, true},
+	{"f47ac10b-58cc-4372-6567-0e02b2c3d479", 4, Reserved, true},
+	{"f47ac10b-58cc-4372-7567-0e02b2c3d479", 4, Reserved, true},
+	{"f47ac10b-58cc-4372-8567-0e02b2c3d479", 4, RFC4122, true},
+	{"f47ac10b-58cc-4372-9567-0e02b2c3d479", 4, RFC4122, true},
+	{"f47ac10b-58cc-4372-a567-0e02b2c3d479", 4, RFC4122, true},
+	{"f47ac10b-58cc-4372-b567-0e02b2c3d479", 4, RFC4122, true},
+	{"f47ac10b-58cc-4372-c567-0e02b2c3d479", 4, Microsoft, true},
+	{"f47ac10b-58cc-4372-d567-0e02b2c3d479", 4, Microsoft, true},
+	{"f47ac10b-58cc-4372-e567-0e02b2c3d479", 4, Future, true},
+	{"f47ac10b-58cc-4372-f567-0e02b2c3d479", 4, Future, true},
+
+	{"f47ac10b158cc-5372-a567-0e02b2c3d479", 0, Invalid, false},
+	{"f47ac10b-58cc25372-a567-0e02b2c3d479", 0, Invalid, false},
+	{"f47ac10b-58cc-53723a567-0e02b2c3d479", 0, Invalid, false},
+	{"f47ac10b-58cc-5372-a56740e02b2c3d479", 0, Invalid, false},
+	{"f47ac10b-58cc-5372-a567-0e02-2c3d479", 0, Invalid, false},
+	{"g47ac10b-58cc-4372-a567-0e02b2c3d479", 0, Invalid, false},
+}
+
+var constants = []struct {
+	c    interface{}
+	name string
+}{
+	{Person, "Person"},
+	{Group, "Group"},
+	{Org, "Org"},
+	{Invalid, "Invalid"},
+	{RFC4122, "RFC4122"},
+	{Reserved, "Reserved"},
+	{Microsoft, "Microsoft"},
+	{Future, "Future"},
+	{Domain(17), "Domain17"},
+	{Variant(42), "BadVariant42"},
+}
+
+func testTest(t *testing.T, in string, tt test) {
+	uuid := Parse(in)
+	if ok := (uuid != nil); ok != tt.isuuid {
+		t.Errorf("Parse(%s) got %v expected %v\n", in, ok, tt.isuuid)
+	}
+	if uuid == nil {
+		return
+	}
+
+	if v := uuid.Variant(); v != tt.variant {
+		t.Errorf("Variant(%s) got %d expected %d\n", in, v, tt.variant)
+	}
+	if v, _ := uuid.Version(); v != tt.version {
+		t.Errorf("Version(%s) got %d expected %d\n", in, v, tt.version)
+	}
+}
+
+func TestUUID(t *testing.T) {
+	for _, tt := range tests {
+		testTest(t, tt.in, tt)
+		testTest(t, strings.ToUpper(tt.in), tt)
+	}
+}
+
+func TestConstants(t *testing.T) {
+	for x, tt := range constants {
+		v, ok := tt.c.(fmt.Stringer)
+		if !ok {
+			t.Errorf("%x: %v: not a stringer", x, tt.c)
+		} else if s := v.String(); s != tt.name {
+			v, _ := tt.c.(int)
+			t.Errorf("%x: Constant %T:%d gives %q, expected %q\n", x, tt.c, v, s, tt.name)
+		}
+	}
+}
+
+func TestRandomUUID(t *testing.T) {
+	m := make(map[string]bool)
+	for x := 1; x < 32; x++ {
+		uuid := NewRandom()
+		s := uuid.String()
+		if m[s] {
+			t.Errorf("NewRandom returned duplicated UUID %s\n", s)
+		}
+		m[s] = true
+		if v, _ := uuid.Version(); v != 4 {
+			t.Errorf("Random UUID of version %s\n", v)
+		}
+		if uuid.Variant() != RFC4122 {
+			t.Errorf("Random UUID is variant %d\n", uuid.Variant())
+		}
+	}
+}
+
+func TestNew(t *testing.T) {
+	m := make(map[string]bool)
+	for x := 1; x < 32; x++ {
+		s := New()
+		if m[s] {
+			t.Errorf("New returned duplicated UUID %s\n", s)
+		}
+		m[s] = true
+		uuid := Parse(s)
+		if uuid == nil {
+			t.Errorf("New returned %q which does not decode\n", s)
+			continue
+		}
+		if v, _ := uuid.Version(); v != 4 {
+			t.Errorf("Random UUID of version %s\n", v)
+		}
+		if uuid.Variant() != RFC4122 {
+			t.Errorf("Random UUID is variant %d\n", uuid.Variant())
+		}
+	}
+}
+
+func clockSeq(t *testing.T, uuid UUID) int {
+	seq, ok := uuid.ClockSequence()
+	if !ok {
+		t.Fatalf("%s: invalid clock sequence\n", uuid)
+	}
+	return seq
+}
+
+func TestClockSeq(t *testing.T) {
+	// Fake time.Now for this test to return a monotonically advancing
+	// time; restore it at the end.
+	defer func(orig func() time.Time) { timeNow = orig }(timeNow)
+	monTime := time.Now()
+	timeNow = func() time.Time {
+		monTime = monTime.Add(1 * time.Second)
+		return monTime
+	}
+
+	SetClockSequence(-1)
+	uuid1 := NewUUID()
+	uuid2 := NewUUID()
+
+	if clockSeq(t, uuid1) != clockSeq(t, uuid2) {
+		t.Errorf("clock sequence %d != %d\n", clockSeq(t, uuid1), clockSeq(t, uuid2))
+	}
+
+	SetClockSequence(-1)
+	uuid2 = NewUUID()
+
+	// On the very off chance we generated the same sequence twice, we
+	// try again.
+	if clockSeq(t, uuid1) == clockSeq(t, uuid2) {
+		SetClockSequence(-1)
+		uuid2 = NewUUID()
+	}
+	if clockSeq(t, uuid1) == clockSeq(t, uuid2) {
+		t.Errorf("Duplicate clock sequence %d\n", clockSeq(t, uuid1))
+	}
+
+	SetClockSequence(0x1234)
+	uuid1 = NewUUID()
+	if seq := clockSeq(t, uuid1); seq != 0x1234 {
+		t.Errorf("%s: expected seq 0x1234 got 0x%04x\n", uuid1, seq)
+	}
+}
+
+func TestCoding(t *testing.T) {
+	text := "7d444840-9dc0-11d1-b245-5ffdce74fad2"
+	urn := "urn:uuid:7d444840-9dc0-11d1-b245-5ffdce74fad2"
+	data := UUID{
+		0x7d, 0x44, 0x48, 0x40,
+		0x9d, 0xc0,
+		0x11, 0xd1,
+		0xb2, 0x45,
+		0x5f, 0xfd, 0xce, 0x74, 0xfa, 0xd2,
+	}
+	if v := data.String(); v != text {
+		t.Errorf("%x: encoded to %s, expected %s\n", data, v, text)
+	}
+	if v := data.URN(); v != urn {
+		t.Errorf("%x: urn is %s, expected %s\n", data, v, urn)
+	}
+
+	uuid := Parse(text)
+	if !Equal(uuid, data) {
+		t.Errorf("%s: decoded to %s, expected %s\n", text, uuid, data)
+	}
+}
+
+func TestVersion1(t *testing.T) {
+	uuid1 := NewUUID()
+	uuid2 := NewUUID()
+
+	if Equal(uuid1, uuid2) {
+		t.Errorf("%s: duplicate uuid\n", uuid1)
+	}
+	if v, _ := uuid1.Version(); v != 1 {
+		t.Errorf("%s: version %s expected 1\n", uuid1, v)
+	}
+	if v, _ := uuid2.Version(); v != 1 {
+		t.Errorf("%s: version %s expected 1\n", uuid2, v)
+	}
+	n1 := uuid1.NodeID()
+	n2 := uuid2.NodeID()
+	if !bytes.Equal(n1, n2) {
+		t.Errorf("Different nodes %x != %x\n", n1, n2)
+	}
+	t1, ok := uuid1.Time()
+	if !ok {
+		t.Errorf("%s: invalid time\n", uuid1)
+	}
+	t2, ok := uuid2.Time()
+	if !ok {
+		t.Errorf("%s: invalid time\n", uuid2)
+	}
+	q1, ok := uuid1.ClockSequence()
+	if !ok {
+		t.Errorf("%s: invalid clock sequence\n", uuid1)
+	}
+	q2, ok := uuid2.ClockSequence()
+	if !ok {
+		t.Errorf("%s: invalid clock sequence", uuid2)
+	}
+
+	switch {
+	case t1 == t2 && q1 == q2:
+		t.Errorf("time stopped\n")
+	case t1 > t2 && q1 == q2:
+		t.Errorf("time reversed\n")
+	case t1 < t2 && q1 != q2:
+		t.Errorf("clock sequence changed unexpectedly\n")
+	}
+}
+
+func TestNodeAndTime(t *testing.T) {
+	// Time is February 5, 1998 12:30:23.136364800 AM GMT
+
+	uuid := Parse("7d444840-9dc0-11d1-b245-5ffdce74fad2")
+	node := []byte{0x5f, 0xfd, 0xce, 0x74, 0xfa, 0xd2}
+
+	ts, ok := uuid.Time()
+	if ok {
+		c := time.Unix(ts.UnixTime())
+		want := time.Date(1998, 2, 5, 0, 30, 23, 136364800, time.UTC)
+		if !c.Equal(want) {
+			t.Errorf("Got time %v, want %v", c, want)
+		}
+	} else {
+		t.Errorf("%s: bad time\n", uuid)
+	}
+	if !bytes.Equal(node, uuid.NodeID()) {
+		t.Errorf("Expected node %v got %v\n", node, uuid.NodeID())
+	}
+}
+
+func TestMD5(t *testing.T) {
+	uuid := NewMD5(NameSpace_DNS, []byte("python.org")).String()
+	want := "6fa459ea-ee8a-3ca4-894e-db77e160355e"
+	if uuid != want {
+		t.Errorf("MD5: got %q expected %q\n", uuid, want)
+	}
+}
+
+func TestSHA1(t *testing.T) {
+	uuid := NewSHA1(NameSpace_DNS, []byte("python.org")).String()
+	want := "886313e1-3b8a-5372-9b90-0c9aee199e5d"
+	if uuid != want {
+		t.Errorf("SHA1: got %q expected %q\n", uuid, want)
+	}
+}
+
+func TestNodeID(t *testing.T) {
+	nid := []byte{1, 2, 3, 4, 5, 6}
+	SetNodeInterface("")
+	s := NodeInterface()
+	if s == "" || s == "user" {
+		t.Errorf("NodeInterface %q after SetNodeInterface\n", s)
+	}
+	node1 := NodeID()
+	if node1 == nil {
+		t.Errorf("NodeID nil after SetNodeInterface\n")
+	}
+	SetNodeID(nid)
+	s = NodeInterface()
+	if s != "user" {
+		t.Errorf("Expected NodeInterface %q got %q\n", "user", s)
+	}
+	node2 := NodeID()
+	if node2 == nil {
+		t.Errorf("NodeID nil after SetNodeID\n")
+	}
+	if bytes.Equal(node1, node2) {
+		t.Errorf("NodeID not changed after SetNodeID\n")
+	} else if !bytes.Equal(nid, node2) {
+		t.Errorf("NodeID is %x, expected %x\n", node2, nid)
+	}
+}
+
+func testDCE(t *testing.T, name string, uuid UUID, domain Domain, id uint32) {
+	if uuid == nil {
+		t.Errorf("%s failed\n", name)
+		return
+	}
+	if v, _ := uuid.Version(); v != 2 {
+		t.Errorf("%s: %s: expected version 2, got %s\n", name, uuid, v)
+		return
+	}
+	if v, ok := uuid.Domain(); !ok || v != domain {
+		if !ok {
+			t.Errorf("%s: %s: Domain failed\n", name, uuid)
+		} else {
+			t.Errorf("%s: %s: expected domain %d, got %d\n", name, uuid, domain, v)
+		}
+	}
+	if v, ok := uuid.Id(); !ok || v != id {
+		if !ok {
+			t.Errorf("%s: %s: Id failed\n", name, uuid)
+		} else {
+			t.Errorf("%s: %s: expected id %d, got %d\n", name, uuid, id, v)
+		}
+	}
+}
+
+func TestDCE(t *testing.T) {
+	testDCE(t, "NewDCESecurity", NewDCESecurity(42, 12345678), 42, 12345678)
+	testDCE(t, "NewDCEPerson", NewDCEPerson(), Person, uint32(os.Getuid()))
+	testDCE(t, "NewDCEGroup", NewDCEGroup(), Group, uint32(os.Getgid()))
+}
+
+type badRand struct{}
+
+func (r badRand) Read(buf []byte) (int, error) {
+	for i := range buf {
+		buf[i] = byte(i)
+	}
+	return len(buf), nil
+}
+
+func TestBadRand(t *testing.T) {
+	SetRand(badRand{})
+	uuid1 := New()
+	uuid2 := New()
+	if uuid1 != uuid2 {
+		t.Errorf("expected duplicates, got %q and %q\n", uuid1, uuid2)
+	}
+	SetRand(nil)
+	uuid1 = New()
+	uuid2 = New()
+	if uuid1 == uuid2 {
+		t.Errorf("unexpected duplicates, got %q\n", uuid1)
+	}
+}
diff --git a/vendor/github.com/pborman/uuid/version1.go b/vendor/github.com/pborman/uuid/version1.go
new file mode 100644
index 000000000..0127eacfa
--- /dev/null
+++ b/vendor/github.com/pborman/uuid/version1.go
@@ -0,0 +1,41 @@
+// Copyright 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package uuid
+
+import (
+	"encoding/binary"
+)
+
+// NewUUID returns a Version 1 UUID based on the current NodeID and clock
+// sequence, and the current time. If the NodeID has not been set by SetNodeID
+// or SetNodeInterface then it will be set automatically. If the NodeID cannot
+// be set NewUUID returns nil. If clock sequence has not been set by
+// SetClockSequence then it will be set automatically. If GetTime fails to
+// return the current time, NewUUID returns nil.
+func NewUUID() UUID {
+	if nodeID == nil {
+		SetNodeInterface("")
+	}
+
+	now, seq, err := GetTime()
+	if err != nil {
+		return nil
+	}
+
+	uuid := make([]byte, 16)
+
+	time_low := uint32(now & 0xffffffff)
+	time_mid := uint16((now >> 32) & 0xffff)
+	time_hi := uint16((now >> 48) & 0x0fff)
+	time_hi |= 0x1000 // Version 1
+
+	binary.BigEndian.PutUint32(uuid[0:], time_low)
+	binary.BigEndian.PutUint16(uuid[4:], time_mid)
+	binary.BigEndian.PutUint16(uuid[6:], time_hi)
+	binary.BigEndian.PutUint16(uuid[8:], seq)
+	copy(uuid[10:], nodeID)
+
+	return uuid
+}
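Note that NewUUID above signals failure by returning nil rather than an error, which is easy to miss at call sites; a minimal usage sketch:

```go
package main

import (
	"fmt"

	"github.com/pborman/uuid"
)

func main() {
	u := uuid.NewUUID()
	if u == nil {
		fmt.Println("could not generate a version 1 UUID")
		return
	}
	if v, _ := u.Version(); v == 1 {
		fmt.Printf("%s node=%x\n", u, u.NodeID())
	}
}
```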
diff --git a/vendor/github.com/pborman/uuid/version4.go b/vendor/github.com/pborman/uuid/version4.go
new file mode 100644
index 000000000..b3d4a368d
--- /dev/null
+++ b/vendor/github.com/pborman/uuid/version4.go
@@ -0,0 +1,25 @@
+// Copyright 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package uuid
+
+// NewRandom returns a Random (Version 4) UUID or panics.
+//
+// The strength of the UUIDs is based on the strength of the crypto/rand
+// package.
+//
+// A note about uniqueness derived from the UUID Wikipedia entry:
+//
+//  Randomly generated UUIDs have 122 random bits. One's annual risk of being
+//  hit by a meteorite is estimated to be one chance in 17 billion, which
+//  means the probability is about 0.00000000006 (6 × 10^-11), equivalent to
+//  the odds of creating a few tens of trillions of UUIDs in a year and
+//  having one duplicate.
+func NewRandom() UUID {
+	uuid := make([]byte, 16)
+	randomBits([]byte(uuid))
+	uuid[6] = (uuid[6] & 0x0f) | 0x40 // Version 4
+	uuid[8] = (uuid[8] & 0x3f) | 0x80 // Variant is 10
+	return uuid
+}
diff --git a/vendor/github.com/pearkes/cloudflare/.gitignore b/vendor/github.com/pearkes/cloudflare/.gitignore
new file mode 100644
index 000000000..836562412
--- /dev/null
+++ b/vendor/github.com/pearkes/cloudflare/.gitignore
@@ -0,0 +1,23 @@
+# Compiled Object files, Static and Dynamic libs (Shared Objects)
+*.o
+*.a
+*.so
+
+# Folders
+_obj
+_test
+
+# Architecture specific extensions/prefixes
+*.[568vq]
+[568vq].out
+
+*.cgo1.go
+*.cgo2.c
+_cgo_defun.c
+_cgo_gotypes.go
+_cgo_export.*
+
+_testmain.go
+
+*.exe
+*.test
diff --git a/vendor/github.com/pearkes/cloudflare/LICENSE b/vendor/github.com/pearkes/cloudflare/LICENSE
new file mode 100644
index 000000000..be2cc4dfb
--- /dev/null
+++ b/vendor/github.com/pearkes/cloudflare/LICENSE
@@ -0,0 +1,362 @@
+Mozilla Public License, version 2.0
+
+1. Definitions
+
+1.1. "Contributor"
+
+     means each individual or legal entity that creates, contributes to the
+     creation of, or owns Covered Software.
+
+1.2. "Contributor Version"
+
+     means the combination of the Contributions of others (if any) used by a
+     Contributor and that particular Contributor's Contribution.
+
+1.3. "Contribution"
+
+     means Covered Software of a particular Contributor.
+
+1.4. "Covered Software"
+
+     means Source Code Form to which the initial Contributor has attached the
+     notice in Exhibit A, the Executable Form of such Source Code Form, and
+     Modifications of such Source Code Form, in each case including portions
+     thereof.
+
+1.5. "Incompatible With Secondary Licenses"
+     means
+
+     a. that the initial Contributor has attached the notice described in
+        Exhibit B to the Covered Software; or
+
+     b. that the Covered Software was made available under the terms of
+        version 1.1 or earlier of the License, but not also under the terms of
+        a Secondary License.
+
+1.6. "Executable Form"
+
+     means any form of the work other than Source Code Form.
+
+1.7. "Larger Work"
+
+     means a work that combines Covered Software with other material, in a
+     separate file or files, that is not Covered Software.
+
+1.8. "License"
+
+     means this document.
+
+1.9. "Licensable"
+
+     means having the right to grant, to the maximum extent possible, whether
+     at the time of the initial grant or subsequently, any and all of the
+     rights conveyed by this License.
+
+1.10. "Modifications"
+
+     means any of the following:
+
+     a. any file in Source Code Form that results from an addition to,
+        deletion from, or modification of the contents of Covered Software; or
+
+     b. any new file in Source Code Form that contains any Covered Software.
+
+1.11.
"Patent Claims" of a Contributor + + means any patent claim(s), including without limitation, method, + process, and apparatus claims, in any patent Licensable by such + Contributor that would be infringed, but for the grant of the License, + by the making, using, selling, offering for sale, having made, import, + or transfer of either its Contributions or its Contributor Version. + +1.12. "Secondary License" + + means either the GNU General Public License, Version 2.0, the GNU Lesser + General Public License, Version 2.1, the GNU Affero General Public + License, Version 3.0, or any later versions of those licenses. + +1.13. "Source Code Form" + + means the form of the work preferred for making modifications. + +1.14. "You" (or "Your") + + means an individual or a legal entity exercising rights under this + License. For legal entities, "You" includes any entity that controls, is + controlled by, or is under common control with You. For purposes of this + definition, "control" means (a) the power, direct or indirect, to cause + the direction or management of such entity, whether by contract or + otherwise, or (b) ownership of more than fifty percent (50%) of the + outstanding shares or beneficial ownership of such entity. + + +2. License Grants and Conditions + +2.1. Grants + + Each Contributor hereby grants You a world-wide, royalty-free, + non-exclusive license: + + a. under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or + as part of a Larger Work; and + + b. under Patent Claims of such Contributor to make, use, sell, offer for + sale, have made, import, and otherwise transfer either its + Contributions or its Contributor Version. + +2.2. Effective Date + + The licenses granted in Section 2.1 with respect to any Contribution + become effective for each Contribution on the date the Contributor first + distributes such Contribution. + +2.3. Limitations on Grant Scope + + The licenses granted in this Section 2 are the only rights granted under + this License. No additional rights or licenses will be implied from the + distribution or licensing of Covered Software under this License. + Notwithstanding Section 2.1(b) above, no patent license is granted by a + Contributor: + + a. for any code that a Contributor has removed from Covered Software; or + + b. for infringements caused by: (i) Your and any other third party's + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + + c. under Patent Claims infringed by Covered Software in the absence of + its Contributions. + + This License does not grant any rights in the trademarks, service marks, + or logos of any Contributor (except as may be necessary to comply with + the notice requirements in Section 3.4). + +2.4. Subsequent Licenses + + No Contributor makes additional grants as a result of Your choice to + distribute the Covered Software under a subsequent version of this + License (see Section 10.2) or under the terms of a Secondary License (if + permitted under the terms of Section 3.3). + +2.5. Representation + + Each Contributor represents that the Contributor believes its + Contributions are its original creation(s) or it has sufficient rights to + grant the rights to its Contributions conveyed by this License. + +2.6. 
Fair Use + + This License is not intended to limit any rights You have under + applicable copyright doctrines of fair use, fair dealing, or other + equivalents. + +2.7. Conditions + + Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in + Section 2.1. + + +3. Responsibilities + +3.1. Distribution of Source Form + + All distribution of Covered Software in Source Code Form, including any + Modifications that You create or to which You contribute, must be under + the terms of this License. You must inform recipients that the Source + Code Form of the Covered Software is governed by the terms of this + License, and how they can obtain a copy of this License. You may not + attempt to alter or restrict the recipients' rights in the Source Code + Form. + +3.2. Distribution of Executable Form + + If You distribute Covered Software in Executable Form then: + + a. such Covered Software must also be made available in Source Code Form, + as described in Section 3.1, and You must inform recipients of the + Executable Form how they can obtain a copy of such Source Code Form by + reasonable means in a timely manner, at a charge no more than the cost + of distribution to the recipient; and + + b. You may distribute such Executable Form under the terms of this + License, or sublicense it under different terms, provided that the + license for the Executable Form does not attempt to limit or alter the + recipients' rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + + You may create and distribute a Larger Work under terms of Your choice, + provided that You also comply with the requirements of this License for + the Covered Software. If the Larger Work is a combination of Covered + Software with a work governed by one or more Secondary Licenses, and the + Covered Software is not Incompatible With Secondary Licenses, this + License permits You to additionally distribute such Covered Software + under the terms of such Secondary License(s), so that the recipient of + the Larger Work may, at their option, further distribute the Covered + Software under the terms of either this License or such Secondary + License(s). + +3.4. Notices + + You may not remove or alter the substance of any license notices + (including copyright notices, patent notices, disclaimers of warranty, or + limitations of liability) contained within the Source Code Form of the + Covered Software, except that You may alter any license notices to the + extent required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + + You may choose to offer, and to charge a fee for, warranty, support, + indemnity or liability obligations to one or more recipients of Covered + Software. However, You may do so only on Your own behalf, and not on + behalf of any Contributor. You must make it absolutely clear that any + such warranty, support, indemnity, or liability obligation is offered by + You alone, and You hereby agree to indemnify every Contributor for any + liability incurred by such Contributor as a result of warranty, support, + indemnity or liability terms You offer. You may include additional + disclaimers of warranty and limitations of liability specific to any + jurisdiction. + +4. 
Inability to Comply Due to Statute or Regulation + + If it is impossible for You to comply with any of the terms of this License + with respect to some or all of the Covered Software due to statute, + judicial order, or regulation then You must: (a) comply with the terms of + this License to the maximum extent possible; and (b) describe the + limitations and the code they affect. Such description must be placed in a + text file included with all distributions of the Covered Software under + this License. Except to the extent prohibited by statute or regulation, + such description must be sufficiently detailed for a recipient of ordinary + skill to be able to understand it. + +5. Termination + +5.1. The rights granted under this License will terminate automatically if You + fail to comply with any of its terms. However, if You become compliant, + then the rights granted under this License from a particular Contributor + are reinstated (a) provisionally, unless and until such Contributor + explicitly and finally terminates Your grants, and (b) on an ongoing + basis, if such Contributor fails to notify You of the non-compliance by + some reasonable means prior to 60 days after You have come back into + compliance. Moreover, Your grants from a particular Contributor are + reinstated on an ongoing basis if such Contributor notifies You of the + non-compliance by some reasonable means, this is the first time You have + received notice of non-compliance with this License from such + Contributor, and You become compliant prior to 30 days after Your receipt + of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent + infringement claim (excluding declaratory judgment actions, + counter-claims, and cross-claims) alleging that a Contributor Version + directly or indirectly infringes any patent, then the rights granted to + You by any and all Contributors for the Covered Software under Section + 2.1 of this License shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user + license agreements (excluding distributors and resellers) which have been + validly granted by You or Your distributors under this License prior to + termination shall survive termination. + +6. Disclaimer of Warranty + + Covered Software is provided under this License on an "as is" basis, + without warranty of any kind, either expressed, implied, or statutory, + including, without limitation, warranties that the Covered Software is free + of defects, merchantable, fit for a particular purpose or non-infringing. + The entire risk as to the quality and performance of the Covered Software + is with You. Should any Covered Software prove defective in any respect, + You (not any Contributor) assume the cost of any necessary servicing, + repair, or correction. This disclaimer of warranty constitutes an essential + part of this License. No use of any Covered Software is authorized under + this License except under this disclaimer. + +7. 
Limitation of Liability + + Under no circumstances and under no legal theory, whether tort (including + negligence), contract, or otherwise, shall any Contributor, or anyone who + distributes Covered Software as permitted above, be liable to You for any + direct, indirect, special, incidental, or consequential damages of any + character including, without limitation, damages for lost profits, loss of + goodwill, work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses, even if such party shall have been + informed of the possibility of such damages. This limitation of liability + shall not apply to liability for death or personal injury resulting from + such party's negligence to the extent applicable law prohibits such + limitation. Some jurisdictions do not allow the exclusion or limitation of + incidental or consequential damages, so this exclusion and limitation may + not apply to You. + +8. Litigation + + Any litigation relating to this License may be brought only in the courts + of a jurisdiction where the defendant maintains its principal place of + business and such litigation shall be governed by laws of that + jurisdiction, without reference to its conflict-of-law provisions. Nothing + in this Section shall prevent a party's ability to bring cross-claims or + counter-claims. + +9. Miscellaneous + + This License represents the complete agreement concerning the subject + matter hereof. If any provision of this License is held to be + unenforceable, such provision shall be reformed only to the extent + necessary to make it enforceable. Any law or regulation which provides that + the language of a contract shall be construed against the drafter shall not + be used to construe this License against a Contributor. + + +10. Versions of the License + +10.1. New Versions + + Mozilla Foundation is the license steward. Except as provided in Section + 10.3, no one other than the license steward has the right to modify or + publish new versions of this License. Each version will be given a + distinguishing version number. + +10.2. Effect of New Versions + + You may distribute the Covered Software under the terms of the version + of the License under which You originally received the Covered Software, + or under the terms of any subsequent version published by the license + steward. + +10.3. Modified Versions + + If you create software not governed by this License, and you want to + create a new license for such software, you may create and use a + modified version of this License if you rename the license and remove + any references to the name of the license steward (except to note that + such modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary + Licenses If You choose to distribute Source Code Form that is + Incompatible With Secondary Licenses under the terms of this version of + the License, the notice described in Exhibit B of this License must be + attached. + +Exhibit A - Source Code Form License Notice + + This Source Code Form is subject to the + terms of the Mozilla Public License, v. + 2.0. If a copy of the MPL was not + distributed with this file, You can + obtain one at + http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular file, +then You may include the notice in a location (such as a LICENSE file in a +relevant directory) where a recipient would be likely to look for such a +notice. 
+
+You may add additional accurate notices of copyright ownership.
+
+Exhibit B - "Incompatible With Secondary Licenses" Notice
+
+      This Source Code Form is "Incompatible
+      With Secondary Licenses", as defined by
+      the Mozilla Public License, v. 2.0.
diff --git a/vendor/github.com/pearkes/cloudflare/README.md b/vendor/github.com/pearkes/cloudflare/README.md
new file mode 100644
index 000000000..a1f967aef
--- /dev/null
+++ b/vendor/github.com/pearkes/cloudflare/README.md
@@ -0,0 +1,15 @@
+## cloudflare
+
+This repository provides the `cloudflare` package, which offers
+an interface to the CloudFlare API.
+
+It's intentionally designed to make heavy use of built-ins and strings
+in place of custom data structures and proper types. It also only implements
+specific endpoints, and doesn't have full API coverage.
+
+**For those reasons, I recommend looking elsewhere if you just need
+a standard CloudFlare API client.**
+
+### Documentation
+
+The full documentation is available on [Godoc](http://godoc.org/github.com/pearkes/cloudflare)
diff --git a/vendor/github.com/pearkes/cloudflare/api.go b/vendor/github.com/pearkes/cloudflare/api.go
new file mode 100644
index 000000000..ed7fe93f7
--- /dev/null
+++ b/vendor/github.com/pearkes/cloudflare/api.go
@@ -0,0 +1,119 @@
+package cloudflare
+
+import (
+	"encoding/json"
+	"fmt"
+	"io/ioutil"
+	"net/http"
+	"net/url"
+	"os"
+
+	"github.com/hashicorp/go-cleanhttp"
+)
+
+// Client provides a client to the CloudFlare API
+type Client struct {
+	// Access Token
+	Token string
+
+	// User Email
+	Email string
+
+	// URL to the CloudFlare API to use
+	URL string
+
+	// Http is the HTTP client to use. A default client
+	// will be used if not provided.
+	Http *http.Client
+}
+
+// NewClient returns a new CloudFlare client. It requires an
+// authorization token and email. You can generate an API token by
+// visiting the Apps & API section of the CloudFlare control panel
+// for your account.
+func NewClient(email string, token string) (*Client, error) {
+	// If it exists, grab the token from the environment
+	if token == "" {
+		token = os.Getenv("CLOUDFLARE_TOKEN")
+	}
+
+	if email == "" {
+		email = os.Getenv("CLOUDFLARE_EMAIL")
+	}
+
+	client := Client{
+		Token: token,
+		Email: email,
+		URL:   "https://www.cloudflare.com/api_json.html",
+		Http:  cleanhttp.DefaultClient(),
+	}
+	return &client, nil
+}
+
+// NewRequest creates a new request with the given params, method, and action.
+func (c *Client) NewRequest(params map[string]string, method string, action string) (*http.Request, error) {
+	p := url.Values{}
+	u, err := url.Parse(c.URL)
+
+	if err != nil {
+		return nil, fmt.Errorf("Error parsing base URL: %s", err)
+	}
+
+	// Build up our request parameters
+	for k, v := range params {
+		p.Add(k, v)
+	}
+
+	// Add authentication details
+	p.Add("tkn", c.Token)
+	p.Add("email", c.Email)
+
+	// The "action" to take against the API
+	p.Add("a", action)
+
+	// Add the params to our URL
+	u.RawQuery = p.Encode()
+
+	// Build the request
+	req, err := http.NewRequest(method, u.String(), nil)
+
+	if err != nil {
+		return nil, fmt.Errorf("Error creating request: %s", err)
+	}
+
+	return req, nil
+}
+
+// decodeBody is used to JSON decode a body
+func decodeBody(resp *http.Response, out interface{}) error {
+	body, err := ioutil.ReadAll(resp.Body)
+
+	if err != nil {
+		return err
+	}
+
+	if err = json.Unmarshal(body, &out); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+// checkResp wraps http.Client.Do() and verifies that the
+// request was successful.
+// A non-200 response returns an error formatted to include any validation
+// problems or to otherwise describe the failure.
+func checkResp(resp *http.Response, err error) (*http.Response, error) {
+	// If the err is already there, there was an error higher
+	// up the chain, so just return that
+	if err != nil {
+		return resp, err
+	}
+
+	switch i := resp.StatusCode; {
+	case i == 200:
+		return resp, nil
+	default:
+		return nil, fmt.Errorf("API Error: %s", resp.Status)
+	}
+}
diff --git a/vendor/github.com/pearkes/cloudflare/api_test.go b/vendor/github.com/pearkes/cloudflare/api_test.go
new file mode 100644
index 000000000..3cd6387dd
--- /dev/null
+++ b/vendor/github.com/pearkes/cloudflare/api_test.go
@@ -0,0 +1,101 @@
+package cloudflare
+
+import (
+	"os"
+	"testing"
+
+	. "github.com/motain/gocheck"
+	"github.com/pearkes/cloudflare/testutil"
+)
+
+type S struct {
+	client *Client
+}
+
+var _ = Suite(&S{})
+
+var testServer = testutil.NewHTTPServer()
+
+func (s *S) SetUpSuite(c *C) {
+	testServer.Start()
+	var err error
+	s.client, err = NewClient("foobar", "foobar")
+	s.client.URL = "http://localhost:4444"
+	if err != nil {
+		panic(err)
+	}
+}
+
+func (s *S) TearDownTest(c *C) {
+	testServer.Flush()
+}
+
+func makeClient(t *testing.T) *Client {
+	client, err := NewClient("foobaremail", "foobartoken")
+
+	if err != nil {
+		t.Fatalf("err: %v", err)
+	}
+
+	if client.Token != "foobartoken" {
+		t.Fatalf("token not set on client: %s", client.Token)
+	}
+
+	if client.Email != "foobaremail" {
+		t.Fatalf("email not set on client: %s", client.Email)
+	}
+
+	return client
+}
+
+func Test_NewClient_env(t *testing.T) {
+	os.Setenv("CLOUDFLARE_TOKEN", "bar")
+	os.Setenv("CLOUDFLARE_EMAIL", "bar")
+	client, err := NewClient("", "")
+
+	if err != nil {
+		t.Fatalf("err: %v", err)
+	}
+
+	if client.Token != "bar" {
+		t.Fatalf("token not set on client: %s", client.Token)
+	}
+
+	if client.Email != "bar" {
+		t.Fatalf("email not set on client: %s", client.Email)
+	}
+}
+
+func TestClient_NewRequest(t *testing.T) {
+	c := makeClient(t)
+
+	params := map[string]string{
+		"foo": "bar",
+		"baz": "bar",
+	}
+	req, err := c.NewRequest(params, "POST", "bar")
+	if err != nil {
+		t.Fatalf("bad: %v", err)
+	}
+
+	encoded := req.URL.Query()
+	if encoded.Get("foo") != "bar" {
+		t.Fatalf("bad: %v", encoded)
+	}
+
+	if encoded.Get("baz") != "bar" {
+		t.Fatalf("bad: %v", encoded)
+	}
+
+	if encoded.Get("a") != "bar" {
+		t.Fatalf("bad: %v", encoded)
+	}
+	expected := "https://www.cloudflare.com/api_json.html?a=bar&baz=bar&email=foobaremail&foo=bar&tkn=foobartoken"
+	if req.URL.String() != expected {
+		t.Fatalf("bad base url: %v\n\nexpected: %v", req.URL.String(), expected)
+	}
+
+	if req.Method != "POST" {
+		t.Fatalf("bad method: %v", req.Method)
+	}
+}
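Tying the client plumbing above to the record helpers vendored in the next file, a hedged sketch of typical use; the email, token, domain, and record values are placeholders, not working credentials:

```go
package main

import (
	"fmt"
	"log"

	"github.com/pearkes/cloudflare"
)

func main() {
	// Empty arguments would fall back to CLOUDFLARE_EMAIL and
	// CLOUDFLARE_TOKEN in the environment.
	client, err := cloudflare.NewClient("user@example.com", "api-token")
	if err != nil {
		log.Fatal(err)
	}

	rec, err := client.CreateRecord("example.com", &cloudflare.CreateRecord{
		Type:    "A",
		Name:    "www",
		Content: "203.0.113.10",
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("created record", rec.Id)
}
```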
diff --git a/vendor/github.com/pearkes/cloudflare/record.go b/vendor/github.com/pearkes/cloudflare/record.go
new file mode 100644
index 000000000..a3bed92a2
--- /dev/null
+++ b/vendor/github.com/pearkes/cloudflare/record.go
@@ -0,0 +1,334 @@
+package cloudflare
+
+import (
+	"errors"
+	"fmt"
+	"strings"
+)
+
+type RecordsResponse struct {
+	Response struct {
+		Recs struct {
+			Records []Record `json:"objs"`
+		} `json:"recs"`
+	} `json:"response"`
+	Result  string `json:"result"`
+	Message string `json:"msg"`
+}
+
+// FindRecord returns the record with the given id from the response, or an
+// error if the response is an error or the record cannot be found.
+func (r *RecordsResponse) FindRecord(id string) (*Record, error) {
+	if r.Result == "error" {
+		return nil, fmt.Errorf("API Error: %s", r.Message)
+	}
+
+	objs := r.Response.Recs.Records
+	notFoundErr := errors.New("Record not found")
+
+	// No objects, so nothing can match
+	if len(objs) == 0 {
+		return nil, notFoundErr
+	}
+
+	for _, v := range objs {
+		// We have a match, return that
+		if v.Id == id {
+			return &v, nil
+		}
+	}
+
+	return nil, notFoundErr
+}
+
+// FindRecordByName returns the records whose name matches name exactly or,
+// when wildcard is true, whose name ends in "." + name.
+func (r *RecordsResponse) FindRecordByName(name string, wildcard bool) ([]Record, error) {
+	if r.Result == "error" {
+		return nil, fmt.Errorf("API Error: %s", r.Message)
+	}
+
+	objs := r.Response.Recs.Records
+	notFoundErr := errors.New("Record not found")
+
+	// No objects, so nothing can match
+	if len(objs) == 0 {
+		return nil, notFoundErr
+	}
+
+	var recs []Record
+	suffix := "." + name
+
+	for _, v := range objs {
+		if v.Name == name {
+			recs = append(recs, v)
+		} else if wildcard && strings.HasSuffix(v.Name, suffix) {
+			recs = append(recs, v)
+		}
+	}
+
+	return recs, nil
+}
+
+type RecordResponse struct {
+	Response struct {
+		Rec struct {
+			Record Record `json:"obj"`
+		} `json:"rec"`
+	} `json:"response"`
+	Result  string `json:"result"`
+	Message string `json:"msg"`
+}
+
+// GetRecord returns the record from the response, or an error if the
+// response is an error.
+func (r *RecordResponse) GetRecord() (*Record, error) {
+	if r.Result == "error" {
+		return nil, fmt.Errorf("API Error: %s", r.Message)
+	}
+
+	return &r.Response.Rec.Record, nil
+}
+
+// Record is used to represent a retrieved Record. All properties
+// are set as strings.
+type Record struct {
+	Id       string `json:"rec_id"`
+	Domain   string `json:"zone_name"`
+	Name     string `json:"display_name"`
+	FullName string `json:"name"`
+	Value    string `json:"content"`
+	Type     string `json:"type"`
+	Priority string `json:"prio"`
+	Ttl      string `json:"ttl"`
+}
+
+// CreateRecord contains the request parameters to create a new
+// record.
+type CreateRecord struct {
+	Type     string
+	Name     string
+	Content  string
+	Ttl      string
+	Priority string
+}
+
+// CreateRecord creates a record from the parameters specified and
+// returns an error if it fails. If no error is returned,
+// the Record was successfully created.
+func (c *Client) CreateRecord(domain string, opts *CreateRecord) (*Record, error) {
+	// Make the request parameters
+	params := make(map[string]string)
+	params["z"] = domain
+
+	params["type"] = opts.Type
+
+	if opts.Name != "" {
+		params["name"] = opts.Name
+	}
+
+	if opts.Content != "" {
+		params["content"] = opts.Content
+	}
+
+	if opts.Priority != "" {
+		params["prio"] = opts.Priority
+	}
+
+	if opts.Ttl != "" {
+		params["ttl"] = opts.Ttl
+	} else {
+		params["ttl"] = "1"
+	}
+
+	req, err := c.NewRequest(params, "POST", "rec_new")
+	if err != nil {
+		return nil, err
+	}
+
+	resp, err := checkResp(c.Http.Do(req))
+
+	if err != nil {
+		return nil, fmt.Errorf("Error creating record: %s", err)
+	}
+
+	recordResp := new(RecordResponse)
+
+	err = decodeBody(resp, &recordResp)
+
+	if err != nil {
+		return nil, fmt.Errorf("Error parsing record response: %s", err)
+	}
+	record, err := recordResp.GetRecord()
+	if err != nil {
+		return nil, err
+	}
+
+	// The request was successful
+	return record, nil
+}
+
+// DestroyRecord destroys a record by the ID specified and
+// returns an error if it fails. If no error is returned,
+// the Record was successfully destroyed.
+func (c *Client) DestroyRecord(domain string, id string) error {
+	params := make(map[string]string)
+
+	params["z"] = domain
+	params["id"] = id
+
+	req, err := c.NewRequest(params, "POST", "rec_delete")
+	if err != nil {
+		return err
+	}
+
+	resp, err := checkResp(c.Http.Do(req))
+
+	if err != nil {
+		return fmt.Errorf("Error deleting record: %s", err)
+	}
+
+	recordResp := new(RecordResponse)
+
+	err = decodeBody(resp, &recordResp)
+
+	if err != nil {
+		return fmt.Errorf("Error parsing record response: %s", err)
+	}
+	_, err = recordResp.GetRecord()
+	if err != nil {
+		return err
+	}
+
+	// The request was successful
+	return nil
+}
+
+// UpdateRecord contains the request parameters to update a
+// record.
+type UpdateRecord struct {
+	Type     string
+	Name     string
+	Content  string
+	Ttl      string
+	Priority string
+}
+
+// UpdateRecord updates a record by the ID specified and
+// returns an error if it fails. If no error is returned,
+// the Record was successfully updated.
+func (c *Client) UpdateRecord(domain string, id string, opts *UpdateRecord) error {
+	params := make(map[string]string)
+	params["z"] = domain
+	params["id"] = id
+
+	params["type"] = opts.Type
+
+	if opts.Name != "" {
+		params["name"] = opts.Name
+	}
+
+	if opts.Content != "" {
+		params["content"] = opts.Content
+	}
+
+	if opts.Priority != "" {
+		params["prio"] = opts.Priority
+	}
+
+	if opts.Ttl != "" {
+		params["ttl"] = opts.Ttl
+	} else {
+		params["ttl"] = "1"
+	}
+
+	req, err := c.NewRequest(params, "POST", "rec_edit")
+	if err != nil {
+		return err
+	}
+
+	resp, err := checkResp(c.Http.Do(req))
+
+	if err != nil {
+		return fmt.Errorf("Error updating record: %s", err)
+	}
+
+	recordResp := new(RecordResponse)
+
+	err = decodeBody(resp, &recordResp)
+
+	if err != nil {
+		return fmt.Errorf("Error parsing record response: %s", err)
+	}
+	_, err = recordResp.GetRecord()
+	if err != nil {
+		return err
+	}
+
+	// The request was successful
+	return nil
+}
+
+// RetrieveRecordsByName retrieves all records in domain whose name matches
+// name, optionally including wildcard (suffix) matches when wildcard is true.
+func (c *Client) RetrieveRecordsByName(domain string, name string, wildcard bool) ([]Record, error) {
+	params := make(map[string]string)
+	// The zone we want
+	params["z"] = domain
+
+	req, err := c.NewRequest(params, "GET", "rec_load_all")
+
+	if err != nil {
+		return nil, err
+	}
+
+	resp, err := checkResp(c.Http.Do(req))
+	if err != nil {
+		return nil, fmt.Errorf("Error retrieving record: %s", err)
+	}
+
+	records := new(RecordsResponse)
+
+	err = decodeBody(resp, records)
+
+	if err != nil {
+		return nil, fmt.Errorf("Error decoding record response: %s", err)
+	}
+
+	record, err := records.FindRecordByName(name, wildcard)
+	if err != nil {
+		return nil, err
+	}
+
+	// The request was successful
+	return record, nil
+}
+
+// RetrieveRecord gets a record by the ID specified and returns a Record and
+// an error. For failed requests the Record is nil and an error is returned.
+func (c *Client) RetrieveRecord(domain string, id string) (*Record, error) { + params := make(map[string]string) + // The zone we want + params["z"] = domain + params["id"] = id + + req, err := c.NewRequest(params, "GET", "rec_load_all") + + if err != nil { + return nil, err + } + + resp, err := checkResp(c.Http.Do(req)) + if err != nil { + return nil, fmt.Errorf("Error retrieving record: %s", err) + } + + records := new(RecordsResponse) + + err = decodeBody(resp, records) + + if err != nil { + return nil, fmt.Errorf("Error decoding record response: %s", err) + } + + record, err := records.FindRecord(id) + if err != nil { + return nil, err + } + + // The request was successful + return record, nil +} diff --git a/vendor/github.com/pearkes/cloudflare/record_test.go b/vendor/github.com/pearkes/cloudflare/record_test.go new file mode 100644 index 000000000..e134b5ef2 --- /dev/null +++ b/vendor/github.com/pearkes/cloudflare/record_test.go @@ -0,0 +1,409 @@ +package cloudflare + +import ( + "testing" + + . "github.com/motain/gocheck" +) + +func TestRecord(t *testing.T) { + TestingT(t) +} + +func (s *S) Test_CreateRecord(c *C) { + testServer.Response(200, nil, recordExample) + + opts := CreateRecord{ + Type: "A", + Name: "foobar", + Content: "10.0.0.1", + } + + record, err := s.client.CreateRecord("example.com", &opts) + + req := testServer.WaitRequest() + + c.Assert(req.Form["type"], DeepEquals, []string{"A"}) + c.Assert(req.Form["name"], DeepEquals, []string{"foobar"}) + c.Assert(req.Form["content"], DeepEquals, []string{"10.0.0.1"}) + c.Assert(req.Form["ttl"], DeepEquals, []string{"1"}) + c.Assert(err, IsNil) + c.Assert(record.Id, Equals, "23734516") +} + +func (s *S) Test_CreateRecordWithTTL(c *C) { + testServer.Response(200, nil, recordExample) + + opts := CreateRecord{ + Type: "A", + Name: "foobar", + Content: "10.0.0.1", + Ttl: "600", + } + + record, err := s.client.CreateRecord("example.com", &opts) + + req := testServer.WaitRequest() + + c.Assert(req.Form["type"], DeepEquals, []string{"A"}) + c.Assert(req.Form["name"], DeepEquals, []string{"foobar"}) + c.Assert(req.Form["content"], DeepEquals, []string{"10.0.0.1"}) + c.Assert(req.Form["ttl"], DeepEquals, []string{"600"}) + c.Assert(err, IsNil) + c.Assert(record.Id, Equals, "23734516") +} + +func (s *S) Test_RetrieveRecord(c *C) { + testServer.Response(200, nil, recordsExample) + + record, err := s.client.RetrieveRecord("example.com", "16606009") + + _ = testServer.WaitRequest() + + c.Assert(err, IsNil) + c.Assert(record.Name, Equals, "direct") + c.Assert(record.Id, Equals, "16606009") + c.Assert(record.Priority, Equals, "") +} + +func (s *S) Test_RetrieveRecord_Bad(c *C) { + testServer.Response(200, nil, recordsErrorExample) + + record, err := s.client.RetrieveRecord("example.com", "16606009") + + _ = testServer.WaitRequest() + + c.Assert(err.Error(), Equals, "API Error: Invalid zone.") + c.Assert(record, IsNil) +} + +func (s *S) Test_DestroyRecord(c *C) { + testServer.Response(200, nil, recordDeleteExample) + + err := s.client.DestroyRecord("example.com", "25") + + _ = testServer.WaitRequest() + + c.Assert(err, IsNil) +} + +func (s *S) Test_UpdateRecord_Bad(c *C) { + testServer.Response(200, nil, recordErrorExample) + + opts := UpdateRecord{ + Name: "foobar", + Type: "CNAME", + } + + err := s.client.UpdateRecord("example.com", "16606009", &opts) + + req := testServer.WaitRequest() + + c.Assert(req.Form["type"], DeepEquals, []string{"CNAME"}) + c.Assert(req.Form["name"], DeepEquals, []string{"foobar"}) + c.Assert(err.Error(), Equals, 
"API Error: Invalid record id.") +} + +func (s *S) Test_UpdateRecord(c *C) { + testServer.Response(200, nil, recordExample) + + opts := UpdateRecord{ + Name: "foobar", + Type: "CNAME", + } + + err := s.client.UpdateRecord("example.com", "25", &opts) + + req := testServer.WaitRequest() + + c.Assert(req.Form["type"], DeepEquals, []string{"CNAME"}) + c.Assert(req.Form["name"], DeepEquals, []string{"foobar"}) + c.Assert(err, IsNil) +} + +var recordErrorExample = `{ + "request": { + "act": "rec_edit" + }, + "result": "error", + "msg": "Invalid record id." +}` + +var recordsErrorExample = `{ + "request": { + "act": "rec_load_all" + }, + "result": "error", + "msg": "Invalid zone." +}` + +var recordDeleteExample = `{ + "request": { + "act": "rec_delete", + "a": "rec_delete", + "tkn": "1296c62233d48a6cf0585b0c1dddc3512e4b2", + "id": "23735515", + "email": "sample@example.com", + "z": "example.com" + }, + "result": "success", + "msg": null +}` + +var recordsExample = `{ + "request": { + "act": "rec_load_all", + "a": "rec_load_all", + "email": "sample@example.com", + "tkn": "8afbe6dea02407989af4dd4c97bb6e25", + "z": "example.com" + }, + "response": { + "recs": { + "has_more": false, + "count": 7, + "objs": [ + { + "rec_id": "16606009", + "rec_tag": "7f8e77bac02ba65d34e20c4b994a202c", + "zone_name": "example.com", + "name": "direct.example.com", + "display_name": "direct", + "type": "A", + "prio": null, + "content": "[server IP]", + "display_content": "[server IP]", + "ttl": "1", + "ttl_ceil": 86400, + "ssl_id": null, + "ssl_status": null, + "ssl_expires_on": null, + "auto_ttl": 1, + "service_mode": "0", + "props": { + "proxiable": 1, + "cloud_on": 0, + "cf_open": 1, + "ssl": 0, + "expired_ssl": 0, + "expiring_ssl": 0, + "pending_ssl": 0 + } + }, + { + "rec_id": "16606003", + "rec_tag": "d5315634e9f5660d3670e62fa176e5de", + "zone_name": "example.com", + "name": "home.example.com", + "display_name": "home", + "type": "A", + "prio": null, + "content": "[server IP]", + "display_content": "[server IP]", + "ttl": "1", + "ttl_ceil": 86400, + "ssl_id": null, + "ssl_status": null, + "ssl_expires_on": null, + "auto_ttl": 1, + "service_mode": "0", + "props": { + "proxiable": 1, + "cloud_on": 0, + "cf_open": 1, + "ssl": 0, + "expired_ssl": 0, + "expiring_ssl": 0, + "pending_ssl": 0 + } + }, + { + "rec_id": "16606000", + "rec_tag": "23b26c051884e94e18711742942760b1", + "zone_name": "example.com", + "name": "example.com", + "display_name": "example.com", + "type": "A", + "prio": null, + "content": "[server IP]", + "display_content": "[server IP]", + "ttl": "1", + "ttl_ceil": 86400, + "ssl_id": null, + "ssl_status": null, + "ssl_expires_on": null, + "auto_ttl": 1, + "service_mode": "1", + "props": { + "proxiable": 1, + "cloud_on": 1, + "cf_open": 1, + "ssl": 0, + "expired_ssl": 0, + "expiring_ssl": 0, + "pending_ssl": 0 + } + }, + { + "rec_id": "18136402", + "rec_tag": "3bcef45cdf5b7638b13cfb89f1b6e716", + "zone_name": "example.com", + "name": "test.example.com", + "display_name": "test", + "type": "A", + "prio": null, + "content": "[server IP]", + "display_content": "[server IP]", + "ttl": "1", + "ttl_ceil": 86400, + "ssl_id": null, + "ssl_status": null, + "ssl_expires_on": null, + "auto_ttl": 1, + "service_mode": "0", + "props": { + "proxiable": 1, + "cloud_on": 0, + "cf_open": 1, + "ssl": 0, + "expired_ssl": 0, + "expiring_ssl": 0, + "pending_ssl": 0 + } + }, + { + "rec_id": "16606018", + "rec_tag": "c0b453b2d94213a7930d342114cbda86", + "zone_name": "example.com", + "name": "www.example.com", + "display_name": 
"www", + "type": "CNAME", + "prio": null, + "content": "example.com", + "display_content": "example.com", + "ttl": "1", + "ttl_ceil": 86400, + "ssl_id": null, + "ssl_status": null, + "ssl_expires_on": null, + "auto_ttl": 1, + "service_mode": "0", + "props": { + "proxiable": 1, + "cloud_on": 0, + "cf_open": 1, + "ssl": 0, + "expired_ssl": 0, + "expiring_ssl": 0, + "pending_ssl": 0 + } + }, + { + "rec_id": "17119732", + "rec_tag": "1faa40f85c78bccb69ee8116e84f3b40", + "zone_name": "example.com", + "name": "xn--vii.example.com", + "display_name": "⟵", + "type": "CNAME", + "prio": null, + "content": "example.com", + "display_content": "example.com", + "ttl": "1", + "ttl_ceil": 86400, + "ssl_id": null, + "ssl_status": null, + "ssl_expires_on": null, + "auto_ttl": 1, + "service_mode": "1", + "props": { + "proxiable": 1, + "cloud_on": 1, + "cf_open": 1, + "ssl": 0, + "expired_ssl": 0, + "expiring_ssl": 0, + "pending_ssl": 0 + } + }, + { + "rec_id": "16606030", + "rec_tag": "2012b3a2e49978ef18ee13dd98e6b6f7", + "zone_name": "example.com", + "name": "yay.example.com", + "display_name": "yay", + "type": "CNAME", + "prio": null, + "content": "domains.tumblr.com", + "display_content": "domains.tumblr.com", + "ttl": "1", + "ttl_ceil": 86400, + "ssl_id": null, + "ssl_status": null, + "ssl_expires_on": null, + "auto_ttl": 1, + "service_mode": "0", + "props": { + "proxiable": 1, + "cloud_on": 0, + "cf_open": 1, + "ssl": 0, + "expired_ssl": 0, + "expiring_ssl": 0, + "pending_ssl": 0 + } + } + ] + } + }, + "result": "success", + "msg": null +}` + +var recordExample = `{ + "request": { + "act": "rec_new", + "a": "rec_new", + "tkn": "8afbe6dea02407989af4dd4c97bb6e25", + "email": "sample@example.com", + "type": "A", + "z": "example.com", + "name": "test", + "content": "96.126.126.36", + "ttl": "1", + "service_mode": "1" + }, + "response": { + "rec": { + "obj": { + "rec_id": "23734516", + "rec_tag": "b3db8b8ad50389eb4abae7522b22852f", + "zone_name": "example.com", + "name": "test.example.com", + "display_name": "test", + "type": "A", + "prio": null, + "content": "96.126.126.36", + "display_content": "96.126.126.36", + "ttl": "1", + "ttl_ceil": 86400, + "ssl_id": "12805", + "ssl_status": "V", + "ssl_expires_on": null, + "auto_ttl": 1, + "service_mode": "0", + "props": { + "proxiable": 1, + "cloud_on": 0, + "cf_open": 1, + "ssl": 1, + "expired_ssl": 0, + "expiring_ssl": 0, + "pending_ssl": 0, + "vanity_lock": 0 + } + } + } + }, + "result": "success", + "msg": null +}` diff --git a/vendor/github.com/pearkes/cloudflare/testutil/http.go b/vendor/github.com/pearkes/cloudflare/testutil/http.go new file mode 100644 index 000000000..e4f8ddb80 --- /dev/null +++ b/vendor/github.com/pearkes/cloudflare/testutil/http.go @@ -0,0 +1,174 @@ +package testutil + +import ( + "bytes" + "fmt" + "io/ioutil" + "net" + "net/http" + "net/url" + "os" + "time" +) + +type HTTPServer struct { + URL string + Timeout time.Duration + started bool + request chan *http.Request + response chan ResponseFunc +} + +type Response struct { + Status int + Headers map[string]string + Body string +} + +func NewHTTPServer() *HTTPServer { + return &HTTPServer{URL: "http://localhost:4444", Timeout: 5 * time.Second} +} + +type ResponseFunc func(path string) Response + +func (s *HTTPServer) Start() { + if s.started { + return + } + s.started = true + s.request = make(chan *http.Request, 1024) + s.response = make(chan ResponseFunc, 1024) + u, err := url.Parse(s.URL) + if err != nil { + panic(err) + } + l, err := net.Listen("tcp", u.Host) + if err != nil { + 
panic(err) + } + go http.Serve(l, s) + + s.Response(203, nil, "") + for { + // Wait for it to be up. + resp, err := http.Get(s.URL) + if err == nil && resp.StatusCode == 203 { + break + } + time.Sleep(1e8) + } + s.WaitRequest() // Consume dummy request. +} + +// Flush discards all pending requests and responses. +func (s *HTTPServer) Flush() { + for { + select { + case <-s.request: + case <-s.response: + default: + return + } + } +} + +func body(req *http.Request) string { + data, err := ioutil.ReadAll(req.Body) + if err != nil { + panic(err) + } + return string(data) +} + +func (s *HTTPServer) ServeHTTP(w http.ResponseWriter, req *http.Request) { + req.ParseMultipartForm(1e6) + data, err := ioutil.ReadAll(req.Body) + if err != nil { + panic(err) + } + req.Body = ioutil.NopCloser(bytes.NewBuffer(data)) + s.request <- req + var resp Response + select { + case respFunc := <-s.response: + resp = respFunc(req.URL.Path) + case <-time.After(s.Timeout): + const msg = "ERROR: Timeout waiting for test to prepare a response\n" + fmt.Fprintf(os.Stderr, msg) + resp = Response{500, nil, msg} + } + if resp.Headers != nil { + h := w.Header() + for k, v := range resp.Headers { + h.Set(k, v) + } + } + if resp.Status != 0 { + w.WriteHeader(resp.Status) + } + w.Write([]byte(resp.Body)) +} + +// WaitRequests returns the next n requests made to the http server from +// the queue. If not enough requests were previously made, it waits until +// the timeout value for them to be made. +func (s *HTTPServer) WaitRequests(n int) []*http.Request { + reqs := make([]*http.Request, 0, n) + for i := 0; i < n; i++ { + select { + case req := <-s.request: + reqs = append(reqs, req) + case <-time.After(s.Timeout): + panic("Timeout waiting for request") + } + } + return reqs +} + +// WaitRequest returns the next request made to the http server from +// the queue. If no requests were previously made, it waits until the +// timeout value for one to be made. +func (s *HTTPServer) WaitRequest() *http.Request { + return s.WaitRequests(1)[0] +} + +// ResponseFunc prepares the test server to respond the following n +// requests using f to build each response. +func (s *HTTPServer) ResponseFunc(n int, f ResponseFunc) { + for i := 0; i < n; i++ { + s.response <- f + } +} + +// ResponseMap maps request paths to responses. +type ResponseMap map[string]Response + +// ResponseMap prepares the test server to respond the following n +// requests using the m to obtain the responses. +func (s *HTTPServer) ResponseMap(n int, m ResponseMap) { + f := func(path string) Response { + for rpath, resp := range m { + if rpath == path { + return resp + } + } + body := "Path not found in response map: " + path + return Response{Status: 500, Body: body} + } + s.ResponseFunc(n, f) +} + +// Responses prepares the test server to respond the following n requests +// using the provided response parameters. +func (s *HTTPServer) Responses(n int, status int, headers map[string]string, body string) { + f := func(path string) Response { + return Response{status, headers, body} + } + s.ResponseFunc(n, f) +} + +// Response prepares the test server to respond the following request +// using the provided response parameters. 
+func (s *HTTPServer) Response(status int, headers map[string]string, body string) { + s.Responses(1, status, headers, body) +} diff --git a/vendor/github.com/pearkes/dnsimple/.travis.yml b/vendor/github.com/pearkes/dnsimple/.travis.yml new file mode 100644 index 000000000..98b43464d --- /dev/null +++ b/vendor/github.com/pearkes/dnsimple/.travis.yml @@ -0,0 +1,5 @@ +language: go + +go: + - 1.3 + - tip diff --git a/vendor/github.com/pearkes/dnsimple/LICENSE b/vendor/github.com/pearkes/dnsimple/LICENSE new file mode 100644 index 000000000..be2cc4dfb --- /dev/null +++ b/vendor/github.com/pearkes/dnsimple/LICENSE @@ -0,0 +1,362 @@ +Mozilla Public License, version 2.0 + +1. Definitions + +1.1. "Contributor" + + means each individual or legal entity that creates, contributes to the + creation of, or owns Covered Software. + +1.2. "Contributor Version" + + means the combination of the Contributions of others (if any) used by a + Contributor and that particular Contributor's Contribution. + +1.3. "Contribution" + + means Covered Software of a particular Contributor. + +1.4. "Covered Software" + + means Source Code Form to which the initial Contributor has attached the + notice in Exhibit A, the Executable Form of such Source Code Form, and + Modifications of such Source Code Form, in each case including portions + thereof. + +1.5. "Incompatible With Secondary Licenses" + means + + a. that the initial Contributor has attached the notice described in + Exhibit B to the Covered Software; or + + b. that the Covered Software was made available under the terms of + version 1.1 or earlier of the License, but not also under the terms of + a Secondary License. + +1.6. "Executable Form" + + means any form of the work other than Source Code Form. + +1.7. "Larger Work" + + means a work that combines Covered Software with other material, in a + separate file or files, that is not Covered Software. + +1.8. "License" + + means this document. + +1.9. "Licensable" + + means having the right to grant, to the maximum extent possible, whether + at the time of the initial grant or subsequently, any and all of the + rights conveyed by this License. + +1.10. "Modifications" + + means any of the following: + + a. any file in Source Code Form that results from an addition to, + deletion from, or modification of the contents of Covered Software; or + + b. any new file in Source Code Form that contains any Covered Software. + +1.11. "Patent Claims" of a Contributor + + means any patent claim(s), including without limitation, method, + process, and apparatus claims, in any patent Licensable by such + Contributor that would be infringed, but for the grant of the License, + by the making, using, selling, offering for sale, having made, import, + or transfer of either its Contributions or its Contributor Version. + +1.12. "Secondary License" + + means either the GNU General Public License, Version 2.0, the GNU Lesser + General Public License, Version 2.1, the GNU Affero General Public + License, Version 3.0, or any later versions of those licenses. + +1.13. "Source Code Form" + + means the form of the work preferred for making modifications. + +1.14. "You" (or "Your") + + means an individual or a legal entity exercising rights under this + License. For legal entities, "You" includes any entity that controls, is + controlled by, or is under common control with You. 
For purposes of this + definition, "control" means (a) the power, direct or indirect, to cause + the direction or management of such entity, whether by contract or + otherwise, or (b) ownership of more than fifty percent (50%) of the + outstanding shares or beneficial ownership of such entity. + + +2. License Grants and Conditions + +2.1. Grants + + Each Contributor hereby grants You a world-wide, royalty-free, + non-exclusive license: + + a. under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or + as part of a Larger Work; and + + b. under Patent Claims of such Contributor to make, use, sell, offer for + sale, have made, import, and otherwise transfer either its + Contributions or its Contributor Version. + +2.2. Effective Date + + The licenses granted in Section 2.1 with respect to any Contribution + become effective for each Contribution on the date the Contributor first + distributes such Contribution. + +2.3. Limitations on Grant Scope + + The licenses granted in this Section 2 are the only rights granted under + this License. No additional rights or licenses will be implied from the + distribution or licensing of Covered Software under this License. + Notwithstanding Section 2.1(b) above, no patent license is granted by a + Contributor: + + a. for any code that a Contributor has removed from Covered Software; or + + b. for infringements caused by: (i) Your and any other third party's + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + + c. under Patent Claims infringed by Covered Software in the absence of + its Contributions. + + This License does not grant any rights in the trademarks, service marks, + or logos of any Contributor (except as may be necessary to comply with + the notice requirements in Section 3.4). + +2.4. Subsequent Licenses + + No Contributor makes additional grants as a result of Your choice to + distribute the Covered Software under a subsequent version of this + License (see Section 10.2) or under the terms of a Secondary License (if + permitted under the terms of Section 3.3). + +2.5. Representation + + Each Contributor represents that the Contributor believes its + Contributions are its original creation(s) or it has sufficient rights to + grant the rights to its Contributions conveyed by this License. + +2.6. Fair Use + + This License is not intended to limit any rights You have under + applicable copyright doctrines of fair use, fair dealing, or other + equivalents. + +2.7. Conditions + + Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in + Section 2.1. + + +3. Responsibilities + +3.1. Distribution of Source Form + + All distribution of Covered Software in Source Code Form, including any + Modifications that You create or to which You contribute, must be under + the terms of this License. You must inform recipients that the Source + Code Form of the Covered Software is governed by the terms of this + License, and how they can obtain a copy of this License. You may not + attempt to alter or restrict the recipients' rights in the Source Code + Form. + +3.2. Distribution of Executable Form + + If You distribute Covered Software in Executable Form then: + + a. 
such Covered Software must also be made available in Source Code Form, + as described in Section 3.1, and You must inform recipients of the + Executable Form how they can obtain a copy of such Source Code Form by + reasonable means in a timely manner, at a charge no more than the cost + of distribution to the recipient; and + + b. You may distribute such Executable Form under the terms of this + License, or sublicense it under different terms, provided that the + license for the Executable Form does not attempt to limit or alter the + recipients' rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + + You may create and distribute a Larger Work under terms of Your choice, + provided that You also comply with the requirements of this License for + the Covered Software. If the Larger Work is a combination of Covered + Software with a work governed by one or more Secondary Licenses, and the + Covered Software is not Incompatible With Secondary Licenses, this + License permits You to additionally distribute such Covered Software + under the terms of such Secondary License(s), so that the recipient of + the Larger Work may, at their option, further distribute the Covered + Software under the terms of either this License or such Secondary + License(s). + +3.4. Notices + + You may not remove or alter the substance of any license notices + (including copyright notices, patent notices, disclaimers of warranty, or + limitations of liability) contained within the Source Code Form of the + Covered Software, except that You may alter any license notices to the + extent required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + + You may choose to offer, and to charge a fee for, warranty, support, + indemnity or liability obligations to one or more recipients of Covered + Software. However, You may do so only on Your own behalf, and not on + behalf of any Contributor. You must make it absolutely clear that any + such warranty, support, indemnity, or liability obligation is offered by + You alone, and You hereby agree to indemnify every Contributor for any + liability incurred by such Contributor as a result of warranty, support, + indemnity or liability terms You offer. You may include additional + disclaimers of warranty and limitations of liability specific to any + jurisdiction. + +4. Inability to Comply Due to Statute or Regulation + + If it is impossible for You to comply with any of the terms of this License + with respect to some or all of the Covered Software due to statute, + judicial order, or regulation then You must: (a) comply with the terms of + this License to the maximum extent possible; and (b) describe the + limitations and the code they affect. Such description must be placed in a + text file included with all distributions of the Covered Software under + this License. Except to the extent prohibited by statute or regulation, + such description must be sufficiently detailed for a recipient of ordinary + skill to be able to understand it. + +5. Termination + +5.1. The rights granted under this License will terminate automatically if You + fail to comply with any of its terms. 
However, if You become compliant, + then the rights granted under this License from a particular Contributor + are reinstated (a) provisionally, unless and until such Contributor + explicitly and finally terminates Your grants, and (b) on an ongoing + basis, if such Contributor fails to notify You of the non-compliance by + some reasonable means prior to 60 days after You have come back into + compliance. Moreover, Your grants from a particular Contributor are + reinstated on an ongoing basis if such Contributor notifies You of the + non-compliance by some reasonable means, this is the first time You have + received notice of non-compliance with this License from such + Contributor, and You become compliant prior to 30 days after Your receipt + of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent + infringement claim (excluding declaratory judgment actions, + counter-claims, and cross-claims) alleging that a Contributor Version + directly or indirectly infringes any patent, then the rights granted to + You by any and all Contributors for the Covered Software under Section + 2.1 of this License shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user + license agreements (excluding distributors and resellers) which have been + validly granted by You or Your distributors under this License prior to + termination shall survive termination. + +6. Disclaimer of Warranty + + Covered Software is provided under this License on an "as is" basis, + without warranty of any kind, either expressed, implied, or statutory, + including, without limitation, warranties that the Covered Software is free + of defects, merchantable, fit for a particular purpose or non-infringing. + The entire risk as to the quality and performance of the Covered Software + is with You. Should any Covered Software prove defective in any respect, + You (not any Contributor) assume the cost of any necessary servicing, + repair, or correction. This disclaimer of warranty constitutes an essential + part of this License. No use of any Covered Software is authorized under + this License except under this disclaimer. + +7. Limitation of Liability + + Under no circumstances and under no legal theory, whether tort (including + negligence), contract, or otherwise, shall any Contributor, or anyone who + distributes Covered Software as permitted above, be liable to You for any + direct, indirect, special, incidental, or consequential damages of any + character including, without limitation, damages for lost profits, loss of + goodwill, work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses, even if such party shall have been + informed of the possibility of such damages. This limitation of liability + shall not apply to liability for death or personal injury resulting from + such party's negligence to the extent applicable law prohibits such + limitation. Some jurisdictions do not allow the exclusion or limitation of + incidental or consequential damages, so this exclusion and limitation may + not apply to You. + +8. Litigation + + Any litigation relating to this License may be brought only in the courts + of a jurisdiction where the defendant maintains its principal place of + business and such litigation shall be governed by laws of that + jurisdiction, without reference to its conflict-of-law provisions. Nothing + in this Section shall prevent a party's ability to bring cross-claims or + counter-claims. + +9. 
Miscellaneous + + This License represents the complete agreement concerning the subject + matter hereof. If any provision of this License is held to be + unenforceable, such provision shall be reformed only to the extent + necessary to make it enforceable. Any law or regulation which provides that + the language of a contract shall be construed against the drafter shall not + be used to construe this License against a Contributor. + + +10. Versions of the License + +10.1. New Versions + + Mozilla Foundation is the license steward. Except as provided in Section + 10.3, no one other than the license steward has the right to modify or + publish new versions of this License. Each version will be given a + distinguishing version number. + +10.2. Effect of New Versions + + You may distribute the Covered Software under the terms of the version + of the License under which You originally received the Covered Software, + or under the terms of any subsequent version published by the license + steward. + +10.3. Modified Versions + + If you create software not governed by this License, and you want to + create a new license for such software, you may create and use a + modified version of this License if you rename the license and remove + any references to the name of the license steward (except to note that + such modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary + Licenses If You choose to distribute Source Code Form that is + Incompatible With Secondary Licenses under the terms of this version of + the License, the notice described in Exhibit B of this License must be + attached. + +Exhibit A - Source Code Form License Notice + + This Source Code Form is subject to the + terms of the Mozilla Public License, v. + 2.0. If a copy of the MPL was not + distributed with this file, You can + obtain one at + http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular file, +then You may include the notice in a location (such as a LICENSE file in a +relevant directory) where a recipient would be likely to look for such a +notice. + +You may add additional accurate notices of copyright ownership. + +Exhibit B - "Incompatible With Secondary Licenses" Notice + + This Source Code Form is "Incompatible + With Secondary Licenses", as defined by + the Mozilla Public License, v. 2.0. diff --git a/vendor/github.com/pearkes/dnsimple/README.md b/vendor/github.com/pearkes/dnsimple/README.md new file mode 100644 index 000000000..9b67aec15 --- /dev/null +++ b/vendor/github.com/pearkes/dnsimple/README.md @@ -0,0 +1,17 @@ +## dnsimple + +[![](https://api.travis-ci.org/pearkes/dnsimple.svg?branch=master)](https://travis-ci.org/pearkes/dnsimple) + +This package provides the `dnsimple` package which offers +an interface to the DNSimple API. + +It's intentionally designed to make heavy use of built-ins and strings +in place of custom data structures and proper types. It also only implements +specific endpoints, and doesn't have full API coverage. 
+
+**For those reasons, I recommend looking elsewhere if you just need
+a standard DNSimple API client.**
+
+### Documentation
+
+The full documentation is available on [Godoc](http://godoc.org/github.com/pearkes/dnsimple)
diff --git a/vendor/github.com/pearkes/dnsimple/api.go b/vendor/github.com/pearkes/dnsimple/api.go
new file mode 100644
index 000000000..2e3908fc0
--- /dev/null
+++ b/vendor/github.com/pearkes/dnsimple/api.go
@@ -0,0 +1,175 @@
+package dnsimple
+
+import (
+	"bytes"
+	"encoding/json"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"net/http"
+	"net/url"
+	"strings"
+
+	"github.com/hashicorp/go-cleanhttp"
+)
+
+// Client provides a client to the DNSimple API
+type Client struct {
+	// Access Token
+	Token string
+
+	// User Email
+	Email string
+
+	// Domain Token
+	DomainToken string
+
+	// URL of the DNSimple API to use
+	URL string
+
+	// Http is the HTTP client to use. A client with
+	// default values will be used if not provided.
+	Http *http.Client
+}
+
+// DNSimpleError is the error format the API returns
+// to us when there is a problem
+type DNSimpleError struct {
+	Errors map[string][]string `json:"errors"`
+}
+
+func (d *DNSimpleError) Join() string {
+	var errs []string
+
+	for k, v := range d.Errors {
+		errs = append(errs, fmt.Sprintf("%s errors: %s", k, strings.Join(v, ", ")))
+	}
+
+	return strings.Join(errs, ", ")
+}
+
+// NewClient returns a new dnsimple client; it
+// requires an authorization token. You can generate
+// an OAuth token by visiting the Apps & API section
+// of the DNSimple control panel for your account.
+func NewClient(email string, token string) (*Client, error) {
+	client := Client{
+		Token: token,
+		Email: email,
+		URL:   "https://api.dnsimple.com/v1",
+		Http:  cleanhttp.DefaultClient(),
+	}
+	return &client, nil
+}
+
+func NewClientWithDomainToken(domainToken string) (*Client, error) {
+	client := Client{
+		DomainToken: domainToken,
+		URL:         "https://api.dnsimple.com/v1",
+		Http:        cleanhttp.DefaultClient(),
+	}
+	return &client, nil
+}
+
+// NewRequest creates a new request with the params
+func (c *Client) NewRequest(body map[string]interface{}, method string, endpoint string) (*http.Request, error) {
+	u, err := url.Parse(c.URL + endpoint)
+
+	if err != nil {
+		return nil, fmt.Errorf("Error parsing base URL: %s", err)
+	}
+
+	rBody, err := encodeBody(body)
+	if err != nil {
+		return nil, fmt.Errorf("Error encoding request body: %s", err)
+	}
+
+	// Build the request
+	req, err := http.NewRequest(method, u.String(), rBody)
+	if err != nil {
+		return nil, fmt.Errorf("Error creating request: %s", err)
+	}
+
+	// Add the authorization header
+	if c.DomainToken != "" {
+		req.Header.Add("X-DNSimple-Domain-Token", c.DomainToken)
+	} else {
+		req.Header.Add("X-DNSimple-Token", fmt.Sprintf("%s:%s", c.Email, c.Token))
+	}
+	req.Header.Add("Accept", "application/json")
+
+	// If it's not a GET, add a content-type
+	if method != "GET" {
+		req.Header.Add("Content-Type", "application/json")
+	}
+
+	return req, nil
+
+}
+
+// parseErr is used to take an error json resp
+// and return a single string for use in error messages
+func parseErr(resp *http.Response) error {
+	dnsError := DNSimpleError{}
+
+	err := decodeBody(resp, &dnsError)
+
+	// if there was an error decoding the body, just return that
+	if err != nil {
+		return fmt.Errorf("Error parsing error body for non-200 request: %s", err)
+	}
+
+	return fmt.Errorf("API Error: %s", dnsError.Join())
+}
+
+// decodeBody is used to JSON decode a body
+func decodeBody(resp *http.Response, out interface{}) error {
+	body, err := ioutil.ReadAll(resp.Body)
+
+	if err != nil {
+		return err
+	}
+
+	if err = json.Unmarshal(body, &out); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func encodeBody(obj interface{}) (io.Reader, error) {
+	buf := bytes.NewBuffer(nil)
+	enc := json.NewEncoder(buf)
+	if err := enc.Encode(obj); err != nil {
+		return nil, err
+	}
+	return buf, nil
+}
+
+// checkResp wraps http.Client.Do() and verifies that the
+// request was successful. A non-200 request returns an error
+// formatted to include any validation problems or otherwise
+func checkResp(resp *http.Response, err error) (*http.Response, error) {
+	// If the err is already there, there was an error higher
+	// up the chain, so just return that
+	if err != nil {
+		return resp, err
+	}
+
+	switch i := resp.StatusCode; {
+	case i == 200:
+		return resp, nil
+	case i == 201:
+		return resp, nil
+	case i == 202:
+		return resp, nil
+	case i == 204:
+		return resp, nil
+	case i == 422:
+		return nil, fmt.Errorf("API Error: %s", resp.Status)
+	case i == 400:
+		return nil, parseErr(resp)
+	default:
+		return nil, fmt.Errorf("API Error: %s", resp.Status)
+	}
+}
diff --git a/vendor/github.com/pearkes/dnsimple/api_test.go b/vendor/github.com/pearkes/dnsimple/api_test.go
new file mode 100644
index 000000000..0e2ce248c
--- /dev/null
+++ b/vendor/github.com/pearkes/dnsimple/api_test.go
@@ -0,0 +1,83 @@
+package dnsimple
+
+import (
+	"testing"
+)
+
+func makeClient(t *testing.T) *Client {
+	client, err := NewClient("foobaremail", "foobartoken")
+
+	if err != nil {
+		t.Fatalf("err: %v", err)
+	}
+
+	if client.Token != "foobartoken" {
+		t.Fatalf("token not set on client: %s", client.Token)
+	}
+
+	return client
+}
+
+func makeClientWithDomainToken(t *testing.T) *Client {
+	client, err := NewClientWithDomainToken("foobardomaintoken")
+
+	if err != nil {
+		t.Fatalf("err: %v", err)
+	}
+
+	if client.DomainToken != "foobardomaintoken" {
+		t.Fatalf("domain token not set on client: %s", client.DomainToken)
+	}
+
+	return client
+}
+
+func TestClient_NewRequest(t *testing.T) {
+	c := makeClient(t)
+
+	body := map[string]interface{}{
+		"foo": "bar",
+		"baz": "bar",
+	}
+	req, err := c.NewRequest(body, "POST", "/bar")
+	if err != nil {
+		t.Fatalf("bad: %v", err)
+	}
+
+	if req.URL.String() != "https://api.dnsimple.com/v1/bar" {
+		t.Fatalf("bad base url: %v", req.URL.String())
+	}
+
+	if req.Header.Get("X-DNSimple-Token") != "foobaremail:foobartoken" {
+		t.Fatalf("bad auth header: %v", req.Header)
+	}
+
+	if req.Method != "POST" {
+		t.Fatalf("bad method: %v", req.Method)
+	}
+}
+
+func TestClientDomainToken_NewRequest(t *testing.T) {
+	c := makeClientWithDomainToken(t)
+
+	body := map[string]interface{}{
+		"foo": "bar",
+		"baz": "bar",
+	}
+	req, err := c.NewRequest(body, "POST", "/bar")
+	if err != nil {
+		t.Fatalf("bad: %v", err)
+	}
+
+	if req.URL.String() != "https://api.dnsimple.com/v1/bar" {
+		t.Fatalf("bad base url: %v", req.URL.String())
+	}
+
+	if req.Header.Get("X-DNSimple-Domain-Token") != "foobardomaintoken" {
+		t.Fatalf("bad auth header: %v", req.Header)
+	}
+
+	if req.Method != "POST" {
+		t.Fatalf("bad method: %v", req.Method)
+	}
+}
diff --git a/vendor/github.com/pearkes/dnsimple/domain.go b/vendor/github.com/pearkes/dnsimple/domain.go
new file mode 100644
index 000000000..40c09825c
--- /dev/null
+++ b/vendor/github.com/pearkes/dnsimple/domain.go
@@ -0,0 +1,62 @@
+package dnsimple
+
+import (
+	"encoding/json"
+	"io"
+	"time"
+)
+
+// GetDomains retrieves all the domains for a given account.
+func (c *Client) GetDomains() ([]Domain, error) { + req, err := c.NewRequest(nil, "GET", "/domains") + if err != nil { + return nil, err + } + resp, err := c.Http.Do(req) + if err != nil { + return nil, err + } + defer resp.Body.Close() + domainResponses := []DomainResponse{} + err = decode(resp.Body, &domainResponses) + if err != nil { + return nil, err + } + domains := make([]Domain, len(domainResponses)) + for i, dr := range domainResponses { + domains[i] = dr.Domain + } + return domains, nil +} + +func decode(reader io.Reader, obj interface{}) error { + decoder := json.NewDecoder(reader) + err := decoder.Decode(&obj) + if err != nil { + return err + } + return nil +} + +type DomainResponse struct { + Domain Domain `json:"domain"` +} + +type Domain struct { + Id int `json:"id"` + UserId int `json:"user_id"` + RegistrantId int `json:"registrant_id"` + Name string `json:"name"` + UnicodeName string `json:"unicode_name"` + Token string `json:"token"` + State string `json:"state"` + Language string `json:"language"` + Lockable bool `json:"lockable"` + AutoRenew bool `json:"auto_renew"` + WhoisProtected bool `json:"whois_protected"` + RecordCount int `json:"record_count"` + ServiceCount int `json:"service_count"` + ExpiresOn string `json:"expires_on"` + CreatedAt time.Time `json:"created_at"` + UpdatedAt time.Time `json:"updated_at"` +} diff --git a/vendor/github.com/pearkes/dnsimple/domain_test.go b/vendor/github.com/pearkes/dnsimple/domain_test.go new file mode 100644 index 000000000..d1ce11919 --- /dev/null +++ b/vendor/github.com/pearkes/dnsimple/domain_test.go @@ -0,0 +1,102 @@ +package dnsimple + +import ( + "time" + + . "github.com/motain/gocheck" +) + +func (s *S) Test_GetDomains(c *C) { + testServer.Response(202, nil, domainsExample) + + domains, err := s.client.GetDomains() + + _ = testServer.WaitRequest() + + c.Assert(err, IsNil) + c.Assert(domains, DeepEquals, []Domain{ + Domain{ + 228, + 19, + 0, + "example.it", + "example.it", + "domain-token", + "hosted", + "", + true, + false, + false, + 5, + 0, + "", + Jan15_3, + Jan15_3, + }, + Domain{ + 227, + 19, + 28, + "example.com", + "example.com", + "domain-token", + "registered", + "", + true, + true, + false, + 7, + 0, + "2015-01-16", + Jan15_1, + Jan16, + }, + }) +} + +var domainsExample = `[ + { + "domain": { + "id": 228, + "user_id": 19, + "registrant_id": null, + "name": "example.it", + "unicode_name": "example.it", + "token": "domain-token", + "state": "hosted", + "language": null, + "lockable": true, + "auto_renew": false, + "whois_protected": false, + "record_count": 5, + "service_count": 0, + "expires_on": null, + "created_at": "2014-01-15T22:03:49Z", + "updated_at": "2014-01-15T22:03:49Z" + } + }, + { + "domain": { + "id": 227, + "user_id": 19, + "registrant_id": 28, + "name": "example.com", + "unicode_name": "example.com", + "token": "domain-token", + "state": "registered", + "language": null, + "lockable": true, + "auto_renew": true, + "whois_protected": false, + "record_count": 7, + "service_count": 0, + "expires_on": "2015-01-16", + "created_at": "2014-01-15T22:01:55Z", + "updated_at": "2014-01-16T22:56:22Z" + } + } +]` + +var Jan15_3, _ = time.Parse("2006-01-02T15:04:05Z", "2014-01-15T22:03:49Z") +var Jan15_1, _ = time.Parse("2006-01-02T15:04:05Z", "2014-01-15T22:01:55Z") +var Jan16, _ = time.Parse("2006-01-02T15:04:05Z", "2014-01-16T22:56:22Z") diff --git a/vendor/github.com/pearkes/dnsimple/record.go b/vendor/github.com/pearkes/dnsimple/record.go new file mode 100644 index 000000000..6fedb714b --- /dev/null +++ 
b/vendor/github.com/pearkes/dnsimple/record.go
@@ -0,0 +1,215 @@
+package dnsimple
+
+import (
+	"fmt"
+	"strconv"
+)
+
+type RecordResponse struct {
+	Record Record `json:"record"`
+}
+
+// Record is used to represent a retrieved Record. Numeric properties
+// are int64s; the String* helpers format them as strings.
+type Record struct {
+	Name       string `json:"name"`
+	Content    string `json:"content"`
+	DomainId   int64  `json:"domain_id"`
+	Id         int64  `json:"id"`
+	Prio       int64  `json:"prio"`
+	RecordType string `json:"record_type"`
+	Ttl        int64  `json:"ttl"`
+}
+
+// StringDomainId returns the domain id as a string
+func (r *Record) StringDomainId() string {
+	return strconv.FormatInt(r.DomainId, 10)
+}
+
+// StringId returns the record id as a string
+func (r *Record) StringId() string {
+	return strconv.FormatInt(r.Id, 10)
+}
+
+// StringPrio returns the priority as a string
+func (r *Record) StringPrio() string {
+	return strconv.FormatInt(r.Prio, 10)
+}
+
+// StringTtl returns the TTL as a string
+func (r *Record) StringTtl() string {
+	return strconv.FormatInt(r.Ttl, 10)
+}
+
+// ChangeRecord contains the request parameters to create or update a
+// record.
+type ChangeRecord struct {
+	Name  string // name of the record
+	Value string // where the record points
+	Type  string // type, e.g. A, MX
+	Ttl   string // TTL of record
+}
+
+// CreateRecord creates a record from the parameters specified and
+// returns an error if it fails. If no error and an ID is returned,
+// the Record was successfully created.
+func (c *Client) CreateRecord(domain string, opts *ChangeRecord) (string, error) {
+	// Make the request parameters
+	params := make(map[string]interface{})
+
+	params["name"] = opts.Name
+	params["record_type"] = opts.Type
+	params["content"] = opts.Value
+
+	if opts.Ttl != "" {
+		ttl, err := strconv.ParseInt(opts.Ttl, 0, 0)
+		if err != nil {
+			return "", err
+		}
+		params["ttl"] = ttl
+	}
+
+	endpoint := fmt.Sprintf("/domains/%s/records", domain)
+
+	req, err := c.NewRequest(params, "POST", endpoint)
+	if err != nil {
+		return "", err
+	}
+
+	resp, err := checkResp(c.Http.Do(req))
+	if err != nil {
+		return "", fmt.Errorf("Error creating record: %s", err)
+	}
+
+	record := new(RecordResponse)
+
+	err = decodeBody(resp, &record)
+
+	if err != nil {
+		return "", fmt.Errorf("Error parsing record response: %s", err)
+	}
+
+	// The request was successful
+	return record.Record.StringId(), nil
+}
+
+// UpdateRecord updates a record from the parameters specified and
+// returns an error if it fails.
+func (c *Client) UpdateRecord(domain string, id string, opts *ChangeRecord) (string, error) {
+	// Make the request parameters
+	params := make(map[string]interface{})
+
+	if opts.Name != "" {
+		params["name"] = opts.Name
+	}
+
+	if opts.Type != "" {
+		params["record_type"] = opts.Type
+	}
+
+	if opts.Value != "" {
+		params["content"] = opts.Value
+	}
+
+	if opts.Ttl != "" {
+		ttl, err := strconv.ParseInt(opts.Ttl, 0, 0)
+		if err != nil {
+			return "", err
+		}
+		params["ttl"] = ttl
+	}
+
+	endpoint := fmt.Sprintf("/domains/%s/records/%s", domain, id)
+
+	req, err := c.NewRequest(params, "PUT", endpoint)
+	if err != nil {
+		return "", err
+	}
+
+	resp, err := checkResp(c.Http.Do(req))
+	if err != nil {
+		return "", fmt.Errorf("Error updating record: %s", err)
+	}
+
+	record := new(RecordResponse)
+
+	err = decodeBody(resp, &record)
+
+	if err != nil {
+		return "", fmt.Errorf("Error parsing record response: %s", err)
+	}
+
+	// The request was successful
+	return record.Record.StringId(), nil
+}
+
+// DestroyRecord destroys a record by the ID specified and
+// returns an error if it fails.
+// If no error is returned, the record was successfully destroyed.
+func (c *Client) DestroyRecord(domain string, id string) error {
+	var body map[string]interface{}
+	req, err := c.NewRequest(body, "DELETE", fmt.Sprintf("/domains/%s/records/%s", domain, id))
+
+	if err != nil {
+		return err
+	}
+
+	_, err = checkResp(c.Http.Do(req))
+
+	if err != nil {
+		return fmt.Errorf("Error destroying record: %s", err)
+	}
+
+	// The request was successful
+	return nil
+}
+
+// RetrieveRecord gets a record by the ID specified and
+// returns a Record and an error. An error will be returned for failed
+// requests with a nil Record.
+func (c *Client) RetrieveRecord(domain string, id string) (*Record, error) {
+	var body map[string]interface{}
+	req, err := c.NewRequest(body, "GET", fmt.Sprintf("/domains/%s/records/%s", domain, id))
+	if err != nil {
+		return nil, err
+	}
+
+	resp, err := checkResp(c.Http.Do(req))
+	if err != nil {
+		return nil, fmt.Errorf("Error retrieving record: %s", err)
+	}
+
+	recordResp := RecordResponse{}
+
+	err = decodeBody(resp, &recordResp)
+
+	if err != nil {
+		return nil, fmt.Errorf("Error decoding record response: %s", err)
+	}
+
+	// The request was successful
+	return &recordResp.Record, nil
+}
+
+// GetRecords retrieves all the records for the given domain.
+func (c *Client) GetRecords(domain string) ([]Record, error) {
+	req, err := c.NewRequest(nil, "GET", "/domains/"+domain+"/records")
+	if err != nil {
+		return nil, err
+	}
+	resp, err := c.Http.Do(req)
+	if err != nil {
+		return nil, err
+	}
+	defer resp.Body.Close()
+	var recordResponses []RecordResponse
+	err = decode(resp.Body, &recordResponses)
+	if err != nil {
+		return nil, err
+	}
+	records := make([]Record, len(recordResponses))
+	for i, rr := range recordResponses {
+		records[i] = rr.Record
+	}
+	return records, nil
+}
diff --git a/vendor/github.com/pearkes/dnsimple/record_test.go b/vendor/github.com/pearkes/dnsimple/record_test.go
new file mode 100644
index 000000000..ab24e0a6e
--- /dev/null
+++ b/vendor/github.com/pearkes/dnsimple/record_test.go
@@ -0,0 +1,116 @@
+package dnsimple
+
+import (
+	"github.com/pearkes/dnsimple/testutil"
+	"strings"
+	"testing"
+
+	.
"github.com/motain/gocheck" +) + +func Test(t *testing.T) { + TestingT(t) +} + +type S struct { + client *Client +} + +var _ = Suite(&S{}) + +var testServer = testutil.NewHTTPServer() + +func (s *S) SetUpSuite(c *C) { + testServer.Start() + var err error + s.client, err = NewClient("email", "foobar") + s.client.URL = "http://localhost:4444" + if err != nil { + panic(err) + } +} + +func (s *S) TearDownTest(c *C) { + testServer.Flush() +} + +func (s *S) Test_CreateRecord(c *C) { + testServer.Response(202, nil, recordExample) + + opts := ChangeRecord{ + Name: "foobar", + } + + id, err := s.client.CreateRecord("example.com", &opts) + + _ = testServer.WaitRequest() + + c.Assert(err, IsNil) + c.Assert(id, Equals, "25") +} + +func (s *S) Test_UpdateRecord(c *C) { + testServer.Response(200, nil, recordExample) + + opts := ChangeRecord{ + Name: "foobar", + } + + id, err := s.client.UpdateRecord("example.com", "25", &opts) + + _ = testServer.WaitRequest() + + c.Assert(err, IsNil) + c.Assert(id, Equals, "25") +} + +func (s *S) Test_CreateRecord_fail(c *C) { + testServer.Response(400, nil, recordExampleError) + + opts := ChangeRecord{ + Name: "foobar", + } + + _, err := s.client.CreateRecord("example.com", &opts) + + _ = testServer.WaitRequest() + + c.Assert(strings.Contains(err.Error(), "unsupported"), Equals, true) +} + +func (s *S) Test_RetrieveRecord(c *C) { + testServer.Response(200, nil, recordExample) + + record, err := s.client.RetrieveRecord("example.com", "25") + + _ = testServer.WaitRequest() + + c.Assert(err, IsNil) + c.Assert(record.StringId(), Equals, "25") + c.Assert(record.StringDomainId(), Equals, "28") + c.Assert(record.StringTtl(), Equals, "3600") + c.Assert(record.Name, Equals, "foobar") + c.Assert(record.Content, Equals, "mail.example.com") + c.Assert(record.RecordType, Equals, "MX") +} + +var recordExampleError = `{ + "errors": { + "content": ["can't be blank"], + "record_type": ["can't be blank","unsupported"] + } +}` + +var recordExample = `{ + "record": { + "content": "mail.example.com", + "created_at": "2013-01-29T14:25:38Z", + "domain_id": 28, + "id": 25, + "name": "foobar", + "prio": 10, + "record_type": "MX", + "ttl": 3600, + "updated_at": "2013-01-29T14:25:38Z" + } +}` diff --git a/vendor/github.com/pearkes/dnsimple/testutil/http.go b/vendor/github.com/pearkes/dnsimple/testutil/http.go new file mode 100644 index 000000000..e4f8ddb80 --- /dev/null +++ b/vendor/github.com/pearkes/dnsimple/testutil/http.go @@ -0,0 +1,174 @@ +package testutil + +import ( + "bytes" + "fmt" + "io/ioutil" + "net" + "net/http" + "net/url" + "os" + "time" +) + +type HTTPServer struct { + URL string + Timeout time.Duration + started bool + request chan *http.Request + response chan ResponseFunc +} + +type Response struct { + Status int + Headers map[string]string + Body string +} + +func NewHTTPServer() *HTTPServer { + return &HTTPServer{URL: "http://localhost:4444", Timeout: 5 * time.Second} +} + +type ResponseFunc func(path string) Response + +func (s *HTTPServer) Start() { + if s.started { + return + } + s.started = true + s.request = make(chan *http.Request, 1024) + s.response = make(chan ResponseFunc, 1024) + u, err := url.Parse(s.URL) + if err != nil { + panic(err) + } + l, err := net.Listen("tcp", u.Host) + if err != nil { + panic(err) + } + go http.Serve(l, s) + + s.Response(203, nil, "") + for { + // Wait for it to be up. + resp, err := http.Get(s.URL) + if err == nil && resp.StatusCode == 203 { + break + } + time.Sleep(1e8) + } + s.WaitRequest() // Consume dummy request. 
+} + +// Flush discards all pending requests and responses. +func (s *HTTPServer) Flush() { + for { + select { + case <-s.request: + case <-s.response: + default: + return + } + } +} + +func body(req *http.Request) string { + data, err := ioutil.ReadAll(req.Body) + if err != nil { + panic(err) + } + return string(data) +} + +func (s *HTTPServer) ServeHTTP(w http.ResponseWriter, req *http.Request) { + req.ParseMultipartForm(1e6) + data, err := ioutil.ReadAll(req.Body) + if err != nil { + panic(err) + } + req.Body = ioutil.NopCloser(bytes.NewBuffer(data)) + s.request <- req + var resp Response + select { + case respFunc := <-s.response: + resp = respFunc(req.URL.Path) + case <-time.After(s.Timeout): + const msg = "ERROR: Timeout waiting for test to prepare a response\n" + fmt.Fprintf(os.Stderr, msg) + resp = Response{500, nil, msg} + } + if resp.Headers != nil { + h := w.Header() + for k, v := range resp.Headers { + h.Set(k, v) + } + } + if resp.Status != 0 { + w.WriteHeader(resp.Status) + } + w.Write([]byte(resp.Body)) +} + +// WaitRequests returns the next n requests made to the http server from +// the queue. If not enough requests were previously made, it waits until +// the timeout value for them to be made. +func (s *HTTPServer) WaitRequests(n int) []*http.Request { + reqs := make([]*http.Request, 0, n) + for i := 0; i < n; i++ { + select { + case req := <-s.request: + reqs = append(reqs, req) + case <-time.After(s.Timeout): + panic("Timeout waiting for request") + } + } + return reqs +} + +// WaitRequest returns the next request made to the http server from +// the queue. If no requests were previously made, it waits until the +// timeout value for one to be made. +func (s *HTTPServer) WaitRequest() *http.Request { + return s.WaitRequests(1)[0] +} + +// ResponseFunc prepares the test server to respond the following n +// requests using f to build each response. +func (s *HTTPServer) ResponseFunc(n int, f ResponseFunc) { + for i := 0; i < n; i++ { + s.response <- f + } +} + +// ResponseMap maps request paths to responses. +type ResponseMap map[string]Response + +// ResponseMap prepares the test server to respond the following n +// requests using the m to obtain the responses. +func (s *HTTPServer) ResponseMap(n int, m ResponseMap) { + f := func(path string) Response { + for rpath, resp := range m { + if rpath == path { + return resp + } + } + body := "Path not found in response map: " + path + return Response{Status: 500, Body: body} + } + s.ResponseFunc(n, f) +} + +// Responses prepares the test server to respond the following n requests +// using the provided response parameters. +func (s *HTTPServer) Responses(n int, status int, headers map[string]string, body string) { + f := func(path string) Response { + return Response{status, headers, body} + } + s.ResponseFunc(n, f) +} + +// Response prepares the test server to respond the following request +// using the provided response parameters. 
+func (s *HTTPServer) Response(status int, headers map[string]string, body string) { + s.Responses(1, status, headers, body) +} diff --git a/vendor/github.com/pearkes/mailgun/.gitignore b/vendor/github.com/pearkes/mailgun/.gitignore new file mode 100644 index 000000000..836562412 --- /dev/null +++ b/vendor/github.com/pearkes/mailgun/.gitignore @@ -0,0 +1,23 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe +*.test diff --git a/vendor/github.com/pearkes/mailgun/LICENSE b/vendor/github.com/pearkes/mailgun/LICENSE new file mode 100644 index 000000000..be2cc4dfb --- /dev/null +++ b/vendor/github.com/pearkes/mailgun/LICENSE @@ -0,0 +1,362 @@ +Mozilla Public License, version 2.0 + +1. Definitions + +1.1. "Contributor" + + means each individual or legal entity that creates, contributes to the + creation of, or owns Covered Software. + +1.2. "Contributor Version" + + means the combination of the Contributions of others (if any) used by a + Contributor and that particular Contributor's Contribution. + +1.3. "Contribution" + + means Covered Software of a particular Contributor. + +1.4. "Covered Software" + + means Source Code Form to which the initial Contributor has attached the + notice in Exhibit A, the Executable Form of such Source Code Form, and + Modifications of such Source Code Form, in each case including portions + thereof. + +1.5. "Incompatible With Secondary Licenses" + means + + a. that the initial Contributor has attached the notice described in + Exhibit B to the Covered Software; or + + b. that the Covered Software was made available under the terms of + version 1.1 or earlier of the License, but not also under the terms of + a Secondary License. + +1.6. "Executable Form" + + means any form of the work other than Source Code Form. + +1.7. "Larger Work" + + means a work that combines Covered Software with other material, in a + separate file or files, that is not Covered Software. + +1.8. "License" + + means this document. + +1.9. "Licensable" + + means having the right to grant, to the maximum extent possible, whether + at the time of the initial grant or subsequently, any and all of the + rights conveyed by this License. + +1.10. "Modifications" + + means any of the following: + + a. any file in Source Code Form that results from an addition to, + deletion from, or modification of the contents of Covered Software; or + + b. any new file in Source Code Form that contains any Covered Software. + +1.11. "Patent Claims" of a Contributor + + means any patent claim(s), including without limitation, method, + process, and apparatus claims, in any patent Licensable by such + Contributor that would be infringed, but for the grant of the License, + by the making, using, selling, offering for sale, having made, import, + or transfer of either its Contributions or its Contributor Version. + +1.12. "Secondary License" + + means either the GNU General Public License, Version 2.0, the GNU Lesser + General Public License, Version 2.1, the GNU Affero General Public + License, Version 3.0, or any later versions of those licenses. + +1.13. "Source Code Form" + + means the form of the work preferred for making modifications. + +1.14. "You" (or "Your") + + means an individual or a legal entity exercising rights under this + License. 
For legal entities, "You" includes any entity that controls, is + controlled by, or is under common control with You. For purposes of this + definition, "control" means (a) the power, direct or indirect, to cause + the direction or management of such entity, whether by contract or + otherwise, or (b) ownership of more than fifty percent (50%) of the + outstanding shares or beneficial ownership of such entity. + + +2. License Grants and Conditions + +2.1. Grants + + Each Contributor hereby grants You a world-wide, royalty-free, + non-exclusive license: + + a. under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or + as part of a Larger Work; and + + b. under Patent Claims of such Contributor to make, use, sell, offer for + sale, have made, import, and otherwise transfer either its + Contributions or its Contributor Version. + +2.2. Effective Date + + The licenses granted in Section 2.1 with respect to any Contribution + become effective for each Contribution on the date the Contributor first + distributes such Contribution. + +2.3. Limitations on Grant Scope + + The licenses granted in this Section 2 are the only rights granted under + this License. No additional rights or licenses will be implied from the + distribution or licensing of Covered Software under this License. + Notwithstanding Section 2.1(b) above, no patent license is granted by a + Contributor: + + a. for any code that a Contributor has removed from Covered Software; or + + b. for infringements caused by: (i) Your and any other third party's + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + + c. under Patent Claims infringed by Covered Software in the absence of + its Contributions. + + This License does not grant any rights in the trademarks, service marks, + or logos of any Contributor (except as may be necessary to comply with + the notice requirements in Section 3.4). + +2.4. Subsequent Licenses + + No Contributor makes additional grants as a result of Your choice to + distribute the Covered Software under a subsequent version of this + License (see Section 10.2) or under the terms of a Secondary License (if + permitted under the terms of Section 3.3). + +2.5. Representation + + Each Contributor represents that the Contributor believes its + Contributions are its original creation(s) or it has sufficient rights to + grant the rights to its Contributions conveyed by this License. + +2.6. Fair Use + + This License is not intended to limit any rights You have under + applicable copyright doctrines of fair use, fair dealing, or other + equivalents. + +2.7. Conditions + + Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in + Section 2.1. + + +3. Responsibilities + +3.1. Distribution of Source Form + + All distribution of Covered Software in Source Code Form, including any + Modifications that You create or to which You contribute, must be under + the terms of this License. You must inform recipients that the Source + Code Form of the Covered Software is governed by the terms of this + License, and how they can obtain a copy of this License. You may not + attempt to alter or restrict the recipients' rights in the Source Code + Form. + +3.2. 
Distribution of Executable Form + + If You distribute Covered Software in Executable Form then: + + a. such Covered Software must also be made available in Source Code Form, + as described in Section 3.1, and You must inform recipients of the + Executable Form how they can obtain a copy of such Source Code Form by + reasonable means in a timely manner, at a charge no more than the cost + of distribution to the recipient; and + + b. You may distribute such Executable Form under the terms of this + License, or sublicense it under different terms, provided that the + license for the Executable Form does not attempt to limit or alter the + recipients' rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + + You may create and distribute a Larger Work under terms of Your choice, + provided that You also comply with the requirements of this License for + the Covered Software. If the Larger Work is a combination of Covered + Software with a work governed by one or more Secondary Licenses, and the + Covered Software is not Incompatible With Secondary Licenses, this + License permits You to additionally distribute such Covered Software + under the terms of such Secondary License(s), so that the recipient of + the Larger Work may, at their option, further distribute the Covered + Software under the terms of either this License or such Secondary + License(s). + +3.4. Notices + + You may not remove or alter the substance of any license notices + (including copyright notices, patent notices, disclaimers of warranty, or + limitations of liability) contained within the Source Code Form of the + Covered Software, except that You may alter any license notices to the + extent required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + + You may choose to offer, and to charge a fee for, warranty, support, + indemnity or liability obligations to one or more recipients of Covered + Software. However, You may do so only on Your own behalf, and not on + behalf of any Contributor. You must make it absolutely clear that any + such warranty, support, indemnity, or liability obligation is offered by + You alone, and You hereby agree to indemnify every Contributor for any + liability incurred by such Contributor as a result of warranty, support, + indemnity or liability terms You offer. You may include additional + disclaimers of warranty and limitations of liability specific to any + jurisdiction. + +4. Inability to Comply Due to Statute or Regulation + + If it is impossible for You to comply with any of the terms of this License + with respect to some or all of the Covered Software due to statute, + judicial order, or regulation then You must: (a) comply with the terms of + this License to the maximum extent possible; and (b) describe the + limitations and the code they affect. Such description must be placed in a + text file included with all distributions of the Covered Software under + this License. Except to the extent prohibited by statute or regulation, + such description must be sufficiently detailed for a recipient of ordinary + skill to be able to understand it. + +5. Termination + +5.1. The rights granted under this License will terminate automatically if You + fail to comply with any of its terms. 
However, if You become compliant, + then the rights granted under this License from a particular Contributor + are reinstated (a) provisionally, unless and until such Contributor + explicitly and finally terminates Your grants, and (b) on an ongoing + basis, if such Contributor fails to notify You of the non-compliance by + some reasonable means prior to 60 days after You have come back into + compliance. Moreover, Your grants from a particular Contributor are + reinstated on an ongoing basis if such Contributor notifies You of the + non-compliance by some reasonable means, this is the first time You have + received notice of non-compliance with this License from such + Contributor, and You become compliant prior to 30 days after Your receipt + of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent + infringement claim (excluding declaratory judgment actions, + counter-claims, and cross-claims) alleging that a Contributor Version + directly or indirectly infringes any patent, then the rights granted to + You by any and all Contributors for the Covered Software under Section + 2.1 of this License shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user + license agreements (excluding distributors and resellers) which have been + validly granted by You or Your distributors under this License prior to + termination shall survive termination. + +6. Disclaimer of Warranty + + Covered Software is provided under this License on an "as is" basis, + without warranty of any kind, either expressed, implied, or statutory, + including, without limitation, warranties that the Covered Software is free + of defects, merchantable, fit for a particular purpose or non-infringing. + The entire risk as to the quality and performance of the Covered Software + is with You. Should any Covered Software prove defective in any respect, + You (not any Contributor) assume the cost of any necessary servicing, + repair, or correction. This disclaimer of warranty constitutes an essential + part of this License. No use of any Covered Software is authorized under + this License except under this disclaimer. + +7. Limitation of Liability + + Under no circumstances and under no legal theory, whether tort (including + negligence), contract, or otherwise, shall any Contributor, or anyone who + distributes Covered Software as permitted above, be liable to You for any + direct, indirect, special, incidental, or consequential damages of any + character including, without limitation, damages for lost profits, loss of + goodwill, work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses, even if such party shall have been + informed of the possibility of such damages. This limitation of liability + shall not apply to liability for death or personal injury resulting from + such party's negligence to the extent applicable law prohibits such + limitation. Some jurisdictions do not allow the exclusion or limitation of + incidental or consequential damages, so this exclusion and limitation may + not apply to You. + +8. Litigation + + Any litigation relating to this License may be brought only in the courts + of a jurisdiction where the defendant maintains its principal place of + business and such litigation shall be governed by laws of that + jurisdiction, without reference to its conflict-of-law provisions. Nothing + in this Section shall prevent a party's ability to bring cross-claims or + counter-claims. + +9. 
Miscellaneous + + This License represents the complete agreement concerning the subject + matter hereof. If any provision of this License is held to be + unenforceable, such provision shall be reformed only to the extent + necessary to make it enforceable. Any law or regulation which provides that + the language of a contract shall be construed against the drafter shall not + be used to construe this License against a Contributor. + + +10. Versions of the License + +10.1. New Versions + + Mozilla Foundation is the license steward. Except as provided in Section + 10.3, no one other than the license steward has the right to modify or + publish new versions of this License. Each version will be given a + distinguishing version number. + +10.2. Effect of New Versions + + You may distribute the Covered Software under the terms of the version + of the License under which You originally received the Covered Software, + or under the terms of any subsequent version published by the license + steward. + +10.3. Modified Versions + + If you create software not governed by this License, and you want to + create a new license for such software, you may create and use a + modified version of this License if you rename the license and remove + any references to the name of the license steward (except to note that + such modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary + Licenses If You choose to distribute Source Code Form that is + Incompatible With Secondary Licenses under the terms of this version of + the License, the notice described in Exhibit B of this License must be + attached. + +Exhibit A - Source Code Form License Notice + + This Source Code Form is subject to the + terms of the Mozilla Public License, v. + 2.0. If a copy of the MPL was not + distributed with this file, You can + obtain one at + http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular file, +then You may include the notice in a location (such as a LICENSE file in a +relevant directory) where a recipient would be likely to look for such a +notice. + +You may add additional accurate notices of copyright ownership. + +Exhibit B - "Incompatible With Secondary Licenses" Notice + + This Source Code Form is "Incompatible + With Secondary Licenses", as defined by + the Mozilla Public License, v. 2.0. diff --git a/vendor/github.com/pearkes/mailgun/README.md b/vendor/github.com/pearkes/mailgun/README.md new file mode 100644 index 000000000..14d620f43 --- /dev/null +++ b/vendor/github.com/pearkes/mailgun/README.md @@ -0,0 +1,15 @@ +## mailgun + +This package provides the `mailgun` package which offers +an interface to the Mailgun V2 API. + +It's intentionally designed to make heavy use of built-ins and strings +in place of custom data structures and proper types. It also only implements +specific endpoints, and doesn't have full API coverage. 
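+
+Still, as a rough sketch of what the package does provide - assuming a valid
+key is set in `MAILGUN_API_KEY` (this snippet is illustrative and not part of
+the package itself) - creating and then destroying a domain looks like this:
+
+```go
+package main
+
+import (
+	"fmt"
+
+	"github.com/pearkes/mailgun"
+)
+
+func main() {
+	// An empty key makes NewClient fall back to MAILGUN_API_KEY.
+	client, err := mailgun.NewClient("")
+	if err != nil {
+		panic(err)
+	}
+
+	// Create a domain, then clean it up again.
+	name, err := client.CreateDomain(&mailgun.CreateDomain{
+		Name:       "example.com",
+		SpamAction: "disabled",
+	})
+	if err != nil {
+		panic(err)
+	}
+	fmt.Println("created domain:", name)
+
+	if err := client.DestroyDomain(name); err != nil {
+		panic(err)
+	}
+}
+```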
+
+**For those reasons, I recommend looking elsewhere if you just need
+a standard mailgun API client.**
+
+### Documentation
+
+The full documentation is available on [Godoc](http://godoc.org/github.com/pearkes/mailgun)
diff --git a/vendor/github.com/pearkes/mailgun/api.go b/vendor/github.com/pearkes/mailgun/api.go
new file mode 100644
index 000000000..f66312b5f
--- /dev/null
+++ b/vendor/github.com/pearkes/mailgun/api.go
@@ -0,0 +1,140 @@
+package mailgun
+
+import (
+	"encoding/json"
+	"fmt"
+	"io/ioutil"
+	"net/http"
+	"net/url"
+	"os"
+
+	"github.com/hashicorp/go-cleanhttp"
+)
+
+// Client provides a client to the Mailgun API
+type Client struct {
+	// Authentication key
+	ApiKey string
+
+	// URL to the API to use
+	URL string
+
+	// Http is the HTTP client to use. A default will be
+	// used if not provided.
+	Http *http.Client
+}
+
+// MailgunError is the error format that the API returns
+// to us if there is a problem
+type MailgunError struct {
+	Message string `json:"message"`
+}
+
+// NewClient returns a new mailgun client,
+// requires an api key.
+func NewClient(key string) (*Client, error) {
+	// If it exists, grab the token from the environment
+	if key == "" {
+		key = os.Getenv("MAILGUN_API_KEY")
+	}
+
+	client := Client{
+		ApiKey: key,
+		URL:    "https://api.mailgun.net/v2",
+		Http:   cleanhttp.DefaultClient(),
+	}
+
+	return &client, nil
+}
+
+// NewRequest creates a new request with the given params
+func (c *Client) NewRequest(params map[string]string, method string, endpoint string) (*http.Request, error) {
+	p := url.Values{}
+	u, err := url.Parse(c.URL + endpoint)
+
+	if err != nil {
+		return nil, fmt.Errorf("Error parsing base URL: %s", err)
+	}
+
+	// Build up our request parameters
+	for k, v := range params {
+		p.Add(k, v)
+	}
+
+	// Add the params to our URL
+	u.RawQuery = p.Encode()
+
+	// Build the request
+	req, err := http.NewRequest(method, u.String(), nil)
+
+	if err != nil {
+		return nil, fmt.Errorf("Error creating request: %s", err)
+	}
+
+	// Add the basic auth header
+	req.SetBasicAuth("api", c.ApiKey)
+
+	return req, nil
+}
+
+// parseErr is used to take an error json resp
+// and return a single string for use in error messages
+func parseErr(resp *http.Response) error {
+	errBody := new(MailgunError)
+
+	err := decodeBody(resp, &errBody)
+
+	// if there was an error decoding the body, just return that
+	if err != nil {
+		return fmt.Errorf("Error parsing error body for non-200 request: %s", err)
+	}
+
+	return fmt.Errorf("API Error %s: %s", resp.Status, errBody.Message)
+}
+
+// decodeBody is used to JSON decode a body
+func decodeBody(resp *http.Response, out interface{}) error {
+	body, err := ioutil.ReadAll(resp.Body)
+
+	if err != nil {
+		return err
+	}
+
+	if err = json.Unmarshal(body, &out); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+// checkResp wraps http.Client.Do() and verifies that the
+// request was successful. A non-200 response returns an error
+// formatted to include any validation problems or other API errors
+func checkResp(resp *http.Response, err error) (*http.Response, error) {
+	// If the err is already there, there was an error higher
+	// up the chain, so just return that
+	if err != nil {
+		return resp, err
+	}
+
+	switch resp.StatusCode {
+	case 200, 201, 202, 204:
+		return resp, nil
+	case 400, 401, 402, 422:
+		return nil, parseErr(resp)
+	default:
+		return nil, fmt.Errorf("API Error: %s", resp.Status)
+	}
+}
diff --git a/vendor/github.com/pearkes/mailgun/api_test.go b/vendor/github.com/pearkes/mailgun/api_test.go
new file mode 100644
index 000000000..c606faf21
--- /dev/null
+++ b/vendor/github.com/pearkes/mailgun/api_test.go
@@ -0,0 +1,92 @@
+package mailgun
+
+import (
+	"os"
+	"testing"
+
+	. "github.com/motain/gocheck"
+	"github.com/pearkes/mailgun/testutil"
+)
+
+type S struct {
+	client *Client
+}
+
+var _ = Suite(&S{})
+
+var testServer = testutil.NewHTTPServer()
+
+func (s *S) SetUpSuite(c *C) {
+	testServer.Start()
+	var err error
+	s.client, err = NewClient("foobar")
+	if err != nil {
+		panic(err)
+	}
+	s.client.URL = "http://localhost:4444"
+}
+
+func (s *S) TearDownTest(c *C) {
+	testServer.Flush()
+}
+
+func makeClient(t *testing.T) *Client {
+	client, err := NewClient("foobarkey")
+
+	if err != nil {
+		t.Fatalf("err: %v", err)
+	}
+
+	if client.ApiKey != "foobarkey" {
+		t.Fatalf("key not set on client: %s", client.ApiKey)
+	}
+
+	return client
+}
+
+func Test_NewClient_env(t *testing.T) {
+	os.Setenv("MAILGUN_API_KEY", "bar")
+	client, err := NewClient("")
+
+	if err != nil {
+		t.Fatalf("err: %v", err)
+	}
+
+	if client.ApiKey != "bar" {
+		t.Fatalf("key not set on client: %s", client.ApiKey)
+	}
+}
+
+func TestClient_NewRequest(t *testing.T) {
+	c := makeClient(t)
+
+	params := map[string]string{
+		"foo": "bar",
+		"baz": "bar",
+	}
+	req, err := c.NewRequest(params, "POST", "/bar")
+	if err != nil {
+		t.Fatalf("bad: %v", err)
+	}
+
+	encoded := req.URL.Query()
+	if encoded.Get("foo") != "bar" {
+		t.Fatalf("bad: %v", encoded)
+	}
+
+	if encoded.Get("baz") != "bar" {
+		t.Fatalf("bad: %v", encoded)
+	}
+
+	if req.URL.String() != "https://api.mailgun.net/v2/bar?baz=bar&foo=bar" {
+		t.Fatalf("bad base url: %v", req.URL.String())
+	}
+
+	if req.Header.Get("Authorization") != "Basic YXBpOmZvb2JhcmtleQ==" {
+		t.Fatalf("bad auth header: %v", req.Header)
+	}
+
+	if req.Method != "POST" {
+		t.Fatalf("bad method: %v", req.Method)
+	}
+}
diff --git a/vendor/github.com/pearkes/mailgun/domain.go b/vendor/github.com/pearkes/mailgun/domain.go
new file mode 100644
index 000000000..115875ff6
--- /dev/null
+++ b/vendor/github.com/pearkes/mailgun/domain.go
@@ -0,0 +1,131 @@
+package mailgun
+
+import (
+	"fmt"
+	"strconv"
+)
+
+type DomainResponse struct {
+	Domain           Domain            `json:"domain"`
+	SendingRecords   []SendingRecord   `json:"sending_dns_records"`
+	ReceivingRecords []ReceivingRecord `json:"receiving_dns_records"`
+}
+
+// Domain is used to represent a retrieved Domain. All properties
+// are set as strings, except Wildcard, which is a bool.
+type Domain struct {
+	Name         string `json:"name"`
+	SmtpLogin    string `json:"smtp_login"`
+	SmtpPassword string `json:"smtp_password"`
+	SpamAction   string `json:"spam_action"`
+	Wildcard     bool   `json:"wildcard"`
+}
+
+type SendingRecord struct {
+	Name       string `json:"name"`
+	RecordType string `json:"record_type"`
+	Valid      string `json:"valid"`
+	Value      string `json:"value"`
+}
+
+type ReceivingRecord struct {
+	Priority   string `json:"priority"`
+	RecordType string `json:"record_type"`
+	Valid      string `json:"valid"`
+	Value      string `json:"value"`
+}
+
+func (d *Domain) StringWildcard() string {
+	return strconv.FormatBool(d.Wildcard)
+}
+
+// CreateDomain contains the request parameters to create a new
+// domain.
+type CreateDomain struct {
+	Name         string // Name of the domain
+	SmtpPassword string
+	SpamAction   string // one of disabled or tag
+	Wildcard     bool
+}
+
+// CreateDomain creates a domain from the parameters specified and
+// returns an error if it fails. If no error is returned, the Domain
+// was successfully created and its name is returned.
+func (c *Client) CreateDomain(opts *CreateDomain) (string, error) {
+	// Make the request parameters
+	params := make(map[string]string)
+
+	params["name"] = opts.Name
+	params["smtp_password"] = opts.SmtpPassword
+	params["spam_action"] = opts.SpamAction
+	params["wildcard"] = strconv.FormatBool(opts.Wildcard)
+
+	req, err := c.NewRequest(params, "POST", "/domains")
+	if err != nil {
+		return "", err
+	}
+
+	resp, err := checkResp(c.Http.Do(req))
+
+	if err != nil {
+		return "", fmt.Errorf("Error creating domain: %s", err)
+	}
+
+	domain := new(DomainResponse)
+
+	err = decodeBody(resp, &domain)
+
+	if err != nil {
+		return "", fmt.Errorf("Error parsing domain response: %s", err)
+	}
+
+	// The request was successful
+	return domain.Domain.Name, nil
+}
+
+// DestroyDomain destroys a domain by the name specified and
+// returns an error if it fails. If no error is returned,
+// the Domain was successfully destroyed.
+func (c *Client) DestroyDomain(name string) error {
+	req, err := c.NewRequest(map[string]string{}, "DELETE", fmt.Sprintf("/domains/%s", name))
+
+	if err != nil {
+		return err
+	}
+
+	_, err = checkResp(c.Http.Do(req))
+
+	if err != nil {
+		return fmt.Errorf("Error destroying domain: %s", err)
+	}
+
+	// The request was successful
+	return nil
+}
+
+// RetrieveDomain gets a domain and its records by the name specified and
+// returns a DomainResponse and an error. An error will be returned for failed
+// requests with a nil DomainResponse.
+func (c *Client) RetrieveDomain(name string) (DomainResponse, error) {
+	req, err := c.NewRequest(map[string]string{}, "GET", fmt.Sprintf("/domains/%s", name))
+
+	if err != nil {
+		return DomainResponse{}, err
+	}
+
+	resp, err := checkResp(c.Http.Do(req))
+	if err != nil {
+		return DomainResponse{}, fmt.Errorf("Error retrieving domain: %s", err)
+	}
+
+	domainresp := new(DomainResponse)
+
+	err = decodeBody(resp, domainresp)
+
+	if err != nil {
+		return DomainResponse{}, fmt.Errorf("Error decoding domain response: %s", err)
+	}
+
+	// The request was successful
+	return *domainresp, nil
+}
diff --git a/vendor/github.com/pearkes/mailgun/domain_test.go b/vendor/github.com/pearkes/mailgun/domain_test.go
new file mode 100644
index 000000000..3517d081d
--- /dev/null
+++ b/vendor/github.com/pearkes/mailgun/domain_test.go
@@ -0,0 +1,109 @@
+package mailgun
+
+import (
+	"testing"
+
+	.
"github.com/motain/gocheck" +) + +func TestDomain(t *testing.T) { + TestingT(t) +} + +func (s *S) Test_CreateDomain(c *C) { + testServer.Response(202, nil, domainExample) + + opts := CreateDomain{ + Name: "example.com", + SpamAction: "disabled", + Wildcard: true, + SmtpPassword: "foobar", + } + + id, err := s.client.CreateDomain(&opts) + + req := testServer.WaitRequest() + + c.Assert(req.Form["name"], DeepEquals, []string{"example.com"}) + c.Assert(req.Form["spam_action"], DeepEquals, []string{"disabled"}) + c.Assert(req.Form["wildcard"], DeepEquals, []string{"true"}) + c.Assert(req.Form["smtp_password"], DeepEquals, []string{"foobar"}) + c.Assert(err, IsNil) + c.Assert(id, Equals, "domain.com") +} + +func (s *S) Test_RetrieveDomain(c *C) { + testServer.Response(200, nil, domainExample) + + resp, err := s.client.RetrieveDomain("example.com") + + _ = testServer.WaitRequest() + + c.Assert(err, IsNil) + c.Assert(resp.Domain.Name, Equals, "domain.com") + c.Assert(resp.Domain.SmtpPassword, Equals, "4rtqo4p6rrx9") + c.Assert(resp.Domain.StringWildcard(), Equals, "false") + c.Assert(resp.ReceivingRecords[0].Priority, Equals, "10") + c.Assert(resp.SendingRecords[0].Value, Equals, "v=spf1 include:mailgun.org ~all") +} + +func (s *S) Test_DestroyDomain(c *C) { + testServer.Response(204, nil, "") + + err := s.client.DestroyDomain("example.com") + + _ = testServer.WaitRequest() + + c.Assert(err, IsNil) +} + +var domainErrorExample = `{ + "message": "Domain name format is bad" +}` + +var domainExample = ` +{ + "domain": { + "created_at": "Wed, 10 Jul 2013 19:26:52 GMT", + "smtp_login": "postmaster@domain.com", + "name": "domain.com", + "smtp_password": "4rtqo4p6rrx9", + "wildcard": false, + "spam_action": "tag" + }, + "receiving_dns_records": [ + { + "priority": "10", + "record_type": "MX", + "valid": "valid", + "value": "mxa.mailgun.org" + }, + { + "priority": "10", + "record_type": "MX", + "valid": "valid", + "value": "mxb.mailgun.org" + } + ], + "sending_dns_records": [ + { + "record_type": "TXT", + "valid": "valid", + "name": "domain.com", + "value": "v=spf1 include:mailgun.org ~all" + }, + { + "record_type": "TXT", + "valid": "valid", + "name": "domain.com", + "value": "k=rsa; p=MIGfMA0GCSqGSIb3DQEBAQUA...." + }, + { + "record_type": "CNAME", + "valid": "valid", + "name": "email.domain.com", + "value": "mailgun.org" + } + ] +} +` diff --git a/vendor/github.com/pearkes/mailgun/testutil/http.go b/vendor/github.com/pearkes/mailgun/testutil/http.go new file mode 100644 index 000000000..e4f8ddb80 --- /dev/null +++ b/vendor/github.com/pearkes/mailgun/testutil/http.go @@ -0,0 +1,174 @@ +package testutil + +import ( + "bytes" + "fmt" + "io/ioutil" + "net" + "net/http" + "net/url" + "os" + "time" +) + +type HTTPServer struct { + URL string + Timeout time.Duration + started bool + request chan *http.Request + response chan ResponseFunc +} + +type Response struct { + Status int + Headers map[string]string + Body string +} + +func NewHTTPServer() *HTTPServer { + return &HTTPServer{URL: "http://localhost:4444", Timeout: 5 * time.Second} +} + +type ResponseFunc func(path string) Response + +func (s *HTTPServer) Start() { + if s.started { + return + } + s.started = true + s.request = make(chan *http.Request, 1024) + s.response = make(chan ResponseFunc, 1024) + u, err := url.Parse(s.URL) + if err != nil { + panic(err) + } + l, err := net.Listen("tcp", u.Host) + if err != nil { + panic(err) + } + go http.Serve(l, s) + + s.Response(203, nil, "") + for { + // Wait for it to be up. 
+ resp, err := http.Get(s.URL) + if err == nil && resp.StatusCode == 203 { + break + } + time.Sleep(1e8) + } + s.WaitRequest() // Consume dummy request. +} + +// Flush discards all pending requests and responses. +func (s *HTTPServer) Flush() { + for { + select { + case <-s.request: + case <-s.response: + default: + return + } + } +} + +func body(req *http.Request) string { + data, err := ioutil.ReadAll(req.Body) + if err != nil { + panic(err) + } + return string(data) +} + +func (s *HTTPServer) ServeHTTP(w http.ResponseWriter, req *http.Request) { + req.ParseMultipartForm(1e6) + data, err := ioutil.ReadAll(req.Body) + if err != nil { + panic(err) + } + req.Body = ioutil.NopCloser(bytes.NewBuffer(data)) + s.request <- req + var resp Response + select { + case respFunc := <-s.response: + resp = respFunc(req.URL.Path) + case <-time.After(s.Timeout): + const msg = "ERROR: Timeout waiting for test to prepare a response\n" + fmt.Fprintf(os.Stderr, msg) + resp = Response{500, nil, msg} + } + if resp.Headers != nil { + h := w.Header() + for k, v := range resp.Headers { + h.Set(k, v) + } + } + if resp.Status != 0 { + w.WriteHeader(resp.Status) + } + w.Write([]byte(resp.Body)) +} + +// WaitRequests returns the next n requests made to the http server from +// the queue. If not enough requests were previously made, it waits until +// the timeout value for them to be made. +func (s *HTTPServer) WaitRequests(n int) []*http.Request { + reqs := make([]*http.Request, 0, n) + for i := 0; i < n; i++ { + select { + case req := <-s.request: + reqs = append(reqs, req) + case <-time.After(s.Timeout): + panic("Timeout waiting for request") + } + } + return reqs +} + +// WaitRequest returns the next request made to the http server from +// the queue. If no requests were previously made, it waits until the +// timeout value for one to be made. +func (s *HTTPServer) WaitRequest() *http.Request { + return s.WaitRequests(1)[0] +} + +// ResponseFunc prepares the test server to respond the following n +// requests using f to build each response. +func (s *HTTPServer) ResponseFunc(n int, f ResponseFunc) { + for i := 0; i < n; i++ { + s.response <- f + } +} + +// ResponseMap maps request paths to responses. +type ResponseMap map[string]Response + +// ResponseMap prepares the test server to respond the following n +// requests using the m to obtain the responses. +func (s *HTTPServer) ResponseMap(n int, m ResponseMap) { + f := func(path string) Response { + for rpath, resp := range m { + if rpath == path { + return resp + } + } + body := "Path not found in response map: " + path + return Response{Status: 500, Body: body} + } + s.ResponseFunc(n, f) +} + +// Responses prepares the test server to respond the following n requests +// using the provided response parameters. +func (s *HTTPServer) Responses(n int, status int, headers map[string]string, body string) { + f := func(path string) Response { + return Response{status, headers, body} + } + s.ResponseFunc(n, f) +} + +// Response prepares the test server to respond the following request +// using the provided response parameters. 
+func (s *HTTPServer) Response(status int, headers map[string]string, body string) {
+	s.Responses(1, status, headers, body)
+}
diff --git a/vendor/github.com/rackspace/gophercloud/.travis.yml b/vendor/github.com/rackspace/gophercloud/.travis.yml
new file mode 100644
index 000000000..325b90ac8
--- /dev/null
+++ b/vendor/github.com/rackspace/gophercloud/.travis.yml
@@ -0,0 +1,16 @@
+language: go
+install:
+  - go get -v -tags 'fixtures acceptance' ./...
+go:
+  - 1.2
+  - 1.3
+  - 1.4
+  - 1.5
+script: script/cibuild
+after_success:
+  - go get golang.org/x/tools/cmd/cover
+  - go get github.com/axw/gocov/gocov
+  - go get github.com/mattn/goveralls
+  - export PATH=$PATH:$HOME/gopath/bin/
+  - goveralls 2k7PTU3xa474Hymwgdj6XjqenNfGTNkO8
+sudo: false
diff --git a/vendor/github.com/rackspace/gophercloud/CONTRIBUTING.md b/vendor/github.com/rackspace/gophercloud/CONTRIBUTING.md
new file mode 100644
index 000000000..6ba5beb85
--- /dev/null
+++ b/vendor/github.com/rackspace/gophercloud/CONTRIBUTING.md
@@ -0,0 +1,274 @@
+# Contributing to gophercloud
+
+- [Getting started](#getting-started)
+- [Tests](#tests)
+- [Style guide](#basic-style-guide)
+- [5 ways to get involved](#5-ways-to-get-involved)
+
+## Setting up your git workspace
+
+As a contributor you will need to set up your workspace in a slightly different
+way than just downloading it. Here are the basic installation instructions:
+
+1. Configure your `$GOPATH` and run `go get` as described in the main
+[README](/README.md#how-to-install).
+
+2. Move into the directory that houses your local repository:
+
+   ```bash
+   cd ${GOPATH}/src/github.com/rackspace/gophercloud
+   ```
+
+3. Fork the `rackspace/gophercloud` repository and update your remote refs. You
+will need to rename the `origin` remote branch to `upstream`, and add your
+fork as `origin` instead:
+
+   ```bash
+   git remote rename origin upstream
+   git remote add origin git@github.com/<username>/gophercloud
+   ```
+
+4. Check out the latest development branch:
+
+   ```bash
+   git checkout master
+   ```
+
+5. If you're working on something (discussed more in detail below), you will
+need to check out a new feature branch:
+
+   ```bash
+   git checkout -b my-new-feature
+   ```
+
+Another thing to bear in mind is that you will need to add a few extra
+environment variables for acceptance tests - this is documented in our
+[acceptance tests readme](/acceptance).
+
+## Tests
+
+When working on a new or existing feature, testing will be the backbone of your
+work since it helps uncover and prevent regressions in the codebase. There are
+two types of tests we use in gophercloud: unit tests and acceptance tests, which
+are both described below.
+
+### Unit tests
+
+Unit tests are the fine-grained tests that establish and ensure the behaviour
+of individual units of functionality. We usually test on an
+operation-by-operation basis (an operation typically being an API action) with
+the use of mocking to set up explicit expectations. Each operation will set up
+its HTTP response expectation, and then test how the system responds when fed
+this controlled, pre-determined input.
+
+To make life easier, we've introduced a bunch of test helpers to simplify the
+process of testing expectations with assertions:
+
+```go
+import (
+	"testing"
+
+	"github.com/rackspace/gophercloud/testhelper"
+)
+
+func TestSomething(t *testing.T) {
+	result, err := Operation()
+
+	testhelper.AssertEquals(t, "foo", result.Bar)
+	testhelper.AssertNoErr(t, err)
+}
+
+func TestSomethingElse(t *testing.T) {
+	testhelper.CheckEquals(t, "expected", "actual")
+}
+```
+
+`AssertEquals` and `AssertNoErr` will throw a fatal error if a value does not
+match an expected value or if an error has been declared, respectively. You can
+also use `CheckEquals` and `CheckNoErr` for the same purpose; the only difference
+being that `t.Errorf` is raised rather than `t.Fatalf`.
+
+Here is a truncated example of mocked HTTP responses:
+
+```go
+import (
+	"fmt"
+	"net/http"
+	"testing"
+
+	th "github.com/rackspace/gophercloud/testhelper"
+	fake "github.com/rackspace/gophercloud/testhelper/client"
+)
+
+func TestGet(t *testing.T) {
+	// Set up the HTTP request multiplexer and server
+	th.SetupHTTP()
+	defer th.TeardownHTTP()
+
+	th.Mux.HandleFunc("/networks/d32019d3-bc6e-4319-9c1d-6722fc136a22", func(w http.ResponseWriter, r *http.Request) {
+		// Test we're using the correct HTTP method
+		th.TestMethod(t, r, "GET")
+
+		// Test we're setting the auth token
+		th.TestHeader(t, r, "X-Auth-Token", fake.TokenID)
+
+		// Set the appropriate headers for our mocked response
+		w.Header().Add("Content-Type", "application/json")
+		w.WriteHeader(http.StatusOK)
+
+		// Set the HTTP body
+		fmt.Fprintf(w, `
+{
+    "network": {
+        "status": "ACTIVE",
+        "name": "private-network",
+        "admin_state_up": true,
+        "tenant_id": "4fd44f30292945e481c7b8a0c8908869",
+        "shared": true,
+        "id": "d32019d3-bc6e-4319-9c1d-6722fc136a22"
+    }
+}
+		`)
+	})
+
+	// Call our API operation
+	network, err := Get(fake.ServiceClient(), "d32019d3-bc6e-4319-9c1d-6722fc136a22").Extract()
+
+	// Assert no errors and equality
+	th.AssertNoErr(t, err)
+	th.AssertEquals(t, network.Status, "ACTIVE")
+}
+```
+
+### Acceptance tests
+
+As we've already mentioned, unit tests have a very narrow and confined focus -
+they test small units of behaviour. Acceptance tests on the other hand have a
+far larger scope: they are fully functional tests that test the entire API of a
+service in one fell swoop. They don't care about unit isolation or mocking
+expectations, they instead do a full run-through and consequently test how the
+entire system _integrates_ together. When an API satisfies expectations, it
+proves by default that the requirements for a contract have been met.
+
+Please be aware that acceptance tests will hit a live API - and may incur
+service charges from your provider. Although most tests handle their own
+teardown procedures, it is always worth manually checking that resources are
+deleted after the test suite finishes.
+
+### Running tests
+
+To run all tests:
+
+```bash
+go test ./...
+```
+
+To run all tests with verbose output:
+
+```bash
+go test -v ./...
+```
+
+To run tests that match certain [build tags](https://golang.org/pkg/go/build/):
+
+```bash
+go test -tags "foo bar" ./...
+```
+
+To run tests for a particular sub-package:
+
+```bash
+cd ./path/to/package && go test .
+```
+
+## Basic style guide
+
+We follow the standard formatting recommendations and language idioms set out
+in the [Effective Go](https://golang.org/doc/effective_go.html) guide.
+It's definitely worth reading - but the relevant sections are
+[formatting](https://golang.org/doc/effective_go.html#formatting)
+and [names](https://golang.org/doc/effective_go.html#names).
+
+## 5 ways to get involved
+
+There are five main ways you can get involved in our open-source project, and
+each is described briefly below. Once you've made up your mind and decided on
+your fix, you will need to follow the same basic steps that all submissions are
+required to adhere to:
+
+1. [fork](https://help.github.com/articles/fork-a-repo/) the `rackspace/gophercloud` repository
+2. checkout a [new branch](https://github.com/Kunena/Kunena-Forum/wiki/Create-a-new-branch-with-git-and-manage-branches)
+3. submit your branch as a [pull request](https://help.github.com/articles/creating-a-pull-request/)
+
+### 1. Providing feedback
+
+One of the easiest ways to get readily involved in our project is to let us know
+about your experiences using our SDK. Feedback like this is incredibly useful
+to us, because it allows us to refine and change features based on what our
+users want and expect of us. There are a bunch of ways to get in contact! You
+can [ping us](https://developer.rackspace.com/support/) via e-mail, talk to us on irc
+(#rackspace-dev on freenode), [tweet us](https://twitter.com/rackspace), or
+submit an issue on our [bug tracker](/issues). Things you might like to tell us
+are:
+
+* how easy was it to start using our SDK?
+* did it meet your expectations? If not, why not?
+* did our documentation help or hinder you?
+* what could we improve in general?
+
+### 2. Fixing bugs
+
+If you want to start fixing open bugs, we'd really appreciate that! Bug fixing
+is central to any project. The best way to get started is by heading to our
+[bug tracker](https://github.com/rackspace/gophercloud/issues) and finding open
+bugs that you think nobody is working on. It might be useful to comment on the
+thread to see the current state of the issue and if anybody has made any
+breakthroughs on it so far.
+
+### 3. Improving documentation
+
+We have three forms of documentation:
+
+* short README documents that briefly introduce a topic
+* reference documentation on [godoc.org](http://godoc.org) that is automatically
+generated from source code comments
+* user documentation on our [homepage](http://gophercloud.io) that includes
+getting started guides, installation guides and code samples
+
+If you feel that a certain section could be improved - whether it's to clarify
+ambiguity, correct a technical mistake, or to fix a grammatical error - please
+feel entitled to do so! We welcome doc pull requests with the same childlike
+enthusiasm as any other contribution!
+
+### 4. Optimizing existing features
+
+If you would like to improve or optimize an existing feature, please be aware
+that we adhere to [semantic versioning](http://semver.org) - which means that
+we cannot introduce breaking changes to the API without a major version change
+(v1.x -> v2.x). Making that leap is a big step, so we encourage contributors to
+refactor rather than rewrite. Running tests will prevent regression and avoid
+the possibility of breaking somebody's current implementation.
+
+Another tip is to keep the focus of your work as small as possible - try not to
+introduce a change that affects lots and lots of files because it introduces
+added risk and increases the cognitive load on the reviewers checking your
+work.
+Change-sets which are easily understood and will not negatively impact
+users are more likely to be integrated quickly.
+
+Lastly, if you're seeking to optimize a particular operation, you should try to
+demonstrate a negative performance impact - perhaps using go's inbuilt
+[benchmark capabilities](http://dave.cheney.net/2013/06/30/how-to-write-benchmarks-in-go).
+
+### 5. Working on a new feature
+
+If you've found something we've left out, definitely feel free to start work on
+introducing that feature. It's always useful to open an issue or submit a pull
+request early on to indicate your intent to a core contributor - this enables
+quick/early feedback and can help steer you in the right direction by avoiding
+known issues. It might also help you avoid losing time implementing something
+that might not ever work. One tip is to prefix your Pull Request issue title
+with [wip] - then people know it's a work in progress.
+
+You must ensure that all of your work is well tested - both in terms of unit
+and acceptance tests. Untested code will not be merged because it introduces
+too much of a risk to end-users.
+
+Happy hacking!
diff --git a/vendor/github.com/rackspace/gophercloud/CONTRIBUTORS.md b/vendor/github.com/rackspace/gophercloud/CONTRIBUTORS.md
new file mode 100644
index 000000000..63beb30b2
--- /dev/null
+++ b/vendor/github.com/rackspace/gophercloud/CONTRIBUTORS.md
@@ -0,0 +1,13 @@
+Contributors
+============
+
+| Name | Email |
+| ---- | ----- |
+| Samuel A. Falvo II | |
+| Glen Campbell | |
+| Jesse Noller | |
+| Jon Perritt | |
+| Ash Wilson | |
+| Jamie Hannaford | |
+| Don Schenck | don.schenck@rackspace.com |
+| Joe Topjian | |
diff --git a/vendor/github.com/rackspace/gophercloud/LICENSE b/vendor/github.com/rackspace/gophercloud/LICENSE
new file mode 100644
index 000000000..fbbbc9e4c
--- /dev/null
+++ b/vendor/github.com/rackspace/gophercloud/LICENSE
@@ -0,0 +1,191 @@
+Copyright 2012-2013 Rackspace, Inc.
+
+Licensed under the Apache License, Version 2.0 (the "License"); you may not use
+this file except in compliance with the License. You may obtain a copy of the
+License at
+
+  http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software distributed
+under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
+CONDITIONS OF ANY KIND, either express or implied. See the License for the
+specific language governing permissions and limitations under the License.
+
+------
+
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+ + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS diff --git a/vendor/github.com/rackspace/gophercloud/README.md b/vendor/github.com/rackspace/gophercloud/README.md new file mode 100644 index 000000000..0a0da59f6 --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/README.md @@ -0,0 +1,160 @@ +# Gophercloud: an OpenStack SDK for Go +[![Build Status](https://travis-ci.org/rackspace/gophercloud.svg?branch=master)](https://travis-ci.org/rackspace/gophercloud) + +Gophercloud is a flexible SDK that allows you to consume and work with OpenStack +clouds in a simple and idiomatic way using golang. Many services are supported, +including Compute, Block Storage, Object Storage, Networking, and Identity. +Each service API is backed with getting started guides, code samples, reference +documentation, unit tests and acceptance tests. 
+ +## Useful links + +* [Gophercloud homepage](http://gophercloud.io) +* [Reference documentation](http://godoc.org/github.com/rackspace/gophercloud) +* [Getting started guides](http://gophercloud.io/docs) +* [Effective Go](https://golang.org/doc/effective_go.html) + +## How to install + +Before installing, you need to ensure that your [GOPATH environment variable](https://golang.org/doc/code.html#GOPATH) +is pointing to an appropriate directory where you want to install Gophercloud: + +```bash +mkdir $HOME/go +export GOPATH=$HOME/go +``` + +To protect yourself against changes in your dependencies, we highly recommend choosing a +[dependency management solution](https://github.com/golang/go/wiki/PackageManagementTools) for +your projects, such as [godep](https://github.com/tools/godep). Once this is set up, you can install +Gophercloud as a dependency like so: + +```bash +go get github.com/rackspace/gophercloud + +# Edit your code to import relevant packages from "github.com/rackspace/gophercloud" + +godep save ./... +``` + +This will install all the source files you need into a `Godeps/_workspace` directory, which is +referenceable from your own source files when you use the `godep go` command. + +## Getting started + +### Credentials + +Because you'll be hitting an API, you will need to retrieve your OpenStack +credentials and either store them as environment variables or in your local Go +files. The first method is recommended because it decouples credential +information from source code, allowing you to push the latter to your version +control system without any security risk. + +You will need to retrieve the following: + +* username +* password +* tenant name or tenant ID +* a valid Keystone identity URL + +For users that have the OpenStack dashboard installed, there's a shortcut. If +you visit the `project/access_and_security` path in Horizon and click on the +"Download OpenStack RC File" button at the top right hand corner, you will +download a bash file that exports all of your access details to environment +variables. To execute the file, run `source admin-openrc.sh` and you will be +prompted for your password. + +### Authentication + +Once you have access to your credentials, you can begin plugging them into +Gophercloud. The next step is authentication, and this is handled by a base +"Provider" struct. To get one, you can either pass in your credentials +explicitly, or tell Gophercloud to use environment variables: + +```go +import ( + "github.com/rackspace/gophercloud" + "github.com/rackspace/gophercloud/openstack" + "github.com/rackspace/gophercloud/openstack/utils" +) + +// Option 1: Pass in the values yourself +opts := gophercloud.AuthOptions{ + IdentityEndpoint: "https://my-openstack.com:5000/v2.0", + Username: "{username}", + Password: "{password}", + TenantID: "{tenant_id}", +} + +// Option 2: Use a utility function to retrieve all your environment variables +opts, err := openstack.AuthOptionsFromEnv() +``` + +Once you have the `opts` variable, you can pass it in and get back a +`ProviderClient` struct: + +```go +provider, err := openstack.AuthenticatedClient(opts) +``` + +The `ProviderClient` is the top-level client that all of your OpenStack services +derive from. The provider contains all of the authentication details that allow +your Go code to access the API - such as the base URL and token ID. + +### Provision a server + +Once we have a base Provider, we inject it as a dependency into each OpenStack +service. 
In order to work with the Compute API, we need a Compute service +client; which can be created like so: + +```go +client, err := openstack.NewComputeV2(provider, gophercloud.EndpointOpts{ + Region: os.Getenv("OS_REGION_NAME"), +}) +``` + +We then use this `client` for any Compute API operation we want. In our case, +we want to provision a new server - so we invoke the `Create` method and pass +in the flavor ID (hardware specification) and image ID (operating system) we're +interested in: + +```go +import "github.com/rackspace/gophercloud/openstack/compute/v2/servers" + +server, err := servers.Create(client, servers.CreateOpts{ + Name: "My new server!", + FlavorRef: "flavor_id", + ImageRef: "image_id", +}).Extract() +``` + +If you are unsure about what images and flavors are, you can read our [Compute +Getting Started guide](http://gophercloud.io/docs/compute). The above code +sample creates a new server with the parameters, and embodies the new resource +in the `server` variable (a +[`servers.Server`](http://godoc.org/github.com/rackspace/gophercloud) struct). + +### Next steps + +Cool! You've handled authentication, got your `ProviderClient` and provisioned +a new server. You're now ready to use more OpenStack services. + +* [Getting started with Compute](http://gophercloud.io/docs/compute) +* [Getting started with Object Storage](http://gophercloud.io/docs/object-storage) +* [Getting started with Networking](http://gophercloud.io/docs/networking) +* [Getting started with Block Storage](http://gophercloud.io/docs/block-storage) +* [Getting started with Identity](http://gophercloud.io/docs/identity) + +## Contributing + +Engaging the community and lowering barriers for contributors is something we +care a lot about. For this reason, we've taken the time to write a [contributing +guide](./CONTRIBUTING.md) for folks interested in getting involved in our project. +If you're not sure how you can get involved, feel free to submit an issue or +[contact us](https://developer.rackspace.com/support/). You don't need to be a +Go expert - all members of the community are welcome! + +## Help and feedback + +If you're struggling with something or have spotted a potential bug, feel free +to submit an issue to our [bug tracker](/issues) or [contact us directly](https://developer.rackspace.com/support/). diff --git a/vendor/github.com/rackspace/gophercloud/UPGRADING.md b/vendor/github.com/rackspace/gophercloud/UPGRADING.md new file mode 100644 index 000000000..76a94d570 --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/UPGRADING.md @@ -0,0 +1,338 @@ +# Upgrading to v1.0.0 + +With the arrival of this new major version increment, the unfortunate news is +that breaking changes have been introduced to existing services. The API +has been completely rewritten from the ground up to make the library more +extensible, maintainable and easy-to-use. + +Below we've compiled upgrade instructions for the various services that +existed before. If you have a specific issue that is not addressed below, +please [submit an issue](/issues/new) or +[e-mail our support team](https://developer.rackspace.com/support/). 
+
+* [Authentication](#authentication)
+* [Servers](#servers)
+  * [List servers](#list-servers)
+  * [Get server details](#get-server-details)
+  * [Create server](#create-server)
+  * [Resize server](#resize-server)
+  * [Reboot server](#reboot-server)
+  * [Update server](#update-server)
+  * [Rebuild server](#rebuild-server)
+  * [Change admin password](#change-admin-password)
+  * [Delete server](#delete-server)
+  * [Rescue server](#rescue-server)
+* [Images and flavors](#images-and-flavors)
+  * [List images](#list-images)
+  * [List flavors](#list-flavors)
+  * [Create/delete image](#createdelete-image)
+* [Other](#other)
+  * [List keypairs](#list-keypairs)
+  * [Create/delete keypair](#createdelete-keypair)
+  * [List IP addresses](#list-ip-addresses)
+
+# Authentication
+
+One of the major differences that this release introduces is the level of
+sub-packaging to differentiate between services and providers. You now have
+the option of authenticating with OpenStack and other providers (like Rackspace).
+
+To authenticate with a vanilla OpenStack installation, you can either specify
+your credentials like this:
+
+```go
+import (
+	"github.com/rackspace/gophercloud"
+	"github.com/rackspace/gophercloud/openstack"
+)
+
+opts := gophercloud.AuthOptions{
+	IdentityEndpoint: "https://my-openstack.com:5000/v2.0",
+	Username:         "{username}",
+	Password:         "{password}",
+	TenantID:         "{tenant_id}",
+}
+```
+
+Or have them pulled in through environment variables, like this:
+
+```go
+opts, err := openstack.AuthOptionsFromEnv()
+```
+
+Once you have your `AuthOptions` struct, you pass it in to get back a `Provider`,
+like so:
+
+```go
+provider, err := openstack.AuthenticatedClient(opts)
+```
+
+This provider is the top-level structure that all services are created from.
+
+# Servers
+
+Before you can interact with the Compute API, you need to retrieve a
+`gophercloud.ServiceClient`. To do this:
+
+```go
+// Define your region, etc.
+opts := gophercloud.EndpointOpts{Region: "RegionOne"}
+
+client, err := openstack.NewComputeV2(provider, opts)
+```
+
+## List servers
+
+All operations that involve API collections (servers, flavors, images) now use
+the `pagination.Pager` interface. This interface represents paginated entities
+that can be iterated over.
+
+Once you have a Pager, you can then pass a callback function into its `EachPage`
+method, and this will allow you to traverse over the collection and execute
+arbitrary functionality. So, an example with list servers:
+
+```go
+import (
+	"fmt"
+
+	"github.com/rackspace/gophercloud/pagination"
+	"github.com/rackspace/gophercloud/openstack/compute/v2/servers"
+)
+
+// We have the option of filtering the server list. If we want the full
+// collection, leave it as an empty struct or nil
+opts := servers.ListOpts{Name: "server_1"}
+
+// Retrieve a pager (i.e. a paginated collection)
+pager := servers.List(client, opts)
+
+// Define an anonymous function to be executed on each page's iteration
+err := pager.EachPage(func(page pagination.Page) (bool, error) {
+	serverList, err := servers.ExtractServers(page)
+	if err != nil {
+		return false, err
+	}
+
+	// `s' will be a servers.Server struct
+	for _, s := range serverList {
+		fmt.Printf("We have a server. ID=%s, Name=%s", s.ID, s.Name)
+	}
+
+	return true, nil
+})
+```
+
+## Get server details
+
+```go
+import "github.com/rackspace/gophercloud/openstack/compute/v2/servers"
+
+// Get the HTTP result
+response := servers.Get(client, "server_id")
+
+// Extract a Server struct from the response
+server, err := response.Extract()
+```
+
+## Create server
+
+```go
+import "github.com/rackspace/gophercloud/openstack/compute/v2/servers"
+
+// Define our options
+opts := servers.CreateOpts{
+	Name:      "new_server",
+	FlavorRef: "flavorID",
+	ImageRef:  "imageID",
+}
+
+// Get our response
+response := servers.Create(client, opts)
+
+// Extract
+server, err := response.Extract()
+```
+
+## Change admin password
+
+```go
+import "github.com/rackspace/gophercloud/openstack/compute/v2/servers"
+
+result := servers.ChangeAdminPassword(client, "server_id", "newPassword_&123")
+```
+
+## Resize server
+
+```go
+import "github.com/rackspace/gophercloud/openstack/compute/v2/servers"
+
+result := servers.Resize(client, "server_id", "new_flavor_id")
+```
+
+## Reboot server
+
+```go
+import "github.com/rackspace/gophercloud/openstack/compute/v2/servers"
+
+// You have a choice of two reboot methods: servers.SoftReboot or servers.HardReboot
+result := servers.Reboot(client, "server_id", servers.SoftReboot)
+```
+
+## Update server
+
+```go
+import "github.com/rackspace/gophercloud/openstack/compute/v2/servers"
+
+opts := servers.UpdateOpts{Name: "new_name"}
+
+server, err := servers.Update(client, "server_id", opts).Extract()
+```
+
+## Rebuild server
+
+```go
+import "github.com/rackspace/gophercloud/openstack/compute/v2/servers"
+
+// You have the option of specifying additional options
+opts := servers.RebuildOpts{
+	Name:      "new_name",
+	AdminPass: "admin_password",
+	ImageID:   "image_id",
+	Metadata:  map[string]string{"owner": "me"},
+}
+
+result := servers.Rebuild(client, "server_id", opts)
+
+// You can extract a servers.Server struct from the HTTP response
+server, err := result.Extract()
+```
+
+## Delete server
+
+```go
+import "github.com/rackspace/gophercloud/openstack/compute/v2/servers"
+
+response := servers.Delete(client, "server_id")
+```
+
+## Rescue server
+
+The server rescue extension for Compute is not currently supported.
+
+# Images and flavors
+
+## List images
+
+As with listing servers (see above), you first retrieve a Pager, and then pass
+in a callback over each page:
+
+```go
+import (
+	"github.com/rackspace/gophercloud/pagination"
+	"github.com/rackspace/gophercloud/openstack/compute/v2/images"
+)
+
+// We have the option of filtering the image list. If we want the full
+// collection, leave it as an empty struct
+opts := images.ListOpts{ChangesSince: "2014-01-01T01:02:03Z", Name: "Ubuntu 12.04"}
+
+// Retrieve a pager (i.e. a paginated collection)
+pager := images.List(client, opts)
+
+// Define an anonymous function to be executed on each page's iteration
+err := pager.EachPage(func(page pagination.Page) (bool, error) {
+	imageList, err := images.ExtractImages(page)
+	if err != nil {
+		return false, err
+	}
+
+	for _, i := range imageList {
+		// "i" will be an images.Image
+	}
+
+	return true, nil
+})
+```
+
+## List flavors
+
+```go
+import (
+	"github.com/rackspace/gophercloud/pagination"
+	"github.com/rackspace/gophercloud/openstack/compute/v2/flavors"
+)
+
+// We have the option of filtering the flavor list. If we want the full
+// collection, leave it as an empty struct
+opts := flavors.ListOpts{ChangesSince: "2014-01-01T01:02:03Z", MinRAM: 4}
+
+// Retrieve a pager (i.e. a paginated collection)
a paginated collection) +pager := flavors.List(client, opts) + +// Define an anonymous function to be executed on each page's iteration +err := pager.EachPage(func(page pagination.Page) (bool, error) { + flavorList, err := networks.ExtractFlavors(page) + + for _, f := range flavorList { + // "f" will be a flavors.Flavor + } +}) +``` + +## Create/delete image + +Image management has been shifted to Glance, but unfortunately this service is +not supported as of yet. You can, however, list Compute images like so: + +```go +import "github.com/rackspace/gophercloud/openstack/compute/v2/images" + +// Retrieve a pager (i.e. a paginated collection) +pager := images.List(client, opts) + +// Define an anonymous function to be executed on each page's iteration +err := pager.EachPage(func(page pagination.Page) (bool, error) { + imageList, err := images.ExtractImages(page) + + for _, i := range imageList { + // "i" will be an images.Image + } +}) +``` + +# Other + +## List keypairs + +```go +import "github.com/rackspace/gophercloud/openstack/compute/v2/extensions/keypairs" + +// Retrieve a pager (i.e. a paginated collection) +pager := keypairs.List(client, opts) + +// Define an anonymous function to be executed on each page's iteration +err := pager.EachPage(func(page pagination.Page) (bool, error) { + keyList, err := keypairs.ExtractKeyPairs(page) + + for _, k := range keyList { + // "k" will be a keypairs.KeyPair + } +}) +``` + +## Create/delete keypairs + +To create a new keypair, you need to specify its name and, optionally, a +pregenerated OpenSSH-formatted public key. + +```go +import "github.com/rackspace/gophercloud/openstack/compute/v2/extensions/keypairs" + +opts := keypairs.CreateOpts{ + Name: "new_key", + PublicKey: "...", +} + +response := keypairs.Create(client, opts) + +key, err := response.Extract() +``` + +To delete an existing keypair: + +```go +response := keypairs.Delete(client, "keypair_id") +``` + +## List IP addresses + +This operation is not currently supported. diff --git a/vendor/github.com/rackspace/gophercloud/acceptance/README.md b/vendor/github.com/rackspace/gophercloud/acceptance/README.md new file mode 100644 index 000000000..3199837c2 --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/acceptance/README.md @@ -0,0 +1,57 @@ +# Gophercloud Acceptance tests + +The purpose of these acceptance tests is to validate that SDK features meet +the requirements of a contract - to consumers, other parts of the library, and +to a remote API. + +> **Note:** Because every test will be run against a real API endpoint, you +> may incur bandwidth and service charges for all the resource usage. These +> tests *should* remove their remote products automatically. However, there may +> be certain cases where this does not happen; always double-check to make sure +> you have no stragglers left behind. + +### Step 1. Set environment variables + +A lot of tests rely on environment variables for configuration - so you will need +to set them before running the suite. If you're testing against pure OpenStack APIs, +you can download a file that contains all of these variables for you: just visit +the `project/access_and_security` page in your control panel and click the "Download +OpenStack RC File" button at the top right. For all other providers, you will need +to set them manually. 
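+
+As a rough sketch of how the suite consumes these variables (this mirrors the
+`newClient` helpers in the test files vendored below; exact helper names vary
+per package), the authentication values are read from the environment and then
+used to build a region-scoped service client:
+
+```go
+import (
+	"os"
+
+	"github.com/rackspace/gophercloud"
+	"github.com/rackspace/gophercloud/openstack"
+)
+
+// AuthOptionsFromEnv reads OS_AUTH_URL, OS_USERNAME, OS_PASSWORD,
+// OS_TENANT_NAME/OS_TENANT_ID, etc. from the environment.
+ao, err := openstack.AuthOptionsFromEnv()
+if err != nil {
+	// handle error: a required variable is missing
+}
+
+// Authenticate once; the resulting provider is shared by all services.
+provider, err := openstack.AuthenticatedClient(ao)
+if err != nil {
+	// handle error: authentication failed
+}
+
+// Build a service client pinned to the region under test.
+client, err := openstack.NewComputeV2(provider, gophercloud.EndpointOpts{
+	Region: os.Getenv("OS_REGION_NAME"),
+})
+```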
+
+#### Authentication
+
+|Name|Description|
+|---|---|
+|`OS_USERNAME`|Your API username|
+|`OS_PASSWORD`|Your API password|
+|`OS_AUTH_URL`|The identity endpoint URL to authenticate against|
+|`OS_TENANT_NAME`|Your API tenant name|
+|`OS_TENANT_ID`|Your API tenant ID|
+|`RS_USERNAME`|Your Rackspace username|
+|`RS_API_KEY`|Your Rackspace API key|
+
+#### General
+
+|Name|Description|
+|---|---|
+|`OS_REGION_NAME`|The region you want your resources to reside in|
+|`RS_REGION`|The Rackspace region you want your resources to reside in|
+
+#### Compute
+
+|Name|Description|
+|---|---|
+|`OS_IMAGE_ID`|The ID of the image you want your server to be based on|
+|`OS_FLAVOR_ID`|The ID of the flavor you want your server to be based on|
+|`OS_FLAVOR_ID_RESIZE`|The ID of the flavor you want your server to be resized to|
+|`RS_IMAGE_ID`|The ID of the image you want servers to be created with|
+|`RS_FLAVOR_ID`|The ID of the flavor you want your server to be created with|
+
+### Step 2. Run the test suite
+
+From the root directory, run:
+
+```
+./script/acceptancetest
+```
diff --git a/vendor/github.com/rackspace/gophercloud/acceptance/openstack/blockstorage/v1/snapshots_test.go b/vendor/github.com/rackspace/gophercloud/acceptance/openstack/blockstorage/v1/snapshots_test.go
new file mode 100644
index 000000000..7741aa984
--- /dev/null
+++ b/vendor/github.com/rackspace/gophercloud/acceptance/openstack/blockstorage/v1/snapshots_test.go
@@ -0,0 +1,70 @@
+// +build acceptance
+
+package v1
+
+import (
+	"testing"
+
+	"github.com/rackspace/gophercloud"
+	"github.com/rackspace/gophercloud/openstack/blockstorage/v1/snapshots"
+	"github.com/rackspace/gophercloud/openstack/blockstorage/v1/volumes"
+	th "github.com/rackspace/gophercloud/testhelper"
+)
+
+func TestSnapshots(t *testing.T) {
+
+	client, err := newClient(t)
+	th.AssertNoErr(t, err)
+
+	v, err := volumes.Create(client, &volumes.CreateOpts{
+		Name: "gophercloud-test-volume",
+		Size: 1,
+	}).Extract()
+	th.AssertNoErr(t, err)
+
+	err = volumes.WaitForStatus(client, v.ID, "available", 120)
+	th.AssertNoErr(t, err)
+
+	t.Logf("Created volume: %v\n", v)
+
+	ss, err := snapshots.Create(client, &snapshots.CreateOpts{
+		Name:     "gophercloud-test-snapshot",
+		VolumeID: v.ID,
+	}).Extract()
+	th.AssertNoErr(t, err)
+
+	err = snapshots.WaitForStatus(client, ss.ID, "available", 120)
+	th.AssertNoErr(t, err)
+
+	t.Logf("Created snapshot: %+v\n", ss)
+
+	err = snapshots.Delete(client, ss.ID).ExtractErr()
+	th.AssertNoErr(t, err)
+
+	err = gophercloud.WaitFor(120, func() (bool, error) {
+		_, err := snapshots.Get(client, ss.ID).Extract()
+		if err != nil {
+			return true, nil
+		}
+
+		return false, nil
+	})
+	th.AssertNoErr(t, err)
+
+	t.Log("Deleted snapshot")
+
+	err = volumes.Delete(client, v.ID).ExtractErr()
+	th.AssertNoErr(t, err)
+
+	err = gophercloud.WaitFor(120, func() (bool, error) {
+		_, err := volumes.Get(client, v.ID).Extract()
+		if err != nil {
+			return true, nil
+		}
+
+		return false, nil
+	})
+	th.AssertNoErr(t, err)
+
+	t.Log("Deleted volume")
+}
diff --git a/vendor/github.com/rackspace/gophercloud/acceptance/openstack/blockstorage/v1/volumes_test.go b/vendor/github.com/rackspace/gophercloud/acceptance/openstack/blockstorage/v1/volumes_test.go
new file mode 100644
index 000000000..7760427f0
--- /dev/null
+++ b/vendor/github.com/rackspace/gophercloud/acceptance/openstack/blockstorage/v1/volumes_test.go
@@ -0,0 +1,63 @@
+// +build acceptance blockstorage
+
+package v1
+
+import (
+	"os"
+	"testing"
+
+	"github.com/rackspace/gophercloud"
+	
"github.com/rackspace/gophercloud/openstack" + "github.com/rackspace/gophercloud/openstack/blockstorage/v1/volumes" + "github.com/rackspace/gophercloud/pagination" + th "github.com/rackspace/gophercloud/testhelper" +) + +func newClient(t *testing.T) (*gophercloud.ServiceClient, error) { + ao, err := openstack.AuthOptionsFromEnv() + th.AssertNoErr(t, err) + + client, err := openstack.AuthenticatedClient(ao) + th.AssertNoErr(t, err) + + return openstack.NewBlockStorageV1(client, gophercloud.EndpointOpts{ + Region: os.Getenv("OS_REGION_NAME"), + }) +} + +func TestVolumes(t *testing.T) { + client, err := newClient(t) + th.AssertNoErr(t, err) + + cv, err := volumes.Create(client, &volumes.CreateOpts{ + Size: 1, + Name: "gophercloud-test-volume", + }).Extract() + th.AssertNoErr(t, err) + defer func() { + err = volumes.WaitForStatus(client, cv.ID, "available", 60) + th.AssertNoErr(t, err) + err = volumes.Delete(client, cv.ID).ExtractErr() + th.AssertNoErr(t, err) + }() + + _, err = volumes.Update(client, cv.ID, &volumes.UpdateOpts{ + Name: "gophercloud-updated-volume", + }).Extract() + th.AssertNoErr(t, err) + + v, err := volumes.Get(client, cv.ID).Extract() + th.AssertNoErr(t, err) + t.Logf("Got volume: %+v\n", v) + + if v.Name != "gophercloud-updated-volume" { + t.Errorf("Unable to update volume: Expected name: gophercloud-updated-volume\nActual name: %s", v.Name) + } + + err = volumes.List(client, &volumes.ListOpts{Name: "gophercloud-updated-volume"}).EachPage(func(page pagination.Page) (bool, error) { + vols, err := volumes.ExtractVolumes(page) + th.CheckEquals(t, 1, len(vols)) + return true, err + }) + th.AssertNoErr(t, err) +} diff --git a/vendor/github.com/rackspace/gophercloud/acceptance/openstack/blockstorage/v1/volumetypes_test.go b/vendor/github.com/rackspace/gophercloud/acceptance/openstack/blockstorage/v1/volumetypes_test.go new file mode 100644 index 000000000..000bc01d5 --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/acceptance/openstack/blockstorage/v1/volumetypes_test.go @@ -0,0 +1,49 @@ +// +build acceptance + +package v1 + +import ( + "testing" + "time" + + "github.com/rackspace/gophercloud/openstack/blockstorage/v1/volumetypes" + "github.com/rackspace/gophercloud/pagination" + th "github.com/rackspace/gophercloud/testhelper" +) + +func TestVolumeTypes(t *testing.T) { + client, err := newClient(t) + th.AssertNoErr(t, err) + + vt, err := volumetypes.Create(client, &volumetypes.CreateOpts{ + ExtraSpecs: map[string]interface{}{ + "capabilities": "gpu", + "priority": 3, + }, + Name: "gophercloud-test-volumeType", + }).Extract() + th.AssertNoErr(t, err) + defer func() { + time.Sleep(10000 * time.Millisecond) + err = volumetypes.Delete(client, vt.ID).ExtractErr() + if err != nil { + t.Error(err) + return + } + }() + t.Logf("Created volume type: %+v\n", vt) + + vt, err = volumetypes.Get(client, vt.ID).Extract() + th.AssertNoErr(t, err) + t.Logf("Got volume type: %+v\n", vt) + + err = volumetypes.List(client).EachPage(func(page pagination.Page) (bool, error) { + volTypes, err := volumetypes.ExtractVolumeTypes(page) + if len(volTypes) != 1 { + t.Errorf("Expected 1 volume type, got %d", len(volTypes)) + } + t.Logf("Listing volume types: %+v\n", volTypes) + return true, err + }) + th.AssertNoErr(t, err) +} diff --git a/vendor/github.com/rackspace/gophercloud/acceptance/openstack/client_test.go b/vendor/github.com/rackspace/gophercloud/acceptance/openstack/client_test.go new file mode 100644 index 000000000..6e88819d8 --- /dev/null +++ 
b/vendor/github.com/rackspace/gophercloud/acceptance/openstack/client_test.go @@ -0,0 +1,40 @@ +// +build acceptance + +package openstack + +import ( + "os" + "testing" + + "github.com/rackspace/gophercloud" + "github.com/rackspace/gophercloud/openstack" +) + +func TestAuthenticatedClient(t *testing.T) { + // Obtain credentials from the environment. + ao, err := openstack.AuthOptionsFromEnv() + if err != nil { + t.Fatalf("Unable to acquire credentials: %v", err) + } + + client, err := openstack.AuthenticatedClient(ao) + if err != nil { + t.Fatalf("Unable to authenticate: %v", err) + } + + if client.TokenID == "" { + t.Errorf("No token ID assigned to the client") + } + + t.Logf("Client successfully acquired a token: %v", client.TokenID) + + // Find the storage service in the service catalog. + storage, err := openstack.NewObjectStorageV1(client, gophercloud.EndpointOpts{ + Region: os.Getenv("OS_REGION_NAME"), + }) + if err != nil { + t.Errorf("Unable to locate a storage service: %v", err) + } else { + t.Logf("Located a storage service at endpoint: [%s]", storage.Endpoint) + } +} diff --git a/vendor/github.com/rackspace/gophercloud/acceptance/openstack/compute/v2/bootfromvolume_test.go b/vendor/github.com/rackspace/gophercloud/acceptance/openstack/compute/v2/bootfromvolume_test.go new file mode 100644 index 000000000..add0e5fc1 --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/acceptance/openstack/compute/v2/bootfromvolume_test.go @@ -0,0 +1,55 @@ +// +build acceptance + +package v2 + +import ( + "testing" + + "github.com/rackspace/gophercloud/acceptance/tools" + "github.com/rackspace/gophercloud/openstack/compute/v2/extensions/bootfromvolume" + "github.com/rackspace/gophercloud/openstack/compute/v2/servers" + th "github.com/rackspace/gophercloud/testhelper" +) + +func TestBootFromVolume(t *testing.T) { + client, err := newClient() + th.AssertNoErr(t, err) + + if testing.Short() { + t.Skip("Skipping test that requires server creation in short mode.") + } + + choices, err := ComputeChoicesFromEnv() + if err != nil { + t.Fatal(err) + } + + name := tools.RandomString("Gophercloud-", 8) + t.Logf("Creating server [%s].", name) + + bd := []bootfromvolume.BlockDevice{ + bootfromvolume.BlockDevice{ + UUID: choices.ImageID, + SourceType: bootfromvolume.Image, + VolumeSize: 10, + }, + } + + serverCreateOpts := servers.CreateOpts{ + Name: name, + FlavorRef: choices.FlavorID, + ImageRef: choices.ImageID, + } + server, err := bootfromvolume.Create(client, bootfromvolume.CreateOptsExt{ + serverCreateOpts, + bd, + }).Extract() + th.AssertNoErr(t, err) + if err = waitForStatus(client, server, "ACTIVE"); err != nil { + t.Fatal(err) + } + + t.Logf("Created server: %+v\n", server) + defer servers.Delete(client, server.ID) + t.Logf("Deleting server [%s]...", name) +} diff --git a/vendor/github.com/rackspace/gophercloud/acceptance/openstack/compute/v2/compute_test.go b/vendor/github.com/rackspace/gophercloud/acceptance/openstack/compute/v2/compute_test.go new file mode 100644 index 000000000..c1bbf7961 --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/acceptance/openstack/compute/v2/compute_test.go @@ -0,0 +1,104 @@ +// +build acceptance common + +package v2 + +import ( + "fmt" + "os" + "strings" + + "github.com/rackspace/gophercloud" + "github.com/rackspace/gophercloud/acceptance/tools" + "github.com/rackspace/gophercloud/openstack" + "github.com/rackspace/gophercloud/openstack/compute/v2/servers" +) + +func newClient() (*gophercloud.ServiceClient, error) { + ao, err := 
openstack.AuthOptionsFromEnv() + if err != nil { + return nil, err + } + + client, err := openstack.AuthenticatedClient(ao) + if err != nil { + return nil, err + } + + return openstack.NewComputeV2(client, gophercloud.EndpointOpts{ + Region: os.Getenv("OS_REGION_NAME"), + }) +} + +func waitForStatus(client *gophercloud.ServiceClient, server *servers.Server, status string) error { + return tools.WaitFor(func() (bool, error) { + latest, err := servers.Get(client, server.ID).Extract() + if err != nil { + return false, err + } + + if latest.Status == status { + // Success! + return true, nil + } + + return false, nil + }) +} + +// ComputeChoices contains image and flavor selections for use by the acceptance tests. +type ComputeChoices struct { + // ImageID contains the ID of a valid image. + ImageID string + + // FlavorID contains the ID of a valid flavor. + FlavorID string + + // FlavorIDResize contains the ID of a different flavor available on the same OpenStack installation, that is distinct + // from FlavorID. + FlavorIDResize string + + // NetworkName is the name of a network to launch the instance on. + NetworkName string +} + +// ComputeChoicesFromEnv populates a ComputeChoices struct from environment variables. +// If any required state is missing, an `error` will be returned that enumerates the missing properties. +func ComputeChoicesFromEnv() (*ComputeChoices, error) { + imageID := os.Getenv("OS_IMAGE_ID") + flavorID := os.Getenv("OS_FLAVOR_ID") + flavorIDResize := os.Getenv("OS_FLAVOR_ID_RESIZE") + networkName := os.Getenv("OS_NETWORK_NAME") + + missing := make([]string, 0, 3) + if imageID == "" { + missing = append(missing, "OS_IMAGE_ID") + } + if flavorID == "" { + missing = append(missing, "OS_FLAVOR_ID") + } + if flavorIDResize == "" { + missing = append(missing, "OS_FLAVOR_ID_RESIZE") + } + if networkName == "" { + networkName = "public" + } + + notDistinct := "" + if flavorID == flavorIDResize { + notDistinct = "OS_FLAVOR_ID and OS_FLAVOR_ID_RESIZE must be distinct." 
+ } + + if len(missing) > 0 || notDistinct != "" { + text := "You're missing some important setup:\n" + if len(missing) > 0 { + text += " * These environment variables must be provided: " + strings.Join(missing, ", ") + "\n" + } + if notDistinct != "" { + text += " * " + notDistinct + "\n" + } + + return nil, fmt.Errorf(text) + } + + return &ComputeChoices{ImageID: imageID, FlavorID: flavorID, FlavorIDResize: flavorIDResize, NetworkName: networkName}, nil +} diff --git a/vendor/github.com/rackspace/gophercloud/acceptance/openstack/compute/v2/extension_test.go b/vendor/github.com/rackspace/gophercloud/acceptance/openstack/compute/v2/extension_test.go new file mode 100644 index 000000000..1356ffa89 --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/acceptance/openstack/compute/v2/extension_test.go @@ -0,0 +1,47 @@ +// +build acceptance compute extensionss + +package v2 + +import ( + "testing" + + "github.com/rackspace/gophercloud/openstack/compute/v2/extensions" + "github.com/rackspace/gophercloud/pagination" + th "github.com/rackspace/gophercloud/testhelper" +) + +func TestListExtensions(t *testing.T) { + client, err := newClient() + th.AssertNoErr(t, err) + + err = extensions.List(client).EachPage(func(page pagination.Page) (bool, error) { + t.Logf("--- Page ---") + + exts, err := extensions.ExtractExtensions(page) + th.AssertNoErr(t, err) + + for i, ext := range exts { + t.Logf("[%02d] name=[%s]\n", i, ext.Name) + t.Logf(" alias=[%s]\n", ext.Alias) + t.Logf(" description=[%s]\n", ext.Description) + } + + return true, nil + }) + th.AssertNoErr(t, err) +} + +func TestGetExtension(t *testing.T) { + client, err := newClient() + th.AssertNoErr(t, err) + + ext, err := extensions.Get(client, "os-admin-actions").Extract() + th.AssertNoErr(t, err) + + t.Logf("Extension details:") + t.Logf(" name=[%s]\n", ext.Name) + t.Logf(" namespace=[%s]\n", ext.Namespace) + t.Logf(" alias=[%s]\n", ext.Alias) + t.Logf(" description=[%s]\n", ext.Description) + t.Logf(" updated=[%s]\n", ext.Updated) +} diff --git a/vendor/github.com/rackspace/gophercloud/acceptance/openstack/compute/v2/flavors_test.go b/vendor/github.com/rackspace/gophercloud/acceptance/openstack/compute/v2/flavors_test.go new file mode 100644 index 000000000..9f51b1228 --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/acceptance/openstack/compute/v2/flavors_test.go @@ -0,0 +1,57 @@ +// +build acceptance compute flavors + +package v2 + +import ( + "testing" + + "github.com/rackspace/gophercloud/openstack/compute/v2/flavors" + "github.com/rackspace/gophercloud/pagination" +) + +func TestListFlavors(t *testing.T) { + client, err := newClient() + if err != nil { + t.Fatalf("Unable to create a compute client: %v", err) + } + + t.Logf("ID\tRegion\tName\tStatus\tCreated") + + pager := flavors.ListDetail(client, nil) + count, pages := 0, 0 + pager.EachPage(func(page pagination.Page) (bool, error) { + t.Logf("---") + pages++ + flavors, err := flavors.ExtractFlavors(page) + if err != nil { + return false, err + } + + for _, f := range flavors { + t.Logf("%s\t%s\t%d\t%d\t%d", f.ID, f.Name, f.RAM, f.Disk, f.VCPUs) + } + + return true, nil + }) + + t.Logf("--------\n%d flavors listed on %d pages.", count, pages) +} + +func TestGetFlavor(t *testing.T) { + client, err := newClient() + if err != nil { + t.Fatalf("Unable to create a compute client: %v", err) + } + + choices, err := ComputeChoicesFromEnv() + if err != nil { + t.Fatal(err) + } + + flavor, err := flavors.Get(client, choices.FlavorID).Extract() + if err != nil { + t.Fatalf("Unable 
to get flavor information: %v", err) + } + + t.Logf("Flavor: %#v", flavor) +} diff --git a/vendor/github.com/rackspace/gophercloud/acceptance/openstack/compute/v2/floatingip_test.go b/vendor/github.com/rackspace/gophercloud/acceptance/openstack/compute/v2/floatingip_test.go new file mode 100644 index 000000000..de6efc91b --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/acceptance/openstack/compute/v2/floatingip_test.go @@ -0,0 +1,168 @@ +// +build acceptance compute servers + +package v2 + +import ( + "os" + "testing" + + "github.com/rackspace/gophercloud" + "github.com/rackspace/gophercloud/acceptance/tools" + "github.com/rackspace/gophercloud/openstack/compute/v2/extensions/floatingip" + "github.com/rackspace/gophercloud/openstack/compute/v2/servers" + th "github.com/rackspace/gophercloud/testhelper" +) + +func createFIPServer(t *testing.T, client *gophercloud.ServiceClient, choices *ComputeChoices) (*servers.Server, error) { + if testing.Short() { + t.Skip("Skipping test that requires server creation in short mode.") + } + + name := tools.RandomString("ACPTTEST", 16) + t.Logf("Attempting to create server: %s\n", name) + + pwd := tools.MakeNewPassword("") + + server, err := servers.Create(client, servers.CreateOpts{ + Name: name, + FlavorRef: choices.FlavorID, + ImageRef: choices.ImageID, + AdminPass: pwd, + }).Extract() + if err != nil { + t.Fatalf("Unable to create server: %v", err) + } + + th.AssertEquals(t, pwd, server.AdminPass) + + return server, err +} + +func createFloatingIP(t *testing.T, client *gophercloud.ServiceClient) (*floatingip.FloatingIP, error) { + pool := os.Getenv("OS_POOL_NAME") + fip, err := floatingip.Create(client, &floatingip.CreateOpts{ + Pool: pool, + }).Extract() + th.AssertNoErr(t, err) + t.Logf("Obtained Floating IP: %v", fip.IP) + + return fip, err +} + +func associateFloatingIPDeprecated(t *testing.T, client *gophercloud.ServiceClient, serverId string, fip *floatingip.FloatingIP) { + // This form works, but is considered deprecated. 
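+	// Associate takes the bare floating-IP string and the server ID directly,
+	// whereas the newer AssociateInstance call below takes an AssociateOpts
+	// struct and can also pin the association to a specific fixed IP.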
+ // See associateFloatingIP or associateFloatingIPFixed + err := floatingip.Associate(client, serverId, fip.IP).ExtractErr() + th.AssertNoErr(t, err) + t.Logf("Associated floating IP %v from instance %v", fip.IP, serverId) + defer func() { + err = floatingip.Disassociate(client, serverId, fip.IP).ExtractErr() + th.AssertNoErr(t, err) + t.Logf("Disassociated floating IP %v from instance %v", fip.IP, serverId) + }() + floatingIp, err := floatingip.Get(client, fip.ID).Extract() + th.AssertNoErr(t, err) + t.Logf("Floating IP %v is associated with Fixed IP %v", fip.IP, floatingIp.FixedIP) +} + +func associateFloatingIP(t *testing.T, client *gophercloud.ServiceClient, serverId string, fip *floatingip.FloatingIP) { + associateOpts := floatingip.AssociateOpts{ + ServerID: serverId, + FloatingIP: fip.IP, + } + + err := floatingip.AssociateInstance(client, associateOpts).ExtractErr() + th.AssertNoErr(t, err) + t.Logf("Associated floating IP %v from instance %v", fip.IP, serverId) + defer func() { + err = floatingip.DisassociateInstance(client, associateOpts).ExtractErr() + th.AssertNoErr(t, err) + t.Logf("Disassociated floating IP %v from instance %v", fip.IP, serverId) + }() + floatingIp, err := floatingip.Get(client, fip.ID).Extract() + th.AssertNoErr(t, err) + t.Logf("Floating IP %v is associated with Fixed IP %v", fip.IP, floatingIp.FixedIP) +} + +func associateFloatingIPFixed(t *testing.T, client *gophercloud.ServiceClient, serverId string, fip *floatingip.FloatingIP) { + + network := os.Getenv("OS_NETWORK_NAME") + server, err := servers.Get(client, serverId).Extract() + if err != nil { + t.Fatalf("%s", err) + } + + var fixedIP string + for _, networkAddresses := range server.Addresses[network].([]interface{}) { + address := networkAddresses.(map[string]interface{}) + if address["OS-EXT-IPS:type"] == "fixed" { + if address["version"].(float64) == 4 { + fixedIP = address["addr"].(string) + } + } + } + + associateOpts := floatingip.AssociateOpts{ + ServerID: serverId, + FloatingIP: fip.IP, + FixedIP: fixedIP, + } + + err = floatingip.AssociateInstance(client, associateOpts).ExtractErr() + th.AssertNoErr(t, err) + t.Logf("Associated floating IP %v from instance %v with Fixed IP %v", fip.IP, serverId, fixedIP) + defer func() { + err = floatingip.DisassociateInstance(client, associateOpts).ExtractErr() + th.AssertNoErr(t, err) + t.Logf("Disassociated floating IP %v from instance %v with Fixed IP %v", fip.IP, serverId, fixedIP) + }() + floatingIp, err := floatingip.Get(client, fip.ID).Extract() + th.AssertNoErr(t, err) + th.AssertEquals(t, floatingIp.FixedIP, fixedIP) + t.Logf("Floating IP %v is associated with Fixed IP %v", fip.IP, floatingIp.FixedIP) +} + +func TestFloatingIP(t *testing.T) { + pool := os.Getenv("OS_POOL_NAME") + if pool == "" { + t.Fatalf("OS_POOL_NAME must be set") + } + + choices, err := ComputeChoicesFromEnv() + if err != nil { + t.Fatal(err) + } + + client, err := newClient() + if err != nil { + t.Fatalf("Unable to create a compute client: %v", err) + } + + server, err := createFIPServer(t, client, choices) + if err != nil { + t.Fatalf("Unable to create server: %v", err) + } + defer func() { + servers.Delete(client, server.ID) + t.Logf("Server deleted.") + }() + + if err = waitForStatus(client, server, "ACTIVE"); err != nil { + t.Fatalf("Unable to wait for server: %v", err) + } + + fip, err := createFloatingIP(t, client) + if err != nil { + t.Fatalf("Unable to create floating IP: %v", err) + } + defer func() { + err = floatingip.Delete(client, fip.ID).ExtractErr() + 
th.AssertNoErr(t, err)
+		t.Logf("Floating IP deleted.")
+	}()
+
+	associateFloatingIPDeprecated(t, client, server.ID, fip)
+	associateFloatingIP(t, client, server.ID, fip)
+	associateFloatingIPFixed(t, client, server.ID, fip)
+
+}
diff --git a/vendor/github.com/rackspace/gophercloud/acceptance/openstack/compute/v2/images_test.go b/vendor/github.com/rackspace/gophercloud/acceptance/openstack/compute/v2/images_test.go
new file mode 100644
index 000000000..ceab22fa7
--- /dev/null
+++ b/vendor/github.com/rackspace/gophercloud/acceptance/openstack/compute/v2/images_test.go
@@ -0,0 +1,38 @@
+// +build acceptance compute images
+
+package v2
+
+import (
+	"testing"
+
+	"github.com/rackspace/gophercloud/openstack/compute/v2/images"
+	"github.com/rackspace/gophercloud/pagination"
+)
+
+func TestListImages(t *testing.T) {
+	client, err := newClient()
+	if err != nil {
+		t.Fatalf("Unable to create a compute client: %v", err)
+	}
+
+	t.Logf("ID\tName\tStatus\tCreated")
+
+	pager := images.ListDetail(client, nil)
+	count, pages := 0, 0
+	pager.EachPage(func(page pagination.Page) (bool, error) {
+		pages++
+		images, err := images.ExtractImages(page)
+		if err != nil {
+			return false, err
+		}
+
+		for _, i := range images {
+			t.Logf("%s\t%s\t%s\t%s", i.ID, i.Name, i.Status, i.Created)
+			count++
+		}
+
+		return true, nil
+	})
+
+	t.Logf("--------\n%d images listed on %d pages.", count, pages)
+}
diff --git a/vendor/github.com/rackspace/gophercloud/acceptance/openstack/compute/v2/keypairs_test.go b/vendor/github.com/rackspace/gophercloud/acceptance/openstack/compute/v2/keypairs_test.go
new file mode 100644
index 000000000..a4fe8db2d
--- /dev/null
+++ b/vendor/github.com/rackspace/gophercloud/acceptance/openstack/compute/v2/keypairs_test.go
@@ -0,0 +1,75 @@
+// +build acceptance
+
+package v2
+
+import (
+	"crypto/rand"
+	"crypto/rsa"
+	"testing"
+
+	"github.com/rackspace/gophercloud/acceptance/tools"
+	"github.com/rackspace/gophercloud/openstack/compute/v2/extensions/keypairs"
+	"github.com/rackspace/gophercloud/openstack/compute/v2/servers"
+	th "github.com/rackspace/gophercloud/testhelper"
+
+	"golang.org/x/crypto/ssh"
+)
+
+const keyName = "gophercloud_test_key_pair"
+
+func TestCreateServerWithKeyPair(t *testing.T) {
+	client, err := newClient()
+	th.AssertNoErr(t, err)
+
+	if testing.Short() {
+		t.Skip("Skipping test that requires server creation in short mode.")
+	}
+
+	privateKey, err := rsa.GenerateKey(rand.Reader, 2048)
+	th.AssertNoErr(t, err)
+	publicKey := privateKey.PublicKey
+	pub, err := ssh.NewPublicKey(&publicKey)
+	th.AssertNoErr(t, err)
+	pubBytes := ssh.MarshalAuthorizedKey(pub)
+	pk := string(pubBytes)
+
+	kp, err := keypairs.Create(client, keypairs.CreateOpts{
+		Name:      keyName,
+		PublicKey: pk,
+	}).Extract()
+	th.AssertNoErr(t, err)
+	t.Logf("Created key pair: %s\n", kp)
+
+	choices, err := ComputeChoicesFromEnv()
+	th.AssertNoErr(t, err)
+
+	name := tools.RandomString("Gophercloud-", 8)
+	t.Logf("Creating server [%s] with key pair.", name)
+
+	serverCreateOpts := servers.CreateOpts{
+		Name:      name,
+		FlavorRef: choices.FlavorID,
+		ImageRef:  choices.ImageID,
+	}
+
+	server, err := servers.Create(client, keypairs.CreateOptsExt{
+		serverCreateOpts,
+		keyName,
+	}).Extract()
+	th.AssertNoErr(t, err)
+	defer servers.Delete(client, server.ID)
+	if err = waitForStatus(client, server, "ACTIVE"); err != nil {
+		t.Fatalf("Unable to wait for server: %v", err)
+	}
+
+	server, err = servers.Get(client, server.ID).Extract()
+	t.Logf("Created server: %+v\n", server)
+	th.AssertNoErr(t, err)
+	th.AssertEquals(t, server.KeyName, 
keyName) + + t.Logf("Deleting key pair [%s]...", kp.Name) + err = keypairs.Delete(client, keyName).ExtractErr() + th.AssertNoErr(t, err) + + t.Logf("Deleting server [%s]...", name) +} diff --git a/vendor/github.com/rackspace/gophercloud/acceptance/openstack/compute/v2/network_test.go b/vendor/github.com/rackspace/gophercloud/acceptance/openstack/compute/v2/network_test.go new file mode 100644 index 000000000..7ebe7ec7b --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/acceptance/openstack/compute/v2/network_test.go @@ -0,0 +1,78 @@ +// +build acceptance compute servers + +package v2 + +import ( + "os" + "testing" + + "github.com/rackspace/gophercloud" + "github.com/rackspace/gophercloud/openstack/compute/v2/extensions/networks" + "github.com/rackspace/gophercloud/openstack/compute/v2/servers" + th "github.com/rackspace/gophercloud/testhelper" +) + +func getNetworkIDFromNetworkExtension(t *testing.T, client *gophercloud.ServiceClient, networkName string) (string, error) { + allPages, err := networks.List(client).AllPages() + if err != nil { + t.Fatalf("Unable to list networks: %v", err) + } + + networkList, err := networks.ExtractNetworks(allPages) + if err != nil { + t.Fatalf("Unable to list networks: %v", err) + } + + networkID := "" + for _, network := range networkList { + t.Logf("Network: %v", network) + if network.Label == networkName { + networkID = network.ID + } + } + + t.Logf("Found network ID for %s: %s\n", networkName, networkID) + + return networkID, nil +} + +func TestNetworks(t *testing.T) { + networkName := os.Getenv("OS_NETWORK_NAME") + if networkName == "" { + t.Fatalf("OS_NETWORK_NAME must be set") + } + + choices, err := ComputeChoicesFromEnv() + if err != nil { + t.Fatal(err) + } + + client, err := newClient() + if err != nil { + t.Fatalf("Unable to create a compute client: %v", err) + } + + networkID, err := getNetworkIDFromNetworkExtension(t, client, networkName) + if err != nil { + t.Fatalf("Unable to get network ID: %v", err) + } + + // createNetworkServer is defined in tenantnetworks_test.go + server, err := createNetworkServer(t, client, choices, networkID) + if err != nil { + t.Fatalf("Unable to create server: %v", err) + } + defer func() { + servers.Delete(client, server.ID) + t.Logf("Server deleted.") + }() + + if err = waitForStatus(client, server, "ACTIVE"); err != nil { + t.Fatalf("Unable to wait for server: %v", err) + } + + allPages, err := networks.List(client).AllPages() + allNetworks, err := networks.ExtractNetworks(allPages) + th.AssertNoErr(t, err) + t.Logf("Retrieved all %d networks: %+v", len(allNetworks), allNetworks) +} diff --git a/vendor/github.com/rackspace/gophercloud/acceptance/openstack/compute/v2/pkg.go b/vendor/github.com/rackspace/gophercloud/acceptance/openstack/compute/v2/pkg.go new file mode 100644 index 000000000..bb158c3ee --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/acceptance/openstack/compute/v2/pkg.go @@ -0,0 +1,3 @@ +// The v2 package contains acceptance tests for the Openstack Compute V2 service. 
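+// These tests are guarded by build tags (see the "// +build acceptance ..."
+// lines at the top of each file), so a plain `go test ./...` run skips them.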
+ +package v2 diff --git a/vendor/github.com/rackspace/gophercloud/acceptance/openstack/compute/v2/secdefrules_test.go b/vendor/github.com/rackspace/gophercloud/acceptance/openstack/compute/v2/secdefrules_test.go new file mode 100644 index 000000000..78b07986b --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/acceptance/openstack/compute/v2/secdefrules_test.go @@ -0,0 +1,72 @@ +// +build acceptance compute defsecrules + +package v2 + +import ( + "testing" + + "github.com/rackspace/gophercloud" + "github.com/rackspace/gophercloud/acceptance/tools" + dsr "github.com/rackspace/gophercloud/openstack/compute/v2/extensions/defsecrules" + "github.com/rackspace/gophercloud/pagination" + th "github.com/rackspace/gophercloud/testhelper" +) + +func TestSecDefRules(t *testing.T) { + client, err := newClient() + th.AssertNoErr(t, err) + + id := createDefRule(t, client) + + listDefRules(t, client) + + getDefRule(t, client, id) + + deleteDefRule(t, client, id) +} + +func createDefRule(t *testing.T, client *gophercloud.ServiceClient) string { + opts := dsr.CreateOpts{ + FromPort: tools.RandomInt(80, 89), + ToPort: tools.RandomInt(90, 99), + IPProtocol: "TCP", + CIDR: "0.0.0.0/0", + } + + rule, err := dsr.Create(client, opts).Extract() + th.AssertNoErr(t, err) + + t.Logf("Created default rule %s", rule.ID) + + return rule.ID +} + +func listDefRules(t *testing.T, client *gophercloud.ServiceClient) { + err := dsr.List(client).EachPage(func(page pagination.Page) (bool, error) { + drList, err := dsr.ExtractDefaultRules(page) + th.AssertNoErr(t, err) + + for _, dr := range drList { + t.Logf("Listing default rule %s: Name [%s] From Port [%s] To Port [%s] Protocol [%s]", + dr.ID, dr.FromPort, dr.ToPort, dr.IPProtocol) + } + + return true, nil + }) + + th.AssertNoErr(t, err) +} + +func getDefRule(t *testing.T, client *gophercloud.ServiceClient, id string) { + rule, err := dsr.Get(client, id).Extract() + th.AssertNoErr(t, err) + + t.Logf("Getting rule %s: %#v", id, rule) +} + +func deleteDefRule(t *testing.T, client *gophercloud.ServiceClient, id string) { + err := dsr.Delete(client, id).ExtractErr() + th.AssertNoErr(t, err) + + t.Logf("Deleted rule %s", id) +} diff --git a/vendor/github.com/rackspace/gophercloud/acceptance/openstack/compute/v2/secgroup_test.go b/vendor/github.com/rackspace/gophercloud/acceptance/openstack/compute/v2/secgroup_test.go new file mode 100644 index 000000000..4f5073910 --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/acceptance/openstack/compute/v2/secgroup_test.go @@ -0,0 +1,177 @@ +// +build acceptance compute secgroups + +package v2 + +import ( + "testing" + + "github.com/rackspace/gophercloud" + "github.com/rackspace/gophercloud/acceptance/tools" + "github.com/rackspace/gophercloud/openstack/compute/v2/extensions/secgroups" + "github.com/rackspace/gophercloud/openstack/compute/v2/servers" + "github.com/rackspace/gophercloud/pagination" + th "github.com/rackspace/gophercloud/testhelper" +) + +func TestSecGroups(t *testing.T) { + client, err := newClient() + th.AssertNoErr(t, err) + + serverID, needsDeletion := findServer(t, client) + + groupID := createSecGroup(t, client) + + listSecGroups(t, client) + + newName := tools.RandomString("secgroup_", 5) + updateSecGroup(t, client, groupID, newName) + + getSecGroup(t, client, groupID) + + addRemoveRules(t, client, groupID) + + addServerToSecGroup(t, client, serverID, newName) + + removeServerFromSecGroup(t, client, serverID, newName) + + if needsDeletion { + servers.Delete(client, serverID) + } + + deleteSecGroup(t, 
client, groupID) +} + +func createSecGroup(t *testing.T, client *gophercloud.ServiceClient) string { + opts := secgroups.CreateOpts{ + Name: tools.RandomString("secgroup_", 5), + Description: "something", + } + + group, err := secgroups.Create(client, opts).Extract() + th.AssertNoErr(t, err) + + t.Logf("Created secgroup %s %s", group.ID, group.Name) + + return group.ID +} + +func listSecGroups(t *testing.T, client *gophercloud.ServiceClient) { + err := secgroups.List(client).EachPage(func(page pagination.Page) (bool, error) { + secGrpList, err := secgroups.ExtractSecurityGroups(page) + th.AssertNoErr(t, err) + + for _, sg := range secGrpList { + t.Logf("Listing secgroup %s: Name [%s] Desc [%s] TenantID [%s]", sg.ID, + sg.Name, sg.Description, sg.TenantID) + } + + return true, nil + }) + + th.AssertNoErr(t, err) +} + +func updateSecGroup(t *testing.T, client *gophercloud.ServiceClient, id, newName string) { + opts := secgroups.UpdateOpts{ + Name: newName, + Description: tools.RandomString("dec_", 10), + } + group, err := secgroups.Update(client, id, opts).Extract() + th.AssertNoErr(t, err) + + t.Logf("Updated %s's name to %s", group.ID, group.Name) +} + +func getSecGroup(t *testing.T, client *gophercloud.ServiceClient, id string) { + group, err := secgroups.Get(client, id).Extract() + th.AssertNoErr(t, err) + + t.Logf("Getting %s: %#v", id, group) +} + +func addRemoveRules(t *testing.T, client *gophercloud.ServiceClient, id string) { + opts := secgroups.CreateRuleOpts{ + ParentGroupID: id, + FromPort: 22, + ToPort: 22, + IPProtocol: "TCP", + CIDR: "0.0.0.0/0", + } + + rule, err := secgroups.CreateRule(client, opts).Extract() + th.AssertNoErr(t, err) + + t.Logf("Adding rule %s to group %s", rule.ID, id) + + err = secgroups.DeleteRule(client, rule.ID).ExtractErr() + th.AssertNoErr(t, err) + + t.Logf("Deleted rule %s from group %s", rule.ID, id) +} + +func findServer(t *testing.T, client *gophercloud.ServiceClient) (string, bool) { + var serverID string + var needsDeletion bool + + err := servers.List(client, nil).EachPage(func(page pagination.Page) (bool, error) { + sList, err := servers.ExtractServers(page) + th.AssertNoErr(t, err) + + for _, s := range sList { + serverID = s.ID + needsDeletion = false + + t.Logf("Found an existing server: ID [%s]", serverID) + break + } + + return true, nil + }) + th.AssertNoErr(t, err) + + if serverID == "" { + t.Log("No server found, creating one") + + choices, err := ComputeChoicesFromEnv() + th.AssertNoErr(t, err) + + opts := &servers.CreateOpts{ + Name: tools.RandomString("secgroup_test_", 5), + ImageRef: choices.ImageID, + FlavorRef: choices.FlavorID, + } + + s, err := servers.Create(client, opts).Extract() + th.AssertNoErr(t, err) + serverID = s.ID + + t.Logf("Created server %s, waiting for it to build", s.ID) + err = servers.WaitForStatus(client, serverID, "ACTIVE", 300) + th.AssertNoErr(t, err) + + needsDeletion = true + } + + return serverID, needsDeletion +} + +func addServerToSecGroup(t *testing.T, client *gophercloud.ServiceClient, serverID, groupName string) { + err := secgroups.AddServerToGroup(client, serverID, groupName).ExtractErr() + th.AssertNoErr(t, err) + + t.Logf("Adding group %s to server %s", groupName, serverID) +} + +func removeServerFromSecGroup(t *testing.T, client *gophercloud.ServiceClient, serverID, groupName string) { + err := secgroups.RemoveServerFromGroup(client, serverID, groupName).ExtractErr() + th.AssertNoErr(t, err) + + t.Logf("Removing group %s from server %s", groupName, serverID) +} + +func deleteSecGroup(t 
*testing.T, client *gophercloud.ServiceClient, id string) { + err := secgroups.Delete(client, id).ExtractErr() + th.AssertNoErr(t, err) + + t.Logf("Deleted group %s", id) +} diff --git a/vendor/github.com/rackspace/gophercloud/acceptance/openstack/compute/v2/servergroup_test.go b/vendor/github.com/rackspace/gophercloud/acceptance/openstack/compute/v2/servergroup_test.go new file mode 100644 index 000000000..945854e3b --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/acceptance/openstack/compute/v2/servergroup_test.go @@ -0,0 +1,143 @@ +// +build acceptance compute servers + +package v2 + +import ( + "fmt" + "testing" + + "github.com/rackspace/gophercloud" + "github.com/rackspace/gophercloud/acceptance/tools" + "github.com/rackspace/gophercloud/openstack/compute/v2/extensions/schedulerhints" + "github.com/rackspace/gophercloud/openstack/compute/v2/extensions/servergroups" + "github.com/rackspace/gophercloud/openstack/compute/v2/servers" + th "github.com/rackspace/gophercloud/testhelper" +) + +func createServerGroup(t *testing.T, computeClient *gophercloud.ServiceClient) (*servergroups.ServerGroup, error) { + sg, err := servergroups.Create(computeClient, &servergroups.CreateOpts{ + Name: "test", + Policies: []string{"affinity"}, + }).Extract() + + if err != nil { + t.Fatalf("Unable to create server group: %v", err) + } + + t.Logf("Created server group: %v", sg.ID) + t.Logf("It has policies: %v", sg.Policies) + + return sg, nil +} + +func getServerGroup(t *testing.T, computeClient *gophercloud.ServiceClient, sgID string) error { + sg, err := servergroups.Get(computeClient, sgID).Extract() + if err != nil { + t.Fatalf("Unable to get server group: %v", err) + } + + t.Logf("Got server group: %v", sg.Name) + + return nil +} + +func createServerInGroup(t *testing.T, computeClient *gophercloud.ServiceClient, choices *ComputeChoices, serverGroup *servergroups.ServerGroup) (*servers.Server, error) { + if testing.Short() { + t.Skip("Skipping test that requires server creation in short mode.") + } + + name := tools.RandomString("ACPTTEST", 16) + t.Logf("Attempting to create server: %s\n", name) + + pwd := tools.MakeNewPassword("") + + serverCreateOpts := servers.CreateOpts{ + Name: name, + FlavorRef: choices.FlavorID, + ImageRef: choices.ImageID, + AdminPass: pwd, + } + server, err := servers.Create(computeClient, schedulerhints.CreateOptsExt{ + serverCreateOpts, + schedulerhints.SchedulerHints{ + Group: serverGroup.ID, + }, + }).Extract() + if err != nil { + t.Fatalf("Unable to create server: %v", err) + } + + th.AssertEquals(t, pwd, server.AdminPass) + + return server, err +} + +func verifySchedulerWorked(t *testing.T, firstServer, secondServer *servers.Server) error { + t.Logf("First server hostID: %v", firstServer.HostID) + t.Logf("Second server hostID: %v", secondServer.HostID) + if firstServer.HostID == secondServer.HostID { + return nil + } + + return fmt.Errorf("%s and %s were not scheduled on the same host.", firstServer.ID, secondServer.ID) +} + +func TestServerGroups(t *testing.T) { + choices, err := ComputeChoicesFromEnv() + if err != nil { + t.Fatal(err) + } + + computeClient, err := newClient() + if err != nil { + t.Fatalf("Unable to create a compute client: %v", err) + } + + sg, err := createServerGroup(t, computeClient) + if err != nil { + t.Fatalf("Unable to create server group: %v", err) + } + defer func() { + servergroups.Delete(computeClient, sg.ID) + t.Logf("Server Group deleted.") + }() + + err = getServerGroup(t, computeClient, sg.ID) + if err != nil { + 
t.Fatalf("Unable to get server group: %v", err) + } + + firstServer, err := createServerInGroup(t, computeClient, choices, sg) + if err != nil { + t.Fatalf("Unable to create server: %v", err) + } + defer func() { + servers.Delete(computeClient, firstServer.ID) + t.Logf("Server deleted.") + }() + + if err = waitForStatus(computeClient, firstServer, "ACTIVE"); err != nil { + t.Fatalf("Unable to wait for server: %v", err) + } + + firstServer, err = servers.Get(computeClient, firstServer.ID).Extract() + + secondServer, err := createServerInGroup(t, computeClient, choices, sg) + if err != nil { + t.Fatalf("Unable to create server: %v", err) + } + defer func() { + servers.Delete(computeClient, secondServer.ID) + t.Logf("Server deleted.") + }() + + if err = waitForStatus(computeClient, secondServer, "ACTIVE"); err != nil { + t.Fatalf("Unable to wait for server: %v", err) + } + + secondServer, err = servers.Get(computeClient, secondServer.ID).Extract() + + if err = verifySchedulerWorked(t, firstServer, secondServer); err != nil { + t.Fatalf("Scheduling did not work: %v", err) + } +} diff --git a/vendor/github.com/rackspace/gophercloud/acceptance/openstack/compute/v2/servers_test.go b/vendor/github.com/rackspace/gophercloud/acceptance/openstack/compute/v2/servers_test.go new file mode 100644 index 000000000..f6c7c0524 --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/acceptance/openstack/compute/v2/servers_test.go @@ -0,0 +1,484 @@ +// +build acceptance compute servers + +package v2 + +import ( + "os" + "testing" + + "github.com/rackspace/gophercloud" + "github.com/rackspace/gophercloud/acceptance/tools" + "github.com/rackspace/gophercloud/openstack" + "github.com/rackspace/gophercloud/openstack/compute/v2/servers" + "github.com/rackspace/gophercloud/openstack/networking/v2/networks" + "github.com/rackspace/gophercloud/pagination" + th "github.com/rackspace/gophercloud/testhelper" +) + +func TestListServers(t *testing.T) { + client, err := newClient() + if err != nil { + t.Fatalf("Unable to create a compute client: %v", err) + } + + t.Logf("ID\tRegion\tName\tStatus\tIPv4\tIPv6") + + pager := servers.List(client, servers.ListOpts{}) + count, pages := 0, 0 + pager.EachPage(func(page pagination.Page) (bool, error) { + pages++ + t.Logf("---") + + servers, err := servers.ExtractServers(page) + if err != nil { + return false, err + } + + for _, s := range servers { + t.Logf("%s\t%s\t%s\t%s\t%s\t\n", s.ID, s.Name, s.Status, s.AccessIPv4, s.AccessIPv6) + count++ + } + + return true, nil + }) + + t.Logf("--------\n%d servers listed on %d pages.\n", count, pages) +} + +func networkingClient() (*gophercloud.ServiceClient, error) { + opts, err := openstack.AuthOptionsFromEnv() + if err != nil { + return nil, err + } + + provider, err := openstack.AuthenticatedClient(opts) + if err != nil { + return nil, err + } + + return openstack.NewNetworkV2(provider, gophercloud.EndpointOpts{ + Region: os.Getenv("OS_REGION_NAME"), + }) +} + +func createServer(t *testing.T, client *gophercloud.ServiceClient, choices *ComputeChoices) (*servers.Server, error) { + if testing.Short() { + t.Skip("Skipping test that requires server creation in short mode.") + } + + var network networks.Network + + networkingClient, err := networkingClient() + if err != nil { + t.Fatalf("Unable to create a networking client: %v", err) + } + + pager := networks.List(networkingClient, networks.ListOpts{ + Name: choices.NetworkName, + Limit: 1, + }) + pager.EachPage(func(page pagination.Page) (bool, error) { + networks, err := 
networks.ExtractNetworks(page) + if err != nil { + t.Errorf("Failed to extract networks: %v", err) + return false, err + } + + if len(networks) == 0 { + t.Fatalf("No networks to attach to server") + return false, err + } + + network = networks[0] + + return false, nil + }) + + name := tools.RandomString("ACPTTEST", 16) + t.Logf("Attempting to create server: %s\n", name) + + pwd := tools.MakeNewPassword("") + + server, err := servers.Create(client, servers.CreateOpts{ + Name: name, + FlavorRef: choices.FlavorID, + ImageRef: choices.ImageID, + Networks: []servers.Network{ + servers.Network{UUID: network.ID}, + }, + AdminPass: pwd, + Personality: servers.Personality{ + &servers.File{ + Path: "/etc/test", + Contents: []byte("hello world"), + }, + }, + }).Extract() + if err != nil { + t.Fatalf("Unable to create server: %v", err) + } + + th.AssertEquals(t, pwd, server.AdminPass) + + return server, err +} + +func TestCreateDestroyServer(t *testing.T) { + choices, err := ComputeChoicesFromEnv() + if err != nil { + t.Fatal(err) + } + + client, err := newClient() + if err != nil { + t.Fatalf("Unable to create a compute client: %v", err) + } + + server, err := createServer(t, client, choices) + if err != nil { + t.Fatalf("Unable to create server: %v", err) + } + defer func() { + servers.Delete(client, server.ID) + t.Logf("Server deleted.") + }() + + if err = waitForStatus(client, server, "ACTIVE"); err != nil { + t.Fatalf("Unable to wait for server: %v", err) + } + + pager := servers.ListAddresses(client, server.ID) + pager.EachPage(func(page pagination.Page) (bool, error) { + networks, err := servers.ExtractAddresses(page) + if err != nil { + return false, err + } + + for n, a := range networks { + t.Logf("%s: %+v\n", n, a) + } + return true, nil + }) + + pager = servers.ListAddressesByNetwork(client, server.ID, choices.NetworkName) + pager.EachPage(func(page pagination.Page) (bool, error) { + addresses, err := servers.ExtractNetworkAddresses(page) + if err != nil { + return false, err + } + + for _, a := range addresses { + t.Logf("%+v\n", a) + } + return true, nil + }) +} + +func TestUpdateServer(t *testing.T) { + client, err := newClient() + if err != nil { + t.Fatalf("Unable to create a compute client: %v", err) + } + + choices, err := ComputeChoicesFromEnv() + if err != nil { + t.Fatal(err) + } + + server, err := createServer(t, client, choices) + if err != nil { + t.Fatal(err) + } + defer servers.Delete(client, server.ID) + + if err = waitForStatus(client, server, "ACTIVE"); err != nil { + t.Fatal(err) + } + + alternateName := tools.RandomString("ACPTTEST", 16) + for alternateName == server.Name { + alternateName = tools.RandomString("ACPTTEST", 16) + } + + t.Logf("Attempting to rename the server to %s.", alternateName) + + updated, err := servers.Update(client, server.ID, servers.UpdateOpts{Name: alternateName}).Extract() + if err != nil { + t.Fatalf("Unable to rename server: %v", err) + } + + if updated.ID != server.ID { + t.Errorf("Updated server ID [%s] didn't match original server ID [%s]!", updated.ID, server.ID) + } + + err = tools.WaitFor(func() (bool, error) { + latest, err := servers.Get(client, updated.ID).Extract() + if err != nil { + return false, err + } + + return latest.Name == alternateName, nil + }) +} + +func TestActionChangeAdminPassword(t *testing.T) { + t.Parallel() + + client, err := newClient() + if err != nil { + t.Fatalf("Unable to create a compute client: %v", err) + } + + choices, err := ComputeChoicesFromEnv() + if err != nil { + t.Fatal(err) + } + + server, err := 
createServer(t, client, choices) + if err != nil { + t.Fatal(err) + } + defer servers.Delete(client, server.ID) + + if err = waitForStatus(client, server, "ACTIVE"); err != nil { + t.Fatal(err) + } + + randomPassword := tools.MakeNewPassword(server.AdminPass) + res := servers.ChangeAdminPassword(client, server.ID, randomPassword) + if res.Err != nil { + t.Fatal(err) + } + + if err = waitForStatus(client, server, "PASSWORD"); err != nil { + t.Fatal(err) + } + + if err = waitForStatus(client, server, "ACTIVE"); err != nil { + t.Fatal(err) + } +} + +func TestActionReboot(t *testing.T) { + t.Parallel() + + client, err := newClient() + if err != nil { + t.Fatalf("Unable to create a compute client: %v", err) + } + + choices, err := ComputeChoicesFromEnv() + if err != nil { + t.Fatal(err) + } + + server, err := createServer(t, client, choices) + if err != nil { + t.Fatal(err) + } + defer servers.Delete(client, server.ID) + + if err = waitForStatus(client, server, "ACTIVE"); err != nil { + t.Fatal(err) + } + + res := servers.Reboot(client, server.ID, "aldhjflaskhjf") + if res.Err == nil { + t.Fatal("Expected the SDK to provide an ArgumentError here") + } + + t.Logf("Attempting reboot of server %s", server.ID) + res = servers.Reboot(client, server.ID, servers.OSReboot) + if res.Err != nil { + t.Fatalf("Unable to reboot server: %v", err) + } + + if err = waitForStatus(client, server, "REBOOT"); err != nil { + t.Fatal(err) + } + + if err = waitForStatus(client, server, "ACTIVE"); err != nil { + t.Fatal(err) + } +} + +func TestActionRebuild(t *testing.T) { + t.Parallel() + + client, err := newClient() + if err != nil { + t.Fatalf("Unable to create a compute client: %v", err) + } + + choices, err := ComputeChoicesFromEnv() + if err != nil { + t.Fatal(err) + } + + server, err := createServer(t, client, choices) + if err != nil { + t.Fatal(err) + } + defer servers.Delete(client, server.ID) + + if err = waitForStatus(client, server, "ACTIVE"); err != nil { + t.Fatal(err) + } + + t.Logf("Attempting to rebuild server %s", server.ID) + + rebuildOpts := servers.RebuildOpts{ + Name: tools.RandomString("ACPTTEST", 16), + AdminPass: tools.MakeNewPassword(server.AdminPass), + ImageID: choices.ImageID, + } + + rebuilt, err := servers.Rebuild(client, server.ID, rebuildOpts).Extract() + if err != nil { + t.Fatal(err) + } + + if rebuilt.ID != server.ID { + t.Errorf("Expected rebuilt server ID of [%s]; got [%s]", server.ID, rebuilt.ID) + } + + if err = waitForStatus(client, rebuilt, "REBUILD"); err != nil { + t.Fatal(err) + } + + if err = waitForStatus(client, rebuilt, "ACTIVE"); err != nil { + t.Fatal(err) + } +} + +func resizeServer(t *testing.T, client *gophercloud.ServiceClient, server *servers.Server, choices *ComputeChoices) { + if err := waitForStatus(client, server, "ACTIVE"); err != nil { + t.Fatal(err) + } + + t.Logf("Attempting to resize server [%s]", server.ID) + + opts := &servers.ResizeOpts{ + FlavorRef: choices.FlavorIDResize, + } + if res := servers.Resize(client, server.ID, opts); res.Err != nil { + t.Fatal(res.Err) + } + + if err := waitForStatus(client, server, "VERIFY_RESIZE"); err != nil { + t.Fatal(err) + } +} + +func TestActionResizeConfirm(t *testing.T) { + t.Parallel() + + choices, err := ComputeChoicesFromEnv() + if err != nil { + t.Fatal(err) + } + + client, err := newClient() + if err != nil { + t.Fatalf("Unable to create a compute client: %v", err) + } + + server, err := createServer(t, client, choices) + if err != nil { + t.Fatal(err) + } + defer servers.Delete(client, server.ID) + 
resizeServer(t, client, server, choices) + + t.Logf("Attempting to confirm resize for server %s", server.ID) + + if res := servers.ConfirmResize(client, server.ID); res.Err != nil { + t.Fatal(err) + } + + if err = waitForStatus(client, server, "ACTIVE"); err != nil { + t.Fatal(err) + } +} + +func TestActionResizeRevert(t *testing.T) { + t.Parallel() + + choices, err := ComputeChoicesFromEnv() + if err != nil { + t.Fatal(err) + } + + client, err := newClient() + if err != nil { + t.Fatalf("Unable to create a compute client: %v", err) + } + + server, err := createServer(t, client, choices) + if err != nil { + t.Fatal(err) + } + defer servers.Delete(client, server.ID) + resizeServer(t, client, server, choices) + + t.Logf("Attempting to revert resize for server %s", server.ID) + + if res := servers.RevertResize(client, server.ID); res.Err != nil { + t.Fatal(err) + } + + if err = waitForStatus(client, server, "ACTIVE"); err != nil { + t.Fatal(err) + } +} + +func TestServerMetadata(t *testing.T) { + t.Parallel() + + choices, err := ComputeChoicesFromEnv() + th.AssertNoErr(t, err) + + client, err := newClient() + if err != nil { + t.Fatalf("Unable to create a compute client: %v", err) + } + + server, err := createServer(t, client, choices) + if err != nil { + t.Fatal(err) + } + defer servers.Delete(client, server.ID) + if err = waitForStatus(client, server, "ACTIVE"); err != nil { + t.Fatal(err) + } + + metadata, err := servers.UpdateMetadata(client, server.ID, servers.MetadataOpts{ + "foo": "bar", + "this": "that", + }).Extract() + th.AssertNoErr(t, err) + t.Logf("UpdateMetadata result: %+v\n", metadata) + + err = servers.DeleteMetadatum(client, server.ID, "foo").ExtractErr() + th.AssertNoErr(t, err) + + metadata, err = servers.CreateMetadatum(client, server.ID, servers.MetadatumOpts{ + "foo": "baz", + }).Extract() + th.AssertNoErr(t, err) + t.Logf("CreateMetadatum result: %+v\n", metadata) + + metadata, err = servers.Metadatum(client, server.ID, "foo").Extract() + th.AssertNoErr(t, err) + t.Logf("Metadatum result: %+v\n", metadata) + th.AssertEquals(t, "baz", metadata["foo"]) + + metadata, err = servers.Metadata(client, server.ID).Extract() + th.AssertNoErr(t, err) + t.Logf("Metadata result: %+v\n", metadata) + + metadata, err = servers.ResetMetadata(client, server.ID, servers.MetadataOpts{}).Extract() + th.AssertNoErr(t, err) + t.Logf("ResetMetadata result: %+v\n", metadata) + th.AssertDeepEquals(t, map[string]string{}, metadata) +} diff --git a/vendor/github.com/rackspace/gophercloud/acceptance/openstack/compute/v2/tenantnetworks_test.go b/vendor/github.com/rackspace/gophercloud/acceptance/openstack/compute/v2/tenantnetworks_test.go new file mode 100644 index 000000000..a92e8bf53 --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/acceptance/openstack/compute/v2/tenantnetworks_test.go @@ -0,0 +1,109 @@ +// +build acceptance compute servers + +package v2 + +import ( + "os" + "testing" + + "github.com/rackspace/gophercloud" + "github.com/rackspace/gophercloud/acceptance/tools" + "github.com/rackspace/gophercloud/openstack/compute/v2/extensions/tenantnetworks" + "github.com/rackspace/gophercloud/openstack/compute/v2/servers" + th "github.com/rackspace/gophercloud/testhelper" +) + +func getNetworkID(t *testing.T, client *gophercloud.ServiceClient, networkName string) (string, error) { + allPages, err := tenantnetworks.List(client).AllPages() + if err != nil { + t.Fatalf("Unable to list networks: %v", err) + } + + networkList, err := tenantnetworks.ExtractNetworks(allPages) + if err != nil 
{ + t.Fatalf("Unable to list networks: %v", err) + } + + networkID := "" + for _, network := range networkList { + t.Logf("Network: %v", network) + if network.Name == networkName { + networkID = network.ID + } + } + + t.Logf("Found network ID for %s: %s\n", networkName, networkID) + + return networkID, nil +} + +func createNetworkServer(t *testing.T, client *gophercloud.ServiceClient, choices *ComputeChoices, networkID string) (*servers.Server, error) { + if testing.Short() { + t.Skip("Skipping test that requires server creation in short mode.") + } + + name := tools.RandomString("ACPTTEST", 16) + t.Logf("Attempting to create server: %s\n", name) + + pwd := tools.MakeNewPassword("") + + networks := make([]servers.Network, 1) + networks[0] = servers.Network{ + UUID: networkID, + } + + server, err := servers.Create(client, servers.CreateOpts{ + Name: name, + FlavorRef: choices.FlavorID, + ImageRef: choices.ImageID, + AdminPass: pwd, + Networks: networks, + }).Extract() + if err != nil { + t.Fatalf("Unable to create server: %v", err) + } + + th.AssertEquals(t, pwd, server.AdminPass) + + return server, err +} + +func TestTenantNetworks(t *testing.T) { + networkName := os.Getenv("OS_NETWORK_NAME") + if networkName == "" { + t.Fatalf("OS_NETWORK_NAME must be set") + } + + choices, err := ComputeChoicesFromEnv() + if err != nil { + t.Fatal(err) + } + + client, err := newClient() + if err != nil { + t.Fatalf("Unable to create a compute client: %v", err) + } + + networkID, err := getNetworkID(t, client, networkName) + if err != nil { + t.Fatalf("Unable to get network ID: %v", err) + } + + server, err := createNetworkServer(t, client, choices, networkID) + if err != nil { + t.Fatalf("Unable to create server: %v", err) + } + defer func() { + servers.Delete(client, server.ID) + t.Logf("Server deleted.") + }() + + if err = waitForStatus(client, server, "ACTIVE"); err != nil { + t.Fatalf("Unable to wait for server: %v", err) + } + + allPages, err := tenantnetworks.List(client).AllPages() + allNetworks, err := tenantnetworks.ExtractNetworks(allPages) + th.AssertNoErr(t, err) + t.Logf("Retrieved all %d networks: %+v", len(allNetworks), allNetworks) +} diff --git a/vendor/github.com/rackspace/gophercloud/acceptance/openstack/compute/v2/volumeattach_test.go b/vendor/github.com/rackspace/gophercloud/acceptance/openstack/compute/v2/volumeattach_test.go new file mode 100644 index 000000000..34634c9d2 --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/acceptance/openstack/compute/v2/volumeattach_test.go @@ -0,0 +1,125 @@ +// +build acceptance compute servers + +package v2 + +import ( + "os" + "testing" + + "github.com/rackspace/gophercloud" + "github.com/rackspace/gophercloud/acceptance/tools" + "github.com/rackspace/gophercloud/openstack" + "github.com/rackspace/gophercloud/openstack/blockstorage/v1/volumes" + "github.com/rackspace/gophercloud/openstack/compute/v2/extensions/volumeattach" + "github.com/rackspace/gophercloud/openstack/compute/v2/servers" + th "github.com/rackspace/gophercloud/testhelper" +) + +func newBlockClient(t *testing.T) (*gophercloud.ServiceClient, error) { + ao, err := openstack.AuthOptionsFromEnv() + th.AssertNoErr(t, err) + + client, err := openstack.AuthenticatedClient(ao) + th.AssertNoErr(t, err) + + return openstack.NewBlockStorageV1(client, gophercloud.EndpointOpts{ + Region: os.Getenv("OS_REGION_NAME"), + }) +} + +func createVAServer(t *testing.T, computeClient *gophercloud.ServiceClient, choices *ComputeChoices) (*servers.Server, error) { + if testing.Short() { + 
t.Skip("Skipping test that requires server creation in short mode.") + } + + name := tools.RandomString("ACPTTEST", 16) + t.Logf("Attempting to create server: %s\n", name) + + pwd := tools.MakeNewPassword("") + + server, err := servers.Create(computeClient, servers.CreateOpts{ + Name: name, + FlavorRef: choices.FlavorID, + ImageRef: choices.ImageID, + AdminPass: pwd, + }).Extract() + if err != nil { + t.Fatalf("Unable to create server: %v", err) + } + + th.AssertEquals(t, pwd, server.AdminPass) + + return server, err +} + +func createVAVolume(t *testing.T, blockClient *gophercloud.ServiceClient) (*volumes.Volume, error) { + volume, err := volumes.Create(blockClient, &volumes.CreateOpts{ + Size: 1, + Name: "gophercloud-test-volume", + }).Extract() + th.AssertNoErr(t, err) + defer func() { + err = volumes.WaitForStatus(blockClient, volume.ID, "available", 60) + th.AssertNoErr(t, err) + }() + + return volume, err +} + +func createVolumeAttachment(t *testing.T, computeClient *gophercloud.ServiceClient, blockClient *gophercloud.ServiceClient, serverId string, volumeId string) { + va, err := volumeattach.Create(computeClient, serverId, &volumeattach.CreateOpts{ + VolumeID: volumeId, + }).Extract() + th.AssertNoErr(t, err) + defer func() { + err = volumes.WaitForStatus(blockClient, volumeId, "in-use", 60) + th.AssertNoErr(t, err) + err = volumeattach.Delete(computeClient, serverId, va.ID).ExtractErr() + th.AssertNoErr(t, err) + err = volumes.WaitForStatus(blockClient, volumeId, "available", 60) + th.AssertNoErr(t, err) + }() +} + +func TestAttachVolume(t *testing.T) { + choices, err := ComputeChoicesFromEnv() + if err != nil { + t.Fatal(err) + } + + computeClient, err := newClient() + if err != nil { + t.Fatalf("Unable to create a compute client: %v", err) + } + + blockClient, err := newBlockClient(t) + if err != nil { + t.Fatalf("Unable to create a blockstorage client: %v", err) + } + + server, err := createVAServer(t, computeClient, choices) + if err != nil { + t.Fatalf("Unable to create server: %v", err) + } + defer func() { + servers.Delete(computeClient, server.ID) + t.Logf("Server deleted.") + }() + + if err = waitForStatus(computeClient, server, "ACTIVE"); err != nil { + t.Fatalf("Unable to wait for server: %v", err) + } + + volume, err := createVAVolume(t, blockClient) + if err != nil { + t.Fatalf("Unable to create volume: %v", err) + } + defer func() { + err = volumes.Delete(blockClient, volume.ID).ExtractErr() + th.AssertNoErr(t, err) + t.Logf("Volume deleted.") + }() + + createVolumeAttachment(t, computeClient, blockClient, server.ID, volume.ID) + +} diff --git a/vendor/github.com/rackspace/gophercloud/acceptance/openstack/db/v1/common.go b/vendor/github.com/rackspace/gophercloud/acceptance/openstack/db/v1/common.go new file mode 100644 index 000000000..f7ffc37b2 --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/acceptance/openstack/db/v1/common.go @@ -0,0 +1,70 @@ +// +build acceptance db + +package v1 + +import ( + "os" + "testing" + + "github.com/rackspace/gophercloud" + "github.com/rackspace/gophercloud/openstack" + "github.com/rackspace/gophercloud/openstack/db/v1/instances" + th "github.com/rackspace/gophercloud/testhelper" +) + +func newClient(t *testing.T) *gophercloud.ServiceClient { + ao, err := openstack.AuthOptionsFromEnv() + th.AssertNoErr(t, err) + + client, err := openstack.AuthenticatedClient(ao) + th.AssertNoErr(t, err) + + c, err := openstack.NewDBV1(client, gophercloud.EndpointOpts{ + Region: os.Getenv("OS_REGION_NAME"), + }) + th.AssertNoErr(t, err) + + 
return c
+}
+
+type context struct {
+	test       *testing.T
+	client     *gophercloud.ServiceClient
+	instanceID string
+	DBIDs      []string
+	users      []string
+}
+
+func newContext(t *testing.T) context {
+	return context{
+		test:   t,
+		client: newClient(t),
+	}
+}
+
+func (c context) Logf(msg string, args ...interface{}) {
+	if len(args) > 0 {
+		c.test.Logf(msg, args...)
+	} else {
+		c.test.Log(msg)
+	}
+}
+
+func (c context) AssertNoErr(err error) {
+	th.AssertNoErr(c.test, err)
+}
+
+func (c context) WaitUntilActive(id string) {
+	err := gophercloud.WaitFor(60, func() (bool, error) {
+		inst, err := instances.Get(c.client, id).Extract()
+		if err != nil {
+			return false, err
+		}
+		if inst.Status == "ACTIVE" {
+			return true, nil
+		}
+		return false, nil
+	})
+
+	c.AssertNoErr(err)
+}
diff --git a/vendor/github.com/rackspace/gophercloud/acceptance/openstack/db/v1/database_test.go b/vendor/github.com/rackspace/gophercloud/acceptance/openstack/db/v1/database_test.go
new file mode 100644
index 000000000..2fd31753b
--- /dev/null
+++ b/vendor/github.com/rackspace/gophercloud/acceptance/openstack/db/v1/database_test.go
@@ -0,0 +1,45 @@
+// +build acceptance db
+
+package v1
+
+import (
+	db "github.com/rackspace/gophercloud/openstack/db/v1/databases"
+	"github.com/rackspace/gophercloud/pagination"
+)
+
+func (c context) createDBs() {
+	opts := db.BatchCreateOpts{
+		db.CreateOpts{Name: "db1"},
+		db.CreateOpts{Name: "db2"},
+		db.CreateOpts{Name: "db3"},
+	}
+
+	err := db.Create(c.client, c.instanceID, opts).ExtractErr()
+	c.AssertNoErr(err)
+	c.Logf("Created three databases on instance %s: db1, db2, db3", c.instanceID)
+}
+
+func (c context) listDBs() {
+	c.Logf("Listing databases on instance %s", c.instanceID)
+
+	err := db.List(c.client, c.instanceID).EachPage(func(page pagination.Page) (bool, error) {
+		dbList, err := db.ExtractDBs(page)
+		c.AssertNoErr(err)
+
+		for _, db := range dbList {
+			c.Logf("DB: %#v", db)
+		}
+
+		return true, nil
+	})
+
+	c.AssertNoErr(err)
+}
+
+func (c context) deleteDBs() {
+	for _, id := range []string{"db1", "db2", "db3"} {
+		err := db.Delete(c.client, c.instanceID, id).ExtractErr()
+		c.AssertNoErr(err)
+		c.Logf("Deleted DB %s", id)
+	}
+}
diff --git a/vendor/github.com/rackspace/gophercloud/acceptance/openstack/db/v1/flavor_test.go b/vendor/github.com/rackspace/gophercloud/acceptance/openstack/db/v1/flavor_test.go
new file mode 100644
index 000000000..46f986cc8
--- /dev/null
+++ b/vendor/github.com/rackspace/gophercloud/acceptance/openstack/db/v1/flavor_test.go
@@ -0,0 +1,31 @@
+// +build acceptance db
+
+package v1
+
+import (
+	"github.com/rackspace/gophercloud/openstack/db/v1/flavors"
+	"github.com/rackspace/gophercloud/pagination"
+)
+
+func (c context) listFlavors() {
+	c.Logf("Listing flavors")
+
+	err := flavors.List(c.client).EachPage(func(page pagination.Page) (bool, error) {
+		flavorList, err := flavors.ExtractFlavors(page)
+		c.AssertNoErr(err)
+
+		for _, f := range flavorList {
+			c.Logf("Flavor: ID [%s] Name [%s] RAM [%d]", f.ID, f.Name, f.RAM)
+		}
+
+		return true, nil
+	})
+
+	c.AssertNoErr(err)
+}
+
+func (c context) getFlavor() {
+	flavor, err := flavors.Get(c.client, "1").Extract()
+	c.AssertNoErr(err)
+	c.Logf("Getting flavor %s", flavor.ID)
+}
diff --git a/vendor/github.com/rackspace/gophercloud/acceptance/openstack/db/v1/instance_test.go b/vendor/github.com/rackspace/gophercloud/acceptance/openstack/db/v1/instance_test.go
new file mode 100644
index 000000000..dfded2175
--- /dev/null
+++ b/vendor/github.com/rackspace/gophercloud/acceptance/openstack/db/v1/instance_test.go
@@ -0,0 +1,138 @@
+// +build acceptance db
+
+package v1
+
+import (
+	"os"
+	"testing"
+
+	"github.com/rackspace/gophercloud/acceptance/tools"
+	"github.com/rackspace/gophercloud/openstack/db/v1/instances"
+	"github.com/rackspace/gophercloud/pagination"
+	th "github.com/rackspace/gophercloud/testhelper"
+)
+
+const envDSType = "DATASTORE_TYPE_ID"
+
+func TestRunner(t *testing.T) {
+	c := newContext(t)
+
+	// FLAVOR tests
+	c.listFlavors()
+	c.getFlavor()
+
+	// INSTANCE tests
+	c.createInstance()
+	c.listInstances()
+	c.getInstance()
+	c.isRootEnabled()
+	c.enableRootUser()
+	c.isRootEnabled()
+	c.restartInstance()
+	//c.resizeInstance()
+	//c.resizeVol()
+
+	// DATABASE tests
+	c.createDBs()
+	c.listDBs()
+
+	// USER tests
+	c.createUsers()
+	c.listUsers()
+
+	// TEARDOWN
+	c.deleteUsers()
+	c.deleteDBs()
+	c.deleteInstance()
+}
+
+func (c *context) createInstance() {
+	if os.Getenv(envDSType) == "" {
+		c.test.Fatalf("%s must be set as an environment var", envDSType)
+	}
+
+	opts := instances.CreateOpts{
+		FlavorRef: "2",
+		Size:      5,
+		Name:      tools.RandomString("gopher_db", 5),
+		Datastore: &instances.DatastoreOpts{Type: os.Getenv(envDSType)},
+	}
+
+	instance, err := instances.Create(c.client, opts).Extract()
+	th.AssertNoErr(c.test, err)
+
+	c.Logf("Creating instance %s. Waiting...", instance.ID)
+	c.WaitUntilActive(instance.ID)
+	c.Logf("Created instance %s", instance.ID)
+
+	c.instanceID = instance.ID
+}
+
+func (c context) listInstances() {
+	c.Logf("Listing instances")
+
+	err := instances.List(c.client).EachPage(func(page pagination.Page) (bool, error) {
+		instanceList, err := instances.ExtractInstances(page)
+		c.AssertNoErr(err)
+
+		for _, i := range instanceList {
+			c.Logf("Instance: ID [%s] Name [%s] Status [%s] VolSize [%d] Datastore Type [%s]",
+				i.ID, i.Name, i.Status, i.Volume.Size, i.Datastore.Type)
+		}
+
+		return true, nil
+	})
+
+	c.AssertNoErr(err)
+}
+
+func (c context) getInstance() {
+	instance, err := instances.Get(c.client, c.instanceID).Extract()
+	c.AssertNoErr(err)
+	c.Logf("Getting instance: %s", instance.ID)
+}
+
+func (c context) deleteInstance() {
+	err := instances.Delete(c.client, c.instanceID).ExtractErr()
+	c.AssertNoErr(err)
+	c.Logf("Deleted instance %s", c.instanceID)
+}
+
+func (c context) enableRootUser() {
+	_, err := instances.EnableRootUser(c.client, c.instanceID).Extract()
+	c.AssertNoErr(err)
+	c.Logf("Enabled root user on %s", c.instanceID)
+}
+
+func (c context) isRootEnabled() {
+	enabled, err := instances.IsRootEnabled(c.client, c.instanceID)
+	c.AssertNoErr(err)
+	c.Logf("Is root enabled? %t", enabled)
+}
+
+func (c context) restartInstance() {
+	id := c.instanceID
+	err := instances.Restart(c.client, id).ExtractErr()
+	c.AssertNoErr(err)
+	c.Logf("Restarting %s. Waiting...", id)
+	c.WaitUntilActive(id)
+	c.Logf("Restarted %s", id)
+}
+
+func (c context) resizeInstance() {
+	id := c.instanceID
+	err := instances.Resize(c.client, id, "3").ExtractErr()
+	c.AssertNoErr(err)
+	c.Logf("Resizing %s. Waiting...", id)
+	c.WaitUntilActive(id)
+	c.Logf("Resized %s with flavorRef %s", id, "3")
+}
+
+func (c context) resizeVol() {
+	id := c.instanceID
+	err := instances.ResizeVolume(c.client, id, 4).ExtractErr()
+	c.AssertNoErr(err)
+	c.Logf("Resizing volume of %s. Waiting...", id)
+	c.WaitUntilActive(id)
+	c.Logf("Resized the volume of %s to %d GB", id, 4)
}
diff --git a/vendor/github.com/rackspace/gophercloud/acceptance/openstack/db/v1/pkg.go b/vendor/github.com/rackspace/gophercloud/acceptance/openstack/db/v1/pkg.go
new file mode 100644
index 000000000..b7b1f993d
--- /dev/null
+++ b/vendor/github.com/rackspace/gophercloud/acceptance/openstack/db/v1/pkg.go
@@ -0,0 +1 @@
+package v1
diff --git a/vendor/github.com/rackspace/gophercloud/acceptance/openstack/db/v1/user_test.go b/vendor/github.com/rackspace/gophercloud/acceptance/openstack/db/v1/user_test.go
new file mode 100644
index 000000000..25a47943c
--- /dev/null
+++ b/vendor/github.com/rackspace/gophercloud/acceptance/openstack/db/v1/user_test.go
@@ -0,0 +1,70 @@
+// +build acceptance db
+
+package v1
+
+import (
+	"github.com/rackspace/gophercloud/acceptance/tools"
+	db "github.com/rackspace/gophercloud/openstack/db/v1/databases"
+	u "github.com/rackspace/gophercloud/openstack/db/v1/users"
+	"github.com/rackspace/gophercloud/pagination"
+)
+
+func (c *context) createUsers() {
+	users := []string{
+		tools.RandomString("user_", 5),
+		tools.RandomString("user_", 5),
+		tools.RandomString("user_", 5),
+	}
+
+	db1 := db.CreateOpts{Name: "db1"}
+	db2 := db.CreateOpts{Name: "db2"}
+	db3 := db.CreateOpts{Name: "db3"}
+
+	opts := u.BatchCreateOpts{
+		u.CreateOpts{
+			Name:      users[0],
+			Password:  tools.RandomString("", 5),
+			Databases: db.BatchCreateOpts{db1, db2, db3},
+		},
+		u.CreateOpts{
+			Name:      users[1],
+			Password:  tools.RandomString("", 5),
+			Databases: db.BatchCreateOpts{db1, db2},
+		},
+		u.CreateOpts{
+			Name:      users[2],
+			Password:  tools.RandomString("", 5),
+			Databases: db.BatchCreateOpts{db3},
+		},
+	}
+
+	err := u.Create(c.client, c.instanceID, opts).ExtractErr()
+	c.AssertNoErr(err)
+	c.Logf("Created three users on instance %s: %s, %s, %s", c.instanceID, users[0], users[1], users[2])
+	c.users = users
+}
+
+func (c context) listUsers() {
+	c.Logf("Listing users on instance %s", c.instanceID)
+
+	err := u.List(c.client, c.instanceID).EachPage(func(page pagination.Page) (bool, error) {
+		userList, err := u.ExtractUsers(page)
+		c.AssertNoErr(err)
+
+		for _, user := range userList {
+			c.Logf("User: %#v", user)
+		}
+
+		return true, nil
+	})
+
+	c.AssertNoErr(err)
+}
+
+func (c context) deleteUsers() {
+	for _, name := range c.users {
+		err := u.Delete(c.client, c.instanceID, name).ExtractErr()
+		c.AssertNoErr(err)
+		c.Logf("Deleted user %s", name)
+	}
+}
diff --git a/vendor/github.com/rackspace/gophercloud/acceptance/openstack/identity/v2/extension_test.go b/vendor/github.com/rackspace/gophercloud/acceptance/openstack/identity/v2/extension_test.go
new file mode 100644
index 000000000..d1fa1e3dc
--- /dev/null
+++ b/vendor/github.com/rackspace/gophercloud/acceptance/openstack/identity/v2/extension_test.go
@@ -0,0 +1,46 @@
+// +build acceptance identity
+
+package v2
+
+import (
+	"testing"
+
+	extensions2 "github.com/rackspace/gophercloud/openstack/identity/v2/extensions"
+	"github.com/rackspace/gophercloud/pagination"
+	th "github.com/rackspace/gophercloud/testhelper"
+)
+
+func TestEnumerateExtensions(t *testing.T) {
+	service := authenticatedClient(t)
+
+	t.Logf("Extensions available on this identity endpoint:")
+	count := 0
+	err := extensions2.List(service).EachPage(func(page pagination.Page) (bool, error) {
+		t.Logf("--- Page %02d ---", count)
+
+		extensions, err := extensions2.ExtractExtensions(page)
+		th.AssertNoErr(t, err)
+
+		for i, ext := range extensions {
+			t.Logf("[%02d] name=[%s] namespace=[%s]", i,
ext.Name, ext.Namespace) + t.Logf(" alias=[%s] updated=[%s]", ext.Alias, ext.Updated) + t.Logf(" description=[%s]", ext.Description) + } + + count++ + return true, nil + }) + th.AssertNoErr(t, err) +} + +func TestGetExtension(t *testing.T) { + service := authenticatedClient(t) + + ext, err := extensions2.Get(service, "OS-KSCRUD").Extract() + th.AssertNoErr(t, err) + + th.CheckEquals(t, "OpenStack Keystone User CRUD", ext.Name) + th.CheckEquals(t, "http://docs.openstack.org/identity/api/ext/OS-KSCRUD/v1.0", ext.Namespace) + th.CheckEquals(t, "OS-KSCRUD", ext.Alias) + th.CheckEquals(t, "OpenStack extensions to Keystone v2.0 API enabling User Operations.", ext.Description) +} diff --git a/vendor/github.com/rackspace/gophercloud/acceptance/openstack/identity/v2/identity_test.go b/vendor/github.com/rackspace/gophercloud/acceptance/openstack/identity/v2/identity_test.go new file mode 100644 index 000000000..96bf1fdad --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/acceptance/openstack/identity/v2/identity_test.go @@ -0,0 +1,47 @@ +// +build acceptance identity + +package v2 + +import ( + "testing" + + "github.com/rackspace/gophercloud" + "github.com/rackspace/gophercloud/openstack" + th "github.com/rackspace/gophercloud/testhelper" +) + +func v2AuthOptions(t *testing.T) gophercloud.AuthOptions { + // Obtain credentials from the environment. + ao, err := openstack.AuthOptionsFromEnv() + th.AssertNoErr(t, err) + + // Trim out unused fields. Prefer authentication by API key to password. + ao.UserID, ao.DomainID, ao.DomainName = "", "", "" + if ao.APIKey != "" { + ao.Password = "" + } + + return ao +} + +func createClient(t *testing.T, auth bool) *gophercloud.ServiceClient { + ao := v2AuthOptions(t) + + provider, err := openstack.NewClient(ao.IdentityEndpoint) + th.AssertNoErr(t, err) + + if auth { + err = openstack.AuthenticateV2(provider, ao) + th.AssertNoErr(t, err) + } + + return openstack.NewIdentityV2(provider) +} + +func unauthenticatedClient(t *testing.T) *gophercloud.ServiceClient { + return createClient(t, false) +} + +func authenticatedClient(t *testing.T) *gophercloud.ServiceClient { + return createClient(t, true) +} diff --git a/vendor/github.com/rackspace/gophercloud/acceptance/openstack/identity/v2/pkg.go b/vendor/github.com/rackspace/gophercloud/acceptance/openstack/identity/v2/pkg.go new file mode 100644 index 000000000..5ec3cc8e8 --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/acceptance/openstack/identity/v2/pkg.go @@ -0,0 +1 @@ +package v2 diff --git a/vendor/github.com/rackspace/gophercloud/acceptance/openstack/identity/v2/role_test.go b/vendor/github.com/rackspace/gophercloud/acceptance/openstack/identity/v2/role_test.go new file mode 100644 index 000000000..ba243fe02 --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/acceptance/openstack/identity/v2/role_test.go @@ -0,0 +1,58 @@ +// +build acceptance identity roles + +package v2 + +import ( + "testing" + + "github.com/rackspace/gophercloud" + "github.com/rackspace/gophercloud/openstack/identity/v2/extensions/admin/roles" + "github.com/rackspace/gophercloud/pagination" + th "github.com/rackspace/gophercloud/testhelper" +) + +func TestRoles(t *testing.T) { + client := authenticatedClient(t) + + tenantID := findTenant(t, client) + userID := createUser(t, client, tenantID) + roleID := listRoles(t, client) + + addUserRole(t, client, tenantID, userID, roleID) + + deleteUserRole(t, client, tenantID, userID, roleID) + + deleteUser(t, client, userID) +} + +func listRoles(t *testing.T, client 
*gophercloud.ServiceClient) string { + var roleID string + + err := roles.List(client).EachPage(func(page pagination.Page) (bool, error) { + roleList, err := roles.ExtractRoles(page) + th.AssertNoErr(t, err) + + for _, role := range roleList { + t.Logf("Listing role: ID [%s] Name [%s]", role.ID, role.Name) + roleID = role.ID + } + + return true, nil + }) + + th.AssertNoErr(t, err) + + return roleID +} + +func addUserRole(t *testing.T, client *gophercloud.ServiceClient, tenantID, userID, roleID string) { + err := roles.AddUserRole(client, tenantID, userID, roleID).ExtractErr() + th.AssertNoErr(t, err) + t.Logf("Added role %s to user %s", roleID, userID) +} + +func deleteUserRole(t *testing.T, client *gophercloud.ServiceClient, tenantID, userID, roleID string) { + err := roles.DeleteUserRole(client, tenantID, userID, roleID).ExtractErr() + th.AssertNoErr(t, err) + t.Logf("Removed role %s from user %s", roleID, userID) +} diff --git a/vendor/github.com/rackspace/gophercloud/acceptance/openstack/identity/v2/tenant_test.go b/vendor/github.com/rackspace/gophercloud/acceptance/openstack/identity/v2/tenant_test.go new file mode 100644 index 000000000..578fc483b --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/acceptance/openstack/identity/v2/tenant_test.go @@ -0,0 +1,32 @@ +// +build acceptance identity + +package v2 + +import ( + "testing" + + tenants2 "github.com/rackspace/gophercloud/openstack/identity/v2/tenants" + "github.com/rackspace/gophercloud/pagination" + th "github.com/rackspace/gophercloud/testhelper" +) + +func TestEnumerateTenants(t *testing.T) { + service := authenticatedClient(t) + + t.Logf("Tenants to which your current token grants access:") + count := 0 + err := tenants2.List(service, nil).EachPage(func(page pagination.Page) (bool, error) { + t.Logf("--- Page %02d ---", count) + + tenants, err := tenants2.ExtractTenants(page) + th.AssertNoErr(t, err) + for i, tenant := range tenants { + t.Logf("[%02d] name=[%s] id=[%s] description=[%s] enabled=[%v]", + i, tenant.Name, tenant.ID, tenant.Description, tenant.Enabled) + } + + count++ + return true, nil + }) + th.AssertNoErr(t, err) +} diff --git a/vendor/github.com/rackspace/gophercloud/acceptance/openstack/identity/v2/token_test.go b/vendor/github.com/rackspace/gophercloud/acceptance/openstack/identity/v2/token_test.go new file mode 100644 index 000000000..e01b3b3dd --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/acceptance/openstack/identity/v2/token_test.go @@ -0,0 +1,54 @@ +// +build acceptance identity + +package v2 + +import ( + "testing" + + tokens2 "github.com/rackspace/gophercloud/openstack/identity/v2/tokens" + th "github.com/rackspace/gophercloud/testhelper" +) + +func TestAuthenticateAndValidate(t *testing.T) { + // 1. TestAuthenticate + ao := v2AuthOptions(t) + service := unauthenticatedClient(t) + + // Authenticated! + result := tokens2.Create(service, tokens2.WrapOptions(ao)) + + // Extract and print the token. + token, err := result.ExtractToken() + th.AssertNoErr(t, err) + + t.Logf("Acquired token: [%s]", token.ID) + t.Logf("The token will expire at: [%s]", token.ExpiresAt.String()) + t.Logf("The token is valid for tenant: [%#v]", token.Tenant) + + // Extract and print the service catalog. 
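+	// (Each catalog entry describes one service by name and type; the
+	// entries carry region/URL endpoint pairs, which the loop below prints.)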
+ catalog, err := result.ExtractServiceCatalog() + th.AssertNoErr(t, err) + + t.Logf("Acquired service catalog listing [%d] services", len(catalog.Entries)) + for i, entry := range catalog.Entries { + t.Logf("[%02d]: name=[%s], type=[%s]", i, entry.Name, entry.Type) + for _, endpoint := range entry.Endpoints { + t.Logf(" - region=[%s] publicURL=[%s]", endpoint.Region, endpoint.PublicURL) + } + } + + // 2. TestValidate + client := authenticatedClient(t) + + // Validate Token! + getResult := tokens2.Get(client, token.ID) + + // Extract and print the user. + user, err := getResult.ExtractUser() + th.AssertNoErr(t, err) + + t.Logf("Acquired User: [%s]", user.Name) + t.Logf("The User id: [%s]", user.ID) + t.Logf("The User username: [%s]", user.UserName) + t.Logf("The User roles: [%#v]", user.Roles) +} diff --git a/vendor/github.com/rackspace/gophercloud/acceptance/openstack/identity/v2/user_test.go b/vendor/github.com/rackspace/gophercloud/acceptance/openstack/identity/v2/user_test.go new file mode 100644 index 000000000..fe73d1989 --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/acceptance/openstack/identity/v2/user_test.go @@ -0,0 +1,127 @@ +// +build acceptance identity + +package v2 + +import ( + "strconv" + "testing" + + "github.com/rackspace/gophercloud" + "github.com/rackspace/gophercloud/acceptance/tools" + "github.com/rackspace/gophercloud/openstack/identity/v2/tenants" + "github.com/rackspace/gophercloud/openstack/identity/v2/users" + "github.com/rackspace/gophercloud/pagination" + th "github.com/rackspace/gophercloud/testhelper" +) + +func TestUsers(t *testing.T) { + client := authenticatedClient(t) + + tenantID := findTenant(t, client) + + userID := createUser(t, client, tenantID) + + listUsers(t, client) + + getUser(t, client, userID) + + updateUser(t, client, userID) + + listUserRoles(t, client, tenantID, userID) + + deleteUser(t, client, userID) +} + +func findTenant(t *testing.T, client *gophercloud.ServiceClient) string { + var tenantID string + err := tenants.List(client, nil).EachPage(func(page pagination.Page) (bool, error) { + tenantList, err := tenants.ExtractTenants(page) + th.AssertNoErr(t, err) + + for _, t := range tenantList { + tenantID = t.ID + break + } + + return true, nil + }) + th.AssertNoErr(t, err) + + return tenantID +} + +func createUser(t *testing.T, client *gophercloud.ServiceClient, tenantID string) string { + t.Log("Creating user") + + opts := users.CreateOpts{ + Name: tools.RandomString("user_", 5), + Enabled: users.Disabled, + TenantID: tenantID, + Email: "new_user@foo.com", + } + + user, err := users.Create(client, opts).Extract() + th.AssertNoErr(t, err) + t.Logf("Created user %s on tenant %s", user.ID, tenantID) + + return user.ID +} + +func listUsers(t *testing.T, client *gophercloud.ServiceClient) { + err := users.List(client).EachPage(func(page pagination.Page) (bool, error) { + userList, err := users.ExtractUsers(page) + th.AssertNoErr(t, err) + + for _, user := range userList { + t.Logf("Listing user: ID [%s] Name [%s] Email [%s] Enabled? 
[%s]",
+				user.ID, user.Name, user.Email, strconv.FormatBool(user.Enabled))
+		}
+
+		return true, nil
+	})
+
+	th.AssertNoErr(t, err)
+}
+
+func getUser(t *testing.T, client *gophercloud.ServiceClient, userID string) {
+	_, err := users.Get(client, userID).Extract()
+	th.AssertNoErr(t, err)
+	t.Logf("Getting user %s", userID)
+}
+
+func updateUser(t *testing.T, client *gophercloud.ServiceClient, userID string) {
+	opts := users.UpdateOpts{Name: tools.RandomString("new_name", 5), Email: "new@foo.com"}
+	user, err := users.Update(client, userID, opts).Extract()
+	th.AssertNoErr(t, err)
+	t.Logf("Updated user %s: Name [%s] Email [%s]", userID, user.Name, user.Email)
+}
+
+func listUserRoles(t *testing.T, client *gophercloud.ServiceClient, tenantID, userID string) {
+	count := 0
+	err := users.ListRoles(client, tenantID, userID).EachPage(func(page pagination.Page) (bool, error) {
+		count++
+
+		roleList, err := users.ExtractRoles(page)
+		th.AssertNoErr(t, err)
+
+		t.Logf("Listing roles for user %s", userID)
+
+		for _, r := range roleList {
+			t.Logf("- %s (%s)", r.Name, r.ID)
+		}
+
+		return true, nil
+	})
+
+	if count == 0 {
+		t.Logf("No roles for user %s", userID)
+	}
+
+	th.AssertNoErr(t, err)
+}
+
+func deleteUser(t *testing.T, client *gophercloud.ServiceClient, userID string) {
+	res := users.Delete(client, userID)
+	th.AssertNoErr(t, res.Err)
+	t.Logf("Deleted user %s", userID)
+}
diff --git a/vendor/github.com/rackspace/gophercloud/acceptance/openstack/identity/v3/endpoint_test.go b/vendor/github.com/rackspace/gophercloud/acceptance/openstack/identity/v3/endpoint_test.go
new file mode 100644
index 000000000..ea893c2de
--- /dev/null
+++ b/vendor/github.com/rackspace/gophercloud/acceptance/openstack/identity/v3/endpoint_test.go
@@ -0,0 +1,117 @@
+// +build acceptance
+
+package v3
+
+import (
+	"testing"
+
+	"github.com/rackspace/gophercloud"
+	endpoints3 "github.com/rackspace/gophercloud/openstack/identity/v3/endpoints"
+	services3 "github.com/rackspace/gophercloud/openstack/identity/v3/services"
+	"github.com/rackspace/gophercloud/pagination"
+)
+
+func TestListEndpoints(t *testing.T) {
+	// Create a service client.
+	serviceClient := createAuthenticatedClient(t)
+	if serviceClient == nil {
+		return
+	}
+
+	// Use the service to list all available endpoints.
+	pager := endpoints3.List(serviceClient, endpoints3.ListOpts{})
+	err := pager.EachPage(func(page pagination.Page) (bool, error) {
+		t.Logf("--- Page ---")
+
+		endpoints, err := endpoints3.ExtractEndpoints(page)
+		if err != nil {
+			t.Fatalf("Error extracting endpoints: %v", err)
+		}
+
+		for _, endpoint := range endpoints {
+			t.Logf("Endpoint: %8s %10s %9s %s",
+				endpoint.ID,
+				endpoint.Availability,
+				endpoint.Name,
+				endpoint.URL)
+		}
+
+		return true, nil
+	})
+	if err != nil {
+		t.Errorf("Unexpected error while iterating endpoint pages: %v", err)
+	}
+}
+
+func TestNavigateCatalog(t *testing.T) {
+	// Create a service client.
+	client := createAuthenticatedClient(t)
+	if client == nil {
+		return
+	}
+
+	var compute *services3.Service
+	var endpoint *endpoints3.Endpoint
+
+	// Discover the service we're interested in.
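+	// (Filtering by ServiceType should match exactly one "compute" service;
+	// the page callback below fails the test if it sees a second page or a
+	// page with more than one entry.)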
+	servicePager := services3.List(client, services3.ListOpts{ServiceType: "compute"})
+	err := servicePager.EachPage(func(page pagination.Page) (bool, error) {
+		part, err := services3.ExtractServices(page)
+		if err != nil {
+			return false, err
+		}
+		if compute != nil {
+			t.Fatalf("Expected one service, got more than one page")
+			return false, nil
+		}
+		if len(part) != 1 {
+			t.Fatalf("Expected one service, got %d", len(part))
+			return false, nil
+		}
+
+		compute = &part[0]
+		return true, nil
+	})
+	if err != nil {
+		t.Fatalf("Unexpected error iterating pages: %v", err)
+	}
+
+	if compute == nil {
+		t.Fatalf("No compute service found.")
+	}
+
+	// Enumerate the endpoints available for this service.
+	computePager := endpoints3.List(client, endpoints3.ListOpts{
+		Availability: gophercloud.AvailabilityPublic,
+		ServiceID:    compute.ID,
+	})
+	err = computePager.EachPage(func(page pagination.Page) (bool, error) {
+		part, err := endpoints3.ExtractEndpoints(page)
+		if err != nil {
+			return false, err
+		}
+		if endpoint != nil {
+			t.Fatalf("Expected one endpoint, got more than one page")
+			return false, nil
+		}
+		if len(part) != 1 {
+			t.Fatalf("Expected one endpoint, got %d", len(part))
+			return false, nil
+		}
+
+		endpoint = &part[0]
+		return true, nil
+	})
+	if err != nil {
+		t.Fatalf("Unexpected error iterating endpoint pages: %v", err)
+	}
+
+	if endpoint == nil {
+		t.Fatalf("No endpoint found.")
+	}
+
+	t.Logf("Success. The compute endpoint is at %s.", endpoint.URL)
+}
diff --git a/vendor/github.com/rackspace/gophercloud/acceptance/openstack/identity/v3/identity_test.go b/vendor/github.com/rackspace/gophercloud/acceptance/openstack/identity/v3/identity_test.go
new file mode 100644
index 000000000..ce6434588
--- /dev/null
+++ b/vendor/github.com/rackspace/gophercloud/acceptance/openstack/identity/v3/identity_test.go
@@ -0,0 +1,41 @@
+// +build acceptance
+
+package v3
+
+import (
+	"testing"
+
+	"github.com/rackspace/gophercloud"
+	"github.com/rackspace/gophercloud/openstack"
+	th "github.com/rackspace/gophercloud/testhelper"
+)
+
+func createAuthenticatedClient(t *testing.T) *gophercloud.ServiceClient {
+	// Obtain credentials from the environment.
+	ao, err := openstack.AuthOptionsFromEnv()
+	th.AssertNoErr(t, err)
+
+	// Trim out unused fields.
+	ao.Username, ao.TenantID, ao.TenantName = "", "", ""
+
+	if ao.UserID == "" {
+		t.Logf("Skipping identity v3 tests because no OS_USERID is present.")
+		return nil
+	}
+
+	// Create a client and manually authenticate against v3.
+	providerClient, err := openstack.NewClient(ao.IdentityEndpoint)
+	if err != nil {
+		t.Fatalf("Unable to instantiate client: %v", err)
+	}
+
+	err = openstack.AuthenticateV3(providerClient, ao)
+	if err != nil {
+		t.Fatalf("Unable to authenticate against identity v3: %v", err)
+	}
+
+	// Create a service client.
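+	// (NewIdentityV3 returns the service client directly, so there is no
+	// error value to check here.)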
+ return openstack.NewIdentityV3(providerClient) +} diff --git a/vendor/github.com/rackspace/gophercloud/acceptance/openstack/identity/v3/pkg.go b/vendor/github.com/rackspace/gophercloud/acceptance/openstack/identity/v3/pkg.go new file mode 100644 index 000000000..eac3ae96a --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/acceptance/openstack/identity/v3/pkg.go @@ -0,0 +1 @@ +package v3 diff --git a/vendor/github.com/rackspace/gophercloud/acceptance/openstack/identity/v3/service_test.go b/vendor/github.com/rackspace/gophercloud/acceptance/openstack/identity/v3/service_test.go new file mode 100644 index 000000000..082bd11e7 --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/acceptance/openstack/identity/v3/service_test.go @@ -0,0 +1,36 @@ +// +build acceptance + +package v3 + +import ( + "testing" + + services3 "github.com/rackspace/gophercloud/openstack/identity/v3/services" + "github.com/rackspace/gophercloud/pagination" +) + +func TestListServices(t *testing.T) { + // Create a service client. + serviceClient := createAuthenticatedClient(t) + if serviceClient == nil { + return + } + + // Use the client to list all available services. + pager := services3.List(serviceClient, services3.ListOpts{}) + err := pager.EachPage(func(page pagination.Page) (bool, error) { + parts, err := services3.ExtractServices(page) + if err != nil { + return false, err + } + + t.Logf("--- Page ---") + for _, service := range parts { + t.Logf("Service: %32s %15s %10s %s", service.ID, service.Type, service.Name, *service.Description) + } + return true, nil + }) + if err != nil { + t.Errorf("Unexpected error traversing pages: %v", err) + } +} diff --git a/vendor/github.com/rackspace/gophercloud/acceptance/openstack/identity/v3/token_test.go b/vendor/github.com/rackspace/gophercloud/acceptance/openstack/identity/v3/token_test.go new file mode 100644 index 000000000..4342ade03 --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/acceptance/openstack/identity/v3/token_test.go @@ -0,0 +1,42 @@ +// +build acceptance + +package v3 + +import ( + "testing" + + "github.com/rackspace/gophercloud/openstack" + tokens3 "github.com/rackspace/gophercloud/openstack/identity/v3/tokens" +) + +func TestGetToken(t *testing.T) { + // Obtain credentials from the environment. + ao, err := openstack.AuthOptionsFromEnv() + if err != nil { + t.Fatalf("Unable to acquire credentials: %v", err) + } + + // Trim out unused fields. Skip if we don't have a UserID. + ao.Username, ao.TenantID, ao.TenantName = "", "", "" + if ao.UserID == "" { + t.Logf("Skipping identity v3 tests because no OS_USERID is present.") + return + } + + // Create an unauthenticated client. + provider, err := openstack.NewClient(ao.IdentityEndpoint) + if err != nil { + t.Fatalf("Unable to instantiate client: %v", err) + } + + // Create a service client. + service := openstack.NewIdentityV3(provider) + + // Use the service to create a token. 
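+	// (The final nil argument is presumably the token scope; leaving it nil
+	// requests a token without an explicit project or domain scope.)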
+ token, err := tokens3.Create(service, ao, nil).Extract() + if err != nil { + t.Fatalf("Unable to get token: %v", err) + } + + t.Logf("Acquired token: %s", token.ID) +} diff --git a/vendor/github.com/rackspace/gophercloud/acceptance/openstack/networking/v2/apiversion_test.go b/vendor/github.com/rackspace/gophercloud/acceptance/openstack/networking/v2/apiversion_test.go new file mode 100644 index 000000000..99e1d0118 --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/acceptance/openstack/networking/v2/apiversion_test.go @@ -0,0 +1,51 @@ +// +build acceptance networking + +package v2 + +import ( + "testing" + + "github.com/rackspace/gophercloud/openstack/networking/v2/apiversions" + "github.com/rackspace/gophercloud/pagination" + th "github.com/rackspace/gophercloud/testhelper" +) + +func TestListAPIVersions(t *testing.T) { + Setup(t) + defer Teardown() + + pager := apiversions.ListVersions(Client) + err := pager.EachPage(func(page pagination.Page) (bool, error) { + t.Logf("--- Page ---") + + versions, err := apiversions.ExtractAPIVersions(page) + th.AssertNoErr(t, err) + + for _, v := range versions { + t.Logf("API Version: ID [%s] Status [%s]", v.ID, v.Status) + } + + return true, nil + }) + th.CheckNoErr(t, err) +} + +func TestListAPIResources(t *testing.T) { + Setup(t) + defer Teardown() + + pager := apiversions.ListVersionResources(Client, "v2.0") + err := pager.EachPage(func(page pagination.Page) (bool, error) { + t.Logf("--- Page ---") + + vrs, err := apiversions.ExtractVersionResources(page) + th.AssertNoErr(t, err) + + for _, vr := range vrs { + t.Logf("Network: Name [%s] Collection [%s]", vr.Name, vr.Collection) + } + + return true, nil + }) + th.CheckNoErr(t, err) +} diff --git a/vendor/github.com/rackspace/gophercloud/acceptance/openstack/networking/v2/common.go b/vendor/github.com/rackspace/gophercloud/acceptance/openstack/networking/v2/common.go new file mode 100644 index 000000000..1efac2c08 --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/acceptance/openstack/networking/v2/common.go @@ -0,0 +1,39 @@ +package v2 + +import ( + "os" + "testing" + + "github.com/rackspace/gophercloud" + "github.com/rackspace/gophercloud/openstack" + th "github.com/rackspace/gophercloud/testhelper" +) + +var Client *gophercloud.ServiceClient + +func NewClient() (*gophercloud.ServiceClient, error) { + opts, err := openstack.AuthOptionsFromEnv() + if err != nil { + return nil, err + } + + provider, err := openstack.AuthenticatedClient(opts) + if err != nil { + return nil, err + } + + return openstack.NewNetworkV2(provider, gophercloud.EndpointOpts{ + Name: "neutron", + Region: os.Getenv("OS_REGION_NAME"), + }) +} + +func Setup(t *testing.T) { + client, err := NewClient() + th.AssertNoErr(t, err) + Client = client +} + +func Teardown() { + Client = nil +} diff --git a/vendor/github.com/rackspace/gophercloud/acceptance/openstack/networking/v2/extension_test.go b/vendor/github.com/rackspace/gophercloud/acceptance/openstack/networking/v2/extension_test.go new file mode 100644 index 000000000..edcbba4fd --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/acceptance/openstack/networking/v2/extension_test.go @@ -0,0 +1,45 @@ +// +build acceptance networking + +package v2 + +import ( + "testing" + + "github.com/rackspace/gophercloud/openstack/networking/v2/extensions" + "github.com/rackspace/gophercloud/pagination" + th "github.com/rackspace/gophercloud/testhelper" +) + +func TestListExts(t *testing.T) { + Setup(t) + defer Teardown() + + pager := extensions.List(Client) + err := 
pager.EachPage(func(page pagination.Page) (bool, error) { + t.Logf("--- Page ---") + + exts, err := extensions.ExtractExtensions(page) + th.AssertNoErr(t, err) + + for _, ext := range exts { + t.Logf("Extension: Name [%s] Description [%s]", ext.Name, ext.Description) + } + + return true, nil + }) + th.CheckNoErr(t, err) +} + +func TestGetExt(t *testing.T) { + Setup(t) + defer Teardown() + + ext, err := extensions.Get(Client, "service-type").Extract() + th.AssertNoErr(t, err) + + th.AssertEquals(t, ext.Updated, "2013-01-20T00:00:00-00:00") + th.AssertEquals(t, ext.Name, "Neutron Service Type Management") + th.AssertEquals(t, ext.Namespace, "http://docs.openstack.org/ext/neutron/service-type/api/v1.0") + th.AssertEquals(t, ext.Alias, "service-type") + th.AssertEquals(t, ext.Description, "API for retrieving service providers for Neutron advanced services") +} diff --git a/vendor/github.com/rackspace/gophercloud/acceptance/openstack/networking/v2/extensions/fwaas/firewall_test.go b/vendor/github.com/rackspace/gophercloud/acceptance/openstack/networking/v2/extensions/fwaas/firewall_test.go new file mode 100644 index 000000000..80246b648 --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/acceptance/openstack/networking/v2/extensions/fwaas/firewall_test.go @@ -0,0 +1,116 @@ +// +build acceptance networking fwaas + +package fwaas + +import ( + "testing" + "time" + + "github.com/rackspace/gophercloud" + base "github.com/rackspace/gophercloud/acceptance/openstack/networking/v2" + "github.com/rackspace/gophercloud/openstack/networking/v2/extensions/fwaas/firewalls" + "github.com/rackspace/gophercloud/openstack/networking/v2/extensions/fwaas/policies" + "github.com/rackspace/gophercloud/pagination" + th "github.com/rackspace/gophercloud/testhelper" +) + +func firewallSetup(t *testing.T) string { + base.Setup(t) + return createPolicy(t, &policies.CreateOpts{}) +} + +func firewallTeardown(t *testing.T, policyID string) { + defer base.Teardown() + deletePolicy(t, policyID) +} + +func TestFirewall(t *testing.T) { + policyID := firewallSetup(t) + defer firewallTeardown(t, policyID) + + firewallID := createFirewall(t, &firewalls.CreateOpts{ + Name: "gophercloud test", + Description: "acceptance test", + PolicyID: policyID, + }) + + waitForFirewallToBeActive(t, firewallID) + + listFirewalls(t) + + updateFirewall(t, firewallID, &firewalls.UpdateOpts{ + Description: "acceptance test updated", + }) + + waitForFirewallToBeActive(t, firewallID) + + deleteFirewall(t, firewallID) + + waitForFirewallToBeDeleted(t, firewallID) +} + +func createFirewall(t *testing.T, opts *firewalls.CreateOpts) string { + f, err := firewalls.Create(base.Client, *opts).Extract() + th.AssertNoErr(t, err) + t.Logf("Created firewall: %#v", opts) + return f.ID +} + +func listFirewalls(t *testing.T) { + err := firewalls.List(base.Client, firewalls.ListOpts{}).EachPage(func(page pagination.Page) (bool, error) { + firewallList, err := firewalls.ExtractFirewalls(page) + if err != nil { + t.Errorf("Failed to extract firewalls: %v", err) + return false, err + } + + for _, r := range firewallList { + t.Logf("Listing firewalls: ID [%s]", r.ID) + } + + return true, nil + }) + th.AssertNoErr(t, err) +} + +func updateFirewall(t *testing.T, firewallID string, opts *firewalls.UpdateOpts) { + f, err := firewalls.Update(base.Client, firewallID, *opts).Extract() + th.AssertNoErr(t, err) + t.Logf("Updated firewall ID [%s]", f.ID) +} + +func getFirewall(t *testing.T, firewallID string) *firewalls.Firewall { + f, err := firewalls.Get(base.Client, 
firewallID).Extract()
+	th.AssertNoErr(t, err)
+	t.Logf("Getting firewall ID [%s]", f.ID)
+	return f
+}
+
+func deleteFirewall(t *testing.T, firewallID string) {
+	res := firewalls.Delete(base.Client, firewallID)
+	th.AssertNoErr(t, res.Err)
+	t.Logf("Deleted firewall %s", firewallID)
+}
+
+func waitForFirewallToBeActive(t *testing.T, firewallID string) {
+	for i := 0; i < 10; i++ {
+		fw := getFirewall(t, firewallID)
+		if fw.Status == "ACTIVE" {
+			break
+		}
+		time.Sleep(time.Second)
+	}
+}
+
+func waitForFirewallToBeDeleted(t *testing.T, firewallID string) {
+	for i := 0; i < 10; i++ {
+		err := firewalls.Get(base.Client, firewallID).Err
+		if err != nil {
+			httpStatus, ok := err.(*gophercloud.UnexpectedResponseCodeError)
+			if ok && httpStatus.Actual == 404 {
+				return
+			}
+		}
+		time.Sleep(time.Second)
+	}
+}
diff --git a/vendor/github.com/rackspace/gophercloud/acceptance/openstack/networking/v2/extensions/fwaas/pkg.go b/vendor/github.com/rackspace/gophercloud/acceptance/openstack/networking/v2/extensions/fwaas/pkg.go
new file mode 100644
index 000000000..206bf3313
--- /dev/null
+++ b/vendor/github.com/rackspace/gophercloud/acceptance/openstack/networking/v2/extensions/fwaas/pkg.go
@@ -0,0 +1 @@
+package fwaas
diff --git a/vendor/github.com/rackspace/gophercloud/acceptance/openstack/networking/v2/extensions/fwaas/policy_test.go b/vendor/github.com/rackspace/gophercloud/acceptance/openstack/networking/v2/extensions/fwaas/policy_test.go
new file mode 100644
index 000000000..fdca22e3f
--- /dev/null
+++ b/vendor/github.com/rackspace/gophercloud/acceptance/openstack/networking/v2/extensions/fwaas/policy_test.go
@@ -0,0 +1,107 @@
+// +build acceptance networking fwaas
+
+package fwaas
+
+import (
+	"testing"
+
+	base "github.com/rackspace/gophercloud/acceptance/openstack/networking/v2"
+	"github.com/rackspace/gophercloud/openstack/networking/v2/extensions/fwaas/policies"
+	"github.com/rackspace/gophercloud/openstack/networking/v2/extensions/fwaas/rules"
+	"github.com/rackspace/gophercloud/pagination"
+	th "github.com/rackspace/gophercloud/testhelper"
+)
+
+func firewallPolicySetup(t *testing.T) string {
+	base.Setup(t)
+	return createRule(t, &rules.CreateOpts{
+		Protocol: "tcp",
+		Action:   "allow",
+	})
+}
+
+func firewallPolicyTeardown(t *testing.T, ruleID string) {
+	defer base.Teardown()
+	deleteRule(t, ruleID)
+}
+
+func TestFirewallPolicy(t *testing.T) {
+	ruleID := firewallPolicySetup(t)
+	defer firewallPolicyTeardown(t, ruleID)
+
+	policyID := createPolicy(t, &policies.CreateOpts{
+		Name:        "gophercloud test",
+		Description: "acceptance test",
+		Rules: []string{
+			ruleID,
+		},
+	})
+
+	listPolicies(t)
+
+	updatePolicy(t, policyID, &policies.UpdateOpts{
+		Description: "acceptance test updated",
+	})
+
+	getPolicy(t, policyID)
+
+	removeRuleFromPolicy(t, policyID, ruleID)
+
+	addRuleToPolicy(t, policyID, ruleID)
+
+	deletePolicy(t, policyID)
+}
+
+func createPolicy(t *testing.T, opts *policies.CreateOpts) string {
+	p, err := policies.Create(base.Client, *opts).Extract()
+	th.AssertNoErr(t, err)
+	t.Logf("Created policy: %#v", opts)
+	return p.ID
+}
+
+func listPolicies(t *testing.T) {
+	err := policies.List(base.Client, policies.ListOpts{}).EachPage(func(page pagination.Page) (bool, error) {
+		policyList, err := policies.ExtractPolicies(page)
+		if err != nil {
+			t.Errorf("Failed to extract policies: %v", err)
+			return false, err
+		}
+
+		for _, p := range policyList {
+			t.Logf("Listing policies: ID [%s]", p.ID)
+		}
+
+		return true, nil
+	})
+	th.AssertNoErr(t, err)
+}
+
+func updatePolicy(t *testing.T,
policyID string, opts *policies.UpdateOpts) { + p, err := policies.Update(base.Client, policyID, *opts).Extract() + th.AssertNoErr(t, err) + t.Logf("Updated policy ID [%s]", p.ID) +} + +func removeRuleFromPolicy(t *testing.T, policyID string, ruleID string) { + err := policies.RemoveRule(base.Client, policyID, ruleID) + th.AssertNoErr(t, err) + t.Logf("Removed rule [%s] from policy ID [%s]", ruleID, policyID) +} + +func addRuleToPolicy(t *testing.T, policyID string, ruleID string) { + err := policies.InsertRule(base.Client, policyID, ruleID, "", "") + th.AssertNoErr(t, err) + t.Logf("Inserted rule [%s] into policy ID [%s]", ruleID, policyID) +} + +func getPolicy(t *testing.T, policyID string) { + p, err := policies.Get(base.Client, policyID).Extract() + th.AssertNoErr(t, err) + t.Logf("Getting policy ID [%s]", p.ID) +} + +func deletePolicy(t *testing.T, policyID string) { + res := policies.Delete(base.Client, policyID) + th.AssertNoErr(t, res.Err) + t.Logf("Deleted policy %s", policyID) +} diff --git a/vendor/github.com/rackspace/gophercloud/acceptance/openstack/networking/v2/extensions/fwaas/rule_test.go b/vendor/github.com/rackspace/gophercloud/acceptance/openstack/networking/v2/extensions/fwaas/rule_test.go new file mode 100644 index 000000000..144aa0998 --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/acceptance/openstack/networking/v2/extensions/fwaas/rule_test.go @@ -0,0 +1,84 @@ +// +build acceptance networking fwaas + +package fwaas + +import ( + "testing" + + base "github.com/rackspace/gophercloud/acceptance/openstack/networking/v2" + "github.com/rackspace/gophercloud/openstack/networking/v2/extensions/fwaas/rules" + "github.com/rackspace/gophercloud/pagination" + th "github.com/rackspace/gophercloud/testhelper" +) + +func TestFirewallRules(t *testing.T) { + base.Setup(t) + defer base.Teardown() + + ruleID := createRule(t, &rules.CreateOpts{ + Name: "gophercloud_test", + Description: "acceptance test", + Protocol: "tcp", + Action: "allow", + DestinationIPAddress: "192.168.0.0/24", + DestinationPort: "22", + }) + + listRules(t) + + destinationIPAddress := "192.168.1.0/24" + destinationPort := "" + sourcePort := "1234" + + updateRule(t, ruleID, &rules.UpdateOpts{ + DestinationIPAddress: &destinationIPAddress, + DestinationPort: &destinationPort, + SourcePort: &sourcePort, + }) + + getRule(t, ruleID) + + deleteRule(t, ruleID) +} + +func createRule(t *testing.T, opts *rules.CreateOpts) string { + r, err := rules.Create(base.Client, *opts).Extract() + th.AssertNoErr(t, err) + t.Logf("Created rule: %#v", opts) + return r.ID +} + +func listRules(t *testing.T) { + err := rules.List(base.Client, rules.ListOpts{}).EachPage(func(page pagination.Page) (bool, error) { + ruleList, err := rules.ExtractRules(page) + if err != nil { + t.Errorf("Failed to extract rules: %v", err) + return false, err + } + + for _, r := range ruleList { + t.Logf("Listing rules: ID [%s]", r.ID) + } + + return true, nil + }) + th.AssertNoErr(t, err) +} + +func updateRule(t *testing.T, ruleID string, opts *rules.UpdateOpts) { + r, err := rules.Update(base.Client, ruleID, *opts).Extract() + th.AssertNoErr(t, err) + t.Logf("Updated rule ID [%s]", r.ID) +} + +func getRule(t *testing.T, ruleID string) { + r, err := rules.Get(base.Client, ruleID).Extract() + th.AssertNoErr(t, err) + t.Logf("Getting rule ID [%s]", r.ID) +} + +func deleteRule(t *testing.T, ruleID string) { + res := rules.Delete(base.Client, ruleID) + th.AssertNoErr(t, res.Err) + t.Logf("Deleted rule %s", ruleID) +} diff --git 
a/vendor/github.com/rackspace/gophercloud/acceptance/openstack/networking/v2/extensions/layer3_test.go b/vendor/github.com/rackspace/gophercloud/acceptance/openstack/networking/v2/extensions/layer3_test.go new file mode 100644 index 000000000..63e0be39d --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/acceptance/openstack/networking/v2/extensions/layer3_test.go @@ -0,0 +1,300 @@ +// +build acceptance networking layer3ext + +package extensions + +import ( + "testing" + + base "github.com/rackspace/gophercloud/acceptance/openstack/networking/v2" + "github.com/rackspace/gophercloud/openstack/networking/v2/extensions/external" + "github.com/rackspace/gophercloud/openstack/networking/v2/extensions/layer3/floatingips" + "github.com/rackspace/gophercloud/openstack/networking/v2/extensions/layer3/routers" + "github.com/rackspace/gophercloud/openstack/networking/v2/networks" + "github.com/rackspace/gophercloud/openstack/networking/v2/ports" + "github.com/rackspace/gophercloud/openstack/networking/v2/subnets" + "github.com/rackspace/gophercloud/pagination" + th "github.com/rackspace/gophercloud/testhelper" +) + +const ( + cidr1 = "10.0.0.1/24" + cidr2 = "20.0.0.1/24" +) + +func TestAll(t *testing.T) { + base.Setup(t) + defer base.Teardown() + + testRouter(t) + testFloatingIP(t) +} + +func testRouter(t *testing.T) { + // Setup: Create network + networkID := createNetwork(t) + + // Create router + routerID := createRouter(t, networkID) + + // Lists routers + listRouters(t) + + // Update router + updateRouter(t, routerID) + + // Get router + getRouter(t, routerID) + + // Create new subnet. Note: this subnet will be deleted when networkID is deleted + subnetID := createSubnet(t, networkID, cidr2) + + // Add interface + addInterface(t, routerID, subnetID) + + // Remove interface + removeInterface(t, routerID, subnetID) + + // Delete router + deleteRouter(t, routerID) + + // Cleanup + deleteNetwork(t, networkID) +} + +func testFloatingIP(t *testing.T) { + // Setup external network + extNetworkID := createNetwork(t) + + // Setup internal network, subnet and port + intNetworkID, subnetID, portID := createInternalTopology(t) + + // Now the important part: we need to allow the external network to talk to + // the internal subnet. For this we need a router that has an interface to + // the internal subnet. 
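+	// (bridgeIntSubnetWithExtNetwork, defined below, does that in two steps:
+	// it creates a router whose gateway is the external network, then
+	// attaches the internal subnet as a router interface.)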
+ routerID := bridgeIntSubnetWithExtNetwork(t, extNetworkID, subnetID) + + // Create floating IP + ipID := createFloatingIP(t, extNetworkID, portID) + + // Get floating IP + getFloatingIP(t, ipID) + + // Update floating IP + updateFloatingIP(t, ipID, portID) + + // Delete floating IP + deleteFloatingIP(t, ipID) + + // Remove the internal subnet interface + removeInterface(t, routerID, subnetID) + + // Delete router and external network + deleteRouter(t, routerID) + deleteNetwork(t, extNetworkID) + + // Delete internal port and network + deletePort(t, portID) + deleteNetwork(t, intNetworkID) +} + +func createNetwork(t *testing.T) string { + t.Logf("Creating a network") + + asu := true + opts := external.CreateOpts{ + Parent: networks.CreateOpts{Name: "sample_network", AdminStateUp: &asu}, + External: true, + } + n, err := networks.Create(base.Client, opts).Extract() + + th.AssertNoErr(t, err) + + if n.ID == "" { + t.Fatalf("No ID returned when creating a network") + } + + createSubnet(t, n.ID, cidr1) + + t.Logf("Network created: ID [%s]", n.ID) + + return n.ID +} + +func deleteNetwork(t *testing.T, networkID string) { + t.Logf("Deleting network %s", networkID) + networks.Delete(base.Client, networkID) +} + +func deletePort(t *testing.T, portID string) { + t.Logf("Deleting port %s", portID) + ports.Delete(base.Client, portID) +} + +func createInternalTopology(t *testing.T) (string, string, string) { + t.Logf("Creating an internal network (for port)") + opts := networks.CreateOpts{Name: "internal_network"} + n, err := networks.Create(base.Client, opts).Extract() + th.AssertNoErr(t, err) + + // A subnet is also needed + subnetID := createSubnet(t, n.ID, cidr2) + + t.Logf("Creating an internal port on network %s", n.ID) + p, err := ports.Create(base.Client, ports.CreateOpts{ + NetworkID: n.ID, + Name: "fixed_internal_port", + }).Extract() + th.AssertNoErr(t, err) + + return n.ID, subnetID, p.ID +} + +func bridgeIntSubnetWithExtNetwork(t *testing.T, networkID, subnetID string) string { + // Create router with external gateway info + routerID := createRouter(t, networkID) + + // Add interface for internal subnet + addInterface(t, routerID, subnetID) + + return routerID +} + +func createSubnet(t *testing.T, networkID, cidr string) string { + t.Logf("Creating a subnet for network %s", networkID) + + iFalse := false + s, err := subnets.Create(base.Client, subnets.CreateOpts{ + NetworkID: networkID, + CIDR: cidr, + IPVersion: subnets.IPv4, + Name: "my_subnet", + EnableDHCP: &iFalse, + }).Extract() + + th.AssertNoErr(t, err) + + t.Logf("Subnet created: ID [%s]", s.ID) + + return s.ID +} + +func createRouter(t *testing.T, networkID string) string { + t.Logf("Creating a router for network %s", networkID) + + asu := false + gwi := routers.GatewayInfo{NetworkID: networkID} + r, err := routers.Create(base.Client, routers.CreateOpts{ + Name: "foo_router", + AdminStateUp: &asu, + GatewayInfo: &gwi, + }).Extract() + + th.AssertNoErr(t, err) + + if r.ID == "" { + t.Fatalf("No ID returned when creating a router") + } + + t.Logf("Router created: ID [%s]", r.ID) + + return r.ID +} + +func listRouters(t *testing.T) { + pager := routers.List(base.Client, routers.ListOpts{}) + + err := pager.EachPage(func(page pagination.Page) (bool, error) { + routerList, err := routers.ExtractRouters(page) + th.AssertNoErr(t, err) + + for _, r := range routerList { + t.Logf("Listing router: ID [%s] Name [%s] Status [%s] GatewayInfo [%#v]", + r.ID, r.Name, r.Status, r.GatewayInfo) + } + + return true, nil + }) + + th.AssertNoErr(t, 
err) +} + +func updateRouter(t *testing.T, routerID string) { + _, err := routers.Update(base.Client, routerID, routers.UpdateOpts{ + Name: "another_name", + }).Extract() + + th.AssertNoErr(t, err) +} + +func getRouter(t *testing.T, routerID string) { + r, err := routers.Get(base.Client, routerID).Extract() + + th.AssertNoErr(t, err) + + t.Logf("Getting router: ID [%s] Name [%s] Status [%s]", r.ID, r.Name, r.Status) +} + +func addInterface(t *testing.T, routerID, subnetID string) { + ir, err := routers.AddInterface(base.Client, routerID, routers.InterfaceOpts{SubnetID: subnetID}).Extract() + + th.AssertNoErr(t, err) + + t.Logf("Interface added to router %s: SubnetID [%s] PortID [%s]", routerID, ir.SubnetID, ir.PortID) +} + +func removeInterface(t *testing.T, routerID, subnetID string) { + ir, err := routers.RemoveInterface(base.Client, routerID, routers.InterfaceOpts{SubnetID: subnetID}).Extract() + + th.AssertNoErr(t, err) + + t.Logf("Interface %s removed from %s", ir.ID, routerID) +} + +func deleteRouter(t *testing.T, routerID string) { + t.Logf("Deleting router %s", routerID) + + res := routers.Delete(base.Client, routerID) + + th.AssertNoErr(t, res.Err) +} + +func createFloatingIP(t *testing.T, networkID, portID string) string { + t.Logf("Creating floating IP on network [%s] with port [%s]", networkID, portID) + + opts := floatingips.CreateOpts{ + FloatingNetworkID: networkID, + PortID: portID, + } + + ip, err := floatingips.Create(base.Client, opts).Extract() + + th.AssertNoErr(t, err) + + t.Logf("Floating IP created: ID [%s] Status [%s] Fixed (internal) IP: [%s] Floating (external) IP: [%s]", + ip.ID, ip.Status, ip.FixedIP, ip.FloatingIP) + + return ip.ID +} + +func getFloatingIP(t *testing.T, ipID string) { + ip, err := floatingips.Get(base.Client, ipID).Extract() + th.AssertNoErr(t, err) + + t.Logf("Getting floating IP: ID [%s] Status [%s]", ip.ID, ip.Status) +} + +func updateFloatingIP(t *testing.T, ipID, portID string) { + t.Logf("Disassociate all ports from IP %s", ipID) + _, err := floatingips.Update(base.Client, ipID, floatingips.UpdateOpts{PortID: ""}).Extract() + th.AssertNoErr(t, err) + + t.Logf("Re-associate the port %s", portID) + _, err = floatingips.Update(base.Client, ipID, floatingips.UpdateOpts{PortID: portID}).Extract() + th.AssertNoErr(t, err) +} + +func deleteFloatingIP(t *testing.T, ipID string) { + t.Logf("Deleting IP %s", ipID) + res := floatingips.Delete(base.Client, ipID) + th.AssertNoErr(t, res.Err) +} diff --git a/vendor/github.com/rackspace/gophercloud/acceptance/openstack/networking/v2/extensions/lbaas/common.go b/vendor/github.com/rackspace/gophercloud/acceptance/openstack/networking/v2/extensions/lbaas/common.go new file mode 100644 index 000000000..27dfe5f8b --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/acceptance/openstack/networking/v2/extensions/lbaas/common.go @@ -0,0 +1,78 @@ +package lbaas + +import ( + "testing" + + base "github.com/rackspace/gophercloud/acceptance/openstack/networking/v2" + "github.com/rackspace/gophercloud/openstack/networking/v2/extensions/lbaas/monitors" + "github.com/rackspace/gophercloud/openstack/networking/v2/extensions/lbaas/pools" + "github.com/rackspace/gophercloud/openstack/networking/v2/networks" + "github.com/rackspace/gophercloud/openstack/networking/v2/subnets" + th "github.com/rackspace/gophercloud/testhelper" +) + +func SetupTopology(t *testing.T) (string, string) { + // create network + n, err := networks.Create(base.Client, networks.CreateOpts{Name: "tmp_network"}).Extract() + th.AssertNoErr(t, 
err) + + t.Logf("Created network %s", n.ID) + + // create subnet + s, err := subnets.Create(base.Client, subnets.CreateOpts{ + NetworkID: n.ID, + CIDR: "192.168.199.0/24", + IPVersion: subnets.IPv4, + Name: "tmp_subnet", + }).Extract() + th.AssertNoErr(t, err) + + t.Logf("Created subnet %s", s.ID) + + return n.ID, s.ID +} + +func DeleteTopology(t *testing.T, networkID string) { + res := networks.Delete(base.Client, networkID) + th.AssertNoErr(t, res.Err) + t.Logf("Deleted network %s", networkID) +} + +func CreatePool(t *testing.T, subnetID string) string { + p, err := pools.Create(base.Client, pools.CreateOpts{ + LBMethod: pools.LBMethodRoundRobin, + Protocol: "HTTP", + Name: "tmp_pool", + SubnetID: subnetID, + }).Extract() + + th.AssertNoErr(t, err) + + t.Logf("Created pool %s", p.ID) + + return p.ID +} + +func DeletePool(t *testing.T, poolID string) { + res := pools.Delete(base.Client, poolID) + th.AssertNoErr(t, res.Err) + t.Logf("Deleted pool %s", poolID) +} + +func CreateMonitor(t *testing.T) string { + m, err := monitors.Create(base.Client, monitors.CreateOpts{ + Delay: 10, + Timeout: 10, + MaxRetries: 3, + Type: monitors.TypeHTTP, + ExpectedCodes: "200", + URLPath: "/login", + HTTPMethod: "GET", + }).Extract() + + th.AssertNoErr(t, err) + + t.Logf("Created monitor ID [%s]", m.ID) + + return m.ID +} diff --git a/vendor/github.com/rackspace/gophercloud/acceptance/openstack/networking/v2/extensions/lbaas/member_test.go b/vendor/github.com/rackspace/gophercloud/acceptance/openstack/networking/v2/extensions/lbaas/member_test.go new file mode 100644 index 000000000..9b60582d1 --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/acceptance/openstack/networking/v2/extensions/lbaas/member_test.go @@ -0,0 +1,95 @@ +// +build acceptance networking lbaas lbaasmember + +package lbaas + +import ( + "testing" + + base "github.com/rackspace/gophercloud/acceptance/openstack/networking/v2" + "github.com/rackspace/gophercloud/openstack/networking/v2/extensions/lbaas/members" + "github.com/rackspace/gophercloud/pagination" + th "github.com/rackspace/gophercloud/testhelper" +) + +func TestMembers(t *testing.T) { + base.Setup(t) + defer base.Teardown() + + // setup + networkID, subnetID := SetupTopology(t) + poolID := CreatePool(t, subnetID) + + // create member + memberID := createMember(t, poolID) + + // list members + listMembers(t) + + // update member + updateMember(t, memberID) + + // get member + getMember(t, memberID) + + // delete member + deleteMember(t, memberID) + + // teardown + DeletePool(t, poolID) + DeleteTopology(t, networkID) +} + +func createMember(t *testing.T, poolID string) string { + m, err := members.Create(base.Client, members.CreateOpts{ + Address: "192.168.199.1", + ProtocolPort: 8080, + PoolID: poolID, + }).Extract() + + th.AssertNoErr(t, err) + + t.Logf("Created member: ID [%s] Status [%s] Weight [%d] Address [%s] Port [%d]", + m.ID, m.Status, m.Weight, m.Address, m.ProtocolPort) + + return m.ID +} + +func listMembers(t *testing.T) { + err := members.List(base.Client, members.ListOpts{}).EachPage(func(page pagination.Page) (bool, error) { + memberList, err := members.ExtractMembers(page) + if err != nil { + t.Errorf("Failed to extract members: %v", err) + return false, err + } + + for _, m := range memberList { + t.Logf("Listing member: ID [%s] Status [%s]", m.ID, m.Status) + } + + return true, nil + }) + + th.AssertNoErr(t, err) +} + +func updateMember(t *testing.T, memberID string) { + m, err := members.Update(base.Client, memberID, members.UpdateOpts{AdminStateUp: 
true}).Extract() + + th.AssertNoErr(t, err) + + t.Logf("Updated member ID [%s]", m.ID) +} + +func getMember(t *testing.T, memberID string) { + m, err := members.Get(base.Client, memberID).Extract() + + th.AssertNoErr(t, err) + + t.Logf("Getting member ID [%s]", m.ID) +} + +func deleteMember(t *testing.T, memberID string) { + res := members.Delete(base.Client, memberID) + th.AssertNoErr(t, res.Err) + t.Logf("Deleted member %s", memberID) +} diff --git a/vendor/github.com/rackspace/gophercloud/acceptance/openstack/networking/v2/extensions/lbaas/monitor_test.go b/vendor/github.com/rackspace/gophercloud/acceptance/openstack/networking/v2/extensions/lbaas/monitor_test.go new file mode 100644 index 000000000..9056fff67 --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/acceptance/openstack/networking/v2/extensions/lbaas/monitor_test.go @@ -0,0 +1,77 @@ +// +build acceptance networking lbaas lbaasmonitor + +package lbaas + +import ( + "testing" + + base "github.com/rackspace/gophercloud/acceptance/openstack/networking/v2" + "github.com/rackspace/gophercloud/openstack/networking/v2/extensions/lbaas/monitors" + "github.com/rackspace/gophercloud/pagination" + th "github.com/rackspace/gophercloud/testhelper" +) + +func TestMonitors(t *testing.T) { + base.Setup(t) + defer base.Teardown() + + // create monitor + monitorID := CreateMonitor(t) + + // list monitors + listMonitors(t) + + // update monitor + updateMonitor(t, monitorID) + + // get monitor + getMonitor(t, monitorID) + + // delete monitor + deleteMonitor(t, monitorID) +} + +func listMonitors(t *testing.T) { + err := monitors.List(base.Client, monitors.ListOpts{}).EachPage(func(page pagination.Page) (bool, error) { + monitorList, err := monitors.ExtractMonitors(page) + if err != nil { + t.Errorf("Failed to extract monitors: %v", err) + return false, err + } + + for _, m := range monitorList { + t.Logf("Listing monitor: ID [%s] Type [%s] Delay [%ds] Timeout [%d] Retries [%d] Status [%s]", + m.ID, m.Type, m.Delay, m.Timeout, m.MaxRetries, m.Status) + } + + return true, nil + }) + + th.AssertNoErr(t, err) +} + +func updateMonitor(t *testing.T, monitorID string) { + opts := monitors.UpdateOpts{Delay: 10, Timeout: 10, MaxRetries: 3} + m, err := monitors.Update(base.Client, monitorID, opts).Extract() + + th.AssertNoErr(t, err) + + t.Logf("Updated monitor ID [%s]", m.ID) +} + +func getMonitor(t *testing.T, monitorID string) { + m, err := monitors.Get(base.Client, monitorID).Extract() + + th.AssertNoErr(t, err) + + t.Logf("Getting monitor ID [%s]: URL path [%s] HTTP Method [%s] Accepted codes [%s]", + m.ID, m.URLPath, m.HTTPMethod, m.ExpectedCodes) +} + +func deleteMonitor(t *testing.T, monitorID string) { + res := monitors.Delete(base.Client, monitorID) + + th.AssertNoErr(t, res.Err) + + t.Logf("Deleted monitor %s", monitorID) +} diff --git a/vendor/github.com/rackspace/gophercloud/acceptance/openstack/networking/v2/extensions/lbaas/pkg.go b/vendor/github.com/rackspace/gophercloud/acceptance/openstack/networking/v2/extensions/lbaas/pkg.go new file mode 100644 index 000000000..f5a7df7b7 --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/acceptance/openstack/networking/v2/extensions/lbaas/pkg.go @@ -0,0 +1 @@ +package lbaas diff --git a/vendor/github.com/rackspace/gophercloud/acceptance/openstack/networking/v2/extensions/lbaas/pool_test.go b/vendor/github.com/rackspace/gophercloud/acceptance/openstack/networking/v2/extensions/lbaas/pool_test.go new file mode 100644 index 000000000..81940649c --- /dev/null +++ 
b/vendor/github.com/rackspace/gophercloud/acceptance/openstack/networking/v2/extensions/lbaas/pool_test.go
@@ -0,0 +1,98 @@
+// +build acceptance networking lbaas lbaaspool
+
+package lbaas
+
+import (
+	"testing"
+
+	base "github.com/rackspace/gophercloud/acceptance/openstack/networking/v2"
+	"github.com/rackspace/gophercloud/openstack/networking/v2/extensions/lbaas/pools"
+	"github.com/rackspace/gophercloud/pagination"
+	th "github.com/rackspace/gophercloud/testhelper"
+)
+
+func TestPools(t *testing.T) {
+	base.Setup(t)
+	defer base.Teardown()
+
+	// setup
+	networkID, subnetID := SetupTopology(t)
+
+	// create pool
+	poolID := CreatePool(t, subnetID)
+
+	// list pools
+	listPools(t)
+
+	// update pool
+	updatePool(t, poolID)
+
+	// get pool
+	getPool(t, poolID)
+
+	// create monitor
+	monitorID := CreateMonitor(t)
+
+	// associate health monitor
+	associateMonitor(t, poolID, monitorID)
+
+	// disassociate health monitor
+	disassociateMonitor(t, poolID, monitorID)
+
+	// delete pool
+	DeletePool(t, poolID)
+
+	// teardown
+	DeleteTopology(t, networkID)
+}
+
+func listPools(t *testing.T) {
+	err := pools.List(base.Client, pools.ListOpts{}).EachPage(func(page pagination.Page) (bool, error) {
+		poolList, err := pools.ExtractPools(page)
+		if err != nil {
+			t.Errorf("Failed to extract pools: %v", err)
+			return false, err
+		}
+
+		for _, p := range poolList {
+			t.Logf("Listing pool: ID [%s] Name [%s] Status [%s] LB algorithm [%s]", p.ID, p.Name, p.Status, p.LBMethod)
+		}
+
+		return true, nil
+	})
+
+	th.AssertNoErr(t, err)
+}
+
+func updatePool(t *testing.T, poolID string) {
+	opts := pools.UpdateOpts{Name: "SuperPool", LBMethod: pools.LBMethodLeastConnections}
+	p, err := pools.Update(base.Client, poolID, opts).Extract()
+
+	th.AssertNoErr(t, err)
+
+	t.Logf("Updated pool ID [%s]", p.ID)
+}
+
+func getPool(t *testing.T, poolID string) {
+	p, err := pools.Get(base.Client, poolID).Extract()
+
+	th.AssertNoErr(t, err)
+
+	t.Logf("Getting pool ID [%s]", p.ID)
+}
+
+func associateMonitor(t *testing.T, poolID, monitorID string) {
+	res := pools.AssociateMonitor(base.Client, poolID, monitorID)
+
+	th.AssertNoErr(t, res.Err)
+
+	t.Logf("Associated pool %s with monitor %s", poolID, monitorID)
+}
+
+func disassociateMonitor(t *testing.T, poolID, monitorID string) {
+	res := pools.DisassociateMonitor(base.Client, poolID, monitorID)
+
+	th.AssertNoErr(t, res.Err)
+
+	t.Logf("Disassociated monitor %s from pool %s", monitorID, poolID)
+}
diff --git a/vendor/github.com/rackspace/gophercloud/acceptance/openstack/networking/v2/extensions/lbaas/vip_test.go b/vendor/github.com/rackspace/gophercloud/acceptance/openstack/networking/v2/extensions/lbaas/vip_test.go
new file mode 100644
index 000000000..c8dff2d93
--- /dev/null
+++ b/vendor/github.com/rackspace/gophercloud/acceptance/openstack/networking/v2/extensions/lbaas/vip_test.go
@@ -0,0 +1,101 @@
+// +build acceptance networking lbaas lbaasvip
+
+package lbaas
+
+import (
+	"testing"
+
+	base "github.com/rackspace/gophercloud/acceptance/openstack/networking/v2"
+	"github.com/rackspace/gophercloud/openstack/networking/v2/extensions/lbaas/vips"
+	"github.com/rackspace/gophercloud/pagination"
+	th "github.com/rackspace/gophercloud/testhelper"
+)
+
+func TestVIPs(t *testing.T) {
+	base.Setup(t)
+	defer base.Teardown()
+
+	// setup
+	networkID, subnetID := SetupTopology(t)
+	poolID := CreatePool(t, subnetID)
+
+	// create VIP
+	VIPID := createVIP(t, subnetID, poolID)
+
+	// list VIPs
+	listVIPs(t)
+
+	// update VIP
+	updateVIP(t, VIPID)
+
+	// get VIP
+	getVIP(t, VIPID)
+
+	// delete VIP
+	deleteVIP(t, VIPID)
+
+	// teardown
+	DeletePool(t, poolID)
+	DeleteTopology(t, networkID)
+}
+
+func createVIP(t *testing.T, subnetID, poolID string) string {
+	p, err := vips.Create(base.Client, vips.CreateOpts{
+		Protocol:     "HTTP",
+		Name:         "New_VIP",
+		AdminStateUp: vips.Up,
+		SubnetID:     subnetID,
+		PoolID:       poolID,
+		ProtocolPort: 80,
+	}).Extract()
+
+	th.AssertNoErr(t, err)
+
+	t.Logf("Created VIP %s", p.ID)
+
+	return p.ID
+}
+
+func listVIPs(t *testing.T) {
+	err := vips.List(base.Client, vips.ListOpts{}).EachPage(func(page pagination.Page) (bool, error) {
+		vipList, err := vips.ExtractVIPs(page)
+		if err != nil {
+			t.Errorf("Failed to extract VIPs: %v", err)
+			return false, err
+		}
+
+		for _, vip := range vipList {
+			t.Logf("Listing VIP: ID [%s] Name [%s] Address [%s] Port [%d] Connection Limit [%d]",
+				vip.ID, vip.Name, vip.Address, vip.ProtocolPort, vip.ConnLimit)
+		}
+
+		return true, nil
+	})
+
+	th.AssertNoErr(t, err)
+}
+
+func updateVIP(t *testing.T, VIPID string) {
+	i1000 := 1000
+	_, err := vips.Update(base.Client, VIPID, vips.UpdateOpts{ConnLimit: &i1000}).Extract()
+
+	th.AssertNoErr(t, err)
+
+	t.Logf("Updated VIP ID [%s]", VIPID)
+}
+
+func getVIP(t *testing.T, VIPID string) {
+	vip, err := vips.Get(base.Client, VIPID).Extract()
+
+	th.AssertNoErr(t, err)
+
+	t.Logf("Getting VIP ID [%s]: Status [%s]", vip.ID, vip.Status)
+}
+
+func deleteVIP(t *testing.T, VIPID string) {
+	res := vips.Delete(base.Client, VIPID)
+
+	th.AssertNoErr(t, res.Err)
+
+	t.Logf("Deleted VIP %s", VIPID)
+}
diff --git a/vendor/github.com/rackspace/gophercloud/acceptance/openstack/networking/v2/extensions/pkg.go b/vendor/github.com/rackspace/gophercloud/acceptance/openstack/networking/v2/extensions/pkg.go
new file mode 100644
index 000000000..aeec0fa75
--- /dev/null
+++ b/vendor/github.com/rackspace/gophercloud/acceptance/openstack/networking/v2/extensions/pkg.go
@@ -0,0 +1 @@
+package extensions
diff --git a/vendor/github.com/rackspace/gophercloud/acceptance/openstack/networking/v2/extensions/provider_test.go b/vendor/github.com/rackspace/gophercloud/acceptance/openstack/networking/v2/extensions/provider_test.go
new file mode 100644
index 000000000..f10c9d9bd
--- /dev/null
+++ b/vendor/github.com/rackspace/gophercloud/acceptance/openstack/networking/v2/extensions/provider_test.go
@@ -0,0 +1,68 @@
+// +build acceptance networking
+
+package extensions
+
+import (
+	"strconv"
+	"testing"
+
+	base "github.com/rackspace/gophercloud/acceptance/openstack/networking/v2"
+	"github.com/rackspace/gophercloud/openstack/networking/v2/networks"
+	"github.com/rackspace/gophercloud/pagination"
+	th "github.com/rackspace/gophercloud/testhelper"
+)
+
+func TestNetworkCRUDOperations(t *testing.T) {
+	base.Setup(t)
+	defer base.Teardown()
+
+	// Create a network
+	n, err := networks.Create(base.Client, networks.CreateOpts{Name: "sample_network", AdminStateUp: networks.Up}).Extract()
+	th.AssertNoErr(t, err)
+	th.AssertEquals(t, n.Name, "sample_network")
+	th.AssertEquals(t, n.AdminStateUp, true)
+	networkID := n.ID
+
+	// List networks
+	pager := networks.List(base.Client, networks.ListOpts{Limit: 2})
+	err = pager.EachPage(func(page pagination.Page) (bool, error) {
+		t.Logf("--- Page ---")
+
+		networkList, err := networks.ExtractNetworks(page)
+		th.AssertNoErr(t, err)
+
+		for _, n := range networkList {
+			t.Logf("Network: ID [%s] Name [%s] Status [%s] Is shared?
[%s]", + n.ID, n.Name, n.Status, strconv.FormatBool(n.Shared)) + } + + return true, nil + }) + th.CheckNoErr(t, err) + + // Get a network + if networkID == "" { + t.Fatalf("In order to retrieve a network, the NetworkID must be set") + } + n, err = networks.Get(base.Client, networkID).Extract() + th.AssertNoErr(t, err) + th.AssertEquals(t, n.Status, "ACTIVE") + th.AssertDeepEquals(t, n.Subnets, []string{}) + th.AssertEquals(t, n.Name, "sample_network") + th.AssertEquals(t, n.AdminStateUp, true) + th.AssertEquals(t, n.Shared, false) + th.AssertEquals(t, n.ID, networkID) + + // Update network + n, err = networks.Update(base.Client, networkID, networks.UpdateOpts{Name: "new_network_name"}).Extract() + th.AssertNoErr(t, err) + th.AssertEquals(t, n.Name, "new_network_name") + + // Delete network + res := networks.Delete(base.Client, networkID) + th.AssertNoErr(t, res.Err) +} + +func TestCreateMultipleNetworks(t *testing.T) { + //networks.CreateMany() +} diff --git a/vendor/github.com/rackspace/gophercloud/acceptance/openstack/networking/v2/extensions/security_test.go b/vendor/github.com/rackspace/gophercloud/acceptance/openstack/networking/v2/extensions/security_test.go new file mode 100644 index 000000000..7d75292f0 --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/acceptance/openstack/networking/v2/extensions/security_test.go @@ -0,0 +1,171 @@ +// +build acceptance networking security + +package extensions + +import ( + "testing" + + base "github.com/rackspace/gophercloud/acceptance/openstack/networking/v2" + "github.com/rackspace/gophercloud/openstack/networking/v2/extensions/security/groups" + "github.com/rackspace/gophercloud/openstack/networking/v2/extensions/security/rules" + "github.com/rackspace/gophercloud/openstack/networking/v2/networks" + "github.com/rackspace/gophercloud/openstack/networking/v2/ports" + "github.com/rackspace/gophercloud/pagination" + th "github.com/rackspace/gophercloud/testhelper" +) + +func TestSecurityGroups(t *testing.T) { + base.Setup(t) + defer base.Teardown() + + // create security group + groupID := createSecGroup(t) + + // delete security group + defer deleteSecGroup(t, groupID) + + // list security group + listSecGroups(t) + + // get security group + getSecGroup(t, groupID) + + // create port with security group + networkID, portID := createPort(t, groupID) + + // teardown + defer deleteNetwork(t, networkID) + + // delete port + defer deletePort(t, portID) +} + +func TestSecurityGroupRules(t *testing.T) { + base.Setup(t) + defer base.Teardown() + + // create security group + groupID := createSecGroup(t) + + defer deleteSecGroup(t, groupID) + + // create security group rule + ruleID := createSecRule(t, groupID) + + // delete security group rule + defer deleteSecRule(t, ruleID) + + // list security group rule + listSecRules(t) + + // get security group rule + getSecRule(t, ruleID) +} + +func createSecGroup(t *testing.T) string { + sg, err := groups.Create(base.Client, groups.CreateOpts{ + Name: "new-webservers", + Description: "security group for webservers", + }).Extract() + + th.AssertNoErr(t, err) + + t.Logf("Created security group %s", sg.ID) + + return sg.ID +} + +func listSecGroups(t *testing.T) { + err := groups.List(base.Client, groups.ListOpts{}).EachPage(func(page pagination.Page) (bool, error) { + list, err := groups.ExtractGroups(page) + if err != nil { + t.Errorf("Failed to extract secgroups: %v", err) + return false, err + } + + for _, sg := range list { + t.Logf("Listing security group: ID [%s] Name [%s]", sg.ID, sg.Name) + } + + 
return true, nil + }) + + th.AssertNoErr(t, err) +} + +func getSecGroup(t *testing.T, id string) { + sg, err := groups.Get(base.Client, id).Extract() + th.AssertNoErr(t, err) + t.Logf("Getting security group: ID [%s] Name [%s] Description [%s]", sg.ID, sg.Name, sg.Description) +} + +func createPort(t *testing.T, groupID string) (string, string) { + n, err := networks.Create(base.Client, networks.CreateOpts{Name: "tmp_network"}).Extract() + th.AssertNoErr(t, err) + t.Logf("Created network %s", n.ID) + + opts := ports.CreateOpts{ + NetworkID: n.ID, + Name: "my_port", + SecurityGroups: []string{groupID}, + } + p, err := ports.Create(base.Client, opts).Extract() + th.AssertNoErr(t, err) + t.Logf("Created port %s with security group %s", p.ID, groupID) + + return n.ID, p.ID +} + +func deleteSecGroup(t *testing.T, groupID string) { + res := groups.Delete(base.Client, groupID) + th.AssertNoErr(t, res.Err) + t.Logf("Deleted security group %s", groupID) +} + +func createSecRule(t *testing.T, groupID string) string { + r, err := rules.Create(base.Client, rules.CreateOpts{ + Direction: "ingress", + PortRangeMin: 80, + EtherType: "IPv4", + PortRangeMax: 80, + Protocol: "tcp", + SecGroupID: groupID, + }).Extract() + + th.AssertNoErr(t, err) + + t.Logf("Created security group rule %s", r.ID) + + return r.ID +} + +func listSecRules(t *testing.T) { + err := rules.List(base.Client, rules.ListOpts{}).EachPage(func(page pagination.Page) (bool, error) { + list, err := rules.ExtractRules(page) + if err != nil { + t.Errorf("Failed to extract sec rules: %v", err) + return false, err + } + + for _, r := range list { + t.Logf("Listing security rule: ID [%s]", r.ID) + } + + return true, nil + }) + + th.AssertNoErr(t, err) +} + +func getSecRule(t *testing.T, id string) { + r, err := rules.Get(base.Client, id).Extract() + th.AssertNoErr(t, err) + t.Logf("Getting security rule: ID [%s] Direction [%s] EtherType [%s] Protocol [%s]", + r.ID, r.Direction, r.EtherType, r.Protocol) +} + +func deleteSecRule(t *testing.T, id string) { + res := rules.Delete(base.Client, id) + th.AssertNoErr(t, res.Err) + t.Logf("Deleted security rule %s", id) +} diff --git a/vendor/github.com/rackspace/gophercloud/acceptance/openstack/networking/v2/network_test.go b/vendor/github.com/rackspace/gophercloud/acceptance/openstack/networking/v2/network_test.go new file mode 100644 index 000000000..be8a3a195 --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/acceptance/openstack/networking/v2/network_test.go @@ -0,0 +1,68 @@ +// +build acceptance networking + +package v2 + +import ( + "strconv" + "testing" + + "github.com/rackspace/gophercloud/openstack/networking/v2/networks" + "github.com/rackspace/gophercloud/pagination" + th "github.com/rackspace/gophercloud/testhelper" +) + +func TestNetworkCRUDOperations(t *testing.T) { + Setup(t) + defer Teardown() + + // Create a network + n, err := networks.Create(Client, networks.CreateOpts{Name: "sample_network", AdminStateUp: networks.Up}).Extract() + th.AssertNoErr(t, err) + defer networks.Delete(Client, n.ID) + th.AssertEquals(t, n.Name, "sample_network") + th.AssertEquals(t, n.AdminStateUp, true) + networkID := n.ID + + // List networks + pager := networks.List(Client, networks.ListOpts{Limit: 2}) + err = pager.EachPage(func(page pagination.Page) (bool, error) { + t.Logf("--- Page ---") + + networkList, err := networks.ExtractNetworks(page) + th.AssertNoErr(t, err) + + for _, n := range networkList { + t.Logf("Network: ID [%s] Name [%s] Status [%s] Is shared? 
[%s]", + n.ID, n.Name, n.Status, strconv.FormatBool(n.Shared)) + } + + return true, nil + }) + th.CheckNoErr(t, err) + + // Get a network + if networkID == "" { + t.Fatalf("In order to retrieve a network, the NetworkID must be set") + } + n, err = networks.Get(Client, networkID).Extract() + th.AssertNoErr(t, err) + th.AssertEquals(t, n.Status, "ACTIVE") + th.AssertDeepEquals(t, n.Subnets, []string{}) + th.AssertEquals(t, n.Name, "sample_network") + th.AssertEquals(t, n.AdminStateUp, true) + th.AssertEquals(t, n.Shared, false) + th.AssertEquals(t, n.ID, networkID) + + // Update network + n, err = networks.Update(Client, networkID, networks.UpdateOpts{Name: "new_network_name"}).Extract() + th.AssertNoErr(t, err) + th.AssertEquals(t, n.Name, "new_network_name") + + // Delete network + res := networks.Delete(Client, networkID) + th.AssertNoErr(t, res.Err) +} + +func TestCreateMultipleNetworks(t *testing.T) { + //networks.CreateMany() +} diff --git a/vendor/github.com/rackspace/gophercloud/acceptance/openstack/networking/v2/pkg.go b/vendor/github.com/rackspace/gophercloud/acceptance/openstack/networking/v2/pkg.go new file mode 100644 index 000000000..5ec3cc8e8 --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/acceptance/openstack/networking/v2/pkg.go @@ -0,0 +1 @@ +package v2 diff --git a/vendor/github.com/rackspace/gophercloud/acceptance/openstack/networking/v2/port_test.go b/vendor/github.com/rackspace/gophercloud/acceptance/openstack/networking/v2/port_test.go new file mode 100644 index 000000000..03e8e2784 --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/acceptance/openstack/networking/v2/port_test.go @@ -0,0 +1,117 @@ +// +build acceptance networking + +package v2 + +import ( + "testing" + + "github.com/rackspace/gophercloud/openstack/networking/v2/networks" + "github.com/rackspace/gophercloud/openstack/networking/v2/ports" + "github.com/rackspace/gophercloud/openstack/networking/v2/subnets" + "github.com/rackspace/gophercloud/pagination" + th "github.com/rackspace/gophercloud/testhelper" +) + +func TestPortCRUD(t *testing.T) { + Setup(t) + defer Teardown() + + // Setup network + t.Log("Setting up network") + networkID, err := createNetwork() + th.AssertNoErr(t, err) + defer networks.Delete(Client, networkID) + + // Setup subnet + t.Logf("Setting up subnet on network %s", networkID) + subnetID, err := createSubnet(networkID) + th.AssertNoErr(t, err) + defer subnets.Delete(Client, subnetID) + + // Create port + t.Logf("Create port based on subnet %s", subnetID) + portID := createPort(t, networkID, subnetID) + + // List ports + t.Logf("Listing all ports") + listPorts(t) + + // Get port + if portID == "" { + t.Fatalf("In order to retrieve a port, the portID must be set") + } + p, err := ports.Get(Client, portID).Extract() + th.AssertNoErr(t, err) + th.AssertEquals(t, p.ID, portID) + + // Update port + p, err = ports.Update(Client, portID, ports.UpdateOpts{Name: "new_port_name"}).Extract() + th.AssertNoErr(t, err) + th.AssertEquals(t, p.Name, "new_port_name") + + // Delete port + res := ports.Delete(Client, portID) + th.AssertNoErr(t, res.Err) +} + +func createPort(t *testing.T, networkID, subnetID string) string { + enable := false + opts := ports.CreateOpts{ + NetworkID: networkID, + Name: "my_port", + AdminStateUp: &enable, + FixedIPs: []ports.IP{ports.IP{SubnetID: subnetID}}, + } + p, err := ports.Create(Client, opts).Extract() + th.AssertNoErr(t, err) + th.AssertEquals(t, p.NetworkID, networkID) + th.AssertEquals(t, p.Name, "my_port") + th.AssertEquals(t, 
p.AdminStateUp, false) + + return p.ID +} + +func listPorts(t *testing.T) { + count := 0 + pager := ports.List(Client, ports.ListOpts{}) + err := pager.EachPage(func(page pagination.Page) (bool, error) { + count++ + t.Logf("--- Page ---") + + portList, err := ports.ExtractPorts(page) + th.AssertNoErr(t, err) + + for _, p := range portList { + t.Logf("Port: ID [%s] Name [%s] Status [%s] MAC addr [%s] Fixed IPs [%#v] Security groups [%#v]", + p.ID, p.Name, p.Status, p.MACAddress, p.FixedIPs, p.SecurityGroups) + } + + return true, nil + }) + + th.CheckNoErr(t, err) + + if count == 0 { + t.Logf("No pages were iterated over when listing ports") + } +} + +func createNetwork() (string, error) { + res, err := networks.Create(Client, networks.CreateOpts{Name: "tmp_network", AdminStateUp: networks.Up}).Extract() + return res.ID, err +} + +func createSubnet(networkID string) (string, error) { + s, err := subnets.Create(Client, subnets.CreateOpts{ + NetworkID: networkID, + CIDR: "192.168.199.0/24", + IPVersion: subnets.IPv4, + Name: "my_subnet", + EnableDHCP: subnets.Down, + }).Extract() + return s.ID, err +} + +func TestPortBatchCreate(t *testing.T) { + // todo +} diff --git a/vendor/github.com/rackspace/gophercloud/acceptance/openstack/networking/v2/subnet_test.go b/vendor/github.com/rackspace/gophercloud/acceptance/openstack/networking/v2/subnet_test.go new file mode 100644 index 000000000..097a303ed --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/acceptance/openstack/networking/v2/subnet_test.go @@ -0,0 +1,86 @@ +// +build acceptance networking + +package v2 + +import ( + "testing" + + "github.com/rackspace/gophercloud/openstack/networking/v2/networks" + "github.com/rackspace/gophercloud/openstack/networking/v2/subnets" + "github.com/rackspace/gophercloud/pagination" + th "github.com/rackspace/gophercloud/testhelper" +) + +func TestList(t *testing.T) { + Setup(t) + defer Teardown() + + pager := subnets.List(Client, subnets.ListOpts{Limit: 2}) + err := pager.EachPage(func(page pagination.Page) (bool, error) { + t.Logf("--- Page ---") + + subnetList, err := subnets.ExtractSubnets(page) + th.AssertNoErr(t, err) + + for _, s := range subnetList { + t.Logf("Subnet: ID [%s] Name [%s] IP Version [%d] CIDR [%s] GatewayIP [%s]", + s.ID, s.Name, s.IPVersion, s.CIDR, s.GatewayIP) + } + + return true, nil + }) + th.CheckNoErr(t, err) +} + +func TestCRUD(t *testing.T) { + Setup(t) + defer Teardown() + + // Setup network + t.Log("Setting up network") + n, err := networks.Create(Client, networks.CreateOpts{Name: "tmp_network", AdminStateUp: networks.Up}).Extract() + th.AssertNoErr(t, err) + networkID := n.ID + defer networks.Delete(Client, networkID) + + // Create subnet + t.Log("Create subnet") + enable := false + opts := subnets.CreateOpts{ + NetworkID: networkID, + CIDR: "192.168.199.0/24", + IPVersion: subnets.IPv4, + Name: "my_subnet", + EnableDHCP: &enable, + } + s, err := subnets.Create(Client, opts).Extract() + th.AssertNoErr(t, err) + + th.AssertEquals(t, s.NetworkID, networkID) + th.AssertEquals(t, s.CIDR, "192.168.199.0/24") + th.AssertEquals(t, s.IPVersion, 4) + th.AssertEquals(t, s.Name, "my_subnet") + th.AssertEquals(t, s.EnableDHCP, false) + subnetID := s.ID + + // Get subnet + t.Log("Getting subnet") + s, err = subnets.Get(Client, subnetID).Extract() + th.AssertNoErr(t, err) + th.AssertEquals(t, s.ID, subnetID) + + // Update subnet + t.Log("Update subnet") + s, err = subnets.Update(Client, subnetID, subnets.UpdateOpts{Name: "new_subnet_name"}).Extract() + th.AssertNoErr(t, err) + 
th.AssertEquals(t, s.Name, "new_subnet_name")
+
+	// Delete subnet
+	t.Log("Delete subnet")
+	res := subnets.Delete(Client, subnetID)
+	th.AssertNoErr(t, res.Err)
+}
+
+func TestBatchCreate(t *testing.T) {
+	// todo
+}
diff --git a/vendor/github.com/rackspace/gophercloud/acceptance/openstack/objectstorage/v1/accounts_test.go b/vendor/github.com/rackspace/gophercloud/acceptance/openstack/objectstorage/v1/accounts_test.go
new file mode 100644
index 000000000..24cc62b4a
--- /dev/null
+++ b/vendor/github.com/rackspace/gophercloud/acceptance/openstack/objectstorage/v1/accounts_test.go
@@ -0,0 +1,50 @@
+// +build acceptance
+
+package v1
+
+import (
+	"strings"
+	"testing"
+
+	"github.com/rackspace/gophercloud/openstack/objectstorage/v1/accounts"
+	th "github.com/rackspace/gophercloud/testhelper"
+)
+
+func TestAccounts(t *testing.T) {
+	// Create a provider client for making the HTTP requests.
+	// See common.go in this directory for more information.
+	client := newClient(t)
+
+	// Update an account's metadata.
+	updateres := accounts.Update(client, accounts.UpdateOpts{Metadata: metadata})
+	t.Logf("Update Account Response: %+v\n", updateres)
+	updateHeaders, err := updateres.Extract()
+	th.AssertNoErr(t, err)
+	t.Logf("Update Account Response Headers: %+v\n", updateHeaders)
+
+	// Defer the deletion of the metadata set above.
+	defer func() {
+		tempMap := make(map[string]string)
+		for k := range metadata {
+			tempMap[k] = ""
+		}
+		updateres = accounts.Update(client, accounts.UpdateOpts{Metadata: tempMap})
+		th.AssertNoErr(t, updateres.Err)
+	}()
+
+	// Extract the custom metadata from the 'Get' response.
+	res := accounts.Get(client, nil)
+
+	h, err := res.Extract()
+	th.AssertNoErr(t, err)
+	t.Logf("Get Account Response Headers: %+v\n", h)
+
+	// Response metadata keys come back title-cased, so compare against the
+	// title-cased form of each key that was set.
+	am, err := res.ExtractMetadata()
+	th.AssertNoErr(t, err)
+	for k := range metadata {
+		if am[strings.Title(k)] != metadata[k] {
+			t.Errorf("Expected custom metadata with key: %s", k)
+			return
+		}
+	}
+}
diff --git a/vendor/github.com/rackspace/gophercloud/acceptance/openstack/objectstorage/v1/common.go b/vendor/github.com/rackspace/gophercloud/acceptance/openstack/objectstorage/v1/common.go
new file mode 100644
index 000000000..1eac681b5
--- /dev/null
+++ b/vendor/github.com/rackspace/gophercloud/acceptance/openstack/objectstorage/v1/common.go
@@ -0,0 +1,28 @@
+// +build acceptance
+
+package v1
+
+import (
+	"os"
+	"testing"
+
+	"github.com/rackspace/gophercloud"
+	"github.com/rackspace/gophercloud/openstack"
+	th "github.com/rackspace/gophercloud/testhelper"
+)
+
+var metadata = map[string]string{"gopher": "cloud"}
+
+func newClient(t *testing.T) *gophercloud.ServiceClient {
+	ao, err := openstack.AuthOptionsFromEnv()
+	th.AssertNoErr(t, err)
+
+	client, err := openstack.AuthenticatedClient(ao)
+	th.AssertNoErr(t, err)
+
+	c, err := openstack.NewObjectStorageV1(client, gophercloud.EndpointOpts{
+		Region: os.Getenv("OS_REGION_NAME"),
+	})
+	th.AssertNoErr(t, err)
+	return c
+}
diff --git a/vendor/github.com/rackspace/gophercloud/acceptance/openstack/objectstorage/v1/containers_test.go b/vendor/github.com/rackspace/gophercloud/acceptance/openstack/objectstorage/v1/containers_test.go
new file mode 100644
index 000000000..8328a4fa6
--- /dev/null
+++ b/vendor/github.com/rackspace/gophercloud/acceptance/openstack/objectstorage/v1/containers_test.go
@@ -0,0 +1,137 @@
+// +build acceptance
+
+package v1
+
+import (
+	"strings"
+	"testing"
+
+	"github.com/rackspace/gophercloud/acceptance/tools"
"github.com/rackspace/gophercloud/openstack/objectstorage/v1/containers" + "github.com/rackspace/gophercloud/pagination" + th "github.com/rackspace/gophercloud/testhelper" +) + +// numContainers is the number of containers to create for testing. +var numContainers = 2 + +func TestContainers(t *testing.T) { + // Create a new client to execute the HTTP requests. See common.go for newClient body. + client := newClient(t) + + // Create a slice of random container names. + cNames := make([]string, numContainers) + for i := 0; i < numContainers; i++ { + cNames[i] = tools.RandomString("gophercloud-test-container-", 8) + } + + // Create numContainers containers. + for i := 0; i < len(cNames); i++ { + res := containers.Create(client, cNames[i], nil) + th.AssertNoErr(t, res.Err) + } + // Delete the numContainers containers after function completion. + defer func() { + for i := 0; i < len(cNames); i++ { + res := containers.Delete(client, cNames[i]) + th.AssertNoErr(t, res.Err) + } + }() + + // List the numContainer names that were just created. To just list those, + // the 'prefix' parameter is used. + err := containers.List(client, &containers.ListOpts{Full: true, Prefix: "gophercloud-test-container-"}).EachPage(func(page pagination.Page) (bool, error) { + containerList, err := containers.ExtractInfo(page) + th.AssertNoErr(t, err) + + for _, n := range containerList { + t.Logf("Container: Name [%s] Count [%d] Bytes [%d]", + n.Name, n.Count, n.Bytes) + } + + return true, nil + }) + th.AssertNoErr(t, err) + + // List the info for the numContainer containers that were created. + err = containers.List(client, &containers.ListOpts{Full: false, Prefix: "gophercloud-test-container-"}).EachPage(func(page pagination.Page) (bool, error) { + containerList, err := containers.ExtractNames(page) + th.AssertNoErr(t, err) + for _, n := range containerList { + t.Logf("Container: Name [%s]", n) + } + + return true, nil + }) + th.AssertNoErr(t, err) + + // Update one of the numContainer container metadata. + updateres := containers.Update(client, cNames[0], &containers.UpdateOpts{Metadata: metadata}) + th.AssertNoErr(t, updateres.Err) + // After the tests are done, delete the metadata that was set. + defer func() { + tempMap := make(map[string]string) + for k := range metadata { + tempMap[k] = "" + } + res := containers.Update(client, cNames[0], &containers.UpdateOpts{Metadata: tempMap}) + th.AssertNoErr(t, res.Err) + }() + + // Retrieve a container's metadata. + cm, err := containers.Get(client, cNames[0]).ExtractMetadata() + th.AssertNoErr(t, err) + for k := range metadata { + if cm[k] != metadata[strings.Title(k)] { + t.Errorf("Expected custom metadata with key: %s", k) + } + } +} + +func TestListAllContainers(t *testing.T) { + // Create a new client to execute the HTTP requests. See common.go for newClient body. + client := newClient(t) + + numContainers := 20 + + // Create a slice of random container names. + cNames := make([]string, numContainers) + for i := 0; i < numContainers; i++ { + cNames[i] = tools.RandomString("gophercloud-test-container-", 8) + } + + // Create numContainers containers. + for i := 0; i < len(cNames); i++ { + res := containers.Create(client, cNames[i], nil) + th.AssertNoErr(t, res.Err) + } + // Delete the numContainers containers after function completion. + defer func() { + for i := 0; i < len(cNames); i++ { + res := containers.Delete(client, cNames[i]) + th.AssertNoErr(t, res.Err) + } + }() + + // List all the numContainer names that were just created. 
To just list those, + // the 'prefix' parameter is used. + allPages, err := containers.List(client, &containers.ListOpts{Full: true, Limit: 5, Prefix: "gophercloud-test-container-"}).AllPages() + th.AssertNoErr(t, err) + containerInfoList, err := containers.ExtractInfo(allPages) + th.AssertNoErr(t, err) + for _, n := range containerInfoList { + t.Logf("Container: Name [%s] Count [%d] Bytes [%d]", + n.Name, n.Count, n.Bytes) + } + th.AssertEquals(t, numContainers, len(containerInfoList)) + + // List the info for all the numContainer containers that were created. + allPages, err = containers.List(client, &containers.ListOpts{Full: false, Limit: 2, Prefix: "gophercloud-test-container-"}).AllPages() + th.AssertNoErr(t, err) + containerNamesList, err := containers.ExtractNames(allPages) + th.AssertNoErr(t, err) + for _, n := range containerNamesList { + t.Logf("Container: Name [%s]", n) + } + th.AssertEquals(t, numContainers, len(containerNamesList)) +} diff --git a/vendor/github.com/rackspace/gophercloud/acceptance/openstack/objectstorage/v1/objects_test.go b/vendor/github.com/rackspace/gophercloud/acceptance/openstack/objectstorage/v1/objects_test.go new file mode 100644 index 000000000..a8de338c3 --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/acceptance/openstack/objectstorage/v1/objects_test.go @@ -0,0 +1,119 @@ +// +build acceptance + +package v1 + +import ( + "bytes" + "strings" + "testing" + + "github.com/rackspace/gophercloud/acceptance/tools" + "github.com/rackspace/gophercloud/openstack/objectstorage/v1/containers" + "github.com/rackspace/gophercloud/openstack/objectstorage/v1/objects" + "github.com/rackspace/gophercloud/pagination" + th "github.com/rackspace/gophercloud/testhelper" +) + +// numObjects is the number of objects to create for testing. +var numObjects = 2 + +func TestObjects(t *testing.T) { + // Create a provider client for executing the HTTP request. + // See common.go for more information. + client := newClient(t) + + // Make a slice of length numObjects to hold the random object names. + oNames := make([]string, numObjects) + for i := 0; i < len(oNames); i++ { + oNames[i] = tools.RandomString("test-object-", 8) + } + + // Create a container to hold the test objects. + cName := tools.RandomString("test-container-", 8) + header, err := containers.Create(client, cName, nil).ExtractHeader() + th.AssertNoErr(t, err) + t.Logf("Create object headers: %+v\n", header) + + // Defer deletion of the container until after testing. + defer func() { + res := containers.Delete(client, cName) + th.AssertNoErr(t, res.Err) + }() + + // Create a slice of buffers to hold the test object content. + oContents := make([]*bytes.Buffer, numObjects) + for i := 0; i < numObjects; i++ { + oContents[i] = bytes.NewBuffer([]byte(tools.RandomString("", 10))) + res := objects.Create(client, cName, oNames[i], oContents[i], nil) + th.AssertNoErr(t, res.Err) + } + // Delete the objects after testing. + defer func() { + for i := 0; i < numObjects; i++ { + res := objects.Delete(client, cName, oNames[i], nil) + th.AssertNoErr(t, res.Err) + } + }() + + ons := make([]string, 0, len(oNames)) + err = objects.List(client, cName, &objects.ListOpts{Full: false, Prefix: "test-object-"}).EachPage(func(page pagination.Page) (bool, error) { + names, err := objects.ExtractNames(page) + th.AssertNoErr(t, err) + ons = append(ons, names...) 
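+		// ons now holds the object names from every page; the count is asserted below.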
+
+		return true, nil
+	})
+	th.AssertNoErr(t, err)
+	th.AssertEquals(t, len(ons), len(oNames))
+
+	ois := make([]objects.Object, 0, len(oNames))
+	err = objects.List(client, cName, &objects.ListOpts{Full: true, Prefix: "test-object-"}).EachPage(func(page pagination.Page) (bool, error) {
+		info, err := objects.ExtractInfo(page)
+		th.AssertNoErr(t, err)
+
+		ois = append(ois, info...)
+
+		return true, nil
+	})
+	th.AssertNoErr(t, err)
+	th.AssertEquals(t, len(ois), len(oNames))
+
+	// Copy the contents of one object to another.
+	copyres := objects.Copy(client, cName, oNames[0], &objects.CopyOpts{Destination: cName + "/" + oNames[1]})
+	th.AssertNoErr(t, copyres.Err)
+
+	// Download one of the objects that was created above.
+	o1Content, err := objects.Download(client, cName, oNames[0], nil).ExtractContent()
+	th.AssertNoErr(t, err)
+
+	// Download the other object that was created above.
+	o2Content, err := objects.Download(client, cName, oNames[1], nil).ExtractContent()
+	th.AssertNoErr(t, err)
+
+	// Compare the two objects' contents to test that the copy worked.
+	th.AssertEquals(t, string(o2Content), string(o1Content))
+
+	// Update an object's metadata.
+	updateres := objects.Update(client, cName, oNames[0], &objects.UpdateOpts{Metadata: metadata})
+	th.AssertNoErr(t, updateres.Err)
+
+	// Delete the object's metadata after testing.
+	defer func() {
+		tempMap := make(map[string]string)
+		for k := range metadata {
+			tempMap[k] = ""
+		}
+		res := objects.Update(client, cName, oNames[0], &objects.UpdateOpts{Metadata: tempMap})
+		th.AssertNoErr(t, res.Err)
+	}()
+
+	// Retrieve an object's metadata; keys come back title-cased.
+	om, err := objects.Get(client, cName, oNames[0], nil).ExtractMetadata()
+	th.AssertNoErr(t, err)
+	for k := range metadata {
+		if om[strings.Title(k)] != metadata[k] {
+			t.Errorf("Expected custom metadata with key: %s", k)
+			return
+		}
+	}
+}
diff --git a/vendor/github.com/rackspace/gophercloud/acceptance/openstack/orchestration/v1/buildinfo_test.go b/vendor/github.com/rackspace/gophercloud/acceptance/openstack/orchestration/v1/buildinfo_test.go
new file mode 100644
index 000000000..05a5e1d67
--- /dev/null
+++ b/vendor/github.com/rackspace/gophercloud/acceptance/openstack/orchestration/v1/buildinfo_test.go
@@ -0,0 +1,20 @@
+// +build acceptance
+
+package v1
+
+import (
+	"testing"
+
+	"github.com/rackspace/gophercloud/openstack/orchestration/v1/buildinfo"
+	th "github.com/rackspace/gophercloud/testhelper"
+)
+
+func TestBuildInfo(t *testing.T) {
+	// Create a provider client for making the HTTP requests.
+	// See common.go in this directory for more information.
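+	// buildinfo.Get issues a single request; Extract returns the parsed build information.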
+ client := newClient(t) + + bi, err := buildinfo.Get(client).Extract() + th.AssertNoErr(t, err) + t.Logf("retrieved build info: %+v\n", bi) +} diff --git a/vendor/github.com/rackspace/gophercloud/acceptance/openstack/orchestration/v1/common.go b/vendor/github.com/rackspace/gophercloud/acceptance/openstack/orchestration/v1/common.go new file mode 100644 index 000000000..2c28dcbcc --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/acceptance/openstack/orchestration/v1/common.go @@ -0,0 +1,44 @@ +// +build acceptance + +package v1 + +import ( + "fmt" + "os" + "testing" + + "github.com/rackspace/gophercloud" + "github.com/rackspace/gophercloud/openstack" + th "github.com/rackspace/gophercloud/testhelper" +) + +var template = fmt.Sprintf(` +{ + "heat_template_version": "2013-05-23", + "description": "Simple template to test heat commands", + "parameters": {}, + "resources": { + "hello_world": { + "type":"OS::Nova::Server", + "properties": { + "flavor": "%s", + "image": "%s", + "user_data": "#!/bin/bash -xv\necho \"hello world\" > /root/hello-world.txt\n" + } + } + } +}`, os.Getenv("OS_FLAVOR_ID"), os.Getenv("OS_IMAGE_ID")) + +func newClient(t *testing.T) *gophercloud.ServiceClient { + ao, err := openstack.AuthOptionsFromEnv() + th.AssertNoErr(t, err) + + client, err := openstack.AuthenticatedClient(ao) + th.AssertNoErr(t, err) + + c, err := openstack.NewOrchestrationV1(client, gophercloud.EndpointOpts{ + Region: os.Getenv("OS_REGION_NAME"), + }) + th.AssertNoErr(t, err) + return c +} diff --git a/vendor/github.com/rackspace/gophercloud/acceptance/openstack/orchestration/v1/hello-compute.json b/vendor/github.com/rackspace/gophercloud/acceptance/openstack/orchestration/v1/hello-compute.json new file mode 100644 index 000000000..11cfc8053 --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/acceptance/openstack/orchestration/v1/hello-compute.json @@ -0,0 +1,13 @@ +{ + "heat_template_version": "2013-05-23", + "resources": { + "compute_instance": { + "type": "OS::Nova::Server", + "properties": { + "flavor": "m1.small", + "image": "cirros-0.3.2-x86_64-disk", + "name": "Single Compute Instance" + } + } + } +} diff --git a/vendor/github.com/rackspace/gophercloud/acceptance/openstack/orchestration/v1/stackevents_test.go b/vendor/github.com/rackspace/gophercloud/acceptance/openstack/orchestration/v1/stackevents_test.go new file mode 100644 index 000000000..e356c86aa --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/acceptance/openstack/orchestration/v1/stackevents_test.go @@ -0,0 +1,68 @@ +// +build acceptance + +package v1 + +import ( + "testing" + + "github.com/rackspace/gophercloud" + "github.com/rackspace/gophercloud/openstack/orchestration/v1/stackevents" + "github.com/rackspace/gophercloud/openstack/orchestration/v1/stacks" + "github.com/rackspace/gophercloud/pagination" + th "github.com/rackspace/gophercloud/testhelper" +) + +func TestStackEvents(t *testing.T) { + // Create a provider client for making the HTTP requests. + // See common.go in this directory for more information. 
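+	// Create a short-lived stack, wait for CREATE_COMPLETE, then list and get its events.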
+ client := newClient(t) + + stackName := "postman_stack_2" + resourceName := "hello_world" + var eventID string + + createOpts := stacks.CreateOpts{ + Name: stackName, + Template: template, + Timeout: 5, + } + stack, err := stacks.Create(client, createOpts).Extract() + th.AssertNoErr(t, err) + t.Logf("Created stack: %+v\n", stack) + defer func() { + err := stacks.Delete(client, stackName, stack.ID).ExtractErr() + th.AssertNoErr(t, err) + t.Logf("Deleted stack (%s)", stackName) + }() + err = gophercloud.WaitFor(60, func() (bool, error) { + getStack, err := stacks.Get(client, stackName, stack.ID).Extract() + if err != nil { + return false, err + } + if getStack.Status == "CREATE_COMPLETE" { + return true, nil + } + return false, nil + }) + + err = stackevents.List(client, stackName, stack.ID, nil).EachPage(func(page pagination.Page) (bool, error) { + events, err := stackevents.ExtractEvents(page) + th.AssertNoErr(t, err) + t.Logf("listed events: %+v\n", events) + eventID = events[0].ID + return false, nil + }) + th.AssertNoErr(t, err) + + err = stackevents.ListResourceEvents(client, stackName, stack.ID, resourceName, nil).EachPage(func(page pagination.Page) (bool, error) { + resourceEvents, err := stackevents.ExtractEvents(page) + th.AssertNoErr(t, err) + t.Logf("listed resource events: %+v\n", resourceEvents) + return false, nil + }) + th.AssertNoErr(t, err) + + event, err := stackevents.Get(client, stackName, stack.ID, resourceName, eventID).Extract() + th.AssertNoErr(t, err) + t.Logf("retrieved event: %+v\n", event) +} diff --git a/vendor/github.com/rackspace/gophercloud/acceptance/openstack/orchestration/v1/stackresources_test.go b/vendor/github.com/rackspace/gophercloud/acceptance/openstack/orchestration/v1/stackresources_test.go new file mode 100644 index 000000000..b614f1cef --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/acceptance/openstack/orchestration/v1/stackresources_test.go @@ -0,0 +1,62 @@ +// +build acceptance + +package v1 + +import ( + "testing" + + "github.com/rackspace/gophercloud" + "github.com/rackspace/gophercloud/openstack/orchestration/v1/stackresources" + "github.com/rackspace/gophercloud/openstack/orchestration/v1/stacks" + "github.com/rackspace/gophercloud/pagination" + th "github.com/rackspace/gophercloud/testhelper" +) + +func TestStackResources(t *testing.T) { + // Create a provider client for making the HTTP requests. + // See common.go in this directory for more information. 
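+	// Same pattern: create a stack, wait for it to build, then exercise the resource Get, Metadata, and List calls.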
+ client := newClient(t) + + stackName := "postman_stack_2" + + createOpts := stacks.CreateOpts{ + Name: stackName, + Template: template, + Timeout: 5, + } + stack, err := stacks.Create(client, createOpts).Extract() + th.AssertNoErr(t, err) + t.Logf("Created stack: %+v\n", stack) + defer func() { + err := stacks.Delete(client, stackName, stack.ID).ExtractErr() + th.AssertNoErr(t, err) + t.Logf("Deleted stack (%s)", stackName) + }() + err = gophercloud.WaitFor(60, func() (bool, error) { + getStack, err := stacks.Get(client, stackName, stack.ID).Extract() + if err != nil { + return false, err + } + if getStack.Status == "CREATE_COMPLETE" { + return true, nil + } + return false, nil + }) + + resourceName := "hello_world" + resource, err := stackresources.Get(client, stackName, stack.ID, resourceName).Extract() + th.AssertNoErr(t, err) + t.Logf("Got stack resource: %+v\n", resource) + + metadata, err := stackresources.Metadata(client, stackName, stack.ID, resourceName).Extract() + th.AssertNoErr(t, err) + t.Logf("Got stack resource metadata: %+v\n", metadata) + + err = stackresources.List(client, stackName, stack.ID, stackresources.ListOpts{}).EachPage(func(page pagination.Page) (bool, error) { + resources, err := stackresources.ExtractResources(page) + th.AssertNoErr(t, err) + t.Logf("resources: %+v\n", resources) + return false, nil + }) + th.AssertNoErr(t, err) +} diff --git a/vendor/github.com/rackspace/gophercloud/acceptance/openstack/orchestration/v1/stacks_test.go b/vendor/github.com/rackspace/gophercloud/acceptance/openstack/orchestration/v1/stacks_test.go new file mode 100644 index 000000000..db31cd40b --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/acceptance/openstack/orchestration/v1/stacks_test.go @@ -0,0 +1,153 @@ +// +build acceptance + +package v1 + +import ( + "testing" + + "github.com/rackspace/gophercloud" + "github.com/rackspace/gophercloud/openstack/orchestration/v1/stacks" + "github.com/rackspace/gophercloud/pagination" + th "github.com/rackspace/gophercloud/testhelper" +) + +func TestStacks(t *testing.T) { + // Create a provider client for making the HTTP requests. + // See common.go in this directory for more information. 
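+	// Walk the full stack lifecycle: create, wait, update, list, get, and finally abandon.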
+	client := newClient(t)
+
+	stackName1 := "gophercloud-test-stack-2"
+	createOpts := stacks.CreateOpts{
+		Name:     stackName1,
+		Template: template,
+		Timeout:  5,
+	}
+	stack, err := stacks.Create(client, createOpts).Extract()
+	th.AssertNoErr(t, err)
+	t.Logf("Created stack: %+v\n", stack)
+	defer func() {
+		err := stacks.Delete(client, stackName1, stack.ID).ExtractErr()
+		th.AssertNoErr(t, err)
+		t.Logf("Deleted stack (%s)", stackName1)
+	}()
+	err = gophercloud.WaitFor(60, func() (bool, error) {
+		getStack, err := stacks.Get(client, stackName1, stack.ID).Extract()
+		if err != nil {
+			return false, err
+		}
+		if getStack.Status == "CREATE_COMPLETE" {
+			return true, nil
+		}
+		return false, nil
+	})
+
+	updateOpts := stacks.UpdateOpts{
+		Template: template,
+		Timeout:  20,
+	}
+	err = stacks.Update(client, stackName1, stack.ID, updateOpts).ExtractErr()
+	th.AssertNoErr(t, err)
+	err = gophercloud.WaitFor(60, func() (bool, error) {
+		getStack, err := stacks.Get(client, stackName1, stack.ID).Extract()
+		if err != nil {
+			return false, err
+		}
+		if getStack.Status == "UPDATE_COMPLETE" {
+			return true, nil
+		}
+		return false, nil
+	})
+
+	t.Logf("Updated stack")
+
+	err = stacks.List(client, nil).EachPage(func(page pagination.Page) (bool, error) {
+		stackList, err := stacks.ExtractStacks(page)
+		th.AssertNoErr(t, err)
+
+		t.Logf("Got stack list: %+v\n", stackList)
+
+		return true, nil
+	})
+	th.AssertNoErr(t, err)
+
+	getStack, err := stacks.Get(client, stackName1, stack.ID).Extract()
+	th.AssertNoErr(t, err)
+	t.Logf("Got stack: %+v\n", getStack)
+
+	abandonedStack, err := stacks.Abandon(client, stackName1, stack.ID).Extract()
+	th.AssertNoErr(t, err)
+	t.Logf("Abandoned stack %+v\n", abandonedStack)
+	th.AssertNoErr(t, err)
+}
+
+// Test using the updated interface
+func TestStacksNewTemplateFormat(t *testing.T) {
+	// Create a provider client for making the HTTP requests.
+	// See common.go in this directory for more information.
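+	// Identical lifecycle to TestStacks, but the template is supplied through the TemplateOpts field.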
+	client := newClient(t)
+
+	stackName1 := "gophercloud-test-stack-2"
+	templateOpts := new(stacks.Template)
+	templateOpts.Bin = []byte(template)
+	createOpts := stacks.CreateOpts{
+		Name:         stackName1,
+		TemplateOpts: templateOpts,
+		Timeout:      5,
+	}
+	stack, err := stacks.Create(client, createOpts).Extract()
+	th.AssertNoErr(t, err)
+	t.Logf("Created stack: %+v\n", stack)
+	defer func() {
+		err := stacks.Delete(client, stackName1, stack.ID).ExtractErr()
+		th.AssertNoErr(t, err)
+		t.Logf("Deleted stack (%s)", stackName1)
+	}()
+	err = gophercloud.WaitFor(60, func() (bool, error) {
+		getStack, err := stacks.Get(client, stackName1, stack.ID).Extract()
+		if err != nil {
+			return false, err
+		}
+		if getStack.Status == "CREATE_COMPLETE" {
+			return true, nil
+		}
+		return false, nil
+	})
+
+	updateOpts := stacks.UpdateOpts{
+		TemplateOpts: templateOpts,
+		Timeout:      20,
+	}
+	err = stacks.Update(client, stackName1, stack.ID, updateOpts).ExtractErr()
+	th.AssertNoErr(t, err)
+	err = gophercloud.WaitFor(60, func() (bool, error) {
+		getStack, err := stacks.Get(client, stackName1, stack.ID).Extract()
+		if err != nil {
+			return false, err
+		}
+		if getStack.Status == "UPDATE_COMPLETE" {
+			return true, nil
+		}
+		return false, nil
+	})
+
+	t.Logf("Updated stack")
+
+	err = stacks.List(client, nil).EachPage(func(page pagination.Page) (bool, error) {
+		stackList, err := stacks.ExtractStacks(page)
+		th.AssertNoErr(t, err)
+
+		t.Logf("Got stack list: %+v\n", stackList)
+
+		return true, nil
+	})
+	th.AssertNoErr(t, err)
+
+	getStack, err := stacks.Get(client, stackName1, stack.ID).Extract()
+	th.AssertNoErr(t, err)
+	t.Logf("Got stack: %+v\n", getStack)
+
+	abandonedStack, err := stacks.Abandon(client, stackName1, stack.ID).Extract()
+	th.AssertNoErr(t, err)
+	t.Logf("Abandoned stack %+v\n", abandonedStack)
+	th.AssertNoErr(t, err)
+}
diff --git a/vendor/github.com/rackspace/gophercloud/acceptance/openstack/orchestration/v1/stacktemplates_test.go b/vendor/github.com/rackspace/gophercloud/acceptance/openstack/orchestration/v1/stacktemplates_test.go
new file mode 100644
index 000000000..22d5e8835
--- /dev/null
+++ b/vendor/github.com/rackspace/gophercloud/acceptance/openstack/orchestration/v1/stacktemplates_test.go
@@ -0,0 +1,75 @@
+// +build acceptance
+
+package v1
+
+import (
+	"testing"
+
+	"github.com/rackspace/gophercloud"
+	"github.com/rackspace/gophercloud/openstack/orchestration/v1/stacks"
+	"github.com/rackspace/gophercloud/openstack/orchestration/v1/stacktemplates"
+	th "github.com/rackspace/gophercloud/testhelper"
+)
+
+func TestStackTemplates(t *testing.T) {
+	// Create a provider client for making the HTTP requests.
+	// See common.go in this directory for more information.
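+	// Create a stack, fetch the template it is running, then validate a standalone template document.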
+	client := newClient(t)
+
+	stackName := "postman_stack_2"
+
+	createOpts := stacks.CreateOpts{
+		Name:     stackName,
+		Template: template,
+		Timeout:  5,
+	}
+	stack, err := stacks.Create(client, createOpts).Extract()
+	th.AssertNoErr(t, err)
+	t.Logf("Created stack: %+v\n", stack)
+	defer func() {
+		err := stacks.Delete(client, stackName, stack.ID).ExtractErr()
+		th.AssertNoErr(t, err)
+		t.Logf("Deleted stack (%s)", stackName)
+	}()
+	err = gophercloud.WaitFor(60, func() (bool, error) {
+		getStack, err := stacks.Get(client, stackName, stack.ID).Extract()
+		if err != nil {
+			return false, err
+		}
+		if getStack.Status == "CREATE_COMPLETE" {
+			return true, nil
+		}
+		return false, nil
+	})
+
+	tmpl, err := stacktemplates.Get(client, stackName, stack.ID).Extract()
+	th.AssertNoErr(t, err)
+	t.Logf("retrieved template: %+v\n", tmpl)
+
+	validateOpts := stacktemplates.ValidateOpts{
+		Template: `{"heat_template_version": "2013-05-23",
+	"description": "Simple template to test heat commands",
+	"parameters": {
+		"flavor": {
+			"default": "m1.tiny",
+			"type": "string"
+		}
+	},
+	"resources": {
+		"hello_world": {
+			"type": "OS::Nova::Server",
+			"properties": {
+				"key_name": "heat_key",
+				"flavor": {
+					"get_param": "flavor"
+				},
+				"image": "ad091b52-742f-469e-8f3c-fd81cadf0743",
+				"user_data": "#!/bin/bash -xv\necho \"hello world\" > /root/hello-world.txt\n"
+			}
+		}
+	}
+}`}
+	validatedTemplate, err := stacktemplates.Validate(client, validateOpts).Extract()
+	th.AssertNoErr(t, err)
+	t.Logf("validated template: %+v\n", validatedTemplate)
+}
diff --git a/vendor/github.com/rackspace/gophercloud/acceptance/openstack/pkg.go b/vendor/github.com/rackspace/gophercloud/acceptance/openstack/pkg.go
new file mode 100644
index 000000000..3a8ecdb10
--- /dev/null
+++ b/vendor/github.com/rackspace/gophercloud/acceptance/openstack/pkg.go
@@ -0,0 +1,4 @@
+// +build acceptance
+
+package openstack
+
diff --git a/vendor/github.com/rackspace/gophercloud/acceptance/rackspace/blockstorage/v1/common.go b/vendor/github.com/rackspace/gophercloud/acceptance/rackspace/blockstorage/v1/common.go
new file mode 100644
index 000000000..e9fdd9920
--- /dev/null
+++ b/vendor/github.com/rackspace/gophercloud/acceptance/rackspace/blockstorage/v1/common.go
@@ -0,0 +1,38 @@
+// +build acceptance
+
+package v1
+
+import (
+	"os"
+	"testing"
+
+	"github.com/rackspace/gophercloud"
+	"github.com/rackspace/gophercloud/acceptance/tools"
+	"github.com/rackspace/gophercloud/rackspace"
+	th "github.com/rackspace/gophercloud/testhelper"
+)
+
+func newClient() (*gophercloud.ServiceClient, error) {
+	opts, err := rackspace.AuthOptionsFromEnv()
+	if err != nil {
+		return nil, err
+	}
+	opts = tools.OnlyRS(opts)
+	region := os.Getenv("RS_REGION")
+
+	provider, err := rackspace.AuthenticatedClient(opts)
+	if err != nil {
+		return nil, err
+	}
+
+	return rackspace.NewBlockStorageV1(provider, gophercloud.EndpointOpts{
+		Region: region,
+	})
+}
+
+func setup(t *testing.T) *gophercloud.ServiceClient {
+	client, err := newClient()
+	th.AssertNoErr(t, err)
+
+	return client
+}
diff --git a/vendor/github.com/rackspace/gophercloud/acceptance/rackspace/blockstorage/v1/snapshot_test.go b/vendor/github.com/rackspace/gophercloud/acceptance/rackspace/blockstorage/v1/snapshot_test.go
new file mode 100644
index 000000000..25b2cfeee
--- /dev/null
+++ b/vendor/github.com/rackspace/gophercloud/acceptance/rackspace/blockstorage/v1/snapshot_test.go
@@ -0,0 +1,82 @@
+// +build acceptance blockstorage snapshots
+
+package v1
+
+import (
+	"testing"
+	"time"
+
"github.com/rackspace/gophercloud" + "github.com/rackspace/gophercloud/pagination" + "github.com/rackspace/gophercloud/rackspace/blockstorage/v1/snapshots" + th "github.com/rackspace/gophercloud/testhelper" +) + +func TestSnapshots(t *testing.T) { + client := setup(t) + volID := testVolumeCreate(t, client) + + t.Log("Creating snapshots") + s := testSnapshotCreate(t, client, volID) + id := s.ID + + t.Log("Listing snapshots") + testSnapshotList(t, client) + + t.Logf("Getting snapshot %s", id) + testSnapshotGet(t, client, id) + + t.Logf("Updating snapshot %s", id) + testSnapshotUpdate(t, client, id) + + t.Logf("Deleting snapshot %s", id) + testSnapshotDelete(t, client, id) + s.WaitUntilDeleted(client, -1) + + t.Logf("Deleting volume %s", volID) + testVolumeDelete(t, client, volID) +} + +func testSnapshotCreate(t *testing.T, client *gophercloud.ServiceClient, volID string) *snapshots.Snapshot { + opts := snapshots.CreateOpts{VolumeID: volID, Name: "snapshot-001"} + s, err := snapshots.Create(client, opts).Extract() + th.AssertNoErr(t, err) + t.Logf("Created snapshot %s", s.ID) + + t.Logf("Waiting for new snapshot to become available...") + start := time.Now().Second() + s.WaitUntilComplete(client, -1) + t.Logf("Snapshot completed after %ds", time.Now().Second()-start) + + return s +} + +func testSnapshotList(t *testing.T, client *gophercloud.ServiceClient) { + snapshots.List(client).EachPage(func(page pagination.Page) (bool, error) { + sList, err := snapshots.ExtractSnapshots(page) + th.AssertNoErr(t, err) + + for _, s := range sList { + t.Logf("Snapshot: ID [%s] Name [%s] Volume ID [%s] Progress [%s] Created [%s]", + s.ID, s.Name, s.VolumeID, s.Progress, s.CreatedAt) + } + + return true, nil + }) +} + +func testSnapshotGet(t *testing.T, client *gophercloud.ServiceClient, id string) { + _, err := snapshots.Get(client, id).Extract() + th.AssertNoErr(t, err) +} + +func testSnapshotUpdate(t *testing.T, client *gophercloud.ServiceClient, id string) { + _, err := snapshots.Update(client, id, snapshots.UpdateOpts{Name: "new_name"}).Extract() + th.AssertNoErr(t, err) +} + +func testSnapshotDelete(t *testing.T, client *gophercloud.ServiceClient, id string) { + res := snapshots.Delete(client, id) + th.AssertNoErr(t, res.Err) + t.Logf("Deleted snapshot %s", id) +} diff --git a/vendor/github.com/rackspace/gophercloud/acceptance/rackspace/blockstorage/v1/volume_test.go b/vendor/github.com/rackspace/gophercloud/acceptance/rackspace/blockstorage/v1/volume_test.go new file mode 100644 index 000000000..f86f9aded --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/acceptance/rackspace/blockstorage/v1/volume_test.go @@ -0,0 +1,71 @@ +// +build acceptance blockstorage volumes + +package v1 + +import ( + "testing" + + "github.com/rackspace/gophercloud" + os "github.com/rackspace/gophercloud/openstack/blockstorage/v1/volumes" + "github.com/rackspace/gophercloud/pagination" + "github.com/rackspace/gophercloud/rackspace/blockstorage/v1/volumes" + th "github.com/rackspace/gophercloud/testhelper" +) + +func TestVolumes(t *testing.T) { + client := setup(t) + + t.Logf("Listing volumes") + testVolumeList(t, client) + + t.Logf("Creating volume") + volumeID := testVolumeCreate(t, client) + + t.Logf("Getting volume %s", volumeID) + testVolumeGet(t, client, volumeID) + + t.Logf("Updating volume %s", volumeID) + testVolumeUpdate(t, client, volumeID) + + t.Logf("Deleting volume %s", volumeID) + testVolumeDelete(t, client, volumeID) +} + +func testVolumeList(t *testing.T, client *gophercloud.ServiceClient) { + 
+	err := volumes.List(client).EachPage(func(page pagination.Page) (bool, error) {
+		vList, err := volumes.ExtractVolumes(page)
+		th.AssertNoErr(t, err)
+
+		for _, v := range vList {
+			t.Logf("Volume: ID [%s] Name [%s] Type [%s] Created [%s]", v.ID, v.Name,
+				v.VolumeType, v.CreatedAt)
+		}
+
+		return true, nil
+	})
+	th.AssertNoErr(t, err)
+}
+
+func testVolumeCreate(t *testing.T, client *gophercloud.ServiceClient) string {
+	vol, err := volumes.Create(client, os.CreateOpts{Size: 75}).Extract()
+	th.AssertNoErr(t, err)
+	t.Logf("Created volume: ID [%s] Size [%d]", vol.ID, vol.Size)
+	return vol.ID
+}
+
+func testVolumeGet(t *testing.T, client *gophercloud.ServiceClient, id string) {
+	vol, err := volumes.Get(client, id).Extract()
+	th.AssertNoErr(t, err)
+	t.Logf("Retrieved volume: ID [%s] Size [%d]", vol.ID, vol.Size)
+}
+
+func testVolumeUpdate(t *testing.T, client *gophercloud.ServiceClient, id string) {
+	vol, err := volumes.Update(client, id, volumes.UpdateOpts{Name: "new_name"}).Extract()
+	th.AssertNoErr(t, err)
+	t.Logf("Updated volume: ID [%s] Name [%s]", vol.ID, vol.Name)
+}
+
+func testVolumeDelete(t *testing.T, client *gophercloud.ServiceClient, id string) {
+	res := volumes.Delete(client, id)
+	th.AssertNoErr(t, res.Err)
+	t.Logf("Deleted volume %s", id)
+}
diff --git a/vendor/github.com/rackspace/gophercloud/acceptance/rackspace/blockstorage/v1/volume_type_test.go b/vendor/github.com/rackspace/gophercloud/acceptance/rackspace/blockstorage/v1/volume_type_test.go
new file mode 100644
index 000000000..716f2b9fd
--- /dev/null
+++ b/vendor/github.com/rackspace/gophercloud/acceptance/rackspace/blockstorage/v1/volume_type_test.go
@@ -0,0 +1,47 @@
+// +build acceptance blockstorage volumetypes
+
+package v1
+
+import (
+	"testing"
+
+	"github.com/rackspace/gophercloud"
+	"github.com/rackspace/gophercloud/pagination"
+	"github.com/rackspace/gophercloud/rackspace/blockstorage/v1/volumetypes"
+	th "github.com/rackspace/gophercloud/testhelper"
+)
+
+func TestAll(t *testing.T) {
+	client := setup(t)
+
+	t.Logf("Listing volume types")
+	id := testList(t, client)
+
+	t.Logf("Getting volume type %s", id)
+	testGet(t, client, id)
+}
+
+func testList(t *testing.T, client *gophercloud.ServiceClient) string {
+	var lastID string
+
+	err := volumetypes.List(client).EachPage(func(page pagination.Page) (bool, error) {
+		typeList, err := volumetypes.ExtractVolumeTypes(page)
+		th.AssertNoErr(t, err)
+
+		for _, vt := range typeList {
+			t.Logf("Volume type: ID [%s] Name [%s]", vt.ID, vt.Name)
+			lastID = vt.ID
+		}
+
+		return true, nil
+	})
+	th.AssertNoErr(t, err)
+
+	return lastID
+}
+
+func testGet(t *testing.T, client *gophercloud.ServiceClient, id string) {
+	vt, err := volumetypes.Get(client, id).Extract()
+	th.AssertNoErr(t, err)
+	t.Logf("Volume type: ID [%s] Name [%s]", vt.ID, vt.Name)
+}
diff --git a/vendor/github.com/rackspace/gophercloud/acceptance/rackspace/cdn/v1/base_test.go b/vendor/github.com/rackspace/gophercloud/acceptance/rackspace/cdn/v1/base_test.go
new file mode 100644
index 000000000..135f5b330
--- /dev/null
+++ b/vendor/github.com/rackspace/gophercloud/acceptance/rackspace/cdn/v1/base_test.go
@@ -0,0 +1,33 @@
+// +build acceptance
+
+package v1
+
+import (
+	"testing"
+
+	"github.com/rackspace/gophercloud"
+	"github.com/rackspace/gophercloud/rackspace/cdn/v1/base"
+	th "github.com/rackspace/gophercloud/testhelper"
+)
+
+func TestBaseOps(t *testing.T) {
+	client := newClient(t)
+	t.Log("Retrieving Home Document")
+	testHomeDocumentGet(t, client)
+
+	t.Log("Pinging root URL")
+	testPing(t, client)
+}
+
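+// testHomeDocumentGet retrieves the CDN home document and logs it.
+func testHomeDocumentGet(t *testing.T, client 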
*gophercloud.ServiceClient) { + hd, err := base.Get(client).Extract() + th.AssertNoErr(t, err) + t.Logf("Retrieved home document: %+v", *hd) +} + +func testPing(t *testing.T, client *gophercloud.ServiceClient) { + err := base.Ping(client).ExtractErr() + th.AssertNoErr(t, err) + t.Logf("Successfully pinged root URL") +} diff --git a/vendor/github.com/rackspace/gophercloud/acceptance/rackspace/cdn/v1/common.go b/vendor/github.com/rackspace/gophercloud/acceptance/rackspace/cdn/v1/common.go new file mode 100644 index 000000000..2333ca77b --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/acceptance/rackspace/cdn/v1/common.go @@ -0,0 +1,23 @@ +// +build acceptance + +package v1 + +import ( + "testing" + + "github.com/rackspace/gophercloud" + "github.com/rackspace/gophercloud/rackspace" + th "github.com/rackspace/gophercloud/testhelper" +) + +func newClient(t *testing.T) *gophercloud.ServiceClient { + ao, err := rackspace.AuthOptionsFromEnv() + th.AssertNoErr(t, err) + + client, err := rackspace.AuthenticatedClient(ao) + th.AssertNoErr(t, err) + + c, err := rackspace.NewCDNV1(client, gophercloud.EndpointOpts{}) + th.AssertNoErr(t, err) + return c +} diff --git a/vendor/github.com/rackspace/gophercloud/acceptance/rackspace/cdn/v1/flavor_test.go b/vendor/github.com/rackspace/gophercloud/acceptance/rackspace/cdn/v1/flavor_test.go new file mode 100644 index 000000000..f26cff014 --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/acceptance/rackspace/cdn/v1/flavor_test.go @@ -0,0 +1,47 @@ +// +build acceptance + +package v1 + +import ( + "testing" + + "github.com/rackspace/gophercloud" + os "github.com/rackspace/gophercloud/openstack/cdn/v1/flavors" + "github.com/rackspace/gophercloud/pagination" + "github.com/rackspace/gophercloud/rackspace/cdn/v1/flavors" + th "github.com/rackspace/gophercloud/testhelper" +) + +func TestFlavor(t *testing.T) { + client := newClient(t) + + t.Log("Listing Flavors") + id := testFlavorsList(t, client) + + t.Log("Retrieving Flavor") + testFlavorGet(t, client, id) +} + +func testFlavorsList(t *testing.T, client *gophercloud.ServiceClient) string { + var id string + err := flavors.List(client).EachPage(func(page pagination.Page) (bool, error) { + flavorList, err := os.ExtractFlavors(page) + th.AssertNoErr(t, err) + + for _, flavor := range flavorList { + t.Logf("Listing flavor: ID [%s] Providers [%+v]", flavor.ID, flavor.Providers) + id = flavor.ID + } + + return true, nil + }) + + th.AssertNoErr(t, err) + return id +} + +func testFlavorGet(t *testing.T, client *gophercloud.ServiceClient, id string) { + flavor, err := flavors.Get(client, id).Extract() + th.AssertNoErr(t, err) + t.Logf("Retrieved Flavor: %+v", *flavor) +} diff --git a/vendor/github.com/rackspace/gophercloud/acceptance/rackspace/cdn/v1/service_test.go b/vendor/github.com/rackspace/gophercloud/acceptance/rackspace/cdn/v1/service_test.go new file mode 100644 index 000000000..c19c241c3 --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/acceptance/rackspace/cdn/v1/service_test.go @@ -0,0 +1,93 @@ +// +build acceptance + +package v1 + +import ( + "testing" + + "github.com/rackspace/gophercloud" + os "github.com/rackspace/gophercloud/openstack/cdn/v1/services" + "github.com/rackspace/gophercloud/pagination" + "github.com/rackspace/gophercloud/rackspace/cdn/v1/services" + th "github.com/rackspace/gophercloud/testhelper" +) + +func TestService(t *testing.T) { + client := newClient(t) + + t.Log("Creating Service") + loc := testServiceCreate(t, client, "test-site-1") + t.Logf("Created service 
at location: %s", loc) + + defer testServiceDelete(t, client, loc) + + t.Log("Updating Service") + testServiceUpdate(t, client, loc) + + t.Log("Retrieving Service") + testServiceGet(t, client, loc) + + t.Log("Listing Services") + testServiceList(t, client) +} + +func testServiceCreate(t *testing.T, client *gophercloud.ServiceClient, name string) string { + createOpts := os.CreateOpts{ + Name: name, + Domains: []os.Domain{ + os.Domain{ + Domain: "www." + name + ".com", + }, + }, + Origins: []os.Origin{ + os.Origin{ + Origin: name + ".com", + Port: 80, + SSL: false, + }, + }, + FlavorID: "cdn", + } + l, err := services.Create(client, createOpts).Extract() + th.AssertNoErr(t, err) + return l +} + +func testServiceGet(t *testing.T, client *gophercloud.ServiceClient, id string) { + s, err := services.Get(client, id).Extract() + th.AssertNoErr(t, err) + t.Logf("Retrieved service: %+v", *s) +} + +func testServiceUpdate(t *testing.T, client *gophercloud.ServiceClient, id string) { + opts := os.UpdateOpts{ + os.Append{ + Value: os.Domain{Domain: "newDomain.com", Protocol: "http"}, + }, + } + + loc, err := services.Update(client, id, opts).Extract() + th.AssertNoErr(t, err) + t.Logf("Successfully updated service at location: %s", loc) +} + +func testServiceList(t *testing.T, client *gophercloud.ServiceClient) { + err := services.List(client, nil).EachPage(func(page pagination.Page) (bool, error) { + serviceList, err := os.ExtractServices(page) + th.AssertNoErr(t, err) + + for _, service := range serviceList { + t.Logf("Listing service: %+v", service) + } + + return true, nil + }) + + th.AssertNoErr(t, err) +} + +func testServiceDelete(t *testing.T, client *gophercloud.ServiceClient, id string) { + err := services.Delete(client, id).ExtractErr() + th.AssertNoErr(t, err) + t.Logf("Successfully deleted service (%s)", id) +} diff --git a/vendor/github.com/rackspace/gophercloud/acceptance/rackspace/cdn/v1/serviceasset_test.go b/vendor/github.com/rackspace/gophercloud/acceptance/rackspace/cdn/v1/serviceasset_test.go new file mode 100644 index 000000000..c32bf253d --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/acceptance/rackspace/cdn/v1/serviceasset_test.go @@ -0,0 +1,32 @@ +// +build acceptance + +package v1 + +import ( + "testing" + + "github.com/rackspace/gophercloud" + osServiceAssets "github.com/rackspace/gophercloud/openstack/cdn/v1/serviceassets" + "github.com/rackspace/gophercloud/rackspace/cdn/v1/serviceassets" + th "github.com/rackspace/gophercloud/testhelper" +) + +func TestServiceAsset(t *testing.T) { + client := newClient(t) + + t.Log("Creating Service") + loc := testServiceCreate(t, client, "test-site-2") + t.Logf("Created service at location: %s", loc) + + t.Log("Deleting Service Assets") + testServiceAssetDelete(t, client, loc) +} + +func testServiceAssetDelete(t *testing.T, client *gophercloud.ServiceClient, url string) { + deleteOpts := osServiceAssets.DeleteOpts{ + All: true, + } + err := serviceassets.Delete(client, url, deleteOpts).ExtractErr() + th.AssertNoErr(t, err) + t.Log("Successfully deleted all Service Assets") +} diff --git a/vendor/github.com/rackspace/gophercloud/acceptance/rackspace/client_test.go b/vendor/github.com/rackspace/gophercloud/acceptance/rackspace/client_test.go new file mode 100644 index 000000000..61214c047 --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/acceptance/rackspace/client_test.go @@ -0,0 +1,28 @@ +// +build acceptance + +package rackspace + +import ( + "testing" + + "github.com/rackspace/gophercloud/acceptance/tools" + 
"github.com/rackspace/gophercloud/rackspace" + th "github.com/rackspace/gophercloud/testhelper" +) + +func TestAuthenticatedClient(t *testing.T) { + // Obtain credentials from the environment. + ao, err := rackspace.AuthOptionsFromEnv() + th.AssertNoErr(t, err) + + client, err := rackspace.AuthenticatedClient(tools.OnlyRS(ao)) + if err != nil { + t.Fatalf("Unable to authenticate: %v", err) + } + + if client.TokenID == "" { + t.Errorf("No token ID assigned to the client") + } + + t.Logf("Client successfully acquired a token: %v", client.TokenID) +} diff --git a/vendor/github.com/rackspace/gophercloud/acceptance/rackspace/compute/v2/bootfromvolume_test.go b/vendor/github.com/rackspace/gophercloud/acceptance/rackspace/compute/v2/bootfromvolume_test.go new file mode 100644 index 000000000..d7e6aa712 --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/acceptance/rackspace/compute/v2/bootfromvolume_test.go @@ -0,0 +1,49 @@ +// +build acceptance + +package v2 + +import ( + "testing" + + "github.com/rackspace/gophercloud/acceptance/tools" + osBFV "github.com/rackspace/gophercloud/openstack/compute/v2/extensions/bootfromvolume" + "github.com/rackspace/gophercloud/rackspace/compute/v2/bootfromvolume" + "github.com/rackspace/gophercloud/rackspace/compute/v2/servers" + th "github.com/rackspace/gophercloud/testhelper" +) + +func TestBootFromVolume(t *testing.T) { + client, err := newClient() + th.AssertNoErr(t, err) + + if testing.Short() { + t.Skip("Skipping test that requires server creation in short mode.") + } + + options, err := optionsFromEnv() + th.AssertNoErr(t, err) + + name := tools.RandomString("Gophercloud-", 8) + t.Logf("Creating server [%s].", name) + + bd := []osBFV.BlockDevice{ + osBFV.BlockDevice{ + UUID: options.imageID, + SourceType: osBFV.Image, + VolumeSize: 10, + }, + } + + server, err := bootfromvolume.Create(client, servers.CreateOpts{ + Name: name, + FlavorRef: "performance1-1", + BlockDevice: bd, + }).Extract() + th.AssertNoErr(t, err) + t.Logf("Created server: %+v\n", server) + defer deleteServer(t, client, server) + + getServer(t, client, server) + + listServers(t, client) +} diff --git a/vendor/github.com/rackspace/gophercloud/acceptance/rackspace/compute/v2/compute_test.go b/vendor/github.com/rackspace/gophercloud/acceptance/rackspace/compute/v2/compute_test.go new file mode 100644 index 000000000..3ca6dc9b6 --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/acceptance/rackspace/compute/v2/compute_test.go @@ -0,0 +1,60 @@ +// +build acceptance + +package v2 + +import ( + "errors" + "os" + + "github.com/rackspace/gophercloud" + "github.com/rackspace/gophercloud/acceptance/tools" + "github.com/rackspace/gophercloud/rackspace" +) + +func newClient() (*gophercloud.ServiceClient, error) { + // Obtain credentials from the environment. 
+ options, err := rackspace.AuthOptionsFromEnv() + if err != nil { + return nil, err + } + options = tools.OnlyRS(options) + region := os.Getenv("RS_REGION") + + if options.Username == "" { + return nil, errors.New("Please provide a Rackspace username as RS_USERNAME.") + } + if options.APIKey == "" { + return nil, errors.New("Please provide a Rackspace API key as RS_API_KEY.") + } + if region == "" { + return nil, errors.New("Please provide a Rackspace region as RS_REGION.") + } + + client, err := rackspace.AuthenticatedClient(options) + if err != nil { + return nil, err + } + + return rackspace.NewComputeV2(client, gophercloud.EndpointOpts{ + Region: region, + }) +} + +type serverOpts struct { + imageID string + flavorID string +} + +func optionsFromEnv() (*serverOpts, error) { + options := &serverOpts{ + imageID: os.Getenv("RS_IMAGE_ID"), + flavorID: os.Getenv("RS_FLAVOR_ID"), + } + if options.imageID == "" { + return nil, errors.New("Please provide a valid Rackspace image ID as RS_IMAGE_ID") + } + if options.flavorID == "" { + return nil, errors.New("Please provide a valid Rackspace flavor ID as RS_FLAVOR_ID") + } + return options, nil +} diff --git a/vendor/github.com/rackspace/gophercloud/acceptance/rackspace/compute/v2/flavors_test.go b/vendor/github.com/rackspace/gophercloud/acceptance/rackspace/compute/v2/flavors_test.go new file mode 100644 index 000000000..4618ecc8a --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/acceptance/rackspace/compute/v2/flavors_test.go @@ -0,0 +1,61 @@ +// +build acceptance + +package v2 + +import ( + "testing" + + "github.com/rackspace/gophercloud/pagination" + "github.com/rackspace/gophercloud/rackspace/compute/v2/flavors" + th "github.com/rackspace/gophercloud/testhelper" +) + +func TestListFlavors(t *testing.T) { + client, err := newClient() + th.AssertNoErr(t, err) + + count := 0 + err = flavors.ListDetail(client, nil).EachPage(func(page pagination.Page) (bool, error) { + count++ + t.Logf("-- Page %0d --", count) + + fs, err := flavors.ExtractFlavors(page) + th.AssertNoErr(t, err) + + for i, flavor := range fs { + t.Logf("[%02d] id=[%s]", i, flavor.ID) + t.Logf(" name=[%s]", flavor.Name) + t.Logf(" disk=[%d]", flavor.Disk) + t.Logf(" RAM=[%d]", flavor.RAM) + t.Logf(" rxtx_factor=[%f]", flavor.RxTxFactor) + t.Logf(" swap=[%d]", flavor.Swap) + t.Logf(" VCPUs=[%d]", flavor.VCPUs) + } + + return true, nil + }) + th.AssertNoErr(t, err) + if count == 0 { + t.Errorf("No flavors listed!") + } +} + +func TestGetFlavor(t *testing.T) { + client, err := newClient() + th.AssertNoErr(t, err) + + options, err := optionsFromEnv() + th.AssertNoErr(t, err) + + flavor, err := flavors.Get(client, options.flavorID).Extract() + th.AssertNoErr(t, err) + + t.Logf("Requested flavor:") + t.Logf(" id=[%s]", flavor.ID) + t.Logf(" name=[%s]", flavor.Name) + t.Logf(" disk=[%d]", flavor.Disk) + t.Logf(" RAM=[%d]", flavor.RAM) + t.Logf(" rxtx_factor=[%f]", flavor.RxTxFactor) + t.Logf(" swap=[%d]", flavor.Swap) + t.Logf(" VCPUs=[%d]", flavor.VCPUs) +} diff --git a/vendor/github.com/rackspace/gophercloud/acceptance/rackspace/compute/v2/images_test.go b/vendor/github.com/rackspace/gophercloud/acceptance/rackspace/compute/v2/images_test.go new file mode 100644 index 000000000..5e36c2e45 --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/acceptance/rackspace/compute/v2/images_test.go @@ -0,0 +1,63 @@ +// +build acceptance + +package v2 + +import ( + "testing" + + "github.com/rackspace/gophercloud/pagination" + 
"github.com/rackspace/gophercloud/rackspace/compute/v2/images" + th "github.com/rackspace/gophercloud/testhelper" +) + +func TestListImages(t *testing.T) { + client, err := newClient() + th.AssertNoErr(t, err) + + count := 0 + err = images.ListDetail(client, nil).EachPage(func(page pagination.Page) (bool, error) { + count++ + t.Logf("-- Page %02d --", count) + + is, err := images.ExtractImages(page) + th.AssertNoErr(t, err) + + for i, image := range is { + t.Logf("[%02d] id=[%s]", i, image.ID) + t.Logf(" name=[%s]", image.Name) + t.Logf(" created=[%s]", image.Created) + t.Logf(" updated=[%s]", image.Updated) + t.Logf(" min disk=[%d]", image.MinDisk) + t.Logf(" min RAM=[%d]", image.MinRAM) + t.Logf(" progress=[%d]", image.Progress) + t.Logf(" status=[%s]", image.Status) + } + + return true, nil + }) + th.AssertNoErr(t, err) + if count < 1 { + t.Errorf("Expected at least one page of images.") + } +} + +func TestGetImage(t *testing.T) { + client, err := newClient() + th.AssertNoErr(t, err) + + options, err := optionsFromEnv() + th.AssertNoErr(t, err) + + image, err := images.Get(client, options.imageID).Extract() + th.AssertNoErr(t, err) + + t.Logf("Requested image:") + t.Logf(" id=[%s]", image.ID) + t.Logf(" name=[%s]", image.Name) + t.Logf(" created=[%s]", image.Created) + t.Logf(" updated=[%s]", image.Updated) + t.Logf(" min disk=[%d]", image.MinDisk) + t.Logf(" min RAM=[%d]", image.MinRAM) + t.Logf(" progress=[%d]", image.Progress) + t.Logf(" status=[%s]", image.Status) +} diff --git a/vendor/github.com/rackspace/gophercloud/acceptance/rackspace/compute/v2/keypairs_test.go b/vendor/github.com/rackspace/gophercloud/acceptance/rackspace/compute/v2/keypairs_test.go new file mode 100644 index 000000000..9bd6eb428 --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/acceptance/rackspace/compute/v2/keypairs_test.go @@ -0,0 +1,87 @@ +// +build acceptance rackspace + +package v2 + +import ( + "testing" + + "github.com/rackspace/gophercloud" + "github.com/rackspace/gophercloud/acceptance/tools" + os "github.com/rackspace/gophercloud/openstack/compute/v2/extensions/keypairs" + "github.com/rackspace/gophercloud/pagination" + "github.com/rackspace/gophercloud/rackspace/compute/v2/keypairs" + th "github.com/rackspace/gophercloud/testhelper" +) + +func deleteKeyPair(t *testing.T, client *gophercloud.ServiceClient, name string) { + err := keypairs.Delete(client, name).ExtractErr() + th.AssertNoErr(t, err) + t.Logf("Successfully deleted key [%s].", name) +} + +func TestCreateKeyPair(t *testing.T) { + client, err := newClient() + th.AssertNoErr(t, err) + + name := tools.RandomString("createdkey-", 8) + k, err := keypairs.Create(client, os.CreateOpts{Name: name}).Extract() + th.AssertNoErr(t, err) + defer deleteKeyPair(t, client, name) + + t.Logf("Created a new keypair:") + t.Logf(" name=[%s]", k.Name) + t.Logf(" fingerprint=[%s]", k.Fingerprint) + t.Logf(" publickey=[%s]", tools.Elide(k.PublicKey)) + t.Logf(" privatekey=[%s]", tools.Elide(k.PrivateKey)) + t.Logf(" userid=[%s]", k.UserID) +} + +func TestImportKeyPair(t *testing.T) { + client, err := newClient() + th.AssertNoErr(t, err) + + name := tools.RandomString("importedkey-", 8) + pubkey := "ssh-rsa 
AAAAB3NzaC1yc2EAAAADAQABAAABAQDlIQ3r+zd97kb9Hzmujd3V6pbO53eb3Go4q2E8iqVGWQfZTrFdL9KACJnqJIm9HmncfRkUTxE37hqeGCCv8uD+ZPmPiZG2E60OX1mGDjbbzAyReRwYWXgXHopggZTLak5k4mwZYaxwaufbVBDRn847e01lZnaXaszEToLM37NLw+uz29sl3TwYy2R0RGHPwPc160aWmdLjSyd1Nd4c9pvvOP/EoEuBjIC6NJJwg2Rvg9sjjx9jYj0QUgc8CqKLN25oMZ69kNJzlFylKRUoeeVr89txlR59yehJWk6Uw6lYFTdJmcmQOFVAJ12RMmS1hLWCM8UzAgtw+EDa0eqBxBDl smash@winter" + + k, err := keypairs.Create(client, os.CreateOpts{ + Name: name, + PublicKey: pubkey, + }).Extract() + th.AssertNoErr(t, err) + defer deleteKeyPair(t, client, name) + + th.CheckEquals(t, pubkey, k.PublicKey) + th.CheckEquals(t, "", k.PrivateKey) + + t.Logf("Imported an existing keypair:") + t.Logf(" name=[%s]", k.Name) + t.Logf(" fingerprint=[%s]", k.Fingerprint) + t.Logf(" publickey=[%s]", tools.Elide(k.PublicKey)) + t.Logf(" privatekey=[%s]", tools.Elide(k.PrivateKey)) + t.Logf(" userid=[%s]", k.UserID) +} + +func TestListKeyPairs(t *testing.T) { + client, err := newClient() + th.AssertNoErr(t, err) + + count := 0 + err = keypairs.List(client).EachPage(func(page pagination.Page) (bool, error) { + count++ + t.Logf("--- %02d ---", count) + + ks, err := keypairs.ExtractKeyPairs(page) + th.AssertNoErr(t, err) + + for i, keypair := range ks { + t.Logf("[%02d] name=[%s]", i, keypair.Name) + t.Logf(" fingerprint=[%s]", keypair.Fingerprint) + t.Logf(" publickey=[%s]", tools.Elide(keypair.PublicKey)) + t.Logf(" privatekey=[%s]", tools.Elide(keypair.PrivateKey)) + t.Logf(" userid=[%s]", keypair.UserID) + } + + return true, nil + }) + th.AssertNoErr(t, err) +} diff --git a/vendor/github.com/rackspace/gophercloud/acceptance/rackspace/compute/v2/networks_test.go b/vendor/github.com/rackspace/gophercloud/acceptance/rackspace/compute/v2/networks_test.go new file mode 100644 index 000000000..e8fc4d37d --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/acceptance/rackspace/compute/v2/networks_test.go @@ -0,0 +1,53 @@ +// +build acceptance rackspace + +package v2 + +import ( + "testing" + + "github.com/rackspace/gophercloud/pagination" + "github.com/rackspace/gophercloud/rackspace/compute/v2/networks" + th "github.com/rackspace/gophercloud/testhelper" +) + +func TestNetworks(t *testing.T) { + client, err := newClient() + th.AssertNoErr(t, err) + + // Create a network + n, err := networks.Create(client, networks.CreateOpts{Label: "sample_network", CIDR: "172.20.0.0/24"}).Extract() + th.AssertNoErr(t, err) + t.Logf("Created network: %+v\n", n) + defer networks.Delete(client, n.ID) + th.AssertEquals(t, n.Label, "sample_network") + th.AssertEquals(t, n.CIDR, "172.20.0.0/24") + networkID := n.ID + + // List networks + pager := networks.List(client) + err = pager.EachPage(func(page pagination.Page) (bool, error) { + t.Logf("--- Page ---") + + networkList, err := networks.ExtractNetworks(page) + th.AssertNoErr(t, err) + + for _, n := range networkList { + t.Logf("Network: ID [%s] Label [%s] CIDR [%s]", + n.ID, n.Label, n.CIDR) + } + + return true, nil + }) + th.CheckNoErr(t, err) + + // Get a network + if networkID == "" { + t.Fatalf("In order to retrieve a network, the NetworkID must be set") + } + n, err = networks.Get(client, networkID).Extract() + t.Logf("Retrieved Network: %+v\n", n) + th.AssertNoErr(t, err) + th.AssertEquals(t, n.CIDR, "172.20.0.0/24") + th.AssertEquals(t, n.Label, "sample_network") + th.AssertEquals(t, n.ID, networkID) +} diff --git a/vendor/github.com/rackspace/gophercloud/acceptance/rackspace/compute/v2/pkg.go b/vendor/github.com/rackspace/gophercloud/acceptance/rackspace/compute/v2/pkg.go new 
file mode 100644 index 000000000..5ec3cc8e8 --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/acceptance/rackspace/compute/v2/pkg.go @@ -0,0 +1 @@ +package v2 diff --git a/vendor/github.com/rackspace/gophercloud/acceptance/rackspace/compute/v2/servers_test.go b/vendor/github.com/rackspace/gophercloud/acceptance/rackspace/compute/v2/servers_test.go new file mode 100644 index 000000000..a8b5937b6 --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/acceptance/rackspace/compute/v2/servers_test.go @@ -0,0 +1,217 @@ +// +build acceptance + +package v2 + +import ( + "testing" + + "github.com/rackspace/gophercloud" + "github.com/rackspace/gophercloud/acceptance/tools" + "github.com/rackspace/gophercloud/openstack/compute/v2/extensions/diskconfig" + oskey "github.com/rackspace/gophercloud/openstack/compute/v2/extensions/keypairs" + os "github.com/rackspace/gophercloud/openstack/compute/v2/servers" + "github.com/rackspace/gophercloud/pagination" + "github.com/rackspace/gophercloud/rackspace/compute/v2/keypairs" + "github.com/rackspace/gophercloud/rackspace/compute/v2/servers" + th "github.com/rackspace/gophercloud/testhelper" +) + +func createServerKeyPair(t *testing.T, client *gophercloud.ServiceClient) *oskey.KeyPair { + name := tools.RandomString("importedkey-", 8) + pubkey := "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDlIQ3r+zd97kb9Hzmujd3V6pbO53eb3Go4q2E8iqVGWQfZTrFdL9KACJnqJIm9HmncfRkUTxE37hqeGCCv8uD+ZPmPiZG2E60OX1mGDjbbzAyReRwYWXgXHopggZTLak5k4mwZYaxwaufbVBDRn847e01lZnaXaszEToLM37NLw+uz29sl3TwYy2R0RGHPwPc160aWmdLjSyd1Nd4c9pvvOP/EoEuBjIC6NJJwg2Rvg9sjjx9jYj0QUgc8CqKLN25oMZ69kNJzlFylKRUoeeVr89txlR59yehJWk6Uw6lYFTdJmcmQOFVAJ12RMmS1hLWCM8UzAgtw+EDa0eqBxBDl smash@winter" + + k, err := keypairs.Create(client, oskey.CreateOpts{ + Name: name, + PublicKey: pubkey, + }).Extract() + th.AssertNoErr(t, err) + + return k +} + +func createServer(t *testing.T, client *gophercloud.ServiceClient, keyName string) *os.Server { + if testing.Short() { + t.Skip("Skipping test that requires server creation in short mode.") + } + + options, err := optionsFromEnv() + th.AssertNoErr(t, err) + + name := tools.RandomString("Gophercloud-", 8) + + pwd := tools.MakeNewPassword("") + + opts := &servers.CreateOpts{ + Name: name, + ImageRef: options.imageID, + FlavorRef: options.flavorID, + DiskConfig: diskconfig.Manual, + AdminPass: pwd, + } + + if keyName != "" { + opts.KeyPair = keyName + } + + t.Logf("Creating server [%s].", name) + s, err := servers.Create(client, opts).Extract() + th.AssertNoErr(t, err) + t.Logf("Creating server.") + + err = servers.WaitForStatus(client, s.ID, "ACTIVE", 300) + th.AssertNoErr(t, err) + t.Logf("Server created successfully.") + + th.CheckEquals(t, pwd, s.AdminPass) + + return s +} + +func logServer(t *testing.T, server *os.Server, index int) { + if index == -1 { + t.Logf(" id=[%s]", server.ID) + } else { + t.Logf("[%02d] id=[%s]", index, server.ID) + } + t.Logf(" name=[%s]", server.Name) + t.Logf(" tenant ID=[%s]", server.TenantID) + t.Logf(" user ID=[%s]", server.UserID) + t.Logf(" updated=[%s]", server.Updated) + t.Logf(" created=[%s]", server.Created) + t.Logf(" host ID=[%s]", server.HostID) + t.Logf(" access IPv4=[%s]", server.AccessIPv4) + t.Logf(" access IPv6=[%s]", server.AccessIPv6) + t.Logf(" image=[%v]", server.Image) + t.Logf(" flavor=[%v]", server.Flavor) + t.Logf(" addresses=[%v]", server.Addresses) + t.Logf(" metadata=[%v]", server.Metadata) + t.Logf(" links=[%v]", server.Links) + t.Logf(" keyname=[%s]", server.KeyName) + t.Logf(" admin password=[%s]", 
server.AdminPass)
+	t.Logf(" status=[%s]", server.Status)
+	t.Logf(" progress=[%d]", server.Progress)
+}
+
+func getServer(t *testing.T, client *gophercloud.ServiceClient, server *os.Server) {
+	t.Logf("> servers.Get")
+
+	details, err := servers.Get(client, server.ID).Extract()
+	th.AssertNoErr(t, err)
+	logServer(t, details, -1)
+}
+
+func updateServer(t *testing.T, client *gophercloud.ServiceClient, server *os.Server) {
+	t.Logf("> servers.Update")
+
+	opts := os.UpdateOpts{
+		Name: "updated-server",
+	}
+	updatedServer, err := servers.Update(client, server.ID, opts).Extract()
+	th.AssertNoErr(t, err)
+	th.AssertEquals(t, "updated-server", updatedServer.Name)
+	logServer(t, updatedServer, -1)
+}
+
+func listServers(t *testing.T, client *gophercloud.ServiceClient) {
+	t.Logf("> servers.List")
+
+	count := 0
+	err := servers.List(client, nil).EachPage(func(page pagination.Page) (bool, error) {
+		count++
+		t.Logf("--- Page %02d ---", count)
+
+		s, err := servers.ExtractServers(page)
+		th.AssertNoErr(t, err)
+		for index, server := range s {
+			logServer(t, &server, index)
+		}
+
+		return true, nil
+	})
+	th.AssertNoErr(t, err)
+}
+
+func changeAdminPassword(t *testing.T, client *gophercloud.ServiceClient, server *os.Server) {
+	t.Logf("> servers.ChangeAdminPassword")
+
+	original := server.AdminPass
+
+	t.Logf("Changing server password.")
+	err := servers.ChangeAdminPassword(client, server.ID, tools.MakeNewPassword(original)).ExtractErr()
+	th.AssertNoErr(t, err)
+
+	err = servers.WaitForStatus(client, server.ID, "ACTIVE", 300)
+	th.AssertNoErr(t, err)
+	t.Logf("Password changed successfully.")
+}
+
+func rebootServer(t *testing.T, client *gophercloud.ServiceClient, server *os.Server) {
+	t.Logf("> servers.Reboot")
+
+	err := servers.Reboot(client, server.ID, os.HardReboot).ExtractErr()
+	th.AssertNoErr(t, err)
+
+	err = servers.WaitForStatus(client, server.ID, "ACTIVE", 300)
+	th.AssertNoErr(t, err)
+
+	t.Logf("Server successfully rebooted.")
+}
+
+func rebuildServer(t *testing.T, client *gophercloud.ServiceClient, server *os.Server) {
+	t.Logf("> servers.Rebuild")
+
+	options, err := optionsFromEnv()
+	th.AssertNoErr(t, err)
+
+	opts := servers.RebuildOpts{
+		Name: tools.RandomString("RenamedGopher", 16),
+		AdminPass: tools.MakeNewPassword(server.AdminPass),
+		ImageID: options.imageID,
+		DiskConfig: diskconfig.Manual,
+	}
+	after, err := servers.Rebuild(client, server.ID, opts).Extract()
+	th.AssertNoErr(t, err)
+	th.CheckEquals(t, after.ID, server.ID)
+
+	err = servers.WaitForStatus(client, after.ID, "ACTIVE", 300)
+	th.AssertNoErr(t, err)
+
+	t.Logf("Server successfully rebuilt.")
+	logServer(t, after, -1)
+}
+
+func deleteServer(t *testing.T, client *gophercloud.ServiceClient, server *os.Server) {
+	t.Logf("> servers.Delete")
+
+	res := servers.Delete(client, server.ID)
+	th.AssertNoErr(t, res.Err)
+
+	t.Logf("Server deleted successfully.")
+}
+
+func deleteServerKeyPair(t *testing.T, client *gophercloud.ServiceClient, k *oskey.KeyPair) {
+	t.Logf("> keypairs.Delete")
+
+	err := keypairs.Delete(client, k.Name).ExtractErr()
+	th.AssertNoErr(t, err)
+
+	t.Logf("Keypair deleted successfully.")
+}
+
+func TestServerOperations(t *testing.T) {
+	client, err := newClient()
+	th.AssertNoErr(t, err)
+
+	kp := createServerKeyPair(t, client)
+	defer deleteServerKeyPair(t, client, kp)
+
+	server := createServer(t, client, kp.Name)
+	defer deleteServer(t, client, server)
+
+	getServer(t, client, server)
+	updateServer(t, client, server)
+	listServers(t, client)
+	changeAdminPassword(t, client, server)
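+	// Reboot and rebuild both wait for the server to return to ACTIVE before continuing.
+	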
rebootServer(t, client, server) + rebuildServer(t, client, server) +} diff --git a/vendor/github.com/rackspace/gophercloud/acceptance/rackspace/compute/v2/virtualinterfaces_test.go b/vendor/github.com/rackspace/gophercloud/acceptance/rackspace/compute/v2/virtualinterfaces_test.go new file mode 100644 index 000000000..39475e176 --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/acceptance/rackspace/compute/v2/virtualinterfaces_test.go @@ -0,0 +1,53 @@ +// +build acceptance rackspace + +package v2 + +import ( + "testing" + + "github.com/rackspace/gophercloud/pagination" + "github.com/rackspace/gophercloud/rackspace/compute/v2/networks" + "github.com/rackspace/gophercloud/rackspace/compute/v2/virtualinterfaces" + th "github.com/rackspace/gophercloud/testhelper" +) + +func TestVirtualInterfaces(t *testing.T) { + client, err := newClient() + th.AssertNoErr(t, err) + + // Create a server + server := createServer(t, client, "") + t.Logf("Created Server: %v\n", server) + defer deleteServer(t, client, server) + serverID := server.ID + + // Create a network + n, err := networks.Create(client, networks.CreateOpts{Label: "sample_network", CIDR: "172.20.0.0/24"}).Extract() + th.AssertNoErr(t, err) + t.Logf("Created Network: %v\n", n) + defer networks.Delete(client, n.ID) + networkID := n.ID + + // Create a virtual interface + vi, err := virtualinterfaces.Create(client, serverID, networkID).Extract() + th.AssertNoErr(t, err) + t.Logf("Created virtual interface: %+v\n", vi) + defer virtualinterfaces.Delete(client, serverID, vi.ID) + + // List virtual interfaces + pager := virtualinterfaces.List(client, serverID) + err = pager.EachPage(func(page pagination.Page) (bool, error) { + t.Logf("--- Page ---") + + virtualinterfacesList, err := virtualinterfaces.ExtractVirtualInterfaces(page) + th.AssertNoErr(t, err) + + for _, vi := range virtualinterfacesList { + t.Logf("Virtual Interface: ID [%s] MAC Address [%s] IP Addresses [%v]", + vi.ID, vi.MACAddress, vi.IPAddresses) + } + + return true, nil + }) + th.CheckNoErr(t, err) +} diff --git a/vendor/github.com/rackspace/gophercloud/acceptance/rackspace/compute/v2/volumeattach_test.go b/vendor/github.com/rackspace/gophercloud/acceptance/rackspace/compute/v2/volumeattach_test.go new file mode 100644 index 000000000..9848e2eba --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/acceptance/rackspace/compute/v2/volumeattach_test.go @@ -0,0 +1,130 @@ +// +build acceptance compute servers + +package v2 + +import ( + "os" + "testing" + + "github.com/rackspace/gophercloud" + "github.com/rackspace/gophercloud/acceptance/tools" + "github.com/rackspace/gophercloud/openstack" + osVolumes "github.com/rackspace/gophercloud/openstack/blockstorage/v1/volumes" + osVolumeAttach "github.com/rackspace/gophercloud/openstack/compute/v2/extensions/volumeattach" + osServers "github.com/rackspace/gophercloud/openstack/compute/v2/servers" + "github.com/rackspace/gophercloud/rackspace" + "github.com/rackspace/gophercloud/rackspace/blockstorage/v1/volumes" + "github.com/rackspace/gophercloud/rackspace/compute/v2/servers" + "github.com/rackspace/gophercloud/rackspace/compute/v2/volumeattach" + th "github.com/rackspace/gophercloud/testhelper" +) + +func newBlockClient(t *testing.T) (*gophercloud.ServiceClient, error) { + ao, err := rackspace.AuthOptionsFromEnv() + th.AssertNoErr(t, err) + + client, err := rackspace.AuthenticatedClient(ao) + th.AssertNoErr(t, err) + + return openstack.NewBlockStorageV1(client, gophercloud.EndpointOpts{ + Region: os.Getenv("RS_REGION_NAME"), + 
}) +} + +func createVAServer(t *testing.T, computeClient *gophercloud.ServiceClient, choices *serverOpts) (*osServers.Server, error) { + if testing.Short() { + t.Skip("Skipping test that requires server creation in short mode.") + } + + name := tools.RandomString("ACPTTEST", 16) + t.Logf("Attempting to create server: %s\n", name) + + pwd := tools.MakeNewPassword("") + + server, err := servers.Create(computeClient, osServers.CreateOpts{ + Name: name, + FlavorRef: choices.flavorID, + ImageRef: choices.imageID, + AdminPass: pwd, + }).Extract() + if err != nil { + t.Fatalf("Unable to create server: %v", err) + } + + th.AssertEquals(t, pwd, server.AdminPass) + + return server, err +} + +func createVAVolume(t *testing.T, blockClient *gophercloud.ServiceClient) (*volumes.Volume, error) { + volume, err := volumes.Create(blockClient, &osVolumes.CreateOpts{ + Size: 80, + Name: "gophercloud-test-volume", + }).Extract() + th.AssertNoErr(t, err) + defer func() { + err = osVolumes.WaitForStatus(blockClient, volume.ID, "available", 60) + th.AssertNoErr(t, err) + }() + + return volume, err +} + +func createVolumeAttachment(t *testing.T, computeClient *gophercloud.ServiceClient, blockClient *gophercloud.ServiceClient, serverID string, volumeID string) { + va, err := volumeattach.Create(computeClient, serverID, &osVolumeAttach.CreateOpts{ + VolumeID: volumeID, + }).Extract() + th.AssertNoErr(t, err) + defer func() { + err = osVolumes.WaitForStatus(blockClient, volumeID, "in-use", 60) + th.AssertNoErr(t, err) + err = volumeattach.Delete(computeClient, serverID, va.ID).ExtractErr() + th.AssertNoErr(t, err) + err = osVolumes.WaitForStatus(blockClient, volumeID, "available", 60) + th.AssertNoErr(t, err) + }() + t.Logf("Attached volume to server: %+v", va) +} + +func TestAttachVolume(t *testing.T) { + choices, err := optionsFromEnv() + if err != nil { + t.Fatal(err) + } + + computeClient, err := newClient() + if err != nil { + t.Fatalf("Unable to create a compute client: %v", err) + } + + blockClient, err := newBlockClient(t) + if err != nil { + t.Fatalf("Unable to create a blockstorage client: %v", err) + } + + server, err := createVAServer(t, computeClient, choices) + if err != nil { + t.Fatalf("Unable to create server: %v", err) + } + defer func() { + servers.Delete(computeClient, server.ID) + t.Logf("Server deleted.") + }() + + if err = osServers.WaitForStatus(computeClient, server.ID, "ACTIVE", 300); err != nil { + t.Fatalf("Unable to wait for server: %v", err) + } + + volume, err := createVAVolume(t, blockClient) + if err != nil { + t.Fatalf("Unable to create volume: %v", err) + } + defer func() { + err = volumes.Delete(blockClient, volume.ID).ExtractErr() + th.AssertNoErr(t, err) + t.Logf("Volume deleted.") + }() + + createVolumeAttachment(t, computeClient, blockClient, server.ID, volume.ID) + +} diff --git a/vendor/github.com/rackspace/gophercloud/acceptance/rackspace/db/v1/backup_test.go b/vendor/github.com/rackspace/gophercloud/acceptance/rackspace/db/v1/backup_test.go new file mode 100644 index 000000000..522aaceb1 --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/acceptance/rackspace/db/v1/backup_test.go @@ -0,0 +1,84 @@ +// +build acceptance db rackspace + +package v1 + +import ( + "github.com/rackspace/gophercloud" + "github.com/rackspace/gophercloud/acceptance/tools" + "github.com/rackspace/gophercloud/pagination" + + "github.com/rackspace/gophercloud/rackspace/db/v1/backups" + "github.com/rackspace/gophercloud/rackspace/db/v1/instances" +) + +func (c *context) createBackup() { + opts := 
backups.CreateOpts{ + Name: tools.RandomString("backup_", 5), + InstanceID: c.instanceID, + } + + backup, err := backups.Create(c.client, opts).Extract() + + c.Logf("Created backup %#v", backup) + c.AssertNoErr(err) + + err = gophercloud.WaitFor(60, func() (bool, error) { + b, err := backups.Get(c.client, backup.ID).Extract() + if err != nil { + return false, err + } + if b.Status == "COMPLETED" { + return true, nil + } + return false, nil + }) + c.AssertNoErr(err) + + c.backupID = backup.ID +} + +func (c *context) getBackup() { + backup, err := backups.Get(c.client, c.backupID).Extract() + c.AssertNoErr(err) + c.Logf("Getting backup %s", backup.ID) +} + +func (c *context) listAllBackups() { + c.Logf("Listing backups") + + err := backups.List(c.client, nil).EachPage(func(page pagination.Page) (bool, error) { + backupList, err := backups.ExtractBackups(page) + c.AssertNoErr(err) + + for _, b := range backupList { + c.Logf("Backup: %#v", b) + } + + return true, nil + }) + + c.AssertNoErr(err) +} + +func (c *context) listInstanceBackups() { + c.Logf("Listing backups for instance %s", c.instanceID) + + err := instances.ListBackups(c.client, c.instanceID).EachPage(func(page pagination.Page) (bool, error) { + backupList, err := backups.ExtractBackups(page) + c.AssertNoErr(err) + + for _, b := range backupList { + c.Logf("Backup: %#v", b) + } + + return true, nil + }) + + c.AssertNoErr(err) +} + +func (c *context) deleteBackup() { + err := backups.Delete(c.client, c.backupID).ExtractErr() + c.AssertNoErr(err) + c.Logf("Deleted backup %s", c.backupID) +} diff --git a/vendor/github.com/rackspace/gophercloud/acceptance/rackspace/db/v1/common.go b/vendor/github.com/rackspace/gophercloud/acceptance/rackspace/db/v1/common.go new file mode 100644 index 000000000..24512b99a --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/acceptance/rackspace/db/v1/common.go @@ -0,0 +1,73 @@ +// +build acceptance db rackspace + +package v1 + +import ( + "testing" + + "github.com/rackspace/gophercloud" + "github.com/rackspace/gophercloud/acceptance/tools" + "github.com/rackspace/gophercloud/rackspace" + "github.com/rackspace/gophercloud/rackspace/db/v1/instances" + th "github.com/rackspace/gophercloud/testhelper" +) + +func newClient(t *testing.T) *gophercloud.ServiceClient { + opts, err := rackspace.AuthOptionsFromEnv() + th.AssertNoErr(t, err) + opts = tools.OnlyRS(opts) + + client, err := rackspace.AuthenticatedClient(opts) + th.AssertNoErr(t, err) + + c, err := rackspace.NewDBV1(client, gophercloud.EndpointOpts{ + Region: "IAD", + }) + th.AssertNoErr(t, err) + + return c +} + +type context struct { + test *testing.T + client *gophercloud.ServiceClient + instanceID string + DBIDs []string + replicaID string + backupID string + configGroupID string + users []string +} + +func newContext(t *testing.T) context { + return context{ + test: t, + client: newClient(t), + } +} + +func (c context) Logf(msg string, args ...interface{}) { + if len(args) > 0 { + c.test.Logf(msg, args...) 
+ } else { + c.test.Log(msg) + } +} + +func (c context) AssertNoErr(err error) { + th.AssertNoErr(c.test, err) +} + +func (c context) WaitUntilActive(id string) { + err := gophercloud.WaitFor(60, func() (bool, error) { + inst, err := instances.Get(c.client, id).Extract() + if err != nil { + return false, err + } + if inst.Status == "ACTIVE" { + return true, nil + } + return false, nil + }) + c.AssertNoErr(err) +} diff --git a/vendor/github.com/rackspace/gophercloud/acceptance/rackspace/db/v1/config_group_test.go b/vendor/github.com/rackspace/gophercloud/acceptance/rackspace/db/v1/config_group_test.go new file mode 100644 index 000000000..81bd40abc --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/acceptance/rackspace/db/v1/config_group_test.go @@ -0,0 +1,93 @@ +// +build acceptance db rackspace + +package v1 + +import ( + "github.com/rackspace/gophercloud/acceptance/tools" + os "github.com/rackspace/gophercloud/openstack/db/v1/configurations" + "github.com/rackspace/gophercloud/pagination" + config "github.com/rackspace/gophercloud/rackspace/db/v1/configurations" + "github.com/rackspace/gophercloud/rackspace/db/v1/instances" +) + +func (c *context) createConfigGrp() { + opts := os.CreateOpts{ + Name: tools.RandomString("config_", 5), + Values: map[string]interface{}{ + "connect_timeout": 300, + "join_buffer_size": 900000, + }, + } + + cg, err := config.Create(c.client, opts).Extract() + + c.AssertNoErr(err) + c.Logf("Created config group %#v", cg) + + c.configGroupID = cg.ID +} + +func (c *context) getConfigGrp() { + cg, err := config.Get(c.client, c.configGroupID).Extract() + c.Logf("Getting config group: %#v", cg) + c.AssertNoErr(err) +} + +func (c *context) updateConfigGrp() { + opts := os.UpdateOpts{ + Name: tools.RandomString("new_name_", 5), + Values: map[string]interface{}{ + "connect_timeout": 250, + }, + } + err := config.Update(c.client, c.configGroupID, opts).ExtractErr() + c.Logf("Updated config group %s", c.configGroupID) + c.AssertNoErr(err) +} + +func (c *context) replaceConfigGrp() { + opts := os.UpdateOpts{ + Values: map[string]interface{}{ + "big_tables": 1, + }, + } + + err := config.Replace(c.client, c.configGroupID, opts).ExtractErr() + c.Logf("Replaced values for config group %s", c.configGroupID) + c.AssertNoErr(err) +} + +func (c *context) associateInstanceWithConfigGrp() { + err := instances.AssociateWithConfigGroup(c.client, c.instanceID, c.configGroupID).ExtractErr() + c.Logf("Associated instance %s with config group %s", c.instanceID, c.configGroupID) + c.AssertNoErr(err) +} + +func (c *context) listConfigGrpInstances() { + c.Logf("Listing all instances associated with config group %s", c.configGroupID) + + err := config.ListInstances(c.client, c.configGroupID).EachPage(func(page pagination.Page) (bool, error) { + instanceList, err := instances.ExtractInstances(page) + c.AssertNoErr(err) + + for _, instance := range instanceList { + c.Logf("Instance: %#v", instance) + } + + return true, nil + }) + + c.AssertNoErr(err) +} + +func (c *context) deleteConfigGrp() { + err := config.Delete(c.client, c.configGroupID).ExtractErr() + c.Logf("Deleted config group %s", c.configGroupID) + c.AssertNoErr(err) +} + +func (c *context) detachInstanceFromGrp() { + err := instances.DetachFromConfigGroup(c.client, c.instanceID).ExtractErr() + c.Logf("Detached instance %s from config groups", c.instanceID) + c.AssertNoErr(err) +} diff --git a/vendor/github.com/rackspace/gophercloud/acceptance/rackspace/db/v1/database_test.go 
b/vendor/github.com/rackspace/gophercloud/acceptance/rackspace/db/v1/database_test.go
new file mode 100644
index 000000000..d5c448f6e
--- /dev/null
+++ b/vendor/github.com/rackspace/gophercloud/acceptance/rackspace/db/v1/database_test.go
@@ -0,0 +1,54 @@
+// +build acceptance db rackspace
+
+package v1
+
+import (
+	"github.com/rackspace/gophercloud/acceptance/tools"
+	db "github.com/rackspace/gophercloud/openstack/db/v1/databases"
+	"github.com/rackspace/gophercloud/pagination"
+)
+
+func (c *context) createDBs() {
+	dbs := []string{
+		tools.RandomString("db_", 5),
+		tools.RandomString("db_", 5),
+		tools.RandomString("db_", 5),
+	}
+
+	opts := db.BatchCreateOpts{
+		db.CreateOpts{Name: dbs[0]},
+		db.CreateOpts{Name: dbs[1]},
+		db.CreateOpts{Name: dbs[2]},
+	}
+
+	err := db.Create(c.client, c.instanceID, opts).ExtractErr()
+	c.Logf("Created three databases on instance %s: %s, %s, %s", c.instanceID, dbs[0], dbs[1], dbs[2])
+	c.AssertNoErr(err)
+
+	c.DBIDs = dbs
+}
+
+func (c *context) listDBs() {
+	c.Logf("Listing databases on instance %s", c.instanceID)
+
+	err := db.List(c.client, c.instanceID).EachPage(func(page pagination.Page) (bool, error) {
+		dbList, err := db.ExtractDBs(page)
+		c.AssertNoErr(err)
+
+		for _, db := range dbList {
+			c.Logf("DB: %#v", db)
+		}
+
+		return true, nil
+	})
+
+	c.AssertNoErr(err)
+}
+
+func (c *context) deleteDBs() {
+	for _, id := range c.DBIDs {
+		err := db.Delete(c.client, c.instanceID, id).ExtractErr()
+		c.AssertNoErr(err)
+		c.Logf("Deleted DB %s", id)
+	}
+}
diff --git a/vendor/github.com/rackspace/gophercloud/acceptance/rackspace/db/v1/flavor_test.go b/vendor/github.com/rackspace/gophercloud/acceptance/rackspace/db/v1/flavor_test.go
new file mode 100644
index 000000000..0d6e6df61
--- /dev/null
+++ b/vendor/github.com/rackspace/gophercloud/acceptance/rackspace/db/v1/flavor_test.go
@@ -0,0 +1,32 @@
+// +build acceptance db rackspace
+
+package v1
+
+import (
+	os "github.com/rackspace/gophercloud/openstack/db/v1/flavors"
+	"github.com/rackspace/gophercloud/pagination"
+	"github.com/rackspace/gophercloud/rackspace/db/v1/flavors"
+)
+
+func (c context) listFlavors() {
+	c.Logf("Listing flavors")
+
+	err := flavors.List(c.client).EachPage(func(page pagination.Page) (bool, error) {
+		flavorList, err := os.ExtractFlavors(page)
+		c.AssertNoErr(err)
+
+		for _, f := range flavorList {
+			c.Logf("Flavor: ID [%s] Name [%s] RAM [%d]", f.ID, f.Name, f.RAM)
+		}
+
+		return true, nil
+	})
+
+	c.AssertNoErr(err)
+}
+
+func (c context) getFlavor() {
+	flavor, err := flavors.Get(c.client, "1").Extract()
+	c.AssertNoErr(err)
+	c.Logf("Getting flavor %s", flavor.ID)
+}
diff --git a/vendor/github.com/rackspace/gophercloud/acceptance/rackspace/db/v1/instance_test.go b/vendor/github.com/rackspace/gophercloud/acceptance/rackspace/db/v1/instance_test.go
new file mode 100644
index 000000000..b5540e3bb
--- /dev/null
+++ b/vendor/github.com/rackspace/gophercloud/acceptance/rackspace/db/v1/instance_test.go
@@ -0,0 +1,170 @@
+// +build acceptance db rackspace
+
+package v1
+
+import (
+	"testing"
+
+	"github.com/rackspace/gophercloud/acceptance/tools"
+	"github.com/rackspace/gophercloud/pagination"
+	"github.com/rackspace/gophercloud/rackspace/db/v1/instances"
+	th "github.com/rackspace/gophercloud/testhelper"
+)
+
+func TestRunner(t *testing.T) {
+	c := newContext(t)
+
+	// FLAVOR tests
+	c.listFlavors()
+	c.getFlavor()
+
+	// INSTANCE tests
+	c.createInstance()
+	c.listInstances()
+	c.getInstance()
+	c.isRootEnabled()
+	c.enableRootUser()
+	c.isRootEnabled()
+	c.restartInstance()
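+	// Each resize call blocks until the instance reports ACTIVE, so the flavor resize finishes before the volume resize starts.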
+	c.resizeInstance()
+	c.resizeVol()
+	c.getDefaultConfig()
+
+	// REPLICA tests
+	c.createReplica()
+	c.detachReplica()
+
+	// BACKUP tests
+	c.createBackup()
+	c.getBackup()
+	c.listAllBackups()
+	c.listInstanceBackups()
+	c.deleteBackup()
+
+	// CONFIG GROUP tests
+	c.createConfigGrp()
+	c.getConfigGrp()
+	c.updateConfigGrp()
+	c.replaceConfigGrp()
+	c.associateInstanceWithConfigGrp()
+	c.listConfigGrpInstances()
+	c.detachInstanceFromGrp()
+	c.deleteConfigGrp()
+
+	// DATABASE tests
+	c.createDBs()
+	c.listDBs()
+
+	// USER tests
+	c.createUsers()
+	c.listUsers()
+	c.changeUserPwd()
+	c.getUser()
+	c.updateUser()
+	c.listUserAccess()
+	c.revokeUserAccess()
+	c.grantUserAccess()
+
+	// TEARDOWN
+	c.deleteUsers()
+	c.deleteDBs()
+
+	c.restartInstance()
+	c.WaitUntilActive(c.instanceID)
+
+	c.deleteInstance(c.replicaID)
+	c.deleteInstance(c.instanceID)
+}
+
+func (c *context) createInstance() {
+	opts := instances.CreateOpts{
+		FlavorRef: "1",
+		Size: 1,
+		Name: tools.RandomString("gopher_db", 5),
+	}
+
+	instance, err := instances.Create(c.client, opts).Extract()
+	th.AssertNoErr(c.test, err)
+
+	c.Logf("Creating %s. Waiting...", instance.ID)
+	c.WaitUntilActive(instance.ID)
+	c.Logf("Created instance %s", instance.ID)
+
+	c.instanceID = instance.ID
+}
+
+func (c *context) listInstances() {
+	c.Logf("Listing instances")
+
+	err := instances.List(c.client, nil).EachPage(func(page pagination.Page) (bool, error) {
+		instanceList, err := instances.ExtractInstances(page)
+		c.AssertNoErr(err)
+
+		for _, i := range instanceList {
+			c.Logf("Instance: ID [%s] Name [%s] Status [%s] VolSize [%d] Datastore Type [%s]",
+				i.ID, i.Name, i.Status, i.Volume.Size, i.Datastore.Type)
+		}
+
+		return true, nil
+	})
+
+	c.AssertNoErr(err)
+}
+
+func (c *context) getInstance() {
+	instance, err := instances.Get(c.client, c.instanceID).Extract()
+	c.AssertNoErr(err)
+	c.Logf("Getting instance: %#v", instance)
+}
+
+func (c *context) deleteInstance(id string) {
+	err := instances.Delete(c.client, id).ExtractErr()
+	c.AssertNoErr(err)
+	c.Logf("Deleted instance %s", id)
+}
+
+func (c *context) enableRootUser() {
+	_, err := instances.EnableRootUser(c.client, c.instanceID).Extract()
+	c.AssertNoErr(err)
+	c.Logf("Enabled root user on %s", c.instanceID)
+}
+
+func (c *context) isRootEnabled() {
+	enabled, err := instances.IsRootEnabled(c.client, c.instanceID)
+	c.AssertNoErr(err)
+	c.Logf("Is root enabled? %t", enabled)
+}
+
+func (c *context) restartInstance() {
+	id := c.instanceID
+	err := instances.Restart(c.client, id).ExtractErr()
+	c.AssertNoErr(err)
+	c.Logf("Restarting %s. Waiting...", id)
+	c.WaitUntilActive(id)
+	c.Logf("Restarted %s", id)
+}
+
+func (c *context) resizeInstance() {
+	id := c.instanceID
+	err := instances.Resize(c.client, id, "2").ExtractErr()
+	c.AssertNoErr(err)
+	c.Logf("Resizing %s. Waiting...", id)
+	c.WaitUntilActive(id)
+	c.Logf("Resized %s with flavorRef %s", id, "2")
+}
+
+func (c *context) resizeVol() {
+	id := c.instanceID
+	err := instances.ResizeVolume(c.client, id, 2).ExtractErr()
+	c.AssertNoErr(err)
+	c.Logf("Resizing volume of %s. 
Waiting...", id) + c.WaitUntilActive(id) + c.Logf("Resized the volume of %s to %d GB", id, 2) +} + +func (c *context) getDefaultConfig() { + config, err := instances.GetDefaultConfig(c.client, c.instanceID).Extract() + c.Logf("Default config group for instance %s: %#v", c.instanceID, config) + c.AssertNoErr(err) +} diff --git a/vendor/github.com/rackspace/gophercloud/acceptance/rackspace/db/v1/pkg.go b/vendor/github.com/rackspace/gophercloud/acceptance/rackspace/db/v1/pkg.go new file mode 100644 index 000000000..b7b1f993d --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/acceptance/rackspace/db/v1/pkg.go @@ -0,0 +1 @@ +package v1 diff --git a/vendor/github.com/rackspace/gophercloud/acceptance/rackspace/db/v1/replica_test.go b/vendor/github.com/rackspace/gophercloud/acceptance/rackspace/db/v1/replica_test.go new file mode 100644 index 000000000..89edf9d14 --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/acceptance/rackspace/db/v1/replica_test.go @@ -0,0 +1,33 @@ +// +build acceptance db rackspace + +package v1 + +import ( + "github.com/rackspace/gophercloud/acceptance/tools" + "github.com/rackspace/gophercloud/rackspace/db/v1/instances" + th "github.com/rackspace/gophercloud/testhelper" +) + +func (c *context) createReplica() { + opts := instances.CreateOpts{ + FlavorRef: "2", + Size: 1, + Name: tools.RandomString("gopher_db", 5), + ReplicaOf: c.instanceID, + } + + repl, err := instances.Create(c.client, opts).Extract() + th.AssertNoErr(c.test, err) + + c.Logf("Creating replica of %s. Waiting...", c.instanceID) + c.WaitUntilActive(repl.ID) + c.Logf("Created replica %#v", repl) + + c.replicaID = repl.ID +} + +func (c *context) detachReplica() { + err := instances.DetachReplica(c.client, c.replicaID).ExtractErr() + c.Logf("Detached replica %s", c.replicaID) + c.AssertNoErr(err) +} diff --git a/vendor/github.com/rackspace/gophercloud/acceptance/rackspace/db/v1/user_test.go b/vendor/github.com/rackspace/gophercloud/acceptance/rackspace/db/v1/user_test.go new file mode 100644 index 000000000..0488f5dd1 --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/acceptance/rackspace/db/v1/user_test.go @@ -0,0 +1,125 @@ +// +build acceptance db rackspace + +package v1 + +import ( + "github.com/rackspace/gophercloud/acceptance/tools" + db "github.com/rackspace/gophercloud/openstack/db/v1/databases" + os "github.com/rackspace/gophercloud/openstack/db/v1/users" + "github.com/rackspace/gophercloud/pagination" + "github.com/rackspace/gophercloud/rackspace/db/v1/users" +) + +func (c *context) createUsers() { + c.users = []string{ + tools.RandomString("user_", 5), + tools.RandomString("user_", 5), + tools.RandomString("user_", 5), + } + + db1 := db.CreateOpts{Name: c.DBIDs[0]} + db2 := db.CreateOpts{Name: c.DBIDs[1]} + db3 := db.CreateOpts{Name: c.DBIDs[2]} + + opts := os.BatchCreateOpts{ + os.CreateOpts{ + Name: c.users[0], + Password: tools.RandomString("db_", 5), + Databases: db.BatchCreateOpts{db1, db2, db3}, + }, + os.CreateOpts{ + Name: c.users[1], + Password: tools.RandomString("db_", 5), + Databases: db.BatchCreateOpts{db1, db2}, + }, + os.CreateOpts{ + Name: c.users[2], + Password: tools.RandomString("db_", 5), + Databases: db.BatchCreateOpts{db3}, + }, + } + + err := users.Create(c.client, c.instanceID, opts).ExtractErr() + c.Logf("Created three users on instance %s: %s, %s, %s", c.instanceID, c.users[0], c.users[1], c.users[2]) + c.AssertNoErr(err) +} + +func (c *context) listUsers() { + c.Logf("Listing users on instance %s", c.instanceID) + + err := os.List(c.client, 
c.instanceID).EachPage(func(page pagination.Page) (bool, error) {
+		uList, err := os.ExtractUsers(page)
+		c.AssertNoErr(err)
+
+		for _, u := range uList {
+			c.Logf("User: %#v", u)
+		}
+
+		return true, nil
+	})
+
+	c.AssertNoErr(err)
+}
+
+func (c *context) deleteUsers() {
+	for _, id := range c.users {
+		err := users.Delete(c.client, c.instanceID, id).ExtractErr()
+		c.AssertNoErr(err)
+		c.Logf("Deleted user %s", id)
+	}
+}
+
+func (c *context) changeUserPwd() {
+	opts := os.BatchCreateOpts{}
+
+	for _, name := range c.users[:1] {
+		opts = append(opts, os.CreateOpts{Name: name, Password: tools.RandomString("", 5)})
+	}
+
+	err := users.ChangePassword(c.client, c.instanceID, opts).ExtractErr()
+	c.Logf("Updated passwords for %d user(s)", len(opts))
+	c.AssertNoErr(err)
+}
+
+func (c *context) getUser() {
+	user, err := users.Get(c.client, c.instanceID, c.users[0]).Extract()
+	c.AssertNoErr(err)
+	c.Logf("Got user %#v", user)
+}
+
+func (c *context) updateUser() {
+	opts := users.UpdateOpts{Name: tools.RandomString("new_name_", 5)}
+	err := users.Update(c.client, c.instanceID, c.users[0], opts).ExtractErr()
+	c.Logf("Updated user %s", c.users[0])
+	c.AssertNoErr(err)
+	c.users[0] = opts.Name
+}
+
+func (c *context) listUserAccess() {
+	err := users.ListAccess(c.client, c.instanceID, c.users[0]).EachPage(func(page pagination.Page) (bool, error) {
+		dbList, err := users.ExtractDBs(page)
+		c.AssertNoErr(err)
+
+		for _, db := range dbList {
+			c.Logf("User %s has access to DB: %#v", c.users[0], db)
+		}
+
+		return true, nil
+	})
+
+	c.AssertNoErr(err)
+}
+
+func (c *context) grantUserAccess() {
+	opts := db.BatchCreateOpts{db.CreateOpts{Name: c.DBIDs[0]}}
+	err := users.GrantAccess(c.client, c.instanceID, c.users[0], opts).ExtractErr()
+	c.Logf("Granted access for user %s to DB %s", c.users[0], c.DBIDs[0])
+	c.AssertNoErr(err)
+}
+
+func (c *context) revokeUserAccess() {
+	dbName, userName := c.DBIDs[0], c.users[0]
+	err := users.RevokeAccess(c.client, c.instanceID, userName, dbName).ExtractErr()
+	c.Logf("Revoked access for user %s to DB %s", userName, dbName)
+	c.AssertNoErr(err)
+}
diff --git a/vendor/github.com/rackspace/gophercloud/acceptance/rackspace/identity/v2/extension_test.go b/vendor/github.com/rackspace/gophercloud/acceptance/rackspace/identity/v2/extension_test.go
new file mode 100644
index 000000000..a50e01552
--- /dev/null
+++ b/vendor/github.com/rackspace/gophercloud/acceptance/rackspace/identity/v2/extension_test.go
@@ -0,0 +1,54 @@
+// +build acceptance
+
+package v2
+
+import (
+	"testing"
+
+	"github.com/rackspace/gophercloud/pagination"
+	extensions2 "github.com/rackspace/gophercloud/rackspace/identity/v2/extensions"
+	th "github.com/rackspace/gophercloud/testhelper"
+)
+
+func TestExtensions(t *testing.T) {
+	service := authenticatedClient(t)
+
+	t.Logf("Extensions available on this identity endpoint:")
+	count := 0
+	var chosen string
+	err := extensions2.List(service).EachPage(func(page pagination.Page) (bool, error) {
+		t.Logf("--- Page %02d ---", count)
+
+		extensions, err := extensions2.ExtractExtensions(page)
+		th.AssertNoErr(t, err)
+
+		for i, ext := range extensions {
+			if chosen == "" {
+				chosen = ext.Alias
+			}
+
+			t.Logf("[%02d] name=[%s] namespace=[%s]", i, ext.Name, ext.Namespace)
+			t.Logf(" alias=[%s] updated=[%s]", ext.Alias, ext.Updated)
+			t.Logf(" description=[%s]", ext.Description)
+		}
+
+		count++
+		return true, nil
+	})
+	th.AssertNoErr(t, err)
+
+	if chosen == "" {
+		t.Logf("No extensions found.")
+		return
+	}
+
+	ext, err := extensions2.Get(service, chosen).Extract()
+	
th.AssertNoErr(t, err) + + t.Logf("Detail for extension [%s]:", chosen) + t.Logf(" name=[%s]", ext.Name) + t.Logf(" namespace=[%s]", ext.Namespace) + t.Logf(" alias=[%s]", ext.Alias) + t.Logf(" updated=[%s]", ext.Updated) + t.Logf(" description=[%s]", ext.Description) +} diff --git a/vendor/github.com/rackspace/gophercloud/acceptance/rackspace/identity/v2/identity_test.go b/vendor/github.com/rackspace/gophercloud/acceptance/rackspace/identity/v2/identity_test.go new file mode 100644 index 000000000..1182982f4 --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/acceptance/rackspace/identity/v2/identity_test.go @@ -0,0 +1,50 @@ +// +build acceptance + +package v2 + +import ( + "testing" + + "github.com/rackspace/gophercloud" + "github.com/rackspace/gophercloud/acceptance/tools" + "github.com/rackspace/gophercloud/rackspace" + th "github.com/rackspace/gophercloud/testhelper" +) + +func rackspaceAuthOptions(t *testing.T) gophercloud.AuthOptions { + // Obtain credentials from the environment. + options, err := rackspace.AuthOptionsFromEnv() + th.AssertNoErr(t, err) + options = tools.OnlyRS(options) + + if options.Username == "" { + t.Fatal("Please provide a Rackspace username as RS_USERNAME.") + } + if options.APIKey == "" { + t.Fatal("Please provide a Rackspace API key as RS_API_KEY.") + } + + return options +} + +func createClient(t *testing.T, auth bool) *gophercloud.ServiceClient { + ao := rackspaceAuthOptions(t) + + provider, err := rackspace.NewClient(ao.IdentityEndpoint) + th.AssertNoErr(t, err) + + if auth { + err = rackspace.Authenticate(provider, ao) + th.AssertNoErr(t, err) + } + + return rackspace.NewIdentityV2(provider) +} + +func unauthenticatedClient(t *testing.T) *gophercloud.ServiceClient { + return createClient(t, false) +} + +func authenticatedClient(t *testing.T) *gophercloud.ServiceClient { + return createClient(t, true) +} diff --git a/vendor/github.com/rackspace/gophercloud/acceptance/rackspace/identity/v2/pkg.go b/vendor/github.com/rackspace/gophercloud/acceptance/rackspace/identity/v2/pkg.go new file mode 100644 index 000000000..5ec3cc8e8 --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/acceptance/rackspace/identity/v2/pkg.go @@ -0,0 +1 @@ +package v2 diff --git a/vendor/github.com/rackspace/gophercloud/acceptance/rackspace/identity/v2/role_test.go b/vendor/github.com/rackspace/gophercloud/acceptance/rackspace/identity/v2/role_test.go new file mode 100644 index 000000000..efaeb75cd --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/acceptance/rackspace/identity/v2/role_test.go @@ -0,0 +1,59 @@ +// +build acceptance identity roles + +package v2 + +import ( + "testing" + + "github.com/rackspace/gophercloud" + os "github.com/rackspace/gophercloud/openstack/identity/v2/extensions/admin/roles" + + "github.com/rackspace/gophercloud/pagination" + "github.com/rackspace/gophercloud/rackspace/identity/v2/roles" + th "github.com/rackspace/gophercloud/testhelper" +) + +func TestRoles(t *testing.T) { + client := authenticatedClient(t) + + userID := createUser(t, client) + roleID := listRoles(t, client) + + addUserRole(t, client, userID, roleID) + + deleteUserRole(t, client, userID, roleID) + + deleteUser(t, client, userID) +} + +func listRoles(t *testing.T, client *gophercloud.ServiceClient) string { + var roleID string + + err := roles.List(client).EachPage(func(page pagination.Page) (bool, error) { + roleList, err := os.ExtractRoles(page) + th.AssertNoErr(t, err) + + for _, role := range roleList { + t.Logf("Listing role: ID [%s] Name [%s]", role.ID, 
role.Name) + roleID = role.ID + } + + return true, nil + }) + + th.AssertNoErr(t, err) + + return roleID +} + +func addUserRole(t *testing.T, client *gophercloud.ServiceClient, userID, roleID string) { + err := roles.AddUserRole(client, userID, roleID).ExtractErr() + th.AssertNoErr(t, err) + t.Logf("Added role %s to user %s", roleID, userID) +} + +func deleteUserRole(t *testing.T, client *gophercloud.ServiceClient, userID, roleID string) { + err := roles.DeleteUserRole(client, userID, roleID).ExtractErr() + th.AssertNoErr(t, err) + t.Logf("Removed role %s from user %s", roleID, userID) +} diff --git a/vendor/github.com/rackspace/gophercloud/acceptance/rackspace/identity/v2/tenant_test.go b/vendor/github.com/rackspace/gophercloud/acceptance/rackspace/identity/v2/tenant_test.go new file mode 100644 index 000000000..6081a498e --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/acceptance/rackspace/identity/v2/tenant_test.go @@ -0,0 +1,37 @@ +// +build acceptance + +package v2 + +import ( + "testing" + + "github.com/rackspace/gophercloud/pagination" + rstenants "github.com/rackspace/gophercloud/rackspace/identity/v2/tenants" + th "github.com/rackspace/gophercloud/testhelper" +) + +func TestTenants(t *testing.T) { + service := authenticatedClient(t) + + t.Logf("Tenants available to the currently issued token:") + count := 0 + err := rstenants.List(service, nil).EachPage(func(page pagination.Page) (bool, error) { + t.Logf("--- Page %02d ---", count) + + tenants, err := rstenants.ExtractTenants(page) + th.AssertNoErr(t, err) + + for i, tenant := range tenants { + t.Logf("[%02d] id=[%s]", i, tenant.ID) + t.Logf(" name=[%s] enabled=[%v]", tenant.Name, tenant.Enabled) + t.Logf(" description=[%s]", tenant.Description) + } + + count++ + return true, nil + }) + th.AssertNoErr(t, err) + if count == 0 { + t.Errorf("No tenants listed for your current token.") + } +} diff --git a/vendor/github.com/rackspace/gophercloud/acceptance/rackspace/identity/v2/tokens_test.go b/vendor/github.com/rackspace/gophercloud/acceptance/rackspace/identity/v2/tokens_test.go new file mode 100644 index 000000000..95ee7e660 --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/acceptance/rackspace/identity/v2/tokens_test.go @@ -0,0 +1,61 @@ +// +build acceptance + +package v2 + +import ( + "os" + "testing" + + "github.com/rackspace/gophercloud" + "github.com/rackspace/gophercloud/acceptance/tools" + "github.com/rackspace/gophercloud/rackspace" + "github.com/rackspace/gophercloud/rackspace/identity/v2/tokens" + th "github.com/rackspace/gophercloud/testhelper" +) + +func rackspaceAuthOptions(t *testing.T) gophercloud.AuthOptions { + // Obtain credentials from the environment. 
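+ // RS_USERNAME and RS_API_KEY must both be set; the checks below fail the test if either is missing.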
+ options, err := rackspace.AuthOptionsFromEnv() + th.AssertNoErr(t, err) + options = tools.OnlyRS(options) + + if options.Username == "" { + t.Fatal("Please provide a Rackspace username as RS_USERNAME.") + } + if options.APIKey == "" { + t.Fatal("Please provide a Rackspace API key as RS_API_KEY.") + } + + return options +} + +func createClient(t *testing.T, auth bool) *gophercloud.ServiceClient { + ao := rackspaceAuthOptions(t) + + provider, err := rackspace.NewClient(ao.IdentityEndpoint) + th.AssertNoErr(t, err) + + if auth { + err = rackspace.Authenticate(provider, ao) + th.AssertNoErr(t, err) + } + + return rackspace.NewIdentityV2(provider) +} + +func TestTokenAuth(t *testing.T) { + authedClient := createClient(t, true) + token := authedClient.TokenID + + tenantID := os.Getenv("RS_TENANT_ID") + if tenantID == "" { + t.Skip("You must set RS_TENANT_ID environment variable to run this test") + } + + authOpts := tokens.AuthOptions{} + authOpts.TenantID = tenantID + authOpts.Token = token + + _, err := tokens.Create(authedClient, authOpts).ExtractToken() + th.AssertNoErr(t, err) +} diff --git a/vendor/github.com/rackspace/gophercloud/acceptance/rackspace/identity/v2/user_test.go b/vendor/github.com/rackspace/gophercloud/acceptance/rackspace/identity/v2/user_test.go new file mode 100644 index 000000000..28c0c832b --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/acceptance/rackspace/identity/v2/user_test.go @@ -0,0 +1,93 @@ +// +build acceptance identity + +package v2 + +import ( + "strconv" + "testing" + + "github.com/rackspace/gophercloud" + "github.com/rackspace/gophercloud/acceptance/tools" + os "github.com/rackspace/gophercloud/openstack/identity/v2/users" + "github.com/rackspace/gophercloud/pagination" + "github.com/rackspace/gophercloud/rackspace/identity/v2/users" + th "github.com/rackspace/gophercloud/testhelper" +) + +func TestUsers(t *testing.T) { + client := authenticatedClient(t) + + userID := createUser(t, client) + + listUsers(t, client) + + getUser(t, client, userID) + + updateUser(t, client, userID) + + resetApiKey(t, client, userID) + + deleteUser(t, client, userID) +} + +func createUser(t *testing.T, client *gophercloud.ServiceClient) string { + t.Log("Creating user") + + opts := users.CreateOpts{ + Username: tools.RandomString("user_", 5), + Enabled: os.Disabled, + Email: "new_user@foo.com", + } + + user, err := users.Create(client, opts).Extract() + th.AssertNoErr(t, err) + t.Logf("Created user %s", user.ID) + + return user.ID +} + +func listUsers(t *testing.T, client *gophercloud.ServiceClient) { + err := users.List(client).EachPage(func(page pagination.Page) (bool, error) { + userList, err := os.ExtractUsers(page) + th.AssertNoErr(t, err) + + for _, user := range userList { + t.Logf("Listing user: ID [%s] Username [%s] Email [%s] Enabled? 
[%s]", + user.ID, user.Username, user.Email, strconv.FormatBool(user.Enabled)) + } + + return true, nil + }) + + th.AssertNoErr(t, err) +} + +func getUser(t *testing.T, client *gophercloud.ServiceClient, userID string) { + _, err := users.Get(client, userID).Extract() + th.AssertNoErr(t, err) + t.Logf("Getting user %s", userID) +} + +func updateUser(t *testing.T, client *gophercloud.ServiceClient, userID string) { + opts := users.UpdateOpts{Username: tools.RandomString("new_name", 5), Email: "new@foo.com"} + user, err := users.Update(client, userID, opts).Extract() + th.AssertNoErr(t, err) + t.Logf("Updated user %s: Username [%s] Email [%s]", userID, user.Username, user.Email) +} + +func deleteUser(t *testing.T, client *gophercloud.ServiceClient, userID string) { + res := users.Delete(client, userID) + th.AssertNoErr(t, res.Err) + t.Logf("Deleted user %s", userID) +} + +func resetApiKey(t *testing.T, client *gophercloud.ServiceClient, userID string) { + key, err := users.ResetAPIKey(client, userID).Extract() + th.AssertNoErr(t, err) + + if key.APIKey == "" { + t.Fatal("failed to reset API key for user") + } + + t.Logf("Reset API key for user %s to %s", key.Username, key.APIKey) +} diff --git a/vendor/github.com/rackspace/gophercloud/acceptance/rackspace/lb/v1/acl_test.go b/vendor/github.com/rackspace/gophercloud/acceptance/rackspace/lb/v1/acl_test.go new file mode 100644 index 000000000..7a380273c --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/acceptance/rackspace/lb/v1/acl_test.go @@ -0,0 +1,94 @@ +// +build acceptance lbs + +package v1 + +import ( + "testing" + + "github.com/rackspace/gophercloud" + "github.com/rackspace/gophercloud/pagination" + "github.com/rackspace/gophercloud/rackspace/lb/v1/acl" + "github.com/rackspace/gophercloud/rackspace/lb/v1/lbs" + th "github.com/rackspace/gophercloud/testhelper" +) + +func TestACL(t *testing.T) { + client := setup(t) + + ids := createLB(t, client, 1) + lbID := ids[0] + + createACL(t, client, lbID) + + waitForLB(client, lbID, lbs.ACTIVE) + + networkIDs := showACL(t, client, lbID) + + deleteNetworkItem(t, client, lbID, networkIDs[0]) + waitForLB(client, lbID, lbs.ACTIVE) + + bulkDeleteACL(t, client, lbID, networkIDs[1:2]) + waitForLB(client, lbID, lbs.ACTIVE) + + deleteACL(t, client, lbID) + waitForLB(client, lbID, lbs.ACTIVE) + + deleteLB(t, client, lbID) +} + +func createACL(t *testing.T, client *gophercloud.ServiceClient, lbID int) { + opts := acl.CreateOpts{ + acl.CreateOpt{Address: "206.160.163.21", Type: acl.DENY}, + acl.CreateOpt{Address: "206.160.165.11", Type: acl.DENY}, + acl.CreateOpt{Address: "206.160.165.12", Type: acl.DENY}, + acl.CreateOpt{Address: "206.160.165.13", Type: acl.ALLOW}, + } + + err := acl.Create(client, lbID, opts).ExtractErr() + th.AssertNoErr(t, err) + + t.Logf("Created ACL items for LB %d", lbID) +} + +func showACL(t *testing.T, client *gophercloud.ServiceClient, lbID int) []int { + ids := []int{} + + err := acl.List(client, lbID).EachPage(func(page pagination.Page) (bool, error) { + accessList, err := acl.ExtractAccessList(page) + th.AssertNoErr(t, err) + + for _, i := range accessList { + t.Logf("Listing network item: ID [%s] Address [%s] Type [%s]", i.ID, i.Address, i.Type) + ids = append(ids, i.ID) + } + + return true, nil + }) + th.AssertNoErr(t, err) + + return ids +} + +func deleteNetworkItem(t *testing.T, client *gophercloud.ServiceClient, lbID, itemID int) { + err := acl.Delete(client, lbID, itemID).ExtractErr() + + th.AssertNoErr(t, err) + + t.Logf("Deleted network item %d", itemID) +} + 
+func bulkDeleteACL(t *testing.T, client *gophercloud.ServiceClient, lbID int, items []int) { + err := acl.BulkDelete(client, lbID, items).ExtractErr() + + th.AssertNoErr(t, err) + + t.Logf("Deleted network items %s", intsToStr(items)) +} + +func deleteACL(t *testing.T, client *gophercloud.ServiceClient, lbID int) { + err := acl.DeleteAll(client, lbID).ExtractErr() + + th.AssertNoErr(t, err) + + t.Logf("Deleted ACL from LB %d", lbID) +} diff --git a/vendor/github.com/rackspace/gophercloud/acceptance/rackspace/lb/v1/common.go b/vendor/github.com/rackspace/gophercloud/acceptance/rackspace/lb/v1/common.go new file mode 100644 index 000000000..4ce05e69c --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/acceptance/rackspace/lb/v1/common.go @@ -0,0 +1,62 @@ +// +build acceptance lbs + +package v1 + +import ( + "os" + "strconv" + "strings" + "testing" + + "github.com/rackspace/gophercloud" + "github.com/rackspace/gophercloud/acceptance/tools" + "github.com/rackspace/gophercloud/rackspace" + th "github.com/rackspace/gophercloud/testhelper" +) + +func newProvider() (*gophercloud.ProviderClient, error) { + opts, err := rackspace.AuthOptionsFromEnv() + if err != nil { + return nil, err + } + opts = tools.OnlyRS(opts) + + return rackspace.AuthenticatedClient(opts) +} + +func newClient() (*gophercloud.ServiceClient, error) { + provider, err := newProvider() + if err != nil { + return nil, err + } + + return rackspace.NewLBV1(provider, gophercloud.EndpointOpts{ + Region: os.Getenv("RS_REGION"), + }) +} + +func newComputeClient() (*gophercloud.ServiceClient, error) { + provider, err := newProvider() + if err != nil { + return nil, err + } + + return rackspace.NewComputeV2(provider, gophercloud.EndpointOpts{ + Region: os.Getenv("RS_REGION"), + }) +} + +func setup(t *testing.T) *gophercloud.ServiceClient { + client, err := newClient() + th.AssertNoErr(t, err) + + return client +} + +func intsToStr(ids []int) string { + strIDs := []string{} + for _, id := range ids { + strIDs = append(strIDs, strconv.Itoa(id)) + } + return strings.Join(strIDs, ", ") +} diff --git a/vendor/github.com/rackspace/gophercloud/acceptance/rackspace/lb/v1/lb_test.go b/vendor/github.com/rackspace/gophercloud/acceptance/rackspace/lb/v1/lb_test.go new file mode 100644 index 000000000..c67ddecad --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/acceptance/rackspace/lb/v1/lb_test.go @@ -0,0 +1,214 @@ +// +build acceptance lbs + +package v1 + +import ( + "strconv" + "testing" + + "github.com/rackspace/gophercloud" + "github.com/rackspace/gophercloud/acceptance/tools" + "github.com/rackspace/gophercloud/pagination" + "github.com/rackspace/gophercloud/rackspace/lb/v1/lbs" + "github.com/rackspace/gophercloud/rackspace/lb/v1/vips" + th "github.com/rackspace/gophercloud/testhelper" +) + +func TestLBs(t *testing.T) { + client := setup(t) + + ids := createLB(t, client, 3) + id := ids[0] + + listLBProtocols(t, client) + + listLBAlgorithms(t, client) + + listLBs(t, client) + + getLB(t, client, id) + + checkLBLogging(t, client, id) + + checkErrorPage(t, client, id) + + getStats(t, client, id) + + updateLB(t, client, id) + + deleteLB(t, client, id) + + batchDeleteLBs(t, client, ids[1:]) +} + +func createLB(t *testing.T, client *gophercloud.ServiceClient, count int) []int { + ids := []int{} + + for i := 0; i < count; i++ { + opts := lbs.CreateOpts{ + Name: tools.RandomString("test_", 5), + Port: 80, + Protocol: "HTTP", + VIPs: []vips.VIP{ + vips.VIP{Type: vips.PUBLIC}, + }, + } + + lb, err := lbs.Create(client, opts).Extract() + 
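// The create call returns once the request is accepted; the LB then builds asynchronously until ACTIVE. +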
th.AssertNoErr(t, err) + + t.Logf("Created LB %d - waiting for it to build...", lb.ID) + waitForLB(client, lb.ID, lbs.ACTIVE) + t.Logf("LB %d has reached ACTIVE state", lb.ID) + + ids = append(ids, lb.ID) + } + + return ids +} + +func waitForLB(client *gophercloud.ServiceClient, id int, state lbs.Status) { + gophercloud.WaitFor(60, func() (bool, error) { + lb, err := lbs.Get(client, id).Extract() + if err != nil { + return false, err + } + if lb.Status != state { + return false, nil + } + return true, nil + }) +} + +func listLBProtocols(t *testing.T, client *gophercloud.ServiceClient) { + err := lbs.ListProtocols(client).EachPage(func(page pagination.Page) (bool, error) { + pList, err := lbs.ExtractProtocols(page) + th.AssertNoErr(t, err) + + for _, p := range pList { + t.Logf("Listing protocol: Name [%s]", p.Name) + } + + return true, nil + }) + th.AssertNoErr(t, err) +} + +func listLBAlgorithms(t *testing.T, client *gophercloud.ServiceClient) { + err := lbs.ListAlgorithms(client).EachPage(func(page pagination.Page) (bool, error) { + aList, err := lbs.ExtractAlgorithms(page) + th.AssertNoErr(t, err) + + for _, a := range aList { + t.Logf("Listing algorithm: Name [%s]", a.Name) + } + + return true, nil + }) + th.AssertNoErr(t, err) +} + +func listLBs(t *testing.T, client *gophercloud.ServiceClient) { + err := lbs.List(client, lbs.ListOpts{}).EachPage(func(page pagination.Page) (bool, error) { + lbList, err := lbs.ExtractLBs(page) + th.AssertNoErr(t, err) + + for _, lb := range lbList { + t.Logf("Listing LB: ID [%d] Name [%s] Protocol [%s] Status [%s] Node count [%d] Port [%d]", + lb.ID, lb.Name, lb.Protocol, lb.Status, lb.NodeCount, lb.Port) + } + + return true, nil + }) + + th.AssertNoErr(t, err) +} + +func getLB(t *testing.T, client *gophercloud.ServiceClient, id int) { + lb, err := lbs.Get(client, id).Extract() + th.AssertNoErr(t, err) + t.Logf("Getting LB %d: Created [%s] VIPs [%#v] Logging [%#v] Persistence [%#v] SourceAddrs [%#v]", + lb.ID, lb.Created, lb.VIPs, lb.ConnectionLogging, lb.SessionPersistence, lb.SourceAddrs) +} + +func updateLB(t *testing.T, client *gophercloud.ServiceClient, id int) { + opts := lbs.UpdateOpts{ + Name: tools.RandomString("new_", 5), + Protocol: "TCP", + HalfClosed: gophercloud.Enabled, + Algorithm: "RANDOM", + Port: 8080, + Timeout: 100, + HTTPSRedirect: gophercloud.Disabled, + } + + err := lbs.Update(client, id, opts).ExtractErr() + th.AssertNoErr(t, err) + + t.Logf("Updating LB %d - waiting for it to finish", id) + waitForLB(client, id, lbs.ACTIVE) + t.Logf("LB %d has reached ACTIVE state", id) +} + +func deleteLB(t *testing.T, client *gophercloud.ServiceClient, id int) { + err := lbs.Delete(client, id).ExtractErr() + th.AssertNoErr(t, err) + t.Logf("Deleted LB %d", id) +} + +func batchDeleteLBs(t *testing.T, client *gophercloud.ServiceClient, ids []int) { + err := lbs.BulkDelete(client, ids).ExtractErr() + th.AssertNoErr(t, err) + t.Logf("Deleted LB %s", intsToStr(ids)) +} + +func checkLBLogging(t *testing.T, client *gophercloud.ServiceClient, id int) { + err := lbs.EnableLogging(client, id).ExtractErr() + th.AssertNoErr(t, err) + t.Logf("Enabled logging for LB %d", id) + + res, err := lbs.IsLoggingEnabled(client, id) + th.AssertNoErr(t, err) + t.Logf("LB %d log enabled? 
%s", id, strconv.FormatBool(res)) + + waitForLB(client, id, lbs.ACTIVE) + + err = lbs.DisableLogging(client, id).ExtractErr() + th.AssertNoErr(t, err) + t.Logf("Disabled logging for LB %d", id) +} + +func checkErrorPage(t *testing.T, client *gophercloud.ServiceClient, id int) { + content, err := lbs.SetErrorPage(client, id, "New content!").Extract() + t.Logf("Set error page for LB %d", id) + + content, err = lbs.GetErrorPage(client, id).Extract() + th.AssertNoErr(t, err) + t.Logf("Error page for LB %d: %s", id, content) + + err = lbs.DeleteErrorPage(client, id).ExtractErr() + t.Logf("Deleted error page for LB %d", id) +} + +func getStats(t *testing.T, client *gophercloud.ServiceClient, id int) { + waitForLB(client, id, lbs.ACTIVE) + + stats, err := lbs.GetStats(client, id).Extract() + th.AssertNoErr(t, err) + + t.Logf("Stats for LB %d: %#v", id, stats) +} + +func checkCaching(t *testing.T, client *gophercloud.ServiceClient, id int) { + err := lbs.EnableCaching(client, id).ExtractErr() + th.AssertNoErr(t, err) + t.Logf("Enabled caching for LB %d", id) + + res, err := lbs.IsContentCached(client, id) + th.AssertNoErr(t, err) + t.Logf("Is caching enabled for LB? %s", strconv.FormatBool(res)) + + err = lbs.DisableCaching(client, id).ExtractErr() + th.AssertNoErr(t, err) + t.Logf("Disabled caching for LB %d", id) +} diff --git a/vendor/github.com/rackspace/gophercloud/acceptance/rackspace/lb/v1/monitor_test.go b/vendor/github.com/rackspace/gophercloud/acceptance/rackspace/lb/v1/monitor_test.go new file mode 100644 index 000000000..c1a8e24dd --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/acceptance/rackspace/lb/v1/monitor_test.go @@ -0,0 +1,60 @@ +// +build acceptance lbs + +package v1 + +import ( + "testing" + + "github.com/rackspace/gophercloud" + "github.com/rackspace/gophercloud/rackspace/lb/v1/lbs" + "github.com/rackspace/gophercloud/rackspace/lb/v1/monitors" + th "github.com/rackspace/gophercloud/testhelper" +) + +func TestMonitors(t *testing.T) { + client := setup(t) + + ids := createLB(t, client, 1) + lbID := ids[0] + + getMonitor(t, client, lbID) + + updateMonitor(t, client, lbID) + + deleteMonitor(t, client, lbID) + + deleteLB(t, client, lbID) +} + +func getMonitor(t *testing.T, client *gophercloud.ServiceClient, lbID int) { + hm, err := monitors.Get(client, lbID).Extract() + th.AssertNoErr(t, err) + t.Logf("Health monitor for LB %d: Type [%s] Delay [%d] Timeout [%d] AttemptLimit [%d]", + lbID, hm.Type, hm.Delay, hm.Timeout, hm.AttemptLimit) +} + +func updateMonitor(t *testing.T, client *gophercloud.ServiceClient, lbID int) { + opts := monitors.UpdateHTTPMonitorOpts{ + AttemptLimit: 3, + Delay: 10, + Timeout: 10, + BodyRegex: "hello is it me you're looking for", + Path: "/foo", + StatusRegex: "200", + Type: monitors.HTTP, + } + + err := monitors.Update(client, lbID, opts).ExtractErr() + th.AssertNoErr(t, err) + + waitForLB(client, lbID, lbs.ACTIVE) + t.Logf("Updated monitor for LB %d", lbID) +} + +func deleteMonitor(t *testing.T, client *gophercloud.ServiceClient, lbID int) { + err := monitors.Delete(client, lbID).ExtractErr() + th.AssertNoErr(t, err) + + waitForLB(client, lbID, lbs.ACTIVE) + t.Logf("Deleted monitor for LB %d", lbID) +} diff --git a/vendor/github.com/rackspace/gophercloud/acceptance/rackspace/lb/v1/node_test.go b/vendor/github.com/rackspace/gophercloud/acceptance/rackspace/lb/v1/node_test.go new file mode 100644 index 000000000..18b9fe71c --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/acceptance/rackspace/lb/v1/node_test.go @@ -0,0 +1,175 @@ 
+// +build acceptance lbs + +package v1 + +import ( + "os" + "testing" + + "github.com/rackspace/gophercloud" + "github.com/rackspace/gophercloud/acceptance/tools" + "github.com/rackspace/gophercloud/openstack/compute/v2/extensions/diskconfig" + "github.com/rackspace/gophercloud/pagination" + "github.com/rackspace/gophercloud/rackspace/compute/v2/servers" + "github.com/rackspace/gophercloud/rackspace/lb/v1/lbs" + "github.com/rackspace/gophercloud/rackspace/lb/v1/nodes" + th "github.com/rackspace/gophercloud/testhelper" +) + +func TestNodes(t *testing.T) { + client := setup(t) + + serverIP := findServer(t) + ids := createLB(t, client, 1) + lbID := ids[0] + + nodeID := addNodes(t, client, lbID, serverIP) + + listNodes(t, client, lbID) + + getNode(t, client, lbID, nodeID) + + updateNode(t, client, lbID, nodeID) + + listEvents(t, client, lbID) + + deleteNode(t, client, lbID, nodeID) + + waitForLB(client, lbID, lbs.ACTIVE) + deleteLB(t, client, lbID) +} + +func findServer(t *testing.T) string { + var serverIP string + + client, err := newComputeClient() + th.AssertNoErr(t, err) + + err = servers.List(client, nil).EachPage(func(page pagination.Page) (bool, error) { + sList, err := servers.ExtractServers(page) + th.AssertNoErr(t, err) + + for _, s := range sList { + serverIP = s.AccessIPv4 + t.Logf("Found an existing server: ID [%s] Public IP [%s]", s.ID, serverIP) + break + } + + return true, nil + }) + th.AssertNoErr(t, err) + + if serverIP == "" { + t.Log("No server found, creating one") + + imageRef := os.Getenv("RS_IMAGE_ID") + if imageRef == "" { + t.Fatalf("OS var RS_IMAGE_ID undefined") + } + flavorRef := os.Getenv("RS_FLAVOR_ID") + if flavorRef == "" { + t.Fatalf("OS var RS_FLAVOR_ID undefined") + } + + opts := &servers.CreateOpts{ + Name: tools.RandomString("lb_test_", 5), + ImageRef: imageRef, + FlavorRef: flavorRef, + DiskConfig: diskconfig.Manual, + } + + s, err := servers.Create(client, opts).Extract() + th.AssertNoErr(t, err) + serverIP = s.AccessIPv4 + + t.Logf("Created server %s, waiting for it to build", s.ID) + err = servers.WaitForStatus(client, s.ID, "ACTIVE", 300) + th.AssertNoErr(t, err) + t.Logf("Server created successfully.") + } + + return serverIP +} + +func addNodes(t *testing.T, client *gophercloud.ServiceClient, lbID int, serverIP string) int { + opts := nodes.CreateOpts{ + nodes.CreateOpt{ + Address: serverIP, + Port: 80, + Condition: nodes.ENABLED, + Type: nodes.PRIMARY, + }, + } + + page := nodes.Create(client, lbID, opts) + + nodeList, err := page.ExtractNodes() + th.AssertNoErr(t, err) + + var nodeID int + for _, n := range nodeList { + nodeID = n.ID + } + if nodeID == 0 { + t.Fatalf("nodeID could not be extracted from create response") + } + + t.Logf("Added node %d to LB %d", nodeID, lbID) + waitForLB(client, lbID, lbs.ACTIVE) + + return nodeID +} + +func listNodes(t *testing.T, client *gophercloud.ServiceClient, lbID int) { + err := nodes.List(client, lbID, nil).EachPage(func(page pagination.Page) (bool, error) { + nodeList, err := nodes.ExtractNodes(page) + th.AssertNoErr(t, err) + + for _, n := range nodeList { + t.Logf("Listing node: ID [%d] Address [%s:%d] Status [%s]", n.ID, n.Address, n.Port, n.Status) + } + + return true, nil + }) + th.AssertNoErr(t, err) +} + +func getNode(t *testing.T, client *gophercloud.ServiceClient, lbID int, nodeID int) { + node, err := nodes.Get(client, lbID, nodeID).Extract() + th.AssertNoErr(t, err) + t.Logf("Getting node %d: Type [%s] Weight [%d]", nodeID, node.Type, node.Weight) +} + +func updateNode(t *testing.T, client 
*gophercloud.ServiceClient, lbID int, nodeID int) { + opts := nodes.UpdateOpts{ + Weight: gophercloud.IntToPointer(10), + Condition: nodes.DRAINING, + Type: nodes.SECONDARY, + } + err := nodes.Update(client, lbID, nodeID, opts).ExtractErr() + th.AssertNoErr(t, err) + t.Logf("Updated node %d", nodeID) + waitForLB(client, lbID, lbs.ACTIVE) +} + +func listEvents(t *testing.T, client *gophercloud.ServiceClient, lbID int) { + pager := nodes.ListEvents(client, lbID, nodes.ListEventsOpts{}) + err := pager.EachPage(func(page pagination.Page) (bool, error) { + eventList, err := nodes.ExtractNodeEvents(page) + th.AssertNoErr(t, err) + + for _, e := range eventList { + t.Logf("Listing events for node %d: Type [%s] Msg [%s] Severity [%s] Date [%s]", + e.NodeID, e.Type, e.DetailedMessage, e.Severity, e.Created) + } + + return true, nil + }) + th.AssertNoErr(t, err) +} + +func deleteNode(t *testing.T, client *gophercloud.ServiceClient, lbID int, nodeID int) { + err := nodes.Delete(client, lbID, nodeID).ExtractErr() + th.AssertNoErr(t, err) + t.Logf("Deleted node %d", nodeID) +} diff --git a/vendor/github.com/rackspace/gophercloud/acceptance/rackspace/lb/v1/session_test.go b/vendor/github.com/rackspace/gophercloud/acceptance/rackspace/lb/v1/session_test.go new file mode 100644 index 000000000..8d85655f6 --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/acceptance/rackspace/lb/v1/session_test.go @@ -0,0 +1,47 @@ +// +build acceptance lbs + +package v1 + +import ( + "testing" + + "github.com/rackspace/gophercloud" + "github.com/rackspace/gophercloud/rackspace/lb/v1/sessions" + th "github.com/rackspace/gophercloud/testhelper" +) + +func TestSession(t *testing.T) { + client := setup(t) + + ids := createLB(t, client, 1) + lbID := ids[0] + + getSession(t, client, lbID) + + enableSession(t, client, lbID) + waitForLB(client, lbID, "ACTIVE") + + disableSession(t, client, lbID) + waitForLB(client, lbID, "ACTIVE") + + deleteLB(t, client, lbID) +} + +func getSession(t *testing.T, client *gophercloud.ServiceClient, lbID int) { + sp, err := sessions.Get(client, lbID).Extract() + th.AssertNoErr(t, err) + t.Logf("Session config: Type [%s]", sp.Type) +} + +func enableSession(t *testing.T, client *gophercloud.ServiceClient, lbID int) { + opts := sessions.CreateOpts{Type: sessions.HTTPCOOKIE} + err := sessions.Enable(client, lbID, opts).ExtractErr() + th.AssertNoErr(t, err) + t.Logf("Enable %s sessions for %d", opts.Type, lbID) +} + +func disableSession(t *testing.T, client *gophercloud.ServiceClient, lbID int) { + err := sessions.Disable(client, lbID).ExtractErr() + th.AssertNoErr(t, err) + t.Logf("Disable sessions for %d", lbID) +} diff --git a/vendor/github.com/rackspace/gophercloud/acceptance/rackspace/lb/v1/throttle_test.go b/vendor/github.com/rackspace/gophercloud/acceptance/rackspace/lb/v1/throttle_test.go new file mode 100644 index 000000000..1cc12356c --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/acceptance/rackspace/lb/v1/throttle_test.go @@ -0,0 +1,53 @@ +// +build acceptance lbs + +package v1 + +import ( + "testing" + + "github.com/rackspace/gophercloud" + "github.com/rackspace/gophercloud/rackspace/lb/v1/throttle" + th "github.com/rackspace/gophercloud/testhelper" +) + +func TestThrottle(t *testing.T) { + client := setup(t) + + ids := createLB(t, client, 1) + lbID := ids[0] + + getThrottleConfig(t, client, lbID) + + createThrottleConfig(t, client, lbID) + waitForLB(client, lbID, "ACTIVE") + + deleteThrottleConfig(t, client, lbID) + waitForLB(client, lbID, "ACTIVE") + + deleteLB(t, 
client, lbID) +} + +func getThrottleConfig(t *testing.T, client *gophercloud.ServiceClient, lbID int) { + sp, err := throttle.Get(client, lbID).Extract() + th.AssertNoErr(t, err) + t.Logf("Throttle config: MaxConns [%d]", sp.MaxConnections) +} + +func createThrottleConfig(t *testing.T, client *gophercloud.ServiceClient, lbID int) { + opts := throttle.CreateOpts{ + MaxConnections: 200, + MaxConnectionRate: 100, + MinConnections: 0, + RateInterval: 10, + } + + err := throttle.Create(client, lbID, opts).ExtractErr() + th.AssertNoErr(t, err) + t.Logf("Enable throttling for %d", lbID) +} + +func deleteThrottleConfig(t *testing.T, client *gophercloud.ServiceClient, lbID int) { + err := throttle.Delete(client, lbID).ExtractErr() + th.AssertNoErr(t, err) + t.Logf("Disable throttling for %d", lbID) +} diff --git a/vendor/github.com/rackspace/gophercloud/acceptance/rackspace/lb/v1/vip_test.go b/vendor/github.com/rackspace/gophercloud/acceptance/rackspace/lb/v1/vip_test.go new file mode 100644 index 000000000..bc0c2a89f --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/acceptance/rackspace/lb/v1/vip_test.go @@ -0,0 +1,83 @@ +// +build acceptance lbs + +package v1 + +import ( + "testing" + + "github.com/rackspace/gophercloud" + "github.com/rackspace/gophercloud/pagination" + "github.com/rackspace/gophercloud/rackspace/lb/v1/lbs" + "github.com/rackspace/gophercloud/rackspace/lb/v1/vips" + th "github.com/rackspace/gophercloud/testhelper" +) + +func TestVIPs(t *testing.T) { + client := setup(t) + + ids := createLB(t, client, 1) + lbID := ids[0] + + listVIPs(t, client, lbID) + + vipIDs := addVIPs(t, client, lbID, 3) + + deleteVIP(t, client, lbID, vipIDs[0]) + + bulkDeleteVIPs(t, client, lbID, vipIDs[1:]) + + waitForLB(client, lbID, lbs.ACTIVE) + deleteLB(t, client, lbID) +} + +func listVIPs(t *testing.T, client *gophercloud.ServiceClient, lbID int) { + err := vips.List(client, lbID).EachPage(func(page pagination.Page) (bool, error) { + vipList, err := vips.ExtractVIPs(page) + th.AssertNoErr(t, err) + + for _, vip := range vipList { + t.Logf("Listing VIP: ID [%d] Address [%s] Type [%s] Version [%s]", + vip.ID, vip.Address, vip.Type, vip.Version) + } + + return true, nil + }) + th.AssertNoErr(t, err) +} + +func addVIPs(t *testing.T, client *gophercloud.ServiceClient, lbID, count int) []int { + ids := []int{} + + for i := 0; i < count; i++ { + opts := vips.CreateOpts{ + Type: vips.PUBLIC, + Version: vips.IPV6, + } + + vip, err := vips.Create(client, lbID, opts).Extract() + th.AssertNoErr(t, err) + + t.Logf("Created VIP %d", vip.ID) + + waitForLB(client, lbID, lbs.ACTIVE) + + ids = append(ids, vip.ID) + } + + return ids +} + +func deleteVIP(t *testing.T, client *gophercloud.ServiceClient, lbID, vipID int) { + err := vips.Delete(client, lbID, vipID).ExtractErr() + th.AssertNoErr(t, err) + + t.Logf("Deleted VIP %d", vipID) + + waitForLB(client, lbID, lbs.ACTIVE) +} + +func bulkDeleteVIPs(t *testing.T, client *gophercloud.ServiceClient, lbID int, ids []int) { + err := vips.BulkDelete(client, lbID, ids).ExtractErr() + th.AssertNoErr(t, err) + t.Logf("Deleted VIPs %s", intsToStr(ids)) +} diff --git a/vendor/github.com/rackspace/gophercloud/acceptance/rackspace/networking/v2/common.go b/vendor/github.com/rackspace/gophercloud/acceptance/rackspace/networking/v2/common.go new file mode 100644 index 000000000..81704187f --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/acceptance/rackspace/networking/v2/common.go @@ -0,0 +1,39 @@ +package v2 + +import ( + "os" + "testing" + + 
"github.com/rackspace/gophercloud" + "github.com/rackspace/gophercloud/rackspace" + th "github.com/rackspace/gophercloud/testhelper" +) + +var Client *gophercloud.ServiceClient + +func NewClient() (*gophercloud.ServiceClient, error) { + opts, err := rackspace.AuthOptionsFromEnv() + if err != nil { + return nil, err + } + + provider, err := rackspace.AuthenticatedClient(opts) + if err != nil { + return nil, err + } + + return rackspace.NewNetworkV2(provider, gophercloud.EndpointOpts{ + Name: "cloudNetworks", + Region: os.Getenv("RS_REGION"), + }) +} + +func Setup(t *testing.T) { + client, err := NewClient() + th.AssertNoErr(t, err) + Client = client +} + +func Teardown() { + Client = nil +} diff --git a/vendor/github.com/rackspace/gophercloud/acceptance/rackspace/networking/v2/network_test.go b/vendor/github.com/rackspace/gophercloud/acceptance/rackspace/networking/v2/network_test.go new file mode 100644 index 000000000..3862123bf --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/acceptance/rackspace/networking/v2/network_test.go @@ -0,0 +1,65 @@ +// +build acceptance networking + +package v2 + +import ( + "strconv" + "testing" + + os "github.com/rackspace/gophercloud/openstack/networking/v2/networks" + "github.com/rackspace/gophercloud/pagination" + "github.com/rackspace/gophercloud/rackspace/networking/v2/networks" + th "github.com/rackspace/gophercloud/testhelper" +) + +func TestNetworkCRUDOperations(t *testing.T) { + Setup(t) + defer Teardown() + + // Create a network + n, err := networks.Create(Client, os.CreateOpts{Name: "sample_network", AdminStateUp: os.Up}).Extract() + th.AssertNoErr(t, err) + defer networks.Delete(Client, n.ID) + th.AssertEquals(t, "sample_network", n.Name) + th.AssertEquals(t, true, n.AdminStateUp) + networkID := n.ID + + // List networks + pager := networks.List(Client, os.ListOpts{Limit: 2}) + err = pager.EachPage(func(page pagination.Page) (bool, error) { + t.Logf("--- Page ---") + + networkList, err := os.ExtractNetworks(page) + th.AssertNoErr(t, err) + + for _, n := range networkList { + t.Logf("Network: ID [%s] Name [%s] Status [%s] Is shared? 
[%s]", + n.ID, n.Name, n.Status, strconv.FormatBool(n.Shared)) + } + + return true, nil + }) + th.CheckNoErr(t, err) + + // Get a network + if networkID == "" { + t.Fatalf("In order to retrieve a network, the NetworkID must be set") + } + n, err = networks.Get(Client, networkID).Extract() + th.AssertNoErr(t, err) + th.AssertEquals(t, "ACTIVE", n.Status) + th.AssertDeepEquals(t, []string{}, n.Subnets) + th.AssertEquals(t, "sample_network", n.Name) + th.AssertEquals(t, true, n.AdminStateUp) + th.AssertEquals(t, false, n.Shared) + th.AssertEquals(t, networkID, n.ID) + + // Update network + n, err = networks.Update(Client, networkID, os.UpdateOpts{Name: "new_network_name"}).Extract() + th.AssertNoErr(t, err) + th.AssertEquals(t, "new_network_name", n.Name) + + // Delete network + res := networks.Delete(Client, networkID) + th.AssertNoErr(t, res.Err) +} diff --git a/vendor/github.com/rackspace/gophercloud/acceptance/rackspace/networking/v2/port_test.go b/vendor/github.com/rackspace/gophercloud/acceptance/rackspace/networking/v2/port_test.go new file mode 100644 index 000000000..3c42bb20c --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/acceptance/rackspace/networking/v2/port_test.go @@ -0,0 +1,116 @@ +// +build acceptance networking + +package v2 + +import ( + "testing" + + osNetworks "github.com/rackspace/gophercloud/openstack/networking/v2/networks" + osPorts "github.com/rackspace/gophercloud/openstack/networking/v2/ports" + osSubnets "github.com/rackspace/gophercloud/openstack/networking/v2/subnets" + "github.com/rackspace/gophercloud/pagination" + "github.com/rackspace/gophercloud/rackspace/networking/v2/networks" + "github.com/rackspace/gophercloud/rackspace/networking/v2/ports" + "github.com/rackspace/gophercloud/rackspace/networking/v2/subnets" + th "github.com/rackspace/gophercloud/testhelper" +) + +func TestPortCRUD(t *testing.T) { + Setup(t) + defer Teardown() + + // Setup network + t.Log("Setting up network") + networkID, err := createNetwork() + th.AssertNoErr(t, err) + defer networks.Delete(Client, networkID) + + // Setup subnet + t.Logf("Setting up subnet on network %s", networkID) + subnetID, err := createSubnet(networkID) + th.AssertNoErr(t, err) + defer subnets.Delete(Client, subnetID) + + // Create port + t.Logf("Create port based on subnet %s", subnetID) + portID := createPort(t, networkID, subnetID) + + // List ports + t.Logf("Listing all ports") + listPorts(t) + + // Get port + if portID == "" { + t.Fatalf("In order to retrieve a port, the portID must be set") + } + p, err := ports.Get(Client, portID).Extract() + th.AssertNoErr(t, err) + th.AssertEquals(t, portID, p.ID) + + // Update port + p, err = ports.Update(Client, portID, osPorts.UpdateOpts{Name: "new_port_name"}).Extract() + th.AssertNoErr(t, err) + th.AssertEquals(t, "new_port_name", p.Name) + + // Delete port + res := ports.Delete(Client, portID) + th.AssertNoErr(t, res.Err) +} + +func createPort(t *testing.T, networkID, subnetID string) string { + enable := true + opts := osPorts.CreateOpts{ + NetworkID: networkID, + Name: "my_port", + AdminStateUp: &enable, + FixedIPs: []osPorts.IP{osPorts.IP{SubnetID: subnetID}}, + } + p, err := ports.Create(Client, opts).Extract() + th.AssertNoErr(t, err) + th.AssertEquals(t, networkID, p.NetworkID) + th.AssertEquals(t, "my_port", p.Name) + th.AssertEquals(t, true, p.AdminStateUp) + + return p.ID +} + +func listPorts(t *testing.T) { + count := 0 + pager := ports.List(Client, osPorts.ListOpts{}) + err := pager.EachPage(func(page pagination.Page) (bool, error) { + 
count++ + t.Logf("--- Page ---") + + portList, err := osPorts.ExtractPorts(page) + th.AssertNoErr(t, err) + + for _, p := range portList { + t.Logf("Port: ID [%s] Name [%s] Status [%s] MAC addr [%s] Fixed IPs [%#v] Security groups [%#v]", + p.ID, p.Name, p.Status, p.MACAddress, p.FixedIPs, p.SecurityGroups) + } + + return true, nil + }) + + th.CheckNoErr(t, err) + + if count == 0 { + t.Logf("No pages were iterated over when listing ports") + } +} + +func createNetwork() (string, error) { + res, err := networks.Create(Client, osNetworks.CreateOpts{Name: "tmp_network", AdminStateUp: osNetworks.Up}).Extract() + return res.ID, err +} + +func createSubnet(networkID string) (string, error) { + s, err := subnets.Create(Client, osSubnets.CreateOpts{ + NetworkID: networkID, + CIDR: "192.168.199.0/24", + IPVersion: osSubnets.IPv4, + Name: "my_subnet", + EnableDHCP: osSubnets.Down, + }).Extract() + return s.ID, err +} diff --git a/vendor/github.com/rackspace/gophercloud/acceptance/rackspace/networking/v2/security_test.go b/vendor/github.com/rackspace/gophercloud/acceptance/rackspace/networking/v2/security_test.go new file mode 100644 index 000000000..ec029913e --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/acceptance/rackspace/networking/v2/security_test.go @@ -0,0 +1,165 @@ +// +build acceptance networking security + +package v2 + +import ( + "testing" + + osGroups "github.com/rackspace/gophercloud/openstack/networking/v2/extensions/security/groups" + osRules "github.com/rackspace/gophercloud/openstack/networking/v2/extensions/security/rules" + osNetworks "github.com/rackspace/gophercloud/openstack/networking/v2/networks" + osPorts "github.com/rackspace/gophercloud/openstack/networking/v2/ports" + "github.com/rackspace/gophercloud/pagination" + rsNetworks "github.com/rackspace/gophercloud/rackspace/networking/v2/networks" + rsPorts "github.com/rackspace/gophercloud/rackspace/networking/v2/ports" + rsGroups "github.com/rackspace/gophercloud/rackspace/networking/v2/security/groups" + rsRules "github.com/rackspace/gophercloud/rackspace/networking/v2/security/rules" + th "github.com/rackspace/gophercloud/testhelper" +) + +func TestSecurityGroups(t *testing.T) { + Setup(t) + defer Teardown() + + // create security group + groupID := createSecGroup(t) + + // delete security group + defer deleteSecGroup(t, groupID) + + // list security group + listSecGroups(t) + + // get security group + getSecGroup(t, groupID) +} + +func TestSecurityGroupRules(t *testing.T) { + Setup(t) + defer Teardown() + + // create security group + groupID := createSecGroup(t) + + defer deleteSecGroup(t, groupID) + + // create security group rule + ruleID := createSecRule(t, groupID) + + // delete security group rule + defer deleteSecRule(t, ruleID) + + // list security group rule + listSecRules(t) + + // get security group rule + getSecRule(t, ruleID) +} + +func createSecGroup(t *testing.T) string { + sg, err := rsGroups.Create(Client, osGroups.CreateOpts{ + Name: "new-webservers", + Description: "security group for webservers", + }).Extract() + + th.AssertNoErr(t, err) + + t.Logf("Created security group %s", sg.ID) + + return sg.ID +} + +func listSecGroups(t *testing.T) { + err := rsGroups.List(Client, osGroups.ListOpts{}).EachPage(func(page pagination.Page) (bool, error) { + list, err := osGroups.ExtractGroups(page) + if err != nil { + t.Errorf("Failed to extract secgroups: %v", err) + return false, err + } + + for _, sg := range list { + t.Logf("Listing security group: ID [%s] Name [%s]", sg.ID, sg.Name) + } + + 
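// Returning true tells the pager to fetch the next page, if any. +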
return true, nil + }) + + th.AssertNoErr(t, err) +} + +func getSecGroup(t *testing.T, id string) { + sg, err := rsGroups.Get(Client, id).Extract() + th.AssertNoErr(t, err) + t.Logf("Getting security group: ID [%s] Name [%s] Description [%s]", sg.ID, sg.Name, sg.Description) +} + +func createSecGroupPort(t *testing.T, groupID string) (string, string) { + n, err := rsNetworks.Create(Client, osNetworks.CreateOpts{Name: "tmp_network"}).Extract() + th.AssertNoErr(t, err) + t.Logf("Created network %s", n.ID) + + opts := osPorts.CreateOpts{ + NetworkID: n.ID, + Name: "my_port", + SecurityGroups: []string{groupID}, + } + p, err := rsPorts.Create(Client, opts).Extract() + th.AssertNoErr(t, err) + t.Logf("Created port %s with security group %s", p.ID, groupID) + + return n.ID, p.ID +} + +func deleteSecGroup(t *testing.T, groupID string) { + res := rsGroups.Delete(Client, groupID) + th.AssertNoErr(t, res.Err) + t.Logf("Deleted security group %s", groupID) +} + +func createSecRule(t *testing.T, groupID string) string { + r, err := rsRules.Create(Client, osRules.CreateOpts{ + Direction: "ingress", + PortRangeMin: 80, + EtherType: "IPv4", + PortRangeMax: 80, + Protocol: "tcp", + SecGroupID: groupID, + }).Extract() + + th.AssertNoErr(t, err) + + t.Logf("Created security group rule %s", r.ID) + + return r.ID +} + +func listSecRules(t *testing.T) { + err := rsRules.List(Client, osRules.ListOpts{}).EachPage(func(page pagination.Page) (bool, error) { + list, err := osRules.ExtractRules(page) + if err != nil { + t.Errorf("Failed to extract sec rules: %v", err) + return false, err + } + + for _, r := range list { + t.Logf("Listing security rule: ID [%s]", r.ID) + } + + return true, nil + }) + + th.AssertNoErr(t, err) +} + +func getSecRule(t *testing.T, id string) { + r, err := rsRules.Get(Client, id).Extract() + th.AssertNoErr(t, err) + t.Logf("Getting security rule: ID [%s] Direction [%s] EtherType [%s] Protocol [%s]", + r.ID, r.Direction, r.EtherType, r.Protocol) +} + +func deleteSecRule(t *testing.T, id string) { + res := rsRules.Delete(Client, id) + th.AssertNoErr(t, res.Err) + t.Logf("Deleted security rule %s", id) +} diff --git a/vendor/github.com/rackspace/gophercloud/acceptance/rackspace/networking/v2/subnet_test.go b/vendor/github.com/rackspace/gophercloud/acceptance/rackspace/networking/v2/subnet_test.go new file mode 100644 index 000000000..c4014320a --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/acceptance/rackspace/networking/v2/subnet_test.go @@ -0,0 +1,84 @@ +// +build acceptance networking + +package v2 + +import ( + "testing" + + osNetworks "github.com/rackspace/gophercloud/openstack/networking/v2/networks" + osSubnets "github.com/rackspace/gophercloud/openstack/networking/v2/subnets" + "github.com/rackspace/gophercloud/pagination" + "github.com/rackspace/gophercloud/rackspace/networking/v2/networks" + "github.com/rackspace/gophercloud/rackspace/networking/v2/subnets" + th "github.com/rackspace/gophercloud/testhelper" +) + +func TestListSubnets(t *testing.T) { + Setup(t) + defer Teardown() + + pager := subnets.List(Client, osSubnets.ListOpts{Limit: 2}) + err := pager.EachPage(func(page pagination.Page) (bool, error) { + t.Logf("--- Page ---") + + subnetList, err := osSubnets.ExtractSubnets(page) + th.AssertNoErr(t, err) + + for _, s := range subnetList { + t.Logf("Subnet: ID [%s] Name [%s] IP Version [%d] CIDR [%s] GatewayIP [%s]", + s.ID, s.Name, s.IPVersion, s.CIDR, s.GatewayIP) + } + + return true, nil + }) + th.CheckNoErr(t, err) +} + +func TestSubnetCRUD(t *testing.T) { + 
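// Exercise the full subnet lifecycle: create on a temporary network, get, update, then delete. +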
Setup(t) + defer Teardown() + + // Setup network + t.Log("Setting up network") + n, err := networks.Create(Client, osNetworks.CreateOpts{Name: "tmp_network", AdminStateUp: osNetworks.Up}).Extract() + th.AssertNoErr(t, err) + networkID := n.ID + defer networks.Delete(Client, networkID) + + // Create subnet + t.Log("Create subnet") + enable := false + opts := osSubnets.CreateOpts{ + NetworkID: networkID, + CIDR: "192.168.199.0/24", + IPVersion: osSubnets.IPv4, + Name: "my_subnet", + EnableDHCP: &enable, + } + s, err := subnets.Create(Client, opts).Extract() + th.AssertNoErr(t, err) + + th.AssertEquals(t, networkID, s.NetworkID) + th.AssertEquals(t, "192.168.199.0/24", s.CIDR) + th.AssertEquals(t, 4, s.IPVersion) + th.AssertEquals(t, "my_subnet", s.Name) + th.AssertEquals(t, false, s.EnableDHCP) + subnetID := s.ID + + // Get subnet + t.Log("Getting subnet") + s, err = subnets.Get(Client, subnetID).Extract() + th.AssertNoErr(t, err) + th.AssertEquals(t, subnetID, s.ID) + + // Update subnet + t.Log("Update subnet") + s, err = subnets.Update(Client, subnetID, osSubnets.UpdateOpts{Name: "new_subnet_name"}).Extract() + th.AssertNoErr(t, err) + th.AssertEquals(t, "new_subnet_name", s.Name) + + // Delete subnet + t.Log("Delete subnet") + res := subnets.Delete(Client, subnetID) + th.AssertNoErr(t, res.Err) +} diff --git a/vendor/github.com/rackspace/gophercloud/acceptance/rackspace/objectstorage/v1/accounts_test.go b/vendor/github.com/rackspace/gophercloud/acceptance/rackspace/objectstorage/v1/accounts_test.go new file mode 100644 index 000000000..8b3cde45e --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/acceptance/rackspace/objectstorage/v1/accounts_test.go @@ -0,0 +1,38 @@ +// +build acceptance rackspace + +package v1 + +import ( + "testing" + + raxAccounts "github.com/rackspace/gophercloud/rackspace/objectstorage/v1/accounts" + th "github.com/rackspace/gophercloud/testhelper" +) + +func TestAccounts(t *testing.T) { + c, err := createClient(t, false) + th.AssertNoErr(t, err) + + updateHeaders, err := raxAccounts.Update(c, raxAccounts.UpdateOpts{Metadata: map[string]string{"white": "mountains"}}).Extract() + th.AssertNoErr(t, err) + t.Logf("Update Account Response Headers: %+v\n", updateHeaders) + defer func() { + updateres := raxAccounts.Update(c, raxAccounts.UpdateOpts{Metadata: map[string]string{"white": ""}}) + th.AssertNoErr(t, updateres.Err) + metadata, err := raxAccounts.Get(c).ExtractMetadata() + th.AssertNoErr(t, err) + t.Logf("Metadata from Get Account request (after update reverted): %+v\n", metadata) + th.CheckEquals(t, metadata["White"], "") + }() + + getResp := raxAccounts.Get(c) + th.AssertNoErr(t, getResp.Err) + + getHeaders, _ := getResp.Extract() + t.Logf("Get Account Response Headers: %+v\n", getHeaders) + + metadata, _ := getResp.ExtractMetadata() + t.Logf("Metadata from Get Account request (after update): %+v\n", metadata) + + th.CheckEquals(t, metadata["White"], "mountains") +} diff --git a/vendor/github.com/rackspace/gophercloud/acceptance/rackspace/objectstorage/v1/bulk_test.go b/vendor/github.com/rackspace/gophercloud/acceptance/rackspace/objectstorage/v1/bulk_test.go new file mode 100644 index 000000000..79013a564 --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/acceptance/rackspace/objectstorage/v1/bulk_test.go @@ -0,0 +1,23 @@ +// +build acceptance rackspace objectstorage v1 + +package v1 + +import ( + "testing" + + "github.com/rackspace/gophercloud/rackspace/objectstorage/v1/bulk" + th "github.com/rackspace/gophercloud/testhelper" +) + +func 
TestBulk(t *testing.T) { + c, err := createClient(t, false) + th.AssertNoErr(t, err) + + var options bulk.DeleteOpts + options = append(options, "container/object1") + res := bulk.Delete(c, options) + th.AssertNoErr(t, res.Err) + body, err := res.ExtractBody() + th.AssertNoErr(t, err) + t.Logf("Response body from Bulk Delete Request: %+v\n", body) +} diff --git a/vendor/github.com/rackspace/gophercloud/acceptance/rackspace/objectstorage/v1/cdncontainers_test.go b/vendor/github.com/rackspace/gophercloud/acceptance/rackspace/objectstorage/v1/cdncontainers_test.go new file mode 100644 index 000000000..0f56f4978 --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/acceptance/rackspace/objectstorage/v1/cdncontainers_test.go @@ -0,0 +1,66 @@ +// +build acceptance rackspace objectstorage v1 + +package v1 + +import ( + "testing" + + osContainers "github.com/rackspace/gophercloud/openstack/objectstorage/v1/containers" + "github.com/rackspace/gophercloud/pagination" + raxCDNContainers "github.com/rackspace/gophercloud/rackspace/objectstorage/v1/cdncontainers" + raxContainers "github.com/rackspace/gophercloud/rackspace/objectstorage/v1/containers" + th "github.com/rackspace/gophercloud/testhelper" +) + +func TestCDNContainers(t *testing.T) { + raxClient, err := createClient(t, false) + th.AssertNoErr(t, err) + + createres := raxContainers.Create(raxClient, "gophercloud-test", nil) + th.AssertNoErr(t, createres.Err) + t.Logf("Headers from Create Container request: %+v\n", createres.Header) + defer func() { + res := raxContainers.Delete(raxClient, "gophercloud-test") + th.AssertNoErr(t, res.Err) + }() + + raxCDNClient, err := createClient(t, true) + th.AssertNoErr(t, err) + enableRes := raxCDNContainers.Enable(raxCDNClient, "gophercloud-test", raxCDNContainers.EnableOpts{CDNEnabled: true, TTL: 900}) + t.Logf("Header map from Enable CDN Container request: %+v\n", enableRes.Header) + enableHeader, err := enableRes.Extract() + th.AssertNoErr(t, err) + t.Logf("Headers from Enable CDN Container request: %+v\n", enableHeader) + + t.Logf("Container Names available to the currently issued token:") + count := 0 + err = raxCDNContainers.List(raxCDNClient, &osContainers.ListOpts{Full: false}).EachPage(func(page pagination.Page) (bool, error) { + t.Logf("--- Page %02d ---", count) + + names, err := raxCDNContainers.ExtractNames(page) + th.AssertNoErr(t, err) + + for i, name := range names { + t.Logf("[%02d] %s", i, name) + } + + count++ + return true, nil + }) + th.AssertNoErr(t, err) + if count == 0 { + t.Errorf("No CDN containers listed for your current token.") + } + + updateOpts := raxCDNContainers.UpdateOpts{XCDNEnabled: raxCDNContainers.Disabled, XLogRetention: raxCDNContainers.Enabled} + updateHeader, err := raxCDNContainers.Update(raxCDNClient, "gophercloud-test", updateOpts).Extract() + th.AssertNoErr(t, err) + t.Logf("Headers from Update CDN Container request: %+v\n", updateHeader) + + getRes := raxCDNContainers.Get(raxCDNClient, "gophercloud-test") + getHeader, err := getRes.Extract() + th.AssertNoErr(t, err) + t.Logf("Headers from Get CDN Container request (after update): %+v\n", getHeader) + metadata, err := getRes.ExtractMetadata() + t.Logf("Metadata from Get CDN Container request (after update): %+v\n", metadata) +} diff --git a/vendor/github.com/rackspace/gophercloud/acceptance/rackspace/objectstorage/v1/cdnobjects_test.go b/vendor/github.com/rackspace/gophercloud/acceptance/rackspace/objectstorage/v1/cdnobjects_test.go new file mode 100644 index 000000000..0c0ab8a1e --- /dev/null +++ 
b/vendor/github.com/rackspace/gophercloud/acceptance/rackspace/objectstorage/v1/cdnobjects_test.go @@ -0,0 +1,50 @@ +// +build acceptance rackspace objectstorage v1 + +package v1 + +import ( + "bytes" + "testing" + + raxCDNContainers "github.com/rackspace/gophercloud/rackspace/objectstorage/v1/cdncontainers" + raxCDNObjects "github.com/rackspace/gophercloud/rackspace/objectstorage/v1/cdnobjects" + raxContainers "github.com/rackspace/gophercloud/rackspace/objectstorage/v1/containers" + raxObjects "github.com/rackspace/gophercloud/rackspace/objectstorage/v1/objects" + th "github.com/rackspace/gophercloud/testhelper" +) + +func TestCDNObjects(t *testing.T) { + raxClient, err := createClient(t, false) + th.AssertNoErr(t, err) + + createContResult := raxContainers.Create(raxClient, "gophercloud-test", nil) + th.AssertNoErr(t, createContResult.Err) + t.Logf("Headers from Create Container request: %+v\n", createContResult.Header) + defer func() { + deleteResult := raxContainers.Delete(raxClient, "gophercloud-test") + th.AssertNoErr(t, deleteResult.Err) + }() + + header, err := raxObjects.Create(raxClient, "gophercloud-test", "test-object", bytes.NewBufferString("gophercloud cdn test"), nil).ExtractHeader() + th.AssertNoErr(t, err) + t.Logf("Headers from Create Object request: %+v\n", header) + defer func() { + deleteResult := raxObjects.Delete(raxClient, "gophercloud-test", "test-object", nil) + th.AssertNoErr(t, deleteResult.Err) + }() + + raxCDNClient, err := createClient(t, true) + th.AssertNoErr(t, err) + + enableHeader, err := raxCDNContainers.Enable(raxCDNClient, "gophercloud-test", raxCDNContainers.EnableOpts{CDNEnabled: true, TTL: 900}).Extract() + th.AssertNoErr(t, err) + t.Logf("Headers from Enable CDN Container request: %+v\n", enableHeader) + + objCDNURL, err := raxCDNObjects.CDNURL(raxCDNClient, "gophercloud-test", "test-object") + th.AssertNoErr(t, err) + t.Logf("%s CDN URL: %s\n", "test-object", objCDNURL) + + deleteHeader, err := raxCDNObjects.Delete(raxCDNClient, "gophercloud-test", "test-object", nil).Extract() + th.AssertNoErr(t, err) + t.Logf("Headers from Delete CDN Object request: %+v\n", deleteHeader) +} diff --git a/vendor/github.com/rackspace/gophercloud/acceptance/rackspace/objectstorage/v1/common.go b/vendor/github.com/rackspace/gophercloud/acceptance/rackspace/objectstorage/v1/common.go new file mode 100644 index 000000000..1ae07278c --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/acceptance/rackspace/objectstorage/v1/common.go @@ -0,0 +1,54 @@ +// +build acceptance rackspace objectstorage v1 + +package v1 + +import ( + "os" + "testing" + + "github.com/rackspace/gophercloud" + "github.com/rackspace/gophercloud/acceptance/tools" + "github.com/rackspace/gophercloud/rackspace" + th "github.com/rackspace/gophercloud/testhelper" +) + +func rackspaceAuthOptions(t *testing.T) gophercloud.AuthOptions { + // Obtain credentials from the environment. 
+ options, err := rackspace.AuthOptionsFromEnv() + th.AssertNoErr(t, err) + options = tools.OnlyRS(options) + + if options.Username == "" { + t.Fatal("Please provide a Rackspace username as RS_USERNAME.") + } + if options.APIKey == "" { + t.Fatal("Please provide a Rackspace API key as RS_API_KEY.") + } + + return options +} + +func createClient(t *testing.T, cdn bool) (*gophercloud.ServiceClient, error) { + region := os.Getenv("RS_REGION") + if region == "" { + t.Fatal("Please provide a Rackspace region as RS_REGION") + } + + ao := rackspaceAuthOptions(t) + + provider, err := rackspace.NewClient(ao.IdentityEndpoint) + th.AssertNoErr(t, err) + + err = rackspace.Authenticate(provider, ao) + th.AssertNoErr(t, err) + + if cdn { + return rackspace.NewObjectCDNV1(provider, gophercloud.EndpointOpts{ + Region: region, + }) + } + + return rackspace.NewObjectStorageV1(provider, gophercloud.EndpointOpts{ + Region: region, + }) +} diff --git a/vendor/github.com/rackspace/gophercloud/acceptance/rackspace/objectstorage/v1/containers_test.go b/vendor/github.com/rackspace/gophercloud/acceptance/rackspace/objectstorage/v1/containers_test.go new file mode 100644 index 000000000..c89551373 --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/acceptance/rackspace/objectstorage/v1/containers_test.go @@ -0,0 +1,90 @@ +// +build acceptance rackspace objectstorage v1 + +package v1 + +import ( + "testing" + + osContainers "github.com/rackspace/gophercloud/openstack/objectstorage/v1/containers" + "github.com/rackspace/gophercloud/pagination" + raxContainers "github.com/rackspace/gophercloud/rackspace/objectstorage/v1/containers" + th "github.com/rackspace/gophercloud/testhelper" +) + +func TestContainers(t *testing.T) { + c, err := createClient(t, false) + th.AssertNoErr(t, err) + + t.Logf("Containers Info available to the currently issued token:") + count := 0 + err = raxContainers.List(c, &osContainers.ListOpts{Full: true}).EachPage(func(page pagination.Page) (bool, error) { + t.Logf("--- Page %02d ---", count) + + containers, err := raxContainers.ExtractInfo(page) + th.AssertNoErr(t, err) + + for i, container := range containers { + t.Logf("[%02d] name=[%s]", i, container.Name) + t.Logf(" count=[%d]", container.Count) + t.Logf(" bytes=[%d]", container.Bytes) + } + + count++ + return true, nil + }) + th.AssertNoErr(t, err) + if count == 0 { + t.Errorf("No containers listed for your current token.") + } + + t.Logf("Container Names available to the currently issued token:") + count = 0 + err = raxContainers.List(c, &osContainers.ListOpts{Full: false}).EachPage(func(page pagination.Page) (bool, error) { + t.Logf("--- Page %02d ---", count) + + names, err := raxContainers.ExtractNames(page) + th.AssertNoErr(t, err) + + for i, name := range names { + t.Logf("[%02d] %s", i, name) + } + + count++ + return true, nil + }) + th.AssertNoErr(t, err) + if count == 0 { + t.Errorf("No containers listed for your current token.") + } + + createHeader, err := raxContainers.Create(c, "gophercloud-test", nil).Extract() + th.AssertNoErr(t, err) + t.Logf("Headers from Create Container request: %+v\n", createHeader) + defer func() { + deleteres := raxContainers.Delete(c, "gophercloud-test") + deleteHeader, err := deleteres.Extract() + th.AssertNoErr(t, err) + t.Logf("Headers from Delete Container request: %+v\n", deleteres.Header) + t.Logf("Headers from Delete Container request: %+v\n", deleteHeader) + }() + + updateHeader, err := raxContainers.Update(c, "gophercloud-test", raxContainers.UpdateOpts{Metadata: 
map[string]string{"white": "mountains"}}).Extract() + th.AssertNoErr(t, err) + t.Logf("Headers from Update Container request: %+v\n", updateHeader) + defer func() { + res := raxContainers.Update(c, "gophercloud-test", raxContainers.UpdateOpts{Metadata: map[string]string{"white": ""}}) + th.AssertNoErr(t, res.Err) + metadata, err := raxContainers.Get(c, "gophercloud-test").ExtractMetadata() + th.AssertNoErr(t, err) + t.Logf("Metadata from Get Container request (after update reverted): %+v\n", metadata) + th.CheckEquals(t, metadata["White"], "") + }() + + getres := raxContainers.Get(c, "gophercloud-test") + getHeader, err := getres.Extract() + th.AssertNoErr(t, err) + t.Logf("Headers from Get Container request (after update): %+v\n", getHeader) + metadata, err := getres.ExtractMetadata() + t.Logf("Metadata from Get Container request (after update): %+v\n", metadata) + th.CheckEquals(t, metadata["White"], "mountains") +} diff --git a/vendor/github.com/rackspace/gophercloud/acceptance/rackspace/objectstorage/v1/objects_test.go b/vendor/github.com/rackspace/gophercloud/acceptance/rackspace/objectstorage/v1/objects_test.go new file mode 100644 index 000000000..585dea769 --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/acceptance/rackspace/objectstorage/v1/objects_test.go @@ -0,0 +1,124 @@ +// +build acceptance rackspace objectstorage v1 + +package v1 + +import ( + "bytes" + "testing" + + osObjects "github.com/rackspace/gophercloud/openstack/objectstorage/v1/objects" + "github.com/rackspace/gophercloud/pagination" + raxContainers "github.com/rackspace/gophercloud/rackspace/objectstorage/v1/containers" + raxObjects "github.com/rackspace/gophercloud/rackspace/objectstorage/v1/objects" + th "github.com/rackspace/gophercloud/testhelper" +) + +func TestObjects(t *testing.T) { + c, err := createClient(t, false) + th.AssertNoErr(t, err) + + res := raxContainers.Create(c, "gophercloud-test", nil) + th.AssertNoErr(t, res.Err) + + defer func() { + t.Logf("Deleting container...") + res := raxContainers.Delete(c, "gophercloud-test") + th.AssertNoErr(t, res.Err) + }() + + content := bytes.NewBufferString("Lewis Carroll") + options := &osObjects.CreateOpts{ContentType: "text/plain"} + createres := raxObjects.Create(c, "gophercloud-test", "o1", content, options) + th.AssertNoErr(t, createres.Err) + + defer func() { + t.Logf("Deleting object o1...") + res := raxObjects.Delete(c, "gophercloud-test", "o1", nil) + th.AssertNoErr(t, res.Err) + }() + + t.Logf("Objects Info available to the currently issued token:") + count := 0 + err = raxObjects.List(c, "gophercloud-test", &osObjects.ListOpts{Full: true}).EachPage(func(page pagination.Page) (bool, error) { + t.Logf("--- Page %02d ---", count) + + objects, err := raxObjects.ExtractInfo(page) + th.AssertNoErr(t, err) + + for i, object := range objects { + t.Logf("[%02d] name=[%s]", i, object.Name) + t.Logf(" content-type=[%s]", object.ContentType) + t.Logf(" bytes=[%d]", object.Bytes) + t.Logf(" last-modified=[%s]", object.LastModified) + t.Logf(" hash=[%s]", object.Hash) + } + + count++ + return true, nil + }) + th.AssertNoErr(t, err) + if count == 0 { + t.Errorf("No objects listed for your current token.") + } + t.Logf("Container Names available to the currently issued token:") + count = 0 + err = raxObjects.List(c, "gophercloud-test", &osObjects.ListOpts{Full: false}).EachPage(func(page pagination.Page) (bool, error) { + t.Logf("--- Page %02d ---", count) + + names, err := raxObjects.ExtractNames(page) + th.AssertNoErr(t, err) + + for i, name := range 
names { + t.Logf("[%02d] %s", i, name) + } + + count++ + return true, nil + }) + th.AssertNoErr(t, err) + if count == 0 { + t.Errorf("No objects listed for your current token.") + } + + copyres := raxObjects.Copy(c, "gophercloud-test", "o1", &raxObjects.CopyOpts{Destination: "gophercloud-test/o2"}) + th.AssertNoErr(t, copyres.Err) + defer func() { + t.Logf("Deleting object o2...") + res := raxObjects.Delete(c, "gophercloud-test", "o2", nil) + th.AssertNoErr(t, res.Err) + }() + + o1Content, err := raxObjects.Download(c, "gophercloud-test", "o1", nil).ExtractContent() + th.AssertNoErr(t, err) + o2Content, err := raxObjects.Download(c, "gophercloud-test", "o2", nil).ExtractContent() + th.AssertNoErr(t, err) + th.AssertEquals(t, string(o2Content), string(o1Content)) + + updateres := raxObjects.Update(c, "gophercloud-test", "o2", osObjects.UpdateOpts{Metadata: map[string]string{"white": "mountains"}}) + th.AssertNoErr(t, updateres.Err) + t.Logf("Headers from Update Account request: %+v\n", updateres.Header) + defer func() { + res := raxObjects.Update(c, "gophercloud-test", "o2", osObjects.UpdateOpts{Metadata: map[string]string{"white": ""}}) + th.AssertNoErr(t, res.Err) + metadata, err := raxObjects.Get(c, "gophercloud-test", "o2", nil).ExtractMetadata() + th.AssertNoErr(t, err) + t.Logf("Metadata from Get Account request (after update reverted): %+v\n", metadata) + th.CheckEquals(t, "", metadata["White"]) + }() + + getres := raxObjects.Get(c, "gophercloud-test", "o2", nil) + th.AssertNoErr(t, getres.Err) + t.Logf("Headers from Get Account request (after update): %+v\n", getres.Header) + metadata, err := getres.ExtractMetadata() + th.AssertNoErr(t, err) + t.Logf("Metadata from Get Account request (after update): %+v\n", metadata) + th.CheckEquals(t, "mountains", metadata["White"]) + + createTempURLOpts := osObjects.CreateTempURLOpts{ + Method: osObjects.GET, + TTL: 600, + } + tempURL, err := raxObjects.CreateTempURL(c, "gophercloud-test", "o1", createTempURLOpts) + th.AssertNoErr(t, err) + t.Logf("TempURL for object (%s): %s", "o1", tempURL) +} diff --git a/vendor/github.com/rackspace/gophercloud/acceptance/rackspace/orchestration/v1/buildinfo_test.go b/vendor/github.com/rackspace/gophercloud/acceptance/rackspace/orchestration/v1/buildinfo_test.go new file mode 100644 index 000000000..42cc048e3 --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/acceptance/rackspace/orchestration/v1/buildinfo_test.go @@ -0,0 +1,20 @@ +// +build acceptance + +package v1 + +import ( + "testing" + + "github.com/rackspace/gophercloud/rackspace/orchestration/v1/buildinfo" + th "github.com/rackspace/gophercloud/testhelper" +) + +func TestBuildInfo(t *testing.T) { + // Create a provider client for making the HTTP requests. + // See common.go in this directory for more information. 
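+	// (newClient requires RS_REGION_NAME, and the template defined in
+	// common.go also reads RS_FLAVOR_ID and RS_IMAGE_ID from the environment.)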
+ client := newClient(t) + + bi, err := buildinfo.Get(client).Extract() + th.AssertNoErr(t, err) + t.Logf("retrieved build info: %+v\n", bi) +} diff --git a/vendor/github.com/rackspace/gophercloud/acceptance/rackspace/orchestration/v1/common.go b/vendor/github.com/rackspace/gophercloud/acceptance/rackspace/orchestration/v1/common.go new file mode 100644 index 000000000..b9d51979d --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/acceptance/rackspace/orchestration/v1/common.go @@ -0,0 +1,45 @@ +// +build acceptance + +package v1 + +import ( + "fmt" + "os" + "testing" + + "github.com/rackspace/gophercloud" + "github.com/rackspace/gophercloud/rackspace" + th "github.com/rackspace/gophercloud/testhelper" +) + +var template = fmt.Sprintf(` +{ + "heat_template_version": "2013-05-23", + "description": "Simple template to test heat commands", + "parameters": {}, + "resources": { + "hello_world": { + "type":"OS::Nova::Server", + "properties": { + "flavor": "%s", + "image": "%s", + "user_data": "#!/bin/bash -xv\necho \"hello world\" > /root/hello-world.txt\n" + } + } + } +} +`, os.Getenv("RS_FLAVOR_ID"), os.Getenv("RS_IMAGE_ID")) + +func newClient(t *testing.T) *gophercloud.ServiceClient { + ao, err := rackspace.AuthOptionsFromEnv() + th.AssertNoErr(t, err) + + client, err := rackspace.AuthenticatedClient(ao) + th.AssertNoErr(t, err) + + c, err := rackspace.NewOrchestrationV1(client, gophercloud.EndpointOpts{ + Region: os.Getenv("RS_REGION_NAME"), + }) + th.AssertNoErr(t, err) + return c +} diff --git a/vendor/github.com/rackspace/gophercloud/acceptance/rackspace/orchestration/v1/stackevents_test.go b/vendor/github.com/rackspace/gophercloud/acceptance/rackspace/orchestration/v1/stackevents_test.go new file mode 100644 index 000000000..9e3fc084a --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/acceptance/rackspace/orchestration/v1/stackevents_test.go @@ -0,0 +1,70 @@ +// +build acceptance + +package v1 + +import ( + "testing" + + "github.com/rackspace/gophercloud" + osStackEvents "github.com/rackspace/gophercloud/openstack/orchestration/v1/stackevents" + osStacks "github.com/rackspace/gophercloud/openstack/orchestration/v1/stacks" + "github.com/rackspace/gophercloud/pagination" + "github.com/rackspace/gophercloud/rackspace/orchestration/v1/stackevents" + "github.com/rackspace/gophercloud/rackspace/orchestration/v1/stacks" + th "github.com/rackspace/gophercloud/testhelper" +) + +func TestStackEvents(t *testing.T) { + // Create a provider client for making the HTTP requests. + // See common.go in this directory for more information. 
+ client := newClient(t) + + stackName := "postman_stack_2" + resourceName := "hello_world" + var eventID string + + createOpts := osStacks.CreateOpts{ + Name: stackName, + Template: template, + Timeout: 5, + } + stack, err := stacks.Create(client, createOpts).Extract() + th.AssertNoErr(t, err) + t.Logf("Created stack: %+v\n", stack) + defer func() { + err := stacks.Delete(client, stackName, stack.ID).ExtractErr() + th.AssertNoErr(t, err) + t.Logf("Deleted stack (%s)", stackName) + }() + err = gophercloud.WaitFor(60, func() (bool, error) { + getStack, err := stacks.Get(client, stackName, stack.ID).Extract() + if err != nil { + return false, err + } + if getStack.Status == "CREATE_COMPLETE" { + return true, nil + } + return false, nil + }) + + err = stackevents.List(client, stackName, stack.ID, nil).EachPage(func(page pagination.Page) (bool, error) { + events, err := osStackEvents.ExtractEvents(page) + th.AssertNoErr(t, err) + t.Logf("listed events: %+v\n", events) + eventID = events[0].ID + return false, nil + }) + th.AssertNoErr(t, err) + + err = stackevents.ListResourceEvents(client, stackName, stack.ID, resourceName, nil).EachPage(func(page pagination.Page) (bool, error) { + resourceEvents, err := osStackEvents.ExtractResourceEvents(page) + th.AssertNoErr(t, err) + t.Logf("listed resource events: %+v\n", resourceEvents) + return false, nil + }) + th.AssertNoErr(t, err) + + event, err := stackevents.Get(client, stackName, stack.ID, resourceName, eventID).Extract() + th.AssertNoErr(t, err) + t.Logf("retrieved event: %+v\n", event) +} diff --git a/vendor/github.com/rackspace/gophercloud/acceptance/rackspace/orchestration/v1/stackresources_test.go b/vendor/github.com/rackspace/gophercloud/acceptance/rackspace/orchestration/v1/stackresources_test.go new file mode 100644 index 000000000..65926e78d --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/acceptance/rackspace/orchestration/v1/stackresources_test.go @@ -0,0 +1,64 @@ +// +build acceptance + +package v1 + +import ( + "testing" + + "github.com/rackspace/gophercloud" + osStackResources "github.com/rackspace/gophercloud/openstack/orchestration/v1/stackresources" + osStacks "github.com/rackspace/gophercloud/openstack/orchestration/v1/stacks" + "github.com/rackspace/gophercloud/pagination" + "github.com/rackspace/gophercloud/rackspace/orchestration/v1/stackresources" + "github.com/rackspace/gophercloud/rackspace/orchestration/v1/stacks" + th "github.com/rackspace/gophercloud/testhelper" +) + +func TestStackResources(t *testing.T) { + // Create a provider client for making the HTTP requests. + // See common.go in this directory for more information. 
+ client := newClient(t) + + stackName := "postman_stack_2" + + createOpts := osStacks.CreateOpts{ + Name: stackName, + Template: template, + Timeout: 5, + } + stack, err := stacks.Create(client, createOpts).Extract() + th.AssertNoErr(t, err) + t.Logf("Created stack: %+v\n", stack) + defer func() { + err := stacks.Delete(client, stackName, stack.ID).ExtractErr() + th.AssertNoErr(t, err) + t.Logf("Deleted stack (%s)", stackName) + }() + err = gophercloud.WaitFor(60, func() (bool, error) { + getStack, err := stacks.Get(client, stackName, stack.ID).Extract() + if err != nil { + return false, err + } + if getStack.Status == "CREATE_COMPLETE" { + return true, nil + } + return false, nil + }) + + resourceName := "hello_world" + resource, err := stackresources.Get(client, stackName, stack.ID, resourceName).Extract() + th.AssertNoErr(t, err) + t.Logf("Got stack resource: %+v\n", resource) + + metadata, err := stackresources.Metadata(client, stackName, stack.ID, resourceName).Extract() + th.AssertNoErr(t, err) + t.Logf("Got stack resource metadata: %+v\n", metadata) + + err = stackresources.List(client, stackName, stack.ID, nil).EachPage(func(page pagination.Page) (bool, error) { + resources, err := osStackResources.ExtractResources(page) + th.AssertNoErr(t, err) + t.Logf("resources: %+v\n", resources) + return false, nil + }) + th.AssertNoErr(t, err) +} diff --git a/vendor/github.com/rackspace/gophercloud/acceptance/rackspace/orchestration/v1/stacks_test.go b/vendor/github.com/rackspace/gophercloud/acceptance/rackspace/orchestration/v1/stacks_test.go new file mode 100644 index 000000000..61969b52c --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/acceptance/rackspace/orchestration/v1/stacks_test.go @@ -0,0 +1,154 @@ +// +build acceptance + +package v1 + +import ( + "testing" + + "github.com/rackspace/gophercloud" + osStacks "github.com/rackspace/gophercloud/openstack/orchestration/v1/stacks" + "github.com/rackspace/gophercloud/pagination" + "github.com/rackspace/gophercloud/rackspace/orchestration/v1/stacks" + th "github.com/rackspace/gophercloud/testhelper" +) + +func TestStacks(t *testing.T) { + // Create a provider client for making the HTTP requests. + // See common.go in this directory for more information. 
+ client := newClient(t) + + stackName1 := "gophercloud-test-stack-2" + createOpts := osStacks.CreateOpts{ + Name: stackName1, + Template: template, + Timeout: 5, + } + stack, err := stacks.Create(client, createOpts).Extract() + th.AssertNoErr(t, err) + t.Logf("Created stack: %+v\n", stack) + defer func() { + err := stacks.Delete(client, stackName1, stack.ID).ExtractErr() + th.AssertNoErr(t, err) + t.Logf("Deleted stack (%s)", stackName1) + }() + err = gophercloud.WaitFor(60, func() (bool, error) { + getStack, err := stacks.Get(client, stackName1, stack.ID).Extract() + if err != nil { + return false, err + } + if getStack.Status == "CREATE_COMPLETE" { + return true, nil + } + return false, nil + }) + + updateOpts := osStacks.UpdateOpts{ + Template: template, + Timeout: 20, + } + err = stacks.Update(client, stackName1, stack.ID, updateOpts).ExtractErr() + th.AssertNoErr(t, err) + err = gophercloud.WaitFor(60, func() (bool, error) { + getStack, err := stacks.Get(client, stackName1, stack.ID).Extract() + if err != nil { + return false, err + } + if getStack.Status == "UPDATE_COMPLETE" { + return true, nil + } + return false, nil + }) + + t.Logf("Updated stack") + + err = stacks.List(client, nil).EachPage(func(page pagination.Page) (bool, error) { + stackList, err := osStacks.ExtractStacks(page) + th.AssertNoErr(t, err) + + t.Logf("Got stack list: %+v\n", stackList) + + return true, nil + }) + th.AssertNoErr(t, err) + + getStack, err := stacks.Get(client, stackName1, stack.ID).Extract() + th.AssertNoErr(t, err) + t.Logf("Got stack: %+v\n", getStack) + + abandonedStack, err := stacks.Abandon(client, stackName1, stack.ID).Extract() + th.AssertNoErr(t, err) + t.Logf("Abandonded stack %+v\n", abandonedStack) + th.AssertNoErr(t, err) +} + +// Test using the updated interface +func TestStacksNewTemplateFormat(t *testing.T) { + // Create a provider client for making the HTTP requests. + // See common.go in this directory for more information. 
+ client := newClient(t) + + stackName1 := "gophercloud-test-stack-2" + templateOpts := new(osStacks.Template) + templateOpts.Bin = []byte(template) + createOpts := osStacks.CreateOpts{ + Name: stackName1, + TemplateOpts: templateOpts, + Timeout: 5, + } + stack, err := stacks.Create(client, createOpts).Extract() + th.AssertNoErr(t, err) + t.Logf("Created stack: %+v\n", stack) + defer func() { + err := stacks.Delete(client, stackName1, stack.ID).ExtractErr() + th.AssertNoErr(t, err) + t.Logf("Deleted stack (%s)", stackName1) + }() + err = gophercloud.WaitFor(60, func() (bool, error) { + getStack, err := stacks.Get(client, stackName1, stack.ID).Extract() + if err != nil { + return false, err + } + if getStack.Status == "CREATE_COMPLETE" { + return true, nil + } + return false, nil + }) + + updateOpts := osStacks.UpdateOpts{ + TemplateOpts: templateOpts, + Timeout: 20, + } + err = stacks.Update(client, stackName1, stack.ID, updateOpts).ExtractErr() + th.AssertNoErr(t, err) + err = gophercloud.WaitFor(60, func() (bool, error) { + getStack, err := stacks.Get(client, stackName1, stack.ID).Extract() + if err != nil { + return false, err + } + if getStack.Status == "UPDATE_COMPLETE" { + return true, nil + } + return false, nil + }) + + t.Logf("Updated stack") + + err = stacks.List(client, nil).EachPage(func(page pagination.Page) (bool, error) { + stackList, err := osStacks.ExtractStacks(page) + th.AssertNoErr(t, err) + + t.Logf("Got stack list: %+v\n", stackList) + + return true, nil + }) + th.AssertNoErr(t, err) + + getStack, err := stacks.Get(client, stackName1, stack.ID).Extract() + th.AssertNoErr(t, err) + t.Logf("Got stack: %+v\n", getStack) + + abandonedStack, err := stacks.Abandon(client, stackName1, stack.ID).Extract() + th.AssertNoErr(t, err) + t.Logf("Abandonded stack %+v\n", abandonedStack) + th.AssertNoErr(t, err) +} diff --git a/vendor/github.com/rackspace/gophercloud/acceptance/rackspace/orchestration/v1/stacktemplates_test.go b/vendor/github.com/rackspace/gophercloud/acceptance/rackspace/orchestration/v1/stacktemplates_test.go new file mode 100644 index 000000000..e4ccd9e6b --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/acceptance/rackspace/orchestration/v1/stacktemplates_test.go @@ -0,0 +1,77 @@ +// +build acceptance + +package v1 + +import ( + "testing" + + "github.com/rackspace/gophercloud" + osStacks "github.com/rackspace/gophercloud/openstack/orchestration/v1/stacks" + osStacktemplates "github.com/rackspace/gophercloud/openstack/orchestration/v1/stacktemplates" + "github.com/rackspace/gophercloud/rackspace/orchestration/v1/stacks" + "github.com/rackspace/gophercloud/rackspace/orchestration/v1/stacktemplates" + th "github.com/rackspace/gophercloud/testhelper" +) + +func TestStackTemplates(t *testing.T) { + // Create a provider client for making the HTTP requests. + // See common.go in this directory for more information. 
+ client := newClient(t) + + stackName := "postman_stack_2" + + createOpts := osStacks.CreateOpts{ + Name: stackName, + Template: template, + Timeout: 5, + } + stack, err := stacks.Create(client, createOpts).Extract() + th.AssertNoErr(t, err) + t.Logf("Created stack: %+v\n", stack) + defer func() { + err := stacks.Delete(client, stackName, stack.ID).ExtractErr() + th.AssertNoErr(t, err) + t.Logf("Deleted stack (%s)", stackName) + }() + err = gophercloud.WaitFor(60, func() (bool, error) { + getStack, err := stacks.Get(client, stackName, stack.ID).Extract() + if err != nil { + return false, err + } + if getStack.Status == "CREATE_COMPLETE" { + return true, nil + } + return false, nil + }) + + tmpl, err := stacktemplates.Get(client, stackName, stack.ID).Extract() + th.AssertNoErr(t, err) + t.Logf("retrieved template: %+v\n", tmpl) + + validateOpts := osStacktemplates.ValidateOpts{ + Template: `{"heat_template_version": "2013-05-23", + "description": "Simple template to test heat commands", + "parameters": { + "flavor": { + "default": "m1.tiny", + "type": "string", + }, + }, + "resources": { + "hello_world": { + "type": "OS::Nova::Server", + "properties": { + "key_name": "heat_key", + "flavor": { + "get_param": "flavor", + }, + "image": "ad091b52-742f-469e-8f3c-fd81cadf0743", + "user_data": "#!/bin/bash -xv\necho \"hello world\" > /root/hello-world.txt\n", + }, + }, + }, + }`} + validatedTemplate, err := stacktemplates.Validate(client, validateOpts).Extract() + th.AssertNoErr(t, err) + t.Logf("validated template: %+v\n", validatedTemplate) +} diff --git a/vendor/github.com/rackspace/gophercloud/acceptance/rackspace/pkg.go b/vendor/github.com/rackspace/gophercloud/acceptance/rackspace/pkg.go new file mode 100644 index 000000000..5d17b32ca --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/acceptance/rackspace/pkg.go @@ -0,0 +1 @@ +package rackspace diff --git a/vendor/github.com/rackspace/gophercloud/acceptance/rackspace/rackconnect/v3/cloudnetworks_test.go b/vendor/github.com/rackspace/gophercloud/acceptance/rackspace/rackconnect/v3/cloudnetworks_test.go new file mode 100644 index 000000000..2c6287e9f --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/acceptance/rackspace/rackconnect/v3/cloudnetworks_test.go @@ -0,0 +1,36 @@ +// +build acceptance + +package v3 + +import ( + "fmt" + "testing" + + "github.com/rackspace/gophercloud" + "github.com/rackspace/gophercloud/rackspace/rackconnect/v3/cloudnetworks" + th "github.com/rackspace/gophercloud/testhelper" +) + +func TestCloudNetworks(t *testing.T) { + c := newClient(t) + cnID := testListNetworks(t, c) + testGetNetworks(t, c, cnID) +} + +func testListNetworks(t *testing.T, c *gophercloud.ServiceClient) string { + allPages, err := cloudnetworks.List(c).AllPages() + th.AssertNoErr(t, err) + allcn, err := cloudnetworks.ExtractCloudNetworks(allPages) + fmt.Printf("Listing all cloud networks: %+v\n\n", allcn) + var cnID string + if len(allcn) > 0 { + cnID = allcn[0].ID + } + return cnID +} + +func testGetNetworks(t *testing.T, c *gophercloud.ServiceClient, id string) { + cn, err := cloudnetworks.Get(c, id).Extract() + th.AssertNoErr(t, err) + fmt.Printf("Retrieved cloud network: %+v\n\n", cn) +} diff --git a/vendor/github.com/rackspace/gophercloud/acceptance/rackspace/rackconnect/v3/common.go b/vendor/github.com/rackspace/gophercloud/acceptance/rackspace/rackconnect/v3/common.go new file mode 100644 index 000000000..8c7531417 --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/acceptance/rackspace/rackconnect/v3/common.go @@ 
-0,0 +1,26 @@ +// +build acceptance + +package v3 + +import ( + "os" + "testing" + + "github.com/rackspace/gophercloud" + "github.com/rackspace/gophercloud/rackspace" + th "github.com/rackspace/gophercloud/testhelper" +) + +func newClient(t *testing.T) *gophercloud.ServiceClient { + ao, err := rackspace.AuthOptionsFromEnv() + th.AssertNoErr(t, err) + + client, err := rackspace.AuthenticatedClient(ao) + th.AssertNoErr(t, err) + + c, err := rackspace.NewRackConnectV3(client, gophercloud.EndpointOpts{ + Region: os.Getenv("RS_REGION_NAME"), + }) + th.AssertNoErr(t, err) + return c +} diff --git a/vendor/github.com/rackspace/gophercloud/acceptance/rackspace/rackconnect/v3/lbpools_test.go b/vendor/github.com/rackspace/gophercloud/acceptance/rackspace/rackconnect/v3/lbpools_test.go new file mode 100644 index 000000000..85ac931b9 --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/acceptance/rackspace/rackconnect/v3/lbpools_test.go @@ -0,0 +1,71 @@ +// +build acceptance + +package v3 + +import ( + "fmt" + "testing" + + "github.com/rackspace/gophercloud" + "github.com/rackspace/gophercloud/rackspace/rackconnect/v3/lbpools" + th "github.com/rackspace/gophercloud/testhelper" +) + +func TestLBPools(t *testing.T) { + c := newClient(t) + pID := testListPools(t, c) + testGetPools(t, c, pID) + nID := testListNodes(t, c, pID) + testListNodeDetails(t, c, pID) + testGetNode(t, c, pID, nID) + testGetNodeDetails(t, c, pID, nID) +} + +func testListPools(t *testing.T, c *gophercloud.ServiceClient) string { + allPages, err := lbpools.List(c).AllPages() + th.AssertNoErr(t, err) + allp, err := lbpools.ExtractPools(allPages) + fmt.Printf("Listing all LB pools: %+v\n\n", allp) + var pID string + if len(allp) > 0 { + pID = allp[0].ID + } + return pID +} + +func testGetPools(t *testing.T, c *gophercloud.ServiceClient, pID string) { + p, err := lbpools.Get(c, pID).Extract() + th.AssertNoErr(t, err) + fmt.Printf("Retrieved LB pool: %+v\n\n", p) +} + +func testListNodes(t *testing.T, c *gophercloud.ServiceClient, pID string) string { + allPages, err := lbpools.ListNodes(c, pID).AllPages() + th.AssertNoErr(t, err) + alln, err := lbpools.ExtractNodes(allPages) + fmt.Printf("Listing all LB pool nodes for pool (%s): %+v\n\n", pID, alln) + var nID string + if len(alln) > 0 { + nID = alln[0].ID + } + return nID +} + +func testListNodeDetails(t *testing.T, c *gophercloud.ServiceClient, pID string) { + allPages, err := lbpools.ListNodesDetails(c, pID).AllPages() + th.AssertNoErr(t, err) + alln, err := lbpools.ExtractNodesDetails(allPages) + fmt.Printf("Listing all LB pool nodes details for pool (%s): %+v\n\n", pID, alln) +} + +func testGetNode(t *testing.T, c *gophercloud.ServiceClient, pID, nID string) { + n, err := lbpools.GetNode(c, pID, nID).Extract() + th.AssertNoErr(t, err) + fmt.Printf("Retrieved LB node: %+v\n\n", n) +} + +func testGetNodeDetails(t *testing.T, c *gophercloud.ServiceClient, pID, nID string) { + n, err := lbpools.GetNodeDetails(c, pID, nID).Extract() + th.AssertNoErr(t, err) + fmt.Printf("Retrieved LB node details: %+v\n\n", n) +} diff --git a/vendor/github.com/rackspace/gophercloud/acceptance/rackspace/rackconnect/v3/publicips_test.go b/vendor/github.com/rackspace/gophercloud/acceptance/rackspace/rackconnect/v3/publicips_test.go new file mode 100644 index 000000000..8dc62703b --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/acceptance/rackspace/rackconnect/v3/publicips_test.go @@ -0,0 +1,45 @@ +// +build acceptance + +package v3 + +import ( + "fmt" + "testing" + + 
"github.com/rackspace/gophercloud" + "github.com/rackspace/gophercloud/rackspace/rackconnect/v3/publicips" + th "github.com/rackspace/gophercloud/testhelper" +) + +func TestPublicIPs(t *testing.T) { + c := newClient(t) + ipID := testListIPs(t, c) + sID := testGetIP(t, c, ipID) + testListIPsForServer(t, c, sID) +} + +func testListIPs(t *testing.T, c *gophercloud.ServiceClient) string { + allPages, err := publicips.List(c).AllPages() + th.AssertNoErr(t, err) + allip, err := publicips.ExtractPublicIPs(allPages) + fmt.Printf("Listing all public IPs: %+v\n\n", allip) + var ipID string + if len(allip) > 0 { + ipID = allip[0].ID + } + return ipID +} + +func testGetIP(t *testing.T, c *gophercloud.ServiceClient, ipID string) string { + ip, err := publicips.Get(c, ipID).Extract() + th.AssertNoErr(t, err) + fmt.Printf("Retrieved public IP (%s): %+v\n\n", ipID, ip) + return ip.CloudServer.ID +} + +func testListIPsForServer(t *testing.T, c *gophercloud.ServiceClient, sID string) { + allPages, err := publicips.ListForServer(c, sID).AllPages() + th.AssertNoErr(t, err) + allip, err := publicips.ExtractPublicIPs(allPages) + fmt.Printf("Listing all public IPs for server (%s): %+v\n\n", sID, allip) +} diff --git a/vendor/github.com/rackspace/gophercloud/acceptance/tools/pkg.go b/vendor/github.com/rackspace/gophercloud/acceptance/tools/pkg.go new file mode 100644 index 000000000..f7eca1298 --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/acceptance/tools/pkg.go @@ -0,0 +1 @@ +package tools diff --git a/vendor/github.com/rackspace/gophercloud/acceptance/tools/tools.go b/vendor/github.com/rackspace/gophercloud/acceptance/tools/tools.go new file mode 100644 index 000000000..35679b728 --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/acceptance/tools/tools.go @@ -0,0 +1,89 @@ +// +build acceptance common + +package tools + +import ( + "crypto/rand" + "errors" + mrand "math/rand" + "os" + "time" + + "github.com/rackspace/gophercloud" +) + +// ErrTimeout is returned if WaitFor takes longer than 300 second to happen. +var ErrTimeout = errors.New("Timed out") + +// OnlyRS overrides the default Gophercloud behavior of using OS_-prefixed environment variables +// if RS_ variables aren't present. Otherwise, they'll stomp over each other here in the acceptance +// tests, where you need to have both defined. +func OnlyRS(original gophercloud.AuthOptions) gophercloud.AuthOptions { + if os.Getenv("RS_AUTH_URL") == "" { + original.IdentityEndpoint = "" + } + if os.Getenv("RS_USERNAME") == "" { + original.Username = "" + } + if os.Getenv("RS_PASSWORD") == "" { + original.Password = "" + } + if os.Getenv("RS_API_KEY") == "" { + original.APIKey = "" + } + return original +} + +// WaitFor polls a predicate function once per second to wait for a certain state to arrive. +func WaitFor(predicate func() (bool, error)) error { + for i := 0; i < 300; i++ { + time.Sleep(1 * time.Second) + + satisfied, err := predicate() + if err != nil { + return err + } + if satisfied { + return nil + } + } + return ErrTimeout +} + +// MakeNewPassword generates a new string that's guaranteed to be different than the given one. +func MakeNewPassword(oldPass string) string { + randomPassword := RandomString("", 16) + for randomPassword == oldPass { + randomPassword = RandomString("", 16) + } + return randomPassword +} + +// RandomString generates a string of given length, but random content. +// All content will be within the ASCII graphic character set. 
+// (Implementation from Even Shaw's contribution on +// http://stackoverflow.com/questions/12771930/what-is-the-fastest-way-to-generate-a-long-random-string-in-go). +func RandomString(prefix string, n int) string { + const alphanum = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz" + var bytes = make([]byte, n) + rand.Read(bytes) + for i, b := range bytes { + bytes[i] = alphanum[b%byte(len(alphanum))] + } + return prefix + string(bytes) +} + +// RandomInt will return a random integer between a specified range. +func RandomInt(min, max int) int { + mrand.Seed(time.Now().Unix()) + return mrand.Intn(max-min) + min +} + +// Elide returns the first bit of its input string with a suffix of "..." if it's longer than +// a comfortable 40 characters. +func Elide(value string) string { + if len(value) > 40 { + return value[0:37] + "..." + } + return value +} diff --git a/vendor/github.com/rackspace/gophercloud/auth_options.go b/vendor/github.com/rackspace/gophercloud/auth_options.go new file mode 100644 index 000000000..d26e16ac1 --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/auth_options.go @@ -0,0 +1,50 @@ +package gophercloud + +/* +AuthOptions stores information needed to authenticate to an OpenStack cluster. +You can populate one manually, or use a provider's AuthOptionsFromEnv() function +to read relevant information from the standard environment variables. Pass one +to a provider's AuthenticatedClient function to authenticate and obtain a +ProviderClient representing an active session on that provider. + +Its fields are the union of those recognized by each identity implementation and +provider. +*/ +type AuthOptions struct { + // IdentityEndpoint specifies the HTTP endpoint that is required to work with + // the Identity API of the appropriate version. While it's ultimately needed by + // all of the identity services, it will often be populated by a provider-level + // function. + IdentityEndpoint string + + // Username is required if using Identity V2 API. Consult with your provider's + // control panel to discover your account's username. In Identity V3, either + // UserID or a combination of Username and DomainID or DomainName are needed. + Username, UserID string + + // Exactly one of Password or APIKey is required for the Identity V2 and V3 + // APIs. Consult with your provider's control panel to discover your account's + // preferred method of authentication. + Password, APIKey string + + // At most one of DomainID and DomainName must be provided if using Username + // with Identity V3. Otherwise, either are optional. + DomainID, DomainName string + + // The TenantID and TenantName fields are optional for the Identity V2 API. + // Some providers allow you to specify a TenantName instead of the TenantId. + // Some require both. Your provider's authentication policies will determine + // how these fields influence authentication. + TenantID, TenantName string + + // AllowReauth should be set to true if you grant permission for Gophercloud to + // cache your credentials in memory, and to allow Gophercloud to attempt to + // re-authenticate automatically if/when your token expires. If you set it to + // false, it will not cache these settings, but re-authentication will not be + // possible. This setting defaults to false. + AllowReauth bool + + // TokenID allows users to authenticate (possibly as another user) with an + // authentication token ID. 
+ TokenID string +} diff --git a/vendor/github.com/rackspace/gophercloud/auth_results.go b/vendor/github.com/rackspace/gophercloud/auth_results.go new file mode 100644 index 000000000..856a23382 --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/auth_results.go @@ -0,0 +1,14 @@ +package gophercloud + +import "time" + +// AuthResults [deprecated] is a leftover type from the v0.x days. It was +// intended to describe common functionality among identity service results, but +// is not actually used anywhere. +type AuthResults interface { + // TokenID returns the token's ID value from the authentication response. + TokenID() (string, error) + + // ExpiresAt retrieves the token's expiration time. + ExpiresAt() (time.Time, error) +} diff --git a/vendor/github.com/rackspace/gophercloud/doc.go b/vendor/github.com/rackspace/gophercloud/doc.go new file mode 100644 index 000000000..fb81a9d8f --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/doc.go @@ -0,0 +1,67 @@ +/* +Package gophercloud provides a multi-vendor interface to OpenStack-compatible +clouds. The library has a three-level hierarchy: providers, services, and +resources. + +Provider structs represent the service providers that offer and manage a +collection of services. Examples of providers include: OpenStack, Rackspace, +HP. These are defined like so: + + opts := gophercloud.AuthOptions{ + IdentityEndpoint: "https://my-openstack.com:5000/v2.0", + Username: "{username}", + Password: "{password}", + TenantID: "{tenant_id}", + } + + provider, err := openstack.AuthenticatedClient(opts) + +Service structs are specific to a provider and handle all of the logic and +operations for a particular OpenStack service. Examples of services include: +Compute, Object Storage, Block Storage. In order to define one, you need to +pass in the parent provider, like so: + + opts := gophercloud.EndpointOpts{Region: "RegionOne"} + + client := openstack.NewComputeV2(provider, opts) + +Resource structs are the domain models that services make use of in order +to work with and represent the state of API resources: + + server, err := servers.Get(client, "{serverId}").Extract() + +Intermediate Result structs are returned for API operations, which allow +generic access to the HTTP headers, response body, and any errors associated +with the network transaction. To turn a result into a usable resource struct, +you must call the Extract method which is chained to the response, or an +Extract function from an applicable extension: + + result := servers.Get(client, "{serverId}") + + // Attempt to extract the disk configuration from the OS-DCF disk config + // extension: + config, err := diskconfig.ExtractGet(result) + +All requests that enumerate a collection return a Pager struct that is used to +iterate through the results one page at a time. Use the EachPage method on that +Pager to handle each successive Page in a closure, then use the appropriate +extraction method from that request's package to interpret that Page as a slice +of results: + + err := servers.List(client, nil).EachPage(func (page pagination.Page) (bool, error) { + s, err := servers.ExtractServers(page) + if err != nil { + return false, err + } + + // Handle the []servers.Server slice. + + // Return "false" or an error to prematurely stop fetching new pages. + return true, nil + }) + +This top-level package contains utility functions and data types that are used +throughout the provider and service packages. 
Of particular note for end users +are the AuthOptions and EndpointOpts structs. +*/ +package gophercloud diff --git a/vendor/github.com/rackspace/gophercloud/endpoint_search.go b/vendor/github.com/rackspace/gophercloud/endpoint_search.go new file mode 100644 index 000000000..518943121 --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/endpoint_search.go @@ -0,0 +1,92 @@ +package gophercloud + +import "errors" + +var ( + // ErrServiceNotFound is returned when no service in a service catalog matches + // the provided EndpointOpts. This is generally returned by provider service + // factory methods like "NewComputeV2()" and can mean that a service is not + // enabled for your account. + ErrServiceNotFound = errors.New("No suitable service could be found in the service catalog.") + + // ErrEndpointNotFound is returned when no available endpoints match the + // provided EndpointOpts. This is also generally returned by provider service + // factory methods, and usually indicates that a region was specified + // incorrectly. + ErrEndpointNotFound = errors.New("No suitable endpoint could be found in the service catalog.") +) + +// Availability indicates to whom a specific service endpoint is accessible: +// the internet at large, internal networks only, or only to administrators. +// Different identity services use different terminology for these. Identity v2 +// lists them as different kinds of URLs within the service catalog ("adminURL", +// "internalURL", and "publicURL"), while v3 lists them as "Interfaces" in an +// endpoint's response. +type Availability string + +const ( + // AvailabilityAdmin indicates that an endpoint is only available to + // administrators. + AvailabilityAdmin Availability = "admin" + + // AvailabilityPublic indicates that an endpoint is available to everyone on + // the internet. + AvailabilityPublic Availability = "public" + + // AvailabilityInternal indicates that an endpoint is only available within + // the cluster's internal network. + AvailabilityInternal Availability = "internal" +) + +// EndpointOpts specifies search criteria used by queries against an +// OpenStack service catalog. The options must contain enough information to +// unambiguously identify one, and only one, endpoint within the catalog. +// +// Usually, these are passed to service client factory functions in a provider +// package, like "rackspace.NewComputeV2()". +type EndpointOpts struct { + // Type [required] is the service type for the client (e.g., "compute", + // "object-store"). Generally, this will be supplied by the service client + // function, but a user-given value will be honored if provided. + Type string + + // Name [optional] is the service name for the client (e.g., "nova") as it + // appears in the service catalog. Services can have the same Type but a + // different Name, which is why both Type and Name are sometimes needed. + Name string + + // Region [required] is the geographic region in which the endpoint resides, + // generally specifying which datacenter should house your resources. + // Required only for services that span multiple regions. + Region string + + // Availability [optional] is the visibility of the endpoint to be returned. + // Valid types include the constants AvailabilityPublic, AvailabilityInternal, + // or AvailabilityAdmin from this package. + // + // Availability is not required, and defaults to AvailabilityPublic. Not all + // providers or services offer all Availability options. 
+ Availability Availability +} + +/* +EndpointLocator is an internal function to be used by provider implementations. + +It provides an implementation that locates a single endpoint from a service +catalog for a specific ProviderClient based on user-provided EndpointOpts. The +provider then uses it to discover related ServiceClients. +*/ +type EndpointLocator func(EndpointOpts) (string, error) + +// ApplyDefaults is an internal method to be used by provider implementations. +// +// It sets EndpointOpts fields if not already set, including a default type. +// Currently, EndpointOpts.Availability defaults to the public endpoint. +func (eo *EndpointOpts) ApplyDefaults(t string) { + if eo.Type == "" { + eo.Type = t + } + if eo.Availability == "" { + eo.Availability = AvailabilityPublic + } +} diff --git a/vendor/github.com/rackspace/gophercloud/endpoint_search_test.go b/vendor/github.com/rackspace/gophercloud/endpoint_search_test.go new file mode 100644 index 000000000..345745342 --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/endpoint_search_test.go @@ -0,0 +1,19 @@ +package gophercloud + +import ( + "testing" + + th "github.com/rackspace/gophercloud/testhelper" +) + +func TestApplyDefaultsToEndpointOpts(t *testing.T) { + eo := EndpointOpts{Availability: AvailabilityPublic} + eo.ApplyDefaults("compute") + expected := EndpointOpts{Availability: AvailabilityPublic, Type: "compute"} + th.CheckDeepEquals(t, expected, eo) + + eo = EndpointOpts{Type: "compute"} + eo.ApplyDefaults("object-store") + expected = EndpointOpts{Availability: AvailabilityPublic, Type: "compute"} + th.CheckDeepEquals(t, expected, eo) +} diff --git a/vendor/github.com/rackspace/gophercloud/openstack/auth_env.go b/vendor/github.com/rackspace/gophercloud/openstack/auth_env.go new file mode 100644 index 000000000..a4402b6f0 --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/openstack/auth_env.go @@ -0,0 +1,58 @@ +package openstack + +import ( + "fmt" + "os" + + "github.com/rackspace/gophercloud" +) + +var nilOptions = gophercloud.AuthOptions{} + +// ErrNoAuthUrl, ErrNoUsername, and ErrNoPassword errors indicate of the required OS_AUTH_URL, OS_USERNAME, or OS_PASSWORD +// environment variables, respectively, remain undefined. See the AuthOptions() function for more details. +var ( + ErrNoAuthURL = fmt.Errorf("Environment variable OS_AUTH_URL needs to be set.") + ErrNoUsername = fmt.Errorf("Environment variable OS_USERNAME needs to be set.") + ErrNoPassword = fmt.Errorf("Environment variable OS_PASSWORD needs to be set.") +) + +// AuthOptions fills out an identity.AuthOptions structure with the settings found on the various OpenStack +// OS_* environment variables. The following variables provide sources of truth: OS_AUTH_URL, OS_USERNAME, +// OS_PASSWORD, OS_TENANT_ID, and OS_TENANT_NAME. Of these, OS_USERNAME, OS_PASSWORD, and OS_AUTH_URL must +// have settings, or an error will result. OS_TENANT_ID and OS_TENANT_NAME are optional. 
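+//
+// A minimal usage sketch (error handling elided; openstack.AuthenticatedClient
+// is the usual next step):
+//
+//	ao, err := openstack.AuthOptionsFromEnv()
+//	if err != nil {
+//		// a required OS_* variable was missing
+//	}
+//	provider, err := openstack.AuthenticatedClient(ao)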
+func AuthOptionsFromEnv() (gophercloud.AuthOptions, error) { + authURL := os.Getenv("OS_AUTH_URL") + username := os.Getenv("OS_USERNAME") + userID := os.Getenv("OS_USERID") + password := os.Getenv("OS_PASSWORD") + tenantID := os.Getenv("OS_TENANT_ID") + tenantName := os.Getenv("OS_TENANT_NAME") + domainID := os.Getenv("OS_DOMAIN_ID") + domainName := os.Getenv("OS_DOMAIN_NAME") + + if authURL == "" { + return nilOptions, ErrNoAuthURL + } + + if username == "" && userID == "" { + return nilOptions, ErrNoUsername + } + + if password == "" { + return nilOptions, ErrNoPassword + } + + ao := gophercloud.AuthOptions{ + IdentityEndpoint: authURL, + UserID: userID, + Username: username, + Password: password, + TenantID: tenantID, + TenantName: tenantName, + DomainID: domainID, + DomainName: domainName, + } + + return ao, nil +} diff --git a/vendor/github.com/rackspace/gophercloud/openstack/blockstorage/v1/apiversions/doc.go b/vendor/github.com/rackspace/gophercloud/openstack/blockstorage/v1/apiversions/doc.go new file mode 100644 index 000000000..e3af39f51 --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/openstack/blockstorage/v1/apiversions/doc.go @@ -0,0 +1,3 @@ +// Package apiversions provides information and interaction with the different +// API versions for the OpenStack Block Storage service, code-named Cinder. +package apiversions diff --git a/vendor/github.com/rackspace/gophercloud/openstack/blockstorage/v1/apiversions/requests.go b/vendor/github.com/rackspace/gophercloud/openstack/blockstorage/v1/apiversions/requests.go new file mode 100644 index 000000000..bb2c25915 --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/openstack/blockstorage/v1/apiversions/requests.go @@ -0,0 +1,21 @@ +package apiversions + +import ( + "github.com/rackspace/gophercloud" + "github.com/rackspace/gophercloud/pagination" +) + +// List lists all the Cinder API versions available to end-users. +func List(c *gophercloud.ServiceClient) pagination.Pager { + return pagination.NewPager(c, listURL(c), func(r pagination.PageResult) pagination.Page { + return APIVersionPage{pagination.SinglePageBase(r)} + }) +} + +// Get will retrieve the volume type with the provided ID. To extract the volume +// type from the result, call the Extract method on the GetResult. 
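+//
+// A short usage sketch (assuming "client" is an authenticated
+// *gophercloud.ServiceClient for the Block Storage endpoint):
+//
+//	version, err := apiversions.Get(client, "v1").Extract()
+//	if err == nil {
+//		fmt.Printf("%s (%s)\n", version.ID, version.Status)
+//	}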
+func Get(client *gophercloud.ServiceClient, v string) GetResult { + var res GetResult + _, res.Err = client.Get(getURL(client, v), &res.Body, nil) + return res +} diff --git a/vendor/github.com/rackspace/gophercloud/openstack/blockstorage/v1/apiversions/requests_test.go b/vendor/github.com/rackspace/gophercloud/openstack/blockstorage/v1/apiversions/requests_test.go new file mode 100644 index 000000000..56b5e4fc7 --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/openstack/blockstorage/v1/apiversions/requests_test.go @@ -0,0 +1,145 @@ +package apiversions + +import ( + "fmt" + "net/http" + "testing" + + "github.com/rackspace/gophercloud/pagination" + th "github.com/rackspace/gophercloud/testhelper" + "github.com/rackspace/gophercloud/testhelper/client" +) + +func TestListVersions(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + th.Mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", client.TokenID) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + + fmt.Fprintf(w, `{ + "versions": [ + { + "status": "CURRENT", + "updated": "2012-01-04T11:33:21Z", + "id": "v1.0", + "links": [ + { + "href": "http://23.253.228.211:8776/v1/", + "rel": "self" + } + ] + }, + { + "status": "CURRENT", + "updated": "2012-11-21T11:33:21Z", + "id": "v2.0", + "links": [ + { + "href": "http://23.253.228.211:8776/v2/", + "rel": "self" + } + ] + } + ] + }`) + }) + + count := 0 + + List(client.ServiceClient()).EachPage(func(page pagination.Page) (bool, error) { + count++ + actual, err := ExtractAPIVersions(page) + if err != nil { + t.Errorf("Failed to extract API versions: %v", err) + return false, err + } + + expected := []APIVersion{ + APIVersion{ + ID: "v1.0", + Status: "CURRENT", + Updated: "2012-01-04T11:33:21Z", + }, + APIVersion{ + ID: "v2.0", + Status: "CURRENT", + Updated: "2012-11-21T11:33:21Z", + }, + } + + th.AssertDeepEquals(t, expected, actual) + + return true, nil + }) + + if count != 1 { + t.Errorf("Expected 1 page, got %d", count) + } +} + +func TestAPIInfo(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + th.Mux.HandleFunc("/v1/", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", client.TokenID) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + + fmt.Fprintf(w, `{ + "version": { + "status": "CURRENT", + "updated": "2012-01-04T11:33:21Z", + "media-types": [ + { + "base": "application/xml", + "type": "application/vnd.openstack.volume+xml;version=1" + }, + { + "base": "application/json", + "type": "application/vnd.openstack.volume+json;version=1" + } + ], + "id": "v1.0", + "links": [ + { + "href": "http://23.253.228.211:8776/v1/", + "rel": "self" + }, + { + "href": "http://jorgew.github.com/block-storage-api/content/os-block-storage-1.0.pdf", + "type": "application/pdf", + "rel": "describedby" + }, + { + "href": "http://docs.rackspacecloud.com/servers/api/v1.1/application.wadl", + "type": "application/vnd.sun.wadl+xml", + "rel": "describedby" + } + ] + } + }`) + }) + + actual, err := Get(client.ServiceClient(), "v1").Extract() + if err != nil { + t.Errorf("Failed to extract version: %v", err) + } + + expected := APIVersion{ + ID: "v1.0", + Status: "CURRENT", + Updated: "2012-01-04T11:33:21Z", + } + + th.AssertEquals(t, actual.ID, expected.ID) + th.AssertEquals(t, actual.Status, expected.Status) + th.AssertEquals(t, actual.Updated, expected.Updated) +} diff 
--git a/vendor/github.com/rackspace/gophercloud/openstack/blockstorage/v1/apiversions/results.go b/vendor/github.com/rackspace/gophercloud/openstack/blockstorage/v1/apiversions/results.go new file mode 100644 index 000000000..7b0df115b --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/openstack/blockstorage/v1/apiversions/results.go @@ -0,0 +1,58 @@ +package apiversions + +import ( + "github.com/rackspace/gophercloud" + "github.com/rackspace/gophercloud/pagination" + + "github.com/mitchellh/mapstructure" +) + +// APIVersion represents an API version for Cinder. +type APIVersion struct { + ID string `json:"id" mapstructure:"id"` // unique identifier + Status string `json:"status" mapstructure:"status"` // current status + Updated string `json:"updated" mapstructure:"updated"` // date last updated +} + +// APIVersionPage is the page returned by a pager when traversing over a +// collection of API versions. +type APIVersionPage struct { + pagination.SinglePageBase +} + +// IsEmpty checks whether an APIVersionPage struct is empty. +func (r APIVersionPage) IsEmpty() (bool, error) { + is, err := ExtractAPIVersions(r) + if err != nil { + return true, err + } + return len(is) == 0, nil +} + +// ExtractAPIVersions takes a collection page, extracts all of the elements, +// and returns them a slice of APIVersion structs. It is effectively a cast. +func ExtractAPIVersions(page pagination.Page) ([]APIVersion, error) { + var resp struct { + Versions []APIVersion `mapstructure:"versions"` + } + + err := mapstructure.Decode(page.(APIVersionPage).Body, &resp) + + return resp.Versions, err +} + +// GetResult represents the result of a get operation. +type GetResult struct { + gophercloud.Result +} + +// Extract is a function that accepts a result and extracts an API version resource. 
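+//
+// Single Get results are extracted here; pages from the List call are instead
+// interpreted with ExtractAPIVersions, as in this sketch (with "client" an
+// authenticated *gophercloud.ServiceClient):
+//
+//	apiversions.List(client).EachPage(func(page pagination.Page) (bool, error) {
+//		versions, err := apiversions.ExtractAPIVersions(page)
+//		if err != nil {
+//			return false, err
+//		}
+//		for _, v := range versions {
+//			fmt.Println(v.ID, v.Status)
+//		}
+//		return true, nil
+//	})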
+func (r GetResult) Extract() (*APIVersion, error) { + var resp struct { + Version *APIVersion `mapstructure:"version"` + } + + err := mapstructure.Decode(r.Body, &resp) + + return resp.Version, err +} diff --git a/vendor/github.com/rackspace/gophercloud/openstack/blockstorage/v1/apiversions/urls.go b/vendor/github.com/rackspace/gophercloud/openstack/blockstorage/v1/apiversions/urls.go new file mode 100644 index 000000000..56f8260a2 --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/openstack/blockstorage/v1/apiversions/urls.go @@ -0,0 +1,15 @@ +package apiversions + +import ( + "strings" + + "github.com/rackspace/gophercloud" +) + +func getURL(c *gophercloud.ServiceClient, version string) string { + return c.ServiceURL(strings.TrimRight(version, "/") + "/") +} + +func listURL(c *gophercloud.ServiceClient) string { + return c.ServiceURL("") +} diff --git a/vendor/github.com/rackspace/gophercloud/openstack/blockstorage/v1/apiversions/urls_test.go b/vendor/github.com/rackspace/gophercloud/openstack/blockstorage/v1/apiversions/urls_test.go new file mode 100644 index 000000000..37e91425b --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/openstack/blockstorage/v1/apiversions/urls_test.go @@ -0,0 +1,26 @@ +package apiversions + +import ( + "testing" + + "github.com/rackspace/gophercloud" + th "github.com/rackspace/gophercloud/testhelper" +) + +const endpoint = "http://localhost:57909/" + +func endpointClient() *gophercloud.ServiceClient { + return &gophercloud.ServiceClient{Endpoint: endpoint} +} + +func TestGetURL(t *testing.T) { + actual := getURL(endpointClient(), "v1") + expected := endpoint + "v1/" + th.AssertEquals(t, expected, actual) +} + +func TestListURL(t *testing.T) { + actual := listURL(endpointClient()) + expected := endpoint + th.AssertEquals(t, expected, actual) +} diff --git a/vendor/github.com/rackspace/gophercloud/openstack/blockstorage/v1/snapshots/doc.go b/vendor/github.com/rackspace/gophercloud/openstack/blockstorage/v1/snapshots/doc.go new file mode 100644 index 000000000..198f83077 --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/openstack/blockstorage/v1/snapshots/doc.go @@ -0,0 +1,5 @@ +// Package snapshots provides information and interaction with snapshots in the +// OpenStack Block Storage service. A snapshot is a point in time copy of the +// data contained in an external storage volume, and can be controlled +// programmatically. 
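+//
+// A minimal create sketch (assuming an authenticated
+// *gophercloud.ServiceClient named "client" and an existing volume ID):
+//
+//	opts := snapshots.CreateOpts{VolumeID: "1234", Name: "snapshot-001"}
+//	snap, err := snapshots.Create(client, opts).Extract()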
+package snapshots diff --git a/vendor/github.com/rackspace/gophercloud/openstack/blockstorage/v1/snapshots/fixtures.go b/vendor/github.com/rackspace/gophercloud/openstack/blockstorage/v1/snapshots/fixtures.go new file mode 100644 index 000000000..d1461fb69 --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/openstack/blockstorage/v1/snapshots/fixtures.go @@ -0,0 +1,114 @@ +package snapshots + +import ( + "fmt" + "net/http" + "testing" + + th "github.com/rackspace/gophercloud/testhelper" + fake "github.com/rackspace/gophercloud/testhelper/client" +) + +func MockListResponse(t *testing.T) { + th.Mux.HandleFunc("/snapshots", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + + fmt.Fprintf(w, ` + { + "snapshots": [ + { + "id": "289da7f8-6440-407c-9fb4-7db01ec49164", + "display_name": "snapshot-001" + }, + { + "id": "96c3bda7-c82a-4f50-be73-ca7621794835", + "display_name": "snapshot-002" + } + ] + } + `) + }) +} + +func MockGetResponse(t *testing.T) { + th.Mux.HandleFunc("/snapshots/d32019d3-bc6e-4319-9c1d-6722fc136a22", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + fmt.Fprintf(w, ` +{ + "snapshot": { + "display_name": "snapshot-001", + "id": "d32019d3-bc6e-4319-9c1d-6722fc136a22" + } +} + `) + }) +} + +func MockCreateResponse(t *testing.T) { + th.Mux.HandleFunc("/snapshots", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "POST") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + th.TestHeader(t, r, "Content-Type", "application/json") + th.TestHeader(t, r, "Accept", "application/json") + th.TestJSONRequest(t, r, ` +{ + "snapshot": { + "volume_id": "1234", + "display_name": "snapshot-001" + } +} + `) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusCreated) + + fmt.Fprintf(w, ` +{ + "snapshot": { + "volume_id": "1234", + "display_name": "snapshot-001", + "id": "d32019d3-bc6e-4319-9c1d-6722fc136a22" + } +} + `) + }) +} + +func MockUpdateMetadataResponse(t *testing.T) { + th.Mux.HandleFunc("/snapshots/123/metadata", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "PUT") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + th.TestHeader(t, r, "Content-Type", "application/json") + th.TestJSONRequest(t, r, ` + { + "metadata": { + "key": "v1" + } + } + `) + + fmt.Fprintf(w, ` + { + "metadata": { + "key": "v1" + } + } + `) + }) +} + +func MockDeleteResponse(t *testing.T) { + th.Mux.HandleFunc("/snapshots/d32019d3-bc6e-4319-9c1d-6722fc136a22", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "DELETE") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + w.WriteHeader(http.StatusNoContent) + }) +} diff --git a/vendor/github.com/rackspace/gophercloud/openstack/blockstorage/v1/snapshots/requests.go b/vendor/github.com/rackspace/gophercloud/openstack/blockstorage/v1/snapshots/requests.go new file mode 100644 index 000000000..71936e51b --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/openstack/blockstorage/v1/snapshots/requests.go @@ -0,0 +1,206 @@ +package snapshots + +import ( + "fmt" + + "github.com/rackspace/gophercloud" + "github.com/rackspace/gophercloud/pagination" +) + +// CreateOptsBuilder allows extensions to add additional parameters to the +// 
Create request. +type CreateOptsBuilder interface { + ToSnapshotCreateMap() (map[string]interface{}, error) +} + +// CreateOpts contains options for creating a Snapshot. This object is passed to +// the snapshots.Create function. For more information about these parameters, +// see the Snapshot object. +type CreateOpts struct { + // OPTIONAL + Description string + // OPTIONAL + Force bool + // OPTIONAL + Metadata map[string]interface{} + // OPTIONAL + Name string + // REQUIRED + VolumeID string +} + +// ToSnapshotCreateMap assembles a request body based on the contents of a +// CreateOpts. +func (opts CreateOpts) ToSnapshotCreateMap() (map[string]interface{}, error) { + s := make(map[string]interface{}) + + if opts.VolumeID == "" { + return nil, fmt.Errorf("Required CreateOpts field 'VolumeID' not set.") + } + s["volume_id"] = opts.VolumeID + + if opts.Description != "" { + s["display_description"] = opts.Description + } + if opts.Force == true { + s["force"] = opts.Force + } + if opts.Metadata != nil { + s["metadata"] = opts.Metadata + } + if opts.Name != "" { + s["display_name"] = opts.Name + } + + return map[string]interface{}{"snapshot": s}, nil +} + +// Create will create a new Snapshot based on the values in CreateOpts. To +// extract the Snapshot object from the response, call the Extract method on the +// CreateResult. +func Create(client *gophercloud.ServiceClient, opts CreateOptsBuilder) CreateResult { + var res CreateResult + + reqBody, err := opts.ToSnapshotCreateMap() + if err != nil { + res.Err = err + return res + } + + _, res.Err = client.Post(createURL(client), reqBody, &res.Body, &gophercloud.RequestOpts{ + OkCodes: []int{200, 201}, + }) + return res +} + +// Delete will delete the existing Snapshot with the provided ID. +func Delete(client *gophercloud.ServiceClient, id string) DeleteResult { + var res DeleteResult + _, res.Err = client.Delete(deleteURL(client, id), nil) + return res +} + +// Get retrieves the Snapshot with the provided ID. To extract the Snapshot +// object from the response, call the Extract method on the GetResult. +func Get(client *gophercloud.ServiceClient, id string) GetResult { + var res GetResult + _, res.Err = client.Get(getURL(client, id), &res.Body, nil) + return res +} + +// ListOptsBuilder allows extensions to add additional parameters to the List +// request. +type ListOptsBuilder interface { + ToSnapshotListQuery() (string, error) +} + +// ListOpts hold options for listing Snapshots. It is passed to the +// snapshots.List function. +type ListOpts struct { + Name string `q:"display_name"` + Status string `q:"status"` + VolumeID string `q:"volume_id"` +} + +// ToSnapshotListQuery formats a ListOpts into a query string. +func (opts ListOpts) ToSnapshotListQuery() (string, error) { + q, err := gophercloud.BuildQueryString(opts) + if err != nil { + return "", err + } + return q.String(), nil +} + +// List returns Snapshots optionally limited by the conditions provided in +// ListOpts. +func List(client *gophercloud.ServiceClient, opts ListOptsBuilder) pagination.Pager { + url := listURL(client) + if opts != nil { + query, err := opts.ToSnapshotListQuery() + if err != nil { + return pagination.Pager{Err: err} + } + url += query + } + + createPage := func(r pagination.PageResult) pagination.Page { + return ListResult{pagination.SinglePageBase(r)} + } + return pagination.NewPager(client, url, createPage) +} + +// UpdateMetadataOptsBuilder allows extensions to add additional parameters to +// the Update request. 
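+//
+// A short sketch of a metadata update (the snapshot ID "123" is purely
+// illustrative):
+//
+//	opts := snapshots.UpdateMetadataOpts{
+//		Metadata: map[string]interface{}{"key": "v1"},
+//	}
+//	md, err := snapshots.UpdateMetadata(client, "123", opts).ExtractMetadata()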
+type UpdateMetadataOptsBuilder interface { + ToSnapshotUpdateMetadataMap() (map[string]interface{}, error) +} + +// UpdateMetadataOpts contains options for updating an existing Snapshot. This +// object is passed to the snapshots.Update function. For more information +// about the parameters, see the Snapshot object. +type UpdateMetadataOpts struct { + Metadata map[string]interface{} +} + +// ToSnapshotUpdateMetadataMap assembles a request body based on the contents of +// an UpdateMetadataOpts. +func (opts UpdateMetadataOpts) ToSnapshotUpdateMetadataMap() (map[string]interface{}, error) { + v := make(map[string]interface{}) + + if opts.Metadata != nil { + v["metadata"] = opts.Metadata + } + + return v, nil +} + +// UpdateMetadata will update the Snapshot with provided information. To +// extract the updated Snapshot from the response, call the ExtractMetadata +// method on the UpdateMetadataResult. +func UpdateMetadata(client *gophercloud.ServiceClient, id string, opts UpdateMetadataOptsBuilder) UpdateMetadataResult { + var res UpdateMetadataResult + + reqBody, err := opts.ToSnapshotUpdateMetadataMap() + if err != nil { + res.Err = err + return res + } + + _, res.Err = client.Put(updateMetadataURL(client, id), reqBody, &res.Body, &gophercloud.RequestOpts{ + OkCodes: []int{200}, + }) + return res +} + +// IDFromName is a convenience function that returns a snapshot's ID given its name. +func IDFromName(client *gophercloud.ServiceClient, name string) (string, error) { + snapshotCount := 0 + snapshotID := "" + if name == "" { + return "", fmt.Errorf("A snapshot name must be provided.") + } + pager := List(client, nil) + pager.EachPage(func(page pagination.Page) (bool, error) { + snapshotList, err := ExtractSnapshots(page) + if err != nil { + return false, err + } + + for _, s := range snapshotList { + if s.Name == name { + snapshotCount++ + snapshotID = s.ID + } + } + return true, nil + }) + + switch snapshotCount { + case 0: + return "", fmt.Errorf("Unable to find snapshot: %s", name) + case 1: + return snapshotID, nil + default: + return "", fmt.Errorf("Found %d snapshots matching %s", snapshotCount, name) + } +} diff --git a/vendor/github.com/rackspace/gophercloud/openstack/blockstorage/v1/snapshots/requests_test.go b/vendor/github.com/rackspace/gophercloud/openstack/blockstorage/v1/snapshots/requests_test.go new file mode 100644 index 000000000..d0f9e887e --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/openstack/blockstorage/v1/snapshots/requests_test.go @@ -0,0 +1,104 @@ +package snapshots + +import ( + "testing" + + "github.com/rackspace/gophercloud/pagination" + th "github.com/rackspace/gophercloud/testhelper" + "github.com/rackspace/gophercloud/testhelper/client" +) + +func TestList(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + MockListResponse(t) + + count := 0 + + List(client.ServiceClient(), &ListOpts{}).EachPage(func(page pagination.Page) (bool, error) { + count++ + actual, err := ExtractSnapshots(page) + if err != nil { + t.Errorf("Failed to extract snapshots: %v", err) + return false, err + } + + expected := []Snapshot{ + Snapshot{ + ID: "289da7f8-6440-407c-9fb4-7db01ec49164", + Name: "snapshot-001", + }, + Snapshot{ + ID: "96c3bda7-c82a-4f50-be73-ca7621794835", + Name: "snapshot-002", + }, + } + + th.CheckDeepEquals(t, expected, actual) + + return true, nil + }) + + if count != 1 { + t.Errorf("Expected 1 page, got %d", count) + } +} + +func TestGet(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + MockGetResponse(t) + + v, err :=
Get(client.ServiceClient(), "d32019d3-bc6e-4319-9c1d-6722fc136a22").Extract() + th.AssertNoErr(t, err) + + th.AssertEquals(t, v.Name, "snapshot-001") + th.AssertEquals(t, v.ID, "d32019d3-bc6e-4319-9c1d-6722fc136a22") +} + +func TestCreate(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + MockCreateResponse(t) + + options := CreateOpts{VolumeID: "1234", Name: "snapshot-001"} + n, err := Create(client.ServiceClient(), options).Extract() + th.AssertNoErr(t, err) + + th.AssertEquals(t, n.VolumeID, "1234") + th.AssertEquals(t, n.Name, "snapshot-001") + th.AssertEquals(t, n.ID, "d32019d3-bc6e-4319-9c1d-6722fc136a22") +} + +func TestUpdateMetadata(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + MockUpdateMetadataResponse(t) + + expected := map[string]interface{}{"key": "v1"} + + options := &UpdateMetadataOpts{ + Metadata: map[string]interface{}{ + "key": "v1", + }, + } + + actual, err := UpdateMetadata(client.ServiceClient(), "123", options).ExtractMetadata() + + th.AssertNoErr(t, err) + th.AssertDeepEquals(t, actual, expected) +} + +func TestDelete(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + MockDeleteResponse(t) + + res := Delete(client.ServiceClient(), "d32019d3-bc6e-4319-9c1d-6722fc136a22") + th.AssertNoErr(t, res.Err) +} diff --git a/vendor/github.com/rackspace/gophercloud/openstack/blockstorage/v1/snapshots/results.go b/vendor/github.com/rackspace/gophercloud/openstack/blockstorage/v1/snapshots/results.go new file mode 100644 index 000000000..e595798e4 --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/openstack/blockstorage/v1/snapshots/results.go @@ -0,0 +1,123 @@ +package snapshots + +import ( + "github.com/rackspace/gophercloud" + "github.com/rackspace/gophercloud/pagination" + + "github.com/mitchellh/mapstructure" +) + +// Snapshot contains all the information associated with an OpenStack Snapshot. +type Snapshot struct { + // Current status of the Snapshot. + Status string `mapstructure:"status"` + + // Display name. + Name string `mapstructure:"display_name"` + + // Instances onto which the Snapshot is attached. + Attachments []string `mapstructure:"attachments"` + + // Logical group. + AvailabilityZone string `mapstructure:"availability_zone"` + + // Is the Snapshot bootable? + Bootable string `mapstructure:"bootable"` + + // Date created. + CreatedAt string `mapstructure:"created_at"` + + // Display description. + Description string `mapstructure:"display_description"` + + // See VolumeType object for more information. + VolumeType string `mapstructure:"volume_type"` + + // ID of the Snapshot from which this Snapshot was created. + SnapshotID string `mapstructure:"snapshot_id"` + + // ID of the Volume from which this Snapshot was created. + VolumeID string `mapstructure:"volume_id"` + + // User-defined key-value pairs. + Metadata map[string]string `mapstructure:"metadata"` + + // Unique identifier. + ID string `mapstructure:"id"` + + // Size of the Snapshot, in GB. + Size int `mapstructure:"size"` +} + +// CreateResult contains the response body and error from a Create request. +type CreateResult struct { + commonResult +} + +// GetResult contains the response body and error from a Get request. +type GetResult struct { + commonResult +} + +// DeleteResult contains the response body and error from a Delete request. +type DeleteResult struct { + gophercloud.ErrResult +} + +// ListResult is a pagination.Page that is returned from a call to the List function.
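+//
+// A minimal sketch of consuming pages with ExtractSnapshots, assuming an
+// authenticated client (this mirrors the pattern used in the package tests):
+//
+//	err := snapshots.List(client, nil).EachPage(func(page pagination.Page) (bool, error) {
+//		snaps, err := snapshots.ExtractSnapshots(page)
+//		if err != nil {
+//			return false, err
+//		}
+//		for _, s := range snaps {
+//			fmt.Println(s.ID, s.Name)
+//		}
+//		return true, nil
+//	})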
+type ListResult struct { + pagination.SinglePageBase +} + +// IsEmpty returns true if a ListResult contains no Snapshots. +func (r ListResult) IsEmpty() (bool, error) { + volumes, err := ExtractSnapshots(r) + if err != nil { + return true, err + } + return len(volumes) == 0, nil +} + +// ExtractSnapshots extracts and returns Snapshots. It is used while iterating over a snapshots.List call. +func ExtractSnapshots(page pagination.Page) ([]Snapshot, error) { + var response struct { + Snapshots []Snapshot `json:"snapshots"` + } + + err := mapstructure.Decode(page.(ListResult).Body, &response) + return response.Snapshots, err +} + +// UpdateMetadataResult contains the response body and error from an UpdateMetadata request. +type UpdateMetadataResult struct { + commonResult +} + +// ExtractMetadata returns the metadata from a response from snapshots.UpdateMetadata. +func (r UpdateMetadataResult) ExtractMetadata() (map[string]interface{}, error) { + if r.Err != nil { + return nil, r.Err + } + + m := r.Body.(map[string]interface{})["metadata"] + return m.(map[string]interface{}), nil +} + +type commonResult struct { + gophercloud.Result +} + +// Extract will get the Snapshot object out of the commonResult object. +func (r commonResult) Extract() (*Snapshot, error) { + if r.Err != nil { + return nil, r.Err + } + + var res struct { + Snapshot *Snapshot `json:"snapshot"` + } + + err := mapstructure.Decode(r.Body, &res) + + return res.Snapshot, err +} diff --git a/vendor/github.com/rackspace/gophercloud/openstack/blockstorage/v1/snapshots/urls.go b/vendor/github.com/rackspace/gophercloud/openstack/blockstorage/v1/snapshots/urls.go new file mode 100644 index 000000000..4d635e8dd --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/openstack/blockstorage/v1/snapshots/urls.go @@ -0,0 +1,27 @@ +package snapshots + +import "github.com/rackspace/gophercloud" + +func createURL(c *gophercloud.ServiceClient) string { + return c.ServiceURL("snapshots") +} + +func deleteURL(c *gophercloud.ServiceClient, id string) string { + return c.ServiceURL("snapshots", id) +} + +func getURL(c *gophercloud.ServiceClient, id string) string { + return deleteURL(c, id) +} + +func listURL(c *gophercloud.ServiceClient) string { + return createURL(c) +} + +func metadataURL(c *gophercloud.ServiceClient, id string) string { + return c.ServiceURL("snapshots", id, "metadata") +} + +func updateMetadataURL(c *gophercloud.ServiceClient, id string) string { + return metadataURL(c, id) +} diff --git a/vendor/github.com/rackspace/gophercloud/openstack/blockstorage/v1/snapshots/urls_test.go b/vendor/github.com/rackspace/gophercloud/openstack/blockstorage/v1/snapshots/urls_test.go new file mode 100644 index 000000000..feacf7f69 --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/openstack/blockstorage/v1/snapshots/urls_test.go @@ -0,0 +1,50 @@ +package snapshots + +import ( + "testing" + + "github.com/rackspace/gophercloud" + th "github.com/rackspace/gophercloud/testhelper" +) + +const endpoint = "http://localhost:57909" + +func endpointClient() *gophercloud.ServiceClient { + return &gophercloud.ServiceClient{Endpoint: endpoint} +} + +func TestCreateURL(t *testing.T) { + actual := createURL(endpointClient()) + expected := endpoint + "snapshots" + th.AssertEquals(t, expected, actual) +} + +func TestDeleteURL(t *testing.T) { + actual := deleteURL(endpointClient(), "foo") + expected := endpoint + "snapshots/foo" + th.AssertEquals(t, expected, actual) +} + +func TestGetURL(t *testing.T) { + actual := getURL(endpointClient(), 
"foo") + expected := endpoint + "snapshots/foo" + th.AssertEquals(t, expected, actual) +} + +func TestListURL(t *testing.T) { + actual := listURL(endpointClient()) + expected := endpoint + "snapshots" + th.AssertEquals(t, expected, actual) +} + +func TestMetadataURL(t *testing.T) { + actual := metadataURL(endpointClient(), "foo") + expected := endpoint + "snapshots/foo/metadata" + th.AssertEquals(t, expected, actual) +} + +func TestUpdateMetadataURL(t *testing.T) { + actual := updateMetadataURL(endpointClient(), "foo") + expected := endpoint + "snapshots/foo/metadata" + th.AssertEquals(t, expected, actual) +} diff --git a/vendor/github.com/rackspace/gophercloud/openstack/blockstorage/v1/snapshots/util.go b/vendor/github.com/rackspace/gophercloud/openstack/blockstorage/v1/snapshots/util.go new file mode 100644 index 000000000..64cdc607e --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/openstack/blockstorage/v1/snapshots/util.go @@ -0,0 +1,22 @@ +package snapshots + +import ( + "github.com/rackspace/gophercloud" +) + +// WaitForStatus will continually poll the resource, checking for a particular +// status. It will do this for the amount of seconds defined. +func WaitForStatus(c *gophercloud.ServiceClient, id, status string, secs int) error { + return gophercloud.WaitFor(secs, func() (bool, error) { + current, err := Get(c, id).Extract() + if err != nil { + return false, err + } + + if current.Status == status { + return true, nil + } + + return false, nil + }) +} diff --git a/vendor/github.com/rackspace/gophercloud/openstack/blockstorage/v1/volumes/doc.go b/vendor/github.com/rackspace/gophercloud/openstack/blockstorage/v1/volumes/doc.go new file mode 100644 index 000000000..307b8b12d --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/openstack/blockstorage/v1/volumes/doc.go @@ -0,0 +1,5 @@ +// Package volumes provides information and interaction with volumes in the +// OpenStack Block Storage service. A volume is a detachable block storage +// device, akin to a USB hard drive. It can only be attached to one instance at +// a time. +package volumes diff --git a/vendor/github.com/rackspace/gophercloud/openstack/blockstorage/v1/volumes/requests.go b/vendor/github.com/rackspace/gophercloud/openstack/blockstorage/v1/volumes/requests.go new file mode 100644 index 000000000..3e9243ac4 --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/openstack/blockstorage/v1/volumes/requests.go @@ -0,0 +1,236 @@ +package volumes + +import ( + "fmt" + + "github.com/rackspace/gophercloud" + "github.com/rackspace/gophercloud/pagination" +) + +// CreateOptsBuilder allows extensions to add additional parameters to the +// Create request. +type CreateOptsBuilder interface { + ToVolumeCreateMap() (map[string]interface{}, error) +} + +// CreateOpts contains options for creating a Volume. This object is passed to +// the volumes.Create function. For more information about these parameters, +// see the Volume object. +type CreateOpts struct { + // OPTIONAL + Availability string + // OPTIONAL + Description string + // OPTIONAL + Metadata map[string]string + // OPTIONAL + Name string + // REQUIRED + Size int + // OPTIONAL + SnapshotID, SourceVolID, ImageID string + // OPTIONAL + VolumeType string +} + +// ToVolumeCreateMap assembles a request body based on the contents of a +// CreateOpts. 
+func (opts CreateOpts) ToVolumeCreateMap() (map[string]interface{}, error) { + v := make(map[string]interface{}) + + if opts.Size == 0 { + return nil, fmt.Errorf("Required CreateOpts field 'Size' not set.") + } + v["size"] = opts.Size + + if opts.Availability != "" { + v["availability_zone"] = opts.Availability + } + if opts.Description != "" { + v["display_description"] = opts.Description + } + if opts.ImageID != "" { + v["imageRef"] = opts.ImageID + } + if opts.Metadata != nil { + v["metadata"] = opts.Metadata + } + if opts.Name != "" { + v["display_name"] = opts.Name + } + if opts.SourceVolID != "" { + v["source_volid"] = opts.SourceVolID + } + if opts.SnapshotID != "" { + v["snapshot_id"] = opts.SnapshotID + } + if opts.VolumeType != "" { + v["volume_type"] = opts.VolumeType + } + + return map[string]interface{}{"volume": v}, nil +} + +// Create will create a new Volume based on the values in CreateOpts. To extract +// the Volume object from the response, call the Extract method on the +// CreateResult. +func Create(client *gophercloud.ServiceClient, opts CreateOptsBuilder) CreateResult { + var res CreateResult + + reqBody, err := opts.ToVolumeCreateMap() + if err != nil { + res.Err = err + return res + } + + _, res.Err = client.Post(createURL(client), reqBody, &res.Body, &gophercloud.RequestOpts{ + OkCodes: []int{200, 201}, + }) + return res +} + +// Delete will delete the existing Volume with the provided ID. +func Delete(client *gophercloud.ServiceClient, id string) DeleteResult { + var res DeleteResult + _, res.Err = client.Delete(deleteURL(client, id), nil) + return res +} + +// Get retrieves the Volume with the provided ID. To extract the Volume object +// from the response, call the Extract method on the GetResult. +func Get(client *gophercloud.ServiceClient, id string) GetResult { + var res GetResult + _, res.Err = client.Get(getURL(client, id), &res.Body, nil) + return res +} + +// ListOptsBuilder allows extensions to add additional parameters to the List +// request. +type ListOptsBuilder interface { + ToVolumeListQuery() (string, error) +} + +// ListOpts holds options for listing Volumes. It is passed to the volumes.List +// function. +type ListOpts struct { + // admin-only option. Set it to true to see all tenant volumes. + AllTenants bool `q:"all_tenants"` + // List only volumes that contain Metadata. + Metadata map[string]string `q:"metadata"` + // List only volumes that have Name as the display name. + Name string `q:"name"` + // List only volumes that have a status of Status. + Status string `q:"status"` +} + +// ToVolumeListQuery formats a ListOpts into a query string. +func (opts ListOpts) ToVolumeListQuery() (string, error) { + q, err := gophercloud.BuildQueryString(opts) + if err != nil { + return "", err + } + return q.String(), nil +} + +// List returns Volumes optionally limited by the conditions provided in ListOpts. +func List(client *gophercloud.ServiceClient, opts ListOptsBuilder) pagination.Pager { + url := listURL(client) + if opts != nil { + query, err := opts.ToVolumeListQuery() + if err != nil { + return pagination.Pager{Err: err} + } + url += query + } + createPage := func(r pagination.PageResult) pagination.Page { + return ListResult{pagination.SinglePageBase(r)} + } + + return pagination.NewPager(client, url, createPage) +} + +// UpdateOptsBuilder allows extensions to add additional parameters to the +// Update request. 
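+//
+// A minimal usage sketch, assuming an authenticated client (the volume ID is
+// a placeholder):
+//
+//	opts := volumes.UpdateOpts{Name: "vol-002"}
+//	v, err := volumes.Update(client, "volume-id", opts).Extract()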
+type UpdateOptsBuilder interface { + ToVolumeUpdateMap() (map[string]interface{}, error) +} + +// UpdateOpts contains options for updating an existing Volume. This object is passed +// to the volumes.Update function. For more information about the parameters, see +// the Volume object. +type UpdateOpts struct { + // OPTIONAL + Name string + // OPTIONAL + Description string + // OPTIONAL + Metadata map[string]string +} + +// ToVolumeUpdateMap assembles a request body based on the contents of an +// UpdateOpts. +func (opts UpdateOpts) ToVolumeUpdateMap() (map[string]interface{}, error) { + v := make(map[string]interface{}) + + if opts.Description != "" { + v["display_description"] = opts.Description + } + if opts.Metadata != nil { + v["metadata"] = opts.Metadata + } + if opts.Name != "" { + v["display_name"] = opts.Name + } + + return map[string]interface{}{"volume": v}, nil +} + +// Update will update the Volume with provided information. To extract the updated +// Volume from the response, call the Extract method on the UpdateResult. +func Update(client *gophercloud.ServiceClient, id string, opts UpdateOptsBuilder) UpdateResult { + var res UpdateResult + + reqBody, err := opts.ToVolumeUpdateMap() + if err != nil { + res.Err = err + return res + } + + _, res.Err = client.Put(updateURL(client, id), reqBody, &res.Body, &gophercloud.RequestOpts{ + OkCodes: []int{200}, + }) + return res +} + +// IDFromName is a convenience function that returns a volume's ID given its name. +func IDFromName(client *gophercloud.ServiceClient, name string) (string, error) { + volumeCount := 0 + volumeID := "" + if name == "" { + return "", fmt.Errorf("A volume name must be provided.") + } + pager := List(client, nil) + pager.EachPage(func(page pagination.Page) (bool, error) { + volumeList, err := ExtractVolumes(page) + if err != nil { + return false, err + } + + for _, s := range volumeList { + if s.Name == name { + volumeCount++ + volumeID = s.ID + } + } + return true, nil + }) + + switch volumeCount { + case 0: + return "", fmt.Errorf("Unable to find volume: %s", name) + case 1: + return volumeID, nil + default: + return "", fmt.Errorf("Found %d volumes matching %s", volumeCount, name) + } +} diff --git a/vendor/github.com/rackspace/gophercloud/openstack/blockstorage/v1/volumes/requests_test.go b/vendor/github.com/rackspace/gophercloud/openstack/blockstorage/v1/volumes/requests_test.go new file mode 100644 index 000000000..75c2bbc59 --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/openstack/blockstorage/v1/volumes/requests_test.go @@ -0,0 +1,123 @@ +package volumes + +import ( + "testing" + + fixtures "github.com/rackspace/gophercloud/openstack/blockstorage/v1/volumes/testing" + "github.com/rackspace/gophercloud/pagination" + th "github.com/rackspace/gophercloud/testhelper" + "github.com/rackspace/gophercloud/testhelper/client" +) + +func TestList(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + fixtures.MockListResponse(t) + + count := 0 + + List(client.ServiceClient(), &ListOpts{}).EachPage(func(page pagination.Page) (bool, error) { + count++ + actual, err := ExtractVolumes(page) + if err != nil { + t.Errorf("Failed to extract volumes: %v", err) + return false, err + } + + expected := []Volume{ + Volume{ + ID: "289da7f8-6440-407c-9fb4-7db01ec49164", + Name: "vol-001", + }, + Volume{ + ID: "96c3bda7-c82a-4f50-be73-ca7621794835", + Name: "vol-002", + }, + } + + th.CheckDeepEquals(t, expected, actual) + + return true, nil + }) + + if count != 1 { + t.Errorf("Expected 1 page, got %d",
count) + } +} + +func TestListAll(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + fixtures.MockListResponse(t) + + allPages, err := List(client.ServiceClient(), &ListOpts{}).AllPages() + th.AssertNoErr(t, err) + actual, err := ExtractVolumes(allPages) + th.AssertNoErr(t, err) + + expected := []Volume{ + Volume{ + ID: "289da7f8-6440-407c-9fb4-7db01ec49164", + Name: "vol-001", + }, + Volume{ + ID: "96c3bda7-c82a-4f50-be73-ca7621794835", + Name: "vol-002", + }, + } + + th.CheckDeepEquals(t, expected, actual) + +} + +func TestGet(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + fixtures.MockGetResponse(t) + + v, err := Get(client.ServiceClient(), "d32019d3-bc6e-4319-9c1d-6722fc136a22").Extract() + th.AssertNoErr(t, err) + + th.AssertEquals(t, v.Name, "vol-001") + th.AssertEquals(t, v.ID, "d32019d3-bc6e-4319-9c1d-6722fc136a22") + th.AssertEquals(t, v.Attachments[0]["device"], "/dev/vde") +} + +func TestCreate(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + fixtures.MockCreateResponse(t) + + options := &CreateOpts{Size: 75} + n, err := Create(client.ServiceClient(), options).Extract() + th.AssertNoErr(t, err) + + th.AssertEquals(t, n.Size, 4) + th.AssertEquals(t, n.ID, "d32019d3-bc6e-4319-9c1d-6722fc136a22") +} + +func TestDelete(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + fixtures.MockDeleteResponse(t) + + res := Delete(client.ServiceClient(), "d32019d3-bc6e-4319-9c1d-6722fc136a22") + th.AssertNoErr(t, res.Err) +} + +func TestUpdate(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + fixtures.MockUpdateResponse(t) + + options := UpdateOpts{Name: "vol-002"} + v, err := Update(client.ServiceClient(), "d32019d3-bc6e-4319-9c1d-6722fc136a22", options).Extract() + th.AssertNoErr(t, err) + th.CheckEquals(t, "vol-002", v.Name) +} diff --git a/vendor/github.com/rackspace/gophercloud/openstack/blockstorage/v1/volumes/results.go b/vendor/github.com/rackspace/gophercloud/openstack/blockstorage/v1/volumes/results.go new file mode 100644 index 000000000..2fd4ef14e --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/openstack/blockstorage/v1/volumes/results.go @@ -0,0 +1,113 @@ +package volumes + +import ( + "github.com/rackspace/gophercloud" + "github.com/rackspace/gophercloud/pagination" + + "github.com/mitchellh/mapstructure" +) + +// Volume contains all the information associated with an OpenStack Volume. +type Volume struct { + // Current status of the volume. + Status string `mapstructure:"status"` + + // Human-readable display name for the volume. + Name string `mapstructure:"display_name"` + + // Instances onto which the volume is attached. + Attachments []map[string]interface{} `mapstructure:"attachments"` + + // This parameter is no longer used. + AvailabilityZone string `mapstructure:"availability_zone"` + + // Indicates whether this is a bootable volume. + Bootable string `mapstructure:"bootable"` + + // The date when this volume was created. + CreatedAt string `mapstructure:"created_at"` + + // Human-readable description for the volume. + Description string `mapstructure:"display_description"` + + // The type of volume to create, either SATA or SSD. + VolumeType string `mapstructure:"volume_type"` + + // The ID of the snapshot from which the volume was created + SnapshotID string `mapstructure:"snapshot_id"` + + // The ID of another block storage volume from which the current volume was created + SourceVolID string `mapstructure:"source_volid"` + + // Arbitrary key-value pairs defined by the user. 
+ Metadata map[string]string `mapstructure:"metadata"` + + // Unique identifier for the volume. + ID string `mapstructure:"id"` + + // Size of the volume in GB. + Size int `mapstructure:"size"` +} + +// CreateResult contains the response body and error from a Create request. +type CreateResult struct { + commonResult +} + +// GetResult contains the response body and error from a Get request. +type GetResult struct { + commonResult +} + +// DeleteResult contains the response body and error from a Delete request. +type DeleteResult struct { + gophercloud.ErrResult +} + +// ListResult is a pagination.Page that is returned from a call to the List function. +type ListResult struct { + pagination.SinglePageBase +} + +// IsEmpty returns true if a ListResult contains no Volumes. +func (r ListResult) IsEmpty() (bool, error) { + volumes, err := ExtractVolumes(r) + if err != nil { + return true, err + } + return len(volumes) == 0, nil +} + +// ExtractVolumes extracts and returns Volumes. It is used while iterating over a volumes.List call. +func ExtractVolumes(page pagination.Page) ([]Volume, error) { + var response struct { + Volumes []Volume `json:"volumes"` + } + + err := mapstructure.Decode(page.(ListResult).Body, &response) + return response.Volumes, err +} + +// UpdateResult contains the response body and error from an Update request. +type UpdateResult struct { + commonResult +} + +type commonResult struct { + gophercloud.Result +} + +// Extract will get the Volume object out of the commonResult object. +func (r commonResult) Extract() (*Volume, error) { + if r.Err != nil { + return nil, r.Err + } + + var res struct { + Volume *Volume `json:"volume"` + } + + err := mapstructure.Decode(r.Body, &res) + + return res.Volume, err +} diff --git a/vendor/github.com/rackspace/gophercloud/openstack/blockstorage/v1/volumes/testing/doc.go b/vendor/github.com/rackspace/gophercloud/openstack/blockstorage/v1/volumes/testing/doc.go new file mode 100644 index 000000000..2f66ba5e4 --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/openstack/blockstorage/v1/volumes/testing/doc.go @@ -0,0 +1,7 @@ +/* +This package exists to hold fixtures (which import testing), so that importing +the volumes package does not inadvertently import testing into production code. +More information here: +https://github.com/rackspace/gophercloud/issues/473 +*/ +package testing diff --git a/vendor/github.com/rackspace/gophercloud/openstack/blockstorage/v1/volumes/testing/fixtures.go b/vendor/github.com/rackspace/gophercloud/openstack/blockstorage/v1/volumes/testing/fixtures.go new file mode 100644 index 000000000..3df7653f7 --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/openstack/blockstorage/v1/volumes/testing/fixtures.go @@ -0,0 +1,113 @@ +package testing + +import ( + "fmt" + "net/http" + "testing" + + th "github.com/rackspace/gophercloud/testhelper" + fake "github.com/rackspace/gophercloud/testhelper/client" +) + +func MockListResponse(t *testing.T) { + th.Mux.HandleFunc("/volumes", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + + fmt.Fprintf(w, ` + { + "volumes": [ + { + "id": "289da7f8-6440-407c-9fb4-7db01ec49164", + "display_name": "vol-001" + }, + { + "id": "96c3bda7-c82a-4f50-be73-ca7621794835", + "display_name": "vol-002" + } + ] + } + `) + }) +} + +func MockGetResponse(t *testing.T) {
th.Mux.HandleFunc("/volumes/d32019d3-bc6e-4319-9c1d-6722fc136a22", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + fmt.Fprintf(w, ` +{ + "volume": { + "display_name": "vol-001", + "id": "d32019d3-bc6e-4319-9c1d-6722fc136a22", + "attachments": [ + { + "device": "/dev/vde", + "server_id": "a740d24b-dc5b-4d59-ac75-53971c2920ba", + "id": "d6da11e5-2ed3-413e-88d8-b772ba62193d", + "volume_id": "d6da11e5-2ed3-413e-88d8-b772ba62193d" + } + ] + } +} + `) + }) +} + +func MockCreateResponse(t *testing.T) { + th.Mux.HandleFunc("/volumes", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "POST") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + th.TestHeader(t, r, "Content-Type", "application/json") + th.TestHeader(t, r, "Accept", "application/json") + th.TestJSONRequest(t, r, ` +{ + "volume": { + "size": 75 + } +} + `) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusCreated) + + fmt.Fprintf(w, ` +{ + "volume": { + "size": 4, + "id": "d32019d3-bc6e-4319-9c1d-6722fc136a22" + } +} + `) + }) +} + +func MockDeleteResponse(t *testing.T) { + th.Mux.HandleFunc("/volumes/d32019d3-bc6e-4319-9c1d-6722fc136a22", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "DELETE") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + w.WriteHeader(http.StatusNoContent) + }) +} + +func MockUpdateResponse(t *testing.T) { + th.Mux.HandleFunc("/volumes/d32019d3-bc6e-4319-9c1d-6722fc136a22", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "PUT") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + w.WriteHeader(http.StatusOK) + fmt.Fprintf(w, ` + { + "volume": { + "display_name": "vol-002", + "id": "d32019d3-bc6e-4319-9c1d-6722fc136a22" + } + } + `) + }) +} diff --git a/vendor/github.com/rackspace/gophercloud/openstack/blockstorage/v1/volumes/urls.go b/vendor/github.com/rackspace/gophercloud/openstack/blockstorage/v1/volumes/urls.go new file mode 100644 index 000000000..29629a1af --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/openstack/blockstorage/v1/volumes/urls.go @@ -0,0 +1,23 @@ +package volumes + +import "github.com/rackspace/gophercloud" + +func createURL(c *gophercloud.ServiceClient) string { + return c.ServiceURL("volumes") +} + +func listURL(c *gophercloud.ServiceClient) string { + return createURL(c) +} + +func deleteURL(c *gophercloud.ServiceClient, id string) string { + return c.ServiceURL("volumes", id) +} + +func getURL(c *gophercloud.ServiceClient, id string) string { + return deleteURL(c, id) +} + +func updateURL(c *gophercloud.ServiceClient, id string) string { + return deleteURL(c, id) +} diff --git a/vendor/github.com/rackspace/gophercloud/openstack/blockstorage/v1/volumes/urls_test.go b/vendor/github.com/rackspace/gophercloud/openstack/blockstorage/v1/volumes/urls_test.go new file mode 100644 index 000000000..a95270e14 --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/openstack/blockstorage/v1/volumes/urls_test.go @@ -0,0 +1,44 @@ +package volumes + +import ( + "testing" + + "github.com/rackspace/gophercloud" + th "github.com/rackspace/gophercloud/testhelper" +) + +const endpoint = "http://localhost:57909" + +func endpointClient() *gophercloud.ServiceClient { + return &gophercloud.ServiceClient{Endpoint: endpoint} +} + +func TestCreateURL(t *testing.T) { + actual := createURL(endpointClient()) + expected := endpoint + 
"volumes" + th.AssertEquals(t, expected, actual) +} + +func TestListURL(t *testing.T) { + actual := listURL(endpointClient()) + expected := endpoint + "volumes" + th.AssertEquals(t, expected, actual) +} + +func TestDeleteURL(t *testing.T) { + actual := deleteURL(endpointClient(), "foo") + expected := endpoint + "volumes/foo" + th.AssertEquals(t, expected, actual) +} + +func TestGetURL(t *testing.T) { + actual := getURL(endpointClient(), "foo") + expected := endpoint + "volumes/foo" + th.AssertEquals(t, expected, actual) +} + +func TestUpdateURL(t *testing.T) { + actual := updateURL(endpointClient(), "foo") + expected := endpoint + "volumes/foo" + th.AssertEquals(t, expected, actual) +} diff --git a/vendor/github.com/rackspace/gophercloud/openstack/blockstorage/v1/volumes/util.go b/vendor/github.com/rackspace/gophercloud/openstack/blockstorage/v1/volumes/util.go new file mode 100644 index 000000000..1dda695ea --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/openstack/blockstorage/v1/volumes/util.go @@ -0,0 +1,22 @@ +package volumes + +import ( + "github.com/rackspace/gophercloud" +) + +// WaitForStatus will continually poll the resource, checking for a particular +// status. It will do this for the amount of seconds defined. +func WaitForStatus(c *gophercloud.ServiceClient, id, status string, secs int) error { + return gophercloud.WaitFor(secs, func() (bool, error) { + current, err := Get(c, id).Extract() + if err != nil { + return false, err + } + + if current.Status == status { + return true, nil + } + + return false, nil + }) +} diff --git a/vendor/github.com/rackspace/gophercloud/openstack/blockstorage/v1/volumetypes/doc.go b/vendor/github.com/rackspace/gophercloud/openstack/blockstorage/v1/volumetypes/doc.go new file mode 100644 index 000000000..793084f89 --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/openstack/blockstorage/v1/volumetypes/doc.go @@ -0,0 +1,9 @@ +// Package volumetypes provides information and interaction with volume types +// in the OpenStack Block Storage service. A volume type indicates the type of +// a block storage volume, such as SATA, SCSCI, SSD, etc. These can be +// customized or defined by the OpenStack admin. +// +// You can also define extra_specs associated with your volume types. For +// instance, you could have a VolumeType=SATA, with extra_specs (RPM=10000, +// RAID-Level=5) . Extra_specs are defined and customized by the admin. 
+package volumetypes diff --git a/vendor/github.com/rackspace/gophercloud/openstack/blockstorage/v1/volumetypes/fixtures.go b/vendor/github.com/rackspace/gophercloud/openstack/blockstorage/v1/volumetypes/fixtures.go new file mode 100644 index 000000000..e3326eae1 --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/openstack/blockstorage/v1/volumetypes/fixtures.go @@ -0,0 +1,60 @@ +package volumetypes + +import ( + "fmt" + "net/http" + "testing" + + th "github.com/rackspace/gophercloud/testhelper" + fake "github.com/rackspace/gophercloud/testhelper/client" +) + +func MockListResponse(t *testing.T) { + th.Mux.HandleFunc("/types", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + + fmt.Fprintf(w, ` + { + "volume_types": [ + { + "id": "289da7f8-6440-407c-9fb4-7db01ec49164", + "name": "vol-type-001", + "extra_specs": { + "capabilities": "gpu" + } + }, + { + "id": "96c3bda7-c82a-4f50-be73-ca7621794835", + "name": "vol-type-002", + "extra_specs": {} + } + ] + } + `) + }) +} + +func MockGetResponse(t *testing.T) { + th.Mux.HandleFunc("/types/d32019d3-bc6e-4319-9c1d-6722fc136a22", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + fmt.Fprintf(w, ` +{ + "volume_type": { + "name": "vol-type-001", + "id": "d32019d3-bc6e-4319-9c1d-6722fc136a22", + "extra_specs": { + "serverNumber": "2" + } + } +} + `) + }) +} diff --git a/vendor/github.com/rackspace/gophercloud/openstack/blockstorage/v1/volumetypes/requests.go b/vendor/github.com/rackspace/gophercloud/openstack/blockstorage/v1/volumetypes/requests.go new file mode 100644 index 000000000..1673d13aa --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/openstack/blockstorage/v1/volumetypes/requests.go @@ -0,0 +1,76 @@ +package volumetypes + +import ( + "github.com/rackspace/gophercloud" + "github.com/rackspace/gophercloud/pagination" +) + +// CreateOptsBuilder allows extensions to add additional parameters to the +// Create request. +type CreateOptsBuilder interface { + ToVolumeTypeCreateMap() (map[string]interface{}, error) +} + +// CreateOpts are options for creating a volume type. +type CreateOpts struct { + // OPTIONAL. See VolumeType. + ExtraSpecs map[string]interface{} + // OPTIONAL. See VolumeType. + Name string +} + +// ToVolumeTypeCreateMap casts a CreateOpts struct to a map. +func (opts CreateOpts) ToVolumeTypeCreateMap() (map[string]interface{}, error) { + vt := make(map[string]interface{}) + + if opts.ExtraSpecs != nil { + vt["extra_specs"] = opts.ExtraSpecs + } + if opts.Name != "" { + vt["name"] = opts.Name + } + + return map[string]interface{}{"volume_type": vt}, nil +} + +// Create will create a new volume type. To extract the created volume type object, +// call the Extract method on the CreateResult. +func Create(client *gophercloud.ServiceClient, opts CreateOptsBuilder) CreateResult { + var res CreateResult + + reqBody, err := opts.ToVolumeTypeCreateMap() + if err != nil { + res.Err = err + return res + } + + _, res.Err = client.Post(createURL(client), reqBody, &res.Body, &gophercloud.RequestOpts{ + OkCodes: []int{200, 201}, + }) + return res +} + +// Delete will delete the volume type with the provided ID.
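+//
+// A minimal sketch, assuming an authenticated client (the ID is a
+// placeholder); the error, if any, is retrieved with ExtractErr:
+//
+//	err := volumetypes.Delete(client, "volume-type-id").ExtractErr()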
+func Delete(client *gophercloud.ServiceClient, id string) DeleteResult { + var res DeleteResult + _, res.Err = client.Delete(deleteURL(client, id), nil) + return res +} + +// Get will retrieve the volume type with the provided ID. To extract the volume +// type from the result, call the Extract method on the GetResult. +func Get(client *gophercloud.ServiceClient, id string) GetResult { + var res GetResult + _, err := client.Get(getURL(client, id), &res.Body, nil) + res.Err = err + return res +} + +// List returns all volume types. +func List(client *gophercloud.ServiceClient) pagination.Pager { + createPage := func(r pagination.PageResult) pagination.Page { + return ListResult{pagination.SinglePageBase(r)} + } + + return pagination.NewPager(client, listURL(client), createPage) +} diff --git a/vendor/github.com/rackspace/gophercloud/openstack/blockstorage/v1/volumetypes/requests_test.go b/vendor/github.com/rackspace/gophercloud/openstack/blockstorage/v1/volumetypes/requests_test.go new file mode 100644 index 000000000..8d40bfe1d --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/openstack/blockstorage/v1/volumetypes/requests_test.go @@ -0,0 +1,118 @@ +package volumetypes + +import ( + "fmt" + "net/http" + "testing" + + "github.com/rackspace/gophercloud/pagination" + th "github.com/rackspace/gophercloud/testhelper" + "github.com/rackspace/gophercloud/testhelper/client" +) + +func TestList(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + MockListResponse(t) + + count := 0 + + List(client.ServiceClient()).EachPage(func(page pagination.Page) (bool, error) { + count++ + actual, err := ExtractVolumeTypes(page) + if err != nil { + t.Errorf("Failed to extract volume types: %v", err) + return false, err + } + + expected := []VolumeType{ + VolumeType{ + ID: "289da7f8-6440-407c-9fb4-7db01ec49164", + Name: "vol-type-001", + ExtraSpecs: map[string]interface{}{ + "capabilities": "gpu", + }, + }, + VolumeType{ + ID: "96c3bda7-c82a-4f50-be73-ca7621794835", + Name: "vol-type-002", + ExtraSpecs: map[string]interface{}{}, + }, + } + + th.CheckDeepEquals(t, expected, actual) + + return true, nil + }) + + if count != 1 { + t.Errorf("Expected 1 page, got %d", count) + } +} + +func TestGet(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + MockGetResponse(t) + + vt, err := Get(client.ServiceClient(), "d32019d3-bc6e-4319-9c1d-6722fc136a22").Extract() + th.AssertNoErr(t, err) + + th.AssertDeepEquals(t, vt.ExtraSpecs, map[string]interface{}{"serverNumber": "2"}) + th.AssertEquals(t, vt.Name, "vol-type-001") + th.AssertEquals(t, vt.ID, "d32019d3-bc6e-4319-9c1d-6722fc136a22") +} + +func TestCreate(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + th.Mux.HandleFunc("/types", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "POST") + th.TestHeader(t, r, "X-Auth-Token", client.TokenID) + th.TestHeader(t, r, "Content-Type", "application/json") + th.TestHeader(t, r, "Accept", "application/json") + th.TestJSONRequest(t, r, ` +{ + "volume_type": { + "name": "vol-type-001" + } +} + `) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusCreated) + + fmt.Fprintf(w, ` +{ + "volume_type": { + "name": "vol-type-001", + "id": "d32019d3-bc6e-4319-9c1d-6722fc136a22" + } +} + `) + }) + + options := &CreateOpts{Name: "vol-type-001"} + n, err := Create(client.ServiceClient(), options).Extract() + th.AssertNoErr(t, err) + + th.AssertEquals(t, n.Name, "vol-type-001") + th.AssertEquals(t, n.ID, "d32019d3-bc6e-4319-9c1d-6722fc136a22") +} + 
+func TestDelete(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + th.Mux.HandleFunc("/types/d32019d3-bc6e-4319-9c1d-6722fc136a22", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "DELETE") + th.TestHeader(t, r, "X-Auth-Token", client.TokenID) + w.WriteHeader(http.StatusAccepted) + }) + + err := Delete(client.ServiceClient(), "d32019d3-bc6e-4319-9c1d-6722fc136a22").ExtractErr() + th.AssertNoErr(t, err) +} diff --git a/vendor/github.com/rackspace/gophercloud/openstack/blockstorage/v1/volumetypes/results.go b/vendor/github.com/rackspace/gophercloud/openstack/blockstorage/v1/volumetypes/results.go new file mode 100644 index 000000000..c049a045d --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/openstack/blockstorage/v1/volumetypes/results.go @@ -0,0 +1,72 @@ +package volumetypes + +import ( + "github.com/mitchellh/mapstructure" + "github.com/rackspace/gophercloud" + "github.com/rackspace/gophercloud/pagination" +) + +// VolumeType contains all information associated with an OpenStack Volume Type. +type VolumeType struct { + ExtraSpecs map[string]interface{} `json:"extra_specs" mapstructure:"extra_specs"` // user-defined metadata + ID string `json:"id" mapstructure:"id"` // unique identifier + Name string `json:"name" mapstructure:"name"` // display name +} + +// CreateResult contains the response body and error from a Create request. +type CreateResult struct { + commonResult +} + +// GetResult contains the response body and error from a Get request. +type GetResult struct { + commonResult +} + +// DeleteResult contains the response error from a Delete request. +type DeleteResult struct { + gophercloud.ErrResult +} + +// ListResult is a pagination.Page that is returned from a call to the List function. +type ListResult struct { + pagination.SinglePageBase +} + +// IsEmpty returns true if a ListResult contains no Volume Types. +func (r ListResult) IsEmpty() (bool, error) { + volumeTypes, err := ExtractVolumeTypes(r) + if err != nil { + return true, err + } + return len(volumeTypes) == 0, nil +} + +// ExtractVolumeTypes extracts and returns Volume Types. +func ExtractVolumeTypes(page pagination.Page) ([]VolumeType, error) { + var response struct { + VolumeTypes []VolumeType `mapstructure:"volume_types"` + } + + err := mapstructure.Decode(page.(ListResult).Body, &response) + return response.VolumeTypes, err +} + +type commonResult struct { + gophercloud.Result +} + +// Extract will get the Volume Type object out of the commonResult object.
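+//
+// For example, a sketch of extracting the result of a Get call (assuming an
+// authenticated client and a placeholder ID):
+//
+//	vt, err := volumetypes.Get(client, "volume-type-id").Extract()
+//	if err == nil {
+//		fmt.Println(vt.Name, vt.ExtraSpecs)
+//	}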
+func (r commonResult) Extract() (*VolumeType, error) { + if r.Err != nil { + return nil, r.Err + } + + var res struct { + VolumeType *VolumeType `json:"volume_type" mapstructure:"volume_type"` + } + + err := mapstructure.Decode(r.Body, &res) + + return res.VolumeType, err +} diff --git a/vendor/github.com/rackspace/gophercloud/openstack/blockstorage/v1/volumetypes/urls.go b/vendor/github.com/rackspace/gophercloud/openstack/blockstorage/v1/volumetypes/urls.go new file mode 100644 index 000000000..cf8367bfa --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/openstack/blockstorage/v1/volumetypes/urls.go @@ -0,0 +1,19 @@ +package volumetypes + +import "github.com/rackspace/gophercloud" + +func listURL(c *gophercloud.ServiceClient) string { + return c.ServiceURL("types") +} + +func createURL(c *gophercloud.ServiceClient) string { + return listURL(c) +} + +func getURL(c *gophercloud.ServiceClient, id string) string { + return c.ServiceURL("types", id) +} + +func deleteURL(c *gophercloud.ServiceClient, id string) string { + return getURL(c, id) +} diff --git a/vendor/github.com/rackspace/gophercloud/openstack/blockstorage/v1/volumetypes/urls_test.go b/vendor/github.com/rackspace/gophercloud/openstack/blockstorage/v1/volumetypes/urls_test.go new file mode 100644 index 000000000..44016e295 --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/openstack/blockstorage/v1/volumetypes/urls_test.go @@ -0,0 +1,38 @@ +package volumetypes + +import ( + "testing" + + "github.com/rackspace/gophercloud" + th "github.com/rackspace/gophercloud/testhelper" +) + +const endpoint = "http://localhost:57909" + +func endpointClient() *gophercloud.ServiceClient { + return &gophercloud.ServiceClient{Endpoint: endpoint} +} + +func TestListURL(t *testing.T) { + actual := listURL(endpointClient()) + expected := endpoint + "types" + th.AssertEquals(t, expected, actual) +} + +func TestCreateURL(t *testing.T) { + actual := createURL(endpointClient()) + expected := endpoint + "types" + th.AssertEquals(t, expected, actual) +} + +func TestGetURL(t *testing.T) { + actual := getURL(endpointClient(), "foo") + expected := endpoint + "types/foo" + th.AssertEquals(t, expected, actual) +} + +func TestDeleteURL(t *testing.T) { + actual := deleteURL(endpointClient(), "foo") + expected := endpoint + "types/foo" + th.AssertEquals(t, expected, actual) +} diff --git a/vendor/github.com/rackspace/gophercloud/openstack/cdn/v1/base/doc.go b/vendor/github.com/rackspace/gophercloud/openstack/cdn/v1/base/doc.go new file mode 100644 index 000000000..f78d4f735 --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/openstack/cdn/v1/base/doc.go @@ -0,0 +1,4 @@ +// Package base provides information and interaction with the base API +// resource in the OpenStack CDN service. This API resource allows for +// retrieving the Home Document and pinging the root URL. +package base diff --git a/vendor/github.com/rackspace/gophercloud/openstack/cdn/v1/base/fixtures.go b/vendor/github.com/rackspace/gophercloud/openstack/cdn/v1/base/fixtures.go new file mode 100644 index 000000000..19b5ece46 --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/openstack/cdn/v1/base/fixtures.go @@ -0,0 +1,53 @@ +package base + +import ( + "fmt" + "net/http" + "testing" + + th "github.com/rackspace/gophercloud/testhelper" + fake "github.com/rackspace/gophercloud/testhelper/client" +) + +// HandleGetSuccessfully creates an HTTP handler at `/` on the test handler mux +// that responds with a `Get` response. 
+func HandleGetSuccessfully(t *testing.T) { + th.Mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + th.TestHeader(t, r, "Accept", "application/json") + w.WriteHeader(http.StatusOK) + fmt.Fprintf(w, ` + { + "resources": { + "rel/cdn": { + "href-template": "services{?marker,limit}", + "href-vars": { + "marker": "param/marker", + "limit": "param/limit" + }, + "hints": { + "allow": [ + "GET" + ], + "formats": { + "application/json": {} + } + } + } + } + } + `) + }) +} + +// HandlePingSuccessfully creates an HTTP handler at `/ping` on the test handler +// mux that responds with a `Ping` response. +func HandlePingSuccessfully(t *testing.T) { + th.Mux.HandleFunc("/ping", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + w.WriteHeader(http.StatusNoContent) + }) +} diff --git a/vendor/github.com/rackspace/gophercloud/openstack/cdn/v1/base/requests.go b/vendor/github.com/rackspace/gophercloud/openstack/cdn/v1/base/requests.go new file mode 100644 index 000000000..dd221bc98 --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/openstack/cdn/v1/base/requests.go @@ -0,0 +1,21 @@ +package base + +import "github.com/rackspace/gophercloud" + +// Get retrieves the home document, allowing the user to discover the +// entire API. +func Get(c *gophercloud.ServiceClient) GetResult { + var res GetResult + _, res.Err = c.Get(getURL(c), &res.Body, nil) + return res +} + +// Ping pings the server and reports whether it is available. +func Ping(c *gophercloud.ServiceClient) PingResult { + var res PingResult + _, res.Err = c.Get(pingURL(c), nil, &gophercloud.RequestOpts{ + OkCodes: []int{204}, + MoreHeaders: map[string]string{"Accept": ""}, + }) + return res +} diff --git a/vendor/github.com/rackspace/gophercloud/openstack/cdn/v1/base/requests_test.go b/vendor/github.com/rackspace/gophercloud/openstack/cdn/v1/base/requests_test.go new file mode 100644 index 000000000..2c20a7110 --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/openstack/cdn/v1/base/requests_test.go @@ -0,0 +1,43 @@ +package base + +import ( + "testing" + + th "github.com/rackspace/gophercloud/testhelper" + fake "github.com/rackspace/gophercloud/testhelper/client" +) + +func TestGetHomeDocument(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleGetSuccessfully(t) + + actual, err := Get(fake.ServiceClient()).Extract() + th.CheckNoErr(t, err) + + expected := HomeDocument{ + "rel/cdn": map[string]interface{}{ + "href-template": "services{?marker,limit}", + "href-vars": map[string]interface{}{ + "marker": "param/marker", + "limit": "param/limit", + }, + "hints": map[string]interface{}{ + "allow": []string{"GET"}, + "formats": map[string]interface{}{ + "application/json": map[string]interface{}{}, + }, + }, + }, + } + th.CheckDeepEquals(t, expected, *actual) +} + +func TestPing(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandlePingSuccessfully(t) + + err := Ping(fake.ServiceClient()).ExtractErr() + th.CheckNoErr(t, err) +} diff --git a/vendor/github.com/rackspace/gophercloud/openstack/cdn/v1/base/results.go b/vendor/github.com/rackspace/gophercloud/openstack/cdn/v1/base/results.go new file mode 100644 index 000000000..bef1da8a1 --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/openstack/cdn/v1/base/results.go @@ -0,0 +1,35 @@ +package base + +import ( + "errors" + + "github.com/rackspace/gophercloud" +) + +// HomeDocument is a resource that
contains all the resources for the CDN API. +type HomeDocument map[string]interface{} + +// GetResult represents the result of a Get operation. +type GetResult struct { + gophercloud.Result +} + +// Extract is a function that accepts a result and extracts a home document resource. +func (r GetResult) Extract() (*HomeDocument, error) { + if r.Err != nil { + return nil, r.Err + } + + submap, ok := r.Body.(map[string]interface{})["resources"] + if !ok { + return nil, errors.New("Unexpected HomeDocument structure") + } + casted := HomeDocument(submap.(map[string]interface{})) + + return &casted, nil +} + +// PingResult represents the result of a Ping operation. +type PingResult struct { + gophercloud.ErrResult +} diff --git a/vendor/github.com/rackspace/gophercloud/openstack/cdn/v1/base/urls.go b/vendor/github.com/rackspace/gophercloud/openstack/cdn/v1/base/urls.go new file mode 100644 index 000000000..a95e18bca --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/openstack/cdn/v1/base/urls.go @@ -0,0 +1,11 @@ +package base + +import "github.com/rackspace/gophercloud" + +func getURL(c *gophercloud.ServiceClient) string { + return c.ServiceURL() +} + +func pingURL(c *gophercloud.ServiceClient) string { + return c.ServiceURL("ping") +} diff --git a/vendor/github.com/rackspace/gophercloud/openstack/cdn/v1/flavors/doc.go b/vendor/github.com/rackspace/gophercloud/openstack/cdn/v1/flavors/doc.go new file mode 100644 index 000000000..d4066985c --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/openstack/cdn/v1/flavors/doc.go @@ -0,0 +1,6 @@ +// Package flavors provides information and interaction with the flavors API +// resource in the OpenStack CDN service. This API resource allows for +// listing flavors and retrieving a specific flavor. +// +// A flavor is a mapping configuration to a CDN provider. +package flavors diff --git a/vendor/github.com/rackspace/gophercloud/openstack/cdn/v1/flavors/fixtures.go b/vendor/github.com/rackspace/gophercloud/openstack/cdn/v1/flavors/fixtures.go new file mode 100644 index 000000000..d7ec1a00d --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/openstack/cdn/v1/flavors/fixtures.go @@ -0,0 +1,82 @@ +package flavors + +import ( + "fmt" + "net/http" + "testing" + + th "github.com/rackspace/gophercloud/testhelper" + fake "github.com/rackspace/gophercloud/testhelper/client" +) + +// HandleListCDNFlavorsSuccessfully creates an HTTP handler at `/flavors` on the test handler mux +// that responds with a `List` response. +func HandleListCDNFlavorsSuccessfully(t *testing.T) { + th.Mux.HandleFunc("/flavors", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + fmt.Fprintf(w, ` + { + "flavors": [ + { + "id": "europe", + "providers": [ + { + "provider": "Fastly", + "links": [ + { + "href": "http://www.fastly.com", + "rel": "provider_url" + } + ] + } + ], + "links": [ + { + "href": "https://www.poppycdn.io/v1.0/flavors/europe", + "rel": "self" + } + ] + } + ] + } + `) + }) +} + +// HandleGetCDNFlavorSuccessfully creates an HTTP handler at `/flavors/{id}` on the test handler mux +// that responds with a `Get` response. 
+func HandleGetCDNFlavorSuccessfully(t *testing.T) { + th.Mux.HandleFunc("/flavors/asia", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + fmt.Fprintf(w, ` + { + "id" : "asia", + "providers" : [ + { + "provider" : "ChinaCache", + "links": [ + { + "href": "http://www.chinacache.com", + "rel": "provider_url" + } + ] + } + ], + "links": [ + { + "href": "https://www.poppycdn.io/v1.0/flavors/asia", + "rel": "self" + } + ] + } + `) + }) +} diff --git a/vendor/github.com/rackspace/gophercloud/openstack/cdn/v1/flavors/requests.go b/vendor/github.com/rackspace/gophercloud/openstack/cdn/v1/flavors/requests.go new file mode 100644 index 000000000..8755a95b8 --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/openstack/cdn/v1/flavors/requests.go @@ -0,0 +1,22 @@ +package flavors + +import ( + "github.com/rackspace/gophercloud" + "github.com/rackspace/gophercloud/pagination" +) + +// List returns a single page of CDN flavors. +func List(c *gophercloud.ServiceClient) pagination.Pager { + url := listURL(c) + createPage := func(r pagination.PageResult) pagination.Page { + return FlavorPage{pagination.SinglePageBase(r)} + } + return pagination.NewPager(c, url, createPage) +} + +// Get retrieves a specific flavor based on its unique ID. +func Get(c *gophercloud.ServiceClient, id string) GetResult { + var res GetResult + _, res.Err = c.Get(getURL(c, id), &res.Body, nil) + return res +} diff --git a/vendor/github.com/rackspace/gophercloud/openstack/cdn/v1/flavors/requests_test.go b/vendor/github.com/rackspace/gophercloud/openstack/cdn/v1/flavors/requests_test.go new file mode 100644 index 000000000..f73173827 --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/openstack/cdn/v1/flavors/requests_test.go @@ -0,0 +1,89 @@ +package flavors + +import ( + "testing" + + "github.com/rackspace/gophercloud" + "github.com/rackspace/gophercloud/pagination" + th "github.com/rackspace/gophercloud/testhelper" + fake "github.com/rackspace/gophercloud/testhelper/client" +) + +func TestList(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + HandleListCDNFlavorsSuccessfully(t) + + count := 0 + + err := List(fake.ServiceClient()).EachPage(func(page pagination.Page) (bool, error) { + count++ + actual, err := ExtractFlavors(page) + if err != nil { + t.Errorf("Failed to extract flavors: %v", err) + return false, err + } + + expected := []Flavor{ + Flavor{ + ID: "europe", + Providers: []Provider{ + Provider{ + Provider: "Fastly", + Links: []gophercloud.Link{ + gophercloud.Link{ + Href: "http://www.fastly.com", + Rel: "provider_url", + }, + }, + }, + }, + Links: []gophercloud.Link{ + gophercloud.Link{ + Href: "https://www.poppycdn.io/v1.0/flavors/europe", + Rel: "self", + }, + }, + }, + } + + th.CheckDeepEquals(t, expected, actual) + + return true, nil + }) + th.AssertNoErr(t, err) + th.CheckEquals(t, 1, count) +} + +func TestGet(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + HandleGetCDNFlavorSuccessfully(t) + + expected := &Flavor{ + ID: "asia", + Providers: []Provider{ + Provider{ + Provider: "ChinaCache", + Links: []gophercloud.Link{ + gophercloud.Link{ + Href: "http://www.chinacache.com", + Rel: "provider_url", + }, + }, + }, + }, + Links: []gophercloud.Link{ + gophercloud.Link{ + Href: "https://www.poppycdn.io/v1.0/flavors/asia", + Rel: "self", + }, + }, + } + + actual, err := Get(fake.ServiceClient(), "asia").Extract() + 
th.AssertNoErr(t, err) + th.AssertDeepEquals(t, expected, actual) +} diff --git a/vendor/github.com/rackspace/gophercloud/openstack/cdn/v1/flavors/results.go b/vendor/github.com/rackspace/gophercloud/openstack/cdn/v1/flavors/results.go new file mode 100644 index 000000000..8cab48b53 --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/openstack/cdn/v1/flavors/results.go @@ -0,0 +1,71 @@ +package flavors + +import ( + "github.com/mitchellh/mapstructure" + "github.com/rackspace/gophercloud" + "github.com/rackspace/gophercloud/pagination" +) + +// Provider represents a provider for a particular flavor. +type Provider struct { + // Specifies the name of the provider. The name must not exceed 64 bytes in + // length and is limited to unicode, digits, underscores, and hyphens. + Provider string `mapstructure:"provider"` + // Specifies a list with an href where rel is provider_url. + Links []gophercloud.Link `mapstructure:"links"` +} + +// Flavor represents a mapping configuration to a CDN provider. +type Flavor struct { + // Specifies the name of the flavor. The name must not exceed 64 bytes in + // length and is limited to unicode, digits, underscores, and hyphens. + ID string `mapstructure:"id"` + // Specifies the list of providers mapped to this flavor. + Providers []Provider `mapstructure:"providers"` + // Specifies the self-navigating JSON document paths. + Links []gophercloud.Link `mapstructure:"links"` +} + +// FlavorPage is the page returned by a pager when traversing over a +// collection of CDN flavors. +type FlavorPage struct { + pagination.SinglePageBase +} + +// IsEmpty returns true if a FlavorPage contains no Flavors. +func (r FlavorPage) IsEmpty() (bool, error) { + flavors, err := ExtractFlavors(r) + if err != nil { + return true, err + } + return len(flavors) == 0, nil +} + +// ExtractFlavors extracts and returns Flavors. It is used while iterating over +// a flavors.List call. +func ExtractFlavors(page pagination.Page) ([]Flavor, error) { + var response struct { + Flavors []Flavor `json:"flavors"` + } + + err := mapstructure.Decode(page.(FlavorPage).Body, &response) + return response.Flavors, err +} + +// GetResult represents the result of a get operation. +type GetResult struct { + gophercloud.Result +} + +// Extract is a function that extracts a flavor from a GetResult. +func (r GetResult) Extract() (*Flavor, error) { + if r.Err != nil { + return nil, r.Err + } + + var res Flavor + + err := mapstructure.Decode(r.Body, &res) + + return &res, err +} diff --git a/vendor/github.com/rackspace/gophercloud/openstack/cdn/v1/flavors/urls.go b/vendor/github.com/rackspace/gophercloud/openstack/cdn/v1/flavors/urls.go new file mode 100644 index 000000000..6eb38d293 --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/openstack/cdn/v1/flavors/urls.go @@ -0,0 +1,11 @@ +package flavors + +import "github.com/rackspace/gophercloud" + +func listURL(c *gophercloud.ServiceClient) string { + return c.ServiceURL("flavors") +} + +func getURL(c *gophercloud.ServiceClient, id string) string { + return c.ServiceURL("flavors", id) +} diff --git a/vendor/github.com/rackspace/gophercloud/openstack/cdn/v1/serviceassets/doc.go b/vendor/github.com/rackspace/gophercloud/openstack/cdn/v1/serviceassets/doc.go new file mode 100644 index 000000000..ceecaa5a5 --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/openstack/cdn/v1/serviceassets/doc.go @@ -0,0 +1,7 @@ +// Package serviceassets provides information and interaction with the +// serviceassets API resource in the OpenStack CDN service. 
This API resource +// allows for deleting cached assets. +// +// A service distributes assets across the network. Service assets let you +// interrogate properties about these assets and perform certain actions on them. +package serviceassets diff --git a/vendor/github.com/rackspace/gophercloud/openstack/cdn/v1/serviceassets/fixtures.go b/vendor/github.com/rackspace/gophercloud/openstack/cdn/v1/serviceassets/fixtures.go new file mode 100644 index 000000000..5c6b5d00e --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/openstack/cdn/v1/serviceassets/fixtures.go @@ -0,0 +1,19 @@ +package serviceassets + +import ( + "net/http" + "testing" + + th "github.com/rackspace/gophercloud/testhelper" + fake "github.com/rackspace/gophercloud/testhelper/client" +) + +// HandleDeleteCDNAssetSuccessfully creates an HTTP handler at `/services/{id}/assets` on the test handler mux +// that responds with a `Delete` response. +func HandleDeleteCDNAssetSuccessfully(t *testing.T) { + th.Mux.HandleFunc("/services/96737ae3-cfc1-4c72-be88-5d0e7cc9a3f0/assets", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "DELETE") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + w.WriteHeader(http.StatusAccepted) + }) +} diff --git a/vendor/github.com/rackspace/gophercloud/openstack/cdn/v1/serviceassets/requests.go b/vendor/github.com/rackspace/gophercloud/openstack/cdn/v1/serviceassets/requests.go new file mode 100644 index 000000000..1ddc65faf --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/openstack/cdn/v1/serviceassets/requests.go @@ -0,0 +1,48 @@ +package serviceassets + +import ( + "strings" + + "github.com/rackspace/gophercloud" +) + +// DeleteOptsBuilder allows extensions to add additional parameters to the Delete +// request. +type DeleteOptsBuilder interface { + ToCDNAssetDeleteParams() (string, error) +} + +// DeleteOpts is a structure that holds options for deleting CDN service assets. +type DeleteOpts struct { + // If all is set to true, specifies that the delete occurs against all of the + // assets for the service. + All bool `q:"all"` + // Specifies the relative URL of the asset to be deleted. + URL string `q:"url"` +} + +// ToCDNAssetDeleteParams formats a DeleteOpts into a query string. +func (opts DeleteOpts) ToCDNAssetDeleteParams() (string, error) { + q, err := gophercloud.BuildQueryString(opts) + if err != nil { + return "", err + } + return q.String(), nil +} + +// Delete accepts a unique service ID or URL and deletes the CDN service asset associated with +// it. For example, both "96737ae3-cfc1-4c72-be88-5d0e7cc9a3f0" and +// "https://global.cdn.api.rackspacecloud.com/v1.0/services/96737ae3-cfc1-4c72-be88-5d0e7cc9a3f0" +// are valid options for idOrURL. 
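+//
+// A minimal usage sketch (assuming an authenticated *gophercloud.ServiceClient
+// named "client" for the CDN endpoint):
+//
+//     err := serviceassets.Delete(client, "96737ae3-cfc1-4c72-be88-5d0e7cc9a3f0", nil).ExtractErr()
+//
+// Note that this revision accepts opts but does not yet apply the formatted
+// query string to the request URL.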
+func Delete(c *gophercloud.ServiceClient, idOrURL string, opts DeleteOptsBuilder) DeleteResult { + var url string + if strings.Contains(idOrURL, "/") { + url = idOrURL + } else { + url = deleteURL(c, idOrURL) + } + + var res DeleteResult + _, res.Err = c.Delete(url, nil) + return res +} diff --git a/vendor/github.com/rackspace/gophercloud/openstack/cdn/v1/serviceassets/requests_test.go b/vendor/github.com/rackspace/gophercloud/openstack/cdn/v1/serviceassets/requests_test.go new file mode 100644 index 000000000..dde7bc171 --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/openstack/cdn/v1/serviceassets/requests_test.go @@ -0,0 +1,18 @@ +package serviceassets + +import ( + "testing" + + th "github.com/rackspace/gophercloud/testhelper" + fake "github.com/rackspace/gophercloud/testhelper/client" +) + +func TestDelete(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + HandleDeleteCDNAssetSuccessfully(t) + + err := Delete(fake.ServiceClient(), "96737ae3-cfc1-4c72-be88-5d0e7cc9a3f0", nil).ExtractErr() + th.AssertNoErr(t, err) +} diff --git a/vendor/github.com/rackspace/gophercloud/openstack/cdn/v1/serviceassets/results.go b/vendor/github.com/rackspace/gophercloud/openstack/cdn/v1/serviceassets/results.go new file mode 100644 index 000000000..1d8734b7c --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/openstack/cdn/v1/serviceassets/results.go @@ -0,0 +1,8 @@ +package serviceassets + +import "github.com/rackspace/gophercloud" + +// DeleteResult represents the result of a Delete operation. +type DeleteResult struct { + gophercloud.ErrResult +} diff --git a/vendor/github.com/rackspace/gophercloud/openstack/cdn/v1/serviceassets/urls.go b/vendor/github.com/rackspace/gophercloud/openstack/cdn/v1/serviceassets/urls.go new file mode 100644 index 000000000..cb0aea8fc --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/openstack/cdn/v1/serviceassets/urls.go @@ -0,0 +1,7 @@ +package serviceassets + +import "github.com/rackspace/gophercloud" + +func deleteURL(c *gophercloud.ServiceClient, id string) string { + return c.ServiceURL("services", id, "assets") +} diff --git a/vendor/github.com/rackspace/gophercloud/openstack/cdn/v1/services/doc.go b/vendor/github.com/rackspace/gophercloud/openstack/cdn/v1/services/doc.go new file mode 100644 index 000000000..41f7c60da --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/openstack/cdn/v1/services/doc.go @@ -0,0 +1,7 @@ +// Package services provides information and interaction with the services API +// resource in the OpenStack CDN service. This API resource allows for +// listing, creating, updating, retrieving, and deleting services. +// +// A service represents an application that has its content cached to the edge +// nodes. 
+package services diff --git a/vendor/github.com/rackspace/gophercloud/openstack/cdn/v1/services/errors.go b/vendor/github.com/rackspace/gophercloud/openstack/cdn/v1/services/errors.go new file mode 100644 index 000000000..359584c2a --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/openstack/cdn/v1/services/errors.go @@ -0,0 +1,7 @@ +package services + +import "fmt" + +func no(str string) error { + return fmt.Errorf("Required parameter %s not provided", str) +} diff --git a/vendor/github.com/rackspace/gophercloud/openstack/cdn/v1/services/fixtures.go b/vendor/github.com/rackspace/gophercloud/openstack/cdn/v1/services/fixtures.go new file mode 100644 index 000000000..d9bc9f20b --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/openstack/cdn/v1/services/fixtures.go @@ -0,0 +1,372 @@ +package services + +import ( + "fmt" + "net/http" + "testing" + + th "github.com/rackspace/gophercloud/testhelper" + fake "github.com/rackspace/gophercloud/testhelper/client" +) + +// HandleListCDNServiceSuccessfully creates an HTTP handler at `/services` on the test handler mux +// that responds with a `List` response. +func HandleListCDNServiceSuccessfully(t *testing.T) { + th.Mux.HandleFunc("/services", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + + r.ParseForm() + marker := r.Form.Get("marker") + switch marker { + case "": + fmt.Fprintf(w, ` + { + "links": [ + { + "rel": "next", + "href": "https://www.poppycdn.io/v1.0/services?marker=96737ae3-cfc1-4c72-be88-5d0e7cc9a3f0&limit=20" + } + ], + "services": [ + { + "id": "96737ae3-cfc1-4c72-be88-5d0e7cc9a3f0", + "name": "mywebsite.com", + "domains": [ + { + "domain": "www.mywebsite.com" + } + ], + "origins": [ + { + "origin": "mywebsite.com", + "port": 80, + "ssl": false + } + ], + "caching": [ + { + "name": "default", + "ttl": 3600 + }, + { + "name": "home", + "ttl": 17200, + "rules": [ + { + "name": "index", + "request_url": "/index.htm" + } + ] + }, + { + "name": "images", + "ttl": 12800, + "rules": [ + { + "name": "images", + "request_url": "*.png" + } + ] + } + ], + "restrictions": [ + { + "name": "website only", + "rules": [ + { + "name": "mywebsite.com", + "referrer": "www.mywebsite.com" + } + ] + } + ], + "flavor_id": "asia", + "status": "deployed", + "errors" : [], + "links": [ + { + "href": "https://www.poppycdn.io/v1.0/services/96737ae3-cfc1-4c72-be88-5d0e7cc9a3f0", + "rel": "self" + }, + { + "href": "mywebsite.com.cdn123.poppycdn.net", + "rel": "access_url" + }, + { + "href": "https://www.poppycdn.io/v1.0/flavors/asia", + "rel": "flavor" + } + ] + }, + { + "id": "96737ae3-cfc1-4c72-be88-5d0e7cc9a3f1", + "name": "myothersite.com", + "domains": [ + { + "domain": "www.myothersite.com" + } + ], + "origins": [ + { + "origin": "44.33.22.11", + "port": 80, + "ssl": false + }, + { + "origin": "77.66.55.44", + "port": 80, + "ssl": false, + "rules": [ + { + "name": "videos", + "request_url": "^/videos/*.m3u" + } + ] + } + ], + "caching": [ + { + "name": "default", + "ttl": 3600 + } + ], + "restrictions": [ + {} + ], + "flavor_id": "europe", + "status": "deployed", + "links": [ + { + "href": "https://www.poppycdn.io/v1.0/services/96737ae3-cfc1-4c72-be88-5d0e7cc9a3f1", + "rel": "self" + }, + { + "href": "myothersite.com.poppycdn.net", + "rel": "access_url" + }, + { + "href": "https://www.poppycdn.io/v1.0/flavors/europe", + "rel": "flavor" + } + ] + } + ] + } + `) + case 
"96737ae3-cfc1-4c72-be88-5d0e7cc9a3f1": + fmt.Fprintf(w, `{ + "services": [] + }`) + default: + t.Fatalf("Unexpected marker: [%s]", marker) + } + }) +} + +// HandleCreateCDNServiceSuccessfully creates an HTTP handler at `/services` on the test handler mux +// that responds with a `Create` response. +func HandleCreateCDNServiceSuccessfully(t *testing.T) { + th.Mux.HandleFunc("/services", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "POST") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + th.TestJSONRequest(t, r, ` + { + "name": "mywebsite.com", + "domains": [ + { + "domain": "www.mywebsite.com" + }, + { + "domain": "blog.mywebsite.com" + } + ], + "origins": [ + { + "origin": "mywebsite.com", + "port": 80, + "ssl": false + } + ], + "restrictions": [ + { + "name": "website only", + "rules": [ + { + "name": "mywebsite.com", + "referrer": "www.mywebsite.com" + } + ] + } + ], + "caching": [ + { + "name": "default", + "ttl": 3600 + } + ], + + "flavor_id": "cdn" + } + `) + w.Header().Add("Location", "https://global.cdn.api.rackspacecloud.com/v1.0/services/96737ae3-cfc1-4c72-be88-5d0e7cc9a3f0") + w.WriteHeader(http.StatusAccepted) + }) +} + +// HandleGetCDNServiceSuccessfully creates an HTTP handler at `/services/{id}` on the test handler mux +// that responds with a `Get` response. +func HandleGetCDNServiceSuccessfully(t *testing.T) { + th.Mux.HandleFunc("/services/96737ae3-cfc1-4c72-be88-5d0e7cc9a3f0", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + fmt.Fprintf(w, ` + { + "id": "96737ae3-cfc1-4c72-be88-5d0e7cc9a3f0", + "name": "mywebsite.com", + "domains": [ + { + "domain": "www.mywebsite.com", + "protocol": "http" + } + ], + "origins": [ + { + "origin": "mywebsite.com", + "port": 80, + "ssl": false + } + ], + "caching": [ + { + "name": "default", + "ttl": 3600 + }, + { + "name": "home", + "ttl": 17200, + "rules": [ + { + "name": "index", + "request_url": "/index.htm" + } + ] + }, + { + "name": "images", + "ttl": 12800, + "rules": [ + { + "name": "images", + "request_url": "*.png" + } + ] + } + ], + "restrictions": [ + { + "name": "website only", + "rules": [ + { + "name": "mywebsite.com", + "referrer": "www.mywebsite.com" + } + ] + } + ], + "flavor_id": "cdn", + "status": "deployed", + "errors" : [], + "links": [ + { + "href": "https://global.cdn.api.rackspacecloud.com/v1.0/110011/services/96737ae3-cfc1-4c72-be88-5d0e7cc9a3f0", + "rel": "self" + }, + { + "href": "blog.mywebsite.com.cdn1.raxcdn.com", + "rel": "access_url" + }, + { + "href": "https://global.cdn.api.rackspacecloud.com/v1.0/110011/flavors/cdn", + "rel": "flavor" + } + ] + } + `) + }) +} + +// HandleUpdateCDNServiceSuccessfully creates an HTTP handler at `/services/{id}` on the test handler mux +// that responds with a `Update` response. 
+func HandleUpdateCDNServiceSuccessfully(t *testing.T) { + th.Mux.HandleFunc("/services/96737ae3-cfc1-4c72-be88-5d0e7cc9a3f0", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "PATCH") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + th.TestJSONRequest(t, r, ` + [ + { + "op": "add", + "path": "/domains/-", + "value": {"domain": "appended.mocksite4.com"} + }, + { + "op": "add", + "path": "/domains/4", + "value": {"domain": "inserted.mocksite4.com"} + }, + { + "op": "add", + "path": "/domains", + "value": [ + {"domain": "bulkadded1.mocksite4.com"}, + {"domain": "bulkadded2.mocksite4.com"} + ] + }, + { + "op": "replace", + "path": "/origins/2", + "value": {"origin": "44.33.22.11", "port": 80, "ssl": false} + }, + { + "op": "replace", + "path": "/origins", + "value": [ + {"origin": "44.33.22.11", "port": 80, "ssl": false}, + {"origin": "55.44.33.22", "port": 443, "ssl": true} + ] + }, + { + "op": "remove", + "path": "/caching/8" + }, + { + "op": "remove", + "path": "/caching" + }, + { + "op": "replace", + "path": "/name", + "value": "differentServiceName" + } + ] + `) + w.Header().Add("Location", "https://www.poppycdn.io/v1.0/services/96737ae3-cfc1-4c72-be88-5d0e7cc9a3f0") + w.WriteHeader(http.StatusAccepted) + }) +} + +// HandleDeleteCDNServiceSuccessfully creates an HTTP handler at `/services/{id}` on the test handler mux +// that responds with a `Delete` response. +func HandleDeleteCDNServiceSuccessfully(t *testing.T) { + th.Mux.HandleFunc("/services/96737ae3-cfc1-4c72-be88-5d0e7cc9a3f0", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "DELETE") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + w.WriteHeader(http.StatusAccepted) + }) +} diff --git a/vendor/github.com/rackspace/gophercloud/openstack/cdn/v1/services/requests.go b/vendor/github.com/rackspace/gophercloud/openstack/cdn/v1/services/requests.go new file mode 100644 index 000000000..8b37928e2 --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/openstack/cdn/v1/services/requests.go @@ -0,0 +1,378 @@ +package services + +import ( + "fmt" + "strings" + + "github.com/rackspace/gophercloud" + "github.com/rackspace/gophercloud/pagination" +) + +// ListOptsBuilder allows extensions to add additional parameters to the +// List request. +type ListOptsBuilder interface { + ToCDNServiceListQuery() (string, error) +} + +// ListOpts allows the filtering and sorting of paginated collections through +// the API. Marker and Limit are used for pagination. +type ListOpts struct { + Marker string `q:"marker"` + Limit int `q:"limit"` +} + +// ToCDNServiceListQuery formats a ListOpts into a query string. +func (opts ListOpts) ToCDNServiceListQuery() (string, error) { + q, err := gophercloud.BuildQueryString(opts) + if err != nil { + return "", err + } + return q.String(), nil +} + +// List returns a Pager which allows you to iterate over a collection of +// CDN services. It accepts a ListOpts struct, which allows for pagination via +// marker and limit. 
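+//
+// A short sketch of walking every page (assuming an authenticated
+// *gophercloud.ServiceClient named "client"):
+//
+//     pager := services.List(client, &services.ListOpts{Limit: 20})
+//     err := pager.EachPage(func(page pagination.Page) (bool, error) {
+//         svcs, err := services.ExtractServices(page)
+//         if err != nil {
+//             return false, err
+//         }
+//         for _, s := range svcs {
+//             fmt.Println(s.Name, s.Status)
+//         }
+//         return true, nil
+//     })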
+func List(c *gophercloud.ServiceClient, opts ListOptsBuilder) pagination.Pager { + url := listURL(c) + if opts != nil { + query, err := opts.ToCDNServiceListQuery() + if err != nil { + return pagination.Pager{Err: err} + } + url += query + } + + createPage := func(r pagination.PageResult) pagination.Page { + p := ServicePage{pagination.MarkerPageBase{PageResult: r}} + p.MarkerPageBase.Owner = p + return p + } + + pager := pagination.NewPager(c, url, createPage) + return pager +} + +// CreateOptsBuilder is the interface options structs have to satisfy in order +// to be used in the main Create operation in this package. Since many +// extensions decorate or modify the common logic, it is useful for them to +// satisfy a basic interface in order for them to be used. +type CreateOptsBuilder interface { + ToCDNServiceCreateMap() (map[string]interface{}, error) +} + +// CreateOpts is the common options struct used in this package's Create +// operation. +type CreateOpts struct { + // REQUIRED. Specifies the name of the service. The minimum length for name is + // 3. The maximum length is 256. + Name string + // REQUIRED. Specifies a list of domains used by users to access their website. + Domains []Domain + // REQUIRED. Specifies a list of origin domains or IP addresses where the + // original assets are stored. + Origins []Origin + // REQUIRED. Specifies the CDN provider flavor ID to use. For a list of + // flavors, see the operation to list the available flavors. The minimum + // length for flavor_id is 1. The maximum length is 256. + FlavorID string + // OPTIONAL. Specifies the TTL rules for the assets under this service. Supports wildcards for fine-grained control. + Caching []CacheRule + // OPTIONAL. Specifies the restrictions that define who can access assets (content from the CDN cache). + Restrictions []Restriction +} + +// ToCDNServiceCreateMap casts a CreateOpts struct to a map. 
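+// It also enforces the REQUIRED fields documented on CreateOpts, returning an
+// error for any that are missing. A sketch of a minimal valid CreateOpts (the
+// domain, origin, and flavor values are illustrative):
+//
+//     opts := services.CreateOpts{
+//         Name:     "mywebsite.com",
+//         Domains:  []services.Domain{{Domain: "www.mywebsite.com"}},
+//         Origins:  []services.Origin{{Origin: "mywebsite.com", Port: 80, SSL: false}},
+//         FlavorID: "cdn",
+//     }
+//     location, err := services.Create(client, opts).Extract()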
+func (opts CreateOpts) ToCDNServiceCreateMap() (map[string]interface{}, error) { + s := make(map[string]interface{}) + + if opts.Name == "" { + return nil, no("Name") + } + s["name"] = opts.Name + + if opts.Domains == nil { + return nil, no("Domains") + } + for _, domain := range opts.Domains { + if domain.Domain == "" { + return nil, no("Domains[].Domain") + } + } + s["domains"] = opts.Domains + + if opts.Origins == nil { + return nil, no("Origins") + } + for _, origin := range opts.Origins { + if origin.Origin == "" { + return nil, no("Origins[].Origin") + } + if origin.Rules == nil && len(opts.Origins) > 1 { + return nil, no("Origins[].Rules") + } + for _, rule := range origin.Rules { + if rule.Name == "" { + return nil, no("Origins[].Rules[].Name") + } + if rule.RequestURL == "" { + return nil, no("Origins[].Rules[].RequestURL") + } + } + } + s["origins"] = opts.Origins + + if opts.FlavorID == "" { + return nil, no("FlavorID") + } + s["flavor_id"] = opts.FlavorID + + if opts.Caching != nil { + for _, cache := range opts.Caching { + if cache.Name == "" { + return nil, no("Caching[].Name") + } + if cache.Rules != nil { + for _, rule := range cache.Rules { + if rule.Name == "" { + return nil, no("Caching[].Rules[].Name") + } + if rule.RequestURL == "" { + return nil, no("Caching[].Rules[].RequestURL") + } + } + } + } + s["caching"] = opts.Caching + } + + if opts.Restrictions != nil { + for _, restriction := range opts.Restrictions { + if restriction.Name == "" { + return nil, no("Restrictions[].Name") + } + if restriction.Rules != nil { + for _, rule := range restriction.Rules { + if rule.Name == "" { + return nil, no("Restrictions[].Rules[].Name") + } + } + } + } + s["restrictions"] = opts.Restrictions + } + + return s, nil +} + +// Create accepts a CreateOpts struct and creates a new CDN service using the +// values provided. +func Create(c *gophercloud.ServiceClient, opts CreateOptsBuilder) CreateResult { + var res CreateResult + + reqBody, err := opts.ToCDNServiceCreateMap() + if err != nil { + res.Err = err + return res + } + + // Send request to API + resp, err := c.Post(createURL(c), &reqBody, nil, nil) + res.Header = resp.Header + res.Err = err + return res +} + +// Get retrieves a specific service based on its URL or its unique ID. For +// example, both "96737ae3-cfc1-4c72-be88-5d0e7cc9a3f0" and +// "https://global.cdn.api.rackspacecloud.com/v1.0/services/96737ae3-cfc1-4c72-be88-5d0e7cc9a3f0" +// are valid options for idOrURL. +func Get(c *gophercloud.ServiceClient, idOrURL string) GetResult { + var url string + if strings.Contains(idOrURL, "/") { + url = idOrURL + } else { + url = getURL(c, idOrURL) + } + + var res GetResult + _, res.Err = c.Get(url, &res.Body, nil) + return res +} + +// Path is a JSON pointer location that indicates which service parameter is being added, replaced, +// or removed. +type Path struct { + baseElement string +} + +func (p Path) renderRoot() string { + return "/" + p.baseElement +} + +func (p Path) renderDash() string { + return fmt.Sprintf("/%s/-", p.baseElement) +} + +func (p Path) renderIndex(index int64) string { + return fmt.Sprintf("/%s/%d", p.baseElement, index) +} + +var ( + // PathDomains indicates that an update operation is to be performed on a Domain. + PathDomains = Path{baseElement: "domains"} + + // PathOrigins indicates that an update operation is to be performed on an Origin. + PathOrigins = Path{baseElement: "origins"} + + // PathCaching indicates that an update operation is to be performed on a CacheRule. 
+ PathCaching = Path{baseElement: "caching"} +) + +type value interface { + toPatchValue() interface{} + appropriatePath() Path + renderRootOr(func(p Path) string) string +} + +// Patch represents a single update to an existing Service. Multiple updates to a service can be +// submitted at the same time. +type Patch interface { + ToCDNServiceUpdateMap() map[string]interface{} +} + +// Insertion is a Patch that requests the addition of a value (Domain, Origin, or CacheRule) to +// a Service at a fixed index. Use an Append instead to append the new value to the end of its +// collection. Pass it to the Update function as part of the Patch slice. +type Insertion struct { + Index int64 + Value value +} + +// ToCDNServiceUpdateMap converts an Insertion into a request body fragment suitable for the +// Update call. +func (i Insertion) ToCDNServiceUpdateMap() map[string]interface{} { + return map[string]interface{}{ + "op": "add", + "path": i.Value.renderRootOr(func(p Path) string { return p.renderIndex(i.Index) }), + "value": i.Value.toPatchValue(), + } +} + +// Append is a Patch that requests the addition of a value (Domain, Origin, or CacheRule) to a +// Service at the end of its respective collection. Use an Insertion instead to insert the value +// at a fixed index within the collection. Pass this to the Update function as part of its +// Patch slice. +type Append struct { + Value value +} + +// ToCDNServiceUpdateMap converts an Append into a request body fragment suitable for the +// Update call. +func (a Append) ToCDNServiceUpdateMap() map[string]interface{} { + return map[string]interface{}{ + "op": "add", + "path": a.Value.renderRootOr(func(p Path) string { return p.renderDash() }), + "value": a.Value.toPatchValue(), + } +} + +// Replacement is a Patch that alters a specific service parameter (Domain, Origin, or CacheRule) +// in-place by index. Pass it to the Update function as part of the Patch slice. +type Replacement struct { + Value value + Index int64 +} + +// ToCDNServiceUpdateMap converts a Replacement into a request body fragment suitable for the +// Update call. +func (r Replacement) ToCDNServiceUpdateMap() map[string]interface{} { + return map[string]interface{}{ + "op": "replace", + "path": r.Value.renderRootOr(func(p Path) string { return p.renderIndex(r.Index) }), + "value": r.Value.toPatchValue(), + } +} + +// NameReplacement specifically updates the Service name. Pass it to the Update function as part +// of the Patch slice. +type NameReplacement struct { + NewName string +} + +// ToCDNServiceUpdateMap converts a NameReplacement into a request body fragment suitable for the +// Update call. +func (r NameReplacement) ToCDNServiceUpdateMap() map[string]interface{} { + return map[string]interface{}{ + "op": "replace", + "path": "/name", + "value": r.NewName, + } +} + +// Removal is a Patch that requests the removal of a service parameter (Domain, Origin, or +// CacheRule) by index. Pass it to the Update function as part of the Patch slice. +type Removal struct { + Path Path + Index int64 + All bool +} + +// ToCDNServiceUpdateMap converts a Removal into a request body fragment suitable for the +// Update call. 
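+// For example, Removal{Path: PathCaching, Index: 8} renders as
+// {"op": "remove", "path": "/caching/8"}, while Removal{Path: PathCaching, All: true}
+// renders as {"op": "remove", "path": "/caching"}.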
+func (r Removal) ToCDNServiceUpdateMap() map[string]interface{} { + result := map[string]interface{}{"op": "remove"} + if r.All { + result["path"] = r.Path.renderRoot() + } else { + result["path"] = r.Path.renderIndex(r.Index) + } + return result +} + +type UpdateOpts []Patch + +// Update accepts a slice of Patch operations (Insertion, Append, Replacement or Removal) and +// updates an existing CDN service using the values provided. idOrURL can be either the service's +// URL or its ID. For example, both "96737ae3-cfc1-4c72-be88-5d0e7cc9a3f0" and +// "https://global.cdn.api.rackspacecloud.com/v1.0/services/96737ae3-cfc1-4c72-be88-5d0e7cc9a3f0" +// are valid options for idOrURL. +func Update(c *gophercloud.ServiceClient, idOrURL string, opts UpdateOpts) UpdateResult { + var url string + if strings.Contains(idOrURL, "/") { + url = idOrURL + } else { + url = updateURL(c, idOrURL) + } + + reqBody := make([]map[string]interface{}, len(opts)) + for i, patch := range opts { + reqBody[i] = patch.ToCDNServiceUpdateMap() + } + + resp, err := c.Request("PATCH", url, gophercloud.RequestOpts{ + JSONBody: &reqBody, + OkCodes: []int{202}, + }) + var result UpdateResult + result.Header = resp.Header + result.Err = err + return result +} + +// Delete accepts a service's ID or its URL and deletes the CDN service +// associated with it. For example, both "96737ae3-cfc1-4c72-be88-5d0e7cc9a3f0" and +// "https://global.cdn.api.rackspacecloud.com/v1.0/services/96737ae3-cfc1-4c72-be88-5d0e7cc9a3f0" +// are valid options for idOrURL. +func Delete(c *gophercloud.ServiceClient, idOrURL string) DeleteResult { + var url string + if strings.Contains(idOrURL, "/") { + url = idOrURL + } else { + url = deleteURL(c, idOrURL) + } + + var res DeleteResult + _, res.Err = c.Delete(url, nil) + return res +} diff --git a/vendor/github.com/rackspace/gophercloud/openstack/cdn/v1/services/requests_test.go b/vendor/github.com/rackspace/gophercloud/openstack/cdn/v1/services/requests_test.go new file mode 100644 index 000000000..59e826f04 --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/openstack/cdn/v1/services/requests_test.go @@ -0,0 +1,358 @@ +package services + +import ( + "testing" + + "github.com/rackspace/gophercloud" + "github.com/rackspace/gophercloud/pagination" + th "github.com/rackspace/gophercloud/testhelper" + fake "github.com/rackspace/gophercloud/testhelper/client" +) + +func TestList(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + HandleListCDNServiceSuccessfully(t) + + count := 0 + + err := List(fake.ServiceClient(), &ListOpts{}).EachPage(func(page pagination.Page) (bool, error) { + count++ + actual, err := ExtractServices(page) + if err != nil { + t.Errorf("Failed to extract services: %v", err) + return false, err + } + + expected := []Service{ + Service{ + ID: "96737ae3-cfc1-4c72-be88-5d0e7cc9a3f0", + Name: "mywebsite.com", + Domains: []Domain{ + Domain{ + Domain: "www.mywebsite.com", + }, + }, + Origins: []Origin{ + Origin{ + Origin: "mywebsite.com", + Port: 80, + SSL: false, + }, + }, + Caching: []CacheRule{ + CacheRule{ + Name: "default", + TTL: 3600, + }, + CacheRule{ + Name: "home", + TTL: 17200, + Rules: []TTLRule{ + TTLRule{ + Name: "index", + RequestURL: "/index.htm", + }, + }, + }, + CacheRule{ + Name: "images", + TTL: 12800, + Rules: []TTLRule{ + TTLRule{ + Name: "images", + RequestURL: "*.png", + }, + }, + }, + }, + Restrictions: []Restriction{ + Restriction{ + Name: "website only", + Rules: []RestrictionRule{ + RestrictionRule{ + Name: "mywebsite.com", + Referrer: 
"www.mywebsite.com", + }, + }, + }, + }, + FlavorID: "asia", + Status: "deployed", + Errors: []Error{}, + Links: []gophercloud.Link{ + gophercloud.Link{ + Href: "https://www.poppycdn.io/v1.0/services/96737ae3-cfc1-4c72-be88-5d0e7cc9a3f0", + Rel: "self", + }, + gophercloud.Link{ + Href: "mywebsite.com.cdn123.poppycdn.net", + Rel: "access_url", + }, + gophercloud.Link{ + Href: "https://www.poppycdn.io/v1.0/flavors/asia", + Rel: "flavor", + }, + }, + }, + Service{ + ID: "96737ae3-cfc1-4c72-be88-5d0e7cc9a3f1", + Name: "myothersite.com", + Domains: []Domain{ + Domain{ + Domain: "www.myothersite.com", + }, + }, + Origins: []Origin{ + Origin{ + Origin: "44.33.22.11", + Port: 80, + SSL: false, + }, + Origin{ + Origin: "77.66.55.44", + Port: 80, + SSL: false, + Rules: []OriginRule{ + OriginRule{ + Name: "videos", + RequestURL: "^/videos/*.m3u", + }, + }, + }, + }, + Caching: []CacheRule{ + CacheRule{ + Name: "default", + TTL: 3600, + }, + }, + Restrictions: []Restriction{}, + FlavorID: "europe", + Status: "deployed", + Links: []gophercloud.Link{ + gophercloud.Link{ + Href: "https://www.poppycdn.io/v1.0/services/96737ae3-cfc1-4c72-be88-5d0e7cc9a3f1", + Rel: "self", + }, + gophercloud.Link{ + Href: "myothersite.com.poppycdn.net", + Rel: "access_url", + }, + gophercloud.Link{ + Href: "https://www.poppycdn.io/v1.0/flavors/europe", + Rel: "flavor", + }, + }, + }, + } + + th.CheckDeepEquals(t, expected, actual) + + return true, nil + }) + th.AssertNoErr(t, err) + + if count != 1 { + t.Errorf("Expected 1 page, got %d", count) + } +} + +func TestCreate(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + HandleCreateCDNServiceSuccessfully(t) + + createOpts := CreateOpts{ + Name: "mywebsite.com", + Domains: []Domain{ + Domain{ + Domain: "www.mywebsite.com", + }, + Domain{ + Domain: "blog.mywebsite.com", + }, + }, + Origins: []Origin{ + Origin{ + Origin: "mywebsite.com", + Port: 80, + SSL: false, + }, + }, + Restrictions: []Restriction{ + Restriction{ + Name: "website only", + Rules: []RestrictionRule{ + RestrictionRule{ + Name: "mywebsite.com", + Referrer: "www.mywebsite.com", + }, + }, + }, + }, + Caching: []CacheRule{ + CacheRule{ + Name: "default", + TTL: 3600, + }, + }, + FlavorID: "cdn", + } + + expected := "https://global.cdn.api.rackspacecloud.com/v1.0/services/96737ae3-cfc1-4c72-be88-5d0e7cc9a3f0" + actual, err := Create(fake.ServiceClient(), createOpts).Extract() + th.AssertNoErr(t, err) + th.AssertEquals(t, expected, actual) +} + +func TestGet(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + HandleGetCDNServiceSuccessfully(t) + + expected := &Service{ + ID: "96737ae3-cfc1-4c72-be88-5d0e7cc9a3f0", + Name: "mywebsite.com", + Domains: []Domain{ + Domain{ + Domain: "www.mywebsite.com", + Protocol: "http", + }, + }, + Origins: []Origin{ + Origin{ + Origin: "mywebsite.com", + Port: 80, + SSL: false, + }, + }, + Caching: []CacheRule{ + CacheRule{ + Name: "default", + TTL: 3600, + }, + CacheRule{ + Name: "home", + TTL: 17200, + Rules: []TTLRule{ + TTLRule{ + Name: "index", + RequestURL: "/index.htm", + }, + }, + }, + CacheRule{ + Name: "images", + TTL: 12800, + Rules: []TTLRule{ + TTLRule{ + Name: "images", + RequestURL: "*.png", + }, + }, + }, + }, + Restrictions: []Restriction{ + Restriction{ + Name: "website only", + Rules: []RestrictionRule{ + RestrictionRule{ + Name: "mywebsite.com", + Referrer: "www.mywebsite.com", + }, + }, + }, + }, + FlavorID: "cdn", + Status: "deployed", + Errors: []Error{}, + Links: []gophercloud.Link{ + gophercloud.Link{ + Href: 
"https://global.cdn.api.rackspacecloud.com/v1.0/110011/services/96737ae3-cfc1-4c72-be88-5d0e7cc9a3f0", + Rel: "self", + }, + gophercloud.Link{ + Href: "blog.mywebsite.com.cdn1.raxcdn.com", + Rel: "access_url", + }, + gophercloud.Link{ + Href: "https://global.cdn.api.rackspacecloud.com/v1.0/110011/flavors/cdn", + Rel: "flavor", + }, + }, + } + + actual, err := Get(fake.ServiceClient(), "96737ae3-cfc1-4c72-be88-5d0e7cc9a3f0").Extract() + th.AssertNoErr(t, err) + th.AssertDeepEquals(t, expected, actual) +} + +func TestSuccessfulUpdate(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + HandleUpdateCDNServiceSuccessfully(t) + + expected := "https://www.poppycdn.io/v1.0/services/96737ae3-cfc1-4c72-be88-5d0e7cc9a3f0" + ops := UpdateOpts{ + // Append a single Domain + Append{Value: Domain{Domain: "appended.mocksite4.com"}}, + // Insert a single Domain + Insertion{ + Index: 4, + Value: Domain{Domain: "inserted.mocksite4.com"}, + }, + // Bulk addition + Append{ + Value: DomainList{ + Domain{Domain: "bulkadded1.mocksite4.com"}, + Domain{Domain: "bulkadded2.mocksite4.com"}, + }, + }, + // Replace a single Origin + Replacement{ + Index: 2, + Value: Origin{Origin: "44.33.22.11", Port: 80, SSL: false}, + }, + // Bulk replace Origins + Replacement{ + Index: 0, // Ignored + Value: OriginList{ + Origin{Origin: "44.33.22.11", Port: 80, SSL: false}, + Origin{Origin: "55.44.33.22", Port: 443, SSL: true}, + }, + }, + // Remove a single CacheRule + Removal{ + Index: 8, + Path: PathCaching, + }, + // Bulk removal + Removal{ + All: true, + Path: PathCaching, + }, + // Service name replacement + NameReplacement{ + NewName: "differentServiceName", + }, + } + + actual, err := Update(fake.ServiceClient(), "96737ae3-cfc1-4c72-be88-5d0e7cc9a3f0", ops).Extract() + th.AssertNoErr(t, err) + th.AssertEquals(t, expected, actual) +} + +func TestDelete(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + HandleDeleteCDNServiceSuccessfully(t) + + err := Delete(fake.ServiceClient(), "96737ae3-cfc1-4c72-be88-5d0e7cc9a3f0").ExtractErr() + th.AssertNoErr(t, err) +} diff --git a/vendor/github.com/rackspace/gophercloud/openstack/cdn/v1/services/results.go b/vendor/github.com/rackspace/gophercloud/openstack/cdn/v1/services/results.go new file mode 100644 index 000000000..33406c482 --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/openstack/cdn/v1/services/results.go @@ -0,0 +1,316 @@ +package services + +import ( + "github.com/rackspace/gophercloud" + "github.com/rackspace/gophercloud/pagination" + + "github.com/mitchellh/mapstructure" +) + +// Domain represents a domain used by users to access their website. +type Domain struct { + // Specifies the domain used to access the assets on their website, for which + // a CNAME is given to the CDN provider. + Domain string `mapstructure:"domain" json:"domain"` + // Specifies the protocol used to access the assets on this domain. Only "http" + // or "https" are currently allowed. The default is "http". + Protocol string `mapstructure:"protocol" json:"protocol,omitempty"` +} + +func (d Domain) toPatchValue() interface{} { + r := make(map[string]interface{}) + r["domain"] = d.Domain + if d.Protocol != "" { + r["protocol"] = d.Protocol + } + return r +} + +func (d Domain) appropriatePath() Path { + return PathDomains +} + +func (d Domain) renderRootOr(render func(p Path) string) string { + return render(d.appropriatePath()) +} + +// DomainList provides a useful way to perform bulk operations in a single Patch. 
+type DomainList []Domain + +func (list DomainList) toPatchValue() interface{} { + r := make([]interface{}, len(list)) + for i, domain := range list { + r[i] = domain.toPatchValue() + } + return r +} + +func (list DomainList) appropriatePath() Path { + return PathDomains +} + +func (list DomainList) renderRootOr(_ func(p Path) string) string { + return list.appropriatePath().renderRoot() +} + +// OriginRule represents a rule that defines when an origin should be accessed. +type OriginRule struct { + // Specifies the name of this rule. + Name string `mapstructure:"name" json:"name"` + // Specifies the request URL this rule should match for this origin to be used. Regex is supported. + RequestURL string `mapstructure:"request_url" json:"request_url"` +} + +// Origin specifies a list of origin domains or IP addresses where the original assets are stored. +type Origin struct { + // Specifies the URL or IP address to pull origin content from. + Origin string `mapstructure:"origin" json:"origin"` + // Specifies the port used to access the origin. The default is port 80. + Port int `mapstructure:"port" json:"port,omitempty"` + // Specifies whether or not to use HTTPS to access the origin. The default + // is false. + SSL bool `mapstructure:"ssl" json:"ssl"` + // Specifies a collection of rules that define the conditions when this origin + // should be accessed. If there is more than one origin, the rules parameter is required. + Rules []OriginRule `mapstructure:"rules" json:"rules,omitempty"` +} + +func (o Origin) toPatchValue() interface{} { + r := make(map[string]interface{}) + r["origin"] = o.Origin + r["port"] = o.Port + r["ssl"] = o.SSL + if len(o.Rules) > 0 { + r["rules"] = make([]map[string]interface{}, len(o.Rules)) + for index, rule := range o.Rules { + submap := r["rules"].([]map[string]interface{})[index] + submap["name"] = rule.Name + submap["request_url"] = rule.RequestURL + } + } + return r +} + +func (o Origin) appropriatePath() Path { + return PathOrigins +} + +func (o Origin) renderRootOr(render func(p Path) string) string { + return render(o.appropriatePath()) +} + +// OriginList provides a useful way to perform bulk operations in a single Patch. +type OriginList []Origin + +func (list OriginList) toPatchValue() interface{} { + r := make([]interface{}, len(list)) + for i, origin := range list { + r[i] = origin.toPatchValue() + } + return r +} + +func (list OriginList) appropriatePath() Path { + return PathOrigins +} + +func (list OriginList) renderRootOr(_ func(p Path) string) string { + return list.appropriatePath().renderRoot() +} + +// TTLRule specifies a rule that determines if a TTL should be applied to an asset. +type TTLRule struct { + // Specifies the name of this rule. + Name string `mapstructure:"name" json:"name"` + // Specifies the request URL this rule should match for this TTL to be used. Regex is supported. + RequestURL string `mapstructure:"request_url" json:"request_url"` +} + +// CacheRule specifies the TTL rules for the assets under this service. +type CacheRule struct { + // Specifies the name of this caching rule. Note: 'default' is a reserved name used for the default TTL setting. + Name string `mapstructure:"name" json:"name"` + // Specifies the TTL to apply. + TTL int `mapstructure:"ttl" json:"ttl"` + // Specifies a collection of rules that determine if this TTL should be applied to an asset. 
+ Rules []TTLRule `mapstructure:"rules" json:"rules,omitempty"` +} + +func (c CacheRule) toPatchValue() interface{} { + r := make(map[string]interface{}) + r["name"] = c.Name + r["ttl"] = c.TTL + r["rules"] = make([]map[string]interface{}, len(c.Rules)) + for index, rule := range c.Rules { + submap := r["rules"].([]map[string]interface{})[index] + submap["name"] = rule.Name + submap["request_url"] = rule.RequestURL + } + return r +} + +func (c CacheRule) appropriatePath() Path { + return PathCaching +} + +func (c CacheRule) renderRootOr(render func(p Path) string) string { + return render(c.appropriatePath()) +} + +// CacheRuleList provides a useful way to perform bulk operations in a single Patch. +type CacheRuleList []CacheRule + +func (list CacheRuleList) toPatchValue() interface{} { + r := make([]interface{}, len(list)) + for i, rule := range list { + r[i] = rule.toPatchValue() + } + return r +} + +func (list CacheRuleList) appropriatePath() Path { + return PathCaching +} + +func (list CacheRuleList) renderRootOr(_ func(p Path) string) string { + return list.appropriatePath().renderRoot() +} + +// RestrictionRule specifies a rule that determines if this restriction should be applied to an asset. +type RestrictionRule struct { + // Specifies the name of this rule. + Name string `mapstructure:"name" json:"name"` + // Specifies the http host that requests must come from. + Referrer string `mapstructure:"referrer" json:"referrer,omitempty"` +} + +// Restriction specifies a restriction that defines who can access assets (content from the CDN cache). +type Restriction struct { + // Specifies the name of this restriction. + Name string `mapstructure:"name" json:"name"` + // Specifies a collection of rules that determine if this TTL should be applied to an asset. + Rules []RestrictionRule `mapstructure:"rules" json:"rules"` +} + +// Error specifies an error that occurred during the previous service action. +type Error struct { + // Specifies an error message detailing why there is an error. + Message string `mapstructure:"message"` +} + +// Service represents a CDN service resource. +type Service struct { + // Specifies the service ID that represents distributed content. The value is + // a UUID, such as 96737ae3-cfc1-4c72-be88-5d0e7cc9a3f0, that is generated by the server. + ID string `mapstructure:"id"` + // Specifies the name of the service. + Name string `mapstructure:"name"` + // Specifies a list of domains used by users to access their website. + Domains []Domain `mapstructure:"domains"` + // Specifies a list of origin domains or IP addresses where the original assets are stored. + Origins []Origin `mapstructure:"origins"` + // Specifies the TTL rules for the assets under this service. Supports wildcards for fine grained control. + Caching []CacheRule `mapstructure:"caching"` + // Specifies the restrictions that define who can access assets (content from the CDN cache). + Restrictions []Restriction `mapstructure:"restrictions" json:"restrictions,omitempty"` + // Specifies the CDN provider flavor ID to use. For a list of flavors, see the operation to list the available flavors. + FlavorID string `mapstructure:"flavor_id"` + // Specifies the current status of the service. + Status string `mapstructure:"status"` + // Specifies the list of errors that occurred during the previous service action. + Errors []Error `mapstructure:"errors"` + // Specifies the self-navigating JSON document paths. 
+ Links []gophercloud.Link `mapstructure:"links"` +} + +// ServicePage is the page returned by a pager when traversing over a +// collection of CDN services. +type ServicePage struct { + pagination.MarkerPageBase +} + +// IsEmpty returns true if a ListResult contains no services. +func (r ServicePage) IsEmpty() (bool, error) { + services, err := ExtractServices(r) + if err != nil { + return true, err + } + return len(services) == 0, nil +} + +// LastMarker returns the last service in a ListResult. +func (r ServicePage) LastMarker() (string, error) { + services, err := ExtractServices(r) + if err != nil { + return "", err + } + if len(services) == 0 { + return "", nil + } + return (services[len(services)-1]).ID, nil +} + +// ExtractServices is a function that takes a ListResult and returns the services' information. +func ExtractServices(page pagination.Page) ([]Service, error) { + var response struct { + Services []Service `mapstructure:"services"` + } + + err := mapstructure.Decode(page.(ServicePage).Body, &response) + return response.Services, err +} + +// CreateResult represents the result of a Create operation. +type CreateResult struct { + gophercloud.Result +} + +// Extract is a method that extracts the location of a newly created service. +func (r CreateResult) Extract() (string, error) { + if r.Err != nil { + return "", r.Err + } + if l, ok := r.Header["Location"]; ok && len(l) > 0 { + return l[0], nil + } + return "", nil +} + +// GetResult represents the result of a get operation. +type GetResult struct { + gophercloud.Result +} + +// Extract is a function that extracts a service from a GetResult. +func (r GetResult) Extract() (*Service, error) { + if r.Err != nil { + return nil, r.Err + } + + var res Service + + err := mapstructure.Decode(r.Body, &res) + + return &res, err +} + +// UpdateResult represents the result of a Update operation. +type UpdateResult struct { + gophercloud.Result +} + +// Extract is a method that extracts the location of an updated service. +func (r UpdateResult) Extract() (string, error) { + if r.Err != nil { + return "", r.Err + } + if l, ok := r.Header["Location"]; ok && len(l) > 0 { + return l[0], nil + } + return "", nil +} + +// DeleteResult represents the result of a Delete operation. 
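+// Call ExtractErr (provided by the embedded gophercloud.ErrResult) to surface
+// any error, where serviceID is the service's UUID or full URL:
+//
+//     err := services.Delete(client, serviceID).ExtractErr()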
+type DeleteResult struct {
+	gophercloud.ErrResult
+}
diff --git a/vendor/github.com/rackspace/gophercloud/openstack/cdn/v1/services/urls.go b/vendor/github.com/rackspace/gophercloud/openstack/cdn/v1/services/urls.go
new file mode 100644
index 000000000..d953d4c19
--- /dev/null
+++ b/vendor/github.com/rackspace/gophercloud/openstack/cdn/v1/services/urls.go
@@ -0,0 +1,23 @@
+package services
+
+import "github.com/rackspace/gophercloud"
+
+func listURL(c *gophercloud.ServiceClient) string {
+	return c.ServiceURL("services")
+}
+
+func createURL(c *gophercloud.ServiceClient) string {
+	return listURL(c)
+}
+
+func getURL(c *gophercloud.ServiceClient, id string) string {
+	return c.ServiceURL("services", id)
+}
+
+func updateURL(c *gophercloud.ServiceClient, id string) string {
+	return getURL(c, id)
+}
+
+func deleteURL(c *gophercloud.ServiceClient, id string) string {
+	return getURL(c, id)
+}
diff --git a/vendor/github.com/rackspace/gophercloud/openstack/client.go b/vendor/github.com/rackspace/gophercloud/openstack/client.go
new file mode 100644
index 000000000..33602a696
--- /dev/null
+++ b/vendor/github.com/rackspace/gophercloud/openstack/client.go
@@ -0,0 +1,274 @@
+package openstack
+
+import (
+	"fmt"
+	"net/url"
+
+	"github.com/rackspace/gophercloud"
+	tokens2 "github.com/rackspace/gophercloud/openstack/identity/v2/tokens"
+	tokens3 "github.com/rackspace/gophercloud/openstack/identity/v3/tokens"
+	"github.com/rackspace/gophercloud/openstack/utils"
+)
+
+const (
+	v20 = "v2.0"
+	v30 = "v3.0"
+)
+
+// NewClient prepares an unauthenticated ProviderClient instance.
+// Most users will probably prefer using the AuthenticatedClient function instead.
+// This is useful if, for example, you wish to explicitly control the version of
+// the identity service used for authentication.
+func NewClient(endpoint string) (*gophercloud.ProviderClient, error) {
+	u, err := url.Parse(endpoint)
+	if err != nil {
+		return nil, err
+	}
+	hadPath := u.Path != ""
+	u.Path, u.RawQuery, u.Fragment = "", "", ""
+	base := u.String()
+
+	endpoint = gophercloud.NormalizeURL(endpoint)
+	base = gophercloud.NormalizeURL(base)
+
+	if hadPath {
+		return &gophercloud.ProviderClient{
+			IdentityBase:     base,
+			IdentityEndpoint: endpoint,
+		}, nil
+	}
+
+	return &gophercloud.ProviderClient{
+		IdentityBase:     base,
+		IdentityEndpoint: "",
+	}, nil
+}
+
+// AuthenticatedClient logs in to an OpenStack cloud found at the identity endpoint
+// specified by options, acquires a token, and returns a ProviderClient instance
+// that's ready to operate.
+// It first queries the root identity endpoint to determine which versions of the
+// identity service are supported, then chooses the most recent identity service
+// available to proceed.
+func AuthenticatedClient(options gophercloud.AuthOptions) (*gophercloud.ProviderClient, error) {
+	client, err := NewClient(options.IdentityEndpoint)
+	if err != nil {
+		return nil, err
+	}
+
+	err = Authenticate(client, options)
+	if err != nil {
+		return nil, err
+	}
+	return client, nil
+}
+
+// Authenticate or re-authenticate against the most recent identity service supported at the provided endpoint.
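+//
+// Most callers use AuthenticatedClient instead; invoking this directly is a
+// two-step sketch (assuming a populated gophercloud.AuthOptions named "opts"):
+//
+//     provider, err := openstack.NewClient(opts.IdentityEndpoint)
+//     if err == nil {
+//         err = openstack.Authenticate(provider, opts)
+//     }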
+func Authenticate(client *gophercloud.ProviderClient, options gophercloud.AuthOptions) error { + versions := []*utils.Version{ + &utils.Version{ID: v20, Priority: 20, Suffix: "/v2.0/"}, + &utils.Version{ID: v30, Priority: 30, Suffix: "/v3/"}, + } + + chosen, endpoint, err := utils.ChooseVersion(client, versions) + if err != nil { + return err + } + + switch chosen.ID { + case v20: + return v2auth(client, endpoint, options) + case v30: + return v3auth(client, endpoint, options) + default: + // The switch statement must be out of date from the versions list. + return fmt.Errorf("Unrecognized identity version: %s", chosen.ID) + } +} + +// AuthenticateV2 explicitly authenticates against the identity v2 endpoint. +func AuthenticateV2(client *gophercloud.ProviderClient, options gophercloud.AuthOptions) error { + return v2auth(client, "", options) +} + +func v2auth(client *gophercloud.ProviderClient, endpoint string, options gophercloud.AuthOptions) error { + v2Client := NewIdentityV2(client) + if endpoint != "" { + v2Client.Endpoint = endpoint + } + + result := tokens2.Create(v2Client, tokens2.AuthOptions{AuthOptions: options}) + + token, err := result.ExtractToken() + if err != nil { + return err + } + + catalog, err := result.ExtractServiceCatalog() + if err != nil { + return err + } + + if options.AllowReauth { + client.ReauthFunc = func() error { + client.TokenID = "" + return AuthenticateV2(client, options) + } + } + client.TokenID = token.ID + client.EndpointLocator = func(opts gophercloud.EndpointOpts) (string, error) { + return V2EndpointURL(catalog, opts) + } + + return nil +} + +// AuthenticateV3 explicitly authenticates against the identity v3 service. +func AuthenticateV3(client *gophercloud.ProviderClient, options gophercloud.AuthOptions) error { + return v3auth(client, "", options) +} + +func v3auth(client *gophercloud.ProviderClient, endpoint string, options gophercloud.AuthOptions) error { + // Override the generated service endpoint with the one returned by the version endpoint. + v3Client := NewIdentityV3(client) + if endpoint != "" { + v3Client.Endpoint = endpoint + } + + var scope *tokens3.Scope + if options.TenantID != "" { + scope = &tokens3.Scope{ + ProjectID: options.TenantID, + } + options.TenantID = "" + options.TenantName = "" + } else { + if options.TenantName != "" { + scope = &tokens3.Scope{ + ProjectName: options.TenantName, + DomainID: options.DomainID, + DomainName: options.DomainName, + } + options.TenantName = "" + } + } + + result := tokens3.Create(v3Client, options, scope) + + token, err := result.ExtractToken() + if err != nil { + return err + } + + catalog, err := result.ExtractServiceCatalog() + if err != nil { + return err + } + + client.TokenID = token.ID + + if options.AllowReauth { + client.ReauthFunc = func() error { + client.TokenID = "" + return AuthenticateV3(client, options) + } + } + client.EndpointLocator = func(opts gophercloud.EndpointOpts) (string, error) { + return V3EndpointURL(catalog, opts) + } + + return nil +} + +// NewIdentityV2 creates a ServiceClient that may be used to interact with the v2 identity service. +func NewIdentityV2(client *gophercloud.ProviderClient) *gophercloud.ServiceClient { + v2Endpoint := client.IdentityBase + "v2.0/" + + return &gophercloud.ServiceClient{ + ProviderClient: client, + Endpoint: v2Endpoint, + } +} + +// NewIdentityV3 creates a ServiceClient that may be used to access the v3 identity service. 
+func NewIdentityV3(client *gophercloud.ProviderClient) *gophercloud.ServiceClient { + v3Endpoint := client.IdentityBase + "v3/" + + return &gophercloud.ServiceClient{ + ProviderClient: client, + Endpoint: v3Endpoint, + } +} + +// NewObjectStorageV1 creates a ServiceClient that may be used with the v1 object storage package. +func NewObjectStorageV1(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) { + eo.ApplyDefaults("object-store") + url, err := client.EndpointLocator(eo) + if err != nil { + return nil, err + } + return &gophercloud.ServiceClient{ProviderClient: client, Endpoint: url}, nil +} + +// NewComputeV2 creates a ServiceClient that may be used with the v2 compute package. +func NewComputeV2(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) { + eo.ApplyDefaults("compute") + url, err := client.EndpointLocator(eo) + if err != nil { + return nil, err + } + return &gophercloud.ServiceClient{ProviderClient: client, Endpoint: url}, nil +} + +// NewNetworkV2 creates a ServiceClient that may be used with the v2 network package. +func NewNetworkV2(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) { + eo.ApplyDefaults("network") + url, err := client.EndpointLocator(eo) + if err != nil { + return nil, err + } + return &gophercloud.ServiceClient{ + ProviderClient: client, + Endpoint: url, + ResourceBase: url + "v2.0/", + }, nil +} + +// NewBlockStorageV1 creates a ServiceClient that may be used to access the v1 block storage service. +func NewBlockStorageV1(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) { + eo.ApplyDefaults("volume") + url, err := client.EndpointLocator(eo) + if err != nil { + return nil, err + } + return &gophercloud.ServiceClient{ProviderClient: client, Endpoint: url}, nil +} + +// NewCDNV1 creates a ServiceClient that may be used to access the OpenStack v1 +// CDN service. +func NewCDNV1(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) { + eo.ApplyDefaults("cdn") + url, err := client.EndpointLocator(eo) + if err != nil { + return nil, err + } + return &gophercloud.ServiceClient{ProviderClient: client, Endpoint: url}, nil +} + +// NewOrchestrationV1 creates a ServiceClient that may be used to access the v1 orchestration service. +func NewOrchestrationV1(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) { + eo.ApplyDefaults("orchestration") + url, err := client.EndpointLocator(eo) + if err != nil { + return nil, err + } + return &gophercloud.ServiceClient{ProviderClient: client, Endpoint: url}, nil +} + +// NewDBV1 creates a ServiceClient that may be used to access the v1 DB service. 
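+//
+// Each of the NewXxx constructors in this file follows the same pattern:
+// apply a default service type, locate an endpoint in the catalog, and wrap
+// it in a ServiceClient. For example (assuming an authenticated provider
+// client and a region named "North" in the catalog):
+//
+//     compute, err := openstack.NewComputeV2(provider, gophercloud.EndpointOpts{
+//         Region: "North",
+//     })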
+func NewDBV1(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) { + eo.ApplyDefaults("database") + url, err := client.EndpointLocator(eo) + if err != nil { + return nil, err + } + return &gophercloud.ServiceClient{ProviderClient: client, Endpoint: url}, nil +} diff --git a/vendor/github.com/rackspace/gophercloud/openstack/client_test.go b/vendor/github.com/rackspace/gophercloud/openstack/client_test.go new file mode 100644 index 000000000..257260c4e --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/openstack/client_test.go @@ -0,0 +1,161 @@ +package openstack + +import ( + "fmt" + "net/http" + "testing" + + "github.com/rackspace/gophercloud" + th "github.com/rackspace/gophercloud/testhelper" +) + +func TestAuthenticatedClientV3(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + const ID = "0123456789" + + th.Mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) { + fmt.Fprintf(w, ` + { + "versions": { + "values": [ + { + "status": "stable", + "id": "v3.0", + "links": [ + { "href": "%s", "rel": "self" } + ] + }, + { + "status": "stable", + "id": "v2.0", + "links": [ + { "href": "%s", "rel": "self" } + ] + } + ] + } + } + `, th.Endpoint()+"v3/", th.Endpoint()+"v2.0/") + }) + + th.Mux.HandleFunc("/v3/auth/tokens", func(w http.ResponseWriter, r *http.Request) { + w.Header().Add("X-Subject-Token", ID) + + w.WriteHeader(http.StatusCreated) + fmt.Fprintf(w, `{ "token": { "expires_at": "2013-02-02T18:30:59.000000Z" } }`) + }) + + options := gophercloud.AuthOptions{ + UserID: "me", + Password: "secret", + IdentityEndpoint: th.Endpoint(), + } + client, err := AuthenticatedClient(options) + th.AssertNoErr(t, err) + th.CheckEquals(t, ID, client.TokenID) +} + +func TestAuthenticatedClientV2(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + th.Mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) { + fmt.Fprintf(w, ` + { + "versions": { + "values": [ + { + "status": "experimental", + "id": "v3.0", + "links": [ + { "href": "%s", "rel": "self" } + ] + }, + { + "status": "stable", + "id": "v2.0", + "links": [ + { "href": "%s", "rel": "self" } + ] + } + ] + } + } + `, th.Endpoint()+"v3/", th.Endpoint()+"v2.0/") + }) + + th.Mux.HandleFunc("/v2.0/tokens", func(w http.ResponseWriter, r *http.Request) { + fmt.Fprintf(w, ` + { + "access": { + "token": { + "id": "01234567890", + "expires": "2014-10-01T10:00:00.000000Z" + }, + "serviceCatalog": [ + { + "name": "Cloud Servers", + "type": "compute", + "endpoints": [ + { + "tenantId": "t1000", + "publicURL": "https://compute.north.host.com/v1/t1000", + "internalURL": "https://compute.north.internal/v1/t1000", + "region": "North", + "versionId": "1", + "versionInfo": "https://compute.north.host.com/v1/", + "versionList": "https://compute.north.host.com/" + }, + { + "tenantId": "t1000", + "publicURL": "https://compute.north.host.com/v1.1/t1000", + "internalURL": "https://compute.north.internal/v1.1/t1000", + "region": "North", + "versionId": "1.1", + "versionInfo": "https://compute.north.host.com/v1.1/", + "versionList": "https://compute.north.host.com/" + } + ], + "endpoints_links": [] + }, + { + "name": "Cloud Files", + "type": "object-store", + "endpoints": [ + { + "tenantId": "t1000", + "publicURL": "https://storage.north.host.com/v1/t1000", + "internalURL": "https://storage.north.internal/v1/t1000", + "region": "North", + "versionId": "1", + "versionInfo": "https://storage.north.host.com/v1/", + "versionList": "https://storage.north.host.com/" + }, + { + 
"tenantId": "t1000", + "publicURL": "https://storage.south.host.com/v1/t1000", + "internalURL": "https://storage.south.internal/v1/t1000", + "region": "South", + "versionId": "1", + "versionInfo": "https://storage.south.host.com/v1/", + "versionList": "https://storage.south.host.com/" + } + ] + } + ] + } + } + `) + }) + + options := gophercloud.AuthOptions{ + Username: "me", + Password: "secret", + IdentityEndpoint: th.Endpoint(), + } + client, err := AuthenticatedClient(options) + th.AssertNoErr(t, err) + th.CheckEquals(t, "01234567890", client.TokenID) +} diff --git a/vendor/github.com/rackspace/gophercloud/openstack/common/README.md b/vendor/github.com/rackspace/gophercloud/openstack/common/README.md new file mode 100644 index 000000000..7b55795d0 --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/openstack/common/README.md @@ -0,0 +1,3 @@ +# Common Resources + +This directory is for resources that are shared by multiple services. diff --git a/vendor/github.com/rackspace/gophercloud/openstack/common/extensions/doc.go b/vendor/github.com/rackspace/gophercloud/openstack/common/extensions/doc.go new file mode 100644 index 000000000..4a168f4b2 --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/openstack/common/extensions/doc.go @@ -0,0 +1,15 @@ +// Package extensions provides information and interaction with the different extensions available +// for an OpenStack service. +// +// The purpose of OpenStack API extensions is to: +// +// - Introduce new features in the API without requiring a version change. +// - Introduce vendor-specific niche functionality. +// - Act as a proving ground for experimental functionalities that might be included in a future +// version of the API. +// +// Extensions usually have tags that prevent conflicts with other extensions that define attributes +// or resources with the same names, and with core resources and attributes. +// Because an extension might not be supported by all plug-ins, its availability varies with deployments +// and the specific plug-in. +package extensions diff --git a/vendor/github.com/rackspace/gophercloud/openstack/common/extensions/errors.go b/vendor/github.com/rackspace/gophercloud/openstack/common/extensions/errors.go new file mode 100644 index 000000000..aeec0fa75 --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/openstack/common/extensions/errors.go @@ -0,0 +1 @@ +package extensions diff --git a/vendor/github.com/rackspace/gophercloud/openstack/common/extensions/fixtures.go b/vendor/github.com/rackspace/gophercloud/openstack/common/extensions/fixtures.go new file mode 100644 index 000000000..0ed7de9f1 --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/openstack/common/extensions/fixtures.go @@ -0,0 +1,91 @@ +// +build fixtures + +package extensions + +import ( + "fmt" + "net/http" + "testing" + + th "github.com/rackspace/gophercloud/testhelper" + "github.com/rackspace/gophercloud/testhelper/client" +) + +// ListOutput provides a single page of Extension results. +const ListOutput = ` +{ + "extensions": [ + { + "updated": "2013-01-20T00:00:00-00:00", + "name": "Neutron Service Type Management", + "links": [], + "namespace": "http://docs.openstack.org/ext/neutron/service-type/api/v1.0", + "alias": "service-type", + "description": "API for retrieving service providers for Neutron advanced services" + } + ] +}` + +// GetOutput provides a single Extension result. 
+const GetOutput = `
+{
+	"extension": {
+		"updated": "2013-02-03T10:00:00-00:00",
+		"name": "agent",
+		"links": [],
+		"namespace": "http://docs.openstack.org/ext/agent/api/v2.0",
+		"alias": "agent",
+		"description": "The agent management extension."
+	}
+}
+`
+
+// ListedExtension is the Extension that should be parsed from ListOutput.
+var ListedExtension = Extension{
+	Updated:     "2013-01-20T00:00:00-00:00",
+	Name:        "Neutron Service Type Management",
+	Links:       []interface{}{},
+	Namespace:   "http://docs.openstack.org/ext/neutron/service-type/api/v1.0",
+	Alias:       "service-type",
+	Description: "API for retrieving service providers for Neutron advanced services",
+}
+
+// ExpectedExtensions is a slice containing the Extension that should be parsed from ListOutput.
+var ExpectedExtensions = []Extension{ListedExtension}
+
+// SingleExtension is the Extension that should be parsed from GetOutput.
+var SingleExtension = &Extension{
+	Updated:     "2013-02-03T10:00:00-00:00",
+	Name:        "agent",
+	Links:       []interface{}{},
+	Namespace:   "http://docs.openstack.org/ext/agent/api/v2.0",
+	Alias:       "agent",
+	Description: "The agent management extension.",
+}
+
+// HandleListExtensionsSuccessfully creates an HTTP handler at `/extensions` on the test handler
+// mux that responds with a list containing a single extension.
+func HandleListExtensionsSuccessfully(t *testing.T) {
+	th.Mux.HandleFunc("/extensions", func(w http.ResponseWriter, r *http.Request) {
+		th.TestMethod(t, r, "GET")
+		th.TestHeader(t, r, "X-Auth-Token", client.TokenID)
+
+		w.Header().Add("Content-Type", "application/json")
+
+		fmt.Fprintf(w, ListOutput)
+	})
+}
+
+// HandleGetExtensionSuccessfully creates an HTTP handler at `/extensions/agent` that responds with
+// a JSON payload corresponding to SingleExtension.
+func HandleGetExtensionSuccessfully(t *testing.T) {
+	th.Mux.HandleFunc("/extensions/agent", func(w http.ResponseWriter, r *http.Request) {
+		th.TestMethod(t, r, "GET")
+		th.TestHeader(t, r, "X-Auth-Token", client.TokenID)
+
+		w.Header().Add("Content-Type", "application/json")
+		w.WriteHeader(http.StatusOK)
+
+		fmt.Fprintf(w, GetOutput)
+	})
+}
diff --git a/vendor/github.com/rackspace/gophercloud/openstack/common/extensions/requests.go b/vendor/github.com/rackspace/gophercloud/openstack/common/extensions/requests.go
new file mode 100644
index 000000000..0b7108501
--- /dev/null
+++ b/vendor/github.com/rackspace/gophercloud/openstack/common/extensions/requests.go
@@ -0,0 +1,21 @@
+package extensions
+
+import (
+	"github.com/rackspace/gophercloud"
+	"github.com/rackspace/gophercloud/pagination"
+)
+
+// Get retrieves information for a specific extension using its alias.
+func Get(c *gophercloud.ServiceClient, alias string) GetResult {
+	var res GetResult
+	_, res.Err = c.Get(ExtensionURL(c, alias), &res.Body, nil)
+	return res
+}
+
+// List returns a Pager which allows you to iterate over the full collection of extensions.
+// It does not accept query parameters.
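The List implementation follows. As a hedged sketch of driving Get and List from caller code, written as if inside this package: the ServiceClient, the "agent" alias, and the fmt import are assumptions, not part of the vendored source.

    // printExtensions is a hypothetical helper, for illustration only.
    func printExtensions(client *gophercloud.ServiceClient) error {
    	// Fetch a single extension by its alias.
    	ext, err := Get(client, "agent").Extract()
    	if err != nil {
    		return err
    	}
    	fmt.Printf("%s: %s\n", ext.Alias, ext.Description)

    	// Walk every page of the collection; ExtractExtensions converts each
    	// generic page into a typed []Extension slice.
    	return List(client).EachPage(func(page pagination.Page) (bool, error) {
    		all, err := ExtractExtensions(page)
    		if err != nil {
    			return false, err
    		}
    		for _, e := range all {
    			fmt.Println(e.Name)
    		}
    		return true, nil
    	})
    }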
+func List(c *gophercloud.ServiceClient) pagination.Pager { + return pagination.NewPager(c, ListExtensionURL(c), func(r pagination.PageResult) pagination.Page { + return ExtensionPage{pagination.SinglePageBase(r)} + }) +} diff --git a/vendor/github.com/rackspace/gophercloud/openstack/common/extensions/requests_test.go b/vendor/github.com/rackspace/gophercloud/openstack/common/extensions/requests_test.go new file mode 100644 index 000000000..6550283df --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/openstack/common/extensions/requests_test.go @@ -0,0 +1,38 @@ +package extensions + +import ( + "testing" + + "github.com/rackspace/gophercloud/pagination" + th "github.com/rackspace/gophercloud/testhelper" + "github.com/rackspace/gophercloud/testhelper/client" +) + +func TestList(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleListExtensionsSuccessfully(t) + + count := 0 + + List(client.ServiceClient()).EachPage(func(page pagination.Page) (bool, error) { + count++ + actual, err := ExtractExtensions(page) + th.AssertNoErr(t, err) + th.AssertDeepEquals(t, ExpectedExtensions, actual) + + return true, nil + }) + + th.CheckEquals(t, 1, count) +} + +func TestGet(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleGetExtensionSuccessfully(t) + + actual, err := Get(client.ServiceClient(), "agent").Extract() + th.AssertNoErr(t, err) + th.CheckDeepEquals(t, SingleExtension, actual) +} diff --git a/vendor/github.com/rackspace/gophercloud/openstack/common/extensions/results.go b/vendor/github.com/rackspace/gophercloud/openstack/common/extensions/results.go new file mode 100644 index 000000000..777d083fa --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/openstack/common/extensions/results.go @@ -0,0 +1,65 @@ +package extensions + +import ( + "github.com/mitchellh/mapstructure" + "github.com/rackspace/gophercloud" + "github.com/rackspace/gophercloud/pagination" +) + +// GetResult temporarily stores the result of a Get call. +// Use its Extract() method to interpret it as an Extension. +type GetResult struct { + gophercloud.Result +} + +// Extract interprets a GetResult as an Extension. +func (r GetResult) Extract() (*Extension, error) { + if r.Err != nil { + return nil, r.Err + } + + var res struct { + Extension *Extension `json:"extension"` + } + + err := mapstructure.Decode(r.Body, &res) + + return res.Extension, err +} + +// Extension is a struct that represents an OpenStack extension. +type Extension struct { + Updated string `json:"updated" mapstructure:"updated"` + Name string `json:"name" mapstructure:"name"` + Links []interface{} `json:"links" mapstructure:"links"` + Namespace string `json:"namespace" mapstructure:"namespace"` + Alias string `json:"alias" mapstructure:"alias"` + Description string `json:"description" mapstructure:"description"` +} + +// ExtensionPage is the page returned by a pager when traversing over a collection of extensions. +type ExtensionPage struct { + pagination.SinglePageBase +} + +// IsEmpty checks whether an ExtensionPage struct is empty. +func (r ExtensionPage) IsEmpty() (bool, error) { + is, err := ExtractExtensions(r) + if err != nil { + return true, err + } + return len(is) == 0, nil +} + +// ExtractExtensions accepts a Page struct, specifically an ExtensionPage struct, and extracts the +// elements into a slice of Extension structs. +// In other words, a generic collection is mapped into a relevant slice. 
+func ExtractExtensions(page pagination.Page) ([]Extension, error) { + var resp struct { + Extensions []Extension `mapstructure:"extensions"` + } + + err := mapstructure.Decode(page.(ExtensionPage).Body, &resp) + + return resp.Extensions, err +} diff --git a/vendor/github.com/rackspace/gophercloud/openstack/common/extensions/urls.go b/vendor/github.com/rackspace/gophercloud/openstack/common/extensions/urls.go new file mode 100644 index 000000000..6460c66bc --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/openstack/common/extensions/urls.go @@ -0,0 +1,13 @@ +package extensions + +import "github.com/rackspace/gophercloud" + +// ExtensionURL generates the URL for an extension resource by name. +func ExtensionURL(c *gophercloud.ServiceClient, name string) string { + return c.ServiceURL("extensions", name) +} + +// ListExtensionURL generates the URL for the extensions resource collection. +func ListExtensionURL(c *gophercloud.ServiceClient) string { + return c.ServiceURL("extensions") +} diff --git a/vendor/github.com/rackspace/gophercloud/openstack/common/extensions/urls_test.go b/vendor/github.com/rackspace/gophercloud/openstack/common/extensions/urls_test.go new file mode 100644 index 000000000..3223b1ca8 --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/openstack/common/extensions/urls_test.go @@ -0,0 +1,26 @@ +package extensions + +import ( + "testing" + + "github.com/rackspace/gophercloud" + th "github.com/rackspace/gophercloud/testhelper" +) + +const endpoint = "http://localhost:57909/" + +func endpointClient() *gophercloud.ServiceClient { + return &gophercloud.ServiceClient{Endpoint: endpoint} +} + +func TestExtensionURL(t *testing.T) { + actual := ExtensionURL(endpointClient(), "agent") + expected := endpoint + "extensions/agent" + th.AssertEquals(t, expected, actual) +} + +func TestListExtensionURL(t *testing.T) { + actual := ListExtensionURL(endpointClient()) + expected := endpoint + "extensions" + th.AssertEquals(t, expected, actual) +} diff --git a/vendor/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/bootfromvolume/requests.go b/vendor/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/bootfromvolume/requests.go new file mode 100644 index 000000000..c8edee0e8 --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/bootfromvolume/requests.go @@ -0,0 +1,111 @@ +package bootfromvolume + +import ( + "errors" + "strconv" + + "github.com/rackspace/gophercloud" + "github.com/rackspace/gophercloud/openstack/compute/v2/servers" +) + +// SourceType represents the type of medium being used to create the volume. +type SourceType string + +const ( + Volume SourceType = "volume" + Snapshot SourceType = "snapshot" + Image SourceType = "image" +) + +// BlockDevice is a structure with options for booting a server instance +// from a volume. The volume may be created from an image, snapshot, or another +// volume. +type BlockDevice struct { + // BootIndex [optional] is the boot index. It defaults to 0. + BootIndex int `json:"boot_index"` + + // DeleteOnTermination [optional] specifies whether or not to delete the attached volume + // when the server is deleted. Defaults to `false`. + DeleteOnTermination bool `json:"delete_on_termination"` + + // DestinationType [optional] is the type that gets created. Possible values are "volume" + // and "local". + DestinationType string `json:"destination_type"` + + // SourceType [required] must be one of: "volume", "snapshot", "image". 
+ SourceType SourceType `json:"source_type"` + + // UUID [required] is the unique identifier for the volume, snapshot, or image (see above) + UUID string `json:"uuid"` + + // VolumeSize [optional] is the size of the volume to create (in gigabytes). + VolumeSize int `json:"volume_size"` +} + +// CreateOptsExt is a structure that extends the server `CreateOpts` structure +// by allowing for a block device mapping. +type CreateOptsExt struct { + servers.CreateOptsBuilder + BlockDevice []BlockDevice `json:"block_device_mapping_v2,omitempty"` +} + +// ToServerCreateMap adds the block device mapping option to the base server +// creation options. +func (opts CreateOptsExt) ToServerCreateMap() (map[string]interface{}, error) { + base, err := opts.CreateOptsBuilder.ToServerCreateMap() + if err != nil { + return nil, err + } + + if len(opts.BlockDevice) == 0 { + return nil, errors.New("Required fields UUID and SourceType not set.") + } + + serverMap := base["server"].(map[string]interface{}) + + blockDevice := make([]map[string]interface{}, len(opts.BlockDevice)) + + for i, bd := range opts.BlockDevice { + if string(bd.SourceType) == "" { + return nil, errors.New("SourceType must be one of: volume, image, snapshot.") + } + + blockDevice[i] = make(map[string]interface{}) + + blockDevice[i]["source_type"] = bd.SourceType + blockDevice[i]["boot_index"] = strconv.Itoa(bd.BootIndex) + blockDevice[i]["delete_on_termination"] = strconv.FormatBool(bd.DeleteOnTermination) + blockDevice[i]["volume_size"] = strconv.Itoa(bd.VolumeSize) + if bd.UUID != "" { + blockDevice[i]["uuid"] = bd.UUID + } + if bd.DestinationType != "" { + blockDevice[i]["destination_type"] = bd.DestinationType + } + + } + serverMap["block_device_mapping_v2"] = blockDevice + + return base, nil +} + +// Create requests the creation of a server from the given block device mapping. +func Create(client *gophercloud.ServiceClient, opts servers.CreateOptsBuilder) servers.CreateResult { + var res servers.CreateResult + + reqBody, err := opts.ToServerCreateMap() + if err != nil { + res.Err = err + return res + } + + // Delete imageName and flavorName that come from ToServerCreateMap(). + // As of Liberty, Boot From Volume is failing if they are passed. 
+ delete(reqBody["server"].(map[string]interface{}), "imageName") + delete(reqBody["server"].(map[string]interface{}), "flavorName") + + _, res.Err = client.Post(createURL(client), reqBody, &res.Body, &gophercloud.RequestOpts{ + OkCodes: []int{200, 202}, + }) + return res +} diff --git a/vendor/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/bootfromvolume/requests_test.go b/vendor/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/bootfromvolume/requests_test.go new file mode 100644 index 000000000..8a7fa7467 --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/bootfromvolume/requests_test.go @@ -0,0 +1,53 @@ +package bootfromvolume + +import ( + "testing" + + "github.com/rackspace/gophercloud/openstack/compute/v2/servers" + th "github.com/rackspace/gophercloud/testhelper" +) + +func TestCreateOpts(t *testing.T) { + base := servers.CreateOpts{ + Name: "createdserver", + ImageRef: "asdfasdfasdf", + FlavorRef: "performance1-1", + } + + ext := CreateOptsExt{ + CreateOptsBuilder: base, + BlockDevice: []BlockDevice{ + BlockDevice{ + UUID: "123456", + SourceType: Image, + DestinationType: "volume", + VolumeSize: 10, + }, + }, + } + + expected := ` + { + "server": { + "name": "createdserver", + "imageRef": "asdfasdfasdf", + "flavorRef": "performance1-1", + "flavorName": "", + "imageName": "", + "block_device_mapping_v2":[ + { + "uuid":"123456", + "source_type":"image", + "destination_type":"volume", + "boot_index": "0", + "delete_on_termination": "false", + "volume_size": "10" + } + ] + } + } + ` + actual, err := ext.ToServerCreateMap() + th.AssertNoErr(t, err) + th.CheckJSONEquals(t, expected, actual) +} diff --git a/vendor/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/bootfromvolume/results.go b/vendor/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/bootfromvolume/results.go new file mode 100644 index 000000000..f60329f0f --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/bootfromvolume/results.go @@ -0,0 +1,10 @@ +package bootfromvolume + +import ( + os "github.com/rackspace/gophercloud/openstack/compute/v2/servers" +) + +// CreateResult temporarily contains the response from a Create call. 
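(The CreateResult type documented above is declared just below.) For illustration, a minimal sketch of booting a server from an image-backed volume with the options defined in this package; the image UUID, flavor reference, volume size, and the fmt import are all assumptions.

    // bootFromVolumeExample is a hypothetical helper, for illustration only.
    func bootFromVolumeExample(client *gophercloud.ServiceClient) error {
    	base := servers.CreateOpts{
    		Name:      "bfv-server",
    		ImageRef:  "image-uuid", // illustrative; also referenced by the block device
    		FlavorRef: "flavor-id",
    	}
    	ext := CreateOptsExt{
    		CreateOptsBuilder: base,
    		BlockDevice: []BlockDevice{{
    			UUID:                "image-uuid",
    			SourceType:          Image,
    			DestinationType:     "volume",
    			VolumeSize:          10, // gigabytes
    			DeleteOnTermination: true,
    		}},
    	}
    	// Create posts to os-volumes_boot and yields a servers.CreateResult.
    	server, err := Create(client, ext).Extract()
    	if err != nil {
    		return err
    	}
    	fmt.Println("created server", server.ID)
    	return nil
    }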
+type CreateResult struct { + os.CreateResult +} diff --git a/vendor/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/bootfromvolume/urls.go b/vendor/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/bootfromvolume/urls.go new file mode 100644 index 000000000..0cffe25ff --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/bootfromvolume/urls.go @@ -0,0 +1,7 @@ +package bootfromvolume + +import "github.com/rackspace/gophercloud" + +func createURL(c *gophercloud.ServiceClient) string { + return c.ServiceURL("os-volumes_boot") +} diff --git a/vendor/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/bootfromvolume/urls_test.go b/vendor/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/bootfromvolume/urls_test.go new file mode 100644 index 000000000..6ee647732 --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/bootfromvolume/urls_test.go @@ -0,0 +1,16 @@ +package bootfromvolume + +import ( + "testing" + + th "github.com/rackspace/gophercloud/testhelper" + "github.com/rackspace/gophercloud/testhelper/client" +) + +func TestCreateURL(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + c := client.ServiceClient() + + th.CheckEquals(t, c.Endpoint+"os-volumes_boot", createURL(c)) +} diff --git a/vendor/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/defsecrules/doc.go b/vendor/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/defsecrules/doc.go new file mode 100644 index 000000000..2571a1a5a --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/defsecrules/doc.go @@ -0,0 +1 @@ +package defsecrules diff --git a/vendor/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/defsecrules/fixtures.go b/vendor/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/defsecrules/fixtures.go new file mode 100644 index 000000000..c28e492d3 --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/defsecrules/fixtures.go @@ -0,0 +1,108 @@ +package defsecrules + +import ( + "fmt" + "net/http" + "testing" + + th "github.com/rackspace/gophercloud/testhelper" + fake "github.com/rackspace/gophercloud/testhelper/client" +) + +const rootPath = "/os-security-group-default-rules" + +func mockListRulesResponse(t *testing.T) { + th.Mux.HandleFunc(rootPath, func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + + fmt.Fprintf(w, ` +{ + "security_group_default_rules": [ + { + "from_port": 80, + "id": "{ruleID}", + "ip_protocol": "TCP", + "ip_range": { + "cidr": "10.10.10.0/24" + }, + "to_port": 80 + } + ] +} + `) + }) +} + +func mockCreateRuleResponse(t *testing.T) { + th.Mux.HandleFunc(rootPath, func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "POST") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + + th.TestJSONRequest(t, r, ` +{ + "security_group_default_rule": { + "ip_protocol": "TCP", + "from_port": 80, + "to_port": 80, + "cidr": "10.10.12.0/24" + } +} + `) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + + fmt.Fprintf(w, ` +{ + "security_group_default_rule": { + "from_port": 80, + "id": "{ruleID}", + "ip_protocol": "TCP", + "ip_range": { + "cidr": "10.10.12.0/24" + }, + "to_port": 80 + } +} +`) + }) +} + +func 
mockGetRuleResponse(t *testing.T, ruleID string) {
+	url := rootPath + "/" + ruleID
+	th.Mux.HandleFunc(url, func(w http.ResponseWriter, r *http.Request) {
+		th.TestMethod(t, r, "GET")
+		th.TestHeader(t, r, "X-Auth-Token", fake.TokenID)
+
+		w.Header().Add("Content-Type", "application/json")
+		w.WriteHeader(http.StatusOK)
+
+		fmt.Fprintf(w, `
+{
+  "security_group_default_rule": {
+    "id": "{ruleID}",
+    "from_port": 80,
+    "to_port": 80,
+    "ip_protocol": "TCP",
+    "ip_range": {
+      "cidr": "10.10.12.0/24"
+    }
+  }
+}
+	`)
+	})
+}
+
+func mockDeleteRuleResponse(t *testing.T, ruleID string) {
+	url := rootPath + "/" + ruleID
+	th.Mux.HandleFunc(url, func(w http.ResponseWriter, r *http.Request) {
+		th.TestMethod(t, r, "DELETE")
+		th.TestHeader(t, r, "X-Auth-Token", fake.TokenID)
+		w.Header().Add("Content-Type", "application/json")
+		w.WriteHeader(http.StatusNoContent)
+	})
+}
diff --git a/vendor/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/defsecrules/requests.go b/vendor/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/defsecrules/requests.go
new file mode 100644
index 000000000..9f27ef172
--- /dev/null
+++ b/vendor/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/defsecrules/requests.go
@@ -0,0 +1,95 @@
+package defsecrules
+
+import (
+	"errors"
+
+	"github.com/rackspace/gophercloud"
+	"github.com/rackspace/gophercloud/pagination"
+)
+
+// List will return a collection of default rules.
+func List(client *gophercloud.ServiceClient) pagination.Pager {
+	createPage := func(r pagination.PageResult) pagination.Page {
+		return DefaultRulePage{pagination.SinglePageBase(r)}
+	}
+
+	return pagination.NewPager(client, rootURL(client), createPage)
+}
+
+// CreateOpts represents the configuration for adding a new default rule.
+type CreateOpts struct {
+	// Required - the lower bound of the port range that will be opened.
+	FromPort int `json:"from_port"`
+
+	// Required - the upper bound of the port range that will be opened.
+	ToPort int `json:"to_port"`
+
+	// Required - the protocol type that will be allowed, e.g. TCP.
+	IPProtocol string `json:"ip_protocol"`
+
+	// Required - the IP range that will be the source of network traffic to
+	// your security group. Use 0.0.0.0/0 to allow all IP addresses.
+	CIDR string `json:"cidr,omitempty"`
+}
+
+// CreateOptsBuilder builds the create rule options into a serializable format.
+type CreateOptsBuilder interface {
+	ToRuleCreateMap() (map[string]interface{}, error)
+}
+
+// ToRuleCreateMap builds the create rule options into a serializable format.
+func (opts CreateOpts) ToRuleCreateMap() (map[string]interface{}, error) {
+	rule := make(map[string]interface{})
+
+	if opts.FromPort == 0 {
+		return rule, errors.New("A FromPort must be set")
+	}
+	if opts.ToPort == 0 {
+		return rule, errors.New("A ToPort must be set")
+	}
+	if opts.IPProtocol == "" {
+		return rule, errors.New("An IPProtocol must be set")
+	}
+	if opts.CIDR == "" {
+		return rule, errors.New("A CIDR must be set")
+	}
+
+	rule["from_port"] = opts.FromPort
+	rule["to_port"] = opts.ToPort
+	rule["ip_protocol"] = opts.IPProtocol
+	rule["cidr"] = opts.CIDR
+
+	return map[string]interface{}{"security_group_default_rule": rule}, nil
+}
+
+// Create is the operation responsible for creating a new default rule.
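(The Create function documented above follows.) As a hedged sketch of adding a default rule with these options: the port range, the CIDR, and the fmt import are illustrative assumptions, not part of the vendored source.

    // allowHTTPByDefault is a hypothetical helper, for illustration only.
    func allowHTTPByDefault(client *gophercloud.ServiceClient) error {
    	// Open TCP port 80 to all addresses, matching the validation rules
    	// enforced by ToRuleCreateMap above.
    	rule, err := Create(client, CreateOpts{
    		IPProtocol: "TCP",
    		FromPort:   80,
    		ToPort:     80,
    		CIDR:       "0.0.0.0/0",
    	}).Extract()
    	if err != nil {
    		return err
    	}
    	fmt.Println("created default rule", rule.ID)
    	return nil
    }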
+func Create(client *gophercloud.ServiceClient, opts CreateOptsBuilder) CreateResult { + var result CreateResult + + reqBody, err := opts.ToRuleCreateMap() + if err != nil { + result.Err = err + return result + } + + _, result.Err = client.Post(rootURL(client), reqBody, &result.Body, &gophercloud.RequestOpts{ + OkCodes: []int{200}, + }) + + return result +} + +// Get will return details for a particular default rule. +func Get(client *gophercloud.ServiceClient, id string) GetResult { + var result GetResult + _, result.Err = client.Get(resourceURL(client, id), &result.Body, nil) + return result +} + +// Delete will permanently delete a default rule from the project. +func Delete(client *gophercloud.ServiceClient, id string) gophercloud.ErrResult { + var result gophercloud.ErrResult + _, result.Err = client.Delete(resourceURL(client, id), nil) + return result +} diff --git a/vendor/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/defsecrules/requests_test.go b/vendor/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/defsecrules/requests_test.go new file mode 100644 index 000000000..d4ebe87c5 --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/defsecrules/requests_test.go @@ -0,0 +1,100 @@ +package defsecrules + +import ( + "testing" + + "github.com/rackspace/gophercloud/openstack/compute/v2/extensions/secgroups" + "github.com/rackspace/gophercloud/pagination" + th "github.com/rackspace/gophercloud/testhelper" + "github.com/rackspace/gophercloud/testhelper/client" +) + +const ruleID = "{ruleID}" + +func TestList(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + mockListRulesResponse(t) + + count := 0 + + err := List(client.ServiceClient()).EachPage(func(page pagination.Page) (bool, error) { + count++ + actual, err := ExtractDefaultRules(page) + th.AssertNoErr(t, err) + + expected := []DefaultRule{ + DefaultRule{ + FromPort: 80, + ID: ruleID, + IPProtocol: "TCP", + IPRange: secgroups.IPRange{CIDR: "10.10.10.0/24"}, + ToPort: 80, + }, + } + + th.CheckDeepEquals(t, expected, actual) + + return true, nil + }) + + th.AssertNoErr(t, err) + th.AssertEquals(t, 1, count) +} + +func TestCreate(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + mockCreateRuleResponse(t) + + opts := CreateOpts{ + IPProtocol: "TCP", + FromPort: 80, + ToPort: 80, + CIDR: "10.10.12.0/24", + } + + group, err := Create(client.ServiceClient(), opts).Extract() + th.AssertNoErr(t, err) + + expected := &DefaultRule{ + ID: ruleID, + FromPort: 80, + ToPort: 80, + IPProtocol: "TCP", + IPRange: secgroups.IPRange{CIDR: "10.10.12.0/24"}, + } + th.AssertDeepEquals(t, expected, group) +} + +func TestGet(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + mockGetRuleResponse(t, ruleID) + + group, err := Get(client.ServiceClient(), ruleID).Extract() + th.AssertNoErr(t, err) + + expected := &DefaultRule{ + ID: ruleID, + FromPort: 80, + ToPort: 80, + IPProtocol: "TCP", + IPRange: secgroups.IPRange{CIDR: "10.10.12.0/24"}, + } + + th.AssertDeepEquals(t, expected, group) +} + +func TestDelete(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + mockDeleteRuleResponse(t, ruleID) + + err := Delete(client.ServiceClient(), ruleID).ExtractErr() + th.AssertNoErr(t, err) +} diff --git a/vendor/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/defsecrules/results.go b/vendor/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/defsecrules/results.go new file mode 100644 index 000000000..e588d3e32 --- /dev/null 
+++ b/vendor/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/defsecrules/results.go @@ -0,0 +1,69 @@ +package defsecrules + +import ( + "github.com/mitchellh/mapstructure" + + "github.com/rackspace/gophercloud" + "github.com/rackspace/gophercloud/openstack/compute/v2/extensions/secgroups" + "github.com/rackspace/gophercloud/pagination" +) + +// DefaultRule represents a default rule - which is identical to a +// normal security rule. +type DefaultRule secgroups.Rule + +// DefaultRulePage is a single page of a DefaultRule collection. +type DefaultRulePage struct { + pagination.SinglePageBase +} + +// IsEmpty determines whether or not a page of default rules contains any results. +func (page DefaultRulePage) IsEmpty() (bool, error) { + users, err := ExtractDefaultRules(page) + if err != nil { + return false, err + } + return len(users) == 0, nil +} + +// ExtractDefaultRules returns a slice of DefaultRules contained in a single +// page of results. +func ExtractDefaultRules(page pagination.Page) ([]DefaultRule, error) { + casted := page.(DefaultRulePage).Body + var response struct { + Rules []DefaultRule `mapstructure:"security_group_default_rules"` + } + + err := mapstructure.WeakDecode(casted, &response) + + return response.Rules, err +} + +type commonResult struct { + gophercloud.Result +} + +// CreateResult represents the result of a create operation. +type CreateResult struct { + commonResult +} + +// GetResult represents the result of a get operation. +type GetResult struct { + commonResult +} + +// Extract will extract a DefaultRule struct from most responses. +func (r commonResult) Extract() (*DefaultRule, error) { + if r.Err != nil { + return nil, r.Err + } + + var response struct { + Rule DefaultRule `mapstructure:"security_group_default_rule"` + } + + err := mapstructure.WeakDecode(r.Body, &response) + + return &response.Rule, err +} diff --git a/vendor/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/defsecrules/urls.go b/vendor/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/defsecrules/urls.go new file mode 100644 index 000000000..cc928ab89 --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/defsecrules/urls.go @@ -0,0 +1,13 @@ +package defsecrules + +import "github.com/rackspace/gophercloud" + +const rulepath = "os-security-group-default-rules" + +func resourceURL(c *gophercloud.ServiceClient, id string) string { + return c.ServiceURL(rulepath, id) +} + +func rootURL(c *gophercloud.ServiceClient) string { + return c.ServiceURL(rulepath) +} diff --git a/vendor/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/delegate.go b/vendor/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/delegate.go new file mode 100644 index 000000000..10079097b --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/delegate.go @@ -0,0 +1,23 @@ +package extensions + +import ( + "github.com/rackspace/gophercloud" + common "github.com/rackspace/gophercloud/openstack/common/extensions" + "github.com/rackspace/gophercloud/pagination" +) + +// ExtractExtensions interprets a Page as a slice of Extensions. +func ExtractExtensions(page pagination.Page) ([]common.Extension, error) { + return common.ExtractExtensions(page) +} + +// Get retrieves information for a specific extension using its alias. 
+func Get(c *gophercloud.ServiceClient, alias string) common.GetResult { + return common.Get(c, alias) +} + +// List returns a Pager which allows you to iterate over the full collection of extensions. +// It does not accept query parameters. +func List(c *gophercloud.ServiceClient) pagination.Pager { + return common.List(c) +} diff --git a/vendor/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/delegate_test.go b/vendor/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/delegate_test.go new file mode 100644 index 000000000..c3c525fa2 --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/delegate_test.go @@ -0,0 +1,96 @@ +package extensions + +import ( + "fmt" + "net/http" + "testing" + + common "github.com/rackspace/gophercloud/openstack/common/extensions" + "github.com/rackspace/gophercloud/pagination" + th "github.com/rackspace/gophercloud/testhelper" + "github.com/rackspace/gophercloud/testhelper/client" +) + +func TestList(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + th.Mux.HandleFunc("/extensions", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", client.TokenID) + + w.Header().Add("Content-Type", "application/json") + + fmt.Fprintf(w, ` +{ + "extensions": [ + { + "updated": "2013-01-20T00:00:00-00:00", + "name": "Neutron Service Type Management", + "links": [], + "namespace": "http://docs.openstack.org/ext/neutron/service-type/api/v1.0", + "alias": "service-type", + "description": "API for retrieving service providers for Neutron advanced services" + } + ] +} + `) + }) + + count := 0 + List(client.ServiceClient()).EachPage(func(page pagination.Page) (bool, error) { + count++ + actual, err := ExtractExtensions(page) + th.AssertNoErr(t, err) + + expected := []common.Extension{ + common.Extension{ + Updated: "2013-01-20T00:00:00-00:00", + Name: "Neutron Service Type Management", + Links: []interface{}{}, + Namespace: "http://docs.openstack.org/ext/neutron/service-type/api/v1.0", + Alias: "service-type", + Description: "API for retrieving service providers for Neutron advanced services", + }, + } + th.AssertDeepEquals(t, expected, actual) + + return true, nil + }) + th.CheckEquals(t, 1, count) +} + +func TestGet(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + th.Mux.HandleFunc("/extensions/agent", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", client.TokenID) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + + fmt.Fprintf(w, ` +{ + "extension": { + "updated": "2013-02-03T10:00:00-00:00", + "name": "agent", + "links": [], + "namespace": "http://docs.openstack.org/ext/agent/api/v2.0", + "alias": "agent", + "description": "The agent management extension." 
+ } +} + `) + }) + + ext, err := Get(client.ServiceClient(), "agent").Extract() + th.AssertNoErr(t, err) + + th.AssertEquals(t, ext.Updated, "2013-02-03T10:00:00-00:00") + th.AssertEquals(t, ext.Name, "agent") + th.AssertEquals(t, ext.Namespace, "http://docs.openstack.org/ext/agent/api/v2.0") + th.AssertEquals(t, ext.Alias, "agent") + th.AssertEquals(t, ext.Description, "The agent management extension.") +} diff --git a/vendor/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/diskconfig/doc.go b/vendor/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/diskconfig/doc.go new file mode 100644 index 000000000..80785faca --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/diskconfig/doc.go @@ -0,0 +1,3 @@ +// Package diskconfig provides information and interaction with the Disk +// Config extension that works with the OpenStack Compute service. +package diskconfig diff --git a/vendor/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/diskconfig/requests.go b/vendor/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/diskconfig/requests.go new file mode 100644 index 000000000..7407e0d17 --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/diskconfig/requests.go @@ -0,0 +1,114 @@ +package diskconfig + +import ( + "errors" + + "github.com/rackspace/gophercloud/openstack/compute/v2/servers" +) + +// DiskConfig represents one of the two possible settings for the DiskConfig option when creating, +// rebuilding, or resizing servers: Auto or Manual. +type DiskConfig string + +const ( + // Auto builds a server with a single partition the size of the target flavor disk and + // automatically adjusts the filesystem to fit the entire partition. Auto may only be used with + // images and servers that use a single EXT3 partition. + Auto DiskConfig = "AUTO" + + // Manual builds a server using whatever partition scheme and filesystem are present in the source + // image. If the target flavor disk is larger, the remaining space is left unpartitioned. This + // enables images to have non-EXT3 filesystems, multiple partitions, and so on, and enables you + // to manage the disk configuration. It also results in slightly shorter boot times. + Manual DiskConfig = "MANUAL" +) + +// ErrInvalidDiskConfig is returned if an invalid string is specified for a DiskConfig option. +var ErrInvalidDiskConfig = errors.New("DiskConfig must be either diskconfig.Auto or diskconfig.Manual.") + +// Validate ensures that a DiskConfig contains an appropriate value. +func (config DiskConfig) validate() error { + switch config { + case Auto, Manual: + return nil + default: + return ErrInvalidDiskConfig + } +} + +// CreateOptsExt adds a DiskConfig option to the base CreateOpts. +type CreateOptsExt struct { + servers.CreateOptsBuilder + + // DiskConfig [optional] controls how the created server's disk is partitioned. + DiskConfig DiskConfig `json:"OS-DCF:diskConfig,omitempty"` +} + +// ToServerCreateMap adds the diskconfig option to the base server creation options. 
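(The ToServerCreateMap implementation follows.) For illustration, a minimal sketch of composing the disk config extension with the base server creation options; the image and flavor references and the fmt import are assumptions.

    // createWithAutoDisk is a hypothetical helper, for illustration only.
    func createWithAutoDisk(client *gophercloud.ServiceClient) error {
    	base := servers.CreateOpts{
    		Name:      "diskconfig-demo",
    		ImageRef:  "image-uuid", // illustrative
    		FlavorRef: "flavor-id",  // illustrative
    	}
    	opts := CreateOptsExt{
    		CreateOptsBuilder: base,
    		DiskConfig:        Auto,
    	}
    	// servers.Create accepts any CreateOptsBuilder, so the extension
    	// composes transparently with the base options.
    	server, err := servers.Create(client, opts).Extract()
    	if err != nil {
    		return err
    	}
    	fmt.Println("created", server.ID)
    	return nil
    }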
+func (opts CreateOptsExt) ToServerCreateMap() (map[string]interface{}, error) { + base, err := opts.CreateOptsBuilder.ToServerCreateMap() + if err != nil { + return nil, err + } + + if string(opts.DiskConfig) == "" { + return base, nil + } + + serverMap := base["server"].(map[string]interface{}) + serverMap["OS-DCF:diskConfig"] = string(opts.DiskConfig) + + return base, nil +} + +// RebuildOptsExt adds a DiskConfig option to the base RebuildOpts. +type RebuildOptsExt struct { + servers.RebuildOptsBuilder + + // DiskConfig [optional] controls how the rebuilt server's disk is partitioned. + DiskConfig DiskConfig +} + +// ToServerRebuildMap adds the diskconfig option to the base server rebuild options. +func (opts RebuildOptsExt) ToServerRebuildMap() (map[string]interface{}, error) { + err := opts.DiskConfig.validate() + if err != nil { + return nil, err + } + + base, err := opts.RebuildOptsBuilder.ToServerRebuildMap() + if err != nil { + return nil, err + } + + serverMap := base["rebuild"].(map[string]interface{}) + serverMap["OS-DCF:diskConfig"] = string(opts.DiskConfig) + + return base, nil +} + +// ResizeOptsExt adds a DiskConfig option to the base server resize options. +type ResizeOptsExt struct { + servers.ResizeOptsBuilder + + // DiskConfig [optional] controls how the resized server's disk is partitioned. + DiskConfig DiskConfig +} + +// ToServerResizeMap adds the diskconfig option to the base server creation options. +func (opts ResizeOptsExt) ToServerResizeMap() (map[string]interface{}, error) { + err := opts.DiskConfig.validate() + if err != nil { + return nil, err + } + + base, err := opts.ResizeOptsBuilder.ToServerResizeMap() + if err != nil { + return nil, err + } + + serverMap := base["resize"].(map[string]interface{}) + serverMap["OS-DCF:diskConfig"] = string(opts.DiskConfig) + + return base, nil +} diff --git a/vendor/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/diskconfig/requests_test.go b/vendor/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/diskconfig/requests_test.go new file mode 100644 index 000000000..17418a3ce --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/diskconfig/requests_test.go @@ -0,0 +1,89 @@ +package diskconfig + +import ( + "testing" + + "github.com/rackspace/gophercloud/openstack/compute/v2/servers" + th "github.com/rackspace/gophercloud/testhelper" +) + +func TestCreateOpts(t *testing.T) { + base := servers.CreateOpts{ + Name: "createdserver", + ImageRef: "asdfasdfasdf", + FlavorRef: "performance1-1", + } + + ext := CreateOptsExt{ + CreateOptsBuilder: base, + DiskConfig: Manual, + } + + expected := ` + { + "server": { + "name": "createdserver", + "imageRef": "asdfasdfasdf", + "flavorRef": "performance1-1", + "flavorName": "", + "imageName": "", + "OS-DCF:diskConfig": "MANUAL" + } + } + ` + actual, err := ext.ToServerCreateMap() + th.AssertNoErr(t, err) + th.CheckJSONEquals(t, expected, actual) +} + +func TestRebuildOpts(t *testing.T) { + base := servers.RebuildOpts{ + Name: "rebuiltserver", + AdminPass: "swordfish", + ImageID: "asdfasdfasdf", + } + + ext := RebuildOptsExt{ + RebuildOptsBuilder: base, + DiskConfig: Auto, + } + + actual, err := ext.ToServerRebuildMap() + th.AssertNoErr(t, err) + + expected := ` + { + "rebuild": { + "name": "rebuiltserver", + "imageRef": "asdfasdfasdf", + "adminPass": "swordfish", + "OS-DCF:diskConfig": "AUTO" + } + } + ` + th.CheckJSONEquals(t, expected, actual) +} + +func TestResizeOpts(t *testing.T) { + base := servers.ResizeOpts{ + 
FlavorRef: "performance1-8", + } + + ext := ResizeOptsExt{ + ResizeOptsBuilder: base, + DiskConfig: Auto, + } + + actual, err := ext.ToServerResizeMap() + th.AssertNoErr(t, err) + + expected := ` + { + "resize": { + "flavorRef": "performance1-8", + "OS-DCF:diskConfig": "AUTO" + } + } + ` + th.CheckJSONEquals(t, expected, actual) +} diff --git a/vendor/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/diskconfig/results.go b/vendor/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/diskconfig/results.go new file mode 100644 index 000000000..10ec2dafc --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/diskconfig/results.go @@ -0,0 +1,60 @@ +package diskconfig + +import ( + "github.com/mitchellh/mapstructure" + "github.com/rackspace/gophercloud" + "github.com/rackspace/gophercloud/openstack/compute/v2/servers" + "github.com/rackspace/gophercloud/pagination" +) + +func commonExtract(result gophercloud.Result) (*DiskConfig, error) { + var resp struct { + Server struct { + DiskConfig string `mapstructure:"OS-DCF:diskConfig"` + } `mapstructure:"server"` + } + + err := mapstructure.Decode(result.Body, &resp) + if err != nil { + return nil, err + } + + config := DiskConfig(resp.Server.DiskConfig) + return &config, nil +} + +// ExtractGet returns the disk configuration from a servers.Get call. +func ExtractGet(result servers.GetResult) (*DiskConfig, error) { + return commonExtract(result.Result) +} + +// ExtractUpdate returns the disk configuration from a servers.Update call. +func ExtractUpdate(result servers.UpdateResult) (*DiskConfig, error) { + return commonExtract(result.Result) +} + +// ExtractRebuild returns the disk configuration from a servers.Rebuild call. +func ExtractRebuild(result servers.RebuildResult) (*DiskConfig, error) { + return commonExtract(result.Result) +} + +// ExtractDiskConfig returns the DiskConfig setting for a specific server acquired from an +// servers.ExtractServers call, while iterating through a Pager. 
+func ExtractDiskConfig(page pagination.Page, index int) (*DiskConfig, error) { + casted := page.(servers.ServerPage).Body + + type server struct { + DiskConfig string `mapstructure:"OS-DCF:diskConfig"` + } + var response struct { + Servers []server `mapstructure:"servers"` + } + + err := mapstructure.Decode(casted, &response) + if err != nil { + return nil, err + } + + config := DiskConfig(response.Servers[index].DiskConfig) + return &config, nil +} diff --git a/vendor/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/diskconfig/results_test.go b/vendor/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/diskconfig/results_test.go new file mode 100644 index 000000000..dd8d2b7df --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/diskconfig/results_test.go @@ -0,0 +1,68 @@ +package diskconfig + +import ( + "testing" + + "github.com/rackspace/gophercloud/openstack/compute/v2/servers" + "github.com/rackspace/gophercloud/pagination" + th "github.com/rackspace/gophercloud/testhelper" + "github.com/rackspace/gophercloud/testhelper/client" +) + +func TestExtractGet(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + servers.HandleServerGetSuccessfully(t) + + config, err := ExtractGet(servers.Get(client.ServiceClient(), "1234asdf")) + th.AssertNoErr(t, err) + th.CheckEquals(t, Manual, *config) +} + +func TestExtractUpdate(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + servers.HandleServerUpdateSuccessfully(t) + + r := servers.Update(client.ServiceClient(), "1234asdf", servers.UpdateOpts{ + Name: "new-name", + }) + config, err := ExtractUpdate(r) + th.AssertNoErr(t, err) + th.CheckEquals(t, Manual, *config) +} + +func TestExtractRebuild(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + servers.HandleRebuildSuccessfully(t, servers.SingleServerBody) + + r := servers.Rebuild(client.ServiceClient(), "1234asdf", servers.RebuildOpts{ + Name: "new-name", + AdminPass: "swordfish", + ImageID: "http://104.130.131.164:8774/fcad67a6189847c4aecfa3c81a05783b/images/f90f6034-2570-4974-8351-6b49732ef2eb", + AccessIPv4: "1.2.3.4", + }) + config, err := ExtractRebuild(r) + th.AssertNoErr(t, err) + th.CheckEquals(t, Manual, *config) +} + +func TestExtractList(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + servers.HandleServerListSuccessfully(t) + + pages := 0 + err := servers.List(client.ServiceClient(), nil).EachPage(func(page pagination.Page) (bool, error) { + pages++ + + config, err := ExtractDiskConfig(page, 0) + th.AssertNoErr(t, err) + th.CheckEquals(t, Manual, *config) + + return true, nil + }) + th.AssertNoErr(t, err) + th.CheckEquals(t, pages, 1) +} diff --git a/vendor/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/doc.go b/vendor/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/doc.go new file mode 100644 index 000000000..2b447da1d --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/doc.go @@ -0,0 +1,3 @@ +// Package extensions provides information and interaction with the +// different extensions available for the OpenStack Compute service. 
+package extensions
diff --git a/vendor/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/floatingip/doc.go b/vendor/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/floatingip/doc.go
new file mode 100644
index 000000000..f74f58ce8
--- /dev/null
+++ b/vendor/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/floatingip/doc.go
@@ -0,0 +1,3 @@
+// Package floatingip provides the ability to manage floating IPs through
+// nova-network.
+package floatingip
diff --git a/vendor/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/floatingip/fixtures.go b/vendor/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/floatingip/fixtures.go
new file mode 100644
index 000000000..e47fa4ccd
--- /dev/null
+++ b/vendor/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/floatingip/fixtures.go
@@ -0,0 +1,193 @@
+// +build fixtures
+
+package floatingip
+
+import (
+	"fmt"
+	"net/http"
+	"testing"
+
+	th "github.com/rackspace/gophercloud/testhelper"
+	"github.com/rackspace/gophercloud/testhelper/client"
+)
+
+// ListOutput is a sample response to a List call.
+const ListOutput = `
+{
+  "floating_ips": [
+    {
+      "fixed_ip": null,
+      "id": 1,
+      "instance_id": null,
+      "ip": "10.10.10.1",
+      "pool": "nova"
+    },
+    {
+      "fixed_ip": "166.78.185.201",
+      "id": 2,
+      "instance_id": "4d8c3732-a248-40ed-bebc-539a6ffd25c0",
+      "ip": "10.10.10.2",
+      "pool": "nova"
+    }
+  ]
+}
+`
+
+// GetOutput is a sample response to a Get call.
+const GetOutput = `
+{
+  "floating_ip": {
+    "fixed_ip": "166.78.185.201",
+    "id": 2,
+    "instance_id": "4d8c3732-a248-40ed-bebc-539a6ffd25c0",
+    "ip": "10.10.10.2",
+    "pool": "nova"
+  }
+}
+`
+
+// CreateOutput is a sample response to a Post call.
+const CreateOutput = `
+{
+  "floating_ip": {
+    "fixed_ip": null,
+    "id": 1,
+    "instance_id": null,
+    "ip": "10.10.10.1",
+    "pool": "nova"
+  }
+}
+`
+
+// FirstFloatingIP is the first result in ListOutput.
+var FirstFloatingIP = FloatingIP{
+	ID:   "1",
+	IP:   "10.10.10.1",
+	Pool: "nova",
+}
+
+// SecondFloatingIP is the second result in ListOutput.
+var SecondFloatingIP = FloatingIP{
+	FixedIP:    "166.78.185.201",
+	ID:         "2",
+	InstanceID: "4d8c3732-a248-40ed-bebc-539a6ffd25c0",
+	IP:         "10.10.10.2",
+	Pool:       "nova",
+}
+
+// ExpectedFloatingIPsSlice is the slice of results that should be parsed
+// from ListOutput, in the expected order.
+var ExpectedFloatingIPsSlice = []FloatingIP{FirstFloatingIP, SecondFloatingIP}
+
+// CreatedFloatingIP is the parsed result from CreateOutput.
+var CreatedFloatingIP = FloatingIP{
+	ID:   "1",
+	IP:   "10.10.10.1",
+	Pool: "nova",
+}
+
+// HandleListSuccessfully configures the test server to respond to a List request.
+func HandleListSuccessfully(t *testing.T) {
+	th.Mux.HandleFunc("/os-floating-ips", func(w http.ResponseWriter, r *http.Request) {
+		th.TestMethod(t, r, "GET")
+		th.TestHeader(t, r, "X-Auth-Token", client.TokenID)
+
+		w.Header().Add("Content-Type", "application/json")
+		fmt.Fprintf(w, ListOutput)
+	})
+}
+
+// HandleGetSuccessfully configures the test server to respond to a Get request
+// for an existing floating IP.
+func HandleGetSuccessfully(t *testing.T) {
+	th.Mux.HandleFunc("/os-floating-ips/2", func(w http.ResponseWriter, r *http.Request) {
+		th.TestMethod(t, r, "GET")
+		th.TestHeader(t, r, "X-Auth-Token", client.TokenID)
+
+		w.Header().Add("Content-Type", "application/json")
+		fmt.Fprintf(w, GetOutput)
+	})
+}
+
+// HandleCreateSuccessfully configures the test server to respond to a Create request
+// for a new floating IP.
+func HandleCreateSuccessfully(t *testing.T) {
+	th.Mux.HandleFunc("/os-floating-ips", func(w http.ResponseWriter, r *http.Request) {
+		th.TestMethod(t, r, "POST")
+		th.TestHeader(t, r, "X-Auth-Token", client.TokenID)
+		th.TestJSONRequest(t, r, `
+{
+  "pool": "nova"
+}
+`)
+
+		w.Header().Add("Content-Type", "application/json")
+		fmt.Fprintf(w, CreateOutput)
+	})
+}
+
+// HandleDeleteSuccessfully configures the test server to respond to a Delete request for
+// an existing floating IP.
+func HandleDeleteSuccessfully(t *testing.T) {
+	th.Mux.HandleFunc("/os-floating-ips/1", func(w http.ResponseWriter, r *http.Request) {
+		th.TestMethod(t, r, "DELETE")
+		th.TestHeader(t, r, "X-Auth-Token", client.TokenID)
+
+		w.WriteHeader(http.StatusAccepted)
+	})
+}
+
+// HandleAssociateSuccessfully configures the test server to respond to a Post request
+// to associate an allocated floating IP.
+func HandleAssociateSuccessfully(t *testing.T) {
+	th.Mux.HandleFunc("/servers/4d8c3732-a248-40ed-bebc-539a6ffd25c0/action", func(w http.ResponseWriter, r *http.Request) {
+		th.TestMethod(t, r, "POST")
+		th.TestHeader(t, r, "X-Auth-Token", client.TokenID)
+		th.TestJSONRequest(t, r, `
+{
+  "addFloatingIp": {
+    "address": "10.10.10.2"
+  }
+}
+`)
+
+		w.WriteHeader(http.StatusAccepted)
+	})
+}
+
+// HandleAssociateFixedSuccessfully configures the test server to respond to a Post request
+// to associate an allocated floating IP with a specific fixed IP address.
+func HandleAssociateFixedSuccessfully(t *testing.T) {
+	th.Mux.HandleFunc("/servers/4d8c3732-a248-40ed-bebc-539a6ffd25c0/action", func(w http.ResponseWriter, r *http.Request) {
+		th.TestMethod(t, r, "POST")
+		th.TestHeader(t, r, "X-Auth-Token", client.TokenID)
+		th.TestJSONRequest(t, r, `
+{
+  "addFloatingIp": {
+    "address": "10.10.10.2",
+    "fixed_address": "166.78.185.201"
+  }
+}
+`)
+
+		w.WriteHeader(http.StatusAccepted)
+	})
+}
+
+// HandleDisassociateSuccessfully configures the test server to respond to a Post request
+// to disassociate an allocated floating IP.
+func HandleDisassociateSuccessfully(t *testing.T) {
+	th.Mux.HandleFunc("/servers/4d8c3732-a248-40ed-bebc-539a6ffd25c0/action", func(w http.ResponseWriter, r *http.Request) {
+		th.TestMethod(t, r, "POST")
+		th.TestHeader(t, r, "X-Auth-Token", client.TokenID)
+		th.TestJSONRequest(t, r, `
+{
+  "removeFloatingIp": {
+    "address": "10.10.10.2"
+  }
+}
+`)
+
+		w.WriteHeader(http.StatusAccepted)
+	})
+}
diff --git a/vendor/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/floatingip/requests.go b/vendor/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/floatingip/requests.go
new file mode 100644
index 000000000..820646297
--- /dev/null
+++ b/vendor/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/floatingip/requests.go
@@ -0,0 +1,171 @@
+package floatingip
+
+import (
+	"errors"
+
+	"github.com/rackspace/gophercloud"
+	"github.com/rackspace/gophercloud/pagination"
+)
+
+// List returns a Pager that allows you to iterate over a collection of FloatingIPs.
+func List(client *gophercloud.ServiceClient) pagination.Pager {
+	return pagination.NewPager(client, listURL(client), func(r pagination.PageResult) pagination.Page {
+		return FloatingIPsPage{pagination.SinglePageBase(r)}
+	})
+}
+
+// CreateOptsBuilder describes struct types that can be accepted by the Create call. Notably, the
+// CreateOpts struct in this package does.
+type CreateOptsBuilder interface {
+	ToFloatingIPCreateMap() (map[string]interface{}, error)
+}
+
+// CreateOpts specifies a Floating IP allocation request.
+type CreateOpts struct {
+	// Pool is the pool of floating IPs to allocate one from.
+	Pool string
+}
+
+// AssociateOpts specifies the required information to associate or disassociate a floating IP
+// with an instance.
+type AssociateOpts struct {
+	// ServerID is the UUID of the server.
+	ServerID string
+
+	// FixedIP is an optional fixed IP address of the server.
+	FixedIP string
+
+	// FloatingIP is the floating IP to associate with an instance.
+	FloatingIP string
+}
+
+// ToFloatingIPCreateMap constructs a request body from CreateOpts.
+func (opts CreateOpts) ToFloatingIPCreateMap() (map[string]interface{}, error) {
+	if opts.Pool == "" {
+		return nil, errors.New("Missing field required for floating IP creation: Pool")
+	}
+
+	return map[string]interface{}{"pool": opts.Pool}, nil
+}
+
+// ToAssociateMap constructs a request body from AssociateOpts.
+func (opts AssociateOpts) ToAssociateMap() (map[string]interface{}, error) {
+	if opts.ServerID == "" {
+		return nil, errors.New("Required field missing for floating IP association: ServerID")
+	}
+
+	if opts.FloatingIP == "" {
+		return nil, errors.New("Required field missing for floating IP association: FloatingIP")
+	}
+
+	associateInfo := map[string]interface{}{
+		"serverId":   opts.ServerID,
+		"floatingIp": opts.FloatingIP,
+		"fixedIp":    opts.FixedIP,
+	}
+
+	return associateInfo, nil
+}
+
+// Create requests the creation of a new floating IP.
+func Create(client *gophercloud.ServiceClient, opts CreateOptsBuilder) CreateResult {
+	var res CreateResult
+
+	reqBody, err := opts.ToFloatingIPCreateMap()
+	if err != nil {
+		res.Err = err
+		return res
+	}
+
+	_, res.Err = client.Post(createURL(client), reqBody, &res.Body, &gophercloud.RequestOpts{
+		OkCodes: []int{200},
+	})
+	return res
+}
+
+// Get returns data about a previously created FloatingIP.
+func Get(client *gophercloud.ServiceClient, id string) GetResult {
+	var res GetResult
+	_, res.Err = client.Get(getURL(client, id), &res.Body, nil)
+	return res
+}
+
+// Delete requests the deletion of a previously allocated FloatingIP.
+func Delete(client *gophercloud.ServiceClient, id string) DeleteResult {
+	var res DeleteResult
+	_, res.Err = client.Delete(deleteURL(client, id), nil)
+	return res
+}
+
+// association / disassociation
+
+// Associate pairs an allocated floating IP with an instance.
+// Deprecated. Use AssociateInstance.
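(The deprecated Associate helper follows; AssociateInstance is the preferred path.) For illustration, a minimal sketch combining the Create and AssociateInstance calls in this file to allocate an IP from a pool and attach it to a server; the pool name and the server ID are assumptions.

    // attachFloatingIP is a hypothetical helper, for illustration only.
    func attachFloatingIP(client *gophercloud.ServiceClient, serverID string) error {
    	// Allocate a floating IP from the (illustrative) "nova" pool.
    	fip, err := Create(client, CreateOpts{Pool: "nova"}).Extract()
    	if err != nil {
    		return err
    	}
    	// Pair the newly allocated address with the instance.
    	return AssociateInstance(client, AssociateOpts{
    		ServerID:   serverID,
    		FloatingIP: fip.IP,
    	}).ExtractErr()
    }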
+func Associate(client *gophercloud.ServiceClient, serverId, fip string) AssociateResult { + var res AssociateResult + + addFloatingIp := make(map[string]interface{}) + addFloatingIp["address"] = fip + reqBody := map[string]interface{}{"addFloatingIp": addFloatingIp} + + _, res.Err = client.Post(associateURL(client, serverId), reqBody, nil, nil) + return res +} + +// AssociateInstance pairs an allocated floating IP with an instance. +func AssociateInstance(client *gophercloud.ServiceClient, opts AssociateOpts) AssociateResult { + var res AssociateResult + + associateInfo, err := opts.ToAssociateMap() + if err != nil { + res.Err = err + return res + } + + addFloatingIp := make(map[string]interface{}) + addFloatingIp["address"] = associateInfo["floatingIp"].(string) + + // fixedIp is not required + if associateInfo["fixedIp"] != "" { + addFloatingIp["fixed_address"] = associateInfo["fixedIp"].(string) + } + + serverId := associateInfo["serverId"].(string) + + reqBody := map[string]interface{}{"addFloatingIp": addFloatingIp} + _, res.Err = client.Post(associateURL(client, serverId), reqBody, nil, nil) + return res +} + +// Disassociate decouples an allocated floating IP from an instance +// Deprecated. Use DisassociateInstance. +func Disassociate(client *gophercloud.ServiceClient, serverId, fip string) DisassociateResult { + var res DisassociateResult + + removeFloatingIp := make(map[string]interface{}) + removeFloatingIp["address"] = fip + reqBody := map[string]interface{}{"removeFloatingIp": removeFloatingIp} + + _, res.Err = client.Post(disassociateURL(client, serverId), reqBody, nil, nil) + return res +} + +// DisassociateInstance decouples an allocated floating IP from an instance +func DisassociateInstance(client *gophercloud.ServiceClient, opts AssociateOpts) DisassociateResult { + var res DisassociateResult + + associateInfo, err := opts.ToAssociateMap() + if err != nil { + res.Err = err + return res + } + + removeFloatingIp := make(map[string]interface{}) + removeFloatingIp["address"] = associateInfo["floatingIp"].(string) + reqBody := map[string]interface{}{"removeFloatingIp": removeFloatingIp} + + serverId := associateInfo["serverId"].(string) + + _, res.Err = client.Post(disassociateURL(client, serverId), reqBody, nil, nil) + return res +} diff --git a/vendor/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/floatingip/requests_test.go b/vendor/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/floatingip/requests_test.go new file mode 100644 index 000000000..4d86fe246 --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/floatingip/requests_test.go @@ -0,0 +1,123 @@ +package floatingip + +import ( + "testing" + + "github.com/rackspace/gophercloud/pagination" + th "github.com/rackspace/gophercloud/testhelper" + "github.com/rackspace/gophercloud/testhelper/client" +) + +func TestList(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleListSuccessfully(t) + + count := 0 + err := List(client.ServiceClient()).EachPage(func(page pagination.Page) (bool, error) { + count++ + actual, err := ExtractFloatingIPs(page) + th.AssertNoErr(t, err) + th.CheckDeepEquals(t, ExpectedFloatingIPsSlice, actual) + + return true, nil + }) + th.AssertNoErr(t, err) + th.CheckEquals(t, 1, count) +} + +func TestCreate(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleCreateSuccessfully(t) + + actual, err := Create(client.ServiceClient(), CreateOpts{ + Pool: "nova", + }).Extract() + th.AssertNoErr(t, err) + 
th.CheckDeepEquals(t, &CreatedFloatingIP, actual) +} + +func TestGet(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleGetSuccessfully(t) + + actual, err := Get(client.ServiceClient(), "2").Extract() + th.AssertNoErr(t, err) + th.CheckDeepEquals(t, &SecondFloatingIP, actual) +} + +func TestDelete(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleDeleteSuccessfully(t) + + err := Delete(client.ServiceClient(), "1").ExtractErr() + th.AssertNoErr(t, err) +} + +func TestAssociateDeprecated(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleAssociateSuccessfully(t) + serverId := "4d8c3732-a248-40ed-bebc-539a6ffd25c0" + fip := "10.10.10.2" + + err := Associate(client.ServiceClient(), serverId, fip).ExtractErr() + th.AssertNoErr(t, err) +} + +func TestAssociate(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleAssociateSuccessfully(t) + + associateOpts := AssociateOpts{ + ServerID: "4d8c3732-a248-40ed-bebc-539a6ffd25c0", + FloatingIP: "10.10.10.2", + } + + err := AssociateInstance(client.ServiceClient(), associateOpts).ExtractErr() + th.AssertNoErr(t, err) +} + +func TestAssociateFixed(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleAssociateFixedSuccessfully(t) + + associateOpts := AssociateOpts{ + ServerID: "4d8c3732-a248-40ed-bebc-539a6ffd25c0", + FloatingIP: "10.10.10.2", + FixedIP: "166.78.185.201", + } + + err := AssociateInstance(client.ServiceClient(), associateOpts).ExtractErr() + th.AssertNoErr(t, err) +} + +func TestDisassociateDeprecated(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleDisassociateSuccessfully(t) + serverId := "4d8c3732-a248-40ed-bebc-539a6ffd25c0" + fip := "10.10.10.2" + + err := Disassociate(client.ServiceClient(), serverId, fip).ExtractErr() + th.AssertNoErr(t, err) +} + +func TestDisassociateInstance(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleDisassociateSuccessfully(t) + + associateOpts := AssociateOpts{ + ServerID: "4d8c3732-a248-40ed-bebc-539a6ffd25c0", + FloatingIP: "10.10.10.2", + } + + err := DisassociateInstance(client.ServiceClient(), associateOpts).ExtractErr() + th.AssertNoErr(t, err) +} diff --git a/vendor/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/floatingip/results.go b/vendor/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/floatingip/results.go new file mode 100644 index 000000000..be77fa179 --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/floatingip/results.go @@ -0,0 +1,99 @@ +package floatingip + +import ( + "github.com/mitchellh/mapstructure" + "github.com/rackspace/gophercloud" + "github.com/rackspace/gophercloud/pagination" +) + +// A FloatingIP is an IP that can be associated with an instance +type FloatingIP struct { + // ID is a unique ID of the Floating IP + ID string `mapstructure:"id"` + + // FixedIP is the IP of the instance related to the Floating IP + FixedIP string `mapstructure:"fixed_ip,omitempty"` + + // InstanceID is the ID of the instance that is using the Floating IP + InstanceID string `mapstructure:"instance_id"` + + // IP is the actual Floating IP + IP string `mapstructure:"ip"` + + // Pool is the pool of floating IPs that this floating IP belongs to + Pool string `mapstructure:"pool"` +} + +// FloatingIPsPage stores a single, only page of FloatingIPs +// results from a List call. +type FloatingIPsPage struct { + pagination.SinglePageBase +} + +// IsEmpty determines whether or not a FloatingIPsPage is empty. 
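+//
+// Pages returned by List are typically consumed with ExtractFloatingIPs. A
+// minimal paging sketch follows; the "client" value is an assumption (an
+// authenticated compute v2 ServiceClient):
+//
+//	err := floatingip.List(client).EachPage(func(page pagination.Page) (bool, error) {
+//		ips, err := floatingip.ExtractFloatingIPs(page)
+//		if err != nil {
+//			return false, err
+//		}
+//		fmt.Println(len(ips)) // e.g. report how many IPs are allocated
+//		return true, nil
+//	})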
+func (page FloatingIPsPage) IsEmpty() (bool, error) {
+	va, err := ExtractFloatingIPs(page)
+	return len(va) == 0, err
+}
+
+// ExtractFloatingIPs interprets a page of results as a slice of
+// FloatingIPs.
+func ExtractFloatingIPs(page pagination.Page) ([]FloatingIP, error) {
+	casted := page.(FloatingIPsPage).Body
+	var response struct {
+		FloatingIPs []FloatingIP `mapstructure:"floating_ips"`
+	}
+
+	err := mapstructure.WeakDecode(casted, &response)
+
+	return response.FloatingIPs, err
+}
+
+// FloatingIPResult is the raw result of any request that returns a single
+// floating IP. It is embedded by the operation-specific result types below.
+type FloatingIPResult struct {
+	gophercloud.Result
+}
+
+// Extract is a method that attempts to interpret any FloatingIP resource
+// response as a FloatingIP struct.
+func (r FloatingIPResult) Extract() (*FloatingIP, error) {
+	if r.Err != nil {
+		return nil, r.Err
+	}
+
+	var res struct {
+		FloatingIP *FloatingIP `json:"floating_ip" mapstructure:"floating_ip"`
+	}
+
+	err := mapstructure.WeakDecode(r.Body, &res)
+	return res.FloatingIP, err
+}
+
+// CreateResult is the response from a Create operation. Call its Extract method to interpret it
+// as a FloatingIP.
+type CreateResult struct {
+	FloatingIPResult
+}
+
+// GetResult is the response from a Get operation. Call its Extract method to interpret it
+// as a FloatingIP.
+type GetResult struct {
+	FloatingIPResult
+}
+
+// DeleteResult is the response from a Delete operation. Call its ExtractErr method to determine if
+// the call succeeded or failed.
+type DeleteResult struct {
+	gophercloud.ErrResult
+}
+
+// AssociateResult is the response from an Associate operation. Call its ExtractErr method to
+// determine if the call succeeded or failed.
+type AssociateResult struct {
+	gophercloud.ErrResult
+}
+
+// DisassociateResult is the response from a Disassociate operation. Call its ExtractErr method to
+// determine if the call succeeded or failed.
+type DisassociateResult struct { + gophercloud.ErrResult +} diff --git a/vendor/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/floatingip/urls.go b/vendor/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/floatingip/urls.go new file mode 100644 index 000000000..54198f852 --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/floatingip/urls.go @@ -0,0 +1,37 @@ +package floatingip + +import "github.com/rackspace/gophercloud" + +const resourcePath = "os-floating-ips" + +func resourceURL(c *gophercloud.ServiceClient) string { + return c.ServiceURL(resourcePath) +} + +func listURL(c *gophercloud.ServiceClient) string { + return resourceURL(c) +} + +func createURL(c *gophercloud.ServiceClient) string { + return resourceURL(c) +} + +func getURL(c *gophercloud.ServiceClient, id string) string { + return c.ServiceURL(resourcePath, id) +} + +func deleteURL(c *gophercloud.ServiceClient, id string) string { + return getURL(c, id) +} + +func serverURL(c *gophercloud.ServiceClient, serverId string) string { + return c.ServiceURL("servers/" + serverId + "/action") +} + +func associateURL(c *gophercloud.ServiceClient, serverId string) string { + return serverURL(c, serverId) +} + +func disassociateURL(c *gophercloud.ServiceClient, serverId string) string { + return serverURL(c, serverId) +} diff --git a/vendor/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/floatingip/urls_test.go b/vendor/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/floatingip/urls_test.go new file mode 100644 index 000000000..f73d6fb0f --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/floatingip/urls_test.go @@ -0,0 +1,60 @@ +package floatingip + +import ( + "testing" + + th "github.com/rackspace/gophercloud/testhelper" + "github.com/rackspace/gophercloud/testhelper/client" +) + +func TestListURL(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + c := client.ServiceClient() + + th.CheckEquals(t, c.Endpoint+"os-floating-ips", listURL(c)) +} + +func TestCreateURL(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + c := client.ServiceClient() + + th.CheckEquals(t, c.Endpoint+"os-floating-ips", createURL(c)) +} + +func TestGetURL(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + c := client.ServiceClient() + id := "1" + + th.CheckEquals(t, c.Endpoint+"os-floating-ips/"+id, getURL(c, id)) +} + +func TestDeleteURL(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + c := client.ServiceClient() + id := "1" + + th.CheckEquals(t, c.Endpoint+"os-floating-ips/"+id, deleteURL(c, id)) +} + +func TestAssociateURL(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + c := client.ServiceClient() + serverId := "4d8c3732-a248-40ed-bebc-539a6ffd25c0" + + th.CheckEquals(t, c.Endpoint+"servers/"+serverId+"/action", associateURL(c, serverId)) +} + +func TestDisassociateURL(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + c := client.ServiceClient() + serverId := "4d8c3732-a248-40ed-bebc-539a6ffd25c0" + + th.CheckEquals(t, c.Endpoint+"servers/"+serverId+"/action", disassociateURL(c, serverId)) +} diff --git a/vendor/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/keypairs/doc.go b/vendor/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/keypairs/doc.go new file mode 100644 index 000000000..856f41bac --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/keypairs/doc.go @@ -0,0 +1,3 @@ 
+// Package keypairs provides information and interaction with the Keypairs +// extension for the OpenStack Compute service. +package keypairs diff --git a/vendor/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/keypairs/fixtures.go b/vendor/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/keypairs/fixtures.go new file mode 100644 index 000000000..d10af99d0 --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/keypairs/fixtures.go @@ -0,0 +1,171 @@ +// +build fixtures + +package keypairs + +import ( + "fmt" + "net/http" + "testing" + + th "github.com/rackspace/gophercloud/testhelper" + "github.com/rackspace/gophercloud/testhelper/client" +) + +// ListOutput is a sample response to a List call. +const ListOutput = ` +{ + "keypairs": [ + { + "keypair": { + "fingerprint": "15:b0:f8:b3:f9:48:63:71:cf:7b:5b:38:6d:44:2d:4a", + "name": "firstkey", + "public_key": "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAAAgQC+Eo/RZRngaGTkFs7I62ZjsIlO79KklKbMXi8F+KITD4bVQHHn+kV+4gRgkgCRbdoDqoGfpaDFs877DYX9n4z6FrAIZ4PES8TNKhatifpn9NdQYWA+IkU8CuvlEKGuFpKRi/k7JLos/gHi2hy7QUwgtRvcefvD/vgQZOVw/mGR9Q== Generated by Nova\n" + } + }, + { + "keypair": { + "fingerprint": "35:9d:d0:c3:4a:80:d3:d8:86:f1:ca:f7:df:c4:f9:d8", + "name": "secondkey", + "public_key": "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAAAgQC9mC3WZN9UGLxgPBpP7H5jZMc6pKwOoSgre8yun6REFktn/Kz7DUt9jaR1UJyRzHxITfCfAIgSxPdGqB/oF1suMyWgu5i0625vavLB5z5kC8Hq3qZJ9zJO1poE1kyD+htiTtPWJ88e12xuH2XB/CZN9OpEiF98hAagiOE0EnOS5Q== Generated by Nova\n" + } + } + ] +} +` + +// GetOutput is a sample response to a Get call. +const GetOutput = ` +{ + "keypair": { + "public_key": "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAAAgQC+Eo/RZRngaGTkFs7I62ZjsIlO79KklKbMXi8F+KITD4bVQHHn+kV+4gRgkgCRbdoDqoGfpaDFs877DYX9n4z6FrAIZ4PES8TNKhatifpn9NdQYWA+IkU8CuvlEKGuFpKRi/k7JLos/gHi2hy7QUwgtRvcefvD/vgQZOVw/mGR9Q== Generated by Nova\n", + "name": "firstkey", + "fingerprint": "15:b0:f8:b3:f9:48:63:71:cf:7b:5b:38:6d:44:2d:4a" + } +} +` + +// CreateOutput is a sample response to a Create call. +const CreateOutput = ` +{ + "keypair": { + "fingerprint": "35:9d:d0:c3:4a:80:d3:d8:86:f1:ca:f7:df:c4:f9:d8", + "name": "createdkey", + "private_key": "-----BEGIN RSA PRIVATE KEY-----\nMIICXAIBAAKBgQC9mC3WZN9UGLxgPBpP7H5jZMc6pKwOoSgre8yun6REFktn/Kz7\nDUt9jaR1UJyRzHxITfCfAIgSxPdGqB/oF1suMyWgu5i0625vavLB5z5kC8Hq3qZJ\n9zJO1poE1kyD+htiTtPWJ88e12xuH2XB/CZN9OpEiF98hAagiOE0EnOS5QIDAQAB\nAoGAE5XO1mDhORy9COvsg+kYPUhB1GsCYxh+v88wG7HeFDKBY6KUc/Kxo6yoGn5T\nTjRjekyi2KoDZHz4VlIzyZPwFS4I1bf3oCunVoAKzgLdmnTtvRNMC5jFOGc2vUgP\n9bSyRj3S1R4ClVk2g0IDeagko/jc8zzLEYuIK+fbkds79YECQQDt3vcevgegnkga\ntF4NsDmmBPRkcSHCqrANP/7vFcBQN3czxeYYWX3DK07alu6GhH1Y4sHbdm616uU0\nll7xbDzxAkEAzAtN2IyftNygV2EGiaGgqLyo/tD9+Vui2qCQplqe4jvWh/5Sparl\nOjmKo+uAW+hLrLVMnHzRWxbWU8hirH5FNQJATO+ZxCK4etXXAnQmG41NCAqANWB2\nB+2HJbH2NcQ2QHvAHUm741JGn/KI/aBlo7KEjFRDWUVUB5ji64BbUwCsMQJBAIku\nLGcjnBf/oLk+XSPZC2eGd2Ph5G5qYmH0Q2vkTx+wtTn3DV+eNsDfgMtWAJVJ5t61\ngU1QSXyhLPVlKpnnxuUCQC+xvvWjWtsLaFtAsZywJiqLxQzHts8XLGZptYJ5tLWV\nrtmYtBcJCN48RrgQHry/xWYeA4K/AFQpXfNPgprQ96Q=\n-----END RSA PRIVATE KEY-----\n", + "public_key": "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAAAgQC9mC3WZN9UGLxgPBpP7H5jZMc6pKwOoSgre8yun6REFktn/Kz7DUt9jaR1UJyRzHxITfCfAIgSxPdGqB/oF1suMyWgu5i0625vavLB5z5kC8Hq3qZJ9zJO1poE1kyD+htiTtPWJ88e12xuH2XB/CZN9OpEiF98hAagiOE0EnOS5Q== Generated by Nova\n", + "user_id": "fake" + } +} +` + +// ImportOutput is a sample response to a Create call that provides its own public key. 
+const ImportOutput = `
+{
+    "keypair": {
+        "fingerprint": "1e:2c:9b:56:79:4b:45:77:f9:ca:7a:98:2c:b0:d5:3c",
+        "name": "importedkey",
+        "public_key": "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAAAgQDx8nkQv/zgGgB4rMYmIf+6A4l6Rr+o/6lHBQdW5aYd44bd8JttDCE/F/pNRr0lRE+PiqSPO8nDPHw0010JeMH9gYgnnFlyY3/OcJ02RhIPyyxYpv9FhY+2YiUkpwFOcLImyrxEsYXpD/0d3ac30bNH6Sw9JD9UZHYcpSxsIbECHw== Generated by Nova",
+        "user_id": "fake"
+    }
+}
+`
+
+// FirstKeyPair is the first result in ListOutput.
+var FirstKeyPair = KeyPair{
+	Name:        "firstkey",
+	Fingerprint: "15:b0:f8:b3:f9:48:63:71:cf:7b:5b:38:6d:44:2d:4a",
+	PublicKey:   "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAAAgQC+Eo/RZRngaGTkFs7I62ZjsIlO79KklKbMXi8F+KITD4bVQHHn+kV+4gRgkgCRbdoDqoGfpaDFs877DYX9n4z6FrAIZ4PES8TNKhatifpn9NdQYWA+IkU8CuvlEKGuFpKRi/k7JLos/gHi2hy7QUwgtRvcefvD/vgQZOVw/mGR9Q== Generated by Nova\n",
+}
+
+// SecondKeyPair is the second result in ListOutput.
+var SecondKeyPair = KeyPair{
+	Name:        "secondkey",
+	Fingerprint: "35:9d:d0:c3:4a:80:d3:d8:86:f1:ca:f7:df:c4:f9:d8",
+	PublicKey:   "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAAAgQC9mC3WZN9UGLxgPBpP7H5jZMc6pKwOoSgre8yun6REFktn/Kz7DUt9jaR1UJyRzHxITfCfAIgSxPdGqB/oF1suMyWgu5i0625vavLB5z5kC8Hq3qZJ9zJO1poE1kyD+htiTtPWJ88e12xuH2XB/CZN9OpEiF98hAagiOE0EnOS5Q== Generated by Nova\n",
+}
+
+// ExpectedKeyPairSlice is the slice of results that should be parsed from ListOutput, in the expected
+// order.
+var ExpectedKeyPairSlice = []KeyPair{FirstKeyPair, SecondKeyPair}
+
+// CreatedKeyPair is the parsed result from CreateOutput.
+var CreatedKeyPair = KeyPair{
+	Name:        "createdkey",
+	Fingerprint: "35:9d:d0:c3:4a:80:d3:d8:86:f1:ca:f7:df:c4:f9:d8",
+	PublicKey:   "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAAAgQC9mC3WZN9UGLxgPBpP7H5jZMc6pKwOoSgre8yun6REFktn/Kz7DUt9jaR1UJyRzHxITfCfAIgSxPdGqB/oF1suMyWgu5i0625vavLB5z5kC8Hq3qZJ9zJO1poE1kyD+htiTtPWJ88e12xuH2XB/CZN9OpEiF98hAagiOE0EnOS5Q== Generated by Nova\n",
+	PrivateKey:  "-----BEGIN RSA PRIVATE KEY-----\nMIICXAIBAAKBgQC9mC3WZN9UGLxgPBpP7H5jZMc6pKwOoSgre8yun6REFktn/Kz7\nDUt9jaR1UJyRzHxITfCfAIgSxPdGqB/oF1suMyWgu5i0625vavLB5z5kC8Hq3qZJ\n9zJO1poE1kyD+htiTtPWJ88e12xuH2XB/CZN9OpEiF98hAagiOE0EnOS5QIDAQAB\nAoGAE5XO1mDhORy9COvsg+kYPUhB1GsCYxh+v88wG7HeFDKBY6KUc/Kxo6yoGn5T\nTjRjekyi2KoDZHz4VlIzyZPwFS4I1bf3oCunVoAKzgLdmnTtvRNMC5jFOGc2vUgP\n9bSyRj3S1R4ClVk2g0IDeagko/jc8zzLEYuIK+fbkds79YECQQDt3vcevgegnkga\ntF4NsDmmBPRkcSHCqrANP/7vFcBQN3czxeYYWX3DK07alu6GhH1Y4sHbdm616uU0\nll7xbDzxAkEAzAtN2IyftNygV2EGiaGgqLyo/tD9+Vui2qCQplqe4jvWh/5Sparl\nOjmKo+uAW+hLrLVMnHzRWxbWU8hirH5FNQJATO+ZxCK4etXXAnQmG41NCAqANWB2\nB+2HJbH2NcQ2QHvAHUm741JGn/KI/aBlo7KEjFRDWUVUB5ji64BbUwCsMQJBAIku\nLGcjnBf/oLk+XSPZC2eGd2Ph5G5qYmH0Q2vkTx+wtTn3DV+eNsDfgMtWAJVJ5t61\ngU1QSXyhLPVlKpnnxuUCQC+xvvWjWtsLaFtAsZywJiqLxQzHts8XLGZptYJ5tLWV\nrtmYtBcJCN48RrgQHry/xWYeA4K/AFQpXfNPgprQ96Q=\n-----END RSA PRIVATE KEY-----\n",
+	UserID:      "fake",
+}
+
+// ImportedKeyPair is the parsed result from ImportOutput.
+var ImportedKeyPair = KeyPair{
+	Name:        "importedkey",
+	Fingerprint: "1e:2c:9b:56:79:4b:45:77:f9:ca:7a:98:2c:b0:d5:3c",
+	PublicKey:   "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAAAgQDx8nkQv/zgGgB4rMYmIf+6A4l6Rr+o/6lHBQdW5aYd44bd8JttDCE/F/pNRr0lRE+PiqSPO8nDPHw0010JeMH9gYgnnFlyY3/OcJ02RhIPyyxYpv9FhY+2YiUkpwFOcLImyrxEsYXpD/0d3ac30bNH6Sw9JD9UZHYcpSxsIbECHw== Generated by Nova",
+	UserID:      "fake",
+}
+
+// HandleListSuccessfully configures the test server to respond to a List request.
+func HandleListSuccessfully(t *testing.T) {
+	th.Mux.HandleFunc("/os-keypairs", func(w http.ResponseWriter, r *http.Request) {
+		th.TestMethod(t, r, "GET")
+		th.TestHeader(t, r, "X-Auth-Token", client.TokenID)
+
+		w.Header().Add("Content-Type", "application/json")
+		fmt.Fprintf(w, ListOutput)
+	})
+}
+
+// HandleGetSuccessfully configures the test server to respond to a Get request for "firstkey".
+func HandleGetSuccessfully(t *testing.T) {
+	th.Mux.HandleFunc("/os-keypairs/firstkey", func(w http.ResponseWriter, r *http.Request) {
+		th.TestMethod(t, r, "GET")
+		th.TestHeader(t, r, "X-Auth-Token", client.TokenID)
+
+		w.Header().Add("Content-Type", "application/json")
+		fmt.Fprintf(w, GetOutput)
+	})
+}
+
+// HandleCreateSuccessfully configures the test server to respond to a Create request for a new
+// keypair called "createdkey".
+func HandleCreateSuccessfully(t *testing.T) {
+	th.Mux.HandleFunc("/os-keypairs", func(w http.ResponseWriter, r *http.Request) {
+		th.TestMethod(t, r, "POST")
+		th.TestHeader(t, r, "X-Auth-Token", client.TokenID)
+		th.TestJSONRequest(t, r, `{ "keypair": { "name": "createdkey" } }`)
+
+		w.Header().Add("Content-Type", "application/json")
+		fmt.Fprintf(w, CreateOutput)
+	})
+}
+
+// HandleImportSuccessfully configures the test server to respond to an Import request for an
+// existing keypair called "importedkey".
+func HandleImportSuccessfully(t *testing.T) {
+	th.Mux.HandleFunc("/os-keypairs", func(w http.ResponseWriter, r *http.Request) {
+		th.TestMethod(t, r, "POST")
+		th.TestHeader(t, r, "X-Auth-Token", client.TokenID)
+		th.TestJSONRequest(t, r, `
+			{
+				"keypair": {
+					"name": "importedkey",
+					"public_key": "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAAAgQDx8nkQv/zgGgB4rMYmIf+6A4l6Rr+o/6lHBQdW5aYd44bd8JttDCE/F/pNRr0lRE+PiqSPO8nDPHw0010JeMH9gYgnnFlyY3/OcJ02RhIPyyxYpv9FhY+2YiUkpwFOcLImyrxEsYXpD/0d3ac30bNH6Sw9JD9UZHYcpSxsIbECHw== Generated by Nova"
+				}
+			}
+		`)
+
+		w.Header().Add("Content-Type", "application/json")
+		fmt.Fprintf(w, ImportOutput)
+	})
+}
+
+// HandleDeleteSuccessfully configures the test server to respond to a Delete request for a
+// keypair called "deletedkey".
+func HandleDeleteSuccessfully(t *testing.T) {
+	th.Mux.HandleFunc("/os-keypairs/deletedkey", func(w http.ResponseWriter, r *http.Request) {
+		th.TestMethod(t, r, "DELETE")
+		th.TestHeader(t, r, "X-Auth-Token", client.TokenID)
+
+		w.WriteHeader(http.StatusAccepted)
+	})
+}
diff --git a/vendor/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/keypairs/requests.go b/vendor/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/keypairs/requests.go
new file mode 100644
index 000000000..c56ee67ea
--- /dev/null
+++ b/vendor/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/keypairs/requests.go
@@ -0,0 +1,102 @@
+package keypairs
+
+import (
+	"errors"
+
+	"github.com/rackspace/gophercloud"
+	"github.com/rackspace/gophercloud/openstack/compute/v2/servers"
+	"github.com/rackspace/gophercloud/pagination"
+)
+
+// CreateOptsExt adds a KeyPair option to the base CreateOpts.
+type CreateOptsExt struct {
+	servers.CreateOptsBuilder
+	KeyName string `json:"key_name,omitempty"`
+}
+
+// ToServerCreateMap adds the key_name option to the base server creation options.
+func (opts CreateOptsExt) ToServerCreateMap() (map[string]interface{}, error) {
+	base, err := opts.CreateOptsBuilder.ToServerCreateMap()
+	if err != nil {
+		return nil, err
+	}
+
+	if opts.KeyName == "" {
+		return base, nil
+	}
+
+	serverMap := base["server"].(map[string]interface{})
+	serverMap["key_name"] = opts.KeyName
+
+	return base, nil
+}
+
+// List returns a Pager that allows you to iterate over a collection of KeyPairs.
+func List(client *gophercloud.ServiceClient) pagination.Pager {
+	return pagination.NewPager(client, listURL(client), func(r pagination.PageResult) pagination.Page {
+		return KeyPairPage{pagination.SinglePageBase(r)}
+	})
+}
+
+// CreateOptsBuilder describes struct types that can be accepted by the Create call. Notably, the
+// CreateOpts struct in this package does.
+type CreateOptsBuilder interface {
+	ToKeyPairCreateMap() (map[string]interface{}, error)
+}
+
+// CreateOpts specifies keypair creation or import parameters.
+type CreateOpts struct {
+	// Name [required] is a friendly name to refer to this KeyPair in other services.
+	Name string
+
+	// PublicKey [optional] is a pregenerated OpenSSH-formatted public key. If provided, this key
+	// will be imported and no new key will be created.
+	PublicKey string
+}
+
+// ToKeyPairCreateMap constructs a request body from CreateOpts.
+func (opts CreateOpts) ToKeyPairCreateMap() (map[string]interface{}, error) {
+	if opts.Name == "" {
+		return nil, errors.New("Missing field required for keypair creation: Name")
+	}
+
+	keypair := make(map[string]interface{})
+	keypair["name"] = opts.Name
+	if opts.PublicKey != "" {
+		keypair["public_key"] = opts.PublicKey
+	}
+
+	return map[string]interface{}{"keypair": keypair}, nil
+}
+
+// Create requests the creation of a new keypair on the server, or the import of a pre-existing
+// keypair.
+func Create(client *gophercloud.ServiceClient, opts CreateOptsBuilder) CreateResult {
+	var res CreateResult
+
+	reqBody, err := opts.ToKeyPairCreateMap()
+	if err != nil {
+		res.Err = err
+		return res
+	}
+
+	_, res.Err = client.Post(createURL(client), reqBody, &res.Body, &gophercloud.RequestOpts{
+		OkCodes: []int{200},
+	})
+	return res
+}
+
+// Get returns public data about a previously uploaded KeyPair.
+func Get(client *gophercloud.ServiceClient, name string) GetResult {
+	var res GetResult
+	_, res.Err = client.Get(getURL(client, name), &res.Body, nil)
+	return res
+}
+
+// Delete requests the deletion of a previously stored KeyPair from the server.
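+//
+// A minimal create/delete sketch follows; the "client" value is an assumption
+// (an authenticated compute v2 ServiceClient):
+//
+//	kp, err := keypairs.Create(client, keypairs.CreateOpts{Name: "mykey"}).Extract()
+//	if err == nil {
+//		fmt.Println(kp.PrivateKey) // only returned when the key is generated server-side
+//	}
+//	err = keypairs.Delete(client, "mykey").ExtractErr()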
+func Delete(client *gophercloud.ServiceClient, name string) DeleteResult { + var res DeleteResult + _, res.Err = client.Delete(deleteURL(client, name), nil) + return res +} diff --git a/vendor/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/keypairs/requests_test.go b/vendor/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/keypairs/requests_test.go new file mode 100644 index 000000000..67d1833f5 --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/keypairs/requests_test.go @@ -0,0 +1,71 @@ +package keypairs + +import ( + "testing" + + "github.com/rackspace/gophercloud/pagination" + th "github.com/rackspace/gophercloud/testhelper" + "github.com/rackspace/gophercloud/testhelper/client" +) + +func TestList(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleListSuccessfully(t) + + count := 0 + err := List(client.ServiceClient()).EachPage(func(page pagination.Page) (bool, error) { + count++ + actual, err := ExtractKeyPairs(page) + th.AssertNoErr(t, err) + th.CheckDeepEquals(t, ExpectedKeyPairSlice, actual) + + return true, nil + }) + th.AssertNoErr(t, err) + th.CheckEquals(t, 1, count) +} + +func TestCreate(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleCreateSuccessfully(t) + + actual, err := Create(client.ServiceClient(), CreateOpts{ + Name: "createdkey", + }).Extract() + th.AssertNoErr(t, err) + th.CheckDeepEquals(t, &CreatedKeyPair, actual) +} + +func TestImport(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleImportSuccessfully(t) + + actual, err := Create(client.ServiceClient(), CreateOpts{ + Name: "importedkey", + PublicKey: "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAAAgQDx8nkQv/zgGgB4rMYmIf+6A4l6Rr+o/6lHBQdW5aYd44bd8JttDCE/F/pNRr0lRE+PiqSPO8nDPHw0010JeMH9gYgnnFlyY3/OcJ02RhIPyyxYpv9FhY+2YiUkpwFOcLImyrxEsYXpD/0d3ac30bNH6Sw9JD9UZHYcpSxsIbECHw== Generated by Nova", + }).Extract() + th.AssertNoErr(t, err) + th.CheckDeepEquals(t, &ImportedKeyPair, actual) +} + +func TestGet(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleGetSuccessfully(t) + + actual, err := Get(client.ServiceClient(), "firstkey").Extract() + th.AssertNoErr(t, err) + th.CheckDeepEquals(t, &FirstKeyPair, actual) +} + +func TestDelete(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleDeleteSuccessfully(t) + + err := Delete(client.ServiceClient(), "deletedkey").ExtractErr() + th.AssertNoErr(t, err) +} diff --git a/vendor/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/keypairs/results.go b/vendor/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/keypairs/results.go new file mode 100644 index 000000000..f1a0d8e11 --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/keypairs/results.go @@ -0,0 +1,94 @@ +package keypairs + +import ( + "github.com/mitchellh/mapstructure" + "github.com/rackspace/gophercloud" + "github.com/rackspace/gophercloud/pagination" +) + +// KeyPair is an SSH key known to the OpenStack cluster that is available to be injected into +// servers. +type KeyPair struct { + // Name is used to refer to this keypair from other services within this region. + Name string `mapstructure:"name"` + + // Fingerprint is a short sequence of bytes that can be used to authenticate or validate a longer + // public key. + Fingerprint string `mapstructure:"fingerprint"` + + // PublicKey is the public key from this pair, in OpenSSH format. "ssh-rsa AAAAB3Nz..." 
+ PublicKey string `mapstructure:"public_key"` + + // PrivateKey is the private key from this pair, in PEM format. + // "-----BEGIN RSA PRIVATE KEY-----\nMIICXA..." It is only present if this keypair was just + // returned from a Create call + PrivateKey string `mapstructure:"private_key"` + + // UserID is the user who owns this keypair. + UserID string `mapstructure:"user_id"` +} + +// KeyPairPage stores a single, only page of KeyPair results from a List call. +type KeyPairPage struct { + pagination.SinglePageBase +} + +// IsEmpty determines whether or not a KeyPairPage is empty. +func (page KeyPairPage) IsEmpty() (bool, error) { + ks, err := ExtractKeyPairs(page) + return len(ks) == 0, err +} + +// ExtractKeyPairs interprets a page of results as a slice of KeyPairs. +func ExtractKeyPairs(page pagination.Page) ([]KeyPair, error) { + type pair struct { + KeyPair KeyPair `mapstructure:"keypair"` + } + + var resp struct { + KeyPairs []pair `mapstructure:"keypairs"` + } + + err := mapstructure.Decode(page.(KeyPairPage).Body, &resp) + results := make([]KeyPair, len(resp.KeyPairs)) + for i, pair := range resp.KeyPairs { + results[i] = pair.KeyPair + } + return results, err +} + +type keyPairResult struct { + gophercloud.Result +} + +// Extract is a method that attempts to interpret any KeyPair resource response as a KeyPair struct. +func (r keyPairResult) Extract() (*KeyPair, error) { + if r.Err != nil { + return nil, r.Err + } + + var res struct { + KeyPair *KeyPair `json:"keypair" mapstructure:"keypair"` + } + + err := mapstructure.Decode(r.Body, &res) + return res.KeyPair, err +} + +// CreateResult is the response from a Create operation. Call its Extract method to interpret it +// as a KeyPair. +type CreateResult struct { + keyPairResult +} + +// GetResult is the response from a Get operation. Call its Extract method to interpret it +// as a KeyPair. +type GetResult struct { + keyPairResult +} + +// DeleteResult is the response from a Delete operation. Call its Extract method to determine if +// the call succeeded or failed. 
+type DeleteResult struct { + gophercloud.ErrResult +} diff --git a/vendor/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/keypairs/urls.go b/vendor/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/keypairs/urls.go new file mode 100644 index 000000000..702f5329e --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/keypairs/urls.go @@ -0,0 +1,25 @@ +package keypairs + +import "github.com/rackspace/gophercloud" + +const resourcePath = "os-keypairs" + +func resourceURL(c *gophercloud.ServiceClient) string { + return c.ServiceURL(resourcePath) +} + +func listURL(c *gophercloud.ServiceClient) string { + return resourceURL(c) +} + +func createURL(c *gophercloud.ServiceClient) string { + return resourceURL(c) +} + +func getURL(c *gophercloud.ServiceClient, name string) string { + return c.ServiceURL(resourcePath, name) +} + +func deleteURL(c *gophercloud.ServiceClient, name string) string { + return getURL(c, name) +} diff --git a/vendor/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/keypairs/urls_test.go b/vendor/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/keypairs/urls_test.go new file mode 100644 index 000000000..60efd2a5d --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/keypairs/urls_test.go @@ -0,0 +1,40 @@ +package keypairs + +import ( + "testing" + + th "github.com/rackspace/gophercloud/testhelper" + "github.com/rackspace/gophercloud/testhelper/client" +) + +func TestListURL(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + c := client.ServiceClient() + + th.CheckEquals(t, c.Endpoint+"os-keypairs", listURL(c)) +} + +func TestCreateURL(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + c := client.ServiceClient() + + th.CheckEquals(t, c.Endpoint+"os-keypairs", createURL(c)) +} + +func TestGetURL(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + c := client.ServiceClient() + + th.CheckEquals(t, c.Endpoint+"os-keypairs/wat", getURL(c, "wat")) +} + +func TestDeleteURL(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + c := client.ServiceClient() + + th.CheckEquals(t, c.Endpoint+"os-keypairs/wat", deleteURL(c, "wat")) +} diff --git a/vendor/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/networks/doc.go b/vendor/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/networks/doc.go new file mode 100644 index 000000000..fafe4a04d --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/networks/doc.go @@ -0,0 +1,2 @@ +// Package network provides the ability to manage nova-networks +package networks diff --git a/vendor/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/networks/fixtures.go b/vendor/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/networks/fixtures.go new file mode 100644 index 000000000..12b94859b --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/networks/fixtures.go @@ -0,0 +1,209 @@ +// +build fixtures + +package networks + +import ( + "fmt" + "net/http" + "testing" + "time" + + th "github.com/rackspace/gophercloud/testhelper" + "github.com/rackspace/gophercloud/testhelper/client" +) + +// ListOutput is a sample response to a List call. 
+const ListOutput = ` +{ + "networks": [ + { + "bridge": "br100", + "bridge_interface": "eth0", + "broadcast": "10.0.0.7", + "cidr": "10.0.0.0/29", + "cidr_v6": null, + "created_at": "2011-08-15 06:19:19.387525", + "deleted": false, + "deleted_at": null, + "dhcp_start": "10.0.0.3", + "dns1": null, + "dns2": null, + "gateway": "10.0.0.1", + "gateway_v6": null, + "host": "nsokolov-desktop", + "id": "20c8acc0-f747-4d71-a389-46d078ebf047", + "injected": false, + "label": "mynet_0", + "multi_host": false, + "netmask": "255.255.255.248", + "netmask_v6": null, + "priority": null, + "project_id": "1234", + "rxtx_base": null, + "updated_at": "2011-08-16 09:26:13.048257", + "vlan": 100, + "vpn_private_address": "10.0.0.2", + "vpn_public_address": "127.0.0.1", + "vpn_public_port": 1000 + }, + { + "bridge": "br101", + "bridge_interface": "eth0", + "broadcast": "10.0.0.15", + "cidr": "10.0.0.10/29", + "cidr_v6": null, + "created_at": "2011-08-15 06:19:19.885495", + "deleted": false, + "deleted_at": null, + "dhcp_start": "10.0.0.11", + "dns1": null, + "dns2": null, + "gateway": "10.0.0.9", + "gateway_v6": null, + "host": null, + "id": "20c8acc0-f747-4d71-a389-46d078ebf000", + "injected": false, + "label": "mynet_1", + "multi_host": false, + "netmask": "255.255.255.248", + "netmask_v6": null, + "priority": null, + "project_id": null, + "rxtx_base": null, + "updated_at": null, + "vlan": 101, + "vpn_private_address": "10.0.0.10", + "vpn_public_address": null, + "vpn_public_port": 1001 + } + ] +} +` + +// GetOutput is a sample response to a Get call. +const GetOutput = ` +{ + "network": { + "bridge": "br101", + "bridge_interface": "eth0", + "broadcast": "10.0.0.15", + "cidr": "10.0.0.10/29", + "cidr_v6": null, + "created_at": "2011-08-15 06:19:19.885495", + "deleted": false, + "deleted_at": null, + "dhcp_start": "10.0.0.11", + "dns1": null, + "dns2": null, + "gateway": "10.0.0.9", + "gateway_v6": null, + "host": null, + "id": "20c8acc0-f747-4d71-a389-46d078ebf000", + "injected": false, + "label": "mynet_1", + "multi_host": false, + "netmask": "255.255.255.248", + "netmask_v6": null, + "priority": null, + "project_id": null, + "rxtx_base": null, + "updated_at": null, + "vlan": 101, + "vpn_private_address": "10.0.0.10", + "vpn_public_address": null, + "vpn_public_port": 1001 + } +} +` + +// FirstNetwork is the first result in ListOutput. +var nilTime time.Time +var FirstNetwork = Network{ + Bridge: "br100", + BridgeInterface: "eth0", + Broadcast: "10.0.0.7", + CIDR: "10.0.0.0/29", + CIDRv6: "", + CreatedAt: time.Date(2011, 8, 15, 6, 19, 19, 387525000, time.UTC), + Deleted: false, + DeletedAt: nilTime, + DHCPStart: "10.0.0.3", + DNS1: "", + DNS2: "", + Gateway: "10.0.0.1", + Gatewayv6: "", + Host: "nsokolov-desktop", + ID: "20c8acc0-f747-4d71-a389-46d078ebf047", + Injected: false, + Label: "mynet_0", + MultiHost: false, + Netmask: "255.255.255.248", + Netmaskv6: "", + Priority: 0, + ProjectID: "1234", + RXTXBase: 0, + UpdatedAt: time.Date(2011, 8, 16, 9, 26, 13, 48257000, time.UTC), + VLAN: 100, + VPNPrivateAddress: "10.0.0.2", + VPNPublicAddress: "127.0.0.1", + VPNPublicPort: 1000, +} + +// SecondNetwork is the second result in ListOutput. 
+var SecondNetwork = Network{ + Bridge: "br101", + BridgeInterface: "eth0", + Broadcast: "10.0.0.15", + CIDR: "10.0.0.10/29", + CIDRv6: "", + CreatedAt: time.Date(2011, 8, 15, 6, 19, 19, 885495000, time.UTC), + Deleted: false, + DeletedAt: nilTime, + DHCPStart: "10.0.0.11", + DNS1: "", + DNS2: "", + Gateway: "10.0.0.9", + Gatewayv6: "", + Host: "", + ID: "20c8acc0-f747-4d71-a389-46d078ebf000", + Injected: false, + Label: "mynet_1", + MultiHost: false, + Netmask: "255.255.255.248", + Netmaskv6: "", + Priority: 0, + ProjectID: "", + RXTXBase: 0, + UpdatedAt: nilTime, + VLAN: 101, + VPNPrivateAddress: "10.0.0.10", + VPNPublicAddress: "", + VPNPublicPort: 1001, +} + +// ExpectedNetworkSlice is the slice of results that should be parsed +// from ListOutput, in the expected order. +var ExpectedNetworkSlice = []Network{FirstNetwork, SecondNetwork} + +// HandleListSuccessfully configures the test server to respond to a List request. +func HandleListSuccessfully(t *testing.T) { + th.Mux.HandleFunc("/os-networks", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", client.TokenID) + + w.Header().Add("Content-Type", "application/json") + fmt.Fprintf(w, ListOutput) + }) +} + +// HandleGetSuccessfully configures the test server to respond to a Get request +// for an existing network. +func HandleGetSuccessfully(t *testing.T) { + th.Mux.HandleFunc("/os-networks/20c8acc0-f747-4d71-a389-46d078ebf000", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", client.TokenID) + + w.Header().Add("Content-Type", "application/json") + fmt.Fprintf(w, GetOutput) + }) +} diff --git a/vendor/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/networks/requests.go b/vendor/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/networks/requests.go new file mode 100644 index 000000000..eb203878b --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/networks/requests.go @@ -0,0 +1,22 @@ +package networks + +import ( + "github.com/rackspace/gophercloud" + "github.com/rackspace/gophercloud/pagination" +) + +// List returns a Pager that allows you to iterate over a collection of Network. +func List(client *gophercloud.ServiceClient) pagination.Pager { + url := listURL(client) + createPage := func(r pagination.PageResult) pagination.Page { + return NetworkPage{pagination.SinglePageBase(r)} + } + return pagination.NewPager(client, url, createPage) +} + +// Get returns data about a previously created Network. 
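+//
+// A minimal usage sketch follows; the "client" value is an assumption (an
+// authenticated compute v2 ServiceClient):
+//
+//	net, err := networks.Get(client, "20c8acc0-f747-4d71-a389-46d078ebf000").Extract()
+//	if err == nil {
+//		fmt.Println(net.Label, net.CIDR)
+//	}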
+func Get(client *gophercloud.ServiceClient, id string) GetResult {
+	var res GetResult
+	_, res.Err = client.Get(getURL(client, id), &res.Body, nil)
+	return res
+}
diff --git a/vendor/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/networks/requests_test.go b/vendor/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/networks/requests_test.go
new file mode 100644
index 000000000..722b3f0bd
--- /dev/null
+++ b/vendor/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/networks/requests_test.go
@@ -0,0 +1,37 @@
+package networks
+
+import (
+	"testing"
+
+	"github.com/rackspace/gophercloud/pagination"
+	th "github.com/rackspace/gophercloud/testhelper"
+	"github.com/rackspace/gophercloud/testhelper/client"
+)
+
+func TestList(t *testing.T) {
+	th.SetupHTTP()
+	defer th.TeardownHTTP()
+	HandleListSuccessfully(t)
+
+	count := 0
+	err := List(client.ServiceClient()).EachPage(func(page pagination.Page) (bool, error) {
+		count++
+		actual, err := ExtractNetworks(page)
+		th.AssertNoErr(t, err)
+		th.CheckDeepEquals(t, ExpectedNetworkSlice, actual)
+
+		return true, nil
+	})
+	th.AssertNoErr(t, err)
+	th.CheckEquals(t, 1, count)
+}
+
+func TestGet(t *testing.T) {
+	th.SetupHTTP()
+	defer th.TeardownHTTP()
+	HandleGetSuccessfully(t)
+
+	actual, err := Get(client.ServiceClient(), "20c8acc0-f747-4d71-a389-46d078ebf000").Extract()
+	th.AssertNoErr(t, err)
+	th.CheckDeepEquals(t, &SecondNetwork, actual)
+}
diff --git a/vendor/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/networks/results.go b/vendor/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/networks/results.go
new file mode 100644
index 000000000..55b361d7f
--- /dev/null
+++ b/vendor/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/networks/results.go
@@ -0,0 +1,222 @@
+package networks
+
+import (
+	"fmt"
+	"time"
+
+	"github.com/mitchellh/mapstructure"
+	"github.com/rackspace/gophercloud"
+	"github.com/rackspace/gophercloud/pagination"
+)
+
+// A Network represents a nova-network that an instance communicates on.
+type Network struct {
+	// Bridge is the bridge that VIFs on this network are connected to.
+	Bridge string `mapstructure:"bridge"`
+
+	// BridgeInterface is the interface that is connected to the Bridge.
+	BridgeInterface string `mapstructure:"bridge_interface"`
+
+	// Broadcast is the broadcast address of the network.
+	Broadcast string `mapstructure:"broadcast"`
+
+	// CIDR is the IPv4 subnet.
+	CIDR string `mapstructure:"cidr"`
+
+	// CIDRv6 is the IPv6 subnet.
+	CIDRv6 string `mapstructure:"cidr_v6"`
+
+	// CreatedAt is when the network was created.
+	CreatedAt time.Time `mapstructure:"-"`
+
+	// Deleted shows if the network has been deleted.
+	Deleted bool `mapstructure:"deleted"`
+
+	// DeletedAt is the time when the network was deleted.
+	DeletedAt time.Time `mapstructure:"-"`
+
+	// DHCPStart is the start of the DHCP address range.
+	DHCPStart string `mapstructure:"dhcp_start"`
+
+	// DNS1 is the first DNS server to use through DHCP.
+	DNS1 string `mapstructure:"dns_1"`
+
+	// DNS2 is the second DNS server to use through DHCP.
+	DNS2 string `mapstructure:"dns_2"`
+
+	// Gateway is the network gateway.
+	Gateway string `mapstructure:"gateway"`
+
+	// Gatewayv6 is the IPv6 network gateway.
+	Gatewayv6 string `mapstructure:"gateway_v6"`
+
+	// Host is the host that the network service is running on.
+	Host string `mapstructure:"host"`
+
+	// ID is the UUID of the network.
+	ID string `mapstructure:"id"`
+
+	// Injected determines if network information is injected into the host.
+	Injected bool `mapstructure:"injected"`
+
+	// Label is the common name that the network has.
+	Label string `mapstructure:"label"`
+
+	// MultiHost is whether multi-host networking is enabled.
+	MultiHost bool `mapstructure:"multi_host"`
+
+	// Netmask is the network netmask.
+	Netmask string `mapstructure:"netmask"`
+
+	// Netmaskv6 is the IPv6 netmask.
+	Netmaskv6 string `mapstructure:"netmask_v6"`
+
+	// Priority is the network interface priority.
+	Priority int `mapstructure:"priority"`
+
+	// ProjectID is the project associated with this network.
+	ProjectID string `mapstructure:"project_id"`
+
+	// RXTXBase configures bandwidth entitlement.
+	RXTXBase int `mapstructure:"rxtx_base"`
+
+	// UpdatedAt is the time when the network was last updated.
+	UpdatedAt time.Time `mapstructure:"-"`
+
+	// VLAN is the vlan this network runs on.
+	VLAN int `mapstructure:"vlan"`
+
+	// VPNPrivateAddress is the private address of the CloudPipe VPN.
+	VPNPrivateAddress string `mapstructure:"vpn_private_address"`
+
+	// VPNPublicAddress is the public address of the CloudPipe VPN.
+	VPNPublicAddress string `mapstructure:"vpn_public_address"`
+
+	// VPNPublicPort is the port of the CloudPipe VPN.
+	VPNPublicPort int `mapstructure:"vpn_public_port"`
+}
+
+// NetworkPage stores a single page (the only page) of Networks
+// results from a List call.
+type NetworkPage struct {
+	pagination.SinglePageBase
+}
+
+// IsEmpty determines whether or not a NetworkPage is empty.
+func (page NetworkPage) IsEmpty() (bool, error) {
+	va, err := ExtractNetworks(page)
+	return len(va) == 0, err
+}
+
+// ExtractNetworks interprets a page of results as a slice of Networks.
+func ExtractNetworks(page pagination.Page) ([]Network, error) {
+	var res struct {
+		Networks []Network `mapstructure:"networks"`
+	}
+
+	err := mapstructure.Decode(page.(NetworkPage).Body, &res)
+
+	var rawNetworks []interface{}
+	body := page.(NetworkPage).Body
+	switch body.(type) {
+	case map[string]interface{}:
+		rawNetworks = body.(map[string]interface{})["networks"].([]interface{})
+	case map[string][]interface{}:
+		rawNetworks = body.(map[string][]interface{})["networks"]
+	default:
+		return res.Networks, fmt.Errorf("Unknown type")
+	}
+
+	for i := range rawNetworks {
+		thisNetwork := rawNetworks[i].(map[string]interface{})
+		if t, ok := thisNetwork["created_at"].(string); ok && t != "" {
+			createdAt, err := time.Parse("2006-01-02 15:04:05.000000", t)
+			if err != nil {
+				return res.Networks, err
+			}
+			res.Networks[i].CreatedAt = createdAt
+		}
+
+		if t, ok := thisNetwork["updated_at"].(string); ok && t != "" {
+			updatedAt, err := time.Parse("2006-01-02 15:04:05.000000", t)
+			if err != nil {
+				return res.Networks, err
+			}
+			res.Networks[i].UpdatedAt = updatedAt
+		}
+
+		if t, ok := thisNetwork["deleted_at"].(string); ok && t != "" {
+			deletedAt, err := time.Parse("2006-01-02 15:04:05.000000", t)
+			if err != nil {
+				return res.Networks, err
+			}
+			res.Networks[i].DeletedAt = deletedAt
+		}
+	}
+
+	return res.Networks, err
+}
+
+// NetworkResult is the raw result of any request that returns a single
+// network. It is embedded by the operation-specific result types below.
+type NetworkResult struct {
+	gophercloud.Result
+}
+
+// Extract is a method that attempts to interpret any Network resource
+// response as a Network struct.
+func (r NetworkResult) Extract() (*Network, error) { + if r.Err != nil { + return nil, r.Err + } + + var res struct { + Network *Network `json:"network" mapstructure:"network"` + } + + config := &mapstructure.DecoderConfig{ + Result: &res, + WeaklyTypedInput: true, + } + decoder, err := mapstructure.NewDecoder(config) + if err != nil { + return nil, err + } + + if err := decoder.Decode(r.Body); err != nil { + return nil, err + } + + b := r.Body.(map[string]interface{})["network"].(map[string]interface{}) + + if t, ok := b["created_at"].(string); ok && t != "" { + createdAt, err := time.Parse("2006-01-02 15:04:05.000000", t) + if err != nil { + return res.Network, err + } + res.Network.CreatedAt = createdAt + } + + if t, ok := b["updated_at"].(string); ok && t != "" { + updatedAt, err := time.Parse("2006-01-02 15:04:05.000000", t) + if err != nil { + return res.Network, err + } + res.Network.UpdatedAt = updatedAt + } + + if t, ok := b["deleted_at"].(string); ok && t != "" { + deletedAt, err := time.Parse("2006-01-02 15:04:05.000000", t) + if err != nil { + return res.Network, err + } + res.Network.DeletedAt = deletedAt + } + + return res.Network, err + +} + +// GetResult is the response from a Get operation. Call its Extract method to interpret it +// as a Network. +type GetResult struct { + NetworkResult +} diff --git a/vendor/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/networks/urls.go b/vendor/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/networks/urls.go new file mode 100644 index 000000000..69664620a --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/networks/urls.go @@ -0,0 +1,17 @@ +package networks + +import "github.com/rackspace/gophercloud" + +const resourcePath = "os-networks" + +func resourceURL(c *gophercloud.ServiceClient) string { + return c.ServiceURL(resourcePath) +} + +func listURL(c *gophercloud.ServiceClient) string { + return resourceURL(c) +} + +func getURL(c *gophercloud.ServiceClient, id string) string { + return c.ServiceURL(resourcePath, id) +} diff --git a/vendor/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/networks/urls_test.go b/vendor/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/networks/urls_test.go new file mode 100644 index 000000000..be54c90a5 --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/networks/urls_test.go @@ -0,0 +1,25 @@ +package networks + +import ( + "testing" + + th "github.com/rackspace/gophercloud/testhelper" + "github.com/rackspace/gophercloud/testhelper/client" +) + +func TestListURL(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + c := client.ServiceClient() + + th.CheckEquals(t, c.Endpoint+"os-networks", listURL(c)) +} + +func TestGetURL(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + c := client.ServiceClient() + id := "1" + + th.CheckEquals(t, c.Endpoint+"os-networks/"+id, getURL(c, id)) +} diff --git a/vendor/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/schedulerhints/doc.go b/vendor/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/schedulerhints/doc.go new file mode 100644 index 000000000..0bd45661b --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/schedulerhints/doc.go @@ -0,0 +1,3 @@ +// Package schedulerhints enables instances to provide the OpenStack scheduler +// hints about where they should be placed in the cloud. 
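+//
+// A minimal usage sketch follows; "client" (an authenticated compute v2
+// ServiceClient) and "base" (a servers.CreateOpts) are assumptions:
+//
+//	opts := schedulerhints.CreateOptsExt{
+//		CreateOptsBuilder: base,
+//		SchedulerHints: schedulerhints.SchedulerHints{
+//			Group: "101aed42-22d9-4a3e-9ba1-21103b0d1aba",
+//		},
+//	}
+//	server, err := servers.Create(client, opts).Extract()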
+package schedulerhints diff --git a/vendor/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/schedulerhints/requests.go b/vendor/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/schedulerhints/requests.go new file mode 100644 index 000000000..567eef41a --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/schedulerhints/requests.go @@ -0,0 +1,134 @@ +package schedulerhints + +import ( + "fmt" + "net" + "regexp" + "strings" + + "github.com/rackspace/gophercloud/openstack/compute/v2/servers" +) + +// SchedulerHints represents a set of scheduling hints that are passed to the +// OpenStack scheduler +type SchedulerHints struct { + // Group specifies a Server Group to place the instance in. + Group string + + // DifferentHost will place the instance on a compute node that does not + // host the given instances. + DifferentHost []string + + // SameHost will place the instance on a compute node that hosts the given + // instances. + SameHost []string + + // Query is a conditional statement that results in compute nodes able to + // host the instance. + Query []interface{} + + // TargetCell specifies a cell name where the instance will be placed. + TargetCell string + + // BuildNearHostIP specifies a subnet of compute nodes to host the instance. + BuildNearHostIP string +} + +// SchedulerHintsBuilder builds the scheduler hints into a serializable format. +type SchedulerHintsBuilder interface { + ToServerSchedulerHintsMap() (map[string]interface{}, error) +} + +// ToServerSchedulerHintsMap builds the scheduler hints into a serializable format. +func (opts SchedulerHints) ToServerSchedulerHintsMap() (map[string]interface{}, error) { + sh := make(map[string]interface{}) + + uuidRegex, _ := regexp.Compile("^[a-z0-9]{8}-[a-z0-9]{4}-[1-5][a-z0-9]{3}-[a-z0-9]{4}-[a-z0-9]{12}$") + + if opts.Group != "" { + if !uuidRegex.MatchString(opts.Group) { + return nil, fmt.Errorf("Group must be a UUID") + } + sh["group"] = opts.Group + } + + if len(opts.DifferentHost) > 0 { + for _, diffHost := range opts.DifferentHost { + if !uuidRegex.MatchString(diffHost) { + return nil, fmt.Errorf("The hosts in DifferentHost must be in UUID format.") + } + } + sh["different_host"] = opts.DifferentHost + } + + if len(opts.SameHost) > 0 { + for _, sameHost := range opts.SameHost { + if !uuidRegex.MatchString(sameHost) { + return nil, fmt.Errorf("The hosts in SameHost must be in UUID format.") + } + } + sh["same_host"] = opts.SameHost + } + + /* Query can be something simple like: + [">=", "$free_ram_mb", 1024] + + Or more complex like: + ['and', + ['>=', '$free_ram_mb', 1024], + ['>=', '$free_disk_mb', 200 * 1024] + ] + + Because of the possible complexity, just make sure the length is a minimum of 3. + */ + if len(opts.Query) > 0 { + if len(opts.Query) < 3 { + return nil, fmt.Errorf("Query must be a conditional statement in the format of [op,variable,value]") + } + sh["query"] = opts.Query + } + + if opts.TargetCell != "" { + sh["target_cell"] = opts.TargetCell + } + + if opts.BuildNearHostIP != "" { + if _, _, err := net.ParseCIDR(opts.BuildNearHostIP); err != nil { + return nil, fmt.Errorf("BuildNearHostIP must be a valid subnet in the form 192.168.1.1/24") + } + ipParts := strings.Split(opts.BuildNearHostIP, "/") + sh["build_near_host_ip"] = ipParts[0] + sh["cidr"] = "/" + ipParts[1] + } + + return sh, nil +} + +// CreateOptsExt adds a SchedulerHints option to the base CreateOpts. 
+type CreateOptsExt struct { + servers.CreateOptsBuilder + + // SchedulerHints provides a set of hints to the scheduler. + SchedulerHints SchedulerHintsBuilder +} + +// ToServerCreateMap adds the SchedulerHints option to the base server creation options. +func (opts CreateOptsExt) ToServerCreateMap() (map[string]interface{}, error) { + base, err := opts.CreateOptsBuilder.ToServerCreateMap() + if err != nil { + return nil, err + } + + schedulerHints, err := opts.SchedulerHints.ToServerSchedulerHintsMap() + if err != nil { + return nil, err + } + + if len(schedulerHints) == 0 { + return base, nil + } + + base["os:scheduler_hints"] = schedulerHints + + return base, nil +} diff --git a/vendor/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/schedulerhints/requests_test.go b/vendor/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/schedulerhints/requests_test.go new file mode 100644 index 000000000..9b38b35d9 --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/schedulerhints/requests_test.go @@ -0,0 +1,130 @@ +package schedulerhints + +import ( + "testing" + + "github.com/rackspace/gophercloud/openstack/compute/v2/servers" + th "github.com/rackspace/gophercloud/testhelper" +) + +func TestCreateOpts(t *testing.T) { + base := servers.CreateOpts{ + Name: "createdserver", + ImageRef: "asdfasdfasdf", + FlavorRef: "performance1-1", + } + + schedulerHints := SchedulerHints{ + Group: "101aed42-22d9-4a3e-9ba1-21103b0d1aba", + DifferentHost: []string{ + "a0cf03a5-d921-4877-bb5c-86d26cf818e1", + "8c19174f-4220-44f0-824a-cd1eeef10287", + }, + SameHost: []string{ + "a0cf03a5-d921-4877-bb5c-86d26cf818e1", + "8c19174f-4220-44f0-824a-cd1eeef10287", + }, + Query: []interface{}{">=", "$free_ram_mb", "1024"}, + TargetCell: "foobar", + BuildNearHostIP: "192.168.1.1/24", + } + + ext := CreateOptsExt{ + CreateOptsBuilder: base, + SchedulerHints: schedulerHints, + } + + expected := ` + { + "server": { + "name": "createdserver", + "imageRef": "asdfasdfasdf", + "flavorRef": "performance1-1", + "flavorName": "", + "imageName": "" + }, + "os:scheduler_hints": { + "group": "101aed42-22d9-4a3e-9ba1-21103b0d1aba", + "different_host": [ + "a0cf03a5-d921-4877-bb5c-86d26cf818e1", + "8c19174f-4220-44f0-824a-cd1eeef10287" + ], + "same_host": [ + "a0cf03a5-d921-4877-bb5c-86d26cf818e1", + "8c19174f-4220-44f0-824a-cd1eeef10287" + ], + "query": [ + ">=", "$free_ram_mb", "1024" + ], + "target_cell": "foobar", + "build_near_host_ip": "192.168.1.1", + "cidr": "/24" + } + } + ` + actual, err := ext.ToServerCreateMap() + th.AssertNoErr(t, err) + th.CheckJSONEquals(t, expected, actual) +} + +func TestCreateOptsWithComplexQuery(t *testing.T) { + base := servers.CreateOpts{ + Name: "createdserver", + ImageRef: "asdfasdfasdf", + FlavorRef: "performance1-1", + } + + schedulerHints := SchedulerHints{ + Group: "101aed42-22d9-4a3e-9ba1-21103b0d1aba", + DifferentHost: []string{ + "a0cf03a5-d921-4877-bb5c-86d26cf818e1", + "8c19174f-4220-44f0-824a-cd1eeef10287", + }, + SameHost: []string{ + "a0cf03a5-d921-4877-bb5c-86d26cf818e1", + "8c19174f-4220-44f0-824a-cd1eeef10287", + }, + Query: []interface{}{"and", []string{">=", "$free_ram_mb", "1024"}, []string{">=", "$free_disk_mb", "204800"}}, + TargetCell: "foobar", + BuildNearHostIP: "192.168.1.1/24", + } + + ext := CreateOptsExt{ + CreateOptsBuilder: base, + SchedulerHints: schedulerHints, + } + + expected := ` + { + "server": { + "name": "createdserver", + "imageRef": "asdfasdfasdf", + "flavorRef": "performance1-1", + 
"flavorName": "", + "imageName": "" + }, + "os:scheduler_hints": { + "group": "101aed42-22d9-4a3e-9ba1-21103b0d1aba", + "different_host": [ + "a0cf03a5-d921-4877-bb5c-86d26cf818e1", + "8c19174f-4220-44f0-824a-cd1eeef10287" + ], + "same_host": [ + "a0cf03a5-d921-4877-bb5c-86d26cf818e1", + "8c19174f-4220-44f0-824a-cd1eeef10287" + ], + "query": [ + "and", + [">=", "$free_ram_mb", "1024"], + [">=", "$free_disk_mb", "204800"] + ], + "target_cell": "foobar", + "build_near_host_ip": "192.168.1.1", + "cidr": "/24" + } + } + ` + actual, err := ext.ToServerCreateMap() + th.AssertNoErr(t, err) + th.CheckJSONEquals(t, expected, actual) +} diff --git a/vendor/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/secgroups/doc.go b/vendor/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/secgroups/doc.go new file mode 100644 index 000000000..702f32c98 --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/secgroups/doc.go @@ -0,0 +1 @@ +package secgroups diff --git a/vendor/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/secgroups/fixtures.go b/vendor/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/secgroups/fixtures.go new file mode 100644 index 000000000..8c42e48e4 --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/secgroups/fixtures.go @@ -0,0 +1,267 @@ +package secgroups + +import ( + "fmt" + "net/http" + "testing" + + th "github.com/rackspace/gophercloud/testhelper" + fake "github.com/rackspace/gophercloud/testhelper/client" +) + +const rootPath = "/os-security-groups" + +const listGroupsJSON = ` +{ + "security_groups": [ + { + "description": "default", + "id": "{groupID}", + "name": "default", + "rules": [], + "tenant_id": "openstack" + } + ] +} +` + +func mockListGroupsResponse(t *testing.T) { + th.Mux.HandleFunc(rootPath, func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + + fmt.Fprintf(w, listGroupsJSON) + }) +} + +func mockListGroupsByServerResponse(t *testing.T, serverID string) { + url := fmt.Sprintf("/servers/%s%s", serverID, rootPath) + th.Mux.HandleFunc(url, func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + + fmt.Fprintf(w, listGroupsJSON) + }) +} + +func mockCreateGroupResponse(t *testing.T) { + th.Mux.HandleFunc(rootPath, func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "POST") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + + th.TestJSONRequest(t, r, ` +{ + "security_group": { + "name": "test", + "description": "something" + } +} + `) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + + fmt.Fprintf(w, ` +{ + "security_group": { + "description": "something", + "id": "{groupID}", + "name": "test", + "rules": [], + "tenant_id": "openstack" + } +} +`) + }) +} + +func mockUpdateGroupResponse(t *testing.T, groupID string) { + url := fmt.Sprintf("%s/%s", rootPath, groupID) + th.Mux.HandleFunc(url, func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "PUT") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + + th.TestJSONRequest(t, r, ` +{ + "security_group": { + "name": "new_name", + "description": "new_desc" + } +} + `) + + 
w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + + fmt.Fprintf(w, ` +{ + "security_group": { + "description": "something", + "id": "{groupID}", + "name": "new_name", + "rules": [], + "tenant_id": "openstack" + } +} +`) + }) +} + +func mockGetGroupsResponse(t *testing.T, groupID string) { + url := fmt.Sprintf("%s/%s", rootPath, groupID) + th.Mux.HandleFunc(url, func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + + fmt.Fprintf(w, ` +{ + "security_group": { + "description": "default", + "id": "{groupID}", + "name": "default", + "rules": [ + { + "from_port": 80, + "group": { + "tenant_id": "openstack", + "name": "default" + }, + "ip_protocol": "TCP", + "to_port": 85, + "parent_group_id": "{groupID}", + "ip_range": { + "cidr": "0.0.0.0" + }, + "id": "{ruleID}" + } + ], + "tenant_id": "openstack" + } +} + `) + }) +} + +func mockGetNumericIDGroupResponse(t *testing.T, groupID int) { + url := fmt.Sprintf("%s/%d", rootPath, groupID) + th.Mux.HandleFunc(url, func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + + fmt.Fprintf(w, ` +{ + "security_group": { + "id": 12345 + } +} + `) + }) +} + +func mockDeleteGroupResponse(t *testing.T, groupID string) { + url := fmt.Sprintf("%s/%s", rootPath, groupID) + th.Mux.HandleFunc(url, func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "DELETE") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusAccepted) + }) +} + +func mockAddRuleResponse(t *testing.T) { + th.Mux.HandleFunc("/os-security-group-rules", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "POST") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + + th.TestJSONRequest(t, r, ` +{ + "security_group_rule": { + "from_port": 22, + "ip_protocol": "TCP", + "to_port": 22, + "parent_group_id": "{groupID}", + "cidr": "0.0.0.0/0" + } +} `) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + + fmt.Fprintf(w, ` +{ + "security_group_rule": { + "from_port": 22, + "group": {}, + "ip_protocol": "TCP", + "to_port": 22, + "parent_group_id": "{groupID}", + "ip_range": { + "cidr": "0.0.0.0/0" + }, + "id": "{ruleID}" + } +}`) + }) +} + +func mockDeleteRuleResponse(t *testing.T, ruleID string) { + url := fmt.Sprintf("/os-security-group-rules/%s", ruleID) + th.Mux.HandleFunc(url, func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "DELETE") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusAccepted) + }) +} + +func mockAddServerToGroupResponse(t *testing.T, serverID string) { + url := fmt.Sprintf("/servers/%s/action", serverID) + th.Mux.HandleFunc(url, func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "POST") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + + th.TestJSONRequest(t, r, ` +{ + "addSecurityGroup": { + "name": "test" + } +} + `) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusAccepted) + fmt.Fprintf(w, `{}`) + }) +} + +func mockRemoveServerFromGroupResponse(t *testing.T, serverID string) { + url := fmt.Sprintf("/servers/%s/action", serverID) + 
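+	// Note that the add and remove mocks target the same /servers/{id}/action
+	// endpoint; only the action key in the posted JSON body differs.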
th.Mux.HandleFunc(url, func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "POST") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + + th.TestJSONRequest(t, r, ` +{ + "removeSecurityGroup": { + "name": "test" + } +} + `) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusAccepted) + fmt.Fprintf(w, `{}`) + }) +} diff --git a/vendor/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/secgroups/requests.go b/vendor/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/secgroups/requests.go new file mode 100644 index 000000000..4cef48022 --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/secgroups/requests.go @@ -0,0 +1,257 @@ +package secgroups + +import ( + "errors" + + "github.com/rackspace/gophercloud" + "github.com/rackspace/gophercloud/pagination" +) + +func commonList(client *gophercloud.ServiceClient, url string) pagination.Pager { + createPage := func(r pagination.PageResult) pagination.Page { + return SecurityGroupPage{pagination.SinglePageBase(r)} + } + + return pagination.NewPager(client, url, createPage) +} + +// List will return a collection of all the security groups for a particular +// tenant. +func List(client *gophercloud.ServiceClient) pagination.Pager { + return commonList(client, rootURL(client)) +} + +// ListByServer will return a collection of all the security groups which are +// associated with a particular server. +func ListByServer(client *gophercloud.ServiceClient, serverID string) pagination.Pager { + return commonList(client, listByServerURL(client, serverID)) +} + +// GroupOpts is the underlying struct responsible for creating or updating +// security groups. It therefore represents the mutable attributes of a +// security group. +type GroupOpts struct { + // Required - the name of your security group. + Name string `json:"name"` + + // Required - the description of your security group. + Description string `json:"description"` +} + +// CreateOpts is the struct responsible for creating a security group. +type CreateOpts GroupOpts + +// CreateOptsBuilder builds the create options into a serializable format. +type CreateOptsBuilder interface { + ToSecGroupCreateMap() (map[string]interface{}, error) +} + +var ( + errName = errors.New("Name is a required field") + errDesc = errors.New("Description is a required field") +) + +// ToSecGroupCreateMap builds the create options into a serializable format. +func (opts CreateOpts) ToSecGroupCreateMap() (map[string]interface{}, error) { + sg := make(map[string]interface{}) + + if opts.Name == "" { + return sg, errName + } + if opts.Description == "" { + return sg, errDesc + } + + sg["name"] = opts.Name + sg["description"] = opts.Description + + return map[string]interface{}{"security_group": sg}, nil +} + +// Create will create a new security group. +func Create(client *gophercloud.ServiceClient, opts CreateOptsBuilder) CreateResult { + var result CreateResult + + reqBody, err := opts.ToSecGroupCreateMap() + if err != nil { + result.Err = err + return result + } + + _, result.Err = client.Post(rootURL(client), reqBody, &result.Body, &gophercloud.RequestOpts{ + OkCodes: []int{200}, + }) + + return result +} + +// UpdateOpts is the struct responsible for updating an existing security group. +type UpdateOpts GroupOpts + +// UpdateOptsBuilder builds the update options into a serializable format. 
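+//
+// A usage sketch of the update flow (computeClient stands in for an
+// authenticated *gophercloud.ServiceClient for the compute service, obtained
+// elsewhere; the group ID is a placeholder):
+//
+//	opts := secgroups.UpdateOpts{Name: "new_name", Description: "new_desc"}
+//	group, err := secgroups.Update(computeClient, "{groupID}", opts).Extract()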
+type UpdateOptsBuilder interface { + ToSecGroupUpdateMap() (map[string]interface{}, error) +} + +// ToSecGroupUpdateMap builds the update options into a serializable format. +func (opts UpdateOpts) ToSecGroupUpdateMap() (map[string]interface{}, error) { + sg := make(map[string]interface{}) + + if opts.Name == "" { + return sg, errName + } + if opts.Description == "" { + return sg, errDesc + } + + sg["name"] = opts.Name + sg["description"] = opts.Description + + return map[string]interface{}{"security_group": sg}, nil +} + +// Update will modify the mutable properties of a security group, notably its +// name and description. +func Update(client *gophercloud.ServiceClient, id string, opts UpdateOptsBuilder) UpdateResult { + var result UpdateResult + + reqBody, err := opts.ToSecGroupUpdateMap() + if err != nil { + result.Err = err + return result + } + + _, result.Err = client.Put(resourceURL(client, id), reqBody, &result.Body, &gophercloud.RequestOpts{ + OkCodes: []int{200}, + }) + + return result +} + +// Get will return details for a particular security group. +func Get(client *gophercloud.ServiceClient, id string) GetResult { + var result GetResult + _, result.Err = client.Get(resourceURL(client, id), &result.Body, nil) + return result +} + +// Delete will permanently delete a security group from the project. +func Delete(client *gophercloud.ServiceClient, id string) gophercloud.ErrResult { + var result gophercloud.ErrResult + _, result.Err = client.Delete(resourceURL(client, id), nil) + return result +} + +// CreateRuleOpts represents the configuration for adding a new rule to an +// existing security group. +type CreateRuleOpts struct { + // Required - the ID of the group that this rule will be added to. + ParentGroupID string `json:"parent_group_id"` + + // Required - the lower bound of the port range that will be opened. + FromPort int `json:"from_port"` + + // Required - the upper bound of the port range that will be opened. + ToPort int `json:"to_port"` + + // Required - the protocol type that will be allowed, e.g. TCP. + IPProtocol string `json:"ip_protocol"` + + // ONLY required if FromGroupID is blank. This represents the IP range that + // will be the source of network traffic to your security group. Use + // 0.0.0.0/0 to allow all IP addresses. + CIDR string `json:"cidr,omitempty"` + + // ONLY required if CIDR is blank. This value represents the ID of a group + // that forwards traffic to the parent group. So, instead of accepting + // network traffic from an entire IP range, you can instead refine the + // inbound source by an existing security group. + FromGroupID string `json:"group_id,omitempty"` +} + +// CreateRuleOptsBuilder builds the create rule options into a serializable format. +type CreateRuleOptsBuilder interface { + ToRuleCreateMap() (map[string]interface{}, error) +} + +// ToRuleCreateMap builds the create rule options into a serializable format. 
+func (opts CreateRuleOpts) ToRuleCreateMap() (map[string]interface{}, error) {
+	rule := make(map[string]interface{})
+
+	if opts.ParentGroupID == "" {
+		return rule, errors.New("A ParentGroupID must be set")
+	}
+	if opts.FromPort == 0 {
+		return rule, errors.New("A FromPort must be set")
+	}
+	if opts.ToPort == 0 {
+		return rule, errors.New("A ToPort must be set")
+	}
+	if opts.IPProtocol == "" {
+		return rule, errors.New("An IPProtocol must be set")
+	}
+	if opts.CIDR == "" && opts.FromGroupID == "" {
+		return rule, errors.New("A CIDR or FromGroupID must be set")
+	}
+
+	rule["parent_group_id"] = opts.ParentGroupID
+	rule["from_port"] = opts.FromPort
+	rule["to_port"] = opts.ToPort
+	rule["ip_protocol"] = opts.IPProtocol
+
+	if opts.CIDR != "" {
+		rule["cidr"] = opts.CIDR
+	}
+	if opts.FromGroupID != "" {
+		rule["group_id"] = opts.FromGroupID
+	}
+
+	return map[string]interface{}{"security_group_rule": rule}, nil
+}
+
+// CreateRule will add a new rule to an existing security group (whose ID is
+// specified in CreateRuleOpts). You have the option of controlling inbound
+// traffic from either an IP range (CIDR) or from another security group.
+func CreateRule(client *gophercloud.ServiceClient, opts CreateRuleOptsBuilder) CreateRuleResult {
+	var result CreateRuleResult
+
+	reqBody, err := opts.ToRuleCreateMap()
+	if err != nil {
+		result.Err = err
+		return result
+	}
+
+	_, result.Err = client.Post(rootRuleURL(client), reqBody, &result.Body, &gophercloud.RequestOpts{
+		OkCodes: []int{200},
+	})
+
+	return result
+}
+
+// DeleteRule will permanently delete a rule from a security group.
+func DeleteRule(client *gophercloud.ServiceClient, id string) gophercloud.ErrResult {
+	var result gophercloud.ErrResult
+	_, result.Err = client.Delete(resourceRuleURL(client, id), nil)
+	return result
+}
+
+func actionMap(prefix, groupName string) map[string]map[string]string {
+	return map[string]map[string]string{
+		prefix + "SecurityGroup": map[string]string{"name": groupName},
+	}
+}
+
+// AddServerToGroup will associate a server and a security group, enforcing the
+// rules of the group on the server.
+func AddServerToGroup(client *gophercloud.ServiceClient, serverID, groupName string) gophercloud.ErrResult {
+	var result gophercloud.ErrResult
+	_, result.Err = client.Post(serverActionURL(client, serverID), actionMap("add", groupName), &result.Body, nil)
+	return result
+}
+
+// RemoveServerFromGroup will disassociate a server from a security group.
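+//
+// For example (a sketch; computeClient and serverID are assumed to come from
+// the caller's context):
+//
+//	err := secgroups.RemoveServerFromGroup(computeClient, serverID, "test").ExtractErr()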
+func RemoveServerFromGroup(client *gophercloud.ServiceClient, serverID, groupName string) gophercloud.ErrResult {
+	var result gophercloud.ErrResult
+	_, result.Err = client.Post(serverActionURL(client, serverID), actionMap("remove", groupName), &result.Body, nil)
+	return result
+}
diff --git a/vendor/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/secgroups/requests_test.go b/vendor/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/secgroups/requests_test.go
new file mode 100644
index 000000000..4e21d5dea
--- /dev/null
+++ b/vendor/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/secgroups/requests_test.go
@@ -0,0 +1,248 @@
+package secgroups
+
+import (
+	"testing"
+
+	"github.com/rackspace/gophercloud/pagination"
+	th "github.com/rackspace/gophercloud/testhelper"
+	"github.com/rackspace/gophercloud/testhelper/client"
+)
+
+const (
+	serverID = "{serverID}"
+	groupID  = "{groupID}"
+	ruleID   = "{ruleID}"
+)
+
+func TestList(t *testing.T) {
+	th.SetupHTTP()
+	defer th.TeardownHTTP()
+
+	mockListGroupsResponse(t)
+
+	count := 0
+
+	err := List(client.ServiceClient()).EachPage(func(page pagination.Page) (bool, error) {
+		count++
+		actual, err := ExtractSecurityGroups(page)
+		if err != nil {
+			t.Errorf("Failed to extract security groups: %v", err)
+			return false, err
+		}
+
+		expected := []SecurityGroup{
+			SecurityGroup{
+				ID:          groupID,
+				Description: "default",
+				Name:        "default",
+				Rules:       []Rule{},
+				TenantID:    "openstack",
+			},
+		}
+
+		th.CheckDeepEquals(t, expected, actual)
+
+		return true, nil
+	})
+
+	th.AssertNoErr(t, err)
+	th.AssertEquals(t, 1, count)
+}
+
+func TestListByServer(t *testing.T) {
+	th.SetupHTTP()
+	defer th.TeardownHTTP()
+
+	mockListGroupsByServerResponse(t, serverID)
+
+	count := 0
+
+	err := ListByServer(client.ServiceClient(), serverID).EachPage(func(page pagination.Page) (bool, error) {
+		count++
+		actual, err := ExtractSecurityGroups(page)
+		if err != nil {
+			t.Errorf("Failed to extract security groups: %v", err)
+			return false, err
+		}
+
+		expected := []SecurityGroup{
+			SecurityGroup{
+				ID:          groupID,
+				Description: "default",
+				Name:        "default",
+				Rules:       []Rule{},
+				TenantID:    "openstack",
+			},
+		}
+
+		th.CheckDeepEquals(t, expected, actual)
+
+		return true, nil
+	})
+
+	th.AssertNoErr(t, err)
+	th.AssertEquals(t, 1, count)
+}
+
+func TestCreate(t *testing.T) {
+	th.SetupHTTP()
+	defer th.TeardownHTTP()
+
+	mockCreateGroupResponse(t)
+
+	opts := CreateOpts{
+		Name:        "test",
+		Description: "something",
+	}
+
+	group, err := Create(client.ServiceClient(), opts).Extract()
+	th.AssertNoErr(t, err)
+
+	expected := &SecurityGroup{
+		ID:          groupID,
+		Name:        "test",
+		Description: "something",
+		TenantID:    "openstack",
+		Rules:       []Rule{},
+	}
+	th.AssertDeepEquals(t, expected, group)
+}
+
+func TestUpdate(t *testing.T) {
+	th.SetupHTTP()
+	defer th.TeardownHTTP()
+
+	mockUpdateGroupResponse(t, groupID)
+
+	opts := UpdateOpts{
+		Name:        "new_name",
+		Description: "new_desc",
+	}
+
+	group, err := Update(client.ServiceClient(), groupID, opts).Extract()
+	th.AssertNoErr(t, err)
+
+	expected := &SecurityGroup{
+		ID:          groupID,
+		Name:        "new_name",
+		Description: "something",
+		TenantID:    "openstack",
+		Rules:       []Rule{},
+	}
+	th.AssertDeepEquals(t, expected, group)
+}
+
+func TestGet(t *testing.T) {
+	th.SetupHTTP()
+	defer th.TeardownHTTP()
+
+	mockGetGroupsResponse(t, groupID)
+
+	group, err := Get(client.ServiceClient(), groupID).Extract()
+	th.AssertNoErr(t, err)
+
+	expected := &SecurityGroup{
+		ID:          groupID,
+		Description: "default",
+		Name:        "default",
TenantID: "openstack", + Rules: []Rule{ + Rule{ + FromPort: 80, + ToPort: 85, + IPProtocol: "TCP", + IPRange: IPRange{CIDR: "0.0.0.0"}, + Group: Group{TenantID: "openstack", Name: "default"}, + ParentGroupID: groupID, + ID: ruleID, + }, + }, + } + + th.AssertDeepEquals(t, expected, group) +} + +func TestGetNumericID(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + numericGroupID := 12345 + + mockGetNumericIDGroupResponse(t, numericGroupID) + + group, err := Get(client.ServiceClient(), "12345").Extract() + th.AssertNoErr(t, err) + + expected := &SecurityGroup{ID: "12345"} + th.AssertDeepEquals(t, expected, group) +} + +func TestDelete(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + mockDeleteGroupResponse(t, groupID) + + err := Delete(client.ServiceClient(), groupID).ExtractErr() + th.AssertNoErr(t, err) +} + +func TestAddRule(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + mockAddRuleResponse(t) + + opts := CreateRuleOpts{ + ParentGroupID: groupID, + FromPort: 22, + ToPort: 22, + IPProtocol: "TCP", + CIDR: "0.0.0.0/0", + } + + rule, err := CreateRule(client.ServiceClient(), opts).Extract() + th.AssertNoErr(t, err) + + expected := &Rule{ + FromPort: 22, + ToPort: 22, + Group: Group{}, + IPProtocol: "TCP", + ParentGroupID: groupID, + IPRange: IPRange{CIDR: "0.0.0.0/0"}, + ID: ruleID, + } + + th.AssertDeepEquals(t, expected, rule) +} + +func TestDeleteRule(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + mockDeleteRuleResponse(t, ruleID) + + err := DeleteRule(client.ServiceClient(), ruleID).ExtractErr() + th.AssertNoErr(t, err) +} + +func TestAddServer(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + mockAddServerToGroupResponse(t, serverID) + + err := AddServerToGroup(client.ServiceClient(), serverID, "test").ExtractErr() + th.AssertNoErr(t, err) +} + +func TestRemoveServer(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + mockRemoveServerFromGroupResponse(t, serverID) + + err := RemoveServerFromGroup(client.ServiceClient(), serverID, "test").ExtractErr() + th.AssertNoErr(t, err) +} diff --git a/vendor/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/secgroups/results.go b/vendor/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/secgroups/results.go new file mode 100644 index 000000000..478c5dc09 --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/secgroups/results.go @@ -0,0 +1,147 @@ +package secgroups + +import ( + "github.com/mitchellh/mapstructure" + + "github.com/rackspace/gophercloud" + "github.com/rackspace/gophercloud/pagination" +) + +// SecurityGroup represents a security group. +type SecurityGroup struct { + // The unique ID of the group. If Neutron is installed, this ID will be + // represented as a string UUID; if Neutron is not installed, it will be a + // numeric ID. For the sake of consistency, we always cast it to a string. + ID string + + // The human-readable name of the group, which needs to be unique. + Name string + + // The human-readable description of the group. + Description string + + // The rules which determine how this security group operates. + Rules []Rule + + // The ID of the tenant to which this security group belongs. + TenantID string `mapstructure:"tenant_id"` +} + +// Rule represents a security group rule, a policy which determines how a +// security group operates and what inbound traffic it allows in. +type Rule struct { + // The unique ID. 
If Neutron is installed, this ID will be
+	// represented as a string UUID; if Neutron is not installed, it will be a
+	// numeric ID. For the sake of consistency, we always cast it to a string.
+	ID string
+
+	// The lower bound of the port range which this security group should open up.
+	FromPort int `mapstructure:"from_port"`
+
+	// The upper bound of the port range which this security group should open up.
+	ToPort int `mapstructure:"to_port"`
+
+	// The IP protocol (e.g. TCP) which the security group accepts.
+	IPProtocol string `mapstructure:"ip_protocol"`
+
+	// The CIDR IP range whose traffic can be received.
+	IPRange IPRange `mapstructure:"ip_range"`
+
+	// The security group ID to which this rule belongs.
+	ParentGroupID string `mapstructure:"parent_group_id"`
+
+	// Not documented.
+	Group Group
+}
+
+// IPRange represents the IP range whose traffic will be accepted by the
+// security group.
+type IPRange struct {
+	CIDR string
+}
+
+// Group represents a group.
+type Group struct {
+	TenantID string `mapstructure:"tenant_id"`
+	Name     string
+}
+
+// SecurityGroupPage is a single page of a SecurityGroup collection.
+type SecurityGroupPage struct {
+	pagination.SinglePageBase
+}
+
+// IsEmpty determines whether or not a page of Security Groups contains any results.
+func (page SecurityGroupPage) IsEmpty() (bool, error) {
+	groups, err := ExtractSecurityGroups(page)
+	if err != nil {
+		return false, err
+	}
+	return len(groups) == 0, nil
+}
+
+// ExtractSecurityGroups returns a slice of SecurityGroups contained in a single page of results.
+func ExtractSecurityGroups(page pagination.Page) ([]SecurityGroup, error) {
+	casted := page.(SecurityGroupPage).Body
+	var response struct {
+		SecurityGroups []SecurityGroup `mapstructure:"security_groups"`
+	}
+
+	err := mapstructure.WeakDecode(casted, &response)
+
+	return response.SecurityGroups, err
+}
+
+type commonResult struct {
+	gophercloud.Result
+}
+
+// CreateResult represents the result of a create operation.
+type CreateResult struct {
+	commonResult
+}
+
+// GetResult represents the result of a get operation.
+type GetResult struct {
+	commonResult
+}
+
+// UpdateResult represents the result of an update operation.
+type UpdateResult struct {
+	commonResult
+}
+
+// Extract will extract a SecurityGroup struct from most responses.
+func (r commonResult) Extract() (*SecurityGroup, error) {
+	if r.Err != nil {
+		return nil, r.Err
+	}
+
+	var response struct {
+		SecurityGroup SecurityGroup `mapstructure:"security_group"`
+	}
+
+	err := mapstructure.WeakDecode(r.Body, &response)
+
+	return &response.SecurityGroup, err
+}
+
+// CreateRuleResult represents the result when adding rules to a security group.
+type CreateRuleResult struct {
+	gophercloud.Result
+}
+
+// Extract will extract a Rule struct from a CreateRuleResult.
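+//
+// A sketch of typical use (computeClient is assumed to be an authenticated
+// compute ServiceClient; the IDs are placeholders):
+//
+//	rule, err := secgroups.CreateRule(computeClient, secgroups.CreateRuleOpts{
+//		ParentGroupID: "{groupID}",
+//		FromPort:      22,
+//		ToPort:        22,
+//		IPProtocol:    "TCP",
+//		CIDR:          "0.0.0.0/0",
+//	}).Extract()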
+func (r CreateRuleResult) Extract() (*Rule, error) { + if r.Err != nil { + return nil, r.Err + } + + var response struct { + Rule Rule `mapstructure:"security_group_rule"` + } + + err := mapstructure.WeakDecode(r.Body, &response) + + return &response.Rule, err +} diff --git a/vendor/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/secgroups/urls.go b/vendor/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/secgroups/urls.go new file mode 100644 index 000000000..dc53fbfac --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/secgroups/urls.go @@ -0,0 +1,32 @@ +package secgroups + +import "github.com/rackspace/gophercloud" + +const ( + secgrouppath = "os-security-groups" + rulepath = "os-security-group-rules" +) + +func resourceURL(c *gophercloud.ServiceClient, id string) string { + return c.ServiceURL(secgrouppath, id) +} + +func rootURL(c *gophercloud.ServiceClient) string { + return c.ServiceURL(secgrouppath) +} + +func listByServerURL(c *gophercloud.ServiceClient, serverID string) string { + return c.ServiceURL("servers", serverID, secgrouppath) +} + +func rootRuleURL(c *gophercloud.ServiceClient) string { + return c.ServiceURL(rulepath) +} + +func resourceRuleURL(c *gophercloud.ServiceClient, id string) string { + return c.ServiceURL(rulepath, id) +} + +func serverActionURL(c *gophercloud.ServiceClient, id string) string { + return c.ServiceURL("servers", id, "action") +} diff --git a/vendor/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/servergroups/doc.go b/vendor/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/servergroups/doc.go new file mode 100644 index 000000000..1e5ed568d --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/servergroups/doc.go @@ -0,0 +1,2 @@ +// Package servergroups provides the ability to manage server groups +package servergroups diff --git a/vendor/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/servergroups/fixtures.go b/vendor/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/servergroups/fixtures.go new file mode 100644 index 000000000..133fd85ce --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/servergroups/fixtures.go @@ -0,0 +1,161 @@ +// +build fixtures + +package servergroups + +import ( + "fmt" + "net/http" + "testing" + + th "github.com/rackspace/gophercloud/testhelper" + "github.com/rackspace/gophercloud/testhelper/client" +) + +// ListOutput is a sample response to a List call. +const ListOutput = ` +{ + "server_groups": [ + { + "id": "616fb98f-46ca-475e-917e-2563e5a8cd19", + "name": "test", + "policies": [ + "anti-affinity" + ], + "members": [], + "metadata": {} + }, + { + "id": "4d8c3732-a248-40ed-bebc-539a6ffd25c0", + "name": "test2", + "policies": [ + "affinity" + ], + "members": [], + "metadata": {} + } + ] +} +` + +// GetOutput is a sample response to a Get call. +const GetOutput = ` +{ + "server_group": { + "id": "616fb98f-46ca-475e-917e-2563e5a8cd19", + "name": "test", + "policies": [ + "anti-affinity" + ], + "members": [], + "metadata": {} + } +} +` + +// CreateOutput is a sample response to a Post call +const CreateOutput = ` +{ + "server_group": { + "id": "616fb98f-46ca-475e-917e-2563e5a8cd19", + "name": "test", + "policies": [ + "anti-affinity" + ], + "members": [], + "metadata": {} + } +} +` + +// FirstServerGroup is the first result in ListOutput. 
+var FirstServerGroup = ServerGroup{
+	ID:   "616fb98f-46ca-475e-917e-2563e5a8cd19",
+	Name: "test",
+	Policies: []string{
+		"anti-affinity",
+	},
+	Members:  []string{},
+	Metadata: map[string]interface{}{},
+}
+
+// SecondServerGroup is the second result in ListOutput.
+var SecondServerGroup = ServerGroup{
+	ID:   "4d8c3732-a248-40ed-bebc-539a6ffd25c0",
+	Name: "test2",
+	Policies: []string{
+		"affinity",
+	},
+	Members:  []string{},
+	Metadata: map[string]interface{}{},
+}
+
+// ExpectedServerGroupSlice is the slice of results that should be parsed
+// from ListOutput, in the expected order.
+var ExpectedServerGroupSlice = []ServerGroup{FirstServerGroup, SecondServerGroup}
+
+// CreatedServerGroup is the parsed result from CreateOutput.
+var CreatedServerGroup = ServerGroup{
+	ID:   "616fb98f-46ca-475e-917e-2563e5a8cd19",
+	Name: "test",
+	Policies: []string{
+		"anti-affinity",
+	},
+	Members:  []string{},
+	Metadata: map[string]interface{}{},
+}
+
+// HandleListSuccessfully configures the test server to respond to a List request.
+func HandleListSuccessfully(t *testing.T) {
+	th.Mux.HandleFunc("/os-server-groups", func(w http.ResponseWriter, r *http.Request) {
+		th.TestMethod(t, r, "GET")
+		th.TestHeader(t, r, "X-Auth-Token", client.TokenID)
+
+		w.Header().Add("Content-Type", "application/json")
+		fmt.Fprintf(w, ListOutput)
+	})
+}
+
+// HandleGetSuccessfully configures the test server to respond to a Get request
+// for an existing server group.
+func HandleGetSuccessfully(t *testing.T) {
+	th.Mux.HandleFunc("/os-server-groups/4d8c3732-a248-40ed-bebc-539a6ffd25c0", func(w http.ResponseWriter, r *http.Request) {
+		th.TestMethod(t, r, "GET")
+		th.TestHeader(t, r, "X-Auth-Token", client.TokenID)
+
+		w.Header().Add("Content-Type", "application/json")
+		fmt.Fprintf(w, GetOutput)
+	})
+}
+
+// HandleCreateSuccessfully configures the test server to respond to a Create request
+// for a new server group.
+func HandleCreateSuccessfully(t *testing.T) {
+	th.Mux.HandleFunc("/os-server-groups", func(w http.ResponseWriter, r *http.Request) {
+		th.TestMethod(t, r, "POST")
+		th.TestHeader(t, r, "X-Auth-Token", client.TokenID)
+		th.TestJSONRequest(t, r, `
+{
+    "server_group": {
+        "name": "test",
+        "policies": [
+            "anti-affinity"
+        ]
+    }
+}
+`)
+
+		w.Header().Add("Content-Type", "application/json")
+		fmt.Fprintf(w, CreateOutput)
+	})
+}
+
+// HandleDeleteSuccessfully configures the test server to respond to a Delete request
+// for an existing server group.
+func HandleDeleteSuccessfully(t *testing.T) {
+	th.Mux.HandleFunc("/os-server-groups/616fb98f-46ca-475e-917e-2563e5a8cd19", func(w http.ResponseWriter, r *http.Request) {
+		th.TestMethod(t, r, "DELETE")
+		th.TestHeader(t, r, "X-Auth-Token", client.TokenID)
+
+		w.WriteHeader(http.StatusAccepted)
+	})
+}
diff --git a/vendor/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/servergroups/requests.go b/vendor/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/servergroups/requests.go
new file mode 100644
index 000000000..1597b43eb
--- /dev/null
+++ b/vendor/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/servergroups/requests.go
@@ -0,0 +1,77 @@
+package servergroups
+
+import (
+	"errors"
+
+	"github.com/rackspace/gophercloud"
+	"github.com/rackspace/gophercloud/pagination"
+)
+
+// List returns a Pager that allows you to iterate over a collection of ServerGroups.
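+//
+// Pages can be consumed with EachPage and ExtractServerGroups; a sketch
+// (computeClient is assumed to be an authenticated compute ServiceClient):
+//
+//	err := servergroups.List(computeClient).EachPage(func(page pagination.Page) (bool, error) {
+//		groups, err := servergroups.ExtractServerGroups(page)
+//		// inspect groups here
+//		return true, err
+//	})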
+func List(client *gophercloud.ServiceClient) pagination.Pager { + return pagination.NewPager(client, listURL(client), func(r pagination.PageResult) pagination.Page { + return ServerGroupsPage{pagination.SinglePageBase(r)} + }) +} + +// CreateOptsBuilder describes struct types that can be accepted by the Create call. Notably, the +// CreateOpts struct in this package does. +type CreateOptsBuilder interface { + ToServerGroupCreateMap() (map[string]interface{}, error) +} + +// CreateOpts specifies a Server Group allocation request +type CreateOpts struct { + // Name is the name of the server group + Name string + + // Policies are the server group policies + Policies []string +} + +// ToServerGroupCreateMap constructs a request body from CreateOpts. +func (opts CreateOpts) ToServerGroupCreateMap() (map[string]interface{}, error) { + if opts.Name == "" { + return nil, errors.New("Missing field required for server group creation: Name") + } + + if len(opts.Policies) < 1 { + return nil, errors.New("Missing field required for server group creation: Policies") + } + + serverGroup := make(map[string]interface{}) + serverGroup["name"] = opts.Name + serverGroup["policies"] = opts.Policies + + return map[string]interface{}{"server_group": serverGroup}, nil +} + +// Create requests the creation of a new Server Group +func Create(client *gophercloud.ServiceClient, opts CreateOptsBuilder) CreateResult { + var res CreateResult + + reqBody, err := opts.ToServerGroupCreateMap() + if err != nil { + res.Err = err + return res + } + + _, res.Err = client.Post(createURL(client), reqBody, &res.Body, &gophercloud.RequestOpts{ + OkCodes: []int{200}, + }) + return res +} + +// Get returns data about a previously created ServerGroup. +func Get(client *gophercloud.ServiceClient, id string) GetResult { + var res GetResult + _, res.Err = client.Get(getURL(client, id), &res.Body, nil) + return res +} + +// Delete requests the deletion of a previously allocated ServerGroup. 
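+//
+// For example (a sketch; computeClient and the group ID are assumed to come
+// from the caller's context):
+//
+//	err := servergroups.Delete(computeClient, "616fb98f-46ca-475e-917e-2563e5a8cd19").ExtractErr()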
+func Delete(client *gophercloud.ServiceClient, id string) DeleteResult {
+	var res DeleteResult
+	_, res.Err = client.Delete(deleteURL(client, id), nil)
+	return res
+}
diff --git a/vendor/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/servergroups/requests_test.go b/vendor/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/servergroups/requests_test.go
new file mode 100644
index 000000000..07fec51b1
--- /dev/null
+++ b/vendor/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/servergroups/requests_test.go
@@ -0,0 +1,59 @@
+package servergroups
+
+import (
+	"testing"
+
+	"github.com/rackspace/gophercloud/pagination"
+	th "github.com/rackspace/gophercloud/testhelper"
+	"github.com/rackspace/gophercloud/testhelper/client"
+)
+
+func TestList(t *testing.T) {
+	th.SetupHTTP()
+	defer th.TeardownHTTP()
+	HandleListSuccessfully(t)
+
+	count := 0
+	err := List(client.ServiceClient()).EachPage(func(page pagination.Page) (bool, error) {
+		count++
+		actual, err := ExtractServerGroups(page)
+		th.AssertNoErr(t, err)
+		th.CheckDeepEquals(t, ExpectedServerGroupSlice, actual)
+
+		return true, nil
+	})
+	th.AssertNoErr(t, err)
+	th.CheckEquals(t, 1, count)
+}
+
+func TestCreate(t *testing.T) {
+	th.SetupHTTP()
+	defer th.TeardownHTTP()
+	HandleCreateSuccessfully(t)
+
+	actual, err := Create(client.ServiceClient(), CreateOpts{
+		Name:     "test",
+		Policies: []string{"anti-affinity"},
+	}).Extract()
+	th.AssertNoErr(t, err)
+	th.CheckDeepEquals(t, &CreatedServerGroup, actual)
+}
+
+func TestGet(t *testing.T) {
+	th.SetupHTTP()
+	defer th.TeardownHTTP()
+	HandleGetSuccessfully(t)
+
+	actual, err := Get(client.ServiceClient(), "4d8c3732-a248-40ed-bebc-539a6ffd25c0").Extract()
+	th.AssertNoErr(t, err)
+	th.CheckDeepEquals(t, &FirstServerGroup, actual)
+}
+
+func TestDelete(t *testing.T) {
+	th.SetupHTTP()
+	defer th.TeardownHTTP()
+	HandleDeleteSuccessfully(t)
+
+	err := Delete(client.ServiceClient(), "616fb98f-46ca-475e-917e-2563e5a8cd19").ExtractErr()
+	th.AssertNoErr(t, err)
+}
diff --git a/vendor/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/servergroups/results.go b/vendor/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/servergroups/results.go
new file mode 100644
index 000000000..d74ee5dbb
--- /dev/null
+++ b/vendor/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/servergroups/results.go
@@ -0,0 +1,87 @@
+package servergroups
+
+import (
+	"github.com/mitchellh/mapstructure"
+	"github.com/rackspace/gophercloud"
+	"github.com/rackspace/gophercloud/pagination"
+)
+
+// A ServerGroup creates a policy for instance placement in the cloud.
+type ServerGroup struct {
+	// ID is the unique ID of the Server Group.
+	ID string `mapstructure:"id"`
+
+	// Name is the common name of the server group.
+	Name string `mapstructure:"name"`
+
+	// Policies are the group policies.
+	Policies []string `mapstructure:"policies"`
+
+	// Members are the members of the server group.
+	Members []string `mapstructure:"members"`
+
+	// Metadata includes a list of all user-specified key-value pairs attached to the Server Group.
+	Metadata map[string]interface{}
+}
+
+// ServerGroupsPage stores a single, only page of ServerGroups
+// results from a List call.
+type ServerGroupsPage struct {
+	pagination.SinglePageBase
+}
+
+// IsEmpty determines whether or not a ServerGroupsPage is empty.
+func (page ServerGroupsPage) IsEmpty() (bool, error) {
+	va, err := ExtractServerGroups(page)
+	return len(va) == 0, err
+}
+
+// ExtractServerGroups interprets a page of results as a slice of
+// ServerGroups.
+func ExtractServerGroups(page pagination.Page) ([]ServerGroup, error) {
+	casted := page.(ServerGroupsPage).Body
+	var response struct {
+		ServerGroups []ServerGroup `mapstructure:"server_groups"`
+	}
+
+	err := mapstructure.WeakDecode(casted, &response)
+
+	return response.ServerGroups, err
+}
+
+// ServerGroupResult is the raw result from a request that returns a single
+// server group.
+type ServerGroupResult struct {
+	gophercloud.Result
+}
+
+// Extract is a method that attempts to interpret any Server Group resource
+// response as a ServerGroup struct.
+func (r ServerGroupResult) Extract() (*ServerGroup, error) {
+	if r.Err != nil {
+		return nil, r.Err
+	}
+
+	var res struct {
+		ServerGroup *ServerGroup `json:"server_group" mapstructure:"server_group"`
+	}
+
+	err := mapstructure.WeakDecode(r.Body, &res)
+	return res.ServerGroup, err
+}
+
+// CreateResult is the response from a Create operation. Call its Extract method to interpret it
+// as a ServerGroup.
+type CreateResult struct {
+	ServerGroupResult
+}
+
+// GetResult is the response from a Get operation. Call its Extract method to interpret it
+// as a ServerGroup.
+type GetResult struct {
+	ServerGroupResult
+}
+
+// DeleteResult is the response from a Delete operation. Call its ExtractErr method to determine if
+// the call succeeded or failed.
+type DeleteResult struct {
+	gophercloud.ErrResult
+}
diff --git a/vendor/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/servergroups/urls.go b/vendor/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/servergroups/urls.go
new file mode 100644
index 000000000..074a16c67
--- /dev/null
+++ b/vendor/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/servergroups/urls.go
@@ -0,0 +1,25 @@
+package servergroups
+
+import "github.com/rackspace/gophercloud"
+
+const resourcePath = "os-server-groups"
+
+func resourceURL(c *gophercloud.ServiceClient) string {
+	return c.ServiceURL(resourcePath)
+}
+
+func listURL(c *gophercloud.ServiceClient) string {
+	return resourceURL(c)
+}
+
+func createURL(c *gophercloud.ServiceClient) string {
+	return resourceURL(c)
+}
+
+func getURL(c *gophercloud.ServiceClient, id string) string {
+	return c.ServiceURL(resourcePath, id)
+}
+
+func deleteURL(c *gophercloud.ServiceClient, id string) string {
+	return getURL(c, id)
+}
diff --git a/vendor/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/servergroups/urls_test.go b/vendor/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/servergroups/urls_test.go
new file mode 100644
index 000000000..bff4dfc72
--- /dev/null
+++ b/vendor/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/servergroups/urls_test.go
@@ -0,0 +1,42 @@
+package servergroups
+
+import (
+	"testing"
+
+	th "github.com/rackspace/gophercloud/testhelper"
+	"github.com/rackspace/gophercloud/testhelper/client"
+)
+
+func TestListURL(t *testing.T) {
+	th.SetupHTTP()
+	defer th.TeardownHTTP()
+	c := client.ServiceClient()
+
+	th.CheckEquals(t, c.Endpoint+"os-server-groups", listURL(c))
+}
+
+func TestCreateURL(t *testing.T) {
+	th.SetupHTTP()
+	defer th.TeardownHTTP()
+	c := client.ServiceClient()
+
+	th.CheckEquals(t, c.Endpoint+"os-server-groups", createURL(c))
+}
+
+func TestGetURL(t *testing.T) {
+	th.SetupHTTP()
+	defer th.TeardownHTTP()
+	c := client.ServiceClient()
+	id := "1"
+
+	th.CheckEquals(t, c.Endpoint+"os-server-groups/"+id, getURL(c,
id)) +} + +func TestDeleteURL(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + c := client.ServiceClient() + id := "1" + + th.CheckEquals(t, c.Endpoint+"os-server-groups/"+id, deleteURL(c, id)) +} diff --git a/vendor/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/startstop/doc.go b/vendor/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/startstop/doc.go new file mode 100644 index 000000000..d2729f874 --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/startstop/doc.go @@ -0,0 +1,5 @@ +/* +Package startstop provides functionality to start and stop servers that have +been provisioned by the OpenStack Compute service. +*/ +package startstop diff --git a/vendor/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/startstop/fixtures.go b/vendor/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/startstop/fixtures.go new file mode 100644 index 000000000..670828a98 --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/startstop/fixtures.go @@ -0,0 +1,27 @@ +package startstop + +import ( + "net/http" + "testing" + + th "github.com/rackspace/gophercloud/testhelper" + "github.com/rackspace/gophercloud/testhelper/client" +) + +func mockStartServerResponse(t *testing.T, id string) { + th.Mux.HandleFunc("/servers/"+id+"/action", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "POST") + th.TestHeader(t, r, "X-Auth-Token", client.TokenID) + th.TestJSONRequest(t, r, `{"os-start": null}`) + w.WriteHeader(http.StatusAccepted) + }) +} + +func mockStopServerResponse(t *testing.T, id string) { + th.Mux.HandleFunc("/servers/"+id+"/action", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "POST") + th.TestHeader(t, r, "X-Auth-Token", client.TokenID) + th.TestJSONRequest(t, r, `{"os-stop": null}`) + w.WriteHeader(http.StatusAccepted) + }) +} diff --git a/vendor/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/startstop/requests.go b/vendor/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/startstop/requests.go new file mode 100644 index 000000000..0e090e69f --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/startstop/requests.go @@ -0,0 +1,23 @@ +package startstop + +import "github.com/rackspace/gophercloud" + +func actionURL(client *gophercloud.ServiceClient, id string) string { + return client.ServiceURL("servers", id, "action") +} + +// Start is the operation responsible for starting a Compute server. +func Start(client *gophercloud.ServiceClient, id string) gophercloud.ErrResult { + var res gophercloud.ErrResult + reqBody := map[string]interface{}{"os-start": nil} + _, res.Err = client.Post(actionURL(client, id), reqBody, nil, nil) + return res +} + +// Stop is the operation responsible for stopping a Compute server. 
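+//
+// Start and Stop are symmetric; a sketch (computeClient and serverID are
+// assumed to come from the caller's context):
+//
+//	if err := startstop.Stop(computeClient, serverID).ExtractErr(); err != nil {
+//		// the stop action was rejected
+//	}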
+func Stop(client *gophercloud.ServiceClient, id string) gophercloud.ErrResult {
+	var res gophercloud.ErrResult
+	reqBody := map[string]interface{}{"os-stop": nil}
+	_, res.Err = client.Post(actionURL(client, id), reqBody, nil, nil)
+	return res
+}
diff --git a/vendor/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/startstop/requests_test.go b/vendor/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/startstop/requests_test.go
new file mode 100644
index 000000000..97a121b19
--- /dev/null
+++ b/vendor/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/startstop/requests_test.go
@@ -0,0 +1,30 @@
+package startstop
+
+import (
+	"testing"
+
+	th "github.com/rackspace/gophercloud/testhelper"
+	"github.com/rackspace/gophercloud/testhelper/client"
+)
+
+const serverID = "{serverId}"
+
+func TestStart(t *testing.T) {
+	th.SetupHTTP()
+	defer th.TeardownHTTP()
+
+	mockStartServerResponse(t, serverID)
+
+	err := Start(client.ServiceClient(), serverID).ExtractErr()
+	th.AssertNoErr(t, err)
+}
+
+func TestStop(t *testing.T) {
+	th.SetupHTTP()
+	defer th.TeardownHTTP()
+
+	mockStopServerResponse(t, serverID)
+
+	err := Stop(client.ServiceClient(), serverID).ExtractErr()
+	th.AssertNoErr(t, err)
+}
diff --git a/vendor/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/tenantnetworks/doc.go b/vendor/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/tenantnetworks/doc.go
new file mode 100644
index 000000000..65c46ff50
--- /dev/null
+++ b/vendor/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/tenantnetworks/doc.go
@@ -0,0 +1,2 @@
+// Package tenantnetworks provides the ability for tenants to see information about the networks they have access to.
+package tenantnetworks
diff --git a/vendor/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/tenantnetworks/fixtures.go b/vendor/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/tenantnetworks/fixtures.go
new file mode 100644
index 000000000..0cfa72ab0
--- /dev/null
+++ b/vendor/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/tenantnetworks/fixtures.go
@@ -0,0 +1,84 @@
+// +build fixtures
+
+package tenantnetworks
+
+import (
+	"fmt"
+	"net/http"
+	"testing"
+	"time"
+
+	th "github.com/rackspace/gophercloud/testhelper"
+	"github.com/rackspace/gophercloud/testhelper/client"
+)
+
+// ListOutput is a sample response to a List call.
+const ListOutput = `
+{
+    "networks": [
+        {
+            "cidr": "10.0.0.0/29",
+            "id": "20c8acc0-f747-4d71-a389-46d078ebf047",
+            "label": "mynet_0"
+        },
+        {
+            "cidr": "10.0.0.10/29",
+            "id": "20c8acc0-f747-4d71-a389-46d078ebf000",
+            "label": "mynet_1"
+        }
+    ]
+}
+`
+
+// GetOutput is a sample response to a Get call.
+const GetOutput = `
+{
+    "network": {
+        "cidr": "10.0.0.10/29",
+        "id": "20c8acc0-f747-4d71-a389-46d078ebf000",
+        "label": "mynet_1"
+    }
+}
+`
+
+var nilTime time.Time
+
+// FirstNetwork is the first result in ListOutput.
+var FirstNetwork = Network{
+	CIDR: "10.0.0.0/29",
+	ID:   "20c8acc0-f747-4d71-a389-46d078ebf047",
+	Name: "mynet_0",
+}
+
+// SecondNetwork is the second result in ListOutput.
+var SecondNetwork = Network{
+	CIDR: "10.0.0.10/29",
+	ID:   "20c8acc0-f747-4d71-a389-46d078ebf000",
+	Name: "mynet_1",
+}
+
+// ExpectedNetworkSlice is the slice of results that should be parsed
+// from ListOutput, in the expected order.
+var ExpectedNetworkSlice = []Network{FirstNetwork, SecondNetwork}
+
+// HandleListSuccessfully configures the test server to respond to a List request.
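+// The tests in this package pair it with the usual testhelper setup, e.g.:
+//
+//	th.SetupHTTP()
+//	defer th.TeardownHTTP()
+//	HandleListSuccessfully(t)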
+func HandleListSuccessfully(t *testing.T) {
+	th.Mux.HandleFunc("/os-tenant-networks", func(w http.ResponseWriter, r *http.Request) {
+		th.TestMethod(t, r, "GET")
+		th.TestHeader(t, r, "X-Auth-Token", client.TokenID)
+
+		w.Header().Add("Content-Type", "application/json")
+		fmt.Fprintf(w, ListOutput)
+	})
+}
+
+// HandleGetSuccessfully configures the test server to respond to a Get request
+// for an existing network.
+func HandleGetSuccessfully(t *testing.T) {
+	th.Mux.HandleFunc("/os-tenant-networks/20c8acc0-f747-4d71-a389-46d078ebf000", func(w http.ResponseWriter, r *http.Request) {
+		th.TestMethod(t, r, "GET")
+		th.TestHeader(t, r, "X-Auth-Token", client.TokenID)
+
+		w.Header().Add("Content-Type", "application/json")
+		fmt.Fprintf(w, GetOutput)
+	})
+}
diff --git a/vendor/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/tenantnetworks/requests.go b/vendor/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/tenantnetworks/requests.go
new file mode 100644
index 000000000..3ec13d384
--- /dev/null
+++ b/vendor/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/tenantnetworks/requests.go
@@ -0,0 +1,22 @@
+package tenantnetworks
+
+import (
+	"github.com/rackspace/gophercloud"
+	"github.com/rackspace/gophercloud/pagination"
+)
+
+// List returns a Pager that allows you to iterate over a collection of Networks.
+func List(client *gophercloud.ServiceClient) pagination.Pager {
+	url := listURL(client)
+	createPage := func(r pagination.PageResult) pagination.Page {
+		return NetworkPage{pagination.SinglePageBase(r)}
+	}
+	return pagination.NewPager(client, url, createPage)
+}
+
+// Get returns data about a previously created Network.
+func Get(client *gophercloud.ServiceClient, id string) GetResult {
+	var res GetResult
+	_, res.Err = client.Get(getURL(client, id), &res.Body, nil)
+	return res
+}
diff --git a/vendor/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/tenantnetworks/requests_test.go b/vendor/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/tenantnetworks/requests_test.go
new file mode 100644
index 000000000..fc4ee4f4b
--- /dev/null
+++ b/vendor/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/tenantnetworks/requests_test.go
@@ -0,0 +1,37 @@
+package tenantnetworks
+
+import (
+	"testing"
+
+	"github.com/rackspace/gophercloud/pagination"
+	th "github.com/rackspace/gophercloud/testhelper"
+	"github.com/rackspace/gophercloud/testhelper/client"
+)
+
+func TestList(t *testing.T) {
+	th.SetupHTTP()
+	defer th.TeardownHTTP()
+	HandleListSuccessfully(t)
+
+	count := 0
+	err := List(client.ServiceClient()).EachPage(func(page pagination.Page) (bool, error) {
+		count++
+		actual, err := ExtractNetworks(page)
+		th.AssertNoErr(t, err)
+		th.CheckDeepEquals(t, ExpectedNetworkSlice, actual)
+
+		return true, nil
+	})
+	th.AssertNoErr(t, err)
+	th.CheckEquals(t, 1, count)
+}
+
+func TestGet(t *testing.T) {
+	th.SetupHTTP()
+	defer th.TeardownHTTP()
+	HandleGetSuccessfully(t)
+
+	actual, err := Get(client.ServiceClient(), "20c8acc0-f747-4d71-a389-46d078ebf000").Extract()
+	th.AssertNoErr(t, err)
+	th.CheckDeepEquals(t, &SecondNetwork, actual)
+}
diff --git a/vendor/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/tenantnetworks/results.go b/vendor/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/tenantnetworks/results.go
new file mode 100644
index 000000000..805009247
--- /dev/null
+++ b/vendor/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/tenantnetworks/results.go
@@ -0,0 +1,68 @@
+package tenantnetworks
+
+import (
+	"github.com/mitchellh/mapstructure"
+	"github.com/rackspace/gophercloud"
+	"github.com/rackspace/gophercloud/pagination"
+)
+
+// A Network represents a nova-network that an instance communicates on.
+type Network struct {
+	// CIDR is the IPv4 subnet.
+	CIDR string `mapstructure:"cidr"`
+
+	// ID is the UUID of the network.
+	ID string `mapstructure:"id"`
+
+	// Name is the common name that the network has.
+	Name string `mapstructure:"label"`
+}
+
+// NetworkPage stores a single, only page of Networks
+// results from a List call.
+type NetworkPage struct {
+	pagination.SinglePageBase
+}
+
+// IsEmpty determines whether or not a NetworkPage is empty.
+func (page NetworkPage) IsEmpty() (bool, error) {
+	va, err := ExtractNetworks(page)
+	return len(va) == 0, err
+}
+
+// ExtractNetworks interprets a page of results as a slice of Networks.
+func ExtractNetworks(page pagination.Page) ([]Network, error) {
+	networks := page.(NetworkPage).Body
+	var res struct {
+		Networks []Network `mapstructure:"networks"`
+	}
+
+	err := mapstructure.WeakDecode(networks, &res)
+
+	return res.Networks, err
+}
+
+// NetworkResult is the raw result from a request that returns a single
+// network.
+type NetworkResult struct {
+	gophercloud.Result
+}
+
+// Extract is a method that attempts to interpret any Network resource
+// response as a Network struct.
+func (r NetworkResult) Extract() (*Network, error) {
+	if r.Err != nil {
+		return nil, r.Err
+	}
+
+	var res struct {
+		Network *Network `json:"network" mapstructure:"network"`
+	}
+
+	err := mapstructure.Decode(r.Body, &res)
+	return res.Network, err
+}
+
+// GetResult is the response from a Get operation. Call its Extract method to interpret it
+// as a Network.
+type GetResult struct {
+	NetworkResult
+}
diff --git a/vendor/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/tenantnetworks/urls.go b/vendor/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/tenantnetworks/urls.go
new file mode 100644
index 000000000..2401a5d03
--- /dev/null
+++ b/vendor/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/tenantnetworks/urls.go
@@ -0,0 +1,17 @@
+package tenantnetworks
+
+import "github.com/rackspace/gophercloud"
+
+const resourcePath = "os-tenant-networks"
+
+func resourceURL(c *gophercloud.ServiceClient) string {
+	return c.ServiceURL(resourcePath)
+}
+
+func listURL(c *gophercloud.ServiceClient) string {
+	return resourceURL(c)
+}
+
+func getURL(c *gophercloud.ServiceClient, id string) string {
+	return c.ServiceURL(resourcePath, id)
+}
diff --git a/vendor/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/tenantnetworks/urls_test.go b/vendor/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/tenantnetworks/urls_test.go
new file mode 100644
index 000000000..39c464e9f
--- /dev/null
+++ b/vendor/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/tenantnetworks/urls_test.go
@@ -0,0 +1,25 @@
+package tenantnetworks
+
+import (
+	"testing"
+
+	th "github.com/rackspace/gophercloud/testhelper"
+	"github.com/rackspace/gophercloud/testhelper/client"
+)
+
+func TestListURL(t *testing.T) {
+	th.SetupHTTP()
+	defer th.TeardownHTTP()
+	c := client.ServiceClient()
+
+	th.CheckEquals(t, c.Endpoint+"os-tenant-networks", listURL(c))
+}
+
+func TestGetURL(t *testing.T) {
+	th.SetupHTTP()
+	defer th.TeardownHTTP()
+	c := client.ServiceClient()
+	id := "1"
+
+	th.CheckEquals(t, c.Endpoint+"os-tenant-networks/"+id, getURL(c, id))
+}
diff --git a/vendor/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/volumeattach/doc.go b/vendor/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/volumeattach/doc.go
new file mode 100644
index 000000000..22f68d80e
--- /dev/null
+++ b/vendor/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/volumeattach/doc.go
@@ -0,0 +1,3 @@
+// Package volumeattach provides the ability to attach and detach volumes
+// to instances.
+package volumeattach
diff --git a/vendor/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/volumeattach/requests.go b/vendor/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/volumeattach/requests.go
new file mode 100644
index 000000000..b4ebedea8
--- /dev/null
+++ b/vendor/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/volumeattach/requests.go
@@ -0,0 +1,75 @@
+package volumeattach
+
+import (
+	"errors"
+
+	"github.com/rackspace/gophercloud"
+	"github.com/rackspace/gophercloud/pagination"
+)
+
+// List returns a Pager that allows you to iterate over a collection of VolumeAttachments.
+func List(client *gophercloud.ServiceClient, serverId string) pagination.Pager {
+	return pagination.NewPager(client, listURL(client, serverId), func(r pagination.PageResult) pagination.Page {
+		return VolumeAttachmentsPage{pagination.SinglePageBase(r)}
+	})
+}
+
+// CreateOptsBuilder describes struct types that can be accepted by the Create call. Notably, the
+// CreateOpts struct in this package does.
+type CreateOptsBuilder interface {
+	ToVolumeAttachmentCreateMap() (map[string]interface{}, error)
+}
+
+// CreateOpts specifies volume attachment creation or import parameters.
+type CreateOpts struct {
+	// Device is the device that the volume will attach to the instance as. Omit for "auto".
+	Device string
+
+	// VolumeID is the ID of the volume to attach to the instance.
+	VolumeID string
+}
+
+// ToVolumeAttachmentCreateMap constructs a request body from CreateOpts.
+func (opts CreateOpts) ToVolumeAttachmentCreateMap() (map[string]interface{}, error) {
+	if opts.VolumeID == "" {
+		return nil, errors.New("Missing field required for volume attachment creation: VolumeID")
+	}
+
+	volumeAttachment := make(map[string]interface{})
+	volumeAttachment["volumeId"] = opts.VolumeID
+	if opts.Device != "" {
+		volumeAttachment["device"] = opts.Device
+	}
+
+	return map[string]interface{}{"volumeAttachment": volumeAttachment}, nil
+}
+
+// Create requests the creation of a new volume attachment on the server.
+func Create(client *gophercloud.ServiceClient, serverId string, opts CreateOptsBuilder) CreateResult {
+	var res CreateResult
+
+	reqBody, err := opts.ToVolumeAttachmentCreateMap()
+	if err != nil {
+		res.Err = err
+		return res
+	}
+
+	_, res.Err = client.Post(createURL(client, serverId), reqBody, &res.Body, &gophercloud.RequestOpts{
+		OkCodes: []int{200},
+	})
+	return res
+}
+
+// Get returns public data about a previously created VolumeAttachment.
+func Get(client *gophercloud.ServiceClient, serverId, aId string) GetResult {
+	var res GetResult
+	_, res.Err = client.Get(getURL(client, serverId, aId), &res.Body, nil)
+	return res
+}
+
+// Delete requests the deletion of a previously stored VolumeAttachment from the server.
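+//
+// A sketch of the attach/detach round trip (computeClient, serverID, and
+// volumeID are assumed to come from the caller's context):
+//
+//	va, err := volumeattach.Create(computeClient, serverID, volumeattach.CreateOpts{
+//		VolumeID: volumeID,
+//	}).Extract()
+//	// ... once the attachment is no longer needed ...
+//	err = volumeattach.Delete(computeClient, serverID, va.ID).ExtractErr()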
+func Delete(client *gophercloud.ServiceClient, serverId, aId string) DeleteResult {
+	var res DeleteResult
+	_, res.Err = client.Delete(deleteURL(client, serverId, aId), nil)
+	return res
+}
diff --git a/vendor/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/volumeattach/requests_test.go b/vendor/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/volumeattach/requests_test.go
new file mode 100644
index 000000000..b0a765bef
--- /dev/null
+++ b/vendor/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/volumeattach/requests_test.go
@@ -0,0 +1,94 @@
+package volumeattach
+
+import (
+	"testing"
+
+	fixtures "github.com/rackspace/gophercloud/openstack/compute/v2/extensions/volumeattach/testing"
+	"github.com/rackspace/gophercloud/pagination"
+	th "github.com/rackspace/gophercloud/testhelper"
+	"github.com/rackspace/gophercloud/testhelper/client"
+)
+
+// FirstVolumeAttachment is the first result in ListOutput.
+var FirstVolumeAttachment = VolumeAttachment{
+	Device:   "/dev/vdd",
+	ID:       "a26887c6-c47b-4654-abb5-dfadf7d3f803",
+	ServerID: "4d8c3732-a248-40ed-bebc-539a6ffd25c0",
+	VolumeID: "a26887c6-c47b-4654-abb5-dfadf7d3f803",
+}
+
+// SecondVolumeAttachment is the second result in ListOutput.
+var SecondVolumeAttachment = VolumeAttachment{
+	Device:   "/dev/vdc",
+	ID:       "a26887c6-c47b-4654-abb5-dfadf7d3f804",
+	ServerID: "4d8c3732-a248-40ed-bebc-539a6ffd25c0",
+	VolumeID: "a26887c6-c47b-4654-abb5-dfadf7d3f804",
+}
+
+// ExpectedVolumeAttachmentSlice is the slice of results that should be parsed
+// from ListOutput, in the expected order.
+var ExpectedVolumeAttachmentSlice = []VolumeAttachment{FirstVolumeAttachment, SecondVolumeAttachment}
+
+// CreatedVolumeAttachment is the parsed result from CreateOutput.
+var CreatedVolumeAttachment = VolumeAttachment{
+	Device:   "/dev/vdc",
+	ID:       "a26887c6-c47b-4654-abb5-dfadf7d3f804",
+	ServerID: "4d8c3732-a248-40ed-bebc-539a6ffd25c0",
+	VolumeID: "a26887c6-c47b-4654-abb5-dfadf7d3f804",
+}
+
+func TestList(t *testing.T) {
+	th.SetupHTTP()
+	defer th.TeardownHTTP()
+	fixtures.HandleListSuccessfully(t)
+	serverId := "4d8c3732-a248-40ed-bebc-539a6ffd25c0"
+
+	count := 0
+	err := List(client.ServiceClient(), serverId).EachPage(func(page pagination.Page) (bool, error) {
+		count++
+		actual, err := ExtractVolumeAttachments(page)
+		th.AssertNoErr(t, err)
+		th.CheckDeepEquals(t, ExpectedVolumeAttachmentSlice, actual)
+
+		return true, nil
+	})
+	th.AssertNoErr(t, err)
+	th.CheckEquals(t, 1, count)
+}
+
+func TestCreate(t *testing.T) {
+	th.SetupHTTP()
+	defer th.TeardownHTTP()
+	fixtures.HandleCreateSuccessfully(t)
+	serverId := "4d8c3732-a248-40ed-bebc-539a6ffd25c0"
+
+	actual, err := Create(client.ServiceClient(), serverId, CreateOpts{
+		Device:   "/dev/vdc",
+		VolumeID: "a26887c6-c47b-4654-abb5-dfadf7d3f804",
+	}).Extract()
+	th.AssertNoErr(t, err)
+	th.CheckDeepEquals(t, &CreatedVolumeAttachment, actual)
+}
+
+func TestGet(t *testing.T) {
+	th.SetupHTTP()
+	defer th.TeardownHTTP()
+	fixtures.HandleGetSuccessfully(t)
+	aId := "a26887c6-c47b-4654-abb5-dfadf7d3f804"
+	serverId := "4d8c3732-a248-40ed-bebc-539a6ffd25c0"
+
+	actual, err := Get(client.ServiceClient(), serverId, aId).Extract()
+	th.AssertNoErr(t, err)
+	th.CheckDeepEquals(t, &SecondVolumeAttachment, actual)
+}
+
+func TestDelete(t *testing.T) {
+	th.SetupHTTP()
+	defer th.TeardownHTTP()
+	fixtures.HandleDeleteSuccessfully(t)
+	aId := "a26887c6-c47b-4654-abb5-dfadf7d3f804"
+	serverId := "4d8c3732-a248-40ed-bebc-539a6ffd25c0"
+
+	err := Delete(client.ServiceClient(), serverId, aId).ExtractErr()
serverId, aId).ExtractErr() + th.AssertNoErr(t, err) +} diff --git a/vendor/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/volumeattach/results.go b/vendor/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/volumeattach/results.go new file mode 100644 index 000000000..26be39e4f --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/volumeattach/results.go @@ -0,0 +1,84 @@ +package volumeattach + +import ( + "github.com/mitchellh/mapstructure" + "github.com/rackspace/gophercloud" + "github.com/rackspace/gophercloud/pagination" +) + +// VolumeAttach controls the attachment of a volume to an instance. +type VolumeAttachment struct { + // ID is a unique id of the attachment + ID string `mapstructure:"id"` + + // Device is what device the volume is attached as + Device string `mapstructure:"device"` + + // VolumeID is the ID of the attached volume + VolumeID string `mapstructure:"volumeId"` + + // ServerID is the ID of the instance that has the volume attached + ServerID string `mapstructure:"serverId"` +} + +// VolumeAttachmentsPage stores a single, only page of VolumeAttachments +// results from a List call. +type VolumeAttachmentsPage struct { + pagination.SinglePageBase +} + +// IsEmpty determines whether or not a VolumeAttachmentsPage is empty. +func (page VolumeAttachmentsPage) IsEmpty() (bool, error) { + va, err := ExtractVolumeAttachments(page) + return len(va) == 0, err +} + +// ExtractVolumeAttachments interprets a page of results as a slice of +// VolumeAttachments. +func ExtractVolumeAttachments(page pagination.Page) ([]VolumeAttachment, error) { + casted := page.(VolumeAttachmentsPage).Body + var response struct { + VolumeAttachments []VolumeAttachment `mapstructure:"volumeAttachments"` + } + + err := mapstructure.WeakDecode(casted, &response) + + return response.VolumeAttachments, err +} + +type VolumeAttachmentResult struct { + gophercloud.Result +} + +// Extract is a method that attempts to interpret any VolumeAttachment resource +// response as a VolumeAttachment struct. +func (r VolumeAttachmentResult) Extract() (*VolumeAttachment, error) { + if r.Err != nil { + return nil, r.Err + } + + var res struct { + VolumeAttachment *VolumeAttachment `json:"volumeAttachment" mapstructure:"volumeAttachment"` + } + + err := mapstructure.Decode(r.Body, &res) + return res.VolumeAttachment, err +} + +// CreateResult is the response from a Create operation. Call its Extract method to interpret it +// as a VolumeAttachment. +type CreateResult struct { + VolumeAttachmentResult +} + +// GetResult is the response from a Get operation. Call its Extract method to interpret it +// as a VolumeAttachment. +type GetResult struct { + VolumeAttachmentResult +} + +// DeleteResult is the response from a Delete operation. Call its Extract method to determine if +// the call succeeded or failed. 
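+// For example (a sketch; client is assumed to be an authenticated
+// *gophercloud.ServiceClient, and the IDs come from a prior List call):
+//
+//	err := volumeattach.Delete(client, serverID, attachmentID).ExtractErr()
+//	if err != nil {
+//		// the detach request was not accepted
+//	}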
+type DeleteResult struct {
+	gophercloud.ErrResult
+}
diff --git a/vendor/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/volumeattach/testing/doc.go b/vendor/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/volumeattach/testing/doc.go
new file mode 100644
index 000000000..183391a62
--- /dev/null
+++ b/vendor/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/volumeattach/testing/doc.go
@@ -0,0 +1,7 @@
+/*
+This package is created to hold fixtures (which import the testing package),
+so that importing the volumeattach package does not inadvertently pull testing
+into production code.
+More information here:
+https://github.com/rackspace/gophercloud/issues/473
+*/
+package testing
diff --git a/vendor/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/volumeattach/testing/fixtures.go b/vendor/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/volumeattach/testing/fixtures.go
new file mode 100644
index 000000000..c469bfbc8
--- /dev/null
+++ b/vendor/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/volumeattach/testing/fixtures.go
@@ -0,0 +1,110 @@
+// +build fixtures
+
+package testing
+
+import (
+	"fmt"
+	"net/http"
+	"testing"
+
+	th "github.com/rackspace/gophercloud/testhelper"
+	"github.com/rackspace/gophercloud/testhelper/client"
+)
+
+// ListOutput is a sample response to a List call.
+const ListOutput = `
+{
+  "volumeAttachments": [
+    {
+      "device": "/dev/vdd",
+      "id": "a26887c6-c47b-4654-abb5-dfadf7d3f803",
+      "serverId": "4d8c3732-a248-40ed-bebc-539a6ffd25c0",
+      "volumeId": "a26887c6-c47b-4654-abb5-dfadf7d3f803"
+    },
+    {
+      "device": "/dev/vdc",
+      "id": "a26887c6-c47b-4654-abb5-dfadf7d3f804",
+      "serverId": "4d8c3732-a248-40ed-bebc-539a6ffd25c0",
+      "volumeId": "a26887c6-c47b-4654-abb5-dfadf7d3f804"
+    }
+  ]
+}
+`
+
+// GetOutput is a sample response to a Get call.
+const GetOutput = `
+{
+  "volumeAttachment": {
+    "device": "/dev/vdc",
+    "id": "a26887c6-c47b-4654-abb5-dfadf7d3f804",
+    "serverId": "4d8c3732-a248-40ed-bebc-539a6ffd25c0",
+    "volumeId": "a26887c6-c47b-4654-abb5-dfadf7d3f804"
+  }
+}
+`
+
+// CreateOutput is a sample response to a Create call.
+const CreateOutput = `
+{
+  "volumeAttachment": {
+    "device": "/dev/vdc",
+    "id": "a26887c6-c47b-4654-abb5-dfadf7d3f804",
+    "serverId": "4d8c3732-a248-40ed-bebc-539a6ffd25c0",
+    "volumeId": "a26887c6-c47b-4654-abb5-dfadf7d3f804"
+  }
+}
+`
+
+// HandleListSuccessfully configures the test server to respond to a List request.
+func HandleListSuccessfully(t *testing.T) {
+	th.Mux.HandleFunc("/servers/4d8c3732-a248-40ed-bebc-539a6ffd25c0/os-volume_attachments", func(w http.ResponseWriter, r *http.Request) {
+		th.TestMethod(t, r, "GET")
+		th.TestHeader(t, r, "X-Auth-Token", client.TokenID)
+
+		w.Header().Add("Content-Type", "application/json")
+		fmt.Fprintf(w, ListOutput)
+	})
+}
+
+// HandleGetSuccessfully configures the test server to respond to a Get request
+// for an existing attachment
+func HandleGetSuccessfully(t *testing.T) {
+	th.Mux.HandleFunc("/servers/4d8c3732-a248-40ed-bebc-539a6ffd25c0/os-volume_attachments/a26887c6-c47b-4654-abb5-dfadf7d3f804", func(w http.ResponseWriter, r *http.Request) {
+		th.TestMethod(t, r, "GET")
+		th.TestHeader(t, r, "X-Auth-Token", client.TokenID)
+
+		w.Header().Add("Content-Type", "application/json")
+		fmt.Fprintf(w, GetOutput)
+	})
+}
+
+// HandleCreateSuccessfully configures the test server to respond to a Create request
+// for a new attachment
+func HandleCreateSuccessfully(t *testing.T) {
+	th.Mux.HandleFunc("/servers/4d8c3732-a248-40ed-bebc-539a6ffd25c0/os-volume_attachments", func(w http.ResponseWriter, r *http.Request) {
+		th.TestMethod(t, r, "POST")
+		th.TestHeader(t, r, "X-Auth-Token", client.TokenID)
+		th.TestJSONRequest(t, r, `
+{
+  "volumeAttachment": {
+    "volumeId": "a26887c6-c47b-4654-abb5-dfadf7d3f804",
+    "device": "/dev/vdc"
+  }
+}
+`)
+
+		w.Header().Add("Content-Type", "application/json")
+		fmt.Fprintf(w, CreateOutput)
+	})
+}
+
+// HandleDeleteSuccessfully configures the test server to respond to a Delete request
+// for an existing attachment
+func HandleDeleteSuccessfully(t *testing.T) {
+	th.Mux.HandleFunc("/servers/4d8c3732-a248-40ed-bebc-539a6ffd25c0/os-volume_attachments/a26887c6-c47b-4654-abb5-dfadf7d3f804", func(w http.ResponseWriter, r *http.Request) {
+		th.TestMethod(t, r, "DELETE")
+		th.TestHeader(t, r, "X-Auth-Token", client.TokenID)
+
+		w.WriteHeader(http.StatusAccepted)
+	})
+}
diff --git a/vendor/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/volumeattach/urls.go b/vendor/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/volumeattach/urls.go
new file mode 100644
index 000000000..9d9d1786d
--- /dev/null
+++ b/vendor/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/volumeattach/urls.go
@@ -0,0 +1,25 @@
+package volumeattach
+
+import "github.com/rackspace/gophercloud"
+
+const resourcePath = "os-volume_attachments"
+
+func resourceURL(c *gophercloud.ServiceClient, serverId string) string {
+	return c.ServiceURL("servers", serverId, resourcePath)
+}
+
+func listURL(c *gophercloud.ServiceClient, serverId string) string {
+	return resourceURL(c, serverId)
+}
+
+func createURL(c *gophercloud.ServiceClient, serverId string) string {
+	return resourceURL(c, serverId)
+}
+
+func getURL(c *gophercloud.ServiceClient, serverId, aId string) string {
+	return c.ServiceURL("servers", serverId, resourcePath, aId)
+}
+
+func deleteURL(c *gophercloud.ServiceClient, serverId, aId string) string {
+	return getURL(c, serverId, aId)
+}
diff --git a/vendor/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/volumeattach/urls_test.go b/vendor/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/volumeattach/urls_test.go
new file mode 100644
index 000000000..8ee0e42d4
--- /dev/null
+++ b/vendor/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/volumeattach/urls_test.go
@@ -0,0 +1,46 @@
+package volumeattach
+
+import (
+	"testing"
+
+	th "github.com/rackspace/gophercloud/testhelper"
+	"github.com/rackspace/gophercloud/testhelper/client"
+)
+
+func TestListURL(t *testing.T) {
+	th.SetupHTTP()
+	defer th.TeardownHTTP()
+	c := client.ServiceClient()
+	serverId := "4d8c3732-a248-40ed-bebc-539a6ffd25c0"
+
+	th.CheckEquals(t, c.Endpoint+"servers/"+serverId+"/os-volume_attachments", listURL(c, serverId))
+}
+
+func TestCreateURL(t *testing.T) {
+	th.SetupHTTP()
+	defer th.TeardownHTTP()
+	c := client.ServiceClient()
+	serverId := "4d8c3732-a248-40ed-bebc-539a6ffd25c0"
+
+	th.CheckEquals(t, c.Endpoint+"servers/"+serverId+"/os-volume_attachments", createURL(c, serverId))
+}
+
+func TestGetURL(t *testing.T) {
+	th.SetupHTTP()
+	defer th.TeardownHTTP()
+	c := client.ServiceClient()
+	serverId := "4d8c3732-a248-40ed-bebc-539a6ffd25c0"
+	aId := "a26887c6-c47b-4654-abb5-dfadf7d3f804"
+
+	th.CheckEquals(t, c.Endpoint+"servers/"+serverId+"/os-volume_attachments/"+aId, getURL(c, serverId, aId))
+}
+
+func TestDeleteURL(t *testing.T) {
+	th.SetupHTTP()
+	defer th.TeardownHTTP()
+	c := client.ServiceClient()
+	serverId := "4d8c3732-a248-40ed-bebc-539a6ffd25c0"
+	aId := "a26887c6-c47b-4654-abb5-dfadf7d3f804"
+
+	th.CheckEquals(t, c.Endpoint+"servers/"+serverId+"/os-volume_attachments/"+aId, deleteURL(c, serverId, aId))
+}
diff --git a/vendor/github.com/rackspace/gophercloud/openstack/compute/v2/flavors/doc.go b/vendor/github.com/rackspace/gophercloud/openstack/compute/v2/flavors/doc.go
new file mode 100644
index 000000000..5822e1bcf
--- /dev/null
+++ b/vendor/github.com/rackspace/gophercloud/openstack/compute/v2/flavors/doc.go
@@ -0,0 +1,7 @@
+// Package flavors provides information and interaction with the flavor API
+// resource in the OpenStack Compute service.
+//
+// A flavor is an available hardware configuration for a server. Each flavor
+// has a unique combination of disk space, memory capacity and priority for CPU
+// time.
+package flavors
diff --git a/vendor/github.com/rackspace/gophercloud/openstack/compute/v2/flavors/requests.go b/vendor/github.com/rackspace/gophercloud/openstack/compute/v2/flavors/requests.go
new file mode 100644
index 000000000..59123aaf7
--- /dev/null
+++ b/vendor/github.com/rackspace/gophercloud/openstack/compute/v2/flavors/requests.go
@@ -0,0 +1,103 @@
+package flavors
+
+import (
+	"fmt"
+
+	"github.com/rackspace/gophercloud"
+	"github.com/rackspace/gophercloud/pagination"
+)
+
+// ListOptsBuilder allows extensions to add additional parameters to the
+// List request.
+type ListOptsBuilder interface {
+	ToFlavorListQuery() (string, error)
+}
+
+// ListOpts helps control the results returned by the ListDetail() function.
+// For example, a flavor with a minDisk field of 10 will not be returned if you specify MinDisk set to 20.
+// Typically, software will use the last ID of the previous call to ListDetail to set the Marker for the current call.
+type ListOpts struct {
+
+	// ChangesSince, if provided, instructs ListDetail to return only those things which have changed since the timestamp provided.
+	ChangesSince string `q:"changes-since"`
+
+	// MinDisk and MinRAM, if provided, elide flavors which do not meet your criteria.
+	MinDisk int `q:"minDisk"`
+	MinRAM  int `q:"minRam"`
+
+	// Marker and Limit control paging.
+	// Marker instructs ListDetail where to start listing from.
+	Marker string `q:"marker"`
+
+	// Limit instructs ListDetail to refrain from sending excessively large lists of flavors.
+	Limit int `q:"limit"`
+}
+
+// ToFlavorListQuery formats a ListOpts into a query string.
+func (opts ListOpts) ToFlavorListQuery() (string, error) {
+	q, err := gophercloud.BuildQueryString(opts)
+	if err != nil {
+		return "", err
+	}
+	return q.String(), nil
+}
+
+// ListDetail instructs OpenStack to provide a list of flavors.
+// You may provide criteria by which ListDetail curtails its results for easier processing.
+// See ListOpts for more details.
+func ListDetail(client *gophercloud.ServiceClient, opts ListOptsBuilder) pagination.Pager {
+	url := listURL(client)
+	if opts != nil {
+		query, err := opts.ToFlavorListQuery()
+		if err != nil {
+			return pagination.Pager{Err: err}
+		}
+		url += query
+	}
+	createPage := func(r pagination.PageResult) pagination.Page {
+		return FlavorPage{pagination.LinkedPageBase{PageResult: r}}
+	}
+
+	return pagination.NewPager(client, url, createPage)
+}
+
+// Get instructs OpenStack to provide details on a single flavor, identified by its ID.
+// Use the Extract method on its result to convert it into a Flavor.
+func Get(client *gophercloud.ServiceClient, id string) GetResult {
+	var res GetResult
+	_, res.Err = client.Get(getURL(client, id), &res.Body, nil)
+	return res
+}
+
+// IDFromName is a convenience function that returns a flavor's ID given its name.
+func IDFromName(client *gophercloud.ServiceClient, name string) (string, error) {
+	flavorCount := 0
+	flavorID := ""
+	if name == "" {
+		return "", fmt.Errorf("A flavor name must be provided.")
+	}
+	pager := ListDetail(client, nil)
+	pager.EachPage(func(page pagination.Page) (bool, error) {
+		flavorList, err := ExtractFlavors(page)
+		if err != nil {
+			return false, err
+		}
+
+		for _, f := range flavorList {
+			if f.Name == name {
+				flavorCount++
+				flavorID = f.ID
+			}
+		}
+		return true, nil
+	})
+
+	switch flavorCount {
+	case 0:
+		return "", fmt.Errorf("Unable to find flavor: %s", name)
+	case 1:
+		return flavorID, nil
+	default:
+		return "", fmt.Errorf("Found %d flavors matching %s", flavorCount, name)
+	}
+}
diff --git a/vendor/github.com/rackspace/gophercloud/openstack/compute/v2/flavors/requests_test.go b/vendor/github.com/rackspace/gophercloud/openstack/compute/v2/flavors/requests_test.go
new file mode 100644
index 000000000..fbd7c3314
--- /dev/null
+++ b/vendor/github.com/rackspace/gophercloud/openstack/compute/v2/flavors/requests_test.go
@@ -0,0 +1,129 @@
+package flavors
+
+import (
+	"fmt"
+	"net/http"
+	"reflect"
+	"testing"
+
+	"github.com/rackspace/gophercloud/pagination"
+	th "github.com/rackspace/gophercloud/testhelper"
+	fake "github.com/rackspace/gophercloud/testhelper/client"
+)
+
+const tokenID = "blerb"
+
+func TestListFlavors(t *testing.T) {
+	th.SetupHTTP()
+	defer th.TeardownHTTP()
+
+	th.Mux.HandleFunc("/flavors/detail", func(w http.ResponseWriter, r *http.Request) {
+		th.TestMethod(t, r, "GET")
+		th.TestHeader(t, r, "X-Auth-Token", fake.TokenID)
+
+		w.Header().Add("Content-Type", "application/json")
+		r.ParseForm()
+		marker := r.Form.Get("marker")
+		switch marker {
+		case "":
+			fmt.Fprintf(w, `
+				{
+					"flavors": [
+						{
+							"id": "1",
+							"name": "m1.tiny",
+							"disk": 1,
+							"ram": 512,
+							"vcpus": 1
+						},
+						{
+							"id": "2",
+							"name": "m2.small",
+							"disk": 10,
+							"ram": 1024,
+							"vcpus": 2
+						}
+					],
+					"flavors_links": [
+						{
+							"href": "%s/flavors/detail?marker=2",
+							"rel": "next"
+						}
+					]
+				}
+			`, th.Server.URL)
+		case "2":
+			fmt.Fprintf(w, `{ "flavors": [] }`)
+		default:
+			t.Fatalf("Unexpected marker: [%s]", marker)
+		}
+	})
+
+	pages := 0
+	err := ListDetail(fake.ServiceClient(), nil).EachPage(func(page pagination.Page) (bool, error) {
+		pages++
+
+		actual, err := ExtractFlavors(page)
+		if err != nil {
+			return false, err
+		}
+
+		expected := []Flavor{
+			Flavor{ID: "1", Name: "m1.tiny", Disk: 1, RAM: 512, VCPUs: 1},
+			Flavor{ID: "2", Name: "m2.small", Disk: 10, RAM: 1024, VCPUs: 2},
+		}
+
+		if !reflect.DeepEqual(expected, actual) {
+			t.Errorf("Expected %#v, but was %#v", expected, actual)
+		}
+
+		return true, nil
+	})
+	if err != nil {
+		t.Fatal(err)
+	}
+	if pages != 1 {
+		t.Errorf("Expected one page, got %d", pages)
+	}
+}
+
+func TestGetFlavor(t *testing.T) {
+	th.SetupHTTP()
+	defer th.TeardownHTTP()
+
+	th.Mux.HandleFunc("/flavors/12345", func(w http.ResponseWriter, r *http.Request) {
+		th.TestMethod(t, r, "GET")
+		th.TestHeader(t, r, "X-Auth-Token", fake.TokenID)
+
+		w.Header().Add("Content-Type", "application/json")
+		fmt.Fprintf(w, `
+			{
+				"flavor": {
+					"id": "1",
+					"name": "m1.tiny",
+					"disk": 1,
+					"ram": 512,
+					"vcpus": 1,
+					"rxtx_factor": 1
+				}
+			}
+		`)
+	})
+
+	actual, err := Get(fake.ServiceClient(), "12345").Extract()
+	if err != nil {
+		t.Fatalf("Unable to get flavor: %v", err)
+	}
+
+	expected := &Flavor{
+		ID:         "1",
+		Name:       "m1.tiny",
+		Disk:       1,
+		RAM:        512,
+		VCPUs:      1,
+		RxTxFactor: 1,
+	}
+	if !reflect.DeepEqual(expected, actual) {
+		t.Errorf("Expected %#v, but was %#v", expected, actual)
+	}
+}
diff --git a/vendor/github.com/rackspace/gophercloud/openstack/compute/v2/flavors/results.go b/vendor/github.com/rackspace/gophercloud/openstack/compute/v2/flavors/results.go
new file mode 100644
index 000000000..8dddd705c
--- /dev/null
+++ b/vendor/github.com/rackspace/gophercloud/openstack/compute/v2/flavors/results.go
@@ -0,0 +1,122 @@
+package flavors
+
+import (
+	"errors"
+	"reflect"
+
+	"github.com/mitchellh/mapstructure"
+	"github.com/rackspace/gophercloud"
+	"github.com/rackspace/gophercloud/pagination"
+)
+
+// ErrCannotInterpet is returned by an Extract call if the response body doesn't have the expected structure.
+var ErrCannotInterpet = errors.New("Unable to interpret a response body.")
+
+// GetResult temporarily holds the response from a Get call.
+type GetResult struct {
+	gophercloud.Result
+}
+
+// Extract provides access to the individual Flavor returned by the Get function.
+func (gr GetResult) Extract() (*Flavor, error) {
+	if gr.Err != nil {
+		return nil, gr.Err
+	}
+
+	var result struct {
+		Flavor Flavor `mapstructure:"flavor"`
+	}
+
+	cfg := &mapstructure.DecoderConfig{
+		DecodeHook: defaulter,
+		Result:     &result,
+	}
+	decoder, err := mapstructure.NewDecoder(cfg)
+	if err != nil {
+		return nil, err
+	}
+	err = decoder.Decode(gr.Body)
+	return &result.Flavor, err
+}
+
+// Flavor records represent (virtual) hardware configurations for server resources in a region.
+type Flavor struct {
+	// The ID field contains the flavor's unique identifier.
+	// For example, this identifier will be useful when specifying which hardware configuration to use for a new server instance.
+	ID string `mapstructure:"id"`
+
+	// The Disk and RAM fields provide a measure of storage space offered by the flavor, in GB and MB, respectively.
+	Disk int `mapstructure:"disk"`
+	RAM  int `mapstructure:"ram"`
+
+	// The Name field provides a human-readable moniker for the flavor.
+	Name string `mapstructure:"name"`
+
+	RxTxFactor float64 `mapstructure:"rxtx_factor"`
+
+	// Swap indicates how much space is reserved for swap.
+	// If not provided, this field will be set to 0.
+	Swap int `mapstructure:"swap"`
+
+	// VCPUs indicates how many (virtual) CPUs are available for this flavor.
+ VCPUs int `mapstructure:"vcpus"` +} + +// FlavorPage contains a single page of the response from a List call. +type FlavorPage struct { + pagination.LinkedPageBase +} + +// IsEmpty determines if a page contains any results. +func (p FlavorPage) IsEmpty() (bool, error) { + flavors, err := ExtractFlavors(p) + if err != nil { + return true, err + } + return len(flavors) == 0, nil +} + +// NextPageURL uses the response's embedded link reference to navigate to the next page of results. +func (p FlavorPage) NextPageURL() (string, error) { + type resp struct { + Links []gophercloud.Link `mapstructure:"flavors_links"` + } + + var r resp + err := mapstructure.Decode(p.Body, &r) + if err != nil { + return "", err + } + + return gophercloud.ExtractNextURL(r.Links) +} + +func defaulter(from, to reflect.Kind, v interface{}) (interface{}, error) { + if (from == reflect.String) && (to == reflect.Int) { + return 0, nil + } + return v, nil +} + +// ExtractFlavors provides access to the list of flavors in a page acquired from the List operation. +func ExtractFlavors(page pagination.Page) ([]Flavor, error) { + casted := page.(FlavorPage).Body + var container struct { + Flavors []Flavor `mapstructure:"flavors"` + } + + cfg := &mapstructure.DecoderConfig{ + DecodeHook: defaulter, + Result: &container, + } + decoder, err := mapstructure.NewDecoder(cfg) + if err != nil { + return container.Flavors, err + } + err = decoder.Decode(casted) + if err != nil { + return container.Flavors, err + } + + return container.Flavors, nil +} diff --git a/vendor/github.com/rackspace/gophercloud/openstack/compute/v2/flavors/urls.go b/vendor/github.com/rackspace/gophercloud/openstack/compute/v2/flavors/urls.go new file mode 100644 index 000000000..683c107dc --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/openstack/compute/v2/flavors/urls.go @@ -0,0 +1,13 @@ +package flavors + +import ( + "github.com/rackspace/gophercloud" +) + +func getURL(client *gophercloud.ServiceClient, id string) string { + return client.ServiceURL("flavors", id) +} + +func listURL(client *gophercloud.ServiceClient) string { + return client.ServiceURL("flavors", "detail") +} diff --git a/vendor/github.com/rackspace/gophercloud/openstack/compute/v2/flavors/urls_test.go b/vendor/github.com/rackspace/gophercloud/openstack/compute/v2/flavors/urls_test.go new file mode 100644 index 000000000..069da2496 --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/openstack/compute/v2/flavors/urls_test.go @@ -0,0 +1,26 @@ +package flavors + +import ( + "testing" + + "github.com/rackspace/gophercloud" + th "github.com/rackspace/gophercloud/testhelper" +) + +const endpoint = "http://localhost:57909/" + +func endpointClient() *gophercloud.ServiceClient { + return &gophercloud.ServiceClient{Endpoint: endpoint} +} + +func TestGetURL(t *testing.T) { + actual := getURL(endpointClient(), "foo") + expected := endpoint + "flavors/foo" + th.CheckEquals(t, expected, actual) +} + +func TestListURL(t *testing.T) { + actual := listURL(endpointClient()) + expected := endpoint + "flavors/detail" + th.CheckEquals(t, expected, actual) +} diff --git a/vendor/github.com/rackspace/gophercloud/openstack/compute/v2/images/doc.go b/vendor/github.com/rackspace/gophercloud/openstack/compute/v2/images/doc.go new file mode 100644 index 000000000..0edaa3f02 --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/openstack/compute/v2/images/doc.go @@ -0,0 +1,7 @@ +// Package images provides information and interaction with the image API +// resource in the OpenStack Compute 
service.
+//
+// An image is a collection of files used to create or rebuild a server.
+// Operators provide a number of pre-built OS images by default. You may also
+// create custom images from cloud servers you have launched.
+package images
diff --git a/vendor/github.com/rackspace/gophercloud/openstack/compute/v2/images/requests.go b/vendor/github.com/rackspace/gophercloud/openstack/compute/v2/images/requests.go
new file mode 100644
index 000000000..1e021ad4c
--- /dev/null
+++ b/vendor/github.com/rackspace/gophercloud/openstack/compute/v2/images/requests.go
@@ -0,0 +1,109 @@
+package images
+
+import (
+	"fmt"
+
+	"github.com/rackspace/gophercloud"
+	"github.com/rackspace/gophercloud/pagination"
+)
+
+// ListOptsBuilder allows extensions to add additional parameters to the
+// List request.
+type ListOptsBuilder interface {
+	ToImageListQuery() (string, error)
+}
+
+// ListOpts contain options for limiting the number of Images returned from a call to ListDetail.
+type ListOpts struct {
+	// When the image last changed status (in date-time format).
+	ChangesSince string `q:"changes-since"`
+	// The number of Images to return.
+	Limit int `q:"limit"`
+	// UUID of the Image at which to set a marker.
+	Marker string `q:"marker"`
+	// The name of the Image.
+	Name string `q:"name"`
+	// The name of the Server (in URL format).
+	Server string `q:"server"`
+	// The current status of the Image.
+	Status string `q:"status"`
+	// The value of the type of image (e.g. BASE, SERVER, ALL)
+	Type string `q:"type"`
+}
+
+// ToImageListQuery formats a ListOpts into a query string.
+func (opts ListOpts) ToImageListQuery() (string, error) {
+	q, err := gophercloud.BuildQueryString(opts)
+	if err != nil {
+		return "", err
+	}
+	return q.String(), nil
+}
+
+// ListDetail enumerates the available images.
+func ListDetail(client *gophercloud.ServiceClient, opts ListOptsBuilder) pagination.Pager {
+	url := listDetailURL(client)
+	if opts != nil {
+		query, err := opts.ToImageListQuery()
+		if err != nil {
+			return pagination.Pager{Err: err}
+		}
+		url += query
+	}
+
+	createPage := func(r pagination.PageResult) pagination.Page {
+		return ImagePage{pagination.LinkedPageBase{PageResult: r}}
+	}
+
+	return pagination.NewPager(client, url, createPage)
+}
+
+// Get acquires additional detail about a specific image by ID.
+// Use the Extract method on the result to interpret it as an OpenStack Image.
+func Get(client *gophercloud.ServiceClient, id string) GetResult {
+	var result GetResult
+	_, result.Err = client.Get(getURL(client, id), &result.Body, nil)
+	return result
+}
+
+// Delete deletes the specified image ID.
+func Delete(client *gophercloud.ServiceClient, id string) DeleteResult {
+	var result DeleteResult
+	_, result.Err = client.Delete(deleteURL(client, id), nil)
+	return result
+}
+
+// IDFromName is a convenience function that returns an image's ID given its name.
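+// If zero or more than one image matches, an error is returned. For example
+// (a sketch; the client and image name are placeholders):
+//
+//	imageID, err := images.IDFromName(computeClient, "Ubuntu 14.04")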
+func IDFromName(client *gophercloud.ServiceClient, name string) (string, error) { + imageCount := 0 + imageID := "" + if name == "" { + return "", fmt.Errorf("An image name must be provided.") + } + pager := ListDetail(client, &ListOpts{ + Name: name, + }) + pager.EachPage(func(page pagination.Page) (bool, error) { + imageList, err := ExtractImages(page) + if err != nil { + return false, err + } + + for _, i := range imageList { + if i.Name == name { + imageCount++ + imageID = i.ID + } + } + return true, nil + }) + + switch imageCount { + case 0: + return "", fmt.Errorf("Unable to find image: %s", name) + case 1: + return imageID, nil + default: + return "", fmt.Errorf("Found %d images matching %s", imageCount, name) + } +} diff --git a/vendor/github.com/rackspace/gophercloud/openstack/compute/v2/images/requests_test.go b/vendor/github.com/rackspace/gophercloud/openstack/compute/v2/images/requests_test.go new file mode 100644 index 000000000..93a97bdc6 --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/openstack/compute/v2/images/requests_test.go @@ -0,0 +1,191 @@ +package images + +import ( + "encoding/json" + "fmt" + "net/http" + "reflect" + "testing" + + "github.com/rackspace/gophercloud/pagination" + th "github.com/rackspace/gophercloud/testhelper" + fake "github.com/rackspace/gophercloud/testhelper/client" +) + +func TestListImages(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + th.Mux.HandleFunc("/images/detail", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + + w.Header().Add("Content-Type", "application/json") + r.ParseForm() + marker := r.Form.Get("marker") + switch marker { + case "": + fmt.Fprintf(w, ` + { + "images": [ + { + "status": "ACTIVE", + "updated": "2014-09-23T12:54:56Z", + "id": "f3e4a95d-1f4f-4989-97ce-f3a1fb8c04d7", + "OS-EXT-IMG-SIZE:size": 476704768, + "name": "F17-x86_64-cfntools", + "created": "2014-09-23T12:54:52Z", + "minDisk": 0, + "progress": 100, + "minRam": 0, + "metadata": {} + }, + { + "status": "ACTIVE", + "updated": "2014-09-23T12:51:43Z", + "id": "f90f6034-2570-4974-8351-6b49732ef2eb", + "OS-EXT-IMG-SIZE:size": 13167616, + "name": "cirros-0.3.2-x86_64-disk", + "created": "2014-09-23T12:51:42Z", + "minDisk": 0, + "progress": 100, + "minRam": 0, + "metadata": {} + } + ] + } + `) + case "2": + fmt.Fprintf(w, `{ "images": [] }`) + default: + t.Fatalf("Unexpected marker: [%s]", marker) + } + }) + + pages := 0 + options := &ListOpts{Limit: 2} + err := ListDetail(fake.ServiceClient(), options).EachPage(func(page pagination.Page) (bool, error) { + pages++ + + actual, err := ExtractImages(page) + if err != nil { + return false, err + } + + expected := []Image{ + Image{ + ID: "f3e4a95d-1f4f-4989-97ce-f3a1fb8c04d7", + Name: "F17-x86_64-cfntools", + Created: "2014-09-23T12:54:52Z", + Updated: "2014-09-23T12:54:56Z", + MinDisk: 0, + MinRAM: 0, + Progress: 100, + Status: "ACTIVE", + }, + Image{ + ID: "f90f6034-2570-4974-8351-6b49732ef2eb", + Name: "cirros-0.3.2-x86_64-disk", + Created: "2014-09-23T12:51:42Z", + Updated: "2014-09-23T12:51:43Z", + MinDisk: 0, + MinRAM: 0, + Progress: 100, + Status: "ACTIVE", + }, + } + + if !reflect.DeepEqual(expected, actual) { + t.Errorf("Unexpected page contents: expected %#v, got %#v", expected, actual) + } + + return false, nil + }) + + if err != nil { + t.Fatalf("EachPage error: %v", err) + } + if pages != 1 { + t.Errorf("Expected one page, got %d", pages) + } +} + +func TestGetImage(t *testing.T) { + th.SetupHTTP() + 
defer th.TeardownHTTP() + + th.Mux.HandleFunc("/images/12345678", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + + w.Header().Add("Content-Type", "application/json") + fmt.Fprintf(w, ` + { + "image": { + "status": "ACTIVE", + "updated": "2014-09-23T12:54:56Z", + "id": "f3e4a95d-1f4f-4989-97ce-f3a1fb8c04d7", + "OS-EXT-IMG-SIZE:size": 476704768, + "name": "F17-x86_64-cfntools", + "created": "2014-09-23T12:54:52Z", + "minDisk": 0, + "progress": 100, + "minRam": 0, + "metadata": {} + } + } + `) + }) + + actual, err := Get(fake.ServiceClient(), "12345678").Extract() + if err != nil { + t.Fatalf("Unexpected error from Get: %v", err) + } + + expected := &Image{ + Status: "ACTIVE", + Updated: "2014-09-23T12:54:56Z", + ID: "f3e4a95d-1f4f-4989-97ce-f3a1fb8c04d7", + Name: "F17-x86_64-cfntools", + Created: "2014-09-23T12:54:52Z", + MinDisk: 0, + Progress: 100, + MinRAM: 0, + } + + if !reflect.DeepEqual(expected, actual) { + t.Errorf("Expected %#v, but got %#v", expected, actual) + } +} + +func TestNextPageURL(t *testing.T) { + var page ImagePage + var body map[string]interface{} + bodyString := []byte(`{"images":{"links":[{"href":"http://192.154.23.87/12345/images/image3","rel":"bookmark"}]}, "images_links":[{"href":"http://192.154.23.87/12345/images/image4","rel":"next"}]}`) + err := json.Unmarshal(bodyString, &body) + if err != nil { + t.Fatalf("Error unmarshaling data into page body: %v", err) + } + page.Body = body + + expected := "http://192.154.23.87/12345/images/image4" + actual, err := page.NextPageURL() + th.AssertNoErr(t, err) + th.CheckEquals(t, expected, actual) +} + +// Test Image delete +func TestDeleteImage(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + th.Mux.HandleFunc("/images/12345678", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "DELETE") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + + w.WriteHeader(http.StatusNoContent) + }) + + res := Delete(fake.ServiceClient(), "12345678") + th.AssertNoErr(t, res.Err) +} diff --git a/vendor/github.com/rackspace/gophercloud/openstack/compute/v2/images/results.go b/vendor/github.com/rackspace/gophercloud/openstack/compute/v2/images/results.go new file mode 100644 index 000000000..40e814d1d --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/openstack/compute/v2/images/results.go @@ -0,0 +1,95 @@ +package images + +import ( + "github.com/mitchellh/mapstructure" + "github.com/rackspace/gophercloud" + "github.com/rackspace/gophercloud/pagination" +) + +// GetResult temporarily stores a Get response. +type GetResult struct { + gophercloud.Result +} + +// DeleteResult represents the result of an image.Delete operation. +type DeleteResult struct { + gophercloud.ErrResult +} + +// Extract interprets a GetResult as an Image. +func (gr GetResult) Extract() (*Image, error) { + if gr.Err != nil { + return nil, gr.Err + } + + var decoded struct { + Image Image `mapstructure:"image"` + } + + err := mapstructure.Decode(gr.Body, &decoded) + return &decoded.Image, err +} + +// Image is used for JSON (un)marshalling. +// It provides a description of an OS image. +type Image struct { + // ID contains the image's unique identifier. + ID string + + Created string + + // MinDisk and MinRAM specify the minimum resources a server must provide to be able to install the image. + MinDisk int + MinRAM int + + // Name provides a human-readable moniker for the OS image. 
+ Name string + + // The Progress and Status fields indicate image-creation status. + // Any usable image will have 100% progress. + Progress int + Status string + + Updated string +} + +// ImagePage contains a single page of results from a List operation. +// Use ExtractImages to convert it into a slice of usable structs. +type ImagePage struct { + pagination.LinkedPageBase +} + +// IsEmpty returns true if a page contains no Image results. +func (page ImagePage) IsEmpty() (bool, error) { + images, err := ExtractImages(page) + if err != nil { + return true, err + } + return len(images) == 0, nil +} + +// NextPageURL uses the response's embedded link reference to navigate to the next page of results. +func (page ImagePage) NextPageURL() (string, error) { + type resp struct { + Links []gophercloud.Link `mapstructure:"images_links"` + } + + var r resp + err := mapstructure.Decode(page.Body, &r) + if err != nil { + return "", err + } + + return gophercloud.ExtractNextURL(r.Links) +} + +// ExtractImages converts a page of List results into a slice of usable Image structs. +func ExtractImages(page pagination.Page) ([]Image, error) { + casted := page.(ImagePage).Body + var results struct { + Images []Image `mapstructure:"images"` + } + + err := mapstructure.Decode(casted, &results) + return results.Images, err +} diff --git a/vendor/github.com/rackspace/gophercloud/openstack/compute/v2/images/urls.go b/vendor/github.com/rackspace/gophercloud/openstack/compute/v2/images/urls.go new file mode 100644 index 000000000..b1bf1038f --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/openstack/compute/v2/images/urls.go @@ -0,0 +1,15 @@ +package images + +import "github.com/rackspace/gophercloud" + +func listDetailURL(client *gophercloud.ServiceClient) string { + return client.ServiceURL("images", "detail") +} + +func getURL(client *gophercloud.ServiceClient, id string) string { + return client.ServiceURL("images", id) +} + +func deleteURL(client *gophercloud.ServiceClient, id string) string { + return client.ServiceURL("images", id) +} diff --git a/vendor/github.com/rackspace/gophercloud/openstack/compute/v2/images/urls_test.go b/vendor/github.com/rackspace/gophercloud/openstack/compute/v2/images/urls_test.go new file mode 100644 index 000000000..b1ab3d679 --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/openstack/compute/v2/images/urls_test.go @@ -0,0 +1,26 @@ +package images + +import ( + "testing" + + "github.com/rackspace/gophercloud" + th "github.com/rackspace/gophercloud/testhelper" +) + +const endpoint = "http://localhost:57909/" + +func endpointClient() *gophercloud.ServiceClient { + return &gophercloud.ServiceClient{Endpoint: endpoint} +} + +func TestGetURL(t *testing.T) { + actual := getURL(endpointClient(), "foo") + expected := endpoint + "images/foo" + th.CheckEquals(t, expected, actual) +} + +func TestListDetailURL(t *testing.T) { + actual := listDetailURL(endpointClient()) + expected := endpoint + "images/detail" + th.CheckEquals(t, expected, actual) +} diff --git a/vendor/github.com/rackspace/gophercloud/openstack/compute/v2/servers/doc.go b/vendor/github.com/rackspace/gophercloud/openstack/compute/v2/servers/doc.go new file mode 100644 index 000000000..fe4567120 --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/openstack/compute/v2/servers/doc.go @@ -0,0 +1,6 @@ +// Package servers provides information and interaction with the server API +// resource in the OpenStack Compute service. +// +// A server is a virtual machine instance in the compute system. 
In order for +// one to be provisioned, a valid flavor and image are required. +package servers diff --git a/vendor/github.com/rackspace/gophercloud/openstack/compute/v2/servers/fixtures.go b/vendor/github.com/rackspace/gophercloud/openstack/compute/v2/servers/fixtures.go new file mode 100644 index 000000000..4339a16d4 --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/openstack/compute/v2/servers/fixtures.go @@ -0,0 +1,664 @@ +// +build fixtures + +package servers + +import ( + "fmt" + "net/http" + "testing" + + th "github.com/rackspace/gophercloud/testhelper" + "github.com/rackspace/gophercloud/testhelper/client" +) + +// ServerListBody contains the canned body of a servers.List response. +const ServerListBody = ` +{ + "servers": [ + { + "status": "ACTIVE", + "updated": "2014-09-25T13:10:10Z", + "hostId": "29d3c8c896a45aa4c34e52247875d7fefc3d94bbcc9f622b5d204362", + "OS-EXT-SRV-ATTR:host": "devstack", + "addresses": { + "private": [ + { + "OS-EXT-IPS-MAC:mac_addr": "fa:16:3e:7c:1b:2b", + "version": 4, + "addr": "10.0.0.32", + "OS-EXT-IPS:type": "fixed" + } + ] + }, + "links": [ + { + "href": "http://104.130.131.164:8774/v2/fcad67a6189847c4aecfa3c81a05783b/servers/ef079b0c-e610-4dfb-b1aa-b49f07ac48e5", + "rel": "self" + }, + { + "href": "http://104.130.131.164:8774/fcad67a6189847c4aecfa3c81a05783b/servers/ef079b0c-e610-4dfb-b1aa-b49f07ac48e5", + "rel": "bookmark" + } + ], + "key_name": null, + "image": { + "id": "f90f6034-2570-4974-8351-6b49732ef2eb", + "links": [ + { + "href": "http://104.130.131.164:8774/fcad67a6189847c4aecfa3c81a05783b/images/f90f6034-2570-4974-8351-6b49732ef2eb", + "rel": "bookmark" + } + ] + }, + "OS-EXT-STS:task_state": null, + "OS-EXT-STS:vm_state": "active", + "OS-EXT-SRV-ATTR:instance_name": "instance-0000001e", + "OS-SRV-USG:launched_at": "2014-09-25T13:10:10.000000", + "OS-EXT-SRV-ATTR:hypervisor_hostname": "devstack", + "flavor": { + "id": "1", + "links": [ + { + "href": "http://104.130.131.164:8774/fcad67a6189847c4aecfa3c81a05783b/flavors/1", + "rel": "bookmark" + } + ] + }, + "id": "ef079b0c-e610-4dfb-b1aa-b49f07ac48e5", + "security_groups": [ + { + "name": "default" + } + ], + "OS-SRV-USG:terminated_at": null, + "OS-EXT-AZ:availability_zone": "nova", + "user_id": "9349aff8be7545ac9d2f1d00999a23cd", + "name": "herp", + "created": "2014-09-25T13:10:02Z", + "tenant_id": "fcad67a6189847c4aecfa3c81a05783b", + "OS-DCF:diskConfig": "MANUAL", + "os-extended-volumes:volumes_attached": [], + "accessIPv4": "", + "accessIPv6": "", + "progress": 0, + "OS-EXT-STS:power_state": 1, + "config_drive": "", + "metadata": {} + }, + { + "status": "ACTIVE", + "updated": "2014-09-25T13:04:49Z", + "hostId": "29d3c8c896a45aa4c34e52247875d7fefc3d94bbcc9f622b5d204362", + "OS-EXT-SRV-ATTR:host": "devstack", + "addresses": { + "private": [ + { + "OS-EXT-IPS-MAC:mac_addr": "fa:16:3e:9e:89:be", + "version": 4, + "addr": "10.0.0.31", + "OS-EXT-IPS:type": "fixed" + } + ] + }, + "links": [ + { + "href": "http://104.130.131.164:8774/v2/fcad67a6189847c4aecfa3c81a05783b/servers/9e5476bd-a4ec-4653-93d6-72c93aa682ba", + "rel": "self" + }, + { + "href": "http://104.130.131.164:8774/fcad67a6189847c4aecfa3c81a05783b/servers/9e5476bd-a4ec-4653-93d6-72c93aa682ba", + "rel": "bookmark" + } + ], + "key_name": null, + "image": { + "id": "f90f6034-2570-4974-8351-6b49732ef2eb", + "links": [ + { + "href": "http://104.130.131.164:8774/fcad67a6189847c4aecfa3c81a05783b/images/f90f6034-2570-4974-8351-6b49732ef2eb", + "rel": "bookmark" + } + ] + }, + "OS-EXT-STS:task_state": null, + 
"OS-EXT-STS:vm_state": "active", + "OS-EXT-SRV-ATTR:instance_name": "instance-0000001d", + "OS-SRV-USG:launched_at": "2014-09-25T13:04:49.000000", + "OS-EXT-SRV-ATTR:hypervisor_hostname": "devstack", + "flavor": { + "id": "1", + "links": [ + { + "href": "http://104.130.131.164:8774/fcad67a6189847c4aecfa3c81a05783b/flavors/1", + "rel": "bookmark" + } + ] + }, + "id": "9e5476bd-a4ec-4653-93d6-72c93aa682ba", + "security_groups": [ + { + "name": "default" + } + ], + "OS-SRV-USG:terminated_at": null, + "OS-EXT-AZ:availability_zone": "nova", + "user_id": "9349aff8be7545ac9d2f1d00999a23cd", + "name": "derp", + "created": "2014-09-25T13:04:41Z", + "tenant_id": "fcad67a6189847c4aecfa3c81a05783b", + "OS-DCF:diskConfig": "MANUAL", + "os-extended-volumes:volumes_attached": [], + "accessIPv4": "", + "accessIPv6": "", + "progress": 0, + "OS-EXT-STS:power_state": 1, + "config_drive": "", + "metadata": {} + } + ] +} +` + +// SingleServerBody is the canned body of a Get request on an existing server. +const SingleServerBody = ` +{ + "server": { + "status": "ACTIVE", + "updated": "2014-09-25T13:04:49Z", + "hostId": "29d3c8c896a45aa4c34e52247875d7fefc3d94bbcc9f622b5d204362", + "OS-EXT-SRV-ATTR:host": "devstack", + "addresses": { + "private": [ + { + "OS-EXT-IPS-MAC:mac_addr": "fa:16:3e:9e:89:be", + "version": 4, + "addr": "10.0.0.31", + "OS-EXT-IPS:type": "fixed" + } + ] + }, + "links": [ + { + "href": "http://104.130.131.164:8774/v2/fcad67a6189847c4aecfa3c81a05783b/servers/9e5476bd-a4ec-4653-93d6-72c93aa682ba", + "rel": "self" + }, + { + "href": "http://104.130.131.164:8774/fcad67a6189847c4aecfa3c81a05783b/servers/9e5476bd-a4ec-4653-93d6-72c93aa682ba", + "rel": "bookmark" + } + ], + "key_name": null, + "image": { + "id": "f90f6034-2570-4974-8351-6b49732ef2eb", + "links": [ + { + "href": "http://104.130.131.164:8774/fcad67a6189847c4aecfa3c81a05783b/images/f90f6034-2570-4974-8351-6b49732ef2eb", + "rel": "bookmark" + } + ] + }, + "OS-EXT-STS:task_state": null, + "OS-EXT-STS:vm_state": "active", + "OS-EXT-SRV-ATTR:instance_name": "instance-0000001d", + "OS-SRV-USG:launched_at": "2014-09-25T13:04:49.000000", + "OS-EXT-SRV-ATTR:hypervisor_hostname": "devstack", + "flavor": { + "id": "1", + "links": [ + { + "href": "http://104.130.131.164:8774/fcad67a6189847c4aecfa3c81a05783b/flavors/1", + "rel": "bookmark" + } + ] + }, + "id": "9e5476bd-a4ec-4653-93d6-72c93aa682ba", + "security_groups": [ + { + "name": "default" + } + ], + "OS-SRV-USG:terminated_at": null, + "OS-EXT-AZ:availability_zone": "nova", + "user_id": "9349aff8be7545ac9d2f1d00999a23cd", + "name": "derp", + "created": "2014-09-25T13:04:41Z", + "tenant_id": "fcad67a6189847c4aecfa3c81a05783b", + "OS-DCF:diskConfig": "MANUAL", + "os-extended-volumes:volumes_attached": [], + "accessIPv4": "", + "accessIPv6": "", + "progress": 0, + "OS-EXT-STS:power_state": 1, + "config_drive": "", + "metadata": {} + } +} +` + +var ( + // ServerHerp is a Server struct that should correspond to the first result in ServerListBody. 
+ ServerHerp = Server{ + Status: "ACTIVE", + Updated: "2014-09-25T13:10:10Z", + HostID: "29d3c8c896a45aa4c34e52247875d7fefc3d94bbcc9f622b5d204362", + Addresses: map[string]interface{}{ + "private": []interface{}{ + map[string]interface{}{ + "OS-EXT-IPS-MAC:mac_addr": "fa:16:3e:7c:1b:2b", + "version": float64(4), + "addr": "10.0.0.32", + "OS-EXT-IPS:type": "fixed", + }, + }, + }, + Links: []interface{}{ + map[string]interface{}{ + "href": "http://104.130.131.164:8774/v2/fcad67a6189847c4aecfa3c81a05783b/servers/ef079b0c-e610-4dfb-b1aa-b49f07ac48e5", + "rel": "self", + }, + map[string]interface{}{ + "href": "http://104.130.131.164:8774/fcad67a6189847c4aecfa3c81a05783b/servers/ef079b0c-e610-4dfb-b1aa-b49f07ac48e5", + "rel": "bookmark", + }, + }, + Image: map[string]interface{}{ + "id": "f90f6034-2570-4974-8351-6b49732ef2eb", + "links": []interface{}{ + map[string]interface{}{ + "href": "http://104.130.131.164:8774/fcad67a6189847c4aecfa3c81a05783b/images/f90f6034-2570-4974-8351-6b49732ef2eb", + "rel": "bookmark", + }, + }, + }, + Flavor: map[string]interface{}{ + "id": "1", + "links": []interface{}{ + map[string]interface{}{ + "href": "http://104.130.131.164:8774/fcad67a6189847c4aecfa3c81a05783b/flavors/1", + "rel": "bookmark", + }, + }, + }, + ID: "ef079b0c-e610-4dfb-b1aa-b49f07ac48e5", + UserID: "9349aff8be7545ac9d2f1d00999a23cd", + Name: "herp", + Created: "2014-09-25T13:10:02Z", + TenantID: "fcad67a6189847c4aecfa3c81a05783b", + Metadata: map[string]interface{}{}, + SecurityGroups: []map[string]interface{}{ + map[string]interface{}{ + "name": "default", + }, + }, + } + + // ServerDerp is a Server struct that should correspond to the second server in ServerListBody. + ServerDerp = Server{ + Status: "ACTIVE", + Updated: "2014-09-25T13:04:49Z", + HostID: "29d3c8c896a45aa4c34e52247875d7fefc3d94bbcc9f622b5d204362", + Addresses: map[string]interface{}{ + "private": []interface{}{ + map[string]interface{}{ + "OS-EXT-IPS-MAC:mac_addr": "fa:16:3e:9e:89:be", + "version": float64(4), + "addr": "10.0.0.31", + "OS-EXT-IPS:type": "fixed", + }, + }, + }, + Links: []interface{}{ + map[string]interface{}{ + "href": "http://104.130.131.164:8774/v2/fcad67a6189847c4aecfa3c81a05783b/servers/9e5476bd-a4ec-4653-93d6-72c93aa682ba", + "rel": "self", + }, + map[string]interface{}{ + "href": "http://104.130.131.164:8774/fcad67a6189847c4aecfa3c81a05783b/servers/9e5476bd-a4ec-4653-93d6-72c93aa682ba", + "rel": "bookmark", + }, + }, + Image: map[string]interface{}{ + "id": "f90f6034-2570-4974-8351-6b49732ef2eb", + "links": []interface{}{ + map[string]interface{}{ + "href": "http://104.130.131.164:8774/fcad67a6189847c4aecfa3c81a05783b/images/f90f6034-2570-4974-8351-6b49732ef2eb", + "rel": "bookmark", + }, + }, + }, + Flavor: map[string]interface{}{ + "id": "1", + "links": []interface{}{ + map[string]interface{}{ + "href": "http://104.130.131.164:8774/fcad67a6189847c4aecfa3c81a05783b/flavors/1", + "rel": "bookmark", + }, + }, + }, + ID: "9e5476bd-a4ec-4653-93d6-72c93aa682ba", + UserID: "9349aff8be7545ac9d2f1d00999a23cd", + Name: "derp", + Created: "2014-09-25T13:04:41Z", + TenantID: "fcad67a6189847c4aecfa3c81a05783b", + Metadata: map[string]interface{}{}, + SecurityGroups: []map[string]interface{}{ + map[string]interface{}{ + "name": "default", + }, + }, + } +) + +// HandleServerCreationSuccessfully sets up the test server to respond to a server creation request +// with a given response. 
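+// Tests pass in the canned body they want the fixture to return, e.g.
+// (a sketch using the canned body above):
+//
+//	HandleServerCreationSuccessfully(t, SingleServerBody)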
+func HandleServerCreationSuccessfully(t *testing.T, response string) { + th.Mux.HandleFunc("/servers", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "POST") + th.TestHeader(t, r, "X-Auth-Token", client.TokenID) + th.TestJSONRequest(t, r, `{ + "server": { + "name": "derp", + "imageRef": "f90f6034-2570-4974-8351-6b49732ef2eb", + "flavorRef": "1" + } + }`) + + w.WriteHeader(http.StatusAccepted) + w.Header().Add("Content-Type", "application/json") + fmt.Fprintf(w, response) + }) +} + +// HandleServerListSuccessfully sets up the test server to respond to a server List request. +func HandleServerListSuccessfully(t *testing.T) { + th.Mux.HandleFunc("/servers/detail", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", client.TokenID) + + w.Header().Add("Content-Type", "application/json") + r.ParseForm() + marker := r.Form.Get("marker") + switch marker { + case "": + fmt.Fprintf(w, ServerListBody) + case "9e5476bd-a4ec-4653-93d6-72c93aa682ba": + fmt.Fprintf(w, `{ "servers": [] }`) + default: + t.Fatalf("/servers/detail invoked with unexpected marker=[%s]", marker) + } + }) +} + +// HandleServerDeletionSuccessfully sets up the test server to respond to a server deletion request. +func HandleServerDeletionSuccessfully(t *testing.T) { + th.Mux.HandleFunc("/servers/asdfasdfasdf", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "DELETE") + th.TestHeader(t, r, "X-Auth-Token", client.TokenID) + + w.WriteHeader(http.StatusNoContent) + }) +} + +// HandleServerGetSuccessfully sets up the test server to respond to a server Get request. +func HandleServerGetSuccessfully(t *testing.T) { + th.Mux.HandleFunc("/servers/1234asdf", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", client.TokenID) + th.TestHeader(t, r, "Accept", "application/json") + + fmt.Fprintf(w, SingleServerBody) + }) +} + +// HandleServerUpdateSuccessfully sets up the test server to respond to a server Update request. +func HandleServerUpdateSuccessfully(t *testing.T) { + th.Mux.HandleFunc("/servers/1234asdf", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "PUT") + th.TestHeader(t, r, "X-Auth-Token", client.TokenID) + th.TestHeader(t, r, "Accept", "application/json") + th.TestHeader(t, r, "Content-Type", "application/json") + th.TestJSONRequest(t, r, `{ "server": { "name": "new-name" } }`) + + fmt.Fprintf(w, SingleServerBody) + }) +} + +// HandleAdminPasswordChangeSuccessfully sets up the test server to respond to a server password +// change request. +func HandleAdminPasswordChangeSuccessfully(t *testing.T) { + th.Mux.HandleFunc("/servers/1234asdf/action", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "POST") + th.TestHeader(t, r, "X-Auth-Token", client.TokenID) + th.TestJSONRequest(t, r, `{ "changePassword": { "adminPass": "new-password" } }`) + + w.WriteHeader(http.StatusAccepted) + }) +} + +// HandleRebootSuccessfully sets up the test server to respond to a reboot request with success. +func HandleRebootSuccessfully(t *testing.T) { + th.Mux.HandleFunc("/servers/1234asdf/action", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "POST") + th.TestHeader(t, r, "X-Auth-Token", client.TokenID) + th.TestJSONRequest(t, r, `{ "reboot": { "type": "SOFT" } }`) + + w.WriteHeader(http.StatusAccepted) + }) +} + +// HandleRebuildSuccessfully sets up the test server to respond to a rebuild request with success. 
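+// As with the creation fixture above, callers choose the canned response body
+// (a sketch):
+//
+//	HandleRebuildSuccessfully(t, SingleServerBody)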
+func HandleRebuildSuccessfully(t *testing.T, response string) {
+	th.Mux.HandleFunc("/servers/1234asdf/action", func(w http.ResponseWriter, r *http.Request) {
+		th.TestMethod(t, r, "POST")
+		th.TestHeader(t, r, "X-Auth-Token", client.TokenID)
+		th.TestJSONRequest(t, r, `
+			{
+				"rebuild": {
+					"name": "new-name",
+					"adminPass": "swordfish",
+					"imageRef": "http://104.130.131.164:8774/fcad67a6189847c4aecfa3c81a05783b/images/f90f6034-2570-4974-8351-6b49732ef2eb",
+					"accessIPv4": "1.2.3.4"
+				}
+			}
+		`)
+
+		w.WriteHeader(http.StatusAccepted)
+		w.Header().Add("Content-Type", "application/json")
+		fmt.Fprintf(w, response)
+	})
+}
+
+// HandleServerRescueSuccessfully sets up the test server to respond to a server Rescue request.
+func HandleServerRescueSuccessfully(t *testing.T) {
+	th.Mux.HandleFunc("/servers/1234asdf/action", func(w http.ResponseWriter, r *http.Request) {
+		th.TestMethod(t, r, "POST")
+		th.TestHeader(t, r, "X-Auth-Token", client.TokenID)
+		th.TestJSONRequest(t, r, `{ "rescue": { "adminPass": "1234567890" } }`)
+
+		w.WriteHeader(http.StatusOK)
+		w.Write([]byte(`{ "adminPass": "1234567890" }`))
+	})
+}
+
+// HandleMetadatumGetSuccessfully sets up the test server to respond to a metadatum Get request.
+func HandleMetadatumGetSuccessfully(t *testing.T) {
+	th.Mux.HandleFunc("/servers/1234asdf/metadata/foo", func(w http.ResponseWriter, r *http.Request) {
+		th.TestMethod(t, r, "GET")
+		th.TestHeader(t, r, "X-Auth-Token", client.TokenID)
+		th.TestHeader(t, r, "Accept", "application/json")
+
+		w.WriteHeader(http.StatusOK)
+		w.Header().Add("Content-Type", "application/json")
+		w.Write([]byte(`{ "meta": {"foo":"bar"}}`))
+	})
+}
+
+// HandleMetadatumCreateSuccessfully sets up the test server to respond to a metadatum Create request.
+func HandleMetadatumCreateSuccessfully(t *testing.T) {
+	th.Mux.HandleFunc("/servers/1234asdf/metadata/foo", func(w http.ResponseWriter, r *http.Request) {
+		th.TestMethod(t, r, "PUT")
+		th.TestHeader(t, r, "X-Auth-Token", client.TokenID)
+		th.TestJSONRequest(t, r, `{
+			"meta": {
+				"foo": "bar"
+			}
+		}`)
+
+		w.WriteHeader(http.StatusOK)
+		w.Header().Add("Content-Type", "application/json")
+		w.Write([]byte(`{ "meta": {"foo":"bar"}}`))
+	})
+}
+
+// HandleMetadatumDeleteSuccessfully sets up the test server to respond to a metadatum Delete request.
+func HandleMetadatumDeleteSuccessfully(t *testing.T) {
+	th.Mux.HandleFunc("/servers/1234asdf/metadata/foo", func(w http.ResponseWriter, r *http.Request) {
+		th.TestMethod(t, r, "DELETE")
+		th.TestHeader(t, r, "X-Auth-Token", client.TokenID)
+
+		w.WriteHeader(http.StatusNoContent)
+	})
+}
+
+// HandleMetadataGetSuccessfully sets up the test server to respond to a metadata Get request.
+func HandleMetadataGetSuccessfully(t *testing.T) {
+	th.Mux.HandleFunc("/servers/1234asdf/metadata", func(w http.ResponseWriter, r *http.Request) {
+		th.TestMethod(t, r, "GET")
+		th.TestHeader(t, r, "X-Auth-Token", client.TokenID)
+		th.TestHeader(t, r, "Accept", "application/json")
+
+		w.WriteHeader(http.StatusOK)
+		w.Write([]byte(`{ "metadata": {"foo":"bar", "this":"that"}}`))
+	})
+}
+
+// HandleMetadataResetSuccessfully sets up the test server to respond to a metadata Reset request.
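+// A sketch of wiring this fixture into a test, mirroring the other handlers
+// in this file:
+//
+//	th.SetupHTTP()
+//	defer th.TeardownHTTP()
+//	HandleMetadataResetSuccessfully(t)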
+func HandleMetadataResetSuccessfully(t *testing.T) {
+	th.Mux.HandleFunc("/servers/1234asdf/metadata", func(w http.ResponseWriter, r *http.Request) {
+		th.TestMethod(t, r, "PUT")
+		th.TestHeader(t, r, "X-Auth-Token", client.TokenID)
+		th.TestJSONRequest(t, r, `{
+			"metadata": {
+				"foo": "bar",
+				"this": "that"
+			}
+		}`)
+
+		w.WriteHeader(http.StatusOK)
+		w.Header().Add("Content-Type", "application/json")
+		w.Write([]byte(`{ "metadata": {"foo":"bar", "this":"that"}}`))
+	})
+}
+
+// HandleMetadataUpdateSuccessfully sets up the test server to respond to a metadata Update request.
+func HandleMetadataUpdateSuccessfully(t *testing.T) {
+	th.Mux.HandleFunc("/servers/1234asdf/metadata", func(w http.ResponseWriter, r *http.Request) {
+		th.TestMethod(t, r, "POST")
+		th.TestHeader(t, r, "X-Auth-Token", client.TokenID)
+		th.TestJSONRequest(t, r, `{
+			"metadata": {
+				"foo": "baz",
+				"this": "those"
+			}
+		}`)
+
+		w.WriteHeader(http.StatusOK)
+		w.Header().Add("Content-Type", "application/json")
+		w.Write([]byte(`{ "metadata": {"foo":"baz", "this":"those"}}`))
+	})
+}
+
+// ListAddressesExpected represents an expected response from a ListAddresses request.
+var ListAddressesExpected = map[string][]Address{
+	"public": []Address{
+		Address{
+			Version: 4,
+			Address: "50.56.176.35",
+		},
+		Address{
+			Version: 6,
+			Address: "2001:4800:780e:510:be76:4eff:fe04:84a8",
+		},
+	},
+	"private": []Address{
+		Address{
+			Version: 4,
+			Address: "10.180.3.155",
+		},
+	},
+}
+
+// HandleAddressListSuccessfully sets up the test server to respond to a ListAddresses request.
+func HandleAddressListSuccessfully(t *testing.T) {
+	th.Mux.HandleFunc("/servers/asdfasdfasdf/ips", func(w http.ResponseWriter, r *http.Request) {
+		th.TestMethod(t, r, "GET")
+		th.TestHeader(t, r, "X-Auth-Token", client.TokenID)
+
+		w.Header().Add("Content-Type", "application/json")
+		fmt.Fprintf(w, `{
+			"addresses": {
+				"public": [
+					{
+						"version": 4,
+						"addr": "50.56.176.35"
+					},
+					{
+						"version": 6,
+						"addr": "2001:4800:780e:510:be76:4eff:fe04:84a8"
+					}
+				],
+				"private": [
+					{
+						"version": 4,
+						"addr": "10.180.3.155"
+					}
+				]
+			}
+		}`)
+	})
+}
+
+// ListNetworkAddressesExpected represents an expected response from a ListAddressesByNetwork request.
+var ListNetworkAddressesExpected = []Address{
+	Address{
+		Version: 4,
+		Address: "50.56.176.35",
+	},
+	Address{
+		Version: 6,
+		Address: "2001:4800:780e:510:be76:4eff:fe04:84a8",
+	},
+}
+
+// HandleNetworkAddressListSuccessfully sets up the test server to respond to a ListAddressesByNetwork request.
+func HandleNetworkAddressListSuccessfully(t *testing.T) {
+	th.Mux.HandleFunc("/servers/asdfasdfasdf/ips/public", func(w http.ResponseWriter, r *http.Request) {
+		th.TestMethod(t, r, "GET")
+		th.TestHeader(t, r, "X-Auth-Token", client.TokenID)
+
+		w.Header().Add("Content-Type", "application/json")
+		fmt.Fprintf(w, `{
+			"public": [
+				{
+					"version": 4,
+					"addr": "50.56.176.35"
+				},
+				{
+					"version": 6,
+					"addr": "2001:4800:780e:510:be76:4eff:fe04:84a8"
+				}
+			]
+		}`)
+	})
+}
+
+// HandleCreateServerImageSuccessfully sets up the test server to respond to a TestCreateServerImage request.
+func HandleCreateServerImageSuccessfully(t *testing.T) { + th.Mux.HandleFunc("/servers/serverimage/action", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "POST") + th.TestHeader(t, r, "X-Auth-Token", client.TokenID) + w.Header().Add("Location", "https://0.0.0.0/images/xxxx-xxxxx-xxxxx-xxxx") + w.WriteHeader(http.StatusAccepted) + }) +} + diff --git a/vendor/github.com/rackspace/gophercloud/openstack/compute/v2/servers/requests.go b/vendor/github.com/rackspace/gophercloud/openstack/compute/v2/servers/requests.go new file mode 100644 index 000000000..f9839d9f2 --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/openstack/compute/v2/servers/requests.go @@ -0,0 +1,852 @@ +package servers + +import ( + "encoding/base64" + "encoding/json" + "errors" + "fmt" + + "github.com/rackspace/gophercloud" + "github.com/rackspace/gophercloud/openstack/compute/v2/flavors" + "github.com/rackspace/gophercloud/openstack/compute/v2/images" + "github.com/rackspace/gophercloud/pagination" +) + +// ListOptsBuilder allows extensions to add additional parameters to the +// List request. +type ListOptsBuilder interface { + ToServerListQuery() (string, error) +} + +// ListOpts allows the filtering and sorting of paginated collections through +// the API. Filtering is achieved by passing in struct field values that map to +// the server attributes you want to see returned. Marker and Limit are used +// for pagination. +type ListOpts struct { + // A time/date stamp for when the server last changed status. + ChangesSince string `q:"changes-since"` + + // Name of the image in URL format. + Image string `q:"image"` + + // Name of the flavor in URL format. + Flavor string `q:"flavor"` + + // Name of the server as a string; can be queried with regular expressions. + // Realize that ?name=bob returns both bob and bobb. If you need to match bob + // only, you can use a regular expression matching the syntax of the + // underlying database server implemented for Compute. + Name string `q:"name"` + + // Value of the status of the server so that you can filter on "ACTIVE" for example. + Status string `q:"status"` + + // Name of the host as a string. + Host string `q:"host"` + + // UUID of the server at which you want to set a marker. + Marker string `q:"marker"` + + // Integer value for the limit of values to return. + Limit int `q:"limit"` + + // Bool to show all tenants + AllTenants bool `q:"all_tenants"` +} + +// ToServerListQuery formats a ListOpts into a query string. +func (opts ListOpts) ToServerListQuery() (string, error) { + q, err := gophercloud.BuildQueryString(opts) + if err != nil { + return "", err + } + return q.String(), nil +} + +// List makes a request against the API to list servers accessible to you. +func List(client *gophercloud.ServiceClient, opts ListOptsBuilder) pagination.Pager { + url := listDetailURL(client) + + if opts != nil { + query, err := opts.ToServerListQuery() + if err != nil { + return pagination.Pager{Err: err} + } + url += query + } + + createPageFn := func(r pagination.PageResult) pagination.Page { + return ServerPage{pagination.LinkedPageBase{PageResult: r}} + } + + return pagination.NewPager(client, url, createPageFn) +} + +// CreateOptsBuilder describes struct types that can be accepted by the Create call. +// The CreateOpts struct in this package does. +type CreateOptsBuilder interface { + ToServerCreateMap() (map[string]interface{}, error) +} + +// Network is used within CreateOpts to control a new server's network attachments. 
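+// For example, attaching the new server to a single network by UUID with a
+// fixed address (a sketch; the UUID is a placeholder):
+//
+//	servers.Network{UUID: "d32019d3-bc6e-4319-9c1d-6722fc136a22", FixedIP: "10.0.0.5"}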
+type Network struct { + // UUID of a nova-network to attach to the newly provisioned server. + // Required unless Port is provided. + UUID string + + // Port of a neutron network to attach to the newly provisioned server. + // Required unless UUID is provided. + Port string + + // FixedIP [optional] specifies a fixed IPv4 address to be used on this network. + FixedIP string +} + +// Personality is an array of files that are injected into the server at launch. +type Personality []*File + +// File is used within CreateOpts and RebuildOpts to inject a file into the server at launch. +// File implements the json.Marshaler interface, so when a Create or Rebuild operation is requested, +// json.Marshal will call File's MarshalJSON method. +type File struct { + // Path of the file + Path string + // Contents of the file. Maximum content size is 255 bytes. + Contents []byte +} + +// MarshalJSON marshals the escaped file, base64 encoding the contents. +func (f *File) MarshalJSON() ([]byte, error) { + file := struct { + Path string `json:"path"` + Contents string `json:"contents"` + }{ + Path: f.Path, + Contents: base64.StdEncoding.EncodeToString(f.Contents), + } + return json.Marshal(file) +} + +// CreateOpts specifies server creation parameters. +type CreateOpts struct { + // Name [required] is the name to assign to the newly launched server. + Name string + + // ImageRef [optional; required if ImageName is not provided] is the ID or full + // URL to the image that contains the server's OS and initial state. + // Also optional if using the boot-from-volume extension. + ImageRef string + + // ImageName [optional; required if ImageRef is not provided] is the name of the + // image that contains the server's OS and initial state. + // Also optional if using the boot-from-volume extension. + ImageName string + + // FlavorRef [optional; required if FlavorName is not provided] is the ID or + // full URL to the flavor that describes the server's specs. + FlavorRef string + + // FlavorName [optional; required if FlavorRef is not provided] is the name of + // the flavor that describes the server's specs. + FlavorName string + + // SecurityGroups [optional] lists the names of the security groups to which this server should belong. + SecurityGroups []string + + // UserData [optional] contains configuration information or scripts to use upon launch. + // Create will base64-encode it for you. + UserData []byte + + // AvailabilityZone [optional] in which to launch the server. + AvailabilityZone string + + // Networks [optional] dictates how this server will be attached to available networks. + // By default, the server will be attached to all isolated networks for the tenant. + Networks []Network + + // Metadata [optional] contains key-value pairs (up to 255 bytes each) to attach to the server. + Metadata map[string]string + + // Personality [optional] includes files to inject into the server at launch. + // Create will base64-encode file contents for you. + Personality Personality + + // ConfigDrive [optional] enables metadata injection through a configuration drive. + ConfigDrive bool + + // AdminPass [optional] sets the root user password. If not set, a randomly-generated + // password will be created and returned in the response. + AdminPass string + + // AccessIPv4 [optional] specifies an IPv4 address for the instance. + AccessIPv4 string + + // AccessIPv6 [optional] specifies an IPv6 address for the instance. 
+	AccessIPv6 string
+}
+
+// ToServerCreateMap assembles a request body based on the contents of a CreateOpts.
+func (opts CreateOpts) ToServerCreateMap() (map[string]interface{}, error) {
+	server := make(map[string]interface{})
+
+	server["name"] = opts.Name
+	server["imageRef"] = opts.ImageRef
+	server["imageName"] = opts.ImageName
+	server["flavorRef"] = opts.FlavorRef
+	server["flavorName"] = opts.FlavorName
+
+	if opts.UserData != nil {
+		encoded := base64.StdEncoding.EncodeToString(opts.UserData)
+		server["user_data"] = &encoded
+	}
+	if opts.ConfigDrive {
+		server["config_drive"] = "true"
+	}
+	if opts.AvailabilityZone != "" {
+		server["availability_zone"] = opts.AvailabilityZone
+	}
+	if opts.Metadata != nil {
+		server["metadata"] = opts.Metadata
+	}
+	if opts.AdminPass != "" {
+		server["adminPass"] = opts.AdminPass
+	}
+	if opts.AccessIPv4 != "" {
+		server["accessIPv4"] = opts.AccessIPv4
+	}
+	if opts.AccessIPv6 != "" {
+		server["accessIPv6"] = opts.AccessIPv6
+	}
+
+	if len(opts.SecurityGroups) > 0 {
+		securityGroups := make([]map[string]interface{}, len(opts.SecurityGroups))
+		for i, groupName := range opts.SecurityGroups {
+			securityGroups[i] = map[string]interface{}{"name": groupName}
+		}
+		server["security_groups"] = securityGroups
+	}
+
+	if len(opts.Networks) > 0 {
+		networks := make([]map[string]interface{}, len(opts.Networks))
+		for i, net := range opts.Networks {
+			networks[i] = make(map[string]interface{})
+			if net.UUID != "" {
+				networks[i]["uuid"] = net.UUID
+			}
+			if net.Port != "" {
+				networks[i]["port"] = net.Port
+			}
+			if net.FixedIP != "" {
+				networks[i]["fixed_ip"] = net.FixedIP
+			}
+		}
+		server["networks"] = networks
+	}
+
+	if len(opts.Personality) > 0 {
+		server["personality"] = opts.Personality
+	}
+
+	return map[string]interface{}{"server": server}, nil
+}
+
+// Create requests a server to be provisioned to the user in the current tenant.
+func Create(client *gophercloud.ServiceClient, opts CreateOptsBuilder) CreateResult {
+	var res CreateResult
+
+	reqBody, err := opts.ToServerCreateMap()
+	if err != nil {
+		res.Err = err
+		return res
+	}
+
+	// If ImageRef isn't provided, use ImageName to ascertain the image ID.
+	if reqBody["server"].(map[string]interface{})["imageRef"].(string) == "" {
+		imageName := reqBody["server"].(map[string]interface{})["imageName"].(string)
+		if imageName == "" {
+			res.Err = errors.New("Either ImageRef or ImageName must be provided.")
+			return res
+		}
+		imageID, err := images.IDFromName(client, imageName)
+		if err != nil {
+			res.Err = err
+			return res
+		}
+		reqBody["server"].(map[string]interface{})["imageRef"] = imageID
+	}
+	delete(reqBody["server"].(map[string]interface{}), "imageName")
+
+	// If FlavorRef isn't provided, use FlavorName to ascertain the flavor ID.
+	if reqBody["server"].(map[string]interface{})["flavorRef"].(string) == "" {
+		flavorName := reqBody["server"].(map[string]interface{})["flavorName"].(string)
+		if flavorName == "" {
+			res.Err = errors.New("Either FlavorRef or FlavorName must be provided.")
+			return res
+		}
+		flavorID, err := flavors.IDFromName(client, flavorName)
+		if err != nil {
+			res.Err = err
+			return res
+		}
+		reqBody["server"].(map[string]interface{})["flavorRef"] = flavorID
+	}
+	delete(reqBody["server"].(map[string]interface{}), "flavorName")
+
+	_, res.Err = client.Post(listURL(client), reqBody, &res.Body, nil)
+	return res
+}
+
+// Delete requests that a server previously provisioned be removed from your account.
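+//
+// A minimal sketch, assuming an authenticated compute client c:
+//
+//	if err := Delete(c, "1234asdf").ExtractErr(); err != nil {
+//		// handle the failed deletion
+//	}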
+func Delete(client *gophercloud.ServiceClient, id string) DeleteResult { + var res DeleteResult + _, res.Err = client.Delete(deleteURL(client, id), nil) + return res +} + +// Get requests details on a single server, by ID. +func Get(client *gophercloud.ServiceClient, id string) GetResult { + var result GetResult + _, result.Err = client.Get(getURL(client, id), &result.Body, &gophercloud.RequestOpts{ + OkCodes: []int{200, 203}, + }) + return result +} + +// UpdateOptsBuilder allows extensions to add additional attributes to the Update request. +type UpdateOptsBuilder interface { + ToServerUpdateMap() map[string]interface{} +} + +// UpdateOpts specifies the base attributes that may be updated on an existing server. +type UpdateOpts struct { + // Name [optional] changes the displayed name of the server. + // The server host name will *not* change. + // Server names are not constrained to be unique, even within the same tenant. + Name string + + // AccessIPv4 [optional] provides a new IPv4 address for the instance. + AccessIPv4 string + + // AccessIPv6 [optional] provides a new IPv6 address for the instance. + AccessIPv6 string +} + +// ToServerUpdateMap formats an UpdateOpts structure into a request body. +func (opts UpdateOpts) ToServerUpdateMap() map[string]interface{} { + server := make(map[string]string) + if opts.Name != "" { + server["name"] = opts.Name + } + if opts.AccessIPv4 != "" { + server["accessIPv4"] = opts.AccessIPv4 + } + if opts.AccessIPv6 != "" { + server["accessIPv6"] = opts.AccessIPv6 + } + return map[string]interface{}{"server": server} +} + +// Update requests that various attributes of the indicated server be changed. +func Update(client *gophercloud.ServiceClient, id string, opts UpdateOptsBuilder) UpdateResult { + var result UpdateResult + reqBody := opts.ToServerUpdateMap() + _, result.Err = client.Put(updateURL(client, id), reqBody, &result.Body, &gophercloud.RequestOpts{ + OkCodes: []int{200}, + }) + return result +} + +// ChangeAdminPassword alters the administrator or root password for a specified server. +func ChangeAdminPassword(client *gophercloud.ServiceClient, id, newPassword string) ActionResult { + var req struct { + ChangePassword struct { + AdminPass string `json:"adminPass"` + } `json:"changePassword"` + } + + req.ChangePassword.AdminPass = newPassword + + var res ActionResult + _, res.Err = client.Post(actionURL(client, id), req, nil, nil) + return res +} + +// ErrArgument errors occur when an argument supplied to a package function +// fails to fall within acceptable values. For example, the Reboot() function +// expects the "how" parameter to be one of HardReboot or SoftReboot. These +// constants are (currently) strings, leading someone to wonder if they can pass +// other string values instead, perhaps in an effort to break the API of their +// provider. Reboot() returns this error in this situation. +// +// Function identifies which function was called/which function is generating +// the error. +// Argument identifies which formal argument was responsible for producing the +// error. +// Value provides the value as it was passed into the function. +type ErrArgument struct { + Function, Argument string + Value interface{} +} + +// Error yields a useful diagnostic for debugging purposes. 
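+// For example, Reboot() returns an *ErrArgument when its "how" parameter is
+// neither SoftReboot nor HardReboot; a minimal sketch of detecting it, assuming
+// an authenticated compute client c:
+//
+//	res := Reboot(c, "1234asdf", RebootMethod("WARM"))
+//	if argErr, ok := res.Err.(*ErrArgument); ok {
+//		fmt.Println(argErr) // Bad argument in call to Reboot, ...
+//	}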
+func (e *ErrArgument) Error() string {
+	return fmt.Sprintf("Bad argument in call to %s, formal parameter %s, value %#v", e.Function, e.Argument, e.Value)
+}
+
+func (e *ErrArgument) String() string {
+	return e.Error()
+}
+
+// RebootMethod describes the mechanisms by which a server reboot can be requested.
+type RebootMethod string
+
+// These constants determine how a server should be rebooted.
+// See the Reboot() function for further details.
+const (
+	SoftReboot RebootMethod = "SOFT"
+	HardReboot RebootMethod = "HARD"
+	OSReboot                = SoftReboot
+	PowerCycle              = HardReboot
+)
+
+// Reboot requests that a given server reboot.
+// Two methods exist for rebooting a server:
+//
+// HardReboot (aka PowerCycle) restarts the server instance by physically cutting power to the machine, or if a VM,
+// terminating it at the hypervisor level.
+// It's done. Kaput. Full stop.
+// Then, after a brief while, power is restored or the VM instance restarted.
+//
+// SoftReboot (aka OSReboot) simply tells the OS to restart under its own procedures.
+// E.g., in Linux, asking it to enter runlevel 6, or executing "sudo shutdown -r now", or by asking Windows to restart the machine.
+func Reboot(client *gophercloud.ServiceClient, id string, how RebootMethod) ActionResult {
+	var res ActionResult
+
+	if (how != SoftReboot) && (how != HardReboot) {
+		res.Err = &ErrArgument{
+			Function: "Reboot",
+			Argument: "how",
+			Value:    how,
+		}
+		return res
+	}
+
+	reqBody := struct {
+		C map[string]string `json:"reboot"`
+	}{
+		map[string]string{"type": string(how)},
+	}
+
+	_, res.Err = client.Post(actionURL(client, id), reqBody, nil, nil)
+	return res
+}
+
+// RebuildOptsBuilder is an interface that allows extensions to override the
+// default behavior of rebuild options.
+type RebuildOptsBuilder interface {
+	ToServerRebuildMap() (map[string]interface{}, error)
+}
+
+// RebuildOpts represents the configuration options used in a server rebuild
+// operation.
+type RebuildOpts struct {
+	// Required. The ID of the image you want your server to be provisioned on.
+	ImageID string
+
+	// Name to set the server to.
+	Name string
+
+	// Required. The server's admin password.
+	AdminPass string
+
+	// AccessIPv4 [optional] provides a new IPv4 address for the instance.
+	AccessIPv4 string
+
+	// AccessIPv6 [optional] provides a new IPv6 address for the instance.
+	AccessIPv6 string
+
+	// Metadata [optional] contains key-value pairs (up to 255 bytes each) to attach to the server.
+	Metadata map[string]string
+
+	// Personality [optional] includes files to inject into the server at launch.
+	// Rebuild will base64-encode file contents for you.
+	Personality Personality
+}
+
+// ToServerRebuildMap formats a RebuildOpts struct into a map for use in JSON.
+func (opts RebuildOpts) ToServerRebuildMap() (map[string]interface{}, error) {
+	server := make(map[string]interface{})
+
+	// Report the first missing required field immediately, rather than letting
+	// a later check overwrite the error.
+	if opts.AdminPass == "" {
+		return server, fmt.Errorf("AdminPass is required")
+	}
+
+	if opts.ImageID == "" {
+		return server, fmt.Errorf("ImageID is required")
+	}
+
+	server["name"] = opts.Name
+	server["adminPass"] = opts.AdminPass
+	server["imageRef"] = opts.ImageID
+
+	if opts.AccessIPv4 != "" {
+		server["accessIPv4"] = opts.AccessIPv4
+	}
+
+	if opts.AccessIPv6 != "" {
+		server["accessIPv6"] = opts.AccessIPv6
+	}
+
+	if opts.Metadata != nil {
+		server["metadata"] = opts.Metadata
+	}
+
+	if len(opts.Personality) > 0 {
+		server["personality"] = opts.Personality
+	}
+
+	return map[string]interface{}{"rebuild": server}, nil
+}
+
+// Rebuild will reprovision the server according to the configuration options
+// provided in the RebuildOpts struct.
+func Rebuild(client *gophercloud.ServiceClient, id string, opts RebuildOptsBuilder) RebuildResult {
+	var result RebuildResult
+
+	if id == "" {
+		result.Err = fmt.Errorf("ID is required")
+		return result
+	}
+
+	reqBody, err := opts.ToServerRebuildMap()
+	if err != nil {
+		result.Err = err
+		return result
+	}
+
+	_, result.Err = client.Post(actionURL(client, id), reqBody, &result.Body, nil)
+	return result
+}
+
+// ResizeOptsBuilder is an interface that allows extensions to override the default structure of
+// a Resize request.
+type ResizeOptsBuilder interface {
+	ToServerResizeMap() (map[string]interface{}, error)
+}
+
+// ResizeOpts represents the configuration options used to control a Resize operation.
+type ResizeOpts struct {
+	// FlavorRef is the ID of the flavor you wish your server to become.
+	FlavorRef string
+}
+
+// ToServerResizeMap formats a ResizeOpts as a map that can be used as a JSON request body for the
+// Resize request.
+func (opts ResizeOpts) ToServerResizeMap() (map[string]interface{}, error) {
+	resize := map[string]interface{}{
+		"flavorRef": opts.FlavorRef,
+	}
+
+	return map[string]interface{}{"resize": resize}, nil
+}
+
+// Resize instructs the provider to change the flavor of the server.
+// Note that this implies rebuilding it.
+// Unfortunately, one cannot pass rebuild parameters to the resize function.
+// When the resize completes, the server will be in RESIZE_VERIFY state.
+// While in this state, you can explore the use of the new server's configuration.
+// If you like it, call ConfirmResize() to commit the resize permanently.
+// Otherwise, call RevertResize() to restore the old configuration.
+func Resize(client *gophercloud.ServiceClient, id string, opts ResizeOptsBuilder) ActionResult {
+	var res ActionResult
+	reqBody, err := opts.ToServerResizeMap()
+	if err != nil {
+		res.Err = err
+		return res
+	}
+
+	_, res.Err = client.Post(actionURL(client, id), reqBody, nil, nil)
+	return res
+}
+
+// ConfirmResize confirms a previous resize operation on a server.
+// See Resize() for more details.
+func ConfirmResize(client *gophercloud.ServiceClient, id string) ActionResult {
+	var res ActionResult
+
+	reqBody := map[string]interface{}{"confirmResize": nil}
+	_, res.Err = client.Post(actionURL(client, id), reqBody, nil, &gophercloud.RequestOpts{
+		OkCodes: []int{201, 202, 204},
+	})
+	return res
+}
+
+// RevertResize cancels a previous resize operation on a server.
+// See Resize() for more details.
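+//
+// A minimal sketch of the resize workflow, assuming an authenticated compute
+// client c and a server id:
+//
+//	Resize(c, id, ResizeOpts{FlavorRef: "2"})
+//	// ...wait for the server to reach the RESIZE_VERIFY status...
+//	ConfirmResize(c, id) // or RevertResize(c, id) to back out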
+func RevertResize(client *gophercloud.ServiceClient, id string) ActionResult {
+	var res ActionResult
+	reqBody := map[string]interface{}{"revertResize": nil}
+	_, res.Err = client.Post(actionURL(client, id), reqBody, nil, nil)
+	return res
+}
+
+// RescueOptsBuilder is an interface that allows extensions to override the
+// default structure of a Rescue request.
+type RescueOptsBuilder interface {
+	ToServerRescueMap() (map[string]interface{}, error)
+}
+
+// RescueOpts represents the configuration options used to control a Rescue
+// operation.
+type RescueOpts struct {
+	// AdminPass is the desired administrative password for the instance in
+	// RESCUE mode. If it's left blank, the server will generate a password.
+	AdminPass string
+}
+
+// ToServerRescueMap formats a RescueOpts as a map that can be used as a JSON
+// request body for the Rescue request.
+func (opts RescueOpts) ToServerRescueMap() (map[string]interface{}, error) {
+	server := make(map[string]interface{})
+	if opts.AdminPass != "" {
+		server["adminPass"] = opts.AdminPass
+	}
+	return map[string]interface{}{"rescue": server}, nil
+}
+
+// Rescue instructs the provider to place the server into RESCUE mode.
+func Rescue(client *gophercloud.ServiceClient, id string, opts RescueOptsBuilder) RescueResult {
+	var result RescueResult
+
+	if id == "" {
+		result.Err = fmt.Errorf("ID is required")
+		return result
+	}
+	reqBody, err := opts.ToServerRescueMap()
+	if err != nil {
+		result.Err = err
+		return result
+	}
+
+	_, result.Err = client.Post(actionURL(client, id), reqBody, &result.Body, &gophercloud.RequestOpts{
+		OkCodes: []int{200},
+	})
+
+	return result
+}
+
+// ResetMetadataOptsBuilder allows extensions to add additional parameters to the
+// Reset request.
+type ResetMetadataOptsBuilder interface {
+	ToMetadataResetMap() (map[string]interface{}, error)
+}
+
+// MetadataOpts is a map that contains key-value pairs.
+type MetadataOpts map[string]string
+
+// ToMetadataResetMap assembles a body for a Reset request based on the contents of a MetadataOpts.
+func (opts MetadataOpts) ToMetadataResetMap() (map[string]interface{}, error) {
+	return map[string]interface{}{"metadata": opts}, nil
+}
+
+// ToMetadataUpdateMap assembles a body for an Update request based on the contents of a MetadataOpts.
+func (opts MetadataOpts) ToMetadataUpdateMap() (map[string]interface{}, error) {
+	return map[string]interface{}{"metadata": opts}, nil
+}
+
+// ResetMetadata will create multiple new key-value pairs for the given server ID.
+// Note: Using this operation will erase any already-existing metadata and create
+// the new metadata provided. To keep any already-existing metadata, use the
+// UpdateMetadata or CreateMetadatum functions.
+func ResetMetadata(client *gophercloud.ServiceClient, id string, opts ResetMetadataOptsBuilder) ResetMetadataResult {
+	var res ResetMetadataResult
+	metadata, err := opts.ToMetadataResetMap()
+	if err != nil {
+		res.Err = err
+		return res
+	}
+	_, res.Err = client.Put(metadataURL(client, id), metadata, &res.Body, &gophercloud.RequestOpts{
+		OkCodes: []int{200},
+	})
+	return res
+}
+
+// Metadata requests all the metadata for the given server ID.
+func Metadata(client *gophercloud.ServiceClient, id string) GetMetadataResult {
+	var res GetMetadataResult
+	_, res.Err = client.Get(metadataURL(client, id), &res.Body, nil)
+	return res
+}
+
+// UpdateMetadataOptsBuilder allows extensions to add additional parameters to the
+// Update request.
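+//
+// A minimal sketch contrasting the two metadata write operations, assuming an
+// authenticated compute client c: ResetMetadata replaces the server's metadata
+// wholesale, while UpdateMetadata merges the given keys into what is there:
+//
+//	ResetMetadata(c, id, MetadataOpts{"foo": "bar"})  // only "foo" remains afterwards
+//	UpdateMetadata(c, id, MetadataOpts{"foo": "bar"}) // other keys are preserved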
+type UpdateMetadataOptsBuilder interface {
+	ToMetadataUpdateMap() (map[string]interface{}, error)
+}
+
+// UpdateMetadata updates (or creates) all the metadata specified by opts for the given server ID.
+// This operation does not affect already-existing metadata that is not specified
+// by opts.
+func UpdateMetadata(client *gophercloud.ServiceClient, id string, opts UpdateMetadataOptsBuilder) UpdateMetadataResult {
+	var res UpdateMetadataResult
+	metadata, err := opts.ToMetadataUpdateMap()
+	if err != nil {
+		res.Err = err
+		return res
+	}
+	_, res.Err = client.Post(metadataURL(client, id), metadata, &res.Body, &gophercloud.RequestOpts{
+		OkCodes: []int{200},
+	})
+	return res
+}
+
+// MetadatumOptsBuilder allows extensions to add additional parameters to the
+// Create request.
+type MetadatumOptsBuilder interface {
+	ToMetadatumCreateMap() (map[string]interface{}, string, error)
+}
+
+// MetadatumOpts is a map of length one that contains a key-value pair.
+type MetadatumOpts map[string]string
+
+// ToMetadatumCreateMap assembles a body for a Create request based on the contents of a MetadatumOpts.
+func (opts MetadatumOpts) ToMetadatumCreateMap() (map[string]interface{}, string, error) {
+	if len(opts) != 1 {
+		return nil, "", errors.New("CreateMetadatum operation must have exactly one key-value pair.")
+	}
+	metadatum := map[string]interface{}{"meta": opts}
+	var key string
+	for k := range metadatum["meta"].(MetadatumOpts) {
+		key = k
+	}
+	return metadatum, key, nil
+}
+
+// CreateMetadatum will create or update the key-value pair with the given key for the given server ID.
+func CreateMetadatum(client *gophercloud.ServiceClient, id string, opts MetadatumOptsBuilder) CreateMetadatumResult {
+	var res CreateMetadatumResult
+	metadatum, key, err := opts.ToMetadatumCreateMap()
+	if err != nil {
+		res.Err = err
+		return res
+	}
+
+	_, res.Err = client.Put(metadatumURL(client, id, key), metadatum, &res.Body, &gophercloud.RequestOpts{
+		OkCodes: []int{200},
+	})
+	return res
+}
+
+// Metadatum requests the key-value pair with the given key for the given server ID.
+func Metadatum(client *gophercloud.ServiceClient, id, key string) GetMetadatumResult {
+	var res GetMetadatumResult
+	_, res.Err = client.Request("GET", metadatumURL(client, id, key), gophercloud.RequestOpts{
+		JSONResponse: &res.Body,
+	})
+	return res
+}
+
+// DeleteMetadatum will delete the key-value pair with the given key for the given server ID.
+func DeleteMetadatum(client *gophercloud.ServiceClient, id, key string) DeleteMetadatumResult {
+	var res DeleteMetadatumResult
+	_, res.Err = client.Delete(metadatumURL(client, id, key), nil)
+	return res
+}
+
+// ListAddresses makes a request against the API to list the server's IP addresses.
+func ListAddresses(client *gophercloud.ServiceClient, id string) pagination.Pager {
+	createPageFn := func(r pagination.PageResult) pagination.Page {
+		return AddressPage{pagination.SinglePageBase(r)}
+	}
+	return pagination.NewPager(client, listAddressesURL(client, id), createPageFn)
+}
+
+// ListAddressesByNetwork makes a request against the API to list the server's IP addresses
+// for the given network.
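+//
+// A minimal sketch, assuming an authenticated compute client c:
+//
+//	err := ListAddressesByNetwork(c, id, "public").EachPage(func(page pagination.Page) (bool, error) {
+//		addrs, err := ExtractNetworkAddresses(page)
+//		if err != nil {
+//			return false, err
+//		}
+//		fmt.Printf("%+v\n", addrs)
+//		return true, nil
+//	})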
+func ListAddressesByNetwork(client *gophercloud.ServiceClient, id, network string) pagination.Pager {
+	createPageFn := func(r pagination.PageResult) pagination.Page {
+		return NetworkAddressPage{pagination.SinglePageBase(r)}
+	}
+	return pagination.NewPager(client, listAddressesByNetworkURL(client, id, network), createPageFn)
+}
+
+// CreateImageOpts specifies the attributes of the server image to be created.
+type CreateImageOpts struct {
+	// Name [required] of the image/snapshot.
+	Name string
+	// Metadata [optional] contains key-value pairs (up to 255 bytes each) to attach to the created image.
+	Metadata map[string]string
+}
+
+// CreateImageOptsBuilder describes struct types that can be accepted by the CreateImage call.
+type CreateImageOptsBuilder interface {
+	ToServerCreateImageMap() (map[string]interface{}, error)
+}
+
+// ToServerCreateImageMap formats a CreateImageOpts structure into a request body.
+func (opts CreateImageOpts) ToServerCreateImageMap() (map[string]interface{}, error) {
+	img := make(map[string]interface{})
+	if opts.Name == "" {
+		return nil, fmt.Errorf("Cannot create a server image without a name")
+	}
+	img["name"] = opts.Name
+	if opts.Metadata != nil {
+		img["metadata"] = opts.Metadata
+	}
+	createImage := make(map[string]interface{})
+	createImage["createImage"] = img
+	return createImage, nil
+}
+
+// CreateImage makes a request against the nova API to schedule the creation of an image of the server.
+func CreateImage(client *gophercloud.ServiceClient, serverId string, opts CreateImageOptsBuilder) CreateImageResult {
+	var res CreateImageResult
+	reqBody, err := opts.ToServerCreateImageMap()
+	if err != nil {
+		res.Err = err
+		return res
+	}
+	response, err := client.Post(actionURL(client, serverId), reqBody, nil, &gophercloud.RequestOpts{
+		OkCodes: []int{202},
+	})
+	res.Err = err
+	res.Header = response.Header
+	return res
+}
+
+// IDFromName is a convenience function that returns a server's ID given its name.
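+// It returns an error if no server, or more than one server, matches the name.
+// A minimal sketch, assuming an authenticated compute client c:
+//
+//	id, err := IDFromName(c, "my-server")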
+func IDFromName(client *gophercloud.ServiceClient, name string) (string, error) {
+	serverCount := 0
+	serverID := ""
+	if name == "" {
+		return "", fmt.Errorf("A server name must be provided.")
+	}
+	pager := List(client, nil)
+	err := pager.EachPage(func(page pagination.Page) (bool, error) {
+		serverList, err := ExtractServers(page)
+		if err != nil {
+			return false, err
+		}
+
+		for _, s := range serverList {
+			if s.Name == name {
+				serverCount++
+				serverID = s.ID
+			}
+		}
+		return true, nil
+	})
+	// Surface pagination errors rather than silently discarding them.
+	if err != nil {
+		return "", err
+	}
+
+	switch serverCount {
+	case 0:
+		return "", fmt.Errorf("Unable to find server: %s", name)
+	case 1:
+		return serverID, nil
+	default:
+		return "", fmt.Errorf("Found %d servers matching %s", serverCount, name)
+	}
+}
diff --git a/vendor/github.com/rackspace/gophercloud/openstack/compute/v2/servers/requests_test.go b/vendor/github.com/rackspace/gophercloud/openstack/compute/v2/servers/requests_test.go
new file mode 100644
index 000000000..88cb54dd7
--- /dev/null
+++ b/vendor/github.com/rackspace/gophercloud/openstack/compute/v2/servers/requests_test.go
@@ -0,0 +1,373 @@
+package servers
+
+import (
+	"encoding/base64"
+	"encoding/json"
+	"net/http"
+	"testing"
+
+	"github.com/rackspace/gophercloud/pagination"
+	th "github.com/rackspace/gophercloud/testhelper"
+	"github.com/rackspace/gophercloud/testhelper/client"
+)
+
+func TestListServers(t *testing.T) {
+	th.SetupHTTP()
+	defer th.TeardownHTTP()
+	HandleServerListSuccessfully(t)
+
+	pages := 0
+	err := List(client.ServiceClient(), ListOpts{}).EachPage(func(page pagination.Page) (bool, error) {
+		pages++
+
+		actual, err := ExtractServers(page)
+		if err != nil {
+			return false, err
+		}
+
+		if len(actual) != 2 {
+			t.Fatalf("Expected 2 servers, got %d", len(actual))
+		}
+		th.CheckDeepEquals(t, ServerHerp, actual[0])
+		th.CheckDeepEquals(t, ServerDerp, actual[1])
+
+		return true, nil
+	})
+
+	th.AssertNoErr(t, err)
+
+	if pages != 1 {
+		t.Errorf("Expected 1 page, saw %d", pages)
+	}
+}
+
+func TestListAllServers(t *testing.T) {
+	th.SetupHTTP()
+	defer th.TeardownHTTP()
+	HandleServerListSuccessfully(t)
+
+	allPages, err := List(client.ServiceClient(), ListOpts{}).AllPages()
+	th.AssertNoErr(t, err)
+	actual, err := ExtractServers(allPages)
+	th.AssertNoErr(t, err)
+	th.CheckDeepEquals(t, ServerHerp, actual[0])
+	th.CheckDeepEquals(t, ServerDerp, actual[1])
+}
+
+func TestCreateServer(t *testing.T) {
+	th.SetupHTTP()
+	defer th.TeardownHTTP()
+	HandleServerCreationSuccessfully(t, SingleServerBody)
+
+	actual, err := Create(client.ServiceClient(), CreateOpts{
+		Name:      "derp",
+		ImageRef:  "f90f6034-2570-4974-8351-6b49732ef2eb",
+		FlavorRef: "1",
+	}).Extract()
+	th.AssertNoErr(t, err)
+
+	th.CheckDeepEquals(t, ServerDerp, *actual)
+}
+
+func TestDeleteServer(t *testing.T) {
+	th.SetupHTTP()
+	defer th.TeardownHTTP()
+	HandleServerDeletionSuccessfully(t)
+
+	res := Delete(client.ServiceClient(), "asdfasdfasdf")
+	th.AssertNoErr(t, res.Err)
+}
+
+func TestGetServer(t *testing.T) {
+	th.SetupHTTP()
+	defer th.TeardownHTTP()
+	HandleServerGetSuccessfully(t)
+
+	client := client.ServiceClient()
+	actual, err := Get(client, "1234asdf").Extract()
+	if err != nil {
+		t.Fatalf("Unexpected Get error: %v", err)
+	}
+
+	th.CheckDeepEquals(t, ServerDerp, *actual)
+}
+
+func TestUpdateServer(t *testing.T) {
+	th.SetupHTTP()
+	defer th.TeardownHTTP()
+	HandleServerUpdateSuccessfully(t)
+
+	client := client.ServiceClient()
+	actual, err := Update(client, "1234asdf", UpdateOpts{Name: "new-name"}).Extract()
+	if err != nil {
+		t.Fatalf("Unexpected Update error: %v", err)
+	}
+ + th.CheckDeepEquals(t, ServerDerp, *actual) +} + +func TestChangeServerAdminPassword(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleAdminPasswordChangeSuccessfully(t) + + res := ChangeAdminPassword(client.ServiceClient(), "1234asdf", "new-password") + th.AssertNoErr(t, res.Err) +} + +func TestRebootServer(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleRebootSuccessfully(t) + + res := Reboot(client.ServiceClient(), "1234asdf", SoftReboot) + th.AssertNoErr(t, res.Err) +} + +func TestRebuildServer(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleRebuildSuccessfully(t, SingleServerBody) + + opts := RebuildOpts{ + Name: "new-name", + AdminPass: "swordfish", + ImageID: "http://104.130.131.164:8774/fcad67a6189847c4aecfa3c81a05783b/images/f90f6034-2570-4974-8351-6b49732ef2eb", + AccessIPv4: "1.2.3.4", + } + + actual, err := Rebuild(client.ServiceClient(), "1234asdf", opts).Extract() + th.AssertNoErr(t, err) + + th.CheckDeepEquals(t, ServerDerp, *actual) +} + +func TestResizeServer(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + th.Mux.HandleFunc("/servers/1234asdf/action", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "POST") + th.TestHeader(t, r, "X-Auth-Token", client.TokenID) + th.TestJSONRequest(t, r, `{ "resize": { "flavorRef": "2" } }`) + + w.WriteHeader(http.StatusAccepted) + }) + + res := Resize(client.ServiceClient(), "1234asdf", ResizeOpts{FlavorRef: "2"}) + th.AssertNoErr(t, res.Err) +} + +func TestConfirmResize(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + th.Mux.HandleFunc("/servers/1234asdf/action", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "POST") + th.TestHeader(t, r, "X-Auth-Token", client.TokenID) + th.TestJSONRequest(t, r, `{ "confirmResize": null }`) + + w.WriteHeader(http.StatusNoContent) + }) + + res := ConfirmResize(client.ServiceClient(), "1234asdf") + th.AssertNoErr(t, res.Err) +} + +func TestRevertResize(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + th.Mux.HandleFunc("/servers/1234asdf/action", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "POST") + th.TestHeader(t, r, "X-Auth-Token", client.TokenID) + th.TestJSONRequest(t, r, `{ "revertResize": null }`) + + w.WriteHeader(http.StatusAccepted) + }) + + res := RevertResize(client.ServiceClient(), "1234asdf") + th.AssertNoErr(t, res.Err) +} + +func TestRescue(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + HandleServerRescueSuccessfully(t) + + res := Rescue(client.ServiceClient(), "1234asdf", RescueOpts{ + AdminPass: "1234567890", + }) + th.AssertNoErr(t, res.Err) + adminPass, _ := res.Extract() + th.AssertEquals(t, "1234567890", adminPass) +} + +func TestGetMetadatum(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + HandleMetadatumGetSuccessfully(t) + + expected := map[string]string{"foo": "bar"} + actual, err := Metadatum(client.ServiceClient(), "1234asdf", "foo").Extract() + th.AssertNoErr(t, err) + th.AssertDeepEquals(t, expected, actual) +} + +func TestCreateMetadatum(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + HandleMetadatumCreateSuccessfully(t) + + expected := map[string]string{"foo": "bar"} + actual, err := CreateMetadatum(client.ServiceClient(), "1234asdf", MetadatumOpts{"foo": "bar"}).Extract() + th.AssertNoErr(t, err) + th.AssertDeepEquals(t, expected, actual) +} + +func TestDeleteMetadatum(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + 
HandleMetadatumDeleteSuccessfully(t) + + err := DeleteMetadatum(client.ServiceClient(), "1234asdf", "foo").ExtractErr() + th.AssertNoErr(t, err) +} + +func TestGetMetadata(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + HandleMetadataGetSuccessfully(t) + + expected := map[string]string{"foo": "bar", "this": "that"} + actual, err := Metadata(client.ServiceClient(), "1234asdf").Extract() + th.AssertNoErr(t, err) + th.AssertDeepEquals(t, expected, actual) +} + +func TestResetMetadata(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + HandleMetadataResetSuccessfully(t) + + expected := map[string]string{"foo": "bar", "this": "that"} + actual, err := ResetMetadata(client.ServiceClient(), "1234asdf", MetadataOpts{ + "foo": "bar", + "this": "that", + }).Extract() + th.AssertNoErr(t, err) + th.AssertDeepEquals(t, expected, actual) +} + +func TestUpdateMetadata(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + HandleMetadataUpdateSuccessfully(t) + + expected := map[string]string{"foo": "baz", "this": "those"} + actual, err := UpdateMetadata(client.ServiceClient(), "1234asdf", MetadataOpts{ + "foo": "baz", + "this": "those", + }).Extract() + th.AssertNoErr(t, err) + th.AssertDeepEquals(t, expected, actual) +} + +func TestListAddresses(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleAddressListSuccessfully(t) + + expected := ListAddressesExpected + pages := 0 + err := ListAddresses(client.ServiceClient(), "asdfasdfasdf").EachPage(func(page pagination.Page) (bool, error) { + pages++ + + actual, err := ExtractAddresses(page) + th.AssertNoErr(t, err) + + if len(actual) != 2 { + t.Fatalf("Expected 2 networks, got %d", len(actual)) + } + th.CheckDeepEquals(t, expected, actual) + + return true, nil + }) + th.AssertNoErr(t, err) + th.CheckEquals(t, 1, pages) +} + +func TestListAddressesByNetwork(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleNetworkAddressListSuccessfully(t) + + expected := ListNetworkAddressesExpected + pages := 0 + err := ListAddressesByNetwork(client.ServiceClient(), "asdfasdfasdf", "public").EachPage(func(page pagination.Page) (bool, error) { + pages++ + + actual, err := ExtractNetworkAddresses(page) + th.AssertNoErr(t, err) + + if len(actual) != 2 { + t.Fatalf("Expected 2 addresses, got %d", len(actual)) + } + th.CheckDeepEquals(t, expected, actual) + + return true, nil + }) + th.AssertNoErr(t, err) + th.CheckEquals(t, 1, pages) +} + +func TestCreateServerImage(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleCreateServerImageSuccessfully(t) + + _, err := CreateImage(client.ServiceClient(), "serverimage", CreateImageOpts{Name: "test"}).ExtractImageID() + th.AssertNoErr(t, err) +} + +func TestMarshalPersonality(t *testing.T) { + name := "/etc/test" + contents := []byte("asdfasdf") + + personality := Personality{ + &File{ + Path: name, + Contents: contents, + }, + } + + data, err := json.Marshal(personality) + if err != nil { + t.Fatal(err) + } + + var actual []map[string]string + err = json.Unmarshal(data, &actual) + if err != nil { + t.Fatal(err) + } + + if len(actual) != 1 { + t.Fatal("expected personality length 1") + } + + if actual[0]["path"] != name { + t.Fatal("file path incorrect") + } + + if actual[0]["contents"] != base64.StdEncoding.EncodeToString(contents) { + t.Fatal("file contents incorrect") + } +} diff --git a/vendor/github.com/rackspace/gophercloud/openstack/compute/v2/servers/results.go b/vendor/github.com/rackspace/gophercloud/openstack/compute/v2/servers/results.go new 
file mode 100644
index 000000000..f27870984
--- /dev/null
+++ b/vendor/github.com/rackspace/gophercloud/openstack/compute/v2/servers/results.go
@@ -0,0 +1,372 @@
+package servers
+
+import (
+	"fmt"
+	"net/url"
+	"path"
+	"reflect"
+
+	"github.com/mitchellh/mapstructure"
+	"github.com/rackspace/gophercloud"
+	"github.com/rackspace/gophercloud/pagination"
+)
+
+type serverResult struct {
+	gophercloud.Result
+}
+
+// Extract interprets any serverResult as a Server, if possible.
+func (r serverResult) Extract() (*Server, error) {
+	if r.Err != nil {
+		return nil, r.Err
+	}
+
+	var response struct {
+		Server Server `mapstructure:"server"`
+	}
+
+	config := &mapstructure.DecoderConfig{
+		DecodeHook: toMapFromString,
+		Result:     &response,
+	}
+	decoder, err := mapstructure.NewDecoder(config)
+	if err != nil {
+		return nil, err
+	}
+
+	err = decoder.Decode(r.Body)
+	if err != nil {
+		return nil, err
+	}
+
+	return &response.Server, nil
+}
+
+// CreateResult temporarily contains the response from a Create call.
+type CreateResult struct {
+	serverResult
+}
+
+// GetResult temporarily contains the response from a Get call.
+type GetResult struct {
+	serverResult
+}
+
+// UpdateResult temporarily contains the response from an Update call.
+type UpdateResult struct {
+	serverResult
+}
+
+// DeleteResult temporarily contains the response from a Delete call.
+type DeleteResult struct {
+	gophercloud.ErrResult
+}
+
+// RebuildResult temporarily contains the response from a Rebuild call.
+type RebuildResult struct {
+	serverResult
+}
+
+// ActionResult represents the result of server action operations, like reboot.
+type ActionResult struct {
+	gophercloud.ErrResult
+}
+
+// RescueResult represents the result of a server rescue operation.
+type RescueResult struct {
+	ActionResult
+}
+
+// CreateImageResult represents the result of an image creation operation.
+type CreateImageResult struct {
+	gophercloud.Result
+}
+
+// ExtractImageID gets the ID of the newly created server image from the header.
+func (res CreateImageResult) ExtractImageID() (string, error) {
+	if res.Err != nil {
+		return "", res.Err
+	}
+	// Get the image id from the header
+	u, err := url.ParseRequestURI(res.Header.Get("Location"))
+	if err != nil {
+		return "", fmt.Errorf("Failed to parse the image id: %s", err.Error())
+	}
+	imageID := path.Base(u.Path)
+	if imageID == "." || imageID == "/" {
+		return "", fmt.Errorf("Failed to parse the ID of newly created image: %s", u)
+	}
+	return imageID, nil
+}
+
+// Extract interprets any RescueResult as an AdminPass, if possible.
+func (r RescueResult) Extract() (string, error) {
+	if r.Err != nil {
+		return "", r.Err
+	}
+
+	var response struct {
+		AdminPass string `mapstructure:"adminPass"`
+	}
+
+	err := mapstructure.Decode(r.Body, &response)
+	return response.AdminPass, err
+}
+
+// Server exposes only the standard OpenStack fields corresponding to a given server on the user's account.
+type Server struct {
+	// ID uniquely identifies this server amongst all other servers, including those not accessible to the current tenant.
+	ID string
+
+	// TenantID identifies the tenant owning this server resource.
+	TenantID string `mapstructure:"tenant_id"`
+
+	// UserID uniquely identifies the user account owning the tenant.
+	UserID string `mapstructure:"user_id"`
+
+	// Name contains the human-readable name for the server.
+	Name string
+
+	// Updated and Created contain ISO-8601 timestamps of when the state of the server last changed, and when it was created.
+	Updated string
+	Created string
+
+	HostID string
+
+	// Status contains the current operational status of the server, such as IN_PROGRESS or ACTIVE.
+	Status string
+
+	// Progress ranges from 0..100.
+	// A request made against the server completes only once Progress reaches 100.
+	Progress int
+
+	// AccessIPv4 and AccessIPv6 contain the IP addresses of the server, suitable for remote access for administration.
+	AccessIPv4, AccessIPv6 string
+
+	// Image refers to a JSON object, which itself indicates the OS image used to deploy the server.
+	Image map[string]interface{}
+
+	// Flavor refers to a JSON object, which itself indicates the hardware configuration of the deployed server.
+	Flavor map[string]interface{}
+
+	// Addresses includes a list of all IP addresses assigned to the server, keyed by pool.
+	Addresses map[string]interface{}
+
+	// Metadata includes a list of all user-specified key-value pairs attached to the server.
+	Metadata map[string]interface{}
+
+	// Links includes HTTP references to the server itself, useful for passing along to other APIs that might want a server reference.
+	Links []interface{}
+
+	// KeyName indicates which public key was injected into the server on launch.
+	KeyName string `json:"key_name" mapstructure:"key_name"`
+
+	// AdminPass will generally be empty (""). However, it will contain the administrative password chosen when provisioning a new server that did not specify an AdminPass of its own.
+	// Note that this is the ONLY time this field will be valid.
+	AdminPass string `json:"adminPass" mapstructure:"adminPass"`
+
+	// SecurityGroups includes the security groups that this instance has applied to it.
+	SecurityGroups []map[string]interface{} `json:"security_groups" mapstructure:"security_groups"`
+}
+
+// ServerPage abstracts the raw results of making a List() request against the API.
+// As OpenStack extensions may freely alter the response bodies of structures returned to the client, you may only safely access the
+// data provided through the ExtractServers call.
+type ServerPage struct {
+	pagination.LinkedPageBase
+}
+
+// IsEmpty returns true if a page contains no Server results.
+func (page ServerPage) IsEmpty() (bool, error) {
+	servers, err := ExtractServers(page)
+	if err != nil {
+		return true, err
+	}
+	return len(servers) == 0, nil
+}
+
+// NextPageURL uses the response's embedded link reference to navigate to the next page of results.
+func (page ServerPage) NextPageURL() (string, error) {
+	type resp struct {
+		Links []gophercloud.Link `mapstructure:"servers_links"`
+	}
+
+	var r resp
+	err := mapstructure.Decode(page.Body, &r)
+	if err != nil {
+		return "", err
+	}
+
+	return gophercloud.ExtractNextURL(r.Links)
+}
+
+// ExtractServers interprets the results of a single page from a List() call, producing a slice of Server entities.
+func ExtractServers(page pagination.Page) ([]Server, error) {
+	casted := page.(ServerPage).Body
+
+	var response struct {
+		Servers []Server `mapstructure:"servers"`
+	}
+
+	config := &mapstructure.DecoderConfig{
+		DecodeHook: toMapFromString,
+		Result:     &response,
+	}
+	decoder, err := mapstructure.NewDecoder(config)
+	if err != nil {
+		return nil, err
+	}
+
+	err = decoder.Decode(casted)
+
+	return response.Servers, err
+}
+
+// MetadataResult contains the result of a call for (potentially) multiple key-value pairs.
+type MetadataResult struct {
+	gophercloud.Result
+}
+
+// GetMetadataResult temporarily contains the response from a metadata Get call.
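+//
+// A minimal sketch, assuming an authenticated compute client c:
+//
+//	meta, err := Metadata(c, "1234asdf").Extract() // meta is a map[string]string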
+type GetMetadataResult struct {
+	MetadataResult
+}
+
+// ResetMetadataResult temporarily contains the response from a metadata Reset call.
+type ResetMetadataResult struct {
+	MetadataResult
+}
+
+// UpdateMetadataResult temporarily contains the response from a metadata Update call.
+type UpdateMetadataResult struct {
+	MetadataResult
+}
+
+// MetadatumResult contains the result of a call for a single key-value pair.
+type MetadatumResult struct {
+	gophercloud.Result
+}
+
+// GetMetadatumResult temporarily contains the response from a metadatum Get call.
+type GetMetadatumResult struct {
+	MetadatumResult
+}
+
+// CreateMetadatumResult temporarily contains the response from a metadatum Create call.
+type CreateMetadatumResult struct {
+	MetadatumResult
+}
+
+// DeleteMetadatumResult temporarily contains the response from a metadatum Delete call.
+type DeleteMetadatumResult struct {
+	gophercloud.ErrResult
+}
+
+// Extract interprets any MetadataResult as a Metadata, if possible.
+func (r MetadataResult) Extract() (map[string]string, error) {
+	if r.Err != nil {
+		return nil, r.Err
+	}
+
+	var response struct {
+		Metadata map[string]string `mapstructure:"metadata"`
+	}
+
+	err := mapstructure.Decode(r.Body, &response)
+	return response.Metadata, err
+}
+
+// Extract interprets any MetadatumResult as a Metadatum, if possible.
+func (r MetadatumResult) Extract() (map[string]string, error) {
+	if r.Err != nil {
+		return nil, r.Err
+	}
+
+	var response struct {
+		Metadatum map[string]string `mapstructure:"meta"`
+	}
+
+	err := mapstructure.Decode(r.Body, &response)
+	return response.Metadatum, err
+}
+
+func toMapFromString(from reflect.Kind, to reflect.Kind, data interface{}) (interface{}, error) {
+	if (from == reflect.String) && (to == reflect.Map) {
+		return map[string]interface{}{}, nil
+	}
+	return data, nil
+}
+
+// Address represents an IP address.
+type Address struct {
+	Version int    `mapstructure:"version"`
+	Address string `mapstructure:"addr"`
+}
+
+// AddressPage abstracts the raw results of making a ListAddresses() request against the API.
+// As OpenStack extensions may freely alter the response bodies of structures returned
+// to the client, you may only safely access the data provided through the ExtractAddresses call.
+type AddressPage struct {
+	pagination.SinglePageBase
+}
+
+// IsEmpty returns true if an AddressPage contains no addresses.
+func (r AddressPage) IsEmpty() (bool, error) {
+	addresses, err := ExtractAddresses(r)
+	if err != nil {
+		return true, err
+	}
+	return len(addresses) == 0, nil
+}
+
+// ExtractAddresses interprets the results of a single page from a ListAddresses() call,
+// producing a map of addresses.
+func ExtractAddresses(page pagination.Page) (map[string][]Address, error) {
+	casted := page.(AddressPage).Body
+
+	var response struct {
+		Addresses map[string][]Address `mapstructure:"addresses"`
+	}
+
+	err := mapstructure.Decode(casted, &response)
+	if err != nil {
+		return nil, err
+	}
+
+	return response.Addresses, err
+}
+
+// NetworkAddressPage abstracts the raw results of making a ListAddressesByNetwork() request against the API.
+// As OpenStack extensions may freely alter the response bodies of structures returned
+// to the client, you may only safely access the data provided through the ExtractAddresses call.
+type NetworkAddressPage struct {
+	pagination.SinglePageBase
+}
+
+// IsEmpty returns true if a NetworkAddressPage contains no addresses.
+func (r NetworkAddressPage) IsEmpty() (bool, error) {
+	addresses, err := ExtractNetworkAddresses(r)
+	if err != nil {
+		return true, err
+	}
+	return len(addresses) == 0, nil
+}
+
+// ExtractNetworkAddresses interprets the results of a single page from a ListAddressesByNetwork() call,
+// producing a slice of addresses.
+func ExtractNetworkAddresses(page pagination.Page) ([]Address, error) {
+	casted := page.(NetworkAddressPage).Body
+
+	var response map[string][]Address
+	err := mapstructure.Decode(casted, &response)
+	if err != nil {
+		return nil, err
+	}
+
+	// The response body contains a single key (the network name); pick out its value.
+	var key string
+	for k := range response {
+		key = k
+	}
+
+	return response[key], err
+}
diff --git a/vendor/github.com/rackspace/gophercloud/openstack/compute/v2/servers/urls.go b/vendor/github.com/rackspace/gophercloud/openstack/compute/v2/servers/urls.go
new file mode 100644
index 000000000..899835493
--- /dev/null
+++ b/vendor/github.com/rackspace/gophercloud/openstack/compute/v2/servers/urls.go
@@ -0,0 +1,47 @@
+package servers
+
+import "github.com/rackspace/gophercloud"
+
+func createURL(client *gophercloud.ServiceClient) string {
+	return client.ServiceURL("servers")
+}
+
+func listURL(client *gophercloud.ServiceClient) string {
+	return createURL(client)
+}
+
+func listDetailURL(client *gophercloud.ServiceClient) string {
+	return client.ServiceURL("servers", "detail")
+}
+
+func deleteURL(client *gophercloud.ServiceClient, id string) string {
+	return client.ServiceURL("servers", id)
+}
+
+func getURL(client *gophercloud.ServiceClient, id string) string {
+	return deleteURL(client, id)
+}
+
+func updateURL(client *gophercloud.ServiceClient, id string) string {
+	return deleteURL(client, id)
+}
+
+func actionURL(client *gophercloud.ServiceClient, id string) string {
+	return client.ServiceURL("servers", id, "action")
+}
+
+func metadatumURL(client *gophercloud.ServiceClient, id, key string) string {
+	return client.ServiceURL("servers", id, "metadata", key)
+}
+
+func metadataURL(client *gophercloud.ServiceClient, id string) string {
+	return client.ServiceURL("servers", id, "metadata")
+}
+
+func listAddressesURL(client *gophercloud.ServiceClient, id string) string {
+	return client.ServiceURL("servers", id, "ips")
+}
+
+func listAddressesByNetworkURL(client *gophercloud.ServiceClient, id, network string) string {
+	return client.ServiceURL("servers", id, "ips", network)
+}
diff --git a/vendor/github.com/rackspace/gophercloud/openstack/compute/v2/servers/urls_test.go b/vendor/github.com/rackspace/gophercloud/openstack/compute/v2/servers/urls_test.go
new file mode 100644
index 000000000..17a1d287f
--- /dev/null
+++ b/vendor/github.com/rackspace/gophercloud/openstack/compute/v2/servers/urls_test.go
@@ -0,0 +1,68 @@
+package servers
+
+import (
+	"testing"
+
+	"github.com/rackspace/gophercloud"
+	th "github.com/rackspace/gophercloud/testhelper"
+)
+
+// The trailing slash matters: ServiceURL concatenates the endpoint with the
+// joined path segments, so expectations such as endpoint + "servers" only
+// hold when the endpoint ends in "/".
+const endpoint = "http://localhost:57909/"
+
+func endpointClient() *gophercloud.ServiceClient {
+	return &gophercloud.ServiceClient{Endpoint: endpoint}
+}
+
+func TestCreateURL(t *testing.T) {
+	actual := createURL(endpointClient())
+	expected := endpoint + "servers"
+	th.CheckEquals(t, expected, actual)
+}
+
+func TestListURL(t *testing.T) {
+	actual := listURL(endpointClient())
+	expected := endpoint + "servers"
+	th.CheckEquals(t, expected, actual)
+}
+
+func TestListDetailURL(t *testing.T) {
+	actual := listDetailURL(endpointClient())
+	expected := endpoint + "servers/detail"
+	th.CheckEquals(t, expected, actual)
+}
+
+func TestDeleteURL(t *testing.T) {
+	actual :=
deleteURL(endpointClient(), "foo") + expected := endpoint + "servers/foo" + th.CheckEquals(t, expected, actual) +} + +func TestGetURL(t *testing.T) { + actual := getURL(endpointClient(), "foo") + expected := endpoint + "servers/foo" + th.CheckEquals(t, expected, actual) +} + +func TestUpdateURL(t *testing.T) { + actual := updateURL(endpointClient(), "foo") + expected := endpoint + "servers/foo" + th.CheckEquals(t, expected, actual) +} + +func TestActionURL(t *testing.T) { + actual := actionURL(endpointClient(), "foo") + expected := endpoint + "servers/foo/action" + th.CheckEquals(t, expected, actual) +} + +func TestMetadatumURL(t *testing.T) { + actual := metadatumURL(endpointClient(), "foo", "bar") + expected := endpoint + "servers/foo/metadata/bar" + th.CheckEquals(t, expected, actual) +} + +func TestMetadataURL(t *testing.T) { + actual := metadataURL(endpointClient(), "foo") + expected := endpoint + "servers/foo/metadata" + th.CheckEquals(t, expected, actual) +} diff --git a/vendor/github.com/rackspace/gophercloud/openstack/compute/v2/servers/util.go b/vendor/github.com/rackspace/gophercloud/openstack/compute/v2/servers/util.go new file mode 100644 index 000000000..e6baf7416 --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/openstack/compute/v2/servers/util.go @@ -0,0 +1,20 @@ +package servers + +import "github.com/rackspace/gophercloud" + +// WaitForStatus will continually poll a server until it successfully transitions to a specified +// status. It will do this for at most the number of seconds specified. +func WaitForStatus(c *gophercloud.ServiceClient, id, status string, secs int) error { + return gophercloud.WaitFor(secs, func() (bool, error) { + current, err := Get(c, id).Extract() + if err != nil { + return false, err + } + + if current.Status == status { + return true, nil + } + + return false, nil + }) +} diff --git a/vendor/github.com/rackspace/gophercloud/openstack/db/v1/configurations/doc.go b/vendor/github.com/rackspace/gophercloud/openstack/db/v1/configurations/doc.go new file mode 100644 index 000000000..45b9cfb4e --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/openstack/db/v1/configurations/doc.go @@ -0,0 +1,11 @@ +// Package configurations provides information and interaction with the +// configuration API resource in the Rackspace Database service. +// +// A configuration group is a collection of key/value pairs which define how a +// particular database operates. These key/value pairs are specific to each +// datastore type and serve like settings. Some directives are capable of being +// applied dynamically, while other directives require a server restart to take +// effect. The configuration group can be applied to an instance at creation or +// applied to an existing instance to modify the behavior of the running +// datastore on the instance. 
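+//
+// A minimal usage sketch, assuming an authenticated database ServiceClient c
+// and hypothetical values (Extract on the create result is assumed to follow
+// the package's result conventions):
+//
+//	opts := configurations.CreateOpts{
+//		Name:   "example-configuration-name",
+//		Values: map[string]interface{}{"connect_timeout": 120},
+//	}
+//	config, err := configurations.Create(c, opts).Extract()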
+package configurations diff --git a/vendor/github.com/rackspace/gophercloud/openstack/db/v1/configurations/fixtures.go b/vendor/github.com/rackspace/gophercloud/openstack/db/v1/configurations/fixtures.go new file mode 100644 index 000000000..ae6541615 --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/openstack/db/v1/configurations/fixtures.go @@ -0,0 +1,157 @@ +package configurations + +import ( + "fmt" + "time" +) + +var ( + timestamp = "2015-11-12T14:22:42Z" + timeVal, _ = time.Parse(time.RFC3339, timestamp) +) + +var singleConfigJSON = ` +{ + "created": "` + timestamp + `", + "datastore_name": "mysql", + "datastore_version_id": "b00000b0-00b0-0b00-00b0-000b000000bb", + "datastore_version_name": "5.6", + "description": "example_description", + "id": "005a8bb7-a8df-40ee-b0b7-fc144641abc2", + "name": "example-configuration-name", + "updated": "` + timestamp + `" +} +` + +var singleConfigWithValuesJSON = ` +{ + "created": "` + timestamp + `", + "datastore_name": "mysql", + "datastore_version_id": "b00000b0-00b0-0b00-00b0-000b000000bb", + "datastore_version_name": "5.6", + "description": "example description", + "id": "005a8bb7-a8df-40ee-b0b7-fc144641abc2", + "instance_count": 0, + "name": "example-configuration-name", + "updated": "` + timestamp + `", + "values": { + "collation_server": "latin1_swedish_ci", + "connect_timeout": 120 + } +} +` + +var ( + ListConfigsJSON = fmt.Sprintf(`{"configurations": [%s]}`, singleConfigJSON) + GetConfigJSON = fmt.Sprintf(`{"configuration": %s}`, singleConfigJSON) + CreateConfigJSON = fmt.Sprintf(`{"configuration": %s}`, singleConfigWithValuesJSON) +) + +var CreateReq = ` +{ + "configuration": { + "datastore": { + "type": "a00000a0-00a0-0a00-00a0-000a000000aa", + "version": "b00000b0-00b0-0b00-00b0-000b000000bb" + }, + "description": "example description", + "name": "example-configuration-name", + "values": { + "collation_server": "latin1_swedish_ci", + "connect_timeout": 120 + } + } +} +` + +var UpdateReq = ` +{ + "configuration": { + "values": { + "connect_timeout": 300 + } + } +} +` + +var ListInstancesJSON = ` +{ + "instances": [ + { + "id": "d4603f69-ec7e-4e9b-803f-600b9205576f", + "name": "json_rack_instance" + } + ] +} +` + +var ListParamsJSON = ` +{ + "configuration-parameters": [ + { + "max": 1, + "min": 0, + "name": "innodb_file_per_table", + "restart_required": true, + "type": "integer" + }, + { + "max": 4294967296, + "min": 0, + "name": "key_buffer_size", + "restart_required": false, + "type": "integer" + }, + { + "max": 65535, + "min": 2, + "name": "connect_timeout", + "restart_required": false, + "type": "integer" + }, + { + "max": 4294967296, + "min": 0, + "name": "join_buffer_size", + "restart_required": false, + "type": "integer" + } + ] +} +` + +var GetParamJSON = ` +{ + "max": 1, + "min": 0, + "name": "innodb_file_per_table", + "restart_required": true, + "type": "integer" +} +` + +var ExampleConfig = Config{ + Created: timeVal, + DatastoreName: "mysql", + DatastoreVersionID: "b00000b0-00b0-0b00-00b0-000b000000bb", + DatastoreVersionName: "5.6", + Description: "example_description", + ID: "005a8bb7-a8df-40ee-b0b7-fc144641abc2", + Name: "example-configuration-name", + Updated: timeVal, +} + +var ExampleConfigWithValues = Config{ + Created: timeVal, + DatastoreName: "mysql", + DatastoreVersionID: "b00000b0-00b0-0b00-00b0-000b000000bb", + DatastoreVersionName: "5.6", + Description: "example description", + ID: "005a8bb7-a8df-40ee-b0b7-fc144641abc2", + Name: "example-configuration-name", + Updated: timeVal, + Values: 
map[string]interface{}{ + "collation_server": "latin1_swedish_ci", + "connect_timeout": 120, + }, +} diff --git a/vendor/github.com/rackspace/gophercloud/openstack/db/v1/configurations/requests.go b/vendor/github.com/rackspace/gophercloud/openstack/db/v1/configurations/requests.go new file mode 100644 index 000000000..83c710206 --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/openstack/db/v1/configurations/requests.go @@ -0,0 +1,287 @@ +package configurations + +import ( + "errors" + + "github.com/rackspace/gophercloud" + "github.com/rackspace/gophercloud/openstack/db/v1/instances" + "github.com/rackspace/gophercloud/pagination" +) + +// List will list all of the available configurations. +func List(client *gophercloud.ServiceClient) pagination.Pager { + pageFn := func(r pagination.PageResult) pagination.Page { + return ConfigPage{pagination.SinglePageBase(r)} + } + + return pagination.NewPager(client, baseURL(client), pageFn) +} + +// CreateOptsBuilder is a top-level interface which renders a JSON map. +type CreateOptsBuilder interface { + ToConfigCreateMap() (map[string]interface{}, error) +} + +// DatastoreOpts is the primary options struct for creating and modifying +// how configuration resources are associated with datastores. +type DatastoreOpts struct { + // [OPTIONAL] The type of datastore. Defaults to "MySQL". + Type string + + // [OPTIONAL] The specific version of a datastore. Defaults to "5.6". + Version string +} + +// ToMap renders a JSON map for a datastore setting. +func (opts DatastoreOpts) ToMap() (map[string]string, error) { + datastore := map[string]string{} + + if opts.Type != "" { + datastore["type"] = opts.Type + } + + if opts.Version != "" { + datastore["version"] = opts.Version + } + + return datastore, nil +} + +// CreateOpts is the struct responsible for configuring new configurations. +type CreateOpts struct { + // [REQUIRED] The configuration group name + Name string + + // [REQUIRED] A map of user-defined configuration settings that will define + // how each associated datastore works. Each key/value pair is specific to a + // datastore type. + Values map[string]interface{} + + // [OPTIONAL] Associates the configuration group with a particular datastore. + Datastore *DatastoreOpts + + // [OPTIONAL] A human-readable explanation for the group. + Description string +} + +// ToConfigCreateMap casts a CreateOpts struct into a JSON map. +func (opts CreateOpts) ToConfigCreateMap() (map[string]interface{}, error) { + if opts.Name == "" { + return nil, errors.New("Name is a required field") + } + if len(opts.Values) == 0 { + return nil, errors.New("Values must be a populated map") + } + + config := map[string]interface{}{ + "name": opts.Name, + "values": opts.Values, + } + + if opts.Datastore != nil { + ds, err := opts.Datastore.ToMap() + if err != nil { + return config, err + } + config["datastore"] = ds + } + + if opts.Description != "" { + config["description"] = opts.Description + } + + return map[string]interface{}{"configuration": config}, nil +} + +// Create will create a new configuration group. +func Create(client *gophercloud.ServiceClient, opts CreateOptsBuilder) CreateResult { + var res CreateResult + + reqBody, err := opts.ToConfigCreateMap() + if err != nil { + res.Err = err + return res + } + + _, res.Err = client.Request("POST", baseURL(client), gophercloud.RequestOpts{ + OkCodes: []int{200}, + JSONBody: &reqBody, + JSONResponse: &res.Body, + }) + + return res +} + +// Get will retrieve the details for a specified configuration group. 
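+//
+// A minimal sketch, assuming an authenticated database client c and a known
+// configID (Extract on the result is assumed to follow the package's result
+// conventions):
+//
+//	config, err := Get(c, configID).Extract()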
+func Get(client *gophercloud.ServiceClient, configID string) GetResult { + var res GetResult + + _, res.Err = client.Request("GET", resourceURL(client, configID), gophercloud.RequestOpts{ + OkCodes: []int{200}, + JSONResponse: &res.Body, + }) + + return res +} + +// UpdateOptsBuilder is the top-level interface for casting update options into +// JSON maps. +type UpdateOptsBuilder interface { + ToConfigUpdateMap() (map[string]interface{}, error) +} + +// UpdateOpts is the struct responsible for modifying existing configurations. +type UpdateOpts struct { + // [OPTIONAL] The configuration group name + Name string + + // [OPTIONAL] A map of user-defined configuration settings that will define + // how each associated datastore works. Each key/value pair is specific to a + // datastore type. + Values map[string]interface{} + + // [OPTIONAL] Associates the configuration group with a particular datastore. + Datastore *DatastoreOpts + + // [OPTIONAL] A human-readable explanation for the group. + Description string +} + +// ToConfigUpdateMap will cast an UpdateOpts struct into a JSON map. +func (opts UpdateOpts) ToConfigUpdateMap() (map[string]interface{}, error) { + config := map[string]interface{}{} + + if opts.Name != "" { + config["name"] = opts.Name + } + + if opts.Description != "" { + config["description"] = opts.Description + } + + if opts.Datastore != nil { + ds, err := opts.Datastore.ToMap() + if err != nil { + return config, err + } + config["datastore"] = ds + } + + if len(opts.Values) > 0 { + config["values"] = opts.Values + } + + return map[string]interface{}{"configuration": config}, nil +} + +// Update will modify an existing configuration group by performing a merge +// between new and existing values. If the key already exists, the new value +// will overwrite. All other keys will remain unaffected. +func Update(client *gophercloud.ServiceClient, configID string, opts UpdateOptsBuilder) UpdateResult { + var res UpdateResult + + reqBody, err := opts.ToConfigUpdateMap() + if err != nil { + res.Err = err + return res + } + + _, res.Err = client.Request("PATCH", resourceURL(client, configID), gophercloud.RequestOpts{ + OkCodes: []int{200}, + JSONBody: &reqBody, + }) + + return res +} + +// Replace will modify an existing configuration group by overwriting the +// entire parameter group with the new values provided. Any existing keys not +// included in UpdateOptsBuilder will be deleted. +func Replace(client *gophercloud.ServiceClient, configID string, opts UpdateOptsBuilder) ReplaceResult { + var res ReplaceResult + + reqBody, err := opts.ToConfigUpdateMap() + if err != nil { + res.Err = err + return res + } + + _, res.Err = client.Request("PUT", resourceURL(client, configID), gophercloud.RequestOpts{ + OkCodes: []int{202}, + JSONBody: &reqBody, + }) + + return res +} + +// Delete will permanently delete a configuration group. Please note that +// config groups cannot be deleted whilst still attached to running instances - +// you must detach and then delete them. +func Delete(client *gophercloud.ServiceClient, configID string) DeleteResult { + var res DeleteResult + + _, res.Err = client.Request("DELETE", resourceURL(client, configID), gophercloud.RequestOpts{ + OkCodes: []int{202}, + }) + + return res +} + +// ListInstances will list all the instances associated with a particular +// configuration group. 
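+//
+// Illustrative sketch (assumes client and configID as above; pages are walked
+// with the pagination helpers and decoded via instances.ExtractInstances):
+//
+//	err := ListInstances(client, configID).EachPage(func(page pagination.Page) (bool, error) {
+//		list, err := instances.ExtractInstances(page)
+//		if err != nil {
+//			return false, err
+//		}
+//		for _, inst := range list {
+//			fmt.Println(inst.ID, inst.Name)
+//		}
+//		return true, nil
+//	})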
+func ListInstances(client *gophercloud.ServiceClient, configID string) pagination.Pager {
+	pageFn := func(r pagination.PageResult) pagination.Page {
+		return instances.InstancePage{pagination.LinkedPageBase{PageResult: r}}
+	}
+	return pagination.NewPager(client, instancesURL(client, configID), pageFn)
+}
+
+// ListDatastoreParams will list all the available and supported parameters
+// that can be used for a particular datastore ID and a particular version.
+// For example, if you are wondering how you can configure a MySQL 5.6 instance,
+// you can use this operation (you will need to retrieve the MySQL datastore ID
+// by using the datastores API).
+func ListDatastoreParams(client *gophercloud.ServiceClient, datastoreID, versionID string) pagination.Pager {
+	pageFn := func(r pagination.PageResult) pagination.Page {
+		return ParamPage{pagination.SinglePageBase(r)}
+	}
+	return pagination.NewPager(client, listDSParamsURL(client, datastoreID, versionID), pageFn)
+}
+
+// GetDatastoreParam will retrieve information about a specific configuration
+// parameter. For example, you can use this operation to understand more about
+// the "innodb_file_per_table" configuration param for MySQL datastores. You
+// will need the param's ID first, which can be obtained by using the
+// ListDatastoreParams operation.
+func GetDatastoreParam(client *gophercloud.ServiceClient, datastoreID, versionID, paramID string) ParamResult {
+	var res ParamResult
+
+	_, res.Err = client.Request("GET", getDSParamURL(client, datastoreID, versionID, paramID), gophercloud.RequestOpts{
+		OkCodes:      []int{200},
+		JSONResponse: &res.Body,
+	})
+
+	return res
+}
+
+// ListGlobalParams is similar to ListDatastoreParams but does not require a
+// DatastoreID.
+func ListGlobalParams(client *gophercloud.ServiceClient, versionID string) pagination.Pager {
+	pageFn := func(r pagination.PageResult) pagination.Page {
+		return ParamPage{pagination.SinglePageBase(r)}
+	}
+	return pagination.NewPager(client, listGlobalParamsURL(client, versionID), pageFn)
+}
+
+// GetGlobalParam is similar to GetDatastoreParam but does not require a
+// DatastoreID.
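+//
+// Illustrative sketch (assumes client as above; versionID and paramID are
+// assumed to have been obtained from the datastores API and a parameter
+// listing, respectively):
+//
+//	param, err := GetGlobalParam(client, versionID, paramID).Extract()
+//	if err != nil {
+//		// handle error
+//	}
+//	fmt.Printf("%s (%s), restart required: %t\n", param.Name, param.Type, param.RestartRequired)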
+func GetGlobalParam(client *gophercloud.ServiceClient, versionID, paramID string) ParamResult { + var res ParamResult + + _, res.Err = client.Request("GET", getGlobalParamURL(client, versionID, paramID), gophercloud.RequestOpts{ + OkCodes: []int{200}, + JSONResponse: &res.Body, + }) + + return res +} diff --git a/vendor/github.com/rackspace/gophercloud/openstack/db/v1/configurations/requests_test.go b/vendor/github.com/rackspace/gophercloud/openstack/db/v1/configurations/requests_test.go new file mode 100644 index 000000000..db66f29f8 --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/openstack/db/v1/configurations/requests_test.go @@ -0,0 +1,236 @@ +package configurations + +import ( + "testing" + + "github.com/rackspace/gophercloud/pagination" + "github.com/rackspace/gophercloud/rackspace/db/v1/instances" + th "github.com/rackspace/gophercloud/testhelper" + fake "github.com/rackspace/gophercloud/testhelper/client" + "github.com/rackspace/gophercloud/testhelper/fixture" +) + +var ( + configID = "{configID}" + _baseURL = "/configurations" + resURL = _baseURL + "/" + configID + + dsID = "{datastoreID}" + versionID = "{versionID}" + paramID = "{paramID}" + dsParamListURL = "/datastores/" + dsID + "/versions/" + versionID + "/parameters" + dsParamGetURL = "/datastores/" + dsID + "/versions/" + versionID + "/parameters/" + paramID + globalParamListURL = "/datastores/versions/" + versionID + "/parameters" + globalParamGetURL = "/datastores/versions/" + versionID + "/parameters/" + paramID +) + +func TestList(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + fixture.SetupHandler(t, _baseURL, "GET", "", ListConfigsJSON, 200) + + count := 0 + err := List(fake.ServiceClient()).EachPage(func(page pagination.Page) (bool, error) { + count++ + actual, err := ExtractConfigs(page) + th.AssertNoErr(t, err) + + expected := []Config{ExampleConfig} + th.AssertDeepEquals(t, expected, actual) + + return true, nil + }) + + th.AssertEquals(t, 1, count) + th.AssertNoErr(t, err) +} + +func TestGet(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + fixture.SetupHandler(t, resURL, "GET", "", GetConfigJSON, 200) + + config, err := Get(fake.ServiceClient(), configID).Extract() + th.AssertNoErr(t, err) + th.AssertDeepEquals(t, &ExampleConfig, config) +} + +func TestCreate(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + fixture.SetupHandler(t, _baseURL, "POST", CreateReq, CreateConfigJSON, 200) + + opts := CreateOpts{ + Datastore: &DatastoreOpts{ + Type: "a00000a0-00a0-0a00-00a0-000a000000aa", + Version: "b00000b0-00b0-0b00-00b0-000b000000bb", + }, + Description: "example description", + Name: "example-configuration-name", + Values: map[string]interface{}{ + "collation_server": "latin1_swedish_ci", + "connect_timeout": 120, + }, + } + + config, err := Create(fake.ServiceClient(), opts).Extract() + th.AssertNoErr(t, err) + th.AssertDeepEquals(t, &ExampleConfigWithValues, config) +} + +func TestUpdate(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + fixture.SetupHandler(t, resURL, "PATCH", UpdateReq, "", 200) + + opts := UpdateOpts{ + Values: map[string]interface{}{ + "connect_timeout": 300, + }, + } + + err := Update(fake.ServiceClient(), configID, opts).ExtractErr() + th.AssertNoErr(t, err) +} + +func TestReplace(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + fixture.SetupHandler(t, resURL, "PUT", UpdateReq, "", 202) + + opts := UpdateOpts{ + Values: map[string]interface{}{ + "connect_timeout": 300, + }, + } + + err := Replace(fake.ServiceClient(), 
configID, opts).ExtractErr() + th.AssertNoErr(t, err) +} + +func TestDelete(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + fixture.SetupHandler(t, resURL, "DELETE", "", "", 202) + + err := Delete(fake.ServiceClient(), configID).ExtractErr() + th.AssertNoErr(t, err) +} + +func TestListInstances(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + fixture.SetupHandler(t, resURL+"/instances", "GET", "", ListInstancesJSON, 200) + + expectedInstance := instances.Instance{ + ID: "d4603f69-ec7e-4e9b-803f-600b9205576f", + Name: "json_rack_instance", + } + + pages := 0 + err := ListInstances(fake.ServiceClient(), configID).EachPage(func(page pagination.Page) (bool, error) { + pages++ + + actual, err := instances.ExtractInstances(page) + if err != nil { + return false, err + } + + th.AssertDeepEquals(t, actual, []instances.Instance{expectedInstance}) + + return true, nil + }) + + th.AssertNoErr(t, err) + th.AssertEquals(t, 1, pages) +} + +func TestListDSParams(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + fixture.SetupHandler(t, dsParamListURL, "GET", "", ListParamsJSON, 200) + + pages := 0 + err := ListDatastoreParams(fake.ServiceClient(), dsID, versionID).EachPage(func(page pagination.Page) (bool, error) { + pages++ + + actual, err := ExtractParams(page) + if err != nil { + return false, err + } + + expected := []Param{ + Param{Max: 1, Min: 0, Name: "innodb_file_per_table", RestartRequired: true, Type: "integer"}, + Param{Max: 4294967296, Min: 0, Name: "key_buffer_size", RestartRequired: false, Type: "integer"}, + Param{Max: 65535, Min: 2, Name: "connect_timeout", RestartRequired: false, Type: "integer"}, + Param{Max: 4294967296, Min: 0, Name: "join_buffer_size", RestartRequired: false, Type: "integer"}, + } + + th.AssertDeepEquals(t, actual, expected) + + return true, nil + }) + + th.AssertNoErr(t, err) + th.AssertEquals(t, 1, pages) +} + +func TestGetDSParam(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + fixture.SetupHandler(t, dsParamGetURL, "GET", "", GetParamJSON, 200) + + param, err := GetDatastoreParam(fake.ServiceClient(), dsID, versionID, paramID).Extract() + th.AssertNoErr(t, err) + + expected := &Param{ + Max: 1, Min: 0, Name: "innodb_file_per_table", RestartRequired: true, Type: "integer", + } + + th.AssertDeepEquals(t, expected, param) +} + +func TestListGlobalParams(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + fixture.SetupHandler(t, globalParamListURL, "GET", "", ListParamsJSON, 200) + + pages := 0 + err := ListGlobalParams(fake.ServiceClient(), versionID).EachPage(func(page pagination.Page) (bool, error) { + pages++ + + actual, err := ExtractParams(page) + if err != nil { + return false, err + } + + expected := []Param{ + Param{Max: 1, Min: 0, Name: "innodb_file_per_table", RestartRequired: true, Type: "integer"}, + Param{Max: 4294967296, Min: 0, Name: "key_buffer_size", RestartRequired: false, Type: "integer"}, + Param{Max: 65535, Min: 2, Name: "connect_timeout", RestartRequired: false, Type: "integer"}, + Param{Max: 4294967296, Min: 0, Name: "join_buffer_size", RestartRequired: false, Type: "integer"}, + } + + th.AssertDeepEquals(t, actual, expected) + + return true, nil + }) + + th.AssertNoErr(t, err) + th.AssertEquals(t, 1, pages) +} + +func TestGetGlobalParam(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + fixture.SetupHandler(t, globalParamGetURL, "GET", "", GetParamJSON, 200) + + param, err := GetGlobalParam(fake.ServiceClient(), versionID, paramID).Extract() + th.AssertNoErr(t, err) + + expected 
:= &Param{ + Max: 1, Min: 0, Name: "innodb_file_per_table", RestartRequired: true, Type: "integer", + } + + th.AssertDeepEquals(t, expected, param) +} diff --git a/vendor/github.com/rackspace/gophercloud/openstack/db/v1/configurations/results.go b/vendor/github.com/rackspace/gophercloud/openstack/db/v1/configurations/results.go new file mode 100644 index 000000000..d0d1d6e99 --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/openstack/db/v1/configurations/results.go @@ -0,0 +1,197 @@ +package configurations + +import ( + "fmt" + "reflect" + "time" + + "github.com/mitchellh/mapstructure" + "github.com/rackspace/gophercloud" + "github.com/rackspace/gophercloud/pagination" +) + +// Config represents a configuration group API resource. +type Config struct { + Created time.Time `mapstructure:"-"` + Updated time.Time `mapstructure:"-"` + DatastoreName string `mapstructure:"datastore_name"` + DatastoreVersionID string `mapstructure:"datastore_version_id"` + DatastoreVersionName string `mapstructure:"datastore_version_name"` + Description string + ID string + Name string + Values map[string]interface{} +} + +// ConfigPage contains a page of Config resources in a paginated collection. +type ConfigPage struct { + pagination.SinglePageBase +} + +// IsEmpty indicates whether a ConfigPage is empty. +func (r ConfigPage) IsEmpty() (bool, error) { + is, err := ExtractConfigs(r) + if err != nil { + return true, err + } + return len(is) == 0, nil +} + +// ExtractConfigs will retrieve a slice of Config structs from a page. +func ExtractConfigs(page pagination.Page) ([]Config, error) { + casted := page.(ConfigPage).Body + + var resp struct { + Configs []Config `mapstructure:"configurations" json:"configurations"` + } + + if err := mapstructure.Decode(casted, &resp); err != nil { + return nil, err + } + + var vals []interface{} + switch casted.(type) { + case map[string]interface{}: + vals = casted.(map[string]interface{})["configurations"].([]interface{}) + case map[string][]interface{}: + vals = casted.(map[string][]interface{})["configurations"] + default: + return resp.Configs, fmt.Errorf("Unknown type: %v", reflect.TypeOf(casted)) + } + + for i, v := range vals { + val := v.(map[string]interface{}) + + if t, ok := val["created"].(string); ok && t != "" { + creationTime, err := time.Parse(time.RFC3339, t) + if err != nil { + return resp.Configs, err + } + resp.Configs[i].Created = creationTime + } + + if t, ok := val["updated"].(string); ok && t != "" { + updatedTime, err := time.Parse(time.RFC3339, t) + if err != nil { + return resp.Configs, err + } + resp.Configs[i].Updated = updatedTime + } + } + + return resp.Configs, nil +} + +type commonResult struct { + gophercloud.Result +} + +// Extract will retrieve a Config resource from an operation result. 
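+//
+// Note that Created and Updated are tagged `mapstructure:"-"` on Config, so
+// they are not decoded automatically; Extract parses the "created" and
+// "updated" RFC3339 strings out of the raw response body by hand before
+// returning the struct.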
+func (r commonResult) Extract() (*Config, error) { + if r.Err != nil { + return nil, r.Err + } + + var response struct { + Config Config `mapstructure:"configuration"` + } + + err := mapstructure.Decode(r.Body, &response) + val := r.Body.(map[string]interface{})["configuration"].(map[string]interface{}) + + if t, ok := val["created"].(string); ok && t != "" { + creationTime, err := time.Parse(time.RFC3339, t) + if err != nil { + return &response.Config, err + } + response.Config.Created = creationTime + } + + if t, ok := val["updated"].(string); ok && t != "" { + updatedTime, err := time.Parse(time.RFC3339, t) + if err != nil { + return &response.Config, err + } + response.Config.Updated = updatedTime + } + + return &response.Config, err +} + +// GetResult represents the result of a Get operation. +type GetResult struct { + commonResult +} + +// CreateResult represents the result of a Create operation. +type CreateResult struct { + commonResult +} + +// UpdateResult represents the result of an Update operation. +type UpdateResult struct { + gophercloud.ErrResult +} + +// ReplaceResult represents the result of a Replace operation. +type ReplaceResult struct { + gophercloud.ErrResult +} + +// DeleteResult represents the result of a Delete operation. +type DeleteResult struct { + gophercloud.ErrResult +} + +// Param represents a configuration parameter API resource. +type Param struct { + Max int + Min int + Name string + RestartRequired bool `mapstructure:"restart_required" json:"restart_required"` + Type string +} + +// ParamPage contains a page of Param resources in a paginated collection. +type ParamPage struct { + pagination.SinglePageBase +} + +// IsEmpty indicates whether a ParamPage is empty. +func (r ParamPage) IsEmpty() (bool, error) { + is, err := ExtractParams(r) + if err != nil { + return true, err + } + return len(is) == 0, nil +} + +// ExtractParams will retrieve a slice of Param structs from a page. +func ExtractParams(page pagination.Page) ([]Param, error) { + casted := page.(ParamPage).Body + + var resp struct { + Params []Param `mapstructure:"configuration-parameters" json:"configuration-parameters"` + } + + err := mapstructure.Decode(casted, &resp) + return resp.Params, err +} + +// ParamResult represents the result of an operation which retrieves details +// about a particular configuration param. +type ParamResult struct { + gophercloud.Result +} + +// Extract will retrieve a param from an operation result. 
+func (r ParamResult) Extract() (*Param, error) {
+	if r.Err != nil {
+		return nil, r.Err
+	}
+
+	var param Param
+
+	err := mapstructure.Decode(r.Body, &param)
+	return &param, err
+}
diff --git a/vendor/github.com/rackspace/gophercloud/openstack/db/v1/configurations/urls.go b/vendor/github.com/rackspace/gophercloud/openstack/db/v1/configurations/urls.go
new file mode 100644
index 000000000..abea96158
--- /dev/null
+++ b/vendor/github.com/rackspace/gophercloud/openstack/db/v1/configurations/urls.go
@@ -0,0 +1,31 @@
+package configurations
+
+import "github.com/rackspace/gophercloud"
+
+func baseURL(c *gophercloud.ServiceClient) string {
+	return c.ServiceURL("configurations")
+}
+
+func resourceURL(c *gophercloud.ServiceClient, configID string) string {
+	return c.ServiceURL("configurations", configID)
+}
+
+func instancesURL(c *gophercloud.ServiceClient, configID string) string {
+	return c.ServiceURL("configurations", configID, "instances")
+}
+
+func listDSParamsURL(c *gophercloud.ServiceClient, datastoreID, versionID string) string {
+	return c.ServiceURL("datastores", datastoreID, "versions", versionID, "parameters")
+}
+
+func getDSParamURL(c *gophercloud.ServiceClient, datastoreID, versionID, paramID string) string {
+	return c.ServiceURL("datastores", datastoreID, "versions", versionID, "parameters", paramID)
+}
+
+func listGlobalParamsURL(c *gophercloud.ServiceClient, versionID string) string {
+	return c.ServiceURL("datastores", "versions", versionID, "parameters")
+}
+
+func getGlobalParamURL(c *gophercloud.ServiceClient, versionID, paramID string) string {
+	return c.ServiceURL("datastores", "versions", versionID, "parameters", paramID)
+}
diff --git a/vendor/github.com/rackspace/gophercloud/openstack/db/v1/databases/doc.go b/vendor/github.com/rackspace/gophercloud/openstack/db/v1/databases/doc.go
new file mode 100644
index 000000000..15275fe5d
--- /dev/null
+++ b/vendor/github.com/rackspace/gophercloud/openstack/db/v1/databases/doc.go
@@ -0,0 +1,6 @@
+// Package databases provides information and interaction with the database API
+// resource in the OpenStack Database service.
+//
+// A database, when referred to here, refers to the database engine running on
+// an instance.
+package databases
diff --git a/vendor/github.com/rackspace/gophercloud/openstack/db/v1/databases/fixtures.go b/vendor/github.com/rackspace/gophercloud/openstack/db/v1/databases/fixtures.go
new file mode 100644
index 000000000..3e677211b
--- /dev/null
+++ b/vendor/github.com/rackspace/gophercloud/openstack/db/v1/databases/fixtures.go
@@ -0,0 +1,61 @@
+package databases
+
+import (
+	"testing"
+
+	"github.com/rackspace/gophercloud/testhelper/fixture"
+)
+
+var (
+	instanceID = "{instanceID}"
+	resURL     = "/instances/" + instanceID + "/databases"
+)
+
+var createDBsReq = `
+{
+  "databases": [
+    {
+      "character_set": "utf8",
+      "collate": "utf8_general_ci",
+      "name": "testingdb"
+    },
+    {
+      "name": "sampledb"
+    }
+  ]
+}
+`
+
+var listDBsResp = `
+{
+  "databases": [
+    {
+      "name": "anotherexampledb"
+    },
+    {
+      "name": "exampledb"
+    },
+    {
+      "name": "nextround"
+    },
+    {
+      "name": "sampledb"
+    },
+    {
+      "name": "testingdb"
+    }
+  ]
+}
+`
+
+func HandleCreate(t *testing.T) {
+	fixture.SetupHandler(t, resURL, "POST", createDBsReq, "", 202)
+}
+
+func HandleList(t *testing.T) {
+	fixture.SetupHandler(t, resURL, "GET", "", listDBsResp, 200)
+}
+
+func HandleDelete(t *testing.T) {
+	fixture.SetupHandler(t, resURL+"/{dbName}", "DELETE", "", "", 202)
+}
diff --git a/vendor/github.com/rackspace/gophercloud/openstack/db/v1/databases/requests.go b/vendor/github.com/rackspace/gophercloud/openstack/db/v1/databases/requests.go
new file mode 100644
index 000000000..f1eb5d9ae
--- /dev/null
+++ b/vendor/github.com/rackspace/gophercloud/openstack/db/v1/databases/requests.go
@@ -0,0 +1,115 @@
+package databases
+
+import (
+	"fmt"
+
+	"github.com/rackspace/gophercloud"
+	"github.com/rackspace/gophercloud/pagination"
+)
+
+// CreateOptsBuilder builds create options.
+type CreateOptsBuilder interface {
+	ToDBCreateMap() (map[string]interface{}, error)
+}
+
+// CreateOpts is the struct responsible for configuring a database, often in
+// the context of an instance.
+type CreateOpts struct {
+	// [REQUIRED] Specifies the name of the database. Valid names can be composed
+	// of the following characters: letters (either case); numbers; these
+	// characters '@', '?', '#', ' ' but NEVER beginning a name string; '_' is
+	// permitted anywhere. Prohibited characters include: single quotes, double
+	// quotes, back quotes, semicolons, commas, backslashes, and forward slashes.
+	Name string
+
+	// [OPTIONAL] Set of symbols and encodings. The default character set is
+	// "utf8". See http://dev.mysql.com/doc/refman/5.1/en/charset-mysql.html for
+	// supported character sets.
+	CharSet string
+
+	// [OPTIONAL] Set of rules for comparing characters in a character set. The
+	// default value for collate is "utf8_general_ci". See
+	// http://dev.mysql.com/doc/refman/5.1/en/charset-mysql.html for supported
+	// collations.
+	Collate string
+}
+
+// ToMap is a helper function to convert individual DB create opt structures
+// into sub-maps.
+func (opts CreateOpts) ToMap() (map[string]string, error) {
+	if opts.Name == "" {
+		return nil, fmt.Errorf("Name is a required field")
+	}
+	if len(opts.Name) > 64 {
+		return nil, fmt.Errorf("Name must be less than 64 chars long")
+	}
+
+	db := map[string]string{"name": opts.Name}
+
+	if opts.CharSet != "" {
+		db["character_set"] = opts.CharSet
+	}
+	if opts.Collate != "" {
+		db["collate"] = opts.Collate
+	}
+	return db, nil
+}
+
+// BatchCreateOpts allows for multiple databases to be created and modified.
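+//
+// Illustrative sketch: the following options render the same JSON body as the
+// createDBsReq fixture above:
+//
+//	opts := BatchCreateOpts{
+//		CreateOpts{Name: "testingdb", CharSet: "utf8", Collate: "utf8_general_ci"},
+//		CreateOpts{Name: "sampledb"},
+//	}
+//	body, err := opts.ToDBCreateMap() // {"databases": [...]}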
+type BatchCreateOpts []CreateOpts + +// ToDBCreateMap renders a JSON map for creating DBs. +func (opts BatchCreateOpts) ToDBCreateMap() (map[string]interface{}, error) { + dbs := make([]map[string]string, len(opts)) + for i, db := range opts { + dbMap, err := db.ToMap() + if err != nil { + return nil, err + } + dbs[i] = dbMap + } + return map[string]interface{}{"databases": dbs}, nil +} + +// Create will create a new database within the specified instance. If the +// specified instance does not exist, a 404 error will be returned. +func Create(client *gophercloud.ServiceClient, instanceID string, opts CreateOptsBuilder) CreateResult { + var res CreateResult + + reqBody, err := opts.ToDBCreateMap() + if err != nil { + res.Err = err + return res + } + + _, res.Err = client.Request("POST", baseURL(client, instanceID), gophercloud.RequestOpts{ + JSONBody: &reqBody, + OkCodes: []int{202}, + }) + + return res +} + +// List will list all of the databases for a specified instance. Note: this +// operation will only return user-defined databases; it will exclude system +// databases like "mysql", "information_schema", "lost+found" etc. +func List(client *gophercloud.ServiceClient, instanceID string) pagination.Pager { + createPageFn := func(r pagination.PageResult) pagination.Page { + return DBPage{pagination.LinkedPageBase{PageResult: r}} + } + + return pagination.NewPager(client, baseURL(client, instanceID), createPageFn) +} + +// Delete will permanently delete the database within a specified instance. +// All contained data inside the database will also be permanently deleted. +func Delete(client *gophercloud.ServiceClient, instanceID, dbName string) DeleteResult { + var res DeleteResult + + _, res.Err = client.Request("DELETE", dbURL(client, instanceID, dbName), gophercloud.RequestOpts{ + OkCodes: []int{202}, + }) + + return res +} diff --git a/vendor/github.com/rackspace/gophercloud/openstack/db/v1/databases/requests_test.go b/vendor/github.com/rackspace/gophercloud/openstack/db/v1/databases/requests_test.go new file mode 100644 index 000000000..8a1b297f2 --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/openstack/db/v1/databases/requests_test.go @@ -0,0 +1,66 @@ +package databases + +import ( + "testing" + + "github.com/rackspace/gophercloud/pagination" + th "github.com/rackspace/gophercloud/testhelper" + fake "github.com/rackspace/gophercloud/testhelper/client" +) + +func TestCreate(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleCreate(t) + + opts := BatchCreateOpts{ + CreateOpts{Name: "testingdb", CharSet: "utf8", Collate: "utf8_general_ci"}, + CreateOpts{Name: "sampledb"}, + } + + res := Create(fake.ServiceClient(), instanceID, opts) + th.AssertNoErr(t, res.Err) +} + +func TestList(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleList(t) + + expectedDBs := []Database{ + Database{Name: "anotherexampledb"}, + Database{Name: "exampledb"}, + Database{Name: "nextround"}, + Database{Name: "sampledb"}, + Database{Name: "testingdb"}, + } + + pages := 0 + err := List(fake.ServiceClient(), instanceID).EachPage(func(page pagination.Page) (bool, error) { + pages++ + + actual, err := ExtractDBs(page) + if err != nil { + return false, err + } + + th.CheckDeepEquals(t, expectedDBs, actual) + + return true, nil + }) + + th.AssertNoErr(t, err) + + if pages != 1 { + t.Errorf("Expected 1 page, saw %d", pages) + } +} + +func TestDelete(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleDelete(t) + + err := Delete(fake.ServiceClient(), 
instanceID, "{dbName}").ExtractErr() + th.AssertNoErr(t, err) +} diff --git a/vendor/github.com/rackspace/gophercloud/openstack/db/v1/databases/results.go b/vendor/github.com/rackspace/gophercloud/openstack/db/v1/databases/results.go new file mode 100644 index 000000000..7d4b6aeb2 --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/openstack/db/v1/databases/results.go @@ -0,0 +1,72 @@ +package databases + +import ( + "github.com/mitchellh/mapstructure" + "github.com/rackspace/gophercloud" + "github.com/rackspace/gophercloud/pagination" +) + +// Database represents a Database API resource. +type Database struct { + // Specifies the name of the MySQL database. + Name string + + // Set of symbols and encodings. The default character set is utf8. + CharSet string + + // Set of rules for comparing characters in a character set. The default + // value for collate is utf8_general_ci. + Collate string +} + +// CreateResult represents the result of a Create operation. +type CreateResult struct { + gophercloud.ErrResult +} + +// DeleteResult represents the result of a Delete operation. +type DeleteResult struct { + gophercloud.ErrResult +} + +// DBPage represents a single page of a paginated DB collection. +type DBPage struct { + pagination.LinkedPageBase +} + +// IsEmpty checks to see whether the collection is empty. +func (page DBPage) IsEmpty() (bool, error) { + dbs, err := ExtractDBs(page) + if err != nil { + return true, err + } + return len(dbs) == 0, nil +} + +// NextPageURL will retrieve the next page URL. +func (page DBPage) NextPageURL() (string, error) { + type resp struct { + Links []gophercloud.Link `mapstructure:"databases_links"` + } + + var r resp + err := mapstructure.Decode(page.Body, &r) + if err != nil { + return "", err + } + + return gophercloud.ExtractNextURL(r.Links) +} + +// ExtractDBs will convert a generic pagination struct into a more +// relevant slice of DB structs. +func ExtractDBs(page pagination.Page) ([]Database, error) { + casted := page.(DBPage).Body + + var response struct { + Databases []Database `mapstructure:"databases"` + } + + err := mapstructure.Decode(casted, &response) + return response.Databases, err +} diff --git a/vendor/github.com/rackspace/gophercloud/openstack/db/v1/databases/urls.go b/vendor/github.com/rackspace/gophercloud/openstack/db/v1/databases/urls.go new file mode 100644 index 000000000..027ca5847 --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/openstack/db/v1/databases/urls.go @@ -0,0 +1,11 @@ +package databases + +import "github.com/rackspace/gophercloud" + +func baseURL(c *gophercloud.ServiceClient, instanceID string) string { + return c.ServiceURL("instances", instanceID, "databases") +} + +func dbURL(c *gophercloud.ServiceClient, instanceID, dbName string) string { + return c.ServiceURL("instances", instanceID, "databases", dbName) +} diff --git a/vendor/github.com/rackspace/gophercloud/openstack/db/v1/datastores/doc.go b/vendor/github.com/rackspace/gophercloud/openstack/db/v1/datastores/doc.go new file mode 100644 index 000000000..ae14026b5 --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/openstack/db/v1/datastores/doc.go @@ -0,0 +1,3 @@ +// Package datastores provides information and interaction with the datastore +// API resource in the Rackspace Database service. 
+package datastores diff --git a/vendor/github.com/rackspace/gophercloud/openstack/db/v1/datastores/fixtures.go b/vendor/github.com/rackspace/gophercloud/openstack/db/v1/datastores/fixtures.go new file mode 100644 index 000000000..fd767cd3b --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/openstack/db/v1/datastores/fixtures.go @@ -0,0 +1,100 @@ +package datastores + +import ( + "fmt" + + "github.com/rackspace/gophercloud" +) + +const version1JSON = ` +{ + "id": "b00000b0-00b0-0b00-00b0-000b000000bb", + "links": [ + { + "href": "https://10.240.28.38:8779/v1.0/1234/datastores/versions/b00000b0-00b0-0b00-00b0-000b000000bb", + "rel": "self" + }, + { + "href": "https://10.240.28.38:8779/datastores/versions/b00000b0-00b0-0b00-00b0-000b000000bb", + "rel": "bookmark" + } + ], + "name": "5.1" +} +` + +const version2JSON = ` +{ + "id": "c00000b0-00c0-0c00-00c0-000b000000cc", + "links": [ + { + "href": "https://10.240.28.38:8779/v1.0/1234/datastores/versions/c00000b0-00c0-0c00-00c0-000b000000cc", + "rel": "self" + }, + { + "href": "https://10.240.28.38:8779/datastores/versions/c00000b0-00c0-0c00-00c0-000b000000cc", + "rel": "bookmark" + } + ], + "name": "5.2" +} +` + +var versionsJSON = fmt.Sprintf(`"versions": [%s, %s]`, version1JSON, version2JSON) + +var singleDSJSON = fmt.Sprintf(` +{ + "default_version": "c00000b0-00c0-0c00-00c0-000b000000cc", + "id": "10000000-0000-0000-0000-000000000001", + "links": [ + { + "href": "https://10.240.28.38:8779/v1.0/1234/datastores/10000000-0000-0000-0000-000000000001", + "rel": "self" + }, + { + "href": "https://10.240.28.38:8779/datastores/10000000-0000-0000-0000-000000000001", + "rel": "bookmark" + } + ], + "name": "mysql", + %s +} +`, versionsJSON) + +var ( + ListDSResp = fmt.Sprintf(`{"datastores":[%s]}`, singleDSJSON) + GetDSResp = fmt.Sprintf(`{"datastore":%s}`, singleDSJSON) + ListVersionsResp = fmt.Sprintf(`{%s}`, versionsJSON) + GetVersionResp = fmt.Sprintf(`{"version":%s}`, version1JSON) +) + +var ExampleVersion1 = Version{ + ID: "b00000b0-00b0-0b00-00b0-000b000000bb", + Links: []gophercloud.Link{ + gophercloud.Link{Rel: "self", Href: "https://10.240.28.38:8779/v1.0/1234/datastores/versions/b00000b0-00b0-0b00-00b0-000b000000bb"}, + gophercloud.Link{Rel: "bookmark", Href: "https://10.240.28.38:8779/datastores/versions/b00000b0-00b0-0b00-00b0-000b000000bb"}, + }, + Name: "5.1", +} + +var exampleVersion2 = Version{ + ID: "c00000b0-00c0-0c00-00c0-000b000000cc", + Links: []gophercloud.Link{ + gophercloud.Link{Rel: "self", Href: "https://10.240.28.38:8779/v1.0/1234/datastores/versions/c00000b0-00c0-0c00-00c0-000b000000cc"}, + gophercloud.Link{Rel: "bookmark", Href: "https://10.240.28.38:8779/datastores/versions/c00000b0-00c0-0c00-00c0-000b000000cc"}, + }, + Name: "5.2", +} + +var ExampleVersions = []Version{ExampleVersion1, exampleVersion2} + +var ExampleDatastore = Datastore{ + DefaultVersion: "c00000b0-00c0-0c00-00c0-000b000000cc", + ID: "10000000-0000-0000-0000-000000000001", + Links: []gophercloud.Link{ + gophercloud.Link{Rel: "self", Href: "https://10.240.28.38:8779/v1.0/1234/datastores/10000000-0000-0000-0000-000000000001"}, + gophercloud.Link{Rel: "bookmark", Href: "https://10.240.28.38:8779/datastores/10000000-0000-0000-0000-000000000001"}, + }, + Name: "mysql", + Versions: ExampleVersions, +} diff --git a/vendor/github.com/rackspace/gophercloud/openstack/db/v1/datastores/requests.go b/vendor/github.com/rackspace/gophercloud/openstack/db/v1/datastores/requests.go new file mode 100644 index 000000000..9e147ab2d --- /dev/null +++ 
b/vendor/github.com/rackspace/gophercloud/openstack/db/v1/datastores/requests.go @@ -0,0 +1,47 @@ +package datastores + +import ( + "github.com/rackspace/gophercloud" + "github.com/rackspace/gophercloud/pagination" +) + +// List will list all available datastore types that instances can use. +func List(client *gophercloud.ServiceClient) pagination.Pager { + pageFn := func(r pagination.PageResult) pagination.Page { + return DatastorePage{pagination.SinglePageBase(r)} + } + return pagination.NewPager(client, baseURL(client), pageFn) +} + +// Get will retrieve the details of a specified datastore type. +func Get(client *gophercloud.ServiceClient, datastoreID string) GetResult { + var res GetResult + + _, res.Err = client.Request("GET", resourceURL(client, datastoreID), gophercloud.RequestOpts{ + OkCodes: []int{200}, + JSONResponse: &res.Body, + }) + + return res +} + +// ListVersions will list all of the available versions for a specified +// datastore type. +func ListVersions(client *gophercloud.ServiceClient, datastoreID string) pagination.Pager { + pageFn := func(r pagination.PageResult) pagination.Page { + return VersionPage{pagination.SinglePageBase(r)} + } + return pagination.NewPager(client, versionsURL(client, datastoreID), pageFn) +} + +// GetVersion will retrieve the details of a specified datastore version. +func GetVersion(client *gophercloud.ServiceClient, datastoreID, versionID string) GetVersionResult { + var res GetVersionResult + + _, res.Err = client.Request("GET", versionURL(client, datastoreID, versionID), gophercloud.RequestOpts{ + OkCodes: []int{200}, + JSONResponse: &res.Body, + }) + + return res +} diff --git a/vendor/github.com/rackspace/gophercloud/openstack/db/v1/datastores/requests_test.go b/vendor/github.com/rackspace/gophercloud/openstack/db/v1/datastores/requests_test.go new file mode 100644 index 000000000..b4ce871c4 --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/openstack/db/v1/datastores/requests_test.go @@ -0,0 +1,78 @@ +package datastores + +import ( + "testing" + + "github.com/rackspace/gophercloud/pagination" + th "github.com/rackspace/gophercloud/testhelper" + fake "github.com/rackspace/gophercloud/testhelper/client" + "github.com/rackspace/gophercloud/testhelper/fixture" +) + +func TestList(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + fixture.SetupHandler(t, "/datastores", "GET", "", ListDSResp, 200) + + pages := 0 + + err := List(fake.ServiceClient()).EachPage(func(page pagination.Page) (bool, error) { + pages++ + + actual, err := ExtractDatastores(page) + if err != nil { + return false, err + } + + th.CheckDeepEquals(t, []Datastore{ExampleDatastore}, actual) + + return true, nil + }) + + th.AssertNoErr(t, err) + th.AssertEquals(t, 1, pages) +} + +func TestGet(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + fixture.SetupHandler(t, "/datastores/{dsID}", "GET", "", GetDSResp, 200) + + ds, err := Get(fake.ServiceClient(), "{dsID}").Extract() + th.AssertNoErr(t, err) + th.AssertDeepEquals(t, &ExampleDatastore, ds) +} + +func TestListVersions(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + fixture.SetupHandler(t, "/datastores/{dsID}/versions", "GET", "", ListVersionsResp, 200) + + pages := 0 + + err := ListVersions(fake.ServiceClient(), "{dsID}").EachPage(func(page pagination.Page) (bool, error) { + pages++ + + actual, err := ExtractVersions(page) + if err != nil { + return false, err + } + + th.CheckDeepEquals(t, ExampleVersions, actual) + + return true, nil + }) + + th.AssertNoErr(t, err) + 
th.AssertEquals(t, 1, pages)
+}
+
+func TestGetVersion(t *testing.T) {
+	th.SetupHTTP()
+	defer th.TeardownHTTP()
+	fixture.SetupHandler(t, "/datastores/{dsID}/versions/{versionID}", "GET", "", GetVersionResp, 200)
+
+	ds, err := GetVersion(fake.ServiceClient(), "{dsID}", "{versionID}").Extract()
+	th.AssertNoErr(t, err)
+	th.AssertDeepEquals(t, &ExampleVersion1, ds)
+}
diff --git a/vendor/github.com/rackspace/gophercloud/openstack/db/v1/datastores/results.go b/vendor/github.com/rackspace/gophercloud/openstack/db/v1/datastores/results.go
new file mode 100644
index 000000000..a86a3cc60
--- /dev/null
+++ b/vendor/github.com/rackspace/gophercloud/openstack/db/v1/datastores/results.go
@@ -0,0 +1,123 @@
+package datastores
+
+import (
+	"github.com/mitchellh/mapstructure"
+	"github.com/rackspace/gophercloud"
+	"github.com/rackspace/gophercloud/pagination"
+)
+
+// Version represents a version API resource. Multiple versions belong to a Datastore.
+type Version struct {
+	ID    string
+	Links []gophercloud.Link
+	Name  string
+}
+
+// Datastore represents a Datastore API resource.
+type Datastore struct {
+	DefaultVersion string `json:"default_version" mapstructure:"default_version"`
+	ID             string
+	Links          []gophercloud.Link
+	Name           string
+	Versions       []Version
+}
+
+// DatastorePartial is a meta structure which is used in various API responses.
+// It is a lightweight and truncated version of a full Datastore resource,
+// offering details of the Version, Type and VersionID only.
+type DatastorePartial struct {
+	Version   string
+	Type      string
+	VersionID string `json:"version_id" mapstructure:"version_id"`
+}
+
+// GetResult represents the result of a Get operation.
+type GetResult struct {
+	gophercloud.Result
+}
+
+// GetVersionResult represents the result of getting a version.
+type GetVersionResult struct {
+	gophercloud.Result
+}
+
+// DatastorePage represents a page of datastore resources.
+type DatastorePage struct {
+	pagination.SinglePageBase
+}
+
+// IsEmpty indicates whether a Datastore collection is empty.
+func (r DatastorePage) IsEmpty() (bool, error) {
+	is, err := ExtractDatastores(r)
+	if err != nil {
+		return true, err
+	}
+	return len(is) == 0, nil
+}
+
+// ExtractDatastores retrieves a slice of datastore structs from a paginated
+// collection.
+func ExtractDatastores(page pagination.Page) ([]Datastore, error) {
+	casted := page.(DatastorePage).Body
+
+	var resp struct {
+		Datastores []Datastore `mapstructure:"datastores" json:"datastores"`
+	}
+
+	err := mapstructure.Decode(casted, &resp)
+	return resp.Datastores, err
+}
+
+// Extract retrieves a single Datastore struct from an operation result.
+func (r GetResult) Extract() (*Datastore, error) {
+	if r.Err != nil {
+		return nil, r.Err
+	}
+
+	var response struct {
+		Datastore Datastore `mapstructure:"datastore"`
+	}
+
+	err := mapstructure.Decode(r.Body, &response)
+	return &response.Datastore, err
+}
+
+// VersionPage represents a page of version resources.
+type VersionPage struct {
+	pagination.SinglePageBase
+}
+
+// IsEmpty indicates whether a collection of version resources is empty.
+func (r VersionPage) IsEmpty() (bool, error) {
+	is, err := ExtractVersions(r)
+	if err != nil {
+		return true, err
+	}
+	return len(is) == 0, nil
+}
+
+// ExtractVersions retrieves a slice of versions from a paginated collection.
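+//
+// Illustrative sketch (run against each page produced by ListVersions):
+//
+//	err := ListVersions(client, datastoreID).EachPage(func(page pagination.Page) (bool, error) {
+//		versions, err := ExtractVersions(page)
+//		if err != nil {
+//			return false, err
+//		}
+//		for _, v := range versions {
+//			fmt.Println(v.ID, v.Name)
+//		}
+//		return true, nil
+//	})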
+func ExtractVersions(page pagination.Page) ([]Version, error) { + casted := page.(VersionPage).Body + + var resp struct { + Versions []Version `mapstructure:"versions" json:"versions"` + } + + err := mapstructure.Decode(casted, &resp) + return resp.Versions, err +} + +// Extract retrieves a single Version struct from an operation result. +func (r GetVersionResult) Extract() (*Version, error) { + if r.Err != nil { + return nil, r.Err + } + + var response struct { + Version Version `mapstructure:"version"` + } + + err := mapstructure.Decode(r.Body, &response) + return &response.Version, err +} diff --git a/vendor/github.com/rackspace/gophercloud/openstack/db/v1/datastores/urls.go b/vendor/github.com/rackspace/gophercloud/openstack/db/v1/datastores/urls.go new file mode 100644 index 000000000..c4d52480e --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/openstack/db/v1/datastores/urls.go @@ -0,0 +1,19 @@ +package datastores + +import "github.com/rackspace/gophercloud" + +func baseURL(c *gophercloud.ServiceClient) string { + return c.ServiceURL("datastores") +} + +func resourceURL(c *gophercloud.ServiceClient, dsID string) string { + return c.ServiceURL("datastores", dsID) +} + +func versionsURL(c *gophercloud.ServiceClient, dsID string) string { + return c.ServiceURL("datastores", dsID, "versions") +} + +func versionURL(c *gophercloud.ServiceClient, dsID, versionID string) string { + return c.ServiceURL("datastores", dsID, "versions", versionID) +} diff --git a/vendor/github.com/rackspace/gophercloud/openstack/db/v1/flavors/doc.go b/vendor/github.com/rackspace/gophercloud/openstack/db/v1/flavors/doc.go new file mode 100644 index 000000000..4d281d562 --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/openstack/db/v1/flavors/doc.go @@ -0,0 +1,7 @@ +// Package flavors provides information and interaction with the flavor API +// resource in the OpenStack Database service. +// +// A flavor is an available hardware configuration for a database instance. +// Each flavor has a unique combination of disk space, memory capacity and +// priority for CPU time. 
+package flavors diff --git a/vendor/github.com/rackspace/gophercloud/openstack/db/v1/flavors/fixtures.go b/vendor/github.com/rackspace/gophercloud/openstack/db/v1/flavors/fixtures.go new file mode 100644 index 000000000..f0016bcb2 --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/openstack/db/v1/flavors/fixtures.go @@ -0,0 +1,50 @@ +package flavors + +import ( + "fmt" + "testing" + + "github.com/rackspace/gophercloud/testhelper/fixture" +) + +const flavor = ` +{ + "id": %d, + "links": [ + { + "href": "https://openstack.example.com/v1.0/1234/flavors/%d", + "rel": "self" + }, + { + "href": "https://openstack.example.com/flavors/%d", + "rel": "bookmark" + } + ], + "name": "%s", + "ram": %d +} +` + +var ( + flavorID = "{flavorID}" + _baseURL = "/flavors" + resURL = "/flavors/" + flavorID +) + +var ( + flavor1 = fmt.Sprintf(flavor, 1, 1, 1, "m1.tiny", 512) + flavor2 = fmt.Sprintf(flavor, 2, 2, 2, "m1.small", 1024) + flavor3 = fmt.Sprintf(flavor, 3, 3, 3, "m1.medium", 2048) + flavor4 = fmt.Sprintf(flavor, 4, 4, 4, "m1.large", 4096) + + listFlavorsResp = fmt.Sprintf(`{"flavors":[%s, %s, %s, %s]}`, flavor1, flavor2, flavor3, flavor4) + getFlavorResp = fmt.Sprintf(`{"flavor": %s}`, flavor1) +) + +func HandleList(t *testing.T) { + fixture.SetupHandler(t, _baseURL, "GET", "", listFlavorsResp, 200) +} + +func HandleGet(t *testing.T) { + fixture.SetupHandler(t, resURL, "GET", "", getFlavorResp, 200) +} diff --git a/vendor/github.com/rackspace/gophercloud/openstack/db/v1/flavors/requests.go b/vendor/github.com/rackspace/gophercloud/openstack/db/v1/flavors/requests.go new file mode 100644 index 000000000..fa34446b8 --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/openstack/db/v1/flavors/requests.go @@ -0,0 +1,29 @@ +package flavors + +import ( + "github.com/rackspace/gophercloud" + "github.com/rackspace/gophercloud/pagination" +) + +// List will list all available hardware flavors that an instance can use. The +// operation is identical to the one supported by the Nova API, but without the +// "disk" property. +func List(client *gophercloud.ServiceClient) pagination.Pager { + createPage := func(r pagination.PageResult) pagination.Page { + return FlavorPage{pagination.LinkedPageBase{PageResult: r}} + } + + return pagination.NewPager(client, listURL(client), createPage) +} + +// Get will retrieve information for a specified hardware flavor. 
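+//
+// Illustrative sketch (flavor IDs are decoded as strings, e.g. "1" for
+// m1.tiny in the fixtures above; RAM is reported in megabytes):
+//
+//	flavor, err := Get(client, "1").Extract()
+//	if err != nil {
+//		// handle error
+//	}
+//	fmt.Printf("%s: %d MB RAM\n", flavor.Name, flavor.RAM)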
+func Get(client *gophercloud.ServiceClient, id string) GetResult { + var gr GetResult + + _, gr.Err = client.Request("GET", getURL(client, id), gophercloud.RequestOpts{ + JSONResponse: &gr.Body, + OkCodes: []int{200}, + }) + + return gr +} diff --git a/vendor/github.com/rackspace/gophercloud/openstack/db/v1/flavors/requests_test.go b/vendor/github.com/rackspace/gophercloud/openstack/db/v1/flavors/requests_test.go new file mode 100644 index 000000000..88b5871a1 --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/openstack/db/v1/flavors/requests_test.go @@ -0,0 +1,91 @@ +package flavors + +import ( + "testing" + + "github.com/rackspace/gophercloud" + "github.com/rackspace/gophercloud/pagination" + th "github.com/rackspace/gophercloud/testhelper" + fake "github.com/rackspace/gophercloud/testhelper/client" +) + +func TestListFlavors(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleList(t) + + pages := 0 + err := List(fake.ServiceClient()).EachPage(func(page pagination.Page) (bool, error) { + pages++ + + actual, err := ExtractFlavors(page) + if err != nil { + return false, err + } + + expected := []Flavor{ + Flavor{ + ID: "1", + Name: "m1.tiny", + RAM: 512, + Links: []gophercloud.Link{ + gophercloud.Link{Href: "https://openstack.example.com/v1.0/1234/flavors/1", Rel: "self"}, + gophercloud.Link{Href: "https://openstack.example.com/flavors/1", Rel: "bookmark"}, + }, + }, + Flavor{ + ID: "2", + Name: "m1.small", + RAM: 1024, + Links: []gophercloud.Link{ + gophercloud.Link{Href: "https://openstack.example.com/v1.0/1234/flavors/2", Rel: "self"}, + gophercloud.Link{Href: "https://openstack.example.com/flavors/2", Rel: "bookmark"}, + }, + }, + Flavor{ + ID: "3", + Name: "m1.medium", + RAM: 2048, + Links: []gophercloud.Link{ + gophercloud.Link{Href: "https://openstack.example.com/v1.0/1234/flavors/3", Rel: "self"}, + gophercloud.Link{Href: "https://openstack.example.com/flavors/3", Rel: "bookmark"}, + }, + }, + Flavor{ + ID: "4", + Name: "m1.large", + RAM: 4096, + Links: []gophercloud.Link{ + gophercloud.Link{Href: "https://openstack.example.com/v1.0/1234/flavors/4", Rel: "self"}, + gophercloud.Link{Href: "https://openstack.example.com/flavors/4", Rel: "bookmark"}, + }, + }, + } + + th.AssertDeepEquals(t, expected, actual) + return true, nil + }) + + th.AssertNoErr(t, err) + th.AssertEquals(t, 1, pages) +} + +func TestGetFlavor(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleGet(t) + + actual, err := Get(fake.ServiceClient(), flavorID).Extract() + th.AssertNoErr(t, err) + + expected := &Flavor{ + ID: "1", + Name: "m1.tiny", + RAM: 512, + Links: []gophercloud.Link{ + gophercloud.Link{Href: "https://openstack.example.com/v1.0/1234/flavors/1", Rel: "self"}, + }, + } + + th.AssertDeepEquals(t, expected, actual) +} diff --git a/vendor/github.com/rackspace/gophercloud/openstack/db/v1/flavors/results.go b/vendor/github.com/rackspace/gophercloud/openstack/db/v1/flavors/results.go new file mode 100644 index 000000000..2cee010aa --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/openstack/db/v1/flavors/results.go @@ -0,0 +1,92 @@ +package flavors + +import ( + "github.com/mitchellh/mapstructure" + "github.com/rackspace/gophercloud" + "github.com/rackspace/gophercloud/pagination" +) + +// GetResult temporarily holds the response from a Get call. +type GetResult struct { + gophercloud.Result +} + +// Extract provides access to the individual Flavor returned by the Get function. 
+func (gr GetResult) Extract() (*Flavor, error) { + if gr.Err != nil { + return nil, gr.Err + } + + var result struct { + Flavor Flavor `mapstructure:"flavor"` + } + + decoder, err := mapstructure.NewDecoder(&mapstructure.DecoderConfig{ + WeaklyTypedInput: true, + Result: &result, + }) + + err = decoder.Decode(gr.Body) + return &result.Flavor, err +} + +// Flavor records represent (virtual) hardware configurations for server resources in a region. +type Flavor struct { + // The flavor's unique identifier. + ID string `mapstructure:"id"` + + // The RAM capacity for the flavor. + RAM int `mapstructure:"ram"` + + // The Name field provides a human-readable moniker for the flavor. + Name string `mapstructure:"name"` + + // Links to access the flavor. + Links []gophercloud.Link +} + +// FlavorPage contains a single page of the response from a List call. +type FlavorPage struct { + pagination.LinkedPageBase +} + +// IsEmpty determines if a page contains any results. +func (p FlavorPage) IsEmpty() (bool, error) { + flavors, err := ExtractFlavors(p) + if err != nil { + return true, err + } + return len(flavors) == 0, nil +} + +// NextPageURL uses the response's embedded link reference to navigate to the next page of results. +func (p FlavorPage) NextPageURL() (string, error) { + type resp struct { + Links []gophercloud.Link `mapstructure:"flavors_links"` + } + + var r resp + err := mapstructure.Decode(p.Body, &r) + if err != nil { + return "", err + } + + return gophercloud.ExtractNextURL(r.Links) +} + +// ExtractFlavors provides access to the list of flavors in a page acquired from the List operation. +func ExtractFlavors(page pagination.Page) ([]Flavor, error) { + casted := page.(FlavorPage).Body + var container struct { + Flavors []Flavor `mapstructure:"flavors"` + } + + decoder, err := mapstructure.NewDecoder(&mapstructure.DecoderConfig{ + WeaklyTypedInput: true, + Result: &container, + }) + + err = decoder.Decode(casted) + + return container.Flavors, err +} diff --git a/vendor/github.com/rackspace/gophercloud/openstack/db/v1/flavors/urls.go b/vendor/github.com/rackspace/gophercloud/openstack/db/v1/flavors/urls.go new file mode 100644 index 000000000..80da11f9b --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/openstack/db/v1/flavors/urls.go @@ -0,0 +1,11 @@ +package flavors + +import "github.com/rackspace/gophercloud" + +func getURL(client *gophercloud.ServiceClient, id string) string { + return client.ServiceURL("flavors", id) +} + +func listURL(client *gophercloud.ServiceClient) string { + return client.ServiceURL("flavors") +} diff --git a/vendor/github.com/rackspace/gophercloud/openstack/db/v1/instances/doc.go b/vendor/github.com/rackspace/gophercloud/openstack/db/v1/instances/doc.go new file mode 100644 index 000000000..dc5c90f95 --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/openstack/db/v1/instances/doc.go @@ -0,0 +1,7 @@ +// Package instances provides information and interaction with the instance API +// resource in the OpenStack Database service. +// +// A database instance is an isolated database environment with compute and +// storage resources in a single tenant environment on a shared physical host +// machine. 
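+//
+// Illustrative sketch (assumes an authenticated *gophercloud.ServiceClient;
+// FlavorRef and Size are the only required fields, per CreateOpts below):
+//
+//	inst, err := instances.Create(client, instances.CreateOpts{
+//		FlavorRef: "1",
+//		Size:      2,
+//		Name:      "json_rack_instance",
+//	}).Extract()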
+package instances diff --git a/vendor/github.com/rackspace/gophercloud/openstack/db/v1/instances/fixtures.go b/vendor/github.com/rackspace/gophercloud/openstack/db/v1/instances/fixtures.go new file mode 100644 index 000000000..af7b185a6 --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/openstack/db/v1/instances/fixtures.go @@ -0,0 +1,169 @@ +package instances + +import ( + "fmt" + "testing" + "time" + + "github.com/rackspace/gophercloud" + "github.com/rackspace/gophercloud/openstack/db/v1/datastores" + "github.com/rackspace/gophercloud/openstack/db/v1/flavors" + "github.com/rackspace/gophercloud/testhelper/fixture" +) + +var ( + timestamp = "2015-11-12T14:22:42Z" + timeVal, _ = time.Parse(time.RFC3339, timestamp) +) + +var instance = ` +{ + "created": "` + timestamp + `", + "datastore": { + "type": "mysql", + "version": "5.6" + }, + "flavor": { + "id": "1", + "links": [ + { + "href": "https://my-openstack.com/v1.0/1234/flavors/1", + "rel": "self" + }, + { + "href": "https://my-openstack.com/v1.0/1234/flavors/1", + "rel": "bookmark" + } + ] + }, + "links": [ + { + "href": "https://my-openstack.com/v1.0/1234/instances/1", + "rel": "self" + } + ], + "hostname": "e09ad9a3f73309469cf1f43d11e79549caf9acf2.my-openstack.com", + "id": "{instanceID}", + "name": "json_rack_instance", + "status": "BUILD", + "updated": "` + timestamp + `", + "volume": { + "size": 2 + } +} +` + +var createReq = ` +{ + "instance": { + "databases": [ + { + "character_set": "utf8", + "collate": "utf8_general_ci", + "name": "sampledb" + }, + { + "name": "nextround" + } + ], + "flavorRef": "1", + "name": "json_rack_instance", + "users": [ + { + "databases": [ + { + "name": "sampledb" + } + ], + "name": "demouser", + "password": "demopassword" + } + ], + "volume": { + "size": 2 + } + } +} +` + +var ( + instanceID = "{instanceID}" + rootURL = "/instances" + resURL = rootURL + "/" + instanceID + uRootURL = resURL + "/root" + aURL = resURL + "/action" +) + +var ( + restartReq = `{"restart": {}}` + resizeReq = `{"resize": {"flavorRef": "2"}}` + resizeVolReq = `{"resize": {"volume": {"size": 4}}}` +) + +var ( + createResp = fmt.Sprintf(`{"instance": %s}`, instance) + listInstancesResp = fmt.Sprintf(`{"instances":[%s]}`, instance) + getInstanceResp = createResp + enableUserResp = `{"user":{"name":"root","password":"secretsecret"}}` + isUserEnabledResp = `{"rootEnabled":true}` +) + +var expectedInstance = Instance{ + Created: timeVal, + Updated: timeVal, + Flavor: flavors.Flavor{ + ID: "1", + Links: []gophercloud.Link{ + gophercloud.Link{Href: "https://my-openstack.com/v1.0/1234/flavors/1", Rel: "self"}, + gophercloud.Link{Href: "https://my-openstack.com/v1.0/1234/flavors/1", Rel: "bookmark"}, + }, + }, + Hostname: "e09ad9a3f73309469cf1f43d11e79549caf9acf2.my-openstack.com", + ID: instanceID, + Links: []gophercloud.Link{ + gophercloud.Link{Href: "https://my-openstack.com/v1.0/1234/instances/1", Rel: "self"}, + }, + Name: "json_rack_instance", + Status: "BUILD", + Volume: Volume{Size: 2}, + Datastore: datastores.DatastorePartial{ + Type: "mysql", + Version: "5.6", + }, +} + +func HandleCreate(t *testing.T) { + fixture.SetupHandler(t, rootURL, "POST", createReq, createResp, 200) +} + +func HandleList(t *testing.T) { + fixture.SetupHandler(t, rootURL, "GET", "", listInstancesResp, 200) +} + +func HandleGet(t *testing.T) { + fixture.SetupHandler(t, resURL, "GET", "", getInstanceResp, 200) +} + +func HandleDelete(t *testing.T) { + fixture.SetupHandler(t, resURL, "DELETE", "", "", 202) +} + +func HandleEnableRoot(t *testing.T) 
{ + fixture.SetupHandler(t, uRootURL, "POST", "", enableUserResp, 200) +} + +func HandleIsRootEnabled(t *testing.T) { + fixture.SetupHandler(t, uRootURL, "GET", "", isUserEnabledResp, 200) +} + +func HandleRestart(t *testing.T) { + fixture.SetupHandler(t, aURL, "POST", restartReq, "", 202) +} + +func HandleResize(t *testing.T) { + fixture.SetupHandler(t, aURL, "POST", resizeReq, "", 202) +} + +func HandleResizeVol(t *testing.T) { + fixture.SetupHandler(t, aURL, "POST", resizeVolReq, "", 202) +} diff --git a/vendor/github.com/rackspace/gophercloud/openstack/db/v1/instances/requests.go b/vendor/github.com/rackspace/gophercloud/openstack/db/v1/instances/requests.go new file mode 100644 index 000000000..f4a63b814 --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/openstack/db/v1/instances/requests.go @@ -0,0 +1,238 @@ +package instances + +import ( + "fmt" + + "github.com/rackspace/gophercloud" + db "github.com/rackspace/gophercloud/openstack/db/v1/databases" + "github.com/rackspace/gophercloud/openstack/db/v1/users" + "github.com/rackspace/gophercloud/pagination" +) + +// CreateOptsBuilder is the top-level interface for create options. +type CreateOptsBuilder interface { + ToInstanceCreateMap() (map[string]interface{}, error) +} + +// DatastoreOpts represents the configuration for how an instance stores data. +type DatastoreOpts struct { + Version string + Type string +} + +func (opts DatastoreOpts) ToMap() (map[string]string, error) { + return map[string]string{ + "version": opts.Version, + "type": opts.Type, + }, nil +} + +// CreateOpts is the struct responsible for configuring a new database instance. +type CreateOpts struct { + // Either the integer UUID (in string form) of the flavor, or its URI + // reference as specified in the response from the List() call. Required. + FlavorRef string + + // Specifies the volume size in gigabytes (GB). The value must be between 1 + // and 300. Required. + Size int + + // Name of the instance to create. The length of the name is limited to + // 255 characters and any characters are permitted. Optional. + Name string + + // A slice of database information options. + Databases db.CreateOptsBuilder + + // A slice of user information options. + Users users.CreateOptsBuilder + + // Options to configure the type of datastore the instance will use. This is + // optional, and if excluded will default to MySQL. + Datastore *DatastoreOpts +} + +// ToInstanceCreateMap will render a JSON map. +func (opts CreateOpts) ToInstanceCreateMap() (map[string]interface{}, error) { + if opts.Size > 300 || opts.Size < 1 { + return nil, fmt.Errorf("Size (GB) must be between 1-300") + } + if opts.FlavorRef == "" { + return nil, fmt.Errorf("FlavorRef is a required field") + } + + instance := map[string]interface{}{ + "volume": map[string]int{"size": opts.Size}, + "flavorRef": opts.FlavorRef, + } + + if opts.Name != "" { + instance["name"] = opts.Name + } + if opts.Databases != nil { + dbs, err := opts.Databases.ToDBCreateMap() + if err != nil { + return nil, err + } + instance["databases"] = dbs["databases"] + } + if opts.Users != nil { + users, err := opts.Users.ToUserCreateMap() + if err != nil { + return nil, err + } + instance["users"] = users["users"] + } + + return map[string]interface{}{"instance": instance}, nil +} + +// Create asynchronously provisions a new database instance. It requires the +// user to specify a flavor and a volume size. 
The API service then provisions +// the instance with the requested flavor and sets up a volume of the specified +// size, which is the storage for the database instance. +// +// Although this call only allows the creation of 1 instance per request, you +// can create an instance with multiple databases and users. The default +// binding for a MySQL instance is port 3306. +func Create(client *gophercloud.ServiceClient, opts CreateOptsBuilder) CreateResult { + var res CreateResult + + reqBody, err := opts.ToInstanceCreateMap() + if err != nil { + res.Err = err + return res + } + + _, res.Err = client.Request("POST", baseURL(client), gophercloud.RequestOpts{ + JSONBody: &reqBody, + JSONResponse: &res.Body, + OkCodes: []int{200}, + }) + + return res +} + +// List retrieves the status and information for all database instances. +func List(client *gophercloud.ServiceClient) pagination.Pager { + createPageFn := func(r pagination.PageResult) pagination.Page { + return InstancePage{pagination.LinkedPageBase{PageResult: r}} + } + + return pagination.NewPager(client, baseURL(client), createPageFn) +} + +// Get retrieves the status and information for a specified database instance. +func Get(client *gophercloud.ServiceClient, id string) GetResult { + var res GetResult + + _, res.Err = client.Request("GET", resourceURL(client, id), gophercloud.RequestOpts{ + JSONResponse: &res.Body, + OkCodes: []int{200}, + }) + + return res +} + +// Delete permanently destroys the database instance. +func Delete(client *gophercloud.ServiceClient, id string) DeleteResult { + var res DeleteResult + + _, res.Err = client.Request("DELETE", resourceURL(client, id), gophercloud.RequestOpts{ + OkCodes: []int{202}, + }) + + return res +} + +// EnableRootUser enables the login from any host for the root user and +// provides the user with a generated root password. +func EnableRootUser(client *gophercloud.ServiceClient, id string) UserRootResult { + var res UserRootResult + + _, res.Err = client.Request("POST", userRootURL(client, id), gophercloud.RequestOpts{ + JSONResponse: &res.Body, + OkCodes: []int{200}, + }) + + return res +} + +// IsRootEnabled checks an instance to see if root access is enabled. It returns +// True if root user is enabled for the specified database instance or False +// otherwise. +func IsRootEnabled(client *gophercloud.ServiceClient, id string) (bool, error) { + var res gophercloud.Result + + _, err := client.Request("GET", userRootURL(client, id), gophercloud.RequestOpts{ + JSONResponse: &res.Body, + OkCodes: []int{200}, + }) + + return res.Body.(map[string]interface{})["rootEnabled"] == true, err +} + +// Restart will restart only the MySQL Instance. Restarting MySQL will +// erase any dynamic configuration settings that you have made within MySQL. +// The MySQL service will be unavailable until the instance restarts. +func Restart(client *gophercloud.ServiceClient, id string) ActionResult { + var res ActionResult + + _, res.Err = client.Request("POST", actionURL(client, id), gophercloud.RequestOpts{ + JSONBody: map[string]interface{}{"restart": struct{}{}}, + OkCodes: []int{202}, + }) + + return res +} + +// Resize changes the memory size of the instance, assuming a valid +// flavorRef is provided. It will also restart the MySQL service. 
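+// +// A minimal usage sketch (illustrative only, not part of this file: it assumes +// an authenticated *gophercloud.ServiceClient named client, a placeholder +// instance ID, and "2" as an example flavorRef): +// +//     res := instances.Resize(client, "{instanceID}", "2") +//     if res.Err != nil { +//         // the resize action was rejected or the request failed +//     }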
+func Resize(client *gophercloud.ServiceClient, id, flavorRef string) ActionResult { + var res ActionResult + + type resize struct { + FlavorRef string `json:"flavorRef"` + } + + type req struct { + Resize resize `json:"resize"` + } + + reqBody := req{Resize: resize{FlavorRef: flavorRef}} + + _, res.Err = client.Request("POST", actionURL(client, id), gophercloud.RequestOpts{ + JSONBody: reqBody, + OkCodes: []int{202}, + }) + + return res +} + +// ResizeVolume will resize the attached volume for an instance. It supports +// only increasing the volume size and does not support decreasing the size. +// The volume size is in gigabytes (GB) and must be an integer. +func ResizeVolume(client *gophercloud.ServiceClient, id string, size int) ActionResult { + var res ActionResult + + type volume struct { + Size int `json:"size"` + } + + type resize struct { + Volume volume `json:"volume"` + } + + type req struct { + Resize resize `json:"resize"` + } + + reqBody := req{Resize: resize{Volume: volume{Size: size}}} + + _, res.Err = client.Request("POST", actionURL(client, id), gophercloud.RequestOpts{ + JSONBody: reqBody, + OkCodes: []int{202}, + }) + + return res +} diff --git a/vendor/github.com/rackspace/gophercloud/openstack/db/v1/instances/requests_test.go b/vendor/github.com/rackspace/gophercloud/openstack/db/v1/instances/requests_test.go new file mode 100644 index 000000000..3cc2b701f --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/openstack/db/v1/instances/requests_test.go @@ -0,0 +1,133 @@ +package instances + +import ( + "testing" + + db "github.com/rackspace/gophercloud/openstack/db/v1/databases" + "github.com/rackspace/gophercloud/openstack/db/v1/users" + "github.com/rackspace/gophercloud/pagination" + th "github.com/rackspace/gophercloud/testhelper" + fake "github.com/rackspace/gophercloud/testhelper/client" +) + +func TestCreate(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleCreate(t) + + opts := CreateOpts{ + Name: "json_rack_instance", + FlavorRef: "1", + Databases: db.BatchCreateOpts{ + db.CreateOpts{CharSet: "utf8", Collate: "utf8_general_ci", Name: "sampledb"}, + db.CreateOpts{Name: "nextround"}, + }, + Users: users.BatchCreateOpts{ + users.CreateOpts{ + Name: "demouser", + Password: "demopassword", + Databases: db.BatchCreateOpts{ + db.CreateOpts{Name: "sampledb"}, + }, + }, + }, + Size: 2, + } + + instance, err := Create(fake.ServiceClient(), opts).Extract() + + th.AssertNoErr(t, err) + th.AssertDeepEquals(t, &expectedInstance, instance) +} + +func TestInstanceList(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleList(t) + + pages := 0 + err := List(fake.ServiceClient()).EachPage(func(page pagination.Page) (bool, error) { + pages++ + + actual, err := ExtractInstances(page) + if err != nil { + return false, err + } + + th.CheckDeepEquals(t, []Instance{expectedInstance}, actual) + return true, nil + }) + + th.AssertNoErr(t, err) + th.AssertEquals(t, 1, pages) +} + +func TestGetInstance(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleGet(t) + + instance, err := Get(fake.ServiceClient(), instanceID).Extract() + + th.AssertNoErr(t, err) + th.AssertDeepEquals(t, &expectedInstance, instance) +} + +func TestDeleteInstance(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleDelete(t) + + res := Delete(fake.ServiceClient(), instanceID) + th.AssertNoErr(t, res.Err) +} + +func TestEnableRootUser(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleEnableRoot(t) + + expected := &users.User{Name: 
"root", Password: "secretsecret"} + user, err := EnableRootUser(fake.ServiceClient(), instanceID).Extract() + + th.AssertNoErr(t, err) + th.AssertDeepEquals(t, expected, user) +} + +func TestIsRootEnabled(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleIsRootEnabled(t) + + isEnabled, err := IsRootEnabled(fake.ServiceClient(), instanceID) + + th.AssertNoErr(t, err) + th.AssertEquals(t, true, isEnabled) +} + +func TestRestart(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleRestart(t) + + res := Restart(fake.ServiceClient(), instanceID) + th.AssertNoErr(t, res.Err) +} + +func TestResize(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleResize(t) + + res := Resize(fake.ServiceClient(), instanceID, "2") + th.AssertNoErr(t, res.Err) +} + +func TestResizeVolume(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleResizeVol(t) + + res := ResizeVolume(fake.ServiceClient(), instanceID, 4) + th.AssertNoErr(t, res.Err) +} diff --git a/vendor/github.com/rackspace/gophercloud/openstack/db/v1/instances/results.go b/vendor/github.com/rackspace/gophercloud/openstack/db/v1/instances/results.go new file mode 100644 index 000000000..95aed166b --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/openstack/db/v1/instances/results.go @@ -0,0 +1,213 @@ +package instances + +import ( + "fmt" + "reflect" + "time" + + "github.com/mitchellh/mapstructure" + "github.com/rackspace/gophercloud" + "github.com/rackspace/gophercloud/openstack/db/v1/datastores" + "github.com/rackspace/gophercloud/openstack/db/v1/flavors" + "github.com/rackspace/gophercloud/openstack/db/v1/users" + "github.com/rackspace/gophercloud/pagination" +) + +// Volume represents information about an attached volume for a database instance. +type Volume struct { + // The size in GB of the volume + Size int + + Used float64 +} + +// Instance represents a remote MySQL instance. +type Instance struct { + // Indicates the datetime that the instance was created + Created time.Time `mapstructure:"-"` + + // Indicates the most recent datetime that the instance was updated. + Updated time.Time `mapstructure:"-"` + + // Indicates the hardware flavor the instance uses. + Flavor flavors.Flavor + + // A DNS-resolvable hostname associated with the database instance (rather + // than an IPv4 address). Since the hostname always resolves to the correct + // IP address of the database instance, this relieves the user from the task + // of maintaining the mapping. Note that although the IP address may likely + // change on resizing, migrating, and so forth, the hostname always resolves + // to the correct database instance. + Hostname string + + // Indicates the unique identifier for the instance resource. + ID string + + // Exposes various links that reference the instance resource. + Links []gophercloud.Link + + // The human-readable name of the instance. + Name string + + // The build status of the instance. + Status string + + // Information about the attached volume of the instance. + Volume Volume + + // Indicates how the instance stores data. + Datastore datastores.DatastorePartial +} + +type commonResult struct { + gophercloud.Result +} + +// CreateResult represents the result of a Create operation. +type CreateResult struct { + commonResult +} + +// GetResult represents the result of a Get operation. +type GetResult struct { + commonResult +} + +// DeleteResult represents the result of a Delete operation. 
+type DeleteResult struct { + gophercloud.ErrResult +} + +// Extract will extract an Instance from various result structs. +func (r commonResult) Extract() (*Instance, error) { + if r.Err != nil { + return nil, r.Err + } + + var response struct { + Instance Instance `mapstructure:"instance"` + } + + err := mapstructure.Decode(r.Body, &response) + val := r.Body.(map[string]interface{})["instance"].(map[string]interface{}) + + if t, ok := val["created"].(string); ok && t != "" { + creationTime, err := time.Parse(time.RFC3339, t) + if err != nil { + return &response.Instance, err + } + response.Instance.Created = creationTime + } + + if t, ok := val["updated"].(string); ok && t != "" { + updatedTime, err := time.Parse(time.RFC3339, t) + if err != nil { + return &response.Instance, err + } + response.Instance.Updated = updatedTime + } + + return &response.Instance, err +} + +// InstancePage represents a single page of a paginated instance collection. +type InstancePage struct { + pagination.LinkedPageBase +} + +// IsEmpty checks to see whether the collection is empty. +func (page InstancePage) IsEmpty() (bool, error) { + instances, err := ExtractInstances(page) + if err != nil { + return true, err + } + return len(instances) == 0, nil +} + +// NextPageURL will retrieve the next page URL. +func (page InstancePage) NextPageURL() (string, error) { + type resp struct { + Links []gophercloud.Link `mapstructure:"instances_links"` + } + + var r resp + err := mapstructure.Decode(page.Body, &r) + if err != nil { + return "", err + } + + return gophercloud.ExtractNextURL(r.Links) +} + +// ExtractInstances will convert a generic pagination struct into a more +// relevant slice of Instance structs. +func ExtractInstances(page pagination.Page) ([]Instance, error) { + casted := page.(InstancePage).Body + + var resp struct { + Instances []Instance `mapstructure:"instances"` + } + + if err := mapstructure.Decode(casted, &resp); err != nil { + return nil, err + } + + var vals []interface{} + switch casted.(type) { + case map[string]interface{}: + vals = casted.(map[string]interface{})["instances"].([]interface{}) + case map[string][]interface{}: + vals = casted.(map[string][]interface{})["instances"] + default: + return resp.Instances, fmt.Errorf("Unknown type: %v", reflect.TypeOf(casted)) + } + + for i, v := range vals { + val := v.(map[string]interface{}) + + if t, ok := val["created"].(string); ok && t != "" { + creationTime, err := time.Parse(time.RFC3339, t) + if err != nil { + return resp.Instances, err + } + resp.Instances[i].Created = creationTime + } + + if t, ok := val["updated"].(string); ok && t != "" { + updatedTime, err := time.Parse(time.RFC3339, t) + if err != nil { + return resp.Instances, err + } + resp.Instances[i].Updated = updatedTime + } + } + + return resp.Instances, nil +} + +// UserRootResult represents the result of an operation to enable the root user. +type UserRootResult struct { + gophercloud.Result +} + +// Extract will extract root user information from a UserRootResult. +func (r UserRootResult) Extract() (*users.User, error) { + if r.Err != nil { + return nil, r.Err + } + + var response struct { + User users.User `mapstructure:"user"` + } + + err := mapstructure.Decode(r.Body, &response) + + return &response.User, err +} + +// ActionResult represents the result of action requests, such as: restarting +// an instance service, resizing its memory allocation, and resizing its +// attached volume size. 
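+// +// Like DeleteResult, it carries no response body, so Restart, Resize and +// ResizeVolume results are checked the same way (sketch with assumed client +// and id values): +// +//     if err := instances.Restart(client, id).ExtractErr(); err != nil { +//         // the restart action failed +//     }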
+type ActionResult struct { + gophercloud.ErrResult +} diff --git a/vendor/github.com/rackspace/gophercloud/openstack/db/v1/instances/urls.go b/vendor/github.com/rackspace/gophercloud/openstack/db/v1/instances/urls.go new file mode 100644 index 000000000..28c0bec3d --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/openstack/db/v1/instances/urls.go @@ -0,0 +1,19 @@ +package instances + +import "github.com/rackspace/gophercloud" + +func baseURL(c *gophercloud.ServiceClient) string { + return c.ServiceURL("instances") +} + +func resourceURL(c *gophercloud.ServiceClient, id string) string { + return c.ServiceURL("instances", id) +} + +func userRootURL(c *gophercloud.ServiceClient, id string) string { + return c.ServiceURL("instances", id, "root") +} + +func actionURL(c *gophercloud.ServiceClient, id string) string { + return c.ServiceURL("instances", id, "action") +} diff --git a/vendor/github.com/rackspace/gophercloud/openstack/db/v1/users/doc.go b/vendor/github.com/rackspace/gophercloud/openstack/db/v1/users/doc.go new file mode 100644 index 000000000..cf07832f3 --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/openstack/db/v1/users/doc.go @@ -0,0 +1,3 @@ +// Package users provides information and interaction with the user API +// resource in the OpenStack Database service. +package users diff --git a/vendor/github.com/rackspace/gophercloud/openstack/db/v1/users/fixtures.go b/vendor/github.com/rackspace/gophercloud/openstack/db/v1/users/fixtures.go new file mode 100644 index 000000000..516b335e0 --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/openstack/db/v1/users/fixtures.go @@ -0,0 +1,37 @@ +package users + +import ( + "fmt" + "testing" + + "github.com/rackspace/gophercloud/testhelper/fixture" +) + +const user1 = ` +{"databases": [{"name": "databaseA"}],"name": "dbuser3"%s} +` + +const user2 = ` +{"databases": [{"name": "databaseB"},{"name": "databaseC"}],"name": "dbuser4"%s} +` + +var ( + instanceID = "{instanceID}" + _rootURL = "/instances/" + instanceID + "/users" + pUser1 = fmt.Sprintf(user1, `,"password":"secretsecret"`) + pUser2 = fmt.Sprintf(user2, `,"password":"secretsecret"`) + createReq = fmt.Sprintf(`{"users":[%s, %s]}`, pUser1, pUser2) + listResp = fmt.Sprintf(`{"users":[%s, %s]}`, fmt.Sprintf(user1, ""), fmt.Sprintf(user2, "")) +) + +func HandleCreate(t *testing.T) { + fixture.SetupHandler(t, _rootURL, "POST", createReq, "", 202) +} + +func HandleList(t *testing.T) { + fixture.SetupHandler(t, _rootURL, "GET", "", listResp, 200) +} + +func HandleDelete(t *testing.T) { + fixture.SetupHandler(t, _rootURL+"/{userName}", "DELETE", "", "", 202) +} diff --git a/vendor/github.com/rackspace/gophercloud/openstack/db/v1/users/requests.go b/vendor/github.com/rackspace/gophercloud/openstack/db/v1/users/requests.go new file mode 100644 index 000000000..7533fc494 --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/openstack/db/v1/users/requests.go @@ -0,0 +1,132 @@ +package users + +import ( + "errors" + + "github.com/rackspace/gophercloud" + db "github.com/rackspace/gophercloud/openstack/db/v1/databases" + "github.com/rackspace/gophercloud/pagination" +) + +// CreateOptsBuilder is the top-level interface for creating JSON maps. +type CreateOptsBuilder interface { + ToUserCreateMap() (map[string]interface{}, error) +} + +// CreateOpts is the struct responsible for configuring a new user; often in the +// context of an instance. +type CreateOpts struct { + // [REQUIRED] Specifies a name for the user. 
Valid names may contain + // letters (either case), numbers, and the characters '@', '?', '#', and ' ' + // (although a name may not begin with any of these); '_' is permitted + // anywhere. Prohibited characters include: single quotes, double quotes, + // back quotes, semicolons, commas, backslashes, and forward slashes. Spaces + // at the front or end of a user name are also not permitted. + Name string + + // [REQUIRED] Specifies a password for the user. + Password string + + // [OPTIONAL] An array of databases that this user will connect to. The + // "name" field is the only requirement for each option. + Databases db.BatchCreateOpts + + // [OPTIONAL] Specifies the host from which a user is allowed to connect to + // the database. Possible values are a string containing an IPv4 address or + // "%" to allow connecting from any host. The default is "%". + Host string +} + +// ToMap is a convenience function for creating sub-maps for individual users. +func (opts CreateOpts) ToMap() (map[string]interface{}, error) { + if opts.Name == "root" { + return nil, errors.New("root is a reserved user name and cannot be used") + } + if opts.Name == "" { + return nil, errors.New("Name is a required field") + } + if opts.Password == "" { + return nil, errors.New("Password is a required field") + } + + user := map[string]interface{}{ + "name": opts.Name, + "password": opts.Password, + } + + if opts.Host != "" { + user["host"] = opts.Host + } + + dbs := make([]map[string]string, len(opts.Databases)) + for i, db := range opts.Databases { + dbs[i] = map[string]string{"name": db.Name} + } + + if len(dbs) > 0 { + user["databases"] = dbs + } + + return user, nil +} + +// BatchCreateOpts allows multiple users to be created at once. +type BatchCreateOpts []CreateOpts + +// ToUserCreateMap will generate a JSON map. +func (opts BatchCreateOpts) ToUserCreateMap() (map[string]interface{}, error) { + users := make([]map[string]interface{}, len(opts)) + for i, opt := range opts { + user, err := opt.ToMap() + if err != nil { + return nil, err + } + users[i] = user + } + return map[string]interface{}{"users": users}, nil +} + +// Create asynchronously provisions a new user for the specified database +// instance based on the configuration defined in CreateOpts. If databases are +// assigned for a particular user, the user will be granted all privileges +// for those specified databases. "root" is a reserved name and cannot be used. +func Create(client *gophercloud.ServiceClient, instanceID string, opts CreateOptsBuilder) CreateResult { + var res CreateResult + + reqBody, err := opts.ToUserCreateMap() + if err != nil { + res.Err = err + return res + } + + _, res.Err = client.Request("POST", baseURL(client, instanceID), gophercloud.RequestOpts{ + JSONBody: &reqBody, + OkCodes: []int{202}, + }) + + return res +} + +// List will list all the users associated with a specified database instance, +// along with their associated databases. This operation will not return any +// system users or administrators for a database. +func List(client *gophercloud.ServiceClient, instanceID string) pagination.Pager { + createPageFn := func(r pagination.PageResult) pagination.Page { + return UserPage{pagination.LinkedPageBase{PageResult: r}} + } + + return pagination.NewPager(client, baseURL(client, instanceID), createPageFn) +} + +// Delete will permanently delete a user from a specified database instance. 
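+// +// Illustrative call (placeholder identifiers, mirroring the package fixtures; +// client is an assumed authenticated *gophercloud.ServiceClient): +// +//     res := users.Delete(client, "{instanceID}", "dbuser3") +//     if res.Err != nil { +//         // the user could not be deleted +//     }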
+func Delete(client *gophercloud.ServiceClient, instanceID, userName string) DeleteResult { + var res DeleteResult + + _, res.Err = client.Request("DELETE", userURL(client, instanceID, userName), gophercloud.RequestOpts{ + OkCodes: []int{202}, + }) + + return res +} diff --git a/vendor/github.com/rackspace/gophercloud/openstack/db/v1/users/requests_test.go b/vendor/github.com/rackspace/gophercloud/openstack/db/v1/users/requests_test.go new file mode 100644 index 000000000..5711f6334 --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/openstack/db/v1/users/requests_test.go @@ -0,0 +1,84 @@ +package users + +import ( + "testing" + + db "github.com/rackspace/gophercloud/openstack/db/v1/databases" + "github.com/rackspace/gophercloud/pagination" + th "github.com/rackspace/gophercloud/testhelper" + fake "github.com/rackspace/gophercloud/testhelper/client" +) + +func TestCreate(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleCreate(t) + + opts := BatchCreateOpts{ + CreateOpts{ + Databases: db.BatchCreateOpts{ + db.CreateOpts{Name: "databaseA"}, + }, + Name: "dbuser3", + Password: "secretsecret", + }, + CreateOpts{ + Databases: db.BatchCreateOpts{ + db.CreateOpts{Name: "databaseB"}, + db.CreateOpts{Name: "databaseC"}, + }, + Name: "dbuser4", + Password: "secretsecret", + }, + } + + res := Create(fake.ServiceClient(), instanceID, opts) + th.AssertNoErr(t, res.Err) +} + +func TestUserList(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleList(t) + + expectedUsers := []User{ + User{ + Databases: []db.Database{ + db.Database{Name: "databaseA"}, + }, + Name: "dbuser3", + }, + User{ + Databases: []db.Database{ + db.Database{Name: "databaseB"}, + db.Database{Name: "databaseC"}, + }, + Name: "dbuser4", + }, + } + + pages := 0 + err := List(fake.ServiceClient(), instanceID).EachPage(func(page pagination.Page) (bool, error) { + pages++ + + actual, err := ExtractUsers(page) + if err != nil { + return false, err + } + + th.CheckDeepEquals(t, expectedUsers, actual) + return true, nil + }) + + th.AssertNoErr(t, err) + th.AssertEquals(t, 1, pages) +} + +func TestDelete(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleDelete(t) + + res := Delete(fake.ServiceClient(), instanceID, "{userName}") + th.AssertNoErr(t, res.Err) +} diff --git a/vendor/github.com/rackspace/gophercloud/openstack/db/v1/users/results.go b/vendor/github.com/rackspace/gophercloud/openstack/db/v1/users/results.go new file mode 100644 index 000000000..217ddd8da --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/openstack/db/v1/users/results.go @@ -0,0 +1,73 @@ +package users + +import ( + "github.com/mitchellh/mapstructure" + "github.com/rackspace/gophercloud" + db "github.com/rackspace/gophercloud/openstack/db/v1/databases" + "github.com/rackspace/gophercloud/pagination" +) + +// User represents a database user +type User struct { + // The user name + Name string + + // The user password + Password string + + // The databases associated with this user + Databases []db.Database +} + +// CreateResult represents the result of a create operation. +type CreateResult struct { + gophercloud.ErrResult +} + +// DeleteResult represents the result of a delete operation. +type DeleteResult struct { + gophercloud.ErrResult +} + +// UserPage represents a single page of a paginated user collection. +type UserPage struct { + pagination.LinkedPageBase +} + +// IsEmpty checks to see whether the collection is empty. 
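+// It is what allows the pager to stop iterating; most callers never invoke it +// directly and instead walk pages (sketch, assuming an authenticated client +// and an instanceID; error handling elided): +// +//     users.List(client, instanceID).EachPage(func(page pagination.Page) (bool, error) { +//         all, err := users.ExtractUsers(page) +//         // inspect all; return false to stop early +//         return true, err +//     })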
+func (page UserPage) IsEmpty() (bool, error) { + users, err := ExtractUsers(page) + if err != nil { + return true, err + } + return len(users) == 0, nil +} + +// NextPageURL will retrieve the next page URL. +func (page UserPage) NextPageURL() (string, error) { + type resp struct { + Links []gophercloud.Link `mapstructure:"users_links"` + } + + var r resp + err := mapstructure.Decode(page.Body, &r) + if err != nil { + return "", err + } + + return gophercloud.ExtractNextURL(r.Links) +} + +// ExtractUsers will convert a generic pagination struct into a more +// relevant slice of User structs. +func ExtractUsers(page pagination.Page) ([]User, error) { + casted := page.(UserPage).Body + + var response struct { + Users []User `mapstructure:"users"` + } + + err := mapstructure.Decode(casted, &response) + + return response.Users, err +} diff --git a/vendor/github.com/rackspace/gophercloud/openstack/db/v1/users/urls.go b/vendor/github.com/rackspace/gophercloud/openstack/db/v1/users/urls.go new file mode 100644 index 000000000..2a3cacdaf --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/openstack/db/v1/users/urls.go @@ -0,0 +1,11 @@ +package users + +import "github.com/rackspace/gophercloud" + +func baseURL(c *gophercloud.ServiceClient, instanceID string) string { + return c.ServiceURL("instances", instanceID, "users") +} + +func userURL(c *gophercloud.ServiceClient, instanceID, userName string) string { + return c.ServiceURL("instances", instanceID, "users", userName) +} diff --git a/vendor/github.com/rackspace/gophercloud/openstack/endpoint_location.go b/vendor/github.com/rackspace/gophercloud/openstack/endpoint_location.go new file mode 100644 index 000000000..29d02c43f --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/openstack/endpoint_location.go @@ -0,0 +1,91 @@ +package openstack + +import ( + "fmt" + + "github.com/rackspace/gophercloud" + tokens2 "github.com/rackspace/gophercloud/openstack/identity/v2/tokens" + tokens3 "github.com/rackspace/gophercloud/openstack/identity/v3/tokens" +) + +// V2EndpointURL discovers the endpoint URL for a specific service from a ServiceCatalog acquired +// during the v2 identity service. The specified EndpointOpts are used to identify a unique, +// unambiguous endpoint to return. It's an error both when multiple endpoints match the provided +// criteria and when none do. The minimum that can be specified is a Type, but you will also often +// need to specify a Name and/or a Region depending on what's available on your OpenStack +// deployment. +func V2EndpointURL(catalog *tokens2.ServiceCatalog, opts gophercloud.EndpointOpts) (string, error) { + // Extract Endpoints from the catalog entries that match the requested Type, Name if provided, and Region if provided. + var endpoints = make([]tokens2.Endpoint, 0, 1) + for _, entry := range catalog.Entries { + if (entry.Type == opts.Type) && (opts.Name == "" || entry.Name == opts.Name) { + for _, endpoint := range entry.Endpoints { + if opts.Region == "" || endpoint.Region == opts.Region { + endpoints = append(endpoints, endpoint) + } + } + } + } + + // Report an error if the options were ambiguous. + if len(endpoints) > 1 { + return "", fmt.Errorf("Discovered %d matching endpoints: %#v", len(endpoints), endpoints) + } + + // Extract the appropriate URL from the matching Endpoint. 
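+ // (After the ambiguity check above, at most one endpoint remains, so this + // loop body runs at most once; the switch simply maps the requested + // Availability onto the corresponding URL field of the v2 endpoint record.)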
+ for _, endpoint := range endpoints { + switch opts.Availability { + case gophercloud.AvailabilityPublic: + return gophercloud.NormalizeURL(endpoint.PublicURL), nil + case gophercloud.AvailabilityInternal: + return gophercloud.NormalizeURL(endpoint.InternalURL), nil + case gophercloud.AvailabilityAdmin: + return gophercloud.NormalizeURL(endpoint.AdminURL), nil + default: + return "", fmt.Errorf("Unexpected availability in endpoint query: %s", opts.Availability) + } + } + + // Report an error if there were no matching endpoints. + return "", gophercloud.ErrEndpointNotFound +} + +// V3EndpointURL discovers the endpoint URL for a specific service from a Catalog acquired +// during the v3 identity service. The specified EndpointOpts are used to identify a unique, +// unambiguous endpoint to return. It's an error both when multiple endpoints match the provided +// criteria and when none do. The minimum that can be specified is a Type, but you will also often +// need to specify a Name and/or a Region depending on what's available on your OpenStack +// deployment. +func V3EndpointURL(catalog *tokens3.ServiceCatalog, opts gophercloud.EndpointOpts) (string, error) { + // Extract Endpoints from the catalog entries that match the requested Type, Interface, + // Name if provided, and Region if provided. + var endpoints = make([]tokens3.Endpoint, 0, 1) + for _, entry := range catalog.Entries { + if (entry.Type == opts.Type) && (opts.Name == "" || entry.Name == opts.Name) { + for _, endpoint := range entry.Endpoints { + if opts.Availability != gophercloud.AvailabilityAdmin && + opts.Availability != gophercloud.AvailabilityPublic && + opts.Availability != gophercloud.AvailabilityInternal { + return "", fmt.Errorf("Unexpected availability in endpoint query: %s", opts.Availability) + } + if (opts.Availability == gophercloud.Availability(endpoint.Interface)) && + (opts.Region == "" || endpoint.Region == opts.Region) { + endpoints = append(endpoints, endpoint) + } + } + } + } + + // Report an error if the options were ambiguous. + if len(endpoints) > 1 { + return "", fmt.Errorf("Discovered %d matching endpoints: %#v", len(endpoints), endpoints) + } + + // Extract the URL from the matching Endpoint. + for _, endpoint := range endpoints { + return gophercloud.NormalizeURL(endpoint.URL), nil + } + + // Report an error if there were no matching endpoints. + return "", gophercloud.ErrEndpointNotFound +} diff --git a/vendor/github.com/rackspace/gophercloud/openstack/endpoint_location_test.go b/vendor/github.com/rackspace/gophercloud/openstack/endpoint_location_test.go new file mode 100644 index 000000000..8e65918ab --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/openstack/endpoint_location_test.go @@ -0,0 +1,228 @@ +package openstack + +import ( + "strings" + "testing" + + "github.com/rackspace/gophercloud" + tokens2 "github.com/rackspace/gophercloud/openstack/identity/v2/tokens" + tokens3 "github.com/rackspace/gophercloud/openstack/identity/v3/tokens" + th "github.com/rackspace/gophercloud/testhelper" +) + +// Service catalog fixtures take too much vertical space! 
+var catalog2 = tokens2.ServiceCatalog{ + Entries: []tokens2.CatalogEntry{ + tokens2.CatalogEntry{ + Type: "same", + Name: "same", + Endpoints: []tokens2.Endpoint{ + tokens2.Endpoint{ + Region: "same", + PublicURL: "https://public.correct.com/", + InternalURL: "https://internal.correct.com/", + AdminURL: "https://admin.correct.com/", + }, + tokens2.Endpoint{ + Region: "different", + PublicURL: "https://badregion.com/", + }, + }, + }, + tokens2.CatalogEntry{ + Type: "same", + Name: "different", + Endpoints: []tokens2.Endpoint{ + tokens2.Endpoint{ + Region: "same", + PublicURL: "https://badname.com/", + }, + tokens2.Endpoint{ + Region: "different", + PublicURL: "https://badname.com/+badregion", + }, + }, + }, + tokens2.CatalogEntry{ + Type: "different", + Name: "different", + Endpoints: []tokens2.Endpoint{ + tokens2.Endpoint{ + Region: "same", + PublicURL: "https://badtype.com/+badname", + }, + tokens2.Endpoint{ + Region: "different", + PublicURL: "https://badtype.com/+badregion+badname", + }, + }, + }, + }, +} + +func TestV2EndpointExact(t *testing.T) { + expectedURLs := map[gophercloud.Availability]string{ + gophercloud.AvailabilityPublic: "https://public.correct.com/", + gophercloud.AvailabilityAdmin: "https://admin.correct.com/", + gophercloud.AvailabilityInternal: "https://internal.correct.com/", + } + + for availability, expected := range expectedURLs { + actual, err := V2EndpointURL(&catalog2, gophercloud.EndpointOpts{ + Type: "same", + Name: "same", + Region: "same", + Availability: availability, + }) + th.AssertNoErr(t, err) + th.CheckEquals(t, expected, actual) + } +} + +func TestV2EndpointNone(t *testing.T) { + _, err := V2EndpointURL(&catalog2, gophercloud.EndpointOpts{ + Type: "nope", + Availability: gophercloud.AvailabilityPublic, + }) + th.CheckEquals(t, gophercloud.ErrEndpointNotFound, err) +} + +func TestV2EndpointMultiple(t *testing.T) { + _, err := V2EndpointURL(&catalog2, gophercloud.EndpointOpts{ + Type: "same", + Region: "same", + Availability: gophercloud.AvailabilityPublic, + }) + if !strings.HasPrefix(err.Error(), "Discovered 2 matching endpoints:") { + t.Errorf("Received unexpected error: %v", err) + } +} + +func TestV2EndpointBadAvailability(t *testing.T) { + _, err := V2EndpointURL(&catalog2, gophercloud.EndpointOpts{ + Type: "same", + Name: "same", + Region: "same", + Availability: "wat", + }) + th.CheckEquals(t, "Unexpected availability in endpoint query: wat", err.Error()) +} + +var catalog3 = tokens3.ServiceCatalog{ + Entries: []tokens3.CatalogEntry{ + tokens3.CatalogEntry{ + Type: "same", + Name: "same", + Endpoints: []tokens3.Endpoint{ + tokens3.Endpoint{ + ID: "1", + Region: "same", + Interface: "public", + URL: "https://public.correct.com/", + }, + tokens3.Endpoint{ + ID: "2", + Region: "same", + Interface: "admin", + URL: "https://admin.correct.com/", + }, + tokens3.Endpoint{ + ID: "3", + Region: "same", + Interface: "internal", + URL: "https://internal.correct.com/", + }, + tokens3.Endpoint{ + ID: "4", + Region: "different", + Interface: "public", + URL: "https://badregion.com/", + }, + }, + }, + tokens3.CatalogEntry{ + Type: "same", + Name: "different", + Endpoints: []tokens3.Endpoint{ + tokens3.Endpoint{ + ID: "5", + Region: "same", + Interface: "public", + URL: "https://badname.com/", + }, + tokens3.Endpoint{ + ID: "6", + Region: "different", + Interface: "public", + URL: "https://badname.com/+badregion", + }, + }, + }, + tokens3.CatalogEntry{ + Type: "different", + Name: "different", + Endpoints: []tokens3.Endpoint{ + tokens3.Endpoint{ + ID: "7", + 
Region: "same", + Interface: "public", + URL: "https://badtype.com/+badname", + }, + tokens3.Endpoint{ + ID: "8", + Region: "different", + Interface: "public", + URL: "https://badtype.com/+badregion+badname", + }, + }, + }, + }, +} + +func TestV3EndpointExact(t *testing.T) { + expectedURLs := map[gophercloud.Availability]string{ + gophercloud.AvailabilityPublic: "https://public.correct.com/", + gophercloud.AvailabilityAdmin: "https://admin.correct.com/", + gophercloud.AvailabilityInternal: "https://internal.correct.com/", + } + + for availability, expected := range expectedURLs { + actual, err := V3EndpointURL(&catalog3, gophercloud.EndpointOpts{ + Type: "same", + Name: "same", + Region: "same", + Availability: availability, + }) + th.AssertNoErr(t, err) + th.CheckEquals(t, expected, actual) + } +} + +func TestV3EndpointNone(t *testing.T) { + _, err := V3EndpointURL(&catalog3, gophercloud.EndpointOpts{ + Type: "nope", + Availability: gophercloud.AvailabilityPublic, + }) + th.CheckEquals(t, gophercloud.ErrEndpointNotFound, err) +} + +func TestV3EndpointMultiple(t *testing.T) { + _, err := V3EndpointURL(&catalog3, gophercloud.EndpointOpts{ + Type: "same", + Region: "same", + Availability: gophercloud.AvailabilityPublic, + }) + if !strings.HasPrefix(err.Error(), "Discovered 2 matching endpoints:") { + t.Errorf("Received unexpected error: %v", err) + } +} + +func TestV3EndpointBadAvailability(t *testing.T) { + _, err := V3EndpointURL(&catalog3, gophercloud.EndpointOpts{ + Type: "same", + Name: "same", + Region: "same", + Availability: "wat", + }) + th.CheckEquals(t, "Unexpected availability in endpoint query: wat", err.Error()) +} diff --git a/vendor/github.com/rackspace/gophercloud/openstack/identity/v2/extensions/admin/roles/docs.go b/vendor/github.com/rackspace/gophercloud/openstack/identity/v2/extensions/admin/roles/docs.go new file mode 100644 index 000000000..895417871 --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/openstack/identity/v2/extensions/admin/roles/docs.go @@ -0,0 +1,16 @@ +// Package roles provides functionality to interact with and control roles on +// the API. +// +// A role represents a personality that a user can assume when performing a +// specific set of operations. If a role includes a set of rights and +// privileges, a user assuming that role inherits those rights and privileges. +// +// When a token is generated, the list of roles that user can assume is returned +// back to them. Services that are being called by that user determine how they +// interpret the set of roles a user has and to which operations or resources +// each role grants access. +// +// It is up to individual services such as Compute or Image to assign meaning +// to these roles. As far as the Identity service is concerned, a role is an +// arbitrary name assigned by the user. 
+package roles diff --git a/vendor/github.com/rackspace/gophercloud/openstack/identity/v2/extensions/admin/roles/fixtures.go b/vendor/github.com/rackspace/gophercloud/openstack/identity/v2/extensions/admin/roles/fixtures.go new file mode 100644 index 000000000..8256f0fe8 --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/openstack/identity/v2/extensions/admin/roles/fixtures.go @@ -0,0 +1,48 @@ +package roles + +import ( + "fmt" + "net/http" + "testing" + + th "github.com/rackspace/gophercloud/testhelper" + fake "github.com/rackspace/gophercloud/testhelper/client" +) + +func MockListRoleResponse(t *testing.T) { + th.Mux.HandleFunc("/OS-KSADM/roles", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + + fmt.Fprintf(w, ` +{ + "roles": [ + { + "id": "123", + "name": "compute:admin", + "description": "Nova Administrator" + } + ] +} + `) + }) +} + +func MockAddUserRoleResponse(t *testing.T) { + th.Mux.HandleFunc("/tenants/{tenant_id}/users/{user_id}/roles/OS-KSADM/{role_id}", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "PUT") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + w.WriteHeader(http.StatusCreated) + }) +} + +func MockDeleteUserRoleResponse(t *testing.T) { + th.Mux.HandleFunc("/tenants/{tenant_id}/users/{user_id}/roles/OS-KSADM/{role_id}", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "DELETE") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + w.WriteHeader(http.StatusNoContent) + }) +} diff --git a/vendor/github.com/rackspace/gophercloud/openstack/identity/v2/extensions/admin/roles/requests.go b/vendor/github.com/rackspace/gophercloud/openstack/identity/v2/extensions/admin/roles/requests.go new file mode 100644 index 000000000..9a333140b --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/openstack/identity/v2/extensions/admin/roles/requests.go @@ -0,0 +1,33 @@ +package roles + +import ( + "github.com/rackspace/gophercloud" + "github.com/rackspace/gophercloud/pagination" +) + +// List is the operation responsible for listing all available global roles +// that a user can adopt. +func List(client *gophercloud.ServiceClient) pagination.Pager { + createPage := func(r pagination.PageResult) pagination.Page { + return RolePage{pagination.SinglePageBase(r)} + } + return pagination.NewPager(client, rootURL(client), createPage) +} + +// AddUserRole is the operation responsible for assigning a particular role to +// a user. This is confined to the scope of the user's tenant - so the tenant +// ID is a required argument. +func AddUserRole(client *gophercloud.ServiceClient, tenantID, userID, roleID string) UserRoleResult { + var result UserRoleResult + _, result.Err = client.Put(userRoleURL(client, tenantID, userID, roleID), nil, nil, nil) + return result +} + +// DeleteUserRole is the operation responsible for deleting a particular role +// from a user. This is confined to the scope of the user's tenant - so the +// tenant ID is a required argument. 
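+// +// Sketch (placeholder IDs, mirroring the package tests; client is an assumed +// authenticated service client): +// +//     err := roles.DeleteUserRole(client, "{tenant_id}", "{user_id}", "{role_id}").ExtractErr()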
+func DeleteUserRole(client *gophercloud.ServiceClient, tenantID, userID, roleID string) UserRoleResult { + var result UserRoleResult + _, result.Err = client.Delete(userRoleURL(client, tenantID, userID, roleID), nil) + return result +} diff --git a/vendor/github.com/rackspace/gophercloud/openstack/identity/v2/extensions/admin/roles/requests_test.go b/vendor/github.com/rackspace/gophercloud/openstack/identity/v2/extensions/admin/roles/requests_test.go new file mode 100644 index 000000000..7bfeea44a --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/openstack/identity/v2/extensions/admin/roles/requests_test.go @@ -0,0 +1,64 @@ +package roles + +import ( + "testing" + + "github.com/rackspace/gophercloud/pagination" + th "github.com/rackspace/gophercloud/testhelper" + "github.com/rackspace/gophercloud/testhelper/client" +) + +func TestRole(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + MockListRoleResponse(t) + + count := 0 + + err := List(client.ServiceClient()).EachPage(func(page pagination.Page) (bool, error) { + count++ + actual, err := ExtractRoles(page) + if err != nil { + t.Errorf("Failed to extract roles: %v", err) + return false, err + } + + expected := []Role{ + Role{ + ID: "123", + Name: "compute:admin", + Description: "Nova Administrator", + }, + } + + th.CheckDeepEquals(t, expected, actual) + + return true, nil + }) + + th.AssertNoErr(t, err) + th.AssertEquals(t, 1, count) +} + +func TestAddUserRole(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + MockAddUserRoleResponse(t) + + err := AddUserRole(client.ServiceClient(), "{tenant_id}", "{user_id}", "{role_id}").ExtractErr() + + th.AssertNoErr(t, err) +} + +func TestDeleteUserRole(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + MockDeleteUserRoleResponse(t) + + err := DeleteUserRole(client.ServiceClient(), "{tenant_id}", "{user_id}", "{role_id}").ExtractErr() + + th.AssertNoErr(t, err) +} diff --git a/vendor/github.com/rackspace/gophercloud/openstack/identity/v2/extensions/admin/roles/results.go b/vendor/github.com/rackspace/gophercloud/openstack/identity/v2/extensions/admin/roles/results.go new file mode 100644 index 000000000..ebb3aa530 --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/openstack/identity/v2/extensions/admin/roles/results.go @@ -0,0 +1,53 @@ +package roles + +import ( + "github.com/mitchellh/mapstructure" + "github.com/rackspace/gophercloud" + "github.com/rackspace/gophercloud/pagination" +) + +// Role represents an API role resource. +type Role struct { + // The unique ID for the role. + ID string + + // The human-readable name of the role. + Name string + + // The description of the role. + Description string + + // The associated service for this role. + ServiceID string +} + +// RolePage is a single page of a user Role collection. +type RolePage struct { + pagination.SinglePageBase +} + +// IsEmpty determines whether or not a page of Roles contains any results. +func (page RolePage) IsEmpty() (bool, error) { + roles, err := ExtractRoles(page) + if err != nil { + return false, err + } + return len(roles) == 0, nil +} + +// ExtractRoles returns a slice of roles contained in a single page of results. +func ExtractRoles(page pagination.Page) ([]Role, error) { + casted := page.(RolePage).Body + var response struct { + Roles []Role `mapstructure:"roles"` + } + + err := mapstructure.Decode(casted, &response) + return response.Roles, err +} + +// UserRoleResult represents the result of either an AddUserRole or +// a DeleteUserRole operation. 
+type UserRoleResult struct { + gophercloud.ErrResult +} diff --git a/vendor/github.com/rackspace/gophercloud/openstack/identity/v2/extensions/admin/roles/urls.go b/vendor/github.com/rackspace/gophercloud/openstack/identity/v2/extensions/admin/roles/urls.go new file mode 100644 index 000000000..61b31551d --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/openstack/identity/v2/extensions/admin/roles/urls.go @@ -0,0 +1,21 @@ +package roles + +import "github.com/rackspace/gophercloud" + +const ( + ExtPath = "OS-KSADM" + RolePath = "roles" + UserPath = "users" +) + +func resourceURL(c *gophercloud.ServiceClient, id string) string { + return c.ServiceURL(ExtPath, RolePath, id) +} + +func rootURL(c *gophercloud.ServiceClient) string { + return c.ServiceURL(ExtPath, RolePath) +} + +func userRoleURL(c *gophercloud.ServiceClient, tenantID, userID, roleID string) string { + return c.ServiceURL("tenants", tenantID, UserPath, userID, RolePath, ExtPath, roleID) +} diff --git a/vendor/github.com/rackspace/gophercloud/openstack/identity/v2/extensions/delegate.go b/vendor/github.com/rackspace/gophercloud/openstack/identity/v2/extensions/delegate.go new file mode 100644 index 000000000..fd6e80ea6 --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/openstack/identity/v2/extensions/delegate.go @@ -0,0 +1,52 @@ +package extensions + +import ( + "github.com/mitchellh/mapstructure" + "github.com/rackspace/gophercloud" + common "github.com/rackspace/gophercloud/openstack/common/extensions" + "github.com/rackspace/gophercloud/pagination" +) + +// ExtensionPage is a single page of Extension results. +type ExtensionPage struct { + common.ExtensionPage +} + +// IsEmpty returns true if the current page contains at least one Extension. +func (page ExtensionPage) IsEmpty() (bool, error) { + is, err := ExtractExtensions(page) + if err != nil { + return true, err + } + return len(is) == 0, nil +} + +// ExtractExtensions accepts a Page struct, specifically an ExtensionPage struct, and extracts the +// elements into a slice of Extension structs. +func ExtractExtensions(page pagination.Page) ([]common.Extension, error) { + // Identity v2 adds an intermediate "values" object. + + var resp struct { + Extensions struct { + Values []common.Extension `mapstructure:"values"` + } `mapstructure:"extensions"` + } + + err := mapstructure.Decode(page.(ExtensionPage).Body, &resp) + return resp.Extensions.Values, err +} + +// Get retrieves information for a specific extension using its alias. +func Get(c *gophercloud.ServiceClient, alias string) common.GetResult { + return common.Get(c, alias) +} + +// List returns a Pager which allows you to iterate over the full collection of extensions. +// It does not accept query parameters. 
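+// +// Iteration sketch (assumes an authenticated Identity v2 client; error +// handling elided): +// +//     extensions.List(client).EachPage(func(page pagination.Page) (bool, error) { +//         exts, err := extensions.ExtractExtensions(page) +//         // exts is a []common.Extension +//         return true, err +//     })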
+func List(c *gophercloud.ServiceClient) pagination.Pager { + return common.List(c).WithPageCreator(func(r pagination.PageResult) pagination.Page { + return ExtensionPage{ + ExtensionPage: common.ExtensionPage{SinglePageBase: pagination.SinglePageBase(r)}, + } + }) +} diff --git a/vendor/github.com/rackspace/gophercloud/openstack/identity/v2/extensions/delegate_test.go b/vendor/github.com/rackspace/gophercloud/openstack/identity/v2/extensions/delegate_test.go new file mode 100644 index 000000000..504118a82 --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/openstack/identity/v2/extensions/delegate_test.go @@ -0,0 +1,38 @@ +package extensions + +import ( + "testing" + + common "github.com/rackspace/gophercloud/openstack/common/extensions" + "github.com/rackspace/gophercloud/pagination" + th "github.com/rackspace/gophercloud/testhelper" + "github.com/rackspace/gophercloud/testhelper/client" +) + +func TestList(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleListExtensionsSuccessfully(t) + + count := 0 + err := List(client.ServiceClient()).EachPage(func(page pagination.Page) (bool, error) { + count++ + actual, err := ExtractExtensions(page) + th.AssertNoErr(t, err) + th.CheckDeepEquals(t, common.ExpectedExtensions, actual) + + return true, nil + }) + th.AssertNoErr(t, err) + th.CheckEquals(t, 1, count) +} + +func TestGet(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + common.HandleGetExtensionSuccessfully(t) + + actual, err := Get(client.ServiceClient(), "agent").Extract() + th.AssertNoErr(t, err) + th.CheckDeepEquals(t, common.SingleExtension, actual) +} diff --git a/vendor/github.com/rackspace/gophercloud/openstack/identity/v2/extensions/doc.go b/vendor/github.com/rackspace/gophercloud/openstack/identity/v2/extensions/doc.go new file mode 100644 index 000000000..791e4e391 --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/openstack/identity/v2/extensions/doc.go @@ -0,0 +1,3 @@ +// Package extensions provides information and interaction with the +// different extensions available for the OpenStack Identity service. +package extensions diff --git a/vendor/github.com/rackspace/gophercloud/openstack/identity/v2/extensions/fixtures.go b/vendor/github.com/rackspace/gophercloud/openstack/identity/v2/extensions/fixtures.go new file mode 100644 index 000000000..96cb7d24a --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/openstack/identity/v2/extensions/fixtures.go @@ -0,0 +1,60 @@ +// +build fixtures + +package extensions + +import ( + "fmt" + "net/http" + "testing" + + th "github.com/rackspace/gophercloud/testhelper" + "github.com/rackspace/gophercloud/testhelper/client" +) + +// ListOutput provides a single Extension result. It differs from the delegated implementation +// by the introduction of an intermediate "values" member. +const ListOutput = ` +{ + "extensions": { + "values": [ + { + "updated": "2013-01-20T00:00:00-00:00", + "name": "Neutron Service Type Management", + "links": [], + "namespace": "http://docs.openstack.org/ext/neutron/service-type/api/v1.0", + "alias": "service-type", + "description": "API for retrieving service providers for Neutron advanced services" + } + ] + } +} +` + +// HandleListExtensionsSuccessfully creates an HTTP handler that returns ListOutput for a List +// call. 
+func HandleListExtensionsSuccessfully(t *testing.T) { + th.Mux.HandleFunc("/extensions", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", client.TokenID) + + w.Header().Add("Content-Type", "application/json") + + fmt.Fprintf(w, ` +{ + "extensions": { + "values": [ + { + "updated": "2013-01-20T00:00:00-00:00", + "name": "Neutron Service Type Management", + "links": [], + "namespace": "http://docs.openstack.org/ext/neutron/service-type/api/v1.0", + "alias": "service-type", + "description": "API for retrieving service providers for Neutron advanced services" + } + ] + } +} + `) + }) + +} diff --git a/vendor/github.com/rackspace/gophercloud/openstack/identity/v2/tenants/doc.go b/vendor/github.com/rackspace/gophercloud/openstack/identity/v2/tenants/doc.go new file mode 100644 index 000000000..0c2d49d56 --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/openstack/identity/v2/tenants/doc.go @@ -0,0 +1,7 @@ +// Package tenants provides information and interaction with the +// tenants API resource for the OpenStack Identity service. +// +// See http://developer.openstack.org/api-ref-identity-v2.html#identity-auth-v2 +// and http://developer.openstack.org/api-ref-identity-v2.html#admin-tenants +// for more information. +package tenants diff --git a/vendor/github.com/rackspace/gophercloud/openstack/identity/v2/tenants/fixtures.go b/vendor/github.com/rackspace/gophercloud/openstack/identity/v2/tenants/fixtures.go new file mode 100644 index 000000000..7f044ac3b --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/openstack/identity/v2/tenants/fixtures.go @@ -0,0 +1,65 @@ +// +build fixtures + +package tenants + +import ( + "fmt" + "net/http" + "testing" + + th "github.com/rackspace/gophercloud/testhelper" + "github.com/rackspace/gophercloud/testhelper/client" +) + +// ListOutput provides a single page of Tenant results. +const ListOutput = ` +{ + "tenants": [ + { + "id": "1234", + "name": "Red Team", + "description": "The team that is red", + "enabled": true + }, + { + "id": "9876", + "name": "Blue Team", + "description": "The team that is blue", + "enabled": false + } + ] +} +` + +// RedTeam is a Tenant fixture. +var RedTeam = Tenant{ + ID: "1234", + Name: "Red Team", + Description: "The team that is red", + Enabled: true, +} + +// BlueTeam is a Tenant fixture. +var BlueTeam = Tenant{ + ID: "9876", + Name: "Blue Team", + Description: "The team that is blue", + Enabled: false, +} + +// ExpectedTenantSlice is the slice of tenants expected to be returned from ListOutput. +var ExpectedTenantSlice = []Tenant{RedTeam, BlueTeam} + +// HandleListTenantsSuccessfully creates an HTTP handler at `/tenants` on the test handler mux that +// responds with a list of two tenants. 
+func HandleListTenantsSuccessfully(t *testing.T) { + th.Mux.HandleFunc("/tenants", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "Accept", "application/json") + th.TestHeader(t, r, "X-Auth-Token", client.TokenID) + + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + fmt.Fprintf(w, ListOutput) + }) +} diff --git a/vendor/github.com/rackspace/gophercloud/openstack/identity/v2/tenants/requests.go b/vendor/github.com/rackspace/gophercloud/openstack/identity/v2/tenants/requests.go new file mode 100644 index 000000000..5a359f5c9 --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/openstack/identity/v2/tenants/requests.go @@ -0,0 +1,33 @@ +package tenants + +import ( + "github.com/rackspace/gophercloud" + "github.com/rackspace/gophercloud/pagination" +) + +// ListOpts filters the Tenants that are returned by the List call. +type ListOpts struct { + // Marker is the ID of the last Tenant on the previous page. + Marker string `q:"marker"` + + // Limit specifies the page size. + Limit int `q:"limit"` +} + +// List enumerates the Tenants to which the current token has access. +func List(client *gophercloud.ServiceClient, opts *ListOpts) pagination.Pager { + createPage := func(r pagination.PageResult) pagination.Page { + return TenantPage{pagination.LinkedPageBase{PageResult: r}} + } + + url := listURL(client) + if opts != nil { + q, err := gophercloud.BuildQueryString(opts) + if err != nil { + return pagination.Pager{Err: err} + } + url += q.String() + } + + return pagination.NewPager(client, url, createPage) +} diff --git a/vendor/github.com/rackspace/gophercloud/openstack/identity/v2/tenants/requests_test.go b/vendor/github.com/rackspace/gophercloud/openstack/identity/v2/tenants/requests_test.go new file mode 100644 index 000000000..e8f172dd1 --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/openstack/identity/v2/tenants/requests_test.go @@ -0,0 +1,29 @@ +package tenants + +import ( + "testing" + + "github.com/rackspace/gophercloud/pagination" + th "github.com/rackspace/gophercloud/testhelper" + "github.com/rackspace/gophercloud/testhelper/client" +) + +func TestListTenants(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleListTenantsSuccessfully(t) + + count := 0 + err := List(client.ServiceClient(), nil).EachPage(func(page pagination.Page) (bool, error) { + count++ + + actual, err := ExtractTenants(page) + th.AssertNoErr(t, err) + + th.CheckDeepEquals(t, ExpectedTenantSlice, actual) + + return true, nil + }) + th.AssertNoErr(t, err) + th.CheckEquals(t, count, 1) +} diff --git a/vendor/github.com/rackspace/gophercloud/openstack/identity/v2/tenants/results.go b/vendor/github.com/rackspace/gophercloud/openstack/identity/v2/tenants/results.go new file mode 100644 index 000000000..c1220c384 --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/openstack/identity/v2/tenants/results.go @@ -0,0 +1,62 @@ +package tenants + +import ( + "github.com/mitchellh/mapstructure" + "github.com/rackspace/gophercloud" + "github.com/rackspace/gophercloud/pagination" +) + +// Tenant is a grouping of users in the identity service. +type Tenant struct { + // ID is a unique identifier for this tenant. + ID string `mapstructure:"id"` + + // Name is a friendlier user-facing name for this tenant. + Name string `mapstructure:"name"` + + // Description is a human-readable explanation of this Tenant's purpose. 
+ Description string `mapstructure:"description"` + + // Enabled indicates whether or not a tenant is active. + Enabled bool `mapstructure:"enabled"` +} + +// TenantPage is a single page of Tenant results. +type TenantPage struct { + pagination.LinkedPageBase +} + +// IsEmpty determines whether or not a page of Tenants contains any results. +func (page TenantPage) IsEmpty() (bool, error) { + tenants, err := ExtractTenants(page) + if err != nil { + return false, err + } + return len(tenants) == 0, nil +} + +// NextPageURL extracts the "next" link from the tenants_links section of the result. +func (page TenantPage) NextPageURL() (string, error) { + type resp struct { + Links []gophercloud.Link `mapstructure:"tenants_links"` + } + + var r resp + err := mapstructure.Decode(page.Body, &r) + if err != nil { + return "", err + } + + return gophercloud.ExtractNextURL(r.Links) +} + +// ExtractTenants returns a slice of Tenants contained in a single page of results. +func ExtractTenants(page pagination.Page) ([]Tenant, error) { + casted := page.(TenantPage).Body + var response struct { + Tenants []Tenant `mapstructure:"tenants"` + } + + err := mapstructure.Decode(casted, &response) + return response.Tenants, err +} diff --git a/vendor/github.com/rackspace/gophercloud/openstack/identity/v2/tenants/urls.go b/vendor/github.com/rackspace/gophercloud/openstack/identity/v2/tenants/urls.go new file mode 100644 index 000000000..1dd6ce023 --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/openstack/identity/v2/tenants/urls.go @@ -0,0 +1,7 @@ +package tenants + +import "github.com/rackspace/gophercloud" + +func listURL(client *gophercloud.ServiceClient) string { + return client.ServiceURL("tenants") +} diff --git a/vendor/github.com/rackspace/gophercloud/openstack/identity/v2/tokens/doc.go b/vendor/github.com/rackspace/gophercloud/openstack/identity/v2/tokens/doc.go new file mode 100644 index 000000000..31cacc5e1 --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/openstack/identity/v2/tokens/doc.go @@ -0,0 +1,5 @@ +// Package tokens provides information and interaction with the token API +// resource for the OpenStack Identity service. +// For more information, see: +// http://developer.openstack.org/api-ref-identity-v2.html#identity-auth-v2 +package tokens diff --git a/vendor/github.com/rackspace/gophercloud/openstack/identity/v2/tokens/errors.go b/vendor/github.com/rackspace/gophercloud/openstack/identity/v2/tokens/errors.go new file mode 100644 index 000000000..3dfdc08db --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/openstack/identity/v2/tokens/errors.go @@ -0,0 +1,30 @@ +package tokens + +import ( + "errors" + "fmt" +) + +var ( + // ErrUserIDProvided is returned if you attempt to authenticate with a UserID. + ErrUserIDProvided = unacceptedAttributeErr("UserID") + + // ErrAPIKeyProvided is returned if you attempt to authenticate with an APIKey. + ErrAPIKeyProvided = unacceptedAttributeErr("APIKey") + + // ErrDomainIDProvided is returned if you attempt to authenticate with a DomainID. + ErrDomainIDProvided = unacceptedAttributeErr("DomainID") + + // ErrDomainNameProvided is returned if you attempt to authenticate with a DomainName. + ErrDomainNameProvided = unacceptedAttributeErr("DomainName") + + // ErrUsernameRequired is returned if you attempt to authenticate without a Username. + ErrUsernameRequired = errors.New("You must supply a Username in your AuthOptions.") + + // ErrPasswordRequired is returned if you don't provide a password. 
+	ErrPasswordRequired = errors.New("Please supply a Password in your AuthOptions.")
+)
+
+func unacceptedAttributeErr(attribute string) error {
+	return fmt.Errorf("The base Identity V2 API does not accept authentication by %s", attribute)
+}
diff --git a/vendor/github.com/rackspace/gophercloud/openstack/identity/v2/tokens/fixtures.go b/vendor/github.com/rackspace/gophercloud/openstack/identity/v2/tokens/fixtures.go
new file mode 100644
index 000000000..6245259e1
--- /dev/null
+++ b/vendor/github.com/rackspace/gophercloud/openstack/identity/v2/tokens/fixtures.go
@@ -0,0 +1,195 @@
+// +build fixtures
+
+package tokens
+
+import (
+	"fmt"
+	"net/http"
+	"testing"
+	"time"
+
+	"github.com/rackspace/gophercloud/openstack/identity/v2/tenants"
+	th "github.com/rackspace/gophercloud/testhelper"
+	thclient "github.com/rackspace/gophercloud/testhelper/client"
+)
+
+// ExpectedToken is the token that should be parsed from TokenCreationResponse.
+var ExpectedToken = &Token{
+	ID:        "aaaabbbbccccdddd",
+	ExpiresAt: time.Date(2014, time.January, 31, 15, 30, 58, 0, time.UTC),
+	Tenant: tenants.Tenant{
+		ID:          "fc394f2ab2df4114bde39905f800dc57",
+		Name:        "test",
+		Description: "There are many tenants. This one is yours.",
+		Enabled:     true,
+	},
+}
+
+// ExpectedServiceCatalog is the service catalog that should be parsed from TokenCreationResponse.
+var ExpectedServiceCatalog = &ServiceCatalog{
+	Entries: []CatalogEntry{
+		CatalogEntry{
+			Name: "inscrutablewalrus",
+			Type: "something",
+			Endpoints: []Endpoint{
+				Endpoint{
+					PublicURL: "http://something0:1234/v2/",
+					Region:    "region0",
+				},
+				Endpoint{
+					PublicURL: "http://something1:1234/v2/",
+					Region:    "region1",
+				},
+			},
+		},
+		CatalogEntry{
+			Name: "arbitrarypenguin",
+			Type: "else",
+			Endpoints: []Endpoint{
+				Endpoint{
+					PublicURL: "http://else0:4321/v3/",
+					Region:    "region0",
+				},
+			},
+		},
+	},
+}
+
+// ExpectedUser is the user that should be parsed from TokenGetResponse.
+var ExpectedUser = &User{
+	ID:       "a530fefc3d594c4ba2693a4ecd6be74e",
+	Name:     "apiserver",
+	Roles:    []Role{{"member"}, {"service"}},
+	UserName: "apiserver",
+}
+
+// TokenCreationResponse is a JSON response that contains ExpectedToken and ExpectedServiceCatalog.
+const TokenCreationResponse = `
+{
+	"access": {
+		"token": {
+			"issued_at": "2014-01-30T15:30:58.000000Z",
+			"expires": "2014-01-31T15:30:58Z",
+			"id": "aaaabbbbccccdddd",
+			"tenant": {
+				"description": "There are many tenants. This one is yours.",
+				"enabled": true,
+				"id": "fc394f2ab2df4114bde39905f800dc57",
+				"name": "test"
+			}
+		},
+		"serviceCatalog": [
+			{
+				"endpoints": [
+					{
+						"publicURL": "http://something0:1234/v2/",
+						"region": "region0"
+					},
+					{
+						"publicURL": "http://something1:1234/v2/",
+						"region": "region1"
+					}
+				],
+				"type": "something",
+				"name": "inscrutablewalrus"
+			},
+			{
+				"endpoints": [
+					{
+						"publicURL": "http://else0:4321/v3/",
+						"region": "region0"
+					}
+				],
+				"type": "else",
+				"name": "arbitrarypenguin"
+			}
+		]
+	}
+}
+`
+
+// TokenGetResponse is a JSON response that contains ExpectedToken and ExpectedUser.
+const TokenGetResponse = `
+{
+	"access": {
+		"token": {
+			"issued_at": "2014-01-30T15:30:58.000000Z",
+			"expires": "2014-01-31T15:30:58Z",
+			"id": "aaaabbbbccccdddd",
+			"tenant": {
This one is yours.", + "enabled": true, + "id": "fc394f2ab2df4114bde39905f800dc57", + "name": "test" + } + }, + "serviceCatalog": [], + "user": { + "id": "a530fefc3d594c4ba2693a4ecd6be74e", + "name": "apiserver", + "roles": [ + { + "name": "member" + }, + { + "name": "service" + } + ], + "roles_links": [], + "username": "apiserver" + } + } +}` + +// HandleTokenPost expects a POST against a /tokens handler, ensures that the request body has been +// constructed properly given certain auth options, and returns the result. +func HandleTokenPost(t *testing.T, requestJSON string) { + th.Mux.HandleFunc("/tokens", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "POST") + th.TestHeader(t, r, "Content-Type", "application/json") + th.TestHeader(t, r, "Accept", "application/json") + if requestJSON != "" { + th.TestJSONRequest(t, r, requestJSON) + } + + w.WriteHeader(http.StatusOK) + fmt.Fprintf(w, TokenCreationResponse) + }) +} + +// HandleTokenGet expects a Get against a /tokens handler, ensures that the request body has been +// constructed properly given certain auth options, and returns the result. +func HandleTokenGet(t *testing.T, token string) { + th.Mux.HandleFunc("/tokens/"+token, func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "Accept", "application/json") + th.TestHeader(t, r, "X-Auth-Token", thclient.TokenID) + + w.WriteHeader(http.StatusOK) + fmt.Fprintf(w, TokenGetResponse) + }) +} + +// IsSuccessful ensures that a CreateResult was successful and contains the correct token and +// service catalog. +func IsSuccessful(t *testing.T, result CreateResult) { + token, err := result.ExtractToken() + th.AssertNoErr(t, err) + th.CheckDeepEquals(t, ExpectedToken, token) + + serviceCatalog, err := result.ExtractServiceCatalog() + th.AssertNoErr(t, err) + th.CheckDeepEquals(t, ExpectedServiceCatalog, serviceCatalog) +} + +// GetIsSuccessful ensures that a GetResult was successful and contains the correct token and +// User Info. +func GetIsSuccessful(t *testing.T, result GetResult) { + token, err := result.ExtractToken() + th.AssertNoErr(t, err) + th.CheckDeepEquals(t, ExpectedToken, token) + + user, err := result.ExtractUser() + th.AssertNoErr(t, err) + th.CheckDeepEquals(t, ExpectedUser, user) +} diff --git a/vendor/github.com/rackspace/gophercloud/openstack/identity/v2/tokens/requests.go b/vendor/github.com/rackspace/gophercloud/openstack/identity/v2/tokens/requests.go new file mode 100644 index 000000000..1f514386d --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/openstack/identity/v2/tokens/requests.go @@ -0,0 +1,99 @@ +package tokens + +import ( + "fmt" + + "github.com/rackspace/gophercloud" +) + +// AuthOptionsBuilder describes any argument that may be passed to the Create call. +type AuthOptionsBuilder interface { + + // ToTokenCreateMap assembles the Create request body, returning an error if parameters are + // missing or inconsistent. + ToTokenCreateMap() (map[string]interface{}, error) +} + +// AuthOptions wraps a gophercloud AuthOptions in order to adhere to the AuthOptionsBuilder +// interface. +type AuthOptions struct { + gophercloud.AuthOptions +} + +// WrapOptions embeds a root AuthOptions struct in a package-specific one. +func WrapOptions(original gophercloud.AuthOptions) AuthOptions { + return AuthOptions{AuthOptions: original} +} + +// ToTokenCreateMap converts AuthOptions into nested maps that can be serialized into a JSON +// request. 
+func (auth AuthOptions) ToTokenCreateMap() (map[string]interface{}, error) {
+	// Error out if an unsupported auth option is present.
+	if auth.UserID != "" {
+		return nil, ErrUserIDProvided
+	}
+	if auth.APIKey != "" {
+		return nil, ErrAPIKeyProvided
+	}
+	if auth.DomainID != "" {
+		return nil, ErrDomainIDProvided
+	}
+	if auth.DomainName != "" {
+		return nil, ErrDomainNameProvided
+	}
+
+	// Populate the request map.
+	authMap := make(map[string]interface{})
+
+	if auth.Username != "" {
+		if auth.Password != "" {
+			authMap["passwordCredentials"] = map[string]interface{}{
+				"username": auth.Username,
+				"password": auth.Password,
+			}
+		} else {
+			return nil, ErrPasswordRequired
+		}
+	} else if auth.TokenID != "" {
+		authMap["token"] = map[string]interface{}{
+			"id": auth.TokenID,
+		}
+	} else {
+		return nil, fmt.Errorf("You must provide either username/password or tenantID/token values.")
+	}
+
+	if auth.TenantID != "" {
+		authMap["tenantId"] = auth.TenantID
+	}
+	if auth.TenantName != "" {
+		authMap["tenantName"] = auth.TenantName
+	}
+
+	return map[string]interface{}{"auth": authMap}, nil
+}
+
+// Create authenticates to the identity service and attempts to acquire a Token.
+// If successful, the CreateResult carries the API response, from which the Token
+// and the ServiceCatalog can be extracted.
+// Generally, rather than interact with this call directly, end users should call openstack.AuthenticatedClient(),
+// which abstracts all of the gory details about navigating service catalogs and such.
+func Create(client *gophercloud.ServiceClient, auth AuthOptionsBuilder) CreateResult {
+	request, err := auth.ToTokenCreateMap()
+	if err != nil {
+		return CreateResult{gophercloud.Result{Err: err}}
+	}
+
+	var result CreateResult
+	_, result.Err = client.Post(CreateURL(client), request, &result.Body, &gophercloud.RequestOpts{
+		OkCodes: []int{200, 203},
+	})
+	return result
+}
+
+// Get validates and retrieves information for a user's token.
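+//
+// A usage sketch; identityClient is an assumed name for an authenticated
+// *gophercloud.ServiceClient pointed at the Identity v2 API:
+//
+//	res := tokens.Get(identityClient, "db22caf43c934e6c829087c41ff8d8d6")
+//	user, err := res.ExtractUser()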
+func Get(client *gophercloud.ServiceClient, token string) GetResult { + var result GetResult + _, result.Err = client.Get(GetURL(client, token), &result.Body, &gophercloud.RequestOpts{ + OkCodes: []int{200, 203}, + }) + return result +} diff --git a/vendor/github.com/rackspace/gophercloud/openstack/identity/v2/tokens/requests_test.go b/vendor/github.com/rackspace/gophercloud/openstack/identity/v2/tokens/requests_test.go new file mode 100644 index 000000000..f1ec3396e --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/openstack/identity/v2/tokens/requests_test.go @@ -0,0 +1,152 @@ +package tokens + +import ( + "fmt" + "testing" + + "github.com/rackspace/gophercloud" + th "github.com/rackspace/gophercloud/testhelper" + "github.com/rackspace/gophercloud/testhelper/client" +) + +func tokenPost(t *testing.T, options gophercloud.AuthOptions, requestJSON string) CreateResult { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleTokenPost(t, requestJSON) + + return Create(client.ServiceClient(), AuthOptions{options}) +} + +func tokenPostErr(t *testing.T, options gophercloud.AuthOptions, expectedErr error) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleTokenPost(t, "") + + actualErr := Create(client.ServiceClient(), AuthOptions{options}).Err + th.CheckDeepEquals(t, expectedErr, actualErr) +} + +func TestCreateWithPassword(t *testing.T) { + options := gophercloud.AuthOptions{ + Username: "me", + Password: "swordfish", + } + + IsSuccessful(t, tokenPost(t, options, ` + { + "auth": { + "passwordCredentials": { + "username": "me", + "password": "swordfish" + } + } + } + `)) +} + +func TestCreateTokenWithTenantID(t *testing.T) { + options := gophercloud.AuthOptions{ + Username: "me", + Password: "opensesame", + TenantID: "fc394f2ab2df4114bde39905f800dc57", + } + + IsSuccessful(t, tokenPost(t, options, ` + { + "auth": { + "tenantId": "fc394f2ab2df4114bde39905f800dc57", + "passwordCredentials": { + "username": "me", + "password": "opensesame" + } + } + } + `)) +} + +func TestCreateTokenWithTenantName(t *testing.T) { + options := gophercloud.AuthOptions{ + Username: "me", + Password: "opensesame", + TenantName: "demo", + } + + IsSuccessful(t, tokenPost(t, options, ` + { + "auth": { + "tenantName": "demo", + "passwordCredentials": { + "username": "me", + "password": "opensesame" + } + } + } + `)) +} + +func TestProhibitUserID(t *testing.T) { + options := gophercloud.AuthOptions{ + Username: "me", + UserID: "1234", + Password: "thing", + } + + tokenPostErr(t, options, ErrUserIDProvided) +} + +func TestProhibitAPIKey(t *testing.T) { + options := gophercloud.AuthOptions{ + Username: "me", + Password: "thing", + APIKey: "123412341234", + } + + tokenPostErr(t, options, ErrAPIKeyProvided) +} + +func TestProhibitDomainID(t *testing.T) { + options := gophercloud.AuthOptions{ + Username: "me", + Password: "thing", + DomainID: "1234", + } + + tokenPostErr(t, options, ErrDomainIDProvided) +} + +func TestProhibitDomainName(t *testing.T) { + options := gophercloud.AuthOptions{ + Username: "me", + Password: "thing", + DomainName: "wat", + } + + tokenPostErr(t, options, ErrDomainNameProvided) +} + +func TestRequireUsername(t *testing.T) { + options := gophercloud.AuthOptions{ + Password: "thing", + } + + tokenPostErr(t, options, fmt.Errorf("You must provide either username/password or tenantID/token values.")) +} + +func TestRequirePassword(t *testing.T) { + options := gophercloud.AuthOptions{ + Username: "me", + } + + tokenPostErr(t, options, ErrPasswordRequired) +} + +func tokenGet(t *testing.T, tokenId 
string) GetResult {
+	th.SetupHTTP()
+	defer th.TeardownHTTP()
+	HandleTokenGet(t, tokenId)
+	return Get(client.ServiceClient(), tokenId)
+}
+
+func TestGetWithToken(t *testing.T) {
+	GetIsSuccessful(t, tokenGet(t, "db22caf43c934e6c829087c41ff8d8d6"))
+}
diff --git a/vendor/github.com/rackspace/gophercloud/openstack/identity/v2/tokens/results.go b/vendor/github.com/rackspace/gophercloud/openstack/identity/v2/tokens/results.go
new file mode 100644
index 000000000..67c577b8d
--- /dev/null
+++ b/vendor/github.com/rackspace/gophercloud/openstack/identity/v2/tokens/results.go
@@ -0,0 +1,170 @@
+package tokens
+
+import (
+	"time"
+
+	"github.com/mitchellh/mapstructure"
+	"github.com/rackspace/gophercloud"
+	"github.com/rackspace/gophercloud/openstack/identity/v2/tenants"
+)
+
+// Token provides only the most basic information related to an authentication token.
+type Token struct {
+	// ID provides the primary means of identifying a user to the OpenStack API.
+	// OpenStack defines this field as an opaque value, so do not depend on its content.
+	// It is safe, however, to compare for equality.
+	ID string
+
+	// ExpiresAt provides a timestamp in ISO 8601 format, indicating when the authentication token becomes invalid.
+	// After this point in time, future API requests made using this authentication token will respond with errors.
+	// Either the caller will need to reauthenticate manually, or more preferably, the caller should exploit automatic re-authentication.
+	// See the AuthOptions structure for more details.
+	ExpiresAt time.Time
+
+	// Tenant provides information about the tenant to which this token grants access.
+	Tenant tenants.Tenant
+}
+
+// Role describes a permission granted to a user, as reported in the token
+// authentication response.
+type Role struct {
+	Name string `mapstructure:"name"`
+}
+
+// User holds the user information that accompanies the token authentication
+// response; callers need it to make authorization decisions.
+type User struct {
+	ID       string `mapstructure:"id"`
+	Name     string `mapstructure:"name"`
+	UserName string `mapstructure:"username"`
+	Roles    []Role `mapstructure:"roles"`
+}
+
+// Endpoint represents a single API endpoint offered by a service.
+// It provides the public and internal URLs, if supported, along with a region specifier, again if provided.
+// The significance of the Region field will depend upon your provider.
+//
+// In addition, the interface offered by the service will have version information associated with it
+// through the VersionId, VersionInfo, and VersionList fields, if provided or supported.
+//
+// In all cases, fields which aren't supported by the provider and service combined will assume a zero-value ("").
+type Endpoint struct {
+	TenantID    string `mapstructure:"tenantId"`
+	PublicURL   string `mapstructure:"publicURL"`
+	InternalURL string `mapstructure:"internalURL"`
+	AdminURL    string `mapstructure:"adminURL"`
+	Region      string `mapstructure:"region"`
+	VersionID   string `mapstructure:"versionId"`
+	VersionInfo string `mapstructure:"versionInfo"`
+	VersionList string `mapstructure:"versionList"`
+}
+
+// CatalogEntry provides a type-safe interface to an Identity API V2 service catalog listing.
+// Each class of service, such as cloud DNS or block storage services, will have a single
+// CatalogEntry representing it.
+//
+// Note: when looking for the desired service, try, whenever possible, to key off the type field.
+// Otherwise, you'll tie the representation of the service to a specific provider.
+type CatalogEntry struct {
+	// Name will contain the provider-specified name for the service.
+	Name string `mapstructure:"name"`
+
+	// Type will contain a type string if OpenStack defines a type for the service.
+	// Otherwise, for provider-specific services, the provider may assign their own type strings.
+	Type string `mapstructure:"type"`
+
+	// Endpoints will let the caller iterate over all the different endpoints that may exist for
+	// the service.
+	Endpoints []Endpoint `mapstructure:"endpoints"`
+}
+
+// ServiceCatalog provides a view into the service catalog from a previous, successful authentication.
+type ServiceCatalog struct {
+	Entries []CatalogEntry
+}
+
+// CreateResult defers the interpretation of a created token.
+// Use ExtractToken() to interpret it as a Token, or ExtractServiceCatalog() to interpret it as a service catalog.
+type CreateResult struct {
+	gophercloud.Result
+}
+
+// GetResult is the deferred response from a Get call; its body has the same
+// format as a newly created token's, so it embeds CreateResult.
+// Use ExtractUser() to interpret it as a User.
+type GetResult struct {
+	CreateResult
+}
+
+// ExtractToken returns the just-created Token from a CreateResult.
+func (result CreateResult) ExtractToken() (*Token, error) {
+	if result.Err != nil {
+		return nil, result.Err
+	}
+
+	var response struct {
+		Access struct {
+			Token struct {
+				Expires string         `mapstructure:"expires"`
+				ID      string         `mapstructure:"id"`
+				Tenant  tenants.Tenant `mapstructure:"tenant"`
+			} `mapstructure:"token"`
+		} `mapstructure:"access"`
+	}
+
+	err := mapstructure.Decode(result.Body, &response)
+	if err != nil {
+		return nil, err
+	}
+
+	expiresTs, err := time.Parse(gophercloud.RFC3339Milli, response.Access.Token.Expires)
+	if err != nil {
+		return nil, err
+	}
+
+	return &Token{
+		ID:        response.Access.Token.ID,
+		ExpiresAt: expiresTs,
+		Tenant:    response.Access.Token.Tenant,
+	}, nil
+}
+
+// ExtractServiceCatalog returns the ServiceCatalog that was generated along with the user's Token.
+func (result CreateResult) ExtractServiceCatalog() (*ServiceCatalog, error) {
+	if result.Err != nil {
+		return nil, result.Err
+	}
+
+	var response struct {
+		Access struct {
+			Entries []CatalogEntry `mapstructure:"serviceCatalog"`
+		} `mapstructure:"access"`
+	}
+
+	err := mapstructure.Decode(result.Body, &response)
+	if err != nil {
+		return nil, err
+	}
+
+	return &ServiceCatalog{Entries: response.Access.Entries}, nil
+}
+
+// createErr quickly packs an error in a CreateResult.
+func createErr(err error) CreateResult {
+	return CreateResult{gophercloud.Result{Err: err}}
+}
+
+// ExtractUser returns the User from a GetResult.
+func (result GetResult) ExtractUser() (*User, error) {
+	if result.Err != nil {
+		return nil, result.Err
+	}
+
+	var response struct {
+		Access struct {
+			User User `mapstructure:"user"`
+		} `mapstructure:"access"`
+	}
+
+	err := mapstructure.Decode(result.Body, &response)
+	if err != nil {
+		return nil, err
+	}
+
+	return &response.Access.User, nil
+}
diff --git a/vendor/github.com/rackspace/gophercloud/openstack/identity/v2/tokens/urls.go b/vendor/github.com/rackspace/gophercloud/openstack/identity/v2/tokens/urls.go
new file mode 100644
index 000000000..ee1393299
--- /dev/null
+++ b/vendor/github.com/rackspace/gophercloud/openstack/identity/v2/tokens/urls.go
@@ -0,0 +1,13 @@
+package tokens
+
+import "github.com/rackspace/gophercloud"
+
+// CreateURL generates the URL used to create new Tokens.
+func CreateURL(client *gophercloud.ServiceClient) string {
+	return client.ServiceURL("tokens")
+}
+
+// GetURL generates the URL used to validate Tokens.
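+//
+// For example, an endpoint of "http://localhost:5000/v2.0/" and a token of
+// "aaaabbbbccccdddd" (both illustrative values) would yield
+// "http://localhost:5000/v2.0/tokens/aaaabbbbccccdddd".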
+func GetURL(client *gophercloud.ServiceClient, token string) string { + return client.ServiceURL("tokens", token) +} diff --git a/vendor/github.com/rackspace/gophercloud/openstack/identity/v2/users/doc.go b/vendor/github.com/rackspace/gophercloud/openstack/identity/v2/users/doc.go new file mode 100644 index 000000000..82abcb9fc --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/openstack/identity/v2/users/doc.go @@ -0,0 +1 @@ +package users diff --git a/vendor/github.com/rackspace/gophercloud/openstack/identity/v2/users/fixtures.go b/vendor/github.com/rackspace/gophercloud/openstack/identity/v2/users/fixtures.go new file mode 100644 index 000000000..8941868dd --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/openstack/identity/v2/users/fixtures.go @@ -0,0 +1,163 @@ +package users + +import ( + "fmt" + "net/http" + "testing" + + th "github.com/rackspace/gophercloud/testhelper" + fake "github.com/rackspace/gophercloud/testhelper/client" +) + +func MockListUserResponse(t *testing.T) { + th.Mux.HandleFunc("/users", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + + fmt.Fprintf(w, ` +{ + "users":[ + { + "id": "u1000", + "name": "John Smith", + "username": "jqsmith", + "email": "john.smith@example.org", + "enabled": true, + "tenant_id": "12345" + }, + { + "id": "u1001", + "name": "Jane Smith", + "username": "jqsmith", + "email": "jane.smith@example.org", + "enabled": true, + "tenant_id": "12345" + } + ] +} + `) + }) +} + +func mockCreateUserResponse(t *testing.T) { + th.Mux.HandleFunc("/users", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "POST") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + + th.TestJSONRequest(t, r, ` +{ + "user": { + "name": "new_user", + "tenant_id": "12345", + "enabled": false, + "email": "new_user@foo.com" + } +} + `) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + + fmt.Fprintf(w, ` +{ + "user": { + "name": "new_user", + "tenant_id": "12345", + "enabled": false, + "email": "new_user@foo.com", + "id": "c39e3de9be2d4c779f1dfd6abacc176d" + } +} +`) + }) +} + +func mockGetUserResponse(t *testing.T) { + th.Mux.HandleFunc("/users/new_user", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + + fmt.Fprintf(w, ` +{ + "user": { + "name": "new_user", + "tenant_id": "12345", + "enabled": false, + "email": "new_user@foo.com", + "id": "c39e3de9be2d4c779f1dfd6abacc176d" + } +} +`) + }) +} + +func mockUpdateUserResponse(t *testing.T) { + th.Mux.HandleFunc("/users/c39e3de9be2d4c779f1dfd6abacc176d", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "PUT") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + + th.TestJSONRequest(t, r, ` +{ + "user": { + "name": "new_name", + "enabled": true, + "email": "new_email@foo.com" + } +} +`) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + + fmt.Fprintf(w, ` +{ + "user": { + "name": "new_name", + "tenant_id": "12345", + "enabled": true, + "email": "new_email@foo.com", + "id": "c39e3de9be2d4c779f1dfd6abacc176d" + } +} +`) + }) +} + +func mockDeleteUserResponse(t *testing.T) { + th.Mux.HandleFunc("/users/c39e3de9be2d4c779f1dfd6abacc176d", func(w http.ResponseWriter, r 
*http.Request) { + th.TestMethod(t, r, "DELETE") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + w.WriteHeader(http.StatusNoContent) + }) +} + +func mockListRolesResponse(t *testing.T) { + th.Mux.HandleFunc("/tenants/1d8b6120dcc640fda4fc9194ffc80273/users/c39e3de9be2d4c779f1dfd6abacc176d/roles", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + + fmt.Fprintf(w, ` +{ + "roles": [ + { + "id": "9fe2ff9ee4384b1894a90878d3e92bab", + "name": "foo_role" + }, + { + "id": "1ea3d56793574b668e85960fbf651e13", + "name": "admin" + } + ] +} + `) + }) +} diff --git a/vendor/github.com/rackspace/gophercloud/openstack/identity/v2/users/requests.go b/vendor/github.com/rackspace/gophercloud/openstack/identity/v2/users/requests.go new file mode 100644 index 000000000..88be45ecc --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/openstack/identity/v2/users/requests.go @@ -0,0 +1,161 @@ +package users + +import ( + "errors" + + "github.com/rackspace/gophercloud" + "github.com/rackspace/gophercloud/pagination" +) + +func List(client *gophercloud.ServiceClient) pagination.Pager { + createPage := func(r pagination.PageResult) pagination.Page { + return UserPage{pagination.SinglePageBase(r)} + } + + return pagination.NewPager(client, rootURL(client), createPage) +} + +// EnabledState represents whether the user is enabled or not. +type EnabledState *bool + +// Useful variables to use when creating or updating users. +var ( + iTrue = true + iFalse = false + + Enabled EnabledState = &iTrue + Disabled EnabledState = &iFalse +) + +// CommonOpts are the parameters that are shared between CreateOpts and +// UpdateOpts +type CommonOpts struct { + // Either a name or username is required. When provided, the value must be + // unique or a 409 conflict error will be returned. If you provide a name but + // omit a username, the latter will be set to the former; and vice versa. + Name, Username string + + // The ID of the tenant to which you want to assign this user. + TenantID string + + // Indicates whether this user is enabled or not. + Enabled EnabledState + + // The email address of this user. + Email string +} + +// CreateOpts represents the options needed when creating new users. +type CreateOpts CommonOpts + +// CreateOptsBuilder describes struct types that can be accepted by the Create call. +type CreateOptsBuilder interface { + ToUserCreateMap() (map[string]interface{}, error) +} + +// ToUserCreateMap assembles a request body based on the contents of a CreateOpts. +func (opts CreateOpts) ToUserCreateMap() (map[string]interface{}, error) { + m := make(map[string]interface{}) + + if opts.Name == "" && opts.Username == "" { + return m, errors.New("Either a Name or Username must be provided") + } + + if opts.Name != "" { + m["name"] = opts.Name + } + if opts.Username != "" { + m["username"] = opts.Username + } + if opts.Enabled != nil { + m["enabled"] = &opts.Enabled + } + if opts.Email != "" { + m["email"] = opts.Email + } + if opts.TenantID != "" { + m["tenant_id"] = opts.TenantID + } + + return map[string]interface{}{"user": m}, nil +} + +// Create is the operation responsible for creating new users. 
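+//
+// A usage sketch, borrowing the values exercised by the tests in this package
+// (identityClient is an assumed name for an authenticated service client):
+//
+//	opts := users.CreateOpts{
+//		Name:     "new_user",
+//		TenantID: "12345",
+//		Enabled:  users.Disabled,
+//		Email:    "new_user@foo.com",
+//	}
+//	user, err := users.Create(identityClient, opts).Extract()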
+func Create(client *gophercloud.ServiceClient, opts CreateOptsBuilder) CreateResult {
+	var res CreateResult
+
+	reqBody, err := opts.ToUserCreateMap()
+	if err != nil {
+		res.Err = err
+		return res
+	}
+
+	_, res.Err = client.Post(rootURL(client), reqBody, &res.Body, &gophercloud.RequestOpts{
+		OkCodes: []int{200, 201},
+	})
+
+	return res
+}
+
+// Get requests details on a single user, by ID.
+func Get(client *gophercloud.ServiceClient, id string) GetResult {
+	var result GetResult
+	_, result.Err = client.Get(ResourceURL(client, id), &result.Body, nil)
+	return result
+}
+
+// UpdateOptsBuilder allows extensions to add additional attributes to the Update request.
+type UpdateOptsBuilder interface {
+	ToUserUpdateMap() map[string]interface{}
+}
+
+// UpdateOpts specifies the base attributes that may be updated on an existing user.
+type UpdateOpts CommonOpts
+
+// ToUserUpdateMap formats an UpdateOpts structure into a request body.
+func (opts UpdateOpts) ToUserUpdateMap() map[string]interface{} {
+	m := make(map[string]interface{})
+
+	if opts.Name != "" {
+		m["name"] = opts.Name
+	}
+	if opts.Username != "" {
+		m["username"] = opts.Username
+	}
+	if opts.Enabled != nil {
+		m["enabled"] = &opts.Enabled
+	}
+	if opts.Email != "" {
+		m["email"] = opts.Email
+	}
+	if opts.TenantID != "" {
+		m["tenant_id"] = opts.TenantID
+	}
+
+	return map[string]interface{}{"user": m}
+}
+
+// Update is the operation responsible for updating existing users by their UUID.
+func Update(client *gophercloud.ServiceClient, id string, opts UpdateOptsBuilder) UpdateResult {
+	var result UpdateResult
+	reqBody := opts.ToUserUpdateMap()
+	_, result.Err = client.Put(ResourceURL(client, id), reqBody, &result.Body, &gophercloud.RequestOpts{
+		OkCodes: []int{200},
+	})
+	return result
+}
+
+// Delete is the operation responsible for permanently deleting an API user.
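+//
+// A minimal sketch (identityClient is an assumed name):
+//
+//	res := users.Delete(identityClient, "c39e3de9be2d4c779f1dfd6abacc176d")
+//	// A nil res.Err indicates the deletion succeeded.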
+func Delete(client *gophercloud.ServiceClient, id string) DeleteResult { + var result DeleteResult + _, result.Err = client.Delete(ResourceURL(client, id), nil) + return result +} + +func ListRoles(client *gophercloud.ServiceClient, tenantID, userID string) pagination.Pager { + createPage := func(r pagination.PageResult) pagination.Page { + return RolePage{pagination.SinglePageBase(r)} + } + + return pagination.NewPager(client, listRolesURL(client, tenantID, userID), createPage) +} diff --git a/vendor/github.com/rackspace/gophercloud/openstack/identity/v2/users/requests_test.go b/vendor/github.com/rackspace/gophercloud/openstack/identity/v2/users/requests_test.go new file mode 100644 index 000000000..04f837163 --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/openstack/identity/v2/users/requests_test.go @@ -0,0 +1,165 @@ +package users + +import ( + "testing" + + "github.com/rackspace/gophercloud/pagination" + th "github.com/rackspace/gophercloud/testhelper" + "github.com/rackspace/gophercloud/testhelper/client" +) + +func TestList(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + MockListUserResponse(t) + + count := 0 + + err := List(client.ServiceClient()).EachPage(func(page pagination.Page) (bool, error) { + count++ + actual, err := ExtractUsers(page) + if err != nil { + t.Errorf("Failed to extract users: %v", err) + return false, err + } + + expected := []User{ + User{ + ID: "u1000", + Name: "John Smith", + Username: "jqsmith", + Email: "john.smith@example.org", + Enabled: true, + TenantID: "12345", + }, + User{ + ID: "u1001", + Name: "Jane Smith", + Username: "jqsmith", + Email: "jane.smith@example.org", + Enabled: true, + TenantID: "12345", + }, + } + + th.CheckDeepEquals(t, expected, actual) + + return true, nil + }) + + th.AssertNoErr(t, err) + th.AssertEquals(t, 1, count) +} + +func TestCreateUser(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + mockCreateUserResponse(t) + + opts := CreateOpts{ + Name: "new_user", + TenantID: "12345", + Enabled: Disabled, + Email: "new_user@foo.com", + } + + user, err := Create(client.ServiceClient(), opts).Extract() + + th.AssertNoErr(t, err) + + expected := &User{ + Name: "new_user", + ID: "c39e3de9be2d4c779f1dfd6abacc176d", + Email: "new_user@foo.com", + Enabled: false, + TenantID: "12345", + } + + th.AssertDeepEquals(t, expected, user) +} + +func TestGetUser(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + mockGetUserResponse(t) + + user, err := Get(client.ServiceClient(), "new_user").Extract() + th.AssertNoErr(t, err) + + expected := &User{ + Name: "new_user", + ID: "c39e3de9be2d4c779f1dfd6abacc176d", + Email: "new_user@foo.com", + Enabled: false, + TenantID: "12345", + } + + th.AssertDeepEquals(t, expected, user) +} + +func TestUpdateUser(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + mockUpdateUserResponse(t) + + id := "c39e3de9be2d4c779f1dfd6abacc176d" + opts := UpdateOpts{ + Name: "new_name", + Enabled: Enabled, + Email: "new_email@foo.com", + } + + user, err := Update(client.ServiceClient(), id, opts).Extract() + + th.AssertNoErr(t, err) + + expected := &User{ + Name: "new_name", + ID: id, + Email: "new_email@foo.com", + Enabled: true, + TenantID: "12345", + } + + th.AssertDeepEquals(t, expected, user) +} + +func TestDeleteUser(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + mockDeleteUserResponse(t) + + res := Delete(client.ServiceClient(), "c39e3de9be2d4c779f1dfd6abacc176d") + th.AssertNoErr(t, res.Err) +} + +func TestListingUserRoles(t *testing.T) { + 
th.SetupHTTP()
+	defer th.TeardownHTTP()
+
+	mockListRolesResponse(t)
+
+	tenantID := "1d8b6120dcc640fda4fc9194ffc80273"
+	userID := "c39e3de9be2d4c779f1dfd6abacc176d"
+
+	err := ListRoles(client.ServiceClient(), tenantID, userID).EachPage(func(page pagination.Page) (bool, error) {
+		actual, err := ExtractRoles(page)
+		th.AssertNoErr(t, err)
+
+		expected := []Role{
+			Role{ID: "9fe2ff9ee4384b1894a90878d3e92bab", Name: "foo_role"},
+			Role{ID: "1ea3d56793574b668e85960fbf651e13", Name: "admin"},
+		}
+
+		th.CheckDeepEquals(t, expected, actual)
+
+		return true, nil
+	})
+
+	th.AssertNoErr(t, err)
+}
diff --git a/vendor/github.com/rackspace/gophercloud/openstack/identity/v2/users/results.go b/vendor/github.com/rackspace/gophercloud/openstack/identity/v2/users/results.go
new file mode 100644
index 000000000..f531d5d02
--- /dev/null
+++ b/vendor/github.com/rackspace/gophercloud/openstack/identity/v2/users/results.go
@@ -0,0 +1,128 @@
+package users
+
+import (
+	"github.com/mitchellh/mapstructure"
+
+	"github.com/rackspace/gophercloud"
+	"github.com/rackspace/gophercloud/pagination"
+)
+
+// User represents a user resource that exists on the API.
+type User struct {
+	// The UUID for this user.
+	ID string
+
+	// The human name for this user.
+	Name string
+
+	// The username for this user.
+	Username string
+
+	// Indicates whether the user is enabled (true) or disabled (false).
+	Enabled bool
+
+	// The email address for this user.
+	Email string
+
+	// The ID of the tenant to which this user belongs.
+	TenantID string `mapstructure:"tenant_id"`
+}
+
+// Role assigns specific responsibilities to users, allowing them to accomplish
+// certain API operations whilst scoped to a service.
+type Role struct {
+	// UUID of the role
+	ID string
+
+	// Name of the role
+	Name string
+}
+
+// UserPage is a single page of a User collection.
+type UserPage struct {
+	pagination.SinglePageBase
+}
+
+// RolePage is a single page of a user Role collection.
+type RolePage struct {
+	pagination.SinglePageBase
+}
+
+// IsEmpty determines whether or not a page of Users contains any results.
+func (page UserPage) IsEmpty() (bool, error) {
+	users, err := ExtractUsers(page)
+	if err != nil {
+		return false, err
+	}
+	return len(users) == 0, nil
+}
+
+// ExtractUsers returns a slice of Users contained in a single page of results.
+func ExtractUsers(page pagination.Page) ([]User, error) {
+	casted := page.(UserPage).Body
+	var response struct {
+		Users []User `mapstructure:"users"`
+	}
+
+	err := mapstructure.Decode(casted, &response)
+	return response.Users, err
+}
+
+// IsEmpty determines whether or not a page of Roles contains any results.
+func (page RolePage) IsEmpty() (bool, error) {
+	roles, err := ExtractRoles(page)
+	if err != nil {
+		return false, err
+	}
+	return len(roles) == 0, nil
+}
+
+// ExtractRoles returns a slice of Roles contained in a single page of results.
+func ExtractRoles(page pagination.Page) ([]Role, error) {
+	casted := page.(RolePage).Body
+	var response struct {
+		Roles []Role `mapstructure:"roles"`
+	}
+
+	err := mapstructure.Decode(casted, &response)
+	return response.Roles, err
+}
+
+type commonResult struct {
+	gophercloud.Result
+}
+
+// Extract interprets any commonResult as a User, if possible.
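+// For example, a sketch of pulling a User out of a Get call (identityClient is
+// an assumed name):
+//
+//	user, err := users.Get(identityClient, "c39e3de9be2d4c779f1dfd6abacc176d").Extract()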
+func (r commonResult) Extract() (*User, error) { + if r.Err != nil { + return nil, r.Err + } + + var response struct { + User User `mapstructure:"user"` + } + + err := mapstructure.Decode(r.Body, &response) + + return &response.User, err +} + +// CreateResult represents the result of a Create operation +type CreateResult struct { + commonResult +} + +// GetResult represents the result of a Get operation +type GetResult struct { + commonResult +} + +// UpdateResult represents the result of an Update operation +type UpdateResult struct { + commonResult +} + +// DeleteResult represents the result of a Delete operation +type DeleteResult struct { + commonResult +} diff --git a/vendor/github.com/rackspace/gophercloud/openstack/identity/v2/users/urls.go b/vendor/github.com/rackspace/gophercloud/openstack/identity/v2/users/urls.go new file mode 100644 index 000000000..7ec4385d7 --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/openstack/identity/v2/users/urls.go @@ -0,0 +1,21 @@ +package users + +import "github.com/rackspace/gophercloud" + +const ( + tenantPath = "tenants" + userPath = "users" + rolePath = "roles" +) + +func ResourceURL(c *gophercloud.ServiceClient, id string) string { + return c.ServiceURL(userPath, id) +} + +func rootURL(c *gophercloud.ServiceClient) string { + return c.ServiceURL(userPath) +} + +func listRolesURL(c *gophercloud.ServiceClient, tenantID, userID string) string { + return c.ServiceURL(tenantPath, tenantID, userPath, userID, rolePath) +} diff --git a/vendor/github.com/rackspace/gophercloud/openstack/identity/v3/endpoints/doc.go b/vendor/github.com/rackspace/gophercloud/openstack/identity/v3/endpoints/doc.go new file mode 100644 index 000000000..85163949a --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/openstack/identity/v3/endpoints/doc.go @@ -0,0 +1,6 @@ +// Package endpoints provides information and interaction with the service +// endpoints API resource in the OpenStack Identity service. +// +// For more information, see: +// http://developer.openstack.org/api-ref-identity-v3.html#endpoints-v3 +package endpoints diff --git a/vendor/github.com/rackspace/gophercloud/openstack/identity/v3/endpoints/errors.go b/vendor/github.com/rackspace/gophercloud/openstack/identity/v3/endpoints/errors.go new file mode 100644 index 000000000..854957ff9 --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/openstack/identity/v3/endpoints/errors.go @@ -0,0 +1,21 @@ +package endpoints + +import "fmt" + +func requiredAttribute(attribute string) error { + return fmt.Errorf("You must specify %s for this endpoint.", attribute) +} + +var ( + // ErrAvailabilityRequired is reported if an Endpoint is created without an Availability. + ErrAvailabilityRequired = requiredAttribute("an availability") + + // ErrNameRequired is reported if an Endpoint is created without a Name. + ErrNameRequired = requiredAttribute("a name") + + // ErrURLRequired is reported if an Endpoint is created without a URL. + ErrURLRequired = requiredAttribute("a URL") + + // ErrServiceIDRequired is reported if an Endpoint is created without a ServiceID. 
+ ErrServiceIDRequired = requiredAttribute("a serviceID") +) diff --git a/vendor/github.com/rackspace/gophercloud/openstack/identity/v3/endpoints/requests.go b/vendor/github.com/rackspace/gophercloud/openstack/identity/v3/endpoints/requests.go new file mode 100644 index 000000000..99a495d59 --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/openstack/identity/v3/endpoints/requests.go @@ -0,0 +1,123 @@ +package endpoints + +import ( + "github.com/rackspace/gophercloud" + "github.com/rackspace/gophercloud/pagination" +) + +// EndpointOpts contains the subset of Endpoint attributes that should be used to create or update an Endpoint. +type EndpointOpts struct { + Availability gophercloud.Availability + Name string + Region string + URL string + ServiceID string +} + +// Create inserts a new Endpoint into the service catalog. +// Within EndpointOpts, Region may be omitted by being left as "", but all other fields are required. +func Create(client *gophercloud.ServiceClient, opts EndpointOpts) CreateResult { + // Redefined so that Region can be re-typed as a *string, which can be omitted from the JSON output. + type endpoint struct { + Interface string `json:"interface"` + Name string `json:"name"` + Region *string `json:"region,omitempty"` + URL string `json:"url"` + ServiceID string `json:"service_id"` + } + + type request struct { + Endpoint endpoint `json:"endpoint"` + } + + // Ensure that EndpointOpts is fully populated. + if opts.Availability == "" { + return createErr(ErrAvailabilityRequired) + } + if opts.Name == "" { + return createErr(ErrNameRequired) + } + if opts.URL == "" { + return createErr(ErrURLRequired) + } + if opts.ServiceID == "" { + return createErr(ErrServiceIDRequired) + } + + // Populate the request body. + reqBody := request{ + Endpoint: endpoint{ + Interface: string(opts.Availability), + Name: opts.Name, + URL: opts.URL, + ServiceID: opts.ServiceID, + }, + } + reqBody.Endpoint.Region = gophercloud.MaybeString(opts.Region) + + var result CreateResult + _, result.Err = client.Post(listURL(client), reqBody, &result.Body, nil) + return result +} + +// ListOpts allows finer control over the endpoints returned by a List call. +// All fields are optional. +type ListOpts struct { + Availability gophercloud.Availability `q:"interface"` + ServiceID string `q:"service_id"` + Page int `q:"page"` + PerPage int `q:"per_page"` +} + +// List enumerates endpoints in a paginated collection, optionally filtered by ListOpts criteria. +func List(client *gophercloud.ServiceClient, opts ListOpts) pagination.Pager { + u := listURL(client) + q, err := gophercloud.BuildQueryString(opts) + if err != nil { + return pagination.Pager{Err: err} + } + u += q.String() + createPage := func(r pagination.PageResult) pagination.Page { + return EndpointPage{pagination.LinkedPageBase{PageResult: r}} + } + + return pagination.NewPager(client, u, createPage) +} + +// Update changes an existing endpoint with new data. +// All fields are optional in the provided EndpointOpts. 
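+//
+// A sketch of renaming an endpoint and moving it to another region while
+// leaving every other field untouched (values mirror the tests below;
+// serviceClient is an assumed name):
+//
+//	res := endpoints.Update(serviceClient, "12", endpoints.EndpointOpts{
+//		Name:   "renamed",
+//		Region: "somewhere-else",
+//	})
+//	endpoint, err := res.Extract()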
+func Update(client *gophercloud.ServiceClient, endpointID string, opts EndpointOpts) UpdateResult { + type endpoint struct { + Interface *string `json:"interface,omitempty"` + Name *string `json:"name,omitempty"` + Region *string `json:"region,omitempty"` + URL *string `json:"url,omitempty"` + ServiceID *string `json:"service_id,omitempty"` + } + + type request struct { + Endpoint endpoint `json:"endpoint"` + } + + reqBody := request{Endpoint: endpoint{}} + reqBody.Endpoint.Interface = gophercloud.MaybeString(string(opts.Availability)) + reqBody.Endpoint.Name = gophercloud.MaybeString(opts.Name) + reqBody.Endpoint.Region = gophercloud.MaybeString(opts.Region) + reqBody.Endpoint.URL = gophercloud.MaybeString(opts.URL) + reqBody.Endpoint.ServiceID = gophercloud.MaybeString(opts.ServiceID) + + var result UpdateResult + _, result.Err = client.Request("PATCH", endpointURL(client, endpointID), gophercloud.RequestOpts{ + JSONBody: &reqBody, + JSONResponse: &result.Body, + OkCodes: []int{200}, + }) + return result +} + +// Delete removes an endpoint from the service catalog. +func Delete(client *gophercloud.ServiceClient, endpointID string) DeleteResult { + var res DeleteResult + _, res.Err = client.Delete(endpointURL(client, endpointID), nil) + return res +} diff --git a/vendor/github.com/rackspace/gophercloud/openstack/identity/v3/endpoints/requests_test.go b/vendor/github.com/rackspace/gophercloud/openstack/identity/v3/endpoints/requests_test.go new file mode 100644 index 000000000..80687c4cb --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/openstack/identity/v3/endpoints/requests_test.go @@ -0,0 +1,226 @@ +package endpoints + +import ( + "fmt" + "net/http" + "reflect" + "testing" + + "github.com/rackspace/gophercloud" + "github.com/rackspace/gophercloud/pagination" + "github.com/rackspace/gophercloud/testhelper" + "github.com/rackspace/gophercloud/testhelper/client" +) + +func TestCreateSuccessful(t *testing.T) { + testhelper.SetupHTTP() + defer testhelper.TeardownHTTP() + + testhelper.Mux.HandleFunc("/endpoints", func(w http.ResponseWriter, r *http.Request) { + testhelper.TestMethod(t, r, "POST") + testhelper.TestHeader(t, r, "X-Auth-Token", client.TokenID) + testhelper.TestJSONRequest(t, r, ` + { + "endpoint": { + "interface": "public", + "name": "the-endiest-of-points", + "region": "underground", + "url": "https://1.2.3.4:9000/", + "service_id": "asdfasdfasdfasdf" + } + } + `) + + w.WriteHeader(http.StatusCreated) + fmt.Fprintf(w, ` + { + "endpoint": { + "id": "12", + "interface": "public", + "links": { + "self": "https://localhost:5000/v3/endpoints/12" + }, + "name": "the-endiest-of-points", + "region": "underground", + "service_id": "asdfasdfasdfasdf", + "url": "https://1.2.3.4:9000/" + } + } + `) + }) + + actual, err := Create(client.ServiceClient(), EndpointOpts{ + Availability: gophercloud.AvailabilityPublic, + Name: "the-endiest-of-points", + Region: "underground", + URL: "https://1.2.3.4:9000/", + ServiceID: "asdfasdfasdfasdf", + }).Extract() + if err != nil { + t.Fatalf("Unable to create an endpoint: %v", err) + } + + expected := &Endpoint{ + ID: "12", + Availability: gophercloud.AvailabilityPublic, + Name: "the-endiest-of-points", + Region: "underground", + ServiceID: "asdfasdfasdfasdf", + URL: "https://1.2.3.4:9000/", + } + + if !reflect.DeepEqual(actual, expected) { + t.Errorf("Expected %#v, was %#v", expected, actual) + } +} + +func TestListEndpoints(t *testing.T) { + testhelper.SetupHTTP() + defer testhelper.TeardownHTTP() + + testhelper.Mux.HandleFunc("/endpoints", 
func(w http.ResponseWriter, r *http.Request) { + testhelper.TestMethod(t, r, "GET") + testhelper.TestHeader(t, r, "X-Auth-Token", client.TokenID) + + w.Header().Add("Content-Type", "application/json") + fmt.Fprintf(w, ` + { + "endpoints": [ + { + "id": "12", + "interface": "public", + "links": { + "self": "https://localhost:5000/v3/endpoints/12" + }, + "name": "the-endiest-of-points", + "region": "underground", + "service_id": "asdfasdfasdfasdf", + "url": "https://1.2.3.4:9000/" + }, + { + "id": "13", + "interface": "internal", + "links": { + "self": "https://localhost:5000/v3/endpoints/13" + }, + "name": "shhhh", + "region": "underground", + "service_id": "asdfasdfasdfasdf", + "url": "https://1.2.3.4:9001/" + } + ], + "links": { + "next": null, + "previous": null + } + } + `) + }) + + count := 0 + List(client.ServiceClient(), ListOpts{}).EachPage(func(page pagination.Page) (bool, error) { + count++ + actual, err := ExtractEndpoints(page) + if err != nil { + t.Errorf("Failed to extract endpoints: %v", err) + return false, err + } + + expected := []Endpoint{ + Endpoint{ + ID: "12", + Availability: gophercloud.AvailabilityPublic, + Name: "the-endiest-of-points", + Region: "underground", + ServiceID: "asdfasdfasdfasdf", + URL: "https://1.2.3.4:9000/", + }, + Endpoint{ + ID: "13", + Availability: gophercloud.AvailabilityInternal, + Name: "shhhh", + Region: "underground", + ServiceID: "asdfasdfasdfasdf", + URL: "https://1.2.3.4:9001/", + }, + } + + if !reflect.DeepEqual(expected, actual) { + t.Errorf("Expected %#v, got %#v", expected, actual) + } + + return true, nil + }) + if count != 1 { + t.Errorf("Expected 1 page, got %d", count) + } +} + +func TestUpdateEndpoint(t *testing.T) { + testhelper.SetupHTTP() + defer testhelper.TeardownHTTP() + + testhelper.Mux.HandleFunc("/endpoints/12", func(w http.ResponseWriter, r *http.Request) { + testhelper.TestMethod(t, r, "PATCH") + testhelper.TestHeader(t, r, "X-Auth-Token", client.TokenID) + testhelper.TestJSONRequest(t, r, ` + { + "endpoint": { + "name": "renamed", + "region": "somewhere-else" + } + } + `) + + fmt.Fprintf(w, ` + { + "endpoint": { + "id": "12", + "interface": "public", + "links": { + "self": "https://localhost:5000/v3/endpoints/12" + }, + "name": "renamed", + "region": "somewhere-else", + "service_id": "asdfasdfasdfasdf", + "url": "https://1.2.3.4:9000/" + } + } + `) + }) + + actual, err := Update(client.ServiceClient(), "12", EndpointOpts{ + Name: "renamed", + Region: "somewhere-else", + }).Extract() + if err != nil { + t.Fatalf("Unexpected error from Update: %v", err) + } + + expected := &Endpoint{ + ID: "12", + Availability: gophercloud.AvailabilityPublic, + Name: "renamed", + Region: "somewhere-else", + ServiceID: "asdfasdfasdfasdf", + URL: "https://1.2.3.4:9000/", + } + if !reflect.DeepEqual(expected, actual) { + t.Errorf("Expected %#v, was %#v", expected, actual) + } +} + +func TestDeleteEndpoint(t *testing.T) { + testhelper.SetupHTTP() + defer testhelper.TeardownHTTP() + + testhelper.Mux.HandleFunc("/endpoints/34", func(w http.ResponseWriter, r *http.Request) { + testhelper.TestMethod(t, r, "DELETE") + testhelper.TestHeader(t, r, "X-Auth-Token", client.TokenID) + + w.WriteHeader(http.StatusNoContent) + }) + + res := Delete(client.ServiceClient(), "34") + testhelper.AssertNoErr(t, res.Err) +} diff --git a/vendor/github.com/rackspace/gophercloud/openstack/identity/v3/endpoints/results.go b/vendor/github.com/rackspace/gophercloud/openstack/identity/v3/endpoints/results.go new file mode 100644 index 000000000..128112295 --- 
/dev/null
+++ b/vendor/github.com/rackspace/gophercloud/openstack/identity/v3/endpoints/results.go
@@ -0,0 +1,82 @@
+package endpoints
+
+import (
+	"github.com/mitchellh/mapstructure"
+	"github.com/rackspace/gophercloud"
+	"github.com/rackspace/gophercloud/pagination"
+)
+
+type commonResult struct {
+	gophercloud.Result
+}
+
+// Extract interprets a CreateResult or UpdateResult as a concrete Endpoint.
+// An error is returned if the original call or the extraction failed.
+func (r commonResult) Extract() (*Endpoint, error) {
+	if r.Err != nil {
+		return nil, r.Err
+	}
+
+	var res struct {
+		Endpoint `json:"endpoint"`
+	}
+
+	err := mapstructure.Decode(r.Body, &res)
+
+	return &res.Endpoint, err
+}
+
+// CreateResult is the deferred result of a Create call.
+type CreateResult struct {
+	commonResult
+}
+
+// createErr quickly wraps an error in a CreateResult.
+func createErr(err error) CreateResult {
+	return CreateResult{commonResult{gophercloud.Result{Err: err}}}
+}
+
+// UpdateResult is the deferred result of an Update call.
+type UpdateResult struct {
+	commonResult
+}
+
+// DeleteResult is the deferred result of a Delete call.
+type DeleteResult struct {
+	gophercloud.ErrResult
+}
+
+// Endpoint describes the entry point for another service's API.
+type Endpoint struct {
+	ID           string                   `mapstructure:"id" json:"id"`
+	Availability gophercloud.Availability `mapstructure:"interface" json:"interface"`
+	Name         string                   `mapstructure:"name" json:"name"`
+	Region       string                   `mapstructure:"region" json:"region"`
+	ServiceID    string                   `mapstructure:"service_id" json:"service_id"`
+	URL          string                   `mapstructure:"url" json:"url"`
+}
+
+// EndpointPage is a single page of Endpoint results.
+type EndpointPage struct {
+	pagination.LinkedPageBase
+}
+
+// IsEmpty returns true if no Endpoints were returned.
+func (p EndpointPage) IsEmpty() (bool, error) {
+	es, err := ExtractEndpoints(p)
+	if err != nil {
+		return true, err
+	}
+	return len(es) == 0, nil
+}
+
+// ExtractEndpoints extracts an Endpoint slice from a Page.
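+// It is typically invoked from an EachPage iteration, as in this sketch
+// (serviceClient is an assumed name):
+//
+//	err := endpoints.List(serviceClient, endpoints.ListOpts{}).EachPage(
+//		func(page pagination.Page) (bool, error) {
+//			eps, err := endpoints.ExtractEndpoints(page)
+//			if err != nil {
+//				return false, err
+//			}
+//			// Work with eps here.
+//			return true, nil
+//		})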
+func ExtractEndpoints(page pagination.Page) ([]Endpoint, error) { + var response struct { + Endpoints []Endpoint `mapstructure:"endpoints"` + } + + err := mapstructure.Decode(page.(EndpointPage).Body, &response) + + return response.Endpoints, err +} diff --git a/vendor/github.com/rackspace/gophercloud/openstack/identity/v3/endpoints/urls.go b/vendor/github.com/rackspace/gophercloud/openstack/identity/v3/endpoints/urls.go new file mode 100644 index 000000000..547d7b102 --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/openstack/identity/v3/endpoints/urls.go @@ -0,0 +1,11 @@ +package endpoints + +import "github.com/rackspace/gophercloud" + +func listURL(client *gophercloud.ServiceClient) string { + return client.ServiceURL("endpoints") +} + +func endpointURL(client *gophercloud.ServiceClient, endpointID string) string { + return client.ServiceURL("endpoints", endpointID) +} diff --git a/vendor/github.com/rackspace/gophercloud/openstack/identity/v3/endpoints/urls_test.go b/vendor/github.com/rackspace/gophercloud/openstack/identity/v3/endpoints/urls_test.go new file mode 100644 index 000000000..0b183b743 --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/openstack/identity/v3/endpoints/urls_test.go @@ -0,0 +1,23 @@ +package endpoints + +import ( + "testing" + + "github.com/rackspace/gophercloud" +) + +func TestGetListURL(t *testing.T) { + client := gophercloud.ServiceClient{Endpoint: "http://localhost:5000/v3/"} + url := listURL(&client) + if url != "http://localhost:5000/v3/endpoints" { + t.Errorf("Unexpected list URL generated: [%s]", url) + } +} + +func TestGetEndpointURL(t *testing.T) { + client := gophercloud.ServiceClient{Endpoint: "http://localhost:5000/v3/"} + url := endpointURL(&client, "1234") + if url != "http://localhost:5000/v3/endpoints/1234" { + t.Errorf("Unexpected service URL generated: [%s]", url) + } +} diff --git a/vendor/github.com/rackspace/gophercloud/openstack/identity/v3/roles/doc.go b/vendor/github.com/rackspace/gophercloud/openstack/identity/v3/roles/doc.go new file mode 100644 index 000000000..bdbc674d6 --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/openstack/identity/v3/roles/doc.go @@ -0,0 +1,3 @@ +// Package roles provides information and interaction with the roles API +// resource for the OpenStack Identity service. +package roles diff --git a/vendor/github.com/rackspace/gophercloud/openstack/identity/v3/roles/requests.go b/vendor/github.com/rackspace/gophercloud/openstack/identity/v3/roles/requests.go new file mode 100644 index 000000000..d95c1e52f --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/openstack/identity/v3/roles/requests.go @@ -0,0 +1,50 @@ +package roles + +import ( + "github.com/rackspace/gophercloud" + "github.com/rackspace/gophercloud/pagination" +) + +// ListAssignmentsOptsBuilder allows extensions to add additional parameters to +// the ListAssignments request. +type ListAssignmentsOptsBuilder interface { + ToRolesListAssignmentsQuery() (string, error) +} + +// ListAssignmentsOpts allows you to query the ListAssignments method. +// Specify one of or a combination of GroupId, RoleId, ScopeDomainId, ScopeProjectId, +// and/or UserId to search for roles assigned to corresponding entities. +// Effective lists effective assignments at the user, project, and domain level, +// allowing for the effects of group membership. 
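+//
+// For example, a sketch of querying the effective roles of one user on one
+// project (IDs borrowed from the test fixture below; identityClient is an
+// assumed name):
+//
+//	opts := roles.ListAssignmentsOpts{
+//		UserId:         "313233",
+//		ScopeProjectId: "456789",
+//		Effective:      true,
+//	}
+//	pager := roles.ListAssignments(identityClient, opts)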
+type ListAssignmentsOpts struct { + GroupId string `q:"group.id"` + RoleId string `q:"role.id"` + ScopeDomainId string `q:"scope.domain.id"` + ScopeProjectId string `q:"scope.project.id"` + UserId string `q:"user.id"` + Effective bool `q:"effective"` +} + +// ToRolesListAssignmentsQuery formats a ListAssignmentsOpts into a query string. +func (opts ListAssignmentsOpts) ToRolesListAssignmentsQuery() (string, error) { + q, err := gophercloud.BuildQueryString(opts) + if err != nil { + return "", err + } + return q.String(), nil +} + +// ListAssignments enumerates the roles assigned to a specified resource. +func ListAssignments(client *gophercloud.ServiceClient, opts ListAssignmentsOptsBuilder) pagination.Pager { + url := listAssignmentsURL(client) + query, err := opts.ToRolesListAssignmentsQuery() + if err != nil { + return pagination.Pager{Err: err} + } + url += query + createPage := func(r pagination.PageResult) pagination.Page { + return RoleAssignmentsPage{pagination.LinkedPageBase{PageResult: r}} + } + + return pagination.NewPager(client, url, createPage) +} diff --git a/vendor/github.com/rackspace/gophercloud/openstack/identity/v3/roles/requests_test.go b/vendor/github.com/rackspace/gophercloud/openstack/identity/v3/roles/requests_test.go new file mode 100644 index 000000000..d62dbff9a --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/openstack/identity/v3/roles/requests_test.go @@ -0,0 +1,104 @@ +package roles + +import ( + "fmt" + "net/http" + "reflect" + "testing" + + "github.com/rackspace/gophercloud/pagination" + "github.com/rackspace/gophercloud/testhelper" + "github.com/rackspace/gophercloud/testhelper/client" +) + +func TestListSinglePage(t *testing.T) { + testhelper.SetupHTTP() + defer testhelper.TeardownHTTP() + + testhelper.Mux.HandleFunc("/role_assignments", func(w http.ResponseWriter, r *http.Request) { + testhelper.TestMethod(t, r, "GET") + testhelper.TestHeader(t, r, "X-Auth-Token", client.TokenID) + + w.Header().Add("Content-Type", "application/json") + fmt.Fprintf(w, ` + { + "role_assignments": [ + { + "links": { + "assignment": "http://identity:35357/v3/domains/161718/users/313233/roles/123456" + }, + "role": { + "id": "123456" + }, + "scope": { + "domain": { + "id": "161718" + } + }, + "user": { + "id": "313233" + } + }, + { + "links": { + "assignment": "http://identity:35357/v3/projects/456789/groups/101112/roles/123456", + "membership": "http://identity:35357/v3/groups/101112/users/313233" + }, + "role": { + "id": "123456" + }, + "scope": { + "project": { + "id": "456789" + } + }, + "user": { + "id": "313233" + } + } + ], + "links": { + "self": "http://identity:35357/v3/role_assignments?effective", + "previous": null, + "next": null + } + } + `) + }) + + count := 0 + err := ListAssignments(client.ServiceClient(), ListAssignmentsOpts{}).EachPage(func(page pagination.Page) (bool, error) { + count++ + actual, err := ExtractRoleAssignments(page) + if err != nil { + return false, err + } + + expected := []RoleAssignment{ + RoleAssignment{ + Role: Role{ID: "123456"}, + Scope: Scope{Domain: Domain{ID: "161718"}}, + User: User{ID: "313233"}, + Group: Group{}, + }, + RoleAssignment{ + Role: Role{ID: "123456"}, + Scope: Scope{Project: Project{ID: "456789"}}, + User: User{ID: "313233"}, + Group: Group{}, + }, + } + + if !reflect.DeepEqual(expected, actual) { + t.Errorf("Expected %#v, got %#v", expected, actual) + } + + return true, nil + }) + if err != nil { + t.Errorf("Unexpected error while paging: %v", err) + } + if count != 1 { + t.Errorf("Expected 1 page, got 
%d", count) + } +} diff --git a/vendor/github.com/rackspace/gophercloud/openstack/identity/v3/roles/results.go b/vendor/github.com/rackspace/gophercloud/openstack/identity/v3/roles/results.go new file mode 100644 index 000000000..d25abd25d --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/openstack/identity/v3/roles/results.go @@ -0,0 +1,81 @@ +package roles + +import ( + "github.com/rackspace/gophercloud/pagination" + + "github.com/mitchellh/mapstructure" +) + +// RoleAssignment is the result of a role assignments query. +type RoleAssignment struct { + Role Role `json:"role,omitempty"` + Scope Scope `json:"scope,omitempty"` + User User `json:"user,omitempty"` + Group Group `json:"group,omitempty"` +} + +type Role struct { + ID string `json:"id,omitempty"` +} + +type Scope struct { + Domain Domain `json:"domain,omitempty"` + Project Project `json:"domain,omitempty"` +} + +type Domain struct { + ID string `json:"id,omitempty"` +} + +type Project struct { + ID string `json:"id,omitempty"` +} + +type User struct { + ID string `json:"id,omitempty"` +} + +type Group struct { + ID string `json:"id,omitempty"` +} + +// RoleAssignmentsPage is a single page of RoleAssignments results. +type RoleAssignmentsPage struct { + pagination.LinkedPageBase +} + +// IsEmpty returns true if the page contains no results. +func (p RoleAssignmentsPage) IsEmpty() (bool, error) { + roleAssignments, err := ExtractRoleAssignments(p) + if err != nil { + return true, err + } + return len(roleAssignments) == 0, nil +} + +// NextPageURL uses the response's embedded link reference to navigate to the next page of results. +func (page RoleAssignmentsPage) NextPageURL() (string, error) { + type resp struct { + Links struct { + Next string `mapstructure:"next"` + } `mapstructure:"links"` + } + + var r resp + err := mapstructure.Decode(page.Body, &r) + if err != nil { + return "", err + } + + return r.Links.Next, nil +} + +// ExtractRoleAssignments extracts a slice of RoleAssignments from a Collection acquired from List. 
+func ExtractRoleAssignments(page pagination.Page) ([]RoleAssignment, error) { + var response struct { + RoleAssignments []RoleAssignment `mapstructure:"role_assignments"` + } + + err := mapstructure.Decode(page.(RoleAssignmentsPage).Body, &response) + return response.RoleAssignments, err +} diff --git a/vendor/github.com/rackspace/gophercloud/openstack/identity/v3/roles/urls.go b/vendor/github.com/rackspace/gophercloud/openstack/identity/v3/roles/urls.go new file mode 100644 index 000000000..b009340d0 --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/openstack/identity/v3/roles/urls.go @@ -0,0 +1,7 @@ +package roles + +import "github.com/rackspace/gophercloud" + +func listAssignmentsURL(client *gophercloud.ServiceClient) string { + return client.ServiceURL("role_assignments") +} diff --git a/vendor/github.com/rackspace/gophercloud/openstack/identity/v3/roles/urls_test.go b/vendor/github.com/rackspace/gophercloud/openstack/identity/v3/roles/urls_test.go new file mode 100644 index 000000000..04679dae4 --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/openstack/identity/v3/roles/urls_test.go @@ -0,0 +1,15 @@ +package roles + +import ( + "testing" + + "github.com/rackspace/gophercloud" +) + +func TestListAssignmentsURL(t *testing.T) { + client := gophercloud.ServiceClient{Endpoint: "http://localhost:5000/v3/"} + url := listAssignmentsURL(&client) + if url != "http://localhost:5000/v3/role_assignments" { + t.Errorf("Unexpected list URL generated: [%s]", url) + } +} diff --git a/vendor/github.com/rackspace/gophercloud/openstack/identity/v3/services/doc.go b/vendor/github.com/rackspace/gophercloud/openstack/identity/v3/services/doc.go new file mode 100644 index 000000000..fa5641185 --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/openstack/identity/v3/services/doc.go @@ -0,0 +1,3 @@ +// Package services provides information and interaction with the services API +// resource for the OpenStack Identity service. +package services diff --git a/vendor/github.com/rackspace/gophercloud/openstack/identity/v3/services/requests.go b/vendor/github.com/rackspace/gophercloud/openstack/identity/v3/services/requests.go new file mode 100644 index 000000000..3ee924f3e --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/openstack/identity/v3/services/requests.go @@ -0,0 +1,77 @@ +package services + +import ( + "github.com/rackspace/gophercloud" + "github.com/rackspace/gophercloud/pagination" +) + +type response struct { + Service Service `json:"service"` +} + +// Create adds a new service of the requested type to the catalog. +func Create(client *gophercloud.ServiceClient, serviceType string) CreateResult { + type request struct { + Type string `json:"type"` + } + + req := request{Type: serviceType} + + var result CreateResult + _, result.Err = client.Post(listURL(client), req, &result.Body, nil) + return result +} + +// ListOpts allows you to query the List method. +type ListOpts struct { + ServiceType string `q:"type"` + PerPage int `q:"perPage"` + Page int `q:"page"` +} + +// List enumerates the services available to a specific user. 
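+//
+// A minimal usage sketch (the identityClient name is illustrative):
+//
+//	err := List(identityClient, ListOpts{ServiceType: "compute"}).EachPage(
+//		func(page pagination.Page) (bool, error) {
+//			svcs, err := ExtractServices(page)
+//			if err != nil {
+//				return false, err
+//			}
+//			for _, s := range svcs {
+//				fmt.Println(s.ID, s.Name, s.Type)
+//			}
+//			return true, nil
+//		})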
+func List(client *gophercloud.ServiceClient, opts ListOpts) pagination.Pager {
+	u := listURL(client)
+	q, err := gophercloud.BuildQueryString(opts)
+	if err != nil {
+		return pagination.Pager{Err: err}
+	}
+	u += q.String()
+	createPage := func(r pagination.PageResult) pagination.Page {
+		return ServicePage{pagination.LinkedPageBase{PageResult: r}}
+	}
+
+	return pagination.NewPager(client, u, createPage)
+}
+
+// Get returns additional information about a service, given its ID.
+func Get(client *gophercloud.ServiceClient, serviceID string) GetResult {
+	var result GetResult
+	_, result.Err = client.Get(serviceURL(client, serviceID), &result.Body, nil)
+	return result
+}
+
+// Update changes the service type of an existing service.
+func Update(client *gophercloud.ServiceClient, serviceID string, serviceType string) UpdateResult {
+	type request struct {
+		Type string `json:"type"`
+	}
+
+	req := request{Type: serviceType}
+
+	var result UpdateResult
+	_, result.Err = client.Request("PATCH", serviceURL(client, serviceID), gophercloud.RequestOpts{
+		JSONBody:     &req,
+		JSONResponse: &result.Body,
+		OkCodes:      []int{200},
+	})
+	return result
+}
+
+// Delete removes an existing service. It either deletes all endpoints
+// associated with the service, or fails until all of those endpoints have
+// been deleted.
+func Delete(client *gophercloud.ServiceClient, serviceID string) DeleteResult {
+	var res DeleteResult
+	_, res.Err = client.Delete(serviceURL(client, serviceID), nil)
+	return res
+}
diff --git a/vendor/github.com/rackspace/gophercloud/openstack/identity/v3/services/requests_test.go b/vendor/github.com/rackspace/gophercloud/openstack/identity/v3/services/requests_test.go
new file mode 100644
index 000000000..42f05d365
--- /dev/null
+++ b/vendor/github.com/rackspace/gophercloud/openstack/identity/v3/services/requests_test.go
@@ -0,0 +1,209 @@
+package services
+
+import (
+	"fmt"
+	"net/http"
+	"reflect"
+	"testing"
+
+	"github.com/rackspace/gophercloud/pagination"
+	"github.com/rackspace/gophercloud/testhelper"
+	"github.com/rackspace/gophercloud/testhelper/client"
+)
+
+func TestCreateSuccessful(t *testing.T) {
+	testhelper.SetupHTTP()
+	defer testhelper.TeardownHTTP()
+
+	testhelper.Mux.HandleFunc("/services", func(w http.ResponseWriter, r *http.Request) {
+		testhelper.TestMethod(t, r, "POST")
+		testhelper.TestHeader(t, r, "X-Auth-Token", client.TokenID)
+		testhelper.TestJSONRequest(t, r, `{ "type": "compute" }`)
+
+		w.Header().Add("Content-Type", "application/json")
+		w.WriteHeader(http.StatusCreated)
+		fmt.Fprintf(w, `{
+			"service": {
+				"description": "Here's your service",
+				"id": "1234",
+				"name": "InscrutableOpenStackProjectName",
+				"type": "compute"
+			}
+		}`)
+	})
+
+	result, err := Create(client.ServiceClient(), "compute").Extract()
+	if err != nil {
+		t.Fatalf("Unexpected error from Create: %v", err)
+	}
+
+	if result.Description == nil || *result.Description != "Here's your service" {
+		t.Errorf("Service description was unexpected [%s]", *result.Description)
+	}
+	if result.ID != "1234" {
+		t.Errorf("Service ID was unexpected [%s]", result.ID)
+	}
+	if result.Name != "InscrutableOpenStackProjectName" {
+		t.Errorf("Service name was unexpected [%s]", result.Name)
+	}
+	if result.Type != "compute" {
+		t.Errorf("Service type was unexpected [%s]", result.Type)
+	}
+}
+
+func TestListSinglePage(t *testing.T) {
+	testhelper.SetupHTTP()
+	defer testhelper.TeardownHTTP()
+
+	testhelper.Mux.HandleFunc("/services", func(w http.ResponseWriter, r *http.Request) {
+		testhelper.TestMethod(t, r, "GET")
+
testhelper.TestHeader(t, r, "X-Auth-Token", client.TokenID) + + w.Header().Add("Content-Type", "application/json") + fmt.Fprintf(w, ` + { + "links": { + "next": null, + "previous": null + }, + "services": [ + { + "description": "Service One", + "id": "1234", + "name": "service-one", + "type": "identity" + }, + { + "description": "Service Two", + "id": "9876", + "name": "service-two", + "type": "compute" + } + ] + } + `) + }) + + count := 0 + err := List(client.ServiceClient(), ListOpts{}).EachPage(func(page pagination.Page) (bool, error) { + count++ + actual, err := ExtractServices(page) + if err != nil { + return false, err + } + + desc0 := "Service One" + desc1 := "Service Two" + expected := []Service{ + Service{ + Description: &desc0, + ID: "1234", + Name: "service-one", + Type: "identity", + }, + Service{ + Description: &desc1, + ID: "9876", + Name: "service-two", + Type: "compute", + }, + } + + if !reflect.DeepEqual(expected, actual) { + t.Errorf("Expected %#v, got %#v", expected, actual) + } + + return true, nil + }) + if err != nil { + t.Errorf("Unexpected error while paging: %v", err) + } + if count != 1 { + t.Errorf("Expected 1 page, got %d", count) + } +} + +func TestGetSuccessful(t *testing.T) { + testhelper.SetupHTTP() + defer testhelper.TeardownHTTP() + + testhelper.Mux.HandleFunc("/services/12345", func(w http.ResponseWriter, r *http.Request) { + testhelper.TestMethod(t, r, "GET") + testhelper.TestHeader(t, r, "X-Auth-Token", client.TokenID) + + w.Header().Add("Content-Type", "application/json") + fmt.Fprintf(w, ` + { + "service": { + "description": "Service One", + "id": "12345", + "name": "service-one", + "type": "identity" + } + } + `) + }) + + result, err := Get(client.ServiceClient(), "12345").Extract() + if err != nil { + t.Fatalf("Error fetching service information: %v", err) + } + + if result.ID != "12345" { + t.Errorf("Unexpected service ID: %s", result.ID) + } + if *result.Description != "Service One" { + t.Errorf("Unexpected service description: [%s]", *result.Description) + } + if result.Name != "service-one" { + t.Errorf("Unexpected service name: [%s]", result.Name) + } + if result.Type != "identity" { + t.Errorf("Unexpected service type: [%s]", result.Type) + } +} + +func TestUpdateSuccessful(t *testing.T) { + testhelper.SetupHTTP() + defer testhelper.TeardownHTTP() + + testhelper.Mux.HandleFunc("/services/12345", func(w http.ResponseWriter, r *http.Request) { + testhelper.TestMethod(t, r, "PATCH") + testhelper.TestHeader(t, r, "X-Auth-Token", client.TokenID) + testhelper.TestJSONRequest(t, r, `{ "type": "lasermagic" }`) + + w.Header().Add("Content-Type", "application/json") + fmt.Fprintf(w, ` + { + "service": { + "id": "12345", + "type": "lasermagic" + } + } + `) + }) + + result, err := Update(client.ServiceClient(), "12345", "lasermagic").Extract() + if err != nil { + t.Fatalf("Unable to update service: %v", err) + } + + if result.ID != "12345" { + t.Fatalf("Expected ID 12345, was %s", result.ID) + } +} + +func TestDeleteSuccessful(t *testing.T) { + testhelper.SetupHTTP() + defer testhelper.TeardownHTTP() + + testhelper.Mux.HandleFunc("/services/12345", func(w http.ResponseWriter, r *http.Request) { + testhelper.TestMethod(t, r, "DELETE") + testhelper.TestHeader(t, r, "X-Auth-Token", client.TokenID) + + w.WriteHeader(http.StatusNoContent) + }) + + res := Delete(client.ServiceClient(), "12345") + testhelper.AssertNoErr(t, res.Err) +} diff --git a/vendor/github.com/rackspace/gophercloud/openstack/identity/v3/services/results.go 
b/vendor/github.com/rackspace/gophercloud/openstack/identity/v3/services/results.go
new file mode 100644
index 000000000..1d0d14128
--- /dev/null
+++ b/vendor/github.com/rackspace/gophercloud/openstack/identity/v3/services/results.go
@@ -0,0 +1,80 @@
+package services
+
+import (
+	"github.com/rackspace/gophercloud"
+	"github.com/rackspace/gophercloud/pagination"
+
+	"github.com/mitchellh/mapstructure"
+)
+
+type commonResult struct {
+	gophercloud.Result
+}
+
+// Extract interprets a GetResult, CreateResult or UpdateResult as a concrete Service.
+// An error is returned if the original call or the extraction failed.
+func (r commonResult) Extract() (*Service, error) {
+	if r.Err != nil {
+		return nil, r.Err
+	}
+
+	var res struct {
+		Service `json:"service"`
+	}
+
+	err := mapstructure.Decode(r.Body, &res)
+
+	return &res.Service, err
+}
+
+// CreateResult is the deferred result of a Create call.
+type CreateResult struct {
+	commonResult
+}
+
+// GetResult is the deferred result of a Get call.
+type GetResult struct {
+	commonResult
+}
+
+// UpdateResult is the deferred result of an Update call.
+type UpdateResult struct {
+	commonResult
+}
+
+// DeleteResult is the deferred result of a Delete call.
+type DeleteResult struct {
+	gophercloud.ErrResult
+}
+
+// Service is the result of a list or information query.
+type Service struct {
+	Description *string `json:"description,omitempty"`
+	ID          string  `json:"id"`
+	Name        string  `json:"name"`
+	Type        string  `json:"type"`
+}
+
+// ServicePage is a single page of Service results.
+type ServicePage struct {
+	pagination.LinkedPageBase
+}
+
+// IsEmpty returns true if the page contains no results.
+func (p ServicePage) IsEmpty() (bool, error) {
+	services, err := ExtractServices(p)
+	if err != nil {
+		return true, err
+	}
+	return len(services) == 0, nil
+}
+
+// ExtractServices extracts a slice of Services from a Collection acquired from List.
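+//
+// It is typically called from inside an EachPage iteration (a sketch; page is
+// the pagination.Page handed to the callback):
+//
+//	svcs, err := ExtractServices(page)
+//	if err != nil {
+//		return false, err
+//	}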
+func ExtractServices(page pagination.Page) ([]Service, error) { + var response struct { + Services []Service `mapstructure:"services"` + } + + err := mapstructure.Decode(page.(ServicePage).Body, &response) + return response.Services, err +} diff --git a/vendor/github.com/rackspace/gophercloud/openstack/identity/v3/services/urls.go b/vendor/github.com/rackspace/gophercloud/openstack/identity/v3/services/urls.go new file mode 100644 index 000000000..85443a48a --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/openstack/identity/v3/services/urls.go @@ -0,0 +1,11 @@ +package services + +import "github.com/rackspace/gophercloud" + +func listURL(client *gophercloud.ServiceClient) string { + return client.ServiceURL("services") +} + +func serviceURL(client *gophercloud.ServiceClient, serviceID string) string { + return client.ServiceURL("services", serviceID) +} diff --git a/vendor/github.com/rackspace/gophercloud/openstack/identity/v3/services/urls_test.go b/vendor/github.com/rackspace/gophercloud/openstack/identity/v3/services/urls_test.go new file mode 100644 index 000000000..5a31b3231 --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/openstack/identity/v3/services/urls_test.go @@ -0,0 +1,23 @@ +package services + +import ( + "testing" + + "github.com/rackspace/gophercloud" +) + +func TestListURL(t *testing.T) { + client := gophercloud.ServiceClient{Endpoint: "http://localhost:5000/v3/"} + url := listURL(&client) + if url != "http://localhost:5000/v3/services" { + t.Errorf("Unexpected list URL generated: [%s]", url) + } +} + +func TestServiceURL(t *testing.T) { + client := gophercloud.ServiceClient{Endpoint: "http://localhost:5000/v3/"} + url := serviceURL(&client, "1234") + if url != "http://localhost:5000/v3/services/1234" { + t.Errorf("Unexpected service URL generated: [%s]", url) + } +} diff --git a/vendor/github.com/rackspace/gophercloud/openstack/identity/v3/tokens/doc.go b/vendor/github.com/rackspace/gophercloud/openstack/identity/v3/tokens/doc.go new file mode 100644 index 000000000..76ff5f473 --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/openstack/identity/v3/tokens/doc.go @@ -0,0 +1,6 @@ +// Package tokens provides information and interaction with the token API +// resource for the OpenStack Identity service. +// +// For more information, see: +// http://developer.openstack.org/api-ref-identity-v3.html#tokens-v3 +package tokens diff --git a/vendor/github.com/rackspace/gophercloud/openstack/identity/v3/tokens/errors.go b/vendor/github.com/rackspace/gophercloud/openstack/identity/v3/tokens/errors.go new file mode 100644 index 000000000..44761092b --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/openstack/identity/v3/tokens/errors.go @@ -0,0 +1,72 @@ +package tokens + +import ( + "errors" + "fmt" +) + +func unacceptedAttributeErr(attribute string) error { + return fmt.Errorf("The base Identity V3 API does not accept authentication by %s", attribute) +} + +func redundantWithTokenErr(attribute string) error { + return fmt.Errorf("%s may not be provided when authenticating with a TokenID", attribute) +} + +func redundantWithUserID(attribute string) error { + return fmt.Errorf("%s may not be provided when authenticating with a UserID", attribute) +} + +var ( + // ErrAPIKeyProvided indicates that an APIKey was provided but can't be used. + ErrAPIKeyProvided = unacceptedAttributeErr("APIKey") + + // ErrTenantIDProvided indicates that a TenantID was provided but can't be used. 
+	ErrTenantIDProvided = unacceptedAttributeErr("TenantID")
+
+	// ErrTenantNameProvided indicates that a TenantName was provided but can't be used.
+	ErrTenantNameProvided = unacceptedAttributeErr("TenantName")
+
+	// ErrUsernameWithToken indicates that a Username was provided, but token authentication is being used instead.
+	ErrUsernameWithToken = redundantWithTokenErr("Username")
+
+	// ErrUserIDWithToken indicates that a UserID was provided, but token authentication is being used instead.
+	ErrUserIDWithToken = redundantWithTokenErr("UserID")
+
+	// ErrDomainIDWithToken indicates that a DomainID was provided, but token authentication is being used instead.
+	ErrDomainIDWithToken = redundantWithTokenErr("DomainID")
+
+	// ErrDomainNameWithToken indicates that a DomainName was provided, but token authentication is being used instead.
+	ErrDomainNameWithToken = redundantWithTokenErr("DomainName")
+
+	// ErrUsernameOrUserID indicates that neither Username nor UserID is specified, or that both are specified at once.
+	ErrUsernameOrUserID = errors.New("Exactly one of Username and UserID must be provided for password authentication")
+
+	// ErrDomainIDWithUserID indicates that a DomainID was provided, but is unnecessary because a UserID is being used.
+	ErrDomainIDWithUserID = redundantWithUserID("DomainID")
+
+	// ErrDomainNameWithUserID indicates that a DomainName was provided, but is unnecessary because a UserID is being used.
+	ErrDomainNameWithUserID = redundantWithUserID("DomainName")
+
+	// ErrDomainIDOrDomainName indicates that a Username was provided, but no domain to scope it.
+	// It may also indicate that both a DomainID and a DomainName were provided at once.
+	ErrDomainIDOrDomainName = errors.New("You must provide exactly one of DomainID or DomainName to authenticate by Username")
+
+	// ErrMissingPassword indicates that no password was provided and no token is available.
+	ErrMissingPassword = errors.New("You must provide a password to authenticate")
+
+	// ErrScopeDomainIDOrDomainName indicates that a domain ID or name was required in a Scope, but not present.
+	ErrScopeDomainIDOrDomainName = errors.New("You must provide exactly one of DomainID or DomainName in a Scope with ProjectName")
+
+	// ErrScopeProjectIDOrProjectName indicates that both a ProjectID and a ProjectName were provided in a Scope.
+	ErrScopeProjectIDOrProjectName = errors.New("You must provide at most one of ProjectID or ProjectName in a Scope")
+
+	// ErrScopeProjectIDAlone indicates that a ProjectID was provided with other constraints in a Scope.
+	ErrScopeProjectIDAlone = errors.New("ProjectID must be supplied alone in a Scope")
+
+	// ErrScopeDomainName indicates that a DomainName was provided alone in a Scope.
+	ErrScopeDomainName = errors.New("DomainName must be supplied with a ProjectName or ProjectID in a Scope")
+
+	// ErrScopeEmpty indicates that no credentials were provided in a Scope.
+	ErrScopeEmpty = errors.New("You must provide either a Project or Domain in a Scope")
+)
diff --git a/vendor/github.com/rackspace/gophercloud/openstack/identity/v3/tokens/requests.go b/vendor/github.com/rackspace/gophercloud/openstack/identity/v3/tokens/requests.go
new file mode 100644
index 000000000..d449ca36e
--- /dev/null
+++ b/vendor/github.com/rackspace/gophercloud/openstack/identity/v3/tokens/requests.go
@@ -0,0 +1,281 @@
+package tokens
+
+import (
+	"net/http"
+
+	"github.com/rackspace/gophercloud"
+)
+
+// Scope allows a created token to be limited to a specific domain or project.
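+// For example (a sketch; the ID values are illustrative):
+//
+//	// Token scoped to a single project:
+//	projectScope := &Scope{ProjectID: "1234"}
+//	// Token scoped to an entire domain:
+//	domainScope := &Scope{DomainID: "5678"}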
+type Scope struct { + ProjectID string + ProjectName string + DomainID string + DomainName string +} + +func subjectTokenHeaders(c *gophercloud.ServiceClient, subjectToken string) map[string]string { + h := c.AuthenticatedHeaders() + h["X-Subject-Token"] = subjectToken + return h +} + +// Create authenticates and either generates a new token, or changes the Scope of an existing token. +func Create(c *gophercloud.ServiceClient, options gophercloud.AuthOptions, scope *Scope) CreateResult { + type domainReq struct { + ID *string `json:"id,omitempty"` + Name *string `json:"name,omitempty"` + } + + type projectReq struct { + Domain *domainReq `json:"domain,omitempty"` + Name *string `json:"name,omitempty"` + ID *string `json:"id,omitempty"` + } + + type userReq struct { + ID *string `json:"id,omitempty"` + Name *string `json:"name,omitempty"` + Password string `json:"password"` + Domain *domainReq `json:"domain,omitempty"` + } + + type passwordReq struct { + User userReq `json:"user"` + } + + type tokenReq struct { + ID string `json:"id"` + } + + type identityReq struct { + Methods []string `json:"methods"` + Password *passwordReq `json:"password,omitempty"` + Token *tokenReq `json:"token,omitempty"` + } + + type scopeReq struct { + Domain *domainReq `json:"domain,omitempty"` + Project *projectReq `json:"project,omitempty"` + } + + type authReq struct { + Identity identityReq `json:"identity"` + Scope *scopeReq `json:"scope,omitempty"` + } + + type request struct { + Auth authReq `json:"auth"` + } + + // Populate the request structure based on the provided arguments. Create and return an error + // if insufficient or incompatible information is present. + var req request + + // Test first for unrecognized arguments. + if options.APIKey != "" { + return createErr(ErrAPIKeyProvided) + } + if options.TenantID != "" { + return createErr(ErrTenantIDProvided) + } + if options.TenantName != "" { + return createErr(ErrTenantNameProvided) + } + + if options.Password == "" { + if c.TokenID != "" { + // Because we aren't using password authentication, it's an error to also provide any of the user-based authentication + // parameters. + if options.Username != "" { + return createErr(ErrUsernameWithToken) + } + if options.UserID != "" { + return createErr(ErrUserIDWithToken) + } + if options.DomainID != "" { + return createErr(ErrDomainIDWithToken) + } + if options.DomainName != "" { + return createErr(ErrDomainNameWithToken) + } + + // Configure the request for Token authentication. + req.Auth.Identity.Methods = []string{"token"} + req.Auth.Identity.Token = &tokenReq{ + ID: c.TokenID, + } + } else { + // If no password or token ID are available, authentication can't continue. + return createErr(ErrMissingPassword) + } + } else { + // Password authentication. + req.Auth.Identity.Methods = []string{"password"} + + // At least one of Username and UserID must be specified. + if options.Username == "" && options.UserID == "" { + return createErr(ErrUsernameOrUserID) + } + + if options.Username != "" { + // If Username is provided, UserID may not be provided. + if options.UserID != "" { + return createErr(ErrUsernameOrUserID) + } + + // Either DomainID or DomainName must also be specified. + if options.DomainID == "" && options.DomainName == "" { + return createErr(ErrDomainIDOrDomainName) + } + + if options.DomainID != "" { + if options.DomainName != "" { + return createErr(ErrDomainIDOrDomainName) + } + + // Configure the request for Username and Password authentication with a DomainID. 
+ req.Auth.Identity.Password = &passwordReq{ + User: userReq{ + Name: &options.Username, + Password: options.Password, + Domain: &domainReq{ID: &options.DomainID}, + }, + } + } + + if options.DomainName != "" { + // Configure the request for Username and Password authentication with a DomainName. + req.Auth.Identity.Password = &passwordReq{ + User: userReq{ + Name: &options.Username, + Password: options.Password, + Domain: &domainReq{Name: &options.DomainName}, + }, + } + } + } + + if options.UserID != "" { + // If UserID is specified, neither DomainID nor DomainName may be. + if options.DomainID != "" { + return createErr(ErrDomainIDWithUserID) + } + if options.DomainName != "" { + return createErr(ErrDomainNameWithUserID) + } + + // Configure the request for UserID and Password authentication. + req.Auth.Identity.Password = &passwordReq{ + User: userReq{ID: &options.UserID, Password: options.Password}, + } + } + } + + // Add a "scope" element if a Scope has been provided. + if scope != nil { + if scope.ProjectName != "" { + // ProjectName provided: either DomainID or DomainName must also be supplied. + // ProjectID may not be supplied. + if scope.DomainID == "" && scope.DomainName == "" { + return createErr(ErrScopeDomainIDOrDomainName) + } + if scope.ProjectID != "" { + return createErr(ErrScopeProjectIDOrProjectName) + } + + if scope.DomainID != "" { + // ProjectName + DomainID + req.Auth.Scope = &scopeReq{ + Project: &projectReq{ + Name: &scope.ProjectName, + Domain: &domainReq{ID: &scope.DomainID}, + }, + } + } + + if scope.DomainName != "" { + // ProjectName + DomainName + req.Auth.Scope = &scopeReq{ + Project: &projectReq{ + Name: &scope.ProjectName, + Domain: &domainReq{Name: &scope.DomainName}, + }, + } + } + } else if scope.ProjectID != "" { + // ProjectID provided. ProjectName, DomainID, and DomainName may not be provided. + if scope.DomainID != "" { + return createErr(ErrScopeProjectIDAlone) + } + if scope.DomainName != "" { + return createErr(ErrScopeProjectIDAlone) + } + + // ProjectID + req.Auth.Scope = &scopeReq{ + Project: &projectReq{ID: &scope.ProjectID}, + } + } else if scope.DomainID != "" { + // DomainID provided. ProjectID, ProjectName, and DomainName may not be provided. + if scope.DomainName != "" { + return createErr(ErrScopeDomainIDOrDomainName) + } + + // DomainID + req.Auth.Scope = &scopeReq{ + Domain: &domainReq{ID: &scope.DomainID}, + } + } else if scope.DomainName != "" { + return createErr(ErrScopeDomainName) + } else { + return createErr(ErrScopeEmpty) + } + } + + var result CreateResult + var response *http.Response + response, result.Err = c.Post(tokenURL(c), req, &result.Body, nil) + if result.Err != nil { + return result + } + result.Header = response.Header + return result +} + +// Get validates and retrieves information about another token. +func Get(c *gophercloud.ServiceClient, token string) GetResult { + var result GetResult + var response *http.Response + response, result.Err = c.Get(tokenURL(c), &result.Body, &gophercloud.RequestOpts{ + MoreHeaders: subjectTokenHeaders(c, token), + OkCodes: []int{200, 203}, + }) + if result.Err != nil { + return result + } + result.Header = response.Header + return result +} + +// Validate determines if a specified token is valid or not. 
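+//
+// A minimal usage sketch (identityClient and subjectTokenID are illustrative
+// names):
+//
+//	ok, err := Validate(identityClient, subjectTokenID)
+//	if err == nil && !ok {
+//		// the token has expired or been revoked
+//	}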
+func Validate(c *gophercloud.ServiceClient, token string) (bool, error) {
+	response, err := c.Request("HEAD", tokenURL(c), gophercloud.RequestOpts{
+		MoreHeaders: subjectTokenHeaders(c, token),
+		OkCodes:     []int{204, 404},
+	})
+	if err != nil {
+		return false, err
+	}
+
+	return response.StatusCode == 204, nil
+}
+
+// Revoke immediately makes the specified token invalid.
+func Revoke(c *gophercloud.ServiceClient, token string) RevokeResult {
+	var res RevokeResult
+	_, res.Err = c.Delete(tokenURL(c), &gophercloud.RequestOpts{
+		MoreHeaders: subjectTokenHeaders(c, token),
+	})
+	return res
+}
diff --git a/vendor/github.com/rackspace/gophercloud/openstack/identity/v3/tokens/requests_test.go b/vendor/github.com/rackspace/gophercloud/openstack/identity/v3/tokens/requests_test.go
new file mode 100644
index 000000000..2b26e4ad3
--- /dev/null
+++ b/vendor/github.com/rackspace/gophercloud/openstack/identity/v3/tokens/requests_test.go
@@ -0,0 +1,514 @@
+package tokens
+
+import (
+	"fmt"
+	"net/http"
+	"testing"
+	"time"
+
+	"github.com/rackspace/gophercloud"
+	"github.com/rackspace/gophercloud/testhelper"
+)
+
+// authTokenPost verifies that providing certain AuthOptions and Scope results in an expected JSON structure.
+func authTokenPost(t *testing.T, options gophercloud.AuthOptions, scope *Scope, requestJSON string) {
+	testhelper.SetupHTTP()
+	defer testhelper.TeardownHTTP()
+
+	client := gophercloud.ServiceClient{
+		ProviderClient: &gophercloud.ProviderClient{
+			TokenID: "12345abcdef",
+		},
+		Endpoint: testhelper.Endpoint(),
+	}
+
+	testhelper.Mux.HandleFunc("/auth/tokens", func(w http.ResponseWriter, r *http.Request) {
+		testhelper.TestMethod(t, r, "POST")
+		testhelper.TestHeader(t, r, "Content-Type", "application/json")
+		testhelper.TestHeader(t, r, "Accept", "application/json")
+		testhelper.TestJSONRequest(t, r, requestJSON)
+
+		w.WriteHeader(http.StatusCreated)
+		fmt.Fprintf(w, `{
+			"token": {
+				"expires_at": "2014-10-02T13:45:00.000000Z"
+			}
+		}`)
+	})
+
+	_, err := Create(&client, options, scope).Extract()
+	if err != nil {
+		t.Errorf("Create returned an error: %v", err)
+	}
+}
+
+func authTokenPostErr(t *testing.T, options gophercloud.AuthOptions, scope *Scope, includeToken bool, expectedErr error) {
+	testhelper.SetupHTTP()
+	defer testhelper.TeardownHTTP()
+
+	client := gophercloud.ServiceClient{
+		ProviderClient: &gophercloud.ProviderClient{},
+		Endpoint:       testhelper.Endpoint(),
+	}
+	if includeToken {
+		client.TokenID = "abcdef123456"
+	}
+
+	_, err := Create(&client, options, scope).Extract()
+	if err == nil {
+		t.Errorf("Create did NOT return an error")
+	}
+	if err != expectedErr {
+		t.Errorf("Create returned an unexpected error: wanted %v, got %v", expectedErr, err)
+	}
+}
+
+func TestCreateUserIDAndPassword(t *testing.T) {
+	authTokenPost(t, gophercloud.AuthOptions{UserID: "me", Password: "squirrel!"}, nil, `
+		{
+			"auth": {
+				"identity": {
+					"methods": ["password"],
+					"password": {
+						"user": { "id": "me", "password": "squirrel!"
} + } + } + } + } + `) +} + +func TestCreateUsernameDomainIDPassword(t *testing.T) { + authTokenPost(t, gophercloud.AuthOptions{Username: "fakey", Password: "notpassword", DomainID: "abc123"}, nil, ` + { + "auth": { + "identity": { + "methods": ["password"], + "password": { + "user": { + "domain": { + "id": "abc123" + }, + "name": "fakey", + "password": "notpassword" + } + } + } + } + } + `) +} + +func TestCreateUsernameDomainNamePassword(t *testing.T) { + authTokenPost(t, gophercloud.AuthOptions{Username: "frank", Password: "swordfish", DomainName: "spork.net"}, nil, ` + { + "auth": { + "identity": { + "methods": ["password"], + "password": { + "user": { + "domain": { + "name": "spork.net" + }, + "name": "frank", + "password": "swordfish" + } + } + } + } + } + `) +} + +func TestCreateTokenID(t *testing.T) { + authTokenPost(t, gophercloud.AuthOptions{}, nil, ` + { + "auth": { + "identity": { + "methods": ["token"], + "token": { + "id": "12345abcdef" + } + } + } + } + `) +} + +func TestCreateProjectIDScope(t *testing.T) { + options := gophercloud.AuthOptions{UserID: "fenris", Password: "g0t0h311"} + scope := &Scope{ProjectID: "123456"} + authTokenPost(t, options, scope, ` + { + "auth": { + "identity": { + "methods": ["password"], + "password": { + "user": { + "id": "fenris", + "password": "g0t0h311" + } + } + }, + "scope": { + "project": { + "id": "123456" + } + } + } + } + `) +} + +func TestCreateDomainIDScope(t *testing.T) { + options := gophercloud.AuthOptions{UserID: "fenris", Password: "g0t0h311"} + scope := &Scope{DomainID: "1000"} + authTokenPost(t, options, scope, ` + { + "auth": { + "identity": { + "methods": ["password"], + "password": { + "user": { + "id": "fenris", + "password": "g0t0h311" + } + } + }, + "scope": { + "domain": { + "id": "1000" + } + } + } + } + `) +} + +func TestCreateProjectNameAndDomainIDScope(t *testing.T) { + options := gophercloud.AuthOptions{UserID: "fenris", Password: "g0t0h311"} + scope := &Scope{ProjectName: "world-domination", DomainID: "1000"} + authTokenPost(t, options, scope, ` + { + "auth": { + "identity": { + "methods": ["password"], + "password": { + "user": { + "id": "fenris", + "password": "g0t0h311" + } + } + }, + "scope": { + "project": { + "domain": { + "id": "1000" + }, + "name": "world-domination" + } + } + } + } + `) +} + +func TestCreateProjectNameAndDomainNameScope(t *testing.T) { + options := gophercloud.AuthOptions{UserID: "fenris", Password: "g0t0h311"} + scope := &Scope{ProjectName: "world-domination", DomainName: "evil-plans"} + authTokenPost(t, options, scope, ` + { + "auth": { + "identity": { + "methods": ["password"], + "password": { + "user": { + "id": "fenris", + "password": "g0t0h311" + } + } + }, + "scope": { + "project": { + "domain": { + "name": "evil-plans" + }, + "name": "world-domination" + } + } + } + } + `) +} + +func TestCreateExtractsTokenFromResponse(t *testing.T) { + testhelper.SetupHTTP() + defer testhelper.TeardownHTTP() + + client := gophercloud.ServiceClient{ + ProviderClient: &gophercloud.ProviderClient{}, + Endpoint: testhelper.Endpoint(), + } + + testhelper.Mux.HandleFunc("/auth/tokens", func(w http.ResponseWriter, r *http.Request) { + w.Header().Add("X-Subject-Token", "aaa111") + + w.WriteHeader(http.StatusCreated) + fmt.Fprintf(w, `{ + "token": { + "expires_at": "2014-10-02T13:45:00.000000Z" + } + }`) + }) + + options := gophercloud.AuthOptions{UserID: "me", Password: "shhh"} + token, err := Create(&client, options, nil).Extract() + if err != nil { + t.Fatalf("Create returned an error: %v", err) + } + + 
if token.ID != "aaa111" { + t.Errorf("Expected token to be aaa111, but was %s", token.ID) + } +} + +func TestCreateFailureEmptyAuth(t *testing.T) { + authTokenPostErr(t, gophercloud.AuthOptions{}, nil, false, ErrMissingPassword) +} + +func TestCreateFailureAPIKey(t *testing.T) { + authTokenPostErr(t, gophercloud.AuthOptions{APIKey: "something"}, nil, false, ErrAPIKeyProvided) +} + +func TestCreateFailureTenantID(t *testing.T) { + authTokenPostErr(t, gophercloud.AuthOptions{TenantID: "something"}, nil, false, ErrTenantIDProvided) +} + +func TestCreateFailureTenantName(t *testing.T) { + authTokenPostErr(t, gophercloud.AuthOptions{TenantName: "something"}, nil, false, ErrTenantNameProvided) +} + +func TestCreateFailureTokenIDUsername(t *testing.T) { + authTokenPostErr(t, gophercloud.AuthOptions{Username: "something"}, nil, true, ErrUsernameWithToken) +} + +func TestCreateFailureTokenIDUserID(t *testing.T) { + authTokenPostErr(t, gophercloud.AuthOptions{UserID: "something"}, nil, true, ErrUserIDWithToken) +} + +func TestCreateFailureTokenIDDomainID(t *testing.T) { + authTokenPostErr(t, gophercloud.AuthOptions{DomainID: "something"}, nil, true, ErrDomainIDWithToken) +} + +func TestCreateFailureTokenIDDomainName(t *testing.T) { + authTokenPostErr(t, gophercloud.AuthOptions{DomainName: "something"}, nil, true, ErrDomainNameWithToken) +} + +func TestCreateFailureMissingUser(t *testing.T) { + options := gophercloud.AuthOptions{Password: "supersecure"} + authTokenPostErr(t, options, nil, false, ErrUsernameOrUserID) +} + +func TestCreateFailureBothUser(t *testing.T) { + options := gophercloud.AuthOptions{ + Password: "supersecure", + Username: "oops", + UserID: "redundancy", + } + authTokenPostErr(t, options, nil, false, ErrUsernameOrUserID) +} + +func TestCreateFailureMissingDomain(t *testing.T) { + options := gophercloud.AuthOptions{ + Password: "supersecure", + Username: "notuniqueenough", + } + authTokenPostErr(t, options, nil, false, ErrDomainIDOrDomainName) +} + +func TestCreateFailureBothDomain(t *testing.T) { + options := gophercloud.AuthOptions{ + Password: "supersecure", + Username: "someone", + DomainID: "hurf", + DomainName: "durf", + } + authTokenPostErr(t, options, nil, false, ErrDomainIDOrDomainName) +} + +func TestCreateFailureUserIDDomainID(t *testing.T) { + options := gophercloud.AuthOptions{ + UserID: "100", + Password: "stuff", + DomainID: "oops", + } + authTokenPostErr(t, options, nil, false, ErrDomainIDWithUserID) +} + +func TestCreateFailureUserIDDomainName(t *testing.T) { + options := gophercloud.AuthOptions{ + UserID: "100", + Password: "sssh", + DomainName: "oops", + } + authTokenPostErr(t, options, nil, false, ErrDomainNameWithUserID) +} + +func TestCreateFailureScopeProjectNameAlone(t *testing.T) { + options := gophercloud.AuthOptions{UserID: "myself", Password: "swordfish"} + scope := &Scope{ProjectName: "notenough"} + authTokenPostErr(t, options, scope, false, ErrScopeDomainIDOrDomainName) +} + +func TestCreateFailureScopeProjectNameAndID(t *testing.T) { + options := gophercloud.AuthOptions{UserID: "myself", Password: "swordfish"} + scope := &Scope{ProjectName: "whoops", ProjectID: "toomuch", DomainID: "1234"} + authTokenPostErr(t, options, scope, false, ErrScopeProjectIDOrProjectName) +} + +func TestCreateFailureScopeProjectIDAndDomainID(t *testing.T) { + options := gophercloud.AuthOptions{UserID: "myself", Password: "swordfish"} + scope := &Scope{ProjectID: "toomuch", DomainID: "notneeded"} + authTokenPostErr(t, options, scope, false, ErrScopeProjectIDAlone) +} + +func 
TestCreateFailureScopeProjectIDAndDomainName(t *testing.T) {
+	options := gophercloud.AuthOptions{UserID: "myself", Password: "swordfish"}
+	scope := &Scope{ProjectID: "toomuch", DomainName: "notneeded"}
+	authTokenPostErr(t, options, scope, false, ErrScopeProjectIDAlone)
+}
+
+func TestCreateFailureScopeDomainIDAndDomainName(t *testing.T) {
+	options := gophercloud.AuthOptions{UserID: "myself", Password: "swordfish"}
+	scope := &Scope{DomainID: "toomuch", DomainName: "notneeded"}
+	authTokenPostErr(t, options, scope, false, ErrScopeDomainIDOrDomainName)
+}
+
+func TestCreateFailureScopeDomainNameAlone(t *testing.T) {
+	options := gophercloud.AuthOptions{UserID: "myself", Password: "swordfish"}
+	scope := &Scope{DomainName: "notenough"}
+	authTokenPostErr(t, options, scope, false, ErrScopeDomainName)
+}
+
+func TestCreateFailureEmptyScope(t *testing.T) {
+	options := gophercloud.AuthOptions{UserID: "myself", Password: "swordfish"}
+	scope := &Scope{}
+	authTokenPostErr(t, options, scope, false, ErrScopeEmpty)
+}
+
+func TestGetRequest(t *testing.T) {
+	testhelper.SetupHTTP()
+	defer testhelper.TeardownHTTP()
+
+	client := gophercloud.ServiceClient{
+		ProviderClient: &gophercloud.ProviderClient{
+			TokenID: "12345abcdef",
+		},
+		Endpoint: testhelper.Endpoint(),
+	}
+
+	testhelper.Mux.HandleFunc("/auth/tokens", func(w http.ResponseWriter, r *http.Request) {
+		testhelper.TestMethod(t, r, "GET")
+		testhelper.TestHeader(t, r, "Content-Type", "")
+		testhelper.TestHeader(t, r, "Accept", "application/json")
+		testhelper.TestHeader(t, r, "X-Auth-Token", "12345abcdef")
+		testhelper.TestHeader(t, r, "X-Subject-Token", "abcdef12345")
+
+		w.WriteHeader(http.StatusOK)
+		fmt.Fprintf(w, `
+			{ "token": { "expires_at": "2014-08-29T13:10:01.000000Z" } }
+		`)
+	})
+
+	token, err := Get(&client, "abcdef12345").Extract()
+	if err != nil {
+		t.Errorf("Get returned an error: %v", err)
+	}
+
+	expected, _ := time.Parse(time.UnixDate, "Fri Aug 29 13:10:01 UTC 2014")
+	if token.ExpiresAt != expected {
+		t.Errorf("Expected expiration time %s, but was %s", expected.Format(time.UnixDate), token.ExpiresAt.Format(time.UnixDate))
+	}
+}
+
+func prepareAuthTokenHandler(t *testing.T, expectedMethod string, status int) gophercloud.ServiceClient {
+	client := gophercloud.ServiceClient{
+		ProviderClient: &gophercloud.ProviderClient{
+			TokenID: "12345abcdef",
+		},
+		Endpoint: testhelper.Endpoint(),
+	}
+
+	testhelper.Mux.HandleFunc("/auth/tokens", func(w http.ResponseWriter, r *http.Request) {
+		testhelper.TestMethod(t, r, expectedMethod)
+		testhelper.TestHeader(t, r, "Content-Type", "")
+		testhelper.TestHeader(t, r, "Accept", "application/json")
+		testhelper.TestHeader(t, r, "X-Auth-Token", "12345abcdef")
+		testhelper.TestHeader(t, r, "X-Subject-Token", "abcdef12345")
+
+		w.WriteHeader(status)
+	})
+
+	return client
+}
+
+func TestValidateRequestSuccessful(t *testing.T) {
+	testhelper.SetupHTTP()
+	defer testhelper.TeardownHTTP()
+	client := prepareAuthTokenHandler(t, "HEAD", http.StatusNoContent)
+
+	ok, err := Validate(&client, "abcdef12345")
+	if err != nil {
+		t.Errorf("Unexpected error from Validate: %v", err)
+	}
+
+	if !ok {
+		t.Errorf("Validate returned false for a valid token")
+	}
+}
+
+func TestValidateRequestFailure(t *testing.T) {
+	testhelper.SetupHTTP()
+	defer testhelper.TeardownHTTP()
+	client := prepareAuthTokenHandler(t, "HEAD", http.StatusNotFound)
+
+	ok, err := Validate(&client, "abcdef12345")
+	if err != nil {
+		t.Errorf("Unexpected error from Validate: %v", err)
+	}
+
+	if ok {
+		t.Errorf("Validate returned true for an invalid token")
+	}
+}
+
+func TestValidateRequestError(t *testing.T) {
+	testhelper.SetupHTTP()
+	defer testhelper.TeardownHTTP()
+	client := prepareAuthTokenHandler(t, "HEAD", http.StatusUnauthorized)
+
+	_, err := Validate(&client, "abcdef12345")
+	if err == nil {
+		t.Errorf("Missing expected error from Validate")
+	}
+}
+
+func TestRevokeRequestSuccessful(t *testing.T) {
+	testhelper.SetupHTTP()
+	defer testhelper.TeardownHTTP()
+	client := prepareAuthTokenHandler(t, "DELETE", http.StatusNoContent)
+
+	res := Revoke(&client, "abcdef12345")
+	testhelper.AssertNoErr(t, res.Err)
+}
+
+func TestRevokeRequestError(t *testing.T) {
+	testhelper.SetupHTTP()
+	defer testhelper.TeardownHTTP()
+	client := prepareAuthTokenHandler(t, "DELETE", http.StatusNotFound)
+
+	res := Revoke(&client, "abcdef12345")
+	if res.Err == nil {
+		t.Errorf("Missing expected error from Revoke")
+	}
+}
diff --git a/vendor/github.com/rackspace/gophercloud/openstack/identity/v3/tokens/results.go b/vendor/github.com/rackspace/gophercloud/openstack/identity/v3/tokens/results.go
new file mode 100644
index 000000000..d134f7d4d
--- /dev/null
+++ b/vendor/github.com/rackspace/gophercloud/openstack/identity/v3/tokens/results.go
@@ -0,0 +1,139 @@
+package tokens
+
+import (
+	"time"
+
+	"github.com/mitchellh/mapstructure"
+	"github.com/rackspace/gophercloud"
+)
+
+// Endpoint represents a single API endpoint offered by a service.
+// It matches either a public, internal, or admin URL.
+// If the service provides one, it also contains a region specifier.
+// The significance of the Region field will depend upon your provider.
+type Endpoint struct {
+	ID        string `mapstructure:"id"`
+	Region    string `mapstructure:"region"`
+	Interface string `mapstructure:"interface"`
+	URL       string `mapstructure:"url"`
+}
+
+// CatalogEntry provides a type-safe interface to an Identity API V3 service catalog listing.
+// Each class of service, such as cloud DNS or block storage services, could have multiple
+// CatalogEntry structs representing it (one per interface type, e.g. public, admin, or internal).
+//
+// Note: when looking for the desired service, try, whenever possible, to key off the type field.
+// Otherwise, you'll tie the representation of the service to a specific provider.
+type CatalogEntry struct {
+
+	// Service ID
+	ID string `mapstructure:"id"`
+
+	// Name will contain the provider-specified name for the service.
+	Name string `mapstructure:"name"`
+
+	// Type will contain a type string if OpenStack defines a type for the service.
+	// Otherwise, for provider-specific services, the provider may assign their own type strings.
+	Type string `mapstructure:"type"`
+
+	// Endpoints will let the caller iterate over all the different endpoints that may exist for
+	// the service.
+	Endpoints []Endpoint `mapstructure:"endpoints"`
+}
+
+// ServiceCatalog provides a view into the service catalog from a previous, successful authentication.
+type ServiceCatalog struct {
+	Entries []CatalogEntry
+}
+
+// commonResult is the deferred result of a Create or a Get call.
+type commonResult struct {
+	gophercloud.Result
+}
+
+// Extract is a shortcut for ExtractToken.
+// This function is deprecated and remains only for backward compatibility.
+func (r commonResult) Extract() (*Token, error) {
+	return r.ExtractToken()
+}
+
+// ExtractToken interprets a commonResult as a Token.
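+//
+// A minimal usage sketch (the identityClient name and the credentials are
+// illustrative):
+//
+//	opts := gophercloud.AuthOptions{UserID: "1234", Password: "secret"}
+//	token, err := Create(identityClient, opts, nil).ExtractToken()
+//	if err == nil {
+//		fmt.Println(token.ID, "expires at", token.ExpiresAt)
+//	}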
+func (r commonResult) ExtractToken() (*Token, error) { + if r.Err != nil { + return nil, r.Err + } + + var response struct { + Token struct { + ExpiresAt string `mapstructure:"expires_at"` + } `mapstructure:"token"` + } + + var token Token + + // Parse the token itself from the stored headers. + token.ID = r.Header.Get("X-Subject-Token") + + err := mapstructure.Decode(r.Body, &response) + if err != nil { + return nil, err + } + + // Attempt to parse the timestamp. + token.ExpiresAt, err = time.Parse(gophercloud.RFC3339Milli, response.Token.ExpiresAt) + + return &token, err +} + +// ExtractServiceCatalog returns the ServiceCatalog that was generated along with the user's Token. +func (result CreateResult) ExtractServiceCatalog() (*ServiceCatalog, error) { + if result.Err != nil { + return nil, result.Err + } + + var response struct { + Token struct { + Entries []CatalogEntry `mapstructure:"catalog"` + } `mapstructure:"token"` + } + + err := mapstructure.Decode(result.Body, &response) + if err != nil { + return nil, err + } + + return &ServiceCatalog{Entries: response.Token.Entries}, nil +} + +// CreateResult defers the interpretation of a created token. +// Use ExtractToken() to interpret it as a Token, or ExtractServiceCatalog() to interpret it as a service catalog. +type CreateResult struct { + commonResult +} + +// createErr quickly creates a CreateResult that reports an error. +func createErr(err error) CreateResult { + return CreateResult{ + commonResult: commonResult{Result: gophercloud.Result{Err: err}}, + } +} + +// GetResult is the deferred response from a Get call. +type GetResult struct { + commonResult +} + +// RevokeResult is the deferred response from a Revoke call. +type RevokeResult struct { + commonResult +} + +// Token is a string that grants a user access to a controlled set of services in an OpenStack provider. +// Each Token is valid for a set length of time. +type Token struct { + // ID is the issued token. + ID string + + // ExpiresAt is the timestamp at which this token will no longer be accepted. 
+	ExpiresAt time.Time
+}
diff --git a/vendor/github.com/rackspace/gophercloud/openstack/identity/v3/tokens/urls.go b/vendor/github.com/rackspace/gophercloud/openstack/identity/v3/tokens/urls.go
new file mode 100644
index 000000000..360b60a82
--- /dev/null
+++ b/vendor/github.com/rackspace/gophercloud/openstack/identity/v3/tokens/urls.go
@@ -0,0 +1,7 @@
+package tokens
+
+import "github.com/rackspace/gophercloud"
+
+func tokenURL(c *gophercloud.ServiceClient) string {
+	return c.ServiceURL("auth", "tokens")
+}
diff --git a/vendor/github.com/rackspace/gophercloud/openstack/identity/v3/tokens/urls_test.go b/vendor/github.com/rackspace/gophercloud/openstack/identity/v3/tokens/urls_test.go
new file mode 100644
index 000000000..549c39862
--- /dev/null
+++ b/vendor/github.com/rackspace/gophercloud/openstack/identity/v3/tokens/urls_test.go
@@ -0,0 +1,21 @@
+package tokens
+
+import (
+	"testing"
+
+	"github.com/rackspace/gophercloud"
+	"github.com/rackspace/gophercloud/testhelper"
+)
+
+func TestTokenURL(t *testing.T) {
+	testhelper.SetupHTTP()
+	defer testhelper.TeardownHTTP()
+
+	client := gophercloud.ServiceClient{Endpoint: testhelper.Endpoint()}
+
+	expected := testhelper.Endpoint() + "auth/tokens"
+	actual := tokenURL(&client)
+	if actual != expected {
+		t.Errorf("Expected URL %s, but was %s", expected, actual)
+	}
+}
diff --git a/vendor/github.com/rackspace/gophercloud/openstack/networking/v2/apiversions/doc.go b/vendor/github.com/rackspace/gophercloud/openstack/networking/v2/apiversions/doc.go
new file mode 100644
index 000000000..0208ee20e
--- /dev/null
+++ b/vendor/github.com/rackspace/gophercloud/openstack/networking/v2/apiversions/doc.go
@@ -0,0 +1,4 @@
+// Package apiversions provides information and interaction with the different
+// API versions for the OpenStack Neutron service. This functionality is not
+// restricted to this particular version.
+package apiversions
diff --git a/vendor/github.com/rackspace/gophercloud/openstack/networking/v2/apiversions/errors.go b/vendor/github.com/rackspace/gophercloud/openstack/networking/v2/apiversions/errors.go
new file mode 100644
index 000000000..76bdb14f7
--- /dev/null
+++ b/vendor/github.com/rackspace/gophercloud/openstack/networking/v2/apiversions/errors.go
@@ -0,0 +1 @@
+package apiversions
diff --git a/vendor/github.com/rackspace/gophercloud/openstack/networking/v2/apiversions/requests.go b/vendor/github.com/rackspace/gophercloud/openstack/networking/v2/apiversions/requests.go
new file mode 100644
index 000000000..9fb6de141
--- /dev/null
+++ b/vendor/github.com/rackspace/gophercloud/openstack/networking/v2/apiversions/requests.go
@@ -0,0 +1,21 @@
+package apiversions
+
+import (
+	"github.com/rackspace/gophercloud"
+	"github.com/rackspace/gophercloud/pagination"
+)
+
+// ListVersions lists all the Neutron API versions available to end-users.
+func ListVersions(c *gophercloud.ServiceClient) pagination.Pager {
+	return pagination.NewPager(c, apiVersionsURL(c), func(r pagination.PageResult) pagination.Page {
+		return APIVersionPage{pagination.SinglePageBase(r)}
+	})
+}
+
+// ListVersionResources lists all of the different API resources for a particular
+// API version. Typical resources for Neutron might be: networks, subnets, etc.
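+//
+// A minimal usage sketch (the networkClient name is illustrative):
+//
+//	ListVersionResources(networkClient, "v2.0").EachPage(
+//		func(page pagination.Page) (bool, error) {
+//			resources, err := ExtractVersionResources(page)
+//			if err != nil {
+//				return false, err
+//			}
+//			for _, res := range resources {
+//				fmt.Println(res.Name, res.Collection)
+//			}
+//			return true, nil
+//		})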
+func ListVersionResources(c *gophercloud.ServiceClient, v string) pagination.Pager { + return pagination.NewPager(c, apiInfoURL(c, v), func(r pagination.PageResult) pagination.Page { + return APIVersionResourcePage{pagination.SinglePageBase(r)} + }) +} diff --git a/vendor/github.com/rackspace/gophercloud/openstack/networking/v2/apiversions/requests_test.go b/vendor/github.com/rackspace/gophercloud/openstack/networking/v2/apiversions/requests_test.go new file mode 100644 index 000000000..d35af9f0c --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/openstack/networking/v2/apiversions/requests_test.go @@ -0,0 +1,182 @@ +package apiversions + +import ( + "fmt" + "net/http" + "testing" + + "github.com/rackspace/gophercloud/pagination" + th "github.com/rackspace/gophercloud/testhelper" + fake "github.com/rackspace/gophercloud/testhelper/client" +) + +func TestListVersions(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + th.Mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + + fmt.Fprintf(w, ` +{ + "versions": [ + { + "status": "CURRENT", + "id": "v2.0", + "links": [ + { + "href": "http://23.253.228.211:9696/v2.0", + "rel": "self" + } + ] + } + ] +}`) + }) + + count := 0 + + ListVersions(fake.ServiceClient()).EachPage(func(page pagination.Page) (bool, error) { + count++ + actual, err := ExtractAPIVersions(page) + if err != nil { + t.Errorf("Failed to extract API versions: %v", err) + return false, err + } + + expected := []APIVersion{ + APIVersion{ + Status: "CURRENT", + ID: "v2.0", + }, + } + + th.AssertDeepEquals(t, expected, actual) + + return true, nil + }) + + if count != 1 { + t.Errorf("Expected 1 page, got %d", count) + } +} + +func TestNonJSONCannotBeExtractedIntoAPIVersions(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + th.Mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusOK) + }) + + ListVersions(fake.ServiceClient()).EachPage(func(page pagination.Page) (bool, error) { + if _, err := ExtractAPIVersions(page); err == nil { + t.Fatalf("Expected error, got nil") + } + return true, nil + }) +} + +func TestAPIInfo(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + th.Mux.HandleFunc("/v2.0/", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + + fmt.Fprintf(w, ` +{ + "resources": [ + { + "links": [ + { + "href": "http://23.253.228.211:9696/v2.0/subnets", + "rel": "self" + } + ], + "name": "subnet", + "collection": "subnets" + }, + { + "links": [ + { + "href": "http://23.253.228.211:9696/v2.0/networks", + "rel": "self" + } + ], + "name": "network", + "collection": "networks" + }, + { + "links": [ + { + "href": "http://23.253.228.211:9696/v2.0/ports", + "rel": "self" + } + ], + "name": "port", + "collection": "ports" + } + ] +} + `) + }) + + count := 0 + + ListVersionResources(fake.ServiceClient(), "v2.0").EachPage(func(page pagination.Page) (bool, error) { + count++ + actual, err := ExtractVersionResources(page) + if err != nil { + t.Errorf("Failed to extract version resources: %v", err) + return false, err + } + + expected := []APIVersionResource{ + APIVersionResource{ + Name: "subnet", + Collection: "subnets", + }, + APIVersionResource{ + Name: "network", 
+ Collection: "networks", + }, + APIVersionResource{ + Name: "port", + Collection: "ports", + }, + } + + th.AssertDeepEquals(t, expected, actual) + + return true, nil + }) + + if count != 1 { + t.Errorf("Expected 1 page, got %d", count) + } +} + +func TestNonJSONCannotBeExtractedIntoAPIVersionResources(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + th.Mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusOK) + }) + + ListVersionResources(fake.ServiceClient(), "v2.0").EachPage(func(page pagination.Page) (bool, error) { + if _, err := ExtractVersionResources(page); err == nil { + t.Fatalf("Expected error, got nil") + } + return true, nil + }) +} diff --git a/vendor/github.com/rackspace/gophercloud/openstack/networking/v2/apiversions/results.go b/vendor/github.com/rackspace/gophercloud/openstack/networking/v2/apiversions/results.go new file mode 100644 index 000000000..97159341f --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/openstack/networking/v2/apiversions/results.go @@ -0,0 +1,77 @@ +package apiversions + +import ( + "github.com/mitchellh/mapstructure" + "github.com/rackspace/gophercloud/pagination" +) + +// APIVersion represents an API version for Neutron. It contains the status of +// the API, and its unique ID. +type APIVersion struct { + Status string `mapstructure:"status" json:"status"` + ID string `mapstructure:"id" json:"id"` +} + +// APIVersionPage is the page returned by a pager when traversing over a +// collection of API versions. +type APIVersionPage struct { + pagination.SinglePageBase +} + +// IsEmpty checks whether an APIVersionPage struct is empty. +func (r APIVersionPage) IsEmpty() (bool, error) { + is, err := ExtractAPIVersions(r) + if err != nil { + return true, err + } + return len(is) == 0, nil +} + +// ExtractAPIVersions takes a collection page, extracts all of the elements, +// and returns them a slice of APIVersion structs. It is effectively a cast. +func ExtractAPIVersions(page pagination.Page) ([]APIVersion, error) { + var resp struct { + Versions []APIVersion `mapstructure:"versions"` + } + + err := mapstructure.Decode(page.(APIVersionPage).Body, &resp) + + return resp.Versions, err +} + +// APIVersionResource represents a generic API resource. It contains the name +// of the resource and its plural collection name. +type APIVersionResource struct { + Name string `mapstructure:"name" json:"name"` + Collection string `mapstructure:"collection" json:"collection"` +} + +// APIVersionResourcePage is a concrete type which embeds the common +// SinglePageBase struct, and is used when traversing API versions collections. +type APIVersionResourcePage struct { + pagination.SinglePageBase +} + +// IsEmpty is a concrete function which indicates whether an +// APIVersionResourcePage is empty or not. +func (r APIVersionResourcePage) IsEmpty() (bool, error) { + is, err := ExtractVersionResources(r) + if err != nil { + return true, err + } + return len(is) == 0, nil +} + +// ExtractVersionResources accepts a Page struct, specifically a +// APIVersionResourcePage struct, and extracts the elements into a slice of +// APIVersionResource structs. In other words, the collection is mapped into +// a relevant slice. 
+func ExtractVersionResources(page pagination.Page) ([]APIVersionResource, error) { + var resp struct { + APIVersionResources []APIVersionResource `mapstructure:"resources"` + } + + err := mapstructure.Decode(page.(APIVersionResourcePage).Body, &resp) + + return resp.APIVersionResources, err +} diff --git a/vendor/github.com/rackspace/gophercloud/openstack/networking/v2/apiversions/urls.go b/vendor/github.com/rackspace/gophercloud/openstack/networking/v2/apiversions/urls.go new file mode 100644 index 000000000..58aa2b61f --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/openstack/networking/v2/apiversions/urls.go @@ -0,0 +1,15 @@ +package apiversions + +import ( + "strings" + + "github.com/rackspace/gophercloud" +) + +func apiVersionsURL(c *gophercloud.ServiceClient) string { + return c.Endpoint +} + +func apiInfoURL(c *gophercloud.ServiceClient, version string) string { + return c.Endpoint + strings.TrimRight(version, "/") + "/" +} diff --git a/vendor/github.com/rackspace/gophercloud/openstack/networking/v2/apiversions/urls_test.go b/vendor/github.com/rackspace/gophercloud/openstack/networking/v2/apiversions/urls_test.go new file mode 100644 index 000000000..7dd069c94 --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/openstack/networking/v2/apiversions/urls_test.go @@ -0,0 +1,26 @@ +package apiversions + +import ( + "testing" + + "github.com/rackspace/gophercloud" + th "github.com/rackspace/gophercloud/testhelper" +) + +const endpoint = "http://localhost:57909/" + +func endpointClient() *gophercloud.ServiceClient { + return &gophercloud.ServiceClient{Endpoint: endpoint} +} + +func TestAPIVersionsURL(t *testing.T) { + actual := apiVersionsURL(endpointClient()) + expected := endpoint + th.AssertEquals(t, expected, actual) +} + +func TestAPIInfoURL(t *testing.T) { + actual := apiInfoURL(endpointClient(), "v2.0") + expected := endpoint + "v2.0/" + th.AssertEquals(t, expected, actual) +} diff --git a/vendor/github.com/rackspace/gophercloud/openstack/networking/v2/common/common_tests.go b/vendor/github.com/rackspace/gophercloud/openstack/networking/v2/common/common_tests.go new file mode 100644 index 000000000..41603510d --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/openstack/networking/v2/common/common_tests.go @@ -0,0 +1,14 @@ +package common + +import ( + "github.com/rackspace/gophercloud" + "github.com/rackspace/gophercloud/testhelper/client" +) + +const TokenID = client.TokenID + +func ServiceClient() *gophercloud.ServiceClient { + sc := client.ServiceClient() + sc.ResourceBase = sc.Endpoint + "v2.0/" + return sc +} diff --git a/vendor/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/delegate.go b/vendor/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/delegate.go new file mode 100644 index 000000000..d08e1fda9 --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/delegate.go @@ -0,0 +1,41 @@ +package extensions + +import ( + "github.com/rackspace/gophercloud" + common "github.com/rackspace/gophercloud/openstack/common/extensions" + "github.com/rackspace/gophercloud/pagination" +) + +// Extension is a single OpenStack extension. +type Extension struct { + common.Extension +} + +// GetResult wraps a GetResult from common. +type GetResult struct { + common.GetResult +} + +// ExtractExtensions interprets a Page as a slice of Extensions. 
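For orientation, here is a hypothetical capability probe built on the delegating Get shown in this file (the ExtractExtensions implementation follows below). The client is assumed to be authenticated, and the error handling is deliberately coarse for brevity.

```go
package main

import (
    "fmt"

    "github.com/rackspace/gophercloud"
    "github.com/rackspace/gophercloud/openstack/networking/v2/extensions"
)

// hasExtension reports whether the Neutron endpoint advertises the named
// extension alias (for example "fwaas"). Get and Extract are the functions
// defined in this package and the common extensions package it delegates to.
func hasExtension(c *gophercloud.ServiceClient, alias string) bool {
    ext, err := extensions.Get(c, alias).Extract()
    if err != nil {
        // For this sketch, treat any error (including a 404) as "not enabled".
        return false
    }
    fmt.Printf("found extension %q: %s\n", ext.Alias, ext.Description)
    return true
}
```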
+func ExtractExtensions(page pagination.Page) ([]Extension, error) { + inner, err := common.ExtractExtensions(page) + if err != nil { + return nil, err + } + outer := make([]Extension, len(inner)) + for index, ext := range inner { + outer[index] = Extension{ext} + } + return outer, nil +} + +// Get retrieves information for a specific extension using its alias. +func Get(c *gophercloud.ServiceClient, alias string) GetResult { + return GetResult{common.Get(c, alias)} +} + +// List returns a Pager which allows you to iterate over the full collection of extensions. +// It does not accept query parameters. +func List(c *gophercloud.ServiceClient) pagination.Pager { + return common.List(c) +} diff --git a/vendor/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/delegate_test.go b/vendor/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/delegate_test.go new file mode 100644 index 000000000..3d2ac78d4 --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/delegate_test.go @@ -0,0 +1,105 @@ +package extensions + +import ( + "fmt" + "net/http" + "testing" + + common "github.com/rackspace/gophercloud/openstack/common/extensions" + fake "github.com/rackspace/gophercloud/openstack/networking/v2/common" + "github.com/rackspace/gophercloud/pagination" + th "github.com/rackspace/gophercloud/testhelper" +) + +func TestList(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + th.Mux.HandleFunc("/v2.0/extensions", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + + w.Header().Add("Content-Type", "application/json") + + fmt.Fprintf(w, ` +{ + "extensions": [ + { + "updated": "2013-01-20T00:00:00-00:00", + "name": "Neutron Service Type Management", + "links": [], + "namespace": "http://docs.openstack.org/ext/neutron/service-type/api/v1.0", + "alias": "service-type", + "description": "API for retrieving service providers for Neutron advanced services" + } + ] +} + `) + }) + + count := 0 + + List(fake.ServiceClient()).EachPage(func(page pagination.Page) (bool, error) { + count++ + actual, err := ExtractExtensions(page) + if err != nil { + t.Errorf("Failed to extract extensions: %v", err) + } + + expected := []Extension{ + Extension{ + common.Extension{ + Updated: "2013-01-20T00:00:00-00:00", + Name: "Neutron Service Type Management", + Links: []interface{}{}, + Namespace: "http://docs.openstack.org/ext/neutron/service-type/api/v1.0", + Alias: "service-type", + Description: "API for retrieving service providers for Neutron advanced services", + }, + }, + } + + th.AssertDeepEquals(t, expected, actual) + + return true, nil + }) + + if count != 1 { + t.Errorf("Expected 1 page, got %d", count) + } +} + +func TestGet(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + th.Mux.HandleFunc("/v2.0/extensions/agent", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + + fmt.Fprintf(w, ` +{ + "extension": { + "updated": "2013-02-03T10:00:00-00:00", + "name": "agent", + "links": [], + "namespace": "http://docs.openstack.org/ext/agent/api/v2.0", + "alias": "agent", + "description": "The agent management extension." 
+ } +} + `) + }) + + ext, err := Get(fake.ServiceClient(), "agent").Extract() + th.AssertNoErr(t, err) + + th.AssertEquals(t, ext.Updated, "2013-02-03T10:00:00-00:00") + th.AssertEquals(t, ext.Name, "agent") + th.AssertEquals(t, ext.Namespace, "http://docs.openstack.org/ext/agent/api/v2.0") + th.AssertEquals(t, ext.Alias, "agent") + th.AssertEquals(t, ext.Description, "The agent management extension.") +} diff --git a/vendor/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/external/doc.go b/vendor/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/external/doc.go new file mode 100644 index 000000000..dad3a844f --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/external/doc.go @@ -0,0 +1,3 @@ +// Package external provides information and interaction with the external +// extension for the OpenStack Networking service. +package external diff --git a/vendor/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/external/requests.go b/vendor/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/external/requests.go new file mode 100644 index 000000000..097ae37f2 --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/external/requests.go @@ -0,0 +1,69 @@ +package external + +import ( + "time" + + "github.com/rackspace/gophercloud/openstack/networking/v2/networks" +) + +// AdminState gives users a solid type to work with for create and update +// operations. It is recommended that users use the `Up` and `Down` enums. +type AdminState *bool + +// Convenience vars for AdminStateUp values. +var ( + iTrue = true + iFalse = false + + Up AdminState = &iTrue + Down AdminState = &iFalse +) + +// CreateOpts is the structure used when creating new external network +// resources. It embeds networks.CreateOpts and so inherits all of its required +// and optional fields, with the addition of the External field. +type CreateOpts struct { + Parent networks.CreateOpts + External bool +} + +// ToNetworkCreateMap casts a CreateOpts struct to a map. +func (o CreateOpts) ToNetworkCreateMap() (map[string]interface{}, error) { + + // DO NOT REMOVE. Though this line seemingly does nothing of value, it is a + // splint to prevent the unit test from failing on Go Tip. We suspect it is a + // compiler issue that will hopefully be worked out prior to our next release. + // Again, for all the unit tests to pass, this line is necessary and sufficient + // at the moment. We should reassess after the Go 1.5 release to determine + // if this line is still needed. + time.Sleep(0 * time.Millisecond) + + outer, err := o.Parent.ToNetworkCreateMap() + if err != nil { + return nil, err + } + + outer["network"].(map[string]interface{})["router:external"] = o.External + + return outer, nil +} + +// UpdateOpts is the structure used when updating existing external network +// resources. It embeds networks.UpdateOpts and so inherits all of its required +// and optional fields, with the addition of the External field. +type UpdateOpts struct { + Parent networks.UpdateOpts + External bool +} + +// ToNetworkUpdateMap casts an UpdateOpts struct to a map. 
+func (o UpdateOpts) ToNetworkUpdateMap() (map[string]interface{}, error) {
+    outer, err := o.Parent.ToNetworkUpdateMap()
+    if err != nil {
+        return nil, err
+    }
+
+    outer["network"].(map[string]interface{})["router:external"] = o.External
+
+    return outer, nil
+}
diff --git a/vendor/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/external/results.go b/vendor/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/external/results.go
new file mode 100644
index 000000000..54dbf4bb6
--- /dev/null
+++ b/vendor/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/external/results.go
@@ -0,0 +1,81 @@
+package external
+
+import (
+    "github.com/mitchellh/mapstructure"
+    "github.com/rackspace/gophercloud/openstack/networking/v2/networks"
+    "github.com/rackspace/gophercloud/pagination"
+)
+
+// NetworkExternal represents a decorated form of a Network, based on the
+// "external-net" extension.
+type NetworkExternal struct {
+    // UUID for the network
+    ID string `mapstructure:"id" json:"id"`
+
+    // Human-readable name for the network. Might not be unique.
+    Name string `mapstructure:"name" json:"name"`
+
+    // The administrative state of network. If false (down), the network does not forward packets.
+    AdminStateUp bool `mapstructure:"admin_state_up" json:"admin_state_up"`
+
+    // Indicates whether network is currently operational. Possible values include
+    // `ACTIVE', `DOWN', `BUILD', or `ERROR'. Plug-ins might define additional values.
+    Status string `mapstructure:"status" json:"status"`
+
+    // Subnets associated with this network.
+    Subnets []string `mapstructure:"subnets" json:"subnets"`
+
+    // Owner of network. Only admin users can specify a tenant_id other than its own.
+    TenantID string `mapstructure:"tenant_id" json:"tenant_id"`
+
+    // Specifies whether the network resource can be accessed by any tenant or not.
+    Shared bool `mapstructure:"shared" json:"shared"`
+
+    // Specifies whether the network is an external network or not.
+    External bool `mapstructure:"router:external" json:"router:external"`
+}
+
+func commonExtract(e error, response interface{}) (*NetworkExternal, error) {
+    if e != nil {
+        return nil, e
+    }
+
+    var res struct {
+        Network *NetworkExternal `json:"network"`
+    }
+
+    err := mapstructure.Decode(response, &res)
+
+    return res.Network, err
+}
+
+// ExtractGet decorates a GetResult struct returned from a networks.Get()
+// function with extended attributes.
+func ExtractGet(r networks.GetResult) (*NetworkExternal, error) {
+    return commonExtract(r.Err, r.Body)
+}
+
+// ExtractCreate decorates a CreateResult struct returned from a networks.Create()
+// function with extended attributes.
+func ExtractCreate(r networks.CreateResult) (*NetworkExternal, error) {
+    return commonExtract(r.Err, r.Body)
+}
+
+// ExtractUpdate decorates an UpdateResult struct returned from a
+// networks.Update() function with extended attributes.
+func ExtractUpdate(r networks.UpdateResult) (*NetworkExternal, error) {
+    return commonExtract(r.Err, r.Body)
+}
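Putting the pieces of this extension together, a brief illustrative sketch (not vendored code): the networks package performs the request, and the Extract helpers above decode the extended router:external attribute. The function name and the authenticated client are assumptions.

```go
package main

import (
    "github.com/rackspace/gophercloud"
    "github.com/rackspace/gophercloud/openstack/networking/v2/extensions/external"
    "github.com/rackspace/gophercloud/openstack/networking/v2/networks"
)

// createExternalNetwork creates a network flagged with router:external by
// wrapping a plain networks.CreateOpts in the decorated external.CreateOpts.
func createExternalNetwork(c *gophercloud.ServiceClient, name string) (*external.NetworkExternal, error) {
    opts := external.CreateOpts{
        Parent:   networks.CreateOpts{Name: name, AdminStateUp: external.Up},
        External: true,
    }
    // networks.Create returns a networks.CreateResult; ExtractCreate decodes
    // it into the extended NetworkExternal struct instead of a plain Network.
    res := networks.Create(c, opts)
    return external.ExtractCreate(res)
}
```

+// ExtractList accepts a Page struct, specifically a NetworkPage struct, and
+// extracts the elements into a slice of NetworkExternal structs. In other
+// words, a generic collection is mapped into a relevant slice.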
+func ExtractList(page pagination.Page) ([]NetworkExternal, error) { + var resp struct { + Networks []NetworkExternal `mapstructure:"networks" json:"networks"` + } + + err := mapstructure.Decode(page.(networks.NetworkPage).Body, &resp) + + return resp.Networks, err +} diff --git a/vendor/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/external/results_test.go b/vendor/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/external/results_test.go new file mode 100644 index 000000000..916cd2cfd --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/external/results_test.go @@ -0,0 +1,254 @@ +package external + +import ( + "errors" + "fmt" + "net/http" + "testing" + + "github.com/rackspace/gophercloud/openstack/networking/v2/networks" + "github.com/rackspace/gophercloud/pagination" + th "github.com/rackspace/gophercloud/testhelper" + fake "github.com/rackspace/gophercloud/testhelper/client" +) + +func TestList(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + th.Mux.HandleFunc("/networks", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + + fmt.Fprintf(w, ` +{ + "networks": [ + { + "admin_state_up": true, + "id": "0f38d5ad-10a6-428f-a5fc-825cfe0f1970", + "name": "net1", + "router:external": false, + "shared": false, + "status": "ACTIVE", + "subnets": [ + "25778974-48a8-46e7-8998-9dc8c70d2f06" + ], + "tenant_id": "b575417a6c444a6eb5cc3a58eb4f714a" + }, + { + "admin_state_up": true, + "id": "8d05a1b1-297a-46ca-8974-17debf51ca3c", + "name": "ext_net", + "router:external": true, + "shared": false, + "status": "ACTIVE", + "subnets": [ + "2f1fb918-9b0e-4bf9-9a50-6cebbb4db2c5" + ], + "tenant_id": "5eb8995cf717462c9df8d1edfa498010" + } + ] +} + `) + }) + + count := 0 + + networks.List(fake.ServiceClient(), networks.ListOpts{}).EachPage(func(page pagination.Page) (bool, error) { + count++ + actual, err := ExtractList(page) + if err != nil { + t.Errorf("Failed to extract networks: %v", err) + return false, err + } + + expected := []NetworkExternal{ + NetworkExternal{ + Status: "ACTIVE", + Subnets: []string{"25778974-48a8-46e7-8998-9dc8c70d2f06"}, + Name: "net1", + AdminStateUp: true, + TenantID: "b575417a6c444a6eb5cc3a58eb4f714a", + Shared: false, + ID: "0f38d5ad-10a6-428f-a5fc-825cfe0f1970", + External: false, + }, + NetworkExternal{ + Status: "ACTIVE", + Subnets: []string{"2f1fb918-9b0e-4bf9-9a50-6cebbb4db2c5"}, + Name: "ext_net", + AdminStateUp: true, + TenantID: "5eb8995cf717462c9df8d1edfa498010", + Shared: false, + ID: "8d05a1b1-297a-46ca-8974-17debf51ca3c", + External: true, + }, + } + + th.CheckDeepEquals(t, expected, actual) + + return true, nil + }) + + if count != 1 { + t.Errorf("Expected 1 page, got %d", count) + } +} + +func TestGet(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + th.Mux.HandleFunc("/networks/d32019d3-bc6e-4319-9c1d-6722fc136a22", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + + fmt.Fprintf(w, ` +{ + "network": { + "admin_state_up": true, + "id": "8d05a1b1-297a-46ca-8974-17debf51ca3c", + "name": "ext_net", + "router:external": true, + "shared": false, + "status": "ACTIVE", + "subnets": [ + "2f1fb918-9b0e-4bf9-9a50-6cebbb4db2c5" + ], + 
"tenant_id": "5eb8995cf717462c9df8d1edfa498010" + } +} + `) + }) + + res := networks.Get(fake.ServiceClient(), "d32019d3-bc6e-4319-9c1d-6722fc136a22") + n, err := ExtractGet(res) + + th.AssertNoErr(t, err) + th.AssertEquals(t, true, n.External) +} + +func TestCreate(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + th.Mux.HandleFunc("/networks", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "POST") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + th.TestHeader(t, r, "Content-Type", "application/json") + th.TestHeader(t, r, "Accept", "application/json") + th.TestJSONRequest(t, r, ` +{ + "network": { + "admin_state_up": true, + "name": "ext_net", + "router:external": true + } +} + `) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusCreated) + + fmt.Fprintf(w, ` +{ + "network": { + "admin_state_up": true, + "id": "8d05a1b1-297a-46ca-8974-17debf51ca3c", + "name": "ext_net", + "router:external": true, + "shared": false, + "status": "ACTIVE", + "subnets": [ + "2f1fb918-9b0e-4bf9-9a50-6cebbb4db2c5" + ], + "tenant_id": "5eb8995cf717462c9df8d1edfa498010" + } +} + `) + }) + + options := CreateOpts{networks.CreateOpts{Name: "ext_net", AdminStateUp: Up}, true} + res := networks.Create(fake.ServiceClient(), options) + + n, err := ExtractCreate(res) + + th.AssertNoErr(t, err) + th.AssertEquals(t, true, n.External) +} + +func TestUpdate(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + th.Mux.HandleFunc("/networks/4e8e5957-649f-477b-9e5b-f1f75b21c03c", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "PUT") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + th.TestHeader(t, r, "Content-Type", "application/json") + th.TestHeader(t, r, "Accept", "application/json") + th.TestJSONRequest(t, r, ` +{ + "network": { + "router:external": true, + "name": "new_name" + } +} + `) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + + fmt.Fprintf(w, ` +{ + "network": { + "admin_state_up": true, + "id": "8d05a1b1-297a-46ca-8974-17debf51ca3c", + "name": "new_name", + "router:external": true, + "shared": false, + "status": "ACTIVE", + "subnets": [ + "2f1fb918-9b0e-4bf9-9a50-6cebbb4db2c5" + ], + "tenant_id": "5eb8995cf717462c9df8d1edfa498010" + } +} + `) + }) + + options := UpdateOpts{networks.UpdateOpts{Name: "new_name"}, true} + res := networks.Update(fake.ServiceClient(), "4e8e5957-649f-477b-9e5b-f1f75b21c03c", options) + n, err := ExtractUpdate(res) + + th.AssertNoErr(t, err) + th.AssertEquals(t, true, n.External) +} + +func TestExtractFnsReturnsErrWhenResultContainsErr(t *testing.T) { + gr := networks.GetResult{} + gr.Err = errors.New("") + + if _, err := ExtractGet(gr); err == nil { + t.Fatalf("Expected error, got one") + } + + ur := networks.UpdateResult{} + ur.Err = errors.New("") + + if _, err := ExtractUpdate(ur); err == nil { + t.Fatalf("Expected error, got one") + } + + cr := networks.CreateResult{} + cr.Err = errors.New("") + + if _, err := ExtractCreate(cr); err == nil { + t.Fatalf("Expected error, got one") + } +} diff --git a/vendor/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/fwaas/doc.go b/vendor/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/fwaas/doc.go new file mode 100644 index 000000000..3ec450a7b --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/fwaas/doc.go @@ -0,0 +1,3 @@ +// Package fwaas provides information and interaction with the Firewall +// as a Service 
extension for the OpenStack Networking service. +package fwaas diff --git a/vendor/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/fwaas/firewalls/errors.go b/vendor/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/fwaas/firewalls/errors.go new file mode 100644 index 000000000..dd92bb20d --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/fwaas/firewalls/errors.go @@ -0,0 +1,11 @@ +package firewalls + +import "fmt" + +func err(str string) error { + return fmt.Errorf("%s", str) +} + +var ( + errPolicyRequired = err("A policy ID is required") +) diff --git a/vendor/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/fwaas/firewalls/requests.go b/vendor/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/fwaas/firewalls/requests.go new file mode 100644 index 000000000..12d587f38 --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/fwaas/firewalls/requests.go @@ -0,0 +1,216 @@ +package firewalls + +import ( + "github.com/rackspace/gophercloud" + "github.com/rackspace/gophercloud/pagination" +) + +// AdminState gives users a solid type to work with for create and update +// operations. It is recommended that users use the `Up` and `Down` enums. +type AdminState *bool + +// Shared gives users a solid type to work with for create and update +// operations. It is recommended that users use the `Yes` and `No` enums. +type Shared *bool + +// Convenience vars for AdminStateUp and Shared values. +var ( + iTrue = true + iFalse = false + Up AdminState = &iTrue + Down AdminState = &iFalse + Yes Shared = &iTrue + No Shared = &iFalse +) + +// ListOptsBuilder allows extensions to add additional parameters to the +// List request. +type ListOptsBuilder interface { + ToFirewallListQuery() (string, error) +} + +// ListOpts allows the filtering and sorting of paginated collections through +// the API. Filtering is achieved by passing in struct field values that map to +// the firewall attributes you want to see returned. SortKey allows you to sort +// by a particular firewall attribute. SortDir sets the direction, and is either +// `asc' or `desc'. Marker and Limit are used for pagination. +type ListOpts struct { + TenantID string `q:"tenant_id"` + Name string `q:"name"` + Description string `q:"description"` + AdminStateUp bool `q:"admin_state_up"` + Shared bool `q:"shared"` + PolicyID string `q:"firewall_policy_id"` + ID string `q:"id"` + Limit int `q:"limit"` + Marker string `q:"marker"` + SortKey string `q:"sort_key"` + SortDir string `q:"sort_dir"` +} + +// ToFirewallListQuery formats a ListOpts into a query string. +func (opts ListOpts) ToFirewallListQuery() (string, error) { + q, err := gophercloud.BuildQueryString(opts) + if err != nil { + return "", err + } + return q.String(), nil +} + +// List returns a Pager which allows you to iterate over a collection of +// firewalls. It accepts a ListOpts struct, which allows you to filter +// and sort the returned collection for greater efficiency. +// +// Default policy settings return only those firewalls that are owned by the +// tenant who submits the request, unless an admin user submits the request. 
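As a consumption sketch for the List function documented above and defined next: this is illustrative only, the client and page size are assumptions, and every call it makes (List, EachPage, ExtractFirewalls) is defined in this package or in pagination.

```go
package main

import (
    "fmt"

    "github.com/rackspace/gophercloud"
    "github.com/rackspace/gophercloud/openstack/networking/v2/extensions/fwaas/firewalls"
    "github.com/rackspace/gophercloud/pagination"
)

// printFirewalls pages through every firewall visible to the tenant and
// prints a one-line summary per firewall.
func printFirewalls(c *gophercloud.ServiceClient) error {
    opts := firewalls.ListOpts{Limit: 20} // server-side page size; an assumption
    return firewalls.List(c, opts).EachPage(func(page pagination.Page) (bool, error) {
        fws, err := firewalls.ExtractFirewalls(page)
        if err != nil {
            return false, err
        }
        for _, fw := range fws {
            fmt.Printf("%s %s policy=%s\n", fw.ID, fw.Status, fw.PolicyID)
        }
        return true, nil // keep iterating until the last page
    })
}
```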
+func List(c *gophercloud.ServiceClient, opts ListOptsBuilder) pagination.Pager { + url := rootURL(c) + + if opts != nil { + query, err := opts.ToFirewallListQuery() + if err != nil { + return pagination.Pager{Err: err} + } + url += query + } + + return pagination.NewPager(c, url, func(r pagination.PageResult) pagination.Page { + return FirewallPage{pagination.LinkedPageBase{PageResult: r}} + }) +} + +// CreateOptsBuilder is the interface options structs have to satisfy in order +// to be used in the main Create operation in this package. Since many +// extensions decorate or modify the common logic, it is useful for them to +// satisfy a basic interface in order for them to be used. +type CreateOptsBuilder interface { + ToFirewallCreateMap() (map[string]interface{}, error) +} + +// CreateOpts contains all the values needed to create a new firewall. +type CreateOpts struct { + // Only required if the caller has an admin role and wants to create a firewall + // for another tenant. + TenantID string + Name string + Description string + AdminStateUp *bool + Shared *bool + PolicyID string +} + +// ToFirewallCreateMap casts a CreateOpts struct to a map. +func (opts CreateOpts) ToFirewallCreateMap() (map[string]interface{}, error) { + if opts.PolicyID == "" { + return nil, errPolicyRequired + } + + f := make(map[string]interface{}) + + if opts.TenantID != "" { + f["tenant_id"] = opts.TenantID + } + if opts.Name != "" { + f["name"] = opts.Name + } + if opts.Description != "" { + f["description"] = opts.Description + } + if opts.Shared != nil { + f["shared"] = *opts.Shared + } + if opts.AdminStateUp != nil { + f["admin_state_up"] = *opts.AdminStateUp + } + if opts.PolicyID != "" { + f["firewall_policy_id"] = opts.PolicyID + } + + return map[string]interface{}{"firewall": f}, nil +} + +// Create accepts a CreateOpts struct and uses the values to create a new firewall +func Create(c *gophercloud.ServiceClient, opts CreateOptsBuilder) CreateResult { + var res CreateResult + + reqBody, err := opts.ToFirewallCreateMap() + if err != nil { + res.Err = err + return res + } + + _, res.Err = c.Post(rootURL(c), reqBody, &res.Body, nil) + return res +} + +// Get retrieves a particular firewall based on its unique ID. +func Get(c *gophercloud.ServiceClient, id string) GetResult { + var res GetResult + _, res.Err = c.Get(resourceURL(c, id), &res.Body, nil) + return res +} + +// UpdateOptsBuilder is the interface options structs have to satisfy in order +// to be used in the main Update operation in this package. Since many +// extensions decorate or modify the common logic, it is useful for them to +// satisfy a basic interface in order for them to be used. +type UpdateOptsBuilder interface { + ToFirewallUpdateMap() (map[string]interface{}, error) +} + +// UpdateOpts contains the values used when updating a firewall. +type UpdateOpts struct { + // Name of the firewall. + Name string + Description string + AdminStateUp *bool + Shared *bool + PolicyID string +} + +// ToFirewallUpdateMap casts a CreateOpts struct to a map. 
+func (opts UpdateOpts) ToFirewallUpdateMap() (map[string]interface{}, error) { + f := make(map[string]interface{}) + + if opts.Name != "" { + f["name"] = opts.Name + } + if opts.Description != "" { + f["description"] = opts.Description + } + if opts.Shared != nil { + f["shared"] = *opts.Shared + } + if opts.AdminStateUp != nil { + f["admin_state_up"] = *opts.AdminStateUp + } + if opts.PolicyID != "" { + f["firewall_policy_id"] = opts.PolicyID + } + + return map[string]interface{}{"firewall": f}, nil +} + +// Update allows firewalls to be updated. +func Update(c *gophercloud.ServiceClient, id string, opts UpdateOptsBuilder) UpdateResult { + var res UpdateResult + + reqBody, err := opts.ToFirewallUpdateMap() + if err != nil { + res.Err = err + return res + } + + // Send request to API + _, res.Err = c.Put(resourceURL(c, id), reqBody, &res.Body, &gophercloud.RequestOpts{ + OkCodes: []int{200}, + }) + return res +} + +// Delete will permanently delete a particular firewall based on its unique ID. +func Delete(c *gophercloud.ServiceClient, id string) DeleteResult { + var res DeleteResult + _, res.Err = c.Delete(resourceURL(c, id), nil) + return res +} diff --git a/vendor/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/fwaas/firewalls/requests_test.go b/vendor/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/fwaas/firewalls/requests_test.go new file mode 100644 index 000000000..19d32c5ac --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/fwaas/firewalls/requests_test.go @@ -0,0 +1,246 @@ +package firewalls + +import ( + "fmt" + "net/http" + "testing" + + fake "github.com/rackspace/gophercloud/openstack/networking/v2/common" + "github.com/rackspace/gophercloud/pagination" + th "github.com/rackspace/gophercloud/testhelper" +) + +func TestURLs(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + th.AssertEquals(t, th.Endpoint()+"v2.0/fw/firewalls", rootURL(fake.ServiceClient())) +} + +func TestList(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + th.Mux.HandleFunc("/v2.0/fw/firewalls", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + + fmt.Fprintf(w, ` +{ + "firewalls":[ + { + "status": "ACTIVE", + "name": "fw1", + "admin_state_up": false, + "tenant_id": "b4eedccc6fb74fa8a7ad6b08382b852b", + "firewall_policy_id": "34be8c83-4d42-4dca-a74e-b77fffb8e28a", + "id": "fb5b5315-64f6-4ea3-8e58-981cc37c6f61", + "description": "OpenStack firewall 1" + }, + { + "status": "PENDING_UPDATE", + "name": "fw2", + "admin_state_up": true, + "tenant_id": "b4eedccc6fb74fa8a7ad6b08382b852b", + "firewall_policy_id": "34be8c83-4d42-4dca-a74e-b77fffb8e299", + "id": "fb5b5315-64f6-4ea3-8e58-981cc37c6f99", + "description": "OpenStack firewall 2" + } + ] +} + `) + }) + + count := 0 + + List(fake.ServiceClient(), ListOpts{}).EachPage(func(page pagination.Page) (bool, error) { + count++ + actual, err := ExtractFirewalls(page) + if err != nil { + t.Errorf("Failed to extract members: %v", err) + return false, err + } + + expected := []Firewall{ + Firewall{ + Status: "ACTIVE", + Name: "fw1", + AdminStateUp: false, + TenantID: "b4eedccc6fb74fa8a7ad6b08382b852b", + PolicyID: "34be8c83-4d42-4dca-a74e-b77fffb8e28a", + ID: "fb5b5315-64f6-4ea3-8e58-981cc37c6f61", + Description: "OpenStack firewall 1", + }, + Firewall{ + Status: "PENDING_UPDATE", + Name: "fw2", + 
AdminStateUp: true, + TenantID: "b4eedccc6fb74fa8a7ad6b08382b852b", + PolicyID: "34be8c83-4d42-4dca-a74e-b77fffb8e299", + ID: "fb5b5315-64f6-4ea3-8e58-981cc37c6f99", + Description: "OpenStack firewall 2", + }, + } + + th.CheckDeepEquals(t, expected, actual) + + return true, nil + }) + + if count != 1 { + t.Errorf("Expected 1 page, got %d", count) + } +} + +func TestCreate(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + th.Mux.HandleFunc("/v2.0/fw/firewalls", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "POST") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + th.TestHeader(t, r, "Content-Type", "application/json") + th.TestHeader(t, r, "Accept", "application/json") + th.TestJSONRequest(t, r, ` +{ + "firewall":{ + "name": "fw", + "description": "OpenStack firewall", + "admin_state_up": true, + "firewall_policy_id": "19ab8c87-4a32-4e6a-a74e-b77fffb89a0c", + "tenant_id": "b4eedccc6fb74fa8a7ad6b08382b852b" + } +} + `) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusCreated) + + fmt.Fprintf(w, ` +{ + "firewall":{ + "status": "PENDING_CREATE", + "name": "fw", + "description": "OpenStack firewall", + "admin_state_up": true, + "tenant_id": "b4eedccc6fb74fa8a7ad6b08382b852b", + "firewall_policy_id": "19ab8c87-4a32-4e6a-a74e-b77fffb89a0c" + } +} + `) + }) + + options := CreateOpts{ + TenantID: "b4eedccc6fb74fa8a7ad6b08382b852b", + Name: "fw", + Description: "OpenStack firewall", + AdminStateUp: Up, + PolicyID: "19ab8c87-4a32-4e6a-a74e-b77fffb89a0c", + } + _, err := Create(fake.ServiceClient(), options).Extract() + th.AssertNoErr(t, err) +} + +func TestGet(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + th.Mux.HandleFunc("/v2.0/fw/firewalls/fb5b5315-64f6-4ea3-8e58-981cc37c6f61", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + + fmt.Fprintf(w, ` +{ + "firewall": { + "status": "ACTIVE", + "name": "fw", + "admin_state_up": true, + "tenant_id": "b4eedccc6fb74fa8a7ad6b08382b852b", + "firewall_policy_id": "34be8c83-4d42-4dca-a74e-b77fffb8e28a", + "id": "fb5b5315-64f6-4ea3-8e58-981cc37c6f61", + "description": "OpenStack firewall" + } +} + `) + }) + + fw, err := Get(fake.ServiceClient(), "fb5b5315-64f6-4ea3-8e58-981cc37c6f61").Extract() + th.AssertNoErr(t, err) + + th.AssertEquals(t, "ACTIVE", fw.Status) + th.AssertEquals(t, "fw", fw.Name) + th.AssertEquals(t, "OpenStack firewall", fw.Description) + th.AssertEquals(t, true, fw.AdminStateUp) + th.AssertEquals(t, "34be8c83-4d42-4dca-a74e-b77fffb8e28a", fw.PolicyID) + th.AssertEquals(t, "fb5b5315-64f6-4ea3-8e58-981cc37c6f61", fw.ID) + th.AssertEquals(t, "b4eedccc6fb74fa8a7ad6b08382b852b", fw.TenantID) +} + +func TestUpdate(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + th.Mux.HandleFunc("/v2.0/fw/firewalls/ea5b5315-64f6-4ea3-8e58-981cc37c6576", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "PUT") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + th.TestHeader(t, r, "Content-Type", "application/json") + th.TestHeader(t, r, "Accept", "application/json") + th.TestJSONRequest(t, r, ` +{ + "firewall":{ + "name": "fw", + "description": "updated fw", + "admin_state_up":false, + "firewall_policy_id": "19ab8c87-4a32-4e6a-a74e-b77fffb89a0c" + } +} + `) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + + fmt.Fprintf(w, ` +{ + "firewall": 
{
+        "status": "ACTIVE",
+        "name": "fw",
+        "admin_state_up": false,
+        "tenant_id": "b4eedccc6fb74fa8a7ad6b08382b852b",
+        "firewall_policy_id": "19ab8c87-4a32-4e6a-a74e-b77fffb89a0c",
+        "id": "ea5b5315-64f6-4ea3-8e58-981cc37c6576",
+        "description": "OpenStack firewall"
+    }
+}
+    `)
+    })
+
+    options := UpdateOpts{
+        Name:         "fw",
+        Description:  "updated fw",
+        AdminStateUp: Down,
+        PolicyID:     "19ab8c87-4a32-4e6a-a74e-b77fffb89a0c",
+    }
+
+    _, err := Update(fake.ServiceClient(), "ea5b5315-64f6-4ea3-8e58-981cc37c6576", options).Extract()
+    th.AssertNoErr(t, err)
+}
+
+func TestDelete(t *testing.T) {
+    th.SetupHTTP()
+    defer th.TeardownHTTP()
+
+    th.Mux.HandleFunc("/v2.0/fw/firewalls/4ec89087-d057-4e2c-911f-60a3b47ee304", func(w http.ResponseWriter, r *http.Request) {
+        th.TestMethod(t, r, "DELETE")
+        th.TestHeader(t, r, "X-Auth-Token", fake.TokenID)
+        w.WriteHeader(http.StatusNoContent)
+    })
+
+    res := Delete(fake.ServiceClient(), "4ec89087-d057-4e2c-911f-60a3b47ee304")
+    th.AssertNoErr(t, res.Err)
+}
diff --git a/vendor/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/fwaas/firewalls/results.go b/vendor/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/fwaas/firewalls/results.go
new file mode 100644
index 000000000..a8c76eef2
--- /dev/null
+++ b/vendor/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/fwaas/firewalls/results.go
@@ -0,0 +1,101 @@
+package firewalls
+
+import (
+    "github.com/mitchellh/mapstructure"
+    "github.com/rackspace/gophercloud"
+    "github.com/rackspace/gophercloud/pagination"
+)
+
+// Firewall represents an OpenStack firewall resource.
+type Firewall struct {
+    ID           string `json:"id" mapstructure:"id"`
+    Name         string `json:"name" mapstructure:"name"`
+    Description  string `json:"description" mapstructure:"description"`
+    AdminStateUp bool   `json:"admin_state_up" mapstructure:"admin_state_up"`
+    Status       string `json:"status" mapstructure:"status"`
+    PolicyID     string `json:"firewall_policy_id" mapstructure:"firewall_policy_id"`
+    TenantID     string `json:"tenant_id" mapstructure:"tenant_id"`
+}
+
+type commonResult struct {
+    gophercloud.Result
+}
+
+// Extract is a function that accepts a result and extracts a firewall.
+func (r commonResult) Extract() (*Firewall, error) {
+    if r.Err != nil {
+        return nil, r.Err
+    }
+
+    var res struct {
+        Firewall *Firewall `json:"firewall"`
+    }
+
+    err := mapstructure.Decode(r.Body, &res)
+
+    return res.Firewall, err
+}
+
+// FirewallPage is the page returned by a pager when traversing over a
+// collection of firewalls.
+type FirewallPage struct {
+    pagination.LinkedPageBase
+}
+
+// NextPageURL is invoked when a paginated collection of firewalls has reached
+// the end of a page and the pager seeks to traverse over a new one. In order
+// to do this, it needs to construct the next page's URL.
+func (p FirewallPage) NextPageURL() (string, error) {
+    type resp struct {
+        Links []gophercloud.Link `mapstructure:"firewalls_links"`
+    }
+
+    var r resp
+    err := mapstructure.Decode(p.Body, &r)
+    if err != nil {
+        return "", err
+    }
+
+    return gophercloud.ExtractNextURL(r.Links)
+}
+
+// IsEmpty checks whether a FirewallPage struct is empty.
+func (p FirewallPage) IsEmpty() (bool, error) {
+    is, err := ExtractFirewalls(p)
+    if err != nil {
+        return true, err
+    }
+    return len(is) == 0, nil
+}
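A short, hypothetical update sketch tying UpdateOpts, the Down enum, and Extract together; the function name and the IDs are placeholders, and the client is assumed to be authenticated.

```go
package main

import (
    "github.com/rackspace/gophercloud"
    "github.com/rackspace/gophercloud/openstack/networking/v2/extensions/fwaas/firewalls"
)

// rotatePolicy points an existing firewall at a new policy and marks it
// administratively down while the change is applied.
func rotatePolicy(c *gophercloud.ServiceClient, firewallID, policyID string) (*firewalls.Firewall, error) {
    opts := firewalls.UpdateOpts{
        PolicyID:     policyID,
        AdminStateUp: firewalls.Down, // *bool enum defined in requests.go
    }
    return firewalls.Update(c, firewallID, opts).Extract()
}
```

+// ExtractFirewalls accepts a Page struct, specifically a FirewallPage struct,
+// and extracts the elements into a slice of Firewall structs. In other words,
+// a generic collection is mapped into a relevant slice.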
+func ExtractFirewalls(page pagination.Page) ([]Firewall, error) { + var resp struct { + Firewalls []Firewall `mapstructure:"firewalls" json:"firewalls"` + } + + err := mapstructure.Decode(page.(FirewallPage).Body, &resp) + + return resp.Firewalls, err +} + +// GetResult represents the result of a get operation. +type GetResult struct { + commonResult +} + +// UpdateResult represents the result of an update operation. +type UpdateResult struct { + commonResult +} + +// DeleteResult represents the result of a delete operation. +type DeleteResult struct { + gophercloud.ErrResult +} + +// CreateResult represents the result of a create operation. +type CreateResult struct { + commonResult +} diff --git a/vendor/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/fwaas/firewalls/urls.go b/vendor/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/fwaas/firewalls/urls.go new file mode 100644 index 000000000..4dde53005 --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/fwaas/firewalls/urls.go @@ -0,0 +1,16 @@ +package firewalls + +import "github.com/rackspace/gophercloud" + +const ( + rootPath = "fw" + resourcePath = "firewalls" +) + +func rootURL(c *gophercloud.ServiceClient) string { + return c.ServiceURL(rootPath, resourcePath) +} + +func resourceURL(c *gophercloud.ServiceClient, id string) string { + return c.ServiceURL(rootPath, resourcePath, id) +} diff --git a/vendor/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/fwaas/policies/requests.go b/vendor/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/fwaas/policies/requests.go new file mode 100644 index 000000000..fe07d9abb --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/fwaas/policies/requests.go @@ -0,0 +1,243 @@ +package policies + +import ( + "github.com/rackspace/gophercloud" + "github.com/rackspace/gophercloud/pagination" +) + +// Binary gives users a solid type to work with for create and update +// operations. It is recommended that users use the `Yes` and `No` enums +type Binary *bool + +// Convenience vars for Audited and Shared values. +var ( + iTrue = true + iFalse = false + Yes Binary = &iTrue + No Binary = &iFalse +) + +// ListOptsBuilder allows extensions to add additional parameters to the +// List request. +type ListOptsBuilder interface { + ToPolicyListQuery() (string, error) +} + +// ListOpts allows the filtering and sorting of paginated collections through +// the API. Filtering is achieved by passing in struct field values that map to +// the firewall policy attributes you want to see returned. SortKey allows you +// to sort by a particular firewall policy attribute. SortDir sets the direction, +// and is either `asc' or `desc'. Marker and Limit are used for pagination. +type ListOpts struct { + TenantID string `q:"tenant_id"` + Name string `q:"name"` + Description string `q:"description"` + Shared bool `q:"shared"` + Audited bool `q:"audited"` + ID string `q:"id"` + Limit int `q:"limit"` + Marker string `q:"marker"` + SortKey string `q:"sort_key"` + SortDir string `q:"sort_dir"` +} + +// ToPolicyListQuery formats a ListOpts into a query string. +func (opts ListOpts) ToPolicyListQuery() (string, error) { + q, err := gophercloud.BuildQueryString(opts) + if err != nil { + return "", err + } + return q.String(), nil +} + +// List returns a Pager which allows you to iterate over a collection of +// firewall policies. 
It accepts a ListOpts struct, which allows you to filter +// and sort the returned collection for greater efficiency. +// +// Default policy settings return only those firewall policies that are owned by the +// tenant who submits the request, unless an admin user submits the request. +func List(c *gophercloud.ServiceClient, opts ListOptsBuilder) pagination.Pager { + url := rootURL(c) + + if opts != nil { + query, err := opts.ToPolicyListQuery() + if err != nil { + return pagination.Pager{Err: err} + } + url += query + } + + return pagination.NewPager(c, url, func(r pagination.PageResult) pagination.Page { + return PolicyPage{pagination.LinkedPageBase{PageResult: r}} + }) +} + +// CreateOptsBuilder is the interface options structs have to satisfy in order +// to be used in the main Create operation in this package. Since many +// extensions decorate or modify the common logic, it is useful for them to +// satisfy a basic interface in order for them to be used. +type CreateOptsBuilder interface { + ToPolicyCreateMap() (map[string]interface{}, error) +} + +// CreateOpts contains all the values needed to create a new firewall policy. +type CreateOpts struct { + // Only required if the caller has an admin role and wants to create a firewall policy + // for another tenant. + TenantID string + Name string + Description string + Shared *bool + Audited *bool + Rules []string +} + +// ToPolicyCreateMap casts a CreateOpts struct to a map. +func (opts CreateOpts) ToPolicyCreateMap() (map[string]interface{}, error) { + p := make(map[string]interface{}) + + if opts.TenantID != "" { + p["tenant_id"] = opts.TenantID + } + if opts.Name != "" { + p["name"] = opts.Name + } + if opts.Description != "" { + p["description"] = opts.Description + } + if opts.Shared != nil { + p["shared"] = *opts.Shared + } + if opts.Audited != nil { + p["audited"] = *opts.Audited + } + if opts.Rules != nil { + p["firewall_rules"] = opts.Rules + } + + return map[string]interface{}{"firewall_policy": p}, nil +} + +// Create accepts a CreateOpts struct and uses the values to create a new firewall policy +func Create(c *gophercloud.ServiceClient, opts CreateOptsBuilder) CreateResult { + var res CreateResult + + reqBody, err := opts.ToPolicyCreateMap() + if err != nil { + res.Err = err + return res + } + + _, res.Err = c.Post(rootURL(c), reqBody, &res.Body, nil) + return res +} + +// Get retrieves a particular firewall policy based on its unique ID. +func Get(c *gophercloud.ServiceClient, id string) GetResult { + var res GetResult + _, res.Err = c.Get(resourceURL(c, id), &res.Body, nil) + return res +} + +// UpdateOptsBuilder is the interface options structs have to satisfy in order +// to be used in the main Update operation in this package. Since many +// extensions decorate or modify the common logic, it is useful for them to +// satisfy a basic interface in order for them to be used. +type UpdateOptsBuilder interface { + ToPolicyUpdateMap() (map[string]interface{}, error) +} + +// UpdateOpts contains the values used when updating a firewall policy. +type UpdateOpts struct { + // Name of the firewall policy. + Name string + Description string + Shared *bool + Audited *bool + Rules []string +} + +// ToPolicyUpdateMap casts a CreateOpts struct to a map. 
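Before the update and rule-manipulation helpers that follow, a hypothetical end-to-end sketch: create a policy from existing rule IDs, then reorder it with InsertRule (defined later in this file). The policy name and rule IDs are placeholders, and ruleIDs is assumed to be non-empty.

```go
package main

import (
    "github.com/rackspace/gophercloud"
    "github.com/rackspace/gophercloud/openstack/networking/v2/extensions/fwaas/policies"
)

// newAuditedPolicy creates an audited, non-shared policy from ruleIDs and
// then inserts firstRule ahead of the current first rule. Yes and No are
// the *bool enums declared at the top of this package.
func newAuditedPolicy(c *gophercloud.ServiceClient, ruleIDs []string, firstRule string) (*policies.Policy, error) {
    p, err := policies.Create(c, policies.CreateOpts{
        Name:    "web-policy", // placeholder name
        Audited: policies.Yes,
        Shared:  policies.No,
        Rules:   ruleIDs,
    }).Extract()
    if err != nil {
        return nil, err
    }
    // Insert firstRule before the policy's current first rule; the afterID
    // argument is left empty because insert_after is omitempty.
    if err := policies.InsertRule(c, p.ID, firstRule, ruleIDs[0], ""); err != nil {
        return nil, err
    }
    return p, nil
}
```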
+func (opts UpdateOpts) ToPolicyUpdateMap() (map[string]interface{}, error) { + p := make(map[string]interface{}) + + if opts.Name != "" { + p["name"] = opts.Name + } + if opts.Description != "" { + p["description"] = opts.Description + } + if opts.Shared != nil { + p["shared"] = *opts.Shared + } + if opts.Audited != nil { + p["audited"] = *opts.Audited + } + if opts.Rules != nil { + p["firewall_rules"] = opts.Rules + } + + return map[string]interface{}{"firewall_policy": p}, nil +} + +// Update allows firewall policies to be updated. +func Update(c *gophercloud.ServiceClient, id string, opts UpdateOptsBuilder) UpdateResult { + var res UpdateResult + + reqBody, err := opts.ToPolicyUpdateMap() + if err != nil { + res.Err = err + return res + } + + // Send request to API + _, res.Err = c.Put(resourceURL(c, id), reqBody, &res.Body, &gophercloud.RequestOpts{ + OkCodes: []int{200}, + }) + return res +} + +// Delete will permanently delete a particular firewall policy based on its unique ID. +func Delete(c *gophercloud.ServiceClient, id string) DeleteResult { + var res DeleteResult + _, res.Err = c.Delete(resourceURL(c, id), nil) + return res +} + +func InsertRule(c *gophercloud.ServiceClient, policyID, ruleID, beforeID, afterID string) error { + type request struct { + RuleId string `json:"firewall_rule_id"` + Before string `json:"insert_before,omitempty"` + After string `json:"insert_after,omitempty"` + } + + reqBody := request{ + RuleId: ruleID, + Before: beforeID, + After: afterID, + } + + // Send request to API + var res commonResult + _, res.Err = c.Put(insertURL(c, policyID), reqBody, &res.Body, &gophercloud.RequestOpts{ + OkCodes: []int{200}, + }) + return res.Err +} + +func RemoveRule(c *gophercloud.ServiceClient, policyID, ruleID string) error { + type request struct { + RuleId string `json:"firewall_rule_id"` + } + + reqBody := request{ + RuleId: ruleID, + } + + // Send request to API + var res commonResult + _, res.Err = c.Put(removeURL(c, policyID), reqBody, &res.Body, &gophercloud.RequestOpts{ + OkCodes: []int{200}, + }) + return res.Err +} diff --git a/vendor/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/fwaas/policies/requests_test.go b/vendor/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/fwaas/policies/requests_test.go new file mode 100644 index 000000000..b9d78652c --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/fwaas/policies/requests_test.go @@ -0,0 +1,279 @@ +package policies + +import ( + "fmt" + "net/http" + "testing" + + fake "github.com/rackspace/gophercloud/openstack/networking/v2/common" + "github.com/rackspace/gophercloud/pagination" + th "github.com/rackspace/gophercloud/testhelper" +) + +func TestURLs(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + th.AssertEquals(t, th.Endpoint()+"v2.0/fw/firewall_policies", rootURL(fake.ServiceClient())) +} + +func TestList(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + th.Mux.HandleFunc("/v2.0/fw/firewall_policies", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + + fmt.Fprintf(w, ` +{ + "firewall_policies": [ + { + "name": "policy1", + "firewall_rules": [ + "75452b36-268e-4e75-aaf4-f0e7ed50bc97", + "c9e77ca0-1bc8-497d-904d-948107873dc6" + ], + "tenant_id": "9145d91459d248b1b02fdaca97c6a75d", + "audited": true, + "shared": false, + "id": 
"f2b08c1e-aa81-4668-8ae1-1401bcb0576c", + "description": "Firewall policy 1" + }, + { + "name": "policy2", + "firewall_rules": [ + "03d2a6ad-633f-431a-8463-4370d06a22c8" + ], + "tenant_id": "9145d91459d248b1b02fdaca97c6a75d", + "audited": false, + "shared": true, + "id": "c854fab5-bdaf-4a86-9359-78de93e5df01", + "description": "Firewall policy 2" + } + ] +} + `) + }) + + count := 0 + + List(fake.ServiceClient(), ListOpts{}).EachPage(func(page pagination.Page) (bool, error) { + count++ + actual, err := ExtractPolicies(page) + if err != nil { + t.Errorf("Failed to extract members: %v", err) + return false, err + } + + expected := []Policy{ + Policy{ + Name: "policy1", + Rules: []string{ + "75452b36-268e-4e75-aaf4-f0e7ed50bc97", + "c9e77ca0-1bc8-497d-904d-948107873dc6", + }, + TenantID: "9145d91459d248b1b02fdaca97c6a75d", + Audited: true, + Shared: false, + ID: "f2b08c1e-aa81-4668-8ae1-1401bcb0576c", + Description: "Firewall policy 1", + }, + Policy{ + Name: "policy2", + Rules: []string{ + "03d2a6ad-633f-431a-8463-4370d06a22c8", + }, + TenantID: "9145d91459d248b1b02fdaca97c6a75d", + Audited: false, + Shared: true, + ID: "c854fab5-bdaf-4a86-9359-78de93e5df01", + Description: "Firewall policy 2", + }, + } + + th.CheckDeepEquals(t, expected, actual) + + return true, nil + }) + + if count != 1 { + t.Errorf("Expected 1 page, got %d", count) + } +} + +func TestCreate(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + th.Mux.HandleFunc("/v2.0/fw/firewall_policies", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "POST") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + th.TestHeader(t, r, "Content-Type", "application/json") + th.TestHeader(t, r, "Accept", "application/json") + th.TestJSONRequest(t, r, ` +{ + "firewall_policy":{ + "name": "policy", + "firewall_rules": [ + "98a58c87-76be-ae7c-a74e-b77fffb88d95", + "11a58c87-76be-ae7c-a74e-b77fffb88a32" + ], + "description": "Firewall policy", + "tenant_id": "9145d91459d248b1b02fdaca97c6a75d", + "audited": true, + "shared": false + } +} + `) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusCreated) + + fmt.Fprintf(w, ` +{ + "firewall_policy":{ + "name": "policy", + "firewall_rules": [ + "98a58c87-76be-ae7c-a74e-b77fffb88d95", + "11a58c87-76be-ae7c-a74e-b77fffb88a32" + ], + "tenant_id": "9145d91459d248b1b02fdaca97c6a75d", + "audited": false, + "id": "f2b08c1e-aa81-4668-8ae1-1401bcb0576c", + "description": "Firewall policy" + } +} + `) + }) + + options := CreateOpts{ + TenantID: "9145d91459d248b1b02fdaca97c6a75d", + Name: "policy", + Description: "Firewall policy", + Shared: No, + Audited: Yes, + Rules: []string{ + "98a58c87-76be-ae7c-a74e-b77fffb88d95", + "11a58c87-76be-ae7c-a74e-b77fffb88a32", + }, + } + + _, err := Create(fake.ServiceClient(), options).Extract() + th.AssertNoErr(t, err) +} + +func TestGet(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + th.Mux.HandleFunc("/v2.0/fw/firewall_policies/bcab5315-64f6-4ea3-8e58-981cc37c6f61", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + + fmt.Fprintf(w, ` +{ + "firewall_policy":{ + "name": "www", + "firewall_rules": [ + "75452b36-268e-4e75-aaf4-f0e7ed50bc97", + "c9e77ca0-1bc8-497d-904d-948107873dc6", + "03d2a6ad-633f-431a-8463-4370d06a22c8" + ], + "tenant_id": "9145d91459d248b1b02fdaca97c6a75d", + "audited": false, + "id": 
"f2b08c1e-aa81-4668-8ae1-1401bcb0576c", + "description": "Firewall policy web" + } +} + `) + }) + + policy, err := Get(fake.ServiceClient(), "bcab5315-64f6-4ea3-8e58-981cc37c6f61").Extract() + th.AssertNoErr(t, err) + + th.AssertEquals(t, "www", policy.Name) + th.AssertEquals(t, "f2b08c1e-aa81-4668-8ae1-1401bcb0576c", policy.ID) + th.AssertEquals(t, "Firewall policy web", policy.Description) + th.AssertEquals(t, 3, len(policy.Rules)) + th.AssertEquals(t, "75452b36-268e-4e75-aaf4-f0e7ed50bc97", policy.Rules[0]) + th.AssertEquals(t, "c9e77ca0-1bc8-497d-904d-948107873dc6", policy.Rules[1]) + th.AssertEquals(t, "03d2a6ad-633f-431a-8463-4370d06a22c8", policy.Rules[2]) + th.AssertEquals(t, "9145d91459d248b1b02fdaca97c6a75d", policy.TenantID) +} + +func TestUpdate(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + th.Mux.HandleFunc("/v2.0/fw/firewall_policies/f2b08c1e-aa81-4668-8ae1-1401bcb0576c", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "PUT") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + th.TestHeader(t, r, "Content-Type", "application/json") + th.TestHeader(t, r, "Accept", "application/json") + th.TestJSONRequest(t, r, ` +{ + "firewall_policy":{ + "name": "policy", + "firewall_rules": [ + "98a58c87-76be-ae7c-a74e-b77fffb88d95", + "11a58c87-76be-ae7c-a74e-b77fffb88a32" + ], + "description": "Firewall policy" + } +} + `) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + + fmt.Fprintf(w, ` +{ + "firewall_policy":{ + "name": "policy", + "firewall_rules": [ + "75452b36-268e-4e75-aaf4-f0e7ed50bc97", + "c9e77ca0-1bc8-497d-904d-948107873dc6", + "03d2a6ad-633f-431a-8463-4370d06a22c8" + ], + "tenant_id": "9145d91459d248b1b02fdaca97c6a75d", + "audited": false, + "id": "f2b08c1e-aa81-4668-8ae1-1401bcb0576c", + "description": "Firewall policy" + } +} + `) + }) + + options := UpdateOpts{ + Name: "policy", + Description: "Firewall policy", + Rules: []string{ + "98a58c87-76be-ae7c-a74e-b77fffb88d95", + "11a58c87-76be-ae7c-a74e-b77fffb88a32", + }, + } + + _, err := Update(fake.ServiceClient(), "f2b08c1e-aa81-4668-8ae1-1401bcb0576c", options).Extract() + th.AssertNoErr(t, err) +} + +func TestDelete(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + th.Mux.HandleFunc("/v2.0/fw/firewall_policies/4ec89077-d057-4a2b-911f-60a3b47ee304", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "DELETE") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + w.WriteHeader(http.StatusNoContent) + }) + + res := Delete(fake.ServiceClient(), "4ec89077-d057-4a2b-911f-60a3b47ee304") + th.AssertNoErr(t, res.Err) +} diff --git a/vendor/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/fwaas/policies/results.go b/vendor/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/fwaas/policies/results.go new file mode 100644 index 000000000..a9a0c358d --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/fwaas/policies/results.go @@ -0,0 +1,101 @@ +package policies + +import ( + "github.com/mitchellh/mapstructure" + "github.com/rackspace/gophercloud" + "github.com/rackspace/gophercloud/pagination" +) + +type Policy struct { + ID string `json:"id" mapstructure:"id"` + Name string `json:"name" mapstructure:"name"` + Description string `json:"description" mapstructure:"description"` + TenantID string `json:"tenant_id" mapstructure:"tenant_id"` + Audited bool `json:"audited" mapstructure:"audited"` + Shared bool `json:"shared" mapstructure:"shared"` + Rules 
[]string `json:"firewall_rules,omitempty" mapstructure:"firewall_rules"`
+}
+
+type commonResult struct {
+    gophercloud.Result
+}
+
+// Extract is a function that accepts a result and extracts a firewall policy.
+func (r commonResult) Extract() (*Policy, error) {
+    if r.Err != nil {
+        return nil, r.Err
+    }
+
+    var res struct {
+        Policy *Policy `json:"firewall_policy" mapstructure:"firewall_policy"`
+    }
+
+    err := mapstructure.Decode(r.Body, &res)
+
+    return res.Policy, err
+}
+
+// PolicyPage is the page returned by a pager when traversing over a
+// collection of firewall policies.
+type PolicyPage struct {
+    pagination.LinkedPageBase
+}
+
+// NextPageURL is invoked when a paginated collection of firewall policies has
+// reached the end of a page and the pager seeks to traverse over a new one.
+// In order to do this, it needs to construct the next page's URL.
+func (p PolicyPage) NextPageURL() (string, error) {
+    type resp struct {
+        Links []gophercloud.Link `mapstructure:"firewall_policies_links"`
+    }
+
+    var r resp
+    err := mapstructure.Decode(p.Body, &r)
+    if err != nil {
+        return "", err
+    }
+
+    return gophercloud.ExtractNextURL(r.Links)
+}
+
+// IsEmpty checks whether a PolicyPage struct is empty.
+func (p PolicyPage) IsEmpty() (bool, error) {
+    is, err := ExtractPolicies(p)
+    if err != nil {
+        return true, err
+    }
+    return len(is) == 0, nil
+}
+
+// ExtractPolicies accepts a Page struct, specifically a PolicyPage struct,
+// and extracts the elements into a slice of Policy structs. In other words,
+// a generic collection is mapped into a relevant slice.
+func ExtractPolicies(page pagination.Page) ([]Policy, error) {
+    var resp struct {
+        Policies []Policy `mapstructure:"firewall_policies" json:"firewall_policies"`
+    }
+
+    err := mapstructure.Decode(page.(PolicyPage).Body, &resp)
+
+    return resp.Policies, err
+}
+
+// GetResult represents the result of a get operation.
+type GetResult struct {
+    commonResult
+}
+
+// UpdateResult represents the result of an update operation.
+type UpdateResult struct {
+    commonResult
+}
+
+// DeleteResult represents the result of a delete operation.
+type DeleteResult struct {
+    gophercloud.ErrResult
+}
+
+// CreateResult represents the result of a create operation.
+// CreateResult represents the result of a create operation.
+type CreateResult struct {
+	commonResult
+}
diff --git a/vendor/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/fwaas/policies/urls.go b/vendor/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/fwaas/policies/urls.go
new file mode 100644
index 000000000..27ea9ae61
--- /dev/null
+++ b/vendor/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/fwaas/policies/urls.go
@@ -0,0 +1,26 @@
+package policies
+
+import "github.com/rackspace/gophercloud"
+
+const (
+	rootPath     = "fw"
+	resourcePath = "firewall_policies"
+	insertPath   = "insert_rule"
+	removePath   = "remove_rule"
+)
+
+func rootURL(c *gophercloud.ServiceClient) string {
+	return c.ServiceURL(rootPath, resourcePath)
+}
+
+func resourceURL(c *gophercloud.ServiceClient, id string) string {
+	return c.ServiceURL(rootPath, resourcePath, id)
+}
+
+func insertURL(c *gophercloud.ServiceClient, id string) string {
+	return c.ServiceURL(rootPath, resourcePath, id, insertPath)
+}
+
+func removeURL(c *gophercloud.ServiceClient, id string) string {
+	return c.ServiceURL(rootPath, resourcePath, id, removePath)
+}
diff --git a/vendor/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/fwaas/rules/errors.go b/vendor/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/fwaas/rules/errors.go
new file mode 100644
index 000000000..0b29d39fd
--- /dev/null
+++ b/vendor/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/fwaas/rules/errors.go
@@ -0,0 +1,12 @@
+package rules
+
+import "fmt"
+
+func err(str string) error {
+	return fmt.Errorf("%s", str)
+}
+
+var (
+	errProtocolRequired = err("A protocol is required (tcp, udp, icmp or any)")
+	errActionRequired   = err("An action is required (allow or deny)")
+)
diff --git a/vendor/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/fwaas/rules/requests.go b/vendor/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/fwaas/rules/requests.go
new file mode 100644
index 000000000..57a0e8baf
--- /dev/null
+++ b/vendor/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/fwaas/rules/requests.go
@@ -0,0 +1,285 @@
+package rules
+
+import (
+	"github.com/rackspace/gophercloud"
+	"github.com/rackspace/gophercloud/pagination"
+)
+
+// Binary gives users a solid type to work with for create and update
+// operations. It is recommended that users use the `Yes` and `No` enums.
+type Binary *bool
+
+// Convenience vars for Enabled and Shared values.
+var (
+	iTrue         = true
+	iFalse        = false
+	Yes    Binary = &iTrue
+	No     Binary = &iFalse
+)
+
+// ListOptsBuilder allows extensions to add additional parameters to the
+// List request.
+type ListOptsBuilder interface {
+	ToRuleListQuery() (string, error)
+}
+
+// ListOpts allows the filtering and sorting of paginated collections through
+// the API. Filtering is achieved by passing in struct field values that map to
+// the firewall rule attributes you want to see returned. SortKey allows you to
+// sort by a particular firewall rule attribute. SortDir sets the direction, and is
+// either `asc' or `desc'. Marker and Limit are used for pagination.
+type ListOpts struct { + TenantID string `q:"tenant_id"` + Name string `q:"name"` + Description string `q:"description"` + Protocol string `q:"protocol"` + Action string `q:"action"` + IPVersion int `q:"ip_version"` + SourceIPAddress string `q:"source_ip_address"` + DestinationIPAddress string `q:"destination_ip_address"` + SourcePort string `q:"source_port"` + DestinationPort string `q:"destination_port"` + Enabled bool `q:"enabled"` + ID string `q:"id"` + Limit int `q:"limit"` + Marker string `q:"marker"` + SortKey string `q:"sort_key"` + SortDir string `q:"sort_dir"` +} + +// ToRuleListQuery formats a ListOpts into a query string. +func (opts ListOpts) ToRuleListQuery() (string, error) { + q, err := gophercloud.BuildQueryString(opts) + if err != nil { + return "", err + } + return q.String(), nil +} + +// List returns a Pager which allows you to iterate over a collection of +// firewall rules. It accepts a ListOpts struct, which allows you to filter +// and sort the returned collection for greater efficiency. +// +// Default policy settings return only those firewall rules that are owned by the +// tenant who submits the request, unless an admin user submits the request. +func List(c *gophercloud.ServiceClient, opts ListOptsBuilder) pagination.Pager { + url := rootURL(c) + + if opts != nil { + query, err := opts.ToRuleListQuery() + if err != nil { + return pagination.Pager{Err: err} + } + url += query + } + + return pagination.NewPager(c, url, func(r pagination.PageResult) pagination.Page { + return RulePage{pagination.LinkedPageBase{PageResult: r}} + }) +} + +// CreateOptsBuilder is the interface options structs have to satisfy in order +// to be used in the main Create operation in this package. Since many +// extensions decorate or modify the common logic, it is useful for them to +// satisfy a basic interface in order for them to be used. +type CreateOptsBuilder interface { + ToRuleCreateMap() (map[string]interface{}, error) +} + +// CreateOpts contains all the values needed to create a new firewall rule. +type CreateOpts struct { + // Mandatory for create + Protocol string + Action string + // Optional + TenantID string + Name string + Description string + IPVersion int + SourceIPAddress string + DestinationIPAddress string + SourcePort string + DestinationPort string + Shared *bool + Enabled *bool +} + +// ToRuleCreateMap casts a CreateOpts struct to a map. 
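+//
+// For illustration, a CreateOpts like the following (field values are
+// hypothetical) yields a {"firewall_rule": {...}} map carrying only the
+// non-zero fields:
+//
+//	opts := CreateOpts{
+//		Protocol:        "tcp",
+//		Action:          "allow",
+//		DestinationPort: "22",
+//	}
+//	body, err := opts.ToRuleCreateMap()
+//	// err == nil; body == map[string]interface{}{
+//	//	"firewall_rule": map[string]interface{}{
+//	//		"protocol": "tcp", "action": "allow", "destination_port": "22",
+//	//	},
+//	// }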
+func (opts CreateOpts) ToRuleCreateMap() (map[string]interface{}, error) { + if opts.Protocol == "" { + return nil, errProtocolRequired + } + + if opts.Action == "" { + return nil, errActionRequired + } + + r := make(map[string]interface{}) + + r["protocol"] = opts.Protocol + r["action"] = opts.Action + + if opts.TenantID != "" { + r["tenant_id"] = opts.TenantID + } + if opts.Name != "" { + r["name"] = opts.Name + } + if opts.Description != "" { + r["description"] = opts.Description + } + if opts.IPVersion != 0 { + r["ip_version"] = opts.IPVersion + } + if opts.SourceIPAddress != "" { + r["source_ip_address"] = opts.SourceIPAddress + } + if opts.DestinationIPAddress != "" { + r["destination_ip_address"] = opts.DestinationIPAddress + } + if opts.SourcePort != "" { + r["source_port"] = opts.SourcePort + } + if opts.DestinationPort != "" { + r["destination_port"] = opts.DestinationPort + } + if opts.Shared != nil { + r["shared"] = *opts.Shared + } + if opts.Enabled != nil { + r["enabled"] = *opts.Enabled + } + + return map[string]interface{}{"firewall_rule": r}, nil +} + +// Create accepts a CreateOpts struct and uses the values to create a new firewall rule +func Create(c *gophercloud.ServiceClient, opts CreateOptsBuilder) CreateResult { + var res CreateResult + + reqBody, err := opts.ToRuleCreateMap() + if err != nil { + res.Err = err + return res + } + + _, res.Err = c.Post(rootURL(c), reqBody, &res.Body, nil) + return res +} + +// Get retrieves a particular firewall rule based on its unique ID. +func Get(c *gophercloud.ServiceClient, id string) GetResult { + var res GetResult + _, res.Err = c.Get(resourceURL(c, id), &res.Body, nil) + return res +} + +// UpdateOptsBuilder is the interface options structs have to satisfy in order +// to be used in the main Update operation in this package. Since many +// extensions decorate or modify the common logic, it is useful for them to +// satisfy a basic interface in order for them to be used. +type UpdateOptsBuilder interface { + ToRuleUpdateMap() (map[string]interface{}, error) +} + +// UpdateOpts contains the values used when updating a firewall rule. +// Optional +type UpdateOpts struct { + Protocol string + Action string + Name string + Description string + IPVersion int + SourceIPAddress *string + DestinationIPAddress *string + SourcePort *string + DestinationPort *string + Shared *bool + Enabled *bool +} + +// ToRuleUpdateMap casts a UpdateOpts struct to a map. 
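+//
+// The pointer fields distinguish "leave unchanged" (nil) from "clear on the
+// server" (a pointer to the empty string). A hedged sketch with hypothetical
+// values:
+//
+//	empty := ""
+//	opts := UpdateOpts{
+//		SourceIPAddress: &empty, // serialized as "source_ip_address": null
+//		Enabled:         No,     // the package-level *bool convenience var
+//	}
+//	body, _ := opts.ToRuleUpdateMap()
+//	// body["firewall_rule"].(map[string]interface{})["source_ip_address"] == nil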
+func (opts UpdateOpts) ToRuleUpdateMap() (map[string]interface{}, error) {
+	r := make(map[string]interface{})
+
+	if opts.Protocol != "" {
+		r["protocol"] = opts.Protocol
+	}
+	if opts.Action != "" {
+		r["action"] = opts.Action
+	}
+	if opts.Name != "" {
+		r["name"] = opts.Name
+	}
+	if opts.Description != "" {
+		r["description"] = opts.Description
+	}
+	if opts.IPVersion != 0 {
+		r["ip_version"] = opts.IPVersion
+	}
+	if opts.SourceIPAddress != nil {
+		s := *opts.SourceIPAddress
+		if s == "" {
+			r["source_ip_address"] = nil
+		} else {
+			r["source_ip_address"] = s
+		}
+	}
+	if opts.DestinationIPAddress != nil {
+		s := *opts.DestinationIPAddress
+		if s == "" {
+			r["destination_ip_address"] = nil
+		} else {
+			r["destination_ip_address"] = s
+		}
+	}
+	if opts.SourcePort != nil {
+		s := *opts.SourcePort
+		if s == "" {
+			r["source_port"] = nil
+		} else {
+			r["source_port"] = s
+		}
+	}
+	if opts.DestinationPort != nil {
+		s := *opts.DestinationPort
+		if s == "" {
+			r["destination_port"] = nil
+		} else {
+			r["destination_port"] = s
+		}
+	}
+	if opts.Shared != nil {
+		r["shared"] = *opts.Shared
+	}
+	if opts.Enabled != nil {
+		r["enabled"] = *opts.Enabled
+	}
+
+	return map[string]interface{}{"firewall_rule": r}, nil
+}
+
+// Update allows firewall rules to be updated.
+func Update(c *gophercloud.ServiceClient, id string, opts UpdateOptsBuilder) UpdateResult {
+	var res UpdateResult
+
+	reqBody, err := opts.ToRuleUpdateMap()
+	if err != nil {
+		res.Err = err
+		return res
+	}
+
+	// Send request to API
+	_, res.Err = c.Put(resourceURL(c, id), reqBody, &res.Body, &gophercloud.RequestOpts{
+		OkCodes: []int{200},
+	})
+
+	return res
+}
+
+// Delete will permanently delete a particular firewall rule based on its unique ID.
+func Delete(c *gophercloud.ServiceClient, id string) DeleteResult {
+	var res DeleteResult
+	_, res.Err = c.Delete(resourceURL(c, id), nil)
+	return res
+}
diff --git a/vendor/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/fwaas/rules/requests_test.go b/vendor/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/fwaas/rules/requests_test.go
new file mode 100644
index 000000000..36f89fa5c
--- /dev/null
+++ b/vendor/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/fwaas/rules/requests_test.go
@@ -0,0 +1,328 @@
+package rules
+
+import (
+	"fmt"
+	"net/http"
+	"testing"
+
+	fake "github.com/rackspace/gophercloud/openstack/networking/v2/common"
+	"github.com/rackspace/gophercloud/pagination"
+	th "github.com/rackspace/gophercloud/testhelper"
+)
+
+func TestURLs(t *testing.T) {
+	th.SetupHTTP()
+	defer th.TeardownHTTP()
+
+	th.AssertEquals(t, th.Endpoint()+"v2.0/fw/firewall_rules", rootURL(fake.ServiceClient()))
+}
+
+func TestList(t *testing.T) {
+	th.SetupHTTP()
+	defer th.TeardownHTTP()
+
+	th.Mux.HandleFunc("/v2.0/fw/firewall_rules", func(w http.ResponseWriter, r *http.Request) {
+		th.TestMethod(t, r, "GET")
+		th.TestHeader(t, r, "X-Auth-Token", fake.TokenID)
+
+		w.Header().Add("Content-Type", "application/json")
+		w.WriteHeader(http.StatusOK)
+
+		fmt.Fprintf(w, `
+{
+    "firewall_rules": [
+        {
+            "protocol": "tcp",
+            "description": "ssh rule",
+            "source_port": null,
+            "source_ip_address": null,
+            "destination_ip_address": "192.168.1.0/24",
+            "firewall_policy_id": "e2a5fb51-698c-4898-87e8-f1eee6b50919",
+            "position": 2,
+            "destination_port": "22",
+            "id": "f03bd950-6c56-4f5e-a307-45967078f507",
+            "name": "ssh_form_any",
+            "tenant_id": "80cf934d6ffb4ef5b244f1c512ad1e61",
+            "enabled": true,
+            "action": "allow",
+            "ip_version": 4,
+ "shared": false + }, + { + "protocol": "udp", + "description": "udp rule", + "source_port": null, + "source_ip_address": null, + "destination_ip_address": null, + "firewall_policy_id": "98d7fb51-698c-4123-87e8-f1eee6b5ab7e", + "position": 1, + "destination_port": null, + "id": "ab7bd950-6c56-4f5e-a307-45967078f890", + "name": "deny_all_udp", + "tenant_id": "80cf934d6ffb4ef5b244f1c512ad1e61", + "enabled": true, + "action": "deny", + "ip_version": 4, + "shared": false + } + ] +} + `) + }) + + count := 0 + + List(fake.ServiceClient(), ListOpts{}).EachPage(func(page pagination.Page) (bool, error) { + count++ + actual, err := ExtractRules(page) + if err != nil { + t.Errorf("Failed to extract members: %v", err) + return false, err + } + + expected := []Rule{ + Rule{ + Protocol: "tcp", + Description: "ssh rule", + SourcePort: "", + SourceIPAddress: "", + DestinationIPAddress: "192.168.1.0/24", + PolicyID: "e2a5fb51-698c-4898-87e8-f1eee6b50919", + Position: 2, + DestinationPort: "22", + ID: "f03bd950-6c56-4f5e-a307-45967078f507", + Name: "ssh_form_any", + TenantID: "80cf934d6ffb4ef5b244f1c512ad1e61", + Enabled: true, + Action: "allow", + IPVersion: 4, + Shared: false, + }, + Rule{ + Protocol: "udp", + Description: "udp rule", + SourcePort: "", + SourceIPAddress: "", + DestinationIPAddress: "", + PolicyID: "98d7fb51-698c-4123-87e8-f1eee6b5ab7e", + Position: 1, + DestinationPort: "", + ID: "ab7bd950-6c56-4f5e-a307-45967078f890", + Name: "deny_all_udp", + TenantID: "80cf934d6ffb4ef5b244f1c512ad1e61", + Enabled: true, + Action: "deny", + IPVersion: 4, + Shared: false, + }, + } + + th.CheckDeepEquals(t, expected, actual) + + return true, nil + }) + + if count != 1 { + t.Errorf("Expected 1 page, got %d", count) + } +} + +func TestCreate(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + th.Mux.HandleFunc("/v2.0/fw/firewall_rules", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "POST") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + th.TestHeader(t, r, "Content-Type", "application/json") + th.TestHeader(t, r, "Accept", "application/json") + th.TestJSONRequest(t, r, ` +{ + "firewall_rule": { + "protocol": "tcp", + "description": "ssh rule", + "destination_ip_address": "192.168.1.0/24", + "destination_port": "22", + "name": "ssh_form_any", + "action": "allow", + "tenant_id": "80cf934d6ffb4ef5b244f1c512ad1e61" + } +} + `) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusCreated) + + fmt.Fprintf(w, ` +{ + "firewall_rule":{ + "protocol": "tcp", + "description": "ssh rule", + "source_port": null, + "source_ip_address": null, + "destination_ip_address": "192.168.1.0/24", + "firewall_policy_id": "e2a5fb51-698c-4898-87e8-f1eee6b50919", + "position": 2, + "destination_port": "22", + "id": "f03bd950-6c56-4f5e-a307-45967078f507", + "name": "ssh_form_any", + "tenant_id": "80cf934d6ffb4ef5b244f1c512ad1e61", + "enabled": true, + "action": "allow", + "ip_version": 4, + "shared": false + } +} + `) + }) + + options := CreateOpts{ + TenantID: "80cf934d6ffb4ef5b244f1c512ad1e61", + Protocol: "tcp", + Description: "ssh rule", + DestinationIPAddress: "192.168.1.0/24", + DestinationPort: "22", + Name: "ssh_form_any", + Action: "allow", + } + + _, err := Create(fake.ServiceClient(), options).Extract() + th.AssertNoErr(t, err) +} + +func TestGet(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + th.Mux.HandleFunc("/v2.0/fw/firewall_rules/f03bd950-6c56-4f5e-a307-45967078f507", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, 
"GET") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + + fmt.Fprintf(w, ` +{ + "firewall_rule":{ + "protocol": "tcp", + "description": "ssh rule", + "source_port": null, + "source_ip_address": null, + "destination_ip_address": "192.168.1.0/24", + "firewall_policy_id": "e2a5fb51-698c-4898-87e8-f1eee6b50919", + "position": 2, + "destination_port": "22", + "id": "f03bd950-6c56-4f5e-a307-45967078f507", + "name": "ssh_form_any", + "tenant_id": "80cf934d6ffb4ef5b244f1c512ad1e61", + "enabled": true, + "action": "allow", + "ip_version": 4, + "shared": false + } +} + `) + }) + + rule, err := Get(fake.ServiceClient(), "f03bd950-6c56-4f5e-a307-45967078f507").Extract() + th.AssertNoErr(t, err) + + th.AssertEquals(t, "tcp", rule.Protocol) + th.AssertEquals(t, "ssh rule", rule.Description) + th.AssertEquals(t, "192.168.1.0/24", rule.DestinationIPAddress) + th.AssertEquals(t, "e2a5fb51-698c-4898-87e8-f1eee6b50919", rule.PolicyID) + th.AssertEquals(t, 2, rule.Position) + th.AssertEquals(t, "22", rule.DestinationPort) + th.AssertEquals(t, "f03bd950-6c56-4f5e-a307-45967078f507", rule.ID) + th.AssertEquals(t, "ssh_form_any", rule.Name) + th.AssertEquals(t, "80cf934d6ffb4ef5b244f1c512ad1e61", rule.TenantID) + th.AssertEquals(t, true, rule.Enabled) + th.AssertEquals(t, "allow", rule.Action) + th.AssertEquals(t, 4, rule.IPVersion) + th.AssertEquals(t, false, rule.Shared) +} + +func TestUpdate(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + th.Mux.HandleFunc("/v2.0/fw/firewall_rules/f03bd950-6c56-4f5e-a307-45967078f507", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "PUT") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + th.TestHeader(t, r, "Content-Type", "application/json") + th.TestHeader(t, r, "Accept", "application/json") + th.TestJSONRequest(t, r, ` +{ + "firewall_rule":{ + "protocol": "tcp", + "description": "ssh rule", + "destination_ip_address": "192.168.1.0/24", + "destination_port": "22", + "source_ip_address": null, + "source_port": null, + "name": "ssh_form_any", + "action": "allow", + "enabled": false + } +} + `) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + + fmt.Fprintf(w, ` +{ + "firewall_rule":{ + "protocol": "tcp", + "description": "ssh rule", + "source_port": null, + "source_ip_address": null, + "destination_ip_address": "192.168.1.0/24", + "firewall_policy_id": "e2a5fb51-698c-4898-87e8-f1eee6b50919", + "position": 2, + "destination_port": "22", + "id": "f03bd950-6c56-4f5e-a307-45967078f507", + "name": "ssh_form_any", + "tenant_id": "80cf934d6ffb4ef5b244f1c512ad1e61", + "enabled": false, + "action": "allow", + "ip_version": 4, + "shared": false + } +} + `) + }) + + destinationIPAddress := "192.168.1.0/24" + destinationPort := "22" + empty := "" + + options := UpdateOpts{ + Protocol: "tcp", + Description: "ssh rule", + DestinationIPAddress: &destinationIPAddress, + DestinationPort: &destinationPort, + Name: "ssh_form_any", + SourceIPAddress: &empty, + SourcePort: &empty, + Action: "allow", + Enabled: No, + } + + _, err := Update(fake.ServiceClient(), "f03bd950-6c56-4f5e-a307-45967078f507", options).Extract() + th.AssertNoErr(t, err) +} + +func TestDelete(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + th.Mux.HandleFunc("/v2.0/fw/firewall_rules/4ec89077-d057-4a2b-911f-60a3b47ee304", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "DELETE") + th.TestHeader(t, r, "X-Auth-Token", 
fake.TokenID)
+		w.WriteHeader(http.StatusNoContent)
+	})
+
+	res := Delete(fake.ServiceClient(), "4ec89077-d057-4a2b-911f-60a3b47ee304")
+	th.AssertNoErr(t, res.Err)
+}
diff --git a/vendor/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/fwaas/rules/results.go b/vendor/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/fwaas/rules/results.go
new file mode 100644
index 000000000..d772024b3
--- /dev/null
+++ b/vendor/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/fwaas/rules/results.go
@@ -0,0 +1,110 @@
+package rules
+
+import (
+	"github.com/mitchellh/mapstructure"
+	"github.com/rackspace/gophercloud"
+	"github.com/rackspace/gophercloud/pagination"
+)
+
+// Rule represents a firewall rule.
+type Rule struct {
+	ID                   string `json:"id" mapstructure:"id"`
+	Name                 string `json:"name,omitempty" mapstructure:"name"`
+	Description          string `json:"description,omitempty" mapstructure:"description"`
+	Protocol             string `json:"protocol" mapstructure:"protocol"`
+	Action               string `json:"action" mapstructure:"action"`
+	IPVersion            int    `json:"ip_version,omitempty" mapstructure:"ip_version"`
+	SourceIPAddress      string `json:"source_ip_address,omitempty" mapstructure:"source_ip_address"`
+	DestinationIPAddress string `json:"destination_ip_address,omitempty" mapstructure:"destination_ip_address"`
+	SourcePort           string `json:"source_port,omitempty" mapstructure:"source_port"`
+	DestinationPort      string `json:"destination_port,omitempty" mapstructure:"destination_port"`
+	Shared               bool   `json:"shared,omitempty" mapstructure:"shared"`
+	Enabled              bool   `json:"enabled,omitempty" mapstructure:"enabled"`
+	PolicyID             string `json:"firewall_policy_id" mapstructure:"firewall_policy_id"`
+	Position             int    `json:"position" mapstructure:"position"`
+	TenantID             string `json:"tenant_id" mapstructure:"tenant_id"`
+}
+
+// RulePage is the page returned by a pager when traversing over a
+// collection of firewall rules.
+type RulePage struct {
+	pagination.LinkedPageBase
+}
+
+// NextPageURL is invoked when a paginated collection of firewall rules has
+// reached the end of a page and the pager seeks to traverse over a new one.
+// In order to do this, it needs to construct the next page's URL.
+func (p RulePage) NextPageURL() (string, error) {
+	type resp struct {
+		Links []gophercloud.Link `mapstructure:"firewall_rules_links"`
+	}
+
+	var r resp
+	err := mapstructure.Decode(p.Body, &r)
+	if err != nil {
+		return "", err
+	}
+
+	return gophercloud.ExtractNextURL(r.Links)
+}
+
+// IsEmpty checks whether a RulePage struct is empty.
+func (p RulePage) IsEmpty() (bool, error) {
+	is, err := ExtractRules(p)
+	if err != nil {
+		return true, nil
+	}
+	return len(is) == 0, nil
+}
+
+// ExtractRules accepts a Page struct, specifically a RulePage struct,
+// and extracts the elements into a slice of Rule structs. In other words,
+// a generic collection is mapped into a relevant slice.
+func ExtractRules(page pagination.Page) ([]Rule, error) {
+	var resp struct {
+		Rules []Rule `mapstructure:"firewall_rules" json:"firewall_rules"`
+	}
+
+	err := mapstructure.Decode(page.(RulePage).Body, &resp)
+
+	return resp.Rules, err
+}
+
+type commonResult struct {
+	gophercloud.Result
+}
+
+// Extract is a function that accepts a result and extracts a firewall rule.
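+//
+// A short sketch of the full round trip, assuming an authenticated
+// *gophercloud.ServiceClient named client (hypothetical); the UUID echoes
+// the test fixtures:
+//
+//	rule, err := Get(client, "f03bd950-6c56-4f5e-a307-45967078f507").Extract()
+//	if err != nil {
+//		// inspect err before using rule
+//	}
+//	fmt.Printf("%s %s -> %s\n", rule.Name, rule.Protocol, rule.Action)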
+func (r commonResult) Extract() (*Rule, error) { + if r.Err != nil { + return nil, r.Err + } + + var res struct { + Rule *Rule `json:"firewall_rule" mapstructure:"firewall_rule"` + } + + err := mapstructure.Decode(r.Body, &res) + + return res.Rule, err +} + +// GetResult represents the result of a get operation. +type GetResult struct { + commonResult +} + +// UpdateResult represents the result of an update operation. +type UpdateResult struct { + commonResult +} + +// DeleteResult represents the result of a delete operation. +type DeleteResult struct { + gophercloud.ErrResult +} + +// CreateResult represents the result of a create operation. +type CreateResult struct { + commonResult +} diff --git a/vendor/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/fwaas/rules/urls.go b/vendor/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/fwaas/rules/urls.go new file mode 100644 index 000000000..20b08791e --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/fwaas/rules/urls.go @@ -0,0 +1,16 @@ +package rules + +import "github.com/rackspace/gophercloud" + +const ( + rootPath = "fw" + resourcePath = "firewall_rules" +) + +func rootURL(c *gophercloud.ServiceClient) string { + return c.ServiceURL(rootPath, resourcePath) +} + +func resourceURL(c *gophercloud.ServiceClient, id string) string { + return c.ServiceURL(rootPath, resourcePath, id) +} diff --git a/vendor/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/layer3/doc.go b/vendor/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/layer3/doc.go new file mode 100644 index 000000000..d53345826 --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/layer3/doc.go @@ -0,0 +1,5 @@ +// Package layer3 provides access to the Layer-3 networking extension for the +// OpenStack Neutron service. This extension allows API users to route packets +// between subnets, forward packets from internal networks to external ones, +// and access instances from external networks through floating IPs. +package layer3 diff --git a/vendor/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/layer3/floatingips/requests.go b/vendor/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/layer3/floatingips/requests.go new file mode 100644 index 000000000..29f752a07 --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/layer3/floatingips/requests.go @@ -0,0 +1,168 @@ +package floatingips + +import ( + "fmt" + + "github.com/rackspace/gophercloud" + "github.com/rackspace/gophercloud/pagination" +) + +// ListOpts allows the filtering and sorting of paginated collections through +// the API. Filtering is achieved by passing in struct field values that map to +// the floating IP attributes you want to see returned. SortKey allows you to +// sort by a particular network attribute. SortDir sets the direction, and is +// either `asc' or `desc'. Marker and Limit are used for pagination. +type ListOpts struct { + ID string `q:"id"` + FloatingNetworkID string `q:"floating_network_id"` + PortID string `q:"port_id"` + FixedIP string `q:"fixed_ip_address"` + FloatingIP string `q:"floating_ip_address"` + TenantID string `q:"tenant_id"` + Limit int `q:"limit"` + Marker string `q:"marker"` + SortKey string `q:"sort_key"` + SortDir string `q:"sort_dir"` +} + +// List returns a Pager which allows you to iterate over a collection of +// floating IP resources. 
It accepts a ListOpts struct, which allows you to
+// filter and sort the returned collection for greater efficiency.
+func List(c *gophercloud.ServiceClient, opts ListOpts) pagination.Pager {
+	q, err := gophercloud.BuildQueryString(&opts)
+	if err != nil {
+		return pagination.Pager{Err: err}
+	}
+	u := rootURL(c) + q.String()
+	return pagination.NewPager(c, u, func(r pagination.PageResult) pagination.Page {
+		return FloatingIPPage{pagination.LinkedPageBase{PageResult: r}}
+	})
+}
+
+// CreateOpts contains all the values needed to create a new floating IP
+// resource. The only required fields are FloatingNetworkID and PortID which
+// refer to the external network and internal port respectively.
+type CreateOpts struct {
+	FloatingNetworkID string
+	FloatingIP        string
+	PortID            string
+	FixedIP           string
+	TenantID          string
+}
+
+var (
+	errFloatingNetworkIDRequired = fmt.Errorf("A NetworkID is required")
+)
+
+// Create accepts a CreateOpts struct and uses the values provided to create a
+// new floating IP resource. You can create floating IPs on external networks
+// only. If you provide a FloatingNetworkID which refers to a network that is
+// not external (i.e. its `router:external' attribute is False), the operation
+// will fail and return a 400 error.
+//
+// If you do not specify a FloatingIP address value, the operation will
+// automatically allocate an available address for the new resource. If you do
+// choose to specify one, it must fall within the subnet range for the external
+// network - otherwise the operation returns a 400 error. If the FloatingIP
+// address is already in use, the operation returns a 409 error code.
+//
+// You can associate the new resource with an internal port by using the PortID
+// field. If you specify a PortID that is not valid, the operation will fail and
+// return a 404 error code.
+//
+// You must also configure an IP address for the port associated with the PortID
+// you have provided - this is what the FixedIP refers to: an IP fixed to a port.
+// Because a port might be associated with multiple IP addresses, you can use
+// the FixedIP field to associate a particular IP address rather than have the
+// API choose one for you. If you specify an IP address that is not valid, the
+// operation will fail and return a 400 error code. If the PortID and FixedIP
+// are already associated with another resource, the operation will fail and
+// return a 409 error code.
+func Create(c *gophercloud.ServiceClient, opts CreateOpts) CreateResult {
+	var res CreateResult
+
+	// Validate
+	if opts.FloatingNetworkID == "" {
+		res.Err = errFloatingNetworkIDRequired
+		return res
+	}
+
+	// Define structures
+	type floatingIP struct {
+		FloatingNetworkID string `json:"floating_network_id"`
+		FloatingIP        string `json:"floating_ip_address,omitempty"`
+		PortID            string `json:"port_id,omitempty"`
+		FixedIP           string `json:"fixed_ip_address,omitempty"`
+		TenantID          string `json:"tenant_id,omitempty"`
+	}
+	type request struct {
+		FloatingIP floatingIP `json:"floatingip"`
+	}
+
+	// Populate request body
+	reqBody := request{FloatingIP: floatingIP{
+		FloatingNetworkID: opts.FloatingNetworkID,
+		FloatingIP:        opts.FloatingIP,
+		PortID:            opts.PortID,
+		FixedIP:           opts.FixedIP,
+		TenantID:          opts.TenantID,
+	}}
+
+	_, res.Err = c.Post(rootURL(c), reqBody, &res.Body, nil)
+	return res
+}
+
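+// A hedged usage sketch of Create (client is hypothetical; the IDs echo the
+// test fixtures below; FloatingNetworkID is the only field validated
+// client-side):
+//
+//	opts := CreateOpts{
+//		FloatingNetworkID: "376da547-b977-4cfe-9cba-275c80debf57",
+//		PortID:            "ce705c24-c1ef-408a-bda3-7bbd946164ab",
+//	}
+//	fip, err := Create(client, opts).Extract()
+//	if err == nil {
+//		fmt.Println("allocated", fip.FloatingIP)
+//	}
+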
+// Get retrieves a particular floating IP resource based on its unique ID.
+func Get(c *gophercloud.ServiceClient, id string) GetResult {
+	var res GetResult
+	_, res.Err = c.Get(resourceURL(c, id), &res.Body, nil)
+	return res
+}
+
+// UpdateOpts contains the values used when updating a floating IP resource. The
+// only value that can be updated is which internal port the floating IP is
+// linked to. To associate the floating IP with a new internal port, provide its
+// ID. To disassociate the floating IP from all ports, provide an empty string.
+type UpdateOpts struct {
+	PortID string
+}
+
+// Update allows floating IP resources to be updated. Currently, the only way to
+// "update" a floating IP is to associate it with a new internal port, or
+// disassociate it from all ports. See UpdateOpts for instructions on how to
+// do this.
+func Update(c *gophercloud.ServiceClient, id string, opts UpdateOpts) UpdateResult {
+	type floatingIP struct {
+		PortID *string `json:"port_id"`
+	}
+
+	type request struct {
+		FloatingIP floatingIP `json:"floatingip"`
+	}
+
+	var portID *string
+	if opts.PortID == "" {
+		portID = nil
+	} else {
+		portID = &opts.PortID
+	}
+
+	reqBody := request{FloatingIP: floatingIP{PortID: portID}}
+
+	// Send request to API
+	var res UpdateResult
+	_, res.Err = c.Put(resourceURL(c, id), reqBody, &res.Body, &gophercloud.RequestOpts{
+		OkCodes: []int{200},
+	})
+
+	return res
+}
+
+// Delete will permanently delete a particular floating IP resource. Please
+// ensure this is what you want - you can also disassociate the IP from existing
+// internal ports.
+func Delete(c *gophercloud.ServiceClient, id string) DeleteResult {
+	var res DeleteResult
+	_, res.Err = c.Delete(resourceURL(c, id), nil)
+	return res
+}
diff --git a/vendor/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/layer3/floatingips/requests_test.go b/vendor/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/layer3/floatingips/requests_test.go
new file mode 100644
index 000000000..d914a799b
--- /dev/null
+++ b/vendor/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/layer3/floatingips/requests_test.go
@@ -0,0 +1,355 @@
+package floatingips
+
+import (
+	"fmt"
+	"net/http"
+	"testing"
+
+	fake "github.com/rackspace/gophercloud/openstack/networking/v2/common"
+	"github.com/rackspace/gophercloud/pagination"
+	th "github.com/rackspace/gophercloud/testhelper"
+)
+
+func TestList(t *testing.T) {
+	th.SetupHTTP()
+	defer th.TeardownHTTP()
+
+	th.Mux.HandleFunc("/v2.0/floatingips", func(w http.ResponseWriter, r *http.Request) {
+		th.TestMethod(t, r, "GET")
+		th.TestHeader(t, r, "X-Auth-Token", fake.TokenID)
+
+		w.Header().Add("Content-Type", "application/json")
+		w.WriteHeader(http.StatusOK)
+
+		fmt.Fprintf(w, `
+{
+    "floatingips": [
+        {
+            "floating_network_id": "6d67c30a-ddb4-49a1-bec3-a65b286b4170",
+            "router_id": null,
+            "fixed_ip_address": null,
+            "floating_ip_address": "192.0.0.4",
+            "tenant_id": "017d8de156df4177889f31a9bd6edc00",
+            "status": "DOWN",
+            "port_id": null,
+            "id": "2f95fd2b-9f6a-4e8e-9e9a-2cbe286cbf9e"
+        },
+        {
+            "floating_network_id": "90f742b1-6d17-487b-ba95-71881dbc0b64",
+            "router_id": "0a24cb83-faf5-4d7f-b723-3144ed8a2167",
+            "fixed_ip_address": "192.0.0.2",
+            "floating_ip_address": "10.0.0.3",
+            "tenant_id": "017d8de156df4177889f31a9bd6edc00",
+            "status": "DOWN",
+            "port_id": "74a342ce-8e07-4e91-880c-9f834b68fa25",
+            "id": "ada25a95-f321-4f59-b0e0-f3a970dd3d63"
+        }
+    ]
+}
+`)
+	})
+
+	count := 0
+
+	List(fake.ServiceClient(), ListOpts{}).EachPage(func(page pagination.Page) (bool, error) {
+		count++
+
actual, err := ExtractFloatingIPs(page) + if err != nil { + t.Errorf("Failed to extract floating IPs: %v", err) + return false, err + } + + expected := []FloatingIP{ + FloatingIP{ + FloatingNetworkID: "6d67c30a-ddb4-49a1-bec3-a65b286b4170", + FixedIP: "", + FloatingIP: "192.0.0.4", + TenantID: "017d8de156df4177889f31a9bd6edc00", + Status: "DOWN", + PortID: "", + ID: "2f95fd2b-9f6a-4e8e-9e9a-2cbe286cbf9e", + }, + FloatingIP{ + FloatingNetworkID: "90f742b1-6d17-487b-ba95-71881dbc0b64", + FixedIP: "192.0.0.2", + FloatingIP: "10.0.0.3", + TenantID: "017d8de156df4177889f31a9bd6edc00", + Status: "DOWN", + PortID: "74a342ce-8e07-4e91-880c-9f834b68fa25", + ID: "ada25a95-f321-4f59-b0e0-f3a970dd3d63", + }, + } + + th.CheckDeepEquals(t, expected, actual) + + return true, nil + }) + + if count != 1 { + t.Errorf("Expected 1 page, got %d", count) + } +} + +func TestInvalidNextPageURLs(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + th.Mux.HandleFunc("/v2.0/floatingips", func(w http.ResponseWriter, r *http.Request) { + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + fmt.Fprintf(w, `{"floatingips": [{}], "floatingips_links": {}}`) + }) + + List(fake.ServiceClient(), ListOpts{}).EachPage(func(page pagination.Page) (bool, error) { + ExtractFloatingIPs(page) + return true, nil + }) +} + +func TestRequiredFieldsForCreate(t *testing.T) { + res1 := Create(fake.ServiceClient(), CreateOpts{FloatingNetworkID: ""}) + if res1.Err == nil { + t.Fatalf("Expected error, got none") + } + + res2 := Create(fake.ServiceClient(), CreateOpts{FloatingNetworkID: "foo", PortID: ""}) + if res2.Err == nil { + t.Fatalf("Expected error, got none") + } +} + +func TestCreate(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + th.Mux.HandleFunc("/v2.0/floatingips", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "POST") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + th.TestHeader(t, r, "Content-Type", "application/json") + th.TestHeader(t, r, "Accept", "application/json") + th.TestJSONRequest(t, r, ` +{ + "floatingip": { + "floating_network_id": "376da547-b977-4cfe-9cba-275c80debf57", + "port_id": "ce705c24-c1ef-408a-bda3-7bbd946164ab" + } +} + `) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusCreated) + + fmt.Fprintf(w, ` +{ + "floatingip": { + "router_id": "d23abc8d-2991-4a55-ba98-2aaea84cc72f", + "tenant_id": "4969c491a3c74ee4af974e6d800c62de", + "floating_network_id": "376da547-b977-4cfe-9cba-275c80debf57", + "fixed_ip_address": "10.0.0.3", + "floating_ip_address": "", + "port_id": "ce705c24-c1ef-408a-bda3-7bbd946164ab", + "id": "2f245a7b-796b-4f26-9cf9-9e82d248fda7" + } +} + `) + }) + + options := CreateOpts{ + FloatingNetworkID: "376da547-b977-4cfe-9cba-275c80debf57", + PortID: "ce705c24-c1ef-408a-bda3-7bbd946164ab", + } + + ip, err := Create(fake.ServiceClient(), options).Extract() + th.AssertNoErr(t, err) + + th.AssertEquals(t, "2f245a7b-796b-4f26-9cf9-9e82d248fda7", ip.ID) + th.AssertEquals(t, "4969c491a3c74ee4af974e6d800c62de", ip.TenantID) + th.AssertEquals(t, "376da547-b977-4cfe-9cba-275c80debf57", ip.FloatingNetworkID) + th.AssertEquals(t, "", ip.FloatingIP) + th.AssertEquals(t, "ce705c24-c1ef-408a-bda3-7bbd946164ab", ip.PortID) + th.AssertEquals(t, "10.0.0.3", ip.FixedIP) +} + +func TestCreateEmptyPort(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + th.Mux.HandleFunc("/v2.0/floatingips", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "POST") + th.TestHeader(t, r, 
"X-Auth-Token", fake.TokenID) + th.TestHeader(t, r, "Content-Type", "application/json") + th.TestHeader(t, r, "Accept", "application/json") + th.TestJSONRequest(t, r, ` + { + "floatingip": { + "floating_network_id": "376da547-b977-4cfe-9cba-275c80debf57" + } + } + `) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusCreated) + + fmt.Fprintf(w, ` + { + "floatingip": { + "router_id": "d23abc8d-2991-4a55-ba98-2aaea84cc72f", + "tenant_id": "4969c491a3c74ee4af974e6d800c62de", + "floating_network_id": "376da547-b977-4cfe-9cba-275c80debf57", + "fixed_ip_address": "10.0.0.3", + "floating_ip_address": "", + "id": "2f245a7b-796b-4f26-9cf9-9e82d248fda7" + } + } + `) + }) + + options := CreateOpts{ + FloatingNetworkID: "376da547-b977-4cfe-9cba-275c80debf57", + } + + ip, err := Create(fake.ServiceClient(), options).Extract() + th.AssertNoErr(t, err) + + th.AssertEquals(t, "2f245a7b-796b-4f26-9cf9-9e82d248fda7", ip.ID) + th.AssertEquals(t, "4969c491a3c74ee4af974e6d800c62de", ip.TenantID) + th.AssertEquals(t, "376da547-b977-4cfe-9cba-275c80debf57", ip.FloatingNetworkID) + th.AssertEquals(t, "", ip.FloatingIP) + th.AssertEquals(t, "", ip.PortID) + th.AssertEquals(t, "10.0.0.3", ip.FixedIP) +} + +func TestGet(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + th.Mux.HandleFunc("/v2.0/floatingips/2f245a7b-796b-4f26-9cf9-9e82d248fda7", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + + fmt.Fprintf(w, ` +{ + "floatingip": { + "floating_network_id": "90f742b1-6d17-487b-ba95-71881dbc0b64", + "fixed_ip_address": "192.0.0.2", + "floating_ip_address": "10.0.0.3", + "tenant_id": "017d8de156df4177889f31a9bd6edc00", + "status": "DOWN", + "port_id": "74a342ce-8e07-4e91-880c-9f834b68fa25", + "id": "2f245a7b-796b-4f26-9cf9-9e82d248fda7" + } +} + `) + }) + + ip, err := Get(fake.ServiceClient(), "2f245a7b-796b-4f26-9cf9-9e82d248fda7").Extract() + th.AssertNoErr(t, err) + + th.AssertEquals(t, "90f742b1-6d17-487b-ba95-71881dbc0b64", ip.FloatingNetworkID) + th.AssertEquals(t, "10.0.0.3", ip.FloatingIP) + th.AssertEquals(t, "74a342ce-8e07-4e91-880c-9f834b68fa25", ip.PortID) + th.AssertEquals(t, "192.0.0.2", ip.FixedIP) + th.AssertEquals(t, "017d8de156df4177889f31a9bd6edc00", ip.TenantID) + th.AssertEquals(t, "DOWN", ip.Status) + th.AssertEquals(t, "2f245a7b-796b-4f26-9cf9-9e82d248fda7", ip.ID) +} + +func TestAssociate(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + th.Mux.HandleFunc("/v2.0/floatingips/2f245a7b-796b-4f26-9cf9-9e82d248fda7", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "PUT") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + th.TestHeader(t, r, "Content-Type", "application/json") + th.TestHeader(t, r, "Accept", "application/json") + th.TestJSONRequest(t, r, ` +{ + "floatingip": { + "port_id": "423abc8d-2991-4a55-ba98-2aaea84cc72e" + } +} + `) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + + fmt.Fprintf(w, ` +{ + "floatingip": { + "router_id": "d23abc8d-2991-4a55-ba98-2aaea84cc72f", + "tenant_id": "4969c491a3c74ee4af974e6d800c62de", + "floating_network_id": "376da547-b977-4cfe-9cba-275c80debf57", + "fixed_ip_address": null, + "floating_ip_address": "172.24.4.228", + "port_id": "423abc8d-2991-4a55-ba98-2aaea84cc72e", + "id": "2f245a7b-796b-4f26-9cf9-9e82d248fda7" + } +} + `) + }) + + ip, err := Update(fake.ServiceClient(), 
"2f245a7b-796b-4f26-9cf9-9e82d248fda7", UpdateOpts{PortID: "423abc8d-2991-4a55-ba98-2aaea84cc72e"}).Extract() + th.AssertNoErr(t, err) + + th.AssertDeepEquals(t, "423abc8d-2991-4a55-ba98-2aaea84cc72e", ip.PortID) +} + +func TestDisassociate(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + th.Mux.HandleFunc("/v2.0/floatingips/2f245a7b-796b-4f26-9cf9-9e82d248fda7", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "PUT") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + th.TestHeader(t, r, "Content-Type", "application/json") + th.TestHeader(t, r, "Accept", "application/json") + th.TestJSONRequest(t, r, ` +{ + "floatingip": { + "port_id": null + } +} + `) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + + fmt.Fprintf(w, ` +{ + "floatingip": { + "router_id": "d23abc8d-2991-4a55-ba98-2aaea84cc72f", + "tenant_id": "4969c491a3c74ee4af974e6d800c62de", + "floating_network_id": "376da547-b977-4cfe-9cba-275c80debf57", + "fixed_ip_address": null, + "floating_ip_address": "172.24.4.228", + "port_id": null, + "id": "2f245a7b-796b-4f26-9cf9-9e82d248fda7" + } +} + `) + }) + + ip, err := Update(fake.ServiceClient(), "2f245a7b-796b-4f26-9cf9-9e82d248fda7", UpdateOpts{}).Extract() + th.AssertNoErr(t, err) + + th.AssertDeepEquals(t, "", ip.FixedIP) + th.AssertDeepEquals(t, "", ip.PortID) +} + +func TestDelete(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + th.Mux.HandleFunc("/v2.0/floatingips/2f245a7b-796b-4f26-9cf9-9e82d248fda7", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "DELETE") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + w.WriteHeader(http.StatusNoContent) + }) + + res := Delete(fake.ServiceClient(), "2f245a7b-796b-4f26-9cf9-9e82d248fda7") + th.AssertNoErr(t, res.Err) +} diff --git a/vendor/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/layer3/floatingips/results.go b/vendor/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/layer3/floatingips/results.go new file mode 100644 index 000000000..a1c7afe2c --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/layer3/floatingips/results.go @@ -0,0 +1,127 @@ +package floatingips + +import ( + "fmt" + + "github.com/mitchellh/mapstructure" + "github.com/rackspace/gophercloud" + "github.com/rackspace/gophercloud/pagination" +) + +// FloatingIP represents a floating IP resource. A floating IP is an external +// IP address that is mapped to an internal port and, optionally, a specific +// IP address on a private network. In other words, it enables access to an +// instance on a private network from an external network. For this reason, +// floating IPs can only be defined on networks where the `router:external' +// attribute (provided by the external network extension) is set to True. +type FloatingIP struct { + // Unique identifier for the floating IP instance. + ID string `json:"id" mapstructure:"id"` + + // UUID of the external network where the floating IP is to be created. + FloatingNetworkID string `json:"floating_network_id" mapstructure:"floating_network_id"` + + // Address of the floating IP on the external network. + FloatingIP string `json:"floating_ip_address" mapstructure:"floating_ip_address"` + + // UUID of the port on an internal network that is associated with the floating IP. + PortID string `json:"port_id" mapstructure:"port_id"` + + // The specific IP address of the internal port which should be associated + // with the floating IP. 
+	FixedIP string `json:"fixed_ip_address" mapstructure:"fixed_ip_address"`
+
+	// Owner of the floating IP. Only admin users can specify a tenant identifier
+	// other than its own.
+	TenantID string `json:"tenant_id" mapstructure:"tenant_id"`
+
+	// The condition of the API resource.
+	Status string `json:"status" mapstructure:"status"`
+}
+
+type commonResult struct {
+	gophercloud.Result
+}
+
+// Extract is a function that accepts a result and extracts a FloatingIP resource.
+func (r commonResult) Extract() (*FloatingIP, error) {
+	if r.Err != nil {
+		return nil, r.Err
+	}
+
+	var res struct {
+		FloatingIP *FloatingIP `json:"floatingip"`
+	}
+
+	err := mapstructure.Decode(r.Body, &res)
+	if err != nil {
+		return nil, fmt.Errorf("Error decoding Neutron floating IP: %v", err)
+	}
+
+	return res.FloatingIP, nil
+}
+
+// CreateResult represents the result of a create operation.
+type CreateResult struct {
+	commonResult
+}
+
+// GetResult represents the result of a get operation.
+type GetResult struct {
+	commonResult
+}
+
+// UpdateResult represents the result of an update operation.
+type UpdateResult struct {
+	commonResult
+}
+
+// DeleteResult represents the result of a delete operation.
+type DeleteResult struct {
+	gophercloud.ErrResult
+}
+
+// FloatingIPPage is the page returned by a pager when traversing over a
+// collection of floating IPs.
+type FloatingIPPage struct {
+	pagination.LinkedPageBase
+}
+
+// NextPageURL is invoked when a paginated collection of floating IPs has reached
+// the end of a page and the pager seeks to traverse over a new one. In order
+// to do this, it needs to construct the next page's URL.
+func (p FloatingIPPage) NextPageURL() (string, error) {
+	type resp struct {
+		Links []gophercloud.Link `mapstructure:"floatingips_links"`
+	}
+
+	var r resp
+	err := mapstructure.Decode(p.Body, &r)
+	if err != nil {
+		return "", err
+	}
+
+	return gophercloud.ExtractNextURL(r.Links)
+}
+
+// IsEmpty checks whether a FloatingIPPage struct is empty.
+func (p FloatingIPPage) IsEmpty() (bool, error) {
+	is, err := ExtractFloatingIPs(p)
+	if err != nil {
+		return true, nil
+	}
+	return len(is) == 0, nil
+}
+
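+// Association is driven entirely by UpdateOpts.PortID (see Update in
+// requests.go): a non-empty ID associates the floating IP with that port, an
+// empty UpdateOpts disassociates it. Sketch only; client, fipID and portID
+// are hypothetical:
+//
+//	_, _ = Update(client, fipID, UpdateOpts{PortID: portID}).Extract() // associate
+//	_, _ = Update(client, fipID, UpdateOpts{}).Extract()               // disassociate
+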
+// ExtractFloatingIPs accepts a Page struct, specifically a FloatingIPPage struct,
+// and extracts the elements into a slice of FloatingIP structs. In other words,
+// a generic collection is mapped into a relevant slice.
+func ExtractFloatingIPs(page pagination.Page) ([]FloatingIP, error) {
+	var resp struct {
+		FloatingIPs []FloatingIP `mapstructure:"floatingips" json:"floatingips"`
+	}
+
+	err := mapstructure.Decode(page.(FloatingIPPage).Body, &resp)
+
+	return resp.FloatingIPs, err
+}
diff --git a/vendor/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/layer3/floatingips/urls.go b/vendor/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/layer3/floatingips/urls.go
new file mode 100644
index 000000000..355f20dc0
--- /dev/null
+++ b/vendor/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/layer3/floatingips/urls.go
@@ -0,0 +1,13 @@
+package floatingips
+
+import "github.com/rackspace/gophercloud"
+
+const resourcePath = "floatingips"
+
+func rootURL(c *gophercloud.ServiceClient) string {
+	return c.ServiceURL(resourcePath)
+}
+
+func resourceURL(c *gophercloud.ServiceClient, id string) string {
+	return c.ServiceURL(resourcePath, id)
+}
diff --git a/vendor/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/layer3/routers/requests.go b/vendor/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/layer3/routers/requests.go
new file mode 100644
index 000000000..8b6e73de9
--- /dev/null
+++ b/vendor/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/layer3/routers/requests.go
@@ -0,0 +1,230 @@
+package routers
+
+import (
+	"errors"
+
+	"github.com/rackspace/gophercloud"
+	"github.com/rackspace/gophercloud/pagination"
+)
+
+// ListOpts allows the filtering and sorting of paginated collections through
+// the API. Filtering is achieved by passing in struct field values that map to
+// the router attributes you want to see returned. SortKey allows you to
+// sort by a particular router attribute. SortDir sets the direction, and is
+// either `asc' or `desc'. Marker and Limit are used for pagination.
+type ListOpts struct {
+	ID           string `q:"id"`
+	Name         string `q:"name"`
+	AdminStateUp *bool  `q:"admin_state_up"`
+	Status       string `q:"status"`
+	TenantID     string `q:"tenant_id"`
+	Limit        int    `q:"limit"`
+	Marker       string `q:"marker"`
+	SortKey      string `q:"sort_key"`
+	SortDir      string `q:"sort_dir"`
+}
+
+// List returns a Pager which allows you to iterate over a collection of
+// routers. It accepts a ListOpts struct, which allows you to filter and sort
+// the returned collection for greater efficiency.
+//
+// Default policy settings return only those routers that are owned by the
+// tenant who submits the request, unless an admin user submits the request.
+func List(c *gophercloud.ServiceClient, opts ListOpts) pagination.Pager {
+	q, err := gophercloud.BuildQueryString(&opts)
+	if err != nil {
+		return pagination.Pager{Err: err}
+	}
+	u := rootURL(c) + q.String()
+	return pagination.NewPager(c, u, func(r pagination.PageResult) pagination.Page {
+		return RouterPage{pagination.LinkedPageBase{PageResult: r}}
+	})
+}
+
+// CreateOpts contains all the values needed to create a new router. There are
+// no required values.
+type CreateOpts struct {
+	Name         string
+	AdminStateUp *bool
+	TenantID     string
+	GatewayInfo  *GatewayInfo
+}
+
+// Create accepts a CreateOpts struct and uses the values to create a new
+// logical router. When it is created, the router does not have an internal
+// interface - it is not associated to any subnet.
+//
+// You can optionally specify an external gateway for a router using the
+// GatewayInfo struct. The external gateway for the router must be plugged into
+// an external network (it is external if its `router:external' field is set to
+// true).
+func Create(c *gophercloud.ServiceClient, opts CreateOpts) CreateResult {
+	type router struct {
+		Name         *string      `json:"name,omitempty"`
+		AdminStateUp *bool        `json:"admin_state_up,omitempty"`
+		TenantID     *string      `json:"tenant_id,omitempty"`
+		GatewayInfo  *GatewayInfo `json:"external_gateway_info,omitempty"`
+	}
+
+	type request struct {
+		Router router `json:"router"`
+	}
+
+	reqBody := request{Router: router{
+		Name:         gophercloud.MaybeString(opts.Name),
+		AdminStateUp: opts.AdminStateUp,
+		TenantID:     gophercloud.MaybeString(opts.TenantID),
+	}}
+
+	if opts.GatewayInfo != nil {
+		reqBody.Router.GatewayInfo = opts.GatewayInfo
+	}
+
+	var res CreateResult
+	_, res.Err = c.Post(rootURL(c), reqBody, &res.Body, nil)
+	return res
+}
+
+// Get retrieves a particular router based on its unique ID.
+func Get(c *gophercloud.ServiceClient, id string) GetResult {
+	var res GetResult
+	_, res.Err = c.Get(resourceURL(c, id), &res.Body, nil)
+	return res
+}
+
+// UpdateOpts contains the values used when updating a router.
+type UpdateOpts struct {
+	Name         string
+	AdminStateUp *bool
+	GatewayInfo  *GatewayInfo
+	Routes       []Route
+}
+
+// Update allows routers to be updated. You can update the name, administrative
+// state, and the external gateway. For more information about how to set the
+// external gateway for a router, see Create. This operation does not enable
+// the update of router interfaces. To do this, use the AddInterface and
+// RemoveInterface functions.
+func Update(c *gophercloud.ServiceClient, id string, opts UpdateOpts) UpdateResult {
+	type router struct {
+		Name         *string      `json:"name,omitempty"`
+		AdminStateUp *bool        `json:"admin_state_up,omitempty"`
+		GatewayInfo  *GatewayInfo `json:"external_gateway_info,omitempty"`
+		Routes       []Route      `json:"routes"`
+	}
+
+	type request struct {
+		Router router `json:"router"`
+	}
+
+	reqBody := request{Router: router{
+		Name:         gophercloud.MaybeString(opts.Name),
+		AdminStateUp: opts.AdminStateUp,
+	}}
+
+	if opts.GatewayInfo != nil {
+		reqBody.Router.GatewayInfo = opts.GatewayInfo
+	}
+
+	if opts.Routes != nil {
+		reqBody.Router.Routes = opts.Routes
+	}
+
+	// Send request to API
+	var res UpdateResult
+	_, res.Err = c.Put(resourceURL(c, id), reqBody, &res.Body, &gophercloud.RequestOpts{
+		OkCodes: []int{200},
+	})
+
+	return res
+}
+
+// Delete will permanently delete a particular router based on its unique ID.
+func Delete(c *gophercloud.ServiceClient, id string) DeleteResult {
+	var res DeleteResult
+	_, res.Err = c.Delete(resourceURL(c, id), nil)
+	return res
+}
+
+var errInvalidInterfaceOpts = errors.New("When adding a router interface you must provide either a subnet ID or a port ID")
+
+// InterfaceOpts allows you to work with operations that either add or remove
+// an internal interface from a router.
+type InterfaceOpts struct {
+	SubnetID string
+	PortID   string
+}
+
+// AddInterface attaches a subnet to an internal router interface. You must
+// specify either a SubnetID or PortID in the request body. If you specify both,
+// the operation will fail and an error will be returned.
+//
+// If you specify a SubnetID, the gateway IP address for that particular subnet
+// is used to create the router interface. Alternatively, if you specify a
+// PortID, the IP address associated with the port is used to create the router
+// interface.
+// +// If you reference a port that is associated with multiple IP addresses, or +// if the port is associated with zero IP addresses, the operation will fail and +// a 400 Bad Request error will be returned. +// +// If you reference a port already in use, the operation will fail and a 409 +// Conflict error will be returned. +// +// The PortID that is returned after using Extract() on the result of this +// operation can either be the same PortID passed in or, on the other hand, the +// identifier of a new port created by this operation. After the operation +// completes, the device ID of the port is set to the router ID, and the +// device owner attribute is set to `network:router_interface'. +func AddInterface(c *gophercloud.ServiceClient, id string, opts InterfaceOpts) InterfaceResult { + var res InterfaceResult + + // Validate + if (opts.SubnetID == "" && opts.PortID == "") || (opts.SubnetID != "" && opts.PortID != "") { + res.Err = errInvalidInterfaceOpts + return res + } + + type request struct { + SubnetID string `json:"subnet_id,omitempty"` + PortID string `json:"port_id,omitempty"` + } + + body := request{SubnetID: opts.SubnetID, PortID: opts.PortID} + + _, res.Err = c.Put(addInterfaceURL(c, id), body, &res.Body, &gophercloud.RequestOpts{ + OkCodes: []int{200}, + }) + + return res +} + +// RemoveInterface removes an internal router interface, which detaches a +// subnet from the router. You must specify either a SubnetID or PortID, since +// these values are used to identify the router interface to remove. +// +// Unlike AddInterface, you can also specify both a SubnetID and PortID. If you +// choose to specify both, the subnet ID must correspond to the subnet ID of +// the first IP address on the port specified by the port ID. Otherwise, the +// operation will fail and return a 409 Conflict error. +// +// If the router, subnet or port which are referenced do not exist or are not +// visible to you, the operation will fail and a 404 Not Found error will be +// returned. After this operation completes, the port connecting the router +// with the subnet is removed from the subnet for the network. 
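+//
+// A hedged sketch of both calls (client and routerID are hypothetical; the
+// subnet UUID echoes the test fixtures below):
+//
+//	opts := InterfaceOpts{SubnetID: "a2f1f29d-571b-4533-907f-5803ab96ead1"}
+//	info, err := AddInterface(client, routerID, opts).Extract()
+//	if err == nil {
+//		fmt.Println("attached via port", info.PortID)
+//	}
+//	// later, detach with the same opts:
+//	_ = RemoveInterface(client, routerID, opts)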
+func RemoveInterface(c *gophercloud.ServiceClient, id string, opts InterfaceOpts) InterfaceResult { + var res InterfaceResult + + type request struct { + SubnetID string `json:"subnet_id,omitempty"` + PortID string `json:"port_id,omitempty"` + } + + body := request{SubnetID: opts.SubnetID, PortID: opts.PortID} + + _, res.Err = c.Put(removeInterfaceURL(c, id), body, &res.Body, &gophercloud.RequestOpts{ + OkCodes: []int{200}, + }) + + return res +} diff --git a/vendor/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/layer3/routers/requests_test.go b/vendor/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/layer3/routers/requests_test.go new file mode 100644 index 000000000..198173359 --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/layer3/routers/requests_test.go @@ -0,0 +1,405 @@ +package routers + +import ( + "fmt" + "net/http" + "testing" + + fake "github.com/rackspace/gophercloud/openstack/networking/v2/common" + "github.com/rackspace/gophercloud/pagination" + th "github.com/rackspace/gophercloud/testhelper" +) + +func TestURLs(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + th.AssertEquals(t, th.Endpoint()+"v2.0/routers", rootURL(fake.ServiceClient())) +} + +func TestList(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + th.Mux.HandleFunc("/v2.0/routers", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + + fmt.Fprintf(w, ` +{ + "routers": [ + { + "status": "ACTIVE", + "external_gateway_info": null, + "name": "second_routers", + "admin_state_up": true, + "tenant_id": "6b96ff0cb17a4b859e1e575d221683d3", + "id": "7177abc4-5ae9-4bb7-b0d4-89e94a4abf3b" + }, + { + "status": "ACTIVE", + "external_gateway_info": { + "network_id": "3c5bcddd-6af9-4e6b-9c3e-c153e521cab8" + }, + "name": "router1", + "admin_state_up": true, + "tenant_id": "33a40233088643acb66ff6eb0ebea679", + "id": "a9254bdb-2613-4a13-ac4c-adc581fba50d" + } + ] +} + `) + }) + + count := 0 + + List(fake.ServiceClient(), ListOpts{}).EachPage(func(page pagination.Page) (bool, error) { + count++ + actual, err := ExtractRouters(page) + if err != nil { + t.Errorf("Failed to extract routers: %v", err) + return false, err + } + + expected := []Router{ + Router{ + Status: "ACTIVE", + GatewayInfo: GatewayInfo{NetworkID: ""}, + AdminStateUp: true, + Name: "second_routers", + ID: "7177abc4-5ae9-4bb7-b0d4-89e94a4abf3b", + TenantID: "6b96ff0cb17a4b859e1e575d221683d3", + }, + Router{ + Status: "ACTIVE", + GatewayInfo: GatewayInfo{NetworkID: "3c5bcddd-6af9-4e6b-9c3e-c153e521cab8"}, + AdminStateUp: true, + Name: "router1", + ID: "a9254bdb-2613-4a13-ac4c-adc581fba50d", + TenantID: "33a40233088643acb66ff6eb0ebea679", + }, + } + + th.CheckDeepEquals(t, expected, actual) + + return true, nil + }) + + if count != 1 { + t.Errorf("Expected 1 page, got %d", count) + } +} + +func TestCreate(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + th.Mux.HandleFunc("/v2.0/routers", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "POST") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + th.TestHeader(t, r, "Content-Type", "application/json") + th.TestHeader(t, r, "Accept", "application/json") + th.TestJSONRequest(t, r, ` +{ + "router":{ + "name": "foo_router", + "admin_state_up": false, + "external_gateway_info":{ + 
"network_id":"8ca37218-28ff-41cb-9b10-039601ea7e6b" + } + } +} + `) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusCreated) + + fmt.Fprintf(w, ` +{ + "router": { + "status": "ACTIVE", + "external_gateway_info": { + "network_id": "8ca37218-28ff-41cb-9b10-039601ea7e6b" + }, + "name": "foo_router", + "admin_state_up": false, + "tenant_id": "6b96ff0cb17a4b859e1e575d221683d3", + "id": "8604a0de-7f6b-409a-a47c-a1cc7bc77b2e" + } +} + `) + }) + + asu := false + gwi := GatewayInfo{NetworkID: "8ca37218-28ff-41cb-9b10-039601ea7e6b"} + + options := CreateOpts{ + Name: "foo_router", + AdminStateUp: &asu, + GatewayInfo: &gwi, + } + r, err := Create(fake.ServiceClient(), options).Extract() + th.AssertNoErr(t, err) + + th.AssertEquals(t, "foo_router", r.Name) + th.AssertEquals(t, false, r.AdminStateUp) + th.AssertDeepEquals(t, GatewayInfo{NetworkID: "8ca37218-28ff-41cb-9b10-039601ea7e6b"}, r.GatewayInfo) +} + +func TestGet(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + th.Mux.HandleFunc("/v2.0/routers/a07eea83-7710-4860-931b-5fe220fae533", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + + fmt.Fprintf(w, ` +{ + "router": { + "status": "ACTIVE", + "external_gateway_info": { + "network_id": "85d76829-6415-48ff-9c63-5c5ca8c61ac6" + }, + "routes": [ + { + "nexthop": "10.1.0.10", + "destination": "40.0.1.0/24" + } + ], + "name": "router1", + "admin_state_up": true, + "tenant_id": "d6554fe62e2f41efbb6e026fad5c1542", + "id": "a07eea83-7710-4860-931b-5fe220fae533" + } +} + `) + }) + + n, err := Get(fake.ServiceClient(), "a07eea83-7710-4860-931b-5fe220fae533").Extract() + th.AssertNoErr(t, err) + + th.AssertEquals(t, n.Status, "ACTIVE") + th.AssertDeepEquals(t, n.GatewayInfo, GatewayInfo{NetworkID: "85d76829-6415-48ff-9c63-5c5ca8c61ac6"}) + th.AssertEquals(t, n.Name, "router1") + th.AssertEquals(t, n.AdminStateUp, true) + th.AssertEquals(t, n.TenantID, "d6554fe62e2f41efbb6e026fad5c1542") + th.AssertEquals(t, n.ID, "a07eea83-7710-4860-931b-5fe220fae533") + th.AssertDeepEquals(t, n.Routes, []Route{Route{DestinationCIDR: "40.0.1.0/24", NextHop: "10.1.0.10"}}) +} + +func TestUpdate(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + th.Mux.HandleFunc("/v2.0/routers/4e8e5957-649f-477b-9e5b-f1f75b21c03c", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "PUT") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + th.TestHeader(t, r, "Content-Type", "application/json") + th.TestHeader(t, r, "Accept", "application/json") + th.TestJSONRequest(t, r, ` +{ + "router": { + "name": "new_name", + "external_gateway_info": { + "network_id": "8ca37218-28ff-41cb-9b10-039601ea7e6b" + }, + "routes": [ + { + "nexthop": "10.1.0.10", + "destination": "40.0.1.0/24" + } + ] + } +} + `) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + + fmt.Fprintf(w, ` +{ + "router": { + "status": "ACTIVE", + "external_gateway_info": { + "network_id": "8ca37218-28ff-41cb-9b10-039601ea7e6b" + }, + "name": "new_name", + "admin_state_up": true, + "tenant_id": "6b96ff0cb17a4b859e1e575d221683d3", + "id": "8604a0de-7f6b-409a-a47c-a1cc7bc77b2e", + "routes": [ + { + "nexthop": "10.1.0.10", + "destination": "40.0.1.0/24" + } + ] + } +} + `) + }) + + gwi := GatewayInfo{NetworkID: "8ca37218-28ff-41cb-9b10-039601ea7e6b"} + r := []Route{Route{DestinationCIDR: "40.0.1.0/24", NextHop: 
"10.1.0.10"}} + options := UpdateOpts{Name: "new_name", GatewayInfo: &gwi, Routes: r} + + n, err := Update(fake.ServiceClient(), "4e8e5957-649f-477b-9e5b-f1f75b21c03c", options).Extract() + th.AssertNoErr(t, err) + + th.AssertEquals(t, n.Name, "new_name") + th.AssertDeepEquals(t, n.GatewayInfo, GatewayInfo{NetworkID: "8ca37218-28ff-41cb-9b10-039601ea7e6b"}) + th.AssertDeepEquals(t, n.Routes, []Route{Route{DestinationCIDR: "40.0.1.0/24", NextHop: "10.1.0.10"}}) +} + +func TestAllRoutesRemoved(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + th.Mux.HandleFunc("/v2.0/routers/4e8e5957-649f-477b-9e5b-f1f75b21c03c", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "PUT") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + th.TestHeader(t, r, "Content-Type", "application/json") + th.TestHeader(t, r, "Accept", "application/json") + th.TestJSONRequest(t, r, ` +{ + "router": { + "routes": [] + } +} + `) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + + fmt.Fprintf(w, ` +{ + "router": { + "status": "ACTIVE", + "external_gateway_info": { + "network_id": "8ca37218-28ff-41cb-9b10-039601ea7e6b" + }, + "name": "name", + "admin_state_up": true, + "tenant_id": "6b96ff0cb17a4b859e1e575d221683d3", + "id": "8604a0de-7f6b-409a-a47c-a1cc7bc77b2e", + "routes": [] + } +} + `) + }) + + r := []Route{} + options := UpdateOpts{Routes: r} + + n, err := Update(fake.ServiceClient(), "4e8e5957-649f-477b-9e5b-f1f75b21c03c", options).Extract() + th.AssertNoErr(t, err) + + th.AssertDeepEquals(t, n.Routes, []Route{}) +} + +func TestDelete(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + th.Mux.HandleFunc("/v2.0/routers/4e8e5957-649f-477b-9e5b-f1f75b21c03c", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "DELETE") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + w.WriteHeader(http.StatusNoContent) + }) + + res := Delete(fake.ServiceClient(), "4e8e5957-649f-477b-9e5b-f1f75b21c03c") + th.AssertNoErr(t, res.Err) +} + +func TestAddInterface(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + th.Mux.HandleFunc("/v2.0/routers/4e8e5957-649f-477b-9e5b-f1f75b21c03c/add_router_interface", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "PUT") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + th.TestHeader(t, r, "Content-Type", "application/json") + th.TestHeader(t, r, "Accept", "application/json") + th.TestJSONRequest(t, r, ` +{ + "subnet_id": "a2f1f29d-571b-4533-907f-5803ab96ead1" +} + `) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + + fmt.Fprintf(w, ` +{ + "subnet_id": "0d32a837-8069-4ec3-84c4-3eef3e10b188", + "tenant_id": "017d8de156df4177889f31a9bd6edc00", + "port_id": "3f990102-4485-4df1-97a0-2c35bdb85b31", + "id": "9a83fa11-8da5-436e-9afe-3d3ac5ce7770" +} +`) + }) + + opts := InterfaceOpts{SubnetID: "a2f1f29d-571b-4533-907f-5803ab96ead1"} + res, err := AddInterface(fake.ServiceClient(), "4e8e5957-649f-477b-9e5b-f1f75b21c03c", opts).Extract() + th.AssertNoErr(t, err) + + th.AssertEquals(t, "0d32a837-8069-4ec3-84c4-3eef3e10b188", res.SubnetID) + th.AssertEquals(t, "017d8de156df4177889f31a9bd6edc00", res.TenantID) + th.AssertEquals(t, "3f990102-4485-4df1-97a0-2c35bdb85b31", res.PortID) + th.AssertEquals(t, "9a83fa11-8da5-436e-9afe-3d3ac5ce7770", res.ID) +} + +func TestAddInterfaceRequiredOpts(t *testing.T) { + _, err := AddInterface(fake.ServiceClient(), "foo", InterfaceOpts{}).Extract() + if err == nil { + t.Fatalf("Expected error, got 
none") + } + _, err = AddInterface(fake.ServiceClient(), "foo", InterfaceOpts{SubnetID: "bar", PortID: "baz"}).Extract() + if err == nil { + t.Fatalf("Expected error, got none") + } +} + +func TestRemoveInterface(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + th.Mux.HandleFunc("/v2.0/routers/4e8e5957-649f-477b-9e5b-f1f75b21c03c/remove_router_interface", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "PUT") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + th.TestHeader(t, r, "Content-Type", "application/json") + th.TestHeader(t, r, "Accept", "application/json") + th.TestJSONRequest(t, r, ` +{ + "subnet_id": "a2f1f29d-571b-4533-907f-5803ab96ead1" +} + `) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + + fmt.Fprintf(w, ` +{ + "subnet_id": "0d32a837-8069-4ec3-84c4-3eef3e10b188", + "tenant_id": "017d8de156df4177889f31a9bd6edc00", + "port_id": "3f990102-4485-4df1-97a0-2c35bdb85b31", + "id": "9a83fa11-8da5-436e-9afe-3d3ac5ce7770" +} +`) + }) + + opts := InterfaceOpts{SubnetID: "a2f1f29d-571b-4533-907f-5803ab96ead1"} + res, err := RemoveInterface(fake.ServiceClient(), "4e8e5957-649f-477b-9e5b-f1f75b21c03c", opts).Extract() + th.AssertNoErr(t, err) + + th.AssertEquals(t, "0d32a837-8069-4ec3-84c4-3eef3e10b188", res.SubnetID) + th.AssertEquals(t, "017d8de156df4177889f31a9bd6edc00", res.TenantID) + th.AssertEquals(t, "3f990102-4485-4df1-97a0-2c35bdb85b31", res.PortID) + th.AssertEquals(t, "9a83fa11-8da5-436e-9afe-3d3ac5ce7770", res.ID) +} diff --git a/vendor/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/layer3/routers/results.go b/vendor/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/layer3/routers/results.go new file mode 100644 index 000000000..5e297ab26 --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/layer3/routers/results.go @@ -0,0 +1,168 @@ +package routers + +import ( + "github.com/mitchellh/mapstructure" + "github.com/rackspace/gophercloud" + "github.com/rackspace/gophercloud/pagination" +) + +// GatewayInfo represents the information of an external gateway for any +// particular network router. +type GatewayInfo struct { + NetworkID string `json:"network_id" mapstructure:"network_id"` +} + +type Route struct { + NextHop string `mapstructure:"nexthop" json:"nexthop"` + DestinationCIDR string `mapstructure:"destination" json:"destination"` +} + +// Router represents a Neutron router. A router is a logical entity that +// forwards packets across internal subnets and NATs (network address +// translation) them on external networks through an appropriate gateway. +// +// A router has an interface for each subnet with which it is associated. By +// default, the IP address of such interface is the subnet's gateway IP. Also, +// whenever a router is associated with a subnet, a port for that router +// interface is added to the subnet's network. +type Router struct { + // Indicates whether or not a router is currently operational. + Status string `json:"status" mapstructure:"status"` + + // Information on external gateway for the router. + GatewayInfo GatewayInfo `json:"external_gateway_info" mapstructure:"external_gateway_info"` + + // Administrative state of the router. + AdminStateUp bool `json:"admin_state_up" mapstructure:"admin_state_up"` + + // Human readable name for the router. Does not have to be unique. + Name string `json:"name" mapstructure:"name"` + + // Unique identifier for the router. 
+ ID string `json:"id" mapstructure:"id"` + + // Owner of the router. Only admin users can specify a tenant identifier + // other than its own. + TenantID string `json:"tenant_id" mapstructure:"tenant_id"` + + Routes []Route `json:"routes" mapstructure:"routes"` +} + +// RouterPage is the page returned by a pager when traversing over a +// collection of routers. +type RouterPage struct { + pagination.LinkedPageBase +} + +// NextPageURL is invoked when a paginated collection of routers has reached +// the end of a page and the pager seeks to traverse over a new one. In order +// to do this, it needs to construct the next page's URL. +func (p RouterPage) NextPageURL() (string, error) { + type resp struct { + Links []gophercloud.Link `mapstructure:"routers_links"` + } + + var r resp + err := mapstructure.Decode(p.Body, &r) + if err != nil { + return "", err + } + + return gophercloud.ExtractNextURL(r.Links) +} + +// IsEmpty checks whether a RouterPage struct is empty. +func (p RouterPage) IsEmpty() (bool, error) { + is, err := ExtractRouters(p) + if err != nil { + return true, nil + } + return len(is) == 0, nil +} + +// ExtractRouters accepts a Page struct, specifically a RouterPage struct, +// and extracts the elements into a slice of Router structs. In other words, +// a generic collection is mapped into a relevant slice. +func ExtractRouters(page pagination.Page) ([]Router, error) { + var resp struct { + Routers []Router `mapstructure:"routers" json:"routers"` + } + + err := mapstructure.Decode(page.(RouterPage).Body, &resp) + + return resp.Routers, err +} + +type commonResult struct { + gophercloud.Result +} + +// Extract is a function that accepts a result and extracts a router. +func (r commonResult) Extract() (*Router, error) { + if r.Err != nil { + return nil, r.Err + } + + var res struct { + Router *Router `json:"router"` + } + + err := mapstructure.Decode(r.Body, &res) + + return res.Router, err +} + +// CreateResult represents the result of a create operation. +type CreateResult struct { + commonResult +} + +// GetResult represents the result of a get operation. +type GetResult struct { + commonResult +} + +// UpdateResult represents the result of an update operation. +type UpdateResult struct { + commonResult +} + +// DeleteResult represents the result of a delete operation. +type DeleteResult struct { + gophercloud.ErrResult +} + +// InterfaceInfo represents information about a particular router interface. As +// mentioned above, in order for a router to forward to a subnet, it needs an +// interface. +type InterfaceInfo struct { + // The ID of the subnet which this interface is associated with. + SubnetID string `json:"subnet_id" mapstructure:"subnet_id"` + + // The ID of the port that is a part of the subnet. + PortID string `json:"port_id" mapstructure:"port_id"` + + // The UUID of the interface. + ID string `json:"id" mapstructure:"id"` + + // Owner of the interface. + TenantID string `json:"tenant_id" mapstructure:"tenant_id"` +} + +// InterfaceResult represents the result of interface operations, such as +// AddInterface() and RemoveInterface(). +type InterfaceResult struct { + gophercloud.Result +} + +// Extract is a function that accepts a result and extracts an information struct. 
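+// For example (a hedged sketch; the client value and the IDs shown are
+// assumed placeholders):
+//
+//	info, err := routers.AddInterface(client, "<router-uuid>", routers.InterfaceOpts{
+//		PortID: "<port-uuid>",
+//	}).Extract()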
+func (r InterfaceResult) Extract() (*InterfaceInfo, error) {
+	if r.Err != nil {
+		return nil, r.Err
+	}
+
+	var res *InterfaceInfo
+	err := mapstructure.Decode(r.Body, &res)
+
+	return res, err
+}
diff --git a/vendor/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/layer3/routers/urls.go b/vendor/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/layer3/routers/urls.go
new file mode 100644
index 000000000..bc22c2a8a
--- /dev/null
+++ b/vendor/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/layer3/routers/urls.go
@@ -0,0 +1,21 @@
+package routers
+
+import "github.com/rackspace/gophercloud"
+
+const resourcePath = "routers"
+
+func rootURL(c *gophercloud.ServiceClient) string {
+	return c.ServiceURL(resourcePath)
+}
+
+func resourceURL(c *gophercloud.ServiceClient, id string) string {
+	return c.ServiceURL(resourcePath, id)
+}
+
+func addInterfaceURL(c *gophercloud.ServiceClient, id string) string {
+	return c.ServiceURL(resourcePath, id, "add_router_interface")
+}
+
+func removeInterfaceURL(c *gophercloud.ServiceClient, id string) string {
+	return c.ServiceURL(resourcePath, id, "remove_router_interface")
+}
diff --git a/vendor/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/lbaas/doc.go b/vendor/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/lbaas/doc.go
new file mode 100644
index 000000000..bc1fc282f
--- /dev/null
+++ b/vendor/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/lbaas/doc.go
@@ -0,0 +1,3 @@
+// Package lbaas provides information and interaction with the Load Balancer
+// as a Service extension for the OpenStack Networking service.
+package lbaas
diff --git a/vendor/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/lbaas/members/requests.go b/vendor/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/lbaas/members/requests.go
new file mode 100644
index 000000000..848938f98
--- /dev/null
+++ b/vendor/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/lbaas/members/requests.go
@@ -0,0 +1,123 @@
+package members
+
+import (
+	"github.com/rackspace/gophercloud"
+	"github.com/rackspace/gophercloud/pagination"
+)
+
+// ListOpts allows the filtering and sorting of paginated collections through
+// the API. Filtering is achieved by passing in struct field values that map to
+// the pool member attributes you want to see returned. SortKey allows you to
+// sort by a particular member attribute. SortDir sets the direction, and is
+// either `asc' or `desc'. Marker and Limit are used for pagination.
+type ListOpts struct {
+	Status       string `q:"status"`
+	Weight       int    `q:"weight"`
+	AdminStateUp *bool  `q:"admin_state_up"`
+	TenantID     string `q:"tenant_id"`
+	PoolID       string `q:"pool_id"`
+	Address      string `q:"address"`
+	ProtocolPort int    `q:"protocol_port"`
+	ID           string `q:"id"`
+	Limit        int    `q:"limit"`
+	Marker       string `q:"marker"`
+	SortKey      string `q:"sort_key"`
+	SortDir      string `q:"sort_dir"`
+}
+
+// List returns a Pager which allows you to iterate over a collection of
+// pool members. It accepts a ListOpts struct, which allows you to filter and
+// sort the returned collection for greater efficiency.
+//
+// Default policy settings return only those members that are owned by the
+// tenant who submits the request, unless an admin user submits the request.
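+//
+// An illustrative pagination sketch (the client value is assumed; EachPage
+// semantics come from the pagination package):
+//
+//	pager := members.List(client, members.ListOpts{PoolID: "<pool-uuid>"})
+//	err := pager.EachPage(func(page pagination.Page) (bool, error) {
+//		ms, err := members.ExtractMembers(page)
+//		if err != nil {
+//			return false, err
+//		}
+//		for _, m := range ms {
+//			// inspect m.Address, m.ProtocolPort, m.Status, ...
+//		}
+//		return true, nil
+//	})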
+func List(c *gophercloud.ServiceClient, opts ListOpts) pagination.Pager { + q, err := gophercloud.BuildQueryString(&opts) + if err != nil { + return pagination.Pager{Err: err} + } + u := rootURL(c) + q.String() + return pagination.NewPager(c, u, func(r pagination.PageResult) pagination.Page { + return MemberPage{pagination.LinkedPageBase{PageResult: r}} + }) +} + +// CreateOpts contains all the values needed to create a new pool member. +type CreateOpts struct { + // Only required if the caller has an admin role and wants to create a pool + // for another tenant. + TenantID string + + // Required. The IP address of the member. + Address string + + // Required. The port on which the application is hosted. + ProtocolPort int + + // Required. The pool to which this member will belong. + PoolID string +} + +// Create accepts a CreateOpts struct and uses the values to create a new +// load balancer pool member. +func Create(c *gophercloud.ServiceClient, opts CreateOpts) CreateResult { + type member struct { + TenantID string `json:"tenant_id,omitempty"` + ProtocolPort int `json:"protocol_port"` + Address string `json:"address"` + PoolID string `json:"pool_id"` + } + type request struct { + Member member `json:"member"` + } + + reqBody := request{Member: member{ + Address: opts.Address, + TenantID: opts.TenantID, + ProtocolPort: opts.ProtocolPort, + PoolID: opts.PoolID, + }} + + var res CreateResult + _, res.Err = c.Post(rootURL(c), reqBody, &res.Body, nil) + return res +} + +// Get retrieves a particular pool member based on its unique ID. +func Get(c *gophercloud.ServiceClient, id string) GetResult { + var res GetResult + _, res.Err = c.Get(resourceURL(c, id), &res.Body, nil) + return res +} + +// UpdateOpts contains the values used when updating a pool member. +type UpdateOpts struct { + // The administrative state of the member, which is up (true) or down (false). + AdminStateUp bool +} + +// Update allows members to be updated. +func Update(c *gophercloud.ServiceClient, id string, opts UpdateOpts) UpdateResult { + type member struct { + AdminStateUp bool `json:"admin_state_up"` + } + type request struct { + Member member `json:"member"` + } + + reqBody := request{Member: member{AdminStateUp: opts.AdminStateUp}} + + // Send request to API + var res UpdateResult + _, res.Err = c.Put(resourceURL(c, id), reqBody, &res.Body, &gophercloud.RequestOpts{ + OkCodes: []int{200, 201, 202}, + }) + return res +} + +// Delete will permanently delete a particular member based on its unique ID. 
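+// For example (illustrative; the client value is assumed):
+//
+//	res := members.Delete(client, "<member-uuid>")
+//	if res.Err != nil {
+//		// handle the error
+//	}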
+func Delete(c *gophercloud.ServiceClient, id string) DeleteResult { + var res DeleteResult + _, res.Err = c.Delete(resourceURL(c, id), nil) + return res +} diff --git a/vendor/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/lbaas/members/requests_test.go b/vendor/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/lbaas/members/requests_test.go new file mode 100644 index 000000000..dc1ece321 --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/lbaas/members/requests_test.go @@ -0,0 +1,243 @@ +package members + +import ( + "fmt" + "net/http" + "testing" + + fake "github.com/rackspace/gophercloud/openstack/networking/v2/common" + "github.com/rackspace/gophercloud/pagination" + th "github.com/rackspace/gophercloud/testhelper" +) + +func TestURLs(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + th.AssertEquals(t, th.Endpoint()+"v2.0/lb/members", rootURL(fake.ServiceClient())) +} + +func TestList(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + th.Mux.HandleFunc("/v2.0/lb/members", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + + fmt.Fprintf(w, ` +{ + "members":[ + { + "status":"ACTIVE", + "weight":1, + "admin_state_up":true, + "tenant_id":"83657cfcdfe44cd5920adaf26c48ceea", + "pool_id":"72741b06-df4d-4715-b142-276b6bce75ab", + "address":"10.0.0.4", + "protocol_port":80, + "id":"701b531b-111a-4f21-ad85-4795b7b12af6" + }, + { + "status":"ACTIVE", + "weight":1, + "admin_state_up":true, + "tenant_id":"83657cfcdfe44cd5920adaf26c48ceea", + "pool_id":"72741b06-df4d-4715-b142-276b6bce75ab", + "address":"10.0.0.3", + "protocol_port":80, + "id":"beb53b4d-230b-4abd-8118-575b8fa006ef" + } + ] +} + `) + }) + + count := 0 + + List(fake.ServiceClient(), ListOpts{}).EachPage(func(page pagination.Page) (bool, error) { + count++ + actual, err := ExtractMembers(page) + if err != nil { + t.Errorf("Failed to extract members: %v", err) + return false, err + } + + expected := []Member{ + Member{ + Status: "ACTIVE", + Weight: 1, + AdminStateUp: true, + TenantID: "83657cfcdfe44cd5920adaf26c48ceea", + PoolID: "72741b06-df4d-4715-b142-276b6bce75ab", + Address: "10.0.0.4", + ProtocolPort: 80, + ID: "701b531b-111a-4f21-ad85-4795b7b12af6", + }, + Member{ + Status: "ACTIVE", + Weight: 1, + AdminStateUp: true, + TenantID: "83657cfcdfe44cd5920adaf26c48ceea", + PoolID: "72741b06-df4d-4715-b142-276b6bce75ab", + Address: "10.0.0.3", + ProtocolPort: 80, + ID: "beb53b4d-230b-4abd-8118-575b8fa006ef", + }, + } + + th.CheckDeepEquals(t, expected, actual) + + return true, nil + }) + + if count != 1 { + t.Errorf("Expected 1 page, got %d", count) + } +} + +func TestCreate(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + th.Mux.HandleFunc("/v2.0/lb/members", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "POST") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + th.TestHeader(t, r, "Content-Type", "application/json") + th.TestHeader(t, r, "Accept", "application/json") + th.TestJSONRequest(t, r, ` +{ + "member": { + "tenant_id": "453105b9-1754-413f-aab1-55f1af620750", + "pool_id": "foo", + "address": "192.0.2.14", + "protocol_port":8080 + } +} + `) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusCreated) + + fmt.Fprintf(w, ` +{ + "member": { + "id": "975592ca-e308-48ad-8298-731935ee9f45", + 
"address": "192.0.2.14", + "protocol_port": 8080, + "tenant_id": "453105b9-1754-413f-aab1-55f1af620750", + "admin_state_up":true, + "weight": 1, + "status": "DOWN" + } +} + `) + }) + + options := CreateOpts{ + TenantID: "453105b9-1754-413f-aab1-55f1af620750", + Address: "192.0.2.14", + ProtocolPort: 8080, + PoolID: "foo", + } + _, err := Create(fake.ServiceClient(), options).Extract() + th.AssertNoErr(t, err) +} + +func TestGet(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + th.Mux.HandleFunc("/v2.0/lb/members/975592ca-e308-48ad-8298-731935ee9f45", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + + fmt.Fprintf(w, ` +{ + "member":{ + "id":"975592ca-e308-48ad-8298-731935ee9f45", + "address":"192.0.2.14", + "protocol_port":8080, + "tenant_id":"453105b9-1754-413f-aab1-55f1af620750", + "admin_state_up":true, + "weight":1, + "status":"DOWN" + } +} + `) + }) + + m, err := Get(fake.ServiceClient(), "975592ca-e308-48ad-8298-731935ee9f45").Extract() + th.AssertNoErr(t, err) + + th.AssertEquals(t, "975592ca-e308-48ad-8298-731935ee9f45", m.ID) + th.AssertEquals(t, "192.0.2.14", m.Address) + th.AssertEquals(t, 8080, m.ProtocolPort) + th.AssertEquals(t, "453105b9-1754-413f-aab1-55f1af620750", m.TenantID) + th.AssertEquals(t, true, m.AdminStateUp) + th.AssertEquals(t, 1, m.Weight) + th.AssertEquals(t, "DOWN", m.Status) +} + +func TestUpdate(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + th.Mux.HandleFunc("/v2.0/lb/members/332abe93-f488-41ba-870b-2ac66be7f853", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "PUT") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + th.TestHeader(t, r, "Content-Type", "application/json") + th.TestHeader(t, r, "Accept", "application/json") + th.TestJSONRequest(t, r, ` +{ + "member":{ + "admin_state_up":false + } +} + `) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + + fmt.Fprintf(w, ` +{ + "member":{ + "status":"PENDING_UPDATE", + "protocol_port":8080, + "weight":1, + "admin_state_up":false, + "tenant_id":"4fd44f30292945e481c7b8a0c8908869", + "pool_id":"7803631d-f181-4500-b3a2-1b68ba2a75fd", + "address":"10.0.0.5", + "status_description":null, + "id":"48a471ea-64f1-4eb6-9be7-dae6bbe40a0f" + } +} + `) + }) + + options := UpdateOpts{AdminStateUp: false} + + _, err := Update(fake.ServiceClient(), "332abe93-f488-41ba-870b-2ac66be7f853", options).Extract() + th.AssertNoErr(t, err) +} + +func TestDelete(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + th.Mux.HandleFunc("/v2.0/lb/members/332abe93-f488-41ba-870b-2ac66be7f853", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "DELETE") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + w.WriteHeader(http.StatusNoContent) + }) + + res := Delete(fake.ServiceClient(), "332abe93-f488-41ba-870b-2ac66be7f853") + th.AssertNoErr(t, res.Err) +} diff --git a/vendor/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/lbaas/members/results.go b/vendor/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/lbaas/members/results.go new file mode 100644 index 000000000..3cad339b7 --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/lbaas/members/results.go @@ -0,0 +1,122 @@ +package members + +import ( + "github.com/mitchellh/mapstructure" + "github.com/rackspace/gophercloud" + 
"github.com/rackspace/gophercloud/pagination" +) + +// Member represents the application running on a backend server. +type Member struct { + // The status of the member. Indicates whether the member is operational. + Status string + + // Weight of member. + Weight int + + // The administrative state of the member, which is up (true) or down (false). + AdminStateUp bool `json:"admin_state_up" mapstructure:"admin_state_up"` + + // Owner of the member. Only an administrative user can specify a tenant ID + // other than its own. + TenantID string `json:"tenant_id" mapstructure:"tenant_id"` + + // The pool to which the member belongs. + PoolID string `json:"pool_id" mapstructure:"pool_id"` + + // The IP address of the member. + Address string + + // The port on which the application is hosted. + ProtocolPort int `json:"protocol_port" mapstructure:"protocol_port"` + + // The unique ID for the member. + ID string +} + +// MemberPage is the page returned by a pager when traversing over a +// collection of pool members. +type MemberPage struct { + pagination.LinkedPageBase +} + +// NextPageURL is invoked when a paginated collection of members has reached +// the end of a page and the pager seeks to traverse over a new one. In order +// to do this, it needs to construct the next page's URL. +func (p MemberPage) NextPageURL() (string, error) { + type resp struct { + Links []gophercloud.Link `mapstructure:"members_links"` + } + + var r resp + err := mapstructure.Decode(p.Body, &r) + if err != nil { + return "", err + } + + return gophercloud.ExtractNextURL(r.Links) +} + +// IsEmpty checks whether a MemberPage struct is empty. +func (p MemberPage) IsEmpty() (bool, error) { + is, err := ExtractMembers(p) + if err != nil { + return true, nil + } + return len(is) == 0, nil +} + +// ExtractMembers accepts a Page struct, specifically a MemberPage struct, +// and extracts the elements into a slice of Member structs. In other words, +// a generic collection is mapped into a relevant slice. +func ExtractMembers(page pagination.Page) ([]Member, error) { + var resp struct { + Members []Member `mapstructure:"members" json:"members"` + } + + err := mapstructure.Decode(page.(MemberPage).Body, &resp) + if err != nil { + return nil, err + } + + return resp.Members, nil +} + +type commonResult struct { + gophercloud.Result +} + +// Extract is a function that accepts a result and extracts a router. +func (r commonResult) Extract() (*Member, error) { + if r.Err != nil { + return nil, r.Err + } + + var res struct { + Member *Member `json:"member"` + } + + err := mapstructure.Decode(r.Body, &res) + + return res.Member, err +} + +// CreateResult represents the result of a create operation. +type CreateResult struct { + commonResult +} + +// GetResult represents the result of a get operation. +type GetResult struct { + commonResult +} + +// UpdateResult represents the result of an update operation. +type UpdateResult struct { + commonResult +} + +// DeleteResult represents the result of a delete operation. 
+type DeleteResult struct {
+	gophercloud.ErrResult
+}
diff --git a/vendor/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/lbaas/members/urls.go b/vendor/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/lbaas/members/urls.go
new file mode 100644
index 000000000..94b57e4c5
--- /dev/null
+++ b/vendor/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/lbaas/members/urls.go
@@ -0,0 +1,16 @@
+package members
+
+import "github.com/rackspace/gophercloud"
+
+const (
+	rootPath     = "lb"
+	resourcePath = "members"
+)
+
+func rootURL(c *gophercloud.ServiceClient) string {
+	return c.ServiceURL(rootPath, resourcePath)
+}
+
+func resourceURL(c *gophercloud.ServiceClient, id string) string {
+	return c.ServiceURL(rootPath, resourcePath, id)
+}
diff --git a/vendor/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/lbaas/monitors/requests.go b/vendor/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/lbaas/monitors/requests.go
new file mode 100644
index 000000000..71b21ef16
--- /dev/null
+++ b/vendor/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/lbaas/monitors/requests.go
@@ -0,0 +1,265 @@
+package monitors
+
+import (
+	"fmt"
+
+	"github.com/rackspace/gophercloud"
+	"github.com/rackspace/gophercloud/pagination"
+)
+
+// ListOpts allows the filtering and sorting of paginated collections through
+// the API. Filtering is achieved by passing in struct field values that map to
+// the health monitor attributes you want to see returned. SortKey allows you
+// to sort by a particular monitor attribute. SortDir sets the direction, and
+// is either `asc' or `desc'. Marker and Limit are used for pagination.
+type ListOpts struct {
+	ID            string `q:"id"`
+	TenantID      string `q:"tenant_id"`
+	Type          string `q:"type"`
+	Delay         int    `q:"delay"`
+	Timeout       int    `q:"timeout"`
+	MaxRetries    int    `q:"max_retries"`
+	HTTPMethod    string `q:"http_method"`
+	URLPath       string `q:"url_path"`
+	ExpectedCodes string `q:"expected_codes"`
+	AdminStateUp  *bool  `q:"admin_state_up"`
+	Status        string `q:"status"`
+	Limit         int    `q:"limit"`
+	Marker        string `q:"marker"`
+	SortKey       string `q:"sort_key"`
+	SortDir       string `q:"sort_dir"`
+}
+
+// List returns a Pager which allows you to iterate over a collection of
+// health monitors. It accepts a ListOpts struct, which allows you to filter
+// and sort the returned collection for greater efficiency.
+//
+// Default policy settings return only those monitors that are owned by the
+// tenant who submits the request, unless an admin user submits the request.
+func List(c *gophercloud.ServiceClient, opts ListOpts) pagination.Pager {
+	q, err := gophercloud.BuildQueryString(&opts)
+	if err != nil {
+		return pagination.Pager{Err: err}
+	}
+	u := rootURL(c) + q.String()
+
+	return pagination.NewPager(c, u, func(r pagination.PageResult) pagination.Page {
+		return MonitorPage{pagination.LinkedPageBase{PageResult: r}}
+	})
+}
+
+// Constants that represent approved monitoring types.
+const (
+	TypePING  = "PING"
+	TypeTCP   = "TCP"
+	TypeHTTP  = "HTTP"
+	TypeHTTPS = "HTTPS"
+)
+
+var (
+	errValidTypeRequired     = fmt.Errorf("A valid Type is required. 
Supported values are PING, TCP, HTTP and HTTPS")
+	errDelayRequired         = fmt.Errorf("Delay is required")
+	errTimeoutRequired       = fmt.Errorf("Timeout is required")
+	errMaxRetriesRequired    = fmt.Errorf("MaxRetries is required")
+	errURLPathRequired       = fmt.Errorf("URL path is required")
+	errExpectedCodesRequired = fmt.Errorf("ExpectedCodes is required")
+	errDelayMustGETimeout    = fmt.Errorf("Delay must be greater than or equal to timeout")
+)
+
+// CreateOpts contains all the values needed to create a new health monitor.
+type CreateOpts struct {
+	// Required for admins. Indicates the owner of the monitor.
+	TenantID string
+
+	// Required. The type of probe, which is PING, TCP, HTTP, or HTTPS, that is
+	// sent by the load balancer to verify the member state.
+	Type string
+
+	// Required. The time, in seconds, between sending probes to members.
+	Delay int
+
+	// Required. Maximum number of seconds for a monitor to wait for a ping reply
+	// before it times out. The value must be less than the delay value.
+	Timeout int
+
+	// Required. Number of permissible ping failures before changing the member's
+	// status to INACTIVE. Must be a number between 1 and 10.
+	MaxRetries int
+
+	// Required for HTTP(S) types. URI path that will be accessed if monitor type
+	// is HTTP or HTTPS.
+	URLPath string
+
+	// Required for HTTP(S) types. The HTTP method used for requests by the
+	// monitor. If this attribute is not specified, it defaults to "GET".
+	HTTPMethod string
+
+	// Required for HTTP(S) types. Expected HTTP codes for a passing HTTP(S)
+	// monitor. You can either specify a single status like "200", or a range
+	// like "200-202".
+	ExpectedCodes string
+
+	AdminStateUp *bool
+}
+
+// Create is an operation which provisions a new health monitor. There are
+// different types of monitors you can provision: PING, TCP, or HTTP(S). Below
+// are examples of how to create each one.
+//
+// Here is an example config struct to use when creating a PING or TCP monitor:
+//
+// CreateOpts{Type: TypePING, Delay: 20, Timeout: 10, MaxRetries: 3}
+// CreateOpts{Type: TypeTCP, Delay: 20, Timeout: 10, MaxRetries: 3}
+//
+// Here is an example config struct to use when creating an HTTP(S) monitor:
+//
+// CreateOpts{Type: TypeHTTP, Delay: 20, Timeout: 10, MaxRetries: 3,
+// HTTPMethod: "HEAD", ExpectedCodes: "200"}
+//
+func Create(c *gophercloud.ServiceClient, opts CreateOpts) CreateResult {
+	var res CreateResult
+
+	// Validate inputs
+	allowed := map[string]bool{TypeHTTP: true, TypeHTTPS: true, TypeTCP: true, TypePING: true}
+	if opts.Type == "" || allowed[opts.Type] == false {
+		res.Err = errValidTypeRequired
+	}
+	if opts.Delay == 0 {
+		res.Err = errDelayRequired
+	}
+	if opts.Timeout == 0 {
+		res.Err = errTimeoutRequired
+	}
+	if opts.MaxRetries == 0 {
+		res.Err = errMaxRetriesRequired
+	}
+	if opts.Type == TypeHTTP || opts.Type == TypeHTTPS {
+		if opts.URLPath == "" {
+			res.Err = errURLPathRequired
+		}
+		if opts.ExpectedCodes == "" {
+			res.Err = errExpectedCodesRequired
+		}
+	}
+	if opts.Delay < opts.Timeout {
+		res.Err = errDelayMustGETimeout
+	}
+	if res.Err != nil {
+		return res
+	}
+
+	type monitor struct {
+		Type          string  `json:"type"`
+		Delay         int     `json:"delay"`
+		Timeout       int     `json:"timeout"`
+		MaxRetries    int     `json:"max_retries"`
+		TenantID      *string `json:"tenant_id,omitempty"`
+		URLPath       *string `json:"url_path,omitempty"`
+		ExpectedCodes *string `json:"expected_codes,omitempty"`
+		HTTPMethod    *string `json:"http_method,omitempty"`
+		AdminStateUp  *bool   `json:"admin_state_up,omitempty"`
+	}
+
+	type request struct {
+		Monitor monitor `json:"health_monitor"`
+	}
+
+	reqBody := request{Monitor: monitor{
+		Type:          opts.Type,
+		Delay:         opts.Delay,
+		Timeout:       opts.Timeout,
+		MaxRetries:    opts.MaxRetries,
+		TenantID:      gophercloud.MaybeString(opts.TenantID),
+		URLPath:       gophercloud.MaybeString(opts.URLPath),
+		ExpectedCodes: gophercloud.MaybeString(opts.ExpectedCodes),
+		HTTPMethod:    gophercloud.MaybeString(opts.HTTPMethod),
+		AdminStateUp:  opts.AdminStateUp,
+	}}
+
+	_, res.Err = c.Post(rootURL(c), reqBody, &res.Body, nil)
+	return res
+}
+
+// Get retrieves a particular health monitor based on its unique ID.
+func Get(c *gophercloud.ServiceClient, id string) GetResult {
+	var res GetResult
+	_, res.Err = c.Get(resourceURL(c, id), &res.Body, nil)
+	return res
+}
+
+// UpdateOpts contains all the values needed to update an existing monitor.
+// Attributes that appear in CreateOpts but are not listed here are immutable
+// and cannot be updated.
+type UpdateOpts struct {
+	// Required. The time, in seconds, between sending probes to members.
+	Delay int
+
+	// Required. Maximum number of seconds for a monitor to wait for a ping reply
+	// before it times out. The value must be less than the delay value.
+	Timeout int
+
+	// Required. Number of permissible ping failures before changing the member's
+	// status to INACTIVE. Must be a number between 1 and 10.
+	MaxRetries int
+
+	// Required for HTTP(S) types. URI path that will be accessed if monitor type
+	// is HTTP or HTTPS.
+	URLPath string
+
+	// Required for HTTP(S) types. The HTTP method used for requests by the
+	// monitor. If this attribute is not specified, it defaults to "GET".
+	HTTPMethod string
+
+	// Required for HTTP(S) types. Expected HTTP codes for a passing HTTP(S)
+	// monitor. You can either specify a single status like "200", or a range
+	// like "200-202".
+ ExpectedCodes string + + AdminStateUp *bool +} + +// Update is an operation which modifies the attributes of the specified monitor. +func Update(c *gophercloud.ServiceClient, id string, opts UpdateOpts) UpdateResult { + var res UpdateResult + + if opts.Delay > 0 && opts.Timeout > 0 && opts.Delay < opts.Timeout { + res.Err = errDelayMustGETimeout + } + + type monitor struct { + Delay int `json:"delay"` + Timeout int `json:"timeout"` + MaxRetries int `json:"max_retries"` + URLPath *string `json:"url_path,omitempty"` + ExpectedCodes *string `json:"expected_codes,omitempty"` + HTTPMethod *string `json:"http_method,omitempty"` + AdminStateUp *bool `json:"admin_state_up,omitempty"` + } + + type request struct { + Monitor monitor `json:"health_monitor"` + } + + reqBody := request{Monitor: monitor{ + Delay: opts.Delay, + Timeout: opts.Timeout, + MaxRetries: opts.MaxRetries, + URLPath: gophercloud.MaybeString(opts.URLPath), + ExpectedCodes: gophercloud.MaybeString(opts.ExpectedCodes), + HTTPMethod: gophercloud.MaybeString(opts.HTTPMethod), + AdminStateUp: opts.AdminStateUp, + }} + + _, res.Err = c.Put(resourceURL(c, id), reqBody, &res.Body, &gophercloud.RequestOpts{ + OkCodes: []int{200, 202}, + }) + + return res +} + +// Delete will permanently delete a particular monitor based on its unique ID. +func Delete(c *gophercloud.ServiceClient, id string) DeleteResult { + var res DeleteResult + _, res.Err = c.Delete(resourceURL(c, id), nil) + return res +} diff --git a/vendor/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/lbaas/monitors/requests_test.go b/vendor/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/lbaas/monitors/requests_test.go new file mode 100644 index 000000000..79a99bf8a --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/lbaas/monitors/requests_test.go @@ -0,0 +1,312 @@ +package monitors + +import ( + "fmt" + "net/http" + "testing" + + fake "github.com/rackspace/gophercloud/openstack/networking/v2/common" + "github.com/rackspace/gophercloud/pagination" + th "github.com/rackspace/gophercloud/testhelper" +) + +func TestURLs(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + th.AssertEquals(t, th.Endpoint()+"v2.0/lb/health_monitors", rootURL(fake.ServiceClient())) +} + +func TestList(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + th.Mux.HandleFunc("/v2.0/lb/health_monitors", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + + fmt.Fprintf(w, ` +{ + "health_monitors":[ + { + "admin_state_up":true, + "tenant_id":"83657cfcdfe44cd5920adaf26c48ceea", + "delay":10, + "max_retries":1, + "timeout":1, + "type":"PING", + "id":"466c8345-28d8-4f84-a246-e04380b0461d" + }, + { + "admin_state_up":true, + "tenant_id":"83657cfcdfe44cd5920adaf26c48ceea", + "delay":5, + "expected_codes":"200", + "max_retries":2, + "http_method":"GET", + "timeout":2, + "url_path":"/", + "type":"HTTP", + "id":"5d4b5228-33b0-4e60-b225-9b727c1a20e7" + } + ] +} + `) + }) + + count := 0 + + List(fake.ServiceClient(), ListOpts{}).EachPage(func(page pagination.Page) (bool, error) { + count++ + actual, err := ExtractMonitors(page) + if err != nil { + t.Errorf("Failed to extract monitors: %v", err) + return false, err + } + + expected := []Monitor{ + Monitor{ + AdminStateUp: true, + TenantID: "83657cfcdfe44cd5920adaf26c48ceea", + Delay: 10, + MaxRetries: 1, + 
Timeout: 1, + Type: "PING", + ID: "466c8345-28d8-4f84-a246-e04380b0461d", + }, + Monitor{ + AdminStateUp: true, + TenantID: "83657cfcdfe44cd5920adaf26c48ceea", + Delay: 5, + ExpectedCodes: "200", + MaxRetries: 2, + Timeout: 2, + URLPath: "/", + Type: "HTTP", + HTTPMethod: "GET", + ID: "5d4b5228-33b0-4e60-b225-9b727c1a20e7", + }, + } + + th.CheckDeepEquals(t, expected, actual) + + return true, nil + }) + + if count != 1 { + t.Errorf("Expected 1 page, got %d", count) + } +} + +func TestDelayMustBeGreaterOrEqualThanTimeout(t *testing.T) { + _, err := Create(fake.ServiceClient(), CreateOpts{ + Type: "HTTP", + Delay: 1, + Timeout: 10, + MaxRetries: 5, + URLPath: "/check", + ExpectedCodes: "200-299", + }).Extract() + + if err == nil { + t.Fatalf("Expected error, got none") + } + + _, err = Update(fake.ServiceClient(), "453105b9-1754-413f-aab1-55f1af620750", UpdateOpts{ + Delay: 1, + Timeout: 10, + }).Extract() + + if err == nil { + t.Fatalf("Expected error, got none") + } +} + +func TestCreate(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + th.Mux.HandleFunc("/v2.0/lb/health_monitors", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "POST") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + th.TestHeader(t, r, "Content-Type", "application/json") + th.TestHeader(t, r, "Accept", "application/json") + th.TestJSONRequest(t, r, ` +{ + "health_monitor":{ + "type":"HTTP", + "tenant_id":"453105b9-1754-413f-aab1-55f1af620750", + "delay":20, + "timeout":10, + "max_retries":5, + "url_path":"/check", + "expected_codes":"200-299" + } +} + `) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusCreated) + + fmt.Fprintf(w, ` +{ + "health_monitor":{ + "id":"f3eeab00-8367-4524-b662-55e64d4cacb5", + "tenant_id":"453105b9-1754-413f-aab1-55f1af620750", + "type":"HTTP", + "delay":20, + "timeout":10, + "max_retries":5, + "http_method":"GET", + "url_path":"/check", + "expected_codes":"200-299", + "admin_state_up":true, + "status":"ACTIVE" + } +} + `) + }) + + _, err := Create(fake.ServiceClient(), CreateOpts{ + Type: "HTTP", + TenantID: "453105b9-1754-413f-aab1-55f1af620750", + Delay: 20, + Timeout: 10, + MaxRetries: 5, + URLPath: "/check", + ExpectedCodes: "200-299", + }).Extract() + + th.AssertNoErr(t, err) +} + +func TestRequiredCreateOpts(t *testing.T) { + res := Create(fake.ServiceClient(), CreateOpts{}) + if res.Err == nil { + t.Fatalf("Expected error, got none") + } + res = Create(fake.ServiceClient(), CreateOpts{Type: TypeHTTP}) + if res.Err == nil { + t.Fatalf("Expected error, got none") + } +} + +func TestGet(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + th.Mux.HandleFunc("/v2.0/lb/health_monitors/f3eeab00-8367-4524-b662-55e64d4cacb5", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + + fmt.Fprintf(w, ` +{ + "health_monitor":{ + "id":"f3eeab00-8367-4524-b662-55e64d4cacb5", + "tenant_id":"453105b9-1754-413f-aab1-55f1af620750", + "type":"HTTP", + "delay":20, + "timeout":10, + "max_retries":5, + "http_method":"GET", + "url_path":"/check", + "expected_codes":"200-299", + "admin_state_up":true, + "status":"ACTIVE" + } +} + `) + }) + + hm, err := Get(fake.ServiceClient(), "f3eeab00-8367-4524-b662-55e64d4cacb5").Extract() + th.AssertNoErr(t, err) + + th.AssertEquals(t, "f3eeab00-8367-4524-b662-55e64d4cacb5", hm.ID) + th.AssertEquals(t, 
"453105b9-1754-413f-aab1-55f1af620750", hm.TenantID) + th.AssertEquals(t, "HTTP", hm.Type) + th.AssertEquals(t, 20, hm.Delay) + th.AssertEquals(t, 10, hm.Timeout) + th.AssertEquals(t, 5, hm.MaxRetries) + th.AssertEquals(t, "GET", hm.HTTPMethod) + th.AssertEquals(t, "/check", hm.URLPath) + th.AssertEquals(t, "200-299", hm.ExpectedCodes) + th.AssertEquals(t, true, hm.AdminStateUp) + th.AssertEquals(t, "ACTIVE", hm.Status) +} + +func TestUpdate(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + th.Mux.HandleFunc("/v2.0/lb/health_monitors/b05e44b5-81f9-4551-b474-711a722698f7", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "PUT") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + th.TestHeader(t, r, "Content-Type", "application/json") + th.TestHeader(t, r, "Accept", "application/json") + th.TestJSONRequest(t, r, ` +{ + "health_monitor":{ + "delay": 3, + "timeout": 20, + "max_retries": 10, + "url_path": "/another_check", + "expected_codes": "301" + } +} + `) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusAccepted) + + fmt.Fprintf(w, ` +{ + "health_monitor": { + "admin_state_up": true, + "tenant_id": "4fd44f30292945e481c7b8a0c8908869", + "delay": 3, + "max_retries": 10, + "http_method": "GET", + "timeout": 20, + "pools": [ + { + "status": "PENDING_CREATE", + "status_description": null, + "pool_id": "6e55751f-6ad4-4e53-b8d4-02e442cd21df" + } + ], + "type": "PING", + "id": "b05e44b5-81f9-4551-b474-711a722698f7" + } +} + `) + }) + + _, err := Update(fake.ServiceClient(), "b05e44b5-81f9-4551-b474-711a722698f7", UpdateOpts{ + Delay: 3, + Timeout: 20, + MaxRetries: 10, + URLPath: "/another_check", + ExpectedCodes: "301", + }).Extract() + + th.AssertNoErr(t, err) +} + +func TestDelete(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + th.Mux.HandleFunc("/v2.0/lb/health_monitors/b05e44b5-81f9-4551-b474-711a722698f7", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "DELETE") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + w.WriteHeader(http.StatusNoContent) + }) + + res := Delete(fake.ServiceClient(), "b05e44b5-81f9-4551-b474-711a722698f7") + th.AssertNoErr(t, res.Err) +} diff --git a/vendor/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/lbaas/monitors/results.go b/vendor/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/lbaas/monitors/results.go new file mode 100644 index 000000000..d595abd54 --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/lbaas/monitors/results.go @@ -0,0 +1,147 @@ +package monitors + +import ( + "github.com/mitchellh/mapstructure" + "github.com/rackspace/gophercloud" + "github.com/rackspace/gophercloud/pagination" +) + +// Monitor represents a load balancer health monitor. A health monitor is used +// to determine whether or not back-end members of the VIP's pool are usable +// for processing a request. A pool can have several health monitors associated +// with it. There are different types of health monitors supported: +// +// PING: used to ping the members using ICMP. +// TCP: used to connect to the members using TCP. +// HTTP: used to send an HTTP request to the member. +// HTTPS: used to send a secure HTTP request to the member. +// +// When a pool has several monitors associated with it, each member of the pool +// is monitored by all these monitors. 
If any monitor declares the member as
+// unhealthy, then the member status is changed to INACTIVE and the member
+// won't participate in its pool's load balancing. In other words, ALL monitors
+// must declare the member to be healthy for it to stay ACTIVE.
+type Monitor struct {
+	// The unique ID for the health monitor.
+	ID string
+
+	// Owner of the monitor. Only an administrative user can specify a tenant ID
+	// other than its own.
+	TenantID string `json:"tenant_id" mapstructure:"tenant_id"`
+
+	// The type of probe sent by the load balancer to verify the member state,
+	// which is PING, TCP, HTTP, or HTTPS.
+	Type string
+
+	// The time, in seconds, between sending probes to members.
+	Delay int
+
+	// The maximum number of seconds for a monitor to wait for a connection to be
+	// established before it times out. This value must be less than the delay value.
+	Timeout int
+
+	// Number of allowed connection failures before changing the status of the
+	// member to INACTIVE. A valid value is from 1 to 10.
+	MaxRetries int `json:"max_retries" mapstructure:"max_retries"`
+
+	// The HTTP method that the monitor uses for requests.
+	HTTPMethod string `json:"http_method" mapstructure:"http_method"`
+
+	// The HTTP path of the request sent by the monitor to test the health of a
+	// member. Must be a string beginning with a forward slash (/).
+	URLPath string `json:"url_path" mapstructure:"url_path"`
+
+	// Expected HTTP codes for a passing HTTP(S) monitor.
+	ExpectedCodes string `json:"expected_codes" mapstructure:"expected_codes"`
+
+	// The administrative state of the health monitor, which is up (true) or down (false).
+	AdminStateUp bool `json:"admin_state_up" mapstructure:"admin_state_up"`
+
+	// The status of the health monitor. Indicates whether the health monitor is
+	// operational.
+	Status string
+}
+
+// MonitorPage is the page returned by a pager when traversing over a
+// collection of health monitors.
+type MonitorPage struct {
+	pagination.LinkedPageBase
+}
+
+// NextPageURL is invoked when a paginated collection of monitors has reached
+// the end of a page and the pager seeks to traverse over a new one. In order
+// to do this, it needs to construct the next page's URL.
+func (p MonitorPage) NextPageURL() (string, error) {
+	type resp struct {
+		Links []gophercloud.Link `mapstructure:"health_monitors_links"`
+	}
+
+	var r resp
+	err := mapstructure.Decode(p.Body, &r)
+	if err != nil {
+		return "", err
+	}
+
+	return gophercloud.ExtractNextURL(r.Links)
+}
+
+// IsEmpty checks whether a MonitorPage struct is empty.
+func (p MonitorPage) IsEmpty() (bool, error) {
+	is, err := ExtractMonitors(p)
+	if err != nil {
+		return true, nil
+	}
+	return len(is) == 0, nil
+}
+
+// ExtractMonitors accepts a Page struct, specifically a MonitorPage struct,
+// and extracts the elements into a slice of Monitor structs. In other words,
+// a generic collection is mapped into a relevant slice.
+func ExtractMonitors(page pagination.Page) ([]Monitor, error) {
+	var resp struct {
+		Monitors []Monitor `mapstructure:"health_monitors" json:"health_monitors"`
+	}
+
+	err := mapstructure.Decode(page.(MonitorPage).Body, &resp)
+
+	return resp.Monitors, err
+}
+
+type commonResult struct {
+	gophercloud.Result
+}
+
+// Extract is a function that accepts a result and extracts a monitor.
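+// For example (a hedged sketch; the client value is assumed):
+//
+//	hm, err := monitors.Get(client, "<monitor-uuid>").Extract()
+//	if err == nil {
+//		// inspect hm.Type, hm.Delay, hm.Timeout, ...
+//	}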
+func (r commonResult) Extract() (*Monitor, error) {
+	if r.Err != nil {
+		return nil, r.Err
+	}
+
+	var res struct {
+		Monitor *Monitor `json:"health_monitor" mapstructure:"health_monitor"`
+	}
+
+	err := mapstructure.Decode(r.Body, &res)
+
+	return res.Monitor, err
+}
+
+// CreateResult represents the result of a create operation.
+type CreateResult struct {
+	commonResult
+}
+
+// GetResult represents the result of a get operation.
+type GetResult struct {
+	commonResult
+}
+
+// UpdateResult represents the result of an update operation.
+type UpdateResult struct {
+	commonResult
+}
+
+// DeleteResult represents the result of a delete operation.
+type DeleteResult struct {
+	gophercloud.ErrResult
+}
diff --git a/vendor/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/lbaas/monitors/urls.go b/vendor/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/lbaas/monitors/urls.go
new file mode 100644
index 000000000..46e84bbf5
--- /dev/null
+++ b/vendor/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/lbaas/monitors/urls.go
@@ -0,0 +1,16 @@
+package monitors
+
+import "github.com/rackspace/gophercloud"
+
+const (
+	rootPath     = "lb"
+	resourcePath = "health_monitors"
+)
+
+func rootURL(c *gophercloud.ServiceClient) string {
+	return c.ServiceURL(rootPath, resourcePath)
+}
+
+func resourceURL(c *gophercloud.ServiceClient, id string) string {
+	return c.ServiceURL(rootPath, resourcePath, id)
+}
diff --git a/vendor/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/lbaas/pools/requests.go b/vendor/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/lbaas/pools/requests.go
new file mode 100644
index 000000000..2bb0acc44
--- /dev/null
+++ b/vendor/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/lbaas/pools/requests.go
@@ -0,0 +1,181 @@
+package pools
+
+import (
+	"github.com/rackspace/gophercloud"
+	"github.com/rackspace/gophercloud/pagination"
+)
+
+// ListOpts allows the filtering and sorting of paginated collections through
+// the API. Filtering is achieved by passing in struct field values that map to
+// the pool attributes you want to see returned. SortKey allows you to
+// sort by a particular pool attribute. SortDir sets the direction, and is
+// either `asc' or `desc'. Marker and Limit are used for pagination.
+type ListOpts struct {
+	Status       string `q:"status"`
+	LBMethod     string `q:"lb_method"`
+	Protocol     string `q:"protocol"`
+	SubnetID     string `q:"subnet_id"`
+	TenantID     string `q:"tenant_id"`
+	AdminStateUp *bool  `q:"admin_state_up"`
+	Name         string `q:"name"`
+	ID           string `q:"id"`
+	VIPID        string `q:"vip_id"`
+	Limit        int    `q:"limit"`
+	Marker       string `q:"marker"`
+	SortKey      string `q:"sort_key"`
+	SortDir      string `q:"sort_dir"`
+}
+
+// List returns a Pager which allows you to iterate over a collection of
+// pools. It accepts a ListOpts struct, which allows you to filter and sort
+// the returned collection for greater efficiency.
+//
+// Default policy settings return only those pools that are owned by the
+// tenant who submits the request, unless an admin user submits the request.
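+//
+// An illustrative sketch (the client value is assumed):
+//
+//	pager := pools.List(client, pools.ListOpts{SubnetID: "<subnet-uuid>"})
+//	err := pager.EachPage(func(page pagination.Page) (bool, error) {
+//		ps, err := pools.ExtractPools(page)
+//		if err != nil {
+//			return false, err
+//		}
+//		for _, p := range ps {
+//			// inspect p.Name, p.LBMethod, p.MemberIDs, ...
+//		}
+//		return true, nil
+//	})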
+func List(c *gophercloud.ServiceClient, opts ListOpts) pagination.Pager {
+	q, err := gophercloud.BuildQueryString(&opts)
+	if err != nil {
+		return pagination.Pager{Err: err}
+	}
+	u := rootURL(c) + q.String()
+	return pagination.NewPager(c, u, func(r pagination.PageResult) pagination.Page {
+		return PoolPage{pagination.LinkedPageBase{PageResult: r}}
+	})
+}
+
+// Supported attributes for create/update operations.
+const (
+	LBMethodRoundRobin       = "ROUND_ROBIN"
+	LBMethodLeastConnections = "LEAST_CONNECTIONS"
+
+	ProtocolTCP   = "TCP"
+	ProtocolHTTP  = "HTTP"
+	ProtocolHTTPS = "HTTPS"
+)
+
+// CreateOpts contains all the values needed to create a new pool.
+type CreateOpts struct {
+	// Only required if the caller has an admin role and wants to create a pool
+	// for another tenant.
+	TenantID string
+
+	// Required. Name of the pool.
+	Name string
+
+	// Required. The protocol used by the pool members; you can use either
+	// ProtocolTCP, ProtocolHTTP, or ProtocolHTTPS.
+	Protocol string
+
+	// The network on which the members of the pool will be located. Only members
+	// that are on this network can be added to the pool.
+	SubnetID string
+
+	// The algorithm used to distribute load between the members of the pool. The
+	// current specification supports LBMethodRoundRobin and
+	// LBMethodLeastConnections as valid values for this attribute.
+	LBMethod string
+}
+
+// Create accepts a CreateOpts struct and uses the values to create a new
+// load balancer pool.
+func Create(c *gophercloud.ServiceClient, opts CreateOpts) CreateResult {
+	type pool struct {
+		Name     string `json:"name"`
+		TenantID string `json:"tenant_id,omitempty"`
+		Protocol string `json:"protocol"`
+		SubnetID string `json:"subnet_id"`
+		LBMethod string `json:"lb_method"`
+	}
+	type request struct {
+		Pool pool `json:"pool"`
+	}
+
+	reqBody := request{Pool: pool{
+		Name:     opts.Name,
+		TenantID: opts.TenantID,
+		Protocol: opts.Protocol,
+		SubnetID: opts.SubnetID,
+		LBMethod: opts.LBMethod,
+	}}
+
+	var res CreateResult
+	_, res.Err = c.Post(rootURL(c), reqBody, &res.Body, nil)
+	return res
+}
+
+// Get retrieves a particular pool based on its unique ID.
+func Get(c *gophercloud.ServiceClient, id string) GetResult {
+	var res GetResult
+	_, res.Err = c.Get(resourceURL(c, id), &res.Body, nil)
+	return res
+}
+
+// UpdateOpts contains the values used when updating a pool.
+type UpdateOpts struct {
+	// Required. Name of the pool.
+	Name string
+
+	// The algorithm used to distribute load between the members of the pool. The
+	// current specification supports LBMethodRoundRobin and
+	// LBMethodLeastConnections as valid values for this attribute.
+	LBMethod string
+}
+
+// Update allows pools to be updated.
+func Update(c *gophercloud.ServiceClient, id string, opts UpdateOpts) UpdateResult {
+	type pool struct {
+		Name     string `json:"name"`
+		LBMethod string `json:"lb_method"`
+	}
+	type request struct {
+		Pool pool `json:"pool"`
+	}
+
+	reqBody := request{Pool: pool{
+		Name:     opts.Name,
+		LBMethod: opts.LBMethod,
+	}}
+
+	// Send request to API
+	var res UpdateResult
+	_, res.Err = c.Put(resourceURL(c, id), reqBody, &res.Body, &gophercloud.RequestOpts{
+		OkCodes: []int{200},
+	})
+	return res
+}
+
+// Delete will permanently delete a particular pool based on its unique ID.
+func Delete(c *gophercloud.ServiceClient, id string) DeleteResult {
+	var res DeleteResult
+	_, res.Err = c.Delete(resourceURL(c, id), nil)
+	return res
+}
+
+// AssociateMonitor will associate a health monitor with a particular pool.
+// Once associated, the health monitor will start monitoring the members of the
+// pool and will deactivate these members if they are deemed unhealthy. A
+// member can be deactivated (status set to INACTIVE) if any of the health
+// monitors finds it unhealthy.
+func AssociateMonitor(c *gophercloud.ServiceClient, poolID, monitorID string) AssociateResult {
+	type hm struct {
+		ID string `json:"id"`
+	}
+	type request struct {
+		Monitor hm `json:"health_monitor"`
+	}
+
+	reqBody := request{hm{ID: monitorID}}
+
+	var res AssociateResult
+	_, res.Err = c.Post(associateURL(c, poolID), reqBody, &res.Body, nil)
+	return res
+}
+
+// DisassociateMonitor will disassociate a health monitor from a particular
+// pool. When disassociation is successful, the health monitor will no longer
+// check for the health of the members of the pool.
+func DisassociateMonitor(c *gophercloud.ServiceClient, poolID, monitorID string) AssociateResult {
+	var res AssociateResult
+	_, res.Err = c.Delete(disassociateURL(c, poolID, monitorID), nil)
+	return res
+}
diff --git a/vendor/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/lbaas/pools/requests_test.go b/vendor/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/lbaas/pools/requests_test.go
new file mode 100644
index 000000000..3b5c7c710
--- /dev/null
+++ b/vendor/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/lbaas/pools/requests_test.go
@@ -0,0 +1,318 @@
+package pools
+
+import (
+	"fmt"
+	"net/http"
+	"testing"
+
+	fake "github.com/rackspace/gophercloud/openstack/networking/v2/common"
+	"github.com/rackspace/gophercloud/pagination"
+	th "github.com/rackspace/gophercloud/testhelper"
+)
+
+func TestURLs(t *testing.T) {
+	th.SetupHTTP()
+	defer th.TeardownHTTP()
+
+	th.AssertEquals(t, th.Endpoint()+"v2.0/lb/pools", rootURL(fake.ServiceClient()))
+}
+
+func TestList(t *testing.T) {
+	th.SetupHTTP()
+	defer th.TeardownHTTP()
+
+	th.Mux.HandleFunc("/v2.0/lb/pools", func(w http.ResponseWriter, r *http.Request) {
+		th.TestMethod(t, r, "GET")
+		th.TestHeader(t, r, "X-Auth-Token", fake.TokenID)
+
+		w.Header().Add("Content-Type", "application/json")
+		w.WriteHeader(http.StatusOK)
+
+		fmt.Fprintf(w, `
+{
+   "pools":[
+      {
+         "status":"ACTIVE",
+         "lb_method":"ROUND_ROBIN",
+         "protocol":"HTTP",
+         "description":"",
+         "health_monitors":[
+            "466c8345-28d8-4f84-a246-e04380b0461d",
+            "5d4b5228-33b0-4e60-b225-9b727c1a20e7"
+         ],
+         "members":[
+            "701b531b-111a-4f21-ad85-4795b7b12af6",
+            "beb53b4d-230b-4abd-8118-575b8fa006ef"
+         ],
+         "status_description": null,
+         "id":"72741b06-df4d-4715-b142-276b6bce75ab",
+         "vip_id":"4ec89087-d057-4e2c-911f-60a3b47ee304",
+         "name":"app_pool",
+         "admin_state_up":true,
+         "subnet_id":"8032909d-47a1-4715-90af-5153ffe39861",
+         "tenant_id":"83657cfcdfe44cd5920adaf26c48ceea",
+         "health_monitors_status": [],
+         "provider": "haproxy"
+      }
+   ]
+}
+			`)
+	})
+
+	count := 0
+
+	List(fake.ServiceClient(), ListOpts{}).EachPage(func(page pagination.Page) (bool, error) {
+		count++
+		actual, err := ExtractPools(page)
+		if err != nil {
+			t.Errorf("Failed to extract pools: %v", err)
+			return false, err
+		}
+
+		expected := []Pool{
+			Pool{
+				Status:      "ACTIVE",
+				LBMethod:    "ROUND_ROBIN",
+				Protocol:    "HTTP",
+				Description: "",
+				MonitorIDs: []string{
+					"466c8345-28d8-4f84-a246-e04380b0461d",
+					"5d4b5228-33b0-4e60-b225-9b727c1a20e7",
+				},
+				SubnetID:     "8032909d-47a1-4715-90af-5153ffe39861",
+				TenantID:     "83657cfcdfe44cd5920adaf26c48ceea",
+				AdminStateUp: true,
+				Name:         "app_pool",
+				MemberIDs: []string{
+
"701b531b-111a-4f21-ad85-4795b7b12af6", + "beb53b4d-230b-4abd-8118-575b8fa006ef", + }, + ID: "72741b06-df4d-4715-b142-276b6bce75ab", + VIPID: "4ec89087-d057-4e2c-911f-60a3b47ee304", + Provider: "haproxy", + }, + } + + th.CheckDeepEquals(t, expected, actual) + + return true, nil + }) + + if count != 1 { + t.Errorf("Expected 1 page, got %d", count) + } +} + +func TestCreate(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + th.Mux.HandleFunc("/v2.0/lb/pools", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "POST") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + th.TestHeader(t, r, "Content-Type", "application/json") + th.TestHeader(t, r, "Accept", "application/json") + th.TestJSONRequest(t, r, ` +{ + "pool": { + "lb_method": "ROUND_ROBIN", + "protocol": "HTTP", + "name": "Example pool", + "subnet_id": "1981f108-3c48-48d2-b908-30f7d28532c9", + "tenant_id": "2ffc6e22aae24e4795f87155d24c896f" + } +} + `) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusCreated) + + fmt.Fprintf(w, ` +{ + "pool": { + "status": "PENDING_CREATE", + "lb_method": "ROUND_ROBIN", + "protocol": "HTTP", + "description": "", + "health_monitors": [], + "members": [], + "status_description": null, + "id": "69055154-f603-4a28-8951-7cc2d9e54a9a", + "vip_id": null, + "name": "Example pool", + "admin_state_up": true, + "subnet_id": "1981f108-3c48-48d2-b908-30f7d28532c9", + "tenant_id": "2ffc6e22aae24e4795f87155d24c896f", + "health_monitors_status": [] + } +} + `) + }) + + options := CreateOpts{ + LBMethod: LBMethodRoundRobin, + Protocol: "HTTP", + Name: "Example pool", + SubnetID: "1981f108-3c48-48d2-b908-30f7d28532c9", + TenantID: "2ffc6e22aae24e4795f87155d24c896f", + } + p, err := Create(fake.ServiceClient(), options).Extract() + th.AssertNoErr(t, err) + + th.AssertEquals(t, "PENDING_CREATE", p.Status) + th.AssertEquals(t, "ROUND_ROBIN", p.LBMethod) + th.AssertEquals(t, "HTTP", p.Protocol) + th.AssertEquals(t, "", p.Description) + th.AssertDeepEquals(t, []string{}, p.MonitorIDs) + th.AssertDeepEquals(t, []string{}, p.MemberIDs) + th.AssertEquals(t, "69055154-f603-4a28-8951-7cc2d9e54a9a", p.ID) + th.AssertEquals(t, "Example pool", p.Name) + th.AssertEquals(t, "1981f108-3c48-48d2-b908-30f7d28532c9", p.SubnetID) + th.AssertEquals(t, "2ffc6e22aae24e4795f87155d24c896f", p.TenantID) +} + +func TestGet(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + th.Mux.HandleFunc("/v2.0/lb/pools/332abe93-f488-41ba-870b-2ac66be7f853", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + + fmt.Fprintf(w, ` +{ + "pool":{ + "id":"332abe93-f488-41ba-870b-2ac66be7f853", + "tenant_id":"19eaa775-cf5d-49bc-902e-2f85f668d995", + "name":"Example pool", + "description":"", + "protocol":"tcp", + "lb_algorithm":"ROUND_ROBIN", + "session_persistence":{ + }, + "healthmonitor_id":null, + "members":[ + ], + "admin_state_up":true, + "status":"ACTIVE" + } +} + `) + }) + + n, err := Get(fake.ServiceClient(), "332abe93-f488-41ba-870b-2ac66be7f853").Extract() + th.AssertNoErr(t, err) + + th.AssertEquals(t, n.ID, "332abe93-f488-41ba-870b-2ac66be7f853") +} + +func TestUpdate(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + th.Mux.HandleFunc("/v2.0/lb/pools/332abe93-f488-41ba-870b-2ac66be7f853", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "PUT") + th.TestHeader(t, r, "X-Auth-Token", 
fake.TokenID) + th.TestHeader(t, r, "Content-Type", "application/json") + th.TestHeader(t, r, "Accept", "application/json") + th.TestJSONRequest(t, r, ` +{ + "pool":{ + "name":"SuperPool", + "lb_method": "LEAST_CONNECTIONS" + } +} + `) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + + fmt.Fprintf(w, ` +{ + "pool":{ + "status":"PENDING_UPDATE", + "lb_method":"LEAST_CONNECTIONS", + "protocol":"TCP", + "description":"", + "health_monitors":[ + + ], + "subnet_id":"8032909d-47a1-4715-90af-5153ffe39861", + "tenant_id":"83657cfcdfe44cd5920adaf26c48ceea", + "admin_state_up":true, + "name":"SuperPool", + "members":[ + + ], + "id":"61b1f87a-7a21-4ad3-9dda-7f81d249944f", + "vip_id":null + } +} + `) + }) + + options := UpdateOpts{Name: "SuperPool", LBMethod: LBMethodLeastConnections} + + n, err := Update(fake.ServiceClient(), "332abe93-f488-41ba-870b-2ac66be7f853", options).Extract() + th.AssertNoErr(t, err) + + th.AssertEquals(t, "SuperPool", n.Name) + th.AssertDeepEquals(t, "LEAST_CONNECTIONS", n.LBMethod) +} + +func TestDelete(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + th.Mux.HandleFunc("/v2.0/lb/pools/332abe93-f488-41ba-870b-2ac66be7f853", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "DELETE") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + w.WriteHeader(http.StatusNoContent) + }) + + res := Delete(fake.ServiceClient(), "332abe93-f488-41ba-870b-2ac66be7f853") + th.AssertNoErr(t, res.Err) +} + +func TestAssociateHealthMonitor(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + th.Mux.HandleFunc("/v2.0/lb/pools/332abe93-f488-41ba-870b-2ac66be7f853/health_monitors", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "POST") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + th.TestHeader(t, r, "Content-Type", "application/json") + th.TestHeader(t, r, "Accept", "application/json") + th.TestJSONRequest(t, r, ` +{ + "health_monitor":{ + "id":"b624decf-d5d3-4c66-9a3d-f047e7786181" + } +} + `) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusCreated) + fmt.Fprintf(w, `{}`) + }) + + _, err := AssociateMonitor(fake.ServiceClient(), "332abe93-f488-41ba-870b-2ac66be7f853", "b624decf-d5d3-4c66-9a3d-f047e7786181").Extract() + th.AssertNoErr(t, err) +} + +func TestDisassociateHealthMonitor(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + th.Mux.HandleFunc("/v2.0/lb/pools/332abe93-f488-41ba-870b-2ac66be7f853/health_monitors/b624decf-d5d3-4c66-9a3d-f047e7786181", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "DELETE") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + w.WriteHeader(http.StatusNoContent) + }) + + res := DisassociateMonitor(fake.ServiceClient(), "332abe93-f488-41ba-870b-2ac66be7f853", "b624decf-d5d3-4c66-9a3d-f047e7786181") + th.AssertNoErr(t, res.Err) +} diff --git a/vendor/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/lbaas/pools/results.go b/vendor/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/lbaas/pools/results.go new file mode 100644 index 000000000..07ec85eda --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/lbaas/pools/results.go @@ -0,0 +1,146 @@ +package pools + +import ( + "github.com/mitchellh/mapstructure" + "github.com/rackspace/gophercloud" + "github.com/rackspace/gophercloud/pagination" +) + +// Pool represents a logical set of devices, such as web servers, that you +// group together to receive and 
process traffic. The load balancing function
+// chooses a member of the pool according to the configured load balancing
+// method to handle the new requests or connections received on the VIP address.
+// There is only one pool per virtual IP.
+type Pool struct {
+	// The status of the pool. Indicates whether the pool is operational.
+	Status string
+
+	// The load-balancer algorithm, which is round-robin, least-connections, and
+	// so on. This value, which must be supported, is dependent on the provider.
+	// Round-robin must be supported.
+	LBMethod string `json:"lb_method" mapstructure:"lb_method"`
+
+	// The protocol of the pool, which is TCP, HTTP, or HTTPS.
+	Protocol string
+
+	// Description for the pool.
+	Description string
+
+	// The IDs of associated monitors which check the health of the pool members.
+	MonitorIDs []string `json:"health_monitors" mapstructure:"health_monitors"`
+
+	// The network on which the members of the pool will be located. Only members
+	// that are on this network can be added to the pool.
+	SubnetID string `json:"subnet_id" mapstructure:"subnet_id"`
+
+	// Owner of the pool. Only an administrative user can specify a tenant ID
+	// other than its own.
+	TenantID string `json:"tenant_id" mapstructure:"tenant_id"`
+
+	// The administrative state of the pool, which is up (true) or down (false).
+	AdminStateUp bool `json:"admin_state_up" mapstructure:"admin_state_up"`
+
+	// Pool name. Does not have to be unique.
+	Name string
+
+	// List of member IDs that belong to the pool.
+	MemberIDs []string `json:"members" mapstructure:"members"`
+
+	// The unique ID for the pool.
+	ID string
+
+	// The ID of the virtual IP associated with this pool.
+	VIPID string `json:"vip_id" mapstructure:"vip_id"`
+
+	// The provider.
+	Provider string
+}
+
+// PoolPage is the page returned by a pager when traversing over a
+// collection of pools.
+type PoolPage struct {
+	pagination.LinkedPageBase
+}
+
+// NextPageURL is invoked when a paginated collection of pools has reached
+// the end of a page and the pager seeks to traverse over a new one. In order
+// to do this, it needs to construct the next page's URL.
+func (p PoolPage) NextPageURL() (string, error) {
+	type resp struct {
+		Links []gophercloud.Link `mapstructure:"pools_links"`
+	}
+
+	var r resp
+	err := mapstructure.Decode(p.Body, &r)
+	if err != nil {
+		return "", err
+	}
+
+	return gophercloud.ExtractNextURL(r.Links)
+}
+
+// IsEmpty checks whether a PoolPage struct is empty.
+func (p PoolPage) IsEmpty() (bool, error) {
+	is, err := ExtractPools(p)
+	if err != nil {
+		return true, nil
+	}
+	return len(is) == 0, nil
+}
+
+// ExtractPools accepts a Page struct, specifically a PoolPage struct,
+// and extracts the elements into a slice of Pool structs. In other words,
+// a generic collection is mapped into a relevant slice.
+func ExtractPools(page pagination.Page) ([]Pool, error) {
+	var resp struct {
+		Pools []Pool `mapstructure:"pools" json:"pools"`
+	}
+
+	err := mapstructure.Decode(page.(PoolPage).Body, &resp)
+
+	return resp.Pools, err
+}
+
+type commonResult struct {
+	gophercloud.Result
+}
+
+// Extract is a function that accepts a result and extracts a pool.
+func (r commonResult) Extract() (*Pool, error) {
+	if r.Err != nil {
+		return nil, r.Err
+	}
+
+	var res struct {
+		Pool *Pool `json:"pool"`
+	}
+
+	err := mapstructure.Decode(r.Body, &res)
+
+	return res.Pool, err
+}
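+
+// A short, hypothetical sketch of pulling a Pool out of a result (the client
+// value and the pool ID shown here are assumed for illustration):
+//
+//	p, err := pools.Get(client, "72741b06-df4d-4715-b142-276b6bce75ab").Extract()
+//	if err == nil {
+//		fmt.Println(p.Name, p.LBMethod)
+//	}
+
+// CreateResult represents the result of a create operation.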
+type CreateResult struct {
+	commonResult
+}
+
+// GetResult represents the result of a get operation.
+type GetResult struct {
+	commonResult
+}
+
+// UpdateResult represents the result of an update operation.
+type UpdateResult struct {
+	commonResult
+}
+
+// DeleteResult represents the result of a delete operation.
+type DeleteResult struct {
+	gophercloud.ErrResult
+}
+
+// AssociateResult represents the result of an association operation.
+type AssociateResult struct {
+	commonResult
+}
diff --git a/vendor/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/lbaas/pools/urls.go b/vendor/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/lbaas/pools/urls.go
new file mode 100644
index 000000000..6cd15b002
--- /dev/null
+++ b/vendor/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/lbaas/pools/urls.go
@@ -0,0 +1,25 @@
+package pools
+
+import "github.com/rackspace/gophercloud"
+
+const (
+	rootPath     = "lb"
+	resourcePath = "pools"
+	monitorPath  = "health_monitors"
+)
+
+func rootURL(c *gophercloud.ServiceClient) string {
+	return c.ServiceURL(rootPath, resourcePath)
+}
+
+func resourceURL(c *gophercloud.ServiceClient, id string) string {
+	return c.ServiceURL(rootPath, resourcePath, id)
+}
+
+func associateURL(c *gophercloud.ServiceClient, id string) string {
+	return c.ServiceURL(rootPath, resourcePath, id, monitorPath)
+}
+
+func disassociateURL(c *gophercloud.ServiceClient, poolID, monitorID string) string {
+	return c.ServiceURL(rootPath, resourcePath, poolID, monitorPath, monitorID)
+}
diff --git a/vendor/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/lbaas/vips/requests.go b/vendor/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/lbaas/vips/requests.go
new file mode 100644
index 000000000..6216f873e
--- /dev/null
+++ b/vendor/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/lbaas/vips/requests.go
@@ -0,0 +1,256 @@
+package vips
+
+import (
+	"fmt"
+
+	"github.com/rackspace/gophercloud"
+	"github.com/rackspace/gophercloud/pagination"
+)
+
+// AdminState gives users a solid type to work with for create and update
+// operations. It is recommended that users use the `Up` and `Down` enums.
+type AdminState *bool
+
+// Convenience vars for AdminStateUp values.
+var (
+	iTrue  = true
+	iFalse = false
+
+	Up   AdminState = &iTrue
+	Down AdminState = &iFalse
+)
+
+// ListOpts allows the filtering and sorting of paginated collections through
+// the API. Filtering is achieved by passing in struct field values that map to
+// the virtual IP attributes you want to see returned. SortKey allows you to
+// sort by a particular VIP attribute. SortDir sets the direction, and is
+// either `asc' or `desc'. Marker and Limit are used for pagination.
+type ListOpts struct {
+	ID              string `q:"id"`
+	Name            string `q:"name"`
+	AdminStateUp    *bool  `q:"admin_state_up"`
+	Status          string `q:"status"`
+	TenantID        string `q:"tenant_id"`
+	SubnetID        string `q:"subnet_id"`
+	Address         string `q:"address"`
+	PortID          string `q:"port_id"`
+	Protocol        string `q:"protocol"`
+	ProtocolPort    int    `q:"protocol_port"`
+	ConnectionLimit int    `q:"connection_limit"`
+	Limit           int    `q:"limit"`
+	Marker          string `q:"marker"`
+	SortKey         string `q:"sort_key"`
+	SortDir         string `q:"sort_dir"`
+}
+
+// List returns a Pager which allows you to iterate over a collection of
+// virtual IPs. It accepts a ListOpts struct, which allows you to filter and
+// sort the returned collection for greater efficiency.
+//
+// Default policy settings return only those virtual IPs that are owned by the
+// tenant who submits the request, unless an admin user submits the request.
+func List(c *gophercloud.ServiceClient, opts ListOpts) pagination.Pager {
+	q, err := gophercloud.BuildQueryString(&opts)
+	if err != nil {
+		return pagination.Pager{Err: err}
+	}
+	u := rootURL(c) + q.String()
+	return pagination.NewPager(c, u, func(r pagination.PageResult) pagination.Page {
+		return VIPPage{pagination.LinkedPageBase{PageResult: r}}
+	})
+}
+
+var (
+	errNameRequired         = fmt.Errorf("Name is required")
+	errSubnetIDRequried     = fmt.Errorf("SubnetID is required")
+	errProtocolRequired     = fmt.Errorf("Protocol is required")
+	errProtocolPortRequired = fmt.Errorf("Protocol port is required")
+	errPoolIDRequired       = fmt.Errorf("PoolID is required")
+)
+
+// CreateOpts contains all the values needed to create a new virtual IP.
+type CreateOpts struct {
+	// Required. Human-readable name for the VIP. Does not have to be unique.
+	Name string
+
+	// Required. The network on which to allocate the VIP's address. A tenant can
+	// only create VIPs on networks authorized by policy (e.g. networks that
+	// belong to them or networks that are shared).
+	SubnetID string
+
+	// Required. The protocol, which can be either TCP, HTTP or HTTPS.
+	Protocol string
+
+	// Required. The port on which to listen for client traffic.
+	ProtocolPort int
+
+	// Required. The ID of the pool with which the VIP is associated.
+	PoolID string
+
+	// Required for admins. Indicates the owner of the VIP.
+	TenantID string
+
+	// Optional. The IP address of the VIP.
+	Address string
+
+	// Optional. Human-readable description for the VIP.
+	Description string
+
+	// Optional. Omit this field to prevent session persistence.
+	Persistence *SessionPersistence
+
+	// Optional. The maximum number of connections allowed for the VIP.
+	ConnLimit *int
+
+	// Optional. The administrative state of the VIP. A valid value is true (UP)
+	// or false (DOWN).
+	AdminStateUp *bool
+}
+
+// Create is an operation which provisions a new virtual IP based on the
+// configuration defined in the CreateOpts struct. Once the request is
+// validated and progress has started on the provisioning process, a
+// CreateResult will be returned.
+//
+// Please note that the PoolID should refer to a pool that is not already
+// associated with another VIP. If the pool is already used by another VIP,
+// the operation will fail and a 409 Conflict error will be returned.
+//
+// Users with an admin role can create VIPs on behalf of other tenants by
+// specifying a TenantID attribute different than their own.
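+//
+// A minimal, hypothetical sketch (the client value, subnet ID, and pool ID
+// are assumed for illustration):
+//
+//	vip, err := vips.Create(client, vips.CreateOpts{
+//		Name:         "web-vip",
+//		SubnetID:     "8032909d-47a1-4715-90af-5153ffe39861",
+//		Protocol:     "HTTP",
+//		ProtocolPort: 80,
+//		PoolID:       "61b1f87a-7a21-4ad3-9dda-7f81d249944f",
+//	}).Extract()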
+func Create(c *gophercloud.ServiceClient, opts CreateOpts) CreateResult { + var res CreateResult + + // Validate required opts + if opts.Name == "" { + res.Err = errNameRequired + return res + } + if opts.SubnetID == "" { + res.Err = errSubnetIDRequried + return res + } + if opts.Protocol == "" { + res.Err = errProtocolRequired + return res + } + if opts.ProtocolPort == 0 { + res.Err = errProtocolPortRequired + return res + } + if opts.PoolID == "" { + res.Err = errPoolIDRequired + return res + } + + type vip struct { + Name string `json:"name"` + SubnetID string `json:"subnet_id"` + Protocol string `json:"protocol"` + ProtocolPort int `json:"protocol_port"` + PoolID string `json:"pool_id"` + Description *string `json:"description,omitempty"` + TenantID *string `json:"tenant_id,omitempty"` + Address *string `json:"address,omitempty"` + Persistence *SessionPersistence `json:"session_persistence,omitempty"` + ConnLimit *int `json:"connection_limit,omitempty"` + AdminStateUp *bool `json:"admin_state_up,omitempty"` + } + + type request struct { + VirtualIP vip `json:"vip"` + } + + reqBody := request{VirtualIP: vip{ + Name: opts.Name, + SubnetID: opts.SubnetID, + Protocol: opts.Protocol, + ProtocolPort: opts.ProtocolPort, + PoolID: opts.PoolID, + Description: gophercloud.MaybeString(opts.Description), + TenantID: gophercloud.MaybeString(opts.TenantID), + Address: gophercloud.MaybeString(opts.Address), + ConnLimit: opts.ConnLimit, + AdminStateUp: opts.AdminStateUp, + }} + + if opts.Persistence != nil { + reqBody.VirtualIP.Persistence = opts.Persistence + } + + _, res.Err = c.Post(rootURL(c), reqBody, &res.Body, nil) + return res +} + +// Get retrieves a particular virtual IP based on its unique ID. +func Get(c *gophercloud.ServiceClient, id string) GetResult { + var res GetResult + _, res.Err = c.Get(resourceURL(c, id), &res.Body, nil) + return res +} + +// UpdateOpts contains all the values needed to update an existing virtual IP. +// Attributes not listed here but appear in CreateOpts are immutable and cannot +// be updated. +type UpdateOpts struct { + // Human-readable name for the VIP. Does not have to be unique. + Name string + + // Required. The ID of the pool with which the VIP is associated. + PoolID string + + // Optional. Human-readable description for the VIP. + Description string + + // Optional. Omit this field to prevent session persistence. + Persistence *SessionPersistence + + // Optional. The maximum number of connections allowed for the VIP. + ConnLimit *int + + // Optional. The administrative state of the VIP. A valid value is true (UP) + // or false (DOWN). + AdminStateUp *bool +} + +// Update is an operation which modifies the attributes of the specified VIP. 
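+//
+// For illustration only (the client value and VIP ID are assumed; note the
+// pointer used for ConnLimit, mirroring the UpdateOpts field):
+//
+//	limit := 500
+//	vip, err := vips.Update(client, "vip-id", vips.UpdateOpts{
+//		Name:      "web-vip",
+//		ConnLimit: &limit,
+//	}).Extract()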
+func Update(c *gophercloud.ServiceClient, id string, opts UpdateOpts) UpdateResult { + type vip struct { + Name string `json:"name,omitempty"` + PoolID string `json:"pool_id,omitempty"` + Description *string `json:"description,omitempty"` + Persistence *SessionPersistence `json:"session_persistence,omitempty"` + ConnLimit *int `json:"connection_limit,omitempty"` + AdminStateUp *bool `json:"admin_state_up,omitempty"` + } + + type request struct { + VirtualIP vip `json:"vip"` + } + + reqBody := request{VirtualIP: vip{ + Name: opts.Name, + PoolID: opts.PoolID, + Description: gophercloud.MaybeString(opts.Description), + ConnLimit: opts.ConnLimit, + AdminStateUp: opts.AdminStateUp, + }} + + if opts.Persistence != nil { + reqBody.VirtualIP.Persistence = opts.Persistence + } + + var res UpdateResult + _, res.Err = c.Put(resourceURL(c, id), reqBody, &res.Body, &gophercloud.RequestOpts{ + OkCodes: []int{200, 202}, + }) + + return res +} + +// Delete will permanently delete a particular virtual IP based on its unique ID. +func Delete(c *gophercloud.ServiceClient, id string) DeleteResult { + var res DeleteResult + _, res.Err = c.Delete(resourceURL(c, id), nil) + return res +} diff --git a/vendor/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/lbaas/vips/requests_test.go b/vendor/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/lbaas/vips/requests_test.go new file mode 100644 index 000000000..430f1a1ee --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/lbaas/vips/requests_test.go @@ -0,0 +1,336 @@ +package vips + +import ( + "fmt" + "net/http" + "testing" + + fake "github.com/rackspace/gophercloud/openstack/networking/v2/common" + "github.com/rackspace/gophercloud/pagination" + th "github.com/rackspace/gophercloud/testhelper" +) + +func TestURLs(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + th.AssertEquals(t, th.Endpoint()+"v2.0/lb/vips", rootURL(fake.ServiceClient())) + th.AssertEquals(t, th.Endpoint()+"v2.0/lb/vips/foo", resourceURL(fake.ServiceClient(), "foo")) +} + +func TestList(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + th.Mux.HandleFunc("/v2.0/lb/vips", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + + fmt.Fprintf(w, ` +{ + "vips":[ + { + "id": "db902c0c-d5ff-4753-b465-668ad9656918", + "tenant_id": "310df60f-2a10-4ee5-9554-98393092194c", + "name": "web_vip", + "description": "lb config for the web tier", + "subnet_id": "96a4386a-f8c3-42ed-afce-d7954eee77b3", + "address" : "10.30.176.47", + "port_id" : "cd1f7a47-4fa6-449c-9ee7-632838aedfea", + "protocol": "HTTP", + "protocol_port": 80, + "pool_id" : "cfc6589d-f949-4c66-99d2-c2da56ef3764", + "admin_state_up": true, + "status": "ACTIVE" + }, + { + "id": "36e08a3e-a78f-4b40-a229-1e7e23eee1ab", + "tenant_id": "310df60f-2a10-4ee5-9554-98393092194c", + "name": "db_vip", + "description": "lb config for the db tier", + "subnet_id": "9cedb85d-0759-4898-8a4b-fa5a5ea10086", + "address" : "10.30.176.48", + "port_id" : "cd1f7a47-4fa6-449c-9ee7-632838aedfea", + "protocol": "TCP", + "protocol_port": 3306, + "pool_id" : "41efe233-7591-43c5-9cf7-923964759f9e", + "session_persistence" : {"type" : "SOURCE_IP"}, + "connection_limit" : 2000, + "admin_state_up": true, + "status": "INACTIVE" + } + ] +} + `) + }) + + count := 0 + + List(fake.ServiceClient(), 
ListOpts{}).EachPage(func(page pagination.Page) (bool, error) { + count++ + actual, err := ExtractVIPs(page) + if err != nil { + t.Errorf("Failed to extract LBs: %v", err) + return false, err + } + + expected := []VirtualIP{ + VirtualIP{ + ID: "db902c0c-d5ff-4753-b465-668ad9656918", + TenantID: "310df60f-2a10-4ee5-9554-98393092194c", + Name: "web_vip", + Description: "lb config for the web tier", + SubnetID: "96a4386a-f8c3-42ed-afce-d7954eee77b3", + Address: "10.30.176.47", + PortID: "cd1f7a47-4fa6-449c-9ee7-632838aedfea", + Protocol: "HTTP", + ProtocolPort: 80, + PoolID: "cfc6589d-f949-4c66-99d2-c2da56ef3764", + Persistence: SessionPersistence{}, + ConnLimit: 0, + AdminStateUp: true, + Status: "ACTIVE", + }, + VirtualIP{ + ID: "36e08a3e-a78f-4b40-a229-1e7e23eee1ab", + TenantID: "310df60f-2a10-4ee5-9554-98393092194c", + Name: "db_vip", + Description: "lb config for the db tier", + SubnetID: "9cedb85d-0759-4898-8a4b-fa5a5ea10086", + Address: "10.30.176.48", + PortID: "cd1f7a47-4fa6-449c-9ee7-632838aedfea", + Protocol: "TCP", + ProtocolPort: 3306, + PoolID: "41efe233-7591-43c5-9cf7-923964759f9e", + Persistence: SessionPersistence{Type: "SOURCE_IP"}, + ConnLimit: 2000, + AdminStateUp: true, + Status: "INACTIVE", + }, + } + + th.CheckDeepEquals(t, expected, actual) + + return true, nil + }) + + if count != 1 { + t.Errorf("Expected 1 page, got %d", count) + } +} + +func TestCreate(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + th.Mux.HandleFunc("/v2.0/lb/vips", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "POST") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + th.TestHeader(t, r, "Content-Type", "application/json") + th.TestHeader(t, r, "Accept", "application/json") + th.TestJSONRequest(t, r, ` +{ + "vip": { + "protocol": "HTTP", + "name": "NewVip", + "admin_state_up": true, + "subnet_id": "8032909d-47a1-4715-90af-5153ffe39861", + "pool_id": "61b1f87a-7a21-4ad3-9dda-7f81d249944f", + "protocol_port": 80, + "session_persistence": {"type": "SOURCE_IP"} + } +} + `) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusCreated) + + fmt.Fprintf(w, ` +{ + "vip": { + "status": "PENDING_CREATE", + "protocol": "HTTP", + "description": "", + "admin_state_up": true, + "subnet_id": "8032909d-47a1-4715-90af-5153ffe39861", + "tenant_id": "83657cfcdfe44cd5920adaf26c48ceea", + "connection_limit": -1, + "pool_id": "61b1f87a-7a21-4ad3-9dda-7f81d249944f", + "address": "10.0.0.11", + "protocol_port": 80, + "port_id": "f7e6fe6a-b8b5-43a8-8215-73456b32e0f5", + "id": "c987d2be-9a3c-4ac9-a046-e8716b1350e2", + "name": "NewVip" + } +} + `) + }) + + opts := CreateOpts{ + Protocol: "HTTP", + Name: "NewVip", + AdminStateUp: Up, + SubnetID: "8032909d-47a1-4715-90af-5153ffe39861", + PoolID: "61b1f87a-7a21-4ad3-9dda-7f81d249944f", + ProtocolPort: 80, + Persistence: &SessionPersistence{Type: "SOURCE_IP"}, + } + + r, err := Create(fake.ServiceClient(), opts).Extract() + th.AssertNoErr(t, err) + + th.AssertEquals(t, "PENDING_CREATE", r.Status) + th.AssertEquals(t, "HTTP", r.Protocol) + th.AssertEquals(t, "", r.Description) + th.AssertEquals(t, true, r.AdminStateUp) + th.AssertEquals(t, "8032909d-47a1-4715-90af-5153ffe39861", r.SubnetID) + th.AssertEquals(t, "83657cfcdfe44cd5920adaf26c48ceea", r.TenantID) + th.AssertEquals(t, -1, r.ConnLimit) + th.AssertEquals(t, "61b1f87a-7a21-4ad3-9dda-7f81d249944f", r.PoolID) + th.AssertEquals(t, "10.0.0.11", r.Address) + th.AssertEquals(t, 80, r.ProtocolPort) + th.AssertEquals(t, "f7e6fe6a-b8b5-43a8-8215-73456b32e0f5", 
r.PortID) + th.AssertEquals(t, "c987d2be-9a3c-4ac9-a046-e8716b1350e2", r.ID) + th.AssertEquals(t, "NewVip", r.Name) +} + +func TestRequiredCreateOpts(t *testing.T) { + res := Create(fake.ServiceClient(), CreateOpts{}) + if res.Err == nil { + t.Fatalf("Expected error, got none") + } + res = Create(fake.ServiceClient(), CreateOpts{Name: "foo"}) + if res.Err == nil { + t.Fatalf("Expected error, got none") + } + res = Create(fake.ServiceClient(), CreateOpts{Name: "foo", SubnetID: "bar"}) + if res.Err == nil { + t.Fatalf("Expected error, got none") + } + res = Create(fake.ServiceClient(), CreateOpts{Name: "foo", SubnetID: "bar", Protocol: "bar"}) + if res.Err == nil { + t.Fatalf("Expected error, got none") + } + res = Create(fake.ServiceClient(), CreateOpts{Name: "foo", SubnetID: "bar", Protocol: "bar", ProtocolPort: 80}) + if res.Err == nil { + t.Fatalf("Expected error, got none") + } +} + +func TestGet(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + th.Mux.HandleFunc("/v2.0/lb/vips/4ec89087-d057-4e2c-911f-60a3b47ee304", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + + fmt.Fprintf(w, ` +{ + "vip": { + "status": "ACTIVE", + "protocol": "HTTP", + "description": "", + "admin_state_up": true, + "subnet_id": "8032909d-47a1-4715-90af-5153ffe39861", + "tenant_id": "83657cfcdfe44cd5920adaf26c48ceea", + "connection_limit": 1000, + "pool_id": "72741b06-df4d-4715-b142-276b6bce75ab", + "session_persistence": { + "cookie_name": "MyAppCookie", + "type": "APP_COOKIE" + }, + "address": "10.0.0.10", + "protocol_port": 80, + "port_id": "b5a743d6-056b-468b-862d-fb13a9aa694e", + "id": "4ec89087-d057-4e2c-911f-60a3b47ee304", + "name": "my-vip" + } +} + `) + }) + + vip, err := Get(fake.ServiceClient(), "4ec89087-d057-4e2c-911f-60a3b47ee304").Extract() + th.AssertNoErr(t, err) + + th.AssertEquals(t, "ACTIVE", vip.Status) + th.AssertEquals(t, "HTTP", vip.Protocol) + th.AssertEquals(t, "", vip.Description) + th.AssertEquals(t, true, vip.AdminStateUp) + th.AssertEquals(t, 1000, vip.ConnLimit) + th.AssertEquals(t, SessionPersistence{Type: "APP_COOKIE", CookieName: "MyAppCookie"}, vip.Persistence) +} + +func TestUpdate(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + th.Mux.HandleFunc("/v2.0/lb/vips/4ec89087-d057-4e2c-911f-60a3b47ee304", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "PUT") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + th.TestHeader(t, r, "Content-Type", "application/json") + th.TestHeader(t, r, "Accept", "application/json") + th.TestJSONRequest(t, r, ` +{ + "vip": { + "connection_limit": 1000, + "session_persistence": {"type": "SOURCE_IP"} + } +} + `) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusAccepted) + + fmt.Fprintf(w, ` +{ + "vip": { + "status": "PENDING_UPDATE", + "protocol": "HTTP", + "description": "", + "admin_state_up": true, + "subnet_id": "8032909d-47a1-4715-90af-5153ffe39861", + "tenant_id": "83657cfcdfe44cd5920adaf26c48ceea", + "connection_limit": 1000, + "pool_id": "61b1f87a-7a21-4ad3-9dda-7f81d249944f", + "address": "10.0.0.11", + "protocol_port": 80, + "port_id": "f7e6fe6a-b8b5-43a8-8215-73456b32e0f5", + "id": "c987d2be-9a3c-4ac9-a046-e8716b1350e2", + "name": "NewVip" + } +} + `) + }) + + i1000 := 1000 + options := UpdateOpts{ + ConnLimit: &i1000, + Persistence: &SessionPersistence{Type: "SOURCE_IP"}, + } + vip, err := 
Update(fake.ServiceClient(), "4ec89087-d057-4e2c-911f-60a3b47ee304", options).Extract()
+	th.AssertNoErr(t, err)
+
+	th.AssertEquals(t, "PENDING_UPDATE", vip.Status)
+	th.AssertEquals(t, 1000, vip.ConnLimit)
+}
+
+func TestDelete(t *testing.T) {
+	th.SetupHTTP()
+	defer th.TeardownHTTP()
+
+	th.Mux.HandleFunc("/v2.0/lb/vips/4ec89087-d057-4e2c-911f-60a3b47ee304", func(w http.ResponseWriter, r *http.Request) {
+		th.TestMethod(t, r, "DELETE")
+		th.TestHeader(t, r, "X-Auth-Token", fake.TokenID)
+		w.WriteHeader(http.StatusNoContent)
+	})
+
+	res := Delete(fake.ServiceClient(), "4ec89087-d057-4e2c-911f-60a3b47ee304")
+	th.AssertNoErr(t, res.Err)
+}
diff --git a/vendor/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/lbaas/vips/results.go b/vendor/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/lbaas/vips/results.go
new file mode 100644
index 000000000..e1092e780
--- /dev/null
+++ b/vendor/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/lbaas/vips/results.go
@@ -0,0 +1,166 @@
+package vips
+
+import (
+	"github.com/mitchellh/mapstructure"
+	"github.com/rackspace/gophercloud"
+	"github.com/rackspace/gophercloud/pagination"
+)
+
+// SessionPersistence represents the session persistence feature of the load
+// balancing service. It attempts to force connections or requests in the same
+// session to be processed by the same member as long as it is active. Three
+// types of persistence are supported:
+//
+// SOURCE_IP: With this mode, all connections originating from the same source
+//            IP address will be handled by the same member of the pool.
+// HTTP_COOKIE: With this persistence mode, the load balancing function will
+//              create a cookie on the first request from a client. Subsequent
+//              requests containing the same cookie value will be handled by
+//              the same member of the pool.
+// APP_COOKIE: With this persistence mode, the load balancing function will
+//             rely on a cookie established by the backend application. All
+//             requests carrying the same cookie value will be handled by the
+//             same member of the pool.
+type SessionPersistence struct {
+	// The type of persistence mode.
+	Type string `mapstructure:"type" json:"type"`
+
+	// Name of cookie if persistence mode is set appropriately.
+	CookieName string `mapstructure:"cookie_name" json:"cookie_name,omitempty"`
+}
+
+// VirtualIP is the primary load balancing configuration object that specifies
+// the virtual IP address and port on which client traffic is received, as well
+// as other details such as the load balancing method to be used, protocol, etc.
+// This entity is sometimes known in LB products under the name of a "virtual
+// server", a "vserver" or a "listener".
+type VirtualIP struct {
+	// The unique ID for the VIP.
+	ID string `mapstructure:"id" json:"id"`
+
+	// Owner of the VIP. Only an admin user can specify a tenant ID other than its own.
+	TenantID string `mapstructure:"tenant_id" json:"tenant_id"`
+
+	// Human-readable name for the VIP. Does not have to be unique.
+	Name string `mapstructure:"name" json:"name"`
+
+	// Human-readable description for the VIP.
+	Description string `mapstructure:"description" json:"description"`
+
+	// The ID of the subnet on which to allocate the VIP address.
+	SubnetID string `mapstructure:"subnet_id" json:"subnet_id"`
+
+	// The IP address of the VIP.
+	Address string `mapstructure:"address" json:"address"`
+
+	// The protocol of the VIP address. A valid value is TCP, HTTP, or HTTPS.
+	Protocol string `mapstructure:"protocol" json:"protocol"`
+
+	// The port on which to listen to client traffic that is associated with the
+	// VIP address. A valid value is from 0 to 65535.
+	ProtocolPort int `mapstructure:"protocol_port" json:"protocol_port"`
+
+	// The ID of the pool with which the VIP is associated.
+	PoolID string `mapstructure:"pool_id" json:"pool_id"`
+
+	// The ID of the port which belongs to the load balancer.
+	PortID string `mapstructure:"port_id" json:"port_id"`
+
+	// Indicates whether connections in the same session will be processed by the
+	// same pool member or not.
+	Persistence SessionPersistence `mapstructure:"session_persistence" json:"session_persistence"`
+
+	// The maximum number of connections allowed for the VIP. Default is -1,
+	// meaning no limit.
+	ConnLimit int `mapstructure:"connection_limit" json:"connection_limit"`
+
+	// The administrative state of the VIP. A valid value is true (UP) or false (DOWN).
+	AdminStateUp bool `mapstructure:"admin_state_up" json:"admin_state_up"`
+
+	// The status of the VIP. Indicates whether the VIP is operational.
+	Status string `mapstructure:"status" json:"status"`
+}
+
+// VIPPage is the page returned by a pager when traversing over a
+// collection of virtual IPs.
+type VIPPage struct {
+	pagination.LinkedPageBase
+}
+
+// NextPageURL is invoked when a paginated collection of virtual IPs has
+// reached the end of a page and the pager seeks to traverse over a new one.
+// In order to do this, it needs to construct the next page's URL.
+func (p VIPPage) NextPageURL() (string, error) {
+	type resp struct {
+		Links []gophercloud.Link `mapstructure:"vips_links"`
+	}
+
+	var r resp
+	err := mapstructure.Decode(p.Body, &r)
+	if err != nil {
+		return "", err
+	}
+
+	return gophercloud.ExtractNextURL(r.Links)
+}
+
+// IsEmpty checks whether a VIPPage struct is empty.
+func (p VIPPage) IsEmpty() (bool, error) {
+	is, err := ExtractVIPs(p)
+	if err != nil {
+		return true, nil
+	}
+	return len(is) == 0, nil
+}
+
+// ExtractVIPs accepts a Page struct, specifically a VIPPage struct,
+// and extracts the elements into a slice of VirtualIP structs. In other words,
+// a generic collection is mapped into a relevant slice.
+func ExtractVIPs(page pagination.Page) ([]VirtualIP, error) {
+	var resp struct {
+		VIPs []VirtualIP `mapstructure:"vips" json:"vips"`
+	}
+
+	err := mapstructure.Decode(page.(VIPPage).Body, &resp)
+
+	return resp.VIPs, err
+}
+
+type commonResult struct {
+	gophercloud.Result
+}
+
+// Extract is a function that accepts a result and extracts a virtual IP.
+func (r commonResult) Extract() (*VirtualIP, error) {
+	if r.Err != nil {
+		return nil, r.Err
+	}
+
+	var res struct {
+		VirtualIP *VirtualIP `mapstructure:"vip" json:"vip"`
+	}
+
+	err := mapstructure.Decode(r.Body, &res)
+
+	return res.VirtualIP, err
+}
+
+// CreateResult represents the result of a create operation.
+type CreateResult struct {
+	commonResult
+}
+
+// GetResult represents the result of a get operation.
+type GetResult struct {
+	commonResult
+}
+
+// UpdateResult represents the result of an update operation.
+type UpdateResult struct {
+	commonResult
+}
+
+// DeleteResult represents the result of a delete operation.
+type DeleteResult struct { + gophercloud.ErrResult +} diff --git a/vendor/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/lbaas/vips/urls.go b/vendor/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/lbaas/vips/urls.go new file mode 100644 index 000000000..2b6f67e71 --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/lbaas/vips/urls.go @@ -0,0 +1,16 @@ +package vips + +import "github.com/rackspace/gophercloud" + +const ( + rootPath = "lb" + resourcePath = "vips" +) + +func rootURL(c *gophercloud.ServiceClient) string { + return c.ServiceURL(rootPath, resourcePath) +} + +func resourceURL(c *gophercloud.ServiceClient, id string) string { + return c.ServiceURL(rootPath, resourcePath, id) +} diff --git a/vendor/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/provider/doc.go b/vendor/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/provider/doc.go new file mode 100644 index 000000000..373da44f8 --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/provider/doc.go @@ -0,0 +1,21 @@ +// Package provider gives access to the provider Neutron plugin, allowing +// network extended attributes. The provider extended attributes for networks +// enable administrative users to specify how network objects map to the +// underlying networking infrastructure. These extended attributes also appear +// when administrative users query networks. +// +// For more information about extended attributes, see the NetworkExtAttrs +// struct. The actual semantics of these attributes depend on the technology +// back end of the particular plug-in. See the plug-in documentation and the +// OpenStack Cloud Administrator Guide to understand which values should be +// specific for each of these attributes when OpenStack Networking is deployed +// with a particular plug-in. The examples shown in this chapter refer to the +// Open vSwitch plug-in. +// +// The default policy settings enable only users with administrative rights to +// specify these parameters in requests and to see their values in responses. By +// default, the provider network extension attributes are completely hidden from +// regular tenants. As a rule of thumb, if these attributes are not visible in a +// GET /networks/ operation, this implies the user submitting the +// request is not authorized to view or manipulate provider network attributes. +package provider diff --git a/vendor/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/provider/results.go b/vendor/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/provider/results.go new file mode 100644 index 000000000..f07d6285d --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/provider/results.go @@ -0,0 +1,124 @@ +package provider + +import ( + "github.com/mitchellh/mapstructure" + "github.com/rackspace/gophercloud/openstack/networking/v2/networks" + "github.com/rackspace/gophercloud/pagination" +) + +// AdminState gives users a solid type to work with for create and update +// operations. It is recommended that users use the `Up` and `Down` enums. +type AdminState *bool + +// Convenience vars for AdminStateUp values. +var ( + iTrue = true + iFalse = false + + Up AdminState = &iTrue + Down AdminState = &iFalse +) + +// NetworkExtAttrs represents an extended form of a Network with additional fields. 
+type NetworkExtAttrs struct {
+	// UUID for the network.
+	ID string `mapstructure:"id" json:"id"`
+
+	// Human-readable name for the network. Might not be unique.
+	Name string `mapstructure:"name" json:"name"`
+
+	// The administrative state of the network. If false (down), the network does not forward packets.
+	AdminStateUp bool `mapstructure:"admin_state_up" json:"admin_state_up"`
+
+	// Indicates whether the network is currently operational. Possible values include
+	// `ACTIVE', `DOWN', `BUILD', or `ERROR'. Plug-ins might define additional values.
+	Status string `mapstructure:"status" json:"status"`
+
+	// Subnets associated with this network.
+	Subnets []string `mapstructure:"subnets" json:"subnets"`
+
+	// Owner of the network. Only admin users can specify a tenant_id other than
+	// their own.
+	TenantID string `mapstructure:"tenant_id" json:"tenant_id"`
+
+	// Specifies whether the network resource can be accessed by any tenant or not.
+	Shared bool `mapstructure:"shared" json:"shared"`
+
+	// Specifies the nature of the physical network mapped to this network
+	// resource. Examples are flat, vlan, or gre.
+	NetworkType string `json:"provider:network_type" mapstructure:"provider:network_type"`
+
+	// Identifies the physical network on top of which this network object is
+	// being implemented. The OpenStack Networking API does not expose any facility
+	// for retrieving the list of available physical networks. As an example, in
+	// the Open vSwitch plug-in this is a symbolic name which is then mapped to
+	// specific bridges on each compute host through the Open vSwitch plug-in
+	// configuration file.
+	PhysicalNetwork string `json:"provider:physical_network" mapstructure:"provider:physical_network"`
+
+	// Identifies an isolated segment on the physical network; the nature of the
+	// segment depends on the segmentation model defined by network_type. For
+	// instance, if network_type is vlan, then this is a vlan identifier;
+	// otherwise, if network_type is gre, then this will be a gre key.
+	SegmentationID string `json:"provider:segmentation_id" mapstructure:"provider:segmentation_id"`
+}
+
+// ExtractGet decorates a GetResult struct returned from a networks.Get()
+// function with extended attributes.
+func ExtractGet(r networks.GetResult) (*NetworkExtAttrs, error) {
+	if r.Err != nil {
+		return nil, r.Err
+	}
+
+	var res struct {
+		Network *NetworkExtAttrs `json:"network"`
+	}
+
+	err := mapstructure.WeakDecode(r.Body, &res)
+
+	return res.Network, err
+}
+
+// ExtractCreate decorates a CreateResult struct returned from a networks.Create()
+// function with extended attributes.
+func ExtractCreate(r networks.CreateResult) (*NetworkExtAttrs, error) {
+	if r.Err != nil {
+		return nil, r.Err
+	}
+
+	var res struct {
+		Network *NetworkExtAttrs `json:"network"`
+	}
+
+	err := mapstructure.WeakDecode(r.Body, &res)
+
+	return res.Network, err
+}
+
+// ExtractUpdate decorates an UpdateResult struct returned from a
+// networks.Update() function with extended attributes.
+func ExtractUpdate(r networks.UpdateResult) (*NetworkExtAttrs, error) {
+	if r.Err != nil {
+		return nil, r.Err
+	}
+
+	var res struct {
+		Network *NetworkExtAttrs `json:"network"`
+	}
+
+	err := mapstructure.WeakDecode(r.Body, &res)
+
+	return res.Network, err
+}
+
+// ExtractList accepts a Page struct, specifically a NetworkPage struct, and
+// extracts the elements into a slice of NetworkExtAttrs structs. In other
+// words, a generic collection is mapped into a relevant slice.
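+//
+// A hypothetical sketch of pairing it with networks.List (the client value
+// is assumed for illustration):
+//
+//	networks.List(client, networks.ListOpts{}).EachPage(func(page pagination.Page) (bool, error) {
+//		exts, err := provider.ExtractList(page)
+//		if err != nil {
+//			return false, err
+//		}
+//		for _, n := range exts {
+//			fmt.Println(n.Name, n.NetworkType)
+//		}
+//		return true, nil
+//	})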
+func ExtractList(page pagination.Page) ([]NetworkExtAttrs, error) { + var resp struct { + Networks []NetworkExtAttrs `mapstructure:"networks" json:"networks"` + } + + err := mapstructure.WeakDecode(page.(networks.NetworkPage).Body, &resp) + + return resp.Networks, err +} diff --git a/vendor/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/provider/results_test.go b/vendor/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/provider/results_test.go new file mode 100644 index 000000000..80816926d --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/provider/results_test.go @@ -0,0 +1,253 @@ +package provider + +import ( + "fmt" + "net/http" + "testing" + + fake "github.com/rackspace/gophercloud/openstack/networking/v2/common" + "github.com/rackspace/gophercloud/openstack/networking/v2/networks" + "github.com/rackspace/gophercloud/pagination" + th "github.com/rackspace/gophercloud/testhelper" +) + +func TestList(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + th.Mux.HandleFunc("/v2.0/networks", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + + fmt.Fprintf(w, ` +{ + "networks": [ + { + "status": "ACTIVE", + "subnets": [ + "54d6f61d-db07-451c-9ab3-b9609b6b6f0b" + ], + "name": "private-network", + "admin_state_up": true, + "tenant_id": "4fd44f30292945e481c7b8a0c8908869", + "shared": true, + "id": "d32019d3-bc6e-4319-9c1d-6722fc136a22", + "provider:segmentation_id": null, + "provider:physical_network": null, + "provider:network_type": "local" + }, + { + "status": "ACTIVE", + "subnets": [ + "08eae331-0402-425a-923c-34f7cfe39c1b" + ], + "name": "private", + "admin_state_up": true, + "tenant_id": "26a7980765d0414dbc1fc1f88cdb7e6e", + "shared": true, + "id": "db193ab3-96e3-4cb3-8fc5-05f4296d0324", + "provider:segmentation_id": 1234567890, + "provider:physical_network": null, + "provider:network_type": "local" + } + ] +} + `) + }) + + count := 0 + + networks.List(fake.ServiceClient(), networks.ListOpts{}).EachPage(func(page pagination.Page) (bool, error) { + count++ + actual, err := ExtractList(page) + if err != nil { + t.Errorf("Failed to extract networks: %v", err) + return false, err + } + + expected := []NetworkExtAttrs{ + NetworkExtAttrs{ + Status: "ACTIVE", + Subnets: []string{"54d6f61d-db07-451c-9ab3-b9609b6b6f0b"}, + Name: "private-network", + AdminStateUp: true, + TenantID: "4fd44f30292945e481c7b8a0c8908869", + Shared: true, + ID: "d32019d3-bc6e-4319-9c1d-6722fc136a22", + NetworkType: "local", + PhysicalNetwork: "", + SegmentationID: "", + }, + NetworkExtAttrs{ + Status: "ACTIVE", + Subnets: []string{"08eae331-0402-425a-923c-34f7cfe39c1b"}, + Name: "private", + AdminStateUp: true, + TenantID: "26a7980765d0414dbc1fc1f88cdb7e6e", + Shared: true, + ID: "db193ab3-96e3-4cb3-8fc5-05f4296d0324", + NetworkType: "local", + PhysicalNetwork: "", + SegmentationID: "1234567890", + }, + } + + th.CheckDeepEquals(t, expected, actual) + + return true, nil + }) + + if count != 1 { + t.Errorf("Expected 1 page, got %d", count) + } +} + +func TestGet(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + th.Mux.HandleFunc("/v2.0/networks/d32019d3-bc6e-4319-9c1d-6722fc136a22", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + + w.Header().Add("Content-Type", 
"application/json") + w.WriteHeader(http.StatusOK) + + fmt.Fprintf(w, ` +{ + "network": { + "status": "ACTIVE", + "subnets": [ + "54d6f61d-db07-451c-9ab3-b9609b6b6f0b" + ], + "name": "private-network", + "provider:physical_network": null, + "admin_state_up": true, + "tenant_id": "4fd44f30292945e481c7b8a0c8908869", + "provider:network_type": "local", + "shared": true, + "id": "d32019d3-bc6e-4319-9c1d-6722fc136a22", + "provider:segmentation_id": null + } +} + `) + }) + + res := networks.Get(fake.ServiceClient(), "d32019d3-bc6e-4319-9c1d-6722fc136a22") + n, err := ExtractGet(res) + + th.AssertNoErr(t, err) + + th.AssertEquals(t, "", n.PhysicalNetwork) + th.AssertEquals(t, "local", n.NetworkType) + th.AssertEquals(t, "", n.SegmentationID) +} + +func TestCreate(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + th.Mux.HandleFunc("/v2.0/networks", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "POST") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + th.TestHeader(t, r, "Content-Type", "application/json") + th.TestHeader(t, r, "Accept", "application/json") + th.TestJSONRequest(t, r, ` +{ + "network": { + "name": "sample_network", + "admin_state_up": true + } +} + `) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusCreated) + + fmt.Fprintf(w, ` +{ + "network": { + "status": "ACTIVE", + "subnets": [ + "54d6f61d-db07-451c-9ab3-b9609b6b6f0b" + ], + "name": "private-network", + "provider:physical_network": null, + "admin_state_up": true, + "tenant_id": "4fd44f30292945e481c7b8a0c8908869", + "provider:network_type": "local", + "shared": true, + "id": "d32019d3-bc6e-4319-9c1d-6722fc136a22", + "provider:segmentation_id": null + } +} + `) + }) + + options := networks.CreateOpts{Name: "sample_network", AdminStateUp: Up} + res := networks.Create(fake.ServiceClient(), options) + n, err := ExtractCreate(res) + + th.AssertNoErr(t, err) + + th.AssertEquals(t, "", n.PhysicalNetwork) + th.AssertEquals(t, "local", n.NetworkType) + th.AssertEquals(t, "", n.SegmentationID) +} + +func TestUpdate(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + th.Mux.HandleFunc("/v2.0/networks/4e8e5957-649f-477b-9e5b-f1f75b21c03c", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "PUT") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + th.TestHeader(t, r, "Content-Type", "application/json") + th.TestHeader(t, r, "Accept", "application/json") + th.TestJSONRequest(t, r, ` +{ + "network": { + "name": "new_network_name", + "admin_state_up": false, + "shared": true + } +} + `) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + + fmt.Fprintf(w, ` +{ + "network": { + "status": "ACTIVE", + "subnets": [ + "54d6f61d-db07-451c-9ab3-b9609b6b6f0b" + ], + "name": "private-network", + "provider:physical_network": null, + "admin_state_up": true, + "tenant_id": "4fd44f30292945e481c7b8a0c8908869", + "provider:network_type": "local", + "shared": true, + "id": "d32019d3-bc6e-4319-9c1d-6722fc136a22", + "provider:segmentation_id": null + } +} + `) + }) + + iTrue := true + options := networks.UpdateOpts{Name: "new_network_name", AdminStateUp: Down, Shared: &iTrue} + res := networks.Update(fake.ServiceClient(), "4e8e5957-649f-477b-9e5b-f1f75b21c03c", options) + n, err := ExtractUpdate(res) + + th.AssertNoErr(t, err) + + th.AssertEquals(t, "", n.PhysicalNetwork) + th.AssertEquals(t, "local", n.NetworkType) + th.AssertEquals(t, "", n.SegmentationID) +} diff --git 
a/vendor/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/security/doc.go b/vendor/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/security/doc.go
new file mode 100644
index 000000000..31f744ccd
--- /dev/null
+++ b/vendor/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/security/doc.go
@@ -0,0 +1,32 @@
+// Package security contains functionality to work with security group and
+// security group rules Neutron resources.
+//
+// Security groups and security group rules allow administrators and tenants
+// to specify the type of traffic and direction (ingress/egress) that is
+// allowed to pass through a port. A security group is a container for
+// security group rules.
+//
+// When a port is created in Networking it is associated with a security group.
+// If a security group is not specified, the port is associated with a 'default'
+// security group. By default, this group drops all ingress traffic and allows
+// all egress. Rules can be added to this group in order to change the behaviour.
+//
+// The basic characteristics of Neutron Security Groups are:
+//
+// For ingress traffic (to an instance)
+//  - Only traffic matched with security group rules is allowed.
+//  - When there is no rule defined, all traffic is dropped.
+//
+// For egress traffic (from an instance)
+//  - Only traffic matched with security group rules is allowed.
+//  - When there is no rule defined, all egress traffic is dropped.
+//  - When a new security group is created, rules to allow all egress traffic
+//    are automatically added.
+//
+// A "default security group" is defined for each tenant.
+//  - For the default security group a rule which allows intercommunication
+//    among hosts associated with the default security group is defined by default.
+//  - As a result, all egress traffic and intercommunication in the default
+//    group are allowed and all ingress from outside of the default group is
+//    dropped by default (in the default security group).
+package security
diff --git a/vendor/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/security/groups/requests.go b/vendor/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/security/groups/requests.go
new file mode 100644
index 000000000..2712ac162
--- /dev/null
+++ b/vendor/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/security/groups/requests.go
@@ -0,0 +1,131 @@
+package groups
+
+import (
+	"fmt"
+
+	"github.com/rackspace/gophercloud"
+	"github.com/rackspace/gophercloud/pagination"
+)
+
+// ListOpts allows the filtering and sorting of paginated collections through
+// the API. Filtering is achieved by passing in struct field values that map to
+// the security group attributes you want to see returned. SortKey allows you
+// to sort by a particular security group attribute. SortDir sets the direction,
+// and is either `asc' or `desc'. Marker and Limit are used for pagination.
+type ListOpts struct {
+	ID       string `q:"id"`
+	Name     string `q:"name"`
+	TenantID string `q:"tenant_id"`
+	Limit    int    `q:"limit"`
+	Marker   string `q:"marker"`
+	SortKey  string `q:"sort_key"`
+	SortDir  string `q:"sort_dir"`
+}
+
+// List returns a Pager which allows you to iterate over a collection of
+// security groups. It accepts a ListOpts struct, which allows you to filter
+// and sort the returned collection for greater efficiency.
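+//
+// A brief, hypothetical usage sketch (the client value is assumed for
+// illustration):
+//
+//	pager := groups.List(client, groups.ListOpts{})
+//	err := pager.EachPage(func(page pagination.Page) (bool, error) {
+//		sgs, err := groups.ExtractGroups(page)
+//		if err != nil {
+//			return false, err
+//		}
+//		for _, sg := range sgs {
+//			fmt.Println(sg.Name, sg.ID)
+//		}
+//		return true, nil
+//	})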
+func List(c *gophercloud.ServiceClient, opts ListOpts) pagination.Pager {
+	q, err := gophercloud.BuildQueryString(&opts)
+	if err != nil {
+		return pagination.Pager{Err: err}
+	}
+	u := rootURL(c) + q.String()
+	return pagination.NewPager(c, u, func(r pagination.PageResult) pagination.Page {
+		return SecGroupPage{pagination.LinkedPageBase{PageResult: r}}
+	})
+}
+
+var (
+	errNameRequired = fmt.Errorf("Name is required")
+)
+
+// CreateOpts contains all the values needed to create a new security group.
+type CreateOpts struct {
+	// Required. Human-readable name for the security group. Does not have to be unique.
+	Name string
+
+	// Required for admins. Indicates the owner of the security group.
+	TenantID string
+
+	// Optional. Describes the security group.
+	Description string
+}
+
+// Create is an operation which provisions a new security group with default
+// security group rules for the IPv4 and IPv6 ether types.
+func Create(c *gophercloud.ServiceClient, opts CreateOpts) CreateResult {
+	var res CreateResult
+
+	// Validate required opts
+	if opts.Name == "" {
+		res.Err = errNameRequired
+		return res
+	}
+
+	type secgroup struct {
+		Name        string `json:"name"`
+		TenantID    string `json:"tenant_id,omitempty"`
+		Description string `json:"description,omitempty"`
+	}
+
+	type request struct {
+		SecGroup secgroup `json:"security_group"`
+	}
+
+	reqBody := request{SecGroup: secgroup{
+		Name:        opts.Name,
+		TenantID:    opts.TenantID,
+		Description: opts.Description,
+	}}
+
+	_, res.Err = c.Post(rootURL(c), reqBody, &res.Body, nil)
+	return res
+}
+
+// Get retrieves a particular security group based on its unique ID.
+func Get(c *gophercloud.ServiceClient, id string) GetResult {
+	var res GetResult
+	_, res.Err = c.Get(resourceURL(c, id), &res.Body, nil)
+	return res
+}
+
+// Delete will permanently delete a particular security group based on its unique ID.
+func Delete(c *gophercloud.ServiceClient, id string) DeleteResult {
+	var res DeleteResult
+	_, res.Err = c.Delete(resourceURL(c, id), nil)
+	return res
+}
+
+// IDFromName is a convenience function that returns a security group's ID given its name.
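+//
+// For example (a sketch; "client" is an assumed authenticated service client):
+//
+//	id, err := IDFromName(client, "default")
+//	if err != nil {
+//		// zero matches and ambiguous matches are both reported as errors
+//	}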
+func IDFromName(client *gophercloud.ServiceClient, name string) (string, error) { + securityGroupCount := 0 + securityGroupID := "" + if name == "" { + return "", fmt.Errorf("A security group name must be provided.") + } + pager := List(client, ListOpts{}) + pager.EachPage(func(page pagination.Page) (bool, error) { + securityGroupList, err := ExtractGroups(page) + if err != nil { + return false, err + } + + for _, s := range securityGroupList { + if s.Name == name { + securityGroupCount++ + securityGroupID = s.ID + } + } + return true, nil + }) + + switch securityGroupCount { + case 0: + return "", fmt.Errorf("Unable to find security group: %s", name) + case 1: + return securityGroupID, nil + default: + return "", fmt.Errorf("Found %d security groups matching %s", securityGroupCount, name) + } +} diff --git a/vendor/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/security/groups/requests_test.go b/vendor/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/security/groups/requests_test.go new file mode 100644 index 000000000..5f074c72f --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/security/groups/requests_test.go @@ -0,0 +1,213 @@ +package groups + +import ( + "fmt" + "net/http" + "testing" + + fake "github.com/rackspace/gophercloud/openstack/networking/v2/common" + "github.com/rackspace/gophercloud/openstack/networking/v2/extensions/security/rules" + "github.com/rackspace/gophercloud/pagination" + th "github.com/rackspace/gophercloud/testhelper" +) + +func TestURLs(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + th.AssertEquals(t, th.Endpoint()+"v2.0/security-groups", rootURL(fake.ServiceClient())) + th.AssertEquals(t, th.Endpoint()+"v2.0/security-groups/foo", resourceURL(fake.ServiceClient(), "foo")) +} + +func TestList(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + th.Mux.HandleFunc("/v2.0/security-groups", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + + fmt.Fprintf(w, ` +{ + "security_groups": [ + { + "description": "default", + "id": "85cc3048-abc3-43cc-89b3-377341426ac5", + "name": "default", + "security_group_rules": [], + "tenant_id": "e4f50856753b4dc6afee5fa6b9b6c550" + } + ] +} + `) + }) + + count := 0 + + List(fake.ServiceClient(), ListOpts{}).EachPage(func(page pagination.Page) (bool, error) { + count++ + actual, err := ExtractGroups(page) + if err != nil { + t.Errorf("Failed to extract secgroups: %v", err) + return false, err + } + + expected := []SecGroup{ + SecGroup{ + Description: "default", + ID: "85cc3048-abc3-43cc-89b3-377341426ac5", + Name: "default", + Rules: []rules.SecGroupRule{}, + TenantID: "e4f50856753b4dc6afee5fa6b9b6c550", + }, + } + + th.CheckDeepEquals(t, expected, actual) + + return true, nil + }) + + if count != 1 { + t.Errorf("Expected 1 page, got %d", count) + } +} + +func TestCreate(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + th.Mux.HandleFunc("/v2.0/security-groups", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "POST") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + th.TestHeader(t, r, "Content-Type", "application/json") + th.TestHeader(t, r, "Accept", "application/json") + th.TestJSONRequest(t, r, ` +{ + "security_group": { + "name": "new-webservers", + "description": "security group for webservers" + } +} + `) + + 
w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusCreated) + + fmt.Fprintf(w, ` +{ + "security_group": { + "description": "security group for webservers", + "id": "2076db17-a522-4506-91de-c6dd8e837028", + "name": "new-webservers", + "security_group_rules": [ + { + "direction": "egress", + "ethertype": "IPv4", + "id": "38ce2d8e-e8f1-48bd-83c2-d33cb9f50c3d", + "port_range_max": null, + "port_range_min": null, + "protocol": null, + "remote_group_id": null, + "remote_ip_prefix": null, + "security_group_id": "2076db17-a522-4506-91de-c6dd8e837028", + "tenant_id": "e4f50856753b4dc6afee5fa6b9b6c550" + }, + { + "direction": "egress", + "ethertype": "IPv6", + "id": "565b9502-12de-4ffd-91e9-68885cff6ae1", + "port_range_max": null, + "port_range_min": null, + "protocol": null, + "remote_group_id": null, + "remote_ip_prefix": null, + "security_group_id": "2076db17-a522-4506-91de-c6dd8e837028", + "tenant_id": "e4f50856753b4dc6afee5fa6b9b6c550" + } + ], + "tenant_id": "e4f50856753b4dc6afee5fa6b9b6c550" + } +} + `) + }) + + opts := CreateOpts{Name: "new-webservers", Description: "security group for webservers"} + _, err := Create(fake.ServiceClient(), opts).Extract() + th.AssertNoErr(t, err) +} + +func TestGet(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + th.Mux.HandleFunc("/v2.0/security-groups/85cc3048-abc3-43cc-89b3-377341426ac5", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + + fmt.Fprintf(w, ` +{ + "security_group": { + "description": "default", + "id": "85cc3048-abc3-43cc-89b3-377341426ac5", + "name": "default", + "security_group_rules": [ + { + "direction": "egress", + "ethertype": "IPv6", + "id": "3c0e45ff-adaf-4124-b083-bf390e5482ff", + "port_range_max": null, + "port_range_min": null, + "protocol": null, + "remote_group_id": null, + "remote_ip_prefix": null, + "security_group_id": "85cc3048-abc3-43cc-89b3-377341426ac5", + "tenant_id": "e4f50856753b4dc6afee5fa6b9b6c550" + }, + { + "direction": "egress", + "ethertype": "IPv4", + "id": "93aa42e5-80db-4581-9391-3a608bd0e448", + "port_range_max": null, + "port_range_min": null, + "protocol": null, + "remote_group_id": null, + "remote_ip_prefix": null, + "security_group_id": "85cc3048-abc3-43cc-89b3-377341426ac5", + "tenant_id": "e4f50856753b4dc6afee5fa6b9b6c550" + } + ], + "tenant_id": "e4f50856753b4dc6afee5fa6b9b6c550" + } +} + `) + }) + + sg, err := Get(fake.ServiceClient(), "85cc3048-abc3-43cc-89b3-377341426ac5").Extract() + th.AssertNoErr(t, err) + + th.AssertEquals(t, "default", sg.Description) + th.AssertEquals(t, "85cc3048-abc3-43cc-89b3-377341426ac5", sg.ID) + th.AssertEquals(t, "default", sg.Name) + th.AssertEquals(t, 2, len(sg.Rules)) + th.AssertEquals(t, "e4f50856753b4dc6afee5fa6b9b6c550", sg.TenantID) +} + +func TestDelete(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + th.Mux.HandleFunc("/v2.0/security-groups/4ec89087-d057-4e2c-911f-60a3b47ee304", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "DELETE") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + w.WriteHeader(http.StatusNoContent) + }) + + res := Delete(fake.ServiceClient(), "4ec89087-d057-4e2c-911f-60a3b47ee304") + th.AssertNoErr(t, res.Err) +} diff --git a/vendor/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/security/groups/results.go 
b/vendor/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/security/groups/results.go new file mode 100644 index 000000000..49db261c2 --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/security/groups/results.go @@ -0,0 +1,108 @@ +package groups + +import ( + "github.com/mitchellh/mapstructure" + "github.com/rackspace/gophercloud" + "github.com/rackspace/gophercloud/openstack/networking/v2/extensions/security/rules" + "github.com/rackspace/gophercloud/pagination" +) + +// SecGroup represents a container for security group rules. +type SecGroup struct { + // The UUID for the security group. + ID string + + // Human-readable name for the security group. Might not be unique. Cannot be + // named "default" as that is automatically created for a tenant. + Name string + + // The security group description. + Description string + + // A slice of security group rules that dictate the permitted behaviour for + // traffic entering and leaving the group. + Rules []rules.SecGroupRule `json:"security_group_rules" mapstructure:"security_group_rules"` + + // Owner of the security group. Only admin users can specify a TenantID + // other than their own. + TenantID string `json:"tenant_id" mapstructure:"tenant_id"` +} + +// SecGroupPage is the page returned by a pager when traversing over a +// collection of security groups. +type SecGroupPage struct { + pagination.LinkedPageBase +} + +// NextPageURL is invoked when a paginated collection of security groups has +// reached the end of a page and the pager seeks to traverse over a new one. In +// order to do this, it needs to construct the next page's URL. +func (p SecGroupPage) NextPageURL() (string, error) { + type resp struct { + Links []gophercloud.Link `mapstructure:"security_groups_links"` + } + + var r resp + err := mapstructure.Decode(p.Body, &r) + if err != nil { + return "", err + } + + return gophercloud.ExtractNextURL(r.Links) +} + +// IsEmpty checks whether a SecGroupPage struct is empty. +func (p SecGroupPage) IsEmpty() (bool, error) { + is, err := ExtractGroups(p) + if err != nil { + return true, nil + } + return len(is) == 0, nil +} + +// ExtractGroups accepts a Page struct, specifically a SecGroupPage struct, +// and extracts the elements into a slice of SecGroup structs. In other words, +// a generic collection is mapped into a relevant slice. +func ExtractGroups(page pagination.Page) ([]SecGroup, error) { + var resp struct { + SecGroups []SecGroup `mapstructure:"security_groups" json:"security_groups"` + } + + err := mapstructure.Decode(page.(SecGroupPage).Body, &resp) + + return resp.SecGroups, err +} + +type commonResult struct { + gophercloud.Result +} + +// Extract is a function that accepts a result and extracts a security group. +func (r commonResult) Extract() (*SecGroup, error) { + if r.Err != nil { + return nil, r.Err + } + + var res struct { + SecGroup *SecGroup `mapstructure:"security_group" json:"security_group"` + } + + err := mapstructure.Decode(r.Body, &res) + + return res.SecGroup, err +} + +// CreateResult represents the result of a create operation. +type CreateResult struct { + commonResult +} + +// GetResult represents the result of a get operation. +type GetResult struct { + commonResult +} + +// DeleteResult represents the result of a delete operation. 
+type DeleteResult struct {
+	gophercloud.ErrResult
+}
diff --git a/vendor/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/security/groups/urls.go b/vendor/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/security/groups/urls.go
new file mode 100644
index 000000000..84f7324f0
--- /dev/null
+++ b/vendor/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/security/groups/urls.go
@@ -0,0 +1,13 @@
+package groups
+
+import "github.com/rackspace/gophercloud"
+
+const rootPath = "security-groups"
+
+func rootURL(c *gophercloud.ServiceClient) string {
+	return c.ServiceURL(rootPath)
+}
+
+func resourceURL(c *gophercloud.ServiceClient, id string) string {
+	return c.ServiceURL(rootPath, id)
+}
diff --git a/vendor/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/security/rules/requests.go b/vendor/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/security/rules/requests.go
new file mode 100644
index 000000000..e06934a09
--- /dev/null
+++ b/vendor/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/security/rules/requests.go
@@ -0,0 +1,174 @@
+package rules
+
+import (
+	"fmt"
+
+	"github.com/rackspace/gophercloud"
+	"github.com/rackspace/gophercloud/pagination"
+)
+
+// ListOpts allows the filtering and sorting of paginated collections through
+// the API. Filtering is achieved by passing in struct field values that map to
+// the security group rule attributes you want to see returned. SortKey allows
+// you to sort by a particular rule attribute. SortDir sets the direction, and
+// is either `asc' or `desc'. Marker and Limit are used for pagination.
+type ListOpts struct {
+	Direction      string `q:"direction"`
+	EtherType      string `q:"ethertype"`
+	ID             string `q:"id"`
+	PortRangeMax   int    `q:"port_range_max"`
+	PortRangeMin   int    `q:"port_range_min"`
+	Protocol       string `q:"protocol"`
+	RemoteGroupID  string `q:"remote_group_id"`
+	RemoteIPPrefix string `q:"remote_ip_prefix"`
+	SecGroupID     string `q:"security_group_id"`
+	TenantID       string `q:"tenant_id"`
+	Limit          int    `q:"limit"`
+	Marker         string `q:"marker"`
+	SortKey        string `q:"sort_key"`
+	SortDir        string `q:"sort_dir"`
+}
+
+// List returns a Pager which allows you to iterate over a collection of
+// security group rules. It accepts a ListOpts struct, which allows you to filter
+// and sort the returned collection for greater efficiency.
+func List(c *gophercloud.ServiceClient, opts ListOpts) pagination.Pager {
+	q, err := gophercloud.BuildQueryString(&opts)
+	if err != nil {
+		return pagination.Pager{Err: err}
+	}
+	u := rootURL(c) + q.String()
+	return pagination.NewPager(c, u, func(r pagination.PageResult) pagination.Page {
+		return SecGroupRulePage{pagination.LinkedPageBase{PageResult: r}}
+	})
+}
+
+// Errors
+var (
+	errValidDirectionRequired = fmt.Errorf("A valid Direction is required")
+	errValidEtherTypeRequired = fmt.Errorf("A valid EtherType is required")
+	errSecGroupIDRequired     = fmt.Errorf("A valid SecGroupID is required")
+	errValidProtocolRequired  = fmt.Errorf("A valid Protocol is required")
+)
+
+// Constants useful for CreateOpts
+const (
+	DirIngress   = "ingress"
+	DirEgress    = "egress"
+	Ether4       = "IPv4"
+	Ether6       = "IPv6"
+	ProtocolTCP  = "tcp"
+	ProtocolUDP  = "udp"
+	ProtocolICMP = "icmp"
+)
+
+// CreateOpts contains all the values needed to create a new security group rule.
+type CreateOpts struct {
+	// Required. Must be either "ingress" or "egress": the direction in which the
+	// security group rule is applied.
+	Direction string
+
+	// Required. Must be "IPv4" or "IPv6", and addresses represented in CIDR must
+	// match the ingress or egress rules.
+	EtherType string
+
+	// Required. The security group ID to associate with this security group rule.
+	SecGroupID string
+
+	// Optional. The maximum port number in the range that is matched by the
+	// security group rule. The PortRangeMin attribute constrains the PortRangeMax
+	// attribute. If the protocol is ICMP, this value must be an ICMP type.
+	PortRangeMax int
+
+	// Optional. The minimum port number in the range that is matched by the
+	// security group rule. If the protocol is TCP or UDP, this value must be
+	// less than or equal to the value of the PortRangeMax attribute. If the
+	// protocol is ICMP, this value must be an ICMP type.
+	PortRangeMin int
+
+	// Optional. The protocol that is matched by the security group rule. Valid
+	// values are "tcp", "udp", "icmp" or an empty string.
+	Protocol string
+
+	// Optional. The remote group ID to be associated with this security group
+	// rule. You can specify either RemoteGroupID or RemoteIPPrefix.
+	RemoteGroupID string
+
+	// Optional. The remote IP prefix to be associated with this security group
+	// rule. You can specify either RemoteGroupID or RemoteIPPrefix. This
+	// attribute matches the specified IP prefix as the source IP address of the
+	// IP packet.
+	RemoteIPPrefix string
+
+	// Required for admins. Indicates the owner of the security group rule.
+	TenantID string
+}
+
+// Create is an operation which adds a new security group rule and associates it
+// with an existing security group (whose ID is specified in CreateOpts).
+func Create(c *gophercloud.ServiceClient, opts CreateOpts) CreateResult {
+	var res CreateResult
+
+	// Validate required opts
+	if opts.Direction != DirIngress && opts.Direction != DirEgress {
+		res.Err = errValidDirectionRequired
+		return res
+	}
+	if opts.EtherType != Ether4 && opts.EtherType != Ether6 {
+		res.Err = errValidEtherTypeRequired
+		return res
+	}
+	if opts.SecGroupID == "" {
+		res.Err = errSecGroupIDRequired
+		return res
+	}
+	if opts.Protocol != "" && opts.Protocol != ProtocolTCP && opts.Protocol != ProtocolUDP && opts.Protocol != ProtocolICMP {
+		res.Err = errValidProtocolRequired
+		return res
+	}
+
+	type secrule struct {
+		Direction      string `json:"direction"`
+		EtherType      string `json:"ethertype"`
+		SecGroupID     string `json:"security_group_id"`
+		PortRangeMax   int    `json:"port_range_max,omitempty"`
+		PortRangeMin   int    `json:"port_range_min,omitempty"`
+		Protocol       string `json:"protocol,omitempty"`
+		RemoteGroupID  string `json:"remote_group_id,omitempty"`
+		RemoteIPPrefix string `json:"remote_ip_prefix,omitempty"`
+		TenantID       string `json:"tenant_id,omitempty"`
+	}
+
+	type request struct {
+		SecRule secrule `json:"security_group_rule"`
+	}
+
+	reqBody := request{SecRule: secrule{
+		Direction:      opts.Direction,
+		EtherType:      opts.EtherType,
+		SecGroupID:     opts.SecGroupID,
+		PortRangeMax:   opts.PortRangeMax,
+		PortRangeMin:   opts.PortRangeMin,
+		Protocol:       opts.Protocol,
+		RemoteGroupID:  opts.RemoteGroupID,
+		RemoteIPPrefix: opts.RemoteIPPrefix,
+		TenantID:       opts.TenantID,
+	}}
+
+	_, res.Err = c.Post(rootURL(c), reqBody, &res.Body, nil)
+	return res
+}
+
+// Get retrieves a particular security group rule based on its unique ID.
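+//
+// A short usage sketch (assuming an authenticated service client in "client";
+// the rule ID below is the one used in this package's tests):
+//
+//	rule, err := Get(client, "3c0e45ff-adaf-4124-b083-bf390e5482ff").Extract()
+//	if err == nil {
+//		fmt.Println(rule.Direction, rule.EtherType, rule.Protocol)
+//	}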
+func Get(c *gophercloud.ServiceClient, id string) GetResult { + var res GetResult + _, res.Err = c.Get(resourceURL(c, id), &res.Body, nil) + return res +} + +// Delete will permanently delete a particular security group rule based on its unique ID. +func Delete(c *gophercloud.ServiceClient, id string) DeleteResult { + var res DeleteResult + _, res.Err = c.Delete(resourceURL(c, id), nil) + return res +} diff --git a/vendor/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/security/rules/requests_test.go b/vendor/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/security/rules/requests_test.go new file mode 100644 index 000000000..b5afef31e --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/security/rules/requests_test.go @@ -0,0 +1,243 @@ +package rules + +import ( + "fmt" + "net/http" + "testing" + + fake "github.com/rackspace/gophercloud/openstack/networking/v2/common" + "github.com/rackspace/gophercloud/pagination" + th "github.com/rackspace/gophercloud/testhelper" +) + +func TestURLs(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + th.AssertEquals(t, th.Endpoint()+"v2.0/security-group-rules", rootURL(fake.ServiceClient())) + th.AssertEquals(t, th.Endpoint()+"v2.0/security-group-rules/foo", resourceURL(fake.ServiceClient(), "foo")) +} + +func TestList(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + th.Mux.HandleFunc("/v2.0/security-group-rules", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + + fmt.Fprintf(w, ` +{ + "security_group_rules": [ + { + "direction": "egress", + "ethertype": "IPv6", + "id": "3c0e45ff-adaf-4124-b083-bf390e5482ff", + "port_range_max": null, + "port_range_min": null, + "protocol": null, + "remote_group_id": null, + "remote_ip_prefix": null, + "security_group_id": "85cc3048-abc3-43cc-89b3-377341426ac5", + "tenant_id": "e4f50856753b4dc6afee5fa6b9b6c550" + }, + { + "direction": "egress", + "ethertype": "IPv4", + "id": "93aa42e5-80db-4581-9391-3a608bd0e448", + "port_range_max": null, + "port_range_min": null, + "protocol": null, + "remote_group_id": null, + "remote_ip_prefix": null, + "security_group_id": "85cc3048-abc3-43cc-89b3-377341426ac5", + "tenant_id": "e4f50856753b4dc6afee5fa6b9b6c550" + } + ] +} + `) + }) + + count := 0 + + List(fake.ServiceClient(), ListOpts{}).EachPage(func(page pagination.Page) (bool, error) { + count++ + actual, err := ExtractRules(page) + if err != nil { + t.Errorf("Failed to extract secrules: %v", err) + return false, err + } + + expected := []SecGroupRule{ + SecGroupRule{ + Direction: "egress", + EtherType: "IPv6", + ID: "3c0e45ff-adaf-4124-b083-bf390e5482ff", + PortRangeMax: 0, + PortRangeMin: 0, + Protocol: "", + RemoteGroupID: "", + RemoteIPPrefix: "", + SecGroupID: "85cc3048-abc3-43cc-89b3-377341426ac5", + TenantID: "e4f50856753b4dc6afee5fa6b9b6c550", + }, + SecGroupRule{ + Direction: "egress", + EtherType: "IPv4", + ID: "93aa42e5-80db-4581-9391-3a608bd0e448", + PortRangeMax: 0, + PortRangeMin: 0, + Protocol: "", + RemoteGroupID: "", + RemoteIPPrefix: "", + SecGroupID: "85cc3048-abc3-43cc-89b3-377341426ac5", + TenantID: "e4f50856753b4dc6afee5fa6b9b6c550", + }, + } + + th.CheckDeepEquals(t, expected, actual) + + return true, nil + }) + + if count != 1 { + t.Errorf("Expected 1 page, got %d", count) + } +} + +func TestCreate(t *testing.T) { + th.SetupHTTP() + 
defer th.TeardownHTTP() + + th.Mux.HandleFunc("/v2.0/security-group-rules", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "POST") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + th.TestHeader(t, r, "Content-Type", "application/json") + th.TestHeader(t, r, "Accept", "application/json") + th.TestJSONRequest(t, r, ` +{ + "security_group_rule": { + "direction": "ingress", + "port_range_min": 80, + "ethertype": "IPv4", + "port_range_max": 80, + "protocol": "tcp", + "remote_group_id": "85cc3048-abc3-43cc-89b3-377341426ac5", + "security_group_id": "a7734e61-b545-452d-a3cd-0189cbd9747a" + } +} + `) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusCreated) + + fmt.Fprintf(w, ` +{ + "security_group_rule": { + "direction": "ingress", + "ethertype": "IPv4", + "id": "2bc0accf-312e-429a-956e-e4407625eb62", + "port_range_max": 80, + "port_range_min": 80, + "protocol": "tcp", + "remote_group_id": "85cc3048-abc3-43cc-89b3-377341426ac5", + "remote_ip_prefix": null, + "security_group_id": "a7734e61-b545-452d-a3cd-0189cbd9747a", + "tenant_id": "e4f50856753b4dc6afee5fa6b9b6c550" + } +} + `) + }) + + opts := CreateOpts{ + Direction: "ingress", + PortRangeMin: 80, + EtherType: "IPv4", + PortRangeMax: 80, + Protocol: "tcp", + RemoteGroupID: "85cc3048-abc3-43cc-89b3-377341426ac5", + SecGroupID: "a7734e61-b545-452d-a3cd-0189cbd9747a", + } + _, err := Create(fake.ServiceClient(), opts).Extract() + th.AssertNoErr(t, err) +} + +func TestRequiredCreateOpts(t *testing.T) { + res := Create(fake.ServiceClient(), CreateOpts{Direction: "something"}) + if res.Err == nil { + t.Fatalf("Expected error, got none") + } + res = Create(fake.ServiceClient(), CreateOpts{Direction: DirIngress, EtherType: "something"}) + if res.Err == nil { + t.Fatalf("Expected error, got none") + } + res = Create(fake.ServiceClient(), CreateOpts{Direction: DirIngress, EtherType: Ether4}) + if res.Err == nil { + t.Fatalf("Expected error, got none") + } + res = Create(fake.ServiceClient(), CreateOpts{Direction: DirIngress, EtherType: Ether4, SecGroupID: "something", Protocol: "foo"}) + if res.Err == nil { + t.Fatalf("Expected error, got none") + } +} + +func TestGet(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + th.Mux.HandleFunc("/v2.0/security-group-rules/3c0e45ff-adaf-4124-b083-bf390e5482ff", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + + fmt.Fprintf(w, ` +{ + "security_group_rule": { + "direction": "egress", + "ethertype": "IPv6", + "id": "3c0e45ff-adaf-4124-b083-bf390e5482ff", + "port_range_max": null, + "port_range_min": null, + "protocol": null, + "remote_group_id": null, + "remote_ip_prefix": null, + "security_group_id": "85cc3048-abc3-43cc-89b3-377341426ac5", + "tenant_id": "e4f50856753b4dc6afee5fa6b9b6c550" + } +} + `) + }) + + sr, err := Get(fake.ServiceClient(), "3c0e45ff-adaf-4124-b083-bf390e5482ff").Extract() + th.AssertNoErr(t, err) + + th.AssertEquals(t, "egress", sr.Direction) + th.AssertEquals(t, "IPv6", sr.EtherType) + th.AssertEquals(t, "3c0e45ff-adaf-4124-b083-bf390e5482ff", sr.ID) + th.AssertEquals(t, 0, sr.PortRangeMax) + th.AssertEquals(t, 0, sr.PortRangeMin) + th.AssertEquals(t, "", sr.Protocol) + th.AssertEquals(t, "", sr.RemoteGroupID) + th.AssertEquals(t, "", sr.RemoteIPPrefix) + th.AssertEquals(t, "85cc3048-abc3-43cc-89b3-377341426ac5", sr.SecGroupID) + th.AssertEquals(t, 
"e4f50856753b4dc6afee5fa6b9b6c550", sr.TenantID) +} + +func TestDelete(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + th.Mux.HandleFunc("/v2.0/security-group-rules/4ec89087-d057-4e2c-911f-60a3b47ee304", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "DELETE") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + w.WriteHeader(http.StatusNoContent) + }) + + res := Delete(fake.ServiceClient(), "4ec89087-d057-4e2c-911f-60a3b47ee304") + th.AssertNoErr(t, res.Err) +} diff --git a/vendor/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/security/rules/results.go b/vendor/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/security/rules/results.go new file mode 100644 index 000000000..6e1385768 --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/security/rules/results.go @@ -0,0 +1,133 @@ +package rules + +import ( + "github.com/mitchellh/mapstructure" + "github.com/rackspace/gophercloud" + "github.com/rackspace/gophercloud/pagination" +) + +// SecGroupRule represents a rule to dictate the behaviour of incoming or +// outgoing traffic for a particular security group. +type SecGroupRule struct { + // The UUID for this security group rule. + ID string + + // The direction in which the security group rule is applied. The only values + // allowed are "ingress" or "egress". For a compute instance, an ingress + // security group rule is applied to incoming (ingress) traffic for that + // instance. An egress rule is applied to traffic leaving the instance. + Direction string + + // Must be IPv4 or IPv6, and addresses represented in CIDR must match the + // ingress or egress rules. + EtherType string `json:"ethertype" mapstructure:"ethertype"` + + // The security group ID to associate with this security group rule. + SecGroupID string `json:"security_group_id" mapstructure:"security_group_id"` + + // The minimum port number in the range that is matched by the security group + // rule. If the protocol is TCP or UDP, this value must be less than or equal + // to the value of the PortRangeMax attribute. If the protocol is ICMP, this + // value must be an ICMP type. + PortRangeMin int `json:"port_range_min" mapstructure:"port_range_min"` + + // The maximum port number in the range that is matched by the security group + // rule. The PortRangeMin attribute constrains the PortRangeMax attribute. If + // the protocol is ICMP, this value must be an ICMP type. + PortRangeMax int `json:"port_range_max" mapstructure:"port_range_max"` + + // The protocol that is matched by the security group rule. Valid values are + // "tcp", "udp", "icmp" or an empty string. + Protocol string + + // The remote group ID to be associated with this security group rule. You + // can specify either RemoteGroupID or RemoteIPPrefix. + RemoteGroupID string `json:"remote_group_id" mapstructure:"remote_group_id"` + + // The remote IP prefix to be associated with this security group rule. You + // can specify either RemoteGroupID or RemoteIPPrefix . This attribute + // matches the specified IP prefix as the source IP address of the IP packet. + RemoteIPPrefix string `json:"remote_ip_prefix" mapstructure:"remote_ip_prefix"` + + // The owner of this security group rule. + TenantID string `json:"tenant_id" mapstructure:"tenant_id"` +} + +// SecGroupRulePage is the page returned by a pager when traversing over a +// collection of security group rules. 
+type SecGroupRulePage struct {
+	pagination.LinkedPageBase
+}
+
+// NextPageURL is invoked when a paginated collection of security group rules has
+// reached the end of a page and the pager seeks to traverse over a new one. In
+// order to do this, it needs to construct the next page's URL.
+func (p SecGroupRulePage) NextPageURL() (string, error) {
+	type resp struct {
+		Links []gophercloud.Link `mapstructure:"security_group_rules_links"`
+	}
+
+	var r resp
+	err := mapstructure.Decode(p.Body, &r)
+	if err != nil {
+		return "", err
+	}
+
+	return gophercloud.ExtractNextURL(r.Links)
+}
+
+// IsEmpty checks whether a SecGroupRulePage struct is empty.
+func (p SecGroupRulePage) IsEmpty() (bool, error) {
+	is, err := ExtractRules(p)
+	if err != nil {
+		return true, nil
+	}
+	return len(is) == 0, nil
+}
+
+// ExtractRules accepts a Page struct, specifically a SecGroupRulePage struct,
+// and extracts the elements into a slice of SecGroupRule structs. In other words,
+// a generic collection is mapped into a relevant slice.
+func ExtractRules(page pagination.Page) ([]SecGroupRule, error) {
+	var resp struct {
+		SecGroupRules []SecGroupRule `mapstructure:"security_group_rules" json:"security_group_rules"`
+	}
+
+	err := mapstructure.Decode(page.(SecGroupRulePage).Body, &resp)
+
+	return resp.SecGroupRules, err
+}
+
+type commonResult struct {
+	gophercloud.Result
+}
+
+// Extract is a function that accepts a result and extracts a security group rule.
+func (r commonResult) Extract() (*SecGroupRule, error) {
+	if r.Err != nil {
+		return nil, r.Err
+	}
+
+	var res struct {
+		SecGroupRule *SecGroupRule `mapstructure:"security_group_rule" json:"security_group_rule"`
+	}
+
+	err := mapstructure.Decode(r.Body, &res)
+
+	return res.SecGroupRule, err
+}
+
+// CreateResult represents the result of a create operation.
+type CreateResult struct {
+	commonResult
+}
+
+// GetResult represents the result of a get operation.
+type GetResult struct {
+	commonResult
+}
+
+// DeleteResult represents the result of a delete operation.
+type DeleteResult struct {
+	gophercloud.ErrResult
+}
diff --git a/vendor/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/security/rules/urls.go b/vendor/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/security/rules/urls.go
new file mode 100644
index 000000000..8e2b2bb28
--- /dev/null
+++ b/vendor/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/security/rules/urls.go
@@ -0,0 +1,13 @@
+package rules
+
+import "github.com/rackspace/gophercloud"
+
+const rootPath = "security-group-rules"
+
+func rootURL(c *gophercloud.ServiceClient) string {
+	return c.ServiceURL(rootPath)
+}
+
+func resourceURL(c *gophercloud.ServiceClient, id string) string {
+	return c.ServiceURL(rootPath, id)
+}
diff --git a/vendor/github.com/rackspace/gophercloud/openstack/networking/v2/networks/doc.go b/vendor/github.com/rackspace/gophercloud/openstack/networking/v2/networks/doc.go
new file mode 100644
index 000000000..c87a7ce27
--- /dev/null
+++ b/vendor/github.com/rackspace/gophercloud/openstack/networking/v2/networks/doc.go
@@ -0,0 +1,9 @@
+// Package networks contains functionality for working with Neutron network
+// resources. A network is an isolated virtual layer-2 broadcast domain that is
+// typically reserved for the tenant who created it (unless you configure the
+// network to be shared). Tenants can create multiple networks until the
+// per-tenant quota threshold is reached.
+//
+// In the v2.0 Networking API, the network is the main entity.
Ports and subnets +// are always associated with a network. +package networks diff --git a/vendor/github.com/rackspace/gophercloud/openstack/networking/v2/networks/errors.go b/vendor/github.com/rackspace/gophercloud/openstack/networking/v2/networks/errors.go new file mode 100644 index 000000000..83c4a6a86 --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/openstack/networking/v2/networks/errors.go @@ -0,0 +1 @@ +package networks diff --git a/vendor/github.com/rackspace/gophercloud/openstack/networking/v2/networks/requests.go b/vendor/github.com/rackspace/gophercloud/openstack/networking/v2/networks/requests.go new file mode 100644 index 000000000..25ab7a8b6 --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/openstack/networking/v2/networks/requests.go @@ -0,0 +1,226 @@ +package networks + +import ( + "fmt" + + "github.com/rackspace/gophercloud" + "github.com/rackspace/gophercloud/pagination" +) + +// AdminState gives users a solid type to work with for create and update +// operations. It is recommended that users use the `Up` and `Down` enums. +type AdminState *bool + +// Convenience vars for AdminStateUp values. +var ( + iTrue = true + iFalse = false + + Up AdminState = &iTrue + Down AdminState = &iFalse +) + +type networkOpts struct { + AdminStateUp *bool + Name string + Shared *bool + TenantID string +} + +// ListOptsBuilder allows extensions to add additional parameters to the +// List request. +type ListOptsBuilder interface { + ToNetworkListQuery() (string, error) +} + +// ListOpts allows the filtering and sorting of paginated collections through +// the API. Filtering is achieved by passing in struct field values that map to +// the network attributes you want to see returned. SortKey allows you to sort +// by a particular network attribute. SortDir sets the direction, and is either +// `asc' or `desc'. Marker and Limit are used for pagination. +type ListOpts struct { + Status string `q:"status"` + Name string `q:"name"` + AdminStateUp *bool `q:"admin_state_up"` + TenantID string `q:"tenant_id"` + Shared *bool `q:"shared"` + ID string `q:"id"` + Marker string `q:"marker"` + Limit int `q:"limit"` + SortKey string `q:"sort_key"` + SortDir string `q:"sort_dir"` +} + +// ToNetworkListQuery formats a ListOpts into a query string. +func (opts ListOpts) ToNetworkListQuery() (string, error) { + q, err := gophercloud.BuildQueryString(opts) + if err != nil { + return "", err + } + return q.String(), nil +} + +// List returns a Pager which allows you to iterate over a collection of +// networks. It accepts a ListOpts struct, which allows you to filter and sort +// the returned collection for greater efficiency. +func List(c *gophercloud.ServiceClient, opts ListOptsBuilder) pagination.Pager { + url := listURL(c) + if opts != nil { + query, err := opts.ToNetworkListQuery() + if err != nil { + return pagination.Pager{Err: err} + } + url += query + } + + return pagination.NewPager(c, url, func(r pagination.PageResult) pagination.Page { + return NetworkPage{pagination.LinkedPageBase{PageResult: r}} + }) +} + +// Get retrieves a specific network based on its unique ID. +func Get(c *gophercloud.ServiceClient, id string) GetResult { + var res GetResult + _, res.Err = c.Get(getURL(c, id), &res.Body, nil) + return res +} + +// CreateOptsBuilder is the interface options structs have to satisfy in order +// to be used in the main Create operation in this package. 
Since many +// extensions decorate or modify the common logic, it is useful for them to +// satisfy a basic interface in order for them to be used. +type CreateOptsBuilder interface { + ToNetworkCreateMap() (map[string]interface{}, error) +} + +// CreateOpts is the common options struct used in this package's Create +// operation. +type CreateOpts networkOpts + +// ToNetworkCreateMap casts a CreateOpts struct to a map. +func (opts CreateOpts) ToNetworkCreateMap() (map[string]interface{}, error) { + n := make(map[string]interface{}) + + if opts.AdminStateUp != nil { + n["admin_state_up"] = &opts.AdminStateUp + } + if opts.Name != "" { + n["name"] = opts.Name + } + if opts.Shared != nil { + n["shared"] = &opts.Shared + } + if opts.TenantID != "" { + n["tenant_id"] = opts.TenantID + } + + return map[string]interface{}{"network": n}, nil +} + +// Create accepts a CreateOpts struct and creates a new network using the values +// provided. This operation does not actually require a request body, i.e. the +// CreateOpts struct argument can be empty. +// +// The tenant ID that is contained in the URI is the tenant that creates the +// network. An admin user, however, has the option of specifying another tenant +// ID in the CreateOpts struct. +func Create(c *gophercloud.ServiceClient, opts CreateOptsBuilder) CreateResult { + var res CreateResult + + reqBody, err := opts.ToNetworkCreateMap() + if err != nil { + res.Err = err + return res + } + + _, res.Err = c.Post(createURL(c), reqBody, &res.Body, nil) + return res +} + +// UpdateOptsBuilder is the interface options structs have to satisfy in order +// to be used in the main Update operation in this package. Since many +// extensions decorate or modify the common logic, it is useful for them to +// satisfy a basic interface in order for them to be used. +type UpdateOptsBuilder interface { + ToNetworkUpdateMap() (map[string]interface{}, error) +} + +// UpdateOpts is the common options struct used in this package's Update +// operation. +type UpdateOpts networkOpts + +// ToNetworkUpdateMap casts a UpdateOpts struct to a map. +func (opts UpdateOpts) ToNetworkUpdateMap() (map[string]interface{}, error) { + n := make(map[string]interface{}) + + if opts.AdminStateUp != nil { + n["admin_state_up"] = &opts.AdminStateUp + } + if opts.Name != "" { + n["name"] = opts.Name + } + if opts.Shared != nil { + n["shared"] = &opts.Shared + } + + return map[string]interface{}{"network": n}, nil +} + +// Update accepts a UpdateOpts struct and updates an existing network using the +// values provided. For more information, see the Create function. +func Update(c *gophercloud.ServiceClient, networkID string, opts UpdateOptsBuilder) UpdateResult { + var res UpdateResult + + reqBody, err := opts.ToNetworkUpdateMap() + if err != nil { + res.Err = err + return res + } + + // Send request to API + _, res.Err = c.Put(updateURL(c, networkID), reqBody, &res.Body, &gophercloud.RequestOpts{ + OkCodes: []int{200, 201}, + }) + + return res +} + +// Delete accepts a unique ID and deletes the network associated with it. +func Delete(c *gophercloud.ServiceClient, networkID string) DeleteResult { + var res DeleteResult + _, res.Err = c.Delete(deleteURL(c, networkID), nil) + return res +} + +// IDFromName is a convenience function that returns a network's ID given its name. 
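+//
+// For example (a sketch; "client" is an assumed, already-authenticated
+// service client, and the name comes from this package's test fixtures):
+//
+//	id, err := IDFromName(client, "private-network")
+//	if err != nil {
+//		// no match, or more than one network shares the name
+//	}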
+func IDFromName(client *gophercloud.ServiceClient, name string) (string, error) { + networkCount := 0 + networkID := "" + if name == "" { + return "", fmt.Errorf("A network name must be provided.") + } + pager := List(client, nil) + pager.EachPage(func(page pagination.Page) (bool, error) { + networkList, err := ExtractNetworks(page) + if err != nil { + return false, err + } + + for _, n := range networkList { + if n.Name == name { + networkCount++ + networkID = n.ID + } + } + return true, nil + }) + + switch networkCount { + case 0: + return "", fmt.Errorf("Unable to find network: %s", name) + case 1: + return networkID, nil + default: + return "", fmt.Errorf("Found %d networks matching %s", networkCount, name) + } +} diff --git a/vendor/github.com/rackspace/gophercloud/openstack/networking/v2/networks/requests_test.go b/vendor/github.com/rackspace/gophercloud/openstack/networking/v2/networks/requests_test.go new file mode 100644 index 000000000..81eb79cb5 --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/openstack/networking/v2/networks/requests_test.go @@ -0,0 +1,276 @@ +package networks + +import ( + "fmt" + "net/http" + "testing" + + fake "github.com/rackspace/gophercloud/openstack/networking/v2/common" + "github.com/rackspace/gophercloud/pagination" + th "github.com/rackspace/gophercloud/testhelper" +) + +func TestList(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + th.Mux.HandleFunc("/v2.0/networks", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + + fmt.Fprintf(w, ` +{ + "networks": [ + { + "status": "ACTIVE", + "subnets": [ + "54d6f61d-db07-451c-9ab3-b9609b6b6f0b" + ], + "name": "private-network", + "admin_state_up": true, + "tenant_id": "4fd44f30292945e481c7b8a0c8908869", + "shared": true, + "id": "d32019d3-bc6e-4319-9c1d-6722fc136a22" + }, + { + "status": "ACTIVE", + "subnets": [ + "08eae331-0402-425a-923c-34f7cfe39c1b" + ], + "name": "private", + "admin_state_up": true, + "tenant_id": "26a7980765d0414dbc1fc1f88cdb7e6e", + "shared": true, + "id": "db193ab3-96e3-4cb3-8fc5-05f4296d0324" + } + ] +} + `) + }) + + client := fake.ServiceClient() + count := 0 + + List(client, ListOpts{}).EachPage(func(page pagination.Page) (bool, error) { + count++ + actual, err := ExtractNetworks(page) + if err != nil { + t.Errorf("Failed to extract networks: %v", err) + return false, err + } + + expected := []Network{ + Network{ + Status: "ACTIVE", + Subnets: []string{"54d6f61d-db07-451c-9ab3-b9609b6b6f0b"}, + Name: "private-network", + AdminStateUp: true, + TenantID: "4fd44f30292945e481c7b8a0c8908869", + Shared: true, + ID: "d32019d3-bc6e-4319-9c1d-6722fc136a22", + }, + Network{ + Status: "ACTIVE", + Subnets: []string{"08eae331-0402-425a-923c-34f7cfe39c1b"}, + Name: "private", + AdminStateUp: true, + TenantID: "26a7980765d0414dbc1fc1f88cdb7e6e", + Shared: true, + ID: "db193ab3-96e3-4cb3-8fc5-05f4296d0324", + }, + } + + th.CheckDeepEquals(t, expected, actual) + + return true, nil + }) + + if count != 1 { + t.Errorf("Expected 1 page, got %d", count) + } +} + +func TestGet(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + th.Mux.HandleFunc("/v2.0/networks/d32019d3-bc6e-4319-9c1d-6722fc136a22", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + + w.Header().Add("Content-Type", "application/json") + 
w.WriteHeader(http.StatusOK) + + fmt.Fprintf(w, ` +{ + "network": { + "status": "ACTIVE", + "subnets": [ + "54d6f61d-db07-451c-9ab3-b9609b6b6f0b" + ], + "name": "private-network", + "admin_state_up": true, + "tenant_id": "4fd44f30292945e481c7b8a0c8908869", + "shared": true, + "id": "d32019d3-bc6e-4319-9c1d-6722fc136a22" + } +} + `) + }) + + n, err := Get(fake.ServiceClient(), "d32019d3-bc6e-4319-9c1d-6722fc136a22").Extract() + th.AssertNoErr(t, err) + + th.AssertEquals(t, n.Status, "ACTIVE") + th.AssertDeepEquals(t, n.Subnets, []string{"54d6f61d-db07-451c-9ab3-b9609b6b6f0b"}) + th.AssertEquals(t, n.Name, "private-network") + th.AssertEquals(t, n.AdminStateUp, true) + th.AssertEquals(t, n.TenantID, "4fd44f30292945e481c7b8a0c8908869") + th.AssertEquals(t, n.Shared, true) + th.AssertEquals(t, n.ID, "d32019d3-bc6e-4319-9c1d-6722fc136a22") +} + +func TestCreate(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + th.Mux.HandleFunc("/v2.0/networks", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "POST") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + th.TestHeader(t, r, "Content-Type", "application/json") + th.TestHeader(t, r, "Accept", "application/json") + th.TestJSONRequest(t, r, ` +{ + "network": { + "name": "sample_network", + "admin_state_up": true + } +} + `) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusCreated) + + fmt.Fprintf(w, ` +{ + "network": { + "status": "ACTIVE", + "subnets": [], + "name": "net1", + "admin_state_up": true, + "tenant_id": "9bacb3c5d39d41a79512987f338cf177", + "shared": false, + "id": "4e8e5957-649f-477b-9e5b-f1f75b21c03c" + } +} + `) + }) + + iTrue := true + options := CreateOpts{Name: "sample_network", AdminStateUp: &iTrue} + n, err := Create(fake.ServiceClient(), options).Extract() + th.AssertNoErr(t, err) + + th.AssertEquals(t, n.Status, "ACTIVE") + th.AssertDeepEquals(t, n.Subnets, []string{}) + th.AssertEquals(t, n.Name, "net1") + th.AssertEquals(t, n.AdminStateUp, true) + th.AssertEquals(t, n.TenantID, "9bacb3c5d39d41a79512987f338cf177") + th.AssertEquals(t, n.Shared, false) + th.AssertEquals(t, n.ID, "4e8e5957-649f-477b-9e5b-f1f75b21c03c") +} + +func TestCreateWithOptionalFields(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + th.Mux.HandleFunc("/v2.0/networks", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "POST") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + th.TestHeader(t, r, "Content-Type", "application/json") + th.TestHeader(t, r, "Accept", "application/json") + th.TestJSONRequest(t, r, ` +{ + "network": { + "name": "sample_network", + "admin_state_up": true, + "shared": true, + "tenant_id": "12345" + } +} + `) + + w.WriteHeader(http.StatusCreated) + fmt.Fprintf(w, `{}`) + }) + + iTrue := true + options := CreateOpts{Name: "sample_network", AdminStateUp: &iTrue, Shared: &iTrue, TenantID: "12345"} + _, err := Create(fake.ServiceClient(), options).Extract() + th.AssertNoErr(t, err) +} + +func TestUpdate(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + th.Mux.HandleFunc("/v2.0/networks/4e8e5957-649f-477b-9e5b-f1f75b21c03c", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "PUT") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + th.TestHeader(t, r, "Content-Type", "application/json") + th.TestHeader(t, r, "Accept", "application/json") + th.TestJSONRequest(t, r, ` +{ + "network": { + "name": "new_network_name", + "admin_state_up": false, + "shared": true + } +} + `) + + w.Header().Add("Content-Type", 
"application/json") + w.WriteHeader(http.StatusOK) + + fmt.Fprintf(w, ` +{ + "network": { + "status": "ACTIVE", + "subnets": [], + "name": "new_network_name", + "admin_state_up": false, + "tenant_id": "4fd44f30292945e481c7b8a0c8908869", + "shared": true, + "id": "4e8e5957-649f-477b-9e5b-f1f75b21c03c" + } +} + `) + }) + + iTrue, iFalse := true, false + options := UpdateOpts{Name: "new_network_name", AdminStateUp: &iFalse, Shared: &iTrue} + n, err := Update(fake.ServiceClient(), "4e8e5957-649f-477b-9e5b-f1f75b21c03c", options).Extract() + th.AssertNoErr(t, err) + + th.AssertEquals(t, n.Name, "new_network_name") + th.AssertEquals(t, n.AdminStateUp, false) + th.AssertEquals(t, n.Shared, true) + th.AssertEquals(t, n.ID, "4e8e5957-649f-477b-9e5b-f1f75b21c03c") +} + +func TestDelete(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + th.Mux.HandleFunc("/v2.0/networks/4e8e5957-649f-477b-9e5b-f1f75b21c03c", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "DELETE") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + w.WriteHeader(http.StatusNoContent) + }) + + res := Delete(fake.ServiceClient(), "4e8e5957-649f-477b-9e5b-f1f75b21c03c") + th.AssertNoErr(t, res.Err) +} diff --git a/vendor/github.com/rackspace/gophercloud/openstack/networking/v2/networks/results.go b/vendor/github.com/rackspace/gophercloud/openstack/networking/v2/networks/results.go new file mode 100644 index 000000000..3ecedde9a --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/openstack/networking/v2/networks/results.go @@ -0,0 +1,116 @@ +package networks + +import ( + "github.com/mitchellh/mapstructure" + "github.com/rackspace/gophercloud" + "github.com/rackspace/gophercloud/pagination" +) + +type commonResult struct { + gophercloud.Result +} + +// Extract is a function that accepts a result and extracts a network resource. +func (r commonResult) Extract() (*Network, error) { + if r.Err != nil { + return nil, r.Err + } + + var res struct { + Network *Network `json:"network"` + } + + err := mapstructure.Decode(r.Body, &res) + + return res.Network, err +} + +// CreateResult represents the result of a create operation. +type CreateResult struct { + commonResult +} + +// GetResult represents the result of a get operation. +type GetResult struct { + commonResult +} + +// UpdateResult represents the result of an update operation. +type UpdateResult struct { + commonResult +} + +// DeleteResult represents the result of a delete operation. +type DeleteResult struct { + gophercloud.ErrResult +} + +// Network represents, well, a network. +type Network struct { + // UUID for the network + ID string `mapstructure:"id" json:"id"` + + // Human-readable name for the network. Might not be unique. + Name string `mapstructure:"name" json:"name"` + + // The administrative state of network. If false (down), the network does not forward packets. + AdminStateUp bool `mapstructure:"admin_state_up" json:"admin_state_up"` + + // Indicates whether network is currently operational. Possible values include + // `ACTIVE', `DOWN', `BUILD', or `ERROR'. Plug-ins might define additional values. + Status string `mapstructure:"status" json:"status"` + + // Subnets associated with this network. + Subnets []string `mapstructure:"subnets" json:"subnets"` + + // Owner of network. Only admin users can specify a tenant_id other than its own. + TenantID string `mapstructure:"tenant_id" json:"tenant_id"` + + // Specifies whether the network resource can be accessed by any tenant or not. 
+	Shared bool `mapstructure:"shared" json:"shared"`
+}
+
+// NetworkPage is the page returned by a pager when traversing over a
+// collection of networks.
+type NetworkPage struct {
+	pagination.LinkedPageBase
+}
+
+// NextPageURL is invoked when a paginated collection of networks has reached
+// the end of a page and the pager seeks to traverse over a new one. In order
+// to do this, it needs to construct the next page's URL.
+func (p NetworkPage) NextPageURL() (string, error) {
+	type resp struct {
+		Links []gophercloud.Link `mapstructure:"networks_links"`
+	}
+
+	var r resp
+	err := mapstructure.Decode(p.Body, &r)
+	if err != nil {
+		return "", err
+	}
+
+	return gophercloud.ExtractNextURL(r.Links)
+}
+
+// IsEmpty checks whether a NetworkPage struct is empty.
+func (p NetworkPage) IsEmpty() (bool, error) {
+	is, err := ExtractNetworks(p)
+	if err != nil {
+		return true, nil
+	}
+	return len(is) == 0, nil
+}
+
+// ExtractNetworks accepts a Page struct, specifically a NetworkPage struct,
+// and extracts the elements into a slice of Network structs. In other words,
+// a generic collection is mapped into a relevant slice.
+func ExtractNetworks(page pagination.Page) ([]Network, error) {
+	var resp struct {
+		Networks []Network `mapstructure:"networks" json:"networks"`
+	}
+
+	err := mapstructure.Decode(page.(NetworkPage).Body, &resp)
+
+	return resp.Networks, err
+}
diff --git a/vendor/github.com/rackspace/gophercloud/openstack/networking/v2/networks/urls.go b/vendor/github.com/rackspace/gophercloud/openstack/networking/v2/networks/urls.go
new file mode 100644
index 000000000..a9eecc529
--- /dev/null
+++ b/vendor/github.com/rackspace/gophercloud/openstack/networking/v2/networks/urls.go
@@ -0,0 +1,31 @@
+package networks
+
+import "github.com/rackspace/gophercloud"
+
+func resourceURL(c *gophercloud.ServiceClient, id string) string {
+	return c.ServiceURL("networks", id)
+}
+
+func rootURL(c *gophercloud.ServiceClient) string {
+	return c.ServiceURL("networks")
+}
+
+func getURL(c *gophercloud.ServiceClient, id string) string {
+	return resourceURL(c, id)
+}
+
+func listURL(c *gophercloud.ServiceClient) string {
+	return rootURL(c)
+}
+
+func createURL(c *gophercloud.ServiceClient) string {
+	return rootURL(c)
+}
+
+func updateURL(c *gophercloud.ServiceClient, id string) string {
+	return resourceURL(c, id)
+}
+
+func deleteURL(c *gophercloud.ServiceClient, id string) string {
+	return resourceURL(c, id)
+}
diff --git a/vendor/github.com/rackspace/gophercloud/openstack/networking/v2/networks/urls_test.go b/vendor/github.com/rackspace/gophercloud/openstack/networking/v2/networks/urls_test.go
new file mode 100644
index 000000000..caf77dbe0
--- /dev/null
+++ b/vendor/github.com/rackspace/gophercloud/openstack/networking/v2/networks/urls_test.go
@@ -0,0 +1,38 @@
+package networks
+
+import (
+	"testing"
+
+	"github.com/rackspace/gophercloud"
+	th "github.com/rackspace/gophercloud/testhelper"
+)
+
+const endpoint = "http://localhost:57909/"
+
+func endpointClient() *gophercloud.ServiceClient {
+	return &gophercloud.ServiceClient{Endpoint: endpoint, ResourceBase: endpoint + "v2.0/"}
+}
+
+func TestGetURL(t *testing.T) {
+	actual := getURL(endpointClient(), "foo")
+	expected := endpoint + "v2.0/networks/foo"
+	th.AssertEquals(t, expected, actual)
+}
+
+func TestCreateURL(t *testing.T) {
+	actual := createURL(endpointClient())
+	expected := endpoint + "v2.0/networks"
+	th.AssertEquals(t, expected, actual)
+}
+
+func TestListURL(t *testing.T) {
+	actual := listURL(endpointClient())
+	expected
:= endpoint + "v2.0/networks" + th.AssertEquals(t, expected, actual) +} + +func TestDeleteURL(t *testing.T) { + actual := deleteURL(endpointClient(), "foo") + expected := endpoint + "v2.0/networks/foo" + th.AssertEquals(t, expected, actual) +} diff --git a/vendor/github.com/rackspace/gophercloud/openstack/networking/v2/ports/doc.go b/vendor/github.com/rackspace/gophercloud/openstack/networking/v2/ports/doc.go new file mode 100644 index 000000000..f16a4bb01 --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/openstack/networking/v2/ports/doc.go @@ -0,0 +1,8 @@ +// Package ports contains functionality for working with Neutron port resources. +// A port represents a virtual switch port on a logical network switch. Virtual +// instances attach their interfaces into ports. The logical port also defines +// the MAC address and the IP address(es) to be assigned to the interfaces +// plugged into them. When IP addresses are associated to a port, this also +// implies the port is associated with a subnet, as the IP address was taken +// from the allocation pool for a specific subnet. +package ports diff --git a/vendor/github.com/rackspace/gophercloud/openstack/networking/v2/ports/errors.go b/vendor/github.com/rackspace/gophercloud/openstack/networking/v2/ports/errors.go new file mode 100644 index 000000000..111d977e7 --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/openstack/networking/v2/ports/errors.go @@ -0,0 +1,11 @@ +package ports + +import "fmt" + +func err(str string) error { + return fmt.Errorf("%s", str) +} + +var ( + errNetworkIDRequired = err("A Network ID is required") +) diff --git a/vendor/github.com/rackspace/gophercloud/openstack/networking/v2/ports/requests.go b/vendor/github.com/rackspace/gophercloud/openstack/networking/v2/ports/requests.go new file mode 100644 index 000000000..2caf1cace --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/openstack/networking/v2/ports/requests.go @@ -0,0 +1,260 @@ +package ports + +import ( + "fmt" + + "github.com/rackspace/gophercloud" + "github.com/rackspace/gophercloud/pagination" +) + +// AdminState gives users a solid type to work with for create and update +// operations. It is recommended that users use the `Up` and `Down` enums. +type AdminState *bool + +// Convenience vars for AdminStateUp values. +var ( + iTrue = true + iFalse = false + + Up AdminState = &iTrue + Down AdminState = &iFalse +) + +// ListOptsBuilder allows extensions to add additional parameters to the +// List request. +type ListOptsBuilder interface { + ToPortListQuery() (string, error) +} + +// ListOpts allows the filtering and sorting of paginated collections through +// the API. Filtering is achieved by passing in struct field values that map to +// the port attributes you want to see returned. SortKey allows you to sort +// by a particular port attribute. SortDir sets the direction, and is either +// `asc' or `desc'. Marker and Limit are used for pagination. +type ListOpts struct { + Status string `q:"status"` + Name string `q:"name"` + AdminStateUp *bool `q:"admin_state_up"` + NetworkID string `q:"network_id"` + TenantID string `q:"tenant_id"` + DeviceOwner string `q:"device_owner"` + MACAddress string `q:"mac_address"` + ID string `q:"id"` + DeviceID string `q:"device_id"` + Limit int `q:"limit"` + Marker string `q:"marker"` + SortKey string `q:"sort_key"` + SortDir string `q:"sort_dir"` +} + +// ToPortListQuery formats a ListOpts into a query string. 
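+//
+// For instance (a sketch, not a verbatim transcript of the encoder's output),
+// options such as
+//
+//	ListOpts{NetworkID: "70c1db1f-b701-45bd-96e0-a313ee3430b3", Limit: 1}
+//
+// would be rendered by gophercloud.BuildQueryString into a string along the
+// lines of "?network_id=70c1db1f-b701-45bd-96e0-a313ee3430b3&limit=1".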
+func (opts ListOpts) ToPortListQuery() (string, error) { + q, err := gophercloud.BuildQueryString(opts) + if err != nil { + return "", err + } + return q.String(), nil +} + +// List returns a Pager which allows you to iterate over a collection of +// ports. It accepts a ListOpts struct, which allows you to filter and sort +// the returned collection for greater efficiency. +// +// Default policy settings return only those ports that are owned by the tenant +// who submits the request, unless the request is submitted by a user with +// administrative rights. +func List(c *gophercloud.ServiceClient, opts ListOptsBuilder) pagination.Pager { + url := listURL(c) + if opts != nil { + query, err := opts.ToPortListQuery() + if err != nil { + return pagination.Pager{Err: err} + } + url += query + } + + return pagination.NewPager(c, url, func(r pagination.PageResult) pagination.Page { + return PortPage{pagination.LinkedPageBase{PageResult: r}} + }) +} + +// Get retrieves a specific port based on its unique ID. +func Get(c *gophercloud.ServiceClient, id string) GetResult { + var res GetResult + _, res.Err = c.Get(getURL(c, id), &res.Body, nil) + return res +} + +// CreateOptsBuilder is the interface options structs have to satisfy in order +// to be used in the main Create operation in this package. Since many +// extensions decorate or modify the common logic, it is useful for them to +// satisfy a basic interface in order for them to be used. +type CreateOptsBuilder interface { + ToPortCreateMap() (map[string]interface{}, error) +} + +// CreateOpts represents the attributes used when creating a new port. +type CreateOpts struct { + NetworkID string + Name string + AdminStateUp *bool + MACAddress string + FixedIPs interface{} + DeviceID string + DeviceOwner string + TenantID string + SecurityGroups []string +} + +// ToPortCreateMap casts a CreateOpts struct to a map. +func (opts CreateOpts) ToPortCreateMap() (map[string]interface{}, error) { + p := make(map[string]interface{}) + + if opts.NetworkID == "" { + return nil, errNetworkIDRequired + } + p["network_id"] = opts.NetworkID + + if opts.DeviceID != "" { + p["device_id"] = opts.DeviceID + } + if opts.DeviceOwner != "" { + p["device_owner"] = opts.DeviceOwner + } + if opts.FixedIPs != nil { + p["fixed_ips"] = opts.FixedIPs + } + if opts.SecurityGroups != nil { + p["security_groups"] = opts.SecurityGroups + } + if opts.TenantID != "" { + p["tenant_id"] = opts.TenantID + } + if opts.AdminStateUp != nil { + p["admin_state_up"] = &opts.AdminStateUp + } + if opts.Name != "" { + p["name"] = opts.Name + } + if opts.MACAddress != "" { + p["mac_address"] = opts.MACAddress + } + + return map[string]interface{}{"port": p}, nil +} + +// Create accepts a CreateOpts struct and creates a new port using the values +// provided. You must remember to provide a NetworkID value. +func Create(c *gophercloud.ServiceClient, opts CreateOptsBuilder) CreateResult { + var res CreateResult + + reqBody, err := opts.ToPortCreateMap() + if err != nil { + res.Err = err + return res + } + + _, res.Err = c.Post(createURL(c), reqBody, &res.Body, nil) + return res +} + +// UpdateOptsBuilder is the interface options structs have to satisfy in order +// to be used in the main Update operation in this package. Since many +// extensions decorate or modify the common logic, it is useful for them to +// satisfy a basic interface in order for them to be used.
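Consistent with that pattern, the UpdateOpts below serialize only the fields a caller actually sets, so partial updates are natural; a hypothetical rename sketch (IDs are placeholders):

package example

import (
	"github.com/rackspace/gophercloud"
	"github.com/rackspace/gophercloud/openstack/networking/v2/ports"
)

// renamePort PUTs {"port": {"name": ...}}; unset UpdateOpts fields are
// omitted from the request body entirely.
func renamePort(client *gophercloud.ServiceClient, portID, newName string) error {
	_, err := ports.Update(client, portID, ports.UpdateOpts{Name: newName}).Extract()
	return err
}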
+type UpdateOptsBuilder interface { + ToPortUpdateMap() (map[string]interface{}, error) +} + +// UpdateOpts represents the attributes used when updating an existing port. +type UpdateOpts struct { + Name string + AdminStateUp *bool + FixedIPs interface{} + DeviceID string + DeviceOwner string + SecurityGroups []string +} + +// ToPortUpdateMap casts an UpdateOpts struct to a map. +func (opts UpdateOpts) ToPortUpdateMap() (map[string]interface{}, error) { + p := make(map[string]interface{}) + + if opts.DeviceID != "" { + p["device_id"] = opts.DeviceID + } + if opts.DeviceOwner != "" { + p["device_owner"] = opts.DeviceOwner + } + if opts.FixedIPs != nil { + p["fixed_ips"] = opts.FixedIPs + } + if opts.SecurityGroups != nil { + p["security_groups"] = opts.SecurityGroups + } + if opts.AdminStateUp != nil { + p["admin_state_up"] = &opts.AdminStateUp + } + if opts.Name != "" { + p["name"] = opts.Name + } + + return map[string]interface{}{"port": p}, nil +} + +// Update accepts a UpdateOpts struct and updates an existing port using the +// values provided. +func Update(c *gophercloud.ServiceClient, id string, opts UpdateOptsBuilder) UpdateResult { + var res UpdateResult + + reqBody, err := opts.ToPortUpdateMap() + if err != nil { + res.Err = err + return res + } + + _, res.Err = c.Put(updateURL(c, id), reqBody, &res.Body, &gophercloud.RequestOpts{ + OkCodes: []int{200, 201}, + }) + return res +} + +// Delete accepts a unique ID and deletes the port associated with it. +func Delete(c *gophercloud.ServiceClient, id string) DeleteResult { + var res DeleteResult + _, res.Err = c.Delete(deleteURL(c, id), nil) + return res +} + +// IDFromName is a convenience function that returns a port's ID given its name. +func IDFromName(client *gophercloud.ServiceClient, name string) (string, error) { + portCount := 0 + portID := "" + if name == "" { + return "", fmt.Errorf("A port name must be provided.") + } + pager := List(client, nil) + pager.EachPage(func(page pagination.Page) (bool, error) { + portList, err := ExtractPorts(page) + if err != nil { + return false, err + } + + for _, p := range portList { + if p.Name == name { + portCount++ + portID = p.ID + } + } + return true, nil + }) + + switch portCount { + case 0: + return "", fmt.Errorf("Unable to find port: %s", name) + case 1: + return portID, nil + default: + return "", fmt.Errorf("Found %d ports matching %s", portCount, name) + } +} diff --git a/vendor/github.com/rackspace/gophercloud/openstack/networking/v2/ports/requests_test.go b/vendor/github.com/rackspace/gophercloud/openstack/networking/v2/ports/requests_test.go new file mode 100644 index 000000000..9e323efa3 --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/openstack/networking/v2/ports/requests_test.go @@ -0,0 +1,321 @@ +package ports + +import ( + "fmt" + "net/http" + "testing" + + fake "github.com/rackspace/gophercloud/openstack/networking/v2/common" + "github.com/rackspace/gophercloud/pagination" + th "github.com/rackspace/gophercloud/testhelper" +) + +func TestList(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + th.Mux.HandleFunc("/v2.0/ports", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + + fmt.Fprintf(w, ` +{ + "ports": [ + { + "status": "ACTIVE", + "binding:host_id": "devstack", + "name": "", + "admin_state_up": true, + "network_id": "70c1db1f-b701-45bd-96e0-a313ee3430b3", + "tenant_id": "", + 
"device_owner": "network:router_gateway", + "mac_address": "fa:16:3e:58:42:ed", + "fixed_ips": [ + { + "subnet_id": "008ba151-0b8c-4a67-98b5-0d2b87666062", + "ip_address": "172.24.4.2" + } + ], + "id": "d80b1a3b-4fc1-49f3-952e-1e2ab7081d8b", + "security_groups": [], + "device_id": "9ae135f4-b6e0-4dad-9e91-3c223e385824" + } + ] +} + `) + }) + + count := 0 + + List(fake.ServiceClient(), ListOpts{}).EachPage(func(page pagination.Page) (bool, error) { + count++ + actual, err := ExtractPorts(page) + if err != nil { + t.Errorf("Failed to extract subnets: %v", err) + return false, nil + } + + expected := []Port{ + Port{ + Status: "ACTIVE", + Name: "", + AdminStateUp: true, + NetworkID: "70c1db1f-b701-45bd-96e0-a313ee3430b3", + TenantID: "", + DeviceOwner: "network:router_gateway", + MACAddress: "fa:16:3e:58:42:ed", + FixedIPs: []IP{ + IP{ + SubnetID: "008ba151-0b8c-4a67-98b5-0d2b87666062", + IPAddress: "172.24.4.2", + }, + }, + ID: "d80b1a3b-4fc1-49f3-952e-1e2ab7081d8b", + SecurityGroups: []string{}, + DeviceID: "9ae135f4-b6e0-4dad-9e91-3c223e385824", + }, + } + + th.CheckDeepEquals(t, expected, actual) + + return true, nil + }) + + if count != 1 { + t.Errorf("Expected 1 page, got %d", count) + } +} + +func TestGet(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + th.Mux.HandleFunc("/v2.0/ports/46d4bfb9-b26e-41f3-bd2e-e6dcc1ccedb2", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + + fmt.Fprintf(w, ` +{ + "port": { + "status": "ACTIVE", + "name": "", + "admin_state_up": true, + "network_id": "a87cc70a-3e15-4acf-8205-9b711a3531b7", + "tenant_id": "7e02058126cc4950b75f9970368ba177", + "device_owner": "network:router_interface", + "mac_address": "fa:16:3e:23:fd:d7", + "fixed_ips": [ + { + "subnet_id": "a0304c3a-4f08-4c43-88af-d796509c97d2", + "ip_address": "10.0.0.1" + } + ], + "id": "46d4bfb9-b26e-41f3-bd2e-e6dcc1ccedb2", + "security_groups": [], + "device_id": "5e3898d7-11be-483e-9732-b2f5eccd2b2e" + } +} + `) + }) + + n, err := Get(fake.ServiceClient(), "46d4bfb9-b26e-41f3-bd2e-e6dcc1ccedb2").Extract() + th.AssertNoErr(t, err) + + th.AssertEquals(t, n.Status, "ACTIVE") + th.AssertEquals(t, n.Name, "") + th.AssertEquals(t, n.AdminStateUp, true) + th.AssertEquals(t, n.NetworkID, "a87cc70a-3e15-4acf-8205-9b711a3531b7") + th.AssertEquals(t, n.TenantID, "7e02058126cc4950b75f9970368ba177") + th.AssertEquals(t, n.DeviceOwner, "network:router_interface") + th.AssertEquals(t, n.MACAddress, "fa:16:3e:23:fd:d7") + th.AssertDeepEquals(t, n.FixedIPs, []IP{ + IP{SubnetID: "a0304c3a-4f08-4c43-88af-d796509c97d2", IPAddress: "10.0.0.1"}, + }) + th.AssertEquals(t, n.ID, "46d4bfb9-b26e-41f3-bd2e-e6dcc1ccedb2") + th.AssertDeepEquals(t, n.SecurityGroups, []string{}) + th.AssertEquals(t, n.Status, "ACTIVE") + th.AssertEquals(t, n.DeviceID, "5e3898d7-11be-483e-9732-b2f5eccd2b2e") +} + +func TestCreate(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + th.Mux.HandleFunc("/v2.0/ports", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "POST") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + th.TestHeader(t, r, "Content-Type", "application/json") + th.TestHeader(t, r, "Accept", "application/json") + th.TestJSONRequest(t, r, ` +{ + "port": { + "network_id": "a87cc70a-3e15-4acf-8205-9b711a3531b7", + "name": "private-port", + "admin_state_up": true, + "fixed_ips": [ + { + "subnet_id": 
"a0304c3a-4f08-4c43-88af-d796509c97d2", + "ip_address": "10.0.0.2" + } + ], + "security_groups": ["foo"] + } +} + `) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusCreated) + + fmt.Fprintf(w, ` +{ + "port": { + "status": "DOWN", + "name": "private-port", + "allowed_address_pairs": [], + "admin_state_up": true, + "network_id": "a87cc70a-3e15-4acf-8205-9b711a3531b7", + "tenant_id": "d6700c0c9ffa4f1cb322cd4a1f3906fa", + "device_owner": "", + "mac_address": "fa:16:3e:c9:cb:f0", + "fixed_ips": [ + { + "subnet_id": "a0304c3a-4f08-4c43-88af-d796509c97d2", + "ip_address": "10.0.0.2" + } + ], + "id": "65c0ee9f-d634-4522-8954-51021b570b0d", + "security_groups": [ + "f0ac4394-7e4a-4409-9701-ba8be283dbc3" + ], + "device_id": "" + } +} + `) + }) + + asu := true + options := CreateOpts{ + Name: "private-port", + AdminStateUp: &asu, + NetworkID: "a87cc70a-3e15-4acf-8205-9b711a3531b7", + FixedIPs: []IP{ + IP{SubnetID: "a0304c3a-4f08-4c43-88af-d796509c97d2", IPAddress: "10.0.0.2"}, + }, + SecurityGroups: []string{"foo"}, + } + n, err := Create(fake.ServiceClient(), options).Extract() + th.AssertNoErr(t, err) + + th.AssertEquals(t, n.Status, "DOWN") + th.AssertEquals(t, n.Name, "private-port") + th.AssertEquals(t, n.AdminStateUp, true) + th.AssertEquals(t, n.NetworkID, "a87cc70a-3e15-4acf-8205-9b711a3531b7") + th.AssertEquals(t, n.TenantID, "d6700c0c9ffa4f1cb322cd4a1f3906fa") + th.AssertEquals(t, n.DeviceOwner, "") + th.AssertEquals(t, n.MACAddress, "fa:16:3e:c9:cb:f0") + th.AssertDeepEquals(t, n.FixedIPs, []IP{ + IP{SubnetID: "a0304c3a-4f08-4c43-88af-d796509c97d2", IPAddress: "10.0.0.2"}, + }) + th.AssertEquals(t, n.ID, "65c0ee9f-d634-4522-8954-51021b570b0d") + th.AssertDeepEquals(t, n.SecurityGroups, []string{"f0ac4394-7e4a-4409-9701-ba8be283dbc3"}) +} + +func TestRequiredCreateOpts(t *testing.T) { + res := Create(fake.ServiceClient(), CreateOpts{}) + if res.Err == nil { + t.Fatalf("Expected error, got none") + } +} + +func TestUpdate(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + th.Mux.HandleFunc("/v2.0/ports/65c0ee9f-d634-4522-8954-51021b570b0d", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "PUT") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + th.TestHeader(t, r, "Content-Type", "application/json") + th.TestHeader(t, r, "Accept", "application/json") + th.TestJSONRequest(t, r, ` +{ + "port": { + "name": "new_port_name", + "fixed_ips": [ + { + "subnet_id": "a0304c3a-4f08-4c43-88af-d796509c97d2", + "ip_address": "10.0.0.3" + } + ], + "security_groups": [ + "f0ac4394-7e4a-4409-9701-ba8be283dbc3" + ] + } +} + `) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + + fmt.Fprintf(w, ` +{ + "port": { + "status": "DOWN", + "name": "new_port_name", + "admin_state_up": true, + "network_id": "a87cc70a-3e15-4acf-8205-9b711a3531b7", + "tenant_id": "d6700c0c9ffa4f1cb322cd4a1f3906fa", + "device_owner": "", + "mac_address": "fa:16:3e:c9:cb:f0", + "fixed_ips": [ + { + "subnet_id": "a0304c3a-4f08-4c43-88af-d796509c97d2", + "ip_address": "10.0.0.3" + } + ], + "id": "65c0ee9f-d634-4522-8954-51021b570b0d", + "security_groups": [ + "f0ac4394-7e4a-4409-9701-ba8be283dbc3" + ], + "device_id": "" + } +} + `) + }) + + options := UpdateOpts{ + Name: "new_port_name", + FixedIPs: []IP{ + IP{SubnetID: "a0304c3a-4f08-4c43-88af-d796509c97d2", IPAddress: "10.0.0.3"}, + }, + SecurityGroups: []string{"f0ac4394-7e4a-4409-9701-ba8be283dbc3"}, + } + + s, err := Update(fake.ServiceClient(), 
"65c0ee9f-d634-4522-8954-51021b570b0d", options).Extract() + th.AssertNoErr(t, err) + + th.AssertEquals(t, s.Name, "new_port_name") + th.AssertDeepEquals(t, s.FixedIPs, []IP{ + IP{SubnetID: "a0304c3a-4f08-4c43-88af-d796509c97d2", IPAddress: "10.0.0.3"}, + }) + th.AssertDeepEquals(t, s.SecurityGroups, []string{"f0ac4394-7e4a-4409-9701-ba8be283dbc3"}) +} + +func TestDelete(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + th.Mux.HandleFunc("/v2.0/ports/65c0ee9f-d634-4522-8954-51021b570b0d", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "DELETE") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + w.WriteHeader(http.StatusNoContent) + }) + + res := Delete(fake.ServiceClient(), "65c0ee9f-d634-4522-8954-51021b570b0d") + th.AssertNoErr(t, res.Err) +} diff --git a/vendor/github.com/rackspace/gophercloud/openstack/networking/v2/ports/results.go b/vendor/github.com/rackspace/gophercloud/openstack/networking/v2/ports/results.go new file mode 100644 index 000000000..2511ff53b --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/openstack/networking/v2/ports/results.go @@ -0,0 +1,126 @@ +package ports + +import ( + "github.com/mitchellh/mapstructure" + "github.com/rackspace/gophercloud" + "github.com/rackspace/gophercloud/pagination" +) + +type commonResult struct { + gophercloud.Result +} + +// Extract is a function that accepts a result and extracts a port resource. +func (r commonResult) Extract() (*Port, error) { + if r.Err != nil { + return nil, r.Err + } + + var res struct { + Port *Port `json:"port"` + } + + err := mapstructure.Decode(r.Body, &res) + + return res.Port, err +} + +// CreateResult represents the result of a create operation. +type CreateResult struct { + commonResult +} + +// GetResult represents the result of a get operation. +type GetResult struct { + commonResult +} + +// UpdateResult represents the result of an update operation. +type UpdateResult struct { + commonResult +} + +// DeleteResult represents the result of a delete operation. +type DeleteResult struct { + gophercloud.ErrResult +} + +// IP is a sub-struct that represents an individual IP. +type IP struct { + SubnetID string `mapstructure:"subnet_id" json:"subnet_id"` + IPAddress string `mapstructure:"ip_address" json:"ip_address,omitempty"` +} + +// Port represents a Neutron port. See package documentation for a top-level +// description of what this is. +type Port struct { + // UUID for the port. + ID string `mapstructure:"id" json:"id"` + // Network that this port is associated with. + NetworkID string `mapstructure:"network_id" json:"network_id"` + // Human-readable name for the port. Might not be unique. + Name string `mapstructure:"name" json:"name"` + // Administrative state of port. If false (down), port does not forward packets. + AdminStateUp bool `mapstructure:"admin_state_up" json:"admin_state_up"` + // Indicates whether network is currently operational. Possible values include + // `ACTIVE', `DOWN', `BUILD', or `ERROR'. Plug-ins might define additional values. + Status string `mapstructure:"status" json:"status"` + // Mac address to use on this port. + MACAddress string `mapstructure:"mac_address" json:"mac_address"` + // Specifies IP addresses for the port thus associating the port itself with + // the subnets where the IP addresses are picked from + FixedIPs []IP `mapstructure:"fixed_ips" json:"fixed_ips"` + // Owner of network. Only admin users can specify a tenant_id other than its own. 
+ TenantID string `mapstructure:"tenant_id" json:"tenant_id"` + // Identifies the entity (e.g.: dhcp agent) using this port. + DeviceOwner string `mapstructure:"device_owner" json:"device_owner"` + // Specifies the IDs of any security groups associated with a port. + SecurityGroups []string `mapstructure:"security_groups" json:"security_groups"` + // Identifies the device (e.g., virtual server) using this port. + DeviceID string `mapstructure:"device_id" json:"device_id"` +} + +// PortPage is the page returned by a pager when traversing over a collection +// of network ports. +type PortPage struct { + pagination.LinkedPageBase +} + +// NextPageURL is invoked when a paginated collection of ports has reached +// the end of a page and the pager seeks to traverse over a new one. In order +// to do this, it needs to construct the next page's URL. +func (p PortPage) NextPageURL() (string, error) { + type resp struct { + Links []gophercloud.Link `mapstructure:"ports_links"` + } + + var r resp + err := mapstructure.Decode(p.Body, &r) + if err != nil { + return "", err + } + + return gophercloud.ExtractNextURL(r.Links) +} + +// IsEmpty checks whether a PortPage struct is empty. +func (p PortPage) IsEmpty() (bool, error) { + is, err := ExtractPorts(p) + if err != nil { + return true, nil + } + return len(is) == 0, nil +} + +// ExtractPorts accepts a Page struct, specifically a PortPage struct, +// and extracts the elements into a slice of Port structs. In other words, +// a generic collection is mapped into a relevant slice. +func ExtractPorts(page pagination.Page) ([]Port, error) { + var resp struct { + Ports []Port `mapstructure:"ports" json:"ports"` + } + + err := mapstructure.Decode(page.(PortPage).Body, &resp) + + return resp.Ports, err +} diff --git a/vendor/github.com/rackspace/gophercloud/openstack/networking/v2/ports/urls.go b/vendor/github.com/rackspace/gophercloud/openstack/networking/v2/ports/urls.go new file mode 100644 index 000000000..6d0572f1f --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/openstack/networking/v2/ports/urls.go @@ -0,0 +1,31 @@ +package ports + +import "github.com/rackspace/gophercloud" + +func resourceURL(c *gophercloud.ServiceClient, id string) string { + return c.ServiceURL("ports", id) +} + +func rootURL(c *gophercloud.ServiceClient) string { + return c.ServiceURL("ports") +} + +func listURL(c *gophercloud.ServiceClient) string { + return rootURL(c) +} + +func getURL(c *gophercloud.ServiceClient, id string) string { + return resourceURL(c, id) +} + +func createURL(c *gophercloud.ServiceClient) string { + return rootURL(c) +} + +func updateURL(c *gophercloud.ServiceClient, id string) string { + return resourceURL(c, id) +} + +func deleteURL(c *gophercloud.ServiceClient, id string) string { + return resourceURL(c, id) +} diff --git a/vendor/github.com/rackspace/gophercloud/openstack/networking/v2/ports/urls_test.go b/vendor/github.com/rackspace/gophercloud/openstack/networking/v2/ports/urls_test.go new file mode 100644 index 000000000..7fadd4dcb --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/openstack/networking/v2/ports/urls_test.go @@ -0,0 +1,44 @@ +package ports + +import ( + "testing" + + "github.com/rackspace/gophercloud" + th "github.com/rackspace/gophercloud/testhelper" +) + +const endpoint = "http://localhost:57909/" + +func endpointClient() *gophercloud.ServiceClient { + return &gophercloud.ServiceClient{Endpoint: endpoint, ResourceBase: endpoint + "v2.0/"} +} + +func TestListURL(t *testing.T) { + actual := 
listURL(endpointClient()) + expected := endpoint + "v2.0/ports" + th.AssertEquals(t, expected, actual) +} + +func TestGetURL(t *testing.T) { + actual := getURL(endpointClient(), "foo") + expected := endpoint + "v2.0/ports/foo" + th.AssertEquals(t, expected, actual) +} + +func TestCreateURL(t *testing.T) { + actual := createURL(endpointClient()) + expected := endpoint + "v2.0/ports" + th.AssertEquals(t, expected, actual) +} + +func TestUpdateURL(t *testing.T) { + actual := updateURL(endpointClient(), "foo") + expected := endpoint + "v2.0/ports/foo" + th.AssertEquals(t, expected, actual) +} + +func TestDeleteURL(t *testing.T) { + actual := deleteURL(endpointClient(), "foo") + expected := endpoint + "v2.0/ports/foo" + th.AssertEquals(t, expected, actual) +} diff --git a/vendor/github.com/rackspace/gophercloud/openstack/networking/v2/subnets/doc.go b/vendor/github.com/rackspace/gophercloud/openstack/networking/v2/subnets/doc.go new file mode 100644 index 000000000..43e8296c7 --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/openstack/networking/v2/subnets/doc.go @@ -0,0 +1,10 @@ +// Package subnets contains functionality for working with Neutron subnet +// resources. A subnet represents an IP address block that can be used to +// assign IP addresses to virtual instances. Each subnet must have a CIDR and +// must be associated with a network. IPs can either be selected from the whole +// subnet CIDR or from allocation pools specified by the user. +// +// A subnet can also have a gateway, a list of DNS name servers, and host routes. +// This information is pushed to instances whose interfaces are associated with +// the subnet. +package subnets diff --git a/vendor/github.com/rackspace/gophercloud/openstack/networking/v2/subnets/errors.go b/vendor/github.com/rackspace/gophercloud/openstack/networking/v2/subnets/errors.go new file mode 100644 index 000000000..0db0a6e60 --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/openstack/networking/v2/subnets/errors.go @@ -0,0 +1,13 @@ +package subnets + +import "fmt" + +func err(str string) error { + return fmt.Errorf("%s", str) +} + +var ( + errNetworkIDRequired = err("A network ID is required") + errCIDRRequired = err("A valid CIDR is required") + errInvalidIPType = err("An IP type must either be 4 or 6") +) diff --git a/vendor/github.com/rackspace/gophercloud/openstack/networking/v2/subnets/requests.go b/vendor/github.com/rackspace/gophercloud/openstack/networking/v2/subnets/requests.go new file mode 100644 index 000000000..6cde048ed --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/openstack/networking/v2/subnets/requests.go @@ -0,0 +1,271 @@ +package subnets + +import ( + "fmt" + + "github.com/rackspace/gophercloud" + "github.com/rackspace/gophercloud/pagination" +) + +// AdminState gives users a solid type to work with for create and update +// operations. It is recommended that users use the `Up` and `Down` enums. +type AdminState *bool + +// Convenience vars for AdminStateUp values. +var ( + iTrue = true + iFalse = false + + Up AdminState = &iTrue + Down AdminState = &iFalse +) + +// ListOptsBuilder allows extensions to add additional parameters to the +// List request. +type ListOptsBuilder interface { + ToSubnetListQuery() (string, error) +} + +// ListOpts allows the filtering and sorting of paginated collections through +// the API. Filtering is achieved by passing in struct field values that map to +// the subnet attributes you want to see returned. SortKey allows you to sort +// by a particular subnet attribute. 
SortDir sets the direction, and is either +// `asc' or `desc'. Marker and Limit are used for pagination. +type ListOpts struct { + Name string `q:"name"` + EnableDHCP *bool `q:"enable_dhcp"` + NetworkID string `q:"network_id"` + TenantID string `q:"tenant_id"` + IPVersion int `q:"ip_version"` + GatewayIP string `q:"gateway_ip"` + CIDR string `q:"cidr"` + ID string `q:"id"` + Limit int `q:"limit"` + Marker string `q:"marker"` + SortKey string `q:"sort_key"` + SortDir string `q:"sort_dir"` +} + +// ToSubnetListQuery formats a ListOpts into a query string. +func (opts ListOpts) ToSubnetListQuery() (string, error) { + q, err := gophercloud.BuildQueryString(opts) + if err != nil { + return "", err + } + return q.String(), nil +} + +// List returns a Pager which allows you to iterate over a collection of +// subnets. It accepts a ListOpts struct, which allows you to filter and sort +// the returned collection for greater efficiency. +// +// Default policy settings return only those subnets that are owned by the tenant +// who submits the request, unless the request is submitted by a user with +// administrative rights. +func List(c *gophercloud.ServiceClient, opts ListOptsBuilder) pagination.Pager { + url := listURL(c) + if opts != nil { + query, err := opts.ToSubnetListQuery() + if err != nil { + return pagination.Pager{Err: err} + } + url += query + } + + return pagination.NewPager(c, url, func(r pagination.PageResult) pagination.Page { + return SubnetPage{pagination.LinkedPageBase{PageResult: r}} + }) +} + +// Get retrieves a specific subnet based on its unique ID. +func Get(c *gophercloud.ServiceClient, id string) GetResult { + var res GetResult + _, res.Err = c.Get(getURL(c, id), &res.Body, nil) + return res +} + +// Valid IP types +const ( + IPv4 = 4 + IPv6 = 6 +) + +// CreateOptsBuilder is the interface options structs have to satisfy in order +// to be used in the main Create operation in this package. Since many +// extensions decorate or modify the common logic, it is useful for them to +// satisfy a basic interface in order for them to be used. +type CreateOptsBuilder interface { + ToSubnetCreateMap() (map[string]interface{}, error) +} + +// CreateOpts represents the attributes used when creating a new subnet. +type CreateOpts struct { + // Required + NetworkID string + CIDR string + // Optional + Name string + TenantID string + AllocationPools []AllocationPool + GatewayIP string + IPVersion int + EnableDHCP *bool + DNSNameservers []string + HostRoutes []HostRoute +} + +// ToSubnetCreateMap casts a CreateOpts struct to a map. 
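The validation rules just below (NetworkID and CIDR mandatory, IPVersion restricted to the IPv4/IPv6 constants) shape the minimal create call; a sketch with placeholder IDs and addresses:

package example

import (
	"fmt"

	"github.com/rackspace/gophercloud"
	"github.com/rackspace/gophercloud/openstack/networking/v2/subnets"
)

// createSubnet shows the smallest valid CreateOpts plus one optional field.
func createSubnet(client *gophercloud.ServiceClient) error {
	s, err := subnets.Create(client, subnets.CreateOpts{
		NetworkID: "d32019d3-bc6e-4319-9c1d-6722fc136a22", // placeholder
		CIDR:      "192.168.199.0/24",
		IPVersion: subnets.IPv4, // anything other than 4 or 6 is rejected client-side
		AllocationPools: []subnets.AllocationPool{
			{Start: "192.168.199.2", End: "192.168.199.254"},
		},
	}).Extract()
	if err != nil {
		return err
	}
	fmt.Println("created subnet", s.ID, "with gateway", s.GatewayIP)
	return nil
}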
+func (opts CreateOpts) ToSubnetCreateMap() (map[string]interface{}, error) { + s := make(map[string]interface{}) + + if opts.NetworkID == "" { + return nil, errNetworkIDRequired + } + if opts.CIDR == "" { + return nil, errCIDRRequired + } + if opts.IPVersion != 0 && opts.IPVersion != IPv4 && opts.IPVersion != IPv6 { + return nil, errInvalidIPType + } + + s["network_id"] = opts.NetworkID + s["cidr"] = opts.CIDR + + if opts.EnableDHCP != nil { + s["enable_dhcp"] = &opts.EnableDHCP + } + if opts.Name != "" { + s["name"] = opts.Name + } + if opts.GatewayIP != "" { + s["gateway_ip"] = opts.GatewayIP + } + if opts.TenantID != "" { + s["tenant_id"] = opts.TenantID + } + if opts.IPVersion != 0 { + s["ip_version"] = opts.IPVersion + } + if len(opts.AllocationPools) != 0 { + s["allocation_pools"] = opts.AllocationPools + } + if len(opts.DNSNameservers) != 0 { + s["dns_nameservers"] = opts.DNSNameservers + } + if len(opts.HostRoutes) != 0 { + s["host_routes"] = opts.HostRoutes + } + + return map[string]interface{}{"subnet": s}, nil +} + +// Create accepts a CreateOpts struct and creates a new subnet using the values +// provided. You must remember to provide a valid NetworkID, CIDR and IP version. +func Create(c *gophercloud.ServiceClient, opts CreateOptsBuilder) CreateResult { + var res CreateResult + + reqBody, err := opts.ToSubnetCreateMap() + if err != nil { + res.Err = err + return res + } + + _, res.Err = c.Post(createURL(c), reqBody, &res.Body, nil) + return res +} + +// UpdateOptsBuilder allows extensions to add additional parameters to the +// Update request. +type UpdateOptsBuilder interface { + ToSubnetUpdateMap() (map[string]interface{}, error) +} + +// UpdateOpts represents the attributes used when updating an existing subnet. +type UpdateOpts struct { + Name string + GatewayIP string + DNSNameservers []string + HostRoutes []HostRoute + EnableDHCP *bool +} + +// ToSubnetUpdateMap casts an UpdateOpts struct to a map. +func (opts UpdateOpts) ToSubnetUpdateMap() (map[string]interface{}, error) { + s := make(map[string]interface{}) + + if opts.EnableDHCP != nil { + s["enable_dhcp"] = &opts.EnableDHCP + } + if opts.Name != "" { + s["name"] = opts.Name + } + if opts.GatewayIP != "" { + s["gateway_ip"] = opts.GatewayIP + } + if opts.DNSNameservers != nil { + s["dns_nameservers"] = opts.DNSNameservers + } + if opts.HostRoutes != nil { + s["host_routes"] = opts.HostRoutes + } + + return map[string]interface{}{"subnet": s}, nil +} + +// Update accepts a UpdateOpts struct and updates an existing subnet using the +// values provided. +func Update(c *gophercloud.ServiceClient, id string, opts UpdateOptsBuilder) UpdateResult { + var res UpdateResult + + reqBody, err := opts.ToSubnetUpdateMap() + if err != nil { + res.Err = err + return res + } + + _, res.Err = c.Put(updateURL(c, id), reqBody, &res.Body, &gophercloud.RequestOpts{ + OkCodes: []int{200, 201}, + }) + + return res +} + +// Delete accepts a unique ID and deletes the subnet associated with it. +func Delete(c *gophercloud.ServiceClient, id string) DeleteResult { + var res DeleteResult + _, res.Err = c.Delete(deleteURL(c, id), nil) + return res +} + +// IDFromName is a convenience function that returns a subnet's ID given its name. 
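Since names are not unique in Neutron, IDFromName (below) errors on zero or multiple matches; a typical caller resolves the name once and then works with the ID:

package example

import (
	"github.com/rackspace/gophercloud"
	"github.com/rackspace/gophercloud/openstack/networking/v2/subnets"
)

// subnetCIDRByName is a small sketch: resolve a human-readable name to an
// ID, then fetch the full resource to read its CIDR.
func subnetCIDRByName(client *gophercloud.ServiceClient, name string) (string, error) {
	id, err := subnets.IDFromName(client, name)
	if err != nil {
		return "", err // not found, or ambiguous
	}
	s, err := subnets.Get(client, id).Extract()
	if err != nil {
		return "", err
	}
	return s.CIDR, nil
}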
+func IDFromName(client *gophercloud.ServiceClient, name string) (string, error) { + subnetCount := 0 + subnetID := "" + if name == "" { + return "", fmt.Errorf("A subnet name must be provided.") + } + pager := List(client, nil) + pager.EachPage(func(page pagination.Page) (bool, error) { + subnetList, err := ExtractSubnets(page) + if err != nil { + return false, err + } + + for _, s := range subnetList { + if s.Name == name { + subnetCount++ + subnetID = s.ID + } + } + return true, nil + }) + + switch subnetCount { + case 0: + return "", fmt.Errorf("Unable to find subnet: %s", name) + case 1: + return subnetID, nil + default: + return "", fmt.Errorf("Found %d subnets matching %s", subnetCount, name) + } +} diff --git a/vendor/github.com/rackspace/gophercloud/openstack/networking/v2/subnets/requests_test.go b/vendor/github.com/rackspace/gophercloud/openstack/networking/v2/subnets/requests_test.go new file mode 100644 index 000000000..987064ada --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/openstack/networking/v2/subnets/requests_test.go @@ -0,0 +1,362 @@ +package subnets + +import ( + "fmt" + "net/http" + "testing" + + fake "github.com/rackspace/gophercloud/openstack/networking/v2/common" + "github.com/rackspace/gophercloud/pagination" + th "github.com/rackspace/gophercloud/testhelper" +) + +func TestList(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + th.Mux.HandleFunc("/v2.0/subnets", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + + fmt.Fprintf(w, ` +{ + "subnets": [ + { + "name": "private-subnet", + "enable_dhcp": true, + "network_id": "db193ab3-96e3-4cb3-8fc5-05f4296d0324", + "tenant_id": "26a7980765d0414dbc1fc1f88cdb7e6e", + "dns_nameservers": [], + "allocation_pools": [ + { + "start": "10.0.0.2", + "end": "10.0.0.254" + } + ], + "host_routes": [], + "ip_version": 4, + "gateway_ip": "10.0.0.1", + "cidr": "10.0.0.0/24", + "id": "08eae331-0402-425a-923c-34f7cfe39c1b" + }, + { + "name": "my_subnet", + "enable_dhcp": true, + "network_id": "d32019d3-bc6e-4319-9c1d-6722fc136a22", + "tenant_id": "4fd44f30292945e481c7b8a0c8908869", + "dns_nameservers": [], + "allocation_pools": [ + { + "start": "192.0.0.2", + "end": "192.255.255.254" + } + ], + "host_routes": [], + "ip_version": 4, + "gateway_ip": "192.0.0.1", + "cidr": "192.0.0.0/8", + "id": "54d6f61d-db07-451c-9ab3-b9609b6b6f0b" + } + ] +} + `) + }) + + count := 0 + + List(fake.ServiceClient(), ListOpts{}).EachPage(func(page pagination.Page) (bool, error) { + count++ + actual, err := ExtractSubnets(page) + if err != nil { + t.Errorf("Failed to extract subnets: %v", err) + return false, nil + } + + expected := []Subnet{ + Subnet{ + Name: "private-subnet", + EnableDHCP: true, + NetworkID: "db193ab3-96e3-4cb3-8fc5-05f4296d0324", + TenantID: "26a7980765d0414dbc1fc1f88cdb7e6e", + DNSNameservers: []string{}, + AllocationPools: []AllocationPool{ + AllocationPool{ + Start: "10.0.0.2", + End: "10.0.0.254", + }, + }, + HostRoutes: []HostRoute{}, + IPVersion: 4, + GatewayIP: "10.0.0.1", + CIDR: "10.0.0.0/24", + ID: "08eae331-0402-425a-923c-34f7cfe39c1b", + }, + Subnet{ + Name: "my_subnet", + EnableDHCP: true, + NetworkID: "d32019d3-bc6e-4319-9c1d-6722fc136a22", + TenantID: "4fd44f30292945e481c7b8a0c8908869", + DNSNameservers: []string{}, + AllocationPools: []AllocationPool{ + AllocationPool{ + Start: "192.0.0.2", + End: "192.255.255.254", + }, + }, + 
HostRoutes: []HostRoute{}, + IPVersion: 4, + GatewayIP: "192.0.0.1", + CIDR: "192.0.0.0/8", + ID: "54d6f61d-db07-451c-9ab3-b9609b6b6f0b", + }, + } + + th.CheckDeepEquals(t, expected, actual) + + return true, nil + }) + + if count != 1 { + t.Errorf("Expected 1 page, got %d", count) + } +} + +func TestGet(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + th.Mux.HandleFunc("/v2.0/subnets/54d6f61d-db07-451c-9ab3-b9609b6b6f0b", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + + fmt.Fprintf(w, ` +{ + "subnet": { + "name": "my_subnet", + "enable_dhcp": true, + "network_id": "d32019d3-bc6e-4319-9c1d-6722fc136a22", + "tenant_id": "4fd44f30292945e481c7b8a0c8908869", + "dns_nameservers": [], + "allocation_pools": [ + { + "start": "192.0.0.2", + "end": "192.255.255.254" + } + ], + "host_routes": [], + "ip_version": 4, + "gateway_ip": "192.0.0.1", + "cidr": "192.0.0.0/8", + "id": "54d6f61d-db07-451c-9ab3-b9609b6b6f0b" + } +} + `) + }) + + s, err := Get(fake.ServiceClient(), "54d6f61d-db07-451c-9ab3-b9609b6b6f0b").Extract() + th.AssertNoErr(t, err) + + th.AssertEquals(t, s.Name, "my_subnet") + th.AssertEquals(t, s.EnableDHCP, true) + th.AssertEquals(t, s.NetworkID, "d32019d3-bc6e-4319-9c1d-6722fc136a22") + th.AssertEquals(t, s.TenantID, "4fd44f30292945e481c7b8a0c8908869") + th.AssertDeepEquals(t, s.DNSNameservers, []string{}) + th.AssertDeepEquals(t, s.AllocationPools, []AllocationPool{ + AllocationPool{ + Start: "192.0.0.2", + End: "192.255.255.254", + }, + }) + th.AssertDeepEquals(t, s.HostRoutes, []HostRoute{}) + th.AssertEquals(t, s.IPVersion, 4) + th.AssertEquals(t, s.GatewayIP, "192.0.0.1") + th.AssertEquals(t, s.CIDR, "192.0.0.0/8") + th.AssertEquals(t, s.ID, "54d6f61d-db07-451c-9ab3-b9609b6b6f0b") +} + +func TestCreate(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + th.Mux.HandleFunc("/v2.0/subnets", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "POST") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + th.TestHeader(t, r, "Content-Type", "application/json") + th.TestHeader(t, r, "Accept", "application/json") + th.TestJSONRequest(t, r, ` +{ + "subnet": { + "network_id": "d32019d3-bc6e-4319-9c1d-6722fc136a22", + "ip_version": 4, + "cidr": "192.168.199.0/24", + "dns_nameservers": ["foo"], + "allocation_pools": [ + { + "start": "192.168.199.2", + "end": "192.168.199.254" + } + ], + "host_routes": [{"destination":"","nexthop": "bar"}] + } +} + `) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusCreated) + + fmt.Fprintf(w, ` +{ + "subnet": { + "name": "", + "enable_dhcp": true, + "network_id": "d32019d3-bc6e-4319-9c1d-6722fc136a22", + "tenant_id": "4fd44f30292945e481c7b8a0c8908869", + "dns_nameservers": [], + "allocation_pools": [ + { + "start": "192.168.199.2", + "end": "192.168.199.254" + } + ], + "host_routes": [], + "ip_version": 4, + "gateway_ip": "192.168.199.1", + "cidr": "192.168.199.0/24", + "id": "3b80198d-4f7b-4f77-9ef5-774d54e17126" + } +} + `) + }) + + opts := CreateOpts{ + NetworkID: "d32019d3-bc6e-4319-9c1d-6722fc136a22", + IPVersion: 4, + CIDR: "192.168.199.0/24", + AllocationPools: []AllocationPool{ + AllocationPool{ + Start: "192.168.199.2", + End: "192.168.199.254", + }, + }, + DNSNameservers: []string{"foo"}, + HostRoutes: []HostRoute{ + HostRoute{NextHop: "bar"}, + }, + } + s, err := Create(fake.ServiceClient(), opts).Extract() + 
th.AssertNoErr(t, err) + + th.AssertEquals(t, s.Name, "") + th.AssertEquals(t, s.EnableDHCP, true) + th.AssertEquals(t, s.NetworkID, "d32019d3-bc6e-4319-9c1d-6722fc136a22") + th.AssertEquals(t, s.TenantID, "4fd44f30292945e481c7b8a0c8908869") + th.AssertDeepEquals(t, s.DNSNameservers, []string{}) + th.AssertDeepEquals(t, s.AllocationPools, []AllocationPool{ + AllocationPool{ + Start: "192.168.199.2", + End: "192.168.199.254", + }, + }) + th.AssertDeepEquals(t, s.HostRoutes, []HostRoute{}) + th.AssertEquals(t, s.IPVersion, 4) + th.AssertEquals(t, s.GatewayIP, "192.168.199.1") + th.AssertEquals(t, s.CIDR, "192.168.199.0/24") + th.AssertEquals(t, s.ID, "3b80198d-4f7b-4f77-9ef5-774d54e17126") +} + +func TestRequiredCreateOpts(t *testing.T) { + res := Create(fake.ServiceClient(), CreateOpts{}) + if res.Err == nil { + t.Fatalf("Expected error, got none") + } + + res = Create(fake.ServiceClient(), CreateOpts{NetworkID: "foo"}) + if res.Err == nil { + t.Fatalf("Expected error, got none") + } + + res = Create(fake.ServiceClient(), CreateOpts{NetworkID: "foo", CIDR: "bar", IPVersion: 40}) + if res.Err == nil { + t.Fatalf("Expected error, got none") + } +} + +func TestUpdate(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + th.Mux.HandleFunc("/v2.0/subnets/08eae331-0402-425a-923c-34f7cfe39c1b", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "PUT") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + th.TestHeader(t, r, "Content-Type", "application/json") + th.TestHeader(t, r, "Accept", "application/json") + th.TestJSONRequest(t, r, ` +{ + "subnet": { + "name": "my_new_subnet", + "dns_nameservers": ["foo"], + "host_routes": [{"destination":"","nexthop": "bar"}] + } +} + `) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusCreated) + + fmt.Fprintf(w, ` +{ + "subnet": { + "name": "my_new_subnet", + "enable_dhcp": true, + "network_id": "db193ab3-96e3-4cb3-8fc5-05f4296d0324", + "tenant_id": "26a7980765d0414dbc1fc1f88cdb7e6e", + "dns_nameservers": [], + "allocation_pools": [ + { + "start": "10.0.0.2", + "end": "10.0.0.254" + } + ], + "host_routes": [], + "ip_version": 4, + "gateway_ip": "10.0.0.1", + "cidr": "10.0.0.0/24", + "id": "08eae331-0402-425a-923c-34f7cfe39c1b" + } +} + `) + }) + + opts := UpdateOpts{ + Name: "my_new_subnet", + DNSNameservers: []string{"foo"}, + HostRoutes: []HostRoute{ + HostRoute{NextHop: "bar"}, + }, + } + s, err := Update(fake.ServiceClient(), "08eae331-0402-425a-923c-34f7cfe39c1b", opts).Extract() + th.AssertNoErr(t, err) + + th.AssertEquals(t, s.Name, "my_new_subnet") + th.AssertEquals(t, s.ID, "08eae331-0402-425a-923c-34f7cfe39c1b") +} + +func TestDelete(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + th.Mux.HandleFunc("/v2.0/subnets/08eae331-0402-425a-923c-34f7cfe39c1b", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "DELETE") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + w.WriteHeader(http.StatusNoContent) + }) + + res := Delete(fake.ServiceClient(), "08eae331-0402-425a-923c-34f7cfe39c1b") + th.AssertNoErr(t, res.Err) +} diff --git a/vendor/github.com/rackspace/gophercloud/openstack/networking/v2/subnets/results.go b/vendor/github.com/rackspace/gophercloud/openstack/networking/v2/subnets/results.go new file mode 100644 index 000000000..77b956a17 --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/openstack/networking/v2/subnets/results.go @@ -0,0 +1,132 @@ +package subnets + +import ( + "github.com/mitchellh/mapstructure" + 
"github.com/rackspace/gophercloud" + "github.com/rackspace/gophercloud/pagination" +) + +type commonResult struct { + gophercloud.Result +} + +// Extract is a function that accepts a result and extracts a subnet resource. +func (r commonResult) Extract() (*Subnet, error) { + if r.Err != nil { + return nil, r.Err + } + + var res struct { + Subnet *Subnet `json:"subnet"` + } + + err := mapstructure.Decode(r.Body, &res) + + return res.Subnet, err +} + +// CreateResult represents the result of a create operation. +type CreateResult struct { + commonResult +} + +// GetResult represents the result of a get operation. +type GetResult struct { + commonResult +} + +// UpdateResult represents the result of an update operation. +type UpdateResult struct { + commonResult +} + +// DeleteResult represents the result of a delete operation. +type DeleteResult struct { + gophercloud.ErrResult +} + +// AllocationPool represents a sub-range of cidr available for dynamic +// allocation to ports, e.g. {Start: "10.0.0.2", End: "10.0.0.254"} +type AllocationPool struct { + Start string `json:"start"` + End string `json:"end"` +} + +// HostRoute represents a route that should be used by devices with IPs from +// a subnet (not including local subnet route). +type HostRoute struct { + DestinationCIDR string `mapstructure:"destination" json:"destination"` + NextHop string `mapstructure:"nexthop" json:"nexthop"` +} + +// Subnet represents a subnet. See package documentation for a top-level +// description of what this is. +type Subnet struct { + // UUID representing the subnet + ID string `mapstructure:"id" json:"id"` + // UUID of the parent network + NetworkID string `mapstructure:"network_id" json:"network_id"` + // Human-readable name for the subnet. Might not be unique. + Name string `mapstructure:"name" json:"name"` + // IP version, either `4' or `6' + IPVersion int `mapstructure:"ip_version" json:"ip_version"` + // CIDR representing IP range for this subnet, based on IP version + CIDR string `mapstructure:"cidr" json:"cidr"` + // Default gateway used by devices in this subnet + GatewayIP string `mapstructure:"gateway_ip" json:"gateway_ip"` + // DNS name servers used by hosts in this subnet. + DNSNameservers []string `mapstructure:"dns_nameservers" json:"dns_nameservers"` + // Sub-ranges of CIDR available for dynamic allocation to ports. See AllocationPool. + AllocationPools []AllocationPool `mapstructure:"allocation_pools" json:"allocation_pools"` + // Routes that should be used by devices with IPs from this subnet (not including local subnet route). + HostRoutes []HostRoute `mapstructure:"host_routes" json:"host_routes"` + // Specifies whether DHCP is enabled for this subnet or not. + EnableDHCP bool `mapstructure:"enable_dhcp" json:"enable_dhcp"` + // Owner of network. Only admin users can specify a tenant_id other than its own. + TenantID string `mapstructure:"tenant_id" json:"tenant_id"` +} + +// SubnetPage is the page returned by a pager when traversing over a collection +// of subnets. +type SubnetPage struct { + pagination.LinkedPageBase +} + +// NextPageURL is invoked when a paginated collection of subnets has reached +// the end of a page and the pager seeks to traverse over a new one. In order +// to do this, it needs to construct the next page's URL. 
+func (p SubnetPage) NextPageURL() (string, error) { + type resp struct { + Links []gophercloud.Link `mapstructure:"subnets_links"` + } + + var r resp + err := mapstructure.Decode(p.Body, &r) + if err != nil { + return "", err + } + + return gophercloud.ExtractNextURL(r.Links) +} + +// IsEmpty checks whether a SubnetPage struct is empty. +func (p SubnetPage) IsEmpty() (bool, error) { + is, err := ExtractSubnets(p) + if err != nil { + return true, nil + } + return len(is) == 0, nil +} + +// ExtractSubnets accepts a Page struct, specifically a SubnetPage struct, +// and extracts the elements into a slice of Subnet structs. In other words, +// a generic collection is mapped into a relevant slice. +func ExtractSubnets(page pagination.Page) ([]Subnet, error) { + var resp struct { + Subnets []Subnet `mapstructure:"subnets" json:"subnets"` + } + + err := mapstructure.Decode(page.(SubnetPage).Body, &resp) + + return resp.Subnets, err +} diff --git a/vendor/github.com/rackspace/gophercloud/openstack/networking/v2/subnets/results_test.go b/vendor/github.com/rackspace/gophercloud/openstack/networking/v2/subnets/results_test.go new file mode 100644 index 000000000..d4048388c --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/openstack/networking/v2/subnets/results_test.go @@ -0,0 +1,54 @@ +package subnets + +import ( + "encoding/json" + "github.com/rackspace/gophercloud" + th "github.com/rackspace/gophercloud/testhelper" + "testing" +) + +func TestHostRoute(t *testing.T) { + sejson := []byte(` + {"subnet": { + "name": "test-subnet", + "enable_dhcp": false, + "network_id": "3e66c41e-cbbd-4019-9aab-740b7e4150a0", + "tenant_id": "f86e123198cf42d19c8854c5f80c2f06", + "dns_nameservers": [], + "gateway_ip": "172.16.0.1", + "ipv6_ra_mode": null, + "allocation_pools": [ + { + "start": "172.16.0.2", + "end": "172.16.255.254" + } + ], + "host_routes": [ + { + "destination": "172.20.1.0/24", + "nexthop": "172.16.0.2" + } + ], + "ip_version": 4, + "ipv6_address_mode": null, + "cidr": "172.16.0.0/16", + "id": "6dcaa873-7115-41af-9ef5-915f73636e43", + "subnetpool_id": null + }} +`) + + var dejson interface{} + err := json.Unmarshal(sejson, &dejson) + if err != nil { + t.Fatalf("%s", err) + } + + resp := commonResult{gophercloud.Result{Body: dejson}} + subnet, err := resp.Extract() + if err != nil { + t.Fatalf("%s", err) + } + route := subnet.HostRoutes[0] + th.AssertEquals(t, route.NextHop, "172.16.0.2") + th.AssertEquals(t, route.DestinationCIDR, "172.20.1.0/24") +} diff --git a/vendor/github.com/rackspace/gophercloud/openstack/networking/v2/subnets/urls.go b/vendor/github.com/rackspace/gophercloud/openstack/networking/v2/subnets/urls.go new file mode 100644 index 000000000..0d0236894 --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/openstack/networking/v2/subnets/urls.go @@ -0,0 +1,31 @@ +package subnets + +import "github.com/rackspace/gophercloud" + +func resourceURL(c *gophercloud.ServiceClient, id string) string { + return c.ServiceURL("subnets", id) +} + +func rootURL(c *gophercloud.ServiceClient) string { + return c.ServiceURL("subnets") +} + +func listURL(c *gophercloud.ServiceClient) string { + return rootURL(c) +} + +func getURL(c *gophercloud.ServiceClient, id string) string { + return resourceURL(c, id) +} + +func createURL(c *gophercloud.ServiceClient) string { + return rootURL(c) +} + +func updateURL(c *gophercloud.ServiceClient, id string) string { + return resourceURL(c, id) +} + +func deleteURL(c *gophercloud.ServiceClient, id string) string { + return resourceURL(c, id) +} diff 
--git a/vendor/github.com/rackspace/gophercloud/openstack/networking/v2/subnets/urls_test.go b/vendor/github.com/rackspace/gophercloud/openstack/networking/v2/subnets/urls_test.go new file mode 100644 index 000000000..aeeddf354 --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/openstack/networking/v2/subnets/urls_test.go @@ -0,0 +1,44 @@ +package subnets + +import ( + "testing" + + "github.com/rackspace/gophercloud" + th "github.com/rackspace/gophercloud/testhelper" +) + +const endpoint = "http://localhost:57909/" + +func endpointClient() *gophercloud.ServiceClient { + return &gophercloud.ServiceClient{Endpoint: endpoint, ResourceBase: endpoint + "v2.0/"} +} + +func TestListURL(t *testing.T) { + actual := listURL(endpointClient()) + expected := endpoint + "v2.0/subnets" + th.AssertEquals(t, expected, actual) +} + +func TestGetURL(t *testing.T) { + actual := getURL(endpointClient(), "foo") + expected := endpoint + "v2.0/subnets/foo" + th.AssertEquals(t, expected, actual) +} + +func TestCreateURL(t *testing.T) { + actual := createURL(endpointClient()) + expected := endpoint + "v2.0/subnets" + th.AssertEquals(t, expected, actual) +} + +func TestUpdateURL(t *testing.T) { + actual := updateURL(endpointClient(), "foo") + expected := endpoint + "v2.0/subnets/foo" + th.AssertEquals(t, expected, actual) +} + +func TestDeleteURL(t *testing.T) { + actual := deleteURL(endpointClient(), "foo") + expected := endpoint + "v2.0/subnets/foo" + th.AssertEquals(t, expected, actual) +} diff --git a/vendor/github.com/rackspace/gophercloud/openstack/objectstorage/v1/accounts/doc.go b/vendor/github.com/rackspace/gophercloud/openstack/objectstorage/v1/accounts/doc.go new file mode 100644 index 000000000..f5f894a9e --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/openstack/objectstorage/v1/accounts/doc.go @@ -0,0 +1,8 @@ +// Package accounts contains functionality for working with Object Storage +// account resources. An account is the top-level resource in the object storage +// hierarchy: containers belong to accounts, objects belong to containers. +// +// Another way of thinking of an account is like a namespace for all your +// resources. It is synonymous with a project or tenant in other OpenStack +// services. +package accounts diff --git a/vendor/github.com/rackspace/gophercloud/openstack/objectstorage/v1/accounts/fixtures.go b/vendor/github.com/rackspace/gophercloud/openstack/objectstorage/v1/accounts/fixtures.go new file mode 100644 index 000000000..f22b68700 --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/openstack/objectstorage/v1/accounts/fixtures.go @@ -0,0 +1,38 @@ +// +build fixtures + +package accounts + +import ( + "net/http" + "testing" + + th "github.com/rackspace/gophercloud/testhelper" + fake "github.com/rackspace/gophercloud/testhelper/client" +) + +// HandleGetAccountSuccessfully creates an HTTP handler at `/` on the test handler mux that +// responds with a `Get` response. +func HandleGetAccountSuccessfully(t *testing.T) { + th.Mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "HEAD") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + + w.Header().Set("X-Account-Container-Count", "2") + w.Header().Set("X-Account-Bytes-Used", "14") + w.Header().Set("X-Account-Meta-Subject", "books") + + w.WriteHeader(http.StatusNoContent) + }) +} + +// HandleUpdateAccountSuccessfully creates an HTTP handler at `/` on the test handler mux that +// responds with an `Update` response.
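These fixtures are consumed by the package's own tests (requests_test.go later in this patch); a condensed sketch of the Get flow they enable:

package accounts

import (
	"testing"

	th "github.com/rackspace/gophercloud/testhelper"
	fake "github.com/rackspace/gophercloud/testhelper/client"
)

// The canned HEAD handler above returns X-Account-Meta-Subject: books, so
// ExtractMetadata should surface it with the header prefix stripped.
func TestGetAccountMetadataSketch(t *testing.T) {
	th.SetupHTTP()
	defer th.TeardownHTTP()
	HandleGetAccountSuccessfully(t)

	res := Get(fake.ServiceClient(), &GetOpts{})
	th.AssertNoErr(t, res.Err)

	metadata, err := res.ExtractMetadata()
	th.AssertNoErr(t, err)
	th.CheckDeepEquals(t, map[string]string{"Subject": "books"}, metadata)
}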
+func HandleUpdateAccountSuccessfully(t *testing.T) { + th.Mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "POST") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + th.TestHeader(t, r, "X-Account-Meta-Gophercloud-Test", "accounts") + + w.WriteHeader(http.StatusNoContent) + }) +} diff --git a/vendor/github.com/rackspace/gophercloud/openstack/objectstorage/v1/accounts/requests.go b/vendor/github.com/rackspace/gophercloud/openstack/objectstorage/v1/accounts/requests.go new file mode 100644 index 000000000..66c46a978 --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/openstack/objectstorage/v1/accounts/requests.go @@ -0,0 +1,107 @@ +package accounts + +import "github.com/rackspace/gophercloud" + +// GetOptsBuilder allows extensions to add additional headers to the Get +// request. +type GetOptsBuilder interface { + ToAccountGetMap() (map[string]string, error) +} + +// GetOpts is a structure that contains parameters for getting an account's +// metadata. +type GetOpts struct { + Newest bool `h:"X-Newest"` +} + +// ToAccountGetMap formats a GetOpts into a map[string]string of headers. +func (opts GetOpts) ToAccountGetMap() (map[string]string, error) { + return gophercloud.BuildHeaders(opts) +} + +// Get is a function that retrieves an account's metadata. To extract just the +// custom metadata, call the ExtractMetadata method on the GetResult. To extract +// all the headers that are returned (including the metadata), call the +// ExtractHeader method on the GetResult. +func Get(c *gophercloud.ServiceClient, opts GetOptsBuilder) GetResult { + var res GetResult + h := c.AuthenticatedHeaders() + + if opts != nil { + headers, err := opts.ToAccountGetMap() + if err != nil { + res.Err = err + return res + } + + for k, v := range headers { + h[k] = v + } + } + + resp, err := c.Request("HEAD", getURL(c), gophercloud.RequestOpts{ + MoreHeaders: h, + OkCodes: []int{204}, + }) + if resp != nil { + res.Header = resp.Header + } + res.Err = err + return res +} + +// UpdateOptsBuilder allows extensions to add additional headers to the Update +// request. +type UpdateOptsBuilder interface { + ToAccountUpdateMap() (map[string]string, error) +} + +// UpdateOpts is a structure that contains parameters for updating, creating, or +// deleting an account's metadata. +type UpdateOpts struct { + Metadata map[string]string + ContentType string `h:"Content-Type"` + DetectContentType bool `h:"X-Detect-Content-Type"` + TempURLKey string `h:"X-Account-Meta-Temp-URL-Key"` + TempURLKey2 string `h:"X-Account-Meta-Temp-URL-Key-2"` +} + +// ToAccountUpdateMap formats an UpdateOpts into a map[string]string of headers. +func (opts UpdateOpts) ToAccountUpdateMap() (map[string]string, error) { + headers, err := gophercloud.BuildHeaders(opts) + if err != nil { + return nil, err + } + for k, v := range opts.Metadata { + headers["X-Account-Meta-"+k] = v + } + return headers, err +} + +// Update is a function that creates, updates, or deletes an account's metadata. +// To extract the headers returned, call the Extract method on the UpdateResult. 
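A short sketch of the calling convention just described; the metadata key is arbitrary, and each entry is sent as an X-Account-Meta-* header by ToAccountUpdateMap above:

package example

import (
	"fmt"

	"github.com/rackspace/gophercloud"
	"github.com/rackspace/gophercloud/openstack/objectstorage/v1/accounts"
)

// tagAccount POSTs one metadata entry, then reads the typed response
// headers back via Extract on the UpdateResult.
func tagAccount(client *gophercloud.ServiceClient) error {
	res := accounts.Update(client, accounts.UpdateOpts{
		// Sent on the wire as "X-Account-Meta-Subject: books".
		Metadata: map[string]string{"Subject": "books"},
	})
	header, err := res.Extract()
	if err != nil {
		return err
	}
	fmt.Println("updated in transaction", header.TransID)
	return nil
}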
+func Update(c *gophercloud.ServiceClient, opts UpdateOptsBuilder) UpdateResult { + var res UpdateResult + h := make(map[string]string) + + if opts != nil { + headers, err := opts.ToAccountUpdateMap() + if err != nil { + res.Err = err + return res + } + for k, v := range headers { + h[k] = v + } + } + + resp, err := c.Request("POST", updateURL(c), gophercloud.RequestOpts{ + MoreHeaders: h, + OkCodes: []int{201, 202, 204}, + }) + if resp != nil { + res.Header = resp.Header + } + res.Err = err + return res +} diff --git a/vendor/github.com/rackspace/gophercloud/openstack/objectstorage/v1/accounts/requests_test.go b/vendor/github.com/rackspace/gophercloud/openstack/objectstorage/v1/accounts/requests_test.go new file mode 100644 index 000000000..6454c0ac4 --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/openstack/objectstorage/v1/accounts/requests_test.go @@ -0,0 +1,32 @@ +package accounts + +import ( + "testing" + + th "github.com/rackspace/gophercloud/testhelper" + fake "github.com/rackspace/gophercloud/testhelper/client" +) + +func TestUpdateAccount(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleUpdateAccountSuccessfully(t) + + options := &UpdateOpts{Metadata: map[string]string{"gophercloud-test": "accounts"}} + res := Update(fake.ServiceClient(), options) + th.AssertNoErr(t, res.Err) +} + +func TestGetAccount(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleGetAccountSuccessfully(t) + + expectedMetadata := map[string]string{"Subject": "books"} + res := Get(fake.ServiceClient(), &GetOpts{}) + th.AssertNoErr(t, res.Err) + actualMetadata, _ := res.ExtractMetadata() + th.CheckDeepEquals(t, expectedMetadata, actualMetadata) + //headers, err := res.Extract() + //th.AssertNoErr(t, err) +} diff --git a/vendor/github.com/rackspace/gophercloud/openstack/objectstorage/v1/accounts/results.go b/vendor/github.com/rackspace/gophercloud/openstack/objectstorage/v1/accounts/results.go new file mode 100644 index 000000000..6ab1a2306 --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/openstack/objectstorage/v1/accounts/results.go @@ -0,0 +1,102 @@ +package accounts + +import ( + "strings" + "time" + + "github.com/rackspace/gophercloud" +) + +// UpdateResult is returned from a call to the Update function. +type UpdateResult struct { + gophercloud.HeaderResult +} + +// UpdateHeader represents the headers returned in the response from an Update request. +type UpdateHeader struct { + ContentLength string `mapstructure:"Content-Length"` + ContentType string `mapstructure:"Content-Type"` + Date time.Time `mapstructure:"-"` + TransID string `mapstructure:"X-Trans-Id"` +} + +// Extract will return a struct of headers returned from a call to Update. To obtain +// a map of headers, call the ExtractHeader method on the UpdateResult. +func (ur UpdateResult) Extract() (UpdateHeader, error) { + var uh UpdateHeader + if ur.Err != nil { + return uh, ur.Err + } + + if err := gophercloud.DecodeHeader(ur.Header, &uh); err != nil { + return uh, err + } + + if date, ok := ur.Header["Date"]; ok && len(date) > 0 { + t, err := time.Parse(time.RFC1123, ur.Header["Date"][0]) + if err != nil { + return uh, err + } + uh.Date = t + } + + return uh, nil +} + +// GetHeader represents the headers returned in the response from a Get request.
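GetHeader (below) is decoded from the account HEAD response via gophercloud.DecodeHeader; a minimal sketch of reading account usage through it:

package example

import (
	"fmt"

	"github.com/rackspace/gophercloud"
	"github.com/rackspace/gophercloud/openstack/objectstorage/v1/accounts"
)

// printAccountUsage pulls the typed headers; ContainerCount, ObjectCount and
// BytesUsed come from the X-Account-* response headers.
func printAccountUsage(client *gophercloud.ServiceClient) error {
	// Newest sets X-Newest, asking Swift for the most up-to-date replica.
	header, err := accounts.Get(client, &accounts.GetOpts{Newest: true}).Extract()
	if err != nil {
		return err
	}
	fmt.Printf("%d containers, %d objects, %d bytes\n",
		header.ContainerCount, header.ObjectCount, header.BytesUsed)
	return nil
}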
+type GetHeader struct {
+	BytesUsed      int64     `mapstructure:"X-Account-Bytes-Used"`
+	ContainerCount int       `mapstructure:"X-Account-Container-Count"`
+	ContentLength  int64     `mapstructure:"Content-Length"`
+	ContentType    string    `mapstructure:"Content-Type"`
+	Date           time.Time `mapstructure:"-"`
+	ObjectCount    int64     `mapstructure:"X-Account-Object-Count"`
+	TransID        string    `mapstructure:"X-Trans-Id"`
+	TempURLKey     string    `mapstructure:"X-Account-Meta-Temp-URL-Key"`
+	TempURLKey2    string    `mapstructure:"X-Account-Meta-Temp-URL-Key-2"`
+}
+
+// GetResult is returned from a call to the Get function.
+type GetResult struct {
+	gophercloud.HeaderResult
+}
+
+// Extract will return a struct of headers returned from a call to Get. To obtain
+// a map of headers, call the ExtractHeader method on the GetResult.
+func (gr GetResult) Extract() (GetHeader, error) {
+	var gh GetHeader
+	if gr.Err != nil {
+		return gh, gr.Err
+	}
+
+	if err := gophercloud.DecodeHeader(gr.Header, &gh); err != nil {
+		return gh, err
+	}
+
+	if date, ok := gr.Header["Date"]; ok && len(date) > 0 {
+		t, err := time.Parse(time.RFC1123, gr.Header["Date"][0])
+		if err != nil {
+			return gh, err
+		}
+		gh.Date = t
+	}
+
+	return gh, nil
+}
+
+// ExtractMetadata is a function that takes a GetResult (of type *http.Response)
+// and returns the custom metadata associated with the account.
+func (gr GetResult) ExtractMetadata() (map[string]string, error) {
+	if gr.Err != nil {
+		return nil, gr.Err
+	}
+
+	metadata := make(map[string]string)
+	for k, v := range gr.Header {
+		if strings.HasPrefix(k, "X-Account-Meta-") {
+			key := strings.TrimPrefix(k, "X-Account-Meta-")
+			metadata[key] = v[0]
+		}
+	}
+	return metadata, nil
+}
diff --git a/vendor/github.com/rackspace/gophercloud/openstack/objectstorage/v1/accounts/urls.go b/vendor/github.com/rackspace/gophercloud/openstack/objectstorage/v1/accounts/urls.go
new file mode 100644
index 000000000..9952fe434
--- /dev/null
+++ b/vendor/github.com/rackspace/gophercloud/openstack/objectstorage/v1/accounts/urls.go
@@ -0,0 +1,11 @@
+package accounts
+
+import "github.com/rackspace/gophercloud"
+
+func getURL(c *gophercloud.ServiceClient) string {
+	return c.Endpoint
+}
+
+func updateURL(c *gophercloud.ServiceClient) string {
+	return getURL(c)
+}
diff --git a/vendor/github.com/rackspace/gophercloud/openstack/objectstorage/v1/accounts/urls_test.go b/vendor/github.com/rackspace/gophercloud/openstack/objectstorage/v1/accounts/urls_test.go
new file mode 100644
index 000000000..074d52dfd
--- /dev/null
+++ b/vendor/github.com/rackspace/gophercloud/openstack/objectstorage/v1/accounts/urls_test.go
@@ -0,0 +1,26 @@
+package accounts
+
+import (
+	"testing"
+
+	"github.com/rackspace/gophercloud"
+	th "github.com/rackspace/gophercloud/testhelper"
+)
+
+const endpoint = "http://localhost:57909/"
+
+func endpointClient() *gophercloud.ServiceClient {
+	return &gophercloud.ServiceClient{Endpoint: endpoint}
+}
+
+func TestGetURL(t *testing.T) {
+	actual := getURL(endpointClient())
+	expected := endpoint
+	th.CheckEquals(t, expected, actual)
+}
+
+func TestUpdateURL(t *testing.T) {
+	actual := updateURL(endpointClient())
+	expected := endpoint
+	th.CheckEquals(t, expected, actual)
+}
diff --git a/vendor/github.com/rackspace/gophercloud/openstack/objectstorage/v1/containers/doc.go b/vendor/github.com/rackspace/gophercloud/openstack/objectstorage/v1/containers/doc.go
new file mode 100644
index 000000000..5fed5537f
--- /dev/null
+++ b/vendor/github.com/rackspace/gophercloud/openstack/objectstorage/v1/containers/doc.go
@@ -0,0 +1,8
@@ +// Package containers contains functionality for working with Object Storage +// container resources. A container serves as a logical namespace for objects +// that are placed inside it - an object with the same name in two different +// containers represents two different objects. +// +// In addition to containing objects, you can also use the container to control +// access to objects by using an access control list (ACL). +package containers diff --git a/vendor/github.com/rackspace/gophercloud/openstack/objectstorage/v1/containers/fixtures.go b/vendor/github.com/rackspace/gophercloud/openstack/objectstorage/v1/containers/fixtures.go new file mode 100644 index 000000000..e60735248 --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/openstack/objectstorage/v1/containers/fixtures.go @@ -0,0 +1,143 @@ +// +build fixtures + +package containers + +import ( + "fmt" + "net/http" + "testing" + + th "github.com/rackspace/gophercloud/testhelper" + fake "github.com/rackspace/gophercloud/testhelper/client" +) + +// ExpectedListInfo is the result expected from a call to `List` when full +// info is requested. +var ExpectedListInfo = []Container{ + Container{ + Count: 0, + Bytes: 0, + Name: "janeausten", + }, + Container{ + Count: 1, + Bytes: 14, + Name: "marktwain", + }, +} + +// ExpectedListNames is the result expected from a call to `List` when just +// container names are requested. +var ExpectedListNames = []string{"janeausten", "marktwain"} + +// HandleListContainerInfoSuccessfully creates an HTTP handler at `/` on the test handler mux that +// responds with a `List` response when full info is requested. +func HandleListContainerInfoSuccessfully(t *testing.T) { + th.Mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + th.TestHeader(t, r, "Accept", "application/json") + + w.Header().Set("Content-Type", "application/json") + r.ParseForm() + marker := r.Form.Get("marker") + switch marker { + case "": + fmt.Fprintf(w, `[ + { + "count": 0, + "bytes": 0, + "name": "janeausten" + }, + { + "count": 1, + "bytes": 14, + "name": "marktwain" + } + ]`) + case "janeausten": + fmt.Fprintf(w, `[ + { + "count": 1, + "bytes": 14, + "name": "marktwain" + } + ]`) + case "marktwain": + fmt.Fprintf(w, `[]`) + default: + t.Fatalf("Unexpected marker: [%s]", marker) + } + }) +} + +// HandleListContainerNamesSuccessfully creates an HTTP handler at `/` on the test handler mux that +// responds with a `ListNames` response when only container names are requested. +func HandleListContainerNamesSuccessfully(t *testing.T) { + th.Mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + th.TestHeader(t, r, "Accept", "text/plain") + + w.Header().Set("Content-Type", "text/plain") + r.ParseForm() + marker := r.Form.Get("marker") + switch marker { + case "": + fmt.Fprintf(w, "janeausten\nmarktwain\n") + case "janeausten": + fmt.Fprintf(w, "marktwain\n") + case "marktwain": + fmt.Fprintf(w, ``) + default: + t.Fatalf("Unexpected marker: [%s]", marker) + } + }) +} + +// HandleCreateContainerSuccessfully creates an HTTP handler at `/testContainer` on the test handler mux that +// responds with a `Create` response. 
+func HandleCreateContainerSuccessfully(t *testing.T) { + th.Mux.HandleFunc("/testContainer", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "PUT") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + th.TestHeader(t, r, "Accept", "application/json") + + w.Header().Add("X-Container-Meta-Foo", "bar") + w.Header().Add("X-Trans-Id", "1234567") + w.WriteHeader(http.StatusNoContent) + }) +} + +// HandleDeleteContainerSuccessfully creates an HTTP handler at `/testContainer` on the test handler mux that +// responds with a `Delete` response. +func HandleDeleteContainerSuccessfully(t *testing.T) { + th.Mux.HandleFunc("/testContainer", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "DELETE") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + th.TestHeader(t, r, "Accept", "application/json") + w.WriteHeader(http.StatusNoContent) + }) +} + +// HandleUpdateContainerSuccessfully creates an HTTP handler at `/testContainer` on the test handler mux that +// responds with a `Update` response. +func HandleUpdateContainerSuccessfully(t *testing.T) { + th.Mux.HandleFunc("/testContainer", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "POST") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + th.TestHeader(t, r, "Accept", "application/json") + w.WriteHeader(http.StatusNoContent) + }) +} + +// HandleGetContainerSuccessfully creates an HTTP handler at `/testContainer` on the test handler mux that +// responds with a `Get` response. +func HandleGetContainerSuccessfully(t *testing.T) { + th.Mux.HandleFunc("/testContainer", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "HEAD") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + th.TestHeader(t, r, "Accept", "application/json") + w.WriteHeader(http.StatusNoContent) + }) +} diff --git a/vendor/github.com/rackspace/gophercloud/openstack/objectstorage/v1/containers/requests.go b/vendor/github.com/rackspace/gophercloud/openstack/objectstorage/v1/containers/requests.go new file mode 100644 index 000000000..50ff9f48f --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/openstack/objectstorage/v1/containers/requests.go @@ -0,0 +1,205 @@ +package containers + +import ( + "github.com/rackspace/gophercloud" + "github.com/rackspace/gophercloud/pagination" +) + +// ListOptsBuilder allows extensions to add additional parameters to the List +// request. +type ListOptsBuilder interface { + ToContainerListParams() (bool, string, error) +} + +// ListOpts is a structure that holds options for listing containers. +type ListOpts struct { + Full bool + Limit int `q:"limit"` + Marker string `q:"marker"` + EndMarker string `q:"end_marker"` + Format string `q:"format"` + Prefix string `q:"prefix"` + Delimiter string `q:"delimiter"` +} + +// ToContainerListParams formats a ListOpts into a query string and boolean +// representing whether to list complete information for each container. +func (opts ListOpts) ToContainerListParams() (bool, string, error) { + q, err := gophercloud.BuildQueryString(opts) + if err != nil { + return false, "", err + } + return opts.Full, q.String(), nil +} + +// List is a function that retrieves containers associated with the account as +// well as account metadata. It returns a pager which can be iterated with the +// EachPage function. 
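// Illustrative example (editor's note, not vendored code): paging through all
// containers with the List call below. Assumes an authenticated
// *gophercloud.ServiceClient named "client" and the pagination package imported
// as above; the option and extraction names match the definitions in this file.
//
//	err := containers.List(client, containers.ListOpts{Full: true}).EachPage(
//		func(page pagination.Page) (bool, error) {
//			infos, err := containers.ExtractInfo(page)
//			if err != nil {
//				return false, err
//			}
//			for _, c := range infos {
//				fmt.Printf("%s: %d objects, %d bytes\n", c.Name, c.Count, c.Bytes)
//			}
//			return true, nil // true continues on to the next marker page
//		})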
+func List(c *gophercloud.ServiceClient, opts ListOptsBuilder) pagination.Pager {
+	headers := map[string]string{"Accept": "text/plain", "Content-Type": "text/plain"}
+
+	url := listURL(c)
+	if opts != nil {
+		full, query, err := opts.ToContainerListParams()
+		if err != nil {
+			return pagination.Pager{Err: err}
+		}
+		url += query
+
+		if full {
+			headers = map[string]string{"Accept": "application/json", "Content-Type": "application/json"}
+		}
+	}
+
+	createPage := func(r pagination.PageResult) pagination.Page {
+		p := ContainerPage{pagination.MarkerPageBase{PageResult: r}}
+		p.MarkerPageBase.Owner = p
+		return p
+	}
+
+	pager := pagination.NewPager(c, url, createPage)
+	pager.Headers = headers
+	return pager
+}
+
+// CreateOptsBuilder allows extensions to add additional parameters to the
+// Create request.
+type CreateOptsBuilder interface {
+	ToContainerCreateMap() (map[string]string, error)
+}
+
+// CreateOpts is a structure that holds parameters for creating a container.
+type CreateOpts struct {
+	Metadata          map[string]string
+	ContainerRead     string `h:"X-Container-Read"`
+	ContainerSyncTo   string `h:"X-Container-Sync-To"`
+	ContainerSyncKey  string `h:"X-Container-Sync-Key"`
+	ContainerWrite    string `h:"X-Container-Write"`
+	ContentType       string `h:"Content-Type"`
+	DetectContentType bool   `h:"X-Detect-Content-Type"`
+	IfNoneMatch       string `h:"If-None-Match"`
+	VersionsLocation  string `h:"X-Versions-Location"`
+}
+
+// ToContainerCreateMap formats a CreateOpts into a map of headers.
+func (opts CreateOpts) ToContainerCreateMap() (map[string]string, error) {
+	h, err := gophercloud.BuildHeaders(opts)
+	if err != nil {
+		return nil, err
+	}
+	for k, v := range opts.Metadata {
+		h["X-Container-Meta-"+k] = v
+	}
+	return h, nil
+}
+
+// Create is a function that creates a new container.
+func Create(c *gophercloud.ServiceClient, containerName string, opts CreateOptsBuilder) CreateResult {
+	var res CreateResult
+	h := c.AuthenticatedHeaders()
+
+	if opts != nil {
+		headers, err := opts.ToContainerCreateMap()
+		if err != nil {
+			res.Err = err
+			return res
+		}
+
+		for k, v := range headers {
+			h[k] = v
+		}
+	}
+
+	resp, err := c.Request("PUT", createURL(c, containerName), gophercloud.RequestOpts{
+		MoreHeaders: h,
+		OkCodes:     []int{201, 202, 204},
+	})
+	if resp != nil {
+		res.Header = resp.Header
+	}
+	res.Err = err
+	return res
+}
+
+// Delete is a function that deletes a container.
+func Delete(c *gophercloud.ServiceClient, containerName string) DeleteResult {
+	var res DeleteResult
+	_, res.Err = c.Delete(deleteURL(c, containerName), nil)
+	return res
+}
+
+// UpdateOptsBuilder allows extensions to add additional parameters to the
+// Update request.
+type UpdateOptsBuilder interface {
+	ToContainerUpdateMap() (map[string]string, error)
+}
+
+// UpdateOpts is a structure that holds parameters for updating, creating, or
+// deleting a container's metadata.
+type UpdateOpts struct {
+	Metadata               map[string]string
+	ContainerRead          string `h:"X-Container-Read"`
+	ContainerSyncTo        string `h:"X-Container-Sync-To"`
+	ContainerSyncKey       string `h:"X-Container-Sync-Key"`
+	ContainerWrite         string `h:"X-Container-Write"`
+	ContentType            string `h:"Content-Type"`
+	DetectContentType      bool   `h:"X-Detect-Content-Type"`
+	RemoveVersionsLocation string `h:"X-Remove-Versions-Location"`
+	VersionsLocation       string `h:"X-Versions-Location"`
+}
+
+// ToContainerUpdateMap formats an UpdateOpts into a map of headers.
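// Illustrative example (editor's note, not vendored code): a create/delete
// round trip using the Create and Delete calls above. Assumes an authenticated
// "client"; the container name and metadata are hypothetical.
//
//	res := containers.Create(client, "janeausten", containers.CreateOpts{
//		Metadata: map[string]string{"Genre": "fiction"}, // sent as X-Container-Meta-Genre
//	})
//	header, err := res.Extract()
//	if err != nil {
//		return err
//	}
//	fmt.Println("created; transaction id:", header.TransID)
//	if del := containers.Delete(client, "janeausten"); del.Err != nil {
//		return del.Err
//	}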
+func (opts UpdateOpts) ToContainerUpdateMap() (map[string]string, error) { + h, err := gophercloud.BuildHeaders(opts) + if err != nil { + return nil, err + } + for k, v := range opts.Metadata { + h["X-Container-Meta-"+k] = v + } + return h, nil +} + +// Update is a function that creates, updates, or deletes a container's +// metadata. +func Update(c *gophercloud.ServiceClient, containerName string, opts UpdateOptsBuilder) UpdateResult { + var res UpdateResult + h := c.AuthenticatedHeaders() + + if opts != nil { + headers, err := opts.ToContainerUpdateMap() + if err != nil { + res.Err = err + return res + } + + for k, v := range headers { + h[k] = v + } + } + + resp, err := c.Request("POST", updateURL(c, containerName), gophercloud.RequestOpts{ + MoreHeaders: h, + OkCodes: []int{201, 202, 204}, + }) + if resp != nil { + res.Header = resp.Header + } + res.Err = err + return res +} + +// Get is a function that retrieves the metadata of a container. To extract just +// the custom metadata, pass the GetResult response to the ExtractMetadata +// function. +func Get(c *gophercloud.ServiceClient, containerName string) GetResult { + var res GetResult + resp, err := c.Request("HEAD", getURL(c, containerName), gophercloud.RequestOpts{ + OkCodes: []int{200, 204}, + }) + if resp != nil { + res.Header = resp.Header + } + res.Err = err + return res +} diff --git a/vendor/github.com/rackspace/gophercloud/openstack/objectstorage/v1/containers/requests_test.go b/vendor/github.com/rackspace/gophercloud/openstack/objectstorage/v1/containers/requests_test.go new file mode 100644 index 000000000..0ccd5a778 --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/openstack/objectstorage/v1/containers/requests_test.go @@ -0,0 +1,117 @@ +package containers + +import ( + "testing" + + "github.com/rackspace/gophercloud/pagination" + th "github.com/rackspace/gophercloud/testhelper" + fake "github.com/rackspace/gophercloud/testhelper/client" +) + +var metadata = map[string]string{"gophercloud-test": "containers"} + +func TestListContainerInfo(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleListContainerInfoSuccessfully(t) + + count := 0 + err := List(fake.ServiceClient(), &ListOpts{Full: true}).EachPage(func(page pagination.Page) (bool, error) { + count++ + actual, err := ExtractInfo(page) + th.AssertNoErr(t, err) + + th.CheckDeepEquals(t, ExpectedListInfo, actual) + + return true, nil + }) + th.AssertNoErr(t, err) + th.CheckEquals(t, count, 1) +} + +func TestListAllContainerInfo(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleListContainerInfoSuccessfully(t) + + allPages, err := List(fake.ServiceClient(), &ListOpts{Full: true}).AllPages() + th.AssertNoErr(t, err) + actual, err := ExtractInfo(allPages) + th.AssertNoErr(t, err) + th.CheckDeepEquals(t, ExpectedListInfo, actual) +} + +func TestListContainerNames(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleListContainerNamesSuccessfully(t) + + count := 0 + err := List(fake.ServiceClient(), &ListOpts{Full: false}).EachPage(func(page pagination.Page) (bool, error) { + count++ + actual, err := ExtractNames(page) + if err != nil { + t.Errorf("Failed to extract container names: %v", err) + return false, err + } + + th.CheckDeepEquals(t, ExpectedListNames, actual) + + return true, nil + }) + th.AssertNoErr(t, err) + th.CheckEquals(t, count, 1) +} + +func TestListAllContainerNames(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleListContainerNamesSuccessfully(t) + + allPages, err := 
List(fake.ServiceClient(), &ListOpts{Full: false}).AllPages()
+	th.AssertNoErr(t, err)
+	actual, err := ExtractNames(allPages)
+	th.AssertNoErr(t, err)
+	th.CheckDeepEquals(t, ExpectedListNames, actual)
+}
+
+func TestCreateContainer(t *testing.T) {
+	th.SetupHTTP()
+	defer th.TeardownHTTP()
+	HandleCreateContainerSuccessfully(t)
+
+	options := CreateOpts{ContentType: "application/json", Metadata: map[string]string{"foo": "bar"}}
+	res := Create(fake.ServiceClient(), "testContainer", options)
+	c, err := res.Extract()
+	th.CheckNoErr(t, err)
+	th.CheckEquals(t, "bar", res.Header["X-Container-Meta-Foo"][0])
+	th.CheckEquals(t, "1234567", c.TransID)
+}
+
+func TestDeleteContainer(t *testing.T) {
+	th.SetupHTTP()
+	defer th.TeardownHTTP()
+	HandleDeleteContainerSuccessfully(t)
+
+	res := Delete(fake.ServiceClient(), "testContainer")
+	th.CheckNoErr(t, res.Err)
+}
+
+func TestUpdateContainer(t *testing.T) {
+	th.SetupHTTP()
+	defer th.TeardownHTTP()
+	HandleUpdateContainerSuccessfully(t)
+
+	options := &UpdateOpts{Metadata: map[string]string{"foo": "bar"}}
+	res := Update(fake.ServiceClient(), "testContainer", options)
+	th.CheckNoErr(t, res.Err)
+}
+
+func TestGetContainer(t *testing.T) {
+	th.SetupHTTP()
+	defer th.TeardownHTTP()
+	HandleGetContainerSuccessfully(t)
+
+	_, err := Get(fake.ServiceClient(), "testContainer").ExtractMetadata()
+	th.CheckNoErr(t, err)
+}
diff --git a/vendor/github.com/rackspace/gophercloud/openstack/objectstorage/v1/containers/results.go b/vendor/github.com/rackspace/gophercloud/openstack/objectstorage/v1/containers/results.go
new file mode 100644
index 000000000..e682b8dcc
--- /dev/null
+++ b/vendor/github.com/rackspace/gophercloud/openstack/objectstorage/v1/containers/results.go
@@ -0,0 +1,270 @@
+package containers
+
+import (
+	"fmt"
+	"strings"
+	"time"
+
+	"github.com/rackspace/gophercloud"
+	"github.com/rackspace/gophercloud/pagination"
+
+	"github.com/mitchellh/mapstructure"
+)
+
+// Container represents a container resource.
+type Container struct {
+	// The total number of bytes stored in the container.
+	Bytes int `json:"bytes" mapstructure:"bytes"`
+
+	// The total number of objects stored in the container.
+	Count int `json:"count" mapstructure:"count"`
+
+	// The name of the container.
+	Name string `json:"name" mapstructure:"name"`
+}
+
+// ContainerPage is the page returned by a pager when traversing over a
+// collection of containers.
+type ContainerPage struct {
+	pagination.MarkerPageBase
+}
+
+// IsEmpty returns true if a ListResult contains no container names.
+func (r ContainerPage) IsEmpty() (bool, error) {
+	names, err := ExtractNames(r)
+	if err != nil {
+		return true, err
+	}
+	return len(names) == 0, nil
+}
+
+// LastMarker returns the last container name in a ListResult.
+func (r ContainerPage) LastMarker() (string, error) {
+	names, err := ExtractNames(r)
+	if err != nil {
+		return "", err
+	}
+	if len(names) == 0 {
+		return "", nil
+	}
+	return names[len(names)-1], nil
+}
+
+// ExtractInfo is a function that takes a ListResult and returns the containers' information.
+func ExtractInfo(page pagination.Page) ([]Container, error) {
+	untyped := page.(ContainerPage).Body.([]interface{})
+	results := make([]Container, len(untyped))
+	for index, each := range untyped {
+		container := each.(map[string]interface{})
+		err := mapstructure.Decode(container, &results[index])
+		if err != nil {
+			return results, err
+		}
+	}
+	return results, nil
+}
+
+// ExtractNames is a function that takes a ListResult and returns the containers' names.
+func ExtractNames(page pagination.Page) ([]string, error) {
+	casted := page.(ContainerPage)
+	ct := casted.Header.Get("Content-Type")
+
+	switch {
+	case strings.HasPrefix(ct, "application/json"):
+		parsed, err := ExtractInfo(page)
+		if err != nil {
+			return nil, err
+		}
+
+		names := make([]string, 0, len(parsed))
+		for _, container := range parsed {
+			names = append(names, container.Name)
+		}
+		return names, nil
+	case strings.HasPrefix(ct, "text/plain"):
+		names := make([]string, 0, 50)
+
+		body := string(page.(ContainerPage).Body.([]uint8))
+		for _, name := range strings.Split(body, "\n") {
+			if len(name) > 0 {
+				names = append(names, name)
+			}
+		}
+
+		return names, nil
+	default:
+		return nil, fmt.Errorf("Cannot extract names from response with content-type: [%s]", ct)
+	}
+}
+
+// GetHeader represents the headers returned in the response from a Get request.
+type GetHeader struct {
+	AcceptRanges     string    `mapstructure:"Accept-Ranges"`
+	BytesUsed        int64     `mapstructure:"X-Account-Bytes-Used"`
+	ContentLength    int64     `mapstructure:"Content-Length"`
+	ContentType      string    `mapstructure:"Content-Type"`
+	Date             time.Time `mapstructure:"-"`
+	ObjectCount      int64     `mapstructure:"X-Container-Object-Count"`
+	Read             string    `mapstructure:"X-Container-Read"`
+	TransID          string    `mapstructure:"X-Trans-Id"`
+	VersionsLocation string    `mapstructure:"X-Versions-Location"`
+	Write            string    `mapstructure:"X-Container-Write"`
+}
+
+// GetResult represents the result of a get operation.
+type GetResult struct {
+	gophercloud.HeaderResult
+}
+
+// Extract will return a struct of headers returned from a call to Get. To obtain
+// a map of headers, call the ExtractHeader method on the GetResult.
+func (gr GetResult) Extract() (GetHeader, error) {
+	var gh GetHeader
+	if gr.Err != nil {
+		return gh, gr.Err
+	}
+
+	if err := gophercloud.DecodeHeader(gr.Header, &gh); err != nil {
+		return gh, err
+	}
+
+	if date, ok := gr.Header["Date"]; ok && len(date) > 0 {
+		t, err := time.Parse(time.RFC1123, gr.Header["Date"][0])
+		if err != nil {
+			return gh, err
+		}
+		gh.Date = t
+	}
+
+	return gh, nil
+}
+
+// ExtractMetadata is a function that takes a GetResult (of type *http.Response)
+// and returns the custom metadata associated with the container.
+func (gr GetResult) ExtractMetadata() (map[string]string, error) {
+	if gr.Err != nil {
+		return nil, gr.Err
+	}
+	metadata := make(map[string]string)
+	for k, v := range gr.Header {
+		if strings.HasPrefix(k, "X-Container-Meta-") {
+			key := strings.TrimPrefix(k, "X-Container-Meta-")
+			metadata[key] = v[0]
+		}
+	}
+	return metadata, nil
+}
+
+// CreateHeader represents the headers returned in the response from a Create request.
+type CreateHeader struct {
+	ContentLength int64     `mapstructure:"Content-Length"`
+	ContentType   string    `mapstructure:"Content-Type"`
+	Date          time.Time `mapstructure:"-"`
+	TransID       string    `mapstructure:"X-Trans-Id"`
+}
+
+// CreateResult represents the result of a create operation. To extract the
+// headers from the HTTP response, you can invoke the 'ExtractHeader'
+// method on the result struct.
+type CreateResult struct {
+	gophercloud.HeaderResult
+}
+
+// Extract will return a struct of headers returned from a call to Create. To obtain
+// a map of headers, call the ExtractHeader method on the CreateResult.
+func (cr CreateResult) Extract() (CreateHeader, error) {
+	var ch CreateHeader
+	if cr.Err != nil {
+		return ch, cr.Err
+	}
+
+	if err := gophercloud.DecodeHeader(cr.Header, &ch); err != nil {
+		return ch, err
+	}
+
+	if date, ok := cr.Header["Date"]; ok && len(date) > 0 {
+		t, err := time.Parse(time.RFC1123, cr.Header["Date"][0])
+		if err != nil {
+			return ch, err
+		}
+		ch.Date = t
+	}
+
+	return ch, nil
+}
+
+// UpdateHeader represents the headers returned in the response from an Update request.
+type UpdateHeader struct {
+	ContentLength int64     `mapstructure:"Content-Length"`
+	ContentType   string    `mapstructure:"Content-Type"`
+	Date          time.Time `mapstructure:"-"`
+	TransID       string    `mapstructure:"X-Trans-Id"`
+}
+
+// UpdateResult represents the result of an update operation. To extract the
+// headers from the HTTP response, you can invoke the 'ExtractHeader'
+// method on the result struct.
+type UpdateResult struct {
+	gophercloud.HeaderResult
+}
+
+// Extract will return a struct of headers returned from a call to Update. To obtain
+// a map of headers, call the ExtractHeader method on the UpdateResult.
+func (ur UpdateResult) Extract() (UpdateHeader, error) {
+	var uh UpdateHeader
+	if ur.Err != nil {
+		return uh, ur.Err
+	}
+
+	if err := gophercloud.DecodeHeader(ur.Header, &uh); err != nil {
+		return uh, err
+	}
+
+	if date, ok := ur.Header["Date"]; ok && len(date) > 0 {
+		t, err := time.Parse(time.RFC1123, ur.Header["Date"][0])
+		if err != nil {
+			return uh, err
+		}
+		uh.Date = t
+	}
+
+	return uh, nil
+}
+
+// DeleteHeader represents the headers returned in the response from a Delete request.
+type DeleteHeader struct {
+	ContentLength int64     `mapstructure:"Content-Length"`
+	ContentType   string    `mapstructure:"Content-Type"`
+	Date          time.Time `mapstructure:"-"`
+	TransID       string    `mapstructure:"X-Trans-Id"`
+}
+
+// DeleteResult represents the result of a delete operation. To extract the
+// headers from the HTTP response, you can invoke the 'ExtractHeader'
+// method on the result struct.
+type DeleteResult struct {
+	gophercloud.HeaderResult
+}
+
+// Extract will return a struct of headers returned from a call to Delete. To obtain
+// a map of headers, call the ExtractHeader method on the DeleteResult.
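// Editor's note (not vendored code): the four typed-header Extract methods in
// this file all follow the same shape: mapstructure decodes the plain string
// headers, while Date is tagged `mapstructure:"-"` and parsed by hand because
// it arrives as an RFC1123 timestamp. A hypothetical caller only sees the
// finished struct (assumes an authenticated "client"):
//
//	uh, err := containers.Update(client, "janeausten", nil).Extract()
//	if err != nil {
//		return err
//	}
//	fmt.Println(uh.TransID, uh.Date.UTC()) // Date was parsed from the "Date" header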
+func (dr DeleteResult) Extract() (DeleteHeader, error) { + var dh DeleteHeader + if dr.Err != nil { + return dh, dr.Err + } + + if err := gophercloud.DecodeHeader(dr.Header, &dh); err != nil { + return dh, err + } + + if date, ok := dr.Header["Date"]; ok && len(date) > 0 { + t, err := time.Parse(time.RFC1123, dr.Header["Date"][0]) + if err != nil { + return dh, err + } + dh.Date = t + } + + return dh, nil +} diff --git a/vendor/github.com/rackspace/gophercloud/openstack/objectstorage/v1/containers/urls.go b/vendor/github.com/rackspace/gophercloud/openstack/objectstorage/v1/containers/urls.go new file mode 100644 index 000000000..f864f846e --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/openstack/objectstorage/v1/containers/urls.go @@ -0,0 +1,23 @@ +package containers + +import "github.com/rackspace/gophercloud" + +func listURL(c *gophercloud.ServiceClient) string { + return c.Endpoint +} + +func createURL(c *gophercloud.ServiceClient, container string) string { + return c.ServiceURL(container) +} + +func getURL(c *gophercloud.ServiceClient, container string) string { + return createURL(c, container) +} + +func deleteURL(c *gophercloud.ServiceClient, container string) string { + return createURL(c, container) +} + +func updateURL(c *gophercloud.ServiceClient, container string) string { + return createURL(c, container) +} diff --git a/vendor/github.com/rackspace/gophercloud/openstack/objectstorage/v1/containers/urls_test.go b/vendor/github.com/rackspace/gophercloud/openstack/objectstorage/v1/containers/urls_test.go new file mode 100644 index 000000000..d043a2aae --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/openstack/objectstorage/v1/containers/urls_test.go @@ -0,0 +1,43 @@ +package containers + +import ( + "github.com/rackspace/gophercloud" + th "github.com/rackspace/gophercloud/testhelper" + "testing" +) + +const endpoint = "http://localhost:57909/" + +func endpointClient() *gophercloud.ServiceClient { + return &gophercloud.ServiceClient{Endpoint: endpoint} +} + +func TestListURL(t *testing.T) { + actual := listURL(endpointClient()) + expected := endpoint + th.CheckEquals(t, expected, actual) +} + +func TestCreateURL(t *testing.T) { + actual := createURL(endpointClient(), "foo") + expected := endpoint + "foo" + th.CheckEquals(t, expected, actual) +} + +func TestGetURL(t *testing.T) { + actual := getURL(endpointClient(), "foo") + expected := endpoint + "foo" + th.CheckEquals(t, expected, actual) +} + +func TestDeleteURL(t *testing.T) { + actual := deleteURL(endpointClient(), "foo") + expected := endpoint + "foo" + th.CheckEquals(t, expected, actual) +} + +func TestUpdateURL(t *testing.T) { + actual := updateURL(endpointClient(), "foo") + expected := endpoint + "foo" + th.CheckEquals(t, expected, actual) +} diff --git a/vendor/github.com/rackspace/gophercloud/openstack/objectstorage/v1/objects/doc.go b/vendor/github.com/rackspace/gophercloud/openstack/objectstorage/v1/objects/doc.go new file mode 100644 index 000000000..30a9adde1 --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/openstack/objectstorage/v1/objects/doc.go @@ -0,0 +1,5 @@ +// Package objects contains functionality for working with Object Storage +// object resources. An object is a resource that represents and contains data +// - such as documents, images, and so on. You can also store custom metadata +// with an object. 
+package objects diff --git a/vendor/github.com/rackspace/gophercloud/openstack/objectstorage/v1/objects/fixtures.go b/vendor/github.com/rackspace/gophercloud/openstack/objectstorage/v1/objects/fixtures.go new file mode 100644 index 000000000..7a6e6e1ee --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/openstack/objectstorage/v1/objects/fixtures.go @@ -0,0 +1,195 @@ +// +build fixtures + +package objects + +import ( + "crypto/md5" + "fmt" + "io" + "net/http" + "testing" + + th "github.com/rackspace/gophercloud/testhelper" + fake "github.com/rackspace/gophercloud/testhelper/client" +) + +// HandleDownloadObjectSuccessfully creates an HTTP handler at `/testContainer/testObject` on the test handler mux that +// responds with a `Download` response. +func HandleDownloadObjectSuccessfully(t *testing.T) { + th.Mux.HandleFunc("/testContainer/testObject", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + th.TestHeader(t, r, "Accept", "application/json") + w.WriteHeader(http.StatusOK) + fmt.Fprintf(w, "Successful download with Gophercloud") + }) +} + +// ExpectedListInfo is the result expected from a call to `List` when full +// info is requested. +var ExpectedListInfo = []Object{ + Object{ + Hash: "451e372e48e0f6b1114fa0724aa79fa1", + LastModified: "2009-11-10 23:00:00 +0000 UTC", + Bytes: 14, + Name: "goodbye", + ContentType: "application/octet-stream", + }, + Object{ + Hash: "451e372e48e0f6b1114fa0724aa79fa1", + LastModified: "2009-11-10 23:00:00 +0000 UTC", + Bytes: 14, + Name: "hello", + ContentType: "application/octet-stream", + }, +} + +// ExpectedListNames is the result expected from a call to `List` when just +// object names are requested. +var ExpectedListNames = []string{"hello", "goodbye"} + +// HandleListObjectsInfoSuccessfully creates an HTTP handler at `/testContainer` on the test handler mux that +// responds with a `List` response when full info is requested. +func HandleListObjectsInfoSuccessfully(t *testing.T) { + th.Mux.HandleFunc("/testContainer", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + th.TestHeader(t, r, "Accept", "application/json") + + w.Header().Set("Content-Type", "application/json") + r.ParseForm() + marker := r.Form.Get("marker") + switch marker { + case "": + fmt.Fprintf(w, `[ + { + "hash": "451e372e48e0f6b1114fa0724aa79fa1", + "last_modified": "2009-11-10 23:00:00 +0000 UTC", + "bytes": 14, + "name": "goodbye", + "content_type": "application/octet-stream" + }, + { + "hash": "451e372e48e0f6b1114fa0724aa79fa1", + "last_modified": "2009-11-10 23:00:00 +0000 UTC", + "bytes": 14, + "name": "hello", + "content_type": "application/octet-stream" + } + ]`) + case "hello": + fmt.Fprintf(w, `[]`) + default: + t.Fatalf("Unexpected marker: [%s]", marker) + } + }) +} + +// HandleListObjectNamesSuccessfully creates an HTTP handler at `/testContainer` on the test handler mux that +// responds with a `List` response when only object names are requested. 
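// Editor's note (not vendored code): like the other fixtures.go files in this
// patch, the file above starts with a "// +build fixtures" constraint, so these
// helpers are only compiled when that build tag is supplied. The vendored tests
// that rely on them would be run with something like:
//
//	go test -tags fixtures github.com/rackspace/gophercloud/openstack/objectstorage/v1/objects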
+func HandleListObjectNamesSuccessfully(t *testing.T) { + th.Mux.HandleFunc("/testContainer", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + th.TestHeader(t, r, "Accept", "text/plain") + + w.Header().Set("Content-Type", "text/plain") + r.ParseForm() + marker := r.Form.Get("marker") + switch marker { + case "": + fmt.Fprintf(w, "hello\ngoodbye\n") + case "goodbye": + fmt.Fprintf(w, "") + default: + t.Fatalf("Unexpected marker: [%s]", marker) + } + }) +} + +// HandleCreateTextObjectSuccessfully creates an HTTP handler at `/testContainer/testObject` on the test handler mux +// that responds with a `Create` response. A Content-Type of "text/plain" is expected. +func HandleCreateTextObjectSuccessfully(t *testing.T, content string) { + th.Mux.HandleFunc("/testContainer/testObject", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "PUT") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + th.TestHeader(t, r, "Content-Type", "text/plain") + th.TestHeader(t, r, "Accept", "application/json") + + hash := md5.New() + io.WriteString(hash, content) + localChecksum := hash.Sum(nil) + + w.Header().Set("ETag", fmt.Sprintf("%x", localChecksum)) + w.WriteHeader(http.StatusCreated) + }) +} + +// HandleCreateTypelessObjectSuccessfully creates an HTTP handler at `/testContainer/testObject` on the test handler +// mux that responds with a `Create` response. No Content-Type header may be present in the request, so that server- +// side content-type detection will be triggered properly. +func HandleCreateTypelessObjectSuccessfully(t *testing.T, content string) { + th.Mux.HandleFunc("/testContainer/testObject", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "PUT") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + th.TestHeader(t, r, "Accept", "application/json") + + if contentType, present := r.Header["Content-Type"]; present { + t.Errorf("Expected Content-Type header to be omitted, but was %#v", contentType) + } + + hash := md5.New() + io.WriteString(hash, content) + localChecksum := hash.Sum(nil) + + w.Header().Set("ETag", fmt.Sprintf("%x", localChecksum)) + w.WriteHeader(http.StatusCreated) + }) +} + +// HandleCopyObjectSuccessfully creates an HTTP handler at `/testContainer/testObject` on the test handler mux that +// responds with a `Copy` response. +func HandleCopyObjectSuccessfully(t *testing.T) { + th.Mux.HandleFunc("/testContainer/testObject", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "COPY") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + th.TestHeader(t, r, "Accept", "application/json") + th.TestHeader(t, r, "Destination", "/newTestContainer/newTestObject") + w.WriteHeader(http.StatusCreated) + }) +} + +// HandleDeleteObjectSuccessfully creates an HTTP handler at `/testContainer/testObject` on the test handler mux that +// responds with a `Delete` response. +func HandleDeleteObjectSuccessfully(t *testing.T) { + th.Mux.HandleFunc("/testContainer/testObject", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "DELETE") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + th.TestHeader(t, r, "Accept", "application/json") + w.WriteHeader(http.StatusNoContent) + }) +} + +// HandleUpdateObjectSuccessfully creates an HTTP handler at `/testContainer/testObject` on the test handler mux that +// responds with a `Update` response. 
+func HandleUpdateObjectSuccessfully(t *testing.T) { + th.Mux.HandleFunc("/testContainer/testObject", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "POST") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + th.TestHeader(t, r, "Accept", "application/json") + th.TestHeader(t, r, "X-Object-Meta-Gophercloud-Test", "objects") + w.WriteHeader(http.StatusAccepted) + }) +} + +// HandleGetObjectSuccessfully creates an HTTP handler at `/testContainer/testObject` on the test handler mux that +// responds with a `Get` response. +func HandleGetObjectSuccessfully(t *testing.T) { + th.Mux.HandleFunc("/testContainer/testObject", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "HEAD") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + th.TestHeader(t, r, "Accept", "application/json") + w.Header().Add("X-Object-Meta-Gophercloud-Test", "objects") + w.WriteHeader(http.StatusNoContent) + }) +} diff --git a/vendor/github.com/rackspace/gophercloud/openstack/objectstorage/v1/objects/requests.go b/vendor/github.com/rackspace/gophercloud/openstack/objectstorage/v1/objects/requests.go new file mode 100644 index 000000000..f85add0b9 --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/openstack/objectstorage/v1/objects/requests.go @@ -0,0 +1,501 @@ +package objects + +import ( + "bufio" + "crypto/hmac" + "crypto/md5" + "crypto/sha1" + "fmt" + "io" + "io/ioutil" + "strings" + "time" + + "github.com/rackspace/gophercloud" + "github.com/rackspace/gophercloud/openstack/objectstorage/v1/accounts" + "github.com/rackspace/gophercloud/pagination" +) + +// ListOptsBuilder allows extensions to add additional parameters to the List +// request. +type ListOptsBuilder interface { + ToObjectListParams() (bool, string, error) +} + +// ListOpts is a structure that holds parameters for listing objects. +type ListOpts struct { + // Full is a true/false value that represents the amount of object information + // returned. If Full is set to true, then the content-type, number of bytes, hash + // date last modified, and name are returned. If set to false or not set, then + // only the object names are returned. + Full bool + Limit int `q:"limit"` + Marker string `q:"marker"` + EndMarker string `q:"end_marker"` + Format string `q:"format"` + Prefix string `q:"prefix"` + Delimiter string `q:"delimiter"` + Path string `q:"path"` +} + +// ToObjectListParams formats a ListOpts into a query string and boolean +// representing whether to list complete information for each object. +func (opts ListOpts) ToObjectListParams() (bool, string, error) { + q, err := gophercloud.BuildQueryString(opts) + if err != nil { + return false, "", err + } + return opts.Full, q.String(), nil +} + +// List is a function that retrieves all objects in a container. It also returns the details +// for the container. To extract only the object information or names, pass the ListResult +// response to the ExtractInfo or ExtractNames function, respectively. 
+func List(c *gophercloud.ServiceClient, containerName string, opts ListOptsBuilder) pagination.Pager { + headers := map[string]string{"Accept": "text/plain", "Content-Type": "text/plain"} + + url := listURL(c, containerName) + if opts != nil { + full, query, err := opts.ToObjectListParams() + if err != nil { + return pagination.Pager{Err: err} + } + url += query + + if full { + headers = map[string]string{"Accept": "application/json", "Content-Type": "application/json"} + } + } + + createPage := func(r pagination.PageResult) pagination.Page { + p := ObjectPage{pagination.MarkerPageBase{PageResult: r}} + p.MarkerPageBase.Owner = p + return p + } + + pager := pagination.NewPager(c, url, createPage) + pager.Headers = headers + return pager +} + +// DownloadOptsBuilder allows extensions to add additional parameters to the +// Download request. +type DownloadOptsBuilder interface { + ToObjectDownloadParams() (map[string]string, string, error) +} + +// DownloadOpts is a structure that holds parameters for downloading an object. +type DownloadOpts struct { + IfMatch string `h:"If-Match"` + IfModifiedSince time.Time `h:"If-Modified-Since"` + IfNoneMatch string `h:"If-None-Match"` + IfUnmodifiedSince time.Time `h:"If-Unmodified-Since"` + Range string `h:"Range"` + Expires string `q:"expires"` + MultipartManifest string `q:"multipart-manifest"` + Signature string `q:"signature"` +} + +// ToObjectDownloadParams formats a DownloadOpts into a query string and map of +// headers. +func (opts DownloadOpts) ToObjectDownloadParams() (map[string]string, string, error) { + q, err := gophercloud.BuildQueryString(opts) + if err != nil { + return nil, "", err + } + h, err := gophercloud.BuildHeaders(opts) + if err != nil { + return nil, q.String(), err + } + return h, q.String(), nil +} + +// Download is a function that retrieves the content and metadata for an object. +// To extract just the content, pass the DownloadResult response to the +// ExtractContent function. +func Download(c *gophercloud.ServiceClient, containerName, objectName string, opts DownloadOptsBuilder) DownloadResult { + var res DownloadResult + + url := downloadURL(c, containerName, objectName) + h := c.AuthenticatedHeaders() + + if opts != nil { + headers, query, err := opts.ToObjectDownloadParams() + if err != nil { + res.Err = err + return res + } + + for k, v := range headers { + h[k] = v + } + + url += query + } + + resp, err := c.Request("GET", url, gophercloud.RequestOpts{ + MoreHeaders: h, + OkCodes: []int{200, 304}, + }) + if resp != nil { + res.Header = resp.Header + res.Body = resp.Body + } + res.Err = err + + return res +} + +// CreateOptsBuilder allows extensions to add additional parameters to the +// Create request. +type CreateOptsBuilder interface { + ToObjectCreateParams() (map[string]string, string, error) +} + +// CreateOpts is a structure that holds parameters for creating an object. 
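// Illustrative example (editor's note, not vendored code): fetching an
// object's content with the Download call defined above. Assumes an
// authenticated "client"; container and object names are hypothetical.
//
//	data, err := objects.Download(client, "backups", "notes.txt", nil).ExtractContent()
//	if err != nil {
//		return err
//	}
//	fmt.Printf("downloaded %d bytes\n", len(data)) // ExtractContent reads and closes the body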
+type CreateOpts struct {
+	Metadata           map[string]string
+	ContentDisposition string `h:"Content-Disposition"`
+	ContentEncoding    string `h:"Content-Encoding"`
+	ContentLength      int64  `h:"Content-Length"`
+	ContentType        string `h:"Content-Type"`
+	CopyFrom           string `h:"X-Copy-From"`
+	DeleteAfter        int    `h:"X-Delete-After"`
+	DeleteAt           int    `h:"X-Delete-At"`
+	DetectContentType  string `h:"X-Detect-Content-Type"`
+	ETag               string `h:"ETag"`
+	IfNoneMatch        string `h:"If-None-Match"`
+	ObjectManifest     string `h:"X-Object-Manifest"`
+	TransferEncoding   string `h:"Transfer-Encoding"`
+	Expires            string `q:"expires"`
+	MultipartManifest  string `q:"multipart-manifest"`
+	Signature          string `q:"signature"`
+}
+
+// ToObjectCreateParams formats a CreateOpts into a query string and map of
+// headers.
+func (opts CreateOpts) ToObjectCreateParams() (map[string]string, string, error) {
+	q, err := gophercloud.BuildQueryString(opts)
+	if err != nil {
+		return nil, "", err
+	}
+	h, err := gophercloud.BuildHeaders(opts)
+	if err != nil {
+		return nil, q.String(), err
+	}
+
+	for k, v := range opts.Metadata {
+		h["X-Object-Meta-"+k] = v
+	}
+
+	return h, q.String(), nil
+}
+
+// Create is a function that creates a new object or replaces an existing object.
+// The request is sent with a locally computed MD5 checksum as its ETag header; if
+// the ETag returned by the API fails to match the local checksum, an error is set
+// on the returned CreateResult.
+func Create(c *gophercloud.ServiceClient, containerName, objectName string, content io.ReadSeeker, opts CreateOptsBuilder) CreateResult {
+	var res CreateResult
+
+	url := createURL(c, containerName, objectName)
+	h := make(map[string]string)
+
+	if opts != nil {
+		headers, query, err := opts.ToObjectCreateParams()
+		if err != nil {
+			res.Err = err
+			return res
+		}
+
+		for k, v := range headers {
+			h[k] = v
+		}
+
+		url += query
+	}
+
+	hash := md5.New()
+	bufioReader := bufio.NewReader(io.TeeReader(content, hash))
+	io.Copy(ioutil.Discard, bufioReader)
+	localChecksum := hash.Sum(nil)
+
+	h["ETag"] = fmt.Sprintf("%x", localChecksum)
+
+	_, err := content.Seek(0, 0)
+	if err != nil {
+		res.Err = err
+		return res
+	}
+
+	ropts := gophercloud.RequestOpts{
+		RawBody:     content,
+		MoreHeaders: h,
+	}
+
+	resp, err := c.Request("PUT", url, ropts)
+	if err != nil {
+		res.Err = err
+		return res
+	}
+	if resp != nil {
+		res.Header = resp.Header
+		if resp.Header.Get("ETag") == fmt.Sprintf("%x", localChecksum) {
+			res.Err = err
+			return res
+		}
+		res.Err = fmt.Errorf("Local checksum does not match API ETag header")
+	}
+
+	return res
+}
+
+// CopyOptsBuilder allows extensions to add additional parameters to the
+// Copy request.
+type CopyOptsBuilder interface {
+	ToObjectCopyMap() (map[string]string, error)
+}
+
+// CopyOpts is a structure that holds parameters for copying one object to
+// another.
+type CopyOpts struct {
+	Metadata           map[string]string
+	ContentDisposition string `h:"Content-Disposition"`
+	ContentEncoding    string `h:"Content-Encoding"`
+	ContentType        string `h:"Content-Type"`
+	Destination        string `h:"Destination,required"`
+}
+
+// ToObjectCopyMap formats a CopyOpts into a map of headers.
+func (opts CopyOpts) ToObjectCopyMap() (map[string]string, error) {
+	if opts.Destination == "" {
+		return nil, fmt.Errorf("Required CopyOpts field 'Destination' not set.")
+	}
+	h, err := gophercloud.BuildHeaders(opts)
+	if err != nil {
+		return nil, err
+	}
+	for k, v := range opts.Metadata {
+		h["X-Object-Meta-"+k] = v
+	}
+	return h, nil
+}
+
+// Copy is a function that copies one object to another.
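// Illustrative example (editor's note, not vendored code): uploading an object
// with the Create call above. The content argument must be an io.ReadSeeker
// because the body is consumed twice: once to compute the local MD5 sent as
// the ETag header, and again (after seeking back to the start) as the upload
// body. Assumes an authenticated "client"; names and content are hypothetical.
//
//	res := objects.Create(client, "backups", "notes.txt",
//		strings.NewReader("some payload"), objects.CreateOpts{
//			ContentType: "text/plain",
//		})
//	if res.Err != nil {
//		// Also set when the API's ETag disagrees with the local checksum.
//		return res.Err
//	}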
+func Copy(c *gophercloud.ServiceClient, containerName, objectName string, opts CopyOptsBuilder) CopyResult { + var res CopyResult + h := c.AuthenticatedHeaders() + + headers, err := opts.ToObjectCopyMap() + if err != nil { + res.Err = err + return res + } + + for k, v := range headers { + h[k] = v + } + + url := copyURL(c, containerName, objectName) + resp, err := c.Request("COPY", url, gophercloud.RequestOpts{ + MoreHeaders: h, + OkCodes: []int{201}, + }) + if resp != nil { + res.Header = resp.Header + } + res.Err = err + return res +} + +// DeleteOptsBuilder allows extensions to add additional parameters to the +// Delete request. +type DeleteOptsBuilder interface { + ToObjectDeleteQuery() (string, error) +} + +// DeleteOpts is a structure that holds parameters for deleting an object. +type DeleteOpts struct { + MultipartManifest string `q:"multipart-manifest"` +} + +// ToObjectDeleteQuery formats a DeleteOpts into a query string. +func (opts DeleteOpts) ToObjectDeleteQuery() (string, error) { + q, err := gophercloud.BuildQueryString(opts) + if err != nil { + return "", err + } + return q.String(), nil +} + +// Delete is a function that deletes an object. +func Delete(c *gophercloud.ServiceClient, containerName, objectName string, opts DeleteOptsBuilder) DeleteResult { + var res DeleteResult + url := deleteURL(c, containerName, objectName) + + if opts != nil { + query, err := opts.ToObjectDeleteQuery() + if err != nil { + res.Err = err + return res + } + url += query + } + + resp, err := c.Delete(url, nil) + if resp != nil { + res.Header = resp.Header + } + res.Err = err + return res +} + +// GetOptsBuilder allows extensions to add additional parameters to the +// Get request. +type GetOptsBuilder interface { + ToObjectGetQuery() (string, error) +} + +// GetOpts is a structure that holds parameters for getting an object's metadata. +type GetOpts struct { + Expires string `q:"expires"` + Signature string `q:"signature"` +} + +// ToObjectGetQuery formats a GetOpts into a query string. +func (opts GetOpts) ToObjectGetQuery() (string, error) { + q, err := gophercloud.BuildQueryString(opts) + if err != nil { + return "", err + } + return q.String(), nil +} + +// Get is a function that retrieves the metadata of an object. To extract just the custom +// metadata, pass the GetResult response to the ExtractMetadata function. +func Get(c *gophercloud.ServiceClient, containerName, objectName string, opts GetOptsBuilder) GetResult { + var res GetResult + url := getURL(c, containerName, objectName) + + if opts != nil { + query, err := opts.ToObjectGetQuery() + if err != nil { + res.Err = err + return res + } + url += query + } + + resp, err := c.Request("HEAD", url, gophercloud.RequestOpts{ + OkCodes: []int{200, 204}, + }) + if resp != nil { + res.Header = resp.Header + } + res.Err = err + return res +} + +// UpdateOptsBuilder allows extensions to add additional parameters to the +// Update request. +type UpdateOptsBuilder interface { + ToObjectUpdateMap() (map[string]string, error) +} + +// UpdateOpts is a structure that holds parameters for updating, creating, or deleting an +// object's metadata. +type UpdateOpts struct { + Metadata map[string]string + ContentDisposition string `h:"Content-Disposition"` + ContentEncoding string `h:"Content-Encoding"` + ContentType string `h:"Content-Type"` + DeleteAfter int `h:"X-Delete-After"` + DeleteAt int `h:"X-Delete-At"` + DetectContentType bool `h:"X-Detect-Content-Type"` +} + +// ToObjectUpdateMap formats a UpdateOpts into a map of headers. 
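// Illustrative example (editor's note, not vendored code): a server-side copy
// followed by deletion of the source, using the Copy and Delete calls above.
// Assumes an authenticated "client"; all names are hypothetical.
//
//	res := objects.Copy(client, "backups", "notes.txt", objects.CopyOpts{
//		Destination: "/archive/notes-2016.txt", // required, in /container/object form
//	})
//	if res.Err != nil {
//		return res.Err
//	}
//	if del := objects.Delete(client, "backups", "notes.txt", nil); del.Err != nil {
//		return del.Err
//	}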
+func (opts UpdateOpts) ToObjectUpdateMap() (map[string]string, error) { + h, err := gophercloud.BuildHeaders(opts) + if err != nil { + return nil, err + } + for k, v := range opts.Metadata { + h["X-Object-Meta-"+k] = v + } + return h, nil +} + +// Update is a function that creates, updates, or deletes an object's metadata. +func Update(c *gophercloud.ServiceClient, containerName, objectName string, opts UpdateOptsBuilder) UpdateResult { + var res UpdateResult + h := c.AuthenticatedHeaders() + + if opts != nil { + headers, err := opts.ToObjectUpdateMap() + if err != nil { + res.Err = err + return res + } + + for k, v := range headers { + h[k] = v + } + } + + url := updateURL(c, containerName, objectName) + resp, err := c.Request("POST", url, gophercloud.RequestOpts{ + MoreHeaders: h, + }) + if resp != nil { + res.Header = resp.Header + } + res.Err = err + return res +} + +// HTTPMethod represents an HTTP method string (e.g. "GET"). +type HTTPMethod string + +var ( + // GET represents an HTTP "GET" method. + GET HTTPMethod = "GET" + // POST represents an HTTP "POST" method. + POST HTTPMethod = "POST" +) + +// CreateTempURLOpts are options for creating a temporary URL for an object. +type CreateTempURLOpts struct { + // (REQUIRED) Method is the HTTP method to allow for users of the temp URL. Valid values + // are "GET" and "POST". + Method HTTPMethod + // (REQUIRED) TTL is the number of seconds the temp URL should be active. + TTL int + // (Optional) Split is the string on which to split the object URL. Since only + // the object path is used in the hash, the object URL needs to be parsed. If + // empty, the default OpenStack URL split point will be used ("/v1/"). + Split string +} + +// CreateTempURL is a function for creating a temporary URL for an object. It +// allows users to have "GET" or "POST" access to a particular tenant's object +// for a limited amount of time. 
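// Illustrative example (editor's note, not vendored code): generating a
// pre-signed GET URL with the CreateTempURL call below. The account must
// already have a temp-URL key set (see TempURLKey in the accounts package),
// since the key is fetched via accounts.Get and used to HMAC-SHA1 sign the
// method, expiry, and object path. Assumes an authenticated "client"; names
// are hypothetical.
//
//	url, err := objects.CreateTempURL(client, "photos", "vacation.jpg", objects.CreateTempURLOpts{
//		Method: objects.GET,
//		TTL:    300, // seconds the link stays valid
//	})
//	if err != nil {
//		return err
//	}
//	fmt.Println(url) // ...?temp_url_sig=<hex HMAC>&temp_url_expires=<unix time>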
+func CreateTempURL(c *gophercloud.ServiceClient, containerName, objectName string, opts CreateTempURLOpts) (string, error) { + if opts.Split == "" { + opts.Split = "/v1/" + } + duration := time.Duration(opts.TTL) * time.Second + expiry := time.Now().Add(duration).Unix() + getHeader, err := accounts.Get(c, nil).Extract() + if err != nil { + return "", err + } + secretKey := []byte(getHeader.TempURLKey) + url := getURL(c, containerName, objectName) + splitPath := strings.Split(url, opts.Split) + baseURL, objectPath := splitPath[0], splitPath[1] + objectPath = opts.Split + objectPath + body := fmt.Sprintf("%s\n%d\n%s", opts.Method, expiry, objectPath) + hash := hmac.New(sha1.New, secretKey) + hash.Write([]byte(body)) + hexsum := fmt.Sprintf("%x", hash.Sum(nil)) + return fmt.Sprintf("%s%s?temp_url_sig=%s&temp_url_expires=%d", baseURL, objectPath, hexsum, expiry), nil +} diff --git a/vendor/github.com/rackspace/gophercloud/openstack/objectstorage/v1/objects/requests_test.go b/vendor/github.com/rackspace/gophercloud/openstack/objectstorage/v1/objects/requests_test.go new file mode 100644 index 000000000..f7d682291 --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/openstack/objectstorage/v1/objects/requests_test.go @@ -0,0 +1,165 @@ +package objects + +import ( + "bytes" + "fmt" + "io" + "net/http" + "strings" + "testing" + + "github.com/rackspace/gophercloud/pagination" + th "github.com/rackspace/gophercloud/testhelper" + fake "github.com/rackspace/gophercloud/testhelper/client" +) + +func TestDownloadReader(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleDownloadObjectSuccessfully(t) + + response := Download(fake.ServiceClient(), "testContainer", "testObject", nil) + defer response.Body.Close() + + // Check reader + buf := bytes.NewBuffer(make([]byte, 0)) + io.CopyN(buf, response.Body, 10) + th.CheckEquals(t, "Successful", string(buf.Bytes())) +} + +func TestDownloadExtraction(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleDownloadObjectSuccessfully(t) + + response := Download(fake.ServiceClient(), "testContainer", "testObject", nil) + + // Check []byte extraction + bytes, err := response.ExtractContent() + th.AssertNoErr(t, err) + th.CheckEquals(t, "Successful download with Gophercloud", string(bytes)) +} + +func TestListObjectInfo(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleListObjectsInfoSuccessfully(t) + + count := 0 + options := &ListOpts{Full: true} + err := List(fake.ServiceClient(), "testContainer", options).EachPage(func(page pagination.Page) (bool, error) { + count++ + actual, err := ExtractInfo(page) + th.AssertNoErr(t, err) + + th.CheckDeepEquals(t, ExpectedListInfo, actual) + + return true, nil + }) + th.AssertNoErr(t, err) + th.CheckEquals(t, count, 1) +} + +func TestListObjectNames(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleListObjectNamesSuccessfully(t) + + count := 0 + options := &ListOpts{Full: false} + err := List(fake.ServiceClient(), "testContainer", options).EachPage(func(page pagination.Page) (bool, error) { + count++ + actual, err := ExtractNames(page) + if err != nil { + t.Errorf("Failed to extract container names: %v", err) + return false, err + } + + th.CheckDeepEquals(t, ExpectedListNames, actual) + + return true, nil + }) + th.AssertNoErr(t, err) + th.CheckEquals(t, count, 1) +} + +func TestCreateObject(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + content := "Did gyre and gimble in the wabe" + + HandleCreateTextObjectSuccessfully(t, content) + + options 
:= &CreateOpts{ContentType: "text/plain"}
+	res := Create(fake.ServiceClient(), "testContainer", "testObject", strings.NewReader(content), options)
+	th.AssertNoErr(t, res.Err)
+}
+
+func TestCreateObjectWithoutContentType(t *testing.T) {
+	th.SetupHTTP()
+	defer th.TeardownHTTP()
+
+	content := "The sky was the color of television, tuned to a dead channel."
+
+	HandleCreateTypelessObjectSuccessfully(t, content)
+
+	res := Create(fake.ServiceClient(), "testContainer", "testObject", strings.NewReader(content), &CreateOpts{})
+	th.AssertNoErr(t, res.Err)
+}
+
+func TestErrorIsRaisedForChecksumMismatch(t *testing.T) {
+	th.SetupHTTP()
+	defer th.TeardownHTTP()
+
+	th.Mux.HandleFunc("/testContainer/testObject", func(w http.ResponseWriter, r *http.Request) {
+		w.Header().Set("ETag", "acbd18db4cc2f85cedef654fccc4a4d8")
+		w.WriteHeader(http.StatusCreated)
+	})
+
+	content := strings.NewReader("The sky was the color of television, tuned to a dead channel.")
+	res := Create(fake.ServiceClient(), "testContainer", "testObject", content, &CreateOpts{})
+
+	err := fmt.Errorf("Local checksum does not match API ETag header")
+	th.AssertDeepEquals(t, err, res.Err)
+}
+
+func TestCopyObject(t *testing.T) {
+	th.SetupHTTP()
+	defer th.TeardownHTTP()
+	HandleCopyObjectSuccessfully(t)
+
+	options := &CopyOpts{Destination: "/newTestContainer/newTestObject"}
+	res := Copy(fake.ServiceClient(), "testContainer", "testObject", options)
+	th.AssertNoErr(t, res.Err)
+}
+
+func TestDeleteObject(t *testing.T) {
+	th.SetupHTTP()
+	defer th.TeardownHTTP()
+	HandleDeleteObjectSuccessfully(t)
+
+	res := Delete(fake.ServiceClient(), "testContainer", "testObject", nil)
+	th.AssertNoErr(t, res.Err)
+}
+
+func TestUpdateObjectMetadata(t *testing.T) {
+	th.SetupHTTP()
+	defer th.TeardownHTTP()
+	HandleUpdateObjectSuccessfully(t)
+
+	options := &UpdateOpts{Metadata: map[string]string{"Gophercloud-Test": "objects"}}
+	res := Update(fake.ServiceClient(), "testContainer", "testObject", options)
+	th.AssertNoErr(t, res.Err)
+}
+
+func TestGetObject(t *testing.T) {
+	th.SetupHTTP()
+	defer th.TeardownHTTP()
+	HandleGetObjectSuccessfully(t)
+
+	expected := map[string]string{"Gophercloud-Test": "objects"}
+	actual, err := Get(fake.ServiceClient(), "testContainer", "testObject", nil).ExtractMetadata()
+	th.AssertNoErr(t, err)
+	th.CheckDeepEquals(t, expected, actual)
+}
diff --git a/vendor/github.com/rackspace/gophercloud/openstack/objectstorage/v1/objects/results.go b/vendor/github.com/rackspace/gophercloud/openstack/objectstorage/v1/objects/results.go
new file mode 100644
index 000000000..ecb2c5458
--- /dev/null
+++ b/vendor/github.com/rackspace/gophercloud/openstack/objectstorage/v1/objects/results.go
@@ -0,0 +1,438 @@
+package objects
+
+import (
+	"fmt"
+	"io"
+	"io/ioutil"
+	"strconv"
+	"strings"
+	"time"
+
+	"github.com/rackspace/gophercloud"
+	"github.com/rackspace/gophercloud/pagination"
+
+	"github.com/mitchellh/mapstructure"
+)
+
+// Object is a structure that holds information related to a storage object.
+type Object struct {
+	// Bytes is the total number of bytes that comprise the object.
+	Bytes int64 `json:"bytes" mapstructure:"bytes"`
+
+	// ContentType is the content type of the object.
+	ContentType string `json:"content_type" mapstructure:"content_type"`
+
+	// Hash represents the MD5 checksum value of the object's content.
+	Hash string `json:"hash" mapstructure:"hash"`
+
+	// LastModified is the RFC3339Milli time the object was last modified, represented
+	// as a string.
+	//	lastModified, err := time.Parse(gophercloud.RFC3339Milli, obj.LastModified)
+	LastModified string `json:"last_modified" mapstructure:"last_modified"`
+
+	// Name is the unique name for the object.
+	Name string `json:"name" mapstructure:"name"`
+}
+
+// ObjectPage is a single page of objects that is returned from a call to the
+// List function.
+type ObjectPage struct {
+	pagination.MarkerPageBase
+}
+
+// IsEmpty returns true if an ObjectPage contains no object names.
+func (r ObjectPage) IsEmpty() (bool, error) {
+	names, err := ExtractNames(r)
+	if err != nil {
+		return true, err
+	}
+	return len(names) == 0, nil
+}
+
+// LastMarker returns the last object name in an ObjectPage.
+func (r ObjectPage) LastMarker() (string, error) {
+	names, err := ExtractNames(r)
+	if err != nil {
+		return "", err
+	}
+	if len(names) == 0 {
+		return "", nil
+	}
+	return names[len(names)-1], nil
+}
+
+// ExtractInfo is a function that takes a page of objects and returns their full information.
+func ExtractInfo(page pagination.Page) ([]Object, error) {
+	untyped := page.(ObjectPage).Body.([]interface{})
+	results := make([]Object, len(untyped))
+	for index, each := range untyped {
+		object := each.(map[string]interface{})
+		err := mapstructure.Decode(object, &results[index])
+		if err != nil {
+			return results, err
+		}
+	}
+	return results, nil
+}
+
+// ExtractNames is a function that takes a page of objects and returns only their names.
+func ExtractNames(page pagination.Page) ([]string, error) {
+	casted := page.(ObjectPage)
+	ct := casted.Header.Get("Content-Type")
+	switch {
+	case strings.HasPrefix(ct, "application/json"):
+		parsed, err := ExtractInfo(page)
+		if err != nil {
+			return nil, err
+		}
+
+		names := make([]string, 0, len(parsed))
+		for _, object := range parsed {
+			names = append(names, object.Name)
+		}
+
+		return names, nil
+	case strings.HasPrefix(ct, "text/plain"):
+		names := make([]string, 0, 50)
+
+		body := string(page.(ObjectPage).Body.([]uint8))
+		for _, name := range strings.Split(body, "\n") {
+			if len(name) > 0 {
+				names = append(names, name)
+			}
+		}
+
+		return names, nil
+	case strings.HasPrefix(ct, "text/html"):
+		return []string{}, nil
+	default:
+		return nil, fmt.Errorf("Cannot extract names from response with content-type: [%s]", ct)
+	}
+}
+
+// DownloadHeader represents the headers returned in the response from a Download request.
+type DownloadHeader struct {
+	AcceptRanges       string    `mapstructure:"Accept-Ranges"`
+	ContentDisposition string    `mapstructure:"Content-Disposition"`
+	ContentEncoding    string    `mapstructure:"Content-Encoding"`
+	ContentLength      int64     `mapstructure:"Content-Length"`
+	ContentType        string    `mapstructure:"Content-Type"`
+	Date               time.Time `mapstructure:"-"`
+	DeleteAt           time.Time `mapstructure:"-"`
+	ETag               string    `mapstructure:"Etag"`
+	LastModified       time.Time `mapstructure:"-"`
+	ObjectManifest     string    `mapstructure:"X-Object-Manifest"`
+	StaticLargeObject  bool      `mapstructure:"X-Static-Large-Object"`
+	TransID            string    `mapstructure:"X-Trans-Id"`
+}
+
+// DownloadResult is returned from a call to the Download function.
+type DownloadResult struct {
+	gophercloud.HeaderResult
+	Body io.ReadCloser
+}
+
+// Extract will return a struct of headers returned from a call to Download. To obtain
+// a map of headers, call the ExtractHeader method on the DownloadResult.
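+//
+// A minimal usage sketch, assuming client is an authenticated Object Storage
+// *gophercloud.ServiceClient and the container/object names are placeholders:
+//
+//	result := objects.Download(client, "myContainer", "myObject", nil)
+//	header, err := result.Extract()
+//	if err == nil {
+//		fmt.Println(header.ContentType, header.ContentLength)
+//	}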
+func (dr DownloadResult) Extract() (DownloadHeader, error) {
+	var dh DownloadHeader
+	if dr.Err != nil {
+		return dh, dr.Err
+	}
+
+	if err := gophercloud.DecodeHeader(dr.Header, &dh); err != nil {
+		return dh, err
+	}
+
+	if date, ok := dr.Header["Date"]; ok && len(date) > 0 {
+		t, err := time.Parse(time.RFC1123, date[0])
+		if err != nil {
+			return dh, err
+		}
+		dh.Date = t
+	}
+
+	if date, ok := dr.Header["Last-Modified"]; ok && len(date) > 0 {
+		t, err := time.Parse(time.RFC1123, date[0])
+		if err != nil {
+			return dh, err
+		}
+		dh.LastModified = t
+	}
+
+	if date, ok := dr.Header["X-Delete-At"]; ok && len(date) > 0 {
+		unix, err := strconv.ParseInt(date[0], 10, 64)
+		if err != nil {
+			return dh, err
+		}
+		dh.DeleteAt = time.Unix(unix, 0)
+	}
+
+	return dh, nil
+}
+
+// ExtractContent is a function that takes a DownloadResult's io.Reader body
+// and reads all available data into a slice of bytes. Please be aware that
+// io.Reader is forward-only: the body can only be read once and cannot be
+// rewound. You can recreate a reader from the output of this function by
+// using bytes.NewReader(downloadBytes).
+func (dr DownloadResult) ExtractContent() ([]byte, error) {
+	if dr.Err != nil {
+		return nil, dr.Err
+	}
+	body, err := ioutil.ReadAll(dr.Body)
+	if err != nil {
+		return nil, err
+	}
+	dr.Body.Close()
+	return body, nil
+}
+
+// GetHeader represents the headers returned in the response from a Get request.
+type GetHeader struct {
+	ContentDisposition string    `mapstructure:"Content-Disposition"`
+	ContentEncoding    string    `mapstructure:"Content-Encoding"`
+	ContentLength      int64     `mapstructure:"Content-Length"`
+	ContentType        string    `mapstructure:"Content-Type"`
+	Date               time.Time `mapstructure:"-"`
+	DeleteAt           time.Time `mapstructure:"-"`
+	ETag               string    `mapstructure:"Etag"`
+	LastModified       time.Time `mapstructure:"-"`
+	ObjectManifest     string    `mapstructure:"X-Object-Manifest"`
+	StaticLargeObject  bool      `mapstructure:"X-Static-Large-Object"`
+	TransID            string    `mapstructure:"X-Trans-Id"`
+}
+
+// GetResult is returned from a call to the Get function.
+type GetResult struct {
+	gophercloud.HeaderResult
+}
+
+// Extract will return a struct of headers returned from a call to Get. To obtain
+// a map of headers, call the ExtractHeader method on the GetResult.
+func (gr GetResult) Extract() (GetHeader, error) {
+	var gh GetHeader
+	if gr.Err != nil {
+		return gh, gr.Err
+	}
+
+	if err := gophercloud.DecodeHeader(gr.Header, &gh); err != nil {
+		return gh, err
+	}
+
+	if date, ok := gr.Header["Date"]; ok && len(date) > 0 {
+		t, err := time.Parse(time.RFC1123, gr.Header["Date"][0])
+		if err != nil {
+			return gh, err
+		}
+		gh.Date = t
+	}
+
+	if date, ok := gr.Header["Last-Modified"]; ok && len(date) > 0 {
+		t, err := time.Parse(time.RFC1123, gr.Header["Last-Modified"][0])
+		if err != nil {
+			return gh, err
+		}
+		gh.LastModified = t
+	}
+
+	if date, ok := gr.Header["X-Delete-At"]; ok && len(date) > 0 {
+		unix, err := strconv.ParseInt(date[0], 10, 64)
+		if err != nil {
+			return gh, err
+		}
+		gh.DeleteAt = time.Unix(unix, 0)
+	}
+
+	return gh, nil
+}
+
+// ExtractMetadata is a function that takes a GetResult and returns the
+// custom metadata associated with the object.
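+//
+// For example (a sketch; client is assumed to be an authenticated Object
+// Storage *gophercloud.ServiceClient and the metadata key is a placeholder):
+//
+//	metadata, err := objects.Get(client, "myContainer", "myObject", nil).ExtractMetadata()
+//	if err == nil {
+//		fmt.Println(metadata["Some-Key"])
+//	}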
+func (gr GetResult) ExtractMetadata() (map[string]string, error) {
+	if gr.Err != nil {
+		return nil, gr.Err
+	}
+	metadata := make(map[string]string)
+	for k, v := range gr.Header {
+		if strings.HasPrefix(k, "X-Object-Meta-") {
+			key := strings.TrimPrefix(k, "X-Object-Meta-")
+			metadata[key] = v[0]
+		}
+	}
+	return metadata, nil
+}
+
+// CreateHeader represents the headers returned in the response from a Create request.
+type CreateHeader struct {
+	ContentLength int64     `mapstructure:"Content-Length"`
+	ContentType   string    `mapstructure:"Content-Type"`
+	Date          time.Time `mapstructure:"-"`
+	ETag          string    `mapstructure:"Etag"`
+	LastModified  time.Time `mapstructure:"-"`
+	TransID       string    `mapstructure:"X-Trans-Id"`
+}
+
+// CreateResult represents the result of a create operation.
+type CreateResult struct {
+	gophercloud.HeaderResult
+}
+
+// Extract will return a struct of headers returned from a call to Create. To obtain
+// a map of headers, call the ExtractHeader method on the CreateResult.
+func (cr CreateResult) Extract() (CreateHeader, error) {
+	var ch CreateHeader
+	if cr.Err != nil {
+		return ch, cr.Err
+	}
+
+	if err := gophercloud.DecodeHeader(cr.Header, &ch); err != nil {
+		return ch, err
+	}
+
+	if date, ok := cr.Header["Date"]; ok && len(date) > 0 {
+		t, err := time.Parse(time.RFC1123, cr.Header["Date"][0])
+		if err != nil {
+			return ch, err
+		}
+		ch.Date = t
+	}
+
+	if date, ok := cr.Header["Last-Modified"]; ok && len(date) > 0 {
+		t, err := time.Parse(time.RFC1123, cr.Header["Last-Modified"][0])
+		if err != nil {
+			return ch, err
+		}
+		ch.LastModified = t
+	}
+
+	return ch, nil
+}
+
+// UpdateHeader represents the headers returned in the response from an Update request.
+type UpdateHeader struct {
+	ContentLength int64     `mapstructure:"Content-Length"`
+	ContentType   string    `mapstructure:"Content-Type"`
+	Date          time.Time `mapstructure:"-"`
+	TransID       string    `mapstructure:"X-Trans-Id"`
+}
+
+// UpdateResult represents the result of an update operation.
+type UpdateResult struct {
+	gophercloud.HeaderResult
+}
+
+// Extract will return a struct of headers returned from a call to Update. To obtain
+// a map of headers, call the ExtractHeader method on the UpdateResult.
+func (ur UpdateResult) Extract() (UpdateHeader, error) {
+	var uh UpdateHeader
+	if ur.Err != nil {
+		return uh, ur.Err
+	}
+
+	if err := gophercloud.DecodeHeader(ur.Header, &uh); err != nil {
+		return uh, err
+	}
+
+	if date, ok := ur.Header["Date"]; ok && len(date) > 0 {
+		t, err := time.Parse(time.RFC1123, ur.Header["Date"][0])
+		if err != nil {
+			return uh, err
+		}
+		uh.Date = t
+	}
+
+	return uh, nil
+}
+
+// DeleteHeader represents the headers returned in the response from a Delete request.
+type DeleteHeader struct {
+	ContentLength int64     `mapstructure:"Content-Length"`
+	ContentType   string    `mapstructure:"Content-Type"`
+	Date          time.Time `mapstructure:"-"`
+	TransID       string    `mapstructure:"X-Trans-Id"`
+}
+
+// DeleteResult represents the result of a delete operation.
+type DeleteResult struct {
+	gophercloud.HeaderResult
+}
+
+// Extract will return a struct of headers returned from a call to Delete. To obtain
+// a map of headers, call the ExtractHeader method on the DeleteResult.
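+//
+// A hedged sketch of typical use (client is assumed to be an authenticated
+// Object Storage *gophercloud.ServiceClient):
+//
+//	res := objects.Delete(client, "myContainer", "myObject", nil)
+//	header, err := res.Extract()
+//	if err == nil {
+//		fmt.Println(header.TransID)
+//	}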
+func (dr DeleteResult) Extract() (DeleteHeader, error) { + var dh DeleteHeader + if dr.Err != nil { + return dh, dr.Err + } + + if err := gophercloud.DecodeHeader(dr.Header, &dh); err != nil { + return dh, err + } + + if date, ok := dr.Header["Date"]; ok && len(date) > 0 { + t, err := time.Parse(time.RFC1123, dr.Header["Date"][0]) + if err != nil { + return dh, err + } + dh.Date = t + } + + return dh, nil +} + +// CopyHeader represents the headers returned in the response from a Copy request. +type CopyHeader struct { + ContentLength int64 `mapstructure:"Content-Length"` + ContentType string `mapstructure:"Content-Type"` + CopiedFrom string `mapstructure:"X-Copied-From"` + CopiedFromLastModified time.Time `mapstructure:"-"` + Date time.Time `mapstructure:"-"` + ETag string `mapstructure:"Etag"` + LastModified time.Time `mapstructure:"-"` + TransID string `mapstructure:"X-Trans-Id"` +} + +// CopyResult represents the result of a copy operation. +type CopyResult struct { + gophercloud.HeaderResult +} + +// Extract will return a struct of headers returned from a call to Copy. To obtain +// a map of headers, call the ExtractHeader method on the CopyResult. +func (cr CopyResult) Extract() (CopyHeader, error) { + var ch CopyHeader + if cr.Err != nil { + return ch, cr.Err + } + + if err := gophercloud.DecodeHeader(cr.Header, &ch); err != nil { + return ch, err + } + + if date, ok := cr.Header["Date"]; ok && len(date) > 0 { + t, err := time.Parse(time.RFC1123, cr.Header["Date"][0]) + if err != nil { + return ch, err + } + ch.Date = t + } + + if date, ok := cr.Header["Last-Modified"]; ok && len(date) > 0 { + t, err := time.Parse(time.RFC1123, cr.Header["Last-Modified"][0]) + if err != nil { + return ch, err + } + ch.LastModified = t + } + + if date, ok := cr.Header["X-Copied-From-Last-Modified"]; ok && len(date) > 0 { + t, err := time.Parse(time.RFC1123, cr.Header["X-Copied-From-Last-Modified"][0]) + if err != nil { + return ch, err + } + ch.CopiedFromLastModified = t + } + + return ch, nil +} diff --git a/vendor/github.com/rackspace/gophercloud/openstack/objectstorage/v1/objects/urls.go b/vendor/github.com/rackspace/gophercloud/openstack/objectstorage/v1/objects/urls.go new file mode 100644 index 000000000..d2ec62cff --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/openstack/objectstorage/v1/objects/urls.go @@ -0,0 +1,33 @@ +package objects + +import ( + "github.com/rackspace/gophercloud" +) + +func listURL(c *gophercloud.ServiceClient, container string) string { + return c.ServiceURL(container) +} + +func copyURL(c *gophercloud.ServiceClient, container, object string) string { + return c.ServiceURL(container, object) +} + +func createURL(c *gophercloud.ServiceClient, container, object string) string { + return copyURL(c, container, object) +} + +func getURL(c *gophercloud.ServiceClient, container, object string) string { + return copyURL(c, container, object) +} + +func deleteURL(c *gophercloud.ServiceClient, container, object string) string { + return copyURL(c, container, object) +} + +func downloadURL(c *gophercloud.ServiceClient, container, object string) string { + return copyURL(c, container, object) +} + +func updateURL(c *gophercloud.ServiceClient, container, object string) string { + return copyURL(c, container, object) +} diff --git a/vendor/github.com/rackspace/gophercloud/openstack/objectstorage/v1/objects/urls_test.go b/vendor/github.com/rackspace/gophercloud/openstack/objectstorage/v1/objects/urls_test.go new file mode 100644 index 000000000..1dcfe3543 --- /dev/null +++ 
b/vendor/github.com/rackspace/gophercloud/openstack/objectstorage/v1/objects/urls_test.go @@ -0,0 +1,56 @@ +package objects + +import ( + "testing" + + "github.com/rackspace/gophercloud" + th "github.com/rackspace/gophercloud/testhelper" +) + +const endpoint = "http://localhost:57909/" + +func endpointClient() *gophercloud.ServiceClient { + return &gophercloud.ServiceClient{Endpoint: endpoint} +} + +func TestListURL(t *testing.T) { + actual := listURL(endpointClient(), "foo") + expected := endpoint + "foo" + th.CheckEquals(t, expected, actual) +} + +func TestCopyURL(t *testing.T) { + actual := copyURL(endpointClient(), "foo", "bar") + expected := endpoint + "foo/bar" + th.CheckEquals(t, expected, actual) +} + +func TestCreateURL(t *testing.T) { + actual := createURL(endpointClient(), "foo", "bar") + expected := endpoint + "foo/bar" + th.CheckEquals(t, expected, actual) +} + +func TestGetURL(t *testing.T) { + actual := getURL(endpointClient(), "foo", "bar") + expected := endpoint + "foo/bar" + th.CheckEquals(t, expected, actual) +} + +func TestDeleteURL(t *testing.T) { + actual := deleteURL(endpointClient(), "foo", "bar") + expected := endpoint + "foo/bar" + th.CheckEquals(t, expected, actual) +} + +func TestDownloadURL(t *testing.T) { + actual := downloadURL(endpointClient(), "foo", "bar") + expected := endpoint + "foo/bar" + th.CheckEquals(t, expected, actual) +} + +func TestUpdateURL(t *testing.T) { + actual := updateURL(endpointClient(), "foo", "bar") + expected := endpoint + "foo/bar" + th.CheckEquals(t, expected, actual) +} diff --git a/vendor/github.com/rackspace/gophercloud/openstack/orchestration/v1/apiversions/doc.go b/vendor/github.com/rackspace/gophercloud/openstack/orchestration/v1/apiversions/doc.go new file mode 100644 index 000000000..f2db622d1 --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/openstack/orchestration/v1/apiversions/doc.go @@ -0,0 +1,4 @@ +// Package apiversions provides information and interaction with the different +// API versions for the OpenStack Heat service. This functionality is not +// restricted to this particular version. 
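+//
+// A hedged usage sketch (client is assumed to be a *gophercloud.ServiceClient
+// for a Heat endpoint):
+//
+//	err := apiversions.ListVersions(client).EachPage(func(page pagination.Page) (bool, error) {
+//		versions, err := apiversions.ExtractAPIVersions(page)
+//		if err != nil {
+//			return false, err
+//		}
+//		fmt.Println(versions)
+//		return true, nil
+//	})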
+package apiversions
diff --git a/vendor/github.com/rackspace/gophercloud/openstack/orchestration/v1/apiversions/requests.go b/vendor/github.com/rackspace/gophercloud/openstack/orchestration/v1/apiversions/requests.go
new file mode 100644
index 000000000..f6454c860
--- /dev/null
+++ b/vendor/github.com/rackspace/gophercloud/openstack/orchestration/v1/apiversions/requests.go
@@ -0,0 +1,13 @@
+package apiversions
+
+import (
+	"github.com/rackspace/gophercloud"
+	"github.com/rackspace/gophercloud/pagination"
+)
+
+// ListVersions lists all the Heat API versions available to end users.
+func ListVersions(c *gophercloud.ServiceClient) pagination.Pager {
+	return pagination.NewPager(c, apiVersionsURL(c), func(r pagination.PageResult) pagination.Page {
+		return APIVersionPage{pagination.SinglePageBase(r)}
+	})
+}
diff --git a/vendor/github.com/rackspace/gophercloud/openstack/orchestration/v1/apiversions/requests_test.go b/vendor/github.com/rackspace/gophercloud/openstack/orchestration/v1/apiversions/requests_test.go
new file mode 100644
index 000000000..a2fc980d3
--- /dev/null
+++ b/vendor/github.com/rackspace/gophercloud/openstack/orchestration/v1/apiversions/requests_test.go
@@ -0,0 +1,89 @@
+package apiversions
+
+import (
+	"fmt"
+	"net/http"
+	"testing"
+
+	"github.com/rackspace/gophercloud"
+	"github.com/rackspace/gophercloud/pagination"
+	th "github.com/rackspace/gophercloud/testhelper"
+	fake "github.com/rackspace/gophercloud/testhelper/client"
+)
+
+func TestListVersions(t *testing.T) {
+	th.SetupHTTP()
+	defer th.TeardownHTTP()
+
+	th.Mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
+		th.TestMethod(t, r, "GET")
+		th.TestHeader(t, r, "X-Auth-Token", fake.TokenID)
+
+		w.Header().Add("Content-Type", "application/json")
+		w.WriteHeader(http.StatusOK)
+
+		fmt.Fprintf(w, `
+{
+    "versions": [
+        {
+            "status": "CURRENT",
+            "id": "v1.0",
+            "links": [
+                {
+                    "href": "http://23.253.228.211:8000/v1",
+                    "rel": "self"
+                }
+            ]
+        }
+    ]
+}`)
+	})
+
+	count := 0
+
+	ListVersions(fake.ServiceClient()).EachPage(func(page pagination.Page) (bool, error) {
+		count++
+		actual, err := ExtractAPIVersions(page)
+		if err != nil {
+			t.Errorf("Failed to extract API versions: %v", err)
+			return false, err
+		}
+
+		expected := []APIVersion{
+			APIVersion{
+				Status: "CURRENT",
+				ID:     "v1.0",
+				Links: []gophercloud.Link{
+					gophercloud.Link{
+						Href: "http://23.253.228.211:8000/v1",
+						Rel:  "self",
+					},
+				},
+			},
+		}
+
+		th.AssertDeepEquals(t, expected, actual)
+
+		return true, nil
+	})
+
+	if count != 1 {
+		t.Errorf("Expected 1 page, got %d", count)
+	}
+}
+
+func TestNonJSONCannotBeExtractedIntoAPIVersions(t *testing.T) {
+	th.SetupHTTP()
+	defer th.TeardownHTTP()
+
+	th.Mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
+		w.WriteHeader(http.StatusOK)
+	})
+
+	ListVersions(fake.ServiceClient()).EachPage(func(page pagination.Page) (bool, error) {
+		if _, err := ExtractAPIVersions(page); err == nil {
+			t.Fatalf("Expected error, got nil")
+		}
+		return true, nil
+	})
+}
diff --git a/vendor/github.com/rackspace/gophercloud/openstack/orchestration/v1/apiversions/results.go b/vendor/github.com/rackspace/gophercloud/openstack/orchestration/v1/apiversions/results.go
new file mode 100644
index 000000000..0700ab0af
--- /dev/null
+++ b/vendor/github.com/rackspace/gophercloud/openstack/orchestration/v1/apiversions/results.go
@@ -0,0 +1,42 @@
+package apiversions
+
+import (
+	"github.com/mitchellh/mapstructure"
+	"github.com/rackspace/gophercloud"
+	"github.com/rackspace/gophercloud/pagination"
+)
+
+// APIVersion represents an API version for Heat. It contains the status of
+// the API, and its unique ID.
+type APIVersion struct {
+	Status string             `mapstructure:"status"`
+	ID     string             `mapstructure:"id"`
+	Links  []gophercloud.Link `mapstructure:"links"`
+}
+
+// APIVersionPage is the page returned by a pager when traversing over a
+// collection of API versions.
+type APIVersionPage struct {
+	pagination.SinglePageBase
+}
+
+// IsEmpty checks whether an APIVersionPage struct is empty.
+func (r APIVersionPage) IsEmpty() (bool, error) {
+	is, err := ExtractAPIVersions(r)
+	if err != nil {
+		return true, err
+	}
+	return len(is) == 0, nil
+}
+
+// ExtractAPIVersions takes a collection page, extracts all of the elements,
+// and returns them as a slice of APIVersion structs. It is effectively a cast.
+func ExtractAPIVersions(page pagination.Page) ([]APIVersion, error) {
+	var resp struct {
+		Versions []APIVersion `mapstructure:"versions"`
+	}
+
+	err := mapstructure.Decode(page.(APIVersionPage).Body, &resp)
+
+	return resp.Versions, err
+}
diff --git a/vendor/github.com/rackspace/gophercloud/openstack/orchestration/v1/apiversions/urls.go b/vendor/github.com/rackspace/gophercloud/openstack/orchestration/v1/apiversions/urls.go
new file mode 100644
index 000000000..55d6e0e7a
--- /dev/null
+++ b/vendor/github.com/rackspace/gophercloud/openstack/orchestration/v1/apiversions/urls.go
@@ -0,0 +1,7 @@
+package apiversions
+
+import "github.com/rackspace/gophercloud"
+
+func apiVersionsURL(c *gophercloud.ServiceClient) string {
+	return c.Endpoint
+}
diff --git a/vendor/github.com/rackspace/gophercloud/openstack/orchestration/v1/buildinfo/doc.go b/vendor/github.com/rackspace/gophercloud/openstack/orchestration/v1/buildinfo/doc.go
new file mode 100644
index 000000000..183e8dfa7
--- /dev/null
+++ b/vendor/github.com/rackspace/gophercloud/openstack/orchestration/v1/buildinfo/doc.go
@@ -0,0 +1,2 @@
+// Package buildinfo provides build information about heat deployments.
+package buildinfo
diff --git a/vendor/github.com/rackspace/gophercloud/openstack/orchestration/v1/buildinfo/fixtures.go b/vendor/github.com/rackspace/gophercloud/openstack/orchestration/v1/buildinfo/fixtures.go
new file mode 100644
index 000000000..20ea09b44
--- /dev/null
+++ b/vendor/github.com/rackspace/gophercloud/openstack/orchestration/v1/buildinfo/fixtures.go
@@ -0,0 +1,45 @@
+package buildinfo
+
+import (
+	"fmt"
+	"net/http"
+	"testing"
+
+	th "github.com/rackspace/gophercloud/testhelper"
+	fake "github.com/rackspace/gophercloud/testhelper/client"
+)
+
+// GetExpected represents the expected object from a Get request.
+var GetExpected = &BuildInfo{
+	API: Revision{
+		Revision: "2.4.5",
+	},
+	Engine: Revision{
+		Revision: "1.2.1",
+	},
+}
+
+// GetOutput represents the response body from a Get request.
+const GetOutput = `
+{
+  "api": {
+    "revision": "2.4.5"
+  },
+  "engine": {
+    "revision": "1.2.1"
+  }
+}`
+
+// HandleGetSuccessfully creates an HTTP handler at `/build_info`
+// on the test handler mux that responds with a `Get` response.
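+//
+// Typical use in a test mirrors requests_test.go in this package:
+//
+//	th.SetupHTTP()
+//	defer th.TeardownHTTP()
+//	HandleGetSuccessfully(t, GetOutput)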
+func HandleGetSuccessfully(t *testing.T, output string) {
+	th.Mux.HandleFunc("/build_info", func(w http.ResponseWriter, r *http.Request) {
+		th.TestMethod(t, r, "GET")
+		th.TestHeader(t, r, "X-Auth-Token", fake.TokenID)
+		th.TestHeader(t, r, "Accept", "application/json")
+
+		w.Header().Set("Content-Type", "application/json")
+		w.WriteHeader(http.StatusOK)
+		fmt.Fprintf(w, output)
+	})
+}
diff --git a/vendor/github.com/rackspace/gophercloud/openstack/orchestration/v1/buildinfo/requests.go b/vendor/github.com/rackspace/gophercloud/openstack/orchestration/v1/buildinfo/requests.go
new file mode 100644
index 000000000..9e03e5cc8
--- /dev/null
+++ b/vendor/github.com/rackspace/gophercloud/openstack/orchestration/v1/buildinfo/requests.go
@@ -0,0 +1,10 @@
+package buildinfo
+
+import "github.com/rackspace/gophercloud"
+
+// Get retrieves build information about a Heat deployment.
+func Get(c *gophercloud.ServiceClient) GetResult {
+	var res GetResult
+	_, res.Err = c.Get(getURL(c), &res.Body, nil)
+	return res
+}
diff --git a/vendor/github.com/rackspace/gophercloud/openstack/orchestration/v1/buildinfo/requests_test.go b/vendor/github.com/rackspace/gophercloud/openstack/orchestration/v1/buildinfo/requests_test.go
new file mode 100644
index 000000000..1e0fe230d
--- /dev/null
+++ b/vendor/github.com/rackspace/gophercloud/openstack/orchestration/v1/buildinfo/requests_test.go
@@ -0,0 +1,20 @@
+package buildinfo
+
+import (
+	"testing"
+
+	th "github.com/rackspace/gophercloud/testhelper"
+	fake "github.com/rackspace/gophercloud/testhelper/client"
+)
+
+func TestGetTemplate(t *testing.T) {
+	th.SetupHTTP()
+	defer th.TeardownHTTP()
+	HandleGetSuccessfully(t, GetOutput)
+
+	actual, err := Get(fake.ServiceClient()).Extract()
+	th.AssertNoErr(t, err)
+
+	expected := GetExpected
+	th.AssertDeepEquals(t, expected, actual)
+}
diff --git a/vendor/github.com/rackspace/gophercloud/openstack/orchestration/v1/buildinfo/results.go b/vendor/github.com/rackspace/gophercloud/openstack/orchestration/v1/buildinfo/results.go
new file mode 100644
index 000000000..683a434a0
--- /dev/null
+++ b/vendor/github.com/rackspace/gophercloud/openstack/orchestration/v1/buildinfo/results.go
@@ -0,0 +1,37 @@
+package buildinfo
+
+import (
+	"github.com/mitchellh/mapstructure"
+	"github.com/rackspace/gophercloud"
+)
+
+// Revision represents the API/Engine revision of a Heat deployment.
+type Revision struct {
+	Revision string `mapstructure:"revision"`
+}
+
+// BuildInfo represents the build information for a Heat deployment.
+type BuildInfo struct {
+	API    Revision `mapstructure:"api"`
+	Engine Revision `mapstructure:"engine"`
+}
+
+// GetResult represents the result of a Get operation.
+type GetResult struct {
+	gophercloud.Result
+}
+
+// Extract returns a pointer to a BuildInfo object and is called after a
+// Get operation.
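+//
+// A minimal sketch (client is assumed to be an authenticated Heat
+// *gophercloud.ServiceClient):
+//
+//	info, err := buildinfo.Get(client).Extract()
+//	if err == nil {
+//		fmt.Println(info.API.Revision, info.Engine.Revision)
+//	}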
+func (r GetResult) Extract() (*BuildInfo, error) { + if r.Err != nil { + return nil, r.Err + } + + var res BuildInfo + if err := mapstructure.Decode(r.Body, &res); err != nil { + return nil, err + } + + return &res, nil +} diff --git a/vendor/github.com/rackspace/gophercloud/openstack/orchestration/v1/buildinfo/urls.go b/vendor/github.com/rackspace/gophercloud/openstack/orchestration/v1/buildinfo/urls.go new file mode 100644 index 000000000..2c873d023 --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/openstack/orchestration/v1/buildinfo/urls.go @@ -0,0 +1,7 @@ +package buildinfo + +import "github.com/rackspace/gophercloud" + +func getURL(c *gophercloud.ServiceClient) string { + return c.ServiceURL("build_info") +} diff --git a/vendor/github.com/rackspace/gophercloud/openstack/orchestration/v1/stackevents/doc.go b/vendor/github.com/rackspace/gophercloud/openstack/orchestration/v1/stackevents/doc.go new file mode 100644 index 000000000..51cdd9747 --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/openstack/orchestration/v1/stackevents/doc.go @@ -0,0 +1,4 @@ +// Package stackevents provides operations for finding, listing, and retrieving +// stack events. Stack events are events that take place on stacks such as +// updating and abandoning. +package stackevents diff --git a/vendor/github.com/rackspace/gophercloud/openstack/orchestration/v1/stackevents/fixtures.go b/vendor/github.com/rackspace/gophercloud/openstack/orchestration/v1/stackevents/fixtures.go new file mode 100644 index 000000000..235787a51 --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/openstack/orchestration/v1/stackevents/fixtures.go @@ -0,0 +1,446 @@ +package stackevents + +import ( + "fmt" + "net/http" + "testing" + "time" + + "github.com/rackspace/gophercloud" + th "github.com/rackspace/gophercloud/testhelper" + fake "github.com/rackspace/gophercloud/testhelper/client" +) + +// FindExpected represents the expected object from a Find request. 
+var FindExpected = []Event{ + Event{ + ResourceName: "hello_world", + Time: time.Date(2015, 2, 5, 21, 33, 11, 0, time.UTC), + Links: []gophercloud.Link{ + gophercloud.Link{ + Href: "http://166.78.160.107:8004/v1/98606384f58d4ad0b3db7d0d779549ac/stacks/postman_stack/5f57cff9-93fc-424e-9f78-df0515e7f48b/resources/hello_world/events/06feb26f-9298-4a9b-8749-9d770e5d577a", + Rel: "self", + }, + gophercloud.Link{ + Href: "http://166.78.160.107:8004/v1/98606384f58d4ad0b3db7d0d779549ac/stacks/postman_stack/5f57cff9-93fc-424e-9f78-df0515e7f48b/resources/hello_world", + Rel: "resource", + }, + gophercloud.Link{ + Href: "http://166.78.160.107:8004/v1/98606384f58d4ad0b3db7d0d779549ac/stacks/postman_stack/5f57cff9-93fc-424e-9f78-df0515e7f48b", + Rel: "stack", + }, + }, + LogicalResourceID: "hello_world", + ResourceStatusReason: "state changed", + ResourceStatus: "CREATE_IN_PROGRESS", + PhysicalResourceID: "", + ID: "06feb26f-9298-4a9b-8749-9d770e5d577a", + }, + Event{ + ResourceName: "hello_world", + Time: time.Date(2015, 2, 5, 21, 33, 27, 0, time.UTC), + Links: []gophercloud.Link{ + gophercloud.Link{ + Href: "http://166.78.160.107:8004/v1/98606384f58d4ad0b3db7d0d779549ac/stacks/postman_stack/5f57cff9-93fc-424e-9f78-df0515e7f48b/resources/hello_world/events/93940999-7d40-44ae-8de4-19624e7b8d18", + Rel: "self", + }, + gophercloud.Link{ + Href: "http://166.78.160.107:8004/v1/98606384f58d4ad0b3db7d0d779549ac/stacks/postman_stack/5f57cff9-93fc-424e-9f78-df0515e7f48b/resources/hello_world", + Rel: "resource", + }, + gophercloud.Link{ + Href: "http://166.78.160.107:8004/v1/98606384f58d4ad0b3db7d0d779549ac/stacks/postman_stack/5f57cff9-93fc-424e-9f78-df0515e7f48b", + Rel: "stack", + }, + }, + LogicalResourceID: "hello_world", + ResourceStatusReason: "state changed", + ResourceStatus: "CREATE_COMPLETE", + PhysicalResourceID: "49181cd6-169a-4130-9455-31185bbfc5bf", + ID: "93940999-7d40-44ae-8de4-19624e7b8d18", + }, +} + +// FindOutput represents the response body from a Find request. 
+const FindOutput = ` +{ + "events": [ + { + "resource_name": "hello_world", + "event_time": "2015-02-05T21:33:11", + "links": [ + { + "href": "http://166.78.160.107:8004/v1/98606384f58d4ad0b3db7d0d779549ac/stacks/postman_stack/5f57cff9-93fc-424e-9f78-df0515e7f48b/resources/hello_world/events/06feb26f-9298-4a9b-8749-9d770e5d577a", + "rel": "self" + }, + { + "href": "http://166.78.160.107:8004/v1/98606384f58d4ad0b3db7d0d779549ac/stacks/postman_stack/5f57cff9-93fc-424e-9f78-df0515e7f48b/resources/hello_world", + "rel": "resource" + }, + { + "href": "http://166.78.160.107:8004/v1/98606384f58d4ad0b3db7d0d779549ac/stacks/postman_stack/5f57cff9-93fc-424e-9f78-df0515e7f48b", + "rel": "stack" + } + ], + "logical_resource_id": "hello_world", + "resource_status_reason": "state changed", + "resource_status": "CREATE_IN_PROGRESS", + "physical_resource_id": null, + "id": "06feb26f-9298-4a9b-8749-9d770e5d577a" + }, + { + "resource_name": "hello_world", + "event_time": "2015-02-05T21:33:27", + "links": [ + { + "href": "http://166.78.160.107:8004/v1/98606384f58d4ad0b3db7d0d779549ac/stacks/postman_stack/5f57cff9-93fc-424e-9f78-df0515e7f48b/resources/hello_world/events/93940999-7d40-44ae-8de4-19624e7b8d18", + "rel": "self" + }, + { + "href": "http://166.78.160.107:8004/v1/98606384f58d4ad0b3db7d0d779549ac/stacks/postman_stack/5f57cff9-93fc-424e-9f78-df0515e7f48b/resources/hello_world", + "rel": "resource" + }, + { + "href": "http://166.78.160.107:8004/v1/98606384f58d4ad0b3db7d0d779549ac/stacks/postman_stack/5f57cff9-93fc-424e-9f78-df0515e7f48b", + "rel": "stack" + } + ], + "logical_resource_id": "hello_world", + "resource_status_reason": "state changed", + "resource_status": "CREATE_COMPLETE", + "physical_resource_id": "49181cd6-169a-4130-9455-31185bbfc5bf", + "id": "93940999-7d40-44ae-8de4-19624e7b8d18" + } + ] +}` + +// HandleFindSuccessfully creates an HTTP handler at `/stacks/postman_stack/events` +// on the test handler mux that responds with a `Find` response. +func HandleFindSuccessfully(t *testing.T, output string) { + th.Mux.HandleFunc("/stacks/postman_stack/events", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + th.TestHeader(t, r, "Accept", "application/json") + + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + fmt.Fprintf(w, output) + }) +} + +// ListExpected represents the expected object from a List request. 
+var ListExpected = []Event{ + Event{ + ResourceName: "hello_world", + Time: time.Date(2015, 2, 5, 21, 33, 11, 0, time.UTC), + Links: []gophercloud.Link{ + gophercloud.Link{ + Href: "http://166.78.160.107:8004/v1/98606384f58d4ad0b3db7d0d779549ac/stacks/postman_stack/5f57cff9-93fc-424e-9f78-df0515e7f48b/resources/hello_world/events/06feb26f-9298-4a9b-8749-9d770e5d577a", + Rel: "self", + }, + gophercloud.Link{ + Href: "http://166.78.160.107:8004/v1/98606384f58d4ad0b3db7d0d779549ac/stacks/postman_stack/5f57cff9-93fc-424e-9f78-df0515e7f48b/resources/hello_world", + Rel: "resource", + }, + gophercloud.Link{ + Href: "http://166.78.160.107:8004/v1/98606384f58d4ad0b3db7d0d779549ac/stacks/postman_stack/5f57cff9-93fc-424e-9f78-df0515e7f48b", + Rel: "stack", + }, + }, + LogicalResourceID: "hello_world", + ResourceStatusReason: "state changed", + ResourceStatus: "CREATE_IN_PROGRESS", + PhysicalResourceID: "", + ID: "06feb26f-9298-4a9b-8749-9d770e5d577a", + }, + Event{ + ResourceName: "hello_world", + Time: time.Date(2015, 2, 5, 21, 33, 27, 0, time.UTC), + Links: []gophercloud.Link{ + gophercloud.Link{ + Href: "http://166.78.160.107:8004/v1/98606384f58d4ad0b3db7d0d779549ac/stacks/postman_stack/5f57cff9-93fc-424e-9f78-df0515e7f48b/resources/hello_world/events/93940999-7d40-44ae-8de4-19624e7b8d18", + Rel: "self", + }, + gophercloud.Link{ + Href: "http://166.78.160.107:8004/v1/98606384f58d4ad0b3db7d0d779549ac/stacks/postman_stack/5f57cff9-93fc-424e-9f78-df0515e7f48b/resources/hello_world", + Rel: "resource", + }, + gophercloud.Link{ + Href: "http://166.78.160.107:8004/v1/98606384f58d4ad0b3db7d0d779549ac/stacks/postman_stack/5f57cff9-93fc-424e-9f78-df0515e7f48b", + Rel: "stack", + }, + }, + LogicalResourceID: "hello_world", + ResourceStatusReason: "state changed", + ResourceStatus: "CREATE_COMPLETE", + PhysicalResourceID: "49181cd6-169a-4130-9455-31185bbfc5bf", + ID: "93940999-7d40-44ae-8de4-19624e7b8d18", + }, +} + +// ListOutput represents the response body from a List request. 
+const ListOutput = ` +{ + "events": [ + { + "resource_name": "hello_world", + "event_time": "2015-02-05T21:33:11", + "links": [ + { + "href": "http://166.78.160.107:8004/v1/98606384f58d4ad0b3db7d0d779549ac/stacks/postman_stack/5f57cff9-93fc-424e-9f78-df0515e7f48b/resources/hello_world/events/06feb26f-9298-4a9b-8749-9d770e5d577a", + "rel": "self" + }, + { + "href": "http://166.78.160.107:8004/v1/98606384f58d4ad0b3db7d0d779549ac/stacks/postman_stack/5f57cff9-93fc-424e-9f78-df0515e7f48b/resources/hello_world", + "rel": "resource" + }, + { + "href": "http://166.78.160.107:8004/v1/98606384f58d4ad0b3db7d0d779549ac/stacks/postman_stack/5f57cff9-93fc-424e-9f78-df0515e7f48b", + "rel": "stack" + } + ], + "logical_resource_id": "hello_world", + "resource_status_reason": "state changed", + "resource_status": "CREATE_IN_PROGRESS", + "physical_resource_id": null, + "id": "06feb26f-9298-4a9b-8749-9d770e5d577a" + }, + { + "resource_name": "hello_world", + "event_time": "2015-02-05T21:33:27", + "links": [ + { + "href": "http://166.78.160.107:8004/v1/98606384f58d4ad0b3db7d0d779549ac/stacks/postman_stack/5f57cff9-93fc-424e-9f78-df0515e7f48b/resources/hello_world/events/93940999-7d40-44ae-8de4-19624e7b8d18", + "rel": "self" + }, + { + "href": "http://166.78.160.107:8004/v1/98606384f58d4ad0b3db7d0d779549ac/stacks/postman_stack/5f57cff9-93fc-424e-9f78-df0515e7f48b/resources/hello_world", + "rel": "resource" + }, + { + "href": "http://166.78.160.107:8004/v1/98606384f58d4ad0b3db7d0d779549ac/stacks/postman_stack/5f57cff9-93fc-424e-9f78-df0515e7f48b", + "rel": "stack" + } + ], + "logical_resource_id": "hello_world", + "resource_status_reason": "state changed", + "resource_status": "CREATE_COMPLETE", + "physical_resource_id": "49181cd6-169a-4130-9455-31185bbfc5bf", + "id": "93940999-7d40-44ae-8de4-19624e7b8d18" + } + ] +}` + +// HandleListSuccessfully creates an HTTP handler at `/stacks/hello_world/49181cd6-169a-4130-9455-31185bbfc5bf/events` +// on the test handler mux that responds with a `List` response. +func HandleListSuccessfully(t *testing.T, output string) { + th.Mux.HandleFunc("/stacks/hello_world/49181cd6-169a-4130-9455-31185bbfc5bf/events", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + th.TestHeader(t, r, "Accept", "application/json") + + w.Header().Set("Content-Type", "application/json") + r.ParseForm() + marker := r.Form.Get("marker") + switch marker { + case "": + fmt.Fprintf(w, output) + case "93940999-7d40-44ae-8de4-19624e7b8d18": + fmt.Fprintf(w, `{"events":[]}`) + default: + t.Fatalf("Unexpected marker: [%s]", marker) + } + }) +} + +// ListResourceEventsExpected represents the expected object from a ListResourceEvents request. 
+var ListResourceEventsExpected = []Event{ + Event{ + ResourceName: "hello_world", + Time: time.Date(2015, 2, 5, 21, 33, 11, 0, time.UTC), + Links: []gophercloud.Link{ + gophercloud.Link{ + Href: "http://166.78.160.107:8004/v1/98606384f58d4ad0b3db7d0d779549ac/stacks/postman_stack/5f57cff9-93fc-424e-9f78-df0515e7f48b/resources/hello_world/events/06feb26f-9298-4a9b-8749-9d770e5d577a", + Rel: "self", + }, + gophercloud.Link{ + Href: "http://166.78.160.107:8004/v1/98606384f58d4ad0b3db7d0d779549ac/stacks/postman_stack/5f57cff9-93fc-424e-9f78-df0515e7f48b/resources/hello_world", + Rel: "resource", + }, + gophercloud.Link{ + Href: "http://166.78.160.107:8004/v1/98606384f58d4ad0b3db7d0d779549ac/stacks/postman_stack/5f57cff9-93fc-424e-9f78-df0515e7f48b", + Rel: "stack", + }, + }, + LogicalResourceID: "hello_world", + ResourceStatusReason: "state changed", + ResourceStatus: "CREATE_IN_PROGRESS", + PhysicalResourceID: "", + ID: "06feb26f-9298-4a9b-8749-9d770e5d577a", + }, + Event{ + ResourceName: "hello_world", + Time: time.Date(2015, 2, 5, 21, 33, 27, 0, time.UTC), + Links: []gophercloud.Link{ + gophercloud.Link{ + Href: "http://166.78.160.107:8004/v1/98606384f58d4ad0b3db7d0d779549ac/stacks/postman_stack/5f57cff9-93fc-424e-9f78-df0515e7f48b/resources/hello_world/events/93940999-7d40-44ae-8de4-19624e7b8d18", + Rel: "self", + }, + gophercloud.Link{ + Href: "http://166.78.160.107:8004/v1/98606384f58d4ad0b3db7d0d779549ac/stacks/postman_stack/5f57cff9-93fc-424e-9f78-df0515e7f48b/resources/hello_world", + Rel: "resource", + }, + gophercloud.Link{ + Href: "http://166.78.160.107:8004/v1/98606384f58d4ad0b3db7d0d779549ac/stacks/postman_stack/5f57cff9-93fc-424e-9f78-df0515e7f48b", + Rel: "stack", + }, + }, + LogicalResourceID: "hello_world", + ResourceStatusReason: "state changed", + ResourceStatus: "CREATE_COMPLETE", + PhysicalResourceID: "49181cd6-169a-4130-9455-31185bbfc5bf", + ID: "93940999-7d40-44ae-8de4-19624e7b8d18", + }, +} + +// ListResourceEventsOutput represents the response body from a ListResourceEvents request. 
+const ListResourceEventsOutput = ` +{ + "events": [ + { + "resource_name": "hello_world", + "event_time": "2015-02-05T21:33:11", + "links": [ + { + "href": "http://166.78.160.107:8004/v1/98606384f58d4ad0b3db7d0d779549ac/stacks/postman_stack/5f57cff9-93fc-424e-9f78-df0515e7f48b/resources/hello_world/events/06feb26f-9298-4a9b-8749-9d770e5d577a", + "rel": "self" + }, + { + "href": "http://166.78.160.107:8004/v1/98606384f58d4ad0b3db7d0d779549ac/stacks/postman_stack/5f57cff9-93fc-424e-9f78-df0515e7f48b/resources/hello_world", + "rel": "resource" + }, + { + "href": "http://166.78.160.107:8004/v1/98606384f58d4ad0b3db7d0d779549ac/stacks/postman_stack/5f57cff9-93fc-424e-9f78-df0515e7f48b", + "rel": "stack" + } + ], + "logical_resource_id": "hello_world", + "resource_status_reason": "state changed", + "resource_status": "CREATE_IN_PROGRESS", + "physical_resource_id": null, + "id": "06feb26f-9298-4a9b-8749-9d770e5d577a" + }, + { + "resource_name": "hello_world", + "event_time": "2015-02-05T21:33:27", + "links": [ + { + "href": "http://166.78.160.107:8004/v1/98606384f58d4ad0b3db7d0d779549ac/stacks/postman_stack/5f57cff9-93fc-424e-9f78-df0515e7f48b/resources/hello_world/events/93940999-7d40-44ae-8de4-19624e7b8d18", + "rel": "self" + }, + { + "href": "http://166.78.160.107:8004/v1/98606384f58d4ad0b3db7d0d779549ac/stacks/postman_stack/5f57cff9-93fc-424e-9f78-df0515e7f48b/resources/hello_world", + "rel": "resource" + }, + { + "href": "http://166.78.160.107:8004/v1/98606384f58d4ad0b3db7d0d779549ac/stacks/postman_stack/5f57cff9-93fc-424e-9f78-df0515e7f48b", + "rel": "stack" + } + ], + "logical_resource_id": "hello_world", + "resource_status_reason": "state changed", + "resource_status": "CREATE_COMPLETE", + "physical_resource_id": "49181cd6-169a-4130-9455-31185bbfc5bf", + "id": "93940999-7d40-44ae-8de4-19624e7b8d18" + } + ] +}` + +// HandleListResourceEventsSuccessfully creates an HTTP handler at `/stacks/hello_world/49181cd6-169a-4130-9455-31185bbfc5bf/resources/my_resource/events` +// on the test handler mux that responds with a `ListResourceEvents` response. +func HandleListResourceEventsSuccessfully(t *testing.T, output string) { + th.Mux.HandleFunc("/stacks/hello_world/49181cd6-169a-4130-9455-31185bbfc5bf/resources/my_resource/events", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + th.TestHeader(t, r, "Accept", "application/json") + + w.Header().Set("Content-Type", "application/json") + r.ParseForm() + marker := r.Form.Get("marker") + switch marker { + case "": + fmt.Fprintf(w, output) + case "93940999-7d40-44ae-8de4-19624e7b8d18": + fmt.Fprintf(w, `{"events":[]}`) + default: + t.Fatalf("Unexpected marker: [%s]", marker) + } + }) +} + +// GetExpected represents the expected object from a Get request. 
+var GetExpected = &Event{ + ResourceName: "hello_world", + Time: time.Date(2015, 2, 5, 21, 33, 27, 0, time.UTC), + Links: []gophercloud.Link{ + gophercloud.Link{ + Href: "http://166.78.160.107:8004/v1/98606384f58d4ad0b3db7d0d779549ac/stacks/postman_stack/5f57cff9-93fc-424e-9f78-df0515e7f48b/resources/hello_world/events/93940999-7d40-44ae-8de4-19624e7b8d18", + Rel: "self", + }, + gophercloud.Link{ + Href: "http://166.78.160.107:8004/v1/98606384f58d4ad0b3db7d0d779549ac/stacks/postman_stack/5f57cff9-93fc-424e-9f78-df0515e7f48b/resources/hello_world", + Rel: "resource", + }, + gophercloud.Link{ + Href: "http://166.78.160.107:8004/v1/98606384f58d4ad0b3db7d0d779549ac/stacks/postman_stack/5f57cff9-93fc-424e-9f78-df0515e7f48b", + Rel: "stack", + }, + }, + LogicalResourceID: "hello_world", + ResourceStatusReason: "state changed", + ResourceStatus: "CREATE_COMPLETE", + PhysicalResourceID: "49181cd6-169a-4130-9455-31185bbfc5bf", + ID: "93940999-7d40-44ae-8de4-19624e7b8d18", +} + +// GetOutput represents the response body from a Get request. +const GetOutput = ` +{ + "event":{ + "resource_name": "hello_world", + "event_time": "2015-02-05T21:33:27", + "links": [ + { + "href": "http://166.78.160.107:8004/v1/98606384f58d4ad0b3db7d0d779549ac/stacks/postman_stack/5f57cff9-93fc-424e-9f78-df0515e7f48b/resources/hello_world/events/93940999-7d40-44ae-8de4-19624e7b8d18", + "rel": "self" + }, + { + "href": "http://166.78.160.107:8004/v1/98606384f58d4ad0b3db7d0d779549ac/stacks/postman_stack/5f57cff9-93fc-424e-9f78-df0515e7f48b/resources/hello_world", + "rel": "resource" + }, + { + "href": "http://166.78.160.107:8004/v1/98606384f58d4ad0b3db7d0d779549ac/stacks/postman_stack/5f57cff9-93fc-424e-9f78-df0515e7f48b", + "rel": "stack" + } + ], + "logical_resource_id": "hello_world", + "resource_status_reason": "state changed", + "resource_status": "CREATE_COMPLETE", + "physical_resource_id": "49181cd6-169a-4130-9455-31185bbfc5bf", + "id": "93940999-7d40-44ae-8de4-19624e7b8d18" + } +}` + +// HandleGetSuccessfully creates an HTTP handler at `/stacks/hello_world/49181cd6-169a-4130-9455-31185bbfc5bf/resources/my_resource/events/93940999-7d40-44ae-8de4-19624e7b8d18` +// on the test handler mux that responds with a `Get` response. +func HandleGetSuccessfully(t *testing.T, output string) { + th.Mux.HandleFunc("/stacks/hello_world/49181cd6-169a-4130-9455-31185bbfc5bf/resources/my_resource/events/93940999-7d40-44ae-8de4-19624e7b8d18", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + th.TestHeader(t, r, "Accept", "application/json") + + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + fmt.Fprintf(w, output) + }) +} diff --git a/vendor/github.com/rackspace/gophercloud/openstack/orchestration/v1/stackevents/requests.go b/vendor/github.com/rackspace/gophercloud/openstack/orchestration/v1/stackevents/requests.go new file mode 100644 index 000000000..70c6b97e8 --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/openstack/orchestration/v1/stackevents/requests.go @@ -0,0 +1,203 @@ +package stackevents + +import ( + "github.com/rackspace/gophercloud" + "github.com/rackspace/gophercloud/pagination" +) + +// Find retrieves stack events for the given stack name. 
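+//
+// For example (a sketch; client is assumed to be an authenticated Heat
+// *gophercloud.ServiceClient and the stack name is a placeholder):
+//
+//	events, err := stackevents.Find(client, "mystack").Extract()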
+func Find(c *gophercloud.ServiceClient, stackName string) FindResult { + var res FindResult + + _, res.Err = c.Request("GET", findURL(c, stackName), gophercloud.RequestOpts{ + JSONResponse: &res.Body, + }) + return res +} + +// SortDir is a type for specifying in which direction to sort a list of events. +type SortDir string + +// SortKey is a type for specifying by which key to sort a list of events. +type SortKey string + +// ResourceStatus is a type for specifying by which resource status to filter a +// list of events. +type ResourceStatus string + +// ResourceAction is a type for specifying by which resource action to filter a +// list of events. +type ResourceAction string + +var ( + // ResourceStatusInProgress is used to filter a List request by the 'IN_PROGRESS' status. + ResourceStatusInProgress ResourceStatus = "IN_PROGRESS" + // ResourceStatusComplete is used to filter a List request by the 'COMPLETE' status. + ResourceStatusComplete ResourceStatus = "COMPLETE" + // ResourceStatusFailed is used to filter a List request by the 'FAILED' status. + ResourceStatusFailed ResourceStatus = "FAILED" + + // ResourceActionCreate is used to filter a List request by the 'CREATE' action. + ResourceActionCreate ResourceAction = "CREATE" + // ResourceActionDelete is used to filter a List request by the 'DELETE' action. + ResourceActionDelete ResourceAction = "DELETE" + // ResourceActionUpdate is used to filter a List request by the 'UPDATE' action. + ResourceActionUpdate ResourceAction = "UPDATE" + // ResourceActionRollback is used to filter a List request by the 'ROLLBACK' action. + ResourceActionRollback ResourceAction = "ROLLBACK" + // ResourceActionSuspend is used to filter a List request by the 'SUSPEND' action. + ResourceActionSuspend ResourceAction = "SUSPEND" + // ResourceActionResume is used to filter a List request by the 'RESUME' action. + ResourceActionResume ResourceAction = "RESUME" + // ResourceActionAbandon is used to filter a List request by the 'ABANDON' action. + ResourceActionAbandon ResourceAction = "ABANDON" + + // SortAsc is used to sort a list of stacks in ascending order. + SortAsc SortDir = "asc" + // SortDesc is used to sort a list of stacks in descending order. + SortDesc SortDir = "desc" + + // SortName is used to sort a list of stacks by name. + SortName SortKey = "name" + // SortResourceType is used to sort a list of stacks by resource type. + SortResourceType SortKey = "resource_type" + // SortCreatedAt is used to sort a list of stacks by date created. + SortCreatedAt SortKey = "created_at" +) + +// ListOptsBuilder allows extensions to add additional parameters to the +// List request. +type ListOptsBuilder interface { + ToStackEventListQuery() (string, error) +} + +// ListOpts allows the filtering and sorting of paginated collections through +// the API. Marker and Limit are used for pagination. +type ListOpts struct { + // The stack resource ID with which to start the listing. + Marker string `q:"marker"` + // Integer value for the limit of values to return. + Limit int `q:"limit"` + // Filters the event list by the specified ResourceAction. You can use this + // filter multiple times to filter by multiple resource actions: CREATE, DELETE, + // UPDATE, ROLLBACK, SUSPEND, RESUME or ADOPT. + ResourceActions []ResourceAction `q:"resource_action"` + // Filters the event list by the specified resource_status. You can use this + // filter multiple times to filter by multiple resource statuses: IN_PROGRESS, + // COMPLETE or FAILED. 
+	ResourceStatuses []ResourceStatus `q:"resource_status"`
+	// Filters the event list by the specified resource_name. You can use this
+	// filter multiple times to filter by multiple resource names.
+	ResourceNames []string `q:"resource_name"`
+	// Filters the event list by the specified resource_type. You can use this
+	// filter multiple times to filter by multiple resource types: OS::Nova::Server,
+	// OS::Cinder::Volume, and so on.
+	ResourceTypes []string `q:"resource_type"`
+	// Sorts the event list by: resource_type or created_at.
+	SortKey SortKey `q:"sort_keys"`
+	// The sort direction of the event list: either asc (ascending) or desc (descending).
+	SortDir SortDir `q:"sort_dir"`
+}
+
+// ToStackEventListQuery formats a ListOpts into a query string.
+func (opts ListOpts) ToStackEventListQuery() (string, error) {
+	q, err := gophercloud.BuildQueryString(opts)
+	if err != nil {
+		return "", err
+	}
+	return q.String(), nil
+}
+
+// List makes a request against the API to list events for the given stack.
+func List(client *gophercloud.ServiceClient, stackName, stackID string, opts ListOptsBuilder) pagination.Pager {
+	url := listURL(client, stackName, stackID)
+
+	if opts != nil {
+		query, err := opts.ToStackEventListQuery()
+		if err != nil {
+			return pagination.Pager{Err: err}
+		}
+		url += query
+	}
+
+	createPageFn := func(r pagination.PageResult) pagination.Page {
+		p := EventPage{pagination.MarkerPageBase{PageResult: r}}
+		p.MarkerPageBase.Owner = p
+		return p
+	}
+
+	return pagination.NewPager(client, url, createPageFn)
+}
+
+// ListResourceEventsOptsBuilder allows extensions to add additional parameters to the
+// ListResourceEvents request.
+type ListResourceEventsOptsBuilder interface {
+	ToResourceEventListQuery() (string, error)
+}
+
+// ListResourceEventsOpts allows the filtering and sorting of paginated resource events through
+// the API. Marker and Limit are used for pagination.
+type ListResourceEventsOpts struct {
+	// The stack resource ID with which to start the listing.
+	Marker string `q:"marker"`
+	// Integer value for the limit of values to return.
+	Limit int `q:"limit"`
+	// Filters the event list by the specified ResourceAction. You can use this
+	// filter multiple times to filter by multiple resource actions: CREATE, DELETE,
+	// UPDATE, ROLLBACK, SUSPEND, RESUME or ADOPT.
+	ResourceActions []string `q:"resource_action"`
+	// Filters the event list by the specified resource_status. You can use this
+	// filter multiple times to filter by multiple resource statuses: IN_PROGRESS,
+	// COMPLETE or FAILED.
+	ResourceStatuses []string `q:"resource_status"`
+	// Filters the event list by the specified resource_name. You can use this
+	// filter multiple times to filter by multiple resource names.
+	ResourceNames []string `q:"resource_name"`
+	// Filters the event list by the specified resource_type. You can use this
+	// filter multiple times to filter by multiple resource types: OS::Nova::Server,
+	// OS::Cinder::Volume, and so on.
+	ResourceTypes []string `q:"resource_type"`
+	// Sorts the event list by: resource_type or created_at.
+	SortKey SortKey `q:"sort_keys"`
+	// The sort direction of the event list: either asc (ascending) or desc (descending).
+	SortDir SortDir `q:"sort_dir"`
+}
+
+// ToResourceEventListQuery formats a ListResourceEventsOpts into a query string.
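+//
+// For example (a sketch; the option values are placeholders):
+//
+//	opts := ListResourceEventsOpts{Limit: 50, SortDir: SortAsc}
+//	query, err := opts.ToResourceEventListQuery()
+//	// query now looks something like "?limit=50&sort_dir=asc"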
+func (opts ListResourceEventsOpts) ToResourceEventListQuery() (string, error) {
+	q, err := gophercloud.BuildQueryString(opts)
+	if err != nil {
+		return "", err
+	}
+	return q.String(), nil
+}
+
+// ListResourceEvents makes a request against the API to list events for the given stack resource.
+func ListResourceEvents(client *gophercloud.ServiceClient, stackName, stackID, resourceName string, opts ListResourceEventsOptsBuilder) pagination.Pager {
+	url := listResourceEventsURL(client, stackName, stackID, resourceName)
+
+	if opts != nil {
+		query, err := opts.ToResourceEventListQuery()
+		if err != nil {
+			return pagination.Pager{Err: err}
+		}
+		url += query
+	}
+
+	createPageFn := func(r pagination.PageResult) pagination.Page {
+		p := EventPage{pagination.MarkerPageBase{PageResult: r}}
+		p.MarkerPageBase.Owner = p
+		return p
+	}
+
+	return pagination.NewPager(client, url, createPageFn)
+}
+
+// Get retrieves data for the given stack event.
+func Get(c *gophercloud.ServiceClient, stackName, stackID, resourceName, eventID string) GetResult {
+	var res GetResult
+	_, res.Err = c.Get(getURL(c, stackName, stackID, resourceName, eventID), &res.Body, &gophercloud.RequestOpts{
+		OkCodes: []int{200},
+	})
+	return res
+}
diff --git a/vendor/github.com/rackspace/gophercloud/openstack/orchestration/v1/stackevents/requests_test.go b/vendor/github.com/rackspace/gophercloud/openstack/orchestration/v1/stackevents/requests_test.go
new file mode 100644
index 000000000..a4da4d04e
--- /dev/null
+++ b/vendor/github.com/rackspace/gophercloud/openstack/orchestration/v1/stackevents/requests_test.go
@@ -0,0 +1,71 @@
+package stackevents
+
+import (
+	"testing"
+
+	"github.com/rackspace/gophercloud/pagination"
+	th "github.com/rackspace/gophercloud/testhelper"
+	fake "github.com/rackspace/gophercloud/testhelper/client"
+)
+
+func TestFindEvents(t *testing.T) {
+	th.SetupHTTP()
+	defer th.TeardownHTTP()
+	HandleFindSuccessfully(t, FindOutput)
+
+	actual, err := Find(fake.ServiceClient(), "postman_stack").Extract()
+	th.AssertNoErr(t, err)
+
+	expected := FindExpected
+	th.AssertDeepEquals(t, expected, actual)
+}
+
+func TestList(t *testing.T) {
+	th.SetupHTTP()
+	defer th.TeardownHTTP()
+	HandleListSuccessfully(t, ListOutput)
+
+	count := 0
+	err := List(fake.ServiceClient(), "hello_world", "49181cd6-169a-4130-9455-31185bbfc5bf", nil).EachPage(func(page pagination.Page) (bool, error) {
+		count++
+		actual, err := ExtractEvents(page)
+		th.AssertNoErr(t, err)
+
+		th.CheckDeepEquals(t, ListExpected, actual)
+
+		return true, nil
+	})
+	th.AssertNoErr(t, err)
+	th.CheckEquals(t, count, 1)
+}
+
+func TestListResourceEvents(t *testing.T) {
+	th.SetupHTTP()
+	defer th.TeardownHTTP()
+	HandleListResourceEventsSuccessfully(t, ListResourceEventsOutput)
+
+	count := 0
+	err := ListResourceEvents(fake.ServiceClient(), "hello_world", "49181cd6-169a-4130-9455-31185bbfc5bf", "my_resource", nil).EachPage(func(page pagination.Page) (bool, error) {
+		count++
+		actual, err := ExtractEvents(page)
+		th.AssertNoErr(t, err)
+
+		th.CheckDeepEquals(t, ListResourceEventsExpected, actual)
+
+		return true, nil
+	})
+	th.AssertNoErr(t, err)
+	th.CheckEquals(t, count, 1)
+}
+
+func TestGetEvent(t *testing.T) {
+	th.SetupHTTP()
+	defer th.TeardownHTTP()
+	HandleGetSuccessfully(t, GetOutput)
+
+	actual, err := Get(fake.ServiceClient(), "hello_world", "49181cd6-169a-4130-9455-31185bbfc5bf", "my_resource", "93940999-7d40-44ae-8de4-19624e7b8d18").Extract()
+	th.AssertNoErr(t, err)
+
+	expected := GetExpected
+	th.AssertDeepEquals(t, expected, actual)
+}
diff --git a/vendor/github.com/rackspace/gophercloud/openstack/orchestration/v1/stackevents/results.go b/vendor/github.com/rackspace/gophercloud/openstack/orchestration/v1/stackevents/results.go
new file mode 100644
index 000000000..cf9e24098
--- /dev/null
+++ b/vendor/github.com/rackspace/gophercloud/openstack/orchestration/v1/stackevents/results.go
@@ -0,0 +1,172 @@
+package stackevents
+
+import (
+	"fmt"
+	"reflect"
+	"time"
+
+	"github.com/mitchellh/mapstructure"
+	"github.com/rackspace/gophercloud"
+	"github.com/rackspace/gophercloud/pagination"
+)
+
+// Event represents a stack event.
+type Event struct {
+	// The name of the resource for which the event occurred.
+	ResourceName string `mapstructure:"resource_name"`
+	// The time the event occurred.
+	Time time.Time `mapstructure:"-"`
+	// The URLs to the event.
+	Links []gophercloud.Link `mapstructure:"links"`
+	// The logical ID of the stack resource.
+	LogicalResourceID string `mapstructure:"logical_resource_id"`
+	// The reason of the status of the event.
+	ResourceStatusReason string `mapstructure:"resource_status_reason"`
+	// The status of the event.
+	ResourceStatus string `mapstructure:"resource_status"`
+	// The physical ID of the stack resource.
+	PhysicalResourceID string `mapstructure:"physical_resource_id"`
+	// The event ID.
+	ID string `mapstructure:"id"`
+	// Properties of the stack resource.
+	ResourceProperties map[string]interface{} `mapstructure:"resource_properties"`
+}
+
+// FindResult represents the result of a Find operation.
+type FindResult struct {
+	gophercloud.Result
+}
+
+// Extract returns a slice of Event objects and is called after a
+// Find operation.
+func (r FindResult) Extract() ([]Event, error) {
+	if r.Err != nil {
+		return nil, r.Err
+	}
+
+	var res struct {
+		Res []Event `mapstructure:"events"`
+	}
+
+	if err := mapstructure.Decode(r.Body, &res); err != nil {
+		return nil, err
+	}
+
+	events := r.Body.(map[string]interface{})["events"].([]interface{})
+
+	for i, eventRaw := range events {
+		event := eventRaw.(map[string]interface{})
+		if date, ok := event["event_time"]; ok && date != nil {
+			t, err := time.Parse(gophercloud.STACK_TIME_FMT, date.(string))
+			if err != nil {
+				return nil, err
+			}
+			res.Res[i].Time = t
+		}
+	}
+
+	return res.Res, nil
+}
+
+// EventPage abstracts the raw results of making a List() request against the API.
+// As OpenStack extensions may freely alter the response bodies of structures
+// returned to the client, you may only safely access the data provided through
+// the ExtractEvents call.
+type EventPage struct {
+	pagination.MarkerPageBase
+}
+
+// IsEmpty returns true if a page contains no Event results.
+func (r EventPage) IsEmpty() (bool, error) {
+	events, err := ExtractEvents(r)
+	if err != nil {
+		return true, err
+	}
+	return len(events) == 0, nil
+}
+
+// LastMarker returns the last event ID in an EventPage.
+func (r EventPage) LastMarker() (string, error) {
+	events, err := ExtractEvents(r)
+	if err != nil {
+		return "", err
+	}
+	if len(events) == 0 {
+		return "", nil
+	}
+	return events[len(events)-1].ID, nil
+}
+
+// ExtractEvents interprets the results of a single page from a List() call, producing a slice of Event entities.
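+//
+// A hedged sketch of use with a pager, mirroring the package tests (client is
+// assumed to be an authenticated Heat *gophercloud.ServiceClient and the
+// stack name/ID are placeholders):
+//
+//	pager := List(client, "mystack", "stack-id", nil)
+//	err := pager.EachPage(func(page pagination.Page) (bool, error) {
+//		events, err := ExtractEvents(page)
+//		if err != nil {
+//			return false, err
+//		}
+//		fmt.Println(events)
+//		return true, nil
+//	})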
+func ExtractEvents(page pagination.Page) ([]Event, error) { + casted := page.(EventPage).Body + + var res struct { + Res []Event `mapstructure:"events"` + } + + if err := mapstructure.Decode(casted, &res); err != nil { + return nil, err + } + + var events []interface{} + switch casted.(type) { + case map[string]interface{}: + events = casted.(map[string]interface{})["events"].([]interface{}) + case map[string][]interface{}: + events = casted.(map[string][]interface{})["events"] + default: + return res.Res, fmt.Errorf("Unknown type: %v", reflect.TypeOf(casted)) + } + + for i, eventRaw := range events { + event := eventRaw.(map[string]interface{}) + if date, ok := event["event_time"]; ok && date != nil { + t, err := time.Parse(gophercloud.STACK_TIME_FMT, date.(string)) + if err != nil { + return nil, err + } + res.Res[i].Time = t + } + } + + return res.Res, nil +} + +// ExtractResourceEvents interprets the results of a single page from a +// ListResourceEvents() call, producing a slice of Event entities. +func ExtractResourceEvents(page pagination.Page) ([]Event, error) { + return ExtractEvents(page) +} + +// GetResult represents the result of a Get operation. +type GetResult struct { + gophercloud.Result +} + +// Extract returns a pointer to an Event object and is called after a +// Get operation. +func (r GetResult) Extract() (*Event, error) { + if r.Err != nil { + return nil, r.Err + } + + var res struct { + Res *Event `mapstructure:"event"` + } + + if err := mapstructure.Decode(r.Body, &res); err != nil { + return nil, err + } + + event := r.Body.(map[string]interface{})["event"].(map[string]interface{}) + + if date, ok := event["event_time"]; ok && date != nil { + t, err := time.Parse(gophercloud.STACK_TIME_FMT, date.(string)) + if err != nil { + return nil, err + } + res.Res.Time = t + } + + return res.Res, nil +} diff --git a/vendor/github.com/rackspace/gophercloud/openstack/orchestration/v1/stackevents/urls.go b/vendor/github.com/rackspace/gophercloud/openstack/orchestration/v1/stackevents/urls.go new file mode 100644 index 000000000..8b5eceb17 --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/openstack/orchestration/v1/stackevents/urls.go @@ -0,0 +1,19 @@ +package stackevents + +import "github.com/rackspace/gophercloud" + +func findURL(c *gophercloud.ServiceClient, stackName string) string { + return c.ServiceURL("stacks", stackName, "events") +} + +func listURL(c *gophercloud.ServiceClient, stackName, stackID string) string { + return c.ServiceURL("stacks", stackName, stackID, "events") +} + +func listResourceEventsURL(c *gophercloud.ServiceClient, stackName, stackID, resourceName string) string { + return c.ServiceURL("stacks", stackName, stackID, "resources", resourceName, "events") +} + +func getURL(c *gophercloud.ServiceClient, stackName, stackID, resourceName, eventID string) string { + return c.ServiceURL("stacks", stackName, stackID, "resources", resourceName, "events", eventID) +} diff --git a/vendor/github.com/rackspace/gophercloud/openstack/orchestration/v1/stackresources/doc.go b/vendor/github.com/rackspace/gophercloud/openstack/orchestration/v1/stackresources/doc.go new file mode 100644 index 000000000..e4f8b08dc --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/openstack/orchestration/v1/stackresources/doc.go @@ -0,0 +1,5 @@ +// Package stackresources provides operations for working with stack resources. 
+// A resource is a template artifact that represents some component of your +// desired architecture (a Cloud Server, a group of scaled Cloud Servers, a load +// balancer, some configuration management system, and so forth). +package stackresources diff --git a/vendor/github.com/rackspace/gophercloud/openstack/orchestration/v1/stackresources/fixtures.go b/vendor/github.com/rackspace/gophercloud/openstack/orchestration/v1/stackresources/fixtures.go new file mode 100644 index 000000000..952dc54d1 --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/openstack/orchestration/v1/stackresources/fixtures.go @@ -0,0 +1,439 @@ +package stackresources + +import ( + "fmt" + "net/http" + "testing" + "time" + + "github.com/rackspace/gophercloud" + th "github.com/rackspace/gophercloud/testhelper" + fake "github.com/rackspace/gophercloud/testhelper/client" +) + +// FindExpected represents the expected object from a Find request. +var FindExpected = []Resource{ + Resource{ + Name: "hello_world", + Links: []gophercloud.Link{ + gophercloud.Link{ + Href: "http://166.78.160.107:8004/v1/98606384f58d4ad0b3db7d0d779549ac/stacks/postman_stack/5f57cff9-93fc-424e-9f78-df0515e7f48b/resources/hello_world", + Rel: "self", + }, + gophercloud.Link{ + Href: "http://166.78.160.107:8004/v1/98606384f58d4ad0b3db7d0d779549ac/stacks/postman_stack/5f57cff9-93fc-424e-9f78-df0515e7f48b", + Rel: "stack", + }, + }, + LogicalID: "hello_world", + StatusReason: "state changed", + UpdatedTime: time.Date(2015, 2, 5, 21, 33, 11, 0, time.UTC), + CreationTime: time.Date(2015, 2, 5, 21, 33, 10, 0, time.UTC), + RequiredBy: []interface{}{}, + Status: "CREATE_IN_PROGRESS", + PhysicalID: "49181cd6-169a-4130-9455-31185bbfc5bf", + Type: "OS::Nova::Server", + Attributes: map[string]interface{}{"SXSW": "atx"}, + Description: "Some resource", + }, +} + +// FindOutput represents the response body from a Find request. +const FindOutput = ` +{ + "resources": [ + { + "description": "Some resource", + "attributes": {"SXSW": "atx"}, + "resource_name": "hello_world", + "links": [ + { + "href": "http://166.78.160.107:8004/v1/98606384f58d4ad0b3db7d0d779549ac/stacks/postman_stack/5f57cff9-93fc-424e-9f78-df0515e7f48b/resources/hello_world", + "rel": "self" + }, + { + "href": "http://166.78.160.107:8004/v1/98606384f58d4ad0b3db7d0d779549ac/stacks/postman_stack/5f57cff9-93fc-424e-9f78-df0515e7f48b", + "rel": "stack" + } + ], + "logical_resource_id": "hello_world", + "resource_status_reason": "state changed", + "updated_time": "2015-02-05T21:33:11", + "creation_time": "2015-02-05T21:33:10", + "required_by": [], + "resource_status": "CREATE_IN_PROGRESS", + "physical_resource_id": "49181cd6-169a-4130-9455-31185bbfc5bf", + "resource_type": "OS::Nova::Server" + } + ] +}` + +// HandleFindSuccessfully creates an HTTP handler at `/stacks/hello_world/resources` +// on the test handler mux that responds with a `Find` response. +func HandleFindSuccessfully(t *testing.T, output string) { + th.Mux.HandleFunc("/stacks/hello_world/resources", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + th.TestHeader(t, r, "Accept", "application/json") + + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + fmt.Fprintf(w, output) + }) +} + +// ListExpected represents the expected object from a List request. 
+var ListExpected = []Resource{ + Resource{ + Name: "hello_world", + Links: []gophercloud.Link{ + gophercloud.Link{ + Href: "http://166.78.160.107:8004/v1/98606384f58d4ad0b3db7d0d779549ac/stacks/postman_stack/5f57cff9-93fc-424e-9f78-df0515e7f48b/resources/hello_world", + Rel: "self", + }, + gophercloud.Link{ + Href: "http://166.78.160.107:8004/v1/98606384f58d4ad0b3db7d0d779549ac/stacks/postman_stack/5f57cff9-93fc-424e-9f78-df0515e7f48b", + Rel: "stack", + }, + }, + LogicalID: "hello_world", + StatusReason: "state changed", + UpdatedTime: time.Date(2015, 2, 5, 21, 33, 11, 0, time.UTC), + CreationTime: time.Date(2015, 2, 5, 21, 33, 10, 0, time.UTC), + RequiredBy: []interface{}{}, + Status: "CREATE_IN_PROGRESS", + PhysicalID: "49181cd6-169a-4130-9455-31185bbfc5bf", + Type: "OS::Nova::Server", + Attributes: map[string]interface{}{"SXSW": "atx"}, + Description: "Some resource", + }, +} + +// ListOutput represents the response body from a List request. +const ListOutput = `{ + "resources": [ + { + "resource_name": "hello_world", + "links": [ + { + "href": "http://166.78.160.107:8004/v1/98606384f58d4ad0b3db7d0d779549ac/stacks/postman_stack/5f57cff9-93fc-424e-9f78-df0515e7f48b/resources/hello_world", + "rel": "self" + }, + { + "href": "http://166.78.160.107:8004/v1/98606384f58d4ad0b3db7d0d779549ac/stacks/postman_stack/5f57cff9-93fc-424e-9f78-df0515e7f48b", + "rel": "stack" + } + ], + "logical_resource_id": "hello_world", + "resource_status_reason": "state changed", + "updated_time": "2015-02-05T21:33:11", + "required_by": [], + "resource_status": "CREATE_IN_PROGRESS", + "physical_resource_id": "49181cd6-169a-4130-9455-31185bbfc5bf", + "creation_time": "2015-02-05T21:33:10", + "resource_type": "OS::Nova::Server", + "attributes": {"SXSW": "atx"}, + "description": "Some resource" + } +] +}` + +// HandleListSuccessfully creates an HTTP handler at `/stacks/hello_world/49181cd6-169a-4130-9455-31185bbfc5bf/resources` +// on the test handler mux that responds with a `List` response. +func HandleListSuccessfully(t *testing.T, output string) { + th.Mux.HandleFunc("/stacks/hello_world/49181cd6-169a-4130-9455-31185bbfc5bf/resources", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + th.TestHeader(t, r, "Accept", "application/json") + + w.Header().Set("Content-Type", "application/json") + r.ParseForm() + marker := r.Form.Get("marker") + switch marker { + case "": + fmt.Fprintf(w, output) + case "49181cd6-169a-4130-9455-31185bbfc5bf": + fmt.Fprintf(w, `{"resources":[]}`) + default: + t.Fatalf("Unexpected marker: [%s]", marker) + } + }) +} + +// GetExpected represents the expected object from a Get request. 
+var GetExpected = &Resource{ + Name: "wordpress_instance", + Links: []gophercloud.Link{ + gophercloud.Link{ + Href: "http://166.78.160.107:8004/v1/98606384f58d4ad0b3db7d0d779549ac/stacks/teststack/0b1771bd-9336-4f2b-ae86-a80f971faf1e/resources/wordpress_instance", + Rel: "self", + }, + gophercloud.Link{ + Href: "http://166.78.160.107:8004/v1/98606384f58d4ad0b3db7d0d779549ac/stacks/teststack/0b1771bd-9336-4f2b-ae86-a80f971faf1e", + Rel: "stack", + }, + }, + LogicalID: "wordpress_instance", + Attributes: map[string]interface{}{"SXSW": "atx"}, + StatusReason: "state changed", + UpdatedTime: time.Date(2014, 12, 10, 18, 34, 35, 0, time.UTC), + RequiredBy: []interface{}{}, + Status: "CREATE_COMPLETE", + PhysicalID: "00e3a2fe-c65d-403c-9483-4db9930dd194", + Type: "OS::Nova::Server", +} + +// GetOutput represents the response body from a Get request. +const GetOutput = ` +{ + "resource": { + "description": "Some resource", + "attributes": {"SXSW": "atx"}, + "resource_name": "wordpress_instance", + "description": "", + "links": [ + { + "href": "http://166.78.160.107:8004/v1/98606384f58d4ad0b3db7d0d779549ac/stacks/teststack/0b1771bd-9336-4f2b-ae86-a80f971faf1e/resources/wordpress_instance", + "rel": "self" + }, + { + "href": "http://166.78.160.107:8004/v1/98606384f58d4ad0b3db7d0d779549ac/stacks/teststack/0b1771bd-9336-4f2b-ae86-a80f971faf1e", + "rel": "stack" + } + ], + "logical_resource_id": "wordpress_instance", + "resource_status": "CREATE_COMPLETE", + "updated_time": "2014-12-10T18:34:35", + "required_by": [], + "resource_status_reason": "state changed", + "physical_resource_id": "00e3a2fe-c65d-403c-9483-4db9930dd194", + "resource_type": "OS::Nova::Server" + } +}` + +// HandleGetSuccessfully creates an HTTP handler at `/stacks/teststack/0b1771bd-9336-4f2b-ae86-a80f971faf1e/resources/wordpress_instance` +// on the test handler mux that responds with a `Get` response. +func HandleGetSuccessfully(t *testing.T, output string) { + th.Mux.HandleFunc("/stacks/teststack/0b1771bd-9336-4f2b-ae86-a80f971faf1e/resources/wordpress_instance", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + th.TestHeader(t, r, "Accept", "application/json") + + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + fmt.Fprintf(w, output) + }) +} + +// MetadataExpected represents the expected object from a Metadata request. +var MetadataExpected = map[string]string{ + "number": "7", + "animal": "auk", +} + +// MetadataOutput represents the response body from a Metadata request. +const MetadataOutput = ` +{ + "metadata": { + "number": "7", + "animal": "auk" + } +}` + +// HandleMetadataSuccessfully creates an HTTP handler at `/stacks/teststack/0b1771bd-9336-4f2b-ae86-a80f971faf1e/resources/wordpress_instance/metadata` +// on the test handler mux that responds with a `Metadata` response. +func HandleMetadataSuccessfully(t *testing.T, output string) { + th.Mux.HandleFunc("/stacks/teststack/0b1771bd-9336-4f2b-ae86-a80f971faf1e/resources/wordpress_instance/metadata", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + th.TestHeader(t, r, "Accept", "application/json") + + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + fmt.Fprintf(w, output) + }) +} + +// ListTypesExpected represents the expected object from a ListTypes request. 
+var ListTypesExpected = ResourceTypes{ + "OS::Nova::Server", + "OS::Heat::RandomString", + "OS::Swift::Container", + "OS::Trove::Instance", + "OS::Nova::FloatingIPAssociation", + "OS::Cinder::VolumeAttachment", + "OS::Nova::FloatingIP", + "OS::Nova::KeyPair", +} + +// same as above, but sorted +var SortedListTypesExpected = ResourceTypes{ + "OS::Cinder::VolumeAttachment", + "OS::Heat::RandomString", + "OS::Nova::FloatingIP", + "OS::Nova::FloatingIPAssociation", + "OS::Nova::KeyPair", + "OS::Nova::Server", + "OS::Swift::Container", + "OS::Trove::Instance", +} + +// ListTypesOutput represents the response body from a ListTypes request. +const ListTypesOutput = ` +{ + "resource_types": [ + "OS::Nova::Server", + "OS::Heat::RandomString", + "OS::Swift::Container", + "OS::Trove::Instance", + "OS::Nova::FloatingIPAssociation", + "OS::Cinder::VolumeAttachment", + "OS::Nova::FloatingIP", + "OS::Nova::KeyPair" + ] +}` + +// HandleListTypesSuccessfully creates an HTTP handler at `/resource_types` +// on the test handler mux that responds with a `ListTypes` response. +func HandleListTypesSuccessfully(t *testing.T, output string) { + th.Mux.HandleFunc("/resource_types", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + th.TestHeader(t, r, "Accept", "application/json") + + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + fmt.Fprintf(w, output) + }) +} + +// GetSchemaExpected represents the expected object from a Schema request. +var GetSchemaExpected = &TypeSchema{ + Attributes: map[string]interface{}{ + "an_attribute": map[string]interface{}{ + "description": "An attribute description .", + }, + }, + Properties: map[string]interface{}{ + "a_property": map[string]interface{}{ + "update_allowed": false, + "required": true, + "type": "string", + "description": "A resource description.", + }, + }, + ResourceType: "OS::Heat::AResourceName", + SupportStatus: map[string]interface{}{ + "message": "A status message", + "status": "SUPPORTED", + "version": "2014.1", + }, +} + +// GetSchemaOutput represents the response body from a Schema request. +const GetSchemaOutput = ` +{ + "attributes": { + "an_attribute": { + "description": "An attribute description ." + } + }, + "properties": { + "a_property": { + "update_allowed": false, + "required": true, + "type": "string", + "description": "A resource description." + } + }, + "resource_type": "OS::Heat::AResourceName", + "support_status": { + "message": "A status message", + "status": "SUPPORTED", + "version": "2014.1" + } +}` + +// HandleGetSchemaSuccessfully creates an HTTP handler at `/resource_types/OS::Heat::AResourceName` +// on the test handler mux that responds with a `Schema` response. +func HandleGetSchemaSuccessfully(t *testing.T, output string) { + th.Mux.HandleFunc("/resource_types/OS::Heat::AResourceName", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + th.TestHeader(t, r, "Accept", "application/json") + + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + fmt.Fprintf(w, output) + }) +} + +// GetTemplateExpected represents the expected object from a Template request. 
+var GetTemplateExpected = "{\n \"HeatTemplateFormatVersion\": \"2012-12-12\",\n \"Outputs\": {\n \"private_key\": {\n \"Description\": \"The private key if it has been saved.\",\n \"Value\": \"{\\\"Fn::GetAtt\\\": [\\\"KeyPair\\\", \\\"private_key\\\"]}\"\n },\n \"public_key\": {\n \"Description\": \"The public key.\",\n \"Value\": \"{\\\"Fn::GetAtt\\\": [\\\"KeyPair\\\", \\\"public_key\\\"]}\"\n }\n },\n \"Parameters\": {\n \"name\": {\n \"Description\": \"The name of the key pair.\",\n \"Type\": \"String\"\n },\n \"public_key\": {\n \"Description\": \"The optional public key. This allows users to supply the public key from a pre-existing key pair. If not supplied, a new key pair will be generated.\",\n \"Type\": \"String\"\n },\n \"save_private_key\": {\n \"AllowedValues\": [\n \"True\",\n \"true\",\n \"False\",\n \"false\"\n ],\n \"Default\": false,\n \"Description\": \"True if the system should remember a generated private key; False otherwise.\",\n \"Type\": \"String\"\n }\n },\n \"Resources\": {\n \"KeyPair\": {\n \"Properties\": {\n \"name\": {\n \"Ref\": \"name\"\n },\n \"public_key\": {\n \"Ref\": \"public_key\"\n },\n \"save_private_key\": {\n \"Ref\": \"save_private_key\"\n }\n },\n \"Type\": \"OS::Nova::KeyPair\"\n }\n }\n}" + +// GetTemplateOutput represents the response body from a Template request. +const GetTemplateOutput = ` +{ + "HeatTemplateFormatVersion": "2012-12-12", + "Outputs": { + "private_key": { + "Description": "The private key if it has been saved.", + "Value": "{\"Fn::GetAtt\": [\"KeyPair\", \"private_key\"]}" + }, + "public_key": { + "Description": "The public key.", + "Value": "{\"Fn::GetAtt\": [\"KeyPair\", \"public_key\"]}" + } + }, + "Parameters": { + "name": { + "Description": "The name of the key pair.", + "Type": "String" + }, + "public_key": { + "Description": "The optional public key. This allows users to supply the public key from a pre-existing key pair. If not supplied, a new key pair will be generated.", + "Type": "String" + }, + "save_private_key": { + "AllowedValues": [ + "True", + "true", + "False", + "false" + ], + "Default": false, + "Description": "True if the system should remember a generated private key; False otherwise.", + "Type": "String" + } + }, + "Resources": { + "KeyPair": { + "Properties": { + "name": { + "Ref": "name" + }, + "public_key": { + "Ref": "public_key" + }, + "save_private_key": { + "Ref": "save_private_key" + } + }, + "Type": "OS::Nova::KeyPair" + } + } +}` + +// HandleGetTemplateSuccessfully creates an HTTP handler at `/resource_types/OS::Heat::AResourceName/template` +// on the test handler mux that responds with a `Template` response. 
+func HandleGetTemplateSuccessfully(t *testing.T, output string) {
+	th.Mux.HandleFunc("/resource_types/OS::Heat::AResourceName/template", func(w http.ResponseWriter, r *http.Request) {
+		th.TestMethod(t, r, "GET")
+		th.TestHeader(t, r, "X-Auth-Token", fake.TokenID)
+		th.TestHeader(t, r, "Accept", "application/json")
+
+		w.Header().Set("Content-Type", "application/json")
+		w.WriteHeader(http.StatusOK)
+		fmt.Fprintf(w, output)
+	})
+}
diff --git a/vendor/github.com/rackspace/gophercloud/openstack/orchestration/v1/stackresources/requests.go b/vendor/github.com/rackspace/gophercloud/openstack/orchestration/v1/stackresources/requests.go
new file mode 100644
index 000000000..fcb8d8a2f
--- /dev/null
+++ b/vendor/github.com/rackspace/gophercloud/openstack/orchestration/v1/stackresources/requests.go
@@ -0,0 +1,113 @@
+package stackresources
+
+import (
+	"github.com/rackspace/gophercloud"
+	"github.com/rackspace/gophercloud/pagination"
+)
+
+// Find retrieves stack resources for the given stack name.
+func Find(c *gophercloud.ServiceClient, stackName string) FindResult {
+	var res FindResult
+
+	// Send request to API
+	_, res.Err = c.Request("GET", findURL(c, stackName), gophercloud.RequestOpts{
+		JSONResponse: &res.Body,
+	})
+	return res
+}
+
+// ListOptsBuilder allows extensions to add additional parameters to the
+// List request.
+type ListOptsBuilder interface {
+	ToStackResourceListQuery() (string, error)
+}
+
+// ListOpts allows the filtering of paginated collections through the API.
+type ListOpts struct {
+	// Include resources from nested stacks up to Depth levels of recursion.
+	Depth int `q:"nested_depth"`
+}
+
+// ToStackResourceListQuery formats a ListOpts into a query string.
+func (opts ListOpts) ToStackResourceListQuery() (string, error) {
+	q, err := gophercloud.BuildQueryString(opts)
+	if err != nil {
+		return "", err
+	}
+	return q.String(), nil
+}
+
+// List makes a request against the API to list resources for the given stack.
+func List(client *gophercloud.ServiceClient, stackName, stackID string, opts ListOptsBuilder) pagination.Pager {
+	url := listURL(client, stackName, stackID)
+
+	if opts != nil {
+		query, err := opts.ToStackResourceListQuery()
+		if err != nil {
+			return pagination.Pager{Err: err}
+		}
+		url += query
+	}
+
+	createPageFn := func(r pagination.PageResult) pagination.Page {
+		return ResourcePage{pagination.SinglePageBase(r)}
+	}
+
+	return pagination.NewPager(client, url, createPageFn)
+}
+
+// Get retrieves data for the given stack resource.
+func Get(c *gophercloud.ServiceClient, stackName, stackID, resourceName string) GetResult {
+	var res GetResult
+
+	// Send request to API
+	_, res.Err = c.Get(getURL(c, stackName, stackID, resourceName), &res.Body, &gophercloud.RequestOpts{
+		OkCodes: []int{200},
+	})
+	return res
+}
+
+// Metadata retrieves the metadata for the given stack resource.
+func Metadata(c *gophercloud.ServiceClient, stackName, stackID, resourceName string) MetadataResult {
+	var res MetadataResult
+
+	// Send request to API
+	_, res.Err = c.Get(metadataURL(c, stackName, stackID, resourceName), &res.Body, &gophercloud.RequestOpts{
+		OkCodes: []int{200},
+	})
+	return res
+}
+
+// ListTypes makes a request against the API to list resource types.
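+//
+// A rough sketch of consuming the pager; client is assumed to be an
+// authenticated ServiceClient, and ExtractResourceTypes is defined in
+// results.go:
+//
+//	err := ListTypes(client).EachPage(func(page pagination.Page) (bool, error) {
+//		types, err := ExtractResourceTypes(page)
+//		if err != nil {
+//			return false, err
+//		}
+//		sort.Sort(types) // ResourceTypes implements sort.Interface
+//		fmt.Println(types)
+//		return true, nil
+//	})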
+func ListTypes(client *gophercloud.ServiceClient) pagination.Pager {
+	url := listTypesURL(client)
+
+	createPageFn := func(r pagination.PageResult) pagination.Page {
+		return ResourceTypePage{pagination.SinglePageBase(r)}
+	}
+
+	return pagination.NewPager(client, url, createPageFn)
+}
+
+// Schema retrieves the schema for the given resource type.
+func Schema(c *gophercloud.ServiceClient, resourceType string) SchemaResult {
+	var res SchemaResult
+
+	// Send request to API
+	_, res.Err = c.Get(schemaURL(c, resourceType), &res.Body, &gophercloud.RequestOpts{
+		OkCodes: []int{200},
+	})
+	return res
+}
+
+// Template retrieves the template representation for the given resource type.
+func Template(c *gophercloud.ServiceClient, resourceType string) TemplateResult {
+	var res TemplateResult
+
+	// Send request to API
+	_, res.Err = c.Get(templateURL(c, resourceType), &res.Body, &gophercloud.RequestOpts{
+		OkCodes: []int{200},
+	})
+	return res
+}
diff --git a/vendor/github.com/rackspace/gophercloud/openstack/orchestration/v1/stackresources/requests_test.go b/vendor/github.com/rackspace/gophercloud/openstack/orchestration/v1/stackresources/requests_test.go
new file mode 100644
index 000000000..e5045a71f
--- /dev/null
+++ b/vendor/github.com/rackspace/gophercloud/openstack/orchestration/v1/stackresources/requests_test.go
@@ -0,0 +1,111 @@
+package stackresources
+
+import (
+	"sort"
+	"testing"
+
+	"github.com/rackspace/gophercloud/pagination"
+	th "github.com/rackspace/gophercloud/testhelper"
+	fake "github.com/rackspace/gophercloud/testhelper/client"
+)
+
+func TestFindResources(t *testing.T) {
+	th.SetupHTTP()
+	defer th.TeardownHTTP()
+	HandleFindSuccessfully(t, FindOutput)
+
+	actual, err := Find(fake.ServiceClient(), "hello_world").Extract()
+	th.AssertNoErr(t, err)
+
+	expected := FindExpected
+	th.AssertDeepEquals(t, expected, actual)
+}
+
+func TestListResources(t *testing.T) {
+	th.SetupHTTP()
+	defer th.TeardownHTTP()
+	HandleListSuccessfully(t, ListOutput)
+
+	count := 0
+	err := List(fake.ServiceClient(), "hello_world", "49181cd6-169a-4130-9455-31185bbfc5bf", nil).EachPage(func(page pagination.Page) (bool, error) {
+		count++
+		actual, err := ExtractResources(page)
+		th.AssertNoErr(t, err)
+
+		th.CheckDeepEquals(t, ListExpected, actual)
+
+		return true, nil
+	})
+	th.AssertNoErr(t, err)
+	th.CheckEquals(t, count, 1)
+}
+
+func TestGetResource(t *testing.T) {
+	th.SetupHTTP()
+	defer th.TeardownHTTP()
+	HandleGetSuccessfully(t, GetOutput)
+
+	actual, err := Get(fake.ServiceClient(), "teststack", "0b1771bd-9336-4f2b-ae86-a80f971faf1e", "wordpress_instance").Extract()
+	th.AssertNoErr(t, err)
+
+	expected := GetExpected
+	th.AssertDeepEquals(t, expected, actual)
+}
+
+func TestResourceMetadata(t *testing.T) {
+	th.SetupHTTP()
+	defer th.TeardownHTTP()
+	HandleMetadataSuccessfully(t, MetadataOutput)
+
+	actual, err := Metadata(fake.ServiceClient(), "teststack", "0b1771bd-9336-4f2b-ae86-a80f971faf1e", "wordpress_instance").Extract()
+	th.AssertNoErr(t, err)
+
+	expected := MetadataExpected
+	th.AssertDeepEquals(t, expected, actual)
+}
+
+func TestListResourceTypes(t *testing.T) {
+	th.SetupHTTP()
+	defer th.TeardownHTTP()
+	HandleListTypesSuccessfully(t, ListTypesOutput)
+
+	count := 0
+	err := ListTypes(fake.ServiceClient()).EachPage(func(page pagination.Page) (bool, error) {
+		count++
+		actual, err := ExtractResourceTypes(page)
+		th.AssertNoErr(t, err)
+
+		th.CheckDeepEquals(t, ListTypesExpected, actual)
+		// test if sorting works
+		sort.Sort(actual)
+		th.CheckDeepEquals(t,
SortedListTypesExpected, actual)
+
+		return true, nil
+	})
+	th.AssertNoErr(t, err)
+	th.CheckEquals(t, 1, count)
+}
+
+func TestGetResourceSchema(t *testing.T) {
+	th.SetupHTTP()
+	defer th.TeardownHTTP()
+	HandleGetSchemaSuccessfully(t, GetSchemaOutput)
+
+	actual, err := Schema(fake.ServiceClient(), "OS::Heat::AResourceName").Extract()
+	th.AssertNoErr(t, err)
+
+	expected := GetSchemaExpected
+	th.AssertDeepEquals(t, expected, actual)
+}
+
+func TestGetResourceTemplate(t *testing.T) {
+	th.SetupHTTP()
+	defer th.TeardownHTTP()
+	HandleGetTemplateSuccessfully(t, GetTemplateOutput)
+
+	actual, err := Template(fake.ServiceClient(), "OS::Heat::AResourceName").Extract()
+	th.AssertNoErr(t, err)
+
+	expected := GetTemplateExpected
+	th.AssertDeepEquals(t, expected, string(actual))
+}
diff --git a/vendor/github.com/rackspace/gophercloud/openstack/orchestration/v1/stackresources/results.go b/vendor/github.com/rackspace/gophercloud/openstack/orchestration/v1/stackresources/results.go
new file mode 100644
index 000000000..6ddc7660d
--- /dev/null
+++ b/vendor/github.com/rackspace/gophercloud/openstack/orchestration/v1/stackresources/results.go
@@ -0,0 +1,284 @@
+package stackresources
+
+import (
+	"encoding/json"
+	"fmt"
+	"reflect"
+	"time"
+
+	"github.com/mitchellh/mapstructure"
+	"github.com/rackspace/gophercloud"
+	"github.com/rackspace/gophercloud/pagination"
+)
+
+// Resource represents a stack resource.
+type Resource struct {
+	Attributes   map[string]interface{} `mapstructure:"attributes"`
+	CreationTime time.Time              `mapstructure:"-"`
+	Description  string                 `mapstructure:"description"`
+	Links        []gophercloud.Link     `mapstructure:"links"`
+	LogicalID    string                 `mapstructure:"logical_resource_id"`
+	Name         string                 `mapstructure:"resource_name"`
+	PhysicalID   string                 `mapstructure:"physical_resource_id"`
+	RequiredBy   []interface{}          `mapstructure:"required_by"`
+	Status       string                 `mapstructure:"resource_status"`
+	StatusReason string                 `mapstructure:"resource_status_reason"`
+	Type         string                 `mapstructure:"resource_type"`
+	UpdatedTime  time.Time              `mapstructure:"-"`
+}
+
+// FindResult represents the result of a Find operation.
+type FindResult struct {
+	gophercloud.Result
+}
+
+// Extract returns a slice of Resource objects and is called after a
+// Find operation.
+func (r FindResult) Extract() ([]Resource, error) {
+	if r.Err != nil {
+		return nil, r.Err
+	}
+
+	var res struct {
+		Res []Resource `mapstructure:"resources"`
+	}
+
+	if err := mapstructure.Decode(r.Body, &res); err != nil {
+		return nil, err
+	}
+
+	resources := r.Body.(map[string]interface{})["resources"].([]interface{})
+
+	for i, resourceRaw := range resources {
+		resource := resourceRaw.(map[string]interface{})
+		if date, ok := resource["updated_time"]; ok && date != nil {
+			t, err := time.Parse(gophercloud.STACK_TIME_FMT, date.(string))
+			if err != nil {
+				return nil, err
+			}
+			res.Res[i].UpdatedTime = t
+		}
+		if date, ok := resource["creation_time"]; ok && date != nil {
+			t, err := time.Parse(gophercloud.STACK_TIME_FMT, date.(string))
+			if err != nil {
+				return nil, err
+			}
+			res.Res[i].CreationTime = t
+		}
+	}
+
+	return res.Res, nil
+}
+
+// ResourcePage abstracts the raw results of making a List() request against the API.
+// As OpenStack extensions may freely alter the response bodies of structures returned to the client, you may only safely access the
+// data provided through the ExtractResources call.
+type ResourcePage struct {
+	pagination.SinglePageBase
+}
+
+// IsEmpty returns true if a page contains no Resource results.
+func (r ResourcePage) IsEmpty() (bool, error) { + resources, err := ExtractResources(r) + if err != nil { + return true, err + } + return len(resources) == 0, nil +} + +// ExtractResources interprets the results of a single page from a List() call, producing a slice of Resource entities. +func ExtractResources(page pagination.Page) ([]Resource, error) { + casted := page.(ResourcePage).Body + + var response struct { + Resources []Resource `mapstructure:"resources"` + } + if err := mapstructure.Decode(casted, &response); err != nil { + return nil, err + } + var resources []interface{} + switch casted.(type) { + case map[string]interface{}: + resources = casted.(map[string]interface{})["resources"].([]interface{}) + case map[string][]interface{}: + resources = casted.(map[string][]interface{})["resources"] + default: + return response.Resources, fmt.Errorf("Unknown type: %v", reflect.TypeOf(casted)) + } + + for i, resourceRaw := range resources { + resource := resourceRaw.(map[string]interface{}) + if date, ok := resource["updated_time"]; ok && date != nil { + t, err := time.Parse(gophercloud.STACK_TIME_FMT, date.(string)) + if err != nil { + return nil, err + } + response.Resources[i].UpdatedTime = t + } + if date, ok := resource["creation_time"]; ok && date != nil { + t, err := time.Parse(gophercloud.STACK_TIME_FMT, date.(string)) + if err != nil { + return nil, err + } + response.Resources[i].CreationTime = t + } + } + + return response.Resources, nil +} + +// GetResult represents the result of a Get operation. +type GetResult struct { + gophercloud.Result +} + +// Extract returns a pointer to a Resource object and is called after a +// Get operation. +func (r GetResult) Extract() (*Resource, error) { + if r.Err != nil { + return nil, r.Err + } + + var res struct { + Res *Resource `mapstructure:"resource"` + } + + if err := mapstructure.Decode(r.Body, &res); err != nil { + return nil, err + } + + resource := r.Body.(map[string]interface{})["resource"].(map[string]interface{}) + + if date, ok := resource["updated_time"]; ok && date != nil { + t, err := time.Parse(gophercloud.STACK_TIME_FMT, date.(string)) + if err != nil { + return nil, err + } + res.Res.UpdatedTime = t + } + if date, ok := resource["creation_time"]; ok && date != nil { + t, err := time.Parse(gophercloud.STACK_TIME_FMT, date.(string)) + if err != nil { + return nil, err + } + res.Res.CreationTime = t + } + + return res.Res, nil +} + +// MetadataResult represents the result of a Metadata operation. +type MetadataResult struct { + gophercloud.Result +} + +// Extract returns a map object and is called after a +// Metadata operation. +func (r MetadataResult) Extract() (map[string]string, error) { + if r.Err != nil { + return nil, r.Err + } + + var res struct { + Meta map[string]string `mapstructure:"metadata"` + } + + if err := mapstructure.Decode(r.Body, &res); err != nil { + return nil, err + } + + return res.Meta, nil +} + +// ResourceTypePage abstracts the raw results of making a ListTypes() request against the API. +// As OpenStack extensions may freely alter the response bodies of structures returned to the client, you may only safely access the +// data provided through the ExtractResourceTypes call. +type ResourceTypePage struct { + pagination.SinglePageBase +} + +// IsEmpty returns true if a ResourceTypePage contains no resource types. 
+func (r ResourceTypePage) IsEmpty() (bool, error) {
+	rts, err := ExtractResourceTypes(r)
+	if err != nil {
+		return true, err
+	}
+	return len(rts) == 0, nil
+}
+
+// ResourceTypes represents the type that holds the result of ExtractResourceTypes.
+// We define methods on this type to sort it before output.
+type ResourceTypes []string
+
+func (r ResourceTypes) Len() int {
+	return len(r)
+}
+
+func (r ResourceTypes) Swap(i, j int) {
+	r[i], r[j] = r[j], r[i]
+}
+
+func (r ResourceTypes) Less(i, j int) bool {
+	return r[i] < r[j]
+}
+
+// ExtractResourceTypes extracts and returns resource types.
+func ExtractResourceTypes(page pagination.Page) (ResourceTypes, error) {
+	casted := page.(ResourceTypePage).Body
+
+	var response struct {
+		ResourceTypes ResourceTypes `mapstructure:"resource_types"`
+	}
+
+	if err := mapstructure.Decode(casted, &response); err != nil {
+		return nil, err
+	}
+	return response.ResourceTypes, nil
+}
+
+// TypeSchema represents a stack resource schema.
+type TypeSchema struct {
+	Attributes    map[string]interface{} `mapstructure:"attributes"`
+	Properties    map[string]interface{} `mapstructure:"properties"`
+	ResourceType  string                 `mapstructure:"resource_type"`
+	SupportStatus map[string]interface{} `mapstructure:"support_status"`
+}
+
+// SchemaResult represents the result of a Schema operation.
+type SchemaResult struct {
+	gophercloud.Result
+}
+
+// Extract returns a pointer to a TypeSchema object and is called after a
+// Schema operation.
+func (r SchemaResult) Extract() (*TypeSchema, error) {
+	if r.Err != nil {
+		return nil, r.Err
+	}
+
+	var res TypeSchema
+
+	if err := mapstructure.Decode(r.Body, &res); err != nil {
+		return nil, err
+	}
+
+	return &res, nil
+}
+
+// TemplateResult represents the result of a Template operation.
+type TemplateResult struct {
+	gophercloud.Result
+}
+
+// Extract returns the template and is called after a
+// Template operation.
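+//
+// The body comes back re-marshaled as indented JSON bytes, so callers
+// usually convert them to a string for display. A small sketch; the
+// resource type is an arbitrary example and client is assumed to be an
+// authenticated ServiceClient:
+//
+//	tmpl, err := Template(client, "OS::Nova::Server").Extract()
+//	if err == nil {
+//		fmt.Println(string(tmpl))
+//	}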
+func (r TemplateResult) Extract() ([]byte, error) {
+	if r.Err != nil {
+		return nil, r.Err
+	}
+	template, err := json.MarshalIndent(r.Body, "", " ")
+	if err != nil {
+		return nil, err
+	}
+	return template, nil
+}
diff --git a/vendor/github.com/rackspace/gophercloud/openstack/orchestration/v1/stackresources/urls.go b/vendor/github.com/rackspace/gophercloud/openstack/orchestration/v1/stackresources/urls.go
new file mode 100644
index 000000000..ef078d9c9
--- /dev/null
+++ b/vendor/github.com/rackspace/gophercloud/openstack/orchestration/v1/stackresources/urls.go
@@ -0,0 +1,31 @@
+package stackresources
+
+import "github.com/rackspace/gophercloud"
+
+func findURL(c *gophercloud.ServiceClient, stackName string) string {
+	return c.ServiceURL("stacks", stackName, "resources")
+}
+
+func listURL(c *gophercloud.ServiceClient, stackName, stackID string) string {
+	return c.ServiceURL("stacks", stackName, stackID, "resources")
+}
+
+func getURL(c *gophercloud.ServiceClient, stackName, stackID, resourceName string) string {
+	return c.ServiceURL("stacks", stackName, stackID, "resources", resourceName)
+}
+
+func metadataURL(c *gophercloud.ServiceClient, stackName, stackID, resourceName string) string {
+	return c.ServiceURL("stacks", stackName, stackID, "resources", resourceName, "metadata")
+}
+
+func listTypesURL(c *gophercloud.ServiceClient) string {
+	return c.ServiceURL("resource_types")
+}
+
+func schemaURL(c *gophercloud.ServiceClient, typeName string) string {
+	return c.ServiceURL("resource_types", typeName)
+}
+
+func templateURL(c *gophercloud.ServiceClient, typeName string) string {
+	return c.ServiceURL("resource_types", typeName, "template")
+}
diff --git a/vendor/github.com/rackspace/gophercloud/openstack/orchestration/v1/stacks/doc.go b/vendor/github.com/rackspace/gophercloud/openstack/orchestration/v1/stacks/doc.go
new file mode 100644
index 000000000..19231b513
--- /dev/null
+++ b/vendor/github.com/rackspace/gophercloud/openstack/orchestration/v1/stacks/doc.go
@@ -0,0 +1,8 @@
+// Package stacks provides operations for working with Heat stacks. A stack is
+// a group of resources (servers, load balancers, databases, and so forth)
+// combined to fulfill a useful purpose. Based on a template, the Heat
+// orchestration engine creates an instantiated set of resources (a stack) to
+// run the application framework or component specified in the template. A
+// stack is a running instance of a template. The result of creating a stack
+// is a deployment of the application framework or component.
+package stacks diff --git a/vendor/github.com/rackspace/gophercloud/openstack/orchestration/v1/stacks/environment.go b/vendor/github.com/rackspace/gophercloud/openstack/orchestration/v1/stacks/environment.go new file mode 100644 index 000000000..abaff2057 --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/openstack/orchestration/v1/stacks/environment.go @@ -0,0 +1,137 @@ +package stacks + +import ( + "fmt" + "strings" +) + +// Environment is a structure that represents stack environments +type Environment struct { + TE +} + +// EnvironmentSections is a map containing allowed sections in a stack environment file +var EnvironmentSections = map[string]bool{ + "parameters": true, + "parameter_defaults": true, + "resource_registry": true, +} + +// Validate validates the contents of the Environment +func (e *Environment) Validate() error { + if e.Parsed == nil { + if err := e.Parse(); err != nil { + return err + } + } + for key := range e.Parsed { + if _, ok := EnvironmentSections[key]; !ok { + return fmt.Errorf("Environment has wrong section: %s", key) + } + } + return nil +} + +// Parse environment file to resolve the URL's of the resources. This is done by +// reading from the `Resource Registry` section, which is why the function is +// named GetRRFileContents. +func (e *Environment) getRRFileContents(ignoreIf igFunc) error { + // initialize environment if empty + if e.Files == nil { + e.Files = make(map[string]string) + } + if e.fileMaps == nil { + e.fileMaps = make(map[string]string) + } + + // get the resource registry + rr := e.Parsed["resource_registry"] + + // search the resource registry for URLs + switch rr.(type) { + // process further only if the resource registry is a map + case map[string]interface{}, map[interface{}]interface{}: + rrMap, err := toStringKeys(rr) + if err != nil { + return err + } + // the resource registry might contain a base URL for the resource. If + // such a field is present, use it. Otherwise, use the default base URL. + var baseURL string + if val, ok := rrMap["base_url"]; ok { + baseURL = val.(string) + } else { + baseURL = e.baseURL + } + + // The contents of the resource may be located in a remote file, which + // will be a template. Instantiate a temporary template to manage the + // contents. + tempTemplate := new(Template) + tempTemplate.baseURL = baseURL + tempTemplate.client = e.client + + // Fetch the contents of remote resource URL's + if err = tempTemplate.getFileContents(rr, ignoreIf, false); err != nil { + return err + } + // check the `resources` section (if it exists) for more URL's. 
Note that + // the previous call to GetFileContents was (deliberately) not recursive + // as we want more control over where to look for URL's + if val, ok := rrMap["resources"]; ok { + switch val.(type) { + // process further only if the contents are a map + case map[string]interface{}, map[interface{}]interface{}: + resourcesMap, err := toStringKeys(val) + if err != nil { + return err + } + for _, v := range resourcesMap { + switch v.(type) { + case map[string]interface{}, map[interface{}]interface{}: + resourceMap, err := toStringKeys(v) + if err != nil { + return err + } + var resourceBaseURL string + // if base_url for the resource type is defined, use it + if val, ok := resourceMap["base_url"]; ok { + resourceBaseURL = val.(string) + } else { + resourceBaseURL = baseURL + } + tempTemplate.baseURL = resourceBaseURL + if err := tempTemplate.getFileContents(v, ignoreIf, false); err != nil { + return err + } + } + } + } + } + // if the resource registry contained any URL's, store them. This can + // then be passed as parameter to api calls to Heat api. + e.Files = tempTemplate.Files + return nil + default: + return nil + } +} + +// function to choose keys whose values are other environment files +func ignoreIfEnvironment(key string, value interface{}) bool { + // base_url and hooks refer to components which cannot have urls + if key == "base_url" || key == "hooks" { + return true + } + // if value is not string, it cannot be a URL + valueString, ok := value.(string) + if !ok { + return true + } + // if value contains `::`, it must be a reference to another resource type + // e.g. OS::Nova::Server : Rackspace::Cloud::Server + if strings.Contains(valueString, "::") { + return true + } + return false +} diff --git a/vendor/github.com/rackspace/gophercloud/openstack/orchestration/v1/stacks/environment_test.go b/vendor/github.com/rackspace/gophercloud/openstack/orchestration/v1/stacks/environment_test.go new file mode 100644 index 000000000..3a3c2b98e --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/openstack/orchestration/v1/stacks/environment_test.go @@ -0,0 +1,184 @@ +package stacks + +import ( + "fmt" + "net/http" + "net/url" + "strings" + "testing" + + th "github.com/rackspace/gophercloud/testhelper" +) + +func TestEnvironmentValidation(t *testing.T) { + environmentJSON := new(Environment) + environmentJSON.Bin = []byte(ValidJSONEnvironment) + err := environmentJSON.Validate() + th.AssertNoErr(t, err) + + environmentYAML := new(Environment) + environmentYAML.Bin = []byte(ValidYAMLEnvironment) + err = environmentYAML.Validate() + th.AssertNoErr(t, err) + + environmentInvalid := new(Environment) + environmentInvalid.Bin = []byte(InvalidEnvironment) + if err = environmentInvalid.Validate(); err == nil { + t.Error("environment validation did not catch invalid environment") + } +} + +func TestEnvironmentParsing(t *testing.T) { + environmentJSON := new(Environment) + environmentJSON.Bin = []byte(ValidJSONEnvironment) + err := environmentJSON.Parse() + th.AssertNoErr(t, err) + th.AssertDeepEquals(t, ValidJSONEnvironmentParsed, environmentJSON.Parsed) + + environmentYAML := new(Environment) + environmentYAML.Bin = []byte(ValidJSONEnvironment) + err = environmentYAML.Parse() + th.AssertNoErr(t, err) + th.AssertDeepEquals(t, ValidJSONEnvironmentParsed, environmentYAML.Parsed) + + environmentInvalid := new(Environment) + environmentInvalid.Bin = []byte("Keep Austin Weird") + err = environmentInvalid.Parse() + if err == nil { + t.Error("environment parsing did not catch invalid 
environment") + } +} + +func TestIgnoreIfEnvironment(t *testing.T) { + var keyValueTests = []struct { + key string + value interface{} + out bool + }{ + {"base_url", "afksdf", true}, + {"not_type", "hooks", false}, + {"get_file", "::", true}, + {"hooks", "dfsdfsd", true}, + {"type", "sdfubsduf.yaml", false}, + {"type", "sdfsdufs.environment", false}, + {"type", "sdfsdf.file", false}, + {"type", map[string]string{"key": "value"}, true}, + } + var result bool + for _, kv := range keyValueTests { + result = ignoreIfEnvironment(kv.key, kv.value) + if result != kv.out { + t.Errorf("key: %v, value: %v expected: %v, actual: %v", kv.key, kv.value, kv.out, result) + } + } +} + +func TestGetRRFileContents(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + environmentContent := ` +heat_template_version: 2013-05-23 + +description: + Heat WordPress template to support F18, using only Heat OpenStack-native + resource types, and without the requirement for heat-cfntools in the image. + WordPress is web software you can use to create a beautiful website or blog. + This template installs a single-instance WordPress deployment using a local + MySQL database to store the data. + +parameters: + + key_name: + type: string + description : Name of a KeyPair to enable SSH access to the instance + +resources: + wordpress_instance: + type: OS::Nova::Server + properties: + image: { get_param: image_id } + flavor: { get_param: instance_type } + key_name: { get_param: key_name }` + + dbContent := ` +heat_template_version: 2014-10-16 + +description: + Test template for Trove resource capabilities + +parameters: + db_pass: + type: string + hidden: true + description: Database access password + default: secrete + +resources: + +service_db: + type: OS::Trove::Instance + properties: + name: trove_test_db + datastore_type: mariadb + flavor: 1GB Instance + size: 10 + databases: + - name: test_data + users: + - name: kitchen_sink + password: { get_param: db_pass } + databases: [ test_data ]` + baseurl, err := getBasePath() + th.AssertNoErr(t, err) + + fakeEnvURL := strings.Join([]string{baseurl, "my_env.yaml"}, "/") + urlparsed, err := url.Parse(fakeEnvURL) + th.AssertNoErr(t, err) + // handler for my_env.yaml + th.Mux.HandleFunc(urlparsed.Path, func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + w.Header().Set("Content-Type", "application/jason") + w.WriteHeader(http.StatusOK) + fmt.Fprintf(w, environmentContent) + }) + + fakeDBURL := strings.Join([]string{baseurl, "my_db.yaml"}, "/") + urlparsed, err = url.Parse(fakeDBURL) + th.AssertNoErr(t, err) + + // handler for my_db.yaml + th.Mux.HandleFunc(urlparsed.Path, func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + w.Header().Set("Content-Type", "application/jason") + w.WriteHeader(http.StatusOK) + fmt.Fprintf(w, dbContent) + }) + + client := fakeClient{BaseClient: getHTTPClient()} + env := new(Environment) + env.Bin = []byte(`{"resource_registry": {"My::WP::Server": "my_env.yaml", "resources": {"my_db_server": {"OS::DBInstance": "my_db.yaml"}}}}`) + env.client = client + + err = env.Parse() + th.AssertNoErr(t, err) + err = env.getRRFileContents(ignoreIfEnvironment) + th.AssertNoErr(t, err) + expectedEnvFilesContent := "\nheat_template_version: 2013-05-23\n\ndescription:\n Heat WordPress template to support F18, using only Heat OpenStack-native\n resource types, and without the requirement for heat-cfntools in the image.\n WordPress is web software you can use to create a beautiful website or blog.\n This template 
installs a single-instance WordPress deployment using a local\n MySQL database to store the data.\n\nparameters:\n\n key_name:\n type: string\n description : Name of a KeyPair to enable SSH access to the instance\n\nresources:\n wordpress_instance:\n type: OS::Nova::Server\n properties:\n image: { get_param: image_id }\n flavor: { get_param: instance_type }\n key_name: { get_param: key_name }" + expectedDBFilesContent := "\nheat_template_version: 2014-10-16\n\ndescription:\n Test template for Trove resource capabilities\n\nparameters:\n db_pass:\n type: string\n hidden: true\n description: Database access password\n default: secrete\n\nresources:\n\nservice_db:\n type: OS::Trove::Instance\n properties:\n name: trove_test_db\n datastore_type: mariadb\n flavor: 1GB Instance\n size: 10\n databases:\n - name: test_data\n users:\n - name: kitchen_sink\n password: { get_param: db_pass }\n databases: [ test_data ]" + + th.AssertEquals(t, expectedEnvFilesContent, env.Files[fakeEnvURL]) + th.AssertEquals(t, expectedDBFilesContent, env.Files[fakeDBURL]) + + env.fixFileRefs() + expectedParsed := map[string]interface{}{ + "resource_registry": "2015-04-30", + "My::WP::Server": fakeEnvURL, + "resources": map[string]interface{}{ + "my_db_server": map[string]interface{}{ + "OS::DBInstance": fakeDBURL, + }, + }, + } + env.Parse() + th.AssertDeepEquals(t, expectedParsed, env.Parsed) +} diff --git a/vendor/github.com/rackspace/gophercloud/openstack/orchestration/v1/stacks/fixtures.go b/vendor/github.com/rackspace/gophercloud/openstack/orchestration/v1/stacks/fixtures.go new file mode 100644 index 000000000..83f5dec8d --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/openstack/orchestration/v1/stacks/fixtures.go @@ -0,0 +1,604 @@ +package stacks + +import ( + "fmt" + "net/http" + "testing" + "time" + + "github.com/rackspace/gophercloud" + th "github.com/rackspace/gophercloud/testhelper" + fake "github.com/rackspace/gophercloud/testhelper/client" +) + +// CreateExpected represents the expected object from a Create request. +var CreateExpected = &CreatedStack{ + ID: "16ef0584-4458-41eb-87c8-0dc8d5f66c87", + Links: []gophercloud.Link{ + gophercloud.Link{ + Href: "http://168.28.170.117:8004/v1/98606384f58drad0bhdb7d02779549ac/stacks/stackcreated/16ef0584-4458-41eb-87c8-0dc8d5f66c87", + Rel: "self", + }, + }, +} + +// CreateOutput represents the response body from a Create request. +const CreateOutput = ` +{ + "stack": { + "id": "16ef0584-4458-41eb-87c8-0dc8d5f66c87", + "links": [ + { + "href": "http://168.28.170.117:8004/v1/98606384f58drad0bhdb7d02779549ac/stacks/stackcreated/16ef0584-4458-41eb-87c8-0dc8d5f66c87", + "rel": "self" + } + ] + } +}` + +// HandleCreateSuccessfully creates an HTTP handler at `/stacks` on the test handler mux +// that responds with a `Create` response. +func HandleCreateSuccessfully(t *testing.T, output string) { + th.Mux.HandleFunc("/stacks", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "POST") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + th.TestHeader(t, r, "Accept", "application/json") + w.WriteHeader(http.StatusCreated) + fmt.Fprintf(w, output) + }) +} + +// ListExpected represents the expected object from a List request. 
+var ListExpected = []ListedStack{ + ListedStack{ + Description: "Simple template to test heat commands", + Links: []gophercloud.Link{ + gophercloud.Link{ + Href: "http://166.76.160.117:8004/v1/98606384f58d4ad0b3db7d0d779549ac/stacks/postman_stack/16ef0584-4458-41eb-87c8-0dc8d5f66c87", + Rel: "self", + }, + }, + StatusReason: "Stack CREATE completed successfully", + Name: "postman_stack", + CreationTime: time.Date(2015, 2, 3, 20, 7, 39, 0, time.UTC), + Status: "CREATE_COMPLETE", + ID: "16ef0584-4458-41eb-87c8-0dc8d5f66c87", + Tags: []string{"rackspace", "atx"}, + }, + ListedStack{ + Description: "Simple template to test heat commands", + Links: []gophercloud.Link{ + gophercloud.Link{ + Href: "http://166.76.160.117:8004/v1/98606384f58d4ad0b3db7d0d779549ac/stacks/gophercloud-test-stack-2/db6977b2-27aa-4775-9ae7-6213212d4ada", + Rel: "self", + }, + }, + StatusReason: "Stack successfully updated", + Name: "gophercloud-test-stack-2", + CreationTime: time.Date(2014, 12, 11, 17, 39, 16, 0, time.UTC), + UpdatedTime: time.Date(2014, 12, 11, 17, 40, 37, 0, time.UTC), + Status: "UPDATE_COMPLETE", + ID: "db6977b2-27aa-4775-9ae7-6213212d4ada", + Tags: []string{"sfo", "satx"}, + }, +} + +// FullListOutput represents the response body from a List request without a marker. +const FullListOutput = ` +{ + "stacks": [ + { + "description": "Simple template to test heat commands", + "links": [ + { + "href": "http://166.76.160.117:8004/v1/98606384f58d4ad0b3db7d0d779549ac/stacks/postman_stack/16ef0584-4458-41eb-87c8-0dc8d5f66c87", + "rel": "self" + } + ], + "stack_status_reason": "Stack CREATE completed successfully", + "stack_name": "postman_stack", + "creation_time": "2015-02-03T20:07:39", + "updated_time": null, + "stack_status": "CREATE_COMPLETE", + "id": "16ef0584-4458-41eb-87c8-0dc8d5f66c87", + "tags": ["rackspace", "atx"] + }, + { + "description": "Simple template to test heat commands", + "links": [ + { + "href": "http://166.76.160.117:8004/v1/98606384f58d4ad0b3db7d0d779549ac/stacks/gophercloud-test-stack-2/db6977b2-27aa-4775-9ae7-6213212d4ada", + "rel": "self" + } + ], + "stack_status_reason": "Stack successfully updated", + "stack_name": "gophercloud-test-stack-2", + "creation_time": "2014-12-11T17:39:16", + "updated_time": "2014-12-11T17:40:37", + "stack_status": "UPDATE_COMPLETE", + "id": "db6977b2-27aa-4775-9ae7-6213212d4ada", + "tags": ["sfo", "satx"] + } + ] +} +` + +// HandleListSuccessfully creates an HTTP handler at `/stacks` on the test handler mux +// that responds with a `List` response. +func HandleListSuccessfully(t *testing.T, output string) { + th.Mux.HandleFunc("/stacks", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + th.TestHeader(t, r, "Accept", "application/json") + + w.Header().Set("Content-Type", "application/json") + r.ParseForm() + marker := r.Form.Get("marker") + switch marker { + case "": + fmt.Fprintf(w, output) + case "db6977b2-27aa-4775-9ae7-6213212d4ada": + fmt.Fprintf(w, `[]`) + default: + t.Fatalf("Unexpected marker: [%s]", marker) + } + }) +} + +// GetExpected represents the expected object from a Get request. 
+var GetExpected = &RetrievedStack{
+	DisableRollback: true,
+	Description:     "Simple template to test heat commands",
+	Parameters: map[string]string{
+		"flavor":         "m1.tiny",
+		"OS::stack_name": "postman_stack",
+		"OS::stack_id":   "16ef0584-4458-41eb-87c8-0dc8d5f66c87",
+	},
+	StatusReason: "Stack CREATE completed successfully",
+	Name:         "postman_stack",
+	Outputs:      []map[string]interface{}{},
+	CreationTime: time.Date(2015, 2, 3, 20, 7, 39, 0, time.UTC),
+	Links: []gophercloud.Link{
+		gophercloud.Link{
+			Href: "http://166.76.160.117:8004/v1/98606384f58d4ad0b3db7d0d779549ac/stacks/postman_stack/16ef0584-4458-41eb-87c8-0dc8d5f66c87",
+			Rel:  "self",
+		},
+	},
+	Capabilities:        []interface{}{},
+	NotificationTopics:  []interface{}{},
+	Status:              "CREATE_COMPLETE",
+	ID:                  "16ef0584-4458-41eb-87c8-0dc8d5f66c87",
+	TemplateDescription: "Simple template to test heat commands",
+	Tags:                []string{"rackspace", "atx"},
+}
+
+// GetOutput represents the response body from a Get request.
+const GetOutput = `
+{
+  "stack": {
+    "disable_rollback": true,
+    "description": "Simple template to test heat commands",
+    "parameters": {
+      "flavor": "m1.tiny",
+      "OS::stack_name": "postman_stack",
+      "OS::stack_id": "16ef0584-4458-41eb-87c8-0dc8d5f66c87"
+    },
+    "stack_status_reason": "Stack CREATE completed successfully",
+    "stack_name": "postman_stack",
+    "outputs": [],
+    "creation_time": "2015-02-03T20:07:39",
+    "links": [
+      {
+        "href": "http://166.76.160.117:8004/v1/98606384f58d4ad0b3db7d0d779549ac/stacks/postman_stack/16ef0584-4458-41eb-87c8-0dc8d5f66c87",
+        "rel": "self"
+      }
+    ],
+    "capabilities": [],
+    "notification_topics": [],
+    "timeout_mins": null,
+    "stack_status": "CREATE_COMPLETE",
+    "updated_time": null,
+    "id": "16ef0584-4458-41eb-87c8-0dc8d5f66c87",
+    "template_description": "Simple template to test heat commands",
+    "tags": ["rackspace", "atx"]
+  }
+}
+`
+
+// HandleGetSuccessfully creates an HTTP handler at `/stacks/postman_stack/16ef0584-4458-41eb-87c8-0dc8d5f66c87`
+// on the test handler mux that responds with a `Get` response.
+func HandleGetSuccessfully(t *testing.T, output string) {
+	th.Mux.HandleFunc("/stacks/postman_stack/16ef0584-4458-41eb-87c8-0dc8d5f66c87", func(w http.ResponseWriter, r *http.Request) {
+		th.TestMethod(t, r, "GET")
+		th.TestHeader(t, r, "X-Auth-Token", fake.TokenID)
+		th.TestHeader(t, r, "Accept", "application/json")
+
+		w.Header().Set("Content-Type", "application/json")
+		w.WriteHeader(http.StatusOK)
+		fmt.Fprintf(w, output)
+	})
+}
+
+// HandleUpdateSuccessfully creates an HTTP handler at `/stacks/gophercloud-test-stack-2/db6977b2-27aa-4775-9ae7-6213212d4ada`
+// on the test handler mux that responds with an `Update` response.
+func HandleUpdateSuccessfully(t *testing.T) {
+	th.Mux.HandleFunc("/stacks/gophercloud-test-stack-2/db6977b2-27aa-4775-9ae7-6213212d4ada", func(w http.ResponseWriter, r *http.Request) {
+		th.TestMethod(t, r, "PUT")
+		th.TestHeader(t, r, "X-Auth-Token", fake.TokenID)
+		th.TestHeader(t, r, "Accept", "application/json")
+
+		w.Header().Set("Content-Type", "application/json")
+		w.WriteHeader(http.StatusAccepted)
+	})
+}
+
+// HandleDeleteSuccessfully creates an HTTP handler at `/stacks/gophercloud-test-stack-2/db6977b2-27aa-4775-9ae7-6213212d4ada`
+// on the test handler mux that responds with a `Delete` response.
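+//
+// A sketch of how such a fixture is typically driven from a test; Delete
+// itself lives in requests.go and its signature is assumed here, not part
+// of this hunk:
+//
+//	th.SetupHTTP()
+//	defer th.TeardownHTTP()
+//	HandleDeleteSuccessfully(t)
+//	res := Delete(fake.ServiceClient(), "gophercloud-test-stack-2", "db6977b2-27aa-4775-9ae7-6213212d4ada")
+//	th.AssertNoErr(t, res.Err)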
+func HandleDeleteSuccessfully(t *testing.T) {
+	th.Mux.HandleFunc("/stacks/gophercloud-test-stack-2/db6977b2-27aa-4775-9ae7-6213212d4ada", func(w http.ResponseWriter, r *http.Request) {
+		th.TestMethod(t, r, "DELETE")
+		th.TestHeader(t, r, "X-Auth-Token", fake.TokenID)
+		th.TestHeader(t, r, "Accept", "application/json")
+
+		w.Header().Set("Content-Type", "application/json")
+		w.WriteHeader(http.StatusNoContent)
+	})
+}
+
+// PreviewExpected represents the expected object from a Preview request.
+var PreviewExpected = &PreviewedStack{
+	DisableRollback: true,
+	Description:     "Simple template to test heat commands",
+	Parameters: map[string]string{
+		"flavor":         "m1.tiny",
+		"OS::stack_name": "postman_stack",
+		"OS::stack_id":   "16ef0584-4458-41eb-87c8-0dc8d5f66c87",
+	},
+	Name:         "postman_stack",
+	CreationTime: time.Date(2015, 2, 3, 20, 7, 39, 0, time.UTC),
+	Links: []gophercloud.Link{
+		gophercloud.Link{
+			Href: "http://166.76.160.117:8004/v1/98606384f58d4ad0b3db7d0d779549ac/stacks/postman_stack/16ef0584-4458-41eb-87c8-0dc8d5f66c87",
+			Rel:  "self",
+		},
+	},
+	Capabilities:        []interface{}{},
+	NotificationTopics:  []interface{}{},
+	ID:                  "16ef0584-4458-41eb-87c8-0dc8d5f66c87",
+	TemplateDescription: "Simple template to test heat commands",
+}
+
+// HandlePreviewSuccessfully creates an HTTP handler at `/stacks/preview`
+// on the test handler mux that responds with a `Preview` response.
+func HandlePreviewSuccessfully(t *testing.T, output string) {
+	th.Mux.HandleFunc("/stacks/preview", func(w http.ResponseWriter, r *http.Request) {
+		th.TestMethod(t, r, "POST")
+		th.TestHeader(t, r, "X-Auth-Token", fake.TokenID)
+		th.TestHeader(t, r, "Accept", "application/json")
+
+		w.Header().Set("Content-Type", "application/json")
+		w.WriteHeader(http.StatusOK)
+		fmt.Fprintf(w, output)
+	})
+}
+
+// AbandonExpected represents the expected object from an Abandon request.
+var AbandonExpected = &AbandonedStack{ + Status: "COMPLETE", + Name: "postman_stack", + Template: map[string]interface{}{ + "heat_template_version": "2013-05-23", + "description": "Simple template to test heat commands", + "parameters": map[string]interface{}{ + "flavor": map[string]interface{}{ + "default": "m1.tiny", + "type": "string", + }, + }, + "resources": map[string]interface{}{ + "hello_world": map[string]interface{}{ + "type": "OS::Nova::Server", + "properties": map[string]interface{}{ + "key_name": "heat_key", + "flavor": map[string]interface{}{ + "get_param": "flavor", + }, + "image": "ad091b52-742f-469e-8f3c-fd81cadf0743", + "user_data": "#!/bin/bash -xv\necho \"hello world\" > /root/hello-world.txt\n", + }, + }, + }, + }, + Action: "CREATE", + ID: "16ef0584-4458-41eb-87c8-0dc8d5f66c87", + Resources: map[string]interface{}{ + "hello_world": map[string]interface{}{ + "status": "COMPLETE", + "name": "hello_world", + "resource_id": "8a310d36-46fc-436f-8be4-37a696b8ac63", + "action": "CREATE", + "type": "OS::Nova::Server", + }, + }, + Files: map[string]string{ + "file:///Users/prat8228/go/src/github.com/rackspace/rack/my_nova.yaml": "heat_template_version: 2014-10-16\nparameters:\n flavor:\n type: string\n description: Flavor for the server to be created\n default: 4353\n hidden: true\nresources:\n test_server:\n type: \"OS::Nova::Server\"\n properties:\n name: test-server\n flavor: 2 GB General Purpose v1\n image: Debian 7 (Wheezy) (PVHVM)\n", + }, + StackUserProjectID: "897686", + ProjectID: "897686", + Environment: map[string]interface{}{ + "encrypted_param_names": make([]map[string]interface{}, 0), + "parameter_defaults": make(map[string]interface{}), + "parameters": make(map[string]interface{}), + "resource_registry": map[string]interface{}{ + "file:///Users/prat8228/go/src/github.com/rackspace/rack/my_nova.yaml": "file:///Users/prat8228/go/src/github.com/rackspace/rack/my_nova.yaml", + "resources": make(map[string]interface{}), + }, + }, +} + +// AbandonOutput represents the response body from an Abandon request. 
+const AbandonOutput = `
+{
+  "status": "COMPLETE",
+  "name": "postman_stack",
+  "template": {
+    "heat_template_version": "2013-05-23",
+    "description": "Simple template to test heat commands",
+    "parameters": {
+      "flavor": {
+        "default": "m1.tiny",
+        "type": "string"
+      }
+    },
+    "resources": {
+      "hello_world": {
+        "type": "OS::Nova::Server",
+        "properties": {
+          "key_name": "heat_key",
+          "flavor": {
+            "get_param": "flavor"
+          },
+          "image": "ad091b52-742f-469e-8f3c-fd81cadf0743",
+          "user_data": "#!/bin/bash -xv\necho \"hello world\" > /root/hello-world.txt\n"
+        }
+      }
+    }
+  },
+  "action": "CREATE",
+  "id": "16ef0584-4458-41eb-87c8-0dc8d5f66c87",
+  "resources": {
+    "hello_world": {
+      "status": "COMPLETE",
+      "name": "hello_world",
+      "resource_id": "8a310d36-46fc-436f-8be4-37a696b8ac63",
+      "action": "CREATE",
+      "type": "OS::Nova::Server"
+    }
+  },
+  "files": {
+    "file:///Users/prat8228/go/src/github.com/rackspace/rack/my_nova.yaml": "heat_template_version: 2014-10-16\nparameters:\n flavor:\n type: string\n description: Flavor for the server to be created\n default: 4353\n hidden: true\nresources:\n test_server:\n type: \"OS::Nova::Server\"\n properties:\n name: test-server\n flavor: 2 GB General Purpose v1\n image: Debian 7 (Wheezy) (PVHVM)\n"
+  },
+  "environment": {
+    "encrypted_param_names": [],
+    "parameter_defaults": {},
+    "parameters": {},
+    "resource_registry": {
+      "file:///Users/prat8228/go/src/github.com/rackspace/rack/my_nova.yaml": "file:///Users/prat8228/go/src/github.com/rackspace/rack/my_nova.yaml",
+      "resources": {}
+    }
+  },
+  "stack_user_project_id": "897686",
+  "project_id": "897686"
+}`
+
+// HandleAbandonSuccessfully creates an HTTP handler at `/stacks/postman_stack/16ef0584-4458-41eb-87c8-0dc8d5f66c8/abandon`
+// on the test handler mux that responds with an `Abandon` response.
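+// A hedged wiring sketch (mirrors TestAbandonStack later in this change;
+// note that the handler path uses the shortened fixture ID registered below):
+//   HandleAbandonSuccessfully(t, AbandonOutput)
+//   actual, err := Abandon(fake.ServiceClient(), "postman_stack", "16ef0584-4458-41eb-87c8-0dc8d5f66c8").Extract()
+//   th.AssertDeepEquals(t, AbandonExpected, actual)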
+func HandleAbandonSuccessfully(t *testing.T, output string) { + th.Mux.HandleFunc("/stacks/postman_stack/16ef0584-4458-41eb-87c8-0dc8d5f66c8/abandon", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "DELETE") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + th.TestHeader(t, r, "Accept", "application/json") + + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + fmt.Fprintf(w, output) + }) +} + +// ValidJSONTemplate is a valid OpenStack Heat template in JSON format +const ValidJSONTemplate = ` +{ + "heat_template_version": "2014-10-16", + "parameters": { + "flavor": { + "default": 4353, + "description": "Flavor for the server to be created", + "hidden": true, + "type": "string" + } + }, + "resources": { + "test_server": { + "properties": { + "flavor": "2 GB General Purpose v1", + "image": "Debian 7 (Wheezy) (PVHVM)", + "name": "test-server" + }, + "type": "OS::Nova::Server" + } + } +} +` + +// ValidJSONTemplateParsed is the expected parsed version of ValidJSONTemplate +var ValidJSONTemplateParsed = map[string]interface{}{ + "heat_template_version": "2014-10-16", + "parameters": map[string]interface{}{ + "flavor": map[string]interface{}{ + "default": 4353, + "description": "Flavor for the server to be created", + "hidden": true, + "type": "string", + }, + }, + "resources": map[string]interface{}{ + "test_server": map[string]interface{}{ + "properties": map[string]interface{}{ + "flavor": "2 GB General Purpose v1", + "image": "Debian 7 (Wheezy) (PVHVM)", + "name": "test-server", + }, + "type": "OS::Nova::Server", + }, + }, +} + +// ValidYAMLTemplate is a valid OpenStack Heat template in YAML format +const ValidYAMLTemplate = ` +heat_template_version: 2014-10-16 +parameters: + flavor: + type: string + description: Flavor for the server to be created + default: 4353 + hidden: true +resources: + test_server: + type: "OS::Nova::Server" + properties: + name: test-server + flavor: 2 GB General Purpose v1 + image: Debian 7 (Wheezy) (PVHVM) +` + +// InvalidTemplateNoVersion is an invalid template as it has no `version` section +const InvalidTemplateNoVersion = ` +parameters: + flavor: + type: string + description: Flavor for the server to be created + default: 4353 + hidden: true +resources: + test_server: + type: "OS::Nova::Server" + properties: + name: test-server + flavor: 2 GB General Purpose v1 + image: Debian 7 (Wheezy) (PVHVM) +` + +// ValidJSONEnvironment is a valid environment for a stack in JSON format +const ValidJSONEnvironment = ` +{ + "parameters": { + "user_key": "userkey" + }, + "resource_registry": { + "My::WP::Server": "file:///home/shardy/git/heat-templates/hot/F18/WordPress_Native.yaml", + "OS::Quantum*": "OS::Neutron*", + "AWS::CloudWatch::Alarm": "file:///etc/heat/templates/AWS_CloudWatch_Alarm.yaml", + "OS::Metering::Alarm": "OS::Ceilometer::Alarm", + "AWS::RDS::DBInstance": "file:///etc/heat/templates/AWS_RDS_DBInstance.yaml", + "resources": { + "my_db_server": { + "OS::DBInstance": "file:///home/mine/all_my_cool_templates/db.yaml" + }, + "my_server": { + "OS::DBInstance": "file:///home/mine/all_my_cool_templates/db.yaml", + "hooks": "pre-create" + }, + "nested_stack": { + "nested_resource": { + "hooks": "pre-update" + }, + "another_resource": { + "hooks": [ + "pre-create", + "pre-update" + ] + } + } + } + } +} +` + +// ValidJSONEnvironmentParsed is the expected parsed version of ValidJSONEnvironment +var ValidJSONEnvironmentParsed = map[string]interface{}{ + "parameters": map[string]interface{}{ + "user_key": 
"userkey", + }, + "resource_registry": map[string]interface{}{ + "My::WP::Server": "file:///home/shardy/git/heat-templates/hot/F18/WordPress_Native.yaml", + "OS::Quantum*": "OS::Neutron*", + "AWS::CloudWatch::Alarm": "file:///etc/heat/templates/AWS_CloudWatch_Alarm.yaml", + "OS::Metering::Alarm": "OS::Ceilometer::Alarm", + "AWS::RDS::DBInstance": "file:///etc/heat/templates/AWS_RDS_DBInstance.yaml", + "resources": map[string]interface{}{ + "my_db_server": map[string]interface{}{ + "OS::DBInstance": "file:///home/mine/all_my_cool_templates/db.yaml", + }, + "my_server": map[string]interface{}{ + "OS::DBInstance": "file:///home/mine/all_my_cool_templates/db.yaml", + "hooks": "pre-create", + }, + "nested_stack": map[string]interface{}{ + "nested_resource": map[string]interface{}{ + "hooks": "pre-update", + }, + "another_resource": map[string]interface{}{ + "hooks": []interface{}{ + "pre-create", + "pre-update", + }, + }, + }, + }, + }, +} + +// ValidYAMLEnvironment is a valid environment for a stack in YAML format +const ValidYAMLEnvironment = ` +parameters: + user_key: userkey +resource_registry: + My::WP::Server: file:///home/shardy/git/heat-templates/hot/F18/WordPress_Native.yaml + # allow older templates with Quantum in them. + "OS::Quantum*": "OS::Neutron*" + # Choose your implementation of AWS::CloudWatch::Alarm + "AWS::CloudWatch::Alarm": "file:///etc/heat/templates/AWS_CloudWatch_Alarm.yaml" + #"AWS::CloudWatch::Alarm": "OS::Heat::CWLiteAlarm" + "OS::Metering::Alarm": "OS::Ceilometer::Alarm" + "AWS::RDS::DBInstance": "file:///etc/heat/templates/AWS_RDS_DBInstance.yaml" + resources: + my_db_server: + "OS::DBInstance": file:///home/mine/all_my_cool_templates/db.yaml + my_server: + "OS::DBInstance": file:///home/mine/all_my_cool_templates/db.yaml + hooks: pre-create + nested_stack: + nested_resource: + hooks: pre-update + another_resource: + hooks: [pre-create, pre-update] +` + +// InvalidEnvironment is an invalid environment as it has an extra section called `resources` +const InvalidEnvironment = ` +parameters: + flavor: + type: string + description: Flavor for the server to be created + default: 4353 + hidden: true +resources: + test_server: + type: "OS::Nova::Server" + properties: + name: test-server + flavor: 2 GB General Purpose v1 + image: Debian 7 (Wheezy) (PVHVM) +parameter_defaults: + KeyName: heat_key +` diff --git a/vendor/github.com/rackspace/gophercloud/openstack/orchestration/v1/stacks/requests.go b/vendor/github.com/rackspace/gophercloud/openstack/orchestration/v1/stacks/requests.go new file mode 100644 index 000000000..1fc484d67 --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/openstack/orchestration/v1/stacks/requests.go @@ -0,0 +1,682 @@ +package stacks + +import ( + "errors" + "strings" + + "github.com/rackspace/gophercloud" + "github.com/rackspace/gophercloud/pagination" +) + +// Rollback is used to specify whether or not a stack can be rolled back. +type Rollback *bool + +var ( + disable = true + // Disable is used to specify that a stack cannot be rolled back. + Disable Rollback = &disable + enable = false + // Enable is used to specify that a stack can be rolled back. + Enable Rollback = &enable +) + +// CreateOptsBuilder is the interface options structs have to satisfy in order +// to be used in the main Create operation in this package. Since many +// extensions decorate or modify the common logic, it is useful for them to +// satisfy a basic interface in order for them to be used. 
+type CreateOptsBuilder interface {
+	ToStackCreateMap() (map[string]interface{}, error)
+}
+
+// CreateOpts is the common options struct used in this package's Create
+// operation.
+type CreateOpts struct {
+	// (REQUIRED) The name of the stack. It must start with an alphabetic character.
+	Name string
+	// (REQUIRED) A structure that contains either the template file or URL. Call the
+	// associated methods to extract the information relevant to send in a create request.
+	TemplateOpts *Template
+	// (DEPRECATED): Please use TemplateOpts for providing the template. If
+	// TemplateOpts is provided, TemplateURL will be ignored.
+	// (OPTIONAL; REQUIRED IF Template IS EMPTY) The URL of the template to instantiate.
+	// This value is ignored if Template is supplied inline.
+	TemplateURL string
+	// (DEPRECATED): Please use TemplateOpts for providing the template. If
+	// TemplateOpts is provided, Template will be ignored.
+	// (OPTIONAL; REQUIRED IF TemplateURL IS EMPTY) A template to instantiate. The value
+	// is a stringified version of the JSON/YAML template. Since the template will likely
+	// be located in a file, one way to set this variable is by using ioutil.ReadFile:
+	//   import "io/ioutil"
+	//   var opts stacks.CreateOpts
+	//   b, err := ioutil.ReadFile("path/to/your/template/file.json")
+	//   if err != nil {
+	//     // handle error...
+	//   }
+	//   opts.Template = string(b)
+	Template string
+	// (OPTIONAL) Enables or disables deletion of all stack resources when stack
+	// creation fails. The default is true, meaning rollback is disabled and
+	// resources are left in place when stack creation fails.
+	DisableRollback Rollback
+	// (OPTIONAL) A structure that contains details for the environment of the stack.
+	EnvironmentOpts *Environment
+	// (DEPRECATED): Please use EnvironmentOpts to provide Environment data.
+	// (OPTIONAL) A stringified JSON environment for the stack.
+	Environment string
+	// (DEPRECATED): Files is automatically determined
+	// by parsing the template and environment passed as TemplateOpts and
+	// EnvironmentOpts respectively.
+	// (OPTIONAL) A map that maps file names to file contents. It can also be used
+	// to pass provider template contents. Example:
+	//   Files: `{"myfile": "#!/bin/bash\necho 'Hello world' > /root/testfile.txt"}`
+	Files map[string]interface{}
+	// (OPTIONAL) User-defined parameters to pass to the template.
+	Parameters map[string]string
+	// (OPTIONAL) The timeout for stack creation in minutes.
+	Timeout int
+	// (OPTIONAL) A list of tags to associate with the stack.
+	Tags []string
+}
+
+// ToStackCreateMap casts a CreateOpts struct to a map.
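+// A hedged sketch of the resulting keys, which follow the Heat create-stack
+// request body (values here are illustrative only):
+//   m, _ := CreateOpts{Name: "demo", Template: "...", Timeout: 60}.ToStackCreateMap()
+//   // m["stack_name"] == "demo", m["template"] == "...", m["timeout_mins"] == 60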
+func (opts CreateOpts) ToStackCreateMap() (map[string]interface{}, error) {
+	s := make(map[string]interface{})
+
+	if opts.Name == "" {
+		return s, errors.New("Required field 'Name' not provided.")
+	}
+	s["stack_name"] = opts.Name
+	Files := make(map[string]string)
+	if opts.TemplateOpts == nil {
+		if opts.Template != "" {
+			s["template"] = opts.Template
+		} else if opts.TemplateURL != "" {
+			s["template_url"] = opts.TemplateURL
+		} else {
+			return s, errors.New("Either Template or TemplateURL must be provided.")
+		}
+	} else {
+		if err := opts.TemplateOpts.Parse(); err != nil {
+			return nil, err
+		}
+
+		if err := opts.TemplateOpts.getFileContents(opts.TemplateOpts.Parsed, ignoreIfTemplate, true); err != nil {
+			return nil, err
+		}
+		opts.TemplateOpts.fixFileRefs()
+		s["template"] = string(opts.TemplateOpts.Bin)
+
+		for k, v := range opts.TemplateOpts.Files {
+			Files[k] = v
+		}
+	}
+	if opts.DisableRollback != nil {
+		s["disable_rollback"] = &opts.DisableRollback
+	}
+
+	if opts.EnvironmentOpts != nil {
+		if err := opts.EnvironmentOpts.Parse(); err != nil {
+			return nil, err
+		}
+		if err := opts.EnvironmentOpts.getRRFileContents(ignoreIfEnvironment); err != nil {
+			return nil, err
+		}
+		opts.EnvironmentOpts.fixFileRefs()
+		for k, v := range opts.EnvironmentOpts.Files {
+			Files[k] = v
+		}
+		s["environment"] = string(opts.EnvironmentOpts.Bin)
+	} else if opts.Environment != "" {
+		s["environment"] = opts.Environment
+	}
+
+	if opts.Files != nil {
+		s["files"] = opts.Files
+	} else {
+		s["files"] = Files
+	}
+
+	if opts.Parameters != nil {
+		s["parameters"] = opts.Parameters
+	}
+
+	if opts.Timeout != 0 {
+		s["timeout_mins"] = opts.Timeout
+	}
+
+	if opts.Tags != nil {
+		s["tags"] = strings.Join(opts.Tags, ",")
+	}
+	return s, nil
+}
+
+// Create accepts a CreateOpts struct and creates a new stack using the values
+// provided.
+func Create(c *gophercloud.ServiceClient, opts CreateOptsBuilder) CreateResult {
+	var res CreateResult
+
+	reqBody, err := opts.ToStackCreateMap()
+	if err != nil {
+		res.Err = err
+		return res
+	}
+
+	_, res.Err = c.Post(createURL(c), reqBody, &res.Body, nil)
+	return res
+}
+
+// AdoptOptsBuilder is the interface options structs have to satisfy in order
+// to be used in the Adopt function in this package. Since many
+// extensions decorate or modify the common logic, it is useful for them to
+// satisfy a basic interface in order for them to be used.
+type AdoptOptsBuilder interface {
+	ToStackAdoptMap() (map[string]interface{}, error)
+}
+
+// AdoptOpts is the common options struct used in this package's Adopt
+// operation.
+type AdoptOpts struct {
+	// (REQUIRED) Existing resources data represented as a string to add to the
+	// new stack. Data returned by Abandon could be provided as AdoptStackData.
+	AdoptStackData string
+	// (REQUIRED) The name of the stack. It must start with an alphabetic character.
+	Name string
+	// (REQUIRED) The timeout for stack creation in minutes.
+	Timeout int
+	// (REQUIRED) A structure that contains either the template file or URL. Call the
+	// associated methods to extract the information relevant to send in an adopt request.
+	TemplateURL string
+	// (DEPRECATED): Please use TemplateOpts for providing the template. If
+	// TemplateOpts is provided, Template will be ignored.
+	// (OPTIONAL; REQUIRED IF TemplateURL IS EMPTY) A template to instantiate. The value
+	// is a stringified version of the JSON/YAML template. Since the template will likely
+	// be located in a file, one way to set this variable is by using ioutil.ReadFile:
+	//   import "io/ioutil"
+	//   var opts stacks.AdoptOpts
+	//   b, err := ioutil.ReadFile("path/to/your/template/file.json")
+	//   if err != nil {
+	//     // handle error...
+	//   }
+	//   opts.Template = string(b)
+	Template string
+	// (OPTIONAL) Enables or disables deletion of all stack resources when stack
+	// creation fails. The default is true, meaning rollback is disabled and
+	// resources are left in place when stack creation fails.
+	DisableRollback Rollback
+	// (OPTIONAL) A structure that contains details for the environment of the stack.
+	EnvironmentOpts *Environment
+	// (DEPRECATED): Please use EnvironmentOpts to provide Environment data.
+	// (OPTIONAL) A stringified JSON environment for the stack.
+	Environment string
+	// (DEPRECATED): Files is automatically determined
+	// by parsing the template and environment passed as TemplateOpts and
+	// EnvironmentOpts respectively.
+	// (OPTIONAL) A map that maps file names to file contents. It can also be used
+	// to pass provider template contents. Example:
+	//   Files: `{"myfile": "#!/bin/bash\necho 'Hello world' > /root/testfile.txt"}`
+	Files map[string]interface{}
+	// (OPTIONAL) User-defined parameters to pass to the template.
+	Parameters map[string]string
+}
+
+// ToStackAdoptMap casts an AdoptOpts struct to a map.
+func (opts AdoptOpts) ToStackAdoptMap() (map[string]interface{}, error) {
+	s := make(map[string]interface{})
+
+	if opts.Name == "" {
+		return s, errors.New("Required field 'Name' not provided.")
+	}
+	s["stack_name"] = opts.Name
+	Files := make(map[string]string)
+	if opts.AdoptStackData != "" {
+		s["adopt_stack_data"] = opts.AdoptStackData
+	} else if opts.TemplateOpts == nil {
+		if opts.Template != "" {
+			s["template"] = opts.Template
+		} else if opts.TemplateURL != "" {
+			s["template_url"] = opts.TemplateURL
+		} else {
+			return s, errors.New("One of AdoptStackData, Template, TemplateURL or TemplateOpts must be provided.")
+		}
+	} else {
+		if err := opts.TemplateOpts.Parse(); err != nil {
+			return nil, err
+		}
+
+		if err := opts.TemplateOpts.getFileContents(opts.TemplateOpts.Parsed, ignoreIfTemplate, true); err != nil {
+			return nil, err
+		}
+		opts.TemplateOpts.fixFileRefs()
+		s["template"] = string(opts.TemplateOpts.Bin)
+
+		for k, v := range opts.TemplateOpts.Files {
+			Files[k] = v
+		}
+	}
+
+	if opts.DisableRollback != nil {
+		s["disable_rollback"] = &opts.DisableRollback
+	}
+
+	if opts.EnvironmentOpts != nil {
+		if err := opts.EnvironmentOpts.Parse(); err != nil {
+			return nil, err
+		}
+		if err := opts.EnvironmentOpts.getRRFileContents(ignoreIfEnvironment); err != nil {
+			return nil, err
+		}
+		opts.EnvironmentOpts.fixFileRefs()
+		for k, v := range opts.EnvironmentOpts.Files {
+			Files[k] = v
+		}
+		s["environment"] = string(opts.EnvironmentOpts.Bin)
+	} else if opts.Environment != "" {
+		s["environment"] = opts.Environment
+	}
+
+	if opts.Files != nil {
+		s["files"] = opts.Files
+	} else {
+		s["files"] = Files
+	}
+
+	if opts.Parameters != nil {
+		s["parameters"] = opts.Parameters
+	}
+
+	// Timeout is marked REQUIRED above; send it as timeout_mins, matching the
+	// other operations in this package.
+	if opts.Timeout != 0 {
+		s["timeout_mins"] = opts.Timeout
+	}
+
+	return s, nil
+}
+
+// Adopt accepts an AdoptOpts struct and creates a new stack using the resources
+// from another stack.
+func Adopt(c *gophercloud.ServiceClient, opts AdoptOptsBuilder) AdoptResult {
+	var res AdoptResult
+
+	reqBody, err := opts.ToStackAdoptMap()
+	if err != nil {
+		res.Err = err
+		return res
+	}
+
+	_, res.Err = c.Post(adoptURL(c), reqBody, &res.Body, nil)
+	return res
+}
+
+// SortDir is a type for specifying in which direction to sort a list of stacks.
+type SortDir string
+
+// SortKey is a type for specifying by which key to sort a list of stacks.
+type SortKey string
+
+var (
+	// SortAsc is used to sort a list of stacks in ascending order.
+	SortAsc SortDir = "asc"
+	// SortDesc is used to sort a list of stacks in descending order.
+	SortDesc SortDir = "desc"
+	// SortName is used to sort a list of stacks by name.
+	SortName SortKey = "name"
+	// SortStatus is used to sort a list of stacks by status.
+	SortStatus SortKey = "status"
+	// SortCreatedAt is used to sort a list of stacks by date created.
+	SortCreatedAt SortKey = "created_at"
+	// SortUpdatedAt is used to sort a list of stacks by date updated.
+	SortUpdatedAt SortKey = "updated_at"
+)
+
+// ListOptsBuilder allows extensions to add additional parameters to the
+// List request.
+type ListOptsBuilder interface {
+	ToStackListQuery() (string, error)
+}
+
+// ListOpts allows the filtering and sorting of paginated collections through
+// the API. Filtering is achieved by passing in struct field values that map to
+// the stack attributes you want to see returned. SortKey allows you to sort
+// by a particular stack attribute. SortDir sets the direction, and is either
+// `asc' or `desc'. Marker and Limit are used for pagination.
+type ListOpts struct {
+	Status  string  `q:"status"`
+	Name    string  `q:"name"`
+	Marker  string  `q:"marker"`
+	Limit   int     `q:"limit"`
+	SortKey SortKey `q:"sort_keys"`
+	SortDir SortDir `q:"sort_dir"`
+}
+
+// ToStackListQuery formats a ListOpts into a query string.
+func (opts ListOpts) ToStackListQuery() (string, error) {
+	q, err := gophercloud.BuildQueryString(opts)
+	if err != nil {
+		return "", err
+	}
+	return q.String(), nil
+}
+
+// List returns a Pager which allows you to iterate over a collection of
+// stacks. It accepts a ListOpts struct, which allows you to filter and sort
+// the returned collection for greater efficiency.
+func List(c *gophercloud.ServiceClient, opts ListOptsBuilder) pagination.Pager {
+	url := listURL(c)
+	if opts != nil {
+		query, err := opts.ToStackListQuery()
+		if err != nil {
+			return pagination.Pager{Err: err}
+		}
+		url += query
+	}
+
+	createPage := func(r pagination.PageResult) pagination.Page {
+		return StackPage{pagination.SinglePageBase(r)}
+	}
+	return pagination.NewPager(c, url, createPage)
+}
+
+// Get retrieves a stack based on the stack name and stack ID.
+func Get(c *gophercloud.ServiceClient, stackName, stackID string) GetResult {
+	var res GetResult
+	_, res.Err = c.Get(getURL(c, stackName, stackID), &res.Body, nil)
+	return res
+}
+
+// UpdateOptsBuilder is the interface options structs have to satisfy in order
+// to be used in the Update operation in this package.
+type UpdateOptsBuilder interface {
+	ToStackUpdateMap() (map[string]interface{}, error)
+}
+
+// UpdateOpts contains the common options struct used in this package's Update
+// operation.
+type UpdateOpts struct {
+	// (REQUIRED) A structure that contains either the template file or URL. Call the
+	// associated methods to extract the information relevant to send in an update request.
+	TemplateOpts *Template
+	// (DEPRECATED): Please use TemplateOpts for providing the template. If
+	// TemplateOpts is provided, TemplateURL will be ignored.
+	// (OPTIONAL; REQUIRED IF Template IS EMPTY) The URL of the template to instantiate.
+	// This value is ignored if Template is supplied inline.
+	TemplateURL string
+	// (DEPRECATED): Please use TemplateOpts for providing the template. If
+	// TemplateOpts is provided, Template will be ignored.
+	// (OPTIONAL; REQUIRED IF TemplateURL IS EMPTY) A template to instantiate. The value
+	// is a stringified version of the JSON/YAML template. Since the template will likely
+	// be located in a file, one way to set this variable is by using ioutil.ReadFile:
+	//   import "io/ioutil"
+	//   var opts stacks.UpdateOpts
+	//   b, err := ioutil.ReadFile("path/to/your/template/file.json")
+	//   if err != nil {
+	//     // handle error...
+	//   }
+	//   opts.Template = string(b)
+	Template string
+	// (OPTIONAL) A structure that contains details for the environment of the stack.
+	EnvironmentOpts *Environment
+	// (DEPRECATED): Please use EnvironmentOpts to provide Environment data.
+	// (OPTIONAL) A stringified JSON environment for the stack.
+	Environment string
+	// (DEPRECATED): Files is automatically determined
+	// by parsing the template and environment passed as TemplateOpts and
+	// EnvironmentOpts respectively.
+	// (OPTIONAL) A map that maps file names to file contents. It can also be used
+	// to pass provider template contents. Example:
+	//   Files: `{"myfile": "#!/bin/bash\necho 'Hello world' > /root/testfile.txt"}`
+	Files map[string]interface{}
+	// (OPTIONAL) User-defined parameters to pass to the template.
+	Parameters map[string]string
+	// (OPTIONAL) The timeout for stack update in minutes.
+	Timeout int
+	// (OPTIONAL) A list of tags to associate with the stack.
+	Tags []string
+}
+
+// ToStackUpdateMap casts an UpdateOpts struct to a map.
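+// A hedged sketch (illustrative values; "files" falls back to the child
+// templates collected while parsing TemplateOpts):
+//   m, _ := UpdateOpts{Template: "...", Timeout: 30}.ToStackUpdateMap()
+//   // m["template"] == "...", m["timeout_mins"] == 30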
+func (opts UpdateOpts) ToStackUpdateMap() (map[string]interface{}, error) { + s := make(map[string]interface{}) + Files := make(map[string]string) + if opts.TemplateOpts == nil { + if opts.Template != "" { + s["template"] = opts.Template + } else if opts.TemplateURL != "" { + s["template_url"] = opts.TemplateURL + } else { + return s, errors.New("Either Template or TemplateURL must be provided.") + } + } else { + if err := opts.TemplateOpts.Parse(); err != nil { + return nil, err + } + + if err := opts.TemplateOpts.getFileContents(opts.TemplateOpts.Parsed, ignoreIfTemplate, true); err != nil { + return nil, err + } + opts.TemplateOpts.fixFileRefs() + s["template"] = string(opts.TemplateOpts.Bin) + + for k, v := range opts.TemplateOpts.Files { + Files[k] = v + } + } + + if opts.EnvironmentOpts != nil { + if err := opts.EnvironmentOpts.Parse(); err != nil { + return nil, err + } + if err := opts.EnvironmentOpts.getRRFileContents(ignoreIfEnvironment); err != nil { + return nil, err + } + opts.EnvironmentOpts.fixFileRefs() + for k, v := range opts.EnvironmentOpts.Files { + Files[k] = v + } + s["environment"] = string(opts.EnvironmentOpts.Bin) + } else if opts.Environment != "" { + s["environment"] = opts.Environment + } + + if opts.Files != nil { + s["files"] = opts.Files + } else { + s["files"] = Files + } + + if opts.Parameters != nil { + s["parameters"] = opts.Parameters + } + + if opts.Timeout != 0 { + s["timeout_mins"] = opts.Timeout + } + + if opts.Tags != nil { + s["tags"] = strings.Join(opts.Tags, ",") + } + + return s, nil +} + +// Update accepts an UpdateOpts struct and updates an existing stack using the values +// provided. +func Update(c *gophercloud.ServiceClient, stackName, stackID string, opts UpdateOptsBuilder) UpdateResult { + var res UpdateResult + + reqBody, err := opts.ToStackUpdateMap() + if err != nil { + res.Err = err + return res + } + + _, res.Err = c.Put(updateURL(c, stackName, stackID), reqBody, nil, nil) + return res +} + +// Delete deletes a stack based on the stack name and stack ID. +func Delete(c *gophercloud.ServiceClient, stackName, stackID string) DeleteResult { + var res DeleteResult + _, res.Err = c.Delete(deleteURL(c, stackName, stackID), nil) + return res +} + +// PreviewOptsBuilder is the interface options structs have to satisfy in order +// to be used in the Preview operation in this package. +type PreviewOptsBuilder interface { + ToStackPreviewMap() (map[string]interface{}, error) +} + +// PreviewOpts contains the common options struct used in this package's Preview +// operation. +type PreviewOpts struct { + // (REQUIRED) The name of the stack. It must start with an alphabetic character. + Name string + // (REQUIRED) The timeout for stack creation in minutes. + Timeout int + // (REQUIRED) A structure that contains either the template file or url. Call the + // associated methods to extract the information relevant to send in a create request. + TemplateOpts *Template + // (DEPRECATED): Please use TemplateOpts for providing the template. If + // TemplateOpts is provided, TemplateURL will be ignored + // (OPTIONAL; REQUIRED IF Template IS EMPTY) The URL of the template to instantiate. + // This value is ignored if Template is supplied inline. + TemplateURL string + // (DEPRECATED): Please use TemplateOpts for providing the template. If + // TemplateOpts is provided, Template will be ignored + // (OPTIONAL; REQUIRED IF TemplateURL IS EMPTY) A template to instantiate. The value + // is a stringified version of the JSON/YAML template. 
Since the template will likely
+	// be located in a file, one way to set this variable is by using ioutil.ReadFile:
+	//   import "io/ioutil"
+	//   var opts stacks.PreviewOpts
+	//   b, err := ioutil.ReadFile("path/to/your/template/file.json")
+	//   if err != nil {
+	//     // handle error...
+	//   }
+	//   opts.Template = string(b)
+	Template string
+	// (OPTIONAL) Enables or disables deletion of all stack resources when stack
+	// creation fails. The default is true, meaning rollback is disabled and
+	// resources are left in place when stack creation fails.
+	DisableRollback Rollback
+	// (OPTIONAL) A structure that contains details for the environment of the stack.
+	EnvironmentOpts *Environment
+	// (DEPRECATED): Please use EnvironmentOpts to provide Environment data.
+	// (OPTIONAL) A stringified JSON environment for the stack.
+	Environment string
+	// (DEPRECATED): Files is automatically determined
+	// by parsing the template and environment passed as TemplateOpts and
+	// EnvironmentOpts respectively.
+	// (OPTIONAL) A map that maps file names to file contents. It can also be used
+	// to pass provider template contents. Example:
+	//   Files: `{"myfile": "#!/bin/bash\necho 'Hello world' > /root/testfile.txt"}`
+	Files map[string]interface{}
+	// (OPTIONAL) User-defined parameters to pass to the template.
+	Parameters map[string]string
+}
+
+// ToStackPreviewMap casts a PreviewOpts struct to a map.
+func (opts PreviewOpts) ToStackPreviewMap() (map[string]interface{}, error) {
+	s := make(map[string]interface{})
+
+	if opts.Name == "" {
+		return s, errors.New("Required field 'Name' not provided.")
+	}
+	s["stack_name"] = opts.Name
+	Files := make(map[string]string)
+	if opts.TemplateOpts == nil {
+		if opts.Template != "" {
+			s["template"] = opts.Template
+		} else if opts.TemplateURL != "" {
+			s["template_url"] = opts.TemplateURL
+		} else {
+			return s, errors.New("Either Template or TemplateURL must be provided.")
+		}
+	} else {
+		if err := opts.TemplateOpts.Parse(); err != nil {
+			return nil, err
+		}
+
+		if err := opts.TemplateOpts.getFileContents(opts.TemplateOpts.Parsed, ignoreIfTemplate, true); err != nil {
+			return nil, err
+		}
+		opts.TemplateOpts.fixFileRefs()
+		s["template"] = string(opts.TemplateOpts.Bin)
+
+		for k, v := range opts.TemplateOpts.Files {
+			Files[k] = v
+		}
+	}
+	if opts.DisableRollback != nil {
+		s["disable_rollback"] = &opts.DisableRollback
+	}
+
+	if opts.EnvironmentOpts != nil {
+		if err := opts.EnvironmentOpts.Parse(); err != nil {
+			return nil, err
+		}
+		if err := opts.EnvironmentOpts.getRRFileContents(ignoreIfEnvironment); err != nil {
+			return nil, err
+		}
+		opts.EnvironmentOpts.fixFileRefs()
+		for k, v := range opts.EnvironmentOpts.Files {
+			Files[k] = v
+		}
+		s["environment"] = string(opts.EnvironmentOpts.Bin)
+	} else if opts.Environment != "" {
+		s["environment"] = opts.Environment
+	}
+
+	if opts.Files != nil {
+		s["files"] = opts.Files
+	} else {
+		s["files"] = Files
+	}
+
+	if opts.Parameters != nil {
+		s["parameters"] = opts.Parameters
+	}
+
+	if opts.Timeout != 0 {
+		s["timeout_mins"] = opts.Timeout
+	}
+
+	return s, nil
+}
+
+// Preview accepts a PreviewOptsBuilder interface and creates a preview of a stack using the values
+// provided.
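+// A hedged usage sketch (mirrors TestPreviewStack in this change; "client"
+// stands in for any configured *gophercloud.ServiceClient and "tmpl" for a
+// parsed *Template):
+//   stack, err := Preview(client, PreviewOpts{Name: "stackcreated", Timeout: 60, TemplateOpts: tmpl}).Extract()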
+func Preview(c *gophercloud.ServiceClient, opts PreviewOptsBuilder) PreviewResult { + var res PreviewResult + + reqBody, err := opts.ToStackPreviewMap() + if err != nil { + res.Err = err + return res + } + + // Send request to API + _, res.Err = c.Post(previewURL(c), reqBody, &res.Body, &gophercloud.RequestOpts{ + OkCodes: []int{200}, + }) + return res +} + +// Abandon deletes the stack with the provided stackName and stackID, but leaves its +// resources intact, and returns data describing the stack and its resources. +func Abandon(c *gophercloud.ServiceClient, stackName, stackID string) AbandonResult { + var res AbandonResult + _, res.Err = c.Delete(abandonURL(c, stackName, stackID), &gophercloud.RequestOpts{ + JSONResponse: &res.Body, + OkCodes: []int{200}, + }) + return res +} diff --git a/vendor/github.com/rackspace/gophercloud/openstack/orchestration/v1/stacks/requests_test.go b/vendor/github.com/rackspace/gophercloud/openstack/orchestration/v1/stacks/requests_test.go new file mode 100644 index 000000000..0fde44bd0 --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/openstack/orchestration/v1/stacks/requests_test.go @@ -0,0 +1,358 @@ +package stacks + +import ( + "testing" + + "github.com/rackspace/gophercloud/pagination" + th "github.com/rackspace/gophercloud/testhelper" + fake "github.com/rackspace/gophercloud/testhelper/client" +) + +func TestCreateStack(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleCreateSuccessfully(t, CreateOutput) + + createOpts := CreateOpts{ + Name: "stackcreated", + Timeout: 60, + Template: ` + { + "stack_name": "postman_stack", + "template": { + "heat_template_version": "2013-05-23", + "description": "Simple template to test heat commands", + "parameters": { + "flavor": { + "default": "m1.tiny", + "type": "string" + } + }, + "resources": { + "hello_world": { + "type":"OS::Nova::Server", + "properties": { + "key_name": "heat_key", + "flavor": { + "get_param": "flavor" + }, + "image": "ad091b52-742f-469e-8f3c-fd81cadf0743", + "user_data": "#!/bin/bash -xv\necho \"hello world\" > /root/hello-world.txt\n" + } + } + } + } + }`, + DisableRollback: Disable, + } + actual, err := Create(fake.ServiceClient(), createOpts).Extract() + th.AssertNoErr(t, err) + + expected := CreateExpected + th.AssertDeepEquals(t, expected, actual) +} + +func TestCreateStackNewTemplateFormat(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleCreateSuccessfully(t, CreateOutput) + template := new(Template) + template.Bin = []byte(` + { + "heat_template_version": "2013-05-23", + "description": "Simple template to test heat commands", + "parameters": { + "flavor": { + "default": "m1.tiny", + "type": "string" + } + } + }`) + createOpts := CreateOpts{ + Name: "stackcreated", + Timeout: 60, + TemplateOpts: template, + DisableRollback: Disable, + } + actual, err := Create(fake.ServiceClient(), createOpts).Extract() + th.AssertNoErr(t, err) + + expected := CreateExpected + th.AssertDeepEquals(t, expected, actual) +} + +func TestAdoptStack(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleCreateSuccessfully(t, CreateOutput) + + adoptOpts := AdoptOpts{ + AdoptStackData: `{environment{parameters{}}}`, + Name: "stackcreated", + Timeout: 60, + Template: ` + { + "stack_name": "postman_stack", + "template": { + "heat_template_version": "2013-05-23", + "description": "Simple template to test heat commands", + "parameters": { + "flavor": { + "default": "m1.tiny", + "type": "string" + } + }, + "resources": { + "hello_world": { + 
"type":"OS::Nova::Server", + "properties": { + "key_name": "heat_key", + "flavor": { + "get_param": "flavor" + }, + "image": "ad091b52-742f-469e-8f3c-fd81cadf0743", + "user_data": "#!/bin/bash -xv\necho \"hello world\" > /root/hello-world.txt\n" + } + } + } + } + }`, + DisableRollback: Disable, + } + actual, err := Adopt(fake.ServiceClient(), adoptOpts).Extract() + th.AssertNoErr(t, err) + + expected := CreateExpected + th.AssertDeepEquals(t, expected, actual) +} + +func TestAdoptStackNewTemplateFormat(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleCreateSuccessfully(t, CreateOutput) + template := new(Template) + template.Bin = []byte(` +{ + "stack_name": "postman_stack", + "template": { + "heat_template_version": "2013-05-23", + "description": "Simple template to test heat commands", + "parameters": { + "flavor": { + "default": "m1.tiny", + "type": "string" + } + }, + "resources": { + "hello_world": { + "type":"OS::Nova::Server", + "properties": { + "key_name": "heat_key", + "flavor": { + "get_param": "flavor" + }, + "image": "ad091b52-742f-469e-8f3c-fd81cadf0743", + "user_data": "#!/bin/bash -xv\necho \"hello world\" > /root/hello-world.txt\n" + } + } + } + } +}`) + adoptOpts := AdoptOpts{ + AdoptStackData: `{environment{parameters{}}}`, + Name: "stackcreated", + Timeout: 60, + TemplateOpts: template, + DisableRollback: Disable, + } + actual, err := Adopt(fake.ServiceClient(), adoptOpts).Extract() + th.AssertNoErr(t, err) + + expected := CreateExpected + th.AssertDeepEquals(t, expected, actual) +} + +func TestListStack(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleListSuccessfully(t, FullListOutput) + + count := 0 + err := List(fake.ServiceClient(), nil).EachPage(func(page pagination.Page) (bool, error) { + count++ + actual, err := ExtractStacks(page) + th.AssertNoErr(t, err) + + th.CheckDeepEquals(t, ListExpected, actual) + + return true, nil + }) + th.AssertNoErr(t, err) + th.CheckEquals(t, count, 1) +} + +func TestGetStack(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleGetSuccessfully(t, GetOutput) + + actual, err := Get(fake.ServiceClient(), "postman_stack", "16ef0584-4458-41eb-87c8-0dc8d5f66c87").Extract() + th.AssertNoErr(t, err) + + expected := GetExpected + th.AssertDeepEquals(t, expected, actual) +} + +func TestUpdateStack(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleUpdateSuccessfully(t) + + updateOpts := UpdateOpts{ + Template: ` + { + "heat_template_version": "2013-05-23", + "description": "Simple template to test heat commands", + "parameters": { + "flavor": { + "default": "m1.tiny", + "type": "string" + } + }, + "resources": { + "hello_world": { + "type":"OS::Nova::Server", + "properties": { + "key_name": "heat_key", + "flavor": { + "get_param": "flavor" + }, + "image": "ad091b52-742f-469e-8f3c-fd81cadf0743", + "user_data": "#!/bin/bash -xv\necho \"hello world\" > /root/hello-world.txt\n" + } + } + } + }`, + } + err := Update(fake.ServiceClient(), "gophercloud-test-stack-2", "db6977b2-27aa-4775-9ae7-6213212d4ada", updateOpts).ExtractErr() + th.AssertNoErr(t, err) +} + +func TestUpdateStackNewTemplateFormat(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleUpdateSuccessfully(t) + + template := new(Template) + template.Bin = []byte(` + { + "heat_template_version": "2013-05-23", + "description": "Simple template to test heat commands", + "parameters": { + "flavor": { + "default": "m1.tiny", + "type": "string" + } + } + }`) + updateOpts := UpdateOpts{ + TemplateOpts: template, + 
} + err := Update(fake.ServiceClient(), "gophercloud-test-stack-2", "db6977b2-27aa-4775-9ae7-6213212d4ada", updateOpts).ExtractErr() + th.AssertNoErr(t, err) +} + +func TestDeleteStack(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleDeleteSuccessfully(t) + + err := Delete(fake.ServiceClient(), "gophercloud-test-stack-2", "db6977b2-27aa-4775-9ae7-6213212d4ada").ExtractErr() + th.AssertNoErr(t, err) +} + +func TestPreviewStack(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandlePreviewSuccessfully(t, GetOutput) + + previewOpts := PreviewOpts{ + Name: "stackcreated", + Timeout: 60, + Template: ` + { + "stack_name": "postman_stack", + "template": { + "heat_template_version": "2013-05-23", + "description": "Simple template to test heat commands", + "parameters": { + "flavor": { + "default": "m1.tiny", + "type": "string" + } + }, + "resources": { + "hello_world": { + "type":"OS::Nova::Server", + "properties": { + "key_name": "heat_key", + "flavor": { + "get_param": "flavor" + }, + "image": "ad091b52-742f-469e-8f3c-fd81cadf0743", + "user_data": "#!/bin/bash -xv\necho \"hello world\" > /root/hello-world.txt\n" + } + } + } + } + }`, + DisableRollback: Disable, + } + actual, err := Preview(fake.ServiceClient(), previewOpts).Extract() + th.AssertNoErr(t, err) + + expected := PreviewExpected + th.AssertDeepEquals(t, expected, actual) +} + +func TestPreviewStackNewTemplateFormat(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandlePreviewSuccessfully(t, GetOutput) + + template := new(Template) + template.Bin = []byte(` + { + "heat_template_version": "2013-05-23", + "description": "Simple template to test heat commands", + "parameters": { + "flavor": { + "default": "m1.tiny", + "type": "string" + } + } + }`) + previewOpts := PreviewOpts{ + Name: "stackcreated", + Timeout: 60, + TemplateOpts: template, + DisableRollback: Disable, + } + actual, err := Preview(fake.ServiceClient(), previewOpts).Extract() + th.AssertNoErr(t, err) + + expected := PreviewExpected + th.AssertDeepEquals(t, expected, actual) +} + +func TestAbandonStack(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleAbandonSuccessfully(t, AbandonOutput) + + actual, err := Abandon(fake.ServiceClient(), "postman_stack", "16ef0584-4458-41eb-87c8-0dc8d5f66c8").Extract() + th.AssertNoErr(t, err) + + expected := AbandonExpected + th.AssertDeepEquals(t, expected, actual) +} diff --git a/vendor/github.com/rackspace/gophercloud/openstack/orchestration/v1/stacks/results.go b/vendor/github.com/rackspace/gophercloud/openstack/orchestration/v1/stacks/results.go new file mode 100644 index 000000000..432bc8e0c --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/openstack/orchestration/v1/stacks/results.go @@ -0,0 +1,313 @@ +package stacks + +import ( + "encoding/json" + "fmt" + "reflect" + "time" + + "github.com/mitchellh/mapstructure" + "github.com/rackspace/gophercloud" + "github.com/rackspace/gophercloud/pagination" +) + +// CreatedStack represents the object extracted from a Create operation. +type CreatedStack struct { + ID string `mapstructure:"id"` + Links []gophercloud.Link `mapstructure:"links"` +} + +// CreateResult represents the result of a Create operation. +type CreateResult struct { + gophercloud.Result +} + +// Extract returns a pointer to a CreatedStack object and is called after a +// Create operation. 
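+// A hedged sketch ("client" and "opts" assumed):
+//   created, err := Create(client, opts).Extract()
+//   // created.ID and created.Links identify the new stack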
+func (r CreateResult) Extract() (*CreatedStack, error) { + if r.Err != nil { + return nil, r.Err + } + + var res struct { + Stack *CreatedStack `mapstructure:"stack"` + } + + if err := mapstructure.Decode(r.Body, &res); err != nil { + return nil, err + } + + return res.Stack, nil +} + +// AdoptResult represents the result of an Adopt operation. AdoptResult has the +// same form as CreateResult. +type AdoptResult struct { + CreateResult +} + +// StackPage is a pagination.Pager that is returned from a call to the List function. +type StackPage struct { + pagination.SinglePageBase +} + +// IsEmpty returns true if a ListResult contains no Stacks. +func (r StackPage) IsEmpty() (bool, error) { + stacks, err := ExtractStacks(r) + if err != nil { + return true, err + } + return len(stacks) == 0, nil +} + +// ListedStack represents an element in the slice extracted from a List operation. +type ListedStack struct { + CreationTime time.Time `mapstructure:"-"` + Description string `mapstructure:"description"` + ID string `mapstructure:"id"` + Links []gophercloud.Link `mapstructure:"links"` + Name string `mapstructure:"stack_name"` + Status string `mapstructure:"stack_status"` + StatusReason string `mapstructure:"stack_status_reason"` + Tags []string `mapstructure:"tags"` + UpdatedTime time.Time `mapstructure:"-"` +} + +// ExtractStacks extracts and returns a slice of ListedStack. It is used while iterating +// over a stacks.List call. +func ExtractStacks(page pagination.Page) ([]ListedStack, error) { + casted := page.(StackPage).Body + + var res struct { + Stacks []ListedStack `mapstructure:"stacks"` + } + + err := mapstructure.Decode(casted, &res) + if err != nil { + return nil, err + } + + var rawStacks []interface{} + switch casted.(type) { + case map[string]interface{}: + rawStacks = casted.(map[string]interface{})["stacks"].([]interface{}) + case map[string][]interface{}: + rawStacks = casted.(map[string][]interface{})["stacks"] + default: + return res.Stacks, fmt.Errorf("Unknown type: %v", reflect.TypeOf(casted)) + } + + for i := range rawStacks { + thisStack := (rawStacks[i]).(map[string]interface{}) + + if t, ok := thisStack["creation_time"].(string); ok && t != "" { + creationTime, err := time.Parse(gophercloud.STACK_TIME_FMT, t) + if err != nil { + return res.Stacks, err + } + res.Stacks[i].CreationTime = creationTime + } + + if t, ok := thisStack["updated_time"].(string); ok && t != "" { + updatedTime, err := time.Parse(gophercloud.STACK_TIME_FMT, t) + if err != nil { + return res.Stacks, err + } + res.Stacks[i].UpdatedTime = updatedTime + } + } + + return res.Stacks, nil +} + +// RetrievedStack represents the object extracted from a Get operation. 
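+// A hedged sketch using the IDs from the test fixtures in this change
+// ("client" assumed):
+//   stack, err := Get(client, "postman_stack", "16ef0584-4458-41eb-87c8-0dc8d5f66c87").Extract()
+//   // stack.Status == "CREATE_COMPLETE" for the Get fixture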
+type RetrievedStack struct { + Capabilities []interface{} `mapstructure:"capabilities"` + CreationTime time.Time `mapstructure:"-"` + Description string `mapstructure:"description"` + DisableRollback bool `mapstructure:"disable_rollback"` + ID string `mapstructure:"id"` + Links []gophercloud.Link `mapstructure:"links"` + NotificationTopics []interface{} `mapstructure:"notification_topics"` + Outputs []map[string]interface{} `mapstructure:"outputs"` + Parameters map[string]string `mapstructure:"parameters"` + Name string `mapstructure:"stack_name"` + Status string `mapstructure:"stack_status"` + StatusReason string `mapstructure:"stack_status_reason"` + Tags []string `mapstructure:"tags"` + TemplateDescription string `mapstructure:"template_description"` + Timeout int `mapstructure:"timeout_mins"` + UpdatedTime time.Time `mapstructure:"-"` +} + +// GetResult represents the result of a Get operation. +type GetResult struct { + gophercloud.Result +} + +// Extract returns a pointer to a RetrievedStack object and is called after a +// Get operation. +func (r GetResult) Extract() (*RetrievedStack, error) { + if r.Err != nil { + return nil, r.Err + } + + var res struct { + Stack *RetrievedStack `mapstructure:"stack"` + } + + config := &mapstructure.DecoderConfig{ + Result: &res, + WeaklyTypedInput: true, + } + decoder, err := mapstructure.NewDecoder(config) + if err != nil { + return nil, err + } + + if err := decoder.Decode(r.Body); err != nil { + return nil, err + } + + b := r.Body.(map[string]interface{})["stack"].(map[string]interface{}) + + if date, ok := b["creation_time"]; ok && date != nil { + t, err := time.Parse(gophercloud.STACK_TIME_FMT, date.(string)) + if err != nil { + return nil, err + } + res.Stack.CreationTime = t + } + + if date, ok := b["updated_time"]; ok && date != nil { + t, err := time.Parse(gophercloud.STACK_TIME_FMT, date.(string)) + if err != nil { + return nil, err + } + res.Stack.UpdatedTime = t + } + + return res.Stack, err +} + +// UpdateResult represents the result of a Update operation. +type UpdateResult struct { + gophercloud.ErrResult +} + +// DeleteResult represents the result of a Delete operation. +type DeleteResult struct { + gophercloud.ErrResult +} + +// PreviewedStack represents the result of a Preview operation. +type PreviewedStack struct { + Capabilities []interface{} `mapstructure:"capabilities"` + CreationTime time.Time `mapstructure:"-"` + Description string `mapstructure:"description"` + DisableRollback bool `mapstructure:"disable_rollback"` + ID string `mapstructure:"id"` + Links []gophercloud.Link `mapstructure:"links"` + Name string `mapstructure:"stack_name"` + NotificationTopics []interface{} `mapstructure:"notification_topics"` + Parameters map[string]string `mapstructure:"parameters"` + Resources []interface{} `mapstructure:"resources"` + TemplateDescription string `mapstructure:"template_description"` + Timeout int `mapstructure:"timeout_mins"` + UpdatedTime time.Time `mapstructure:"-"` +} + +// PreviewResult represents the result of a Preview operation. +type PreviewResult struct { + gophercloud.Result +} + +// Extract returns a pointer to a PreviewedStack object and is called after a +// Preview operation. 
+func (r PreviewResult) Extract() (*PreviewedStack, error) {
+	if r.Err != nil {
+		return nil, r.Err
+	}
+
+	var res struct {
+		Stack *PreviewedStack `mapstructure:"stack"`
+	}
+
+	config := &mapstructure.DecoderConfig{
+		Result:           &res,
+		WeaklyTypedInput: true,
+	}
+	decoder, err := mapstructure.NewDecoder(config)
+	if err != nil {
+		return nil, err
+	}
+
+	if err := decoder.Decode(r.Body); err != nil {
+		return nil, err
+	}
+
+	b := r.Body.(map[string]interface{})["stack"].(map[string]interface{})
+
+	if date, ok := b["creation_time"]; ok && date != nil {
+		t, err := time.Parse(gophercloud.STACK_TIME_FMT, date.(string))
+		if err != nil {
+			return nil, err
+		}
+		res.Stack.CreationTime = t
+	}
+
+	if date, ok := b["updated_time"]; ok && date != nil {
+		t, err := time.Parse(gophercloud.STACK_TIME_FMT, date.(string))
+		if err != nil {
+			return nil, err
+		}
+		res.Stack.UpdatedTime = t
+	}
+
+	return res.Stack, err
+}
+
+// AbandonedStack represents the result of an Abandon operation.
+type AbandonedStack struct {
+	Status             string                 `mapstructure:"status"`
+	Name               string                 `mapstructure:"name"`
+	Template           map[string]interface{} `mapstructure:"template"`
+	Action             string                 `mapstructure:"action"`
+	ID                 string                 `mapstructure:"id"`
+	Resources          map[string]interface{} `mapstructure:"resources"`
+	Files              map[string]string      `mapstructure:"files"`
+	StackUserProjectID string                 `mapstructure:"stack_user_project_id"`
+	ProjectID          string                 `mapstructure:"project_id"`
+	Environment        map[string]interface{} `mapstructure:"environment"`
+}
+
+// AbandonResult represents the result of an Abandon operation.
+type AbandonResult struct {
+	gophercloud.Result
+}
+
+// Extract returns a pointer to an AbandonedStack object and is called after an
+// Abandon operation.
+func (r AbandonResult) Extract() (*AbandonedStack, error) {
+	if r.Err != nil {
+		return nil, r.Err
+	}
+
+	var res AbandonedStack
+
+	if err := mapstructure.Decode(r.Body, &res); err != nil {
+		return nil, err
+	}
+
+	return &res, nil
+}
+
+// String converts an AbandonResult to a string. This is useful when passing
+// the result of an Abandon operation to an AdoptOpts AdoptStackData field.
+func (r AbandonResult) String() (string, error) {
+	out, err := json.Marshal(r)
+	if err != nil {
+		return "", err
+	}
+	return string(out), nil
+}
diff --git a/vendor/github.com/rackspace/gophercloud/openstack/orchestration/v1/stacks/template.go b/vendor/github.com/rackspace/gophercloud/openstack/orchestration/v1/stacks/template.go
new file mode 100644
index 000000000..234ce498e
--- /dev/null
+++ b/vendor/github.com/rackspace/gophercloud/openstack/orchestration/v1/stacks/template.go
@@ -0,0 +1,139 @@
+package stacks
+
+import (
+	"fmt"
+	"github.com/rackspace/gophercloud"
+	"reflect"
+	"strings"
+)
+
+// Template is a structure that represents OpenStack Heat templates
+type Template struct {
+	TE
+}
+
+// TemplateFormatVersions is a map containing allowed variations of the template format version.
+// Note that this contains the permitted variations of the _keys_ not the values.
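+// A hedged sketch of how Validate (below) consults these keys:
+//   tmpl := new(Template)
+//   tmpl.Bin = []byte("heat_template_version: 2013-05-23")
+//   err := tmpl.Validate() // nil: the version key is recognized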
+var TemplateFormatVersions = map[string]bool{
+	"HeatTemplateFormatVersion": true,
+	"heat_template_version":     true,
+	"AWSTemplateFormatVersion":  true,
+}
+
+// Validate validates the contents of the Template
+func (t *Template) Validate() error {
+	if t.Parsed == nil {
+		if err := t.Parse(); err != nil {
+			return err
+		}
+	}
+	for key := range t.Parsed {
+		if _, ok := TemplateFormatVersions[key]; ok {
+			return nil
+		}
+	}
+	return fmt.Errorf("Template format version not found.")
+}
+
+// getFileContents recursively parses a template to search for URLs. These URLs
+// are assumed to point to other templates (known in OpenStack Heat as child
+// templates). The contents of these URLs are fetched and stored in the `Files`
+// parameter of the template structure. This is the only way that a user can
+// use child templates that are located in their filesystem; URLs located on the
+// web (e.g. on GitHub or Swift) can be fetched directly by the Heat engine.
+func (t *Template) getFileContents(te interface{}, ignoreIf igFunc, recurse bool) error {
+	// initialize template if empty
+	if t.Files == nil {
+		t.Files = make(map[string]string)
+	}
+	if t.fileMaps == nil {
+		t.fileMaps = make(map[string]string)
+	}
+	switch te.(type) {
+	// if te is a map
+	case map[string]interface{}, map[interface{}]interface{}:
+		teMap, err := toStringKeys(te)
+		if err != nil {
+			return err
+		}
+		for k, v := range teMap {
+			value, ok := v.(string)
+			if !ok {
+				// if the value is not a string, recursively parse that value
+				if err := t.getFileContents(v, ignoreIf, recurse); err != nil {
+					return err
+				}
+			} else if !ignoreIf(k, value) {
+				// at this point, the k, v pair has a reference to an external template.
+				// The assumption of heatclient is that value v is a reference
+				// to a file in the user's environment
+
+				// create a new child template
+				childTemplate := new(Template)
+
+				// initialize child template
+
+				// get the base location of the child template
+				baseURL, err := gophercloud.NormalizePathURL(t.baseURL, value)
+				if err != nil {
+					return err
+				}
+				childTemplate.baseURL = baseURL
+				childTemplate.client = t.client
+
+				// fetch the contents of the child template
+				if err := childTemplate.Parse(); err != nil {
+					return err
+				}
+
+				// process child template recursively if required. This is
+				// required if the child template itself contains references to
+				// other templates
+				if recurse {
+					if err := childTemplate.getFileContents(childTemplate.Parsed, ignoreIf, recurse); err != nil {
+						return err
+					}
+				}
+				// update parent template with the current child template's content.
+				// At this point, the child template has been parsed recursively.
+				t.fileMaps[value] = childTemplate.URL
+				t.Files[childTemplate.URL] = string(childTemplate.Bin)
+
+			}
+		}
+		return nil
+	// if te is a slice, call the function on each element of the slice.
+	case []interface{}:
+		teSlice := te.([]interface{})
+		for i := range teSlice {
+			if err := t.getFileContents(teSlice[i], ignoreIf, recurse); err != nil {
+				return err
+			}
+		}
+	// if te is anything else, return
+	case string, bool, float64, nil, int:
+		return nil
+	default:
+		return fmt.Errorf("%v: Unrecognized type", reflect.TypeOf(te))
+
+	}
+	return nil
+}
+
+// function to choose keys whose values are other template files
+func ignoreIfTemplate(key string, value interface{}) bool {
+	// key must be either `get_file` or `type` for value to be a URL
+	if key != "get_file" && key != "type" {
+		return true
+	}
+	// value must be a string
+	valueString, ok := value.(string)
+	if !ok {
+		return true
+	}
+	// `.template` and `.yaml` are allowed suffixes for template URLs when referred to by `type`
+	if key == "type" && !(strings.HasSuffix(valueString, ".template") || strings.HasSuffix(valueString, ".yaml")) {
+		return true
+	}
+	return false
+}
diff --git a/vendor/github.com/rackspace/gophercloud/openstack/orchestration/v1/stacks/template_test.go b/vendor/github.com/rackspace/gophercloud/openstack/orchestration/v1/stacks/template_test.go
new file mode 100644
index 000000000..6884db860
--- /dev/null
+++ b/vendor/github.com/rackspace/gophercloud/openstack/orchestration/v1/stacks/template_test.go
@@ -0,0 +1,148 @@
+package stacks
+
+import (
+	"fmt"
+	"net/http"
+	"net/url"
+	"strings"
+	"testing"
+
+	th "github.com/rackspace/gophercloud/testhelper"
+)
+
+func TestTemplateValidation(t *testing.T) {
+	templateJSON := new(Template)
+	templateJSON.Bin = []byte(ValidJSONTemplate)
+	err := templateJSON.Validate()
+	th.AssertNoErr(t, err)
+
+	templateYAML := new(Template)
+	templateYAML.Bin = []byte(ValidYAMLTemplate)
+	err = templateYAML.Validate()
+	th.AssertNoErr(t, err)
+
+	templateInvalid := new(Template)
+	templateInvalid.Bin = []byte(InvalidTemplateNoVersion)
+	if err = templateInvalid.Validate(); err == nil {
+		t.Error("Template validation did not catch invalid template")
+	}
+}
+
+func TestTemplateParsing(t *testing.T) {
+	templateJSON := new(Template)
+	templateJSON.Bin = []byte(ValidJSONTemplate)
+	err := templateJSON.Parse()
+	th.AssertNoErr(t, err)
+	th.AssertDeepEquals(t, ValidJSONTemplateParsed, templateJSON.Parsed)
+
+	templateYAML := new(Template)
+	templateYAML.Bin = []byte(ValidJSONTemplate)
+	err = templateYAML.Parse()
+	th.AssertNoErr(t, err)
+	th.AssertDeepEquals(t, ValidJSONTemplateParsed, templateYAML.Parsed)
+
+	templateInvalid := new(Template)
+	templateInvalid.Bin = []byte("Keep Austin Weird")
+	err = templateInvalid.Parse()
+	if err == nil {
+		t.Error("Template parsing did not catch invalid template")
+	}
+}
+
+func TestIgnoreIfTemplate(t *testing.T) {
+	var keyValueTests = []struct {
+		key   string
+		value interface{}
+		out   bool
+	}{
+		{"not_get_file", "afksdf", true},
+		{"not_type", "sdfd", true},
+		{"get_file", "shdfuisd", false},
+		{"type", "dfsdfsd", true},
+		{"type", "sdfubsduf.yaml", false},
+		{"type", "sdfsdufs.template", false},
+		{"type", "sdfsdf.file", true},
+		{"type", map[string]string{"key": "value"}, true},
+	}
+	var result bool
+	for _, kv := range keyValueTests {
+		result = ignoreIfTemplate(kv.key, kv.value)
+		if result != kv.out {
+			t.Errorf("key: %v, value: %v, expected: %v, actual: %v", kv.key, kv.value, kv.out, result)
+		}
+	}
+}
+
+func TestGetFileContents(t *testing.T) {
+	th.SetupHTTP()
+	defer th.TeardownHTTP()
+	baseurl, err := getBasePath()
+	th.AssertNoErr(t, err)
+	fakeURL := strings.Join([]string{baseurl, "my_nova.yaml"}, "/")
+	urlparsed, err := url.Parse(fakeURL)
+	urlparsed, err := url.Parse(fakeURL)
+	th.AssertNoErr(t, err)
+	myNovaContent := `heat_template_version: 2014-10-16
+parameters:
+  flavor:
+    type: string
+    description: Flavor for the server to be created
+    default: 4353
+    hidden: true
+resources:
+  test_server:
+    type: "OS::Nova::Server"
+    properties:
+      name: test-server
+      flavor: 2 GB General Purpose v1
+      image: Debian 7 (Wheezy) (PVHVM)
+      networks:
+      - {uuid: 11111111-1111-1111-1111-111111111111}`
+	th.Mux.HandleFunc(urlparsed.Path, func(w http.ResponseWriter, r *http.Request) {
+		th.TestMethod(t, r, "GET")
+		w.Header().Set("Content-Type", "application/json")
+		w.WriteHeader(http.StatusOK)
+		fmt.Fprintf(w, myNovaContent)
+	})
+
+	client := fakeClient{BaseClient: getHTTPClient()}
+	te := new(Template)
+	te.Bin = []byte(`heat_template_version: 2015-04-30
+resources:
+  my_server:
+    type: my_nova.yaml`)
+	te.client = client
+
+	err = te.Parse()
+	th.AssertNoErr(t, err)
+	err = te.getFileContents(te.Parsed, ignoreIfTemplate, true)
+	th.AssertNoErr(t, err)
+	expectedFiles := map[string]string{
+		"my_nova.yaml": `heat_template_version: 2014-10-16
+parameters:
+  flavor:
+    type: string
+    description: Flavor for the server to be created
+    default: 4353
+    hidden: true
+resources:
+  test_server:
+    type: "OS::Nova::Server"
+    properties:
+      name: test-server
+      flavor: 2 GB General Purpose v1
+      image: Debian 7 (Wheezy) (PVHVM)
+      networks:
+      - {uuid: 11111111-1111-1111-1111-111111111111}`}
+	th.AssertEquals(t, expectedFiles["my_nova.yaml"], te.Files[fakeURL])
+	te.fixFileRefs()
+	expectedParsed := map[string]interface{}{
+		"heat_template_version": "2015-04-30",
+		"resources": map[string]interface{}{
+			"my_server": map[string]interface{}{
+				"type": fakeURL,
+			},
+		},
+	}
+	te.Parse()
+	th.AssertDeepEquals(t, expectedParsed, te.Parsed)
+}
diff --git a/vendor/github.com/rackspace/gophercloud/openstack/orchestration/v1/stacks/urls.go b/vendor/github.com/rackspace/gophercloud/openstack/orchestration/v1/stacks/urls.go
new file mode 100644
index 000000000..3dd2bb32e
--- /dev/null
+++ b/vendor/github.com/rackspace/gophercloud/openstack/orchestration/v1/stacks/urls.go
@@ -0,0 +1,35 @@
+package stacks
+
+import "github.com/rackspace/gophercloud"
+
+func createURL(c *gophercloud.ServiceClient) string {
+	return c.ServiceURL("stacks")
+}
+
+func adoptURL(c *gophercloud.ServiceClient) string {
+	return createURL(c)
+}
+
+func listURL(c *gophercloud.ServiceClient) string {
+	return createURL(c)
+}
+
+func getURL(c *gophercloud.ServiceClient, name, id string) string {
+	return c.ServiceURL("stacks", name, id)
+}
+
+func updateURL(c *gophercloud.ServiceClient, name, id string) string {
+	return getURL(c, name, id)
+}
+
+func deleteURL(c *gophercloud.ServiceClient, name, id string) string {
+	return getURL(c, name, id)
+}
+
+func previewURL(c *gophercloud.ServiceClient) string {
+	return c.ServiceURL("stacks", "preview")
+}
+
+func abandonURL(c *gophercloud.ServiceClient, name, id string) string {
+	return c.ServiceURL("stacks", name, id, "abandon")
+}
diff --git a/vendor/github.com/rackspace/gophercloud/openstack/orchestration/v1/stacks/utils.go b/vendor/github.com/rackspace/gophercloud/openstack/orchestration/v1/stacks/utils.go
new file mode 100644
index 000000000..7b476a9c3
--- /dev/null
+++ b/vendor/github.com/rackspace/gophercloud/openstack/orchestration/v1/stacks/utils.go
@@ -0,0 +1,161 @@
+package stacks
+
+import (
+	"encoding/json"
+	"fmt"
+	"io/ioutil"
+	"net/http"
+	"path/filepath"
+	"reflect"
+	"strings"
+
+	"github.com/rackspace/gophercloud"
+	"gopkg.in/yaml.v2"
+)
+
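+// Illustrative sketch (not part of the upstream API): the types below are
+// typically used along these lines, assuming a local file stack.yaml exists:
+//
+//	te := &TE{URL: "stack.yaml"}
+//	if err := te.Parse(); err != nil {
+//		// handle the fetch/parse error
+//	}
+//	_ = te.Parsed // the decoded JSON/YAML document
+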
+// Client is an interface that expects a Get method similar to http.Get. This
+// is needed for unit testing, since we can mock an http client. Thus, the
+// client will usually be an http.Client EXCEPT in unit tests.
+type Client interface {
+	Get(string) (*http.Response, error)
+}
+
+// TE is a base structure for both Template and Environment
+type TE struct {
+	// Bin stores the contents of the template or environment.
+	Bin []byte
+	// URL stores the URL of the template. This is allowed to be a 'file://'
+	// for local files.
+	URL string
+	// Parsed contains a parsed version of Bin. Since there are 2 different
+	// fields referring to the same value, you must be careful when accessing
+	// this field.
+	Parsed map[string]interface{}
+	// Files contains a mapping between the URLs in templates and their contents.
+	Files map[string]string
+	// fileMaps is a map used internally when determining Files.
+	fileMaps map[string]string
+	// baseURL represents the location of the template or environment file.
+	baseURL string
+	// client is an interface which allows TE to fetch contents from URLs
+	client Client
+}
+
+// Fetch fetches the contents of a TE from its URL. Once a TE structure has a
+// URL, call the fetch method to fetch the contents.
+func (t *TE) Fetch() error {
+	// if the baseURL is not provided, use the current directory as the base URL
+	if t.baseURL == "" {
+		u, err := getBasePath()
+		if err != nil {
+			return err
+		}
+		t.baseURL = u
+	}
+
+	// if the contents are already present, do nothing.
+	if t.Bin != nil {
+		return nil
+	}
+
+	// get an FQDN from the URL using the baseURL of the TE. For local files,
+	// the URLs will have the `file` scheme.
+	u, err := gophercloud.NormalizePathURL(t.baseURL, t.URL)
+	if err != nil {
+		return err
+	}
+	t.URL = u
+
+	// get an HTTP client if none present
+	if t.client == nil {
+		t.client = getHTTPClient()
+	}
+
+	// use the client to fetch the contents of the TE
+	resp, err := t.client.Get(t.URL)
+	if err != nil {
+		return err
+	}
+	defer resp.Body.Close()
+	body, err := ioutil.ReadAll(resp.Body)
+	if err != nil {
+		return err
+	}
+	t.Bin = body
+	return nil
+}
+
+// get the basepath of the TE
+func getBasePath() (string, error) {
+	basePath, err := filepath.Abs(".")
+	if err != nil {
+		return "", err
+	}
+	u, err := gophercloud.NormalizePathURL("", basePath)
+	if err != nil {
+		return "", err
+	}
+	return u, nil
+}
+
+// get an HTTP client to retrieve URLs. This client allows the use of the `file`
+// scheme since we may need to fetch files from the user's filesystem
+func getHTTPClient() Client {
+	transport := &http.Transport{}
+	transport.RegisterProtocol("file", http.NewFileTransport(http.Dir("/")))
+	return &http.Client{Transport: transport}
+}
+
+// Parse will parse the contents and then validate. The contents MUST be either JSON or YAML.
+func (t *TE) Parse() error {
+	if err := t.Fetch(); err != nil {
+		return err
+	}
+	if jerr := json.Unmarshal(t.Bin, &t.Parsed); jerr != nil {
+		if yerr := yaml.Unmarshal(t.Bin, &t.Parsed); yerr != nil {
+			return fmt.Errorf("Data is in neither JSON nor YAML format.")
+		}
+	}
+	return t.Validate()
+}
+
+// Validate validates the contents of TE
+func (t *TE) Validate() error {
+	return nil
+}
+
+// igFunc is a parameter used by getFileContents and GetRRFileContents to check
+// for valid URLs.
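+// ignoreIfTemplate (in template.go) is one such function: it returns true
+// when a key/value pair should be skipped rather than fetched.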
+type igFunc func(string, interface{}) bool
+
+// convert map[interface{}]interface{} to map[string]interface{}
+func toStringKeys(m interface{}) (map[string]interface{}, error) {
+	switch m.(type) {
+	case map[string]interface{}, map[interface{}]interface{}:
+		typedMap := make(map[string]interface{})
+		if _, ok := m.(map[interface{}]interface{}); ok {
+			for k, v := range m.(map[interface{}]interface{}) {
+				typedMap[k.(string)] = v
+			}
+		} else {
+			typedMap = m.(map[string]interface{})
+		}
+		return typedMap, nil
+	default:
+		return nil, fmt.Errorf("Expected a map of type map[string]interface{} or map[interface{}]interface{}, actual type: %v", reflect.TypeOf(m))
+
+	}
+}
+
+// fix the reference to files by replacing relative URLs with absolute URLs
+func (t *TE) fixFileRefs() {
+	tStr := string(t.Bin)
+	if t.fileMaps == nil {
+		return
+	}
+	for k, v := range t.fileMaps {
+		tStr = strings.Replace(tStr, k, v, -1)
+	}
+	t.Bin = []byte(tStr)
+}
diff --git a/vendor/github.com/rackspace/gophercloud/openstack/orchestration/v1/stacks/utils_test.go b/vendor/github.com/rackspace/gophercloud/openstack/orchestration/v1/stacks/utils_test.go
new file mode 100644
index 000000000..2536e033a
--- /dev/null
+++ b/vendor/github.com/rackspace/gophercloud/openstack/orchestration/v1/stacks/utils_test.go
@@ -0,0 +1,94 @@
+package stacks
+
+import (
+	"fmt"
+	"net/http"
+	"net/url"
+	"strings"
+	"testing"
+
+	th "github.com/rackspace/gophercloud/testhelper"
+)
+
+func TestTEFixFileRefs(t *testing.T) {
+	te := TE{
+		Bin: []byte(`string_to_replace: my fair lady`),
+		fileMaps: map[string]string{
+			"string_to_replace": "london bridge is falling down",
+		},
+	}
+	te.fixFileRefs()
+	th.AssertEquals(t, string(te.Bin), `london bridge is falling down: my fair lady`)
+}
+
+func TestToStringKeys(t *testing.T) {
+	var test1 interface{} = map[interface{}]interface{}{
+		"Adam":  "Smith",
+		"Isaac": "Newton",
+	}
+	result1, err := toStringKeys(test1)
+	th.AssertNoErr(t, err)
+
+	expected := map[string]interface{}{
+		"Adam":  "Smith",
+		"Isaac": "Newton",
+	}
+	th.AssertDeepEquals(t, result1, expected)
+}
+
+func TestGetBasePath(t *testing.T) {
+	_, err := getBasePath()
+	th.AssertNoErr(t, err)
+}
+
+// test if the HTTP client can read file-type URLs. Read the URL of this file
+// because if this test is running, it means this file _must_ exist
+func TestGetHTTPClient(t *testing.T) {
+	client := getHTTPClient()
+	baseurl, err := getBasePath()
+	th.AssertNoErr(t, err)
+	resp, err := client.Get(baseurl)
+	th.AssertNoErr(t, err)
+	th.AssertEquals(t, resp.StatusCode, 200)
+}
+
+// Implement a fakeClient that can be used to mock out HTTP requests
+type fakeClient struct {
+	BaseClient Client
+}
+
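+// fakeClient satisfies the Client interface because it implements
+// Get(string) (*http.Response, error); any such type can stand in for the
+// real *http.Client here.
+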
+// this client's Get method first changes the URL given to point to
+// testhelper's (th) endpoints. This is done because the http Mux does not seem
+// to work for FQDNs with the `file` scheme
+func (c fakeClient) Get(url string) (*http.Response, error) {
+	newurl := strings.Replace(url, "file://", th.Endpoint(), 1)
+	return c.BaseClient.Get(newurl)
+}
+
+// test the fetch function
+func TestFetch(t *testing.T) {
+	th.SetupHTTP()
+	defer th.TeardownHTTP()
+	baseurl, err := getBasePath()
+	th.AssertNoErr(t, err)
+	fakeURL := strings.Join([]string{baseurl, "file.yaml"}, "/")
+	urlparsed, err := url.Parse(fakeURL)
+	th.AssertNoErr(t, err)
+
+	th.Mux.HandleFunc(urlparsed.Path, func(w http.ResponseWriter, r *http.Request) {
+		th.TestMethod(t, r, "GET")
+		w.Header().Set("Content-Type", "application/json")
+		w.WriteHeader(http.StatusOK)
+		fmt.Fprintf(w, "Fee-fi-fo-fum")
+	})
+
+	client := fakeClient{BaseClient: getHTTPClient()}
+	te := TE{
+		URL:    "file.yaml",
+		client: client,
+	}
+	err = te.Fetch()
+	th.AssertNoErr(t, err)
+	th.AssertEquals(t, fakeURL, te.URL)
+	th.AssertEquals(t, "Fee-fi-fo-fum", string(te.Bin))
+}
diff --git a/vendor/github.com/rackspace/gophercloud/openstack/orchestration/v1/stacktemplates/doc.go b/vendor/github.com/rackspace/gophercloud/openstack/orchestration/v1/stacktemplates/doc.go
new file mode 100644
index 000000000..5af0bd62a
--- /dev/null
+++ b/vendor/github.com/rackspace/gophercloud/openstack/orchestration/v1/stacktemplates/doc.go
@@ -0,0 +1,8 @@
+// Package stacktemplates provides operations for working with Heat templates.
+// A Cloud Orchestration template is a portable file, written in a user-readable
+// language, that describes how a set of resources should be assembled and what
+// software should be installed in order to produce a working stack. The template
+// specifies what resources should be used, what attributes can be set, and other
+// parameters that are critical to the successful, repeatable automation of a
+// specific application stack.
+package stacktemplates
diff --git a/vendor/github.com/rackspace/gophercloud/openstack/orchestration/v1/stacktemplates/fixtures.go b/vendor/github.com/rackspace/gophercloud/openstack/orchestration/v1/stacktemplates/fixtures.go
new file mode 100644
index 000000000..fa9b3016f
--- /dev/null
+++ b/vendor/github.com/rackspace/gophercloud/openstack/orchestration/v1/stacktemplates/fixtures.go
@@ -0,0 +1,95 @@
+package stacktemplates
+
+import (
+	"fmt"
+	"net/http"
+	"testing"
+
+	th "github.com/rackspace/gophercloud/testhelper"
+	fake "github.com/rackspace/gophercloud/testhelper/client"
+)
+
+// GetExpected represents the expected object from a Get request.
+var GetExpected = "{\n \"description\": \"Simple template to test heat commands\",\n \"heat_template_version\": \"2013-05-23\",\n \"parameters\": {\n  \"flavor\": {\n   \"default\": \"m1.tiny\",\n   \"type\": \"string\"\n  }\n },\n \"resources\": {\n  \"hello_world\": {\n   \"properties\": {\n    \"flavor\": {\n     \"get_param\": \"flavor\"\n    },\n    \"image\": \"ad091b52-742f-469e-8f3c-fd81cadf0743\",\n    \"key_name\": \"heat_key\"\n   },\n   \"type\": \"OS::Nova::Server\"\n  }\n }\n}"
+
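+// Note that GetExpected is GetOutput as re-marshalled by Extract via
+// json.MarshalIndent, which sorts object keys alphabetically; hence
+// "description" precedes "heat_template_version" here.
+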
+// GetOutput represents the response body from a Get request.
+const GetOutput = `
+{
+  "heat_template_version": "2013-05-23",
+  "description": "Simple template to test heat commands",
+  "parameters": {
+    "flavor": {
+      "default": "m1.tiny",
+      "type": "string"
+    }
+  },
+  "resources": {
+    "hello_world": {
+      "type": "OS::Nova::Server",
+      "properties": {
+        "key_name": "heat_key",
+        "flavor": {
+          "get_param": "flavor"
+        },
+        "image": "ad091b52-742f-469e-8f3c-fd81cadf0743"
+      }
+    }
+  }
+}`
+
+// HandleGetSuccessfully creates an HTTP handler at `/stacks/postman_stack/16ef0584-4458-41eb-87c8-0dc8d5f66c87/template`
+// on the test handler mux that responds with a `Get` response.
+func HandleGetSuccessfully(t *testing.T, output string) {
+	th.Mux.HandleFunc("/stacks/postman_stack/16ef0584-4458-41eb-87c8-0dc8d5f66c87/template", func(w http.ResponseWriter, r *http.Request) {
+		th.TestMethod(t, r, "GET")
+		th.TestHeader(t, r, "X-Auth-Token", fake.TokenID)
+		th.TestHeader(t, r, "Accept", "application/json")
+
+		w.Header().Set("Content-Type", "application/json")
+		w.WriteHeader(http.StatusOK)
+		fmt.Fprintf(w, output)
+	})
+}
+
+// ValidateExpected represents the expected object from a Validate request.
+var ValidateExpected = &ValidatedTemplate{
+	Description: "Simple template to test heat commands",
+	Parameters: map[string]interface{}{
+		"flavor": map[string]interface{}{
+			"Default":     "m1.tiny",
+			"Type":        "String",
+			"NoEcho":      "false",
+			"Description": "",
+			"Label":       "flavor",
+		},
+	},
+}
+
+// ValidateOutput represents the response body from a Validate request.
+const ValidateOutput = `
+{
+  "Description": "Simple template to test heat commands",
+  "Parameters": {
+    "flavor": {
+      "Default": "m1.tiny",
+      "Type": "String",
+      "NoEcho": "false",
+      "Description": "",
+      "Label": "flavor"
+    }
+  }
+}`
+
+// HandleValidateSuccessfully creates an HTTP handler at `/validate`
+// on the test handler mux that responds with a `Validate` response.
+func HandleValidateSuccessfully(t *testing.T, output string) {
+	th.Mux.HandleFunc("/validate", func(w http.ResponseWriter, r *http.Request) {
+		th.TestMethod(t, r, "POST")
+		th.TestHeader(t, r, "X-Auth-Token", fake.TokenID)
+		th.TestHeader(t, r, "Accept", "application/json")
+
+		w.Header().Set("Content-Type", "application/json")
+		w.WriteHeader(http.StatusOK)
+		fmt.Fprintf(w, output)
+	})
+}
diff --git a/vendor/github.com/rackspace/gophercloud/openstack/orchestration/v1/stacktemplates/requests.go b/vendor/github.com/rackspace/gophercloud/openstack/orchestration/v1/stacktemplates/requests.go
new file mode 100644
index 000000000..c0cea3575
--- /dev/null
+++ b/vendor/github.com/rackspace/gophercloud/openstack/orchestration/v1/stacktemplates/requests.go
@@ -0,0 +1,58 @@
+package stacktemplates
+
+import (
+	"fmt"
+
+	"github.com/rackspace/gophercloud"
+)
+
+// Get retrieves data for the given stack template.
+func Get(c *gophercloud.ServiceClient, stackName, stackID string) GetResult {
+	var res GetResult
+	_, res.Err = c.Request("GET", getURL(c, stackName, stackID), gophercloud.RequestOpts{
+		JSONResponse: &res.Body,
+	})
+	return res
+}
+
+// ValidateOptsBuilder describes struct types that can be accepted by the Validate call.
+// The ValidateOpts struct in this package does.
+type ValidateOptsBuilder interface {
+	ToStackTemplateValidateMap() (map[string]interface{}, error)
+}
+
+// ValidateOpts specifies the template validation parameters.
+type ValidateOpts struct {
+	Template    string
+	TemplateURL string
+}
+
+// ToStackTemplateValidateMap assembles a request body based on the contents of a ValidateOpts.
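+// For illustration (hypothetical values), ValidateOpts{Template: "{...}"}
+// yields map[string]interface{}{"template": "{...}"}; Template takes
+// precedence when both fields are set, and an empty opts value is an error.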
+func (opts ValidateOpts) ToStackTemplateValidateMap() (map[string]interface{}, error) { + vo := make(map[string]interface{}) + if opts.Template != "" { + vo["template"] = opts.Template + return vo, nil + } + if opts.TemplateURL != "" { + vo["template_url"] = opts.TemplateURL + return vo, nil + } + return vo, fmt.Errorf("One of Template or TemplateURL is required.") +} + +// Validate validates the given stack template. +func Validate(c *gophercloud.ServiceClient, opts ValidateOptsBuilder) ValidateResult { + var res ValidateResult + + reqBody, err := opts.ToStackTemplateValidateMap() + if err != nil { + res.Err = err + return res + } + + _, res.Err = c.Post(validateURL(c), reqBody, &res.Body, &gophercloud.RequestOpts{ + OkCodes: []int{200}, + }) + return res +} diff --git a/vendor/github.com/rackspace/gophercloud/openstack/orchestration/v1/stacktemplates/requests_test.go b/vendor/github.com/rackspace/gophercloud/openstack/orchestration/v1/stacktemplates/requests_test.go new file mode 100644 index 000000000..42667c927 --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/openstack/orchestration/v1/stacktemplates/requests_test.go @@ -0,0 +1,57 @@ +package stacktemplates + +import ( + "testing" + + th "github.com/rackspace/gophercloud/testhelper" + fake "github.com/rackspace/gophercloud/testhelper/client" +) + +func TestGetTemplate(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleGetSuccessfully(t, GetOutput) + + actual, err := Get(fake.ServiceClient(), "postman_stack", "16ef0584-4458-41eb-87c8-0dc8d5f66c87").Extract() + th.AssertNoErr(t, err) + + expected := GetExpected + th.AssertDeepEquals(t, expected, string(actual)) +} + +func TestValidateTemplate(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleValidateSuccessfully(t, ValidateOutput) + + opts := ValidateOpts{ + Template: `{ + "heat_template_version": "2013-05-23", + "description": "Simple template to test heat commands", + "parameters": { + "flavor": { + "default": "m1.tiny", + "type": "string" + } + }, + "resources": { + "hello_world": { + "type": "OS::Nova::Server", + "properties": { + "key_name": "heat_key", + "flavor": { + "get_param": "flavor" + }, + "image": "ad091b52-742f-469e-8f3c-fd81cadf0743", + "user_data": "#!/bin/bash -xv\necho \"hello world\" > /root/hello-world.txt\n" + } + } + } + }`, + } + actual, err := Validate(fake.ServiceClient(), opts).Extract() + th.AssertNoErr(t, err) + + expected := ValidateExpected + th.AssertDeepEquals(t, expected, actual) +} diff --git a/vendor/github.com/rackspace/gophercloud/openstack/orchestration/v1/stacktemplates/results.go b/vendor/github.com/rackspace/gophercloud/openstack/orchestration/v1/stacktemplates/results.go new file mode 100644 index 000000000..4e9ba5a4c --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/openstack/orchestration/v1/stacktemplates/results.go @@ -0,0 +1,51 @@ +package stacktemplates + +import ( + "encoding/json" + "github.com/mitchellh/mapstructure" + "github.com/rackspace/gophercloud" +) + +// GetResult represents the result of a Get operation. +type GetResult struct { + gophercloud.Result +} + +// Extract returns the JSON template and is called after a Get operation. +func (r GetResult) Extract() ([]byte, error) { + if r.Err != nil { + return nil, r.Err + } + template, err := json.MarshalIndent(r.Body, "", " ") + if err != nil { + return nil, err + } + return template, nil +} + +// ValidatedTemplate represents the parsed object returned from a Validate request. 
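+// The response body is decoded into this struct via the mapstructure tags on
+// its fields; see Extract on ValidateResult below.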
+type ValidatedTemplate struct { + Description string `mapstructure:"Description"` + Parameters map[string]interface{} `mapstructure:"Parameters"` + ParameterGroups map[string]interface{} `mapstructure:"ParameterGroups"` +} + +// ValidateResult represents the result of a Validate operation. +type ValidateResult struct { + gophercloud.Result +} + +// Extract returns a pointer to a ValidatedTemplate object and is called after a +// Validate operation. +func (r ValidateResult) Extract() (*ValidatedTemplate, error) { + if r.Err != nil { + return nil, r.Err + } + + var res ValidatedTemplate + if err := mapstructure.Decode(r.Body, &res); err != nil { + return nil, err + } + + return &res, nil +} diff --git a/vendor/github.com/rackspace/gophercloud/openstack/orchestration/v1/stacktemplates/urls.go b/vendor/github.com/rackspace/gophercloud/openstack/orchestration/v1/stacktemplates/urls.go new file mode 100644 index 000000000..c30b7ca1a --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/openstack/orchestration/v1/stacktemplates/urls.go @@ -0,0 +1,11 @@ +package stacktemplates + +import "github.com/rackspace/gophercloud" + +func getURL(c *gophercloud.ServiceClient, stackName, stackID string) string { + return c.ServiceURL("stacks", stackName, stackID, "template") +} + +func validateURL(c *gophercloud.ServiceClient) string { + return c.ServiceURL("validate") +} diff --git a/vendor/github.com/rackspace/gophercloud/openstack/utils/choose_version.go b/vendor/github.com/rackspace/gophercloud/openstack/utils/choose_version.go new file mode 100644 index 000000000..b697ba816 --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/openstack/utils/choose_version.go @@ -0,0 +1,114 @@ +package utils + +import ( + "fmt" + "strings" + + "github.com/rackspace/gophercloud" +) + +// Version is a supported API version, corresponding to a vN package within the appropriate service. +type Version struct { + ID string + Suffix string + Priority int +} + +var goodStatus = map[string]bool{ + "current": true, + "supported": true, + "stable": true, +} + +// ChooseVersion queries the base endpoint of an API to choose the most recent non-experimental alternative from a service's +// published versions. +// It returns the highest-Priority Version among the alternatives that are provided, as well as its corresponding endpoint. +func ChooseVersion(client *gophercloud.ProviderClient, recognized []*Version) (*Version, string, error) { + type linkResp struct { + Href string `json:"href"` + Rel string `json:"rel"` + } + + type valueResp struct { + ID string `json:"id"` + Status string `json:"status"` + Links []linkResp `json:"links"` + } + + type versionsResp struct { + Values []valueResp `json:"values"` + } + + type response struct { + Versions versionsResp `json:"versions"` + } + + normalize := func(endpoint string) string { + if !strings.HasSuffix(endpoint, "/") { + return endpoint + "/" + } + return endpoint + } + identityEndpoint := normalize(client.IdentityEndpoint) + + // If a full endpoint is specified, check version suffixes for a match first. 
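+	// For example (illustrative), an IdentityEndpoint of
+	// "https://identity.example.com/v2.0/" matches a Version whose Suffix is
+	// "/v2.0/", so no discovery request is made.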
+ for _, v := range recognized { + if strings.HasSuffix(identityEndpoint, v.Suffix) { + return v, identityEndpoint, nil + } + } + + var resp response + _, err := client.Request("GET", client.IdentityBase, gophercloud.RequestOpts{ + JSONResponse: &resp, + OkCodes: []int{200, 300}, + }) + + if err != nil { + return nil, "", err + } + + byID := make(map[string]*Version) + for _, version := range recognized { + byID[version.ID] = version + } + + var highest *Version + var endpoint string + + for _, value := range resp.Versions.Values { + href := "" + for _, link := range value.Links { + if link.Rel == "self" { + href = normalize(link.Href) + } + } + + if matching, ok := byID[value.ID]; ok { + // Prefer a version that exactly matches the provided endpoint. + if href == identityEndpoint { + if href == "" { + return nil, "", fmt.Errorf("Endpoint missing in version %s response from %s", value.ID, client.IdentityBase) + } + return matching, href, nil + } + + // Otherwise, find the highest-priority version with a whitelisted status. + if goodStatus[strings.ToLower(value.Status)] { + if highest == nil || matching.Priority > highest.Priority { + highest = matching + endpoint = href + } + } + } + } + + if highest == nil { + return nil, "", fmt.Errorf("No supported version available from endpoint %s", client.IdentityBase) + } + if endpoint == "" { + return nil, "", fmt.Errorf("Endpoint missing in version %s response from %s", highest.ID, client.IdentityBase) + } + + return highest, endpoint, nil +} diff --git a/vendor/github.com/rackspace/gophercloud/openstack/utils/choose_version_test.go b/vendor/github.com/rackspace/gophercloud/openstack/utils/choose_version_test.go new file mode 100644 index 000000000..388d6892c --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/openstack/utils/choose_version_test.go @@ -0,0 +1,118 @@ +package utils + +import ( + "fmt" + "net/http" + "testing" + + "github.com/rackspace/gophercloud" + "github.com/rackspace/gophercloud/testhelper" +) + +func setupVersionHandler() { + testhelper.Mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) { + fmt.Fprintf(w, ` + { + "versions": { + "values": [ + { + "status": "stable", + "id": "v3.0", + "links": [ + { "href": "%s/v3.0", "rel": "self" } + ] + }, + { + "status": "stable", + "id": "v2.0", + "links": [ + { "href": "%s/v2.0", "rel": "self" } + ] + } + ] + } + } + `, testhelper.Server.URL, testhelper.Server.URL) + }) +} + +func TestChooseVersion(t *testing.T) { + testhelper.SetupHTTP() + defer testhelper.TeardownHTTP() + setupVersionHandler() + + v2 := &Version{ID: "v2.0", Priority: 2, Suffix: "blarg"} + v3 := &Version{ID: "v3.0", Priority: 3, Suffix: "hargl"} + + c := &gophercloud.ProviderClient{ + IdentityBase: testhelper.Endpoint(), + IdentityEndpoint: "", + } + v, endpoint, err := ChooseVersion(c, []*Version{v2, v3}) + + if err != nil { + t.Fatalf("Unexpected error from ChooseVersion: %v", err) + } + + if v != v3 { + t.Errorf("Expected %#v to win, but %#v did instead", v3, v) + } + + expected := testhelper.Endpoint() + "v3.0/" + if endpoint != expected { + t.Errorf("Expected endpoint [%s], but was [%s] instead", expected, endpoint) + } +} + +func TestChooseVersionOpinionatedLink(t *testing.T) { + testhelper.SetupHTTP() + defer testhelper.TeardownHTTP() + setupVersionHandler() + + v2 := &Version{ID: "v2.0", Priority: 2, Suffix: "nope"} + v3 := &Version{ID: "v3.0", Priority: 3, Suffix: "northis"} + + c := &gophercloud.ProviderClient{ + IdentityBase: testhelper.Endpoint(), + IdentityEndpoint: 
testhelper.Endpoint() + "v2.0/", + } + v, endpoint, err := ChooseVersion(c, []*Version{v2, v3}) + if err != nil { + t.Fatalf("Unexpected error from ChooseVersion: %v", err) + } + + if v != v2 { + t.Errorf("Expected %#v to win, but %#v did instead", v2, v) + } + + expected := testhelper.Endpoint() + "v2.0/" + if endpoint != expected { + t.Errorf("Expected endpoint [%s], but was [%s] instead", expected, endpoint) + } +} + +func TestChooseVersionFromSuffix(t *testing.T) { + testhelper.SetupHTTP() + defer testhelper.TeardownHTTP() + + v2 := &Version{ID: "v2.0", Priority: 2, Suffix: "/v2.0/"} + v3 := &Version{ID: "v3.0", Priority: 3, Suffix: "/v3.0/"} + + c := &gophercloud.ProviderClient{ + IdentityBase: testhelper.Endpoint(), + IdentityEndpoint: testhelper.Endpoint() + "v2.0/", + } + v, endpoint, err := ChooseVersion(c, []*Version{v2, v3}) + if err != nil { + t.Fatalf("Unexpected error from ChooseVersion: %v", err) + } + + if v != v2 { + t.Errorf("Expected %#v to win, but %#v did instead", v2, v) + } + + expected := testhelper.Endpoint() + "v2.0/" + if endpoint != expected { + t.Errorf("Expected endpoint [%s], but was [%s] instead", expected, endpoint) + } +} diff --git a/vendor/github.com/rackspace/gophercloud/pagination/http.go b/vendor/github.com/rackspace/gophercloud/pagination/http.go new file mode 100644 index 000000000..1b3fe94ab --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/pagination/http.go @@ -0,0 +1,60 @@ +package pagination + +import ( + "encoding/json" + "io/ioutil" + "net/http" + "net/url" + "strings" + + "github.com/rackspace/gophercloud" +) + +// PageResult stores the HTTP response that returned the current page of results. +type PageResult struct { + gophercloud.Result + url.URL +} + +// PageResultFrom parses an HTTP response as JSON and returns a PageResult containing the +// results, interpreting it as JSON if the content type indicates. +func PageResultFrom(resp *http.Response) (PageResult, error) { + var parsedBody interface{} + + defer resp.Body.Close() + rawBody, err := ioutil.ReadAll(resp.Body) + if err != nil { + return PageResult{}, err + } + + if strings.HasPrefix(resp.Header.Get("Content-Type"), "application/json") { + err = json.Unmarshal(rawBody, &parsedBody) + if err != nil { + return PageResult{}, err + } + } else { + parsedBody = rawBody + } + + return PageResultFromParsed(resp, parsedBody), err +} + +// PageResultFromParsed constructs a PageResult from an HTTP response that has already had its +// body parsed as JSON (and closed). +func PageResultFromParsed(resp *http.Response, body interface{}) PageResult { + return PageResult{ + Result: gophercloud.Result{ + Body: body, + Header: resp.Header, + }, + URL: *resp.Request.URL, + } +} + +// Request performs an HTTP request and extracts the http.Response from the result. +func Request(client *gophercloud.ServiceClient, headers map[string]string, url string) (*http.Response, error) { + return client.Request("GET", url, gophercloud.RequestOpts{ + MoreHeaders: headers, + OkCodes: []int{200, 204}, + }) +} diff --git a/vendor/github.com/rackspace/gophercloud/pagination/linked.go b/vendor/github.com/rackspace/gophercloud/pagination/linked.go new file mode 100644 index 000000000..e9bd8dec9 --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/pagination/linked.go @@ -0,0 +1,67 @@ +package pagination + +import "fmt" + +// LinkedPageBase may be embedded to implement a page that provides navigational "Next" and "Previous" links within its result. 
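+// For example (illustrative), a JSON response body of the form
+//
+//	{ "ints": [1, 2, 3], "links": { "next": "http://example.com/page2" } }
+//
+// is traversed with the default LinkPath of []string{"links", "next"}.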
+type LinkedPageBase struct { + PageResult + + // LinkPath lists the keys that should be traversed within a response to arrive at the "next" pointer. + // If any link along the path is missing, an empty URL will be returned. + // If any link results in an unexpected value type, an error will be returned. + // When left as "nil", []string{"links", "next"} will be used as a default. + LinkPath []string +} + +// NextPageURL extracts the pagination structure from a JSON response and returns the "next" link, if one is present. +// It assumes that the links are available in a "links" element of the top-level response object. +// If this is not the case, override NextPageURL on your result type. +func (current LinkedPageBase) NextPageURL() (string, error) { + var path []string + var key string + + if current.LinkPath == nil { + path = []string{"links", "next"} + } else { + path = current.LinkPath + } + + submap, ok := current.Body.(map[string]interface{}) + if !ok { + return "", fmt.Errorf("Expected an object, but was %#v", current.Body) + } + + for { + key, path = path[0], path[1:len(path)] + + value, ok := submap[key] + if !ok { + return "", nil + } + + if len(path) > 0 { + submap, ok = value.(map[string]interface{}) + if !ok { + return "", fmt.Errorf("Expected an object, but was %#v", value) + } + } else { + if value == nil { + // Actual null element. + return "", nil + } + + url, ok := value.(string) + if !ok { + return "", fmt.Errorf("Expected a string, but was %#v", value) + } + + return url, nil + } + } +} + +// GetBody returns the linked page's body. This method is needed to satisfy the +// Page interface. +func (current LinkedPageBase) GetBody() interface{} { + return current.Body +} diff --git a/vendor/github.com/rackspace/gophercloud/pagination/linked_test.go b/vendor/github.com/rackspace/gophercloud/pagination/linked_test.go new file mode 100644 index 000000000..1ac0f7316 --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/pagination/linked_test.go @@ -0,0 +1,120 @@ +package pagination + +import ( + "fmt" + "net/http" + "reflect" + "testing" + + "github.com/mitchellh/mapstructure" + "github.com/rackspace/gophercloud/testhelper" +) + +// LinkedPager sample and test cases. 
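+// Each page below advertises the next one through a "links.next" field; the
+// final page sets it to null, which NextPageURL reports as "" to stop paging.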
+ +type LinkedPageResult struct { + LinkedPageBase +} + +func (r LinkedPageResult) IsEmpty() (bool, error) { + is, err := ExtractLinkedInts(r) + if err != nil { + return true, nil + } + return len(is) == 0, nil +} + +func ExtractLinkedInts(page Page) ([]int, error) { + var response struct { + Ints []int `mapstructure:"ints"` + } + + err := mapstructure.Decode(page.(LinkedPageResult).Body, &response) + if err != nil { + return nil, err + } + + return response.Ints, nil +} + +func createLinked(t *testing.T) Pager { + testhelper.SetupHTTP() + + testhelper.Mux.HandleFunc("/page1", func(w http.ResponseWriter, r *http.Request) { + w.Header().Add("Content-Type", "application/json") + fmt.Fprintf(w, `{ "ints": [1, 2, 3], "links": { "next": "%s/page2" } }`, testhelper.Server.URL) + }) + + testhelper.Mux.HandleFunc("/page2", func(w http.ResponseWriter, r *http.Request) { + w.Header().Add("Content-Type", "application/json") + fmt.Fprintf(w, `{ "ints": [4, 5, 6], "links": { "next": "%s/page3" } }`, testhelper.Server.URL) + }) + + testhelper.Mux.HandleFunc("/page3", func(w http.ResponseWriter, r *http.Request) { + w.Header().Add("Content-Type", "application/json") + fmt.Fprintf(w, `{ "ints": [7, 8, 9], "links": { "next": null } }`) + }) + + client := createClient() + + createPage := func(r PageResult) Page { + return LinkedPageResult{LinkedPageBase{PageResult: r}} + } + + return NewPager(client, testhelper.Server.URL+"/page1", createPage) +} + +func TestEnumerateLinked(t *testing.T) { + pager := createLinked(t) + defer testhelper.TeardownHTTP() + + callCount := 0 + err := pager.EachPage(func(page Page) (bool, error) { + actual, err := ExtractLinkedInts(page) + if err != nil { + return false, err + } + + t.Logf("Handler invoked with %v", actual) + + var expected []int + switch callCount { + case 0: + expected = []int{1, 2, 3} + case 1: + expected = []int{4, 5, 6} + case 2: + expected = []int{7, 8, 9} + default: + t.Fatalf("Unexpected call count: %d", callCount) + return false, nil + } + + if !reflect.DeepEqual(expected, actual) { + t.Errorf("Call %d: Expected %#v, but was %#v", callCount, expected, actual) + } + + callCount++ + return true, nil + }) + if err != nil { + t.Errorf("Unexpected error for page iteration: %v", err) + } + + if callCount != 3 { + t.Errorf("Expected 3 calls, but was %d", callCount) + } +} + +func TestAllPagesLinked(t *testing.T) { + pager := createLinked(t) + defer testhelper.TeardownHTTP() + + page, err := pager.AllPages() + testhelper.AssertNoErr(t, err) + + expected := []int{1, 2, 3, 4, 5, 6, 7, 8, 9} + actual, err := ExtractLinkedInts(page) + testhelper.AssertNoErr(t, err) + testhelper.CheckDeepEquals(t, expected, actual) +} diff --git a/vendor/github.com/rackspace/gophercloud/pagination/marker.go b/vendor/github.com/rackspace/gophercloud/pagination/marker.go new file mode 100644 index 000000000..f355afc54 --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/pagination/marker.go @@ -0,0 +1,40 @@ +package pagination + +// MarkerPage is a stricter Page interface that describes additional functionality required for use with NewMarkerPager. +// For convenience, embed the MarkedPageBase struct. +type MarkerPage interface { + Page + + // LastMarker returns the last "marker" value on this page. + LastMarker() (string, error) +} + +// MarkerPageBase is a page in a collection that's paginated by "limit" and "marker" query parameters. +type MarkerPageBase struct { + PageResult + + // Owner is a reference to the embedding struct. 
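+	// It lets NextPageURL call LastMarker on the concrete page type that
+	// embeds MarkerPageBase (see createMarkerPaged in marker_test.go, which
+	// sets p.MarkerPageBase.Owner = p).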
+ Owner MarkerPage +} + +// NextPageURL generates the URL for the page of results after this one. +func (current MarkerPageBase) NextPageURL() (string, error) { + currentURL := current.URL + + mark, err := current.Owner.LastMarker() + if err != nil { + return "", err + } + + q := currentURL.Query() + q.Set("marker", mark) + currentURL.RawQuery = q.Encode() + + return currentURL.String(), nil +} + +// GetBody returns the linked page's body. This method is needed to satisfy the +// Page interface. +func (current MarkerPageBase) GetBody() interface{} { + return current.Body +} diff --git a/vendor/github.com/rackspace/gophercloud/pagination/marker_test.go b/vendor/github.com/rackspace/gophercloud/pagination/marker_test.go new file mode 100644 index 000000000..f4d55be81 --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/pagination/marker_test.go @@ -0,0 +1,126 @@ +package pagination + +import ( + "fmt" + "net/http" + "strings" + "testing" + + "github.com/rackspace/gophercloud/testhelper" +) + +// MarkerPager sample and test cases. + +type MarkerPageResult struct { + MarkerPageBase +} + +func (r MarkerPageResult) IsEmpty() (bool, error) { + results, err := ExtractMarkerStrings(r) + if err != nil { + return true, err + } + return len(results) == 0, err +} + +func (r MarkerPageResult) LastMarker() (string, error) { + results, err := ExtractMarkerStrings(r) + if err != nil { + return "", err + } + if len(results) == 0 { + return "", nil + } + return results[len(results)-1], nil +} + +func createMarkerPaged(t *testing.T) Pager { + testhelper.SetupHTTP() + + testhelper.Mux.HandleFunc("/page", func(w http.ResponseWriter, r *http.Request) { + r.ParseForm() + ms := r.Form["marker"] + switch { + case len(ms) == 0: + fmt.Fprintf(w, "aaa\nbbb\nccc") + case len(ms) == 1 && ms[0] == "ccc": + fmt.Fprintf(w, "ddd\neee\nfff") + case len(ms) == 1 && ms[0] == "fff": + fmt.Fprintf(w, "ggg\nhhh\niii") + case len(ms) == 1 && ms[0] == "iii": + w.WriteHeader(http.StatusNoContent) + default: + t.Errorf("Request with unexpected marker: [%v]", ms) + } + }) + + client := createClient() + + createPage := func(r PageResult) Page { + p := MarkerPageResult{MarkerPageBase{PageResult: r}} + p.MarkerPageBase.Owner = p + return p + } + + return NewPager(client, testhelper.Server.URL+"/page", createPage) +} + +func ExtractMarkerStrings(page Page) ([]string, error) { + content := page.(MarkerPageResult).Body.([]uint8) + parts := strings.Split(string(content), "\n") + results := make([]string, 0, len(parts)) + for _, part := range parts { + if len(part) > 0 { + results = append(results, part) + } + } + return results, nil +} + +func TestEnumerateMarker(t *testing.T) { + pager := createMarkerPaged(t) + defer testhelper.TeardownHTTP() + + callCount := 0 + err := pager.EachPage(func(page Page) (bool, error) { + actual, err := ExtractMarkerStrings(page) + if err != nil { + return false, err + } + + t.Logf("Handler invoked with %v", actual) + + var expected []string + switch callCount { + case 0: + expected = []string{"aaa", "bbb", "ccc"} + case 1: + expected = []string{"ddd", "eee", "fff"} + case 2: + expected = []string{"ggg", "hhh", "iii"} + default: + t.Fatalf("Unexpected call count: %d", callCount) + return false, nil + } + + testhelper.CheckDeepEquals(t, expected, actual) + + callCount++ + return true, nil + }) + testhelper.AssertNoErr(t, err) + testhelper.AssertEquals(t, callCount, 3) +} + +func TestAllPagesMarker(t *testing.T) { + pager := createMarkerPaged(t) + defer testhelper.TeardownHTTP() + + page, err := 
pager.AllPages()
+	testhelper.AssertNoErr(t, err)
+
+	expected := []string{"aaa", "bbb", "ccc", "ddd", "eee", "fff", "ggg", "hhh", "iii"}
+	actual, err := ExtractMarkerStrings(page)
+	testhelper.AssertNoErr(t, err)
+	testhelper.CheckDeepEquals(t, expected, actual)
+}
diff --git a/vendor/github.com/rackspace/gophercloud/pagination/null.go b/vendor/github.com/rackspace/gophercloud/pagination/null.go
new file mode 100644
index 000000000..ae57e1886
--- /dev/null
+++ b/vendor/github.com/rackspace/gophercloud/pagination/null.go
@@ -0,0 +1,20 @@
+package pagination
+
+// nullPage is an always-empty page that trivially satisfies all Page interfaces.
+// It's useful to be returned along with an error.
+type nullPage struct{}
+
+// NextPageURL always returns "" to indicate that there are no more pages to return.
+func (p nullPage) NextPageURL() (string, error) {
+	return "", nil
+}
+
+// IsEmpty always returns true to prevent iteration over nullPages.
+func (p nullPage) IsEmpty() (bool, error) {
+	return true, nil
+}
+
+// LastMark always returns "" because the nullPage contains no items to have a mark.
+func (p nullPage) LastMark() (string, error) {
+	return "", nil
+}
diff --git a/vendor/github.com/rackspace/gophercloud/pagination/pager.go b/vendor/github.com/rackspace/gophercloud/pagination/pager.go
new file mode 100644
index 000000000..a7593ac88
--- /dev/null
+++ b/vendor/github.com/rackspace/gophercloud/pagination/pager.go
@@ -0,0 +1,226 @@
+package pagination
+
+import (
+	"errors"
+	"fmt"
+	"net/http"
+	"reflect"
+	"strings"
+
+	"github.com/rackspace/gophercloud"
+)
+
+var (
+	// ErrPageNotAvailable is returned from a Pager when a next or previous page is requested, but does not exist.
+	ErrPageNotAvailable = errors.New("The requested page does not exist.")
+)
+
+// Page must be satisfied by the result type of any resource collection.
+// It allows clients to interact with the resource uniformly, regardless of whether or how it's paginated.
+// Generally, rather than implementing this interface directly, implementors should embed one of the concrete
+// PageBase structs instead.
+// Depending on the pagination strategy of a particular resource, there may be an additional subinterface that the result type
+// will need to implement.
+type Page interface {
+
+	// NextPageURL generates the URL for the page of data that follows this collection.
+	// Return "" if no such page exists.
+	NextPageURL() (string, error)
+
+	// IsEmpty returns true if this Page has no items in it.
+	IsEmpty() (bool, error)
+
+	// GetBody returns the Page Body. This is used in the `AllPages` method.
+	GetBody() interface{}
+}
+
+// Pager knows how to advance through a specific resource collection, one page at a time.
+type Pager struct {
+	client *gophercloud.ServiceClient
+
+	initialURL string
+
+	createPage func(r PageResult) Page
+
+	Err error
+
+	// Headers supplies additional HTTP headers to populate on each paged request.
+	Headers map[string]string
+}
+
+// NewPager constructs a manually-configured pager.
+// Supply the URL for the first page and a function that converts a PageResult into a Page of the appropriate type.
+func NewPager(client *gophercloud.ServiceClient, initialURL string, createPage func(r PageResult) Page) Pager {
+	return Pager{
+		client:     client,
+		initialURL: initialURL,
+		createPage: createPage,
+	}
+}
+
+// WithPageCreator returns a new Pager that substitutes a different page creation function. This is
+// useful for overriding List functions in delegation.
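+// For example (illustrative), a provider-specific package can reuse a generic
+// List function while substituting its own page type for each PageResult.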
+func (p Pager) WithPageCreator(createPage func(r PageResult) Page) Pager {
+	return Pager{
+		client:     p.client,
+		initialURL: p.initialURL,
+		createPage: createPage,
+	}
+}
+
+func (p Pager) fetchNextPage(url string) (Page, error) {
+	resp, err := Request(p.client, p.Headers, url)
+	if err != nil {
+		return nil, err
+	}
+
+	remembered, err := PageResultFrom(resp)
+	if err != nil {
+		return nil, err
+	}
+
+	return p.createPage(remembered), nil
+}
+
+// EachPage iterates over each page returned by a Pager, yielding one at a time to a handler function.
+// Return "false" from the handler to prematurely stop iterating.
+func (p Pager) EachPage(handler func(Page) (bool, error)) error {
+	if p.Err != nil {
+		return p.Err
+	}
+	currentURL := p.initialURL
+	for {
+		currentPage, err := p.fetchNextPage(currentURL)
+		if err != nil {
+			return err
+		}
+
+		empty, err := currentPage.IsEmpty()
+		if err != nil {
+			return err
+		}
+		if empty {
+			return nil
+		}
+
+		ok, err := handler(currentPage)
+		if err != nil {
+			return err
+		}
+		if !ok {
+			return nil
+		}
+
+		currentURL, err = currentPage.NextPageURL()
+		if err != nil {
+			return err
+		}
+		if currentURL == "" {
+			return nil
+		}
+	}
+}
+
+// AllPages returns all the pages from a `List` operation in a single page,
+// allowing the user to retrieve all the pages at once.
+func (p Pager) AllPages() (Page, error) {
+	// pagesSlice holds all the pages until they get converted into a Page Body.
+	var pagesSlice []interface{}
+	// body will contain the final concatenated Page body.
+	var body reflect.Value
+
+	// Grab a test page to ascertain the page body type.
+	testPage, err := p.fetchNextPage(p.initialURL)
+	if err != nil {
+		return nil, err
+	}
+	// Store the page type so we can use reflection to create a new mega-page of
+	// that type.
+	pageType := reflect.TypeOf(testPage)
+
+	// Switch on the page body type. Recognized types are `map[string]interface{}`,
+	// `[]byte`, and `[]interface{}`.
+	switch testPage.GetBody().(type) {
+	case map[string]interface{}:
+		// key is the map key for the page body if the body type is `map[string]interface{}`.
+		var key string
+		// Iterate over the pages to concatenate the bodies.
+		err := p.EachPage(func(page Page) (bool, error) {
+			b := page.GetBody().(map[string]interface{})
+			for k := range b {
+				// If it's a linked page, we don't want the `links`, we want the other one.
+				if !strings.HasSuffix(k, "links") {
+					key = k
+				}
+			}
+			pagesSlice = append(pagesSlice, b[key].([]interface{})...)
+			return true, nil
+		})
+		if err != nil {
+			return nil, err
+		}
+		// Set body to a value of type `map[string]interface{}`
+		body = reflect.MakeMap(reflect.MapOf(reflect.TypeOf(key), reflect.TypeOf(pagesSlice)))
+		body.SetMapIndex(reflect.ValueOf(key), reflect.ValueOf(pagesSlice))
+	case []byte:
+		// Iterate over the pages to concatenate the bodies.
+		err := p.EachPage(func(page Page) (bool, error) {
+			b := page.GetBody().([]byte)
+			pagesSlice = append(pagesSlice, b)
+			// separate the pages with a newline byte
+			pagesSlice = append(pagesSlice, []byte{10})
+			return true, nil
+		})
+		if err != nil {
+			return nil, err
+		}
+		if len(pagesSlice) > 0 {
+			// Remove the trailing newline.
+			pagesSlice = pagesSlice[:len(pagesSlice)-1]
+		}
+		var b []byte
+		// Combine the slice of slices into a single slice.
+		for _, slice := range pagesSlice {
+			b = append(b, slice.([]byte)...)
+		}
+		// Set body to a value of type `[]byte`.
+		body = reflect.New(reflect.TypeOf(b)).Elem()
+		body.SetBytes(b)
+	case []interface{}:
+		// Iterate over the pages to concatenate the bodies.
+		err := p.EachPage(func(page Page) (bool, error) {
+			b := page.GetBody().([]interface{})
+			pagesSlice = append(pagesSlice, b...)
+			return true, nil
+		})
+		if err != nil {
+			return nil, err
+		}
+		// Set body to a value of type `[]interface{}`
+		body = reflect.MakeSlice(reflect.TypeOf(pagesSlice), len(pagesSlice), len(pagesSlice))
+		for i, s := range pagesSlice {
+			body.Index(i).Set(reflect.ValueOf(s))
+		}
+	default:
+		return nil, fmt.Errorf("Page body has unrecognized type.")
+	}
+
+	// Each `Extract*` function is expecting a specific type of page coming back,
+	// otherwise the type assertion in those functions will fail. pageType is needed
+	// to create a type in this method that has the same type that the `Extract*`
+	// function is expecting and set the Body of that object to the concatenated
+	// pages.
+	page := reflect.New(pageType)
+	// Set the page body to be the concatenated pages.
+	page.Elem().FieldByName("Body").Set(body)
+	// Set any additional headers that were passed along. The `objectstorage` package,
+	// for example, passes a Content-Type header.
+	h := make(http.Header)
+	for k, v := range p.Headers {
+		h.Add(k, v)
+	}
+	page.Elem().FieldByName("Header").Set(reflect.ValueOf(h))
+	// Type assert the page to a Page interface so that the type assertion in the
+	// `Extract*` methods will work.
+	return page.Elem().Interface().(Page), err
+}
diff --git a/vendor/github.com/rackspace/gophercloud/pagination/pagination_test.go b/vendor/github.com/rackspace/gophercloud/pagination/pagination_test.go
new file mode 100644
index 000000000..f3e4de1b0
--- /dev/null
+++ b/vendor/github.com/rackspace/gophercloud/pagination/pagination_test.go
@@ -0,0 +1,13 @@
+package pagination
+
+import (
+	"github.com/rackspace/gophercloud"
+	"github.com/rackspace/gophercloud/testhelper"
+)
+
+func createClient() *gophercloud.ServiceClient {
+	return &gophercloud.ServiceClient{
+		ProviderClient: &gophercloud.ProviderClient{TokenID: "abc123"},
+		Endpoint:       testhelper.Endpoint(),
+	}
+}
diff --git a/vendor/github.com/rackspace/gophercloud/pagination/pkg.go b/vendor/github.com/rackspace/gophercloud/pagination/pkg.go
new file mode 100644
index 000000000..912daea36
--- /dev/null
+++ b/vendor/github.com/rackspace/gophercloud/pagination/pkg.go
@@ -0,0 +1,4 @@
+/*
+Package pagination contains utilities and convenience structs that implement common pagination idioms within OpenStack APIs.
+*/
+package pagination
diff --git a/vendor/github.com/rackspace/gophercloud/pagination/single.go b/vendor/github.com/rackspace/gophercloud/pagination/single.go
new file mode 100644
index 000000000..f78d4ab5d
--- /dev/null
+++ b/vendor/github.com/rackspace/gophercloud/pagination/single.go
@@ -0,0 +1,15 @@
+package pagination
+
+// SinglePageBase may be embedded in a Page that contains all of the results from an operation at once.
+type SinglePageBase PageResult
+
+// NextPageURL always returns "" to indicate that there are no more pages to return.
+func (current SinglePageBase) NextPageURL() (string, error) {
+	return "", nil
+}
+
+// GetBody returns the single page's body. This method is needed to satisfy the
+// Page interface.
+func (current SinglePageBase) GetBody() interface{} { + return current.Body +} diff --git a/vendor/github.com/rackspace/gophercloud/pagination/single_test.go b/vendor/github.com/rackspace/gophercloud/pagination/single_test.go new file mode 100644 index 000000000..4af0fee69 --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/pagination/single_test.go @@ -0,0 +1,84 @@ +package pagination + +import ( + "fmt" + "net/http" + "testing" + + "github.com/mitchellh/mapstructure" + "github.com/rackspace/gophercloud/testhelper" +) + +// SinglePage sample and test cases. + +type SinglePageResult struct { + SinglePageBase +} + +func (r SinglePageResult) IsEmpty() (bool, error) { + is, err := ExtractSingleInts(r) + if err != nil { + return true, err + } + return len(is) == 0, nil +} + +func ExtractSingleInts(page Page) ([]int, error) { + var response struct { + Ints []int `mapstructure:"ints"` + } + + err := mapstructure.Decode(page.(SinglePageResult).Body, &response) + if err != nil { + return nil, err + } + + return response.Ints, nil +} + +func setupSinglePaged() Pager { + testhelper.SetupHTTP() + client := createClient() + + testhelper.Mux.HandleFunc("/only", func(w http.ResponseWriter, r *http.Request) { + w.Header().Add("Content-Type", "application/json") + fmt.Fprintf(w, `{ "ints": [1, 2, 3] }`) + }) + + createPage := func(r PageResult) Page { + return SinglePageResult{SinglePageBase(r)} + } + + return NewPager(client, testhelper.Server.URL+"/only", createPage) +} + +func TestEnumerateSinglePaged(t *testing.T) { + callCount := 0 + pager := setupSinglePaged() + defer testhelper.TeardownHTTP() + + err := pager.EachPage(func(page Page) (bool, error) { + callCount++ + + expected := []int{1, 2, 3} + actual, err := ExtractSingleInts(page) + testhelper.AssertNoErr(t, err) + testhelper.CheckDeepEquals(t, expected, actual) + return true, nil + }) + testhelper.CheckNoErr(t, err) + testhelper.CheckEquals(t, 1, callCount) +} + +func TestAllPagesSingle(t *testing.T) { + pager := setupSinglePaged() + defer testhelper.TeardownHTTP() + + page, err := pager.AllPages() + testhelper.AssertNoErr(t, err) + + expected := []int{1, 2, 3} + actual, err := ExtractSingleInts(page) + testhelper.AssertNoErr(t, err) + testhelper.CheckDeepEquals(t, expected, actual) +} diff --git a/vendor/github.com/rackspace/gophercloud/params.go b/vendor/github.com/rackspace/gophercloud/params.go new file mode 100644 index 000000000..4d0f1e6e0 --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/params.go @@ -0,0 +1,271 @@ +package gophercloud + +import ( + "fmt" + "net/url" + "reflect" + "strconv" + "strings" + "time" +) + +// EnabledState is a convenience type, mostly used in Create and Update +// operations. Because the zero value of a bool is FALSE, we need to use a +// pointer instead to indicate zero-ness. +type EnabledState *bool + +// Convenience vars for EnabledState values. +var ( + iTrue = true + iFalse = false + + Enabled EnabledState = &iTrue + Disabled EnabledState = &iFalse +) + +// IntToPointer is a function for converting integers into integer pointers. +// This is useful when passing in options to operations. +func IntToPointer(i int) *int { + return &i +} + +/* +MaybeString is an internal function to be used by request methods in individual +resource packages. + +It takes a string that might be a zero value and returns either a pointer to its +address or nil. 
This is useful for allowing users to conveniently omit values +from an options struct by leaving them zeroed, but still pass nil to the JSON +serializer so they'll be omitted from the request body. +*/ +func MaybeString(original string) *string { + if original != "" { + return &original + } + return nil +} + +/* +MaybeInt is an internal function to be used by request methods in individual +resource packages. + +Like MaybeString, it accepts an int that may or may not be a zero value, and +returns either a pointer to its address or nil. It's intended to hint that the +JSON serializer should omit its field. +*/ +func MaybeInt(original int) *int { + if original != 0 { + return &original + } + return nil +} + +var t time.Time + +func isZero(v reflect.Value) bool { + switch v.Kind() { + case reflect.Func, reflect.Map, reflect.Slice: + return v.IsNil() + case reflect.Array: + z := true + for i := 0; i < v.Len(); i++ { + z = z && isZero(v.Index(i)) + } + return z + case reflect.Struct: + if v.Type() == reflect.TypeOf(t) { + if v.Interface().(time.Time).IsZero() { + return true + } + return false + } + z := true + for i := 0; i < v.NumField(); i++ { + z = z && isZero(v.Field(i)) + } + return z + } + // Compare other types directly: + z := reflect.Zero(v.Type()) + return v.Interface() == z.Interface() +} + +/* +BuildQueryString is an internal function to be used by request methods in +individual resource packages. + +It accepts a tagged structure and expands it into a URL struct. Field names are +converted into query parameters based on a "q" tag. For example: + + type struct Something { + Bar string `q:"x_bar"` + Baz int `q:"lorem_ipsum"` + } + + instance := Something{ + Bar: "AAA", + Baz: "BBB", + } + +will be converted into "?x_bar=AAA&lorem_ipsum=BBB". + +The struct's fields may be strings, integers, or boolean values. Fields left at +their type's zero value will be omitted from the query. +*/ +func BuildQueryString(opts interface{}) (*url.URL, error) { + optsValue := reflect.ValueOf(opts) + if optsValue.Kind() == reflect.Ptr { + optsValue = optsValue.Elem() + } + + optsType := reflect.TypeOf(opts) + if optsType.Kind() == reflect.Ptr { + optsType = optsType.Elem() + } + + params := url.Values{} + + if optsValue.Kind() == reflect.Struct { + for i := 0; i < optsValue.NumField(); i++ { + v := optsValue.Field(i) + f := optsType.Field(i) + qTag := f.Tag.Get("q") + + // if the field has a 'q' tag, it goes in the query string + if qTag != "" { + tags := strings.Split(qTag, ",") + + // if the field is set, add it to the slice of query pieces + if !isZero(v) { + switch v.Kind() { + case reflect.String: + params.Add(tags[0], v.String()) + case reflect.Int: + params.Add(tags[0], strconv.FormatInt(v.Int(), 10)) + case reflect.Bool: + params.Add(tags[0], strconv.FormatBool(v.Bool())) + case reflect.Slice: + switch v.Type().Elem() { + case reflect.TypeOf(0): + for i := 0; i < v.Len(); i++ { + params.Add(tags[0], strconv.FormatInt(v.Index(i).Int(), 10)) + } + default: + for i := 0; i < v.Len(); i++ { + params.Add(tags[0], v.Index(i).String()) + } + } + } + } else { + // Otherwise, the field is not set. + if len(tags) == 2 && tags[1] == "required" { + // And the field is required. Return an error. + return nil, fmt.Errorf("Required query parameter [%s] not set.", f.Name) + } + } + } + } + + return &url.URL{RawQuery: params.Encode()}, nil + } + // Return an error if the underlying type of 'opts' isn't a struct. 
+	return nil, fmt.Errorf("Options type is not a struct.")
+}
+
+/*
+BuildHeaders is an internal function to be used by request methods in
+individual resource packages.
+
+It accepts an arbitrary tagged structure and produces a string map that's
+suitable for use as the HTTP headers of an outgoing request. Field names are
+mapped to header names based on "h" tags. For example:
+
+	type Something struct {
+		Bar string `h:"x_bar"`
+		Baz string `h:"lorem_ipsum"`
+	}
+
+	instance := Something{
+		Bar: "AAA",
+		Baz: "BBB",
+	}
+
+will be converted into:
+
+	map[string]string{
+		"x_bar":       "AAA",
+		"lorem_ipsum": "BBB",
+	}
+
+Untagged fields and fields left at their zero values are skipped. Integers,
+booleans and string values are supported.
+*/
+func BuildHeaders(opts interface{}) (map[string]string, error) {
+	optsValue := reflect.ValueOf(opts)
+	if optsValue.Kind() == reflect.Ptr {
+		optsValue = optsValue.Elem()
+	}
+
+	optsType := reflect.TypeOf(opts)
+	if optsType.Kind() == reflect.Ptr {
+		optsType = optsType.Elem()
+	}
+
+	optsMap := make(map[string]string)
+	if optsValue.Kind() == reflect.Struct {
+		for i := 0; i < optsValue.NumField(); i++ {
+			v := optsValue.Field(i)
+			f := optsType.Field(i)
+			hTag := f.Tag.Get("h")
+
+			// if the field has an 'h' tag, it goes in the header
+			if hTag != "" {
+				tags := strings.Split(hTag, ",")
+
+				// if the field is set, add it to the map of header values
+				if !isZero(v) {
+					switch v.Kind() {
+					case reflect.String:
+						optsMap[tags[0]] = v.String()
+					case reflect.Int:
+						optsMap[tags[0]] = strconv.FormatInt(v.Int(), 10)
+					case reflect.Bool:
+						optsMap[tags[0]] = strconv.FormatBool(v.Bool())
+					}
+				} else {
+					// Otherwise, the field is not set.
+					if len(tags) == 2 && tags[1] == "required" {
+						// And the field is required. Return an error.
+						return optsMap, fmt.Errorf("Required header not set.")
+					}
+				}
+			}
+
+		}
+		return optsMap, nil
+	}
+	// Return an error if the underlying type of 'opts' isn't a struct.
+	return optsMap, fmt.Errorf("Options type is not a struct.")
+}
+
+// IDSliceToQueryString takes a slice of elements and converts them into a query
+// string. For example, if name=foo and slice=[]int{20, 40, 60}, then the
+// result would be `?foo=20&foo=40&foo=60`.
+func IDSliceToQueryString(name string, ids []int) string {
+	str := ""
+	for k, v := range ids {
+		if k == 0 {
+			str += "?"
+		} else {
+			str += "&"
+		}
+		str += fmt.Sprintf("%s=%s", name, strconv.Itoa(v))
+	}
+	return str
+}
+
+// IntWithinRange returns TRUE if an integer falls within a defined range, and
+// FALSE if not.
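+// Note that both bounds are exclusive: val must be strictly greater than min
+// and strictly less than max.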
+func IntWithinRange(val, min, max int) bool { + return val > min && val < max +} diff --git a/vendor/github.com/rackspace/gophercloud/params_test.go b/vendor/github.com/rackspace/gophercloud/params_test.go new file mode 100644 index 000000000..2f40eec81 --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/params_test.go @@ -0,0 +1,165 @@ +package gophercloud + +import ( + "net/url" + "reflect" + "testing" + "time" + + th "github.com/rackspace/gophercloud/testhelper" +) + +func TestMaybeString(t *testing.T) { + testString := "" + var expected *string + actual := MaybeString(testString) + th.CheckDeepEquals(t, expected, actual) + + testString = "carol" + expected = &testString + actual = MaybeString(testString) + th.CheckDeepEquals(t, expected, actual) +} + +func TestMaybeInt(t *testing.T) { + testInt := 0 + var expected *int + actual := MaybeInt(testInt) + th.CheckDeepEquals(t, expected, actual) + + testInt = 4 + expected = &testInt + actual = MaybeInt(testInt) + th.CheckDeepEquals(t, expected, actual) +} + +func TestBuildQueryString(t *testing.T) { + type testVar string + opts := struct { + J int `q:"j"` + R string `q:"r,required"` + C bool `q:"c"` + S []string `q:"s"` + TS []testVar `q:"ts"` + TI []int `q:"ti"` + }{ + J: 2, + R: "red", + C: true, + S: []string{"one", "two", "three"}, + TS: []testVar{"a", "b"}, + TI: []int{1, 2}, + } + expected := &url.URL{RawQuery: "c=true&j=2&r=red&s=one&s=two&s=three&ti=1&ti=2&ts=a&ts=b"} + actual, err := BuildQueryString(&opts) + if err != nil { + t.Errorf("Error building query string: %v", err) + } + th.CheckDeepEquals(t, expected, actual) + + opts = struct { + J int `q:"j"` + R string `q:"r,required"` + C bool `q:"c"` + S []string `q:"s"` + TS []testVar `q:"ts"` + TI []int `q:"ti"` + }{ + J: 2, + C: true, + } + _, err = BuildQueryString(&opts) + if err == nil { + t.Errorf("Expected error: 'Required field not set'") + } + th.CheckDeepEquals(t, expected, actual) + + _, err = BuildQueryString(map[string]interface{}{"Number": 4}) + if err == nil { + t.Errorf("Expected error: 'Options type is not a struct'") + } +} + +func TestBuildHeaders(t *testing.T) { + testStruct := struct { + Accept string `h:"Accept"` + Num int `h:"Number,required"` + Style bool `h:"Style"` + }{ + Accept: "application/json", + Num: 4, + Style: true, + } + expected := map[string]string{"Accept": "application/json", "Number": "4", "Style": "true"} + actual, err := BuildHeaders(&testStruct) + th.CheckNoErr(t, err) + th.CheckDeepEquals(t, expected, actual) + + testStruct.Num = 0 + _, err = BuildHeaders(&testStruct) + if err == nil { + t.Errorf("Expected error: 'Required header not set'") + } + + _, err = BuildHeaders(map[string]interface{}{"Number": 4}) + if err == nil { + t.Errorf("Expected error: 'Options type is not a struct'") + } +} + +func TestIsZero(t *testing.T) { + var testMap map[string]interface{} + testMapValue := reflect.ValueOf(testMap) + expected := true + actual := isZero(testMapValue) + th.CheckEquals(t, expected, actual) + testMap = map[string]interface{}{"empty": false} + testMapValue = reflect.ValueOf(testMap) + expected = false + actual = isZero(testMapValue) + th.CheckEquals(t, expected, actual) + + var testArray [2]string + testArrayValue := reflect.ValueOf(testArray) + expected = true + actual = isZero(testArrayValue) + th.CheckEquals(t, expected, actual) + testArray = [2]string{"one", "two"} + testArrayValue = reflect.ValueOf(testArray) + expected = false + actual = isZero(testArrayValue) + th.CheckEquals(t, expected, actual) + + var testStruct struct { 
+ A string + B time.Time + } + testStructValue := reflect.ValueOf(testStruct) + expected = true + actual = isZero(testStructValue) + th.CheckEquals(t, expected, actual) + testStruct = struct { + A string + B time.Time + }{ + B: time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC), + } + testStructValue = reflect.ValueOf(testStruct) + expected = false + actual = isZero(testStructValue) + th.CheckEquals(t, expected, actual) +} + +func TestQueriesAreEscaped(t *testing.T) { + type foo struct { + Name string `q:"something"` + Shape string `q:"else"` + } + + expected := &url.URL{RawQuery: "else=Triangl+e&something=blah%2B%3F%21%21foo"} + + actual, err := BuildQueryString(foo{Name: "blah+?!!foo", Shape: "Triangl e"}) + th.AssertNoErr(t, err) + + th.AssertDeepEquals(t, expected, actual) +} diff --git a/vendor/github.com/rackspace/gophercloud/provider_client.go b/vendor/github.com/rackspace/gophercloud/provider_client.go new file mode 100644 index 000000000..92643556f --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/provider_client.go @@ -0,0 +1,308 @@ +package gophercloud + +import ( + "bytes" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "net/http" + "strings" +) + +// DefaultUserAgent is the default User-Agent string set in the request header. +const DefaultUserAgent = "gophercloud/1.0.0" + +// UserAgent represents a User-Agent header. +type UserAgent struct { + // prepend is the slice of User-Agent strings to prepend to DefaultUserAgent. + // All the strings to prepend are accumulated and prepended in the Join method. + prepend []string +} + +// Prepend prepends a user-defined string to the default User-Agent string. Users +// may pass in one or more strings to prepend. +func (ua *UserAgent) Prepend(s ...string) { + ua.prepend = append(s, ua.prepend...) +} + +// Join concatenates all the user-defined User-Agent strings with the default +// Gophercloud User-Agent string. +func (ua *UserAgent) Join() string { + uaSlice := append(ua.prepend, DefaultUserAgent) + return strings.Join(uaSlice, " ") +} + +// ProviderClient stores details that are required to interact with any +// services within a specific provider's API. +// +// Generally, you acquire a ProviderClient by calling the NewClient method in +// the appropriate provider's child package, providing whatever authentication +// credentials are required. +type ProviderClient struct { + // IdentityBase is the base URL used for a particular provider's identity + // service - it will be used when issuing authentication requests. It + // should point to the root resource of the identity service, not a specific + // identity version. + IdentityBase string + + // IdentityEndpoint is the identity endpoint. This may be a specific version + // of the identity service. If this is the case, this endpoint is used rather + // than querying versions first. + IdentityEndpoint string + + // TokenID is the ID of the most recently issued valid token. + TokenID string + + // EndpointLocator describes how this provider discovers the endpoints for + // its constituent services. + EndpointLocator EndpointLocator + + // HTTPClient allows users to inject arbitrary http, https, or other transit behaviors. + HTTPClient http.Client + + // UserAgent represents the User-Agent header in the HTTP request. + UserAgent UserAgent + + // ReauthFunc is the function used to re-authenticate the user if the request + // fails with a 401 HTTP response code.
This is needed because there may be multiple + // authentication functions for different Identity service versions. + ReauthFunc func() error +} + +// AuthenticatedHeaders returns a map of HTTP headers that are common for all +// authenticated service requests. +func (client *ProviderClient) AuthenticatedHeaders() map[string]string { + if client.TokenID == "" { + return map[string]string{} + } + return map[string]string{"X-Auth-Token": client.TokenID} +} + +// RequestOpts customizes the behavior of the provider.Request() method. +type RequestOpts struct { + // JSONBody, if provided, will be encoded as JSON and used as the body of the HTTP request. The + // content type of the request will default to "application/json" unless overridden by MoreHeaders. + // It's an error to specify both a JSONBody and a RawBody. + JSONBody interface{} + // RawBody contains an io.ReadSeeker that will be consumed by the request directly. No content-type + // will be set unless one is provided explicitly by MoreHeaders. + RawBody io.ReadSeeker + + // JSONResponse, if provided, will be populated with the contents of the response body parsed as + // JSON. + JSONResponse interface{} + // OkCodes contains a list of numeric HTTP status codes that should be interpreted as success. If + // the response has a different code, an error will be returned. + OkCodes []int + + // MoreHeaders specifies additional HTTP headers to be provided on the request. If a header is + // provided with a blank value (""), that header will be *omitted* instead: use this to suppress + // the default Accept header or an inferred Content-Type, for example. + MoreHeaders map[string]string +} + +// UnexpectedResponseCodeError is returned by the Request method when a response code other than +// those listed in OkCodes is encountered. +type UnexpectedResponseCodeError struct { + URL string + Method string + Expected []int + Actual int + Body []byte +} + +func (err *UnexpectedResponseCodeError) Error() string { + return fmt.Sprintf( + "Expected HTTP response code %v when accessing [%s %s], but got %d instead\n%s", + err.Expected, err.Method, err.URL, err.Actual, err.Body, + ) +} + +var applicationJSON = "application/json" + +// Request performs an HTTP request using the ProviderClient's current HTTPClient. An authentication +// header will automatically be provided. +func (client *ProviderClient) Request(method, url string, options RequestOpts) (*http.Response, error) { + var body io.ReadSeeker + var contentType *string + + // Derive the content body by either encoding an arbitrary object as JSON, or by taking a provided + // io.ReadSeeker as-is. Default the content-type to application/json. + if options.JSONBody != nil { + if options.RawBody != nil { + panic("Please provide only one of JSONBody or RawBody to gophercloud.Request().") + } + + rendered, err := json.Marshal(options.JSONBody) + if err != nil { + return nil, err + } + + body = bytes.NewReader(rendered) + contentType = &applicationJSON + } + + if options.RawBody != nil { + body = options.RawBody + } + + // Construct the http.Request. + req, err := http.NewRequest(method, url, body) + if err != nil { + return nil, err + } + + // Populate the request headers. Apply options.MoreHeaders last, to give the caller the chance to + // modify or omit any header.
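
To make the RequestOpts semantics concrete, here is a hedged caller-side sketch; the URL, body shape, and expected status code are invented for illustration, and client is assumed to be an authenticated *gophercloud.ProviderClient:

    var result map[string]interface{}

    resp, err := client.Request("POST", "https://compute.example.com/v2/servers", gophercloud.RequestOpts{
        JSONBody:     map[string]interface{}{"server": map[string]string{"name": "web-1"}},
        JSONResponse: &result,
        OkCodes:      []int{202},
        // A blank value removes the header instead of sending it empty.
        MoreHeaders: map[string]string{"Accept": ""},
    })
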
+ if contentType != nil { + req.Header.Set("Content-Type", *contentType) + } + req.Header.Set("Accept", applicationJSON) + + for k, v := range client.AuthenticatedHeaders() { + req.Header.Add(k, v) + } + + // Set the User-Agent header + req.Header.Set("User-Agent", client.UserAgent.Join()) + + if options.MoreHeaders != nil { + for k, v := range options.MoreHeaders { + if v != "" { + req.Header.Set(k, v) + } else { + req.Header.Del(k) + } + } + } + + // Issue the request. + resp, err := client.HTTPClient.Do(req) + if err != nil { + return nil, err + } + + if resp.StatusCode == http.StatusUnauthorized { + if client.ReauthFunc != nil { + err = client.ReauthFunc() + if err != nil { + return nil, fmt.Errorf("Error trying to re-authenticate: %s", err) + } + if options.RawBody != nil { + options.RawBody.Seek(0, 0) + } + resp.Body.Close() + resp, err = client.Request(method, url, options) + if err != nil { + return nil, fmt.Errorf("Successfully re-authenticated, but got error executing request: %s", err) + } + + return resp, nil + } + } + + // Allow default OkCodes if none explicitly set + if options.OkCodes == nil { + options.OkCodes = defaultOkCodes(method) + } + + // Validate the HTTP response status. + var ok bool + for _, code := range options.OkCodes { + if resp.StatusCode == code { + ok = true + break + } + } + if !ok { + body, _ := ioutil.ReadAll(resp.Body) + resp.Body.Close() + return resp, &UnexpectedResponseCodeError{ + URL: url, + Method: method, + Expected: options.OkCodes, + Actual: resp.StatusCode, + Body: body, + } + } + + // Parse the response body as JSON, if requested to do so. + if options.JSONResponse != nil { + defer resp.Body.Close() + if err := json.NewDecoder(resp.Body).Decode(options.JSONResponse); err != nil { + return nil, err + } + } + + return resp, nil +} + +func defaultOkCodes(method string) []int { + switch { + case method == "GET": + return []int{200} + case method == "POST": + return []int{201, 202} + case method == "PUT": + return []int{201, 202} + case method == "DELETE": + return []int{202, 204} + } + + return []int{} +} + +func (client *ProviderClient) Get(url string, JSONResponse *interface{}, opts *RequestOpts) (*http.Response, error) { + if opts == nil { + opts = &RequestOpts{} + } + if JSONResponse != nil { + opts.JSONResponse = JSONResponse + } + return client.Request("GET", url, *opts) +} + +func (client *ProviderClient) Post(url string, JSONBody interface{}, JSONResponse *interface{}, opts *RequestOpts) (*http.Response, error) { + if opts == nil { + opts = &RequestOpts{} + } + + if v, ok := (JSONBody).(io.ReadSeeker); ok { + opts.RawBody = v + } else if JSONBody != nil { + opts.JSONBody = JSONBody + } + + if JSONResponse != nil { + opts.JSONResponse = JSONResponse + } + + return client.Request("POST", url, *opts) +} + +func (client *ProviderClient) Put(url string, JSONBody interface{}, JSONResponse *interface{}, opts *RequestOpts) (*http.Response, error) { + if opts == nil { + opts = &RequestOpts{} + } + + if v, ok := (JSONBody).(io.ReadSeeker); ok { + opts.RawBody = v + } else if JSONBody != nil { + opts.JSONBody = JSONBody + } + + if JSONResponse != nil { + opts.JSONResponse = JSONResponse + } + + return client.Request("PUT", url, *opts) +} + +func (client *ProviderClient) Delete(url string, opts *RequestOpts) (*http.Response, error) { + if opts == nil { + opts = &RequestOpts{} + } + + return client.Request("DELETE", url, *opts) +} diff --git a/vendor/github.com/rackspace/gophercloud/provider_client_test.go 
b/vendor/github.com/rackspace/gophercloud/provider_client_test.go new file mode 100644 index 000000000..d79d862b2 --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/provider_client_test.go @@ -0,0 +1,35 @@ +package gophercloud + +import ( + "testing" + + th "github.com/rackspace/gophercloud/testhelper" +) + +func TestAuthenticatedHeaders(t *testing.T) { + p := &ProviderClient{ + TokenID: "1234", + } + expected := map[string]string{"X-Auth-Token": "1234"} + actual := p.AuthenticatedHeaders() + th.CheckDeepEquals(t, expected, actual) +} + +func TestUserAgent(t *testing.T) { + p := &ProviderClient{} + + p.UserAgent.Prepend("custom-user-agent/2.4.0") + expected := "custom-user-agent/2.4.0 gophercloud/1.0.0" + actual := p.UserAgent.Join() + th.CheckEquals(t, expected, actual) + + p.UserAgent.Prepend("another-custom-user-agent/0.3.0", "a-third-ua/5.9.0") + expected = "another-custom-user-agent/0.3.0 a-third-ua/5.9.0 custom-user-agent/2.4.0 gophercloud/1.0.0" + actual = p.UserAgent.Join() + th.CheckEquals(t, expected, actual) + + p.UserAgent = UserAgent{} + expected = "gophercloud/1.0.0" + actual = p.UserAgent.Join() + th.CheckEquals(t, expected, actual) +} diff --git a/vendor/github.com/rackspace/gophercloud/rackspace/auth_env.go b/vendor/github.com/rackspace/gophercloud/rackspace/auth_env.go new file mode 100644 index 000000000..5852c3ce7 --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/rackspace/auth_env.go @@ -0,0 +1,57 @@ +package rackspace + +import ( + "fmt" + "os" + + "github.com/rackspace/gophercloud" +) + +var nilOptions = gophercloud.AuthOptions{} + +// The ErrNoAuthURL, ErrNoUsername, and ErrNoPassword errors indicate that the +// required RS_AUTH_URL, RS_USERNAME, or RS_PASSWORD environment variable, +// respectively, remains undefined. See the AuthOptionsFromEnv() function for more details. +var ( + ErrNoAuthURL = fmt.Errorf("Environment variable RS_AUTH_URL or OS_AUTH_URL needs to be set.") + ErrNoUsername = fmt.Errorf("Environment variable RS_USERNAME or OS_USERNAME needs to be set.") + ErrNoPassword = fmt.Errorf("Environment variable RS_API_KEY or RS_PASSWORD needs to be set.") +) + +func prefixedEnv(base string) string { + value := os.Getenv("RS_" + base) + if value == "" { + value = os.Getenv("OS_" + base) + } + return value +} + +// AuthOptionsFromEnv fills out a gophercloud.AuthOptions structure with the +// settings found in the various Rackspace RS_* environment variables.
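
A sketch of the intended flow, assuming the shell exports shown in the comments and the rackspace.AuthenticatedClient constructor introduced later in this patch:

    // export RS_AUTH_URL=https://identity.api.rackspacecloud.com/v2.0/
    // export RS_USERNAME=alice
    // export RS_API_KEY=0123456789abcdef

    ao, err := rackspace.AuthOptionsFromEnv()
    if err != nil {
        log.Fatal(err) // ErrNoAuthURL, ErrNoUsername, or ErrNoPassword
    }
    provider, err := rackspace.AuthenticatedClient(ao)
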
+func AuthOptionsFromEnv() (gophercloud.AuthOptions, error) { + authURL := prefixedEnv("AUTH_URL") + username := prefixedEnv("USERNAME") + password := prefixedEnv("PASSWORD") + apiKey := prefixedEnv("API_KEY") + + if authURL == "" { + return nilOptions, ErrNoAuthURL + } + + if username == "" { + return nilOptions, ErrNoUsername + } + + if password == "" && apiKey == "" { + return nilOptions, ErrNoPassword + } + + ao := gophercloud.AuthOptions{ + IdentityEndpoint: authURL, + Username: username, + Password: password, + APIKey: apiKey, + } + + return ao, nil +} diff --git a/vendor/github.com/rackspace/gophercloud/rackspace/blockstorage/v1/snapshots/delegate.go b/vendor/github.com/rackspace/gophercloud/rackspace/blockstorage/v1/snapshots/delegate.go new file mode 100644 index 000000000..1cd1b6e30 --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/rackspace/blockstorage/v1/snapshots/delegate.go @@ -0,0 +1,131 @@ +package snapshots + +import ( + "errors" + + "github.com/rackspace/gophercloud" + "github.com/rackspace/gophercloud/pagination" + + os "github.com/rackspace/gophercloud/openstack/blockstorage/v1/snapshots" +) + +func updateURL(c *gophercloud.ServiceClient, id string) string { + return c.ServiceURL("snapshots", id) +} + +// CreateOptsBuilder allows extensions to add additional parameters to the +// Create request. +type CreateOptsBuilder interface { + ToSnapshotCreateMap() (map[string]interface{}, error) +} + +// CreateOpts contains options for creating a Snapshot. This object is passed to +// the snapshots.Create function. For more information about these parameters, +// see the Snapshot object. +type CreateOpts struct { + // REQUIRED + VolumeID string + // OPTIONAL + Description string + // OPTIONAL + Force bool + // OPTIONAL + Name string +} + +// ToSnapshotCreateMap assembles a request body based on the contents of a +// CreateOpts. +func (opts CreateOpts) ToSnapshotCreateMap() (map[string]interface{}, error) { + s := make(map[string]interface{}) + + if opts.VolumeID == "" { + return nil, errors.New("Required CreateOpts field 'VolumeID' not set.") + } + + s["volume_id"] = opts.VolumeID + + if opts.Description != "" { + s["display_description"] = opts.Description + } + if opts.Name != "" { + s["display_name"] = opts.Name + } + if opts.Force { + s["force"] = opts.Force + } + + return map[string]interface{}{"snapshot": s}, nil +} + +// Create will create a new Snapshot based on the values in CreateOpts. To +// extract the Snapshot object from the response, call the Extract method on the +// CreateResult. +func Create(client *gophercloud.ServiceClient, opts CreateOptsBuilder) CreateResult { + return CreateResult{os.Create(client, opts)} +} + +// Delete will delete the existing Snapshot with the provided ID. +func Delete(client *gophercloud.ServiceClient, id string) os.DeleteResult { + return os.Delete(client, id) +} + +// Get retrieves the Snapshot with the provided ID. To extract the Snapshot +// object from the response, call the Extract method on the GetResult. +func Get(client *gophercloud.ServiceClient, id string) GetResult { + return GetResult{os.Get(client, id)} +} + +// List returns Snapshots. +func List(client *gophercloud.ServiceClient) pagination.Pager { + return os.List(client, os.ListOpts{}) +} + +// UpdateOptsBuilder is the interface options structs have to satisfy in order +// to be used in the main Update operation in this package. 
Since many +// extensions decorate or modify the common logic, it is useful for them to +// satisfy a basic interface in order for them to be used. +type UpdateOptsBuilder interface { + ToSnapshotUpdateMap() (map[string]interface{}, error) +} + +// UpdateOpts is the common options struct used in this package's Update +// operation. +type UpdateOpts struct { + Name string + Description string +} + +// ToSnapshotUpdateMap casts a UpdateOpts struct to a map. +func (opts UpdateOpts) ToSnapshotUpdateMap() (map[string]interface{}, error) { + s := make(map[string]interface{}) + + if opts.Name != "" { + s["display_name"] = opts.Name + } + if opts.Description != "" { + s["display_description"] = opts.Description + } + + return map[string]interface{}{"snapshot": s}, nil +} + +// Update accepts a UpdateOpts struct and updates an existing snapshot using the +// values provided. +func Update(c *gophercloud.ServiceClient, snapshotID string, opts UpdateOptsBuilder) UpdateResult { + var res UpdateResult + + reqBody, err := opts.ToSnapshotUpdateMap() + if err != nil { + res.Err = err + return res + } + + // Send request to API + _, res.Err = c.Request("PUT", updateURL(c, snapshotID), gophercloud.RequestOpts{ + JSONBody: &reqBody, + JSONResponse: &res.Body, + OkCodes: []int{200, 201}, + }) + + return res +} diff --git a/vendor/github.com/rackspace/gophercloud/rackspace/blockstorage/v1/snapshots/delegate_test.go b/vendor/github.com/rackspace/gophercloud/rackspace/blockstorage/v1/snapshots/delegate_test.go new file mode 100644 index 000000000..1a02b4652 --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/rackspace/blockstorage/v1/snapshots/delegate_test.go @@ -0,0 +1,97 @@ +package snapshots + +import ( + "testing" + + "github.com/rackspace/gophercloud" + os "github.com/rackspace/gophercloud/openstack/blockstorage/v1/snapshots" + "github.com/rackspace/gophercloud/pagination" + th "github.com/rackspace/gophercloud/testhelper" + fake "github.com/rackspace/gophercloud/testhelper/client" +) + +const endpoint = "http://localhost:57909/v1/12345" + +func endpointClient() *gophercloud.ServiceClient { + return &gophercloud.ServiceClient{Endpoint: endpoint} +} + +func TestUpdateURL(t *testing.T) { + actual := updateURL(endpointClient(), "foo") + expected := endpoint + "snapshots/foo" + th.AssertEquals(t, expected, actual) +} + +func TestList(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + os.MockListResponse(t) + + count := 0 + + err := List(fake.ServiceClient()).EachPage(func(page pagination.Page) (bool, error) { + count++ + actual, err := ExtractSnapshots(page) + if err != nil { + t.Errorf("Failed to extract snapshots: %v", err) + return false, err + } + + expected := []Snapshot{ + Snapshot{ + ID: "289da7f8-6440-407c-9fb4-7db01ec49164", + Name: "snapshot-001", + }, + Snapshot{ + ID: "96c3bda7-c82a-4f50-be73-ca7621794835", + Name: "snapshot-002", + }, + } + + th.CheckDeepEquals(t, expected, actual) + + return true, nil + }) + + th.AssertEquals(t, 1, count) + th.AssertNoErr(t, err) +} + +func TestGet(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + os.MockGetResponse(t) + + v, err := Get(fake.ServiceClient(), "d32019d3-bc6e-4319-9c1d-6722fc136a22").Extract() + th.AssertNoErr(t, err) + + th.AssertEquals(t, v.Name, "snapshot-001") + th.AssertEquals(t, v.ID, "d32019d3-bc6e-4319-9c1d-6722fc136a22") +} + +func TestCreate(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + os.MockCreateResponse(t) + + options := &CreateOpts{VolumeID: "1234", Name: "snapshot-001"} + n, err := 
Create(fake.ServiceClient(), options).Extract() + th.AssertNoErr(t, err) + + th.AssertEquals(t, n.VolumeID, "1234") + th.AssertEquals(t, n.Name, "snapshot-001") + th.AssertEquals(t, n.ID, "d32019d3-bc6e-4319-9c1d-6722fc136a22") +} + +func TestDelete(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + os.MockDeleteResponse(t) + + res := Delete(fake.ServiceClient(), "d32019d3-bc6e-4319-9c1d-6722fc136a22") + th.AssertNoErr(t, res.Err) +} diff --git a/vendor/github.com/rackspace/gophercloud/rackspace/blockstorage/v1/snapshots/doc.go b/vendor/github.com/rackspace/gophercloud/rackspace/blockstorage/v1/snapshots/doc.go new file mode 100644 index 000000000..ad6064f2a --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/rackspace/blockstorage/v1/snapshots/doc.go @@ -0,0 +1,3 @@ +// Package snapshots provides information and interaction with the snapshot +// API resource for the Rackspace Block Storage service. +package snapshots diff --git a/vendor/github.com/rackspace/gophercloud/rackspace/blockstorage/v1/snapshots/results.go b/vendor/github.com/rackspace/gophercloud/rackspace/blockstorage/v1/snapshots/results.go new file mode 100644 index 000000000..c81644c5d --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/rackspace/blockstorage/v1/snapshots/results.go @@ -0,0 +1,147 @@ +package snapshots + +import ( + "github.com/rackspace/gophercloud" + os "github.com/rackspace/gophercloud/openstack/blockstorage/v1/snapshots" + "github.com/rackspace/gophercloud/pagination" + + "github.com/mitchellh/mapstructure" +) + +// Status is the type used to represent a snapshot's status +type Status string + +// Constants to use for supported statuses +const ( + Creating Status = "CREATING" + Available Status = "AVAILABLE" + Deleting Status = "DELETING" + Error Status = "ERROR" + DeleteError Status = "ERROR_DELETING" +) + +// Snapshot is the Rackspace representation of an external block storage device. +type Snapshot struct { + // The timestamp when this snapshot was created. + CreatedAt string `mapstructure:"created_at"` + + // The human-readable description for this snapshot. + Description string `mapstructure:"display_description"` + + // The human-readable name for this snapshot. + Name string `mapstructure:"display_name"` + + // The UUID for this snapshot. + ID string `mapstructure:"id"` + + // The random metadata associated with this snapshot. Note: unlike standard + // OpenStack snapshots, this cannot actually be set. + Metadata map[string]string `mapstructure:"metadata"` + + // Indicates the current progress of the snapshot's backup procedure. + Progress string `mapstructure:"os-extended-snapshot-attributes:progress"` + + // The project ID. + ProjectID string `mapstructure:"os-extended-snapshot-attributes:project_id"` + + // The size of the volume which this snapshot backs up. + Size int `mapstructure:"size"` + + // The status of the snapshot. + Status Status `mapstructure:"status"` + + // The ID of the volume which this snapshot seeks to back up. 
+ VolumeID string `mapstructure:"volume_id"` +} + +// CreateResult represents the result of a create operation +type CreateResult struct { + os.CreateResult +} + +// GetResult represents the result of a get operation +type GetResult struct { + os.GetResult +} + +// UpdateResult represents the result of an update operation +type UpdateResult struct { + gophercloud.Result +} + +func commonExtract(resp interface{}, err error) (*Snapshot, error) { + if err != nil { + return nil, err + } + + var respStruct struct { + Snapshot *Snapshot `json:"snapshot"` + } + + err = mapstructure.Decode(resp, &respStruct) + + return respStruct.Snapshot, err +} + +// Extract will get the Snapshot object out of the GetResult object. +func (r GetResult) Extract() (*Snapshot, error) { + return commonExtract(r.Body, r.Err) +} + +// Extract will get the Snapshot object out of the CreateResult object. +func (r CreateResult) Extract() (*Snapshot, error) { + return commonExtract(r.Body, r.Err) +} + +// Extract will get the Snapshot object out of the UpdateResult object. +func (r UpdateResult) Extract() (*Snapshot, error) { + return commonExtract(r.Body, r.Err) +} + +// ExtractSnapshots extracts and returns Snapshots. It is used while iterating over a snapshots.List call. +func ExtractSnapshots(page pagination.Page) ([]Snapshot, error) { + var response struct { + Snapshots []Snapshot `json:"snapshots"` + } + + err := mapstructure.Decode(page.(os.ListResult).Body, &response) + return response.Snapshots, err +} + +// WaitUntilComplete will continually poll a snapshot until its backup +// progress reaches 100%. It will do this for at most the number of +// seconds specified. +func (snapshot Snapshot) WaitUntilComplete(c *gophercloud.ServiceClient, timeout int) error { + return gophercloud.WaitFor(timeout, func() (bool, error) { + // Poll resource + current, err := Get(c, snapshot.ID).Extract() + if err != nil { + return false, err + } + + // Has it been built yet? + if current.Progress == "100%" { + return true, nil + } + + return false, nil + }) +} + +// WaitUntilDeleted will continually poll a snapshot until it has been +// successfully deleted, i.e. returns a 404 status. +func (snapshot Snapshot) WaitUntilDeleted(c *gophercloud.ServiceClient, timeout int) error { + return gophercloud.WaitFor(timeout, func() (bool, error) { + // Poll resource + _, err := Get(c, snapshot.ID).Extract() + + // Check for a 404 + if casted, ok := err.(*gophercloud.UnexpectedResponseCodeError); ok && casted.Actual == 404 { + return true, nil + } else if err != nil { + return false, err + } + + return false, nil + }) +} diff --git a/vendor/github.com/rackspace/gophercloud/rackspace/blockstorage/v1/volumes/delegate.go b/vendor/github.com/rackspace/gophercloud/rackspace/blockstorage/v1/volumes/delegate.go new file mode 100644 index 000000000..438349410 --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/rackspace/blockstorage/v1/volumes/delegate.go @@ -0,0 +1,75 @@ +package volumes + +import ( + "fmt" + + "github.com/rackspace/gophercloud" + os "github.com/rackspace/gophercloud/openstack/blockstorage/v1/volumes" + "github.com/rackspace/gophercloud/pagination" +) + +type CreateOpts struct { + os.CreateOpts +} + +func (opts CreateOpts) ToVolumeCreateMap() (map[string]interface{}, error) { + if opts.Size < 75 || opts.Size > 1024 { + return nil, fmt.Errorf("Size field must be between 75 and 1024") + } + + return opts.CreateOpts.ToVolumeCreateMap() +} + +// Create will create a new Volume based on the values in CreateOpts.
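
A short usage sketch for the snapshot polling helpers above; client is assumed to be a block storage *gophercloud.ServiceClient and opts a valid CreateOpts:

    snap, err := snapshots.Create(client, opts).Extract()
    if err == nil {
        // Block for up to 600 seconds while the backup progresses to 100%.
        err = snap.WaitUntilComplete(client, 600)
    }
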
To extract +// the Volume object from the response, call the Extract method on the +// CreateResult. +func Create(client *gophercloud.ServiceClient, opts os.CreateOptsBuilder) CreateResult { + return CreateResult{os.Create(client, opts)} +} + +// Delete will delete the existing Volume with the provided ID. +func Delete(client *gophercloud.ServiceClient, id string) os.DeleteResult { + return os.Delete(client, id) +} + +// Get retrieves the Volume with the provided ID. To extract the Volume object +// from the response, call the Extract method on the GetResult. +func Get(client *gophercloud.ServiceClient, id string) GetResult { + return GetResult{os.Get(client, id)} +} + +// List returns volumes optionally limited by the conditions provided in ListOpts. +func List(client *gophercloud.ServiceClient) pagination.Pager { + return os.List(client, os.ListOpts{}) +} + +// UpdateOpts contain options for updating an existing Volume. This object is passed +// to the volumes.Update function. For more information about the parameters, see +// the Volume object. +type UpdateOpts struct { + // OPTIONAL + Name string + // OPTIONAL + Description string +} + +// ToVolumeUpdateMap assembles a request body based on the contents of an +// UpdateOpts. +func (opts UpdateOpts) ToVolumeUpdateMap() (map[string]interface{}, error) { + v := make(map[string]interface{}) + + if opts.Description != "" { + v["display_description"] = opts.Description + } + if opts.Name != "" { + v["display_name"] = opts.Name + } + + return map[string]interface{}{"volume": v}, nil +} + +// Update will update the Volume with provided information. To extract the updated +// Volume from the response, call the Extract method on the UpdateResult. +func Update(client *gophercloud.ServiceClient, id string, opts os.UpdateOptsBuilder) UpdateResult { + return UpdateResult{os.Update(client, id, opts)} +} diff --git a/vendor/github.com/rackspace/gophercloud/rackspace/blockstorage/v1/volumes/delegate_test.go b/vendor/github.com/rackspace/gophercloud/rackspace/blockstorage/v1/volumes/delegate_test.go new file mode 100644 index 000000000..b6831f211 --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/rackspace/blockstorage/v1/volumes/delegate_test.go @@ -0,0 +1,107 @@ +package volumes + +import ( + "testing" + + "github.com/rackspace/gophercloud/openstack/blockstorage/v1/volumes" + os "github.com/rackspace/gophercloud/openstack/blockstorage/v1/volumes/testing" + "github.com/rackspace/gophercloud/pagination" + th "github.com/rackspace/gophercloud/testhelper" + fake "github.com/rackspace/gophercloud/testhelper/client" +) + +func TestList(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + os.MockListResponse(t) + + count := 0 + + err := List(fake.ServiceClient()).EachPage(func(page pagination.Page) (bool, error) { + count++ + actual, err := ExtractVolumes(page) + if err != nil { + t.Errorf("Failed to extract volumes: %v", err) + return false, err + } + + expected := []Volume{ + Volume{ + ID: "289da7f8-6440-407c-9fb4-7db01ec49164", + Name: "vol-001", + }, + Volume{ + ID: "96c3bda7-c82a-4f50-be73-ca7621794835", + Name: "vol-002", + }, + } + + th.CheckDeepEquals(t, expected, actual) + + return true, nil + }) + + th.AssertEquals(t, 1, count) + th.AssertNoErr(t, err) +} + +func TestGet(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + os.MockGetResponse(t) + + v, err := Get(fake.ServiceClient(), "d32019d3-bc6e-4319-9c1d-6722fc136a22").Extract() + th.AssertNoErr(t, err) + + th.AssertEquals(t, v.Name, "vol-001") + th.AssertEquals(t, 
v.ID, "d32019d3-bc6e-4319-9c1d-6722fc136a22") +} + +func TestCreate(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + os.MockCreateResponse(t) + + n, err := Create(fake.ServiceClient(), CreateOpts{volumes.CreateOpts{Size: 75}}).Extract() + th.AssertNoErr(t, err) + + th.AssertEquals(t, n.Size, 4) + th.AssertEquals(t, n.ID, "d32019d3-bc6e-4319-9c1d-6722fc136a22") +} + +func TestSizeRange(t *testing.T) { + _, err := Create(fake.ServiceClient(), CreateOpts{volumes.CreateOpts{Size: 1}}).Extract() + if err == nil { + t.Fatalf("Expected error, got none") + } + + _, err = Create(fake.ServiceClient(), CreateOpts{volumes.CreateOpts{Size: 2000}}).Extract() + if err == nil { + t.Fatalf("Expected error, got none") + } +} + +func TestDelete(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + os.MockDeleteResponse(t) + + res := Delete(fake.ServiceClient(), "d32019d3-bc6e-4319-9c1d-6722fc136a22") + th.AssertNoErr(t, res.Err) +} + +func TestUpdate(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + os.MockUpdateResponse(t) + + options := &UpdateOpts{Name: "vol-002"} + v, err := Update(fake.ServiceClient(), "d32019d3-bc6e-4319-9c1d-6722fc136a22", options).Extract() + th.AssertNoErr(t, err) + th.CheckEquals(t, "vol-002", v.Name) +} diff --git a/vendor/github.com/rackspace/gophercloud/rackspace/blockstorage/v1/volumes/doc.go b/vendor/github.com/rackspace/gophercloud/rackspace/blockstorage/v1/volumes/doc.go new file mode 100644 index 000000000..b2be25c53 --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/rackspace/blockstorage/v1/volumes/doc.go @@ -0,0 +1,3 @@ +// Package volumes provides information and interaction with the volume +// API resource for the Rackspace Block Storage service. +package volumes diff --git a/vendor/github.com/rackspace/gophercloud/rackspace/blockstorage/v1/volumes/results.go b/vendor/github.com/rackspace/gophercloud/rackspace/blockstorage/v1/volumes/results.go new file mode 100644 index 000000000..c7c2cc498 --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/rackspace/blockstorage/v1/volumes/results.go @@ -0,0 +1,66 @@ +package volumes + +import ( + os "github.com/rackspace/gophercloud/openstack/blockstorage/v1/volumes" + "github.com/rackspace/gophercloud/pagination" + + "github.com/mitchellh/mapstructure" +) + +// Volume wraps an Openstack volume +type Volume os.Volume + +// CreateResult represents the result of a create operation +type CreateResult struct { + os.CreateResult +} + +// GetResult represents the result of a get operation +type GetResult struct { + os.GetResult +} + +// UpdateResult represents the result of an update operation +type UpdateResult struct { + os.UpdateResult +} + +func commonExtract(resp interface{}, err error) (*Volume, error) { + if err != nil { + return nil, err + } + + var respStruct struct { + Volume *Volume `json:"volume"` + } + + err = mapstructure.Decode(resp, &respStruct) + + return respStruct.Volume, err +} + +// Extract will get the Volume object out of the GetResult object. +func (r GetResult) Extract() (*Volume, error) { + return commonExtract(r.Body, r.Err) +} + +// Extract will get the Volume object out of the CreateResult object. +func (r CreateResult) Extract() (*Volume, error) { + return commonExtract(r.Body, r.Err) +} + +// Extract will get the Volume object out of the UpdateResult object. +func (r UpdateResult) Extract() (*Volume, error) { + return commonExtract(r.Body, r.Err) +} + +// ExtractVolumes extracts and returns Volumes. It is used while iterating over a volumes.List call. 
+func ExtractVolumes(page pagination.Page) ([]Volume, error) { + var response struct { + Volumes []Volume `json:"volumes"` + } + + err := mapstructure.Decode(page.(os.ListResult).Body, &response) + + return response.Volumes, err +} diff --git a/vendor/github.com/rackspace/gophercloud/rackspace/blockstorage/v1/volumetypes/delegate.go b/vendor/github.com/rackspace/gophercloud/rackspace/blockstorage/v1/volumetypes/delegate.go new file mode 100644 index 000000000..c96b3e4a3 --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/rackspace/blockstorage/v1/volumetypes/delegate.go @@ -0,0 +1,18 @@ +package volumetypes + +import ( + "github.com/rackspace/gophercloud" + os "github.com/rackspace/gophercloud/openstack/blockstorage/v1/volumetypes" + "github.com/rackspace/gophercloud/pagination" +) + +// List returns all volume types. +func List(client *gophercloud.ServiceClient) pagination.Pager { + return os.List(client) +} + +// Get will retrieve the volume type with the provided ID. To extract the volume +// type from the result, call the Extract method on the GetResult. +func Get(client *gophercloud.ServiceClient, id string) GetResult { + return GetResult{os.Get(client, id)} +} diff --git a/vendor/github.com/rackspace/gophercloud/rackspace/blockstorage/v1/volumetypes/delegate_test.go b/vendor/github.com/rackspace/gophercloud/rackspace/blockstorage/v1/volumetypes/delegate_test.go new file mode 100644 index 000000000..6e65c904b --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/rackspace/blockstorage/v1/volumetypes/delegate_test.go @@ -0,0 +1,64 @@ +package volumetypes + +import ( + "testing" + + os "github.com/rackspace/gophercloud/openstack/blockstorage/v1/volumetypes" + "github.com/rackspace/gophercloud/pagination" + th "github.com/rackspace/gophercloud/testhelper" + fake "github.com/rackspace/gophercloud/testhelper/client" +) + +func TestList(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + os.MockListResponse(t) + + count := 0 + + err := List(fake.ServiceClient()).EachPage(func(page pagination.Page) (bool, error) { + count++ + actual, err := ExtractVolumeTypes(page) + if err != nil { + t.Errorf("Failed to extract volume types: %v", err) + return false, err + } + + expected := []VolumeType{ + VolumeType{ + ID: "289da7f8-6440-407c-9fb4-7db01ec49164", + Name: "vol-type-001", + ExtraSpecs: map[string]interface{}{ + "capabilities": "gpu", + }, + }, + VolumeType{ + ID: "96c3bda7-c82a-4f50-be73-ca7621794835", + Name: "vol-type-002", + ExtraSpecs: map[string]interface{}{}, + }, + } + + th.CheckDeepEquals(t, expected, actual) + + return true, nil + }) + + th.AssertEquals(t, 1, count) + th.AssertNoErr(t, err) +} + +func TestGet(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + os.MockGetResponse(t) + + vt, err := Get(fake.ServiceClient(), "d32019d3-bc6e-4319-9c1d-6722fc136a22").Extract() + th.AssertNoErr(t, err) + + th.AssertDeepEquals(t, vt.ExtraSpecs, map[string]interface{}{"serverNumber": "2"}) + th.AssertEquals(t, vt.Name, "vol-type-001") + th.AssertEquals(t, vt.ID, "d32019d3-bc6e-4319-9c1d-6722fc136a22") +} diff --git a/vendor/github.com/rackspace/gophercloud/rackspace/blockstorage/v1/volumetypes/doc.go b/vendor/github.com/rackspace/gophercloud/rackspace/blockstorage/v1/volumetypes/doc.go new file mode 100644 index 000000000..70122b77c --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/rackspace/blockstorage/v1/volumetypes/doc.go @@ -0,0 +1,3 @@ +// Package volumetypes provides information and interaction with the volume type +// API resource for the 
Rackspace Block Storage service. +package volumetypes diff --git a/vendor/github.com/rackspace/gophercloud/rackspace/blockstorage/v1/volumetypes/results.go b/vendor/github.com/rackspace/gophercloud/rackspace/blockstorage/v1/volumetypes/results.go new file mode 100644 index 000000000..39c8d6f7f --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/rackspace/blockstorage/v1/volumetypes/results.go @@ -0,0 +1,37 @@ +package volumetypes + +import ( + "github.com/mitchellh/mapstructure" + os "github.com/rackspace/gophercloud/openstack/blockstorage/v1/volumetypes" + "github.com/rackspace/gophercloud/pagination" +) + +type VolumeType os.VolumeType + +type GetResult struct { + os.GetResult +} + +// Extract will get the Volume Type struct out of the response. +func (r GetResult) Extract() (*VolumeType, error) { + if r.Err != nil { + return nil, r.Err + } + + var res struct { + VolumeType *VolumeType `json:"volume_type" mapstructure:"volume_type"` + } + + err := mapstructure.Decode(r.Body, &res) + + return res.VolumeType, err +} + +func ExtractVolumeTypes(page pagination.Page) ([]VolumeType, error) { + var response struct { + VolumeTypes []VolumeType `mapstructure:"volume_types"` + } + + err := mapstructure.Decode(page.(os.ListResult).Body, &response) + return response.VolumeTypes, err +} diff --git a/vendor/github.com/rackspace/gophercloud/rackspace/cdn/v1/base/delegate.go b/vendor/github.com/rackspace/gophercloud/rackspace/cdn/v1/base/delegate.go new file mode 100644 index 000000000..5af7e0778 --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/rackspace/cdn/v1/base/delegate.go @@ -0,0 +1,18 @@ +package base + +import ( + "github.com/rackspace/gophercloud" + + os "github.com/rackspace/gophercloud/openstack/cdn/v1/base" +) + +// Get retrieves the home document, allowing the user to discover the +// entire API. +func Get(c *gophercloud.ServiceClient) os.GetResult { + return os.Get(c) +} + +// Ping issues a ping request against the server.
+func Ping(c *gophercloud.ServiceClient) os.PingResult { + return os.Ping(c) +} diff --git a/vendor/github.com/rackspace/gophercloud/rackspace/cdn/v1/base/delegate_test.go b/vendor/github.com/rackspace/gophercloud/rackspace/cdn/v1/base/delegate_test.go new file mode 100644 index 000000000..731fc6dd0 --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/rackspace/cdn/v1/base/delegate_test.go @@ -0,0 +1,44 @@ +package base + +import ( + "testing" + + os "github.com/rackspace/gophercloud/openstack/cdn/v1/base" + th "github.com/rackspace/gophercloud/testhelper" + fake "github.com/rackspace/gophercloud/testhelper/client" +) + +func TestGetHomeDocument(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + os.HandleGetSuccessfully(t) + + actual, err := Get(fake.ServiceClient()).Extract() + th.CheckNoErr(t, err) + + expected := os.HomeDocument{ + "rel/cdn": map[string]interface{}{ + "href-template": "services{?marker,limit}", + "href-vars": map[string]interface{}{ + "marker": "param/marker", + "limit": "param/limit", + }, + "hints": map[string]interface{}{ + "allow": []string{"GET"}, + "formats": map[string]interface{}{ + "application/json": map[string]interface{}{}, + }, + }, + }, + } + th.CheckDeepEquals(t, expected, *actual) +} + +func TestPing(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + os.HandlePingSuccessfully(t) + + err := Ping(fake.ServiceClient()).ExtractErr() + th.CheckNoErr(t, err) +} diff --git a/vendor/github.com/rackspace/gophercloud/rackspace/cdn/v1/base/doc.go b/vendor/github.com/rackspace/gophercloud/rackspace/cdn/v1/base/doc.go new file mode 100644 index 000000000..5582306a8 --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/rackspace/cdn/v1/base/doc.go @@ -0,0 +1,4 @@ +// Package base provides information and interaction with the base API +// resource in the Rackspace CDN service. This API resource allows for +// retrieving the Home Document and pinging the root URL. +package base diff --git a/vendor/github.com/rackspace/gophercloud/rackspace/cdn/v1/flavors/delegate.go b/vendor/github.com/rackspace/gophercloud/rackspace/cdn/v1/flavors/delegate.go new file mode 100644 index 000000000..7152fa23a --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/rackspace/cdn/v1/flavors/delegate.go @@ -0,0 +1,18 @@ +package flavors + +import ( + "github.com/rackspace/gophercloud" + + os "github.com/rackspace/gophercloud/openstack/cdn/v1/flavors" + "github.com/rackspace/gophercloud/pagination" +) + +// List returns a single page of CDN flavors. +func List(c *gophercloud.ServiceClient) pagination.Pager { + return os.List(c) +} + +// Get retrieves a specific flavor based on its unique ID. 
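
These rackspace packages all follow the same delegation shape: import the openstack counterpart under the alias os, forward the call, and wrap the result only when Rackspace-specific extraction is needed. A schematic sketch of the pattern, with invented package and path names:

    package widgets

    import (
        "github.com/rackspace/gophercloud"
        os "github.com/rackspace/gophercloud/openstack/foo/v1/widgets" // hypothetical path
    )

    // Get forwards directly; the openstack result type is reused as-is.
    func Get(c *gophercloud.ServiceClient, id string) os.GetResult {
        return os.Get(c, id)
    }
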
+func Get(c *gophercloud.ServiceClient, id string) os.GetResult { + return os.Get(c, id) +} diff --git a/vendor/github.com/rackspace/gophercloud/rackspace/cdn/v1/flavors/delegate_test.go b/vendor/github.com/rackspace/gophercloud/rackspace/cdn/v1/flavors/delegate_test.go new file mode 100644 index 000000000..d6d299df8 --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/rackspace/cdn/v1/flavors/delegate_test.go @@ -0,0 +1,90 @@ +package flavors + +import ( + "testing" + + "github.com/rackspace/gophercloud" + os "github.com/rackspace/gophercloud/openstack/cdn/v1/flavors" + "github.com/rackspace/gophercloud/pagination" + th "github.com/rackspace/gophercloud/testhelper" + fake "github.com/rackspace/gophercloud/testhelper/client" +) + +func TestList(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + os.HandleListCDNFlavorsSuccessfully(t) + + count := 0 + + err := List(fake.ServiceClient()).EachPage(func(page pagination.Page) (bool, error) { + count++ + actual, err := os.ExtractFlavors(page) + if err != nil { + t.Errorf("Failed to extract flavors: %v", err) + return false, err + } + + expected := []os.Flavor{ + os.Flavor{ + ID: "europe", + Providers: []os.Provider{ + os.Provider{ + Provider: "Fastly", + Links: []gophercloud.Link{ + gophercloud.Link{ + Href: "http://www.fastly.com", + Rel: "provider_url", + }, + }, + }, + }, + Links: []gophercloud.Link{ + gophercloud.Link{ + Href: "https://www.poppycdn.io/v1.0/flavors/europe", + Rel: "self", + }, + }, + }, + } + + th.CheckDeepEquals(t, expected, actual) + + return true, nil + }) + th.AssertNoErr(t, err) + th.CheckEquals(t, 1, count) +} + +func TestGet(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + os.HandleGetCDNFlavorSuccessfully(t) + + expected := &os.Flavor{ + ID: "asia", + Providers: []os.Provider{ + os.Provider{ + Provider: "ChinaCache", + Links: []gophercloud.Link{ + gophercloud.Link{ + Href: "http://www.chinacache.com", + Rel: "provider_url", + }, + }, + }, + }, + Links: []gophercloud.Link{ + gophercloud.Link{ + Href: "https://www.poppycdn.io/v1.0/flavors/asia", + Rel: "self", + }, + }, + } + + actual, err := Get(fake.ServiceClient(), "asia").Extract() + th.AssertNoErr(t, err) + th.AssertDeepEquals(t, expected, actual) +} diff --git a/vendor/github.com/rackspace/gophercloud/rackspace/cdn/v1/flavors/doc.go b/vendor/github.com/rackspace/gophercloud/rackspace/cdn/v1/flavors/doc.go new file mode 100644 index 000000000..4ad966eac --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/rackspace/cdn/v1/flavors/doc.go @@ -0,0 +1,6 @@ +// Package flavors provides information and interaction with the flavors API +// resource in the Rackspace CDN service. This API resource allows for +// listing flavors and retrieving a specific flavor. +// +// A flavor is a mapping configuration to a CDN provider. +package flavors diff --git a/vendor/github.com/rackspace/gophercloud/rackspace/cdn/v1/serviceassets/delegate.go b/vendor/github.com/rackspace/gophercloud/rackspace/cdn/v1/serviceassets/delegate.go new file mode 100644 index 000000000..07c93a8dc --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/rackspace/cdn/v1/serviceassets/delegate.go @@ -0,0 +1,13 @@ +package serviceassets + +import ( + "github.com/rackspace/gophercloud" + + os "github.com/rackspace/gophercloud/openstack/cdn/v1/serviceassets" +) + +// Delete accepts a unique ID and deletes the CDN service asset associated with +// it. 
+func Delete(c *gophercloud.ServiceClient, id string, opts os.DeleteOptsBuilder) os.DeleteResult { + return os.Delete(c, id, opts) +} diff --git a/vendor/github.com/rackspace/gophercloud/rackspace/cdn/v1/serviceassets/delegate_test.go b/vendor/github.com/rackspace/gophercloud/rackspace/cdn/v1/serviceassets/delegate_test.go new file mode 100644 index 000000000..328e1682d --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/rackspace/cdn/v1/serviceassets/delegate_test.go @@ -0,0 +1,19 @@ +package serviceassets + +import ( + "testing" + + os "github.com/rackspace/gophercloud/openstack/cdn/v1/serviceassets" + th "github.com/rackspace/gophercloud/testhelper" + fake "github.com/rackspace/gophercloud/testhelper/client" +) + +func TestDelete(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + os.HandleDeleteCDNAssetSuccessfully(t) + + err := Delete(fake.ServiceClient(), "96737ae3-cfc1-4c72-be88-5d0e7cc9a3f0", nil).ExtractErr() + th.AssertNoErr(t, err) +} diff --git a/vendor/github.com/rackspace/gophercloud/rackspace/cdn/v1/serviceassets/doc.go b/vendor/github.com/rackspace/gophercloud/rackspace/cdn/v1/serviceassets/doc.go new file mode 100644 index 000000000..46b3d50a8 --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/rackspace/cdn/v1/serviceassets/doc.go @@ -0,0 +1,7 @@ +// Package serviceassets provides information and interaction with the +// serviceassets API resource in the Rackspace CDN service. This API resource +// allows for deleting cached assets. +// +// A service distributes assets across the network. Service assets let you +// interrogate properties about these assets and perform certain actions on them. +package serviceassets diff --git a/vendor/github.com/rackspace/gophercloud/rackspace/cdn/v1/services/delegate.go b/vendor/github.com/rackspace/gophercloud/rackspace/cdn/v1/services/delegate.go new file mode 100644 index 000000000..e3f145997 --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/rackspace/cdn/v1/services/delegate.go @@ -0,0 +1,37 @@ +package services + +import ( + "github.com/rackspace/gophercloud" + + os "github.com/rackspace/gophercloud/openstack/cdn/v1/services" + "github.com/rackspace/gophercloud/pagination" +) + +// List returns a Pager which allows you to iterate over a collection of +// CDN services. It accepts a ListOpts struct, which allows for pagination via +// marker and limit. +func List(c *gophercloud.ServiceClient, opts os.ListOptsBuilder) pagination.Pager { + return os.List(c, opts) +} + +// Create accepts a CreateOpts struct and creates a new CDN service using the +// values provided. +func Create(c *gophercloud.ServiceClient, opts os.CreateOptsBuilder) os.CreateResult { + return os.Create(c, opts) +} + +// Get retrieves a specific service based on its unique ID. +func Get(c *gophercloud.ServiceClient, id string) os.GetResult { + return os.Get(c, id) +} + +// Update accepts a slice of Patch operations and updates an existing CDN +// service using the values provided. +func Update(c *gophercloud.ServiceClient, id string, patches []os.Patch) os.UpdateResult { + return os.Update(c, id, patches) +} + +// Delete accepts a unique ID and deletes the CDN service associated with it.
+func Delete(c *gophercloud.ServiceClient, id string) os.DeleteResult { + return os.Delete(c, id) +} diff --git a/vendor/github.com/rackspace/gophercloud/rackspace/cdn/v1/services/delegate_test.go b/vendor/github.com/rackspace/gophercloud/rackspace/cdn/v1/services/delegate_test.go new file mode 100644 index 000000000..6c48365e4 --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/rackspace/cdn/v1/services/delegate_test.go @@ -0,0 +1,359 @@ +package services + +import ( + "testing" + + "github.com/rackspace/gophercloud" + os "github.com/rackspace/gophercloud/openstack/cdn/v1/services" + "github.com/rackspace/gophercloud/pagination" + th "github.com/rackspace/gophercloud/testhelper" + fake "github.com/rackspace/gophercloud/testhelper/client" +) + +func TestList(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + os.HandleListCDNServiceSuccessfully(t) + + count := 0 + + err := List(fake.ServiceClient(), &os.ListOpts{}).EachPage(func(page pagination.Page) (bool, error) { + count++ + actual, err := os.ExtractServices(page) + if err != nil { + t.Errorf("Failed to extract services: %v", err) + return false, err + } + + expected := []os.Service{ + os.Service{ + ID: "96737ae3-cfc1-4c72-be88-5d0e7cc9a3f0", + Name: "mywebsite.com", + Domains: []os.Domain{ + os.Domain{ + Domain: "www.mywebsite.com", + }, + }, + Origins: []os.Origin{ + os.Origin{ + Origin: "mywebsite.com", + Port: 80, + SSL: false, + }, + }, + Caching: []os.CacheRule{ + os.CacheRule{ + Name: "default", + TTL: 3600, + }, + os.CacheRule{ + Name: "home", + TTL: 17200, + Rules: []os.TTLRule{ + os.TTLRule{ + Name: "index", + RequestURL: "/index.htm", + }, + }, + }, + os.CacheRule{ + Name: "images", + TTL: 12800, + Rules: []os.TTLRule{ + os.TTLRule{ + Name: "images", + RequestURL: "*.png", + }, + }, + }, + }, + Restrictions: []os.Restriction{ + os.Restriction{ + Name: "website only", + Rules: []os.RestrictionRule{ + os.RestrictionRule{ + Name: "mywebsite.com", + Referrer: "www.mywebsite.com", + }, + }, + }, + }, + FlavorID: "asia", + Status: "deployed", + Errors: []os.Error{}, + Links: []gophercloud.Link{ + gophercloud.Link{ + Href: "https://www.poppycdn.io/v1.0/services/96737ae3-cfc1-4c72-be88-5d0e7cc9a3f0", + Rel: "self", + }, + gophercloud.Link{ + Href: "mywebsite.com.cdn123.poppycdn.net", + Rel: "access_url", + }, + gophercloud.Link{ + Href: "https://www.poppycdn.io/v1.0/flavors/asia", + Rel: "flavor", + }, + }, + }, + os.Service{ + ID: "96737ae3-cfc1-4c72-be88-5d0e7cc9a3f1", + Name: "myothersite.com", + Domains: []os.Domain{ + os.Domain{ + Domain: "www.myothersite.com", + }, + }, + Origins: []os.Origin{ + os.Origin{ + Origin: "44.33.22.11", + Port: 80, + SSL: false, + }, + os.Origin{ + Origin: "77.66.55.44", + Port: 80, + SSL: false, + Rules: []os.OriginRule{ + os.OriginRule{ + Name: "videos", + RequestURL: "^/videos/*.m3u", + }, + }, + }, + }, + Caching: []os.CacheRule{ + os.CacheRule{ + Name: "default", + TTL: 3600, + }, + }, + Restrictions: []os.Restriction{}, + FlavorID: "europe", + Status: "deployed", + Links: []gophercloud.Link{ + gophercloud.Link{ + Href: "https://www.poppycdn.io/v1.0/services/96737ae3-cfc1-4c72-be88-5d0e7cc9a3f1", + Rel: "self", + }, + gophercloud.Link{ + Href: "myothersite.com.poppycdn.net", + Rel: "access_url", + }, + gophercloud.Link{ + Href: "https://www.poppycdn.io/v1.0/flavors/europe", + Rel: "flavor", + }, + }, + }, + } + + th.CheckDeepEquals(t, expected, actual) + + return true, nil + }) + th.AssertNoErr(t, err) + + if count != 1 { + t.Errorf("Expected 1 page, got %d", count) + } +} + 
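
Putting the services delegates above together, a hedged listing sketch; client is assumed to be a CDN *gophercloud.ServiceClient, with fmt imported:

    err := services.List(client, &os.ListOpts{}).EachPage(func(page pagination.Page) (bool, error) {
        svcs, err := os.ExtractServices(page)
        if err != nil {
            return false, err
        }
        for _, s := range svcs {
            fmt.Println(s.ID, s.Name, s.Status)
        }
        return true, nil
    })
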
+func TestCreate(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + os.HandleCreateCDNServiceSuccessfully(t) + + createOpts := os.CreateOpts{ + Name: "mywebsite.com", + Domains: []os.Domain{ + os.Domain{ + Domain: "www.mywebsite.com", + }, + os.Domain{ + Domain: "blog.mywebsite.com", + }, + }, + Origins: []os.Origin{ + os.Origin{ + Origin: "mywebsite.com", + Port: 80, + SSL: false, + }, + }, + Restrictions: []os.Restriction{ + os.Restriction{ + Name: "website only", + Rules: []os.RestrictionRule{ + os.RestrictionRule{ + Name: "mywebsite.com", + Referrer: "www.mywebsite.com", + }, + }, + }, + }, + Caching: []os.CacheRule{ + os.CacheRule{ + Name: "default", + TTL: 3600, + }, + }, + FlavorID: "cdn", + } + + expected := "https://global.cdn.api.rackspacecloud.com/v1.0/services/96737ae3-cfc1-4c72-be88-5d0e7cc9a3f0" + actual, err := Create(fake.ServiceClient(), createOpts).Extract() + th.AssertNoErr(t, err) + th.AssertEquals(t, expected, actual) +} + +func TestGet(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + os.HandleGetCDNServiceSuccessfully(t) + + expected := &os.Service{ + ID: "96737ae3-cfc1-4c72-be88-5d0e7cc9a3f0", + Name: "mywebsite.com", + Domains: []os.Domain{ + os.Domain{ + Domain: "www.mywebsite.com", + Protocol: "http", + }, + }, + Origins: []os.Origin{ + os.Origin{ + Origin: "mywebsite.com", + Port: 80, + SSL: false, + }, + }, + Caching: []os.CacheRule{ + os.CacheRule{ + Name: "default", + TTL: 3600, + }, + os.CacheRule{ + Name: "home", + TTL: 17200, + Rules: []os.TTLRule{ + os.TTLRule{ + Name: "index", + RequestURL: "/index.htm", + }, + }, + }, + os.CacheRule{ + Name: "images", + TTL: 12800, + Rules: []os.TTLRule{ + os.TTLRule{ + Name: "images", + RequestURL: "*.png", + }, + }, + }, + }, + Restrictions: []os.Restriction{ + os.Restriction{ + Name: "website only", + Rules: []os.RestrictionRule{ + os.RestrictionRule{ + Name: "mywebsite.com", + Referrer: "www.mywebsite.com", + }, + }, + }, + }, + FlavorID: "cdn", + Status: "deployed", + Errors: []os.Error{}, + Links: []gophercloud.Link{ + gophercloud.Link{ + Href: "https://global.cdn.api.rackspacecloud.com/v1.0/110011/services/96737ae3-cfc1-4c72-be88-5d0e7cc9a3f0", + Rel: "self", + }, + gophercloud.Link{ + Href: "blog.mywebsite.com.cdn1.raxcdn.com", + Rel: "access_url", + }, + gophercloud.Link{ + Href: "https://global.cdn.api.rackspacecloud.com/v1.0/110011/flavors/cdn", + Rel: "flavor", + }, + }, + } + + actual, err := Get(fake.ServiceClient(), "96737ae3-cfc1-4c72-be88-5d0e7cc9a3f0").Extract() + th.AssertNoErr(t, err) + th.AssertDeepEquals(t, expected, actual) +} + +func TestSuccessfulUpdate(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + os.HandleUpdateCDNServiceSuccessfully(t) + + expected := "https://www.poppycdn.io/v1.0/services/96737ae3-cfc1-4c72-be88-5d0e7cc9a3f0" + ops := []os.Patch{ + // Append a single Domain + os.Append{Value: os.Domain{Domain: "appended.mocksite4.com"}}, + // Insert a single Domain + os.Insertion{ + Index: 4, + Value: os.Domain{Domain: "inserted.mocksite4.com"}, + }, + // Bulk addition + os.Append{ + Value: os.DomainList{ + os.Domain{Domain: "bulkadded1.mocksite4.com"}, + os.Domain{Domain: "bulkadded2.mocksite4.com"}, + }, + }, + // Replace a single Origin + os.Replacement{ + Index: 2, + Value: os.Origin{Origin: "44.33.22.11", Port: 80, SSL: false}, + }, + // Bulk replace Origins + os.Replacement{ + Index: 0, // Ignored + Value: os.OriginList{ + os.Origin{Origin: "44.33.22.11", Port: 80, SSL: false}, + os.Origin{Origin: "55.44.33.22", Port: 443, SSL: true}, + }, + }, 
+ // Remove a single CacheRule + os.Removal{ + Index: 8, + Path: os.PathCaching, + }, + // Bulk removal + os.Removal{ + All: true, + Path: os.PathCaching, + }, + // Service name replacement + os.NameReplacement{ + NewName: "differentServiceName", + }, + } + + actual, err := Update(fake.ServiceClient(), "96737ae3-cfc1-4c72-be88-5d0e7cc9a3f0", ops).Extract() + th.AssertNoErr(t, err) + th.AssertEquals(t, expected, actual) +} + +func TestDelete(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + os.HandleDeleteCDNServiceSuccessfully(t) + + err := Delete(fake.ServiceClient(), "96737ae3-cfc1-4c72-be88-5d0e7cc9a3f0").ExtractErr() + th.AssertNoErr(t, err) +} diff --git a/vendor/github.com/rackspace/gophercloud/rackspace/cdn/v1/services/doc.go b/vendor/github.com/rackspace/gophercloud/rackspace/cdn/v1/services/doc.go new file mode 100644 index 000000000..ee6e2a54f --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/rackspace/cdn/v1/services/doc.go @@ -0,0 +1,7 @@ +// Package services provides information and interaction with the services API +// resource in the Rackspace CDN service. This API resource allows for +// listing, creating, updating, retrieving, and deleting services. +// +// A service represents an application that has its content cached to the edge +// nodes. +package services diff --git a/vendor/github.com/rackspace/gophercloud/rackspace/client.go b/vendor/github.com/rackspace/gophercloud/rackspace/client.go new file mode 100644 index 000000000..a8f413ed9 --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/rackspace/client.go @@ -0,0 +1,224 @@ +package rackspace + +import ( + "fmt" + + "github.com/rackspace/gophercloud" + os "github.com/rackspace/gophercloud/openstack" + "github.com/rackspace/gophercloud/openstack/utils" + tokens2 "github.com/rackspace/gophercloud/rackspace/identity/v2/tokens" +) + +const ( + // RackspaceUSIdentity is an identity endpoint located in the United States. + RackspaceUSIdentity = "https://identity.api.rackspacecloud.com/v2.0/" + + // RackspaceUKIdentity is an identity endpoint located in the UK. + RackspaceUKIdentity = "https://lon.identity.api.rackspacecloud.com/v2.0/" +) + +const ( + v20 = "v2.0" +) + +// NewClient creates a client that's prepared to communicate with the Rackspace API, but is not +// yet authenticated. Most users will probably prefer using the AuthenticatedClient function +// instead. +// +// Provide the base URL of the identity endpoint you wish to authenticate against as "endpoint". +// Often, this will be either RackspaceUSIdentity or RackspaceUKIdentity. +func NewClient(endpoint string) (*gophercloud.ProviderClient, error) { + if endpoint == "" { + return os.NewClient(RackspaceUSIdentity) + } + return os.NewClient(endpoint) +} + +// AuthenticatedClient logs in to Rackspace with the provided credentials and constructs a +// ProviderClient that's ready to operate. +// +// If the provided AuthOptions does not specify an explicit IdentityEndpoint, it will default to +// the canonical, production Rackspace US identity endpoint. +func AuthenticatedClient(options gophercloud.AuthOptions) (*gophercloud.ProviderClient, error) { + client, err := NewClient(options.IdentityEndpoint) + if err != nil { + return nil, err + } + + err = Authenticate(client, options) + if err != nil { + return nil, err + } + return client, nil +} + +// Authenticate or re-authenticate against the most recent identity service supported at the +// provided endpoint. 
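+//
+// A minimal usage sketch; the credentials below are illustrative
+// placeholders, not values from this changeset:
+//
+//	provider, err := NewClient(RackspaceUSIdentity)
+//	if err == nil {
+//		err = Authenticate(provider, gophercloud.AuthOptions{
+//			Username: "me",
+//			APIKey:   "0123456789abcdef",
+//		})
+//	}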
+func Authenticate(client *gophercloud.ProviderClient, options gophercloud.AuthOptions) error { + versions := []*utils.Version{ + &utils.Version{ID: v20, Priority: 20, Suffix: "/v2.0/"}, + } + + chosen, endpoint, err := utils.ChooseVersion(client, versions) + if err != nil { + return err + } + + switch chosen.ID { + case v20: + return v2auth(client, endpoint, options) + default: + // The switch statement must be out of date from the versions list. + return fmt.Errorf("Unrecognized identity version: %s", chosen.ID) + } +} + +// AuthenticateV2 explicitly authenticates with v2 of the identity service. +func AuthenticateV2(client *gophercloud.ProviderClient, options gophercloud.AuthOptions) error { + return v2auth(client, "", options) +} + +func v2auth(client *gophercloud.ProviderClient, endpoint string, options gophercloud.AuthOptions) error { + v2Client := NewIdentityV2(client) + if endpoint != "" { + v2Client.Endpoint = endpoint + } + + result := tokens2.Create(v2Client, tokens2.WrapOptions(options)) + + token, err := result.ExtractToken() + if err != nil { + return err + } + + catalog, err := result.ExtractServiceCatalog() + if err != nil { + return err + } + + if options.AllowReauth { + client.ReauthFunc = func() error { + return AuthenticateV2(client, options) + } + } + client.TokenID = token.ID + client.EndpointLocator = func(opts gophercloud.EndpointOpts) (string, error) { + return os.V2EndpointURL(catalog, opts) + } + + return nil +} + +// NewIdentityV2 creates a ServiceClient that may be used to access the v2 identity service. +func NewIdentityV2(client *gophercloud.ProviderClient) *gophercloud.ServiceClient { + v2Endpoint := client.IdentityBase + "v2.0/" + + return &gophercloud.ServiceClient{ + ProviderClient: client, + Endpoint: v2Endpoint, + } +} + +// NewComputeV2 creates a ServiceClient that may be used to access the v2 compute service. +func NewComputeV2(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) { + eo.ApplyDefaults("compute") + url, err := client.EndpointLocator(eo) + if err != nil { + return nil, err + } + + return &gophercloud.ServiceClient{ + ProviderClient: client, + Endpoint: url, + }, nil +} + +// NewObjectCDNV1 creates a ServiceClient that may be used with the Rackspace v1 CDN. +func NewObjectCDNV1(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) { + eo.ApplyDefaults("rax:object-cdn") + url, err := client.EndpointLocator(eo) + if err != nil { + return nil, err + } + return &gophercloud.ServiceClient{ProviderClient: client, Endpoint: url}, nil +} + +// NewObjectStorageV1 creates a ServiceClient that may be used with the Rackspace v1 object storage package. +func NewObjectStorageV1(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) { + return os.NewObjectStorageV1(client, eo) +} + +// NewBlockStorageV1 creates a ServiceClient that can be used to access the +// Rackspace Cloud Block Storage v1 API. +func NewBlockStorageV1(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) { + eo.ApplyDefaults("volume") + url, err := client.EndpointLocator(eo) + if err != nil { + return nil, err + } + + return &gophercloud.ServiceClient{ProviderClient: client, Endpoint: url}, nil +} + +// NewLBV1 creates a ServiceClient that can be used to access the Rackspace +// Cloud Load Balancer v1 API. 
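+//
+// A hypothetical call, assuming an already-authenticated ProviderClient; the
+// region name is chosen for illustration only:
+//
+//	sc, err := NewLBV1(provider, gophercloud.EndpointOpts{Region: "IAD"})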
+func NewLBV1(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) { + eo.ApplyDefaults("rax:load-balancer") + url, err := client.EndpointLocator(eo) + if err != nil { + return nil, err + } + return &gophercloud.ServiceClient{ProviderClient: client, Endpoint: url}, nil +} + +// NewNetworkV2 creates a ServiceClient that can be used to access the Rackspace +// Networking v2 API. +func NewNetworkV2(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) { + eo.ApplyDefaults("network") + url, err := client.EndpointLocator(eo) + if err != nil { + return nil, err + } + return &gophercloud.ServiceClient{ProviderClient: client, Endpoint: url}, nil +} + +// NewCDNV1 creates a ServiceClient that may be used to access the Rackspace v1 +// CDN service. +func NewCDNV1(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) { + eo.ApplyDefaults("rax:cdn") + url, err := client.EndpointLocator(eo) + if err != nil { + return nil, err + } + return &gophercloud.ServiceClient{ProviderClient: client, Endpoint: url}, nil +} + +// NewOrchestrationV1 creates a ServiceClient that may be used to access the v1 orchestration service. +func NewOrchestrationV1(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) { + eo.ApplyDefaults("orchestration") + url, err := client.EndpointLocator(eo) + if err != nil { + return nil, err + } + return &gophercloud.ServiceClient{ProviderClient: client, Endpoint: url}, nil +} + +// NewRackConnectV3 creates a ServiceClient that may be used to access the v3 RackConnect service. +func NewRackConnectV3(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) { + eo.ApplyDefaults("rax:rackconnect") + url, err := client.EndpointLocator(eo) + if err != nil { + return nil, err + } + return &gophercloud.ServiceClient{ProviderClient: client, Endpoint: url}, nil +} + +// NewDBV1 creates a ServiceClient that may be used to access the v1 DB service. 
+func NewDBV1(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) { + eo.ApplyDefaults("rax:database") + url, err := client.EndpointLocator(eo) + if err != nil { + return nil, err + } + return &gophercloud.ServiceClient{ProviderClient: client, Endpoint: url}, nil +} diff --git a/vendor/github.com/rackspace/gophercloud/rackspace/client_test.go b/vendor/github.com/rackspace/gophercloud/rackspace/client_test.go new file mode 100644 index 000000000..73b1c886f --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/rackspace/client_test.go @@ -0,0 +1,38 @@ +package rackspace + +import ( + "fmt" + "net/http" + "testing" + + "github.com/rackspace/gophercloud" + th "github.com/rackspace/gophercloud/testhelper" +) + +func TestAuthenticatedClientV2(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + th.Mux.HandleFunc("/v2.0/tokens", func(w http.ResponseWriter, r *http.Request) { + fmt.Fprintf(w, ` + { + "access": { + "token": { + "id": "01234567890", + "expires": "2014-10-01T10:00:00.000000Z" + }, + "serviceCatalog": [] + } + } + `) + }) + + options := gophercloud.AuthOptions{ + Username: "me", + APIKey: "09876543210", + IdentityEndpoint: th.Endpoint() + "v2.0/", + } + client, err := AuthenticatedClient(options) + th.AssertNoErr(t, err) + th.CheckEquals(t, "01234567890", client.TokenID) +} diff --git a/vendor/github.com/rackspace/gophercloud/rackspace/compute/v2/bootfromvolume/delegate.go b/vendor/github.com/rackspace/gophercloud/rackspace/compute/v2/bootfromvolume/delegate.go new file mode 100644 index 000000000..2580459f0 --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/rackspace/compute/v2/bootfromvolume/delegate.go @@ -0,0 +1,12 @@ +package bootfromvolume + +import ( + "github.com/rackspace/gophercloud" + osBFV "github.com/rackspace/gophercloud/openstack/compute/v2/extensions/bootfromvolume" + osServers "github.com/rackspace/gophercloud/openstack/compute/v2/servers" +) + +// Create requests the creation of a server from the given block device mapping. 
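+//
+// A sketch of the options this expects, mirroring the test fixture below;
+// all values are placeholders:
+//
+//	ext := osBFV.CreateOptsExt{
+//		CreateOptsBuilder: osServers.CreateOpts{Name: "createdserver", FlavorRef: "performance1-1"},
+//		BlockDevice: []osBFV.BlockDevice{{
+//			UUID:            "123456",
+//			SourceType:      osBFV.Image,
+//			DestinationType: "volume",
+//			VolumeSize:      10,
+//		}},
+//	}
+//	result := Create(client, ext)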
+func Create(client *gophercloud.ServiceClient, opts osServers.CreateOptsBuilder) osServers.CreateResult {
+	return osBFV.Create(client, opts)
+}
diff --git a/vendor/github.com/rackspace/gophercloud/rackspace/compute/v2/bootfromvolume/delegate_test.go b/vendor/github.com/rackspace/gophercloud/rackspace/compute/v2/bootfromvolume/delegate_test.go
new file mode 100644
index 000000000..571a1bed2
--- /dev/null
+++ b/vendor/github.com/rackspace/gophercloud/rackspace/compute/v2/bootfromvolume/delegate_test.go
@@ -0,0 +1,54 @@
+package bootfromvolume
+
+import (
+	"testing"
+
+	osBFV "github.com/rackspace/gophercloud/openstack/compute/v2/extensions/bootfromvolume"
+	"github.com/rackspace/gophercloud/openstack/compute/v2/servers"
+	th "github.com/rackspace/gophercloud/testhelper"
+)
+
+func TestCreateOpts(t *testing.T) {
+	base := servers.CreateOpts{
+		Name:      "createdserver",
+		ImageRef:  "asdfasdfasdf",
+		FlavorRef: "performance1-1",
+	}
+
+	ext := osBFV.CreateOptsExt{
+		CreateOptsBuilder: base,
+		BlockDevice: []osBFV.BlockDevice{
+			osBFV.BlockDevice{
+				UUID:            "123456",
+				SourceType:      osBFV.Image,
+				DestinationType: "volume",
+				VolumeSize:      10,
+			},
+		},
+	}
+
+	expected := `
+	{
+		"server": {
+			"name": "createdserver",
+			"imageRef": "asdfasdfasdf",
+			"flavorRef": "performance1-1",
+			"flavorName": "",
+			"imageName": "",
+			"block_device_mapping_v2":[
+				{
+					"uuid":"123456",
+					"source_type":"image",
+					"destination_type":"volume",
+					"boot_index": "0",
+					"delete_on_termination": "false",
+					"volume_size": "10"
+				}
+			]
+		}
+	}
+	`
+	actual, err := ext.ToServerCreateMap()
+	th.AssertNoErr(t, err)
+	th.CheckJSONEquals(t, expected, actual)
+}
diff --git a/vendor/github.com/rackspace/gophercloud/rackspace/compute/v2/flavors/delegate.go b/vendor/github.com/rackspace/gophercloud/rackspace/compute/v2/flavors/delegate.go
new file mode 100644
index 000000000..081ea478c
--- /dev/null
+++ b/vendor/github.com/rackspace/gophercloud/rackspace/compute/v2/flavors/delegate.go
@@ -0,0 +1,43 @@
+package flavors
+
+import (
+	"github.com/rackspace/gophercloud"
+	os "github.com/rackspace/gophercloud/openstack/compute/v2/flavors"
+	"github.com/rackspace/gophercloud/pagination"
+)
+
+// ListOpts helps control the results returned by the ListDetail() function. For example, a flavor
+// with a minDisk field of 10 will not be returned if you specify MinDisk set to 20.
type ListOpts struct {
+
+	// MinDisk and MinRAM, if provided, elide flavors that do not meet your criteria.
+	MinDisk int `q:"minDisk"`
+	MinRAM  int `q:"minRam"`
+
+	// Marker specifies the ID of the last flavor in the previous page.
+	Marker string `q:"marker"`
+
+	// Limit instructs List to refrain from sending excessively large lists of flavors.
+	Limit int `q:"limit"`
+}
+
+// ToFlavorListQuery formats a ListOpts into a query string.
+func (opts ListOpts) ToFlavorListQuery() (string, error) {
+	q, err := gophercloud.BuildQueryString(opts)
+	if err != nil {
+		return "", err
+	}
+	return q.String(), nil
+}
+
+// ListDetail enumerates the flavors available to your account.
+func ListDetail(client *gophercloud.ServiceClient, opts os.ListOptsBuilder) pagination.Pager {
+	return os.ListDetail(client, opts)
+}
+
+// Get returns details about a single flavor, identified by ID.
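+//
+// For example, using a flavor ID from the tests in this package:
+//
+//	flavor, err := Get(client, "performance1-1").Extract()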
+func Get(client *gophercloud.ServiceClient, id string) GetResult { + var res GetResult + _, res.Err = client.Get(getURL(client, id), &res.Body, nil) + return res +} diff --git a/vendor/github.com/rackspace/gophercloud/rackspace/compute/v2/flavors/delegate_test.go b/vendor/github.com/rackspace/gophercloud/rackspace/compute/v2/flavors/delegate_test.go new file mode 100644 index 000000000..204081dd1 --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/rackspace/compute/v2/flavors/delegate_test.go @@ -0,0 +1,62 @@ +package flavors + +import ( + "fmt" + "net/http" + "testing" + + "github.com/rackspace/gophercloud/pagination" + th "github.com/rackspace/gophercloud/testhelper" + "github.com/rackspace/gophercloud/testhelper/client" +) + +func TestListFlavors(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + th.Mux.HandleFunc("/flavors/detail", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", client.TokenID) + + w.Header().Add("Content-Type", "application/json") + r.ParseForm() + marker := r.Form.Get("marker") + switch marker { + case "": + fmt.Fprintf(w, ListOutput) + case "performance1-2": + fmt.Fprintf(w, `{ "flavors": [] }`) + default: + t.Fatalf("Unexpected marker: [%s]", marker) + } + }) + + count := 0 + err := ListDetail(client.ServiceClient(), nil).EachPage(func(page pagination.Page) (bool, error) { + actual, err := ExtractFlavors(page) + th.AssertNoErr(t, err) + th.CheckDeepEquals(t, ExpectedFlavorSlice, actual) + + count++ + return true, nil + }) + th.AssertNoErr(t, err) + th.CheckEquals(t, 1, count) +} + +func TestGetFlavor(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + th.Mux.HandleFunc("/flavors/performance1-1", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", client.TokenID) + + w.Header().Add("Content-Type", "application/json") + fmt.Fprintf(w, GetOutput) + }) + + actual, err := Get(client.ServiceClient(), "performance1-1").Extract() + th.AssertNoErr(t, err) + th.CheckDeepEquals(t, &Performance1Flavor, actual) +} diff --git a/vendor/github.com/rackspace/gophercloud/rackspace/compute/v2/flavors/doc.go b/vendor/github.com/rackspace/gophercloud/rackspace/compute/v2/flavors/doc.go new file mode 100644 index 000000000..278229ab9 --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/rackspace/compute/v2/flavors/doc.go @@ -0,0 +1,3 @@ +// Package flavors provides information and interaction with the flavor +// API resource for the Rackspace Cloud Servers service. +package flavors diff --git a/vendor/github.com/rackspace/gophercloud/rackspace/compute/v2/flavors/fixtures.go b/vendor/github.com/rackspace/gophercloud/rackspace/compute/v2/flavors/fixtures.go new file mode 100644 index 000000000..957dccfd7 --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/rackspace/compute/v2/flavors/fixtures.go @@ -0,0 +1,137 @@ +// +build fixtures + +package flavors + +// ListOutput is a sample response of a flavor List request. 
+const ListOutput = ` +{ + "flavors": [ + { + "OS-FLV-EXT-DATA:ephemeral": 0, + "OS-FLV-WITH-EXT-SPECS:extra_specs": { + "class": "performance1", + "disk_io_index": "40", + "number_of_data_disks": "0", + "policy_class": "performance_flavor", + "resize_policy_class": "performance_flavor" + }, + "disk": 20, + "id": "performance1-1", + "links": [ + { + "href": "https://iad.servers.api.rackspacecloud.com/v2/864477/flavors/performance1-1", + "rel": "self" + }, + { + "href": "https://iad.servers.api.rackspacecloud.com/864477/flavors/performance1-1", + "rel": "bookmark" + } + ], + "name": "1 GB Performance", + "ram": 1024, + "rxtx_factor": 200, + "swap": "", + "vcpus": 1 + }, + { + "OS-FLV-EXT-DATA:ephemeral": 20, + "OS-FLV-WITH-EXT-SPECS:extra_specs": { + "class": "performance1", + "disk_io_index": "40", + "number_of_data_disks": "1", + "policy_class": "performance_flavor", + "resize_policy_class": "performance_flavor" + }, + "disk": 40, + "id": "performance1-2", + "links": [ + { + "href": "https://iad.servers.api.rackspacecloud.com/v2/864477/flavors/performance1-2", + "rel": "self" + }, + { + "href": "https://iad.servers.api.rackspacecloud.com/864477/flavors/performance1-2", + "rel": "bookmark" + } + ], + "name": "2 GB Performance", + "ram": 2048, + "rxtx_factor": 400, + "swap": "", + "vcpus": 2 + } + ] +}` + +// GetOutput is a sample response from a flavor Get request. Its contents correspond to the +// Performance1Flavor struct. +const GetOutput = ` +{ + "flavor": { + "OS-FLV-EXT-DATA:ephemeral": 0, + "OS-FLV-WITH-EXT-SPECS:extra_specs": { + "class": "performance1", + "disk_io_index": "40", + "number_of_data_disks": "0", + "policy_class": "performance_flavor", + "resize_policy_class": "performance_flavor" + }, + "disk": 20, + "id": "performance1-1", + "links": [ + { + "href": "https://iad.servers.api.rackspacecloud.com/v2/864477/flavors/performance1-1", + "rel": "self" + }, + { + "href": "https://iad.servers.api.rackspacecloud.com/864477/flavors/performance1-1", + "rel": "bookmark" + } + ], + "name": "1 GB Performance", + "ram": 1024, + "rxtx_factor": 200, + "swap": "", + "vcpus": 1 + } +} +` + +// Performance1Flavor is the expected result of parsing GetOutput, or the first element of +// ListOutput. +var Performance1Flavor = Flavor{ + ID: "performance1-1", + Disk: 20, + RAM: 1024, + Name: "1 GB Performance", + RxTxFactor: 200.0, + Swap: 0, + VCPUs: 1, + ExtraSpecs: ExtraSpecs{ + NumDataDisks: 0, + Class: "performance1", + DiskIOIndex: 0, + PolicyClass: "performance_flavor", + }, +} + +// Performance2Flavor is the second result expected from parsing ListOutput. +var Performance2Flavor = Flavor{ + ID: "performance1-2", + Disk: 40, + RAM: 2048, + Name: "2 GB Performance", + RxTxFactor: 400.0, + Swap: 0, + VCPUs: 2, + ExtraSpecs: ExtraSpecs{ + NumDataDisks: 0, + Class: "performance1", + DiskIOIndex: 0, + PolicyClass: "performance_flavor", + }, +} + +// ExpectedFlavorSlice is the slice of Flavor structs that are expected to be parsed from +// ListOutput. 
+var ExpectedFlavorSlice = []Flavor{Performance1Flavor, Performance2Flavor}
diff --git a/vendor/github.com/rackspace/gophercloud/rackspace/compute/v2/flavors/results.go b/vendor/github.com/rackspace/gophercloud/rackspace/compute/v2/flavors/results.go
new file mode 100644
index 000000000..af444a766
--- /dev/null
+++ b/vendor/github.com/rackspace/gophercloud/rackspace/compute/v2/flavors/results.go
@@ -0,0 +1,104 @@
+package flavors
+
+import (
+	"reflect"
+
+	"github.com/rackspace/gophercloud"
+	"github.com/mitchellh/mapstructure"
+	os "github.com/rackspace/gophercloud/openstack/compute/v2/flavors"
+	"github.com/rackspace/gophercloud/pagination"
+)
+
+// ExtraSpecs provides additional information about the flavor.
+type ExtraSpecs struct {
+	// The number of data disks
+	NumDataDisks int `mapstructure:"number_of_data_disks"`
+	// The flavor class
+	Class string `mapstructure:"class"`
+	// Relative measure of disk I/O performance from 0-99, where higher is faster
+	DiskIOIndex int    `mapstructure:"disk_io_index"`
+	PolicyClass string `mapstructure:"policy_class"`
+}
+
+// Flavor records represent (virtual) hardware configurations for server resources in a region.
+type Flavor struct {
+	// The ID field contains the flavor's unique identifier.
+	// For example, this identifier will be useful when specifying which hardware configuration to use for a new server instance.
+	ID string `mapstructure:"id"`
+
+	// The Disk and RAM fields provide a measure of storage space offered by the flavor, in GB and MB, respectively.
+	Disk int `mapstructure:"disk"`
+	RAM  int `mapstructure:"ram"`
+
+	// The Name field provides a human-readable moniker for the flavor.
+	Name string `mapstructure:"name"`
+
+	RxTxFactor float64 `mapstructure:"rxtx_factor"`
+
+	// Swap indicates how much space is reserved for swap.
+	// If not provided, this field will be set to 0.
+	Swap int `mapstructure:"swap"`
+
+	// VCPUs indicates how many (virtual) CPUs are available for this flavor.
+	VCPUs int `mapstructure:"vcpus"`
+
+	// ExtraSpecs provides extra information about the flavor.
+	ExtraSpecs ExtraSpecs `mapstructure:"OS-FLV-WITH-EXT-SPECS:extra_specs"`
+}
+
+// GetResult temporarily holds the response from a Get call.
+type GetResult struct {
+	gophercloud.Result
+}
+
+// Extract provides access to the individual Flavor returned by the Get function.
+func (gr GetResult) Extract() (*Flavor, error) {
+	if gr.Err != nil {
+		return nil, gr.Err
+	}
+
+	var result struct {
+		Flavor Flavor `mapstructure:"flavor"`
+	}
+
+	cfg := &mapstructure.DecoderConfig{
+		DecodeHook: defaulter,
+		Result:     &result,
+	}
+	decoder, err := mapstructure.NewDecoder(cfg)
+	if err != nil {
+		return nil, err
+	}
+	err = decoder.Decode(gr.Body)
+	return &result.Flavor, err
+}
+
+// defaulter zeroes out string values that are decoded into int fields.
+func defaulter(from, to reflect.Kind, v interface{}) (interface{}, error) {
+	if (from == reflect.String) && (to == reflect.Int) {
+		return 0, nil
+	}
+	return v, nil
+}
+
+// ExtractFlavors provides access to the list of flavors in a page acquired from the ListDetail operation.
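+//
+// A typical pager loop over flavor pages (sketch):
+//
+//	err := ListDetail(client, nil).EachPage(func(page pagination.Page) (bool, error) {
+//		flavors, err := ExtractFlavors(page)
+//		if err != nil {
+//			return false, err
+//		}
+//		return len(flavors) > 0, nil
+//	})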
+func ExtractFlavors(page pagination.Page) ([]Flavor, error) { + casted := page.(os.FlavorPage).Body + var container struct { + Flavors []Flavor `mapstructure:"flavors"` + } + + cfg := &mapstructure.DecoderConfig{ + DecodeHook: defaulter, + Result: &container, + } + decoder, err := mapstructure.NewDecoder(cfg) + if err != nil { + return container.Flavors, err + } + err = decoder.Decode(casted) + if err != nil { + return container.Flavors, err + } + + return container.Flavors, nil +} diff --git a/vendor/github.com/rackspace/gophercloud/rackspace/compute/v2/flavors/urls.go b/vendor/github.com/rackspace/gophercloud/rackspace/compute/v2/flavors/urls.go new file mode 100644 index 000000000..f4e2c3dac --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/rackspace/compute/v2/flavors/urls.go @@ -0,0 +1,9 @@ +package flavors + +import ( + "github.com/rackspace/gophercloud" +) + +func getURL(client *gophercloud.ServiceClient, id string) string { + return client.ServiceURL("flavors", id) +} diff --git a/vendor/github.com/rackspace/gophercloud/rackspace/compute/v2/images/delegate.go b/vendor/github.com/rackspace/gophercloud/rackspace/compute/v2/images/delegate.go new file mode 100644 index 000000000..18e1f315a --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/rackspace/compute/v2/images/delegate.go @@ -0,0 +1,22 @@ +package images + +import ( + "github.com/rackspace/gophercloud" + os "github.com/rackspace/gophercloud/openstack/compute/v2/images" + "github.com/rackspace/gophercloud/pagination" +) + +// ListDetail enumerates the available server images. +func ListDetail(client *gophercloud.ServiceClient, opts os.ListOptsBuilder) pagination.Pager { + return os.ListDetail(client, opts) +} + +// Get acquires additional detail about a specific image by ID. +func Get(client *gophercloud.ServiceClient, id string) os.GetResult { + return os.Get(client, id) +} + +// ExtractImages interprets a page as a collection of server images. 
+func ExtractImages(page pagination.Page) ([]os.Image, error) { + return os.ExtractImages(page) +} diff --git a/vendor/github.com/rackspace/gophercloud/rackspace/compute/v2/images/delegate_test.go b/vendor/github.com/rackspace/gophercloud/rackspace/compute/v2/images/delegate_test.go new file mode 100644 index 000000000..db0a6e341 --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/rackspace/compute/v2/images/delegate_test.go @@ -0,0 +1,62 @@ +package images + +import ( + "fmt" + "net/http" + "testing" + + "github.com/rackspace/gophercloud/pagination" + th "github.com/rackspace/gophercloud/testhelper" + "github.com/rackspace/gophercloud/testhelper/client" +) + +func TestListImageDetails(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + th.Mux.HandleFunc("/images/detail", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", client.TokenID) + + w.Header().Add("Content-Type", "application/json") + r.ParseForm() + marker := r.Form.Get("marker") + switch marker { + case "": + fmt.Fprintf(w, ListOutput) + case "e19a734c-c7e6-443a-830c-242209c4d65d": + fmt.Fprintf(w, `{ "images": [] }`) + default: + t.Fatalf("Unexpected marker: [%s]", marker) + } + }) + + count := 0 + err := ListDetail(client.ServiceClient(), nil).EachPage(func(page pagination.Page) (bool, error) { + count++ + actual, err := ExtractImages(page) + th.AssertNoErr(t, err) + th.CheckDeepEquals(t, ExpectedImageSlice, actual) + + return true, nil + }) + th.AssertNoErr(t, err) + th.CheckEquals(t, 1, count) +} + +func TestGetImageDetails(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + th.Mux.HandleFunc("/images/e19a734c-c7e6-443a-830c-242209c4d65d", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", client.TokenID) + + w.Header().Add("Content-Type", "application/json") + fmt.Fprintf(w, GetOutput) + }) + + actual, err := Get(client.ServiceClient(), "e19a734c-c7e6-443a-830c-242209c4d65d").Extract() + th.AssertNoErr(t, err) + th.CheckDeepEquals(t, &UbuntuImage, actual) +} diff --git a/vendor/github.com/rackspace/gophercloud/rackspace/compute/v2/images/doc.go b/vendor/github.com/rackspace/gophercloud/rackspace/compute/v2/images/doc.go new file mode 100644 index 000000000..cfae80671 --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/rackspace/compute/v2/images/doc.go @@ -0,0 +1,3 @@ +// Package images provides information and interaction with the image +// API resource for the Rackspace Cloud Servers service. +package images diff --git a/vendor/github.com/rackspace/gophercloud/rackspace/compute/v2/images/fixtures.go b/vendor/github.com/rackspace/gophercloud/rackspace/compute/v2/images/fixtures.go new file mode 100644 index 000000000..ccfbdc6a1 --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/rackspace/compute/v2/images/fixtures.go @@ -0,0 +1,200 @@ +// +build fixtures + +package images + +import ( + os "github.com/rackspace/gophercloud/openstack/compute/v2/images" +) + +// ListOutput is an example response from an /images/detail request. 
+const ListOutput = ` +{ + "images": [ + { + "OS-DCF:diskConfig": "MANUAL", + "OS-EXT-IMG-SIZE:size": 1.017415075e+09, + "created": "2014-10-01T15:49:02Z", + "id": "30aa010e-080e-4d4b-a7f9-09fc55b07d69", + "links": [ + { + "href": "https://iad.servers.api.rackspacecloud.com/v2/111222/images/30aa010e-080e-4d4b-a7f9-09fc55b07d69", + "rel": "self" + }, + { + "href": "https://iad.servers.api.rackspacecloud.com/111222/images/30aa010e-080e-4d4b-a7f9-09fc55b07d69", + "rel": "bookmark" + }, + { + "href": "https://iad.servers.api.rackspacecloud.com/111222/images/30aa010e-080e-4d4b-a7f9-09fc55b07d69", + "rel": "alternate", + "type": "application/vnd.openstack.image" + } + ], + "metadata": { + "auto_disk_config": "disabled", + "cache_in_nova": "True", + "com.rackspace__1__build_core": "1", + "com.rackspace__1__build_managed": "1", + "com.rackspace__1__build_rackconnect": "1", + "com.rackspace__1__options": "0", + "com.rackspace__1__platform_target": "PublicCloud", + "com.rackspace__1__release_build_date": "2014-10-01_15-46-08", + "com.rackspace__1__release_id": "100", + "com.rackspace__1__release_version": "10", + "com.rackspace__1__source": "kickstart", + "com.rackspace__1__visible_core": "1", + "com.rackspace__1__visible_managed": "0", + "com.rackspace__1__visible_rackconnect": "0", + "image_type": "base", + "org.openstack__1__architecture": "x64", + "org.openstack__1__os_distro": "org.archlinux", + "org.openstack__1__os_version": "2014.8", + "os_distro": "arch", + "os_type": "linux", + "vm_mode": "hvm" + }, + "minDisk": 20, + "minRam": 512, + "name": "Arch 2014.10 (PVHVM)", + "progress": 100, + "status": "ACTIVE", + "updated": "2014-10-01T19:37:58Z" + }, + { + "OS-DCF:diskConfig": "AUTO", + "OS-EXT-IMG-SIZE:size": 1.060306463e+09, + "created": "2014-10-01T12:58:11Z", + "id": "e19a734c-c7e6-443a-830c-242209c4d65d", + "links": [ + { + "href": "https://iad.servers.api.rackspacecloud.com/v2/111222/images/e19a734c-c7e6-443a-830c-242209c4d65d", + "rel": "self" + }, + { + "href": "https://iad.servers.api.rackspacecloud.com/111222/images/e19a734c-c7e6-443a-830c-242209c4d65d", + "rel": "bookmark" + }, + { + "href": "https://iad.servers.api.rackspacecloud.com/111222/images/e19a734c-c7e6-443a-830c-242209c4d65d", + "rel": "alternate", + "type": "application/vnd.openstack.image" + } + ], + "metadata": { + "auto_disk_config": "True", + "cache_in_nova": "True", + "com.rackspace__1__build_core": "1", + "com.rackspace__1__build_managed": "1", + "com.rackspace__1__build_rackconnect": "1", + "com.rackspace__1__options": "0", + "com.rackspace__1__platform_target": "PublicCloud", + "com.rackspace__1__release_build_date": "2014-10-01_12-31-03", + "com.rackspace__1__release_id": "1007", + "com.rackspace__1__release_version": "6", + "com.rackspace__1__source": "kickstart", + "com.rackspace__1__visible_core": "1", + "com.rackspace__1__visible_managed": "1", + "com.rackspace__1__visible_rackconnect": "1", + "image_type": "base", + "org.openstack__1__architecture": "x64", + "org.openstack__1__os_distro": "com.ubuntu", + "org.openstack__1__os_version": "14.04", + "os_distro": "ubuntu", + "os_type": "linux", + "vm_mode": "xen" + }, + "minDisk": 20, + "minRam": 512, + "name": "Ubuntu 14.04 LTS (Trusty Tahr)", + "progress": 100, + "status": "ACTIVE", + "updated": "2014-10-01T15:51:44Z" + } + ] +} +` + +// GetOutput is an example response from an /images request. 
+const GetOutput = ` +{ + "image": { + "OS-DCF:diskConfig": "AUTO", + "OS-EXT-IMG-SIZE:size": 1060306463, + "created": "2014-10-01T12:58:11Z", + "id": "e19a734c-c7e6-443a-830c-242209c4d65d", + "links": [ + { + "href": "https://iad.servers.api.rackspacecloud.com/v2/111222/images/e19a734c-c7e6-443a-830c-242209c4d65d", + "rel": "self" + }, + { + "href": "https://iad.servers.api.rackspacecloud.com/111222/images/e19a734c-c7e6-443a-830c-242209c4d65d", + "rel": "bookmark" + }, + { + "href": "https://iad.servers.api.rackspacecloud.com/111222/images/e19a734c-c7e6-443a-830c-242209c4d65d", + "rel": "alternate", + "type": "application/vnd.openstack.image" + } + ], + "metadata": { + "auto_disk_config": "True", + "cache_in_nova": "True", + "com.rackspace__1__build_core": "1", + "com.rackspace__1__build_managed": "1", + "com.rackspace__1__build_rackconnect": "1", + "com.rackspace__1__options": "0", + "com.rackspace__1__platform_target": "PublicCloud", + "com.rackspace__1__release_build_date": "2014-10-01_12-31-03", + "com.rackspace__1__release_id": "1007", + "com.rackspace__1__release_version": "6", + "com.rackspace__1__source": "kickstart", + "com.rackspace__1__visible_core": "1", + "com.rackspace__1__visible_managed": "1", + "com.rackspace__1__visible_rackconnect": "1", + "image_type": "base", + "org.openstack__1__architecture": "x64", + "org.openstack__1__os_distro": "com.ubuntu", + "org.openstack__1__os_version": "14.04", + "os_distro": "ubuntu", + "os_type": "linux", + "vm_mode": "xen" + }, + "minDisk": 20, + "minRam": 512, + "name": "Ubuntu 14.04 LTS (Trusty Tahr)", + "progress": 100, + "status": "ACTIVE", + "updated": "2014-10-01T15:51:44Z" + } +} +` + +// ArchImage is the first Image structure that should be parsed from ListOutput. +var ArchImage = os.Image{ + ID: "30aa010e-080e-4d4b-a7f9-09fc55b07d69", + Name: "Arch 2014.10 (PVHVM)", + Created: "2014-10-01T15:49:02Z", + Updated: "2014-10-01T19:37:58Z", + MinDisk: 20, + MinRAM: 512, + Progress: 100, + Status: "ACTIVE", +} + +// UbuntuImage is the second Image structure that should be parsed from ListOutput and +// the only image that should be extracted from GetOutput. +var UbuntuImage = os.Image{ + ID: "e19a734c-c7e6-443a-830c-242209c4d65d", + Name: "Ubuntu 14.04 LTS (Trusty Tahr)", + Created: "2014-10-01T12:58:11Z", + Updated: "2014-10-01T15:51:44Z", + MinDisk: 20, + MinRAM: 512, + Progress: 100, + Status: "ACTIVE", +} + +// ExpectedImageSlice is the collection of images that should be parsed from ListOutput, +// in order. +var ExpectedImageSlice = []os.Image{ArchImage, UbuntuImage} diff --git a/vendor/github.com/rackspace/gophercloud/rackspace/compute/v2/keypairs/delegate.go b/vendor/github.com/rackspace/gophercloud/rackspace/compute/v2/keypairs/delegate.go new file mode 100644 index 000000000..3e53525dc --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/rackspace/compute/v2/keypairs/delegate.go @@ -0,0 +1,33 @@ +package keypairs + +import ( + "github.com/rackspace/gophercloud" + os "github.com/rackspace/gophercloud/openstack/compute/v2/extensions/keypairs" + "github.com/rackspace/gophercloud/pagination" +) + +// List returns a Pager that allows you to iterate over a collection of KeyPairs. +func List(client *gophercloud.ServiceClient) pagination.Pager { + return os.List(client) +} + +// Create requests the creation of a new keypair on the server, or to import a pre-existing +// keypair. 
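+//
+// To import an existing key rather than generate one, set PublicKey; here
+// publicKeyMaterial is a hypothetical variable holding an "ssh-rsa ..." string:
+//
+//	res := Create(client, os.CreateOpts{Name: "importedkey", PublicKey: publicKeyMaterial})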
+func Create(client *gophercloud.ServiceClient, opts os.CreateOptsBuilder) os.CreateResult {
+	return os.Create(client, opts)
+}
+
+// Get returns public data about a previously uploaded KeyPair.
+func Get(client *gophercloud.ServiceClient, name string) os.GetResult {
+	return os.Get(client, name)
+}
+
+// Delete requests the deletion of a previously stored KeyPair from the server.
+func Delete(client *gophercloud.ServiceClient, name string) os.DeleteResult {
+	return os.Delete(client, name)
+}
+
+// ExtractKeyPairs interprets a page of results as a slice of KeyPairs.
+func ExtractKeyPairs(page pagination.Page) ([]os.KeyPair, error) {
+	return os.ExtractKeyPairs(page)
+}
diff --git a/vendor/github.com/rackspace/gophercloud/rackspace/compute/v2/keypairs/delegate_test.go b/vendor/github.com/rackspace/gophercloud/rackspace/compute/v2/keypairs/delegate_test.go
new file mode 100644
index 000000000..62e5df950
--- /dev/null
+++ b/vendor/github.com/rackspace/gophercloud/rackspace/compute/v2/keypairs/delegate_test.go
@@ -0,0 +1,72 @@
+package keypairs
+
+import (
+	"testing"
+
+	os "github.com/rackspace/gophercloud/openstack/compute/v2/extensions/keypairs"
+	"github.com/rackspace/gophercloud/pagination"
+	th "github.com/rackspace/gophercloud/testhelper"
+	"github.com/rackspace/gophercloud/testhelper/client"
+)
+
+func TestList(t *testing.T) {
+	th.SetupHTTP()
+	defer th.TeardownHTTP()
+	os.HandleListSuccessfully(t)
+
+	count := 0
+	err := List(client.ServiceClient()).EachPage(func(page pagination.Page) (bool, error) {
+		count++
+		actual, err := ExtractKeyPairs(page)
+		th.AssertNoErr(t, err)
+		th.CheckDeepEquals(t, os.ExpectedKeyPairSlice, actual)
+
+		return true, nil
+	})
+	th.AssertNoErr(t, err)
+	th.CheckEquals(t, 1, count)
+}
+
+func TestCreate(t *testing.T) {
+	th.SetupHTTP()
+	defer th.TeardownHTTP()
+	os.HandleCreateSuccessfully(t)
+
+	actual, err := Create(client.ServiceClient(), os.CreateOpts{
+		Name: "createdkey",
+	}).Extract()
+	th.AssertNoErr(t, err)
+	th.CheckDeepEquals(t, &os.CreatedKeyPair, actual)
+}
+
+func TestImport(t *testing.T) {
+	th.SetupHTTP()
+	defer th.TeardownHTTP()
+	os.HandleImportSuccessfully(t)
+
+	actual, err := Create(client.ServiceClient(), os.CreateOpts{
+		Name:      "importedkey",
+		PublicKey: "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAAAgQDx8nkQv/zgGgB4rMYmIf+6A4l6Rr+o/6lHBQdW5aYd44bd8JttDCE/F/pNRr0lRE+PiqSPO8nDPHw0010JeMH9gYgnnFlyY3/OcJ02RhIPyyxYpv9FhY+2YiUkpwFOcLImyrxEsYXpD/0d3ac30bNH6Sw9JD9UZHYcpSxsIbECHw== Generated by Nova",
+	}).Extract()
+	th.AssertNoErr(t, err)
+	th.CheckDeepEquals(t, &os.ImportedKeyPair, actual)
+}
+
+func TestGet(t *testing.T) {
+	th.SetupHTTP()
+	defer th.TeardownHTTP()
+	os.HandleGetSuccessfully(t)
+
+	actual, err := Get(client.ServiceClient(), "firstkey").Extract()
+	th.AssertNoErr(t, err)
+	th.CheckDeepEquals(t, &os.FirstKeyPair, actual)
+}
+
+func TestDelete(t *testing.T) {
+	th.SetupHTTP()
+	defer th.TeardownHTTP()
+	os.HandleDeleteSuccessfully(t)
+
+	err := Delete(client.ServiceClient(), "deletedkey").ExtractErr()
+	th.AssertNoErr(t, err)
+}
diff --git a/vendor/github.com/rackspace/gophercloud/rackspace/compute/v2/keypairs/doc.go b/vendor/github.com/rackspace/gophercloud/rackspace/compute/v2/keypairs/doc.go
new file mode 100644
index 000000000..31713752e
--- /dev/null
+++ b/vendor/github.com/rackspace/gophercloud/rackspace/compute/v2/keypairs/doc.go
@@ -0,0 +1,3 @@
+// Package keypairs provides information and interaction with the keypair
+// API resource for the Rackspace Cloud Servers service.
+package keypairs
diff --git a/vendor/github.com/rackspace/gophercloud/rackspace/compute/v2/networks/doc.go b/vendor/github.com/rackspace/gophercloud/rackspace/compute/v2/networks/doc.go
new file mode 100644
index 000000000..8e5c77382
--- /dev/null
+++ b/vendor/github.com/rackspace/gophercloud/rackspace/compute/v2/networks/doc.go
@@ -0,0 +1,3 @@
+// Package networks provides information and interaction with the network
+// API resource for the Rackspace Cloud Servers service.
+package networks
diff --git a/vendor/github.com/rackspace/gophercloud/rackspace/compute/v2/networks/requests.go b/vendor/github.com/rackspace/gophercloud/rackspace/compute/v2/networks/requests.go
new file mode 100644
index 000000000..cebbffd36
--- /dev/null
+++ b/vendor/github.com/rackspace/gophercloud/rackspace/compute/v2/networks/requests.go
@@ -0,0 +1,89 @@
+package networks
+
+import (
+	"errors"
+
+	"github.com/rackspace/gophercloud"
+	"github.com/rackspace/gophercloud/pagination"
+)
+
+// List returns a Pager which allows you to iterate over a collection of
+// networks. Unlike its OpenStack counterpart, this endpoint accepts no
+// filtering or sorting parameters, so List takes only a client.
+func List(c *gophercloud.ServiceClient) pagination.Pager {
+	createPage := func(r pagination.PageResult) pagination.Page {
+		return NetworkPage{pagination.SinglePageBase(r)}
+	}
+
+	return pagination.NewPager(c, listURL(c), createPage)
+}
+
+// Get retrieves a specific network based on its unique ID.
+func Get(c *gophercloud.ServiceClient, id string) GetResult {
+	var res GetResult
+	_, res.Err = c.Get(getURL(c, id), &res.Body, nil)
+	return res
+}
+
+// CreateOptsBuilder is the interface options structs have to satisfy in order
+// to be used in the main Create operation in this package. Since many
+// extensions decorate or modify the common logic, it is useful for them to
+// satisfy a basic interface in order for them to be used.
+type CreateOptsBuilder interface {
+	ToNetworkCreateMap() (map[string]interface{}, error)
+}
+
+// CreateOpts is the common options struct used in this package's Create
+// operation.
+type CreateOpts struct {
+	// REQUIRED. See Network object for more info.
+	CIDR string
+	// REQUIRED. See Network object for more info.
+	Label string
+}
+
+// ToNetworkCreateMap casts a CreateOpts struct to a map.
+func (opts CreateOpts) ToNetworkCreateMap() (map[string]interface{}, error) {
+	n := make(map[string]interface{})
+
+	if opts.CIDR == "" {
+		return nil, errors.New("Required field CIDR not set.")
+	}
+	if opts.Label == "" {
+		return nil, errors.New("Required field Label not set.")
+	}
+
+	n["label"] = opts.Label
+	n["cidr"] = opts.CIDR
+	return map[string]interface{}{"network": n}, nil
+}
+
+// Create accepts a CreateOpts struct and creates a new network using the
+// values provided. Both Label and CIDR are required, so, unlike some other
+// create operations in this SDK, the CreateOpts struct argument cannot be
+// empty.
+//
+// The tenant ID that is contained in the URI is the tenant that creates the
+// network.
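+//
+// For example, using the label and CIDR from the tests in this package:
+//
+//	opts := CreateOpts{Label: "test-network-1", CIDR: "192.168.100.0/24"}
+//	network, err := Create(client, opts).Extract()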
+func Create(c *gophercloud.ServiceClient, opts CreateOptsBuilder) CreateResult { + var res CreateResult + + reqBody, err := opts.ToNetworkCreateMap() + if err != nil { + res.Err = err + return res + } + + // Send request to API + _, res.Err = c.Post(createURL(c), reqBody, &res.Body, &gophercloud.RequestOpts{ + OkCodes: []int{200, 201, 202}, + }) + return res +} + +// Delete accepts a unique ID and deletes the network associated with it. +func Delete(c *gophercloud.ServiceClient, networkID string) DeleteResult { + var res DeleteResult + _, res.Err = c.Delete(deleteURL(c, networkID), nil) + return res +} diff --git a/vendor/github.com/rackspace/gophercloud/rackspace/compute/v2/networks/requests_test.go b/vendor/github.com/rackspace/gophercloud/rackspace/compute/v2/networks/requests_test.go new file mode 100644 index 000000000..6f44c1cab --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/rackspace/compute/v2/networks/requests_test.go @@ -0,0 +1,156 @@ +package networks + +import ( + "fmt" + "net/http" + "testing" + + "github.com/rackspace/gophercloud/pagination" + th "github.com/rackspace/gophercloud/testhelper" + fake "github.com/rackspace/gophercloud/testhelper/client" +) + +func TestList(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + th.Mux.HandleFunc("/os-networksv2", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + + fmt.Fprintf(w, ` +{ + "networks": [ + { + "label": "test-network-1", + "cidr": "192.168.100.0/24", + "id": "d32019d3-bc6e-4319-9c1d-6722fc136a22" + }, + { + "label": "test-network-2", + "cidr": "192.30.250.00/18", + "id": "db193ab3-96e3-4cb3-8fc5-05f4296d0324" + } + ] +} + `) + }) + + client := fake.ServiceClient() + count := 0 + + err := List(client).EachPage(func(page pagination.Page) (bool, error) { + count++ + actual, err := ExtractNetworks(page) + if err != nil { + t.Errorf("Failed to extract networks: %v", err) + return false, err + } + + expected := []Network{ + Network{ + Label: "test-network-1", + CIDR: "192.168.100.0/24", + ID: "d32019d3-bc6e-4319-9c1d-6722fc136a22", + }, + Network{ + Label: "test-network-2", + CIDR: "192.30.250.00/18", + ID: "db193ab3-96e3-4cb3-8fc5-05f4296d0324", + }, + } + + th.CheckDeepEquals(t, expected, actual) + + return true, nil + }) + th.AssertNoErr(t, err) + th.CheckEquals(t, 1, count) +} + +func TestGet(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + th.Mux.HandleFunc("/os-networksv2/d32019d3-bc6e-4319-9c1d-6722fc136a22", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + + fmt.Fprintf(w, ` +{ + "network": { + "label": "test-network-1", + "cidr": "192.168.100.0/24", + "id": "d32019d3-bc6e-4319-9c1d-6722fc136a22" + } +} + `) + }) + + n, err := Get(fake.ServiceClient(), "d32019d3-bc6e-4319-9c1d-6722fc136a22").Extract() + th.AssertNoErr(t, err) + + th.AssertEquals(t, n.CIDR, "192.168.100.0/24") + th.AssertEquals(t, n.Label, "test-network-1") + th.AssertEquals(t, n.ID, "d32019d3-bc6e-4319-9c1d-6722fc136a22") +} + +func TestCreate(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + th.Mux.HandleFunc("/os-networksv2", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "POST") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + th.TestHeader(t, r, 
"Content-Type", "application/json") + th.TestHeader(t, r, "Accept", "application/json") + th.TestJSONRequest(t, r, ` +{ + "network": { + "label": "test-network-1", + "cidr": "192.168.100.0/24" + } +} + `) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusCreated) + + fmt.Fprintf(w, ` +{ + "network": { + "label": "test-network-1", + "cidr": "192.168.100.0/24", + "id": "4e8e5957-649f-477b-9e5b-f1f75b21c03c" + } +} + `) + }) + + options := CreateOpts{Label: "test-network-1", CIDR: "192.168.100.0/24"} + n, err := Create(fake.ServiceClient(), options).Extract() + th.AssertNoErr(t, err) + + th.AssertEquals(t, n.Label, "test-network-1") + th.AssertEquals(t, n.ID, "4e8e5957-649f-477b-9e5b-f1f75b21c03c") +} + +func TestDelete(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + th.Mux.HandleFunc("/os-networksv2/4e8e5957-649f-477b-9e5b-f1f75b21c03c", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "DELETE") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + w.WriteHeader(http.StatusNoContent) + }) + + res := Delete(fake.ServiceClient(), "4e8e5957-649f-477b-9e5b-f1f75b21c03c") + th.AssertNoErr(t, res.Err) +} diff --git a/vendor/github.com/rackspace/gophercloud/rackspace/compute/v2/networks/results.go b/vendor/github.com/rackspace/gophercloud/rackspace/compute/v2/networks/results.go new file mode 100644 index 000000000..eb6a76c00 --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/rackspace/compute/v2/networks/results.go @@ -0,0 +1,81 @@ +package networks + +import ( + "github.com/mitchellh/mapstructure" + "github.com/rackspace/gophercloud" + "github.com/rackspace/gophercloud/pagination" +) + +type commonResult struct { + gophercloud.Result +} + +// Extract is a function that accepts a result and extracts a network resource. +func (r commonResult) Extract() (*Network, error) { + if r.Err != nil { + return nil, r.Err + } + + var res struct { + Network *Network `json:"network"` + } + + err := mapstructure.Decode(r.Body, &res) + + return res.Network, err +} + +// CreateResult represents the result of a create operation. +type CreateResult struct { + commonResult +} + +// GetResult represents the result of a get operation. +type GetResult struct { + commonResult +} + +// DeleteResult represents the result of a delete operation. +type DeleteResult struct { + gophercloud.ErrResult +} + +// Network represents, well, a network. +type Network struct { + // UUID for the network + ID string `mapstructure:"id" json:"id"` + + // Human-readable name for the network. Might not be unique. + Label string `mapstructure:"label" json:"label"` + + // Classless Inter-Domain Routing + CIDR string `mapstructure:"cidr" json:"cidr"` +} + +// NetworkPage is the page returned by a pager when traversing over a +// collection of networks. +type NetworkPage struct { + pagination.SinglePageBase +} + +// IsEmpty returns true if the NetworkPage contains no Networks. +func (r NetworkPage) IsEmpty() (bool, error) { + networks, err := ExtractNetworks(r) + if err != nil { + return true, err + } + return len(networks) == 0, nil +} + +// ExtractNetworks accepts a Page struct, specifically a NetworkPage struct, +// and extracts the elements into a slice of Network structs. In other words, +// a generic collection is mapped into a relevant slice. 
+func ExtractNetworks(page pagination.Page) ([]Network, error) {
+	var resp struct {
+		Networks []Network `mapstructure:"networks" json:"networks"`
+	}
+
+	err := mapstructure.Decode(page.(NetworkPage).Body, &resp)
+
+	return resp.Networks, err
+}
diff --git a/vendor/github.com/rackspace/gophercloud/rackspace/compute/v2/networks/urls.go b/vendor/github.com/rackspace/gophercloud/rackspace/compute/v2/networks/urls.go
new file mode 100644
index 000000000..19a21aa90
--- /dev/null
+++ b/vendor/github.com/rackspace/gophercloud/rackspace/compute/v2/networks/urls.go
@@ -0,0 +1,27 @@
+package networks
+
+import "github.com/rackspace/gophercloud"
+
+func resourceURL(c *gophercloud.ServiceClient, id string) string {
+	return c.ServiceURL("os-networksv2", id)
+}
+
+func rootURL(c *gophercloud.ServiceClient) string {
+	return c.ServiceURL("os-networksv2")
+}
+
+func getURL(c *gophercloud.ServiceClient, id string) string {
+	return resourceURL(c, id)
+}
+
+func listURL(c *gophercloud.ServiceClient) string {
+	return rootURL(c)
+}
+
+func createURL(c *gophercloud.ServiceClient) string {
+	return rootURL(c)
+}
+
+func deleteURL(c *gophercloud.ServiceClient, id string) string {
+	return resourceURL(c, id)
+}
diff --git a/vendor/github.com/rackspace/gophercloud/rackspace/compute/v2/networks/urls_test.go b/vendor/github.com/rackspace/gophercloud/rackspace/compute/v2/networks/urls_test.go
new file mode 100644
index 000000000..983992e2b
--- /dev/null
+++ b/vendor/github.com/rackspace/gophercloud/rackspace/compute/v2/networks/urls_test.go
@@ -0,0 +1,38 @@
+package networks
+
+import (
+	"testing"
+
+	"github.com/rackspace/gophercloud"
+	th "github.com/rackspace/gophercloud/testhelper"
+)
+
+const endpoint = "http://localhost:57909/"
+
+func endpointClient() *gophercloud.ServiceClient {
+	return &gophercloud.ServiceClient{Endpoint: endpoint}
+}
+
+func TestGetURL(t *testing.T) {
+	actual := getURL(endpointClient(), "foo")
+	expected := endpoint + "os-networksv2/foo"
+	th.AssertEquals(t, expected, actual)
+}
+
+func TestCreateURL(t *testing.T) {
+	actual := createURL(endpointClient())
+	expected := endpoint + "os-networksv2"
+	th.AssertEquals(t, expected, actual)
+}
+
+func TestListURL(t *testing.T) {
+	actual := listURL(endpointClient())
+	expected := endpoint + "os-networksv2"
+	th.AssertEquals(t, expected, actual)
+}
+
+func TestDeleteURL(t *testing.T) {
+	actual := deleteURL(endpointClient(), "foo")
+	expected := endpoint + "os-networksv2/foo"
+	th.AssertEquals(t, expected, actual)
+}
diff --git a/vendor/github.com/rackspace/gophercloud/rackspace/compute/v2/servers/delegate.go b/vendor/github.com/rackspace/gophercloud/rackspace/compute/v2/servers/delegate.go
new file mode 100644
index 000000000..7810d156a
--- /dev/null
+++ b/vendor/github.com/rackspace/gophercloud/rackspace/compute/v2/servers/delegate.go
@@ -0,0 +1,116 @@
+package servers
+
+import (
+	"github.com/rackspace/gophercloud"
+	os "github.com/rackspace/gophercloud/openstack/compute/v2/servers"
+	"github.com/rackspace/gophercloud/pagination"
+)
+
+// List makes a request against the API to list servers accessible to you.
+func List(client *gophercloud.ServiceClient, opts os.ListOptsBuilder) pagination.Pager {
+	return os.List(client, opts)
+}
+
+// Create requests a server to be provisioned to the user in the current tenant.
+func Create(client *gophercloud.ServiceClient, opts os.CreateOptsBuilder) os.CreateResult {
+	return os.Create(client, opts)
+}
+
+// Update requests an existing server to be updated with the supplied options.
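+//
+// For example, using the name and ID from the tests in this package:
+//
+//	opts := os.UpdateOpts{Name: "test-server-updated"}
+//	server, err := Update(client, "8c65cb68-0681-4c30-bc88-6b83a8a26aee", opts).Extract()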
+func Update(client *gophercloud.ServiceClient, id string, opts os.UpdateOptsBuilder) os.UpdateResult { + return os.Update(client, id, opts) +} + +// Delete requests that a server previously provisioned be removed from your account. +func Delete(client *gophercloud.ServiceClient, id string) os.DeleteResult { + return os.Delete(client, id) +} + +// Get requests details on a single server, by ID. +func Get(client *gophercloud.ServiceClient, id string) os.GetResult { + return os.Get(client, id) +} + +// ChangeAdminPassword alters the administrator or root password for a specified server. +func ChangeAdminPassword(client *gophercloud.ServiceClient, id, newPassword string) os.ActionResult { + return os.ChangeAdminPassword(client, id, newPassword) +} + +// Reboot requests that a given server reboot. Two methods exist for rebooting a server: +// +// os.HardReboot (aka PowerCycle) restarts the server instance by physically cutting power to the +// machine, or if a VM, terminating it at the hypervisor level. It's done. Caput. Full stop. Then, +// after a brief wait, power is restored or the VM instance restarted. +// +// os.SoftReboot (aka OSReboot) simply tells the OS to restart under its own procedures. E.g., in +// Linux, asking it to enter runlevel 6, or executing "sudo shutdown -r now", or by asking Windows to restart the machine. +func Reboot(client *gophercloud.ServiceClient, id string, how os.RebootMethod) os.ActionResult { + return os.Reboot(client, id, how) +} + +// Rebuild will reprovision the server according to the configuration options provided in the +// RebuildOpts struct. +func Rebuild(client *gophercloud.ServiceClient, id string, opts os.RebuildOptsBuilder) os.RebuildResult { + return os.Rebuild(client, id, opts) +} + +// Resize instructs the provider to change the flavor of the server. +// Note that this implies rebuilding it. +// Unfortunately, one cannot pass rebuild parameters to the resize function. +// When the resize completes, the server will be in RESIZE_VERIFY state. +// While in this state, you can explore the use of the new server's configuration. +// If you like it, call ConfirmResize() to commit the resize permanently. +// Otherwise, call RevertResize() to restore the old configuration. +func Resize(client *gophercloud.ServiceClient, id string, opts os.ResizeOptsBuilder) os.ActionResult { + return os.Resize(client, id, opts) +} + +// ConfirmResize confirms a previous resize operation on a server. +// See Resize() for more details. +func ConfirmResize(client *gophercloud.ServiceClient, id string) os.ActionResult { + return os.ConfirmResize(client, id) +} + +// WaitForStatus will continually poll a server until it successfully transitions to a specified +// status. It will do this for at most the number of seconds specified. +func WaitForStatus(c *gophercloud.ServiceClient, id, status string, secs int) error { + return os.WaitForStatus(c, id, status, secs) +} + +// ExtractServers interprets the results of a single page from a List() call, producing a slice of Server entities. +func ExtractServers(page pagination.Page) ([]os.Server, error) { + return os.ExtractServers(page) +} + +// ListAddresses makes a request against the API to list the servers IP addresses. +func ListAddresses(client *gophercloud.ServiceClient, id string) pagination.Pager { + return os.ListAddresses(client, id) +} + +// ExtractAddresses interprets the results of a single page from a ListAddresses() call, producing a map of Address slices. 
+func ExtractAddresses(page pagination.Page) (map[string][]os.Address, error) { + return os.ExtractAddresses(page) +} + +// ListAddressesByNetwork makes a request against the API to list the servers IP addresses +// for the given network. +func ListAddressesByNetwork(client *gophercloud.ServiceClient, id, network string) pagination.Pager { + return os.ListAddressesByNetwork(client, id, network) +} + +// ExtractNetworkAddresses interprets the results of a single page from a ListAddressesByNetwork() call, producing a map of Address slices. +func ExtractNetworkAddresses(page pagination.Page) ([]os.Address, error) { + return os.ExtractNetworkAddresses(page) +} + +// Metadata requests all the metadata for the given server ID. +func Metadata(client *gophercloud.ServiceClient, id string) os.GetMetadataResult { + return os.Metadata(client, id) +} + +// UpdateMetadata updates (or creates) all the metadata specified by opts for the given server ID. +// This operation does not affect already-existing metadata that is not specified +// by opts. +func UpdateMetadata(client *gophercloud.ServiceClient, id string, opts os.UpdateMetadataOptsBuilder) os.UpdateMetadataResult { + return os.UpdateMetadata(client, id, opts) +} diff --git a/vendor/github.com/rackspace/gophercloud/rackspace/compute/v2/servers/delegate_test.go b/vendor/github.com/rackspace/gophercloud/rackspace/compute/v2/servers/delegate_test.go new file mode 100644 index 000000000..03e7acea8 --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/rackspace/compute/v2/servers/delegate_test.go @@ -0,0 +1,182 @@ +package servers + +import ( + "fmt" + "net/http" + "testing" + + os "github.com/rackspace/gophercloud/openstack/compute/v2/servers" + "github.com/rackspace/gophercloud/pagination" + th "github.com/rackspace/gophercloud/testhelper" + "github.com/rackspace/gophercloud/testhelper/client" +) + +func TestListServers(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + th.Mux.HandleFunc("/servers/detail", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", client.TokenID) + + w.Header().Add("Content-Type", "application/json") + fmt.Fprintf(w, ListOutput) + }) + + count := 0 + err := List(client.ServiceClient(), nil).EachPage(func(page pagination.Page) (bool, error) { + count++ + actual, err := ExtractServers(page) + th.AssertNoErr(t, err) + th.CheckDeepEquals(t, ExpectedServerSlice, actual) + + return true, nil + }) + th.AssertNoErr(t, err) + th.CheckEquals(t, 1, count) +} + +func TestCreateServer(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + os.HandleServerCreationSuccessfully(t, CreateOutput) + + actual, err := Create(client.ServiceClient(), os.CreateOpts{ + Name: "derp", + ImageRef: "f90f6034-2570-4974-8351-6b49732ef2eb", + FlavorRef: "1", + }).Extract() + th.AssertNoErr(t, err) + + th.CheckDeepEquals(t, &CreatedServer, actual) +} + +func TestDeleteServer(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + os.HandleServerDeletionSuccessfully(t) + + res := Delete(client.ServiceClient(), "asdfasdfasdf") + th.AssertNoErr(t, res.Err) +} + +func TestGetServer(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + th.Mux.HandleFunc("/servers/8c65cb68-0681-4c30-bc88-6b83a8a26aee", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", client.TokenID) + + w.Header().Add("Content-Type", "application/json") + fmt.Fprintf(w, GetOutput) + }) + + actual, err := Get(client.ServiceClient(), 
"8c65cb68-0681-4c30-bc88-6b83a8a26aee").Extract() + th.AssertNoErr(t, err) + th.CheckDeepEquals(t, &GophercloudServer, actual) +} + +func TestUpdateServer(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + th.Mux.HandleFunc("/servers/8c65cb68-0681-4c30-bc88-6b83a8a26aee", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "PUT") + th.TestHeader(t, r, "X-Auth-Token", client.TokenID) + th.TestJSONRequest(t, r, `{ "server": { "name": "test-server-updated" } }`) + + w.Header().Add("Content-Type", "application/json") + + fmt.Fprintf(w, UpdateOutput) + }) + + opts := os.UpdateOpts{ + Name: "test-server-updated", + } + actual, err := Update(client.ServiceClient(), "8c65cb68-0681-4c30-bc88-6b83a8a26aee", opts).Extract() + th.AssertNoErr(t, err) + th.CheckDeepEquals(t, &GophercloudUpdatedServer, actual) +} + +func TestChangeAdminPassword(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + os.HandleAdminPasswordChangeSuccessfully(t) + + res := ChangeAdminPassword(client.ServiceClient(), "1234asdf", "new-password") + th.AssertNoErr(t, res.Err) +} + +func TestReboot(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + os.HandleRebootSuccessfully(t) + + res := Reboot(client.ServiceClient(), "1234asdf", os.SoftReboot) + th.AssertNoErr(t, res.Err) +} + +func TestRebuildServer(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + os.HandleRebuildSuccessfully(t, GetOutput) + + opts := os.RebuildOpts{ + Name: "new-name", + AdminPass: "swordfish", + ImageID: "http://104.130.131.164:8774/fcad67a6189847c4aecfa3c81a05783b/images/f90f6034-2570-4974-8351-6b49732ef2eb", + AccessIPv4: "1.2.3.4", + } + actual, err := Rebuild(client.ServiceClient(), "1234asdf", opts).Extract() + th.AssertNoErr(t, err) + th.CheckDeepEquals(t, &GophercloudServer, actual) +} + +func TestListAddresses(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + os.HandleAddressListSuccessfully(t) + + expected := os.ListAddressesExpected + pages := 0 + err := ListAddresses(client.ServiceClient(), "asdfasdfasdf").EachPage(func(page pagination.Page) (bool, error) { + pages++ + + actual, err := ExtractAddresses(page) + th.AssertNoErr(t, err) + + if len(actual) != 2 { + t.Fatalf("Expected 2 networks, got %d", len(actual)) + } + th.CheckDeepEquals(t, expected, actual) + + return true, nil + }) + th.AssertNoErr(t, err) + th.CheckEquals(t, 1, pages) +} + +func TestListAddressesByNetwork(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + os.HandleNetworkAddressListSuccessfully(t) + + expected := os.ListNetworkAddressesExpected + pages := 0 + err := ListAddressesByNetwork(client.ServiceClient(), "asdfasdfasdf", "public").EachPage(func(page pagination.Page) (bool, error) { + pages++ + + actual, err := ExtractNetworkAddresses(page) + th.AssertNoErr(t, err) + + if len(actual) != 2 { + t.Fatalf("Expected 2 addresses, got %d", len(actual)) + } + th.CheckDeepEquals(t, expected, actual) + + return true, nil + }) + th.AssertNoErr(t, err) + th.CheckEquals(t, 1, pages) +} diff --git a/vendor/github.com/rackspace/gophercloud/rackspace/compute/v2/servers/doc.go b/vendor/github.com/rackspace/gophercloud/rackspace/compute/v2/servers/doc.go new file mode 100644 index 000000000..c9f77f694 --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/rackspace/compute/v2/servers/doc.go @@ -0,0 +1,3 @@ +// Package servers provides information and interaction with the server +// API resource for the Rackspace Cloud Servers service. 
+package servers diff --git a/vendor/github.com/rackspace/gophercloud/rackspace/compute/v2/servers/fixtures.go b/vendor/github.com/rackspace/gophercloud/rackspace/compute/v2/servers/fixtures.go new file mode 100644 index 000000000..75cccd041 --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/rackspace/compute/v2/servers/fixtures.go @@ -0,0 +1,574 @@ +// +build fixtures + +package servers + +import ( + os "github.com/rackspace/gophercloud/openstack/compute/v2/servers" +) + +// ListOutput is the recorded output of a Rackspace servers.List request. +const ListOutput = ` +{ + "servers": [ + { + "OS-DCF:diskConfig": "MANUAL", + "OS-EXT-STS:power_state": 1, + "OS-EXT-STS:task_state": null, + "OS-EXT-STS:vm_state": "active", + "accessIPv4": "1.2.3.4", + "accessIPv6": "1111:4822:7818:121:2000:9b5e:7438:a2d0", + "addresses": { + "private": [ + { + "addr": "10.208.230.113", + "version": 4 + } + ], + "public": [ + { + "addr": "2001:4800:7818:101:2000:9b5e:7428:a2d0", + "version": 6 + }, + { + "addr": "104.130.131.164", + "version": 4 + } + ] + }, + "created": "2014-09-23T12:34:58Z", + "flavor": { + "id": "performance1-8", + "links": [ + { + "href": "https://dfw.servers.api.rackspacecloud.com/111111/flavors/performance1-8", + "rel": "bookmark" + } + ] + }, + "hostId": "e8951a524bc465b0898aeac7674da6fe1495e253ae1ea17ddb2c2475", + "id": "59818cee-bc8c-44eb-8073-673ee65105f7", + "image": { + "id": "255df5fb-e3d4-45a3-9a07-c976debf7c14", + "links": [ + { + "href": "https://dfw.servers.api.rackspacecloud.com/111111/images/255df5fb-e3d4-45a3-9a07-c976debf7c14", + "rel": "bookmark" + } + ] + }, + "key_name": "mykey", + "links": [ + { + "href": "https://dfw.servers.api.rackspacecloud.com/v2/111111/servers/59818cee-bc8c-44eb-8073-673ee65105f7", + "rel": "self" + }, + { + "href": "https://dfw.servers.api.rackspacecloud.com/111111/servers/59818cee-bc8c-44eb-8073-673ee65105f7", + "rel": "bookmark" + } + ], + "metadata": {}, + "name": "devstack", + "progress": 100, + "status": "ACTIVE", + "tenant_id": "111111", + "updated": "2014-09-23T12:38:19Z", + "user_id": "14ae7bb21d81422694655f3cc30f2930" + }, + { + "OS-DCF:diskConfig": "MANUAL", + "OS-EXT-STS:power_state": 1, + "OS-EXT-STS:task_state": null, + "OS-EXT-STS:vm_state": "active", + "accessIPv4": "1.1.2.3", + "accessIPv6": "2222:4444:7817:101:be76:4eff:f0e5:9e02", + "addresses": { + "private": [ + { + "addr": "10.10.20.30", + "version": 4 + } + ], + "public": [ + { + "addr": "1.1.2.3", + "version": 4 + }, + { + "addr": "2222:4444:7817:101:be76:4eff:f0e5:9e02", + "version": 6 + } + ] + }, + "created": "2014-07-21T19:32:55Z", + "flavor": { + "id": "performance1-2", + "links": [ + { + "href": "https://dfw.servers.api.rackspacecloud.com/111111/flavors/performance1-2", + "rel": "bookmark" + } + ] + }, + "hostId": "f859679906d6b1a38c1bd516b78f4dcc7d5fcf012578fa3ce460716c", + "id": "25f1c7f5-e00a-4715-b354-16e24b2f4630", + "image": { + "id": "bb02b1a3-bc77-4d17-ab5b-421d89850fca", + "links": [ + { + "href": "https://dfw.servers.api.rackspacecloud.com/111111/images/bb02b1a3-bc77-4d17-ab5b-421d89850fca", + "rel": "bookmark" + } + ] + }, + "key_name": "otherkey", + "links": [ + { + "href": "https://dfw.servers.api.rackspacecloud.com/v2/111111/servers/25f1c7f5-e00a-4715-b355-16e24b2f4630", + "rel": "self" + }, + { + "href": "https://dfw.servers.api.rackspacecloud.com/111111/servers/25f1c7f5-e00a-4715-b355-16e24b2f4630", + "rel": "bookmark" + } + ], + "metadata": {}, + "name": "peril-dfw", + "progress": 100, + "status": "ACTIVE", + "tenant_id": "111111", + 
"updated": "2014-07-21T19:34:24Z", + "user_id": "14ae7bb21d81422694655f3cc30f2930" + } + ] +} +` + +// GetOutput is the recorded output of a Rackspace servers.Get request. +const GetOutput = ` +{ + "server": { + "OS-DCF:diskConfig": "AUTO", + "OS-EXT-STS:power_state": 1, + "OS-EXT-STS:task_state": null, + "OS-EXT-STS:vm_state": "active", + "accessIPv4": "1.2.4.8", + "accessIPv6": "2001:4800:6666:105:2a0f:c056:f594:7777", + "addresses": { + "private": [ + { + "addr": "10.20.40.80", + "version": 4 + } + ], + "public": [ + { + "addr": "1.2.4.8", + "version": 4 + }, + { + "addr": "2001:4800:6666:105:2a0f:c056:f594:7777", + "version": 6 + } + ] + }, + "created": "2014-10-21T14:42:16Z", + "flavor": { + "id": "performance1-1", + "links": [ + { + "href": "https://dfw.servers.api.rackspacecloud.com/111111/flavors/performance1-1", + "rel": "bookmark" + } + ] + }, + "hostId": "430d2ae02de0a7af77012c94778145eccf67e75b1fac0528aa10d4a7", + "id": "8c65cb68-0681-4c30-bc88-6b83a8a26aee", + "image": { + "id": "e19a734c-c7e6-443a-830c-242209c4d65d", + "links": [ + { + "href": "https://dfw.servers.api.rackspacecloud.com/111111/images/e19a734c-c7e6-443a-830c-242209c4d65d", + "rel": "bookmark" + } + ] + }, + "key_name": null, + "links": [ + { + "href": "https://dfw.servers.api.rackspacecloud.com/v2/111111/servers/8c65cb68-0681-4c30-bc88-6b83a8a26aee", + "rel": "self" + }, + { + "href": "https://dfw.servers.api.rackspacecloud.com/111111/servers/8c65cb68-0681-4c30-bc88-6b83a8a26aee", + "rel": "bookmark" + } + ], + "metadata": {}, + "name": "Gophercloud-pxpGGuey", + "progress": 100, + "status": "ACTIVE", + "tenant_id": "111111", + "updated": "2014-10-21T14:42:57Z", + "user_id": "14ae7bb21d81423694655f4dd30f2930" + } +} +` + +// UpdateOutput is the recorded output of a Rackspace servers.Update request. +const UpdateOutput = ` +{ + "server": { + "OS-DCF:diskConfig": "AUTO", + "OS-EXT-STS:power_state": 1, + "OS-EXT-STS:task_state": null, + "OS-EXT-STS:vm_state": "active", + "accessIPv4": "1.2.4.8", + "accessIPv6": "2001:4800:6666:105:2a0f:c056:f594:7777", + "addresses": { + "private": [ + { + "addr": "10.20.40.80", + "version": 4 + } + ], + "public": [ + { + "addr": "1.2.4.8", + "version": 4 + }, + { + "addr": "2001:4800:6666:105:2a0f:c056:f594:7777", + "version": 6 + } + ] + }, + "created": "2014-10-21T14:42:16Z", + "flavor": { + "id": "performance1-1", + "links": [ + { + "href": "https://dfw.servers.api.rackspacecloud.com/111111/flavors/performance1-1", + "rel": "bookmark" + } + ] + }, + "hostId": "430d2ae02de0a7af77012c94778145eccf67e75b1fac0528aa10d4a7", + "id": "8c65cb68-0681-4c30-bc88-6b83a8a26aee", + "image": { + "id": "e19a734c-c7e6-443a-830c-242209c4d65d", + "links": [ + { + "href": "https://dfw.servers.api.rackspacecloud.com/111111/images/e19a734c-c7e6-443a-830c-242209c4d65d", + "rel": "bookmark" + } + ] + }, + "key_name": null, + "links": [ + { + "href": "https://dfw.servers.api.rackspacecloud.com/v2/111111/servers/8c65cb68-0681-4c30-bc88-6b83a8a26aee", + "rel": "self" + }, + { + "href": "https://dfw.servers.api.rackspacecloud.com/111111/servers/8c65cb68-0681-4c30-bc88-6b83a8a26aee", + "rel": "bookmark" + } + ], + "metadata": {}, + "name": "test-server-updated", + "progress": 100, + "status": "ACTIVE", + "tenant_id": "111111", + "updated": "2014-10-21T14:42:57Z", + "user_id": "14ae7bb21d81423694655f4dd30f2930" + } +} +` + +// CreateOutput contains a sample of Rackspace's response to a Create call. 
+const CreateOutput = ` +{ + "server": { + "OS-DCF:diskConfig": "AUTO", + "adminPass": "v7tADqbE5pr9", + "id": "bb63327b-6a2f-34bc-b0ef-4b6d97ea637e", + "links": [ + { + "href": "https://dfw.servers.api.rackspacecloud.com/v2/111111/servers/bb63327b-6a2f-34bc-b0ef-4b6d97ea637e", + "rel": "self" + }, + { + "href": "https://dfw.servers.api.rackspacecloud.com/111111/servers/bb63327b-6a2f-34bc-b0ef-4b6d97ea637e", + "rel": "bookmark" + } + ] + } +} +` + +// DevstackServer is the expected first result from parsing ListOutput. +var DevstackServer = os.Server{ + ID: "59818cee-bc8c-44eb-8073-673ee65105f7", + Name: "devstack", + TenantID: "111111", + UserID: "14ae7bb21d81422694655f3cc30f2930", + HostID: "e8951a524bc465b0898aeac7674da6fe1495e253ae1ea17ddb2c2475", + Updated: "2014-09-23T12:38:19Z", + Created: "2014-09-23T12:34:58Z", + AccessIPv4: "1.2.3.4", + AccessIPv6: "1111:4822:7818:121:2000:9b5e:7438:a2d0", + Progress: 100, + Status: "ACTIVE", + Image: map[string]interface{}{ + "id": "255df5fb-e3d4-45a3-9a07-c976debf7c14", + "links": []interface{}{ + map[string]interface{}{ + "href": "https://dfw.servers.api.rackspacecloud.com/111111/images/255df5fb-e3d4-45a3-9a07-c976debf7c14", + "rel": "bookmark", + }, + }, + }, + Flavor: map[string]interface{}{ + "id": "performance1-8", + "links": []interface{}{ + map[string]interface{}{ + "href": "https://dfw.servers.api.rackspacecloud.com/111111/flavors/performance1-8", + "rel": "bookmark", + }, + }, + }, + Addresses: map[string]interface{}{ + "private": []interface{}{ + map[string]interface{}{ + "addr": "10.20.30.40", + "version": float64(4.0), + }, + }, + "public": []interface{}{ + map[string]interface{}{ + "addr": "1111:4822:7818:121:2000:9b5e:7438:a2d0", + "version": float64(6.0), + }, + map[string]interface{}{ + "addr": "1.2.3.4", + "version": float64(4.0), + }, + }, + }, + Metadata: map[string]interface{}{}, + Links: []interface{}{ + map[string]interface{}{ + "href": "https://dfw.servers.api.rackspacecloud.com/v2/111111/servers/59918cee-bd9d-44eb-8173-673ee75105f7", + "rel": "self", + }, + map[string]interface{}{ + "href": "https://dfw.servers.api.rackspacecloud.com/v2/111111/servers/59818cee-bc8c-44eb-8073-673ee65105f7", + "rel": "bookmark", + }, + }, + KeyName: "mykey", + AdminPass: "", +} + +// PerilServer is the expected second result from parsing ListOutput. 
+var PerilServer = os.Server{ + ID: "25f1c7f5-e00a-4715-b354-16e24b2f4630", + Name: "peril-dfw", + TenantID: "111111", + UserID: "14ae7bb21d81422694655f3cc30f2930", + HostID: "f859679906d6b1a38c1bd516b78f4dcc7d5fcf012578fa3ce460716c", + Updated: "2014-07-21T19:34:24Z", + Created: "2014-07-21T19:32:55Z", + AccessIPv4: "1.1.2.3", + AccessIPv6: "2222:4444:7817:101:be76:4eff:f0e5:9e02", + Progress: 100, + Status: "ACTIVE", + Image: map[string]interface{}{ + "id": "bb02b1a3-bc77-4d17-ab5b-421d89850fca", + "links": []interface{}{ + map[string]interface{}{ + "href": "https://dfw.servers.api.rackspacecloud.com/111111/images/bb02b1a3-bc77-4d17-ab5b-421d89850fca", + "rel": "bookmark", + }, + }, + }, + Flavor: map[string]interface{}{ + "id": "performance1-2", + "links": []interface{}{ + map[string]interface{}{ + "href": "https://dfw.servers.api.rackspacecloud.com/111111/flavors/performance1-2", + "rel": "bookmark", + }, + }, + }, + Addresses: map[string]interface{}{ + "private": []interface{}{ + map[string]interface{}{ + "addr": "10.10.20.30", + "version": float64(4.0), + }, + }, + "public": []interface{}{ + map[string]interface{}{ + "addr": "2222:4444:7817:101:be76:4eff:f0e5:9e02", + "version": float64(6.0), + }, + map[string]interface{}{ + "addr": "1.1.2.3", + "version": float64(4.0), + }, + }, + }, + Metadata: map[string]interface{}{}, + Links: []interface{}{ + map[string]interface{}{ + "href": "https://dfw.servers.api.rackspacecloud.com/v2/111111/servers/25f1c7f5-e00a-4715-b355-16e24b2f4630", + "rel": "self", + }, + map[string]interface{}{ + "href": "https://dfw.servers.api.rackspacecloud.com/v2/111111/servers/25f1c7f5-e00a-4715-b355-16e24b2f4630", + "rel": "bookmark", + }, + }, + KeyName: "otherkey", + AdminPass: "", +} + +// GophercloudServer is the expected result from parsing GetOutput. 
+var GophercloudServer = os.Server{ + ID: "8c65cb68-0681-4c30-bc88-6b83a8a26aee", + Name: "Gophercloud-pxpGGuey", + TenantID: "111111", + UserID: "14ae7bb21d81423694655f4dd30f2930", + HostID: "430d2ae02de0a7af77012c94778145eccf67e75b1fac0528aa10d4a7", + Updated: "2014-10-21T14:42:57Z", + Created: "2014-10-21T14:42:16Z", + AccessIPv4: "1.2.4.8", + AccessIPv6: "2001:4800:6666:105:2a0f:c056:f594:7777", + Progress: 100, + Status: "ACTIVE", + Image: map[string]interface{}{ + "id": "e19a734c-c7e6-443a-830c-242209c4d65d", + "links": []interface{}{ + map[string]interface{}{ + "href": "https://dfw.servers.api.rackspacecloud.com/111111/images/e19a734c-c7e6-443a-830c-242209c4d65d", + "rel": "bookmark", + }, + }, + }, + Flavor: map[string]interface{}{ + "id": "performance1-1", + "links": []interface{}{ + map[string]interface{}{ + "href": "https://dfw.servers.api.rackspacecloud.com/111111/flavors/performance1-1", + "rel": "bookmark", + }, + }, + }, + Addresses: map[string]interface{}{ + "private": []interface{}{ + map[string]interface{}{ + "addr": "10.20.40.80", + "version": float64(4.0), + }, + }, + "public": []interface{}{ + map[string]interface{}{ + "addr": "2001:4800:6666:105:2a0f:c056:f594:7777", + "version": float64(6.0), + }, + map[string]interface{}{ + "addr": "1.2.4.8", + "version": float64(4.0), + }, + }, + }, + Metadata: map[string]interface{}{}, + Links: []interface{}{ + map[string]interface{}{ + "href": "https://dfw.servers.api.rackspacecloud.com/v2/111111/servers/8c65cb68-0681-4c30-bc88-6b83a8a26aee", + "rel": "self", + }, + map[string]interface{}{ + "href": "https://dfw.servers.api.rackspacecloud.com/111111/servers/8c65cb68-0681-4c30-bc88-6b83a8a26aee", + "rel": "bookmark", + }, + }, + KeyName: "", + AdminPass: "", +} + +// GophercloudUpdatedServer is the expected result from parsing UpdateOutput. 
+var GophercloudUpdatedServer = os.Server{ + ID: "8c65cb68-0681-4c30-bc88-6b83a8a26aee", + Name: "test-server-updated", + TenantID: "111111", + UserID: "14ae7bb21d81423694655f4dd30f2930", + HostID: "430d2ae02de0a7af77012c94778145eccf67e75b1fac0528aa10d4a7", + Updated: "2014-10-21T14:42:57Z", + Created: "2014-10-21T14:42:16Z", + AccessIPv4: "1.2.4.8", + AccessIPv6: "2001:4800:6666:105:2a0f:c056:f594:7777", + Progress: 100, + Status: "ACTIVE", + Image: map[string]interface{}{ + "id": "e19a734c-c7e6-443a-830c-242209c4d65d", + "links": []interface{}{ + map[string]interface{}{ + "href": "https://dfw.servers.api.rackspacecloud.com/111111/images/e19a734c-c7e6-443a-830c-242209c4d65d", + "rel": "bookmark", + }, + }, + }, + Flavor: map[string]interface{}{ + "id": "performance1-1", + "links": []interface{}{ + map[string]interface{}{ + "href": "https://dfw.servers.api.rackspacecloud.com/111111/flavors/performance1-1", + "rel": "bookmark", + }, + }, + }, + Addresses: map[string]interface{}{ + "private": []interface{}{ + map[string]interface{}{ + "addr": "10.20.40.80", + "version": float64(4.0), + }, + }, + "public": []interface{}{ + map[string]interface{}{ + "addr": "2001:4800:6666:105:2a0f:c056:f594:7777", + "version": float64(6.0), + }, + map[string]interface{}{ + "addr": "1.2.4.8", + "version": float64(4.0), + }, + }, + }, + Metadata: map[string]interface{}{}, + Links: []interface{}{ + map[string]interface{}{ + "href": "https://dfw.servers.api.rackspacecloud.com/v2/111111/servers/8c65cb68-0681-4c30-bc88-6b83a8a26aee", + "rel": "self", + }, + map[string]interface{}{ + "href": "https://dfw.servers.api.rackspacecloud.com/111111/servers/8c65cb68-0681-4c30-bc88-6b83a8a26aee", + "rel": "bookmark", + }, + }, + KeyName: "", + AdminPass: "", +} + +// CreatedServer is the partial Server struct that can be parsed from CreateOutput. +var CreatedServer = os.Server{ + ID: "bb63327b-6a2f-34bc-b0ef-4b6d97ea637e", + AdminPass: "v7tADqbE5pr9", + Links: []interface{}{}, +} + +// ExpectedServerSlice is the collection of servers, in order, that should be parsed from ListOutput. +var ExpectedServerSlice = []os.Server{DevstackServer, PerilServer} diff --git a/vendor/github.com/rackspace/gophercloud/rackspace/compute/v2/servers/requests.go b/vendor/github.com/rackspace/gophercloud/rackspace/compute/v2/servers/requests.go new file mode 100644 index 000000000..d4472a080 --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/rackspace/compute/v2/servers/requests.go @@ -0,0 +1,178 @@ +package servers + +import ( + "github.com/rackspace/gophercloud/openstack/compute/v2/extensions/bootfromvolume" + "github.com/rackspace/gophercloud/openstack/compute/v2/extensions/diskconfig" + os "github.com/rackspace/gophercloud/openstack/compute/v2/servers" +) + +// CreateOpts specifies all of the options that Rackspace accepts in its Create request, including +// the union of all extensions that Rackspace supports. +type CreateOpts struct { + // Name [required] is the name to assign to the newly launched server. + Name string + + // ImageRef [optional; required if ImageName is not provided] is the ID or full + // URL to the image that contains the server's OS and initial state. + // Also optional if using the boot-from-volume extension. + ImageRef string + + // ImageName [optional; required if ImageRef is not provided] is the name of the + // image that contains the server's OS and initial state. + // Also optional if using the boot-from-volume extension. 
+ ImageName string + + // FlavorRef [optional; required if FlavorName is not provided] is the ID or + // full URL to the flavor that describes the server's specs. + FlavorRef string + + // FlavorName [optional; required if FlavorRef is not provided] is the name of + // the flavor that describes the server's specs. + FlavorName string + + // SecurityGroups [optional] lists the names of the security groups to which this server should belong. + SecurityGroups []string + + // UserData [optional] contains configuration information or scripts to use upon launch. + // Create will base64-encode it for you. + UserData []byte + + // AvailabilityZone [optional] in which to launch the server. + AvailabilityZone string + + // Networks [optional] dictates how this server will be attached to available networks. + // By default, the server will be attached to all isolated networks for the tenant. + Networks []os.Network + + // Metadata [optional] contains key-value pairs (up to 255 bytes each) to attach to the server. + Metadata map[string]string + + // Personality [optional] includes files to inject into the server at launch. + // Create will base64-encode file contents for you. + Personality os.Personality + + // ConfigDrive [optional] enables metadata injection through a configuration drive. + ConfigDrive bool + + // AdminPass [optional] sets the root user password. If not set, a randomly-generated + // password will be created and returned in the response. + AdminPass string + + // Rackspace-specific extensions begin here. + + // KeyPair [optional] specifies the name of the SSH KeyPair to be injected into the newly launched + // server. See the "keypairs" extension in OpenStack compute v2. + KeyPair string + + // DiskConfig [optional] controls how the created server's disk is partitioned. See the "diskconfig" + // extension in OpenStack compute v2. + DiskConfig diskconfig.DiskConfig + + // BlockDevice [optional] will create the server from a volume, which is created from an image, + // a snapshot, or another volume. + BlockDevice []bootfromvolume.BlockDevice +} + +// ToServerCreateMap constructs a request body using all of the OpenStack extensions that are +// active on Rackspace. +func (opts CreateOpts) ToServerCreateMap() (map[string]interface{}, error) { + base := os.CreateOpts{ + Name: opts.Name, + ImageRef: opts.ImageRef, + ImageName: opts.ImageName, + FlavorRef: opts.FlavorRef, + FlavorName: opts.FlavorName, + SecurityGroups: opts.SecurityGroups, + UserData: opts.UserData, + AvailabilityZone: opts.AvailabilityZone, + Networks: opts.Networks, + Metadata: opts.Metadata, + Personality: opts.Personality, + ConfigDrive: opts.ConfigDrive, + AdminPass: opts.AdminPass, + } + + drive := diskconfig.CreateOptsExt{ + CreateOptsBuilder: base, + DiskConfig: opts.DiskConfig, + } + + res, err := drive.ToServerCreateMap() + if err != nil { + return nil, err + } + + if len(opts.BlockDevice) != 0 { + bfv := bootfromvolume.CreateOptsExt{ + CreateOptsBuilder: drive, + BlockDevice: opts.BlockDevice, + } + + res, err = bfv.ToServerCreateMap() + if err != nil { + return nil, err + } + } + + // key_name doesn't actually come from the extension (or at least isn't documented there) so + // we need to add it manually. + serverMap := res["server"].(map[string]interface{}) + if opts.KeyPair != "" { + serverMap["key_name"] = opts.KeyPair + } + + return res, nil +} + +// RebuildOpts represents all of the configuration options used in a server rebuild operation that +// are supported by Rackspace. 
+type RebuildOpts struct {
+ // ImageID [required] is the ID of the image on which the server will be provisioned.
+ ImageID string
+
+ // Name [optional] is the name to assign to the rebuilt server.
+ Name string
+
+ // AdminPass [required] is the server's new admin password.
+ AdminPass string
+
+ // AccessIPv4 [optional] provides a new IPv4 address for the instance.
+ AccessIPv4 string
+
+ // AccessIPv6 [optional] provides a new IPv6 address for the instance.
+ AccessIPv6 string
+
+ // Metadata [optional] contains key-value pairs (up to 255 bytes each) to attach to the server.
+ Metadata map[string]string
+
+ // Personality [optional] includes files to inject into the server at launch.
+ // Rebuild will base64-encode file contents for you.
+ Personality os.Personality
+
+ // Rackspace-specific extensions begin here.
+
+ // DiskConfig [optional] controls how the created server's disk is partitioned. See the "diskconfig"
+ // extension in OpenStack compute v2.
+ DiskConfig diskconfig.DiskConfig
+}
+
+// ToServerRebuildMap constructs a request body using all of the OpenStack extensions that are
+// active on Rackspace.
+func (opts RebuildOpts) ToServerRebuildMap() (map[string]interface{}, error) {
+ base := os.RebuildOpts{
+ ImageID: opts.ImageID,
+ Name: opts.Name,
+ AdminPass: opts.AdminPass,
+ AccessIPv4: opts.AccessIPv4,
+ AccessIPv6: opts.AccessIPv6,
+ Metadata: opts.Metadata,
+ Personality: opts.Personality,
+ }
+
+ drive := diskconfig.RebuildOptsExt{
+ RebuildOptsBuilder: base,
+ DiskConfig: opts.DiskConfig,
+ }
+
+ return drive.ToServerRebuildMap()
+}
diff --git a/vendor/github.com/rackspace/gophercloud/rackspace/compute/v2/servers/requests_test.go b/vendor/github.com/rackspace/gophercloud/rackspace/compute/v2/servers/requests_test.go
new file mode 100644
index 000000000..828b5dc49
--- /dev/null
+++ b/vendor/github.com/rackspace/gophercloud/rackspace/compute/v2/servers/requests_test.go
@@ -0,0 +1,59 @@
+package servers
+
+import (
+ "testing"
+
+ "github.com/rackspace/gophercloud/openstack/compute/v2/extensions/diskconfig"
+ th "github.com/rackspace/gophercloud/testhelper"
+)
+
+func TestCreateOpts(t *testing.T) {
+ opts := CreateOpts{
+ Name: "createdserver",
+ ImageRef: "image-id",
+ FlavorRef: "flavor-id",
+ KeyPair: "mykey",
+ DiskConfig: diskconfig.Manual,
+ }
+
+ expected := `
+ {
+ "server": {
+ "name": "createdserver",
+ "imageRef": "image-id",
+ "flavorRef": "flavor-id",
+ "flavorName": "",
+ "imageName": "",
+ "key_name": "mykey",
+ "OS-DCF:diskConfig": "MANUAL"
+ }
+ }
+ `
+ actual, err := opts.ToServerCreateMap()
+ th.AssertNoErr(t, err)
+ th.CheckJSONEquals(t, expected, actual)
+}
+
+func TestRebuildOpts(t *testing.T) {
+ opts := RebuildOpts{
+ Name: "rebuiltserver",
+ AdminPass: "swordfish",
+ ImageID: "asdfasdfasdf",
+ DiskConfig: diskconfig.Auto,
+ }
+
+ actual, err := opts.ToServerRebuildMap()
+ th.AssertNoErr(t, err)
+
+ expected := `
+ {
+ "rebuild": {
+ "name": "rebuiltserver",
+ "imageRef": "asdfasdfasdf",
+ "adminPass": "swordfish",
+ "OS-DCF:diskConfig": "AUTO"
+ }
+ }
+ `
+ th.CheckJSONEquals(t, expected, actual)
+}
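The two tests above exercise the request-map builders in isolation; end to end, the same options feed the package-level Create delegate (the one exercised by TestCreateServer in delegate_test.go). A minimal sketch, assuming an authenticated *gophercloud.ServiceClient and placeholder image/flavor IDs:

```go
package example

import (
	"github.com/rackspace/gophercloud"
	"github.com/rackspace/gophercloud/openstack/compute/v2/extensions/diskconfig"
	servers "github.com/rackspace/gophercloud/rackspace/compute/v2/servers"
)

// launchServer sketches a Create call using the extended CreateOpts above.
// The image and flavor refs are placeholders; client is assumed to be an
// authenticated ServiceClient for the compute endpoint.
func launchServer(client *gophercloud.ServiceClient) (string, error) {
	opts := servers.CreateOpts{
		Name:       "example-server",
		ImageRef:   "f90f6034-2570-4974-8351-6b49732ef2eb", // placeholder
		FlavorRef:  "performance1-1",                       // placeholder
		KeyPair:    "mykey",           // injected as key_name by ToServerCreateMap
		DiskConfig: diskconfig.Manual, // serialized as OS-DCF:diskConfig
	}
	server, err := servers.Create(client, opts).Extract()
	if err != nil {
		return "", err
	}
	return server.ID, nil
}
```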
diff --git a/vendor/github.com/rackspace/gophercloud/rackspace/compute/v2/virtualinterfaces/requests.go b/vendor/github.com/rackspace/gophercloud/rackspace/compute/v2/virtualinterfaces/requests.go
new file mode 100644
index 000000000..1ff7c5ae5
--- /dev/null
+++ b/vendor/github.com/rackspace/gophercloud/rackspace/compute/v2/virtualinterfaces/requests.go
@@ -0,0 +1,45 @@
+package virtualinterfaces
+
+import (
+ "github.com/rackspace/gophercloud"
+ "github.com/rackspace/gophercloud/pagination"
+)
+
+// List returns a Pager which allows you to iterate over the collection of
+// virtual interfaces attached to the given server instance. The call accepts
+// no filtering options; every interface on the instance is returned.
+func List(c *gophercloud.ServiceClient, instanceID string) pagination.Pager {
+ createPage := func(r pagination.PageResult) pagination.Page {
+ return VirtualInterfacePage{pagination.SinglePageBase(r)}
+ }
+
+ return pagination.NewPager(c, listURL(c, instanceID), createPage)
+}
+
+// Create creates a new virtual interface for a network and attaches the network
+// to the server instance.
+func Create(c *gophercloud.ServiceClient, instanceID, networkID string) CreateResult {
+ var res CreateResult
+
+ reqBody := map[string]map[string]string{
+ "virtual_interface": {
+ "network_id": networkID,
+ },
+ }
+
+ // Send request to API
+ _, res.Err = c.Post(createURL(c, instanceID), reqBody, &res.Body, &gophercloud.RequestOpts{
+ OkCodes: []int{200, 201, 202},
+ })
+ return res
+}
+
+// Delete deletes the interface with interfaceID attached to the instance with
+// instanceID.
+func Delete(c *gophercloud.ServiceClient, instanceID, interfaceID string) DeleteResult {
+ var res DeleteResult
+ _, res.Err = c.Delete(deleteURL(c, instanceID, interfaceID), &gophercloud.RequestOpts{
+ OkCodes: []int{200, 204},
+ })
+ return res
+}
diff --git a/vendor/github.com/rackspace/gophercloud/rackspace/compute/v2/virtualinterfaces/requests_test.go b/vendor/github.com/rackspace/gophercloud/rackspace/compute/v2/virtualinterfaces/requests_test.go
new file mode 100644
index 000000000..d40af9c46
--- /dev/null
+++ b/vendor/github.com/rackspace/gophercloud/rackspace/compute/v2/virtualinterfaces/requests_test.go
@@ -0,0 +1,165 @@
+package virtualinterfaces
+
+import (
+ "fmt"
+ "net/http"
+ "testing"
+
+ "github.com/rackspace/gophercloud/pagination"
+ th "github.com/rackspace/gophercloud/testhelper"
+ fake "github.com/rackspace/gophercloud/testhelper/client"
+)
+
+func TestList(t *testing.T) {
+ th.SetupHTTP()
+ defer th.TeardownHTTP()
+
+ th.Mux.HandleFunc("/servers/12345/os-virtual-interfacesv2", func(w http.ResponseWriter, r *http.Request) {
+ th.TestMethod(t, r, "GET")
+ th.TestHeader(t, r, "X-Auth-Token", fake.TokenID)
+
+ w.Header().Add("Content-Type", "application/json")
+ w.WriteHeader(http.StatusOK)
+
+ fmt.Fprintf(w, `
+{
+ "virtual_interfaces": [
+ {
+ "id": "de7c6d53-b895-4b4a-963c-517ccb0f0775",
+ "ip_addresses": [
+ {
+ "address": "192.168.0.2",
+ "network_id": "f212726e-6321-4210-9bae-a13f5a33f83f",
+ "network_label": "superprivate_xml"
+ }
+ ],
+ "mac_address": "BC:76:4E:04:85:20"
+ },
+ {
+ "id": "e14e789d-3b98-44a6-9c2d-c23eb1d1465c",
+ "ip_addresses": [
+ {
+ "address": "10.181.1.30",
+ "network_id": "3b324a1b-31b8-4db5-9fe5-4a2067f60297",
+ "network_label": "private"
+ }
+ ],
+ "mac_address": "BC:76:4E:04:81:55"
+ }
+ ]
+}
+ `)
+ })
+
+ client := fake.ServiceClient()
+ count := 0
+
+ err := List(client, "12345").EachPage(func(page pagination.Page) (bool, error) {
+ count++
+ actual, err := ExtractVirtualInterfaces(page)
+ if err != nil {
+ t.Errorf("Failed to extract virtual interfaces: %v", err)
+ return false, err
+ }
+
+ expected := []VirtualInterface{
+ VirtualInterface{
+ MACAddress: "BC:76:4E:04:85:20",
+ IPAddresses: []IPAddress{
+ IPAddress{
+ Address: "192.168.0.2",
+ NetworkID: "f212726e-6321-4210-9bae-a13f5a33f83f",
+ NetworkLabel: "superprivate_xml",
+ },
+ },
+ ID: "de7c6d53-b895-4b4a-963c-517ccb0f0775",
+ },
+ VirtualInterface{
+ MACAddress: "BC:76:4E:04:81:55",
+ IPAddresses: []IPAddress{
+ IPAddress{
+ Address:
"10.181.1.30", + NetworkID: "3b324a1b-31b8-4db5-9fe5-4a2067f60297", + NetworkLabel: "private", + }, + }, + ID: "e14e789d-3b98-44a6-9c2d-c23eb1d1465c", + }, + } + + th.CheckDeepEquals(t, expected, actual) + + return true, nil + }) + th.AssertNoErr(t, err) + th.CheckEquals(t, 1, count) +} + +func TestCreate(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + th.Mux.HandleFunc("/servers/12345/os-virtual-interfacesv2", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "POST") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + th.TestHeader(t, r, "Content-Type", "application/json") + th.TestHeader(t, r, "Accept", "application/json") + th.TestJSONRequest(t, r, ` +{ + "virtual_interface": { + "network_id": "6789" + } +} + `) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusCreated) + + fmt.Fprintf(w, `{ + "virtual_interfaces": [ + { + "id": "de7c6d53-b895-4b4a-963c-517ccb0f0775", + "ip_addresses": [ + { + "address": "192.168.0.2", + "network_id": "f212726e-6321-4210-9bae-a13f5a33f83f", + "network_label": "superprivate_xml" + } + ], + "mac_address": "BC:76:4E:04:85:20" + } + ] + }`) + }) + + expected := &VirtualInterface{ + MACAddress: "BC:76:4E:04:85:20", + IPAddresses: []IPAddress{ + IPAddress{ + Address: "192.168.0.2", + NetworkID: "f212726e-6321-4210-9bae-a13f5a33f83f", + NetworkLabel: "superprivate_xml", + }, + }, + ID: "de7c6d53-b895-4b4a-963c-517ccb0f0775", + } + + actual, err := Create(fake.ServiceClient(), "12345", "6789").Extract() + th.AssertNoErr(t, err) + + th.CheckDeepEquals(t, expected, actual) +} + +func TestDelete(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + th.Mux.HandleFunc("/servers/12345/os-virtual-interfacesv2/6789", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "DELETE") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + w.WriteHeader(http.StatusNoContent) + }) + + res := Delete(fake.ServiceClient(), "12345", "6789") + th.AssertNoErr(t, res.Err) +} diff --git a/vendor/github.com/rackspace/gophercloud/rackspace/compute/v2/virtualinterfaces/results.go b/vendor/github.com/rackspace/gophercloud/rackspace/compute/v2/virtualinterfaces/results.go new file mode 100644 index 000000000..26fa7f31c --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/rackspace/compute/v2/virtualinterfaces/results.go @@ -0,0 +1,81 @@ +package virtualinterfaces + +import ( + "github.com/mitchellh/mapstructure" + "github.com/rackspace/gophercloud" + "github.com/rackspace/gophercloud/pagination" +) + +type commonResult struct { + gophercloud.Result +} + +// Extract is a function that accepts a result and extracts a network resource. +func (r commonResult) Extract() (*VirtualInterface, error) { + if r.Err != nil { + return nil, r.Err + } + + var res struct { + VirtualInterfaces []VirtualInterface `mapstructure:"virtual_interfaces" json:"virtual_interfaces"` + } + + err := mapstructure.Decode(r.Body, &res) + + return &res.VirtualInterfaces[0], err +} + +// CreateResult represents the result of a create operation. +type CreateResult struct { + commonResult +} + +// DeleteResult represents the result of a delete operation. +type DeleteResult struct { + gophercloud.ErrResult +} + +// IPAddress represents a vitual address attached to a VirtualInterface. 
+// IPAddress represents a virtual address attached to a VirtualInterface.
+type IPAddress struct {
+ Address string `mapstructure:"address" json:"address"`
+ NetworkID string `mapstructure:"network_id" json:"network_id"`
+ NetworkLabel string `mapstructure:"network_label" json:"network_label"`
+}
+
+// VirtualInterface represents a virtual interface.
+type VirtualInterface struct {
+ // UUID for the virtual interface
+ ID string `mapstructure:"id" json:"id"`
+
+ MACAddress string `mapstructure:"mac_address" json:"mac_address"`
+
+ IPAddresses []IPAddress `mapstructure:"ip_addresses" json:"ip_addresses"`
+}
+
+// VirtualInterfacePage is the page returned by a pager when traversing over a
+// collection of virtual interfaces.
+type VirtualInterfacePage struct {
+ pagination.SinglePageBase
+}
+
+// IsEmpty returns true if the VirtualInterfacePage contains no VirtualInterfaces.
+func (r VirtualInterfacePage) IsEmpty() (bool, error) {
+ interfaces, err := ExtractVirtualInterfaces(r)
+ if err != nil {
+ return true, err
+ }
+ return len(interfaces) == 0, nil
+}
+
+// ExtractVirtualInterfaces accepts a Page struct, specifically a VirtualInterfacePage struct,
+// and extracts the elements into a slice of VirtualInterface structs. In other words,
+// a generic collection is mapped into a relevant slice.
+func ExtractVirtualInterfaces(page pagination.Page) ([]VirtualInterface, error) {
+ var resp struct {
+ VirtualInterfaces []VirtualInterface `mapstructure:"virtual_interfaces" json:"virtual_interfaces"`
+ }
+
+ err := mapstructure.Decode(page.(VirtualInterfacePage).Body, &resp)
+
+ return resp.VirtualInterfaces, err
+}
diff --git a/vendor/github.com/rackspace/gophercloud/rackspace/compute/v2/virtualinterfaces/urls.go b/vendor/github.com/rackspace/gophercloud/rackspace/compute/v2/virtualinterfaces/urls.go
new file mode 100644
index 000000000..9e5693e84
--- /dev/null
+++ b/vendor/github.com/rackspace/gophercloud/rackspace/compute/v2/virtualinterfaces/urls.go
@@ -0,0 +1,15 @@
+package virtualinterfaces
+
+import "github.com/rackspace/gophercloud"
+
+func listURL(c *gophercloud.ServiceClient, instanceID string) string {
+ return c.ServiceURL("servers", instanceID, "os-virtual-interfacesv2")
+}
+
+func createURL(c *gophercloud.ServiceClient, instanceID string) string {
+ return c.ServiceURL("servers", instanceID, "os-virtual-interfacesv2")
+}
+
+func deleteURL(c *gophercloud.ServiceClient, instanceID, interfaceID string) string {
+ return c.ServiceURL("servers", instanceID, "os-virtual-interfacesv2", interfaceID)
+}
diff --git a/vendor/github.com/rackspace/gophercloud/rackspace/compute/v2/virtualinterfaces/urls_test.go b/vendor/github.com/rackspace/gophercloud/rackspace/compute/v2/virtualinterfaces/urls_test.go
new file mode 100644
index 000000000..6732e4ed9
--- /dev/null
+++ b/vendor/github.com/rackspace/gophercloud/rackspace/compute/v2/virtualinterfaces/urls_test.go
@@ -0,0 +1,32 @@
+package virtualinterfaces
+
+import (
+ "testing"
+
+ "github.com/rackspace/gophercloud"
+ th "github.com/rackspace/gophercloud/testhelper"
+)
+
+const endpoint = "http://localhost:57909/"
+
+func endpointClient() *gophercloud.ServiceClient {
+ return &gophercloud.ServiceClient{Endpoint: endpoint}
+}
+
+func TestCreateURL(t *testing.T) {
+ actual := createURL(endpointClient(), "12345")
+ expected := endpoint + "servers/12345/os-virtual-interfacesv2"
+ th.AssertEquals(t, expected, actual)
+}
+
+func TestListURL(t *testing.T) {
+ actual := listURL(endpointClient(), "12345")
+ expected := endpoint + "servers/12345/os-virtual-interfacesv2"
+ th.AssertEquals(t, expected, actual)
+}
+
+func TestDeleteURL(t *testing.T) {
+ actual := deleteURL(endpointClient(), "12345", "6789")
+ expected := endpoint + "servers/12345/os-virtual-interfacesv2/6789"
+ th.AssertEquals(t, expected, actual)
+}
diff --git a/vendor/github.com/rackspace/gophercloud/rackspace/compute/v2/volumeattach/delegate.go b/vendor/github.com/rackspace/gophercloud/rackspace/compute/v2/volumeattach/delegate.go
new file mode 100644
index 000000000..c6003e0e5
--- /dev/null
+++ b/vendor/github.com/rackspace/gophercloud/rackspace/compute/v2/volumeattach/delegate.go
@@ -0,0 +1,27 @@
+package volumeattach
+
+import (
+ "github.com/rackspace/gophercloud"
+ os "github.com/rackspace/gophercloud/openstack/compute/v2/extensions/volumeattach"
+ "github.com/rackspace/gophercloud/pagination"
+)
+
+// List returns a Pager that allows you to iterate over a collection of VolumeAttachments.
+func List(client *gophercloud.ServiceClient, serverID string) pagination.Pager {
+ return os.List(client, serverID)
+}
+
+// Create requests the creation of a new volume attachment on the server.
+func Create(client *gophercloud.ServiceClient, serverID string, opts os.CreateOptsBuilder) os.CreateResult {
+ return os.Create(client, serverID, opts)
+}
+
+// Get returns public data about a previously created VolumeAttachment.
+func Get(client *gophercloud.ServiceClient, serverID, aID string) os.GetResult {
+ return os.Get(client, serverID, aID)
+}
+
+// Delete requests the deletion of a previously stored VolumeAttachment from the server.
+func Delete(client *gophercloud.ServiceClient, serverID, aID string) os.DeleteResult {
+ return os.Delete(client, serverID, aID)
+}
diff --git a/vendor/github.com/rackspace/gophercloud/rackspace/compute/v2/volumeattach/delegate_test.go b/vendor/github.com/rackspace/gophercloud/rackspace/compute/v2/volumeattach/delegate_test.go
new file mode 100644
index 000000000..f7ef45e62
--- /dev/null
+++ b/vendor/github.com/rackspace/gophercloud/rackspace/compute/v2/volumeattach/delegate_test.go
@@ -0,0 +1,95 @@
+package volumeattach
+
+import (
+ "testing"
+
+ "github.com/rackspace/gophercloud/openstack/compute/v2/extensions/volumeattach"
+ fixtures "github.com/rackspace/gophercloud/openstack/compute/v2/extensions/volumeattach/testing"
+ "github.com/rackspace/gophercloud/pagination"
+ th "github.com/rackspace/gophercloud/testhelper"
+ "github.com/rackspace/gophercloud/testhelper/client"
+)
+
+// FirstVolumeAttachment is the first result in ListOutput.
+var FirstVolumeAttachment = volumeattach.VolumeAttachment{
+ Device: "/dev/vdd",
+ ID: "a26887c6-c47b-4654-abb5-dfadf7d3f803",
+ ServerID: "4d8c3732-a248-40ed-bebc-539a6ffd25c0",
+ VolumeID: "a26887c6-c47b-4654-abb5-dfadf7d3f803",
+}
+
+// SecondVolumeAttachment is the second result in ListOutput.
+var SecondVolumeAttachment = volumeattach.VolumeAttachment{
+ Device: "/dev/vdc",
+ ID: "a26887c6-c47b-4654-abb5-dfadf7d3f804",
+ ServerID: "4d8c3732-a248-40ed-bebc-539a6ffd25c0",
+ VolumeID: "a26887c6-c47b-4654-abb5-dfadf7d3f804",
+}
+
+// ExpectedVolumeAttachmentSlice is the slice of results that should be parsed
+// from ListOutput, in the expected order.
+var ExpectedVolumeAttachmentSlice = []volumeattach.VolumeAttachment{FirstVolumeAttachment, SecondVolumeAttachment}
+
+// CreatedVolumeAttachment is the parsed result from CreatedOutput.
+var CreatedVolumeAttachment = volumeattach.VolumeAttachment{
+ Device: "/dev/vdc",
+ ID: "a26887c6-c47b-4654-abb5-dfadf7d3f804",
+ ServerID: "4d8c3732-a248-40ed-bebc-539a6ffd25c0",
+ VolumeID: "a26887c6-c47b-4654-abb5-dfadf7d3f804",
+}
+
+func TestList(t *testing.T) {
+ th.SetupHTTP()
+ defer th.TeardownHTTP()
+ fixtures.HandleListSuccessfully(t)
+ serverId := "4d8c3732-a248-40ed-bebc-539a6ffd25c0"
+
+ count := 0
+ err := List(client.ServiceClient(), serverId).EachPage(func(page pagination.Page) (bool, error) {
+ count++
+ actual, err := volumeattach.ExtractVolumeAttachments(page)
+ th.AssertNoErr(t, err)
+ th.CheckDeepEquals(t, ExpectedVolumeAttachmentSlice, actual)
+
+ return true, nil
+ })
+ th.AssertNoErr(t, err)
+ th.CheckEquals(t, 1, count)
+}
+
+func TestCreate(t *testing.T) {
+ th.SetupHTTP()
+ defer th.TeardownHTTP()
+ fixtures.HandleCreateSuccessfully(t)
+ serverId := "4d8c3732-a248-40ed-bebc-539a6ffd25c0"
+
+ actual, err := Create(client.ServiceClient(), serverId, volumeattach.CreateOpts{
+ Device: "/dev/vdc",
+ VolumeID: "a26887c6-c47b-4654-abb5-dfadf7d3f804",
+ }).Extract()
+ th.AssertNoErr(t, err)
+ th.CheckDeepEquals(t, &CreatedVolumeAttachment, actual)
+}
+
+func TestGet(t *testing.T) {
+ th.SetupHTTP()
+ defer th.TeardownHTTP()
+ fixtures.HandleGetSuccessfully(t)
+ aId := "a26887c6-c47b-4654-abb5-dfadf7d3f804"
+ serverId := "4d8c3732-a248-40ed-bebc-539a6ffd25c0"
+
+ actual, err := Get(client.ServiceClient(), serverId, aId).Extract()
+ th.AssertNoErr(t, err)
+ th.CheckDeepEquals(t, &SecondVolumeAttachment, actual)
+}
+
+func TestDelete(t *testing.T) {
+ th.SetupHTTP()
+ defer th.TeardownHTTP()
+ fixtures.HandleDeleteSuccessfully(t)
+ aId := "a26887c6-c47b-4654-abb5-dfadf7d3f804"
+ serverId := "4d8c3732-a248-40ed-bebc-539a6ffd25c0"
+
+ err := Delete(client.ServiceClient(), serverId, aId).ExtractErr()
+ th.AssertNoErr(t, err)
+}
diff --git a/vendor/github.com/rackspace/gophercloud/rackspace/compute/v2/volumeattach/doc.go b/vendor/github.com/rackspace/gophercloud/rackspace/compute/v2/volumeattach/doc.go
new file mode 100644
index 000000000..2164908e3
--- /dev/null
+++ b/vendor/github.com/rackspace/gophercloud/rackspace/compute/v2/volumeattach/doc.go
@@ -0,0 +1,3 @@
+// Package volumeattach provides the ability to attach and detach volumes
+// to and from Rackspace server instances.
+package volumeattach
diff --git a/vendor/github.com/rackspace/gophercloud/rackspace/db/v1/backups/doc.go b/vendor/github.com/rackspace/gophercloud/rackspace/db/v1/backups/doc.go
new file mode 100644
index 000000000..664eeadb8
--- /dev/null
+++ b/vendor/github.com/rackspace/gophercloud/rackspace/db/v1/backups/doc.go
@@ -0,0 +1,6 @@
+// Package backups provides information and interaction with the backup API
+// resource in the Rackspace Database service.
+//
+// A backup is a copy of a database instance that can be used to restore it to
+// some defined point in history.
+package backups diff --git a/vendor/github.com/rackspace/gophercloud/rackspace/db/v1/backups/fixtures.go b/vendor/github.com/rackspace/gophercloud/rackspace/db/v1/backups/fixtures.go new file mode 100644 index 000000000..45c2376d6 --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/rackspace/db/v1/backups/fixtures.go @@ -0,0 +1,66 @@ +package backups + +import "time" + +var ( + timestamp = "2015-11-12T14:22:42Z" + timeVal, _ = time.Parse(time.RFC3339, timestamp) +) + +var getResp = ` +{ + "backup": { + "created": "` + timestamp + `", + "description": "My Backup", + "id": "61f12fef-edb1-4561-8122-e7c00ef26a82", + "instance_id": "d4603f69-ec7e-4e9b-803f-600b9205576f", + "locationRef": null, + "name": "snapshot", + "parent_id": null, + "size": 100, + "status": "NEW", + "datastore": { + "version": "5.1", + "type": "MySQL", + "version_id": "20000000-0000-0000-0000-000000000002" + }, + "updated": "` + timestamp + `" + } +} +` + +var createReq = ` +{ + "backup": { + "description": "My Backup", + "instance": "d4603f69-ec7e-4e9b-803f-600b9205576f", + "name": "snapshot" + } +} +` + +var createResp = getResp + +var listResp = ` +{ + "backups": [ + { + "status": "COMPLETED", + "updated": "` + timestamp + `", + "description": "Backup from Restored Instance", + "datastore": { + "version": "5.1", + "type": "MySQL", + "version_id": "20000000-0000-0000-0000-000000000002" + }, + "id": "87972694-4be2-40f5-83f8-501656e0032a", + "size": 0.141026, + "name": "restored_backup", + "created": "` + timestamp + `", + "instance_id": "29af2cd9-0674-48ab-b87a-b160f00208e6", + "parent_id": null, + "locationRef": "http://localhost/path/to/backup" + } + ] +} +` diff --git a/vendor/github.com/rackspace/gophercloud/rackspace/db/v1/backups/requests.go b/vendor/github.com/rackspace/gophercloud/rackspace/db/v1/backups/requests.go new file mode 100644 index 000000000..9170d7865 --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/rackspace/db/v1/backups/requests.go @@ -0,0 +1,138 @@ +package backups + +import ( + "errors" + + "github.com/rackspace/gophercloud" + "github.com/rackspace/gophercloud/pagination" +) + +// CreateOptsBuilder is the top-level interface for creating JSON maps. +type CreateOptsBuilder interface { + ToBackupCreateMap() (map[string]interface{}, error) +} + +// CreateOpts is responsible for configuring newly provisioned backups. +type CreateOpts struct { + // [REQUIRED] The name of the backup. The only restriction is the name must + // be less than 64 characters long. + Name string + + // [REQUIRED] The ID of the instance being backed up. + InstanceID string + + // [OPTIONAL] A human-readable explanation of the backup. + Description string +} + +// ToBackupCreateMap will create a JSON map for the Create operation. +func (opts CreateOpts) ToBackupCreateMap() (map[string]interface{}, error) { + if opts.Name == "" { + return nil, errors.New("Name is a required field") + } + if opts.InstanceID == "" { + return nil, errors.New("InstanceID is a required field") + } + + backup := map[string]interface{}{ + "name": opts.Name, + "instance": opts.InstanceID, + } + + if opts.Description != "" { + backup["description"] = opts.Description + } + + return map[string]interface{}{"backup": backup}, nil +} + +// Create asynchronously creates a new backup for a specified database instance. +// During the backup process, write access on MyISAM databases will be +// temporarily disabled; innoDB databases will be unaffected. 
During this time,
+// you will not be able to add or delete databases or users, nor delete, stop,
+// or reboot the instance itself. Only one backup is permitted at a time.
+//
+// Backups are not deleted when database instances are deleted; you must
+// manually delete any backups you no longer need via Delete(). Backups are
+// saved to your Cloud Files account in a new container called
+// z_CLOUDDB_BACKUPS. It is strongly recommended you do not alter this
+// container or its contents; usual storage costs apply.
+func Create(client *gophercloud.ServiceClient, opts CreateOptsBuilder) CreateResult {
+ var res CreateResult
+
+ reqBody, err := opts.ToBackupCreateMap()
+ if err != nil {
+ res.Err = err
+ return res
+ }
+
+ _, res.Err = client.Request("POST", baseURL(client), gophercloud.RequestOpts{
+ JSONBody: &reqBody,
+ JSONResponse: &res.Body,
+ OkCodes: []int{202},
+ })
+
+ return res
+}
+
+// ListOptsBuilder is the top-level interface for creating query strings.
+type ListOptsBuilder interface {
+ ToBackupListQuery() (string, error)
+}
+
+// ListOpts allows you to refine a list search by certain parameters.
+type ListOpts struct {
+ // The type of datastore by which to filter.
+ Datastore string `q:"datastore"`
+}
+
+// ToBackupListQuery converts a ListOpts struct into a query string.
+func (opts ListOpts) ToBackupListQuery() (string, error) {
+ q, err := gophercloud.BuildQueryString(opts)
+ if err != nil {
+ return "", err
+ }
+ return q.String(), nil
+}
+
+// List will list all the saved backups for all database instances.
+func List(client *gophercloud.ServiceClient, opts ListOptsBuilder) pagination.Pager {
+ url := baseURL(client)
+
+ if opts != nil {
+ query, err := opts.ToBackupListQuery()
+ if err != nil {
+ return pagination.Pager{Err: err}
+ }
+ url += query
+ }
+
+ pageFn := func(r pagination.PageResult) pagination.Page {
+ return BackupPage{pagination.SinglePageBase(r)}
+ }
+
+ return pagination.NewPager(client, url, pageFn)
+}
+
+// Get will retrieve details for a particular backup based on its unique ID.
+func Get(client *gophercloud.ServiceClient, id string) GetResult {
+ var res GetResult
+
+ _, res.Err = client.Request("GET", resourceURL(client, id), gophercloud.RequestOpts{
+ JSONResponse: &res.Body,
+ OkCodes: []int{200},
+ })
+
+ return res
+}
+
+// Delete will permanently delete a backup.
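Because Create is asynchronous, callers typically poll Get until the backup settles into a terminal state. A minimal sketch of that loop, assuming an authenticated *gophercloud.ServiceClient, a placeholder instance ID, and an arbitrary polling interval (the Status constants are defined in results.go later in this patch):

```go
package example

import (
	"time"

	"github.com/rackspace/gophercloud"
	"github.com/rackspace/gophercloud/rackspace/db/v1/backups"
)

// backupAndWait starts a backup and polls Get until the backup either
// completes or fails. instanceID is a placeholder; the five-second
// interval is arbitrary.
func backupAndWait(client *gophercloud.ServiceClient, instanceID string) (*backups.Backup, error) {
	b, err := backups.Create(client, backups.CreateOpts{
		Name:        "nightly",
		InstanceID:  instanceID,
		Description: "example backup",
	}).Extract()
	if err != nil {
		return nil, err
	}
	for {
		b, err = backups.Get(client, b.ID).Extract()
		if err != nil || b.Status == backups.StatusCompleted || b.Status == backups.StatusFailed {
			return b, err
		}
		time.Sleep(5 * time.Second)
	}
}
```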
+func Delete(client *gophercloud.ServiceClient, id string) DeleteResult { + var res DeleteResult + + _, res.Err = client.Request("DELETE", resourceURL(client, id), gophercloud.RequestOpts{ + OkCodes: []int{202}, + }) + + return res +} diff --git a/vendor/github.com/rackspace/gophercloud/rackspace/db/v1/backups/requests_test.go b/vendor/github.com/rackspace/gophercloud/rackspace/db/v1/backups/requests_test.go new file mode 100644 index 000000000..d7067330f --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/rackspace/db/v1/backups/requests_test.go @@ -0,0 +1,131 @@ +package backups + +import ( + "testing" + + "github.com/rackspace/gophercloud/openstack/db/v1/datastores" + "github.com/rackspace/gophercloud/pagination" + th "github.com/rackspace/gophercloud/testhelper" + fake "github.com/rackspace/gophercloud/testhelper/client" + "github.com/rackspace/gophercloud/testhelper/fixture" +) + +var ( + backupID = "{backupID}" + _rootURL = "/backups" + resURL = _rootURL + "/" + backupID +) + +func TestCreate(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + fixture.SetupHandler(t, _rootURL, "POST", createReq, createResp, 202) + + opts := CreateOpts{ + Name: "snapshot", + Description: "My Backup", + InstanceID: "d4603f69-ec7e-4e9b-803f-600b9205576f", + } + + instance, err := Create(fake.ServiceClient(), opts).Extract() + th.AssertNoErr(t, err) + + expected := &Backup{ + Created: timeVal, + Description: "My Backup", + ID: "61f12fef-edb1-4561-8122-e7c00ef26a82", + InstanceID: "d4603f69-ec7e-4e9b-803f-600b9205576f", + LocationRef: "", + Name: "snapshot", + ParentID: "", + Size: 100, + Status: "NEW", + Updated: timeVal, + Datastore: datastores.DatastorePartial{ + Version: "5.1", + Type: "MySQL", + VersionID: "20000000-0000-0000-0000-000000000002", + }, + } + + th.AssertDeepEquals(t, expected, instance) +} + +func TestList(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + fixture.SetupHandler(t, _rootURL, "GET", "", listResp, 200) + + pages := 0 + + err := List(fake.ServiceClient(), nil).EachPage(func(page pagination.Page) (bool, error) { + pages++ + actual, err := ExtractBackups(page) + th.AssertNoErr(t, err) + + expected := []Backup{ + Backup{ + Created: timeVal, + Description: "Backup from Restored Instance", + ID: "87972694-4be2-40f5-83f8-501656e0032a", + InstanceID: "29af2cd9-0674-48ab-b87a-b160f00208e6", + LocationRef: "http://localhost/path/to/backup", + Name: "restored_backup", + ParentID: "", + Size: 0.141026, + Status: "COMPLETED", + Updated: timeVal, + Datastore: datastores.DatastorePartial{ + Version: "5.1", + Type: "MySQL", + VersionID: "20000000-0000-0000-0000-000000000002", + }, + }, + } + + th.AssertDeepEquals(t, expected, actual) + + return true, nil + }) + + th.AssertNoErr(t, err) + th.AssertEquals(t, 1, pages) +} + +func TestGet(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + fixture.SetupHandler(t, resURL, "GET", "", getResp, 200) + + instance, err := Get(fake.ServiceClient(), backupID).Extract() + th.AssertNoErr(t, err) + + expected := &Backup{ + Created: timeVal, + Description: "My Backup", + ID: "61f12fef-edb1-4561-8122-e7c00ef26a82", + InstanceID: "d4603f69-ec7e-4e9b-803f-600b9205576f", + LocationRef: "", + Name: "snapshot", + ParentID: "", + Size: 100, + Status: "NEW", + Updated: timeVal, + Datastore: datastores.DatastorePartial{ + Version: "5.1", + Type: "MySQL", + VersionID: "20000000-0000-0000-0000-000000000002", + }, + } + + th.AssertDeepEquals(t, expected, instance) +} + +func TestDelete(t *testing.T) { + th.SetupHTTP() + defer 
th.TeardownHTTP() + fixture.SetupHandler(t, resURL, "DELETE", "", "", 202) + + err := Delete(fake.ServiceClient(), backupID).ExtractErr() + th.AssertNoErr(t, err) +} diff --git a/vendor/github.com/rackspace/gophercloud/rackspace/db/v1/backups/results.go b/vendor/github.com/rackspace/gophercloud/rackspace/db/v1/backups/results.go new file mode 100644 index 000000000..04faf3227 --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/rackspace/db/v1/backups/results.go @@ -0,0 +1,149 @@ +package backups + +import ( + "fmt" + "reflect" + "time" + + "github.com/mitchellh/mapstructure" + "github.com/rackspace/gophercloud" + "github.com/rackspace/gophercloud/openstack/db/v1/datastores" + "github.com/rackspace/gophercloud/pagination" +) + +// Status represents the various states a Backup can be in. +type Status string + +// Enum types for the status. +const ( + StatusNew Status = "NEW" + StatusBuilding Status = "BUILDING" + StatusCompleted Status = "COMPLETED" + StatusFailed Status = "FAILED" + StatusDeleteFailed Status = "DELETE_FAILED" +) + +// Backup represents a Backup API resource. +type Backup struct { + Description string + ID string + InstanceID string `json:"instance_id" mapstructure:"instance_id"` + LocationRef string + Name string + ParentID string `json:"parent_id" mapstructure:"parent_id"` + Size float64 + Status Status + Created time.Time `mapstructure:"-"` + Updated time.Time `mapstructure:"-"` + Datastore datastores.DatastorePartial +} + +// CreateResult represents the result of a create operation. +type CreateResult struct { + commonResult +} + +// GetResult represents the result of a get operation. +type GetResult struct { + commonResult +} + +// DeleteResult represents the result of a delete operation. +type DeleteResult struct { + gophercloud.ErrResult +} + +type commonResult struct { + gophercloud.Result +} + +// Extract will retrieve a Backup struct from an operation's result. +func (r commonResult) Extract() (*Backup, error) { + if r.Err != nil { + return nil, r.Err + } + + var response struct { + Backup Backup `mapstructure:"backup"` + } + + err := mapstructure.Decode(r.Body, &response) + val := r.Body.(map[string]interface{})["backup"].(map[string]interface{}) + + if t, ok := val["created"].(string); ok && t != "" { + creationTime, err := time.Parse(time.RFC3339, t) + if err != nil { + return &response.Backup, err + } + response.Backup.Created = creationTime + } + + if t, ok := val["updated"].(string); ok && t != "" { + updatedTime, err := time.Parse(time.RFC3339, t) + if err != nil { + return &response.Backup, err + } + response.Backup.Updated = updatedTime + } + + return &response.Backup, err +} + +// BackupPage represents a page of backups. +type BackupPage struct { + pagination.SinglePageBase +} + +// IsEmpty checks whether an BackupPage struct is empty. +func (r BackupPage) IsEmpty() (bool, error) { + is, err := ExtractBackups(r) + if err != nil { + return true, err + } + return len(is) == 0, nil +} + +// ExtractBackups will retrieve a slice of Backup structs from a paginated collection. 
+func ExtractBackups(page pagination.Page) ([]Backup, error) { + casted := page.(BackupPage).Body + + var resp struct { + Backups []Backup `mapstructure:"backups" json:"backups"` + } + + if err := mapstructure.Decode(casted, &resp); err != nil { + return nil, err + } + + var vals []interface{} + switch casted.(type) { + case map[string]interface{}: + vals = casted.(map[string]interface{})["backups"].([]interface{}) + case map[string][]interface{}: + vals = casted.(map[string][]interface{})["backups"] + default: + return resp.Backups, fmt.Errorf("Unknown type: %v", reflect.TypeOf(casted)) + } + + for i, v := range vals { + val := v.(map[string]interface{}) + + if t, ok := val["created"].(string); ok && t != "" { + creationTime, err := time.Parse(time.RFC3339, t) + if err != nil { + return resp.Backups, err + } + resp.Backups[i].Created = creationTime + } + + if t, ok := val["updated"].(string); ok && t != "" { + updatedTime, err := time.Parse(time.RFC3339, t) + if err != nil { + return resp.Backups, err + } + resp.Backups[i].Updated = updatedTime + } + } + + return resp.Backups, nil +} diff --git a/vendor/github.com/rackspace/gophercloud/rackspace/db/v1/backups/urls.go b/vendor/github.com/rackspace/gophercloud/rackspace/db/v1/backups/urls.go new file mode 100644 index 000000000..553444e88 --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/rackspace/db/v1/backups/urls.go @@ -0,0 +1,11 @@ +package backups + +import "github.com/rackspace/gophercloud" + +func baseURL(c *gophercloud.ServiceClient) string { + return c.ServiceURL("backups") +} + +func resourceURL(c *gophercloud.ServiceClient, backupID string) string { + return c.ServiceURL("backups", backupID) +} diff --git a/vendor/github.com/rackspace/gophercloud/rackspace/db/v1/configurations/delegate.go b/vendor/github.com/rackspace/gophercloud/rackspace/db/v1/configurations/delegate.go new file mode 100644 index 000000000..d8cb48ac5 --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/rackspace/db/v1/configurations/delegate.go @@ -0,0 +1,79 @@ +package configurations + +import ( + "github.com/rackspace/gophercloud" + os "github.com/rackspace/gophercloud/openstack/db/v1/configurations" + "github.com/rackspace/gophercloud/pagination" +) + +// List will list all of the available configurations. +func List(client *gophercloud.ServiceClient) pagination.Pager { + return os.List(client) +} + +// Create will create a new configuration group. +func Create(client *gophercloud.ServiceClient, opts os.CreateOptsBuilder) os.CreateResult { + return os.Create(client, opts) +} + +// Get will retrieve the details for a specified configuration group. +func Get(client *gophercloud.ServiceClient, configID string) os.GetResult { + return os.Get(client, configID) +} + +// Update will modify an existing configuration group by performing a merge +// between new and existing values. If the key already exists, the new value +// will overwrite. All other keys will remain unaffected. +func Update(client *gophercloud.ServiceClient, configID string, opts os.UpdateOptsBuilder) os.UpdateResult { + return os.Update(client, configID, opts) +} + +// Replace will modify an existing configuration group by overwriting the +// entire parameter group with the new values provided. Any existing keys not +// included in UpdateOptsBuilder will be deleted. +func Replace(client *gophercloud.ServiceClient, configID string, opts os.UpdateOptsBuilder) os.ReplaceResult { + return os.Replace(client, configID, opts) +} + +// Delete will permanently delete a configuration group. 
Please note that +// config groups cannot be deleted whilst still attached to running instances - +// you must detach and then delete them. +func Delete(client *gophercloud.ServiceClient, configID string) os.DeleteResult { + return os.Delete(client, configID) +} + +// ListInstances will list all the instances associated with a particular +// configuration group. +func ListInstances(client *gophercloud.ServiceClient, configID string) pagination.Pager { + return os.ListInstances(client, configID) +} + +// ListDatastoreParams will list all the available and supported parameters +// that can be used for a particular datastore ID and a particular version. +// For example, if you are wondering how you can configure a MySQL 5.6 instance, +// you can use this operation (you will need to retrieve the MySQL datastore ID +// by using the datastores API). +func ListDatastoreParams(client *gophercloud.ServiceClient, datastoreID, versionID string) pagination.Pager { + return os.ListDatastoreParams(client, datastoreID, versionID) +} + +// GetDatastoreParam will retrieve information about a specific configuration +// parameter. For example, you can use this operation to understand more about +// "innodb_file_per_table" configuration param for MySQL datastores. You will +// need the param's ID first, which can be attained by using the ListDatastoreParams +// operation. +func GetDatastoreParam(client *gophercloud.ServiceClient, datastoreID, versionID, paramID string) os.ParamResult { + return os.GetDatastoreParam(client, datastoreID, versionID, paramID) +} + +// ListGlobalParams is similar to ListDatastoreParams but does not require a +// DatastoreID. +func ListGlobalParams(client *gophercloud.ServiceClient, versionID string) pagination.Pager { + return os.ListGlobalParams(client, versionID) +} + +// GetGlobalParam is similar to GetDatastoreParam but does not require a +// DatastoreID. 
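+//
+// A minimal usage sketch (illustrative only; it assumes an authenticated
+// *gophercloud.ServiceClient named client and IDs discovered via the
+// ListGlobalParams operation):
+//
+//	param, err := GetGlobalParam(client, versionID, paramID).Extract()
+//	if err == nil {
+//		fmt.Printf("%s (restart required: %v)\n", param.Name, param.RestartRequired)
+//	}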
+func GetGlobalParam(client *gophercloud.ServiceClient, versionID, paramID string) os.ParamResult { + return os.GetGlobalParam(client, versionID, paramID) +} diff --git a/vendor/github.com/rackspace/gophercloud/rackspace/db/v1/configurations/delegate_test.go b/vendor/github.com/rackspace/gophercloud/rackspace/db/v1/configurations/delegate_test.go new file mode 100644 index 000000000..580f02a54 --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/rackspace/db/v1/configurations/delegate_test.go @@ -0,0 +1,237 @@ +package configurations + +import ( + "testing" + + os "github.com/rackspace/gophercloud/openstack/db/v1/configurations" + "github.com/rackspace/gophercloud/pagination" + "github.com/rackspace/gophercloud/rackspace/db/v1/instances" + th "github.com/rackspace/gophercloud/testhelper" + fake "github.com/rackspace/gophercloud/testhelper/client" + "github.com/rackspace/gophercloud/testhelper/fixture" +) + +var ( + configID = "{configID}" + _baseURL = "/configurations" + resURL = _baseURL + "/" + configID + + dsID = "{datastoreID}" + versionID = "{versionID}" + paramID = "{paramID}" + dsParamListURL = "/datastores/" + dsID + "/versions/" + versionID + "/parameters" + dsParamGetURL = "/datastores/" + dsID + "/versions/" + versionID + "/parameters/" + paramID + globalParamListURL = "/datastores/versions/" + versionID + "/parameters" + globalParamGetURL = "/datastores/versions/" + versionID + "/parameters/" + paramID +) + +func TestList(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + fixture.SetupHandler(t, _baseURL, "GET", "", listConfigsJSON, 200) + + count := 0 + err := List(fake.ServiceClient()).EachPage(func(page pagination.Page) (bool, error) { + count++ + actual, err := os.ExtractConfigs(page) + th.AssertNoErr(t, err) + + expected := []os.Config{exampleConfig} + th.AssertDeepEquals(t, expected, actual) + + return true, nil + }) + + th.AssertEquals(t, 1, count) + th.AssertNoErr(t, err) +} + +func TestGet(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + fixture.SetupHandler(t, resURL, "GET", "", getConfigJSON, 200) + + config, err := Get(fake.ServiceClient(), configID).Extract() + th.AssertNoErr(t, err) + th.AssertDeepEquals(t, &exampleConfig, config) +} + +func TestCreate(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + fixture.SetupHandler(t, _baseURL, "POST", createReq, createConfigJSON, 200) + + opts := os.CreateOpts{ + Datastore: &os.DatastoreOpts{ + Type: "a00000a0-00a0-0a00-00a0-000a000000aa", + Version: "b00000b0-00b0-0b00-00b0-000b000000bb", + }, + Description: "example description", + Name: "example-configuration-name", + Values: map[string]interface{}{ + "collation_server": "latin1_swedish_ci", + "connect_timeout": 120, + }, + } + + config, err := Create(fake.ServiceClient(), opts).Extract() + th.AssertNoErr(t, err) + th.AssertDeepEquals(t, &exampleConfigWithValues, config) +} + +func TestUpdate(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + fixture.SetupHandler(t, resURL, "PATCH", updateReq, "", 200) + + opts := os.UpdateOpts{ + Values: map[string]interface{}{ + "connect_timeout": 300, + }, + } + + err := Update(fake.ServiceClient(), configID, opts).ExtractErr() + th.AssertNoErr(t, err) +} + +func TestReplace(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + fixture.SetupHandler(t, resURL, "PUT", updateReq, "", 202) + + opts := os.UpdateOpts{ + Values: map[string]interface{}{ + "connect_timeout": 300, + }, + } + + err := Replace(fake.ServiceClient(), configID, opts).ExtractErr() + th.AssertNoErr(t, err) +} + 
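+// Note that Update and Replace share the same request body fixture
+// (updateReq) but differ on the wire: Update issues a PATCH and expects a
+// 200, while Replace issues a PUT and expects a 202.
+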
+func TestDelete(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + fixture.SetupHandler(t, resURL, "DELETE", "", "", 202) + + err := Delete(fake.ServiceClient(), configID).ExtractErr() + th.AssertNoErr(t, err) +} + +func TestListInstances(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + fixture.SetupHandler(t, resURL+"/instances", "GET", "", listInstancesJSON, 200) + + expectedInstance := instances.Instance{ + ID: "d4603f69-ec7e-4e9b-803f-600b9205576f", + Name: "json_rack_instance", + } + + pages := 0 + err := ListInstances(fake.ServiceClient(), configID).EachPage(func(page pagination.Page) (bool, error) { + pages++ + + actual, err := instances.ExtractInstances(page) + if err != nil { + return false, err + } + + th.AssertDeepEquals(t, actual, []instances.Instance{expectedInstance}) + + return true, nil + }) + + th.AssertNoErr(t, err) + th.AssertEquals(t, 1, pages) +} + +func TestListDSParams(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + fixture.SetupHandler(t, dsParamListURL, "GET", "", listParamsJSON, 200) + + pages := 0 + err := ListDatastoreParams(fake.ServiceClient(), dsID, versionID).EachPage(func(page pagination.Page) (bool, error) { + pages++ + + actual, err := os.ExtractParams(page) + if err != nil { + return false, err + } + + expected := []os.Param{ + os.Param{Max: 1, Min: 0, Name: "innodb_file_per_table", RestartRequired: true, Type: "integer"}, + os.Param{Max: 4294967296, Min: 0, Name: "key_buffer_size", RestartRequired: false, Type: "integer"}, + os.Param{Max: 65535, Min: 2, Name: "connect_timeout", RestartRequired: false, Type: "integer"}, + os.Param{Max: 4294967296, Min: 0, Name: "join_buffer_size", RestartRequired: false, Type: "integer"}, + } + + th.AssertDeepEquals(t, actual, expected) + + return true, nil + }) + + th.AssertNoErr(t, err) + th.AssertEquals(t, 1, pages) +} + +func TestGetDSParam(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + fixture.SetupHandler(t, dsParamGetURL, "GET", "", getParamJSON, 200) + + param, err := GetDatastoreParam(fake.ServiceClient(), dsID, versionID, paramID).Extract() + th.AssertNoErr(t, err) + + expected := &os.Param{ + Max: 1, Min: 0, Name: "innodb_file_per_table", RestartRequired: true, Type: "integer", + } + + th.AssertDeepEquals(t, expected, param) +} + +func TestListGlobalParams(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + fixture.SetupHandler(t, globalParamListURL, "GET", "", listParamsJSON, 200) + + pages := 0 + err := ListGlobalParams(fake.ServiceClient(), versionID).EachPage(func(page pagination.Page) (bool, error) { + pages++ + + actual, err := os.ExtractParams(page) + if err != nil { + return false, err + } + + expected := []os.Param{ + os.Param{Max: 1, Min: 0, Name: "innodb_file_per_table", RestartRequired: true, Type: "integer"}, + os.Param{Max: 4294967296, Min: 0, Name: "key_buffer_size", RestartRequired: false, Type: "integer"}, + os.Param{Max: 65535, Min: 2, Name: "connect_timeout", RestartRequired: false, Type: "integer"}, + os.Param{Max: 4294967296, Min: 0, Name: "join_buffer_size", RestartRequired: false, Type: "integer"}, + } + + th.AssertDeepEquals(t, actual, expected) + + return true, nil + }) + + th.AssertNoErr(t, err) + th.AssertEquals(t, 1, pages) +} + +func TestGetGlobalParam(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + fixture.SetupHandler(t, globalParamGetURL, "GET", "", getParamJSON, 200) + + param, err := GetGlobalParam(fake.ServiceClient(), versionID, paramID).Extract() + th.AssertNoErr(t, err) + + expected := &os.Param{ + 
Max: 1, Min: 0, Name: "innodb_file_per_table", RestartRequired: true, Type: "integer", + } + + th.AssertDeepEquals(t, expected, param) +} diff --git a/vendor/github.com/rackspace/gophercloud/rackspace/db/v1/configurations/doc.go b/vendor/github.com/rackspace/gophercloud/rackspace/db/v1/configurations/doc.go new file mode 100644 index 000000000..48c51d60d --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/rackspace/db/v1/configurations/doc.go @@ -0,0 +1 @@ +package configurations diff --git a/vendor/github.com/rackspace/gophercloud/rackspace/db/v1/configurations/fixtures.go b/vendor/github.com/rackspace/gophercloud/rackspace/db/v1/configurations/fixtures.go new file mode 100644 index 000000000..d8a2233bc --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/rackspace/db/v1/configurations/fixtures.go @@ -0,0 +1,159 @@ +package configurations + +import ( + "fmt" + "time" + + os "github.com/rackspace/gophercloud/openstack/db/v1/configurations" +) + +var ( + timestamp = "2015-11-12T14:22:42Z" + timeVal, _ = time.Parse(time.RFC3339, timestamp) +) + +var singleConfigJSON = ` +{ + "created": "` + timestamp + `", + "datastore_name": "mysql", + "datastore_version_id": "b00000b0-00b0-0b00-00b0-000b000000bb", + "datastore_version_name": "5.6", + "description": "example_description", + "id": "005a8bb7-a8df-40ee-b0b7-fc144641abc2", + "name": "example-configuration-name", + "updated": "` + timestamp + `" +} +` + +var singleConfigWithValuesJSON = ` +{ + "created": "` + timestamp + `", + "datastore_name": "mysql", + "datastore_version_id": "b00000b0-00b0-0b00-00b0-000b000000bb", + "datastore_version_name": "5.6", + "description": "example description", + "id": "005a8bb7-a8df-40ee-b0b7-fc144641abc2", + "instance_count": 0, + "name": "example-configuration-name", + "updated": "` + timestamp + `", + "values": { + "collation_server": "latin1_swedish_ci", + "connect_timeout": 120 + } +} +` + +var ( + listConfigsJSON = fmt.Sprintf(`{"configurations": [%s]}`, singleConfigJSON) + getConfigJSON = fmt.Sprintf(`{"configuration": %s}`, singleConfigJSON) + createConfigJSON = fmt.Sprintf(`{"configuration": %s}`, singleConfigWithValuesJSON) +) + +var createReq = ` +{ + "configuration": { + "datastore": { + "type": "a00000a0-00a0-0a00-00a0-000a000000aa", + "version": "b00000b0-00b0-0b00-00b0-000b000000bb" + }, + "description": "example description", + "name": "example-configuration-name", + "values": { + "collation_server": "latin1_swedish_ci", + "connect_timeout": 120 + } + } +} +` + +var updateReq = ` +{ + "configuration": { + "values": { + "connect_timeout": 300 + } + } +} +` + +var listInstancesJSON = ` +{ + "instances": [ + { + "id": "d4603f69-ec7e-4e9b-803f-600b9205576f", + "name": "json_rack_instance" + } + ] +} +` + +var listParamsJSON = ` +{ + "configuration-parameters": [ + { + "max": 1, + "min": 0, + "name": "innodb_file_per_table", + "restart_required": true, + "type": "integer" + }, + { + "max": 4294967296, + "min": 0, + "name": "key_buffer_size", + "restart_required": false, + "type": "integer" + }, + { + "max": 65535, + "min": 2, + "name": "connect_timeout", + "restart_required": false, + "type": "integer" + }, + { + "max": 4294967296, + "min": 0, + "name": "join_buffer_size", + "restart_required": false, + "type": "integer" + } + ] +} +` + +var getParamJSON = ` +{ + "max": 1, + "min": 0, + "name": "innodb_file_per_table", + "restart_required": true, + "type": "integer" +} +` + +var exampleConfig = os.Config{ + Created: timeVal, + DatastoreName: "mysql", + DatastoreVersionID: 
"b00000b0-00b0-0b00-00b0-000b000000bb", + DatastoreVersionName: "5.6", + Description: "example_description", + ID: "005a8bb7-a8df-40ee-b0b7-fc144641abc2", + Name: "example-configuration-name", + Updated: timeVal, +} + +var exampleConfigWithValues = os.Config{ + Created: timeVal, + DatastoreName: "mysql", + DatastoreVersionID: "b00000b0-00b0-0b00-00b0-000b000000bb", + DatastoreVersionName: "5.6", + Description: "example description", + ID: "005a8bb7-a8df-40ee-b0b7-fc144641abc2", + Name: "example-configuration-name", + Updated: timeVal, + Values: map[string]interface{}{ + "collation_server": "latin1_swedish_ci", + "connect_timeout": 120, + }, +} diff --git a/vendor/github.com/rackspace/gophercloud/rackspace/db/v1/databases/delegate.go b/vendor/github.com/rackspace/gophercloud/rackspace/db/v1/databases/delegate.go new file mode 100644 index 000000000..56552d187 --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/rackspace/db/v1/databases/delegate.go @@ -0,0 +1,19 @@ +package databases + +import ( + "github.com/rackspace/gophercloud" + os "github.com/rackspace/gophercloud/openstack/db/v1/databases" + "github.com/rackspace/gophercloud/pagination" +) + +func Create(client *gophercloud.ServiceClient, instanceID string, opts os.CreateOptsBuilder) os.CreateResult { + return os.Create(client, instanceID, opts) +} + +func List(client *gophercloud.ServiceClient, instanceID string) pagination.Pager { + return os.List(client, instanceID) +} + +func Delete(client *gophercloud.ServiceClient, instanceID, dbName string) os.DeleteResult { + return os.Delete(client, instanceID, dbName) +} diff --git a/vendor/github.com/rackspace/gophercloud/rackspace/db/v1/databases/delegate_test.go b/vendor/github.com/rackspace/gophercloud/rackspace/db/v1/databases/delegate_test.go new file mode 100644 index 000000000..b9e50a53f --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/rackspace/db/v1/databases/delegate_test.go @@ -0,0 +1,71 @@ +package databases + +import ( + "testing" + + os "github.com/rackspace/gophercloud/openstack/db/v1/databases" + "github.com/rackspace/gophercloud/pagination" + th "github.com/rackspace/gophercloud/testhelper" + fake "github.com/rackspace/gophercloud/testhelper/client" +) + +var ( + instanceID = "{instanceID}" + rootURL = "/instances" + resURL = rootURL + "/" + instanceID + uRootURL = resURL + "/root" + aURL = resURL + "/action" +) + +func TestCreate(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + os.HandleCreate(t) + + opts := os.BatchCreateOpts{ + os.CreateOpts{Name: "testingdb", CharSet: "utf8", Collate: "utf8_general_ci"}, + os.CreateOpts{Name: "sampledb"}, + } + + res := Create(fake.ServiceClient(), instanceID, opts) + th.AssertNoErr(t, res.Err) +} + +func TestList(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + os.HandleList(t) + + expectedDBs := []os.Database{ + os.Database{Name: "anotherexampledb"}, + os.Database{Name: "exampledb"}, + os.Database{Name: "nextround"}, + os.Database{Name: "sampledb"}, + os.Database{Name: "testingdb"}, + } + + pages := 0 + err := List(fake.ServiceClient(), instanceID).EachPage(func(page pagination.Page) (bool, error) { + pages++ + + actual, err := os.ExtractDBs(page) + if err != nil { + return false, err + } + + th.CheckDeepEquals(t, expectedDBs, actual) + return true, nil + }) + + th.AssertNoErr(t, err) + th.AssertEquals(t, 1, pages) +} + +func TestDelete(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + os.HandleDelete(t) + + err := os.Delete(fake.ServiceClient(), instanceID, 
"{dbName}").ExtractErr() + th.AssertNoErr(t, err) +} diff --git a/vendor/github.com/rackspace/gophercloud/rackspace/db/v1/databases/doc.go b/vendor/github.com/rackspace/gophercloud/rackspace/db/v1/databases/doc.go new file mode 100644 index 000000000..1a178b61f --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/rackspace/db/v1/databases/doc.go @@ -0,0 +1,3 @@ +// Package databases provides information and interaction with the database API +// resource in the Rackspace Database service. +package databases diff --git a/vendor/github.com/rackspace/gophercloud/rackspace/db/v1/databases/urls.go b/vendor/github.com/rackspace/gophercloud/rackspace/db/v1/databases/urls.go new file mode 100644 index 000000000..18cbec72e --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/rackspace/db/v1/databases/urls.go @@ -0,0 +1 @@ +package databases diff --git a/vendor/github.com/rackspace/gophercloud/rackspace/db/v1/datastores/delegate.go b/vendor/github.com/rackspace/gophercloud/rackspace/db/v1/datastores/delegate.go new file mode 100644 index 000000000..573496d38 --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/rackspace/db/v1/datastores/delegate.go @@ -0,0 +1,28 @@ +package datastores + +import ( + "github.com/rackspace/gophercloud" + os "github.com/rackspace/gophercloud/openstack/db/v1/datastores" + "github.com/rackspace/gophercloud/pagination" +) + +// List will list all available flavors. +func List(client *gophercloud.ServiceClient) pagination.Pager { + return os.List(client) +} + +// Get retrieves the details for a particular flavor. +func Get(client *gophercloud.ServiceClient, flavorID string) os.GetResult { + return os.Get(client, flavorID) +} + +// ListVersions will list all of the available versions for a specified +// datastore type. +func ListVersions(client *gophercloud.ServiceClient, datastoreID string) pagination.Pager { + return os.ListVersions(client, datastoreID) +} + +// GetVersion will retrieve the details of a specified datastore version. 
+func GetVersion(client *gophercloud.ServiceClient, datastoreID, versionID string) os.GetVersionResult { + return os.GetVersion(client, datastoreID, versionID) +} diff --git a/vendor/github.com/rackspace/gophercloud/rackspace/db/v1/datastores/delegate_test.go b/vendor/github.com/rackspace/gophercloud/rackspace/db/v1/datastores/delegate_test.go new file mode 100644 index 000000000..71111b965 --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/rackspace/db/v1/datastores/delegate_test.go @@ -0,0 +1,79 @@ +package datastores + +import ( + "testing" + + os "github.com/rackspace/gophercloud/openstack/db/v1/datastores" + "github.com/rackspace/gophercloud/pagination" + th "github.com/rackspace/gophercloud/testhelper" + fake "github.com/rackspace/gophercloud/testhelper/client" + "github.com/rackspace/gophercloud/testhelper/fixture" +) + +func TestList(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + fixture.SetupHandler(t, "/datastores", "GET", "", os.ListDSResp, 200) + + pages := 0 + + err := List(fake.ServiceClient()).EachPage(func(page pagination.Page) (bool, error) { + pages++ + + actual, err := os.ExtractDatastores(page) + if err != nil { + return false, err + } + + th.CheckDeepEquals(t, []os.Datastore{os.ExampleDatastore}, actual) + + return true, nil + }) + + th.AssertNoErr(t, err) + th.AssertEquals(t, 1, pages) +} + +func TestGet(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + fixture.SetupHandler(t, "/datastores/{dsID}", "GET", "", os.GetDSResp, 200) + + ds, err := Get(fake.ServiceClient(), "{dsID}").Extract() + th.AssertNoErr(t, err) + th.AssertDeepEquals(t, &os.ExampleDatastore, ds) +} + +func TestListVersions(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + fixture.SetupHandler(t, "/datastores/{dsID}/versions", "GET", "", os.ListVersionsResp, 200) + + pages := 0 + + err := ListVersions(fake.ServiceClient(), "{dsID}").EachPage(func(page pagination.Page) (bool, error) { + pages++ + + actual, err := os.ExtractVersions(page) + if err != nil { + return false, err + } + + th.CheckDeepEquals(t, os.ExampleVersions, actual) + + return true, nil + }) + + th.AssertNoErr(t, err) + th.AssertEquals(t, 1, pages) +} + +func TestGetVersion(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + fixture.SetupHandler(t, "/datastores/{dsID}/versions/{versionID}", "GET", "", os.GetVersionResp, 200) + + ds, err := GetVersion(fake.ServiceClient(), "{dsID}", "{versionID}").Extract() + th.AssertNoErr(t, err) + th.AssertDeepEquals(t, &os.ExampleVersion1, ds) +} diff --git a/vendor/github.com/rackspace/gophercloud/rackspace/db/v1/datastores/doc.go b/vendor/github.com/rackspace/gophercloud/rackspace/db/v1/datastores/doc.go new file mode 100644 index 000000000..f36997a03 --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/rackspace/db/v1/datastores/doc.go @@ -0,0 +1 @@ +package datastores diff --git a/vendor/github.com/rackspace/gophercloud/rackspace/db/v1/flavors/delegate.go b/vendor/github.com/rackspace/gophercloud/rackspace/db/v1/flavors/delegate.go new file mode 100644 index 000000000..689b81eac --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/rackspace/db/v1/flavors/delegate.go @@ -0,0 +1,17 @@ +package flavors + +import ( + "github.com/rackspace/gophercloud" + os "github.com/rackspace/gophercloud/openstack/db/v1/flavors" + "github.com/rackspace/gophercloud/pagination" +) + +// List will list all available flavors. 
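+//
+// A typical traversal (illustrative only; it assumes an authenticated
+// *gophercloud.ServiceClient named client):
+//
+//	err := List(client).EachPage(func(page pagination.Page) (bool, error) {
+//		fs, err := os.ExtractFlavors(page)
+//		if err != nil {
+//			return false, err
+//		}
+//		for _, f := range fs {
+//			fmt.Println(f.Name, f.RAM)
+//		}
+//		return true, nil
+//	})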
+func List(client *gophercloud.ServiceClient) pagination.Pager { + return os.List(client) +} + +// Get retrieves the details for a particular flavor. +func Get(client *gophercloud.ServiceClient, flavorID string) os.GetResult { + return os.Get(client, flavorID) +} diff --git a/vendor/github.com/rackspace/gophercloud/rackspace/db/v1/flavors/delegate_test.go b/vendor/github.com/rackspace/gophercloud/rackspace/db/v1/flavors/delegate_test.go new file mode 100644 index 000000000..f5f6442dd --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/rackspace/db/v1/flavors/delegate_test.go @@ -0,0 +1,95 @@ +package flavors + +import ( + "testing" + + "github.com/rackspace/gophercloud" + os "github.com/rackspace/gophercloud/openstack/db/v1/flavors" + "github.com/rackspace/gophercloud/pagination" + th "github.com/rackspace/gophercloud/testhelper" + fake "github.com/rackspace/gophercloud/testhelper/client" +) + +func TestListFlavors(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + os.HandleList(t) + + pages := 0 + err := List(fake.ServiceClient()).EachPage(func(page pagination.Page) (bool, error) { + pages++ + + actual, err := os.ExtractFlavors(page) + if err != nil { + return false, err + } + + expected := []os.Flavor{ + os.Flavor{ + ID: "1", + Name: "m1.tiny", + RAM: 512, + Links: []gophercloud.Link{ + gophercloud.Link{Href: "https://openstack.example.com/v1.0/1234/flavors/1", Rel: "self"}, + gophercloud.Link{Href: "https://openstack.example.com/flavors/1", Rel: "bookmark"}, + }, + }, + os.Flavor{ + ID: "2", + Name: "m1.small", + RAM: 1024, + Links: []gophercloud.Link{ + gophercloud.Link{Href: "https://openstack.example.com/v1.0/1234/flavors/2", Rel: "self"}, + gophercloud.Link{Href: "https://openstack.example.com/flavors/2", Rel: "bookmark"}, + }, + }, + os.Flavor{ + ID: "3", + Name: "m1.medium", + RAM: 2048, + Links: []gophercloud.Link{ + gophercloud.Link{Href: "https://openstack.example.com/v1.0/1234/flavors/3", Rel: "self"}, + gophercloud.Link{Href: "https://openstack.example.com/flavors/3", Rel: "bookmark"}, + }, + }, + os.Flavor{ + ID: "4", + Name: "m1.large", + RAM: 4096, + Links: []gophercloud.Link{ + gophercloud.Link{Href: "https://openstack.example.com/v1.0/1234/flavors/4", Rel: "self"}, + gophercloud.Link{Href: "https://openstack.example.com/flavors/4", Rel: "bookmark"}, + }, + }, + } + + th.AssertDeepEquals(t, expected, actual) + + return true, nil + }) + + th.AssertNoErr(t, err) + if pages != 1 { + t.Errorf("Expected one page, got %d", pages) + } +} + +func TestGetFlavor(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + os.HandleGet(t) + + actual, err := Get(fake.ServiceClient(), "{flavorID}").Extract() + th.AssertNoErr(t, err) + + expected := &os.Flavor{ + ID: "1", + Name: "m1.tiny", + RAM: 512, + Links: []gophercloud.Link{ + gophercloud.Link{Href: "https://openstack.example.com/v1.0/1234/flavors/1", Rel: "self"}, + }, + } + + th.AssertDeepEquals(t, expected, actual) +} diff --git a/vendor/github.com/rackspace/gophercloud/rackspace/db/v1/flavors/doc.go b/vendor/github.com/rackspace/gophercloud/rackspace/db/v1/flavors/doc.go new file mode 100644 index 000000000..922a4e667 --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/rackspace/db/v1/flavors/doc.go @@ -0,0 +1,3 @@ +// Package flavors provides information and interaction with the flavor API +// resource in the Rackspace Database service. 
+package flavors diff --git a/vendor/github.com/rackspace/gophercloud/rackspace/db/v1/instances/delegate.go b/vendor/github.com/rackspace/gophercloud/rackspace/db/v1/instances/delegate.go new file mode 100644 index 000000000..f2656fe3b --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/rackspace/db/v1/instances/delegate.go @@ -0,0 +1,49 @@ +package instances + +import ( + "github.com/rackspace/gophercloud" + os "github.com/rackspace/gophercloud/openstack/db/v1/instances" +) + +// Get retrieves the status and information for a specified database instance. +func Get(client *gophercloud.ServiceClient, id string) GetResult { + return GetResult{os.Get(client, id)} +} + +// Delete permanently destroys the database instance. +func Delete(client *gophercloud.ServiceClient, id string) os.DeleteResult { + return os.Delete(client, id) +} + +// EnableRootUser enables the login from any host for the root user and +// provides the user with a generated root password. +func EnableRootUser(client *gophercloud.ServiceClient, id string) os.UserRootResult { + return os.EnableRootUser(client, id) +} + +// IsRootEnabled checks an instance to see if root access is enabled. It returns +// True if root user is enabled for the specified database instance or False +// otherwise. +func IsRootEnabled(client *gophercloud.ServiceClient, id string) (bool, error) { + return os.IsRootEnabled(client, id) +} + +// Restart will restart only the MySQL Instance. Restarting MySQL will +// erase any dynamic configuration settings that you have made within MySQL. +// The MySQL service will be unavailable until the instance restarts. +func Restart(client *gophercloud.ServiceClient, id string) os.ActionResult { + return os.Restart(client, id) +} + +// Resize changes the memory size of the instance, assuming a valid +// flavorRef is provided. It will also restart the MySQL service. +func Resize(client *gophercloud.ServiceClient, id, flavorRef string) os.ActionResult { + return os.Resize(client, id, flavorRef) +} + +// ResizeVolume will resize the attached volume for an instance. It supports +// only increasing the volume size and does not support decreasing the size. +// The volume size is in gigabytes (GB) and must be an integer. 
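+//
+// A minimal usage sketch (illustrative only; it assumes an authenticated
+// *gophercloud.ServiceClient named client and an existing instance ID):
+//
+//	res := ResizeVolume(client, instanceID, 4)
+//	if res.Err != nil {
+//		// handle the error
+//	}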
+func ResizeVolume(client *gophercloud.ServiceClient, id string, size int) os.ActionResult { + return os.ResizeVolume(client, id, size) +} diff --git a/vendor/github.com/rackspace/gophercloud/rackspace/db/v1/instances/delegate_test.go b/vendor/github.com/rackspace/gophercloud/rackspace/db/v1/instances/delegate_test.go new file mode 100644 index 000000000..716e0a45b --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/rackspace/db/v1/instances/delegate_test.go @@ -0,0 +1,107 @@ +package instances + +import ( + "testing" + + osDBs "github.com/rackspace/gophercloud/openstack/db/v1/databases" + os "github.com/rackspace/gophercloud/openstack/db/v1/instances" + osUsers "github.com/rackspace/gophercloud/openstack/db/v1/users" + th "github.com/rackspace/gophercloud/testhelper" + fake "github.com/rackspace/gophercloud/testhelper/client" + "github.com/rackspace/gophercloud/testhelper/fixture" +) + +var ( + _rootURL = "/instances" + resURL = "/instances/" + instanceID +) + +func TestCreate(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + fixture.SetupHandler(t, _rootURL, "POST", createReq, createResp, 200) + + opts := CreateOpts{ + Name: "json_rack_instance", + FlavorRef: "1", + Databases: osDBs.BatchCreateOpts{ + osDBs.CreateOpts{CharSet: "utf8", Collate: "utf8_general_ci", Name: "sampledb"}, + osDBs.CreateOpts{Name: "nextround"}, + }, + Users: osUsers.BatchCreateOpts{ + osUsers.CreateOpts{ + Name: "demouser", + Password: "demopassword", + Databases: osDBs.BatchCreateOpts{ + osDBs.CreateOpts{Name: "sampledb"}, + }, + }, + }, + Size: 2, + RestorePoint: "1234567890", + } + + instance, err := Create(fake.ServiceClient(), opts).Extract() + + th.AssertNoErr(t, err) + th.AssertDeepEquals(t, expectedInstance, instance) +} + +func TestGet(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + fixture.SetupHandler(t, resURL, "GET", "", getResp, 200) + + instance, err := Get(fake.ServiceClient(), instanceID).Extract() + + th.AssertNoErr(t, err) + th.AssertDeepEquals(t, expectedInstance, instance) +} + +func TestDeleteInstance(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + os.HandleDelete(t) + + res := Delete(fake.ServiceClient(), instanceID) + th.AssertNoErr(t, res.Err) +} + +func TestEnableRootUser(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + os.HandleEnableRoot(t) + + expected := &osUsers.User{Name: "root", Password: "secretsecret"} + + user, err := EnableRootUser(fake.ServiceClient(), instanceID).Extract() + th.AssertNoErr(t, err) + th.AssertDeepEquals(t, expected, user) +} + +func TestRestart(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + os.HandleRestart(t) + + res := Restart(fake.ServiceClient(), instanceID) + th.AssertNoErr(t, res.Err) +} + +func TestResize(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + os.HandleResize(t) + + res := Resize(fake.ServiceClient(), instanceID, "2") + th.AssertNoErr(t, res.Err) +} + +func TestResizeVolume(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + os.HandleResizeVol(t) + + res := ResizeVolume(fake.ServiceClient(), instanceID, 4) + th.AssertNoErr(t, res.Err) +} diff --git a/vendor/github.com/rackspace/gophercloud/rackspace/db/v1/instances/doc.go b/vendor/github.com/rackspace/gophercloud/rackspace/db/v1/instances/doc.go new file mode 100644 index 000000000..0c8ad6336 --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/rackspace/db/v1/instances/doc.go @@ -0,0 +1,3 @@ +// Package instances provides information and interaction with the instance API +// resource in the 
Rackspace Database service. +package instances diff --git a/vendor/github.com/rackspace/gophercloud/rackspace/db/v1/instances/fixtures.go b/vendor/github.com/rackspace/gophercloud/rackspace/db/v1/instances/fixtures.go new file mode 100644 index 000000000..c5ff37aee --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/rackspace/db/v1/instances/fixtures.go @@ -0,0 +1,340 @@ +package instances + +import ( + "fmt" + "time" + + "github.com/rackspace/gophercloud" + "github.com/rackspace/gophercloud/openstack/db/v1/datastores" + "github.com/rackspace/gophercloud/openstack/db/v1/flavors" + os "github.com/rackspace/gophercloud/openstack/db/v1/instances" +) + +var ( + timestamp = "2015-11-12T14:22:42Z" + timeVal, _ = time.Parse(time.RFC3339, timestamp) +) + +var instance = ` +{ + "created": "` + timestamp + `", + "datastore": { + "type": "mysql", + "version": "5.6" + }, + "flavor": { + "id": "1", + "links": [ + { + "href": "https://ord.databases.api.rackspacecloud.com/v1.0/1234/flavors/1", + "rel": "self" + }, + { + "href": "https://ord.databases.api.rackspacecloud.com/v1.0/1234/flavors/1", + "rel": "bookmark" + } + ] + }, + "links": [ + { + "href": "https://ord.databases.api.rackspacecloud.com/v1.0/1234/flavors/1", + "rel": "self" + } + ], + "hostname": "e09ad9a3f73309469cf1f43d11e79549caf9acf2.rackspaceclouddb.com", + "id": "{instanceID}", + "name": "json_rack_instance", + "status": "BUILD", + "updated": "` + timestamp + `", + "volume": { + "size": 2 + } +} +` + +var createReq = ` +{ + "instance": { + "databases": [ + { + "character_set": "utf8", + "collate": "utf8_general_ci", + "name": "sampledb" + }, + { + "name": "nextround" + } + ], + "flavorRef": "1", + "name": "json_rack_instance", + "users": [ + { + "databases": [ + { + "name": "sampledb" + } + ], + "name": "demouser", + "password": "demopassword" + } + ], + "volume": { + "size": 2 + }, + "restorePoint": { + "backupRef": "1234567890" + } + } +} +` + +var createReplicaReq = ` +{ + "instance": { + "volume": { + "size": 1 + }, + "flavorRef": "9", + "name": "t2s1_ALT_GUEST", + "replica_of": "6bdca2fc-418e-40bd-a595-62abda61862d" + } +} +` + +var createReplicaResp = ` +{ + "instance": { + "status": "BUILD", + "updated": "` + timestamp + `", + "name": "t2s1_ALT_GUEST", + "links": [ + { + "href": "https://ord.databases.api.rackspacecloud.com/v1.0/5919009/instances/8367c312-7c40-4a66-aab1-5767478914fc", + "rel": "self" + }, + { + "href": "https://ord.databases.api.rackspacecloud.com/instances/8367c312-7c40-4a66-aab1-5767478914fc", + "rel": "bookmark" + } + ], + "created": "` + timestamp + `", + "id": "8367c312-7c40-4a66-aab1-5767478914fc", + "volume": { + "size": 1 + }, + "flavor": { + "id": "9" + }, + "datastore": { + "version": "5.6", + "type": "mysql" + }, + "replica_of": { + "id": "6bdca2fc-418e-40bd-a595-62abda61862d" + } + } +} +` + +var listReplicasResp = ` +{ + "instances": [ + { + "status": "ACTIVE", + "name": "t1s1_ALT_GUEST", + "links": [ + { + "href": "https://ord.databases.api.rackspacecloud.com/v1.0/1234/instances/3c691f06-bf9a-4618-b7ec-2817ce0cf254", + "rel": "self" + }, + { + "href": "https://ord.databases.api.rackspacecloud.com/instances/3c691f06-bf9a-4618-b7ec-2817ce0cf254", + "rel": "bookmark" + } + ], + "ip": [ + "10.0.0.3" + ], + "id": "3c691f06-bf9a-4618-b7ec-2817ce0cf254", + "volume": { + "size": 1 + }, + "flavor": { + "id": "9" + }, + "datastore": { + "version": "5.6", + "type": "mysql" + }, + "replica_of": { + "id": "8b499b45-52d6-402d-b398-f9d8f279c69a" + } + } + ] +} +` + +var getReplicaResp = ` +{ + "instance": 
{ + "status": "ACTIVE", + "updated": "` + timestamp + `", + "name": "t1_ALT_GUEST", + "created": "` + timestamp + `", + "ip": [ + "10.0.0.2" + ], + "replicas": [ + { + "id": "3c691f06-bf9a-4618-b7ec-2817ce0cf254" + } + ], + "id": "8b499b45-52d6-402d-b398-f9d8f279c69a", + "volume": { + "used": 0.54, + "size": 1 + }, + "flavor": { + "id": "9" + }, + "datastore": { + "version": "5.6", + "type": "mysql" + } + } +} +` + +var detachReq = ` +{ + "instance": { + "replica_of": "", + "slave_of": "" + } +} +` + +var getConfigResp = ` +{ + "instance": { + "configuration": { + "basedir": "/usr", + "connect_timeout": "15", + "datadir": "/var/lib/mysql", + "default_storage_engine": "innodb", + "innodb_buffer_pool_instances": "1", + "innodb_buffer_pool_size": "175M", + "innodb_checksum_algorithm": "crc32", + "innodb_data_file_path": "ibdata1:10M:autoextend", + "innodb_file_per_table": "1", + "innodb_io_capacity": "200", + "innodb_log_file_size": "256M", + "innodb_log_files_in_group": "2", + "innodb_open_files": "8192", + "innodb_thread_concurrency": "0", + "join_buffer_size": "1M", + "key_buffer_size": "50M", + "local-infile": "0", + "log-error": "/var/log/mysql/mysqld.log", + "max_allowed_packet": "16M", + "max_connect_errors": "10000", + "max_connections": "40", + "max_heap_table_size": "16M", + "myisam-recover": "BACKUP", + "open_files_limit": "8192", + "performance_schema": "off", + "pid_file": "/var/run/mysqld/mysqld.pid", + "port": "3306", + "query_cache_limit": "1M", + "query_cache_size": "8M", + "query_cache_type": "1", + "read_buffer_size": "256K", + "read_rnd_buffer_size": "1M", + "server_id": "1", + "skip-external-locking": "1", + "skip_name_resolve": "1", + "sort_buffer_size": "256K", + "table_open_cache": "4096", + "thread_stack": "192K", + "tmp_table_size": "16M", + "tmpdir": "/var/tmp", + "user": "mysql", + "wait_timeout": "3600" + } + } +} +` + +var associateReq = `{"instance": {"configuration": "{configGroupID}"}}` + +var listBackupsResp = ` +{ + "backups": [ + { + "status": "COMPLETED", + "updated": "` + timestamp + `", + "description": "Backup from Restored Instance", + "datastore": { + "version": "5.1", + "type": "MySQL", + "version_id": "20000000-0000-0000-0000-000000000002" + }, + "id": "87972694-4be2-40f5-83f8-501656e0032a", + "size": 0.141026, + "name": "restored_backup", + "created": "` + timestamp + `", + "instance_id": "29af2cd9-0674-48ab-b87a-b160f00208e6", + "parent_id": null, + "locationRef": "http://localhost/path/to/backup" + } + ] +} +` + +var ( + createResp = fmt.Sprintf(`{"instance":%s}`, instance) + getResp = fmt.Sprintf(`{"instance":%s}`, instance) + associateResp = fmt.Sprintf(`{"instance":%s}`, instance) + listInstancesResp = fmt.Sprintf(`{"instances":[%s]}`, instance) +) + +var instanceID = "{instanceID}" + +var expectedInstance = &Instance{ + Created: timeVal, + Updated: timeVal, + Datastore: datastores.DatastorePartial{Type: "mysql", Version: "5.6"}, + Flavor: flavors.Flavor{ + ID: "1", + Links: []gophercloud.Link{ + gophercloud.Link{Href: "https://ord.databases.api.rackspacecloud.com/v1.0/1234/flavors/1", Rel: "self"}, + gophercloud.Link{Href: "https://ord.databases.api.rackspacecloud.com/v1.0/1234/flavors/1", Rel: "bookmark"}, + }, + }, + Hostname: "e09ad9a3f73309469cf1f43d11e79549caf9acf2.rackspaceclouddb.com", + ID: instanceID, + Links: []gophercloud.Link{ + gophercloud.Link{Href: "https://ord.databases.api.rackspacecloud.com/v1.0/1234/flavors/1", Rel: "self"}, + }, + Name: "json_rack_instance", + Status: "BUILD", + Volume: os.Volume{Size: 2}, +} + +var 
expectedReplica = &Instance{ + Status: "BUILD", + Updated: timeVal, + Name: "t2s1_ALT_GUEST", + Links: []gophercloud.Link{ + gophercloud.Link{Rel: "self", Href: "https://ord.databases.api.rackspacecloud.com/v1.0/5919009/instances/8367c312-7c40-4a66-aab1-5767478914fc"}, + gophercloud.Link{Rel: "bookmark", Href: "https://ord.databases.api.rackspacecloud.com/instances/8367c312-7c40-4a66-aab1-5767478914fc"}, + }, + Created: timeVal, + ID: "8367c312-7c40-4a66-aab1-5767478914fc", + Volume: os.Volume{Size: 1}, + Flavor: flavors.Flavor{ID: "9"}, + Datastore: datastores.DatastorePartial{Version: "5.6", Type: "mysql"}, + ReplicaOf: &Instance{ + ID: "6bdca2fc-418e-40bd-a595-62abda61862d", + }, +} diff --git a/vendor/github.com/rackspace/gophercloud/rackspace/db/v1/instances/requests.go b/vendor/github.com/rackspace/gophercloud/rackspace/db/v1/instances/requests.go new file mode 100644 index 000000000..f4df692a4 --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/rackspace/db/v1/instances/requests.go @@ -0,0 +1,199 @@ +package instances + +import ( + "github.com/rackspace/gophercloud" + osDBs "github.com/rackspace/gophercloud/openstack/db/v1/databases" + os "github.com/rackspace/gophercloud/openstack/db/v1/instances" + osUsers "github.com/rackspace/gophercloud/openstack/db/v1/users" + "github.com/rackspace/gophercloud/pagination" + "github.com/rackspace/gophercloud/rackspace/db/v1/backups" +) + +// CreateOpts is the struct responsible for configuring a new database instance. +type CreateOpts struct { + // Either the integer UUID (in string form) of the flavor, or its URI + // reference as specified in the response from the List() call. Required. + FlavorRef string + + // Specifies the volume size in gigabytes (GB). The value must be between 1 + // and 300. Required. + Size int + + // Name of the instance to create. The length of the name is limited to + // 255 characters and any characters are permitted. Optional. + Name string + + // A slice of database information options. + Databases osDBs.CreateOptsBuilder + + // A slice of user information options. + Users osUsers.CreateOptsBuilder + + // ID of the configuration group to associate with the instance. Optional. + ConfigID string + + // Options to configure the type of datastore the instance will use. This is + // optional, and if excluded will default to MySQL. + Datastore *os.DatastoreOpts + + // Specifies the backup ID from which to restore the database instance. There + // are some things to be aware of before using this field. When you execute + // the Restore Backup operation, a new database instance is created to store + // the backup whose ID is specified by the restorePoint attribute. This will + // mean that: + // - All users, passwords and access that were on the instance at the time of + // the backup will be restored along with the databases. + // - You can create new users or databases if you want, but they cannot be + // the same as the ones from the instance that was backed up. 
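+	//
+	// In the generated request body this is rendered as (see the
+	// createReq fixture and ToInstanceCreateMap below):
+	//
+	//	"restorePoint": {"backupRef": "<backup ID>"}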
+ RestorePoint string + + ReplicaOf string +} + +func (opts CreateOpts) ToInstanceCreateMap() (map[string]interface{}, error) { + instance, err := os.CreateOpts{ + FlavorRef: opts.FlavorRef, + Size: opts.Size, + Name: opts.Name, + Databases: opts.Databases, + Users: opts.Users, + }.ToInstanceCreateMap() + + if err != nil { + return nil, err + } + + instance = instance["instance"].(map[string]interface{}) + + if opts.ConfigID != "" { + instance["configuration"] = opts.ConfigID + } + + if opts.Datastore != nil { + ds, err := opts.Datastore.ToMap() + if err != nil { + return nil, err + } + instance["datastore"] = ds + } + + if opts.RestorePoint != "" { + instance["restorePoint"] = map[string]string{"backupRef": opts.RestorePoint} + } + + if opts.ReplicaOf != "" { + instance["replica_of"] = opts.ReplicaOf + } + + return map[string]interface{}{"instance": instance}, nil +} + +// Create asynchronously provisions a new database instance. It requires the +// user to specify a flavor and a volume size. The API service then provisions +// the instance with the requested flavor and sets up a volume of the specified +// size, which is the storage for the database instance. +// +// Although this call only allows the creation of 1 instance per request, you +// can create an instance with multiple databases and users. The default +// binding for a MySQL instance is port 3306. +func Create(client *gophercloud.ServiceClient, opts os.CreateOptsBuilder) CreateResult { + return CreateResult{os.Create(client, opts)} +} + +// ListOpts specifies all of the query options to be used when returning a list +// of database instances. +type ListOpts struct { + // IncludeHA includes or excludes High Availability instances from the result set + IncludeHA bool `q:"include_ha"` + + // IncludeReplicas includes or excludes Replica instances from the result set + IncludeReplicas bool `q:"include_replicas"` +} + +// ToInstanceListQuery formats a ListOpts into a query string. +func (opts ListOpts) ToInstanceListQuery() (string, error) { + q, err := gophercloud.BuildQueryString(opts) + if err != nil { + return "", err + } + return q.String(), nil +} + +// List retrieves the status and information for all database instances. +func List(client *gophercloud.ServiceClient, opts *ListOpts) pagination.Pager { + url := baseURL(client) + + if opts != nil { + query, err := opts.ToInstanceListQuery() + if err != nil { + return pagination.Pager{Err: err} + } + url += query + } + + createPageFn := func(r pagination.PageResult) pagination.Page { + return os.InstancePage{pagination.LinkedPageBase{PageResult: r}} + } + + return pagination.NewPager(client, url, createPageFn) +} + +// GetDefaultConfig lists the default configuration settings from the template +// that was applied to the specified instance. In a sense, this is the vanilla +// configuration setting applied to an instance. Further configuration can be +// applied by associating an instance with a configuration group. +func GetDefaultConfig(client *gophercloud.ServiceClient, id string) ConfigResult { + var res ConfigResult + + _, res.Err = client.Request("GET", configURL(client, id), gophercloud.RequestOpts{ + JSONResponse: &res.Body, + OkCodes: []int{200}, + }) + + return res +} + +// AssociateWithConfigGroup associates a specified instance to a specified +// configuration group. If any of the parameters within a configuration group +// require a restart, then the instance will transition into a restart. 
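+//
+// On the wire this issues a PUT whose body mirrors the associateReq test
+// fixture:
+//
+//	{"instance": {"configuration": "<config group ID>"}}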
+func AssociateWithConfigGroup(client *gophercloud.ServiceClient, instanceID, configGroupID string) UpdateResult { + reqBody := map[string]string{ + "configuration": configGroupID, + } + + var res UpdateResult + + _, res.Err = client.Request("PUT", resourceURL(client, instanceID), gophercloud.RequestOpts{ + JSONBody: map[string]map[string]string{"instance": reqBody}, + OkCodes: []int{202}, + }) + + return res +} + +// DetachFromConfigGroup will detach an instance from all config groups. +func DetachFromConfigGroup(client *gophercloud.ServiceClient, instanceID string) UpdateResult { + return AssociateWithConfigGroup(client, instanceID, "") +} + +// ListBackups will list all the backups for a specified database instance. +func ListBackups(client *gophercloud.ServiceClient, instanceID string) pagination.Pager { + pageFn := func(r pagination.PageResult) pagination.Page { + return backups.BackupPage{pagination.SinglePageBase(r)} + } + return pagination.NewPager(client, backupsURL(client, instanceID), pageFn) +} + +// DetachReplica will detach a specified replica instance from its source +// instance, effectively allowing it to operate independently. Detaching a +// replica will restart the MySQL service on the instance. +func DetachReplica(client *gophercloud.ServiceClient, replicaID string) DetachResult { + var res DetachResult + + _, res.Err = client.Request("PATCH", resourceURL(client, replicaID), gophercloud.RequestOpts{ + JSONBody: map[string]interface{}{"instance": map[string]string{"replica_of": "", "slave_of": ""}}, + OkCodes: []int{202}, + }) + + return res +} diff --git a/vendor/github.com/rackspace/gophercloud/rackspace/db/v1/instances/requests_test.go b/vendor/github.com/rackspace/gophercloud/rackspace/db/v1/instances/requests_test.go new file mode 100644 index 000000000..7fa460117 --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/rackspace/db/v1/instances/requests_test.go @@ -0,0 +1,246 @@ +package instances + +import ( + "testing" + + "github.com/rackspace/gophercloud" + "github.com/rackspace/gophercloud/openstack/db/v1/datastores" + "github.com/rackspace/gophercloud/openstack/db/v1/flavors" + os "github.com/rackspace/gophercloud/openstack/db/v1/instances" + "github.com/rackspace/gophercloud/pagination" + "github.com/rackspace/gophercloud/rackspace/db/v1/backups" + th "github.com/rackspace/gophercloud/testhelper" + fake "github.com/rackspace/gophercloud/testhelper/client" + "github.com/rackspace/gophercloud/testhelper/fixture" +) + +func TestInstanceList(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + fixture.SetupHandler(t, "/instances", "GET", "", listInstancesResp, 200) + + opts := &ListOpts{ + IncludeHA: false, + IncludeReplicas: false, + } + + pages := 0 + err := List(fake.ServiceClient(), opts).EachPage(func(page pagination.Page) (bool, error) { + pages++ + + actual, err := ExtractInstances(page) + if err != nil { + return false, err + } + + th.CheckDeepEquals(t, []Instance{*expectedInstance}, actual) + return true, nil + }) + + th.AssertNoErr(t, err) + th.AssertEquals(t, 1, pages) +} + +func TestGetConfig(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + fixture.SetupHandler(t, resURL+"/configuration", "GET", "", getConfigResp, 200) + + config, err := GetDefaultConfig(fake.ServiceClient(), instanceID).Extract() + + expected := map[string]string{ + "basedir": "/usr", + "connect_timeout": "15", + "datadir": "/var/lib/mysql", + "default_storage_engine": "innodb", + "innodb_buffer_pool_instances": "1", + "innodb_buffer_pool_size": "175M", + 
"innodb_checksum_algorithm": "crc32", + "innodb_data_file_path": "ibdata1:10M:autoextend", + "innodb_file_per_table": "1", + "innodb_io_capacity": "200", + "innodb_log_file_size": "256M", + "innodb_log_files_in_group": "2", + "innodb_open_files": "8192", + "innodb_thread_concurrency": "0", + "join_buffer_size": "1M", + "key_buffer_size": "50M", + "local-infile": "0", + "log-error": "/var/log/mysql/mysqld.log", + "max_allowed_packet": "16M", + "max_connect_errors": "10000", + "max_connections": "40", + "max_heap_table_size": "16M", + "myisam-recover": "BACKUP", + "open_files_limit": "8192", + "performance_schema": "off", + "pid_file": "/var/run/mysqld/mysqld.pid", + "port": "3306", + "query_cache_limit": "1M", + "query_cache_size": "8M", + "query_cache_type": "1", + "read_buffer_size": "256K", + "read_rnd_buffer_size": "1M", + "server_id": "1", + "skip-external-locking": "1", + "skip_name_resolve": "1", + "sort_buffer_size": "256K", + "table_open_cache": "4096", + "thread_stack": "192K", + "tmp_table_size": "16M", + "tmpdir": "/var/tmp", + "user": "mysql", + "wait_timeout": "3600", + } + + th.AssertNoErr(t, err) + th.AssertDeepEquals(t, expected, config) +} + +func TestAssociateWithConfigGroup(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + fixture.SetupHandler(t, resURL, "PUT", associateReq, associateResp, 202) + + res := AssociateWithConfigGroup(fake.ServiceClient(), instanceID, "{configGroupID}") + th.AssertNoErr(t, res.Err) +} + +func TestListBackups(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + fixture.SetupHandler(t, resURL+"/backups", "GET", "", listBackupsResp, 200) + + pages := 0 + + err := ListBackups(fake.ServiceClient(), instanceID).EachPage(func(page pagination.Page) (bool, error) { + pages++ + actual, err := backups.ExtractBackups(page) + th.AssertNoErr(t, err) + + expected := []backups.Backup{ + backups.Backup{ + Created: timeVal, + Description: "Backup from Restored Instance", + ID: "87972694-4be2-40f5-83f8-501656e0032a", + InstanceID: "29af2cd9-0674-48ab-b87a-b160f00208e6", + LocationRef: "http://localhost/path/to/backup", + Name: "restored_backup", + ParentID: "", + Size: 0.141026, + Status: "COMPLETED", + Updated: timeVal, + Datastore: datastores.DatastorePartial{Version: "5.1", Type: "MySQL", VersionID: "20000000-0000-0000-0000-000000000002"}, + }, + } + + th.AssertDeepEquals(t, expected, actual) + return true, nil + }) + + th.AssertNoErr(t, err) + th.AssertEquals(t, 1, pages) +} + +func TestCreateReplica(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + fixture.SetupHandler(t, _rootURL, "POST", createReplicaReq, createReplicaResp, 200) + + opts := CreateOpts{ + Name: "t2s1_ALT_GUEST", + FlavorRef: "9", + Size: 1, + ReplicaOf: "6bdca2fc-418e-40bd-a595-62abda61862d", + } + + replica, err := Create(fake.ServiceClient(), opts).Extract() + th.AssertNoErr(t, err) + th.AssertDeepEquals(t, expectedReplica, replica) +} + +func TestListReplicas(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + fixture.SetupHandler(t, _rootURL, "GET", "", listReplicasResp, 200) + + pages := 0 + err := List(fake.ServiceClient(), nil).EachPage(func(page pagination.Page) (bool, error) { + pages++ + + actual, err := ExtractInstances(page) + if err != nil { + return false, err + } + + expected := []Instance{ + Instance{ + Status: "ACTIVE", + Name: "t1s1_ALT_GUEST", + Links: []gophercloud.Link{ + gophercloud.Link{Rel: "self", Href: "https://ord.databases.api.rackspacecloud.com/v1.0/1234/instances/3c691f06-bf9a-4618-b7ec-2817ce0cf254"}, + 
gophercloud.Link{Rel: "bookmark", Href: "https://ord.databases.api.rackspacecloud.com/instances/3c691f06-bf9a-4618-b7ec-2817ce0cf254"}, + }, + ID: "3c691f06-bf9a-4618-b7ec-2817ce0cf254", + IP: []string{"10.0.0.3"}, + Volume: os.Volume{Size: 1}, + Flavor: flavors.Flavor{ID: "9"}, + Datastore: datastores.DatastorePartial{Version: "5.6", Type: "mysql"}, + ReplicaOf: &Instance{ + ID: "8b499b45-52d6-402d-b398-f9d8f279c69a", + }, + }, + } + + th.CheckDeepEquals(t, expected, actual) + + return true, nil + }) + + th.AssertNoErr(t, err) + th.AssertEquals(t, 1, pages) +} + +func TestGetReplica(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + fixture.SetupHandler(t, resURL, "GET", "", getReplicaResp, 200) + + replica, err := Get(fake.ServiceClient(), instanceID).Extract() + th.AssertNoErr(t, err) + + expectedReplica := &Instance{ + Status: "ACTIVE", + Updated: timeVal, + Name: "t1_ALT_GUEST", + Created: timeVal, + IP: []string{ + "10.0.0.2", + }, + Replicas: []Instance{ + Instance{ID: "3c691f06-bf9a-4618-b7ec-2817ce0cf254"}, + }, + ID: "8b499b45-52d6-402d-b398-f9d8f279c69a", + Volume: os.Volume{ + Used: 0.54, + Size: 1, + }, + Flavor: flavors.Flavor{ID: "9"}, + Datastore: datastores.DatastorePartial{ + Version: "5.6", + Type: "mysql", + }, + } + + th.AssertDeepEquals(t, replica, expectedReplica) +} + +func TestDetachReplica(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + fixture.SetupHandler(t, resURL, "PATCH", detachReq, "", 202) + + err := DetachReplica(fake.ServiceClient(), instanceID).ExtractErr() + th.AssertNoErr(t, err) +} diff --git a/vendor/github.com/rackspace/gophercloud/rackspace/db/v1/instances/results.go b/vendor/github.com/rackspace/gophercloud/rackspace/db/v1/instances/results.go new file mode 100644 index 000000000..cdcc9c716 --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/rackspace/db/v1/instances/results.go @@ -0,0 +1,191 @@ +package instances + +import ( + "fmt" + "reflect" + "time" + + "github.com/mitchellh/mapstructure" + "github.com/rackspace/gophercloud" + "github.com/rackspace/gophercloud/openstack/db/v1/datastores" + "github.com/rackspace/gophercloud/openstack/db/v1/flavors" + os "github.com/rackspace/gophercloud/openstack/db/v1/instances" + "github.com/rackspace/gophercloud/pagination" +) + +// Instance represents a remote MySQL instance. +type Instance struct { + // Indicates the datetime that the instance was created + Created time.Time `mapstructure:"-"` + + // Indicates the most recent datetime that the instance was updated. + Updated time.Time `mapstructure:"-"` + + // Indicates how the instance stores data. + Datastore datastores.DatastorePartial + + // Indicates the hardware flavor the instance uses. + Flavor flavors.Flavor + + // A DNS-resolvable hostname associated with the database instance (rather + // than an IPv4 address). Since the hostname always resolves to the correct + // IP address of the database instance, this relieves the user from the task + // of maintaining the mapping. Note that although the IP address may likely + // change on resizing, migrating, and so forth, the hostname always resolves + // to the correct database instance. + Hostname string + + // Indicates the unique identifier for the instance resource. + ID string + + // Exposes various links that reference the instance resource. + Links []gophercloud.Link + + // The human-readable name of the instance. + Name string + + // The build status of the instance. + Status string + + // Information about the attached volume of the instance. 
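+	// As the replica fixtures show, the volume reports its size in
+	// gigabytes and, when the API returns it, the space currently in use
+	// (for example Used: 0.54 of Size: 1).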
+ Volume os.Volume + + // IP indicates the various IP addresses which allow access. + IP []string + + // Indicates whether this instance is a replica of another source instance. + ReplicaOf *Instance `mapstructure:"replica_of" json:"replica_of"` + + // Indicates whether this instance is the source of other replica instances. + Replicas []Instance +} + +func commonExtract(err error, body interface{}) (*Instance, error) { + if err != nil { + return nil, err + } + + var response struct { + Instance Instance `mapstructure:"instance"` + } + + err = mapstructure.Decode(body, &response) + + val := body.(map[string]interface{})["instance"].(map[string]interface{}) + + if t, ok := val["created"].(string); ok && t != "" { + creationTime, err := time.Parse(time.RFC3339, t) + if err != nil { + return &response.Instance, err + } + response.Instance.Created = creationTime + } + + if t, ok := val["updated"].(string); ok && t != "" { + updatedTime, err := time.Parse(time.RFC3339, t) + if err != nil { + return &response.Instance, err + } + response.Instance.Updated = updatedTime + } + + return &response.Instance, err +} + +// CreateResult represents the result of a Create operation. +type CreateResult struct { + os.CreateResult +} + +// Extract will retrieve an instance from a create result. +func (r CreateResult) Extract() (*Instance, error) { + return commonExtract(r.Err, r.Body) +} + +// GetResult represents the result of a Get operation. +type GetResult struct { + os.GetResult +} + +// Extract will extract an Instance from a GetResult. +func (r GetResult) Extract() (*Instance, error) { + return commonExtract(r.Err, r.Body) +} + +// ConfigResult represents the result of getting default configuration for an +// instance. +type ConfigResult struct { + gophercloud.Result +} + +// DetachResult represents the result of detaching a replica from its source. +type DetachResult struct { + gophercloud.ErrResult +} + +// Extract will extract the configuration information (in the form of a map) +// about a particular instance. +func (r ConfigResult) Extract() (map[string]string, error) { + if r.Err != nil { + return nil, r.Err + } + + var response struct { + Instance struct { + Config map[string]string `mapstructure:"configuration"` + } `mapstructure:"instance"` + } + + err := mapstructure.Decode(r.Body, &response) + return response.Instance.Config, err +} + +// UpdateResult represents the result of an Update operation. +type UpdateResult struct { + gophercloud.ErrResult +} + +// ExtractInstances retrieves a slice of instances from a paginated collection. 
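+//
+// A typical call site (illustrative only; it assumes an authenticated
+// *gophercloud.ServiceClient named client) pairs it with List:
+//
+//	err := List(client, nil).EachPage(func(page pagination.Page) (bool, error) {
+//		is, err := ExtractInstances(page)
+//		if err != nil {
+//			return false, err
+//		}
+//		// inspect is...
+//		return true, nil
+//	})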
+func ExtractInstances(page pagination.Page) ([]Instance, error) { + casted := page.(os.InstancePage).Body + + var resp struct { + Instances []Instance `mapstructure:"instances"` + } + + if err := mapstructure.Decode(casted, &resp); err != nil { + return nil, err + } + + var vals []interface{} + switch casted.(type) { + case map[string]interface{}: + vals = casted.(map[string]interface{})["instances"].([]interface{}) + case map[string][]interface{}: + vals = casted.(map[string][]interface{})["instances"] + default: + return resp.Instances, fmt.Errorf("Unknown type: %v", reflect.TypeOf(casted)) + } + + for i, v := range vals { + val := v.(map[string]interface{}) + + if t, ok := val["created"].(string); ok && t != "" { + creationTime, err := time.Parse(time.RFC3339, t) + if err != nil { + return resp.Instances, err + } + resp.Instances[i].Created = creationTime + } + + if t, ok := val["updated"].(string); ok && t != "" { + updatedTime, err := time.Parse(time.RFC3339, t) + if err != nil { + return resp.Instances, err + } + resp.Instances[i].Updated = updatedTime + } + } + + return resp.Instances, nil +} diff --git a/vendor/github.com/rackspace/gophercloud/rackspace/db/v1/instances/urls.go b/vendor/github.com/rackspace/gophercloud/rackspace/db/v1/instances/urls.go new file mode 100644 index 000000000..5955f4cde --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/rackspace/db/v1/instances/urls.go @@ -0,0 +1,23 @@ +package instances + +import "github.com/rackspace/gophercloud" + +func baseURL(c *gophercloud.ServiceClient) string { + return c.ServiceURL("instances") +} + +func createURL(c *gophercloud.ServiceClient) string { + return baseURL(c) +} + +func resourceURL(c *gophercloud.ServiceClient, id string) string { + return c.ServiceURL("instances", id) +} + +func configURL(c *gophercloud.ServiceClient, id string) string { + return c.ServiceURL("instances", id, "configuration") +} + +func backupsURL(c *gophercloud.ServiceClient, id string) string { + return c.ServiceURL("instances", id, "backups") +} diff --git a/vendor/github.com/rackspace/gophercloud/rackspace/db/v1/users/delegate.go b/vendor/github.com/rackspace/gophercloud/rackspace/db/v1/users/delegate.go new file mode 100644 index 000000000..8298c4605 --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/rackspace/db/v1/users/delegate.go @@ -0,0 +1,16 @@ +package users + +import ( + "github.com/rackspace/gophercloud" + os "github.com/rackspace/gophercloud/openstack/db/v1/users" +) + +// Create will create a new database user for the specified database instance. +func Create(client *gophercloud.ServiceClient, instanceID string, opts os.CreateOptsBuilder) os.CreateResult { + return os.Create(client, instanceID, opts) +} + +// Delete will permanently remove a user from a specified database instance. 
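+//
+// For example (a sketch; the placeholder IDs mirror this package's tests):
+//
+//	res := Delete(client, "{instanceID}", "{userName}")
+//	if res.Err != nil {
+//		// handle the error
+//	}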
+func Delete(client *gophercloud.ServiceClient, instanceID, userName string) os.DeleteResult { + return os.Delete(client, instanceID, userName) +} diff --git a/vendor/github.com/rackspace/gophercloud/rackspace/db/v1/users/delegate_test.go b/vendor/github.com/rackspace/gophercloud/rackspace/db/v1/users/delegate_test.go new file mode 100644 index 000000000..7a1b773cd --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/rackspace/db/v1/users/delegate_test.go @@ -0,0 +1,48 @@ +package users + +import ( + "testing" + + db "github.com/rackspace/gophercloud/openstack/db/v1/databases" + os "github.com/rackspace/gophercloud/openstack/db/v1/users" + th "github.com/rackspace/gophercloud/testhelper" + fake "github.com/rackspace/gophercloud/testhelper/client" +) + +const instanceID = "{instanceID}" + +func TestCreate(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + os.HandleCreate(t) + + opts := os.BatchCreateOpts{ + os.CreateOpts{ + Databases: db.BatchCreateOpts{ + db.CreateOpts{Name: "databaseA"}, + }, + Name: "dbuser3", + Password: "secretsecret", + }, + os.CreateOpts{ + Databases: db.BatchCreateOpts{ + db.CreateOpts{Name: "databaseB"}, + db.CreateOpts{Name: "databaseC"}, + }, + Name: "dbuser4", + Password: "secretsecret", + }, + } + + res := Create(fake.ServiceClient(), instanceID, opts) + th.AssertNoErr(t, res.Err) +} + +func TestDelete(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + os.HandleDelete(t) + + res := Delete(fake.ServiceClient(), instanceID, "{userName}") + th.AssertNoErr(t, res.Err) +} diff --git a/vendor/github.com/rackspace/gophercloud/rackspace/db/v1/users/doc.go b/vendor/github.com/rackspace/gophercloud/rackspace/db/v1/users/doc.go new file mode 100644 index 000000000..84f2eb327 --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/rackspace/db/v1/users/doc.go @@ -0,0 +1,3 @@ +// Package users provides information and interaction with the user API +// resource in the Rackspace Database service. 
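+//
+// A typical call (a sketch; "client" is a configured ServiceClient and opts
+// an os.BatchCreateOpts, as exercised in this package's tests):
+//
+//	res := Create(client, "{instanceID}", opts)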
+package users diff --git a/vendor/github.com/rackspace/gophercloud/rackspace/db/v1/users/fixtures.go b/vendor/github.com/rackspace/gophercloud/rackspace/db/v1/users/fixtures.go new file mode 100644 index 000000000..5314e854f --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/rackspace/db/v1/users/fixtures.go @@ -0,0 +1,77 @@ +package users + +const singleDB = `{"databases": [{"name": "databaseE"}]}` + +var changePwdReq = ` +{ + "users": [ + { + "name": "dbuser1", + "password": "newpassword" + }, + { + "name": "dbuser2", + "password": "anotherpassword" + } + ] +} +` + +var updateReq = ` +{ + "user": { + "name": "new_username", + "password": "new_password" + } +} +` + +var getResp = ` +{ + "user": { + "name": "exampleuser", + "host": "foo", + "databases": [ + { + "name": "databaseA" + }, + { + "name": "databaseB" + } + ] + } +} +` + +var listResp = ` +{ +"users": [ + { + "name": "dbuser1", + "host": "localhost", + "databases": [ + { + "name": "databaseA" + } + ] + }, + { + "name": "dbuser2", + "host": "localhost", + "databases": [ + { + "name": "databaseB" + }, + { + "name": "databaseC" + } + ] + } +] +} +` + +var ( + listUserAccessResp = singleDB + grantUserAccessReq = singleDB +) diff --git a/vendor/github.com/rackspace/gophercloud/rackspace/db/v1/users/requests.go b/vendor/github.com/rackspace/gophercloud/rackspace/db/v1/users/requests.go new file mode 100644 index 000000000..74e47ab23 --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/rackspace/db/v1/users/requests.go @@ -0,0 +1,176 @@ +package users + +import ( + "errors" + + "github.com/rackspace/gophercloud" + db "github.com/rackspace/gophercloud/openstack/db/v1/databases" + os "github.com/rackspace/gophercloud/openstack/db/v1/users" + "github.com/rackspace/gophercloud/pagination" +) + +// List will list all available users for a specified database instance. +func List(client *gophercloud.ServiceClient, instanceID string) pagination.Pager { + createPageFn := func(r pagination.PageResult) pagination.Page { + return UserPage{pagination.LinkedPageBase{PageResult: r}} + } + + return pagination.NewPager(client, baseURL(client, instanceID), createPageFn) +} + +/* +ChangePassword changes the password for one or more users. For example, to +change the respective passwords for two users: + + opts := os.BatchCreateOpts{ + os.CreateOpts{Name: "db_user_1", Password: "new_password_1"}, + os.CreateOpts{Name: "db_user_2", Password: "new_password_2"}, + } + + ChangePassword(client, "instance_id", opts) +*/ +func ChangePassword(client *gophercloud.ServiceClient, instanceID string, opts os.CreateOptsBuilder) UpdatePasswordsResult { + var res UpdatePasswordsResult + + reqBody, err := opts.ToUserCreateMap() + if err != nil { + res.Err = err + return res + } + + _, res.Err = client.Request("PUT", baseURL(client, instanceID), gophercloud.RequestOpts{ + JSONBody: &reqBody, + OkCodes: []int{202}, + }) + + return res +} + +// UpdateOpts is the struct responsible for updating an existing user. +type UpdateOpts struct { + // [OPTIONAL] Specifies a name for the user. Valid names can be composed + // of the following characters: letters (either case); numbers; these + // characters '@', '?', '#', ' ' but NEVER beginning a name string; '_' is + // permitted anywhere. Prohibited characters that are forbidden include: + // single quotes, double quotes, back quotes, semicolons, commas, backslashes, + // and forward slashes. Spaces at the front or end of a user name are also + // not permitted. 
+ Name string + + // [OPTIONAL] Specifies a password for the user. + Password string + + // [OPTIONAL] Specifies the host from which a user is allowed to connect to + // the database. Possible values are a string containing an IPv4 address or + // "%" to allow connecting from any host. Optional; the default is "%". + Host string +} + +// ToMap is a convenience function for creating sub-maps for individual users. +func (opts UpdateOpts) ToMap() (map[string]interface{}, error) { + if opts.Name == "root" { + return nil, errors.New("root is a reserved user name and cannot be used") + } + + user := map[string]interface{}{} + + if opts.Name != "" { + user["name"] = opts.Name + } + + if opts.Password != "" { + user["password"] = opts.Password + } + + if opts.Host != "" { + user["host"] = opts.Host + } + + return user, nil +} + +// Update will modify the attributes of a specified user. Attributes that can +// be updated are: user name, password, and host. +func Update(client *gophercloud.ServiceClient, instanceID, userName string, opts UpdateOpts) UpdateResult { + var res UpdateResult + + reqBody, err := opts.ToMap() + if err != nil { + res.Err = err + return res + } + reqBody = map[string]interface{}{"user": reqBody} + + _, res.Err = client.Request("PUT", userURL(client, instanceID, userName), gophercloud.RequestOpts{ + JSONBody: &reqBody, + OkCodes: []int{202}, + }) + + return res +} + +// Get will retrieve the details for a particular user. +func Get(client *gophercloud.ServiceClient, instanceID, userName string) GetResult { + var res GetResult + + _, res.Err = client.Request("GET", userURL(client, instanceID, userName), gophercloud.RequestOpts{ + JSONResponse: &res.Body, + OkCodes: []int{200}, + }) + + return res +} + +// ListAccess will list all of the databases a user has access to. +func ListAccess(client *gophercloud.ServiceClient, instanceID, userName string) pagination.Pager { + pageFn := func(r pagination.PageResult) pagination.Page { + return AccessPage{pagination.LinkedPageBase{PageResult: r}} + } + + return pagination.NewPager(client, dbsURL(client, instanceID, userName), pageFn) +} + +/* +GrantAccess for the specified user to one or more databases on a specified +instance. For example, to add a user to multiple databases: + + opts := db.BatchCreateOpts{ + db.CreateOpts{Name: "database_1"}, + db.CreateOpts{Name: "database_3"}, + db.CreateOpts{Name: "database_19"}, + } + + GrantAccess(client, "instance_id", "user_name", opts) +*/ +func GrantAccess(client *gophercloud.ServiceClient, instanceID, userName string, opts db.CreateOptsBuilder) GrantAccessResult { + var res GrantAccessResult + + reqBody, err := opts.ToDBCreateMap() + if err != nil { + res.Err = err + return res + } + + _, res.Err = client.Request("PUT", dbsURL(client, instanceID, userName), gophercloud.RequestOpts{ + JSONBody: &reqBody, + OkCodes: []int{202}, + }) + + return res +} + +/* +RevokeAccess will revoke access for the specified user to one or more databases +on a specified instance. 
For example: + + RevokeAccess(client, "instance_id", "user_name", "db_name") +*/ +func RevokeAccess(client *gophercloud.ServiceClient, instanceID, userName, dbName string) RevokeAccessResult { + var res RevokeAccessResult + + _, res.Err = client.Request("DELETE", dbURL(client, instanceID, userName, dbName), gophercloud.RequestOpts{ + OkCodes: []int{202}, + }) + + return res +} diff --git a/vendor/github.com/rackspace/gophercloud/rackspace/db/v1/users/requests_test.go b/vendor/github.com/rackspace/gophercloud/rackspace/db/v1/users/requests_test.go new file mode 100644 index 000000000..2f2dca700 --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/rackspace/db/v1/users/requests_test.go @@ -0,0 +1,156 @@ +package users + +import ( + "testing" + + db "github.com/rackspace/gophercloud/openstack/db/v1/databases" + os "github.com/rackspace/gophercloud/openstack/db/v1/users" + "github.com/rackspace/gophercloud/pagination" + th "github.com/rackspace/gophercloud/testhelper" + fake "github.com/rackspace/gophercloud/testhelper/client" + "github.com/rackspace/gophercloud/testhelper/fixture" +) + +var ( + userName = "{userName}" + _rootURL = "/instances/" + instanceID + "/users" + _userURL = _rootURL + "/" + userName + _dbURL = _userURL + "/databases" +) + +func TestChangeUserPassword(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + fixture.SetupHandler(t, _rootURL, "PUT", changePwdReq, "", 202) + + opts := os.BatchCreateOpts{ + os.CreateOpts{Name: "dbuser1", Password: "newpassword"}, + os.CreateOpts{Name: "dbuser2", Password: "anotherpassword"}, + } + + err := ChangePassword(fake.ServiceClient(), instanceID, opts).ExtractErr() + th.AssertNoErr(t, err) +} + +func TestUpdateUser(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + fixture.SetupHandler(t, _userURL, "PUT", updateReq, "", 202) + + opts := UpdateOpts{ + Name: "new_username", + Password: "new_password", + } + + err := Update(fake.ServiceClient(), instanceID, userName, opts).ExtractErr() + th.AssertNoErr(t, err) +} + +func TestGetUser(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + fixture.SetupHandler(t, _userURL, "GET", "", getResp, 200) + + user, err := Get(fake.ServiceClient(), instanceID, userName).Extract() + + th.AssertNoErr(t, err) + + expected := &User{ + Name: "exampleuser", + Host: "foo", + Databases: []db.Database{ + db.Database{Name: "databaseA"}, + db.Database{Name: "databaseB"}, + }, + } + + th.AssertDeepEquals(t, expected, user) +} + +func TestUserAccessList(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + fixture.SetupHandler(t, _userURL+"/databases", "GET", "", listUserAccessResp, 200) + + expectedDBs := []db.Database{ + db.Database{Name: "databaseE"}, + } + + pages := 0 + err := ListAccess(fake.ServiceClient(), instanceID, userName).EachPage(func(page pagination.Page) (bool, error) { + pages++ + + actual, err := ExtractDBs(page) + if err != nil { + return false, err + } + + th.CheckDeepEquals(t, expectedDBs, actual) + + return true, nil + }) + + th.AssertNoErr(t, err) + th.AssertEquals(t, 1, pages) +} + +func TestUserList(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + fixture.SetupHandler(t, "/instances/"+instanceID+"/users", "GET", "", listResp, 200) + + expectedUsers := []User{ + User{ + Databases: []db.Database{ + db.Database{Name: "databaseA"}, + }, + Name: "dbuser1", + Host: "localhost", + }, + User{ + Databases: []db.Database{ + db.Database{Name: "databaseB"}, + db.Database{Name: "databaseC"}, + }, + Name: "dbuser2", + Host: "localhost", + }, + } + + 
pages := 0 + err := List(fake.ServiceClient(), instanceID).EachPage(func(page pagination.Page) (bool, error) { + pages++ + + actual, err := ExtractUsers(page) + if err != nil { + return false, err + } + + th.CheckDeepEquals(t, expectedUsers, actual) + + return true, nil + }) + + th.AssertNoErr(t, err) + th.AssertEquals(t, 1, pages) +} + +func TestGrantAccess(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + fixture.SetupHandler(t, _dbURL, "PUT", grantUserAccessReq, "", 202) + + opts := db.BatchCreateOpts{db.CreateOpts{Name: "databaseE"}} + err := GrantAccess(fake.ServiceClient(), instanceID, userName, opts).ExtractErr() + th.AssertNoErr(t, err) +} + +func TestRevokeAccess(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + fixture.SetupHandler(t, _dbURL+"/{dbName}", "DELETE", "", "", 202) + + err := RevokeAccess(fake.ServiceClient(), instanceID, userName, "{dbName}").ExtractErr() + th.AssertNoErr(t, err) +} diff --git a/vendor/github.com/rackspace/gophercloud/rackspace/db/v1/users/results.go b/vendor/github.com/rackspace/gophercloud/rackspace/db/v1/users/results.go new file mode 100644 index 000000000..85b3a7aaa --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/rackspace/db/v1/users/results.go @@ -0,0 +1,149 @@ +package users + +import ( + "github.com/mitchellh/mapstructure" + "github.com/rackspace/gophercloud" + db "github.com/rackspace/gophercloud/openstack/db/v1/databases" + "github.com/rackspace/gophercloud/pagination" +) + +// User represents a database user +type User struct { + // The user name + Name string + + // The user password + Password string + + // Specifies the host from which a user is allowed to connect to the database. + // Possible values are a string containing an IPv4 address or "%" to allow + // connecting from any host. + Host string + + // The databases associated with this user + Databases []db.Database +} + +// UpdatePasswordsResult represents the result of changing a user password. +type UpdatePasswordsResult struct { + gophercloud.ErrResult +} + +// UpdateResult represents the result of updating a user. +type UpdateResult struct { + gophercloud.ErrResult +} + +// GetResult represents the result of getting a user. +type GetResult struct { + gophercloud.Result +} + +// Extract will retrieve a User struct from a getresult. +func (r GetResult) Extract() (*User, error) { + if r.Err != nil { + return nil, r.Err + } + + var response struct { + User User `mapstructure:"user"` + } + + err := mapstructure.Decode(r.Body, &response) + return &response.User, err +} + +// AccessPage represents a single page of a paginated user collection. +type AccessPage struct { + pagination.LinkedPageBase +} + +// IsEmpty checks to see whether the collection is empty. +func (page AccessPage) IsEmpty() (bool, error) { + users, err := ExtractDBs(page) + if err != nil { + return true, err + } + return len(users) == 0, nil +} + +// NextPageURL will retrieve the next page URL. +func (page AccessPage) NextPageURL() (string, error) { + type resp struct { + Links []gophercloud.Link `mapstructure:"databases_links"` + } + + var r resp + err := mapstructure.Decode(page.Body, &r) + if err != nil { + return "", err + } + + return gophercloud.ExtractNextURL(r.Links) +} + +// ExtractDBs will convert a generic pagination struct into a more +// relevant slice of DB structs. 
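+//
+// A usage sketch (assuming a pager produced by ListAccess in this package):
+//
+//	err := ListAccess(client, instanceID, userName).EachPage(func(page pagination.Page) (bool, error) {
+//		dbs, err := ExtractDBs(page)
+//		if err != nil {
+//			return false, err
+//		}
+//		// inspect the dbs slice here
+//		return true, nil
+//	})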
+func ExtractDBs(page pagination.Page) ([]db.Database, error) { + casted := page.(AccessPage).Body + + var response struct { + DBs []db.Database `mapstructure:"databases"` + } + + err := mapstructure.Decode(casted, &response) + return response.DBs, err +} + +// UserPage represents a single page of a paginated user collection. +type UserPage struct { + pagination.LinkedPageBase +} + +// IsEmpty checks to see whether the collection is empty. +func (page UserPage) IsEmpty() (bool, error) { + users, err := ExtractUsers(page) + if err != nil { + return true, err + } + return len(users) == 0, nil +} + +// NextPageURL will retrieve the next page URL. +func (page UserPage) NextPageURL() (string, error) { + type resp struct { + Links []gophercloud.Link `mapstructure:"users_links"` + } + + var r resp + err := mapstructure.Decode(page.Body, &r) + if err != nil { + return "", err + } + + return gophercloud.ExtractNextURL(r.Links) +} + +// ExtractUsers will convert a generic pagination struct into a more +// relevant slice of User structs. +func ExtractUsers(page pagination.Page) ([]User, error) { + casted := page.(UserPage).Body + + var response struct { + Users []User `mapstructure:"users"` + } + + err := mapstructure.Decode(casted, &response) + + return response.Users, err +} + +// GrantAccessResult represents the result of granting access to a user. +type GrantAccessResult struct { + gophercloud.ErrResult +} + +// RevokeAccessResult represents the result of revoking access to a user. +type RevokeAccessResult struct { + gophercloud.ErrResult +} diff --git a/vendor/github.com/rackspace/gophercloud/rackspace/db/v1/users/urls.go b/vendor/github.com/rackspace/gophercloud/rackspace/db/v1/users/urls.go new file mode 100644 index 000000000..bac8788ff --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/rackspace/db/v1/users/urls.go @@ -0,0 +1,19 @@ +package users + +import "github.com/rackspace/gophercloud" + +func baseURL(c *gophercloud.ServiceClient, instanceID string) string { + return c.ServiceURL("instances", instanceID, "users") +} + +func userURL(c *gophercloud.ServiceClient, instanceID, userName string) string { + return c.ServiceURL("instances", instanceID, "users", userName) +} + +func dbsURL(c *gophercloud.ServiceClient, instanceID, userName string) string { + return c.ServiceURL("instances", instanceID, "users", userName, "databases") +} + +func dbURL(c *gophercloud.ServiceClient, instanceID, userName, dbName string) string { + return c.ServiceURL("instances", instanceID, "users", userName, "databases", dbName) +} diff --git a/vendor/github.com/rackspace/gophercloud/rackspace/identity/v2/extensions/delegate.go b/vendor/github.com/rackspace/gophercloud/rackspace/identity/v2/extensions/delegate.go new file mode 100644 index 000000000..fc547cde5 --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/rackspace/identity/v2/extensions/delegate.go @@ -0,0 +1,24 @@ +package extensions + +import ( + "github.com/rackspace/gophercloud" + common "github.com/rackspace/gophercloud/openstack/common/extensions" + "github.com/rackspace/gophercloud/pagination" +) + +// ExtractExtensions accepts a Page struct, specifically an ExtensionPage struct, and extracts the +// elements into a slice of os.Extension structs. +func ExtractExtensions(page pagination.Page) ([]common.Extension, error) { + return common.ExtractExtensions(page) +} + +// Get retrieves information for a specific extension using its alias. 
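+//
+// For example (a sketch; "agent" is the alias used in this package's tests):
+//
+//	ext, err := Get(identityClient, "agent").Extract()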
+func Get(c *gophercloud.ServiceClient, alias string) common.GetResult {
+	return common.Get(c, alias)
+}
+
+// List returns a Pager which allows you to iterate over the full collection of extensions.
+// It does not accept query parameters.
+func List(c *gophercloud.ServiceClient) pagination.Pager {
+	return common.List(c)
+}
diff --git a/vendor/github.com/rackspace/gophercloud/rackspace/identity/v2/extensions/delegate_test.go b/vendor/github.com/rackspace/gophercloud/rackspace/identity/v2/extensions/delegate_test.go
new file mode 100644
index 000000000..e30f79404
--- /dev/null
+++ b/vendor/github.com/rackspace/gophercloud/rackspace/identity/v2/extensions/delegate_test.go
@@ -0,0 +1,39 @@
+package extensions
+
+import (
+	"testing"
+
+	common "github.com/rackspace/gophercloud/openstack/common/extensions"
+	"github.com/rackspace/gophercloud/pagination"
+	th "github.com/rackspace/gophercloud/testhelper"
+	fake "github.com/rackspace/gophercloud/testhelper/client"
+)
+
+func TestList(t *testing.T) {
+	th.SetupHTTP()
+	defer th.TeardownHTTP()
+	common.HandleListExtensionsSuccessfully(t)
+
+	count := 0
+
+	err := List(fake.ServiceClient()).EachPage(func(page pagination.Page) (bool, error) {
+		count++
+		actual, err := ExtractExtensions(page)
+		th.AssertNoErr(t, err)
+		th.AssertDeepEquals(t, common.ExpectedExtensions, actual)
+
+		return true, nil
+	})
+	th.AssertNoErr(t, err)
+	th.CheckEquals(t, 1, count)
+}
+
+func TestGet(t *testing.T) {
+	th.SetupHTTP()
+	defer th.TeardownHTTP()
+	common.HandleGetExtensionSuccessfully(t)
+
+	actual, err := Get(fake.ServiceClient(), "agent").Extract()
+	th.AssertNoErr(t, err)
+	th.CheckDeepEquals(t, common.SingleExtension, actual)
+}
diff --git a/vendor/github.com/rackspace/gophercloud/rackspace/identity/v2/extensions/doc.go b/vendor/github.com/rackspace/gophercloud/rackspace/identity/v2/extensions/doc.go
new file mode 100644
index 000000000..b02a95b53
--- /dev/null
+++ b/vendor/github.com/rackspace/gophercloud/rackspace/identity/v2/extensions/doc.go
@@ -0,0 +1,3 @@
+// Package extensions provides information and interaction with all the
+// extensions available for the Rackspace Identity service.
+package extensions
diff --git a/vendor/github.com/rackspace/gophercloud/rackspace/identity/v2/roles/delegate.go b/vendor/github.com/rackspace/gophercloud/rackspace/identity/v2/roles/delegate.go
new file mode 100644
index 000000000..a6ee8515c
--- /dev/null
+++ b/vendor/github.com/rackspace/gophercloud/rackspace/identity/v2/roles/delegate.go
@@ -0,0 +1,50 @@
+package roles
+
+import (
+	"github.com/rackspace/gophercloud"
+	"github.com/rackspace/gophercloud/pagination"
+
+	os "github.com/rackspace/gophercloud/openstack/identity/v2/extensions/admin/roles"
+)
+
+// List is the operation responsible for listing all available global roles
+// that a user can adopt.
+func List(client *gophercloud.ServiceClient) pagination.Pager {
+	return os.List(client)
+}
+
+// AddUserRole is the operation responsible for assigning a particular role to
+// a user. This is confined to the scope of the user's tenant - so the tenant
+// ID is a required argument.
+func AddUserRole(client *gophercloud.ServiceClient, userID, roleID string) UserRoleResult {
+	var result UserRoleResult
+
+	_, result.Err = client.Request("PUT", userRoleURL(client, userID, roleID), gophercloud.RequestOpts{
+		OkCodes: []int{200, 201},
+	})
+
+	return result
+}
+
+// DeleteUserRole is the operation responsible for deleting a particular role
+// from a user.
This is confined to the scope of the user's tenant - so the +// tenant ID is a required argument. +func DeleteUserRole(client *gophercloud.ServiceClient, userID, roleID string) UserRoleResult { + var result UserRoleResult + + _, result.Err = client.Request("DELETE", userRoleURL(client, userID, roleID), gophercloud.RequestOpts{ + OkCodes: []int{204}, + }) + + return result +} + +// UserRoleResult represents the result of either an AddUserRole or +// a DeleteUserRole operation. +type UserRoleResult struct { + gophercloud.ErrResult +} + +func userRoleURL(c *gophercloud.ServiceClient, userID, roleID string) string { + return c.ServiceURL(os.UserPath, userID, os.RolePath, os.ExtPath, roleID) +} diff --git a/vendor/github.com/rackspace/gophercloud/rackspace/identity/v2/roles/delegate_test.go b/vendor/github.com/rackspace/gophercloud/rackspace/identity/v2/roles/delegate_test.go new file mode 100644 index 000000000..fcee97d0b --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/rackspace/identity/v2/roles/delegate_test.go @@ -0,0 +1,66 @@ +package roles + +import ( + "testing" + + os "github.com/rackspace/gophercloud/openstack/identity/v2/extensions/admin/roles" + "github.com/rackspace/gophercloud/pagination" + th "github.com/rackspace/gophercloud/testhelper" + "github.com/rackspace/gophercloud/testhelper/client" +) + +func TestRole(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + MockListRoleResponse(t) + + count := 0 + + err := List(client.ServiceClient()).EachPage(func(page pagination.Page) (bool, error) { + count++ + actual, err := os.ExtractRoles(page) + if err != nil { + t.Errorf("Failed to extract users: %v", err) + return false, err + } + + expected := []os.Role{ + os.Role{ + ID: "123", + Name: "compute:admin", + Description: "Nova Administrator", + ServiceID: "cke5372ebabeeabb70a0e702a4626977x4406e5", + }, + } + + th.CheckDeepEquals(t, expected, actual) + + return true, nil + }) + + th.AssertNoErr(t, err) + th.AssertEquals(t, 1, count) +} + +func TestAddUserRole(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + MockAddUserRoleResponse(t) + + err := AddUserRole(client.ServiceClient(), "{user_id}", "{role_id}").ExtractErr() + + th.AssertNoErr(t, err) +} + +func TestDeleteUserRole(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + MockDeleteUserRoleResponse(t) + + err := DeleteUserRole(client.ServiceClient(), "{user_id}", "{role_id}").ExtractErr() + + th.AssertNoErr(t, err) +} diff --git a/vendor/github.com/rackspace/gophercloud/rackspace/identity/v2/roles/fixtures.go b/vendor/github.com/rackspace/gophercloud/rackspace/identity/v2/roles/fixtures.go new file mode 100644 index 000000000..5f22d0f64 --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/rackspace/identity/v2/roles/fixtures.go @@ -0,0 +1,49 @@ +package roles + +import ( + "fmt" + "net/http" + "testing" + + th "github.com/rackspace/gophercloud/testhelper" + fake "github.com/rackspace/gophercloud/testhelper/client" +) + +func MockListRoleResponse(t *testing.T) { + th.Mux.HandleFunc("/OS-KSADM/roles", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + + fmt.Fprintf(w, ` +{ + "roles": [ + { + "id": "123", + "name": "compute:admin", + "description": "Nova Administrator", + "serviceId": "cke5372ebabeeabb70a0e702a4626977x4406e5" + } + ] +} + `) + }) +} + +func MockAddUserRoleResponse(t *testing.T) { + 
th.Mux.HandleFunc("/users/{user_id}/roles/OS-KSADM/{role_id}", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "PUT") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + w.WriteHeader(http.StatusCreated) + }) +} + +func MockDeleteUserRoleResponse(t *testing.T) { + th.Mux.HandleFunc("/users/{user_id}/roles/OS-KSADM/{role_id}", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "DELETE") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + w.WriteHeader(http.StatusNoContent) + }) +} diff --git a/vendor/github.com/rackspace/gophercloud/rackspace/identity/v2/tenants/delegate.go b/vendor/github.com/rackspace/gophercloud/rackspace/identity/v2/tenants/delegate.go new file mode 100644 index 000000000..6cdd0cfbd --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/rackspace/identity/v2/tenants/delegate.go @@ -0,0 +1,17 @@ +package tenants + +import ( + "github.com/rackspace/gophercloud" + os "github.com/rackspace/gophercloud/openstack/identity/v2/tenants" + "github.com/rackspace/gophercloud/pagination" +) + +// ExtractTenants interprets a page of List results as a more usable slice of Tenant structs. +func ExtractTenants(page pagination.Page) ([]os.Tenant, error) { + return os.ExtractTenants(page) +} + +// List enumerates the tenants to which the current token grants access. +func List(client *gophercloud.ServiceClient, opts *os.ListOpts) pagination.Pager { + return os.List(client, opts) +} diff --git a/vendor/github.com/rackspace/gophercloud/rackspace/identity/v2/tenants/delegate_test.go b/vendor/github.com/rackspace/gophercloud/rackspace/identity/v2/tenants/delegate_test.go new file mode 100644 index 000000000..eccbfe23e --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/rackspace/identity/v2/tenants/delegate_test.go @@ -0,0 +1,28 @@ +package tenants + +import ( + "testing" + + os "github.com/rackspace/gophercloud/openstack/identity/v2/tenants" + "github.com/rackspace/gophercloud/pagination" + th "github.com/rackspace/gophercloud/testhelper" + fake "github.com/rackspace/gophercloud/testhelper/client" +) + +func TestListTenants(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + os.HandleListTenantsSuccessfully(t) + + count := 0 + err := List(fake.ServiceClient(), nil).EachPage(func(page pagination.Page) (bool, error) { + actual, err := ExtractTenants(page) + th.AssertNoErr(t, err) + th.CheckDeepEquals(t, os.ExpectedTenantSlice, actual) + + count++ + return true, nil + }) + th.AssertNoErr(t, err) + th.CheckEquals(t, 1, count) +} diff --git a/vendor/github.com/rackspace/gophercloud/rackspace/identity/v2/tenants/doc.go b/vendor/github.com/rackspace/gophercloud/rackspace/identity/v2/tenants/doc.go new file mode 100644 index 000000000..c1825c21d --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/rackspace/identity/v2/tenants/doc.go @@ -0,0 +1,3 @@ +// Package tenants provides information and interaction with the tenant +// API resource for the Rackspace Identity service. 
+package tenants
diff --git a/vendor/github.com/rackspace/gophercloud/rackspace/identity/v2/tokens/delegate.go b/vendor/github.com/rackspace/gophercloud/rackspace/identity/v2/tokens/delegate.go
new file mode 100644
index 000000000..4f9885af0
--- /dev/null
+++ b/vendor/github.com/rackspace/gophercloud/rackspace/identity/v2/tokens/delegate.go
@@ -0,0 +1,60 @@
+package tokens
+
+import (
+	"errors"
+
+	"github.com/rackspace/gophercloud"
+	os "github.com/rackspace/gophercloud/openstack/identity/v2/tokens"
+)
+
+var (
+	// ErrPasswordProvided is returned if both a password and an API key are provided to Create.
+	ErrPasswordProvided = errors.New("Please provide either a password or an API key.")
+)
+
+// AuthOptions wraps the OpenStack AuthOptions struct to be able to customize the request body
+// when API key authentication is used.
+type AuthOptions struct {
+	os.AuthOptions
+}
+
+// WrapOptions embeds a root AuthOptions struct in a package-specific one.
+func WrapOptions(original gophercloud.AuthOptions) AuthOptions {
+	return AuthOptions{AuthOptions: os.WrapOptions(original)}
+}
+
+// ToTokenCreateMap serializes an AuthOptions into a request body. If an API key is provided, it
+// will be used; otherwise the embedded OpenStack options (username and password) are serialized.
+func (auth AuthOptions) ToTokenCreateMap() (map[string]interface{}, error) {
+	if auth.APIKey == "" {
+		return auth.AuthOptions.ToTokenCreateMap()
+	}
+
+	// Verify that other required attributes are present.
+	if auth.Username == "" {
+		return nil, os.ErrUsernameRequired
+	}
+
+	authMap := make(map[string]interface{})
+
+	authMap["RAX-KSKEY:apiKeyCredentials"] = map[string]interface{}{
+		"username": auth.Username,
+		"apiKey":   auth.APIKey,
+	}
+
+	if auth.TenantID != "" {
+		authMap["tenantId"] = auth.TenantID
+	}
+	if auth.TenantName != "" {
+		authMap["tenantName"] = auth.TenantName
+	}
+
+	return map[string]interface{}{"auth": authMap}, nil
+}
+
+// Create authenticates to Rackspace's identity service and attempts to acquire a Token. Rather
+// than interact with this service directly, users should generally call
+// rackspace.AuthenticatedClient().
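+//
+// For example (a sketch; the credentials mirror this package's tests, and
+// ExtractToken is assumed to be available on the embedded OpenStack CreateResult):
+//
+//	opts := WrapOptions(gophercloud.AuthOptions{
+//		Username: "me",
+//		APIKey:   "1234567890abcdef",
+//	})
+//	token, err := Create(identityClient, opts).ExtractToken()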
+func Create(client *gophercloud.ServiceClient, auth AuthOptions) os.CreateResult { + return os.Create(client, auth) +} diff --git a/vendor/github.com/rackspace/gophercloud/rackspace/identity/v2/tokens/delegate_test.go b/vendor/github.com/rackspace/gophercloud/rackspace/identity/v2/tokens/delegate_test.go new file mode 100644 index 000000000..6678ff4a7 --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/rackspace/identity/v2/tokens/delegate_test.go @@ -0,0 +1,36 @@ +package tokens + +import ( + "testing" + + "github.com/rackspace/gophercloud" + os "github.com/rackspace/gophercloud/openstack/identity/v2/tokens" + th "github.com/rackspace/gophercloud/testhelper" + "github.com/rackspace/gophercloud/testhelper/client" +) + +func tokenPost(t *testing.T, options gophercloud.AuthOptions, requestJSON string) os.CreateResult { + th.SetupHTTP() + defer th.TeardownHTTP() + os.HandleTokenPost(t, requestJSON) + + return Create(client.ServiceClient(), WrapOptions(options)) +} + +func TestCreateTokenWithAPIKey(t *testing.T) { + options := gophercloud.AuthOptions{ + Username: "me", + APIKey: "1234567890abcdef", + } + + os.IsSuccessful(t, tokenPost(t, options, ` + { + "auth": { + "RAX-KSKEY:apiKeyCredentials": { + "username": "me", + "apiKey": "1234567890abcdef" + } + } + } + `)) +} diff --git a/vendor/github.com/rackspace/gophercloud/rackspace/identity/v2/tokens/doc.go b/vendor/github.com/rackspace/gophercloud/rackspace/identity/v2/tokens/doc.go new file mode 100644 index 000000000..44043e5e1 --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/rackspace/identity/v2/tokens/doc.go @@ -0,0 +1,3 @@ +// Package tokens provides information and interaction with the token +// API resource for the Rackspace Identity service. +package tokens diff --git a/vendor/github.com/rackspace/gophercloud/rackspace/identity/v2/users/delegate.go b/vendor/github.com/rackspace/gophercloud/rackspace/identity/v2/users/delegate.go new file mode 100644 index 000000000..6135bec10 --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/rackspace/identity/v2/users/delegate.go @@ -0,0 +1,142 @@ +package users + +import ( + "errors" + + "github.com/rackspace/gophercloud" + os "github.com/rackspace/gophercloud/openstack/identity/v2/users" + "github.com/rackspace/gophercloud/pagination" +) + +// List returns a pager that allows traversal over a collection of users. +func List(client *gophercloud.ServiceClient) pagination.Pager { + return os.List(client) +} + +// CommonOpts are the options which are shared between CreateOpts and +// UpdateOpts +type CommonOpts struct { + // Required. The username to assign to the user. When provided, the username + // must: + // - start with an alphabetical (A-Za-z) character + // - have a minimum length of 1 character + // + // The username may contain upper and lowercase characters, as well as any of + // the following special character: . - @ _ + Username string + + // Required. Email address for the user account. + Email string + + // Required. Indicates whether the user can authenticate after the user + // account is created. If no value is specified, the default value is true. + Enabled os.EnabledState + + // Optional. The password to assign to the user. If provided, the password + // must: + // - start with an alphabetical (A-Za-z) character + // - have a minimum length of 8 characters + // - contain at least one uppercase character, one lowercase character, and + // one numeric character. + // + // The password may contain any of the following special characters: . 
- @ _
+	Password string
+}
+
+// CreateOpts represents the options needed when creating new users.
+type CreateOpts CommonOpts
+
+// ToUserCreateMap assembles a request body based on the contents of a CreateOpts.
+func (opts CreateOpts) ToUserCreateMap() (map[string]interface{}, error) {
+	m := make(map[string]interface{})
+
+	if opts.Username == "" {
+		return m, errors.New("Username is a required field")
+	}
+	if opts.Enabled == nil {
+		return m, errors.New("Enabled is a required field")
+	}
+	if opts.Email == "" {
+		return m, errors.New("Email is a required field")
+	}
+
+	if opts.Username != "" {
+		m["username"] = opts.Username
+	}
+	if opts.Email != "" {
+		m["email"] = opts.Email
+	}
+	if opts.Enabled != nil {
+		m["enabled"] = opts.Enabled
+	}
+	if opts.Password != "" {
+		m["OS-KSADM:password"] = opts.Password
+	}
+
+	return map[string]interface{}{"user": m}, nil
+}
+
+// Create is the operation responsible for creating new users.
+func Create(client *gophercloud.ServiceClient, opts os.CreateOptsBuilder) CreateResult {
+	return CreateResult{os.Create(client, opts)}
+}
+
+// Get requests details on a single user by its ID.
+func Get(client *gophercloud.ServiceClient, id string) GetResult {
+	return GetResult{os.Get(client, id)}
+}
+
+// UpdateOptsBuilder allows extensions to add additional attributes to the Update request.
+type UpdateOptsBuilder interface {
+	ToUserUpdateMap() map[string]interface{}
+}
+
+// UpdateOpts specifies the base attributes that may be updated on an existing user.
+type UpdateOpts CommonOpts
+
+// ToUserUpdateMap formats an UpdateOpts structure into a request body.
+func (opts UpdateOpts) ToUserUpdateMap() map[string]interface{} {
+	m := make(map[string]interface{})
+
+	if opts.Username != "" {
+		m["username"] = opts.Username
+	}
+	if opts.Enabled != nil {
+		m["enabled"] = &opts.Enabled
+	}
+	if opts.Email != "" {
+		m["email"] = opts.Email
+	}
+
+	return map[string]interface{}{"user": m}
+}
+
+// Update is the operation responsible for updating existing users by their UUID.
+func Update(client *gophercloud.ServiceClient, id string, opts UpdateOptsBuilder) UpdateResult {
+	var result UpdateResult
+
+	_, result.Err = client.Request("POST", os.ResourceURL(client, id), gophercloud.RequestOpts{
+		JSONResponse: &result.Body,
+		JSONBody:     opts.ToUserUpdateMap(),
+		OkCodes:      []int{200},
+	})
+
+	return result
+}
+
+// Delete is the operation responsible for permanently deleting an API user.
+func Delete(client *gophercloud.ServiceClient, id string) os.DeleteResult {
+	return os.Delete(client, id)
+}
+
+// ResetAPIKey resets the User's API key.
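+//
+// For example (a sketch; user ID "99" mirrors this package's tests):
+//
+//	apiKey, err := ResetAPIKey(client, "99").Extract()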
+func ResetAPIKey(client *gophercloud.ServiceClient, id string) ResetAPIKeyResult { + var result ResetAPIKeyResult + + _, result.Err = client.Request("POST", resetAPIKeyURL(client, id), gophercloud.RequestOpts{ + JSONResponse: &result.Body, + OkCodes: []int{200}, + }) + + return result +} diff --git a/vendor/github.com/rackspace/gophercloud/rackspace/identity/v2/users/delegate_test.go b/vendor/github.com/rackspace/gophercloud/rackspace/identity/v2/users/delegate_test.go new file mode 100644 index 000000000..62faf0c5b --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/rackspace/identity/v2/users/delegate_test.go @@ -0,0 +1,111 @@ +package users + +import ( + "testing" + + os "github.com/rackspace/gophercloud/openstack/identity/v2/users" + "github.com/rackspace/gophercloud/pagination" + th "github.com/rackspace/gophercloud/testhelper" + "github.com/rackspace/gophercloud/testhelper/client" +) + +func TestList(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + mockListResponse(t) + + count := 0 + + err := List(client.ServiceClient()).EachPage(func(page pagination.Page) (bool, error) { + count++ + users, err := os.ExtractUsers(page) + + th.AssertNoErr(t, err) + th.AssertEquals(t, "u1000", users[0].ID) + th.AssertEquals(t, "u1001", users[1].ID) + + return true, nil + }) + + th.AssertNoErr(t, err) + th.AssertEquals(t, 1, count) +} + +func TestCreateUser(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + mockCreateUser(t) + + opts := CreateOpts{ + Username: "new_user", + Enabled: os.Disabled, + Email: "new_user@foo.com", + Password: "foo", + } + + user, err := Create(client.ServiceClient(), opts).Extract() + + th.AssertNoErr(t, err) + + th.AssertEquals(t, "123456", user.ID) + th.AssertEquals(t, "5830280", user.DomainID) + th.AssertEquals(t, "DFW", user.DefaultRegion) +} + +func TestGetUser(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + mockGetUser(t) + + user, err := Get(client.ServiceClient(), "new_user").Extract() + th.AssertNoErr(t, err) + + th.AssertEquals(t, true, user.Enabled) + th.AssertEquals(t, true, user.MultiFactorEnabled) +} + +func TestUpdateUser(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + mockUpdateUser(t) + + id := "c39e3de9be2d4c779f1dfd6abacc176d" + + opts := UpdateOpts{ + Enabled: os.Enabled, + Email: "new_email@foo.com", + } + + user, err := Update(client.ServiceClient(), id, opts).Extract() + + th.AssertNoErr(t, err) + + th.AssertEquals(t, true, user.Enabled) + th.AssertEquals(t, "new_email@foo.com", user.Email) +} + +func TestDeleteServer(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + mockDeleteUser(t) + + res := Delete(client.ServiceClient(), "c39e3de9be2d4c779f1dfd6abacc176d") + th.AssertNoErr(t, res.Err) +} + +func TestResetAPIKey(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + mockResetAPIKey(t) + + apiKey, err := ResetAPIKey(client.ServiceClient(), "99").Extract() + th.AssertNoErr(t, err) + th.AssertEquals(t, "joesmith", apiKey.Username) + th.AssertEquals(t, "mooH1eiLahd5ahYood7r", apiKey.APIKey) +} diff --git a/vendor/github.com/rackspace/gophercloud/rackspace/identity/v2/users/fixtures.go b/vendor/github.com/rackspace/gophercloud/rackspace/identity/v2/users/fixtures.go new file mode 100644 index 000000000..973f39ea8 --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/rackspace/identity/v2/users/fixtures.go @@ -0,0 +1,154 @@ +package users + +import ( + "fmt" + "net/http" + "testing" + + th "github.com/rackspace/gophercloud/testhelper" + fake 
"github.com/rackspace/gophercloud/testhelper/client" +) + +func mockListResponse(t *testing.T) { + th.Mux.HandleFunc("/users", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + + fmt.Fprintf(w, ` +{ + "users":[ + { + "id": "u1000", + "username": "jqsmith", + "email": "john.smith@example.org", + "enabled": true + }, + { + "id": "u1001", + "username": "jqsmith", + "email": "jane.smith@example.org", + "enabled": true + } + ] +} + `) + }) +} + +func mockCreateUser(t *testing.T) { + th.Mux.HandleFunc("/users", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "POST") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + + th.TestJSONRequest(t, r, ` +{ + "user": { + "username": "new_user", + "enabled": false, + "email": "new_user@foo.com", + "OS-KSADM:password": "foo" + } +} + `) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + + fmt.Fprintf(w, ` +{ + "user": { + "RAX-AUTH:defaultRegion": "DFW", + "RAX-AUTH:domainId": "5830280", + "id": "123456", + "username": "new_user", + "email": "new_user@foo.com", + "enabled": false + } +} +`) + }) +} + +func mockGetUser(t *testing.T) { + th.Mux.HandleFunc("/users/new_user", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + + fmt.Fprintf(w, ` +{ + "user": { + "RAX-AUTH:defaultRegion": "DFW", + "RAX-AUTH:domainId": "5830280", + "RAX-AUTH:multiFactorEnabled": "true", + "id": "c39e3de9be2d4c779f1dfd6abacc176d", + "username": "jqsmith", + "email": "john.smith@example.org", + "enabled": true + } +} +`) + }) +} + +func mockUpdateUser(t *testing.T) { + th.Mux.HandleFunc("/users/c39e3de9be2d4c779f1dfd6abacc176d", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "POST") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + + th.TestJSONRequest(t, r, ` +{ + "user": { + "email": "new_email@foo.com", + "enabled": true + } +} +`) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + + fmt.Fprintf(w, ` +{ + "user": { + "RAX-AUTH:defaultRegion": "DFW", + "RAX-AUTH:domainId": "5830280", + "RAX-AUTH:multiFactorEnabled": "true", + "id": "123456", + "username": "jqsmith", + "email": "new_email@foo.com", + "enabled": true + } +} +`) + }) +} + +func mockDeleteUser(t *testing.T) { + th.Mux.HandleFunc("/users/c39e3de9be2d4c779f1dfd6abacc176d", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "DELETE") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + w.WriteHeader(http.StatusNoContent) + }) +} + +func mockResetAPIKey(t *testing.T) { + th.Mux.HandleFunc("/users/99/OS-KSADM/credentials/RAX-KSKEY:apiKeyCredentials/RAX-AUTH/reset", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "POST") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + fmt.Fprintf(w, ` +{ + "RAX-KSKEY:apiKeyCredentials": { + "username": "joesmith", + "apiKey": "mooH1eiLahd5ahYood7r" + } +}`) + }) +} diff --git a/vendor/github.com/rackspace/gophercloud/rackspace/identity/v2/users/results.go b/vendor/github.com/rackspace/gophercloud/rackspace/identity/v2/users/results.go new file mode 100644 index 000000000..6936ecba8 --- /dev/null +++ 
b/vendor/github.com/rackspace/gophercloud/rackspace/identity/v2/users/results.go
@@ -0,0 +1,129 @@
+package users
+
+import (
+	"strconv"
+
+	"github.com/rackspace/gophercloud"
+	os "github.com/rackspace/gophercloud/openstack/identity/v2/users"
+
+	"github.com/mitchellh/mapstructure"
+)
+
+// User represents a user resource that exists on the API.
+type User struct {
+	// The UUID for this user.
+	ID string
+
+	// The human name for this user.
+	Name string
+
+	// The username for this user.
+	Username string
+
+	// Indicates whether the user is enabled (true) or disabled (false).
+	Enabled bool
+
+	// The email address for this user.
+	Email string
+
+	// The ID of the tenant to which this user belongs.
+	TenantID string `mapstructure:"tenant_id"`
+
+	// Specifies the default region for the user account. This value is inherited
+	// from the user administrator when the account is created.
+	DefaultRegion string `mapstructure:"RAX-AUTH:defaultRegion"`
+
+	// Identifies the domain that contains the user account. This value is
+	// inherited from the user administrator when the account is created.
+	DomainID string `mapstructure:"RAX-AUTH:domainId"`
+
+	// The password value that the user needs for authentication. If the Add user
+	// request included a password value, this attribute is not included in the
+	// response.
+	Password string `mapstructure:"OS-KSADM:password"`
+
+	// Indicates whether the user has enabled multi-factor authentication.
+	MultiFactorEnabled bool `mapstructure:"RAX-AUTH:multiFactorEnabled"`
+}
+
+// CreateResult represents the result of a Create operation.
+type CreateResult struct {
+	os.CreateResult
+}
+
+// GetResult represents the result of a Get operation.
+type GetResult struct {
+	os.GetResult
+}
+
+// UpdateResult represents the result of an Update operation.
+type UpdateResult struct {
+	os.UpdateResult
+}
+
+func commonExtract(resp interface{}, err error) (*User, error) {
+	if err != nil {
+		return nil, err
+	}
+
+	var respStruct struct {
+		User *User `json:"user"`
+	}
+
+	// Since the API returns a string instead of a bool, we need to hack the JSON
+	json := resp.(map[string]interface{})
+	user := json["user"].(map[string]interface{})
+	if s, ok := user["RAX-AUTH:multiFactorEnabled"].(string); ok && s != "" {
+		if b, err := strconv.ParseBool(s); err == nil {
+			user["RAX-AUTH:multiFactorEnabled"] = b
+		}
+	}
+
+	err = mapstructure.Decode(json, &respStruct)
+
+	return respStruct.User, err
+}
+
+// Extract will get the User object out of the GetResult object.
+func (r GetResult) Extract() (*User, error) {
+	return commonExtract(r.Body, r.Err)
+}
+
+// Extract will get the User object out of the CreateResult object.
+func (r CreateResult) Extract() (*User, error) {
+	return commonExtract(r.Body, r.Err)
+}
+
+// Extract will get the User object out of the UpdateResult object.
+func (r UpdateResult) Extract() (*User, error) {
+	return commonExtract(r.Body, r.Err)
+}
+
+// ResetAPIKeyResult represents the server response to the ResetAPIKey method.
+type ResetAPIKeyResult struct {
+	gophercloud.Result
+}
+
+// ResetAPIKeyValue represents an API Key that has been reset.
+type ResetAPIKeyValue struct {
+	// The Username for this API Key reset.
+	Username string `mapstructure:"username"`
+
+	// The new API Key for this user.
+	APIKey string `mapstructure:"apiKey"`
+}
+
+// Extract will get the Error or ResetAPIKeyValue object out of the ResetAPIKeyResult object.
+func (r ResetAPIKeyResult) Extract() (*ResetAPIKeyValue, error) {
+	if r.Err != nil {
+		return nil, r.Err
+	}
+
+	var response struct {
+		ResetAPIKeyValue ResetAPIKeyValue `mapstructure:"RAX-KSKEY:apiKeyCredentials"`
+	}
+
+	err := mapstructure.Decode(r.Body, &response)
+
+	return &response.ResetAPIKeyValue, err
+}
diff --git a/vendor/github.com/rackspace/gophercloud/rackspace/identity/v2/users/urls.go b/vendor/github.com/rackspace/gophercloud/rackspace/identity/v2/users/urls.go
new file mode 100644
index 000000000..bc1aaefb0
--- /dev/null
+++ b/vendor/github.com/rackspace/gophercloud/rackspace/identity/v2/users/urls.go
@@ -0,0 +1,7 @@
+package users
+
+import "github.com/rackspace/gophercloud"
+
+func resetAPIKeyURL(client *gophercloud.ServiceClient, id string) string {
+	return client.ServiceURL("users", id, "OS-KSADM", "credentials", "RAX-KSKEY:apiKeyCredentials", "RAX-AUTH", "reset")
+}
diff --git a/vendor/github.com/rackspace/gophercloud/rackspace/lb/v1/acl/doc.go b/vendor/github.com/rackspace/gophercloud/rackspace/lb/v1/acl/doc.go
new file mode 100644
index 000000000..42325fe83
--- /dev/null
+++ b/vendor/github.com/rackspace/gophercloud/rackspace/lb/v1/acl/doc.go
@@ -0,0 +1,12 @@
+/*
+Package acl provides information and interaction with the access lists feature
+of the Rackspace Cloud Load Balancer service.
+
+The access list management feature allows fine-grained network access controls
+to be applied to the load balancer's virtual IP address. A single IP address,
+multiple IP addresses, or entire network subnets can be added. Items that are
+configured with the ALLOW type always take precedence over items with the DENY
+type. To reject traffic from all items except for those with the ALLOW type,
+add a networkItem with an address of "0.0.0.0/0" and a DENY type.
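+
+For example (a sketch; "client" stands in for a configured load balancer
+ServiceClient and 12345 for a load balancer ID, as in this package's tests):
+
+	opts := acl.CreateOpts{
+		acl.CreateOpt{Address: "0.0.0.0/0", Type: acl.DENY},
+	}
+	err := acl.Create(client, 12345, opts).ExtractErr()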
+*/ +package acl diff --git a/vendor/github.com/rackspace/gophercloud/rackspace/lb/v1/acl/fixtures.go b/vendor/github.com/rackspace/gophercloud/rackspace/lb/v1/acl/fixtures.go new file mode 100644 index 000000000..e3c941c81 --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/rackspace/lb/v1/acl/fixtures.go @@ -0,0 +1,109 @@ +package acl + +import ( + "fmt" + "net/http" + "strconv" + "testing" + + th "github.com/rackspace/gophercloud/testhelper" + fake "github.com/rackspace/gophercloud/testhelper/client" +) + +func _rootURL(lbID int) string { + return "/loadbalancers/" + strconv.Itoa(lbID) + "/accesslist" +} + +func mockListResponse(t *testing.T, id int) { + th.Mux.HandleFunc(_rootURL(id), func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + + fmt.Fprintf(w, ` +{ + "accessList": [ + { + "address": "206.160.163.21", + "id": 21, + "type": "DENY" + }, + { + "address": "206.160.163.22", + "id": 22, + "type": "DENY" + }, + { + "address": "206.160.163.23", + "id": 23, + "type": "DENY" + }, + { + "address": "206.160.163.24", + "id": 24, + "type": "DENY" + } + ] +} + `) + }) +} + +func mockCreateResponse(t *testing.T, lbID int) { + th.Mux.HandleFunc(_rootURL(lbID), func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "POST") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + + th.TestJSONRequest(t, r, ` +{ + "accessList": [ + { + "address": "206.160.163.21", + "type": "DENY" + }, + { + "address": "206.160.165.11", + "type": "DENY" + } + ] +} + `) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusAccepted) + }) +} + +func mockDeleteAllResponse(t *testing.T, lbID int) { + th.Mux.HandleFunc(_rootURL(lbID), func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "DELETE") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + w.WriteHeader(http.StatusAccepted) + }) +} + +func mockBatchDeleteResponse(t *testing.T, lbID int, ids []int) { + th.Mux.HandleFunc(_rootURL(lbID), func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "DELETE") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + + r.ParseForm() + + for k, v := range ids { + fids := r.Form["id"] + th.AssertEquals(t, strconv.Itoa(v), fids[k]) + } + + w.WriteHeader(http.StatusAccepted) + }) +} + +func mockDeleteResponse(t *testing.T, lbID, networkID int) { + th.Mux.HandleFunc(_rootURL(lbID)+"/"+strconv.Itoa(networkID), func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "DELETE") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + w.WriteHeader(http.StatusAccepted) + }) +} diff --git a/vendor/github.com/rackspace/gophercloud/rackspace/lb/v1/acl/requests.go b/vendor/github.com/rackspace/gophercloud/rackspace/lb/v1/acl/requests.go new file mode 100644 index 000000000..d4ce7c01f --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/rackspace/lb/v1/acl/requests.go @@ -0,0 +1,111 @@ +package acl + +import ( + "errors" + "fmt" + + "github.com/rackspace/gophercloud" + "github.com/rackspace/gophercloud/pagination" +) + +// List is the operation responsible for returning a paginated collection of +// network items that define a load balancer's access list. 
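+//
+// A usage sketch (load balancer ID 12345 mirrors this package's tests):
+//
+//	err := List(client, 12345).EachPage(func(page pagination.Page) (bool, error) {
+//		items, err := ExtractAccessList(page)
+//		if err != nil {
+//			return false, err
+//		}
+//		// filter or display the network items here
+//		return true, nil
+//	})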
+func List(client *gophercloud.ServiceClient, lbID int) pagination.Pager {
+	url := rootURL(client, lbID)
+
+	return pagination.NewPager(client, url, func(r pagination.PageResult) pagination.Page {
+		return AccessListPage{pagination.SinglePageBase(r)}
+	})
+}
+
+// CreateOptsBuilder is the interface responsible for generating the JSON
+// for a Create operation.
+type CreateOptsBuilder interface {
+	ToAccessListCreateMap() (map[string]interface{}, error)
+}
+
+// CreateOpts is a slice of CreateOpt structs that allows the user to create
+// multiple network items in a single operation (one item per CreateOpt).
+type CreateOpts []CreateOpt
+
+// CreateOpt represents the options to create a single network item.
+type CreateOpt struct {
+	// Required - the IP address or CIDR for the item to add to the access list.
+	Address string
+
+	// Required - the type of the network item. Either ALLOW or DENY.
+	Type Type
+}
+
+// ToAccessListCreateMap converts a slice of options into a map that can be
+// used for the JSON.
+func (opts CreateOpts) ToAccessListCreateMap() (map[string]interface{}, error) {
+	type itemMap map[string]interface{}
+	items := []itemMap{}
+
+	for k, v := range opts {
+		if v.Address == "" {
+			return itemMap{}, fmt.Errorf("Address is a required attribute, none provided for CreateOpt element %d", k)
+		}
+		if v.Type != ALLOW && v.Type != DENY {
+			return itemMap{}, fmt.Errorf("Type must be ALLOW or DENY")
+		}
+
+		item := make(itemMap)
+		item["address"] = v.Address
+		item["type"] = v.Type
+
+		items = append(items, item)
+	}
+
+	return itemMap{"accessList": items}, nil
+}
+
+// Create is the operation responsible for adding network items to the access
+// rules for a particular load balancer. If network items already exist, the
+// new item will be appended. A single IP address or subnet range is considered
+// unique and cannot be duplicated.
+func Create(client *gophercloud.ServiceClient, loadBalancerID int, opts CreateOptsBuilder) CreateResult {
+	var res CreateResult
+
+	reqBody, err := opts.ToAccessListCreateMap()
+	if err != nil {
+		res.Err = err
+		return res
+	}
+
+	_, res.Err = client.Post(rootURL(client, loadBalancerID), reqBody, nil, nil)
+	return res
+}
+
+// BulkDelete will delete multiple network items from a load balancer's access
+// list in a single operation.
+func BulkDelete(c *gophercloud.ServiceClient, loadBalancerID int, itemIDs []int) DeleteResult {
+	var res DeleteResult
+
+	if len(itemIDs) > 10 || len(itemIDs) == 0 {
+		res.Err = errors.New("You must provide a minimum of 1 and a maximum of 10 item IDs")
+		return res
+	}
+
+	url := rootURL(c, loadBalancerID)
+	url += gophercloud.IDSliceToQueryString("id", itemIDs)
+
+	_, res.Err = c.Delete(url, nil)
+	return res
+}
+
+// Delete will remove a single network item from a load balancer's access list.
+func Delete(c *gophercloud.ServiceClient, lbID, itemID int) DeleteResult {
+	var res DeleteResult
+	_, res.Err = c.Delete(resourceURL(c, lbID, itemID), nil)
+	return res
+}
+
+// DeleteAll will delete the entire contents of a load balancer's access list,
+// effectively resetting it and allowing all traffic.
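+//
+// For example (a sketch):
+//
+//	err := DeleteAll(client, lbID).ExtractErr()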
+func DeleteAll(c *gophercloud.ServiceClient, lbID int) DeleteResult { + var res DeleteResult + _, res.Err = c.Delete(rootURL(c, lbID), nil) + return res +} diff --git a/vendor/github.com/rackspace/gophercloud/rackspace/lb/v1/acl/requests_test.go b/vendor/github.com/rackspace/gophercloud/rackspace/lb/v1/acl/requests_test.go new file mode 100644 index 000000000..c4961a3dd --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/rackspace/lb/v1/acl/requests_test.go @@ -0,0 +1,91 @@ +package acl + +import ( + "testing" + + "github.com/rackspace/gophercloud/pagination" + th "github.com/rackspace/gophercloud/testhelper" + "github.com/rackspace/gophercloud/testhelper/client" +) + +const ( + lbID = 12345 + itemID1 = 67890 + itemID2 = 67891 +) + +func TestList(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + mockListResponse(t, lbID) + + count := 0 + + err := List(client.ServiceClient(), lbID).EachPage(func(page pagination.Page) (bool, error) { + count++ + actual, err := ExtractAccessList(page) + th.AssertNoErr(t, err) + + expected := AccessList{ + NetworkItem{Address: "206.160.163.21", ID: 21, Type: DENY}, + NetworkItem{Address: "206.160.163.22", ID: 22, Type: DENY}, + NetworkItem{Address: "206.160.163.23", ID: 23, Type: DENY}, + NetworkItem{Address: "206.160.163.24", ID: 24, Type: DENY}, + } + + th.CheckDeepEquals(t, expected, actual) + + return true, nil + }) + + th.AssertNoErr(t, err) + th.AssertEquals(t, 1, count) +} + +func TestCreate(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + mockCreateResponse(t, lbID) + + opts := CreateOpts{ + CreateOpt{Address: "206.160.163.21", Type: DENY}, + CreateOpt{Address: "206.160.165.11", Type: DENY}, + } + + err := Create(client.ServiceClient(), lbID, opts).ExtractErr() + th.AssertNoErr(t, err) +} + +func TestBulkDelete(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + ids := []int{itemID1, itemID2} + + mockBatchDeleteResponse(t, lbID, ids) + + err := BulkDelete(client.ServiceClient(), lbID, ids).ExtractErr() + th.AssertNoErr(t, err) +} + +func TestDelete(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + mockDeleteResponse(t, lbID, itemID1) + + err := Delete(client.ServiceClient(), lbID, itemID1).ExtractErr() + th.AssertNoErr(t, err) +} + +func TestDeleteAll(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + mockDeleteAllResponse(t, lbID) + + err := DeleteAll(client.ServiceClient(), lbID).ExtractErr() + th.AssertNoErr(t, err) +} diff --git a/vendor/github.com/rackspace/gophercloud/rackspace/lb/v1/acl/results.go b/vendor/github.com/rackspace/gophercloud/rackspace/lb/v1/acl/results.go new file mode 100644 index 000000000..9ea5ea2f4 --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/rackspace/lb/v1/acl/results.go @@ -0,0 +1,72 @@ +package acl + +import ( + "github.com/mitchellh/mapstructure" + + "github.com/rackspace/gophercloud" + "github.com/rackspace/gophercloud/pagination" +) + +// AccessList represents the rules of network access to a particular load +// balancer. +type AccessList []NetworkItem + +// NetworkItem describes how an IP address or entire subnet may interact with a +// load balancer. +type NetworkItem struct { + // The IP address or subnet (CIDR) that defines the network item. + Address string + + // The numeric unique ID for this item. + ID int + + // Either ALLOW or DENY. + Type Type +} + +// Type defines how an item may connect to the load balancer. +type Type string + +// Convenience consts. 
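+//
+// These are the only two valid values for a NetworkItem's Type. For example,
+// a hypothetical rule that blocks a single client address could be expressed
+// as (a sketch; the address is illustrative):
+//
+//	item := CreateOpt{Address: "203.0.113.24", Type: DENY}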
+const ( + ALLOW Type = "ALLOW" + DENY Type = "DENY" +) + +// AccessListPage is the page returned by a pager for traversing over a +// collection of network items in an access list. +type AccessListPage struct { + pagination.SinglePageBase +} + +// IsEmpty checks whether an AccessListPage struct is empty. +func (p AccessListPage) IsEmpty() (bool, error) { + is, err := ExtractAccessList(p) + if err != nil { + return true, nil + } + return len(is) == 0, nil +} + +// ExtractAccessList accepts a Page struct, specifically an AccessListPage +// struct, and extracts the elements into a slice of NetworkItem structs. In +// other words, a generic collection is mapped into a relevant slice. +func ExtractAccessList(page pagination.Page) (AccessList, error) { + var resp struct { + List AccessList `mapstructure:"accessList" json:"accessList"` + } + + err := mapstructure.Decode(page.(AccessListPage).Body, &resp) + + return resp.List, err +} + +// CreateResult represents the result of a create operation. +type CreateResult struct { + gophercloud.ErrResult +} + +// DeleteResult represents the result of a delete operation. +type DeleteResult struct { + gophercloud.ErrResult +} diff --git a/vendor/github.com/rackspace/gophercloud/rackspace/lb/v1/acl/urls.go b/vendor/github.com/rackspace/gophercloud/rackspace/lb/v1/acl/urls.go new file mode 100644 index 000000000..e373fa1d8 --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/rackspace/lb/v1/acl/urls.go @@ -0,0 +1,20 @@ +package acl + +import ( + "strconv" + + "github.com/rackspace/gophercloud" +) + +const ( + path = "loadbalancers" + aclPath = "accesslist" +) + +func resourceURL(c *gophercloud.ServiceClient, lbID, networkID int) string { + return c.ServiceURL(path, strconv.Itoa(lbID), aclPath, strconv.Itoa(networkID)) +} + +func rootURL(c *gophercloud.ServiceClient, lbID int) string { + return c.ServiceURL(path, strconv.Itoa(lbID), aclPath) +} diff --git a/vendor/github.com/rackspace/gophercloud/rackspace/lb/v1/lbs/doc.go b/vendor/github.com/rackspace/gophercloud/rackspace/lb/v1/lbs/doc.go new file mode 100644 index 000000000..05f003285 --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/rackspace/lb/v1/lbs/doc.go @@ -0,0 +1,44 @@ +/* +Package lbs provides information and interaction with the Load Balancer API +resource for the Rackspace Cloud Load Balancer service. + +A load balancer is a logical device which belongs to a cloud account. It is +used to distribute workloads between multiple back-end systems or services, +based on the criteria defined as part of its configuration. This configuration +is defined using the Create operation, and can be updated with Update. + +To conserve IPv4 address space, it is highly recommended that you share Virtual +IPs between load balancers. If you have at least one load balancer, you may +create subsequent ones that share a single virtual IPv4 and/or a single IPv6 by +passing in a virtual IP ID to the Update operation (instead of a type). This +feature is also highly desirable if you wish to load balance both an insecure +and secure protocol using one IP or DNS name. In order to share a virtual IP, +each Load Balancer must utilize a unique port. + +All load balancers have a Status attribute that shows the current configuration +status of the device. This status is immutable by the caller and is updated +automatically based on state changes within the service. 
When a load balancer +is first created, it is placed into a BUILD state while the configuration is +being generated and applied based on the request. Once the configuration is +applied and finalized, it is in an ACTIVE status. In the event of a +configuration change or update, the status of the load balancer changes to +PENDING_UPDATE to signify configuration changes are in progress but have not yet +been finalized. Load balancers in a SUSPENDED status are configured to reject +traffic and do not forward requests to back-end nodes. + +An HTTP load balancer has the X-Forwarded-For (XFF) HTTP header set by default. +This header contains the originating IP address of a client connecting to a web +server through an HTTP proxy or load balancer, which many web applications are +already designed to use when determining the source address for a request. + +It also includes the X-Forwarded-Proto (XFP) HTTP header, which has been added +for identifying the originating protocol of an HTTP request as "http" or +"https" depending on which protocol the client requested. This is useful when +using SSL termination. + +Finally, it also includes the X-Forwarded-Port HTTP header, which has been +added for being able to generate secure URLs containing the specified port. +This header, along with the X-Forwarded-For header, provides the needed +information to the underlying application servers. +*/ +package lbs diff --git a/vendor/github.com/rackspace/gophercloud/rackspace/lb/v1/lbs/fixtures.go b/vendor/github.com/rackspace/gophercloud/rackspace/lb/v1/lbs/fixtures.go new file mode 100644 index 000000000..6325310db --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/rackspace/lb/v1/lbs/fixtures.go @@ -0,0 +1,584 @@ +package lbs + +import ( + "fmt" + "net/http" + "strconv" + "testing" + + th "github.com/rackspace/gophercloud/testhelper" + fake "github.com/rackspace/gophercloud/testhelper/client" +) + +func mockListLBResponse(t *testing.T) { + th.Mux.HandleFunc("/loadbalancers", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + + fmt.Fprintf(w, ` +{ + "loadBalancers":[ + { + "name":"lb-site1", + "id":71, + "protocol":"HTTP", + "port":80, + "algorithm":"RANDOM", + "status":"ACTIVE", + "nodeCount":3, + "virtualIps":[ + { + "id":403, + "address":"206.55.130.1", + "type":"PUBLIC", + "ipVersion":"IPV4" + } + ], + "created":{ + "time":"2010-11-30T03:23:42Z" + }, + "updated":{ + "time":"2010-11-30T03:23:44Z" + } + }, + { + "name":"lb-site2", + "id":72, + "created":{ + "time":"2011-11-30T03:23:42Z" + }, + "updated":{ + "time":"2011-11-30T03:23:44Z" + } + }, + { + "name":"lb-site3", + "id":73, + "created":{ + "time":"2012-11-30T03:23:42Z" + }, + "updated":{ + "time":"2012-11-30T03:23:44Z" + } + } + ] +} + `) + }) +} + +func mockCreateLBResponse(t *testing.T) { + th.Mux.HandleFunc("/loadbalancers", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "POST") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + + th.TestJSONRequest(t, r, ` +{ + "loadBalancer": { + "name": "a-new-loadbalancer", + "port": 80, + "protocol": "HTTP", + "virtualIps": [ + { + "id": 2341 + }, + { + "id": 900001 + } + ], + "nodes": [ + { + "address": "10.1.1.1", + "port": 80, + "condition": "ENABLED" + } + ] + } +} + `) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusAccepted) + + fmt.Fprintf(w, ` +{ + "loadBalancer": { + 
"name": "a-new-loadbalancer", + "id": 144, + "protocol": "HTTP", + "halfClosed": false, + "port": 83, + "algorithm": "RANDOM", + "status": "BUILD", + "timeout": 30, + "cluster": { + "name": "ztm-n01.staging1.lbaas.rackspace.net" + }, + "nodes": [ + { + "address": "10.1.1.1", + "id": 653, + "port": 80, + "status": "ONLINE", + "condition": "ENABLED", + "weight": 1 + } + ], + "virtualIps": [ + { + "address": "206.10.10.210", + "id": 39, + "type": "PUBLIC", + "ipVersion": "IPV4" + }, + { + "address": "2001:4801:79f1:0002:711b:be4c:0000:0021", + "id": 900001, + "type": "PUBLIC", + "ipVersion": "IPV6" + } + ], + "created": { + "time": "2010-11-30T03:23:42Z" + }, + "updated": { + "time": "2010-11-30T03:23:44Z" + }, + "connectionLogging": { + "enabled": false + } + } +} + `) + }) +} + +func mockBatchDeleteLBResponse(t *testing.T, ids []int) { + th.Mux.HandleFunc("/loadbalancers", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "DELETE") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + + r.ParseForm() + + for k, v := range ids { + fids := r.Form["id"] + th.AssertEquals(t, strconv.Itoa(v), fids[k]) + } + + w.WriteHeader(http.StatusAccepted) + }) +} + +func mockDeleteLBResponse(t *testing.T, id int) { + th.Mux.HandleFunc("/loadbalancers/"+strconv.Itoa(id), func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "DELETE") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + w.WriteHeader(http.StatusAccepted) + }) +} + +func mockGetLBResponse(t *testing.T, id int) { + th.Mux.HandleFunc("/loadbalancers/"+strconv.Itoa(id), func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + + fmt.Fprintf(w, ` +{ + "loadBalancer": { + "id": 2000, + "name": "sample-loadbalancer", + "protocol": "HTTP", + "port": 80, + "algorithm": "RANDOM", + "status": "ACTIVE", + "timeout": 30, + "connectionLogging": { + "enabled": true + }, + "virtualIps": [ + { + "id": 1000, + "address": "206.10.10.210", + "type": "PUBLIC", + "ipVersion": "IPV4" + } + ], + "nodes": [ + { + "id": 1041, + "address": "10.1.1.1", + "port": 80, + "condition": "ENABLED", + "status": "ONLINE" + }, + { + "id": 1411, + "address": "10.1.1.2", + "port": 80, + "condition": "ENABLED", + "status": "ONLINE" + } + ], + "sessionPersistence": { + "persistenceType": "HTTP_COOKIE" + }, + "connectionThrottle": { + "maxConnections": 100 + }, + "cluster": { + "name": "c1.dfw1" + }, + "created": { + "time": "2010-11-30T03:23:42Z" + }, + "updated": { + "time": "2010-11-30T03:23:44Z" + }, + "sourceAddresses": { + "ipv6Public": "2001:4801:79f1:1::1/64", + "ipv4Servicenet": "10.0.0.0", + "ipv4Public": "10.12.99.28" + } + } +} + `) + }) +} + +func mockUpdateLBResponse(t *testing.T, id int) { + th.Mux.HandleFunc("/loadbalancers/"+strconv.Itoa(id), func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "PUT") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + + th.TestJSONRequest(t, r, ` +{ + "loadBalancer": { + "name": "a-new-loadbalancer", + "protocol": "TCP", + "halfClosed": true, + "algorithm": "RANDOM", + "port": 8080, + "timeout": 100, + "httpsRedirect": false + } +} + `) + + w.WriteHeader(http.StatusAccepted) + }) +} + +func mockListProtocolsResponse(t *testing.T) { + th.Mux.HandleFunc("/loadbalancers/protocols", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + + 
w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + + fmt.Fprintf(w, ` +{ + "protocols": [ + { + "name": "DNS_TCP", + "port": 53 + }, + { + "name": "DNS_UDP", + "port": 53 + }, + { + "name": "FTP", + "port": 21 + }, + { + "name": "HTTP", + "port": 80 + }, + { + "name": "HTTPS", + "port": 443 + }, + { + "name": "IMAPS", + "port": 993 + }, + { + "name": "IMAPv4", + "port": 143 + }, + { + "name": "LDAP", + "port": 389 + }, + { + "name": "LDAPS", + "port": 636 + }, + { + "name": "MYSQL", + "port": 3306 + }, + { + "name": "POP3", + "port": 110 + }, + { + "name": "POP3S", + "port": 995 + }, + { + "name": "SMTP", + "port": 25 + }, + { + "name": "TCP", + "port": 0 + }, + { + "name": "TCP_CLIENT_FIRST", + "port": 0 + }, + { + "name": "UDP", + "port": 0 + }, + { + "name": "UDP_STREAM", + "port": 0 + }, + { + "name": "SFTP", + "port": 22 + }, + { + "name": "TCP_STREAM", + "port": 0 + } + ] +} + `) + }) +} + +func mockListAlgorithmsResponse(t *testing.T) { + th.Mux.HandleFunc("/loadbalancers/algorithms", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + + fmt.Fprintf(w, ` +{ + "algorithms": [ + { + "name": "LEAST_CONNECTIONS" + }, + { + "name": "RANDOM" + }, + { + "name": "ROUND_ROBIN" + }, + { + "name": "WEIGHTED_LEAST_CONNECTIONS" + }, + { + "name": "WEIGHTED_ROUND_ROBIN" + } + ] +} + `) + }) +} + +func mockGetLoggingResponse(t *testing.T, id int) { + th.Mux.HandleFunc("/loadbalancers/"+strconv.Itoa(id)+"/connectionlogging", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + + fmt.Fprintf(w, ` +{ + "connectionLogging": { + "enabled": true + } +} + `) + }) +} + +func mockEnableLoggingResponse(t *testing.T, id int) { + th.Mux.HandleFunc("/loadbalancers/"+strconv.Itoa(id)+"/connectionlogging", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "PUT") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + + th.TestJSONRequest(t, r, ` +{ + "connectionLogging":{ + "enabled":true + } +} + `) + + w.WriteHeader(http.StatusAccepted) + }) +} + +func mockDisableLoggingResponse(t *testing.T, id int) { + th.Mux.HandleFunc("/loadbalancers/"+strconv.Itoa(id)+"/connectionlogging", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "PUT") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + + th.TestJSONRequest(t, r, ` +{ + "connectionLogging":{ + "enabled":false + } +} + `) + + w.WriteHeader(http.StatusAccepted) + }) +} + +func mockGetErrorPageResponse(t *testing.T, id int) { + th.Mux.HandleFunc("/loadbalancers/"+strconv.Itoa(id)+"/errorpage", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + + fmt.Fprintf(w, ` +{ + "errorpage": { + "content": "DEFAULT ERROR PAGE" + } +} + `) + }) +} + +func mockSetErrorPageResponse(t *testing.T, id int) { + th.Mux.HandleFunc("/loadbalancers/"+strconv.Itoa(id)+"/errorpage", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "PUT") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + + th.TestJSONRequest(t, r, ` +{ + "errorpage": { + "content": "New error page" + } +} + `) + + w.WriteHeader(http.StatusOK) + + 
fmt.Fprintf(w, `
+{
+	"errorpage": {
+		"content": "New error page"
+	}
+}
+	`)
+	})
+}
+
+func mockDeleteErrorPageResponse(t *testing.T, id int) {
+	th.Mux.HandleFunc("/loadbalancers/"+strconv.Itoa(id)+"/errorpage", func(w http.ResponseWriter, r *http.Request) {
+		th.TestMethod(t, r, "DELETE")
+		th.TestHeader(t, r, "X-Auth-Token", fake.TokenID)
+		w.WriteHeader(http.StatusOK)
+	})
+}
+
+func mockGetStatsResponse(t *testing.T, id int) {
+	th.Mux.HandleFunc("/loadbalancers/"+strconv.Itoa(id)+"/stats", func(w http.ResponseWriter, r *http.Request) {
+		th.TestMethod(t, r, "GET")
+		th.TestHeader(t, r, "X-Auth-Token", fake.TokenID)
+
+		w.Header().Add("Content-Type", "application/json")
+		w.WriteHeader(http.StatusOK)
+
+		fmt.Fprintf(w, `
+{
+	"connectTimeOut": 10,
+	"connectError": 20,
+	"connectFailure": 30,
+	"dataTimedOut": 40,
+	"keepAliveTimedOut": 50,
+	"maxConn": 60,
+	"currentConn": 40,
+	"connectTimeOutSsl": 10,
+	"connectErrorSsl": 20,
+	"connectFailureSsl": 30,
+	"dataTimedOutSsl": 40,
+	"keepAliveTimedOutSsl": 50,
+	"maxConnSsl": 60,
+	"currentConnSsl": 40
+}
+	`)
+	})
+}
+
+func mockGetCachingResponse(t *testing.T, id int) {
+	th.Mux.HandleFunc("/loadbalancers/"+strconv.Itoa(id)+"/contentcaching", func(w http.ResponseWriter, r *http.Request) {
+		th.TestMethod(t, r, "GET")
+		th.TestHeader(t, r, "X-Auth-Token", fake.TokenID)
+
+		w.Header().Add("Content-Type", "application/json")
+		w.WriteHeader(http.StatusOK)
+
+		fmt.Fprintf(w, `
+{
+	"contentCaching": {
+		"enabled": true
+	}
+}
+	`)
+	})
+}
+
+func mockEnableCachingResponse(t *testing.T, id int) {
+	th.Mux.HandleFunc("/loadbalancers/"+strconv.Itoa(id)+"/contentcaching", func(w http.ResponseWriter, r *http.Request) {
+		th.TestMethod(t, r, "PUT")
+		th.TestHeader(t, r, "X-Auth-Token", fake.TokenID)
+
+		th.TestJSONRequest(t, r, `
+{
+	"contentCaching":{
+		"enabled":true
+	}
+}
+	`)
+
+		w.WriteHeader(http.StatusAccepted)
+	})
+}
+
+func mockDisableCachingResponse(t *testing.T, id int) {
+	th.Mux.HandleFunc("/loadbalancers/"+strconv.Itoa(id)+"/contentcaching", func(w http.ResponseWriter, r *http.Request) {
+		th.TestMethod(t, r, "PUT")
+		th.TestHeader(t, r, "X-Auth-Token", fake.TokenID)
+
+		th.TestJSONRequest(t, r, `
+{
+	"contentCaching":{
+		"enabled":false
+	}
+}
+	`)
+
+		w.WriteHeader(http.StatusAccepted)
+	})
+}
diff --git a/vendor/github.com/rackspace/gophercloud/rackspace/lb/v1/lbs/requests.go b/vendor/github.com/rackspace/gophercloud/rackspace/lb/v1/lbs/requests.go
new file mode 100644
index 000000000..46f5f02a4
--- /dev/null
+++ b/vendor/github.com/rackspace/gophercloud/rackspace/lb/v1/lbs/requests.go
@@ -0,0 +1,497 @@
+package lbs
+
+import (
+	"errors"
+
+	"github.com/mitchellh/mapstructure"
+
+	"github.com/rackspace/gophercloud"
+	"github.com/rackspace/gophercloud/pagination"
+	"github.com/rackspace/gophercloud/rackspace/lb/v1/acl"
+	"github.com/rackspace/gophercloud/rackspace/lb/v1/monitors"
+	"github.com/rackspace/gophercloud/rackspace/lb/v1/nodes"
+	"github.com/rackspace/gophercloud/rackspace/lb/v1/sessions"
+	"github.com/rackspace/gophercloud/rackspace/lb/v1/throttle"
+	"github.com/rackspace/gophercloud/rackspace/lb/v1/vips"
+)
+
+var (
+	errNameRequired    = errors.New("Name is a required attribute")
+	errTimeoutExceeded = errors.New("Timeout must be 120 or less")
+)
+
+// ListOptsBuilder allows extensions to add additional parameters to the
+// List request.
+type ListOptsBuilder interface {
+	ToLBListQuery() (string, error)
+}
+
+// ListOpts allows the filtering and sorting of paginated collections through
+// the API.
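+//
+// For example, a sketch that lists only ACTIVE load balancers (field values
+// are illustrative):
+//
+//	pager := List(client, ListOpts{Status: ACTIVE, Limit: 100})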
+type ListOpts struct { + ChangesSince string `q:"changes-since"` + Status Status `q:"status"` + NodeAddr string `q:"nodeaddress"` + Marker string `q:"marker"` + Limit int `q:"limit"` +} + +// ToLBListQuery formats a ListOpts into a query string. +func (opts ListOpts) ToLBListQuery() (string, error) { + q, err := gophercloud.BuildQueryString(opts) + if err != nil { + return "", err + } + return q.String(), nil +} + +// List is the operation responsible for returning a paginated collection of +// load balancers. You may pass in a ListOpts struct to filter results. +func List(client *gophercloud.ServiceClient, opts ListOptsBuilder) pagination.Pager { + url := rootURL(client) + if opts != nil { + query, err := opts.ToLBListQuery() + if err != nil { + return pagination.Pager{Err: err} + } + url += query + } + + return pagination.NewPager(client, url, func(r pagination.PageResult) pagination.Page { + return LBPage{pagination.LinkedPageBase{PageResult: r}} + }) +} + +// CreateOptsBuilder is the interface options structs have to satisfy in order +// to be used in the main Create operation in this package. Since many +// extensions decorate or modify the common logic, it is useful for them to +// satisfy a basic interface in order for them to be used. +type CreateOptsBuilder interface { + ToLBCreateMap() (map[string]interface{}, error) +} + +// CreateOpts is the common options struct used in this package's Create +// operation. +type CreateOpts struct { + // Required - name of the load balancer to create. The name must be 128 + // characters or fewer in length, and all UTF-8 characters are valid. + Name string + + // Optional - nodes to be added. + Nodes []nodes.Node + + // Required - protocol of the service that is being load balanced. + // See http://docs.rackspace.com/loadbalancers/api/v1.0/clb-devguide/content/protocols.html + // for a full list of supported protocols. + Protocol string + + // Optional - enables or disables Half-Closed support for the load balancer. + // Half-Closed support provides the ability for one end of the connection to + // terminate its output, while still receiving data from the other end. Only + // available for TCP/TCP_CLIENT_FIRST protocols. + HalfClosed gophercloud.EnabledState + + // Optional - the type of virtual IPs you want associated with the load + // balancer. + VIPs []vips.VIP + + // Optional - the access list management feature allows fine-grained network + // access controls to be applied to the load balancer virtual IP address. + AccessList *acl.AccessList + + // Optional - algorithm that defines how traffic should be directed between + // back-end nodes. + Algorithm string + + // Optional - current connection logging configuration. + ConnectionLogging *ConnectionLogging + + // Optional - specifies a limit on the number of connections per IP address + // to help mitigate malicious or abusive traffic to your applications. + ConnThrottle *throttle.ConnectionThrottle + + // Optional - the type of health monitor check to perform to ensure that the + // service is performing properly. + HealthMonitor *monitors.Monitor + + // Optional - arbitrary information that can be associated with each LB. + Metadata map[string]interface{} + + // Optional - port number for the service you are load balancing. + Port int + + // Optional - the timeout value for the load balancer and communications with + // its nodes. Defaults to 30 seconds with a maximum of 120 seconds. 
+	Timeout int
+
+	// Optional - specifies whether multiple requests from clients are directed
+	// to the same node.
+	SessionPersistence *sessions.SessionPersistence
+
+	// Optional - enables or disables HTTP to HTTPS redirection for the load
+	// balancer. When enabled, any HTTP request returns status code 301 (Moved
+	// Permanently), and the requester is redirected to the requested URL via the
+	// HTTPS protocol on port 443. For example, http://example.com/page.html
+	// would be redirected to https://example.com/page.html. Only available for
+	// HTTPS protocol (port=443), or HTTP protocol with a properly configured SSL
+	// termination (secureTrafficOnly=true, securePort=443).
+	HTTPSRedirect gophercloud.EnabledState
+}
+
+// ToLBCreateMap casts a CreateOpts struct to a map.
+func (opts CreateOpts) ToLBCreateMap() (map[string]interface{}, error) {
+	lb := make(map[string]interface{})
+
+	if opts.Name == "" {
+		return lb, errNameRequired
+	}
+	if opts.Timeout > 120 {
+		return lb, errTimeoutExceeded
+	}
+
+	lb["name"] = opts.Name
+
+	if len(opts.Nodes) > 0 {
+		nodes := []map[string]interface{}{}
+		for _, n := range opts.Nodes {
+			nodes = append(nodes, map[string]interface{}{
+				"address":   n.Address,
+				"port":      n.Port,
+				"condition": n.Condition,
+			})
+		}
+		lb["nodes"] = nodes
+	}
+
+	if opts.Protocol != "" {
+		lb["protocol"] = opts.Protocol
+	}
+	if opts.HalfClosed != nil {
+		lb["halfClosed"] = opts.HalfClosed
+	}
+	if len(opts.VIPs) > 0 {
+		lb["virtualIps"] = opts.VIPs
+	}
+	if opts.AccessList != nil {
+		lb["accessList"] = &opts.AccessList
+	}
+	if opts.Algorithm != "" {
+		lb["algorithm"] = opts.Algorithm
+	}
+	if opts.ConnectionLogging != nil {
+		lb["connectionLogging"] = &opts.ConnectionLogging
+	}
+	if opts.ConnThrottle != nil {
+		lb["connectionThrottle"] = &opts.ConnThrottle
+	}
+	if opts.HealthMonitor != nil {
+		lb["healthMonitor"] = &opts.HealthMonitor
+	}
+	if len(opts.Metadata) != 0 {
+		lb["metadata"] = opts.Metadata
+	}
+	if opts.Port > 0 {
+		lb["port"] = opts.Port
+	}
+	if opts.Timeout > 0 {
+		lb["timeout"] = opts.Timeout
+	}
+	if opts.SessionPersistence != nil {
+		lb["sessionPersistence"] = &opts.SessionPersistence
+	}
+	if opts.HTTPSRedirect != nil {
+		lb["httpsRedirect"] = &opts.HTTPSRedirect
+	}
+
+	return map[string]interface{}{"loadBalancer": lb}, nil
+}
+
+// Create is the operation responsible for asynchronously provisioning a new
+// load balancer based on the configuration defined in CreateOpts. Once the
+// request is validated and progress has started on the provisioning process, a
+// response struct is returned. When extracted (with Extract()), you have
+// access to the load balancer's unique ID and status.
+//
+// Once an ID is attained, you can check on the progress of the operation by
+// calling Get and passing in the ID. If the corresponding request cannot be
+// fulfilled due to insufficient or invalid data, an HTTP 400 (Bad Request)
+// error response is returned with information regarding the nature of the
+// failure in the body of the response. Failures in the validation process are
+// non-recoverable and require the caller to correct the cause of the failure.
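+//
+// A minimal sketch of a Create call (assuming an authenticated client; the
+// name, addresses, and ports are illustrative):
+//
+//	opts := CreateOpts{
+//		Name:     "my-lb",
+//		Port:     80,
+//		Protocol: "HTTP",
+//		VIPs:     []vips.VIP{{Type: vips.PUBLIC, Version: vips.IPV4}},
+//		Nodes:    []nodes.Node{{Address: "10.0.0.5", Port: 80, Condition: "ENABLED"}},
+//	}
+//	lb, err := Create(client, opts).Extract()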
+func Create(c *gophercloud.ServiceClient, opts CreateOptsBuilder) CreateResult { + var res CreateResult + + reqBody, err := opts.ToLBCreateMap() + if err != nil { + res.Err = err + return res + } + + _, res.Err = c.Post(rootURL(c), reqBody, &res.Body, nil) + return res +} + +// Get is the operation responsible for providing detailed information +// regarding a specific load balancer which is configured and associated with +// your account. This operation is not capable of returning details for a load +// balancer which has been deleted. +func Get(c *gophercloud.ServiceClient, id int) GetResult { + var res GetResult + + _, res.Err = c.Get(resourceURL(c, id), &res.Body, &gophercloud.RequestOpts{ + OkCodes: []int{200}, + }) + + return res +} + +// BulkDelete removes all the load balancers referenced in the slice of IDs. +// Any and all configuration data associated with these load balancers is +// immediately purged and is not recoverable. +// +// If one of the items in the list cannot be removed due to its current status, +// a 400 Bad Request error is returned along with the IDs of the ones the +// system identified as potential failures for this request. +func BulkDelete(c *gophercloud.ServiceClient, ids []int) DeleteResult { + var res DeleteResult + + if len(ids) > 10 || len(ids) == 0 { + res.Err = errors.New("You must provide a minimum of 1 and a maximum of 10 LB IDs") + return res + } + + url := rootURL(c) + url += gophercloud.IDSliceToQueryString("id", ids) + + _, res.Err = c.Delete(url, nil) + return res +} + +// Delete removes a single load balancer. +func Delete(c *gophercloud.ServiceClient, id int) DeleteResult { + var res DeleteResult + _, res.Err = c.Delete(resourceURL(c, id), nil) + return res +} + +// UpdateOptsBuilder represents a type that can be converted into a JSON-like +// map structure. +type UpdateOptsBuilder interface { + ToLBUpdateMap() (map[string]interface{}, error) +} + +// UpdateOpts represents the options for updating an existing load balancer. +type UpdateOpts struct { + // Optional - new name of the load balancer. + Name string + + // Optional - the new protocol you want your load balancer to have. + // See http://docs.rackspace.com/loadbalancers/api/v1.0/clb-devguide/content/protocols.html + // for a full list of supported protocols. + Protocol string + + // Optional - see the HalfClosed field in CreateOpts for more information. + HalfClosed gophercloud.EnabledState + + // Optional - see the Algorithm field in CreateOpts for more information. + Algorithm string + + // Optional - see the Port field in CreateOpts for more information. + Port int + + // Optional - see the Timeout field in CreateOpts for more information. + Timeout int + + // Optional - see the HTTPSRedirect field in CreateOpts for more information. + HTTPSRedirect gophercloud.EnabledState +} + +// ToLBUpdateMap casts an UpdateOpts struct to a map. 
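+//
+// For example, a hypothetical UpdateOpts{Name: "web-lb", Port: 8080} produces
+// the following request body (a sketch of the shape, not an exhaustive list
+// of keys):
+//
+//	map[string]interface{}{
+//		"loadBalancer": map[string]interface{}{
+//			"name": "web-lb",
+//			"port": 8080,
+//		},
+//	}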
+func (opts UpdateOpts) ToLBUpdateMap() (map[string]interface{}, error) { + lb := make(map[string]interface{}) + + if opts.Name != "" { + lb["name"] = opts.Name + } + if opts.Protocol != "" { + lb["protocol"] = opts.Protocol + } + if opts.HalfClosed != nil { + lb["halfClosed"] = opts.HalfClosed + } + if opts.Algorithm != "" { + lb["algorithm"] = opts.Algorithm + } + if opts.Port > 0 { + lb["port"] = opts.Port + } + if opts.Timeout > 0 { + lb["timeout"] = opts.Timeout + } + if opts.HTTPSRedirect != nil { + lb["httpsRedirect"] = &opts.HTTPSRedirect + } + + return map[string]interface{}{"loadBalancer": lb}, nil +} + +// Update is the operation responsible for asynchronously updating the +// attributes of a specific load balancer. Upon successful validation of the +// request, the service returns a 202 Accepted response, and the load balancer +// enters a PENDING_UPDATE state. A user can poll the load balancer with Get to +// wait for the changes to be applied. When this happens, the load balancer will +// return to an ACTIVE state. +func Update(c *gophercloud.ServiceClient, id int, opts UpdateOptsBuilder) UpdateResult { + var res UpdateResult + + reqBody, err := opts.ToLBUpdateMap() + if err != nil { + res.Err = err + return res + } + + _, res.Err = c.Put(resourceURL(c, id), reqBody, nil, nil) + return res +} + +// ListProtocols is the operation responsible for returning a paginated +// collection of load balancer protocols. +func ListProtocols(client *gophercloud.ServiceClient) pagination.Pager { + url := protocolsURL(client) + return pagination.NewPager(client, url, func(r pagination.PageResult) pagination.Page { + return ProtocolPage{pagination.SinglePageBase(r)} + }) +} + +// ListAlgorithms is the operation responsible for returning a paginated +// collection of load balancer algorithms. +func ListAlgorithms(client *gophercloud.ServiceClient) pagination.Pager { + url := algorithmsURL(client) + return pagination.NewPager(client, url, func(r pagination.PageResult) pagination.Page { + return AlgorithmPage{pagination.SinglePageBase(r)} + }) +} + +// IsLoggingEnabled returns true if the load balancer has connection logging +// enabled and false if not. +func IsLoggingEnabled(client *gophercloud.ServiceClient, id int) (bool, error) { + var body interface{} + + _, err := client.Get(loggingURL(client, id), &body, nil) + if err != nil { + return false, err + } + + var resp struct { + CL struct { + Enabled bool `mapstructure:"enabled"` + } `mapstructure:"connectionLogging"` + } + + err = mapstructure.Decode(body, &resp) + return resp.CL.Enabled, err +} + +func toConnLoggingMap(state bool) map[string]map[string]bool { + return map[string]map[string]bool{ + "connectionLogging": map[string]bool{"enabled": state}, + } +} + +// EnableLogging will enable connection logging for a specified load balancer. +func EnableLogging(client *gophercloud.ServiceClient, id int) gophercloud.ErrResult { + var res gophercloud.ErrResult + _, res.Err = client.Put(loggingURL(client, id), toConnLoggingMap(true), nil, nil) + return res +} + +// DisableLogging will disable connection logging for a specified load balancer. +func DisableLogging(client *gophercloud.ServiceClient, id int) gophercloud.ErrResult { + var res gophercloud.ErrResult + _, res.Err = client.Put(loggingURL(client, id), toConnLoggingMap(false), nil, nil) + return res +} + +// GetErrorPage will retrieve the current error page for the load balancer. 
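+//
+// For example (assuming an authenticated client and a hypothetical LB ID):
+//
+//	page, err := GetErrorPage(client, 12345).Extract()
+//	if err == nil {
+//		fmt.Println(page.Content)
+//	}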
+func GetErrorPage(client *gophercloud.ServiceClient, id int) ErrorPageResult {
+	var res ErrorPageResult
+	_, res.Err = client.Get(errorPageURL(client, id), &res.Body, nil)
+	return res
+}
+
+// SetErrorPage will set the HTML of the load balancer's error page to a
+// specific value.
+func SetErrorPage(client *gophercloud.ServiceClient, id int, html string) ErrorPageResult {
+	var res ErrorPageResult
+
+	type stringMap map[string]string
+	reqBody := map[string]stringMap{"errorpage": stringMap{"content": html}}
+
+	_, res.Err = client.Put(errorPageURL(client, id), reqBody, &res.Body, &gophercloud.RequestOpts{
+		OkCodes: []int{200},
+	})
+
+	return res
+}
+
+// DeleteErrorPage will delete the current error page for the load balancer.
+func DeleteErrorPage(client *gophercloud.ServiceClient, id int) gophercloud.ErrResult {
+	var res gophercloud.ErrResult
+	_, res.Err = client.Delete(errorPageURL(client, id), &gophercloud.RequestOpts{
+		OkCodes: []int{200},
+	})
+	return res
+}
+
+// GetStats will retrieve detailed stats related to the load balancer's usage.
+func GetStats(client *gophercloud.ServiceClient, id int) StatsResult {
+	var res StatsResult
+	_, res.Err = client.Get(statsURL(client, id), &res.Body, nil)
+	return res
+}
+
+// IsContentCached will check whether the specified load balancer caches
+// content. When content caching is enabled, recently-accessed files are stored
+// on the load balancer for easy retrieval by web clients. Content caching
+// improves the performance of high-traffic web sites by temporarily storing
+// data that was recently accessed. While it's cached, requests for that data
+// are served by the load balancer, which in turn reduces the load on the
+// back-end nodes. The result is improved response times for those requests and
+// less load on the web server.
+func IsContentCached(client *gophercloud.ServiceClient, id int) (bool, error) {
+	var body interface{}
+
+	_, err := client.Get(cacheURL(client, id), &body, nil)
+	if err != nil {
+		return false, err
+	}
+
+	var resp struct {
+		CC struct {
+			Enabled bool `mapstructure:"enabled"`
+		} `mapstructure:"contentCaching"`
+	}
+
+	err = mapstructure.Decode(body, &resp)
+	return resp.CC.Enabled, err
+}
+
+func toCachingMap(state bool) map[string]map[string]bool {
+	return map[string]map[string]bool{
+		"contentCaching": map[string]bool{"enabled": state},
+	}
+}
+
+// EnableCaching will enable content-caching for the specified load balancer.
+func EnableCaching(client *gophercloud.ServiceClient, id int) gophercloud.ErrResult {
+	var res gophercloud.ErrResult
+	_, res.Err = client.Put(cacheURL(client, id), toCachingMap(true), nil, nil)
+	return res
+}
+
+// DisableCaching will disable content-caching for the specified load balancer.
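+//
+// A sketch that only issues the PUT when caching is currently on (client and
+// ID are assumed):
+//
+//	cached, err := IsContentCached(client, 12345)
+//	if err == nil && cached {
+//		err = DisableCaching(client, 12345).ExtractErr()
+//	}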
+func DisableCaching(client *gophercloud.ServiceClient, id int) gophercloud.ErrResult { + var res gophercloud.ErrResult + _, res.Err = client.Put(cacheURL(client, id), toCachingMap(false), nil, nil) + return res +} diff --git a/vendor/github.com/rackspace/gophercloud/rackspace/lb/v1/lbs/requests_test.go b/vendor/github.com/rackspace/gophercloud/rackspace/lb/v1/lbs/requests_test.go new file mode 100644 index 000000000..a8ec19e07 --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/rackspace/lb/v1/lbs/requests_test.go @@ -0,0 +1,438 @@ +package lbs + +import ( + "testing" + "time" + + "github.com/rackspace/gophercloud" + "github.com/rackspace/gophercloud/pagination" + "github.com/rackspace/gophercloud/rackspace/lb/v1/nodes" + "github.com/rackspace/gophercloud/rackspace/lb/v1/sessions" + "github.com/rackspace/gophercloud/rackspace/lb/v1/throttle" + "github.com/rackspace/gophercloud/rackspace/lb/v1/vips" + th "github.com/rackspace/gophercloud/testhelper" + "github.com/rackspace/gophercloud/testhelper/client" +) + +const ( + id1 = 12345 + id2 = 67890 + ts1 = "2010-11-30T03:23:42Z" + ts2 = "2010-11-30T03:23:44Z" +) + +func toTime(t *testing.T, str string) time.Time { + ts, err := time.Parse(time.RFC3339, str) + if err != nil { + t.Fatalf("Could not parse time: %s", err.Error()) + } + return ts +} + +func TestList(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + mockListLBResponse(t) + + count := 0 + + err := List(client.ServiceClient(), ListOpts{}).EachPage(func(page pagination.Page) (bool, error) { + count++ + actual, err := ExtractLBs(page) + th.AssertNoErr(t, err) + + expected := []LoadBalancer{ + LoadBalancer{ + Name: "lb-site1", + ID: 71, + Protocol: "HTTP", + Port: 80, + Algorithm: "RANDOM", + Status: ACTIVE, + NodeCount: 3, + VIPs: []vips.VIP{ + vips.VIP{ + ID: 403, + Address: "206.55.130.1", + Type: "PUBLIC", + Version: "IPV4", + }, + }, + Created: Datetime{Time: toTime(t, ts1)}, + Updated: Datetime{Time: toTime(t, ts2)}, + }, + LoadBalancer{ + ID: 72, + Name: "lb-site2", + Created: Datetime{Time: toTime(t, "2011-11-30T03:23:42Z")}, + Updated: Datetime{Time: toTime(t, "2011-11-30T03:23:44Z")}, + }, + LoadBalancer{ + ID: 73, + Name: "lb-site3", + Created: Datetime{Time: toTime(t, "2012-11-30T03:23:42Z")}, + Updated: Datetime{Time: toTime(t, "2012-11-30T03:23:44Z")}, + }, + } + + th.CheckDeepEquals(t, expected, actual) + + return true, nil + }) + + th.AssertNoErr(t, err) + th.AssertEquals(t, 1, count) +} + +func TestCreate(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + mockCreateLBResponse(t) + + opts := CreateOpts{ + Name: "a-new-loadbalancer", + Port: 80, + Protocol: "HTTP", + VIPs: []vips.VIP{ + vips.VIP{ID: 2341}, + vips.VIP{ID: 900001}, + }, + Nodes: []nodes.Node{ + nodes.Node{Address: "10.1.1.1", Port: 80, Condition: "ENABLED"}, + }, + } + + lb, err := Create(client.ServiceClient(), opts).Extract() + th.AssertNoErr(t, err) + + expected := &LoadBalancer{ + Name: "a-new-loadbalancer", + ID: 144, + Protocol: "HTTP", + HalfClosed: false, + Port: 83, + Algorithm: "RANDOM", + Status: BUILD, + Timeout: 30, + Cluster: Cluster{Name: "ztm-n01.staging1.lbaas.rackspace.net"}, + Nodes: []nodes.Node{ + nodes.Node{ + Address: "10.1.1.1", + ID: 653, + Port: 80, + Status: "ONLINE", + Condition: "ENABLED", + Weight: 1, + }, + }, + VIPs: []vips.VIP{ + vips.VIP{ + ID: 39, + Address: "206.10.10.210", + Type: vips.PUBLIC, + Version: vips.IPV4, + }, + vips.VIP{ + ID: 900001, + Address: "2001:4801:79f1:0002:711b:be4c:0000:0021", + Type: vips.PUBLIC, + Version: vips.IPV6, 
+ }, + }, + Created: Datetime{Time: toTime(t, ts1)}, + Updated: Datetime{Time: toTime(t, ts2)}, + ConnectionLogging: ConnectionLogging{Enabled: false}, + } + + th.AssertDeepEquals(t, expected, lb) +} + +func TestBulkDelete(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + ids := []int{id1, id2} + + mockBatchDeleteLBResponse(t, ids) + + err := BulkDelete(client.ServiceClient(), ids).ExtractErr() + th.AssertNoErr(t, err) +} + +func TestDelete(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + mockDeleteLBResponse(t, id1) + + err := Delete(client.ServiceClient(), id1).ExtractErr() + th.AssertNoErr(t, err) +} + +func TestGet(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + mockGetLBResponse(t, id1) + + lb, err := Get(client.ServiceClient(), id1).Extract() + + expected := &LoadBalancer{ + Name: "sample-loadbalancer", + ID: 2000, + Protocol: "HTTP", + Port: 80, + Algorithm: "RANDOM", + Status: ACTIVE, + Timeout: 30, + ConnectionLogging: ConnectionLogging{Enabled: true}, + VIPs: []vips.VIP{ + vips.VIP{ + ID: 1000, + Address: "206.10.10.210", + Type: "PUBLIC", + Version: "IPV4", + }, + }, + Nodes: []nodes.Node{ + nodes.Node{ + Address: "10.1.1.1", + ID: 1041, + Port: 80, + Status: "ONLINE", + Condition: "ENABLED", + }, + nodes.Node{ + Address: "10.1.1.2", + ID: 1411, + Port: 80, + Status: "ONLINE", + Condition: "ENABLED", + }, + }, + SessionPersistence: sessions.SessionPersistence{Type: "HTTP_COOKIE"}, + ConnectionThrottle: throttle.ConnectionThrottle{MaxConnections: 100}, + Cluster: Cluster{Name: "c1.dfw1"}, + Created: Datetime{Time: toTime(t, ts1)}, + Updated: Datetime{Time: toTime(t, ts2)}, + SourceAddrs: SourceAddrs{ + IPv4Public: "10.12.99.28", + IPv4Private: "10.0.0.0", + IPv6Public: "2001:4801:79f1:1::1/64", + }, + } + + th.AssertDeepEquals(t, expected, lb) + th.AssertNoErr(t, err) +} + +func TestUpdate(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + mockUpdateLBResponse(t, id1) + + opts := UpdateOpts{ + Name: "a-new-loadbalancer", + Protocol: "TCP", + HalfClosed: gophercloud.Enabled, + Algorithm: "RANDOM", + Port: 8080, + Timeout: 100, + HTTPSRedirect: gophercloud.Disabled, + } + + err := Update(client.ServiceClient(), id1, opts).ExtractErr() + th.AssertNoErr(t, err) +} + +func TestListProtocols(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + mockListProtocolsResponse(t) + + count := 0 + + err := ListProtocols(client.ServiceClient()).EachPage(func(page pagination.Page) (bool, error) { + count++ + actual, err := ExtractProtocols(page) + th.AssertNoErr(t, err) + + expected := []Protocol{ + Protocol{Name: "DNS_TCP", Port: 53}, + Protocol{Name: "DNS_UDP", Port: 53}, + Protocol{Name: "FTP", Port: 21}, + Protocol{Name: "HTTP", Port: 80}, + Protocol{Name: "HTTPS", Port: 443}, + Protocol{Name: "IMAPS", Port: 993}, + Protocol{Name: "IMAPv4", Port: 143}, + } + + th.CheckDeepEquals(t, expected[0:7], actual) + + return true, nil + }) + + th.AssertNoErr(t, err) + th.AssertEquals(t, 1, count) +} + +func TestListAlgorithms(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + mockListAlgorithmsResponse(t) + + count := 0 + + err := ListAlgorithms(client.ServiceClient()).EachPage(func(page pagination.Page) (bool, error) { + count++ + actual, err := ExtractAlgorithms(page) + th.AssertNoErr(t, err) + + expected := []Algorithm{ + Algorithm{Name: "LEAST_CONNECTIONS"}, + Algorithm{Name: "RANDOM"}, + Algorithm{Name: "ROUND_ROBIN"}, + Algorithm{Name: "WEIGHTED_LEAST_CONNECTIONS"}, + Algorithm{Name: "WEIGHTED_ROUND_ROBIN"}, + } + + 
th.CheckDeepEquals(t, expected, actual) + + return true, nil + }) + + th.AssertNoErr(t, err) + th.AssertEquals(t, 1, count) +} + +func TestIsLoggingEnabled(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + mockGetLoggingResponse(t, id1) + + res, err := IsLoggingEnabled(client.ServiceClient(), id1) + th.AssertNoErr(t, err) + th.AssertEquals(t, true, res) +} + +func TestEnablingLogging(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + mockEnableLoggingResponse(t, id1) + + err := EnableLogging(client.ServiceClient(), id1).ExtractErr() + th.AssertNoErr(t, err) +} + +func TestDisablingLogging(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + mockDisableLoggingResponse(t, id1) + + err := DisableLogging(client.ServiceClient(), id1).ExtractErr() + th.AssertNoErr(t, err) +} + +func TestGetErrorPage(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + mockGetErrorPageResponse(t, id1) + + content, err := GetErrorPage(client.ServiceClient(), id1).Extract() + th.AssertNoErr(t, err) + + expected := &ErrorPage{Content: "DEFAULT ERROR PAGE"} + th.AssertDeepEquals(t, expected, content) +} + +func TestSetErrorPage(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + mockSetErrorPageResponse(t, id1) + + html := "New error page" + content, err := SetErrorPage(client.ServiceClient(), id1, html).Extract() + th.AssertNoErr(t, err) + + expected := &ErrorPage{Content: html} + th.AssertDeepEquals(t, expected, content) +} + +func TestDeleteErrorPage(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + mockDeleteErrorPageResponse(t, id1) + + err := DeleteErrorPage(client.ServiceClient(), id1).ExtractErr() + th.AssertNoErr(t, err) +} + +func TestGetStats(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + mockGetStatsResponse(t, id1) + + content, err := GetStats(client.ServiceClient(), id1).Extract() + th.AssertNoErr(t, err) + + expected := &Stats{ + ConnectTimeout: 10, + ConnectError: 20, + ConnectFailure: 30, + DataTimedOut: 40, + KeepAliveTimedOut: 50, + MaxConnections: 60, + CurrentConnections: 40, + SSLConnectTimeout: 10, + SSLConnectError: 20, + SSLConnectFailure: 30, + SSLDataTimedOut: 40, + SSLKeepAliveTimedOut: 50, + SSLMaxConnections: 60, + SSLCurrentConnections: 40, + } + th.AssertDeepEquals(t, expected, content) +} + +func TestIsCached(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + mockGetCachingResponse(t, id1) + + res, err := IsContentCached(client.ServiceClient(), id1) + th.AssertNoErr(t, err) + th.AssertEquals(t, true, res) +} + +func TestEnablingCaching(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + mockEnableCachingResponse(t, id1) + + err := EnableCaching(client.ServiceClient(), id1).ExtractErr() + th.AssertNoErr(t, err) +} + +func TestDisablingCaching(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + mockDisableCachingResponse(t, id1) + + err := DisableCaching(client.ServiceClient(), id1).ExtractErr() + th.AssertNoErr(t, err) +} diff --git a/vendor/github.com/rackspace/gophercloud/rackspace/lb/v1/lbs/results.go b/vendor/github.com/rackspace/gophercloud/rackspace/lb/v1/lbs/results.go new file mode 100644 index 000000000..98f3962d7 --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/rackspace/lb/v1/lbs/results.go @@ -0,0 +1,420 @@ +package lbs + +import ( + "reflect" + "time" + + "github.com/mitchellh/mapstructure" + + "github.com/rackspace/gophercloud" + "github.com/rackspace/gophercloud/pagination" + "github.com/rackspace/gophercloud/rackspace/lb/v1/acl" + 
"github.com/rackspace/gophercloud/rackspace/lb/v1/nodes" + "github.com/rackspace/gophercloud/rackspace/lb/v1/sessions" + "github.com/rackspace/gophercloud/rackspace/lb/v1/throttle" + "github.com/rackspace/gophercloud/rackspace/lb/v1/vips" +) + +// Protocol represents the network protocol which the load balancer accepts. +type Protocol struct { + // The name of the protocol, e.g. HTTP, LDAP, FTP, etc. + Name string + + // The port number for the protocol. + Port int +} + +// Algorithm defines how traffic should be directed between back-end nodes. +type Algorithm struct { + // The name of the algorithm, e.g RANDOM, ROUND_ROBIN, etc. + Name string +} + +// Status represents the potential state of a load balancer resource. +type Status string + +const ( + // ACTIVE indicates that the LB is configured properly and ready to serve + // traffic to incoming requests via the configured virtual IPs. + ACTIVE Status = "ACTIVE" + + // BUILD indicates that the LB is being provisioned for the first time and + // configuration is being applied to bring the service online. The service + // cannot yet serve incoming requests. + BUILD Status = "BUILD" + + // PENDINGUPDATE indicates that the LB is online but configuration changes + // are being applied to update the service based on a previous request. + PENDINGUPDATE Status = "PENDING_UPDATE" + + // PENDINGDELETE indicates that the LB is online but configuration changes + // are being applied to begin deletion of the service based on a previous + // request. + PENDINGDELETE Status = "PENDING_DELETE" + + // SUSPENDED indicates that the LB has been taken offline and disabled. + SUSPENDED Status = "SUSPENDED" + + // ERROR indicates that the system encountered an error when attempting to + // configure the load balancer. + ERROR Status = "ERROR" + + // DELETED indicates that the LB has been deleted. + DELETED Status = "DELETED" +) + +// Datetime represents the structure of a Created or Updated field. +type Datetime struct { + Time time.Time `mapstructure:"-"` +} + +// LoadBalancer represents a load balancer API resource. +type LoadBalancer struct { + // Human-readable name for the load balancer. + Name string + + // The unique ID for the load balancer. + ID int + + // Represents the service protocol being load balanced. See Protocol type for + // a list of accepted values. + // See http://docs.rackspace.com/loadbalancers/api/v1.0/clb-devguide/content/protocols.html + // for a full list of supported protocols. + Protocol string + + // Defines how traffic should be directed between back-end nodes. The default + // algorithm is RANDOM. See Algorithm type for a list of accepted values. + Algorithm string + + // The current status of the load balancer. + Status Status + + // The number of load balancer nodes. + NodeCount int `mapstructure:"nodeCount"` + + // Slice of virtual IPs associated with this load balancer. + VIPs []vips.VIP `mapstructure:"virtualIps"` + + // Datetime when the LB was created. + Created Datetime + + // Datetime when the LB was created. + Updated Datetime + + // Port number for the service you are load balancing. + Port int + + // HalfClosed provides the ability for one end of the connection to + // terminate its output while still receiving data from the other end. This + // is only available on TCP/TCP_CLIENT_FIRST protocols. + HalfClosed bool + + // Timeout represents the timeout value between a load balancer and its + // nodes. Defaults to 30 seconds with a maximum of 120 seconds. + Timeout int + + // The cluster name. 
+	Cluster Cluster
+
+	// Nodes shows all the back-end nodes which are associated with the load
+	// balancer. These are the devices to which traffic is delivered.
+	Nodes []nodes.Node
+
+	// Current connection logging configuration.
+	ConnectionLogging ConnectionLogging
+
+	// SessionPersistence specifies whether multiple requests from clients are
+	// directed to the same node.
+	SessionPersistence sessions.SessionPersistence
+
+	// ConnectionThrottle specifies a limit on the number of connections per IP
+	// address to help mitigate malicious or abusive traffic to your applications.
+	ConnectionThrottle throttle.ConnectionThrottle
+
+	// The source public and private IP addresses.
+	SourceAddrs SourceAddrs `mapstructure:"sourceAddresses"`
+
+	// Represents the access rules for this particular load balancer. IP addresses
+	// or subnet ranges, depending on their type (ALLOW or DENY), can be permitted
+	// or blocked.
+	AccessList acl.AccessList
+}
+
+// SourceAddrs represents the source public and private IP addresses.
+type SourceAddrs struct {
+	IPv4Public  string `json:"ipv4Public" mapstructure:"ipv4Public"`
+	IPv4Private string `json:"ipv4Servicenet" mapstructure:"ipv4Servicenet"`
+	IPv6Public  string `json:"ipv6Public" mapstructure:"ipv6Public"`
+	IPv6Private string `json:"ipv6Servicenet" mapstructure:"ipv6Servicenet"`
+}
+
+// ConnectionLogging represents the connection logging setting for a load
+// balancer.
+type ConnectionLogging struct {
+	Enabled bool
+}
+
+// Cluster represents the cluster that a load balancer is provisioned on.
+type Cluster struct {
+	Name string
+}
+
+// LBPage is the page returned by a pager when traversing over a collection of
+// LBs.
+type LBPage struct {
+	pagination.LinkedPageBase
+}
+
+// IsEmpty checks whether an LBPage struct is empty.
+func (p LBPage) IsEmpty() (bool, error) {
+	is, err := ExtractLBs(p)
+	if err != nil {
+		return true, nil
+	}
+	return len(is) == 0, nil
+}
+
+// ExtractLBs accepts a Page struct, specifically an LBPage struct, and extracts
+// the elements into a slice of LoadBalancer structs. In other words, a generic
+// collection is mapped into a relevant slice.
+func ExtractLBs(page pagination.Page) ([]LoadBalancer, error) {
+	var resp struct {
+		LBs []LoadBalancer `mapstructure:"loadBalancers" json:"loadBalancers"`
+	}
+
+	coll := page.(LBPage).Body
+	err := mapstructure.Decode(coll, &resp)
+
+	s := reflect.ValueOf(coll.(map[string]interface{})["loadBalancers"])
+
+	for i := 0; i < s.Len(); i++ {
+		val := (s.Index(i).Interface()).(map[string]interface{})
+
+		ts, err := extractTS(val, "created")
+		if err != nil {
+			return resp.LBs, err
+		}
+		resp.LBs[i].Created.Time = ts
+
+		ts, err = extractTS(val, "updated")
+		if err != nil {
+			return resp.LBs, err
+		}
+		resp.LBs[i].Updated.Time = ts
+	}
+
+	return resp.LBs, err
+}
+
+func extractTS(body map[string]interface{}, key string) (time.Time, error) {
+	val := body[key].(map[string]interface{})
+	return time.Parse(time.RFC3339, val["time"].(string))
+}
+
+type commonResult struct {
+	gophercloud.Result
+}
+
+// Extract interprets any commonResult as an LB, if possible.
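+//
+// For example, both Get and Create results share this method (client and ID
+// are assumed):
+//
+//	lb, err := Get(client, 12345).Extract()
+//	if err == nil {
+//		fmt.Println(lb.Name, lb.Status)
+//	}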
+func (r commonResult) Extract() (*LoadBalancer, error) { + if r.Err != nil { + return nil, r.Err + } + + var response struct { + LB LoadBalancer `mapstructure:"loadBalancer"` + } + + err := mapstructure.Decode(r.Body, &response) + + json := r.Body.(map[string]interface{}) + lb := json["loadBalancer"].(map[string]interface{}) + + ts, err := extractTS(lb, "created") + if err != nil { + return nil, err + } + response.LB.Created.Time = ts + + ts, err = extractTS(lb, "updated") + if err != nil { + return nil, err + } + response.LB.Updated.Time = ts + + return &response.LB, err +} + +// CreateResult represents the result of a create operation. +type CreateResult struct { + commonResult +} + +// DeleteResult represents the result of a delete operation. +type DeleteResult struct { + gophercloud.ErrResult +} + +// UpdateResult represents the result of an update operation. +type UpdateResult struct { + gophercloud.ErrResult +} + +// GetResult represents the result of a get operation. +type GetResult struct { + commonResult +} + +// ProtocolPage is the page returned by a pager when traversing over a +// collection of LB protocols. +type ProtocolPage struct { + pagination.SinglePageBase +} + +// IsEmpty checks whether a ProtocolPage struct is empty. +func (p ProtocolPage) IsEmpty() (bool, error) { + is, err := ExtractProtocols(p) + if err != nil { + return true, nil + } + return len(is) == 0, nil +} + +// ExtractProtocols accepts a Page struct, specifically a ProtocolPage struct, +// and extracts the elements into a slice of Protocol structs. In other +// words, a generic collection is mapped into a relevant slice. +func ExtractProtocols(page pagination.Page) ([]Protocol, error) { + var resp struct { + Protocols []Protocol `mapstructure:"protocols" json:"protocols"` + } + err := mapstructure.Decode(page.(ProtocolPage).Body, &resp) + return resp.Protocols, err +} + +// AlgorithmPage is the page returned by a pager when traversing over a +// collection of LB algorithms. +type AlgorithmPage struct { + pagination.SinglePageBase +} + +// IsEmpty checks whether an AlgorithmPage struct is empty. +func (p AlgorithmPage) IsEmpty() (bool, error) { + is, err := ExtractAlgorithms(p) + if err != nil { + return true, nil + } + return len(is) == 0, nil +} + +// ExtractAlgorithms accepts a Page struct, specifically an AlgorithmPage struct, +// and extracts the elements into a slice of Algorithm structs. In other +// words, a generic collection is mapped into a relevant slice. +func ExtractAlgorithms(page pagination.Page) ([]Algorithm, error) { + var resp struct { + Algorithms []Algorithm `mapstructure:"algorithms" json:"algorithms"` + } + err := mapstructure.Decode(page.(AlgorithmPage).Body, &resp) + return resp.Algorithms, err +} + +// ErrorPage represents the HTML file that is shown to an end user who is +// attempting to access a load balancer node that is offline/unavailable. +// +// During provisioning, every load balancer is configured with a default error +// page that gets displayed when traffic is requested for an offline node. +// +// You can add a single custom error page with an HTTP-based protocol to a load +// balancer. Page updates override existing content. If a custom error page is +// deleted, or the load balancer is changed to a non-HTTP protocol, the default +// error page is restored. +type ErrorPage struct { + Content string +} + +// ErrorPageResult represents the result of an error page operation - +// specifically getting or creating one. 
+type ErrorPageResult struct { + gophercloud.Result +} + +// Extract interprets any ErrorPageResult as an ErrorPage, if possible. +func (r ErrorPageResult) Extract() (*ErrorPage, error) { + if r.Err != nil { + return nil, r.Err + } + + var response struct { + ErrorPage ErrorPage `mapstructure:"errorpage"` + } + + err := mapstructure.Decode(r.Body, &response) + + return &response.ErrorPage, err +} + +// Stats represents all the key information about a load balancer's usage. +type Stats struct { + // The number of connections closed by this load balancer because its + // ConnectTimeout interval was exceeded. + ConnectTimeout int `mapstructure:"connectTimeOut"` + + // The number of transaction or protocol errors for this load balancer. + ConnectError int + + // Number of connection failures for this load balancer. + ConnectFailure int + + // Number of connections closed by this load balancer because its Timeout + // interval was exceeded. + DataTimedOut int + + // Number of connections closed by this load balancer because the + // 'keepalive_timeout' interval was exceeded. + KeepAliveTimedOut int + + // The maximum number of simultaneous TCP connections this load balancer has + // processed at any one time. + MaxConnections int `mapstructure:"maxConn"` + + // Number of simultaneous connections active at the time of the request. + CurrentConnections int `mapstructure:"currentConn"` + + // Number of SSL connections closed by this load balancer because the + // ConnectTimeout interval was exceeded. + SSLConnectTimeout int `mapstructure:"connectTimeOutSsl"` + + // Number of SSL transaction or protocol errors in this load balancer. + SSLConnectError int `mapstructure:"connectErrorSsl"` + + // Number of SSL connection failures in this load balancer. + SSLConnectFailure int `mapstructure:"connectFailureSsl"` + + // Number of SSL connections closed by this load balancer because the + // Timeout interval was exceeded. + SSLDataTimedOut int `mapstructure:"dataTimedOutSsl"` + + // Number of SSL connections closed by this load balancer because the + // 'keepalive_timeout' interval was exceeded. + SSLKeepAliveTimedOut int `mapstructure:"keepAliveTimedOutSsl"` + + // Maximum number of simultaneous SSL connections this load balancer has + // processed at any one time. + SSLMaxConnections int `mapstructure:"maxConnSsl"` + + // Number of simultaneous SSL connections active at the time of the request. + SSLCurrentConnections int `mapstructure:"currentConnSsl"` +} + +// StatsResult represents the result of a Stats operation. +type StatsResult struct { + gophercloud.Result +} + +// Extract interprets any StatsResult as a Stats struct, if possible.
+func (r StatsResult) Extract() (*Stats, error) { + if r.Err != nil { + return nil, r.Err + } + res := &Stats{} + err := mapstructure.Decode(r.Body, res) + return res, err +} diff --git a/vendor/github.com/rackspace/gophercloud/rackspace/lb/v1/lbs/urls.go b/vendor/github.com/rackspace/gophercloud/rackspace/lb/v1/lbs/urls.go new file mode 100644 index 000000000..471a86b0a --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/rackspace/lb/v1/lbs/urls.go @@ -0,0 +1,49 @@ +package lbs + +import ( + "strconv" + + "github.com/rackspace/gophercloud" +) + +const ( + path = "loadbalancers" + protocolsPath = "protocols" + algorithmsPath = "algorithms" + logPath = "connectionlogging" + epPath = "errorpage" + stPath = "stats" + cachePath = "contentcaching" +) + +func resourceURL(c *gophercloud.ServiceClient, id int) string { + return c.ServiceURL(path, strconv.Itoa(id)) +} + +func rootURL(c *gophercloud.ServiceClient) string { + return c.ServiceURL(path) +} + +func protocolsURL(c *gophercloud.ServiceClient) string { + return c.ServiceURL(path, protocolsPath) +} + +func algorithmsURL(c *gophercloud.ServiceClient) string { + return c.ServiceURL(path, algorithmsPath) +} + +func loggingURL(c *gophercloud.ServiceClient, id int) string { + return c.ServiceURL(path, strconv.Itoa(id), logPath) +} + +func errorPageURL(c *gophercloud.ServiceClient, id int) string { + return c.ServiceURL(path, strconv.Itoa(id), epPath) +} + +func statsURL(c *gophercloud.ServiceClient, id int) string { + return c.ServiceURL(path, strconv.Itoa(id), stPath) +} + +func cacheURL(c *gophercloud.ServiceClient, id int) string { + return c.ServiceURL(path, strconv.Itoa(id), cachePath) +} diff --git a/vendor/github.com/rackspace/gophercloud/rackspace/lb/v1/monitors/doc.go b/vendor/github.com/rackspace/gophercloud/rackspace/lb/v1/monitors/doc.go new file mode 100644 index 000000000..2c5be75ae --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/rackspace/lb/v1/monitors/doc.go @@ -0,0 +1,21 @@ +/* +Package monitors provides information and interaction with the Health Monitor +API resource for the Rackspace Cloud Load Balancer service. + +The load balancing service includes a health monitoring resource that +periodically checks your back-end nodes to ensure they are responding correctly. +If a node does not respond, it is removed from rotation until the health monitor +determines that the node is functional. In addition to being performed +periodically, a health check also executes against every new node that is +added, to ensure that the node is operating properly before allowing it to +service traffic. Only one health monitor is allowed to be enabled on a load +balancer at a time. + +As part of a good strategy for monitoring connections, secondary nodes should +also be created which provide failover for effectively routing traffic in case +the primary node fails. This is an additional feature that ensures that you +remain up in case your primary node fails. + +There are three types of health monitor: CONNECT, HTTP and HTTPS. 
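+
+As an illustrative sketch (assuming an authenticated *gophercloud.ServiceClient
+named client; the load balancer ID is made up), a CONNECT monitor could be
+configured like so:
+
+	opts := monitors.UpdateConnectMonitorOpts{
+		AttemptLimit: 3,
+		Delay:        10,
+		Timeout:      10,
+	}
+	err := monitors.Update(client, 72349, opts).ExtractErr()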
+*/ +package monitors diff --git a/vendor/github.com/rackspace/gophercloud/rackspace/lb/v1/monitors/fixtures.go b/vendor/github.com/rackspace/gophercloud/rackspace/lb/v1/monitors/fixtures.go new file mode 100644 index 000000000..a565abced --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/rackspace/lb/v1/monitors/fixtures.go @@ -0,0 +1,87 @@ +package monitors + +import ( + "fmt" + "net/http" + "strconv" + "testing" + + th "github.com/rackspace/gophercloud/testhelper" + fake "github.com/rackspace/gophercloud/testhelper/client" +) + +func _rootURL(lbID int) string { + return "/loadbalancers/" + strconv.Itoa(lbID) + "/healthmonitor" +} + +func mockGetResponse(t *testing.T, lbID int) { + th.Mux.HandleFunc(_rootURL(lbID), func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + + fmt.Fprintf(w, ` +{ + "healthMonitor": { + "type": "CONNECT", + "delay": 10, + "timeout": 10, + "attemptsBeforeDeactivation": 3 + } +} + `) + }) +} + +func mockUpdateConnectResponse(t *testing.T, lbID int) { + th.Mux.HandleFunc(_rootURL(lbID), func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "PUT") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + + th.TestJSONRequest(t, r, ` +{ + "healthMonitor": { + "type": "CONNECT", + "delay": 10, + "timeout": 10, + "attemptsBeforeDeactivation": 3 + } +} + `) + + w.WriteHeader(http.StatusAccepted) + }) +} + +func mockUpdateHTTPResponse(t *testing.T, lbID int) { + th.Mux.HandleFunc(_rootURL(lbID), func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "PUT") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + + th.TestJSONRequest(t, r, ` +{ + "healthMonitor": { + "attemptsBeforeDeactivation": 3, + "bodyRegex": "{regex}", + "delay": 10, + "path": "/foo", + "statusRegex": "200", + "timeout": 10, + "type": "HTTPS" + } +} + `) + + w.WriteHeader(http.StatusAccepted) + }) +} + +func mockDeleteResponse(t *testing.T, lbID int) { + th.Mux.HandleFunc(_rootURL(lbID), func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "DELETE") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + w.WriteHeader(http.StatusAccepted) + }) +} diff --git a/vendor/github.com/rackspace/gophercloud/rackspace/lb/v1/monitors/requests.go b/vendor/github.com/rackspace/gophercloud/rackspace/lb/v1/monitors/requests.go new file mode 100644 index 000000000..d4ba27653 --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/rackspace/lb/v1/monitors/requests.go @@ -0,0 +1,160 @@ +package monitors + +import ( + "errors" + + "github.com/rackspace/gophercloud" +) + +var ( + errAttemptLimit = errors.New("AttemptLimit field must be an int between 1 and 10") + errDelay = errors.New("Delay field must be an int between 1 and 3600") + errTimeout = errors.New("Timeout field must be an int between 1 and 300") +) + +// UpdateOptsBuilder is the interface options structs have to satisfy in order +// to be used in the main Update operation in this package. +type UpdateOptsBuilder interface { + ToMonitorUpdateMap() (map[string]interface{}, error) +} + +// UpdateConnectMonitorOpts represents the options needed to update a CONNECT +// monitor. +type UpdateConnectMonitorOpts struct { + // Required - number of permissible monitor failures before removing a node + // from rotation. Must be a number between 1 and 10.
+ AttemptLimit int + + // Required - the minimum number of seconds to wait before executing the + // health monitor. Must be a number between 1 and 3600. + Delay int + + // Required - maximum number of seconds to wait for a connection to be + // established before timing out. Must be a number between 1 and 300. + Timeout int +} + +// ToMonitorUpdateMap produces a map for updating CONNECT monitors. +func (opts UpdateConnectMonitorOpts) ToMonitorUpdateMap() (map[string]interface{}, error) { + type m map[string]interface{} + + if !gophercloud.IntWithinRange(opts.AttemptLimit, 1, 10) { + return m{}, errAttemptLimit + } + if !gophercloud.IntWithinRange(opts.Delay, 1, 3600) { + return m{}, errDelay + } + if !gophercloud.IntWithinRange(opts.Timeout, 1, 300) { + return m{}, errTimeout + } + + return m{"healthMonitor": m{ + "attemptsBeforeDeactivation": opts.AttemptLimit, + "delay": opts.Delay, + "timeout": opts.Timeout, + "type": CONNECT, + }}, nil +} + +// UpdateHTTPMonitorOpts represents the options needed to update an HTTP monitor. +type UpdateHTTPMonitorOpts struct { + // Required - number of permissible monitor failures before removing a node + // from rotation. Must be a number between 1 and 10. + AttemptLimit int `mapstructure:"attemptsBeforeDeactivation"` + + // Required - the minimum number of seconds to wait before executing the + // health monitor. Must be a number between 1 and 3600. + Delay int + + // Required - maximum number of seconds to wait for a connection to be + // established before timing out. Must be a number between 1 and 300. + Timeout int + + // Required - a regular expression that will be used to evaluate the contents + // of the body of the response. + BodyRegex string + + // Required - the HTTP path that will be used in the sample request. + Path string + + // Required - a regular expression that will be used to evaluate the HTTP + // status code returned in the response. + StatusRegex string + + // Optional - the name of a host for which the health monitors will check. + HostHeader string + + // Required - either HTTP or HTTPS + Type Type +} + +// ToMonitorUpdateMap produces a map for updating HTTP(S) monitors. +func (opts UpdateHTTPMonitorOpts) ToMonitorUpdateMap() (map[string]interface{}, error) { + type m map[string]interface{} + + if !gophercloud.IntWithinRange(opts.AttemptLimit, 1, 10) { + return m{}, errAttemptLimit + } + if !gophercloud.IntWithinRange(opts.Delay, 1, 3600) { + return m{}, errDelay + } + if !gophercloud.IntWithinRange(opts.Timeout, 1, 300) { + return m{}, errTimeout + } + if opts.Type != HTTP && opts.Type != HTTPS { + return m{}, errors.New("Type must be either HTTP or HTTPS") + } + if opts.BodyRegex == "" { + return m{}, errors.New("BodyRegex is a required field") + } + if opts.Path == "" { + return m{}, errors.New("Path is a required field") + } + if opts.StatusRegex == "" { + return m{}, errors.New("StatusRegex is a required field") + } + + json := m{ + "attemptsBeforeDeactivation": opts.AttemptLimit, + "delay": opts.Delay, + "timeout": opts.Timeout, + "type": opts.Type, + "bodyRegex": opts.BodyRegex, + "path": opts.Path, + "statusRegex": opts.StatusRegex, + } + + if opts.HostHeader != "" { + json["hostHeader"] = opts.HostHeader + } + + return m{"healthMonitor": json}, nil +} + +// Update is the operation responsible for updating a health monitor.
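+//
+// As an illustrative sketch (mirroring this package's tests), switching a
+// monitor to HTTPS might look like:
+//
+//	opts := monitors.UpdateHTTPMonitorOpts{
+//		AttemptLimit: 3,
+//		Delay:        10,
+//		Timeout:      10,
+//		BodyRegex:    "{regex}",
+//		Path:         "/foo",
+//		StatusRegex:  "200",
+//		Type:         monitors.HTTPS,
+//	}
+//	err := monitors.Update(client, 72349, opts).ExtractErr()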
+func Update(c *gophercloud.ServiceClient, id int, opts UpdateOptsBuilder) UpdateResult { + var res UpdateResult + + reqBody, err := opts.ToMonitorUpdateMap() + if err != nil { + res.Err = err + return res + } + + _, res.Err = c.Put(rootURL(c, id), reqBody, nil, nil) + return res +} + +// Get is the operation responsible for showing details of a health monitor. +func Get(c *gophercloud.ServiceClient, id int) GetResult { + var res GetResult + _, res.Err = c.Get(rootURL(c, id), &res.Body, nil) + return res +} + +// Delete is the operation responsible for deleting a health monitor. +func Delete(c *gophercloud.ServiceClient, id int) DeleteResult { + var res DeleteResult + _, res.Err = c.Delete(rootURL(c, id), nil) + return res +} diff --git a/vendor/github.com/rackspace/gophercloud/rackspace/lb/v1/monitors/requests_test.go b/vendor/github.com/rackspace/gophercloud/rackspace/lb/v1/monitors/requests_test.go new file mode 100644 index 000000000..76a60db7f --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/rackspace/lb/v1/monitors/requests_test.go @@ -0,0 +1,75 @@ +package monitors + +import ( + "testing" + + th "github.com/rackspace/gophercloud/testhelper" + "github.com/rackspace/gophercloud/testhelper/client" +) + +const lbID = 12345 + +func TestUpdateCONNECT(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + mockUpdateConnectResponse(t, lbID) + + opts := UpdateConnectMonitorOpts{ + AttemptLimit: 3, + Delay: 10, + Timeout: 10, + } + + err := Update(client.ServiceClient(), lbID, opts).ExtractErr() + th.AssertNoErr(t, err) +} + +func TestUpdateHTTP(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + mockUpdateHTTPResponse(t, lbID) + + opts := UpdateHTTPMonitorOpts{ + AttemptLimit: 3, + Delay: 10, + Timeout: 10, + BodyRegex: "{regex}", + Path: "/foo", + StatusRegex: "200", + Type: HTTPS, + } + + err := Update(client.ServiceClient(), lbID, opts).ExtractErr() + th.AssertNoErr(t, err) +} + +func TestGet(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + mockGetResponse(t, lbID) + + m, err := Get(client.ServiceClient(), lbID).Extract() + th.AssertNoErr(t, err) + + expected := &Monitor{ + Type: CONNECT, + Delay: 10, + Timeout: 10, + AttemptLimit: 3, + } + + th.AssertDeepEquals(t, expected, m) +} + +func TestDelete(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + mockDeleteResponse(t, lbID) + + err := Delete(client.ServiceClient(), lbID).ExtractErr() + th.AssertNoErr(t, err) +} diff --git a/vendor/github.com/rackspace/gophercloud/rackspace/lb/v1/monitors/results.go b/vendor/github.com/rackspace/gophercloud/rackspace/lb/v1/monitors/results.go new file mode 100644 index 000000000..eec556f34 --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/rackspace/lb/v1/monitors/results.go @@ -0,0 +1,90 @@ +package monitors + +import ( + "github.com/mitchellh/mapstructure" + + "github.com/rackspace/gophercloud" +) + +// Type represents the type of Monitor. +type Type string + +// Useful constants. +const ( + CONNECT Type = "CONNECT" + HTTP Type = "HTTP" + HTTPS Type = "HTTPS" +) + +// Monitor represents a health monitor API resource. A monitor comes in three +// forms: CONNECT, HTTP or HTTPS. +// +// A CONNECT monitor establishes a basic connection to each node on its defined +// port to ensure that the service is listening properly. The connect monitor +// is the most basic type of health check and does no post-processing or +// protocol-specific health checks. 
+// +// HTTP and HTTPS health monitors are generally considered more intelligent and +// powerful than CONNECT. They are capable of processing an HTTP or HTTPS response +// to determine the condition of a node. They support the same basic properties +// as CONNECT and include additional attributes that are used to evaluate the +// HTTP response. +type Monitor struct { + // Number of permissible monitor failures before removing a node from + // rotation. + AttemptLimit int `mapstructure:"attemptsBeforeDeactivation"` + + // The minimum number of seconds to wait before executing the health monitor. + Delay int + + // Maximum number of seconds to wait for a connection to be established + // before timing out. + Timeout int + + // Type of the health monitor. + Type Type + + // A regular expression that will be used to evaluate the contents of the + // body of the response. + BodyRegex string + + // The name of a host for which the health monitors will check. + HostHeader string + + // The HTTP path that will be used in the sample request. + Path string + + // A regular expression that will be used to evaluate the HTTP status code + // returned in the response. + StatusRegex string +} + +// UpdateResult represents the result of an Update operation. +type UpdateResult struct { + gophercloud.ErrResult +} + +// GetResult represents the result of a Get operation. +type GetResult struct { + gophercloud.Result +} + +// DeleteResult represents the result of a Delete operation. +type DeleteResult struct { + gophercloud.ErrResult +} + +// Extract interprets any GetResult as a Monitor. +func (r GetResult) Extract() (*Monitor, error) { + if r.Err != nil { + return nil, r.Err + } + + var response struct { + M Monitor `mapstructure:"healthMonitor"` + } + + err := mapstructure.Decode(r.Body, &response) + + return &response.M, err +} diff --git a/vendor/github.com/rackspace/gophercloud/rackspace/lb/v1/monitors/urls.go b/vendor/github.com/rackspace/gophercloud/rackspace/lb/v1/monitors/urls.go new file mode 100644 index 000000000..0a1e6df5f --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/rackspace/lb/v1/monitors/urls.go @@ -0,0 +1,16 @@ +package monitors + +import ( + "strconv" + + "github.com/rackspace/gophercloud" +) + +const ( + path = "loadbalancers" + monitorPath = "healthmonitor" +) + +func rootURL(c *gophercloud.ServiceClient, lbID int) string { + return c.ServiceURL(path, strconv.Itoa(lbID), monitorPath) +} diff --git a/vendor/github.com/rackspace/gophercloud/rackspace/lb/v1/nodes/doc.go b/vendor/github.com/rackspace/gophercloud/rackspace/lb/v1/nodes/doc.go new file mode 100644 index 000000000..49c431894 --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/rackspace/lb/v1/nodes/doc.go @@ -0,0 +1,35 @@ +/* +Package nodes provides information and interaction with the Node API resource +for the Rackspace Cloud Load Balancer service. + +Nodes are responsible for servicing the requests received through the load +balancer's virtual IP. A node is usually a virtual machine. By default, the +load balancer employs a basic health check that ensures the node is listening +on its defined port. The node is checked at the time of addition and at regular +intervals as defined by the load balancer's health check configuration. If a +back-end node is not listening on its port, or does not meet the conditions of +the defined check, then connections will not be forwarded to the node, and its +status is changed to OFFLINE.
Only nodes that are in an ONLINE status receive +and can service traffic from the load balancer. + +All nodes have an associated status that indicates whether the node is +ONLINE, OFFLINE, or DRAINING. Only nodes that are in ONLINE status can receive +and service traffic from the load balancer. The OFFLINE status represents a +node that cannot accept or service traffic. A node in DRAINING status +represents a node that stops the traffic manager from sending any additional +new connections to the node, but honors established sessions. If the traffic +manager receives a request and session persistence requires that the node is +used, the traffic manager uses it. The status is determined by the passive or +active health monitors. + +If the WEIGHTED_ROUND_ROBIN load balancer algorithm mode is selected, then the +caller should assign the relevant weights to the node as part of the weight +attribute of the node element. When the algorithm of the load balancer is +changed to WEIGHTED_ROUND_ROBIN and the nodes do not already have an assigned +weight, the service automatically sets the weight to 1 for all nodes. + +One or more secondary nodes can be added to a specified load balancer so that +if all the primary nodes fail, traffic can be redirected to secondary nodes. +The type attribute allows configuring the node as either PRIMARY or SECONDARY. +*/ +package nodes diff --git a/vendor/github.com/rackspace/gophercloud/rackspace/lb/v1/nodes/fixtures.go b/vendor/github.com/rackspace/gophercloud/rackspace/lb/v1/nodes/fixtures.go new file mode 100644 index 000000000..8899fc5e9 --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/rackspace/lb/v1/nodes/fixtures.go @@ -0,0 +1,243 @@ +package nodes + +import ( + "fmt" + "net/http" + "strconv" + "testing" + + th "github.com/rackspace/gophercloud/testhelper" + fake "github.com/rackspace/gophercloud/testhelper/client" +) + +func _rootURL(lbID int) string { + return "/loadbalancers/" + strconv.Itoa(lbID) + "/nodes" +} + +func _nodeURL(lbID, nodeID int) string { + return _rootURL(lbID) + "/" + strconv.Itoa(nodeID) +} + +func mockListResponse(t *testing.T, lbID int) { + th.Mux.HandleFunc(_rootURL(lbID), func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + + fmt.Fprintf(w, ` +{ + "nodes": [ + { + "id": 410, + "address": "10.1.1.1", + "port": 80, + "condition": "ENABLED", + "status": "ONLINE", + "weight": 3, + "type": "PRIMARY" + }, + { + "id": 411, + "address": "10.1.1.2", + "port": 80, + "condition": "ENABLED", + "status": "ONLINE", + "weight": 8, + "type": "SECONDARY" + } + ] +} + `) + }) +} + +func mockCreateResponse(t *testing.T, lbID int) { + th.Mux.HandleFunc(_rootURL(lbID), func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "POST") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + + th.TestJSONRequest(t, r, ` +{ + "nodes": [ + { + "address": "10.2.2.3", + "port": 80, + "condition": "ENABLED", + "type": "PRIMARY" + }, + { + "address": "10.2.2.4", + "port": 81, + "condition": "ENABLED", + "type": "SECONDARY" + } + ] +} + `) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusAccepted) + + fmt.Fprintf(w, ` +{ + "nodes": [ + { + "address": "10.2.2.3", + "id": 185, + "port": 80, + "status": "ONLINE", + "condition": "ENABLED", + "weight": 1, + "type": "PRIMARY" + }, + { + "address": "10.2.2.4", + "id": 186, + "port": 81, + "status": "ONLINE", + 
"condition": "ENABLED", + "weight": 1, + "type": "SECONDARY" + } + ] +} + `) + }) +} + +func mockCreateErrResponse(t *testing.T, lbID int) { + th.Mux.HandleFunc(_rootURL(lbID), func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "POST") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + + th.TestJSONRequest(t, r, ` +{ + "nodes": [ + { + "address": "10.2.2.3", + "port": 80, + "condition": "ENABLED", + "type": "PRIMARY" + }, + { + "address": "10.2.2.4", + "port": 81, + "condition": "ENABLED", + "type": "SECONDARY" + } + ] +} + `) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(422) // Unprocessable Entity + + fmt.Fprintf(w, ` +{ + "code": 422, + "message": "Load Balancer '%d' has a status of 'PENDING_UPDATE' and is considered immutable." +} + `, lbID) + }) +} + +func mockBatchDeleteResponse(t *testing.T, lbID int, ids []int) { + th.Mux.HandleFunc(_rootURL(lbID), func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "DELETE") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + + r.ParseForm() + + for k, v := range ids { + fids := r.Form["id"] + th.AssertEquals(t, strconv.Itoa(v), fids[k]) + } + + w.WriteHeader(http.StatusAccepted) + }) +} + +func mockDeleteResponse(t *testing.T, lbID, nodeID int) { + th.Mux.HandleFunc(_nodeURL(lbID, nodeID), func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "DELETE") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + w.WriteHeader(http.StatusAccepted) + }) +} + +func mockGetResponse(t *testing.T, lbID, nodeID int) { + th.Mux.HandleFunc(_nodeURL(lbID, nodeID), func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + + fmt.Fprintf(w, ` +{ + "node": { + "id": 410, + "address": "10.1.1.1", + "port": 80, + "condition": "ENABLED", + "status": "ONLINE", + "weight": 12, + "type": "PRIMARY" + } +} + `) + }) +} + +func mockUpdateResponse(t *testing.T, lbID, nodeID int) { + th.Mux.HandleFunc(_nodeURL(lbID, nodeID), func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "PUT") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + + th.TestJSONRequest(t, r, ` +{ + "node": { + "condition": "DRAINING", + "weight": 10, + "type": "SECONDARY" + } +} + `) + + w.WriteHeader(http.StatusAccepted) + }) +} + +func mockListEventsResponse(t *testing.T, lbID int) { + th.Mux.HandleFunc(_rootURL(lbID)+"/events", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + + fmt.Fprintf(w, ` +{ + "nodeServiceEvents": [ + { + "detailedMessage": "Node is ok", + "nodeId": 373, + "id": 7, + "type": "UPDATE_NODE", + "description": "Node '373' status changed to 'ONLINE' for load balancer '323'", + "category": "UPDATE", + "severity": "INFO", + "relativeUri": "/406271/loadbalancers/323/nodes/373/events", + "accountId": 406271, + "loadbalancerId": 323, + "title": "Node Status Updated", + "author": "Rackspace Cloud", + "created": "10-30-2012 10:18:23" + } + ] +} +`) + }) +} diff --git a/vendor/github.com/rackspace/gophercloud/rackspace/lb/v1/nodes/requests.go b/vendor/github.com/rackspace/gophercloud/rackspace/lb/v1/nodes/requests.go new file mode 100644 index 000000000..dc2d46c89 --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/rackspace/lb/v1/nodes/requests.go @@ -0,0 +1,251 @@ 
+package nodes + +import ( + "errors" + "fmt" + + "github.com/rackspace/gophercloud" + "github.com/rackspace/gophercloud/pagination" +) + +// List is the operation responsible for returning a paginated collection of +// load balancer nodes. It requires the parent load balancer ID and an optional +// limit integer (passed in either as a pointer or a nil pointer). +func List(client *gophercloud.ServiceClient, loadBalancerID int, limit *int) pagination.Pager { + url := rootURL(client, loadBalancerID) + if limit != nil { + url += fmt.Sprintf("?limit=%d", *limit) + } + + return pagination.NewPager(client, url, func(r pagination.PageResult) pagination.Page { + return NodePage{pagination.SinglePageBase(r)} + }) +} + +// CreateOptsBuilder is the interface responsible for generating the JSON +// for a Create operation. +type CreateOptsBuilder interface { + ToNodeCreateMap() (map[string]interface{}, error) +} + +// CreateOpts is a slice of CreateOpt structs that allow the user to create +// multiple nodes in a single operation (one node per CreateOpt). +type CreateOpts []CreateOpt + +// CreateOpt represents the options to create a single node. +type CreateOpt struct { + // Required - the IP address or CIDR for this back-end node. It can either be + // a private IP (ServiceNet) or a public IP. + Address string + + // Optional - the port on which traffic is sent and received. + Port int + + // Optional - the condition of the node. See the consts in Results.go. + Condition Condition + + // Optional - the type of the node. See the consts in Results.go. + Type Type + + // Optional - a pointer to an integer between 0 and 100. + Weight *int +} + +func validateWeight(weight *int) error { + if weight != nil && (*weight > 100 || *weight < 0) { + return errors.New("Weight must be a valid int between 0 and 100") + } + return nil +} + +// ToNodeCreateMap converts a slice of options into a map that can be used for +// the JSON. +func (opts CreateOpts) ToNodeCreateMap() (map[string]interface{}, error) { + type nodeMap map[string]interface{} + nodes := []nodeMap{} + + for k, v := range opts { + if v.Address == "" { + return nodeMap{}, fmt.Errorf("Address is a required attribute, none provided for CreateOpt element %d", k) + } + if weightErr := validateWeight(v.Weight); weightErr != nil { + return nodeMap{}, weightErr + } + + node := make(map[string]interface{}) + node["address"] = v.Address + + if v.Port > 0 { + node["port"] = v.Port + } + if v.Condition != "" { + node["condition"] = v.Condition + } + if v.Type != "" { + node["type"] = v.Type + } + if v.Weight != nil { + node["weight"] = v.Weight + } + + nodes = append(nodes, node) + } + + return nodeMap{"nodes": nodes}, nil +} + +// Create is the operation responsible for creating a new node on a load +// balancer. Since every load balancer exists in both ServiceNet and the public +// Internet, both private and public IP addresses can be used for nodes. +// +// If nodes need time to boot up services before they become operational, you +// can temporarily prevent traffic from being sent to that node by setting the +// Condition field to DRAINING. Health checks will still be performed; but once +// your node is ready, you can update its condition to ENABLED and have it +// handle traffic.
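+//
+// As an illustrative sketch (mirroring this package's tests; the IDs and
+// addresses are made up), adding two nodes might look like:
+//
+//	opts := nodes.CreateOpts{
+//		nodes.CreateOpt{Address: "10.2.2.3", Port: 80, Condition: nodes.ENABLED, Type: nodes.PRIMARY},
+//		nodes.CreateOpt{Address: "10.2.2.4", Port: 81, Condition: nodes.ENABLED, Type: nodes.SECONDARY},
+//	}
+//	created, err := nodes.Create(client, 12345, opts).ExtractNodes()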
+func Create(client *gophercloud.ServiceClient, loadBalancerID int, opts CreateOptsBuilder) CreateResult { + var res CreateResult + + reqBody, err := opts.ToNodeCreateMap() + if err != nil { + res.Err = err + return res + } + + resp, err := client.Post(rootURL(client, loadBalancerID), reqBody, &res.Body, nil) + + if err != nil { + res.Err = err + return res + } + + pr := pagination.PageResultFromParsed(resp, res.Body) + return CreateResult{pagination.SinglePageBase(pr)} +} + +// BulkDelete is the operation responsible for batch deleting multiple nodes in +// a single operation. It accepts a slice of integer IDs and will remove them +// from the load balancer. The maximum limit is 10 node removals at once. +func BulkDelete(c *gophercloud.ServiceClient, loadBalancerID int, nodeIDs []int) DeleteResult { + var res DeleteResult + + if len(nodeIDs) > 10 || len(nodeIDs) == 0 { + res.Err = errors.New("You must provide a minimum of 1 and a maximum of 10 node IDs") + return res + } + + url := rootURL(c, loadBalancerID) + url += gophercloud.IDSliceToQueryString("id", nodeIDs) + + _, res.Err = c.Delete(url, nil) + return res +} + +// Get is the operation responsible for showing details for a single node. +func Get(c *gophercloud.ServiceClient, lbID, nodeID int) GetResult { + var res GetResult + _, res.Err = c.Get(resourceURL(c, lbID, nodeID), &res.Body, nil) + return res +} + +// UpdateOptsBuilder represents a type that can be converted into a JSON-like +// map structure. +type UpdateOptsBuilder interface { + ToNodeUpdateMap() (map[string]interface{}, error) +} + +// UpdateOpts represent the options for updating an existing node. +type UpdateOpts struct { + // Optional - the condition of the node. See the consts in Results.go. + Condition Condition + + // Optional - the type of the node. See the consts in Results.go. + Type Type + + // Optional - a pointer to an integer between 0 and 100. + Weight *int +} + +// ToNodeUpdateMap converts an options struct into a JSON-like map. +func (opts UpdateOpts) ToNodeUpdateMap() (map[string]interface{}, error) { + node := make(map[string]interface{}) + + if opts.Condition != "" { + node["condition"] = opts.Condition + } + if opts.Weight != nil { + if weightErr := validateWeight(opts.Weight); weightErr != nil { + return node, weightErr + } + node["weight"] = &opts.Weight + } + if opts.Type != "" { + node["type"] = opts.Type + } + + return map[string]interface{}{"node": node}, nil +} + +// Update is the operation responsible for updating an existing node. A node's +// IP, port, and status are immutable attributes and cannot be modified. +func Update(c *gophercloud.ServiceClient, lbID, nodeID int, opts UpdateOptsBuilder) UpdateResult { + var res UpdateResult + + reqBody, err := opts.ToNodeUpdateMap() + if err != nil { + res.Err = err + return res + } + + _, res.Err = c.Put(resourceURL(c, lbID, nodeID), reqBody, nil, nil) + return res +} + +// Delete is the operation responsible for permanently deleting a node. +func Delete(c *gophercloud.ServiceClient, lbID, nodeID int) DeleteResult { + var res DeleteResult + _, res.Err = c.Delete(resourceURL(c, lbID, nodeID), nil) + return res +} + +// ListEventsOptsBuilder allows extensions to add additional parameters to the +// List request. +type ListEventsOptsBuilder interface { + ToEventsListQuery() (string, error) +} + +// ListEventsOpts allows the filtering and sorting of paginated collections through +// the API. 
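+//
+// An illustrative sketch (mirroring this package's tests) of paging through a
+// load balancer's node events:
+//
+//	pager := nodes.ListEvents(client, 12345, nodes.ListEventsOpts{Limit: 10})
+//	err := pager.EachPage(func(page pagination.Page) (bool, error) {
+//		events, err := nodes.ExtractNodeEvents(page)
+//		if err != nil {
+//			return false, err
+//		}
+//		fmt.Println(events)
+//		return true, nil
+//	})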
+type ListEventsOpts struct { + Marker string `q:"marker"` + Limit int `q:"limit"` +} + +// ToEventsListQuery formats a ListOpts into a query string. +func (opts ListEventsOpts) ToEventsListQuery() (string, error) { + q, err := gophercloud.BuildQueryString(opts) + if err != nil { + return "", err + } + return q.String(), nil +} + +// ListEvents is the operation responsible for listing all the events +// associated with the activity between the node and the load balancer. The +// events report errors found with the node. The detailedMessage provides the +// detailed reason for the error. +func ListEvents(client *gophercloud.ServiceClient, loadBalancerID int, opts ListEventsOptsBuilder) pagination.Pager { + url := eventsURL(client, loadBalancerID) + + if opts != nil { + query, err := opts.ToEventsListQuery() + if err != nil { + return pagination.Pager{Err: err} + } + url += query + } + + return pagination.NewPager(client, url, func(r pagination.PageResult) pagination.Page { + return NodeEventPage{pagination.SinglePageBase(r)} + }) +} diff --git a/vendor/github.com/rackspace/gophercloud/rackspace/lb/v1/nodes/requests_test.go b/vendor/github.com/rackspace/gophercloud/rackspace/lb/v1/nodes/requests_test.go new file mode 100644 index 000000000..a964af8f9 --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/rackspace/lb/v1/nodes/requests_test.go @@ -0,0 +1,243 @@ +package nodes + +import ( + "testing" + + "github.com/rackspace/gophercloud" + "github.com/rackspace/gophercloud/pagination" + th "github.com/rackspace/gophercloud/testhelper" + "github.com/rackspace/gophercloud/testhelper/client" +) + +const ( + lbID = 12345 + nodeID = 67890 + nodeID2 = 67891 +) + +func TestList(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + mockListResponse(t, lbID) + + count := 0 + + err := List(client.ServiceClient(), lbID, nil).EachPage(func(page pagination.Page) (bool, error) { + count++ + actual, err := ExtractNodes(page) + th.AssertNoErr(t, err) + + expected := []Node{ + Node{ + ID: 410, + Address: "10.1.1.1", + Port: 80, + Condition: ENABLED, + Status: ONLINE, + Weight: 3, + Type: PRIMARY, + }, + Node{ + ID: 411, + Address: "10.1.1.2", + Port: 80, + Condition: ENABLED, + Status: ONLINE, + Weight: 8, + Type: SECONDARY, + }, + } + + th.CheckDeepEquals(t, expected, actual) + + return true, nil + }) + + th.AssertNoErr(t, err) + th.AssertEquals(t, 1, count) +} + +func TestCreate(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + mockCreateResponse(t, lbID) + + opts := CreateOpts{ + CreateOpt{ + Address: "10.2.2.3", + Port: 80, + Condition: ENABLED, + Type: PRIMARY, + }, + CreateOpt{ + Address: "10.2.2.4", + Port: 81, + Condition: ENABLED, + Type: SECONDARY, + }, + } + + page := Create(client.ServiceClient(), lbID, opts) + + actual, err := page.ExtractNodes() + th.AssertNoErr(t, err) + + expected := []Node{ + Node{ + ID: 185, + Address: "10.2.2.3", + Port: 80, + Condition: ENABLED, + Status: ONLINE, + Weight: 1, + Type: PRIMARY, + }, + Node{ + ID: 186, + Address: "10.2.2.4", + Port: 81, + Condition: ENABLED, + Status: ONLINE, + Weight: 1, + Type: SECONDARY, + }, + } + + th.CheckDeepEquals(t, expected, actual) +} + +func TestCreateErr(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + mockCreateErrResponse(t, lbID) + + opts := CreateOpts{ + CreateOpt{ + Address: "10.2.2.3", + Port: 80, + Condition: ENABLED, + Type: PRIMARY, + }, + CreateOpt{ + Address: "10.2.2.4", + Port: 81, + Condition: ENABLED, + Type: SECONDARY, + }, + } + + page := Create(client.ServiceClient(), 
lbID, opts) + + actual, err := page.ExtractNodes() + if err == nil { + t.Fatal("Did not receive expected error from ExtractNodes") + } + if actual != nil { + t.Fatalf("Received non-nil result from failed ExtractNodes: %#v", actual) + } +} + +func TestBulkDelete(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + ids := []int{nodeID, nodeID2} + + mockBatchDeleteResponse(t, lbID, ids) + + err := BulkDelete(client.ServiceClient(), lbID, ids).ExtractErr() + th.AssertNoErr(t, err) +} + +func TestGet(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + mockGetResponse(t, lbID, nodeID) + + node, err := Get(client.ServiceClient(), lbID, nodeID).Extract() + th.AssertNoErr(t, err) + + expected := &Node{ + ID: 410, + Address: "10.1.1.1", + Port: 80, + Condition: ENABLED, + Status: ONLINE, + Weight: 12, + Type: PRIMARY, + } + + th.AssertDeepEquals(t, expected, node) +} + +func TestUpdate(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + mockUpdateResponse(t, lbID, nodeID) + + opts := UpdateOpts{ + Weight: gophercloud.IntToPointer(10), + Condition: DRAINING, + Type: SECONDARY, + } + + err := Update(client.ServiceClient(), lbID, nodeID, opts).ExtractErr() + th.AssertNoErr(t, err) +} + +func TestDelete(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + mockDeleteResponse(t, lbID, nodeID) + + err := Delete(client.ServiceClient(), lbID, nodeID).ExtractErr() + th.AssertNoErr(t, err) +} + +func TestListEvents(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + mockListEventsResponse(t, lbID) + + count := 0 + + pager := ListEvents(client.ServiceClient(), lbID, ListEventsOpts{}) + + err := pager.EachPage(func(page pagination.Page) (bool, error) { + count++ + actual, err := ExtractNodeEvents(page) + th.AssertNoErr(t, err) + + expected := []NodeEvent{ + NodeEvent{ + DetailedMessage: "Node is ok", + NodeID: 373, + ID: 7, + Type: "UPDATE_NODE", + Description: "Node '373' status changed to 'ONLINE' for load balancer '323'", + Category: "UPDATE", + Severity: "INFO", + RelativeURI: "/406271/loadbalancers/323/nodes/373/events", + AccountID: 406271, + LoadBalancerID: 323, + Title: "Node Status Updated", + Author: "Rackspace Cloud", + Created: "10-30-2012 10:18:23", + }, + } + + th.CheckDeepEquals(t, expected, actual) + + return true, nil + }) + + th.AssertNoErr(t, err) + th.AssertEquals(t, 1, count) +} diff --git a/vendor/github.com/rackspace/gophercloud/rackspace/lb/v1/nodes/results.go b/vendor/github.com/rackspace/gophercloud/rackspace/lb/v1/nodes/results.go new file mode 100644 index 000000000..57835dc4b --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/rackspace/lb/v1/nodes/results.go @@ -0,0 +1,213 @@ +package nodes + +import ( + "github.com/mitchellh/mapstructure" + "github.com/rackspace/gophercloud" + "github.com/rackspace/gophercloud/pagination" +) + +// Node represents a back-end device, usually a virtual machine, that can +// handle traffic. It is assigned traffic based on its parent load balancer. +type Node struct { + // The IP address or CIDR for this back-end node. + Address string + + // The unique ID for this node. + ID int + + // The port on which traffic is sent and received. + Port int + + // The node's status. + Status Status + + // The node's condition. + Condition Condition + + // The priority at which this node will receive traffic if a weighted + // algorithm is used by its parent load balancer. Ranges from 1 to 100. + Weight int + + // Type of node. 
+ Type Type +} + +// Type indicates whether the node is of a PRIMARY or SECONDARY nature. +type Type string + +const ( + // PRIMARY nodes are in the normal rotation to receive traffic from the load + // balancer. + PRIMARY Type = "PRIMARY" + + // SECONDARY nodes are only in the rotation to receive traffic from the load + // balancer when all the primary nodes fail. This provides a failover feature + // that automatically routes traffic to the secondary node in the event that + // the primary node is disabled or in a failing state. Note that active + // health monitoring must be enabled on the load balancer to enable the + // failover feature to the secondary node. + SECONDARY Type = "SECONDARY" +) + +// Condition represents the condition of a node. +type Condition string + +const ( + // ENABLED indicates that the node is permitted to accept new connections. + ENABLED Condition = "ENABLED" + + // DISABLED indicates that the node is not permitted to accept any new + // connections regardless of session persistence configuration. Existing + // connections are forcibly terminated. + DISABLED Condition = "DISABLED" + + // DRAINING indicates that the node is allowed to service existing + // established connections and connections that are being directed to it as a + // result of the session persistence configuration. + DRAINING Condition = "DRAINING" +) + +// Status indicates whether the node can accept service traffic. If a node is +// not listening on its port or does not meet the conditions of the defined +// active health check for the load balancer, then the load balancer does not +// forward connections, and its status is listed as OFFLINE. +type Status string + +const ( + // ONLINE indicates that the node is healthy and capable of receiving traffic + // from the load balancer. + ONLINE Status = "ONLINE" + + // OFFLINE indicates that the node is not in a position to receive service + // traffic. It is usually switched into this state when a health check is not + // satisfied with the node's response time. + OFFLINE Status = "OFFLINE" +) + +// NodePage is the page returned by a pager when traversing over a collection +// of nodes. +type NodePage struct { + pagination.SinglePageBase +} + +// IsEmpty checks whether a NodePage struct is empty. +func (p NodePage) IsEmpty() (bool, error) { + is, err := ExtractNodes(p) + if err != nil { + return true, nil + } + return len(is) == 0, nil +} + +func commonExtractNodes(body interface{}) ([]Node, error) { + var resp struct { + Nodes []Node `mapstructure:"nodes" json:"nodes"` + } + + err := mapstructure.Decode(body, &resp) + + return resp.Nodes, err +} + +// ExtractNodes accepts a Page struct, specifically a NodePage struct, and +// extracts the elements into a slice of Node structs. In other words, a +// generic collection is mapped into a relevant slice. +func ExtractNodes(page pagination.Page) ([]Node, error) { + return commonExtractNodes(page.(NodePage).Body) +} + +// CreateResult represents the result of a create operation. Since multiple +// nodes can be added in one operation, this result represents multiple nodes +// and should be treated as a typical pagination Page. Use its ExtractNodes +// method to get out a slice of Node structs. +type CreateResult struct { + pagination.SinglePageBase +} + +// ExtractNodes extracts a slice of Node structs from a CreateResult. 
+func (res CreateResult) ExtractNodes() ([]Node, error) { + if res.Err != nil { + return nil, res.Err + } + return commonExtractNodes(res.Body) +} + +// DeleteResult represents the result of a delete operation. +type DeleteResult struct { + gophercloud.ErrResult +} + +type commonResult struct { + gophercloud.Result +} + +// GetResult represents the result of a get operation. +type GetResult struct { + commonResult +} + +// UpdateResult represents the result of an update operation. +type UpdateResult struct { + gophercloud.ErrResult +} + +func (r commonResult) Extract() (*Node, error) { + if r.Err != nil { + return nil, r.Err + } + + var response struct { + Node Node `mapstructure:"node"` + } + + err := mapstructure.Decode(r.Body, &response) + + return &response.Node, err +} + +// NodeEvent represents a service event that occurred between a node and a +// load balancer. +type NodeEvent struct { + ID int + DetailedMessage string + NodeID int + Type string + Description string + Category string + Severity string + RelativeURI string + AccountID int + LoadBalancerID int + Title string + Author string + Created string +} + +// NodeEventPage is a concrete type which embeds the common SinglePageBase +// struct, and is used when traversing node event collections. +type NodeEventPage struct { + pagination.SinglePageBase +} + +// IsEmpty is a concrete function which indicates whether a NodeEventPage is +// empty or not. +func (r NodeEventPage) IsEmpty() (bool, error) { + is, err := ExtractNodeEvents(r) + if err != nil { + return true, err + } + return len(is) == 0, nil +} + +// ExtractNodeEvents accepts a Page struct, specifically a NodeEventPage +// struct, and extracts the elements into a slice of NodeEvent structs. In +// other words, the collection is mapped into a relevant slice. +func ExtractNodeEvents(page pagination.Page) ([]NodeEvent, error) { + var resp struct { + Events []NodeEvent `mapstructure:"nodeServiceEvents" json:"nodeServiceEvents"` + } + + err := mapstructure.Decode(page.(NodeEventPage).Body, &resp) + + return resp.Events, err +} diff --git a/vendor/github.com/rackspace/gophercloud/rackspace/lb/v1/nodes/urls.go b/vendor/github.com/rackspace/gophercloud/rackspace/lb/v1/nodes/urls.go new file mode 100644 index 000000000..2cefee264 --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/rackspace/lb/v1/nodes/urls.go @@ -0,0 +1,25 @@ +package nodes + +import ( + "strconv" + + "github.com/rackspace/gophercloud" +) + +const ( + lbPath = "loadbalancers" + nodePath = "nodes" + eventPath = "events" +) + +func resourceURL(c *gophercloud.ServiceClient, lbID, nodeID int) string { + return c.ServiceURL(lbPath, strconv.Itoa(lbID), nodePath, strconv.Itoa(nodeID)) +} + +func rootURL(c *gophercloud.ServiceClient, lbID int) string { + return c.ServiceURL(lbPath, strconv.Itoa(lbID), nodePath) +} + +func eventsURL(c *gophercloud.ServiceClient, lbID int) string { + return c.ServiceURL(lbPath, strconv.Itoa(lbID), nodePath, eventPath) +} diff --git a/vendor/github.com/rackspace/gophercloud/rackspace/lb/v1/sessions/doc.go b/vendor/github.com/rackspace/gophercloud/rackspace/lb/v1/sessions/doc.go new file mode 100644 index 000000000..dcec0a87e --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/rackspace/lb/v1/sessions/doc.go @@ -0,0 +1,30 @@ +/* +Package sessions provides information and interaction with the Session +Persistence feature of the Rackspace Cloud Load Balancer service.
+ +Session persistence is a feature of the load balancing service that forces +multiple requests from clients (of the same protocol) to be directed to the +same node. This is common with many web applications that do not inherently +share application state between back-end servers. + +There are two modes to choose from: HTTP_COOKIE and SOURCE_IP. You can only set +one of the session persistence modes on a load balancer, and it can only +support one protocol. If you set HTTP_COOKIE mode for an HTTP load balancer, it +supports session persistence for HTTP requests only. Likewise, if you set +SOURCE_IP mode for an HTTPS load balancer, it supports session persistence for +only HTTPS requests. + +To support session persistence for both HTTP and HTTPS requests concurrently, +choose one of the following options: + +- Use two load balancers, one configured for session persistence for HTTP +requests and the other configured for session persistence for HTTPS requests. +That way, the load balancers support session persistence for both HTTP and +HTTPS requests concurrently, with each load balancer supporting one of the +protocols. + +- Use one load balancer, configure it for session persistence for HTTP requests, +and then enable SSL termination for that load balancer. The load balancer +supports session persistence for both HTTP and HTTPS requests concurrently. +*/ +package sessions diff --git a/vendor/github.com/rackspace/gophercloud/rackspace/lb/v1/sessions/fixtures.go b/vendor/github.com/rackspace/gophercloud/rackspace/lb/v1/sessions/fixtures.go new file mode 100644 index 000000000..077ef0470 --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/rackspace/lb/v1/sessions/fixtures.go @@ -0,0 +1,59 @@ +package sessions + +import ( + "fmt" + "net/http" + "strconv" + "testing" + + th "github.com/rackspace/gophercloud/testhelper" + fake "github.com/rackspace/gophercloud/testhelper/client" +) + +func _rootURL(id int) string { + return "/loadbalancers/" + strconv.Itoa(id) + "/sessionpersistence" +} + +func mockGetResponse(t *testing.T, lbID int) { + th.Mux.HandleFunc(_rootURL(lbID), func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + + fmt.Fprintf(w, ` +{ + "sessionPersistence": { + "persistenceType": "HTTP_COOKIE" + } +} +`) + }) +} + +func mockEnableResponse(t *testing.T, lbID int) { + th.Mux.HandleFunc(_rootURL(lbID), func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "PUT") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + + th.TestJSONRequest(t, r, ` +{ + "sessionPersistence": { + "persistenceType": "HTTP_COOKIE" + } +} + `) + + w.WriteHeader(http.StatusAccepted) + fmt.Fprintf(w, `{}`) + }) +} + +func mockDisableResponse(t *testing.T, lbID int) { + th.Mux.HandleFunc(_rootURL(lbID), func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "DELETE") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + w.WriteHeader(http.StatusAccepted) + }) +} diff --git a/vendor/github.com/rackspace/gophercloud/rackspace/lb/v1/sessions/requests.go b/vendor/github.com/rackspace/gophercloud/rackspace/lb/v1/sessions/requests.go new file mode 100644 index 000000000..a93d766cd --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/rackspace/lb/v1/sessions/requests.go @@ -0,0 +1,63 @@ +package sessions + +import ( + "errors" + + "github.com/rackspace/gophercloud" +) + +// CreateOptsBuilder is the interface options 
structs have to satisfy in order +// to be used in the main Create operation in this package. +type CreateOptsBuilder interface { + ToSPCreateMap() (map[string]interface{}, error) +} + +// CreateOpts is the common options struct used in this package's Create +// operation. +type CreateOpts struct { + // Required - can either be HTTPCOOKIE or SOURCEIP + Type Type +} + +// ToSPCreateMap casts a CreateOpts struct to a map. +func (opts CreateOpts) ToSPCreateMap() (map[string]interface{}, error) { + sp := make(map[string]interface{}) + + if opts.Type == "" { + return sp, errors.New("Type is a required field") + } + + sp["persistenceType"] = opts.Type + return map[string]interface{}{"sessionPersistence": sp}, nil +} + +// Enable is the operation responsible for enabling session persistence for a +// particular load balancer. +func Enable(c *gophercloud.ServiceClient, lbID int, opts CreateOptsBuilder) EnableResult { + var res EnableResult + + reqBody, err := opts.ToSPCreateMap() + if err != nil { + res.Err = err + return res + } + + _, res.Err = c.Put(rootURL(c, lbID), reqBody, &res.Body, nil) + return res +} + +// Get is the operation responsible for showing details of the session +// persistence configuration for a particular load balancer. +func Get(c *gophercloud.ServiceClient, lbID int) GetResult { + var res GetResult + _, res.Err = c.Get(rootURL(c, lbID), &res.Body, nil) + return res +} + +// Disable is the operation responsible for disabling session persistence for a +// particular load balancer. +func Disable(c *gophercloud.ServiceClient, lbID int) DisableResult { + var res DisableResult + _, res.Err = c.Delete(rootURL(c, lbID), nil) + return res +} diff --git a/vendor/github.com/rackspace/gophercloud/rackspace/lb/v1/sessions/requests_test.go b/vendor/github.com/rackspace/gophercloud/rackspace/lb/v1/sessions/requests_test.go new file mode 100644 index 000000000..f319e540b --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/rackspace/lb/v1/sessions/requests_test.go @@ -0,0 +1,44 @@ +package sessions + +import ( + "testing" + + th "github.com/rackspace/gophercloud/testhelper" + "github.com/rackspace/gophercloud/testhelper/client" +) + +const lbID = 12345 + +func TestEnable(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + mockEnableResponse(t, lbID) + + opts := CreateOpts{Type: HTTPCOOKIE} + err := Enable(client.ServiceClient(), lbID, opts).ExtractErr() + th.AssertNoErr(t, err) +} + +func TestGet(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + mockGetResponse(t, lbID) + + sp, err := Get(client.ServiceClient(), lbID).Extract() + th.AssertNoErr(t, err) + + expected := &SessionPersistence{Type: HTTPCOOKIE} + th.AssertDeepEquals(t, expected, sp) +} + +func TestDisable(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + mockDisableResponse(t, lbID) + + err := Disable(client.ServiceClient(), lbID).ExtractErr() + th.AssertNoErr(t, err) +} diff --git a/vendor/github.com/rackspace/gophercloud/rackspace/lb/v1/sessions/results.go b/vendor/github.com/rackspace/gophercloud/rackspace/lb/v1/sessions/results.go new file mode 100644 index 000000000..fe90e722c --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/rackspace/lb/v1/sessions/results.go @@ -0,0 +1,58 @@ +package sessions + +import ( + "github.com/mitchellh/mapstructure" + + "github.com/rackspace/gophercloud" +) + +// Type represents the type of session persistence being used. 
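+//
+// As an illustrative sketch (mirroring this package's tests), enabling
+// cookie-based persistence on load balancer 12345 looks like:
+//
+//	opts := sessions.CreateOpts{Type: sessions.HTTPCOOKIE}
+//	err := sessions.Enable(client, 12345, opts).ExtractErr()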
+type Type string + +const ( + // HTTPCOOKIE is a session persistence mechanism that inserts an HTTP cookie + // and is used to determine the destination back-end node. This is supported + // for HTTP load balancing only. + HTTPCOOKIE Type = "HTTP_COOKIE" + + // SOURCEIP is a session persistence mechanism that keeps track of the source + // IP address that is mapped and is able to determine the destination + // back-end node. This is supported for HTTPS pass-through and non-HTTP load + // balancing only. + SOURCEIP Type = "SOURCE_IP" +) + +// SessionPersistence indicates how a load balancer is using session persistence +type SessionPersistence struct { + Type Type `mapstructure:"persistenceType"` +} + +// EnableResult represents the result of an enable operation. +type EnableResult struct { + gophercloud.ErrResult +} + +// DisableResult represents the result of a disable operation. +type DisableResult struct { + gophercloud.ErrResult +} + +// GetResult represents the result of a get operation. +type GetResult struct { + gophercloud.Result +} + +// Extract interprets a GetResult as an SP, if possible. +func (r GetResult) Extract() (*SessionPersistence, error) { + if r.Err != nil { + return nil, r.Err + } + + var response struct { + SP SessionPersistence `mapstructure:"sessionPersistence"` + } + + err := mapstructure.Decode(r.Body, &response) + + return &response.SP, err +} diff --git a/vendor/github.com/rackspace/gophercloud/rackspace/lb/v1/sessions/urls.go b/vendor/github.com/rackspace/gophercloud/rackspace/lb/v1/sessions/urls.go new file mode 100644 index 000000000..c4a896d90 --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/rackspace/lb/v1/sessions/urls.go @@ -0,0 +1,16 @@ +package sessions + +import ( + "strconv" + + "github.com/rackspace/gophercloud" +) + +const ( + path = "loadbalancers" + spPath = "sessionpersistence" +) + +func rootURL(c *gophercloud.ServiceClient, id int) string { + return c.ServiceURL(path, strconv.Itoa(id), spPath) +} diff --git a/vendor/github.com/rackspace/gophercloud/rackspace/lb/v1/ssl/doc.go b/vendor/github.com/rackspace/gophercloud/rackspace/lb/v1/ssl/doc.go new file mode 100644 index 000000000..6a2c174ae --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/rackspace/lb/v1/ssl/doc.go @@ -0,0 +1,22 @@ +/* +Package ssl provides information and interaction with the SSL Termination +feature of the Rackspace Cloud Load Balancer service. + +You may only enable and configure SSL termination on load balancers with +non-secure protocols, such as HTTP, but not HTTPS. + +SSL-terminated load balancers decrypt the traffic at the traffic manager and +pass unencrypted traffic to the back-end node. Because of this, the customer's +back-end nodes don't know what protocol the client requested. For this reason, +the X-Forwarded-Proto (XFP) header has been added for identifying the +originating protocol of an HTTP request as "http" or "https" depending on what +protocol the client requested. + +Not every service returns certificates in the proper order. Please verify that +your chain of certificates matches that of walking up the chain from the domain +to the CA root. + +If used for HTTP to HTTPS redirection, the LoadBalancer's securePort attribute +must be set to 443, and its secureTrafficOnly attribute must be true. 
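+
+As an illustrative sketch (shaped after the fixtures in this package), the
+request body for enabling SSL termination with HTTP-to-HTTPS redirection
+looks like:
+
+	{
+		"sslTermination": {
+			"enabled": true,
+			"securePort": 443,
+			"secureTrafficOnly": true,
+			"privateKey": "...",
+			"certificate": "..."
+		}
+	}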
+*/ +package ssl diff --git a/vendor/github.com/rackspace/gophercloud/rackspace/lb/v1/ssl/fixtures.go b/vendor/github.com/rackspace/gophercloud/rackspace/lb/v1/ssl/fixtures.go new file mode 100644 index 000000000..5a52962d4 --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/rackspace/lb/v1/ssl/fixtures.go @@ -0,0 +1,196 @@ +package ssl + +import ( + "fmt" + "net/http" + "strconv" + "testing" + + th "github.com/rackspace/gophercloud/testhelper" + fake "github.com/rackspace/gophercloud/testhelper/client" +) + +func _rootURL(id int) string { + return "/loadbalancers/" + strconv.Itoa(id) + "/ssltermination" +} + +func _certURL(id, certID int) string { + url := _rootURL(id) + "/certificatemappings" + if certID > 0 { + url += "/" + strconv.Itoa(certID) + } + return url +} + +func mockGetResponse(t *testing.T, lbID int) { + th.Mux.HandleFunc(_rootURL(lbID), func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + + fmt.Fprintf(w, ` +{ + "sslTermination": { + "certificate": "-----BEGIN CERTIFICATE-----\nMIIEXTCCA0WgAwIBAgIGATTEAjK3MA0GCSqGSIb3DQEBBQUAMIGDMRkwFwYDVQQD\nExBUZXN0IENBIFNUdWIgS2V5MRcwFQYDVQQLEw5QbGF0Zm9ybSBMYmFhczEaMBgG\nA1UEChMRUmFja3NwYWNlIEhvc3RpbmcxFDASBgNVBAcTC1NhbiBBbnRvbmlvMQ4w\nDAYDVQQIEwVUZXhhczELMAkGA1UEBhMCVVMwHhcNMTIwMTA5MTk0NjQ1WhcNMTQw\nMTA4MTk0NjQ1WjCBgjELMAkGA1UEBhMCVVMxDjAMBgNVBAgTBVRleGFzMRQwEgYD\nVQQHEwtTYW4gQW50b25pbzEaMBgGA1UEChMRUmFja3NwYWNlIEhvc3RpbmcxFzAV\nBgNVBAsTDlBsYXRmb3JtIExiYWFzMRgwFgYDVQQDEw9UZXN0IENsaWVudCBLZXkw\nggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDAi51IylFnHtNLT8C0NVfc\nOBfAsP2D5es1qhrOWHCGlgAuDMksBsCc7FPo5PSBOmQ+6z8HtCFbrLoC5/Zx0F5b\nfVegjA+xKjI2HGASsYHHM0BFEH2UjUcJrWiMWtxQuW6Phbqulo7JwjmygMEmIkeK\nf+FtkE9mrq+E8K40/thrjxl3/ZcJD1+3dcp+ZuzVJ2t1E4iGKCx79IZFsysKiuf+\n+E0i6iGvvI6UcbcZxVxQj2TplJkFuoX5kDgClIX9Dr9y6aJ4SCh+GRhvHl+DTaz0\nnCvghachHZtIeztRDqxWApjOOzs93dSelrviMXDr8fqyEAGg7YIhgui0aZBsWCen\nAgMBAAGjgdUwgdIwgbAGA1UdIwSBqDCBpYAUNpx1Pc6cGA7KqEwHMmHBTZMA7lSh\ngYmkgYYwgYMxGTAXBgNVBAMTEFRlc3QgQ0EgU1R1YiBLZXkxFzAVBgNVBAsTDlBs\nYXRmb3JtIExiYWFzMRowGAYDVQQKExFSYWNrc3BhY2UgSG9zdGluZzEUMBIGA1UE\nBxMLU2FuIEFudG9uaW8xDjAMBgNVBAgTBVRleGFzMQswCQYDVQQGEwJVU4IBATAd\nBgNVHQ4EFgQULueOfsjZZOHwJHZwBy6u0swnpccwDQYJKoZIhvcNAQEFBQADggEB\nAFNuqSVUaotUJoWDv4z7Kbi6JFpTjDht5ORw4BdVYlRD4h9DACAFzPrPV2ym/Osp\nhNMdZq6msZku7MdOSQVhdeGWrSNk3M8O9Hg7cVzPNXOF3iNoo3irQ5tURut44xs4\nWw5YWQqS9WyUY5snD8tm7Y1rQTPfhg+678xIq/zWCv/u+FSnfVv1nlhLVQkEeG/Y\ngh1uMaTIpUKTGEjIAGtpGP7wwIcXptR/HyfzhTUSTaWc1Ef7zoKT9LL5z3IV1hC2\njVWz+RwYs98LjMuksJFoHqRfWyYhCIym0jb6GTwaEmpxAjc+d7OLNQdnoEGoUYGP\nYjtfkRYg265ESMA+Kww4Xy8=\n-----END CERTIFICATE-----\n", + "enabled": true, + "secureTrafficOnly": false, + "intermediateCertificate": "-----BEGIN 
CERTIFICATE-----\nMIIDtTCCAp2gAwIBAgIBATANBgkqhkiG9w0BAQUFADCBgzEZMBcGA1UEAxMQVGVz\ndCBDQSBTVHViIEtleTEXMBUGA1UECxMOUGxhdGZvcm0gTGJhYXMxGjAYBgNVBAoT\nEVJhY2tzcGFjZSBIb3N0aW5nMRQwEgYDVQQHEwtTYW4gQW50b25pbzEOMAwGA1UE\nCBMFVGV4YXMxCzAJBgNVBAYTAlVTMB4XDTEyMDEwOTE5NDU0OVoXDTE0MDEwODE5\nNDU0OVowgYMxGTAXBgNVBAMTEFRlc3QgQ0EgU1R1YiBLZXkxFzAVBgNVBAsTDlBs\nYXRmb3JtIExiYWFzMRowGAYDVQQKExFSYWNrc3BhY2UgSG9zdGluZzEUMBIGA1UE\nBxMLU2FuIEFudG9uaW8xDjAMBgNVBAgTBVRleGFzMQswCQYDVQQGEwJVUzCCASIw\nDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBANNh55lwTVwQvNoEZjq1zGdYz9jA\nXXdjizn8AJhjHLOAallfPtvCfTEgKanhdoyz5FnhQE8HbDAop/KNS1lN2UMvdl5f\nZNLTSjJrNtedqxQwxN/i3bpyBxNVejUH2NjV1mmyj+5CJYwCzWalvI/gLPq/A3as\nO2EQqtf3U8unRgn0zXLRdYxV9MrUzNAmdipPNvNrsVdrCgA42rgF/8KsyRVQfJCX\nfN7PGCfrsC3YaUvhymraWxNnXIzMYTNa9wEeBZLUw8SlEtpa1Zsvui+TPXu3USNZ\nVnWH8Lb6ENlnoX0VBwo62fjOG3JzhNKoJawi3bRqyDdINOvafr7iPrrs/T8CAwEA\nAaMyMDAwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUNpx1Pc6cGA7KqEwHMmHB\nTZMA7lQwDQYJKoZIhvcNAQEFBQADggEBAMoRgH3iTG3t317viLKoY+lNMHUgHuR7\nb3mn9MidJKyYVewe6hCDIN6WY4fUojmMW9wFJWJIo0hRMNHL3n3tq8HP2j20Mxy8\nacPdfGZJa+jiBw72CrIGdobKaFduIlIEDBA1pNdZIJ+EulrtqrMesnIt92WaypIS\n8JycbIgDMCiyC0ENHEk8UWlC6429c7OZAsplMTbHME/1R4btxjkdfrYZJjdJ2yL2\n8cjZDUDMCPTdW/ycP07Gkq30RB5tACB5aZdaCn2YaKC8FsEdhff4X7xEOfOEHWEq\nSRxADDj8Lx1MT6QpR07hCiDyHfTCtbqzI0iGjX63Oh7xXSa0f+JVTa8=\n-----END CERTIFICATE-----\n", + "securePort": 443 + } +} +`) + }) +} + +func mockUpdateResponse(t *testing.T, lbID int) { + th.Mux.HandleFunc(_rootURL(lbID), func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "PUT") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + + th.TestJSONRequest(t, r, ` +{ + "sslTermination": { + "enabled": true, + "securePort": 443, + "secureTrafficOnly": false, + "privateKey": "foo", + "certificate": "-----BEGIN CERTIFICATE-----\nMIIEXTCCA0WgAwIBAgIGATTEAjK3MA0GCSqGSIb3DQEBBQUAMIGDMRkwFwYDVQQD\nExBUZXN0IENBIFNUdWIgS2V5MRcwFQYDVQQLEw5QbGF0Zm9ybSBMYmFhczEaMBgG\nA1UEChMRUmFja3NwYWNlIEhvc3RpbmcxFDASBgNVBAcTC1NhbiBBbnRvbmlvMQ4w\nDAYDVQQIEwVUZXhhczELMAkGA1UEBhMCVVMwHhcNMTIwMTA5MTk0NjQ1WhcNMTQw\nMTA4MTk0NjQ1WjCBgjELMAkGA1UEBhMCVVMxDjAMBgNVBAgTBVRleGFzMRQwEgYD\nVQQHEwtTYW4gQW50b25pbzEaMBgGA1UEChMRUmFja3NwYWNlIEhvc3RpbmcxFzAV\nBgNVBAsTDlBsYXRmb3JtIExiYWFzMRgwFgYDVQQDEw9UZXN0IENsaWVudCBLZXkw\nggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDAi51IylFnHtNLT8C0NVfc\nOBfAsP2D5es1qhrOWHCGlgAuDMksBsCc7FPo5PSBOmQ+6z8HtCFbrLoC5/Zx0F5b\nfVegjA+xKjI2HGASsYHHM0BFEH2UjUcJrWiMWtxQuW6Phbqulo7JwjmygMEmIkeK\nf+FtkE9mrq+E8K40/thrjxl3/ZcJD1+3dcp+ZuzVJ2t1E4iGKCx79IZFsysKiuf+\n+E0i6iGvvI6UcbcZxVxQj2TplJkFuoX5kDgClIX9Dr9y6aJ4SCh+GRhvHl+DTaz0\nnCvghachHZtIeztRDqxWApjOOzs93dSelrviMXDr8fqyEAGg7YIhgui0aZBsWCen\nAgMBAAGjgdUwgdIwgbAGA1UdIwSBqDCBpYAUNpx1Pc6cGA7KqEwHMmHBTZMA7lSh\ngYmkgYYwgYMxGTAXBgNVBAMTEFRlc3QgQ0EgU1R1YiBLZXkxFzAVBgNVBAsTDlBs\nYXRmb3JtIExiYWFzMRowGAYDVQQKExFSYWNrc3BhY2UgSG9zdGluZzEUMBIGA1UE\nBxMLU2FuIEFudG9uaW8xDjAMBgNVBAgTBVRleGFzMQswCQYDVQQGEwJVU4IBATAd\nBgNVHQ4EFgQULueOfsjZZOHwJHZwBy6u0swnpccwDQYJKoZIhvcNAQEFBQADggEB\nAFNuqSVUaotUJoWDv4z7Kbi6JFpTjDht5ORw4BdVYlRD4h9DACAFzPrPV2ym/Osp\nhNMdZq6msZku7MdOSQVhdeGWrSNk3M8O9Hg7cVzPNXOF3iNoo3irQ5tURut44xs4\nWw5YWQqS9WyUY5snD8tm7Y1rQTPfhg+678xIq/zWCv/u+FSnfVv1nlhLVQkEeG/Y\ngh1uMaTIpUKTGEjIAGtpGP7wwIcXptR/HyfzhTUSTaWc1Ef7zoKT9LL5z3IV1hC2\njVWz+RwYs98LjMuksJFoHqRfWyYhCIym0jb6GTwaEmpxAjc+d7OLNQdnoEGoUYGP\nYjtfkRYg265ESMA+Kww4Xy8=\n-----END CERTIFICATE-----\n", + "intermediateCertificate": "-----BEGIN 
CERTIFICATE-----\nMIIDtTCCAp2gAwIBAgIBATANBgkqhkiG9w0BAQUFADCBgzEZMBcGA1UEAxMQVGVz\ndCBDQSBTVHViIEtleTEXMBUGA1UECxMOUGxhdGZvcm0gTGJhYXMxGjAYBgNVBAoT\nEVJhY2tzcGFjZSBIb3N0aW5nMRQwEgYDVQQHEwtTYW4gQW50b25pbzEOMAwGA1UE\nCBMFVGV4YXMxCzAJBgNVBAYTAlVTMB4XDTEyMDEwOTE5NDU0OVoXDTE0MDEwODE5\nNDU0OVowgYMxGTAXBgNVBAMTEFRlc3QgQ0EgU1R1YiBLZXkxFzAVBgNVBAsTDlBs\nYXRmb3JtIExiYWFzMRowGAYDVQQKExFSYWNrc3BhY2UgSG9zdGluZzEUMBIGA1UE\nBxMLU2FuIEFudG9uaW8xDjAMBgNVBAgTBVRleGFzMQswCQYDVQQGEwJVUzCCASIw\nDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBANNh55lwTVwQvNoEZjq1zGdYz9jA\nXXdjizn8AJhjHLOAallfPtvCfTEgKanhdoyz5FnhQE8HbDAop/KNS1lN2UMvdl5f\nZNLTSjJrNtedqxQwxN/i3bpyBxNVejUH2NjV1mmyj+5CJYwCzWalvI/gLPq/A3as\nO2EQqtf3U8unRgn0zXLRdYxV9MrUzNAmdipPNvNrsVdrCgA42rgF/8KsyRVQfJCX\nfN7PGCfrsC3YaUvhymraWxNnXIzMYTNa9wEeBZLUw8SlEtpa1Zsvui+TPXu3USNZ\nVnWH8Lb6ENlnoX0VBwo62fjOG3JzhNKoJawi3bRqyDdINOvafr7iPrrs/T8CAwEA\nAaMyMDAwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUNpx1Pc6cGA7KqEwHMmHB\nTZMA7lQwDQYJKoZIhvcNAQEFBQADggEBAMoRgH3iTG3t317viLKoY+lNMHUgHuR7\nb3mn9MidJKyYVewe6hCDIN6WY4fUojmMW9wFJWJIo0hRMNHL3n3tq8HP2j20Mxy8\nacPdfGZJa+jiBw72CrIGdobKaFduIlIEDBA1pNdZIJ+EulrtqrMesnIt92WaypIS\n8JycbIgDMCiyC0ENHEk8UWlC6429c7OZAsplMTbHME/1R4btxjkdfrYZJjdJ2yL2\n8cjZDUDMCPTdW/ycP07Gkq30RB5tACB5aZdaCn2YaKC8FsEdhff4X7xEOfOEHWEq\nSRxADDj8Lx1MT6QpR07hCiDyHfTCtbqzI0iGjX63Oh7xXSa0f+JVTa8=\n-----END CERTIFICATE-----\n" + } +} + `) + + w.WriteHeader(http.StatusOK) + fmt.Fprintf(w, `{}`) + }) +} + +func mockDeleteResponse(t *testing.T, lbID int) { + th.Mux.HandleFunc(_rootURL(lbID), func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "DELETE") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + w.WriteHeader(http.StatusOK) + }) +} + +func mockListCertsResponse(t *testing.T, lbID int) { + th.Mux.HandleFunc(_certURL(lbID, 0), func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + + fmt.Fprintf(w, ` +{ + "certificateMappings": [ + { + "certificateMapping": { + "id": 123, + "hostName": "rackspace.com" + } + }, + { + "certificateMapping": { + "id": 124, + "hostName": "*.rackspace.com" + } + } + ] +} +`) + }) +} + +func mockAddCertResponse(t *testing.T, lbID int) { + th.Mux.HandleFunc(_certURL(lbID, 0), func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "POST") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + + th.TestJSONRequest(t, r, ` +{ + "certificateMapping": { + "hostName": "rackspace.com", + "privateKey":"-----BEGIN RSA PRIVATE 
KEY-----\nMIIEpAIBAAKCAQEAwIudSMpRZx7TS0/AtDVX3DgXwLD9g+XrNaoazlhwhpYALgzJ\nLAbAnOxT6OT0gTpkPus/B7QhW6y6Auf2cdBeW31XoIwPsSoyNhxgErGBxzNARRB9\nlI1HCa1ojFrcULluj4W6rpaOycI5soDBJiJHin/hbZBPZq6vhPCuNP7Ya48Zd/2X\nCQ9ft3XKfmbs1SdrdROIhigse/SGRbMrCorn/vhNIuohr7yOlHG3GcVcUI9k6ZSZ\nBbqF+ZA4ApSF/Q6/cumieEgofhkYbx5fg02s9Jwr4IWnIR2bSHs7UQ6sVgKYzjs7\nPd3Unpa74jFw6/H6shABoO2CIYLotGmQbFgnpwIDAQABAoIBAQCBCQ+PCIclJHNV\ntUzfeCA5ZR4F9JbxHdRTUnxEbOB8UWotckQfTScoAvj4yvdQ42DrCZxj/UOdvFOs\nPufZvlp91bIz1alugWjE+p8n5+2hIaegoTyHoWZKBfxak0myj5KYfHZvKlbmv1ML\nXV4TwEVRfAIG+v87QTY/UUxuF5vR+BpKIbgUJLfPUFFvJUdl84qsJ44pToxaYUd/\nh5YAGC00U4ay1KVSAUnTkkPNZ0lPG/rWU6w6WcTvNRLMd8DzFLTKLOgQfHhbExAF\n+sXPWjWSzbBRP1O7fHqq96QQh4VFiY/7w9W+sDKQyV6Ul17OSXs6aZ4f+lq4rJTI\n1FG96YiBAoGBAO1tiH0h1oWDBYfJB3KJJ6CQQsDGwtHo/DEgznFVP4XwEVbZ98Ha\nBfBCn3sAybbaikyCV1Hwj7kfHMZPDHbrcUSFX7quu/2zPK+wO3lZKXSyu4YsguSa\nRedInN33PpdnlPhLyQdWSuD5sVHJDF6xn22vlyxeILH3ooLg2WOFMPmVAoGBAM+b\nUG/a7iyfpAQKYyuFAsXz6SeFaDY+ZYeX45L112H8Pu+Ie/qzon+bzLB9FIH8GP6+\nQpQgmm/p37U2gD1zChUv7iW6OfQBKk9rWvMpfRF6d7YHquElejhizfTZ+ntBV/VY\ndOYEczxhrdW7keLpatYaaWUy/VboRZmlz/9JGqVLAoGAHfqNmFc0cgk4IowEj7a3\ntTNh6ltub/i+FynwRykfazcDyXaeLPDtfQe8gVh5H8h6W+y9P9BjJVnDVVrX1RAn\nbiJ1EupLPF5sVDapW8ohTOXgfbGTGXBNUUW+4Nv+IDno+mz/RhjkPYHpnM0I7c/5\ntGzOZsC/2hjNgT8I0+MWav0CgYEAuULdJeQVlKalI6HtW2Gn1uRRVJ49H+LQkY6e\nW3+cw2jo9LI0CMWSphNvNrN3wIMp/vHj0fHCP0pSApDvIWbuQXfzKaGko7UCf7rK\nf6GvZRCHkV4IREBAb97j8bMvThxClMNqFfU0rFZyXP+0MOyhFQyertswrgQ6T+Fi\n2mnvKD8CgYAmJHP3NTDRMoMRyAzonJ6nEaGUbAgNmivTaUWMe0+leCvAdwD89gzC\nTKbm3eDUg/6Va3X6ANh3wsfIOe4RXXxcbcFDk9R4zO2M5gfLSjYm5Q87EBZ2hrdj\nM2gLI7dt6thx0J8lR8xRHBEMrVBdgwp0g1gQzo5dAV88/kpkZVps8Q==\n-----END RSA PRIVATE KEY-----\n", + "certificate":"-----BEGIN CERTIFICATE-----\nMIIEXTCCA0WgAwIBAgIGATTEAjK3MA0GCSqGSIb3DQEBBQUAMIGDMRkwFwYDVQQD\nExBUZXN0IENBIFNUdWIgS2V5MRcwFQYDVQQLEw5QbGF0Zm9ybSBMYmFhczEaMBgG\nA1UEChMRUmFja3NwYWNlIEhvc3RpbmcxFDASBgNVBAcTC1NhbiBBbnRvbmlvMQ4w\nDAYDVQQIEwVUZXhhczELMAkGA1UEBhMCVVMwHhcNMTIwMTA5MTk0NjQ1WhcNMTQw\nMTA4MTk0NjQ1WjCBgjELMAkGA1UEBhMCVVMxDjAMBgNVBAgTBVRleGFzMRQwEgYD\nVQQHEwtTYW4gQW50b25pbzEaMBgGA1UEChMRUmFja3NwYWNlIEhvc3RpbmcxFzAV\nBgNVBAsTDlBsYXRmb3JtIExiYWFzMRgwFgYDVQQDEw9UZXN0IENsaWVudCBLZXkw\nggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDAi51IylFnHtNLT8C0NVfc\nOBfAsP2D5es1qhrOWHCGlgAuDMksBsCc7FPo5PSBOmQ+6z8HtCFbrLoC5/Zx0F5b\nfVegjA+xKjI2HGASsYHHM0BFEH2UjUcJrWiMWtxQuW6Phbqulo7JwjmygMEmIkeK\nf+FtkE9mrq+E8K40/thrjxl3/ZcJD1+3dcp+ZuzVJ2t1E4iGKCx79IZFsysKiuf+\n+E0i6iGvvI6UcbcZxVxQj2TplJkFuoX5kDgClIX9Dr9y6aJ4SCh+GRhvHl+DTaz0\nnCvghachHZtIeztRDqxWApjOOzs93dSelrviMXDr8fqyEAGg7YIhgui0aZBsWCen\nAgMBAAGjgdUwgdIwgbAGA1UdIwSBqDCBpYAUNpx1Pc6cGA7KqEwHMmHBTZMA7lSh\ngYmkgYYwgYMxGTAXBgNVBAMTEFRlc3QgQ0EgU1R1YiBLZXkxFzAVBgNVBAsTDlBs\nYXRmb3JtIExiYWFzMRowGAYDVQQKExFSYWNrc3BhY2UgSG9zdGluZzEUMBIGA1UE\nBxMLU2FuIEFudG9uaW8xDjAMBgNVBAgTBVRleGFzMQswCQYDVQQGEwJVU4IBATAd\nBgNVHQ4EFgQULueOfsjZZOHwJHZwBy6u0swnpccwDQYJKoZIhvcNAQEFBQADggEB\nAFNuqSVUaotUJoWDv4z7Kbi6JFpTjDht5ORw4BdVYlRD4h9DACAFzPrPV2ym/Osp\nhNMdZq6msZku7MdOSQVhdeGWrSNk3M8O9Hg7cVzPNXOF3iNoo3irQ5tURut44xs4\nWw5YWQqS9WyUY5snD8tm7Y1rQTPfhg+678xIq/zWCv/u+FSnfVv1nlhLVQkEeG/Y\ngh1uMaTIpUKTGEjIAGtpGP7wwIcXptR/HyfzhTUSTaWc1Ef7zoKT9LL5z3IV1hC2\njVWz+RwYs98LjMuksJFoHqRfWyYhCIym0jb6GTwaEmpxAjc+d7OLNQdnoEGoUYGP\nYjtfkRYg265ESMA+Kww4Xy8=\n-----END CERTIFICATE-----\n", + "intermediateCertificate":"-----BEGIN 
CERTIFICATE-----\nMIIDtTCCAp2gAwIBAgIBATANBgkqhkiG9w0BAQUFADCBgzEZMBcGA1UEAxMQVGVz\ndCBDQSBTVHViIEtleTEXMBUGA1UECxMOUGxhdGZvcm0gTGJhYXMxGjAYBgNVBAoT\nEVJhY2tzcGFjZSBIb3N0aW5nMRQwEgYDVQQHEwtTYW4gQW50b25pbzEOMAwGA1UE\nCBMFVGV4YXMxCzAJBgNVBAYTAlVTMB4XDTEyMDEwOTE5NDU0OVoXDTE0MDEwODE5\nNDU0OVowgYMxGTAXBgNVBAMTEFRlc3QgQ0EgU1R1YiBLZXkxFzAVBgNVBAsTDlBs\nYXRmb3JtIExiYWFzMRowGAYDVQQKExFSYWNrc3BhY2UgSG9zdGluZzEUMBIGA1UE\nBxMLU2FuIEFudG9uaW8xDjAMBgNVBAgTBVRleGFzMQswCQYDVQQGEwJVUzCCASIw\nDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBANNh55lwTVwQvNoEZjq1zGdYz9jA\nXXdjizn8AJhjHLOAallfPtvCfTEgKanhdoyz5FnhQE8HbDAop/KNS1lN2UMvdl5f\nZNLTSjJrNtedqxQwxN/i3bpyBxNVejUH2NjV1mmyj+5CJYwCzWalvI/gLPq/A3as\nO2EQqtf3U8unRgn0zXLRdYxV9MrUzNAmdipPNvNrsVdrCgA42rgF/8KsyRVQfJCX\nfN7PGCfrsC3YaUvhymraWxNnXIzMYTNa9wEeBZLUw8SlEtpa1Zsvui+TPXu3USNZ\nVnWH8Lb6ENlnoX0VBwo62fjOG3JzhNKoJawi3bRqyDdINOvafr7iPrrs/T8CAwEA\nAaMyMDAwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUNpx1Pc6cGA7KqEwHMmHB\nTZMA7lQwDQYJKoZIhvcNAQEFBQADggEBAMoRgH3iTG3t317viLKoY+lNMHUgHuR7\nb3mn9MidJKyYVewe6hCDIN6WY4fUojmMW9wFJWJIo0hRMNHL3n3tq8HP2j20Mxy8\nacPdfGZJa+jiBw72CrIGdobKaFduIlIEDBA1pNdZIJ+EulrtqrMesnIt92WaypIS\n8JycbIgDMCiyC0ENHEk8UWlC6429c7OZAsplMTbHME/1R4btxjkdfrYZJjdJ2yL2\n8cjZDUDMCPTdW/ycP07Gkq30RB5tACB5aZdaCn2YaKC8FsEdhff4X7xEOfOEHWEq\nSRxADDj8Lx1MT6QpR07hCiDyHfTCtbqzI0iGjX63Oh7xXSa0f+JVTa8=\n-----END CERTIFICATE-----\n" + } +} + `) + + w.WriteHeader(http.StatusOK) + + fmt.Fprintf(w, ` +{ + "certificateMapping": { + "id": 123, + "hostName": "rackspace.com", + "certificate":"-----BEGIN CERTIFICATE-----\nMIIEXTCCA0WgAwIBAgIGATTEAjK3MA0GCSqGSIb3DQEBBQUAMIGDMRkwFwYDVQQD\nExBUZXN0IENBIFNUdWIgS2V5MRcwFQYDVQQLEw5QbGF0Zm9ybSBMYmFhczEaMBgG\nA1UEChMRUmFja3NwYWNlIEhvc3RpbmcxFDASBgNVBAcTC1NhbiBBbnRvbmlvMQ4w\nDAYDVQQIEwVUZXhhczELMAkGA1UEBhMCVVMwHhcNMTIwMTA5MTk0NjQ1WhcNMTQw\nMTA4MTk0NjQ1WjCBgjELMAkGA1UEBhMCVVMxDjAMBgNVBAgTBVRleGFzMRQwEgYD\nVQQHEwtTYW4gQW50b25pbzEaMBgGA1UEChMRUmFja3NwYWNlIEhvc3RpbmcxFzAV\nBgNVBAsTDlBsYXRmb3JtIExiYWFzMRgwFgYDVQQDEw9UZXN0IENsaWVudCBLZXkw\nggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDAi51IylFnHtNLT8C0NVfc\nOBfAsP2D5es1qhrOWHCGlgAuDMksBsCc7FPo5PSBOmQ+6z8HtCFbrLoC5/Zx0F5b\nfVegjA+xKjI2HGASsYHHM0BFEH2UjUcJrWiMWtxQuW6Phbqulo7JwjmygMEmIkeK\nf+FtkE9mrq+E8K40/thrjxl3/ZcJD1+3dcp+ZuzVJ2t1E4iGKCx79IZFsysKiuf+\n+E0i6iGvvI6UcbcZxVxQj2TplJkFuoX5kDgClIX9Dr9y6aJ4SCh+GRhvHl+DTaz0\nnCvghachHZtIeztRDqxWApjOOzs93dSelrviMXDr8fqyEAGg7YIhgui0aZBsWCen\nAgMBAAGjgdUwgdIwgbAGA1UdIwSBqDCBpYAUNpx1Pc6cGA7KqEwHMmHBTZMA7lSh\ngYmkgYYwgYMxGTAXBgNVBAMTEFRlc3QgQ0EgU1R1YiBLZXkxFzAVBgNVBAsTDlBs\nYXRmb3JtIExiYWFzMRowGAYDVQQKExFSYWNrc3BhY2UgSG9zdGluZzEUMBIGA1UE\nBxMLU2FuIEFudG9uaW8xDjAMBgNVBAgTBVRleGFzMQswCQYDVQQGEwJVU4IBATAd\nBgNVHQ4EFgQULueOfsjZZOHwJHZwBy6u0swnpccwDQYJKoZIhvcNAQEFBQADggEB\nAFNuqSVUaotUJoWDv4z7Kbi6JFpTjDht5ORw4BdVYlRD4h9DACAFzPrPV2ym/Osp\nhNMdZq6msZku7MdOSQVhdeGWrSNk3M8O9Hg7cVzPNXOF3iNoo3irQ5tURut44xs4\nWw5YWQqS9WyUY5snD8tm7Y1rQTPfhg+678xIq/zWCv/u+FSnfVv1nlhLVQkEeG/Y\ngh1uMaTIpUKTGEjIAGtpGP7wwIcXptR/HyfzhTUSTaWc1Ef7zoKT9LL5z3IV1hC2\njVWz+RwYs98LjMuksJFoHqRfWyYhCIym0jb6GTwaEmpxAjc+d7OLNQdnoEGoUYGP\nYjtfkRYg265ESMA+Kww4Xy8=\n-----END CERTIFICATE-----\n", + "intermediateCertificate":"-----BEGIN 
CERTIFICATE-----\nMIIDtTCCAp2gAwIBAgIBATANBgkqhkiG9w0BAQUFADCBgzEZMBcGA1UEAxMQVGVz\ndCBDQSBTVHViIEtleTEXMBUGA1UECxMOUGxhdGZvcm0gTGJhYXMxGjAYBgNVBAoT\nEVJhY2tzcGFjZSBIb3N0aW5nMRQwEgYDVQQHEwtTYW4gQW50b25pbzEOMAwGA1UE\nCBMFVGV4YXMxCzAJBgNVBAYTAlVTMB4XDTEyMDEwOTE5NDU0OVoXDTE0MDEwODE5\nNDU0OVowgYMxGTAXBgNVBAMTEFRlc3QgQ0EgU1R1YiBLZXkxFzAVBgNVBAsTDlBs\nYXRmb3JtIExiYWFzMRowGAYDVQQKExFSYWNrc3BhY2UgSG9zdGluZzEUMBIGA1UE\nBxMLU2FuIEFudG9uaW8xDjAMBgNVBAgTBVRleGFzMQswCQYDVQQGEwJVUzCCASIw\nDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBANNh55lwTVwQvNoEZjq1zGdYz9jA\nXXdjizn8AJhjHLOAallfPtvCfTEgKanhdoyz5FnhQE8HbDAop/KNS1lN2UMvdl5f\nZNLTSjJrNtedqxQwxN/i3bpyBxNVejUH2NjV1mmyj+5CJYwCzWalvI/gLPq/A3as\nO2EQqtf3U8unRgn0zXLRdYxV9MrUzNAmdipPNvNrsVdrCgA42rgF/8KsyRVQfJCX\nfN7PGCfrsC3YaUvhymraWxNnXIzMYTNa9wEeBZLUw8SlEtpa1Zsvui+TPXu3USNZ\nVnWH8Lb6ENlnoX0VBwo62fjOG3JzhNKoJawi3bRqyDdINOvafr7iPrrs/T8CAwEA\nAaMyMDAwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUNpx1Pc6cGA7KqEwHMmHB\nTZMA7lQwDQYJKoZIhvcNAQEFBQADggEBAMoRgH3iTG3t317viLKoY+lNMHUgHuR7\nb3mn9MidJKyYVewe6hCDIN6WY4fUojmMW9wFJWJIo0hRMNHL3n3tq8HP2j20Mxy8\nacPdfGZJa+jiBw72CrIGdobKaFduIlIEDBA1pNdZIJ+EulrtqrMesnIt92WaypIS\n8JycbIgDMCiyC0ENHEk8UWlC6429c7OZAsplMTbHME/1R4btxjkdfrYZJjdJ2yL2\n8cjZDUDMCPTdW/ycP07Gkq30RB5tACB5aZdaCn2YaKC8FsEdhff4X7xEOfOEHWEq\nSRxADDj8Lx1MT6QpR07hCiDyHfTCtbqzI0iGjX63Oh7xXSa0f+JVTa8=\n-----END CERTIFICATE-----\n" + } +} + `) + }) +} + +func mockGetCertResponse(t *testing.T, lbID, certID int) { + th.Mux.HandleFunc(_certURL(lbID, certID), func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + + fmt.Fprintf(w, ` +{ + "certificateMapping": { + "id": 123, + "hostName": "rackspace.com", + "certificate":"-----BEGIN CERTIFICATE-----\nMIIEXTCCA0WgAwIBAgIGATTEAjK3MA0GCSqGSIb3DQEBBQUAMIGDMRkwFwYDVQQD\nExBUZXN0IENBIFNUdWIgS2V5MRcwFQYDVQQLEw5QbGF0Zm9ybSBMYmFhczEaMBgG\nA1UEChMRUmFja3NwYWNlIEhvc3RpbmcxFDASBgNVBAcTC1NhbiBBbnRvbmlvMQ4w\nDAYDVQQIEwVUZXhhczELMAkGA1UEBhMCVVMwHhcNMTIwMTA5MTk0NjQ1WhcNMTQw\nMTA4MTk0NjQ1WjCBgjELMAkGA1UEBhMCVVMxDjAMBgNVBAgTBVRleGFzMRQwEgYD\nVQQHEwtTYW4gQW50b25pbzEaMBgGA1UEChMRUmFja3NwYWNlIEhvc3RpbmcxFzAV\nBgNVBAsTDlBsYXRmb3JtIExiYWFzMRgwFgYDVQQDEw9UZXN0IENsaWVudCBLZXkw\nggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDAi51IylFnHtNLT8C0NVfc\nOBfAsP2D5es1qhrOWHCGlgAuDMksBsCc7FPo5PSBOmQ+6z8HtCFbrLoC5/Zx0F5b\nfVegjA+xKjI2HGASsYHHM0BFEH2UjUcJrWiMWtxQuW6Phbqulo7JwjmygMEmIkeK\nf+FtkE9mrq+E8K40/thrjxl3/ZcJD1+3dcp+ZuzVJ2t1E4iGKCx79IZFsysKiuf+\n+E0i6iGvvI6UcbcZxVxQj2TplJkFuoX5kDgClIX9Dr9y6aJ4SCh+GRhvHl+DTaz0\nnCvghachHZtIeztRDqxWApjOOzs93dSelrviMXDr8fqyEAGg7YIhgui0aZBsWCen\nAgMBAAGjgdUwgdIwgbAGA1UdIwSBqDCBpYAUNpx1Pc6cGA7KqEwHMmHBTZMA7lSh\ngYmkgYYwgYMxGTAXBgNVBAMTEFRlc3QgQ0EgU1R1YiBLZXkxFzAVBgNVBAsTDlBs\nYXRmb3JtIExiYWFzMRowGAYDVQQKExFSYWNrc3BhY2UgSG9zdGluZzEUMBIGA1UE\nBxMLU2FuIEFudG9uaW8xDjAMBgNVBAgTBVRleGFzMQswCQYDVQQGEwJVU4IBATAd\nBgNVHQ4EFgQULueOfsjZZOHwJHZwBy6u0swnpccwDQYJKoZIhvcNAQEFBQADggEB\nAFNuqSVUaotUJoWDv4z7Kbi6JFpTjDht5ORw4BdVYlRD4h9DACAFzPrPV2ym/Osp\nhNMdZq6msZku7MdOSQVhdeGWrSNk3M8O9Hg7cVzPNXOF3iNoo3irQ5tURut44xs4\nWw5YWQqS9WyUY5snD8tm7Y1rQTPfhg+678xIq/zWCv/u+FSnfVv1nlhLVQkEeG/Y\ngh1uMaTIpUKTGEjIAGtpGP7wwIcXptR/HyfzhTUSTaWc1Ef7zoKT9LL5z3IV1hC2\njVWz+RwYs98LjMuksJFoHqRfWyYhCIym0jb6GTwaEmpxAjc+d7OLNQdnoEGoUYGP\nYjtfkRYg265ESMA+Kww4Xy8=\n-----END CERTIFICATE-----\n", + "intermediateCertificate":"-----BEGIN 
CERTIFICATE-----\nMIIDtTCCAp2gAwIBAgIBATANBgkqhkiG9w0BAQUFADCBgzEZMBcGA1UEAxMQVGVz\ndCBDQSBTVHViIEtleTEXMBUGA1UECxMOUGxhdGZvcm0gTGJhYXMxGjAYBgNVBAoT\nEVJhY2tzcGFjZSBIb3N0aW5nMRQwEgYDVQQHEwtTYW4gQW50b25pbzEOMAwGA1UE\nCBMFVGV4YXMxCzAJBgNVBAYTAlVTMB4XDTEyMDEwOTE5NDU0OVoXDTE0MDEwODE5\nNDU0OVowgYMxGTAXBgNVBAMTEFRlc3QgQ0EgU1R1YiBLZXkxFzAVBgNVBAsTDlBs\nYXRmb3JtIExiYWFzMRowGAYDVQQKExFSYWNrc3BhY2UgSG9zdGluZzEUMBIGA1UE\nBxMLU2FuIEFudG9uaW8xDjAMBgNVBAgTBVRleGFzMQswCQYDVQQGEwJVUzCCASIw\nDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBANNh55lwTVwQvNoEZjq1zGdYz9jA\nXXdjizn8AJhjHLOAallfPtvCfTEgKanhdoyz5FnhQE8HbDAop/KNS1lN2UMvdl5f\nZNLTSjJrNtedqxQwxN/i3bpyBxNVejUH2NjV1mmyj+5CJYwCzWalvI/gLPq/A3as\nO2EQqtf3U8unRgn0zXLRdYxV9MrUzNAmdipPNvNrsVdrCgA42rgF/8KsyRVQfJCX\nfN7PGCfrsC3YaUvhymraWxNnXIzMYTNa9wEeBZLUw8SlEtpa1Zsvui+TPXu3USNZ\nVnWH8Lb6ENlnoX0VBwo62fjOG3JzhNKoJawi3bRqyDdINOvafr7iPrrs/T8CAwEA\nAaMyMDAwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUNpx1Pc6cGA7KqEwHMmHB\nTZMA7lQwDQYJKoZIhvcNAQEFBQADggEBAMoRgH3iTG3t317viLKoY+lNMHUgHuR7\nb3mn9MidJKyYVewe6hCDIN6WY4fUojmMW9wFJWJIo0hRMNHL3n3tq8HP2j20Mxy8\nacPdfGZJa+jiBw72CrIGdobKaFduIlIEDBA1pNdZIJ+EulrtqrMesnIt92WaypIS\n8JycbIgDMCiyC0ENHEk8UWlC6429c7OZAsplMTbHME/1R4btxjkdfrYZJjdJ2yL2\n8cjZDUDMCPTdW/ycP07Gkq30RB5tACB5aZdaCn2YaKC8FsEdhff4X7xEOfOEHWEq\nSRxADDj8Lx1MT6QpR07hCiDyHfTCtbqzI0iGjX63Oh7xXSa0f+JVTa8=\n-----END CERTIFICATE-----\n" + } +} +`) + }) +} + +func mockUpdateCertResponse(t *testing.T, lbID, certID int) { + th.Mux.HandleFunc(_certURL(lbID, certID), func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "PUT") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + + th.TestJSONRequest(t, r, ` +{ + "certificateMapping": { + "hostName": "rackspace.com", + "privateKey":"-----BEGIN RSA PRIVATE KEY-----\nMIIEpAIBAAKCAQEAwIudSMpRZx7TS0/AtDVX3DgXwLD9g+XrNaoazlhwhpYALgzJ\nLAbAnOxT6OT0gTpkPus/B7QhW6y6Auf2cdBeW31XoIwPsSoyNhxgErGBxzNARRB9\nlI1HCa1ojFrcULluj4W6rpaOycI5soDBJiJHin/hbZBPZq6vhPCuNP7Ya48Zd/2X\nCQ9ft3XKfmbs1SdrdROIhigse/SGRbMrCorn/vhNIuohr7yOlHG3GcVcUI9k6ZSZ\nBbqF+ZA4ApSF/Q6/cumieEgofhkYbx5fg02s9Jwr4IWnIR2bSHs7UQ6sVgKYzjs7\nPd3Unpa74jFw6/H6shABoO2CIYLotGmQbFgnpwIDAQABAoIBAQCBCQ+PCIclJHNV\ntUzfeCA5ZR4F9JbxHdRTUnxEbOB8UWotckQfTScoAvj4yvdQ42DrCZxj/UOdvFOs\nPufZvlp91bIz1alugWjE+p8n5+2hIaegoTyHoWZKBfxak0myj5KYfHZvKlbmv1ML\nXV4TwEVRfAIG+v87QTY/UUxuF5vR+BpKIbgUJLfPUFFvJUdl84qsJ44pToxaYUd/\nh5YAGC00U4ay1KVSAUnTkkPNZ0lPG/rWU6w6WcTvNRLMd8DzFLTKLOgQfHhbExAF\n+sXPWjWSzbBRP1O7fHqq96QQh4VFiY/7w9W+sDKQyV6Ul17OSXs6aZ4f+lq4rJTI\n1FG96YiBAoGBAO1tiH0h1oWDBYfJB3KJJ6CQQsDGwtHo/DEgznFVP4XwEVbZ98Ha\nBfBCn3sAybbaikyCV1Hwj7kfHMZPDHbrcUSFX7quu/2zPK+wO3lZKXSyu4YsguSa\nRedInN33PpdnlPhLyQdWSuD5sVHJDF6xn22vlyxeILH3ooLg2WOFMPmVAoGBAM+b\nUG/a7iyfpAQKYyuFAsXz6SeFaDY+ZYeX45L112H8Pu+Ie/qzon+bzLB9FIH8GP6+\nQpQgmm/p37U2gD1zChUv7iW6OfQBKk9rWvMpfRF6d7YHquElejhizfTZ+ntBV/VY\ndOYEczxhrdW7keLpatYaaWUy/VboRZmlz/9JGqVLAoGAHfqNmFc0cgk4IowEj7a3\ntTNh6ltub/i+FynwRykfazcDyXaeLPDtfQe8gVh5H8h6W+y9P9BjJVnDVVrX1RAn\nbiJ1EupLPF5sVDapW8ohTOXgfbGTGXBNUUW+4Nv+IDno+mz/RhjkPYHpnM0I7c/5\ntGzOZsC/2hjNgT8I0+MWav0CgYEAuULdJeQVlKalI6HtW2Gn1uRRVJ49H+LQkY6e\nW3+cw2jo9LI0CMWSphNvNrN3wIMp/vHj0fHCP0pSApDvIWbuQXfzKaGko7UCf7rK\nf6GvZRCHkV4IREBAb97j8bMvThxClMNqFfU0rFZyXP+0MOyhFQyertswrgQ6T+Fi\n2mnvKD8CgYAmJHP3NTDRMoMRyAzonJ6nEaGUbAgNmivTaUWMe0+leCvAdwD89gzC\nTKbm3eDUg/6Va3X6ANh3wsfIOe4RXXxcbcFDk9R4zO2M5gfLSjYm5Q87EBZ2hrdj\nM2gLI7dt6thx0J8lR8xRHBEMrVBdgwp0g1gQzo5dAV88/kpkZVps8Q==\n-----END RSA PRIVATE KEY-----\n", + "certificate":"-----BEGIN 
CERTIFICATE-----\nMIIEXTCCA0WgAwIBAgIGATTEAjK3MA0GCSqGSIb3DQEBBQUAMIGDMRkwFwYDVQQD\nExBUZXN0IENBIFNUdWIgS2V5MRcwFQYDVQQLEw5QbGF0Zm9ybSBMYmFhczEaMBgG\nA1UEChMRUmFja3NwYWNlIEhvc3RpbmcxFDASBgNVBAcTC1NhbiBBbnRvbmlvMQ4w\nDAYDVQQIEwVUZXhhczELMAkGA1UEBhMCVVMwHhcNMTIwMTA5MTk0NjQ1WhcNMTQw\nMTA4MTk0NjQ1WjCBgjELMAkGA1UEBhMCVVMxDjAMBgNVBAgTBVRleGFzMRQwEgYD\nVQQHEwtTYW4gQW50b25pbzEaMBgGA1UEChMRUmFja3NwYWNlIEhvc3RpbmcxFzAV\nBgNVBAsTDlBsYXRmb3JtIExiYWFzMRgwFgYDVQQDEw9UZXN0IENsaWVudCBLZXkw\nggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDAi51IylFnHtNLT8C0NVfc\nOBfAsP2D5es1qhrOWHCGlgAuDMksBsCc7FPo5PSBOmQ+6z8HtCFbrLoC5/Zx0F5b\nfVegjA+xKjI2HGASsYHHM0BFEH2UjUcJrWiMWtxQuW6Phbqulo7JwjmygMEmIkeK\nf+FtkE9mrq+E8K40/thrjxl3/ZcJD1+3dcp+ZuzVJ2t1E4iGKCx79IZFsysKiuf+\n+E0i6iGvvI6UcbcZxVxQj2TplJkFuoX5kDgClIX9Dr9y6aJ4SCh+GRhvHl+DTaz0\nnCvghachHZtIeztRDqxWApjOOzs93dSelrviMXDr8fqyEAGg7YIhgui0aZBsWCen\nAgMBAAGjgdUwgdIwgbAGA1UdIwSBqDCBpYAUNpx1Pc6cGA7KqEwHMmHBTZMA7lSh\ngYmkgYYwgYMxGTAXBgNVBAMTEFRlc3QgQ0EgU1R1YiBLZXkxFzAVBgNVBAsTDlBs\nYXRmb3JtIExiYWFzMRowGAYDVQQKExFSYWNrc3BhY2UgSG9zdGluZzEUMBIGA1UE\nBxMLU2FuIEFudG9uaW8xDjAMBgNVBAgTBVRleGFzMQswCQYDVQQGEwJVU4IBATAd\nBgNVHQ4EFgQULueOfsjZZOHwJHZwBy6u0swnpccwDQYJKoZIhvcNAQEFBQADggEB\nAFNuqSVUaotUJoWDv4z7Kbi6JFpTjDht5ORw4BdVYlRD4h9DACAFzPrPV2ym/Osp\nhNMdZq6msZku7MdOSQVhdeGWrSNk3M8O9Hg7cVzPNXOF3iNoo3irQ5tURut44xs4\nWw5YWQqS9WyUY5snD8tm7Y1rQTPfhg+678xIq/zWCv/u+FSnfVv1nlhLVQkEeG/Y\ngh1uMaTIpUKTGEjIAGtpGP7wwIcXptR/HyfzhTUSTaWc1Ef7zoKT9LL5z3IV1hC2\njVWz+RwYs98LjMuksJFoHqRfWyYhCIym0jb6GTwaEmpxAjc+d7OLNQdnoEGoUYGP\nYjtfkRYg265ESMA+Kww4Xy8=\n-----END CERTIFICATE-----\n", + "intermediateCertificate":"-----BEGIN CERTIFICATE-----\nMIIDtTCCAp2gAwIBAgIBATANBgkqhkiG9w0BAQUFADCBgzEZMBcGA1UEAxMQVGVz\ndCBDQSBTVHViIEtleTEXMBUGA1UECxMOUGxhdGZvcm0gTGJhYXMxGjAYBgNVBAoT\nEVJhY2tzcGFjZSBIb3N0aW5nMRQwEgYDVQQHEwtTYW4gQW50b25pbzEOMAwGA1UE\nCBMFVGV4YXMxCzAJBgNVBAYTAlVTMB4XDTEyMDEwOTE5NDU0OVoXDTE0MDEwODE5\nNDU0OVowgYMxGTAXBgNVBAMTEFRlc3QgQ0EgU1R1YiBLZXkxFzAVBgNVBAsTDlBs\nYXRmb3JtIExiYWFzMRowGAYDVQQKExFSYWNrc3BhY2UgSG9zdGluZzEUMBIGA1UE\nBxMLU2FuIEFudG9uaW8xDjAMBgNVBAgTBVRleGFzMQswCQYDVQQGEwJVUzCCASIw\nDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBANNh55lwTVwQvNoEZjq1zGdYz9jA\nXXdjizn8AJhjHLOAallfPtvCfTEgKanhdoyz5FnhQE8HbDAop/KNS1lN2UMvdl5f\nZNLTSjJrNtedqxQwxN/i3bpyBxNVejUH2NjV1mmyj+5CJYwCzWalvI/gLPq/A3as\nO2EQqtf3U8unRgn0zXLRdYxV9MrUzNAmdipPNvNrsVdrCgA42rgF/8KsyRVQfJCX\nfN7PGCfrsC3YaUvhymraWxNnXIzMYTNa9wEeBZLUw8SlEtpa1Zsvui+TPXu3USNZ\nVnWH8Lb6ENlnoX0VBwo62fjOG3JzhNKoJawi3bRqyDdINOvafr7iPrrs/T8CAwEA\nAaMyMDAwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUNpx1Pc6cGA7KqEwHMmHB\nTZMA7lQwDQYJKoZIhvcNAQEFBQADggEBAMoRgH3iTG3t317viLKoY+lNMHUgHuR7\nb3mn9MidJKyYVewe6hCDIN6WY4fUojmMW9wFJWJIo0hRMNHL3n3tq8HP2j20Mxy8\nacPdfGZJa+jiBw72CrIGdobKaFduIlIEDBA1pNdZIJ+EulrtqrMesnIt92WaypIS\n8JycbIgDMCiyC0ENHEk8UWlC6429c7OZAsplMTbHME/1R4btxjkdfrYZJjdJ2yL2\n8cjZDUDMCPTdW/ycP07Gkq30RB5tACB5aZdaCn2YaKC8FsEdhff4X7xEOfOEHWEq\nSRxADDj8Lx1MT6QpR07hCiDyHfTCtbqzI0iGjX63Oh7xXSa0f+JVTa8=\n-----END CERTIFICATE-----\n" + } +} + `) + + w.WriteHeader(http.StatusAccepted) + + fmt.Fprintf(w, ` +{ + "certificateMapping": { + "id": 123, + "hostName": "rackspace.com", + "certificate":"-----BEGIN 
CERTIFICATE-----\nMIIEXTCCA0WgAwIBAgIGATTEAjK3MA0GCSqGSIb3DQEBBQUAMIGDMRkwFwYDVQQD\nExBUZXN0IENBIFNUdWIgS2V5MRcwFQYDVQQLEw5QbGF0Zm9ybSBMYmFhczEaMBgG\nA1UEChMRUmFja3NwYWNlIEhvc3RpbmcxFDASBgNVBAcTC1NhbiBBbnRvbmlvMQ4w\nDAYDVQQIEwVUZXhhczELMAkGA1UEBhMCVVMwHhcNMTIwMTA5MTk0NjQ1WhcNMTQw\nMTA4MTk0NjQ1WjCBgjELMAkGA1UEBhMCVVMxDjAMBgNVBAgTBVRleGFzMRQwEgYD\nVQQHEwtTYW4gQW50b25pbzEaMBgGA1UEChMRUmFja3NwYWNlIEhvc3RpbmcxFzAV\nBgNVBAsTDlBsYXRmb3JtIExiYWFzMRgwFgYDVQQDEw9UZXN0IENsaWVudCBLZXkw\nggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDAi51IylFnHtNLT8C0NVfc\nOBfAsP2D5es1qhrOWHCGlgAuDMksBsCc7FPo5PSBOmQ+6z8HtCFbrLoC5/Zx0F5b\nfVegjA+xKjI2HGASsYHHM0BFEH2UjUcJrWiMWtxQuW6Phbqulo7JwjmygMEmIkeK\nf+FtkE9mrq+E8K40/thrjxl3/ZcJD1+3dcp+ZuzVJ2t1E4iGKCx79IZFsysKiuf+\n+E0i6iGvvI6UcbcZxVxQj2TplJkFuoX5kDgClIX9Dr9y6aJ4SCh+GRhvHl+DTaz0\nnCvghachHZtIeztRDqxWApjOOzs93dSelrviMXDr8fqyEAGg7YIhgui0aZBsWCen\nAgMBAAGjgdUwgdIwgbAGA1UdIwSBqDCBpYAUNpx1Pc6cGA7KqEwHMmHBTZMA7lSh\ngYmkgYYwgYMxGTAXBgNVBAMTEFRlc3QgQ0EgU1R1YiBLZXkxFzAVBgNVBAsTDlBs\nYXRmb3JtIExiYWFzMRowGAYDVQQKExFSYWNrc3BhY2UgSG9zdGluZzEUMBIGA1UE\nBxMLU2FuIEFudG9uaW8xDjAMBgNVBAgTBVRleGFzMQswCQYDVQQGEwJVU4IBATAd\nBgNVHQ4EFgQULueOfsjZZOHwJHZwBy6u0swnpccwDQYJKoZIhvcNAQEFBQADggEB\nAFNuqSVUaotUJoWDv4z7Kbi6JFpTjDht5ORw4BdVYlRD4h9DACAFzPrPV2ym/Osp\nhNMdZq6msZku7MdOSQVhdeGWrSNk3M8O9Hg7cVzPNXOF3iNoo3irQ5tURut44xs4\nWw5YWQqS9WyUY5snD8tm7Y1rQTPfhg+678xIq/zWCv/u+FSnfVv1nlhLVQkEeG/Y\ngh1uMaTIpUKTGEjIAGtpGP7wwIcXptR/HyfzhTUSTaWc1Ef7zoKT9LL5z3IV1hC2\njVWz+RwYs98LjMuksJFoHqRfWyYhCIym0jb6GTwaEmpxAjc+d7OLNQdnoEGoUYGP\nYjtfkRYg265ESMA+Kww4Xy8=\n-----END CERTIFICATE-----\n", + "intermediateCertificate":"-----BEGIN CERTIFICATE-----\nMIIDtTCCAp2gAwIBAgIBATANBgkqhkiG9w0BAQUFADCBgzEZMBcGA1UEAxMQVGVz\ndCBDQSBTVHViIEtleTEXMBUGA1UECxMOUGxhdGZvcm0gTGJhYXMxGjAYBgNVBAoT\nEVJhY2tzcGFjZSBIb3N0aW5nMRQwEgYDVQQHEwtTYW4gQW50b25pbzEOMAwGA1UE\nCBMFVGV4YXMxCzAJBgNVBAYTAlVTMB4XDTEyMDEwOTE5NDU0OVoXDTE0MDEwODE5\nNDU0OVowgYMxGTAXBgNVBAMTEFRlc3QgQ0EgU1R1YiBLZXkxFzAVBgNVBAsTDlBs\nYXRmb3JtIExiYWFzMRowGAYDVQQKExFSYWNrc3BhY2UgSG9zdGluZzEUMBIGA1UE\nBxMLU2FuIEFudG9uaW8xDjAMBgNVBAgTBVRleGFzMQswCQYDVQQGEwJVUzCCASIw\nDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBANNh55lwTVwQvNoEZjq1zGdYz9jA\nXXdjizn8AJhjHLOAallfPtvCfTEgKanhdoyz5FnhQE8HbDAop/KNS1lN2UMvdl5f\nZNLTSjJrNtedqxQwxN/i3bpyBxNVejUH2NjV1mmyj+5CJYwCzWalvI/gLPq/A3as\nO2EQqtf3U8unRgn0zXLRdYxV9MrUzNAmdipPNvNrsVdrCgA42rgF/8KsyRVQfJCX\nfN7PGCfrsC3YaUvhymraWxNnXIzMYTNa9wEeBZLUw8SlEtpa1Zsvui+TPXu3USNZ\nVnWH8Lb6ENlnoX0VBwo62fjOG3JzhNKoJawi3bRqyDdINOvafr7iPrrs/T8CAwEA\nAaMyMDAwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUNpx1Pc6cGA7KqEwHMmHB\nTZMA7lQwDQYJKoZIhvcNAQEFBQADggEBAMoRgH3iTG3t317viLKoY+lNMHUgHuR7\nb3mn9MidJKyYVewe6hCDIN6WY4fUojmMW9wFJWJIo0hRMNHL3n3tq8HP2j20Mxy8\nacPdfGZJa+jiBw72CrIGdobKaFduIlIEDBA1pNdZIJ+EulrtqrMesnIt92WaypIS\n8JycbIgDMCiyC0ENHEk8UWlC6429c7OZAsplMTbHME/1R4btxjkdfrYZJjdJ2yL2\n8cjZDUDMCPTdW/ycP07Gkq30RB5tACB5aZdaCn2YaKC8FsEdhff4X7xEOfOEHWEq\nSRxADDj8Lx1MT6QpR07hCiDyHfTCtbqzI0iGjX63Oh7xXSa0f+JVTa8=\n-----END CERTIFICATE-----\n" + } +} + `) + }) +} + +func mockDeleteCertResponse(t *testing.T, lbID, certID int) { + th.Mux.HandleFunc(_certURL(lbID, certID), func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "DELETE") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + w.WriteHeader(http.StatusOK) + }) +} diff --git a/vendor/github.com/rackspace/gophercloud/rackspace/lb/v1/ssl/requests.go b/vendor/github.com/rackspace/gophercloud/rackspace/lb/v1/ssl/requests.go new file mode 100644 index 000000000..bb53ef896 --- /dev/null +++ 
b/vendor/github.com/rackspace/gophercloud/rackspace/lb/v1/ssl/requests.go
@@ -0,0 +1,247 @@
+package ssl
+
+import (
+	"errors"
+
+	"github.com/rackspace/gophercloud"
+	"github.com/rackspace/gophercloud/pagination"
+)
+
+var (
+	errPrivateKey     = errors.New("PrivateKey is a required field")
+	errCertificate    = errors.New("Certificate is a required field")
+	errIntCertificate = errors.New("IntCertificate is a required field")
+)
+
+// UpdateOptsBuilder is the interface options structs have to satisfy in order
+// to be used in the main Update operation in this package.
+type UpdateOptsBuilder interface {
+	ToSSLUpdateMap() (map[string]interface{}, error)
+}
+
+// UpdateOpts is the common options struct used in this package's Update
+// operation.
+type UpdateOpts struct {
+	// Required - consult the SSLTermConfig struct for more info.
+	SecurePort int
+
+	// Required - consult the SSLTermConfig struct for more info.
+	PrivateKey string
+
+	// Required - consult the SSLTermConfig struct for more info.
+	Certificate string
+
+	// Required - consult the SSLTermConfig struct for more info.
+	IntCertificate string
+
+	// Optional - consult the SSLTermConfig struct for more info.
+	Enabled *bool
+
+	// Optional - consult the SSLTermConfig struct for more info.
+	SecureTrafficOnly *bool
+}
+
+// ToSSLUpdateMap casts an UpdateOpts struct to a map.
+func (opts UpdateOpts) ToSSLUpdateMap() (map[string]interface{}, error) {
+	ssl := make(map[string]interface{})
+
+	if opts.SecurePort == 0 {
+		return ssl, errors.New("SecurePort needs to be an integer greater than 0")
+	}
+	if opts.PrivateKey == "" {
+		return ssl, errPrivateKey
+	}
+	if opts.Certificate == "" {
+		return ssl, errCertificate
+	}
+	if opts.IntCertificate == "" {
+		return ssl, errIntCertificate
+	}
+
+	ssl["securePort"] = opts.SecurePort
+	ssl["privateKey"] = opts.PrivateKey
+	ssl["certificate"] = opts.Certificate
+	ssl["intermediateCertificate"] = opts.IntCertificate
+
+	if opts.Enabled != nil {
+		ssl["enabled"] = *opts.Enabled
+	}
+
+	if opts.SecureTrafficOnly != nil {
+		ssl["secureTrafficOnly"] = *opts.SecureTrafficOnly
+	}
+
+	return map[string]interface{}{"sslTermination": ssl}, nil
+}
+
+// Update is the operation responsible for updating the SSL Termination
+// configuration for a load balancer.
+func Update(c *gophercloud.ServiceClient, lbID int, opts UpdateOptsBuilder) UpdateResult {
+	var res UpdateResult
+
+	reqBody, err := opts.ToSSLUpdateMap()
+	if err != nil {
+		res.Err = err
+		return res
+	}
+
+	_, res.Err = c.Put(rootURL(c, lbID), reqBody, &res.Body, &gophercloud.RequestOpts{
+		OkCodes: []int{200},
+	})
+	return res
+}
+
+// Get is the operation responsible for showing the details of the SSL
+// Termination configuration for a load balancer.
+func Get(c *gophercloud.ServiceClient, lbID int) GetResult {
+	var res GetResult
+	_, res.Err = c.Get(rootURL(c, lbID), &res.Body, nil)
+	return res
+}
+
+// Delete is the operation responsible for deleting the SSL Termination
+// configuration for a load balancer.
+func Delete(c *gophercloud.ServiceClient, lbID int) DeleteResult {
+	var res DeleteResult
+	_, res.Err = c.Delete(rootURL(c, lbID), &gophercloud.RequestOpts{
+		OkCodes: []int{200},
+	})
+	return res
+}
+
+// ListCerts will list all of the certificate mappings associated with an
+// SSL-terminated HTTP load balancer.
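+// A rough usage sketch (an illustrative, editorial addition; "client" is an
+// assumed authenticated *gophercloud.ServiceClient):
+//
+//	err := ssl.ListCerts(client, 1234).EachPage(func(page pagination.Page) (bool, error) {
+//		certs, err := ssl.ExtractCerts(page)
+//		if err != nil {
+//			return false, err
+//		}
+//		// inspect certs...
+//		return true, nil
+//	})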
+func ListCerts(c *gophercloud.ServiceClient, lbID int) pagination.Pager {
+	url := certURL(c, lbID)
+	return pagination.NewPager(c, url, func(r pagination.PageResult) pagination.Page {
+		return CertPage{pagination.LinkedPageBase{PageResult: r}}
+	})
+}
+
+// CreateCertOptsBuilder is the interface options structs have to satisfy in
+// order to be used in the CreateCert operation in this package.
+type CreateCertOptsBuilder interface {
+	ToCertCreateMap() (map[string]interface{}, error)
+}
+
+// CreateCertOpts represents the options used when adding a new certificate mapping.
+type CreateCertOpts struct {
+	HostName       string
+	PrivateKey     string
+	Certificate    string
+	IntCertificate string
+}
+
+// ToCertCreateMap will cast a CreateCertOpts struct to a map for JSON serialization.
+func (opts CreateCertOpts) ToCertCreateMap() (map[string]interface{}, error) {
+	cm := make(map[string]interface{})
+
+	if opts.HostName == "" {
+		return cm, errors.New("HostName is a required option")
+	}
+	if opts.PrivateKey == "" {
+		return cm, errPrivateKey
+	}
+	if opts.Certificate == "" {
+		return cm, errCertificate
+	}
+
+	cm["hostName"] = opts.HostName
+	cm["privateKey"] = opts.PrivateKey
+	cm["certificate"] = opts.Certificate
+
+	if opts.IntCertificate != "" {
+		cm["intermediateCertificate"] = opts.IntCertificate
+	}
+
+	return map[string]interface{}{"certificateMapping": cm}, nil
+}
+
+// CreateCert will add a new SSL certificate and allow an SSL-terminated HTTP
+// load balancer to use it. This feature is useful because it allows multiple
+// certificates to be used. The maximum number of certificates that can be
+// stored per LB is 20.
+func CreateCert(c *gophercloud.ServiceClient, lbID int, opts CreateCertOptsBuilder) CreateCertResult {
+	var res CreateCertResult
+
+	reqBody, err := opts.ToCertCreateMap()
+	if err != nil {
+		res.Err = err
+		return res
+	}
+
+	_, res.Err = c.Post(certURL(c, lbID), reqBody, &res.Body, &gophercloud.RequestOpts{
+		OkCodes: []int{200},
+	})
+
+	return res
+}
+
+// GetCert will show the details of an existing SSL certificate.
+func GetCert(c *gophercloud.ServiceClient, lbID, certID int) GetCertResult {
+	var res GetCertResult
+	_, res.Err = c.Get(certResourceURL(c, lbID, certID), &res.Body, nil)
+	return res
+}
+
+// UpdateCertOptsBuilder is the interface options structs have to satisfy in
+// order to be used in the UpdateCert operation in this package.
+type UpdateCertOptsBuilder interface {
+	ToCertUpdateMap() (map[string]interface{}, error)
+}
+
+// UpdateCertOpts represents the options needed to update an SSL certificate.
+type UpdateCertOpts struct {
+	HostName       string
+	PrivateKey     string
+	Certificate    string
+	IntCertificate string
+}
+
+// ToCertUpdateMap will cast an UpdateCertOpts struct into a map for JSON
+// serialization.
+func (opts UpdateCertOpts) ToCertUpdateMap() (map[string]interface{}, error) {
+	cm := make(map[string]interface{})
+
+	if opts.HostName != "" {
+		cm["hostName"] = opts.HostName
+	}
+	if opts.PrivateKey != "" {
+		cm["privateKey"] = opts.PrivateKey
+	}
+	if opts.Certificate != "" {
+		cm["certificate"] = opts.Certificate
+	}
+	if opts.IntCertificate != "" {
+		cm["intermediateCertificate"] = opts.IntCertificate
+	}
+
+	return map[string]interface{}{"certificateMapping": cm}, nil
+}
+
+// UpdateCert is the operation responsible for updating the details of an
+// existing SSL certificate.
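+// Only the fields set in UpdateCertOpts are serialized, so a partial update
+// is possible. As an illustrative sketch (an editorial addition; "client",
+// newCertPEM, and newKeyPEM are assumed caller-supplied values), rotating the
+// certificate for mapping 123 on load balancer 1234 might look like:
+//
+//	opts := ssl.UpdateCertOpts{Certificate: newCertPEM, PrivateKey: newKeyPEM}
+//	cert, err := ssl.UpdateCert(client, 1234, 123, opts).Extract()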
+func UpdateCert(c *gophercloud.ServiceClient, lbID, certID int, opts UpdateCertOptsBuilder) UpdateCertResult { + var res UpdateCertResult + + reqBody, err := opts.ToCertUpdateMap() + if err != nil { + res.Err = err + return res + } + + _, res.Err = c.Put(certResourceURL(c, lbID, certID), reqBody, &res.Body, nil) + return res +} + +// DeleteCert is the operation responsible for permanently removing a SSL +// certificate. +func DeleteCert(c *gophercloud.ServiceClient, lbID, certID int) DeleteResult { + var res DeleteResult + + _, res.Err = c.Delete(certResourceURL(c, lbID, certID), &gophercloud.RequestOpts{ + OkCodes: []int{200}, + }) + + return res +} diff --git a/vendor/github.com/rackspace/gophercloud/rackspace/lb/v1/ssl/requests_test.go b/vendor/github.com/rackspace/gophercloud/rackspace/lb/v1/ssl/requests_test.go new file mode 100644 index 000000000..fb14c4a28 --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/rackspace/lb/v1/ssl/requests_test.go @@ -0,0 +1,167 @@ +package ssl + +import ( + "testing" + + "github.com/rackspace/gophercloud" + "github.com/rackspace/gophercloud/pagination" + th "github.com/rackspace/gophercloud/testhelper" + "github.com/rackspace/gophercloud/testhelper/client" +) + +const ( + lbID = 12345 + certID = 67890 +) + +func TestGet(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + mockGetResponse(t, lbID) + + sp, err := Get(client.ServiceClient(), lbID).Extract() + th.AssertNoErr(t, err) + + expected := &SSLTermConfig{ + Certificate: "-----BEGIN CERTIFICATE-----\nMIIEXTCCA0WgAwIBAgIGATTEAjK3MA0GCSqGSIb3DQEBBQUAMIGDMRkwFwYDVQQD\nExBUZXN0IENBIFNUdWIgS2V5MRcwFQYDVQQLEw5QbGF0Zm9ybSBMYmFhczEaMBgG\nA1UEChMRUmFja3NwYWNlIEhvc3RpbmcxFDASBgNVBAcTC1NhbiBBbnRvbmlvMQ4w\nDAYDVQQIEwVUZXhhczELMAkGA1UEBhMCVVMwHhcNMTIwMTA5MTk0NjQ1WhcNMTQw\nMTA4MTk0NjQ1WjCBgjELMAkGA1UEBhMCVVMxDjAMBgNVBAgTBVRleGFzMRQwEgYD\nVQQHEwtTYW4gQW50b25pbzEaMBgGA1UEChMRUmFja3NwYWNlIEhvc3RpbmcxFzAV\nBgNVBAsTDlBsYXRmb3JtIExiYWFzMRgwFgYDVQQDEw9UZXN0IENsaWVudCBLZXkw\nggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDAi51IylFnHtNLT8C0NVfc\nOBfAsP2D5es1qhrOWHCGlgAuDMksBsCc7FPo5PSBOmQ+6z8HtCFbrLoC5/Zx0F5b\nfVegjA+xKjI2HGASsYHHM0BFEH2UjUcJrWiMWtxQuW6Phbqulo7JwjmygMEmIkeK\nf+FtkE9mrq+E8K40/thrjxl3/ZcJD1+3dcp+ZuzVJ2t1E4iGKCx79IZFsysKiuf+\n+E0i6iGvvI6UcbcZxVxQj2TplJkFuoX5kDgClIX9Dr9y6aJ4SCh+GRhvHl+DTaz0\nnCvghachHZtIeztRDqxWApjOOzs93dSelrviMXDr8fqyEAGg7YIhgui0aZBsWCen\nAgMBAAGjgdUwgdIwgbAGA1UdIwSBqDCBpYAUNpx1Pc6cGA7KqEwHMmHBTZMA7lSh\ngYmkgYYwgYMxGTAXBgNVBAMTEFRlc3QgQ0EgU1R1YiBLZXkxFzAVBgNVBAsTDlBs\nYXRmb3JtIExiYWFzMRowGAYDVQQKExFSYWNrc3BhY2UgSG9zdGluZzEUMBIGA1UE\nBxMLU2FuIEFudG9uaW8xDjAMBgNVBAgTBVRleGFzMQswCQYDVQQGEwJVU4IBATAd\nBgNVHQ4EFgQULueOfsjZZOHwJHZwBy6u0swnpccwDQYJKoZIhvcNAQEFBQADggEB\nAFNuqSVUaotUJoWDv4z7Kbi6JFpTjDht5ORw4BdVYlRD4h9DACAFzPrPV2ym/Osp\nhNMdZq6msZku7MdOSQVhdeGWrSNk3M8O9Hg7cVzPNXOF3iNoo3irQ5tURut44xs4\nWw5YWQqS9WyUY5snD8tm7Y1rQTPfhg+678xIq/zWCv/u+FSnfVv1nlhLVQkEeG/Y\ngh1uMaTIpUKTGEjIAGtpGP7wwIcXptR/HyfzhTUSTaWc1Ef7zoKT9LL5z3IV1hC2\njVWz+RwYs98LjMuksJFoHqRfWyYhCIym0jb6GTwaEmpxAjc+d7OLNQdnoEGoUYGP\nYjtfkRYg265ESMA+Kww4Xy8=\n-----END CERTIFICATE-----\n", + Enabled: true, + SecureTrafficOnly: false, + IntCertificate: "-----BEGIN 
CERTIFICATE-----\nMIIDtTCCAp2gAwIBAgIBATANBgkqhkiG9w0BAQUFADCBgzEZMBcGA1UEAxMQVGVz\ndCBDQSBTVHViIEtleTEXMBUGA1UECxMOUGxhdGZvcm0gTGJhYXMxGjAYBgNVBAoT\nEVJhY2tzcGFjZSBIb3N0aW5nMRQwEgYDVQQHEwtTYW4gQW50b25pbzEOMAwGA1UE\nCBMFVGV4YXMxCzAJBgNVBAYTAlVTMB4XDTEyMDEwOTE5NDU0OVoXDTE0MDEwODE5\nNDU0OVowgYMxGTAXBgNVBAMTEFRlc3QgQ0EgU1R1YiBLZXkxFzAVBgNVBAsTDlBs\nYXRmb3JtIExiYWFzMRowGAYDVQQKExFSYWNrc3BhY2UgSG9zdGluZzEUMBIGA1UE\nBxMLU2FuIEFudG9uaW8xDjAMBgNVBAgTBVRleGFzMQswCQYDVQQGEwJVUzCCASIw\nDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBANNh55lwTVwQvNoEZjq1zGdYz9jA\nXXdjizn8AJhjHLOAallfPtvCfTEgKanhdoyz5FnhQE8HbDAop/KNS1lN2UMvdl5f\nZNLTSjJrNtedqxQwxN/i3bpyBxNVejUH2NjV1mmyj+5CJYwCzWalvI/gLPq/A3as\nO2EQqtf3U8unRgn0zXLRdYxV9MrUzNAmdipPNvNrsVdrCgA42rgF/8KsyRVQfJCX\nfN7PGCfrsC3YaUvhymraWxNnXIzMYTNa9wEeBZLUw8SlEtpa1Zsvui+TPXu3USNZ\nVnWH8Lb6ENlnoX0VBwo62fjOG3JzhNKoJawi3bRqyDdINOvafr7iPrrs/T8CAwEA\nAaMyMDAwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUNpx1Pc6cGA7KqEwHMmHB\nTZMA7lQwDQYJKoZIhvcNAQEFBQADggEBAMoRgH3iTG3t317viLKoY+lNMHUgHuR7\nb3mn9MidJKyYVewe6hCDIN6WY4fUojmMW9wFJWJIo0hRMNHL3n3tq8HP2j20Mxy8\nacPdfGZJa+jiBw72CrIGdobKaFduIlIEDBA1pNdZIJ+EulrtqrMesnIt92WaypIS\n8JycbIgDMCiyC0ENHEk8UWlC6429c7OZAsplMTbHME/1R4btxjkdfrYZJjdJ2yL2\n8cjZDUDMCPTdW/ycP07Gkq30RB5tACB5aZdaCn2YaKC8FsEdhff4X7xEOfOEHWEq\nSRxADDj8Lx1MT6QpR07hCiDyHfTCtbqzI0iGjX63Oh7xXSa0f+JVTa8=\n-----END CERTIFICATE-----\n", + SecurePort: 443, + } + th.AssertDeepEquals(t, expected, sp) +} + +func TestUpdate(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + mockUpdateResponse(t, lbID) + + opts := UpdateOpts{ + Enabled: gophercloud.Enabled, + SecurePort: 443, + SecureTrafficOnly: gophercloud.Disabled, + PrivateKey: "foo", + Certificate: "-----BEGIN CERTIFICATE-----\nMIIEXTCCA0WgAwIBAgIGATTEAjK3MA0GCSqGSIb3DQEBBQUAMIGDMRkwFwYDVQQD\nExBUZXN0IENBIFNUdWIgS2V5MRcwFQYDVQQLEw5QbGF0Zm9ybSBMYmFhczEaMBgG\nA1UEChMRUmFja3NwYWNlIEhvc3RpbmcxFDASBgNVBAcTC1NhbiBBbnRvbmlvMQ4w\nDAYDVQQIEwVUZXhhczELMAkGA1UEBhMCVVMwHhcNMTIwMTA5MTk0NjQ1WhcNMTQw\nMTA4MTk0NjQ1WjCBgjELMAkGA1UEBhMCVVMxDjAMBgNVBAgTBVRleGFzMRQwEgYD\nVQQHEwtTYW4gQW50b25pbzEaMBgGA1UEChMRUmFja3NwYWNlIEhvc3RpbmcxFzAV\nBgNVBAsTDlBsYXRmb3JtIExiYWFzMRgwFgYDVQQDEw9UZXN0IENsaWVudCBLZXkw\nggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDAi51IylFnHtNLT8C0NVfc\nOBfAsP2D5es1qhrOWHCGlgAuDMksBsCc7FPo5PSBOmQ+6z8HtCFbrLoC5/Zx0F5b\nfVegjA+xKjI2HGASsYHHM0BFEH2UjUcJrWiMWtxQuW6Phbqulo7JwjmygMEmIkeK\nf+FtkE9mrq+E8K40/thrjxl3/ZcJD1+3dcp+ZuzVJ2t1E4iGKCx79IZFsysKiuf+\n+E0i6iGvvI6UcbcZxVxQj2TplJkFuoX5kDgClIX9Dr9y6aJ4SCh+GRhvHl+DTaz0\nnCvghachHZtIeztRDqxWApjOOzs93dSelrviMXDr8fqyEAGg7YIhgui0aZBsWCen\nAgMBAAGjgdUwgdIwgbAGA1UdIwSBqDCBpYAUNpx1Pc6cGA7KqEwHMmHBTZMA7lSh\ngYmkgYYwgYMxGTAXBgNVBAMTEFRlc3QgQ0EgU1R1YiBLZXkxFzAVBgNVBAsTDlBs\nYXRmb3JtIExiYWFzMRowGAYDVQQKExFSYWNrc3BhY2UgSG9zdGluZzEUMBIGA1UE\nBxMLU2FuIEFudG9uaW8xDjAMBgNVBAgTBVRleGFzMQswCQYDVQQGEwJVU4IBATAd\nBgNVHQ4EFgQULueOfsjZZOHwJHZwBy6u0swnpccwDQYJKoZIhvcNAQEFBQADggEB\nAFNuqSVUaotUJoWDv4z7Kbi6JFpTjDht5ORw4BdVYlRD4h9DACAFzPrPV2ym/Osp\nhNMdZq6msZku7MdOSQVhdeGWrSNk3M8O9Hg7cVzPNXOF3iNoo3irQ5tURut44xs4\nWw5YWQqS9WyUY5snD8tm7Y1rQTPfhg+678xIq/zWCv/u+FSnfVv1nlhLVQkEeG/Y\ngh1uMaTIpUKTGEjIAGtpGP7wwIcXptR/HyfzhTUSTaWc1Ef7zoKT9LL5z3IV1hC2\njVWz+RwYs98LjMuksJFoHqRfWyYhCIym0jb6GTwaEmpxAjc+d7OLNQdnoEGoUYGP\nYjtfkRYg265ESMA+Kww4Xy8=\n-----END CERTIFICATE-----\n", + IntCertificate: "-----BEGIN 
CERTIFICATE-----\nMIIDtTCCAp2gAwIBAgIBATANBgkqhkiG9w0BAQUFADCBgzEZMBcGA1UEAxMQVGVz\ndCBDQSBTVHViIEtleTEXMBUGA1UECxMOUGxhdGZvcm0gTGJhYXMxGjAYBgNVBAoT\nEVJhY2tzcGFjZSBIb3N0aW5nMRQwEgYDVQQHEwtTYW4gQW50b25pbzEOMAwGA1UE\nCBMFVGV4YXMxCzAJBgNVBAYTAlVTMB4XDTEyMDEwOTE5NDU0OVoXDTE0MDEwODE5\nNDU0OVowgYMxGTAXBgNVBAMTEFRlc3QgQ0EgU1R1YiBLZXkxFzAVBgNVBAsTDlBs\nYXRmb3JtIExiYWFzMRowGAYDVQQKExFSYWNrc3BhY2UgSG9zdGluZzEUMBIGA1UE\nBxMLU2FuIEFudG9uaW8xDjAMBgNVBAgTBVRleGFzMQswCQYDVQQGEwJVUzCCASIw\nDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBANNh55lwTVwQvNoEZjq1zGdYz9jA\nXXdjizn8AJhjHLOAallfPtvCfTEgKanhdoyz5FnhQE8HbDAop/KNS1lN2UMvdl5f\nZNLTSjJrNtedqxQwxN/i3bpyBxNVejUH2NjV1mmyj+5CJYwCzWalvI/gLPq/A3as\nO2EQqtf3U8unRgn0zXLRdYxV9MrUzNAmdipPNvNrsVdrCgA42rgF/8KsyRVQfJCX\nfN7PGCfrsC3YaUvhymraWxNnXIzMYTNa9wEeBZLUw8SlEtpa1Zsvui+TPXu3USNZ\nVnWH8Lb6ENlnoX0VBwo62fjOG3JzhNKoJawi3bRqyDdINOvafr7iPrrs/T8CAwEA\nAaMyMDAwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUNpx1Pc6cGA7KqEwHMmHB\nTZMA7lQwDQYJKoZIhvcNAQEFBQADggEBAMoRgH3iTG3t317viLKoY+lNMHUgHuR7\nb3mn9MidJKyYVewe6hCDIN6WY4fUojmMW9wFJWJIo0hRMNHL3n3tq8HP2j20Mxy8\nacPdfGZJa+jiBw72CrIGdobKaFduIlIEDBA1pNdZIJ+EulrtqrMesnIt92WaypIS\n8JycbIgDMCiyC0ENHEk8UWlC6429c7OZAsplMTbHME/1R4btxjkdfrYZJjdJ2yL2\n8cjZDUDMCPTdW/ycP07Gkq30RB5tACB5aZdaCn2YaKC8FsEdhff4X7xEOfOEHWEq\nSRxADDj8Lx1MT6QpR07hCiDyHfTCtbqzI0iGjX63Oh7xXSa0f+JVTa8=\n-----END CERTIFICATE-----\n", + } + err := Update(client.ServiceClient(), lbID, opts).ExtractErr() + th.AssertNoErr(t, err) +} + +func TestDelete(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + mockDeleteResponse(t, lbID) + + err := Delete(client.ServiceClient(), lbID).ExtractErr() + th.AssertNoErr(t, err) +} + +func TestListCerts(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + mockListCertsResponse(t, lbID) + + count := 0 + + err := ListCerts(client.ServiceClient(), lbID).EachPage(func(page pagination.Page) (bool, error) { + count++ + actual, err := ExtractCerts(page) + th.AssertNoErr(t, err) + + expected := []Certificate{ + Certificate{ID: 123, HostName: "rackspace.com"}, + Certificate{ID: 124, HostName: "*.rackspace.com"}, + } + + th.CheckDeepEquals(t, expected, actual) + + return true, nil + }) + + th.AssertNoErr(t, err) + th.AssertEquals(t, 1, count) +} + +func TestCreateCert(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + mockAddCertResponse(t, lbID) + + opts := CreateCertOpts{ + HostName: "rackspace.com", + PrivateKey: "-----BEGIN RSA PRIVATE 
KEY-----\nMIIEpAIBAAKCAQEAwIudSMpRZx7TS0/AtDVX3DgXwLD9g+XrNaoazlhwhpYALgzJ\nLAbAnOxT6OT0gTpkPus/B7QhW6y6Auf2cdBeW31XoIwPsSoyNhxgErGBxzNARRB9\nlI1HCa1ojFrcULluj4W6rpaOycI5soDBJiJHin/hbZBPZq6vhPCuNP7Ya48Zd/2X\nCQ9ft3XKfmbs1SdrdROIhigse/SGRbMrCorn/vhNIuohr7yOlHG3GcVcUI9k6ZSZ\nBbqF+ZA4ApSF/Q6/cumieEgofhkYbx5fg02s9Jwr4IWnIR2bSHs7UQ6sVgKYzjs7\nPd3Unpa74jFw6/H6shABoO2CIYLotGmQbFgnpwIDAQABAoIBAQCBCQ+PCIclJHNV\ntUzfeCA5ZR4F9JbxHdRTUnxEbOB8UWotckQfTScoAvj4yvdQ42DrCZxj/UOdvFOs\nPufZvlp91bIz1alugWjE+p8n5+2hIaegoTyHoWZKBfxak0myj5KYfHZvKlbmv1ML\nXV4TwEVRfAIG+v87QTY/UUxuF5vR+BpKIbgUJLfPUFFvJUdl84qsJ44pToxaYUd/\nh5YAGC00U4ay1KVSAUnTkkPNZ0lPG/rWU6w6WcTvNRLMd8DzFLTKLOgQfHhbExAF\n+sXPWjWSzbBRP1O7fHqq96QQh4VFiY/7w9W+sDKQyV6Ul17OSXs6aZ4f+lq4rJTI\n1FG96YiBAoGBAO1tiH0h1oWDBYfJB3KJJ6CQQsDGwtHo/DEgznFVP4XwEVbZ98Ha\nBfBCn3sAybbaikyCV1Hwj7kfHMZPDHbrcUSFX7quu/2zPK+wO3lZKXSyu4YsguSa\nRedInN33PpdnlPhLyQdWSuD5sVHJDF6xn22vlyxeILH3ooLg2WOFMPmVAoGBAM+b\nUG/a7iyfpAQKYyuFAsXz6SeFaDY+ZYeX45L112H8Pu+Ie/qzon+bzLB9FIH8GP6+\nQpQgmm/p37U2gD1zChUv7iW6OfQBKk9rWvMpfRF6d7YHquElejhizfTZ+ntBV/VY\ndOYEczxhrdW7keLpatYaaWUy/VboRZmlz/9JGqVLAoGAHfqNmFc0cgk4IowEj7a3\ntTNh6ltub/i+FynwRykfazcDyXaeLPDtfQe8gVh5H8h6W+y9P9BjJVnDVVrX1RAn\nbiJ1EupLPF5sVDapW8ohTOXgfbGTGXBNUUW+4Nv+IDno+mz/RhjkPYHpnM0I7c/5\ntGzOZsC/2hjNgT8I0+MWav0CgYEAuULdJeQVlKalI6HtW2Gn1uRRVJ49H+LQkY6e\nW3+cw2jo9LI0CMWSphNvNrN3wIMp/vHj0fHCP0pSApDvIWbuQXfzKaGko7UCf7rK\nf6GvZRCHkV4IREBAb97j8bMvThxClMNqFfU0rFZyXP+0MOyhFQyertswrgQ6T+Fi\n2mnvKD8CgYAmJHP3NTDRMoMRyAzonJ6nEaGUbAgNmivTaUWMe0+leCvAdwD89gzC\nTKbm3eDUg/6Va3X6ANh3wsfIOe4RXXxcbcFDk9R4zO2M5gfLSjYm5Q87EBZ2hrdj\nM2gLI7dt6thx0J8lR8xRHBEMrVBdgwp0g1gQzo5dAV88/kpkZVps8Q==\n-----END RSA PRIVATE KEY-----\n", + Certificate: "-----BEGIN CERTIFICATE-----\nMIIEXTCCA0WgAwIBAgIGATTEAjK3MA0GCSqGSIb3DQEBBQUAMIGDMRkwFwYDVQQD\nExBUZXN0IENBIFNUdWIgS2V5MRcwFQYDVQQLEw5QbGF0Zm9ybSBMYmFhczEaMBgG\nA1UEChMRUmFja3NwYWNlIEhvc3RpbmcxFDASBgNVBAcTC1NhbiBBbnRvbmlvMQ4w\nDAYDVQQIEwVUZXhhczELMAkGA1UEBhMCVVMwHhcNMTIwMTA5MTk0NjQ1WhcNMTQw\nMTA4MTk0NjQ1WjCBgjELMAkGA1UEBhMCVVMxDjAMBgNVBAgTBVRleGFzMRQwEgYD\nVQQHEwtTYW4gQW50b25pbzEaMBgGA1UEChMRUmFja3NwYWNlIEhvc3RpbmcxFzAV\nBgNVBAsTDlBsYXRmb3JtIExiYWFzMRgwFgYDVQQDEw9UZXN0IENsaWVudCBLZXkw\nggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDAi51IylFnHtNLT8C0NVfc\nOBfAsP2D5es1qhrOWHCGlgAuDMksBsCc7FPo5PSBOmQ+6z8HtCFbrLoC5/Zx0F5b\nfVegjA+xKjI2HGASsYHHM0BFEH2UjUcJrWiMWtxQuW6Phbqulo7JwjmygMEmIkeK\nf+FtkE9mrq+E8K40/thrjxl3/ZcJD1+3dcp+ZuzVJ2t1E4iGKCx79IZFsysKiuf+\n+E0i6iGvvI6UcbcZxVxQj2TplJkFuoX5kDgClIX9Dr9y6aJ4SCh+GRhvHl+DTaz0\nnCvghachHZtIeztRDqxWApjOOzs93dSelrviMXDr8fqyEAGg7YIhgui0aZBsWCen\nAgMBAAGjgdUwgdIwgbAGA1UdIwSBqDCBpYAUNpx1Pc6cGA7KqEwHMmHBTZMA7lSh\ngYmkgYYwgYMxGTAXBgNVBAMTEFRlc3QgQ0EgU1R1YiBLZXkxFzAVBgNVBAsTDlBs\nYXRmb3JtIExiYWFzMRowGAYDVQQKExFSYWNrc3BhY2UgSG9zdGluZzEUMBIGA1UE\nBxMLU2FuIEFudG9uaW8xDjAMBgNVBAgTBVRleGFzMQswCQYDVQQGEwJVU4IBATAd\nBgNVHQ4EFgQULueOfsjZZOHwJHZwBy6u0swnpccwDQYJKoZIhvcNAQEFBQADggEB\nAFNuqSVUaotUJoWDv4z7Kbi6JFpTjDht5ORw4BdVYlRD4h9DACAFzPrPV2ym/Osp\nhNMdZq6msZku7MdOSQVhdeGWrSNk3M8O9Hg7cVzPNXOF3iNoo3irQ5tURut44xs4\nWw5YWQqS9WyUY5snD8tm7Y1rQTPfhg+678xIq/zWCv/u+FSnfVv1nlhLVQkEeG/Y\ngh1uMaTIpUKTGEjIAGtpGP7wwIcXptR/HyfzhTUSTaWc1Ef7zoKT9LL5z3IV1hC2\njVWz+RwYs98LjMuksJFoHqRfWyYhCIym0jb6GTwaEmpxAjc+d7OLNQdnoEGoUYGP\nYjtfkRYg265ESMA+Kww4Xy8=\n-----END CERTIFICATE-----\n", + IntCertificate: "-----BEGIN 
CERTIFICATE-----\nMIIDtTCCAp2gAwIBAgIBATANBgkqhkiG9w0BAQUFADCBgzEZMBcGA1UEAxMQVGVz\ndCBDQSBTVHViIEtleTEXMBUGA1UECxMOUGxhdGZvcm0gTGJhYXMxGjAYBgNVBAoT\nEVJhY2tzcGFjZSBIb3N0aW5nMRQwEgYDVQQHEwtTYW4gQW50b25pbzEOMAwGA1UE\nCBMFVGV4YXMxCzAJBgNVBAYTAlVTMB4XDTEyMDEwOTE5NDU0OVoXDTE0MDEwODE5\nNDU0OVowgYMxGTAXBgNVBAMTEFRlc3QgQ0EgU1R1YiBLZXkxFzAVBgNVBAsTDlBs\nYXRmb3JtIExiYWFzMRowGAYDVQQKExFSYWNrc3BhY2UgSG9zdGluZzEUMBIGA1UE\nBxMLU2FuIEFudG9uaW8xDjAMBgNVBAgTBVRleGFzMQswCQYDVQQGEwJVUzCCASIw\nDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBANNh55lwTVwQvNoEZjq1zGdYz9jA\nXXdjizn8AJhjHLOAallfPtvCfTEgKanhdoyz5FnhQE8HbDAop/KNS1lN2UMvdl5f\nZNLTSjJrNtedqxQwxN/i3bpyBxNVejUH2NjV1mmyj+5CJYwCzWalvI/gLPq/A3as\nO2EQqtf3U8unRgn0zXLRdYxV9MrUzNAmdipPNvNrsVdrCgA42rgF/8KsyRVQfJCX\nfN7PGCfrsC3YaUvhymraWxNnXIzMYTNa9wEeBZLUw8SlEtpa1Zsvui+TPXu3USNZ\nVnWH8Lb6ENlnoX0VBwo62fjOG3JzhNKoJawi3bRqyDdINOvafr7iPrrs/T8CAwEA\nAaMyMDAwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUNpx1Pc6cGA7KqEwHMmHB\nTZMA7lQwDQYJKoZIhvcNAQEFBQADggEBAMoRgH3iTG3t317viLKoY+lNMHUgHuR7\nb3mn9MidJKyYVewe6hCDIN6WY4fUojmMW9wFJWJIo0hRMNHL3n3tq8HP2j20Mxy8\nacPdfGZJa+jiBw72CrIGdobKaFduIlIEDBA1pNdZIJ+EulrtqrMesnIt92WaypIS\n8JycbIgDMCiyC0ENHEk8UWlC6429c7OZAsplMTbHME/1R4btxjkdfrYZJjdJ2yL2\n8cjZDUDMCPTdW/ycP07Gkq30RB5tACB5aZdaCn2YaKC8FsEdhff4X7xEOfOEHWEq\nSRxADDj8Lx1MT6QpR07hCiDyHfTCtbqzI0iGjX63Oh7xXSa0f+JVTa8=\n-----END CERTIFICATE-----\n", + } + + cm, err := CreateCert(client.ServiceClient(), lbID, opts).Extract() + th.AssertNoErr(t, err) + + expected := &Certificate{ + ID: 123, + HostName: "rackspace.com", + Certificate: "-----BEGIN CERTIFICATE-----\nMIIEXTCCA0WgAwIBAgIGATTEAjK3MA0GCSqGSIb3DQEBBQUAMIGDMRkwFwYDVQQD\nExBUZXN0IENBIFNUdWIgS2V5MRcwFQYDVQQLEw5QbGF0Zm9ybSBMYmFhczEaMBgG\nA1UEChMRUmFja3NwYWNlIEhvc3RpbmcxFDASBgNVBAcTC1NhbiBBbnRvbmlvMQ4w\nDAYDVQQIEwVUZXhhczELMAkGA1UEBhMCVVMwHhcNMTIwMTA5MTk0NjQ1WhcNMTQw\nMTA4MTk0NjQ1WjCBgjELMAkGA1UEBhMCVVMxDjAMBgNVBAgTBVRleGFzMRQwEgYD\nVQQHEwtTYW4gQW50b25pbzEaMBgGA1UEChMRUmFja3NwYWNlIEhvc3RpbmcxFzAV\nBgNVBAsTDlBsYXRmb3JtIExiYWFzMRgwFgYDVQQDEw9UZXN0IENsaWVudCBLZXkw\nggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDAi51IylFnHtNLT8C0NVfc\nOBfAsP2D5es1qhrOWHCGlgAuDMksBsCc7FPo5PSBOmQ+6z8HtCFbrLoC5/Zx0F5b\nfVegjA+xKjI2HGASsYHHM0BFEH2UjUcJrWiMWtxQuW6Phbqulo7JwjmygMEmIkeK\nf+FtkE9mrq+E8K40/thrjxl3/ZcJD1+3dcp+ZuzVJ2t1E4iGKCx79IZFsysKiuf+\n+E0i6iGvvI6UcbcZxVxQj2TplJkFuoX5kDgClIX9Dr9y6aJ4SCh+GRhvHl+DTaz0\nnCvghachHZtIeztRDqxWApjOOzs93dSelrviMXDr8fqyEAGg7YIhgui0aZBsWCen\nAgMBAAGjgdUwgdIwgbAGA1UdIwSBqDCBpYAUNpx1Pc6cGA7KqEwHMmHBTZMA7lSh\ngYmkgYYwgYMxGTAXBgNVBAMTEFRlc3QgQ0EgU1R1YiBLZXkxFzAVBgNVBAsTDlBs\nYXRmb3JtIExiYWFzMRowGAYDVQQKExFSYWNrc3BhY2UgSG9zdGluZzEUMBIGA1UE\nBxMLU2FuIEFudG9uaW8xDjAMBgNVBAgTBVRleGFzMQswCQYDVQQGEwJVU4IBATAd\nBgNVHQ4EFgQULueOfsjZZOHwJHZwBy6u0swnpccwDQYJKoZIhvcNAQEFBQADggEB\nAFNuqSVUaotUJoWDv4z7Kbi6JFpTjDht5ORw4BdVYlRD4h9DACAFzPrPV2ym/Osp\nhNMdZq6msZku7MdOSQVhdeGWrSNk3M8O9Hg7cVzPNXOF3iNoo3irQ5tURut44xs4\nWw5YWQqS9WyUY5snD8tm7Y1rQTPfhg+678xIq/zWCv/u+FSnfVv1nlhLVQkEeG/Y\ngh1uMaTIpUKTGEjIAGtpGP7wwIcXptR/HyfzhTUSTaWc1Ef7zoKT9LL5z3IV1hC2\njVWz+RwYs98LjMuksJFoHqRfWyYhCIym0jb6GTwaEmpxAjc+d7OLNQdnoEGoUYGP\nYjtfkRYg265ESMA+Kww4Xy8=\n-----END CERTIFICATE-----\n", + IntCertificate: "-----BEGIN 
CERTIFICATE-----\nMIIDtTCCAp2gAwIBAgIBATANBgkqhkiG9w0BAQUFADCBgzEZMBcGA1UEAxMQVGVz\ndCBDQSBTVHViIEtleTEXMBUGA1UECxMOUGxhdGZvcm0gTGJhYXMxGjAYBgNVBAoT\nEVJhY2tzcGFjZSBIb3N0aW5nMRQwEgYDVQQHEwtTYW4gQW50b25pbzEOMAwGA1UE\nCBMFVGV4YXMxCzAJBgNVBAYTAlVTMB4XDTEyMDEwOTE5NDU0OVoXDTE0MDEwODE5\nNDU0OVowgYMxGTAXBgNVBAMTEFRlc3QgQ0EgU1R1YiBLZXkxFzAVBgNVBAsTDlBs\nYXRmb3JtIExiYWFzMRowGAYDVQQKExFSYWNrc3BhY2UgSG9zdGluZzEUMBIGA1UE\nBxMLU2FuIEFudG9uaW8xDjAMBgNVBAgTBVRleGFzMQswCQYDVQQGEwJVUzCCASIw\nDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBANNh55lwTVwQvNoEZjq1zGdYz9jA\nXXdjizn8AJhjHLOAallfPtvCfTEgKanhdoyz5FnhQE8HbDAop/KNS1lN2UMvdl5f\nZNLTSjJrNtedqxQwxN/i3bpyBxNVejUH2NjV1mmyj+5CJYwCzWalvI/gLPq/A3as\nO2EQqtf3U8unRgn0zXLRdYxV9MrUzNAmdipPNvNrsVdrCgA42rgF/8KsyRVQfJCX\nfN7PGCfrsC3YaUvhymraWxNnXIzMYTNa9wEeBZLUw8SlEtpa1Zsvui+TPXu3USNZ\nVnWH8Lb6ENlnoX0VBwo62fjOG3JzhNKoJawi3bRqyDdINOvafr7iPrrs/T8CAwEA\nAaMyMDAwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUNpx1Pc6cGA7KqEwHMmHB\nTZMA7lQwDQYJKoZIhvcNAQEFBQADggEBAMoRgH3iTG3t317viLKoY+lNMHUgHuR7\nb3mn9MidJKyYVewe6hCDIN6WY4fUojmMW9wFJWJIo0hRMNHL3n3tq8HP2j20Mxy8\nacPdfGZJa+jiBw72CrIGdobKaFduIlIEDBA1pNdZIJ+EulrtqrMesnIt92WaypIS\n8JycbIgDMCiyC0ENHEk8UWlC6429c7OZAsplMTbHME/1R4btxjkdfrYZJjdJ2yL2\n8cjZDUDMCPTdW/ycP07Gkq30RB5tACB5aZdaCn2YaKC8FsEdhff4X7xEOfOEHWEq\nSRxADDj8Lx1MT6QpR07hCiDyHfTCtbqzI0iGjX63Oh7xXSa0f+JVTa8=\n-----END CERTIFICATE-----\n", + } + th.AssertDeepEquals(t, expected, cm) +} + +func TestGetCertMapping(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + mockGetCertResponse(t, lbID, certID) + + sp, err := GetCert(client.ServiceClient(), lbID, certID).Extract() + th.AssertNoErr(t, err) + + expected := &Certificate{ + ID: 123, + HostName: "rackspace.com", + Certificate: "-----BEGIN CERTIFICATE-----\nMIIEXTCCA0WgAwIBAgIGATTEAjK3MA0GCSqGSIb3DQEBBQUAMIGDMRkwFwYDVQQD\nExBUZXN0IENBIFNUdWIgS2V5MRcwFQYDVQQLEw5QbGF0Zm9ybSBMYmFhczEaMBgG\nA1UEChMRUmFja3NwYWNlIEhvc3RpbmcxFDASBgNVBAcTC1NhbiBBbnRvbmlvMQ4w\nDAYDVQQIEwVUZXhhczELMAkGA1UEBhMCVVMwHhcNMTIwMTA5MTk0NjQ1WhcNMTQw\nMTA4MTk0NjQ1WjCBgjELMAkGA1UEBhMCVVMxDjAMBgNVBAgTBVRleGFzMRQwEgYD\nVQQHEwtTYW4gQW50b25pbzEaMBgGA1UEChMRUmFja3NwYWNlIEhvc3RpbmcxFzAV\nBgNVBAsTDlBsYXRmb3JtIExiYWFzMRgwFgYDVQQDEw9UZXN0IENsaWVudCBLZXkw\nggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDAi51IylFnHtNLT8C0NVfc\nOBfAsP2D5es1qhrOWHCGlgAuDMksBsCc7FPo5PSBOmQ+6z8HtCFbrLoC5/Zx0F5b\nfVegjA+xKjI2HGASsYHHM0BFEH2UjUcJrWiMWtxQuW6Phbqulo7JwjmygMEmIkeK\nf+FtkE9mrq+E8K40/thrjxl3/ZcJD1+3dcp+ZuzVJ2t1E4iGKCx79IZFsysKiuf+\n+E0i6iGvvI6UcbcZxVxQj2TplJkFuoX5kDgClIX9Dr9y6aJ4SCh+GRhvHl+DTaz0\nnCvghachHZtIeztRDqxWApjOOzs93dSelrviMXDr8fqyEAGg7YIhgui0aZBsWCen\nAgMBAAGjgdUwgdIwgbAGA1UdIwSBqDCBpYAUNpx1Pc6cGA7KqEwHMmHBTZMA7lSh\ngYmkgYYwgYMxGTAXBgNVBAMTEFRlc3QgQ0EgU1R1YiBLZXkxFzAVBgNVBAsTDlBs\nYXRmb3JtIExiYWFzMRowGAYDVQQKExFSYWNrc3BhY2UgSG9zdGluZzEUMBIGA1UE\nBxMLU2FuIEFudG9uaW8xDjAMBgNVBAgTBVRleGFzMQswCQYDVQQGEwJVU4IBATAd\nBgNVHQ4EFgQULueOfsjZZOHwJHZwBy6u0swnpccwDQYJKoZIhvcNAQEFBQADggEB\nAFNuqSVUaotUJoWDv4z7Kbi6JFpTjDht5ORw4BdVYlRD4h9DACAFzPrPV2ym/Osp\nhNMdZq6msZku7MdOSQVhdeGWrSNk3M8O9Hg7cVzPNXOF3iNoo3irQ5tURut44xs4\nWw5YWQqS9WyUY5snD8tm7Y1rQTPfhg+678xIq/zWCv/u+FSnfVv1nlhLVQkEeG/Y\ngh1uMaTIpUKTGEjIAGtpGP7wwIcXptR/HyfzhTUSTaWc1Ef7zoKT9LL5z3IV1hC2\njVWz+RwYs98LjMuksJFoHqRfWyYhCIym0jb6GTwaEmpxAjc+d7OLNQdnoEGoUYGP\nYjtfkRYg265ESMA+Kww4Xy8=\n-----END CERTIFICATE-----\n", + IntCertificate: "-----BEGIN 
CERTIFICATE-----\nMIIDtTCCAp2gAwIBAgIBATANBgkqhkiG9w0BAQUFADCBgzEZMBcGA1UEAxMQVGVz\ndCBDQSBTVHViIEtleTEXMBUGA1UECxMOUGxhdGZvcm0gTGJhYXMxGjAYBgNVBAoT\nEVJhY2tzcGFjZSBIb3N0aW5nMRQwEgYDVQQHEwtTYW4gQW50b25pbzEOMAwGA1UE\nCBMFVGV4YXMxCzAJBgNVBAYTAlVTMB4XDTEyMDEwOTE5NDU0OVoXDTE0MDEwODE5\nNDU0OVowgYMxGTAXBgNVBAMTEFRlc3QgQ0EgU1R1YiBLZXkxFzAVBgNVBAsTDlBs\nYXRmb3JtIExiYWFzMRowGAYDVQQKExFSYWNrc3BhY2UgSG9zdGluZzEUMBIGA1UE\nBxMLU2FuIEFudG9uaW8xDjAMBgNVBAgTBVRleGFzMQswCQYDVQQGEwJVUzCCASIw\nDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBANNh55lwTVwQvNoEZjq1zGdYz9jA\nXXdjizn8AJhjHLOAallfPtvCfTEgKanhdoyz5FnhQE8HbDAop/KNS1lN2UMvdl5f\nZNLTSjJrNtedqxQwxN/i3bpyBxNVejUH2NjV1mmyj+5CJYwCzWalvI/gLPq/A3as\nO2EQqtf3U8unRgn0zXLRdYxV9MrUzNAmdipPNvNrsVdrCgA42rgF/8KsyRVQfJCX\nfN7PGCfrsC3YaUvhymraWxNnXIzMYTNa9wEeBZLUw8SlEtpa1Zsvui+TPXu3USNZ\nVnWH8Lb6ENlnoX0VBwo62fjOG3JzhNKoJawi3bRqyDdINOvafr7iPrrs/T8CAwEA\nAaMyMDAwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUNpx1Pc6cGA7KqEwHMmHB\nTZMA7lQwDQYJKoZIhvcNAQEFBQADggEBAMoRgH3iTG3t317viLKoY+lNMHUgHuR7\nb3mn9MidJKyYVewe6hCDIN6WY4fUojmMW9wFJWJIo0hRMNHL3n3tq8HP2j20Mxy8\nacPdfGZJa+jiBw72CrIGdobKaFduIlIEDBA1pNdZIJ+EulrtqrMesnIt92WaypIS\n8JycbIgDMCiyC0ENHEk8UWlC6429c7OZAsplMTbHME/1R4btxjkdfrYZJjdJ2yL2\n8cjZDUDMCPTdW/ycP07Gkq30RB5tACB5aZdaCn2YaKC8FsEdhff4X7xEOfOEHWEq\nSRxADDj8Lx1MT6QpR07hCiDyHfTCtbqzI0iGjX63Oh7xXSa0f+JVTa8=\n-----END CERTIFICATE-----\n", + } + th.AssertDeepEquals(t, expected, sp) +} + +func TestUpdateCert(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + mockUpdateCertResponse(t, lbID, certID) + + opts := UpdateCertOpts{ + HostName: "rackspace.com", + PrivateKey: "-----BEGIN RSA PRIVATE KEY-----\nMIIEpAIBAAKCAQEAwIudSMpRZx7TS0/AtDVX3DgXwLD9g+XrNaoazlhwhpYALgzJ\nLAbAnOxT6OT0gTpkPus/B7QhW6y6Auf2cdBeW31XoIwPsSoyNhxgErGBxzNARRB9\nlI1HCa1ojFrcULluj4W6rpaOycI5soDBJiJHin/hbZBPZq6vhPCuNP7Ya48Zd/2X\nCQ9ft3XKfmbs1SdrdROIhigse/SGRbMrCorn/vhNIuohr7yOlHG3GcVcUI9k6ZSZ\nBbqF+ZA4ApSF/Q6/cumieEgofhkYbx5fg02s9Jwr4IWnIR2bSHs7UQ6sVgKYzjs7\nPd3Unpa74jFw6/H6shABoO2CIYLotGmQbFgnpwIDAQABAoIBAQCBCQ+PCIclJHNV\ntUzfeCA5ZR4F9JbxHdRTUnxEbOB8UWotckQfTScoAvj4yvdQ42DrCZxj/UOdvFOs\nPufZvlp91bIz1alugWjE+p8n5+2hIaegoTyHoWZKBfxak0myj5KYfHZvKlbmv1ML\nXV4TwEVRfAIG+v87QTY/UUxuF5vR+BpKIbgUJLfPUFFvJUdl84qsJ44pToxaYUd/\nh5YAGC00U4ay1KVSAUnTkkPNZ0lPG/rWU6w6WcTvNRLMd8DzFLTKLOgQfHhbExAF\n+sXPWjWSzbBRP1O7fHqq96QQh4VFiY/7w9W+sDKQyV6Ul17OSXs6aZ4f+lq4rJTI\n1FG96YiBAoGBAO1tiH0h1oWDBYfJB3KJJ6CQQsDGwtHo/DEgznFVP4XwEVbZ98Ha\nBfBCn3sAybbaikyCV1Hwj7kfHMZPDHbrcUSFX7quu/2zPK+wO3lZKXSyu4YsguSa\nRedInN33PpdnlPhLyQdWSuD5sVHJDF6xn22vlyxeILH3ooLg2WOFMPmVAoGBAM+b\nUG/a7iyfpAQKYyuFAsXz6SeFaDY+ZYeX45L112H8Pu+Ie/qzon+bzLB9FIH8GP6+\nQpQgmm/p37U2gD1zChUv7iW6OfQBKk9rWvMpfRF6d7YHquElejhizfTZ+ntBV/VY\ndOYEczxhrdW7keLpatYaaWUy/VboRZmlz/9JGqVLAoGAHfqNmFc0cgk4IowEj7a3\ntTNh6ltub/i+FynwRykfazcDyXaeLPDtfQe8gVh5H8h6W+y9P9BjJVnDVVrX1RAn\nbiJ1EupLPF5sVDapW8ohTOXgfbGTGXBNUUW+4Nv+IDno+mz/RhjkPYHpnM0I7c/5\ntGzOZsC/2hjNgT8I0+MWav0CgYEAuULdJeQVlKalI6HtW2Gn1uRRVJ49H+LQkY6e\nW3+cw2jo9LI0CMWSphNvNrN3wIMp/vHj0fHCP0pSApDvIWbuQXfzKaGko7UCf7rK\nf6GvZRCHkV4IREBAb97j8bMvThxClMNqFfU0rFZyXP+0MOyhFQyertswrgQ6T+Fi\n2mnvKD8CgYAmJHP3NTDRMoMRyAzonJ6nEaGUbAgNmivTaUWMe0+leCvAdwD89gzC\nTKbm3eDUg/6Va3X6ANh3wsfIOe4RXXxcbcFDk9R4zO2M5gfLSjYm5Q87EBZ2hrdj\nM2gLI7dt6thx0J8lR8xRHBEMrVBdgwp0g1gQzo5dAV88/kpkZVps8Q==\n-----END RSA PRIVATE KEY-----\n", + Certificate: "-----BEGIN 
CERTIFICATE-----\nMIIEXTCCA0WgAwIBAgIGATTEAjK3MA0GCSqGSIb3DQEBBQUAMIGDMRkwFwYDVQQD\nExBUZXN0IENBIFNUdWIgS2V5MRcwFQYDVQQLEw5QbGF0Zm9ybSBMYmFhczEaMBgG\nA1UEChMRUmFja3NwYWNlIEhvc3RpbmcxFDASBgNVBAcTC1NhbiBBbnRvbmlvMQ4w\nDAYDVQQIEwVUZXhhczELMAkGA1UEBhMCVVMwHhcNMTIwMTA5MTk0NjQ1WhcNMTQw\nMTA4MTk0NjQ1WjCBgjELMAkGA1UEBhMCVVMxDjAMBgNVBAgTBVRleGFzMRQwEgYD\nVQQHEwtTYW4gQW50b25pbzEaMBgGA1UEChMRUmFja3NwYWNlIEhvc3RpbmcxFzAV\nBgNVBAsTDlBsYXRmb3JtIExiYWFzMRgwFgYDVQQDEw9UZXN0IENsaWVudCBLZXkw\nggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDAi51IylFnHtNLT8C0NVfc\nOBfAsP2D5es1qhrOWHCGlgAuDMksBsCc7FPo5PSBOmQ+6z8HtCFbrLoC5/Zx0F5b\nfVegjA+xKjI2HGASsYHHM0BFEH2UjUcJrWiMWtxQuW6Phbqulo7JwjmygMEmIkeK\nf+FtkE9mrq+E8K40/thrjxl3/ZcJD1+3dcp+ZuzVJ2t1E4iGKCx79IZFsysKiuf+\n+E0i6iGvvI6UcbcZxVxQj2TplJkFuoX5kDgClIX9Dr9y6aJ4SCh+GRhvHl+DTaz0\nnCvghachHZtIeztRDqxWApjOOzs93dSelrviMXDr8fqyEAGg7YIhgui0aZBsWCen\nAgMBAAGjgdUwgdIwgbAGA1UdIwSBqDCBpYAUNpx1Pc6cGA7KqEwHMmHBTZMA7lSh\ngYmkgYYwgYMxGTAXBgNVBAMTEFRlc3QgQ0EgU1R1YiBLZXkxFzAVBgNVBAsTDlBs\nYXRmb3JtIExiYWFzMRowGAYDVQQKExFSYWNrc3BhY2UgSG9zdGluZzEUMBIGA1UE\nBxMLU2FuIEFudG9uaW8xDjAMBgNVBAgTBVRleGFzMQswCQYDVQQGEwJVU4IBATAd\nBgNVHQ4EFgQULueOfsjZZOHwJHZwBy6u0swnpccwDQYJKoZIhvcNAQEFBQADggEB\nAFNuqSVUaotUJoWDv4z7Kbi6JFpTjDht5ORw4BdVYlRD4h9DACAFzPrPV2ym/Osp\nhNMdZq6msZku7MdOSQVhdeGWrSNk3M8O9Hg7cVzPNXOF3iNoo3irQ5tURut44xs4\nWw5YWQqS9WyUY5snD8tm7Y1rQTPfhg+678xIq/zWCv/u+FSnfVv1nlhLVQkEeG/Y\ngh1uMaTIpUKTGEjIAGtpGP7wwIcXptR/HyfzhTUSTaWc1Ef7zoKT9LL5z3IV1hC2\njVWz+RwYs98LjMuksJFoHqRfWyYhCIym0jb6GTwaEmpxAjc+d7OLNQdnoEGoUYGP\nYjtfkRYg265ESMA+Kww4Xy8=\n-----END CERTIFICATE-----\n", + IntCertificate: "-----BEGIN CERTIFICATE-----\nMIIDtTCCAp2gAwIBAgIBATANBgkqhkiG9w0BAQUFADCBgzEZMBcGA1UEAxMQVGVz\ndCBDQSBTVHViIEtleTEXMBUGA1UECxMOUGxhdGZvcm0gTGJhYXMxGjAYBgNVBAoT\nEVJhY2tzcGFjZSBIb3N0aW5nMRQwEgYDVQQHEwtTYW4gQW50b25pbzEOMAwGA1UE\nCBMFVGV4YXMxCzAJBgNVBAYTAlVTMB4XDTEyMDEwOTE5NDU0OVoXDTE0MDEwODE5\nNDU0OVowgYMxGTAXBgNVBAMTEFRlc3QgQ0EgU1R1YiBLZXkxFzAVBgNVBAsTDlBs\nYXRmb3JtIExiYWFzMRowGAYDVQQKExFSYWNrc3BhY2UgSG9zdGluZzEUMBIGA1UE\nBxMLU2FuIEFudG9uaW8xDjAMBgNVBAgTBVRleGFzMQswCQYDVQQGEwJVUzCCASIw\nDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBANNh55lwTVwQvNoEZjq1zGdYz9jA\nXXdjizn8AJhjHLOAallfPtvCfTEgKanhdoyz5FnhQE8HbDAop/KNS1lN2UMvdl5f\nZNLTSjJrNtedqxQwxN/i3bpyBxNVejUH2NjV1mmyj+5CJYwCzWalvI/gLPq/A3as\nO2EQqtf3U8unRgn0zXLRdYxV9MrUzNAmdipPNvNrsVdrCgA42rgF/8KsyRVQfJCX\nfN7PGCfrsC3YaUvhymraWxNnXIzMYTNa9wEeBZLUw8SlEtpa1Zsvui+TPXu3USNZ\nVnWH8Lb6ENlnoX0VBwo62fjOG3JzhNKoJawi3bRqyDdINOvafr7iPrrs/T8CAwEA\nAaMyMDAwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUNpx1Pc6cGA7KqEwHMmHB\nTZMA7lQwDQYJKoZIhvcNAQEFBQADggEBAMoRgH3iTG3t317viLKoY+lNMHUgHuR7\nb3mn9MidJKyYVewe6hCDIN6WY4fUojmMW9wFJWJIo0hRMNHL3n3tq8HP2j20Mxy8\nacPdfGZJa+jiBw72CrIGdobKaFduIlIEDBA1pNdZIJ+EulrtqrMesnIt92WaypIS\n8JycbIgDMCiyC0ENHEk8UWlC6429c7OZAsplMTbHME/1R4btxjkdfrYZJjdJ2yL2\n8cjZDUDMCPTdW/ycP07Gkq30RB5tACB5aZdaCn2YaKC8FsEdhff4X7xEOfOEHWEq\nSRxADDj8Lx1MT6QpR07hCiDyHfTCtbqzI0iGjX63Oh7xXSa0f+JVTa8=\n-----END CERTIFICATE-----\n", + } + + cm, err := UpdateCert(client.ServiceClient(), lbID, certID, opts).Extract() + th.AssertNoErr(t, err) + + expected := &Certificate{ + ID: 123, + HostName: "rackspace.com", + Certificate: "-----BEGIN 
CERTIFICATE-----\nMIIEXTCCA0WgAwIBAgIGATTEAjK3MA0GCSqGSIb3DQEBBQUAMIGDMRkwFwYDVQQD\nExBUZXN0IENBIFNUdWIgS2V5MRcwFQYDVQQLEw5QbGF0Zm9ybSBMYmFhczEaMBgG\nA1UEChMRUmFja3NwYWNlIEhvc3RpbmcxFDASBgNVBAcTC1NhbiBBbnRvbmlvMQ4w\nDAYDVQQIEwVUZXhhczELMAkGA1UEBhMCVVMwHhcNMTIwMTA5MTk0NjQ1WhcNMTQw\nMTA4MTk0NjQ1WjCBgjELMAkGA1UEBhMCVVMxDjAMBgNVBAgTBVRleGFzMRQwEgYD\nVQQHEwtTYW4gQW50b25pbzEaMBgGA1UEChMRUmFja3NwYWNlIEhvc3RpbmcxFzAV\nBgNVBAsTDlBsYXRmb3JtIExiYWFzMRgwFgYDVQQDEw9UZXN0IENsaWVudCBLZXkw\nggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDAi51IylFnHtNLT8C0NVfc\nOBfAsP2D5es1qhrOWHCGlgAuDMksBsCc7FPo5PSBOmQ+6z8HtCFbrLoC5/Zx0F5b\nfVegjA+xKjI2HGASsYHHM0BFEH2UjUcJrWiMWtxQuW6Phbqulo7JwjmygMEmIkeK\nf+FtkE9mrq+E8K40/thrjxl3/ZcJD1+3dcp+ZuzVJ2t1E4iGKCx79IZFsysKiuf+\n+E0i6iGvvI6UcbcZxVxQj2TplJkFuoX5kDgClIX9Dr9y6aJ4SCh+GRhvHl+DTaz0\nnCvghachHZtIeztRDqxWApjOOzs93dSelrviMXDr8fqyEAGg7YIhgui0aZBsWCen\nAgMBAAGjgdUwgdIwgbAGA1UdIwSBqDCBpYAUNpx1Pc6cGA7KqEwHMmHBTZMA7lSh\ngYmkgYYwgYMxGTAXBgNVBAMTEFRlc3QgQ0EgU1R1YiBLZXkxFzAVBgNVBAsTDlBs\nYXRmb3JtIExiYWFzMRowGAYDVQQKExFSYWNrc3BhY2UgSG9zdGluZzEUMBIGA1UE\nBxMLU2FuIEFudG9uaW8xDjAMBgNVBAgTBVRleGFzMQswCQYDVQQGEwJVU4IBATAd\nBgNVHQ4EFgQULueOfsjZZOHwJHZwBy6u0swnpccwDQYJKoZIhvcNAQEFBQADggEB\nAFNuqSVUaotUJoWDv4z7Kbi6JFpTjDht5ORw4BdVYlRD4h9DACAFzPrPV2ym/Osp\nhNMdZq6msZku7MdOSQVhdeGWrSNk3M8O9Hg7cVzPNXOF3iNoo3irQ5tURut44xs4\nWw5YWQqS9WyUY5snD8tm7Y1rQTPfhg+678xIq/zWCv/u+FSnfVv1nlhLVQkEeG/Y\ngh1uMaTIpUKTGEjIAGtpGP7wwIcXptR/HyfzhTUSTaWc1Ef7zoKT9LL5z3IV1hC2\njVWz+RwYs98LjMuksJFoHqRfWyYhCIym0jb6GTwaEmpxAjc+d7OLNQdnoEGoUYGP\nYjtfkRYg265ESMA+Kww4Xy8=\n-----END CERTIFICATE-----\n", + IntCertificate: "-----BEGIN CERTIFICATE-----\nMIIDtTCCAp2gAwIBAgIBATANBgkqhkiG9w0BAQUFADCBgzEZMBcGA1UEAxMQVGVz\ndCBDQSBTVHViIEtleTEXMBUGA1UECxMOUGxhdGZvcm0gTGJhYXMxGjAYBgNVBAoT\nEVJhY2tzcGFjZSBIb3N0aW5nMRQwEgYDVQQHEwtTYW4gQW50b25pbzEOMAwGA1UE\nCBMFVGV4YXMxCzAJBgNVBAYTAlVTMB4XDTEyMDEwOTE5NDU0OVoXDTE0MDEwODE5\nNDU0OVowgYMxGTAXBgNVBAMTEFRlc3QgQ0EgU1R1YiBLZXkxFzAVBgNVBAsTDlBs\nYXRmb3JtIExiYWFzMRowGAYDVQQKExFSYWNrc3BhY2UgSG9zdGluZzEUMBIGA1UE\nBxMLU2FuIEFudG9uaW8xDjAMBgNVBAgTBVRleGFzMQswCQYDVQQGEwJVUzCCASIw\nDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBANNh55lwTVwQvNoEZjq1zGdYz9jA\nXXdjizn8AJhjHLOAallfPtvCfTEgKanhdoyz5FnhQE8HbDAop/KNS1lN2UMvdl5f\nZNLTSjJrNtedqxQwxN/i3bpyBxNVejUH2NjV1mmyj+5CJYwCzWalvI/gLPq/A3as\nO2EQqtf3U8unRgn0zXLRdYxV9MrUzNAmdipPNvNrsVdrCgA42rgF/8KsyRVQfJCX\nfN7PGCfrsC3YaUvhymraWxNnXIzMYTNa9wEeBZLUw8SlEtpa1Zsvui+TPXu3USNZ\nVnWH8Lb6ENlnoX0VBwo62fjOG3JzhNKoJawi3bRqyDdINOvafr7iPrrs/T8CAwEA\nAaMyMDAwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUNpx1Pc6cGA7KqEwHMmHB\nTZMA7lQwDQYJKoZIhvcNAQEFBQADggEBAMoRgH3iTG3t317viLKoY+lNMHUgHuR7\nb3mn9MidJKyYVewe6hCDIN6WY4fUojmMW9wFJWJIo0hRMNHL3n3tq8HP2j20Mxy8\nacPdfGZJa+jiBw72CrIGdobKaFduIlIEDBA1pNdZIJ+EulrtqrMesnIt92WaypIS\n8JycbIgDMCiyC0ENHEk8UWlC6429c7OZAsplMTbHME/1R4btxjkdfrYZJjdJ2yL2\n8cjZDUDMCPTdW/ycP07Gkq30RB5tACB5aZdaCn2YaKC8FsEdhff4X7xEOfOEHWEq\nSRxADDj8Lx1MT6QpR07hCiDyHfTCtbqzI0iGjX63Oh7xXSa0f+JVTa8=\n-----END CERTIFICATE-----\n", + } + th.AssertDeepEquals(t, expected, cm) +} + +func TestDeleteCert(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + mockDeleteCertResponse(t, lbID, certID) + + err := DeleteCert(client.ServiceClient(), lbID, certID).ExtractErr() + th.AssertNoErr(t, err) +} diff --git a/vendor/github.com/rackspace/gophercloud/rackspace/lb/v1/ssl/results.go b/vendor/github.com/rackspace/gophercloud/rackspace/lb/v1/ssl/results.go new file mode 100644 index 000000000..ead9fcd37 --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/rackspace/lb/v1/ssl/results.go @@ 
-0,0 +1,148 @@ +package ssl + +import ( + "github.com/mitchellh/mapstructure" + + "github.com/rackspace/gophercloud" + "github.com/rackspace/gophercloud/pagination" +) + +// SSLTermConfig represents the SSL configuration for a particular load balancer. +type SSLTermConfig struct { + // The port on which the SSL termination load balancer listens for secure + // traffic. The value must be unique to the existing LB protocol/port + // combination. + SecurePort int `mapstructure:"securePort"` + + // The private key for the SSL certificate, which is validated and verified + // against the provided certificates. + PrivateKey string `mapstructure:"privatekey"` + + // The certificate used for SSL termination, which is validated and verified + // against the key and intermediate certificate if provided. + Certificate string + + // The user's intermediate certificate. The intermediate certificate + // is validated and verified against the key and certificate credentials + // provided. A user may only provide this value when accompanied by a + // Certificate, PrivateKey, and SecurePort. It may not be added or updated as + // a single attribute in a future operation. + IntCertificate string `mapstructure:"intermediatecertificate"` + + // Determines whether the load balancer terminates SSL traffic. + // If this is set to false, the load balancer retains its specified SSL + // attributes but does not terminate SSL traffic. + Enabled bool + + // Determines if the load balancer can only accept secure traffic. If set to + // true, the load balancer will not accept non-secure traffic. + SecureTrafficOnly bool +} + +// DeleteResult represents the result of a delete operation. +type DeleteResult struct { + gophercloud.ErrResult +} + +// UpdateResult represents the result of an update operation. +type UpdateResult struct { + gophercloud.ErrResult +} + +// GetResult represents the result of a get operation. +type GetResult struct { + gophercloud.Result +} + +// Extract interprets a GetResult as an SSLTermConfig struct, if possible. +func (r GetResult) Extract() (*SSLTermConfig, error) { + if r.Err != nil { + return nil, r.Err + } + + var response struct { + SSL SSLTermConfig `mapstructure:"sslTermination"` + } + + err := mapstructure.Decode(r.Body, &response) + + return &response.SSL, err +} + +// Certificate represents an SSL certificate associated with an SSL-terminated +// HTTP load balancer. +type Certificate struct { + ID int + HostName string + Certificate string + IntCertificate string `mapstructure:"intermediateCertificate"` +} + +// CertPage represents a page of certificates. +type CertPage struct { + pagination.LinkedPageBase +} + +// IsEmpty checks whether a CertPage struct is empty. +func (p CertPage) IsEmpty() (bool, error) { + is, err := ExtractCerts(p) + if err != nil { + return true, err + } + return len(is) == 0, nil +} + +// ExtractCerts accepts a Page struct, specifically a CertPage struct, and +// extracts the elements into a slice of Certificate structs. In other words, a generic +// collection is mapped into a relevant slice.
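For reviewers, here is a minimal sketch of how the pager and extractor above compose. It assumes the ListCerts pager defined in this package's requests.go (an earlier hunk of this patch), so treat that name as an assumption:

```go
package main

import (
	"fmt"

	"github.com/rackspace/gophercloud"
	"github.com/rackspace/gophercloud/pagination"
	"github.com/rackspace/gophercloud/rackspace/lb/v1/ssl"
)

// listCertHostnames walks every page of certificate mappings for one load
// balancer and prints each certificate's ID and hostname. ListCerts is
// assumed to be the pager defined in this package's requests.go.
func listCertHostnames(c *gophercloud.ServiceClient, lbID int) error {
	return ssl.ListCerts(c, lbID).EachPage(func(page pagination.Page) (bool, error) {
		certs, err := ssl.ExtractCerts(page)
		if err != nil {
			return false, err
		}
		for _, cert := range certs {
			fmt.Printf("cert %d -> %s\n", cert.ID, cert.HostName)
		}
		return true, nil
	})
}
```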
+func ExtractCerts(page pagination.Page) ([]Certificate, error) { + type NestedMap struct { + Cert Certificate `mapstructure:"certificateMapping" json:"certificateMapping"` + } + var resp struct { + Certs []NestedMap `mapstructure:"certificateMappings" json:"certificateMappings"` + } + + err := mapstructure.Decode(page.(CertPage).Body, &resp) + + slice := []Certificate{} + for _, cert := range resp.Certs { + slice = append(slice, cert.Cert) + } + + return slice, err +} + +type certResult struct { + gophercloud.Result +} + +// Extract interprets a result as a Certificate struct, if possible. +func (r certResult) Extract() (*Certificate, error) { + if r.Err != nil { + return nil, r.Err + } + + var response struct { + Cert Certificate `mapstructure:"certificateMapping"` + } + + err := mapstructure.Decode(r.Body, &response) + + return &response.Cert, err +} + +// CreateCertResult represents the result of a CreateCert operation. +type CreateCertResult struct { + certResult +} + +// GetCertResult represents the result of a GetCert operation. +type GetCertResult struct { + certResult +} + +// UpdateCertResult represents the result of an UpdateCert operation. +type UpdateCertResult struct { + certResult +} diff --git a/vendor/github.com/rackspace/gophercloud/rackspace/lb/v1/ssl/urls.go b/vendor/github.com/rackspace/gophercloud/rackspace/lb/v1/ssl/urls.go new file mode 100644 index 000000000..aa814b358 --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/rackspace/lb/v1/ssl/urls.go @@ -0,0 +1,25 @@ +package ssl + +import ( + "strconv" + + "github.com/rackspace/gophercloud" +) + +const ( + path = "loadbalancers" + sslPath = "ssltermination" + certPath = "certificatemappings" +) + +func rootURL(c *gophercloud.ServiceClient, id int) string { + return c.ServiceURL(path, strconv.Itoa(id), sslPath) +} + +func certURL(c *gophercloud.ServiceClient, id int) string { + return c.ServiceURL(path, strconv.Itoa(id), sslPath, certPath) +} + +func certResourceURL(c *gophercloud.ServiceClient, id, certID int) string { + return c.ServiceURL(path, strconv.Itoa(id), sslPath, certPath, strconv.Itoa(certID)) +} diff --git a/vendor/github.com/rackspace/gophercloud/rackspace/lb/v1/throttle/doc.go b/vendor/github.com/rackspace/gophercloud/rackspace/lb/v1/throttle/doc.go new file mode 100644 index 000000000..1ed605d36 --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/rackspace/lb/v1/throttle/doc.go @@ -0,0 +1,5 @@ +/* +Package throttle provides information and interaction with the Connection +Throttling feature of the Rackspace Cloud Load Balancer service.
+*/ +package throttle diff --git a/vendor/github.com/rackspace/gophercloud/rackspace/lb/v1/throttle/fixtures.go b/vendor/github.com/rackspace/gophercloud/rackspace/lb/v1/throttle/fixtures.go new file mode 100644 index 000000000..f3e49fa68 --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/rackspace/lb/v1/throttle/fixtures.go @@ -0,0 +1,62 @@ +package throttle + +import ( + "fmt" + "net/http" + "strconv" + "testing" + + th "github.com/rackspace/gophercloud/testhelper" + fake "github.com/rackspace/gophercloud/testhelper/client" +) + +func _rootURL(id int) string { + return "/loadbalancers/" + strconv.Itoa(id) + "/connectionthrottle" +} + +func mockGetResponse(t *testing.T, lbID int) { + th.Mux.HandleFunc(_rootURL(lbID), func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + + fmt.Fprintf(w, ` +{ + "connectionThrottle": { + "maxConnections": 100 + } +} +`) + }) +} + +func mockCreateResponse(t *testing.T, lbID int) { + th.Mux.HandleFunc(_rootURL(lbID), func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "PUT") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + + th.TestJSONRequest(t, r, ` +{ + "connectionThrottle": { + "maxConnectionRate": 0, + "maxConnections": 200, + "minConnections": 0, + "rateInterval": 0 + } +} + `) + + w.WriteHeader(http.StatusAccepted) + fmt.Fprintf(w, `{}`) + }) +} + +func mockDeleteResponse(t *testing.T, lbID int) { + th.Mux.HandleFunc(_rootURL(lbID), func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "DELETE") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + w.WriteHeader(http.StatusAccepted) + }) +} diff --git a/vendor/github.com/rackspace/gophercloud/rackspace/lb/v1/throttle/requests.go b/vendor/github.com/rackspace/gophercloud/rackspace/lb/v1/throttle/requests.go new file mode 100644 index 000000000..0446b97a1 --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/rackspace/lb/v1/throttle/requests.go @@ -0,0 +1,76 @@ +package throttle + +import ( + "errors" + + "github.com/rackspace/gophercloud" +) + +// CreateOptsBuilder is the interface options structs have to satisfy in order +// to be used in the main Create operation in this package. +type CreateOptsBuilder interface { + ToCTCreateMap() (map[string]interface{}, error) +} + +// CreateOpts is the common options struct used in this package's Create +// operation. +type CreateOpts struct { + // Required - the maximum number of connections per IP address to allow per LB. + MaxConnections int + + // Deprecated as of v1.22. + MaxConnectionRate int + + // Deprecated as of v1.22. + MinConnections int + + // Deprecated as of v1.22. + RateInterval int +} + +// ToCTCreateMap casts a CreateOpts struct to a map. +func (opts CreateOpts) ToCTCreateMap() (map[string]interface{}, error) { + ct := make(map[string]interface{}) + + if opts.MaxConnections < 0 || opts.MaxConnections > 100000 { + return ct, errors.New("MaxConnections must be an int between 0 and 100000") + } + + ct["maxConnections"] = opts.MaxConnections + ct["maxConnectionRate"] = opts.MaxConnectionRate + ct["minConnections"] = opts.MinConnections + ct["rateInterval"] = opts.RateInterval + + return map[string]interface{}{"connectionThrottle": ct}, nil +} + +// Create is the operation responsible for creating or updating the connection +// throttling configuration for a load balancer.
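Since Create doubles as the update call (it PUTs the whole configuration), a short usage sketch built only from the functions in this package may help:

```go
package main

import (
	"fmt"

	"github.com/rackspace/gophercloud"
	"github.com/rackspace/gophercloud/rackspace/lb/v1/throttle"
)

// capConnections enables (or updates) connection throttling on a load
// balancer and then reads the setting back. Only MaxConnections is still
// meaningful; the deprecated fields are left at their zero values.
func capConnections(c *gophercloud.ServiceClient, lbID int) error {
	opts := throttle.CreateOpts{MaxConnections: 200}
	if err := throttle.Create(c, lbID, opts).ExtractErr(); err != nil {
		return err
	}
	ct, err := throttle.Get(c, lbID).Extract()
	if err != nil {
		return err
	}
	fmt.Println("max connections:", ct.MaxConnections)
	return nil
}
```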
+func Create(c *gophercloud.ServiceClient, lbID int, opts CreateOptsBuilder) CreateResult { + var res CreateResult + + reqBody, err := opts.ToCTCreateMap() + if err != nil { + res.Err = err + return res + } + + _, res.Err = c.Put(rootURL(c, lbID), reqBody, &res.Body, nil) + return res +} + +// Get is the operation responsible for showing the details of the connection +// throttling configuration for a load balancer. +func Get(c *gophercloud.ServiceClient, lbID int) GetResult { + var res GetResult + _, res.Err = c.Get(rootURL(c, lbID), &res.Body, nil) + return res +} + +// Delete is the operation responsible for deleting the connection throttling +// configuration for a load balancer. +func Delete(c *gophercloud.ServiceClient, lbID int) DeleteResult { + var res DeleteResult + _, res.Err = c.Delete(rootURL(c, lbID), nil) + return res +} diff --git a/vendor/github.com/rackspace/gophercloud/rackspace/lb/v1/throttle/requests_test.go b/vendor/github.com/rackspace/gophercloud/rackspace/lb/v1/throttle/requests_test.go new file mode 100644 index 000000000..6e9703ffc --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/rackspace/lb/v1/throttle/requests_test.go @@ -0,0 +1,44 @@ +package throttle + +import ( + "testing" + + th "github.com/rackspace/gophercloud/testhelper" + "github.com/rackspace/gophercloud/testhelper/client" +) + +const lbID = 12345 + +func TestCreate(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + mockCreateResponse(t, lbID) + + opts := CreateOpts{MaxConnections: 200} + err := Create(client.ServiceClient(), lbID, opts).ExtractErr() + th.AssertNoErr(t, err) +} + +func TestGet(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + mockGetResponse(t, lbID) + + sp, err := Get(client.ServiceClient(), lbID).Extract() + th.AssertNoErr(t, err) + + expected := &ConnectionThrottle{MaxConnections: 100} + th.AssertDeepEquals(t, expected, sp) +} + +func TestDelete(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + mockDeleteResponse(t, lbID) + + err := Delete(client.ServiceClient(), lbID).ExtractErr() + th.AssertNoErr(t, err) +} diff --git a/vendor/github.com/rackspace/gophercloud/rackspace/lb/v1/throttle/results.go b/vendor/github.com/rackspace/gophercloud/rackspace/lb/v1/throttle/results.go new file mode 100644 index 000000000..db93c6f3f --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/rackspace/lb/v1/throttle/results.go @@ -0,0 +1,43 @@ +package throttle + +import ( + "github.com/mitchellh/mapstructure" + + "github.com/rackspace/gophercloud" +) + +// ConnectionThrottle represents the connection throttle configuration for a +// particular load balancer. +type ConnectionThrottle struct { + MaxConnections int +} + +// CreateResult represents the result of a create operation. +type CreateResult struct { + gophercloud.ErrResult +} + +// DeleteResult represents the result of a delete operation. +type DeleteResult struct { + gophercloud.ErrResult +} + +// GetResult represents the result of a get operation. +type GetResult struct { + gophercloud.Result +} + +// Extract interprets a GetResult as a ConnectionThrottle struct, if possible.
+func (r GetResult) Extract() (*ConnectionThrottle, error) { + if r.Err != nil { + return nil, r.Err + } + + var response struct { + CT ConnectionThrottle `mapstructure:"connectionThrottle"` + } + + err := mapstructure.Decode(r.Body, &response) + + return &response.CT, err +} diff --git a/vendor/github.com/rackspace/gophercloud/rackspace/lb/v1/throttle/urls.go b/vendor/github.com/rackspace/gophercloud/rackspace/lb/v1/throttle/urls.go new file mode 100644 index 000000000..b77f0ac1c --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/rackspace/lb/v1/throttle/urls.go @@ -0,0 +1,16 @@ +package throttle + +import ( + "strconv" + + "github.com/rackspace/gophercloud" +) + +const ( + path = "loadbalancers" + ctPath = "connectionthrottle" +) + +func rootURL(c *gophercloud.ServiceClient, id int) string { + return c.ServiceURL(path, strconv.Itoa(id), ctPath) +} diff --git a/vendor/github.com/rackspace/gophercloud/rackspace/lb/v1/vips/doc.go b/vendor/github.com/rackspace/gophercloud/rackspace/lb/v1/vips/doc.go new file mode 100644 index 000000000..5c3846d44 --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/rackspace/lb/v1/vips/doc.go @@ -0,0 +1,13 @@ +/* +Package vips provides information and interaction with the Virtual IP API +resource for the Rackspace Cloud Load Balancer service. + +A virtual IP (VIP) makes a load balancer accessible by clients. The load +balancing service supports either a public VIP, routable on the public Internet, +or a ServiceNet address, routable only within the region in which the load +balancer resides. + +All load balancers must have at least one virtual IP associated with them at +all times. +*/ +package vips diff --git a/vendor/github.com/rackspace/gophercloud/rackspace/lb/v1/vips/fixtures.go b/vendor/github.com/rackspace/gophercloud/rackspace/lb/v1/vips/fixtures.go new file mode 100644 index 000000000..158759f7f --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/rackspace/lb/v1/vips/fixtures.go @@ -0,0 +1,88 @@ +package vips + +import ( + "fmt" + "net/http" + "strconv" + "testing" + + th "github.com/rackspace/gophercloud/testhelper" + fake "github.com/rackspace/gophercloud/testhelper/client" +) + +func _rootURL(lbID int) string { + return "/loadbalancers/" + strconv.Itoa(lbID) + "/virtualips" +} + +func mockListResponse(t *testing.T, lbID int) { + th.Mux.HandleFunc(_rootURL(lbID), func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + + fmt.Fprintf(w, ` +{ + "virtualIps": [ + { + "id": 1000, + "address": "206.10.10.210", + "type": "PUBLIC" + } + ] +} + `) + }) +} + +func mockCreateResponse(t *testing.T, lbID int) { + th.Mux.HandleFunc(_rootURL(lbID), func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "POST") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + + th.TestJSONRequest(t, r, ` +{ + "type":"PUBLIC", + "ipVersion":"IPV6" +} + `) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusAccepted) + + fmt.Fprintf(w, ` +{ + "address":"fd24:f480:ce44:91bc:1af2:15ff:0000:0002", + "id":9000134, + "type":"PUBLIC", + "ipVersion":"IPV6" +} + `) + }) +} + +func mockBatchDeleteResponse(t *testing.T, lbID int, ids []int) { + th.Mux.HandleFunc(_rootURL(lbID), func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "DELETE") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + + r.ParseForm() + + for k, v := range ids { + fids := 
r.Form["id"] + th.AssertEquals(t, strconv.Itoa(v), fids[k]) + } + + w.WriteHeader(http.StatusAccepted) + }) +} + +func mockDeleteResponse(t *testing.T, lbID, vipID int) { + url := _rootURL(lbID) + "/" + strconv.Itoa(vipID) + th.Mux.HandleFunc(url, func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "DELETE") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + w.WriteHeader(http.StatusAccepted) + }) +} diff --git a/vendor/github.com/rackspace/gophercloud/rackspace/lb/v1/vips/requests.go b/vendor/github.com/rackspace/gophercloud/rackspace/lb/v1/vips/requests.go new file mode 100644 index 000000000..2bc924f29 --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/rackspace/lb/v1/vips/requests.go @@ -0,0 +1,97 @@ +package vips + +import ( + "errors" + + "github.com/rackspace/gophercloud" + "github.com/rackspace/gophercloud/pagination" +) + +// List is the operation responsible for returning a paginated collection of +// load balancer virtual IP addresses. +func List(client *gophercloud.ServiceClient, loadBalancerID int) pagination.Pager { + url := rootURL(client, loadBalancerID) + return pagination.NewPager(client, url, func(r pagination.PageResult) pagination.Page { + return VIPPage{pagination.SinglePageBase(r)} + }) +} + +// CreateOptsBuilder is the interface options structs have to satisfy in order +// to be used in the main Create operation in this package. Since many +// extensions decorate or modify the common logic, it is useful for them to +// satisfy a basic interface in order for them to be used. +type CreateOptsBuilder interface { + ToVIPCreateMap() (map[string]interface{}, error) +} + +// CreateOpts is the common options struct used in this package's Create +// operation. +type CreateOpts struct { + // Optional - the ID of an existing virtual IP. By doing this, you are + // allowing load balancers to share IPV6 addresses. + ID string + + // Optional - the type of address. + Type Type + + // Optional - the version of address. + Version Version +} + +// ToVIPCreateMap casts a CreateOpts struct to a map. +func (opts CreateOpts) ToVIPCreateMap() (map[string]interface{}, error) { + lb := make(map[string]interface{}) + + if opts.ID != "" { + lb["id"] = opts.ID + } + if opts.Type != "" { + lb["type"] = opts.Type + } + if opts.Version != "" { + lb["ipVersion"] = opts.Version + } + + return lb, nil +} + +// Create is the operation responsible for assigning a new Virtual IP to an +// existing load balancer resource. Currently, only version 6 IP addresses may +// be added. +func Create(c *gophercloud.ServiceClient, lbID int, opts CreateOptsBuilder) CreateResult { + var res CreateResult + + reqBody, err := opts.ToVIPCreateMap() + if err != nil { + res.Err = err + return res + } + + _, res.Err = c.Post(rootURL(c, lbID), reqBody, &res.Body, nil) + return res +} + +// BulkDelete is the operation responsible for batch deleting multiple VIPs in +// a single operation. It accepts a slice of integer IDs and will remove them +// from the load balancer. The maximum limit is 10 VIP removals at once. +func BulkDelete(c *gophercloud.ServiceClient, loadBalancerID int, vipIDs []int) DeleteResult { + var res DeleteResult + + if len(vipIDs) > 10 || len(vipIDs) == 0 { + res.Err = errors.New("You must provide a minimum of 1 and a maximum of 10 VIP IDs") + return res + } + + url := rootURL(c, loadBalancerID) + url += gophercloud.IDSliceToQueryString("id", vipIDs) + + _, res.Err = c.Delete(url, nil) + return res +} + +// Delete is the operation responsible for permanently deleting a VIP. 
+func Delete(c *gophercloud.ServiceClient, lbID, vipID int) DeleteResult { + var res DeleteResult + _, res.Err = c.Delete(resourceURL(c, lbID, vipID), nil) + return res +} diff --git a/vendor/github.com/rackspace/gophercloud/rackspace/lb/v1/vips/requests_test.go b/vendor/github.com/rackspace/gophercloud/rackspace/lb/v1/vips/requests_test.go new file mode 100644 index 000000000..74ac46173 --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/rackspace/lb/v1/vips/requests_test.go @@ -0,0 +1,87 @@ +package vips + +import ( + "testing" + + "github.com/rackspace/gophercloud/pagination" + th "github.com/rackspace/gophercloud/testhelper" + "github.com/rackspace/gophercloud/testhelper/client" +) + +const ( + lbID = 12345 + vipID = 67890 + vipID2 = 67891 +) + +func TestList(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + mockListResponse(t, lbID) + + count := 0 + + err := List(client.ServiceClient(), lbID).EachPage(func(page pagination.Page) (bool, error) { + count++ + actual, err := ExtractVIPs(page) + th.AssertNoErr(t, err) + + expected := []VIP{ + VIP{ID: 1000, Address: "206.10.10.210", Type: "PUBLIC"}, + } + + th.CheckDeepEquals(t, expected, actual) + + return true, nil + }) + + th.AssertNoErr(t, err) + th.AssertEquals(t, 1, count) +} + +func TestCreate(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + mockCreateResponse(t, lbID) + + opts := CreateOpts{ + Type: "PUBLIC", + Version: "IPV6", + } + + vip, err := Create(client.ServiceClient(), lbID, opts).Extract() + th.AssertNoErr(t, err) + + expected := &VIP{ + Address: "fd24:f480:ce44:91bc:1af2:15ff:0000:0002", + ID: 9000134, + Type: "PUBLIC", + Version: "IPV6", + } + + th.CheckDeepEquals(t, expected, vip) +} + +func TestBulkDelete(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + ids := []int{vipID, vipID2} + + mockBatchDeleteResponse(t, lbID, ids) + + err := BulkDelete(client.ServiceClient(), lbID, ids).ExtractErr() + th.AssertNoErr(t, err) +} + +func TestDelete(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + mockDeleteResponse(t, lbID, vipID) + + err := Delete(client.ServiceClient(), lbID, vipID).ExtractErr() + th.AssertNoErr(t, err) +} diff --git a/vendor/github.com/rackspace/gophercloud/rackspace/lb/v1/vips/results.go b/vendor/github.com/rackspace/gophercloud/rackspace/lb/v1/vips/results.go new file mode 100644 index 000000000..678b2aff7 --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/rackspace/lb/v1/vips/results.go @@ -0,0 +1,89 @@ +package vips + +import ( + "github.com/mitchellh/mapstructure" + + "github.com/rackspace/gophercloud" + "github.com/rackspace/gophercloud/pagination" +) + +// VIP represents a Virtual IP API resource. +type VIP struct { + Address string `json:"address,omitempty"` + ID int `json:"id,omitempty"` + Type Type `json:"type,omitempty"` + Version Version `json:"ipVersion,omitempty" mapstructure:"ipVersion"` +} + +// Version represents the version of a VIP. +type Version string + +// Convenient constants to use for the Version type. +const ( + IPV4 Version = "IPV4" + IPV6 Version = "IPV6" +) + +// Type represents the type of a VIP. +type Type string + +const ( + // PUBLIC indicates a VIP type that is routable on the public Internet. + PUBLIC Type = "PUBLIC" + + // PRIVATE indicates a VIP type that is routable only on ServiceNet. + PRIVATE Type = "SERVICENET" +) + +// VIPPage is the page returned by a pager when traversing over a collection +// of VIPs. +type VIPPage struct { + pagination.SinglePageBase +} + +// IsEmpty checks whether a VIPPage struct is empty.
+func (p VIPPage) IsEmpty() (bool, error) { + is, err := ExtractVIPs(p) + if err != nil { + return true, err + } + return len(is) == 0, nil +} + +// ExtractVIPs accepts a Page struct, specifically a VIPPage struct, and +// extracts the elements into a slice of VIP structs. In other words, a +// generic collection is mapped into a relevant slice. +func ExtractVIPs(page pagination.Page) ([]VIP, error) { + var resp struct { + VIPs []VIP `mapstructure:"virtualIps" json:"virtualIps"` + } + + err := mapstructure.Decode(page.(VIPPage).Body, &resp) + + return resp.VIPs, err +} + +type commonResult struct { + gophercloud.Result +} + +func (r commonResult) Extract() (*VIP, error) { + if r.Err != nil { + return nil, r.Err + } + + resp := &VIP{} + err := mapstructure.Decode(r.Body, resp) + + return resp, err +} + +// CreateResult represents the result of a create operation. +type CreateResult struct { + commonResult +} + +// DeleteResult represents the result of a delete operation. +type DeleteResult struct { + gophercloud.ErrResult +} diff --git a/vendor/github.com/rackspace/gophercloud/rackspace/lb/v1/vips/urls.go b/vendor/github.com/rackspace/gophercloud/rackspace/lb/v1/vips/urls.go new file mode 100644 index 000000000..28f063a0f --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/rackspace/lb/v1/vips/urls.go @@ -0,0 +1,20 @@ +package vips + +import ( + "strconv" + + "github.com/rackspace/gophercloud" +) + +const ( + lbPath = "loadbalancers" + vipPath = "virtualips" +) + +func resourceURL(c *gophercloud.ServiceClient, lbID, vipID int) string { + return c.ServiceURL(lbPath, strconv.Itoa(lbID), vipPath, strconv.Itoa(vipID)) +} + +func rootURL(c *gophercloud.ServiceClient, lbID int) string { + return c.ServiceURL(lbPath, strconv.Itoa(lbID), vipPath) +} diff --git a/vendor/github.com/rackspace/gophercloud/rackspace/networking/v2/common/common_tests.go b/vendor/github.com/rackspace/gophercloud/rackspace/networking/v2/common/common_tests.go new file mode 100644 index 000000000..129cd63ae --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/rackspace/networking/v2/common/common_tests.go @@ -0,0 +1,12 @@ +package common + +import ( + "github.com/rackspace/gophercloud" + "github.com/rackspace/gophercloud/testhelper/client" +) + +const TokenID = client.TokenID + +func ServiceClient() *gophercloud.ServiceClient { + return client.ServiceClient() +} diff --git a/vendor/github.com/rackspace/gophercloud/rackspace/networking/v2/networks/delegate.go b/vendor/github.com/rackspace/gophercloud/rackspace/networking/v2/networks/delegate.go new file mode 100644 index 000000000..dcb0855db --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/rackspace/networking/v2/networks/delegate.go @@ -0,0 +1,41 @@ +package networks + +import ( + "github.com/rackspace/gophercloud" + os "github.com/rackspace/gophercloud/openstack/networking/v2/networks" + "github.com/rackspace/gophercloud/pagination" +) + +// List returns a Pager which allows you to iterate over a collection of +// networks. It accepts a ListOpts struct, which allows you to filter and sort +// the returned collection for greater efficiency. +func List(c *gophercloud.ServiceClient, opts os.ListOptsBuilder) pagination.Pager { + return os.List(c, opts) +} + +// Get retrieves a specific network based on its unique ID. +func Get(c *gophercloud.ServiceClient, networkID string) os.GetResult { + return os.Get(c, networkID) +} + +// Create accepts a CreateOpts struct and creates a new network using the values +// provided.
This operation does not actually require a request body, i.e. the +// CreateOpts struct argument can be empty. +// +// The tenant ID that is contained in the URI is the tenant that creates the +// network. An admin user, however, has the option of specifying another tenant +// ID in the CreateOpts struct. +func Create(c *gophercloud.ServiceClient, opts os.CreateOptsBuilder) os.CreateResult { + return os.Create(c, opts) +} + +// Update accepts a UpdateOpts struct and updates an existing network using the +// values provided. For more information, see the Create function. +func Update(c *gophercloud.ServiceClient, networkID string, opts os.UpdateOptsBuilder) os.UpdateResult { + return os.Update(c, networkID, opts) +} + +// Delete accepts a unique ID and deletes the network associated with it. +func Delete(c *gophercloud.ServiceClient, networkID string) os.DeleteResult { + return os.Delete(c, networkID) +} diff --git a/vendor/github.com/rackspace/gophercloud/rackspace/networking/v2/networks/delegate_test.go b/vendor/github.com/rackspace/gophercloud/rackspace/networking/v2/networks/delegate_test.go new file mode 100644 index 000000000..0b3a6b15e --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/rackspace/networking/v2/networks/delegate_test.go @@ -0,0 +1,285 @@ +package networks + +import ( + "fmt" + "net/http" + "testing" + + os "github.com/rackspace/gophercloud/openstack/networking/v2/networks" + "github.com/rackspace/gophercloud/pagination" + fake "github.com/rackspace/gophercloud/rackspace/networking/v2/common" + th "github.com/rackspace/gophercloud/testhelper" +) + +func TestList(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + th.Mux.HandleFunc("/networks", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + + fmt.Fprintf(w, ` +{ + "networks": [ + { + "status": "ACTIVE", + "subnets": [ + "54d6f61d-db07-451c-9ab3-b9609b6b6f0b" + ], + "name": "private-network", + "admin_state_up": true, + "tenant_id": "4fd44f30292945e481c7b8a0c8908869", + "shared": true, + "id": "d32019d3-bc6e-4319-9c1d-6722fc136a22" + }, + { + "status": "ACTIVE", + "subnets": [ + "08eae331-0402-425a-923c-34f7cfe39c1b" + ], + "name": "private", + "admin_state_up": true, + "tenant_id": "26a7980765d0414dbc1fc1f88cdb7e6e", + "shared": true, + "id": "db193ab3-96e3-4cb3-8fc5-05f4296d0324" + } + ] +} + `) + }) + + client := fake.ServiceClient() + count := 0 + + List(client, os.ListOpts{}).EachPage(func(page pagination.Page) (bool, error) { + count++ + actual, err := os.ExtractNetworks(page) + if err != nil { + t.Errorf("Failed to extract networks: %v", err) + return false, err + } + + expected := []os.Network{ + os.Network{ + Status: "ACTIVE", + Subnets: []string{"54d6f61d-db07-451c-9ab3-b9609b6b6f0b"}, + Name: "private-network", + AdminStateUp: true, + TenantID: "4fd44f30292945e481c7b8a0c8908869", + Shared: true, + ID: "d32019d3-bc6e-4319-9c1d-6722fc136a22", + }, + os.Network{ + Status: "ACTIVE", + Subnets: []string{"08eae331-0402-425a-923c-34f7cfe39c1b"}, + Name: "private", + AdminStateUp: true, + TenantID: "26a7980765d0414dbc1fc1f88cdb7e6e", + Shared: true, + ID: "db193ab3-96e3-4cb3-8fc5-05f4296d0324", + }, + } + + th.CheckDeepEquals(t, expected, actual) + + return true, nil + }) + + if count != 1 { + t.Errorf("Expected 1 page, got %d", count) + } +} + +func TestGet(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + 
th.Mux.HandleFunc("/networks/d32019d3-bc6e-4319-9c1d-6722fc136a22", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + + fmt.Fprintf(w, ` +{ + "network": { + "status": "ACTIVE", + "subnets": [ + "54d6f61d-db07-451c-9ab3-b9609b6b6f0b" + ], + "name": "private-network", + "admin_state_up": true, + "tenant_id": "4fd44f30292945e481c7b8a0c8908869", + "shared": true, + "id": "d32019d3-bc6e-4319-9c1d-6722fc136a22" + } +} + `) + }) + + n, err := Get(fake.ServiceClient(), "d32019d3-bc6e-4319-9c1d-6722fc136a22").Extract() + th.AssertNoErr(t, err) + + th.AssertEquals(t, n.Status, "ACTIVE") + th.AssertDeepEquals(t, n.Subnets, []string{"54d6f61d-db07-451c-9ab3-b9609b6b6f0b"}) + th.AssertEquals(t, n.Name, "private-network") + th.AssertEquals(t, n.AdminStateUp, true) + th.AssertEquals(t, n.TenantID, "4fd44f30292945e481c7b8a0c8908869") + th.AssertEquals(t, n.Shared, true) + th.AssertEquals(t, n.ID, "d32019d3-bc6e-4319-9c1d-6722fc136a22") +} + +func TestCreate(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + th.Mux.HandleFunc("/networks", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "POST") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + th.TestHeader(t, r, "Content-Type", "application/json") + th.TestHeader(t, r, "Accept", "application/json") + th.TestJSONRequest(t, r, ` +{ + "network": { + "name": "sample_network", + "admin_state_up": true + } +} + `) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusCreated) + + fmt.Fprintf(w, ` +{ + "network": { + "status": "ACTIVE", + "subnets": [], + "name": "net1", + "admin_state_up": true, + "tenant_id": "9bacb3c5d39d41a79512987f338cf177", + "shared": false, + "id": "4e8e5957-649f-477b-9e5b-f1f75b21c03c" + } +} + `) + }) + + iTrue := true + options := os.CreateOpts{Name: "sample_network", AdminStateUp: &iTrue} + n, err := Create(fake.ServiceClient(), options).Extract() + th.AssertNoErr(t, err) + + th.AssertEquals(t, n.Status, "ACTIVE") + th.AssertDeepEquals(t, n.Subnets, []string{}) + th.AssertEquals(t, n.Name, "net1") + th.AssertEquals(t, n.AdminStateUp, true) + th.AssertEquals(t, n.TenantID, "9bacb3c5d39d41a79512987f338cf177") + th.AssertEquals(t, n.Shared, false) + th.AssertEquals(t, n.ID, "4e8e5957-649f-477b-9e5b-f1f75b21c03c") +} + +func TestCreateWithOptionalFields(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + th.Mux.HandleFunc("/networks", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "POST") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + th.TestHeader(t, r, "Content-Type", "application/json") + th.TestHeader(t, r, "Accept", "application/json") + th.TestJSONRequest(t, r, ` +{ + "network": { + "name": "sample_network", + "admin_state_up": true, + "shared": true, + "tenant_id": "12345" + } +} + `) + w.WriteHeader(http.StatusCreated) + fmt.Fprintf(w, ` +{ + "network": { + "name": "sample_network", + "admin_state_up": true, + "shared": true, + "tenant_id": "12345" + } +} + `) + }) + + iTrue := true + options := os.CreateOpts{Name: "sample_network", AdminStateUp: &iTrue, Shared: &iTrue, TenantID: "12345"} + _, err := Create(fake.ServiceClient(), options).Extract() + th.AssertNoErr(t, err) +} + +func TestUpdate(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + th.Mux.HandleFunc("/networks/4e8e5957-649f-477b-9e5b-f1f75b21c03c", func(w http.ResponseWriter, r 
*http.Request) { + th.TestMethod(t, r, "PUT") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + th.TestHeader(t, r, "Content-Type", "application/json") + th.TestHeader(t, r, "Accept", "application/json") + th.TestJSONRequest(t, r, ` +{ + "network": { + "name": "new_network_name", + "admin_state_up": false, + "shared": true + } +} + `) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + + fmt.Fprintf(w, ` +{ + "network": { + "status": "ACTIVE", + "subnets": [], + "name": "new_network_name", + "admin_state_up": false, + "tenant_id": "4fd44f30292945e481c7b8a0c8908869", + "shared": true, + "id": "4e8e5957-649f-477b-9e5b-f1f75b21c03c" + } +} + `) + }) + + iTrue := true + options := os.UpdateOpts{Name: "new_network_name", AdminStateUp: os.Down, Shared: &iTrue} + n, err := Update(fake.ServiceClient(), "4e8e5957-649f-477b-9e5b-f1f75b21c03c", options).Extract() + th.AssertNoErr(t, err) + + th.AssertEquals(t, n.Name, "new_network_name") + th.AssertEquals(t, n.AdminStateUp, false) + th.AssertEquals(t, n.Shared, true) + th.AssertEquals(t, n.ID, "4e8e5957-649f-477b-9e5b-f1f75b21c03c") +} + +func TestDelete(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + th.Mux.HandleFunc("/networks/4e8e5957-649f-477b-9e5b-f1f75b21c03c", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "DELETE") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + w.WriteHeader(http.StatusNoContent) + }) + + res := Delete(fake.ServiceClient(), "4e8e5957-649f-477b-9e5b-f1f75b21c03c") + th.AssertNoErr(t, res.Err) +} diff --git a/vendor/github.com/rackspace/gophercloud/rackspace/networking/v2/ports/delegate.go b/vendor/github.com/rackspace/gophercloud/rackspace/networking/v2/ports/delegate.go new file mode 100644 index 000000000..95728d185 --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/rackspace/networking/v2/ports/delegate.go @@ -0,0 +1,43 @@ +package ports + +import ( + "github.com/rackspace/gophercloud" + os "github.com/rackspace/gophercloud/openstack/networking/v2/ports" + "github.com/rackspace/gophercloud/pagination" +) + +// List returns a Pager which allows you to iterate over a collection of +// ports. It accepts a ListOpts struct, which allows you to filter and sort +// the returned collection for greater efficiency. +// +// Default policy settings return only those ports that are owned by the tenant +// who submits the request, unless the request is submitted by a user with +// administrative rights. +func List(c *gophercloud.ServiceClient, opts os.ListOptsBuilder) pagination.Pager { + return os.List(c, opts) +} + +// Get retrieves a specific port based on its unique ID. +func Get(c *gophercloud.ServiceClient, portID string) os.GetResult { + return os.Get(c, portID) +} + +// Create accepts a CreateOpts struct and creates a new port using the values +// provided. You must remember to provide a NetworkID value. +// +// NOTE: Currently the SecurityGroup option is not implemented to work with +// Rackspace. +func Create(c *gophercloud.ServiceClient, opts os.CreateOptsBuilder) os.CreateResult { + return os.Create(c, opts) +} + +// Update accepts a UpdateOpts struct and updates an existing port using the +// values provided. +func Update(c *gophercloud.ServiceClient, portID string, opts os.UpdateOptsBuilder) os.UpdateResult { + return os.Update(c, portID, opts) +} + +// Delete accepts a unique ID and deletes the port associated with it.
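A minimal sketch of the NetworkID requirement called out above, using only the delegate and the os package options exercised by the tests below:

```go
package main

import (
	"github.com/rackspace/gophercloud"
	os "github.com/rackspace/gophercloud/openstack/networking/v2/ports"
	"github.com/rackspace/gophercloud/rackspace/networking/v2/ports"
)

// createPort shows the one hard requirement: the CreateOpts passed to Create
// must carry a NetworkID. The other fields here are optional.
func createPort(c *gophercloud.ServiceClient, networkID string) (*os.Port, error) {
	up := true
	opts := os.CreateOpts{
		NetworkID:    networkID, // required; Create fails without it
		Name:         "private-port",
		AdminStateUp: &up,
	}
	return ports.Create(c, opts).Extract()
}
```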
+func Delete(c *gophercloud.ServiceClient, portID string) os.DeleteResult { + return os.Delete(c, portID) +} diff --git a/vendor/github.com/rackspace/gophercloud/rackspace/networking/v2/ports/delegate_test.go b/vendor/github.com/rackspace/gophercloud/rackspace/networking/v2/ports/delegate_test.go new file mode 100644 index 000000000..f53ff595a --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/rackspace/networking/v2/ports/delegate_test.go @@ -0,0 +1,322 @@ +package ports + +import ( + "fmt" + "net/http" + "testing" + + os "github.com/rackspace/gophercloud/openstack/networking/v2/ports" + "github.com/rackspace/gophercloud/pagination" + fake "github.com/rackspace/gophercloud/rackspace/networking/v2/common" + th "github.com/rackspace/gophercloud/testhelper" +) + +func TestList(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + th.Mux.HandleFunc("/ports", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + + fmt.Fprintf(w, ` +{ + "ports": [ + { + "status": "ACTIVE", + "binding:host_id": "devstack", + "name": "", + "admin_state_up": true, + "network_id": "70c1db1f-b701-45bd-96e0-a313ee3430b3", + "tenant_id": "", + "device_owner": "network:router_gateway", + "mac_address": "fa:16:3e:58:42:ed", + "fixed_ips": [ + { + "subnet_id": "008ba151-0b8c-4a67-98b5-0d2b87666062", + "ip_address": "172.24.4.2" + } + ], + "id": "d80b1a3b-4fc1-49f3-952e-1e2ab7081d8b", + "security_groups": [], + "device_id": "9ae135f4-b6e0-4dad-9e91-3c223e385824" + } + ] +} + `) + }) + + count := 0 + + List(fake.ServiceClient(), os.ListOpts{}).EachPage(func(page pagination.Page) (bool, error) { + count++ + actual, err := os.ExtractPorts(page) + if err != nil { + t.Errorf("Failed to extract ports: %v", err) + return false, err + } + + expected := []os.Port{ + os.Port{ + Status: "ACTIVE", + Name: "", + AdminStateUp: true, + NetworkID: "70c1db1f-b701-45bd-96e0-a313ee3430b3", + TenantID: "", + DeviceOwner: "network:router_gateway", + MACAddress: "fa:16:3e:58:42:ed", + FixedIPs: []os.IP{ + os.IP{ + SubnetID: "008ba151-0b8c-4a67-98b5-0d2b87666062", + IPAddress: "172.24.4.2", + }, + }, + ID: "d80b1a3b-4fc1-49f3-952e-1e2ab7081d8b", + SecurityGroups: []string{}, + DeviceID: "9ae135f4-b6e0-4dad-9e91-3c223e385824", + }, + } + + th.CheckDeepEquals(t, expected, actual) + + return true, nil + }) + + if count != 1 { + t.Errorf("Expected 1 page, got %d", count) + } +} + +func TestGet(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + th.Mux.HandleFunc("/ports/46d4bfb9-b26e-41f3-bd2e-e6dcc1ccedb2", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + + fmt.Fprintf(w, ` +{ + "port": { + "status": "ACTIVE", + "name": "", + "admin_state_up": true, + "network_id": "a87cc70a-3e15-4acf-8205-9b711a3531b7", + "tenant_id": "7e02058126cc4950b75f9970368ba177", + "device_owner": "network:router_interface", + "mac_address": "fa:16:3e:23:fd:d7", + "fixed_ips": [ + { + "subnet_id": "a0304c3a-4f08-4c43-88af-d796509c97d2", + "ip_address": "10.0.0.1" + } + ], + "id": "46d4bfb9-b26e-41f3-bd2e-e6dcc1ccedb2", + "security_groups": [], + "device_id": "5e3898d7-11be-483e-9732-b2f5eccd2b2e" + } +} + `) + }) + + n, err := Get(fake.ServiceClient(), "46d4bfb9-b26e-41f3-bd2e-e6dcc1ccedb2").Extract() + 
th.AssertNoErr(t, err) + + th.AssertEquals(t, n.Status, "ACTIVE") + th.AssertEquals(t, n.Name, "") + th.AssertEquals(t, n.AdminStateUp, true) + th.AssertEquals(t, n.NetworkID, "a87cc70a-3e15-4acf-8205-9b711a3531b7") + th.AssertEquals(t, n.TenantID, "7e02058126cc4950b75f9970368ba177") + th.AssertEquals(t, n.DeviceOwner, "network:router_interface") + th.AssertEquals(t, n.MACAddress, "fa:16:3e:23:fd:d7") + th.AssertDeepEquals(t, n.FixedIPs, []os.IP{ + os.IP{SubnetID: "a0304c3a-4f08-4c43-88af-d796509c97d2", IPAddress: "10.0.0.1"}, + }) + th.AssertEquals(t, n.ID, "46d4bfb9-b26e-41f3-bd2e-e6dcc1ccedb2") + th.AssertDeepEquals(t, n.SecurityGroups, []string{}) + th.AssertEquals(t, n.Status, "ACTIVE") + th.AssertEquals(t, n.DeviceID, "5e3898d7-11be-483e-9732-b2f5eccd2b2e") +} + +func TestCreate(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + th.Mux.HandleFunc("/ports", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "POST") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + th.TestHeader(t, r, "Content-Type", "application/json") + th.TestHeader(t, r, "Accept", "application/json") + th.TestJSONRequest(t, r, ` +{ + "port": { + "network_id": "a87cc70a-3e15-4acf-8205-9b711a3531b7", + "name": "private-port", + "admin_state_up": true, + "fixed_ips": [ + { + "subnet_id": "a0304c3a-4f08-4c43-88af-d796509c97d2", + "ip_address": "10.0.0.2" + } + ], + "security_groups": ["foo"] + } +} + `) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusCreated) + + fmt.Fprintf(w, ` +{ + "port": { + "status": "DOWN", + "name": "private-port", + "allowed_address_pairs": [], + "admin_state_up": true, + "network_id": "a87cc70a-3e15-4acf-8205-9b711a3531b7", + "tenant_id": "d6700c0c9ffa4f1cb322cd4a1f3906fa", + "device_owner": "", + "mac_address": "fa:16:3e:c9:cb:f0", + "fixed_ips": [ + { + "subnet_id": "a0304c3a-4f08-4c43-88af-d796509c97d2", + "ip_address": "10.0.0.2" + } + ], + "id": "65c0ee9f-d634-4522-8954-51021b570b0d", + "security_groups": [ + "f0ac4394-7e4a-4409-9701-ba8be283dbc3" + ], + "device_id": "" + } +} + `) + }) + + asu := true + options := os.CreateOpts{ + Name: "private-port", + AdminStateUp: &asu, + NetworkID: "a87cc70a-3e15-4acf-8205-9b711a3531b7", + FixedIPs: []os.IP{ + os.IP{SubnetID: "a0304c3a-4f08-4c43-88af-d796509c97d2", IPAddress: "10.0.0.2"}, + }, + SecurityGroups: []string{"foo"}, + } + n, err := Create(fake.ServiceClient(), options).Extract() + th.AssertNoErr(t, err) + + th.AssertEquals(t, n.Status, "DOWN") + th.AssertEquals(t, n.Name, "private-port") + th.AssertEquals(t, n.AdminStateUp, true) + th.AssertEquals(t, n.NetworkID, "a87cc70a-3e15-4acf-8205-9b711a3531b7") + th.AssertEquals(t, n.TenantID, "d6700c0c9ffa4f1cb322cd4a1f3906fa") + th.AssertEquals(t, n.DeviceOwner, "") + th.AssertEquals(t, n.MACAddress, "fa:16:3e:c9:cb:f0") + th.AssertDeepEquals(t, n.FixedIPs, []os.IP{ + os.IP{SubnetID: "a0304c3a-4f08-4c43-88af-d796509c97d2", IPAddress: "10.0.0.2"}, + }) + th.AssertEquals(t, n.ID, "65c0ee9f-d634-4522-8954-51021b570b0d") + th.AssertDeepEquals(t, n.SecurityGroups, []string{"f0ac4394-7e4a-4409-9701-ba8be283dbc3"}) +} + +func TestRequiredCreateOpts(t *testing.T) { + res := Create(fake.ServiceClient(), os.CreateOpts{}) + if res.Err == nil { + t.Fatalf("Expected error, got none") + } +} + +func TestUpdate(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + th.Mux.HandleFunc("/ports/65c0ee9f-d634-4522-8954-51021b570b0d", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "PUT") + th.TestHeader(t, r, 
"X-Auth-Token", fake.TokenID) + th.TestHeader(t, r, "Content-Type", "application/json") + th.TestHeader(t, r, "Accept", "application/json") + th.TestJSONRequest(t, r, ` +{ + "port": { + "name": "new_port_name", + "fixed_ips": [ + { + "subnet_id": "a0304c3a-4f08-4c43-88af-d796509c97d2", + "ip_address": "10.0.0.3" + } + ], + "security_groups": [ + "f0ac4394-7e4a-4409-9701-ba8be283dbc3" + ] + } +} + `) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + + fmt.Fprintf(w, ` +{ + "port": { + "status": "DOWN", + "name": "new_port_name", + "admin_state_up": true, + "network_id": "a87cc70a-3e15-4acf-8205-9b711a3531b7", + "tenant_id": "d6700c0c9ffa4f1cb322cd4a1f3906fa", + "device_owner": "", + "mac_address": "fa:16:3e:c9:cb:f0", + "fixed_ips": [ + { + "subnet_id": "a0304c3a-4f08-4c43-88af-d796509c97d2", + "ip_address": "10.0.0.3" + } + ], + "id": "65c0ee9f-d634-4522-8954-51021b570b0d", + "security_groups": [ + "f0ac4394-7e4a-4409-9701-ba8be283dbc3" + ], + "device_id": "" + } +} + `) + }) + + options := os.UpdateOpts{ + Name: "new_port_name", + FixedIPs: []os.IP{ + os.IP{SubnetID: "a0304c3a-4f08-4c43-88af-d796509c97d2", IPAddress: "10.0.0.3"}, + }, + SecurityGroups: []string{"f0ac4394-7e4a-4409-9701-ba8be283dbc3"}, + } + + s, err := Update(fake.ServiceClient(), "65c0ee9f-d634-4522-8954-51021b570b0d", options).Extract() + th.AssertNoErr(t, err) + + th.AssertEquals(t, s.Name, "new_port_name") + th.AssertDeepEquals(t, s.FixedIPs, []os.IP{ + os.IP{SubnetID: "a0304c3a-4f08-4c43-88af-d796509c97d2", IPAddress: "10.0.0.3"}, + }) + th.AssertDeepEquals(t, s.SecurityGroups, []string{"f0ac4394-7e4a-4409-9701-ba8be283dbc3"}) +} + +func TestDelete(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + th.Mux.HandleFunc("/ports/65c0ee9f-d634-4522-8954-51021b570b0d", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "DELETE") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + w.WriteHeader(http.StatusNoContent) + }) + + res := Delete(fake.ServiceClient(), "65c0ee9f-d634-4522-8954-51021b570b0d") + th.AssertNoErr(t, res.Err) +} diff --git a/vendor/github.com/rackspace/gophercloud/rackspace/networking/v2/security/doc.go b/vendor/github.com/rackspace/gophercloud/rackspace/networking/v2/security/doc.go new file mode 100644 index 000000000..31f744ccd --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/rackspace/networking/v2/security/doc.go @@ -0,0 +1,32 @@ +// Package security contains functionality to work with security group and +// security group rules Neutron resources. +// +// Security groups and security group rules allows administrators and tenants +// the ability to specify the type of traffic and direction (ingress/egress) +// that is allowed to pass through a port. A security group is a container for +// security group rules. +// +// When a port is created in Networking it is associated with a security group. +// If a security group is not specified the port is associated with a 'default' +// security group. By default, this group drops all ingress traffic and allows +// all egress. Rules can be added to this group in order to change the behaviour. +// +// The basic characteristics of Neutron Security Groups are: +// +// For ingress traffic (to an instance) +// - Only traffic matched with security group rules are allowed. +// - When there is no rule defined, all traffic is dropped. +// +// For egress traffic (from an instance) +// - Only traffic matched with security group rules are allowed. 
+// - When there is no rule defined, all egress traffic is dropped. +// - When a new security group is created, rules to allow all egress traffic +// are automatically added. +// +// A "default security group" is defined for each tenant. +// - For the default security group, a rule which allows intercommunication +// among hosts associated with the default security group is defined by default. +// - As a result, all egress traffic and intercommunication in the default +// group are allowed and all ingress from outside of the default group is +// dropped by default (in the default security group). +package security diff --git a/vendor/github.com/rackspace/gophercloud/rackspace/networking/v2/security/groups/delegate.go b/vendor/github.com/rackspace/gophercloud/rackspace/networking/v2/security/groups/delegate.go new file mode 100644 index 000000000..1e9a23a05 --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/rackspace/networking/v2/security/groups/delegate.go @@ -0,0 +1,30 @@ +package groups + +import ( + "github.com/rackspace/gophercloud" + os "github.com/rackspace/gophercloud/openstack/networking/v2/extensions/security/groups" + "github.com/rackspace/gophercloud/pagination" +) + +// List returns a Pager which allows you to iterate over a collection of +// security groups. It accepts a ListOpts struct, which allows you to filter +// and sort the returned collection for greater efficiency. +func List(c *gophercloud.ServiceClient, opts os.ListOpts) pagination.Pager { + return os.List(c, opts) +} + +// Create is an operation which provisions a new security group with default +// security group rules for the IPv4 and IPv6 ether types. +func Create(c *gophercloud.ServiceClient, opts os.CreateOpts) os.CreateResult { + return os.Create(c, opts) +} + +// Get retrieves a particular security group based on its unique ID. +func Get(c *gophercloud.ServiceClient, id string) os.GetResult { + return os.Get(c, id) +} + +// Delete will permanently delete a particular security group based on its unique ID.
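A short sketch of the seeding behaviour described in doc.go and in Create's comment above; it uses only names exercised by the tests below:

```go
package main

import (
	"fmt"

	"github.com/rackspace/gophercloud"
	os "github.com/rackspace/gophercloud/openstack/networking/v2/extensions/security/groups"
	"github.com/rackspace/gophercloud/rackspace/networking/v2/security/groups"
)

// newWebserverGroup creates a security group and prints the default egress
// rules (one per ether type) that the service seeds it with.
func newWebserverGroup(c *gophercloud.ServiceClient) error {
	sg, err := groups.Create(c, os.CreateOpts{
		Name:        "new-webservers",
		Description: "security group for webservers",
	}).Extract()
	if err != nil {
		return err
	}
	for _, rule := range sg.Rules {
		fmt.Printf("seeded rule: %s %s\n", rule.Direction, rule.EtherType)
	}
	return nil
}
```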
+func Delete(c *gophercloud.ServiceClient, id string) os.DeleteResult { + return os.Delete(c, id) +} diff --git a/vendor/github.com/rackspace/gophercloud/rackspace/networking/v2/security/groups/delegate_test.go b/vendor/github.com/rackspace/gophercloud/rackspace/networking/v2/security/groups/delegate_test.go new file mode 100644 index 000000000..45cd3ba8d --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/rackspace/networking/v2/security/groups/delegate_test.go @@ -0,0 +1,206 @@ +package groups + +import ( + "fmt" + "net/http" + "testing" + + fake "github.com/rackspace/gophercloud/openstack/networking/v2/common" + osGroups "github.com/rackspace/gophercloud/openstack/networking/v2/extensions/security/groups" + osRules "github.com/rackspace/gophercloud/openstack/networking/v2/extensions/security/rules" + "github.com/rackspace/gophercloud/pagination" + th "github.com/rackspace/gophercloud/testhelper" +) + +func TestList(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + th.Mux.HandleFunc("/v2.0/security-groups", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + + fmt.Fprintf(w, ` + { + "security_groups": [ + { + "description": "default", + "id": "85cc3048-abc3-43cc-89b3-377341426ac5", + "name": "default", + "security_group_rules": [], + "tenant_id": "e4f50856753b4dc6afee5fa6b9b6c550" + } + ] + } + `) + }) + + count := 0 + + List(fake.ServiceClient(), osGroups.ListOpts{}).EachPage(func(page pagination.Page) (bool, error) { + count++ + actual, err := osGroups.ExtractGroups(page) + if err != nil { + t.Errorf("Failed to extract secgroups: %v", err) + return false, err + } + + expected := []osGroups.SecGroup{ + osGroups.SecGroup{ + Description: "default", + ID: "85cc3048-abc3-43cc-89b3-377341426ac5", + Name: "default", + Rules: []osRules.SecGroupRule{}, + TenantID: "e4f50856753b4dc6afee5fa6b9b6c550", + }, + } + + th.CheckDeepEquals(t, expected, actual) + + return true, nil + }) + + if count != 1 { + t.Errorf("Expected 1 page, got %d", count) + } +} + +func TestCreate(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + th.Mux.HandleFunc("/v2.0/security-groups", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "POST") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + th.TestHeader(t, r, "Content-Type", "application/json") + th.TestHeader(t, r, "Accept", "application/json") + th.TestJSONRequest(t, r, ` + { + "security_group": { + "name": "new-webservers", + "description": "security group for webservers" + } + } + `) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusCreated) + + fmt.Fprintf(w, ` + { + "security_group": { + "description": "security group for webservers", + "id": "2076db17-a522-4506-91de-c6dd8e837028", + "name": "new-webservers", + "security_group_rules": [ + { + "direction": "egress", + "ethertype": "IPv4", + "id": "38ce2d8e-e8f1-48bd-83c2-d33cb9f50c3d", + "port_range_max": null, + "port_range_min": null, + "protocol": null, + "remote_group_id": null, + "remote_ip_prefix": null, + "security_group_id": "2076db17-a522-4506-91de-c6dd8e837028", + "tenant_id": "e4f50856753b4dc6afee5fa6b9b6c550" + }, + { + "direction": "egress", + "ethertype": "IPv6", + "id": "565b9502-12de-4ffd-91e9-68885cff6ae1", + "port_range_max": null, + "port_range_min": null, + "protocol": null, + "remote_group_id": null, + "remote_ip_prefix": null, + 
"security_group_id": "2076db17-a522-4506-91de-c6dd8e837028", + "tenant_id": "e4f50856753b4dc6afee5fa6b9b6c550" + } + ], + "tenant_id": "e4f50856753b4dc6afee5fa6b9b6c550" + } + } + `) + }) + + opts := osGroups.CreateOpts{Name: "new-webservers", Description: "security group for webservers"} + _, err := Create(fake.ServiceClient(), opts).Extract() + th.AssertNoErr(t, err) +} + +func TestGet(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + th.Mux.HandleFunc("/v2.0/security-groups/85cc3048-abc3-43cc-89b3-377341426ac5", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + + fmt.Fprintf(w, ` + { + "security_group": { + "description": "default", + "id": "85cc3048-abc3-43cc-89b3-377341426ac5", + "name": "default", + "security_group_rules": [ + { + "direction": "egress", + "ethertype": "IPv6", + "id": "3c0e45ff-adaf-4124-b083-bf390e5482ff", + "port_range_max": null, + "port_range_min": null, + "protocol": null, + "remote_group_id": null, + "remote_ip_prefix": null, + "security_group_id": "85cc3048-abc3-43cc-89b3-377341426ac5", + "tenant_id": "e4f50856753b4dc6afee5fa6b9b6c550" + }, + { + "direction": "egress", + "ethertype": "IPv4", + "id": "93aa42e5-80db-4581-9391-3a608bd0e448", + "port_range_max": null, + "port_range_min": null, + "protocol": null, + "remote_group_id": null, + "remote_ip_prefix": null, + "security_group_id": "85cc3048-abc3-43cc-89b3-377341426ac5", + "tenant_id": "e4f50856753b4dc6afee5fa6b9b6c550" + } + ], + "tenant_id": "e4f50856753b4dc6afee5fa6b9b6c550" + } + } + `) + }) + + sg, err := Get(fake.ServiceClient(), "85cc3048-abc3-43cc-89b3-377341426ac5").Extract() + th.AssertNoErr(t, err) + + th.AssertEquals(t, "default", sg.Description) + th.AssertEquals(t, "85cc3048-abc3-43cc-89b3-377341426ac5", sg.ID) + th.AssertEquals(t, "default", sg.Name) + th.AssertEquals(t, 2, len(sg.Rules)) + th.AssertEquals(t, "e4f50856753b4dc6afee5fa6b9b6c550", sg.TenantID) +} + +func TestDelete(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + th.Mux.HandleFunc("/v2.0/security-groups/4ec89087-d057-4e2c-911f-60a3b47ee304", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "DELETE") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + w.WriteHeader(http.StatusNoContent) + }) + + res := Delete(fake.ServiceClient(), "4ec89087-d057-4e2c-911f-60a3b47ee304") + th.AssertNoErr(t, res.Err) +} diff --git a/vendor/github.com/rackspace/gophercloud/rackspace/networking/v2/security/rules/delegate.go b/vendor/github.com/rackspace/gophercloud/rackspace/networking/v2/security/rules/delegate.go new file mode 100644 index 000000000..23b4b318e --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/rackspace/networking/v2/security/rules/delegate.go @@ -0,0 +1,30 @@ +package rules + +import ( + "github.com/rackspace/gophercloud" + os "github.com/rackspace/gophercloud/openstack/networking/v2/extensions/security/rules" + "github.com/rackspace/gophercloud/pagination" +) + +// List returns a Pager which allows you to iterate over a collection of +// security group rules. It accepts a ListOpts struct, which allows you to filter +// and sort the returned collection for greater efficiency. 
+func List(c *gophercloud.ServiceClient, opts os.ListOpts) pagination.Pager {
+	return os.List(c, opts)
+}
+
+// Create is an operation which adds a new security group rule and associates
+// it with an existing security group (whose ID is specified in CreateOpts).
+func Create(c *gophercloud.ServiceClient, opts os.CreateOpts) os.CreateResult {
+	return os.Create(c, opts)
+}
+
+// Get retrieves a particular security group rule based on its unique ID.
+func Get(c *gophercloud.ServiceClient, id string) os.GetResult {
+	return os.Get(c, id)
+}
+
+// Delete will permanently delete a particular security group rule based on its unique ID.
+func Delete(c *gophercloud.ServiceClient, id string) os.DeleteResult {
+	return os.Delete(c, id)
+}
diff --git a/vendor/github.com/rackspace/gophercloud/rackspace/networking/v2/security/rules/delegate_test.go b/vendor/github.com/rackspace/gophercloud/rackspace/networking/v2/security/rules/delegate_test.go
new file mode 100644
index 000000000..3563fbeaa
--- /dev/null
+++ b/vendor/github.com/rackspace/gophercloud/rackspace/networking/v2/security/rules/delegate_test.go
@@ -0,0 +1,236 @@
+package rules
+
+import (
+	"fmt"
+	"net/http"
+	"testing"
+
+	fake "github.com/rackspace/gophercloud/openstack/networking/v2/common"
+	osRules "github.com/rackspace/gophercloud/openstack/networking/v2/extensions/security/rules"
+	"github.com/rackspace/gophercloud/pagination"
+	th "github.com/rackspace/gophercloud/testhelper"
+)
+
+func TestList(t *testing.T) {
+	th.SetupHTTP()
+	defer th.TeardownHTTP()
+
+	th.Mux.HandleFunc("/v2.0/security-group-rules", func(w http.ResponseWriter, r *http.Request) {
+		th.TestMethod(t, r, "GET")
+		th.TestHeader(t, r, "X-Auth-Token", fake.TokenID)
+
+		w.Header().Add("Content-Type", "application/json")
+		w.WriteHeader(http.StatusOK)
+
+		fmt.Fprintf(w, `
+{
+    "security_group_rules": [
+        {
+            "direction": "egress",
+            "ethertype": "IPv6",
+            "id": "3c0e45ff-adaf-4124-b083-bf390e5482ff",
+            "port_range_max": null,
+            "port_range_min": null,
+            "protocol": null,
+            "remote_group_id": null,
+            "remote_ip_prefix": null,
+            "security_group_id": "85cc3048-abc3-43cc-89b3-377341426ac5",
+            "tenant_id": "e4f50856753b4dc6afee5fa6b9b6c550"
+        },
+        {
+            "direction": "egress",
+            "ethertype": "IPv4",
+            "id": "93aa42e5-80db-4581-9391-3a608bd0e448",
+            "port_range_max": null,
+            "port_range_min": null,
+            "protocol": null,
+            "remote_group_id": null,
+            "remote_ip_prefix": null,
+            "security_group_id": "85cc3048-abc3-43cc-89b3-377341426ac5",
+            "tenant_id": "e4f50856753b4dc6afee5fa6b9b6c550"
+        }
+    ]
+}
+        `)
+	})
+
+	count := 0
+
+	List(fake.ServiceClient(), osRules.ListOpts{}).EachPage(func(page pagination.Page) (bool, error) {
+		count++
+		actual, err := osRules.ExtractRules(page)
+		if err != nil {
+			t.Errorf("Failed to extract secrules: %v", err)
+			return false, err
+		}
+
+		expected := []osRules.SecGroupRule{
+			osRules.SecGroupRule{
+				Direction:      "egress",
+				EtherType:      "IPv6",
+				ID:             "3c0e45ff-adaf-4124-b083-bf390e5482ff",
+				PortRangeMax:   0,
+				PortRangeMin:   0,
+				Protocol:       "",
+				RemoteGroupID:  "",
+				RemoteIPPrefix: "",
+				SecGroupID:     "85cc3048-abc3-43cc-89b3-377341426ac5",
+				TenantID:       "e4f50856753b4dc6afee5fa6b9b6c550",
+			},
+			osRules.SecGroupRule{
+				Direction:      "egress",
+				EtherType:      "IPv4",
+				ID:             "93aa42e5-80db-4581-9391-3a608bd0e448",
+				PortRangeMax:   0,
+				PortRangeMin:   0,
+				Protocol:       "",
+				RemoteGroupID:  "",
+				RemoteIPPrefix: "",
+				SecGroupID:     "85cc3048-abc3-43cc-89b3-377341426ac5",
+				TenantID:       "e4f50856753b4dc6afee5fa6b9b6c550",
+			},
+		}
+
+		th.CheckDeepEquals(t, expected, actual)
+
+		return true, nil
+	})
+
+
if count != 1 { + t.Errorf("Expected 1 page, got %d", count) + } +} + +func TestCreate(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + th.Mux.HandleFunc("/v2.0/security-group-rules", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "POST") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + th.TestHeader(t, r, "Content-Type", "application/json") + th.TestHeader(t, r, "Accept", "application/json") + th.TestJSONRequest(t, r, ` + { + "security_group_rule": { + "direction": "ingress", + "port_range_min": 80, + "ethertype": "IPv4", + "port_range_max": 80, + "protocol": "tcp", + "remote_group_id": "85cc3048-abc3-43cc-89b3-377341426ac5", + "security_group_id": "a7734e61-b545-452d-a3cd-0189cbd9747a" + } + } + `) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusCreated) + + fmt.Fprintf(w, ` + { + "security_group_rule": { + "direction": "ingress", + "ethertype": "IPv4", + "id": "2bc0accf-312e-429a-956e-e4407625eb62", + "port_range_max": 80, + "port_range_min": 80, + "protocol": "tcp", + "remote_group_id": "85cc3048-abc3-43cc-89b3-377341426ac5", + "remote_ip_prefix": null, + "security_group_id": "a7734e61-b545-452d-a3cd-0189cbd9747a", + "tenant_id": "e4f50856753b4dc6afee5fa6b9b6c550" + } + } + `) + }) + + opts := osRules.CreateOpts{ + Direction: "ingress", + PortRangeMin: 80, + EtherType: "IPv4", + PortRangeMax: 80, + Protocol: "tcp", + RemoteGroupID: "85cc3048-abc3-43cc-89b3-377341426ac5", + SecGroupID: "a7734e61-b545-452d-a3cd-0189cbd9747a", + } + _, err := Create(fake.ServiceClient(), opts).Extract() + th.AssertNoErr(t, err) +} + +func TestRequiredCreateOpts(t *testing.T) { + res := Create(fake.ServiceClient(), osRules.CreateOpts{Direction: "something"}) + if res.Err == nil { + t.Fatalf("Expected error, got none") + } + res = Create(fake.ServiceClient(), osRules.CreateOpts{Direction: osRules.DirIngress, EtherType: "something"}) + if res.Err == nil { + t.Fatalf("Expected error, got none") + } + res = Create(fake.ServiceClient(), osRules.CreateOpts{Direction: osRules.DirIngress, EtherType: osRules.Ether4}) + if res.Err == nil { + t.Fatalf("Expected error, got none") + } + res = Create(fake.ServiceClient(), osRules.CreateOpts{Direction: osRules.DirIngress, EtherType: osRules.Ether4, SecGroupID: "something", Protocol: "foo"}) + if res.Err == nil { + t.Fatalf("Expected error, got none") + } +} + +func TestGet(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + th.Mux.HandleFunc("/v2.0/security-group-rules/3c0e45ff-adaf-4124-b083-bf390e5482ff", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + + fmt.Fprintf(w, ` + { + "security_group_rule": { + "direction": "egress", + "ethertype": "IPv6", + "id": "3c0e45ff-adaf-4124-b083-bf390e5482ff", + "port_range_max": null, + "port_range_min": null, + "protocol": null, + "remote_group_id": null, + "remote_ip_prefix": null, + "security_group_id": "85cc3048-abc3-43cc-89b3-377341426ac5", + "tenant_id": "e4f50856753b4dc6afee5fa6b9b6c550" + } + } + `) + }) + + sr, err := Get(fake.ServiceClient(), "3c0e45ff-adaf-4124-b083-bf390e5482ff").Extract() + th.AssertNoErr(t, err) + + th.AssertEquals(t, "egress", sr.Direction) + th.AssertEquals(t, "IPv6", sr.EtherType) + th.AssertEquals(t, "3c0e45ff-adaf-4124-b083-bf390e5482ff", sr.ID) + th.AssertEquals(t, 0, sr.PortRangeMax) + th.AssertEquals(t, 0, sr.PortRangeMin) + th.AssertEquals(t, 
"", sr.Protocol) + th.AssertEquals(t, "", sr.RemoteGroupID) + th.AssertEquals(t, "", sr.RemoteIPPrefix) + th.AssertEquals(t, "85cc3048-abc3-43cc-89b3-377341426ac5", sr.SecGroupID) + th.AssertEquals(t, "e4f50856753b4dc6afee5fa6b9b6c550", sr.TenantID) +} + +func TestDelete(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + th.Mux.HandleFunc("/v2.0/security-group-rules/4ec89087-d057-4e2c-911f-60a3b47ee304", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "DELETE") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + w.WriteHeader(http.StatusNoContent) + }) + + res := Delete(fake.ServiceClient(), "4ec89087-d057-4e2c-911f-60a3b47ee304") + th.AssertNoErr(t, res.Err) +} diff --git a/vendor/github.com/rackspace/gophercloud/rackspace/networking/v2/subnets/delegate.go b/vendor/github.com/rackspace/gophercloud/rackspace/networking/v2/subnets/delegate.go new file mode 100644 index 000000000..a7fb7bb15 --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/rackspace/networking/v2/subnets/delegate.go @@ -0,0 +1,40 @@ +package subnets + +import ( + "github.com/rackspace/gophercloud" + os "github.com/rackspace/gophercloud/openstack/networking/v2/subnets" + "github.com/rackspace/gophercloud/pagination" +) + +// List returns a Pager which allows you to iterate over a collection of +// subnets. It accepts a ListOpts struct, which allows you to filter and sort +// the returned collection for greater efficiency. +// +// Default policy settings return only those subnets that are owned by the tenant +// who submits the request, unless the request is submitted by a user with +// administrative rights. +func List(c *gophercloud.ServiceClient, opts os.ListOptsBuilder) pagination.Pager { + return os.List(c, opts) +} + +// Get retrieves a specific subnet based on its unique ID. +func Get(c *gophercloud.ServiceClient, networkID string) os.GetResult { + return os.Get(c, networkID) +} + +// Create accepts a CreateOpts struct and creates a new subnet using the values +// provided. You must remember to provide a valid NetworkID, CIDR and IP version. +func Create(c *gophercloud.ServiceClient, opts os.CreateOptsBuilder) os.CreateResult { + return os.Create(c, opts) +} + +// Update accepts a UpdateOpts struct and updates an existing subnet using the +// values provided. +func Update(c *gophercloud.ServiceClient, networkID string, opts os.UpdateOptsBuilder) os.UpdateResult { + return os.Update(c, networkID, opts) +} + +// Delete accepts a unique ID and deletes the subnet associated with it. 
+func Delete(c *gophercloud.ServiceClient, networkID string) os.DeleteResult { + return os.Delete(c, networkID) +} diff --git a/vendor/github.com/rackspace/gophercloud/rackspace/networking/v2/subnets/delegate_test.go b/vendor/github.com/rackspace/gophercloud/rackspace/networking/v2/subnets/delegate_test.go new file mode 100644 index 000000000..fafc6fb30 --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/rackspace/networking/v2/subnets/delegate_test.go @@ -0,0 +1,363 @@ +package subnets + +import ( + "fmt" + "net/http" + "testing" + + os "github.com/rackspace/gophercloud/openstack/networking/v2/subnets" + "github.com/rackspace/gophercloud/pagination" + fake "github.com/rackspace/gophercloud/rackspace/networking/v2/common" + th "github.com/rackspace/gophercloud/testhelper" +) + +func TestList(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + th.Mux.HandleFunc("/subnets", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + + fmt.Fprintf(w, ` +{ + "subnets": [ + { + "name": "private-subnet", + "enable_dhcp": true, + "network_id": "db193ab3-96e3-4cb3-8fc5-05f4296d0324", + "tenant_id": "26a7980765d0414dbc1fc1f88cdb7e6e", + "dns_nameservers": [], + "allocation_pools": [ + { + "start": "10.0.0.2", + "end": "10.0.0.254" + } + ], + "host_routes": [], + "ip_version": 4, + "gateway_ip": "10.0.0.1", + "cidr": "10.0.0.0/24", + "id": "08eae331-0402-425a-923c-34f7cfe39c1b" + }, + { + "name": "my_subnet", + "enable_dhcp": true, + "network_id": "d32019d3-bc6e-4319-9c1d-6722fc136a22", + "tenant_id": "4fd44f30292945e481c7b8a0c8908869", + "dns_nameservers": [], + "allocation_pools": [ + { + "start": "192.0.0.2", + "end": "192.255.255.254" + } + ], + "host_routes": [], + "ip_version": 4, + "gateway_ip": "192.0.0.1", + "cidr": "192.0.0.0/8", + "id": "54d6f61d-db07-451c-9ab3-b9609b6b6f0b" + } + ] +} + `) + }) + + count := 0 + + List(fake.ServiceClient(), os.ListOpts{}).EachPage(func(page pagination.Page) (bool, error) { + count++ + actual, err := os.ExtractSubnets(page) + if err != nil { + t.Errorf("Failed to extract subnets: %v", err) + return false, nil + } + + expected := []os.Subnet{ + os.Subnet{ + Name: "private-subnet", + EnableDHCP: true, + NetworkID: "db193ab3-96e3-4cb3-8fc5-05f4296d0324", + TenantID: "26a7980765d0414dbc1fc1f88cdb7e6e", + DNSNameservers: []string{}, + AllocationPools: []os.AllocationPool{ + os.AllocationPool{ + Start: "10.0.0.2", + End: "10.0.0.254", + }, + }, + HostRoutes: []os.HostRoute{}, + IPVersion: 4, + GatewayIP: "10.0.0.1", + CIDR: "10.0.0.0/24", + ID: "08eae331-0402-425a-923c-34f7cfe39c1b", + }, + os.Subnet{ + Name: "my_subnet", + EnableDHCP: true, + NetworkID: "d32019d3-bc6e-4319-9c1d-6722fc136a22", + TenantID: "4fd44f30292945e481c7b8a0c8908869", + DNSNameservers: []string{}, + AllocationPools: []os.AllocationPool{ + os.AllocationPool{ + Start: "192.0.0.2", + End: "192.255.255.254", + }, + }, + HostRoutes: []os.HostRoute{}, + IPVersion: 4, + GatewayIP: "192.0.0.1", + CIDR: "192.0.0.0/8", + ID: "54d6f61d-db07-451c-9ab3-b9609b6b6f0b", + }, + } + + th.CheckDeepEquals(t, expected, actual) + + return true, nil + }) + + if count != 1 { + t.Errorf("Expected 1 page, got %d", count) + } +} + +func TestGet(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + th.Mux.HandleFunc("/subnets/54d6f61d-db07-451c-9ab3-b9609b6b6f0b", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, 
"GET") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + + fmt.Fprintf(w, ` +{ + "subnet": { + "name": "my_subnet", + "enable_dhcp": true, + "network_id": "d32019d3-bc6e-4319-9c1d-6722fc136a22", + "tenant_id": "4fd44f30292945e481c7b8a0c8908869", + "dns_nameservers": [], + "allocation_pools": [ + { + "start": "192.0.0.2", + "end": "192.255.255.254" + } + ], + "host_routes": [], + "ip_version": 4, + "gateway_ip": "192.0.0.1", + "cidr": "192.0.0.0/8", + "id": "54d6f61d-db07-451c-9ab3-b9609b6b6f0b" + } +} + `) + }) + + s, err := Get(fake.ServiceClient(), "54d6f61d-db07-451c-9ab3-b9609b6b6f0b").Extract() + th.AssertNoErr(t, err) + + th.AssertEquals(t, s.Name, "my_subnet") + th.AssertEquals(t, s.EnableDHCP, true) + th.AssertEquals(t, s.NetworkID, "d32019d3-bc6e-4319-9c1d-6722fc136a22") + th.AssertEquals(t, s.TenantID, "4fd44f30292945e481c7b8a0c8908869") + th.AssertDeepEquals(t, s.DNSNameservers, []string{}) + th.AssertDeepEquals(t, s.AllocationPools, []os.AllocationPool{ + os.AllocationPool{ + Start: "192.0.0.2", + End: "192.255.255.254", + }, + }) + th.AssertDeepEquals(t, s.HostRoutes, []os.HostRoute{}) + th.AssertEquals(t, s.IPVersion, 4) + th.AssertEquals(t, s.GatewayIP, "192.0.0.1") + th.AssertEquals(t, s.CIDR, "192.0.0.0/8") + th.AssertEquals(t, s.ID, "54d6f61d-db07-451c-9ab3-b9609b6b6f0b") +} + +func TestCreate(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + th.Mux.HandleFunc("/subnets", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "POST") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + th.TestHeader(t, r, "Content-Type", "application/json") + th.TestHeader(t, r, "Accept", "application/json") + th.TestJSONRequest(t, r, ` +{ + "subnet": { + "network_id": "d32019d3-bc6e-4319-9c1d-6722fc136a22", + "ip_version": 4, + "cidr": "192.168.199.0/24", + "dns_nameservers": ["foo"], + "allocation_pools": [ + { + "start": "192.168.199.2", + "end": "192.168.199.254" + } + ], + "host_routes": [{"destination":"","nexthop": "bar"}] + } +} + `) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusCreated) + + fmt.Fprintf(w, ` +{ + "subnet": { + "name": "", + "enable_dhcp": true, + "network_id": "d32019d3-bc6e-4319-9c1d-6722fc136a22", + "tenant_id": "4fd44f30292945e481c7b8a0c8908869", + "dns_nameservers": [], + "allocation_pools": [ + { + "start": "192.168.199.2", + "end": "192.168.199.254" + } + ], + "host_routes": [], + "ip_version": 4, + "gateway_ip": "192.168.199.1", + "cidr": "192.168.199.0/24", + "id": "3b80198d-4f7b-4f77-9ef5-774d54e17126" + } +} + `) + }) + + opts := os.CreateOpts{ + NetworkID: "d32019d3-bc6e-4319-9c1d-6722fc136a22", + IPVersion: 4, + CIDR: "192.168.199.0/24", + AllocationPools: []os.AllocationPool{ + os.AllocationPool{ + Start: "192.168.199.2", + End: "192.168.199.254", + }, + }, + DNSNameservers: []string{"foo"}, + HostRoutes: []os.HostRoute{ + os.HostRoute{NextHop: "bar"}, + }, + } + s, err := Create(fake.ServiceClient(), opts).Extract() + th.AssertNoErr(t, err) + + th.AssertEquals(t, s.Name, "") + th.AssertEquals(t, s.EnableDHCP, true) + th.AssertEquals(t, s.NetworkID, "d32019d3-bc6e-4319-9c1d-6722fc136a22") + th.AssertEquals(t, s.TenantID, "4fd44f30292945e481c7b8a0c8908869") + th.AssertDeepEquals(t, s.DNSNameservers, []string{}) + th.AssertDeepEquals(t, s.AllocationPools, []os.AllocationPool{ + os.AllocationPool{ + Start: "192.168.199.2", + End: "192.168.199.254", + }, + }) + th.AssertDeepEquals(t, s.HostRoutes, 
[]os.HostRoute{}) + th.AssertEquals(t, s.IPVersion, 4) + th.AssertEquals(t, s.GatewayIP, "192.168.199.1") + th.AssertEquals(t, s.CIDR, "192.168.199.0/24") + th.AssertEquals(t, s.ID, "3b80198d-4f7b-4f77-9ef5-774d54e17126") +} + +func TestRequiredCreateOpts(t *testing.T) { + res := Create(fake.ServiceClient(), os.CreateOpts{}) + if res.Err == nil { + t.Fatalf("Expected error, got none") + } + + res = Create(fake.ServiceClient(), os.CreateOpts{NetworkID: "foo"}) + if res.Err == nil { + t.Fatalf("Expected error, got none") + } + + res = Create(fake.ServiceClient(), os.CreateOpts{NetworkID: "foo", CIDR: "bar", IPVersion: 40}) + if res.Err == nil { + t.Fatalf("Expected error, got none") + } +} + +func TestUpdate(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + th.Mux.HandleFunc("/subnets/08eae331-0402-425a-923c-34f7cfe39c1b", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "PUT") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + th.TestHeader(t, r, "Content-Type", "application/json") + th.TestHeader(t, r, "Accept", "application/json") + th.TestJSONRequest(t, r, ` +{ + "subnet": { + "name": "my_new_subnet", + "dns_nameservers": ["foo"], + "host_routes": [{"destination":"","nexthop": "bar"}] + } +} + `) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusCreated) + + fmt.Fprintf(w, ` +{ + "subnet": { + "name": "my_new_subnet", + "enable_dhcp": true, + "network_id": "db193ab3-96e3-4cb3-8fc5-05f4296d0324", + "tenant_id": "26a7980765d0414dbc1fc1f88cdb7e6e", + "dns_nameservers": [], + "allocation_pools": [ + { + "start": "10.0.0.2", + "end": "10.0.0.254" + } + ], + "host_routes": [], + "ip_version": 4, + "gateway_ip": "10.0.0.1", + "cidr": "10.0.0.0/24", + "id": "08eae331-0402-425a-923c-34f7cfe39c1b" + } +} + `) + }) + + opts := os.UpdateOpts{ + Name: "my_new_subnet", + DNSNameservers: []string{"foo"}, + HostRoutes: []os.HostRoute{ + os.HostRoute{NextHop: "bar"}, + }, + } + s, err := Update(fake.ServiceClient(), "08eae331-0402-425a-923c-34f7cfe39c1b", opts).Extract() + th.AssertNoErr(t, err) + + th.AssertEquals(t, s.Name, "my_new_subnet") + th.AssertEquals(t, s.ID, "08eae331-0402-425a-923c-34f7cfe39c1b") +} + +func TestDelete(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + th.Mux.HandleFunc("/subnets/08eae331-0402-425a-923c-34f7cfe39c1b", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "DELETE") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + w.WriteHeader(http.StatusNoContent) + }) + + res := Delete(fake.ServiceClient(), "08eae331-0402-425a-923c-34f7cfe39c1b") + th.AssertNoErr(t, res.Err) +} diff --git a/vendor/github.com/rackspace/gophercloud/rackspace/objectstorage/v1/accounts/delegate.go b/vendor/github.com/rackspace/gophercloud/rackspace/objectstorage/v1/accounts/delegate.go new file mode 100644 index 000000000..94739308f --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/rackspace/objectstorage/v1/accounts/delegate.go @@ -0,0 +1,39 @@ +package accounts + +import ( + "github.com/rackspace/gophercloud" + os "github.com/rackspace/gophercloud/openstack/objectstorage/v1/accounts" +) + +// Get is a function that retrieves an account's metadata. To extract just the +// custom metadata, call the ExtractMetadata method on the GetResult. To extract +// all the headers that are returned (including the metadata), call the +// ExtractHeader method on the GetResult. 
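+//
+// For example (a sketch; client is an assumed authenticated
+// *gophercloud.ServiceClient for Cloud Files):
+//
+//	metadata, err := Get(client).ExtractMetadata()
+//	if err == nil {
+//		for k, v := range metadata {
+//			fmt.Printf("%s: %s\n", k, v)
+//		}
+//	}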
+func Get(c *gophercloud.ServiceClient) os.GetResult { + return os.Get(c, nil) +} + +// UpdateOpts is a structure that contains parameters for updating, creating, or +// deleting an account's metadata. +type UpdateOpts struct { + Metadata map[string]string + TempURLKey string `h:"X-Account-Meta-Temp-URL-Key"` + TempURLKey2 string `h:"X-Account-Meta-Temp-URL-Key-2"` +} + +// ToAccountUpdateMap formats an UpdateOpts into a map[string]string of headers. +func (opts UpdateOpts) ToAccountUpdateMap() (map[string]string, error) { + headers, err := gophercloud.BuildHeaders(opts) + if err != nil { + return nil, err + } + for k, v := range opts.Metadata { + headers["X-Account-Meta-"+k] = v + } + return headers, err +} + +// Update will update an account's metadata with the Metadata in the UpdateOptsBuilder. +func Update(c *gophercloud.ServiceClient, opts os.UpdateOptsBuilder) os.UpdateResult { + return os.Update(c, opts) +} diff --git a/vendor/github.com/rackspace/gophercloud/rackspace/objectstorage/v1/accounts/delegate_test.go b/vendor/github.com/rackspace/gophercloud/rackspace/objectstorage/v1/accounts/delegate_test.go new file mode 100644 index 000000000..a1ea98bb7 --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/rackspace/objectstorage/v1/accounts/delegate_test.go @@ -0,0 +1,32 @@ +package accounts + +import ( + "testing" + + os "github.com/rackspace/gophercloud/openstack/objectstorage/v1/accounts" + th "github.com/rackspace/gophercloud/testhelper" + fake "github.com/rackspace/gophercloud/testhelper/client" +) + +func TestUpdateAccounts(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + os.HandleUpdateAccountSuccessfully(t) + + options := &UpdateOpts{Metadata: map[string]string{"gophercloud-test": "accounts"}} + res := Update(fake.ServiceClient(), options) + th.CheckNoErr(t, res.Err) +} + +func TestGetAccounts(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + os.HandleGetAccountSuccessfully(t) + + expected := map[string]string{"Foo": "bar"} + actual, err := Get(fake.ServiceClient()).ExtractMetadata() + th.CheckNoErr(t, err) + th.CheckDeepEquals(t, expected, actual) +} diff --git a/vendor/github.com/rackspace/gophercloud/rackspace/objectstorage/v1/accounts/doc.go b/vendor/github.com/rackspace/gophercloud/rackspace/objectstorage/v1/accounts/doc.go new file mode 100644 index 000000000..293a93088 --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/rackspace/objectstorage/v1/accounts/doc.go @@ -0,0 +1,3 @@ +// Package accounts provides information and interaction with the account +// API resource for the Rackspace Cloud Files service. +package accounts diff --git a/vendor/github.com/rackspace/gophercloud/rackspace/objectstorage/v1/bulk/doc.go b/vendor/github.com/rackspace/gophercloud/rackspace/objectstorage/v1/bulk/doc.go new file mode 100644 index 000000000..9c89e22b2 --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/rackspace/objectstorage/v1/bulk/doc.go @@ -0,0 +1,3 @@ +// Package bulk provides functionality for working with bulk operations in the +// Rackspace Cloud Files service. 
+package bulk diff --git a/vendor/github.com/rackspace/gophercloud/rackspace/objectstorage/v1/bulk/requests.go b/vendor/github.com/rackspace/gophercloud/rackspace/objectstorage/v1/bulk/requests.go new file mode 100644 index 000000000..0aeec155b --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/rackspace/objectstorage/v1/bulk/requests.go @@ -0,0 +1,51 @@ +package bulk + +import ( + "net/url" + "strings" + + "github.com/rackspace/gophercloud" +) + +// DeleteOptsBuilder allows extensions to add additional parameters to the +// Delete request. +type DeleteOptsBuilder interface { + ToBulkDeleteBody() (string, error) +} + +// DeleteOpts is a structure that holds parameters for deleting an object. +type DeleteOpts []string + +// ToBulkDeleteBody formats a DeleteOpts into a request body. +func (opts DeleteOpts) ToBulkDeleteBody() (string, error) { + return url.QueryEscape(strings.Join(opts, "\n")), nil +} + +// Delete will delete objects or containers in bulk. +func Delete(c *gophercloud.ServiceClient, opts DeleteOptsBuilder) DeleteResult { + var res DeleteResult + + if opts == nil { + return res + } + + reqString, err := opts.ToBulkDeleteBody() + if err != nil { + res.Err = err + return res + } + + reqBody := strings.NewReader(reqString) + + resp, err := c.Request("DELETE", deleteURL(c), gophercloud.RequestOpts{ + MoreHeaders: map[string]string{"Content-Type": "text/plain"}, + OkCodes: []int{200}, + JSONBody: reqBody, + JSONResponse: &res.Body, + }) + if resp != nil { + res.Header = resp.Header + } + res.Err = err + return res +} diff --git a/vendor/github.com/rackspace/gophercloud/rackspace/objectstorage/v1/bulk/requests_test.go b/vendor/github.com/rackspace/gophercloud/rackspace/objectstorage/v1/bulk/requests_test.go new file mode 100644 index 000000000..8b5578e91 --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/rackspace/objectstorage/v1/bulk/requests_test.go @@ -0,0 +1,36 @@ +package bulk + +import ( + "fmt" + "net/http" + "testing" + + th "github.com/rackspace/gophercloud/testhelper" + fake "github.com/rackspace/gophercloud/testhelper/client" +) + +func TestBulkDelete(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + th.Mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "DELETE") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + th.AssertEquals(t, r.URL.RawQuery, "bulk-delete") + + w.WriteHeader(http.StatusOK) + fmt.Fprintf(w, ` + { + "Number Not Found": 1, + "Response Status": "200 OK", + "Errors": [], + "Number Deleted": 1, + "Response Body": "" + } + `) + }) + + options := DeleteOpts{"gophercloud-testcontainer1", "gophercloud-testcontainer2"} + actual, err := Delete(fake.ServiceClient(), options).ExtractBody() + th.AssertNoErr(t, err) + th.AssertEquals(t, actual.NumberDeleted, 1) +} diff --git a/vendor/github.com/rackspace/gophercloud/rackspace/objectstorage/v1/bulk/results.go b/vendor/github.com/rackspace/gophercloud/rackspace/objectstorage/v1/bulk/results.go new file mode 100644 index 000000000..fddc125ac --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/rackspace/objectstorage/v1/bulk/results.go @@ -0,0 +1,28 @@ +package bulk + +import ( + "github.com/rackspace/gophercloud" + + "github.com/mitchellh/mapstructure" +) + +// DeleteResult represents the result of a bulk delete operation. +type DeleteResult struct { + gophercloud.Result +} + +// DeleteRespBody is the form of the response body returned by a bulk delete request. 
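+//
+// A typical flow (sketch; client is an assumed authenticated
+// *gophercloud.ServiceClient for the object store):
+//
+//	body, err := Delete(client, DeleteOpts{"container1", "container2/object1"}).ExtractBody()
+//	if err == nil {
+//		fmt.Printf("deleted %d, not found %d\n", body.NumberDeleted, body.NumberNotFound)
+//	}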
+type DeleteRespBody struct {
+	NumberNotFound int      `mapstructure:"Number Not Found"`
+	ResponseStatus string   `mapstructure:"Response Status"`
+	Errors         []string `mapstructure:"Errors"`
+	NumberDeleted  int      `mapstructure:"Number Deleted"`
+	ResponseBody   string   `mapstructure:"Response Body"`
+}
+
+// ExtractBody will extract the body returned by the bulk delete request.
+func (dr DeleteResult) ExtractBody() (DeleteRespBody, error) {
+	var resp DeleteRespBody
+	err := mapstructure.Decode(dr.Body, &resp)
+	return resp, err
+}
diff --git a/vendor/github.com/rackspace/gophercloud/rackspace/objectstorage/v1/bulk/urls.go b/vendor/github.com/rackspace/gophercloud/rackspace/objectstorage/v1/bulk/urls.go
new file mode 100644
index 000000000..2e112033b
--- /dev/null
+++ b/vendor/github.com/rackspace/gophercloud/rackspace/objectstorage/v1/bulk/urls.go
@@ -0,0 +1,11 @@
+package bulk
+
+import "github.com/rackspace/gophercloud"
+
+func deleteURL(c *gophercloud.ServiceClient) string {
+	return c.Endpoint + "?bulk-delete"
+}
+
+func extractURL(c *gophercloud.ServiceClient, ext string) string {
+	return c.Endpoint + "?extract-archive=" + ext
+}
diff --git a/vendor/github.com/rackspace/gophercloud/rackspace/objectstorage/v1/bulk/urls_test.go b/vendor/github.com/rackspace/gophercloud/rackspace/objectstorage/v1/bulk/urls_test.go
new file mode 100644
index 000000000..9169e52f1
--- /dev/null
+++ b/vendor/github.com/rackspace/gophercloud/rackspace/objectstorage/v1/bulk/urls_test.go
@@ -0,0 +1,26 @@
+package bulk
+
+import (
+	"testing"
+
+	"github.com/rackspace/gophercloud"
+	th "github.com/rackspace/gophercloud/testhelper"
+)
+
+const endpoint = "http://localhost:57909/"
+
+func endpointClient() *gophercloud.ServiceClient {
+	return &gophercloud.ServiceClient{Endpoint: endpoint}
+}
+
+func TestDeleteURL(t *testing.T) {
+	actual := deleteURL(endpointClient())
+	expected := endpoint + "?bulk-delete"
+	th.CheckEquals(t, expected, actual)
+}
+
+func TestExtractURL(t *testing.T) {
+	actual := extractURL(endpointClient(), "tar")
+	expected := endpoint + "?extract-archive=tar"
+	th.CheckEquals(t, expected, actual)
+}
diff --git a/vendor/github.com/rackspace/gophercloud/rackspace/objectstorage/v1/cdncontainers/delegate.go b/vendor/github.com/rackspace/gophercloud/rackspace/objectstorage/v1/cdncontainers/delegate.go
new file mode 100644
index 000000000..89adb8396
--- /dev/null
+++ b/vendor/github.com/rackspace/gophercloud/rackspace/objectstorage/v1/cdncontainers/delegate.go
@@ -0,0 +1,38 @@
+package cdncontainers
+
+import (
+	"github.com/rackspace/gophercloud"
+	os "github.com/rackspace/gophercloud/openstack/objectstorage/v1/containers"
+	"github.com/rackspace/gophercloud/pagination"
+)
+
+// ExtractNames interprets a page of List results when just the container
+// names are requested.
+func ExtractNames(page pagination.Page) ([]string, error) {
+	return os.ExtractNames(page)
+}
+
+// ListOpts are options for listing Rackspace CDN containers.
+type ListOpts struct {
+	EndMarker string `q:"end_marker"`
+	Format    string `q:"format"`
+	Limit     int    `q:"limit"`
+	Marker    string `q:"marker"`
+}
+
+// ToContainerListParams formats a ListOpts into a query string and boolean
+// representing whether to list complete information for each container.
+func (opts ListOpts) ToContainerListParams() (bool, string, error) { + q, err := gophercloud.BuildQueryString(opts) + if err != nil { + return false, "", err + } + return false, q.String(), nil +} + +// List is a function that retrieves containers associated with the account as +// well as account metadata. It returns a pager which can be iterated with the +// EachPage function. +func List(c *gophercloud.ServiceClient, opts os.ListOptsBuilder) pagination.Pager { + return os.List(c, opts) +} diff --git a/vendor/github.com/rackspace/gophercloud/rackspace/objectstorage/v1/cdncontainers/delegate_test.go b/vendor/github.com/rackspace/gophercloud/rackspace/objectstorage/v1/cdncontainers/delegate_test.go new file mode 100644 index 000000000..02c3c5e15 --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/rackspace/objectstorage/v1/cdncontainers/delegate_test.go @@ -0,0 +1,50 @@ +package cdncontainers + +import ( + "testing" + + os "github.com/rackspace/gophercloud/openstack/objectstorage/v1/containers" + "github.com/rackspace/gophercloud/pagination" + th "github.com/rackspace/gophercloud/testhelper" + fake "github.com/rackspace/gophercloud/testhelper/client" +) + +func TestListCDNContainers(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + os.HandleListContainerNamesSuccessfully(t) + + count := 0 + err := List(fake.ServiceClient(), nil).EachPage(func(page pagination.Page) (bool, error) { + count++ + actual, err := ExtractNames(page) + th.AssertNoErr(t, err) + + th.CheckDeepEquals(t, os.ExpectedListNames, actual) + + return true, nil + }) + th.AssertNoErr(t, err) + th.CheckEquals(t, count, 1) +} + +func TestGetCDNContainer(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + os.HandleGetContainerSuccessfully(t) + + _, err := Get(fake.ServiceClient(), "testContainer").ExtractMetadata() + th.CheckNoErr(t, err) + +} + +func TestUpdateCDNContainer(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + os.HandleUpdateContainerSuccessfully(t) + + options := &UpdateOpts{TTL: 3600} + res := Update(fake.ServiceClient(), "testContainer", options) + th.CheckNoErr(t, res.Err) + +} diff --git a/vendor/github.com/rackspace/gophercloud/rackspace/objectstorage/v1/cdncontainers/doc.go b/vendor/github.com/rackspace/gophercloud/rackspace/objectstorage/v1/cdncontainers/doc.go new file mode 100644 index 000000000..7b0930eee --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/rackspace/objectstorage/v1/cdncontainers/doc.go @@ -0,0 +1,3 @@ +// Package cdncontainers provides information and interaction with the CDN +// Container API resource for the Rackspace Cloud Files service. +package cdncontainers diff --git a/vendor/github.com/rackspace/gophercloud/rackspace/objectstorage/v1/cdncontainers/requests.go b/vendor/github.com/rackspace/gophercloud/rackspace/objectstorage/v1/cdncontainers/requests.go new file mode 100644 index 000000000..6acebb0aa --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/rackspace/objectstorage/v1/cdncontainers/requests.go @@ -0,0 +1,161 @@ +package cdncontainers + +import ( + "strconv" + + "github.com/rackspace/gophercloud" +) + +// EnableOptsBuilder allows extensions to add additional parameters to the Enable +// request. +type EnableOptsBuilder interface { + ToCDNContainerEnableMap() (map[string]string, error) +} + +// EnableOpts is a structure that holds options for enabling a CDN container. +type EnableOpts struct { + // CDNEnabled indicates whether or not the container is CDN enabled. Set to + // `true` to enable the container. 
Note that changing this setting from true
+	// to false will disable the container in the CDN but only after the TTL has
+	// expired.
+	CDNEnabled bool `h:"X-Cdn-Enabled"`
+	// TTL is the time-to-live for the container (in seconds).
+	TTL int `h:"X-Ttl"`
+}
+
+// ToCDNContainerEnableMap formats an EnableOpts into a map of headers.
+func (opts EnableOpts) ToCDNContainerEnableMap() (map[string]string, error) {
+	h, err := gophercloud.BuildHeaders(opts)
+	if err != nil {
+		return nil, err
+	}
+	return h, nil
+}
+
+// Enable is a function that enables/disables a CDN container.
+func Enable(c *gophercloud.ServiceClient, containerName string, opts EnableOptsBuilder) EnableResult {
+	var res EnableResult
+	h := c.AuthenticatedHeaders()
+
+	if opts != nil {
+		headers, err := opts.ToCDNContainerEnableMap()
+		if err != nil {
+			res.Err = err
+			return res
+		}
+
+		for k, v := range headers {
+			h[k] = v
+		}
+	}
+
+	resp, err := c.Request("PUT", enableURL(c, containerName), gophercloud.RequestOpts{
+		MoreHeaders: h,
+		OkCodes:     []int{201, 202, 204},
+	})
+	if resp != nil {
+		res.Header = resp.Header
+	}
+	res.Err = err
+	return res
+}
+
+// Get is a function that retrieves the metadata of a container. To extract just
+// the custom metadata, pass the GetResult response to the ExtractMetadata
+// function.
+func Get(c *gophercloud.ServiceClient, containerName string) GetResult {
+	var res GetResult
+	resp, err := c.Request("HEAD", getURL(c, containerName), gophercloud.RequestOpts{
+		OkCodes: []int{200, 204},
+	})
+	if resp != nil {
+		res.Header = resp.Header
+	}
+	res.Err = err
+	return res
+}
+
+// State is the state of an option. It is a pointer to a boolean so that an
+// unset option has a zero value of nil and can be distinguished from false,
+// which is a valid option value.
+type State *bool
+
+var (
+	iTrue  = true
+	iFalse = false
+
+	// Enabled is used for a true value for options in request bodies.
+	Enabled State = &iTrue
+	// Disabled is used for a false value for options in request bodies.
+	Disabled State = &iFalse
+)
+
+// UpdateOptsBuilder allows extensions to add additional parameters to the
+// Update request.
+type UpdateOptsBuilder interface {
+	ToContainerUpdateMap() (map[string]string, error)
+}
+
+// UpdateOpts is a structure that holds parameters for updating, creating, or
+// deleting a container's metadata.
+type UpdateOpts struct {
+	// Whether or not to CDN-enable a container. Prefer using XCDNEnabled, which
+	// is of type *bool underneath.
+	// TODO v2.0: change type to Enabled/Disabled (*bool)
+	CDNEnabled bool `h:"X-Cdn-Enabled"`
+	// Whether or not to enable log retention. Prefer using XLogRetention, which
+	// is of type *bool underneath.
+	// TODO v2.0: change type to Enabled/Disabled (*bool)
+	LogRetention  bool `h:"X-Log-Retention"`
+	XCDNEnabled   *bool
+	XLogRetention *bool
+	TTL           int `h:"X-Ttl"`
+}
+
+// ToContainerUpdateMap formats an UpdateOpts into a map of headers.
+func (opts UpdateOpts) ToContainerUpdateMap() (map[string]string, error) {
+	h, err := gophercloud.BuildHeaders(opts)
+	if err != nil {
+		return nil, err
+	}
+	h["X-Cdn-Enabled"] = strconv.FormatBool(opts.CDNEnabled)
+	h["X-Log-Retention"] = strconv.FormatBool(opts.LogRetention)
+
+	if opts.XCDNEnabled != nil {
+		h["X-Cdn-Enabled"] = strconv.FormatBool(*opts.XCDNEnabled)
+	}
+
+	if opts.XLogRetention != nil {
+		h["X-Log-Retention"] = strconv.FormatBool(*opts.XLogRetention)
+	}
+
+	return h, nil
+}
+
+// Update is a function that creates, updates, or deletes a container's
+// metadata.
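+//
+// A sketch of CDN-enabling a container with a one-hour TTL, using the
+// pointer-valued field so that an unset value is distinguishable from false
+// (client is an assumed authenticated *gophercloud.ServiceClient for the CDN
+// endpoint):
+//
+//	opts := UpdateOpts{XCDNEnabled: Enabled, TTL: 3600}
+//	res := Update(client, "my-container", opts)
+//	if res.Err != nil {
+//		// handle the update failure
+//	}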
+func Update(c *gophercloud.ServiceClient, containerName string, opts UpdateOptsBuilder) UpdateResult {
+	var res UpdateResult
+	h := c.AuthenticatedHeaders()
+
+	if opts != nil {
+		headers, err := opts.ToContainerUpdateMap()
+		if err != nil {
+			res.Err = err
+			return res
+		}
+
+		for k, v := range headers {
+			h[k] = v
+		}
+	}
+
+	resp, err := c.Request("POST", updateURL(c, containerName), gophercloud.RequestOpts{
+		MoreHeaders: h,
+		OkCodes:     []int{202, 204},
+	})
+	if resp != nil {
+		res.Header = resp.Header
+	}
+	res.Err = err
+	return res
+}
diff --git a/vendor/github.com/rackspace/gophercloud/rackspace/objectstorage/v1/cdncontainers/requests_test.go b/vendor/github.com/rackspace/gophercloud/rackspace/objectstorage/v1/cdncontainers/requests_test.go
new file mode 100644
index 000000000..28b963dac
--- /dev/null
+++ b/vendor/github.com/rackspace/gophercloud/rackspace/objectstorage/v1/cdncontainers/requests_test.go
@@ -0,0 +1,29 @@
+package cdncontainers
+
+import (
+	"net/http"
+	"testing"
+
+	th "github.com/rackspace/gophercloud/testhelper"
+	fake "github.com/rackspace/gophercloud/testhelper/client"
+)
+
+func TestEnableCDNContainer(t *testing.T) {
+	th.SetupHTTP()
+	defer th.TeardownHTTP()
+	th.Mux.HandleFunc("/testContainer", func(w http.ResponseWriter, r *http.Request) {
+		th.TestMethod(t, r, "PUT")
+		th.TestHeader(t, r, "X-Auth-Token", fake.TokenID)
+		th.TestHeader(t, r, "Accept", "application/json")
+
+		w.Header().Add("X-Ttl", "259200")
+		w.Header().Add("X-Cdn-Enabled", "True")
+		w.WriteHeader(http.StatusNoContent)
+	})
+
+	options := &EnableOpts{CDNEnabled: true, TTL: 259200}
+	actual := Enable(fake.ServiceClient(), "testContainer", options)
+	th.AssertNoErr(t, actual.Err)
+	th.CheckEquals(t, actual.Header["X-Ttl"][0], "259200")
+	th.CheckEquals(t, actual.Header["X-Cdn-Enabled"][0], "True")
+}
diff --git a/vendor/github.com/rackspace/gophercloud/rackspace/objectstorage/v1/cdncontainers/results.go b/vendor/github.com/rackspace/gophercloud/rackspace/objectstorage/v1/cdncontainers/results.go
new file mode 100644
index 000000000..cb0ad3096
--- /dev/null
+++ b/vendor/github.com/rackspace/gophercloud/rackspace/objectstorage/v1/cdncontainers/results.go
@@ -0,0 +1,149 @@
+package cdncontainers
+
+import (
+	"strings"
+	"time"
+
+	"github.com/rackspace/gophercloud"
+)
+
+// EnableHeader represents the headers returned in the response from an Enable request.
+type EnableHeader struct {
+	CDNIosURI       string    `mapstructure:"X-Cdn-Ios-Uri"`
+	CDNSslURI       string    `mapstructure:"X-Cdn-Ssl-Uri"`
+	CDNStreamingURI string    `mapstructure:"X-Cdn-Streaming-Uri"`
+	CDNUri          string    `mapstructure:"X-Cdn-Uri"`
+	ContentLength   int       `mapstructure:"Content-Length"`
+	ContentType     string    `mapstructure:"Content-Type"`
+	Date            time.Time `mapstructure:"-"`
+	TransID         string    `mapstructure:"X-Trans-Id"`
+}
+
+// EnableResult represents the result of an Enable operation.
+type EnableResult struct {
+	gophercloud.HeaderResult
+}
+
+// Extract will return an EnableHeader from the response to an Enable
+// request. To obtain a map of headers, call the ExtractHeader method on the EnableResult.
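+//
+// For example (sketch; client is an assumed authenticated CDN-scoped
+// *gophercloud.ServiceClient):
+//
+//	header, err := Enable(client, "my-container", EnableOpts{CDNEnabled: true, TTL: 259200}).Extract()
+//	if err == nil {
+//		fmt.Println(header.CDNUri)
+//	}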
+func (er EnableResult) Extract() (EnableHeader, error) {
+	var eh EnableHeader
+	if er.Err != nil {
+		return eh, er.Err
+	}
+
+	if err := gophercloud.DecodeHeader(er.Header, &eh); err != nil {
+		return eh, err
+	}
+
+	if date, ok := er.Header["Date"]; ok && len(date) > 0 {
+		t, err := time.Parse(time.RFC1123, er.Header["Date"][0])
+		if err != nil {
+			return eh, err
+		}
+		eh.Date = t
+	}
+
+	return eh, nil
+}
+
+// GetHeader represents the headers returned in the response from a Get request.
+type GetHeader struct {
+	CDNEnabled      bool      `mapstructure:"X-Cdn-Enabled"`
+	CDNIosURI       string    `mapstructure:"X-Cdn-Ios-Uri"`
+	CDNSslURI       string    `mapstructure:"X-Cdn-Ssl-Uri"`
+	CDNStreamingURI string    `mapstructure:"X-Cdn-Streaming-Uri"`
+	CDNUri          string    `mapstructure:"X-Cdn-Uri"`
+	ContentLength   int       `mapstructure:"Content-Length"`
+	ContentType     string    `mapstructure:"Content-Type"`
+	Date            time.Time `mapstructure:"-"`
+	LogRetention    bool      `mapstructure:"X-Log-Retention"`
+	TransID         string    `mapstructure:"X-Trans-Id"`
+	TTL             int       `mapstructure:"X-Ttl"`
+}
+
+// GetResult represents the result of a Get operation.
+type GetResult struct {
+	gophercloud.HeaderResult
+}
+
+// Extract will return a struct of headers returned from a call to Get. To obtain
+// a map of headers, call the ExtractHeader method on the GetResult.
+func (gr GetResult) Extract() (GetHeader, error) {
+	var gh GetHeader
+	if gr.Err != nil {
+		return gh, gr.Err
+	}
+
+	if err := gophercloud.DecodeHeader(gr.Header, &gh); err != nil {
+		return gh, err
+	}
+
+	if date, ok := gr.Header["Date"]; ok && len(date) > 0 {
+		t, err := time.Parse(time.RFC1123, gr.Header["Date"][0])
+		if err != nil {
+			return gh, err
+		}
+		gh.Date = t
+	}
+
+	return gh, nil
+}
+
+// ExtractMetadata is a function that takes a GetResult and returns the custom
+// metadata associated with the container.
+func (gr GetResult) ExtractMetadata() (map[string]string, error) {
+	if gr.Err != nil {
+		return nil, gr.Err
+	}
+	metadata := make(map[string]string)
+	for k, v := range gr.Header {
+		if strings.HasPrefix(k, "X-Container-Meta-") {
+			key := strings.TrimPrefix(k, "X-Container-Meta-")
+			metadata[key] = v[0]
+		}
+	}
+	return metadata, nil
+}
+
+// UpdateHeader represents the headers returned in the response from an Update request.
+type UpdateHeader struct {
+	CDNIosURI       string    `mapstructure:"X-Cdn-Ios-Uri"`
+	CDNSslURI       string    `mapstructure:"X-Cdn-Ssl-Uri"`
+	CDNStreamingURI string    `mapstructure:"X-Cdn-Streaming-Uri"`
+	CDNUri          string    `mapstructure:"X-Cdn-Uri"`
+	ContentLength   int       `mapstructure:"Content-Length"`
+	ContentType     string    `mapstructure:"Content-Type"`
+	Date            time.Time `mapstructure:"-"`
+	TransID         string    `mapstructure:"X-Trans-Id"`
+}
+
+// UpdateResult represents the result of an update operation. To extract the
+// headers from the HTTP response, you can invoke the 'ExtractHeader'
+// method on the result struct.
+type UpdateResult struct {
+	gophercloud.HeaderResult
+}
+
+// Extract will return a struct of headers returned from a call to Update. To obtain
+// a map of headers, call the ExtractHeader method on the UpdateResult.
+func (ur UpdateResult) Extract() (UpdateHeader, error) { + var uh UpdateHeader + if ur.Err != nil { + return uh, ur.Err + } + + if err := gophercloud.DecodeHeader(ur.Header, &uh); err != nil { + return uh, err + } + + if date, ok := ur.Header["Date"]; ok && len(date) > 0 { + t, err := time.Parse(time.RFC1123, ur.Header["Date"][0]) + if err != nil { + return uh, err + } + uh.Date = t + } + + return uh, nil +} diff --git a/vendor/github.com/rackspace/gophercloud/rackspace/objectstorage/v1/cdncontainers/urls.go b/vendor/github.com/rackspace/gophercloud/rackspace/objectstorage/v1/cdncontainers/urls.go new file mode 100644 index 000000000..541249a92 --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/rackspace/objectstorage/v1/cdncontainers/urls.go @@ -0,0 +1,15 @@ +package cdncontainers + +import "github.com/rackspace/gophercloud" + +func enableURL(c *gophercloud.ServiceClient, containerName string) string { + return c.ServiceURL(containerName) +} + +func getURL(c *gophercloud.ServiceClient, container string) string { + return c.ServiceURL(container) +} + +func updateURL(c *gophercloud.ServiceClient, container string) string { + return getURL(c, container) +} diff --git a/vendor/github.com/rackspace/gophercloud/rackspace/objectstorage/v1/cdncontainers/urls_test.go b/vendor/github.com/rackspace/gophercloud/rackspace/objectstorage/v1/cdncontainers/urls_test.go new file mode 100644 index 000000000..aa5bfe68b --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/rackspace/objectstorage/v1/cdncontainers/urls_test.go @@ -0,0 +1,20 @@ +package cdncontainers + +import ( + "testing" + + "github.com/rackspace/gophercloud" + th "github.com/rackspace/gophercloud/testhelper" +) + +const endpoint = "http://localhost:57909/" + +func endpointClient() *gophercloud.ServiceClient { + return &gophercloud.ServiceClient{Endpoint: endpoint} +} + +func TestEnableURL(t *testing.T) { + actual := enableURL(endpointClient(), "foo") + expected := endpoint + "foo" + th.CheckEquals(t, expected, actual) +} diff --git a/vendor/github.com/rackspace/gophercloud/rackspace/objectstorage/v1/cdnobjects/delegate.go b/vendor/github.com/rackspace/gophercloud/rackspace/objectstorage/v1/cdnobjects/delegate.go new file mode 100644 index 000000000..e9d2ff1d6 --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/rackspace/objectstorage/v1/cdnobjects/delegate.go @@ -0,0 +1,11 @@ +package cdnobjects + +import ( + "github.com/rackspace/gophercloud" + os "github.com/rackspace/gophercloud/openstack/objectstorage/v1/objects" +) + +// Delete is a function that deletes an object from the CDN. 
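+//
+// For example (sketch; client is an assumed authenticated CDN-scoped
+// *gophercloud.ServiceClient):
+//
+//	res := Delete(client, "my-container", "my-object", nil)
+//	if res.Err != nil {
+//		// handle the deletion failure
+//	}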
+func Delete(c *gophercloud.ServiceClient, containerName, objectName string, opts os.DeleteOptsBuilder) os.DeleteResult {
+	return os.Delete(c, containerName, objectName, opts)
+}
diff --git a/vendor/github.com/rackspace/gophercloud/rackspace/objectstorage/v1/cdnobjects/delegate_test.go b/vendor/github.com/rackspace/gophercloud/rackspace/objectstorage/v1/cdnobjects/delegate_test.go
new file mode 100644
index 000000000..b5e04a98c
--- /dev/null
+++ b/vendor/github.com/rackspace/gophercloud/rackspace/objectstorage/v1/cdnobjects/delegate_test.go
@@ -0,0 +1,19 @@
+package cdnobjects
+
+import (
+	"testing"
+
+	os "github.com/rackspace/gophercloud/openstack/objectstorage/v1/objects"
+	th "github.com/rackspace/gophercloud/testhelper"
+	fake "github.com/rackspace/gophercloud/testhelper/client"
+)
+
+func TestDeleteCDNObject(t *testing.T) {
+	th.SetupHTTP()
+	defer th.TeardownHTTP()
+	os.HandleDeleteObjectSuccessfully(t)
+
+	res := Delete(fake.ServiceClient(), "testContainer", "testObject", nil)
+	th.AssertNoErr(t, res.Err)
+}
diff --git a/vendor/github.com/rackspace/gophercloud/rackspace/objectstorage/v1/cdnobjects/doc.go b/vendor/github.com/rackspace/gophercloud/rackspace/objectstorage/v1/cdnobjects/doc.go
new file mode 100644
index 000000000..90cd5c97f
--- /dev/null
+++ b/vendor/github.com/rackspace/gophercloud/rackspace/objectstorage/v1/cdnobjects/doc.go
@@ -0,0 +1,3 @@
+// Package cdnobjects provides information and interaction with the CDN
+// Object API resource for the Rackspace Cloud Files service.
+package cdnobjects
diff --git a/vendor/github.com/rackspace/gophercloud/rackspace/objectstorage/v1/cdnobjects/request.go b/vendor/github.com/rackspace/gophercloud/rackspace/objectstorage/v1/cdnobjects/request.go
new file mode 100644
index 000000000..540e0cd29
--- /dev/null
+++ b/vendor/github.com/rackspace/gophercloud/rackspace/objectstorage/v1/cdnobjects/request.go
@@ -0,0 +1,15 @@
+package cdnobjects
+
+import (
+	"github.com/rackspace/gophercloud"
+	"github.com/rackspace/gophercloud/rackspace/objectstorage/v1/cdncontainers"
+)
+
+// CDNURL returns the unique CDN URI for the given container and object.
+func CDNURL(c *gophercloud.ServiceClient, containerName, objectName string) (string, error) {
+	h, err := cdncontainers.Get(c, containerName).Extract()
+	if err != nil {
+		return "", err
+	}
+	return h.CDNUri + "/" + objectName, nil
+}
diff --git a/vendor/github.com/rackspace/gophercloud/rackspace/objectstorage/v1/containers/delegate.go b/vendor/github.com/rackspace/gophercloud/rackspace/objectstorage/v1/containers/delegate.go
new file mode 100644
index 000000000..77ed00257
--- /dev/null
+++ b/vendor/github.com/rackspace/gophercloud/rackspace/objectstorage/v1/containers/delegate.go
@@ -0,0 +1,93 @@
+package containers
+
+import (
+	"github.com/rackspace/gophercloud"
+	os "github.com/rackspace/gophercloud/openstack/objectstorage/v1/containers"
+	"github.com/rackspace/gophercloud/pagination"
+)
+
+// ExtractInfo interprets a page of List results when full container info
+// is requested.
+func ExtractInfo(page pagination.Page) ([]os.Container, error) {
+	return os.ExtractInfo(page)
+}
+
+// ExtractNames interprets a page of List results when just the container
+// names are requested.
+func ExtractNames(page pagination.Page) ([]string, error) {
+	return os.ExtractNames(page)
+}
+
+// List is a function that retrieves containers associated with the account as
+// well as account metadata. It returns a pager which can be iterated with the
+// EachPage function.
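+//
+// For example, to print every container name (sketch; client is an assumed
+// authenticated *gophercloud.ServiceClient):
+//
+//	err := List(client, &os.ListOpts{Full: false}).EachPage(func(page pagination.Page) (bool, error) {
+//		names, err := ExtractNames(page)
+//		if err != nil {
+//			return false, err
+//		}
+//		for _, n := range names {
+//			fmt.Println(n)
+//		}
+//		return true, nil
+//	})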
+func List(c *gophercloud.ServiceClient, opts os.ListOptsBuilder) pagination.Pager {
+	return os.List(c, opts)
+}
+
+// CreateOpts is a structure that holds parameters for creating a container.
+type CreateOpts struct {
+	Metadata         map[string]string
+	ContainerRead    string `h:"X-Container-Read"`
+	ContainerWrite   string `h:"X-Container-Write"`
+	VersionsLocation string `h:"X-Versions-Location"`
+}
+
+// ToContainerCreateMap formats a CreateOpts into a map of headers.
+func (opts CreateOpts) ToContainerCreateMap() (map[string]string, error) {
+	h, err := gophercloud.BuildHeaders(opts)
+	if err != nil {
+		return nil, err
+	}
+	for k, v := range opts.Metadata {
+		h["X-Container-Meta-"+k] = v
+	}
+	return h, nil
+}
+
+// Create is a function that creates a new container.
+func Create(c *gophercloud.ServiceClient, containerName string, opts os.CreateOptsBuilder) os.CreateResult {
+	return os.Create(c, containerName, opts)
+}
+
+// Delete is a function that deletes a container.
+func Delete(c *gophercloud.ServiceClient, containerName string) os.DeleteResult {
+	return os.Delete(c, containerName)
+}
+
+// UpdateOpts is a structure that holds parameters for updating or creating a
+// container's metadata.
+type UpdateOpts struct {
+	Metadata               map[string]string
+	ContainerRead          string `h:"X-Container-Read"`
+	ContainerWrite         string `h:"X-Container-Write"`
+	ContentType            string `h:"Content-Type"`
+	DetectContentType      bool   `h:"X-Detect-Content-Type"`
+	RemoveVersionsLocation string `h:"X-Remove-Versions-Location"`
+	VersionsLocation       string `h:"X-Versions-Location"`
+}
+
+// ToContainerUpdateMap formats an UpdateOpts into a map of headers.
+func (opts UpdateOpts) ToContainerUpdateMap() (map[string]string, error) {
+	h, err := gophercloud.BuildHeaders(opts)
+	if err != nil {
+		return nil, err
+	}
+	for k, v := range opts.Metadata {
+		h["X-Container-Meta-"+k] = v
+	}
+	return h, nil
+}
+
+// Update is a function that creates, updates, or deletes a container's
+// metadata.
+func Update(c *gophercloud.ServiceClient, containerName string, opts os.UpdateOptsBuilder) os.UpdateResult {
+	return os.Update(c, containerName, opts)
+}
+
+// Get is a function that retrieves the metadata of a container. To extract just
+// the custom metadata, pass the GetResult response to the ExtractMetadata
+// function.
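+//
+// For example (sketch; client is an assumed authenticated
+// *gophercloud.ServiceClient):
+//
+//	metadata, err := Get(client, "my-container").ExtractMetadata()
+//	if err == nil {
+//		fmt.Printf("container has %d custom metadata keys\n", len(metadata))
+//	}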
+func Get(c *gophercloud.ServiceClient, containerName string) os.GetResult { + return os.Get(c, containerName) +} diff --git a/vendor/github.com/rackspace/gophercloud/rackspace/objectstorage/v1/containers/delegate_test.go b/vendor/github.com/rackspace/gophercloud/rackspace/objectstorage/v1/containers/delegate_test.go new file mode 100644 index 000000000..7ba4eb21c --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/rackspace/objectstorage/v1/containers/delegate_test.go @@ -0,0 +1,91 @@ +package containers + +import ( + "testing" + + os "github.com/rackspace/gophercloud/openstack/objectstorage/v1/containers" + "github.com/rackspace/gophercloud/pagination" + th "github.com/rackspace/gophercloud/testhelper" + fake "github.com/rackspace/gophercloud/testhelper/client" +) + +func TestListContainerInfo(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + os.HandleListContainerInfoSuccessfully(t) + + count := 0 + err := List(fake.ServiceClient(), &os.ListOpts{Full: true}).EachPage(func(page pagination.Page) (bool, error) { + count++ + actual, err := ExtractInfo(page) + th.AssertNoErr(t, err) + + th.CheckDeepEquals(t, os.ExpectedListInfo, actual) + + return true, nil + }) + th.AssertNoErr(t, err) + th.CheckEquals(t, count, 1) +} + +func TestListContainerNames(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + os.HandleListContainerNamesSuccessfully(t) + + count := 0 + err := List(fake.ServiceClient(), &os.ListOpts{Full: false}).EachPage(func(page pagination.Page) (bool, error) { + count++ + actual, err := ExtractNames(page) + if err != nil { + t.Errorf("Failed to extract container names: %v", err) + return false, err + } + + th.CheckDeepEquals(t, os.ExpectedListNames, actual) + + return true, nil + }) + th.AssertNoErr(t, err) + th.CheckEquals(t, count, 1) +} + +func TestCreateContainers(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + os.HandleCreateContainerSuccessfully(t) + + options := os.CreateOpts{ContentType: "application/json", Metadata: map[string]string{"foo": "bar"}} + res := Create(fake.ServiceClient(), "testContainer", options) + th.CheckNoErr(t, res.Err) + th.CheckEquals(t, "bar", res.Header["X-Container-Meta-Foo"][0]) + +} + +func TestDeleteContainers(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + os.HandleDeleteContainerSuccessfully(t) + + res := Delete(fake.ServiceClient(), "testContainer") + th.CheckNoErr(t, res.Err) +} + +func TestUpdateContainers(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + os.HandleUpdateContainerSuccessfully(t) + + options := &os.UpdateOpts{Metadata: map[string]string{"foo": "bar"}} + res := Update(fake.ServiceClient(), "testContainer", options) + th.CheckNoErr(t, res.Err) +} + +func TestGetContainers(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + os.HandleGetContainerSuccessfully(t) + + _, err := Get(fake.ServiceClient(), "testContainer").ExtractMetadata() + th.CheckNoErr(t, err) +} diff --git a/vendor/github.com/rackspace/gophercloud/rackspace/objectstorage/v1/containers/doc.go b/vendor/github.com/rackspace/gophercloud/rackspace/objectstorage/v1/containers/doc.go new file mode 100644 index 000000000..d132a0738 --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/rackspace/objectstorage/v1/containers/doc.go @@ -0,0 +1,3 @@ +// Package containers provides information and interaction with the Container +// API resource for the Rackspace Cloud Files service. 
+package containers
diff --git a/vendor/github.com/rackspace/gophercloud/rackspace/objectstorage/v1/objects/delegate.go b/vendor/github.com/rackspace/gophercloud/rackspace/objectstorage/v1/objects/delegate.go
new file mode 100644
index 000000000..94c820ba7
--- /dev/null
+++ b/vendor/github.com/rackspace/gophercloud/rackspace/objectstorage/v1/objects/delegate.go
@@ -0,0 +1,94 @@
+package objects
+
+import (
+ "io"
+
+ "github.com/rackspace/gophercloud"
+ os "github.com/rackspace/gophercloud/openstack/objectstorage/v1/objects"
+ "github.com/rackspace/gophercloud/pagination"
+)
+
+// ExtractInfo is a function that takes a page of objects and returns their full information.
+func ExtractInfo(page pagination.Page) ([]os.Object, error) {
+ return os.ExtractInfo(page)
+}
+
+// ExtractNames is a function that takes a page of objects and returns only their names.
+func ExtractNames(page pagination.Page) ([]string, error) {
+ return os.ExtractNames(page)
+}
+
+// List is a function that retrieves objects in the container as
+// well as container metadata. It returns a pager which can be iterated with the
+// EachPage function.
+func List(c *gophercloud.ServiceClient, containerName string, opts os.ListOptsBuilder) pagination.Pager {
+ return os.List(c, containerName, opts)
+}
+
+// Download is a function that retrieves the content and metadata for an object.
+// To extract just the content, pass the DownloadResult response to the
+// ExtractContent function.
+func Download(c *gophercloud.ServiceClient, containerName, objectName string, opts os.DownloadOptsBuilder) os.DownloadResult {
+ return os.Download(c, containerName, objectName, opts)
+}
+
+// Create is a function that creates a new object or replaces an existing object.
+func Create(c *gophercloud.ServiceClient, containerName, objectName string, content io.ReadSeeker, opts os.CreateOptsBuilder) os.CreateResult {
+ return os.Create(c, containerName, objectName, content, opts)
+}
+
+// CopyOpts is a structure that holds parameters for copying one object to
+// another.
+type CopyOpts struct {
+ Metadata map[string]string
+ ContentDisposition string `h:"Content-Disposition"`
+ ContentEncoding string `h:"Content-Encoding"`
+ ContentLength int `h:"Content-Length"`
+ ContentType string `h:"Content-Type"`
+ CopyFrom string `h:"X-Copy-From"`
+ Destination string `h:"Destination"`
+ DetectContentType bool `h:"X-Detect-Content-Type"`
+}
+
+// ToObjectCopyMap formats a CopyOpts into a map of headers.
+func (opts CopyOpts) ToObjectCopyMap() (map[string]string, error) {
+ h, err := gophercloud.BuildHeaders(opts)
+ if err != nil {
+ return nil, err
+ }
+ for k, v := range opts.Metadata {
+ h["X-Object-Meta-"+k] = v
+ }
+ // `Content-Length` is required and a value of "0" is acceptable, but calling `gophercloud.BuildHeaders`
+ // will remove the `Content-Length` header if it's set to 0 (or equivalently not set). This will add
+ // the header if it's not already set.
+ if _, ok := h["Content-Length"]; !ok {
+ h["Content-Length"] = "0"
+ }
+ return h, nil
+}
+
+// Copy is a function that copies one object to another.
+func Copy(c *gophercloud.ServiceClient, containerName, objectName string, opts os.CopyOptsBuilder) os.CopyResult {
+ return os.Copy(c, containerName, objectName, opts)
+}
+
+// Delete is a function that deletes an object.
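
An illustrative aside on the Content-Length special case above (not part of the vendored file): gophercloud.BuildHeaders elides zero-valued fields, so ToObjectCopyMap re-adds the header by hand. The sketch below, grounded entirely in the code above, shows the resulting header map for a minimal CopyOpts.

package main

import (
 "fmt"

 "github.com/rackspace/gophercloud/rackspace/objectstorage/v1/objects"
)

func main() {
 opts := objects.CopyOpts{
  Destination: "/newContainer/newObject",
  Metadata:    map[string]string{"origin": "copy"},
 }
 h, err := opts.ToObjectCopyMap()
 if err != nil {
  panic(err)
 }
 // Prints Destination, X-Object-Meta-origin, and Content-Length "0":
 // the zero ContentLength was dropped by BuildHeaders and re-added.
 fmt.Println(h)
}
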
+func Delete(c *gophercloud.ServiceClient, containerName, objectName string, opts os.DeleteOptsBuilder) os.DeleteResult {
+ return os.Delete(c, containerName, objectName, opts)
+}
+
+// Get is a function that retrieves the metadata of an object. To extract just the custom
+// metadata, pass the GetResult response to the ExtractMetadata function.
+func Get(c *gophercloud.ServiceClient, containerName, objectName string, opts os.GetOptsBuilder) os.GetResult {
+ return os.Get(c, containerName, objectName, opts)
+}
+
+// Update is a function that creates, updates, or deletes an object's metadata.
+func Update(c *gophercloud.ServiceClient, containerName, objectName string, opts os.UpdateOptsBuilder) os.UpdateResult {
+ return os.Update(c, containerName, objectName, opts)
+}
+
+func CreateTempURL(c *gophercloud.ServiceClient, containerName, objectName string, opts os.CreateTempURLOpts) (string, error) {
+ return os.CreateTempURL(c, containerName, objectName, opts)
+}
diff --git a/vendor/github.com/rackspace/gophercloud/rackspace/objectstorage/v1/objects/delegate_test.go b/vendor/github.com/rackspace/gophercloud/rackspace/objectstorage/v1/objects/delegate_test.go
new file mode 100644
index 000000000..21cd4179a
--- /dev/null
+++ b/vendor/github.com/rackspace/gophercloud/rackspace/objectstorage/v1/objects/delegate_test.go
@@ -0,0 +1,127 @@
+package objects
+
+import (
+ "strings"
+ "testing"
+
+ os "github.com/rackspace/gophercloud/openstack/objectstorage/v1/objects"
+ "github.com/rackspace/gophercloud/pagination"
+ th "github.com/rackspace/gophercloud/testhelper"
+ fake "github.com/rackspace/gophercloud/testhelper/client"
+)
+
+func TestDownloadObject(t *testing.T) {
+ th.SetupHTTP()
+ defer th.TeardownHTTP()
+ os.HandleDownloadObjectSuccessfully(t)
+
+ content, err := Download(fake.ServiceClient(), "testContainer", "testObject", nil).ExtractContent()
+ th.AssertNoErr(t, err)
+ th.CheckEquals(t, string(content), "Successful download with Gophercloud")
+}
+
+func TestListObjectsInfo(t *testing.T) {
+ th.SetupHTTP()
+ defer th.TeardownHTTP()
+ os.HandleListObjectsInfoSuccessfully(t)
+
+ count := 0
+ options := &os.ListOpts{Full: true}
+ err := List(fake.ServiceClient(), "testContainer", options).EachPage(func(page pagination.Page) (bool, error) {
+ count++
+ actual, err := ExtractInfo(page)
+ th.AssertNoErr(t, err)
+
+ th.CheckDeepEquals(t, os.ExpectedListInfo, actual)
+
+ return true, nil
+ })
+ th.AssertNoErr(t, err)
+ th.CheckEquals(t, count, 1)
+}
+
+func TestListObjectNames(t *testing.T) {
+ th.SetupHTTP()
+ defer th.TeardownHTTP()
+ os.HandleListObjectNamesSuccessfully(t)
+
+ count := 0
+ options := &os.ListOpts{Full: false}
+ err := List(fake.ServiceClient(), "testContainer", options).EachPage(func(page pagination.Page) (bool, error) {
+ count++
+ actual, err := ExtractNames(page)
+ if err != nil {
+ t.Errorf("Failed to extract object names: %v", err)
+ return false, err
+ }
+
+ th.CheckDeepEquals(t, os.ExpectedListNames, actual)
+
+ return true, nil
+ })
+ th.AssertNoErr(t, err)
+ th.CheckEquals(t, count, 1)
+}
+
+func TestCreateObject(t *testing.T) {
+ th.SetupHTTP()
+ defer th.TeardownHTTP()
+
+ content := "Did gyre and gimble in the wabe"
+ os.HandleCreateTextObjectSuccessfully(t, content)
+
+ options := &os.CreateOpts{ContentType: "text/plain"}
+ res := Create(fake.ServiceClient(), "testContainer", "testObject", strings.NewReader(content), options)
+ th.AssertNoErr(t, res.Err)
+}
+
+func TestCreateObjectWithoutContentType(t *testing.T) {
+ th.SetupHTTP()
+ defer th.TeardownHTTP()
+
+ 
content := "The sky was the color of television, tuned to a dead channel." + os.HandleCreateTypelessObjectSuccessfully(t, content) + + res := Create(fake.ServiceClient(), "testContainer", "testObject", strings.NewReader(content), &os.CreateOpts{}) + th.AssertNoErr(t, res.Err) +} + +func TestCopyObject(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + os.HandleCopyObjectSuccessfully(t) + + options := &CopyOpts{Destination: "/newTestContainer/newTestObject"} + res := Copy(fake.ServiceClient(), "testContainer", "testObject", options) + th.AssertNoErr(t, res.Err) +} + +func TestDeleteObject(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + os.HandleDeleteObjectSuccessfully(t) + + res := Delete(fake.ServiceClient(), "testContainer", "testObject", nil) + th.AssertNoErr(t, res.Err) +} + +func TestUpdateObject(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + os.HandleUpdateObjectSuccessfully(t) + + options := &os.UpdateOpts{Metadata: map[string]string{"Gophercloud-Test": "objects"}} + res := Update(fake.ServiceClient(), "testContainer", "testObject", options) + th.AssertNoErr(t, res.Err) +} + +func TestGetObject(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + os.HandleGetObjectSuccessfully(t) + + expected := map[string]string{"Gophercloud-Test": "objects"} + actual, err := Get(fake.ServiceClient(), "testContainer", "testObject", nil).ExtractMetadata() + th.AssertNoErr(t, err) + th.CheckDeepEquals(t, expected, actual) +} diff --git a/vendor/github.com/rackspace/gophercloud/rackspace/objectstorage/v1/objects/doc.go b/vendor/github.com/rackspace/gophercloud/rackspace/objectstorage/v1/objects/doc.go new file mode 100644 index 000000000..781984bee --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/rackspace/objectstorage/v1/objects/doc.go @@ -0,0 +1,3 @@ +// Package objects provides information and interaction with the Object +// API resource for the Rackspace Cloud Files service. +package objects diff --git a/vendor/github.com/rackspace/gophercloud/rackspace/orchestration/v1/buildinfo/delegate.go b/vendor/github.com/rackspace/gophercloud/rackspace/orchestration/v1/buildinfo/delegate.go new file mode 100644 index 000000000..c834e5c7d --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/rackspace/orchestration/v1/buildinfo/delegate.go @@ -0,0 +1,11 @@ +package buildinfo + +import ( + "github.com/rackspace/gophercloud" + os "github.com/rackspace/gophercloud/openstack/orchestration/v1/buildinfo" +) + +// Get retreives build info data for the Heat deployment. 
+func Get(c *gophercloud.ServiceClient) os.GetResult {
+ return os.Get(c)
+}
diff --git a/vendor/github.com/rackspace/gophercloud/rackspace/orchestration/v1/buildinfo/delegate_test.go b/vendor/github.com/rackspace/gophercloud/rackspace/orchestration/v1/buildinfo/delegate_test.go
new file mode 100644
index 000000000..b25a690c8
--- /dev/null
+++ b/vendor/github.com/rackspace/gophercloud/rackspace/orchestration/v1/buildinfo/delegate_test.go
@@ -0,0 +1,21 @@
+package buildinfo
+
+import (
+ "testing"
+
+ os "github.com/rackspace/gophercloud/openstack/orchestration/v1/buildinfo"
+ th "github.com/rackspace/gophercloud/testhelper"
+ fake "github.com/rackspace/gophercloud/testhelper/client"
+)
+
+func TestGetTemplate(t *testing.T) {
+ th.SetupHTTP()
+ defer th.TeardownHTTP()
+ os.HandleGetSuccessfully(t, os.GetOutput)
+
+ actual, err := Get(fake.ServiceClient()).Extract()
+ th.AssertNoErr(t, err)
+
+ expected := os.GetExpected
+ th.AssertDeepEquals(t, expected, actual)
+}
diff --git a/vendor/github.com/rackspace/gophercloud/rackspace/orchestration/v1/buildinfo/doc.go b/vendor/github.com/rackspace/gophercloud/rackspace/orchestration/v1/buildinfo/doc.go
new file mode 100644
index 000000000..183e8dfa7
--- /dev/null
+++ b/vendor/github.com/rackspace/gophercloud/rackspace/orchestration/v1/buildinfo/doc.go
@@ -0,0 +1,2 @@
+// Package buildinfo provides build information about Heat deployments.
+package buildinfo
diff --git a/vendor/github.com/rackspace/gophercloud/rackspace/orchestration/v1/stackevents/delegate.go b/vendor/github.com/rackspace/gophercloud/rackspace/orchestration/v1/stackevents/delegate.go
new file mode 100644
index 000000000..08675deac
--- /dev/null
+++ b/vendor/github.com/rackspace/gophercloud/rackspace/orchestration/v1/stackevents/delegate.go
@@ -0,0 +1,27 @@
+package stackevents
+
+import (
+ "github.com/rackspace/gophercloud"
+ os "github.com/rackspace/gophercloud/openstack/orchestration/v1/stackevents"
+ "github.com/rackspace/gophercloud/pagination"
+)
+
+// Find retrieves stack events for the given stack name.
+func Find(c *gophercloud.ServiceClient, stackName string) os.FindResult {
+ return os.Find(c, stackName)
+}
+
+// List makes a request against the API to list events for the given stack.
+func List(c *gophercloud.ServiceClient, stackName, stackID string, opts os.ListOptsBuilder) pagination.Pager {
+ return os.List(c, stackName, stackID, opts)
+}
+
+// ListResourceEvents makes a request against the API to list events for the
+// given stack resource.
+func ListResourceEvents(c *gophercloud.ServiceClient, stackName, stackID, resourceName string, opts os.ListResourceEventsOptsBuilder) pagination.Pager {
+ return os.ListResourceEvents(c, stackName, stackID, resourceName, opts)
+}
+
+// Get retrieves data for the given stack event.
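
A sketch of consuming the pager returned by List above (not part of the vendored file). The client is assumed to be an authenticated Orchestration v1 ServiceClient, the stack name and ID are placeholders, and the package name is hypothetical; the Event.ID field is assumed from the openstack stackevents package.

package stackeventsexample

import (
 "fmt"

 "github.com/rackspace/gophercloud"
 os "github.com/rackspace/gophercloud/openstack/orchestration/v1/stackevents"
 "github.com/rackspace/gophercloud/pagination"
 "github.com/rackspace/gophercloud/rackspace/orchestration/v1/stackevents"
)

// PrintEventIDs walks every page of events for the given stack, mirroring
// the EachPage pattern used throughout the tests in this patch.
func PrintEventIDs(client *gophercloud.ServiceClient, stackName, stackID string) error {
 return stackevents.List(client, stackName, stackID, nil).EachPage(func(page pagination.Page) (bool, error) {
  events, err := os.ExtractEvents(page)
  if err != nil {
   return false, err
  }
  for _, event := range events {
   fmt.Println(event.ID)
  }
  return true, nil // true means continue to the next page
 })
}
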
+func Get(c *gophercloud.ServiceClient, stackName, stackID, resourceName, eventID string) os.GetResult { + return os.Get(c, stackName, stackID, resourceName, eventID) +} diff --git a/vendor/github.com/rackspace/gophercloud/rackspace/orchestration/v1/stackevents/delegate_test.go b/vendor/github.com/rackspace/gophercloud/rackspace/orchestration/v1/stackevents/delegate_test.go new file mode 100644 index 000000000..e1c0bc8db --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/rackspace/orchestration/v1/stackevents/delegate_test.go @@ -0,0 +1,72 @@ +package stackevents + +import ( + "testing" + + os "github.com/rackspace/gophercloud/openstack/orchestration/v1/stackevents" + "github.com/rackspace/gophercloud/pagination" + th "github.com/rackspace/gophercloud/testhelper" + fake "github.com/rackspace/gophercloud/testhelper/client" +) + +func TestFindEvents(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + os.HandleFindSuccessfully(t, os.FindOutput) + + actual, err := Find(fake.ServiceClient(), "postman_stack").Extract() + th.AssertNoErr(t, err) + + expected := os.FindExpected + th.AssertDeepEquals(t, expected, actual) +} + +func TestList(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + os.HandleListSuccessfully(t, os.ListOutput) + + count := 0 + err := List(fake.ServiceClient(), "hello_world", "49181cd6-169a-4130-9455-31185bbfc5bf", nil).EachPage(func(page pagination.Page) (bool, error) { + count++ + actual, err := os.ExtractEvents(page) + th.AssertNoErr(t, err) + + th.CheckDeepEquals(t, os.ListExpected, actual) + + return true, nil + }) + th.AssertNoErr(t, err) + th.CheckEquals(t, count, 1) +} + +func TestListResourceEvents(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + os.HandleListResourceEventsSuccessfully(t, os.ListResourceEventsOutput) + + count := 0 + err := ListResourceEvents(fake.ServiceClient(), "hello_world", "49181cd6-169a-4130-9455-31185bbfc5bf", "my_resource", nil).EachPage(func(page pagination.Page) (bool, error) { + count++ + actual, err := os.ExtractEvents(page) + th.AssertNoErr(t, err) + + th.CheckDeepEquals(t, os.ListResourceEventsExpected, actual) + + return true, nil + }) + th.AssertNoErr(t, err) + th.CheckEquals(t, count, 1) +} + +func TestGetEvent(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + os.HandleGetSuccessfully(t, os.GetOutput) + + actual, err := Get(fake.ServiceClient(), "hello_world", "49181cd6-169a-4130-9455-31185bbfc5bf", "my_resource", "93940999-7d40-44ae-8de4-19624e7b8d18").Extract() + th.AssertNoErr(t, err) + + expected := os.GetExpected + th.AssertDeepEquals(t, expected, actual) +} diff --git a/vendor/github.com/rackspace/gophercloud/rackspace/orchestration/v1/stackevents/doc.go b/vendor/github.com/rackspace/gophercloud/rackspace/orchestration/v1/stackevents/doc.go new file mode 100644 index 000000000..dfd6ef660 --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/rackspace/orchestration/v1/stackevents/doc.go @@ -0,0 +1,3 @@ +// Package stackevents provides operations for finding, listing, and retrieving +// stack events. 
+package stackevents
diff --git a/vendor/github.com/rackspace/gophercloud/rackspace/orchestration/v1/stackresources/delegate.go b/vendor/github.com/rackspace/gophercloud/rackspace/orchestration/v1/stackresources/delegate.go
new file mode 100644
index 000000000..cb7be28b7
--- /dev/null
+++ b/vendor/github.com/rackspace/gophercloud/rackspace/orchestration/v1/stackresources/delegate.go
@@ -0,0 +1,42 @@
+package stackresources
+
+import (
+ "github.com/rackspace/gophercloud"
+ os "github.com/rackspace/gophercloud/openstack/orchestration/v1/stackresources"
+ "github.com/rackspace/gophercloud/pagination"
+)
+
+// Find retrieves stack resources for the given stack name.
+func Find(c *gophercloud.ServiceClient, stackName string) os.FindResult {
+ return os.Find(c, stackName)
+}
+
+// List makes a request against the API to list resources for the given stack.
+func List(c *gophercloud.ServiceClient, stackName, stackID string, opts os.ListOptsBuilder) pagination.Pager {
+ return os.List(c, stackName, stackID, opts)
+}
+
+// Get retrieves data for the given stack resource.
+func Get(c *gophercloud.ServiceClient, stackName, stackID, resourceName string) os.GetResult {
+ return os.Get(c, stackName, stackID, resourceName)
+}
+
+// Metadata retrieves the metadata for the given stack resource.
+func Metadata(c *gophercloud.ServiceClient, stackName, stackID, resourceName string) os.MetadataResult {
+ return os.Metadata(c, stackName, stackID, resourceName)
+}
+
+// ListTypes makes a request against the API to list resource types.
+func ListTypes(c *gophercloud.ServiceClient) pagination.Pager {
+ return os.ListTypes(c)
+}
+
+// Schema retrieves the schema for the given resource type.
+func Schema(c *gophercloud.ServiceClient, resourceType string) os.SchemaResult {
+ return os.Schema(c, resourceType)
+}
+
+// Template retrieves the template representation for the given resource type.
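
An aside on Schema and Template above (not part of the vendored file): both operate on a resource type rather than a live resource. A short sketch under stated assumptions; the package name is hypothetical, the type name is a placeholder, and Extract is assumed to yield a byte-slice template body, matching how the tests below convert it with string().

package stackresourcesexample

import (
 "github.com/rackspace/gophercloud"
 "github.com/rackspace/gophercloud/rackspace/orchestration/v1/stackresources"
)

// ResourceTemplate returns the template representation of a resource type
// such as "OS::Nova::Server".
func ResourceTemplate(client *gophercloud.ServiceClient, resourceType string) (string, error) {
 body, err := stackresources.Template(client, resourceType).Extract()
 if err != nil {
  return "", err
 }
 return string(body), nil
}
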
+func Template(c *gophercloud.ServiceClient, resourceType string) os.TemplateResult { + return os.Template(c, resourceType) +} diff --git a/vendor/github.com/rackspace/gophercloud/rackspace/orchestration/v1/stackresources/delegate_test.go b/vendor/github.com/rackspace/gophercloud/rackspace/orchestration/v1/stackresources/delegate_test.go new file mode 100644 index 000000000..116e44ce7 --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/rackspace/orchestration/v1/stackresources/delegate_test.go @@ -0,0 +1,108 @@ +package stackresources + +import ( + "testing" + + os "github.com/rackspace/gophercloud/openstack/orchestration/v1/stackresources" + "github.com/rackspace/gophercloud/pagination" + th "github.com/rackspace/gophercloud/testhelper" + fake "github.com/rackspace/gophercloud/testhelper/client" +) + +func TestFindResources(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + os.HandleFindSuccessfully(t, os.FindOutput) + + actual, err := Find(fake.ServiceClient(), "hello_world").Extract() + th.AssertNoErr(t, err) + + expected := os.FindExpected + th.AssertDeepEquals(t, expected, actual) +} + +func TestListResources(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + os.HandleListSuccessfully(t, os.ListOutput) + + count := 0 + err := List(fake.ServiceClient(), "hello_world", "49181cd6-169a-4130-9455-31185bbfc5bf", nil).EachPage(func(page pagination.Page) (bool, error) { + count++ + actual, err := os.ExtractResources(page) + th.AssertNoErr(t, err) + + th.CheckDeepEquals(t, os.ListExpected, actual) + + return true, nil + }) + th.AssertNoErr(t, err) + th.CheckEquals(t, count, 1) +} + +func TestGetResource(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + os.HandleGetSuccessfully(t, os.GetOutput) + + actual, err := Get(fake.ServiceClient(), "teststack", "0b1771bd-9336-4f2b-ae86-a80f971faf1e", "wordpress_instance").Extract() + th.AssertNoErr(t, err) + + expected := os.GetExpected + th.AssertDeepEquals(t, expected, actual) +} + +func TestResourceMetadata(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + os.HandleMetadataSuccessfully(t, os.MetadataOutput) + + actual, err := Metadata(fake.ServiceClient(), "teststack", "0b1771bd-9336-4f2b-ae86-a80f971faf1e", "wordpress_instance").Extract() + th.AssertNoErr(t, err) + + expected := os.MetadataExpected + th.AssertDeepEquals(t, expected, actual) +} + +func TestListResourceTypes(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + os.HandleListTypesSuccessfully(t, os.ListTypesOutput) + + count := 0 + err := ListTypes(fake.ServiceClient()).EachPage(func(page pagination.Page) (bool, error) { + count++ + actual, err := os.ExtractResourceTypes(page) + th.AssertNoErr(t, err) + + th.CheckDeepEquals(t, os.ListTypesExpected, actual) + + return true, nil + }) + th.AssertNoErr(t, err) + th.CheckEquals(t, 1, count) +} + +func TestGetResourceSchema(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + os.HandleGetSchemaSuccessfully(t, os.GetSchemaOutput) + + actual, err := Schema(fake.ServiceClient(), "OS::Heat::AResourceName").Extract() + th.AssertNoErr(t, err) + + expected := os.GetSchemaExpected + th.AssertDeepEquals(t, expected, actual) +} + +func TestGetResourceTemplate(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + os.HandleGetTemplateSuccessfully(t, os.GetTemplateOutput) + + actual, err := Template(fake.ServiceClient(), "OS::Heat::AResourceName").Extract() + th.AssertNoErr(t, err) + + expected := os.GetTemplateExpected + th.AssertDeepEquals(t, expected, string(actual)) +} diff --git 
a/vendor/github.com/rackspace/gophercloud/rackspace/orchestration/v1/stackresources/doc.go b/vendor/github.com/rackspace/gophercloud/rackspace/orchestration/v1/stackresources/doc.go
new file mode 100644
index 000000000..e4f8b08dc
--- /dev/null
+++ b/vendor/github.com/rackspace/gophercloud/rackspace/orchestration/v1/stackresources/doc.go
@@ -0,0 +1,5 @@
+// Package stackresources provides operations for working with stack resources.
+// A resource is a template artifact that represents some component of your
+// desired architecture (a Cloud Server, a group of scaled Cloud Servers, a load
+// balancer, some configuration management system, and so forth).
+package stackresources
diff --git a/vendor/github.com/rackspace/gophercloud/rackspace/orchestration/v1/stacks/delegate.go b/vendor/github.com/rackspace/gophercloud/rackspace/orchestration/v1/stacks/delegate.go
new file mode 100644
index 000000000..f7e387f8f
--- /dev/null
+++ b/vendor/github.com/rackspace/gophercloud/rackspace/orchestration/v1/stacks/delegate.go
@@ -0,0 +1,49 @@
+package stacks
+
+import (
+ "github.com/rackspace/gophercloud"
+ os "github.com/rackspace/gophercloud/openstack/orchestration/v1/stacks"
+ "github.com/rackspace/gophercloud/pagination"
+)
+
+// Create accepts an os.CreateOpts struct and creates a new stack using the values
+// provided.
+func Create(c *gophercloud.ServiceClient, opts os.CreateOptsBuilder) os.CreateResult {
+ return os.Create(c, opts)
+}
+
+// Adopt accepts an os.AdoptOpts struct and creates a new stack from existing stack
+// resources using the values provided.
+func Adopt(c *gophercloud.ServiceClient, opts os.AdoptOptsBuilder) os.AdoptResult {
+ return os.Adopt(c, opts)
+}
+
+// List accepts an os.ListOpts struct and lists stacks based on the options provided.
+func List(c *gophercloud.ServiceClient, opts os.ListOptsBuilder) pagination.Pager {
+ return os.List(c, opts)
+}
+
+// Get retrieves a stack based on the stack name and stack ID.
+func Get(c *gophercloud.ServiceClient, stackName, stackID string) os.GetResult {
+ return os.Get(c, stackName, stackID)
+}
+
+// Update accepts an os.UpdateOpts struct and updates a stack based on the options provided.
+func Update(c *gophercloud.ServiceClient, stackName, stackID string, opts os.UpdateOptsBuilder) os.UpdateResult {
+ return os.Update(c, stackName, stackID, opts)
+}
+
+// Delete deletes a stack based on the stack name and stack ID provided.
+func Delete(c *gophercloud.ServiceClient, stackName, stackID string) os.DeleteResult {
+ return os.Delete(c, stackName, stackID)
+}
+
+// Preview provides a preview of a stack based on the options provided.
+func Preview(c *gophercloud.ServiceClient, opts os.PreviewOptsBuilder) os.PreviewResult {
+ return os.Preview(c, opts)
+}
+
+// Abandon abandons a stack, keeping the resources available.
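
A minimal create-then-delete sketch against the delegates above (not part of the vendored file), using the newer TemplateOpts form exercised by the tests that follow. The package name, stack name, and template body are placeholders; os.Disable and CreatedStack.ID are used as in those tests.

package stacksexample

import (
 "github.com/rackspace/gophercloud"
 os "github.com/rackspace/gophercloud/openstack/orchestration/v1/stacks"
 "github.com/rackspace/gophercloud/rackspace/orchestration/v1/stacks"
)

// CreateAndDelete creates an empty stack and immediately deletes it.
func CreateAndDelete(client *gophercloud.ServiceClient) error {
 tmpl := new(os.Template)
 tmpl.Bin = []byte(`{"heat_template_version": "2014-10-16", "resources": {}}`)

 createOpts := os.CreateOpts{
  Name:            "example-stack",
  Timeout:         30, // minutes
  TemplateOpts:    tmpl,
  DisableRollback: os.Disable,
 }
 created, err := stacks.Create(client, createOpts).Extract()
 if err != nil {
  return err
 }
 // Delete needs both the name and the ID handed back by Create.
 return stacks.Delete(client, "example-stack", created.ID).ExtractErr()
}
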
+func Abandon(c *gophercloud.ServiceClient, stackName, stackID string) os.AbandonResult { + return os.Abandon(c, stackName, stackID) +} diff --git a/vendor/github.com/rackspace/gophercloud/rackspace/orchestration/v1/stacks/delegate_test.go b/vendor/github.com/rackspace/gophercloud/rackspace/orchestration/v1/stacks/delegate_test.go new file mode 100644 index 000000000..553ae9404 --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/rackspace/orchestration/v1/stacks/delegate_test.go @@ -0,0 +1,870 @@ +package stacks + +import ( + "testing" + + os "github.com/rackspace/gophercloud/openstack/orchestration/v1/stacks" + "github.com/rackspace/gophercloud/pagination" + th "github.com/rackspace/gophercloud/testhelper" + fake "github.com/rackspace/gophercloud/testhelper/client" +) + +func TestCreateStack(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + os.HandleCreateSuccessfully(t, CreateOutput) + + createOpts := os.CreateOpts{ + Name: "stackcreated", + Timeout: 60, + Template: `{ + "outputs": { + "db_host": { + "value": { + "get_attr": [ + "db", + "hostname" + ] + } + } + }, + "heat_template_version": "2014-10-16", + "description": "HEAT template for creating a Cloud Database.\n", + "parameters": { + "db_name": { + "default": "wordpress", + "type": "string", + "description": "the name for the database", + "constraints": [ + { + "length": { + "max": 64, + "min": 1 + }, + "description": "must be between 1 and 64 characters" + }, + { + "allowed_pattern": "[a-zA-Z][a-zA-Z0-9]*", + "description": "must begin with a letter and contain only alphanumeric characters." + } + ] + }, + "db_instance_name": { + "default": "Cloud_DB", + "type": "string", + "description": "the database instance name" + }, + "db_username": { + "default": "admin", + "hidden": true, + "type": "string", + "description": "database admin account username", + "constraints": [ + { + "length": { + "max": 16, + "min": 1 + }, + "description": "must be between 1 and 16 characters" + }, + { + "allowed_pattern": "[a-zA-Z][a-zA-Z0-9]*", + "description": "must begin with a letter and contain only alphanumeric characters." + } + ] + }, + "db_volume_size": { + "default": 30, + "type": "number", + "description": "database volume size (in GB)", + "constraints": [ + { + "range": { + "max": 1024, + "min": 1 + }, + "description": "must be between 1 and 1024 GB" + } + ] + }, + "db_flavor": { + "default": "1GB Instance", + "type": "string", + "description": "database instance size", + "constraints": [ + { + "description": "must be a valid cloud database flavor", + "allowed_values": [ + "1GB Instance", + "2GB Instance", + "4GB Instance", + "8GB Instance", + "16GB Instance" + ] + } + ] + }, + "db_password": { + "default": "admin", + "hidden": true, + "type": "string", + "description": "database admin account password", + "constraints": [ + { + "length": { + "max": 41, + "min": 1 + }, + "description": "must be between 1 and 14 characters" + }, + { + "allowed_pattern": "[a-zA-Z0-9]*", + "description": "must contain only alphanumeric characters." 
+ } + ] + } + }, + "resources": { + "db": { + "type": "OS::Trove::Instance", + "properties": { + "flavor": { + "get_param": "db_flavor" + }, + "size": { + "get_param": "db_volume_size" + }, + "users": [ + { + "password": { + "get_param": "db_password" + }, + "name": { + "get_param": "db_username" + }, + "databases": [ + { + "get_param": "db_name" + } + ] + } + ], + "name": { + "get_param": "db_instance_name" + }, + "databases": [ + { + "name": { + "get_param": "db_name" + } + } + ] + } + } + } + }`, + DisableRollback: os.Disable, + } + actual, err := Create(fake.ServiceClient(), createOpts).Extract() + th.AssertNoErr(t, err) + + expected := CreateExpected + th.AssertDeepEquals(t, expected, actual) +} + +func TestCreateStackNewTemplateFormat(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + os.HandleCreateSuccessfully(t, CreateOutput) + + createOpts := os.CreateOpts{ + Name: "stackcreated", + Timeout: 60, + TemplateOpts: new(os.Template), + DisableRollback: os.Disable, + } + createOpts.TemplateOpts.Bin = []byte(`{ + "outputs": { + "db_host": { + "value": { + "get_attr": [ + "db", + "hostname" + ] + } + } + }, + "heat_template_version": "2014-10-16", + "description": "HEAT template for creating a Cloud Database.\n", + "parameters": { + "db_name": { + "default": "wordpress", + "type": "string", + "description": "the name for the database", + "constraints": [ + { + "length": { + "max": 64, + "min": 1 + }, + "description": "must be between 1 and 64 characters" + }, + { + "allowed_pattern": "[a-zA-Z][a-zA-Z0-9]*", + "description": "must begin with a letter and contain only alphanumeric characters." + } + ] + }, + "db_instance_name": { + "default": "Cloud_DB", + "type": "string", + "description": "the database instance name" + }, + "db_username": { + "default": "admin", + "hidden": true, + "type": "string", + "description": "database admin account username", + "constraints": [ + { + "length": { + "max": 16, + "min": 1 + }, + "description": "must be between 1 and 16 characters" + }, + { + "allowed_pattern": "[a-zA-Z][a-zA-Z0-9]*", + "description": "must begin with a letter and contain only alphanumeric characters." + } + ] + }, + "db_volume_size": { + "default": 30, + "type": "number", + "description": "database volume size (in GB)", + "constraints": [ + { + "range": { + "max": 1024, + "min": 1 + }, + "description": "must be between 1 and 1024 GB" + } + ] + }, + "db_flavor": { + "default": "1GB Instance", + "type": "string", + "description": "database instance size", + "constraints": [ + { + "description": "must be a valid cloud database flavor", + "allowed_values": [ + "1GB Instance", + "2GB Instance", + "4GB Instance", + "8GB Instance", + "16GB Instance" + ] + } + ] + }, + "db_password": { + "default": "admin", + "hidden": true, + "type": "string", + "description": "database admin account password", + "constraints": [ + { + "length": { + "max": 41, + "min": 1 + }, + "description": "must be between 1 and 14 characters" + }, + { + "allowed_pattern": "[a-zA-Z0-9]*", + "description": "must contain only alphanumeric characters." 
+ } + ] + } + }, + "resources": { + "db": { + "type": "OS::Trove::Instance", + "properties": { + "flavor": { + "get_param": "db_flavor" + }, + "size": { + "get_param": "db_volume_size" + }, + "users": [ + { + "password": { + "get_param": "db_password" + }, + "name": { + "get_param": "db_username" + }, + "databases": [ + { + "get_param": "db_name" + } + ] + } + ], + "name": { + "get_param": "db_instance_name" + }, + "databases": [ + { + "name": { + "get_param": "db_name" + } + } + ] + } + } + } + }`) + actual, err := Create(fake.ServiceClient(), createOpts).Extract() + th.AssertNoErr(t, err) + + expected := CreateExpected + th.AssertDeepEquals(t, expected, actual) +} + +func TestAdoptStack(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + os.HandleCreateSuccessfully(t, CreateOutput) + + adoptOpts := os.AdoptOpts{ + AdoptStackData: `{\"environment\":{\"parameters\":{}}, \"status\":\"COMPLETE\",\"name\": \"trovestack\",\n \"template\": {\n \"outputs\": {\n \"db_host\": {\n \"value\": {\n \"get_attr\": [\n \"db\",\n \"hostname\"\n ]\n }\n }\n },\n \"heat_template_version\": \"2014-10-16\",\n \"description\": \"HEAT template for creating a Cloud Database.\\n\",\n \"parameters\": {\n \"db_instance_name\": {\n \"default\": \"Cloud_DB\",\n \"type\": \"string\",\n \"description\": \"the database instance name\"\n },\n \"db_flavor\": {\n \"default\": \"1GB Instance\",\n \"type\": \"string\",\n \"description\": \"database instance size\",\n \"constraints\": [\n {\n \"description\": \"must be a valid cloud database flavor\",\n \"allowed_values\": [\n \"1GB Instance\",\n \"2GB Instance\",\n \"4GB Instance\",\n \"8GB Instance\",\n \"16GB Instance\"\n ]\n }\n ]\n },\n \"db_password\": {\n \"default\": \"admin\",\n \"hidden\": true,\n \"type\": \"string\",\n \"description\": \"database admin account password\",\n \"constraints\": [\n {\n \"length\": {\n \"max\": 41,\n \"min\": 1\n },\n \"description\": \"must be between 1 and 14 characters\"\n },\n {\n \"allowed_pattern\": \"[a-zA-Z0-9]*\",\n \"description\": \"must contain only alphanumeric characters.\"\n }\n ]\n },\n \"db_name\": {\n \"default\": \"wordpress\",\n \"type\": \"string\",\n \"description\": \"the name for the database\",\n \"constraints\": [\n {\n \"length\": {\n \"max\": 64,\n \"min\": 1\n },\n \"description\": \"must be between 1 and 64 characters\"\n },\n {\n \"allowed_pattern\": \"[a-zA-Z][a-zA-Z0-9]*\",\n \"description\": \"must begin with a letter and contain only alphanumeric characters.\"\n }\n ]\n },\n \"db_username\": {\n \"default\": \"admin\",\n \"hidden\": true,\n \"type\": \"string\",\n \"description\": \"database admin account username\",\n \"constraints\": [\n {\n \"length\": {\n \"max\": 16,\n \"min\": 1\n },\n \"description\": \"must be between 1 and 16 characters\"\n },\n {\n \"allowed_pattern\": \"[a-zA-Z][a-zA-Z0-9]*\",\n \"description\": \"must begin with a letter and contain only alphanumeric characters.\"\n }\n ]\n },\n \"db_volume_size\": {\n \"default\": 30,\n \"type\": \"number\",\n \"description\": \"database volume size (in GB)\",\n \"constraints\": [\n {\n \"range\": {\n \"max\": 1024,\n \"min\": 1\n },\n \"description\": \"must be between 1 and 1024 GB\"\n }\n ]\n }\n },\n \"resources\": {\n \"db\": {\n \"type\": \"OS::Trove::Instance\",\n \"properties\": {\n \"flavor\": {\n \"get_param\": \"db_flavor\"\n },\n \"databases\": [\n {\n \"name\": {\n \"get_param\": \"db_name\"\n }\n }\n ],\n \"users\": [\n {\n \"password\": {\n \"get_param\": \"db_password\"\n },\n \"name\": {\n \"get_param\": 
\"db_username\"\n },\n \"databases\": [\n {\n \"get_param\": \"db_name\"\n }\n ]\n }\n ],\n \"name\": {\n \"get_param\": \"db_instance_name\"\n },\n \"size\": {\n \"get_param\": \"db_volume_size\"\n }\n }\n }\n }\n },\n \"action\": \"CREATE\",\n \"id\": \"exxxxd-7xx5-4xxb-bxx2-cxxxxxx5\",\n \"resources\": {\n \"db\": {\n \"status\": \"COMPLETE\",\n \"name\": \"db\",\n \"resource_data\": {},\n \"resource_id\": \"exxxx2-9xx0-4xxxb-bxx2-dxxxxxx4\",\n \"action\": \"CREATE\",\n \"type\": \"OS::Trove::Instance\",\n \"metadata\": {}\n }\n }\n},`, + Name: "stackadopted", + Timeout: 60, + Template: `{ + "outputs": { + "db_host": { + "value": { + "get_attr": [ + "db", + "hostname" + ] + } + } + }, + "heat_template_version": "2014-10-16", + "description": "HEAT template for creating a Cloud Database.\n", + "parameters": { + "db_name": { + "default": "wordpress", + "type": "string", + "description": "the name for the database", + "constraints": [ + { + "length": { + "max": 64, + "min": 1 + }, + "description": "must be between 1 and 64 characters" + }, + { + "allowed_pattern": "[a-zA-Z][a-zA-Z0-9]*", + "description": "must begin with a letter and contain only alphanumeric characters." + } + ] + }, + "db_instance_name": { + "default": "Cloud_DB", + "type": "string", + "description": "the database instance name" + }, + "db_username": { + "default": "admin", + "hidden": true, + "type": "string", + "description": "database admin account username", + "constraints": [ + { + "length": { + "max": 16, + "min": 1 + }, + "description": "must be between 1 and 16 characters" + }, + { + "allowed_pattern": "[a-zA-Z][a-zA-Z0-9]*", + "description": "must begin with a letter and contain only alphanumeric characters." + } + ] + }, + "db_volume_size": { + "default": 30, + "type": "number", + "description": "database volume size (in GB)", + "constraints": [ + { + "range": { + "max": 1024, + "min": 1 + }, + "description": "must be between 1 and 1024 GB" + } + ] + }, + "db_flavor": { + "default": "1GB Instance", + "type": "string", + "description": "database instance size", + "constraints": [ + { + "description": "must be a valid cloud database flavor", + "allowed_values": [ + "1GB Instance", + "2GB Instance", + "4GB Instance", + "8GB Instance", + "16GB Instance" + ] + } + ] + }, + "db_password": { + "default": "admin", + "hidden": true, + "type": "string", + "description": "database admin account password", + "constraints": [ + { + "length": { + "max": 41, + "min": 1 + }, + "description": "must be between 1 and 14 characters" + }, + { + "allowed_pattern": "[a-zA-Z0-9]*", + "description": "must contain only alphanumeric characters." 
+ } + ] + } + }, + "resources": { + "db": { + "type": "OS::Trove::Instance", + "properties": { + "flavor": { + "get_param": "db_flavor" + }, + "size": { + "get_param": "db_volume_size" + }, + "users": [ + { + "password": { + "get_param": "db_password" + }, + "name": { + "get_param": "db_username" + }, + "databases": [ + { + "get_param": "db_name" + } + ] + } + ], + "name": { + "get_param": "db_instance_name" + }, + "databases": [ + { + "name": { + "get_param": "db_name" + } + } + ] + } + } + } + }`, + DisableRollback: os.Disable, + } + actual, err := Adopt(fake.ServiceClient(), adoptOpts).Extract() + th.AssertNoErr(t, err) + + expected := CreateExpected + th.AssertDeepEquals(t, expected, actual) +} + +func TestAdoptStackNewTemplateFormat(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + os.HandleCreateSuccessfully(t, CreateOutput) + template := new(os.Template) + template.Bin = []byte(`{ + "outputs": { + "db_host": { + "value": { + "get_attr": [ + "db", + "hostname" + ] + } + } + }, + "heat_template_version": "2014-10-16", + "description": "HEAT template for creating a Cloud Database.\n", + "parameters": { + "db_name": { + "default": "wordpress", + "type": "string", + "description": "the name for the database", + "constraints": [ + { + "length": { + "max": 64, + "min": 1 + }, + "description": "must be between 1 and 64 characters" + }, + { + "allowed_pattern": "[a-zA-Z][a-zA-Z0-9]*", + "description": "must begin with a letter and contain only alphanumeric characters." + } + ] + }, + "db_instance_name": { + "default": "Cloud_DB", + "type": "string", + "description": "the database instance name" + }, + "db_username": { + "default": "admin", + "hidden": true, + "type": "string", + "description": "database admin account username", + "constraints": [ + { + "length": { + "max": 16, + "min": 1 + }, + "description": "must be between 1 and 16 characters" + }, + { + "allowed_pattern": "[a-zA-Z][a-zA-Z0-9]*", + "description": "must begin with a letter and contain only alphanumeric characters." + } + ] + }, + "db_volume_size": { + "default": 30, + "type": "number", + "description": "database volume size (in GB)", + "constraints": [ + { + "range": { + "max": 1024, + "min": 1 + }, + "description": "must be between 1 and 1024 GB" + } + ] + }, + "db_flavor": { + "default": "1GB Instance", + "type": "string", + "description": "database instance size", + "constraints": [ + { + "description": "must be a valid cloud database flavor", + "allowed_values": [ + "1GB Instance", + "2GB Instance", + "4GB Instance", + "8GB Instance", + "16GB Instance" + ] + } + ] + }, + "db_password": { + "default": "admin", + "hidden": true, + "type": "string", + "description": "database admin account password", + "constraints": [ + { + "length": { + "max": 41, + "min": 1 + }, + "description": "must be between 1 and 14 characters" + }, + { + "allowed_pattern": "[a-zA-Z0-9]*", + "description": "must contain only alphanumeric characters." 
+ } + ] + } + }, + "resources": { + "db": { + "type": "OS::Trove::Instance", + "properties": { + "flavor": { + "get_param": "db_flavor" + }, + "size": { + "get_param": "db_volume_size" + }, + "users": [ + { + "password": { + "get_param": "db_password" + }, + "name": { + "get_param": "db_username" + }, + "databases": [ + { + "get_param": "db_name" + } + ] + } + ], + "name": { + "get_param": "db_instance_name" + }, + "databases": [ + { + "name": { + "get_param": "db_name" + } + } + ] + } + } + } +}`) + + adoptOpts := os.AdoptOpts{ + AdoptStackData: `{\"environment\":{\"parameters\":{}}, \"status\":\"COMPLETE\",\"name\": \"trovestack\",\n \"template\": {\n \"outputs\": {\n \"db_host\": {\n \"value\": {\n \"get_attr\": [\n \"db\",\n \"hostname\"\n ]\n }\n }\n },\n \"heat_template_version\": \"2014-10-16\",\n \"description\": \"HEAT template for creating a Cloud Database.\\n\",\n \"parameters\": {\n \"db_instance_name\": {\n \"default\": \"Cloud_DB\",\n \"type\": \"string\",\n \"description\": \"the database instance name\"\n },\n \"db_flavor\": {\n \"default\": \"1GB Instance\",\n \"type\": \"string\",\n \"description\": \"database instance size\",\n \"constraints\": [\n {\n \"description\": \"must be a valid cloud database flavor\",\n \"allowed_values\": [\n \"1GB Instance\",\n \"2GB Instance\",\n \"4GB Instance\",\n \"8GB Instance\",\n \"16GB Instance\"\n ]\n }\n ]\n },\n \"db_password\": {\n \"default\": \"admin\",\n \"hidden\": true,\n \"type\": \"string\",\n \"description\": \"database admin account password\",\n \"constraints\": [\n {\n \"length\": {\n \"max\": 41,\n \"min\": 1\n },\n \"description\": \"must be between 1 and 14 characters\"\n },\n {\n \"allowed_pattern\": \"[a-zA-Z0-9]*\",\n \"description\": \"must contain only alphanumeric characters.\"\n }\n ]\n },\n \"db_name\": {\n \"default\": \"wordpress\",\n \"type\": \"string\",\n \"description\": \"the name for the database\",\n \"constraints\": [\n {\n \"length\": {\n \"max\": 64,\n \"min\": 1\n },\n \"description\": \"must be between 1 and 64 characters\"\n },\n {\n \"allowed_pattern\": \"[a-zA-Z][a-zA-Z0-9]*\",\n \"description\": \"must begin with a letter and contain only alphanumeric characters.\"\n }\n ]\n },\n \"db_username\": {\n \"default\": \"admin\",\n \"hidden\": true,\n \"type\": \"string\",\n \"description\": \"database admin account username\",\n \"constraints\": [\n {\n \"length\": {\n \"max\": 16,\n \"min\": 1\n },\n \"description\": \"must be between 1 and 16 characters\"\n },\n {\n \"allowed_pattern\": \"[a-zA-Z][a-zA-Z0-9]*\",\n \"description\": \"must begin with a letter and contain only alphanumeric characters.\"\n }\n ]\n },\n \"db_volume_size\": {\n \"default\": 30,\n \"type\": \"number\",\n \"description\": \"database volume size (in GB)\",\n \"constraints\": [\n {\n \"range\": {\n \"max\": 1024,\n \"min\": 1\n },\n \"description\": \"must be between 1 and 1024 GB\"\n }\n ]\n }\n },\n \"resources\": {\n \"db\": {\n \"type\": \"OS::Trove::Instance\",\n \"properties\": {\n \"flavor\": {\n \"get_param\": \"db_flavor\"\n },\n \"databases\": [\n {\n \"name\": {\n \"get_param\": \"db_name\"\n }\n }\n ],\n \"users\": [\n {\n \"password\": {\n \"get_param\": \"db_password\"\n },\n \"name\": {\n \"get_param\": \"db_username\"\n },\n \"databases\": [\n {\n \"get_param\": \"db_name\"\n }\n ]\n }\n ],\n \"name\": {\n \"get_param\": \"db_instance_name\"\n },\n \"size\": {\n \"get_param\": \"db_volume_size\"\n }\n }\n }\n }\n },\n \"action\": \"CREATE\",\n \"id\": \"exxxxd-7xx5-4xxb-bxx2-cxxxxxx5\",\n \"resources\": 
{\n \"db\": {\n \"status\": \"COMPLETE\",\n \"name\": \"db\",\n \"resource_data\": {},\n \"resource_id\": \"exxxx2-9xx0-4xxxb-bxx2-dxxxxxx4\",\n \"action\": \"CREATE\",\n \"type\": \"OS::Trove::Instance\",\n \"metadata\": {}\n }\n }\n},`, + Name: "stackadopted", + Timeout: 60, + TemplateOpts: template, + DisableRollback: os.Disable, + } + actual, err := Adopt(fake.ServiceClient(), adoptOpts).Extract() + th.AssertNoErr(t, err) + + expected := CreateExpected + th.AssertDeepEquals(t, expected, actual) +} + +func TestListStack(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + os.HandleListSuccessfully(t, os.FullListOutput) + + count := 0 + err := List(fake.ServiceClient(), nil).EachPage(func(page pagination.Page) (bool, error) { + count++ + actual, err := os.ExtractStacks(page) + th.AssertNoErr(t, err) + + th.CheckDeepEquals(t, os.ListExpected, actual) + + return true, nil + }) + th.AssertNoErr(t, err) + th.CheckEquals(t, count, 1) +} + +func TestUpdateStack(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + os.HandleUpdateSuccessfully(t) + + updateOpts := os.UpdateOpts{ + Template: ` + { + "heat_template_version": "2013-05-23", + "description": "Simple template to test heat commands", + "parameters": { + "flavor": { + "default": "m1.tiny", + "type": "string" + } + }, + "resources": { + "hello_world": { + "type":"OS::Nova::Server", + "properties": { + "key_name": "heat_key", + "flavor": { + "get_param": "flavor" + }, + "image": "ad091b52-742f-469e-8f3c-fd81cadf0743", + "user_data": "#!/bin/bash -xv\necho \"hello world\" > /root/hello-world.txt\n" + } + } + } + }`, + } + err := Update(fake.ServiceClient(), "gophercloud-test-stack-2", "db6977b2-27aa-4775-9ae7-6213212d4ada", updateOpts).ExtractErr() + th.AssertNoErr(t, err) +} + +func TestUpdateStackNewTemplateFormat(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + os.HandleUpdateSuccessfully(t) + + updateOpts := os.UpdateOpts{ + TemplateOpts: new(os.Template), + } + updateOpts.TemplateOpts.Bin = []byte(` + { + "stack_name": "postman_stack", + "template": { + "heat_template_version": "2013-05-23", + "description": "Simple template to test heat commands", + "parameters": { + "flavor": { + "default": "m1.tiny", + "type": "string" + } + }, + "resources": { + "hello_world": { + "type": "OS::Nova::Server", + "properties": { + "key_name": "heat_key", + "flavor": { + "get_param": "flavor" + }, + "image": "ad091b52-742f-469e-8f3c-fd81cadf0743", + "user_data": "#!/bin/bash -xv\necho \"hello world\" > /root/hello-world.txt\n" + } + } + } + } + }`) + err := Update(fake.ServiceClient(), "gophercloud-test-stack-2", "db6977b2-27aa-4775-9ae7-6213212d4ada", updateOpts).ExtractErr() + th.AssertNoErr(t, err) +} + +func TestDeleteStack(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + os.HandleDeleteSuccessfully(t) + + err := Delete(fake.ServiceClient(), "gophercloud-test-stack-2", "db6977b2-27aa-4775-9ae7-6213212d4ada").ExtractErr() + th.AssertNoErr(t, err) +} + +func TestPreviewStack(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + os.HandlePreviewSuccessfully(t, os.GetOutput) + + previewOpts := os.PreviewOpts{ + Name: "stackcreated", + Timeout: 60, + Template: ` + { + "stack_name": "postman_stack", + "template": { + "heat_template_version": "2013-05-23", + "description": "Simple template to test heat commands", + "parameters": { + "flavor": { + "default": "m1.tiny", + "type": "string" + } + }, + "resources": { + "hello_world": { + "type":"OS::Nova::Server", + "properties": { + "key_name": "heat_key", + 
"flavor": { + "get_param": "flavor" + }, + "image": "ad091b52-742f-469e-8f3c-fd81cadf0743", + "user_data": "#!/bin/bash -xv\necho \"hello world\" > /root/hello-world.txt\n" + } + } + } + } + }`, + DisableRollback: os.Disable, + } + actual, err := Preview(fake.ServiceClient(), previewOpts).Extract() + th.AssertNoErr(t, err) + + expected := os.PreviewExpected + th.AssertDeepEquals(t, expected, actual) +} + +func TestPreviewStackNewTemplateFormat(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + os.HandlePreviewSuccessfully(t, os.GetOutput) + + previewOpts := os.PreviewOpts{ + Name: "stackcreated", + Timeout: 60, + TemplateOpts: new(os.Template), + DisableRollback: os.Disable, + } + previewOpts.TemplateOpts.Bin = []byte(` + { + "stack_name": "postman_stack", + "template": { + "heat_template_version": "2013-05-23", + "description": "Simple template to test heat commands", + "parameters": { + "flavor": { + "default": "m1.tiny", + "type": "string" + } + }, + "resources": { + "hello_world": { + "type": "OS::Nova::Server", + "properties": { + "key_name": "heat_key", + "flavor": { + "get_param": "flavor" + }, + "image": "ad091b52-742f-469e-8f3c-fd81cadf0743", + "user_data": "#!/bin/bash -xv\necho \"hello world\" > /root/hello-world.txt\n" + } + } + } + } + }`) + actual, err := Preview(fake.ServiceClient(), previewOpts).Extract() + th.AssertNoErr(t, err) + + expected := os.PreviewExpected + th.AssertDeepEquals(t, expected, actual) +} + +func TestAbandonStack(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + os.HandleAbandonSuccessfully(t, os.AbandonOutput) + + actual, err := Abandon(fake.ServiceClient(), "postman_stack", "16ef0584-4458-41eb-87c8-0dc8d5f66c8").Extract() + th.AssertNoErr(t, err) + + expected := os.AbandonExpected + th.AssertDeepEquals(t, expected, actual) +} diff --git a/vendor/github.com/rackspace/gophercloud/rackspace/orchestration/v1/stacks/doc.go b/vendor/github.com/rackspace/gophercloud/rackspace/orchestration/v1/stacks/doc.go new file mode 100644 index 000000000..19231b513 --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/rackspace/orchestration/v1/stacks/doc.go @@ -0,0 +1,8 @@ +// Package stacks provides operation for working with Heat stacks. A stack is a +// group of resources (servers, load balancers, databases, and so forth) +// combined to fulfill a useful purpose. Based on a template, Heat orchestration +// engine creates an instantiated set of resources (a stack) to run the +// application framework or component specified (in the template). A stack is a +// running instance of a template. The result of creating a stack is a deployment +// of the application framework or component. +package stacks diff --git a/vendor/github.com/rackspace/gophercloud/rackspace/orchestration/v1/stacks/fixtures.go b/vendor/github.com/rackspace/gophercloud/rackspace/orchestration/v1/stacks/fixtures.go new file mode 100644 index 000000000..c9afeb156 --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/rackspace/orchestration/v1/stacks/fixtures.go @@ -0,0 +1,32 @@ +package stacks + +import ( + "github.com/rackspace/gophercloud" + os "github.com/rackspace/gophercloud/openstack/orchestration/v1/stacks" +) + +// CreateExpected represents the expected object from a Create request. 
+var CreateExpected = &os.CreatedStack{ + ID: "b663e18a-4767-4cdf-9db5-9c8cc13cc38a", + Links: []gophercloud.Link{ + gophercloud.Link{ + Href: "https://ord.orchestration.api.rackspacecloud.com/v1/864477/stacks/stackcreated/b663e18a-4767-4cdf-9db5-9c8cc13cc38a", + Rel: "self", + }, + }, +} + +// CreateOutput represents the response body from a Create request. +const CreateOutput = ` +{ + "stack": { + "id": "b663e18a-4767-4cdf-9db5-9c8cc13cc38a", + "links": [ + { + "href": "https://ord.orchestration.api.rackspacecloud.com/v1/864477/stacks/stackcreated/b663e18a-4767-4cdf-9db5-9c8cc13cc38a", + "rel": "self" + } + ] + } +} +` diff --git a/vendor/github.com/rackspace/gophercloud/rackspace/orchestration/v1/stacktemplates/delegate.go b/vendor/github.com/rackspace/gophercloud/rackspace/orchestration/v1/stacktemplates/delegate.go new file mode 100644 index 000000000..3b5d46e1c --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/rackspace/orchestration/v1/stacktemplates/delegate.go @@ -0,0 +1,16 @@ +package stacktemplates + +import ( + "github.com/rackspace/gophercloud" + os "github.com/rackspace/gophercloud/openstack/orchestration/v1/stacktemplates" +) + +// Get retreives data for the given stack template. +func Get(c *gophercloud.ServiceClient, stackName, stackID string) os.GetResult { + return os.Get(c, stackName, stackID) +} + +// Validate validates the given stack template. +func Validate(c *gophercloud.ServiceClient, opts os.ValidateOptsBuilder) os.ValidateResult { + return os.Validate(c, opts) +} diff --git a/vendor/github.com/rackspace/gophercloud/rackspace/orchestration/v1/stacktemplates/delegate_test.go b/vendor/github.com/rackspace/gophercloud/rackspace/orchestration/v1/stacktemplates/delegate_test.go new file mode 100644 index 000000000..d4d0f8f5a --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/rackspace/orchestration/v1/stacktemplates/delegate_test.go @@ -0,0 +1,47 @@ +package stacktemplates + +import ( + "testing" + + os "github.com/rackspace/gophercloud/openstack/orchestration/v1/stacktemplates" + th "github.com/rackspace/gophercloud/testhelper" + fake "github.com/rackspace/gophercloud/testhelper/client" +) + +func TestGetTemplate(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + os.HandleGetSuccessfully(t, os.GetOutput) + + actual, err := Get(fake.ServiceClient(), "postman_stack", "16ef0584-4458-41eb-87c8-0dc8d5f66c87").Extract() + th.AssertNoErr(t, err) + + expected := os.GetExpected + th.AssertDeepEquals(t, expected, string(actual)) +} + +func TestValidateTemplate(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + os.HandleValidateSuccessfully(t, os.ValidateOutput) + + opts := os.ValidateOpts{ + Template: `{ + "Description": "Simple template to test heat commands", + "Parameters": { + "flavor": { + "Default": "m1.tiny", + "Type": "String", + "NoEcho": "false", + "Description": "", + "Label": "flavor" + } + } + }`, + } + actual, err := Validate(fake.ServiceClient(), opts).Extract() + th.AssertNoErr(t, err) + + expected := os.ValidateExpected + th.AssertDeepEquals(t, expected, actual) +} diff --git a/vendor/github.com/rackspace/gophercloud/rackspace/orchestration/v1/stacktemplates/doc.go b/vendor/github.com/rackspace/gophercloud/rackspace/orchestration/v1/stacktemplates/doc.go new file mode 100644 index 000000000..5af0bd62a --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/rackspace/orchestration/v1/stacktemplates/doc.go @@ -0,0 +1,8 @@ +// Package stacktemplates provides operations for working with Heat templates. 
+// A Cloud Orchestration template is a portable file, written in a user-readable +// language, that describes how a set of resources should be assembled and what +// software should be installed in order to produce a working stack. The template +// specifies what resources should be used, what attributes can be set, and other +// parameters that are critical to the successful, repeatable automation of a +// specific application stack. +package stacktemplates diff --git a/vendor/github.com/rackspace/gophercloud/rackspace/rackconnect/v3/cloudnetworks/requests.go b/vendor/github.com/rackspace/gophercloud/rackspace/rackconnect/v3/cloudnetworks/requests.go new file mode 100644 index 000000000..58843030a --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/rackspace/rackconnect/v3/cloudnetworks/requests.go @@ -0,0 +1,24 @@ +package cloudnetworks + +import ( + "github.com/rackspace/gophercloud" + "github.com/rackspace/gophercloud/pagination" +) + +// List returns all cloud networks that are associated with RackConnect. The ID +// returned for each network is the same as the ID returned by the networks package. +func List(c *gophercloud.ServiceClient) pagination.Pager { + url := listURL(c) + createPage := func(r pagination.PageResult) pagination.Page { + return CloudNetworkPage{pagination.SinglePageBase(r)} + } + return pagination.NewPager(c, url, createPage) +} + +// Get retrieves a specific cloud network (that is associated with RackConnect) +// based on its unique ID. +func Get(c *gophercloud.ServiceClient, id string) GetResult { + var res GetResult + _, res.Err = c.Get(getURL(c, id), &res.Body, nil) + return res +} diff --git a/vendor/github.com/rackspace/gophercloud/rackspace/rackconnect/v3/cloudnetworks/requests_test.go b/vendor/github.com/rackspace/gophercloud/rackspace/rackconnect/v3/cloudnetworks/requests_test.go new file mode 100644 index 000000000..10d15dd11 --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/rackspace/rackconnect/v3/cloudnetworks/requests_test.go @@ -0,0 +1,87 @@ +package cloudnetworks + +import ( + "fmt" + "net/http" + "testing" + "time" + + "github.com/rackspace/gophercloud/pagination" + th "github.com/rackspace/gophercloud/testhelper" + fake "github.com/rackspace/gophercloud/testhelper/client" +) + +func TestListCloudNetworks(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + th.Mux.HandleFunc("/cloud_networks", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + th.TestHeader(t, r, "Accept", "application/json") + + w.Header().Set("Content-Type", "application/json") + fmt.Fprintf(w, `[{ + "cidr": "192.168.100.0/24", + "created": "2014-05-25T01:23:42Z", + "id": "07426958-1ebf-4c38-b032-d456820ca21a", + "name": "RC-CLOUD", + "updated": "2014-05-25T02:28:44Z" + }]`) + }) + + expected := []CloudNetwork{ + CloudNetwork{ + CIDR: "192.168.100.0/24", + CreatedAt: time.Date(2014, 5, 25, 1, 23, 42, 0, time.UTC), + ID: "07426958-1ebf-4c38-b032-d456820ca21a", + Name: "RC-CLOUD", + UpdatedAt: time.Date(2014, 5, 25, 2, 28, 44, 0, time.UTC), + }, + } + + count := 0 + err := List(fake.ServiceClient()).EachPage(func(page pagination.Page) (bool, error) { + count++ + actual, err := ExtractCloudNetworks(page) + th.AssertNoErr(t, err) + + th.CheckDeepEquals(t, expected, actual) + + return true, nil + }) + th.AssertNoErr(t, err) + th.CheckEquals(t, count, 1) +} + +func TestGetCloudNetwork(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + 
th.Mux.HandleFunc("/cloud_networks/07426958-1ebf-4c38-b032-d456820ca21a", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + th.TestHeader(t, r, "Accept", "application/json") + + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + fmt.Fprintf(w, `{ + "cidr": "192.168.100.0/24", + "created": "2014-05-25T01:23:42Z", + "id": "07426958-1ebf-4c38-b032-d456820ca21a", + "name": "RC-CLOUD", + "updated": "2014-05-25T02:28:44Z" + }`) + }) + + expected := &CloudNetwork{ + CIDR: "192.168.100.0/24", + CreatedAt: time.Date(2014, 5, 25, 1, 23, 42, 0, time.UTC), + ID: "07426958-1ebf-4c38-b032-d456820ca21a", + Name: "RC-CLOUD", + UpdatedAt: time.Date(2014, 5, 25, 2, 28, 44, 0, time.UTC), + } + + actual, err := Get(fake.ServiceClient(), "07426958-1ebf-4c38-b032-d456820ca21a").Extract() + th.AssertNoErr(t, err) + + th.AssertDeepEquals(t, expected, actual) +} diff --git a/vendor/github.com/rackspace/gophercloud/rackspace/rackconnect/v3/cloudnetworks/results.go b/vendor/github.com/rackspace/gophercloud/rackspace/rackconnect/v3/cloudnetworks/results.go new file mode 100644 index 000000000..f554a0d75 --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/rackspace/rackconnect/v3/cloudnetworks/results.go @@ -0,0 +1,113 @@ +package cloudnetworks + +import ( + "fmt" + "reflect" + "time" + + "github.com/mitchellh/mapstructure" + "github.com/rackspace/gophercloud" + "github.com/rackspace/gophercloud/pagination" +) + +// CloudNetwork represents a network associated with a RackConnect configuration. +type CloudNetwork struct { + // Specifies the ID of the newtork. + ID string `mapstructure:"id"` + // Specifies the user-provided name of the network. + Name string `mapstructure:"name"` + // Specifies the IP range for this network. + CIDR string `mapstructure:"cidr"` + // Specifies the time the network was created. + CreatedAt time.Time `mapstructure:"-"` + // Specifies the time the network was last updated. + UpdatedAt time.Time `mapstructure:"-"` +} + +// CloudNetworkPage is the page returned by a pager when traversing over a +// collection of CloudNetworks. +type CloudNetworkPage struct { + pagination.SinglePageBase +} + +// IsEmpty returns true if a CloudNetworkPage contains no CloudNetworks. +func (r CloudNetworkPage) IsEmpty() (bool, error) { + cns, err := ExtractCloudNetworks(r) + if err != nil { + return true, err + } + return len(cns) == 0, nil +} + +// ExtractCloudNetworks extracts and returns CloudNetworks. It is used while iterating over +// a cloudnetworks.List call. +func ExtractCloudNetworks(page pagination.Page) ([]CloudNetwork, error) { + var res []CloudNetwork + casted := page.(CloudNetworkPage).Body + err := mapstructure.Decode(casted, &res) + + var rawNets []interface{} + switch casted.(type) { + case interface{}: + rawNets = casted.([]interface{}) + default: + return res, fmt.Errorf("Unknown type: %v", reflect.TypeOf(casted)) + } + + for i := range rawNets { + thisNet := (rawNets[i]).(map[string]interface{}) + + if t, ok := thisNet["created"].(string); ok && t != "" { + creationTime, err := time.Parse(time.RFC3339, t) + if err != nil { + return res, err + } + res[i].CreatedAt = creationTime + } + + if t, ok := thisNet["updated"].(string); ok && t != "" { + updatedTime, err := time.Parse(time.RFC3339, t) + if err != nil { + return res, err + } + res[i].UpdatedAt = updatedTime + } + } + + return res, err +} + +// GetResult represents the result of a Get operation. 
+type GetResult struct { + gophercloud.Result +} + +// Extract is a function that extracts a CloudNetwork from a GetResult. +func (r GetResult) Extract() (*CloudNetwork, error) { + if r.Err != nil { + return nil, r.Err + } + var res CloudNetwork + + err := mapstructure.Decode(r.Body, &res) + + b := r.Body.(map[string]interface{}) + + if date, ok := b["created"]; ok && date != nil { + t, err := time.Parse(time.RFC3339, date.(string)) + if err != nil { + return nil, err + } + res.CreatedAt = t + } + + if date, ok := b["updated"]; ok && date != nil { + t, err := time.Parse(time.RFC3339, date.(string)) + if err != nil { + return nil, err + } + res.UpdatedAt = t + } + + return &res, err +} diff --git a/vendor/github.com/rackspace/gophercloud/rackspace/rackconnect/v3/cloudnetworks/urls.go b/vendor/github.com/rackspace/gophercloud/rackspace/rackconnect/v3/cloudnetworks/urls.go new file mode 100644 index 000000000..bd6b098da --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/rackspace/rackconnect/v3/cloudnetworks/urls.go @@ -0,0 +1,11 @@ +package cloudnetworks + +import "github.com/rackspace/gophercloud" + +func listURL(c *gophercloud.ServiceClient) string { + return c.ServiceURL("cloud_networks") +} + +func getURL(c *gophercloud.ServiceClient, id string) string { + return c.ServiceURL("cloud_networks", id) +} diff --git a/vendor/github.com/rackspace/gophercloud/rackspace/rackconnect/v3/doc.go b/vendor/github.com/rackspace/gophercloud/rackspace/rackconnect/v3/doc.go new file mode 100644 index 000000000..3a8279e10 --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/rackspace/rackconnect/v3/doc.go @@ -0,0 +1,4 @@ +// Package rackconnect allows Rackspace cloud accounts to leverage version 3 of +// RackConnect, Rackspace's hybrid connectivity solution connecting dedicated +// and cloud servers. +package rackconnect diff --git a/vendor/github.com/rackspace/gophercloud/rackspace/rackconnect/v3/lbpools/doc.go b/vendor/github.com/rackspace/gophercloud/rackspace/rackconnect/v3/lbpools/doc.go new file mode 100644 index 000000000..f4319b8ff --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/rackspace/rackconnect/v3/lbpools/doc.go @@ -0,0 +1,14 @@ +// Package lbpools provides access to load balancer pools associated with a +// RackConnect configuration. Load Balancer Pools must be configured in advance +// by your Network Security team to be eligible for use with RackConnect. +// If you do not see a pool that you expect to see, contact your Support team +// for further assistance. The Load Balancer Pool id returned by these calls is +// automatically generated by the RackConnect automation and will remain constant +// unless the Load Balancer Pool is renamed on your hardware load balancer. +// All Load Balancer Pools will currently return a status of ACTIVE. Future +// features may introduce additional statuses. +// Node status values are ADDING, ACTIVE, REMOVING, ADD_FAILED, and REMOVE_FAILED. +// The cloud_servers node count will only include Cloud Servers from the specified +// cloud account. Any dedicated servers or cloud servers from another cloud account +// on the same RackConnect Configuration will be counted as external nodes. 
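+//
+// A minimal iteration sketch, assuming c is an authenticated
+// *gophercloud.ServiceClient (error handling elided):
+//
+//	lbpools.List(c).EachPage(func(page pagination.Page) (bool, error) {
+//		pools, _ := lbpools.ExtractPools(page)
+//		fmt.Println(len(pools))
+//		return true, nil
+//	})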
+package lbpools diff --git a/vendor/github.com/rackspace/gophercloud/rackspace/rackconnect/v3/lbpools/requests.go b/vendor/github.com/rackspace/gophercloud/rackspace/rackconnect/v3/lbpools/requests.go new file mode 100644 index 000000000..c300c56c1 --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/rackspace/rackconnect/v3/lbpools/requests.go @@ -0,0 +1,146 @@ +package lbpools + +import ( + "github.com/rackspace/gophercloud" + "github.com/rackspace/gophercloud/pagination" +) + +// List returns all load balancer pools that are associated with RackConnect. +func List(c *gophercloud.ServiceClient) pagination.Pager { + url := listURL(c) + createPage := func(r pagination.PageResult) pagination.Page { + return PoolPage{pagination.SinglePageBase(r)} + } + return pagination.NewPager(c, url, createPage) +} + +// Get retrieves a specific load balancer pool (that is associated with RackConnect) +// based on its unique ID. +func Get(c *gophercloud.ServiceClient, id string) GetResult { + var res GetResult + _, res.Err = c.Get(getURL(c, id), &res.Body, nil) + return res +} + +// ListNodes returns all load balancer pool nodes that are associated with RackConnect +// for the given LB pool ID. +func ListNodes(c *gophercloud.ServiceClient, id string) pagination.Pager { + url := listNodesURL(c, id) + createPage := func(r pagination.PageResult) pagination.Page { + return NodePage{pagination.SinglePageBase(r)} + } + return pagination.NewPager(c, url, createPage) +} + +// CreateNode adds the cloud server with the given serverID to the load balancer +// pool with the given poolID. +func CreateNode(c *gophercloud.ServiceClient, poolID, serverID string) CreateNodeResult { + var res CreateNodeResult + reqBody := map[string]interface{}{ + "cloud_server": map[string]string{ + "id": serverID, + }, + } + _, res.Err = c.Post(createNodeURL(c, poolID), reqBody, &res.Body, nil) + return res +} + +// ListNodesDetails returns all load balancer pool nodes that are associated with RackConnect +// for the given LB pool ID with all their details. +func ListNodesDetails(c *gophercloud.ServiceClient, id string) pagination.Pager { + url := listNodesDetailsURL(c, id) + createPage := func(r pagination.PageResult) pagination.Page { + return NodeDetailsPage{pagination.SinglePageBase(r)} + } + return pagination.NewPager(c, url, createPage) +} + +// GetNode retrieves a specific LB pool node (that is associated with RackConnect) +// based on its unique ID and the LB pool's unique ID. +func GetNode(c *gophercloud.ServiceClient, poolID, nodeID string) GetNodeResult { + var res GetNodeResult + _, res.Err = c.Get(nodeURL(c, poolID, nodeID), &res.Body, nil) + return res +} + +// DeleteNode removes the node with the given nodeID from the LB pool with the +// given poolID. +func DeleteNode(c *gophercloud.ServiceClient, poolID, nodeID string) DeleteNodeResult { + var res DeleteNodeResult + _, res.Err = c.Delete(deleteNodeURL(c, poolID, nodeID), nil) + return res +} + +// GetNodeDetails retrieves a specific LB pool node's details based on its unique +// ID and the LB pool's unique ID. +func GetNodeDetails(c *gophercloud.ServiceClient, poolID, nodeID string) GetNodeDetailsResult { + var res GetNodeDetailsResult + _, res.Err = c.Get(nodeDetailsURL(c, poolID, nodeID), &res.Body, nil) + return res +} + +// NodeOpts are options for bulk adding/deleting nodes to LB pools. +type NodeOpts struct { + ServerID string + PoolID string +} + +// NodesOpts are a slice of NodeOpts, passed as options for bulk operations. 
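+//
+// A bulk-add sketch, assuming c is an authenticated *gophercloud.ServiceClient;
+// the IDs are placeholders:
+//
+//	opts := lbpools.NodesOpts{
+//		{ServerID: "server-id", PoolID: "pool-id"},
+//	}
+//	nodes, err := lbpools.CreateNodes(c, opts).Extract()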
+type NodesOpts []NodeOpts
+
+// ToLBPoolCreateNodesMap serializes a NodesOpts into a slice of maps to send in the request.
+func (o NodesOpts) ToLBPoolCreateNodesMap() ([]map[string]interface{}, error) {
+	m := make([]map[string]interface{}, len(o))
+	for i := range o {
+		m[i] = map[string]interface{}{
+			"cloud_server": map[string]string{
+				"id": o[i].ServerID,
+			},
+			"load_balancer_pool": map[string]string{
+				"id": o[i].PoolID,
+			},
+		}
+	}
+	return m, nil
+}
+
+// CreateNodes adds the cloud servers with the given serverIDs to the corresponding
+// load balancer pools with the given poolIDs.
+func CreateNodes(c *gophercloud.ServiceClient, opts NodesOpts) CreateNodesResult {
+	var res CreateNodesResult
+	reqBody, err := opts.ToLBPoolCreateNodesMap()
+	if err != nil {
+		res.Err = err
+		return res
+	}
+
+	_, res.Err = c.Post(createNodesURL(c), reqBody, &res.Body, nil)
+	return res
+}
+
+// DeleteNodes removes the cloud servers with the given serverIDs from the corresponding
+// load balancer pools with the given poolIDs.
+func DeleteNodes(c *gophercloud.ServiceClient, opts NodesOpts) DeleteNodesResult {
+	var res DeleteNodesResult
+	reqBody, err := opts.ToLBPoolCreateNodesMap()
+	if err != nil {
+		res.Err = err
+		return res
+	}
+
+	_, res.Err = c.Request("DELETE", createNodesURL(c), gophercloud.RequestOpts{
+		JSONBody: &reqBody,
+		OkCodes:  []int{204},
+	})
+	return res
+}
+
+// ListNodesDetailsForServer is similar to ListNodesDetails but only returns nodes
+// for the given serverID.
+func ListNodesDetailsForServer(c *gophercloud.ServiceClient, serverID string) pagination.Pager {
+	url := listNodesForServerURL(c, serverID)
+	createPage := func(r pagination.PageResult) pagination.Page {
+		return NodeDetailsForServerPage{pagination.SinglePageBase(r)}
+	}
+	return pagination.NewPager(c, url, createPage)
+}
diff --git a/vendor/github.com/rackspace/gophercloud/rackspace/rackconnect/v3/lbpools/requests_test.go b/vendor/github.com/rackspace/gophercloud/rackspace/rackconnect/v3/lbpools/requests_test.go
new file mode 100644
index 000000000..48ebcece1
--- /dev/null
+++ b/vendor/github.com/rackspace/gophercloud/rackspace/rackconnect/v3/lbpools/requests_test.go
@@ -0,0 +1,876 @@
+package lbpools
+
+import (
+	"fmt"
+	"net/http"
+	"testing"
+	"time"
+
+	"github.com/rackspace/gophercloud/pagination"
+	th "github.com/rackspace/gophercloud/testhelper"
+	"github.com/rackspace/gophercloud/testhelper/client"
+	fake "github.com/rackspace/gophercloud/testhelper/client"
+)
+
+func TestListPools(t *testing.T) {
+	th.SetupHTTP()
+	defer th.TeardownHTTP()
+	th.Mux.HandleFunc("/load_balancer_pools", func(w http.ResponseWriter, r *http.Request) {
+		th.TestMethod(t, r, "GET")
+		th.TestHeader(t, r, "X-Auth-Token", fake.TokenID)
+		th.TestHeader(t, r, "Accept", "application/json")
+
+		w.Header().Set("Content-Type", "application/json")
+		fmt.Fprintf(w, `[
+      {
+        "id": "d6d3aa7c-dfa5-4e61-96ee-1d54ac1075d2",
+        "name": "RCv3Test",
+        "node_counts": {
+          "cloud_servers": 3,
+          "external": 4,
+          "total": 7
+        },
+        "port": 80,
+        "status": "ACTIVE",
+        "status_detail": null,
+        "virtual_ip": "203.0.113.5"
+      },
+      {
+        "id": "33021100-4abf-4836-9080-465a6d87ab68",
+        "name": "RCv3Test2",
+        "node_counts": {
+          "cloud_servers": 1,
+          "external": 0,
+          "total": 1
+        },
+        "port": 80,
+        "status": "ACTIVE",
+        "status_detail": null,
+        "virtual_ip": "203.0.113.7"
+      },
+      {
+        "id": "b644350a-301b-47b5-a411-c6e0f933c347",
+        "name": "RCv3Test3",
+        "node_counts": {
+          "cloud_servers": 2,
+          "external": 3,
+          "total": 5
+        },
+        "port": 443,
+        "status": "ACTIVE",
+
"status_detail": null, + "virtual_ip": "203.0.113.15" + } + ]`) + }) + + expected := []Pool{ + Pool{ + ID: "d6d3aa7c-dfa5-4e61-96ee-1d54ac1075d2", + Name: "RCv3Test", + NodeCounts: struct { + CloudServers int `mapstructure:"cloud_servers"` + External int `mapstructure:"external"` + Total int `mapstructure:"total"` + }{ + CloudServers: 3, + External: 4, + Total: 7, + }, + Port: 80, + Status: "ACTIVE", + VirtualIP: "203.0.113.5", + }, + Pool{ + ID: "33021100-4abf-4836-9080-465a6d87ab68", + Name: "RCv3Test2", + NodeCounts: struct { + CloudServers int `mapstructure:"cloud_servers"` + External int `mapstructure:"external"` + Total int `mapstructure:"total"` + }{ + CloudServers: 1, + External: 0, + Total: 1, + }, + Port: 80, + Status: "ACTIVE", + VirtualIP: "203.0.113.7", + }, + Pool{ + ID: "b644350a-301b-47b5-a411-c6e0f933c347", + Name: "RCv3Test3", + NodeCounts: struct { + CloudServers int `mapstructure:"cloud_servers"` + External int `mapstructure:"external"` + Total int `mapstructure:"total"` + }{ + CloudServers: 2, + External: 3, + Total: 5, + }, + Port: 443, + Status: "ACTIVE", + VirtualIP: "203.0.113.15", + }, + } + + count := 0 + err := List(fake.ServiceClient()).EachPage(func(page pagination.Page) (bool, error) { + count++ + actual, err := ExtractPools(page) + th.AssertNoErr(t, err) + + th.CheckDeepEquals(t, expected, actual) + + return true, nil + }) + th.AssertNoErr(t, err) + th.CheckEquals(t, count, 1) +} + +func TestGetLBPool(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + th.Mux.HandleFunc("/load_balancer_pools/d6d3aa7c-dfa5-4e61-96ee-1d54ac1075d2", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + th.TestHeader(t, r, "Accept", "application/json") + + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + fmt.Fprintf(w, `{ + "id": "d6d3aa7c-dfa5-4e61-96ee-1d54ac1075d2", + "name": "RCv3Test", + "node_counts": { + "cloud_servers": 3, + "external": 4, + "total": 7 + }, + "port": 80, + "status": "ACTIVE", + "status_detail": null, + "virtual_ip": "203.0.113.5" + }`) + }) + + expected := &Pool{ + ID: "d6d3aa7c-dfa5-4e61-96ee-1d54ac1075d2", + Name: "RCv3Test", + NodeCounts: struct { + CloudServers int `mapstructure:"cloud_servers"` + External int `mapstructure:"external"` + Total int `mapstructure:"total"` + }{ + CloudServers: 3, + External: 4, + Total: 7, + }, + Port: 80, + Status: "ACTIVE", + VirtualIP: "203.0.113.5", + } + + actual, err := Get(fake.ServiceClient(), "d6d3aa7c-dfa5-4e61-96ee-1d54ac1075d2").Extract() + th.AssertNoErr(t, err) + + th.AssertDeepEquals(t, expected, actual) +} + +func TestListNodes(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + th.Mux.HandleFunc("/load_balancer_pools/d6d3aa7c-dfa5-4e61-96ee-1d54ac1075d2/nodes", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + th.TestHeader(t, r, "Accept", "application/json") + + w.Header().Set("Content-Type", "application/json") + fmt.Fprintf(w, `[ + { + "created": "2014-05-30T03:23:42Z", + "cloud_server": { + "id": "d95ae0c4-6ab8-4873-b82f-f8433840cff2" + }, + "id": "1860451d-fb89-45b8-b54e-151afceb50e5", + "load_balancer_pool": { + "id": "d6d3aa7c-dfa5-4e61-96ee-1d54ac1075d2" + }, + "status": "ACTIVE", + "updated": "2014-05-30T03:24:18Z" + }, + { + "created": "2014-05-31T08:23:12Z", + "cloud_server": { + "id": "f28b870f-a063-498a-8b12-7025e5b1caa6" + }, + "id": "b70481dd-7edf-4dbb-a44b-41cc7679d4fb", + 
"load_balancer_pool": { + "id": "d6d3aa7c-dfa5-4e61-96ee-1d54ac1075d2" + }, + "status": "ADDING", + "updated": "2014-05-31T08:23:26Z" + }, + { + "created": "2014-05-31T08:23:18Z", + "cloud_server": { + "id": "a3d3a6b3-e4e4-496f-9a3d-5c987163e458" + }, + "id": "ced9ddc8-6fae-4e72-9457-16ead52b5515", + "load_balancer_pool": { + "id": "d6d3aa7c-dfa5-4e61-96ee-1d54ac1075d2" + }, + "status": "ADD_FAILED", + "status_detail": "Unable to communicate with network device", + "updated": "2014-05-31T08:24:36Z" + } + ]`) + }) + + expected := []Node{ + Node{ + CreatedAt: time.Date(2014, 5, 30, 3, 23, 42, 0, time.UTC), + CloudServer: struct { + ID string `mapstructure:"id"` + }{ + ID: "d95ae0c4-6ab8-4873-b82f-f8433840cff2", + }, + ID: "1860451d-fb89-45b8-b54e-151afceb50e5", + LoadBalancerPool: struct { + ID string `mapstructure:"id"` + }{ + ID: "d6d3aa7c-dfa5-4e61-96ee-1d54ac1075d2", + }, + Status: "ACTIVE", + UpdatedAt: time.Date(2014, 5, 30, 3, 24, 18, 0, time.UTC), + }, + Node{ + CreatedAt: time.Date(2014, 5, 31, 8, 23, 12, 0, time.UTC), + CloudServer: struct { + ID string `mapstructure:"id"` + }{ + ID: "f28b870f-a063-498a-8b12-7025e5b1caa6", + }, + ID: "b70481dd-7edf-4dbb-a44b-41cc7679d4fb", + LoadBalancerPool: struct { + ID string `mapstructure:"id"` + }{ + ID: "d6d3aa7c-dfa5-4e61-96ee-1d54ac1075d2", + }, + Status: "ADDING", + UpdatedAt: time.Date(2014, 5, 31, 8, 23, 26, 0, time.UTC), + }, + Node{ + CreatedAt: time.Date(2014, 5, 31, 8, 23, 18, 0, time.UTC), + CloudServer: struct { + ID string `mapstructure:"id"` + }{ + ID: "a3d3a6b3-e4e4-496f-9a3d-5c987163e458", + }, + ID: "ced9ddc8-6fae-4e72-9457-16ead52b5515", + LoadBalancerPool: struct { + ID string `mapstructure:"id"` + }{ + ID: "d6d3aa7c-dfa5-4e61-96ee-1d54ac1075d2", + }, + Status: "ADD_FAILED", + StatusDetail: "Unable to communicate with network device", + UpdatedAt: time.Date(2014, 5, 31, 8, 24, 36, 0, time.UTC), + }, + } + + count := 0 + err := ListNodes(fake.ServiceClient(), "d6d3aa7c-dfa5-4e61-96ee-1d54ac1075d2").EachPage(func(page pagination.Page) (bool, error) { + count++ + actual, err := ExtractNodes(page) + th.AssertNoErr(t, err) + + th.CheckDeepEquals(t, expected, actual) + + return true, nil + }) + th.AssertNoErr(t, err) + th.CheckEquals(t, count, 1) +} + +func TestCreateNode(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + th.Mux.HandleFunc("/load_balancer_pools/d6d3aa7c-dfa5-4e61-96ee-1d54ac1075d2/nodes", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "POST") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + th.TestHeader(t, r, "Accept", "application/json") + th.TestJSONRequest(t, r, ` + { + "cloud_server": { + "id": "d95ae0c4-6ab8-4873-b82f-f8433840cff2" + } + } + `) + + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(http.StatusCreated) + fmt.Fprintf(w, ` + { + "created": "2014-05-30T03:23:42Z", + "cloud_server": { + "id": "d95ae0c4-6ab8-4873-b82f-f8433840cff2" + }, + "id": "1860451d-fb89-45b8-b54e-151afceb50e5", + "load_balancer_pool": { + "id": "d6d3aa7c-dfa5-4e61-96ee-1d54ac1075d2" + }, + "status": "ACTIVE", + "status_detail": null, + "updated": "2014-05-30T03:24:18Z" + } + `) + }) + + expected := &Node{ + CreatedAt: time.Date(2014, 5, 30, 3, 23, 42, 0, time.UTC), + CloudServer: struct { + ID string `mapstructure:"id"` + }{ + ID: "d95ae0c4-6ab8-4873-b82f-f8433840cff2", + }, + ID: "1860451d-fb89-45b8-b54e-151afceb50e5", + LoadBalancerPool: struct { + ID string `mapstructure:"id"` + }{ + ID: "d6d3aa7c-dfa5-4e61-96ee-1d54ac1075d2", + }, + Status: "ACTIVE", + UpdatedAt: 
time.Date(2014, 5, 30, 3, 24, 18, 0, time.UTC), + } + + actual, err := CreateNode(fake.ServiceClient(), "d6d3aa7c-dfa5-4e61-96ee-1d54ac1075d2", "d95ae0c4-6ab8-4873-b82f-f8433840cff2").Extract() + th.AssertNoErr(t, err) + + th.AssertDeepEquals(t, expected, actual) +} + +func TestListNodesDetails(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + th.Mux.HandleFunc("/load_balancer_pools/d6d3aa7c-dfa5-4e61-96ee-1d54ac1075d2/nodes/details", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + th.TestHeader(t, r, "Accept", "application/json") + + w.Header().Set("Content-Type", "application/json") + fmt.Fprintf(w, ` + [ + { + "created": "2014-05-30T03:23:42Z", + "cloud_server": { + "cloud_network": { + "cidr": "192.168.100.0/24", + "created": "2014-05-25T01:23:42Z", + "id": "07426958-1ebf-4c38-b032-d456820ca21a", + "name": "RC-CLOUD", + "private_ip_v4": "192.168.100.5", + "updated": "2014-05-25T02:28:44Z" + }, + "created": "2014-05-30T02:18:42Z", + "id": "d95ae0c4-6ab8-4873-b82f-f8433840cff2", + "name": "RCv3TestServer1", + "updated": "2014-05-30T02:19:18Z" + }, + "id": "1860451d-fb89-45b8-b54e-151afceb50e5", + "load_balancer_pool": { + "id": "d6d3aa7c-dfa5-4e61-96ee-1d54ac1075d2", + "name": "RCv3Test", + "node_counts": { + "cloud_servers": 3, + "external": 4, + "total": 7 + }, + "port": 80, + "status": "ACTIVE", + "status_detail": null, + "virtual_ip": "203.0.113.5" + }, + "status": "ACTIVE", + "status_detail": null, + "updated": "2014-05-30T03:24:18Z" + } + ] + `) + }) + + expected := []NodeDetails{ + NodeDetails{ + CreatedAt: time.Date(2014, 5, 30, 3, 23, 42, 0, time.UTC), + CloudServer: struct { + ID string `mapstructure:"id"` + Name string `mapstructure:"name"` + CloudNetwork struct { + ID string `mapstructure:"id"` + Name string `mapstructure:"name"` + PrivateIPv4 string `mapstructure:"private_ip_v4"` + CIDR string `mapstructure:"cidr"` + CreatedAt time.Time `mapstructure:"-"` + UpdatedAt time.Time `mapstructure:"-"` + } `mapstructure:"cloud_network"` + CreatedAt time.Time `mapstructure:"-"` + UpdatedAt time.Time `mapstructure:"-"` + }{ + ID: "d95ae0c4-6ab8-4873-b82f-f8433840cff2", + CloudNetwork: struct { + ID string `mapstructure:"id"` + Name string `mapstructure:"name"` + PrivateIPv4 string `mapstructure:"private_ip_v4"` + CIDR string `mapstructure:"cidr"` + CreatedAt time.Time `mapstructure:"-"` + UpdatedAt time.Time `mapstructure:"-"` + }{ + ID: "07426958-1ebf-4c38-b032-d456820ca21a", + CIDR: "192.168.100.0/24", + CreatedAt: time.Date(2014, 5, 25, 1, 23, 42, 0, time.UTC), + Name: "RC-CLOUD", + PrivateIPv4: "192.168.100.5", + UpdatedAt: time.Date(2014, 5, 25, 2, 28, 44, 0, time.UTC), + }, + CreatedAt: time.Date(2014, 5, 30, 2, 18, 42, 0, time.UTC), + Name: "RCv3TestServer1", + UpdatedAt: time.Date(2014, 5, 30, 2, 19, 18, 0, time.UTC), + }, + ID: "1860451d-fb89-45b8-b54e-151afceb50e5", + LoadBalancerPool: Pool{ + ID: "d6d3aa7c-dfa5-4e61-96ee-1d54ac1075d2", + Name: "RCv3Test", + NodeCounts: struct { + CloudServers int `mapstructure:"cloud_servers"` + External int `mapstructure:"external"` + Total int `mapstructure:"total"` + }{ + CloudServers: 3, + External: 4, + Total: 7, + }, + Port: 80, + Status: "ACTIVE", + VirtualIP: "203.0.113.5", + }, + Status: "ACTIVE", + UpdatedAt: time.Date(2014, 5, 30, 3, 24, 18, 0, time.UTC), + }, + } + count := 0 + err := ListNodesDetails(fake.ServiceClient(), "d6d3aa7c-dfa5-4e61-96ee-1d54ac1075d2").EachPage(func(page pagination.Page) (bool, error) { + count++ + actual, err 
:= ExtractNodesDetails(page) + th.AssertNoErr(t, err) + + th.CheckDeepEquals(t, expected, actual) + + return true, nil + }) + th.AssertNoErr(t, err) + th.CheckEquals(t, count, 1) +} + +func TestGetNode(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + th.Mux.HandleFunc("/load_balancer_pools/d6d3aa7c-dfa5-4e61-96ee-1d54ac1075d2/nodes/1860451d-fb89-45b8-b54e-151afceb50e5", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + th.TestHeader(t, r, "Accept", "application/json") + + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + fmt.Fprintf(w, ` + { + "created": "2014-05-30T03:23:42Z", + "cloud_server": { + "id": "d95ae0c4-6ab8-4873-b82f-f8433840cff2" + }, + "id": "1860451d-fb89-45b8-b54e-151afceb50e5", + "load_balancer_pool": { + "id": "d6d3aa7c-dfa5-4e61-96ee-1d54ac1075d2" + }, + "status": "ACTIVE", + "status_detail": null, + "updated": "2014-05-30T03:24:18Z" + } + `) + }) + + expected := &Node{ + CreatedAt: time.Date(2014, 5, 30, 3, 23, 42, 0, time.UTC), + CloudServer: struct { + ID string `mapstructure:"id"` + }{ + ID: "d95ae0c4-6ab8-4873-b82f-f8433840cff2", + }, + ID: "1860451d-fb89-45b8-b54e-151afceb50e5", + LoadBalancerPool: struct { + ID string `mapstructure:"id"` + }{ + ID: "d6d3aa7c-dfa5-4e61-96ee-1d54ac1075d2", + }, + Status: "ACTIVE", + UpdatedAt: time.Date(2014, 5, 30, 3, 24, 18, 0, time.UTC), + } + + actual, err := GetNode(fake.ServiceClient(), "d6d3aa7c-dfa5-4e61-96ee-1d54ac1075d2", "1860451d-fb89-45b8-b54e-151afceb50e5").Extract() + th.AssertNoErr(t, err) + + th.AssertDeepEquals(t, expected, actual) +} + +func TestDeleteNode(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + th.Mux.HandleFunc("/load_balancer_pools/d6d3aa7c-dfa5-4e61-96ee-1d54ac1075d2/nodes/1860451d-fb89-45b8-b54e-151afceb50e5", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "DELETE") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + th.TestHeader(t, r, "Accept", "application/json") + + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(http.StatusNoContent) + }) + + err := DeleteNode(client.ServiceClient(), "d6d3aa7c-dfa5-4e61-96ee-1d54ac1075d2", "1860451d-fb89-45b8-b54e-151afceb50e5").ExtractErr() + th.AssertNoErr(t, err) +} + +func TestGetNodeDetails(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + th.Mux.HandleFunc("/load_balancer_pools/d6d3aa7c-dfa5-4e61-96ee-1d54ac1075d2/nodes/d95ae0c4-6ab8-4873-b82f-f8433840cff2/details", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + th.TestHeader(t, r, "Accept", "application/json") + + w.Header().Set("Content-Type", "application/json") + fmt.Fprintf(w, ` + { + "created": "2014-05-30T03:23:42Z", + "cloud_server": { + "cloud_network": { + "cidr": "192.168.100.0/24", + "created": "2014-05-25T01:23:42Z", + "id": "07426958-1ebf-4c38-b032-d456820ca21a", + "name": "RC-CLOUD", + "private_ip_v4": "192.168.100.5", + "updated": "2014-05-25T02:28:44Z" + }, + "created": "2014-05-30T02:18:42Z", + "id": "d95ae0c4-6ab8-4873-b82f-f8433840cff2", + "name": "RCv3TestServer1", + "updated": "2014-05-30T02:19:18Z" + }, + "id": "1860451d-fb89-45b8-b54e-151afceb50e5", + "load_balancer_pool": { + "id": "d6d3aa7c-dfa5-4e61-96ee-1d54ac1075d2", + "name": "RCv3Test", + "node_counts": { + "cloud_servers": 3, + "external": 4, + "total": 7 + }, + "port": 80, + "status": "ACTIVE", + "status_detail": null, + "virtual_ip": "203.0.113.5" + }, 
+ "status": "ACTIVE", + "status_detail": null, + "updated": "2014-05-30T03:24:18Z" + } + `) + }) + + expected := &NodeDetails{ + CreatedAt: time.Date(2014, 5, 30, 3, 23, 42, 0, time.UTC), + CloudServer: struct { + ID string `mapstructure:"id"` + Name string `mapstructure:"name"` + CloudNetwork struct { + ID string `mapstructure:"id"` + Name string `mapstructure:"name"` + PrivateIPv4 string `mapstructure:"private_ip_v4"` + CIDR string `mapstructure:"cidr"` + CreatedAt time.Time `mapstructure:"-"` + UpdatedAt time.Time `mapstructure:"-"` + } `mapstructure:"cloud_network"` + CreatedAt time.Time `mapstructure:"-"` + UpdatedAt time.Time `mapstructure:"-"` + }{ + ID: "d95ae0c4-6ab8-4873-b82f-f8433840cff2", + CloudNetwork: struct { + ID string `mapstructure:"id"` + Name string `mapstructure:"name"` + PrivateIPv4 string `mapstructure:"private_ip_v4"` + CIDR string `mapstructure:"cidr"` + CreatedAt time.Time `mapstructure:"-"` + UpdatedAt time.Time `mapstructure:"-"` + }{ + ID: "07426958-1ebf-4c38-b032-d456820ca21a", + CIDR: "192.168.100.0/24", + CreatedAt: time.Date(2014, 5, 25, 1, 23, 42, 0, time.UTC), + Name: "RC-CLOUD", + PrivateIPv4: "192.168.100.5", + UpdatedAt: time.Date(2014, 5, 25, 2, 28, 44, 0, time.UTC), + }, + CreatedAt: time.Date(2014, 5, 30, 2, 18, 42, 0, time.UTC), + Name: "RCv3TestServer1", + UpdatedAt: time.Date(2014, 5, 30, 2, 19, 18, 0, time.UTC), + }, + ID: "1860451d-fb89-45b8-b54e-151afceb50e5", + LoadBalancerPool: Pool{ + ID: "d6d3aa7c-dfa5-4e61-96ee-1d54ac1075d2", + Name: "RCv3Test", + NodeCounts: struct { + CloudServers int `mapstructure:"cloud_servers"` + External int `mapstructure:"external"` + Total int `mapstructure:"total"` + }{ + CloudServers: 3, + External: 4, + Total: 7, + }, + Port: 80, + Status: "ACTIVE", + VirtualIP: "203.0.113.5", + }, + Status: "ACTIVE", + UpdatedAt: time.Date(2014, 5, 30, 3, 24, 18, 0, time.UTC), + } + + actual, err := GetNodeDetails(fake.ServiceClient(), "d6d3aa7c-dfa5-4e61-96ee-1d54ac1075d2", "d95ae0c4-6ab8-4873-b82f-f8433840cff2").Extract() + th.AssertNoErr(t, err) + th.CheckDeepEquals(t, expected, actual) +} + +func TestCreateNodes(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + th.Mux.HandleFunc("/load_balancer_pools/nodes", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "POST") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + th.TestHeader(t, r, "Accept", "application/json") + th.TestJSONRequest(t, r, ` + [ + { + "cloud_server": { + "id": "d95ae0c4-6ab8-4873-b82f-f8433840cff2" + }, + "load_balancer_pool": { + "id": "d6d3aa7c-dfa5-4e61-96ee-1d54ac1075d2" + } + }, + { + "cloud_server": { + "id": "d95ae0c4-6ab8-4873-b82f-f8433840cff2" + }, + "load_balancer_pool": { + "id": "33021100-4abf-4836-9080-465a6d87ab68" + } + } + ] + `) + + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(http.StatusCreated) + fmt.Fprintf(w, ` + [ + { + "created": "2014-05-30T03:23:42Z", + "cloud_server": { + "id": "d95ae0c4-6ab8-4873-b82f-f8433840cff2" + }, + "id": "1860451d-fb89-45b8-b54e-151afceb50e5", + "load_balancer_pool": { + "id": "d6d3aa7c-dfa5-4e61-96ee-1d54ac1075d2" + }, + "status": "ADDING", + "status_detail": null, + "updated": null + }, + { + "created": "2014-05-31T08:23:12Z", + "cloud_server": { + "id": "d95ae0c4-6ab8-4873-b82f-f8433840cff2" + }, + "id": "b70481dd-7edf-4dbb-a44b-41cc7679d4fb", + "load_balancer_pool": { + "id": "33021100-4abf-4836-9080-465a6d87ab68" + }, + "status": "ADDING", + "status_detail": null, + "updated": null + } + ] + `) + }) + + expected := []Node{ + Node{ + CreatedAt: 
time.Date(2014, 5, 30, 3, 23, 42, 0, time.UTC), + CloudServer: struct { + ID string `mapstructure:"id"` + }{ + ID: "d95ae0c4-6ab8-4873-b82f-f8433840cff2", + }, + ID: "1860451d-fb89-45b8-b54e-151afceb50e5", + LoadBalancerPool: struct { + ID string `mapstructure:"id"` + }{ + ID: "d6d3aa7c-dfa5-4e61-96ee-1d54ac1075d2", + }, + Status: "ADDING", + }, + Node{ + CreatedAt: time.Date(2014, 5, 31, 8, 23, 12, 0, time.UTC), + CloudServer: struct { + ID string `mapstructure:"id"` + }{ + ID: "d95ae0c4-6ab8-4873-b82f-f8433840cff2", + }, + ID: "b70481dd-7edf-4dbb-a44b-41cc7679d4fb", + LoadBalancerPool: struct { + ID string `mapstructure:"id"` + }{ + ID: "33021100-4abf-4836-9080-465a6d87ab68", + }, + Status: "ADDING", + }, + } + + opts := NodesOpts{ + NodeOpts{ + ServerID: "d95ae0c4-6ab8-4873-b82f-f8433840cff2", + PoolID: "d6d3aa7c-dfa5-4e61-96ee-1d54ac1075d2", + }, + NodeOpts{ + ServerID: "d95ae0c4-6ab8-4873-b82f-f8433840cff2", + PoolID: "33021100-4abf-4836-9080-465a6d87ab68", + }, + } + actual, err := CreateNodes(fake.ServiceClient(), opts).Extract() + th.AssertNoErr(t, err) + th.AssertDeepEquals(t, expected, actual) +} + +func TestDeleteNodes(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + th.Mux.HandleFunc("/load_balancer_pools/nodes", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "DELETE") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + th.TestHeader(t, r, "Accept", "application/json") + th.TestJSONRequest(t, r, ` + [ + { + "cloud_server": { + "id": "d95ae0c4-6ab8-4873-b82f-f8433840cff2" + }, + "load_balancer_pool": { + "id": "d6d3aa7c-dfa5-4e61-96ee-1d54ac1075d2" + } + }, + { + "cloud_server": { + "id": "d95ae0c4-6ab8-4873-b82f-f8433840cff2" + }, + "load_balancer_pool": { + "id": "33021100-4abf-4836-9080-465a6d87ab68" + } + } + ] + `) + + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(http.StatusNoContent) + }) + + opts := NodesOpts{ + NodeOpts{ + ServerID: "d95ae0c4-6ab8-4873-b82f-f8433840cff2", + PoolID: "d6d3aa7c-dfa5-4e61-96ee-1d54ac1075d2", + }, + NodeOpts{ + ServerID: "d95ae0c4-6ab8-4873-b82f-f8433840cff2", + PoolID: "33021100-4abf-4836-9080-465a6d87ab68", + }, + } + err := DeleteNodes(client.ServiceClient(), opts).ExtractErr() + th.AssertNoErr(t, err) +} + +func TestListNodesForServerDetails(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + th.Mux.HandleFunc("/load_balancer_pools/nodes/details", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + th.TestHeader(t, r, "Accept", "application/json") + + w.Header().Set("Content-Type", "application/json") + fmt.Fprintf(w, ` + [ + { + "created": "2014-05-30T03:23:42Z", + "id": "1860451d-fb89-45b8-b54e-151afceb50e5", + "load_balancer_pool": { + "id": "d6d3aa7c-dfa5-4e61-96ee-1d54ac1075d2", + "name": "RCv3Test", + "node_counts": { + "cloud_servers": 3, + "external": 4, + "total": 7 + }, + "port": 80, + "status": "ACTIVE", + "status_detail": null, + "virtual_ip": "203.0.113.5" + }, + "status": "ACTIVE", + "status_detail": null, + "updated": "2014-05-30T03:24:18Z" + } + ] + `) + }) + + expected := []NodeDetailsForServer{ + NodeDetailsForServer{ + CreatedAt: time.Date(2014, 5, 30, 3, 23, 42, 0, time.UTC), + ID: "1860451d-fb89-45b8-b54e-151afceb50e5", + LoadBalancerPool: Pool{ + ID: "d6d3aa7c-dfa5-4e61-96ee-1d54ac1075d2", + Name: "RCv3Test", + NodeCounts: struct { + CloudServers int `mapstructure:"cloud_servers"` + External int `mapstructure:"external"` + Total int `mapstructure:"total"` + }{ + 
CloudServers: 3,
+					External:     4,
+					Total:        7,
+				},
+				Port:      80,
+				Status:    "ACTIVE",
+				VirtualIP: "203.0.113.5",
+			},
+			Status:    "ACTIVE",
+			UpdatedAt: time.Date(2014, 5, 30, 3, 24, 18, 0, time.UTC),
+		},
+	}
+	count := 0
+	err := ListNodesDetailsForServer(fake.ServiceClient(), "07426958-1ebf-4c38-b032-d456820ca21a").EachPage(func(page pagination.Page) (bool, error) {
+		count++
+		actual, err := ExtractNodesDetailsForServer(page)
+		th.AssertNoErr(t, err)
+
+		th.CheckDeepEquals(t, expected, actual)
+
+		return true, nil
+	})
+	th.AssertNoErr(t, err)
+	th.CheckEquals(t, count, 1)
+}
diff --git a/vendor/github.com/rackspace/gophercloud/rackspace/rackconnect/v3/lbpools/results.go b/vendor/github.com/rackspace/gophercloud/rackspace/rackconnect/v3/lbpools/results.go
new file mode 100644
index 000000000..e5e914b1e
--- /dev/null
+++ b/vendor/github.com/rackspace/gophercloud/rackspace/rackconnect/v3/lbpools/results.go
@@ -0,0 +1,505 @@
+package lbpools
+
+import (
+	"fmt"
+	"reflect"
+	"time"
+
+	"github.com/mitchellh/mapstructure"
+	"github.com/rackspace/gophercloud"
+	"github.com/rackspace/gophercloud/pagination"
+)
+
+// Pool represents a load balancer pool associated with a RackConnect configuration.
+type Pool struct {
+	// The unique ID of the load balancer pool.
+	ID string `mapstructure:"id"`
+	// The name of the load balancer pool.
+	Name string `mapstructure:"name"`
+	// The node counts associated with the load balancer pool.
+	NodeCounts struct {
+		// The number of nodes associated with this LB pool for this account.
+		CloudServers int `mapstructure:"cloud_servers"`
+		// The number of nodes associated with this LB pool from other accounts.
+		External int `mapstructure:"external"`
+		// The total number of nodes associated with this LB pool.
+		Total int `mapstructure:"total"`
+	} `mapstructure:"node_counts"`
+	// The port of the LB pool.
+	Port int `mapstructure:"port"`
+	// The status of the LB pool.
+	Status string `mapstructure:"status"`
+	// The details of the status of the LB pool.
+	StatusDetail string `mapstructure:"status_detail"`
+	// The virtual IP of the LB pool.
+	VirtualIP string `mapstructure:"virtual_ip"`
+}
+
+// PoolPage is the page returned by a pager when traversing over a
+// collection of Pools.
+type PoolPage struct {
+	pagination.SinglePageBase
+}
+
+// IsEmpty returns true if a PoolPage contains no Pools.
+func (r PoolPage) IsEmpty() (bool, error) {
+	cns, err := ExtractPools(r)
+	if err != nil {
+		return true, err
+	}
+	return len(cns) == 0, nil
+}
+
+// ExtractPools extracts and returns Pools. It is used while iterating over
+// an lbpools.List call.
+func ExtractPools(page pagination.Page) ([]Pool, error) {
+	var res []Pool
+	err := mapstructure.Decode(page.(PoolPage).Body, &res)
+	return res, err
+}
+
+// GetResult represents the result of a Get operation.
+type GetResult struct {
+	gophercloud.Result
+}
+
+// Extract is a function that extracts a Pool from a GetResult.
+func (r GetResult) Extract() (*Pool, error) {
+	if r.Err != nil {
+		return nil, r.Err
+	}
+	var res Pool
+	err := mapstructure.Decode(r.Body, &res)
+	return &res, err
+}
+
+// Node represents a load balancer pool node associated with a RackConnect configuration.
+type Node struct {
+	// The unique ID of the LB node.
+	ID string `mapstructure:"id"`
+	// The cloud server (node) of the load balancer pool.
+	CloudServer struct {
+		// The cloud server ID.
+		ID string `mapstructure:"id"`
+	} `mapstructure:"cloud_server"`
+	// The load balancer pool.
+	LoadBalancerPool struct {
+		// The LB pool ID.
+		ID string `mapstructure:"id"`
+	} `mapstructure:"load_balancer_pool"`
+	// The status of the LB node.
+	Status string `mapstructure:"status"`
+	// The details of the status of the LB node.
+	StatusDetail string `mapstructure:"status_detail"`
+	// The time the LB node was created.
+	CreatedAt time.Time `mapstructure:"-"`
+	// The time the LB node was last updated.
+	UpdatedAt time.Time `mapstructure:"-"`
+}
+
+// NodePage is the page returned by a pager when traversing over a
+// collection of Nodes.
+type NodePage struct {
+	pagination.SinglePageBase
+}
+
+// IsEmpty returns true if a NodePage contains no Nodes.
+func (r NodePage) IsEmpty() (bool, error) {
+	n, err := ExtractNodes(r)
+	if err != nil {
+		return true, err
+	}
+	return len(n) == 0, nil
+}
+
+// ExtractNodes extracts and returns a slice of Nodes. It is used while iterating over
+// an lbpools.ListNodes call.
+func ExtractNodes(page pagination.Page) ([]Node, error) {
+	var res []Node
+	casted := page.(NodePage).Body
+	err := mapstructure.Decode(casted, &res)
+
+	var rawNodes []interface{}
+	switch casted.(type) {
+	case interface{}:
+		rawNodes = casted.([]interface{})
+	default:
+		return res, fmt.Errorf("Unknown type: %v", reflect.TypeOf(casted))
+	}
+
+	for i := range rawNodes {
+		thisNode := (rawNodes[i]).(map[string]interface{})
+
+		if t, ok := thisNode["created"].(string); ok && t != "" {
+			creationTime, err := time.Parse(time.RFC3339, t)
+			if err != nil {
+				return res, err
+			}
+			res[i].CreatedAt = creationTime
+		}
+
+		if t, ok := thisNode["updated"].(string); ok && t != "" {
+			updatedTime, err := time.Parse(time.RFC3339, t)
+			if err != nil {
+				return res, err
+			}
+			res[i].UpdatedAt = updatedTime
+		}
+	}
+
+	return res, err
+}
+
+// NodeResult represents a result that can be extracted as a Node.
+type NodeResult struct {
+	gophercloud.Result
+}
+
+// CreateNodeResult represents the result of a CreateNode operation.
+type CreateNodeResult struct {
+	NodeResult
+}
+
+// GetNodeResult represents the result of a GetNode operation.
+type GetNodeResult struct {
+	NodeResult
+}
+
+// Extract is a function that extracts a Node from a NodeResult.
+func (r NodeResult) Extract() (*Node, error) {
+	if r.Err != nil {
+		return nil, r.Err
+	}
+	var res Node
+	err := mapstructure.Decode(r.Body, &res)
+
+	b := r.Body.(map[string]interface{})
+
+	if date, ok := b["created"]; ok && date != nil {
+		t, err := time.Parse(time.RFC3339, date.(string))
+		if err != nil {
+			return nil, err
+		}
+		res.CreatedAt = t
+	}
+
+	if date, ok := b["updated"]; ok && date != nil {
+		t, err := time.Parse(time.RFC3339, date.(string))
+		if err != nil {
+			return nil, err
+		}
+		res.UpdatedAt = t
+	}
+
+	return &res, err
+}
+
+// NodeDetails represents a load balancer pool node associated with a RackConnect configuration
+// with all its details.
+type NodeDetails struct {
+	// The unique ID of the LB node.
+	ID string `mapstructure:"id"`
+	// The cloud server (node) of the load balancer pool.
+	CloudServer struct {
+		// The cloud server ID.
+		ID string `mapstructure:"id"`
+		// The name of the server.
+		Name string `mapstructure:"name"`
+		// The cloud network for the cloud server.
+		CloudNetwork struct {
+			// The network ID.
+			ID string `mapstructure:"id"`
+			// The network name.
+			Name string `mapstructure:"name"`
+			// The network's private IPv4 address.
+			PrivateIPv4 string `mapstructure:"private_ip_v4"`
+			// The IP range for the network.
+			CIDR string `mapstructure:"cidr"`
+			// The datetime the network was created.
+			CreatedAt time.Time `mapstructure:"-"`
+			// The last datetime the network was updated.
+			UpdatedAt time.Time `mapstructure:"-"`
+		} `mapstructure:"cloud_network"`
+		// The datetime the server was created.
+		CreatedAt time.Time `mapstructure:"-"`
+		// The datetime the server was last updated.
+		UpdatedAt time.Time `mapstructure:"-"`
+	} `mapstructure:"cloud_server"`
+	// The load balancer pool.
+	LoadBalancerPool Pool `mapstructure:"load_balancer_pool"`
+	// The status of the LB node.
+	Status string `mapstructure:"status"`
+	// The details of the status of the LB node.
+	StatusDetail string `mapstructure:"status_detail"`
+	// The time the LB node was created.
+	CreatedAt time.Time `mapstructure:"-"`
+	// The time the LB node was last updated.
+	UpdatedAt time.Time `mapstructure:"-"`
+}
+
+// NodeDetailsPage is the page returned by a pager when traversing over a
+// collection of NodeDetails.
+type NodeDetailsPage struct {
+	pagination.SinglePageBase
+}
+
+// IsEmpty returns true if a NodeDetailsPage contains no NodeDetails.
+func (r NodeDetailsPage) IsEmpty() (bool, error) {
+	n, err := ExtractNodesDetails(r)
+	if err != nil {
+		return true, err
+	}
+	return len(n) == 0, nil
+}
+
+// ExtractNodesDetails extracts and returns a slice of NodeDetails. It is used while iterating over
+// an lbpools.ListNodesDetails call.
+func ExtractNodesDetails(page pagination.Page) ([]NodeDetails, error) {
+	var res []NodeDetails
+	casted := page.(NodeDetailsPage).Body
+	err := mapstructure.Decode(casted, &res)
+
+	var rawNodesDetails []interface{}
+	switch casted.(type) {
+	case interface{}:
+		rawNodesDetails = casted.([]interface{})
+	default:
+		return res, fmt.Errorf("Unknown type: %v", reflect.TypeOf(casted))
+	}
+
+	for i := range rawNodesDetails {
+		thisNodeDetails := (rawNodesDetails[i]).(map[string]interface{})
+
+		if t, ok := thisNodeDetails["created"].(string); ok && t != "" {
+			creationTime, err := time.Parse(time.RFC3339, t)
+			if err != nil {
+				return res, err
+			}
+			res[i].CreatedAt = creationTime
+		}
+
+		if t, ok := thisNodeDetails["updated"].(string); ok && t != "" {
+			updatedTime, err := time.Parse(time.RFC3339, t)
+			if err != nil {
+				return res, err
+			}
+			res[i].UpdatedAt = updatedTime
+		}
+
+		if cs, ok := thisNodeDetails["cloud_server"].(map[string]interface{}); ok {
+			if t, ok := cs["created"].(string); ok && t != "" {
+				creationTime, err := time.Parse(time.RFC3339, t)
+				if err != nil {
+					return res, err
+				}
+				res[i].CloudServer.CreatedAt = creationTime
+			}
+			if t, ok := cs["updated"].(string); ok && t != "" {
+				updatedTime, err := time.Parse(time.RFC3339, t)
+				if err != nil {
+					return res, err
+				}
+				res[i].CloudServer.UpdatedAt = updatedTime
+			}
+			if cn, ok := cs["cloud_network"].(map[string]interface{}); ok {
+				if t, ok := cn["created"].(string); ok && t != "" {
+					creationTime, err := time.Parse(time.RFC3339, t)
+					if err != nil {
+						return res, err
+					}
+					res[i].CloudServer.CloudNetwork.CreatedAt = creationTime
+				}
+				if t, ok := cn["updated"].(string); ok && t != "" {
+					updatedTime, err := time.Parse(time.RFC3339, t)
+					if err != nil {
+						return res, err
+					}
+					res[i].CloudServer.CloudNetwork.UpdatedAt = updatedTime
+				}
+			}
+		}
+	}
+
+	return res, err
+}
+
+// GetNodeDetailsResult represents the result of a GetNodeDetails operation.
+type GetNodeDetailsResult struct {
+	gophercloud.Result
+}
+
+// Extract is a function that extracts a NodeDetails from a GetNodeDetailsResult.
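+//
+// A minimal sketch, assuming c is an authenticated *gophercloud.ServiceClient;
+// the pool and node IDs are placeholders:
+//
+//	nd, err := lbpools.GetNodeDetails(c, "pool-id", "node-id").Extract()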
+func (r GetNodeDetailsResult) Extract() (*NodeDetails, error) { + if r.Err != nil { + return nil, r.Err + } + var res NodeDetails + err := mapstructure.Decode(r.Body, &res) + + b := r.Body.(map[string]interface{}) + + if date, ok := b["created"]; ok && date != nil { + t, err := time.Parse(time.RFC3339, date.(string)) + if err != nil { + return nil, err + } + res.CreatedAt = t + } + + if date, ok := b["updated"]; ok && date != nil { + t, err := time.Parse(time.RFC3339, date.(string)) + if err != nil { + return nil, err + } + res.UpdatedAt = t + } + + if cs, ok := b["cloud_server"].(map[string]interface{}); ok { + if t, ok := cs["created"].(string); ok && t != "" { + creationTime, err := time.Parse(time.RFC3339, t) + if err != nil { + return &res, err + } + res.CloudServer.CreatedAt = creationTime + } + if t, ok := cs["updated"].(string); ok && t != "" { + updatedTime, err := time.Parse(time.RFC3339, t) + if err != nil { + return &res, err + } + res.CloudServer.UpdatedAt = updatedTime + } + if cn, ok := cs["cloud_network"].(map[string]interface{}); ok { + if t, ok := cn["created"].(string); ok && t != "" { + creationTime, err := time.Parse(time.RFC3339, t) + if err != nil { + return &res, err + } + res.CloudServer.CloudNetwork.CreatedAt = creationTime + } + if t, ok := cn["updated"].(string); ok && t != "" { + updatedTime, err := time.Parse(time.RFC3339, t) + if err != nil { + return &res, err + } + res.CloudServer.CloudNetwork.UpdatedAt = updatedTime + } + } + } + + return &res, err +} + +// DeleteNodeResult represents the result of a DeleteNode operation. +type DeleteNodeResult struct { + gophercloud.ErrResult +} + +// CreateNodesResult represents the result of a CreateNodes operation. +type CreateNodesResult struct { + gophercloud.Result +} + +// Extract is a function that extracts a slice of Nodes from a CreateNodesResult. +func (r CreateNodesResult) Extract() ([]Node, error) { + if r.Err != nil { + return nil, r.Err + } + var res []Node + err := mapstructure.Decode(r.Body, &res) + + b := r.Body.([]interface{}) + for i := range b { + if date, ok := b[i].(map[string]interface{})["created"]; ok && date != nil { + t, err := time.Parse(time.RFC3339, date.(string)) + if err != nil { + return nil, err + } + res[i].CreatedAt = t + } + if date, ok := b[i].(map[string]interface{})["updated"]; ok && date != nil { + t, err := time.Parse(time.RFC3339, date.(string)) + if err != nil { + return nil, err + } + res[i].UpdatedAt = t + } + } + + return res, err +} + +// DeleteNodesResult represents the result of a DeleteNodes operation. +type DeleteNodesResult struct { + gophercloud.ErrResult +} + +// NodeDetailsForServer represents a load balancer pool node associated with a RackConnect configuration +// with all its details for a particular server. +type NodeDetailsForServer struct { + // The unique ID of the LB node. + ID string `mapstructure:"id"` + // The load balancer pool. + LoadBalancerPool Pool `mapstructure:"load_balancer_pool"` + // The status of the LB pool. + Status string `mapstructure:"status"` + // The details of the status of the LB pool. + StatusDetail string `mapstructure:"status_detail"` + // The time the LB node was created. + CreatedAt time.Time `mapstructure:"-"` + // The time the LB node was last updated. + UpdatedAt time.Time `mapstructure:"-"` +} + +// NodeDetailsForServerPage is the page returned by a pager when traversing over a +// collection of NodeDetailsForServer. 
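+//
+// Such pages are produced by ListNodesDetailsForServer; a sketch, with a
+// placeholder server ID:
+//
+//	pager := lbpools.ListNodesDetailsForServer(c, "server-id")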
+type NodeDetailsForServerPage struct { + pagination.SinglePageBase +} + +// IsEmpty returns true if a NodeDetailsForServerPage contains no NodeDetailsForServer. +func (r NodeDetailsForServerPage) IsEmpty() (bool, error) { + n, err := ExtractNodesDetailsForServer(r) + if err != nil { + return true, err + } + return len(n) == 0, nil +} + +// ExtractNodesDetailsForServer extracts and returns a slice of NodeDetailsForServer. It is used while iterating over +// an lbpools.ListNodesDetailsForServer call. +func ExtractNodesDetailsForServer(page pagination.Page) ([]NodeDetailsForServer, error) { + var res []NodeDetailsForServer + casted := page.(NodeDetailsForServerPage).Body + err := mapstructure.Decode(casted, &res) + + var rawNodesDetails []interface{} + switch casted.(type) { + case interface{}: + rawNodesDetails = casted.([]interface{}) + default: + return res, fmt.Errorf("Unknown type: %v", reflect.TypeOf(casted)) + } + + for i := range rawNodesDetails { + thisNodeDetails := (rawNodesDetails[i]).(map[string]interface{}) + + if t, ok := thisNodeDetails["created"].(string); ok && t != "" { + creationTime, err := time.Parse(time.RFC3339, t) + if err != nil { + return res, err + } + res[i].CreatedAt = creationTime + } + + if t, ok := thisNodeDetails["updated"].(string); ok && t != "" { + updatedTime, err := time.Parse(time.RFC3339, t) + if err != nil { + return res, err + } + res[i].UpdatedAt = updatedTime + } + } + + return res, err +} diff --git a/vendor/github.com/rackspace/gophercloud/rackspace/rackconnect/v3/lbpools/urls.go b/vendor/github.com/rackspace/gophercloud/rackspace/rackconnect/v3/lbpools/urls.go new file mode 100644 index 000000000..c238239f6 --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/rackspace/rackconnect/v3/lbpools/urls.go @@ -0,0 +1,49 @@ +package lbpools + +import "github.com/rackspace/gophercloud" + +var root = "load_balancer_pools" + +func listURL(c *gophercloud.ServiceClient) string { + return c.ServiceURL(root) +} + +func getURL(c *gophercloud.ServiceClient, id string) string { + return c.ServiceURL(root, id) +} + +func listNodesURL(c *gophercloud.ServiceClient, id string) string { + return c.ServiceURL(root, id, "nodes") +} + +func createNodeURL(c *gophercloud.ServiceClient, id string) string { + return listNodesURL(c, id) +} + +func listNodesDetailsURL(c *gophercloud.ServiceClient, id string) string { + return c.ServiceURL(root, id, "nodes", "details") +} + +func nodeURL(c *gophercloud.ServiceClient, poolID, nodeID string) string { + return c.ServiceURL(root, poolID, "nodes", nodeID) +} + +func deleteNodeURL(c *gophercloud.ServiceClient, poolID, nodeID string) string { + return nodeURL(c, poolID, nodeID) +} + +func nodeDetailsURL(c *gophercloud.ServiceClient, poolID, nodeID string) string { + return c.ServiceURL(root, poolID, "nodes", nodeID, "details") +} + +func createNodesURL(c *gophercloud.ServiceClient) string { + return c.ServiceURL(root, "nodes") +} + +func deleteNodesURL(c *gophercloud.ServiceClient) string { + return createNodesURL(c) +} + +func listNodesForServerURL(c *gophercloud.ServiceClient, serverID string) string { + return c.ServiceURL(root, "nodes", "details?cloud_server_id="+serverID) +} diff --git a/vendor/github.com/rackspace/gophercloud/rackspace/rackconnect/v3/publicips/requests.go b/vendor/github.com/rackspace/gophercloud/rackspace/rackconnect/v3/publicips/requests.go new file mode 100644 index 000000000..116426010 --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/rackspace/rackconnect/v3/publicips/requests.go @@ -0,0 
+1,50 @@ +package publicips + +import ( + "github.com/rackspace/gophercloud" + "github.com/rackspace/gophercloud/pagination" +) + +// List returns all public IPs. +func List(c *gophercloud.ServiceClient) pagination.Pager { + url := listURL(c) + createPage := func(r pagination.PageResult) pagination.Page { + return PublicIPPage{pagination.SinglePageBase(r)} + } + return pagination.NewPager(c, url, createPage) +} + +// Create adds a public IP to the server with the given serverID. +func Create(c *gophercloud.ServiceClient, serverID string) CreateResult { + var res CreateResult + reqBody := map[string]interface{}{ + "cloud_server": map[string]string{ + "id": serverID, + }, + } + _, res.Err = c.Post(createURL(c), reqBody, &res.Body, nil) + return res +} + +// ListForServer returns all public IPs for the server with the given serverID. +func ListForServer(c *gophercloud.ServiceClient, serverID string) pagination.Pager { + url := listForServerURL(c, serverID) + createPage := func(r pagination.PageResult) pagination.Page { + return PublicIPPage{pagination.SinglePageBase(r)} + } + return pagination.NewPager(c, url, createPage) +} + +// Get retrieves the public IP with the given id. +func Get(c *gophercloud.ServiceClient, id string) GetResult { + var res GetResult + _, res.Err = c.Get(getURL(c, id), &res.Body, nil) + return res +} + +// Delete removes the public IP with the given id. +func Delete(c *gophercloud.ServiceClient, id string) DeleteResult { + var res DeleteResult + _, res.Err = c.Delete(deleteURL(c, id), nil) + return res +} diff --git a/vendor/github.com/rackspace/gophercloud/rackspace/rackconnect/v3/publicips/requests_test.go b/vendor/github.com/rackspace/gophercloud/rackspace/rackconnect/v3/publicips/requests_test.go new file mode 100644 index 000000000..61da2b03d --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/rackspace/rackconnect/v3/publicips/requests_test.go @@ -0,0 +1,378 @@ +package publicips + +import ( + "fmt" + "net/http" + "testing" + "time" + + "github.com/rackspace/gophercloud/pagination" + th "github.com/rackspace/gophercloud/testhelper" + "github.com/rackspace/gophercloud/testhelper/client" + fake "github.com/rackspace/gophercloud/testhelper/client" +) + +func TestListIPs(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + th.Mux.HandleFunc("/public_ips", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + th.TestHeader(t, r, "Accept", "application/json") + + w.Header().Set("Content-Type", "application/json") + fmt.Fprintf(w, `[ + { + "created": "2014-05-30T03:23:42Z", + "cloud_server": { + "cloud_network": { + "cidr": "192.168.100.0/24", + "created": "2014-05-25T01:23:42Z", + "id": "07426958-1ebf-4c38-b032-d456820ca21a", + "name": "RC-CLOUD", + "private_ip_v4": "192.168.100.5", + "updated": "2014-05-25T02:28:44Z" + }, + "created": "2014-05-30T02:18:42Z", + "id": "d95ae0c4-6ab8-4873-b82f-f8433840cff2", + "name": "RCv3TestServer1", + "updated": "2014-05-30T02:19:18Z" + }, + "id": "2d0f586b-37a7-4ae0-adac-2743d5feb450", + "public_ip_v4": "203.0.113.110", + "status": "ACTIVE", + "status_detail": null, + "updated": "2014-05-30T03:24:18Z" + } + ]`) + }) + + expected := []PublicIP{ + PublicIP{ + ID: "2d0f586b-37a7-4ae0-adac-2743d5feb450", + PublicIPv4: "203.0.113.110", + CreatedAt: time.Date(2014, 5, 30, 3, 23, 42, 0, time.UTC), + CloudServer: struct { + ID string `mapstructure:"id"` + Name string `mapstructure:"name"` + CloudNetwork struct { + ID string `mapstructure:"id"` + 
Name string `mapstructure:"name"` + PrivateIPv4 string `mapstructure:"private_ip_v4"` + CIDR string `mapstructure:"cidr"` + CreatedAt time.Time `mapstructure:"-"` + UpdatedAt time.Time `mapstructure:"-"` + } `mapstructure:"cloud_network"` + CreatedAt time.Time `mapstructure:"-"` + UpdatedAt time.Time `mapstructure:"-"` + }{ + ID: "d95ae0c4-6ab8-4873-b82f-f8433840cff2", + CloudNetwork: struct { + ID string `mapstructure:"id"` + Name string `mapstructure:"name"` + PrivateIPv4 string `mapstructure:"private_ip_v4"` + CIDR string `mapstructure:"cidr"` + CreatedAt time.Time `mapstructure:"-"` + UpdatedAt time.Time `mapstructure:"-"` + }{ + ID: "07426958-1ebf-4c38-b032-d456820ca21a", + CIDR: "192.168.100.0/24", + CreatedAt: time.Date(2014, 5, 25, 1, 23, 42, 0, time.UTC), + Name: "RC-CLOUD", + PrivateIPv4: "192.168.100.5", + UpdatedAt: time.Date(2014, 5, 25, 2, 28, 44, 0, time.UTC), + }, + CreatedAt: time.Date(2014, 5, 30, 2, 18, 42, 0, time.UTC), + Name: "RCv3TestServer1", + UpdatedAt: time.Date(2014, 5, 30, 2, 19, 18, 0, time.UTC), + }, + Status: "ACTIVE", + UpdatedAt: time.Date(2014, 5, 30, 3, 24, 18, 0, time.UTC), + }, + } + + count := 0 + err := List(fake.ServiceClient()).EachPage(func(page pagination.Page) (bool, error) { + count++ + actual, err := ExtractPublicIPs(page) + th.AssertNoErr(t, err) + + th.CheckDeepEquals(t, expected, actual) + + return true, nil + }) + th.AssertNoErr(t, err) + th.CheckEquals(t, count, 1) +} + +func TestCreateIP(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + th.Mux.HandleFunc("/public_ips", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "POST") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + th.TestHeader(t, r, "Accept", "application/json") + th.TestJSONRequest(t, r, ` + { + "cloud_server": { + "id": "d95ae0c4-6ab8-4873-b82f-f8433840cff2" + } + } + `) + + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(http.StatusCreated) + fmt.Fprintf(w, ` + { + "created": "2014-05-30T03:23:42Z", + "cloud_server": { + "cloud_network": { + "cidr": "192.168.100.0/24", + "created": "2014-05-25T01:23:42Z", + "id": "07426958-1ebf-4c38-b032-d456820ca21a", + "name": "RC-CLOUD", + "private_ip_v4": "192.168.100.5", + "updated": "2014-05-25T02:28:44Z" + }, + "created": "2014-05-30T02:18:42Z", + "id": "d95ae0c4-6ab8-4873-b82f-f8433840cff2", + "name": "RCv3TestServer1", + "updated": "2014-05-30T02:19:18Z" + }, + "id": "2d0f586b-37a7-4ae0-adac-2743d5feb450", + "status": "ADDING" + }`) + }) + + expected := &PublicIP{ + CreatedAt: time.Date(2014, 5, 30, 3, 23, 42, 0, time.UTC), + CloudServer: struct { + ID string `mapstructure:"id"` + Name string `mapstructure:"name"` + CloudNetwork struct { + ID string `mapstructure:"id"` + Name string `mapstructure:"name"` + PrivateIPv4 string `mapstructure:"private_ip_v4"` + CIDR string `mapstructure:"cidr"` + CreatedAt time.Time `mapstructure:"-"` + UpdatedAt time.Time `mapstructure:"-"` + } `mapstructure:"cloud_network"` + CreatedAt time.Time `mapstructure:"-"` + UpdatedAt time.Time `mapstructure:"-"` + }{ + ID: "d95ae0c4-6ab8-4873-b82f-f8433840cff2", + CloudNetwork: struct { + ID string `mapstructure:"id"` + Name string `mapstructure:"name"` + PrivateIPv4 string `mapstructure:"private_ip_v4"` + CIDR string `mapstructure:"cidr"` + CreatedAt time.Time `mapstructure:"-"` + UpdatedAt time.Time `mapstructure:"-"` + }{ + ID: "07426958-1ebf-4c38-b032-d456820ca21a", + CIDR: "192.168.100.0/24", + CreatedAt: time.Date(2014, 5, 25, 1, 23, 42, 0, time.UTC), + Name: "RC-CLOUD", + PrivateIPv4: 
"192.168.100.5", + UpdatedAt: time.Date(2014, 5, 25, 2, 28, 44, 0, time.UTC), + }, + CreatedAt: time.Date(2014, 5, 30, 2, 18, 42, 0, time.UTC), + Name: "RCv3TestServer1", + UpdatedAt: time.Date(2014, 5, 30, 2, 19, 18, 0, time.UTC), + }, + ID: "2d0f586b-37a7-4ae0-adac-2743d5feb450", + Status: "ADDING", + } + + actual, err := Create(fake.ServiceClient(), "d95ae0c4-6ab8-4873-b82f-f8433840cff2").Extract() + th.AssertNoErr(t, err) + th.AssertDeepEquals(t, expected, actual) +} + +func TestGetIP(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + th.Mux.HandleFunc("/public_ips/2d0f586b-37a7-4ae0-adac-2743d5feb450", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + th.TestHeader(t, r, "Accept", "application/json") + + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + fmt.Fprintf(w, ` + { + "created": "2014-05-30T03:23:42Z", + "cloud_server": { + "cloud_network": { + "cidr": "192.168.100.0/24", + "created": "2014-05-25T01:23:42Z", + "id": "07426958-1ebf-4c38-b032-d456820ca21a", + "name": "RC-CLOUD", + "private_ip_v4": "192.168.100.5", + "updated": "2014-05-25T02:28:44Z" + }, + "created": "2014-05-30T02:18:42Z", + "id": "d95ae0c4-6ab8-4873-b82f-f8433840cff2", + "name": "RCv3TestServer1", + "updated": "2014-05-30T02:19:18Z" + }, + "id": "2d0f586b-37a7-4ae0-adac-2743d5feb450", + "public_ip_v4": "203.0.113.110", + "status": "ACTIVE", + "status_detail": null, + "updated": "2014-05-30T03:24:18Z" + }`) + }) + + expected := &PublicIP{ + CreatedAt: time.Date(2014, 5, 30, 3, 23, 42, 0, time.UTC), + CloudServer: struct { + ID string `mapstructure:"id"` + Name string `mapstructure:"name"` + CloudNetwork struct { + ID string `mapstructure:"id"` + Name string `mapstructure:"name"` + PrivateIPv4 string `mapstructure:"private_ip_v4"` + CIDR string `mapstructure:"cidr"` + CreatedAt time.Time `mapstructure:"-"` + UpdatedAt time.Time `mapstructure:"-"` + } `mapstructure:"cloud_network"` + CreatedAt time.Time `mapstructure:"-"` + UpdatedAt time.Time `mapstructure:"-"` + }{ + ID: "d95ae0c4-6ab8-4873-b82f-f8433840cff2", + CloudNetwork: struct { + ID string `mapstructure:"id"` + Name string `mapstructure:"name"` + PrivateIPv4 string `mapstructure:"private_ip_v4"` + CIDR string `mapstructure:"cidr"` + CreatedAt time.Time `mapstructure:"-"` + UpdatedAt time.Time `mapstructure:"-"` + }{ + ID: "07426958-1ebf-4c38-b032-d456820ca21a", + CIDR: "192.168.100.0/24", + CreatedAt: time.Date(2014, 5, 25, 1, 23, 42, 0, time.UTC), + Name: "RC-CLOUD", + PrivateIPv4: "192.168.100.5", + UpdatedAt: time.Date(2014, 5, 25, 2, 28, 44, 0, time.UTC), + }, + CreatedAt: time.Date(2014, 5, 30, 2, 18, 42, 0, time.UTC), + Name: "RCv3TestServer1", + UpdatedAt: time.Date(2014, 5, 30, 2, 19, 18, 0, time.UTC), + }, + ID: "2d0f586b-37a7-4ae0-adac-2743d5feb450", + Status: "ACTIVE", + PublicIPv4: "203.0.113.110", + UpdatedAt: time.Date(2014, 5, 30, 3, 24, 18, 0, time.UTC), + } + + actual, err := Get(fake.ServiceClient(), "2d0f586b-37a7-4ae0-adac-2743d5feb450").Extract() + th.AssertNoErr(t, err) + th.AssertDeepEquals(t, expected, actual) +} + +func TestDeleteIP(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + th.Mux.HandleFunc("/public_ips/2d0f586b-37a7-4ae0-adac-2743d5feb450", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "DELETE") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + th.TestHeader(t, r, "Accept", "application/json") + + w.Header().Set("Content-Type", "application/json") + 
w.WriteHeader(http.StatusNoContent) + }) + + err := Delete(client.ServiceClient(), "2d0f586b-37a7-4ae0-adac-2743d5feb450").ExtractErr() + th.AssertNoErr(t, err) +} + +func TestListForServer(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + th.Mux.HandleFunc("/public_ips", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + th.TestHeader(t, r, "Accept", "application/json") + + w.Header().Set("Content-Type", "application/json") + fmt.Fprintf(w, ` + [ + { + "created": "2014-05-30T03:23:42Z", + "cloud_server": { + "cloud_network": { + "cidr": "192.168.100.0/24", + "created": "2014-05-25T01:23:42Z", + "id": "07426958-1ebf-4c38-b032-d456820ca21a", + "name": "RC-CLOUD", + "private_ip_v4": "192.168.100.5", + "updated": "2014-05-25T02:28:44Z" + }, + "created": "2014-05-30T02:18:42Z", + "id": "d95ae0c4-6ab8-4873-b82f-f8433840cff2", + "name": "RCv3TestServer1", + "updated": "2014-05-30T02:19:18Z" + }, + "id": "2d0f586b-37a7-4ae0-adac-2743d5feb450", + "public_ip_v4": "203.0.113.110", + "status": "ACTIVE", + "updated": "2014-05-30T03:24:18Z" + } + ]`) + }) + + expected := []PublicIP{ + PublicIP{ + CreatedAt: time.Date(2014, 5, 30, 3, 23, 42, 0, time.UTC), + CloudServer: struct { + ID string `mapstructure:"id"` + Name string `mapstructure:"name"` + CloudNetwork struct { + ID string `mapstructure:"id"` + Name string `mapstructure:"name"` + PrivateIPv4 string `mapstructure:"private_ip_v4"` + CIDR string `mapstructure:"cidr"` + CreatedAt time.Time `mapstructure:"-"` + UpdatedAt time.Time `mapstructure:"-"` + } `mapstructure:"cloud_network"` + CreatedAt time.Time `mapstructure:"-"` + UpdatedAt time.Time `mapstructure:"-"` + }{ + ID: "d95ae0c4-6ab8-4873-b82f-f8433840cff2", + CloudNetwork: struct { + ID string `mapstructure:"id"` + Name string `mapstructure:"name"` + PrivateIPv4 string `mapstructure:"private_ip_v4"` + CIDR string `mapstructure:"cidr"` + CreatedAt time.Time `mapstructure:"-"` + UpdatedAt time.Time `mapstructure:"-"` + }{ + ID: "07426958-1ebf-4c38-b032-d456820ca21a", + CIDR: "192.168.100.0/24", + CreatedAt: time.Date(2014, 5, 25, 1, 23, 42, 0, time.UTC), + Name: "RC-CLOUD", + PrivateIPv4: "192.168.100.5", + UpdatedAt: time.Date(2014, 5, 25, 2, 28, 44, 0, time.UTC), + }, + CreatedAt: time.Date(2014, 5, 30, 2, 18, 42, 0, time.UTC), + Name: "RCv3TestServer1", + UpdatedAt: time.Date(2014, 5, 30, 2, 19, 18, 0, time.UTC), + }, + ID: "2d0f586b-37a7-4ae0-adac-2743d5feb450", + Status: "ACTIVE", + PublicIPv4: "203.0.113.110", + UpdatedAt: time.Date(2014, 5, 30, 3, 24, 18, 0, time.UTC), + }, + } + count := 0 + err := ListForServer(fake.ServiceClient(), "d95ae0c4-6ab8-4873-b82f-f8433840cff2").EachPage(func(page pagination.Page) (bool, error) { + count++ + actual, err := ExtractPublicIPs(page) + th.AssertNoErr(t, err) + th.CheckDeepEquals(t, expected, actual) + return true, nil + }) + th.AssertNoErr(t, err) + th.CheckEquals(t, count, 1) +} diff --git a/vendor/github.com/rackspace/gophercloud/rackspace/rackconnect/v3/publicips/results.go b/vendor/github.com/rackspace/gophercloud/rackspace/rackconnect/v3/publicips/results.go new file mode 100644 index 000000000..132cf770a --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/rackspace/rackconnect/v3/publicips/results.go @@ -0,0 +1,221 @@ +package publicips + +import ( + "fmt" + "reflect" + "time" + + "github.com/mitchellh/mapstructure" + "github.com/rackspace/gophercloud" + "github.com/rackspace/gophercloud/pagination" +) + +// PublicIP represents a public IP 
address. +type PublicIP struct { + // The unique ID of the public IP. + ID string `mapstructure:"id"` + // The IPv4 address of the public IP. + PublicIPv4 string `mapstructure:"public_ip_v4"` + // The cloud server (node) of the public IP. + CloudServer struct { + // The cloud server ID. + ID string `mapstructure:"id"` + // The name of the server. + Name string `mapstructure:"name"` + // The cloud network for the cloud server. + CloudNetwork struct { + // The network ID. + ID string `mapstructure:"id"` + // The network name. + Name string `mapstructure:"name"` + // The network's private IPv4 address. + PrivateIPv4 string `mapstructure:"private_ip_v4"` + // The IP range for the network. + CIDR string `mapstructure:"cidr"` + // The datetime the network was created. + CreatedAt time.Time `mapstructure:"-"` + // The last datetime the network was updated. + UpdatedAt time.Time `mapstructure:"-"` + } `mapstructure:"cloud_network"` + // The datetime the server was created. + CreatedAt time.Time `mapstructure:"-"` + // The datetime the server was last updated. + UpdatedAt time.Time `mapstructure:"-"` + } `mapstructure:"cloud_server"` + // The status of the public IP. + Status string `mapstructure:"status"` + // The details of the status of the public IP. + StatusDetail string `mapstructure:"status_detail"` + // The time the public IP was created. + CreatedAt time.Time `mapstructure:"-"` + // The time the public IP was last updated. + UpdatedAt time.Time `mapstructure:"-"` +} + +// PublicIPPage is the page returned by a pager when traversing over a +// collection of PublicIPs. +type PublicIPPage struct { + pagination.SinglePageBase +} + +// IsEmpty returns true if a PublicIPPage contains no PublicIPs. +func (r PublicIPPage) IsEmpty() (bool, error) { + n, err := ExtractPublicIPs(r) + if err != nil { + return true, err + } + return len(n) == 0, nil +} + +// ExtractPublicIPs extracts and returns a slice of PublicIPs. It is used while iterating over +// a publicips.List call. 
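As an editorial aside, here is a minimal, hedged sketch of how this pager is meant to be consumed. It assumes an authenticated RackConnect v3 `*gophercloud.ServiceClient` (construction not shown) and uses only `publicips.List` (defined earlier in this patch) plus the `pagination` package:

```go
package example

import (
	"fmt"

	"github.com/rackspace/gophercloud"
	"github.com/rackspace/gophercloud/pagination"
	"github.com/rackspace/gophercloud/rackspace/rackconnect/v3/publicips"
)

// printPublicIPs walks every page returned by publicips.List and prints each
// public IP. Returning true from the callback advances to the next page.
func printPublicIPs(c *gophercloud.ServiceClient) error {
	return publicips.List(c).EachPage(func(page pagination.Page) (bool, error) {
		ips, err := publicips.ExtractPublicIPs(page)
		if err != nil {
			return false, err // stop paging on an extraction failure
		}
		for _, ip := range ips {
			fmt.Printf("%s %s (%s)\n", ip.ID, ip.PublicIPv4, ip.Status)
		}
		return true, nil
	})
}
```

The extractor itself follows.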
+func ExtractPublicIPs(page pagination.Page) ([]PublicIP, error) {
+	var res []PublicIP
+	casted := page.(PublicIPPage).Body
+	err := mapstructure.Decode(casted, &res)
+
+	rawNodesDetails, ok := casted.([]interface{})
+	if !ok {
+		return res, fmt.Errorf("Unknown type: %v", reflect.TypeOf(casted))
+	}
+	if err != nil {
+		return res, err
+	}
+
+	for i := range rawNodesDetails {
+		thisNodeDetails := (rawNodesDetails[i]).(map[string]interface{})
+
+		if t, ok := thisNodeDetails["created"].(string); ok && t != "" {
+			creationTime, err := time.Parse(time.RFC3339, t)
+			if err != nil {
+				return res, err
+			}
+			res[i].CreatedAt = creationTime
+		}
+
+		if t, ok := thisNodeDetails["updated"].(string); ok && t != "" {
+			updatedTime, err := time.Parse(time.RFC3339, t)
+			if err != nil {
+				return res, err
+			}
+			res[i].UpdatedAt = updatedTime
+		}
+
+		if cs, ok := thisNodeDetails["cloud_server"].(map[string]interface{}); ok {
+			if t, ok := cs["created"].(string); ok && t != "" {
+				creationTime, err := time.Parse(time.RFC3339, t)
+				if err != nil {
+					return res, err
+				}
+				res[i].CloudServer.CreatedAt = creationTime
+			}
+			if t, ok := cs["updated"].(string); ok && t != "" {
+				updatedTime, err := time.Parse(time.RFC3339, t)
+				if err != nil {
+					return res, err
+				}
+				res[i].CloudServer.UpdatedAt = updatedTime
+			}
+			if cn, ok := cs["cloud_network"].(map[string]interface{}); ok {
+				if t, ok := cn["created"].(string); ok && t != "" {
+					creationTime, err := time.Parse(time.RFC3339, t)
+					if err != nil {
+						return res, err
+					}
+					res[i].CloudServer.CloudNetwork.CreatedAt = creationTime
+				}
+				if t, ok := cn["updated"].(string); ok && t != "" {
+					updatedTime, err := time.Parse(time.RFC3339, t)
+					if err != nil {
+						return res, err
+					}
+					res[i].CloudServer.CloudNetwork.UpdatedAt = updatedTime
+				}
+			}
+		}
+	}
+
+	return res, err
+}
+
+// PublicIPResult represents a result that can be extracted into a PublicIP.
+type PublicIPResult struct {
+	gophercloud.Result
+}
+
+// CreateResult represents the result of a Create operation.
+type CreateResult struct {
+	PublicIPResult
+}
+
+// GetResult represents the result of a Get operation.
+type GetResult struct {
+	PublicIPResult
+}
+
+// Extract is a function that extracts a PublicIP from a PublicIPResult.
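Before the implementation, a hedged sketch of the create-then-poll pattern these result types enable. The 300-second budget and the status strings mirror the test fixtures above, and `gophercloud.WaitFor` is the generic poller vendored later in this patch:

```go
package example

import (
	"github.com/rackspace/gophercloud"
	"github.com/rackspace/gophercloud/rackspace/rackconnect/v3/publicips"
)

// provisionIP attaches a public IP to a cloud server, then polls once per
// second (up to 300 seconds) until the IP leaves "ADDING" for "ACTIVE".
func provisionIP(c *gophercloud.ServiceClient, serverID string) (*publicips.PublicIP, error) {
	ip, err := publicips.Create(c, serverID).Extract()
	if err != nil {
		return nil, err
	}
	err = gophercloud.WaitFor(300, func() (bool, error) {
		latest, err := publicips.Get(c, ip.ID).Extract()
		if err != nil {
			return false, err
		}
		*ip = *latest // keep the caller's copy current
		return ip.Status == "ACTIVE", nil
	})
	return ip, err
}
```

The implementation follows.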
+func (r PublicIPResult) Extract() (*PublicIP, error) { + if r.Err != nil { + return nil, r.Err + } + var res PublicIP + err := mapstructure.Decode(r.Body, &res) + + b := r.Body.(map[string]interface{}) + + if date, ok := b["created"]; ok && date != nil { + t, err := time.Parse(time.RFC3339, date.(string)) + if err != nil { + return nil, err + } + res.CreatedAt = t + } + + if date, ok := b["updated"]; ok && date != nil { + t, err := time.Parse(time.RFC3339, date.(string)) + if err != nil { + return nil, err + } + res.UpdatedAt = t + } + + if cs, ok := b["cloud_server"].(map[string]interface{}); ok { + if t, ok := cs["created"].(string); ok && t != "" { + creationTime, err := time.Parse(time.RFC3339, t) + if err != nil { + return &res, err + } + res.CloudServer.CreatedAt = creationTime + } + if t, ok := cs["updated"].(string); ok && t != "" { + updatedTime, err := time.Parse(time.RFC3339, t) + if err != nil { + return &res, err + } + res.CloudServer.UpdatedAt = updatedTime + } + if cn, ok := cs["cloud_network"].(map[string]interface{}); ok { + if t, ok := cn["created"].(string); ok && t != "" { + creationTime, err := time.Parse(time.RFC3339, t) + if err != nil { + return &res, err + } + res.CloudServer.CloudNetwork.CreatedAt = creationTime + } + if t, ok := cn["updated"].(string); ok && t != "" { + updatedTime, err := time.Parse(time.RFC3339, t) + if err != nil { + return &res, err + } + res.CloudServer.CloudNetwork.UpdatedAt = updatedTime + } + } + } + + return &res, err +} + +// DeleteResult represents the result of a Delete operation. +type DeleteResult struct { + gophercloud.ErrResult +} diff --git a/vendor/github.com/rackspace/gophercloud/rackspace/rackconnect/v3/publicips/urls.go b/vendor/github.com/rackspace/gophercloud/rackspace/rackconnect/v3/publicips/urls.go new file mode 100644 index 000000000..6f310be4e --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/rackspace/rackconnect/v3/publicips/urls.go @@ -0,0 +1,25 @@ +package publicips + +import "github.com/rackspace/gophercloud" + +var root = "public_ips" + +func listURL(c *gophercloud.ServiceClient) string { + return c.ServiceURL(root) +} + +func createURL(c *gophercloud.ServiceClient) string { + return c.ServiceURL(root) +} + +func listForServerURL(c *gophercloud.ServiceClient, serverID string) string { + return c.ServiceURL(root + "?cloud_server_id=" + serverID) +} + +func getURL(c *gophercloud.ServiceClient, id string) string { + return c.ServiceURL(root, id) +} + +func deleteURL(c *gophercloud.ServiceClient, id string) string { + return getURL(c, id) +} diff --git a/vendor/github.com/rackspace/gophercloud/results.go b/vendor/github.com/rackspace/gophercloud/results.go new file mode 100644 index 000000000..27fd1b60f --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/results.go @@ -0,0 +1,153 @@ +package gophercloud + +import ( + "encoding/json" + "net/http" + "reflect" + + "github.com/mitchellh/mapstructure" +) + +/* +Result is an internal type to be used by individual resource packages, but its +methods will be available on a wide variety of user-facing embedding types. + +It acts as a base struct that other Result types, returned from request +functions, can embed for convenience. All Results capture basic information +from the HTTP transaction that was performed, including the response body, +HTTP headers, and any errors that happened. + +Generally, each Result type will have an Extract method that can be used to +further interpret the result's payload in a specific context. 
Extensions or +providers can then provide additional extraction functions to pull out +provider- or extension-specific information as well. +*/ +type Result struct { + // Body is the payload of the HTTP response from the server. In most cases, + // this will be the deserialized JSON structure. + Body interface{} + + // Header contains the HTTP header structure from the original response. + Header http.Header + + // Err is an error that occurred during the operation. It's deferred until + // extraction to make it easier to chain the Extract call. + Err error +} + +// PrettyPrintJSON creates a string containing the full response body as +// pretty-printed JSON. It's useful for capturing test fixtures and for +// debugging extraction bugs. If you include its output in an issue related to +// a buggy extraction function, we will all love you forever. +func (r Result) PrettyPrintJSON() string { + pretty, err := json.MarshalIndent(r.Body, "", " ") + if err != nil { + panic(err.Error()) + } + return string(pretty) +} + +// ErrResult is an internal type to be used by individual resource packages, but +// its methods will be available on a wide variety of user-facing embedding +// types. +// +// It represents results that only contain a potential error and +// nothing else. Usually, if the operation executed successfully, the Err field +// will be nil; otherwise it will be stocked with a relevant error. Use the +// ExtractErr method +// to cleanly pull it out. +type ErrResult struct { + Result +} + +// ExtractErr is a function that extracts error information, or nil, from a result. +func (r ErrResult) ExtractErr() error { + return r.Err +} + +/* +HeaderResult is an internal type to be used by individual resource packages, but +its methods will be available on a wide variety of user-facing embedding types. + +It represents a result that only contains an error (possibly nil) and an +http.Header. This is used, for example, by the objectstorage packages in +openstack, because most of the operations don't return response bodies, but do +have relevant information in headers. +*/ +type HeaderResult struct { + Result +} + +// ExtractHeader will return the http.Header and error from the HeaderResult. +// +// header, err := objects.Create(client, "my_container", objects.CreateOpts{}).ExtractHeader() +func (hr HeaderResult) ExtractHeader() (http.Header, error) { + return hr.Header, hr.Err +} + +// DecodeHeader is a function that decodes a header (usually of type map[string]interface{}) to +// another type (usually a struct). This function is used by the objectstorage package to give +// users access to response headers without having to query a map. A DecodeHookFunction is used, +// because OpenStack-based clients return header values as arrays (Go slices). +func DecodeHeader(from, to interface{}) error { + config := &mapstructure.DecoderConfig{ + DecodeHook: func(from, to reflect.Kind, data interface{}) (interface{}, error) { + if from == reflect.Slice { + return data.([]string)[0], nil + } + return data, nil + }, + Result: to, + WeaklyTypedInput: true, + } + decoder, err := mapstructure.NewDecoder(config) + if err != nil { + return err + } + if err := decoder.Decode(from); err != nil { + return err + } + return nil +} + +// RFC3339Milli describes a common time format used by some API responses. 
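Stepping back to DecodeHeader for a moment: header values arrive as `[]string`, and the decode hook collapses each to its first element, so a tagged struct can be filled directly. A hedged illustration (the struct and keys are invented, not from any particular service):

```go
package example

import "github.com/rackspace/gophercloud"

// objectHeaders is a hypothetical target type; the mapstructure tags
// name the raw header keys.
type objectHeaders struct {
	ContentLength string `mapstructure:"Content-Length"`
	ETag          string `mapstructure:"Etag"`
}

// decodeHeaders flattens a raw header map (whose values are []string, as
// returned by OpenStack-based clients) into the typed struct.
func decodeHeaders(raw map[string]interface{}) (objectHeaders, error) {
	var out objectHeaders
	err := gophercloud.DecodeHeader(raw, &out)
	return out, err
}
```

Called, for instance, with `map[string]interface{}{"Content-Length": []string{"1024"}, "Etag": []string{"d41d8cd9"}}`. The time-format constants just described follow.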
+const RFC3339Milli = "2006-01-02T15:04:05.999999Z"
+
+// Time format used in cloud orchestration.
+const STACK_TIME_FMT = "2006-01-02T15:04:05"
+
+/*
+Link is an internal type to be used in packages of collection resources that are
+paginated in a certain way.
+
+It's a response substructure common to many paginated collection results that is
+used to point to related pages. Usually, the one we care about is the one with
+Rel field set to "next".
+*/
+type Link struct {
+	Href string `mapstructure:"href"`
+	Rel  string `mapstructure:"rel"`
+}
+
+/*
+ExtractNextURL is an internal function useful for packages of collection
+resources that are paginated in a certain way.
+
+It attempts to extract the "next" URL from a slice of Link structs, or
+"" if no such URL is present.
+*/
+func ExtractNextURL(links []Link) (string, error) {
+	var url string
+
+	for _, l := range links {
+		if l.Rel == "next" {
+			url = l.Href
+		}
+	}
+
+	if url == "" {
+		return "", nil
+	}
+
+	return url, nil
+}
diff --git a/vendor/github.com/rackspace/gophercloud/script/acceptancetest b/vendor/github.com/rackspace/gophercloud/script/acceptancetest
new file mode 100644
index 000000000..f9c89f4df
--- /dev/null
+++ b/vendor/github.com/rackspace/gophercloud/script/acceptancetest
@@ -0,0 +1,5 @@
+#!/bin/bash
+#
+# Run the acceptance tests.
+
+exec go test -p=1 -tags 'acceptance fixtures' github.com/rackspace/gophercloud/acceptance/... $@
diff --git a/vendor/github.com/rackspace/gophercloud/script/bootstrap b/vendor/github.com/rackspace/gophercloud/script/bootstrap
new file mode 100644
index 000000000..6bae6e8f1
--- /dev/null
+++ b/vendor/github.com/rackspace/gophercloud/script/bootstrap
@@ -0,0 +1,26 @@
+#!/bin/bash
+#
+# This script helps new contributors set up their local workstation for
+# gophercloud development and contributions.
+
+# Create the environment
+export GOPATH=$HOME/go/gophercloud
+mkdir -p $GOPATH
+
+# Download gophercloud into that environment
+go get github.com/rackspace/gophercloud
+cd $GOPATH/src/github.com/rackspace/gophercloud
+git checkout master
+
+# Write out the env.sh convenience file.
+cd $GOPATH
+cat <<EOF >env.sh
+#!/bin/bash
+export GOPATH=$(pwd)
+export GOPHERCLOUD=$GOPATH/src/github.com/rackspace/gophercloud
+EOF
+chmod a+x env.sh
+
+# Make changes immediately available as a convenience.
+. ./env.sh
+
diff --git a/vendor/github.com/rackspace/gophercloud/script/cibuild b/vendor/github.com/rackspace/gophercloud/script/cibuild
new file mode 100644
index 000000000..1cb389e7d
--- /dev/null
+++ b/vendor/github.com/rackspace/gophercloud/script/cibuild
@@ -0,0 +1,5 @@
+#!/bin/bash
+#
+# Test script to be invoked by Travis.
+
+exec script/unittest -v
diff --git a/vendor/github.com/rackspace/gophercloud/script/test b/vendor/github.com/rackspace/gophercloud/script/test
new file mode 100644
index 000000000..1e03dff8a
--- /dev/null
+++ b/vendor/github.com/rackspace/gophercloud/script/test
@@ -0,0 +1,5 @@
+#!/bin/bash
+#
+# Run all the tests.
+
+exec go test -tags 'acceptance fixtures' ./... $@
diff --git a/vendor/github.com/rackspace/gophercloud/script/unittest b/vendor/github.com/rackspace/gophercloud/script/unittest
new file mode 100644
index 000000000..d3440a902
--- /dev/null
+++ b/vendor/github.com/rackspace/gophercloud/script/unittest
@@ -0,0 +1,5 @@
+#!/bin/bash
+#
+# Run the unit tests.
+
+exec go test -tags fixtures ./...
$@ diff --git a/vendor/github.com/rackspace/gophercloud/service_client.go b/vendor/github.com/rackspace/gophercloud/service_client.go new file mode 100644 index 000000000..3490da05f --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/service_client.go @@ -0,0 +1,32 @@ +package gophercloud + +import "strings" + +// ServiceClient stores details required to interact with a specific service API implemented by a provider. +// Generally, you'll acquire these by calling the appropriate `New` method on a ProviderClient. +type ServiceClient struct { + // ProviderClient is a reference to the provider that implements this service. + *ProviderClient + + // Endpoint is the base URL of the service's API, acquired from a service catalog. + // It MUST end with a /. + Endpoint string + + // ResourceBase is the base URL shared by the resources within a service's API. It should include + // the API version and, like Endpoint, MUST end with a / if set. If not set, the Endpoint is used + // as-is, instead. + ResourceBase string +} + +// ResourceBaseURL returns the base URL of any resources used by this service. It MUST end with a /. +func (client *ServiceClient) ResourceBaseURL() string { + if client.ResourceBase != "" { + return client.ResourceBase + } + return client.Endpoint +} + +// ServiceURL constructs a URL for a resource belonging to this provider. +func (client *ServiceClient) ServiceURL(parts ...string) string { + return client.ResourceBaseURL() + strings.Join(parts, "/") +} diff --git a/vendor/github.com/rackspace/gophercloud/service_client_test.go b/vendor/github.com/rackspace/gophercloud/service_client_test.go new file mode 100644 index 000000000..84beb3f76 --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/service_client_test.go @@ -0,0 +1,14 @@ +package gophercloud + +import ( + "testing" + + th "github.com/rackspace/gophercloud/testhelper" +) + +func TestServiceURL(t *testing.T) { + c := &ServiceClient{Endpoint: "http://123.45.67.8/"} + expected := "http://123.45.67.8/more/parts/here" + actual := c.ServiceURL("more", "parts", "here") + th.CheckEquals(t, expected, actual) +} diff --git a/vendor/github.com/rackspace/gophercloud/testhelper/client/fake.go b/vendor/github.com/rackspace/gophercloud/testhelper/client/fake.go new file mode 100644 index 000000000..5b69b058f --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/testhelper/client/fake.go @@ -0,0 +1,17 @@ +package client + +import ( + "github.com/rackspace/gophercloud" + "github.com/rackspace/gophercloud/testhelper" +) + +// Fake token to use. +const TokenID = "cbc36478b0bd8e67e89469c7749d4127" + +// ServiceClient returns a generic service client for use in tests. 
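As a hedged sketch of the wiring this helper enables (the `/widgets` route and body are invented for illustration; the pattern matches the unit tests earlier in this patch):

```go
package example

import (
	"fmt"
	"net/http"
	"testing"

	th "github.com/rackspace/gophercloud/testhelper"
	fake "github.com/rackspace/gophercloud/testhelper/client"
)

// TestWidgets shows the standard pattern: an in-memory server plus a
// pre-authenticated fake service client whose Endpoint already points
// at that server.
func TestWidgets(t *testing.T) {
	th.SetupHTTP()
	defer th.TeardownHTTP()

	th.Mux.HandleFunc("/widgets", func(w http.ResponseWriter, r *http.Request) {
		th.TestMethod(t, r, "GET")
		th.TestHeader(t, r, "X-Auth-Token", fake.TokenID)
		w.Header().Set("Content-Type", "application/json")
		fmt.Fprint(w, `{"widgets": []}`)
	})

	c := fake.ServiceClient()
	_ = c // hand c to the request function under test
}
```

The constructor itself follows.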
+func ServiceClient() *gophercloud.ServiceClient {
+	return &gophercloud.ServiceClient{
+		ProviderClient: &gophercloud.ProviderClient{TokenID: TokenID},
+		Endpoint:       testhelper.Endpoint(),
+	}
+}
diff --git a/vendor/github.com/rackspace/gophercloud/testhelper/convenience.go b/vendor/github.com/rackspace/gophercloud/testhelper/convenience.go
new file mode 100644
index 000000000..cf33e1ad1
--- /dev/null
+++ b/vendor/github.com/rackspace/gophercloud/testhelper/convenience.go
@@ -0,0 +1,329 @@
+package testhelper
+
+import (
+	"encoding/json"
+	"fmt"
+	"path/filepath"
+	"reflect"
+	"runtime"
+	"strings"
+	"testing"
+)
+
+const (
+	logBodyFmt = "\033[1;31m%s %s\033[0m"
+	greenCode  = "\033[0m\033[1;32m"
+	yellowCode = "\033[0m\033[1;33m"
+	resetCode  = "\033[0m\033[1;31m"
+)
+
+func prefix(depth int) string {
+	_, file, line, _ := runtime.Caller(depth)
+	return fmt.Sprintf("Failure in %s, line %d:", filepath.Base(file), line)
+}
+
+func green(str interface{}) string {
+	return fmt.Sprintf("%s%#v%s", greenCode, str, resetCode)
+}
+
+func yellow(str interface{}) string {
+	return fmt.Sprintf("%s%#v%s", yellowCode, str, resetCode)
+}
+
+func logFatal(t *testing.T, str string) {
+	t.Fatalf(logBodyFmt, prefix(3), str)
+}
+
+func logError(t *testing.T, str string) {
+	t.Errorf(logBodyFmt, prefix(3), str)
+}
+
+type diffLogger func([]string, interface{}, interface{})
+
+type visit struct {
+	a1  uintptr
+	a2  uintptr
+	typ reflect.Type
+}
+
+// Recursively visits the structures of "expected" and "actual". The diffLogger function will be
+// invoked with each different value encountered, including the reference path that was followed
+// to get there.
+func deepDiffEqual(expected, actual reflect.Value, visited map[visit]bool, path []string, logDifference diffLogger) {
+	defer func() {
+		// Fall back to the regular reflect.DeepEqual function.
+		if r := recover(); r != nil {
+			var e, a interface{}
+			if expected.IsValid() {
+				e = expected.Interface()
+			}
+			if actual.IsValid() {
+				a = actual.Interface()
+			}
+
+			if !reflect.DeepEqual(e, a) {
+				logDifference(path, e, a)
+			}
+		}
+	}()
+
+	if !expected.IsValid() && actual.IsValid() {
+		logDifference(path, nil, actual.Interface())
+		return
+	}
+	if expected.IsValid() && !actual.IsValid() {
+		logDifference(path, expected.Interface(), nil)
+		return
+	}
+	if !expected.IsValid() && !actual.IsValid() {
+		return
+	}
+
+	hard := func(k reflect.Kind) bool {
+		switch k {
+		case reflect.Array, reflect.Map, reflect.Slice, reflect.Struct:
+			return true
+		}
+		return false
+	}
+
+	if expected.CanAddr() && actual.CanAddr() && hard(expected.Kind()) {
+		addr1 := expected.UnsafeAddr()
+		addr2 := actual.UnsafeAddr()
+
+		if addr1 > addr2 {
+			addr1, addr2 = addr2, addr1
+		}
+
+		if addr1 == addr2 {
+			// References are identical. We can short-circuit
+			return
+		}
+
+		typ := expected.Type()
+		v := visit{addr1, addr2, typ}
+		if visited[v] {
+			// Already visited.
+			return
+		}
+
+		// Remember this visit for later.
+		visited[v] = true
+	}
+
+	switch expected.Kind() {
+	case reflect.Array:
+		for i := 0; i < expected.Len(); i++ {
+			hop := append(path, fmt.Sprintf("[%d]", i))
+			deepDiffEqual(expected.Index(i), actual.Index(i), visited, hop, logDifference)
+		}
+		return
+	case reflect.Slice:
+		if expected.IsNil() != actual.IsNil() {
+			logDifference(path, expected.Interface(), actual.Interface())
+			return
+		}
+		if expected.Len() == actual.Len() && expected.Pointer() == actual.Pointer() {
+			return
+		}
+		for i := 0; i < expected.Len(); i++ {
+			hop := append(path, fmt.Sprintf("[%d]", i))
+			deepDiffEqual(expected.Index(i), actual.Index(i), visited, hop, logDifference)
+		}
+		return
+	case reflect.Interface:
+		if expected.IsNil() != actual.IsNil() {
+			logDifference(path, expected.Interface(), actual.Interface())
+			return
+		}
+		deepDiffEqual(expected.Elem(), actual.Elem(), visited, path, logDifference)
+		return
+	case reflect.Ptr:
+		deepDiffEqual(expected.Elem(), actual.Elem(), visited, path, logDifference)
+		return
+	case reflect.Struct:
+		for i, n := 0, expected.NumField(); i < n; i++ {
+			field := expected.Type().Field(i)
+			hop := append(path, "."+field.Name)
+			deepDiffEqual(expected.Field(i), actual.Field(i), visited, hop, logDifference)
+		}
+		return
+	case reflect.Map:
+		if expected.IsNil() != actual.IsNil() {
+			logDifference(path, expected.Interface(), actual.Interface())
+			return
+		}
+		if expected.Len() == actual.Len() && expected.Pointer() == actual.Pointer() {
+			return
+		}
+
+		var keys []reflect.Value
+		if expected.Len() >= actual.Len() {
+			keys = expected.MapKeys()
+		} else {
+			keys = actual.MapKeys()
+		}
+
+		for _, k := range keys {
+			expectedValue := expected.MapIndex(k)
+			actualValue := actual.MapIndex(k)
+
+			if !expectedValue.IsValid() {
+				logDifference(path, nil, actual.Interface())
+				return
+			}
+			if !actualValue.IsValid() {
+				logDifference(path, expected.Interface(), nil)
+				return
+			}
+
+			hop := append(path, fmt.Sprintf("[%v]", k))
+			deepDiffEqual(expectedValue, actualValue, visited, hop, logDifference)
+		}
+		return
+	case reflect.Func:
+		if expected.IsNil() != actual.IsNil() {
+			logDifference(path, expected.Interface(), actual.Interface())
+		}
+		return
+	default:
+		if expected.Interface() != actual.Interface() {
+			logDifference(path, expected.Interface(), actual.Interface())
+		}
+	}
+}
+
+func deepDiff(expected, actual interface{}, logDifference diffLogger) {
+	if expected == nil || actual == nil {
+		logDifference([]string{}, expected, actual)
+		return
+	}
+
+	expectedValue := reflect.ValueOf(expected)
+	actualValue := reflect.ValueOf(actual)
+
+	if expectedValue.Type() != actualValue.Type() {
+		logDifference([]string{}, expected, actual)
+		return
+	}
+	deepDiffEqual(expectedValue, actualValue, map[visit]bool{}, []string{}, logDifference)
+}
+
+// AssertEquals compares two arbitrary values for equality. If the
+// comparison fails, a fatal error is raised and the test is aborted.
+func AssertEquals(t *testing.T, expected, actual interface{}) {
+	if expected != actual {
+		logFatal(t, fmt.Sprintf("expected %s but got %s", green(expected), yellow(actual)))
+	}
+}
+
+// CheckEquals is similar to AssertEquals, except with a non-fatal error.
+func CheckEquals(t *testing.T, expected, actual interface{}) {
+	if expected != actual {
+		logError(t, fmt.Sprintf("expected %s but got %s", green(expected), yellow(actual)))
+	}
+}
+
+// AssertDeepEquals is like AssertEquals, but performs the comparison on more
+// complex structures that require deeper inspection.
+func AssertDeepEquals(t *testing.T, expected, actual interface{}) {
+	pre := prefix(2)
+
+	differed := false
+	deepDiff(expected, actual, func(path []string, expected, actual interface{}) {
+		differed = true
+		t.Errorf("\033[1;31m%s at %s expected %s, but got %s\033[0m",
+			pre,
+			strings.Join(path, ""),
+			green(expected),
+			yellow(actual))
+	})
+	if differed {
+		logFatal(t, "The structures were different.")
+	}
+}
+
+// CheckDeepEquals is similar to AssertDeepEquals, except with a non-fatal error.
+func CheckDeepEquals(t *testing.T, expected, actual interface{}) {
+	pre := prefix(2)
+
+	deepDiff(expected, actual, func(path []string, expected, actual interface{}) {
+		t.Errorf("\033[1;31m%s at %s expected %s, but got %s\033[0m",
+			pre,
+			strings.Join(path, ""),
+			green(expected),
+			yellow(actual))
+	})
+}
+
+// isJSONEquals is a utility function that implements JSON comparison for AssertJSONEquals and
+// CheckJSONEquals.
+func isJSONEquals(t *testing.T, expectedJSON string, actual interface{}) bool {
+	var parsedExpected, parsedActual interface{}
+	err := json.Unmarshal([]byte(expectedJSON), &parsedExpected)
+	if err != nil {
+		t.Errorf("Unable to parse expected value as JSON: %v", err)
+		return false
+	}
+
+	jsonActual, err := json.Marshal(actual)
+	AssertNoErr(t, err)
+	err = json.Unmarshal(jsonActual, &parsedActual)
+	AssertNoErr(t, err)
+
+	if !reflect.DeepEqual(parsedExpected, parsedActual) {
+		prettyExpected, err := json.MarshalIndent(parsedExpected, "", " ")
+		if err != nil {
+			t.Logf("Unable to pretty-print expected JSON: %v\n%s", err, expectedJSON)
+		} else {
+			// We can't use green() here because %#v prints prettyExpected as a byte array literal, which
+			// is... unhelpful. Converting it to a string first leaves "\n" uninterpreted for some reason.
+			t.Logf("Expected JSON:\n%s%s%s", greenCode, prettyExpected, resetCode)
+		}
+
+		prettyActual, err := json.MarshalIndent(actual, "", " ")
+		if err != nil {
+			t.Logf("Unable to pretty-print actual JSON: %v\n%#v", err, actual)
+		} else {
+			// We can't use yellow() for the same reason.
+			t.Logf("Actual JSON:\n%s%s%s", yellowCode, prettyActual, resetCode)
+		}
+
+		return false
+	}
+	return true
+}
+
+// AssertJSONEquals serializes a value as JSON, parses an expected string as JSON, and ensures that
+// both are consistent. If they aren't, the expected and actual structures are pretty-printed and
+// shown for comparison.
+//
+// This is useful for comparing structures that are built as nested map[string]interface{} values,
+// which are a pain to construct as literals.
+func AssertJSONEquals(t *testing.T, expectedJSON string, actual interface{}) {
+	if !isJSONEquals(t, expectedJSON, actual) {
+		logFatal(t, "The generated JSON structure differed.")
+	}
+}
+
+// CheckJSONEquals is similar to AssertJSONEquals, but nonfatal.
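A hedged usage sketch: the expected side is a JSON string literal, the actual side is any marshalable Go value, and key order and whitespace are ignored because both sides are parsed before comparison:

```go
package example

import (
	"testing"

	th "github.com/rackspace/gophercloud/testhelper"
)

// TestPayload passes even though key order and spacing differ between the
// literal and the map.
func TestPayload(t *testing.T) {
	actual := map[string]interface{}{
		"name": "RC-CLOUD",
		"cidr": "192.168.100.0/24",
	}
	th.AssertJSONEquals(t, `{"cidr":"192.168.100.0/24","name":"RC-CLOUD"}`, actual)
}
```

The non-fatal variant follows.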
+func CheckJSONEquals(t *testing.T, expectedJSON string, actual interface{}) {
+	if !isJSONEquals(t, expectedJSON, actual) {
+		logError(t, "The generated JSON structure differed.")
+	}
+}
+
+// AssertNoErr is a convenience function for checking whether an error value is
+// an actual error.
+func AssertNoErr(t *testing.T, e error) {
+	if e != nil {
+		logFatal(t, fmt.Sprintf("unexpected error %s", yellow(e.Error())))
+	}
+}
+
+// CheckNoErr is similar to AssertNoErr, except with a non-fatal error.
+func CheckNoErr(t *testing.T, e error) {
+	if e != nil {
+		logError(t, fmt.Sprintf("unexpected error %s", yellow(e.Error())))
+	}
+}
diff --git a/vendor/github.com/rackspace/gophercloud/testhelper/doc.go b/vendor/github.com/rackspace/gophercloud/testhelper/doc.go
new file mode 100644
index 000000000..25b4dfebb
--- /dev/null
+++ b/vendor/github.com/rackspace/gophercloud/testhelper/doc.go
@@ -0,0 +1,4 @@
+/*
+Package testhelper contains methods that are useful for writing unit tests.
+*/
+package testhelper
diff --git a/vendor/github.com/rackspace/gophercloud/testhelper/fixture/helper.go b/vendor/github.com/rackspace/gophercloud/testhelper/fixture/helper.go
new file mode 100644
index 000000000..d54355d75
--- /dev/null
+++ b/vendor/github.com/rackspace/gophercloud/testhelper/fixture/helper.go
@@ -0,0 +1,31 @@
+package fixture
+
+import (
+	"fmt"
+	"net/http"
+	"testing"
+
+	th "github.com/rackspace/gophercloud/testhelper"
+	"github.com/rackspace/gophercloud/testhelper/client"
+)
+
+func SetupHandler(t *testing.T, url, method, requestBody, responseBody string, status int) {
+	th.Mux.HandleFunc(url, func(w http.ResponseWriter, r *http.Request) {
+		th.TestMethod(t, r, method)
+		th.TestHeader(t, r, "X-Auth-Token", client.TokenID)
+
+		if requestBody != "" {
+			th.TestJSONRequest(t, r, requestBody)
+		}
+
+		if responseBody != "" {
+			w.Header().Add("Content-Type", "application/json")
+		}
+
+		w.WriteHeader(status)
+
+		if responseBody != "" {
+			fmt.Fprint(w, responseBody)
+		}
+	})
+}
diff --git a/vendor/github.com/rackspace/gophercloud/testhelper/http_responses.go b/vendor/github.com/rackspace/gophercloud/testhelper/http_responses.go
new file mode 100644
index 000000000..e1f1f9ac0
--- /dev/null
+++ b/vendor/github.com/rackspace/gophercloud/testhelper/http_responses.go
@@ -0,0 +1,91 @@
+package testhelper
+
+import (
+	"encoding/json"
+	"io/ioutil"
+	"net/http"
+	"net/http/httptest"
+	"net/url"
+	"reflect"
+	"testing"
+)
+
+var (
+	// Mux is a multiplexer that can be used to register handlers.
+	Mux *http.ServeMux
+
+	// Server is an in-memory HTTP server for testing.
+	Server *httptest.Server
+)
+
+// SetupHTTP prepares the Mux and Server.
+func SetupHTTP() {
+	Mux = http.NewServeMux()
+	Server = httptest.NewServer(Mux)
+}
+
+// TeardownHTTP releases HTTP-related resources.
+func TeardownHTTP() {
+	Server.Close()
+}
+
+// Endpoint returns a fake endpoint that will actually target the Mux.
+func Endpoint() string {
+	return Server.URL + "/"
+}
+
+// TestFormValues ensures that all the URL parameters given to the http.Request are the same as values.
+func TestFormValues(t *testing.T, r *http.Request, values map[string]string) {
+	want := url.Values{}
+	for k, v := range values {
+		want.Add(k, v)
+	}
+
+	r.ParseForm()
+	if !reflect.DeepEqual(want, r.Form) {
+		t.Errorf("Request parameters = %v, want %v", r.Form, want)
+	}
+}
+
+// TestMethod checks that the Request has the expected method (e.g. GET, POST).
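fixture.SetupHandler above bundles these per-request assertions into a single call; a hedged sketch of a test using it (the route and bodies are invented):

```go
package example

import (
	"net/http"
	"testing"

	th "github.com/rackspace/gophercloud/testhelper"
	"github.com/rackspace/gophercloud/testhelper/fixture"
)

// TestCreateWidget registers, in one call, the method check, the token
// check, the request-body assertion, and a canned 201 response.
func TestCreateWidget(t *testing.T) {
	th.SetupHTTP()
	defer th.TeardownHTTP()

	fixture.SetupHandler(t, "/widgets", "POST",
		`{"widget": {"name": "w1"}}`,
		`{"widget": {"id": "abc", "name": "w1"}}`,
		http.StatusCreated)

	// ...exercise the create call against a fake service client here...
}
```

The individual assertions, starting with TestMethod, follow.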
+func TestMethod(t *testing.T, r *http.Request, expected string) {
+	if expected != r.Method {
+		t.Errorf("Request method = %v, expected %v", r.Method, expected)
+	}
+}
+
+// TestHeader checks that the header on the http.Request matches the expected value.
+func TestHeader(t *testing.T, r *http.Request, header string, expected string) {
+	if actual := r.Header.Get(header); expected != actual {
+		t.Errorf("Header %s = %s, expected %s", header, actual, expected)
+	}
+}
+
+// TestBody verifies that the request body matches an expected body.
+func TestBody(t *testing.T, r *http.Request, expected string) {
+	b, err := ioutil.ReadAll(r.Body)
+	if err != nil {
+		t.Errorf("Unable to read body: %v", err)
+	}
+	str := string(b)
+	if expected != str {
+		t.Errorf("Body = %s, expected %s", str, expected)
+	}
+}
+
+// TestJSONRequest verifies that the JSON payload of a request matches an expected structure, without asserting things about
+// whitespace or ordering.
+func TestJSONRequest(t *testing.T, r *http.Request, expected string) {
+	b, err := ioutil.ReadAll(r.Body)
+	if err != nil {
+		t.Errorf("Unable to read request body: %v", err)
+	}
+
+	var actualJSON interface{}
+	err = json.Unmarshal(b, &actualJSON)
+	if err != nil {
+		t.Errorf("Unable to parse request body as JSON: %v", err)
+	}
+
+	CheckJSONEquals(t, expected, actualJSON)
+}
diff --git a/vendor/github.com/rackspace/gophercloud/util.go b/vendor/github.com/rackspace/gophercloud/util.go
new file mode 100644
index 000000000..3d6a4e490
--- /dev/null
+++ b/vendor/github.com/rackspace/gophercloud/util.go
@@ -0,0 +1,82 @@
+package gophercloud
+
+import (
+	"errors"
+	"net/url"
+	"path/filepath"
+	"strings"
+	"time"
+)
+
+// WaitFor polls a predicate function, once per second, up to a timeout limit.
+// It usually does this to wait for a resource to transition to a certain state.
+// Resource packages will wrap this in a more convenient function that's
+// specific to a certain resource, but it can also be useful on its own.
+func WaitFor(timeout int, predicate func() (bool, error)) error {
+	start := time.Now()
+	for {
+		// Force a 1s sleep
+		time.Sleep(1 * time.Second)
+
+		// If a timeout is set, and that's been exceeded, shut it down
+		if timeout >= 0 && time.Since(start) >= time.Duration(timeout)*time.Second {
+			return errors.New("A timeout occurred")
+		}
+
+		// Execute the function
+		satisfied, err := predicate()
+		if err != nil {
+			return err
+		}
+		if satisfied {
+			return nil
+		}
+	}
+}
+
+// NormalizeURL is an internal function to be used by provider clients.
+//
+// It ensures that each endpoint URL has a closing `/`, as expected by
+// ServiceClient's methods.
+func NormalizeURL(url string) string {
+	if !strings.HasSuffix(url, "/") {
+		return url + "/"
+	}
+	return url
+}
+
+// NormalizePathURL is used to convert rawPath to a fqdn, using basePath as
+// a reference in the filesystem, if necessary. basePath is assumed to contain
+// either '.' when first used, or the file:// type fqdn of the parent resource.
+// e.g.
myFavScript.yaml => file://opt/lib/myFavScript.yaml +func NormalizePathURL(basePath, rawPath string) (string, error) { + u, err := url.Parse(rawPath) + if err != nil { + return "", err + } + // if a scheme is defined, it must be a fqdn already + if u.Scheme != "" { + return u.String(), nil + } + // if basePath is a url, then child resources are assumed to be relative to it + bu, err := url.Parse(basePath) + if err != nil { + return "", err + } + var basePathSys, absPathSys string + if bu.Scheme != "" { + basePathSys = filepath.FromSlash(bu.Path) + absPathSys = filepath.Join(basePathSys, rawPath) + bu.Path = filepath.ToSlash(absPathSys) + return bu.String(), nil + } + + absPathSys = filepath.Join(basePath, rawPath) + u.Path = filepath.ToSlash(absPathSys) + if err != nil { + return "", err + } + u.Scheme = "file" + return u.String(), nil + +} diff --git a/vendor/github.com/rackspace/gophercloud/util_test.go b/vendor/github.com/rackspace/gophercloud/util_test.go new file mode 100644 index 000000000..dcec77f24 --- /dev/null +++ b/vendor/github.com/rackspace/gophercloud/util_test.go @@ -0,0 +1,85 @@ +package gophercloud + +import ( + "os" + "path/filepath" + "strings" + "testing" + + th "github.com/rackspace/gophercloud/testhelper" +) + +func TestWaitFor(t *testing.T) { + err := WaitFor(5, func() (bool, error) { + return true, nil + }) + th.CheckNoErr(t, err) +} + +func TestNormalizeURL(t *testing.T) { + urls := []string{ + "NoSlashAtEnd", + "SlashAtEnd/", + } + expected := []string{ + "NoSlashAtEnd/", + "SlashAtEnd/", + } + for i := 0; i < len(expected); i++ { + th.CheckEquals(t, expected[i], NormalizeURL(urls[i])) + } + +} + +func TestNormalizePathURL(t *testing.T) { + baseDir, _ := os.Getwd() + + rawPath := "template.yaml" + basePath, _ := filepath.Abs(".") + result, _ := NormalizePathURL(basePath, rawPath) + expected := strings.Join([]string{"file:/", filepath.ToSlash(baseDir), "template.yaml"}, "/") + th.CheckEquals(t, expected, result) + + rawPath = "http://www.google.com" + basePath, _ = filepath.Abs(".") + result, _ = NormalizePathURL(basePath, rawPath) + expected = "http://www.google.com" + th.CheckEquals(t, expected, result) + + rawPath = "very/nested/file.yaml" + basePath, _ = filepath.Abs(".") + result, _ = NormalizePathURL(basePath, rawPath) + expected = strings.Join([]string{"file:/", filepath.ToSlash(baseDir), "very/nested/file.yaml"}, "/") + th.CheckEquals(t, expected, result) + + rawPath = "very/nested/file.yaml" + basePath = "http://www.google.com" + result, _ = NormalizePathURL(basePath, rawPath) + expected = "http://www.google.com/very/nested/file.yaml" + th.CheckEquals(t, expected, result) + + rawPath = "very/nested/file.yaml/" + basePath = "http://www.google.com/" + result, _ = NormalizePathURL(basePath, rawPath) + expected = "http://www.google.com/very/nested/file.yaml" + th.CheckEquals(t, expected, result) + + rawPath = "very/nested/file.yaml" + basePath = "http://www.google.com/even/more" + result, _ = NormalizePathURL(basePath, rawPath) + expected = "http://www.google.com/even/more/very/nested/file.yaml" + th.CheckEquals(t, expected, result) + + rawPath = "very/nested/file.yaml" + basePath = strings.Join([]string{"file:/", filepath.ToSlash(baseDir), "only/file/even/more"}, "/") + result, _ = NormalizePathURL(basePath, rawPath) + expected = strings.Join([]string{"file:/", filepath.ToSlash(baseDir), "only/file/even/more/very/nested/file.yaml"}, "/") + th.CheckEquals(t, expected, result) + + rawPath = "very/nested/file.yaml/" + basePath = strings.Join([]string{"file:/", 
filepath.ToSlash(baseDir), "only/file/even/more"}, "/") + result, _ = NormalizePathURL(basePath, rawPath) + expected = strings.Join([]string{"file:/", filepath.ToSlash(baseDir), "only/file/even/more/very/nested/file.yaml"}, "/") + th.CheckEquals(t, expected, result) + +} diff --git a/vendor/github.com/satori/go.uuid/.travis.yml b/vendor/github.com/satori/go.uuid/.travis.yml new file mode 100644 index 000000000..2199ce904 --- /dev/null +++ b/vendor/github.com/satori/go.uuid/.travis.yml @@ -0,0 +1,11 @@ +language: go +go: + - 1.0 + - 1.1 + - 1.2 + - 1.3 + - 1.4 + - 1.5 +sudo: false +notifications: + email: false diff --git a/vendor/github.com/satori/go.uuid/LICENSE b/vendor/github.com/satori/go.uuid/LICENSE new file mode 100644 index 000000000..6a1fb910d --- /dev/null +++ b/vendor/github.com/satori/go.uuid/LICENSE @@ -0,0 +1,20 @@ +Copyright (C) 2013-2015 by Maxim Bublis + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/satori/go.uuid/README.md b/vendor/github.com/satori/go.uuid/README.md new file mode 100644 index 000000000..48d4937f4 --- /dev/null +++ b/vendor/github.com/satori/go.uuid/README.md @@ -0,0 +1,66 @@ +# UUID package for Go language + +[![Build Status](https://travis-ci.org/satori/go.uuid.png?branch=master)](https://travis-ci.org/satori/go.uuid) +[![GoDoc](http://godoc.org/github.com/satori/go.uuid?status.png)](http://godoc.org/github.com/satori/go.uuid) + +This package provides pure Go implementation of Universally Unique Identifier (UUID). Supported both creation and parsing of UUIDs. + +With 100% test coverage and benchmarks out of box. + +Supported versions: +* Version 1, based on timestamp and MAC address (RFC 4122) +* Version 2, based on timestamp, MAC address and POSIX UID/GID (DCE 1.1) +* Version 3, based on MD5 hashing (RFC 4122) +* Version 4, based on random numbers (RFC 4122) +* Version 5, based on SHA-1 hashing (RFC 4122) + +## Installation + +Use the `go` command: + + $ go get github.com/satori/go.uuid + +## Requirements + +UUID package requires any stable version of Go Programming Language. 
+ +It is tested against following versions of Go: 1.0-1.5 + +## Example + +```go +package main + +import ( + "fmt" + "github.com/satori/go.uuid" +) + +func main() { + // Creating UUID Version 4 + u1 := uuid.NewV4() + fmt.Printf("UUIDv4: %s\n", u1) + + // Parsing UUID from string input + u2, err := uuid.FromString("6ba7b810-9dad-11d1-80b4-00c04fd430c8") + if err != nil { + fmt.Printf("Something gone wrong: %s", err) + } + fmt.Printf("Successfully parsed: %s", u2) +} +``` + +## Documentation + +[Documentation](http://godoc.org/github.com/satori/go.uuid) is hosted at GoDoc project. + +## Links +* [RFC 4122](http://tools.ietf.org/html/rfc4122) +* [DCE 1.1: Authentication and Security Services](http://pubs.opengroup.org/onlinepubs/9696989899/chap5.htm#tagcjh_08_02_01_01) + +## Copyright + +Copyright (C) 2013-2015 by Maxim Bublis . + +UUID package released under MIT License. +See [LICENSE](https://github.com/satori/go.uuid/blob/master/LICENSE) for details. diff --git a/vendor/github.com/satori/go.uuid/benchmarks_test.go b/vendor/github.com/satori/go.uuid/benchmarks_test.go new file mode 100644 index 000000000..9a85f7c6b --- /dev/null +++ b/vendor/github.com/satori/go.uuid/benchmarks_test.go @@ -0,0 +1,121 @@ +// Copyright (C) 2013-2014 by Maxim Bublis +// +// Permission is hereby granted, free of charge, to any person obtaining +// a copy of this software and associated documentation files (the +// "Software"), to deal in the Software without restriction, including +// without limitation the rights to use, copy, modify, merge, publish, +// distribute, sublicense, and/or sell copies of the Software, and to +// permit persons to whom the Software is furnished to do so, subject to +// the following conditions: +// +// The above copyright notice and this permission notice shall be +// included in all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +// LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +// WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ +package uuid + +import ( + "testing" +) + +func BenchmarkFromBytes(b *testing.B) { + bytes := []byte{0x6b, 0xa7, 0xb8, 0x10, 0x9d, 0xad, 0x11, 0xd1, 0x80, 0xb4, 0x00, 0xc0, 0x4f, 0xd4, 0x30, 0xc8} + for i := 0; i < b.N; i++ { + FromBytes(bytes) + } +} + +func BenchmarkFromString(b *testing.B) { + s := "6ba7b810-9dad-11d1-80b4-00c04fd430c8" + for i := 0; i < b.N; i++ { + FromString(s) + } +} + +func BenchmarkFromStringUrn(b *testing.B) { + s := "urn:uuid:6ba7b810-9dad-11d1-80b4-00c04fd430c8" + for i := 0; i < b.N; i++ { + FromString(s) + } +} + +func BenchmarkFromStringWithBrackets(b *testing.B) { + s := "{6ba7b810-9dad-11d1-80b4-00c04fd430c8}" + for i := 0; i < b.N; i++ { + FromString(s) + } +} + +func BenchmarkNewV1(b *testing.B) { + for i := 0; i < b.N; i++ { + NewV1() + } +} + +func BenchmarkNewV2(b *testing.B) { + for i := 0; i < b.N; i++ { + NewV2(DomainPerson) + } +} + +func BenchmarkNewV3(b *testing.B) { + for i := 0; i < b.N; i++ { + NewV3(NamespaceDNS, "www.example.com") + } +} + +func BenchmarkNewV4(b *testing.B) { + for i := 0; i < b.N; i++ { + NewV4() + } +} + +func BenchmarkNewV5(b *testing.B) { + for i := 0; i < b.N; i++ { + NewV5(NamespaceDNS, "www.example.com") + } +} + +func BenchmarkMarshalBinary(b *testing.B) { + u := NewV4() + for i := 0; i < b.N; i++ { + u.MarshalBinary() + } +} + +func BenchmarkMarshalText(b *testing.B) { + u := NewV4() + for i := 0; i < b.N; i++ { + u.MarshalText() + } +} + +func BenchmarkUnmarshalBinary(b *testing.B) { + bytes := []byte{0x6b, 0xa7, 0xb8, 0x10, 0x9d, 0xad, 0x11, 0xd1, 0x80, 0xb4, 0x00, 0xc0, 0x4f, 0xd4, 0x30, 0xc8} + u := UUID{} + for i := 0; i < b.N; i++ { + u.UnmarshalBinary(bytes) + } +} + +func BenchmarkUnmarshalText(b *testing.B) { + bytes := []byte("6ba7b810-9dad-11d1-80b4-00c04fd430c8") + u := UUID{} + for i := 0; i < b.N; i++ { + u.UnmarshalText(bytes) + } +} + +func BenchmarkMarshalToString(b *testing.B) { + u := NewV4() + for i := 0; i < b.N; i++ { + u.String() + } +} diff --git a/vendor/github.com/satori/go.uuid/uuid.go b/vendor/github.com/satori/go.uuid/uuid.go new file mode 100644 index 000000000..03841d86e --- /dev/null +++ b/vendor/github.com/satori/go.uuid/uuid.go @@ -0,0 +1,435 @@ +// Copyright (C) 2013-2015 by Maxim Bublis +// +// Permission is hereby granted, free of charge, to any person obtaining +// a copy of this software and associated documentation files (the +// "Software"), to deal in the Software without restriction, including +// without limitation the rights to use, copy, modify, merge, publish, +// distribute, sublicense, and/or sell copies of the Software, and to +// permit persons to whom the Software is furnished to do so, subject to +// the following conditions: +// +// The above copyright notice and this permission notice shall be +// included in all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +// LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +// WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + +// Package uuid provides implementation of Universally Unique Identifier (UUID). +// Supported versions are 1, 3, 4 and 5 (as specified in RFC 4122) and +// version 2 (as specified in DCE 1.1). 
+package uuid + +import ( + "bytes" + "crypto/md5" + "crypto/rand" + "crypto/sha1" + "database/sql/driver" + "encoding/binary" + "encoding/hex" + "fmt" + "hash" + "net" + "os" + "sync" + "time" +) + +// UUID layout variants. +const ( + VariantNCS = iota + VariantRFC4122 + VariantMicrosoft + VariantFuture +) + +// UUID DCE domains. +const ( + DomainPerson = iota + DomainGroup + DomainOrg +) + +// Difference in 100-nanosecond intervals between +// UUID epoch (October 15, 1582) and Unix epoch (January 1, 1970). +const epochStart = 122192928000000000 + +// Used in string method conversion +const dash byte = '-' + +// UUID v1/v2 storage. +var ( + storageMutex sync.Mutex + storageOnce sync.Once + epochFunc = unixTimeFunc + clockSequence uint16 + lastTime uint64 + hardwareAddr [6]byte + posixUID = uint32(os.Getuid()) + posixGID = uint32(os.Getgid()) +) + +// String parse helpers. +var ( + urnPrefix = []byte("urn:uuid:") + byteGroups = []int{8, 4, 4, 4, 12} +) + +func initClockSequence() { + buf := make([]byte, 2) + safeRandom(buf) + clockSequence = binary.BigEndian.Uint16(buf) +} + +func initHardwareAddr() { + interfaces, err := net.Interfaces() + if err == nil { + for _, iface := range interfaces { + if len(iface.HardwareAddr) >= 6 { + copy(hardwareAddr[:], iface.HardwareAddr) + return + } + } + } + + // Initialize hardwareAddr randomly in case + // of real network interfaces absence + safeRandom(hardwareAddr[:]) + + // Set multicast bit as recommended in RFC 4122 + hardwareAddr[0] |= 0x01 +} + +func initStorage() { + initClockSequence() + initHardwareAddr() +} + +func safeRandom(dest []byte) { + if _, err := rand.Read(dest); err != nil { + panic(err) + } +} + +// Returns difference in 100-nanosecond intervals between +// UUID epoch (October 15, 1582) and current time. +// This is default epoch calculation function. +func unixTimeFunc() uint64 { + return epochStart + uint64(time.Now().UnixNano()/100) +} + +// UUID representation compliant with specification +// described in RFC 4122. +type UUID [16]byte + +// The nil UUID is special form of UUID that is specified to have all +// 128 bits set to zero. +var Nil = UUID{} + +// Predefined namespace UUIDs. +var ( + NamespaceDNS, _ = FromString("6ba7b810-9dad-11d1-80b4-00c04fd430c8") + NamespaceURL, _ = FromString("6ba7b811-9dad-11d1-80b4-00c04fd430c8") + NamespaceOID, _ = FromString("6ba7b812-9dad-11d1-80b4-00c04fd430c8") + NamespaceX500, _ = FromString("6ba7b814-9dad-11d1-80b4-00c04fd430c8") +) + +// And returns result of binary AND of two UUIDs. +func And(u1 UUID, u2 UUID) UUID { + u := UUID{} + for i := 0; i < 16; i++ { + u[i] = u1[i] & u2[i] + } + return u +} + +// Or returns result of binary OR of two UUIDs. +func Or(u1 UUID, u2 UUID) UUID { + u := UUID{} + for i := 0; i < 16; i++ { + u[i] = u1[i] | u2[i] + } + return u +} + +// Equal returns true if u1 and u2 equals, otherwise returns false. +func Equal(u1 UUID, u2 UUID) bool { + return bytes.Equal(u1[:], u2[:]) +} + +// Version returns algorithm version used to generate UUID. +func (u UUID) Version() uint { + return uint(u[6] >> 4) +} + +// Variant returns UUID layout variant. +func (u UUID) Variant() uint { + switch { + case (u[8] & 0x80) == 0x00: + return VariantNCS + case (u[8]&0xc0)|0x80 == 0x80: + return VariantRFC4122 + case (u[8]&0xe0)|0xc0 == 0xc0: + return VariantMicrosoft + } + return VariantFuture +} + +// Bytes returns bytes slice representation of UUID. 
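A hedged sketch tying the two accessors above together; the DNS namespace UUID declared earlier in this file is a version-1, RFC 4122-variant value, as the tests later in this patch confirm:

```go
package example

import (
	"fmt"

	"github.com/satori/go.uuid"
)

// inspect prints the algorithm version and layout variant of a UUID.
func inspect() {
	u := uuid.NamespaceDNS
	fmt.Println(u.Version())                        // 1
	fmt.Println(u.Variant() == uuid.VariantRFC4122) // true
}
```

The raw-bytes accessor just described follows.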
+func (u UUID) Bytes() []byte { + return u[:] +} + +// Returns canonical string representation of UUID: +// xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx. +func (u UUID) String() string { + buf := make([]byte, 36) + + hex.Encode(buf[0:8], u[0:4]) + buf[8] = dash + hex.Encode(buf[9:13], u[4:6]) + buf[13] = dash + hex.Encode(buf[14:18], u[6:8]) + buf[18] = dash + hex.Encode(buf[19:23], u[8:10]) + buf[23] = dash + hex.Encode(buf[24:], u[10:]) + + return string(buf) +} + +// SetVersion sets version bits. +func (u *UUID) SetVersion(v byte) { + u[6] = (u[6] & 0x0f) | (v << 4) +} + +// SetVariant sets variant bits as described in RFC 4122. +func (u *UUID) SetVariant() { + u[8] = (u[8] & 0xbf) | 0x80 +} + +// MarshalText implements the encoding.TextMarshaler interface. +// The encoding is the same as returned by String. +func (u UUID) MarshalText() (text []byte, err error) { + text = []byte(u.String()) + return +} + +// UnmarshalText implements the encoding.TextUnmarshaler interface. +// Following formats are supported: +// "6ba7b810-9dad-11d1-80b4-00c04fd430c8", +// "{6ba7b810-9dad-11d1-80b4-00c04fd430c8}", +// "urn:uuid:6ba7b810-9dad-11d1-80b4-00c04fd430c8" +func (u *UUID) UnmarshalText(text []byte) (err error) { + if len(text) < 32 { + err = fmt.Errorf("uuid: invalid UUID string: %s", text) + return + } + + if bytes.Equal(text[:9], urnPrefix) { + text = text[9:] + } else if text[0] == '{' { + text = text[1:] + } + + b := u[:] + + for _, byteGroup := range byteGroups { + if text[0] == '-' { + text = text[1:] + } + + _, err = hex.Decode(b[:byteGroup/2], text[:byteGroup]) + + if err != nil { + return + } + + text = text[byteGroup:] + b = b[byteGroup/2:] + } + + return +} + +// MarshalBinary implements the encoding.BinaryMarshaler interface. +func (u UUID) MarshalBinary() (data []byte, err error) { + data = u.Bytes() + return +} + +// UnmarshalBinary implements the encoding.BinaryUnmarshaler interface. +// It will return error if the slice isn't 16 bytes long. +func (u *UUID) UnmarshalBinary(data []byte) (err error) { + if len(data) != 16 { + err = fmt.Errorf("uuid: UUID must be exactly 16 bytes long, got %d bytes", len(data)) + return + } + copy(u[:], data) + + return +} + +// Value implements the driver.Valuer interface. +func (u UUID) Value() (driver.Value, error) { + return u.String(), nil +} + +// Scan implements the sql.Scanner interface. +// A 16-byte slice is handled by UnmarshalBinary, while +// a longer byte slice or a string is handled by UnmarshalText. +func (u *UUID) Scan(src interface{}) error { + switch src := src.(type) { + case []byte: + if len(src) == 16 { + return u.UnmarshalBinary(src) + } + return u.UnmarshalText(src) + + case string: + return u.UnmarshalText([]byte(src)) + } + + return fmt.Errorf("uuid: cannot convert %T to UUID", src) +} + +// FromBytes returns UUID converted from raw byte slice input. +// It will return error if the slice isn't 16 bytes long. +func FromBytes(input []byte) (u UUID, err error) { + err = u.UnmarshalBinary(input) + return +} + +// FromBytesOrNil returns UUID converted from raw byte slice input. +// Same behavior as FromBytes, but returns a Nil UUID on error. +func FromBytesOrNil(input []byte) UUID { + uuid, err := FromBytes(input) + if err != nil { + return Nil + } + return uuid +} + +// FromString returns UUID parsed from string input. +// Input is expected in a form accepted by UnmarshalText. 
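All three spellings accepted by UnmarshalText parse to the same value, as the package's own tests exercise; a short sketch:

```go
package example

import (
	"fmt"

	"github.com/satori/go.uuid"
)

// parseForms demonstrates the canonical, braced, and URN input forms.
func parseForms() {
	for _, s := range []string{
		"6ba7b810-9dad-11d1-80b4-00c04fd430c8",
		"{6ba7b810-9dad-11d1-80b4-00c04fd430c8}",
		"urn:uuid:6ba7b810-9dad-11d1-80b4-00c04fd430c8",
	} {
		u, err := uuid.FromString(s)
		fmt.Println(u, err) // the same UUID, nil error, for all three
	}
}
```

The parser follows.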
+func FromString(input string) (u UUID, err error) { + err = u.UnmarshalText([]byte(input)) + return +} + +// FromStringOrNil returns UUID parsed from string input. +// Same behavior as FromString, but returns a Nil UUID on error. +func FromStringOrNil(input string) UUID { + uuid, err := FromString(input) + if err != nil { + return Nil + } + return uuid +} + +// Returns UUID v1/v2 storage state. +// Returns epoch timestamp, clock sequence, and hardware address. +func getStorage() (uint64, uint16, []byte) { + storageOnce.Do(initStorage) + + storageMutex.Lock() + defer storageMutex.Unlock() + + timeNow := epochFunc() + // Clock changed backwards since last UUID generation. + // Should increase clock sequence. + if timeNow <= lastTime { + clockSequence++ + } + lastTime = timeNow + + return timeNow, clockSequence, hardwareAddr[:] +} + +// NewV1 returns UUID based on current timestamp and MAC address. +func NewV1() UUID { + u := UUID{} + + timeNow, clockSeq, hardwareAddr := getStorage() + + binary.BigEndian.PutUint32(u[0:], uint32(timeNow)) + binary.BigEndian.PutUint16(u[4:], uint16(timeNow>>32)) + binary.BigEndian.PutUint16(u[6:], uint16(timeNow>>48)) + binary.BigEndian.PutUint16(u[8:], clockSeq) + + copy(u[10:], hardwareAddr) + + u.SetVersion(1) + u.SetVariant() + + return u +} + +// NewV2 returns DCE Security UUID based on POSIX UID/GID. +func NewV2(domain byte) UUID { + u := UUID{} + + timeNow, clockSeq, hardwareAddr := getStorage() + + switch domain { + case DomainPerson: + binary.BigEndian.PutUint32(u[0:], posixUID) + case DomainGroup: + binary.BigEndian.PutUint32(u[0:], posixGID) + } + + binary.BigEndian.PutUint16(u[4:], uint16(timeNow>>32)) + binary.BigEndian.PutUint16(u[6:], uint16(timeNow>>48)) + binary.BigEndian.PutUint16(u[8:], clockSeq) + u[9] = domain + + copy(u[10:], hardwareAddr) + + u.SetVersion(2) + u.SetVariant() + + return u +} + +// NewV3 returns UUID based on MD5 hash of namespace UUID and name. +func NewV3(ns UUID, name string) UUID { + u := newFromHash(md5.New(), ns, name) + u.SetVersion(3) + u.SetVariant() + + return u +} + +// NewV4 returns random generated UUID. +func NewV4() UUID { + u := UUID{} + safeRandom(u[:]) + u.SetVersion(4) + u.SetVariant() + + return u +} + +// NewV5 returns UUID based on SHA-1 hash of namespace UUID and name. +func NewV5(ns UUID, name string) UUID { + u := newFromHash(sha1.New(), ns, name) + u.SetVersion(5) + u.SetVariant() + + return u +} + +// Returns UUID based on hashing of namespace UUID and name. +func newFromHash(h hash.Hash, ns UUID, name string) UUID { + u := UUID{} + h.Write(ns[:]) + h.Write([]byte(name)) + copy(u[:], h.Sum(nil)) + + return u +} diff --git a/vendor/github.com/satori/go.uuid/uuid_test.go b/vendor/github.com/satori/go.uuid/uuid_test.go new file mode 100644 index 000000000..a50150315 --- /dev/null +++ b/vendor/github.com/satori/go.uuid/uuid_test.go @@ -0,0 +1,508 @@ +// Copyright (C) 2013, 2015 by Maxim Bublis +// +// Permission is hereby granted, free of charge, to any person obtaining +// a copy of this software and associated documentation files (the +// "Software"), to deal in the Software without restriction, including +// without limitation the rights to use, copy, modify, merge, publish, +// distribute, sublicense, and/or sell copies of the Software, and to +// permit persons to whom the Software is furnished to do so, subject to +// the following conditions: +// +// The above copyright notice and this permission notice shall be +// included in all copies or substantial portions of the Software. 
+// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +// LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +// WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + +package uuid + +import ( + "bytes" + "testing" +) + +func TestBytes(t *testing.T) { + u := UUID{0x6b, 0xa7, 0xb8, 0x10, 0x9d, 0xad, 0x11, 0xd1, 0x80, 0xb4, 0x00, 0xc0, 0x4f, 0xd4, 0x30, 0xc8} + + bytes1 := []byte{0x6b, 0xa7, 0xb8, 0x10, 0x9d, 0xad, 0x11, 0xd1, 0x80, 0xb4, 0x00, 0xc0, 0x4f, 0xd4, 0x30, 0xc8} + + if !bytes.Equal(u.Bytes(), bytes1) { + t.Errorf("Incorrect bytes representation for UUID: %s", u) + } +} + +func TestString(t *testing.T) { + if NamespaceDNS.String() != "6ba7b810-9dad-11d1-80b4-00c04fd430c8" { + t.Errorf("Incorrect string representation for UUID: %s", NamespaceDNS.String()) + } +} + +func TestEqual(t *testing.T) { + if !Equal(NamespaceDNS, NamespaceDNS) { + t.Errorf("Incorrect comparison of %s and %s", NamespaceDNS, NamespaceDNS) + } + + if Equal(NamespaceDNS, NamespaceURL) { + t.Errorf("Incorrect comparison of %s and %s", NamespaceDNS, NamespaceURL) + } +} + +func TestOr(t *testing.T) { + u1 := UUID{0x00, 0xff, 0x00, 0xff, 0x00, 0xff, 0x00, 0xff, 0x00, 0xff, 0x00, 0xff, 0x00, 0xff, 0x00, 0xff} + u2 := UUID{0xff, 0x00, 0xff, 0x00, 0xff, 0x00, 0xff, 0x00, 0xff, 0x00, 0xff, 0x00, 0xff, 0x00, 0xff, 0x00} + + u := UUID{0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff} + + if !Equal(u, Or(u1, u2)) { + t.Errorf("Incorrect bitwise OR result %s", Or(u1, u2)) + } +} + +func TestAnd(t *testing.T) { + u1 := UUID{0x00, 0xff, 0x00, 0xff, 0x00, 0xff, 0x00, 0xff, 0x00, 0xff, 0x00, 0xff, 0x00, 0xff, 0x00, 0xff} + u2 := UUID{0xff, 0x00, 0xff, 0x00, 0xff, 0x00, 0xff, 0x00, 0xff, 0x00, 0xff, 0x00, 0xff, 0x00, 0xff, 0x00} + + u := UUID{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00} + + if !Equal(u, And(u1, u2)) { + t.Errorf("Incorrect bitwise AND result %s", And(u1, u2)) + } +} + +func TestVersion(t *testing.T) { + u := UUID{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00} + + if u.Version() != 1 { + t.Errorf("Incorrect version for UUID: %d", u.Version()) + } +} + +func TestSetVersion(t *testing.T) { + u := UUID{} + u.SetVersion(4) + + if u.Version() != 4 { + t.Errorf("Incorrect version for UUID after u.setVersion(4): %d", u.Version()) + } +} + +func TestVariant(t *testing.T) { + u1 := UUID{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00} + + if u1.Variant() != VariantNCS { + t.Errorf("Incorrect variant for UUID variant %d: %d", VariantNCS, u1.Variant()) + } + + u2 := UUID{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00} + + if u2.Variant() != VariantRFC4122 { + t.Errorf("Incorrect variant for UUID variant %d: %d", VariantRFC4122, u2.Variant()) + } + + u3 := UUID{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00} + + if u3.Variant() != VariantMicrosoft { + t.Errorf("Incorrect variant for UUID variant %d: %d", VariantMicrosoft, u3.Variant()) + } + + u4 := UUID{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xe0, 0x00, 
0x00, 0x00, 0x00, 0x00, 0x00, 0x00} + + if u4.Variant() != VariantFuture { + t.Errorf("Incorrect variant for UUID variant %d: %d", VariantFuture, u4.Variant()) + } +} + +func TestSetVariant(t *testing.T) { + u := new(UUID) + u.SetVariant() + + if u.Variant() != VariantRFC4122 { + t.Errorf("Incorrect variant for UUID after u.setVariant(): %d", u.Variant()) + } +} + +func TestFromBytes(t *testing.T) { + u := UUID{0x6b, 0xa7, 0xb8, 0x10, 0x9d, 0xad, 0x11, 0xd1, 0x80, 0xb4, 0x00, 0xc0, 0x4f, 0xd4, 0x30, 0xc8} + b1 := []byte{0x6b, 0xa7, 0xb8, 0x10, 0x9d, 0xad, 0x11, 0xd1, 0x80, 0xb4, 0x00, 0xc0, 0x4f, 0xd4, 0x30, 0xc8} + + u1, err := FromBytes(b1) + if err != nil { + t.Errorf("Error parsing UUID from bytes: %s", err) + } + + if !Equal(u, u1) { + t.Errorf("UUIDs should be equal: %s and %s", u, u1) + } + + b2 := []byte{} + + _, err = FromBytes(b2) + if err == nil { + t.Errorf("Should return error parsing from empty byte slice, got %s", err) + } +} + +func TestMarshalBinary(t *testing.T) { + u := UUID{0x6b, 0xa7, 0xb8, 0x10, 0x9d, 0xad, 0x11, 0xd1, 0x80, 0xb4, 0x00, 0xc0, 0x4f, 0xd4, 0x30, 0xc8} + b1 := []byte{0x6b, 0xa7, 0xb8, 0x10, 0x9d, 0xad, 0x11, 0xd1, 0x80, 0xb4, 0x00, 0xc0, 0x4f, 0xd4, 0x30, 0xc8} + + b2, err := u.MarshalBinary() + if err != nil { + t.Errorf("Error marshaling UUID: %s", err) + } + + if !bytes.Equal(b1, b2) { + t.Errorf("Marshaled UUID should be %s, got %s", b1, b2) + } +} + +func TestUnmarshalBinary(t *testing.T) { + u := UUID{0x6b, 0xa7, 0xb8, 0x10, 0x9d, 0xad, 0x11, 0xd1, 0x80, 0xb4, 0x00, 0xc0, 0x4f, 0xd4, 0x30, 0xc8} + b1 := []byte{0x6b, 0xa7, 0xb8, 0x10, 0x9d, 0xad, 0x11, 0xd1, 0x80, 0xb4, 0x00, 0xc0, 0x4f, 0xd4, 0x30, 0xc8} + + u1 := UUID{} + err := u1.UnmarshalBinary(b1) + if err != nil { + t.Errorf("Error unmarshaling UUID: %s", err) + } + + if !Equal(u, u1) { + t.Errorf("UUIDs should be equal: %s and %s", u, u1) + } + + b2 := []byte{} + u2 := UUID{} + + err = u2.UnmarshalBinary(b2) + if err == nil { + t.Errorf("Should return error unmarshalling from empty byte slice, got %s", err) + } +} + +func TestFromString(t *testing.T) { + u := UUID{0x6b, 0xa7, 0xb8, 0x10, 0x9d, 0xad, 0x11, 0xd1, 0x80, 0xb4, 0x00, 0xc0, 0x4f, 0xd4, 0x30, 0xc8} + + s1 := "6ba7b810-9dad-11d1-80b4-00c04fd430c8" + s2 := "{6ba7b810-9dad-11d1-80b4-00c04fd430c8}" + s3 := "urn:uuid:6ba7b810-9dad-11d1-80b4-00c04fd430c8" + + _, err := FromString("") + if err == nil { + t.Errorf("Should return error trying to parse empty string, got %s", err) + } + + u1, err := FromString(s1) + if err != nil { + t.Errorf("Error parsing UUID from string: %s", err) + } + + if !Equal(u, u1) { + t.Errorf("UUIDs should be equal: %s and %s", u, u1) + } + + u2, err := FromString(s2) + if err != nil { + t.Errorf("Error parsing UUID from string: %s", err) + } + + if !Equal(u, u2) { + t.Errorf("UUIDs should be equal: %s and %s", u, u2) + } + + u3, err := FromString(s3) + if err != nil { + t.Errorf("Error parsing UUID from string: %s", err) + } + + if !Equal(u, u3) { + t.Errorf("UUIDs should be equal: %s and %s", u, u3) + } +} + +func TestFromStringOrNil(t *testing.T) { + u := FromStringOrNil("") + if u != Nil { + t.Errorf("Should return Nil UUID on parse failure, got %s", u) + } +} + +func TestFromBytesOrNil(t *testing.T) { + b := []byte{} + u := FromBytesOrNil(b) + if u != Nil { + t.Errorf("Should return Nil UUID on parse failure, got %s", u) + } +} + +func TestMarshalText(t *testing.T) { + u := UUID{0x6b, 0xa7, 0xb8, 0x10, 0x9d, 0xad, 0x11, 0xd1, 0x80, 0xb4, 0x00, 0xc0, 0x4f, 0xd4, 0x30, 0xc8} + b1 := 
[]byte("6ba7b810-9dad-11d1-80b4-00c04fd430c8") + + b2, err := u.MarshalText() + if err != nil { + t.Errorf("Error marshaling UUID: %s", err) + } + + if !bytes.Equal(b1, b2) { + t.Errorf("Marshaled UUID should be %s, got %s", b1, b2) + } +} + +func TestUnmarshalText(t *testing.T) { + u := UUID{0x6b, 0xa7, 0xb8, 0x10, 0x9d, 0xad, 0x11, 0xd1, 0x80, 0xb4, 0x00, 0xc0, 0x4f, 0xd4, 0x30, 0xc8} + b1 := []byte("6ba7b810-9dad-11d1-80b4-00c04fd430c8") + + u1 := UUID{} + err := u1.UnmarshalText(b1) + if err != nil { + t.Errorf("Error unmarshaling UUID: %s", err) + } + + if !Equal(u, u1) { + t.Errorf("UUIDs should be equal: %s and %s", u, u1) + } + + b2 := []byte("") + u2 := UUID{} + + err = u2.UnmarshalText(b2) + if err == nil { + t.Errorf("Should return error trying to unmarshal from empty string") + } +} + +func TestValue(t *testing.T) { + u, err := FromString("6ba7b810-9dad-11d1-80b4-00c04fd430c8") + if err != nil { + t.Errorf("Error parsing UUID from string: %s", err) + } + + val, err := u.Value() + if err != nil { + t.Errorf("Error getting UUID value: %s", err) + } + + if val != u.String() { + t.Errorf("Wrong value returned, should be equal: %s and %s", val, u) + } +} + +func TestScanBinary(t *testing.T) { + u := UUID{0x6b, 0xa7, 0xb8, 0x10, 0x9d, 0xad, 0x11, 0xd1, 0x80, 0xb4, 0x00, 0xc0, 0x4f, 0xd4, 0x30, 0xc8} + b1 := []byte{0x6b, 0xa7, 0xb8, 0x10, 0x9d, 0xad, 0x11, 0xd1, 0x80, 0xb4, 0x00, 0xc0, 0x4f, 0xd4, 0x30, 0xc8} + + u1 := UUID{} + err := u1.Scan(b1) + if err != nil { + t.Errorf("Error unmarshaling UUID: %s", err) + } + + if !Equal(u, u1) { + t.Errorf("UUIDs should be equal: %s and %s", u, u1) + } + + b2 := []byte{} + u2 := UUID{} + + err = u2.Scan(b2) + if err == nil { + t.Errorf("Should return error unmarshalling from empty byte slice, got %s", err) + } +} + +func TestScanString(t *testing.T) { + u := UUID{0x6b, 0xa7, 0xb8, 0x10, 0x9d, 0xad, 0x11, 0xd1, 0x80, 0xb4, 0x00, 0xc0, 0x4f, 0xd4, 0x30, 0xc8} + s1 := "6ba7b810-9dad-11d1-80b4-00c04fd430c8" + + u1 := UUID{} + err := u1.Scan(s1) + if err != nil { + t.Errorf("Error unmarshaling UUID: %s", err) + } + + if !Equal(u, u1) { + t.Errorf("UUIDs should be equal: %s and %s", u, u1) + } + + s2 := "" + u2 := UUID{} + + err = u2.Scan(s2) + if err == nil { + t.Errorf("Should return error trying to unmarshal from empty string") + } +} + +func TestScanText(t *testing.T) { + u := UUID{0x6b, 0xa7, 0xb8, 0x10, 0x9d, 0xad, 0x11, 0xd1, 0x80, 0xb4, 0x00, 0xc0, 0x4f, 0xd4, 0x30, 0xc8} + b1 := []byte("6ba7b810-9dad-11d1-80b4-00c04fd430c8") + + u1 := UUID{} + err := u1.Scan(b1) + if err != nil { + t.Errorf("Error unmarshaling UUID: %s", err) + } + + if !Equal(u, u1) { + t.Errorf("UUIDs should be equal: %s and %s", u, u1) + } + + b2 := []byte("") + u2 := UUID{} + + err = u2.Scan(b2) + if err == nil { + t.Errorf("Should return error trying to unmarshal from empty string") + } +} + +func TestScanUnsupported(t *testing.T) { + u := UUID{} + + err := u.Scan(true) + if err == nil { + t.Errorf("Should return error trying to unmarshal from bool") + } +} + +func TestNewV1(t *testing.T) { + u := NewV1() + + if u.Version() != 1 { + t.Errorf("UUIDv1 generated with incorrect version: %d", u.Version()) + } + + if u.Variant() != VariantRFC4122 { + t.Errorf("UUIDv1 generated with incorrect variant: %d", u.Variant()) + } + + u1 := NewV1() + u2 := NewV1() + + if Equal(u1, u2) { + t.Errorf("UUIDv1 generated two equal UUIDs: %s and %s", u1, u2) + } + + oldFunc := epochFunc + epochFunc = func() uint64 { return 0 } + + u3 := NewV1() + u4 := NewV1() + + if Equal(u3, u4) { + 
t.Errorf("UUIDv1 generated two equal UUIDs: %s and %s", u3, u4) + } + + epochFunc = oldFunc +} + +func TestNewV2(t *testing.T) { + u1 := NewV2(DomainPerson) + + if u1.Version() != 2 { + t.Errorf("UUIDv2 generated with incorrect version: %d", u1.Version()) + } + + if u1.Variant() != VariantRFC4122 { + t.Errorf("UUIDv2 generated with incorrect variant: %d", u1.Variant()) + } + + u2 := NewV2(DomainGroup) + + if u2.Version() != 2 { + t.Errorf("UUIDv2 generated with incorrect version: %d", u2.Version()) + } + + if u2.Variant() != VariantRFC4122 { + t.Errorf("UUIDv2 generated with incorrect variant: %d", u2.Variant()) + } +} + +func TestNewV3(t *testing.T) { + u := NewV3(NamespaceDNS, "www.example.com") + + if u.Version() != 3 { + t.Errorf("UUIDv3 generated with incorrect version: %d", u.Version()) + } + + if u.Variant() != VariantRFC4122 { + t.Errorf("UUIDv3 generated with incorrect variant: %d", u.Variant()) + } + + if u.String() != "5df41881-3aed-3515-88a7-2f4a814cf09e" { + t.Errorf("UUIDv3 generated incorrectly: %s", u.String()) + } + + u = NewV3(NamespaceDNS, "python.org") + + if u.String() != "6fa459ea-ee8a-3ca4-894e-db77e160355e" { + t.Errorf("UUIDv3 generated incorrectly: %s", u.String()) + } + + u1 := NewV3(NamespaceDNS, "golang.org") + u2 := NewV3(NamespaceDNS, "golang.org") + if !Equal(u1, u2) { + t.Errorf("UUIDv3 generated different UUIDs for same namespace and name: %s and %s", u1, u2) + } + + u3 := NewV3(NamespaceDNS, "example.com") + if Equal(u1, u3) { + t.Errorf("UUIDv3 generated same UUIDs for different names in same namespace: %s and %s", u1, u2) + } + + u4 := NewV3(NamespaceURL, "golang.org") + if Equal(u1, u4) { + t.Errorf("UUIDv3 generated same UUIDs for sane names in different namespaces: %s and %s", u1, u4) + } +} + +func TestNewV4(t *testing.T) { + u := NewV4() + + if u.Version() != 4 { + t.Errorf("UUIDv4 generated with incorrect version: %d", u.Version()) + } + + if u.Variant() != VariantRFC4122 { + t.Errorf("UUIDv4 generated with incorrect variant: %d", u.Variant()) + } +} + +func TestNewV5(t *testing.T) { + u := NewV5(NamespaceDNS, "www.example.com") + + if u.Version() != 5 { + t.Errorf("UUIDv5 generated with incorrect version: %d", u.Version()) + } + + if u.Variant() != VariantRFC4122 { + t.Errorf("UUIDv5 generated with incorrect variant: %d", u.Variant()) + } + + u = NewV5(NamespaceDNS, "python.org") + + if u.String() != "886313e1-3b8a-5372-9b90-0c9aee199e5d" { + t.Errorf("UUIDv5 generated incorrectly: %s", u.String()) + } + + u1 := NewV5(NamespaceDNS, "golang.org") + u2 := NewV5(NamespaceDNS, "golang.org") + if !Equal(u1, u2) { + t.Errorf("UUIDv5 generated different UUIDs for same namespace and name: %s and %s", u1, u2) + } + + u3 := NewV5(NamespaceDNS, "example.com") + if Equal(u1, u3) { + t.Errorf("UUIDv5 generated same UUIDs for different names in same namespace: %s and %s", u1, u2) + } + + u4 := NewV5(NamespaceURL, "golang.org") + if Equal(u1, u4) { + t.Errorf("UUIDv3 generated same UUIDs for sane names in different namespaces: %s and %s", u1, u4) + } +} diff --git a/vendor/github.com/soniah/dnsmadeeasy/.gitignore b/vendor/github.com/soniah/dnsmadeeasy/.gitignore new file mode 100644 index 000000000..b87271076 --- /dev/null +++ b/vendor/github.com/soniah/dnsmadeeasy/.gitignore @@ -0,0 +1,28 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe 
+*.test +*.prof + +# vim +.*.sw* +.sw* diff --git a/vendor/github.com/soniah/dnsmadeeasy/.travis.yml b/vendor/github.com/soniah/dnsmadeeasy/.travis.yml new file mode 100644 index 000000000..9438e76cf --- /dev/null +++ b/vendor/github.com/soniah/dnsmadeeasy/.travis.yml @@ -0,0 +1,13 @@ +language: go + +go: +- 1.1 +- 1.2 +- 1.3 + +install: +- go get github.com/motain/gocheck +- go get github.com/soniah/dnsmadeeasy + +script: +- go test diff --git a/vendor/github.com/soniah/dnsmadeeasy/AUTHORS.md b/vendor/github.com/soniah/dnsmadeeasy/AUTHORS.md new file mode 100644 index 000000000..e44b90c94 --- /dev/null +++ b/vendor/github.com/soniah/dnsmadeeasy/AUTHORS.md @@ -0,0 +1,6 @@ +# DNSMadeEasy authors + +* Sonia Hamilton (@soniah) +* Jack Pearkes (@jpearkes) + +In alphabetical order of surname. In vim: highlight, then `!sort -k3`. diff --git a/vendor/github.com/soniah/dnsmadeeasy/LICENSE b/vendor/github.com/soniah/dnsmadeeasy/LICENSE new file mode 100644 index 000000000..e87a115e4 --- /dev/null +++ b/vendor/github.com/soniah/dnsmadeeasy/LICENSE @@ -0,0 +1,363 @@ +Mozilla Public License, version 2.0 + +1. Definitions + +1.1. "Contributor" + + means each individual or legal entity that creates, contributes to the + creation of, or owns Covered Software. + +1.2. "Contributor Version" + + means the combination of the Contributions of others (if any) used by a + Contributor and that particular Contributor's Contribution. + +1.3. "Contribution" + + means Covered Software of a particular Contributor. + +1.4. "Covered Software" + + means Source Code Form to which the initial Contributor has attached the + notice in Exhibit A, the Executable Form of such Source Code Form, and + Modifications of such Source Code Form, in each case including portions + thereof. + +1.5. "Incompatible With Secondary Licenses" + means + + a. that the initial Contributor has attached the notice described in + Exhibit B to the Covered Software; or + + b. that the Covered Software was made available under the terms of + version 1.1 or earlier of the License, but not also under the terms of + a Secondary License. + +1.6. "Executable Form" + + means any form of the work other than Source Code Form. + +1.7. "Larger Work" + + means a work that combines Covered Software with other material, in a + separate file or files, that is not Covered Software. + +1.8. "License" + + means this document. + +1.9. "Licensable" + + means having the right to grant, to the maximum extent possible, whether + at the time of the initial grant or subsequently, any and all of the + rights conveyed by this License. + +1.10. "Modifications" + + means any of the following: + + a. any file in Source Code Form that results from an addition to, + deletion from, or modification of the contents of Covered Software; or + + b. any new file in Source Code Form that contains any Covered Software. + +1.11. "Patent Claims" of a Contributor + + means any patent claim(s), including without limitation, method, + process, and apparatus claims, in any patent Licensable by such + Contributor that would be infringed, but for the grant of the License, + by the making, using, selling, offering for sale, having made, import, + or transfer of either its Contributions or its Contributor Version. + +1.12. "Secondary License" + + means either the GNU General Public License, Version 2.0, the GNU Lesser + General Public License, Version 2.1, the GNU Affero General Public + License, Version 3.0, or any later versions of those licenses. + +1.13. 
"Source Code Form" + + means the form of the work preferred for making modifications. + +1.14. "You" (or "Your") + + means an individual or a legal entity exercising rights under this + License. For legal entities, "You" includes any entity that controls, is + controlled by, or is under common control with You. For purposes of this + definition, "control" means (a) the power, direct or indirect, to cause + the direction or management of such entity, whether by contract or + otherwise, or (b) ownership of more than fifty percent (50%) of the + outstanding shares or beneficial ownership of such entity. + + +2. License Grants and Conditions + +2.1. Grants + + Each Contributor hereby grants You a world-wide, royalty-free, + non-exclusive license: + + a. under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or + as part of a Larger Work; and + + b. under Patent Claims of such Contributor to make, use, sell, offer for + sale, have made, import, and otherwise transfer either its + Contributions or its Contributor Version. + +2.2. Effective Date + + The licenses granted in Section 2.1 with respect to any Contribution + become effective for each Contribution on the date the Contributor first + distributes such Contribution. + +2.3. Limitations on Grant Scope + + The licenses granted in this Section 2 are the only rights granted under + this License. No additional rights or licenses will be implied from the + distribution or licensing of Covered Software under this License. + Notwithstanding Section 2.1(b) above, no patent license is granted by a + Contributor: + + a. for any code that a Contributor has removed from Covered Software; or + + b. for infringements caused by: (i) Your and any other third party's + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + + c. under Patent Claims infringed by Covered Software in the absence of + its Contributions. + + This License does not grant any rights in the trademarks, service marks, + or logos of any Contributor (except as may be necessary to comply with + the notice requirements in Section 3.4). + +2.4. Subsequent Licenses + + No Contributor makes additional grants as a result of Your choice to + distribute the Covered Software under a subsequent version of this + License (see Section 10.2) or under the terms of a Secondary License (if + permitted under the terms of Section 3.3). + +2.5. Representation + + Each Contributor represents that the Contributor believes its + Contributions are its original creation(s) or it has sufficient rights to + grant the rights to its Contributions conveyed by this License. + +2.6. Fair Use + + This License is not intended to limit any rights You have under + applicable copyright doctrines of fair use, fair dealing, or other + equivalents. + +2.7. Conditions + + Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in + Section 2.1. + + +3. Responsibilities + +3.1. Distribution of Source Form + + All distribution of Covered Software in Source Code Form, including any + Modifications that You create or to which You contribute, must be under + the terms of this License. 
You must inform recipients that the Source + Code Form of the Covered Software is governed by the terms of this + License, and how they can obtain a copy of this License. You may not + attempt to alter or restrict the recipients' rights in the Source Code + Form. + +3.2. Distribution of Executable Form + + If You distribute Covered Software in Executable Form then: + + a. such Covered Software must also be made available in Source Code Form, + as described in Section 3.1, and You must inform recipients of the + Executable Form how they can obtain a copy of such Source Code Form by + reasonable means in a timely manner, at a charge no more than the cost + of distribution to the recipient; and + + b. You may distribute such Executable Form under the terms of this + License, or sublicense it under different terms, provided that the + license for the Executable Form does not attempt to limit or alter the + recipients' rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + + You may create and distribute a Larger Work under terms of Your choice, + provided that You also comply with the requirements of this License for + the Covered Software. If the Larger Work is a combination of Covered + Software with a work governed by one or more Secondary Licenses, and the + Covered Software is not Incompatible With Secondary Licenses, this + License permits You to additionally distribute such Covered Software + under the terms of such Secondary License(s), so that the recipient of + the Larger Work may, at their option, further distribute the Covered + Software under the terms of either this License or such Secondary + License(s). + +3.4. Notices + + You may not remove or alter the substance of any license notices + (including copyright notices, patent notices, disclaimers of warranty, or + limitations of liability) contained within the Source Code Form of the + Covered Software, except that You may alter any license notices to the + extent required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + + You may choose to offer, and to charge a fee for, warranty, support, + indemnity or liability obligations to one or more recipients of Covered + Software. However, You may do so only on Your own behalf, and not on + behalf of any Contributor. You must make it absolutely clear that any + such warranty, support, indemnity, or liability obligation is offered by + You alone, and You hereby agree to indemnify every Contributor for any + liability incurred by such Contributor as a result of warranty, support, + indemnity or liability terms You offer. You may include additional + disclaimers of warranty and limitations of liability specific to any + jurisdiction. + +4. Inability to Comply Due to Statute or Regulation + + If it is impossible for You to comply with any of the terms of this License + with respect to some or all of the Covered Software due to statute, + judicial order, or regulation then You must: (a) comply with the terms of + this License to the maximum extent possible; and (b) describe the + limitations and the code they affect. Such description must be placed in a + text file included with all distributions of the Covered Software under + this License. Except to the extent prohibited by statute or regulation, + such description must be sufficiently detailed for a recipient of ordinary + skill to be able to understand it. + +5. Termination + +5.1. 
The rights granted under this License will terminate automatically if You + fail to comply with any of its terms. However, if You become compliant, + then the rights granted under this License from a particular Contributor + are reinstated (a) provisionally, unless and until such Contributor + explicitly and finally terminates Your grants, and (b) on an ongoing + basis, if such Contributor fails to notify You of the non-compliance by + some reasonable means prior to 60 days after You have come back into + compliance. Moreover, Your grants from a particular Contributor are + reinstated on an ongoing basis if such Contributor notifies You of the + non-compliance by some reasonable means, this is the first time You have + received notice of non-compliance with this License from such + Contributor, and You become compliant prior to 30 days after Your receipt + of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent + infringement claim (excluding declaratory judgment actions, + counter-claims, and cross-claims) alleging that a Contributor Version + directly or indirectly infringes any patent, then the rights granted to + You by any and all Contributors for the Covered Software under Section + 2.1 of this License shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user + license agreements (excluding distributors and resellers) which have been + validly granted by You or Your distributors under this License prior to + termination shall survive termination. + +6. Disclaimer of Warranty + + Covered Software is provided under this License on an "as is" basis, + without warranty of any kind, either expressed, implied, or statutory, + including, without limitation, warranties that the Covered Software is free + of defects, merchantable, fit for a particular purpose or non-infringing. + The entire risk as to the quality and performance of the Covered Software + is with You. Should any Covered Software prove defective in any respect, + You (not any Contributor) assume the cost of any necessary servicing, + repair, or correction. This disclaimer of warranty constitutes an essential + part of this License. No use of any Covered Software is authorized under + this License except under this disclaimer. + +7. Limitation of Liability + + Under no circumstances and under no legal theory, whether tort (including + negligence), contract, or otherwise, shall any Contributor, or anyone who + distributes Covered Software as permitted above, be liable to You for any + direct, indirect, special, incidental, or consequential damages of any + character including, without limitation, damages for lost profits, loss of + goodwill, work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses, even if such party shall have been + informed of the possibility of such damages. This limitation of liability + shall not apply to liability for death or personal injury resulting from + such party's negligence to the extent applicable law prohibits such + limitation. Some jurisdictions do not allow the exclusion or limitation of + incidental or consequential damages, so this exclusion and limitation may + not apply to You. + +8. 
Litigation + + Any litigation relating to this License may be brought only in the courts + of a jurisdiction where the defendant maintains its principal place of + business and such litigation shall be governed by laws of that + jurisdiction, without reference to its conflict-of-law provisions. Nothing + in this Section shall prevent a party's ability to bring cross-claims or + counter-claims. + +9. Miscellaneous + + This License represents the complete agreement concerning the subject + matter hereof. If any provision of this License is held to be + unenforceable, such provision shall be reformed only to the extent + necessary to make it enforceable. Any law or regulation which provides that + the language of a contract shall be construed against the drafter shall not + be used to construe this License against a Contributor. + + +10. Versions of the License + +10.1. New Versions + + Mozilla Foundation is the license steward. Except as provided in Section + 10.3, no one other than the license steward has the right to modify or + publish new versions of this License. Each version will be given a + distinguishing version number. + +10.2. Effect of New Versions + + You may distribute the Covered Software under the terms of the version + of the License under which You originally received the Covered Software, + or under the terms of any subsequent version published by the license + steward. + +10.3. Modified Versions + + If you create software not governed by this License, and you want to + create a new license for such software, you may create and use a + modified version of this License if you rename the license and remove + any references to the name of the license steward (except to note that + such modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary + Licenses If You choose to distribute Source Code Form that is + Incompatible With Secondary Licenses under the terms of this version of + the License, the notice described in Exhibit B of this License must be + attached. + +Exhibit A - Source Code Form License Notice + + This Source Code Form is subject to the + terms of the Mozilla Public License, v. + 2.0. If a copy of the MPL was not + distributed with this file, You can + obtain one at + http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular file, +then You may include the notice in a location (such as a LICENSE file in a +relevant directory) where a recipient would be likely to look for such a +notice. + +You may add additional accurate notices of copyright ownership. + +Exhibit B - "Incompatible With Secondary Licenses" Notice + + This Source Code Form is "Incompatible + With Secondary Licenses", as defined by + the Mozilla Public License, v. 2.0. 
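Editor's note on the go.uuid package vendored above, before the dnsmadeeasy
files begin: a small sketch, not part of the patch, of the generator behavior
pinned down by the tests above. V3 (MD5) and V5 (SHA-1) UUIDs are pure
functions of namespace and name, so the expected values shown are stable
across runs and hosts; V4 is random, so consecutive calls differ.

    package main

    import (
        "fmt"

        uuid "github.com/satori/go.uuid"
    )

    func main() {
        fmt.Println(uuid.NewV3(uuid.NamespaceDNS, "python.org")) // 6fa459ea-ee8a-3ca4-894e-db77e160355e
        fmt.Println(uuid.NewV5(uuid.NamespaceDNS, "python.org")) // 886313e1-3b8a-5372-9b90-0c9aee199e5d
        fmt.Println(uuid.NewV4(), uuid.NewV4())                  // random; differs on every run
    }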
+ diff --git a/vendor/github.com/soniah/dnsmadeeasy/README.md b/vendor/github.com/soniah/dnsmadeeasy/README.md new file mode 100644 index 000000000..b37d3dfc0 --- /dev/null +++ b/vendor/github.com/soniah/dnsmadeeasy/README.md @@ -0,0 +1,33 @@ +# dnsmadeeasy + +[![Build +Status](https://travis-ci.org/soniah/dnsmadeeasy.svg?branch=master)](https://travis-ci.org/soniah/dnsmadeeasy) +[![Coverage](http://gocover.io/_badge/github.com/soniah/dnsmadeeasy)](http://gocover.io/github.com/soniah/dnsmadeeasy) +[![GoDoc](https://godoc.org/github.com/soniah/dnsmadeeasy?status.png)](http://godoc.org/github.com/soniah/dnsmadeeasy) +https://github.com/soniah/dnsmadeeasy v1.1 + +This package provides the `dnsmadeeasy` package which offers an +interface to the [DNSMadeEasy](http://www.dnsmadeeasy.com/) API. + +It doesn't have full API coverage, and only implements specific +endpoints, as it is designed for use with +[Terraform](https://github.com/hashicorp/terraform). + +**For those reasons, I recommend looking elsewhere if you just need +a standard DNSMadeEasy API client.** + +Sonia Hamilton, sonia@snowfrog.net, http://blog.snowfrog.net. + +## Documentation + +The full documentation is available on [Godoc](http://godoc.org/github.com/soniah/dnsmadeeasy) + +## Related Projects + +* https://github.com/soniah/terraform-provider-dme +* https://github.com/hashicorp/terraform +* https://github.com/pearkes/dnsimple + +## License + +See LICENSE. Copyright the DNSMadeEasy authors, see AUTHORS.md. diff --git a/vendor/github.com/soniah/dnsmadeeasy/api.go b/vendor/github.com/soniah/dnsmadeeasy/api.go new file mode 100644 index 000000000..ec5d244e4 --- /dev/null +++ b/vendor/github.com/soniah/dnsmadeeasy/api.go @@ -0,0 +1,144 @@ +package dnsmadeeasy + +import ( + "bytes" + "crypto/hmac" + "crypto/sha1" + "encoding/hex" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "net/url" + "strings" + "time" +) + +// SandboxURL is the URL of the DNS Made Easy Sandbox +const SandboxURL = "http://api.sandbox.dnsmadeeasy.com/V2.0" + +// Client provides a client to the dnsmadeeasy API +type Client struct { + // API Key + AKey string + + // Secret Key + SKey string + + // URL to the API to use + URL string + + // HttpClient is the client to use. Default will be + // used if not provided. + HTTP *http.Client +} + +// Body is the body of a request +type Body map[string]interface{} + +// Error is the error format that they return +// to us if there is a problem +type Error struct { + Errors []string `json:"error"` +} + +// Join joins all the errors together, separated by spaces. +func (d *Error) Join() string { + return strings.Join(d.Errors, " ") +} + +// NewClient returns a new dnsmadeeasy client. It requires an API key and +// secret key. You can generate them by visiting the Config, Account +// Information section of the dnsmadeeasy control panel for your account. 
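// Editor's note: a standalone sketch, not part of the vendored file, of the
// request signing NewRequest performs below. The X-Dnsme-Hmac header is the
// hex-encoded HMAC-SHA1 of the RFC 1123 request date, keyed with the secret
// key. The key and date here are the fixtures from api_test.go further down,
// which expects exactly the digest "7a8c517d5eab84e524a537ce3a73e565cabf8f6a".
//
//	package main
//
//	import (
//		"crypto/hmac"
//		"crypto/sha1"
//		"encoding/hex"
//		"fmt"
//	)
//
//	func main() {
//		skey := "11a0a11a-a1a1-111a-a11a-a11110a11111"
//		date := "Thu, 04 Dec 2014 11:02:57 GMT"
//		h := hmac.New(sha1.New, []byte(skey))
//		h.Write([]byte(date))
//		fmt.Println(hex.EncodeToString(h.Sum(nil)))
//	}
//
// NewClient itself follows; override Client.URL with SandboxURL to run
// against the sandbox instead of production.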
+func NewClient(akey string, skey string) (*Client, error) { + client := Client{ + AKey: akey, + SKey: skey, + URL: "https://api.dnsmadeeasy.com/V2.0", + HTTP: http.DefaultClient, + } + return &client, nil +} + +// NewRequest creates a new request with the params +func (c *Client) NewRequest(method, path string, body *bytes.Buffer, + requestDate string) (*http.Request, error) { + + url, err := url.Parse(c.URL + path) + if err != nil { + return nil, fmt.Errorf("Error parsing base URL: %s", err) + } + + // Build the request + req, err := http.NewRequest(method, url.String(), body) + if err != nil { + return nil, fmt.Errorf("Error creating request: %s", err) + } + + // Calculate the hexadecimal HMAC SHA1 of requestDate using sKey + key := []byte(c.SKey) + h := hmac.New(sha1.New, key) + if len(requestDate) == 0 { + requestDate = time.Now().UTC().Format(http.TimeFormat) + } + h.Write([]byte(requestDate)) + hmacString := hex.EncodeToString(h.Sum(nil)) + + // Add the authorization header + req.Header.Add("X-Dnsme-Apikey", c.AKey) + req.Header.Add("X-Dnsme-Requestdate", requestDate) + req.Header.Add("X-Dnsme-Hmac", hmacString) + req.Header.Add("Accept", "application/json") + + // If it's a not a get, add a content-type + if method != "GET" { + req.Header.Add("Content-Type", "application/json") + } + return req, nil +} + +// parseError is used to take an error json resp +// and return a single string for use in error messages +func parseError(resp *http.Response) error { + dnsError := Error{} + err := decodeBody(resp, &dnsError) + + // if there was an error decoding the body, just return that + if err != nil { + return fmt.Errorf("Error parsing error body for non-200 request: %s", err) + } + return fmt.Errorf("API Error (%d): %s", resp.StatusCode, dnsError.Join()) +} + +// decodeBody is used to JSON decode a body +func decodeBody(resp *http.Response, out interface{}) error { + body, err := ioutil.ReadAll(resp.Body) + + if err != nil { + return err + } + + if err = json.Unmarshal(body, &out); err != nil { + return err + } + + return nil +} + +// checkResp wraps http.Client.Do() and verifies that the +// request was successful. 
A non-200 response returns an error
+// formatted to include any validation problems or other failures
+func checkResp(resp *http.Response, err error) (*http.Response, error) {
+	// If the err is already there, there was an error higher
+	// up the chain, so just return that
+	if err != nil {
+		return resp, err
+	}
+
+	if resp.StatusCode/100 == 2 {
+		return resp, nil
+	} else if resp.StatusCode == 404 {
+		return nil, fmt.Errorf("Not found")
+	}
+	return nil, fmt.Errorf("API Error: %s", resp.Status)
+}
diff --git a/vendor/github.com/soniah/dnsmadeeasy/api_test.go b/vendor/github.com/soniah/dnsmadeeasy/api_test.go
new file mode 100644
index 000000000..9521e5fa0
--- /dev/null
+++ b/vendor/github.com/soniah/dnsmadeeasy/api_test.go
@@ -0,0 +1,79 @@
+package dnsmadeeasy
+
+import (
+	"bytes"
+	"errors"
+	"fmt"
+	"net/http"
+	"testing"
+)
+
+func makeClient(t *testing.T) *Client {
+	client, err := NewClient("aaaaaa1a-11a1-1aa1-a101-11a1a11aa1aa",
+		"11a0a11a-a1a1-111a-a11a-a11110a11111")
+
+	if err != nil {
+		t.Fatalf("err: %v", err)
+	}
+
+	if client.AKey != "aaaaaa1a-11a1-1aa1-a101-11a1a11aa1aa" {
+		t.Fatalf("api key not set on client: %s", client.AKey)
+	}
+
+	if client.SKey != "11a0a11a-a1a1-111a-a11a-a11110a11111" {
+		t.Fatalf("secret key not set on client: %s", client.SKey)
+	}
+
+	return client
+}
+
+func TestClient_NewRequest(t *testing.T) {
+	c := makeClient(t)
+
+	body := bytes.NewBuffer(nil)
+	req, err := c.NewRequest("POST", "/bar", body, "Thu, 04 Dec 2014 11:02:57 GMT")
+	if err != nil {
+		t.Fatalf("bad: %v", err)
+	}
+
+	if req.URL.String() != "https://api.dnsmadeeasy.com/V2.0/bar" {
+		t.Fatalf("bad base url: %v", req.URL.String())
+	}
+
+	if req.Header.Get("X-Dnsme-Apikey") != "aaaaaa1a-11a1-1aa1-a101-11a1a11aa1aa" {
+		t.Fatalf("bad auth header: %v", req.Header)
+	}
+
+	if req.Header.Get("X-Dnsme-Requestdate") != "Thu, 04 Dec 2014 11:02:57 GMT" {
+		t.Fatalf("bad auth header: %v", req.Header)
+	}
+
+	if req.Header.Get("X-Dnsme-Hmac") != "7a8c517d5eab84e524a537ce3a73e565cabf8f6a" {
+		t.Fatalf("bad auth header: %v", req.Header)
+	}
+
+	if req.Method != "POST" {
+		t.Fatalf("bad method: %v", req.Method)
+	}
+}
+
+type ClosingBuffer struct {
+	*bytes.Buffer
+}
+
+func (cb *ClosingBuffer) Close() error {
+	return nil
+}
+
+var errExample = &http.Response{
+	Body: &ClosingBuffer{bytes.NewBufferString(`{"error":["Record with this type (A), name (test), and value (1.1.1.9) already exists."]}`)},
+}
+
+func Test_ParseError(t *testing.T) {
+	should := errors.New("API Error (0): Record with this type (A), name (test), and value (1.1.1.9) already exists.")
+	actual := parseError(errExample)
+
+	if fmt.Sprintf("%v", should) != fmt.Sprintf("%v", actual) {
+		t.Fatalf("parseError\nshould: |%v|\nactual: |%v|\n", should, actual)
+	}
+}
diff --git a/vendor/github.com/soniah/dnsmadeeasy/examples/README.md b/vendor/github.com/soniah/dnsmadeeasy/examples/README.md
new file mode 100644
index 000000000..2b2cbc7bb
--- /dev/null
+++ b/vendor/github.com/soniah/dnsmadeeasy/examples/README.md
@@ -0,0 +1,8 @@
+# Examples
+
+These are examples of using *dnsmadeeasy*. Parameters are passed by
+environment variables. For example:
+
+`% export DME_AKEY=aaaaaa1a-11a1-1aa1-a101-11a1a11aa1aa \
+DME_SKEY=11a0a11a-a1a1-111a-a11a-a11110a11111 DME_DOMAINID=123456
+% DME_IP=1.1.1.1 go run create.go`
diff --git a/vendor/github.com/soniah/dnsmadeeasy/examples/create.go b/vendor/github.com/soniah/dnsmadeeasy/examples/create.go
new file mode 100644
index 000000000..3d439a4e2
--- /dev/null
+++ b/vendor/github.com/soniah/dnsmadeeasy/examples/create.go
@@ -0,0 +1,45 @@
+package main
+
+import (
+	"fmt"
+	dme "github.com/soniah/dnsmadeeasy"
+	"log"
+	"os"
+)
+
+func main() {
+	akey := os.Getenv("DME_AKEY")
+	skey := os.Getenv("DME_SKEY")
+	domainID := os.Getenv("DME_DOMAINID")
+	ip := os.Getenv("DME_IP")
+
+	fmt.Println("Using these values:")
+	fmt.Println("akey:", akey)
+	fmt.Println("skey:", skey)
+	fmt.Println("domainid:", domainID)
+	fmt.Println("ip:", ip)
+
+	if len(akey) == 0 || len(skey) == 0 || len(domainID) == 0 || len(ip) == 0 {
+		log.Fatalf("Environment variable(s) not set\n")
+	}
+
+	client, err := dme.NewClient(akey, skey)
+	if err != nil {
+		log.Fatalf("err: %v", err)
+	}
+	client.URL = dme.SandboxURL
+
+	cr := map[string]interface{}{
+		"name":  "test",
+		"type":  "A",
+		"value": ip,
+		"ttl":   86400,
+	}
+
+	result, err2 := client.CreateRecord(domainID, cr)
+	if err2 != nil {
+		log.Fatalf("Result: '%s' Error: %s", result, err2)
+	}
+
+	log.Printf("Result: '%s'", result)
+}
diff --git a/vendor/github.com/soniah/dnsmadeeasy/examples/delete.go b/vendor/github.com/soniah/dnsmadeeasy/examples/delete.go
new file mode 100644
index 000000000..e6c04a203
--- /dev/null
+++ b/vendor/github.com/soniah/dnsmadeeasy/examples/delete.go
@@ -0,0 +1,37 @@
+package main
+
+import (
+	"fmt"
+	dme "github.com/soniah/dnsmadeeasy"
+	"log"
+	"os"
+)
+
+func main() {
+	akey := os.Getenv("DME_AKEY")
+	skey := os.Getenv("DME_SKEY")
+	domainID := os.Getenv("DME_DOMAINID")
+	recordID := os.Getenv("DME_RECORDID")
+
+	fmt.Println("Using these values:")
+	fmt.Println("akey:", akey)
+	fmt.Println("skey:", skey)
+	fmt.Println("domainid:", domainID)
+	fmt.Println("recordid:", recordID)
+
+	if len(akey) == 0 || len(skey) == 0 || len(domainID) == 0 || len(recordID) == 0 {
+		log.Fatalf("Environment variable(s) not set\n")
+	}
+
+	// Check the error before touching the client; NewClient could
+	// return a nil client alongside an error.
+	client, err := dme.NewClient(akey, skey)
+	if err != nil {
+		log.Fatalf("err: %v", err)
+	}
+	client.URL = dme.SandboxURL
+
+	err2 := client.DeleteRecord(domainID, recordID)
+	if err2 != nil {
+		log.Fatalf("DeleteRecord result: %v", err2)
+	}
+	log.Print("Destroyed.")
+}
diff --git a/vendor/github.com/soniah/dnsmadeeasy/examples/read.go b/vendor/github.com/soniah/dnsmadeeasy/examples/read.go
new file mode 100644
index 000000000..26dc98068
--- /dev/null
+++ b/vendor/github.com/soniah/dnsmadeeasy/examples/read.go
@@ -0,0 +1,38 @@
+package main
+
+import (
+	"fmt"
+	dme "github.com/soniah/dnsmadeeasy"
+	"log"
+	"os"
+)
+
+func main() {
+	akey := os.Getenv("DME_AKEY")
+	skey := os.Getenv("DME_SKEY")
+	domainID := os.Getenv("DME_DOMAINID")
+	recordID := os.Getenv("DME_RECORDID")
+
+	fmt.Println("Using these values:")
+	fmt.Println("akey:", akey)
+	fmt.Println("skey:", skey)
+	fmt.Println("domainid:", domainID)
+	fmt.Println("recordid:", recordID)
+
+	if len(akey) == 0 || len(skey) == 0 || len(domainID) == 0 || len(recordID) == 0 {
+		log.Fatalf("Environment variable(s) not set\n")
+	}
+
+	// Check the error before touching the client; NewClient could
+	// return a nil client alongside an error.
+	client, err := dme.NewClient(akey, skey)
+	if err != nil {
+		log.Fatalf("err: %v", err)
+	}
+	client.URL = dme.SandboxURL
+
+	result, err2 := client.ReadRecord(domainID, recordID)
+	if err2 != nil {
+		log.Fatalf("ReadRecord result: %v error %v", result, err2)
%v", result, err2) + } + + log.Printf("Result: %#v", *result) +} diff --git a/vendor/github.com/soniah/dnsmadeeasy/examples/update.go b/vendor/github.com/soniah/dnsmadeeasy/examples/update.go new file mode 100644 index 000000000..1eaef3ba2 --- /dev/null +++ b/vendor/github.com/soniah/dnsmadeeasy/examples/update.go @@ -0,0 +1,41 @@ +package main + +import ( + "fmt" + dme "github.com/soniah/dnsmadeeasy" + "log" + "os" +) + +func main() { + akey := os.Getenv("DME_AKEY") + skey := os.Getenv("DME_SKEY") + domainID := os.Getenv("DME_DOMAINID") + recordID := os.Getenv("DME_RECORDID") + + fmt.Println("Using these values:") + fmt.Println("akey:", akey) + fmt.Println("skey:", skey) + fmt.Println("domainid:", domainID) + fmt.Println("recordid:", recordID) + + if len(akey) == 0 || len(skey) == 0 || len(domainID) == 0 || len(recordID) == 0 { + log.Fatalf("Environment variable(s) not set\n") + } + + client, err := dme.NewClient(akey, skey) + client.URL = dme.SandboxURL + if err != nil { + log.Fatalf("err: %v", err) + } + + cr := map[string]interface{}{ + "name": "test-update", + } + + result, err2 := client.UpdateRecord(domainID, recordID, cr) + if err2 != nil { + log.Fatalf("UpdateRecord result: %v error %v", result, err2) + } + log.Print("Result: ", result) +} diff --git a/vendor/github.com/soniah/dnsmadeeasy/record.go b/vendor/github.com/soniah/dnsmadeeasy/record.go new file mode 100644 index 000000000..d4224a0ae --- /dev/null +++ b/vendor/github.com/soniah/dnsmadeeasy/record.go @@ -0,0 +1,191 @@ +package dnsmadeeasy + +import ( + "bytes" + "encoding/json" + "fmt" + "github.com/imdario/mergo" + "strconv" +) + +// DataResponse is the response from a GET ie all records for +// a domainID +type DataResponse struct { + Data []Record `json:"data"` +} + +// Record is used to represent a retrieved Record. +type Record struct { + Name string `json:"name"` + Value string `json:"value"` + RecordID int64 `json:"id"` + Type string `json:"type"` + Source int64 `json:"source"` + SourceID int64 `json:"sourceId"` + DynamicDNS bool `json:"dynamicDns"` + Password string `json:"password"` + TTL int64 `json:"ttl"` + Monitor bool `json:"monitor"` + Failover bool `json:"failover"` + Failed bool `json:"failed"` + GtdLocation string `json:"gtdLocation"` + Description string `json:"description"` + Keywords string `json:"keywords"` + Title string `json:"title"` + HardLink bool `json:"hardLink"` + MXLevel int64 `json:"mxLevel"` + Weight int64 `json:"weight"` + Priority int64 `json:"priority"` + Port int64 `json:"port"` + RedirectType string `json:"redirectType"` +} + +// StringRecordID returns the record id as a string. 
+
+// StringRecordID returns the record id as a string.
+func (r *Record) StringRecordID() string {
+	return strconv.FormatInt(r.RecordID, 10)
+}
+
+// ttl, err := strconv.ParseInt(opts.Ttl, 0, 0)
+
+type requestType int
+
+const (
+	create requestType = iota
+	retrieve
+	update
+	destroy
+)
+
+func (rt requestType) endpoint(domainID string, recordID string) (result string) {
+	switch rt {
+	case create, retrieve:
+		result = fmt.Sprintf("/dns/managed/%s/records/", domainID)
+	case update, destroy:
+		result = fmt.Sprintf("/dns/managed/%s/records/%s/", domainID, recordID)
+	}
+	return result
+}
+
+// CRUD - Create, Read, Update, Delete
+
+// CreateRecord creates a DNS record on DNSMadeEasy
+func (c *Client) CreateRecord(domainID string, cr map[string]interface{}) (string, error) {
+
+	path := create.endpoint(domainID, "")
+	buf := bytes.NewBuffer(nil)
+	enc := json.NewEncoder(buf)
+	if err := enc.Encode(cr); err != nil {
+		return "", err
+	}
+
+	req, err := c.NewRequest("POST", path, buf, "")
+	if err != nil {
+		return "", fmt.Errorf("Error from NewRequest: %s", err)
+	}
+
+	resp, err := checkResp(c.HTTP.Do(req))
+	if err != nil {
+		return "", fmt.Errorf("Error creating record: %s", err)
+	}
+
+	record := new(Record)
+
+	err = decodeBody(resp, &record)
+	if err != nil {
+		return "", fmt.Errorf("Error parsing record response: %s", err)
+	}
+
+	// The request was successful
+	return record.StringRecordID(), nil
+}
+
+// ReadRecord gets a record by the ID specified and returns a Record and an
+// error.
+func (c *Client) ReadRecord(domainID string, recordID string) (*Record, error) {
+	body := bytes.NewBuffer(nil)
+	path := retrieve.endpoint(domainID, recordID)
+	req, err := c.NewRequest("GET", path, body, "")
+	if err != nil {
+		return nil, err
+	}
+
+	resp, err := checkResp(c.HTTP.Do(req))
+	if err != nil {
+		return nil, fmt.Errorf("Error retrieving record: %s", err)
+	}
+
+	dataResp := DataResponse{}
+	err = decodeBody(resp, &dataResp)
+	if err != nil {
+		return nil, fmt.Errorf("Error decoding data response: %s", err)
+	}
+	var result Record
+	var found bool
+	for _, record := range dataResp.Data {
+		if record.StringRecordID() == recordID {
+			result = record // not pointer, so data copied
+			found = true
+			break
+		}
+	}
+
+	if !found {
+		return nil, fmt.Errorf("Unable to find record %s", recordID)
+	}
+	return &result, nil
+}
+
+// UpdateRecord updates a record from the parameters specified and
+// returns an error if it fails.
+func (c *Client) UpdateRecord(domainID string, recordID string, cr map[string]interface{}) (string, error) {
+
+	current, err := c.ReadRecord(domainID, recordID)
+	if err != nil {
+		return "", err
+	}
+
+	err = mergo.Map(current, cr)
+	if err != nil {
+		return "", err
+	}
+
+	buf := bytes.NewBuffer(nil)
+	enc := json.NewEncoder(buf)
+	if err := enc.Encode(current); err != nil {
+		return "", err
+	}
+
+	path := update.endpoint(domainID, recordID)
+	req, err := c.NewRequest("PUT", path, buf, "")
+	if err != nil {
+		return "", err
+	}
+
+	_, err = checkResp(c.HTTP.Do(req))
+	if err != nil {
+		return "", fmt.Errorf("Error updating record: %s", err)
+	}
+
+	// The request was successful
+	return recordID, nil
+}
+
+// DeleteRecord destroys a record by the ID specified and
+// returns an error if it fails. If no error is returned,
+// the Record was successfully destroyed.
+func (c *Client) DeleteRecord(domainID string, recordID string) error { + body := bytes.NewBuffer(nil) + path := destroy.endpoint(domainID, recordID) + req, err := c.NewRequest("DELETE", path, body, "") + if err != nil { + return err + } + + _, err = checkResp(c.HTTP.Do(req)) + if err != nil { + return fmt.Errorf("Unable to find record %s", recordID) + } + + // The request was successful + return nil +} diff --git a/vendor/github.com/soniah/dnsmadeeasy/record_test.go b/vendor/github.com/soniah/dnsmadeeasy/record_test.go new file mode 100644 index 000000000..ea7d0eb19 --- /dev/null +++ b/vendor/github.com/soniah/dnsmadeeasy/record_test.go @@ -0,0 +1,173 @@ +package dnsmadeeasy + +import ( + "fmt" + . "github.com/motain/gocheck" + "github.com/soniah/dnsmadeeasy/testutil" + "testing" +) + +func Test(t *testing.T) { + TestingT(t) +} + +type S struct { + client *Client +} + +var _ = Suite(&S{}) + +var testServer = testutil.NewHTTPServer() + +func (s *S) SetUpSuite(c *C) { + testServer.Start() + var err error + s.client, err = NewClient("aaaaaa1a-11a1-1aa1-a101-11a1a11aa1aa", + "11a0a11a-a1a1-111a-a11a-a11110a11111") + s.client.URL = "http://localhost:4444" + if err != nil { + panic(err) + } +} + +func (s *S) TearDownTest(c *C) { + testServer.Flush() +} + +func (s *S) Test_endpoint(c *C) { + c.Assert(create.endpoint("1", ""), Equals, "/dns/managed/1/records/") + c.Assert(retrieve.endpoint("1", ""), Equals, "/dns/managed/1/records/") + c.Assert(update.endpoint("1", "2"), Equals, "/dns/managed/1/records/2/") + c.Assert(destroy.endpoint("1", "2"), Equals, "/dns/managed/1/records/2/") +} + +func (s *S) Test_CreateRecordGood(c *C) { + testServer.Response(201, nil, recordCreate) + cr := map[string]interface{}{ + "Name": "test", + "Value": "1.1.1.1", + } + id, err := s.client.CreateRecord("870073", cr) + _ = testServer.WaitRequest() + c.Assert(err, IsNil) + c.Assert(id, Equals, "10022989") +} + +func (s *S) Test_CreateRecordBad(c *C) { + testServer.Response(404, nil, "") + cr := map[string]interface{}{ + "Name": "test", + "Value": "1.1.1.1", + } + _, err := s.client.CreateRecord("70073", cr) + _ = testServer.WaitRequest() + c.Assert(err, NotNil) +} + +func (s *S) Test_ReadRecordGood(c *C) { + testServer.Response(200, nil, recordRead) + record, err := s.client.ReadRecord("870073", "10039429") + _ = testServer.WaitRequest() + c.Assert(err, IsNil) + c.Assert(record.RecordID, Equals, int64(10039429)) +} + +func (s *S) Test_ReadRecordBad(c *C) { + testServer.Response(200, nil, recordRead) + record, err := s.client.ReadRecord("870073", "1003942") + _ = testServer.WaitRequest() + c.Assert(err, NotNil) + c.Assert(record, IsNil) + c.Assert(fmt.Sprintf("%s", err), Equals, "Unable to find record 1003942") +} + +func (s *S) Test_UpdateRecordGood(c *C) { + testServer.Response(200, nil, recordRead) + testServer.Response(200, nil, "") + cr := map[string]interface{}{ + "Name": "test-update", + } + recordID, err := s.client.UpdateRecord("870073", "10039429", cr) + _ = testServer.WaitRequest() + c.Assert(err, IsNil) + c.Assert(recordID, Equals, "10039429") +} + +func (s *S) Test_UpdateRecordBad(c *C) { + testServer.Response(200, nil, recordRead) + cr := map[string]interface{}{ + "Name": "test-update", + } + recordID, err := s.client.UpdateRecord("870073", "100394", cr) + _ = testServer.WaitRequest() + c.Assert(err, NotNil) + c.Assert(recordID, Equals, "") + c.Assert(fmt.Sprintf("%s", err), Equals, "Unable to find record 100394") +} + +func (s *S) Test_DeleteRecordGood(c *C) { + testServer.Response(200, nil, "") + err 
:= s.client.DeleteRecord("870073", "10039429")
+	c.Assert(err, IsNil)
+}
+
+func (s *S) Test_DeleteRecordBad(c *C) {
+	testServer.Response(404, nil, "")
+	err := s.client.DeleteRecord("870073", "100394")
+	c.Assert(err, NotNil)
+	c.Assert(fmt.Sprintf("%s", err), Equals, "Unable to find record 100394")
+}
+
+var recordCreate = `{
+  "name":"test",
+  "value":"1.1.1.1",
+  "id":10022989,
+  "type":"A",
+  "source":1,
+  "failover":false,
+  "monitor":false,
+  "sourceId":870073,
+  "dynamicDns":false,
+  "failed":false,
+  "gtdLocation":"DEFAULT",
+  "hardLink":false,
+  "ttl":86400
+}`
+
+var recordRead = `{
+  "data":[
+    {
+      "name":"test",
+      "value":"1.1.1.1",
+      "id":10039428,
+      "type":"A",
+      "source":1,
+      "failover":false,
+      "monitor":false,
+      "sourceId":870073,
+      "dynamicDns":false,
+      "failed":false,
+      "gtdLocation":"DEFAULT",
+      "hardLink":false,
+      "ttl":86400
+    },
+    {
+      "name":"test",
+      "value":"1.1.1.2",
+      "id":10039429,
+      "type":"A",
+      "source":1,
+      "failover":false,
+      "monitor":false,
+      "sourceId":870073,
+      "dynamicDns":false,
+      "failed":false,
+      "gtdLocation":"DEFAULT",
+      "hardLink":false,
+      "ttl":86400
+    }
+  ],
+  "page":0,
+  "totalPages":1,
+  "totalRecords":2
+}`
diff --git a/vendor/github.com/soniah/dnsmadeeasy/testutil/http.go b/vendor/github.com/soniah/dnsmadeeasy/testutil/http.go
new file mode 100644
index 000000000..ccc570cdd
--- /dev/null
+++ b/vendor/github.com/soniah/dnsmadeeasy/testutil/http.go
@@ -0,0 +1,180 @@
+package testutil
+
+import (
+	"bytes"
+	"fmt"
+	"io/ioutil"
+	"net"
+	"net/http"
+	"net/url"
+	"os"
+	"time"
+)
+
+type HTTPServer struct {
+	URL      string
+	Timeout  time.Duration
+	started  bool
+	request  chan *http.Request
+	response chan ResponseFunc
+}
+
+type Response struct {
+	Status  int
+	Headers map[string]string
+	Body    string
+}
+
+var DefaultClient = &http.Client{
+	Transport: &http.Transport{
+		Proxy: http.ProxyFromEnvironment,
+	},
+}
+
+func NewHTTPServer() *HTTPServer {
+	return &HTTPServer{URL: "http://localhost:4444", Timeout: 5 * time.Second}
+}
+
+type ResponseFunc func(path string) Response
+
+func (s *HTTPServer) Start() {
+	if s.started {
+		return
+	}
+	s.started = true
+	s.request = make(chan *http.Request, 1024)
+	s.response = make(chan ResponseFunc, 1024)
+	u, err := url.Parse(s.URL)
+	if err != nil {
+		panic(err)
+	}
+	l, err := net.Listen("tcp", u.Host)
+	if err != nil {
+		panic(err)
+	}
+	go http.Serve(l, s)
+
+	s.Response(203, nil, "")
+	for {
+		// Wait for it to be up.
+		resp, err := http.Get(s.URL)
+		if err == nil && resp.StatusCode == 203 {
+			break
+		}
+		time.Sleep(1e8)
+	}
+	s.WaitRequest() // Consume dummy request.
+}
+
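// Editor's note: a minimal sketch, not part of the vendored file, of how
// record_test.go above uses this helper: queue a canned Response, point the
// client at http://localhost:4444, then pop the captured *http.Request for
// assertions.
//
//	srv := testutil.NewHTTPServer()
//	srv.Start()
//	srv.Response(200, map[string]string{"Content-Type": "application/json"}, `{"ok":true}`)
//
//	resp, err := http.Get(srv.URL + "/dns/managed/1/records/")
//	// ... assert on resp and err ...
//	req := srv.WaitRequest() // the request the server just served
//	fmt.Println(req.Method, req.URL.Path)
//
+// Flush discards all pending requests and responses.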
+func (s *HTTPServer) Flush() {
+	for {
+		select {
+		case <-s.request:
+		case <-s.response:
+		default:
+			return
+		}
+	}
+}
+
+func body(req *http.Request) string {
+	data, err := ioutil.ReadAll(req.Body)
+	if err != nil {
+		panic(err)
+	}
+	return string(data)
+}
+
+func (s *HTTPServer) ServeHTTP(w http.ResponseWriter, req *http.Request) {
+	req.ParseMultipartForm(1e6)
+	data, err := ioutil.ReadAll(req.Body)
+	if err != nil {
+		panic(err)
+	}
+	req.Body = ioutil.NopCloser(bytes.NewBuffer(data))
+	s.request <- req
+	var resp Response
+	select {
+	case respFunc := <-s.response:
+		resp = respFunc(req.URL.Path)
+	case <-time.After(s.Timeout):
+		const msg = "ERROR: Timeout waiting for test to prepare a response\n"
+		fmt.Fprintf(os.Stderr, msg)
+		resp = Response{500, nil, msg}
+	}
+	if resp.Headers != nil {
+		h := w.Header()
+		for k, v := range resp.Headers {
+			h.Set(k, v)
+		}
+	}
+	if resp.Status != 0 {
+		w.WriteHeader(resp.Status)
+	}
+	w.Write([]byte(resp.Body))
+}
+
+// WaitRequests returns the next n requests made to the http server from
+// the queue. If not enough requests were previously made, it waits until
+// the timeout value for them to be made.
+func (s *HTTPServer) WaitRequests(n int) []*http.Request {
+	reqs := make([]*http.Request, 0, n)
+	for i := 0; i < n; i++ {
+		select {
+		case req := <-s.request:
+			reqs = append(reqs, req)
+		case <-time.After(s.Timeout):
+			panic("Timeout waiting for request")
+		}
+	}
+	return reqs
+}
+
+// WaitRequest returns the next request made to the http server from
+// the queue. If no requests were previously made, it waits until the
+// timeout value for one to be made.
+func (s *HTTPServer) WaitRequest() *http.Request {
+	return s.WaitRequests(1)[0]
+}
+
+// ResponseFunc prepares the test server to respond to the following n
+// requests using f to build each response.
+func (s *HTTPServer) ResponseFunc(n int, f ResponseFunc) {
+	for i := 0; i < n; i++ {
+		s.response <- f
+	}
+}
+
+// ResponseMap maps request paths to responses.
+type ResponseMap map[string]Response
+
+// ResponseMap prepares the test server to respond to the following n
+// requests using m to obtain the responses.
+func (s *HTTPServer) ResponseMap(n int, m ResponseMap) {
+	f := func(path string) Response {
+		for rpath, resp := range m {
+			if rpath == path {
+				return resp
+			}
+		}
+		body := "Path not found in response map: " + path
+		return Response{Status: 500, Body: body}
+	}
+	s.ResponseFunc(n, f)
+}
+
+// Responses prepares the test server to respond to the following n requests
+// using the provided response parameters.
+func (s *HTTPServer) Responses(n int, status int, headers map[string]string, body string) {
+	f := func(path string) Response {
+		return Response{status, headers, body}
+	}
+	s.ResponseFunc(n, f)
+}
+
+// Response prepares the test server to respond to the following request
+// using the provided response parameters.
+func (s *HTTPServer) Response(status int, headers map[string]string, body string) {
+	s.Responses(1, status, headers, body)
+}
diff --git a/vendor/github.com/sthulb/mime/multipart/example_test.go b/vendor/github.com/sthulb/mime/multipart/example_test.go
new file mode 100644
index 000000000..6d6ba81d5
--- /dev/null
+++ b/vendor/github.com/sthulb/mime/multipart/example_test.go
@@ -0,0 +1,53 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+ +package multipart_test + +import ( + "fmt" + "io" + "io/ioutil" + "log" + "mime" + "mime/multipart" + "net/mail" + "strings" +) + +func ExampleNewReader() { + msg := &mail.Message{ + Header: map[string][]string{ + "Content-Type": {"multipart/mixed; boundary=foo"}, + }, + Body: strings.NewReader( + "--foo\r\nFoo: one\r\n\r\nA section\r\n" + + "--foo\r\nFoo: two\r\n\r\nAnd another\r\n" + + "--foo--\r\n"), + } + mediaType, params, err := mime.ParseMediaType(msg.Header.Get("Content-Type")) + if err != nil { + log.Fatal(err) + } + if strings.HasPrefix(mediaType, "multipart/") { + mr := multipart.NewReader(msg.Body, params["boundary"]) + for { + p, err := mr.NextPart() + if err == io.EOF { + return + } + if err != nil { + log.Fatal(err) + } + slurp, err := ioutil.ReadAll(p) + if err != nil { + log.Fatal(err) + } + fmt.Printf("Part %q: %q\n", p.Header.Get("Foo"), slurp) + } + } + + // Output: + // Part "one": "A section" + // Part "two": "And another" +} diff --git a/vendor/github.com/sthulb/mime/multipart/formdata.go b/vendor/github.com/sthulb/mime/multipart/formdata.go new file mode 100644 index 000000000..eee53fc8d --- /dev/null +++ b/vendor/github.com/sthulb/mime/multipart/formdata.go @@ -0,0 +1,157 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package multipart + +import ( + "bytes" + "errors" + "io" + "io/ioutil" + "net/textproto" + "os" +) + +// TODO(adg,bradfitz): find a way to unify the DoS-prevention strategy here +// with that of the http package's ParseForm. + +// ReadForm parses an entire multipart message whose parts have +// a Content-Disposition of "form-data". +// It stores up to maxMemory bytes of the file parts in memory +// and the remainder on disk in temporary files. +func (r *Reader) ReadForm(maxMemory int64) (f *Form, err error) { + form := &Form{make(map[string][]string), make(map[string][]*FileHeader)} + defer func() { + if err != nil { + form.RemoveAll() + } + }() + + maxValueBytes := int64(10 << 20) // 10 MB is a lot of text. + for { + p, err := r.NextPart() + if err == io.EOF { + break + } + if err != nil { + return nil, err + } + + name := p.FormName() + if name == "" { + continue + } + filename := p.FileName() + + var b bytes.Buffer + + if filename == "" { + // value, store as string in memory + n, err := io.CopyN(&b, p, maxValueBytes) + if err != nil && err != io.EOF { + return nil, err + } + maxValueBytes -= n + if maxValueBytes == 0 { + return nil, errors.New("multipart: message too large") + } + form.Value[name] = append(form.Value[name], b.String()) + continue + } + + // file, store in memory or on disk + fh := &FileHeader{ + Filename: filename, + Header: p.Header, + } + n, err := io.CopyN(&b, p, maxMemory+1) + if err != nil && err != io.EOF { + return nil, err + } + if n > maxMemory { + // too big, write to disk and flush buffer + file, err := ioutil.TempFile("", "multipart-") + if err != nil { + return nil, err + } + defer file.Close() + _, err = io.Copy(file, io.MultiReader(&b, p)) + if err != nil { + os.Remove(file.Name()) + return nil, err + } + fh.tmpfile = file.Name() + } else { + fh.content = b.Bytes() + maxMemory -= n + } + form.File[name] = append(form.File[name], fh) + } + + return form, nil +} + +// Form is a parsed multipart form. +// Its File parts are stored either in memory or on disk, +// and are accessible via the *FileHeader's Open method. +// Its Value parts are stored as strings. +// Both are keyed by field name. 
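+//
+// A rough access sketch (the field names "greeting" and "upload" are
+// illustrative; form is assumed to come from Reader.ReadForm):
+//
+//	form, _ := r.ReadForm(10 << 20)       // keep up to 10 MB of file parts in memory
+//	v := form.Value["greeting"][0]        // a non-file field
+//	f, _ := form.File["upload"][0].Open() // a file part, memory- or disk-backed
+//	defer form.RemoveAll()                // remove any temporary files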
+type Form struct { + Value map[string][]string + File map[string][]*FileHeader +} + +// RemoveAll removes any temporary files associated with a Form. +func (f *Form) RemoveAll() error { + var err error + for _, fhs := range f.File { + for _, fh := range fhs { + if fh.tmpfile != "" { + e := os.Remove(fh.tmpfile) + if e != nil && err == nil { + err = e + } + } + } + } + return err +} + +// A FileHeader describes a file part of a multipart request. +type FileHeader struct { + Filename string + Header textproto.MIMEHeader + + content []byte + tmpfile string +} + +// Open opens and returns the FileHeader's associated File. +func (fh *FileHeader) Open() (File, error) { + if b := fh.content; b != nil { + r := io.NewSectionReader(bytes.NewReader(b), 0, int64(len(b))) + return sectionReadCloser{r}, nil + } + return os.Open(fh.tmpfile) +} + +// File is an interface to access the file part of a multipart message. +// Its contents may be either stored in memory or on disk. +// If stored on disk, the File's underlying concrete type will be an *os.File. +type File interface { + io.Reader + io.ReaderAt + io.Seeker + io.Closer +} + +// helper types to turn a []byte into a File + +type sectionReadCloser struct { + *io.SectionReader +} + +func (rc sectionReadCloser) Close() error { + return nil +} diff --git a/vendor/github.com/sthulb/mime/multipart/formdata_test.go b/vendor/github.com/sthulb/mime/multipart/formdata_test.go new file mode 100644 index 000000000..6e2388baf --- /dev/null +++ b/vendor/github.com/sthulb/mime/multipart/formdata_test.go @@ -0,0 +1,90 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package multipart + +import ( + "bytes" + "io" + "os" + "regexp" + "strings" + "testing" +) + +func TestReadForm(t *testing.T) { + testBody := regexp.MustCompile("\n").ReplaceAllString(message, "\r\n") + b := strings.NewReader(testBody) + r := NewReader(b, boundary) + f, err := r.ReadForm(25) + if err != nil { + t.Fatal("ReadForm:", err) + } + defer f.RemoveAll() + if g, e := f.Value["texta"][0], textaValue; g != e { + t.Errorf("texta value = %q, want %q", g, e) + } + if g, e := f.Value["textb"][0], textbValue; g != e { + t.Errorf("texta value = %q, want %q", g, e) + } + fd := testFile(t, f.File["filea"][0], "filea.txt", fileaContents) + if _, ok := fd.(*os.File); ok { + t.Error("file is *os.File, should not be") + } + fd.Close() + fd = testFile(t, f.File["fileb"][0], "fileb.txt", filebContents) + if _, ok := fd.(*os.File); !ok { + t.Errorf("file has unexpected underlying type %T", fd) + } + fd.Close() +} + +func testFile(t *testing.T, fh *FileHeader, efn, econtent string) File { + if fh.Filename != efn { + t.Errorf("filename = %q, want %q", fh.Filename, efn) + } + f, err := fh.Open() + if err != nil { + t.Fatal("opening file:", err) + } + b := new(bytes.Buffer) + _, err = io.Copy(b, f) + if err != nil { + t.Fatal("copying contents:", err) + } + if g := b.String(); g != econtent { + t.Errorf("contents = %q, want %q", g, econtent) + } + return f +} + +const ( + fileaContents = "This is a test file." + filebContents = "Another test file." 
+ textaValue = "foo" + textbValue = "bar" + boundary = `MyBoundary` +) + +const message = ` +--MyBoundary +Content-Disposition: form-data; name="filea"; filename="filea.txt" +Content-Type: text/plain + +` + fileaContents + ` +--MyBoundary +Content-Disposition: form-data; name="fileb"; filename="fileb.txt" +Content-Type: text/plain + +` + filebContents + ` +--MyBoundary +Content-Disposition: form-data; name="texta" + +` + textaValue + ` +--MyBoundary +Content-Disposition: form-data; name="textb" + +` + textbValue + ` +--MyBoundary-- +` diff --git a/vendor/github.com/sthulb/mime/multipart/multipart.go b/vendor/github.com/sthulb/mime/multipart/multipart.go new file mode 100644 index 000000000..3b746a5e1 --- /dev/null +++ b/vendor/github.com/sthulb/mime/multipart/multipart.go @@ -0,0 +1,388 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. +// + +/* +Package multipart implements MIME multipart parsing, as defined in RFC +2046. + +The implementation is sufficient for HTTP (RFC 2388) and the multipart +bodies generated by popular browsers. +*/ +package multipart + +import ( + "bufio" + "bytes" + "fmt" + "io" + "io/ioutil" + "mime" + "mime/quotedprintable" + "net/textproto" +) + +var emptyParams = make(map[string]string) + +// This constant needs to be at least 76 for this package to work correctly. +// This is because \r\n--separator_of_len_70- would fill the buffer and it +// wouldn't be safe to consume a single byte from it. +const peekBufferSize = 4096 + +// A Part represents a single part in a multipart body. +type Part struct { + // The headers of the body, if any, with the keys canonicalized + // in the same fashion that the Go http.Request headers are. + // For example, "foo-bar" changes case to "Foo-Bar" + // + // As a special case, if the "Content-Transfer-Encoding" header + // has a value of "quoted-printable", that header is instead + // hidden from this map and the body is transparently decoded + // during Read calls. + Header textproto.MIMEHeader + + buffer *bytes.Buffer + mr *Reader + bytesRead int + + disposition string + dispositionParams map[string]string + + // r is either a reader directly reading from mr, or it's a + // wrapper around such a reader, decoding the + // Content-Transfer-Encoding + r io.Reader +} + +// FormName returns the name parameter if p has a Content-Disposition +// of type "form-data". Otherwise it returns the empty string. +func (p *Part) FormName() string { + // See http://tools.ietf.org/html/rfc2183 section 2 for EBNF + // of Content-Disposition value format. + if p.dispositionParams == nil { + p.parseContentDisposition() + } + if p.disposition != "form-data" { + return "" + } + return p.dispositionParams["name"] +} + +// FileName returns the filename parameter of the Part's +// Content-Disposition header. +func (p *Part) FileName() string { + if p.dispositionParams == nil { + p.parseContentDisposition() + } + return p.dispositionParams["filename"] +} + +func (p *Part) parseContentDisposition() { + v := p.Header.Get("Content-Disposition") + var err error + p.disposition, p.dispositionParams, err = mime.ParseMediaType(v) + if err != nil { + p.dispositionParams = emptyParams + } +} + +// NewReader creates a new multipart Reader reading from r using the +// given MIME boundary. +// +// The boundary is usually obtained from the "boundary" parameter of +// the message's "Content-Type" header. Use mime.ParseMediaType to +// parse such headers. 
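+//
+// A rough reading sketch (body and contentType are assumed to come from the
+// enclosing message; see ExampleNewReader for a complete version):
+//
+//	_, params, _ := mime.ParseMediaType(contentType)
+//	mr := NewReader(body, params["boundary"])
+//	for {
+//		p, err := mr.NextPart()
+//		if err == io.EOF {
+//			break
+//		}
+//		// ... read from p ...
+//	}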
+func NewReader(r io.Reader, boundary string) *Reader { + b := []byte("\r\n--" + boundary + "--") + return &Reader{ + bufReader: bufio.NewReaderSize(r, peekBufferSize), + nl: b[:2], + nlDashBoundary: b[:len(b)-2], + dashBoundaryDash: b[2:], + dashBoundary: b[2 : len(b)-2], + } +} + +func newPart(mr *Reader) (*Part, error) { + bp := &Part{ + Header: make(map[string][]string), + mr: mr, + buffer: new(bytes.Buffer), + } + if err := bp.populateHeaders(); err != nil { + return nil, err + } + bp.r = partReader{bp} + const cte = "Content-Transfer-Encoding" + if bp.Header.Get(cte) == "quoted-printable" { + bp.Header.Del(cte) + bp.r = quotedprintable.NewReader(bp.r) + } + return bp, nil +} + +func (bp *Part) populateHeaders() error { + r := textproto.NewReader(bp.mr.bufReader) + header, err := r.ReadMIMEHeader() + if err == nil { + bp.Header = header + } + return err +} + +// Read reads the body of a part, after its headers and before the +// next part (if any) begins. +func (p *Part) Read(d []byte) (n int, err error) { + return p.r.Read(d) +} + +// partReader implements io.Reader by reading raw bytes directly from the +// wrapped *Part, without doing any Transfer-Encoding decoding. +type partReader struct { + p *Part +} + +func (pr partReader) Read(d []byte) (n int, err error) { + p := pr.p + defer func() { + p.bytesRead += n + }() + if p.buffer.Len() >= len(d) { + // Internal buffer of unconsumed data is large enough for + // the read request. No need to parse more at the moment. + return p.buffer.Read(d) + } + peek, err := p.mr.bufReader.Peek(peekBufferSize) // TODO(bradfitz): add buffer size accessor + + // Look for an immediate empty part without a leading \r\n + // before the boundary separator. Some MIME code makes empty + // parts like this. Most browsers, however, write the \r\n + // before the subsequent boundary even for empty parts and + // won't hit this path. + if p.bytesRead == 0 && p.mr.peekBufferIsEmptyPart(peek) { + return 0, io.EOF + } + unexpectedEOF := err == io.EOF + if err != nil && !unexpectedEOF { + return 0, fmt.Errorf("multipart: Part Read: %v", err) + } + if peek == nil { + panic("nil peek buf") + } + // Search the peek buffer for "\r\n--boundary". If found, + // consume everything up to the boundary. If not, consume only + // as much of the peek buffer as cannot hold the boundary + // string. + nCopy := 0 + foundBoundary := false + if idx, isEnd := p.mr.peekBufferSeparatorIndex(peek); idx != -1 { + nCopy = idx + foundBoundary = isEnd + if !isEnd && nCopy == 0 { + nCopy = 1 // make some progress. + } + } else if safeCount := len(peek) - len(p.mr.nlDashBoundary); safeCount > 0 { + nCopy = safeCount + } else if unexpectedEOF { + // If we've run out of peek buffer and the boundary + // wasn't found (and can't possibly fit), we must have + // hit the end of the file unexpectedly. + return 0, io.ErrUnexpectedEOF + } + if nCopy > 0 { + if _, err := io.CopyN(p.buffer, p.mr.bufReader, int64(nCopy)); err != nil { + return 0, err + } + } + n, err = p.buffer.Read(d) + if err == io.EOF && !foundBoundary { + // If the boundary hasn't been reached there's more to + // read, so don't pass through an EOF from the buffer + err = nil + } + return +} + +func (p *Part) Close() error { + io.Copy(ioutil.Discard, p) + return nil +} + +// Reader is an iterator over parts in a MIME multipart body. +// Reader's underlying parser consumes its input as needed. Seeking +// isn't supported. 
+type Reader struct { + bufReader *bufio.Reader + + currentPart *Part + partsRead int + + nl []byte // "\r\n" or "\n" (set after seeing first boundary line) + nlDashBoundary []byte // nl + "--boundary" + dashBoundaryDash []byte // "--boundary--" + dashBoundary []byte // "--boundary" +} + +// NextPart returns the next part in the multipart or an error. +// When there are no more parts, the error io.EOF is returned. +func (r *Reader) NextPart() (*Part, error) { + if r.currentPart != nil { + r.currentPart.Close() + } + + expectNewPart := false + for { + line, err := r.bufReader.ReadSlice('\n') + + if err == io.EOF && r.isFinalBoundary(line) { + // If the buffer ends in "--boundary--" without the + // trailing "\r\n", ReadSlice will return an error + // (since it's missing the '\n'), but this is a valid + // multipart EOF so we need to return io.EOF instead of + // a fmt-wrapped one. + return nil, io.EOF + } + if err != nil { + return nil, fmt.Errorf("multipart: NextPart: %v", err) + } + + if r.isBoundaryDelimiterLine(line) { + r.partsRead++ + bp, err := newPart(r) + if err != nil { + return nil, err + } + r.currentPart = bp + return bp, nil + } + + if r.isFinalBoundary(line) { + // Expected EOF + return nil, io.EOF + } + + if expectNewPart { + return nil, fmt.Errorf("multipart: expecting a new Part; got line %q", string(line)) + } + + if r.partsRead == 0 { + // skip line + continue + } + + // Consume the "\n" or "\r\n" separator between the + // body of the previous part and the boundary line we + // now expect will follow. (either a new part or the + // end boundary) + if bytes.Equal(line, r.nl) { + expectNewPart = true + continue + } + + return nil, fmt.Errorf("multipart: unexpected line in Next(): %q", line) + } +} + +// isFinalBoundary reports whether line is the final boundary line +// indicating that all parts are over. +// It matches `^--boundary--[ \t]*(\r\n)?$` +func (mr *Reader) isFinalBoundary(line []byte) bool { + if !bytes.HasPrefix(line, mr.dashBoundaryDash) { + return false + } + rest := line[len(mr.dashBoundaryDash):] + rest = skipLWSPChar(rest) + return len(rest) == 0 || bytes.Equal(rest, mr.nl) +} + +func (mr *Reader) isBoundaryDelimiterLine(line []byte) (ret bool) { + // http://tools.ietf.org/html/rfc2046#section-5.1 + // The boundary delimiter line is then defined as a line + // consisting entirely of two hyphen characters ("-", + // decimal value 45) followed by the boundary parameter + // value from the Content-Type header field, optional linear + // whitespace, and a terminating CRLF. + if !bytes.HasPrefix(line, mr.dashBoundary) { + return false + } + rest := line[len(mr.dashBoundary):] + rest = skipLWSPChar(rest) + + // On the first part, see our lines are ending in \n instead of \r\n + // and switch into that mode if so. This is a violation of the spec, + // but occurs in practice. + if mr.partsRead == 0 && len(rest) == 1 && rest[0] == '\n' { + mr.nl = mr.nl[1:] + mr.nlDashBoundary = mr.nlDashBoundary[1:] + } + return bytes.Equal(rest, mr.nl) +} + +// peekBufferIsEmptyPart reports whether the provided peek-ahead +// buffer represents an empty part. It is called only if we've not +// already read any bytes in this part and checks for the case of MIME +// software not writing the \r\n on empty parts. Some does, some +// doesn't. 
+// +// This checks that what follows the "--boundary" is actually the end +// ("--boundary--" with optional whitespace) or optional whitespace +// and then a newline, so we don't catch "--boundaryFAKE", in which +// case the whole line is part of the data. +func (mr *Reader) peekBufferIsEmptyPart(peek []byte) bool { + // End of parts case. + // Test whether peek matches `^--boundary--[ \t]*(?:\r\n|$)` + if bytes.HasPrefix(peek, mr.dashBoundaryDash) { + rest := peek[len(mr.dashBoundaryDash):] + rest = skipLWSPChar(rest) + return bytes.HasPrefix(rest, mr.nl) || len(rest) == 0 + } + if !bytes.HasPrefix(peek, mr.dashBoundary) { + return false + } + // Test whether rest matches `^[ \t]*\r\n`) + rest := peek[len(mr.dashBoundary):] + rest = skipLWSPChar(rest) + return bytes.HasPrefix(rest, mr.nl) +} + +// peekBufferSeparatorIndex returns the index of mr.nlDashBoundary in +// peek and whether it is a real boundary (and not a prefix of an +// unrelated separator). To be the end, the peek buffer must contain a +// newline after the boundary or contain the ending boundary (--separator--). +func (mr *Reader) peekBufferSeparatorIndex(peek []byte) (idx int, isEnd bool) { + idx = bytes.Index(peek, mr.nlDashBoundary) + if idx == -1 { + return + } + + peek = peek[idx+len(mr.nlDashBoundary):] + if len(peek) == 0 || len(peek) == 1 && peek[0] == '-' { + return idx, false + } + if len(peek) > 1 && peek[0] == '-' && peek[1] == '-' { + return idx, true + } + peek = skipLWSPChar(peek) + // Don't have a complete line after the peek. + if bytes.IndexByte(peek, '\n') == -1 { + return idx, false + } + if len(peek) > 0 && peek[0] == '\n' { + return idx, true + } + if len(peek) > 1 && peek[0] == '\r' && peek[1] == '\n' { + return idx, true + } + return idx, false +} + +// skipLWSPChar returns b with leading spaces and tabs removed. +// RFC 822 defines: +// LWSP-char = SPACE / HTAB +func skipLWSPChar(b []byte) []byte { + for len(b) > 0 && (b[0] == ' ' || b[0] == '\t') { + b = b[1:] + } + return b +} diff --git a/vendor/github.com/sthulb/mime/multipart/multipart_test.go b/vendor/github.com/sthulb/mime/multipart/multipart_test.go new file mode 100644 index 000000000..d06bb4159 --- /dev/null +++ b/vendor/github.com/sthulb/mime/multipart/multipart_test.go @@ -0,0 +1,813 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package multipart + +import ( + "bytes" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "net/textproto" + "os" + "reflect" + "strings" + "testing" +) + +func TestBoundaryLine(t *testing.T) { + mr := NewReader(strings.NewReader(""), "myBoundary") + if !mr.isBoundaryDelimiterLine([]byte("--myBoundary\r\n")) { + t.Error("expected") + } + if !mr.isBoundaryDelimiterLine([]byte("--myBoundary \r\n")) { + t.Error("expected") + } + if !mr.isBoundaryDelimiterLine([]byte("--myBoundary \n")) { + t.Error("expected") + } + if mr.isBoundaryDelimiterLine([]byte("--myBoundary bogus \n")) { + t.Error("expected fail") + } + if mr.isBoundaryDelimiterLine([]byte("--myBoundary bogus--")) { + t.Error("expected fail") + } +} + +func escapeString(v string) string { + bytes, _ := json.Marshal(v) + return string(bytes) +} + +func expectEq(t *testing.T, expected, actual, what string) { + if expected == actual { + return + } + t.Errorf("Unexpected value for %s; got %s (len %d) but expected: %s (len %d)", + what, escapeString(actual), len(actual), escapeString(expected), len(expected)) +} + +func TestNameAccessors(t *testing.T) { + tests := [...][3]string{ + {`form-data; name="foo"`, "foo", ""}, + {` form-data ; name=foo`, "foo", ""}, + {`FORM-DATA;name="foo"`, "foo", ""}, + {` FORM-DATA ; name="foo"`, "foo", ""}, + {` FORM-DATA ; name="foo"`, "foo", ""}, + {` FORM-DATA ; name=foo`, "foo", ""}, + {` FORM-DATA ; filename="foo.txt"; name=foo; baz=quux`, "foo", "foo.txt"}, + {` not-form-data ; filename="bar.txt"; name=foo; baz=quux`, "", "bar.txt"}, + } + for i, test := range tests { + p := &Part{Header: make(map[string][]string)} + p.Header.Set("Content-Disposition", test[0]) + if g, e := p.FormName(), test[1]; g != e { + t.Errorf("test %d: FormName() = %q; want %q", i, g, e) + } + if g, e := p.FileName(), test[2]; g != e { + t.Errorf("test %d: FileName() = %q; want %q", i, g, e) + } + } +} + +var longLine = strings.Repeat("\n\n\r\r\r\n\r\000", (1<<20)/8) + +func testMultipartBody(sep string) string { + testBody := ` +This is a multi-part message. This line is ignored. +--MyBoundary +Header1: value1 +HEADER2: value2 +foo-bar: baz + +My value +The end. +--MyBoundary +name: bigsection + +[longline] +--MyBoundary +Header1: value1b +HEADER2: value2b +foo-bar: bazb + +Line 1 +Line 2 +Line 3 ends in a newline, but just one. 
+ +--MyBoundary + +never read data +--MyBoundary-- + + +useless trailer +` + testBody = strings.Replace(testBody, "\n", sep, -1) + return strings.Replace(testBody, "[longline]", longLine, 1) +} + +func TestMultipart(t *testing.T) { + bodyReader := strings.NewReader(testMultipartBody("\r\n")) + testMultipart(t, bodyReader, false) +} + +func TestMultipartOnlyNewlines(t *testing.T) { + bodyReader := strings.NewReader(testMultipartBody("\n")) + testMultipart(t, bodyReader, true) +} + +func TestMultipartSlowInput(t *testing.T) { + bodyReader := strings.NewReader(testMultipartBody("\r\n")) + testMultipart(t, &slowReader{bodyReader}, false) +} + +func testMultipart(t *testing.T, r io.Reader, onlyNewlines bool) { + reader := NewReader(r, "MyBoundary") + buf := new(bytes.Buffer) + + // Part1 + part, err := reader.NextPart() + if part == nil || err != nil { + t.Error("Expected part1") + return + } + if x := part.Header.Get("Header1"); x != "value1" { + t.Errorf("part.Header.Get(%q) = %q, want %q", "Header1", x, "value1") + } + if x := part.Header.Get("foo-bar"); x != "baz" { + t.Errorf("part.Header.Get(%q) = %q, want %q", "foo-bar", x, "baz") + } + if x := part.Header.Get("Foo-Bar"); x != "baz" { + t.Errorf("part.Header.Get(%q) = %q, want %q", "Foo-Bar", x, "baz") + } + buf.Reset() + if _, err := io.Copy(buf, part); err != nil { + t.Errorf("part 1 copy: %v", err) + } + + adjustNewlines := func(s string) string { + if onlyNewlines { + return strings.Replace(s, "\r\n", "\n", -1) + } + return s + } + + expectEq(t, adjustNewlines("My value\r\nThe end."), buf.String(), "Value of first part") + + // Part2 + part, err = reader.NextPart() + if err != nil { + t.Fatalf("Expected part2; got: %v", err) + return + } + if e, g := "bigsection", part.Header.Get("name"); e != g { + t.Errorf("part2's name header: expected %q, got %q", e, g) + } + buf.Reset() + if _, err := io.Copy(buf, part); err != nil { + t.Errorf("part 2 copy: %v", err) + } + s := buf.String() + if len(s) != len(longLine) { + t.Errorf("part2 body expected long line of length %d; got length %d", + len(longLine), len(s)) + } + if s != longLine { + t.Errorf("part2 long body didn't match") + } + + // Part3 + part, err = reader.NextPart() + if part == nil || err != nil { + t.Error("Expected part3") + return + } + if part.Header.Get("foo-bar") != "bazb" { + t.Error("Expected foo-bar: bazb") + } + buf.Reset() + if _, err := io.Copy(buf, part); err != nil { + t.Errorf("part 3 copy: %v", err) + } + expectEq(t, adjustNewlines("Line 1\r\nLine 2\r\nLine 3 ends in a newline, but just one.\r\n"), + buf.String(), "body of part 3") + + // Part4 + part, err = reader.NextPart() + if part == nil || err != nil { + t.Error("Expected part 4 without errors") + return + } + + // Non-existent part5 + part, err = reader.NextPart() + if part != nil { + t.Error("Didn't expect a fifth part.") + } + if err != io.EOF { + t.Errorf("On fifth part expected io.EOF; got %v", err) + } +} + +func TestVariousTextLineEndings(t *testing.T) { + tests := [...]string{ + "Foo\nBar", + "Foo\nBar\n", + "Foo\r\nBar", + "Foo\r\nBar\r\n", + "Foo\rBar", + "Foo\rBar\r", + "\x00\x01\x02\x09\x0a\x0b\x0c\x0d\x0e\x0f\x10", + } + + for testNum, expectedBody := range tests { + body := "--BOUNDARY\r\n" + + "Content-Disposition: form-data; name=\"value\"\r\n" + + "\r\n" + + expectedBody + + "\r\n--BOUNDARY--\r\n" + bodyReader := strings.NewReader(body) + + reader := NewReader(bodyReader, "BOUNDARY") + buf := new(bytes.Buffer) + part, err := reader.NextPart() + if part == nil { + t.Errorf("Expected a body 
part on text %d", testNum) + continue + } + if err != nil { + t.Errorf("Unexpected error on text %d: %v", testNum, err) + continue + } + written, err := io.Copy(buf, part) + expectEq(t, expectedBody, buf.String(), fmt.Sprintf("test %d", testNum)) + if err != nil { + t.Errorf("Error copying multipart; bytes=%v, error=%v", written, err) + } + + part, err = reader.NextPart() + if part != nil { + t.Errorf("Unexpected part in test %d", testNum) + } + if err != io.EOF { + t.Errorf("On test %d expected io.EOF; got %v", testNum, err) + } + + } +} + +type maliciousReader struct { + t *testing.T + n int +} + +const maxReadThreshold = 1 << 20 + +func (mr *maliciousReader) Read(b []byte) (n int, err error) { + mr.n += len(b) + if mr.n >= maxReadThreshold { + mr.t.Fatal("too much was read") + return 0, io.EOF + } + return len(b), nil +} + +func TestLineLimit(t *testing.T) { + mr := &maliciousReader{t: t} + r := NewReader(mr, "fooBoundary") + part, err := r.NextPart() + if part != nil { + t.Errorf("unexpected part read") + } + if err == nil { + t.Errorf("expected an error") + } + if mr.n >= maxReadThreshold { + t.Errorf("expected to read < %d bytes; read %d", maxReadThreshold, mr.n) + } +} + +func TestMultipartTruncated(t *testing.T) { + testBody := ` +This is a multi-part message. This line is ignored. +--MyBoundary +foo-bar: baz + +Oh no, premature EOF! +` + body := strings.Replace(testBody, "\n", "\r\n", -1) + bodyReader := strings.NewReader(body) + r := NewReader(bodyReader, "MyBoundary") + + part, err := r.NextPart() + if err != nil { + t.Fatalf("didn't get a part") + } + _, err = io.Copy(ioutil.Discard, part) + if err != io.ErrUnexpectedEOF { + t.Fatalf("expected error io.ErrUnexpectedEOF; got %v", err) + } +} + +type slowReader struct { + r io.Reader +} + +func (s *slowReader) Read(p []byte) (int, error) { + if len(p) == 0 { + return s.r.Read(p) + } + return s.r.Read(p[:1]) +} + +func TestLineContinuation(t *testing.T) { + // This body, extracted from an email, contains headers that span multiple + // lines. + + // TODO: The original mail ended with a double-newline before the + // final delimiter; this was manually edited to use a CRLF. 
+ testBody := + "\n--Apple-Mail-2-292336769\nContent-Transfer-Encoding: 7bit\nContent-Type: text/plain;\n\tcharset=US-ASCII;\n\tdelsp=yes;\n\tformat=flowed\n\nI'm finding the same thing happening on my system (10.4.1).\n\n\n--Apple-Mail-2-292336769\nContent-Transfer-Encoding: quoted-printable\nContent-Type: text/html;\n\tcharset=ISO-8859-1\n\nI'm finding the same thing =\nhappening on my system (10.4.1).=A0 But I built it with XCode =\n2.0.=\n\r\n--Apple-Mail-2-292336769--\n" + + r := NewReader(strings.NewReader(testBody), "Apple-Mail-2-292336769") + + for i := 0; i < 2; i++ { + part, err := r.NextPart() + if err != nil { + t.Fatalf("didn't get a part") + } + var buf bytes.Buffer + n, err := io.Copy(&buf, part) + if err != nil { + t.Errorf("error reading part: %v\nread so far: %q", err, buf.String()) + } + if n <= 0 { + t.Errorf("read %d bytes; expected >0", n) + } + } +} + +func TestQuotedPrintableEncoding(t *testing.T) { + // From https://golang.org/issue/4411 + body := "--0016e68ee29c5d515f04cedf6733\r\nContent-Type: text/plain; charset=ISO-8859-1\r\nContent-Disposition: form-data; name=text\r\nContent-Transfer-Encoding: quoted-printable\r\n\r\nwords words words words words words words words words words words words wor=\r\nds words words words words words words words words words words words words =\r\nwords words words words words words words words words words words words wor=\r\nds words words words words words words words words words words words words =\r\nwords words words words words words words words words\r\n--0016e68ee29c5d515f04cedf6733\r\nContent-Type: text/plain; charset=ISO-8859-1\r\nContent-Disposition: form-data; name=submit\r\n\r\nSubmit\r\n--0016e68ee29c5d515f04cedf6733--" + r := NewReader(strings.NewReader(body), "0016e68ee29c5d515f04cedf6733") + part, err := r.NextPart() + if err != nil { + t.Fatal(err) + } + if te, ok := part.Header["Content-Transfer-Encoding"]; ok { + t.Errorf("unexpected Content-Transfer-Encoding of %q", te) + } + var buf bytes.Buffer + _, err = io.Copy(&buf, part) + if err != nil { + t.Error(err) + } + got := buf.String() + want := "words words words words words words words words words words words words words words words words words words words words words words words words words words words words words words words words words words words words words words words words words words words words words words words words words words words words words words words words words words words" + if got != want { + t.Errorf("wrong part value:\n got: %q\nwant: %q", got, want) + } +} + +// Test parsing an image attachment from gmail, which previously failed. +func TestNested(t *testing.T) { + // nested-mime is the body part of a multipart/mixed email + // with boundary e89a8ff1c1e83553e304be640612 + f, err := os.Open("testdata/nested-mime") + if err != nil { + t.Fatal(err) + } + defer f.Close() + mr := NewReader(f, "e89a8ff1c1e83553e304be640612") + p, err := mr.NextPart() + if err != nil { + t.Fatalf("error reading first section (alternative): %v", err) + } + + // Read the inner text/plain and text/html sections of the multipart/alternative. 
+ mr2 := NewReader(p, "e89a8ff1c1e83553e004be640610") + p, err = mr2.NextPart() + if err != nil { + t.Fatalf("reading text/plain part: %v", err) + } + if b, err := ioutil.ReadAll(p); string(b) != "*body*\r\n" || err != nil { + t.Fatalf("reading text/plain part: got %q, %v", b, err) + } + p, err = mr2.NextPart() + if err != nil { + t.Fatalf("reading text/html part: %v", err) + } + if b, err := ioutil.ReadAll(p); string(b) != "body\r\n" || err != nil { + t.Fatalf("reading text/html part: got %q, %v", b, err) + } + + p, err = mr2.NextPart() + if err != io.EOF { + t.Fatalf("final inner NextPart = %v; want io.EOF", err) + } + + // Back to the outer multipart/mixed, reading the image attachment. + _, err = mr.NextPart() + if err != nil { + t.Fatalf("error reading the image attachment at the end: %v", err) + } + + _, err = mr.NextPart() + if err != io.EOF { + t.Fatalf("final outer NextPart = %v; want io.EOF", err) + } +} + +type headerBody struct { + header textproto.MIMEHeader + body string +} + +func formData(key, value string) headerBody { + return headerBody{ + textproto.MIMEHeader{ + "Content-Type": {"text/plain; charset=ISO-8859-1"}, + "Content-Disposition": {"form-data; name=" + key}, + }, + value, + } +} + +type parseTest struct { + name string + in, sep string + want []headerBody +} + +var parseTests = []parseTest{ + // Actual body from App Engine on a blob upload. The final part (the + // Content-Type: message/external-body) is what App Engine replaces + // the uploaded file with. The other form fields (prefixed with + // "other" in their form-data name) are unchanged. A bug was + // reported with blob uploads failing when the other fields were + // empty. This was the MIME POST body that previously failed. + { + name: "App Engine post", + sep: "00151757727e9583fd04bfbca4c6", + in: "--00151757727e9583fd04bfbca4c6\r\nContent-Type: text/plain; charset=ISO-8859-1\r\nContent-Disposition: form-data; name=otherEmpty1\r\n\r\n--00151757727e9583fd04bfbca4c6\r\nContent-Type: text/plain; charset=ISO-8859-1\r\nContent-Disposition: form-data; name=otherFoo1\r\n\r\nfoo\r\n--00151757727e9583fd04bfbca4c6\r\nContent-Type: text/plain; charset=ISO-8859-1\r\nContent-Disposition: form-data; name=otherFoo2\r\n\r\nfoo\r\n--00151757727e9583fd04bfbca4c6\r\nContent-Type: text/plain; charset=ISO-8859-1\r\nContent-Disposition: form-data; name=otherEmpty2\r\n\r\n--00151757727e9583fd04bfbca4c6\r\nContent-Type: text/plain; charset=ISO-8859-1\r\nContent-Disposition: form-data; name=otherRepeatFoo\r\n\r\nfoo\r\n--00151757727e9583fd04bfbca4c6\r\nContent-Type: text/plain; charset=ISO-8859-1\r\nContent-Disposition: form-data; name=otherRepeatFoo\r\n\r\nfoo\r\n--00151757727e9583fd04bfbca4c6\r\nContent-Type: text/plain; charset=ISO-8859-1\r\nContent-Disposition: form-data; name=otherRepeatEmpty\r\n\r\n--00151757727e9583fd04bfbca4c6\r\nContent-Type: text/plain; charset=ISO-8859-1\r\nContent-Disposition: form-data; name=otherRepeatEmpty\r\n\r\n--00151757727e9583fd04bfbca4c6\r\nContent-Type: text/plain; charset=ISO-8859-1\r\nContent-Disposition: form-data; name=submit\r\n\r\nSubmit\r\n--00151757727e9583fd04bfbca4c6\r\nContent-Type: message/external-body; charset=ISO-8859-1; blob-key=AHAZQqG84qllx7HUqO_oou5EvdYQNS3Mbbkb0RjjBoM_Kc1UqEN2ygDxWiyCPulIhpHRPx-VbpB6RX4MrsqhWAi_ZxJ48O9P2cTIACbvATHvg7IgbvZytyGMpL7xO1tlIvgwcM47JNfv_tGhy1XwyEUO8oldjPqg5Q\r\nContent-Disposition: form-data; name=file; filename=\"fall.png\"\r\n\r\nContent-Type: image/png\r\nContent-Length: 232303\r\nX-AppEngine-Upload-Creation: 2012-05-10 
23:14:02.715173\r\nContent-MD5: MzRjODU1ZDZhZGU1NmRlOWEwZmMwMDdlODBmZTA0NzA=\r\nContent-Disposition: form-data; name=file; filename=\"fall.png\"\r\n\r\n\r\n--00151757727e9583fd04bfbca4c6--", + want: []headerBody{ + formData("otherEmpty1", ""), + formData("otherFoo1", "foo"), + formData("otherFoo2", "foo"), + formData("otherEmpty2", ""), + formData("otherRepeatFoo", "foo"), + formData("otherRepeatFoo", "foo"), + formData("otherRepeatEmpty", ""), + formData("otherRepeatEmpty", ""), + formData("submit", "Submit"), + {textproto.MIMEHeader{ + "Content-Type": {"message/external-body; charset=ISO-8859-1; blob-key=AHAZQqG84qllx7HUqO_oou5EvdYQNS3Mbbkb0RjjBoM_Kc1UqEN2ygDxWiyCPulIhpHRPx-VbpB6RX4MrsqhWAi_ZxJ48O9P2cTIACbvATHvg7IgbvZytyGMpL7xO1tlIvgwcM47JNfv_tGhy1XwyEUO8oldjPqg5Q"}, + "Content-Disposition": {"form-data; name=file; filename=\"fall.png\""}, + }, "Content-Type: image/png\r\nContent-Length: 232303\r\nX-AppEngine-Upload-Creation: 2012-05-10 23:14:02.715173\r\nContent-MD5: MzRjODU1ZDZhZGU1NmRlOWEwZmMwMDdlODBmZTA0NzA=\r\nContent-Disposition: form-data; name=file; filename=\"fall.png\"\r\n\r\n"}, + }, + }, + + // Single empty part, ended with --boundary immediately after headers. + { + name: "single empty part, --boundary", + sep: "abc", + in: "--abc\r\nFoo: bar\r\n\r\n--abc--", + want: []headerBody{ + {textproto.MIMEHeader{"Foo": {"bar"}}, ""}, + }, + }, + + // Single empty part, ended with \r\n--boundary immediately after headers. + { + name: "single empty part, \r\n--boundary", + sep: "abc", + in: "--abc\r\nFoo: bar\r\n\r\n\r\n--abc--", + want: []headerBody{ + {textproto.MIMEHeader{"Foo": {"bar"}}, ""}, + }, + }, + + // Final part empty. + { + name: "final part empty", + sep: "abc", + in: "--abc\r\nFoo: bar\r\n\r\n--abc\r\nFoo2: bar2\r\n\r\n--abc--", + want: []headerBody{ + {textproto.MIMEHeader{"Foo": {"bar"}}, ""}, + {textproto.MIMEHeader{"Foo2": {"bar2"}}, ""}, + }, + }, + + // Final part empty with newlines after final separator. + { + name: "final part empty then crlf", + sep: "abc", + in: "--abc\r\nFoo: bar\r\n\r\n--abc--\r\n", + want: []headerBody{ + {textproto.MIMEHeader{"Foo": {"bar"}}, ""}, + }, + }, + + // Final part empty with lwsp-chars after final separator. + { + name: "final part empty then lwsp", + sep: "abc", + in: "--abc\r\nFoo: bar\r\n\r\n--abc-- \t", + want: []headerBody{ + {textproto.MIMEHeader{"Foo": {"bar"}}, ""}, + }, + }, + + // No parts (empty form as submitted by Chrome) + { + name: "no parts", + sep: "----WebKitFormBoundaryQfEAfzFOiSemeHfA", + in: "------WebKitFormBoundaryQfEAfzFOiSemeHfA--\r\n", + want: []headerBody{}, + }, + + // Part containing data starting with the boundary, but with additional suffix. + { + name: "fake separator as data", + sep: "sep", + in: "--sep\r\nFoo: bar\r\n\r\n--sepFAKE\r\n--sep--", + want: []headerBody{ + {textproto.MIMEHeader{"Foo": {"bar"}}, "--sepFAKE"}, + }, + }, + + // Part containing a boundary with whitespace following it. + { + name: "boundary with whitespace", + sep: "sep", + in: "--sep \r\nFoo: bar\r\n\r\ntext\r\n--sep--", + want: []headerBody{ + {textproto.MIMEHeader{"Foo": {"bar"}}, "text"}, + }, + }, + + // With ignored leading line. + { + name: "leading line", + sep: "MyBoundary", + in: strings.Replace(`This is a multi-part message. This line is ignored. 
+--MyBoundary +foo: bar + + +--MyBoundary--`, "\n", "\r\n", -1), + want: []headerBody{ + {textproto.MIMEHeader{"Foo": {"bar"}}, ""}, + }, + }, + + // Issue 10616; minimal + { + name: "issue 10616 minimal", + sep: "sep", + in: "--sep \r\nFoo: bar\r\n\r\n" + + "a\r\n" + + "--sep_alt\r\n" + + "b\r\n" + + "\r\n--sep--", + want: []headerBody{ + {textproto.MIMEHeader{"Foo": {"bar"}}, "a\r\n--sep_alt\r\nb\r\n"}, + }, + }, + + // Issue 10616; full example from bug. + { + name: "nested separator prefix is outer separator", + sep: "----=_NextPart_4c2fbafd7ec4c8bf08034fe724b608d9", + in: strings.Replace(`------=_NextPart_4c2fbafd7ec4c8bf08034fe724b608d9 +Content-Type: multipart/alternative; boundary="----=_NextPart_4c2fbafd7ec4c8bf08034fe724b608d9_alt" + +------=_NextPart_4c2fbafd7ec4c8bf08034fe724b608d9_alt +Content-Type: text/plain; charset="utf-8" +Content-Transfer-Encoding: 8bit + +This is a multi-part message in MIME format. + +------=_NextPart_4c2fbafd7ec4c8bf08034fe724b608d9_alt +Content-Type: text/html; charset="utf-8" +Content-Transfer-Encoding: 8bit + +html things +------=_NextPart_4c2fbafd7ec4c8bf08034fe724b608d9_alt-- +------=_NextPart_4c2fbafd7ec4c8bf08034fe724b608d9--`, "\n", "\r\n", -1), + want: []headerBody{ + {textproto.MIMEHeader{"Content-Type": {`multipart/alternative; boundary="----=_NextPart_4c2fbafd7ec4c8bf08034fe724b608d9_alt"`}}, + strings.Replace(`------=_NextPart_4c2fbafd7ec4c8bf08034fe724b608d9_alt +Content-Type: text/plain; charset="utf-8" +Content-Transfer-Encoding: 8bit + +This is a multi-part message in MIME format. + +------=_NextPart_4c2fbafd7ec4c8bf08034fe724b608d9_alt +Content-Type: text/html; charset="utf-8" +Content-Transfer-Encoding: 8bit + +html things +------=_NextPart_4c2fbafd7ec4c8bf08034fe724b608d9_alt--`, "\n", "\r\n", -1), + }, + }, + }, + // Issue 12662: Check that we don't consume the leading \r if the peekBuffer + // ends in '\r\n--separator-' + { + name: "peek buffer boundary condition", + sep: "00ffded004d4dd0fdf945fbdef9d9050cfd6a13a821846299b27fc71b9db", + in: strings.Replace(`--00ffded004d4dd0fdf945fbdef9d9050cfd6a13a821846299b27fc71b9db +Content-Disposition: form-data; name="block"; filename="block" +Content-Type: application/octet-stream + +`+strings.Repeat("A", peekBufferSize-65)+"\n--00ffded004d4dd0fdf945fbdef9d9050cfd6a13a821846299b27fc71b9db--", "\n", "\r\n", -1), + want: []headerBody{ + {textproto.MIMEHeader{"Content-Type": {`application/octet-stream`}, "Content-Disposition": {`form-data; name="block"; filename="block"`}}, + strings.Repeat("A", peekBufferSize-65), + }, + }, + }, + // Issue 12662: Same test as above with \r\n at the end + { + name: "peek buffer boundary condition", + sep: "00ffded004d4dd0fdf945fbdef9d9050cfd6a13a821846299b27fc71b9db", + in: strings.Replace(`--00ffded004d4dd0fdf945fbdef9d9050cfd6a13a821846299b27fc71b9db +Content-Disposition: form-data; name="block"; filename="block" +Content-Type: application/octet-stream + +`+strings.Repeat("A", peekBufferSize-65)+"\n--00ffded004d4dd0fdf945fbdef9d9050cfd6a13a821846299b27fc71b9db--\n", "\n", "\r\n", -1), + want: []headerBody{ + {textproto.MIMEHeader{"Content-Type": {`application/octet-stream`}, "Content-Disposition": {`form-data; name="block"; filename="block"`}}, + strings.Repeat("A", peekBufferSize-65), + }, + }, + }, + // Issue 12662v2: We want to make sure that for short buffers that end with + // '\r\n--separator-' we always consume at least one (valid) symbol from the + // peekBuffer + { + name: "peek buffer boundary condition", + sep: 
"aaaaaaaaaa00ffded004d4dd0fdf945fbdef9d9050cfd6a13a821846299b27fc71b9db", + in: strings.Replace(`--aaaaaaaaaa00ffded004d4dd0fdf945fbdef9d9050cfd6a13a821846299b27fc71b9db +Content-Disposition: form-data; name="block"; filename="block" +Content-Type: application/octet-stream + +`+strings.Repeat("A", peekBufferSize)+"\n--aaaaaaaaaa00ffded004d4dd0fdf945fbdef9d9050cfd6a13a821846299b27fc71b9db--", "\n", "\r\n", -1), + want: []headerBody{ + {textproto.MIMEHeader{"Content-Type": {`application/octet-stream`}, "Content-Disposition": {`form-data; name="block"; filename="block"`}}, + strings.Repeat("A", peekBufferSize), + }, + }, + }, + // Context: https://github.com/camlistore/camlistore/issues/642 + // If the file contents in the form happens to have a size such as: + // size = peekBufferSize - (len("\n--") + len(boundary) + len("\r") + 1), (modulo peekBufferSize) + // then peekBufferSeparatorIndex was wrongly returning (-1, false), which was leading to an nCopy + // cut such as: + // "somedata\r| |\n--Boundary\r" (instead of "somedata| |\r\n--Boundary\r"), which was making the + // subsequent Read miss the boundary. + { + name: "safeCount off by one", + sep: "08b84578eabc563dcba967a945cdf0d9f613864a8f4a716f0e81caa71a74", + in: strings.Replace(`--08b84578eabc563dcba967a945cdf0d9f613864a8f4a716f0e81caa71a74 +Content-Disposition: form-data; name="myfile"; filename="my-file.txt" +Content-Type: application/octet-stream + +`, "\n", "\r\n", -1) + + strings.Repeat("A", peekBufferSize-(len("\n--")+len("08b84578eabc563dcba967a945cdf0d9f613864a8f4a716f0e81caa71a74")+len("\r")+1)) + + strings.Replace(` +--08b84578eabc563dcba967a945cdf0d9f613864a8f4a716f0e81caa71a74 +Content-Disposition: form-data; name="key" + +val +--08b84578eabc563dcba967a945cdf0d9f613864a8f4a716f0e81caa71a74-- +`, "\n", "\r\n", -1), + want: []headerBody{ + {textproto.MIMEHeader{"Content-Type": {`application/octet-stream`}, "Content-Disposition": {`form-data; name="myfile"; filename="my-file.txt"`}}, + strings.Repeat("A", peekBufferSize-(len("\n--")+len("08b84578eabc563dcba967a945cdf0d9f613864a8f4a716f0e81caa71a74")+len("\r")+1)), + }, + {textproto.MIMEHeader{"Content-Disposition": {`form-data; name="key"`}}, + "val", + }, + }, + }, + + roundTripParseTest(), +} + +func TestParse(t *testing.T) { +Cases: + for _, tt := range parseTests { + r := NewReader(strings.NewReader(tt.in), tt.sep) + got := []headerBody{} + for { + p, err := r.NextPart() + if err == io.EOF { + break + } + if err != nil { + t.Errorf("in test %q, NextPart: %v", tt.name, err) + continue Cases + } + pbody, err := ioutil.ReadAll(p) + if err != nil { + t.Errorf("in test %q, error reading part: %v", tt.name, err) + continue Cases + } + got = append(got, headerBody{p.Header, string(pbody)}) + } + if !reflect.DeepEqual(tt.want, got) { + t.Errorf("test %q:\n got: %v\nwant: %v", tt.name, got, tt.want) + if len(tt.want) != len(got) { + t.Errorf("test %q: got %d parts, want %d", tt.name, len(got), len(tt.want)) + } else if len(got) > 1 { + for pi, wantPart := range tt.want { + if !reflect.DeepEqual(wantPart, got[pi]) { + t.Errorf("test %q, part %d:\n got: %v\nwant: %v", tt.name, pi, got[pi], wantPart) + } + } + } + } + } +} + +func partsFromReader(r *Reader) ([]headerBody, error) { + got := []headerBody{} + for { + p, err := r.NextPart() + if err == io.EOF { + return got, nil + } + if err != nil { + return nil, fmt.Errorf("NextPart: %v", err) + } + pbody, err := ioutil.ReadAll(p) + if err != nil { + return nil, fmt.Errorf("error reading part: %v", err) + } + got = append(got, 
headerBody{p.Header, string(pbody)}) + } +} + +func TestParseAllSizes(t *testing.T) { + const maxSize = 5 << 10 + var buf bytes.Buffer + body := strings.Repeat("a", maxSize) + bodyb := []byte(body) + for size := 0; size < maxSize; size++ { + buf.Reset() + w := NewWriter(&buf) + part, _ := w.CreateFormField("f") + part.Write(bodyb[:size]) + part, _ = w.CreateFormField("key") + part.Write([]byte("val")) + w.Close() + r := NewReader(&buf, w.Boundary()) + got, err := partsFromReader(r) + if err != nil { + t.Errorf("For size %d: %v", size, err) + continue + } + if len(got) != 2 { + t.Errorf("For size %d, num parts = %d; want 2", size, len(got)) + continue + } + if got[0].body != body[:size] { + t.Errorf("For size %d, got unexpected len %d: %q", size, len(got[0].body), got[0].body) + } + } +} + +func roundTripParseTest() parseTest { + t := parseTest{ + name: "round trip", + want: []headerBody{ + formData("empty", ""), + formData("lf", "\n"), + formData("cr", "\r"), + formData("crlf", "\r\n"), + formData("foo", "bar"), + }, + } + var buf bytes.Buffer + w := NewWriter(&buf) + for _, p := range t.want { + pw, err := w.CreatePart(p.header) + if err != nil { + panic(err) + } + _, err = pw.Write([]byte(p.body)) + if err != nil { + panic(err) + } + } + w.Close() + t.in = buf.String() + t.sep = w.Boundary() + return t +} diff --git a/vendor/github.com/sthulb/mime/multipart/testdata/nested-mime b/vendor/github.com/sthulb/mime/multipart/testdata/nested-mime new file mode 100644 index 000000000..71c238e38 --- /dev/null +++ b/vendor/github.com/sthulb/mime/multipart/testdata/nested-mime @@ -0,0 +1,29 @@ +--e89a8ff1c1e83553e304be640612 +Content-Type: multipart/alternative; boundary=e89a8ff1c1e83553e004be640610 + +--e89a8ff1c1e83553e004be640610 +Content-Type: text/plain; charset=UTF-8 + +*body* + +--e89a8ff1c1e83553e004be640610 +Content-Type: text/html; charset=UTF-8 + +body + +--e89a8ff1c1e83553e004be640610-- +--e89a8ff1c1e83553e304be640612 +Content-Type: image/png; name="x.png" +Content-Disposition: attachment; + filename="x.png" +Content-Transfer-Encoding: base64 +X-Attachment-Id: f_h1edgigu0 + +iVBORw0KGgoAAAANSUhEUgAAAagAAADrCAIAAACza5XhAAAKMWlDQ1BJQ0MgUHJvZmlsZQAASImd +lndUU9kWh8+9N71QkhCKlNBraFICSA29SJEuKjEJEErAkAAiNkRUcERRkaYIMijggKNDkbEiioUB +8b2kqeGaj4aTNftesu5mob4pr07ecMywRwLBvDCJOksqlUyldAZD7g9fxIZRWWPMvXRNJROJRBIG +Y7Vx0mva1HAwYqibdKONXye3dW4iUonhWFJnqK7OaanU1gGkErFYEgaj0cg8wK+zVPh2ziwnHy07 +U8lYTNapezSzOuevRwLB7CFkqQQCwaJDiBQIBIJFhwh8AoFg0SHUqQUCASRJKkwkhMy/JfODWPEJ +BIJFhwh8AoFg0TFnQqQ55GtPFopcJsN97e1nYtNuIBYeGBgYCmYrmE3jZ05iaGAoMX0xzxkWz6Hv +yO7WvrlwzA0uLzrD+VkKqViwl9IfTBVNFMyc/x9alloiPPlqhQAAAABJRU5ErkJggg== +--e89a8ff1c1e83553e304be640612-- diff --git a/vendor/github.com/sthulb/mime/multipart/writer.go b/vendor/github.com/sthulb/mime/multipart/writer.go new file mode 100644 index 000000000..ffb4b9d53 --- /dev/null +++ b/vendor/github.com/sthulb/mime/multipart/writer.go @@ -0,0 +1,191 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package multipart + +import ( + "bytes" + "crypto/rand" + "errors" + "fmt" + "io" + "net/textproto" + "sort" + "strings" +) + +// A Writer generates multipart messages. +type Writer struct { + w io.Writer + boundary string + lastpart *part +} + +// NewWriter returns a new multipart Writer with a random boundary, +// writing to w. 
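+//
+// A rough writing sketch (buf, fileBytes, and the field names are
+// illustrative):
+//
+//	w := NewWriter(&buf)
+//	w.WriteField("key", "val")
+//	fw, _ := w.CreateFormFile("file", "name.txt")
+//	fw.Write(fileBytes)
+//	w.Close() // writes the trailing boundary line
+//	// send buf with Content-Type set to w.FormDataContentType()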
+func NewWriter(w io.Writer) *Writer { + return &Writer{ + w: w, + boundary: randomBoundary(), + } +} + +// Boundary returns the Writer's boundary. +func (w *Writer) Boundary() string { + return w.boundary +} + +// SetBoundary overrides the Writer's default randomly-generated +// boundary separator with an explicit value. +// +// SetBoundary must be called before any parts are created, may only +// contain certain ASCII characters, and must be non-empty and +// at most 69 bytes long. +func (w *Writer) SetBoundary(boundary string) error { + if w.lastpart != nil { + return errors.New("mime: SetBoundary called after write") + } + // rfc2046#section-5.1.1 + if len(boundary) < 1 || len(boundary) > 69 { + return errors.New("mime: invalid boundary length") + } + for _, b := range boundary { + if 'A' <= b && b <= 'Z' || 'a' <= b && b <= 'z' || '0' <= b && b <= '9' { + continue + } + switch b { + case '\'', '(', ')', '+', '_', ',', '-', '.', '/', ':', '=', '?': + continue + } + return errors.New("mime: invalid boundary character") + } + w.boundary = boundary + return nil +} + +// FormDataContentType returns the Content-Type for an HTTP +// multipart/form-data with this Writer's Boundary. +func (w *Writer) FormDataContentType() string { + return "multipart/form-data; boundary=" + w.boundary +} + +func randomBoundary() string { + var buf [30]byte + _, err := io.ReadFull(rand.Reader, buf[:]) + if err != nil { + panic(err) + } + return fmt.Sprintf("%x", buf[:]) +} + +// CreatePart creates a new multipart section with the provided +// header. The body of the part should be written to the returned +// Writer. After calling CreatePart, any previous part may no longer +// be written to. +func (w *Writer) CreatePart(header textproto.MIMEHeader) (io.Writer, error) { + if w.lastpart != nil { + if err := w.lastpart.close(); err != nil { + return nil, err + } + } + var b bytes.Buffer + if w.lastpart != nil { + fmt.Fprintf(&b, "\r\n--%s\r\n", w.boundary) + } else { + fmt.Fprintf(&b, "--%s\r\n", w.boundary) + } + + keys := make([]string, 0, len(header)) + for k := range header { + keys = append(keys, k) + } + sort.Strings(keys) + + for _, k := range keys { + for _, v := range header[k] { + fmt.Fprintf(&b, "%s: %s\r\n", k, v) + } + } + fmt.Fprintf(&b, "\r\n") + _, err := io.Copy(w.w, &b) + if err != nil { + return nil, err + } + p := &part{ + mw: w, + } + w.lastpart = p + return p, nil +} + +var quoteEscaper = strings.NewReplacer("\\", "\\\\", `"`, "\\\"") + +func escapeQuotes(s string) string { + return quoteEscaper.Replace(s) +} + +// CreateFormFile is a convenience wrapper around CreatePart. It creates +// a new form-data header with the provided field name and file name. +func (w *Writer) CreateFormFile(fieldname, filename string) (io.Writer, error) { + h := make(textproto.MIMEHeader) + h.Set("Content-Disposition", + fmt.Sprintf(`form-data; name="%s"; filename="%s"`, + escapeQuotes(fieldname), escapeQuotes(filename))) + h.Set("Content-Type", "application/octet-stream") + return w.CreatePart(h) +} + +// CreateFormField calls CreatePart with a header using the +// given field name. +func (w *Writer) CreateFormField(fieldname string) (io.Writer, error) { + h := make(textproto.MIMEHeader) + h.Set("Content-Disposition", + fmt.Sprintf(`form-data; name="%s"`, escapeQuotes(fieldname))) + return w.CreatePart(h) +} + +// WriteField calls CreateFormField and then writes the given value. 
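+// It is shorthand for the two-step form (sketch):
+//
+//	p, _ := w.CreateFormField(fieldname)
+//	p.Write([]byte(value))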
+func (w *Writer) WriteField(fieldname, value string) error { + p, err := w.CreateFormField(fieldname) + if err != nil { + return err + } + _, err = p.Write([]byte(value)) + return err +} + +// Close finishes the multipart message and writes the trailing +// boundary end line to the output. +func (w *Writer) Close() error { + if w.lastpart != nil { + if err := w.lastpart.close(); err != nil { + return err + } + w.lastpart = nil + } + _, err := fmt.Fprintf(w.w, "\r\n--%s--\r\n", w.boundary) + return err +} + +type part struct { + mw *Writer + closed bool + we error // last error that occurred writing +} + +func (p *part) close() error { + p.closed = true + return p.we +} + +func (p *part) Write(d []byte) (n int, err error) { + if p.closed { + return 0, errors.New("multipart: can't write to finished part") + } + n, err = p.mw.w.Write(d) + if err != nil { + p.we = err + } + return +} diff --git a/vendor/github.com/sthulb/mime/multipart/writer_test.go b/vendor/github.com/sthulb/mime/multipart/writer_test.go new file mode 100644 index 000000000..04a6a9b3c --- /dev/null +++ b/vendor/github.com/sthulb/mime/multipart/writer_test.go @@ -0,0 +1,152 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package multipart + +import ( + "bytes" + "io/ioutil" + "net/textproto" + "strings" + "testing" +) + +func TestWriter(t *testing.T) { + fileContents := []byte("my file contents") + + var b bytes.Buffer + w := NewWriter(&b) + { + part, err := w.CreateFormFile("myfile", "my-file.txt") + if err != nil { + t.Fatalf("CreateFormFile: %v", err) + } + part.Write(fileContents) + err = w.WriteField("key", "val") + if err != nil { + t.Fatalf("WriteField: %v", err) + } + part.Write([]byte("val")) + err = w.Close() + if err != nil { + t.Fatalf("Close: %v", err) + } + s := b.String() + if len(s) == 0 { + t.Fatal("String: unexpected empty result") + } + if s[0] == '\r' || s[0] == '\n' { + t.Fatal("String: unexpected newline") + } + } + + r := NewReader(&b, w.Boundary()) + + part, err := r.NextPart() + if err != nil { + t.Fatalf("part 1: %v", err) + } + if g, e := part.FormName(), "myfile"; g != e { + t.Errorf("part 1: want form name %q, got %q", e, g) + } + slurp, err := ioutil.ReadAll(part) + if err != nil { + t.Fatalf("part 1: ReadAll: %v", err) + } + if e, g := string(fileContents), string(slurp); e != g { + t.Errorf("part 1: want contents %q, got %q", e, g) + } + + part, err = r.NextPart() + if err != nil { + t.Fatalf("part 2: %v", err) + } + if g, e := part.FormName(), "key"; g != e { + t.Errorf("part 2: want form name %q, got %q", e, g) + } + slurp, err = ioutil.ReadAll(part) + if err != nil { + t.Fatalf("part 2: ReadAll: %v", err) + } + if e, g := "val", string(slurp); e != g { + t.Errorf("part 2: want contents %q, got %q", e, g) + } + + part, err = r.NextPart() + if part != nil || err == nil { + t.Fatalf("expected end of parts; got %v, %v", part, err) + } +} + +func TestWriterSetBoundary(t *testing.T) { + var b bytes.Buffer + w := NewWriter(&b) + tests := []struct { + b string + ok bool + }{ + {"abc", true}, + {"", false}, + {"ungültig", false}, + {"!", false}, + {strings.Repeat("x", 69), true}, + {strings.Repeat("x", 70), false}, + {"bad!ascii!", false}, + {"my-separator", true}, + } + for i, tt := range tests { + err := w.SetBoundary(tt.b) + got := err == nil + if got != tt.ok { + t.Errorf("%d. 
boundary %q = %v (%v); want %v", i, tt.b, got, err, tt.ok) + } else if tt.ok { + got := w.Boundary() + if got != tt.b { + t.Errorf("boundary = %q; want %q", got, tt.b) + } + } + } + w.Close() + if got := b.String(); !strings.Contains(got, "\r\n--my-separator--\r\n") { + t.Errorf("expected my-separator in output. got: %q", got) + } +} + +func TestWriterBoundaryGoroutines(t *testing.T) { + // Verify there's no data race accessing any lazy boundary if it's used by + // different goroutines. This was previously broken by + // https://codereview.appspot.com/95760043/ and reverted in + // https://codereview.appspot.com/117600043/ + w := NewWriter(ioutil.Discard) + done := make(chan int) + go func() { + w.CreateFormField("foo") + done <- 1 + }() + w.Boundary() + <-done +} + +func TestSortedHeader(t *testing.T) { + var buf bytes.Buffer + mimeWriter := NewWriter(&buf) + if err := mimeWriter.SetBoundary("MIMEBOUNDARY"); err != nil { + t.Fatalf("Error setting mime boundary: %v", err) + } + + header := textproto.MIMEHeader{"Z": {"1"}, "A": {"2"}, "M": {"3"}, "C": {"4"}} + + part, err := mimeWriter.CreatePart(header) + if err != nil { + t.Fatalf("Unable to create part: %v", err) + } + part.Write([]byte("foo")) + + mimeWriter.Close() + + want := "--MIMEBOUNDARY\r\nA: 2\r\nC: 4\r\nM: 3\r\nZ: 1\r\n\r\nfoo\r\n--MIMEBOUNDARY--\r\n" + if want != buf.String() { + t.Fatalf("\n got: %q\nwant: %q\n", buf.String(), want) + } +} diff --git a/vendor/github.com/tent/http-link-go/.gitignore b/vendor/github.com/tent/http-link-go/.gitignore new file mode 100644 index 000000000..9ed3b07ce --- /dev/null +++ b/vendor/github.com/tent/http-link-go/.gitignore @@ -0,0 +1 @@ +*.test diff --git a/vendor/github.com/tent/http-link-go/.travis.yml b/vendor/github.com/tent/http-link-go/.travis.yml new file mode 100644 index 000000000..7362f08e4 --- /dev/null +++ b/vendor/github.com/tent/http-link-go/.travis.yml @@ -0,0 +1,6 @@ +language: go +go: + - 1.1 + - tip +before_install: + - go get launchpad.net/gocheck diff --git a/vendor/github.com/tent/http-link-go/LICENSE b/vendor/github.com/tent/http-link-go/LICENSE new file mode 100644 index 000000000..88dcd4afd --- /dev/null +++ b/vendor/github.com/tent/http-link-go/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2013 Tent.is, LLC. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Tent.is, LLC nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/tent/http-link-go/README.md b/vendor/github.com/tent/http-link-go/README.md new file mode 100644 index 000000000..07d470e4d --- /dev/null +++ b/vendor/github.com/tent/http-link-go/README.md @@ -0,0 +1,12 @@ +# http-link-go [![Build Status](https://travis-ci.org/tent/http-link-go.png?branch=master)](https://travis-ci.org/tent/http-link-go) + +http-link-go implements parsing and serialization of Link header values as +defined in [RFC 5988](https://tools.ietf.org/html/rfc5988). + +[**Documentation**](http://godoc.org/github.com/tent/http-link-go) + +## Installation + +```text +go get github.com/tent/http-link-go +``` diff --git a/vendor/github.com/tent/http-link-go/link.go b/vendor/github.com/tent/http-link-go/link.go new file mode 100644 index 000000000..584dfd051 --- /dev/null +++ b/vendor/github.com/tent/http-link-go/link.go @@ -0,0 +1,185 @@ +// Package link implements parsing and serialization of Link header values as +// defined in RFC 5988. +package link + +import ( + "bytes" + "errors" + "sort" + "unicode" +) + +type Link struct { + URI string + Rel string + Params map[string]string +} + +// Format serializes a slice of Links into a header value. It does not currently +// implement RFC 2231 handling of non-ASCII character encoding and language +// information. +func Format(links []Link) string { + buf := &bytes.Buffer{} + for i, link := range links { + if i > 0 { + buf.Write([]byte(", ")) + } + buf.WriteByte('<') + buf.WriteString(link.URI) + buf.WriteByte('>') + + writeParam(buf, "rel", link.Rel) + + keys := make([]string, 0, len(link.Params)) + for k := range link.Params { + keys = append(keys, k) + } + sort.Strings(keys) + + for _, k := range keys { + writeParam(buf, k, link.Params[k]) + } + } + + return buf.String() +} + +func writeParam(buf *bytes.Buffer, key, value string) { + buf.Write([]byte("; ")) + buf.WriteString(key) + buf.Write([]byte(`="`)) + buf.WriteString(value) + buf.WriteByte('"') +} + +// Parse parses a Link header value into a slice of Links. It does not currently +// implement RFC 2231 handling of non-ASCII character encoding and language +// information. 
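+//
+// A minimal usage sketch (assuming the package is imported as "link"; the
+// header value below is illustrative):
+//
+//	links, err := link.Parse(`</chapter2>; rel="previous"; title="foo"`)
+//	if err != nil {
+//		// handle malformed header value
+//	}
+//	fmt.Println(links[0].URI, links[0].Rel) // "/chapter2 previous"
+//	fmt.Println(link.Format(links))         // </chapter2>; rel="previous"; title="foo"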
+func Parse(l string) ([]Link, error) {
+	v := []byte(l)
+	v = bytes.TrimSpace(v)
+	if len(v) == 0 {
+		return nil, nil
+	}
+
+	links := make([]Link, 0, 1)
+	for len(v) > 0 {
+		if v[0] != '<' {
+			return nil, errors.New("link: does not start with <")
+		}
+		lend := bytes.IndexByte(v, '>')
+		if lend == -1 {
+			return nil, errors.New("link: does not contain ending >")
+		}
+
+		params := make(map[string]string)
+		link := Link{URI: string(v[1:lend]), Params: params}
+		links = append(links, link)
+
+		// trim off parsed url
+		v = v[lend+1:]
+		if len(v) == 0 {
+			break
+		}
+		v = bytes.TrimLeftFunc(v, unicode.IsSpace)
+
+		for len(v) > 0 {
+			if v[0] != ';' && v[0] != ',' {
+				return nil, errors.New(`link: expected ";" or ",", got "` + string(v[0:1]) + `"`)
+			}
+			var next bool
+			if v[0] == ',' {
+				next = true
+			}
+			v = bytes.TrimLeftFunc(v[1:], unicode.IsSpace)
+			if next || len(v) == 0 {
+				break
+			}
+			var key, value []byte
+			key, value, v = consumeParam(v)
+			if key == nil || value == nil {
+				return nil, errors.New("link: malformed param")
+			}
+			if k := string(key); k == "rel" {
+				if links[len(links)-1].Rel == "" {
+					links[len(links)-1].Rel = string(value)
+				}
+			} else {
+				params[k] = string(value)
+			}
+			v = bytes.TrimLeftFunc(v, unicode.IsSpace)
+		}
+	}
+
+	return links, nil
+}
+
+func isTokenChar(r rune) bool {
+	return r > 0x20 && r < 0x7f && r != '"' && r != ',' && r != '=' && r != ';'
+}
+
+func isNotTokenChar(r rune) bool { return !isTokenChar(r) }
+
+func consumeToken(v []byte) (token, rest []byte) {
+	notPos := bytes.IndexFunc(v, isNotTokenChar)
+	if notPos == -1 {
+		return v, nil
+	}
+	if notPos == 0 {
+		return nil, v
+	}
+	return v[0:notPos], v[notPos:]
+}
+
+func consumeValue(v []byte) (value, rest []byte) {
+	if v[0] != '"' {
+		return nil, v
+	}
+
+	rest = v[1:]
+	buffer := &bytes.Buffer{}
+	var nextIsLiteral bool
+	for idx, r := range string(rest) {
+		switch {
+		case nextIsLiteral:
+			buffer.WriteRune(r)
+			nextIsLiteral = false
+		case r == '"':
+			return buffer.Bytes(), rest[idx+1:]
+		case r == '\\':
+			nextIsLiteral = true
+		case r != '\r' && r != '\n':
+			buffer.WriteRune(r)
+		default:
+			return nil, v
+		}
+	}
+	return nil, v
+}
+
+func consumeParam(v []byte) (param, value, rest []byte) {
+	param, rest = consumeToken(v)
+	param = bytes.ToLower(param)
+	if param == nil {
+		return nil, nil, v
+	}
+
+	rest = bytes.TrimLeftFunc(rest, unicode.IsSpace)
+	if len(rest) == 0 || rest[0] != '=' {
+		return nil, nil, v
+	}
+	rest = rest[1:] // consume equals sign
+	rest = bytes.TrimLeftFunc(rest, unicode.IsSpace)
+	if len(rest) == 0 {
+		return nil, nil, v
+	}
+	if rest[0] != '"' {
+		value, rest = consumeToken(rest)
+	} else {
+		value, rest = consumeValue(rest)
+	}
+	if value == nil {
+		return nil, nil, v
+	}
+	return param, value, rest
+}
diff --git a/vendor/github.com/tent/http-link-go/link_test.go b/vendor/github.com/tent/http-link-go/link_test.go
new file mode 100644
index 000000000..8a78eeabb
--- /dev/null
+++ b/vendor/github.com/tent/http-link-go/link_test.go
@@ -0,0 +1,67 @@
+package link
+
+import (
+	"testing"
+
+	. "launchpad.net/gocheck"
+)
+
+// Hook up gocheck into the "go test" runner.
+func Test(t *testing.T) { TestingT(t) } + +type LinkSuite struct{} + +var _ = Suite(&LinkSuite{}) + +// TODO: add more tests +var linkParseTests = []struct { + in string + out []Link +}{ + { + "; rel=\"previous\";\n title=\"previous chapter\"", + []Link{{URI: "http://example.com/TheBook/chapter2", Rel: "previous", Params: map[string]string{"title": "previous chapter"}}}, + }, + { + ";\n rel=\"previous\"; title*=UTF-8'de'letztes%20Kapitel,\n ;\n rel=\"next\"; title*=UTF-8'de'n%c3%a4chstes%20Kapitel", + []Link{ + {URI: "/TheBook/chapter2", Rel: "previous", Params: map[string]string{"title*": "UTF-8'de'letztes%20Kapitel"}}, + {URI: "/TheBook/chapter4", Rel: "next", Params: map[string]string{"title*": "UTF-8'de'n%c3%a4chstes%20Kapitel"}}, + }, + }, +} + +func (s *LinkSuite) TestLinkParsing(c *C) { + for i, t := range linkParseTests { + res, err := Parse(t.in) + c.Assert(err, IsNil, Commentf("test %d", i)) + c.Assert(res, DeepEquals, t.out, Commentf("test %d", i)) + } +} + +var linkFormatTests = []struct { + in []Link + out string +}{ + { + []Link{{URI: "/a", Rel: "foo", Params: map[string]string{"a": "b", "c": "d"}}}, + `; rel="foo"; a="b"; c="d"`, + }, + { + []Link{ + {URI: "/b", Rel: "foo", Params: map[string]string{"a": "b", "c": "d"}}, + {URI: "/a", Rel: "foo", Params: map[string]string{"a": "b", "c": "d"}}}, + `; rel="foo"; a="b"; c="d", ; rel="foo"; a="b"; c="d"`, + }, +} + +func (s *LinkSuite) TestLinkGeneration(c *C) { + for i, t := range linkFormatTests { + res := Format(t.in) + cm := Commentf("test %d", i) + c.Assert(res, Equals, t.out, cm) + parsed, err := Parse(res) + c.Assert(err, IsNil, cm) + c.Assert(parsed, DeepEquals, t.in, cm) + } +} diff --git a/vendor/github.com/vmware/govmomi/.travis.yml b/vendor/github.com/vmware/govmomi/.travis.yml new file mode 100644 index 000000000..3432c4d82 --- /dev/null +++ b/vendor/github.com/vmware/govmomi/.travis.yml @@ -0,0 +1,10 @@ +sudo: false +language: go +go: 1.4 + +before_install: + - go get golang.org/x/tools/cmd/vet + - go get golang.org/x/tools/cmd/goimports + +script: + - make check test diff --git a/vendor/github.com/vmware/govmomi/CHANGELOG.md b/vendor/github.com/vmware/govmomi/CHANGELOG.md new file mode 100644 index 000000000..29f28ce52 --- /dev/null +++ b/vendor/github.com/vmware/govmomi/CHANGELOG.md @@ -0,0 +1,69 @@ +# changelog + +### 0.3.0 (2016-01-16) + +* Add object.VirtualNicManager wrapper + +* Add object.HostVsanSystem wrapper + +* Add object.HostSystem methods: EnterMaintenanceMode, ExitMaintenanceMode, Disconnect, Reconnect + +* Add finder.Folder method + +* Add object.Common.Destroy method + +* Add object.ComputeResource.Reconfigure method + +* Add license.AssignmentManager wrapper + +* Add object.HostFirewallSystem wrapper + +* Add object.DiagnosticManager wrapper + +* Add LoginExtensionByCertificate support + +* Add object.ExtensionManager + +... + +### 0.2.0 (2015-09-15) + +* Update to vim25/6.0 API + +* Stop returning children from `ManagedObjectList` + + Change the `ManagedObjectList` function in the `find` package to only + return the managed objects specified by the path argument and not their + children. The original behavior was used by govc's `ls` command and is + now available in the newly added function `ManagedObjectListChildren`. + +* Add retry functionality to vim25 package + +* Change finder functions to no longer take varargs + + The `find` package had functions to return a list of objects, given a + variable number of patterns. 
This makes it impossible to distinguish which + patterns produced results and which ones didn't. + + In particular for govc, where multiple arguments can be passed from the + command line, it is useful to let the user know which ones produce results + and which ones don't. + + To evaluate multiple patterns, the user should call the find functions + multiple times (either serially or in parallel). + +* Make optional boolean fields pointers (`vim25/types`). + + False is the zero value of a boolean field, which means they are not serialized + if the field is marked "omitempty". If the field is a pointer instead, the zero + value will be the nil pointer, and both true and false values are serialized. + +### 0.1.0 (2015-03-17) + +Prior to this version the API of this library was in flux. + +Notable changes w.r.t. the state of this library before March 2015 are: + +* All functions that may execute a request take a `context.Context` parameter. +* The `vim25` package contains a minimal client implementation. +* The property collector and its convenience functions live in the `property` package. diff --git a/vendor/github.com/vmware/govmomi/CONTRIBUTORS b/vendor/github.com/vmware/govmomi/CONTRIBUTORS new file mode 100644 index 000000000..8813540f1 --- /dev/null +++ b/vendor/github.com/vmware/govmomi/CONTRIBUTORS @@ -0,0 +1,26 @@ +# People who can (and typically have) contributed to this repository. +# +# Please keep the list sorted. +# + +Alvaro Miranda +Amit Bathla +Bob Killen +Bruce Downs +Clint Greenwood +Danny Lockard +Doug MacEachern +Eloy Coto +Eric Yutao +Faiyaz Ahmed +Gavin Gray +Gavrie Philipson +Louie Jiang +Mevan Samaratunga +Pieter Noordhuis +runner.mei +S.Çağlar Onur +Takaaki Furukawa +Yang Yang +Yuya Kusakabe +Zee Yang diff --git a/vendor/github.com/vmware/govmomi/LICENSE.txt b/vendor/github.com/vmware/govmomi/LICENSE.txt new file mode 100644 index 000000000..d64569567 --- /dev/null +++ b/vendor/github.com/vmware/govmomi/LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. 
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/vmware/govmomi/Makefile b/vendor/github.com/vmware/govmomi/Makefile new file mode 100644 index 000000000..5a4264732 --- /dev/null +++ b/vendor/github.com/vmware/govmomi/Makefile @@ -0,0 +1,20 @@ +.PHONY: test + +all: check test + +check: goimports govet + +goimports: + @echo checking go imports... + @! goimports -d . 2>&1 | egrep -v '^$$' + +govet: + @echo checking go vet... + @go tool vet -structtags=false -methods=false . + +test: + go get + go test -v $(TEST_OPTS) ./... + +install: + go install github.com/vmware/govmomi/govc diff --git a/vendor/github.com/vmware/govmomi/README.md b/vendor/github.com/vmware/govmomi/README.md new file mode 100644 index 000000000..6831d64be --- /dev/null +++ b/vendor/github.com/vmware/govmomi/README.md @@ -0,0 +1,36 @@ +[![Build Status](https://travis-ci.org/vmware/govmomi.png?branch=master)](https://travis-ci.org/vmware/govmomi) + +# govmomi + +A Go library for interacting with VMware vSphere APIs (ESXi and/or vCenter). 
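+
+As a minimal sketch (the host and credentials below are placeholders),
+connecting and logging in looks like:
+
+```go
+package main
+
+import (
+	"fmt"
+	"net/url"
+
+	"github.com/vmware/govmomi"
+	"golang.org/x/net/context"
+)
+
+func main() {
+	// Placeholder endpoint; NewClient logs in when the URL carries credentials.
+	u, err := url.Parse("https://user:pass@vcenter.example.com/sdk")
+	if err != nil {
+		panic(err)
+	}
+
+	ctx := context.Background()
+	c, err := govmomi.NewClient(ctx, u, true) // true skips certificate verification
+	if err != nil {
+		panic(err)
+	}
+	defer c.Logout(ctx)
+
+	fmt.Println("connected; vCenter:", c.IsVC())
+}
+```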
+ +For `govc`, a CLI built on top of govmomi, check out the [govc](./govc) directory. + +## Compatibility + +This library is built for and tested against ESXi and vCenter 5.5 and 6.0. + +If you're able to use it against older versions of ESXi and/or vCenter, please +leave a note and we'll include it in this compatibility list. + +## Documentation + +The APIs exposed by this library very closely follow the API described in the [VMware vSphere API Reference Documentation][apiref]. +Refer to this document to become familiar with the upstream API. + +The code in the `govmomi` package is a wrapper for the code that is generated from the vSphere API description. +It primarily provides convenience functions for working with the vSphere API. +See [godoc.org][godoc] for documentation. + +[apiref]:http://pubs.vmware.com/vsphere-60/index.jsp#com.vmware.wssdk.apiref.doc/right-pane.html +[godoc]:http://godoc.org/github.com/vmware/govmomi + +## Status + +Changes to the API are subject to [semantic versioning](http://semver.org). + +Refer to the [CHANGELOG](CHANGELOG.md) for version to version changes. + +## License + +govmomi is available under the [Apache 2 license](LICENSE). diff --git a/vendor/github.com/vmware/govmomi/client.go b/vendor/github.com/vmware/govmomi/client.go new file mode 100644 index 000000000..56b5191cf --- /dev/null +++ b/vendor/github.com/vmware/govmomi/client.go @@ -0,0 +1,167 @@ +/* +Copyright (c) 2014-2015 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +/* +This package is the root package of the govmomi library. + +The library is structured as follows: + +Package vim25 + +The minimal usable functionality is available through the vim25 package. +It contains subpackages that contain generated types, managed objects, and all +available methods. The vim25 package is entirely independent of the other +packages in the govmomi tree -- it has no dependencies on its peers. + +The vim25 package itself contains a client structure that is +passed around throughout the entire library. It abstracts a session and its +immutable state. See the vim25 package for more information. + +Package session + +The session package contains an abstraction for the session manager that allows +a user to login and logout. It also provides access to the current session +(i.e. to determine if the user is in fact logged in) + +Package object + +The object package contains wrappers for a selection of managed objects. The +constructors of these objects all take a *vim25.Client, which they pass along +to derived objects, if applicable. + +Package govc + +The govc package contains the govc CLI. The code in this tree is not intended +to be used as a library. Any functionality that govc contains that _could_ be +used as a library function but isn't, _should_ live in a root level package. + +Other packages + +Other packages, such as "event", "guest", or "license", provide wrappers for +the respective subsystems. They are typically not needed in normal workflows so +are kept outside the object package. 
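+
+As a rough sketch of how these layers fit together (error handling elided;
+ctx and u are assumed to be a context.Context and a parsed SDK URL):
+
+	c, _ := govmomi.NewClient(ctx, u, true) // session plus embedded *vim25.Client
+	f := find.NewFinder(c.Client, true)     // inventory-path based lookup
+	dc, _ := f.DefaultDatacenter(ctx)
+	f.SetDatacenter(dc)
+	vms, _ := f.VirtualMachineList(ctx, "*")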
+*/
+package govmomi

+import (
+	"crypto/tls"
+	"net/url"
+
+	"github.com/vmware/govmomi/property"
+	"github.com/vmware/govmomi/session"
+	"github.com/vmware/govmomi/vim25"
+	"github.com/vmware/govmomi/vim25/soap"
+	"github.com/vmware/govmomi/vim25/types"
+	"golang.org/x/net/context"
+)
+
+type Client struct {
+	*vim25.Client
+
+	SessionManager *session.Manager
+}
+
+// NewClient creates a new client from a URL. If the URL contains user
+// information, the client authenticates with the server using that
+// username/password before returning.
+func NewClient(ctx context.Context, u *url.URL, insecure bool) (*Client, error) {
+	soapClient := soap.NewClient(u, insecure)
+	vimClient, err := vim25.NewClient(ctx, soapClient)
+	if err != nil {
+		return nil, err
+	}
+
+	c := &Client{
+		Client:         vimClient,
+		SessionManager: session.NewManager(vimClient),
+	}
+
+	// Only login if the URL contains user information.
+	if u.User != nil {
+		err = c.Login(ctx, u.User)
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	return c, nil
+}
+
+// NewClientWithCertificate creates a new client from a URL. If the URL
+// contains user information, the client authenticates with the server using
+// the certificate before returning.
+func NewClientWithCertificate(ctx context.Context, u *url.URL, insecure bool, cert tls.Certificate) (*Client, error) {
+	soapClient := soap.NewClient(u, insecure)
+	soapClient.SetCertificate(cert)
+	vimClient, err := vim25.NewClient(ctx, soapClient)
+	if err != nil {
+		return nil, err
+	}
+
+	c := &Client{
+		Client:         vimClient,
+		SessionManager: session.NewManager(vimClient),
+	}
+
+	if u.User != nil {
+		err = c.LoginExtensionByCertificate(ctx, u.User.Username(), "")
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	return c, nil
+}
+
+// Login dispatches to the SessionManager.
+func (c *Client) Login(ctx context.Context, u *url.Userinfo) error {
+	return c.SessionManager.Login(ctx, u)
+}
+
+// LoginExtensionByCertificate dispatches to the SessionManager.
+func (c *Client) LoginExtensionByCertificate(ctx context.Context, key string, locale string) error {
+	return c.SessionManager.LoginExtensionByCertificate(ctx, key, locale)
+}
+
+// Logout dispatches to the SessionManager.
+func (c *Client) Logout(ctx context.Context) error {
+	// Close any idle connections after logging out.
+	defer c.Client.CloseIdleConnections()
+	return c.SessionManager.Logout(ctx)
+}
+
+// PropertyCollector returns the session's default property collector.
+func (c *Client) PropertyCollector() *property.Collector {
+	return property.DefaultCollector(c.Client)
+}
+
+// RetrieveOne dispatches to the Retrieve function on the default property collector.
+func (c *Client) RetrieveOne(ctx context.Context, obj types.ManagedObjectReference, p []string, dst interface{}) error {
+	return c.PropertyCollector().RetrieveOne(ctx, obj, p, dst)
+}
+
+// Retrieve dispatches to the Retrieve function on the default property collector.
+func (c *Client) Retrieve(ctx context.Context, objs []types.ManagedObjectReference, p []string, dst interface{}) error {
+	return c.PropertyCollector().Retrieve(ctx, objs, p, dst)
+}
+
+// Wait dispatches to property.Wait.
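+//
+// As a sketch of its use (vm is assumed to be an *object.VirtualMachine),
+// this blocks until the virtual machine reports itself powered off:
+//
+//	err := c.Wait(ctx, vm.Reference(), []string{"runtime.powerState"},
+//		func(pc []types.PropertyChange) bool {
+//			for _, change := range pc {
+//				if change.Val == types.VirtualMachinePowerStatePoweredOff {
+//					return true
+//				}
+//			}
+//			return false
+//		})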
+func (c *Client) Wait(ctx context.Context, obj types.ManagedObjectReference, ps []string, f func([]types.PropertyChange) bool) error { + return property.Wait(ctx, c.PropertyCollector(), obj, ps, f) +} + +// IsVC returns true if we are connected to a vCenter +func (c *Client) IsVC() bool { + return c.Client.IsVC() +} diff --git a/vendor/github.com/vmware/govmomi/client_test.go b/vendor/github.com/vmware/govmomi/client_test.go new file mode 100644 index 000000000..558adc551 --- /dev/null +++ b/vendor/github.com/vmware/govmomi/client_test.go @@ -0,0 +1,138 @@ +/* +Copyright (c) 2014-2015 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package govmomi + +import ( + "errors" + "net/http" + "net/url" + "testing" + + "github.com/vmware/govmomi/test" + "github.com/vmware/govmomi/vim25/mo" + "github.com/vmware/govmomi/vim25/types" + "golang.org/x/net/context" +) + +func TestNewClient(t *testing.T) { + u := test.URL() + if u == nil { + t.SkipNow() + } + + c, err := NewClient(context.Background(), u, true) + if err != nil { + t.Fatal(err) + } + + f := func() error { + var x mo.Folder + err = mo.RetrieveProperties(context.Background(), c, c.ServiceContent.PropertyCollector, c.ServiceContent.RootFolder, &x) + if err != nil { + return err + } + if len(x.Name) == 0 { + return errors.New("empty response") + } + return nil + } + + // check cookie is valid with an sdk request + if err := f(); err != nil { + t.Fatal(err) + } + + // check cookie is valid with a non-sdk request + u.User = nil // turn off Basic auth + u.Path = "/folder" + r, err := c.Client.Get(u.String()) + if err != nil { + t.Fatal(err) + } + if r.StatusCode != http.StatusOK { + t.Fatal(r) + } + + // sdk request should fail w/o a valid cookie + c.Client.Jar = nil + if err := f(); err == nil { + t.Fatal("should fail") + } + + // invalid login + u.Path = "/sdk" + u.User = url.UserPassword("ENOENT", "EINVAL") + _, err = NewClient(context.Background(), u, true) + if err == nil { + t.Fatal("should fail") + } +} + +func TestInvalidSdk(t *testing.T) { + u := test.URL() + if u == nil { + t.SkipNow() + } + + // a URL other than a valid /sdk should error, not panic + u.Path = "/mob" + _, err := NewClient(context.Background(), u, true) + if err == nil { + t.Fatal("should fail") + } +} + +func TestPropertiesN(t *testing.T) { + u := test.URL() + if u == nil { + t.SkipNow() + } + + c, err := NewClient(context.Background(), u, true) + if err != nil { + t.Fatal(err) + } + + var f mo.Folder + err = c.RetrieveOne(context.Background(), c.ServiceContent.RootFolder, nil, &f) + if err != nil { + t.Fatal(err) + } + + var dc mo.Datacenter + err = c.RetrieveOne(context.Background(), f.ChildEntity[0], nil, &dc) + if err != nil { + t.Fatal(err) + } + + var folderReferences = []types.ManagedObjectReference{ + dc.DatastoreFolder, + dc.HostFolder, + dc.NetworkFolder, + dc.VmFolder, + } + + var folders []mo.Folder + err = c.Retrieve(context.Background(), folderReferences, []string{"name"}, &folders) + if err != nil { + t.Fatal(err) + } + + if len(folders) 
!= len(folderReferences) { + t.Fatalf("Expected %d, got %d", len(folderReferences), len(folders)) + } +} diff --git a/vendor/github.com/vmware/govmomi/event/history_collector.go b/vendor/github.com/vmware/govmomi/event/history_collector.go new file mode 100644 index 000000000..543c1ea8c --- /dev/null +++ b/vendor/github.com/vmware/govmomi/event/history_collector.go @@ -0,0 +1,75 @@ +/* +Copyright (c) 2015 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package event + +import ( + "github.com/vmware/govmomi/object" + "github.com/vmware/govmomi/vim25" + "github.com/vmware/govmomi/vim25/methods" + "github.com/vmware/govmomi/vim25/mo" + "github.com/vmware/govmomi/vim25/types" + "golang.org/x/net/context" +) + +type HistoryCollector struct { + *object.HistoryCollector +} + +func NewHistoryCollector(c *vim25.Client, ref types.ManagedObjectReference) *HistoryCollector { + return &HistoryCollector{ + HistoryCollector: object.NewHistoryCollector(c, ref), + } +} + +func (h HistoryCollector) LatestPage(ctx context.Context) ([]types.BaseEvent, error) { + var o mo.EventHistoryCollector + + err := h.Properties(ctx, h.Reference(), []string{"latestPage"}, &o) + if err != nil { + return nil, err + } + + return o.LatestPage, nil +} + +func (h HistoryCollector) ReadNextEvents(ctx context.Context, maxCount int) ([]types.BaseEvent, error) { + req := types.ReadNextEvents{ + This: h.Reference(), + MaxCount: maxCount, + } + + res, err := methods.ReadNextEvents(ctx, h.Client(), &req) + if err != nil { + return nil, err + } + + return res.Returnval, nil +} + +func (h HistoryCollector) ReadPreviousEvents(ctx context.Context, maxCount int) ([]types.BaseEvent, error) { + req := types.ReadPreviousEvents{ + This: h.Reference(), + MaxCount: maxCount, + } + + res, err := methods.ReadPreviousEvents(ctx, h.Client(), &req) + if err != nil { + return nil, err + } + + return res.Returnval, nil +} diff --git a/vendor/github.com/vmware/govmomi/event/manager.go b/vendor/github.com/vmware/govmomi/event/manager.go new file mode 100644 index 000000000..c137f4a44 --- /dev/null +++ b/vendor/github.com/vmware/govmomi/event/manager.go @@ -0,0 +1,161 @@ +/* +Copyright (c) 2015 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/
+
+package event
+
+import (
+	"reflect"
+	"sync"
+
+	"github.com/vmware/govmomi/property"
+	"github.com/vmware/govmomi/vim25"
+	"github.com/vmware/govmomi/vim25/methods"
+	"github.com/vmware/govmomi/vim25/mo"
+	"github.com/vmware/govmomi/vim25/types"
+	"golang.org/x/net/context"
+)
+
+type Manager struct {
+	reference types.ManagedObjectReference
+
+	c *vim25.Client
+
+	eventCategory   map[string]string
+	eventCategoryMu *sync.Mutex
+}
+
+func NewManager(c *vim25.Client) *Manager {
+	m := Manager{
+		reference: *c.ServiceContent.EventManager,
+
+		c: c,
+
+		eventCategory:   make(map[string]string),
+		eventCategoryMu: new(sync.Mutex),
+	}
+
+	return &m
+}
+
+func (m Manager) CreateCollectorForEvents(ctx context.Context, filter types.EventFilterSpec) (*HistoryCollector, error) {
+	req := types.CreateCollectorForEvents{
+		This:   m.reference,
+		Filter: filter,
+	}
+
+	res, err := methods.CreateCollectorForEvents(ctx, m.c, &req)
+	if err != nil {
+		return nil, err
+	}
+
+	return NewHistoryCollector(m.c, res.Returnval), nil
+}
+
+func (m Manager) LogUserEvent(ctx context.Context, entity types.ManagedObjectReference, msg string) error {
+	req := types.LogUserEvent{
+		This:   m.reference,
+		Entity: entity,
+		Msg:    msg,
+	}
+
+	_, err := methods.LogUserEvent(ctx, m.c, &req)
+	if err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func (m Manager) PostEvent(ctx context.Context, eventToPost types.BaseEvent, taskInfo types.TaskInfo) error {
+	req := types.PostEvent{
+		This:        m.reference,
+		EventToPost: eventToPost,
+		TaskInfo:    &taskInfo,
+	}
+
+	_, err := methods.PostEvent(ctx, m.c, &req)
+	if err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func (m Manager) QueryEvents(ctx context.Context, filter types.EventFilterSpec) ([]types.BaseEvent, error) {
+	req := types.QueryEvents{
+		This:   m.reference,
+		Filter: filter,
+	}
+
+	res, err := methods.QueryEvents(ctx, m.c, &req)
+	if err != nil {
+		return nil, err
+	}
+
+	return res.Returnval, nil
+}
+
+func (m Manager) RetrieveArgumentDescription(ctx context.Context, eventTypeID string) ([]types.EventArgDesc, error) {
+	req := types.RetrieveArgumentDescription{
+		This:        m.reference,
+		EventTypeId: eventTypeID,
+	}
+
+	res, err := methods.RetrieveArgumentDescription(ctx, m.c, &req)
+	if err != nil {
+		return nil, err
+	}
+
+	return res.Returnval, nil
+}
+
+func (m Manager) eventCategoryMap(ctx context.Context) (map[string]string, error) {
+	m.eventCategoryMu.Lock()
+	defer m.eventCategoryMu.Unlock()
+
+	if len(m.eventCategory) != 0 {
+		return m.eventCategory, nil
+	}
+
+	var o mo.EventManager
+
+	ps := []string{"description.eventInfo"}
+	err := property.DefaultCollector(m.c).RetrieveOne(ctx, m.reference, ps, &o)
+	if err != nil {
+		return nil, err
+	}
+
+	for _, info := range o.Description.EventInfo {
+		m.eventCategory[info.Key] = info.Category
+	}
+
+	return m.eventCategory, nil
+}
+
+// EventCategory returns the category for an event, such as "info" or "error".
+func (m Manager) EventCategory(ctx context.Context, event types.BaseEvent) (string, error) {
+	// Most of the event details are included in the Event.FullFormattedMessage, but the category
+	// is only available via the EventManager description.eventInfo property. The value of this
+	// property is static, so we fetch it once and cache it.
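+	// The categories typically seen are "info", "warning", "error", and
+	// "user"; a VmPoweredOnEvent, for example, maps to "info".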
+	eventCategory, err := m.eventCategoryMap(ctx)
+	if err != nil {
+		return "", err
+	}
+
+	class := reflect.TypeOf(event).Elem().Name()
+
+	return eventCategory[class], nil
+}
diff --git a/vendor/github.com/vmware/govmomi/event/sort.go b/vendor/github.com/vmware/govmomi/event/sort.go
new file mode 100644
index 000000000..ac87b0eda
--- /dev/null
+++ b/vendor/github.com/vmware/govmomi/event/sort.go
@@ -0,0 +1,45 @@
+/*
+Copyright (c) 2015 VMware, Inc. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package event
+
+import (
+	"sort"
+
+	"github.com/vmware/govmomi/vim25/types"
+)
+
+// Sort events in ascending order based on Key.
+// From the EventHistoryCollector.latestPage SDK docs:
+// The "oldest event" is the one with the smallest key (event ID).
+// The events in the returned page are unordered.
+func Sort(events []types.BaseEvent) {
+	sort.Sort(baseEvent(events))
+}
+
+type baseEvent []types.BaseEvent
+
+func (d baseEvent) Len() int {
+	return len(d)
+}
+
+func (d baseEvent) Less(i, j int) bool {
+	return d[i].GetEvent().Key < d[j].GetEvent().Key
+}
+
+func (d baseEvent) Swap(i, j int) {
+	d[i], d[j] = d[j], d[i]
+}
diff --git a/vendor/github.com/vmware/govmomi/examples/datastores/main.go b/vendor/github.com/vmware/govmomi/examples/datastores/main.go
new file mode 100644
index 000000000..5a76adfa9
--- /dev/null
+++ b/vendor/github.com/vmware/govmomi/examples/datastores/main.go
@@ -0,0 +1,180 @@
+/*
+Copyright (c) 2015 VMware, Inc. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+/*
+This example program shows how the `finder` and `property` packages can
+be used to navigate a vSphere inventory structure using govmomi.
+*/
+
+package main
+
+import (
+	"flag"
+	"fmt"
+	"net/url"
+	"os"
+	"strings"
+	"text/tabwriter"
+
+	"github.com/vmware/govmomi"
+	"github.com/vmware/govmomi/find"
+	"github.com/vmware/govmomi/property"
+	"github.com/vmware/govmomi/units"
+	"github.com/vmware/govmomi/vim25/mo"
+	"github.com/vmware/govmomi/vim25/types"
+	"golang.org/x/net/context"
+)
+
+// GetEnvString returns string from environment variable.
+func GetEnvString(v string, def string) string {
+	r := os.Getenv(v)
+	if r == "" {
+		return def
+	}
+
+	return r
+}
+
+// GetEnvBool returns boolean from environment variable.
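+// Only the first character of the value is inspected: values beginning with
+// "t", "y", or "1" (case-insensitively) read as true, any other non-empty
+// value reads as false, and an unset or empty variable yields def.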
+func GetEnvBool(v string, def bool) bool { + r := os.Getenv(v) + if r == "" { + return def + } + + switch strings.ToLower(r[0:1]) { + case "t", "y", "1": + return true + } + + return false +} + +const ( + envURL = "GOVMOMI_URL" + envUserName = "GOVMOMI_USERNAME" + envPassword = "GOVMOMI_PASSWORD" + envInsecure = "GOVMOMI_INSECURE" +) + +var urlDescription = fmt.Sprintf("ESX or vCenter URL [%s]", envURL) +var urlFlag = flag.String("url", GetEnvString(envURL, "https://username:password@host/sdk"), urlDescription) + +var insecureDescription = fmt.Sprintf("Don't verify the server's certificate chain [%s]", envInsecure) +var insecureFlag = flag.Bool("insecure", GetEnvBool(envInsecure, false), insecureDescription) + +func processOverride(u *url.URL) { + envUsername := os.Getenv(envUserName) + envPassword := os.Getenv(envPassword) + + // Override username if provided + if envUsername != "" { + var password string + var ok bool + + if u.User != nil { + password, ok = u.User.Password() + } + + if ok { + u.User = url.UserPassword(envUsername, password) + } else { + u.User = url.User(envUsername) + } + } + + // Override password if provided + if envPassword != "" { + var username string + + if u.User != nil { + username = u.User.Username() + } + + u.User = url.UserPassword(username, envPassword) + } +} + +func exit(err error) { + fmt.Fprintf(os.Stderr, "Error: %s\n", err) + os.Exit(1) +} + +func main() { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + flag.Parse() + + // Parse URL from string + u, err := url.Parse(*urlFlag) + if err != nil { + exit(err) + } + + // Override username and/or password as required + processOverride(u) + + // Connect and log in to ESX or vCenter + c, err := govmomi.NewClient(ctx, u, *insecureFlag) + if err != nil { + exit(err) + } + + f := find.NewFinder(c.Client, true) + + // Find one and only datacenter + dc, err := f.DefaultDatacenter(ctx) + if err != nil { + exit(err) + } + + // Make future calls local to this datacenter + f.SetDatacenter(dc) + + // Find datastores in datacenter + dss, err := f.DatastoreList(ctx, "*") + if err != nil { + exit(err) + } + + pc := property.DefaultCollector(c.Client) + + // Convert datastores into list of references + var refs []types.ManagedObjectReference + for _, ds := range dss { + refs = append(refs, ds.Reference()) + } + + // Retrieve summary property for all datastores + var dst []mo.Datastore + err = pc.Retrieve(ctx, refs, []string{"summary"}, &dst) + if err != nil { + exit(err) + } + + // Print summary per datastore + tw := tabwriter.NewWriter(os.Stdout, 2, 0, 2, ' ', 0) + fmt.Fprintf(tw, "Name:\tType:\tCapacity:\tFree:\n") + for _, ds := range dst { + fmt.Fprintf(tw, "%s\t", ds.Summary.Name) + fmt.Fprintf(tw, "%s\t", ds.Summary.Type) + fmt.Fprintf(tw, "%s\t", units.ByteSize(ds.Summary.Capacity)) + fmt.Fprintf(tw, "%s\t", units.ByteSize(ds.Summary.FreeSpace)) + fmt.Fprintf(tw, "\n") + } + tw.Flush() +} diff --git a/vendor/github.com/vmware/govmomi/find/error.go b/vendor/github.com/vmware/govmomi/find/error.go new file mode 100644 index 000000000..684526dab --- /dev/null +++ b/vendor/github.com/vmware/govmomi/find/error.go @@ -0,0 +1,64 @@ +/* +Copyright (c) 2015 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package find + +import "fmt" + +type NotFoundError struct { + kind string + path string +} + +func (e *NotFoundError) Error() string { + return fmt.Sprintf("%s '%s' not found", e.kind, e.path) +} + +type MultipleFoundError struct { + kind string + path string +} + +func (e *MultipleFoundError) Error() string { + return fmt.Sprintf("path '%s' resolves to multiple %ss", e.path, e.kind) +} + +type DefaultNotFoundError struct { + kind string +} + +func (e *DefaultNotFoundError) Error() string { + return fmt.Sprintf("no default %s found", e.kind) +} + +type DefaultMultipleFoundError struct { + kind string +} + +func (e DefaultMultipleFoundError) Error() string { + return fmt.Sprintf("default %s resolves to multiple instances, please specify", e.kind) +} + +func toDefaultError(err error) error { + switch e := err.(type) { + case *NotFoundError: + return &DefaultNotFoundError{e.kind} + case *MultipleFoundError: + return &DefaultMultipleFoundError{e.kind} + default: + return err + } +} diff --git a/vendor/github.com/vmware/govmomi/find/finder.go b/vendor/github.com/vmware/govmomi/find/finder.go new file mode 100644 index 000000000..165a330c1 --- /dev/null +++ b/vendor/github.com/vmware/govmomi/find/finder.go @@ -0,0 +1,711 @@ +/* +Copyright (c) 2014-2015 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package find + +import ( + "errors" + "path" + + "github.com/vmware/govmomi/list" + "github.com/vmware/govmomi/object" + "github.com/vmware/govmomi/property" + "github.com/vmware/govmomi/vim25" + "github.com/vmware/govmomi/vim25/mo" + "golang.org/x/net/context" +) + +type Finder struct { + client *vim25.Client + recurser list.Recurser + + dc *object.Datacenter + folders *object.DatacenterFolders +} + +func NewFinder(client *vim25.Client, all bool) *Finder { + f := &Finder{ + client: client, + recurser: list.Recurser{ + Collector: property.DefaultCollector(client), + All: all, + }, + } + + return f +} + +func (f *Finder) SetDatacenter(dc *object.Datacenter) *Finder { + f.dc = dc + f.folders = nil + return f +} + +type findRelativeFunc func(ctx context.Context) (object.Reference, error) + +func (f *Finder) find(ctx context.Context, fn findRelativeFunc, tl bool, arg string) ([]list.Element, error) { + root := list.Element{ + Path: "/", + Object: object.NewRootFolder(f.client), + } + + parts := list.ToParts(arg) + + if len(parts) > 0 { + switch parts[0] { + case "..": // Not supported; many edge case, little value + return nil, errors.New("cannot traverse up a tree") + case ".": // Relative to whatever + pivot, err := fn(ctx) + if err != nil { + return nil, err + } + + mes, err := mo.Ancestors(ctx, f.client, f.client.ServiceContent.PropertyCollector, pivot.Reference()) + if err != nil { + return nil, err + } + + for _, me := range mes { + // Skip root entity in building inventory path. + if me.Parent == nil { + continue + } + root.Path = path.Join(root.Path, me.Name) + } + + root.Object = pivot + parts = parts[1:] + } + } + + f.recurser.TraverseLeafs = tl + es, err := f.recurser.Recurse(ctx, root, parts) + if err != nil { + return nil, err + } + + return es, nil +} + +func (f *Finder) datacenter() (*object.Datacenter, error) { + if f.dc == nil { + return nil, errors.New("please specify a datacenter") + } + + return f.dc, nil +} + +func (f *Finder) dcFolders(ctx context.Context) (*object.DatacenterFolders, error) { + if f.folders != nil { + return f.folders, nil + } + + dc, err := f.datacenter() + if err != nil { + return nil, err + } + + folders, err := dc.Folders(ctx) + if err != nil { + return nil, err + } + + f.folders = folders + + return f.folders, nil +} + +func (f *Finder) dcReference(_ context.Context) (object.Reference, error) { + dc, err := f.datacenter() + if err != nil { + return nil, err + } + + return dc, nil +} + +func (f *Finder) vmFolder(ctx context.Context) (object.Reference, error) { + folders, err := f.dcFolders(ctx) + if err != nil { + return nil, err + } + + return folders.VmFolder, nil +} + +func (f *Finder) hostFolder(ctx context.Context) (object.Reference, error) { + folders, err := f.dcFolders(ctx) + if err != nil { + return nil, err + } + + return folders.HostFolder, nil +} + +func (f *Finder) datastoreFolder(ctx context.Context) (object.Reference, error) { + folders, err := f.dcFolders(ctx) + if err != nil { + return nil, err + } + + return folders.DatastoreFolder, nil +} + +func (f *Finder) networkFolder(ctx context.Context) (object.Reference, error) { + folders, err := f.dcFolders(ctx) + if err != nil { + return nil, err + } + + return folders.NetworkFolder, nil +} + +func (f *Finder) rootFolder(_ context.Context) (object.Reference, error) { + return object.NewRootFolder(f.client), nil +} + +func (f *Finder) managedObjectList(ctx context.Context, path string, tl bool) ([]list.Element, error) { + fn := f.rootFolder + + if f.dc != nil { + fn = f.dcReference + 
} + + if len(path) == 0 { + path = "." + } + + return f.find(ctx, fn, tl, path) +} + +func (f *Finder) ManagedObjectList(ctx context.Context, path string) ([]list.Element, error) { + return f.managedObjectList(ctx, path, false) +} + +func (f *Finder) ManagedObjectListChildren(ctx context.Context, path string) ([]list.Element, error) { + return f.managedObjectList(ctx, path, true) +} + +func (f *Finder) DatacenterList(ctx context.Context, path string) ([]*object.Datacenter, error) { + es, err := f.find(ctx, f.rootFolder, false, path) + if err != nil { + return nil, err + } + + var dcs []*object.Datacenter + for _, e := range es { + ref := e.Object.Reference() + if ref.Type == "Datacenter" { + dcs = append(dcs, object.NewDatacenter(f.client, ref)) + } + } + + if len(dcs) == 0 { + return nil, &NotFoundError{"datacenter", path} + } + + return dcs, nil +} + +func (f *Finder) Datacenter(ctx context.Context, path string) (*object.Datacenter, error) { + dcs, err := f.DatacenterList(ctx, path) + if err != nil { + return nil, err + } + + if len(dcs) > 1 { + return nil, &MultipleFoundError{"datacenter", path} + } + + return dcs[0], nil +} + +func (f *Finder) DefaultDatacenter(ctx context.Context) (*object.Datacenter, error) { + dc, err := f.Datacenter(ctx, "*") + if err != nil { + return nil, toDefaultError(err) + } + + return dc, nil +} + +func (f *Finder) DatacenterOrDefault(ctx context.Context, path string) (*object.Datacenter, error) { + if path != "" { + dc, err := f.Datacenter(ctx, path) + if err != nil { + return nil, err + } + return dc, nil + } + + return f.DefaultDatacenter(ctx) +} + +func (f *Finder) DatastoreList(ctx context.Context, path string) ([]*object.Datastore, error) { + es, err := f.find(ctx, f.datastoreFolder, false, path) + if err != nil { + return nil, err + } + + var dss []*object.Datastore + for _, e := range es { + ref := e.Object.Reference() + if ref.Type == "Datastore" { + ds := object.NewDatastore(f.client, ref) + ds.InventoryPath = e.Path + + dss = append(dss, ds) + } + } + + if len(dss) == 0 { + return nil, &NotFoundError{"datastore", path} + } + + return dss, nil +} + +func (f *Finder) Datastore(ctx context.Context, path string) (*object.Datastore, error) { + dss, err := f.DatastoreList(ctx, path) + if err != nil { + return nil, err + } + + if len(dss) > 1 { + return nil, &MultipleFoundError{"datastore", path} + } + + return dss[0], nil +} + +func (f *Finder) DefaultDatastore(ctx context.Context) (*object.Datastore, error) { + ds, err := f.Datastore(ctx, "*") + if err != nil { + return nil, toDefaultError(err) + } + + return ds, nil +} + +func (f *Finder) DatastoreOrDefault(ctx context.Context, path string) (*object.Datastore, error) { + if path != "" { + ds, err := f.Datastore(ctx, path) + if err != nil { + return nil, err + } + return ds, nil + } + + return f.DefaultDatastore(ctx) +} + +func (f *Finder) ComputeResourceList(ctx context.Context, path string) ([]*object.ComputeResource, error) { + es, err := f.find(ctx, f.hostFolder, false, path) + if err != nil { + return nil, err + } + + var crs []*object.ComputeResource + for _, e := range es { + var cr *object.ComputeResource + + switch o := e.Object.(type) { + case mo.ComputeResource, mo.ClusterComputeResource: + cr = object.NewComputeResource(f.client, o.Reference()) + default: + continue + } + + cr.InventoryPath = e.Path + crs = append(crs, cr) + } + + if len(crs) == 0 { + return nil, &NotFoundError{"compute resource", path} + } + + return crs, nil +} + +func (f *Finder) ComputeResource(ctx context.Context, path 
string) (*object.ComputeResource, error) { + crs, err := f.ComputeResourceList(ctx, path) + if err != nil { + return nil, err + } + + if len(crs) > 1 { + return nil, &MultipleFoundError{"compute resource", path} + } + + return crs[0], nil +} + +func (f *Finder) DefaultComputeResource(ctx context.Context) (*object.ComputeResource, error) { + cr, err := f.ComputeResource(ctx, "*") + if err != nil { + return nil, toDefaultError(err) + } + + return cr, nil +} + +func (f *Finder) ComputeResourceOrDefault(ctx context.Context, path string) (*object.ComputeResource, error) { + if path != "" { + cr, err := f.ComputeResource(ctx, path) + if err != nil { + return nil, err + } + return cr, nil + } + + return f.DefaultComputeResource(ctx) +} + +func (f *Finder) ClusterComputeResourceList(ctx context.Context, path string) ([]*object.ClusterComputeResource, error) { + es, err := f.find(ctx, f.hostFolder, false, path) + if err != nil { + return nil, err + } + + var ccrs []*object.ClusterComputeResource + for _, e := range es { + var ccr *object.ClusterComputeResource + + switch o := e.Object.(type) { + case mo.ClusterComputeResource: + ccr = object.NewClusterComputeResource(f.client, o.Reference()) + default: + continue + } + + ccr.InventoryPath = e.Path + ccrs = append(ccrs, ccr) + } + + if len(ccrs) == 0 { + return nil, &NotFoundError{"cluster", path} + } + + return ccrs, nil +} + +func (f *Finder) ClusterComputeResource(ctx context.Context, path string) (*object.ClusterComputeResource, error) { + ccrs, err := f.ClusterComputeResourceList(ctx, path) + if err != nil { + return nil, err + } + + if len(ccrs) > 1 { + return nil, &MultipleFoundError{"cluster", path} + } + + return ccrs[0], nil +} + +func (f *Finder) HostSystemList(ctx context.Context, path string) ([]*object.HostSystem, error) { + es, err := f.find(ctx, f.hostFolder, false, path) + if err != nil { + return nil, err + } + + var hss []*object.HostSystem + for _, e := range es { + var hs *object.HostSystem + + switch o := e.Object.(type) { + case mo.HostSystem: + hs = object.NewHostSystem(f.client, o.Reference()) + + hs.InventoryPath = e.Path + hss = append(hss, hs) + case mo.ComputeResource, mo.ClusterComputeResource: + cr := object.NewComputeResource(f.client, o.Reference()) + + cr.InventoryPath = e.Path + + hosts, err := cr.Hosts(ctx) + if err != nil { + return nil, err + } + + hss = append(hss, hosts...) 
+ } + } + + if len(hss) == 0 { + return nil, &NotFoundError{"host", path} + } + + return hss, nil +} + +func (f *Finder) HostSystem(ctx context.Context, path string) (*object.HostSystem, error) { + hss, err := f.HostSystemList(ctx, path) + if err != nil { + return nil, err + } + + if len(hss) > 1 { + return nil, &MultipleFoundError{"host", path} + } + + return hss[0], nil +} + +func (f *Finder) DefaultHostSystem(ctx context.Context) (*object.HostSystem, error) { + hs, err := f.HostSystem(ctx, "*/*") + if err != nil { + return nil, toDefaultError(err) + } + + return hs, nil +} + +func (f *Finder) HostSystemOrDefault(ctx context.Context, path string) (*object.HostSystem, error) { + if path != "" { + hs, err := f.HostSystem(ctx, path) + if err != nil { + return nil, err + } + return hs, nil + } + + return f.DefaultHostSystem(ctx) +} + +func (f *Finder) NetworkList(ctx context.Context, path string) ([]object.NetworkReference, error) { + es, err := f.find(ctx, f.networkFolder, false, path) + if err != nil { + return nil, err + } + + var ns []object.NetworkReference + for _, e := range es { + ref := e.Object.Reference() + switch ref.Type { + case "Network": + r := object.NewNetwork(f.client, ref) + r.InventoryPath = e.Path + ns = append(ns, r) + case "DistributedVirtualPortgroup": + r := object.NewDistributedVirtualPortgroup(f.client, ref) + r.InventoryPath = e.Path + ns = append(ns, r) + case "DistributedVirtualSwitch", "VmwareDistributedVirtualSwitch": + r := object.NewDistributedVirtualSwitch(f.client, ref) + r.InventoryPath = e.Path + ns = append(ns, r) + } + } + + if len(ns) == 0 { + return nil, &NotFoundError{"network", path} + } + + return ns, nil +} + +func (f *Finder) Network(ctx context.Context, path string) (object.NetworkReference, error) { + networks, err := f.NetworkList(ctx, path) + if err != nil { + return nil, err + } + + if len(networks) > 1 { + return nil, &MultipleFoundError{"network", path} + } + + return networks[0], nil +} + +func (f *Finder) DefaultNetwork(ctx context.Context) (object.NetworkReference, error) { + network, err := f.Network(ctx, "*") + if err != nil { + return nil, toDefaultError(err) + } + + return network, nil +} + +func (f *Finder) NetworkOrDefault(ctx context.Context, path string) (object.NetworkReference, error) { + if path != "" { + network, err := f.Network(ctx, path) + if err != nil { + return nil, err + } + return network, nil + } + + return f.DefaultNetwork(ctx) +} + +func (f *Finder) ResourcePoolList(ctx context.Context, path string) ([]*object.ResourcePool, error) { + es, err := f.find(ctx, f.hostFolder, true, path) + if err != nil { + return nil, err + } + + var rps []*object.ResourcePool + for _, e := range es { + var rp *object.ResourcePool + + switch o := e.Object.(type) { + case mo.ResourcePool: + rp = object.NewResourcePool(f.client, o.Reference()) + rp.InventoryPath = e.Path + rps = append(rps, rp) + } + } + + if len(rps) == 0 { + return nil, &NotFoundError{"resource pool", path} + } + + return rps, nil +} + +func (f *Finder) ResourcePool(ctx context.Context, path string) (*object.ResourcePool, error) { + rps, err := f.ResourcePoolList(ctx, path) + if err != nil { + return nil, err + } + + if len(rps) > 1 { + return nil, &MultipleFoundError{"resource pool", path} + } + + return rps[0], nil +} + +func (f *Finder) DefaultResourcePool(ctx context.Context) (*object.ResourcePool, error) { + rp, err := f.ResourcePool(ctx, "*/Resources") + if err != nil { + return nil, toDefaultError(err) + } + + return rp, nil +} + +func (f *Finder) 
ResourcePoolOrDefault(ctx context.Context, path string) (*object.ResourcePool, error) { + if path != "" { + rp, err := f.ResourcePool(ctx, path) + if err != nil { + return nil, err + } + return rp, nil + } + + return f.DefaultResourcePool(ctx) +} + +func (f *Finder) VirtualMachineList(ctx context.Context, path string) ([]*object.VirtualMachine, error) { + es, err := f.find(ctx, f.vmFolder, false, path) + if err != nil { + return nil, err + } + + var vms []*object.VirtualMachine + for _, e := range es { + switch o := e.Object.(type) { + case mo.VirtualMachine: + vm := object.NewVirtualMachine(f.client, o.Reference()) + vm.InventoryPath = e.Path + vms = append(vms, vm) + } + } + + if len(vms) == 0 { + return nil, &NotFoundError{"vm", path} + } + + return vms, nil +} + +func (f *Finder) VirtualMachine(ctx context.Context, path string) (*object.VirtualMachine, error) { + vms, err := f.VirtualMachineList(ctx, path) + if err != nil { + return nil, err + } + + if len(vms) > 1 { + return nil, &MultipleFoundError{"vm", path} + } + + return vms[0], nil +} + +func (f *Finder) VirtualAppList(ctx context.Context, path string) ([]*object.VirtualApp, error) { + es, err := f.find(ctx, f.vmFolder, false, path) + if err != nil { + return nil, err + } + + var apps []*object.VirtualApp + for _, e := range es { + switch o := e.Object.(type) { + case mo.VirtualApp: + app := object.NewVirtualApp(f.client, o.Reference()) + app.InventoryPath = e.Path + apps = append(apps, app) + } + } + + if len(apps) == 0 { + return nil, &NotFoundError{"app", path} + } + + return apps, nil +} + +func (f *Finder) VirtualApp(ctx context.Context, path string) (*object.VirtualApp, error) { + apps, err := f.VirtualAppList(ctx, path) + if err != nil { + return nil, err + } + + if len(apps) > 1 { + return nil, &MultipleFoundError{"app", path} + } + + return apps[0], nil +} + +func (f *Finder) Folder(ctx context.Context, path string) (*object.Folder, error) { + mo, err := f.ManagedObjectList(ctx, path) + if err != nil { + return nil, err + } + + if len(mo) == 0 { + return nil, &NotFoundError{"folder", path} + } + + if len(mo) > 1 { + return nil, &MultipleFoundError{"folder", path} + } + + ref := mo[0].Object.Reference() + if ref.Type != "Folder" { + return nil, &NotFoundError{"folder", path} + } + + folder := object.NewFolder(f.client, ref) + + folder.InventoryPath = mo[0].Path + + return folder, nil +} diff --git a/vendor/github.com/vmware/govmomi/gen/Gemfile b/vendor/github.com/vmware/govmomi/gen/Gemfile new file mode 100644 index 000000000..26a4f1477 --- /dev/null +++ b/vendor/github.com/vmware/govmomi/gen/Gemfile @@ -0,0 +1,3 @@ +source "https://rubygems.org" + +gem "nokogiri" diff --git a/vendor/github.com/vmware/govmomi/gen/Gemfile.lock b/vendor/github.com/vmware/govmomi/gen/Gemfile.lock new file mode 100644 index 000000000..6cdeaae5a --- /dev/null +++ b/vendor/github.com/vmware/govmomi/gen/Gemfile.lock @@ -0,0 +1,12 @@ +GEM + remote: https://rubygems.org/ + specs: + mini_portile (0.6.0) + nokogiri (1.6.3.1) + mini_portile (= 0.6.0) + +PLATFORMS + ruby + +DEPENDENCIES + nokogiri diff --git a/vendor/github.com/vmware/govmomi/gen/gen.sh b/vendor/github.com/vmware/govmomi/gen/gen.sh new file mode 100644 index 000000000..8373fcf82 --- /dev/null +++ b/vendor/github.com/vmware/govmomi/gen/gen.sh @@ -0,0 +1,38 @@ +#!/bin/bash + +# Copyright (c) 2014 VMware, Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +set -e + +if [ ! -f rbvmomi/vmodl.db ]; then + git clone https://github.com/vmware/rbvmomi +fi + +dst=../vim25 + +pkgs=$(echo $dst/{types,methods,mo}) +mkdir -p $pkgs + +bundle exec ruby gen_from_wsdl.rb $dst +bundle exec ruby gen_from_vmodl.rb $dst + +for p in $pkgs +do + echo $p + cd $p + goimports -w *.go + go install + cd - +done diff --git a/vendor/github.com/vmware/govmomi/gen/gen_from_vmodl.rb b/vendor/github.com/vmware/govmomi/gen/gen_from_vmodl.rb new file mode 100644 index 000000000..b8fcc6cae --- /dev/null +++ b/vendor/github.com/vmware/govmomi/gen/gen_from_vmodl.rb @@ -0,0 +1,216 @@ +# Copyright (c) 2014 VMware, Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +$:.unshift(File.expand_path(File.dirname(__FILE__))) + +require "vim_wsdl" + +require "test/unit" + +PATH = File.expand_path("../rbvmomi", __FILE__) + +def read(file) + File.open(File.join(PATH, file)) +end + +class Prop + def initialize(vmodl, data) + @vmodl = vmodl + @data = data + end + + def slice? + @data["is-array"] + end + + def optional? + @data["is-optional"] + end + + def name + @data["name"] + end + + def var_field + n = name + n[0].capitalize + n[1..-1] + end + + def var_type_prefix(base=false) + if slice? + "[]" + else + if optional? && !base + "*" + else + "" + end + end + end + + def var_type + type = @data["wsdl_type"] + if @vmodl.managed_hash.has_key?(type) + type = "ManagedObjectReference" + end + + # Fix up type from vmodl + case type + when "TypeName", "MethodName" + type = "xsd:string" + when "ManagedObject" + type = "ManagedObjectReference" + when "xsd:anyType" + type = "AnyType" + end + + if type =~ /^xsd:(.*)$/ + type = $1 + case type + when "string" + when "int" + when "boolean" + type = "bool" + when "long" + type = "int64" + when "dateTime" + # var_type_prefix adds "*" for optional, non-slice fields + type = "time.Time" + when "byte" + when "double" + type = "float64" + when "float" + type = "float32" + when "short" + type = "int16" + when "base64Binary" + type = "[]byte" + else + raise "unknown type: %s" % type + end + else + if Peek.base?(type) + type = "Base" + type + base = true + end + type = "types."
+ type + end + + var_type_prefix(base) + type + end + + def var_tag + "mo:\"%s\"" % name + end + + def dump(io) + io.print "%s %s `%s`\n" % [var_field, var_type, var_tag] + end +end + +class Managed + def initialize(vmodl, name, data) + @vmodl = vmodl + @name = name + @data = data + end + + def name + @name + end + + def props + @data["props"].map do |p| + Prop.new(@vmodl, p) + end + end + + def dump(io) + include_ref_getter = false + + io.print "type %s struct {\n" % name + + case @data["wsdl_base"] + when nil, "ManagedObject", "View" + include_ref_getter = true + io.print "Self types.ManagedObjectReference\n\n" + else + io.print "%s\n\n" % @data["wsdl_base"] + end + + props.each do |p| + p.dump(io) + end + io.print "}\n\n" + + if include_ref_getter + io.print "func (m %s) Reference() types.ManagedObjectReference {\n" % [name] + io.print "return m.Self\n" + io.print "}\n\n" + end + end + + def dump_init(io) + io.print "func init() {\n" + io.print "t[\"%s\"] = reflect.TypeOf((*%s)(nil)).Elem()\n" % [name, name] + io.print "}\n\n" + end +end + +class Vmodl + def initialize(data) + @data = Marshal.load(data) + end + + def managed_hash + @managed_hash ||= begin + h = {} + managed.each do |m| + h[m.name] = m + end + h + end + end + + def managed + @data.map do |k,v| + next if !v.is_a?(Hash) + next if v["kind"] != "managed" + next if k =~ /^pbm/i + + Managed.new(self, k, v) + end.compact + end +end + +if !File.directory?(ARGV.first) + raise "first argument not a directory" +end + +wsdl = WSDL.new(WSDL.read "vim.wsdl") +wsdl.validate_assumptions! +wsdl.peek() + +File.open(File.join(ARGV.first, "mo/mo.go"), "w") do |io| + io.print WSDL.header("mo") + + vmodl = Vmodl.new(read "vmodl.db") + + vmodl. + managed. + sort_by { |m| m.name }. + each { |m| m.dump(io); m.dump_init(io); } +end + +exit(0) diff --git a/vendor/github.com/vmware/govmomi/gen/gen_from_wsdl.rb b/vendor/github.com/vmware/govmomi/gen/gen_from_wsdl.rb new file mode 100644 index 000000000..5004cfd0f --- /dev/null +++ b/vendor/github.com/vmware/govmomi/gen/gen_from_wsdl.rb @@ -0,0 +1,70 @@ +# Copyright (c) 2014 VMware, Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +$:.unshift(File.expand_path(File.dirname(__FILE__))) + +require "vim_wsdl" + +if !File.directory?(ARGV.first) + raise "first argument not a directory" +end + +wsdl = WSDL.new(WSDL.read "vim.wsdl") +wsdl.validate_assumptions! +wsdl.peek() + +ifs = Peek.types.keys.select { |name| Peek.base?(name) }.size() +puts "%d classes, %d interfaces" % [Peek.types.size(), ifs] + +File.open(File.join(ARGV.first, "types/enum.go"), "w") do |io| + io.print WSDL.header("types") + + wsdl. + types. + sort_by { |x| x.name }. + uniq { |x| x.name }. + select { |x| x.name[0] == x.name[0].upcase }. # Only capitalized methods for now... + select { |t| t.is_enum? }. + each { |e| e.dump(io); e.dump_init(io) } +end + +File.open(File.join(ARGV.first, "types/types.go"), "w") do |io| + io.print WSDL.header("types") + + wsdl. + types. + sort_by { |x| x.name }. + uniq { |x| x.name }. 
+ select { |x| x.name[0] == x.name[0].upcase }. # Only capitalized methods for now... + select { |t| !t.is_enum? }. + each { |e| e.dump(io); e.dump_init(io) } +end + +File.open(File.join(ARGV.first, "types/if.go"), "w") do |io| + io.print WSDL.header("types") + + Peek.dump_interfaces(io) +end + +File.open(File.join(ARGV.first, "methods/methods.go"), "w") do |io| + io.print WSDL.header("methods") + + wsdl. + operations. + sort_by { |x| x.name }. + select { |x| x.name[0] == x.name[0].upcase }. # Only capitalized methods for now... + each { |e| e.dump(io) } +end + +exit(0) diff --git a/vendor/github.com/vmware/govmomi/gen/vim_wsdl.rb b/vendor/github.com/vmware/govmomi/gen/vim_wsdl.rb new file mode 100644 index 000000000..285a30b85 --- /dev/null +++ b/vendor/github.com/vmware/govmomi/gen/vim_wsdl.rb @@ -0,0 +1,745 @@ +# Copyright (c) 2014-2015 VMware, Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +require "nokogiri" +require "test/unit" + +class Peek + class Type + attr_accessor :parent, :children, :klass + + def initialize(name) + @name = name + @children = [] + end + + def base? + return !children.empty? + end + end + + @@types = {} + @@refs = {} + @@enums = {} + + def self.types + return @@types + end + + def self.refs + return @@refs + end + + def self.enums + return @@enums + end + + def self.ref(type) + refs[type] = true + end + + def self.enum(type) + enums[type] = true + end + + def self.enum?(type) + enums[type] + end + + def self.register(name) + raise unless name + types[name] ||= Type.new(name) + end + + def self.base?(name) + return unless c = types[name] + c.base? + end + + def self.dump_interfaces(io) + types.keys.sort.each do |name| + next unless base?(name) + + types[name].klass.dump_interface(io, name) + end + end +end + +class EnumValue + def initialize(type, value) + @type = type + @value = value + end + + def type_name + @type.name + end + + def var_name + n = @type.name + v = var_value + if v == "" + n += "Null" + else + n += (v[0].capitalize + v[1..-1]) + end + + return n + end + + def var_value + @value + end + + def dump(io) + io.print "%s = %s(\"%s\")\n" % [var_name, type_name, var_value] + end +end + +class Simple + include Test::Unit::Assertions + + attr_accessor :name, :type + + def initialize(node) + @node = node + end + + def name + @name || @node["name"] + end + + def type + @type || @node["type"] + end + + def is_enum? + false + end + + def dump_init(io) + # noop + end + + def var_name + n = self.name + n = n[1..-1] if n[0] == "_" # Strip leading _ + n = n[0].capitalize + n[1..-1] # Capitalize + return n + end + + def vim_type? + ns, _ = self.type.split(":", 2) + ns == "vim25" + end + + def vim_type(t = self.type) + ns, t = t.split(":", 2) + raise if ns != "vim25" + t + end + + def base_type? + vim_type? && Peek.base?(vim_type) + end + + def enum_type? + vim_type? && Peek.enum?(vim_type) + end + + def any_type? + self.type == "xsd:anyType" + end + + def var_type + t = self.type + prefix = "" + + prefix += "[]" if slice? 
+ + if t =~ /^xsd:(.*)$/ + t = $1 + case t + when "string" + when "int" + when "boolean" + t = "bool" + if !slice? && optional? + prefix += "*" + self.need_omitempty = false + end + when "long" + t = "int64" + when "dateTime" + t = "time.Time" + if !slice? && optional? + prefix += "*" + self.need_omitempty = false + end + when "anyType" + t = "AnyType" + when "byte" + when "double" + t = "float64" + when "float" + t = "float32" + when "short" + t = "int16" + when "base64Binary" + t = "[]byte" + when "anyURI" + t = "url.URL" + else + raise "unknown type: %s" % t + end + else + t = vim_type + if base_type? + prefix += "Base" + else + prefix += "*" if !slice? && !enum_type? && optional? + end + end + + prefix + t + end + + def slice? + test_attr("maxOccurs", "unbounded") + end + + def optional? + test_attr("minOccurs", "0") + end + + def need_omitempty=(v) + @need_omitempty = v + end + + def need_omitempty? + var_type # HACK: trigger setting need_omitempty if necessary + if @need_omitempty.nil? + @need_omitempty = optional? + else + @need_omitempty + end + end + + def need_typeattr? + base_type? || any_type? + end + + protected + + def test_attr(attr, expected) + actual = @node.attr(attr) + if actual != nil + case actual + when expected + true + else + raise "%s=%s" % [attr, actual] + end + else + false + end + end +end + +class Element < Simple + def initialize(node) + super(node) + end + + def has_type? + !@node["type"].nil? + end + + def child + cs = @node.element_children + assert_equal 1, cs.length + assert_equal "complexType", cs.first.name + + t = ComplexType.new(cs.first) + t.name = self.name + t + end + + def dump(io) + if has_type? + io.print "type %s %s\n\n" % [name, var_type] + else + child.dump(io) + end + end + + def dump_init(io) + if has_type? + io.print "func init() {\n" + io.print "t[\"%s\"] = reflect.TypeOf((*%s)(nil)).Elem()\n" % [name, name] + io.print "}\n\n" + end + end + + def dump_field(io) + tag = name + tag += ",omitempty" if need_omitempty? + tag += ",typeattr" if need_typeattr? + io.print "%s %s `xml:\"%s\"`\n" % [var_name, var_type, tag] + end + + def peek(type=nil) + if has_type? + return if self.type =~ /^xsd:/ + + Peek.ref(vim_type) + else + child.peek() + end + end +end + +class Attribute < Simple + def dump_field(io) + tag = name + tag += ",omitempty" if need_omitempty? + tag += ",attr" + io.print "%s %s `xml:\"%s\"`\n" % [var_name, var_type, tag] + end +end + +class SimpleType < Simple + def is_enum?
+ true + end + + def dump(io) + enums = @node.xpath(".//xsd:enumeration").map do |n| + EnumValue.new(self, n["value"]) + end + + io.print "type %s string\n\n" % name + io.print "const (\n" + enums.each { |e| e.dump(io) } + io.print ")\n\n" + end + + def dump_init(io) + io.print "func init() {\n" + io.print "t[\"%s\"] = reflect.TypeOf((*%s)(nil)).Elem()\n" % [name, name] + io.print "}\n\n" + end + + def peek + Peek.enum(name) + end +end + +class ComplexType < Simple + class SimpleContent < Simple + def dump(io) + attr = Attribute.new(@node.at_xpath(".//xsd:attribute")) + attr.dump_field(io) + + # HACK DELUXE(PN) + extension = @node.at_xpath(".//xsd:extension") + type = extension["base"].split(":", 2)[1] + io.print "Value %s `xml:\",chardata\"`\n" % type + end + + def peek + end + end + + class ComplexContent < Simple + def base + extension = @node.at_xpath(".//xsd:extension") + assert_not_nil extension + + base = extension["base"] + assert_not_nil base + + vim_type(base) + end + + def dump(io) + Sequence.new(@node).dump(io, base) + end + + def dump_interface(io, name) + Sequence.new(@node).dump_interface(io, name) + end + + def peek + Sequence.new(@node).peek(base) + end + end + + class Sequence < Simple + def sequence + sequence = @node.at_xpath(".//xsd:sequence") + if sequence != nil + sequence.element_children.map do |n| + Element.new(n) + end + else + nil + end + end + + def dump(io, base = nil) + return unless elements = sequence + + io.print "%s\n\n" % base + + elements.each do |e| + e.dump_field(io) + end + end + + def dump_interface(io, name) + method = "Get%s() *%s" % [name, name] + io.print "func (b *%s) %s { return b }\n" % [name, method] + io.print "type Base%s interface {\n" % name + io.print "%s\n" % method + io.print "}\n\n" + io.print "func init() {\n" + io.print "t[\"Base%s\"] = reflect.TypeOf((*%s)(nil)).Elem()\n" % [name, name] + io.print "}\n\n" + end + + def peek(base = nil) + return unless elements = sequence + name = @node.attr("name") + return unless name + + elements.each do |e| + e.peek(name) + end + + c = Peek.register(name) + if base + c.parent = base + Peek.register(c.parent).children << name + end + end + end + + def klass + @klass ||= begin + cs = @node.element_children + if !cs.empty? + assert_equal 1, cs.length + + case cs.first.name + when "simpleContent" + SimpleContent.new(@node) + when "complexContent" + ComplexContent.new(@node) + when "sequence" + Sequence.new(@node) + else + raise "don't know what to do for element: %s..." % cs.first.name + end + end + end + end + + def dump_init(io) + io.print "func init() {\n" + io.print "t[\"%s\"] = reflect.TypeOf((*%s)(nil)).Elem()\n" % [name, name] + io.print "}\n\n" + end + + def dump(io) + io.print "type %s struct {\n" % name + klass.dump(io) if klass + io.print "}\n\n" + end + + def peek + Peek.register(name).klass = klass + klass.peek if klass + end +end + +class Schema + include Test::Unit::Assertions + + def initialize(xml, parent = nil) + @xml = Nokogiri::XML.parse(xml) + end + + def targetNamespace + @xml.root["targetNamespace"] + end + + # We have some assumptions about structure, make sure they hold. + def validate_assumptions! 
+ # Every enumeration is part of a restriction + @xml.xpath(".//xsd:enumeration").each do |n| + assert_equal "restriction", n.parent.name + end + + # See type == enum + @xml.xpath(".//xsd:restriction").each do |n| + # Every restriction has type xsd:string (it's an enum) + assert_equal "xsd:string", n["base"] + + # Every restriction is part of a simpleType + assert_equal "simpleType", n.parent.name + + # Every restriction is alone + assert_equal 1, n.parent.element_children.size + end + + # See type == complex_content + @xml.xpath(".//xsd:complexContent").each do |n| + # complexContent is child of complexType + assert_equal "complexType", n.parent.name + + end + + # See type == complex_type + @xml.xpath(".//xsd:complexType").each do |n| + cc = n.element_children + + # OK to have an empty complexType + next if cc.size == 0 + + # Require 1 element otherwise + assert_equal 1, cc.size + + case cc.first.name + when "complexContent" + # complexContent has 1 "extension" element + cc = cc.first.element_children + assert_equal 1, cc.size + assert_equal "extension", cc.first.name + + # extension has 1 "sequence" element + ec = cc.first.element_children + assert_equal 1, ec.size + assert_equal "sequence", ec.first.name + + # sequence has N "element" elements + sc = ec.first.element_children + assert sc.all? { |e| e.name == "element" } + when "simpleContent" + # simpleContent has 1 "extension" element + cc = cc.first.element_children + assert_equal 1, cc.size + assert_equal "extension", cc.first.name + + # extension has 1 or more "attribute" elements + ec = cc.first.element_children + assert_not_equal 0, ec.size + assert_equal "attribute", ec.first.name + when "sequence" + # sequence has N "element" elements + sc = cc.first.element_children + assert sc.all? { |e| e.name == "element" } + else + raise "unknown element: %s" % cc.first.name + end + end + + includes.each do |i| + i.validate_assumptions! + end + end + + def types + return to_enum(:types) unless block_given? + + includes.each do |i| + i.types do |t| + yield t + end + end + + @xml.root.children.each do |n| + case n.class.to_s + when "Nokogiri::XML::Text" + next + when "Nokogiri::XML::Element" + case n.name + when "include", "import" + next + when "element" + yield Element.new(n) + when "simpleType" + yield SimpleType.new(n) + when "complexType" + yield ComplexType.new(n) + else + raise "unknown child: %s" % n.name + end + else + raise "unknown type: %s" % n.class + end + end + end + + def includes + @includes ||= @xml.root.xpath(".//xmlns:include").map do |n| + Schema.new(WSDL.read n["schemaLocation"]) + end + end +end + + +class Operation + include Test::Unit::Assertions + + def initialize(wsdl, operation_node) + @wsdl = wsdl + @operation_node = operation_node + end + + def name + @operation_node["name"] + end + + def remove_ns(x) + ns, x = x.split(":", 2) + assert_equal "vim25", ns + x + end + + def find_type_for(type) + type = remove_ns(type) + + message = @wsdl.message(type) + assert_not_nil message + + part = message.at_xpath("./xmlns:part") + assert_not_nil part + + remove_ns(part["element"]) + end + + def input + type = @operation_node.at_xpath("./xmlns:input").attr("message") + find_type_for(type) + end + + def go_input + "types." + input + end + + def output + type = @operation_node.at_xpath("./xmlns:output").attr("message") + find_type_for(type) + end + + def go_output + "types."
+ output + end + + def dump(io) + io.print < /usr/local/bin/govc +chmod +x /usr/local/bin/govc +``` + +### Source + +You can install the latest govc version from source if you have the Go toolchain installed. + +```sh +go get github.com/vmware/govmomi/govc +``` + +(make sure `$GOPATH/bin` is in your `PATH`) + +## Usage + +govc exposes its functionality through subcommands. Option flags +to these subcommands are often shared. + +Common flags include: + +* `-u`: ESXi or vCenter URL (ex: `user:pass@host`) +* `-debug`: Trace requests and responses (to `~/.govmomi/debug`) + +Managed entities can be referred to by their absolute path or by their relative +path. For example, when specifying a datastore to use for a subcommand, you can +either specify it as `/mydatacenter/datastore/mydatastore`, or as +`mydatastore`. If you're not sure about the name of the datastore, or even the +full path to the datastore, you can specify a pattern to match. Both +`/*center/*/my*` (absolute) and `my*store` (relative) will resolve to the same +datastore, given there are no other datastores that match those globs. + +The relative path in this example can only be used if the command can +umambigously resolve a datacenter to use as origin for the query. If no +datacenter is specified, govc defaults to the only datacenter, if there is only +one. The datacenter itself can be specified as a pattern as well, enabling the +following arguments: `-dc='my*' -ds='*store'`. The datastore pattern is looked +up and matched relative to the datacenter which itself is specified as a +pattern. + +Besides specifying managed entities as arguments, they can also be specified +using environment variables. The following environment variables are used by govc +to set defaults: + +* `GOVC_USERNAME`: USERNAME to use. + +* `GOVC_PASSWORD`: PASSWORD to use. + +* `GOVC_URL`: URL of ESXi or vCenter instance to connect to. + + > The URL scheme defaults to `https` and the URL path defaults to `/sdk`. + > This means that specifying `user:pass@host` is equivalent to + > `https://user:pass@host/sdk`. + + > If password include special characters like `#` or `:` you can use + > `GOVC_USERNAME` and `GOVC_PASSWORD` to have a simple `GOVC_URL` + +* `GOVC_INSECURE`: Allow establishing insecure connections. + + > Use this option when the host you're connecting is using self-signed + > certificates, or is otherwise trusted. Set this option to `1` to enable. + +* `GOVC_DATACENTER` + +* `GOVC_DATASTORE` + +* `GOVC_NETWORK` + +* `GOVC_RESOURCE_POOL` + +* `GOVC_HOST` + +* `GOVC_GUEST_LOGIN`: Guest credentials for guest operations + +## Examples + +* About + ``` + $ export GOVC_URL="192.168.1.20" + $ export GOVC_USERNAME="domain\administrator" + $ export GOVC_PASSWORD="Password123#" + $ govc about + + Name: VMware vCenter Server + Vendor: VMware, Inc. + Version: 6.0.0 + Build: 2656761 + OS type: linux-x64 + API type: VirtualCenter + API version: 6.0 + Product ID: vpx + UUID: c9f0242f-10e3-4e10-85d7-5eea7c855188 + ``` + +* [Upload ssh public key to a VM](examples/lib/ssh.sh) + +* [Create and configure a vCenter VM](examples/vcsa.sh) + +## Projects using govc + +* [Kubernetes vSphere Provider](https://github.com/GoogleCloudPlatform/kubernetes/tree/master/cluster/vsphere) + +## License + +govc is available under the [Apache 2 license](../LICENSE). 
diff --git a/vendor/github.com/vmware/govmomi/govc/about/command.go b/vendor/github.com/vmware/govmomi/govc/about/command.go new file mode 100644 index 000000000..b993b38ba --- /dev/null +++ b/vendor/github.com/vmware/govmomi/govc/about/command.go @@ -0,0 +1,69 @@ +/* +Copyright (c) 2014 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package about + +import ( + "flag" + "fmt" + "os" + "text/tabwriter" + + "golang.org/x/net/context" + + "github.com/vmware/govmomi/govc/cli" + "github.com/vmware/govmomi/govc/flags" +) + +type about struct { + *flags.ClientFlag +} + +func init() { + cli.Register("about", &about{}) +} + +func (cmd *about) Register(ctx context.Context, f *flag.FlagSet) { + cmd.ClientFlag, ctx = flags.NewClientFlag(ctx) + cmd.ClientFlag.Register(ctx, f) +} + +func (cmd *about) Process(ctx context.Context) error { + if err := cmd.ClientFlag.Process(ctx); err != nil { + return err + } + return nil +} + +func (cmd *about) Run(ctx context.Context, f *flag.FlagSet) error { + c, err := cmd.Client() + if err != nil { + return err + } + a := c.ServiceContent.About + + tw := tabwriter.NewWriter(os.Stdout, 2, 0, 2, ' ', 0) + fmt.Fprintf(tw, "Name:\t%s\n", a.Name) + fmt.Fprintf(tw, "Vendor:\t%s\n", a.Vendor) + fmt.Fprintf(tw, "Version:\t%s\n", a.Version) + fmt.Fprintf(tw, "Build:\t%s\n", a.Build) + fmt.Fprintf(tw, "OS type:\t%s\n", a.OsType) + fmt.Fprintf(tw, "API type:\t%s\n", a.ApiType) + fmt.Fprintf(tw, "API version:\t%s\n", a.ApiVersion) + fmt.Fprintf(tw, "Product ID:\t%s\n", a.ProductLineId) + fmt.Fprintf(tw, "UUID:\t%s\n", a.InstanceUuid) + return tw.Flush() +} diff --git a/vendor/github.com/vmware/govmomi/govc/build.sh b/vendor/github.com/vmware/govmomi/govc/build.sh new file mode 100644 index 000000000..5e17a79a8 --- /dev/null +++ b/vendor/github.com/vmware/govmomi/govc/build.sh @@ -0,0 +1,32 @@ +#!/bin/bash -e + +git_version=$(git describe) +if git_status=$(git status --porcelain 2>/dev/null) && [ -n "${git_status}" ]; then + git_version="${git_version}-dirty" +fi + +ldflags="-X github.com/vmware/govmomi/govc/version.gitVersion=${git_version}" + +BUILD_OS=${BUILD_OS:-darwin linux windows freebsd} +BUILD_ARCH=${BUILD_ARCH:-386 amd64} + +for os in ${BUILD_OS}; do + export GOOS="${os}" + for arch in ${BUILD_ARCH}; do + export GOARCH="${arch}" + + out="govc_${os}_${arch}" + if [ "${os}" == "windows" ]; then + out="${out}.exe" + fi + + set -x + go build \ + -o="${out}" \ + -pkgdir="./_pkg" \ + -compiler='gc' \ + -ldflags="${ldflags}" \ + github.com/vmware/govmomi/govc + set +x + done +done diff --git a/vendor/github.com/vmware/govmomi/govc/cli/command.go b/vendor/github.com/vmware/govmomi/govc/cli/command.go new file mode 100644 index 000000000..77b17ed17 --- /dev/null +++ b/vendor/github.com/vmware/govmomi/govc/cli/command.go @@ -0,0 +1,142 @@ +/* +Copyright (c) 2014-2015 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package cli + +import ( + "flag" + "fmt" + "io/ioutil" + "os" + "sort" + "text/tabwriter" + + "golang.org/x/net/context" +) + +type HasFlags interface { + // Register may be called more than once and should be idempotent. + Register(ctx context.Context, f *flag.FlagSet) + + // Process may be called more than once and should be idempotent. + Process(ctx context.Context) error +} + +type Command interface { + HasFlags + + Run(ctx context.Context, f *flag.FlagSet) error +} + +func generalHelp() { + fmt.Fprintf(os.Stderr, "Usage of %s:\n", os.Args[0]) + + cmds := []string{} + for name := range commands { + cmds = append(cmds, name) + } + + sort.Strings(cmds) + + for _, name := range cmds { + fmt.Fprintf(os.Stderr, " %s\n", name) + } +} + +func commandHelp(name string, cmd Command, f *flag.FlagSet) { + type HasUsage interface { + Usage() string + } + + fmt.Fprintf(os.Stderr, "Usage: %s %s [OPTIONS]", os.Args[0], name) + if u, ok := cmd.(HasUsage); ok { + fmt.Fprintf(os.Stderr, " %s", u.Usage()) + } + fmt.Fprintf(os.Stderr, "\n") + + type HasDescription interface { + Description() string + } + + if u, ok := cmd.(HasDescription); ok { + fmt.Fprintf(os.Stderr, "%s\n", u.Description()) + } + + n := 0 + f.VisitAll(func(_ *flag.Flag) { + n += 1 + }) + + if n > 0 { + fmt.Fprintf(os.Stderr, "\nOptions:\n") + tw := tabwriter.NewWriter(os.Stderr, 2, 0, 2, ' ', 0) + f.VisitAll(func(f *flag.Flag) { + fmt.Fprintf(tw, "\t-%s=%s\t%s\n", f.Name, f.DefValue, f.Usage) + }) + tw.Flush() + } +} + +func Run(args []string) int { + var err error + + if len(args) == 0 { + generalHelp() + return 1 + } + + // Look up real command name in aliases table. + name, ok := aliases[args[0]] + if !ok { + name = args[0] + } + + cmd, ok := commands[name] + if !ok { + generalHelp() + return 1 + } + + fs := flag.NewFlagSet("", flag.ContinueOnError) + fs.SetOutput(ioutil.Discard) + + ctx := context.Background() + cmd.Register(ctx, fs) + + if err = fs.Parse(args[1:]); err != nil { + goto error + } + + if err = cmd.Process(ctx); err != nil { + goto error + } + + if err = cmd.Run(ctx, fs); err != nil { + goto error + } + + return 0 + +error: + if err == flag.ErrHelp { + commandHelp(args[0], cmd, fs) + } else { + fmt.Fprintf(os.Stderr, "%s: %s\n", os.Args[0], err) + } + + return 1 + +} diff --git a/vendor/github.com/vmware/govmomi/govc/cli/register.go b/vendor/github.com/vmware/govmomi/govc/cli/register.go new file mode 100644 index 000000000..a20fc3ad0 --- /dev/null +++ b/vendor/github.com/vmware/govmomi/govc/cli/register.go @@ -0,0 +1,33 @@ +/* +Copyright (c) 2014-2015 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package cli + +var commands = map[string]Command{} + +var aliases = map[string]string{} + +func Register(name string, c Command) { + commands[name] = c +} + +func Alias(name string, alias string) { + aliases[alias] = name +} + +func Commands() map[string]Command { + return commands +} diff --git a/vendor/github.com/vmware/govmomi/govc/cluster/add.go b/vendor/github.com/vmware/govmomi/govc/cluster/add.go new file mode 100644 index 000000000..027e0c254 --- /dev/null +++ b/vendor/github.com/vmware/govmomi/govc/cluster/add.go @@ -0,0 +1,129 @@ +/* +Copyright (c) 2015 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package cluster + +import ( + "flag" + "fmt" + + "github.com/vmware/govmomi/govc/cli" + "github.com/vmware/govmomi/govc/flags" + "github.com/vmware/govmomi/object" + "golang.org/x/net/context" +) + +type add struct { + *flags.DatacenterFlag + *flags.HostConnectFlag + + cluster string + connect bool + license string +} + +func init() { + cli.Register("cluster.add", &add{}) +} + +func (cmd *add) Register(ctx context.Context, f *flag.FlagSet) { + cmd.DatacenterFlag, ctx = flags.NewDatacenterFlag(ctx) + cmd.DatacenterFlag.Register(ctx, f) + + cmd.HostConnectFlag, ctx = flags.NewHostConnectFlag(ctx) + cmd.HostConnectFlag.Register(ctx, f) + + f.StringVar(&cmd.cluster, "cluster", "*", "Path to cluster") + + f.StringVar(&cmd.license, "license", "", "Assign license key") + + f.BoolVar(&cmd.connect, "connect", true, "Immediately connect to host") +} + +func (cmd *add) Process(ctx context.Context) error { + if err := cmd.DatacenterFlag.Process(ctx); err != nil { + return err + } + if err := cmd.HostConnectFlag.Process(ctx); err != nil { + return err + } + if cmd.HostName == "" { + return flag.ErrHelp + } + if cmd.UserName == "" { + return flag.ErrHelp + } + if cmd.Password == "" { + return flag.ErrHelp + } + return nil +} + +func (cmd *add) Description() string { + return `Add host to cluster. + +The host is added to the cluster specified by the 'cluster' flag.` +} + +func (cmd *add) Add(ctx context.Context, cluster *object.ClusterComputeResource) error { + spec := cmd.HostConnectSpec + + var license *string + if cmd.license != "" { + license = &cmd.license + } + + task, err := cluster.AddHost(ctx, spec, cmd.connect, license, nil) + if err != nil { + return err + } + + logger := cmd.ProgressLogger(fmt.Sprintf("adding %s to cluster %s... 
", spec.HostName, cluster.InventoryPath)) + defer logger.Wait() + + _, err = task.WaitForResult(ctx, logger) + return err +} + +func (cmd *add) Run(ctx context.Context, f *flag.FlagSet) error { + if f.NArg() != 0 { + return flag.ErrHelp + } + + finder, err := cmd.Finder() + if err != nil { + return err + } + + cluster, err := finder.ClusterComputeResource(ctx, cmd.cluster) + if err != nil { + return err + } + + err = cmd.Add(ctx, cluster) + + if err == nil { + return nil + } + + // Check if we failed due to SSLVerifyFault and -noverify is set + if err := cmd.AcceptThumbprint(err); err != nil { + return err + } + + // Accepted unverified thumbprint, try again + return cmd.Add(ctx, cluster) +} diff --git a/vendor/github.com/vmware/govmomi/govc/cluster/change.go b/vendor/github.com/vmware/govmomi/govc/cluster/change.go new file mode 100644 index 000000000..ea21a27dc --- /dev/null +++ b/vendor/github.com/vmware/govmomi/govc/cluster/change.go @@ -0,0 +1,109 @@ +/* +Copyright (c) 2015 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package cluster + +import ( + "flag" + "strings" + + "golang.org/x/net/context" + + "github.com/vmware/govmomi/govc/cli" + "github.com/vmware/govmomi/govc/flags" + "github.com/vmware/govmomi/vim25/types" +) + +type change struct { + *flags.DatacenterFlag + + types.ClusterConfigSpecEx +} + +func init() { + cli.Register("cluster.change", &change{}) +} + +func (cmd *change) Register(ctx context.Context, f *flag.FlagSet) { + cmd.DatacenterFlag, ctx = flags.NewDatacenterFlag(ctx) + cmd.DatacenterFlag.Register(ctx, f) + + cmd.DrsConfig = new(types.ClusterDrsConfigInfo) + cmd.DasConfig = new(types.ClusterDasConfigInfo) + cmd.VsanConfig = new(types.VsanClusterConfigInfo) + cmd.VsanConfig.DefaultConfig = new(types.VsanClusterConfigInfoHostDefaultInfo) + + // DRS + f.Var(flags.NewOptionalBool(&cmd.DrsConfig.Enabled), "drs-enabled", "Enable DRS") + + drsModes := []string{ + string(types.DrsBehaviorManual), + string(types.DrsBehaviorPartiallyAutomated), + string(types.DrsBehaviorFullyAutomated), + } + f.StringVar((*string)(&cmd.DrsConfig.DefaultVmBehavior), "drs-mode", "", + "DRS behavior for virtual machines: "+strings.Join(drsModes, ", ")) + + // HA + f.Var(flags.NewOptionalBool(&cmd.DasConfig.Enabled), "ha-enabled", "Enable HA") + + // vSAN + f.Var(flags.NewOptionalBool(&cmd.VsanConfig.Enabled), "vsan-enabled", "Enable vSAN") + f.Var(flags.NewOptionalBool(&cmd.VsanConfig.DefaultConfig.AutoClaimStorage), "vsan-autoclaim", "") +} + +func (cmd *change) Process(ctx context.Context) error { + if err := cmd.DatacenterFlag.Process(ctx); err != nil { + return err + } + return nil +} + +func (cmd *change) Usage() string { + return "CLUSTER..."
+} + +func (cmd *change) Description() string { + return `Change configuration of the given clusters.` +} + +func (cmd *change) Run(ctx context.Context, f *flag.FlagSet) error { + finder, err := cmd.Finder() + if err != nil { + return err + } + + for _, path := range f.Args() { + clusters, err := finder.ClusterComputeResourceList(ctx, path) + if err != nil { + return err + } + + for _, cluster := range clusters { + task, err := cluster.Reconfigure(ctx, &cmd.ClusterConfigSpecEx, true) + if err != nil { + return err + } + + _, err = task.WaitForResult(ctx, nil) + if err != nil { + return err + } + } + } + + return nil +} diff --git a/vendor/github.com/vmware/govmomi/govc/cluster/create.go b/vendor/github.com/vmware/govmomi/govc/cluster/create.go new file mode 100644 index 000000000..3d92f01f0 --- /dev/null +++ b/vendor/github.com/vmware/govmomi/govc/cluster/create.go @@ -0,0 +1,104 @@ +/* +Copyright (c) 2015 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package cluster + +import ( + "flag" + + "golang.org/x/net/context" + + "github.com/vmware/govmomi/govc/cli" + "github.com/vmware/govmomi/govc/flags" + "github.com/vmware/govmomi/object" + "github.com/vmware/govmomi/vim25/types" +) + +type create struct { + *flags.DatacenterFlag + + parent string + + types.ClusterConfigSpecEx +} + +func init() { + cli.Register("cluster.create", &create{}) +} + +func (cmd *create) Register(ctx context.Context, f *flag.FlagSet) { + cmd.DatacenterFlag, ctx = flags.NewDatacenterFlag(ctx) + cmd.DatacenterFlag.Register(ctx, f) + + f.StringVar(&cmd.parent, "parent", "", "Path to parent folder for the new cluster") +} + +func (cmd *create) Usage() string { + return "CLUSTER" +} + +func (cmd *create) Description() string { + return `Create CLUSTER in datacenter. + +The cluster is added to the folder specified by the 'parent' flag. 
If not given, +this defaults to the hosts folder in the specified or default datacenter.` +} + +func (cmd *create) Process(ctx context.Context) error { + if err := cmd.DatacenterFlag.Process(ctx); err != nil { + return err + } + return nil +} + +func (cmd *create) Run(ctx context.Context, f *flag.FlagSet) error { + var parent *object.Folder + + if f.NArg() != 1 { + return flag.ErrHelp + } + + if cmd.parent == "" { + dc, err := cmd.Datacenter() + if err != nil { + return err + } + + folders, err := dc.Folders(ctx) + if err != nil { + return err + } + + parent = folders.HostFolder + } else { + finder, err := cmd.Finder() + if err != nil { + return err + } + + parent, err = finder.Folder(ctx, cmd.parent) + if err != nil { + return err + } + } + + _, err := parent.CreateCluster(ctx, f.Arg(0), cmd.ClusterConfigSpecEx) + if err != nil { + return err + } + + return nil +} diff --git a/vendor/github.com/vmware/govmomi/govc/datacenter/create.go b/vendor/github.com/vmware/govmomi/govc/datacenter/create.go new file mode 100644 index 000000000..2b2065432 --- /dev/null +++ b/vendor/github.com/vmware/govmomi/govc/datacenter/create.go @@ -0,0 +1,85 @@ +/* +Copyright (c) 2014-2015 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package datacenter + +import ( + "flag" + + "github.com/vmware/govmomi/find" + "github.com/vmware/govmomi/govc/cli" + "github.com/vmware/govmomi/govc/flags" + "github.com/vmware/govmomi/object" + "golang.org/x/net/context" +) + +type create struct { + *flags.ClientFlag +} + +func init() { + cli.Register("datacenter.create", &create{}) +} + +func (cmd *create) Register(ctx context.Context, f *flag.FlagSet) { + cmd.ClientFlag, ctx = flags.NewClientFlag(ctx) + cmd.ClientFlag.Register(ctx, f) +} + +func (cmd *create) Usage() string { + return "[DATACENTER NAME]..." +} + +func (cmd *create) Process(ctx context.Context) error { + if err := cmd.ClientFlag.Process(ctx); err != nil { + return err + } + return nil +} + +func (cmd *create) Run(ctx context.Context, f *flag.FlagSet) error { + datacenters := f.Args() + if len(datacenters) < 1 { + return flag.ErrHelp + } + + client, err := cmd.ClientFlag.Client() + if err != nil { + return err + } + + finder := find.NewFinder(client, false) + rootFolder := object.NewRootFolder(client) + for _, datacenterToCreate := range datacenters { + _, err := finder.Datacenter(context.TODO(), datacenterToCreate) + if err == nil { + // The datacenter was found, no need to create it + continue + } + + switch err.(type) { + case *find.NotFoundError: + _, err = rootFolder.CreateDatacenter(context.TODO(), datacenterToCreate) + if err != nil { + return err + } + default: + return err + } + } + + return nil +} diff --git a/vendor/github.com/vmware/govmomi/govc/datacenter/destroy.go b/vendor/github.com/vmware/govmomi/govc/datacenter/destroy.go new file mode 100644 index 000000000..eed333bb0 --- /dev/null +++ b/vendor/github.com/vmware/govmomi/govc/datacenter/destroy.go @@ -0,0 +1,81 @@ +/* +Copyright (c) 2014-2015 VMware, Inc. All Rights Reserved. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package datacenter + +import ( + "flag" + + "github.com/vmware/govmomi/find" + "github.com/vmware/govmomi/govc/cli" + "github.com/vmware/govmomi/govc/flags" + "golang.org/x/net/context" +) + +type destroy struct { + *flags.ClientFlag +} + +func init() { + cli.Register("datacenter.destroy", &destroy{}) +} + +func (cmd *destroy) Register(ctx context.Context, f *flag.FlagSet) { + cmd.ClientFlag, ctx = flags.NewClientFlag(ctx) + cmd.ClientFlag.Register(ctx, f) +} + +func (cmd *destroy) Usage() string { + return "[DATACENTER NAME]..." +} + +func (cmd *destroy) Process(ctx context.Context) error { + if err := cmd.ClientFlag.Process(ctx); err != nil { + return err + } + return nil +} + +func (cmd *destroy) Run(ctx context.Context, f *flag.FlagSet) error { + if len(f.Args()) < 1 { + return flag.ErrHelp + } + datacentersToDestroy := f.Args() + + client, err := cmd.ClientFlag.Client() + if err != nil { + return err + } + + finder := find.NewFinder(client, false) + for _, datacenterToDestroy := range datacentersToDestroy { + foundDatacenters, err := finder.DatacenterList(context.TODO(), datacenterToDestroy) + if err != nil { + return err + } + for _, foundDatacenter := range foundDatacenters { + task, err := foundDatacenter.Destroy(context.TODO()) + if err != nil { + return err + } + + if err := task.Wait(context.TODO()); err != nil { + return err + } + } + } + return nil +} diff --git a/vendor/github.com/vmware/govmomi/govc/datastore/cp.go b/vendor/github.com/vmware/govmomi/govc/datastore/cp.go new file mode 100644 index 000000000..7e206ade4 --- /dev/null +++ b/vendor/github.com/vmware/govmomi/govc/datastore/cp.go @@ -0,0 +1,99 @@ +/* +Copyright (c) 2014-2015 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package datastore + +import ( + "errors" + "flag" + + "github.com/vmware/govmomi/govc/cli" + "github.com/vmware/govmomi/govc/flags" + "github.com/vmware/govmomi/object" + "golang.org/x/net/context" +) + +type cp struct { + *flags.OutputFlag + *flags.DatastoreFlag + + force bool +} + +func init() { + cli.Register("datastore.cp", &cp{}) +} + +func (cmd *cp) Register(ctx context.Context, f *flag.FlagSet) { + cmd.OutputFlag, ctx = flags.NewOutputFlag(ctx) + cmd.OutputFlag.Register(ctx, f) + + cmd.DatastoreFlag, ctx = flags.NewDatastoreFlag(ctx) + cmd.DatastoreFlag.Register(ctx, f) + + f.BoolVar(&cmd.force, "f", false, "If true, overwrite any identically named file at the destination") +} + +func (cmd *cp) Process(ctx context.Context) error { + if err := cmd.OutputFlag.Process(ctx); err != nil { + return err + } + if err := cmd.DatastoreFlag.Process(ctx); err != nil { + return err + } + return nil +} + +func (cmd *cp) Usage() string { + return "SRC DST" +} + +func (cmd *cp) Run(ctx context.Context, f *flag.FlagSet) error { + args := f.Args() + if len(args) != 2 { + return errors.New("SRC and DST arguments are required") + } + + c, err := cmd.Client() + if err != nil { + return err + } + + dc, err := cmd.Datacenter() + if err != nil { + return err + } + + // TODO: support cross-datacenter copy + + src, err := cmd.DatastorePath(args[0]) + if err != nil { + return err + } + + dst, err := cmd.DatastorePath(args[1]) + if err != nil { + return err + } + + m := object.NewFileManager(c) + task, err := m.CopyDatastoreFile(context.TODO(), src, dc, dst, dc, cmd.force) + if err != nil { + return err + } + + return task.Wait(context.TODO()) +} diff --git a/vendor/github.com/vmware/govmomi/govc/datastore/create.go b/vendor/github.com/vmware/govmomi/govc/datastore/create.go new file mode 100644 index 000000000..db58699e8 --- /dev/null +++ b/vendor/github.com/vmware/govmomi/govc/datastore/create.go @@ -0,0 +1,273 @@ +/* +Copyright (c) 2015 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package datastore + +import ( + "errors" + "flag" + "fmt" + "os" + "strings" + + "github.com/vmware/govmomi/govc/cli" + "github.com/vmware/govmomi/govc/flags" + "github.com/vmware/govmomi/object" + "github.com/vmware/govmomi/vim25/soap" + "github.com/vmware/govmomi/vim25/types" + "golang.org/x/net/context" +) + +type create struct { + *flags.HostSystemFlag + + // Generic options + Type typeFlag + Name string + Force bool + + // Options for NAS + RemoteHost string + RemotePath string + AccessMode string + UserName string + Password string + + // Options for VMFS + DiskCanonicalName string +} + +func init() { + cli.Register("datastore.create", &create{}) +} + +var nasTypes = []string{ + string(types.HostFileSystemVolumeFileSystemTypeNFS), + string(types.HostFileSystemVolumeFileSystemTypeNFS41), + string(types.HostFileSystemVolumeFileSystemTypeCIFS), +} + +var vmfsTypes = []string{ + string(types.HostFileSystemVolumeFileSystemTypeVMFS), +} + +var allTypes = []string{} + +func init() { + allTypes = append(allTypes, nasTypes...) + allTypes = append(allTypes, vmfsTypes...) +} + +type typeFlag string + +func (t *typeFlag) Set(s string) error { + s = strings.ToLower(s) + for _, e := range allTypes { + if s == strings.ToLower(e) { + *t = typeFlag(e) + return nil + } + } + + return fmt.Errorf("unknown type") +} + +func (t *typeFlag) String() string { + return string(*t) +} + +func (t *typeFlag) partOf(m []string) bool { + for _, e := range m { + if t.String() == e { + return true + } + } + return false +} + +func (t *typeFlag) IsNasType() bool { + return t.partOf(nasTypes) +} + +func (t *typeFlag) IsVmfsType() bool { + return t.partOf(vmfsTypes) +} + +func (cmd *create) Register(ctx context.Context, f *flag.FlagSet) { + cmd.HostSystemFlag, ctx = flags.NewHostSystemFlag(ctx) + cmd.HostSystemFlag.Register(ctx, f) + + modes := []string{ + string(types.HostMountModeReadOnly), + string(types.HostMountModeReadWrite), + } + + f.StringVar(&cmd.Name, "name", "", "Datastore name") + f.Var(&cmd.Type, "type", fmt.Sprintf("Datastore type (%s)", strings.Join(allTypes, "|"))) + f.BoolVar(&cmd.Force, "force", false, "Ignore DuplicateName error if datastore is already mounted on a host") + + // Options for NAS + f.StringVar(&cmd.RemoteHost, "remote-host", "", "Remote hostname of the NAS datastore") + f.StringVar(&cmd.RemotePath, "remote-path", "", "Remote path of the NFS mount point") + f.StringVar(&cmd.AccessMode, "mode", modes[0], + fmt.Sprintf("Access mode for the mount point (%s)", strings.Join(modes, "|"))) + f.StringVar(&cmd.UserName, "username", "", "Username to use when connecting (CIFS only)") + f.StringVar(&cmd.Password, "password", "", "Password to use when connecting (CIFS only)") + + // Options for VMFS + f.StringVar(&cmd.DiskCanonicalName, "disk", "", "Canonical name of disk (VMFS only)") +} + +func (cmd *create) Process(ctx context.Context) error { + if err := cmd.HostSystemFlag.Process(ctx); err != nil { + return err + } + return nil +} + +func (cmd *create) Usage() string { + return "HOST..." +} + +func (cmd *create) Description() string { + return `Create datastore on HOST. 
+ +Examples: + govc datastore.create -type nfs -name nfsDatastore -remote-host 10.143.2.232 -remote-path /share cluster1 + govc datastore.create -type vmfs -name localDatastore -disk=mpx.vmhba0:C0:T0:L0 cluster1` +} + +func (cmd *create) Run(ctx context.Context, f *flag.FlagSet) error { + hosts, err := cmd.HostSystems(f.Args()) + if err != nil { + return err + } + + switch { + case cmd.Type.IsNasType(): + return cmd.CreateNasDatastore(ctx, hosts) + case cmd.Type.IsVmfsType(): + return cmd.CreateVmfsDatastore(ctx, hosts) + default: + return fmt.Errorf("unhandled type %#v", cmd.Type) + } +} + +func (cmd *create) GetHostNasVolumeSpec() types.HostNasVolumeSpec { + s := types.HostNasVolumeSpec{ + LocalPath: cmd.Name, + Type: cmd.Type.String(), + RemoteHost: cmd.RemoteHost, + RemotePath: cmd.RemotePath, + AccessMode: cmd.AccessMode, + UserName: cmd.UserName, + Password: cmd.Password, + } + + return s +} + +func (cmd *create) CreateNasDatastore(ctx context.Context, hosts []*object.HostSystem) error { + object := types.ManagedObjectReference{ + Type: "Datastore", + Value: fmt.Sprintf("%s:%s", cmd.RemoteHost, cmd.RemotePath), + } + + spec := cmd.GetHostNasVolumeSpec() + + for _, host := range hosts { + ds, err := host.ConfigManager().DatastoreSystem(ctx) + if err != nil { + return err + } + + _, err = ds.CreateNasDatastore(ctx, spec) + if err != nil { + if soap.IsSoapFault(err) { + switch fault := soap.ToSoapFault(err).VimFault().(type) { + case types.PlatformConfigFault: + if len(fault.FaultMessage) != 0 { + return errors.New(fault.FaultMessage[0].Message) + } + case types.DuplicateName: + if cmd.Force && fault.Object == object { + fmt.Fprintf(os.Stderr, "%s: '%s' already mounted\n", + host.InventoryPath, cmd.Name) + continue + } + } + } + + return fmt.Errorf("%s: %s", host.InventoryPath, err) + } + } + + return nil +} + +func (cmd *create) CreateVmfsDatastore(ctx context.Context, hosts []*object.HostSystem) error { + for _, host := range hosts { + ds, err := host.ConfigManager().DatastoreSystem(ctx) + if err != nil { + return err + } + + // Find the specified disk + disks, err := ds.QueryAvailableDisksForVmfs(ctx) + if err != nil { + return err + } + + var disk *types.HostScsiDisk + for _, e := range disks { + if e.CanonicalName == cmd.DiskCanonicalName { + disk = &e + break + } + } + + if disk == nil { + return fmt.Errorf("no eligible disk found for name %#v", cmd.DiskCanonicalName) + } + + // Query for creation options and pick the right one + options, err := ds.QueryVmfsDatastoreCreateOptions(ctx, disk.DevicePath) + if err != nil { + return err + } + + var option *types.VmfsDatastoreOption + for _, e := range options { + if _, ok := e.Info.(*types.VmfsDatastoreAllExtentOption); ok { + option = &e + break + } + } + + if option == nil { + return fmt.Errorf("cannot use entire disk for datastore for name %#v", cmd.DiskCanonicalName) + } + + spec := *option.Spec.(*types.VmfsDatastoreCreateSpec) + spec.Vmfs.VolumeName = cmd.Name + _, err = ds.CreateVmfsDatastore(ctx, spec) + if err != nil { + return err + } + } + + return nil +} diff --git a/vendor/github.com/vmware/govmomi/govc/datastore/download.go b/vendor/github.com/vmware/govmomi/govc/datastore/download.go new file mode 100644 index 000000000..27bfa93c8 --- /dev/null +++ b/vendor/github.com/vmware/govmomi/govc/datastore/download.go @@ -0,0 +1,73 @@ +/* +Copyright (c) 2014-2015 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package datastore + +import ( + "errors" + "flag" + + "golang.org/x/net/context" + + "github.com/vmware/govmomi/govc/cli" + "github.com/vmware/govmomi/govc/flags" + "github.com/vmware/govmomi/vim25/soap" +) + +type download struct { + *flags.DatastoreFlag +} + +func init() { + cli.Register("datastore.download", &download{}) +} + +func (cmd *download) Register(ctx context.Context, f *flag.FlagSet) { + cmd.DatastoreFlag, ctx = flags.NewDatastoreFlag(ctx) + cmd.DatastoreFlag.Register(ctx, f) +} + +func (cmd *download) Process(ctx context.Context) error { + if err := cmd.DatastoreFlag.Process(ctx); err != nil { + return err + } + return nil +} + +func (cmd *download) Usage() string { + return "REMOTE LOCAL" +} + +func (cmd *download) Run(ctx context.Context, f *flag.FlagSet) error { + args := f.Args() + if len(args) != 2 { + return errors.New("invalid arguments") + } + + ds, err := cmd.Datastore() + if err != nil { + return err + } + + p := soap.DefaultDownload + if cmd.OutputFlag.TTY { + logger := cmd.ProgressLogger("Downloading... ") + p.Progress = logger + defer logger.Wait() + } + + return ds.DownloadFile(context.TODO(), args[0], args[1], &p) +} diff --git a/vendor/github.com/vmware/govmomi/govc/datastore/info.go b/vendor/github.com/vmware/govmomi/govc/datastore/info.go new file mode 100644 index 000000000..6e9dd7997 --- /dev/null +++ b/vendor/github.com/vmware/govmomi/govc/datastore/info.go @@ -0,0 +1,153 @@ +/* +Copyright (c) 2015 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
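+
+Usage sketch (not from upstream): with no PATH argument, Run falls back to
+"*" and reports every datastore; the standard -json output flag switches
+WriteResult to the full property set ("nfsDatastore" is an invented name):
+
+	govc datastore.info
+	govc datastore.info -json nfsDatastore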
+*/ + +package datastore + +import ( + "flag" + "fmt" + "io" + "os" + "text/tabwriter" + + "github.com/vmware/govmomi/govc/cli" + "github.com/vmware/govmomi/govc/flags" + "github.com/vmware/govmomi/object" + "github.com/vmware/govmomi/property" + "github.com/vmware/govmomi/vim25/mo" + "github.com/vmware/govmomi/vim25/types" + "golang.org/x/net/context" +) + +type info struct { + *flags.ClientFlag + *flags.OutputFlag + *flags.DatacenterFlag +} + +func init() { + cli.Register("datastore.info", &info{}) +} + +func (cmd *info) Register(ctx context.Context, f *flag.FlagSet) { + cmd.ClientFlag, ctx = flags.NewClientFlag(ctx) + cmd.ClientFlag.Register(ctx, f) + + cmd.OutputFlag, ctx = flags.NewOutputFlag(ctx) + cmd.OutputFlag.Register(ctx, f) + + cmd.DatacenterFlag, ctx = flags.NewDatacenterFlag(ctx) + cmd.DatacenterFlag.Register(ctx, f) +} + +func (cmd *info) Process(ctx context.Context) error { + if err := cmd.ClientFlag.Process(ctx); err != nil { + return err + } + if err := cmd.OutputFlag.Process(ctx); err != nil { + return err + } + if err := cmd.DatacenterFlag.Process(ctx); err != nil { + return err + } + return nil +} + +func (cmd *info) Usage() string { + return "[PATH]..." +} + +func (cmd *info) Run(ctx context.Context, f *flag.FlagSet) error { + c, err := cmd.Client() + if err != nil { + return err + } + + finder, err := cmd.Finder() + if err != nil { + return err + } + + args := f.Args() + if len(args) == 0 { + args = []string{"*"} + } + + var res infoResult + var props []string + + if cmd.OutputFlag.JSON { + props = nil // Load everything + } else { + props = []string{"info", "summary"} // Load summary + } + + for _, arg := range args { + objects, err := finder.DatastoreList(ctx, arg) + if err != nil { + return err + } + res.objects = append(res.objects, objects...) + } + + if len(res.objects) != 0 { + refs := make([]types.ManagedObjectReference, 0, len(res.objects)) + for _, o := range res.objects { + refs = append(refs, o.Reference()) + } + + pc := property.DefaultCollector(c) + err = pc.Retrieve(ctx, refs, props, &res.Datastores) + if err != nil { + return err + } + } + + return cmd.WriteResult(&res) +} + +type infoResult struct { + Datastores []mo.Datastore + objects []*object.Datastore +} + +func (r *infoResult) Write(w io.Writer) error { + // Maintain order via r.objects as Property collector does not always return results in order. + objects := make(map[types.ManagedObjectReference]mo.Datastore, len(r.Datastores)) + for _, o := range r.Datastores { + objects[o.Reference()] = o + } + + tw := tabwriter.NewWriter(os.Stdout, 2, 0, 2, ' ', 0) + + for _, o := range r.objects { + ds := objects[o.Reference()] + s := ds.Summary + fmt.Fprintf(tw, "Name:\t%s\n", s.Name) + fmt.Fprintf(tw, " Path:\t%s\n", o.InventoryPath) + fmt.Fprintf(tw, " Type:\t%s\n", s.Type) + fmt.Fprintf(tw, " URL:\t%s\n", s.Url) + fmt.Fprintf(tw, " Capacity:\t%.1f GB\n", float64(s.Capacity)/(1<<30)) + fmt.Fprintf(tw, " Free:\t%.1f GB\n", float64(s.FreeSpace)/(1<<30)) + + switch info := ds.Info.(type) { + case *types.NasDatastoreInfo: + fmt.Fprintf(tw, " Remote:\t%s:%s\n", info.Nas.RemoteHost, info.Nas.RemotePath) + } + } + + return tw.Flush() +} diff --git a/vendor/github.com/vmware/govmomi/govc/datastore/ls.go b/vendor/github.com/vmware/govmomi/govc/datastore/ls.go new file mode 100644 index 000000000..570c592a1 --- /dev/null +++ b/vendor/github.com/vmware/govmomi/govc/datastore/ls.go @@ -0,0 +1,232 @@ +/* +Copyright (c) 2014-2015 VMware, Inc. All Rights Reserved. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package datastore + +import ( + "encoding/json" + "flag" + "fmt" + "io" + "path" + "text/tabwriter" + + "github.com/vmware/govmomi/govc/cli" + "github.com/vmware/govmomi/govc/flags" + "github.com/vmware/govmomi/object" + "github.com/vmware/govmomi/units" + "github.com/vmware/govmomi/vim25/types" + "golang.org/x/net/context" +) + +type ls struct { + *flags.DatastoreFlag + *flags.OutputFlag + + long bool + slash bool + all bool +} + +func init() { + cli.Register("datastore.ls", &ls{}) +} + +func (cmd *ls) Register(ctx context.Context, f *flag.FlagSet) { + cmd.DatastoreFlag, ctx = flags.NewDatastoreFlag(ctx) + cmd.DatastoreFlag.Register(ctx, f) + + cmd.OutputFlag, ctx = flags.NewOutputFlag(ctx) + cmd.OutputFlag.Register(ctx, f) + + f.BoolVar(&cmd.long, "l", false, "Long listing format") + f.BoolVar(&cmd.slash, "p", false, "Write a slash (`/') after each filename if that file is a directory") + f.BoolVar(&cmd.all, "a", false, "Include entries whose names begin with a dot (.)") +} + +func (cmd *ls) Process(ctx context.Context) error { + if err := cmd.DatastoreFlag.Process(ctx); err != nil { + return err + } + if err := cmd.OutputFlag.Process(ctx); err != nil { + return err + } + return nil +} + +func (cmd *ls) Usage() string { + return "[FILE]..." 
+} + +func (cmd *ls) Run(ctx context.Context, f *flag.FlagSet) error { + ds, err := cmd.Datastore() + if err != nil { + return err + } + + b, err := ds.Browser(context.TODO()) + if err != nil { + return err + } + + args := f.Args() + if len(args) == 0 { + args = []string{""} + } + + result := &listOutput{ + rs: make([]types.HostDatastoreBrowserSearchResults, 0), + cmd: cmd, + } + + for _, arg := range args { + spec := types.HostDatastoreBrowserSearchSpec{ + MatchPattern: []string{"*"}, + } + + if cmd.long { + spec.Details = &types.FileQueryFlags{ + FileType: true, + FileSize: true, + FileOwner: types.NewBool(true), // TODO: omitempty is generated, but seems to be required + Modification: true, + } + } + + for i := 0; ; i++ { + r, err := cmd.ListPath(b, arg, spec) + if err != nil { + // Treat the argument as a match pattern if not found as directory + if i == 0 && types.IsFileNotFound(err) { + spec.MatchPattern[0] = path.Base(arg) + arg = path.Dir(arg) + continue + } + + return err + } + + // Treat an empty result against match pattern as file not found + if i == 1 && len(r.File) == 0 { + return fmt.Errorf("File %s/%s was not found", r.FolderPath, spec.MatchPattern[0]) + } + + result.add(r) + break + } + } + + return cmd.WriteResult(result) +} + +func (cmd *ls) ListPath(b *object.HostDatastoreBrowser, path string, spec types.HostDatastoreBrowserSearchSpec) (types.HostDatastoreBrowserSearchResults, error) { + var res types.HostDatastoreBrowserSearchResults + + path, err := cmd.DatastorePath(path) + if err != nil { + return res, err + } + + task, err := b.SearchDatastore(context.TODO(), path, &spec) + if err != nil { + return res, err + } + + info, err := task.WaitForResult(context.TODO(), nil) + if err != nil { + return res, err + } + + res = info.Result.(types.HostDatastoreBrowserSearchResults) + return res, nil +} + +type listOutput struct { + rs []types.HostDatastoreBrowserSearchResults + cmd *ls +} + +func (o *listOutput) add(r types.HostDatastoreBrowserSearchResults) { + res := r + res.File = nil + + for _, f := range r.File { + if f.GetFileInfo().Path[0] == '.' && !o.cmd.all { + continue + } + + if o.cmd.slash { + if d, ok := f.(*types.FolderFileInfo); ok { + d.Path += "/" + } + } + + res.File = append(res.File, f) + } + + o.rs = append(o.rs, res) +} + +// hasMultiplePaths returns whether or not the slice of search results contains +// results from more than one folder path. +func (o *listOutput) hasMultiplePaths() bool { + if len(o.rs) == 0 { + return false + } + + p := o.rs[0].FolderPath + + // Multiple paths if any entry is not equal to the first one. + for _, e := range o.rs { + if e.FolderPath != p { + return true + } + } + + return false +} + +func (o *listOutput) MarshalJSON() ([]byte, error) { + return json.Marshal(o.rs) +} + +func (o *listOutput) Write(w io.Writer) error { + // Only include path header if we're dealing with more than one path. 
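+	// This mirrors POSIX ls: a "dir:" header is printed per result set,
+	// separated by blank lines, only when more than one folder path is listed.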
+ includeHeader := false + if o.hasMultiplePaths() { + includeHeader = true + } + + tw := tabwriter.NewWriter(w, 3, 0, 2, ' ', 0) + for i, r := range o.rs { + if includeHeader { + if i > 0 { + fmt.Fprintf(tw, "\n") + } + fmt.Fprintf(tw, "%s:\n", r.FolderPath) + } + for _, file := range r.File { + info := file.GetFileInfo() + if o.cmd.long { + fmt.Fprintf(tw, "%s\t%s\t%s\n", units.ByteSize(info.FileSize), info.Modification.Format("Mon Jan 2 15:04:05 2006"), info.Path) + } else { + fmt.Fprintf(tw, "%s\n", info.Path) + } + } + } + tw.Flush() + return nil +} diff --git a/vendor/github.com/vmware/govmomi/govc/datastore/mkdir.go b/vendor/github.com/vmware/govmomi/govc/datastore/mkdir.go new file mode 100644 index 000000000..cfa7e3933 --- /dev/null +++ b/vendor/github.com/vmware/govmomi/govc/datastore/mkdir.go @@ -0,0 +1,95 @@ +/* +Copyright (c) 2014-2015 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package datastore + +import ( + "errors" + "flag" + + "github.com/vmware/govmomi/govc/cli" + "github.com/vmware/govmomi/govc/flags" + "github.com/vmware/govmomi/object" + "github.com/vmware/govmomi/vim25/soap" + "github.com/vmware/govmomi/vim25/types" + "golang.org/x/net/context" +) + +type mkdir struct { + *flags.DatastoreFlag + + createParents bool +} + +func init() { + cli.Register("datastore.mkdir", &mkdir{}) +} + +func (cmd *mkdir) Register(ctx context.Context, f *flag.FlagSet) { + cmd.DatastoreFlag, ctx = flags.NewDatastoreFlag(ctx) + cmd.DatastoreFlag.Register(ctx, f) + + f.BoolVar(&cmd.createParents, "p", false, "Create intermediate directories as needed") +} + +func (cmd *mkdir) Process(ctx context.Context) error { + if err := cmd.DatastoreFlag.Process(ctx); err != nil { + return err + } + return nil +} + +func (cmd *mkdir) Usage() string { + return "DIRECTORY" +} + +func (cmd *mkdir) Run(ctx context.Context, f *flag.FlagSet) error { + args := f.Args() + if len(args) == 0 { + return errors.New("missing operand") + } + + c, err := cmd.Client() + if err != nil { + return err + } + + dc, err := cmd.Datacenter() + if err != nil { + return err + } + + // TODO(PN): Accept multiple args + path, err := cmd.DatastorePath(args[0]) + if err != nil { + return err + } + + m := object.NewFileManager(c) + err = m.MakeDirectory(context.TODO(), path, dc, cmd.createParents) + + // ignore EEXIST if -p flag is given + if err != nil && cmd.createParents { + if soap.IsSoapFault(err) { + soapFault := soap.ToSoapFault(err) + if _, ok := soapFault.VimFault().(types.FileAlreadyExists); ok { + return nil + } + } + } + + return err +} diff --git a/vendor/github.com/vmware/govmomi/govc/datastore/mv.go b/vendor/github.com/vmware/govmomi/govc/datastore/mv.go new file mode 100644 index 000000000..023ef393e --- /dev/null +++ b/vendor/github.com/vmware/govmomi/govc/datastore/mv.go @@ -0,0 +1,92 @@ +/* +Copyright (c) 2014-2015 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package datastore + +import ( + "errors" + "flag" + + "github.com/vmware/govmomi/govc/cli" + "github.com/vmware/govmomi/govc/flags" + "github.com/vmware/govmomi/object" + "golang.org/x/net/context" +) + +type mv struct { + *flags.DatastoreFlag + + force bool +} + +func init() { + cli.Register("datastore.mv", &mv{}) +} + +func (cmd *mv) Register(ctx context.Context, f *flag.FlagSet) { + cmd.DatastoreFlag, ctx = flags.NewDatastoreFlag(ctx) + cmd.DatastoreFlag.Register(ctx, f) + + f.BoolVar(&cmd.force, "f", false, "If true, overwrite any identically named file at the destination") +} + +func (cmd *mv) Process(ctx context.Context) error { + if err := cmd.DatastoreFlag.Process(ctx); err != nil { + return err + } + return nil +} + +func (cmd *mv) Usage() string { + return "SRC DST" +} + +func (cmd *mv) Run(ctx context.Context, f *flag.FlagSet) error { + args := f.Args() + if len(args) != 2 { + return errors.New("SRC and DST arguments are required") + } + + c, err := cmd.Client() + if err != nil { + return err + } + + dc, err := cmd.Datacenter() + if err != nil { + return err + } + + // TODO: support cross-datacenter move + + src, err := cmd.DatastorePath(args[0]) + if err != nil { + return err + } + + dst, err := cmd.DatastorePath(args[1]) + if err != nil { + return err + } + + m := object.NewFileManager(c) + task, err := m.MoveDatastoreFile(context.TODO(), src, dc, dst, dc, cmd.force) + if err != nil { + return err + } + + return task.Wait(context.TODO()) +} diff --git a/vendor/github.com/vmware/govmomi/govc/datastore/remove.go b/vendor/github.com/vmware/govmomi/govc/datastore/remove.go new file mode 100644 index 000000000..772803446 --- /dev/null +++ b/vendor/github.com/vmware/govmomi/govc/datastore/remove.go @@ -0,0 +1,90 @@ +/* +Copyright (c) 2015 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
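+
+Usage sketch (not from upstream): Run unmounts the datastore from each HOST
+argument in turn, so several hosts may be named at once (host names here
+are invented):
+
+	govc datastore.remove -ds nfsDatastore host1 host2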
+*/ + +package datastore + +import ( + "flag" + + "golang.org/x/net/context" + + "github.com/vmware/govmomi/govc/cli" + "github.com/vmware/govmomi/govc/flags" +) + +type remove struct { + *flags.HostSystemFlag + *flags.DatastoreFlag +} + +func init() { + cli.Register("datastore.remove", &remove{}) +} + +func (cmd *remove) Register(ctx context.Context, f *flag.FlagSet) { + cmd.HostSystemFlag, ctx = flags.NewHostSystemFlag(ctx) + cmd.HostSystemFlag.Register(ctx, f) + + cmd.DatastoreFlag, ctx = flags.NewDatastoreFlag(ctx) + cmd.DatastoreFlag.Register(ctx, f) +} + +func (cmd *remove) Process(ctx context.Context) error { + if err := cmd.HostSystemFlag.Process(ctx); err != nil { + return err + } + if err := cmd.DatastoreFlag.Process(ctx); err != nil { + return err + } + return nil +} + +func (cmd *remove) Usage() string { + return "HOST..." +} + +func (cmd *remove) Description() string { + return `Remove datastore from HOST. +Example: +govc datastore.remove -ds nfsDatastore cluster1 +` +} + +func (cmd *remove) Run(ctx context.Context, f *flag.FlagSet) error { + ds, err := cmd.Datastore() + if err != nil { + return err + } + + hosts, err := cmd.HostSystems(f.Args()) + if err != nil { + return err + } + + for _, host := range hosts { + hds, err := host.ConfigManager().DatastoreSystem(ctx) + if err != nil { + return err + } + + err = hds.Remove(ctx, ds) + if err != nil { + return err + } + } + + return nil +} diff --git a/vendor/github.com/vmware/govmomi/govc/datastore/rm.go b/vendor/github.com/vmware/govmomi/govc/datastore/rm.go new file mode 100644 index 000000000..09ef57d2f --- /dev/null +++ b/vendor/github.com/vmware/govmomi/govc/datastore/rm.go @@ -0,0 +1,96 @@ +/* +Copyright (c) 2014-2015 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
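+
+Usage sketch (not from upstream): -f makes Run swallow FileNotFound task
+errors, and the datastore.delete alias registered below is equivalent
+(the path is invented):
+
+	govc datastore.rm -f old/disk.vmdk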
+*/ + +package datastore + +import ( + "errors" + "flag" + + "github.com/vmware/govmomi/govc/cli" + "github.com/vmware/govmomi/govc/flags" + "github.com/vmware/govmomi/object" + "github.com/vmware/govmomi/vim25/types" + "golang.org/x/net/context" +) + +type rm struct { + *flags.DatastoreFlag + + force bool +} + +func init() { + cli.Register("datastore.rm", &rm{}) + cli.Alias("datastore.rm", "datastore.delete") +} + +func (cmd *rm) Register(ctx context.Context, f *flag.FlagSet) { + cmd.DatastoreFlag, ctx = flags.NewDatastoreFlag(ctx) + cmd.DatastoreFlag.Register(ctx, f) + + f.BoolVar(&cmd.force, "f", false, "Force; ignore nonexistent files and arguments") +} + +func (cmd *rm) Process(ctx context.Context) error { + if err := cmd.DatastoreFlag.Process(ctx); err != nil { + return err + } + return nil +} + +func (cmd *rm) Usage() string { + return "FILE" +} + +func (cmd *rm) Run(ctx context.Context, f *flag.FlagSet) error { + args := f.Args() + if len(args) == 0 { + return errors.New("missing operand") + } + + c, err := cmd.Client() + if err != nil { + return err + } + + dc, err := cmd.Datacenter() + if err != nil { + return err + } + + // TODO(PN): Accept multiple args + path, err := cmd.DatastorePath(args[0]) + if err != nil { + return err + } + + m := object.NewFileManager(c) + task, err := m.DeleteDatastoreFile(context.TODO(), path, dc) + if err != nil { + return err + } + + err = task.Wait(context.TODO()) + if err != nil { + if types.IsFileNotFound(err) && cmd.force { + // Ignore error + return nil + } + } + + return err +} diff --git a/vendor/github.com/vmware/govmomi/govc/datastore/upload.go b/vendor/github.com/vmware/govmomi/govc/datastore/upload.go new file mode 100644 index 000000000..77d3d8dcd --- /dev/null +++ b/vendor/github.com/vmware/govmomi/govc/datastore/upload.go @@ -0,0 +1,80 @@ +/* +Copyright (c) 2014-2015 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
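+
+Usage sketch (not from upstream): argument order is LOCAL then REMOTE, the
+mirror image of datastore.download; both paths below are invented:
+
+	govc datastore.upload ./tools.iso iso/tools.iso
+	govc datastore.download iso/tools.iso ./tools.iso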
+*/ + +package datastore + +import ( + "errors" + "flag" + + "golang.org/x/net/context" + + "github.com/vmware/govmomi/govc/cli" + "github.com/vmware/govmomi/govc/flags" + "github.com/vmware/govmomi/vim25/soap" +) + +type upload struct { + *flags.OutputFlag + *flags.DatastoreFlag +} + +func init() { + cli.Register("datastore.upload", &upload{}) +} + +func (cmd *upload) Register(ctx context.Context, f *flag.FlagSet) { + cmd.OutputFlag, ctx = flags.NewOutputFlag(ctx) + cmd.OutputFlag.Register(ctx, f) + + cmd.DatastoreFlag, ctx = flags.NewDatastoreFlag(ctx) + cmd.DatastoreFlag.Register(ctx, f) +} + +func (cmd *upload) Process(ctx context.Context) error { + if err := cmd.OutputFlag.Process(ctx); err != nil { + return err + } + if err := cmd.DatastoreFlag.Process(ctx); err != nil { + return err + } + return nil +} + +func (cmd *upload) Usage() string { + return "LOCAL REMOTE" +} + +func (cmd *upload) Run(ctx context.Context, f *flag.FlagSet) error { + args := f.Args() + if len(args) != 2 { + return errors.New("invalid arguments") + } + + ds, err := cmd.Datastore() + if err != nil { + return err + } + + p := soap.DefaultUpload + if cmd.OutputFlag.TTY { + logger := cmd.ProgressLogger("Uploading... ") + p.Progress = logger + defer logger.Wait() + } + + return ds.UploadFile(context.TODO(), args[0], args[1], &p) +} diff --git a/vendor/github.com/vmware/govmomi/govc/device/boot.go b/vendor/github.com/vmware/govmomi/govc/device/boot.go new file mode 100644 index 000000000..268acacbf --- /dev/null +++ b/vendor/github.com/vmware/govmomi/govc/device/boot.go @@ -0,0 +1,83 @@ +/* +Copyright (c) 2014-2015 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
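+
+Usage sketch (not from upstream): -order is split on commas and mapped
+through VirtualDeviceList.BootOrder; the device class names shown here are
+an assumption about that helper, not taken from this file:
+
+	govc device.boot -vm myvm -delay 1000 -order cdrom,disk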
+*/ + +package device + +import ( + "flag" + "strings" + + "github.com/vmware/govmomi/govc/cli" + "github.com/vmware/govmomi/govc/flags" + "github.com/vmware/govmomi/vim25/types" + "golang.org/x/net/context" +) + +type boot struct { + *flags.VirtualMachineFlag + + order string + types.VirtualMachineBootOptions +} + +func init() { + cli.Register("device.boot", &boot{}) +} + +func (cmd *boot) Register(ctx context.Context, f *flag.FlagSet) { + cmd.VirtualMachineFlag, ctx = flags.NewVirtualMachineFlag(ctx) + cmd.VirtualMachineFlag.Register(ctx, f) + + f.Int64Var(&cmd.BootDelay, "delay", 0, "Delay in ms before starting the boot sequence") + f.StringVar(&cmd.order, "order", "", "Boot device order") + f.Int64Var(&cmd.BootRetryDelay, "retry-delay", 0, "Delay in ms before a boot retry") + + cmd.BootRetryEnabled = types.NewBool(false) + f.BoolVar(cmd.BootRetryEnabled, "retry", false, "If true, retry boot after retry-delay") + + cmd.EnterBIOSSetup = types.NewBool(false) + f.BoolVar(cmd.EnterBIOSSetup, "setup", false, "If true, enter BIOS setup on next boot") +} + +func (cmd *boot) Process(ctx context.Context) error { + if err := cmd.VirtualMachineFlag.Process(ctx); err != nil { + return err + } + return nil +} + +func (cmd *boot) Run(ctx context.Context, f *flag.FlagSet) error { + vm, err := cmd.VirtualMachine() + if err != nil { + return err + } + + if vm == nil { + return flag.ErrHelp + } + + devices, err := vm.Device(context.TODO()) + if err != nil { + return err + } + + if cmd.order != "" { + o := strings.Split(cmd.order, ",") + cmd.BootOrder = devices.BootOrder(o) + } + + return vm.SetBootOptions(context.TODO(), &cmd.VirtualMachineBootOptions) +} diff --git a/vendor/github.com/vmware/govmomi/govc/device/cdrom/add.go b/vendor/github.com/vmware/govmomi/govc/device/cdrom/add.go new file mode 100644 index 000000000..122789a93 --- /dev/null +++ b/vendor/github.com/vmware/govmomi/govc/device/cdrom/add.go @@ -0,0 +1,95 @@ +/* +Copyright (c) 2014-2015 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
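+
+Usage sketch (not from upstream): Run prints the name of the device it
+adds, which can then be passed to device.cdrom.insert; the VM, controller
+and device names are invented:
+
+	govc device.cdrom.add -vm myvm -controller ide-200
+	govc device.cdrom.insert -vm myvm -device cdrom-3000 iso/tools.iso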
+*/ + +package cdrom + +import ( + "flag" + "fmt" + + "github.com/vmware/govmomi/govc/cli" + "github.com/vmware/govmomi/govc/flags" + "golang.org/x/net/context" +) + +type add struct { + *flags.VirtualMachineFlag + + controller string +} + +func init() { + cli.Register("device.cdrom.add", &add{}) +} + +func (cmd *add) Register(ctx context.Context, f *flag.FlagSet) { + cmd.VirtualMachineFlag, ctx = flags.NewVirtualMachineFlag(ctx) + cmd.VirtualMachineFlag.Register(ctx, f) + + f.StringVar(&cmd.controller, "controller", "", "IDE controller name") +} + +func (cmd *add) Process(ctx context.Context) error { + if err := cmd.VirtualMachineFlag.Process(ctx); err != nil { + return err + } + return nil +} + +func (cmd *add) Run(ctx context.Context, f *flag.FlagSet) error { + vm, err := cmd.VirtualMachine() + if err != nil { + return err + } + + if vm == nil { + return flag.ErrHelp + } + + devices, err := vm.Device(context.TODO()) + if err != nil { + return err + } + + c, err := devices.FindIDEController(cmd.controller) + if err != nil { + return err + } + + d, err := devices.CreateCdrom(c) + if err != nil { + return err + } + + err = vm.AddDevice(context.TODO(), d) + if err != nil { + return err + } + + // output name of device we just created + devices, err = vm.Device(context.TODO()) + if err != nil { + return err + } + + devices = devices.SelectByType(d) + + name := devices.Name(devices[len(devices)-1]) + + fmt.Println(name) + + return nil +} diff --git a/vendor/github.com/vmware/govmomi/govc/device/cdrom/eject.go b/vendor/github.com/vmware/govmomi/govc/device/cdrom/eject.go new file mode 100644 index 000000000..ea8a161a8 --- /dev/null +++ b/vendor/github.com/vmware/govmomi/govc/device/cdrom/eject.go @@ -0,0 +1,78 @@ +/* +Copyright (c) 2014-2015 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package cdrom + +import ( + "flag" + + "github.com/vmware/govmomi/govc/cli" + "github.com/vmware/govmomi/govc/flags" + "golang.org/x/net/context" +) + +type eject struct { + *flags.VirtualMachineFlag + + device string +} + +func init() { + cli.Register("device.cdrom.eject", &eject{}) +} + +func (cmd *eject) Register(ctx context.Context, f *flag.FlagSet) { + cmd.VirtualMachineFlag, ctx = flags.NewVirtualMachineFlag(ctx) + cmd.VirtualMachineFlag.Register(ctx, f) + + f.StringVar(&cmd.device, "device", "", "CD-ROM device name") +} + +func (cmd *eject) Process(ctx context.Context) error { + if err := cmd.VirtualMachineFlag.Process(ctx); err != nil { + return err + } + return nil +} + +func (cmd *eject) Description() string { + return `Eject media from CD-ROM device. 
+
+If device is not specified, the first CD-ROM device is used.`
+}
+
+func (cmd *eject) Run(ctx context.Context, f *flag.FlagSet) error {
+	vm, err := cmd.VirtualMachine()
+	if err != nil {
+		return err
+	}
+
+	if vm == nil {
+		return flag.ErrHelp
+	}
+
+	devices, err := vm.Device(context.TODO())
+	if err != nil {
+		return err
+	}
+
+	c, err := devices.FindCdrom(cmd.device)
+	if err != nil {
+		return err
+	}
+
+	return vm.EditDevice(context.TODO(), devices.EjectIso(c))
+}
diff --git a/vendor/github.com/vmware/govmomi/govc/device/cdrom/insert.go b/vendor/github.com/vmware/govmomi/govc/device/cdrom/insert.go
new file mode 100644
index 000000000..cd4532ddc
--- /dev/null
+++ b/vendor/github.com/vmware/govmomi/govc/device/cdrom/insert.go
@@ -0,0 +1,93 @@
+/*
+Copyright (c) 2014-2015 VMware, Inc. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package cdrom
+
+import (
+	"flag"
+
+	"github.com/vmware/govmomi/govc/cli"
+	"github.com/vmware/govmomi/govc/flags"
+	"golang.org/x/net/context"
+)
+
+type insert struct {
+	*flags.DatastoreFlag
+	*flags.VirtualMachineFlag
+
+	device string
+}
+
+func init() {
+	cli.Register("device.cdrom.insert", &insert{})
+}
+
+func (cmd *insert) Register(ctx context.Context, f *flag.FlagSet) {
+	cmd.DatastoreFlag, ctx = flags.NewDatastoreFlag(ctx)
+	cmd.DatastoreFlag.Register(ctx, f)
+	cmd.VirtualMachineFlag, ctx = flags.NewVirtualMachineFlag(ctx)
+	cmd.VirtualMachineFlag.Register(ctx, f)
+
+	f.StringVar(&cmd.device, "device", "", "CD-ROM device name")
+}
+
+func (cmd *insert) Process(ctx context.Context) error {
+	if err := cmd.DatastoreFlag.Process(ctx); err != nil {
+		return err
+	}
+	if err := cmd.VirtualMachineFlag.Process(ctx); err != nil {
+		return err
+	}
+	return nil
+}
+
+func (cmd *insert) Usage() string {
+	return "ISO"
+}
+
+func (cmd *insert) Description() string {
+	return `Insert media on datastore into CD-ROM device.
+
+If device is not specified, the first CD-ROM device is used.`
+}
+
+func (cmd *insert) Run(ctx context.Context, f *flag.FlagSet) error {
+	vm, err := cmd.VirtualMachine()
+	if err != nil {
+		return err
+	}
+
+	if vm == nil || f.NArg() != 1 {
+		return flag.ErrHelp
+	}
+
+	devices, err := vm.Device(context.TODO())
+	if err != nil {
+		return err
+	}
+
+	c, err := devices.FindCdrom(cmd.device)
+	if err != nil {
+		return err
+	}
+
+	iso, err := cmd.DatastorePath(f.Arg(0))
+	if err != nil {
+		return err
+	}
+
+	return vm.EditDevice(context.TODO(), devices.InsertIso(c, iso))
+}
diff --git a/vendor/github.com/vmware/govmomi/govc/device/connect.go b/vendor/github.com/vmware/govmomi/govc/device/connect.go
new file mode 100644
index 000000000..65f93aa2b
--- /dev/null
+++ b/vendor/github.com/vmware/govmomi/govc/device/connect.go
@@ -0,0 +1,83 @@
+/*
+Copyright (c) 2014-2015 VMware, Inc. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package device + +import ( + "flag" + "fmt" + + "github.com/vmware/govmomi/govc/cli" + "github.com/vmware/govmomi/govc/flags" + "golang.org/x/net/context" +) + +type connect struct { + *flags.VirtualMachineFlag +} + +func init() { + cli.Register("device.connect", &connect{}) +} + +func (cmd *connect) Register(ctx context.Context, f *flag.FlagSet) { + cmd.VirtualMachineFlag, ctx = flags.NewVirtualMachineFlag(ctx) + cmd.VirtualMachineFlag.Register(ctx, f) +} + +func (cmd *connect) Process(ctx context.Context) error { + if err := cmd.VirtualMachineFlag.Process(ctx); err != nil { + return err + } + return nil +} + +func (cmd *connect) Usage() string { + return "DEVICE..." +} + +func (cmd *connect) Run(ctx context.Context, f *flag.FlagSet) error { + vm, err := cmd.VirtualMachine() + if err != nil { + return err + } + + if vm == nil { + return flag.ErrHelp + } + + devices, err := vm.Device(context.TODO()) + if err != nil { + return err + } + + for _, name := range f.Args() { + device := devices.Find(name) + if device == nil { + return fmt.Errorf("device '%s' not found", name) + } + + if err = devices.Connect(device); err != nil { + return err + } + + if err = vm.EditDevice(context.TODO(), device); err != nil { + return err + } + } + + return nil +} diff --git a/vendor/github.com/vmware/govmomi/govc/device/disconnect.go b/vendor/github.com/vmware/govmomi/govc/device/disconnect.go new file mode 100644 index 000000000..fc2308621 --- /dev/null +++ b/vendor/github.com/vmware/govmomi/govc/device/disconnect.go @@ -0,0 +1,83 @@ +/* +Copyright (c) 2014-2015 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package device + +import ( + "flag" + "fmt" + + "github.com/vmware/govmomi/govc/cli" + "github.com/vmware/govmomi/govc/flags" + "golang.org/x/net/context" +) + +type disconnect struct { + *flags.VirtualMachineFlag +} + +func init() { + cli.Register("device.disconnect", &disconnect{}) +} + +func (cmd *disconnect) Register(ctx context.Context, f *flag.FlagSet) { + cmd.VirtualMachineFlag, ctx = flags.NewVirtualMachineFlag(ctx) + cmd.VirtualMachineFlag.Register(ctx, f) +} + +func (cmd *disconnect) Process(ctx context.Context) error { + if err := cmd.VirtualMachineFlag.Process(ctx); err != nil { + return err + } + return nil +} + +func (cmd *disconnect) Usage() string { + return "DEVICE..." 
+} + +func (cmd *disconnect) Run(ctx context.Context, f *flag.FlagSet) error { + vm, err := cmd.VirtualMachine() + if err != nil { + return err + } + + if vm == nil { + return flag.ErrHelp + } + + devices, err := vm.Device(context.TODO()) + if err != nil { + return err + } + + for _, name := range f.Args() { + device := devices.Find(name) + if device == nil { + return fmt.Errorf("device '%s' not found", name) + } + + if err = devices.Disconnect(device); err != nil { + return err + } + + if err = vm.EditDevice(context.TODO(), device); err != nil { + return err + } + } + + return nil +} diff --git a/vendor/github.com/vmware/govmomi/govc/device/floppy/add.go b/vendor/github.com/vmware/govmomi/govc/device/floppy/add.go new file mode 100644 index 000000000..36b819616 --- /dev/null +++ b/vendor/github.com/vmware/govmomi/govc/device/floppy/add.go @@ -0,0 +1,86 @@ +/* +Copyright (c) 2014-2015 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package floppy + +import ( + "flag" + "fmt" + + "github.com/vmware/govmomi/govc/cli" + "github.com/vmware/govmomi/govc/flags" + "golang.org/x/net/context" +) + +type add struct { + *flags.VirtualMachineFlag +} + +func init() { + cli.Register("device.floppy.add", &add{}) +} + +func (cmd *add) Register(ctx context.Context, f *flag.FlagSet) { + cmd.VirtualMachineFlag, ctx = flags.NewVirtualMachineFlag(ctx) + cmd.VirtualMachineFlag.Register(ctx, f) +} + +func (cmd *add) Process(ctx context.Context) error { + if err := cmd.VirtualMachineFlag.Process(ctx); err != nil { + return err + } + return nil +} + +func (cmd *add) Run(ctx context.Context, f *flag.FlagSet) error { + vm, err := cmd.VirtualMachine() + if err != nil { + return err + } + + if vm == nil { + return flag.ErrHelp + } + + devices, err := vm.Device(context.TODO()) + if err != nil { + return err + } + + d, err := devices.CreateFloppy() + if err != nil { + return err + } + + err = vm.AddDevice(context.TODO(), d) + if err != nil { + return err + } + + // output name of device we just created + devices, err = vm.Device(context.TODO()) + if err != nil { + return err + } + + devices = devices.SelectByType(d) + + name := devices.Name(devices[len(devices)-1]) + + fmt.Println(name) + + return nil +} diff --git a/vendor/github.com/vmware/govmomi/govc/device/floppy/eject.go b/vendor/github.com/vmware/govmomi/govc/device/floppy/eject.go new file mode 100644 index 000000000..a2271ea6d --- /dev/null +++ b/vendor/github.com/vmware/govmomi/govc/device/floppy/eject.go @@ -0,0 +1,78 @@ +/* +Copyright (c) 2014-2015 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package floppy + +import ( + "flag" + + "github.com/vmware/govmomi/govc/cli" + "github.com/vmware/govmomi/govc/flags" + "golang.org/x/net/context" +) + +type eject struct { + *flags.VirtualMachineFlag + + device string +} + +func init() { + cli.Register("device.floppy.eject", &eject{}) +} + +func (cmd *eject) Register(ctx context.Context, f *flag.FlagSet) { + cmd.VirtualMachineFlag, ctx = flags.NewVirtualMachineFlag(ctx) + cmd.VirtualMachineFlag.Register(ctx, f) + + f.StringVar(&cmd.device, "device", "", "Floppy device name") +} + +func (cmd *eject) Process(ctx context.Context) error { + if err := cmd.VirtualMachineFlag.Process(ctx); err != nil { + return err + } + return nil +} + +func (cmd *eject) Description() string { + return `Eject image from floppy device. + +If device is not specified, the first floppy device is used.` +} + +func (cmd *eject) Run(ctx context.Context, f *flag.FlagSet) error { + vm, err := cmd.VirtualMachine() + if err != nil { + return err + } + + if vm == nil { + return flag.ErrHelp + } + + devices, err := vm.Device(context.TODO()) + if err != nil { + return err + } + + c, err := devices.FindFloppy(cmd.device) + if err != nil { + return err + } + + return vm.EditDevice(context.TODO(), devices.EjectImg(c)) +} diff --git a/vendor/github.com/vmware/govmomi/govc/device/floppy/insert.go b/vendor/github.com/vmware/govmomi/govc/device/floppy/insert.go new file mode 100644 index 000000000..213db6920 --- /dev/null +++ b/vendor/github.com/vmware/govmomi/govc/device/floppy/insert.go @@ -0,0 +1,93 @@ +/* +Copyright (c) 2014-2015 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package floppy + +import ( + "flag" + + "github.com/vmware/govmomi/govc/cli" + "github.com/vmware/govmomi/govc/flags" + "golang.org/x/net/context" +) + +type insert struct { + *flags.DatastoreFlag + *flags.VirtualMachineFlag + + device string +} + +func init() { + cli.Register("device.floppy.insert", &insert{}) +} + +func (cmd *insert) Register(ctx context.Context, f *flag.FlagSet) { + cmd.DatastoreFlag, ctx = flags.NewDatastoreFlag(ctx) + cmd.DatastoreFlag.Register(ctx, f) + cmd.VirtualMachineFlag, ctx = flags.NewVirtualMachineFlag(ctx) + cmd.VirtualMachineFlag.Register(ctx, f) + + f.StringVar(&cmd.device, "device", "", "Floppy device name") +} + +func (cmd *insert) Process(ctx context.Context) error { + if err := cmd.DatastoreFlag.Process(ctx); err != nil { + return err + } + if err := cmd.VirtualMachineFlag.Process(ctx); err != nil { + return err + } + return nil +} + +func (cmd *insert) Usage() string { + return "IMG" +} + +func (cmd *insert) Description() string { + return `Insert image on datastore into floppy device. 
+
+If device is not specified, the first floppy device is used.`
+}
+
+func (cmd *insert) Run(ctx context.Context, f *flag.FlagSet) error {
+	vm, err := cmd.VirtualMachine()
+	if err != nil {
+		return err
+	}
+
+	if vm == nil || f.NArg() != 1 {
+		return flag.ErrHelp
+	}
+
+	devices, err := vm.Device(context.TODO())
+	if err != nil {
+		return err
+	}
+
+	c, err := devices.FindFloppy(cmd.device)
+	if err != nil {
+		return err
+	}
+
+	img, err := cmd.DatastorePath(f.Arg(0))
+	if err != nil {
+		return err
+	}
+
+	return vm.EditDevice(context.TODO(), devices.InsertImg(c, img))
+}
diff --git a/vendor/github.com/vmware/govmomi/govc/device/info.go b/vendor/github.com/vmware/govmomi/govc/device/info.go
new file mode 100644
index 000000000..e1707a910
--- /dev/null
+++ b/vendor/github.com/vmware/govmomi/govc/device/info.go
@@ -0,0 +1,160 @@
+/*
+Copyright (c) 2014-2015 VMware, Inc. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package device
+
+import (
+	"flag"
+	"fmt"
+	"io"
+	"os"
+	"strings"
+	"text/tabwriter"
+
+	"github.com/vmware/govmomi/govc/cli"
+	"github.com/vmware/govmomi/govc/flags"
+	"github.com/vmware/govmomi/object"
+	"github.com/vmware/govmomi/vim25/types"
+	"golang.org/x/net/context"
+)
+
+type info struct {
+	*flags.VirtualMachineFlag
+	*flags.OutputFlag
+}
+
+func init() {
+	cli.Register("device.info", &info{})
+}
+
+func (cmd *info) Register(ctx context.Context, f *flag.FlagSet) {
+	cmd.VirtualMachineFlag, ctx = flags.NewVirtualMachineFlag(ctx)
+	cmd.VirtualMachineFlag.Register(ctx, f)
+
+	cmd.OutputFlag, ctx = flags.NewOutputFlag(ctx)
+	cmd.OutputFlag.Register(ctx, f)
+}
+
+func (cmd *info) Process(ctx context.Context) error {
+	if err := cmd.VirtualMachineFlag.Process(ctx); err != nil {
+		return err
+	}
+	if err := cmd.OutputFlag.Process(ctx); err != nil {
+		return err
+	}
+	return nil
+}
+
+func (cmd *info) Usage() string {
+	return "[DEVICE]..."
+} + +func (cmd *info) Run(ctx context.Context, f *flag.FlagSet) error { + vm, err := cmd.VirtualMachine() + if err != nil { + return err + } + + if vm == nil { + return flag.ErrHelp + } + + devices, err := vm.Device(context.TODO()) + if err != nil { + return err + } + + res := infoResult{ + list: devices, + } + + if f.NArg() == 0 { + res.Devices = devices + } else { + for _, name := range f.Args() { + device := devices.Find(name) + if device == nil { + return fmt.Errorf("device '%s' not found", name) + } + + res.Devices = append(res.Devices, device) + } + } + + return cmd.WriteResult(&res) +} + +type infoResult struct { + Devices object.VirtualDeviceList + // need the full list of devices to lookup attached devices and controllers + list object.VirtualDeviceList +} + +func (r *infoResult) Write(w io.Writer) error { + tw := tabwriter.NewWriter(os.Stdout, 2, 0, 2, ' ', 0) + + for _, device := range r.Devices { + d := device.GetVirtualDevice() + info := d.DeviceInfo.GetDescription() + + fmt.Fprintf(tw, "Name:\t%s\n", r.Devices.Name(device)) + fmt.Fprintf(tw, " Type:\t%s\n", r.Devices.TypeName(device)) + fmt.Fprintf(tw, " Label:\t%s\n", info.Label) + fmt.Fprintf(tw, " Summary:\t%s\n", info.Summary) + fmt.Fprintf(tw, " Key:\t%d\n", d.Key) + + if c, ok := device.(types.BaseVirtualController); ok { + var attached []string + for _, key := range c.GetVirtualController().Device { + attached = append(attached, r.Devices.Name(r.list.FindByKey(key))) + } + fmt.Fprintf(tw, " Devices:\t%s\n", strings.Join(attached, ", ")) + } else { + if c := r.list.FindByKey(d.ControllerKey); c != nil { + fmt.Fprintf(tw, " Controller:\t%s\n", r.Devices.Name(c)) + fmt.Fprintf(tw, " Unit number:\t%d\n", d.UnitNumber) + } + } + + if ca := d.Connectable; ca != nil { + fmt.Fprintf(tw, " Connected:\t%t\n", ca.Connected) + fmt.Fprintf(tw, " Start connected:\t%t\n", ca.StartConnected) + fmt.Fprintf(tw, " Guest control:\t%t\n", ca.AllowGuestControl) + fmt.Fprintf(tw, " Status:\t%s\n", ca.Status) + } + + switch md := device.(type) { + case types.BaseVirtualEthernetCard: + fmt.Fprintf(tw, " MAC Address:\t%s\n", md.GetVirtualEthernetCard().MacAddress) + fmt.Fprintf(tw, " Address type:\t%s\n", md.GetVirtualEthernetCard().AddressType) + case *types.VirtualDisk: + if b, ok := md.Backing.(types.BaseVirtualDeviceFileBackingInfo); ok { + fmt.Fprintf(tw, " File:\t%s\n", b.GetVirtualDeviceFileBackingInfo().FileName) + } + if b, ok := md.Backing.(*types.VirtualDiskFlatVer2BackingInfo); ok && b.Parent != nil { + fmt.Fprintf(tw, " Parent:\t%s\n", b.Parent.GetVirtualDeviceFileBackingInfo().FileName) + } + case *types.VirtualSerialPort: + if b, ok := md.Backing.(*types.VirtualSerialPortURIBackingInfo); ok { + fmt.Fprintf(tw, " Direction:\t%s\n", b.Direction) + fmt.Fprintf(tw, " Service URI:\t%s\n", b.ServiceURI) + fmt.Fprintf(tw, " Proxy URI:\t%s\n", b.ProxyURI) + } + } + } + + return tw.Flush() +} diff --git a/vendor/github.com/vmware/govmomi/govc/device/ls.go b/vendor/github.com/vmware/govmomi/govc/device/ls.go new file mode 100644 index 000000000..c4eafa318 --- /dev/null +++ b/vendor/github.com/vmware/govmomi/govc/device/ls.go @@ -0,0 +1,86 @@ +/* +Copyright (c) 2014-2015 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package device + +import ( + "flag" + "fmt" + "os" + "text/tabwriter" + + "github.com/vmware/govmomi/govc/cli" + "github.com/vmware/govmomi/govc/flags" + "golang.org/x/net/context" +) + +type ls struct { + *flags.VirtualMachineFlag + + boot bool +} + +func init() { + cli.Register("device.ls", &ls{}) +} + +func (cmd *ls) Register(ctx context.Context, f *flag.FlagSet) { + cmd.VirtualMachineFlag, ctx = flags.NewVirtualMachineFlag(ctx) + cmd.VirtualMachineFlag.Register(ctx, f) + + f.BoolVar(&cmd.boot, "boot", false, "List devices configured in the VM's boot options") +} + +func (cmd *ls) Process(ctx context.Context) error { + if err := cmd.VirtualMachineFlag.Process(ctx); err != nil { + return err + } + return nil +} + +func (cmd *ls) Run(ctx context.Context, f *flag.FlagSet) error { + vm, err := cmd.VirtualMachine() + if err != nil { + return err + } + + if vm == nil { + return flag.ErrHelp + } + + devices, err := vm.Device(context.TODO()) + if err != nil { + return err + } + + if cmd.boot { + options, err := vm.BootOptions(context.TODO()) + if err != nil { + return err + } + + devices = devices.SelectBootOrder(options.BootOrder) + } + + tw := tabwriter.NewWriter(os.Stdout, 3, 0, 2, ' ', 0) + + for _, device := range devices { + fmt.Fprintf(tw, "%s\t%s\t%s\n", devices.Name(device), devices.TypeName(device), + device.GetVirtualDevice().DeviceInfo.GetDescription().Summary) + } + + return tw.Flush() +} diff --git a/vendor/github.com/vmware/govmomi/govc/device/remove.go b/vendor/github.com/vmware/govmomi/govc/device/remove.go new file mode 100644 index 000000000..0e03b7197 --- /dev/null +++ b/vendor/github.com/vmware/govmomi/govc/device/remove.go @@ -0,0 +1,79 @@ +/* +Copyright (c) 2014-2015 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package device + +import ( + "flag" + "fmt" + + "github.com/vmware/govmomi/govc/cli" + "github.com/vmware/govmomi/govc/flags" + "golang.org/x/net/context" +) + +type remove struct { + *flags.VirtualMachineFlag +} + +func init() { + cli.Register("device.remove", &remove{}) +} + +func (cmd *remove) Register(ctx context.Context, f *flag.FlagSet) { + cmd.VirtualMachineFlag, ctx = flags.NewVirtualMachineFlag(ctx) + cmd.VirtualMachineFlag.Register(ctx, f) +} + +func (cmd *remove) Process(ctx context.Context) error { + if err := cmd.VirtualMachineFlag.Process(ctx); err != nil { + return err + } + return nil +} + +func (cmd *remove) Usage() string { + return "DEVICE..." 
+} + +func (cmd *remove) Run(ctx context.Context, f *flag.FlagSet) error { + vm, err := cmd.VirtualMachine() + if err != nil { + return err + } + + if vm == nil { + return flag.ErrHelp + } + + devices, err := vm.Device(context.TODO()) + if err != nil { + return err + } + + for _, name := range f.Args() { + device := devices.Find(name) + if device == nil { + return fmt.Errorf("device '%s' not found", name) + } + + if err = vm.RemoveDevice(context.TODO(), device); err != nil { + return err + } + } + + return nil +} diff --git a/vendor/github.com/vmware/govmomi/govc/device/scsi/add.go b/vendor/github.com/vmware/govmomi/govc/device/scsi/add.go new file mode 100644 index 000000000..276391da8 --- /dev/null +++ b/vendor/github.com/vmware/govmomi/govc/device/scsi/add.go @@ -0,0 +1,107 @@ +/* +Copyright (c) 2014-2015 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package scsi + +import ( + "flag" + "fmt" + "strings" + + "github.com/vmware/govmomi/govc/cli" + "github.com/vmware/govmomi/govc/flags" + "github.com/vmware/govmomi/object" + "github.com/vmware/govmomi/vim25/types" + "golang.org/x/net/context" +) + +type add struct { + *flags.VirtualMachineFlag + + controller string + sharedBus string + hotAddRemove bool +} + +func init() { + cli.Register("device.scsi.add", &add{}) +} + +func (cmd *add) Register(ctx context.Context, f *flag.FlagSet) { + cmd.VirtualMachineFlag, ctx = flags.NewVirtualMachineFlag(ctx) + cmd.VirtualMachineFlag.Register(ctx, f) + + var ctypes []string + ct := object.SCSIControllerTypes() + for _, t := range ct { + ctypes = append(ctypes, ct.Type(t)) + } + f.StringVar(&cmd.controller, "type", ct.Type(ct[0]), + fmt.Sprintf("SCSI controller type (%s)", strings.Join(ctypes, "|"))) + f.StringVar(&cmd.sharedBus, "sharing", string(types.VirtualSCSISharingNoSharing), "SCSI sharing") + f.BoolVar(&cmd.hotAddRemove, "hot", false, "Enable hot-add/remove") +} + +func (cmd *add) Process(ctx context.Context) error { + if err := cmd.VirtualMachineFlag.Process(ctx); err != nil { + return err + } + return nil +} + +func (cmd *add) Run(ctx context.Context, f *flag.FlagSet) error { + vm, err := cmd.VirtualMachine() + if err != nil { + return err + } + + if vm == nil { + return flag.ErrHelp + } + + devices, err := vm.Device(context.TODO()) + if err != nil { + return err + } + + d, err := devices.CreateSCSIController(cmd.controller) + if err != nil { + return err + } + + c := d.(types.BaseVirtualSCSIController).GetVirtualSCSIController() + c.HotAddRemove = &cmd.hotAddRemove + c.SharedBus = types.VirtualSCSISharing(cmd.sharedBus) + + err = vm.AddDevice(context.TODO(), d) + if err != nil { + return err + } + + // output name of device we just created + devices, err = vm.Device(context.TODO()) + if err != nil { + return err + } + + devices = devices.SelectByType(d) + + name := devices.Name(devices[len(devices)-1]) + + fmt.Println(name) + + return nil +} diff --git a/vendor/github.com/vmware/govmomi/govc/device/serial/add.go b/vendor/github.com/vmware/govmomi/govc/device/serial/add.go 
new file mode 100644 index 000000000..564ef27ab --- /dev/null +++ b/vendor/github.com/vmware/govmomi/govc/device/serial/add.go @@ -0,0 +1,86 @@ +/* +Copyright (c) 2014-2015 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package serial + +import ( + "flag" + "fmt" + + "github.com/vmware/govmomi/govc/cli" + "github.com/vmware/govmomi/govc/flags" + "golang.org/x/net/context" +) + +type add struct { + *flags.VirtualMachineFlag +} + +func init() { + cli.Register("device.serial.add", &add{}) +} + +func (cmd *add) Register(ctx context.Context, f *flag.FlagSet) { + cmd.VirtualMachineFlag, ctx = flags.NewVirtualMachineFlag(ctx) + cmd.VirtualMachineFlag.Register(ctx, f) +} + +func (cmd *add) Process(ctx context.Context) error { + if err := cmd.VirtualMachineFlag.Process(ctx); err != nil { + return err + } + return nil +} + +func (cmd *add) Run(ctx context.Context, f *flag.FlagSet) error { + vm, err := cmd.VirtualMachine() + if err != nil { + return err + } + + if vm == nil { + return flag.ErrHelp + } + + devices, err := vm.Device(context.TODO()) + if err != nil { + return err + } + + d, err := devices.CreateSerialPort() + if err != nil { + return err + } + + err = vm.AddDevice(context.TODO(), d) + if err != nil { + return err + } + + // output name of device we just created + devices, err = vm.Device(context.TODO()) + if err != nil { + return err + } + + devices = devices.SelectByType(d) + + name := devices.Name(devices[len(devices)-1]) + + fmt.Println(name) + + return nil +} diff --git a/vendor/github.com/vmware/govmomi/govc/device/serial/connect.go b/vendor/github.com/vmware/govmomi/govc/device/serial/connect.go new file mode 100644 index 000000000..ec9088387 --- /dev/null +++ b/vendor/github.com/vmware/govmomi/govc/device/serial/connect.go @@ -0,0 +1,74 @@ +/* +Copyright (c) 2014-2015 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License.
+*/ + +package serial + +import ( + "flag" + + "github.com/vmware/govmomi/govc/cli" + "github.com/vmware/govmomi/govc/flags" + "golang.org/x/net/context" +) + +type connect struct { + *flags.VirtualMachineFlag + + device string + client bool +} + +func init() { + cli.Register("device.serial.connect", &connect{}) +} + +func (cmd *connect) Register(ctx context.Context, f *flag.FlagSet) { + cmd.VirtualMachineFlag, ctx = flags.NewVirtualMachineFlag(ctx) + cmd.VirtualMachineFlag.Register(ctx, f) + + f.StringVar(&cmd.device, "device", "", "serial port device name") + f.BoolVar(&cmd.client, "client", false, "Use client direction") +} + +func (cmd *connect) Process(ctx context.Context) error { + if err := cmd.VirtualMachineFlag.Process(ctx); err != nil { + return err + } + return nil +} + +func (cmd *connect) Run(ctx context.Context, f *flag.FlagSet) error { + vm, err := cmd.VirtualMachine() + if err != nil { + return err + } + + if vm == nil { + return flag.ErrHelp + } + + devices, err := vm.Device(context.TODO()) + if err != nil { + return err + } + + d, err := devices.FindSerialPort(cmd.device) + if err != nil { + return err + } + + return vm.EditDevice(context.TODO(), devices.ConnectSerialPort(d, f.Arg(0), cmd.client)) +} diff --git a/vendor/github.com/vmware/govmomi/govc/device/serial/disconnect.go b/vendor/github.com/vmware/govmomi/govc/device/serial/disconnect.go new file mode 100644 index 000000000..108006fc1 --- /dev/null +++ b/vendor/github.com/vmware/govmomi/govc/device/serial/disconnect.go @@ -0,0 +1,72 @@ +/* +Copyright (c) 2014-2015 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package serial + +import ( + "flag" + + "github.com/vmware/govmomi/govc/cli" + "github.com/vmware/govmomi/govc/flags" + "golang.org/x/net/context" +) + +type disconnect struct { + *flags.VirtualMachineFlag + + device string +} + +func init() { + cli.Register("device.serial.disconnect", &disconnect{}) +} + +func (cmd *disconnect) Register(ctx context.Context, f *flag.FlagSet) { + cmd.VirtualMachineFlag, ctx = flags.NewVirtualMachineFlag(ctx) + cmd.VirtualMachineFlag.Register(ctx, f) + + f.StringVar(&cmd.device, "device", "", "serial port device name") +} + +func (cmd *disconnect) Process(ctx context.Context) error { + if err := cmd.VirtualMachineFlag.Process(ctx); err != nil { + return err + } + return nil +} + +func (cmd *disconnect) Run(ctx context.Context, f *flag.FlagSet) error { + vm, err := cmd.VirtualMachine() + if err != nil { + return err + } + + if vm == nil { + return flag.ErrHelp + } + + devices, err := vm.Device(context.TODO()) + if err != nil { + return err + } + + d, err := devices.FindSerialPort(cmd.device) + if err != nil { + return err + } + + return vm.EditDevice(context.TODO(), devices.DisconnectSerialPort(d)) +} diff --git a/vendor/github.com/vmware/govmomi/govc/dvs/add.go b/vendor/github.com/vmware/govmomi/govc/dvs/add.go new file mode 100644 index 000000000..671b7e13b --- /dev/null +++ b/vendor/github.com/vmware/govmomi/govc/dvs/add.go @@ -0,0 +1,145 @@ +/* +Copyright (c) 2015 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package dvs + +import ( + "flag" + "fmt" + "os" + "strings" + + "golang.org/x/net/context" + + "github.com/vmware/govmomi/govc/cli" + "github.com/vmware/govmomi/govc/flags" + "github.com/vmware/govmomi/object" + "github.com/vmware/govmomi/vim25/mo" + "github.com/vmware/govmomi/vim25/types" +) + +type add struct { + *flags.HostSystemFlag + + path string + pnic string +} + +func init() { + cli.Register("dvs.add", &add{}) +} + +func (cmd *add) Register(ctx context.Context, f *flag.FlagSet) { + cmd.HostSystemFlag, ctx = flags.NewHostSystemFlag(ctx) + cmd.HostSystemFlag.Register(ctx, f) + + f.StringVar(&cmd.path, "dvs", "", "DVS path") + f.StringVar(&cmd.pnic, "pnic", "vmnic0", "Name of the host physical NIC") +} + +func (cmd *add) Process(ctx context.Context) error { + if err := cmd.HostSystemFlag.Process(ctx); err != nil { + return err + } + return nil +} + +func (cmd *add) Usage() string { + return "HOST..." 
+} + +func (cmd *add) Description() string { + return `Add hosts to DVS.` +} + +func (cmd *add) Run(ctx context.Context, f *flag.FlagSet) error { + if f.NArg() == 0 { + return flag.ErrHelp + } + + finder, err := cmd.Finder() + if err != nil { + return err + } + + net, err := finder.Network(ctx, cmd.path) + if err != nil { + return err + } + + dvs, ok := net.(*object.DistributedVirtualSwitch) + if !ok { + return fmt.Errorf("%s (%T) is not of type %T", cmd.path, net, dvs) + } + + var s mo.VmwareDistributedVirtualSwitch + err = dvs.Properties(ctx, dvs.Reference(), []string{"config"}, &s) + if err != nil { + return err + } + + backing := new(types.DistributedVirtualSwitchHostMemberPnicBacking) + + for _, vmnic := range strings.Split(cmd.pnic, ",") { + backing.PnicSpec = append(backing.PnicSpec, types.DistributedVirtualSwitchHostMemberPnicSpec{ + PnicDevice: strings.TrimSpace(vmnic), + }) + } + + config := &types.DVSConfigSpec{ + ConfigVersion: s.Config.GetDVSConfigInfo().ConfigVersion, + } + + hosts, err := cmd.HostSystems(f.Args()) + if err != nil { + return err + } + + existing := make(map[string]bool) + // TODO: host.pnic.info command + for _, member := range s.Config.GetDVSConfigInfo().Host { + existing[member.Config.Host.Value] = true + } + + for _, host := range hosts { + ref := host.Reference() + if existing[ref.Value] { + fmt.Fprintf(os.Stderr, "%s is already a member of %s\n", host.InventoryPath, dvs.InventoryPath) + continue + } + + config.Host = append(config.Host, types.DistributedVirtualSwitchHostMemberConfigSpec{ + Operation: "add", + Host: ref, + Backing: backing, + }) + } + + if len(config.Host) == 0 { + return nil + } + + task, err := dvs.Reconfigure(ctx, config) + if err != nil { + return err + } + + logger := cmd.ProgressLogger(fmt.Sprintf("adding %d hosts to dvs %s... ", len(config.Host), dvs.InventoryPath)) + defer logger.Wait() + + _, err = task.WaitForResult(ctx, logger) + return err +} diff --git a/vendor/github.com/vmware/govmomi/govc/dvs/create.go b/vendor/github.com/vmware/govmomi/govc/dvs/create.go new file mode 100644 index 000000000..cab77f6bc --- /dev/null +++ b/vendor/github.com/vmware/govmomi/govc/dvs/create.go @@ -0,0 +1,119 @@ +/* +Copyright (c) 2015 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package dvs + +import ( + "flag" + "fmt" + + "golang.org/x/net/context" + + "github.com/vmware/govmomi/govc/cli" + "github.com/vmware/govmomi/govc/flags" + "github.com/vmware/govmomi/object" + "github.com/vmware/govmomi/vim25/types" +) + +type create struct { + *flags.DatacenterFlag + + types.DVSCreateSpec + + configSpec *types.VMwareDVSConfigSpec + + parent string +} + +func init() { + cli.Register("dvs.create", &create{}) +} + +func (cmd *create) Register(ctx context.Context, f *flag.FlagSet) { + cmd.DatacenterFlag, ctx = flags.NewDatacenterFlag(ctx) + cmd.DatacenterFlag.Register(ctx, f) + + cmd.configSpec = new(types.VMwareDVSConfigSpec) + + cmd.DVSCreateSpec.ConfigSpec = cmd.configSpec + + f.StringVar(&cmd.parent, "parent", "", "Path to parent folder for the new dvs") +} + +func (cmd *create) Usage() string { + return "DVS" +} + +func (cmd *create) Description() string { + return `Create DVS (DistributedVirtualSwitch) in datacenter. + +The dvs is added to the folder specified by the 'parent' flag. If not given, +this defaults to the network folder in the specified or default datacenter.` +} + +func (cmd *create) Process(ctx context.Context) error { + if err := cmd.DatacenterFlag.Process(ctx); err != nil { + return err + } + return nil +} + +func (cmd *create) Run(ctx context.Context, f *flag.FlagSet) error { + var parent *object.Folder + + if f.NArg() != 1 { + return flag.ErrHelp + } + + name := f.Arg(0) + + if cmd.parent == "" { + dc, err := cmd.Datacenter() + if err != nil { + return err + } + + folders, err := dc.Folders(ctx) + if err != nil { + return err + } + + parent = folders.NetworkFolder + } else { + finder, err := cmd.Finder() + if err != nil { + return err + } + + parent, err = finder.Folder(ctx, cmd.parent) + if err != nil { + return err + } + } + + cmd.configSpec.Name = name + + task, err := parent.CreateDVS(ctx, cmd.DVSCreateSpec) + if err != nil { + return err + } + + logger := cmd.ProgressLogger(fmt.Sprintf("adding %s to folder %s... ", name, parent.InventoryPath)) + defer logger.Wait() + + _, err = task.WaitForResult(ctx, logger) + return err +} diff --git a/vendor/github.com/vmware/govmomi/govc/dvs/portgroup/add.go b/vendor/github.com/vmware/govmomi/govc/dvs/portgroup/add.go new file mode 100644 index 000000000..01b7ceb8a --- /dev/null +++ b/vendor/github.com/vmware/govmomi/govc/dvs/portgroup/add.go @@ -0,0 +1,107 @@ +/* +Copyright (c) 2015 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package portgroup + +import ( + "flag" + "fmt" + "strings" + + "golang.org/x/net/context" + + "github.com/vmware/govmomi/govc/cli" + "github.com/vmware/govmomi/govc/flags" + "github.com/vmware/govmomi/object" + "github.com/vmware/govmomi/vim25/types" +) + +type add struct { + *flags.DatacenterFlag + + types.DVPortgroupConfigSpec + + path string +} + +func init() { + cli.Register("dvs.portgroup.add", &add{}) +} + +func (cmd *add) Register(ctx context.Context, f *flag.FlagSet) { + cmd.DatacenterFlag, ctx = flags.NewDatacenterFlag(ctx) + cmd.DatacenterFlag.Register(ctx, f) + + f.StringVar(&cmd.path, "dvs", "", "DVS path") + + ptypes := []string{ + string(types.DistributedVirtualPortgroupPortgroupTypeEarlyBinding), + string(types.DistributedVirtualPortgroupPortgroupTypeLateBinding), + string(types.DistributedVirtualPortgroupPortgroupTypeEphemeral), + } + + f.StringVar(&cmd.DVPortgroupConfigSpec.Type, "type", ptypes[0], + fmt.Sprintf("Portgroup type (%s)", strings.Join(ptypes, "|"))) + + f.IntVar(&cmd.DVPortgroupConfigSpec.NumPorts, "nports", 128, "Number of ports") +} + +func (cmd *add) Process(ctx context.Context) error { + if err := cmd.DatacenterFlag.Process(ctx); err != nil { + return err + } + return nil +} + +func (cmd *add) Usage() string { + return "NAME" +} + +func (cmd *add) Run(ctx context.Context, f *flag.FlagSet) error { + if f.NArg() == 0 { + return flag.ErrHelp + } + + name := f.Arg(0) + + finder, err := cmd.Finder() + if err != nil { + return err + } + + net, err := finder.Network(ctx, cmd.path) + if err != nil { + return err + } + + dvs, ok := net.(*object.DistributedVirtualSwitch) + if !ok { + return fmt.Errorf("%s (%T) is not of type %T", cmd.path, net, dvs) + } + + cmd.DVPortgroupConfigSpec.Name = name + + task, err := dvs.AddPortgroup(ctx, []types.DVPortgroupConfigSpec{cmd.DVPortgroupConfigSpec}) + if err != nil { + return err + } + + logger := cmd.ProgressLogger(fmt.Sprintf("adding %s portgroup to dvs %s... 
", name, dvs.InventoryPath)) + defer logger.Wait() + + _, err = task.WaitForResult(ctx, logger) + return err +} diff --git a/vendor/github.com/vmware/govmomi/govc/emacs/.gitignore b/vendor/github.com/vmware/govmomi/govc/emacs/.gitignore new file mode 100644 index 000000000..56179efce --- /dev/null +++ b/vendor/github.com/vmware/govmomi/govc/emacs/.gitignore @@ -0,0 +1,3 @@ +.cask +elpa +*.elc diff --git a/vendor/github.com/vmware/govmomi/govc/emacs/Cask b/vendor/github.com/vmware/govmomi/govc/emacs/Cask new file mode 100644 index 000000000..aa7cf639c --- /dev/null +++ b/vendor/github.com/vmware/govmomi/govc/emacs/Cask @@ -0,0 +1,10 @@ +(source gnu) +(source melpa) + +(package-file "govc.el") + +(development + (depends-on "dash") + (depends-on "json-mode") + (depends-on "magit") + (depends-on "s")) diff --git a/vendor/github.com/vmware/govmomi/govc/emacs/Makefile b/vendor/github.com/vmware/govmomi/govc/emacs/Makefile new file mode 100644 index 000000000..0508993db --- /dev/null +++ b/vendor/github.com/vmware/govmomi/govc/emacs/Makefile @@ -0,0 +1,27 @@ +CASK = cask +EMACS_BIN ?= emacs +EMACS_FLAGS = +EMACS_EXEC = $(CASK) exec $(EMACS_BIN) --no-site-file --no-site-lisp --batch $(EMACS_FLAGS) + +OBJECTS = govc.elc + +elpa: + $(CASK) install + $(CASK) update + touch $@ + +.PHONY: build test docs clean + +build: elpa $(OBJECTS) + +test: build docs + $(EMACS_EXEC) -l test/make.el -f make-test + +docs: build + $(EMACS_EXEC) -l test/make.el -f make-docs +clean: + rm -f $(OBJECTS) elpa + rm -rf .cask + +%.elc: %.el + $(EMACS_EXEC) -f batch-byte-compile $< diff --git a/vendor/github.com/vmware/govmomi/govc/emacs/README.md b/vendor/github.com/vmware/govmomi/govc/emacs/README.md new file mode 100644 index 000000000..861702f2b --- /dev/null +++ b/vendor/github.com/vmware/govmomi/govc/emacs/README.md @@ -0,0 +1,224 @@ +# govc.el + +Emacs interface to govc for managing VMware ESXi and vCenter. + +The goal of this package is to provide a simple interface for commonly used +govc commands within Emacs. This includes table based inventory/state modes +for vms, hosts, datastores and pools. The keymap for each mode provides +shortcuts for easily feeding the data in view to other govc commands. + +Within the various govc modes, press `?` to see a popup menu of options. +A menu bar is enabled for certain modes, such as `govc-vm-mode` and `govc-host-mode`. +There is also a `govc` menu at all times under the `Tools` menu. + +The recommended way to install govc.el is via MELPA (http://melpa.org/). + +## govc-mode + +Running `govc-global-mode` creates key bindings to the various govc modes. +The default prefix is `C-c v` and can be changed by setting `govc-keymap-prefix`. + +### govc-command-map + +Keybinding | Description +---------------|------------------------------------------------------------ +H | Host info via govc +P | Pool info via govc +V | VM info via govc +S | Datastore info via govc + +### govc-urls + +List of URLs for use with `govc-session`. +The `govc-session-name` displayed by `govc-mode-line` uses `url-target` (anchor) +if set, otherwise `url-host` is used. 
+ +Example: +``` + (setq govc-urls '("root:vagrant@localhost:18443#Vagrant-ESXi" + "root:password@192.168.1.192#Intel-NUC" + "Administrator@vsphere.local:password!@vcva-clovervm")) +``` +To enter a URL that is not in the list, prefix `universal-argument`, for example: + + `C-u M-x govc-vm` + +When in `govc-vm` or `govc-host` mode, a default URL is composed with the +current session credentials and the IP address of the current vm/host and +the vm/host name as the session name. This makes it easier to connect to +nested ESX/vCenter VMs or directly to an ESX host. + +### govc-session-url + +ESX or vCenter URL set by `govc-session` via `govc-urls` selection. + +### govc-session-insecure + +Skip verification of server certificate when true. +This variable is set to the value of the `GOVC_INSECURE` env var by default. +It can also be set per-url via the query string (insecure=true). For example: +``` + (setq govc-urls '("root:password@hostname?insecure=true")) +``` + +### govc-session-datacenter + +Datacenter to use for the current `govc-session`. +If the endpoint has a single Datacenter it will be used by default, otherwise +`govc-session` will prompt for selection. It can also be set per-url via the +query string. For example: +``` + (setq govc-urls '("root:password@hostname?datacenter=dc1")) +``` + +### govc-session-datastore + +Datastore to use for the current `govc-session`. +If the endpoint has a single Datastore it will be used by default, otherwise +`govc-session` will prompt for selection. It can also be set per-url via the +query string. For example: +``` + (setq govc-urls '("root:password@hostname?datastore=vsanDatastore")) +``` + +## govc-tabulated-list-mode + +Generic table bindings to mark/unmark rows. + +In addition to any hooks its parent mode `tabulated-list-mode` might have run, +this mode runs the hook `govc-tabulated-list-mode-hook`, as the final step +during initialization. + +### govc-tabulated-list-mode-map + +Keybinding | Description +---------------|------------------------------------------------------------ +m | Mark and move to the next line +u | Unmark and move to the next line +t | Toggle mark +U | Unmark all +M-& | Shell CMD with current `govc-session` exported as GOVC_ env vars +M-w | Copy current selection or region to the kill ring +M-E | Export session to `process-environment` and `kill-ring` + +## govc-host-mode + +Major mode for handling a list of govc hosts. + +In addition to any hooks its parent mode `govc-tabulated-list-mode` might have run, +this mode runs the hook `govc-host-mode-hook`, as the final step +during initialization. + +### govc-host-mode-map + +Keybinding | Description +---------------|------------------------------------------------------------ +E | Events via govc events -n `govc-max-events` +J | JSON via govc host +N | Netstat via `govc-esxcli-netstat-info` with current host id +c | Connect new session for the current govc mode +p | Pool-mode with current session +s | Datastore-mode with current session +v | VM-mode with current session + +## govc-pool-mode + +Major mode for handling a list of govc pools. + +In addition to any hooks its parent mode `govc-tabulated-list-mode` might have run, +this mode runs the hook `govc-pool-mode-hook`, as the final step +during initialization.
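+
+As with the other table modes, the pool table can be narrowed with a
+filter and customized via its mode hook. A minimal sketch (the cluster
+path below is illustrative, not a default):
+```
+ ;; open the pool table for just one cluster's pools
+ (govc-pool "*/MyCluster/Resources/*")
+ ;; highlight the current row in pool tables
+ (add-hook 'govc-pool-mode-hook #'hl-line-mode)
+```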
+ +### govc-pool-mode-map + +Keybinding | Description +---------------|------------------------------------------------------------ +E | Events via govc events -n `govc-max-events` +J | JSON via govc pool +D | Destroy via `govc-pool-destroy` on the pool selection +c | Connect new session for the current govc mode +h | Host-mode with current session +s | Datastore-mode with current session +v | VM-mode with current session + +## govc-datastore-mode + +Major mode for govc datastore.info. + +In addition to any hooks its parent mode `tabulated-list-mode` might have run, +this mode runs the hook `govc-datastore-mode-hook`, as the final step +during initialization. + +### govc-datastore-mode-map + +Keybinding | Description +---------------|------------------------------------------------------------ +J | JSON via govc datastore +RET | Browse datastore +c | Connect new session for the current govc mode +h | Host-mode with current session +p | Pool-mode with current session +v | VM-mode with current session + +## govc-datastore-ls-mode + +Major mode for govc datastore.ls. + +In addition to any hooks its parent mode `govc-tabulated-list-mode` might have run, +this mode runs the hook `govc-datastore-ls-mode-hook`, as the final step +during initialization. + +### govc-datastore-ls-mode-map + +Keybinding | Description +---------------|------------------------------------------------------------ +J | JSON via govc datastore +D | Delete selected datastore paths ++ | Mkdir via govc datastore +DEL | Up to parent folder +RET | Open datastore folder or file + +## govc-vm-mode + +Major mode for handling a list of govc vms. + +In addition to any hooks its parent mode `govc-tabulated-list-mode` might have run, +this mode runs the hook `govc-vm-mode-hook`, as the final step +during initialization. + +### govc-vm-mode-map + +Keybinding | Description +---------------|------------------------------------------------------------ +E | Events via govc events -n `govc-max-events` +J | JSON via govc vm +X | ExtraConfig via `govc-vm-extra-config` on the current selection +RET | Devices via `govc-device` on the current selection +C | Console screenshot via `govc-vm-screen` on the current selection +V | VNC via `govc-vm-vnc` on the current selection +D | Destroy via `govc-vm-destroy` on the current selection +^ | Start via `govc-vm-start` on the current selection +! | Shutdown via `govc-vm-shutdown` on the current selection +@ | Reboot via `govc-vm-reboot` on the current selection +& | Suspend via `govc-vm-suspend` on the current selection +H | Host info via `govc-host` with host(s) of current selection +S | Datastore via `govc-datastore-ls` with datastore of current selection +P | Ping VM +c | Connect new session for the current govc mode +h | Host-mode with current session +p | Pool-mode with current session +s | Datastore-mode with current session + +## govc-device-mode + +Major mode for handling a govc device. + +In addition to any hooks its parent mode `govc-tabulated-list-mode` might have run, +this mode runs the hook `govc-device-mode-hook`, as the final step +during initialization.
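+
+The device table is normally reached by pressing `RET` on a row in
+`govc-vm-mode`. As an illustrative sketch (the vm path is hypothetical,
+and this assumes `govc-device` takes the same optional filter/session
+arguments as the other mode entry points):
+```
+ (govc-device "/dc1/vm/my-vm")
+```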
+ +### govc-device-mode-map + +Keybinding | Description +---------------|------------------------------------------------------------ +J | JSON via govc device +RET | Tabulated govc device diff --git a/vendor/github.com/vmware/govmomi/govc/emacs/govc.el b/vendor/github.com/vmware/govmomi/govc/emacs/govc.el new file mode 100644 index 000000000..1042f0102 --- /dev/null +++ b/vendor/github.com/vmware/govmomi/govc/emacs/govc.el @@ -0,0 +1,1260 @@ +;;; govc.el --- Emacs interface to govc for managing VMware ESXi and vCenter + +;; Author: The govc developers +;; URL: https://github.com/vmware/govmomi +;; Keywords: convenience +;; Version: 0.1.0 +;; Package-Requires: ((dash "1.5.0") (s "1.9.0") (magit-popup "2.0.50") (json-mode "1.6.0")) + +;; This file is NOT part of GNU Emacs. + +;; This program is free software; you can redistribute it and/or modify +;; it under the terms of the GNU General Public License as published by +;; the Free Software Foundation; either version 3, or (at your option) +;; any later version. +;; +;; This program is distributed in the hope that it will be useful, +;; but WITHOUT ANY WARRANTY; without even the implied warranty of +;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +;; GNU General Public License for more details. +;; +;; You should have received a copy of the GNU General Public License +;; along with GNU Emacs; see the file COPYING. If not, write to the +;; Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, +;; Boston, MA 02110-1301, USA. + +;;; Commentary: +;; The goal of this package is to provide a simple interface for commonly used +;; govc commands within Emacs. This includes table based inventory/state modes +;; for vms, hosts, datastores and pools. The keymap for each mode provides +;; shortcuts for easily feeding the data in view to other govc commands. +;; +;; Within the various govc modes, press `?' to see a popup menu of options. +;; A menu bar is enabled for certain modes, such as `govc-vm-mode' and `govc-host-mode'. +;; There is also a `govc' menu at all times under the `Tools' menu. +;; +;; The recommended way to install govc.el is via MELPA (http://melpa.org/). + +;;; Code: + +(eval-when-compile + (require 'cl)) +(require 'dash) +(require 'dired) +(require 'json-mode) +(require 'magit-popup) +(require 'url-parse) +(require 's) + +(defgroup govc nil + "Emacs customization group for govc." + :group 'convenience) + +(defcustom govc-keymap-prefix "C-c v" + "Prefix for `govc-mode'." + :group 'govc) + +(defvar govc-command-map + (let ((map (make-sparse-keymap))) + (define-key map "H" 'govc-host) + (define-key map "P" 'govc-pool) + (define-key map "V" 'govc-vm) + (define-key map "S" 'govc-datastore) + (define-key map "?" 'govc-popup) + map) + "Keymap for `govc-mode' after `govc-keymap-prefix' was pressed.") + +(defvar govc-mode-map + (let ((map (make-sparse-keymap))) + (define-key map (kbd govc-keymap-prefix) govc-command-map) + map) + "Keymap for `govc-mode'.") + +;;;###autoload +(define-minor-mode govc-mode + "Running `govc-global-mode' creates key bindings to the various govc modes. +The default prefix is `C-c v' and can be changed by setting `govc-keymap-prefix'. + +\\{govc-mode-map\}" + nil govc-mode-line govc-mode-map + :group 'govc) + +;;;###autoload +(define-globalized-minor-mode govc-global-mode govc-mode govc-mode) + +(defcustom govc-mode-line + '(:eval (format " govc[%s]" (or (govc-session-name) "-"))) + "Mode line lighter for govc." 
+ :group 'govc + :type 'sexp + :risky t) + + +;;; Tabulated list mode extensions (derived from https://github.com/Silex/docker.el tabulated-list-ext.el) +(defun govc-tabulated-list-mark () + "Mark and move to the next line." + (interactive) + (tabulated-list-put-tag (char-to-string dired-marker-char) t)) + +(defun govc-tabulated-list-unmark () + "Unmark and move to the next line." + (interactive) + (tabulated-list-put-tag "" t)) + +(defun govc-tabulated-list-toggle-marks () + "Toggle mark." + (interactive) + (save-excursion + (goto-char (point-min)) + (let ((cmd)) + (while (not (eobp)) + (setq cmd (char-after)) + (tabulated-list-put-tag + (if (eq cmd dired-marker-char) + "" + (char-to-string dired-marker-char)) t))))) + +(defun govc-tabulated-list-unmark-all () + "Unmark all." + (interactive) + (save-excursion + (goto-char (point-min)) + (while (not (eobp)) + (tabulated-list-put-tag "" t)))) + +(defun govc-selection () + "Get the current selection as a list of names." + (let ((selection)) + (save-excursion + (goto-char (point-min)) + (while (not (eobp)) + (when (eq (char-after) ?*) + (add-to-list 'selection (tabulated-list-get-id))) + (forward-line))) + (or selection (let ((id (tabulated-list-get-id))) + (if id + (list id)))))) + +(defun govc-do-selection (fn action) + "Call FN with `govc-selection' confirming ACTION." + (let* ((selection (govc-selection)) + (count (length selection)) + (prompt (if (= count 1) + (car selection) + (format "* [%d] marked" count)))) + (if (yes-or-no-p (format "%s %s ?" action prompt)) + (funcall fn selection)))) + +(defun govc-copy-selection () + "Copy current selection or region to the kill ring." + (interactive) + (if (region-active-p) + (copy-region-as-kill (mark) (point) 'region) + (kill-new (message "%s" (s-join " " (--map (format "'%s'" it) (govc-selection))))))) + +(defvar govc-font-lock-keywords + (list + (list dired-re-mark '(0 dired-mark-face)))) + +(defvar govc-tabulated-list-mode-map + (let ((map (make-sparse-keymap))) + (define-key map "m" 'govc-tabulated-list-mark) + (define-key map "u" 'govc-tabulated-list-unmark) + (define-key map "t" 'govc-tabulated-list-toggle-marks) + (define-key map "U" 'govc-tabulated-list-unmark-all) + (define-key map (kbd "M-&") 'govc-shell-command) + (define-key map (kbd "M-w") 'govc-copy-selection) + (define-key map (kbd "M-E") 'govc-copy-environment) + map) + "Keymap for `govc-tabulated-list-mode'.") + +(define-derived-mode govc-tabulated-list-mode tabulated-list-mode "Tabulated govc" + "Generic table bindings to mark/unmark rows." + (setq-local font-lock-defaults + '(govc-font-lock-keywords t nil nil beginning-of-line))) + + +;;; Keymap helpers for generating menus and popups +(defun govc-keymap-list (keymap) + "Return a list of (key name function) for govc bindings in the given KEYMAP. +The name returned is the first word of the function `documentation'." + (let ((map)) + (map-keymap + (lambda (k f) + (when (keymapp f) + (setq map (append map + (--map (and (setcar it (kbd (format "M-%s" (char-to-string (car it))))) it) + (govc-keymap-list f))))) + (when (and (symbolp f) + (s-starts-with? "govc-" (symbol-name f))) + (if (not (eq ?? k)) + (add-to-list 'map (list k (car (split-string (documentation f))) f))))) keymap) + map)) + +(defun govc-keymap-menu (keymap) + "Return a list of [key function t] for govc bindings in the given KEYMAP. +For use with `easy-menu-define'." 
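+ ;; Illustrative example (not from the source): an entry produced by
+ ;; `govc-keymap-list' such as (?E "Events" govc-events) becomes the
+ ;; easy-menu item vector ["Events" govc-events t].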
+ (-map (lambda (item) + (vector (nth 1 item) (nth 2 item) t)) + (govc-keymap-list keymap))) + +(defun govc-key-description (key) + "Call `key-description' ensuring KEY is a sequence." + (key-description (if (integerp key) (list key) key))) + +(defun govc-keymap-list-to-help (keymap) + "Convert KEYMAP to list of help text." + (--map (list (govc-key-description (car it)) + (car (split-string (documentation (nth 2 it)) "\\."))) + keymap)) + +(defun govc-keymap-popup-help () + "Default keymap help for `govc-keymap-popup'." + (append (govc-keymap-list-to-help (govc-keymap-list govc-tabulated-list-mode-map)) + '(("g" "Refresh current buffer") + ("C-h m" "Show all key bindings")))) + +(defun govc-keymap-popup (keymap) + "Convert a `govc-keymap-list' using KEYMAP for use with `magit-define-popup'. +Keys in the ASCII range of 32-97 are mapped to popup commands, all others are listed as help text." + (let* ((maps (--separate (and (integerp (car it)) + (>= (car it) 32) + (<= (car it) 97)) + (govc-keymap-list keymap))) + (help (govc-keymap-list-to-help (cadr maps)))) + (append + '("Commands") + (car maps) + (list (s-join "\n" (--map (format " %-6s %s" (car it) (cadr it)) + (append help (govc-keymap-popup-help)))) + nil)))) + + +;;; govc process helpers +(defvar govc-urls nil + "List of URLs for use with `govc-session'. +The `govc-session-name' displayed by `govc-mode-line' uses `url-target' (anchor) +if set, otherwise `url-host' is used. + +Example: +``` + (setq govc-urls '(\"root:vagrant@localhost:18443#Vagrant-ESXi\" + \"root:password@192.168.1.192#Intel-NUC\" + \"Administrator@vsphere.local:password!@vcva-clovervm\")) +``` +To enter a URL that is not in the list, prefix `universal-argument', for example: + + `\\[universal-argument] \\[govc-vm]' + +When in `govc-vm' or `govc-host' mode, a default URL is composed with the +current session credentials and the IP address of the current vm/host and +the vm/host name as the session name. This makes it easier to connect to +nested ESX/vCenter VMs or directly to an ESX host.") + +(defvar-local govc-session-url nil + "ESX or vCenter URL set by `govc-session' via `govc-urls' selection.") + +(defvar-local govc-session-insecure nil + "Skip verification of server certificate when true. +This variable is set to the value of the `GOVC_INSECURE' env var by default. +It can also be set per-url via the query string (insecure=true). For example: +``` + (setq govc-urls '(\"root:password@hostname?insecure=true\")) +```") + +(defvar-local govc-session-datacenter nil + "Datacenter to use for the current `govc-session'. +If the endpoint has a single Datacenter it will be used by default, otherwise +`govc-session' will prompt for selection. It can also be set per-url via the +query string. For example: +``` + (setq govc-urls '(\"root:password@hostname?datacenter=dc1\")) +```") + +(defvar-local govc-session-datastore nil + "Datastore to use for the current `govc-session'. +If the endpoint has a single Datastore it will be used by default, otherwise +`govc-session' will prompt for selection. It can also be set per-url via the +query string. For example: +``` + (setq govc-urls '(\"root:password@hostname?datastore=vsanDatastore\")) +```") + +(defvar-local govc-filter nil + "Resource path filter.") + +(defvar-local govc-args nil + "Additional govc arguments.") + +(defun govc-session-name () + "Return a name for the current session. +Derived from `govc-session-url' if set, otherwise from the 'GOVC_URL' env var. 
+Return value is the url anchor if set, otherwise the hostname is returned." + (let* ((u (or govc-session-url (getenv "GOVC_URL"))) + (url (if u (govc-url-parse u)))) + (if url + (or (url-target url) (url-host url))))) + +(defun govc-command (command &rest args) + "Format govc COMMAND ARGS." + (format "govc %s %s" command + (s-join " " (--map (format "'%s'" it) + (-flatten (-non-nil args)))))) + +(defconst govc-environment-map (--map (cons (concat "GOVC_" (upcase it)) + (intern (concat "govc-session-" it))) + '("url" "insecure" "datacenter" "datastore")) + + "Map of `GOVC_*' environment variable names to `govc-session-*' symbol names.") + +(defun govc-environment (&optional unset) + "Return `process-environment' for govc. +Optionally clear govc env if UNSET is non-nil." + (let ((process-environment (copy-sequence process-environment))) + (dolist (e govc-environment-map) + (setenv (car e) (unless unset (symbol-value (cdr e))))) + process-environment)) + +(defun govc-export-environment (arg) + "Set if ARG is \\[universal-argument], unset if ARG is \\[negative-argument]." + (if (equal arg '-) + (progn (setq process-environment (govc-environment t)) + (cons "unset" (--map (car it) + govc-environment-map))) + (progn (setq process-environment (govc-environment)) + (cons "export" (--map (format "%s='%s'" (car it) (or (symbol-value (cdr it)) "")) + govc-environment-map))))) + +(defun govc-copy-environment (&optional arg) + "Export session to `process-environment' and `kill-ring'. +Optionally set `GOVC_*' vars in `process-environment' using prefix +\\[universal-argument] ARG or unset with prefix \\[negative-argument] ARG." + (interactive "P") + (kill-new (message (if arg (s-join " " (govc-export-environment arg)) govc-session-url)))) + +(defun govc-process (command handler) + "Run COMMAND, calling HANDLER upon successful exit of the process." + (message command) + (let ((process-environment (govc-environment)) + (exit-code)) + (with-temp-buffer + (setq exit-code (call-process-shell-command command nil (current-buffer))) + (if (zerop exit-code) + (funcall handler) + (error (buffer-string)))))) + +(defun govc (command &rest args) + "Execute govc COMMAND with ARGS. +Return value is `buffer-string' split on newlines." + (govc-process (govc-command command args) + (lambda () + (split-string (buffer-string) "\n" t)))) + +(defun govc-json (command &rest args) + "Execute govc COMMAND passing arguments ARGS. +Return value is `json-read'." + (govc-process (govc-command command (cons "-json" args)) + (lambda () + (goto-char (point-min)) + (let ((json-object-type 'plist)) + (json-read))))) + +(defun govc-ls-datacenter () + "List datacenters." + (delete-dups (--map (nth 1 (split-string it "/")) + (govc "ls")))) + +(defun govc-object-prompt (prompt ls) + "PROMPT for object name via LS function. Return object without PROMPT if there is just one instance." + (let ((objs (funcall ls))) + (if (eq 1 (length objs)) + (car objs) + (completing-read prompt objs)))) + +(defun govc-url-parse (url) + "A `url-generic-parse-url' wrapper to handle URL with password, but no scheme. +Also fixes the case where user contains an '@'." + (let* ((full (s-contains? "://" url)) + (u (url-generic-parse-url (concat (unless full "https://") url)))) + (unless full + (setf (url-type u) nil) + (setf (url-fullness u) nil)) + (if (s-contains? 
"@" (url-host u)) + (let* ((h (split-string (url-host u) "@")) + (p (split-string (car h) ":"))) + (setf (url-host u) (cadr h)) + (setf (url-user u) (concat (url-user u) "@" (car p))) + (setf (url-password u) (cadr p)))) + u)) + +(defun govc-url-default () + "Default URL when creating a new session." + (if govc-session-url + (let ((url (govc-url-parse govc-session-url))) + (if (equal major-mode 'govc-host-mode) + (progn (setf (url-host url) (govc-table-column-value "Name")) + (setf (url-target url) nil)) + (progn (setf (url-host url) (govc-table-column-value "IP address")) + (setf (url-target url) (govc-table-column-value "Name")))) + (url-recreate-url url)))) + +(defun govc-urls-completing-read () + "A wrapper for `completing-read' to mask credentials in `govc-urls'." + (let ((alist)) + (dolist (ent govc-urls) + (let ((u (govc-url-parse ent))) + (setf (url-password u) nil) + (add-to-list 'alist `(,(url-recreate-url u) . ,ent) t))) + (let ((u (completing-read "govc url: " (-map 'car alist)))) + (cdr (assoc u alist))))) + +(defun govc-session-set-url (url) + "Set `govc-session-url' to URL and optionally set other govc-session-* variables via URL query." + (let ((q (cdr (url-path-and-query (govc-url-parse url))))) + (dolist (opt (if q (url-parse-query-string q))) + (let ((var (intern (concat "govc-session-" (car opt))))) + (if (boundp var) + (set var (cadr opt)))))) + (setq govc-session-url url)) + +(defun govc-session () + "Initialize a govc session." + (interactive) + (let ((url (if (or current-prefix-arg (eq 0 (length govc-urls))) + (read-string "govc url: " (govc-url-default)) + (if (eq 1 (length govc-urls)) + (car govc-urls) + (govc-urls-completing-read))))) + ;; Wait until this point to clear so current session is preserved in the + ;; event of `keyboard-quit' in `read-string'. + (setq govc-session-datacenter nil + govc-session-datastore nil + govc-filter nil) + (govc-session-set-url url)) + (unless govc-session-insecure + (setq govc-session-insecure (or (getenv "GOVC_INSECURE") + (completing-read "govc insecure: " '("true" "false"))))) + (unless govc-session-datacenter + (setq govc-session-datacenter (govc-object-prompt "govc datacenter: " 'govc-ls-datacenter))) + (add-to-list 'govc-urls govc-session-url)) + +(defalias 'govc-current-session 'buffer-local-variables) + +(defun govc-session-clone (session) + "Clone a session from SESSION buffer locals." + (dolist (v session) + (let ((s (car v))) + (when (s-starts-with? "govc-session-" (symbol-name s)) + (set s (assoc-default s session)))))) + +(defun govc-shell-command (&optional cmd) + "Shell CMD with current `govc-session' exported as GOVC_ env vars." + (interactive) + (let ((process-environment (govc-environment)) + (current-prefix-arg "*govc*") + (url govc-session-url)) + (if cmd + (async-shell-command cmd current-prefix-arg) + (call-interactively 'async-shell-command)) + (with-current-buffer (get-buffer current-prefix-arg) + (setq govc-session-url url)))) + +(defcustom govc-max-events 50 + "Limit events output to the last N events." + :type 'integer + :group 'govc) + +(defun govc-events () + "Events via govc events -n `govc-max-events'." + (interactive) + (govc-shell-command + (govc-command "events" + (list "-n" govc-max-events (govc-selection))))) + +(defun govc-parse-info (output) + "Parse govc info command OUTPUT." 
+ (let* ((entries) + (entry) + (entry-key)) + (-each output + (lambda (line) + (let* ((ix (s-index-of ":" line)) + (key (s-trim (substring line 0 ix))) + (val (s-trim (substring line (+ ix 1))))) + (unless entry-key + (setq entry-key key)) + (when (s-equals? key entry-key) + (setq entry (make-hash-table :test 'equal)) + (add-to-list 'entries entry)) + (puthash key val entry)))) + entries)) + +(defun govc-table-column-names () + "Return a list of column names from `tabulated-list-format'." + (--map (car (aref tabulated-list-format it)) + (number-sequence 0 (- (length tabulated-list-format) 1)))) + +(defun govc-table-column-value (key) + "Return current column value for given KEY." + (let ((names (govc-table-column-names)) + (entry (tabulated-list-get-entry)) + (value)) + (dotimes (ix (length names)) + (if (s-equals? key (nth ix names)) + (setq value (elt entry ix)))) + value)) + +(defun govc-table-info (command &optional args) + "Convert `govc-parse-info' COMMAND ARGS output to `tabulated-list-entries' format." + (let ((names (govc-table-column-names))) + (-map (lambda (info) + (let ((id (or (gethash "Path" info) + (gethash (car names) info)))) + (list id (vconcat + (--map (or (gethash it info) "-") + names))))) + (govc-parse-info (govc command args))))) + +(defun govc-map-info (command &optional args) + "Populate key=val map table with govc COMMAND ARGS output." + (-map (lambda (line) + (let* ((ix (s-index-of ":" line)) + (key (s-trim (substring line 0 ix))) + (val (s-trim (substring line (+ ix 1))))) + (list key (vector key val)))) + (govc command args))) + +(defun govc-map-info-table (entries) + "Tabulated `govc-map-info' data via ENTRIES." + (let ((session (govc-current-session)) + (args (append govc-args (govc-selection))) + (buffer (get-buffer-create "*govc-info*"))) + (pop-to-buffer buffer) + (tabulated-list-mode) + (setq govc-args args) + (govc-session-clone session) + (setq tabulated-list-format [("Name" 50) + ("Value" 50)] + tabulated-list-padding 2 + tabulated-list-entries entries) + (tabulated-list-print))) + +(defun govc-json-info (command &optional selection) + "Run govc COMMAND -json on SELECTION." + (interactive) + (govc-process (govc-command command (append (cons "-json" govc-args) + (or selection (govc-selection)))) + (lambda () + (let ((buffer (get-buffer-create "*govc-json*"))) + (with-current-buffer buffer + (erase-buffer)) + (copy-to-buffer buffer (point-min) (point-max)) + (pop-to-buffer buffer) + (json-mode) + ;; We use `json-mode-beautify' as `json-pretty-print-buffer' does not work for `govc-host-json-info' + (json-mode-beautify) + (goto-char (point-min)))))) + +(defun govc-mode-new-session () + "Connect new session for the current govc mode." + (interactive) + (call-interactively 'govc-session) + (revert-buffer)) + +(defun govc-host-with-session () + "Host-mode with current session." + (interactive) + (govc-host nil (govc-current-session))) + +(defun govc-vm-with-session () + "VM-mode with current session." + (interactive) + (govc-vm nil (govc-current-session))) + +(defun govc-datastore-with-session () + "Datastore-mode with current session." + (interactive) + (govc-datastore nil (govc-current-session))) + +(defun govc-pool-with-session () + "Pool-mode with current session." + (interactive) + (govc-pool nil (govc-current-session))) + + +;;; govc host mode +(defun govc-ls-host () + "List hosts." + (govc "ls" "-t" "HostSystem" "host/*")) + +(defun govc-esxcli-netstat-info () + "Wrapper for govc host.esxcli network ip connection list."
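+ ;; Note: `govc-args' must already hold the host selector, e.g.
+ ;; ("-host.ipath" "/dc1/host/cluster/host-a"), as arranged by
+ ;; `govc-esxcli-netstat' below (the path here is illustrative).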
+ (govc-table-info "host.esxcli" + (append govc-args '("-hints=false" "--" "network" "ip" "connection" "list")))) + +(defun govc-esxcli-netstat (host) + "Tabulated `govc-esxcli-netstat-info' HOST." + (interactive (list (govc-object-prompt "Host: " 'govc-ls-host))) + (let ((session (govc-current-session)) + (buffer (get-buffer-create "*govc-esxcli*"))) + (pop-to-buffer buffer) + (tabulated-list-mode) + (setq govc-args (list "-host.ipath" host)) + (govc-session-clone session) + (setq tabulated-list-format [("CCAlgo" 10 t) + ("ForeignAddress" 20 t) + ("LocalAddress" 20 t) + ("Proto" 5 t) + ("RecvQ" 5 t) + ("SendQ" 5 t) + ("State" 15 t) + ("WorldID" 7 t) + ("WorldName" 10 t)] + tabulated-list-padding 2 + tabulated-list-entries #'govc-esxcli-netstat-info) + (tabulated-list-init-header) + (tabulated-list-print))) + +(defun govc-host-esxcli-netstat () + "Netstat via `govc-esxcli-netstat-info' with current host id." + (interactive) + (govc-esxcli-netstat (tabulated-list-get-id))) + +(defun govc-host-info () + "Wrapper for govc host.info." + (govc-table-info "host.info" (or govc-filter "*/*"))) + +(defun govc-host-json-info () + "JSON via govc host.info -json on current selection." + (interactive) + (govc-json-info "host.info" (govc-selection))) + +(defvar govc-host-mode-map + (let ((map (make-sparse-keymap))) + (define-key map "E" 'govc-events) + (define-key map "J" 'govc-host-json-info) + (define-key map "N" 'govc-host-esxcli-netstat) + (define-key map "c" 'govc-mode-new-session) + (define-key map "p" 'govc-pool-with-session) + (define-key map "s" 'govc-datastore-with-session) + (define-key map "v" 'govc-vm-with-session) + (define-key map "?" 'govc-host-popup) + map) + "Keymap for `govc-host-mode'.") + +(defun govc-host (&optional filter session) + "Host info via govc. +Optionally filter by FILTER and inherit SESSION." + (interactive) + (let ((buffer (get-buffer-create "*govc-host*"))) + (pop-to-buffer buffer) + (govc-host-mode) + (if session + (govc-session-clone session) + (call-interactively 'govc-session)) + (setq govc-filter filter) + (tabulated-list-print))) + +(define-derived-mode govc-host-mode govc-tabulated-list-mode "Host" + "Major mode for handling a list of govc hosts." + (setq tabulated-list-format [("Name" 30 t) + ("Logical CPUs" 20 t) + ("CPU usage" 25 t) + ("Memory" 10 t) + ("Memory usage" 25 t) + ("Manufacturer" 13 t) + ("Boot time" 15 t)] + tabulated-list-sort-key (cons "Name" nil) + tabulated-list-padding 2 + tabulated-list-entries #'govc-host-info) + (tabulated-list-init-header)) + +(magit-define-popup govc-host-popup + "Host popup." + :actions (govc-keymap-popup govc-host-mode-map)) + +(easy-menu-define govc-host-mode-menu govc-host-mode-map + "Host menu." + (cons "Host" (govc-keymap-menu govc-host-mode-map))) + + +;;; govc pool mode +(defun govc-ls-pool (&optional pools) + "List resource POOLS recursively." + (let ((subpools (govc "ls" "-t" "ResourcePool" (--map (concat it "/*") (or pools '("host")))))) + (append pools + (if subpools + (govc-ls-pool subpools))))) + +(defun govc-ls-vapp () + "List virtual apps." + (govc "ls" "-t" "VirtualApp" "vm")) + +(defun govc-pool-destroy (name) + "Destroy pool with given NAME." + (interactive (list (completing-read "Destroy pool: " (govc-ls-pool)))) + (govc "pool.destroy" name)) + +(defun govc-pool-destroy-selection () + "Destroy via `govc-pool-destroy' on the pool selection." + (interactive) + (govc-do-selection 'govc-pool-destroy "Delete") + (tabulated-list-revert)) + +(defun govc-pool-info () + "Wrapper for govc pool.info." 
+ (govc-table-info "pool.info" (or govc-filter (append (govc-ls-pool) (govc-ls-vapp))))) + +(defun govc-pool-json-info () + "JSON via govc pool.info -json on current selection." + (interactive) + (govc-json-info "pool.info" (govc-selection))) + +(defvar govc-pool-mode-map + (let ((map (make-sparse-keymap))) + (define-key map "E" 'govc-events) + (define-key map "J" 'govc-pool-json-info) + (define-key map "D" 'govc-pool-destroy-selection) + (define-key map "c" 'govc-mode-new-session) + (define-key map "h" 'govc-host-with-session) + (define-key map "s" 'govc-datastore-with-session) + (define-key map "v" 'govc-vm-with-session) + (define-key map "?" 'govc-pool-popup) + map) + "Keymap for `govc-pool-mode'.") + +(defun govc-pool (&optional filter session) + "Pool info via govc. +Optionally filter by FILTER and inherit SESSION." + (interactive) + (let ((buffer (get-buffer-create "*govc-pool*"))) + (pop-to-buffer buffer) + (govc-pool-mode) + (if session + (govc-session-clone session) + (call-interactively 'govc-session)) + (setq govc-filter filter) + (tabulated-list-print))) + +(define-derived-mode govc-pool-mode govc-tabulated-list-mode "Pool" + "Major mode for handling a list of govc pools." + (setq tabulated-list-format [("Name" 30 t) + ("CPU Usage" 25 t) + ("CPU Shares" 25 t) + ("CPU Reservation" 25 t) + ("CPU Limit" 10 t) + ("Mem Usage" 25 t) + ("Mem Shares" 25 t) + ("Mem Reservation" 25 t) + ("Mem Limit" 10 t)] + tabulated-list-sort-key (cons "Name" nil) + tabulated-list-padding 2 + tabulated-list-entries #'govc-pool-info) + (tabulated-list-init-header)) + +(magit-define-popup govc-pool-popup + "Pool popup." + :actions (govc-keymap-popup govc-pool-mode-map)) + +(easy-menu-define govc-host-mode-menu govc-pool-mode-map + "Pool menu." + (cons "Pool" (govc-keymap-menu govc-pool-mode-map))) + + +;;; govc datastore mode +(defun govc-ls-datastore () + "List datastores." + (govc "ls" "datastore")) + +(defun govc-datastore-ls-entries () + "Wrapper for govc datastore.ls." + (let* ((data (govc-json "datastore.ls" "-l" "-p" govc-filter)) + (file (plist-get (elt data 0) :File))) + (-map (lambda (ent) + (let ((name (plist-get ent :Path)) + (size (plist-get ent :FileSize)) + (time (plist-get ent :Modification)) + (user (plist-get ent :Owner))) + (list (concat govc-filter name) + (vector (file-size-human-readable size) + (current-time-string (date-to-time time)) + name)))) file))) + +(defun govc-datastore-ls-parent () + "Up to parent folder." + (interactive) + (if (s-blank? govc-filter) + (let ((session (govc-current-session))) + (govc-datastore-mode) + (govc-session-clone session)) + (setq govc-filter (file-name-directory (directory-file-name govc-filter)))) + (tabulated-list-revert)) + +(defun govc-datastore-ls-child () + "Open datastore folder or file." + (interactive) + (let ((id (tabulated-list-get-id))) + (if (s-ends-with? "/" id) + (progn (setq govc-filter id) + (tabulated-list-revert)) + (govc-datastore-open)))) + +(defun govc-datastore-open () + "Open datastore file." 
+ (lexical-let* ((srcfile (tabulated-list-get-id)) + (srcpath (format "[%s] %s" (file-name-nondirectory govc-session-datastore) (s-chop-prefix "/" srcfile))) + (suffix (file-name-extension srcfile t)) + (tmpfile (make-temp-file "govc-ds" nil suffix)) + (session (govc-current-session))) + (when (yes-or-no-p (concat "Open " srcpath "?")) + (govc "datastore.download" srcfile tmpfile) + (with-current-buffer (pop-to-buffer (find-file-noselect tmpfile)) + (govc-session-clone session) + (add-hook 'kill-buffer-hook (lambda () + (with-demoted-errors + (delete-file tmpfile))) t t) + (add-hook 'after-save-hook (lambda () + (if (yes-or-no-p (concat "Upload changes to " srcpath "?")) + (with-demoted-errors + (govc "datastore.upload" tmpfile srcfile)))) t t))))) + +(defun govc-datastore-ls-json () + "JSON via govc datastore.ls -json on current selection." + (interactive) + (let ((govc-args '("-l" "-p"))) + (govc-json-info "datastore.ls" (govc-selection)))) + +(defun govc-datastore-mkdir (name) + "Mkdir via govc datastore.mkdir with given NAME." + (interactive (list (read-from-minibuffer "Create directory: " govc-filter))) + (govc "datastore.mkdir" name) + (tabulated-list-revert)) + +(defun govc-datastore-rm (paths) + "Delete datastore PATHS." + (--each paths (govc "datastore.rm" it))) + +(defun govc-datastore-rm-selection () + "Delete selected datastore paths." + (interactive) + (govc-do-selection 'govc-datastore-rm "Delete") + (tabulated-list-revert)) + +(defvar govc-datastore-ls-mode-map + (let ((map (make-sparse-keymap))) + (define-key map "J" 'govc-datastore-ls-json) + (define-key map "D" 'govc-datastore-rm-selection) + (define-key map "+" 'govc-datastore-mkdir) + (define-key map (kbd "DEL") 'govc-datastore-ls-parent) + (define-key map (kbd "RET") 'govc-datastore-ls-child) + (define-key map "?" 'govc-datastore-ls-popup) + map) + "Keymap for `govc-datastore-ls-mode'.") + +(defun govc-datastore-ls (&optional datastore session) + "List govc datastore. Optionally specify DATASTORE and SESSION." + (interactive) + (let ((buffer (get-buffer-create "*govc-datastore*"))) + (pop-to-buffer buffer) + (govc-datastore-ls-mode) + (if session + (govc-session-clone session) + (call-interactively 'govc-session)) + (setq govc-session-datastore (or datastore (govc-object-prompt "govc datastore: " 'govc-ls-datastore))) + (tabulated-list-print))) + +(define-derived-mode govc-datastore-ls-mode govc-tabulated-list-mode "Datastore" + "Major mode for govc datastore.ls." + (setq tabulated-list-format [("Size" 10 t) + ("Modification time" 25 t) + ("Name" 40 t)] + tabulated-list-sort-key (cons "Name" nil) + tabulated-list-padding 2 + tabulated-list-entries #'govc-datastore-ls-entries) + (tabulated-list-init-header)) + +(magit-define-popup govc-datastore-ls-popup + "Datastore ls popup." + :actions (govc-keymap-popup govc-datastore-ls-mode-map)) + +(easy-menu-define govc-datastore-ls-mode-menu govc-datastore-ls-mode-map + "Datastore ls menu." + (cons "Datastore" (govc-keymap-menu govc-datastore-ls-mode-map))) + +(defvar govc-datastore-mode-map + (let ((map (make-sparse-keymap))) + (define-key map "J" 'govc-datastore-json-info) + (define-key map (kbd "RET") 'govc-datastore-ls-selection) + (define-key map "c" 'govc-mode-new-session) + (define-key map "h" 'govc-host-with-session) + (define-key map "p" 'govc-pool-with-session) + (define-key map "v" 'govc-vm-with-session) + (define-key map "?" 
'govc-datastore-popup) + map) + "Keymap for `govc-datastore-mode'.") + +(defun govc-datastore-json-info () + "JSON via govc datastore.info -json on current selection." + (interactive) + (govc-json-info "datastore.info")) + +(defun govc-datastore-info () + "Wrapper for govc datastore.info." + (govc-table-info "datastore.info" (or govc-filter "*"))) + +(defun govc-datastore-ls-selection () + "Browse datastore." + (interactive) + (govc-datastore-ls (tabulated-list-get-id) (govc-current-session))) + +(defun govc-datastore (&optional filter session) + "Datastore info via govc. +Optionally filter by FILTER and inherit SESSION." + (interactive) + (let ((buffer (get-buffer-create "*govc-datastore*"))) + (pop-to-buffer buffer) + (govc-datastore-mode) + (if session + (govc-session-clone session) + (call-interactively 'govc-session)) + (setq govc-filter filter) + (tabulated-list-print))) + +(define-derived-mode govc-datastore-mode tabulated-list-mode "Datastore" + "Major mode for govc datastore.info." + (setq tabulated-list-format [("Name" 15 t) + ("Type" 10 t) + ("Capacity" 10 t) + ("Free" 10 t) + ("Remote" 30 t)] + tabulated-list-sort-key (cons "Name" nil) + tabulated-list-padding 2 + tabulated-list-entries #'govc-datastore-info) + (tabulated-list-init-header)) + +(magit-define-popup govc-datastore-popup + "Datastore popup." + :actions (govc-keymap-popup govc-datastore-mode-map)) + +(easy-menu-define govc-datastore-mode-menu govc-datastore-mode-map + "Datastore menu." + (cons "Datastore" (govc-keymap-menu govc-datastore-mode-map))) + + +;;; govc vm mode +(defun govc-vm-prompt (prompt) + "PROMPT for a vm name." + (completing-read prompt (govc "ls" "vm"))) + +(defun govc-vm-start (name) + "Start vm with given NAME." + (interactive (list (govc-vm-prompt "Start vm: "))) + (govc "vm.power" "-on" name)) + +(defun govc-vm-shutdown (name) + "Shutdown vm with given NAME." + (interactive (list (govc-vm-prompt "Shutdown vm: "))) + (govc "vm.power" "-s" "-force" name)) + +(defun govc-vm-reboot (name) + "Reboot vm with given NAME." + (interactive (list (govc-vm-prompt "Reboot vm: "))) + (govc "vm.power" "-r" "-force" name)) + +(defun govc-vm-suspend (name) + "Suspend vm with given NAME." + (interactive (list (govc-vm-prompt "Suspend vm: "))) + (govc "vm.power" "-suspend" name)) + +(defun govc-vm-destroy (name) + "Destroy vm with given NAME." + (interactive (list (govc-vm-prompt "Destroy vm: "))) + (govc "vm.destroy" name)) + +(defun govc-vm-vnc-enable (name) + "Enable vnc on vm with given NAME." + (--map (last (split-string it)) + (govc "vm.vnc" "-enable" + "-port" "-1" + "-password" (format "%08x" (random (expt 16 8))) name))) + +(defun govc-vm-vnc (name &optional arg) + "VNC for vm with given NAME. +By default, enable and open VNC for the given vm NAME. +With prefix \\[negative-argument] ARG, VNC will be disabled. +With prefix \\[universal-argument] ARG, VNC will be enabled but not opened." + (interactive (list (govc-vm-prompt "VNC vm: ") + current-prefix-arg)) + (if (equal arg '-) + (govc "vm.vnc" "-disable" name) + (let ((urls (govc-vm-vnc-enable name))) + (unless arg + (-each (-flatten urls) 'browse-url))))) + +(defun govc-vm-screen (name &optional arg) + "Console screenshot of vm NAME console. +Open via `eww' by default, via `browse-url' if ARG is non-nil." 
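+ ;; For each matching vm this builds https://HOST/screen?id=MOID, using
+ ;; the credentials from the session URL for basic auth.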
+ (interactive (list (govc-vm-prompt "Console screenshot vm: ") + current-prefix-arg)) + (let* ((data (govc-json "vm.info" name)) + (vms (plist-get data :VirtualMachines)) + (url (govc-url-parse govc-session-url))) + (mapc + (lambda (vm) + (let* ((moid (plist-get (plist-get vm :Self) :Value)) + (on (string= "poweredOn" (plist-get (plist-get vm :Runtime) :PowerState))) + (host (format "%s:%d" (url-host url) (or (url-port url) 443))) + (path (concat "/screen?id=" moid)) + (auth (concat (url-user url) ":" (url-password url)))) + (if current-prefix-arg + (browse-url (concat "https://" auth "@" host path)) + (let ((creds `((,host ("VMware HTTP server" . ,(base64-encode-string auth))))) + (url-basic-auth-storage 'creds) + (u (concat "https://" host path))) + (require 'eww) + (if on + (url-retrieve u 'eww-render (list u)) + (kill-new (message u))))))) + vms))) + +(defun govc-vm-start-selection () + "Start via `govc-vm-start' on the current selection." + (interactive) + (govc-vm-start (govc-selection)) + (tabulated-list-revert)) + +(defun govc-vm-shutdown-selection () + "Shutdown via `govc-vm-shutdown' on the current selection." + (interactive) + (govc-vm-shutdown (govc-selection)) + (tabulated-list-revert)) + +(defun govc-vm-reboot-selection () + "Reboot via `govc-vm-reboot' on the current selection." + (interactive) + (govc-vm-reboot (govc-selection)) + (tabulated-list-revert)) + +(defun govc-vm-suspend-selection () + "Suspend via `govc-vm-suspend' on the current selection." + (interactive) + (govc-vm-suspend (govc-selection)) + (tabulated-list-revert)) + +(defun govc-vm-destroy-selection () + "Destroy via `govc-vm-destroy' on the current selection." + (interactive) + (govc-do-selection 'govc-vm-destroy "Destroy") + (tabulated-list-revert)) + +(defun govc-vm-vnc-selection () + "VNC via `govc-vm-vnc' on the current selection." + (interactive) + (govc-vm-vnc (govc-selection) current-prefix-arg)) + +(defun govc-vm-screen-selection () + "Console screenshot via `govc-vm-screen' on the current selection." + (interactive) + (govc-vm-screen (govc-selection) current-prefix-arg)) + +(defun govc-vm-info () + "Wrapper for govc vm.info." + (govc-table-info "vm.info" (list "-r" (or govc-filter (setq govc-filter (govc-vm-filter)))))) + +(defun govc-vm-host () + "Host info via `govc-host' with host(s) of current selection." + (interactive) + (govc-host (concat "*/" (govc-table-column-value "Host")) + (govc-current-session))) + +(defun govc-vm-datastore () + "Datastore via `govc-datastore-ls' with datastore of current selection." + (interactive) + (govc-datastore (s-split ", " (govc-table-column-value "Storage") t) + (govc-current-session))) + +(defun govc-vm-ping () + "Ping VM." + (interactive) + (let ((ping-program-options '("-c" "20"))) + (ping (govc-table-column-value "IP address")))) + +(defun govc-vm-device-ls () + "Devices via `govc-device' on the current selection." + (interactive) + (govc-device (tabulated-list-get-id) + (govc-current-session))) + +(defun govc-vm-extra-config () + "Populate table with govc vm.info -e output." 
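+  ;; Each ExtraConfig entry becomes a (key [key value]) row; when more than
+  ;; one VM matches, a "vm.name" row is added per VM to tell the rows apart.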
+ (let* ((data (govc-json "vm.info" govc-args)) + (vms (plist-get data :VirtualMachines)) + (info)) + (mapc + (lambda (vm) + (let* ((config (plist-get vm :Config)) + (name (plist-get config :Name))) + (mapc (lambda (x) + (let ((key (plist-get x :Key)) + (val (plist-get x :Value))) + (push (list key (vector key val)) info))) + (plist-get config :ExtraConfig)) + (if (> (length vms) 1) + (push (list name (vector "vm.name" name)) info)))) + vms) + info)) + +(defun govc-vm-extra-config-table () + "ExtraConfig via `govc-vm-extra-config' on the current selection." + (interactive) + (govc-map-info-table #'govc-vm-extra-config)) + +(defun govc-vm-json-info () + "JSON via govc vm.info -json on current selection." + (interactive) + (govc-json-info "vm.info")) + +(defvar govc-vm-mode-map + (let ((map (make-sparse-keymap))) + (define-key map "E" 'govc-events) + (define-key map "J" 'govc-vm-json-info) + (define-key map "X" 'govc-vm-extra-config-table) + (define-key map (kbd "RET") 'govc-vm-device-ls) + (define-key map "C" 'govc-vm-screen-selection) + (define-key map "V" 'govc-vm-vnc-selection) + (define-key map "D" 'govc-vm-destroy-selection) + (define-key map "^" 'govc-vm-start-selection) + (define-key map "!" 'govc-vm-shutdown-selection) + (define-key map "@" 'govc-vm-reboot-selection) + (define-key map "&" 'govc-vm-suspend-selection) + (define-key map "H" 'govc-vm-host) + (define-key map "S" 'govc-vm-datastore) + (define-key map "P" 'govc-vm-ping) + (define-key map "c" 'govc-mode-new-session) + (define-key map "h" 'govc-host-with-session) + (define-key map "p" 'govc-pool-with-session) + (define-key map "s" 'govc-datastore-with-session) + (define-key map "?" 'govc-vm-popup) + map) + "Keymap for `govc-vm-mode'.") + +(defun govc-vm-filter () + "Default `govc-filter' for `vm-info'." + (--map (concat it "/*") + (append (govc-ls-folder (list (concat "/" govc-session-datacenter "/vm"))) + (govc "ls" "-t" "VirtualApp" "vm")))) + +(defun govc-ls-folder (folders) + "List FOLDERS recursively." + (let ((subfolders (govc "ls" "-t" "Folder" folders))) + (append folders + (if subfolders + (govc-ls-folder subfolders))))) + +(defun govc-vm (&optional filter session) + "VM info via govc. +Optionally filter by FILTER and inherit SESSION." + (interactive) + (let ((buffer (get-buffer-create "*govc-vm*"))) + (pop-to-buffer buffer) + (govc-vm-mode) + (if session + (govc-session-clone session) + (call-interactively 'govc-session)) + (setq govc-filter filter) + (tabulated-list-print))) + +(define-derived-mode govc-vm-mode govc-tabulated-list-mode "VM" + "Major mode for handling a list of govc vms." + (setq tabulated-list-format [("Name" 40 t) + ("Power state" 12 t) + ("Boot time" 13 t) + ("IP address" 15 t) + ("Guest name" 20 t) + ("Host" 20 t) + ("CPU usage" 15 t) + ("Host memory usage" 18 t) + ("Guest memory usage" 19 t) + ("Storage committed" 18 t) + ("Storage" 10 t) + ("Network" 10 t)] + tabulated-list-sort-key (cons "Name" nil) + tabulated-list-padding 2 + tabulated-list-entries #'govc-vm-info) + (tabulated-list-init-header)) + +(magit-define-popup govc-vm-popup + "VM popup." + :actions (govc-keymap-popup govc-vm-mode-map)) + +(easy-menu-define govc-vm-mode-menu govc-vm-mode-map + "VM menu." + (cons "VM" (govc-keymap-menu govc-vm-mode-map))) + + +;;; govc device mode +(defun govc-device-ls () + "Wrapper for govc device.ls -vm VM." 
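+  ;; Each "name type summary" line of device.ls output becomes a
+  ;; tabulated-list row of the form (name [name type summary]).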
+ (-map (lambda (line) + (let* ((entry (s-split-up-to " " (s-collapse-whitespace line) 2)) + (name (car entry)) + (type (nth 1 entry)) + (summary (car (last entry)))) + (list name (vector name type summary)))) + (govc "device.ls" govc-args))) + +(defun govc-device-info () + "Populate table with govc device.info output." + (govc-map-info "device.info" govc-args)) + +(defun govc-device-info-table () + "Tabulated govc device.info." + (interactive) + (govc-map-info-table #'govc-device-info)) + +(defun govc-device-json-info () + "JSON via govc device.info -json on current selection." + (interactive) + (govc-json-info "device.info")) + +(defvar govc-device-mode-map + (let ((map (make-sparse-keymap))) + (define-key map (kbd "J") 'govc-device-json-info) + (define-key map (kbd "RET") 'govc-device-info-table) + map) + "Keymap for `govc-device-mode'.") + +(defun govc-device (&optional vm session) + "List govc devices for VM. Optionally inherit SESSION." + (interactive) + (let ((buffer (get-buffer-create "*govc-device*"))) + (pop-to-buffer buffer) + (govc-device-mode) + (if session + (govc-session-clone session) + (call-interactively 'govc-session)) + (setq govc-args (list "-vm" (or vm (govc-vm-prompt "vm: ")))) + (tabulated-list-print))) + +(define-derived-mode govc-device-mode govc-tabulated-list-mode "Device" + "Major mode for handling a govc device." + (setq tabulated-list-format [("Name" 15 t) + ("Type" 30 t) + ("Summary" 40 t)] + tabulated-list-sort-key (cons "Name" nil) + tabulated-list-padding 2 + tabulated-list-entries #'govc-device-ls) + (tabulated-list-init-header)) + +(magit-define-popup govc-popup + "govc popup." + :actions (govc-keymap-list govc-command-map)) + +(easy-menu-change + '("Tools") "govc" + (govc-keymap-menu govc-command-map) + "Search Files (Grep)...") + +(provide 'govc) + +;;; govc.el ends here diff --git a/vendor/github.com/vmware/govmomi/govc/emacs/test/govc-test.el b/vendor/github.com/vmware/govmomi/govc/emacs/test/govc-test.el new file mode 100644 index 000000000..b693b4f21 --- /dev/null +++ b/vendor/github.com/vmware/govmomi/govc/emacs/test/govc-test.el @@ -0,0 +1,137 @@ +(defconst testsuite-dir + (if load-file-name + (file-name-directory load-file-name) + ;; Fall back to default directory (in case of M-x eval-buffer) + default-directory) + "Directory of the test suite.") + +(defconst govc-test-helper-path + (concat (expand-file-name (concat testsuite-dir "/../../test/test_helper.bash")))) + +(load (expand-file-name "../govc" testsuite-dir) nil :no-message) + +(ert-deftest test-govc-url-parse () + (dolist (u '("root:vagrant@localhost:18443" + "Administrator@vsphere.local:vagrant@localhost" + "https://root:vagrant@localhost:18443/sdk" + "https://Administrator@vsphere.local:vagrant@localhost/sdk")) + (should (equal u (url-recreate-url (govc-url-parse u)))))) + +(ert-deftest test-govc-session-set-url () + (should (equal govc-session-insecure nil)) + (should (equal govc-session-datacenter nil)) + (with-temp-buffer + (govc-session-set-url "vc.example.com?insecure=true&datacenter=foo&ignored=true") + (should (equal govc-session-insecure "true")) + (should (equal govc-session-datacenter "foo")) + (should (equal govc-session-datastore nil)))) + +(ert-deftest test-govc-copy-environment () + (let ((process-environment) + (govc-session-url "vc.example.com") + (govc-session-insecure "false") + (govc-session-datacenter "dc1") + (govc-session-datastore "ds1")) + (govc-export-environment '-) + (dolist (e govc-environment-map) + (should (equal nil (getenv (car e))))) + 
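+    ;; Exporting with `-' unsets every variable in `govc-environment-map';
+    ;; exporting with a prefix argument re-exports the govc-session-* values.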
(govc-export-environment (universal-argument)) + (dolist (e govc-environment-map) + (should (not (equal nil (getenv (car e)))))))) + +(defun govc-test-env () + (let ((url (getenv "GOVC_TEST_URL"))) + (unless url + (ert-skip "env GOVC_TEST_URL not set")) + (setq govc-session-url url + govc-session-insecure "true"))) + +(defun govc-test-helper (arg) + (shell-command-to-string (format "bash -c \"source %s; %s\"" govc-test-helper-path arg))) + +(defun govc-test-new-vm () + (s-trim-right (govc-test-helper "new_empty_vm"))) + +(defun govc-test-new-id () + (s-trim-right (govc-test-helper "new_id"))) + +(defun govc-test-teardown () + (ignore-errors + (govc-test-helper "teardown"))) + +(ert-deftest test-govc-vm-info () + (govc-test-env) + (unwind-protect + (let ((id (govc-test-new-vm))) + (govc-json-info "vm.info" (list id)) + (with-current-buffer "*govc-json*" + (goto-char (point-min)) + (let ((data (json-read))) + (should (= (length data) 1)) + (should (cdr (assq 'VirtualMachines data))))) + + (govc-json-info "vm.info" (list "ENOENT")) + (with-current-buffer "*govc-json*" + (goto-char (point-min)) + (let ((data (json-read))) + (should (= (length data) 1)) + (should (not (cdr (assq 'VirtualMachines data)))))) + + (let ((govc-args (list id)) + (len1) + (len2)) + (setq len1 (length (govc-vm-extra-config))) + (should (>= len1 1)) + (govc "vm.change" "-vm" id + "-e" "govc-test-one=1" + "-e" "govc-test-two:2.2=2" + ;; test that we don't choke on \n + "-e" "foo=bar +baz") + (setq len2 (length (govc-vm-extra-config))) + + (should (= (- len2 len1) 3))) + + (let ((govc-filter "*")) + (should (>= (length (govc-vm-info)) 1))) + + (let ((govc-filter "ENOENT")) + (should (= (length (govc-vm-info)) 0))) + + (govc-vm-screen id)) + (govc-test-teardown))) + +(ert-deftest test-govc-datastore-ls-entries () + (govc-test-env) + (unwind-protect + (let ((id (govc-test-new-id))) + (should (>= (length (govc-datastore-ls-entries)) 1)) + + (let ((govc-filter (concat id "/"))) + (should-error (govc-datastore-ls-entries)) + (govc "datastore.mkdir" id) + (should (= (length (govc-datastore-ls-entries)) 0)) + (dotimes (i 3) + (govc "datastore.mkdir" (format "%s/dir %d" id i))) + (let ((entries (govc-datastore-ls-entries))) + (should (= (length entries) 3)) + (should (s-starts-with? (concat id "/dir ") (caar entries)))))) + (govc-test-teardown))) + +(ert-deftest test-govc-pool-ls () + (govc-test-env) + (unwind-protect + (let* ((pools (govc-ls-pool)) + (num (length pools)) + (path (concat (car pools) "/" (govc-test-new-id)))) + (should (>= num 1)) + (message "%d existing pools [%S]" num pools) + (govc "pool.create" path) + (setq pools (govc-ls-pool)) + (govc-pool-destroy path) + (should (= (- (length pools) num) 1))) + (govc-test-teardown))) + +(ert-deftest test-govc-about () + (govc-test-env) + (govc "about")) diff --git a/vendor/github.com/vmware/govmomi/govc/emacs/test/make.el b/vendor/github.com/vmware/govmomi/govc/emacs/test/make.el new file mode 100644 index 000000000..add62185e --- /dev/null +++ b/vendor/github.com/vmware/govmomi/govc/emacs/test/make.el @@ -0,0 +1,58 @@ +#!/usr/bin/env emacs --script + +(let ((current-directory (file-name-directory load-file-name))) + (setq project-test-path (expand-file-name "." current-directory)) + (setq project-root-path (expand-file-name ".." 
current-directory))) + +(add-to-list 'load-path project-root-path) +(add-to-list 'load-path project-test-path) + +(require 'lisp-mnt) +(require 'govc) +(require 's) + +(defun make-test () + (dolist (test-file (or argv (directory-files project-test-path t "-test.el$"))) + (load test-file nil t)) + (ert-run-tests-batch-and-exit t)) + +(defun govc-help () + "Summary of govc modes in markdown format." + (interactive) + (with-help-window (help-buffer) ; TODO: this turned into a mess, but does the job of generating README.md from govc.el + (dolist (kind '(govc-mode govc-urls govc-session-url govc-session-insecure govc-session-datacenter govc-session-datastore + tabulated-list host pool datastore datastore-ls vm device)) + (let* ((name (if (boundp kind) (symbol-name kind) (format "govc-%s-mode" kind))) + (map (if (equal 'govc-mode kind) 'govc-command-map (intern (concat name "-map")))) + (doc (lambda (f &optional all) + (let* ((txt (if (functionp f) (documentation f t) (documentation-property f 'variable-documentation))) + (ix (if all (length txt) (s-index-of "." txt)))) + (s-replace (format "\n\n\\\{%s\}" (concat name "-map")) "" + (s-replace "'" "`" (substring txt 0 ix))))))) + (princ (concat (s-repeat (if (and (boundp kind) (not (fboundp kind))) 3 2) "#") " " name "\n")) + (princ (concat "\n" (funcall doc (intern name) t) "\n\n")) + (when (boundp map) + (princ (concat "### " (symbol-name map) "\n\n")) + (princ "Keybinding | Description\n") + (princ "---------------|------------------------------------------------------------\n") + (dolist (kl (govc-keymap-list (symbol-value map))) + (let ((key (govc-key-description (car kl)))) + (princ (format "%s%s| %s\n" key (s-repeat (- 4 (length key)) " ") (funcall doc (nth 2 kl)))))) + (princ "\n")))))) + +(defun make-docs () + (let ((commentary) + (summary)) + (with-current-buffer (find-file-noselect (concat project-root-path "/govc.el")) + (setq commentary (s-replace ";;; Commentary:" "" (lm-commentary)) + summary (lm-summary))) + (let ((readme (find-file-noselect (concat project-root-path "/README.md")))) + (with-current-buffer readme + (erase-buffer) + (govc-help) + (with-current-buffer (help-buffer) + (copy-to-buffer readme (point-min) (point-max))) + (goto-char (point-min)) + (insert (concat "# govc.el\n\n" summary ".\n")) + (insert (s-replace "'" "`" (replace-regexp-in-string ";; ?" "" commentary t t))) + (save-buffer 0))))) diff --git a/vendor/github.com/vmware/govmomi/govc/events/command.go b/vendor/github.com/vmware/govmomi/govc/events/command.go new file mode 100644 index 000000000..c3a11e4ad --- /dev/null +++ b/vendor/github.com/vmware/govmomi/govc/events/command.go @@ -0,0 +1,124 @@ +/* +Copyright (c) 2015 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
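+
+The command below registers itself as "govc events" and accepts inventory
+paths; a sketch of typical use (the path and count are illustrative):
+
+  govc events -n 100 vm/my-vm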
+*/ + +package events + +import ( + "flag" + "fmt" + "os" + "strings" + "time" + + "github.com/vmware/govmomi/event" + "github.com/vmware/govmomi/govc/cli" + "github.com/vmware/govmomi/govc/flags" + "github.com/vmware/govmomi/vim25/types" + "golang.org/x/net/context" +) + +type events struct { + *flags.DatacenterFlag + + Max int +} + +func init() { + cli.Register("events", &events{}) +} + +func (cmd *events) Register(ctx context.Context, f *flag.FlagSet) { + cmd.DatacenterFlag, ctx = flags.NewDatacenterFlag(ctx) + cmd.DatacenterFlag.Register(ctx, f) + + f.IntVar(&cmd.Max, "n", 25, "Output the last N events") +} + +func (cmd *events) Process(ctx context.Context) error { + if err := cmd.DatacenterFlag.Process(ctx); err != nil { + return err + } + return nil +} + +func (cmd *events) Usage() string { + return "[PATH]..." +} + +func (cmd *events) Run(ctx context.Context, f *flag.FlagSet) error { + c, err := cmd.Client() + if err != nil { + return err + } + + m := event.NewManager(c) + + objs, err := cmd.ManagedObjects(ctx, f.Args()) + if err != nil { + return err + } + + var events []types.BaseEvent + + for _, o := range objs { + filter := types.EventFilterSpec{ + Entity: &types.EventFilterSpecByEntity{ + Entity: o, + Recursion: types.EventFilterSpecRecursionOptionAll, + }, + } + + collector, err := m.CreateCollectorForEvents(ctx, filter) + if err != nil { + return fmt.Errorf("[%#v] %s", o, err) + } + defer collector.Destroy(ctx) + + err = collector.SetPageSize(ctx, cmd.Max) + if err != nil { + return err + } + + page, err := collector.LatestPage(ctx) + if err != nil { + return err + } + + events = append(events, page...) + } + + event.Sort(events) + + for _, e := range events { + cat, err := m.EventCategory(ctx, e) + if err != nil { + return err + } + + event := e.GetEvent() + msg := strings.TrimSpace(event.FullFormattedMessage) + + if t, ok := e.(*types.TaskEvent); ok { + msg = fmt.Sprintf("%s (target=%s %s)", msg, t.Info.Entity.Type, t.Info.EntityName) + } + + fmt.Fprintf(os.Stdout, "[%s] [%s] %s\n", + event.CreatedTime.Local().Format(time.ANSIC), + cat, msg) + } + + return nil +} diff --git a/vendor/github.com/vmware/govmomi/govc/examples/lib/ssh.sh b/vendor/github.com/vmware/govmomi/govc/examples/lib/ssh.sh new file mode 100644 index 000000000..60910e9fb --- /dev/null +++ b/vendor/github.com/vmware/govmomi/govc/examples/lib/ssh.sh @@ -0,0 +1,44 @@ +function public-key { + local dir=${HOME}/.ssh + + for f in $HOME/.ssh/{id_{rsa,dsa},*}.pub; do + if [ -r $f ]; then + echo $f + return + fi + done + + echo "Can't find public key file..." + exit 1 +} + +PUBLIC_KEY_FILE=${PUBLIC_KEY_FILE-$(public-key)} +SSH_OPTS="-oStrictHostKeyChecking=no -oUserKnownHostsFile=/dev/null -oLogLevel=quiet" + +function upload-public-key { + local vm_name=$1 + local dir=$2 + + if [ -z "$dir" ] + then + uid=$(echo $GOVC_GUEST_LOGIN | awk -F: '{print $1}') + dir=$(govc guest.getenv -vm ${vm_name} HOME | awk -F= '{print $2}') + + if [ -z "$dir" ] + then + echo "Can't find ${uid}'s HOME dir..." + exit 1 + fi + fi + + govc guest.mkdir \ + -vm ${vm_name} \ + -p \ + ${dir}/.ssh + + govc guest.upload \ + -vm ${vm_name} \ + -f \ + ${PUBLIC_KEY_FILE} \ + ${dir}/.ssh/authorized_keys +} diff --git a/vendor/github.com/vmware/govmomi/govc/examples/vcsa.sh b/vendor/github.com/vmware/govmomi/govc/examples/vcsa.sh new file mode 100644 index 000000000..19e4d6b21 --- /dev/null +++ b/vendor/github.com/vmware/govmomi/govc/examples/vcsa.sh @@ -0,0 +1,57 @@ +#!/bin/bash -e + +lib=$(readlink -nf $(dirname $0))/lib +. 
$lib/ssh.sh + +ova=$1 + +if [ -z "$ova" ] +then + ova=./VMware-vCenter-Server-Appliance-5.5.0.10300-2000350_OVF10.ova +fi + +# default to local Vagrant esxbox for testing +export GOVC_URL=${GOVC_URL-"https://root:vagrant@localhost:8443/sdk"} + +# default VCSA credentials +export GOVC_GUEST_LOGIN=root:vmware + +# VM name as defined in the VCSA .ovf +vm_name=VMware_vCenter_Server_Appliance + +echo "Importing $ova..." +govc import.ova $ova + +echo "Powering on $vm_name..." +govc vm.power -on $vm_name + +echo "Waiting for $vm_name's IP address..." +vc=$(govc vm.ip $vm_name) + +govc vm.info $vm_name + +echo "Uploading ssh public key to $vm_name..." +upload-public-key $vm_name + +echo "Configuring vCenter Server Appliance..." + +# http://www.virtuallyghetto.com/2012/02/automating-vcenter-server-appliance.html +ssh ${SSH_OPTS} root@$vc < 1 + return nil, flag.err + } + + return flag.dc, err +} + +func (flag *DatacenterFlag) ManagedObjects(ctx context.Context, args []string) ([]types.ManagedObjectReference, error) { + var refs []types.ManagedObjectReference + + c, err := flag.Client() + if err != nil { + return nil, err + } + + if len(args) == 0 { + refs = append(refs, c.ServiceContent.RootFolder) + return refs, nil + } + + finder, err := flag.Finder() + if err != nil { + return nil, err + } + + for _, arg := range args { + elements, err := finder.ManagedObjectList(ctx, arg) + if err != nil { + return nil, err + } + + for _, e := range elements { + refs = append(refs, e.Object.Reference()) + } + } + + return refs, nil +} diff --git a/vendor/github.com/vmware/govmomi/govc/flags/datastore.go b/vendor/github.com/vmware/govmomi/govc/flags/datastore.go new file mode 100644 index 000000000..d339f7d1f --- /dev/null +++ b/vendor/github.com/vmware/govmomi/govc/flags/datastore.go @@ -0,0 +1,114 @@ +/* +Copyright (c) 2014-2015 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
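+
+The flag below resolves -ds (defaulting to $GOVC_DATASTORE) lazily:
+Datastore() calls finder.DatastoreOrDefault on first use and caches the
+result for subsequent DatastorePath/DatastoreURL calls.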
+*/ + +package flags + +import ( + "flag" + "fmt" + "net/url" + "os" + + "github.com/vmware/govmomi/object" + "golang.org/x/net/context" +) + +type DatastoreFlag struct { + common + + *DatacenterFlag + + name string + ds *object.Datastore +} + +var datastoreFlagKey = flagKey("datastore") + +func NewDatastoreFlag(ctx context.Context) (*DatastoreFlag, context.Context) { + if v := ctx.Value(datastoreFlagKey); v != nil { + return v.(*DatastoreFlag), ctx + } + + v := &DatastoreFlag{} + v.DatacenterFlag, ctx = NewDatacenterFlag(ctx) + ctx = context.WithValue(ctx, datastoreFlagKey, v) + return v, ctx +} + +func (flag *DatastoreFlag) Register(ctx context.Context, f *flag.FlagSet) { + flag.RegisterOnce(func() { + flag.DatacenterFlag.Register(ctx, f) + + env := "GOVC_DATASTORE" + value := os.Getenv(env) + usage := fmt.Sprintf("Datastore [%s]", env) + f.StringVar(&flag.name, "ds", value, usage) + }) +} + +func (flag *DatastoreFlag) Process(ctx context.Context) error { + return flag.ProcessOnce(func() error { + if err := flag.DatacenterFlag.Process(ctx); err != nil { + return err + } + return nil + }) +} + +func (flag *DatastoreFlag) Datastore() (*object.Datastore, error) { + if flag.ds != nil { + return flag.ds, nil + } + + finder, err := flag.Finder() + if err != nil { + return nil, err + } + + if flag.ds, err = finder.DatastoreOrDefault(context.TODO(), flag.name); err != nil { + return nil, err + } + + return flag.ds, nil +} + +func (flag *DatastoreFlag) DatastorePath(name string) (string, error) { + ds, err := flag.Datastore() + if err != nil { + return "", err + } + + return ds.Path(name), nil +} + +func (flag *DatastoreFlag) DatastoreURL(path string) (*url.URL, error) { + dc, err := flag.Datacenter() + if err != nil { + return nil, err + } + + ds, err := flag.Datastore() + if err != nil { + return nil, err + } + + u, err := ds.URL(context.TODO(), dc, path) + if err != nil { + return nil, err + } + + return u, nil +} diff --git a/vendor/github.com/vmware/govmomi/govc/flags/debug.go b/vendor/github.com/vmware/govmomi/govc/flags/debug.go new file mode 100644 index 000000000..d8e910ea0 --- /dev/null +++ b/vendor/github.com/vmware/govmomi/govc/flags/debug.go @@ -0,0 +1,93 @@ +/* +Copyright (c) 2014 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
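+
+With GOVC_DEBUG set to "1" or "true" and GOVC_DEBUG_PATH unset, the provider
+below writes each run's logs to $HOME/.govmomi/debug/<timestamp>/.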
+*/ + +package flags + +import ( + "flag" + "fmt" + "os" + "path/filepath" + "strings" + "time" + + "golang.org/x/net/context" + + "github.com/vmware/govmomi/vim25/debug" +) + +type DebugFlag struct { + common + + enable bool +} + +var debugFlagKey = flagKey("debug") + +func NewDebugFlag(ctx context.Context) (*DebugFlag, context.Context) { + if v := ctx.Value(debugFlagKey); v != nil { + return v.(*DebugFlag), ctx + } + + v := &DebugFlag{} + ctx = context.WithValue(ctx, debugFlagKey, v) + return v, ctx +} + +func (flag *DebugFlag) Register(ctx context.Context, f *flag.FlagSet) { + flag.RegisterOnce(func() { + env := "GOVC_DEBUG" + enable := false + switch env := strings.ToLower(os.Getenv(env)); env { + case "1", "true": + enable = true + } + + usage := fmt.Sprintf("Store debug logs [%s]", env) + f.BoolVar(&flag.enable, "debug", enable, usage) + }) +} + +func (flag *DebugFlag) Process(ctx context.Context) error { + if !flag.enable { + return nil + } + + return flag.ProcessOnce(func() error { + // Base path for storing debug logs. + r := os.Getenv("GOVC_DEBUG_PATH") + if r == "" { + r = filepath.Join(os.Getenv("HOME"), ".govmomi") + } + r = filepath.Join(r, "debug") + + // Path for this particular run. + now := time.Now().Format("2006-01-02T15-04-05.999999999") + r = filepath.Join(r, now) + + err := os.MkdirAll(r, 0700) + if err != nil { + return err + } + + p := debug.FileProvider{ + Path: r, + } + + debug.SetProvider(&p) + return nil + }) +} diff --git a/vendor/github.com/vmware/govmomi/govc/flags/empty.go b/vendor/github.com/vmware/govmomi/govc/flags/empty.go new file mode 100644 index 000000000..0a1a438ca --- /dev/null +++ b/vendor/github.com/vmware/govmomi/govc/flags/empty.go @@ -0,0 +1,32 @@ +/* +Copyright (c) 2014 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package flags + +import ( + "flag" + + "golang.org/x/net/context" +) + +type EmptyFlag struct{} + +func (flag *EmptyFlag) Register(ctx context.Context, f *flag.FlagSet) { +} + +func (flag *EmptyFlag) Process(ctx context.Context) error { + return nil +} diff --git a/vendor/github.com/vmware/govmomi/govc/flags/host_connect.go b/vendor/github.com/vmware/govmomi/govc/flags/host_connect.go new file mode 100644 index 000000000..be39dbd0a --- /dev/null +++ b/vendor/github.com/vmware/govmomi/govc/flags/host_connect.go @@ -0,0 +1,79 @@ +/* +Copyright (c) 2015 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
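+
+The -noverify flag below pairs with AcceptThumbprint: when a connect task
+fails with SSLVerifyFault, the fault's thumbprint is adopted as SslThumbprint
+and the caller is expected to retry the task.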
+*/ + +package flags + +import ( + "flag" + "fmt" + + "golang.org/x/net/context" + + "github.com/vmware/govmomi/vim25/types" +) + +type HostConnectFlag struct { + common + + types.HostConnectSpec + + noverify bool +} + +var hostConnectFlagKey = flagKey("hostConnect") + +func NewHostConnectFlag(ctx context.Context) (*HostConnectFlag, context.Context) { + if v := ctx.Value(hostConnectFlagKey); v != nil { + return v.(*HostConnectFlag), ctx + } + + v := &HostConnectFlag{} + ctx = context.WithValue(ctx, hostConnectFlagKey, v) + return v, ctx +} + +func (flag *HostConnectFlag) Register(ctx context.Context, f *flag.FlagSet) { + flag.RegisterOnce(func() { + f.StringVar(&flag.HostName, "hostname", "", "Hostname or IP address of the host") + f.StringVar(&flag.UserName, "username", "", "Username of administration account on the host") + f.StringVar(&flag.Password, "password", "", "Password of administration account on the host") + f.StringVar(&flag.SslThumbprint, "fingerprint", "", "Fingerprint of the host's SSL certificate") + f.BoolVar(&flag.Force, "force", false, "Force when host is managed by another VC") + + f.BoolVar(&flag.noverify, "noverify", false, "When true, ignore host SSL certificate verification error") + }) +} + +func (flag *HostConnectFlag) Process(ctx context.Context) error { + return nil +} + +// AcceptThumbprint returns nil if the given error is an SSLVerifyFault and -noverify is true. +// In which case, flag.SslThumbprint is set to fault.Thumbprint and the caller should retry the task. +func (flag *HostConnectFlag) AcceptThumbprint(err error) error { + if f, ok := err.(types.HasFault); ok { + switch fault := f.Fault().(type) { + case *types.SSLVerifyFault: + if flag.noverify { + flag.SslThumbprint = fault.Thumbprint + return nil + } + return fmt.Errorf("%s Fingerprint is %s", err, fault.Thumbprint) + } + } + + return err +} diff --git a/vendor/github.com/vmware/govmomi/govc/flags/host_system.go b/vendor/github.com/vmware/govmomi/govc/flags/host_system.go new file mode 100644 index 000000000..a0d847239 --- /dev/null +++ b/vendor/github.com/vmware/govmomi/govc/flags/host_system.go @@ -0,0 +1,141 @@ +/* +Copyright (c) 2014-2015 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
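+
+HostSystemIfSpecified below intentionally returns (nil, nil) when no host is
+named: a host is optional for VM creation, where placement is determined by
+the mandatory resource pool instead.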
+*/ + +package flags + +import ( + "flag" + "fmt" + "os" + + "github.com/vmware/govmomi/object" + "golang.org/x/net/context" +) + +type HostSystemFlag struct { + common + + *ClientFlag + *DatacenterFlag + *SearchFlag + + name string + host *object.HostSystem + pool *object.ResourcePool +} + +var hostSystemFlagKey = flagKey("hostSystem") + +func NewHostSystemFlag(ctx context.Context) (*HostSystemFlag, context.Context) { + if v := ctx.Value(hostSystemFlagKey); v != nil { + return v.(*HostSystemFlag), ctx + } + + v := &HostSystemFlag{} + v.ClientFlag, ctx = NewClientFlag(ctx) + v.DatacenterFlag, ctx = NewDatacenterFlag(ctx) + v.SearchFlag, ctx = NewSearchFlag(ctx, SearchHosts) + ctx = context.WithValue(ctx, hostSystemFlagKey, v) + return v, ctx +} + +func (flag *HostSystemFlag) Register(ctx context.Context, f *flag.FlagSet) { + flag.RegisterOnce(func() { + flag.ClientFlag.Register(ctx, f) + flag.DatacenterFlag.Register(ctx, f) + flag.SearchFlag.Register(ctx, f) + + env := "GOVC_HOST" + value := os.Getenv(env) + usage := fmt.Sprintf("Host system [%s]", env) + f.StringVar(&flag.name, "host", value, usage) + }) +} + +func (flag *HostSystemFlag) Process(ctx context.Context) error { + return flag.ProcessOnce(func() error { + if err := flag.ClientFlag.Process(ctx); err != nil { + return err + } + if err := flag.DatacenterFlag.Process(ctx); err != nil { + return err + } + if err := flag.SearchFlag.Process(ctx); err != nil { + return err + } + return nil + }) +} + +func (flag *HostSystemFlag) HostSystemIfSpecified() (*object.HostSystem, error) { + if flag.host != nil { + return flag.host, nil + } + + // Use search flags if specified. + if flag.SearchFlag.IsSet() { + host, err := flag.SearchFlag.HostSystem() + if err != nil { + return nil, err + } + + flag.host = host + return flag.host, nil + } + + // Never look for a default host system. + // A host system parameter is optional for vm creation. It uses a mandatory + // resource pool parameter to determine where the vm should be placed. + if flag.name == "" { + return nil, nil + } + + finder, err := flag.Finder() + if err != nil { + return nil, err + } + + flag.host, err = finder.HostSystem(context.TODO(), flag.name) + return flag.host, err +} + +func (flag *HostSystemFlag) HostSystem() (*object.HostSystem, error) { + host, err := flag.HostSystemIfSpecified() + if err != nil { + return nil, err + } + + if host != nil { + return host, nil + } + + finder, err := flag.Finder() + if err != nil { + return nil, err + } + + flag.host, err = finder.DefaultHostSystem(context.TODO()) + return flag.host, err +} + +func (flag *HostSystemFlag) HostNetworkSystem() (*object.HostNetworkSystem, error) { + host, err := flag.HostSystem() + if err != nil { + return nil, err + } + + return host.ConfigManager().NetworkSystem(context.TODO()) +} diff --git a/vendor/github.com/vmware/govmomi/govc/flags/network.go b/vendor/github.com/vmware/govmomi/govc/flags/network.go new file mode 100644 index 000000000..602dd0f61 --- /dev/null +++ b/vendor/github.com/vmware/govmomi/govc/flags/network.go @@ -0,0 +1,125 @@ +/* +Copyright (c) 2014-2015 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package flags + +import ( + "flag" + "fmt" + "os" + + "github.com/vmware/govmomi/object" + "github.com/vmware/govmomi/vim25/types" + "golang.org/x/net/context" +) + +type NetworkFlag struct { + common + + *DatacenterFlag + + name string + net object.NetworkReference + adapter string + address string +} + +var networkFlagKey = flagKey("network") + +func NewNetworkFlag(ctx context.Context) (*NetworkFlag, context.Context) { + if v := ctx.Value(networkFlagKey); v != nil { + return v.(*NetworkFlag), ctx + } + + v := &NetworkFlag{} + v.DatacenterFlag, ctx = NewDatacenterFlag(ctx) + ctx = context.WithValue(ctx, networkFlagKey, v) + return v, ctx +} + +func (flag *NetworkFlag) Register(ctx context.Context, f *flag.FlagSet) { + flag.RegisterOnce(func() { + flag.DatacenterFlag.Register(ctx, f) + + env := "GOVC_NETWORK" + value := os.Getenv(env) + flag.Set(value) + usage := fmt.Sprintf("Network [%s]", env) + f.Var(flag, "net", usage) + f.StringVar(&flag.adapter, "net.adapter", "e1000", "Network adapter type") + f.StringVar(&flag.address, "net.address", "", "Network hardware address") + }) +} + +func (flag *NetworkFlag) Process(ctx context.Context) error { + return flag.ProcessOnce(func() error { + if err := flag.DatacenterFlag.Process(ctx); err != nil { + return err + } + return nil + }) +} + +func (flag *NetworkFlag) String() string { + return flag.name +} + +func (flag *NetworkFlag) Set(name string) error { + flag.name = name + return nil +} + +func (flag *NetworkFlag) Network() (object.NetworkReference, error) { + if flag.net != nil { + return flag.net, nil + } + + finder, err := flag.Finder() + if err != nil { + return nil, err + } + + if flag.net, err = finder.NetworkOrDefault(context.TODO(), flag.name); err != nil { + return nil, err + } + + return flag.net, nil +} + +func (flag *NetworkFlag) Device() (types.BaseVirtualDevice, error) { + net, err := flag.Network() + if err != nil { + return nil, err + } + + backing, err := net.EthernetCardBackingInfo(context.TODO()) + if err != nil { + return nil, err + } + + device, err := object.EthernetCardTypes().CreateEthernetCard(flag.adapter, backing) + if err != nil { + return nil, err + } + + if flag.address != "" { + card := device.(types.BaseVirtualEthernetCard).GetVirtualEthernetCard() + card.AddressType = string(types.VirtualEthernetCardMacTypeManual) + card.MacAddress = flag.address + } + + return device, nil +} diff --git a/vendor/github.com/vmware/govmomi/govc/flags/optional_bool.go b/vendor/github.com/vmware/govmomi/govc/flags/optional_bool.go new file mode 100644 index 000000000..6adef5181 --- /dev/null +++ b/vendor/github.com/vmware/govmomi/govc/flags/optional_bool.go @@ -0,0 +1,55 @@ +/* +Copyright (c) 2015 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package flags + +import ( + "flag" + "fmt" + "strconv" +) + +type optionalBool struct { + val **bool +} + +func (b *optionalBool) Set(s string) error { + v, err := strconv.ParseBool(s) + *b.val = &v + return err +} + +func (b *optionalBool) Get() interface{} { + if *b.val == nil { + return nil + } + return **b.val +} + +func (b *optionalBool) String() string { + if *b.val == nil { + return "" + } + return fmt.Sprintf("%v", **b.val) +} + +func (b *optionalBool) IsBoolFlag() bool { return true } + +// NewOptionalBool returns a flag.Value implementation where there is no default value. +// This avoids sending a default value over the wire as using flag.BoolVar() would. +func NewOptionalBool(v **bool) flag.Value { + return &optionalBool{v} +} diff --git a/vendor/github.com/vmware/govmomi/govc/flags/optional_bool_test.go b/vendor/github.com/vmware/govmomi/govc/flags/optional_bool_test.go new file mode 100644 index 000000000..127bf4666 --- /dev/null +++ b/vendor/github.com/vmware/govmomi/govc/flags/optional_bool_test.go @@ -0,0 +1,63 @@ +/* +Copyright (c) 2015 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package flags + +import ( + "flag" + "testing" +) + +func TestOptionalBool(t *testing.T) { + fs := flag.NewFlagSet("", flag.ContinueOnError) + var val *bool + + fs.Var(NewOptionalBool(&val), "obool", "optional bool") + + b := fs.Lookup("obool") + + if b.DefValue != "" { + t.Fail() + } + + if b.Value.String() != "" { + t.Fail() + } + + if b.Value.(flag.Getter).Get() != nil { + t.Fail() + } + + b.Value.Set("true") + + if b.Value.String() != "true" { + t.Fail() + } + + if b.Value.(flag.Getter).Get() != true { + t.Fail() + } + + b.Value.Set("false") + + if b.Value.String() != "false" { + t.Fail() + } + + if b.Value.(flag.Getter).Get() != false { + t.Fail() + } +} diff --git a/vendor/github.com/vmware/govmomi/govc/flags/output.go b/vendor/github.com/vmware/govmomi/govc/flags/output.go new file mode 100644 index 000000000..d6446f206 --- /dev/null +++ b/vendor/github.com/vmware/govmomi/govc/flags/output.go @@ -0,0 +1,210 @@ +/* +Copyright (c) 2014 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package flags
+
+import (
+	"encoding/json"
+	"flag"
+	"fmt"
+	"io"
+	"os"
+	"sync"
+	"time"
+
+	"golang.org/x/net/context"
+
+	"github.com/vmware/govmomi/vim25/progress"
+)
+
+type OutputWriter interface {
+	Write(io.Writer) error
+}
+
+type OutputFlag struct {
+	common
+
+	JSON bool
+	TTY  bool
+}
+
+var outputFlagKey = flagKey("output")
+
+func NewOutputFlag(ctx context.Context) (*OutputFlag, context.Context) {
+	if v := ctx.Value(outputFlagKey); v != nil {
+		return v.(*OutputFlag), ctx
+	}
+
+	v := &OutputFlag{}
+	ctx = context.WithValue(ctx, outputFlagKey, v)
+	return v, ctx
+}
+
+func (flag *OutputFlag) Register(ctx context.Context, f *flag.FlagSet) {
+	flag.RegisterOnce(func() {
+		f.BoolVar(&flag.JSON, "json", false, "Enable JSON output")
+	})
+}
+
+func (flag *OutputFlag) Process(ctx context.Context) error {
+	return flag.ProcessOnce(func() error {
+		if !flag.JSON {
+			// Assume we have a tty if not outputting JSON
+			flag.TTY = true
+		}
+
+		return nil
+	})
+}
+
+// Log outputs the specified string, prefixed with the current time.
+// A newline is not automatically added. If the specified string
+// starts with a '\r', the current line is cleared first.
+func (flag *OutputFlag) Log(s string) (int, error) {
+	if len(s) > 0 && s[0] == '\r' {
+		flag.Write([]byte{'\r', 033, '[', 'K'})
+		s = s[1:]
+	}
+
+	return flag.WriteString(time.Now().Format("[02-01-06 15:04:05] ") + s)
+}
+
+func (flag *OutputFlag) Write(b []byte) (int, error) {
+	if !flag.TTY {
+		return 0, nil
+	}
+
+	n, err := os.Stdout.Write(b)
+	os.Stdout.Sync()
+	return n, err
+}
+
+func (flag *OutputFlag) WriteString(s string) (int, error) {
+	return flag.Write([]byte(s))
+}
+
+func (flag *OutputFlag) WriteResult(result OutputWriter) error {
+	var err error
+	var out = os.Stdout
+
+	if flag.JSON {
+		err = json.NewEncoder(out).Encode(result)
+	} else {
+		err = result.Write(out)
+	}
+
+	return err
+}
+
+type progressLogger struct {
+	flag   *OutputFlag
+	prefix string
+
+	wg sync.WaitGroup
+
+	sink chan chan progress.Report
+	done chan struct{}
+}
+
+func newProgressLogger(flag *OutputFlag, prefix string) *progressLogger {
+	p := &progressLogger{
+		flag:   flag,
+		prefix: prefix,
+
+		sink: make(chan chan progress.Report),
+		done: make(chan struct{}),
+	}
+
+	p.wg.Add(1)
+
+	go p.loopA()
+
+	return p
+}
+
+// loopA runs before Sink() has been called.
+func (p *progressLogger) loopA() {
+	var err error
+
+	defer p.wg.Done()
+
+	tick := time.NewTicker(100 * time.Millisecond)
+	defer tick.Stop()
+
+	for stop := false; !stop; {
+		select {
+		case ch := <-p.sink:
+			err = p.loopB(tick, ch)
+			stop = true
+		case <-p.done:
+			stop = true
+		case <-tick.C:
+			line := fmt.Sprintf("\r%s", p.prefix)
+			p.flag.Log(line)
+		}
+	}
+
+	if err != nil && err != io.EOF {
+		p.flag.Log(fmt.Sprintf("\r%sError: %s\n", p.prefix, err))
+	} else {
+		p.flag.Log(fmt.Sprintf("\r%sOK\n", p.prefix))
+	}
+}
+
+// loopB runs after Sink() has been called.
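+// Until Sink() is called, loopA simply redraws the prefix on every tick;
+// loopB then appends percentage and detail from the most recent
+// progress.Report and returns the report's error, if any, once the channel
+// closes.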
+func (p *progressLogger) loopB(tick *time.Ticker, ch <-chan progress.Report) error { + var r progress.Report + var ok bool + var err error + + for ok = true; ok; { + select { + case r, ok = <-ch: + if !ok { + break + } + err = r.Error() + case <-tick.C: + line := fmt.Sprintf("\r%s", p.prefix) + if r != nil { + line += fmt.Sprintf("(%.0f%%", r.Percentage()) + detail := r.Detail() + if detail != "" { + line += fmt.Sprintf(", %s", detail) + } + line += ")" + } + p.flag.Log(line) + } + } + + return err +} + +func (p *progressLogger) Sink() chan<- progress.Report { + ch := make(chan progress.Report) + p.sink <- ch + return ch +} + +func (p *progressLogger) Wait() { + close(p.done) + p.wg.Wait() +} + +func (flag *OutputFlag) ProgressLogger(prefix string) *progressLogger { + return newProgressLogger(flag, prefix) +} diff --git a/vendor/github.com/vmware/govmomi/govc/flags/resource_pool.go b/vendor/github.com/vmware/govmomi/govc/flags/resource_pool.go new file mode 100644 index 000000000..c7459c978 --- /dev/null +++ b/vendor/github.com/vmware/govmomi/govc/flags/resource_pool.go @@ -0,0 +1,85 @@ +/* +Copyright (c) 2014-2015 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package flags + +import ( + "flag" + "fmt" + "os" + + "github.com/vmware/govmomi/object" + "golang.org/x/net/context" +) + +type ResourcePoolFlag struct { + common + + *DatacenterFlag + + name string + pool *object.ResourcePool +} + +var resourcePoolFlagKey = flagKey("resourcePool") + +func NewResourcePoolFlag(ctx context.Context) (*ResourcePoolFlag, context.Context) { + if v := ctx.Value(resourcePoolFlagKey); v != nil { + return v.(*ResourcePoolFlag), ctx + } + + v := &ResourcePoolFlag{} + v.DatacenterFlag, ctx = NewDatacenterFlag(ctx) + ctx = context.WithValue(ctx, resourcePoolFlagKey, v) + return v, ctx +} + +func (flag *ResourcePoolFlag) Register(ctx context.Context, f *flag.FlagSet) { + flag.RegisterOnce(func() { + flag.DatacenterFlag.Register(ctx, f) + + env := "GOVC_RESOURCE_POOL" + value := os.Getenv(env) + usage := fmt.Sprintf("Resource pool [%s]", env) + f.StringVar(&flag.name, "pool", value, usage) + }) +} + +func (flag *ResourcePoolFlag) Process(ctx context.Context) error { + return flag.ProcessOnce(func() error { + if err := flag.DatacenterFlag.Process(ctx); err != nil { + return err + } + return nil + }) +} + +func (flag *ResourcePoolFlag) ResourcePool() (*object.ResourcePool, error) { + if flag.pool != nil { + return flag.pool, nil + } + + finder, err := flag.Finder() + if err != nil { + return nil, err + } + + if flag.pool, err = finder.ResourcePoolOrDefault(context.TODO(), flag.name); err != nil { + return nil, err + } + + return flag.pool, nil +} diff --git a/vendor/github.com/vmware/govmomi/govc/flags/search.go b/vendor/github.com/vmware/govmomi/govc/flags/search.go new file mode 100644 index 000000000..c43912ffe --- /dev/null +++ b/vendor/github.com/vmware/govmomi/govc/flags/search.go @@ -0,0 +1,392 @@ +/* +Copyright (c) 2014-2015 VMware, Inc. All Rights Reserved. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package flags + +import ( + "errors" + "flag" + "fmt" + "strings" + + "github.com/vmware/govmomi/find" + "github.com/vmware/govmomi/object" + "github.com/vmware/govmomi/vim25" + "golang.org/x/net/context" +) + +const ( + SearchVirtualMachines = iota + 1 + SearchHosts + SearchVirtualApps +) + +type SearchFlag struct { + common + + *ClientFlag + *DatacenterFlag + + t int + entity string + + byDatastorePath string + byDNSName string + byInventoryPath string + byIP string + byUUID string + + isset bool +} + +var searchFlagKey = flagKey("search") + +func NewSearchFlag(ctx context.Context, t int) (*SearchFlag, context.Context) { + if v := ctx.Value(searchFlagKey); v != nil { + return v.(*SearchFlag), ctx + } + + v := &SearchFlag{ + t: t, + } + + v.ClientFlag, ctx = NewClientFlag(ctx) + v.DatacenterFlag, ctx = NewDatacenterFlag(ctx) + + switch t { + case SearchVirtualMachines: + v.entity = "VM" + case SearchHosts: + v.entity = "host" + case SearchVirtualApps: + v.entity = "vapp" + default: + panic("invalid search type") + } + + ctx = context.WithValue(ctx, searchFlagKey, v) + return v, ctx +} + +func (flag *SearchFlag) Register(ctx context.Context, fs *flag.FlagSet) { + flag.RegisterOnce(func() { + flag.ClientFlag.Register(ctx, fs) + flag.DatacenterFlag.Register(ctx, fs) + + register := func(v *string, f string, d string) { + f = fmt.Sprintf("%s.%s", strings.ToLower(flag.entity), f) + d = fmt.Sprintf(d, flag.entity) + fs.StringVar(v, f, "", d) + } + + switch flag.t { + case SearchVirtualMachines: + register(&flag.byDatastorePath, "path", "Find %s by path to .vmx file") + } + + switch flag.t { + case SearchVirtualMachines, SearchHosts: + register(&flag.byDNSName, "dns", "Find %s by FQDN") + register(&flag.byIP, "ip", "Find %s by IP address") + register(&flag.byUUID, "uuid", "Find %s by instance UUID") + } + + register(&flag.byInventoryPath, "ipath", "Find %s by inventory path") + }) +} + +func (flag *SearchFlag) Process(ctx context.Context) error { + return flag.ProcessOnce(func() error { + if err := flag.ClientFlag.Process(ctx); err != nil { + return err + } + if err := flag.DatacenterFlag.Process(ctx); err != nil { + return err + } + + flags := []string{ + flag.byDatastorePath, + flag.byDNSName, + flag.byInventoryPath, + flag.byIP, + flag.byUUID, + } + + flag.isset = false + for _, f := range flags { + if f != "" { + if flag.isset { + return errors.New("cannot use more than one search flag") + } + flag.isset = true + } + } + + return nil + }) +} + +func (flag *SearchFlag) IsSet() bool { + return flag.isset +} + +func (flag *SearchFlag) searchIndex(c *vim25.Client) *object.SearchIndex { + return object.NewSearchIndex(c) +} + +func (flag *SearchFlag) searchByDatastorePath(c *vim25.Client, dc *object.Datacenter) (object.Reference, error) { + switch flag.t { + case SearchVirtualMachines: + return flag.searchIndex(c).FindByDatastorePath(context.TODO(), dc, flag.byDatastorePath) + default: + panic("unsupported type") + } +} + +func (flag *SearchFlag) searchByDNSName(c 
*vim25.Client, dc *object.Datacenter) (object.Reference, error) { + switch flag.t { + case SearchVirtualMachines: + return flag.searchIndex(c).FindByDnsName(context.TODO(), dc, flag.byDNSName, true) + case SearchHosts: + return flag.searchIndex(c).FindByDnsName(context.TODO(), dc, flag.byDNSName, false) + default: + panic("unsupported type") + } +} + +func (flag *SearchFlag) searchByInventoryPath(c *vim25.Client, dc *object.Datacenter) (object.Reference, error) { + // TODO(PN): The datacenter flag should not be set because it is ignored. + return flag.searchIndex(c).FindByInventoryPath(context.TODO(), flag.byInventoryPath) +} + +func (flag *SearchFlag) searchByIP(c *vim25.Client, dc *object.Datacenter) (object.Reference, error) { + switch flag.t { + case SearchVirtualMachines: + return flag.searchIndex(c).FindByIp(context.TODO(), dc, flag.byIP, true) + case SearchHosts: + return flag.searchIndex(c).FindByIp(context.TODO(), dc, flag.byIP, false) + default: + panic("unsupported type") + } +} + +func (flag *SearchFlag) searchByUUID(c *vim25.Client, dc *object.Datacenter) (object.Reference, error) { + switch flag.t { + case SearchVirtualMachines: + return flag.searchIndex(c).FindByUuid(context.TODO(), dc, flag.byUUID, true, nil) + case SearchHosts: + return flag.searchIndex(c).FindByUuid(context.TODO(), dc, flag.byUUID, false, nil) + default: + panic("unsupported type") + } +} + +func (flag *SearchFlag) search() (object.Reference, error) { + var ref object.Reference + var err error + + c, err := flag.Client() + if err != nil { + return nil, err + } + + dc, err := flag.Datacenter() + if err != nil { + return nil, err + } + + switch { + case flag.byDatastorePath != "": + ref, err = flag.searchByDatastorePath(c, dc) + case flag.byDNSName != "": + ref, err = flag.searchByDNSName(c, dc) + case flag.byInventoryPath != "": + ref, err = flag.searchByInventoryPath(c, dc) + case flag.byIP != "": + ref, err = flag.searchByIP(c, dc) + case flag.byUUID != "": + ref, err = flag.searchByUUID(c, dc) + default: + err = errors.New("no search flag specified") + } + + if err != nil { + return nil, err + } + + if ref == nil { + return nil, fmt.Errorf("no such %s", flag.entity) + } + + return ref, nil +} + +func (flag *SearchFlag) VirtualMachine() (*object.VirtualMachine, error) { + ref, err := flag.search() + if err != nil { + return nil, err + } + + vm, ok := ref.(*object.VirtualMachine) + if !ok { + return nil, fmt.Errorf("expected VirtualMachine entity, got %s", ref.Reference().Type) + } + + return vm, nil +} + +func (flag *SearchFlag) VirtualMachines(args []string) ([]*object.VirtualMachine, error) { + var out []*object.VirtualMachine + + if flag.IsSet() { + vm, err := flag.VirtualMachine() + if err != nil { + return nil, err + } + + out = append(out, vm) + return out, nil + } + + // List virtual machines + if len(args) == 0 { + return nil, errors.New("no argument") + } + + finder, err := flag.Finder() + if err != nil { + return nil, err + } + + var nfe error + + // List virtual machines for every argument + for _, arg := range args { + vms, err := finder.VirtualMachineList(context.TODO(), arg) + if err != nil { + if _, ok := err.(*find.NotFoundError); ok { + // Let caller decide how to handle NotFoundError + nfe = err + continue + } + return nil, err + } + + out = append(out, vms...) 
+ } + + return out, nfe +} + +func (flag *SearchFlag) VirtualApp() (*object.VirtualApp, error) { + ref, err := flag.search() + if err != nil { + return nil, err + } + + app, ok := ref.(*object.VirtualApp) + if !ok { + return nil, fmt.Errorf("expected VirtualApp entity, got %s", ref.Reference().Type) + } + + return app, nil +} + +func (flag *SearchFlag) VirtualApps(args []string) ([]*object.VirtualApp, error) { + var out []*object.VirtualApp + + if flag.IsSet() { + app, err := flag.VirtualApp() + if err != nil { + return nil, err + } + + out = append(out, app) + return out, nil + } + + // List virtual apps + if len(args) == 0 { + return nil, errors.New("no argument") + } + + finder, err := flag.Finder() + if err != nil { + return nil, err + } + + // List virtual apps for every argument + for _, arg := range args { + apps, err := finder.VirtualAppList(context.TODO(), arg) + if err != nil { + return nil, err + } + + out = append(out, apps...) + } + + return out, nil +} + +func (flag *SearchFlag) HostSystem() (*object.HostSystem, error) { + ref, err := flag.search() + if err != nil { + return nil, err + } + + host, ok := ref.(*object.HostSystem) + if !ok { + return nil, fmt.Errorf("expected HostSystem entity, got %s", ref.Reference().Type) + } + + return host, nil +} + +func (flag *SearchFlag) HostSystems(args []string) ([]*object.HostSystem, error) { + var out []*object.HostSystem + + if flag.IsSet() { + host, err := flag.HostSystem() + if err != nil { + return nil, err + } + + out = append(out, host) + return out, nil + } + + // List host system + if len(args) == 0 { + return nil, errors.New("no argument") + } + + finder, err := flag.Finder() + if err != nil { + return nil, err + } + + // List host systems for every argument + for _, arg := range args { + vms, err := finder.HostSystemList(context.TODO(), arg) + if err != nil { + return nil, err + } + + out = append(out, vms...) + } + + return out, nil +} diff --git a/vendor/github.com/vmware/govmomi/govc/flags/version.go b/vendor/github.com/vmware/govmomi/govc/flags/version.go new file mode 100644 index 000000000..d0b5f0ac7 --- /dev/null +++ b/vendor/github.com/vmware/govmomi/govc/flags/version.go @@ -0,0 +1,61 @@ +/* +Copyright (c) 2014 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package flags + +import ( + "strconv" + "strings" +) + +type version []int + +func ParseVersion(s string) (version, error) { + v := make(version, 0) + ps := strings.Split(s, ".") + for _, p := range ps { + i, err := strconv.Atoi(p) + if err != nil { + return nil, err + } + + v = append(v, i) + } + + return v, nil +} + +func (v version) Lte(u version) bool { + lv := len(v) + lu := len(u) + + for i := 0; i < lv; i++ { + // Everything up to here has been equal and v has more elements than u. + if i >= lu { + return false + } + + // Move to next digit if equal. + if v[i] == u[i] { + continue + } + + return v[i] < u[i] + } + + // Equal. 
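+	// Examples:
+	//   {5,5}.Lte({5,6})   => true
+	//   {5,6}.Lte({5,5})   => false
+	//   {5,5}.Lte({5,5,1}) => true  (a strict prefix compares as less-or-equal)
+	//   {5,5,1}.Lte({5,5}) => false (v has more elements than u)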
+ return true +} diff --git a/vendor/github.com/vmware/govmomi/govc/flags/version_test.go b/vendor/github.com/vmware/govmomi/govc/flags/version_test.go new file mode 100644 index 000000000..dc155e9a5 --- /dev/null +++ b/vendor/github.com/vmware/govmomi/govc/flags/version_test.go @@ -0,0 +1,63 @@ +/* +Copyright (c) 2014 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package flags + +import "testing" + +func TestParseVersion(t *testing.T) { + var v version + var err error + + v, err = ParseVersion("5.5.5.5") + if err != nil { + t.Error(err) + } + + if len(v) != 4 { + t.Errorf("Expected %d elements, got %d", 4, len(v)) + } + + for i := 0; i < len(v); i++ { + if v[i] != 5 { + t.Errorf("Expected %d, got %d", 5, v[i]) + } + } +} + +func TestLte(t *testing.T) { + v1, err := ParseVersion("5.5") + if err != nil { + panic(err) + } + + v2, err := ParseVersion("5.6") + if err != nil { + panic(err) + } + + if !v1.Lte(v1) { + t.Errorf("Expected 5.5 <= 5.5") + } + + if !v1.Lte(v2) { + t.Errorf("Expected 5.5 <= 5.6") + } + + if v2.Lte(v1) { + t.Errorf("Expected not 5.6 <= 5.5") + } +} diff --git a/vendor/github.com/vmware/govmomi/govc/flags/virtual_app.go b/vendor/github.com/vmware/govmomi/govc/flags/virtual_app.go new file mode 100644 index 000000000..e85819fa0 --- /dev/null +++ b/vendor/github.com/vmware/govmomi/govc/flags/virtual_app.go @@ -0,0 +1,104 @@ +/* +Copyright (c) 2015 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package flags + +import ( + "flag" + "fmt" + "os" + + "github.com/vmware/govmomi/object" + "golang.org/x/net/context" +) + +type VirtualAppFlag struct { + common + + *DatacenterFlag + *SearchFlag + + name string + app *object.VirtualApp +} + +var virtualAppFlagKey = flagKey("virtualApp") + +func NewVirtualAppFlag(ctx context.Context) (*VirtualAppFlag, context.Context) { + if v := ctx.Value(virtualAppFlagKey); v != nil { + return v.(*VirtualAppFlag), ctx + } + + v := &VirtualAppFlag{} + v.DatacenterFlag, ctx = NewDatacenterFlag(ctx) + v.SearchFlag, ctx = NewSearchFlag(ctx, SearchVirtualApps) + ctx = context.WithValue(ctx, virtualAppFlagKey, v) + return v, ctx +} + +func (flag *VirtualAppFlag) Register(ctx context.Context, f *flag.FlagSet) { + flag.RegisterOnce(func() { + flag.DatacenterFlag.Register(ctx, f) + flag.SearchFlag.Register(ctx, f) + + env := "GOVC_VAPP" + value := os.Getenv(env) + usage := fmt.Sprintf("Virtual App [%s]", env) + f.StringVar(&flag.name, "vapp", value, usage) + }) +} + +func (flag *VirtualAppFlag) Process(ctx context.Context) error { + return flag.ProcessOnce(func() error { + if err := flag.DatacenterFlag.Process(ctx); err != nil { + return err + } + if err := flag.SearchFlag.Process(ctx); err != nil { + return err + } + return nil + }) +} + +func (flag *VirtualAppFlag) VirtualApp() (*object.VirtualApp, error) { + if flag.app != nil { + return flag.app, nil + } + + // Use search flags if specified. + if flag.SearchFlag.IsSet() { + app, err := flag.SearchFlag.VirtualApp() + if err != nil { + return nil, err + } + + flag.app = app + return flag.app, nil + } + + // Never look for a default virtual app. + if flag.name == "" { + return nil, nil + } + + finder, err := flag.Finder() + if err != nil { + return nil, err + } + + flag.app, err = finder.VirtualApp(context.TODO(), flag.name) + return flag.app, err +} diff --git a/vendor/github.com/vmware/govmomi/govc/flags/virtual_machine.go b/vendor/github.com/vmware/govmomi/govc/flags/virtual_machine.go new file mode 100644 index 000000000..0d3cc9a11 --- /dev/null +++ b/vendor/github.com/vmware/govmomi/govc/flags/virtual_machine.go @@ -0,0 +1,110 @@ +/* +Copyright (c) 2014-2015 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package flags + +import ( + "flag" + "fmt" + "os" + + "github.com/vmware/govmomi/object" + "golang.org/x/net/context" +) + +type VirtualMachineFlag struct { + common + + *ClientFlag + *DatacenterFlag + *SearchFlag + + name string + vm *object.VirtualMachine +} + +var virtualMachineFlagKey = flagKey("virtualMachine") + +func NewVirtualMachineFlag(ctx context.Context) (*VirtualMachineFlag, context.Context) { + if v := ctx.Value(virtualMachineFlagKey); v != nil { + return v.(*VirtualMachineFlag), ctx + } + + v := &VirtualMachineFlag{} + v.ClientFlag, ctx = NewClientFlag(ctx) + v.DatacenterFlag, ctx = NewDatacenterFlag(ctx) + v.SearchFlag, ctx = NewSearchFlag(ctx, SearchVirtualMachines) + ctx = context.WithValue(ctx, virtualMachineFlagKey, v) + return v, ctx +} + +func (flag *VirtualMachineFlag) Register(ctx context.Context, f *flag.FlagSet) { + flag.RegisterOnce(func() { + flag.ClientFlag.Register(ctx, f) + flag.DatacenterFlag.Register(ctx, f) + flag.SearchFlag.Register(ctx, f) + + env := "GOVC_VM" + value := os.Getenv(env) + usage := fmt.Sprintf("Virtual machine [%s]", env) + f.StringVar(&flag.name, "vm", value, usage) + }) +} + +func (flag *VirtualMachineFlag) Process(ctx context.Context) error { + return flag.ProcessOnce(func() error { + if err := flag.ClientFlag.Process(ctx); err != nil { + return err + } + if err := flag.DatacenterFlag.Process(ctx); err != nil { + return err + } + if err := flag.SearchFlag.Process(ctx); err != nil { + return err + } + return nil + }) +} + +func (flag *VirtualMachineFlag) VirtualMachine() (*object.VirtualMachine, error) { + if flag.vm != nil { + return flag.vm, nil + } + + // Use search flags if specified. + if flag.SearchFlag.IsSet() { + vm, err := flag.SearchFlag.VirtualMachine() + if err != nil { + return nil, err + } + + flag.vm = vm + return flag.vm, nil + } + + // Never look for a default virtual machine. + if flag.name == "" { + return nil, nil + } + + finder, err := flag.Finder() + if err != nil { + return nil, err + } + + flag.vm, err = finder.VirtualMachine(context.TODO(), flag.name) + return flag.vm, err +} diff --git a/vendor/github.com/vmware/govmomi/govc/host/add.go b/vendor/github.com/vmware/govmomi/govc/host/add.go new file mode 100644 index 000000000..583f64cf7 --- /dev/null +++ b/vendor/github.com/vmware/govmomi/govc/host/add.go @@ -0,0 +1,153 @@ +/* +Copyright (c) 2015 VMware, Inc. All Rights Reserved. + + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package host + +import ( + "flag" + "fmt" + + "github.com/vmware/govmomi/govc/cli" + "github.com/vmware/govmomi/govc/flags" + "github.com/vmware/govmomi/object" + "github.com/vmware/govmomi/vim25/methods" + "github.com/vmware/govmomi/vim25/types" + "golang.org/x/net/context" +) + +type add struct { + *flags.ClientFlag + *flags.DatacenterFlag + *flags.HostConnectFlag + + parent string + connect bool +} + +func init() { + cli.Register("host.add", &add{}) +} + +func (cmd *add) Register(ctx context.Context, f *flag.FlagSet) { + cmd.ClientFlag, ctx = flags.NewClientFlag(ctx) + cmd.ClientFlag.Register(ctx, f) + + cmd.DatacenterFlag, ctx = flags.NewDatacenterFlag(ctx) + cmd.DatacenterFlag.Register(ctx, f) + + cmd.HostConnectFlag, ctx = flags.NewHostConnectFlag(ctx) + cmd.HostConnectFlag.Register(ctx, f) + + f.StringVar(&cmd.parent, "parent", "", "Path to folder to add the host to") + f.BoolVar(&cmd.connect, "connect", true, "Immediately connect to host") +} + +func (cmd *add) Process(ctx context.Context) error { + if err := cmd.ClientFlag.Process(ctx); err != nil { + return err + } + if err := cmd.DatacenterFlag.Process(ctx); err != nil { + return err + } + if err := cmd.HostConnectFlag.Process(ctx); err != nil { + return err + } + if cmd.HostName == "" { + return flag.ErrHelp + } + if cmd.UserName == "" { + return flag.ErrHelp + } + if cmd.Password == "" { + return flag.ErrHelp + } + return nil +} + +func (cmd *add) Description() string { + return `Add host to datacenter. + +The host is added to the folder specified by the 'parent' flag. If not given, +this defaults to the hosts folder in the specified or default datacenter.` +} + +func (cmd *add) Add(ctx context.Context, parent *object.Folder) error { + spec := cmd.HostConnectSpec + + req := types.AddStandaloneHost_Task{ + This: parent.Reference(), + Spec: spec, + AddConnected: cmd.connect, + } + + res, err := methods.AddStandaloneHost_Task(ctx, parent.Client(), &req) + if err != nil { + return err + } + + logger := cmd.ProgressLogger(fmt.Sprintf("adding %s to folder %s... ", spec.HostName, parent.InventoryPath)) + defer logger.Wait() + + task := object.NewTask(parent.Client(), res.Returnval) + _, err = task.WaitForResult(ctx, logger) + return err +} + +func (cmd *add) Run(ctx context.Context, f *flag.FlagSet) error { + var parent *object.Folder + + if f.NArg() != 0 { + return flag.ErrHelp + } + + if cmd.parent == "" { + dc, err := cmd.Datacenter() + if err != nil { + return err + } + + folders, err := dc.Folders(ctx) + if err != nil { + return err + } + + parent = folders.HostFolder + } else { + finder, err := cmd.Finder() + if err != nil { + return err + } + + parent, err = finder.Folder(ctx, cmd.parent) + if err != nil { + return err + } + } + + err := cmd.Add(ctx, parent) + if err == nil { + return nil + } + + // Check if we failed due to SSLVerifyFault and -noverify is set + if err := cmd.AcceptThumbprint(err); err != nil { + return err + } + + // Accepted unverified thumbprint, try again + return cmd.Add(ctx, parent) +} diff --git a/vendor/github.com/vmware/govmomi/govc/host/autostart/add.go b/vendor/github.com/vmware/govmomi/govc/host/autostart/add.go new file mode 100644 index 000000000..ec1fe3459 --- /dev/null +++ b/vendor/github.com/vmware/govmomi/govc/host/autostart/add.go @@ -0,0 +1,63 @@ +/* +Copyright (c) 2015 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package autostart + +import ( + "flag" + + "golang.org/x/net/context" + + "github.com/vmware/govmomi/govc/cli" + "github.com/vmware/govmomi/vim25/types" +) + +type add struct { + *AutostartFlag +} + +func init() { + cli.Register("host.autostart.add", &add{}) +} + +func (cmd *add) Register(ctx context.Context, f *flag.FlagSet) { + cmd.AutostartFlag, ctx = newAutostartFlag(ctx) + cmd.AutostartFlag.Register(ctx, f) +} + +func (cmd *add) Process(ctx context.Context) error { + if err := cmd.AutostartFlag.Process(ctx); err != nil { + return err + } + return nil +} + +func (cmd *add) Usage() string { + return "VM..." +} + +func (cmd *add) Run(ctx context.Context, f *flag.FlagSet) error { + var powerInfo = types.AutoStartPowerInfo{ + StartAction: "powerOn", + StartDelay: -1, + StartOrder: -1, + StopAction: "systemDefault", + StopDelay: -1, + WaitForHeartbeat: types.AutoStartWaitHeartbeatSettingSystemDefault, + } + + return cmd.ReconfigureVMs(f.Args(), powerInfo) +} diff --git a/vendor/github.com/vmware/govmomi/govc/host/autostart/autostart.go b/vendor/github.com/vmware/govmomi/govc/host/autostart/autostart.go new file mode 100644 index 000000000..4e260db32 --- /dev/null +++ b/vendor/github.com/vmware/govmomi/govc/host/autostart/autostart.go @@ -0,0 +1,173 @@ +/* +Copyright (c) 2015 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package autostart + +import ( + "errors" + "flag" + + "github.com/vmware/govmomi/govc/flags" + "github.com/vmware/govmomi/object" + "github.com/vmware/govmomi/vim25/methods" + "github.com/vmware/govmomi/vim25/mo" + "github.com/vmware/govmomi/vim25/types" + "golang.org/x/net/context" +) + +type AutostartFlag struct { + *flags.ClientFlag + *flags.DatacenterFlag + *flags.HostSystemFlag +} + +func newAutostartFlag(ctx context.Context) (*AutostartFlag, context.Context) { + f := &AutostartFlag{} + f.ClientFlag, ctx = flags.NewClientFlag(ctx) + f.DatacenterFlag, ctx = flags.NewDatacenterFlag(ctx) + f.HostSystemFlag, ctx = flags.NewHostSystemFlag(ctx) + return f, ctx +} + +func (f *AutostartFlag) Register(ctx context.Context, fs *flag.FlagSet) { + f.ClientFlag.Register(ctx, fs) + f.DatacenterFlag.Register(ctx, fs) + f.HostSystemFlag.Register(ctx, fs) +} + +func (f *AutostartFlag) Process(ctx context.Context) error { + if err := f.ClientFlag.Process(ctx); err != nil { + return err + } + if err := f.DatacenterFlag.Process(ctx); err != nil { + return err + } + if err := f.HostSystemFlag.Process(ctx); err != nil { + return err + } + return nil +} + +// VirtualMachines returns list of virtual machine objects based on the +// arguments specified on the command line. 
This helper is defined in +// flags.SearchFlag as well, but that pulls in other virtual machine flags that +// are not relevant here. +func (f *AutostartFlag) VirtualMachines(args []string) ([]*object.VirtualMachine, error) { + if len(args) == 0 { + return nil, errors.New("no argument") + } + + finder, err := f.Finder() + if err != nil { + return nil, err + } + + var out []*object.VirtualMachine + for _, arg := range args { + vms, err := finder.VirtualMachineList(context.TODO(), arg) + if err != nil { + return nil, err + } + + out = append(out, vms...) + } + + return out, nil +} + +func (f *AutostartFlag) HostAutoStartManager() (*mo.HostAutoStartManager, error) { + h, err := f.HostSystem() + if err != nil { + return nil, err + } + + var mhs mo.HostSystem + err = h.Properties(context.TODO(), h.Reference(), []string{"configManager.autoStartManager"}, &mhs) + if err != nil { + return nil, err + } + + var mhas mo.HostAutoStartManager + err = h.Properties(context.TODO(), *mhs.ConfigManager.AutoStartManager, nil, &mhas) + if err != nil { + return nil, err + } + + return &mhas, nil +} + +func (f *AutostartFlag) ReconfigureDefaults(template types.AutoStartDefaults) error { + c, err := f.Client() + if err != nil { + return err + } + + mhas, err := f.HostAutoStartManager() + if err != nil { + return err + } + + req := types.ReconfigureAutostart{ + This: mhas.Reference(), + Spec: types.HostAutoStartManagerConfig{ + Defaults: &template, + }, + } + + _, err = methods.ReconfigureAutostart(context.TODO(), c, &req) + if err != nil { + return err + } + + return nil +} + +func (f *AutostartFlag) ReconfigureVMs(args []string, template types.AutoStartPowerInfo) error { + c, err := f.Client() + if err != nil { + return err + } + + mhas, err := f.HostAutoStartManager() + if err != nil { + return err + } + + req := types.ReconfigureAutostart{ + This: mhas.Reference(), + Spec: types.HostAutoStartManagerConfig{ + PowerInfo: make([]types.AutoStartPowerInfo, 0), + }, + } + + vms, err := f.VirtualMachines(args) + if err != nil { + return err + } + + for _, vm := range vms { + pi := template + pi.Key = vm.Reference() + req.Spec.PowerInfo = append(req.Spec.PowerInfo, pi) + } + + _, err = methods.ReconfigureAutostart(context.TODO(), c, &req) + if err != nil { + return err + } + + return nil +} diff --git a/vendor/github.com/vmware/govmomi/govc/host/autostart/configure.go b/vendor/github.com/vmware/govmomi/govc/host/autostart/configure.go new file mode 100644 index 000000000..405a26ce8 --- /dev/null +++ b/vendor/github.com/vmware/govmomi/govc/host/autostart/configure.go @@ -0,0 +1,63 @@ +/* +Copyright (c) 2015 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package autostart + +import ( + "flag" + + "golang.org/x/net/context" + + "github.com/vmware/govmomi/govc/cli" + "github.com/vmware/govmomi/govc/flags" + "github.com/vmware/govmomi/vim25/types" +) + +type configure struct { + *AutostartFlag + + types.AutoStartDefaults +} + +func init() { + cli.Register("host.autostart.configure", &configure{}) +} + +func (cmd *configure) Register(ctx context.Context, f *flag.FlagSet) { + cmd.AutostartFlag, ctx = newAutostartFlag(ctx) + cmd.AutostartFlag.Register(ctx, f) + + f.Var(flags.NewOptionalBool(&cmd.Enabled), "enabled", "") + f.IntVar(&cmd.StartDelay, "start-delay", 0, "") + f.StringVar(&cmd.StopAction, "stop-action", "", "") + f.IntVar(&cmd.StopDelay, "stop-delay", 0, "") + f.Var(flags.NewOptionalBool(&cmd.WaitForHeartbeat), "wait-for-heartbeat", "") +} + +func (cmd *configure) Process(ctx context.Context) error { + if err := cmd.AutostartFlag.Process(ctx); err != nil { + return err + } + return nil +} + +func (cmd *configure) Usage() string { + return "" +} + +func (cmd *configure) Run(ctx context.Context, f *flag.FlagSet) error { + return cmd.ReconfigureDefaults(cmd.AutoStartDefaults) +} diff --git a/vendor/github.com/vmware/govmomi/govc/host/autostart/info.go b/vendor/github.com/vmware/govmomi/govc/host/autostart/info.go new file mode 100644 index 000000000..33bd9ec89 --- /dev/null +++ b/vendor/github.com/vmware/govmomi/govc/host/autostart/info.go @@ -0,0 +1,143 @@ +/* +Copyright (c) 2015 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package autostart + +import ( + "encoding/json" + "flag" + "fmt" + "io" + "os" + "text/tabwriter" + + "github.com/vmware/govmomi/govc/cli" + "github.com/vmware/govmomi/govc/flags" + "github.com/vmware/govmomi/vim25" + "github.com/vmware/govmomi/vim25/mo" + "golang.org/x/net/context" +) + +type info struct { + cli.Command + + *AutostartFlag + *flags.OutputFlag +} + +func init() { + cli.Register("host.autostart.info", &info{}) +} + +func (cmd *info) Register(ctx context.Context, f *flag.FlagSet) { + cmd.AutostartFlag, ctx = newAutostartFlag(ctx) + cmd.AutostartFlag.Register(ctx, f) + cmd.OutputFlag, ctx = flags.NewOutputFlag(ctx) + cmd.OutputFlag.Register(ctx, f) +} + +func (cmd *info) Process(ctx context.Context) error { + if err := cmd.AutostartFlag.Process(ctx); err != nil { + return err + } + if err := cmd.OutputFlag.Process(ctx); err != nil { + return err + } + return nil +} + +func (cmd *info) Usage() string { + return "" +} + +func (cmd *info) Run(ctx context.Context, f *flag.FlagSet) error { + client, err := cmd.Client() + if err != nil { + return err + } + + mhas, err := cmd.HostAutoStartManager() + if err != nil { + return err + } + + return cmd.WriteResult(&infoResult{client, mhas}) +} + +type infoResult struct { + client *vim25.Client + mhas *mo.HostAutoStartManager +} + +func (r *infoResult) MarshalJSON() ([]byte, error) { + return json.Marshal(r.mhas) +} + +// vmPaths resolves the paths for the VMs in the result. 
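+// The path is built by walking each entity's ancestors (via mo.Ancestors)
+// and joining their names, skipping the root folder, e.g. "/dc/vm/foo".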
+func (r *infoResult) vmPaths() (map[string]string, error) { + paths := make(map[string]string) + for _, info := range r.mhas.Config.PowerInfo { + mes, err := mo.Ancestors(context.TODO(), r.client, r.client.ServiceContent.PropertyCollector, info.Key) + if err != nil { + return nil, err + } + + path := "" + for _, me := range mes { + // Skip root entity in building inventory path. + if me.Parent == nil { + continue + } + path += "/" + me.Name + } + + paths[info.Key.Value] = path + } + + return paths, nil +} + +func (r *infoResult) Write(w io.Writer) error { + paths, err := r.vmPaths() + if err != nil { + return err + } + + tw := tabwriter.NewWriter(os.Stdout, 2, 0, 2, ' ', 0) + + fmt.Fprintf(tw, "VM") + fmt.Fprintf(tw, "\tStartAction") + fmt.Fprintf(tw, "\tStartDelay") + fmt.Fprintf(tw, "\tStartOrder") + fmt.Fprintf(tw, "\tStopAction") + fmt.Fprintf(tw, "\tStopDelay") + fmt.Fprintf(tw, "\tWaitForHeartbeat") + fmt.Fprintf(tw, "\n") + + for _, info := range r.mhas.Config.PowerInfo { + fmt.Fprintf(tw, "%s", paths[info.Key.Value]) + fmt.Fprintf(tw, "\t%s", info.StartAction) + fmt.Fprintf(tw, "\t%d", info.StartDelay) + fmt.Fprintf(tw, "\t%d", info.StartOrder) + fmt.Fprintf(tw, "\t%s", info.StopAction) + fmt.Fprintf(tw, "\t%d", info.StopDelay) + fmt.Fprintf(tw, "\t%s", info.WaitForHeartbeat) + fmt.Fprintf(tw, "\n") + } + + _ = tw.Flush() + return nil +} diff --git a/vendor/github.com/vmware/govmomi/govc/host/autostart/remove.go b/vendor/github.com/vmware/govmomi/govc/host/autostart/remove.go new file mode 100644 index 000000000..b41d5ec11 --- /dev/null +++ b/vendor/github.com/vmware/govmomi/govc/host/autostart/remove.go @@ -0,0 +1,63 @@ +/* +Copyright (c) 2015 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package autostart + +import ( + "flag" + + "golang.org/x/net/context" + + "github.com/vmware/govmomi/govc/cli" + "github.com/vmware/govmomi/vim25/types" +) + +type remove struct { + *AutostartFlag +} + +func init() { + cli.Register("host.autostart.remove", &remove{}) +} + +func (cmd *remove) Register(ctx context.Context, f *flag.FlagSet) { + cmd.AutostartFlag, ctx = newAutostartFlag(ctx) + cmd.AutostartFlag.Register(ctx, f) +} + +func (cmd *remove) Process(ctx context.Context) error { + if err := cmd.AutostartFlag.Process(ctx); err != nil { + return err + } + return nil +} + +func (cmd *remove) Usage() string { + return "VM..." +} + +func (cmd *remove) Run(ctx context.Context, f *flag.FlagSet) error { + var powerInfo = types.AutoStartPowerInfo{ + StartAction: "none", + StartDelay: -1, + StartOrder: -1, + StopAction: "none", + StopDelay: -1, + WaitForHeartbeat: types.AutoStartWaitHeartbeatSettingSystemDefault, + } + + return cmd.ReconfigureVMs(f.Args(), powerInfo) +} diff --git a/vendor/github.com/vmware/govmomi/govc/host/disconnect.go b/vendor/github.com/vmware/govmomi/govc/host/disconnect.go new file mode 100644 index 000000000..6c8ed9b80 --- /dev/null +++ b/vendor/github.com/vmware/govmomi/govc/host/disconnect.go @@ -0,0 +1,81 @@ +/* +Copyright (c) 2015 VMware, Inc. 
All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package host + +import ( + "flag" + "fmt" + + "golang.org/x/net/context" + + "github.com/vmware/govmomi/govc/cli" + "github.com/vmware/govmomi/govc/flags" + "github.com/vmware/govmomi/object" +) + +type disconnect struct { + *flags.HostSystemFlag +} + +func init() { + cli.Register("host.disconnect", &disconnect{}) +} + +func (cmd *disconnect) Register(ctx context.Context, f *flag.FlagSet) { + cmd.HostSystemFlag, ctx = flags.NewHostSystemFlag(ctx) + cmd.HostSystemFlag.Register(ctx, f) +} + +func (cmd *disconnect) Process(ctx context.Context) error { + if err := cmd.HostSystemFlag.Process(ctx); err != nil { + return err + } + return nil +} + +func (cmd *disconnect) Description() string { + return `Disconnect host from vCenter and instruct the host to stop sending heartbeats.` +} + +func (cmd *disconnect) Disconnect(ctx context.Context, host *object.HostSystem) error { + task, err := host.Disconnect(ctx) + if err != nil { + return err + } + + logger := cmd.ProgressLogger(fmt.Sprintf("%s disconnecting... ", host.InventoryPath)) + defer logger.Wait() + + _, err = task.WaitForResult(ctx, logger) + return err +} + +func (cmd *disconnect) Run(ctx context.Context, f *flag.FlagSet) error { + hosts, err := cmd.HostSystems(f.Args()) + if err != nil { + return err + } + + for _, host := range hosts { + err = cmd.Disconnect(ctx, host) + if err != nil { + return err + } + } + + return nil +} diff --git a/vendor/github.com/vmware/govmomi/govc/host/esxcli/command.go b/vendor/github.com/vmware/govmomi/govc/host/esxcli/command.go new file mode 100644 index 000000000..c36e89a40 --- /dev/null +++ b/vendor/github.com/vmware/govmomi/govc/host/esxcli/command.go @@ -0,0 +1,149 @@ +/* +Copyright (c) 2014 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package esxcli
+
+import (
+	"flag"
+	"fmt"
+	"strings"
+
+	"github.com/vmware/govmomi/vim25/types"
+)
+
+type Command struct {
+	name []string
+	args []string
+}
+
+type CommandInfoItem struct {
+	Name        string `xml:"name"`
+	DisplayName string `xml:"displayName"`
+	Help        string `xml:"help"`
+}
+
+type CommandInfoParam struct {
+	CommandInfoItem
+	Aliases []string `xml:"aliases"`
+	Flag    bool     `xml:"flag"`
+}
+
+type CommandInfoHint struct {
+	Key   string `xml:"key"`
+	Value string `xml:"value"`
+}
+
+type CommandInfoHints []CommandInfoHint
+
+type CommandInfoMethod struct {
+	CommandInfoItem
+	Param []CommandInfoParam `xml:"param"`
+	Hints CommandInfoHints   `xml:"hints"`
+}
+
+type CommandInfo struct {
+	CommandInfoItem
+	Method []*CommandInfoMethod `xml:"method"`
+}
+
+func NewCommand(args []string) *Command {
+	c := &Command{}
+
+	for i, arg := range args {
+		if strings.HasPrefix(arg, "-") {
+			c.args = args[i:]
+			break
+		} else {
+			c.name = append(c.name, arg)
+		}
+	}
+
+	return c
+}
+
+func (c *Command) Namespace() string {
+	return strings.Join(c.name[:len(c.name)-1], ".")
+}
+
+func (c *Command) Name() string {
+	return c.name[len(c.name)-1]
+}
+
+func (c *Command) Method() string {
+	return "vim.EsxCLI." + strings.Join(c.name, ".")
+}
+
+func (c *Command) Moid() string {
+	return "ha-cli-handler-" + strings.Join(c.name[:len(c.name)-1], "-")
+}
+
+// Parse generates a flag.FlagSet based on the given []CommandInfoParam and
+// returns arguments for use with methods.ExecuteSoap
+func (c *Command) Parse(params []CommandInfoParam) ([]types.ReflectManagedMethodExecuterSoapArgument, error) {
+	flags := flag.NewFlagSet(strings.Join(c.name, " "), flag.ExitOnError)
+	vals := make([]string, len(params))
+
+	for i, p := range params {
+		v := &vals[i]
+		for _, a := range p.Aliases {
+			a = strings.TrimPrefix(a[1:], "-")
+			flags.StringVar(v, a, "", p.Help)
+		}
+	}
+
+	err := flags.Parse(c.args)
+	if err != nil {
+		return nil, err
+	}
+
+	args := []types.ReflectManagedMethodExecuterSoapArgument{}
+
+	for i, p := range params {
+		if vals[i] == "" {
+			continue
+		}
+		args = append(args, c.Argument(p.Name, vals[i]))
+	}
+
+	return args, nil
+}
+
+func (c *Command) Argument(name string, val string) types.ReflectManagedMethodExecuterSoapArgument {
+	return types.ReflectManagedMethodExecuterSoapArgument{
+		Name: name,
+		Val:  fmt.Sprintf("<%s>%s</%s>", name, val, name),
+	}
+}
+
+func (h CommandInfoHints) Formatter() string {
+	for _, hint := range h {
+		if hint.Key == "formatter" {
+			return hint.Value
+		}
+	}
+
+	return "simple"
+}
+
+func (h CommandInfoHints) Fields() []string {
+	for _, hint := range h {
+		if strings.HasPrefix(hint.Key, "fields:") {
+			return strings.Split(hint.Value, ",")
+		}
+	}
+
+	return nil
+}
diff --git a/vendor/github.com/vmware/govmomi/govc/host/esxcli/command_test.go b/vendor/github.com/vmware/govmomi/govc/host/esxcli/command_test.go
new file mode 100644
index 000000000..210da8459
--- /dev/null
+++ b/vendor/github.com/vmware/govmomi/govc/host/esxcli/command_test.go
@@ -0,0 +1,124 @@
+/*
+Copyright (c) 2014 VMware, Inc. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package esxcli
+
+import (
+	"reflect"
+	"testing"
+
+	"github.com/vmware/govmomi/vim25/types"
+)
+
+func TestSystemSettingsAdvancedSetCommand(t *testing.T) {
+	c := NewCommand([]string{"system", "settings", "advanced", "set", "-o", "/Net/GuestIPHack", "-i", "1"})
+
+	tests := []struct {
+		f      func() string
+		expect string
+	}{
+		{c.Name, "set"},
+		{c.Namespace, "system.settings.advanced"},
+		{c.Method, "vim.EsxCLI.system.settings.advanced.set"},
+		{c.Moid, "ha-cli-handler-system-settings-advanced"},
+	}
+
+	for _, test := range tests {
+		actual := test.f()
+		if actual != test.expect {
+			t.Errorf("%s != %s", actual, test.expect)
+		}
+	}
+
+	params := []CommandInfoParam{
+		{
+			CommandInfoItem: CommandInfoItem{Name: "default", DisplayName: "default", Help: "Reset the option to its default value."},
+			Aliases:         []string{"-d", "--default"},
+			Flag:            true,
+		},
+		{
+			CommandInfoItem: CommandInfoItem{Name: "intvalue", DisplayName: "int-value", Help: "If the option is an integer value use this option."},
+			Aliases:         []string{"-i", "--int-value"},
+			Flag:            false,
+		},
+		{
+			CommandInfoItem: CommandInfoItem{Name: "option", DisplayName: "option", Help: "The name of the option to set the value of. Example: \"/Misc/HostName\""},
+			Aliases:         []string{"-o", "--option"},
+			Flag:            false,
+		},
+		{
+			CommandInfoItem: CommandInfoItem{Name: "stringvalue", DisplayName: "string-value", Help: "If the option is a string use this option."},
+			Aliases:         []string{"-s", "--string-value"},
+			Flag:            false,
+		},
+	}
+
+	args, err := c.Parse(params)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	expect := []types.ReflectManagedMethodExecuterSoapArgument{
+		{
+			DynamicData: types.DynamicData{},
+			Name:        "intvalue",
+			Val:         "<intvalue>1</intvalue>",
+		},
+		{
+			DynamicData: types.DynamicData{},
+			Name:        "option",
+			Val:         "<option>/Net/GuestIPHack</option>",
+		},
+	}
+
+	if !reflect.DeepEqual(args, expect) {
+		t.Errorf("%s != %s", args, expect)
+	}
+}
+
+func TestNetworkVmListCommand(t *testing.T) {
+	c := NewCommand([]string{"network", "vm", "list"})
+
+	tests := []struct {
+		f      func() string
+		expect string
+	}{
+		{c.Name, "list"},
+		{c.Namespace, "network.vm"},
+		{c.Method, "vim.EsxCLI.network.vm.list"},
+		{c.Moid, "ha-cli-handler-network-vm"},
+	}
+
+	for _, test := range tests {
+		actual := test.f()
+		if actual != test.expect {
+			t.Errorf("%s != %s", actual, test.expect)
+		}
+	}
+
+	var params []CommandInfoParam
+
+	args, err := c.Parse(params)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	expect := []types.ReflectManagedMethodExecuterSoapArgument{}
+
+	if !reflect.DeepEqual(args, expect) {
+		t.Errorf("%s != %s", args, expect)
+	}
+}
diff --git a/vendor/github.com/vmware/govmomi/govc/host/esxcli/esxcli.go b/vendor/github.com/vmware/govmomi/govc/host/esxcli/esxcli.go
new file mode 100644
index 000000000..010d3b885
--- /dev/null
+++ b/vendor/github.com/vmware/govmomi/govc/host/esxcli/esxcli.go
@@ -0,0 +1,153 @@
+/*
+Copyright (c) 2014-2015 VMware, Inc. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package esxcli
+
+import (
+	"flag"
+	"fmt"
+	"os"
+	"sort"
+	"strings"
+	"text/tabwriter"
+
+	"golang.org/x/net/context"
+
+	"github.com/vmware/govmomi/govc/cli"
+	"github.com/vmware/govmomi/govc/flags"
+)
+
+type esxcli struct {
+	*flags.HostSystemFlag
+
+	hints bool
+}
+
+func init() {
+	cli.Register("host.esxcli", &esxcli{})
+}
+
+func (cmd *esxcli) Usage() string {
+	return "COMMAND [ARG]..."
+}
+
+func (cmd *esxcli) Register(ctx context.Context, f *flag.FlagSet) {
+	cmd.HostSystemFlag, ctx = flags.NewHostSystemFlag(ctx)
+	cmd.HostSystemFlag.Register(ctx, f)
+
+	f.BoolVar(&cmd.hints, "hints", true, "Use command info hints when formatting output")
+}
+
+func (cmd *esxcli) Process(ctx context.Context) error {
+	if err := cmd.HostSystemFlag.Process(ctx); err != nil {
+		return err
+	}
+	return nil
+}
+
+func (cmd *esxcli) Run(ctx context.Context, f *flag.FlagSet) error {
+	c, err := cmd.Client()
+	if err != nil {
+		return err
+	}
+
+	host, err := cmd.HostSystem()
+	if err != nil {
+		return err
+	}
+
+	e, err := NewExecutor(c, host)
+	if err != nil {
+		return err
+	}
+
+	res, err := e.Run(f.Args())
+	if err != nil {
+		return err
+	}
+
+	if len(res.Values) == 0 {
+		return nil
+	}
+
+	var formatType string
+	if cmd.hints {
+		formatType = res.Info.Hints.Formatter()
+	}
+
+	// TODO: OutputFlag / format options
+	switch formatType {
+	case "table":
+		cmd.formatTable(res)
+	default:
+		cmd.formatSimple(res)
+	}
+
+	return nil
+}
+
+func (cmd *esxcli) formatSimple(res *Response) {
+	var keys []string
+	for key := range res.Values[0] {
+		keys = append(keys, key)
+	}
+	sort.Strings(keys)
+
+	tw := tabwriter.NewWriter(os.Stdout, 2, 0, 2, ' ', 0)
+
+	for i, rv := range res.Values {
+		if i > 0 {
+			fmt.Fprintln(tw)
+			_ = tw.Flush()
+		}
+		for _, key := range keys {
+			fmt.Fprintf(tw, "%s:\t%s\n", key, strings.Join(rv[key], ", "))
+		}
+	}
+
+	_ = tw.Flush()
+}
+
+func (cmd *esxcli) formatTable(res *Response) {
+	fields := res.Info.Hints.Fields()
+
+	tw := tabwriter.NewWriter(os.Stdout, len(fields), 0, 2, ' ', 0)
+
+	var hr []string
+	for _, name := range fields {
+		hr = append(hr, strings.Repeat("-", len(name)))
+	}
+
+	fmt.Fprintln(tw, strings.Join(fields, "\t"))
+	fmt.Fprintln(tw, strings.Join(hr, "\t"))
+
+	for _, vals := range res.Values {
+		var row []string
+
+		for _, name := range fields {
+			key := strings.Replace(name, " ", "", -1)
+			if val, ok := vals[key]; ok {
+				row = append(row, strings.Join(val, ", "))
+			} else {
+				row = append(row, "")
+			}
+		}
+
+		fmt.Fprintln(tw, strings.Join(row, "\t"))
+	}
+
+	_ = tw.Flush()
}
diff --git a/vendor/github.com/vmware/govmomi/govc/host/esxcli/executor.go b/vendor/github.com/vmware/govmomi/govc/host/esxcli/executor.go
new file mode 100644
index 000000000..055791056
--- /dev/null
+++ b/vendor/github.com/vmware/govmomi/govc/host/esxcli/executor.go
@@ -0,0 +1,166 @@
+/*
+Copyright (c) 2014-2015 VMware, Inc. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/ + +package esxcli + +import ( + "errors" + "fmt" + + "github.com/vmware/govmomi/object" + "github.com/vmware/govmomi/vim25" + "github.com/vmware/govmomi/vim25/methods" + "github.com/vmware/govmomi/vim25/types" + "github.com/vmware/govmomi/vim25/xml" + "golang.org/x/net/context" +) + +type Executor struct { + c *vim25.Client + host *object.HostSystem + mme *types.ReflectManagedMethodExecuter + dtm *types.InternalDynamicTypeManager + info map[string]*CommandInfo +} + +func NewExecutor(c *vim25.Client, host *object.HostSystem) (*Executor, error) { + e := &Executor{ + c: c, + host: host, + info: make(map[string]*CommandInfo), + } + + { + req := types.RetrieveManagedMethodExecuter{ + This: host.Reference(), + } + + res, err := methods.RetrieveManagedMethodExecuter(context.TODO(), c, &req) + if err != nil { + return nil, err + } + + e.mme = res.Returnval + } + + { + req := types.RetrieveDynamicTypeManager{ + This: host.Reference(), + } + + res, err := methods.RetrieveDynamicTypeManager(context.TODO(), c, &req) + if err != nil { + return nil, err + } + + e.dtm = res.Returnval + } + + return e, nil +} + +func (e *Executor) CommandInfo(c *Command) (*CommandInfoMethod, error) { + ns := c.Namespace() + var info *CommandInfo + var ok bool + + if info, ok = e.info[ns]; !ok { + req := types.ExecuteSoap{ + Moid: "ha-dynamic-type-manager-local-cli-cliinfo", + Method: "vim.CLIInfo.FetchCLIInfo", + Argument: []types.ReflectManagedMethodExecuterSoapArgument{ + c.Argument("typeName", "vim.EsxCLI."+ns), + }, + } + + info = new(CommandInfo) + if err := e.Execute(&req, info); err != nil { + return nil, err + } + + e.info[ns] = info + } + + name := c.Name() + for _, method := range info.Method { + if method.Name == name { + return method, nil + } + } + + return nil, fmt.Errorf("method '%s' not found in name space '%s'", name, c.Namespace()) +} + +func (e *Executor) NewRequest(args []string) (*types.ExecuteSoap, *CommandInfoMethod, error) { + c := NewCommand(args) + + info, err := e.CommandInfo(c) + if err != nil { + return nil, nil, err + } + + sargs, err := c.Parse(info.Param) + if err != nil { + return nil, nil, err + } + + sreq := types.ExecuteSoap{ + Moid: c.Moid(), + Method: c.Method(), + Argument: sargs, + } + + return &sreq, info, nil +} + +func (e *Executor) Execute(req *types.ExecuteSoap, res interface{}) error { + req.This = e.mme.ManagedObjectReference + req.Version = "urn:vim25/5.0" + + x, err := methods.ExecuteSoap(context.TODO(), e.c, req) + if err != nil { + return err + } + + if x.Returnval != nil { + if x.Returnval.Fault != nil { + return errors.New(x.Returnval.Fault.FaultMsg) + } + + if err := xml.Unmarshal([]byte(x.Returnval.Response), res); err != nil { + return err + } + } + + return nil +} + +func (e *Executor) Run(args []string) (*Response, error) { + req, info, err := e.NewRequest(args) + if err != nil { + return nil, err + } + + res := &Response{ + Info: info, + } + + if err := e.Execute(req, res); err != nil { + return nil, err + } + + return res, nil +} diff --git a/vendor/github.com/vmware/govmomi/govc/host/esxcli/firewall_info.go b/vendor/github.com/vmware/govmomi/govc/host/esxcli/firewall_info.go new file mode 100644 index 000000000..ee4477a2f --- /dev/null +++ b/vendor/github.com/vmware/govmomi/govc/host/esxcli/firewall_info.go @@ -0,0 +1,45 @@ +/* +Copyright (c) 2015 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package esxcli
+
+import "github.com/vmware/govmomi/object"
+
+type FirewallInfo struct {
+	Loaded        bool
+	Enabled       bool
+	DefaultAction string
+}
+
+// GetFirewallInfo via 'esxcli network firewall get'
+// The HostFirewallSystem type does not expose this data.
+// This helper can be useful in particular to determine if the firewall is enabled or disabled.
+func GetFirewallInfo(s *object.HostSystem) (*FirewallInfo, error) {
+	x, err := NewExecutor(s.Client(), s)
+	if err != nil {
+		return nil, err
+	}
+
+	res, err := x.Run([]string{"network", "firewall", "get"})
+	if err != nil {
+		return nil, err
+	}
+
+	info := &FirewallInfo{
+		Loaded:        res.Values[0]["Loaded"][0] == "true",
+		Enabled:       res.Values[0]["Enabled"][0] == "true",
+		DefaultAction: res.Values[0]["DefaultAction"][0],
+	}
+
+	return info, nil
+}
diff --git a/vendor/github.com/vmware/govmomi/govc/host/esxcli/fixtures/network_vm_list.xml b/vendor/github.com/vmware/govmomi/govc/host/esxcli/fixtures/network_vm_list.xml
new file mode 100644
index 000000000..087144974
--- /dev/null
+++ b/vendor/github.com/vmware/govmomi/govc/host/esxcli/fixtures/network_vm_list.xml
@@ -0,0 +1,15 @@
+<output xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:type="ArrayOfDataObject">
+ <DataObject xsi:type="DataObject">
+  <Name>foo</Name>
+  <Networks>VM Network</Networks>
+  <Networks>dougm</Networks>
+  <NumPorts>2</NumPorts>
+  <WorldID>98842</WorldID>
+ </DataObject>
+ <DataObject xsi:type="DataObject">
+  <Name>bar</Name>
+  <Networks>VM Network</Networks>
+  <NumPorts>1</NumPorts>
+  <WorldID>236235</WorldID>
+ </DataObject>
+</output>
diff --git a/vendor/github.com/vmware/govmomi/govc/host/esxcli/fixtures/network_vm_port_list.xml b/vendor/github.com/vmware/govmomi/govc/host/esxcli/fixtures/network_vm_port_list.xml
new file mode 100644
index 000000000..b9e3474a7
--- /dev/null
+++ b/vendor/github.com/vmware/govmomi/govc/host/esxcli/fixtures/network_vm_port_list.xml
@@ -0,0 +1,12 @@
+<output xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:type="ArrayOfDataObject">
+ <DataObject xsi:type="DataObject">
+  <DVPortID></DVPortID>
+  <IPAddress>192.168.247.149</IPAddress>
+  <MACAddress>00:0c:29:12:b2:cf</MACAddress>
+  <PortID>33554438</PortID>
+  <Portgroup>VM Network</Portgroup>
+  <TeamUplink>vmnic0</TeamUplink>
+  <UplinkPortID>33554434</UplinkPortID>
+  <vSwitch>vSwitch0</vSwitch>
+ </DataObject>
+</output>
diff --git a/vendor/github.com/vmware/govmomi/govc/host/esxcli/fixtures/system_hostname_get.xml b/vendor/github.com/vmware/govmomi/govc/host/esxcli/fixtures/system_hostname_get.xml
new file mode 100644
index 000000000..9c1e9462f
--- /dev/null
+++ b/vendor/github.com/vmware/govmomi/govc/host/esxcli/fixtures/system_hostname_get.xml
@@ -0,0 +1,5 @@
+<output xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:type="DataObject">
+ <DomainName>localdomain</DomainName>
+ <FullyQualifiedDomainName>esxbox.localdomain</FullyQualifiedDomainName>
+ <HostName>esxbox</HostName>
+</output>
diff --git a/vendor/github.com/vmware/govmomi/govc/host/esxcli/guest_info.go b/vendor/github.com/vmware/govmomi/govc/host/esxcli/guest_info.go
new file mode 100644
index 000000000..566979338
--- /dev/null
+++ b/vendor/github.com/vmware/govmomi/govc/host/esxcli/guest_info.go
@@ -0,0 +1,116 @@
+/*
+Copyright (c) 2014-2015 VMware, Inc. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package esxcli
+
+import (
+	"strings"
+
+	"github.com/vmware/govmomi/object"
+	"github.com/vmware/govmomi/property"
+	"github.com/vmware/govmomi/vim25"
+	"github.com/vmware/govmomi/vim25/mo"
+	"github.com/vmware/govmomi/vim25/types"
+	"golang.org/x/net/context"
+)
+
+type hostInfo struct {
+	*Executor
+	wids map[string]string
+}
+
+type GuestInfo struct {
+	c     *vim25.Client
+	hosts map[string]*hostInfo
+}
+
+func NewGuestInfo(c *vim25.Client) *GuestInfo {
+	return &GuestInfo{
+		c:     c,
+		hosts: make(map[string]*hostInfo),
+	}
+}
+
+func (g *GuestInfo) hostInfo(ref *types.ManagedObjectReference) (*hostInfo, error) {
+	// cache executor and uuid -> worldid map
+	if h, ok := g.hosts[ref.Value]; ok {
+		return h, nil
+	}
+
+	host := object.NewHostSystem(g.c, *ref)
+
+	e, err := NewExecutor(g.c, host)
+	if err != nil {
+		return nil, err
+	}
+
+	res, err := e.Run([]string{"vm", "process", "list"})
+	if err != nil {
+		return nil, err
+	}
+
+	ids := make(map[string]string, len(res.Values))
+
+	for _, process := range res.Values {
+		// Normalize uuid, esxcli and mo.VirtualMachine have different formats
+		uuid := strings.Replace(process["UUID"][0], " ", "", -1)
+		uuid = strings.Replace(uuid, "-", "", -1)
+
+		ids[uuid] = process["WorldID"][0]
+	}
+
+	h := &hostInfo{e, ids}
+	g.hosts[ref.Value] = h
+
+	return h, nil
+}
+
+// IpAddress attempts to find the guest IP address using esxcli.
+// ESX hosts must be configured with the /Net/GuestIPHack enabled.
+// For example:
+// $ govc host.esxcli -- system settings advanced set -o /Net/GuestIPHack -i 1
+func (g *GuestInfo) IpAddress(vm *object.VirtualMachine) (string, error) {
+	var mvm mo.VirtualMachine
+
+	pc := property.DefaultCollector(g.c)
+	err := pc.RetrieveOne(context.TODO(), vm.Reference(), []string{"runtime.host", "config.uuid"}, &mvm)
+	if err != nil {
+		return "", err
+	}
+
+	h, err := g.hostInfo(mvm.Runtime.Host)
+	if err != nil {
+		return "", err
+	}
+
+	// Normalize uuid, esxcli and mo.VirtualMachine have different formats
+	uuid := strings.Replace(mvm.Config.Uuid, "-", "", -1)
+
+	if wid, ok := h.wids[uuid]; ok {
+		res, err := h.Run([]string{"network", "vm", "port", "list", "--world-id", wid})
+		if err != nil {
+			return "", err
+		}
+
+		if len(res.Values) == 1 {
+			if ip, ok := res.Values[0]["IPAddress"]; ok {
+				return ip[0], nil
+			}
+		}
+	}
+
+	return "", nil
+}
diff --git a/vendor/github.com/vmware/govmomi/govc/host/esxcli/response.go b/vendor/github.com/vmware/govmomi/govc/host/esxcli/response.go
new file mode 100644
index 000000000..2957748c6
--- /dev/null
+++ b/vendor/github.com/vmware/govmomi/govc/host/esxcli/response.go
@@ -0,0 +1,97 @@
+/*
+Copyright (c) 2014-2015 VMware, Inc. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/ +package esxcli + +import ( + "io" + + "github.com/vmware/govmomi/vim25/xml" +) + +type Values map[string][]string + +type Response struct { + Info *CommandInfoMethod + Values []Values +} + +func (v Values) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error { + for { + t, err := d.Token() + if err != nil { + if err == io.EOF { + return nil + } + return err + } + + if s, ok := t.(xml.StartElement); ok { + t, err = d.Token() + if err != nil { + return err + } + + key := s.Name.Local + var val string + if c, ok := t.(xml.CharData); ok { + val = string(c) + } + v[key] = append(v[key], val) + } + } +} + +func (r *Response) Type(start xml.StartElement) string { + for _, a := range start.Attr { + if a.Name.Local == "type" { + return a.Value + } + } + return "" +} + +func (r *Response) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error { + stype := r.Type(start) + + if stype != "ArrayOfDataObject" { + v := Values{} + if err := d.DecodeElement(&v, &start); err != nil { + return err + } + r.Values = append(r.Values, v) + return nil + } + + for { + t, err := d.Token() + if err != nil { + if err == io.EOF { + return nil + } + return err + } + + if s, ok := t.(xml.StartElement); ok { + if s.Name.Local == "DataObject" { + v := Values{} + if err := d.DecodeElement(&v, &s); err != nil { + return err + } + r.Values = append(r.Values, v) + } + } + } +} diff --git a/vendor/github.com/vmware/govmomi/govc/host/esxcli/response_test.go b/vendor/github.com/vmware/govmomi/govc/host/esxcli/response_test.go new file mode 100644 index 000000000..2d96ccef2 --- /dev/null +++ b/vendor/github.com/vmware/govmomi/govc/host/esxcli/response_test.go @@ -0,0 +1,105 @@ +/* +Copyright (c) 2014 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package esxcli + +import ( + "os" + "reflect" + "testing" + + "github.com/vmware/govmomi/vim25/types" + "github.com/vmware/govmomi/vim25/xml" +) + +func load(name string) *Response { + f, err := os.Open(name) + if err != nil { + panic(err) + } + + defer f.Close() + + var b Response + + dec := xml.NewDecoder(f) + dec.TypeFunc = types.TypeFunc() + if err := dec.Decode(&b); err != nil { + panic(err) + } + + return &b +} + +func TestSystemHostnameGetResponse(t *testing.T) { + res := load("fixtures/system_hostname_get.xml") + + expect := []Values{ + { + "DomainName": {"localdomain"}, + "FullyQualifiedDomainName": {"esxbox.localdomain"}, + "HostName": {"esxbox"}, + }, + } + + if !reflect.DeepEqual(res.Values, expect) { + t.Errorf("%s != %s", res.Values, expect) + } +} + +func TestNetworkVmList(t *testing.T) { + res := load("fixtures/network_vm_list.xml") + + expect := []Values{ + { + "Name": {"foo"}, + "Networks": {"VM Network", "dougm"}, + "NumPorts": {"2"}, + "WorldID": {"98842"}, + }, + { + "Name": {"bar"}, + "Networks": {"VM Network"}, + "NumPorts": {"1"}, + "WorldID": {"236235"}, + }, + } + + if !reflect.DeepEqual(res.Values, expect) { + t.Errorf("%s != %s", res.Values, expect) + } +} + +func TestNetworkVmPortList(t *testing.T) { + r := load("fixtures/network_vm_port_list.xml") + + expect := []Values{ + { + "IPAddress": {"192.168.247.149"}, + "MACAddress": {"00:0c:29:12:b2:cf"}, + "PortID": {"33554438"}, + "Portgroup": {"VM Network"}, + "TeamUplink": {"vmnic0"}, + "UplinkPortID": {"33554434"}, + "vSwitch": {"vSwitch0"}, + "DVPortID": {""}, + }, + } + + if !reflect.DeepEqual(r.Values, expect) { + t.Errorf("%s != %s", r.Values, expect) + } +} diff --git a/vendor/github.com/vmware/govmomi/govc/host/firewall/find.go b/vendor/github.com/vmware/govmomi/govc/host/firewall/find.go new file mode 100644 index 000000000..fd649fbd2 --- /dev/null +++ b/vendor/github.com/vmware/govmomi/govc/host/firewall/find.go @@ -0,0 +1,128 @@ +/* +Copyright (c) 2015 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package firewall + +import ( + "flag" + "fmt" + "os" + + "golang.org/x/net/context" + + "github.com/vmware/govmomi/govc/cli" + "github.com/vmware/govmomi/govc/flags" + "github.com/vmware/govmomi/govc/host/esxcli" + "github.com/vmware/govmomi/object" + "github.com/vmware/govmomi/vim25/types" +) + +type find struct { + *flags.ClientFlag + *flags.OutputFlag + *flags.HostSystemFlag + + enabled bool + check bool + + types.HostFirewallRule +} + +func init() { + cli.Register("firewall.ruleset.find", &find{}) +} + +func (cmd *find) Register(ctx context.Context, f *flag.FlagSet) { + cmd.ClientFlag, ctx = flags.NewClientFlag(ctx) + cmd.ClientFlag.Register(ctx, f) + cmd.OutputFlag, ctx = flags.NewOutputFlag(ctx) + cmd.OutputFlag.Register(ctx, f) + cmd.HostSystemFlag, ctx = flags.NewHostSystemFlag(ctx) + cmd.HostSystemFlag.Register(ctx, f) + + f.BoolVar(&cmd.check, "c", true, "Check if esx firewall is enabled") + f.BoolVar(&cmd.enabled, "enabled", true, "Find enabled rule sets if true, disabled if false") + f.StringVar((*string)(&cmd.Direction), "direction", string(types.HostFirewallRuleDirectionOutbound), "Direction") + f.StringVar((*string)(&cmd.PortType), "type", string(types.HostFirewallRulePortTypeDst), "Port type") + f.StringVar((*string)(&cmd.Protocol), "proto", string(types.HostFirewallRuleProtocolTcp), "Protocol") + f.IntVar(&cmd.Port, "port", 0, "Port") +} + +func (cmd *find) Process(ctx context.Context) error { + if err := cmd.ClientFlag.Process(ctx); err != nil { + return err + } + if err := cmd.OutputFlag.Process(ctx); err != nil { + return err + } + if err := cmd.HostSystemFlag.Process(ctx); err != nil { + return err + } + return nil +} + +func (cmd *find) Description() string { + return `Find firewall rulesets matching the given rule. + +For a complete list of rulesets: govc host.esxcli network firewall ruleset list +For a complete list of rules: govc host.esxcli network firewall ruleset rule list` +} + +func (cmd *find) Run(ctx context.Context, f *flag.FlagSet) error { + host, err := cmd.HostSystem() + if err != nil { + return err + } + + fs, err := host.ConfigManager().FirewallSystem(ctx) + if err != nil { + return err + } + + if cmd.check { + esxfw, err := esxcli.GetFirewallInfo(host) + if err != nil { + return err + } + + if !esxfw.Enabled { + fmt.Fprintln(os.Stderr, "host firewall is disabled") + } + } + + info, err := fs.Info(ctx) + if err != nil { + return err + } + + if f.NArg() != 0 { + // TODO: f.Args() -> types.HostFirewallRulesetIpList + return flag.ErrHelp + } + + rs := object.HostFirewallRulesetList(info.Ruleset) + matched, err := rs.EnabledByRule(cmd.HostFirewallRule, cmd.enabled) + + if err != nil { + return err + } + + for _, r := range matched { + fmt.Println(r.Key) + } + + return nil +} diff --git a/vendor/github.com/vmware/govmomi/govc/host/info.go b/vendor/github.com/vmware/govmomi/govc/host/info.go new file mode 100644 index 000000000..956791f3e --- /dev/null +++ b/vendor/github.com/vmware/govmomi/govc/host/info.go @@ -0,0 +1,159 @@ +/* +Copyright (c) 2014-2015 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package host + +import ( + "flag" + "fmt" + "io" + "os" + "text/tabwriter" + + "golang.org/x/net/context" + + "github.com/vmware/govmomi/govc/cli" + "github.com/vmware/govmomi/govc/flags" + "github.com/vmware/govmomi/object" + "github.com/vmware/govmomi/property" + "github.com/vmware/govmomi/vim25/mo" + "github.com/vmware/govmomi/vim25/types" +) + +type info struct { + *flags.ClientFlag + *flags.OutputFlag + *flags.HostSystemFlag +} + +func init() { + cli.Register("host.info", &info{}) +} + +func (cmd *info) Register(ctx context.Context, f *flag.FlagSet) { + cmd.ClientFlag, ctx = flags.NewClientFlag(ctx) + cmd.ClientFlag.Register(ctx, f) + + cmd.OutputFlag, ctx = flags.NewOutputFlag(ctx) + cmd.OutputFlag.Register(ctx, f) + + cmd.HostSystemFlag, ctx = flags.NewHostSystemFlag(ctx) + cmd.HostSystemFlag.Register(ctx, f) +} + +func (cmd *info) Process(ctx context.Context) error { + if err := cmd.ClientFlag.Process(ctx); err != nil { + return err + } + if err := cmd.OutputFlag.Process(ctx); err != nil { + return err + } + if err := cmd.HostSystemFlag.Process(ctx); err != nil { + return err + } + return nil +} + +func (cmd *info) Run(ctx context.Context, f *flag.FlagSet) error { + c, err := cmd.Client() + if err != nil { + return err + } + + var res infoResult + var props []string + + if cmd.OutputFlag.JSON { + props = nil // Load everything + } else { + props = []string{"summary"} // Load summary + } + + // We could do without the -host flag, leaving it for compat + host, err := cmd.HostSystemIfSpecified() + if err != nil { + return err + } + + // Default only if there is a single host + if host == nil && f.NArg() == 0 { + host, err = cmd.HostSystem() + if err != nil { + return err + } + } + + if host != nil { + res.objects = append(res.objects, host) + } else { + res.objects, err = cmd.HostSystems(f.Args()) + if err != nil { + return err + } + } + + if len(res.objects) != 0 { + refs := make([]types.ManagedObjectReference, 0, len(res.objects)) + for _, o := range res.objects { + refs = append(refs, o.Reference()) + } + + pc := property.DefaultCollector(c) + err = pc.Retrieve(ctx, refs, props, &res.HostSystems) + if err != nil { + return err + } + } + + return cmd.WriteResult(&res) +} + +type infoResult struct { + HostSystems []mo.HostSystem + objects []*object.HostSystem +} + +func (r *infoResult) Write(w io.Writer) error { + // Maintain order via r.objects as Property collector does not always return results in order. 
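+	// Index the retrieved HostSystems by managed object reference so each
+	// requested object can be looked up in constant time below.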
+	objects := make(map[types.ManagedObjectReference]mo.HostSystem, len(r.HostSystems))
+	for _, o := range r.HostSystems {
+	objects[o.Reference()] = o
+	}
+
+	tw := tabwriter.NewWriter(os.Stdout, 2, 0, 2, ' ', 0)
+
+	for _, o := range r.objects {
+	host := objects[o.Reference()]
+	s := host.Summary
+	h := s.Hardware
+	z := s.QuickStats
+	ncpu := int(h.NumCpuPkgs * h.NumCpuCores)
+	cpuUsage := 100 * float64(z.OverallCpuUsage) / float64(ncpu*h.CpuMhz)
+	memUsage := 100 * float64(z.OverallMemoryUsage<<20) / float64(h.MemorySize)
+
+	fmt.Fprintf(tw, "Name:\t%s\n", s.Config.Name)
+	fmt.Fprintf(tw, " Path:\t%s\n", o.InventoryPath)
+	fmt.Fprintf(tw, " Manufacturer:\t%s\n", h.Vendor)
+	fmt.Fprintf(tw, " Logical CPUs:\t%d CPUs @ %dMHz\n", ncpu, h.CpuMhz)
+	fmt.Fprintf(tw, " Processor type:\t%s\n", h.CpuModel)
+	fmt.Fprintf(tw, " CPU usage:\t%d MHz (%.1f%%)\n", z.OverallCpuUsage, cpuUsage)
+	fmt.Fprintf(tw, " Memory:\t%dMB\n", h.MemorySize/(1024*1024))
+	fmt.Fprintf(tw, " Memory usage:\t%d MB (%.1f%%)\n", z.OverallMemoryUsage, memUsage)
+	fmt.Fprintf(tw, " Boot time:\t%s\n", s.Runtime.BootTime)
+	}
+
+	return tw.Flush()
+}
diff --git a/vendor/github.com/vmware/govmomi/govc/host/maintenance/enter.go b/vendor/github.com/vmware/govmomi/govc/host/maintenance/enter.go
new file mode 100644
index 000000000..16405fdf6
--- /dev/null
+++ b/vendor/github.com/vmware/govmomi/govc/host/maintenance/enter.go
@@ -0,0 +1,94 @@
+/*
+Copyright (c) 2015 VMware, Inc. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package maintenance
+
+import (
+ "flag"
+ "fmt"
+
+ "golang.org/x/net/context"
+
+ "github.com/vmware/govmomi/govc/cli"
+ "github.com/vmware/govmomi/govc/flags"
+ "github.com/vmware/govmomi/object"
+)
+
+type enter struct {
+ *flags.HostSystemFlag
+
+ timeout int
+ evacuate bool
+}
+
+func init() {
+ cli.Register("host.maintenance.enter", &enter{})
+}
+
+func (cmd *enter) Register(ctx context.Context, f *flag.FlagSet) {
+ cmd.HostSystemFlag, ctx = flags.NewHostSystemFlag(ctx)
+ cmd.HostSystemFlag.Register(ctx, f)
+
+ f.IntVar(&cmd.timeout, "timeout", 0, "Timeout")
+ f.BoolVar(&cmd.evacuate, "evacuate", false, "Evacuate powered off VMs")
+}
+
+func (cmd *enter) Process(ctx context.Context) error {
+ if err := cmd.HostSystemFlag.Process(ctx); err != nil {
+ return err
+ }
+ return nil
+}
+
+func (cmd *enter) Usage() string {
+ return "HOST..."
+}
+
+func (cmd *enter) Description() string {
+ return `Put the hosts in maintenance mode.
+
+While this task is running and when the host is in maintenance mode,
+no virtual machines can be powered on and no provisioning operations can be performed on the host.`
+}
+
+func (cmd *enter) EnterMaintenanceMode(ctx context.Context, host *object.HostSystem) error {
+ task, err := host.EnterMaintenanceMode(ctx, cmd.timeout, cmd.evacuate, nil) // TODO: spec param
+ if err != nil {
+ return err
+ }
+
+ logger := cmd.ProgressLogger(fmt.Sprintf("%s entering maintenance mode... 
", host.InventoryPath)) + defer logger.Wait() + + _, err = task.WaitForResult(ctx, logger) + return err +} + +func (cmd *enter) Run(ctx context.Context, f *flag.FlagSet) error { + hosts, err := cmd.HostSystems(f.Args()) + if err != nil { + return err + } + + for _, host := range hosts { + err = cmd.EnterMaintenanceMode(ctx, host) + if err != nil { + return err + } + } + + return nil +} diff --git a/vendor/github.com/vmware/govmomi/govc/host/maintenance/exit.go b/vendor/github.com/vmware/govmomi/govc/host/maintenance/exit.go new file mode 100644 index 000000000..ff51cb6f2 --- /dev/null +++ b/vendor/github.com/vmware/govmomi/govc/host/maintenance/exit.go @@ -0,0 +1,95 @@ +/* +Copyright (c) 2015 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package maintenancec + +import ( + "flag" + "fmt" + + "golang.org/x/net/context" + + "github.com/vmware/govmomi/govc/cli" + "github.com/vmware/govmomi/govc/flags" + "github.com/vmware/govmomi/object" +) + +type exit struct { + *flags.HostSystemFlag + + timeout int +} + +func init() { + cli.Register("host.maintenance.exit", &exit{}) +} + +func (cmd *exit) Register(ctx context.Context, f *flag.FlagSet) { + cmd.HostSystemFlag, ctx = flags.NewHostSystemFlag(ctx) + cmd.HostSystemFlag.Register(ctx, f) + + f.IntVar(&cmd.timeout, "timeout", 0, "Timeout") +} + +func (cmd *exit) Process(ctx context.Context) error { + if err := cmd.HostSystemFlag.Process(ctx); err != nil { + return err + } + return nil +} + +func (cmd *exit) Usage() string { + return "HOST..." +} + +func (cmd *exit) Description() string { + return `Take hosts out of maintenance mode. + +This blocks if any concurrent running maintenance-only host configurations operations are being performed. +For example, if VMFS volumes are being upgraded. + +The 'timeout' flag is the number of seconds to wait for the exit maintenance mode to succeed. +If the timeout is less than or equal to zero, there is no timeout.` +} + +func (cmd *exit) ExitMaintenanceMode(ctx context.Context, host *object.HostSystem) error { + task, err := host.ExitMaintenanceMode(ctx, cmd.timeout) + if err != nil { + return err + } + + logger := cmd.ProgressLogger(fmt.Sprintf("%s exiting maintenance mode... ", host.InventoryPath)) + defer logger.Wait() + + _, err = task.WaitForResult(ctx, logger) + return err +} + +func (cmd *exit) Run(ctx context.Context, f *flag.FlagSet) error { + hosts, err := cmd.HostSystems(f.Args()) + if err != nil { + return err + } + + for _, host := range hosts { + err = cmd.ExitMaintenanceMode(ctx, host) + if err != nil { + return err + } + } + + return nil +} diff --git a/vendor/github.com/vmware/govmomi/govc/host/portgroup/add.go b/vendor/github.com/vmware/govmomi/govc/host/portgroup/add.go new file mode 100644 index 000000000..f1cb86378 --- /dev/null +++ b/vendor/github.com/vmware/govmomi/govc/host/portgroup/add.go @@ -0,0 +1,66 @@ +/* +Copyright (c) 2014-2015 VMware, Inc. All Rights Reserved. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package portgroup + +import ( + "flag" + + "github.com/vmware/govmomi/govc/cli" + "github.com/vmware/govmomi/govc/flags" + "github.com/vmware/govmomi/vim25/types" + "golang.org/x/net/context" +) + +type add struct { + *flags.HostSystemFlag + + spec types.HostPortGroupSpec +} + +func init() { + cli.Register("host.portgroup.add", &add{}) +} + +func (cmd *add) Register(ctx context.Context, f *flag.FlagSet) { + cmd.HostSystemFlag, ctx = flags.NewHostSystemFlag(ctx) + cmd.HostSystemFlag.Register(ctx, f) + + f.StringVar(&cmd.spec.VswitchName, "vswitch", "", "vSwitch Name") + f.IntVar(&cmd.spec.VlanId, "vlan", 0, "VLAN ID") +} + +func (cmd *add) Process(ctx context.Context) error { + if err := cmd.HostSystemFlag.Process(ctx); err != nil { + return err + } + return nil +} + +func (cmd *add) Usage() string { + return "NAME" +} + +func (cmd *add) Run(ctx context.Context, f *flag.FlagSet) error { + ns, err := cmd.HostNetworkSystem() + if err != nil { + return err + } + + cmd.spec.Name = f.Arg(0) + + return ns.AddPortGroup(context.TODO(), cmd.spec) +} diff --git a/vendor/github.com/vmware/govmomi/govc/host/portgroup/remove.go b/vendor/github.com/vmware/govmomi/govc/host/portgroup/remove.go new file mode 100644 index 000000000..174455ae5 --- /dev/null +++ b/vendor/github.com/vmware/govmomi/govc/host/portgroup/remove.go @@ -0,0 +1,58 @@ +/* +Copyright (c) 2014-2015 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package portgroup + +import ( + "flag" + + "github.com/vmware/govmomi/govc/cli" + "github.com/vmware/govmomi/govc/flags" + "golang.org/x/net/context" +) + +type remove struct { + *flags.HostSystemFlag +} + +func init() { + cli.Register("host.portgroup.remove", &remove{}) +} + +func (cmd *remove) Register(ctx context.Context, f *flag.FlagSet) { + cmd.HostSystemFlag, ctx = flags.NewHostSystemFlag(ctx) + cmd.HostSystemFlag.Register(ctx, f) +} + +func (cmd *remove) Process(ctx context.Context) error { + if err := cmd.HostSystemFlag.Process(ctx); err != nil { + return err + } + return nil +} + +func (cmd *remove) Usage() string { + return "NAME" +} + +func (cmd *remove) Run(ctx context.Context, f *flag.FlagSet) error { + ns, err := cmd.HostNetworkSystem() + if err != nil { + return err + } + + return ns.RemovePortGroup(context.TODO(), f.Arg(0)) +} diff --git a/vendor/github.com/vmware/govmomi/govc/host/reconnect.go b/vendor/github.com/vmware/govmomi/govc/host/reconnect.go new file mode 100644 index 000000000..26f0c3225 --- /dev/null +++ b/vendor/github.com/vmware/govmomi/govc/host/reconnect.go @@ -0,0 +1,97 @@ +/* +Copyright (c) 2015 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package host + +import ( + "flag" + "fmt" + + "golang.org/x/net/context" + + "github.com/vmware/govmomi/govc/cli" + "github.com/vmware/govmomi/govc/flags" + "github.com/vmware/govmomi/object" + "github.com/vmware/govmomi/vim25/types" +) + +type reconnect struct { + *flags.HostSystemFlag + *flags.HostConnectFlag + + types.HostSystemReconnectSpec +} + +func init() { + cli.Register("host.reconnect", &reconnect{}) +} + +func (cmd *reconnect) Register(ctx context.Context, f *flag.FlagSet) { + cmd.HostSystemFlag, ctx = flags.NewHostSystemFlag(ctx) + cmd.HostSystemFlag.Register(ctx, f) + + cmd.HostConnectFlag, ctx = flags.NewHostConnectFlag(ctx) + cmd.HostConnectFlag.Register(ctx, f) + + cmd.HostSystemReconnectSpec.SyncState = types.NewBool(false) + f.BoolVar(cmd.HostSystemReconnectSpec.SyncState, "sync-state", false, "Sync state") +} + +func (cmd *reconnect) Process(ctx context.Context) error { + if err := cmd.HostSystemFlag.Process(ctx); err != nil { + return err + } + if err := cmd.HostConnectFlag.Process(ctx); err != nil { + return err + } + return nil +} + +func (cmd *reconnect) Description() string { + return `Reconnect host to vCenter. + +This command can also be used to change connection properties (hostname, fingerprint, username, password), +without disconnecting the host.` +} + +func (cmd *reconnect) Reconnect(ctx context.Context, host *object.HostSystem) error { + task, err := host.Reconnect(ctx, &cmd.HostConnectSpec, &cmd.HostSystemReconnectSpec) + if err != nil { + return err + } + + logger := cmd.ProgressLogger(fmt.Sprintf("%s reconnecting... 
", host.InventoryPath)) + defer logger.Wait() + + _, err = task.WaitForResult(ctx, logger) + return err +} + +func (cmd *reconnect) Run(ctx context.Context, f *flag.FlagSet) error { + hosts, err := cmd.HostSystems(f.Args()) + if err != nil { + return err + } + + for _, host := range hosts { + err = cmd.Reconnect(ctx, host) + if err != nil { + return err + } + } + + return nil +} diff --git a/vendor/github.com/vmware/govmomi/govc/host/remove.go b/vendor/github.com/vmware/govmomi/govc/host/remove.go new file mode 100644 index 000000000..c9235e0fb --- /dev/null +++ b/vendor/github.com/vmware/govmomi/govc/host/remove.go @@ -0,0 +1,101 @@ +/* +Copyright (c) 2015 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package host + +import ( + "flag" + "fmt" + + "golang.org/x/net/context" + + "github.com/vmware/govmomi/govc/cli" + "github.com/vmware/govmomi/govc/flags" + "github.com/vmware/govmomi/object" + "github.com/vmware/govmomi/vim25/mo" +) + +type remove struct { + *flags.HostSystemFlag +} + +func init() { + cli.Register("host.remove", &remove{}) +} + +func (cmd *remove) Register(ctx context.Context, f *flag.FlagSet) { + cmd.HostSystemFlag, ctx = flags.NewHostSystemFlag(ctx) + cmd.HostSystemFlag.Register(ctx, f) +} + +func (cmd *remove) Process(ctx context.Context) error { + if err := cmd.HostSystemFlag.Process(ctx); err != nil { + return err + } + return nil +} + +func (cmd *remove) Usage() string { + return "HOST..." +} + +func (cmd *remove) Description() string { + return `Remove hosts from vCenter.` +} + +func (cmd *remove) Remove(ctx context.Context, host *object.HostSystem) error { + var h mo.HostSystem + err := host.Properties(ctx, host.Reference(), []string{"parent"}, &h) + if err != nil { + return err + } + + remove := host.Destroy + + if h.Parent.Type == "ComputeResource" { + // Standalone host. From the docs: + // "Invoking remove on a HostSystem of standalone type throws a NotSupported fault. + // A standalone HostSystem can be removeed only by invoking remove on its parent ComputeResource." + remove = object.NewComputeResource(host.Client(), *h.Parent).Destroy + } + + task, err := remove(ctx) + if err != nil { + return err + } + + logger := cmd.ProgressLogger(fmt.Sprintf("%s removing... ", host.InventoryPath)) + defer logger.Wait() + + _, err = task.WaitForResult(ctx, logger) + return err +} + +func (cmd *remove) Run(ctx context.Context, f *flag.FlagSet) error { + hosts, err := cmd.HostSystems(f.Args()) + if err != nil { + return err + } + + for _, host := range hosts { + err = cmd.Remove(ctx, host) + if err != nil { + return err + } + } + + return nil +} diff --git a/vendor/github.com/vmware/govmomi/govc/host/storage/info.go b/vendor/github.com/vmware/govmomi/govc/host/storage/info.go new file mode 100644 index 000000000..9d132399d --- /dev/null +++ b/vendor/github.com/vmware/govmomi/govc/host/storage/info.go @@ -0,0 +1,188 @@ +/* +Copyright (c) 2015 VMware, Inc. All Rights Reserved. 
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package storage
+
+import (
+ "flag"
+ "fmt"
+ "io"
+ "strings"
+ "text/tabwriter"
+
+ "github.com/vmware/govmomi/govc/cli"
+ "github.com/vmware/govmomi/govc/flags"
+ "github.com/vmware/govmomi/units"
+ "github.com/vmware/govmomi/vim25/mo"
+ "github.com/vmware/govmomi/vim25/types"
+ "golang.org/x/net/context"
+)
+
+var infoTypes = []string{"hba", "lun"}
+
+type infoType string
+
+func (t *infoType) Set(s string) error {
+ s = strings.ToLower(s)
+
+ for _, e := range infoTypes {
+ if s == e {
+ *t = infoType(s)
+ return nil
+ }
+ }
+
+ return fmt.Errorf("invalid type")
+}
+
+func (t *infoType) String() string {
+ return string(*t)
+}
+
+func (t *infoType) Result(hss mo.HostStorageSystem) flags.OutputWriter {
+ switch string(*t) {
+ case "hba":
+ return hbaResult(hss)
+ case "lun":
+ return lunResult(hss)
+ default:
+ panic("unsupported")
+ }
+}
+
+type info struct {
+ *flags.HostSystemFlag
+ *flags.OutputFlag
+
+ typ infoType
+}
+
+func init() {
+ cli.Register("host.storage.info", &info{})
+}
+
+func (cmd *info) Register(ctx context.Context, f *flag.FlagSet) {
+ cmd.HostSystemFlag, ctx = flags.NewHostSystemFlag(ctx)
+ cmd.HostSystemFlag.Register(ctx, f)
+ cmd.OutputFlag, ctx = flags.NewOutputFlag(ctx)
+ cmd.OutputFlag.Register(ctx, f)
+
+ err := cmd.typ.Set("lun")
+ if err != nil {
+ panic(err)
+ }
+
+ f.Var(&cmd.typ, "t", fmt.Sprintf("Type (%s)", strings.Join(infoTypes, ",")))
+}
+
+func (cmd *info) Process(ctx context.Context) error {
+ if err := cmd.HostSystemFlag.Process(ctx); err != nil {
+ return err
+ }
+ if err := cmd.OutputFlag.Process(ctx); err != nil {
+ return err
+ }
+ return nil
+}
+
+func (cmd *info) Usage() string {
+ return "[-t TYPE]"
+}
+
+func (cmd *info) Description() string {
+ return `Show information about a host's storage system.`
+}
+
+func (cmd *info) Run(ctx context.Context, f *flag.FlagSet) error {
+ host, err := cmd.HostSystem()
+ if err != nil {
+ return err
+ }
+
+ ss, err := host.ConfigManager().StorageSystem(ctx)
+ if err != nil {
+ return err
+ }
+
+ var hss mo.HostStorageSystem
+ err = ss.Properties(ctx, ss.Reference(), nil, &hss)
+ if err != nil {
+ return err
+ }
+
+ return cmd.WriteResult(cmd.typ.Result(hss))
+}
+
+type hbaResult mo.HostStorageSystem
+
+func (r hbaResult) Write(w io.Writer) error {
+ tw := tabwriter.NewWriter(w, 2, 0, 2, ' ', 0)
+
+ fmt.Fprintf(tw, "Device\t")
+ fmt.Fprintf(tw, "PCI\t")
+ fmt.Fprintf(tw, "Driver\t")
+ fmt.Fprintf(tw, "Status\t")
+ fmt.Fprintf(tw, "Model\t")
+ fmt.Fprintf(tw, "\n")
+
+ for _, e := range r.StorageDeviceInfo.HostBusAdapter {
+ hba := e.GetHostHostBusAdapter()
+
+ fmt.Fprintf(tw, "%s\t", hba.Device)
+ fmt.Fprintf(tw, "%s\t", hba.Pci)
+ fmt.Fprintf(tw, "%s\t", hba.Driver)
+ fmt.Fprintf(tw, "%s\t", hba.Status)
+ fmt.Fprintf(tw, "%s\t", hba.Model)
+ fmt.Fprintf(tw, "\n")
+ }
+
+ return tw.Flush()
+}
+
+type lunResult mo.HostStorageSystem
+
+func (r lunResult) Write(w io.Writer) error {
+ tw := tabwriter.NewWriter(w, 2, 0, 2, ' ', 0)
+
+ fmt.Fprintf(tw, "Name\t")
+ 
fmt.Fprintf(tw, "Type\t") + fmt.Fprintf(tw, "Capacity\t") + fmt.Fprintf(tw, "Model\t") + fmt.Fprintf(tw, "\n") + + for _, e := range r.StorageDeviceInfo.ScsiLun { + var capacity int64 + + lun := e.GetScsiLun() + if disk, ok := e.(*types.HostScsiDisk); ok { + capacity = int64(disk.Capacity.Block) * int64(disk.Capacity.BlockSize) + } + + fmt.Fprintf(tw, "%s\t", lun.DeviceName) + fmt.Fprintf(tw, "%s\t", lun.DeviceType) + + if capacity == 0 { + fmt.Fprintf(tw, "-\t") + } else { + fmt.Fprintf(tw, "%s\t", units.ByteSize(capacity)) + } + + fmt.Fprintf(tw, "%s\t", lun.Model) + fmt.Fprintf(tw, "\n") + } + + return tw.Flush() +} diff --git a/vendor/github.com/vmware/govmomi/govc/host/storage/partition.go b/vendor/github.com/vmware/govmomi/govc/host/storage/partition.go new file mode 100644 index 000000000..5e3d4d5d7 --- /dev/null +++ b/vendor/github.com/vmware/govmomi/govc/host/storage/partition.go @@ -0,0 +1,127 @@ +/* +Copyright (c) 2015 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package storage + +import ( + "flag" + "fmt" + "io" + "text/tabwriter" + + "golang.org/x/net/context" + + "github.com/vmware/govmomi/govc/cli" + "github.com/vmware/govmomi/govc/flags" + "github.com/vmware/govmomi/units" + "github.com/vmware/govmomi/vim25/mo" + "github.com/vmware/govmomi/vim25/types" +) + +type partition struct { + *flags.HostSystemFlag + *flags.OutputFlag +} + +func init() { + cli.Register("host.storage.partition", &partition{}) +} + +func (cmd *partition) Register(ctx context.Context, f *flag.FlagSet) { + cmd.HostSystemFlag, ctx = flags.NewHostSystemFlag(ctx) + cmd.HostSystemFlag.Register(ctx, f) + cmd.OutputFlag, ctx = flags.NewOutputFlag(ctx) + cmd.OutputFlag.Register(ctx, f) +} + +func (cmd *partition) Process(ctx context.Context) error { + if err := cmd.HostSystemFlag.Process(ctx); err != nil { + return err + } + if err := cmd.OutputFlag.Process(ctx); err != nil { + return err + } + return nil +} + +func (cmd *partition) Usage() string { + return "DEVICE_PATH" +} + +func (cmd *partition) Description() string { + return `Show partition table for device at DEVICE_PATH.` +} + +func (cmd *partition) Run(ctx context.Context, f *flag.FlagSet) error { + if f.NArg() != 1 { + return fmt.Errorf("specify device path") + } + + path := f.Args()[0] + + host, err := cmd.HostSystem() + if err != nil { + return err + } + + ss, err := host.ConfigManager().StorageSystem(ctx) + if err != nil { + return err + } + + var hss mo.HostStorageSystem + err = ss.Properties(ctx, ss.Reference(), nil, &hss) + if err != nil { + return nil + } + + info, err := ss.RetrieveDiskPartitionInfo(ctx, path) + if err != nil { + return err + } + + return cmd.WriteResult(partitionInfo(*info)) +} + +type partitionInfo types.HostDiskPartitionInfo + +func (p partitionInfo) Write(w io.Writer) error { + tw := tabwriter.NewWriter(w, 2, 0, 2, ' ', 0) + + fmt.Fprintf(tw, "Table format: %s\n", p.Spec.PartitionFormat) + fmt.Fprintf(tw, "Number of sectors: %d\n", p.Spec.TotalSectors) + fmt.Fprintf(tw, "\n") + + fmt.Fprintf(tw, 
"Number\t") + fmt.Fprintf(tw, "Start\t") + fmt.Fprintf(tw, "End\t") + fmt.Fprintf(tw, "Size\t") + fmt.Fprintf(tw, "Type\t") + fmt.Fprintf(tw, "\n") + + for _, e := range p.Spec.Partition { + sectors := e.EndSector - e.StartSector + + fmt.Fprintf(tw, "%d\t", e.Partition) + fmt.Fprintf(tw, "%d\t", e.StartSector) + fmt.Fprintf(tw, "%d\t", e.EndSector) + fmt.Fprintf(tw, "%s\t", units.ByteSize(sectors*512)) + fmt.Fprintf(tw, "%s\t", e.Type) + fmt.Fprintf(tw, "\n") + } + + return tw.Flush() +} diff --git a/vendor/github.com/vmware/govmomi/govc/host/vnic/info.go b/vendor/github.com/vmware/govmomi/govc/host/vnic/info.go new file mode 100644 index 000000000..7631b64ee --- /dev/null +++ b/vendor/github.com/vmware/govmomi/govc/host/vnic/info.go @@ -0,0 +1,146 @@ +/* +Copyright (c) 2015 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package vnic + +import ( + "flag" + "fmt" + "os" + "strings" + "text/tabwriter" + + "github.com/vmware/govmomi/govc/cli" + "github.com/vmware/govmomi/govc/flags" + "github.com/vmware/govmomi/object" + "github.com/vmware/govmomi/vim25/mo" + "github.com/vmware/govmomi/vim25/types" + "golang.org/x/net/context" +) + +type info struct { + *flags.HostSystemFlag +} + +func init() { + cli.Register("host.vnic.info", &info{}) +} + +func (cmd *info) Register(ctx context.Context, f *flag.FlagSet) { + cmd.HostSystemFlag, ctx = flags.NewHostSystemFlag(ctx) + cmd.HostSystemFlag.Register(ctx, f) +} + +func (cmd *info) Process(ctx context.Context) error { + if err := cmd.HostSystemFlag.Process(ctx); err != nil { + return err + } + return nil +} + +func (cmd *info) Run(ctx context.Context, f *flag.FlagSet) error { + host, err := cmd.HostSystem() + if err != nil { + return err + } + + ns, err := cmd.HostNetworkSystem() + if err != nil { + return err + } + + var mns mo.HostNetworkSystem + + m, err := host.ConfigManager().VirtualNicManager(ctx) + if err != nil { + return err + } + + info, err := m.Info(ctx) + if err != nil { + return err + } + + err = ns.Properties(ctx, ns.Reference(), []string{"networkInfo"}, &mns) + if err != nil { + return err + } + + tw := tabwriter.NewWriter(os.Stdout, 2, 0, 2, ' ', 0) + + type dnet struct { + dvp mo.DistributedVirtualPortgroup + dvs mo.VmwareDistributedVirtualSwitch + } + + dnets := make(map[string]*dnet) + + for _, nic := range mns.NetworkInfo.Vnic { + fmt.Fprintf(tw, "Device:\t%s\n", nic.Device) + + if dvp := nic.Spec.DistributedVirtualPort; dvp != nil { + dn, ok := dnets[dvp.PortgroupKey] + + if !ok { + dn = new(dnet) + o := object.NewDistributedVirtualPortgroup(host.Client(), types.ManagedObjectReference{ + Type: "DistributedVirtualPortgroup", + Value: dvp.PortgroupKey, + }) + + err = o.Properties(ctx, o.Reference(), []string{"name", "config.distributedVirtualSwitch"}, &dn.dvp) + if err != nil { + return err + } + + err = o.Properties(ctx, *dn.dvp.Config.DistributedVirtualSwitch, []string{"name"}, &dn.dvs) + if err != nil { + return err + } + + dnets[dvp.PortgroupKey] = dn + } + + fmt.Fprintf(tw, "Network label:\t%s\n", dn.dvp.Name) 
+ fmt.Fprintf(tw, "Switch:\t%s\n", dn.dvs.Name) + } else { + fmt.Fprintf(tw, "Network label:\t%s\n", nic.Portgroup) + for _, pg := range mns.NetworkInfo.Portgroup { + if pg.Spec.Name == nic.Portgroup { + fmt.Fprintf(tw, "Switch:\t%s\n", pg.Spec.VswitchName) + break + } + } + } + + fmt.Fprintf(tw, "IP address:\t%s\n", nic.Spec.Ip.IpAddress) + fmt.Fprintf(tw, "TCP/IP stack:\t%s\n", nic.Spec.NetStackInstanceKey) + + var services []string + for _, nc := range info.NetConfig { + for _, dev := range nc.SelectedVnic { + key := nc.NicType + "." + nic.Key + if dev == key { + services = append(services, nc.NicType) + } + } + + } + fmt.Fprintf(tw, "Enabled services:\t%s\n", strings.Join(services, ", ")) + } + + return tw.Flush() +} diff --git a/vendor/github.com/vmware/govmomi/govc/host/vnic/service.go b/vendor/github.com/vmware/govmomi/govc/host/vnic/service.go new file mode 100644 index 000000000..3198974c3 --- /dev/null +++ b/vendor/github.com/vmware/govmomi/govc/host/vnic/service.go @@ -0,0 +1,112 @@ +/* +Copyright (c) 2015 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package vnic + +import ( + "flag" + "fmt" + "strings" + + "github.com/vmware/govmomi/govc/cli" + "github.com/vmware/govmomi/govc/flags" + "github.com/vmware/govmomi/vim25/types" + "golang.org/x/net/context" +) + +type service struct { + *flags.HostSystemFlag + + Enable bool + Disable bool +} + +func init() { + cli.Register("host.vnic.service", &service{}) +} + +func (cmd *service) Register(ctx context.Context, f *flag.FlagSet) { + cmd.HostSystemFlag, ctx = flags.NewHostSystemFlag(ctx) + cmd.HostSystemFlag.Register(ctx, f) + + f.BoolVar(&cmd.Enable, "enable", false, "Enable service") + f.BoolVar(&cmd.Disable, "disable", false, "Disable service") +} + +func (cmd *service) Process(ctx context.Context) error { + if err := cmd.HostSystemFlag.Process(ctx); err != nil { + return err + } + // Either may be true or none may be true. + if cmd.Enable && cmd.Disable { + return flag.ErrHelp + } + + return nil +} + +func (cmd *service) Usage() string { + return "SERVICE DEVICE" +} + +func (cmd *service) Description() string { + nicTypes := []string{ + string(types.HostVirtualNicManagerNicTypeVmotion), + string(types.HostVirtualNicManagerNicTypeFaultToleranceLogging), + string(types.HostVirtualNicManagerNicTypeVSphereReplication), + string(types.HostVirtualNicManagerNicTypeVSphereReplicationNFC), + string(types.HostVirtualNicManagerNicTypeManagement), + string(types.HostVirtualNicManagerNicTypeVsan), + string(types.HostVirtualNicManagerNicTypeVSphereProvisioning), + } + + return fmt.Sprintf(` +Enable or disable service on a virtual nic device. 
Example: +SERVICE [%s] DEVICE [%s]`, strings.Join(nicTypes, "|"), strings.Join([]string{"vmk0", "vmk1", "..."}, "|")) +} + +func (cmd *service) Run(ctx context.Context, f *flag.FlagSet) error { + if f.NArg() != 2 { + return flag.ErrHelp + } + + service := f.Arg(0) + device := f.Arg(1) + + host, err := cmd.HostSystem() + if err != nil { + return err + } + + m, err := host.ConfigManager().VirtualNicManager(ctx) + if err != nil { + return err + } + + var method func(context.Context, string, string) error + + if cmd.Enable { + method = m.SelectVnic + } else if cmd.Disable { + method = m.DeselectVnic + } + + if method == nil { + return flag.ErrHelp + } + + return method(ctx, service, device) +} diff --git a/vendor/github.com/vmware/govmomi/govc/host/vswitch/add.go b/vendor/github.com/vmware/govmomi/govc/host/vswitch/add.go new file mode 100644 index 000000000..0f1edd340 --- /dev/null +++ b/vendor/github.com/vmware/govmomi/govc/host/vswitch/add.go @@ -0,0 +1,72 @@ +/* +Copyright (c) 2014-2015 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package vswitch + +import ( + "flag" + + "github.com/vmware/govmomi/govc/cli" + "github.com/vmware/govmomi/govc/flags" + "github.com/vmware/govmomi/vim25/types" + "golang.org/x/net/context" +) + +type add struct { + *flags.HostSystemFlag + + nic string + spec types.HostVirtualSwitchSpec +} + +func init() { + cli.Register("host.vswitch.add", &add{}) +} + +func (cmd *add) Register(ctx context.Context, f *flag.FlagSet) { + cmd.HostSystemFlag, ctx = flags.NewHostSystemFlag(ctx) + cmd.HostSystemFlag.Register(ctx, f) + + f.IntVar(&cmd.spec.NumPorts, "ports", 128, "Number of ports") + f.IntVar(&cmd.spec.Mtu, "mtu", 0, "MTU") + f.StringVar(&cmd.nic, "nic", "", "Bridge nic device") +} + +func (cmd *add) Process(ctx context.Context) error { + if err := cmd.HostSystemFlag.Process(ctx); err != nil { + return err + } + return nil +} + +func (cmd *add) Usage() string { + return "NAME" +} + +func (cmd *add) Run(ctx context.Context, f *flag.FlagSet) error { + ns, err := cmd.HostNetworkSystem() + if err != nil { + return err + } + + if cmd.nic != "" { + cmd.spec.Bridge = &types.HostVirtualSwitchBondBridge{ + NicDevice: []string{cmd.nic}, + } + } + + return ns.AddVirtualSwitch(context.TODO(), f.Arg(0), &cmd.spec) +} diff --git a/vendor/github.com/vmware/govmomi/govc/host/vswitch/info.go b/vendor/github.com/vmware/govmomi/govc/host/vswitch/info.go new file mode 100644 index 000000000..589be1302 --- /dev/null +++ b/vendor/github.com/vmware/govmomi/govc/host/vswitch/info.go @@ -0,0 +1,106 @@ +/* +Copyright (c) 2014-2015 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package vswitch + +import ( + "flag" + "fmt" + "os" + "strings" + "text/tabwriter" + + "github.com/vmware/govmomi/govc/cli" + "github.com/vmware/govmomi/govc/flags" + "github.com/vmware/govmomi/property" + "github.com/vmware/govmomi/vim25/mo" + "golang.org/x/net/context" +) + +type info struct { + *flags.ClientFlag + *flags.OutputFlag + *flags.HostSystemFlag +} + +func init() { + cli.Register("host.vswitch.info", &info{}) +} + +func (cmd *info) Register(ctx context.Context, f *flag.FlagSet) { + cmd.ClientFlag, ctx = flags.NewClientFlag(ctx) + cmd.ClientFlag.Register(ctx, f) + cmd.OutputFlag, ctx = flags.NewOutputFlag(ctx) + cmd.OutputFlag.Register(ctx, f) + cmd.HostSystemFlag, ctx = flags.NewHostSystemFlag(ctx) + cmd.HostSystemFlag.Register(ctx, f) +} + +func (cmd *info) Process(ctx context.Context) error { + if err := cmd.ClientFlag.Process(ctx); err != nil { + return err + } + if err := cmd.OutputFlag.Process(ctx); err != nil { + return err + } + if err := cmd.HostSystemFlag.Process(ctx); err != nil { + return err + } + return nil +} + +func (cmd *info) Run(ctx context.Context, f *flag.FlagSet) error { + client, err := cmd.Client() + if err != nil { + return err + } + + ns, err := cmd.HostNetworkSystem() + if err != nil { + return err + } + + var mns mo.HostNetworkSystem + + pc := property.DefaultCollector(client) + err = pc.RetrieveOne(context.TODO(), ns.Reference(), []string{"networkInfo.vswitch"}, &mns) + if err != nil { + return err + } + + tw := tabwriter.NewWriter(os.Stdout, 2, 0, 2, ' ', 0) + + for i, s := range mns.NetworkInfo.Vswitch { + if i > 0 { + fmt.Fprintln(tw) + } + fmt.Fprintf(tw, "Name:\t%s\n", s.Name) + fmt.Fprintf(tw, "Portgroup:\t%s\n", cmd.keys("key-vim.host.PortGroup-", s.Portgroup)) + fmt.Fprintf(tw, "Pnic:\t%s\n", cmd.keys("key-vim.host.PhysicalNic-", s.Pnic)) + fmt.Fprintf(tw, "MTU:\t%d\n", s.Mtu) + fmt.Fprintf(tw, "Ports:\t%d\n", s.NumPorts) + fmt.Fprintf(tw, "Ports Available:\t%d\n", s.NumPortsAvailable) + } + + return tw.Flush() +} + +func (cmd *info) keys(key string, vals []string) string { + for i, val := range vals { + vals[i] = strings.TrimPrefix(val, key) + } + return strings.Join(vals, ", ") +} diff --git a/vendor/github.com/vmware/govmomi/govc/host/vswitch/remove.go b/vendor/github.com/vmware/govmomi/govc/host/vswitch/remove.go new file mode 100644 index 000000000..c5660d322 --- /dev/null +++ b/vendor/github.com/vmware/govmomi/govc/host/vswitch/remove.go @@ -0,0 +1,58 @@ +/* +Copyright (c) 2014-2015 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/
+
+package vswitch
+
+import (
+ "flag"
+
+ "github.com/vmware/govmomi/govc/cli"
+ "github.com/vmware/govmomi/govc/flags"
+ "golang.org/x/net/context"
+)
+
+type remove struct {
+ *flags.HostSystemFlag
+}
+
+func init() {
+ cli.Register("host.vswitch.remove", &remove{})
+}
+
+func (cmd *remove) Register(ctx context.Context, f *flag.FlagSet) {
+ cmd.HostSystemFlag, ctx = flags.NewHostSystemFlag(ctx)
+ cmd.HostSystemFlag.Register(ctx, f)
+}
+
+func (cmd *remove) Process(ctx context.Context) error {
+ if err := cmd.HostSystemFlag.Process(ctx); err != nil {
+ return err
+ }
+ return nil
+}
+
+func (cmd *remove) Usage() string {
+ return "NAME"
+}
+
+func (cmd *remove) Run(ctx context.Context, f *flag.FlagSet) error {
+ ns, err := cmd.HostNetworkSystem()
+ if err != nil {
+ return err
+ }
+
+ return ns.RemoveVirtualSwitch(context.TODO(), f.Arg(0))
+}
diff --git a/vendor/github.com/vmware/govmomi/govc/importx/archive.go b/vendor/github.com/vmware/govmomi/govc/importx/archive.go
new file mode 100644
index 000000000..92722549c
--- /dev/null
+++ b/vendor/github.com/vmware/govmomi/govc/importx/archive.go
@@ -0,0 +1,150 @@
+/*
+Copyright (c) 2014-2015 VMware, Inc. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package importx
+
+import (
+ "archive/tar"
+ "flag"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "os"
+ "path"
+ "path/filepath"
+
+ "golang.org/x/net/context"
+
+ "github.com/vmware/govmomi/ovf"
+)
+
+// ArchiveFlag doesn't register any flags; it only
+// encapsulates some common archive-related functionality.
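+//
+// The Archive implementations below cover both import sources: TapeArchive
+// reads a matching entry out of an .ova tarball, while FileArchive opens
+// files relative to a plain .ovf descriptor on disk.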
+type ArchiveFlag struct { + Archive +} + +func newArchiveFlag(ctx context.Context) (*ArchiveFlag, context.Context) { + return &ArchiveFlag{}, ctx +} + +func (f *ArchiveFlag) Register(ctx context.Context, fs *flag.FlagSet) { +} + +func (f *ArchiveFlag) Process(ctx context.Context) error { + return nil +} + +func (f *ArchiveFlag) ReadOvf(fpath string) ([]byte, error) { + r, _, err := f.Archive.Open(fpath) + if err != nil { + return nil, err + } + defer r.Close() + + return ioutil.ReadAll(r) +} + +func (f *ArchiveFlag) ReadEnvelope(fpath string) (*ovf.Envelope, error) { + if fpath == "" { + return nil, nil + } + + r, _, err := f.Open(fpath) + if err != nil { + return nil, err + } + defer r.Close() + + e, err := ovf.Unmarshal(r) + if err != nil { + return nil, fmt.Errorf("failed to parse ovf: %s", err.Error()) + } + + return e, nil +} + +type Archive interface { + Open(string) (io.ReadCloser, int64, error) +} + +type TapeArchive struct { + path string +} + +type TapeArchiveEntry struct { + io.Reader + f *os.File +} + +func (t *TapeArchiveEntry) Close() error { + return t.f.Close() +} + +func (t *TapeArchive) Open(name string) (io.ReadCloser, int64, error) { + f, err := os.Open(t.path) + if err != nil { + return nil, 0, err + } + + r := tar.NewReader(f) + + for { + h, err := r.Next() + if err == io.EOF { + break + } + if err != nil { + return nil, 0, err + } + + matched, err := filepath.Match(name, path.Base(h.Name)) + if err != nil { + return nil, 0, err + } + + if matched { + return &TapeArchiveEntry{r, f}, h.Size, nil + } + } + + _ = f.Close() + + return nil, 0, os.ErrNotExist +} + +type FileArchive struct { + path string +} + +func (t *FileArchive) Open(name string) (io.ReadCloser, int64, error) { + fpath := name + if name != t.path { + fpath = filepath.Join(filepath.Dir(t.path), name) + } + + s, err := os.Stat(fpath) + if err != nil { + return nil, 0, err + } + + f, err := os.Open(fpath) + if err != nil { + return nil, 0, err + } + + return f, s.Size(), nil +} diff --git a/vendor/github.com/vmware/govmomi/govc/importx/folder.go b/vendor/github.com/vmware/govmomi/govc/importx/folder.go new file mode 100644 index 000000000..924e75f2c --- /dev/null +++ b/vendor/github.com/vmware/govmomi/govc/importx/folder.go @@ -0,0 +1,91 @@ +/* +Copyright (c) 2015 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package importx + +import ( + "errors" + "flag" + + "golang.org/x/net/context" + + "github.com/vmware/govmomi/govc/flags" + "github.com/vmware/govmomi/object" +) + +type FolderFlag struct { + *flags.DatacenterFlag + + folder string +} + +func newFolderFlag(ctx context.Context) (*FolderFlag, context.Context) { + f := &FolderFlag{} + f.DatacenterFlag, ctx = flags.NewDatacenterFlag(ctx) + return f, ctx +} + +func (flag *FolderFlag) Register(ctx context.Context, f *flag.FlagSet) { + flag.DatacenterFlag.Register(ctx, f) + + f.StringVar(&flag.folder, "folder", "", "Path to folder to add the VM to") +} + +func (flag *FolderFlag) Process(ctx context.Context) error { + return flag.DatacenterFlag.Process(ctx) +} + +func (flag *FolderFlag) Folder() (*object.Folder, error) { + if len(flag.folder) == 0 { + dc, err := flag.Datacenter() + if err != nil { + return nil, err + } + folders, err := dc.Folders(context.TODO()) + if err != nil { + return nil, err + } + return folders.VmFolder, nil + } + + finder, err := flag.Finder() + if err != nil { + return nil, err + } + + mo, err := finder.ManagedObjectList(context.TODO(), flag.folder) + if err != nil { + return nil, err + } + if len(mo) == 0 { + return nil, errors.New("folder argument does not resolve to object") + } + if len(mo) > 1 { + return nil, errors.New("folder argument resolves to more than one object") + } + + ref := mo[0].Object.Reference() + if ref.Type != "Folder" { + return nil, errors.New("folder argument does not resolve to folder") + } + + c, err := flag.Client() + if err != nil { + return nil, err + } + + return object.NewFolder(c, ref), nil +} diff --git a/vendor/github.com/vmware/govmomi/govc/importx/importable.go b/vendor/github.com/vmware/govmomi/govc/importx/importable.go new file mode 100644 index 000000000..14e31670b --- /dev/null +++ b/vendor/github.com/vmware/govmomi/govc/importx/importable.go @@ -0,0 +1,59 @@ +/* +Copyright (c) 2014 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package importx + +import ( + "fmt" + "path" +) + +type importable struct { + localPath string + remotePath string +} + +func (i importable) Ext() string { + return path.Ext(i.localPath) +} + +func (i importable) Base() string { + return path.Base(i.localPath) +} + +func (i importable) BaseClean() string { + b := i.Base() + e := i.Ext() + return b[:len(b)-len(e)] +} + +func (i importable) RemoteSrcVMDK() string { + file := fmt.Sprintf("%s-src.vmdk", i.BaseClean()) + return i.toRemotePath(file) +} + +func (i importable) RemoteDstVMDK() string { + file := fmt.Sprintf("%s.vmdk", i.BaseClean()) + return i.toRemotePath(file) +} + +func (i importable) toRemotePath(p string) string { + if i.remotePath == "" { + return p + } + + return path.Join(i.remotePath, p) +} diff --git a/vendor/github.com/vmware/govmomi/govc/importx/lease_updater.go b/vendor/github.com/vmware/govmomi/govc/importx/lease_updater.go new file mode 100644 index 000000000..6f1f1d150 --- /dev/null +++ b/vendor/github.com/vmware/govmomi/govc/importx/lease_updater.go @@ -0,0 +1,131 @@ +/* +Copyright (c) 2014-2015 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package importx + +import ( + "fmt" + "net/url" + "sync" + "sync/atomic" + "time" + + "github.com/vmware/govmomi/object" + "github.com/vmware/govmomi/vim25" + "github.com/vmware/govmomi/vim25/progress" + "github.com/vmware/govmomi/vim25/types" + "golang.org/x/net/context" +) + +type ovfFileItem struct { + url *url.URL + item types.OvfFileItem + ch chan progress.Report +} + +func (o ovfFileItem) Sink() chan<- progress.Report { + return o.ch +} + +type leaseUpdater struct { + client *vim25.Client + lease *object.HttpNfcLease + + pos int64 // Number of bytes + total int64 // Total number of bytes + + done chan struct{} // When lease updater should stop + + wg sync.WaitGroup // Track when update loop is done +} + +func newLeaseUpdater(client *vim25.Client, lease *object.HttpNfcLease, items []ovfFileItem) *leaseUpdater { + l := leaseUpdater{ + client: client, + lease: lease, + + done: make(chan struct{}), + } + + for _, item := range items { + l.total += item.item.Size + go l.waitForProgress(item) + } + + // Kickstart update loop + l.wg.Add(1) + go l.run() + + return &l +} + +func (l *leaseUpdater) waitForProgress(item ovfFileItem) { + var pos, total int64 + + total = item.item.Size + + for { + select { + case <-l.done: + return + case p, ok := <-item.ch: + // Return in case of error + if ok && p.Error() != nil { + return + } + + if !ok { + // Last element on the channel, add to total + atomic.AddInt64(&l.pos, total-pos) + return + } + + // Approximate progress in number of bytes + x := int64(float32(total) * (p.Percentage() / 100.0)) + atomic.AddInt64(&l.pos, x-pos) + pos = x + } + } +} + +func (l *leaseUpdater) run() { + defer l.wg.Done() + + tick := time.NewTicker(2 * time.Second) + defer tick.Stop() + + for { + select { + case <-l.done: + return + case <-tick.C: + // From the vim api HttpNfcLeaseProgress(percent) doc, percent == + // "Completion status 
represented as an integer in the 0-100 range." + // Always report the current value of percent, as it will renew the + // lease even if the value hasn't changed or is 0. + percent := int(float32(100*atomic.LoadInt64(&l.pos)) / float32(l.total)) + err := l.lease.HttpNfcLeaseProgress(context.TODO(), percent) + if err != nil { + fmt.Printf("from lease updater: %s\n", err) + } + } + } +} + +func (l *leaseUpdater) Done() { + close(l.done) + l.wg.Wait() +} diff --git a/vendor/github.com/vmware/govmomi/govc/importx/options.go b/vendor/github.com/vmware/govmomi/govc/importx/options.go new file mode 100644 index 000000000..76c61786a --- /dev/null +++ b/vendor/github.com/vmware/govmomi/govc/importx/options.go @@ -0,0 +1,84 @@ +/* +Copyright (c) 2015 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package importx + +import ( + "encoding/json" + "flag" + "os" + + "golang.org/x/net/context" + + "github.com/vmware/govmomi/ovf" + "github.com/vmware/govmomi/vim25/types" +) + +type Property struct { + types.KeyValue + Spec *ovf.Property `json:",omitempty"` +} + +type Options struct { + AllDeploymentOptions []string `json:",omitempty"` + Deployment string + + AllDiskProvisioningOptions []string `json:",omitempty"` + DiskProvisioning string + + AllIPAllocationPolicyOptions []string `json:",omitempty"` + IPAllocationPolicy string + + AllIPProtocolOptions []string `json:",omitempty"` + IPProtocol string + + PropertyMapping []Property `json:",omitempty"` + + PowerOn bool + InjectOvfEnv bool + WaitForIP bool + Name *string +} + +type OptionsFlag struct { + Options Options + + path string +} + +func newOptionsFlag(ctx context.Context) (*OptionsFlag, context.Context) { + return &OptionsFlag{}, ctx +} + +func (flag *OptionsFlag) Register(ctx context.Context, f *flag.FlagSet) { + f.StringVar(&flag.path, "options", "", "Options spec file path for VM deployment") +} + +func (flag *OptionsFlag) Process(ctx context.Context) error { + if len(flag.path) > 0 { + f, err := os.Open(flag.path) + if err != nil { + return err + } + defer f.Close() + + if err := json.NewDecoder(f).Decode(&flag.Options); err != nil { + return err + } + } + + return nil +} diff --git a/vendor/github.com/vmware/govmomi/govc/importx/ova.go b/vendor/github.com/vmware/govmomi/govc/importx/ova.go new file mode 100644 index 000000000..ce33484f6 --- /dev/null +++ b/vendor/github.com/vmware/govmomi/govc/importx/ova.go @@ -0,0 +1,61 @@ +/* +Copyright (c) 2014-2015 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package importx + +import ( + "flag" + + "golang.org/x/net/context" + + "github.com/vmware/govmomi/govc/cli" + "github.com/vmware/govmomi/object" + "github.com/vmware/govmomi/vim25/types" +) + +type ova struct { + *ovfx +} + +func init() { + cli.Register("import.ova", &ova{&ovfx{}}) +} + +func (cmd *ova) Usage() string { + return "PATH_TO_OVA" +} + +func (cmd *ova) Run(ctx context.Context, f *flag.FlagSet) error { + fpath, err := cmd.Prepare(f) + if err != nil { + return err + } + + cmd.Archive = &TapeArchive{fpath} + + moref, err := cmd.Import(fpath) + if err != nil { + return err + } + + vm := object.NewVirtualMachine(cmd.Client, *moref) + return cmd.Deploy(vm) +} + +func (cmd *ova) Import(fpath string) (*types.ManagedObjectReference, error) { + ovf := "*.ovf" + return cmd.ovfx.Import(ovf) +} diff --git a/vendor/github.com/vmware/govmomi/govc/importx/ovf.go b/vendor/github.com/vmware/govmomi/govc/importx/ovf.go new file mode 100644 index 000000000..9f1aafcda --- /dev/null +++ b/vendor/github.com/vmware/govmomi/govc/importx/ovf.go @@ -0,0 +1,415 @@ +/* +Copyright (c) 2014-2015 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package importx + +import ( + "errors" + "flag" + "fmt" + "path" + "strings" + + "github.com/vmware/govmomi/govc/cli" + "github.com/vmware/govmomi/govc/flags" + "github.com/vmware/govmomi/object" + "github.com/vmware/govmomi/ovf" + "github.com/vmware/govmomi/vim25" + "github.com/vmware/govmomi/vim25/progress" + "github.com/vmware/govmomi/vim25/soap" + "github.com/vmware/govmomi/vim25/types" + "golang.org/x/net/context" +) + +type ovfx struct { + *flags.DatastoreFlag + *flags.HostSystemFlag + *flags.OutputFlag + *flags.ResourcePoolFlag + + *ArchiveFlag + *OptionsFlag + *FolderFlag + + Name string + + Client *vim25.Client + Datacenter *object.Datacenter + Datastore *object.Datastore + ResourcePool *object.ResourcePool +} + +func init() { + cli.Register("import.ovf", &ovfx{}) +} + +func (cmd *ovfx) Register(ctx context.Context, f *flag.FlagSet) { + cmd.DatastoreFlag, ctx = flags.NewDatastoreFlag(ctx) + cmd.DatastoreFlag.Register(ctx, f) + cmd.HostSystemFlag, ctx = flags.NewHostSystemFlag(ctx) + cmd.HostSystemFlag.Register(ctx, f) + cmd.OutputFlag, ctx = flags.NewOutputFlag(ctx) + cmd.OutputFlag.Register(ctx, f) + cmd.ResourcePoolFlag, ctx = flags.NewResourcePoolFlag(ctx) + cmd.ResourcePoolFlag.Register(ctx, f) + + cmd.ArchiveFlag, ctx = newArchiveFlag(ctx) + cmd.ArchiveFlag.Register(ctx, f) + cmd.OptionsFlag, ctx = newOptionsFlag(ctx) + cmd.OptionsFlag.Register(ctx, f) + cmd.FolderFlag, ctx = newFolderFlag(ctx) + cmd.FolderFlag.Register(ctx, f) + + f.StringVar(&cmd.Name, "name", "", "Name to use for new entity") +} + +func (cmd *ovfx) Process(ctx context.Context) error { + if err := cmd.DatastoreFlag.Process(ctx); err != nil { + return err + } + if err := cmd.HostSystemFlag.Process(ctx); err != nil { + return err + } + if err := cmd.OutputFlag.Process(ctx); err != nil { + return err + } + if err := cmd.ResourcePoolFlag.Process(ctx); err != nil { + 
return err + } + if err := cmd.ArchiveFlag.Process(ctx); err != nil { + return err + } + if err := cmd.OptionsFlag.Process(ctx); err != nil { + return err + } + if err := cmd.FolderFlag.Process(ctx); err != nil { + return err + } + return nil +} + +func (cmd *ovfx) Usage() string { + return "PATH_TO_OVF" +} + +func (cmd *ovfx) Run(ctx context.Context, f *flag.FlagSet) error { + fpath, err := cmd.Prepare(f) + if err != nil { + return err + } + + cmd.Archive = &FileArchive{fpath} + + moref, err := cmd.Import(fpath) + if err != nil { + return err + } + + vm := object.NewVirtualMachine(cmd.Client, *moref) + return cmd.Deploy(vm) +} + +func (cmd *ovfx) Prepare(f *flag.FlagSet) (string, error) { + var err error + + args := f.Args() + if len(args) != 1 { + return "", errors.New("no file specified") + } + + cmd.Client, err = cmd.DatastoreFlag.Client() + if err != nil { + return "", err + } + + cmd.Datacenter, err = cmd.DatastoreFlag.Datacenter() + if err != nil { + return "", err + } + + cmd.Datastore, err = cmd.DatastoreFlag.Datastore() + if err != nil { + return "", err + } + + cmd.ResourcePool, err = cmd.ResourcePoolFlag.ResourcePool() + if err != nil { + return "", err + } + + return f.Arg(0), nil +} + +func (cmd *ovfx) Deploy(vm *object.VirtualMachine) error { + if err := cmd.PowerOn(vm); err != nil { + return err + } + + if err := cmd.InjectOvfEnv(vm); err != nil { + return err + } + + if err := cmd.WaitForIP(vm); err != nil { + return err + } + + return nil +} + +func (cmd *ovfx) Map(op []Property) (p []types.KeyValue) { + for _, v := range op { + p = append(p, v.KeyValue) + } + + return +} + +func (cmd *ovfx) Import(fpath string) (*types.ManagedObjectReference, error) { + o, err := cmd.ReadOvf(fpath) + if err != nil { + return nil, err + } + + e, err := cmd.ReadEnvelope(fpath) + if err != nil { + return nil, fmt.Errorf("failed to parse ovf: %s", err.Error()) + } + + name := "Govc Virtual Appliance" + if e.VirtualSystem != nil { + name = e.VirtualSystem.ID + if e.VirtualSystem.Name != nil { + name = *e.VirtualSystem.Name + } + } + + // Override name from options if specified + if cmd.Options.Name != nil { + name = *cmd.Options.Name + } + + // Override name from arguments if specified + if cmd.Name != "" { + name = cmd.Name + } + + cisp := types.OvfCreateImportSpecParams{ + DiskProvisioning: cmd.Options.DiskProvisioning, + EntityName: name, + IpAllocationPolicy: cmd.Options.IPAllocationPolicy, + IpProtocol: cmd.Options.IPProtocol, + OvfManagerCommonParams: types.OvfManagerCommonParams{ + DeploymentOption: cmd.Options.Deployment, + Locale: "US"}, + PropertyMapping: cmd.Map(cmd.Options.PropertyMapping), + } + + m := object.NewOvfManager(cmd.Client) + spec, err := m.CreateImportSpec(context.TODO(), string(o), cmd.ResourcePool, cmd.Datastore, cisp) + if err != nil { + return nil, err + } + if spec.Error != nil { + return nil, errors.New(spec.Error[0].LocalizedMessage) + } + if spec.Warning != nil { + for _, w := range spec.Warning { + _, _ = cmd.Log(fmt.Sprintf("Warning: %s\n", w.LocalizedMessage)) + } + } + + // TODO: ImportSpec may have unitNumber==0, but this field is optional in the wsdl + // and hence omitempty in the struct tag; but unitNumber is required for certain devices. 
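+	// As a workaround, the loop below rewrites a UnitNumber of 0 to -1: a zero
+	// would be dropped by the omitempty tag, while -1 survives serialization
+	// and the server appears to treat it as "unassigned".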
+ s := &spec.ImportSpec.(*types.VirtualMachineImportSpec).ConfigSpec + for _, d := range s.DeviceChange { + n := &d.GetVirtualDeviceConfigSpec().Device.GetVirtualDevice().UnitNumber + if *n == 0 { + *n = -1 + } + } + + var host *object.HostSystem + if cmd.SearchFlag.IsSet() { + if host, err = cmd.HostSystem(); err != nil { + return nil, err + } + } + + folder, err := cmd.Folder() + if err != nil { + return nil, err + } + + lease, err := cmd.ResourcePool.ImportVApp(context.TODO(), spec.ImportSpec, folder, host) + if err != nil { + return nil, err + } + + info, err := lease.Wait(context.TODO()) + if err != nil { + return nil, err + } + + // Build slice of items and URLs first, so that the lease updater can know + // about every item that needs to be uploaded, and thereby infer progress. + var items []ovfFileItem + + for _, device := range info.DeviceUrl { + for _, item := range spec.FileItem { + if device.ImportKey != item.DeviceId { + continue + } + + u, err := cmd.Client.ParseURL(device.Url) + if err != nil { + return nil, err + } + + i := ovfFileItem{ + url: u, + item: item, + ch: make(chan progress.Report), + } + + items = append(items, i) + } + } + + u := newLeaseUpdater(cmd.Client, lease, items) + defer u.Done() + + for _, i := range items { + err = cmd.Upload(lease, i) + if err != nil { + return nil, err + } + } + + return &info.Entity, lease.HttpNfcLeaseComplete(context.TODO()) +} + +func (cmd *ovfx) Upload(lease *object.HttpNfcLease, ofi ovfFileItem) error { + item := ofi.item + file := item.Path + + f, size, err := cmd.Open(file) + if err != nil { + return err + } + defer f.Close() + + logger := cmd.ProgressLogger(fmt.Sprintf("Uploading %s... ", path.Base(file))) + defer logger.Wait() + + opts := soap.Upload{ + ContentLength: size, + Progress: progress.Tee(ofi, logger), + } + + // Non-disk files (such as .iso) use the PUT method. 
+ // Overwrite: t header is also required in this case (ovftool does the same) + if item.Create { + opts.Method = "PUT" + opts.Headers = map[string]string{ + "Overwrite": "t", + } + } else { + opts.Method = "POST" + opts.Type = "application/x-vnd.vmware-streamVmdk" + } + + return cmd.Client.Client.Upload(f, ofi.url, &opts) +} + +func (cmd *ovfx) PowerOn(vm *object.VirtualMachine) error { + if !cmd.Options.PowerOn { + return nil + } + + cmd.Log("Powering on VM...\n") + + task, err := vm.PowerOn(context.TODO()) + if err != nil { + return err + } + + if _, err = task.WaitForResult(context.TODO(), nil); err != nil { + return err + } + + return nil +} + +func (cmd *ovfx) InjectOvfEnv(vm *object.VirtualMachine) error { + if !cmd.Options.PowerOn || !cmd.Options.InjectOvfEnv { + return nil + } + + a := cmd.Client.ServiceContent.About + if strings.EqualFold(a.ProductLineId, "esx") || strings.EqualFold(a.ProductLineId, "embeddedEsx") { + cmd.Log("Injecting OVF environment...\n") + + // build up Environment in order to marshal to xml + var epa []ovf.EnvProperty + for _, p := range cmd.Options.PropertyMapping { + epa = append(epa, ovf.EnvProperty{ + Key: p.Key, + Value: p.Value}) + } + env := ovf.Env{ + EsxID: vm.Reference().Value, + Platform: &ovf.PlatformSection{ + Kind: a.Name, + Version: a.Version, + Vendor: a.Vendor, + Locale: "US", + }, + Property: &ovf.PropertySection{ + Properties: epa}, + } + + xenv := env.MarshalManual() + vmConfigSpec := types.VirtualMachineConfigSpec{ + ExtraConfig: []types.BaseOptionValue{&types.OptionValue{ + Key: "guestinfo.ovfEnv", + Value: xenv}}} + + task, err := vm.Reconfigure(context.TODO(), vmConfigSpec) + if err != nil { + return err + } + if err := task.Wait(context.TODO()); err != nil { + return err + } + } + + return nil +} + +func (cmd *ovfx) WaitForIP(vm *object.VirtualMachine) error { + if !cmd.Options.PowerOn || !cmd.Options.WaitForIP { + return nil + } + + cmd.Log("Waiting for IP address...\n") + ip, err := vm.WaitForIP(context.TODO()) + if err != nil { + return err + } + + cmd.Log(fmt.Sprintf("Received IP address: %s\n", ip)) + return nil +} diff --git a/vendor/github.com/vmware/govmomi/govc/importx/spec.go b/vendor/github.com/vmware/govmomi/govc/importx/spec.go new file mode 100644 index 000000000..cc7776442 --- /dev/null +++ b/vendor/github.com/vmware/govmomi/govc/importx/spec.go @@ -0,0 +1,172 @@ +/* +Copyright (c) 2015 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package importx + +import ( + "encoding/json" + "flag" + "fmt" + "path" + + "golang.org/x/net/context" + + "github.com/vmware/govmomi/govc/cli" + "github.com/vmware/govmomi/ovf" + "github.com/vmware/govmomi/vim25/types" +) + +var ( + // all possible ovf property values + // the first element being the default value + allDeploymentOptions = []string{"small", "medium", "large"} + allDiskProvisioningOptions = []string{"thin", "monolithicSparse", "monolithicFlat", "twoGbMaxExtentSparse", "twoGbMaxExtentFlat", "seSparse", "eagerZeroedThick", "thick", "sparse", "flat"} + allIPAllocationPolicyOptions = []string{"dhcpPolicy", "transientPolicy", "fixedPolicy", "fixedAllocatedPolicy"} + allIPProtocolOptions = []string{"IPv4", "IPv6"} +) + +type spec struct { + *ArchiveFlag + + verbose bool +} + +func init() { + cli.Register("import.spec", &spec{}) +} + +func (cmd *spec) Register(ctx context.Context, f *flag.FlagSet) { + cmd.ArchiveFlag, ctx = newArchiveFlag(ctx) + cmd.ArchiveFlag.Register(ctx, f) + + f.BoolVar(&cmd.verbose, "verbose", false, "Verbose spec output") +} + +func (cmd *spec) Process(ctx context.Context) error { + if err := cmd.ArchiveFlag.Process(ctx); err != nil { + return err + } + return nil +} + +func (cmd *spec) Usage() string { + return "PATH_TO_OVF_OR_OVA" +} + +func (cmd *spec) Run(ctx context.Context, f *flag.FlagSet) error { + fpath := "" + args := f.Args() + if len(args) == 1 { + fpath = f.Arg(0) + } + + if len(fpath) > 0 { + switch path.Ext(fpath) { + case ".ovf": + cmd.Archive = &FileArchive{fpath} + case "", ".ova": + cmd.Archive = &TapeArchive{fpath} + fpath = "*.ovf" + default: + return fmt.Errorf("invalid file extension %s", path.Ext(fpath)) + } + } + + return cmd.Spec(fpath) +} + +func (cmd *spec) Map(e *ovf.Envelope) (res []Property) { + if e == nil { + return nil + } + + for _, p := range e.VirtualSystem.Product { + for i, v := range p.Property { + d := "" + if v.Default != nil { + d = *v.Default + } + + // From OVF spec, section 9.5.1: + // key-value-env = [class-value "."] key-value-prod ["." 
instance-value] + k := v.Key + if p.Class != nil { + k = fmt.Sprintf("%s.%s", *p.Class, k) + } + if p.Instance != nil { + k = fmt.Sprintf("%s.%s", k, *p.Instance) + } + + np := Property{KeyValue: types.KeyValue{Key: k, Value: d}} + if cmd.verbose { + np.Spec = &p.Property[i] + } + + res = append(res, np) + } + } + + return +} + +func (cmd *spec) Spec(fpath string) error { + e, err := cmd.ReadEnvelope(fpath) + if err != nil { + return err + } + + var deploymentOptions = allDeploymentOptions + if e != nil && e.DeploymentOption != nil && e.DeploymentOption.Configuration != nil { + deploymentOptions = nil + + // add default first + for _, c := range e.DeploymentOption.Configuration { + if c.Default != nil && *c.Default { + deploymentOptions = append(deploymentOptions, c.ID) + } + } + + for _, c := range e.DeploymentOption.Configuration { + if c.Default == nil || !*c.Default { + deploymentOptions = append(deploymentOptions, c.ID) + } + } + } + + o := Options{ + Deployment: deploymentOptions[0], + DiskProvisioning: allDiskProvisioningOptions[0], + IPAllocationPolicy: allIPAllocationPolicyOptions[0], + IPProtocol: allIPProtocolOptions[0], + PowerOn: false, + WaitForIP: false, + InjectOvfEnv: false, + PropertyMapping: cmd.Map(e)} + if cmd.verbose { + o.AllDeploymentOptions = deploymentOptions + o.AllDiskProvisioningOptions = allDiskProvisioningOptions + o.AllIPAllocationPolicyOptions = allIPAllocationPolicyOptions + o.AllIPProtocolOptions = allIPProtocolOptions + } + + j, err := json.Marshal(&o) + if err != nil { + return err + } + + fmt.Println(string(j)) + return nil +} diff --git a/vendor/github.com/vmware/govmomi/govc/importx/vmdk.go b/vendor/github.com/vmware/govmomi/govc/importx/vmdk.go new file mode 100644 index 000000000..6952532cc --- /dev/null +++ b/vendor/github.com/vmware/govmomi/govc/importx/vmdk.go @@ -0,0 +1,518 @@ +/* +Copyright (c) 2014-2015 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/
+
+package importx
+
+import (
+	"errors"
+	"flag"
+	"fmt"
+	"path"
+	"reflect"
+	"regexp"
+
+	"github.com/vmware/govmomi/govc/cli"
+	"github.com/vmware/govmomi/govc/flags"
+	"github.com/vmware/govmomi/object"
+	"github.com/vmware/govmomi/property"
+	"github.com/vmware/govmomi/vim25"
+	"github.com/vmware/govmomi/vim25/mo"
+	"github.com/vmware/govmomi/vim25/progress"
+	"github.com/vmware/govmomi/vim25/soap"
+	"github.com/vmware/govmomi/vim25/types"
+	"golang.org/x/net/context"
+)
+
+type vmdk struct {
+	*flags.DatastoreFlag
+	*flags.ResourcePoolFlag
+	*flags.OutputFlag
+
+	upload bool
+	force  bool
+	keep   bool
+
+	Client       *vim25.Client
+	Datacenter   *object.Datacenter
+	Datastore    *object.Datastore
+	ResourcePool *object.ResourcePool
+}
+
+func init() {
+	cli.Register("import.vmdk", &vmdk{})
+	cli.Alias("import.vmdk", "datastore.import")
+}
+
+func (cmd *vmdk) Register(ctx context.Context, f *flag.FlagSet) {
+	cmd.DatastoreFlag, ctx = flags.NewDatastoreFlag(ctx)
+	cmd.DatastoreFlag.Register(ctx, f)
+	cmd.ResourcePoolFlag, ctx = flags.NewResourcePoolFlag(ctx)
+	cmd.ResourcePoolFlag.Register(ctx, f)
+	cmd.OutputFlag, ctx = flags.NewOutputFlag(ctx)
+	cmd.OutputFlag.Register(ctx, f)
+
+	f.BoolVar(&cmd.upload, "upload", true, "Upload specified disk")
+	f.BoolVar(&cmd.force, "force", false, "Overwrite existing disk")
+	f.BoolVar(&cmd.keep, "keep", false, "Keep uploaded disk after import")
+}
+
+func (cmd *vmdk) Process(ctx context.Context) error {
+	if err := cmd.DatastoreFlag.Process(ctx); err != nil {
+		return err
+	}
+	if err := cmd.ResourcePoolFlag.Process(ctx); err != nil {
+		return err
+	}
+	if err := cmd.OutputFlag.Process(ctx); err != nil {
+		return err
+	}
+	return nil
+}
+
+func (cmd *vmdk) Usage() string {
+	return "PATH_TO_VMDK [REMOTE_DIRECTORY]"
+}
+
+func (cmd *vmdk) Run(ctx context.Context, f *flag.FlagSet) error {
+	var err error
+
+	args := f.Args()
+	if len(args) < 1 {
+		return errors.New("no file to import")
+	}
+
+	file := importable{
+		localPath: f.Arg(0),
+	}
+
+	// Include remote path if specified
+	if len(args) >= 2 {
+		file.remotePath = f.Arg(1)
+	}
+
+	cmd.Client, err = cmd.DatastoreFlag.Client()
+	if err != nil {
+		return err
+	}
+
+	cmd.Datacenter, err = cmd.DatastoreFlag.Datacenter()
+	if err != nil {
+		return err
+	}
+
+	cmd.Datastore, err = cmd.DatastoreFlag.Datastore()
+	if err != nil {
+		return err
+	}
+
+	cmd.ResourcePool, err = cmd.ResourcePoolFlag.ResourcePool()
+	if err != nil {
+		return err
+	}
+
+	err = cmd.PrepareDestination(file)
+	if err != nil {
+		return err
+	}
+
+	if cmd.upload {
+		err = cmd.Upload(file)
+		if err != nil {
+			return err
+		}
+	}
+
+	return cmd.Import(file)
+}
+
+// PrepareDestination makes sure that the destination VMDK does not yet exist.
+// If the force flag is passed, it removes the existing VMDK. This function
+// exists to give a meaningful error if the remote VMDK already exists.
+//
+// CopyVirtualDisk can return a "file does not exist" error while in fact
+// the source file *does* exist and the *destination* file also exists.
+//
+func (cmd *vmdk) PrepareDestination(i importable) error {
+	vmdkPath := i.RemoteDstVMDK()
+	res, err := cmd.Datastore.Stat(context.TODO(), vmdkPath)
+	if err != nil {
+		switch err.(type) {
+		case object.DatastoreNoSuchDirectoryError:
+			// The base path doesn't exist. Create it.
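+			// MakeDirectory is called with createParentDirectories=true, so
+			// any missing intermediate directories on the datastore are
+			// created as well.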
+ dsPath := cmd.Datastore.Path(path.Dir(vmdkPath)) + m := object.NewFileManager(cmd.Client) + return m.MakeDirectory(context.TODO(), dsPath, cmd.Datacenter, true) + case object.DatastoreNoSuchFileError: + // Destination path doesn't exist; all good to continue with import. + return nil + } + + return err + } + + // Check that the returned entry has the right type. + switch res.(type) { + case *types.VmDiskFileInfo: + default: + expected := "VmDiskFileInfo" + actual := reflect.TypeOf(res) + panic(fmt.Sprintf("Expected: %s, actual: %s", expected, actual)) + } + + if !cmd.force { + dsPath := cmd.Datastore.Path(vmdkPath) + err = fmt.Errorf("File %s already exists", dsPath) + return err + } + + // Delete existing disk. + err = cmd.DeleteDisk(vmdkPath) + if err != nil { + return err + } + + return nil +} + +func (cmd *vmdk) Upload(i importable) error { + p := soap.DefaultUpload + if cmd.OutputFlag.TTY { + logger := cmd.ProgressLogger("Uploading... ") + p.Progress = logger + defer logger.Wait() + } + + return cmd.Datastore.UploadFile(context.TODO(), i.localPath, i.RemoteSrcVMDK(), &p) +} + +func (cmd *vmdk) Import(i importable) error { + err := cmd.Copy(i) + if err != nil { + return err + } + + if !cmd.keep { + err = cmd.DeleteDisk(i.RemoteSrcVMDK()) + if err != nil { + return err + } + } + + return nil +} + +func (cmd *vmdk) Copy(i importable) error { + var err error + + logger := cmd.ProgressLogger("Importing... ") + defer logger.Wait() + + agg := progress.NewAggregator(logger) + defer agg.Done() + + switch p := cmd.Client.ServiceContent.About.ApiType; p { + case "HostAgent": + err = cmd.CopyHostAgent(i, agg) + case "VirtualCenter": + err = cmd.CopyVirtualCenter(i, agg) + default: + return fmt.Errorf("unsupported ApiType: %s", p) + } + + return err +} + +func (cmd *vmdk) CopyHostAgent(i importable, s progress.Sinker) error { + spec := &types.VirtualDiskSpec{ + AdapterType: "lsiLogic", + DiskType: "thin", + } + + dc := cmd.Datacenter + src := cmd.Datastore.Path(i.RemoteSrcVMDK()) + dst := cmd.Datastore.Path(i.RemoteDstVMDK()) + vdm := object.NewVirtualDiskManager(cmd.Client) + task, err := vdm.CopyVirtualDisk(context.TODO(), src, dc, dst, dc, spec, false) + if err != nil { + return err + } + + ps := progress.Prefix(s, "copying disk") + _, err = task.WaitForResult(context.TODO(), ps) + if err != nil { + return err + } + + return nil +} + +func (cmd *vmdk) CopyVirtualCenter(i importable, s progress.Sinker) error { + var err error + + srcName := i.BaseClean() + "-srcvm" + dstName := i.BaseClean() + "-dstvm" + + spec := &configSpec{ + Name: srcName, + GuestId: "otherGuest", + Files: &types.VirtualMachineFileInfo{ + VmPathName: fmt.Sprintf("[%s]", cmd.Datastore.Name()), + }, + } + + spec.AddDisk(cmd.Datastore, i.RemoteSrcVMDK()) + + src, err := cmd.CreateVM(spec) + if err != nil { + return err + } + + dst, err := cmd.CloneVM(src, dstName) + if err != nil { + return err + } + + err = cmd.DestroyVM(src) + if err != nil { + return err + } + + vmdk, err := cmd.DetachDisk(dst) + if err != nil { + return err + } + + err = cmd.MoveDisk(vmdk, i.RemoteDstVMDK()) + if err != nil { + return err + } + + err = cmd.DestroyVM(dst) + if err != nil { + return err + } + + return nil +} + +func (cmd *vmdk) MoveDisk(src, dst string) error { + dsSrc := cmd.Datastore.Path(src) + dsDst := cmd.Datastore.Path(dst) + vdm := object.NewVirtualDiskManager(cmd.Client) + task, err := vdm.MoveVirtualDisk(context.TODO(), dsSrc, cmd.Datacenter, dsDst, cmd.Datacenter, true) + if err != nil { + return err + } + + return 
task.Wait(context.TODO()) +} + +func (cmd *vmdk) DeleteDisk(path string) error { + vdm := object.NewVirtualDiskManager(cmd.Client) + task, err := vdm.DeleteVirtualDisk(context.TODO(), cmd.Datastore.Path(path), cmd.Datacenter) + if err != nil { + return err + } + + return task.Wait(context.TODO()) +} + +func (cmd *vmdk) DetachDisk(vm *object.VirtualMachine) (string, error) { + var mvm mo.VirtualMachine + + pc := property.DefaultCollector(cmd.Client) + err := pc.RetrieveOne(context.TODO(), vm.Reference(), []string{"config.hardware"}, &mvm) + if err != nil { + return "", err + } + + spec := new(configSpec) + dsFile := spec.RemoveDisk(&mvm) + + task, err := vm.Reconfigure(context.TODO(), spec.ToSpec()) + if err != nil { + return "", err + } + + err = task.Wait(context.TODO()) + if err != nil { + return "", err + } + + return dsFile, nil +} + +func (cmd *vmdk) CreateVM(spec *configSpec) (*object.VirtualMachine, error) { + folders, err := cmd.Datacenter.Folders(context.TODO()) + if err != nil { + return nil, err + } + + task, err := folders.VmFolder.CreateVM(context.TODO(), spec.ToSpec(), cmd.ResourcePool, nil) + if err != nil { + return nil, err + } + + info, err := task.WaitForResult(context.TODO(), nil) + if err != nil { + return nil, err + } + + return object.NewVirtualMachine(cmd.Client, info.Result.(types.ManagedObjectReference)), nil +} + +func (cmd *vmdk) CloneVM(vm *object.VirtualMachine, name string) (*object.VirtualMachine, error) { + folders, err := cmd.Datacenter.Folders(context.TODO()) + if err != nil { + return nil, err + } + + spec := types.VirtualMachineCloneSpec{ + Config: &types.VirtualMachineConfigSpec{}, + Location: types.VirtualMachineRelocateSpec{}, + } + + task, err := vm.Clone(context.TODO(), folders.VmFolder, name, spec) + if err != nil { + return nil, err + } + + info, err := task.WaitForResult(context.TODO(), nil) + if err != nil { + return nil, err + } + + return object.NewVirtualMachine(cmd.Client, info.Result.(types.ManagedObjectReference)), nil +} + +func (cmd *vmdk) DestroyVM(vm *object.VirtualMachine) error { + _, err := cmd.DetachDisk(vm) + if err != nil { + return err + } + + task, err := vm.Destroy(context.TODO()) + if err != nil { + return err + } + + err = task.Wait(context.TODO()) + if err != nil { + return err + } + + return nil +} + +type configSpec types.VirtualMachineConfigSpec + +func (c *configSpec) ToSpec() types.VirtualMachineConfigSpec { + return types.VirtualMachineConfigSpec(*c) +} + +func (c *configSpec) AddChange(d types.BaseVirtualDeviceConfigSpec) { + c.DeviceChange = append(c.DeviceChange, d) +} + +func (c *configSpec) AddDisk(ds *object.Datastore, path string) { + controller := &types.VirtualLsiLogicController{ + VirtualSCSIController: types.VirtualSCSIController{ + SharedBus: types.VirtualSCSISharingNoSharing, + VirtualController: types.VirtualController{ + BusNumber: 0, + VirtualDevice: types.VirtualDevice{ + Key: -1, + }, + }, + }, + } + + controllerSpec := &types.VirtualDeviceConfigSpec{ + Device: controller, + Operation: types.VirtualDeviceConfigSpecOperationAdd, + } + + c.AddChange(controllerSpec) + + disk := &types.VirtualDisk{ + VirtualDevice: types.VirtualDevice{ + Key: -1, + ControllerKey: -1, + UnitNumber: -1, + Backing: &types.VirtualDiskFlatVer2BackingInfo{ + VirtualDeviceFileBackingInfo: types.VirtualDeviceFileBackingInfo{ + FileName: ds.Path(path), + }, + DiskMode: string(types.VirtualDiskModePersistent), + ThinProvisioned: types.NewBool(true), + }, + }, + } + + diskSpec := &types.VirtualDeviceConfigSpec{ + Device: disk, + 
Operation: types.VirtualDeviceConfigSpecOperationAdd, + } + + c.AddChange(diskSpec) +} + +var dsPathRegexp = regexp.MustCompile(`^\[.*\] (.*)$`) + +func (c *configSpec) RemoveDisk(vm *mo.VirtualMachine) string { + var file string + + for _, d := range vm.Config.Hardware.Device { + switch device := d.(type) { + case *types.VirtualDisk: + if file != "" { + panic("expected VM to have only one disk") + } + + switch backing := device.Backing.(type) { + case *types.VirtualDiskFlatVer1BackingInfo: + file = backing.FileName + case *types.VirtualDiskFlatVer2BackingInfo: + file = backing.FileName + case *types.VirtualDiskSeSparseBackingInfo: + file = backing.FileName + case *types.VirtualDiskSparseVer1BackingInfo: + file = backing.FileName + case *types.VirtualDiskSparseVer2BackingInfo: + file = backing.FileName + default: + name := reflect.TypeOf(device.Backing).String() + panic(fmt.Sprintf("unexpected backing type: %s", name)) + } + + // Remove [datastore] prefix + m := dsPathRegexp.FindStringSubmatch(file) + if len(m) != 2 { + panic(fmt.Sprintf("expected regexp match for %#v", file)) + } + file = m[1] + + removeOp := &types.VirtualDeviceConfigSpec{ + Operation: types.VirtualDeviceConfigSpecOperationRemove, + Device: device, + } + + c.AddChange(removeOp) + } + } + + return file +} diff --git a/vendor/github.com/vmware/govmomi/govc/license/add.go b/vendor/github.com/vmware/govmomi/govc/license/add.go new file mode 100644 index 000000000..a161c3d87 --- /dev/null +++ b/vendor/github.com/vmware/govmomi/govc/license/add.go @@ -0,0 +1,96 @@ +/* +Copyright (c) 2014-2015 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package license + +import ( + "flag" + "fmt" + + "github.com/vmware/govmomi/govc/cli" + "github.com/vmware/govmomi/govc/flags" + "github.com/vmware/govmomi/license" + "github.com/vmware/govmomi/vim25/types" + "golang.org/x/net/context" +) + +type add struct { + *flags.ClientFlag + *flags.OutputFlag +} + +func init() { + cli.Register("license.add", &add{}) +} + +func (cmd *add) Register(ctx context.Context, f *flag.FlagSet) { + cmd.ClientFlag, ctx = flags.NewClientFlag(ctx) + cmd.ClientFlag.Register(ctx, f) + + cmd.OutputFlag, ctx = flags.NewOutputFlag(ctx) + cmd.OutputFlag.Register(ctx, f) +} + +func (cmd *add) Process(ctx context.Context) error { + if err := cmd.ClientFlag.Process(ctx); err != nil { + return err + } + if err := cmd.OutputFlag.Process(ctx); err != nil { + return err + } + return nil +} + +func (cmd *add) Usage() string { + return "KEY..." +} + +func (cmd *add) Run(ctx context.Context, f *flag.FlagSet) error { + client, err := cmd.Client() + if err != nil { + return err + } + + m := license.NewManager(client) + + // From the vSphere 5.5 documentation: + // + // To specify the edition type and any optional functions, use + // updateLicense for ESX Server and addLicense follow by + // LicenseAssingmentManager.updateAssignedLicense for VirtualCenter. 
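+	//
+	// Hence the switch below: Update is used when connected directly to a
+	// host agent, Add when connected to VirtualCenter.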
+ // + var addFunc func(ctx context.Context, key string, labels map[string]string) (types.LicenseManagerLicenseInfo, error) + switch t := client.ServiceContent.About.ApiType; t { + case "HostAgent": + addFunc = m.Update + case "VirtualCenter": + addFunc = m.Add + default: + return fmt.Errorf("unsupported ApiType: %s", t) + } + + result := make(licenseOutput, 0) + for _, v := range f.Args() { + license, err := addFunc(context.TODO(), v, nil) + if err != nil { + return err + } + + result = append(result, license) + } + + return cmd.WriteResult(licenseOutput(result)) +} diff --git a/vendor/github.com/vmware/govmomi/govc/license/assign.go b/vendor/github.com/vmware/govmomi/govc/license/assign.go new file mode 100644 index 000000000..aedf239bc --- /dev/null +++ b/vendor/github.com/vmware/govmomi/govc/license/assign.go @@ -0,0 +1,114 @@ +/* +Copyright (c) 2015 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package license + +import ( + "flag" + + "github.com/vmware/govmomi/govc/cli" + "github.com/vmware/govmomi/govc/flags" + "github.com/vmware/govmomi/license" + "github.com/vmware/govmomi/vim25/types" + "golang.org/x/net/context" +) + +type assign struct { + *flags.ClientFlag + *flags.OutputFlag + *flags.HostSystemFlag + + name string + remove bool +} + +func init() { + cli.Register("license.assign", &assign{}) +} + +func (cmd *assign) Register(ctx context.Context, f *flag.FlagSet) { + cmd.ClientFlag, ctx = flags.NewClientFlag(ctx) + cmd.ClientFlag.Register(ctx, f) + + cmd.OutputFlag, ctx = flags.NewOutputFlag(ctx) + cmd.OutputFlag.Register(ctx, f) + + cmd.HostSystemFlag, ctx = flags.NewHostSystemFlag(ctx) + cmd.HostSystemFlag.Register(ctx, f) + + f.StringVar(&cmd.name, "name", "", "Display name") + f.BoolVar(&cmd.remove, "remove", false, "Remove assignment") +} + +func (cmd *assign) Process(ctx context.Context) error { + if err := cmd.ClientFlag.Process(ctx); err != nil { + return err + } + if err := cmd.OutputFlag.Process(ctx); err != nil { + return err + } + if err := cmd.HostSystemFlag.Process(ctx); err != nil { + return err + } + return nil +} + +func (cmd *assign) Usage() string { + return "KEY" +} + +func (cmd *assign) Run(ctx context.Context, f *flag.FlagSet) error { + if f.NArg() != 1 { + return flag.ErrHelp + } + + key := f.Arg(0) + + client, err := cmd.Client() + if err != nil { + return err + } + + m, err := license.NewManager(client).AssignmentManager(ctx) + if err != nil { + return err + } + + host, err := cmd.HostSystemIfSpecified() + if err != nil { + return err + } + + var id string + + if host == nil { + // Default to vCenter UUID + id = client.ServiceContent.About.InstanceUuid + } else { + id = host.Reference().Value + } + + if cmd.remove { + return m.Remove(ctx, id) + } + + info, err := m.Update(ctx, id, key, cmd.name) + if err != nil { + return err + } + + return cmd.WriteResult(licenseOutput([]types.LicenseManagerLicenseInfo{*info})) +} diff --git a/vendor/github.com/vmware/govmomi/govc/license/assigned.go 
b/vendor/github.com/vmware/govmomi/govc/license/assigned.go new file mode 100644 index 000000000..682e831a7 --- /dev/null +++ b/vendor/github.com/vmware/govmomi/govc/license/assigned.go @@ -0,0 +1,96 @@ +/* +Copyright (c) 2015 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package license + +import ( + "flag" + "fmt" + "io" + "os" + "text/tabwriter" + + "github.com/vmware/govmomi/govc/cli" + "github.com/vmware/govmomi/govc/flags" + "github.com/vmware/govmomi/license" + "github.com/vmware/govmomi/vim25/types" + "golang.org/x/net/context" +) + +type assigned struct { + *flags.ClientFlag + *flags.OutputFlag + + id string +} + +func init() { + cli.Register("license.assigned.list", &assigned{}) +} + +func (cmd *assigned) Register(ctx context.Context, f *flag.FlagSet) { + cmd.ClientFlag, ctx = flags.NewClientFlag(ctx) + cmd.ClientFlag.Register(ctx, f) + + cmd.OutputFlag, ctx = flags.NewOutputFlag(ctx) + cmd.OutputFlag.Register(ctx, f) + + f.StringVar(&cmd.id, "id", "", "Entity ID") +} + +func (cmd *assigned) Process(ctx context.Context) error { + if err := cmd.ClientFlag.Process(ctx); err != nil { + return err + } + if err := cmd.OutputFlag.Process(ctx); err != nil { + return err + } + return nil +} + +func (cmd *assigned) Run(ctx context.Context, f *flag.FlagSet) error { + client, err := cmd.Client() + if err != nil { + return err + } + + m, err := license.NewManager(client).AssignmentManager(context.TODO()) + if err != nil { + return err + } + + assigned, err := m.QueryAssigned(context.TODO(), cmd.id) + if err != nil { + return err + } + + return cmd.WriteResult(assignedOutput(assigned)) +} + +type assignedOutput []types.LicenseAssignmentManagerLicenseAssignment + +func (res assignedOutput) Write(w io.Writer) error { + tw := tabwriter.NewWriter(os.Stdout, 4, 0, 2, ' ', 0) + fmt.Fprintf(tw, "Id:\tScope:\tName:\tLicense:\n") + for _, v := range res { + fmt.Fprintf(tw, "%s\t", v.EntityId) + fmt.Fprintf(tw, "%s\t", v.Scope) + fmt.Fprintf(tw, "%s\t", v.EntityDisplayName) + fmt.Fprintf(tw, "%s\t", v.AssignedLicense.LicenseKey) + fmt.Fprintf(tw, "\n") + } + return tw.Flush() +} diff --git a/vendor/github.com/vmware/govmomi/govc/license/decode.go b/vendor/github.com/vmware/govmomi/govc/license/decode.go new file mode 100644 index 000000000..aadbed319 --- /dev/null +++ b/vendor/github.com/vmware/govmomi/govc/license/decode.go @@ -0,0 +1,85 @@ +/* +Copyright (c) 2015 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package license + +import ( + "flag" + + "github.com/vmware/govmomi/govc/cli" + "github.com/vmware/govmomi/govc/flags" + "github.com/vmware/govmomi/license" + "golang.org/x/net/context" +) + +type decode struct { + *flags.ClientFlag + *flags.OutputFlag + + feature string +} + +func init() { + cli.Register("license.decode", &decode{}) +} + +func (cmd *decode) Register(ctx context.Context, f *flag.FlagSet) { + cmd.ClientFlag, ctx = flags.NewClientFlag(ctx) + cmd.ClientFlag.Register(ctx, f) + + cmd.OutputFlag, ctx = flags.NewOutputFlag(ctx) + cmd.OutputFlag.Register(ctx, f) + + f.StringVar(&cmd.feature, "feature", "", featureUsage) +} + +func (cmd *decode) Process(ctx context.Context) error { + if err := cmd.ClientFlag.Process(ctx); err != nil { + return err + } + if err := cmd.OutputFlag.Process(ctx); err != nil { + return err + } + return nil +} + +func (cmd *decode) Usage() string { + return "KEY..." +} + +func (cmd *decode) Run(ctx context.Context, f *flag.FlagSet) error { + client, err := cmd.Client() + if err != nil { + return err + } + + var result license.InfoList + m := license.NewManager(client) + for _, v := range f.Args() { + license, err := m.Decode(context.TODO(), v) + if err != nil { + return err + } + + result = append(result, license) + } + + if cmd.feature != "" { + result = result.WithFeature(cmd.feature) + } + + return cmd.WriteResult(licenseOutput(result)) +} diff --git a/vendor/github.com/vmware/govmomi/govc/license/list.go b/vendor/github.com/vmware/govmomi/govc/license/list.go new file mode 100644 index 000000000..382d75441 --- /dev/null +++ b/vendor/github.com/vmware/govmomi/govc/license/list.go @@ -0,0 +1,78 @@ +/* +Copyright (c) 2014-2015 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package license + +import ( + "flag" + + "github.com/vmware/govmomi/govc/cli" + "github.com/vmware/govmomi/govc/flags" + "github.com/vmware/govmomi/license" + "golang.org/x/net/context" +) + +var featureUsage = "List licenses with given feature" + +type list struct { + *flags.ClientFlag + *flags.OutputFlag + + feature string +} + +func init() { + cli.Register("license.list", &list{}) +} + +func (cmd *list) Register(ctx context.Context, f *flag.FlagSet) { + cmd.ClientFlag, ctx = flags.NewClientFlag(ctx) + cmd.ClientFlag.Register(ctx, f) + + cmd.OutputFlag, ctx = flags.NewOutputFlag(ctx) + cmd.OutputFlag.Register(ctx, f) + + f.StringVar(&cmd.feature, "feature", "", featureUsage) +} + +func (cmd *list) Process(ctx context.Context) error { + if err := cmd.ClientFlag.Process(ctx); err != nil { + return err + } + if err := cmd.OutputFlag.Process(ctx); err != nil { + return err + } + return nil +} + +func (cmd *list) Run(ctx context.Context, f *flag.FlagSet) error { + client, err := cmd.Client() + if err != nil { + return err + } + + m := license.NewManager(client) + result, err := m.List(context.TODO()) + if err != nil { + return err + } + + if cmd.feature != "" { + result = result.WithFeature(cmd.feature) + } + + return cmd.WriteResult(licenseOutput(result)) +} diff --git a/vendor/github.com/vmware/govmomi/govc/license/output.go b/vendor/github.com/vmware/govmomi/govc/license/output.go new file mode 100644 index 000000000..e4cfe42a5 --- /dev/null +++ b/vendor/github.com/vmware/govmomi/govc/license/output.go @@ -0,0 +1,41 @@ +/* +Copyright (c) 2014-2015 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package license + +import ( + "fmt" + "io" + "os" + "text/tabwriter" + + "github.com/vmware/govmomi/vim25/types" +) + +type licenseOutput []types.LicenseManagerLicenseInfo + +func (res licenseOutput) Write(w io.Writer) error { + tw := tabwriter.NewWriter(os.Stdout, 4, 0, 2, ' ', 0) + fmt.Fprintf(tw, "Key:\tEdition:\tUsed:\tTotal:\n") + for _, v := range res { + fmt.Fprintf(tw, "%s\t", v.LicenseKey) + fmt.Fprintf(tw, "%s\t", v.EditionKey) + fmt.Fprintf(tw, "%d\t", v.Used) + fmt.Fprintf(tw, "%d\t", v.Total) + fmt.Fprintf(tw, "\n") + } + return tw.Flush() +} diff --git a/vendor/github.com/vmware/govmomi/govc/license/remove.go b/vendor/github.com/vmware/govmomi/govc/license/remove.go new file mode 100644 index 000000000..886a2bbb9 --- /dev/null +++ b/vendor/github.com/vmware/govmomi/govc/license/remove.go @@ -0,0 +1,74 @@ +/* +Copyright (c) 2014-2015 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package license + +import ( + "flag" + + "github.com/vmware/govmomi/govc/cli" + "github.com/vmware/govmomi/govc/flags" + "github.com/vmware/govmomi/license" + "golang.org/x/net/context" +) + +type remove struct { + *flags.ClientFlag + *flags.OutputFlag +} + +func init() { + cli.Register("license.remove", &remove{}) +} + +func (cmd *remove) Register(ctx context.Context, f *flag.FlagSet) { + cmd.ClientFlag, ctx = flags.NewClientFlag(ctx) + cmd.ClientFlag.Register(ctx, f) + + cmd.OutputFlag, ctx = flags.NewOutputFlag(ctx) + cmd.OutputFlag.Register(ctx, f) +} + +func (cmd *remove) Process(ctx context.Context) error { + if err := cmd.ClientFlag.Process(ctx); err != nil { + return err + } + if err := cmd.OutputFlag.Process(ctx); err != nil { + return err + } + return nil +} + +func (cmd *remove) Usage() string { + return "KEY..." +} + +func (cmd *remove) Run(ctx context.Context, f *flag.FlagSet) error { + client, err := cmd.Client() + if err != nil { + return err + } + + m := license.NewManager(client) + for _, v := range f.Args() { + err = m.Remove(context.TODO(), v) + if err != nil { + return err + } + } + + return nil +} diff --git a/vendor/github.com/vmware/govmomi/govc/logs/command.go b/vendor/github.com/vmware/govmomi/govc/logs/command.go new file mode 100644 index 000000000..df01c9d4a --- /dev/null +++ b/vendor/github.com/vmware/govmomi/govc/logs/command.go @@ -0,0 +1,110 @@ +/* +Copyright (c) 2015 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package logs + +import ( + "flag" + "fmt" + "math" + + "golang.org/x/net/context" + + "github.com/vmware/govmomi/govc/cli" + "github.com/vmware/govmomi/govc/flags" + "github.com/vmware/govmomi/object" +) + +type logs struct { + *flags.HostSystemFlag + + Max int + Key string +} + +func init() { + cli.Register("logs", &logs{}) +} + +func (cmd *logs) Register(ctx context.Context, f *flag.FlagSet) { + cmd.HostSystemFlag, ctx = flags.NewHostSystemFlag(ctx) + cmd.HostSystemFlag.Register(ctx, f) + + f.IntVar(&cmd.Max, "n", 25, "Output the last N logs") + f.StringVar(&cmd.Key, "log", "", "Log file key") +} + +func (cmd *logs) Process(ctx context.Context) error { + if err := cmd.HostSystemFlag.Process(ctx); err != nil { + return err + } + return nil +} + +func (cmd *logs) Description() string { + return ` +The '-log' option defaults to "hostd" when connected directly to a host or +when connected to VirtualCenter and a '-host' option is given. Otherwise, +the '-log' option defaults to "vpxd:vpxd.log". The '-host' option is ignored +when connected directly to host. 
+See 'govc logs.ls' for other '-log' options.` +} + +func (cmd *logs) Run(ctx context.Context, f *flag.FlagSet) error { + c, err := cmd.Client() + if err != nil { + return err + } + + defaultKey := "hostd" + var host *object.HostSystem + + if c.IsVC() { + host, err = cmd.HostSystemIfSpecified() + if err != nil { + return err + } + + if host == nil { + defaultKey = "vpxd:vpxd.log" + } + } + + m := object.NewDiagnosticManager(c) + + key := cmd.Key + if key == "" { + key = defaultKey + } + + // get LineEnd without any LineText + h, err := m.BrowseLog(ctx, host, key, math.MaxInt32, 0) + if err != nil { + return err + } + + start := h.LineEnd - cmd.Max + h, err = m.BrowseLog(ctx, host, key, start, 0) + if err != nil { + return err + } + + for _, line := range h.LineText { + fmt.Println(line) + } + + return nil +} diff --git a/vendor/github.com/vmware/govmomi/govc/logs/download.go b/vendor/github.com/vmware/govmomi/govc/logs/download.go new file mode 100644 index 000000000..5fa709c0c --- /dev/null +++ b/vendor/github.com/vmware/govmomi/govc/logs/download.go @@ -0,0 +1,133 @@ +/* +Copyright (c) 2015 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package logs + +import ( + "flag" + "fmt" + "path" + + "golang.org/x/net/context" + + "github.com/vmware/govmomi/govc/cli" + "github.com/vmware/govmomi/govc/flags" + "github.com/vmware/govmomi/object" + "github.com/vmware/govmomi/vim25" + "github.com/vmware/govmomi/vim25/soap" + "github.com/vmware/govmomi/vim25/types" +) + +type download struct { + *flags.DatacenterFlag + + IncludeDefault bool +} + +func init() { + cli.Register("logs.download", &download{}) +} + +func (cmd *download) Register(ctx context.Context, f *flag.FlagSet) { + cmd.DatacenterFlag, ctx = flags.NewDatacenterFlag(ctx) + cmd.DatacenterFlag.Register(ctx, f) + + f.BoolVar(&cmd.IncludeDefault, "default", true, "Specifies if the bundle should include the default server") +} + +func (cmd *download) Process(ctx context.Context) error { + if err := cmd.DatacenterFlag.Process(ctx); err != nil { + return err + } + return nil +} + +func (cmd *download) Usage() string { + return "[PATH]..." +} + +func (cmd *download) DownloadFile(c *vim25.Client, b string) error { + u, err := c.Client.ParseURL(b) + if err != nil { + return err + } + + dst := path.Base(u.Path) + p := soap.DefaultDownload + if cmd.OutputFlag.TTY { + logger := cmd.ProgressLogger(fmt.Sprintf("Downloading %s... ", dst)) + defer logger.Wait() + p.Progress = logger + } + + return c.Client.DownloadFile(dst, u, &p) +} + +func (cmd *download) GenerateLogBundles(m *object.DiagnosticManager, host []*object.HostSystem) ([]types.DiagnosticManagerBundleInfo, error) { + logger := cmd.ProgressLogger("Generating log bundles... 
") + defer logger.Wait() + + task, err := m.GenerateLogBundles(context.TODO(), cmd.IncludeDefault, host) + if err != nil { + return nil, err + } + + r, err := task.WaitForResult(context.TODO(), logger) + if err != nil { + return nil, err + } + + return r.Result.(types.ArrayOfDiagnosticManagerBundleInfo).DiagnosticManagerBundleInfo, nil +} + +func (cmd *download) Run(ctx context.Context, f *flag.FlagSet) error { + finder, err := cmd.Finder() + if err != nil { + return err + } + + var host []*object.HostSystem + + for _, arg := range f.Args() { + hs, err := finder.HostSystemList(context.TODO(), arg) + if err != nil { + return err + } + + host = append(host, hs...) + } + + c, err := cmd.Client() + if err != nil { + return err + } + + m := object.NewDiagnosticManager(c) + + bundles, err := cmd.GenerateLogBundles(m, host) + if err != nil { + return err + } + + for _, bundle := range bundles { + err := cmd.DownloadFile(c, bundle.Url) + if err != nil { + return err + } + } + + return nil +} diff --git a/vendor/github.com/vmware/govmomi/govc/logs/ls.go b/vendor/github.com/vmware/govmomi/govc/logs/ls.go new file mode 100644 index 000000000..0437bc9c1 --- /dev/null +++ b/vendor/github.com/vmware/govmomi/govc/logs/ls.go @@ -0,0 +1,81 @@ +/* +Copyright (c) 2015 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package logs + +import ( + "flag" + "fmt" + "os" + "text/tabwriter" + + "golang.org/x/net/context" + + "github.com/vmware/govmomi/govc/cli" + "github.com/vmware/govmomi/govc/flags" + "github.com/vmware/govmomi/object" +) + +type ls struct { + *flags.HostSystemFlag +} + +func init() { + cli.Register("logs.ls", &ls{}) +} + +func (cmd *ls) Register(ctx context.Context, f *flag.FlagSet) { + cmd.HostSystemFlag, ctx = flags.NewHostSystemFlag(ctx) + cmd.HostSystemFlag.Register(ctx, f) +} + +func (cmd *ls) Process(ctx context.Context) error { + if err := cmd.HostSystemFlag.Process(ctx); err != nil { + return err + } + return nil +} + +func (cmd *ls) Run(ctx context.Context, f *flag.FlagSet) error { + c, err := cmd.Client() + if err != nil { + return err + } + + var host *object.HostSystem + + if c.IsVC() { + host, err = cmd.HostSystemIfSpecified() + if err != nil { + return err + } + } + + m := object.NewDiagnosticManager(c) + + desc, err := m.QueryDescriptions(ctx, host) + if err != nil { + return err + } + + tw := tabwriter.NewWriter(os.Stdout, 2, 0, 2, ' ', 0) + + for _, d := range desc { + fmt.Fprintf(tw, "%s\t%s\n", d.Key, d.FileName) + } + + return tw.Flush() +} diff --git a/vendor/github.com/vmware/govmomi/govc/ls/command.go b/vendor/github.com/vmware/govmomi/govc/ls/command.go new file mode 100644 index 000000000..91b95ca3e --- /dev/null +++ b/vendor/github.com/vmware/govmomi/govc/ls/command.go @@ -0,0 +1,131 @@ +/* +Copyright (c) 2014-2015 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package ls + +import ( + "flag" + "fmt" + "io" + "strings" + + "github.com/vmware/govmomi/govc/cli" + "github.com/vmware/govmomi/govc/flags" + "github.com/vmware/govmomi/list" + "github.com/vmware/govmomi/vim25/mo" + "github.com/vmware/govmomi/vim25/types" + "golang.org/x/net/context" +) + +type ls struct { + *flags.DatacenterFlag + + Long bool + Type string +} + +func init() { + cli.Register("ls", &ls{}) +} + +func (cmd *ls) Register(ctx context.Context, f *flag.FlagSet) { + cmd.DatacenterFlag, ctx = flags.NewDatacenterFlag(ctx) + cmd.DatacenterFlag.Register(ctx, f) + + f.BoolVar(&cmd.Long, "l", false, "Long listing format") + f.StringVar(&cmd.Type, "t", "", "Object type") +} + +func (cmd *ls) Process(ctx context.Context) error { + if err := cmd.DatacenterFlag.Process(ctx); err != nil { + return err + } + return nil +} + +func (cmd *ls) Usage() string { + return "[PATH]..." +} + +func (cmd *ls) typeMatch(ref types.ManagedObjectReference) bool { + if cmd.Type == "" { + return true + } + + return strings.ToLower(cmd.Type) == strings.ToLower(ref.Type) +} + +func (cmd *ls) Run(ctx context.Context, f *flag.FlagSet) error { + finder, err := cmd.Finder() + if err != nil { + return err + } + + lr := listResult{ + Elements: nil, + Long: cmd.Long, + } + + args := f.Args() + if len(args) == 0 { + args = []string{"."} + } + + for _, arg := range args { + es, err := finder.ManagedObjectListChildren(context.TODO(), arg) + if err != nil { + return err + } + + for _, e := range es { + if cmd.typeMatch(e.Object.Reference()) { + lr.Elements = append(lr.Elements, e) + } + } + } + + return cmd.WriteResult(lr) +} + +type listResult struct { + Elements []list.Element `json:"elements"` + + Long bool `json:"-"` +} + +func (l listResult) Write(w io.Writer) error { + var err error + + for _, e := range l.Elements { + if !l.Long { + fmt.Fprintf(w, "%s\n", e.Path) + continue + } + + switch e.Object.(type) { + case mo.Folder: + if _, err = fmt.Fprintf(w, "%s/\n", e.Path); err != nil { + return err + } + default: + if _, err = fmt.Fprintf(w, "%s (%s)\n", e.Path, e.Object.Reference().Type); err != nil { + return err + } + } + } + + return nil +} diff --git a/vendor/github.com/vmware/govmomi/govc/main.go b/vendor/github.com/vmware/govmomi/govc/main.go new file mode 100644 index 000000000..3212d8634 --- /dev/null +++ b/vendor/github.com/vmware/govmomi/govc/main.go @@ -0,0 +1,63 @@ +/* +Copyright (c) 2014-2015 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package main + +import ( + "os" + + "github.com/vmware/govmomi/govc/cli" + + _ "github.com/vmware/govmomi/govc/about" + _ "github.com/vmware/govmomi/govc/cluster" + _ "github.com/vmware/govmomi/govc/datacenter" + _ "github.com/vmware/govmomi/govc/datastore" + _ "github.com/vmware/govmomi/govc/device" + _ "github.com/vmware/govmomi/govc/device/cdrom" + _ "github.com/vmware/govmomi/govc/device/floppy" + _ "github.com/vmware/govmomi/govc/device/scsi" + _ "github.com/vmware/govmomi/govc/device/serial" + _ "github.com/vmware/govmomi/govc/dvs" + _ "github.com/vmware/govmomi/govc/dvs/portgroup" + _ "github.com/vmware/govmomi/govc/events" + _ "github.com/vmware/govmomi/govc/extension" + _ "github.com/vmware/govmomi/govc/fields" + _ "github.com/vmware/govmomi/govc/host" + _ "github.com/vmware/govmomi/govc/host/autostart" + _ "github.com/vmware/govmomi/govc/host/esxcli" + _ "github.com/vmware/govmomi/govc/host/firewall" + _ "github.com/vmware/govmomi/govc/host/maintenance" + _ "github.com/vmware/govmomi/govc/host/portgroup" + _ "github.com/vmware/govmomi/govc/host/storage" + _ "github.com/vmware/govmomi/govc/host/vnic" + _ "github.com/vmware/govmomi/govc/host/vswitch" + _ "github.com/vmware/govmomi/govc/importx" + _ "github.com/vmware/govmomi/govc/license" + _ "github.com/vmware/govmomi/govc/logs" + _ "github.com/vmware/govmomi/govc/ls" + _ "github.com/vmware/govmomi/govc/permissions" + _ "github.com/vmware/govmomi/govc/pool" + _ "github.com/vmware/govmomi/govc/vapp" + _ "github.com/vmware/govmomi/govc/version" + _ "github.com/vmware/govmomi/govc/vm" + _ "github.com/vmware/govmomi/govc/vm/disk" + _ "github.com/vmware/govmomi/govc/vm/guest" + _ "github.com/vmware/govmomi/govc/vm/network" +) + +func main() { + os.Exit(cli.Run(os.Args[1:])) +} diff --git a/vendor/github.com/vmware/govmomi/govc/main_test.go b/vendor/github.com/vmware/govmomi/govc/main_test.go new file mode 100644 index 000000000..50f99b11e --- /dev/null +++ b/vendor/github.com/vmware/govmomi/govc/main_test.go @@ -0,0 +1,38 @@ +/* +Copyright (c) 2015 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package main + +import ( + "flag" + "testing" + + "golang.org/x/net/context" + + "github.com/vmware/govmomi/govc/cli" +) + +func TestMain(t *testing.T) { + // Execute flag registration for every command to verify there are no + // commands with flag name collisions + for _, cmd := range cli.Commands() { + fs := flag.NewFlagSet("", flag.ContinueOnError) + + // Use fresh context for every command + ctx, _ := context.WithCancel(context.Background()) + cmd.Register(ctx, fs) + } +} diff --git a/vendor/github.com/vmware/govmomi/govc/permissions/ls.go b/vendor/github.com/vmware/govmomi/govc/permissions/ls.go new file mode 100644 index 000000000..665809366 --- /dev/null +++ b/vendor/github.com/vmware/govmomi/govc/permissions/ls.go @@ -0,0 +1,97 @@ +/* +Copyright (c) 2015 VMware, Inc. All Rights Reserved. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package cluster + +import ( + "flag" + "fmt" + "os" + "text/tabwriter" + + "github.com/vmware/govmomi/govc/cli" + "github.com/vmware/govmomi/govc/flags" + "github.com/vmware/govmomi/object" + "golang.org/x/net/context" +) + +type ls struct { + *flags.DatacenterFlag + *flags.OutputFlag +} + +func init() { + cli.Register("permissions.ls", &ls{}) +} + +func (cmd *ls) Register(ctx context.Context, f *flag.FlagSet) { + cmd.DatacenterFlag, ctx = flags.NewDatacenterFlag(ctx) + cmd.DatacenterFlag.Register(ctx, f) + + cmd.OutputFlag, ctx = flags.NewOutputFlag(ctx) + cmd.OutputFlag.Register(ctx, f) +} + +func (cmd *ls) Process(ctx context.Context) error { + if err := cmd.DatacenterFlag.Process(ctx); err != nil { + return err + } + if err := cmd.OutputFlag.Process(ctx); err != nil { + return err + } + return nil +} + +func (cmd *ls) Usage() string { + return "[PATH]..." +} + +func (cmd *ls) Description() string { + return `List the permissions defined on or effective on managed entities.` +} + +func (cmd *ls) Run(ctx context.Context, f *flag.FlagSet) error { + c, err := cmd.Client() + if err != nil { + return err + } + + refs, err := cmd.ManagedObjects(ctx, f.Args()) + if err != nil { + return err + } + + m := object.NewAuthorizationManager(c) + rl, err := m.RoleList(ctx) + if err != nil { + return err + } + + tw := tabwriter.NewWriter(os.Stdout, 2, 0, 2, ' ', 0) + + for _, ref := range refs { + perms, err := m.RetrieveEntityPermissions(ctx, ref, true) + if err != nil { + return err + } + + for _, perm := range perms { + fmt.Fprintf(tw, "%s\t%s\n", perm.Principal, rl.ById(perm.RoleId).Name) + } + } + + return tw.Flush() +} diff --git a/vendor/github.com/vmware/govmomi/govc/permissions/remove.go b/vendor/github.com/vmware/govmomi/govc/permissions/remove.go new file mode 100644 index 000000000..becec4893 --- /dev/null +++ b/vendor/github.com/vmware/govmomi/govc/permissions/remove.go @@ -0,0 +1,85 @@ +/* +Copyright (c) 2015 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/
+
+package permissions
+
+import (
+	"flag"
+
+	"github.com/vmware/govmomi/govc/cli"
+	"github.com/vmware/govmomi/govc/flags"
+	"github.com/vmware/govmomi/object"
+	"github.com/vmware/govmomi/vim25/types"
+	"golang.org/x/net/context"
+)
+
+type remove struct {
+	*flags.DatacenterFlag
+
+	types.Permission
+
+	role string
+}
+
+func init() {
+	cli.Register("permissions.remove", &remove{})
+}
+
+func (cmd *remove) Register(ctx context.Context, f *flag.FlagSet) {
+	cmd.DatacenterFlag, ctx = flags.NewDatacenterFlag(ctx)
+	cmd.DatacenterFlag.Register(ctx, f)
+
+	f.StringVar(&cmd.Principal, "principal", "", "User or group for which the permission is defined")
+	f.BoolVar(&cmd.Group, "group", false, "True, if principal refers to a group name; false, for a user name")
+}
+
+func (cmd *remove) Process(ctx context.Context) error {
+	if err := cmd.DatacenterFlag.Process(ctx); err != nil {
+		return err
+	}
+	return nil
+}
+
+func (cmd *remove) Usage() string {
+	return "[PATH]..."
+}
+
+func (cmd *remove) Description() string {
+	return `Removes a permission rule from managed entities.`
+}
+
+func (cmd *remove) Run(ctx context.Context, f *flag.FlagSet) error {
+	c, err := cmd.Client()
+	if err != nil {
+		return err
+	}
+
+	refs, err := cmd.ManagedObjects(ctx, f.Args())
+	if err != nil {
+		return err
+	}
+
+	m := object.NewAuthorizationManager(c)
+
+	for _, ref := range refs {
+		err = m.RemoveEntityPermission(ctx, ref, cmd.Principal, cmd.Group)
+		if err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
diff --git a/vendor/github.com/vmware/govmomi/govc/permissions/set.go b/vendor/github.com/vmware/govmomi/govc/permissions/set.go
new file mode 100644
index 000000000..6b58e7e47
--- /dev/null
+++ b/vendor/github.com/vmware/govmomi/govc/permissions/set.go
@@ -0,0 +1,103 @@
+/*
+Copyright (c) 2015 VMware, Inc. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package permissions
+
+import (
+	"flag"
+	"fmt"
+
+	"github.com/vmware/govmomi/govc/cli"
+	"github.com/vmware/govmomi/govc/flags"
+	"github.com/vmware/govmomi/object"
+	"github.com/vmware/govmomi/vim25/types"
+	"golang.org/x/net/context"
+)
+
+type set struct {
+	*flags.DatacenterFlag
+
+	types.Permission
+
+	role string
+}
+
+func init() {
+	cli.Register("permissions.set", &set{})
+}
+
+func (cmd *set) Register(ctx context.Context, f *flag.FlagSet) {
+	cmd.DatacenterFlag, ctx = flags.NewDatacenterFlag(ctx)
+	cmd.DatacenterFlag.Register(ctx, f)
+
+	f.StringVar(&cmd.Principal, "principal", "", "User or group for which the permission is defined")
+	f.BoolVar(&cmd.Group, "group", false, "True, if principal refers to a group name; false, for a user name")
+	f.BoolVar(&cmd.Propagate, "propagate", true, "Whether or not this permission propagates down the hierarchy to sub-entities")
+	f.StringVar(&cmd.role, "role", "Admin", "Permission role name")
+}
+
+func (cmd *set) Process(ctx context.Context) error {
+	if err := cmd.DatacenterFlag.Process(ctx); err != nil {
+		return err
+	}
+	return nil
+}
+
+func (cmd *set) Usage() string {
+	return "[PATH]..."
+}
+
+func (cmd *set) Description() string {
+	return `Set the permissions on managed entities.
+Example:
+govc permissions.set -principal root -role Admin
+`
+}
+
+func (cmd *set) Run(ctx context.Context, f *flag.FlagSet) error {
+	c, err := cmd.Client()
+	if err != nil {
+		return err
+	}
+
+	refs, err := cmd.ManagedObjects(ctx, f.Args())
+	if err != nil {
+		return err
+	}
+
+	m := object.NewAuthorizationManager(c)
+	rl, err := m.RoleList(ctx)
+	if err != nil {
+		return err
+	}
+
+	role := rl.ByName(cmd.role)
+	if role == nil {
+		return fmt.Errorf("role '%s' not found", cmd.role)
+	}
+	cmd.Permission.RoleId = role.RoleId
+
+	perms := []types.Permission{cmd.Permission}
+
+	for _, ref := range refs {
+		err = m.SetEntityPermissions(ctx, ref, perms)
+		if err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
diff --git a/vendor/github.com/vmware/govmomi/govc/pool/change.go b/vendor/github.com/vmware/govmomi/govc/pool/change.go
new file mode 100644
index 000000000..126733aea
--- /dev/null
+++ b/vendor/github.com/vmware/govmomi/govc/pool/change.go
@@ -0,0 +1,98 @@
+/*
+Copyright (c) 2015 VMware, Inc. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package pool
+
+import (
+	"flag"
+
+	"github.com/vmware/govmomi/govc/cli"
+	"github.com/vmware/govmomi/govc/flags"
+	"github.com/vmware/govmomi/vim25/types"
+	"golang.org/x/net/context"
+)
+
+type change struct {
+	*flags.DatacenterFlag
+	*ResourceConfigSpecFlag
+
+	name string
+}
+
+func init() {
+	cli.Register("pool.change", &change{})
+}
+
+func (cmd *change) Register(ctx context.Context, f *flag.FlagSet) {
+	cmd.DatacenterFlag, ctx = flags.NewDatacenterFlag(ctx)
+	cmd.DatacenterFlag.Register(ctx, f)
+	cmd.ResourceConfigSpecFlag = NewResourceConfigSpecFlag()
+	cmd.ResourceConfigSpecFlag.Register(ctx, f)
+
+	f.StringVar(&cmd.name, "name", "", "Resource pool name")
+}
+
+func (cmd *change) Process(ctx context.Context) error {
+	if err := cmd.DatacenterFlag.Process(ctx); err != nil {
+		return err
+	}
+	if err := cmd.ResourceConfigSpecFlag.Process(ctx); err != nil {
+		return err
+	}
+	return nil
+}
+
+func (cmd *change) Usage() string {
+	return "POOL..."
+} + +func (cmd *change) Description() string { + return "Change the configuration of one or more resource POOLs.\n" + poolNameHelp +} + +func (cmd *change) Run(ctx context.Context, f *flag.FlagSet) error { + if f.NArg() == 0 { + return flag.ErrHelp + } + + finder, err := cmd.Finder() + if err != nil { + return err + } + + cmd.SetAllocation(func(a types.BaseResourceAllocationInfo) { + ra := a.GetResourceAllocationInfo() + if ra.Shares.Level == "" { + ra.Shares = nil + } + }) + + for _, arg := range f.Args() { + pools, err := finder.ResourcePoolList(context.TODO(), arg) + if err != nil { + return err + } + + for _, pool := range pools { + err := pool.UpdateConfig(context.TODO(), cmd.name, &cmd.ResourceConfigSpec) + if err != nil { + return err + } + } + } + + return nil +} diff --git a/vendor/github.com/vmware/govmomi/govc/pool/create.go b/vendor/github.com/vmware/govmomi/govc/pool/create.go new file mode 100644 index 000000000..affad7420 --- /dev/null +++ b/vendor/github.com/vmware/govmomi/govc/pool/create.go @@ -0,0 +1,101 @@ +/* +Copyright (c) 2015 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package pool + +import ( + "flag" + "fmt" + "path" + + "github.com/vmware/govmomi/find" + "github.com/vmware/govmomi/govc/cli" + "github.com/vmware/govmomi/govc/flags" + "github.com/vmware/govmomi/vim25/types" + "golang.org/x/net/context" +) + +type create struct { + *flags.DatacenterFlag + *ResourceConfigSpecFlag +} + +func init() { + cli.Register("pool.create", &create{}) +} + +func (cmd *create) Register(ctx context.Context, f *flag.FlagSet) { + cmd.DatacenterFlag, ctx = flags.NewDatacenterFlag(ctx) + cmd.DatacenterFlag.Register(ctx, f) + + cmd.ResourceConfigSpecFlag = NewResourceConfigSpecFlag() + cmd.ResourceConfigSpecFlag.SetAllocation(func(a types.BaseResourceAllocationInfo) { + ra := a.GetResourceAllocationInfo() + ra.Shares.Level = types.SharesLevelNormal + ra.ExpandableReservation = types.NewBool(true) + }) + cmd.ResourceConfigSpecFlag.Register(ctx, f) +} + +func (cmd *create) Process(ctx context.Context) error { + if err := cmd.DatacenterFlag.Process(ctx); err != nil { + return err + } + if err := cmd.ResourceConfigSpecFlag.Process(ctx); err != nil { + return err + } + return nil +} + +func (cmd *create) Usage() string { + return "POOL..." 
+} + +func (cmd *create) Description() string { + return "Create one or more resource POOLs.\n" + poolCreateHelp +} + +func (cmd *create) Run(ctx context.Context, f *flag.FlagSet) error { + if f.NArg() == 0 { + return flag.ErrHelp + } + + finder, err := cmd.Finder() + if err != nil { + return err + } + + for _, arg := range f.Args() { + dir := path.Dir(arg) + base := path.Base(arg) + parents, err := finder.ResourcePoolList(context.TODO(), dir) + if err != nil { + if _, ok := err.(*find.NotFoundError); ok { + return fmt.Errorf("cannot create resource pool '%s': parent not found", base) + } + return err + } + + for _, parent := range parents { + _, err = parent.Create(context.TODO(), base, cmd.ResourceConfigSpec) + if err != nil { + return err + } + } + } + + return nil +} diff --git a/vendor/github.com/vmware/govmomi/govc/pool/destroy.go b/vendor/github.com/vmware/govmomi/govc/pool/destroy.go new file mode 100644 index 000000000..6e1fe2668 --- /dev/null +++ b/vendor/github.com/vmware/govmomi/govc/pool/destroy.go @@ -0,0 +1,101 @@ +/* +Copyright (c) 2015 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package pool + +import ( + "flag" + + "github.com/vmware/govmomi/find" + "github.com/vmware/govmomi/govc/cli" + "github.com/vmware/govmomi/govc/flags" + "golang.org/x/net/context" +) + +type destroy struct { + *flags.DatacenterFlag + + recursive bool +} + +func init() { + cli.Register("pool.destroy", &destroy{}) +} + +func (cmd *destroy) Register(ctx context.Context, f *flag.FlagSet) { + cmd.DatacenterFlag, ctx = flags.NewDatacenterFlag(ctx) + cmd.DatacenterFlag.Register(ctx, f) + + f.BoolVar(&cmd.recursive, "r", false, "Remove all child resource pools recursively") +} + +func (cmd *destroy) Process(ctx context.Context) error { + if err := cmd.DatacenterFlag.Process(ctx); err != nil { + return err + } + return nil +} + +func (cmd *destroy) Usage() string { + return "POOL..." 
+} + +func (cmd *destroy) Description() string { + return "Destroy one or more resource POOLs.\n" + poolNameHelp +} + +func (cmd *destroy) Run(ctx context.Context, f *flag.FlagSet) error { + if f.NArg() == 0 { + return flag.ErrHelp + } + + finder, err := cmd.Finder() + if err != nil { + return err + } + + for _, arg := range f.Args() { + pools, err := finder.ResourcePoolList(context.TODO(), arg) + if err != nil { + if _, ok := err.(*find.NotFoundError); ok { + // Ignore if pool cannot be found + continue + } + + return err + } + + for _, pool := range pools { + if cmd.recursive { + err = pool.DestroyChildren(context.TODO()) + if err != nil { + return err + } + } + + task, err := pool.Destroy(context.TODO()) + if err != nil { + return err + } + err = task.Wait(context.TODO()) + if err != nil { + return err + } + } + } + + return nil +} diff --git a/vendor/github.com/vmware/govmomi/govc/pool/help.go b/vendor/github.com/vmware/govmomi/govc/pool/help.go new file mode 100644 index 000000000..dbd8ca23b --- /dev/null +++ b/vendor/github.com/vmware/govmomi/govc/pool/help.go @@ -0,0 +1,50 @@ +/* +Copyright (c) 2015 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package pool + +var poolNameHelp = ` +POOL may be an absolute or relative path to a resource pool or a (clustered) +compute host. If it resolves to a compute host, the associated root resource +pool is returned. If a relative path is specified, it is resolved with respect +to the current datacenter's "host" folder (i.e. /ha-datacenter/host). + +Paths to nested resource pools must traverse through the root resource pool of +the selected compute host, i.e. "compute-host/Resources/nested-pool". + +The same globbing rules that apply to the "ls" command apply here. For example, +POOL may be specified as "*/Resources/*" to expand to all resource pools that +are nested one level under the root resource pool, on all (clustered) compute +hosts in the current datacenter.` + +var poolCreateHelp = ` +POOL may be an absolute or relative path to a resource pool. The parent of the +specified POOL must be an existing resource pool. If a relative path is +specified, it is resolved with respect to the current datacenter's "host" +folder (i.e. /ha-datacenter/host). The basename of the specified POOL is used +as the name for the new resource pool. + +The same globbing rules that apply to the "ls" command apply here. For example, +the path to the parent resource pool in POOL may be specified as "*/Resources" +to expand to the root resource pools on all (clustered) compute hosts in the +current datacenter. + +For example: + */Resources/test Create resource pool "test" on all (clustered) + compute hosts in the current datacenter. 
+ somehost/Resources/*/nested Create resource pool "nested" in every + resource pool that is a direct descendant of + the root resource pool on "somehost".` diff --git a/vendor/github.com/vmware/govmomi/govc/pool/info.go b/vendor/github.com/vmware/govmomi/govc/pool/info.go new file mode 100644 index 000000000..7fcd28185 --- /dev/null +++ b/vendor/github.com/vmware/govmomi/govc/pool/info.go @@ -0,0 +1,212 @@ +/* +Copyright (c) 2015 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package pool + +import ( + "flag" + "fmt" + "io" + "text/tabwriter" + + "github.com/vmware/govmomi/find" + "github.com/vmware/govmomi/govc/cli" + "github.com/vmware/govmomi/govc/flags" + "github.com/vmware/govmomi/object" + "github.com/vmware/govmomi/property" + "github.com/vmware/govmomi/vim25/mo" + "github.com/vmware/govmomi/vim25/types" + "golang.org/x/net/context" +) + +type info struct { + *flags.DatacenterFlag + *flags.OutputFlag + + pools bool + apps bool +} + +func init() { + cli.Register("pool.info", &info{}) +} + +func (cmd *info) Register(ctx context.Context, f *flag.FlagSet) { + cmd.DatacenterFlag, ctx = flags.NewDatacenterFlag(ctx) + cmd.DatacenterFlag.Register(ctx, f) + cmd.OutputFlag, ctx = flags.NewOutputFlag(ctx) + cmd.OutputFlag.Register(ctx, f) + + f.BoolVar(&cmd.pools, "p", true, "List resource pools") + f.BoolVar(&cmd.apps, "a", false, "List virtual app resource pools") +} + +func (cmd *info) Process(ctx context.Context) error { + if err := cmd.DatacenterFlag.Process(ctx); err != nil { + return err + } + if err := cmd.OutputFlag.Process(ctx); err != nil { + return err + } + return nil +} + +func (cmd *info) Usage() string { + return "POOL..." +} + +func (cmd *info) Description() string { + return "Retrieve information about one or more resource POOLs.\n" + poolNameHelp +} + +func (cmd *info) Run(ctx context.Context, f *flag.FlagSet) error { + if f.NArg() == 0 { + return flag.ErrHelp + } + + c, err := cmd.Client() + if err != nil { + return err + } + + finder, err := cmd.Finder() + if err != nil { + return err + } + + var res infoResult + var props []string + + if cmd.OutputFlag.JSON { + props = nil + } else { + props = []string{ + "name", + "config.cpuAllocation", + "config.memoryAllocation", + "runtime.cpu", + "runtime.memory", + } + } + + var vapps []*object.VirtualApp + + for _, arg := range f.Args() { + if cmd.pools { + objects, err := finder.ResourcePoolList(ctx, arg) + if err != nil { + if _, ok := err.(*find.NotFoundError); !ok { + return err + } + } + res.objects = append(res.objects, objects...) + } + + if cmd.apps { + apps, err := finder.VirtualAppList(ctx, arg) + if err != nil { + if _, ok := err.(*find.NotFoundError); !ok { + return err + } + } + vapps = append(vapps, apps...) 
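+			// NotFoundError is tolerated in the lookups above so that a
+			// glob argument matching no pools or vapps is not an error.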
+ } + } + + if len(res.objects) != 0 { + refs := make([]types.ManagedObjectReference, 0, len(res.objects)) + for _, o := range res.objects { + refs = append(refs, o.Reference()) + } + + pc := property.DefaultCollector(c) + err = pc.Retrieve(ctx, refs, props, &res.ResourcePools) + if err != nil { + return err + } + } + + if len(vapps) != 0 { + var apps []mo.VirtualApp + refs := make([]types.ManagedObjectReference, 0, len(vapps)) + for _, o := range vapps { + refs = append(refs, o.Reference()) + p := object.NewResourcePool(c, o.Reference()) + p.InventoryPath = o.InventoryPath + res.objects = append(res.objects, p) + } + + pc := property.DefaultCollector(c) + err = pc.Retrieve(ctx, refs, props, &apps) + if err != nil { + return err + } + + for _, app := range apps { + res.ResourcePools = append(res.ResourcePools, app.ResourcePool) + } + } + + return cmd.WriteResult(&res) +} + +type infoResult struct { + ResourcePools []mo.ResourcePool + objects []*object.ResourcePool +} + +func (r *infoResult) Write(w io.Writer) error { + // Maintain order via r.objects as Property collector does not always return results in order. + objects := make(map[types.ManagedObjectReference]mo.ResourcePool, len(r.ResourcePools)) + for _, o := range r.ResourcePools { + objects[o.Reference()] = o + } + + tw := tabwriter.NewWriter(w, 2, 0, 2, ' ', 0) + + for _, o := range r.objects { + pool := objects[o.Reference()] + fmt.Fprintf(tw, "Name:\t%s\n", pool.Name) + fmt.Fprintf(tw, " Path:\t%s\n", o.InventoryPath) + + writeInfo(tw, "CPU", "MHz", &pool.Runtime.Cpu, pool.Config.CpuAllocation) + pool.Runtime.Memory.MaxUsage >>= 20 + pool.Runtime.Memory.OverallUsage >>= 20 + writeInfo(tw, "Mem", "MB", &pool.Runtime.Memory, pool.Config.MemoryAllocation) + } + + return tw.Flush() +} + +func writeInfo(w io.Writer, name string, units string, ru *types.ResourcePoolResourceUsage, b types.BaseResourceAllocationInfo) { + ra := b.GetResourceAllocationInfo() + usage := 100.0 * float64(ru.OverallUsage) / float64(ru.MaxUsage) + shares := "" + limit := "unlimited" + + if ra.Shares.Level == types.SharesLevelCustom { + shares = fmt.Sprintf(" (%d)", ra.Shares.Shares) + } + + if ra.Limit != -1 { + limit = fmt.Sprintf("%d%s", ra.Limit, units) + } + + fmt.Fprintf(w, " %s Usage:\t%d%s (%0.1f%%)\n", name, ru.OverallUsage, units, usage) + fmt.Fprintf(w, " %s Shares:\t%s%s\n", name, ra.Shares.Level, shares) + fmt.Fprintf(w, " %s Reservation:\t%d%s (expandable=%v)\n", name, ra.Reservation, units, *ra.ExpandableReservation) + fmt.Fprintf(w, " %s Limit:\t%s\n", name, limit) +} diff --git a/vendor/github.com/vmware/govmomi/govc/pool/resource_config_spec.go b/vendor/github.com/vmware/govmomi/govc/pool/resource_config_spec.go new file mode 100644 index 000000000..5fd61fd96 --- /dev/null +++ b/vendor/github.com/vmware/govmomi/govc/pool/resource_config_spec.go @@ -0,0 +1,98 @@ +/* +Copyright (c) 2015 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/
+
+package pool
+
+import (
+	"flag"
+	"strconv"
+	"strings"
+
+	"golang.org/x/net/context"
+
+	"github.com/vmware/govmomi/govc/flags"
+	"github.com/vmware/govmomi/vim25/types"
+)
+
+type sharesInfo types.SharesInfo
+
+func (s *sharesInfo) String() string {
+	return string(s.Level)
+}
+
+func (s *sharesInfo) Set(val string) error {
+	switch val {
+	case string(types.SharesLevelNormal), string(types.SharesLevelLow), string(types.SharesLevelHigh):
+		s.Level = types.SharesLevel(val)
+	default:
+		n, err := strconv.Atoi(val)
+		if err != nil {
+			return err
+		}
+
+		s.Level = types.SharesLevelCustom
+		s.Shares = int32(n)
+	}
+
+	return nil
+}
+
+func NewResourceConfigSpecFlag() *ResourceConfigSpecFlag {
+	f := new(ResourceConfigSpecFlag)
+	f.MemoryAllocation = new(types.ResourceAllocationInfo)
+	f.CpuAllocation = new(types.ResourceAllocationInfo)
+
+	f.SetAllocation(func(a types.BaseResourceAllocationInfo) {
+		a.GetResourceAllocationInfo().Shares = new(types.SharesInfo)
+	})
+	return f
+}
+
+type ResourceConfigSpecFlag struct {
+	types.ResourceConfigSpec
+}
+
+func (s *ResourceConfigSpecFlag) Register(ctx context.Context, f *flag.FlagSet) {
+	opts := []struct {
+		name  string
+		units string
+		types.BaseResourceAllocationInfo
+	}{
+		{"CPU", "MHz", s.CpuAllocation},
+		{"Memory", "MB", s.MemoryAllocation},
+	}
+
+	for _, opt := range opts {
+		prefix := strings.ToLower(opt.name)[:3]
+		ra := opt.GetResourceAllocationInfo()
+		shares := (*sharesInfo)(ra.Shares)
+
+		f.Int64Var(&ra.Limit, prefix+".limit", 0, opt.name+" limit in "+opt.units)
+		f.Int64Var(&ra.Reservation, prefix+".reservation", 0, opt.name+" reservation in "+opt.units)
+		f.Var(flags.NewOptionalBool(&ra.ExpandableReservation), prefix+".expandable", opt.name+" expandable reservation")
+		f.Var(shares, prefix+".shares", opt.name+" shares level or number")
+	}
+}
+
+func (s *ResourceConfigSpecFlag) Process(ctx context.Context) error {
+	return nil
+}
+
+func (s *ResourceConfigSpecFlag) SetAllocation(f func(types.BaseResourceAllocationInfo)) {
+	for _, a := range []types.BaseResourceAllocationInfo{s.CpuAllocation, s.MemoryAllocation} {
+		f(a)
+	}
+}
diff --git a/vendor/github.com/vmware/govmomi/govc/release.sh b/vendor/github.com/vmware/govmomi/govc/release.sh
new file mode 100644
index 000000000..f934ad342
--- /dev/null
+++ b/vendor/github.com/vmware/govmomi/govc/release.sh
@@ -0,0 +1,76 @@
+#!/bin/bash -e
+
+if ! which github-release > /dev/null; then
+  echo 'Please install github-release...'
+  echo ''
+  echo '  $ go get github.com/aktau/github-release'
+  echo ''
+  exit 1
+fi
+
+if [ -z "${GITHUB_TOKEN}" ]; then
+  echo 'Please set GITHUB_TOKEN...'
+  exit 1
+fi
+
+export GITHUB_USER="${GITHUB_USER:-vmware}"
+export GITHUB_REPO="${GITHUB_REPO:-govmomi}"
+
+name="$(git describe)"
+
+case "$1" in
+  release)
+    tag="${name}"
+    ;;
+  prerelease)
+    tag="prerelease-${name}"
+    ;;
+  *)
+    echo "Usage: $0 [release|prerelease]"
+    exit 1
+    ;;
+esac
+
+echo "Building govc..."
+rm -f govc_*
+./build.sh
+gzip -f govc_*
+
+echo "Pushing tag ${tag}..."
+git tag -f "${tag}"
+git push origin "refs/tags/${tag}"
+
+# Generate description
+description=$(
+if [[ "${tag}" == "prerelease-"* ]]; then
+  echo '**This is a PRERELEASE version.**'
+fi
+
+echo '
+The binaries below are provided without warranty, following the [Apache license](LICENSE).
+'
+
+echo '
+Instructions:
+* Download the file relevant to your operating system
+* Decompress (i.e. `gzip -d govc_linux_amd64.gz`)
+* Set the executable bit (i.e. `chmod +x govc_linux_amd64`)
+* Move the file to a directory in your `$PATH` (i.e. `mv govc_linux_amd64 /usr/local/bin`)
+'
+
+echo '```'
+echo '$ sha1sum govc_*.gz'
+sha1sum govc_*.gz
+echo '```'
+)
+
+echo "Creating release..."
+github-release release --tag "${tag}" --name "${name}" --description "${description}" --draft --pre-release
+
+# Upload build artifacts
+for f in govc_*.gz; do
+  echo "Uploading $f..."
+  github-release upload --tag "${tag}" --name "${f}" --file "${f}"
+done
+
+echo "Remember to publish the draft release!"
diff --git a/vendor/github.com/vmware/govmomi/govc/test/.gitignore b/vendor/github.com/vmware/govmomi/govc/test/.gitignore
new file mode 100644
index 000000000..8000dd9db
--- /dev/null
+++ b/vendor/github.com/vmware/govmomi/govc/test/.gitignore
@@ -0,0 +1 @@
+.vagrant
diff --git a/vendor/github.com/vmware/govmomi/govc/test/README.md b/vendor/github.com/vmware/govmomi/govc/test/README.md
new file mode 100644
index 000000000..d48e8fc86
--- /dev/null
+++ b/vendor/github.com/vmware/govmomi/govc/test/README.md
@@ -0,0 +1,73 @@
+# Functional tests for govc
+
+## Bats
+
+Install [Bats](https://github.com/sstephenson/bats/)
+
+## coreutils
+
+Install gxargs, greadlink and gmktemp on Darwin
+
+```
+brew install coreutils
+brew install findutils
+```
+
+## Download test images
+
+Some tests depend on [ttylinux](http://ttylinux.net) images; these can be downloaded by running:
+
+```
+./images/update.sh
+```
+
+These images are uploaded to the esxbox as needed by tests and can be
+removed with the following command:
+
+```
+./clean.sh
+```
+
+## GOVC_TEST_URL
+
+The govc tests need an ESX instance to run against. The default
+`GOVC_TEST_URL` is that of the vagrant box in the *esxbox* directory:
+
+```
+(cd esxbox && vagrant up)
+```
+
+Any other ESX box can be used by exporting the following variable:
+
+```
+export GOVC_TEST_URL=user:pass@hostname
+```
+
+## vCenter Simulator
+
+Some tests require vCenter and depend on the Vagrant box in the
+*vcsim* directory. These tests are skipped if the vcsim box is not
+running. To enable these tests:
+
+```
+(cd vcsim && vagrant up)
+```
+
+## Running tests
+
+The test helper prepends ../govc to `PATH`.
+
+The tests can be run from any directory, as *govc* is found relative to
+`PATH` and *images* are found relative to `$BATS_TEST_DIRNAME`.
+
+The entire suite can be run with the following command:
+
+```
+bats .
+```
+
+Or individually, for example:
+
+```
+./cli.bats
+```
diff --git a/vendor/github.com/vmware/govmomi/govc/test/boot_order_test.sh b/vendor/github.com/vmware/govmomi/govc/test/boot_order_test.sh
new file mode 100644
index 000000000..e52d16be9
--- /dev/null
+++ b/vendor/github.com/vmware/govmomi/govc/test/boot_order_test.sh
@@ -0,0 +1,75 @@
+#!/bin/bash -e
+
+# This test is not run via bats.
+# A VNC session will be opened to observe the VM boot order:
+# 1) from floppy (followed by: eject floppy, reboot)
+# 2) from cdrom (followed by: eject cdrom, reboot)
+# 3) from network (will timeout)
+# 4) from disk
+
+. 
$(dirname $0)/test_helper.bash + +upload_img +upload_iso + +id=$(new_ttylinux_vm) + +function cleanup() { + quit_vnc $vnc + govc vm.destroy $id + pkill -TERM -g $$ ^nc +} + +trap cleanup EXIT + +govc device.cdrom.add -vm $id > /dev/null +govc device.cdrom.insert -vm $id $GOVC_TEST_ISO + +govc device.floppy.add -vm $id > /dev/null +govc device.floppy.insert -vm $id $GOVC_TEST_IMG + +govc device.boot -vm $id -delay 1000 -order floppy,cdrom,ethernet,disk + +vnc=$(govc vm.vnc -port 21122 -password govmomi -enable "${id}" | awk '{print $2}') + +echo "booting from floppy..." +govc vm.power -on $id + +open_vnc $vnc + +sleep 10 + +govc vm.power -off $id + +govc device.floppy.eject -vm $id + +# this is ttylinux-live, notice the 'boot:' prompt vs 'login:' prompt when booted from disk +echo "booting from cdrom..." +govc vm.power -on $id + +sleep 10 + +govc vm.power -off $id + +govc device.cdrom.eject -vm $id + +host_ip=$(echo $vnc | awk -F@ '{print $2}' | awk -F: '{print $1}') +serial_port=33233 +govc device.serial.add -vm $id > /dev/null +govc device.serial.connect -vm $id $uri telnet://:$serial_port + +echo "booting from network, will timeout then boot from disk..." +govc vm.power -on $id + +# capture serial console +echo | nc $host_ip $serial_port 2>/dev/null & + +ip=$(govc vm.ip $id) + +echo "VM booted from disk (ip=$ip)" + +sleep 5 + +govc vm.power -s $id + +sleep 5 diff --git a/vendor/github.com/vmware/govmomi/govc/test/clean.sh b/vendor/github.com/vmware/govmomi/govc/test/clean.sh new file mode 100644 index 000000000..c7700852c --- /dev/null +++ b/vendor/github.com/vmware/govmomi/govc/test/clean.sh @@ -0,0 +1,24 @@ +#!/bin/bash +# +# Cleanup any artifacts created by govc +# + +. $(dirname $0)/test_helper.bash + +teardown + +datastore_rm() { + name=$1 + govc datastore.rm $name 2> /dev/null +} + +datastore_rm $GOVC_TEST_IMG +datastore_rm $GOVC_TEST_ISO +datastore_rm $GOVC_TEST_VMDK +datastore_rm $(echo $GOVC_TEST_VMDK | sed 's/.vmdk/-flat.vmdk/') + +# Recursively destroy all resource pools created by the test suite +govc ls host/*/Resources/govc-test-* | \ + xargs -rt govc pool.destroy -r + +govc datastore.ls diff --git a/vendor/github.com/vmware/govmomi/govc/test/cli.bats b/vendor/github.com/vmware/govmomi/govc/test/cli.bats new file mode 100644 index 000000000..72ea52406 --- /dev/null +++ b/vendor/github.com/vmware/govmomi/govc/test/cli.bats @@ -0,0 +1,25 @@ +#!/usr/bin/env bats + +load test_helper + +@test "about" { + run govc about + assert_success + assert_line "Vendor: VMware, Inc." +} + +@test "login attempt without credentials" { + run govc about -u $(echo $GOVC_URL | awk -F@ '{print $2}') + assert_failure "govc: ServerFaultCode: Cannot complete login due to an incorrect user name or password." 
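+  # (The awk above drops everything through the "@", so the URL carries
+  # no credentials and the login is expected to fail.)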
+}
+
+@test "login attempt with GOVC_URL, GOVC_USERNAME, and GOVC_PASSWORD" {
+  govc_url_to_vars
+  run govc about
+  assert_success
+}
+
+@test "connect to an endpoint with a non-supported API version" {
+  run env GOVC_MIN_API_VERSION=24.4 govc about
+  assert grep -q "^govc: Require API version 24.4," <<<${output}
+}
diff --git a/vendor/github.com/vmware/govmomi/govc/test/datacenter.bats b/vendor/github.com/vmware/govmomi/govc/test/datacenter.bats
new file mode 100644
index 000000000..9a1a94a01
--- /dev/null
+++ b/vendor/github.com/vmware/govmomi/govc/test/datacenter.bats
@@ -0,0 +1,85 @@
+#!/usr/bin/env bats
+
+load test_helper
+
+@test "create and destroy datacenters" {
+  vcsim_env
+  dcs=(`uuidgen` `uuidgen`)
+  run govc datacenter.create ${dcs[0]} ${dcs[1]}
+  assert_success
+
+  for dc in ${dcs[*]}; do
+    run govc ls /$dc
+    assert_success
+    # /$dc/{vm,network,host,datastore}
+    [ ${#lines[@]} -eq 4 ]
+  done
+
+  run govc datacenter.destroy ${dcs[0]} ${dcs[1]}
+  assert_success
+
+  for dc in ${dcs[*]}; do
+    run govc ls /$dc
+    assert_success
+    [ ${#lines[@]} -eq 0 ]
+  done
+}
+
+@test "destroy datacenter using glob" {
+  vcsim_env
+  prefix=test-dc
+  dcs=(${prefix}-`uuidgen` ${prefix}-`uuidgen`)
+  run govc datacenter.create ${dcs[0]} ${dcs[1]}
+  assert_success
+
+  run govc datacenter.destroy ${prefix}-*
+  assert_success
+
+  for dc in ${dcs[*]}; do
+    run govc ls /$dc
+    assert_success
+    [ ${#lines[@]} -eq 0 ]
+  done
+}
+
+@test "destroy datacenter that doesn't exist" {
+  vcsim_env
+  dc=$(uuidgen)
+
+  run govc datacenter.destroy $dc
+  assert_success
+}
+
+@test "create datacenter that already exists" {
+  vcsim_env
+  dc=$(uuidgen)
+
+  run govc datacenter.create $dc
+  assert_success
+
+  run govc datacenter.create $dc
+  assert_success
+
+  run govc datacenter.destroy $dc
+  assert_success
+}
+
+@test "fails when datacenter name not specified" {
+  run govc datacenter.create
+  assert_failure
+
+  run govc datacenter.destroy
+  assert_failure
+}
+
+@test "fails when operation attempted on standalone ESX host" {
+  run govc datacenter.create something
+  assert_failure
+  assert_output "govc: ServerFaultCode: The operation is not supported on the object."
+}
+
+@test "fails when attempting to destroy ha-datacenter" {
+  run govc datacenter.destroy ha-datacenter
+  assert_failure
+  assert_output "govc: The operation is not supported on the object."
+}
diff --git a/vendor/github.com/vmware/govmomi/govc/test/datastore.bats b/vendor/github.com/vmware/govmomi/govc/test/datastore.bats
new file mode 100644
index 000000000..c1d4acac6
--- /dev/null
+++ b/vendor/github.com/vmware/govmomi/govc/test/datastore.bats
@@ -0,0 +1,75 @@
+#!/usr/bin/env bats
+
+load test_helper
+
+upload_file() {
+  file=$($mktemp --tmpdir govc-test-XXXXX)
+  name=$(basename ${file})
+  echo "Hello world!" 
> ${file} + + run govc datastore.upload "${file}" "${name}" + assert_success + + rm -f "${file}" + echo "${name}" +} + +@test "datastore.ls" { + name=$(upload_file) + + # Single argument + run govc datastore.ls "${name}" + assert_success + [ ${#lines[@]} -eq 1 ] + + # Multiple arguments + run govc datastore.ls "${name}" "${name}" + assert_success + [ ${#lines[@]} -eq 2 ] + + # Pattern argument + run govc datastore.ls "./govc-test-*" + assert_success + [ ${#lines[@]} -ge 1 ] + + # Long listing + run govc datastore.ls -l "./govc-test-*" + assert_success + assert_equal "13B" $(awk '{ print $1 }' <<<${output}) +} + +@test "datastore.rm" { + name=$(upload_file) + + # Not found is a failure + run govc datastore.rm "${name}.notfound" + assert_failure + assert_matches "govc: File .* was not found" "${output}" + + # Not found is NOT a failure with the force flag + run govc datastore.rm -f "${name}.notfound" + assert_success + assert_empty "${output}" + + # Verify the file is present + run govc datastore.ls "${name}" + assert_success + + # Delete the file + run govc datastore.rm "${name}" + assert_success + assert_empty "${output}" + + # Verify the file is gone + run govc datastore.ls "${name}" + assert_failure +} + +@test "datastore.info" { + run govc datastore.info enoent + assert_failure + + run govc datastore.info + assert_success + [ ${#lines[@]} -gt 1 ] +} diff --git a/vendor/github.com/vmware/govmomi/govc/test/device.bats b/vendor/github.com/vmware/govmomi/govc/test/device.bats new file mode 100644 index 000000000..b764830a6 --- /dev/null +++ b/vendor/github.com/vmware/govmomi/govc/test/device.bats @@ -0,0 +1,199 @@ +#!/usr/bin/env bats + +load test_helper + +@test "device.ls" { + vm=$(new_empty_vm) + + result=$(govc device.ls -vm $vm | grep ethernet-0 | wc -l) + [ $result -eq 1 ] +} + +@test "device.info" { + vm=$(new_empty_vm) + + run govc device.info -vm $vm ide-200 + assert_success + + run govc device.info -vm $vm ide-20000 + assert_failure +} + +@test "device.boot" { + vm=$(new_ttylinux_vm) + + result=$(govc device.ls -vm $vm -boot | wc -l) + [ $result -eq 0 ] + + run govc device.boot -vm $vm -order floppy,cdrom,ethernet,disk + assert_success + + result=$(govc device.ls -vm $vm -boot | wc -l) + [ $result -eq 2 ] + + run govc device.cdrom.add -vm $vm + assert_success + + run govc device.floppy.add -vm $vm + assert_success + + run govc device.boot -vm $vm -order floppy,cdrom,ethernet,disk + assert_success + + result=$(govc device.ls -vm $vm -boot | wc -l) + [ $result -eq 4 ] +} + +@test "device.cdrom" { + vm=$(new_empty_vm) + + result=$(govc device.ls -vm $vm | grep cdrom- | wc -l) + [ $result -eq 0 ] + + run govc device.cdrom.add -vm $vm + assert_success + id=$output + + result=$(govc device.ls -vm $vm | grep $id | wc -l) + [ $result -eq 1 ] + + run govc device.info -vm $vm $id + assert_success + + run govc device.cdrom.insert -vm $vm -device $id x.iso + assert_success + + run govc device.info -vm $vm $id + assert_line "Summary: ISO [${GOVC_DATASTORE}] x.iso" + + run govc device.disconnect -vm $vm $id + assert_success + + run govc device.connect -vm $vm $id + assert_success + + run govc device.remove -vm $vm $id + assert_success + + run govc device.disconnect -vm $vm $id + assert_failure "govc: device '$id' not found" + + run govc device.cdrom.insert -vm $vm -device $id x.iso + assert_failure "govc: device '$id' not found" + + run govc device.remove -vm $vm $id + assert_failure "govc: device '$id' not found" +} + +@test "device.floppy" { + vm=$(new_empty_vm) + + result=$(govc device.ls 
-vm $vm | grep floppy- | wc -l) + [ $result -eq 0 ] + + run govc device.floppy.add -vm $vm + assert_success + id=$output + + result=$(govc device.ls -vm $vm | grep $id | wc -l) + [ $result -eq 1 ] + + run govc device.info -vm $vm $id + assert_success + + run govc device.floppy.insert -vm $vm -device $id x.img + assert_success + + run govc device.info -vm $vm $id + assert_line "Summary: Image [${GOVC_DATASTORE}] x.img" + + run govc device.disconnect -vm $vm $id + assert_success + + run govc device.connect -vm $vm $id + assert_success + + run govc device.remove -vm $vm $id + assert_success + + run govc device.disconnect -vm $vm $id + assert_failure "govc: device '$id' not found" + + run govc device.floppy.insert -vm $vm -device $id x.img + assert_failure "govc: device '$id' not found" + + run govc device.remove -vm $vm $id + assert_failure "govc: device '$id' not found" +} + +@test "device.serial" { + vm=$(new_empty_vm) + + result=$(govc device.ls -vm $vm | grep serial- | wc -l) + [ $result -eq 0 ] + + run govc device.serial.add -vm $vm + assert_success + id=$output + + result=$(govc device.ls -vm $vm | grep $id | wc -l) + [ $result -eq 1 ] + + run govc device.info -vm $vm $id + assert_success + + uri=telnet://:33233 + run govc device.serial.connect -vm $vm -device $id $uri + assert_success + + run govc device.info -vm $vm $id + assert_line "Summary: Remote $uri" + + run govc device.serial.disconnect -vm $vm -device $id + assert_success + + run govc device.info -vm $vm $id + assert_line "Summary: Remote localhost:0" + + run govc device.disconnect -vm $vm $id + assert_success + + run govc device.connect -vm $vm $id + assert_success + + run govc device.remove -vm $vm $id + assert_success + + run govc device.disconnect -vm $vm $id + assert_failure "govc: device '$id' not found" + + run govc device.serial.connect -vm $vm -device $id $uri + assert_failure "govc: device '$id' not found" + + run govc device.remove -vm $vm $id + assert_failure "govc: device '$id' not found" +} + +@test "device.scsi" { + vm=$(new_empty_vm) + + result=$(govc device.ls -vm $vm | grep lsilogic- | wc -l) + [ $result -eq 1 ] + + run govc device.scsi.add -vm $vm + assert_success + id=$output + + result=$(govc device.ls -vm $vm | grep $id | wc -l) + [ $result -eq 1 ] + + result=$(govc device.ls -vm $vm | grep lsilogic- | wc -l) + [ $result -eq 2 ] + + run govc device.scsi.add -vm $vm -type pvscsi + assert_success + id=$output + + result=$(govc device.ls -vm $vm | grep $id | wc -l) + [ $result -eq 1 ] +} diff --git a/vendor/github.com/vmware/govmomi/govc/test/esxbox/Vagrantfile b/vendor/github.com/vmware/govmomi/govc/test/esxbox/Vagrantfile new file mode 100644 index 000000000..df9a01576 --- /dev/null +++ b/vendor/github.com/vmware/govmomi/govc/test/esxbox/Vagrantfile @@ -0,0 +1,27 @@ +# -*- mode: ruby -*- +# vi: set ft=ruby : + +Vagrant.configure("2") do |config| + config.ssh.insert_key = false + config.ssh.default.username = "root" + config.ssh.shell = "sh" + config.vm.hostname = "esxbox" + + config.vm.box = "esxi55" + config.vm.synced_folder ".", "/vagrant", disabled: true + config.vm.network "forwarded_port", guest: 443, host: 18443 + + [:vmware_fusion, :vmware_workstation].each do |name| + config.vm.provider name do |v,override| + v.vmx["memsize"] = "4096" + end + end + + config.vm.provision "shell", privileged: false, :inline => </dev/null + run govc import.ovf ./images/${TTYLINUX_NAME}.ovf + assert_success + popd >/dev/null + + run govc vm.destroy ${TTYLINUX_NAME} + assert_success +} + +@test "import.ovf with name in 
options" { + name=$(new_id) + file=$($mktemp --tmpdir govc-test-XXXXX) + echo "{ \"Name\": \"${name}\"}" > ${file} + + run govc import.ovf -options="${file}" $GOVC_IMAGES/${TTYLINUX_NAME}.ovf + assert_success + + run govc vm.destroy "${name}" + assert_success + + rm -f ${file} +} + +@test "import.ovf with name as argument" { + name=$(new_id) + + run govc import.ova -name="${name}" $GOVC_IMAGES/${TTYLINUX_NAME}.ova + assert_success + + run govc vm.destroy "${name}" + assert_success +} diff --git a/vendor/github.com/vmware/govmomi/govc/test/license.bats b/vendor/github.com/vmware/govmomi/govc/test/license.bats new file mode 100644 index 000000000..6132eb11d --- /dev/null +++ b/vendor/github.com/vmware/govmomi/govc/test/license.bats @@ -0,0 +1,53 @@ +#!/usr/bin/env bats + +load test_helper + +# These tests should only run against a server running an evaluation license. +verify_evaluation() { + if [ "$(govc license.list -json | jq -r .[0].EditionKey)" != "eval" ]; then + skip "requires evaluation license" + fi +} + +get_key() { + jq ".[] | select(.LicenseKey == \"$1\")" +} + +get_property() { + jq -r ".Properties[] | select(.Key == \"$1\") | .Value" +} + +@test "license.add" { + verify_evaluation + + run govc license.add -json 00000-00000-00000-00000-00001 00000-00000-00000-00000-00002 + assert_success + + # Expect to see an entry for both the first and the second key + assert_equal "License is not valid for this product" $(get_key 00000-00000-00000-00000-00001 <<<${output} | get_property diagnostic) + assert_equal "License is not valid for this product" $(get_key 00000-00000-00000-00000-00002 <<<${output} | get_property diagnostic) +} + +@test "license.remove" { + verify_evaluation + + run govc license.remove -json 00000-00000-00000-00000-00001 + assert_success +} + +@test "license.list" { + verify_evaluation + + run govc license.list -json + assert_success + + # Expect the test instance to run in evaluation mode + assert_equal "Evaluation Mode" $(get_key 00000-00000-00000-00000-00000 <<<$output | jq -r ".Name") +} + +@test "license.decode" { + verify_evaluation + + key=00000-00000-00000-00000-00000 + assert_equal "eval" $(govc license.decode $key | grep $key | awk '{print $2}') +} diff --git a/vendor/github.com/vmware/govmomi/govc/test/logs.bats b/vendor/github.com/vmware/govmomi/govc/test/logs.bats new file mode 100644 index 000000000..733cd8f9c --- /dev/null +++ b/vendor/github.com/vmware/govmomi/govc/test/logs.bats @@ -0,0 +1,38 @@ +#!/usr/bin/env bats + +load test_helper + +@test "logs" { + run govc logs + assert_success + nlogs=${#lines[@]} + # there should be plenty more than 1 line of hostd logs + [ $nlogs -ge 1 ] + + # test -n flag + run govc logs -n $((nlogs - 1)) + assert_success + [ ${#lines[@]} -le $nlogs ] + + run govc logs -log vmkernel + assert_success + nlogs=${#lines[@]} + # there should be plenty more than 1 line of vmkernel logs + [ $nlogs -ge 1 ] + + # -host ignored against ESX + run govc logs -host enoent + assert_success + + run govc logs -log enoent + assert_failure +} + +@test "logs.ls" { + run govc logs.ls + assert_success + + # -host ignored against ESX + run govc logs.ls -host enoent + assert_success +} diff --git a/vendor/github.com/vmware/govmomi/govc/test/ls.bats b/vendor/github.com/vmware/govmomi/govc/test/ls.bats new file mode 100644 index 000000000..9c290c97a --- /dev/null +++ b/vendor/github.com/vmware/govmomi/govc/test/ls.bats @@ -0,0 +1,93 @@ +#!/usr/bin/env bats + +load test_helper + +@test "ls" { + run govc ls + assert_success + # 
/dc/{vm,network,host,datastore} + [ ${#lines[@]} -ge 4 ] + + run govc ls host + assert_success + [ ${#lines[@]} -ge 1 ] + + run govc ls enoent + assert_success + [ ${#lines[@]} -eq 0 ] +} + +@test "ls vm" { + vm=$(new_empty_vm) + + run govc ls vm + assert_success + [ ${#lines[@]} -ge 1 ] + + run govc ls vm/$vm + assert_success + [ ${#lines[@]} -eq 1 ] + + run govc ls /*/vm/$vm + assert_success + [ ${#lines[@]} -eq 1 ] +} + +@test "ls network" { + run govc ls network + assert_success + [ ${#lines[@]} -ge 1 ] + + local path=${lines[0]} + run govc ls $path + assert_success + [ ${#lines[@]} -eq 1 ] + + run govc ls network/$(basename $path) + assert_success + [ ${#lines[@]} -eq 1 ] + + run govc ls /*/network/$(basename $path) + assert_success + [ ${#lines[@]} -eq 1 ] +} + +@test "ls multi ds" { + vcsim_env + + run govc ls + assert_success + # /DC0/{vm,network,host,datastore} + [ ${#lines[@]} -eq 4 ] + + run govc ls /DC* + assert_success + # /DC[0,1]/{vm,network,host,datastore} + [ ${#lines[@]} -eq 8 ] + + # here 'vm' is relative to /DC0 + run govc ls vm + assert_success + [ ${#lines[@]} -gt 0 ] + + unset GOVC_DATACENTER + + run govc ls + assert_success + # /DC[0,1] + [ ${#lines[@]} -eq 2 ] + + run govc ls -dc enoent + assert_failure + [ ${#lines[@]} -gt 0 ] + + # here 'vm' is relative to '/' - so there are no matches + run govc ls vm + assert_success + [ ${#lines[@]} -eq 0 ] + + # ls all vms in all datacenters + run govc ls */vm + assert_success + [ ${#lines[@]} -gt 0 ] +} diff --git a/vendor/github.com/vmware/govmomi/govc/test/network.bats b/vendor/github.com/vmware/govmomi/govc/test/network.bats new file mode 100644 index 000000000..242e013c8 --- /dev/null +++ b/vendor/github.com/vmware/govmomi/govc/test/network.bats @@ -0,0 +1,144 @@ +#!/usr/bin/env bats + +load test_helper + +@test "network dvs backing" { + vcsim_env + + # DVS backed network by default (from vcsim_env) + vm=$(new_empty_vm) + + eth0=$(govc device.ls -vm $vm | grep ethernet- | awk '{print $1}') + run govc device.info -vm $vm $eth0 + assert_success + + summary=$(govc device.info -vm $vm $eth0 | grep Summary: | awk '{print $2}') + assert_equal "DVSwitch:" $summary + + run govc device.remove -vm $vm $eth0 + assert_success + + eth0=$(govc device.ls -vm $vm | grep ethernet- | awk '{print $1}') + [ -z "$eth0" ] + + # Standard network backing + run govc vm.network.add -vm $vm -net "VM Network" + assert_success + + eth0=$(govc device.ls -vm $vm | grep ethernet- | awk '{print $1}') + + run govc device.info -vm $vm $eth0 + assert_success + + summary=$(govc device.info -vm $vm $eth0 | grep Summary: | awk -F: '{print $2}') + assert_equal "VM Network" $(collapse_ws $summary) + + run govc device.remove -vm $vm $eth0 + assert_success + + run govc device.remove -vm $vm $eth0 + assert_failure "govc: device '$eth0' not found" +} + +@test "network change backing" { + vcsim_env + + vm=$(new_empty_vm) + + eth0=$(govc device.ls -vm $vm | grep ethernet- | awk '{print $1}') + run govc vm.network.change -vm $vm $eth0 enoent + assert_failure "govc: network 'enoent' not found" + + run govc vm.network.change -vm $vm enoent "VM Network" + assert_failure "govc: device 'enoent' not found" + + run govc vm.network.change -vm $vm $eth0 "VM Network" + assert_success + + run govc vm.network.change -vm $vm $eth0 + assert_success + + unset GOVC_NETWORK + run govc vm.network.change -vm $vm $eth0 + assert_failure "govc: default network resolves to multiple instances, please specify" + + run govc vm.power -on $vm + assert_success + run govc vm.power -off $vm + + 
mac=$(vm_mac $vm) + run govc vm.network.change -vm $vm -net "VM Network" $eth0 + assert_success + + # verify we didn't change the mac address + run govc vm.power -on $vm + assert_success + assert_equal $mac $(vm_mac $vm) +} + +@test "network standard backing" { + vm=$(new_empty_vm) + + run govc device.info -vm $vm ethernet-0 + assert_success + + run govc device.remove -vm $vm ethernet-0 + assert_success + + run govc device.info -vm $vm ethernet-0 + assert_failure + + run govc vm.network.add -vm $vm enoent + assert_failure "govc: network 'enoent' not found" + + run govc vm.network.add -vm $vm "VM Network" + assert_success + + run govc device.info -vm $vm ethernet-0 + assert_success +} + +@test "network adapter" { + vm=$(new_id) + run govc vm.create -on=false -net.adapter=enoent $vm + assert_failure "govc: unknown ethernet card type 'enoent'" + + vm=$(new_id) + run govc vm.create -on=false -net.adapter=vmxnet3 $vm + assert_success + + eth0=$(govc device.ls -vm $vm | grep ethernet- | awk '{print $1}') + type=$(govc device.info -vm $vm $eth0 | grep Type: | awk -F: '{print $2}') + assert_equal "VirtualVmxnet3" $(collapse_ws $type) + + run govc vm.network.add -vm $vm -net.adapter e1000e "VM Network" + assert_success + + eth1=$(govc device.ls -vm $vm | grep ethernet- | grep -v $eth0 | awk '{print $1}') + type=$(govc device.info -vm $vm $eth1 | grep Type: | awk -F: '{print $2}') + assert_equal "VirtualE1000e" $(collapse_ws $type) +} + +@test "network flag required" { + vcsim_env + + # -net flag is required when there are multiple networks + unset GOVC_NETWORK + run govc vm.create -on=false $(new_id) + assert_failure "govc: default network resolves to multiple instances, please specify" +} + +@test "network change hardware address" { + mac="00:00:0f$(dd bs=1 count=3 if=/dev/random 2>/dev/null | hexdump -v -e '/1 ":%02x"')" + vm=$(new_id) + run govc vm.create -on=false $vm + assert_success + + run govc vm.network.change -vm $vm -net.address $mac ethernet-0 + assert_success + + run govc vm.power -on $vm + assert_success + + assert_equal $mac $(vm_mac $vm) +} diff --git a/vendor/github.com/vmware/govmomi/govc/test/pool.bats b/vendor/github.com/vmware/govmomi/govc/test/pool.bats new file mode 100644 index 000000000..d2b551c1b --- /dev/null +++ b/vendor/github.com/vmware/govmomi/govc/test/pool.bats @@ -0,0 +1,241 @@ +#!/usr/bin/env bats + +load test_helper + +@test "pool.create" { + path="*/Resources/$(new_id)/$(new_id)" + run govc pool.create $path + assert_failure + assert_line "govc: cannot create resource pool '$(basename ${path})': parent not found" + + id=$(new_id) + path="*/Resources/$id" + run govc pool.create -cpu.shares low -mem.reservation 500 $path + assert_success + + run govc pool.info $path + assert_success + + assert_line "Name: $id" + assert_line "CPU Shares: low" + assert_line "Mem Reservation: 500MB (expandable=true)" + + run govc pool.destroy $path + assert_success +} + +@test "pool.create multiple" { + id=$(new_id) + path="*/Resources/$id" + govc pool.create $path + + # Create multiple parent pools with multiple arguments (without globbing) + run govc pool.create $path/a $path/b + assert_success + result=$(govc ls "host/$path/*" | wc -l) + [ $result -eq 2 ] + + # Create multiple child pools with one argument (with globbing) + run govc pool.create $path/*/{a,b} + assert_success + result=$(govc ls "host/$path/*/*" | wc -l) + [ $result -eq 4 ] + + # Clean up + run govc pool.destroy $path/*/* $path/* $path + assert_success +} + +@test "pool.change" { + id=$(new_id) + 
path="*/Resources/$id" + govc pool.create $path + + run govc pool.change -mem.shares high $path + assert_success + run govc pool.info $path + assert_success + assert_line "Mem Shares: high" + assert_line "CPU Shares: normal" + + nid=$(new_id) + run govc pool.change -name $nid $path + assert_success + path="*/Resources/$nid" + + run govc pool.info $path + assert_success + assert_line "Name: $nid" + + run govc pool.destroy $path + assert_success +} + +@test "pool.change multiple" { + id=$(new_id) + path="*/Resources/$id" + govc pool.create $path + + # Create some nested pools so that we can test changing multiple in one call + govc pool.create $path/{a,b} $path/{a,b}/test + + # Test precondition + run govc pool.info $path/a/test + assert_success + assert_line "Name: test" + run govc pool.info $path/b/test + assert_success + assert_line "Name: test" + + # Change name of both test pools + run govc pool.change -name hello $path/*/test + assert_success + + # Test postcondition + run govc pool.info $path/a/hello + assert_success + assert_line "Name: hello" + run govc pool.info $path/b/hello + assert_success + assert_line "Name: hello" + + # Clean up + govc pool.destroy $path/a/hello + govc pool.destroy $path/a + govc pool.destroy $path/b/hello + govc pool.destroy $path/b + govc pool.destroy $path +} + +@test "pool.destroy" { + id=$(new_id) + + # should not be any existing test pools + result=$(govc ls "host/*/Resources/govc-test-*" | wc -l) + [ $result -eq 0 ] + + # parent pool + path="*/Resources/$id" + run govc pool.create $path + assert_success + + result=$(govc ls "host/$path/*" | wc -l) + [ $result -eq 0 ] + + # child pools + run govc pool.create $path/$(new_id) + assert_success + + run govc pool.create $path/$(new_id) + assert_success + + # 2 child pools + result=$(govc ls "host/$path/*" | wc -l) + [ $result -eq 2 ] + + # 1 parent pool + result=$(govc ls "host/*/Resources/govc-test-*" | wc -l) + [ $result -eq 1 ] + + run govc pool.destroy -r $path + assert_success + + # if we didn't -r, the child pools would end up here + result=$(govc ls "host/*/Resources/govc-test-*" | wc -l) + [ $result -eq 0 ] +} + +@test "pool.destroy multiple" { + id=$(new_id) + path="*/Resources/$id" + govc pool.create $path + + # Create some nested pools so that we can test destroying multiple in one call + govc pool.create $path/{a,b} + + # Test precondition + result=$(govc ls "host/$path/*" | wc -l) + [ $result -eq 2 ] + + # Destroy both pools + run govc pool.destroy $path/{a,b} + assert_success + + # Test postcondition + result=$(govc ls "host/$path/*" | wc -l) + [ $result -eq 0 ] + + # Clean up + govc pool.destroy $path +} + +@test "vm.create -pool" { + # test with full inventory path to pools + parent_path=$(govc ls 'host/*/Resources') + parent_name=$(basename $parent_path) + [ "$parent_name" = "Resources" ] + + child_name=$(new_id) + child_path="$parent_path/$child_name" + + grand_child_name=$(new_id) + grand_child_path="$child_path/$grand_child_name" + + run govc pool.create $parent_path/$child_name{,/$grand_child_name} + assert_success + + for path in $parent_path $child_path $grand_child_path + do + run govc vm.create -pool $path $(new_id) + assert_success + done + + run govc pool.change -debug -mem.limit 100 -mem.expandable=false $child_path + assert_failure + + run govc pool.change -debug -mem.limit 100 $child_path + assert_success + + run govc pool.change -debug -mem.limit 120 -mem.expandable $child_path + assert_success + + # test with glob inventory path to pools + parent_path="*/$parent_name" + 
child_path="$parent_path/$child_name"
+  grand_child_path="$child_path/$grand_child_name"
+
+  for path in $grand_child_path $child_path
+  do
+    run govc pool.destroy $path
+    assert_success
+  done
+}
+
+@test "vm.create -pool host" {
+  id=$(new_id)
+
+  path=$(govc ls host)
+
+  run govc vm.create -on=false -pool enoent $id
+  assert_failure "govc: resource pool 'enoent' not found"
+
+  run govc vm.create -on=false -pool $path $id
+  assert_success
+}
+
+@test "vm.create -pool cluster" {
+  vcsim_env
+
+  id=$(new_id)
+
+  path=$(dirname $GOVC_HOST)
+
+  unset GOVC_HOST
+  unset GOVC_RESOURCE_POOL
+
+  run govc vm.create -on=false -pool enoent $id
+  assert_failure "govc: resource pool 'enoent' not found"
+
+  run govc vm.create -on=false -pool $path $id
+  assert_success
+}
diff --git a/vendor/github.com/vmware/govmomi/govc/test/test_helper.bash b/vendor/github.com/vmware/govmomi/govc/test/test_helper.bash
new file mode 100644
index 000000000..233aaae1c
--- /dev/null
+++ b/vendor/github.com/vmware/govmomi/govc/test/test_helper.bash
@@ -0,0 +1,248 @@
+GOVC_TEST_URL=${GOVC_TEST_URL-"https://root:vagrant@localhost:18443/sdk"}
+export GOVC_URL=$GOVC_TEST_URL
+export GOVC_DATASTORE=datastore1
+export GOVC_NETWORK="VM Network"
+export GOVC_INSECURE=true
+unset GOVC_DATACENTER
+unset GOVC_USERNAME
+unset GOVC_PASSWORD
+
+if [ -z "$BATS_TEST_DIRNAME" ]; then
+  BATS_TEST_DIRNAME=$(dirname ${BASH_SOURCE})
+fi
+
+# gnu core utils
+readlink=$(type -p greadlink readlink | head -1)
+xargs=$(type -p gxargs xargs | head -1)
+mktemp=$(type -p gmktemp mktemp | head -1)
+
+BATS_TEST_DIRNAME=$($readlink -nf $BATS_TEST_DIRNAME)
+
+GOVC_IMAGES=$BATS_TEST_DIRNAME/images
+TTYLINUX_NAME=ttylinux-pc_i486-16.1
+
+GOVC_TEST_VMDK_SRC=$GOVC_IMAGES/${TTYLINUX_NAME}-disk1.vmdk
+GOVC_TEST_VMDK=$(basename $GOVC_TEST_VMDK_SRC)
+
+GOVC_TEST_ISO_SRC=$GOVC_IMAGES/${TTYLINUX_NAME}.iso
+GOVC_TEST_ISO=$(basename $GOVC_TEST_ISO_SRC)
+
+GOVC_TEST_IMG_SRC=$GOVC_IMAGES/floppybird.img
+GOVC_TEST_IMG=$(basename $GOVC_TEST_IMG_SRC)
+
+PATH="$(dirname $BATS_TEST_DIRNAME):$PATH"
+
+teardown() {
+  govc ls vm | grep govc-test- | $xargs -r govc vm.destroy
+  govc datastore.ls | grep govc-test- | awk '{print ($NF)}' | $xargs -n1 -r govc datastore.rm
+}
+
+new_id() {
+  echo "govc-test-$(uuidgen)"
+}
+
+import_ttylinux_vmdk() {
+  # TODO: fix datastore.ls so we don't need grep
+  govc datastore.ls | grep -q $GOVC_TEST_VMDK || \
+    govc import.vmdk $GOVC_TEST_VMDK_SRC > /dev/null
+}
+
+datastore_upload() {
+  src=$1
+  dst=$(basename $src)
+  # TODO: fix datastore.ls so we don't need grep
+  govc datastore.ls | grep -q $dst || \
+    govc datastore.upload $src $dst > /dev/null
+}
+
+upload_img() {
+  datastore_upload $GOVC_TEST_IMG_SRC
+}
+
+upload_iso() {
+  datastore_upload $GOVC_TEST_ISO_SRC
+}
+
+new_ttylinux_vm() {
+  import_ttylinux_vmdk # TODO: make this part of vagrant provision
+  id=$(new_id)
+  govc vm.create -m 32 -disk $GOVC_TEST_VMDK -disk.controller ide -on=false $id
+  echo $id
+}
+
+new_empty_vm() {
+  id=$(new_id)
+  govc vm.create -on=false $id
+  echo $id
+}
+
+vm_power_state() {
+  govc vm.info "$1" | grep "Power state:" | awk -F: '{print $2}' | collapse_ws
+}
+
+vm_mac() {
+  govc device.info -vm "$1" ethernet-0 | grep "MAC Address" | awk '{print $NF}'
+}
+
+# exports an environment for using vcsim if running, otherwise skips the calling test.
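+#
+# Typical usage in a test, as a sketch:
+#
+#   @test "something that requires vCenter" {
+#     vcsim_env
+#     run govc ls
+#     assert_success
+#   }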
+vcsim_env() {
+  if [ "$(uname)" == "Darwin" ]; then
+    PATH="/Applications/VMware Fusion.app/Contents/Library:$PATH"
+  fi
+
+  if [ "$(vmrun list | grep $BATS_TEST_DIRNAME/vcsim | wc -l)" -eq 1 ]; then
+    export GOVC_URL=https://root:vmware@localhost:16443/sdk \
+           GOVC_DATACENTER=DC0 \
+           GOVC_DATASTORE=GlobalDS_0 \
+           GOVC_HOST=/DC0/host/DC0_C0/DC0_C0_H0 \
+           GOVC_RESOURCE_POOL=/DC0/host/DC0_C0/Resources \
+           GOVC_NETWORK=/DC0/network/DC0_DVPG0
+  else
+    skip "requires vcsim"
+  fi
+}
+
+# remove username/password from $GOVC_URL and set $GOVC_{USERNAME,PASSWORD}
+govc_url_to_vars() {
+  local url=$(awk -F@ '{print $2}' <<<"$GOVC_URL")
+  local userpass=$(awk -F// '{print $2}' <<<"$GOVC_URL" | awk -F@ '{print $1}')
+  local username=$(awk -F: '{print $1}' <<<"$userpass")
+  local password=$(awk -F: '{print $2}' <<<"$userpass")
+
+  export GOVC_URL="$url" GOVC_USERNAME="$username" GOVC_PASSWORD="$password"
+
+  # double check that we removed user/pass
+  grep -q -v @ <<<"$GOVC_URL"
+}
+
+quit_vnc() {
+  if [ "$(uname)" = "Darwin" ]; then
+    osascript <<EOF
+tell application "Screen Sharing" to quit
+EOF
+  fi
+}
+
+# collapse whitespace, for easier line matching; reads stdin when given no arguments
+collapse_ws() {
+  local line
+  if [ $# -eq 0 ]; then line="$(cat -)"
+  else line="$*"
+  fi
+  echo "$line" | tr -s ' ' | sed -e 's/^ //' -e 's/ $//'
+}
+
+flunk() {
+  { if [ "$#" -eq 0 ]; then cat -
+    else echo "$@"
+    fi
+  } >&2
+  return 1
+}
+
+assert_success() {
+  if [ "$status" -ne 0 ]; then
+    flunk "command failed with exit status $status: $output"
+  elif [ "$#" -gt 0 ]; then
+    assert_output "$1"
+  fi
+}
+
+assert_failure() {
+  if [ "$status" -ne 1 ]; then
+    flunk $(printf "expected failed exit status=1, got status=%d" $status)
+  elif [ "$#" -gt 0 ]; then
+    assert_output "$1"
+  fi
+}
+
+assert_equal() {
+  if [ "$1" != "$2" ]; then
+    { echo "expected: $1"
+      echo "actual: $2"
+    } | flunk
+  fi
+}
+
+assert_output() {
+  local expected
+  if [ $# -eq 0 ]; then expected="$(cat -)"
+  else expected="$1"
+  fi
+  assert_equal "$expected" "$output"
+}
+
+assert_matches() {
+  local pattern="${1}"
+  local actual="${2}"
+
+  if [ $# -eq 1 ]; then
+    actual="$(cat -)"
+  fi
+
+  if ! grep -q "${pattern}" <<<"${actual}"; then
+    { echo "pattern: ${pattern}"
+      echo "actual: ${actual}"
+    } | flunk
+  fi
+}
+
+assert_empty() {
+  local actual="${1}"
+
+  if [ $# -eq 0 ]; then
+    actual="$(cat -)"
+  fi
+
+  if [ -n "${actual}" ]; then
+    { echo "actual: ${actual}"
+    } | flunk
+  fi
+}
+
+assert_line() {
+  if [ "$1" -ge 0 ] 2>/dev/null; then
+    assert_equal "$2" "$(collapse_ws ${lines[$1]})"
+  else
+    local line
+    for line in "${lines[@]}"; do
+      if [ "$(collapse_ws $line)" = "$1" ]; then return 0; fi
+    done
+    flunk "expected line \`$1'"
+  fi
+}
+
+refute_line() {
+  if [ "$1" -ge 0 ] 2>/dev/null; then
+    local num_lines="${#lines[@]}"
+    if [ "$1" -lt "$num_lines" ]; then
+      flunk "output has $num_lines lines"
+    fi
+  else
+    local line
+    for line in "${lines[@]}"; do
+      if [ "$line" = "$1" ]; then
+        flunk "expected to not find line \`$line'"
+      fi
+    done
+  fi
+}
+
+assert() {
+  if ! "$@"; then
+    flunk "failed: $@"
+  fi
+}
diff --git a/vendor/github.com/vmware/govmomi/govc/test/vcsim/Vagrantfile b/vendor/github.com/vmware/govmomi/govc/test/vcsim/Vagrantfile
new file mode 100644
index 000000000..76a3b6d3f
--- /dev/null
+++ b/vendor/github.com/vmware/govmomi/govc/test/vcsim/Vagrantfile
@@ -0,0 +1,24 @@
+# -*- mode: ruby -*-
+# vi: set ft=ruby :
+
+# VCSA with VC simulator enabled.
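+# The forwarded ports below line up with the vcsim_env helper in
+# ../test_helper.bash, which expects the simulator SDK endpoint at
+# https://root:vmware@localhost:16443/sdk. Typical (assumed) workflow:
+# "vagrant up" this box, then run the bats suites from the parent directory.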
+# 'vagrant provision' will erase any existing inventory and
+# populate with the config in ./provision.sh
+
+Vagrant.configure("2") do |config|
+  config.vm.hostname = "vcsim"
+
+  config.vm.box = "vcsa"
+  config.vm.synced_folder ".", "/vagrant", disabled: true
+
+  config.vm.provision "shell", path: "provision.sh"
+
+  config.vm.network "forwarded_port", guest: 443, host: 16443
+  config.vm.network "forwarded_port", guest: 80, host: 16080
+
+  [:vmware_fusion, :vmware_workstation].each do |name|
+    config.vm.provider name do |v,override|
+      v.vmx["memsize"] = "4096"
+    end
+  end
+end
diff --git a/vendor/github.com/vmware/govmomi/govc/test/vcsim/provision.sh b/vendor/github.com/vmware/govmomi/govc/test/vcsim/provision.sh
new file mode 100644
index 000000000..9732c277c
--- /dev/null
+++ b/vendor/github.com/vmware/govmomi/govc/test/vcsim/provision.sh
@@ -0,0 +1,30 @@
+PATH=$PATH:/sbin:/usr/sbin:/usr/local/sbin
+
+echo "Creating vcsim model..."
+cat > /etc/vmware-vpx/vcsim/model/initInventory-govc.cfg <<EOF
+<config>
+  <inventory>
+    <dc>2</dc>
+    <host-per-dc>3</host-per-dc>
+    <vm-per-host>3</vm-per-host>
+    <poweron-vm-per-host>2</poweron-vm-per-host>
+    <cluster-per-dc>2</cluster-per-dc>
+    <host-per-cluster>3</host-per-cluster>
+    <rp-per-cluster>3</rp-per-cluster>
+    <vm-per-rp>4</vm-per-rp>
+    <poweron-vm-per-rp>3</poweron-vm-per-rp>
+    <dv-portgroups>2</dv-portgroups>
+  </inventory>
+</config>
+EOF
+
+cat > /etc/vmware-vpx/vcsim/model/vcsim-default.cfg <<EOF
+<simulator>
+<enabled>true</enabled>
+<initInventory>vcsim/model/initInventory-govc.cfg</initInventory>
+</simulator>
+EOF
+
+echo "Starting VC simulator..."
+vmware-vcsim-stop
+vmware-vcsim-start default
diff --git a/vendor/github.com/vmware/govmomi/govc/test/vm.bats b/vendor/github.com/vmware/govmomi/govc/test/vm.bats
new file mode 100644
index 000000000..751cbedc6
--- /dev/null
+++ b/vendor/github.com/vmware/govmomi/govc/test/vm.bats
@@ -0,0 +1,339 @@
+#!/usr/bin/env bats
+
+load test_helper
+
+@test "vm.ip" {
+  id=$(new_ttylinux_vm)
+
+  run govc vm.power -on $id
+  assert_success
+
+  run govc vm.ip $id
+  assert_success
+}
+
+@test "vm.ip -esxcli" {
+  id=$(new_ttylinux_vm)
+
+  run govc vm.power -on $id
+  assert_success
+
+  run govc vm.ip -esxcli $id
+  assert_success
+
+  ip_esxcli=$output
+
+  run govc vm.ip $id
+  assert_success
+  ip_tools=$output
+
+  assert_equal $ip_esxcli $ip_tools
+}
+
+@test "vm.create" {
+  id=$(new_ttylinux_vm)
+
+  run govc vm.power -on $id
+  assert_success
+
+  result=$(govc device.ls -vm $id | grep disk- | wc -l)
+  [ $result -eq 0 ]
+
+  result=$(govc device.ls -vm $id | grep cdrom- | wc -l)
+  [ $result -eq 0 ]
+}
+
+@test "vm.change" {
+  id=$(new_ttylinux_vm)
+
+  run govc vm.change -g ubuntu64Guest -m 1024 -c 2 -vm $id
+  assert_success
+
+  run govc vm.info $id
+  assert_success
+  assert_line "Guest name: Ubuntu Linux (64-bit)"
+  assert_line "Memory: 1024MB"
+  assert_line "CPU: 2 vCPU(s)"
+
+  run govc vm.change -e "guestinfo.a=1" -e "guestinfo.b=2" -vm $id
+  assert_success
+
+  run govc vm.info -e $id
+  assert_success
+  assert_line "guestinfo.a: 1"
+  assert_line "guestinfo.b: 2"
+
+  nid=$(new_id)
+  run govc vm.change -name $nid -vm $id
+  assert_success
+
+  run govc vm.info $id
+  [ ${#lines[@]} -eq 0 ]
+
+  run govc vm.info $nid
+  [ ${#lines[@]} -gt 0 ]
+}
+
+@test "vm.power" {
+  vm=$(new_ttylinux_vm)
+
+  run vm_power_state $vm
+  assert_success "poweredOff"
+
+  run govc vm.power $vm
+  assert_failure
+
+  run govc vm.power -on -off $vm
+  assert_failure
+
+  run govc vm.power -on $vm
+  assert_success
+  run vm_power_state $vm
+  assert_success "poweredOn"
+
+  run govc vm.power -suspend $vm
+  assert_success
+  run vm_power_state $vm
+  assert_success "suspended"
+
+  run govc vm.power -on $vm
+  assert_success
+  run vm_power_state $vm
+  assert_success "poweredOn"
+}
+
+@test "vm.power -force" {
+  vm=$(new_id)
+  govc vm.create $vm
+
+  run govc vm.power -r $vm
+  assert_failure
+
+  run govc vm.power -r -force $vm
+  assert_success
+
+  run govc vm.power -s $vm +
assert_failure + + run govc vm.power -s -force $vm + assert_success + + run govc vm.power -off $vm + assert_failure + + run govc vm.power -off -force $vm + assert_success + + run govc vm.destroy $vm + assert_success + + run govc vm.power -off $vm + assert_failure + + run govc vm.power -off -force $vm + assert_failure +} + +@test "vm.create pvscsi" { + vm=$(new_id) + govc vm.create -on=false -disk.controller pvscsi $vm + + result=$(govc device.ls -vm $vm | grep pvscsi- | wc -l) + [ $result -eq 1 ] + + result=$(govc device.ls -vm $vm | grep lsilogic- | wc -l) + [ $result -eq 0 ] +} + +@test "vm.create in cluster" { + vcsim_env + + # using GOVC_HOST and its resource pool + run govc vm.create -on=false $(new_id) + assert_success + + # using no -host and the default resource pool for DC0 + unset GOVC_HOST + run govc vm.create -on=false $(new_id) + assert_success +} + +@test "vm.info" { + local num=3 + + local prefix=$(new_id) + + for x in $(seq $num) + do + local id="${prefix}-${x}" + + # If VM is not found: No output, exit code==0 + run govc vm.info $id + assert_success + [ ${#lines[@]} -eq 0 ] + + # If VM is not found (using -json flag): Valid json output, exit code==0 + run govc vm.info -json $id + assert_success + assert_line "{\"VirtualMachines\":null}" + + run govc vm.create -on=false $id + assert_success + + local info=$(govc vm.info -r $id) + local found=$(grep Name: <<<"$info" | wc -l) + [ "$found" -eq 1 ] + + # test that mo names are printed + found=$(grep Host: <<<"$info" | awk '{print $2}') + [ -n "$found" ] + found=$(grep Storage: <<<"$info" | awk '{print $2}') + [ -n "$found" ] + found=$(grep Network: <<<"$info" | awk '{print $2}') + [ -n "$found" ] + done + + # test find slice + local slice=$(govc vm.info ${prefix}-*) + local found=$(grep Name: <<<"$slice" | wc -l) + [ "$found" -eq $num ] + + # test -r + found=$(grep Storage: <<<"$slice" | wc -l) + [ "$found" -eq 0 ] + found=$(grep Network: <<<"$slice" | wc -l) + [ "$found" -eq 0 ] + slice=$(govc vm.info -r ${prefix}-*) + found=$(grep Storage: <<<"$slice" | wc -l) + [ "$found" -eq $num ] + found=$(grep Network: <<<"$slice" | wc -l) + [ "$found" -eq $num ] + + # test extraConfig + run govc vm.change -e "guestinfo.a=2" -vm $id + assert_success + run govc vm.info -e $id + assert_success + assert_line "guestinfo.a: 2" +} + +@test "vm.create linked ide disk" { + vm=$(new_id) + run govc vm.create -disk $GOVC_TEST_VMDK -disk.controller ide -on=false $vm + assert_success + + run govc device.info -vm $vm disk-200-0 + assert_success + assert_line "Controller: ide-200" +} + +@test "vm.create linked scsi disk" { + vm=$(new_id) + + run govc vm.create -disk enoent -on=false $vm + assert_failure "govc: cannot stat '[${GOVC_DATASTORE}] enoent': No such file" + + run govc vm.create -disk $GOVC_TEST_VMDK -on=false $vm + assert_success + + run govc device.info -vm $vm disk-1000-0 + assert_success + assert_line "Controller: lsilogic-1000" + assert_line "Parent: [${GOVC_DATASTORE}] $GOVC_TEST_VMDK" + assert_line "File: [${GOVC_DATASTORE}] $vm/${vm}.vmdk" +} + +@test "vm.create scsi disk" { + vm=$(new_id) + + run govc vm.create -disk enoent -on=false $vm + assert_failure "govc: cannot stat '[${GOVC_DATASTORE}] enoent': No such file" + + run govc vm.create -disk $GOVC_TEST_VMDK -on=false -link=false $vm + assert_success + + run govc device.info -vm $vm disk-1000-0 + assert_success + assert_line "Controller: lsilogic-1000" + refute_line "Parent: [${GOVC_DATASTORE}] $GOVC_TEST_VMDK" + assert_line "File: [${GOVC_DATASTORE}] $GOVC_TEST_VMDK" +} + +@test 
"vm.create iso" { + upload_iso + + vm=$(new_id) + + run govc vm.create -iso enoent -on=false $vm + assert_failure "govc: cannot stat '[${GOVC_DATASTORE}] enoent': No such file" + + run govc vm.create -iso $GOVC_TEST_ISO -on=false $vm + assert_success + + run govc device.info -vm $vm cdrom-3000 + assert_success + assert_line "Controller: ide-200" + assert_line "Summary: ISO [${GOVC_DATASTORE}] $GOVC_TEST_ISO" +} + +@test "vm.disk.create empty vm" { + vm=$(new_empty_vm) + + local name=$(new_id) + + run govc vm.disk.create -vm $vm -name $name -size 1G + assert_success + result=$(govc device.ls -vm $vm | grep disk- | wc -l) + [ $result -eq 1 ] + + name=$(new_id) + + run govc vm.disk.create -vm $vm -name $name -controller lsilogic-1000 -size 2G + assert_success + result=$(govc device.ls -vm $vm | grep disk- | wc -l) + [ $result -eq 2 ] +} + +@test "vm.disk.create" { + import_ttylinux_vmdk + vm=$(new_id) + + govc vm.create -disk $GOVC_TEST_VMDK -on=false $vm + result=$(govc device.ls -vm $vm | grep disk- | wc -l) + [ $result -eq 1 ] + + local name=$(new_id) + + run govc vm.disk.create -vm $vm -name $name -size 1G + assert_success + result=$(govc device.ls -vm $vm | grep disk- | wc -l) + [ $result -eq 2 ] + + run govc vm.disk.create -vm $vm -name $name -size 1G + assert_success # TODO: should fail? + result=$(govc device.ls -vm $vm | grep disk- | wc -l) + [ $result -eq 2 ] +} + +@test "vm.disk.attach" { + import_ttylinux_vmdk + vm=$(new_id) + + govc vm.create -disk $GOVC_TEST_VMDK -on=false $vm + result=$(govc device.ls -vm $vm | grep disk- | wc -l) + [ $result -eq 1 ] + + run govc import.vmdk $GOVC_TEST_VMDK_SRC $vm + assert_success + + run govc vm.disk.attach -vm $vm -link=false -disk enoent.vmdk + assert_failure "govc: File [${GOVC_DATASTORE}] enoent.vmdk was not found" + + run govc vm.disk.attach -vm $vm -disk enoent.vmdk + assert_failure "govc: Invalid configuration for device '0'." + + run govc vm.disk.attach -vm $vm -disk $vm/$GOVC_TEST_VMDK -controller lsilogic-1000 + assert_success + result=$(govc device.ls -vm $vm | grep disk- | wc -l) + [ $result -eq 2 ] +} diff --git a/vendor/github.com/vmware/govmomi/govc/vapp/destroy.go b/vendor/github.com/vmware/govmomi/govc/vapp/destroy.go new file mode 100644 index 000000000..8f66428e4 --- /dev/null +++ b/vendor/github.com/vmware/govmomi/govc/vapp/destroy.go @@ -0,0 +1,95 @@ +/* +Copyright (c) 2015 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package vapp + +import ( + "flag" + + "github.com/vmware/govmomi/find" + "github.com/vmware/govmomi/govc/cli" + "github.com/vmware/govmomi/govc/flags" + "golang.org/x/net/context" +) + +type destroy struct { + *flags.DatacenterFlag +} + +func init() { + cli.Register("vapp.destroy", &destroy{}) +} + +func (cmd *destroy) Register(ctx context.Context, f *flag.FlagSet) { + cmd.DatacenterFlag, ctx = flags.NewDatacenterFlag(ctx) + cmd.DatacenterFlag.Register(ctx, f) +} + +func (cmd *destroy) Process(ctx context.Context) error { + if err := cmd.DatacenterFlag.Process(ctx); err != nil { + return err + } + return nil +} + +func (cmd *destroy) Usage() string { + return "VAPP..." +} + +func (cmd *destroy) Run(ctx context.Context, f *flag.FlagSet) error { + if f.NArg() == 0 { + return flag.ErrHelp + } + + finder, err := cmd.Finder() + if err != nil { + return err + } + + for _, arg := range f.Args() { + vapps, err := finder.VirtualAppList(context.TODO(), arg) + if err != nil { + if _, ok := err.(*find.NotFoundError); ok { + // Ignore if vapp cannot be found + continue + } + + return err + } + + for _, vapp := range vapps { + task, err := vapp.PowerOffVApp_Task(context.TODO(), false) + if err != nil { + return err + } + err = task.Wait(context.TODO()) + if err != nil { + return err + } + + task, err = vapp.Destroy(context.TODO()) + if err != nil { + return err + } + err = task.Wait(context.TODO()) + if err != nil { + return err + } + } + } + + return nil +} diff --git a/vendor/github.com/vmware/govmomi/govc/vapp/power.go b/vendor/github.com/vmware/govmomi/govc/vapp/power.go new file mode 100644 index 000000000..a03dc958f --- /dev/null +++ b/vendor/github.com/vmware/govmomi/govc/vapp/power.go @@ -0,0 +1,112 @@ +/* +Copyright (c) 2015 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package vapp + +import ( + "flag" + "fmt" + + "github.com/vmware/govmomi/govc/cli" + "github.com/vmware/govmomi/govc/flags" + "github.com/vmware/govmomi/object" + "golang.org/x/net/context" +) + +type power struct { + *flags.SearchFlag + + On bool + Off bool + Suspend bool + Force bool +} + +func init() { + cli.Register("vapp.power", &power{}) +} + +func (cmd *power) Register(ctx context.Context, f *flag.FlagSet) { + cmd.SearchFlag, ctx = flags.NewSearchFlag(ctx, flags.SearchVirtualApps) + cmd.SearchFlag.Register(ctx, f) + + f.BoolVar(&cmd.On, "on", false, "Power on") + f.BoolVar(&cmd.Off, "off", false, "Power off") + f.BoolVar(&cmd.Suspend, "suspend", false, "Power suspend") + f.BoolVar(&cmd.Force, "force", false, "Force (If force is false, the shutdown order in the vApp is executed. 
If force is true, all virtual machines are powered-off (regardless of shutdown order))") +} + +func (cmd *power) Process(ctx context.Context) error { + if err := cmd.SearchFlag.Process(ctx); err != nil { + return err + } + opts := []bool{cmd.On, cmd.Off, cmd.Suspend} + selected := false + + for _, opt := range opts { + if opt { + if selected { + return flag.ErrHelp + } + selected = opt + } + } + + if !selected { + return flag.ErrHelp + } + + return nil +} + +func (cmd *power) Run(ctx context.Context, f *flag.FlagSet) error { + vapps, err := cmd.VirtualApps(f.Args()) + if err != nil { + return err + } + + for _, vapp := range vapps { + var task *object.Task + + switch { + case cmd.On: + fmt.Fprintf(cmd, "Powering on %s... ", vapp.Reference()) + task, err = vapp.PowerOnVApp_Task(context.TODO()) + case cmd.Off: + fmt.Fprintf(cmd, "Powering off %s... ", vapp.Reference()) + task, err = vapp.PowerOffVApp_Task(context.TODO(), cmd.Force) + case cmd.Suspend: + fmt.Fprintf(cmd, "Suspend %s... ", vapp.Reference()) + task, err = vapp.SuspendVApp_Task(context.TODO()) + } + + if err != nil { + return err + } + + if task != nil { + err = task.Wait(context.TODO()) + } + if err == nil { + fmt.Fprintf(cmd, "OK\n") + continue + } + + return err + } + + return nil +} diff --git a/vendor/github.com/vmware/govmomi/govc/version/command.go b/vendor/github.com/vmware/govmomi/govc/version/command.go new file mode 100644 index 000000000..68ef342f6 --- /dev/null +++ b/vendor/github.com/vmware/govmomi/govc/version/command.go @@ -0,0 +1,46 @@ +/* +Copyright (c) 2014-2015 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package version + +import ( + "flag" + "fmt" + + "golang.org/x/net/context" + + "github.com/vmware/govmomi/govc/cli" + "github.com/vmware/govmomi/govc/flags" +) + +var gitVersion string + +type version struct { + *flags.EmptyFlag +} + +func init() { + if gitVersion == "" { + gitVersion = "unknown" + } + + cli.Register("version", &version{}) +} + +func (c *version) Run(ctx context.Context, f *flag.FlagSet) error { + fmt.Printf("govc %s\n", gitVersion) + return nil +} diff --git a/vendor/github.com/vmware/govmomi/govc/vm/change.go b/vendor/github.com/vmware/govmomi/govc/vm/change.go new file mode 100644 index 000000000..c359569c1 --- /dev/null +++ b/vendor/github.com/vmware/govmomi/govc/vm/change.go @@ -0,0 +1,98 @@ +/* +Copyright (c) 2015 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/
+
+package vm
+
+import (
+	"flag"
+	"fmt"
+	"strings"
+
+	"github.com/vmware/govmomi/govc/cli"
+	"github.com/vmware/govmomi/govc/flags"
+	"github.com/vmware/govmomi/vim25/types"
+	"golang.org/x/net/context"
+)
+
+type extraConfig []types.BaseOptionValue
+
+func (e *extraConfig) String() string {
+	return fmt.Sprintf("%v", *e)
+}
+
+func (e *extraConfig) Set(v string) error {
+	r := strings.Split(v, "=")
+	if len(r) < 2 {
+		return fmt.Errorf("failed to parse extraConfig: %s", v)
+	} else if r[1] == "" {
+		return fmt.Errorf("empty value: %s", v)
+	}
+	*e = append(*e, &types.OptionValue{Key: r[0], Value: r[1]})
+	return nil
+}
+
+type change struct {
+	*flags.VirtualMachineFlag
+
+	types.VirtualMachineConfigSpec
+	extraConfig extraConfig
+}
+
+func init() {
+	cli.Register("vm.change", &change{})
+}
+
+func (cmd *change) Register(ctx context.Context, f *flag.FlagSet) {
+	cmd.VirtualMachineFlag, ctx = flags.NewVirtualMachineFlag(ctx)
+	cmd.VirtualMachineFlag.Register(ctx, f)
+
+	f.Int64Var(&cmd.MemoryMB, "m", 0, "Size in MB of memory")
+	f.IntVar(&cmd.NumCPUs, "c", 0, "Number of CPUs")
+	f.StringVar(&cmd.GuestId, "g", "", "Guest OS")
+	f.StringVar(&cmd.Name, "name", "", "Display name")
+	f.Var(&cmd.extraConfig, "e", "ExtraConfig. <key>=<value>")
+
+	f.Var(flags.NewOptionalBool(&cmd.NestedHVEnabled), "nested-hv-enabled", "Enable nested hardware-assisted virtualization")
+}
+
+func (cmd *change) Process(ctx context.Context) error {
+	if err := cmd.VirtualMachineFlag.Process(ctx); err != nil {
+		return err
+	}
+	return nil
+}
+
+func (cmd *change) Run(ctx context.Context, f *flag.FlagSet) error {
+	vm, err := cmd.VirtualMachine()
+	if err != nil {
+		return err
+	}
+
+	if vm == nil {
+		return flag.ErrHelp
+	}
+
+	if len(cmd.extraConfig) > 0 {
+		cmd.VirtualMachineConfigSpec.ExtraConfig = cmd.extraConfig
+	}
+
+	task, err := vm.Reconfigure(context.TODO(), cmd.VirtualMachineConfigSpec)
+	if err != nil {
+		return err
+	}
+
+	return task.Wait(context.TODO())
+}
diff --git a/vendor/github.com/vmware/govmomi/govc/vm/create.go b/vendor/github.com/vmware/govmomi/govc/vm/create.go
new file mode 100644
index 000000000..fdb49eb95
--- /dev/null
+++ b/vendor/github.com/vmware/govmomi/govc/vm/create.go
@@ -0,0 +1,274 @@
+/*
+Copyright (c) 2014-2015 VMware, Inc. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/ + +package vm + +import ( + "flag" + "fmt" + + "github.com/vmware/govmomi/govc/cli" + "github.com/vmware/govmomi/govc/flags" + "github.com/vmware/govmomi/object" + "github.com/vmware/govmomi/vim25" + "github.com/vmware/govmomi/vim25/types" + "golang.org/x/net/context" +) + +type create struct { + *flags.ClientFlag + *flags.DatacenterFlag + *flags.DatastoreFlag + *flags.ResourcePoolFlag + *flags.HostSystemFlag + *flags.NetworkFlag + + memory int + cpus int + guestID string + link bool + on bool + force bool + iso string + disk string + controller string + + Client *vim25.Client + Datacenter *object.Datacenter + Datastore *object.Datastore + ResourcePool *object.ResourcePool + HostSystem *object.HostSystem +} + +func init() { + cli.Register("vm.create", &create{}) +} + +func (cmd *create) Register(ctx context.Context, f *flag.FlagSet) { + cmd.ClientFlag, ctx = flags.NewClientFlag(ctx) + cmd.ClientFlag.Register(ctx, f) + + cmd.DatacenterFlag, ctx = flags.NewDatacenterFlag(ctx) + cmd.DatacenterFlag.Register(ctx, f) + + cmd.DatastoreFlag, ctx = flags.NewDatastoreFlag(ctx) + cmd.DatastoreFlag.Register(ctx, f) + + cmd.ResourcePoolFlag, ctx = flags.NewResourcePoolFlag(ctx) + cmd.ResourcePoolFlag.Register(ctx, f) + + cmd.HostSystemFlag, ctx = flags.NewHostSystemFlag(ctx) + cmd.HostSystemFlag.Register(ctx, f) + + cmd.NetworkFlag, ctx = flags.NewNetworkFlag(ctx) + cmd.NetworkFlag.Register(ctx, f) + + f.IntVar(&cmd.memory, "m", 1024, "Size in MB of memory") + f.IntVar(&cmd.cpus, "c", 1, "Number of CPUs") + f.StringVar(&cmd.guestID, "g", "otherGuest", "Guest OS") + f.BoolVar(&cmd.link, "link", true, "Link specified disk") + f.BoolVar(&cmd.on, "on", true, "Power on VM. Default is true if -disk argument is given.") + f.BoolVar(&cmd.force, "force", false, "Create VM if vmx already exists") + f.StringVar(&cmd.iso, "iso", "", "Path to ISO") + f.StringVar(&cmd.controller, "disk.controller", "scsi", "Disk controller type") + f.StringVar(&cmd.disk, "disk", "", "Disk path name") +} + +func (cmd *create) Process(ctx context.Context) error { + if err := cmd.ClientFlag.Process(ctx); err != nil { + return err + } + if err := cmd.DatacenterFlag.Process(ctx); err != nil { + return err + } + if err := cmd.DatastoreFlag.Process(ctx); err != nil { + return err + } + if err := cmd.ResourcePoolFlag.Process(ctx); err != nil { + return err + } + if err := cmd.HostSystemFlag.Process(ctx); err != nil { + return err + } + if err := cmd.NetworkFlag.Process(ctx); err != nil { + return err + } + return nil +} + +func (cmd *create) Run(ctx context.Context, f *flag.FlagSet) error { + var err error + + if len(f.Args()) != 1 { + return flag.ErrHelp + } + + cmd.Client, err = cmd.ClientFlag.Client() + if err != nil { + return err + } + + cmd.Datacenter, err = cmd.DatacenterFlag.Datacenter() + if err != nil { + return err + } + + cmd.Datastore, err = cmd.DatastoreFlag.Datastore() + if err != nil { + return err + } + + cmd.HostSystem, err = cmd.HostSystemFlag.HostSystemIfSpecified() + if err != nil { + return err + } + + if cmd.HostSystem != nil { + if cmd.ResourcePool, err = cmd.HostSystem.ResourcePool(context.TODO()); err != nil { + return err + } + } else { + // -host is optional + if cmd.ResourcePool, err = cmd.ResourcePoolFlag.ResourcePool(); err != nil { + return err + } + } + + for _, file := range []*string{&cmd.iso, &cmd.disk} { + if *file != "" { + _, err = cmd.Datastore.Stat(context.TODO(), *file) + if err != nil { + return err + } + } + } + + task, err := cmd.createVM(f.Arg(0)) + if err != nil { + return err + } + + info, 
err := task.WaitForResult(context.TODO(), nil) + if err != nil { + return err + } + + vm := object.NewVirtualMachine(cmd.Client, info.Result.(types.ManagedObjectReference)) + + if err := cmd.addDevices(vm); err != nil { + return err + } + + if cmd.on { + task, err := vm.PowerOn(context.TODO()) + if err != nil { + return err + } + + _, err = task.WaitForResult(context.TODO(), nil) + if err != nil { + return err + } + } + + return nil +} + +func (cmd *create) addDevices(vm *object.VirtualMachine) error { + devices, err := vm.Device(context.TODO()) + if err != nil { + return err + } + + var add []types.BaseVirtualDevice + + if cmd.disk != "" { + controller, err := devices.FindDiskController(cmd.controller) + if err != nil { + return err + } + + disk := devices.CreateDisk(controller, cmd.Datastore.Path(cmd.disk)) + + if cmd.link { + disk = devices.ChildDisk(disk) + } + + add = append(add, disk) + } + + if cmd.iso != "" { + ide, err := devices.FindIDEController("") + if err != nil { + return err + } + + cdrom, err := devices.CreateCdrom(ide) + if err != nil { + return err + } + + add = append(add, devices.InsertIso(cdrom, cmd.Datastore.Path(cmd.iso))) + } + + netdev, err := cmd.NetworkFlag.Device() + if err != nil { + return err + } + + add = append(add, netdev) + + return vm.AddDevice(context.TODO(), add...) +} + +func (cmd *create) createVM(name string) (*object.Task, error) { + spec := types.VirtualMachineConfigSpec{ + Name: name, + GuestId: cmd.guestID, + Files: &types.VirtualMachineFileInfo{VmPathName: fmt.Sprintf("[%s]", cmd.Datastore.Name())}, + NumCPUs: cmd.cpus, + MemoryMB: int64(cmd.memory), + } + + if !cmd.force { + vmxPath := fmt.Sprintf("%s/%s.vmx", name, name) + + _, err := cmd.Datastore.Stat(context.TODO(), vmxPath) + if err == nil { + dsPath := cmd.Datastore.Path(vmxPath) + return nil, fmt.Errorf("File %s already exists", dsPath) + } + } + + if cmd.controller != "ide" { + scsi, err := object.SCSIControllerTypes().CreateSCSIController(cmd.controller) + if err != nil { + return nil, err + } + + spec.DeviceChange = append(spec.DeviceChange, &types.VirtualDeviceConfigSpec{ + Operation: types.VirtualDeviceConfigSpecOperationAdd, + Device: scsi, + }) + } + + folders, err := cmd.Datacenter.Folders(context.TODO()) + if err != nil { + return nil, err + } + + return folders.VmFolder.CreateVM(context.TODO(), spec, cmd.ResourcePool, cmd.HostSystem) +} diff --git a/vendor/github.com/vmware/govmomi/govc/vm/destroy.go b/vendor/github.com/vmware/govmomi/govc/vm/destroy.go new file mode 100644 index 000000000..9796a87ed --- /dev/null +++ b/vendor/github.com/vmware/govmomi/govc/vm/destroy.go @@ -0,0 +1,82 @@ +/* +Copyright (c) 2014-2015 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/
+
+package vm
+
+import (
+	"flag"
+
+	"github.com/vmware/govmomi/govc/cli"
+	"github.com/vmware/govmomi/govc/flags"
+	"golang.org/x/net/context"
+)
+
+type destroy struct {
+	*flags.ClientFlag
+	*flags.SearchFlag
+}
+
+func init() {
+	cli.Register("vm.destroy", &destroy{})
+}
+
+func (cmd *destroy) Register(ctx context.Context, f *flag.FlagSet) {
+	cmd.ClientFlag, ctx = flags.NewClientFlag(ctx)
+	cmd.ClientFlag.Register(ctx, f)
+
+	cmd.SearchFlag, ctx = flags.NewSearchFlag(ctx, flags.SearchVirtualMachines)
+	cmd.SearchFlag.Register(ctx, f)
+}
+
+func (cmd *destroy) Process(ctx context.Context) error {
+	if err := cmd.ClientFlag.Process(ctx); err != nil {
+		return err
+	}
+	if err := cmd.SearchFlag.Process(ctx); err != nil {
+		return err
+	}
+	return nil
+}
+
+func (cmd *destroy) Run(ctx context.Context, f *flag.FlagSet) error {
+	vms, err := cmd.VirtualMachines(f.Args())
+	if err != nil {
+		return err
+	}
+
+	for _, vm := range vms {
+		task, err := vm.PowerOff(context.TODO())
+		if err != nil {
+			return err
+		}
+
+		// Ignore error since the VM may already be in powered off state.
+		// vm.Destroy will fail if the VM is still powered on.
+		_ = task.Wait(context.TODO())
+
+		task, err = vm.Destroy(context.TODO())
+		if err != nil {
+			return err
+		}
+
+		err = task.Wait(context.TODO())
+		if err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
diff --git a/vendor/github.com/vmware/govmomi/govc/vm/disk/attach.go b/vendor/github.com/vmware/govmomi/govc/vm/disk/attach.go
new file mode 100644
index 000000000..c00126022
--- /dev/null
+++ b/vendor/github.com/vmware/govmomi/govc/vm/disk/attach.go
@@ -0,0 +1,110 @@
+/*
+Copyright (c) 2014-2015 VMware, Inc. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package disk
+
+import (
+	"flag"
+
+	"github.com/vmware/govmomi/govc/cli"
+	"github.com/vmware/govmomi/govc/flags"
+	"github.com/vmware/govmomi/vim25/types"
+	"golang.org/x/net/context"
+)
+
+type attach struct {
+	*flags.DatastoreFlag
+	*flags.VirtualMachineFlag
+
+	persist    bool
+	link       bool
+	disk       string
+	controller string
+}
+
+func init() {
+	cli.Register("vm.disk.attach", &attach{})
+}
+
+func (cmd *attach) Register(ctx context.Context, f *flag.FlagSet) {
+	cmd.DatastoreFlag, ctx = flags.NewDatastoreFlag(ctx)
+	cmd.DatastoreFlag.Register(ctx, f)
+	cmd.VirtualMachineFlag, ctx = flags.NewVirtualMachineFlag(ctx)
+	cmd.VirtualMachineFlag.Register(ctx, f)
+
+	f.BoolVar(&cmd.persist, "persist", true, "Persist attached disk")
+	f.BoolVar(&cmd.link, "link", true, "Link specified disk")
+	f.StringVar(&cmd.controller, "controller", "", "Disk controller")
+	f.StringVar(&cmd.disk, "disk", "", "Disk path name")
+}
+
+func (cmd *attach) Process(ctx context.Context) error {
+	if err := cmd.DatastoreFlag.Process(ctx); err != nil {
+		return err
+	}
+	if err := cmd.VirtualMachineFlag.Process(ctx); err != nil {
+		return err
+	}
+	return nil
+}
+
+func (cmd *attach) Run(ctx context.Context, f *flag.FlagSet) error {
+	vm, err := cmd.VirtualMachine()
+	if err != nil {
+		return err
+	}
+
+	if vm == nil {
+		return flag.ErrHelp
+	}
+
+	ds, err := cmd.Datastore()
+	if err != nil {
+		return err
+	}
+
+	devices, err := vm.Device(context.TODO())
+	if err != nil {
+		return err
+	}
+
+	controller, err := devices.FindDiskController(cmd.controller)
+	if err != nil {
+		return err
+	}
+
+	disk := devices.CreateDisk(controller, ds.Path(cmd.disk))
+	backing := disk.Backing.(*types.VirtualDiskFlatVer2BackingInfo)
+
+	if cmd.link {
+		if cmd.persist {
+			backing.DiskMode = string(types.VirtualDiskModeIndependent_persistent)
+		} else {
+			backing.DiskMode = string(types.VirtualDiskModeIndependent_nonpersistent)
+		}
+
+		disk = devices.ChildDisk(disk)
+		return vm.AddDevice(context.TODO(), disk)
+	}
+
+	if cmd.persist {
+		backing.DiskMode = string(types.VirtualDiskModePersistent)
+	} else {
+		backing.DiskMode = string(types.VirtualDiskModeNonpersistent)
+	}
+
+	return vm.AddDevice(context.TODO(), disk)
+}
diff --git a/vendor/github.com/vmware/govmomi/govc/vm/disk/create.go b/vendor/github.com/vmware/govmomi/govc/vm/disk/create.go
new file mode 100644
index 000000000..7a97577f4
--- /dev/null
+++ b/vendor/github.com/vmware/govmomi/govc/vm/disk/create.go
@@ -0,0 +1,114 @@
+/*
+Copyright (c) 2014-2015 VMware, Inc. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/ + +package disk + +import ( + "errors" + "flag" + + "github.com/vmware/govmomi/govc/cli" + "github.com/vmware/govmomi/govc/flags" + "github.com/vmware/govmomi/units" + "golang.org/x/net/context" +) + +type create struct { + *flags.DatastoreFlag + *flags.OutputFlag + *flags.VirtualMachineFlag + + controller string + Name string + Bytes units.ByteSize +} + +func init() { + cli.Register("vm.disk.create", &create{}) +} + +func (cmd *create) Register(ctx context.Context, f *flag.FlagSet) { + cmd.DatastoreFlag, ctx = flags.NewDatastoreFlag(ctx) + cmd.DatastoreFlag.Register(ctx, f) + cmd.OutputFlag, ctx = flags.NewOutputFlag(ctx) + cmd.OutputFlag.Register(ctx, f) + cmd.VirtualMachineFlag, ctx = flags.NewVirtualMachineFlag(ctx) + cmd.VirtualMachineFlag.Register(ctx, f) + + err := (&cmd.Bytes).Set("10G") + if err != nil { + panic(err) + } + + f.StringVar(&cmd.controller, "controller", "", "Disk controller") + f.StringVar(&cmd.Name, "name", "", "Name for new disk") + f.Var(&cmd.Bytes, "size", "Size of new disk") +} + +func (cmd *create) Process(ctx context.Context) error { + if err := cmd.DatastoreFlag.Process(ctx); err != nil { + return err + } + if err := cmd.OutputFlag.Process(ctx); err != nil { + return err + } + if err := cmd.VirtualMachineFlag.Process(ctx); err != nil { + return err + } + return nil +} + +func (cmd *create) Run(ctx context.Context, f *flag.FlagSet) error { + if len(cmd.Name) == 0 { + return errors.New("please specify a disk name") + } + + vm, err := cmd.VirtualMachine() + if err != nil { + return err + } + if vm == nil { + return errors.New("please specify a vm") + } + + ds, err := cmd.Datastore() + if err != nil { + return err + } + + devices, err := vm.Device(context.TODO()) + if err != nil { + return err + } + + controller, err := devices.FindDiskController(cmd.controller) + if err != nil { + return err + } + + disk := devices.CreateDisk(controller, ds.Path(cmd.Name)) + + existing := devices.SelectByBackingInfo(disk.Backing) + + if len(existing) > 0 { + cmd.Log("Disk already present\n") + return nil + } + + cmd.Log("Creating disk\n") + disk.CapacityInKB = int64(cmd.Bytes) / 1024 + return vm.AddDevice(context.TODO(), disk) +} diff --git a/vendor/github.com/vmware/govmomi/govc/vm/guest/auth.go b/vendor/github.com/vmware/govmomi/govc/vm/guest/auth.go new file mode 100644 index 000000000..beab7f1c9 --- /dev/null +++ b/vendor/github.com/vmware/govmomi/govc/vm/guest/auth.go @@ -0,0 +1,68 @@ +/* +Copyright (c) 2014 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package guest + +import ( + "flag" + "fmt" + "os" + "strings" + + "golang.org/x/net/context" + + "github.com/vmware/govmomi/vim25/types" +) + +type AuthFlag struct { + auth types.NamePasswordAuthentication +} + +func newAuthFlag(ctx context.Context) (*AuthFlag, context.Context) { + return &AuthFlag{}, ctx +} + +func (flag *AuthFlag) String() string { + return fmt.Sprintf("%s:%s", flag.auth.Username, strings.Repeat("x", len(flag.auth.Password))) +} + +func (flag *AuthFlag) Set(s string) error { + c := strings.Split(s, ":") + if len(c) > 0 { + flag.auth.Username = c[0] + if len(c) > 1 { + flag.auth.Password = c[1] + } + } + + return nil +} + +func (flag *AuthFlag) Register(ctx context.Context, f *flag.FlagSet) { + env := "GOVC_GUEST_LOGIN" + value := os.Getenv(env) + flag.Set(value) + usage := fmt.Sprintf("Guest VM credentials [%s]", env) + f.Var(flag, "l", usage) +} + +func (flag *AuthFlag) Process(ctx context.Context) error { + return nil +} + +func (flag *AuthFlag) Auth() types.BaseGuestAuthentication { + return &flag.auth +} diff --git a/vendor/github.com/vmware/govmomi/govc/vm/guest/chmod.go b/vendor/github.com/vmware/govmomi/govc/vm/guest/chmod.go new file mode 100644 index 000000000..4243af51f --- /dev/null +++ b/vendor/github.com/vmware/govmomi/govc/vm/guest/chmod.go @@ -0,0 +1,59 @@ +/* +Copyright (c) 2014-2015 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package guest + +import ( + "flag" + + "github.com/vmware/govmomi/govc/cli" + "golang.org/x/net/context" +) + +type chmod struct { + *GuestFlag + *FileAttrFlag +} + +func init() { + cli.Register("guest.chmod", &chmod{}) +} + +func (cmd *chmod) Register(ctx context.Context, f *flag.FlagSet) { + cmd.GuestFlag, ctx = newGuestFlag(ctx) + cmd.GuestFlag.Register(ctx, f) + cmd.FileAttrFlag, ctx = newFileAttrFlag(ctx) + cmd.FileAttrFlag.Register(ctx, f) +} + +func (cmd *chmod) Process(ctx context.Context) error { + if err := cmd.GuestFlag.Process(ctx); err != nil { + return err + } + if err := cmd.FileAttrFlag.Process(ctx); err != nil { + return err + } + return nil +} + +func (cmd *chmod) Run(ctx context.Context, f *flag.FlagSet) error { + m, err := cmd.FileManager() + if err != nil { + return err + } + + return m.ChangeFileAttributes(context.TODO(), cmd.Auth(), f.Arg(0), cmd.Attr()) +} diff --git a/vendor/github.com/vmware/govmomi/govc/vm/guest/download.go b/vendor/github.com/vmware/govmomi/govc/vm/guest/download.go new file mode 100644 index 000000000..336c9fd13 --- /dev/null +++ b/vendor/github.com/vmware/govmomi/govc/vm/guest/download.go @@ -0,0 +1,82 @@ +/* +Copyright (c) 2014-2015 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package guest
+
+import (
+	"flag"
+
+	"os"
+
+	"github.com/vmware/govmomi/govc/cli"
+	"golang.org/x/net/context"
+)
+
+type download struct {
+	*GuestFlag
+
+	overwrite bool
+}
+
+func init() {
+	cli.Register("guest.download", &download{})
+}
+
+func (cmd *download) Register(ctx context.Context, f *flag.FlagSet) {
+	cmd.GuestFlag, ctx = newGuestFlag(ctx)
+	cmd.GuestFlag.Register(ctx, f)
+
+	f.BoolVar(&cmd.overwrite, "f", false, "If set, the local destination file is clobbered")
+}
+
+func (cmd *download) Process(ctx context.Context) error {
+	if err := cmd.GuestFlag.Process(ctx); err != nil {
+		return err
+	}
+	return nil
+}
+
+func (cmd *download) Run(ctx context.Context, f *flag.FlagSet) error {
+	m, err := cmd.FileManager()
+	if err != nil {
+		return err
+	}
+
+	src := f.Arg(0)
+	dst := f.Arg(1)
+
+	_, err = os.Stat(dst)
+	if err == nil && !cmd.overwrite {
+		return os.ErrExist
+	}
+
+	info, err := m.InitiateFileTransferFromGuest(context.TODO(), cmd.Auth(), src)
+	if err != nil {
+		return err
+	}
+
+	u, err := cmd.ParseURL(info.Url)
+	if err != nil {
+		return err
+	}
+
+	c, err := cmd.Client()
+	if err != nil {
+		return err
+	}
+
+	return c.Client.DownloadFile(dst, u, nil)
+}
diff --git a/vendor/github.com/vmware/govmomi/govc/vm/guest/file_attr.go b/vendor/github.com/vmware/govmomi/govc/vm/guest/file_attr.go
new file mode 100644
index 000000000..3c943c02d
--- /dev/null
+++ b/vendor/github.com/vmware/govmomi/govc/vm/guest/file_attr.go
@@ -0,0 +1,47 @@
+/*
+Copyright (c) 2014 VMware, Inc. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package guest
+
+import (
+	"flag"
+
+	"golang.org/x/net/context"
+
+	"github.com/vmware/govmomi/vim25/types"
+)
+
+type FileAttrFlag struct {
+	types.GuestPosixFileAttributes
+}
+
+func newFileAttrFlag(ctx context.Context) (*FileAttrFlag, context.Context) {
+	return &FileAttrFlag{}, ctx
+}
+
+func (flag *FileAttrFlag) Register(ctx context.Context, f *flag.FlagSet) {
+	f.IntVar(&flag.OwnerId, "uid", 0, "User ID")
+	f.IntVar(&flag.GroupId, "gid", 0, "Group ID")
+	f.Int64Var(&flag.Permissions, "perm", 0, "File permissions")
+}
+
+func (flag *FileAttrFlag) Process(ctx context.Context) error {
+	return nil
+}
+
+func (flag *FileAttrFlag) Attr() types.BaseGuestFileAttributes {
+	return &flag.GuestPosixFileAttributes
+}
diff --git a/vendor/github.com/vmware/govmomi/govc/vm/guest/getenv.go b/vendor/github.com/vmware/govmomi/govc/vm/guest/getenv.go
new file mode 100644
index 000000000..193b03f6e
--- /dev/null
+++ b/vendor/github.com/vmware/govmomi/govc/vm/guest/getenv.go
@@ -0,0 +1,63 @@
+/*
+Copyright (c) 2014-2015 VMware, Inc. All Rights Reserved.
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package guest + +import ( + "flag" + "fmt" + + "github.com/vmware/govmomi/govc/cli" + "golang.org/x/net/context" +) + +type getenv struct { + *GuestFlag +} + +func init() { + cli.Register("guest.getenv", &getenv{}) +} + +func (cmd *getenv) Register(ctx context.Context, f *flag.FlagSet) { + cmd.GuestFlag, ctx = newGuestFlag(ctx) + cmd.GuestFlag.Register(ctx, f) +} + +func (cmd *getenv) Process(ctx context.Context) error { + if err := cmd.GuestFlag.Process(ctx); err != nil { + return err + } + return nil +} + +func (cmd *getenv) Run(ctx context.Context, f *flag.FlagSet) error { + m, err := cmd.ProcessManager() + if err != nil { + return err + } + + vars, err := m.ReadEnvironmentVariable(context.TODO(), cmd.Auth(), f.Args()) + if err != nil { + return err + } + + for _, v := range vars { + fmt.Printf("%s\n", v) + } + + return nil +} diff --git a/vendor/github.com/vmware/govmomi/govc/vm/guest/guest.go b/vendor/github.com/vmware/govmomi/govc/vm/guest/guest.go new file mode 100644 index 000000000..f9f6a0172 --- /dev/null +++ b/vendor/github.com/vmware/govmomi/govc/vm/guest/guest.go @@ -0,0 +1,113 @@ +/* +Copyright (c) 2014-2015 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package guest + +import ( + "errors" + "flag" + + "net/url" + + "github.com/vmware/govmomi/govc/flags" + "github.com/vmware/govmomi/guest" + "github.com/vmware/govmomi/object" + "golang.org/x/net/context" +) + +type GuestFlag struct { + *flags.ClientFlag + *flags.VirtualMachineFlag + + *AuthFlag +} + +func newGuestFlag(ctx context.Context) (*GuestFlag, context.Context) { + f := &GuestFlag{} + f.ClientFlag, ctx = flags.NewClientFlag(ctx) + f.VirtualMachineFlag, ctx = flags.NewVirtualMachineFlag(ctx) + f.AuthFlag, ctx = newAuthFlag(ctx) + return f, ctx +} + +func (flag *GuestFlag) Register(ctx context.Context, f *flag.FlagSet) { + flag.ClientFlag.Register(ctx, f) + flag.VirtualMachineFlag.Register(ctx, f) + flag.AuthFlag.Register(ctx, f) +} + +func (flag *GuestFlag) Process(ctx context.Context) error { + if err := flag.ClientFlag.Process(ctx); err != nil { + return err + } + if err := flag.VirtualMachineFlag.Process(ctx); err != nil { + return err + } + if err := flag.AuthFlag.Process(ctx); err != nil { + return err + } + return nil +} + +func (flag *GuestFlag) FileManager() (*guest.FileManager, error) { + c, err := flag.Client() + if err != nil { + return nil, err + } + + vm, err := flag.VirtualMachine() + if err != nil { + return nil, err + } + + o := guest.NewOperationsManager(c, vm.Reference()) + return o.FileManager(context.TODO()) +} + +func (flag *GuestFlag) ProcessManager() (*guest.ProcessManager, error) { + c, err := flag.Client() + if err != nil { + return nil, err + } + + vm, err := flag.VirtualMachine() + if err != nil { + return nil, err + } + + o := guest.NewOperationsManager(c, vm.Reference()) + return o.ProcessManager(context.TODO()) +} + +func (flag *GuestFlag) ParseURL(urlStr string) (*url.URL, error) { + c, err := flag.Client() + if err != nil { + return nil, err + } + + return c.Client.ParseURL(urlStr) +} + +func (flag *GuestFlag) VirtualMachine() (*object.VirtualMachine, error) { + vm, err := flag.VirtualMachineFlag.VirtualMachine() + if err != nil { + return nil, err + } + if vm == nil { + return nil, errors.New("no vm specified") + } + return vm, nil +} diff --git a/vendor/github.com/vmware/govmomi/govc/vm/guest/kill.go b/vendor/github.com/vmware/govmomi/govc/vm/guest/kill.go new file mode 100644 index 000000000..bb7f66723 --- /dev/null +++ b/vendor/github.com/vmware/govmomi/govc/vm/guest/kill.go @@ -0,0 +1,63 @@ +/* +Copyright (c) 2014-2015 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package guest + +import ( + "flag" + + "github.com/vmware/govmomi/govc/cli" + "golang.org/x/net/context" +) + +type kill struct { + *GuestFlag + + pids pidSelector +} + +func init() { + cli.Register("guest.kill", &kill{}) +} + +func (cmd *kill) Register(ctx context.Context, f *flag.FlagSet) { + cmd.GuestFlag, ctx = newGuestFlag(ctx) + cmd.GuestFlag.Register(ctx, f) + + f.Var(&cmd.pids, "p", "Process ID") +} + +func (cmd *kill) Process(ctx context.Context) error { + if err := cmd.GuestFlag.Process(ctx); err != nil { + return err + } + return nil +} + +func (cmd *kill) Run(ctx context.Context, f *flag.FlagSet) error { + m, err := cmd.ProcessManager() + if err != nil { + return err + } + + for _, pid := range cmd.pids { + if err := m.TerminateProcess(context.TODO(), cmd.Auth(), pid); err != nil { + return err + } + } + + return nil +} diff --git a/vendor/github.com/vmware/govmomi/govc/vm/guest/ls.go b/vendor/github.com/vmware/govmomi/govc/vm/guest/ls.go new file mode 100644 index 000000000..1e39f7d9d --- /dev/null +++ b/vendor/github.com/vmware/govmomi/govc/vm/guest/ls.go @@ -0,0 +1,78 @@ +/* +Copyright (c) 2014-2015 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package guest + +import ( + "flag" + "fmt" + "os" + "text/tabwriter" + + "github.com/vmware/govmomi/govc/cli" + "golang.org/x/net/context" +) + +type ls struct { + *GuestFlag +} + +func init() { + cli.Register("guest.ls", &ls{}) +} + +func (cmd *ls) Register(ctx context.Context, f *flag.FlagSet) { + cmd.GuestFlag, ctx = newGuestFlag(ctx) + cmd.GuestFlag.Register(ctx, f) +} + +func (cmd *ls) Process(ctx context.Context) error { + if err := cmd.GuestFlag.Process(ctx); err != nil { + return err + } + return nil +} + +func (cmd *ls) Run(ctx context.Context, f *flag.FlagSet) error { + m, err := cmd.FileManager() + if err != nil { + return err + } + + offset := 0 + tw := tabwriter.NewWriter(os.Stdout, 3, 0, 2, ' ', 0) + + for { + info, err := m.ListFiles(context.TODO(), cmd.Auth(), f.Arg(0), offset, 0, f.Arg(1)) + if err != nil { + return err + } + + for _, f := range info.Files { + attr := f.Attributes.GetGuestFileAttributes() // TODO: GuestPosixFileAttributes + fmt.Fprintf(tw, "%d\t%s\t%s\n", f.Size, attr.ModificationTime.Format("Mon Jan 2 15:04:05 2006"), f.Path) + } + + _ = tw.Flush() + + if info.Remaining == 0 { + break + } + offset += len(info.Files) + } + + return nil +} diff --git a/vendor/github.com/vmware/govmomi/govc/vm/guest/mkdir.go b/vendor/github.com/vmware/govmomi/govc/vm/guest/mkdir.go new file mode 100644 index 000000000..28859b040 --- /dev/null +++ b/vendor/github.com/vmware/govmomi/govc/vm/guest/mkdir.go @@ -0,0 +1,71 @@ +/* +Copyright (c) 2014-2015 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package guest + +import ( + "flag" + + "github.com/vmware/govmomi/govc/cli" + "github.com/vmware/govmomi/vim25/soap" + "github.com/vmware/govmomi/vim25/types" + "golang.org/x/net/context" +) + +type mkdir struct { + *GuestFlag + + createParents bool +} + +func init() { + cli.Register("guest.mkdir", &mkdir{}) +} + +func (cmd *mkdir) Register(ctx context.Context, f *flag.FlagSet) { + cmd.GuestFlag, ctx = newGuestFlag(ctx) + cmd.GuestFlag.Register(ctx, f) + + f.BoolVar(&cmd.createParents, "p", false, "Create intermediate directories as needed") +} + +func (cmd *mkdir) Process(ctx context.Context) error { + if err := cmd.GuestFlag.Process(ctx); err != nil { + return err + } + return nil +} + +func (cmd *mkdir) Run(ctx context.Context, f *flag.FlagSet) error { + m, err := cmd.FileManager() + if err != nil { + return err + } + + err = m.MakeDirectory(context.TODO(), cmd.Auth(), f.Arg(0), cmd.createParents) + + // ignore EEXIST if -p flag is given + if err != nil && cmd.createParents { + if soap.IsSoapFault(err) { + soapFault := soap.ToSoapFault(err) + if _, ok := soapFault.VimFault().(types.FileAlreadyExists); ok { + return nil + } + } + } + + return err +} diff --git a/vendor/github.com/vmware/govmomi/govc/vm/guest/mktemp.go b/vendor/github.com/vmware/govmomi/govc/vm/guest/mktemp.go new file mode 100644 index 000000000..ed0af945d --- /dev/null +++ b/vendor/github.com/vmware/govmomi/govc/vm/guest/mktemp.go @@ -0,0 +1,74 @@ +/* +Copyright (c) 2014-2015 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package guest + +import ( + "flag" + "fmt" + + "github.com/vmware/govmomi/govc/cli" + "golang.org/x/net/context" +) + +type mktemp struct { + *GuestFlag + + dir bool + prefix string + suffix string +} + +func init() { + cli.Register("guest.mktemp", &mktemp{}) +} + +func (cmd *mktemp) Register(ctx context.Context, f *flag.FlagSet) { + cmd.GuestFlag, ctx = newGuestFlag(ctx) + cmd.GuestFlag.Register(ctx, f) + + f.BoolVar(&cmd.dir, "d", false, "Make a directory instead of a file") + f.StringVar(&cmd.prefix, "t", "", "Prefix") + f.StringVar(&cmd.suffix, "s", "", "Suffix") +} + +func (cmd *mktemp) Process(ctx context.Context) error { + if err := cmd.GuestFlag.Process(ctx); err != nil { + return err + } + return nil +} + +func (cmd *mktemp) Run(ctx context.Context, f *flag.FlagSet) error { + m, err := cmd.FileManager() + if err != nil { + return err + } + + mk := m.CreateTemporaryFile + if cmd.dir { + mk = m.CreateTemporaryDirectory + } + + name, err := mk(context.TODO(), cmd.Auth(), cmd.prefix, cmd.suffix) + if err != nil { + return err + } + + fmt.Println(name) + + return nil +} diff --git a/vendor/github.com/vmware/govmomi/govc/vm/guest/ps.go b/vendor/github.com/vmware/govmomi/govc/vm/guest/ps.go new file mode 100644 index 000000000..c443b3bf1 --- /dev/null +++ b/vendor/github.com/vmware/govmomi/govc/vm/guest/ps.go @@ -0,0 +1,112 @@ +/* +Copyright (c) 2014-2015 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package guest + +import ( + "flag" + "fmt" + "os" + "strconv" + "text/tabwriter" + + "github.com/vmware/govmomi/govc/cli" + "golang.org/x/net/context" +) + +type ps struct { + *GuestFlag + + every bool + + pids pidSelector + uids uidSelector +} + +type pidSelector []int64 + +func (s *pidSelector) String() string { + return fmt.Sprint(*s) +} + +func (s *pidSelector) Set(value string) error { + v, err := strconv.ParseInt(value, 0, 64) + if err != nil { + return err + } + *s = append(*s, v) + return nil +} + +type uidSelector map[string]bool + +func (s uidSelector) String() string { + return "" +} + +func (s uidSelector) Set(value string) error { + s[value] = true + return nil +} + +func init() { + cli.Register("guest.ps", &ps{}) +} + +func (cmd *ps) Register(ctx context.Context, f *flag.FlagSet) { + cmd.GuestFlag, ctx = newGuestFlag(ctx) + cmd.GuestFlag.Register(ctx, f) + + cmd.uids = make(map[string]bool) + f.BoolVar(&cmd.every, "e", false, "Select all processes") + f.Var(&cmd.pids, "p", "Select by process ID") + f.Var(&cmd.uids, "U", "Select by process UID") +} + +func (cmd *ps) Process(ctx context.Context) error { + if err := cmd.GuestFlag.Process(ctx); err != nil { + return err + } + return nil +} + +func (cmd *ps) Run(ctx context.Context, f *flag.FlagSet) error { + m, err := cmd.ProcessManager() + if err != nil { + return err + } + + if !cmd.every && len(cmd.uids) == 0 { + cmd.uids[cmd.auth.Username] = true + } + + procs, err := m.ListProcesses(context.TODO(), cmd.Auth(), cmd.pids) + if err != nil { + return err + } + + tw := tabwriter.NewWriter(os.Stdout, 4, 0, 2, ' ', 0) + + fmt.Fprintf(tw, "%s\t%s\t%s\t%s\n", "UID", "PID", "STIME", "CMD") + + for _, p := range procs { + if cmd.every || cmd.uids[p.Owner] { + fmt.Fprintf(tw, "%s\t%d\t%s\t%s\n", p.Owner, p.Pid, p.StartTime.Format("15:04"), p.CmdLine) + } + } + + return tw.Flush() +} diff --git a/vendor/github.com/vmware/govmomi/govc/vm/guest/rm.go b/vendor/github.com/vmware/govmomi/govc/vm/guest/rm.go new file mode 100644 index 000000000..4c7c85f74 --- /dev/null +++ b/vendor/github.com/vmware/govmomi/govc/vm/guest/rm.go @@ -0,0 +1,53 @@ +/* +Copyright (c) 2014-2015 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package guest + +import ( + "flag" + + "github.com/vmware/govmomi/govc/cli" + "golang.org/x/net/context" +) + +type rm struct { + *GuestFlag +} + +func init() { + cli.Register("guest.rm", &rm{}) +} + +func (cmd *rm) Register(ctx context.Context, f *flag.FlagSet) { + cmd.GuestFlag, ctx = newGuestFlag(ctx) + cmd.GuestFlag.Register(ctx, f) +} + +func (cmd *rm) Process(ctx context.Context) error { + if err := cmd.GuestFlag.Process(ctx); err != nil { + return err + } + return nil +} + +func (cmd *rm) Run(ctx context.Context, f *flag.FlagSet) error { + m, err := cmd.FileManager() + if err != nil { + return err + } + + return m.DeleteFile(context.TODO(), cmd.Auth(), f.Arg(0)) +} diff --git a/vendor/github.com/vmware/govmomi/govc/vm/guest/rmdir.go b/vendor/github.com/vmware/govmomi/govc/vm/guest/rmdir.go new file mode 100644 index 000000000..723fe8932 --- /dev/null +++ b/vendor/github.com/vmware/govmomi/govc/vm/guest/rmdir.go @@ -0,0 +1,57 @@ +/* +Copyright (c) 2014-2015 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package guest + +import ( + "flag" + + "github.com/vmware/govmomi/govc/cli" + "golang.org/x/net/context" +) + +type rmdir struct { + *GuestFlag + + recursive bool +} + +func init() { + cli.Register("guest.rmdir", &rmdir{}) +} + +func (cmd *rmdir) Register(ctx context.Context, f *flag.FlagSet) { + cmd.GuestFlag, ctx = newGuestFlag(ctx) + cmd.GuestFlag.Register(ctx, f) + + f.BoolVar(&cmd.recursive, "p", false, "Recursive removal") +} + +func (cmd *rmdir) Process(ctx context.Context) error { + if err := cmd.GuestFlag.Process(ctx); err != nil { + return err + } + return nil +} + +func (cmd *rmdir) Run(ctx context.Context, f *flag.FlagSet) error { + m, err := cmd.FileManager() + if err != nil { + return err + } + + return m.DeleteDirectory(context.TODO(), cmd.Auth(), f.Arg(0), cmd.recursive) +} diff --git a/vendor/github.com/vmware/govmomi/govc/vm/guest/start.go b/vendor/github.com/vmware/govmomi/govc/vm/guest/start.go new file mode 100644 index 000000000..49818d098 --- /dev/null +++ b/vendor/github.com/vmware/govmomi/govc/vm/guest/start.go @@ -0,0 +1,87 @@ +/* +Copyright (c) 2014-2015 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package guest + +import ( + "flag" + "fmt" + "strings" + + "github.com/vmware/govmomi/govc/cli" + "github.com/vmware/govmomi/vim25/types" + "golang.org/x/net/context" +) + +type start struct { + *GuestFlag + + dir string + vars env +} + +type env []string + +func (e *env) String() string { + return fmt.Sprint(*e) +} + +func (e *env) Set(value string) error { + *e = append(*e, value) + return nil +} + +func init() { + cli.Register("guest.start", &start{}) +} + +func (cmd *start) Register(ctx context.Context, f *flag.FlagSet) { + cmd.GuestFlag, ctx = newGuestFlag(ctx) + cmd.GuestFlag.Register(ctx, f) + + f.StringVar(&cmd.dir, "C", "", "The absolute path of the working directory for the program to start") + f.Var(&cmd.vars, "e", "Set environment variable (key=val)") +} + +func (cmd *start) Process(ctx context.Context) error { + if err := cmd.GuestFlag.Process(ctx); err != nil { + return err + } + return nil +} + +func (cmd *start) Run(ctx context.Context, f *flag.FlagSet) error { + m, err := cmd.ProcessManager() + if err != nil { + return err + } + + spec := types.GuestProgramSpec{ + ProgramPath: f.Arg(0), + Arguments: strings.Join(f.Args()[1:], " "), + WorkingDirectory: cmd.dir, + EnvVariables: cmd.vars, + } + + pid, err := m.StartProgram(context.TODO(), cmd.Auth(), &spec) + if err != nil { + return err + } + + fmt.Printf("%d\n", pid) + + return nil +} diff --git a/vendor/github.com/vmware/govmomi/govc/vm/guest/upload.go b/vendor/github.com/vmware/govmomi/govc/vm/guest/upload.go new file mode 100644 index 000000000..fb19a9a5c --- /dev/null +++ b/vendor/github.com/vmware/govmomi/govc/vm/guest/upload.go @@ -0,0 +1,87 @@ +/* +Copyright (c) 2014-2015 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/
+
+package guest
+
+import (
+	"flag"
+	"os"
+
+	"github.com/vmware/govmomi/govc/cli"
+	"golang.org/x/net/context"
+)
+
+type upload struct {
+	*GuestFlag
+	*FileAttrFlag
+
+	overwrite bool
+}
+
+func init() {
+	cli.Register("guest.upload", &upload{})
+}
+
+func (cmd *upload) Register(ctx context.Context, f *flag.FlagSet) {
+	cmd.GuestFlag, ctx = newGuestFlag(ctx)
+	cmd.GuestFlag.Register(ctx, f)
+	cmd.FileAttrFlag, ctx = newFileAttrFlag(ctx)
+	cmd.FileAttrFlag.Register(ctx, f)
+
+	f.BoolVar(&cmd.overwrite, "f", false, "If set, the guest destination file is clobbered")
+}
+
+func (cmd *upload) Process(ctx context.Context) error {
+	if err := cmd.GuestFlag.Process(ctx); err != nil {
+		return err
+	}
+	if err := cmd.FileAttrFlag.Process(ctx); err != nil {
+		return err
+	}
+	return nil
+}
+
+func (cmd *upload) Run(ctx context.Context, f *flag.FlagSet) error {
+	m, err := cmd.FileManager()
+	if err != nil {
+		return err
+	}
+
+	src := f.Arg(0)
+	dst := f.Arg(1)
+
+	s, err := os.Stat(src)
+	if err != nil {
+		return err
+	}
+
+	url, err := m.InitiateFileTransferToGuest(context.TODO(), cmd.Auth(), dst, cmd.Attr(), s.Size(), cmd.overwrite)
+	if err != nil {
+		return err
+	}
+
+	u, err := cmd.ParseURL(url)
+	if err != nil {
+		return err
+	}
+
+	c, err := cmd.Client()
+	if err != nil {
+		// propagate the error instead of silently reporting success
+		return err
+	}
+
+	return c.Client.UploadFile(src, u, nil)
+}
diff --git a/vendor/github.com/vmware/govmomi/govc/vm/info.go b/vendor/github.com/vmware/govmomi/govc/vm/info.go
new file mode 100644
index 000000000..494bb8a26
--- /dev/null
+++ b/vendor/github.com/vmware/govmomi/govc/vm/info.go
@@ -0,0 +1,314 @@
+/*
+Copyright (c) 2014-2015 VMware, Inc. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/ + +package vm + +import ( + "flag" + "fmt" + "io" + "os" + "strings" + "text/tabwriter" + + "github.com/vmware/govmomi/find" + "github.com/vmware/govmomi/govc/cli" + "github.com/vmware/govmomi/govc/flags" + "github.com/vmware/govmomi/object" + "github.com/vmware/govmomi/property" + + "github.com/vmware/govmomi/units" + "github.com/vmware/govmomi/vim25/mo" + "github.com/vmware/govmomi/vim25/types" + "golang.org/x/net/context" +) + +type info struct { + *flags.ClientFlag + *flags.OutputFlag + *flags.SearchFlag + + WaitForIP bool + General bool + ExtraConfig bool + Resources bool +} + +func init() { + cli.Register("vm.info", &info{}) +} + +func (cmd *info) Register(ctx context.Context, f *flag.FlagSet) { + cmd.ClientFlag, ctx = flags.NewClientFlag(ctx) + cmd.ClientFlag.Register(ctx, f) + + cmd.OutputFlag, ctx = flags.NewOutputFlag(ctx) + cmd.OutputFlag.Register(ctx, f) + + cmd.SearchFlag, ctx = flags.NewSearchFlag(ctx, flags.SearchVirtualMachines) + cmd.SearchFlag.Register(ctx, f) + + f.BoolVar(&cmd.WaitForIP, "waitip", false, "Wait for VM to acquire IP address") + f.BoolVar(&cmd.General, "g", true, "Show general summary") + f.BoolVar(&cmd.ExtraConfig, "e", false, "Show ExtraConfig") + f.BoolVar(&cmd.Resources, "r", false, "Show resource summary") +} + +func (cmd *info) Process(ctx context.Context) error { + if err := cmd.ClientFlag.Process(ctx); err != nil { + return err + } + if err := cmd.OutputFlag.Process(ctx); err != nil { + return err + } + if err := cmd.SearchFlag.Process(ctx); err != nil { + return err + } + return nil +} + +func (cmd *info) Run(ctx context.Context, f *flag.FlagSet) error { + c, err := cmd.Client() + if err != nil { + return err + } + + vms, err := cmd.VirtualMachines(f.Args()) + if err != nil { + if _, ok := err.(*find.NotFoundError); ok { + // Continue with empty VM slice + } else { + return err + } + } + + refs := make([]types.ManagedObjectReference, 0, len(vms)) + for _, vm := range vms { + refs = append(refs, vm.Reference()) + } + + var res infoResult + var props []string + + if cmd.OutputFlag.JSON { + props = nil // Load everything + } else { + props = []string{"summary"} // Load summary + if cmd.General { + props = append(props, "guest.ipAddress") + } + if cmd.ExtraConfig { + props = append(props, "config.extraConfig") + } + if cmd.Resources { + props = append(props, "datastore", "network") + } + } + + pc := property.DefaultCollector(c) + if len(refs) != 0 { + err = pc.Retrieve(ctx, refs, props, &res.VirtualMachines) + if err != nil { + return err + } + } + + if cmd.WaitForIP { + for i, vm := range res.VirtualMachines { + if vm.Guest == nil || vm.Guest.IpAddress == "" { + _, err = vms[i].WaitForIP(ctx) + if err != nil { + return err + } + // Reload virtual machine object + err = pc.RetrieveOne(ctx, vms[i].Reference(), props, &res.VirtualMachines[i]) + if err != nil { + return err + } + } + } + } + + if !cmd.OutputFlag.JSON { + res.objects = vms + res.cmd = cmd + if err = res.collectReferences(pc, ctx); err != nil { + return err + } + } + + return cmd.WriteResult(&res) +} + +type infoResult struct { + VirtualMachines []mo.VirtualMachine + objects []*object.VirtualMachine + entities map[types.ManagedObjectReference]string + cmd *info +} + +// collectReferences builds a unique set of MORs to the set of VirtualMachines, +// so we can collect properties in a single call for each reference type {host,datastore,network}. 
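+// For example, any number of VMs spread across hosts, networks, and datastores
+// costs at most four Retrieve calls here (one per reference type in the table
+// below) rather than one round trip per referenced object.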
+func (r *infoResult) collectReferences(pc *property.Collector, ctx context.Context) error {
+	r.entities = make(map[types.ManagedObjectReference]string) // MOR -> Name map
+
+	var host []mo.HostSystem
+	var network []mo.Network
+	var dvp []mo.DistributedVirtualPortgroup
+	var datastore []mo.Datastore
+	// Table to drive inflating refs to their mo.* counterparts (dest)
+	// and save() the Name to r.entities w/o using reflection here.
+	// Note that we cannot use a []mo.ManagedEntity here: mo.Network has its own
+	// 'Name' field, so the mo.Network.ManagedEntity.Name field would not be set.
+	vrefs := map[string]*struct {
+		dest interface{}
+		refs []types.ManagedObjectReference
+		save func()
+	}{
+		"HostSystem": {
+			&host, nil, func() {
+				for _, e := range host {
+					r.entities[e.Reference()] = e.Name
+				}
+			},
+		},
+		"Network": {
+			&network, nil, func() {
+				for _, e := range network {
+					r.entities[e.Reference()] = e.Name
+				}
+			},
+		},
+		"DistributedVirtualPortgroup": {
+			&dvp, nil, func() {
+				for _, e := range dvp {
+					r.entities[e.Reference()] = e.Name
+				}
+			},
+		},
+		"Datastore": {
+			&datastore, nil, func() {
+				for _, e := range datastore {
+					r.entities[e.Reference()] = e.Name
+				}
+			},
+		},
+	}
+
+	xrefs := make(map[types.ManagedObjectReference]bool)
+	// Add MOR to vrefs[kind].refs avoiding any duplicates.
+	addRef := func(refs ...types.ManagedObjectReference) {
+		for _, ref := range refs {
+			if _, exists := xrefs[ref]; exists {
+				continue // skip the duplicate, but keep scanning the remaining refs
+			}
+			xrefs[ref] = true
+			vref := vrefs[ref.Type]
+			vref.refs = append(vref.refs, ref)
+		}
+	}
+
+	for _, vm := range r.VirtualMachines {
+		if r.cmd.General {
+			if ref := vm.Summary.Runtime.Host; ref != nil {
+				addRef(*ref)
+			}
+		}
+
+		if r.cmd.Resources {
+			addRef(vm.Datastore...)
+			addRef(vm.Network...)
+		}
+	}
+
+	for _, vref := range vrefs {
+		if vref.refs == nil {
+			continue
+		}
+		err := pc.Retrieve(ctx, vref.refs, []string{"name"}, vref.dest)
+		if err != nil {
+			return err
+		}
+		vref.save()
+	}
+
+	return nil
+}
+
+func (r *infoResult) entityNames(refs []types.ManagedObjectReference) string {
+	var names []string
+	for _, ref := range refs {
+		names = append(names, r.entities[ref])
+	}
+	return strings.Join(names, ", ")
+}
+
+func (r *infoResult) Write(w io.Writer) error {
+	// Maintain order via r.objects as Property collector does not always return results in order.
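+	// e.g. requesting [vm-2, vm-1] may come back as [vm-1, vm-2]; indexing the
+	// results by reference below lets us emit rows in the order requested.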
+ objects := make(map[types.ManagedObjectReference]mo.VirtualMachine, len(r.VirtualMachines)) + for _, o := range r.VirtualMachines { + objects[o.Reference()] = o + } + + tw := tabwriter.NewWriter(os.Stdout, 2, 0, 2, ' ', 0) + + for _, o := range r.objects { + vm := objects[o.Reference()] + s := vm.Summary + + fmt.Fprintf(tw, "Name:\t%s\n", s.Config.Name) + + if r.cmd.General { + hostName := "" + + if href := vm.Summary.Runtime.Host; href != nil { + if name, ok := r.entities[*href]; ok { + hostName = name + } + } + + fmt.Fprintf(tw, " Path:\t%s\n", o.InventoryPath) + fmt.Fprintf(tw, " UUID:\t%s\n", s.Config.Uuid) + fmt.Fprintf(tw, " Guest name:\t%s\n", s.Config.GuestFullName) + fmt.Fprintf(tw, " Memory:\t%dMB\n", s.Config.MemorySizeMB) + fmt.Fprintf(tw, " CPU:\t%d vCPU(s)\n", s.Config.NumCpu) + fmt.Fprintf(tw, " Power state:\t%s\n", s.Runtime.PowerState) + fmt.Fprintf(tw, " Boot time:\t%s\n", s.Runtime.BootTime) + fmt.Fprintf(tw, " IP address:\t%s\n", s.Guest.IpAddress) + fmt.Fprintf(tw, " Host:\t%s\n", hostName) + } + + if r.cmd.Resources { + fmt.Fprintf(tw, " CPU usage:\t%dMHz\n", s.QuickStats.OverallCpuUsage) + fmt.Fprintf(tw, " Host memory usage:\t%dMB\n", s.QuickStats.HostMemoryUsage) + fmt.Fprintf(tw, " Guest memory usage:\t%dMB\n", s.QuickStats.GuestMemoryUsage) + fmt.Fprintf(tw, " Storage uncommitted:\t%s\n", units.ByteSize(s.Storage.Uncommitted)) + fmt.Fprintf(tw, " Storage committed:\t%s\n", units.ByteSize(s.Storage.Committed)) + fmt.Fprintf(tw, " Storage unshared:\t%s\n", units.ByteSize(s.Storage.Unshared)) + fmt.Fprintf(tw, " Storage:\t%s\n", r.entityNames(vm.Datastore)) + fmt.Fprintf(tw, " Network:\t%s\n", r.entityNames(vm.Network)) + } + + if r.cmd.ExtraConfig { + fmt.Fprintf(tw, " ExtraConfig:\n") + for _, v := range vm.Config.ExtraConfig { + fmt.Fprintf(tw, " %s:\t%s\n", v.GetOptionValue().Key, v.GetOptionValue().Value) + } + } + } + + return tw.Flush() +} diff --git a/vendor/github.com/vmware/govmomi/govc/vm/ip.go b/vendor/github.com/vmware/govmomi/govc/vm/ip.go new file mode 100644 index 000000000..6c0992cc2 --- /dev/null +++ b/vendor/github.com/vmware/govmomi/govc/vm/ip.go @@ -0,0 +1,113 @@ +/* +Copyright (c) 2014-2015 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package vm + +import ( + "flag" + "fmt" + "time" + + "github.com/vmware/govmomi/govc/cli" + "github.com/vmware/govmomi/govc/flags" + "github.com/vmware/govmomi/govc/host/esxcli" + "github.com/vmware/govmomi/object" + "golang.org/x/net/context" +) + +type ip struct { + *flags.OutputFlag + *flags.SearchFlag + + esx bool +} + +func init() { + cli.Register("vm.ip", &ip{}) +} + +func (cmd *ip) Register(ctx context.Context, f *flag.FlagSet) { + cmd.OutputFlag, ctx = flags.NewOutputFlag(ctx) + cmd.OutputFlag.Register(ctx, f) + + cmd.SearchFlag, ctx = flags.NewSearchFlag(ctx, flags.SearchVirtualMachines) + cmd.SearchFlag.Register(ctx, f) + + f.BoolVar(&cmd.esx, "esxcli", false, "Use esxcli instead of guest tools") +} + +func (cmd *ip) Process(ctx context.Context) error { + if err := cmd.OutputFlag.Process(ctx); err != nil { + return err + } + if err := cmd.SearchFlag.Process(ctx); err != nil { + return err + } + return nil +} + +func (cmd *ip) Run(ctx context.Context, f *flag.FlagSet) error { + c, err := cmd.Client() + if err != nil { + return err + } + + vms, err := cmd.VirtualMachines(f.Args()) + if err != nil { + return err + } + + var get func(*object.VirtualMachine) (string, error) + + if cmd.esx { + get = func(vm *object.VirtualMachine) (string, error) { + guest := esxcli.NewGuestInfo(c) + + ticker := time.NewTicker(time.Millisecond * 500) + defer ticker.Stop() + + for { + select { + case <-ticker.C: + ip, err := guest.IpAddress(vm) + if err != nil { + return "", err + } + + if ip != "0.0.0.0" { + return ip, nil + } + } + } + } + } else { + get = func(vm *object.VirtualMachine) (string, error) { + return vm.WaitForIP(context.TODO()) + } + } + + for _, vm := range vms { + ip, err := get(vm) + if err != nil { + return err + } + + // TODO(PN): Display inventory path to VM + fmt.Fprintf(cmd, "%s\n", ip) + } + + return nil +} diff --git a/vendor/github.com/vmware/govmomi/govc/vm/network/add.go b/vendor/github.com/vmware/govmomi/govc/vm/network/add.go new file mode 100644 index 000000000..8f6006ebc --- /dev/null +++ b/vendor/github.com/vmware/govmomi/govc/vm/network/add.go @@ -0,0 +1,75 @@ +/* +Copyright (c) 2014-2015 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package network + +import ( + "errors" + "flag" + + "github.com/vmware/govmomi/govc/cli" + "github.com/vmware/govmomi/govc/flags" + "golang.org/x/net/context" +) + +type add struct { + *flags.VirtualMachineFlag + *flags.NetworkFlag +} + +func init() { + cli.Register("vm.network.add", &add{}) +} + +func (cmd *add) Register(ctx context.Context, f *flag.FlagSet) { + cmd.VirtualMachineFlag, ctx = flags.NewVirtualMachineFlag(ctx) + cmd.VirtualMachineFlag.Register(ctx, f) + cmd.NetworkFlag, ctx = flags.NewNetworkFlag(ctx) + cmd.NetworkFlag.Register(ctx, f) +} + +func (cmd *add) Process(ctx context.Context) error { + if err := cmd.VirtualMachineFlag.Process(ctx); err != nil { + return err + } + if err := cmd.NetworkFlag.Process(ctx); err != nil { + return err + } + return nil +} + +func (cmd *add) Run(ctx context.Context, f *flag.FlagSet) error { + vm, err := cmd.VirtualMachineFlag.VirtualMachine() + if err != nil { + return err + } + + if vm == nil { + return errors.New("please specify a vm") + } + + // Set network if specified as extra argument. + if f.NArg() > 0 { + _ = cmd.NetworkFlag.Set(f.Arg(0)) + } + + net, err := cmd.NetworkFlag.Device() + if err != nil { + return err + } + + return vm.AddDevice(context.TODO(), net) +} diff --git a/vendor/github.com/vmware/govmomi/govc/vm/network/change.go b/vendor/github.com/vmware/govmomi/govc/vm/network/change.go new file mode 100644 index 000000000..f7b403506 --- /dev/null +++ b/vendor/github.com/vmware/govmomi/govc/vm/network/change.go @@ -0,0 +1,107 @@ +/* +Copyright (c) 2014-2015 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package network + +import ( + "errors" + "flag" + "fmt" + + "github.com/vmware/govmomi/govc/cli" + "github.com/vmware/govmomi/govc/flags" + "github.com/vmware/govmomi/vim25/types" + "golang.org/x/net/context" +) + +type change struct { + *flags.VirtualMachineFlag + *flags.NetworkFlag +} + +func init() { + cli.Register("vm.network.change", &change{}) +} + +func (cmd *change) Register(ctx context.Context, f *flag.FlagSet) { + cmd.VirtualMachineFlag, ctx = flags.NewVirtualMachineFlag(ctx) + cmd.VirtualMachineFlag.Register(ctx, f) + cmd.NetworkFlag, ctx = flags.NewNetworkFlag(ctx) + cmd.NetworkFlag.Register(ctx, f) +} + +func (cmd *change) Process(ctx context.Context) error { + if err := cmd.VirtualMachineFlag.Process(ctx); err != nil { + return err + } + if err := cmd.NetworkFlag.Process(ctx); err != nil { + return err + } + return nil +} + +func (cmd *change) Run(ctx context.Context, f *flag.FlagSet) error { + vm, err := cmd.VirtualMachineFlag.VirtualMachine() + if err != nil { + return err + } + + if vm == nil { + return errors.New("please specify a vm") + } + + name := f.Arg(0) + + if name == "" { + return errors.New("please specify a device name") + } + + // Set network if specified as extra argument. 
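+	// Example invocation (hypothetical inventory names; the -vm flag comes from
+	// VirtualMachineFlag):
+	//   govc vm.network.change -vm my-vm ethernet-0 prod-net
+	// arg 0 ("ethernet-0") names the device to change; arg 1, when present,
+	// names the new backing network.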
+ if f.NArg() > 1 { + _ = cmd.NetworkFlag.Set(f.Arg(1)) + } + + devices, err := vm.Device(context.TODO()) + if err != nil { + return err + } + + net := devices.Find(name) + + if net == nil { + return fmt.Errorf("device '%s' not found", name) + } + + dev, err := cmd.NetworkFlag.Device() + if err != nil { + return err + } + + current := net.(types.BaseVirtualEthernetCard).GetVirtualEthernetCard() + changed := dev.(types.BaseVirtualEthernetCard).GetVirtualEthernetCard() + + current.Backing = changed.Backing + + if changed.MacAddress != "" { + current.MacAddress = changed.MacAddress + } + + if changed.AddressType != "" { + current.AddressType = changed.AddressType + } + + return vm.EditDevice(context.TODO(), net) +} diff --git a/vendor/github.com/vmware/govmomi/govc/vm/power.go b/vendor/github.com/vmware/govmomi/govc/vm/power.go new file mode 100644 index 000000000..dbbeed8f4 --- /dev/null +++ b/vendor/github.com/vmware/govmomi/govc/vm/power.go @@ -0,0 +1,161 @@ +/* +Copyright (c) 2014-2015 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package vm + +import ( + "flag" + "fmt" + + "github.com/vmware/govmomi/govc/cli" + "github.com/vmware/govmomi/govc/flags" + "github.com/vmware/govmomi/object" + "github.com/vmware/govmomi/vim25/soap" + "github.com/vmware/govmomi/vim25/types" + "golang.org/x/net/context" +) + +type power struct { + *flags.ClientFlag + *flags.SearchFlag + + On bool + Off bool + Reset bool + Reboot bool + Shutdown bool + Suspend bool + Force bool +} + +func init() { + cli.Register("vm.power", &power{}) +} + +func (cmd *power) Register(ctx context.Context, f *flag.FlagSet) { + cmd.ClientFlag, ctx = flags.NewClientFlag(ctx) + cmd.ClientFlag.Register(ctx, f) + + cmd.SearchFlag, ctx = flags.NewSearchFlag(ctx, flags.SearchVirtualMachines) + cmd.SearchFlag.Register(ctx, f) + + f.BoolVar(&cmd.On, "on", false, "Power on") + f.BoolVar(&cmd.Off, "off", false, "Power off") + f.BoolVar(&cmd.Reset, "reset", false, "Power reset") + f.BoolVar(&cmd.Suspend, "suspend", false, "Power suspend") + f.BoolVar(&cmd.Reboot, "r", false, "Reboot guest") + f.BoolVar(&cmd.Shutdown, "s", false, "Shutdown guest") + f.BoolVar(&cmd.Force, "force", false, "Force (ignore state error and hard shutdown/reboot if tools unavailable)") +} + +func (cmd *power) Process(ctx context.Context) error { + if err := cmd.ClientFlag.Process(ctx); err != nil { + return err + } + if err := cmd.SearchFlag.Process(ctx); err != nil { + return err + } + opts := []bool{cmd.On, cmd.Off, cmd.Reset, cmd.Suspend, cmd.Reboot, cmd.Shutdown} + selected := false + + for _, opt := range opts { + if opt { + if selected { + return flag.ErrHelp + } + selected = opt + } + } + + if !selected { + return flag.ErrHelp + } + + return nil +} + +func isToolsUnavailable(err error) bool { + if soap.IsSoapFault(err) { + soapFault := soap.ToSoapFault(err) + if _, ok := soapFault.VimFault().(types.ToolsUnavailable); ok { + return ok + } + } + + return false +} + +func (cmd *power) Run(ctx context.Context, f *flag.FlagSet) error { + vms, err 
:= cmd.VirtualMachines(f.Args()) + if err != nil { + return err + } + + for _, vm := range vms { + var task *object.Task + + switch { + case cmd.On: + fmt.Fprintf(cmd, "Powering on %s... ", vm.Reference()) + task, err = vm.PowerOn(context.TODO()) + case cmd.Off: + fmt.Fprintf(cmd, "Powering off %s... ", vm.Reference()) + task, err = vm.PowerOff(context.TODO()) + case cmd.Reset: + fmt.Fprintf(cmd, "Reset %s... ", vm.Reference()) + task, err = vm.Reset(context.TODO()) + case cmd.Suspend: + fmt.Fprintf(cmd, "Suspend %s... ", vm.Reference()) + task, err = vm.Suspend(context.TODO()) + case cmd.Reboot: + fmt.Fprintf(cmd, "Reboot guest %s... ", vm.Reference()) + err = vm.RebootGuest(context.TODO()) + + if err != nil && cmd.Force && isToolsUnavailable(err) { + task, err = vm.Reset(context.TODO()) + } + case cmd.Shutdown: + fmt.Fprintf(cmd, "Shutdown guest %s... ", vm.Reference()) + err = vm.ShutdownGuest(context.TODO()) + + if err != nil && cmd.Force && isToolsUnavailable(err) { + task, err = vm.PowerOff(context.TODO()) + } + } + + if err != nil { + return err + } + + if task != nil { + err = task.Wait(context.TODO()) + } + if err == nil { + fmt.Fprintf(cmd, "OK\n") + continue + } + + if cmd.Force { + fmt.Fprintf(cmd, "Error: %s\n", err) + continue + } + + return err + } + + return nil +} diff --git a/vendor/github.com/vmware/govmomi/govc/vm/question.go b/vendor/github.com/vmware/govmomi/govc/vm/question.go new file mode 100644 index 000000000..1ea6dc5a6 --- /dev/null +++ b/vendor/github.com/vmware/govmomi/govc/vm/question.go @@ -0,0 +1,98 @@ +/* +Copyright (c) 2014-2015 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package vm + +import ( + "errors" + "flag" + "fmt" + + "github.com/vmware/govmomi/govc/cli" + "github.com/vmware/govmomi/govc/flags" + "github.com/vmware/govmomi/property" + "github.com/vmware/govmomi/vim25/mo" + "github.com/vmware/govmomi/vim25/types" + "golang.org/x/net/context" +) + +type question struct { + *flags.VirtualMachineFlag + + answer string +} + +func init() { + cli.Register("vm.question", &question{}) +} + +func (cmd *question) Register(ctx context.Context, f *flag.FlagSet) { + cmd.VirtualMachineFlag, ctx = flags.NewVirtualMachineFlag(ctx) + cmd.VirtualMachineFlag.Register(ctx, f) + + f.StringVar(&cmd.answer, "answer", "", "Answer to question") +} + +func (cmd *question) Process(ctx context.Context) error { + if err := cmd.VirtualMachineFlag.Process(ctx); err != nil { + return err + } + return nil +} + +func (cmd *question) Run(ctx context.Context, f *flag.FlagSet) error { + c, err := cmd.Client() + if err != nil { + return err + } + + vm, err := cmd.VirtualMachine() + if err != nil { + return err + } + + if vm == nil { + return errors.New("No VM specified") + } + + var mvm mo.VirtualMachine + + pc := property.DefaultCollector(c) + err = pc.RetrieveOne(context.TODO(), vm.Reference(), []string{"runtime.question"}, &mvm) + if err != nil { + return err + } + + q := mvm.Runtime.Question + if q == nil { + fmt.Printf("No pending question\n") + return nil + } + + // Print question if no answer is specified + if cmd.answer == "" { + fmt.Printf("Question:\n%s\n\n", q.Text) + fmt.Printf("Possible answers:\n") + for _, e := range q.Choice.ChoiceInfo { + ed := e.(*types.ElementDescription) + fmt.Printf("%s) %s\n", ed.Key, ed.Description.Label) + } + return nil + } + + // Answer question + return vm.Answer(context.TODO(), q.Id, cmd.answer) +} diff --git a/vendor/github.com/vmware/govmomi/govc/vm/vnc.go b/vendor/github.com/vmware/govmomi/govc/vm/vnc.go new file mode 100644 index 000000000..55fe71975 --- /dev/null +++ b/vendor/github.com/vmware/govmomi/govc/vm/vnc.go @@ -0,0 +1,480 @@ +/* +Copyright (c) 2014-2015 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package vm + +import ( + "encoding/json" + "flag" + "fmt" + "io" + "reflect" + "regexp" + "strconv" + "strings" + + "github.com/vmware/govmomi/govc/cli" + "github.com/vmware/govmomi/govc/flags" + "github.com/vmware/govmomi/object" + "github.com/vmware/govmomi/property" + "github.com/vmware/govmomi/vim25" + "github.com/vmware/govmomi/vim25/mo" + "github.com/vmware/govmomi/vim25/types" + "golang.org/x/net/context" +) + +type intRange struct { + low, high int +} + +var intRangeRegexp = regexp.MustCompile("^([0-9]+)-([0-9]+)$") + +func (i *intRange) Set(s string) error { + m := intRangeRegexp.FindStringSubmatch(s) + if m == nil { + return fmt.Errorf("invalid range: %s", s) + } + + low, _ := strconv.Atoi(m[1]) + high, _ := strconv.Atoi(m[2]) + if low > high { + return fmt.Errorf("invalid range: low > high") + } + + i.low = low + i.high = high + return nil +} + +func (i *intRange) String() string { + return fmt.Sprintf("%d-%d", i.low, i.high) +} + +type vnc struct { + *flags.SearchFlag + + Enable bool + Disable bool + Port int + PortRange intRange + Password string +} + +func init() { + cmd := &vnc{} + cmd.PortRange.Set("5900-5999") + cli.Register("vm.vnc", cmd) +} + +func (cmd *vnc) Register(ctx context.Context, f *flag.FlagSet) { + cmd.SearchFlag, ctx = flags.NewSearchFlag(ctx, flags.SearchVirtualMachines) + cmd.SearchFlag.Register(ctx, f) + + f.BoolVar(&cmd.Enable, "enable", false, "Enable VNC") + f.BoolVar(&cmd.Disable, "disable", false, "Disable VNC") + f.IntVar(&cmd.Port, "port", -1, "VNC port (-1 for auto-select)") + f.Var(&cmd.PortRange, "port-range", "VNC port auto-select range") + f.StringVar(&cmd.Password, "password", "", "VNC password") +} + +func (cmd *vnc) Process(ctx context.Context) error { + if err := cmd.SearchFlag.Process(ctx); err != nil { + return err + } + // Either may be true or none may be true. + if cmd.Enable && cmd.Disable { + return flag.ErrHelp + } + + return nil +} + +func (cmd *vnc) Usage() string { + return "VM..." +} + +func (cmd *vnc) Description() string { + return `VNC controls for VM(s). + +Port numbers are automatically chosen from a range if not specified. 
+
+If neither -enable nor -disable is specified, the current state is returned.`
+}
+
+func (cmd *vnc) Run(ctx context.Context, f *flag.FlagSet) error {
+	vms, err := cmd.loadVMs(f.Args())
+	if err != nil {
+		return err
+	}
+
+	// Actuate settings in VMs
+	for _, vm := range vms {
+		switch {
+		case cmd.Enable:
+			vm.enable(cmd.Port, cmd.Password)
+		case cmd.Disable:
+			vm.disable()
+		}
+	}
+
+	// Reconfigure VMs to reflect updates
+	for _, vm := range vms {
+		err = vm.reconfigure()
+		if err != nil {
+			return err
+		}
+	}
+
+	return cmd.WriteResult(vncResult(vms))
+}
+
+func (cmd *vnc) loadVMs(args []string) ([]*vncVM, error) {
+	c, err := cmd.Client()
+	if err != nil {
+		return nil, err
+	}
+
+	vms, err := cmd.VirtualMachines(args)
+	if err != nil {
+		return nil, err
+	}
+
+	var vncVMs []*vncVM
+	for _, vm := range vms {
+		v, err := newVNCVM(c, vm)
+		if err != nil {
+			return nil, err
+		}
+		vncVMs = append(vncVMs, v)
+	}
+
+	// Assign vncHosts to vncVMs
+	hosts := make(map[string]*vncHost)
+	for _, vm := range vncVMs {
+		if h, ok := hosts[vm.hostReference().Value]; ok {
+			vm.host = h
+			continue
+		}
+
+		hs := object.NewHostSystem(c, vm.hostReference())
+		h, err := newVNCHost(c, hs, cmd.PortRange.low, cmd.PortRange.high)
+		if err != nil {
+			return nil, err
+		}
+
+		hosts[vm.hostReference().Value] = h
+		vm.host = h
+	}
+
+	return vncVMs, nil
+}
+
+type vncVM struct {
+	c    *vim25.Client
+	vm   *object.VirtualMachine
+	mvm  mo.VirtualMachine
+	host *vncHost
+
+	curOptions vncOptions
+	newOptions vncOptions
+}
+
+func newVNCVM(c *vim25.Client, vm *object.VirtualMachine) (*vncVM, error) {
+	v := &vncVM{
+		c:  c,
+		vm: vm,
+	}
+
+	virtualMachineProperties := []string{
+		"name",
+		"config.extraConfig",
+		"runtime.host",
+	}
+
+	pc := property.DefaultCollector(c)
+	err := pc.RetrieveOne(context.TODO(), vm.Reference(), virtualMachineProperties, &v.mvm)
+	if err != nil {
+		return nil, err
+	}
+
+	v.curOptions = vncOptionsFromExtraConfig(v.mvm.Config.ExtraConfig)
+	v.newOptions = vncOptionsFromExtraConfig(v.mvm.Config.ExtraConfig)
+
+	return v, nil
+}
+
+func (v *vncVM) hostReference() types.ManagedObjectReference {
+	return *v.mvm.Runtime.Host
+}
+
+func (v *vncVM) enable(port int, password string) error {
+	v.newOptions["enabled"] = "true"
+	v.newOptions["port"] = fmt.Sprintf("%d", port)
+	v.newOptions["password"] = password
+
+	// Find port if auto-select
+	if port == -1 {
+		// If the VM already has a port, reuse it.
+		// Otherwise, find an unused VNC port on the host.
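+		// (newVNCHost already removed every port found in use on the host, and
+		// popUnusedPort removes each port it hands out, so a fresh selection
+		// cannot collide with another VM configured in this run.)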
+ if p, ok := v.curOptions["port"]; ok && p != "" { + v.newOptions["port"] = p + } else { + port, err := v.host.popUnusedPort() + if err != nil { + return err + } + v.newOptions["port"] = fmt.Sprintf("%d", port) + } + } + return nil +} + +func (v *vncVM) disable() error { + v.newOptions["enabled"] = "false" + v.newOptions["port"] = "" + v.newOptions["password"] = "" + return nil +} + +func (v *vncVM) reconfigure() error { + if reflect.DeepEqual(v.curOptions, v.newOptions) { + // No changes to settings + return nil + } + + spec := types.VirtualMachineConfigSpec{ + ExtraConfig: v.newOptions.ToExtraConfig(), + } + + task, err := v.vm.Reconfigure(context.TODO(), spec) + if err != nil { + return err + } + + return task.Wait(context.TODO()) +} + +func (v *vncVM) uri() (string, error) { + ip, err := v.host.managementIP() + if err != nil { + return "", err + } + + uri := fmt.Sprintf("vnc://:%s@%s:%s", + v.newOptions["password"], + ip, + v.newOptions["port"]) + + return uri, nil +} + +func (v *vncVM) write(w io.Writer) error { + if v.newOptions["enabled"] == "true" { + uri, err := v.uri() + if err != nil { + return err + } + fmt.Printf("%s: %s\n", v.mvm.Name, uri) + } else { + fmt.Printf("%s: disabled\n", v.mvm.Name) + } + return nil +} + +type vncHost struct { + c *vim25.Client + host *object.HostSystem + ports map[int]struct{} + ip string // This field is populated by `managementIP` +} + +func newVNCHost(c *vim25.Client, host *object.HostSystem, low, high int) (*vncHost, error) { + ports := make(map[int]struct{}) + for i := low; i <= high; i++ { + ports[i] = struct{}{} + } + + used, err := loadUsedPorts(c, host.Reference()) + if err != nil { + return nil, err + } + + // Remove used ports from range + for _, u := range used { + delete(ports, u) + } + + h := &vncHost{ + c: c, + host: host, + ports: ports, + } + + return h, nil +} + +func loadUsedPorts(c *vim25.Client, host types.ManagedObjectReference) ([]int, error) { + ospec := types.ObjectSpec{ + Obj: host, + SelectSet: []types.BaseSelectionSpec{ + &types.TraversalSpec{ + Type: "HostSystem", + Path: "vm", + Skip: types.NewBool(false), + }, + }, + Skip: types.NewBool(false), + } + + pspec := types.PropertySpec{ + Type: "VirtualMachine", + PathSet: []string{"config.extraConfig"}, + } + + req := types.RetrieveProperties{ + This: c.ServiceContent.PropertyCollector, + SpecSet: []types.PropertyFilterSpec{ + { + ObjectSet: []types.ObjectSpec{ospec}, + PropSet: []types.PropertySpec{pspec}, + }, + }, + } + + var vms []mo.VirtualMachine + err := mo.RetrievePropertiesForRequest(context.TODO(), c, req, &vms) + if err != nil { + return nil, err + } + + var ports []int + for _, vm := range vms { + if vm.Config == nil || vm.Config.ExtraConfig == nil { + continue + } + + options := vncOptionsFromExtraConfig(vm.Config.ExtraConfig) + if ps, ok := options["port"]; ok && ps != "" { + pi, err := strconv.Atoi(ps) + if err == nil { + ports = append(ports, pi) + } + } + } + + return ports, nil +} + +func (h *vncHost) popUnusedPort() (int, error) { + if len(h.ports) == 0 { + return 0, fmt.Errorf("no unused ports in range") + } + + // Return first port we get when iterating + var port int + for port, _ = range h.ports { + break + } + delete(h.ports, port) + return port, nil +} + +func (h *vncHost) managementIP() (string, error) { + if h.ip != "" { + return h.ip, nil + } + + ips, err := h.host.ManagementIPs(context.TODO()) + if err != nil { + return "", err + } + + if len(ips) > 0 { + h.ip = ips[0].String() + } else { + h.ip = "" + } + + return h.ip, nil +} + +type 
vncResult []*vncVM + +func (vms vncResult) MarshalJSON() ([]byte, error) { + out := make(map[string]string) + for _, vm := range vms { + uri, err := vm.uri() + if err != nil { + return nil, err + } + out[vm.mvm.Name] = uri + } + return json.Marshal(out) +} + +func (vms vncResult) Write(w io.Writer) error { + for _, vm := range vms { + err := vm.write(w) + if err != nil { + return err + } + } + + return nil +} + +type vncOptions map[string]string + +var vncPrefix = "RemoteDisplay.vnc." + +func vncOptionsFromExtraConfig(ov []types.BaseOptionValue) vncOptions { + vo := make(vncOptions) + for _, b := range ov { + o := b.GetOptionValue() + if strings.HasPrefix(o.Key, vncPrefix) { + key := o.Key[len(vncPrefix):] + if key != "key" { + vo[key] = o.Value.(string) + } + } + } + return vo +} + +func (vo vncOptions) ToExtraConfig() []types.BaseOptionValue { + ov := make([]types.BaseOptionValue, 0, 0) + for k, v := range vo { + key := vncPrefix + k + value := v + + o := types.OptionValue{ + Key: key, + Value: &value, // Pass pointer to avoid omitempty + } + + ov = append(ov, &o) + } + + // Don't know how to deal with the key option, set it to be empty... + o := types.OptionValue{ + Key: vncPrefix + "key", + Value: new(string), // Pass pointer to avoid omitempty + } + + ov = append(ov, &o) + + return ov +} diff --git a/vendor/github.com/vmware/govmomi/guest/auth_manager.go b/vendor/github.com/vmware/govmomi/guest/auth_manager.go new file mode 100644 index 000000000..c62f012ce --- /dev/null +++ b/vendor/github.com/vmware/govmomi/guest/auth_manager.go @@ -0,0 +1,79 @@ +/* +Copyright (c) 2015 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package guest + +import ( + "github.com/vmware/govmomi/vim25" + "github.com/vmware/govmomi/vim25/methods" + "github.com/vmware/govmomi/vim25/types" + "golang.org/x/net/context" +) + +type AuthManager struct { + types.ManagedObjectReference + + vm types.ManagedObjectReference + + c *vim25.Client +} + +func (m AuthManager) Reference() types.ManagedObjectReference { + return m.ManagedObjectReference +} + +func (m AuthManager) AcquireCredentials(ctx context.Context, requestedAuth types.BaseGuestAuthentication, sessionID int64) (types.BaseGuestAuthentication, error) { + req := types.AcquireCredentialsInGuest{ + This: m.Reference(), + Vm: m.vm, + RequestedAuth: requestedAuth, + SessionID: sessionID, + } + + res, err := methods.AcquireCredentialsInGuest(ctx, m.c, &req) + if err != nil { + return nil, err + } + + return res.Returnval, nil +} + +func (m AuthManager) ReleaseCredentials(ctx context.Context, auth types.BaseGuestAuthentication) error { + req := types.ReleaseCredentialsInGuest{ + This: m.Reference(), + Vm: m.vm, + Auth: auth, + } + + _, err := methods.ReleaseCredentialsInGuest(ctx, m.c, &req) + + return err +} + +func (m AuthManager) ValidateCredentials(ctx context.Context, auth types.BaseGuestAuthentication) error { + req := types.ValidateCredentialsInGuest{ + This: m.Reference(), + Vm: m.vm, + Auth: auth, + } + + _, err := methods.ValidateCredentialsInGuest(ctx, m.c, &req) + if err != nil { + return err + } + + return nil +} diff --git a/vendor/github.com/vmware/govmomi/guest/file_manager.go b/vendor/github.com/vmware/govmomi/guest/file_manager.go new file mode 100644 index 000000000..43931e662 --- /dev/null +++ b/vendor/github.com/vmware/govmomi/guest/file_manager.go @@ -0,0 +1,202 @@ +/* +Copyright (c) 2015 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package guest + +import ( + "github.com/vmware/govmomi/vim25" + "github.com/vmware/govmomi/vim25/methods" + "github.com/vmware/govmomi/vim25/types" + "golang.org/x/net/context" +) + +type FileManager struct { + types.ManagedObjectReference + + vm types.ManagedObjectReference + + c *vim25.Client +} + +func (m FileManager) Reference() types.ManagedObjectReference { + return m.ManagedObjectReference +} + +func (m FileManager) ChangeFileAttributes(ctx context.Context, auth types.BaseGuestAuthentication, guestFilePath string, fileAttributes types.BaseGuestFileAttributes) error { + req := types.ChangeFileAttributesInGuest{ + This: m.Reference(), + Vm: m.vm, + Auth: auth, + GuestFilePath: guestFilePath, + FileAttributes: fileAttributes, + } + + _, err := methods.ChangeFileAttributesInGuest(ctx, m.c, &req) + return err +} + +func (m FileManager) CreateTemporaryDirectory(ctx context.Context, auth types.BaseGuestAuthentication, prefix, suffix string) (string, error) { + req := types.CreateTemporaryDirectoryInGuest{ + This: m.Reference(), + Vm: m.vm, + Auth: auth, + Prefix: prefix, + Suffix: suffix, + } + + res, err := methods.CreateTemporaryDirectoryInGuest(ctx, m.c, &req) + if err != nil { + return "", err + } + + return res.Returnval, nil +} + +func (m FileManager) CreateTemporaryFile(ctx context.Context, auth types.BaseGuestAuthentication, prefix, suffix string) (string, error) { + req := types.CreateTemporaryFileInGuest{ + This: m.Reference(), + Vm: m.vm, + Auth: auth, + Prefix: prefix, + Suffix: suffix, + } + + res, err := methods.CreateTemporaryFileInGuest(ctx, m.c, &req) + if err != nil { + return "", err + } + + return res.Returnval, nil +} + +func (m FileManager) DeleteDirectory(ctx context.Context, auth types.BaseGuestAuthentication, directoryPath string, recursive bool) error { + req := types.DeleteDirectoryInGuest{ + This: m.Reference(), + Vm: m.vm, + Auth: auth, + DirectoryPath: directoryPath, + Recursive: recursive, + } + + _, err := methods.DeleteDirectoryInGuest(ctx, m.c, &req) + return err +} + +func (m FileManager) DeleteFile(ctx context.Context, auth types.BaseGuestAuthentication, filePath string) error { + req := types.DeleteFileInGuest{ + This: m.Reference(), + Vm: m.vm, + Auth: auth, + FilePath: filePath, + } + + _, err := methods.DeleteFileInGuest(ctx, m.c, &req) + return err +} + +func (m FileManager) InitiateFileTransferFromGuest(ctx context.Context, auth types.BaseGuestAuthentication, guestFilePath string) (*types.FileTransferInformation, error) { + req := types.InitiateFileTransferFromGuest{ + This: m.Reference(), + Vm: m.vm, + Auth: auth, + GuestFilePath: guestFilePath, + } + + res, err := methods.InitiateFileTransferFromGuest(ctx, m.c, &req) + if err != nil { + return nil, err + } + + return &res.Returnval, nil +} + +func (m FileManager) InitiateFileTransferToGuest(ctx context.Context, auth types.BaseGuestAuthentication, guestFilePath string, fileAttributes types.BaseGuestFileAttributes, fileSize int64, overwrite bool) (string, error) { + req := types.InitiateFileTransferToGuest{ + This: m.Reference(), + Vm: m.vm, + Auth: auth, + GuestFilePath: guestFilePath, + FileAttributes: fileAttributes, + FileSize: fileSize, + Overwrite: overwrite, + } + + res, err := methods.InitiateFileTransferToGuest(ctx, m.c, &req) + if err != nil { + return "", err + } + + return res.Returnval, nil +} + +func (m FileManager) ListFiles(ctx context.Context, auth types.BaseGuestAuthentication, filePath string, index int, maxResults int, matchPattern string) (*types.GuestListFileInfo, 
error) { + req := types.ListFilesInGuest{ + This: m.Reference(), + Vm: m.vm, + Auth: auth, + FilePath: filePath, + Index: index, + MaxResults: maxResults, + MatchPattern: matchPattern, + } + + res, err := methods.ListFilesInGuest(ctx, m.c, &req) + if err != nil { + return nil, err + } + + return &res.Returnval, nil +} + +func (m FileManager) MakeDirectory(ctx context.Context, auth types.BaseGuestAuthentication, directoryPath string, createParentDirectories bool) error { + req := types.MakeDirectoryInGuest{ + This: m.Reference(), + Vm: m.vm, + Auth: auth, + DirectoryPath: directoryPath, + CreateParentDirectories: createParentDirectories, + } + + _, err := methods.MakeDirectoryInGuest(ctx, m.c, &req) + return err +} + +func (m FileManager) MoveDirectory(ctx context.Context, auth types.BaseGuestAuthentication, srcDirectoryPath string, dstDirectoryPath string) error { + req := types.MoveDirectoryInGuest{ + This: m.Reference(), + Vm: m.vm, + Auth: auth, + SrcDirectoryPath: srcDirectoryPath, + DstDirectoryPath: dstDirectoryPath, + } + + _, err := methods.MoveDirectoryInGuest(ctx, m.c, &req) + return err +} + +func (m FileManager) MoveFile(ctx context.Context, auth types.BaseGuestAuthentication, srcFilePath string, dstFilePath string, overwrite bool) error { + req := types.MoveFileInGuest{ + This: m.Reference(), + Vm: m.vm, + Auth: auth, + SrcFilePath: srcFilePath, + DstFilePath: dstFilePath, + Overwrite: overwrite, + } + + _, err := methods.MoveFileInGuest(ctx, m.c, &req) + return err +} diff --git a/vendor/github.com/vmware/govmomi/guest/operations_manager.go b/vendor/github.com/vmware/govmomi/guest/operations_manager.go new file mode 100644 index 000000000..3c5394d9d --- /dev/null +++ b/vendor/github.com/vmware/govmomi/guest/operations_manager.go @@ -0,0 +1,72 @@ +/* +Copyright (c) 2015 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package guest + +import ( + "github.com/vmware/govmomi/property" + "github.com/vmware/govmomi/vim25" + "github.com/vmware/govmomi/vim25/mo" + "github.com/vmware/govmomi/vim25/types" + "golang.org/x/net/context" +) + +type OperationsManager struct { + c *vim25.Client + vm types.ManagedObjectReference +} + +func NewOperationsManager(c *vim25.Client, vm types.ManagedObjectReference) *OperationsManager { + return &OperationsManager{c, vm} +} + +func (m OperationsManager) retrieveOne(ctx context.Context, p string, dst *mo.GuestOperationsManager) error { + pc := property.DefaultCollector(m.c) + return pc.RetrieveOne(ctx, *m.c.ServiceContent.GuestOperationsManager, []string{p}, dst) +} + +func (m OperationsManager) AuthManager(ctx context.Context) (*AuthManager, error) { + var g mo.GuestOperationsManager + + err := m.retrieveOne(ctx, "authManager", &g) + if err != nil { + return nil, err + } + + return &AuthManager{*g.AuthManager, m.vm, m.c}, nil +} + +func (m OperationsManager) FileManager(ctx context.Context) (*FileManager, error) { + var g mo.GuestOperationsManager + + err := m.retrieveOne(ctx, "fileManager", &g) + if err != nil { + return nil, err + } + + return &FileManager{*g.FileManager, m.vm, m.c}, nil +} + +func (m OperationsManager) ProcessManager(ctx context.Context) (*ProcessManager, error) { + var g mo.GuestOperationsManager + + err := m.retrieveOne(ctx, "processManager", &g) + if err != nil { + return nil, err + } + + return &ProcessManager{*g.ProcessManager, m.vm, m.c}, nil +} diff --git a/vendor/github.com/vmware/govmomi/guest/process_manager.go b/vendor/github.com/vmware/govmomi/guest/process_manager.go new file mode 100644 index 000000000..159a571d7 --- /dev/null +++ b/vendor/github.com/vmware/govmomi/guest/process_manager.go @@ -0,0 +1,96 @@ +/* +Copyright (c) 2015 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package guest + +import ( + "github.com/vmware/govmomi/vim25" + "github.com/vmware/govmomi/vim25/methods" + "github.com/vmware/govmomi/vim25/types" + "golang.org/x/net/context" +) + +type ProcessManager struct { + types.ManagedObjectReference + + vm types.ManagedObjectReference + + c *vim25.Client +} + +func (m ProcessManager) Reference() types.ManagedObjectReference { + return m.ManagedObjectReference +} + +func (m ProcessManager) ListProcesses(ctx context.Context, auth types.BaseGuestAuthentication, pids []int64) ([]types.GuestProcessInfo, error) { + req := types.ListProcessesInGuest{ + This: m.Reference(), + Vm: m.vm, + Auth: auth, + Pids: pids, + } + + res, err := methods.ListProcessesInGuest(ctx, m.c, &req) + if err != nil { + return nil, err + } + + return res.Returnval, err +} + +func (m ProcessManager) ReadEnvironmentVariable(ctx context.Context, auth types.BaseGuestAuthentication, names []string) ([]string, error) { + req := types.ReadEnvironmentVariableInGuest{ + This: m.Reference(), + Vm: m.vm, + Auth: auth, + Names: names, + } + + res, err := methods.ReadEnvironmentVariableInGuest(ctx, m.c, &req) + if err != nil { + return nil, err + } + + return res.Returnval, err +} + +func (m ProcessManager) StartProgram(ctx context.Context, auth types.BaseGuestAuthentication, spec types.BaseGuestProgramSpec) (int64, error) { + req := types.StartProgramInGuest{ + This: m.Reference(), + Vm: m.vm, + Auth: auth, + Spec: spec, + } + + res, err := methods.StartProgramInGuest(ctx, m.c, &req) + if err != nil { + return 0, err + } + + return res.Returnval, err +} + +func (m ProcessManager) TerminateProcess(ctx context.Context, auth types.BaseGuestAuthentication, pid int64) error { + req := types.TerminateProcessInGuest{ + This: m.Reference(), + Vm: m.vm, + Auth: auth, + Pid: pid, + } + + _, err := methods.TerminateProcessInGuest(ctx, m.c, &req) + return err +} diff --git a/vendor/github.com/vmware/govmomi/license/assignment_manager.go b/vendor/github.com/vmware/govmomi/license/assignment_manager.go new file mode 100644 index 000000000..509c94f99 --- /dev/null +++ b/vendor/github.com/vmware/govmomi/license/assignment_manager.go @@ -0,0 +1,69 @@ +/* +Copyright (c) 2015 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package license + +import ( + "github.com/vmware/govmomi/object" + "github.com/vmware/govmomi/vim25/methods" + "github.com/vmware/govmomi/vim25/types" + "golang.org/x/net/context" +) + +type AssignmentManager struct { + object.Common +} + +func (m AssignmentManager) QueryAssigned(ctx context.Context, id string) ([]types.LicenseAssignmentManagerLicenseAssignment, error) { + req := types.QueryAssignedLicenses{ + This: m.Reference(), + EntityId: id, + } + + res, err := methods.QueryAssignedLicenses(ctx, m.Client(), &req) + if err != nil { + return nil, err + } + + return res.Returnval, nil +} + +func (m AssignmentManager) Remove(ctx context.Context, id string) error { + req := types.RemoveAssignedLicense{ + This: m.Reference(), + EntityId: id, + } + + _, err := methods.RemoveAssignedLicense(ctx, m.Client(), &req) + + return err +} + +func (m AssignmentManager) Update(ctx context.Context, id string, key string, name string) (*types.LicenseManagerLicenseInfo, error) { + req := types.UpdateAssignedLicense{ + This: m.Reference(), + Entity: id, + LicenseKey: key, + EntityDisplayName: name, + } + + res, err := methods.UpdateAssignedLicense(ctx, m.Client(), &req) + if err != nil { + return nil, err + } + + return &res.Returnval, nil +} diff --git a/vendor/github.com/vmware/govmomi/license/manager.go b/vendor/github.com/vmware/govmomi/license/manager.go new file mode 100644 index 000000000..1e268f29d --- /dev/null +++ b/vendor/github.com/vmware/govmomi/license/manager.go @@ -0,0 +1,195 @@ +/* +Copyright (c) 2015 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
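+
+An editorial usage sketch, not part of the vendored file: listing installed
+licenses with the manager defined below and filtering them with
+InfoList.WithFeature (assumes ctx and an authenticated *vim25.Client c; the
+"drs" feature key is illustrative):
+
+	lm := license.NewManager(c)
+	all, err := lm.List(ctx)
+	if err == nil {
+		withDRS := all.WithFeature("drs")
+		_ = withDRS
+	}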
+*/
+
+package license
+
+import (
+	"strconv"
+	"strings"
+
+	"github.com/vmware/govmomi/object"
+	"github.com/vmware/govmomi/vim25"
+	"github.com/vmware/govmomi/vim25/methods"
+	"github.com/vmware/govmomi/vim25/mo"
+	"github.com/vmware/govmomi/vim25/types"
+	"golang.org/x/net/context"
+)
+
+type Manager struct {
+	object.Common
+}
+
+func NewManager(c *vim25.Client) *Manager {
+	m := Manager{
+		object.NewCommon(c, *c.ServiceContent.LicenseManager),
+	}
+
+	return &m
+}
+
+func mapToKeyValueSlice(m map[string]string) []types.KeyValue {
+	// Allocate with zero length (capacity len(m)) so that append does not
+	// leave len(m) empty KeyValue entries at the front of the slice.
+	r := make([]types.KeyValue, 0, len(m))
+	for k, v := range m {
+		r = append(r, types.KeyValue{Key: k, Value: v})
+	}
+	return r
+}
+
+func (m Manager) Add(ctx context.Context, key string, labels map[string]string) (types.LicenseManagerLicenseInfo, error) {
+	req := types.AddLicense{
+		This:       m.Reference(),
+		LicenseKey: key,
+		Labels:     mapToKeyValueSlice(labels),
+	}
+
+	res, err := methods.AddLicense(ctx, m.Client(), &req)
+	if err != nil {
+		return types.LicenseManagerLicenseInfo{}, err
+	}
+
+	return res.Returnval, nil
+}
+
+func (m Manager) Decode(ctx context.Context, key string) (types.LicenseManagerLicenseInfo, error) {
+	req := types.DecodeLicense{
+		This:       m.Reference(),
+		LicenseKey: key,
+	}
+
+	res, err := methods.DecodeLicense(ctx, m.Client(), &req)
+	if err != nil {
+		return types.LicenseManagerLicenseInfo{}, err
+	}
+
+	return res.Returnval, nil
+}
+
+func (m Manager) Remove(ctx context.Context, key string) error {
+	req := types.RemoveLicense{
+		This:       m.Reference(),
+		LicenseKey: key,
+	}
+
+	_, err := methods.RemoveLicense(ctx, m.Client(), &req)
+	return err
+}
+
+func (m Manager) Update(ctx context.Context, key string, labels map[string]string) (types.LicenseManagerLicenseInfo, error) {
+	req := types.UpdateLicense{
+		This:       m.Reference(),
+		LicenseKey: key,
+		Labels:     mapToKeyValueSlice(labels),
+	}
+
+	res, err := methods.UpdateLicense(ctx, m.Client(), &req)
+	if err != nil {
+		return types.LicenseManagerLicenseInfo{}, err
+	}
+
+	return res.Returnval, nil
+}
+
+func (m Manager) List(ctx context.Context) (InfoList, error) {
+	var mlm mo.LicenseManager
+
+	err := m.Properties(ctx, m.Reference(), []string{"licenses"}, &mlm)
+	if err != nil {
+		return nil, err
+	}
+
+	return InfoList(mlm.Licenses), nil
+}
+
+func (m Manager) AssignmentManager(ctx context.Context) (*AssignmentManager, error) {
+	var mlm mo.LicenseManager
+
+	err := m.Properties(ctx, m.Reference(), []string{"licenseAssignmentManager"}, &mlm)
+	if err != nil {
+		return nil, err
+	}
+
+	if mlm.LicenseAssignmentManager == nil {
+		return nil, object.ErrNotSupported
+	}
+
+	am := AssignmentManager{
+		object.NewCommon(m.Client(), *mlm.LicenseAssignmentManager),
+	}
+
+	return &am, nil
+}
+
+type licenseFeature struct {
+	name  string
+	level int
+}
+
+func parseLicenseFeature(feature string) *licenseFeature {
+	lf := new(licenseFeature)
+
+	f := strings.Split(feature, ":")
+
+	lf.name = f[0]
+
+	if len(f) > 1 {
+		var err error
+		lf.level, err = strconv.Atoi(f[1])
+		if err != nil {
+			lf.name = feature
+		}
+	}
+
+	return lf
+}
+
+func HasFeature(license types.LicenseManagerLicenseInfo, key string) bool {
+	feature := parseLicenseFeature(key)
+
+	for _, p := range license.Properties {
+		if p.Key != "feature" {
+			continue
+		}
+
+		kv, ok := p.Value.(types.KeyValue)
+
+		if !ok {
+			continue
+		}
+
+		lf := parseLicenseFeature(kv.Key)
+
+		if lf.name == feature.name && lf.level >= feature.level {
+			return true
+		}
+	}
+
+	return false
+}
+
+// InfoList provides helper methods for
[]types.LicenseManagerLicenseInfo +type InfoList []types.LicenseManagerLicenseInfo + +func (l InfoList) WithFeature(key string) InfoList { + var result InfoList + + for _, license := range l { + if HasFeature(license, key) { + result = append(result, license) + } + } + + return result +} diff --git a/vendor/github.com/vmware/govmomi/list/lister.go b/vendor/github.com/vmware/govmomi/list/lister.go new file mode 100644 index 000000000..fd632010c --- /dev/null +++ b/vendor/github.com/vmware/govmomi/list/lister.go @@ -0,0 +1,571 @@ +/* +Copyright (c) 2014-2015 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package list + +import ( + "fmt" + "path" + "reflect" + + "github.com/vmware/govmomi/property" + "github.com/vmware/govmomi/vim25/mo" + "github.com/vmware/govmomi/vim25/soap" + "github.com/vmware/govmomi/vim25/types" + "golang.org/x/net/context" +) + +type Element struct { + Path string + Object mo.Reference +} + +func ToElement(r mo.Reference, prefix string) Element { + var name string + + // Comments about types to be expected in folders copied from the + // documentation of the Folder managed object: + // http://pubs.vmware.com/vsphere-55/topic/com.vmware.wssdk.apiref.doc/vim.Folder.html + switch m := r.(type) { + case mo.Folder: + name = m.Name + case mo.StoragePod: + name = m.Name + + // { "vim.Datacenter" } - Identifies the root folder and its descendant + // folders. Data center folders can contain child data center folders and + // Datacenter managed objects. Datacenter objects contain virtual machine, + // compute resource, network entity, and datastore folders. + case mo.Datacenter: + name = m.Name + + // { "vim.Virtualmachine", "vim.VirtualApp" } - Identifies a virtual machine + // folder. A virtual machine folder may contain child virtual machine + // folders. It also can contain VirtualMachine managed objects, templates, + // and VirtualApp managed objects. + case mo.VirtualMachine: + name = m.Name + case mo.VirtualApp: + name = m.Name + + // { "vim.ComputeResource" } - Identifies a compute resource + // folder, which contains child compute resource folders and ComputeResource + // hierarchies. + case mo.ComputeResource: + name = m.Name + case mo.ClusterComputeResource: + name = m.Name + case mo.HostSystem: + name = m.Name + case mo.ResourcePool: + name = m.Name + + // { "vim.Network" } - Identifies a network entity folder. + // Network entity folders on a vCenter Server can contain Network, + // DistributedVirtualSwitch, and DistributedVirtualPortgroup managed objects. + // Network entity folders on an ESXi host can contain only Network objects. + case mo.Network: + name = m.Name + case mo.DistributedVirtualSwitch: + name = m.Name + case mo.DistributedVirtualPortgroup: + name = m.Name + case mo.VmwareDistributedVirtualSwitch: + name = m.Name + + // { "vim.Datastore" } - Identifies a datastore folder. Datastore folders can + // contain child datastore folders and Datastore managed objects. 
+ case mo.Datastore: + name = m.Name + + default: + panic("not implemented for type " + reflect.TypeOf(r).String()) + } + + e := Element{ + Path: path.Join(prefix, name), + Object: r, + } + + return e +} + +type Lister struct { + Collector *property.Collector + Reference types.ManagedObjectReference + Prefix string + All bool +} + +func traversable(ref types.ManagedObjectReference) bool { + switch ref.Type { + case "Folder": + case "Datacenter": + case "ComputeResource", "ClusterComputeResource": + // Treat ComputeResource and ClusterComputeResource as one and the same. + // It doesn't matter from the perspective of the lister. + case "HostSystem": + case "VirtualApp": + default: + return false + } + + return true +} + +func (l Lister) retrieveProperties(ctx context.Context, req types.RetrieveProperties, dst *[]interface{}) error { + res, err := l.Collector.RetrieveProperties(ctx, req) + if err != nil { + return err + } + + // Instead of using mo.LoadRetrievePropertiesResponse, use a custom loop to + // iterate over the results and ignore entries that have properties that + // could not be retrieved (a non-empty `missingSet` property). Since the + // returned objects are enumerated by vSphere in the first place, any object + // that has a non-empty `missingSet` property is indicative of a race + // condition in vSphere where the object was enumerated initially, but was + // removed before its properties could be collected. + for _, p := range res.Returnval { + v, err := mo.ObjectContentToType(p) + if err != nil { + // Ignore fault if it is ManagedObjectNotFound + if soap.IsVimFault(err) { + switch soap.ToVimFault(err).(type) { + case *types.ManagedObjectNotFound: + continue + } + } + + return err + } + + *dst = append(*dst, v) + } + + return nil +} + +func (l Lister) List(ctx context.Context) ([]Element, error) { + switch l.Reference.Type { + case "Folder", "StoragePod": + return l.ListFolder(ctx) + case "Datacenter": + return l.ListDatacenter(ctx) + case "ComputeResource", "ClusterComputeResource": + // Treat ComputeResource and ClusterComputeResource as one and the same. + // It doesn't matter from the perspective of the lister. + return l.ListComputeResource(ctx) + case "ResourcePool": + return l.ListResourcePool(ctx) + case "HostSystem": + return l.ListHostSystem(ctx) + case "VirtualApp": + return l.ListVirtualApp(ctx) + default: + return nil, fmt.Errorf("cannot traverse type " + l.Reference.Type) + } +} + +func (l Lister) ListFolder(ctx context.Context) ([]Element, error) { + spec := types.PropertyFilterSpec{ + ObjectSet: []types.ObjectSpec{ + { + Obj: l.Reference, + SelectSet: []types.BaseSelectionSpec{ + &types.TraversalSpec{ + Path: "childEntity", + Skip: types.NewBool(false), + Type: "Folder", + }, + }, + Skip: types.NewBool(true), + }, + }, + } + + // Retrieve all objects that we can deal with + childTypes := []string{ + "Folder", + "Datacenter", + "VirtualApp", + "VirtualMachine", + "Network", + "ComputeResource", + "ClusterComputeResource", + "Datastore", + "DistributedVirtualSwitch", + } + + for _, t := range childTypes { + pspec := types.PropertySpec{ + Type: t, + } + + if l.All { + pspec.All = types.NewBool(true) + } else { + pspec.PathSet = []string{"name"} + + // Additional basic properties. + switch t { + case "ComputeResource", "ClusterComputeResource": + // The ComputeResource and ClusterComputeResource are dereferenced in + // the ResourcePoolFlag. Make sure they always have their resourcePool + // field populated. 
+ pspec.PathSet = append(pspec.PathSet, "resourcePool") + } + } + + spec.PropSet = append(spec.PropSet, pspec) + } + + req := types.RetrieveProperties{ + SpecSet: []types.PropertyFilterSpec{spec}, + } + + var dst []interface{} + + err := l.retrieveProperties(ctx, req, &dst) + if err != nil { + return nil, err + } + + es := []Element{} + for _, v := range dst { + es = append(es, ToElement(v.(mo.Reference), l.Prefix)) + } + + return es, nil +} + +func (l Lister) ListDatacenter(ctx context.Context) ([]Element, error) { + ospec := types.ObjectSpec{ + Obj: l.Reference, + Skip: types.NewBool(true), + } + + // Include every datastore folder in the select set + fields := []string{ + "vmFolder", + "hostFolder", + "datastoreFolder", + "networkFolder", + } + + for _, f := range fields { + tspec := types.TraversalSpec{ + Path: f, + Skip: types.NewBool(false), + Type: "Datacenter", + } + + ospec.SelectSet = append(ospec.SelectSet, &tspec) + } + + pspec := types.PropertySpec{ + Type: "Folder", + } + + if l.All { + pspec.All = types.NewBool(true) + } else { + pspec.PathSet = []string{"name"} + } + + req := types.RetrieveProperties{ + SpecSet: []types.PropertyFilterSpec{ + { + ObjectSet: []types.ObjectSpec{ospec}, + PropSet: []types.PropertySpec{pspec}, + }, + }, + } + + var dst []interface{} + + err := l.retrieveProperties(ctx, req, &dst) + if err != nil { + return nil, err + } + + es := []Element{} + for _, v := range dst { + es = append(es, ToElement(v.(mo.Reference), l.Prefix)) + } + + return es, nil +} + +func (l Lister) ListComputeResource(ctx context.Context) ([]Element, error) { + ospec := types.ObjectSpec{ + Obj: l.Reference, + Skip: types.NewBool(true), + } + + fields := []string{ + "host", + "resourcePool", + } + + for _, f := range fields { + tspec := types.TraversalSpec{ + Path: f, + Skip: types.NewBool(false), + Type: "ComputeResource", + } + + ospec.SelectSet = append(ospec.SelectSet, &tspec) + } + + childTypes := []string{ + "HostSystem", + "ResourcePool", + } + + var pspecs []types.PropertySpec + for _, t := range childTypes { + pspec := types.PropertySpec{ + Type: t, + } + + if l.All { + pspec.All = types.NewBool(true) + } else { + pspec.PathSet = []string{"name"} + } + + pspecs = append(pspecs, pspec) + } + + req := types.RetrieveProperties{ + SpecSet: []types.PropertyFilterSpec{ + { + ObjectSet: []types.ObjectSpec{ospec}, + PropSet: pspecs, + }, + }, + } + + var dst []interface{} + + err := l.retrieveProperties(ctx, req, &dst) + if err != nil { + return nil, err + } + + es := []Element{} + for _, v := range dst { + es = append(es, ToElement(v.(mo.Reference), l.Prefix)) + } + + return es, nil +} + +func (l Lister) ListResourcePool(ctx context.Context) ([]Element, error) { + ospec := types.ObjectSpec{ + Obj: l.Reference, + Skip: types.NewBool(true), + } + + fields := []string{ + "resourcePool", + } + + for _, f := range fields { + tspec := types.TraversalSpec{ + Path: f, + Skip: types.NewBool(false), + Type: "ResourcePool", + } + + ospec.SelectSet = append(ospec.SelectSet, &tspec) + } + + childTypes := []string{ + "ResourcePool", + } + + var pspecs []types.PropertySpec + for _, t := range childTypes { + pspec := types.PropertySpec{ + Type: t, + } + + if l.All { + pspec.All = types.NewBool(true) + } else { + pspec.PathSet = []string{"name"} + } + + pspecs = append(pspecs, pspec) + } + + req := types.RetrieveProperties{ + SpecSet: []types.PropertyFilterSpec{ + { + ObjectSet: []types.ObjectSpec{ospec}, + PropSet: pspecs, + }, + }, + } + + var dst []interface{} + + err := 
l.retrieveProperties(ctx, req, &dst) + if err != nil { + return nil, err + } + + es := []Element{} + for _, v := range dst { + es = append(es, ToElement(v.(mo.Reference), l.Prefix)) + } + + return es, nil +} + +func (l Lister) ListHostSystem(ctx context.Context) ([]Element, error) { + ospec := types.ObjectSpec{ + Obj: l.Reference, + Skip: types.NewBool(true), + } + + fields := []string{ + "datastore", + "network", + "vm", + } + + for _, f := range fields { + tspec := types.TraversalSpec{ + Path: f, + Skip: types.NewBool(false), + Type: "HostSystem", + } + + ospec.SelectSet = append(ospec.SelectSet, &tspec) + } + + childTypes := []string{ + "Datastore", + "Network", + "VirtualMachine", + } + + var pspecs []types.PropertySpec + for _, t := range childTypes { + pspec := types.PropertySpec{ + Type: t, + } + + if l.All { + pspec.All = types.NewBool(true) + } else { + pspec.PathSet = []string{"name"} + } + + pspecs = append(pspecs, pspec) + } + + req := types.RetrieveProperties{ + SpecSet: []types.PropertyFilterSpec{ + { + ObjectSet: []types.ObjectSpec{ospec}, + PropSet: pspecs, + }, + }, + } + + var dst []interface{} + + err := l.retrieveProperties(ctx, req, &dst) + if err != nil { + return nil, err + } + + es := []Element{} + for _, v := range dst { + es = append(es, ToElement(v.(mo.Reference), l.Prefix)) + } + + return es, nil +} + +func (l Lister) ListVirtualApp(ctx context.Context) ([]Element, error) { + ospec := types.ObjectSpec{ + Obj: l.Reference, + Skip: types.NewBool(true), + } + + fields := []string{ + "resourcePool", + "vm", + } + + for _, f := range fields { + tspec := types.TraversalSpec{ + Path: f, + Skip: types.NewBool(false), + Type: "VirtualApp", + } + + ospec.SelectSet = append(ospec.SelectSet, &tspec) + } + + childTypes := []string{ + "ResourcePool", + "VirtualMachine", + } + + var pspecs []types.PropertySpec + for _, t := range childTypes { + pspec := types.PropertySpec{ + Type: t, + } + + if l.All { + pspec.All = types.NewBool(true) + } else { + pspec.PathSet = []string{"name"} + } + + pspecs = append(pspecs, pspec) + } + + req := types.RetrieveProperties{ + SpecSet: []types.PropertyFilterSpec{ + { + ObjectSet: []types.ObjectSpec{ospec}, + PropSet: pspecs, + }, + }, + } + + var dst []interface{} + + err := l.retrieveProperties(ctx, req, &dst) + if err != nil { + return nil, err + } + + es := []Element{} + for _, v := range dst { + es = append(es, ToElement(v.(mo.Reference), l.Prefix)) + } + + return es, nil +} diff --git a/vendor/github.com/vmware/govmomi/list/path.go b/vendor/github.com/vmware/govmomi/list/path.go new file mode 100644 index 000000000..f3a106520 --- /dev/null +++ b/vendor/github.com/vmware/govmomi/list/path.go @@ -0,0 +1,44 @@ +/* +Copyright (c) 2014 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package list + +import ( + "path" + "strings" +) + +func ToParts(p string) []string { + p = path.Clean(p) + if p == "/" { + return []string{} + } + + if len(p) > 0 { + // Prefix ./ if relative + if p[0] != '/' && p[0] != '.' 
{
+			p = "./" + p
+		}
+	}
+
+	ps := strings.Split(p, "/")
+	if ps[0] == "" {
+		// Start at root
+		ps = ps[1:]
+	}
+
+	return ps
+}
diff --git a/vendor/github.com/vmware/govmomi/list/path_test.go b/vendor/github.com/vmware/govmomi/list/path_test.go
new file mode 100644
index 000000000..8cb73c384
--- /dev/null
+++ b/vendor/github.com/vmware/govmomi/list/path_test.go
@@ -0,0 +1,93 @@
+/*
+Copyright (c) 2014 VMware, Inc. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package list
+
+import (
+	"reflect"
+	"testing"
+)
+
+func TestToParts(t *testing.T) {
+	tests := []struct {
+		In  string
+		Out []string
+	}{
+		{
+			In:  "/",
+			Out: []string{},
+		},
+		{
+			In:  "/foo",
+			Out: []string{"foo"},
+		},
+		{
+			In:  "/foo/..",
+			Out: []string{},
+		},
+		{
+			In:  "/./foo",
+			Out: []string{"foo"},
+		},
+		{
+			In:  "/../foo",
+			Out: []string{"foo"},
+		},
+		{
+			In:  "/foo/bar",
+			Out: []string{"foo", "bar"},
+		},
+		{
+			In:  "/foo/bar/..",
+			Out: []string{"foo"},
+		},
+		{
+			In:  "",
+			Out: []string{"."},
+		},
+		{
+			In:  ".",
+			Out: []string{"."},
+		},
+		{
+			In:  "foo",
+			Out: []string{".", "foo"},
+		},
+		{
+			In:  "foo/..",
+			Out: []string{"."},
+		},
+		{
+			In:  "./foo",
+			Out: []string{".", "foo"},
+		},
+		{
+			In:  "../foo", // Special case...
+			Out: []string{"..", "foo"},
+		},
+		{
+			In:  "foo/bar/..",
+			Out: []string{".", "foo"},
+		},
+	}
+
+	for _, test := range tests {
+		out := ToParts(test.In)
+		if !reflect.DeepEqual(test.Out, out) {
+			t.Errorf("Expected %s to return: %#v, actual: %#v", test.In, test.Out, out)
+		}
+	}
+}
diff --git a/vendor/github.com/vmware/govmomi/list/recurser.go b/vendor/github.com/vmware/govmomi/list/recurser.go
new file mode 100644
index 000000000..4b78415b3
--- /dev/null
+++ b/vendor/github.com/vmware/govmomi/list/recurser.go
@@ -0,0 +1,97 @@
+/*
+Copyright (c) 2014-2015 VMware, Inc. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package list
+
+import (
+	"path"
+	"path/filepath"
+
+	"github.com/vmware/govmomi/property"
+	"golang.org/x/net/context"
+)
+
+type Recurser struct {
+	Collector *property.Collector
+
+	// All configures the Recurser to fetch complete objects for leaf nodes.
+	All bool
+
+	// TraverseLeafs configures the Recurser to traverse traversable leaf nodes.
+	// This is typically set to true when used from the ls command, where listing
+	// a folder means listing its contents. This is typically set to false for
+	// commands that take managed entities that are not folders as input.
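+	//
+	// For example (editorial sketch, not vendored code; assumes ctx, a
+	// *property.Collector pc, and a root Element for an inventory folder):
+	//
+	//	r := Recurser{Collector: pc, TraverseLeafs: true}
+	//	matches, err := r.Recurse(ctx, root, ToParts("./vm/my-vm-*"))
+	//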
+ TraverseLeafs bool +} + +func (r Recurser) Recurse(ctx context.Context, root Element, parts []string) ([]Element, error) { + if len(parts) == 0 { + // Include non-traversable leaf elements in result. For example, consider + // the pattern "./vm/my-vm-*", where the pattern should match the VMs and + // not try to traverse them. + // + // Include traversable leaf elements in result, if the TraverseLeafs + // field is set to false. + // + if !traversable(root.Object.Reference()) || !r.TraverseLeafs { + return []Element{root}, nil + } + } + + k := Lister{ + Collector: r.Collector, + Reference: root.Object.Reference(), + Prefix: root.Path, + } + + if r.All && len(parts) < 2 { + k.All = true + } + + in, err := k.List(ctx) + if err != nil { + return nil, err + } + + // This folder is a leaf as far as the glob goes. + if len(parts) == 0 { + return in, nil + } + + pattern := parts[0] + parts = parts[1:] + + var out []Element + for _, e := range in { + matched, err := filepath.Match(pattern, path.Base(e.Path)) + if err != nil { + return nil, err + } + + if !matched { + continue + } + + nres, err := r.Recurse(ctx, e, parts) + if err != nil { + return nil, err + } + + out = append(out, nres...) + } + + return out, nil +} diff --git a/vendor/github.com/vmware/govmomi/object/authorization_manager.go b/vendor/github.com/vmware/govmomi/object/authorization_manager.go new file mode 100644 index 000000000..60f063b9d --- /dev/null +++ b/vendor/github.com/vmware/govmomi/object/authorization_manager.go @@ -0,0 +1,108 @@ +/* +Copyright (c) 2015 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
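+
+An editorial usage sketch, not part of the vendored file: resolving a role by
+name with the AuthorizationManager defined below (assumes ctx and an
+authenticated *vim25.Client c; "Admin" is one of the built-in role names):
+
+	am := object.NewAuthorizationManager(c)
+	roles, err := am.RoleList(ctx)
+	if err == nil {
+		if admin := roles.ByName("Admin"); admin != nil {
+			_ = admin.RoleId
+		}
+	}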
+*/ + +package object + +import ( + "github.com/vmware/govmomi/vim25" + "github.com/vmware/govmomi/vim25/methods" + "github.com/vmware/govmomi/vim25/mo" + "github.com/vmware/govmomi/vim25/types" + "golang.org/x/net/context" +) + +type AuthorizationManager struct { + Common +} + +func NewAuthorizationManager(c *vim25.Client) *AuthorizationManager { + m := AuthorizationManager{ + Common: NewCommon(c, *c.ServiceContent.AuthorizationManager), + } + + return &m +} + +type AuthorizationRoleList []types.AuthorizationRole + +func (l AuthorizationRoleList) ById(id int) *types.AuthorizationRole { + for _, role := range l { + if role.RoleId == id { + return &role + } + } + + return nil +} + +func (l AuthorizationRoleList) ByName(name string) *types.AuthorizationRole { + for _, role := range l { + if role.Name == name { + return &role + } + } + + return nil +} + +func (m AuthorizationManager) RoleList(ctx context.Context) (AuthorizationRoleList, error) { + var am mo.AuthorizationManager + + err := m.Properties(ctx, m.Reference(), []string{"roleList"}, &am) + if err != nil { + return nil, err + } + + return AuthorizationRoleList(am.RoleList), nil +} + +func (m AuthorizationManager) RetrieveEntityPermissions(ctx context.Context, entity types.ManagedObjectReference, inherited bool) ([]types.Permission, error) { + req := types.RetrieveEntityPermissions{ + This: m.Reference(), + Entity: entity, + Inherited: inherited, + } + + res, err := methods.RetrieveEntityPermissions(ctx, m.Client(), &req) + if err != nil { + return nil, err + } + + return res.Returnval, nil +} + +func (m AuthorizationManager) RemoveEntityPermission(ctx context.Context, entity types.ManagedObjectReference, user string, isGroup bool) error { + req := types.RemoveEntityPermission{ + This: m.Reference(), + Entity: entity, + User: user, + IsGroup: isGroup, + } + + _, err := methods.RemoveEntityPermission(ctx, m.Client(), &req) + return err +} + +func (m AuthorizationManager) SetEntityPermissions(ctx context.Context, entity types.ManagedObjectReference, permission []types.Permission) error { + req := types.SetEntityPermissions{ + This: m.Reference(), + Entity: entity, + Permission: permission, + } + + _, err := methods.SetEntityPermissions(ctx, m.Client(), &req) + return err +} diff --git a/vendor/github.com/vmware/govmomi/object/cluster_compute_resource.go b/vendor/github.com/vmware/govmomi/object/cluster_compute_resource.go new file mode 100644 index 000000000..3cd52f705 --- /dev/null +++ b/vendor/github.com/vmware/govmomi/object/cluster_compute_resource.go @@ -0,0 +1,87 @@ +/* +Copyright (c) 2015 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
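+
+An editorial usage sketch, not part of the vendored file: adding a host to the
+cluster with the AddHost method defined below (assumes ctx and a
+*ClusterComputeResource ccr; the host name and credentials are placeholders,
+and types.HostConnectSpec fields are assumed from the vim25 types package):
+
+	spec := types.HostConnectSpec{
+		HostName: "esx1.example.com",
+		UserName: "root",
+		Password: "secret",
+	}
+	task, err := ccr.AddHost(ctx, spec, true, nil, nil)
+	if err == nil {
+		_ = task // wait on or inspect the returned Task
+	}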
+*/
+
+package object
+
+import (
+	"github.com/vmware/govmomi/vim25"
+	"github.com/vmware/govmomi/vim25/methods"
+	"github.com/vmware/govmomi/vim25/types"
+	"golang.org/x/net/context"
+)
+
+type ClusterComputeResource struct {
+	ComputeResource
+
+	InventoryPath string
+}
+
+func NewClusterComputeResource(c *vim25.Client, ref types.ManagedObjectReference) *ClusterComputeResource {
+	return &ClusterComputeResource{
+		ComputeResource: *NewComputeResource(c, ref),
+	}
+}
+
+func (c ClusterComputeResource) ReconfigureCluster(ctx context.Context, spec types.ClusterConfigSpec) (*Task, error) {
+	req := types.ReconfigureCluster_Task{
+		This:   c.Reference(),
+		Spec:   spec,
+		Modify: true,
+	}
+
+	res, err := methods.ReconfigureCluster_Task(ctx, c.c, &req)
+	if err != nil {
+		return nil, err
+	}
+
+	return NewTask(c.c, res.Returnval), nil
+}
+
+func (c ClusterComputeResource) AddHost(ctx context.Context, spec types.HostConnectSpec, asConnected bool, license *string, resourcePool *types.ManagedObjectReference) (*Task, error) {
+	req := types.AddHost_Task{
+		This:        c.Reference(),
+		Spec:        spec,
+		AsConnected: asConnected,
+	}
+
+	if license != nil {
+		req.License = *license
+	}
+
+	if resourcePool != nil {
+		req.ResourcePool = resourcePool
+	}
+
+	res, err := methods.AddHost_Task(ctx, c.c, &req)
+	if err != nil {
+		return nil, err
+	}
+
+	return NewTask(c.c, res.Returnval), nil
+}
+
+func (c ClusterComputeResource) Destroy(ctx context.Context) (*Task, error) {
+	req := types.Destroy_Task{
+		This: c.Reference(),
+	}
+
+	res, err := methods.Destroy_Task(ctx, c.c, &req)
+	if err != nil {
+		return nil, err
+	}
+
+	return NewTask(c.c, res.Returnval), nil
+}
diff --git a/vendor/github.com/vmware/govmomi/object/cluster_compute_resource_test.go b/vendor/github.com/vmware/govmomi/object/cluster_compute_resource_test.go
new file mode 100644
index 000000000..684f4a671
--- /dev/null
+++ b/vendor/github.com/vmware/govmomi/object/cluster_compute_resource_test.go
@@ -0,0 +1,20 @@
+/*
+Copyright (c) 2015 VMware, Inc. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package object
+
+// ClusterComputeResource should implement the Reference interface.
+var _ Reference = ClusterComputeResource{}
diff --git a/vendor/github.com/vmware/govmomi/object/common.go b/vendor/github.com/vmware/govmomi/object/common.go
new file mode 100644
index 000000000..3e641441a
--- /dev/null
+++ b/vendor/github.com/vmware/govmomi/object/common.go
@@ -0,0 +1,71 @@
+/*
+Copyright (c) 2015 VMware, Inc. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
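+
+An editorial usage sketch, not part of the vendored file: Common is embedded by
+every object type in this package, and its Properties method is the typical way
+to fetch managed object properties (assumes ctx, an object with Common
+embedded, e.g. a *VirtualMachine vm, and mo.VirtualMachine from vim25/mo):
+
+	var mvm mo.VirtualMachine
+	err := vm.Properties(ctx, vm.Reference(), []string{"name"}, &mvm)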
+*/ + +package object + +import ( + "errors" + "fmt" + + "github.com/vmware/govmomi/property" + "github.com/vmware/govmomi/vim25" + "github.com/vmware/govmomi/vim25/methods" + "github.com/vmware/govmomi/vim25/types" + "golang.org/x/net/context" +) + +var ( + ErrNotSupported = errors.New("not supported (vCenter only)") +) + +// Common contains the fields and functions common to all objects. +type Common struct { + c *vim25.Client + r types.ManagedObjectReference +} + +func (c Common) String() string { + return fmt.Sprintf("%v", c.Reference()) +} + +func NewCommon(c *vim25.Client, r types.ManagedObjectReference) Common { + return Common{c: c, r: r} +} + +func (c Common) Reference() types.ManagedObjectReference { + return c.r +} + +func (c Common) Client() *vim25.Client { + return c.c +} + +func (c Common) Properties(ctx context.Context, r types.ManagedObjectReference, ps []string, dst interface{}) error { + return property.DefaultCollector(c.c).RetrieveOne(ctx, r, ps, dst) +} + +func (c Common) Destroy(ctx context.Context) (*Task, error) { + req := types.Destroy_Task{ + This: c.Reference(), + } + + res, err := methods.Destroy_Task(ctx, c.c, &req) + if err != nil { + return nil, err + } + + return NewTask(c.c, res.Returnval), nil +} diff --git a/vendor/github.com/vmware/govmomi/object/compute_resource.go b/vendor/github.com/vmware/govmomi/object/compute_resource.go new file mode 100644 index 000000000..328c2e07c --- /dev/null +++ b/vendor/github.com/vmware/govmomi/object/compute_resource.go @@ -0,0 +1,126 @@ +/* +Copyright (c) 2015 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
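+
+An editorial usage sketch, not part of the vendored file: walking from a
+compute resource to its hosts and root resource pool with the accessors defined
+below (assumes ctx and a *ComputeResource cr):
+
+	hosts, _ := cr.Hosts(ctx)
+	pool, _ := cr.ResourcePool(ctx)
+	_, _ = hosts, pool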
+*/ + +package object + +import ( + "path" + + "github.com/vmware/govmomi/property" + "github.com/vmware/govmomi/vim25" + "github.com/vmware/govmomi/vim25/methods" + "github.com/vmware/govmomi/vim25/mo" + "github.com/vmware/govmomi/vim25/types" + "golang.org/x/net/context" +) + +type ComputeResource struct { + Common + + InventoryPath string +} + +func NewComputeResource(c *vim25.Client, ref types.ManagedObjectReference) *ComputeResource { + return &ComputeResource{ + Common: NewCommon(c, ref), + } +} + +func (c ComputeResource) Hosts(ctx context.Context) ([]*HostSystem, error) { + var cr mo.ComputeResource + + err := c.Properties(ctx, c.Reference(), []string{"host"}, &cr) + if err != nil { + return nil, err + } + + if len(cr.Host) == 0 { + return nil, nil + } + + var hs []mo.HostSystem + pc := property.DefaultCollector(c.Client()) + err = pc.Retrieve(ctx, cr.Host, []string{"name"}, &hs) + if err != nil { + return nil, err + } + + var hosts []*HostSystem + + for _, h := range hs { + host := NewHostSystem(c.Client(), h.Reference()) + host.InventoryPath = path.Join(c.InventoryPath, h.Name) + hosts = append(hosts, host) + } + + return hosts, nil +} + +func (c ComputeResource) Datastores(ctx context.Context) ([]*Datastore, error) { + var cr mo.ComputeResource + + err := c.Properties(ctx, c.Reference(), []string{"datastore"}, &cr) + if err != nil { + return nil, err + } + + var dss []*Datastore + for _, ref := range cr.Datastore { + ds := NewDatastore(c.c, ref) + dss = append(dss, ds) + } + + return dss, nil +} + +func (c ComputeResource) ResourcePool(ctx context.Context) (*ResourcePool, error) { + var cr mo.ComputeResource + + err := c.Properties(ctx, c.Reference(), []string{"resourcePool"}, &cr) + if err != nil { + return nil, err + } + + return NewResourcePool(c.c, *cr.ResourcePool), nil +} + +func (c ComputeResource) Reconfigure(ctx context.Context, spec types.BaseComputeResourceConfigSpec, modify bool) (*Task, error) { + req := types.ReconfigureComputeResource_Task{ + This: c.Reference(), + Spec: spec, + Modify: modify, + } + + res, err := methods.ReconfigureComputeResource_Task(ctx, c.c, &req) + if err != nil { + return nil, err + } + + return NewTask(c.c, res.Returnval), nil +} + +func (c ComputeResource) Destroy(ctx context.Context) (*Task, error) { + req := types.Destroy_Task{ + This: c.Reference(), + } + + res, err := methods.Destroy_Task(ctx, c.c, &req) + if err != nil { + return nil, err + } + + return NewTask(c.c, res.Returnval), nil +} diff --git a/vendor/github.com/vmware/govmomi/object/compute_resource_test.go b/vendor/github.com/vmware/govmomi/object/compute_resource_test.go new file mode 100644 index 000000000..0f29d58e9 --- /dev/null +++ b/vendor/github.com/vmware/govmomi/object/compute_resource_test.go @@ -0,0 +1,20 @@ +/* +Copyright (c) 2015 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package object + +// ComputeResource should implement the Reference interface. 
+var _ Reference = ComputeResource{} diff --git a/vendor/github.com/vmware/govmomi/object/custom_fields_manager.go b/vendor/github.com/vmware/govmomi/object/custom_fields_manager.go new file mode 100644 index 000000000..5b5e91829 --- /dev/null +++ b/vendor/github.com/vmware/govmomi/object/custom_fields_manager.go @@ -0,0 +1,135 @@ +/* +Copyright (c) 2015 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package object + +import ( + "errors" + "strconv" + + "github.com/vmware/govmomi/vim25" + "github.com/vmware/govmomi/vim25/methods" + "github.com/vmware/govmomi/vim25/mo" + "github.com/vmware/govmomi/vim25/types" + "golang.org/x/net/context" +) + +var ( + ErrKeyNameNotFound = errors.New("key name not found") +) + +type CustomFieldsManager struct { + Common +} + +// GetCustomFieldsManager wraps NewCustomFieldsManager, returning ErrNotSupported +// when the client is not connected to a vCenter instance. +func GetCustomFieldsManager(c *vim25.Client) (*CustomFieldsManager, error) { + if c.ServiceContent.CustomFieldsManager == nil { + return nil, ErrNotSupported + } + return NewCustomFieldsManager(c), nil +} + +func NewCustomFieldsManager(c *vim25.Client) *CustomFieldsManager { + m := CustomFieldsManager{ + Common: NewCommon(c, *c.ServiceContent.CustomFieldsManager), + } + + return &m +} + +func (m CustomFieldsManager) Add(ctx context.Context, name string, moType string, fieldDefPolicy *types.PrivilegePolicyDef, fieldPolicy *types.PrivilegePolicyDef) (*types.CustomFieldDef, error) { + req := types.AddCustomFieldDef{ + This: m.Reference(), + Name: name, + MoType: moType, + FieldDefPolicy: fieldDefPolicy, + FieldPolicy: fieldPolicy, + } + + res, err := methods.AddCustomFieldDef(ctx, m.c, &req) + if err != nil { + return nil, err + } + + return &res.Returnval, nil +} + +func (m CustomFieldsManager) Remove(ctx context.Context, key int) error { + req := types.RemoveCustomFieldDef{ + This: m.Reference(), + Key: key, + } + + _, err := methods.RemoveCustomFieldDef(ctx, m.c, &req) + return err +} + +func (m CustomFieldsManager) Rename(ctx context.Context, key int, name string) error { + req := types.RenameCustomFieldDef{ + This: m.Reference(), + Key: key, + Name: name, + } + + _, err := methods.RenameCustomFieldDef(ctx, m.c, &req) + return err +} + +func (m CustomFieldsManager) Set(ctx context.Context, entity types.ManagedObjectReference, key int, value string) error { + req := types.SetField{ + This: m.Reference(), + Entity: entity, + Key: key, + Value: value, + } + + _, err := methods.SetField(ctx, m.c, &req) + return err +} + +func (m CustomFieldsManager) Field(ctx context.Context) ([]types.CustomFieldDef, error) { + var fm mo.CustomFieldsManager + + err := m.Properties(ctx, m.Reference(), []string{"field"}, &fm) + if err != nil { + return nil, err + } + + return fm.Field, nil +} + +func (m CustomFieldsManager) FindKey(ctx context.Context, key string) (int, error) { + field, err := m.Field(ctx) + if err != nil { + return -1, err + } + + for _, def := range field { + if def.Name == key { 
+ return def.Key, nil + } + } + + k, err := strconv.Atoi(key) + if err == nil { + // assume literal int key + return k, nil + } + + return -1, ErrKeyNameNotFound +} diff --git a/vendor/github.com/vmware/govmomi/object/customization_spec_manager.go b/vendor/github.com/vmware/govmomi/object/customization_spec_manager.go new file mode 100644 index 000000000..aebe73e84 --- /dev/null +++ b/vendor/github.com/vmware/govmomi/object/customization_spec_manager.go @@ -0,0 +1,165 @@ +/* +Copyright (c) 2015 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package object + +import ( + "github.com/vmware/govmomi/vim25" + "github.com/vmware/govmomi/vim25/methods" + "github.com/vmware/govmomi/vim25/types" + "golang.org/x/net/context" +) + +type CustomizationSpecManager struct { + Common +} + +func NewCustomizationSpecManager(c *vim25.Client) *CustomizationSpecManager { + cs := CustomizationSpecManager{ + Common: NewCommon(c, *c.ServiceContent.CustomizationSpecManager), + } + + return &cs +} + +func (cs CustomizationSpecManager) DoesCustomizationSpecExist(ctx context.Context, name string) (bool, error) { + req := types.DoesCustomizationSpecExist{ + This: cs.Reference(), + Name: name, + } + + res, err := methods.DoesCustomizationSpecExist(ctx, cs.c, &req) + + if err != nil { + return false, err + } + + return res.Returnval, nil +} + +func (cs CustomizationSpecManager) GetCustomizationSpec(ctx context.Context, name string) (*types.CustomizationSpecItem, error) { + req := types.GetCustomizationSpec{ + This: cs.Reference(), + Name: name, + } + + res, err := methods.GetCustomizationSpec(ctx, cs.c, &req) + + if err != nil { + return nil, err + } + + return &res.Returnval, nil +} + +func (cs CustomizationSpecManager) CreateCustomizationSpec(ctx context.Context, item types.CustomizationSpecItem) error { + req := types.CreateCustomizationSpec{ + This: cs.Reference(), + Item: item, + } + + _, err := methods.CreateCustomizationSpec(ctx, cs.c, &req) + if err != nil { + return err + } + + return nil +} + +func (cs CustomizationSpecManager) OverwriteCustomizationSpec(ctx context.Context, item types.CustomizationSpecItem) error { + req := types.OverwriteCustomizationSpec{ + This: cs.Reference(), + Item: item, + } + + _, err := methods.OverwriteCustomizationSpec(ctx, cs.c, &req) + if err != nil { + return err + } + + return nil +} + +func (cs CustomizationSpecManager) DeleteCustomizationSpec(ctx context.Context, name string) error { + req := types.DeleteCustomizationSpec{ + This: cs.Reference(), + Name: name, + } + + _, err := methods.DeleteCustomizationSpec(ctx, cs.c, &req) + if err != nil { + return err + } + + return nil +} + +func (cs CustomizationSpecManager) DuplicateCustomizationSpec(ctx context.Context, name string, newName string) error { + req := types.DuplicateCustomizationSpec{ + This: cs.Reference(), + Name: name, + NewName: newName, + } + + _, err := methods.DuplicateCustomizationSpec(ctx, cs.c, &req) + if err != nil { + return err + } + + return nil +} + +func (cs CustomizationSpecManager) 
RenameCustomizationSpec(ctx context.Context, name string, newName string) error { + req := types.RenameCustomizationSpec{ + This: cs.Reference(), + Name: name, + NewName: newName, + } + + _, err := methods.RenameCustomizationSpec(ctx, cs.c, &req) + if err != nil { + return err + } + + return nil +} + +func (cs CustomizationSpecManager) CustomizationSpecItemToXml(ctx context.Context, item types.CustomizationSpecItem) (string, error) { + req := types.CustomizationSpecItemToXml{ + This: cs.Reference(), + Item: item, + } + + res, err := methods.CustomizationSpecItemToXml(ctx, cs.c, &req) + if err != nil { + return "", err + } + + return res.Returnval, nil +} + +func (cs CustomizationSpecManager) XmlToCustomizationSpecItem(ctx context.Context, xml string) (*types.CustomizationSpecItem, error) { + req := types.XmlToCustomizationSpecItem{ + This: cs.Reference(), + SpecItemXml: xml, + } + + res, err := methods.XmlToCustomizationSpecItem(ctx, cs.c, &req) + if err != nil { + return nil, err + } + return &res.Returnval, nil +} diff --git a/vendor/github.com/vmware/govmomi/object/datacenter.go b/vendor/github.com/vmware/govmomi/object/datacenter.go new file mode 100644 index 000000000..fa522fb05 --- /dev/null +++ b/vendor/github.com/vmware/govmomi/object/datacenter.go @@ -0,0 +1,90 @@ +/* +Copyright (c) 2015 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
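+
+An editorial usage sketch, not part of the vendored file: fetching a
+datacenter's well-known folders with the Folders method defined below (assumes
+ctx and a *Datacenter dc; "DC0" is a placeholder datacenter name):
+
+	f, err := dc.Folders(ctx)
+	if err == nil {
+		_ = f.VmFolder.InventoryPath // e.g. "/DC0/vm"
+	}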
+*/ + +package object + +import ( + "fmt" + + "github.com/vmware/govmomi/vim25" + "github.com/vmware/govmomi/vim25/methods" + "github.com/vmware/govmomi/vim25/mo" + "github.com/vmware/govmomi/vim25/types" + "golang.org/x/net/context" +) + +type DatacenterFolders struct { + VmFolder *Folder + HostFolder *Folder + DatastoreFolder *Folder + NetworkFolder *Folder +} + +type Datacenter struct { + Common +} + +func NewDatacenter(c *vim25.Client, ref types.ManagedObjectReference) *Datacenter { + return &Datacenter{ + Common: NewCommon(c, ref), + } +} + +func (d *Datacenter) Folders(ctx context.Context) (*DatacenterFolders, error) { + var md mo.Datacenter + + ps := []string{"name", "vmFolder", "hostFolder", "datastoreFolder", "networkFolder"} + err := d.Properties(ctx, d.Reference(), ps, &md) + if err != nil { + return nil, err + } + + df := &DatacenterFolders{ + VmFolder: NewFolder(d.c, md.VmFolder), + HostFolder: NewFolder(d.c, md.HostFolder), + DatastoreFolder: NewFolder(d.c, md.DatastoreFolder), + NetworkFolder: NewFolder(d.c, md.NetworkFolder), + } + + paths := []struct { + name string + path *string + }{ + {"vm", &df.VmFolder.InventoryPath}, + {"host", &df.HostFolder.InventoryPath}, + {"datastore", &df.DatastoreFolder.InventoryPath}, + {"network", &df.NetworkFolder.InventoryPath}, + } + + for _, p := range paths { + *p.path = fmt.Sprintf("/%s/%s", md.Name, p.name) + } + + return df, nil +} + +func (d Datacenter) Destroy(ctx context.Context) (*Task, error) { + req := types.Destroy_Task{ + This: d.Reference(), + } + + res, err := methods.Destroy_Task(ctx, d.c, &req) + if err != nil { + return nil, err + } + + return NewTask(d.c, res.Returnval), nil +} diff --git a/vendor/github.com/vmware/govmomi/object/datacenter_test.go b/vendor/github.com/vmware/govmomi/object/datacenter_test.go new file mode 100644 index 000000000..1eb410576 --- /dev/null +++ b/vendor/github.com/vmware/govmomi/object/datacenter_test.go @@ -0,0 +1,20 @@ +/* +Copyright (c) 2015 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package object + +// Datacenter should implement the Reference interface. +var _ Reference = Datacenter{} diff --git a/vendor/github.com/vmware/govmomi/object/datastore.go b/vendor/github.com/vmware/govmomi/object/datastore.go new file mode 100644 index 000000000..34bb062d5 --- /dev/null +++ b/vendor/github.com/vmware/govmomi/object/datastore.go @@ -0,0 +1,351 @@ +/* +Copyright (c) 2015 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
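+
+An editorial usage sketch, not part of the vendored file: building a datastore
+path with the Path method defined below (assumes a *Datastore ds whose
+InventoryPath ends in "datastore1"):
+
+	p := ds.Path("vm1/vm1.vmx") // yields "[datastore1] vm1/vm1.vmx"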
+*/
+
+package object
+
+import (
+	"fmt"
+	"io"
+	"math/rand"
+	"net/http"
+	"net/url"
+	"path"
+	"strings"
+
+	"github.com/vmware/govmomi/property"
+	"github.com/vmware/govmomi/session"
+	"github.com/vmware/govmomi/vim25"
+	"github.com/vmware/govmomi/vim25/mo"
+	"github.com/vmware/govmomi/vim25/soap"
+	"github.com/vmware/govmomi/vim25/types"
+	"golang.org/x/net/context"
+)
+
+// DatastoreNoSuchDirectoryError is returned when a directory could not be found.
+type DatastoreNoSuchDirectoryError struct {
+	verb    string
+	subject string
+}
+
+func (e DatastoreNoSuchDirectoryError) Error() string {
+	return fmt.Sprintf("cannot %s '%s': No such directory", e.verb, e.subject)
+}
+
+// DatastoreNoSuchFileError is returned when a file could not be found.
+type DatastoreNoSuchFileError struct {
+	verb    string
+	subject string
+}
+
+func (e DatastoreNoSuchFileError) Error() string {
+	return fmt.Sprintf("cannot %s '%s': No such file", e.verb, e.subject)
+}
+
+type Datastore struct {
+	Common
+
+	InventoryPath string
+}
+
+func NewDatastore(c *vim25.Client, ref types.ManagedObjectReference) *Datastore {
+	return &Datastore{
+		Common: NewCommon(c, ref),
+	}
+}
+
+func (d Datastore) Name() string {
+	return path.Base(d.InventoryPath)
+}
+
+func (d Datastore) Path(path string) string {
+	name := d.Name()
+	if name == "" {
+		panic("expected non-empty name")
+	}
+
+	return fmt.Sprintf("[%s] %s", name, path)
+}
+
+// URL returns a URL for access to the given path on this datastore over HTTP.
+func (d Datastore) URL(ctx context.Context, dc *Datacenter, path string) (*url.URL, error) {
+	var mdc mo.Datacenter
+	if err := dc.Properties(ctx, dc.Reference(), []string{"name"}, &mdc); err != nil {
+		return nil, err
+	}
+
+	var mds mo.Datastore
+	if err := d.Properties(ctx, d.Reference(), []string{"name"}, &mds); err != nil {
+		return nil, err
+	}
+
+	u := d.c.URL()
+
+	return &url.URL{
+		Scheme: u.Scheme,
+		Host:   u.Host,
+		Path:   fmt.Sprintf("/folder/%s", path),
+		RawQuery: url.Values{
+			"dcPath": []string{mdc.Name},
+			"dsName": []string{mds.Name},
+		}.Encode(),
+	}, nil
+}
+
+func (d Datastore) Browser(ctx context.Context) (*HostDatastoreBrowser, error) {
+	var do mo.Datastore
+
+	err := d.Properties(ctx, d.Reference(), []string{"browser"}, &do)
+	if err != nil {
+		return nil, err
+	}
+
+	return NewHostDatastoreBrowser(d.c, do.Browser), nil
+}
+
+// ServiceTicket obtains a ticket via AcquireGenericServiceTicket and returns it
+// as an http.Cookie, along with the url.URL that can be used together with the
+// ticket cookie to access the given path.
+func (d Datastore) ServiceTicket(ctx context.Context, path string, method string) (*url.URL, *http.Cookie, error) {
+	// We are uploading to an ESX host
+	u := &url.URL{
+		Scheme: d.c.URL().Scheme,
+		Host:   d.c.URL().Host,
+		Path:   fmt.Sprintf("/folder/%s", path),
+		RawQuery: url.Values{
+			"dsName": []string{d.Name()},
+		}.Encode(),
+	}
+
+	// If connected to VC, the ticket request must be for an ESX host.
+ if d.c.IsVC() { + hosts, err := d.AttachedHosts(ctx) + if err != nil { + return nil, nil, err + } + + if len(hosts) == 0 { + return nil, nil, fmt.Errorf("no hosts attached to datastore %#v", d.Reference()) + } + + // Pick a random attached host + host := hosts[rand.Intn(len(hosts))] + name, err := host.Name(ctx) + if err != nil { + return nil, nil, err + } + u.Host = name + } + + spec := types.SessionManagerHttpServiceRequestSpec{ + Url: u.String(), + // See SessionManagerHttpServiceRequestSpecMethod enum + Method: fmt.Sprintf("http%s%s", method[0:1], strings.ToLower(method[1:])), + } + + sm := session.NewManager(d.Client()) + + ticket, err := sm.AcquireGenericServiceTicket(ctx, &spec) + if err != nil { + return nil, nil, err + } + + cookie := &http.Cookie{ + Name: "vmware_cgi_ticket", + Value: ticket.Id, + } + + return u, cookie, nil +} + +func (d Datastore) uploadTicket(ctx context.Context, path string, param *soap.Upload) (*url.URL, *soap.Upload, error) { + p := soap.DefaultUpload + if param != nil { + p = *param // copy + } + + u, ticket, err := d.ServiceTicket(ctx, path, p.Method) + if err != nil { + return nil, nil, err + } + + p.Ticket = ticket + + return u, &p, nil +} + +func (d Datastore) downloadTicket(ctx context.Context, path string, param *soap.Download) (*url.URL, *soap.Download, error) { + p := soap.DefaultDownload + if param != nil { + p = *param // copy + } + + u, ticket, err := d.ServiceTicket(ctx, path, p.Method) + if err != nil { + return nil, nil, err + } + + p.Ticket = ticket + + return u, &p, nil +} + +// Upload via soap.Upload with an http service ticket +func (d Datastore) Upload(ctx context.Context, f io.Reader, path string, param *soap.Upload) error { + u, p, err := d.uploadTicket(ctx, path, param) + if err != nil { + return err + } + return d.Client().Upload(f, u, p) +} + +// UploadFile via soap.Upload with an http service ticket +func (d Datastore) UploadFile(ctx context.Context, file string, path string, param *soap.Upload) error { + u, p, err := d.uploadTicket(ctx, path, param) + if err != nil { + return err + } + return d.Client().UploadFile(file, u, p) +} + +// DownloadFile via soap.Upload with an http service ticket +func (d Datastore) DownloadFile(ctx context.Context, path string, file string, param *soap.Download) error { + u, p, err := d.downloadTicket(ctx, path, param) + if err != nil { + return err + } + return d.Client().DownloadFile(file, u, p) +} + +// AttachedHosts returns hosts that have this Datastore attached, accessible and writable. 
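+//
+// A usage sketch (editorial, not vendored code; assumes ctx and a *Datastore ds):
+//
+//	hosts, err := ds.AttachedHosts(ctx)
+//	if err == nil {
+//		for _, h := range hosts {
+//			fmt.Println(h.Reference())
+//		}
+//	}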
+func (d Datastore) AttachedHosts(ctx context.Context) ([]*HostSystem, error) {
+	var ds mo.Datastore
+	var hosts []*HostSystem
+
+	pc := property.DefaultCollector(d.Client())
+	err := pc.RetrieveOne(ctx, d.Reference(), []string{"host"}, &ds)
+	if err != nil {
+		return nil, err
+	}
+
+	mounts := make(map[types.ManagedObjectReference]types.DatastoreHostMount)
+	var refs []types.ManagedObjectReference
+	for _, host := range ds.Host {
+		refs = append(refs, host.Key)
+		mounts[host.Key] = host
+	}
+
+	var hs []mo.HostSystem
+	err = pc.Retrieve(ctx, refs, []string{"runtime.connectionState", "runtime.powerState"}, &hs)
+	if err != nil {
+		return nil, err
+	}
+
+	for _, host := range hs {
+		if host.Runtime.ConnectionState == types.HostSystemConnectionStateConnected &&
+			host.Runtime.PowerState == types.HostSystemPowerStatePoweredOn {
+
+			mount := mounts[host.Reference()]
+			info := mount.MountInfo
+
+			if *info.Mounted && *info.Accessible && info.AccessMode == string(types.HostMountModeReadWrite) {
+				hosts = append(hosts, NewHostSystem(d.Client(), mount.Key))
+			}
+		}
+	}
+
+	return hosts, nil
+}
+
+// AttachedClusterHosts returns hosts that have this Datastore attached,
+// accessible and writable, and are members of the given cluster.
+func (d Datastore) AttachedClusterHosts(ctx context.Context, cluster *ComputeResource) ([]*HostSystem, error) {
+	var hosts []*HostSystem
+
+	clusterHosts, err := cluster.Hosts(ctx)
+	if err != nil {
+		return nil, err
+	}
+
+	attachedHosts, err := d.AttachedHosts(ctx)
+	if err != nil {
+		return nil, err
+	}
+
+	refs := make(map[types.ManagedObjectReference]bool)
+	for _, host := range attachedHosts {
+		refs[host.Reference()] = true
+	}
+
+	for _, host := range clusterHosts {
+		if refs[host.Reference()] {
+			hosts = append(hosts, host)
+		}
+	}
+
+	return hosts, nil
+}
+
+func (d Datastore) Stat(ctx context.Context, file string) (types.BaseFileInfo, error) {
+	b, err := d.Browser(ctx)
+	if err != nil {
+		return nil, err
+	}
+
+	spec := types.HostDatastoreBrowserSearchSpec{
+		Details: &types.FileQueryFlags{
+			FileType:  true,
+			FileOwner: types.NewBool(true), // TODO: omitempty is generated, but seems to be required
+		},
+		MatchPattern: []string{path.Base(file)},
+	}
+
+	dsPath := d.Path(path.Dir(file))
+	task, err := b.SearchDatastore(context.TODO(), dsPath, &spec)
+	if err != nil {
+		return nil, err
+	}
+
+	info, err := task.WaitForResult(context.TODO(), nil)
+	if err != nil {
+		// Guard against a nil info before dereferencing info.Error.
+		if info != nil && info.Error != nil {
+			_, ok := info.Error.Fault.(*types.FileNotFound)
+			if ok {
+				// FileNotFound means the base path doesn't exist.
+				return nil, DatastoreNoSuchDirectoryError{"stat", dsPath}
+			}
+		}
+
+		return nil, err
+	}
+
+	res := info.Result.(types.HostDatastoreBrowserSearchResults)
+	if len(res.File) == 0 {
+		// File doesn't exist
+		return nil, DatastoreNoSuchFileError{"stat", d.Path(file)}
+	}
+
+	return res.File[0], nil
+}
+
+// Type returns the type of file system volume.
+func (d Datastore) Type(ctx context.Context) (types.HostFileSystemVolumeFileSystemType, error) { + var mds mo.Datastore + + if err := d.Properties(ctx, d.Reference(), []string{"summary.type"}, &mds); err != nil { + return types.HostFileSystemVolumeFileSystemType(""), err + } + return types.HostFileSystemVolumeFileSystemType(mds.Summary.Type), nil +} diff --git a/vendor/github.com/vmware/govmomi/object/datastore_test.go b/vendor/github.com/vmware/govmomi/object/datastore_test.go new file mode 100644 index 000000000..5b4f790ff --- /dev/null +++ b/vendor/github.com/vmware/govmomi/object/datastore_test.go @@ -0,0 +1,20 @@ +/* +Copyright (c) 2015 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package object + +// Datastore should implement the Reference interface. +var _ Reference = Datastore{} diff --git a/vendor/github.com/vmware/govmomi/object/diagnostic_manager.go b/vendor/github.com/vmware/govmomi/object/diagnostic_manager.go new file mode 100644 index 000000000..b181592cd --- /dev/null +++ b/vendor/github.com/vmware/govmomi/object/diagnostic_manager.go @@ -0,0 +1,95 @@ +/* +Copyright (c) 2015 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package object + +import ( + "github.com/vmware/govmomi/vim25" + "github.com/vmware/govmomi/vim25/methods" + "github.com/vmware/govmomi/vim25/types" + "golang.org/x/net/context" +) + +type DiagnosticManager struct { + Common +} + +func NewDiagnosticManager(c *vim25.Client) *DiagnosticManager { + m := DiagnosticManager{ + Common: NewCommon(c, *c.ServiceContent.DiagnosticManager), + } + + return &m +} + +func (m DiagnosticManager) BrowseLog(ctx context.Context, host *HostSystem, key string, start, lines int) (*types.DiagnosticManagerLogHeader, error) { + req := types.BrowseDiagnosticLog{ + This: m.Reference(), + Key: key, + Start: start, + Lines: lines, + } + + if host != nil { + ref := host.Reference() + req.Host = &ref + } + + res, err := methods.BrowseDiagnosticLog(ctx, m.Client(), &req) + if err != nil { + return nil, err + } + + return &res.Returnval, nil +} + +func (m DiagnosticManager) GenerateLogBundles(ctx context.Context, includeDefault bool, host []*HostSystem) (*Task, error) { + req := types.GenerateLogBundles_Task{ + This: m.Reference(), + IncludeDefault: includeDefault, + } + + if host != nil { + for _, h := range host { + req.Host = append(req.Host, h.Reference()) + } + } + + res, err := methods.GenerateLogBundles_Task(ctx, m.c, &req) + if err != nil { + return nil, err + } + + return NewTask(m.c, res.Returnval), nil +} + +func (m DiagnosticManager) QueryDescriptions(ctx context.Context, host *HostSystem) ([]types.DiagnosticManagerLogDescriptor, error) { + req := types.QueryDescriptions{ + This: m.Reference(), + } + + if host != nil { + ref := host.Reference() + req.Host = &ref + } + + res, err := methods.QueryDescriptions(ctx, m.Client(), &req) + if err != nil { + return nil, err + } + + return res.Returnval, nil +} diff --git a/vendor/github.com/vmware/govmomi/object/distributed_virtual_portgroup.go b/vendor/github.com/vmware/govmomi/object/distributed_virtual_portgroup.go new file mode 100644 index 000000000..0dbd4b589 --- /dev/null +++ b/vendor/github.com/vmware/govmomi/object/distributed_virtual_portgroup.go @@ -0,0 +1,64 @@ +/* +Copyright (c) 2015 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package object + +import ( + "path" + + "github.com/vmware/govmomi/vim25" + "github.com/vmware/govmomi/vim25/mo" + "github.com/vmware/govmomi/vim25/types" + "golang.org/x/net/context" +) + +type DistributedVirtualPortgroup struct { + Common + + InventoryPath string +} + +func NewDistributedVirtualPortgroup(c *vim25.Client, ref types.ManagedObjectReference) *DistributedVirtualPortgroup { + return &DistributedVirtualPortgroup{ + Common: NewCommon(c, ref), + } +} +func (p DistributedVirtualPortgroup) Name() string { + return path.Base(p.InventoryPath) +} + +// EthernetCardBackingInfo returns the VirtualDeviceBackingInfo for this DistributedVirtualPortgroup +func (p DistributedVirtualPortgroup) EthernetCardBackingInfo(ctx context.Context) (types.BaseVirtualDeviceBackingInfo, error) { + var dvp mo.DistributedVirtualPortgroup + var dvs mo.VmwareDistributedVirtualSwitch // TODO: should be mo.BaseDistributedVirtualSwitch + + if err := p.Properties(ctx, p.Reference(), []string{"key", "config.distributedVirtualSwitch"}, &dvp); err != nil { + return nil, err + } + + if err := p.Properties(ctx, *dvp.Config.DistributedVirtualSwitch, []string{"uuid"}, &dvs); err != nil { + return nil, err + } + + backing := &types.VirtualEthernetCardDistributedVirtualPortBackingInfo{ + Port: types.DistributedVirtualSwitchPortConnection{ + PortgroupKey: dvp.Key, + SwitchUuid: dvs.Uuid, + }, + } + + return backing, nil +} diff --git a/vendor/github.com/vmware/govmomi/object/distributed_virtual_portgroup_test.go b/vendor/github.com/vmware/govmomi/object/distributed_virtual_portgroup_test.go new file mode 100644 index 000000000..fcbbab26e --- /dev/null +++ b/vendor/github.com/vmware/govmomi/object/distributed_virtual_portgroup_test.go @@ -0,0 +1,23 @@ +/* +Copyright (c) 2015 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package object + +// DistributedVirtualPortgroup should implement the Reference interface. +var _ Reference = DistributedVirtualPortgroup{} + +// DistributedVirtualPortgroup should implement the NetworkReference interface. +var _ NetworkReference = DistributedVirtualPortgroup{} diff --git a/vendor/github.com/vmware/govmomi/object/distributed_virtual_switch.go b/vendor/github.com/vmware/govmomi/object/distributed_virtual_switch.go new file mode 100644 index 000000000..f2fb0abee --- /dev/null +++ b/vendor/github.com/vmware/govmomi/object/distributed_virtual_switch.go @@ -0,0 +1,68 @@ +/* +Copyright (c) 2015 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package object + +import ( + "github.com/vmware/govmomi/vim25" + "github.com/vmware/govmomi/vim25/methods" + "github.com/vmware/govmomi/vim25/types" + "golang.org/x/net/context" +) + +type DistributedVirtualSwitch struct { + Common + + InventoryPath string +} + +func NewDistributedVirtualSwitch(c *vim25.Client, ref types.ManagedObjectReference) *DistributedVirtualSwitch { + return &DistributedVirtualSwitch{ + Common: NewCommon(c, ref), + } +} + +func (s DistributedVirtualSwitch) EthernetCardBackingInfo(ctx context.Context) (types.BaseVirtualDeviceBackingInfo, error) { + return nil, ErrNotSupported // TODO: just to satisfy NetworkReference interface for the finder +} + +func (s DistributedVirtualSwitch) Reconfigure(ctx context.Context, spec types.BaseDVSConfigSpec) (*Task, error) { + req := types.ReconfigureDvs_Task{ + This: s.Reference(), + Spec: spec, + } + + res, err := methods.ReconfigureDvs_Task(ctx, s.Client(), &req) + if err != nil { + return nil, err + } + + return NewTask(s.Client(), res.Returnval), nil +} + +func (s DistributedVirtualSwitch) AddPortgroup(ctx context.Context, spec []types.DVPortgroupConfigSpec) (*Task, error) { + req := types.AddDVPortgroup_Task{ + This: s.Reference(), + Spec: spec, + } + + res, err := methods.AddDVPortgroup_Task(ctx, s.Client(), &req) + if err != nil { + return nil, err + } + + return NewTask(s.Client(), res.Returnval), nil +} diff --git a/vendor/github.com/vmware/govmomi/object/extension_manager.go b/vendor/github.com/vmware/govmomi/object/extension_manager.go new file mode 100644 index 000000000..b7ce77c5c --- /dev/null +++ b/vendor/github.com/vmware/govmomi/object/extension_manager.go @@ -0,0 +1,112 @@ +/* +Copyright (c) 2015 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package object + +import ( + "github.com/vmware/govmomi/vim25" + "github.com/vmware/govmomi/vim25/methods" + "github.com/vmware/govmomi/vim25/mo" + "github.com/vmware/govmomi/vim25/types" + "golang.org/x/net/context" +) + +type ExtensionManager struct { + Common +} + +// GetExtensionManager wraps NewExtensionManager, returning ErrNotSupported +// when the client is not connected to a vCenter instance. 
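+//
+// Typical use, sketched (c is an assumed authenticated *vim25.Client):
+//
+//	m, err := GetExtensionManager(c)
+//	if err != nil {
+//		return err // ErrNotSupported when connected to ESX rather than vCenter
+//	}
+//	extensions, err := m.List(ctx)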
+func GetExtensionManager(c *vim25.Client) (*ExtensionManager, error) { + if c.ServiceContent.ExtensionManager == nil { + return nil, ErrNotSupported + } + return NewExtensionManager(c), nil +} + +func NewExtensionManager(c *vim25.Client) *ExtensionManager { + o := ExtensionManager{ + Common: NewCommon(c, *c.ServiceContent.ExtensionManager), + } + + return &o +} + +func (m ExtensionManager) List(ctx context.Context) ([]types.Extension, error) { + var em mo.ExtensionManager + + err := m.Properties(ctx, m.Reference(), []string{"extensionList"}, &em) + if err != nil { + return nil, err + } + + return em.ExtensionList, nil +} + +func (m ExtensionManager) Find(ctx context.Context, key string) (*types.Extension, error) { + req := types.FindExtension{ + This: m.Reference(), + ExtensionKey: key, + } + + res, err := methods.FindExtension(ctx, m.c, &req) + if err != nil { + return nil, err + } + + return res.Returnval, nil +} + +func (m ExtensionManager) Register(ctx context.Context, extension types.Extension) error { + req := types.RegisterExtension{ + This: m.Reference(), + Extension: extension, + } + + _, err := methods.RegisterExtension(ctx, m.c, &req) + return err +} + +func (m ExtensionManager) SetCertificate(ctx context.Context, key string, certificatePem string) error { + req := types.SetExtensionCertificate{ + This: m.Reference(), + ExtensionKey: key, + CertificatePem: certificatePem, + } + + _, err := methods.SetExtensionCertificate(ctx, m.c, &req) + return err +} + +func (m ExtensionManager) Unregister(ctx context.Context, key string) error { + req := types.UnregisterExtension{ + This: m.Reference(), + ExtensionKey: key, + } + + _, err := methods.UnregisterExtension(ctx, m.c, &req) + return err +} + +func (m ExtensionManager) Update(ctx context.Context, extension types.Extension) error { + req := types.UpdateExtension{ + This: m.Reference(), + Extension: extension, + } + + _, err := methods.UpdateExtension(ctx, m.c, &req) + return err +} diff --git a/vendor/github.com/vmware/govmomi/object/file_manager.go b/vendor/github.com/vmware/govmomi/object/file_manager.go new file mode 100644 index 000000000..716472826 --- /dev/null +++ b/vendor/github.com/vmware/govmomi/object/file_manager.go @@ -0,0 +1,125 @@ +/* +Copyright (c) 2015 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package object + +import ( + "github.com/vmware/govmomi/vim25" + "github.com/vmware/govmomi/vim25/methods" + "github.com/vmware/govmomi/vim25/types" + "golang.org/x/net/context" +) + +type FileManager struct { + Common +} + +func NewFileManager(c *vim25.Client) *FileManager { + f := FileManager{ + Common: NewCommon(c, *c.ServiceContent.FileManager), + } + + return &f +} + +func (f FileManager) CopyDatastoreFile(ctx context.Context, sourceName string, sourceDatacenter *Datacenter, destinationName string, destinationDatacenter *Datacenter, force bool) (*Task, error) { + req := types.CopyDatastoreFile_Task{ + This: f.Reference(), + SourceName: sourceName, + DestinationName: destinationName, + Force: types.NewBool(force), + } + + if sourceDatacenter != nil { + ref := sourceDatacenter.Reference() + req.SourceDatacenter = &ref + } + + if destinationDatacenter != nil { + ref := destinationDatacenter.Reference() + req.DestinationDatacenter = &ref + } + + res, err := methods.CopyDatastoreFile_Task(ctx, f.c, &req) + if err != nil { + return nil, err + } + + return NewTask(f.c, res.Returnval), nil +} + +// DeleteDatastoreFile deletes the specified file or folder from the datastore. +func (f FileManager) DeleteDatastoreFile(ctx context.Context, name string, dc *Datacenter) (*Task, error) { + req := types.DeleteDatastoreFile_Task{ + This: f.Reference(), + Name: name, + } + + if dc != nil { + ref := dc.Reference() + req.Datacenter = &ref + } + + res, err := methods.DeleteDatastoreFile_Task(ctx, f.c, &req) + if err != nil { + return nil, err + } + + return NewTask(f.c, res.Returnval), nil +} + +// MakeDirectory creates a folder using the specified name. +func (f FileManager) MakeDirectory(ctx context.Context, name string, dc *Datacenter, createParentDirectories bool) error { + req := types.MakeDirectory{ + This: f.Reference(), + Name: name, + CreateParentDirectories: types.NewBool(createParentDirectories), + } + + if dc != nil { + ref := dc.Reference() + req.Datacenter = &ref + } + + _, err := methods.MakeDirectory(ctx, f.c, &req) + return err +} + +func (f FileManager) MoveDatastoreFile(ctx context.Context, sourceName string, sourceDatacenter *Datacenter, destinationName string, destinationDatacenter *Datacenter, force bool) (*Task, error) { + req := types.MoveDatastoreFile_Task{ + This: f.Reference(), + SourceName: sourceName, + DestinationName: destinationName, + Force: types.NewBool(force), + } + + if sourceDatacenter != nil { + ref := sourceDatacenter.Reference() + req.SourceDatacenter = &ref + } + + if destinationDatacenter != nil { + ref := destinationDatacenter.Reference() + req.DestinationDatacenter = &ref + } + + res, err := methods.MoveDatastoreFile_Task(ctx, f.c, &req) + if err != nil { + return nil, err + } + + return NewTask(f.c, res.Returnval), nil +} diff --git a/vendor/github.com/vmware/govmomi/object/folder.go b/vendor/github.com/vmware/govmomi/object/folder.go new file mode 100644 index 000000000..38fddd6da --- /dev/null +++ b/vendor/github.com/vmware/govmomi/object/folder.go @@ -0,0 +1,198 @@ +/* +Copyright (c) 2015 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package object + +import ( + "github.com/vmware/govmomi/vim25" + "github.com/vmware/govmomi/vim25/methods" + "github.com/vmware/govmomi/vim25/mo" + "github.com/vmware/govmomi/vim25/types" + "golang.org/x/net/context" +) + +type Folder struct { + Common + + InventoryPath string +} + +func NewFolder(c *vim25.Client, ref types.ManagedObjectReference) *Folder { + return &Folder{ + Common: NewCommon(c, ref), + } +} + +func NewRootFolder(c *vim25.Client) *Folder { + return NewFolder(c, c.ServiceContent.RootFolder) +} + +func (f Folder) Children(ctx context.Context) ([]Reference, error) { + var mf mo.Folder + + err := f.Properties(ctx, f.Reference(), []string{"childEntity"}, &mf) + if err != nil { + return nil, err + } + + var rs []Reference + for _, e := range mf.ChildEntity { + if r := NewReference(f.c, e); r != nil { + rs = append(rs, r) + } + } + + return rs, nil +} + +func (f Folder) CreateDatacenter(ctx context.Context, datacenter string) (*Datacenter, error) { + req := types.CreateDatacenter{ + This: f.Reference(), + Name: datacenter, + } + + res, err := methods.CreateDatacenter(ctx, f.c, &req) + if err != nil { + return nil, err + } + + // Response will be nil if this is an ESX host that does not belong to a vCenter + if res == nil { + return nil, nil + } + + return NewDatacenter(f.c, res.Returnval), nil +} + +func (f Folder) CreateCluster(ctx context.Context, cluster string, spec types.ClusterConfigSpecEx) (*ClusterComputeResource, error) { + req := types.CreateClusterEx{ + This: f.Reference(), + Name: cluster, + Spec: spec, + } + + res, err := methods.CreateClusterEx(ctx, f.c, &req) + if err != nil { + return nil, err + } + + // Response will be nil if this is an ESX host that does not belong to a vCenter + if res == nil { + return nil, nil + } + + return NewClusterComputeResource(f.c, res.Returnval), nil +} + +func (f Folder) CreateFolder(ctx context.Context, name string) (*Folder, error) { + req := types.CreateFolder{ + This: f.Reference(), + Name: name, + } + + res, err := methods.CreateFolder(ctx, f.c, &req) + if err != nil { + return nil, err + } + + return NewFolder(f.c, res.Returnval), err +} + +func (f Folder) AddStandaloneHost(ctx context.Context, spec types.HostConnectSpec, addConnected bool, license *string, compResSpec *types.BaseComputeResourceConfigSpec) (*Task, error) { + req := types.AddStandaloneHost_Task{ + This: f.Reference(), + Spec: spec, + AddConnected: addConnected, + } + + if license != nil { + req.License = *license + } + + if compResSpec != nil { + req.CompResSpec = *compResSpec + } + + res, err := methods.AddStandaloneHost_Task(ctx, f.c, &req) + if err != nil { + return nil, err + } + + return NewTask(f.c, res.Returnval), nil +} + +func (f Folder) CreateVM(ctx context.Context, config types.VirtualMachineConfigSpec, pool *ResourcePool, host *HostSystem) (*Task, error) { + req := types.CreateVM_Task{ + This: f.Reference(), + Config: config, + Pool: pool.Reference(), + } + + if host != nil { + ref := host.Reference() + req.Host = &ref + } + + res, err := methods.CreateVM_Task(ctx, f.c, &req) + if err != nil { + return nil, err + } + + return NewTask(f.c, res.Returnval), nil +} + +func (f Folder) RegisterVM(ctx context.Context, path string, name string, asTemplate bool, pool *ResourcePool, host *HostSystem) (*Task, error) { + req := types.RegisterVM_Task{ + This: f.Reference(), + Path: path, + AsTemplate: asTemplate, + } + + if name != "" { + req.Name = name 
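+		// (An empty name is also valid: Name is then left unset and, per the
+		// vSphere API reference, the displayName from the .vmx file is used.)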
+ } + + if host != nil { + ref := host.Reference() + req.Host = &ref + } + + if pool != nil { + ref := pool.Reference() + req.Pool = &ref + } + + res, err := methods.RegisterVM_Task(ctx, f.c, &req) + if err != nil { + return nil, err + } + + return NewTask(f.c, res.Returnval), nil +} + +func (f Folder) CreateDVS(ctx context.Context, spec types.DVSCreateSpec) (*Task, error) { + req := types.CreateDVS_Task{ + This: f.Reference(), + Spec: spec, + } + + res, err := methods.CreateDVS_Task(ctx, f.c, &req) + if err != nil { + return nil, err + } + + return NewTask(f.c, res.Returnval), nil +} diff --git a/vendor/github.com/vmware/govmomi/object/folder_test.go b/vendor/github.com/vmware/govmomi/object/folder_test.go new file mode 100644 index 000000000..4423961f7 --- /dev/null +++ b/vendor/github.com/vmware/govmomi/object/folder_test.go @@ -0,0 +1,20 @@ +/* +Copyright (c) 2015 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package object + +// Folder should implement the Reference interface. +var _ Reference = Folder{} diff --git a/vendor/github.com/vmware/govmomi/object/history_collector.go b/vendor/github.com/vmware/govmomi/object/history_collector.go new file mode 100644 index 000000000..a0fb6aabc --- /dev/null +++ b/vendor/github.com/vmware/govmomi/object/history_collector.go @@ -0,0 +1,71 @@ +/* +Copyright (c) 2015 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package object + +import ( + "github.com/vmware/govmomi/vim25" + "github.com/vmware/govmomi/vim25/methods" + "github.com/vmware/govmomi/vim25/types" + "golang.org/x/net/context" +) + +type HistoryCollector struct { + Common +} + +func NewHistoryCollector(c *vim25.Client, ref types.ManagedObjectReference) *HistoryCollector { + return &HistoryCollector{ + Common: NewCommon(c, ref), + } +} + +func (h HistoryCollector) Destroy(ctx context.Context) error { + req := types.DestroyCollector{ + This: h.Reference(), + } + + _, err := methods.DestroyCollector(ctx, h.c, &req) + return err +} + +func (h HistoryCollector) Reset(ctx context.Context) error { + req := types.ResetCollector{ + This: h.Reference(), + } + + _, err := methods.ResetCollector(ctx, h.c, &req) + return err +} + +func (h HistoryCollector) Rewind(ctx context.Context) error { + req := types.RewindCollector{ + This: h.Reference(), + } + + _, err := methods.RewindCollector(ctx, h.c, &req) + return err +} + +func (h HistoryCollector) SetPageSize(ctx context.Context, maxCount int) error { + req := types.SetCollectorPageSize{ + This: h.Reference(), + MaxCount: maxCount, + } + + _, err := methods.SetCollectorPageSize(ctx, h.c, &req) + return err +} diff --git a/vendor/github.com/vmware/govmomi/object/host_config_manager.go b/vendor/github.com/vmware/govmomi/object/host_config_manager.go new file mode 100644 index 000000000..6d906e8c5 --- /dev/null +++ b/vendor/github.com/vmware/govmomi/object/host_config_manager.go @@ -0,0 +1,100 @@ +/* +Copyright (c) 2015 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package object + +import ( + "github.com/vmware/govmomi/vim25" + "github.com/vmware/govmomi/vim25/mo" + "github.com/vmware/govmomi/vim25/types" + "golang.org/x/net/context" +) + +type HostConfigManager struct { + Common +} + +func NewHostConfigManager(c *vim25.Client, ref types.ManagedObjectReference) *HostConfigManager { + return &HostConfigManager{ + Common: NewCommon(c, ref), + } +} + +func (m HostConfigManager) DatastoreSystem(ctx context.Context) (*HostDatastoreSystem, error) { + var h mo.HostSystem + + err := m.Properties(ctx, m.Reference(), []string{"configManager.datastoreSystem"}, &h) + if err != nil { + return nil, err + } + + return NewHostDatastoreSystem(m.c, *h.ConfigManager.DatastoreSystem), nil +} + +func (m HostConfigManager) NetworkSystem(ctx context.Context) (*HostNetworkSystem, error) { + var h mo.HostSystem + + err := m.Properties(ctx, m.Reference(), []string{"configManager.networkSystem"}, &h) + if err != nil { + return nil, err + } + + return NewHostNetworkSystem(m.c, *h.ConfigManager.NetworkSystem), nil +} + +func (m HostConfigManager) FirewallSystem(ctx context.Context) (*HostFirewallSystem, error) { + var h mo.HostSystem + + err := m.Properties(ctx, m.Reference(), []string{"configManager.firewallSystem"}, &h) + if err != nil { + return nil, err + } + + return NewHostFirewallSystem(m.c, *h.ConfigManager.FirewallSystem), nil +} + +func (m HostConfigManager) StorageSystem(ctx context.Context) (*HostStorageSystem, error) { + var h mo.HostSystem + + err := m.Properties(ctx, m.Reference(), []string{"configManager.storageSystem"}, &h) + if err != nil { + return nil, err + } + + return NewHostStorageSystem(m.c, *h.ConfigManager.StorageSystem), nil +} + +func (m HostConfigManager) VirtualNicManager(ctx context.Context) (*HostVirtualNicManager, error) { + var h mo.HostSystem + + err := m.Properties(ctx, m.Reference(), []string{"configManager.virtualNicManager"}, &h) + if err != nil { + return nil, err + } + + return NewHostVirtualNicManager(m.c, *h.ConfigManager.VirtualNicManager, m.Reference()), nil +} + +func (m HostConfigManager) VsanSystem(ctx context.Context) (*HostVsanSystem, error) { + var h mo.HostSystem + + err := m.Properties(ctx, m.Reference(), []string{"configManager.vsanSystem"}, &h) + if err != nil { + return nil, err + } + + return NewHostVsanSystem(m.c, *h.ConfigManager.VsanSystem), nil +} diff --git a/vendor/github.com/vmware/govmomi/object/host_datastore_browser.go b/vendor/github.com/vmware/govmomi/object/host_datastore_browser.go new file mode 100644 index 000000000..a2e205663 --- /dev/null +++ b/vendor/github.com/vmware/govmomi/object/host_datastore_browser.go @@ -0,0 +1,64 @@ +/* +Copyright (c) 2015 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package object + +import ( + "github.com/vmware/govmomi/vim25" + "github.com/vmware/govmomi/vim25/methods" + "github.com/vmware/govmomi/vim25/types" + "golang.org/x/net/context" +) + +type HostDatastoreBrowser struct { + Common +} + +func NewHostDatastoreBrowser(c *vim25.Client, ref types.ManagedObjectReference) *HostDatastoreBrowser { + return &HostDatastoreBrowser{ + Common: NewCommon(c, ref), + } +} + +func (b HostDatastoreBrowser) SearchDatastore(ctx context.Context, datastorePath string, searchSpec *types.HostDatastoreBrowserSearchSpec) (*Task, error) { + req := types.SearchDatastore_Task{ + This: b.Reference(), + DatastorePath: datastorePath, + SearchSpec: searchSpec, + } + + res, err := methods.SearchDatastore_Task(ctx, b.c, &req) + if err != nil { + return nil, err + } + + return NewTask(b.c, res.Returnval), nil +} + +func (b HostDatastoreBrowser) SearchDatastoreSubFolders(ctx context.Context, datastorePath string, searchSpec *types.HostDatastoreBrowserSearchSpec) (*Task, error) { + req := types.SearchDatastoreSubFolders_Task{ + This: b.Reference(), + DatastorePath: datastorePath, + SearchSpec: searchSpec, + } + + res, err := methods.SearchDatastoreSubFolders_Task(ctx, b.c, &req) + if err != nil { + return nil, err + } + + return NewTask(b.c, res.Returnval), nil +} diff --git a/vendor/github.com/vmware/govmomi/object/host_datastore_system.go b/vendor/github.com/vmware/govmomi/object/host_datastore_system.go new file mode 100644 index 000000000..632b7d255 --- /dev/null +++ b/vendor/github.com/vmware/govmomi/object/host_datastore_system.go @@ -0,0 +1,103 @@ +/* +Copyright (c) 2015 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package object + +import ( + "github.com/vmware/govmomi/vim25" + "github.com/vmware/govmomi/vim25/methods" + "github.com/vmware/govmomi/vim25/types" + "golang.org/x/net/context" +) + +type HostDatastoreSystem struct { + Common +} + +func NewHostDatastoreSystem(c *vim25.Client, ref types.ManagedObjectReference) *HostDatastoreSystem { + return &HostDatastoreSystem{ + Common: NewCommon(c, ref), + } +} + +func (s HostDatastoreSystem) CreateNasDatastore(ctx context.Context, spec types.HostNasVolumeSpec) (*Datastore, error) { + req := types.CreateNasDatastore{ + This: s.Reference(), + Spec: spec, + } + + res, err := methods.CreateNasDatastore(ctx, s.Client(), &req) + if err != nil { + return nil, err + } + + return NewDatastore(s.Client(), res.Returnval), nil +} + +func (s HostDatastoreSystem) CreateVmfsDatastore(ctx context.Context, spec types.VmfsDatastoreCreateSpec) (*Datastore, error) { + req := types.CreateVmfsDatastore{ + This: s.Reference(), + Spec: spec, + } + + res, err := methods.CreateVmfsDatastore(ctx, s.Client(), &req) + if err != nil { + return nil, err + } + + return NewDatastore(s.Client(), res.Returnval), nil +} + +func (s HostDatastoreSystem) Remove(ctx context.Context, ds *Datastore) error { + req := types.RemoveDatastore{ + This: s.Reference(), + Datastore: ds.Reference(), + } + + _, err := methods.RemoveDatastore(ctx, s.Client(), &req) + if err != nil { + return err + } + + return nil +} + +func (s HostDatastoreSystem) QueryAvailableDisksForVmfs(ctx context.Context) ([]types.HostScsiDisk, error) { + req := types.QueryAvailableDisksForVmfs{ + This: s.Reference(), + } + + res, err := methods.QueryAvailableDisksForVmfs(ctx, s.Client(), &req) + if err != nil { + return nil, err + } + + return res.Returnval, nil +} + +func (s HostDatastoreSystem) QueryVmfsDatastoreCreateOptions(ctx context.Context, devicePath string) ([]types.VmfsDatastoreOption, error) { + req := types.QueryVmfsDatastoreCreateOptions{ + This: s.Reference(), + DevicePath: devicePath, + } + + res, err := methods.QueryVmfsDatastoreCreateOptions(ctx, s.Client(), &req) + if err != nil { + return nil, err + } + + return res.Returnval, nil +} diff --git a/vendor/github.com/vmware/govmomi/object/host_firewall_system.go b/vendor/github.com/vmware/govmomi/object/host_firewall_system.go new file mode 100644 index 000000000..143f0983d --- /dev/null +++ b/vendor/github.com/vmware/govmomi/object/host_firewall_system.go @@ -0,0 +1,181 @@ +/* +Copyright (c) 2015 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/
+
+package object
+
+import (
+	"errors"
+	"fmt"
+	"strings"
+
+	"github.com/vmware/govmomi/vim25"
+	"github.com/vmware/govmomi/vim25/methods"
+	"github.com/vmware/govmomi/vim25/mo"
+	"github.com/vmware/govmomi/vim25/types"
+	"golang.org/x/net/context"
+)
+
+type HostFirewallSystem struct {
+	Common
+}
+
+func NewHostFirewallSystem(c *vim25.Client, ref types.ManagedObjectReference) *HostFirewallSystem {
+	return &HostFirewallSystem{
+		Common: NewCommon(c, ref),
+	}
+}
+
+func (s HostFirewallSystem) DisableRuleset(ctx context.Context, id string) error {
+	req := types.DisableRuleset{
+		This: s.Reference(),
+		Id:   id,
+	}
+
+	_, err := methods.DisableRuleset(ctx, s.c, &req)
+	return err
+}
+
+func (s HostFirewallSystem) EnableRuleset(ctx context.Context, id string) error {
+	req := types.EnableRuleset{
+		This: s.Reference(),
+		Id:   id,
+	}
+
+	_, err := methods.EnableRuleset(ctx, s.c, &req)
+	return err
+}
+
+func (s HostFirewallSystem) Refresh(ctx context.Context) error {
+	req := types.RefreshFirewall{
+		This: s.Reference(),
+	}
+
+	_, err := methods.RefreshFirewall(ctx, s.c, &req)
+	return err
+}
+
+func (s HostFirewallSystem) Info(ctx context.Context) (*types.HostFirewallInfo, error) {
+	var fs mo.HostFirewallSystem
+
+	err := s.Properties(ctx, s.Reference(), []string{"firewallInfo"}, &fs)
+	if err != nil {
+		return nil, err
+	}
+
+	return fs.FirewallInfo, nil
+}
+
+// HostFirewallRulesetList provides helpers for a slice of types.HostFirewallRuleset
+type HostFirewallRulesetList []types.HostFirewallRuleset
+
+// ByRule returns a HostFirewallRulesetList where Direction, PortType and Protocol are equal and Port is within range
+func (l HostFirewallRulesetList) ByRule(rule types.HostFirewallRule) HostFirewallRulesetList {
+	var matches HostFirewallRulesetList
+
+	for _, rs := range l {
+		for _, r := range rs.Rule {
+			if r.PortType != rule.PortType ||
+				r.Protocol != rule.Protocol ||
+				r.Direction != rule.Direction {
+				continue
+			}
+
+			if (r.EndPort == 0 && rule.Port == r.Port) ||
+				(rule.Port >= r.Port && rule.Port <= r.EndPort) {
+				matches = append(matches, rs)
+				break
+			}
+		}
+	}
+
+	return matches
+}
+
+// EnabledByRule returns a HostFirewallRulesetList with ByRule(rule) applied and filtered via Enabled()
+// if enabled param is true, otherwise filtered via Disabled().
+// An error is returned if the resulting list is empty.
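+//
+// Sketch (info is assumed to come from HostFirewallSystem.Info; the rule
+// below describes outbound TCP to port 443 and is illustrative only):
+//
+//	rule := types.HostFirewallRule{
+//		Port:      443,
+//		PortType:  types.HostFirewallRulePortTypeDst,
+//		Direction: types.HostFirewallRuleDirectionOutbound,
+//		Protocol:  string(types.HostFirewallRuleProtocolTcp),
+//	}
+//	enabled, err := HostFirewallRulesetList(info.Ruleset).EnabledByRule(rule, true)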
+func (l HostFirewallRulesetList) EnabledByRule(rule types.HostFirewallRule, enabled bool) (HostFirewallRulesetList, error) { + var matched, skipped HostFirewallRulesetList + var matchedKind, skippedKind string + + l = l.ByRule(rule) + + if enabled { + matched = l.Enabled() + matchedKind = "enabled" + + skipped = l.Disabled() + skippedKind = "disabled" + } else { + matched = l.Disabled() + matchedKind = "disabled" + + skipped = l.Enabled() + skippedKind = "enabled" + } + + if len(matched) == 0 { + msg := fmt.Sprintf("%d %s firewall rulesets match %s %s %s %d, %d %s rulesets match", + len(matched), matchedKind, + rule.Direction, rule.Protocol, rule.PortType, rule.Port, + len(skipped), skippedKind) + + if len(skipped) != 0 { + msg += fmt.Sprintf(": %s", strings.Join(skipped.Keys(), ", ")) + } + + return nil, errors.New(msg) + } + + return matched, nil +} + +// Enabled returns a HostFirewallRulesetList with enabled rules +func (l HostFirewallRulesetList) Enabled() HostFirewallRulesetList { + var matches HostFirewallRulesetList + + for _, rs := range l { + if rs.Enabled { + matches = append(matches, rs) + } + } + + return matches +} + +// Disabled returns a HostFirewallRulesetList with disabled rules +func (l HostFirewallRulesetList) Disabled() HostFirewallRulesetList { + var matches HostFirewallRulesetList + + for _, rs := range l { + if !rs.Enabled { + matches = append(matches, rs) + } + } + + return matches +} + +// Keys returns the HostFirewallRuleset.Key for each ruleset in the list +func (l HostFirewallRulesetList) Keys() []string { + var keys []string + + for _, rs := range l { + keys = append(keys, rs.Key) + } + + return keys +} diff --git a/vendor/github.com/vmware/govmomi/object/host_network_system.go b/vendor/github.com/vmware/govmomi/object/host_network_system.go new file mode 100644 index 000000000..1418de42f --- /dev/null +++ b/vendor/github.com/vmware/govmomi/object/host_network_system.go @@ -0,0 +1,357 @@ +/* +Copyright (c) 2015 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package object + +import ( + "github.com/vmware/govmomi/vim25" + "github.com/vmware/govmomi/vim25/methods" + "github.com/vmware/govmomi/vim25/types" + "golang.org/x/net/context" +) + +type HostNetworkSystem struct { + Common +} + +func NewHostNetworkSystem(c *vim25.Client, ref types.ManagedObjectReference) *HostNetworkSystem { + return &HostNetworkSystem{ + Common: NewCommon(c, ref), + } +} + +// AddPortGroup wraps methods.AddPortGroup +func (o HostNetworkSystem) AddPortGroup(ctx context.Context, portgrp types.HostPortGroupSpec) error { + req := types.AddPortGroup{ + This: o.Reference(), + Portgrp: portgrp, + } + + _, err := methods.AddPortGroup(ctx, o.c, &req) + if err != nil { + return err + } + + return nil +} + +// AddServiceConsoleVirtualNic wraps methods.AddServiceConsoleVirtualNic +func (o HostNetworkSystem) AddServiceConsoleVirtualNic(ctx context.Context, portgroup string, nic types.HostVirtualNicSpec) (string, error) { + req := types.AddServiceConsoleVirtualNic{ + This: o.Reference(), + Portgroup: portgroup, + Nic: nic, + } + + res, err := methods.AddServiceConsoleVirtualNic(ctx, o.c, &req) + if err != nil { + return "", err + } + + return res.Returnval, nil +} + +// AddVirtualNic wraps methods.AddVirtualNic +func (o HostNetworkSystem) AddVirtualNic(ctx context.Context, portgroup string, nic types.HostVirtualNicSpec) (string, error) { + req := types.AddVirtualNic{ + This: o.Reference(), + Portgroup: portgroup, + Nic: nic, + } + + res, err := methods.AddVirtualNic(ctx, o.c, &req) + if err != nil { + return "", err + } + + return res.Returnval, nil +} + +// AddVirtualSwitch wraps methods.AddVirtualSwitch +func (o HostNetworkSystem) AddVirtualSwitch(ctx context.Context, vswitchName string, spec *types.HostVirtualSwitchSpec) error { + req := types.AddVirtualSwitch{ + This: o.Reference(), + VswitchName: vswitchName, + Spec: spec, + } + + _, err := methods.AddVirtualSwitch(ctx, o.c, &req) + if err != nil { + return err + } + + return nil +} + +// QueryNetworkHint wraps methods.QueryNetworkHint +func (o HostNetworkSystem) QueryNetworkHint(ctx context.Context, device []string) error { + req := types.QueryNetworkHint{ + This: o.Reference(), + Device: device, + } + + _, err := methods.QueryNetworkHint(ctx, o.c, &req) + if err != nil { + return err + } + + return nil +} + +// RefreshNetworkSystem wraps methods.RefreshNetworkSystem +func (o HostNetworkSystem) RefreshNetworkSystem(ctx context.Context) error { + req := types.RefreshNetworkSystem{ + This: o.Reference(), + } + + _, err := methods.RefreshNetworkSystem(ctx, o.c, &req) + if err != nil { + return err + } + + return nil +} + +// RemovePortGroup wraps methods.RemovePortGroup +func (o HostNetworkSystem) RemovePortGroup(ctx context.Context, pgName string) error { + req := types.RemovePortGroup{ + This: o.Reference(), + PgName: pgName, + } + + _, err := methods.RemovePortGroup(ctx, o.c, &req) + if err != nil { + return err + } + + return nil +} + +// RemoveServiceConsoleVirtualNic wraps methods.RemoveServiceConsoleVirtualNic +func (o HostNetworkSystem) RemoveServiceConsoleVirtualNic(ctx context.Context, device string) error { + req := types.RemoveServiceConsoleVirtualNic{ + This: o.Reference(), + Device: device, + } + + _, err := methods.RemoveServiceConsoleVirtualNic(ctx, o.c, &req) + if err != nil { + return err + } + + return nil +} + +// RemoveVirtualNic wraps methods.RemoveVirtualNic +func (o HostNetworkSystem) RemoveVirtualNic(ctx context.Context, device string) error { + req := types.RemoveVirtualNic{ + This: o.Reference(), 
+ Device: device, + } + + _, err := methods.RemoveVirtualNic(ctx, o.c, &req) + if err != nil { + return err + } + + return nil +} + +// RemoveVirtualSwitch wraps methods.RemoveVirtualSwitch +func (o HostNetworkSystem) RemoveVirtualSwitch(ctx context.Context, vswitchName string) error { + req := types.RemoveVirtualSwitch{ + This: o.Reference(), + VswitchName: vswitchName, + } + + _, err := methods.RemoveVirtualSwitch(ctx, o.c, &req) + if err != nil { + return err + } + + return nil +} + +// RestartServiceConsoleVirtualNic wraps methods.RestartServiceConsoleVirtualNic +func (o HostNetworkSystem) RestartServiceConsoleVirtualNic(ctx context.Context, device string) error { + req := types.RestartServiceConsoleVirtualNic{ + This: o.Reference(), + Device: device, + } + + _, err := methods.RestartServiceConsoleVirtualNic(ctx, o.c, &req) + if err != nil { + return err + } + + return nil +} + +// UpdateConsoleIpRouteConfig wraps methods.UpdateConsoleIpRouteConfig +func (o HostNetworkSystem) UpdateConsoleIpRouteConfig(ctx context.Context, config types.BaseHostIpRouteConfig) error { + req := types.UpdateConsoleIpRouteConfig{ + This: o.Reference(), + Config: config, + } + + _, err := methods.UpdateConsoleIpRouteConfig(ctx, o.c, &req) + if err != nil { + return err + } + + return nil +} + +// UpdateDnsConfig wraps methods.UpdateDnsConfig +func (o HostNetworkSystem) UpdateDnsConfig(ctx context.Context, config types.BaseHostDnsConfig) error { + req := types.UpdateDnsConfig{ + This: o.Reference(), + Config: config, + } + + _, err := methods.UpdateDnsConfig(ctx, o.c, &req) + if err != nil { + return err + } + + return nil +} + +// UpdateIpRouteConfig wraps methods.UpdateIpRouteConfig +func (o HostNetworkSystem) UpdateIpRouteConfig(ctx context.Context, config types.BaseHostIpRouteConfig) error { + req := types.UpdateIpRouteConfig{ + This: o.Reference(), + Config: config, + } + + _, err := methods.UpdateIpRouteConfig(ctx, o.c, &req) + if err != nil { + return err + } + + return nil +} + +// UpdateIpRouteTableConfig wraps methods.UpdateIpRouteTableConfig +func (o HostNetworkSystem) UpdateIpRouteTableConfig(ctx context.Context, config types.HostIpRouteTableConfig) error { + req := types.UpdateIpRouteTableConfig{ + This: o.Reference(), + Config: config, + } + + _, err := methods.UpdateIpRouteTableConfig(ctx, o.c, &req) + if err != nil { + return err + } + + return nil +} + +// UpdateNetworkConfig wraps methods.UpdateNetworkConfig +func (o HostNetworkSystem) UpdateNetworkConfig(ctx context.Context, config types.HostNetworkConfig, changeMode string) (*types.HostNetworkConfigResult, error) { + req := types.UpdateNetworkConfig{ + This: o.Reference(), + Config: config, + ChangeMode: changeMode, + } + + res, err := methods.UpdateNetworkConfig(ctx, o.c, &req) + if err != nil { + return nil, err + } + + return &res.Returnval, nil +} + +// UpdatePhysicalNicLinkSpeed wraps methods.UpdatePhysicalNicLinkSpeed +func (o HostNetworkSystem) UpdatePhysicalNicLinkSpeed(ctx context.Context, device string, linkSpeed *types.PhysicalNicLinkInfo) error { + req := types.UpdatePhysicalNicLinkSpeed{ + This: o.Reference(), + Device: device, + LinkSpeed: linkSpeed, + } + + _, err := methods.UpdatePhysicalNicLinkSpeed(ctx, o.c, &req) + if err != nil { + return err + } + + return nil +} + +// UpdatePortGroup wraps methods.UpdatePortGroup +func (o HostNetworkSystem) UpdatePortGroup(ctx context.Context, pgName string, portgrp types.HostPortGroupSpec) error { + req := types.UpdatePortGroup{ + This: o.Reference(), + PgName: pgName, + Portgrp: 
portgrp, + } + + _, err := methods.UpdatePortGroup(ctx, o.c, &req) + if err != nil { + return err + } + + return nil +} + +// UpdateServiceConsoleVirtualNic wraps methods.UpdateServiceConsoleVirtualNic +func (o HostNetworkSystem) UpdateServiceConsoleVirtualNic(ctx context.Context, device string, nic types.HostVirtualNicSpec) error { + req := types.UpdateServiceConsoleVirtualNic{ + This: o.Reference(), + Device: device, + Nic: nic, + } + + _, err := methods.UpdateServiceConsoleVirtualNic(ctx, o.c, &req) + if err != nil { + return err + } + + return nil +} + +// UpdateVirtualNic wraps methods.UpdateVirtualNic +func (o HostNetworkSystem) UpdateVirtualNic(ctx context.Context, device string, nic types.HostVirtualNicSpec) error { + req := types.UpdateVirtualNic{ + This: o.Reference(), + Device: device, + Nic: nic, + } + + _, err := methods.UpdateVirtualNic(ctx, o.c, &req) + if err != nil { + return err + } + + return nil +} + +// UpdateVirtualSwitch wraps methods.UpdateVirtualSwitch +func (o HostNetworkSystem) UpdateVirtualSwitch(ctx context.Context, vswitchName string, spec types.HostVirtualSwitchSpec) error { + req := types.UpdateVirtualSwitch{ + This: o.Reference(), + VswitchName: vswitchName, + Spec: spec, + } + + _, err := methods.UpdateVirtualSwitch(ctx, o.c, &req) + if err != nil { + return err + } + + return nil +} diff --git a/vendor/github.com/vmware/govmomi/object/host_storage_system.go b/vendor/github.com/vmware/govmomi/object/host_storage_system.go new file mode 100644 index 000000000..36cd68e3a --- /dev/null +++ b/vendor/github.com/vmware/govmomi/object/host_storage_system.go @@ -0,0 +1,80 @@ +/* +Copyright (c) 2015 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package object + +import ( + "errors" + + "github.com/vmware/govmomi/vim25" + "github.com/vmware/govmomi/vim25/methods" + "github.com/vmware/govmomi/vim25/types" + "golang.org/x/net/context" +) + +type HostStorageSystem struct { + Common +} + +func NewHostStorageSystem(c *vim25.Client, ref types.ManagedObjectReference) *HostStorageSystem { + return &HostStorageSystem{ + Common: NewCommon(c, ref), + } +} + +func (s HostStorageSystem) RetrieveDiskPartitionInfo(ctx context.Context, devicePath string) (*types.HostDiskPartitionInfo, error) { + req := types.RetrieveDiskPartitionInfo{ + This: s.Reference(), + DevicePath: []string{devicePath}, + } + + res, err := methods.RetrieveDiskPartitionInfo(ctx, s.c, &req) + if err != nil { + return nil, err + } + + if res.Returnval == nil || len(res.Returnval) == 0 { + return nil, errors.New("no partition info") + } + + return &res.Returnval[0], nil +} + +func (s HostStorageSystem) ComputeDiskPartitionInfo(ctx context.Context, devicePath string, layout types.HostDiskPartitionLayout) (*types.HostDiskPartitionInfo, error) { + req := types.ComputeDiskPartitionInfo{ + This: s.Reference(), + DevicePath: devicePath, + Layout: layout, + } + + res, err := methods.ComputeDiskPartitionInfo(ctx, s.c, &req) + if err != nil { + return nil, err + } + + return &res.Returnval, nil +} + +func (s HostStorageSystem) UpdateDiskPartitionInfo(ctx context.Context, devicePath string, spec types.HostDiskPartitionSpec) error { + req := types.UpdateDiskPartitions{ + This: s.Reference(), + DevicePath: devicePath, + Spec: spec, + } + + _, err := methods.UpdateDiskPartitions(ctx, s.c, &req) + return err +} diff --git a/vendor/github.com/vmware/govmomi/object/host_system.go b/vendor/github.com/vmware/govmomi/object/host_system.go new file mode 100644 index 000000000..206b27cac --- /dev/null +++ b/vendor/github.com/vmware/govmomi/object/host_system.go @@ -0,0 +1,173 @@ +/* +Copyright (c) 2015 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package object + +import ( + "fmt" + "net" + + "github.com/vmware/govmomi/vim25" + "github.com/vmware/govmomi/vim25/methods" + "github.com/vmware/govmomi/vim25/mo" + "github.com/vmware/govmomi/vim25/types" + "golang.org/x/net/context" +) + +type HostSystem struct { + Common + + InventoryPath string +} + +func (h HostSystem) String() string { + if h.InventoryPath == "" { + return h.Common.String() + } + return fmt.Sprintf("%v @ %v", h.Common, h.InventoryPath) +} + +func NewHostSystem(c *vim25.Client, ref types.ManagedObjectReference) *HostSystem { + return &HostSystem{ + Common: NewCommon(c, ref), + } +} + +func (h HostSystem) Name(ctx context.Context) (string, error) { + var mh mo.HostSystem + + err := h.Properties(ctx, h.Reference(), []string{"name"}, &mh) + if err != nil { + return "", err + } + + return mh.Name, nil +} + +func (h HostSystem) ConfigManager() *HostConfigManager { + return NewHostConfigManager(h.c, h.Reference()) +} + +func (h HostSystem) ResourcePool(ctx context.Context) (*ResourcePool, error) { + var mh mo.HostSystem + + err := h.Properties(ctx, h.Reference(), []string{"parent"}, &mh) + if err != nil { + return nil, err + } + + var mcr *mo.ComputeResource + var parent interface{} + + switch mh.Parent.Type { + case "ComputeResource": + mcr = new(mo.ComputeResource) + parent = mcr + case "ClusterComputeResource": + mcc := new(mo.ClusterComputeResource) + mcr = &mcc.ComputeResource + parent = mcc + default: + return nil, fmt.Errorf("unknown host parent type: %s", mh.Parent.Type) + } + + err = h.Properties(ctx, *mh.Parent, []string{"resourcePool"}, parent) + if err != nil { + return nil, err + } + + pool := NewResourcePool(h.c, *mcr.ResourcePool) + return pool, nil +} + +func (h HostSystem) ManagementIPs(ctx context.Context) ([]net.IP, error) { + var mh mo.HostSystem + + err := h.Properties(ctx, h.Reference(), []string{"config.virtualNicManagerInfo.netConfig"}, &mh) + if err != nil { + return nil, err + } + + var ips []net.IP + for _, nc := range mh.Config.VirtualNicManagerInfo.NetConfig { + if nc.NicType == "management" && len(nc.CandidateVnic) > 0 { + ip := net.ParseIP(nc.CandidateVnic[0].Spec.Ip.IpAddress) + if ip != nil { + ips = append(ips, ip) + } + } + } + + return ips, nil +} + +func (h HostSystem) Disconnect(ctx context.Context) (*Task, error) { + req := types.DisconnectHost_Task{ + This: h.Reference(), + } + + res, err := methods.DisconnectHost_Task(ctx, h.c, &req) + if err != nil { + return nil, err + } + + return NewTask(h.c, res.Returnval), nil +} + +func (h HostSystem) Reconnect(ctx context.Context, cnxSpec *types.HostConnectSpec, reconnectSpec *types.HostSystemReconnectSpec) (*Task, error) { + req := types.ReconnectHost_Task{ + This: h.Reference(), + CnxSpec: cnxSpec, + ReconnectSpec: reconnectSpec, + } + + res, err := methods.ReconnectHost_Task(ctx, h.c, &req) + if err != nil { + return nil, err + } + + return NewTask(h.c, res.Returnval), nil +} + +func (h HostSystem) EnterMaintenanceMode(ctx context.Context, timeout int, evacuate bool, spec *types.HostMaintenanceSpec) (*Task, error) { + req := types.EnterMaintenanceMode_Task{ + This: h.Reference(), + Timeout: timeout, + EvacuatePoweredOffVms: types.NewBool(evacuate), + MaintenanceSpec: spec, + } + + res, err := methods.EnterMaintenanceMode_Task(ctx, h.c, &req) + if err != nil { + return nil, err + } + + return NewTask(h.c, res.Returnval), nil +} + +func (h HostSystem) ExitMaintenanceMode(ctx context.Context, timeout int) (*Task, error) { + req := types.ExitMaintenanceMode_Task{ + This: h.Reference(), 
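+		// Per the vSphere API reference, a Timeout <= 0 means no timeout.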
+ Timeout: timeout, + } + + res, err := methods.ExitMaintenanceMode_Task(ctx, h.c, &req) + if err != nil { + return nil, err + } + + return NewTask(h.c, res.Returnval), nil +} diff --git a/vendor/github.com/vmware/govmomi/object/host_virtual_nic_manager.go b/vendor/github.com/vmware/govmomi/object/host_virtual_nic_manager.go new file mode 100644 index 000000000..05338b82e --- /dev/null +++ b/vendor/github.com/vmware/govmomi/object/host_virtual_nic_manager.go @@ -0,0 +1,92 @@ +/* +Copyright (c) 2015 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package object + +import ( + "github.com/vmware/govmomi/vim25" + "github.com/vmware/govmomi/vim25/methods" + "github.com/vmware/govmomi/vim25/mo" + "github.com/vmware/govmomi/vim25/types" + "golang.org/x/net/context" +) + +type HostVirtualNicManager struct { + Common + Host *HostSystem +} + +func NewHostVirtualNicManager(c *vim25.Client, ref types.ManagedObjectReference, host types.ManagedObjectReference) *HostVirtualNicManager { + return &HostVirtualNicManager{ + Common: NewCommon(c, ref), + Host: NewHostSystem(c, host), + } +} + +func (m HostVirtualNicManager) Info(ctx context.Context) (*types.HostVirtualNicManagerInfo, error) { + var vnm mo.HostVirtualNicManager + + err := m.Properties(ctx, m.Reference(), []string{"info"}, &vnm) + if err != nil { + return nil, err + } + + return &vnm.Info, nil +} + +func (m HostVirtualNicManager) DeselectVnic(ctx context.Context, nicType string, device string) error { + if nicType == string(types.HostVirtualNicManagerNicTypeVsan) { + // Avoid fault.NotSupported: + // "Error deselecting device '$device': VSAN interfaces must be deselected using vim.host.VsanSystem" + s, err := m.Host.ConfigManager().VsanSystem(ctx) + if err != nil { + return err + } + + return s.updateVnic(ctx, device, false) + } + + req := types.DeselectVnicForNicType{ + This: m.Reference(), + NicType: nicType, + Device: device, + } + + _, err := methods.DeselectVnicForNicType(ctx, m.Client(), &req) + return err +} + +func (m HostVirtualNicManager) SelectVnic(ctx context.Context, nicType string, device string) error { + if nicType == string(types.HostVirtualNicManagerNicTypeVsan) { + // Avoid fault.NotSupported: + // "Error selecting device '$device': VSAN interfaces must be selected using vim.host.VsanSystem" + s, err := m.Host.ConfigManager().VsanSystem(ctx) + if err != nil { + return err + } + + return s.updateVnic(ctx, device, true) + } + + req := types.SelectVnicForNicType{ + This: m.Reference(), + NicType: nicType, + Device: device, + } + + _, err := methods.SelectVnicForNicType(ctx, m.Client(), &req) + return err +} diff --git a/vendor/github.com/vmware/govmomi/object/host_vsan_system.go b/vendor/github.com/vmware/govmomi/object/host_vsan_system.go new file mode 100644 index 000000000..8c571421d --- /dev/null +++ b/vendor/github.com/vmware/govmomi/object/host_vsan_system.go @@ -0,0 +1,87 @@ +/* +Copyright (c) 2015 VMware, Inc. All Rights Reserved. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package object + +import ( + "github.com/vmware/govmomi/vim25" + "github.com/vmware/govmomi/vim25/methods" + "github.com/vmware/govmomi/vim25/mo" + "github.com/vmware/govmomi/vim25/types" + "golang.org/x/net/context" +) + +type HostVsanSystem struct { + Common +} + +func NewHostVsanSystem(c *vim25.Client, ref types.ManagedObjectReference) *HostVsanSystem { + return &HostVsanSystem{ + Common: NewCommon(c, ref), + } +} + +func (s HostVsanSystem) Update(ctx context.Context, config types.VsanHostConfigInfo) (*Task, error) { + req := types.UpdateVsan_Task{ + This: s.Reference(), + Config: config, + } + + res, err := methods.UpdateVsan_Task(ctx, s.Client(), &req) + if err != nil { + return nil, err + } + + return NewTask(s.Client(), res.Returnval), nil +} + +// updateVnic in support of the HostVirtualNicManager.{SelectVnic,DeselectVnic} methods +func (s HostVsanSystem) updateVnic(ctx context.Context, device string, enable bool) error { + var vsan mo.HostVsanSystem + + err := s.Properties(ctx, s.Reference(), []string{"config.networkInfo.port"}, &vsan) + if err != nil { + return err + } + + info := vsan.Config + + var port []types.VsanHostConfigInfoNetworkInfoPortConfig + + for _, p := range info.NetworkInfo.Port { + if p.Device == device { + continue + } + + port = append(port, p) + } + + if enable { + port = append(port, types.VsanHostConfigInfoNetworkInfoPortConfig{ + Device: device, + }) + } + + info.NetworkInfo.Port = port + + task, err := s.Update(ctx, info) + if err != nil { + return err + } + + _, err = task.WaitForResult(ctx, nil) + return err +} diff --git a/vendor/github.com/vmware/govmomi/object/http_nfc_lease.go b/vendor/github.com/vmware/govmomi/object/http_nfc_lease.go new file mode 100644 index 000000000..f100f2088 --- /dev/null +++ b/vendor/github.com/vmware/govmomi/object/http_nfc_lease.go @@ -0,0 +1,143 @@ +/* +Copyright (c) 2015 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package object + +import ( + "errors" + "fmt" + + "github.com/vmware/govmomi/property" + "github.com/vmware/govmomi/vim25" + "github.com/vmware/govmomi/vim25/methods" + "github.com/vmware/govmomi/vim25/mo" + "github.com/vmware/govmomi/vim25/types" + "golang.org/x/net/context" +) + +type HttpNfcLease struct { + Common +} + +func NewHttpNfcLease(c *vim25.Client, ref types.ManagedObjectReference) *HttpNfcLease { + return &HttpNfcLease{ + Common: NewCommon(c, ref), + } +} + +// HttpNfcLeaseAbort wraps methods.HttpNfcLeaseAbort +func (o HttpNfcLease) HttpNfcLeaseAbort(ctx context.Context, fault *types.LocalizedMethodFault) error { + req := types.HttpNfcLeaseAbort{ + This: o.Reference(), + Fault: fault, + } + + _, err := methods.HttpNfcLeaseAbort(ctx, o.c, &req) + if err != nil { + return err + } + + return nil +} + +// HttpNfcLeaseComplete wraps methods.HttpNfcLeaseComplete +func (o HttpNfcLease) HttpNfcLeaseComplete(ctx context.Context) error { + req := types.HttpNfcLeaseComplete{ + This: o.Reference(), + } + + _, err := methods.HttpNfcLeaseComplete(ctx, o.c, &req) + if err != nil { + return err + } + + return nil +} + +// HttpNfcLeaseGetManifest wraps methods.HttpNfcLeaseGetManifest +func (o HttpNfcLease) HttpNfcLeaseGetManifest(ctx context.Context) error { + req := types.HttpNfcLeaseGetManifest{ + This: o.Reference(), + } + + _, err := methods.HttpNfcLeaseGetManifest(ctx, o.c, &req) + if err != nil { + return err + } + + return nil +} + +// HttpNfcLeaseProgress wraps methods.HttpNfcLeaseProgress +func (o HttpNfcLease) HttpNfcLeaseProgress(ctx context.Context, percent int) error { + req := types.HttpNfcLeaseProgress{ + This: o.Reference(), + Percent: percent, + } + + _, err := methods.HttpNfcLeaseProgress(ctx, o.c, &req) + if err != nil { + return err + } + + return nil +} + +func (o HttpNfcLease) Wait(ctx context.Context) (*types.HttpNfcLeaseInfo, error) { + var lease mo.HttpNfcLease + + pc := property.DefaultCollector(o.c) + err := property.Wait(ctx, pc, o.Reference(), []string{"state", "info", "error"}, func(pc []types.PropertyChange) bool { + done := false + + for _, c := range pc { + if c.Val == nil { + continue + } + + switch c.Name { + case "error": + val := c.Val.(types.LocalizedMethodFault) + lease.Error = &val + done = true + case "info": + val := c.Val.(types.HttpNfcLeaseInfo) + lease.Info = &val + case "state": + lease.State = c.Val.(types.HttpNfcLeaseState) + if lease.State != types.HttpNfcLeaseStateInitializing { + done = true + } + } + } + + return done + }) + + if err != nil { + return nil, err + } + + if lease.State == types.HttpNfcLeaseStateReady { + return lease.Info, nil + } + + if lease.Error != nil { + return nil, errors.New(lease.Error.LocalizedMessage) + } + + return nil, fmt.Errorf("unexpected nfc lease state: %s", lease.State) +} diff --git a/vendor/github.com/vmware/govmomi/object/network.go b/vendor/github.com/vmware/govmomi/object/network.go new file mode 100644 index 000000000..9963744aa --- /dev/null +++ b/vendor/github.com/vmware/govmomi/object/network.go @@ -0,0 +1,54 @@ +/* +Copyright (c) 2015 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package object + +import ( + "path" + + "github.com/vmware/govmomi/vim25" + "github.com/vmware/govmomi/vim25/types" + "golang.org/x/net/context" +) + +type Network struct { + Common + + InventoryPath string +} + +func NewNetwork(c *vim25.Client, ref types.ManagedObjectReference) *Network { + return &Network{ + Common: NewCommon(c, ref), + } +} + +func (n Network) Name() string { + return path.Base(n.InventoryPath) +} + +// EthernetCardBackingInfo returns the VirtualDeviceBackingInfo for this Network +func (n Network) EthernetCardBackingInfo(_ context.Context) (types.BaseVirtualDeviceBackingInfo, error) { + name := n.Name() + + backing := &types.VirtualEthernetCardNetworkBackingInfo{ + VirtualDeviceDeviceBackingInfo: types.VirtualDeviceDeviceBackingInfo{ + DeviceName: name, + }, + } + + return backing, nil +} diff --git a/vendor/github.com/vmware/govmomi/object/network_reference.go b/vendor/github.com/vmware/govmomi/object/network_reference.go new file mode 100644 index 000000000..98dd53813 --- /dev/null +++ b/vendor/github.com/vmware/govmomi/object/network_reference.go @@ -0,0 +1,30 @@ +/* +Copyright (c) 2015 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package object + +import ( + "github.com/vmware/govmomi/vim25/types" + "golang.org/x/net/context" +) + +// The NetworkReference interface is implemented by managed objects +// which can be used as the backing for a VirtualEthernetCard. +type NetworkReference interface { + Reference + + EthernetCardBackingInfo(ctx context.Context) (types.BaseVirtualDeviceBackingInfo, error) +} diff --git a/vendor/github.com/vmware/govmomi/object/network_test.go b/vendor/github.com/vmware/govmomi/object/network_test.go new file mode 100644 index 000000000..e95761545 --- /dev/null +++ b/vendor/github.com/vmware/govmomi/object/network_test.go @@ -0,0 +1,23 @@ +/* +Copyright (c) 2015 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package object + +// Network should implement the Reference interface. +var _ Reference = Network{} + +// Network should implement the NetworkReference interface. 
+var _ NetworkReference = Network{} diff --git a/vendor/github.com/vmware/govmomi/object/ovf_manager.go b/vendor/github.com/vmware/govmomi/object/ovf_manager.go new file mode 100644 index 000000000..e7912d0fd --- /dev/null +++ b/vendor/github.com/vmware/govmomi/object/ovf_manager.go @@ -0,0 +1,103 @@ +/* +Copyright (c) 2015 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package object + +import ( + "github.com/vmware/govmomi/vim25" + "github.com/vmware/govmomi/vim25/methods" + "github.com/vmware/govmomi/vim25/types" + "golang.org/x/net/context" +) + +type OvfManager struct { + Common +} + +func NewOvfManager(c *vim25.Client) *OvfManager { + o := OvfManager{ + Common: NewCommon(c, *c.ServiceContent.OvfManager), + } + + return &o +} + +// CreateDescriptor wraps methods.CreateDescriptor +func (o OvfManager) CreateDescriptor(ctx context.Context, obj Reference, cdp types.OvfCreateDescriptorParams) (*types.OvfCreateDescriptorResult, error) { + req := types.CreateDescriptor{ + This: o.Reference(), + Obj: obj.Reference(), + Cdp: cdp, + } + + res, err := methods.CreateDescriptor(ctx, o.c, &req) + if err != nil { + return nil, err + } + + return &res.Returnval, nil +} + +// CreateImportSpec wraps methods.CreateImportSpec +func (o OvfManager) CreateImportSpec(ctx context.Context, ovfDescriptor string, resourcePool Reference, datastore Reference, cisp types.OvfCreateImportSpecParams) (*types.OvfCreateImportSpecResult, error) { + req := types.CreateImportSpec{ + This: o.Reference(), + OvfDescriptor: ovfDescriptor, + ResourcePool: resourcePool.Reference(), + Datastore: datastore.Reference(), + Cisp: cisp, + } + + res, err := methods.CreateImportSpec(ctx, o.c, &req) + if err != nil { + return nil, err + } + + return &res.Returnval, nil +} + +// ParseDescriptor wraps methods.ParseDescriptor +func (o OvfManager) ParseDescriptor(ctx context.Context, ovfDescriptor string, pdp types.OvfParseDescriptorParams) (*types.OvfParseDescriptorResult, error) { + req := types.ParseDescriptor{ + This: o.Reference(), + OvfDescriptor: ovfDescriptor, + Pdp: pdp, + } + + res, err := methods.ParseDescriptor(ctx, o.c, &req) + if err != nil { + return nil, err + } + + return &res.Returnval, nil +} + +// ValidateHost wraps methods.ValidateHost +func (o OvfManager) ValidateHost(ctx context.Context, ovfDescriptor string, host Reference, vhp types.OvfValidateHostParams) (*types.OvfValidateHostResult, error) { + req := types.ValidateHost{ + This: o.Reference(), + OvfDescriptor: ovfDescriptor, + Host: host.Reference(), + Vhp: vhp, + } + + res, err := methods.ValidateHost(ctx, o.c, &req) + if err != nil { + return nil, err + } + + return &res.Returnval, nil +} diff --git a/vendor/github.com/vmware/govmomi/object/resource_pool.go b/vendor/github.com/vmware/govmomi/object/resource_pool.go new file mode 100644 index 000000000..e03460854 --- /dev/null +++ b/vendor/github.com/vmware/govmomi/object/resource_pool.go @@ -0,0 +1,159 @@ +/* +Copyright (c) 2015 VMware, Inc. All Rights Reserved. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package object + +import ( + "fmt" + + "github.com/vmware/govmomi/vim25" + "github.com/vmware/govmomi/vim25/methods" + "github.com/vmware/govmomi/vim25/mo" + "github.com/vmware/govmomi/vim25/types" + "golang.org/x/net/context" +) + +type ResourcePool struct { + Common + + InventoryPath string +} + +func (p ResourcePool) String() string { + if p.InventoryPath == "" { + return p.Common.String() + } + return fmt.Sprintf("%v @ %v", p.Common, p.InventoryPath) +} + +func NewResourcePool(c *vim25.Client, ref types.ManagedObjectReference) *ResourcePool { + return &ResourcePool{ + Common: NewCommon(c, ref), + } +} + +func (p ResourcePool) Name(ctx context.Context) (string, error) { + var o mo.ResourcePool + + err := p.Properties(ctx, p.Reference(), []string{"name"}, &o) + if err != nil { + return "", err + } + + return o.Name, nil +} + +func (p ResourcePool) ImportVApp(ctx context.Context, spec types.BaseImportSpec, folder *Folder, host *HostSystem) (*HttpNfcLease, error) { + req := types.ImportVApp{ + This: p.Reference(), + Spec: spec, + } + + if folder != nil { + ref := folder.Reference() + req.Folder = &ref + } + + if host != nil { + ref := host.Reference() + req.Host = &ref + } + + res, err := methods.ImportVApp(ctx, p.c, &req) + if err != nil { + return nil, err + } + + return NewHttpNfcLease(p.c, res.Returnval), nil +} + +func (p ResourcePool) Create(ctx context.Context, name string, spec types.ResourceConfigSpec) (*ResourcePool, error) { + req := types.CreateResourcePool{ + This: p.Reference(), + Name: name, + Spec: spec, + } + + res, err := methods.CreateResourcePool(ctx, p.c, &req) + if err != nil { + return nil, err + } + + return NewResourcePool(p.c, res.Returnval), nil +} + +func (p ResourcePool) CreateVApp(ctx context.Context, name string, resSpec types.ResourceConfigSpec, configSpec types.VAppConfigSpec, folder *Folder) (*VirtualApp, error) { + req := types.CreateVApp{ + This: p.Reference(), + Name: name, + ResSpec: resSpec, + ConfigSpec: configSpec, + } + + if folder != nil { + ref := folder.Reference() + req.VmFolder = &ref + } + + res, err := methods.CreateVApp(ctx, p.c, &req) + if err != nil { + return nil, err + } + + return NewVirtualApp(p.c, res.Returnval), nil +} + +func (p ResourcePool) UpdateConfig(ctx context.Context, name string, config *types.ResourceConfigSpec) error { + req := types.UpdateConfig{ + This: p.Reference(), + Name: name, + Config: config, + } + + if config != nil && config.Entity == nil { + ref := p.Reference() + + // Create copy of config so changes won't leak back to the caller + newConfig := *config + newConfig.Entity = &ref + req.Config = &newConfig + } + + _, err := methods.UpdateConfig(ctx, p.c, &req) + return err +} + +func (p ResourcePool) DestroyChildren(ctx context.Context) error { + req := types.DestroyChildren{ + This: p.Reference(), + } + + _, err := methods.DestroyChildren(ctx, p.c, &req) + return err +} + +func (p ResourcePool) Destroy(ctx context.Context) (*Task, error) { + req := types.Destroy_Task{ + This: 
p.Reference(), + } + + res, err := methods.Destroy_Task(ctx, p.c, &req) + if err != nil { + return nil, err + } + + return NewTask(p.c, res.Returnval), nil +} diff --git a/vendor/github.com/vmware/govmomi/object/search_index.go b/vendor/github.com/vmware/govmomi/object/search_index.go new file mode 100644 index 000000000..638c8de2a --- /dev/null +++ b/vendor/github.com/vmware/govmomi/object/search_index.go @@ -0,0 +1,162 @@ +/* +Copyright (c) 2015 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package object + +import ( + "github.com/vmware/govmomi/vim25" + "github.com/vmware/govmomi/vim25/methods" + "github.com/vmware/govmomi/vim25/types" + "golang.org/x/net/context" +) + +type SearchIndex struct { + Common +} + +func NewSearchIndex(c *vim25.Client) *SearchIndex { + s := SearchIndex{ + Common: NewCommon(c, *c.ServiceContent.SearchIndex), + } + + return &s +} + +// FindByDatastorePath finds a virtual machine by its location on a datastore. +func (s SearchIndex) FindByDatastorePath(ctx context.Context, dc *Datacenter, path string) (Reference, error) { + req := types.FindByDatastorePath{ + This: s.Reference(), + Datacenter: dc.Reference(), + Path: path, + } + + res, err := methods.FindByDatastorePath(ctx, s.c, &req) + if err != nil { + return nil, err + } + + if res.Returnval == nil { + return nil, nil + } + return NewReference(s.c, *res.Returnval), nil +} + +// FindByDnsName finds a virtual machine or host by DNS name. +func (s SearchIndex) FindByDnsName(ctx context.Context, dc *Datacenter, dnsName string, vmSearch bool) (Reference, error) { + req := types.FindByDnsName{ + This: s.Reference(), + DnsName: dnsName, + VmSearch: vmSearch, + } + if dc != nil { + ref := dc.Reference() + req.Datacenter = &ref + } + + res, err := methods.FindByDnsName(ctx, s.c, &req) + if err != nil { + return nil, err + } + + if res.Returnval == nil { + return nil, nil + } + return NewReference(s.c, *res.Returnval), nil +} + +// FindByInventoryPath finds a managed entity based on its location in the inventory. +func (s SearchIndex) FindByInventoryPath(ctx context.Context, path string) (Reference, error) { + req := types.FindByInventoryPath{ + This: s.Reference(), + InventoryPath: path, + } + + res, err := methods.FindByInventoryPath(ctx, s.c, &req) + if err != nil { + return nil, err + } + + if res.Returnval == nil { + return nil, nil + } + return NewReference(s.c, *res.Returnval), nil +} + +// FindByIp finds a virtual machine or host by IP address. +func (s SearchIndex) FindByIp(ctx context.Context, dc *Datacenter, ip string, vmSearch bool) (Reference, error) { + req := types.FindByIp{ + This: s.Reference(), + Ip: ip, + VmSearch: vmSearch, + } + if dc != nil { + ref := dc.Reference() + req.Datacenter = &ref + } + + res, err := methods.FindByIp(ctx, s.c, &req) + if err != nil { + return nil, err + } + + if res.Returnval == nil { + return nil, nil + } + return NewReference(s.c, *res.Returnval), nil +} + +// FindByUuid finds a virtual machine or host by UUID. 
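+// When instanceUuid points to true, the lookup is expected to match the
+// vCenter-assigned instance UUID rather than the BIOS UUID.
+//
+// A minimal sketch, assuming "si" is a *SearchIndex from NewSearchIndex,
+// "dc" a *Datacenter, and "ctx" a context.Context (the UUID value is
+// hypothetical):
+//
+//	ref, err := si.FindByUuid(ctx, dc, "4237f366-0000-0000-0000-000000000000", true, nil)
+//	if err != nil {
+//		return err
+//	}
+//	if ref == nil {
+//		// no match: FindByUuid returns nil, nil when nothing is found
+//	}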
+func (s SearchIndex) FindByUuid(ctx context.Context, dc *Datacenter, uuid string, vmSearch bool, instanceUuid *bool) (Reference, error) { + req := types.FindByUuid{ + This: s.Reference(), + Uuid: uuid, + VmSearch: vmSearch, + InstanceUuid: instanceUuid, + } + if dc != nil { + ref := dc.Reference() + req.Datacenter = &ref + } + + res, err := methods.FindByUuid(ctx, s.c, &req) + if err != nil { + return nil, err + } + + if res.Returnval == nil { + return nil, nil + } + return NewReference(s.c, *res.Returnval), nil +} + +// FindChild finds a particular child based on a managed entity name. +func (s SearchIndex) FindChild(ctx context.Context, entity Reference, name string) (Reference, error) { + req := types.FindChild{ + This: s.Reference(), + Entity: entity.Reference(), + Name: name, + } + + res, err := methods.FindChild(ctx, s.c, &req) + if err != nil { + return nil, err + } + + if res.Returnval == nil { + return nil, nil + } + return NewReference(s.c, *res.Returnval), nil +} diff --git a/vendor/github.com/vmware/govmomi/object/search_index_test.go b/vendor/github.com/vmware/govmomi/object/search_index_test.go new file mode 100644 index 000000000..a601f0aee --- /dev/null +++ b/vendor/github.com/vmware/govmomi/object/search_index_test.go @@ -0,0 +1,155 @@ +/* +Copyright (c) 2015 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/
+
+package object
+
+import (
+	"fmt"
+	"reflect"
+	"testing"
+
+	"github.com/vmware/govmomi/test"
+	"github.com/vmware/govmomi/vim25/mo"
+	"golang.org/x/net/context"
+)
+
+func TestSearch(t *testing.T) {
+	c := test.NewAuthenticatedClient(t)
+	s := NewSearchIndex(c)
+
+	ref, err := s.FindChild(context.Background(), NewRootFolder(c), "ha-datacenter")
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	dc, ok := ref.(*Datacenter)
+	if !ok {
+		t.Errorf("Expected Datacenter: %#v", ref)
+	}
+
+	folders, err := dc.Folders(context.Background())
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	ref, err = s.FindChild(context.Background(), folders.DatastoreFolder, "datastore1")
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	_, ok = ref.(*Datastore)
+	if !ok {
+		t.Errorf("Expected Datastore: %#v", ref)
+	}
+
+	ref, err = s.FindByInventoryPath(context.Background(), "/ha-datacenter/network/VM Network")
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	_, ok = ref.(*Network)
+	if !ok {
+		t.Errorf("Expected Network: %#v", ref)
+	}
+
+	crs, err := folders.HostFolder.Children(context.Background())
+	if err != nil {
+		t.Fatal(err)
+	}
+	if len(crs) != 0 {
+		var cr mo.ComputeResource
+		ref = crs[0]
+		err = s.Properties(context.Background(), ref.Reference(), []string{"host"}, &cr)
+		if err != nil {
+			t.Fatal(err)
+		}
+
+		var host mo.HostSystem
+		ref = NewHostSystem(c, cr.Host[0])
+		err = s.Properties(context.Background(), ref.Reference(), []string{"name", "hardware", "config"}, &host)
+		if err != nil {
+			t.Fatal(err)
+		}
+
+		dnsConfig := host.Config.Network.DnsConfig.GetHostDnsConfig()
+		dnsName := fmt.Sprintf("%s.%s", dnsConfig.HostName, dnsConfig.DomainName)
+		shost, err := s.FindByDnsName(context.Background(), dc, dnsName, false)
+		if err != nil {
+			t.Fatal(err)
+		}
+		if !reflect.DeepEqual(ref, shost) {
+			t.Errorf("%#v != %#v\n", ref, shost)
+		}
+
+		shost, err = s.FindByUuid(context.Background(), dc, host.Hardware.SystemInfo.Uuid, false, nil)
+		if err != nil {
+			t.Fatal(err)
+		}
+		if !reflect.DeepEqual(ref, shost) {
+			t.Errorf("%#v != %#v\n", ref, shost)
+		}
+	}
+
+	vms, err := folders.VmFolder.Children(context.Background())
+	if err != nil {
+		t.Fatal(err)
+	}
+	if len(vms) != 0 {
+		var vm mo.VirtualMachine
+		ref = vms[0]
+		err = s.Properties(context.Background(), ref.Reference(), []string{"config", "guest"}, &vm)
+		if err != nil {
+			t.Fatal(err)
+		}
+		svm, err := s.FindByDatastorePath(context.Background(), dc, vm.Config.Files.VmPathName)
+		if err != nil {
+			t.Fatal(err)
+		}
+		if !reflect.DeepEqual(ref, svm) {
+			t.Errorf("%#v != %#v\n", ref, svm)
+		}
+
+		svm, err = s.FindByUuid(context.Background(), dc, vm.Config.Uuid, true, nil)
+		if err != nil {
+			t.Fatal(err)
+		}
+		if !reflect.DeepEqual(ref, svm) {
+			t.Errorf("%#v != %#v\n", ref, svm)
+		}
+
+		if vm.Guest.HostName != "" {
+			svm, err := s.FindByDnsName(context.Background(), dc, vm.Guest.HostName, true)
+			if err != nil {
+				t.Fatal(err)
+			}
+			if !reflect.DeepEqual(ref, svm) {
+				t.Errorf("%#v != %#v\n", ref, svm)
+			}
+		}
+
+		if vm.Guest.IpAddress != "" {
+			svm, err := s.FindByIp(context.Background(), dc, vm.Guest.IpAddress, true)
+			if err != nil {
+				t.Fatal(err)
+			}
+			if !reflect.DeepEqual(ref, svm) {
+				t.Errorf("%#v != %#v\n", ref, svm)
+			}
+		}
+	}
+}
diff --git a/vendor/github.com/vmware/govmomi/object/storage_pod.go b/vendor/github.com/vmware/govmomi/object/storage_pod.go
new file mode 100644
index 000000000..18865ff98
--- /dev/null
+++ b/vendor/github.com/vmware/govmomi/object/storage_pod.go
@@ -0,0 +1,21 @@
+/*
+Copyright (c) 2015 VMware, Inc.
All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package object + +type StoragePod struct { + *Folder +} diff --git a/vendor/github.com/vmware/govmomi/object/storage_resource_manager.go b/vendor/github.com/vmware/govmomi/object/storage_resource_manager.go new file mode 100644 index 000000000..54fd0d9e1 --- /dev/null +++ b/vendor/github.com/vmware/govmomi/object/storage_resource_manager.go @@ -0,0 +1,178 @@ +/* +Copyright (c) 2015 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package object + +import ( + "github.com/vmware/govmomi/vim25" + "github.com/vmware/govmomi/vim25/methods" + "github.com/vmware/govmomi/vim25/types" + "golang.org/x/net/context" +) + +type StorageResourceManager struct { + Common +} + +func NewStorageResourceManager(c *vim25.Client) *StorageResourceManager { + sr := StorageResourceManager{ + Common: NewCommon(c, *c.ServiceContent.StorageResourceManager), + } + + return &sr +} + +func (sr StorageResourceManager) ApplyStorageDrsRecommendation(ctx context.Context, key []string) (*Task, error) { + req := types.ApplyStorageDrsRecommendation_Task{ + This: sr.Reference(), + Key: key, + } + + res, err := methods.ApplyStorageDrsRecommendation_Task(ctx, sr.c, &req) + if err != nil { + return nil, err + } + + return NewTask(sr.c, res.Returnval), nil +} + +func (sr StorageResourceManager) ApplyStorageDrsRecommendationToPod(ctx context.Context, pod *StoragePod, key string) (*Task, error) { + req := types.ApplyStorageDrsRecommendationToPod_Task{ + This: sr.Reference(), + Key: key, + } + + if pod != nil { + req.Pod = pod.Reference() + } + + res, err := methods.ApplyStorageDrsRecommendationToPod_Task(ctx, sr.c, &req) + if err != nil { + return nil, err + } + + return NewTask(sr.c, res.Returnval), nil +} + +func (sr StorageResourceManager) CancelStorageDrsRecommendation(ctx context.Context, key []string) error { + req := types.CancelStorageDrsRecommendation{ + This: sr.Reference(), + Key: key, + } + + _, err := methods.CancelStorageDrsRecommendation(ctx, sr.c, &req) + + return err +} + +func (sr StorageResourceManager) ConfigureDatastoreIORM(ctx context.Context, datastore *Datastore, spec types.StorageIORMConfigSpec, key string) (*Task, error) { + req := types.ConfigureDatastoreIORM_Task{ + This: sr.Reference(), + Spec: spec, + } + + if datastore != nil { + req.Datastore = datastore.Reference() + } + + res, err := methods.ConfigureDatastoreIORM_Task(ctx, sr.c, &req) + if err != nil { + return nil, err + } + + return NewTask(sr.c, res.Returnval), nil +} + +func (sr 
StorageResourceManager) ConfigureStorageDrsForPod(ctx context.Context, pod *StoragePod, spec types.StorageDrsConfigSpec, modify bool) (*Task, error) { + req := types.ConfigureStorageDrsForPod_Task{ + This: sr.Reference(), + Spec: spec, + Modify: modify, + } + + if pod != nil { + req.Pod = pod.Reference() + } + + res, err := methods.ConfigureStorageDrsForPod_Task(ctx, sr.c, &req) + if err != nil { + return nil, err + } + + return NewTask(sr.c, res.Returnval), nil +} + +func (sr StorageResourceManager) QueryDatastorePerformanceSummary(ctx context.Context, datastore *Datastore) ([]types.StoragePerformanceSummary, error) { + req := types.QueryDatastorePerformanceSummary{ + This: sr.Reference(), + } + + if datastore != nil { + req.Datastore = datastore.Reference() + } + + res, err := methods.QueryDatastorePerformanceSummary(ctx, sr.c, &req) + if err != nil { + return nil, err + } + + return res.Returnval, nil +} + +func (sr StorageResourceManager) QueryIORMConfigOption(ctx context.Context, host *HostSystem) (*types.StorageIORMConfigOption, error) { + req := types.QueryIORMConfigOption{ + This: sr.Reference(), + } + + if host != nil { + req.Host = host.Reference() + } + + res, err := methods.QueryIORMConfigOption(ctx, sr.c, &req) + if err != nil { + return nil, err + } + + return &res.Returnval, nil +} + +func (sr StorageResourceManager) RecommendDatastores(ctx context.Context, storageSpec types.StoragePlacementSpec) (*types.StoragePlacementResult, error) { + req := types.RecommendDatastores{ + This: sr.Reference(), + StorageSpec: storageSpec, + } + + res, err := methods.RecommendDatastores(ctx, sr.c, &req) + if err != nil { + return nil, err + } + + return &res.Returnval, nil +} + +func (sr StorageResourceManager) RefreshStorageDrsRecommendation(ctx context.Context, pod *StoragePod) error { + req := types.RefreshStorageDrsRecommendation{ + This: sr.Reference(), + } + + if pod != nil { + req.Pod = pod.Reference() + } + + _, err := methods.RefreshStorageDrsRecommendation(ctx, sr.c, &req) + + return err +} diff --git a/vendor/github.com/vmware/govmomi/object/task.go b/vendor/github.com/vmware/govmomi/object/task.go new file mode 100644 index 000000000..d70a417be --- /dev/null +++ b/vendor/github.com/vmware/govmomi/object/task.go @@ -0,0 +1,52 @@ +/* +Copyright (c) 2015 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package object + +import ( + "github.com/vmware/govmomi/property" + "github.com/vmware/govmomi/task" + "github.com/vmware/govmomi/vim25" + "github.com/vmware/govmomi/vim25/progress" + "github.com/vmware/govmomi/vim25/types" + "golang.org/x/net/context" +) + +// Task is a convenience wrapper around task.Task that keeps a reference to +// the client that was used to create it. This allows users to call the Wait() +// function with only a context parameter, instead of a context parameter, a +// soap.RoundTripper, and reference to the root property collector. 
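+//
+// A minimal usage sketch, assuming "vm" is a *VirtualMachine (or any other
+// object whose methods return a *Task):
+//
+//	task, err := vm.PowerOn(ctx)
+//	if err != nil {
+//		return err
+//	}
+//	// Blocks until the server-side task completes, returning its error, if any.
+//	return task.Wait(ctx)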
+type Task struct { + Common +} + +func NewTask(c *vim25.Client, ref types.ManagedObjectReference) *Task { + t := Task{ + Common: NewCommon(c, ref), + } + + return &t +} + +func (t *Task) Wait(ctx context.Context) error { + _, err := t.WaitForResult(ctx, nil) + return err +} + +func (t *Task) WaitForResult(ctx context.Context, s progress.Sinker) (*types.TaskInfo, error) { + p := property.DefaultCollector(t.c) + return task.Wait(ctx, t.Reference(), p, s) +} diff --git a/vendor/github.com/vmware/govmomi/object/types.go b/vendor/github.com/vmware/govmomi/object/types.go new file mode 100644 index 000000000..f61aa362c --- /dev/null +++ b/vendor/github.com/vmware/govmomi/object/types.go @@ -0,0 +1,65 @@ +/* +Copyright (c) 2015 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package object + +import ( + "github.com/vmware/govmomi/vim25" + "github.com/vmware/govmomi/vim25/types" +) + +type Reference interface { + Reference() types.ManagedObjectReference +} + +func NewReference(c *vim25.Client, e types.ManagedObjectReference) Reference { + switch e.Type { + case "Folder": + return NewFolder(c, e) + case "StoragePod": + return &StoragePod{ + NewFolder(c, e), + } + case "Datacenter": + return NewDatacenter(c, e) + case "VirtualMachine": + return NewVirtualMachine(c, e) + case "VirtualApp": + return &VirtualApp{ + NewResourcePool(c, e), + } + case "ComputeResource": + return NewComputeResource(c, e) + case "ClusterComputeResource": + return NewClusterComputeResource(c, e) + case "HostSystem": + return NewHostSystem(c, e) + case "Network": + return NewNetwork(c, e) + case "ResourcePool": + return NewResourcePool(c, e) + case "DistributedVirtualSwitch": + return NewDistributedVirtualSwitch(c, e) + case "VmwareDistributedVirtualSwitch": + return &VmwareDistributedVirtualSwitch{*NewDistributedVirtualSwitch(c, e)} + case "DistributedVirtualPortgroup": + return NewDistributedVirtualPortgroup(c, e) + case "Datastore": + return NewDatastore(c, e) + default: + panic("Unknown managed entity: " + e.Type) + } +} diff --git a/vendor/github.com/vmware/govmomi/object/virtual_app.go b/vendor/github.com/vmware/govmomi/object/virtual_app.go new file mode 100644 index 000000000..5b93f618f --- /dev/null +++ b/vendor/github.com/vmware/govmomi/object/virtual_app.go @@ -0,0 +1,126 @@ +/* +Copyright (c) 2015 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package object + +import ( + "fmt" + + "golang.org/x/net/context" + + "github.com/vmware/govmomi/vim25" + "github.com/vmware/govmomi/vim25/methods" + "github.com/vmware/govmomi/vim25/mo" + "github.com/vmware/govmomi/vim25/types" +) + +type VirtualApp struct { + *ResourcePool +} + +func NewVirtualApp(c *vim25.Client, ref types.ManagedObjectReference) *VirtualApp { + return &VirtualApp{ + ResourcePool: NewResourcePool(c, ref), + } +} + +func (p VirtualApp) String() string { + if p.InventoryPath == "" { + return p.Common.String() + } + return fmt.Sprintf("%v @ %v", p.Common, p.InventoryPath) +} + +func (p VirtualApp) Name(ctx context.Context) (string, error) { + var o mo.VirtualApp + + err := p.Properties(ctx, p.Reference(), []string{"name"}, &o) + if err != nil { + return "", err + } + + return o.Name, nil +} + +func (p VirtualApp) CreateChildVM_Task(ctx context.Context, config types.VirtualMachineConfigSpec, host *HostSystem) (*Task, error) { + req := types.CreateChildVM_Task{ + This: p.Reference(), + Config: config, + } + + if host != nil { + ref := host.Reference() + req.Host = &ref + } + + res, err := methods.CreateChildVM_Task(ctx, p.c, &req) + if err != nil { + return nil, err + } + + return NewTask(p.c, res.Returnval), nil +} + +func (p VirtualApp) UpdateVAppConfig(ctx context.Context, spec types.VAppConfigSpec) error { + req := types.UpdateVAppConfig{ + This: p.Reference(), + Spec: spec, + } + + _, err := methods.UpdateVAppConfig(ctx, p.c, &req) + return err +} + +func (p VirtualApp) PowerOnVApp_Task(ctx context.Context) (*Task, error) { + req := types.PowerOnVApp_Task{ + This: p.Reference(), + } + + res, err := methods.PowerOnVApp_Task(ctx, p.c, &req) + if err != nil { + return nil, err + } + + return NewTask(p.c, res.Returnval), nil +} + +func (p VirtualApp) PowerOffVApp_Task(ctx context.Context, force bool) (*Task, error) { + req := types.PowerOffVApp_Task{ + This: p.Reference(), + Force: force, + } + + res, err := methods.PowerOffVApp_Task(ctx, p.c, &req) + if err != nil { + return nil, err + } + + return NewTask(p.c, res.Returnval), nil + +} + +func (p VirtualApp) SuspendVApp_Task(ctx context.Context) (*Task, error) { + req := types.SuspendVApp_Task{ + This: p.Reference(), + } + + res, err := methods.SuspendVApp_Task(ctx, p.c, &req) + if err != nil { + return nil, err + } + + return NewTask(p.c, res.Returnval), nil +} diff --git a/vendor/github.com/vmware/govmomi/object/virtual_device_list.go b/vendor/github.com/vmware/govmomi/object/virtual_device_list.go new file mode 100644 index 000000000..e80f69910 --- /dev/null +++ b/vendor/github.com/vmware/govmomi/object/virtual_device_list.go @@ -0,0 +1,754 @@ +/* +Copyright (c) 2015 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package object + +import ( + "errors" + "fmt" + "path/filepath" + "reflect" + "regexp" + "sort" + "strings" + + "github.com/vmware/govmomi/vim25/types" +) + +// Type values for use in BootOrder +const ( + DeviceTypeCdrom = "cdrom" + DeviceTypeDisk = "disk" + DeviceTypeEthernet = "ethernet" + DeviceTypeFloppy = "floppy" +) + +// VirtualDeviceList provides helper methods for working with a list of virtual devices. +type VirtualDeviceList []types.BaseVirtualDevice + +// SCSIControllerTypes are used for adding a new SCSI controller to a VM. +func SCSIControllerTypes() VirtualDeviceList { + // Return a mutable list of SCSI controller types, initialized with defaults. + return VirtualDeviceList([]types.BaseVirtualDevice{ + &types.VirtualLsiLogicController{}, + &types.VirtualBusLogicController{}, + &types.ParaVirtualSCSIController{}, + &types.VirtualLsiLogicSASController{}, + }).Select(func(device types.BaseVirtualDevice) bool { + c := device.(types.BaseVirtualSCSIController).GetVirtualSCSIController() + c.SharedBus = types.VirtualSCSISharingNoSharing + c.BusNumber = -1 + return true + }) +} + +// EthernetCardTypes are used for adding a new ethernet card to a VM. +func EthernetCardTypes() VirtualDeviceList { + return VirtualDeviceList([]types.BaseVirtualDevice{ + &types.VirtualE1000{}, + &types.VirtualE1000e{}, + &types.VirtualVmxnet3{}, + }).Select(func(device types.BaseVirtualDevice) bool { + c := device.(types.BaseVirtualEthernetCard).GetVirtualEthernetCard() + c.AddressType = string(types.VirtualEthernetCardMacTypeGenerated) + c.GetVirtualDevice().Key = -1 + return true + }) +} + +// Select returns a new list containing all elements of the list for which the given func returns true. +func (l VirtualDeviceList) Select(f func(device types.BaseVirtualDevice) bool) VirtualDeviceList { + var found VirtualDeviceList + + for _, device := range l { + if f(device) { + found = append(found, device) + } + } + + return found +} + +// SelectByType returns a new list with devices that are equal to or extend the given type. +func (l VirtualDeviceList) SelectByType(deviceType types.BaseVirtualDevice) VirtualDeviceList { + dtype := reflect.TypeOf(deviceType) + dname := dtype.Elem().Name() + + return l.Select(func(device types.BaseVirtualDevice) bool { + t := reflect.TypeOf(device) + + if t == dtype { + return true + } + + _, ok := t.Elem().FieldByName(dname) + + return ok + }) +} + +// SelectByBackingInfo returns a new list with devices matching the given backing info. +// If the value of backing is nil, any device with a backing of the same type will be returned. 
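+//
+// A minimal sketch, assuming "devices" is a VirtualDeviceList (for example,
+// taken from a VM's device list): a typed nil pointer selects every device
+// with that backing type, while a populated value must also match on the
+// fields compared below.
+//
+//	nics := devices.SelectByBackingInfo((*types.VirtualEthernetCardNetworkBackingInfo)(nil))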
+func (l VirtualDeviceList) SelectByBackingInfo(backing types.BaseVirtualDeviceBackingInfo) VirtualDeviceList { + t := reflect.TypeOf(backing) + + return l.Select(func(device types.BaseVirtualDevice) bool { + db := device.GetVirtualDevice().Backing + if db == nil { + return false + } + + if reflect.TypeOf(db) != t { + return false + } + + if reflect.ValueOf(backing).IsNil() { + // selecting by backing type + return true + } + + switch a := db.(type) { + case *types.VirtualEthernetCardNetworkBackingInfo: + b := backing.(*types.VirtualEthernetCardNetworkBackingInfo) + return a.DeviceName == b.DeviceName + case *types.VirtualEthernetCardDistributedVirtualPortBackingInfo: + b := backing.(*types.VirtualEthernetCardDistributedVirtualPortBackingInfo) + return a.Port.SwitchUuid == b.Port.SwitchUuid + case *types.VirtualDiskFlatVer2BackingInfo: + b := backing.(*types.VirtualDiskFlatVer2BackingInfo) + if a.Parent != nil && b.Parent != nil { + return a.Parent.FileName == b.Parent.FileName + } + return a.FileName == b.FileName + case *types.VirtualSerialPortURIBackingInfo: + b := backing.(*types.VirtualSerialPortURIBackingInfo) + return a.ServiceURI == b.ServiceURI + case types.BaseVirtualDeviceFileBackingInfo: + b := backing.(types.BaseVirtualDeviceFileBackingInfo) + return a.GetVirtualDeviceFileBackingInfo().FileName == b.GetVirtualDeviceFileBackingInfo().FileName + default: + return false + } + }) +} + +// Find returns the device matching the given name. +func (l VirtualDeviceList) Find(name string) types.BaseVirtualDevice { + for _, device := range l { + if l.Name(device) == name { + return device + } + } + return nil +} + +// FindByKey returns the device matching the given key. +func (l VirtualDeviceList) FindByKey(key int) types.BaseVirtualDevice { + for _, device := range l { + if device.GetVirtualDevice().Key == key { + return device + } + } + return nil +} + +// FindIDEController will find the named IDE controller if given, otherwise will pick an available controller. +// An error is returned if the named controller is not found or not an IDE controller. Or, if name is not +// given and no available controller can be found. +func (l VirtualDeviceList) FindIDEController(name string) (*types.VirtualIDEController, error) { + if name != "" { + d := l.Find(name) + if d == nil { + return nil, fmt.Errorf("device '%s' not found", name) + } + if c, ok := d.(*types.VirtualIDEController); ok { + return c, nil + } + return nil, fmt.Errorf("%s is not an IDE controller", name) + } + + c := l.PickController((*types.VirtualIDEController)(nil)) + if c == nil { + return nil, errors.New("no available IDE controller") + } + + return c.(*types.VirtualIDEController), nil +} + +// FindSCSIController will find the named SCSI controller if given, otherwise will pick an available controller. +// An error is returned if the named controller is not found or not an SCSI controller. Or, if name is not +// given and no available controller can be found. 
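+//
+// A minimal sketch, assuming "devices" is a VirtualDeviceList:
+//
+//	scsi, err := devices.FindSCSIController("") // empty name picks any controller with a free slot
+//	if err != nil {
+//		return err
+//	}
+//	disk := devices.CreateDisk(scsi, "") // empty name lets a file name be chosen for you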
+func (l VirtualDeviceList) FindSCSIController(name string) (*types.VirtualSCSIController, error) { + if name != "" { + d := l.Find(name) + if d == nil { + return nil, fmt.Errorf("device '%s' not found", name) + } + if c, ok := d.(types.BaseVirtualSCSIController); ok { + return c.GetVirtualSCSIController(), nil + } + return nil, fmt.Errorf("%s is not an SCSI controller", name) + } + + c := l.PickController((*types.VirtualSCSIController)(nil)) + if c == nil { + return nil, errors.New("no available SCSI controller") + } + + return c.(types.BaseVirtualSCSIController).GetVirtualSCSIController(), nil +} + +// CreateSCSIController creates a new SCSI controller of type name if given, otherwise defaults to lsilogic. +func (l VirtualDeviceList) CreateSCSIController(name string) (types.BaseVirtualDevice, error) { + ctypes := SCSIControllerTypes() + + if name == "scsi" || name == "" { + name = ctypes.Type(ctypes[0]) + } + + found := ctypes.Select(func(device types.BaseVirtualDevice) bool { + return l.Type(device) == name + }) + + if len(found) == 0 { + return nil, fmt.Errorf("unknown SCSI controller type '%s'", name) + } + + c, ok := found[0].(types.BaseVirtualSCSIController) + if !ok { + return nil, fmt.Errorf("invalid SCSI controller type '%s'", name) + } + + scsi := c.GetVirtualSCSIController() + + scsi.BusNumber = l.newSCSIBusNumber() + + return c.(types.BaseVirtualDevice), nil +} + +var scsiBusNumbers = []int{0, 1, 2, 3} + +// newSCSIBusNumber returns the bus number to use for adding a new SCSI bus device. +// -1 is returned if there are no bus numbers available. +func (l VirtualDeviceList) newSCSIBusNumber() int { + var used []int + + for _, d := range l.SelectByType((*types.VirtualSCSIController)(nil)) { + num := d.(types.BaseVirtualSCSIController).GetVirtualSCSIController().BusNumber + if num >= 0 { + used = append(used, num) + } // else caller is creating a new vm using SCSIControllerTypes + } + + sort.Ints(used) + + for i, n := range scsiBusNumbers { + if i == len(used) || n != used[i] { + return n + } + } + + return -1 +} + +// FindDiskController will find an existing ide or scsi disk controller. +func (l VirtualDeviceList) FindDiskController(name string) (types.BaseVirtualController, error) { + switch { + case name == "ide": + return l.FindIDEController("") + case name == "scsi" || name == "": + return l.FindSCSIController("") + default: + if c, ok := l.Find(name).(types.BaseVirtualController); ok { + return c, nil + } + return nil, fmt.Errorf("%s is not a valid controller", name) + } +} + +// PickController returns a controller of the given type(s). +// If no controllers are found or have no available slots, then nil is returned. +func (l VirtualDeviceList) PickController(kind types.BaseVirtualController) types.BaseVirtualController { + l = l.SelectByType(kind.(types.BaseVirtualDevice)).Select(func(device types.BaseVirtualDevice) bool { + num := len(device.(types.BaseVirtualController).GetVirtualController().Device) + + switch device.(type) { + case types.BaseVirtualSCSIController: + return num < 15 + case *types.VirtualIDEController: + return num < 2 + default: + return true + } + }) + + if len(l) == 0 { + return nil + } + + return l[0].(types.BaseVirtualController) +} + +// newUnitNumber returns the unit number to use for attaching a new device to the given controller. 
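+// It returns one past the highest unit number already in use on that
+// controller, or 0 when no devices are attached yet.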
+func (l VirtualDeviceList) newUnitNumber(c types.BaseVirtualController) int { + key := c.GetVirtualController().Key + max := -1 + + for _, device := range l { + d := device.GetVirtualDevice() + + if d.ControllerKey == key { + if d.UnitNumber > max { + max = d.UnitNumber + } + } + } + + return max + 1 +} + +// AssignController assigns a device to a controller. +func (l VirtualDeviceList) AssignController(device types.BaseVirtualDevice, c types.BaseVirtualController) { + d := device.GetVirtualDevice() + d.ControllerKey = c.GetVirtualController().Key + d.UnitNumber = l.newUnitNumber(c) + d.Key = -1 +} + +// CreateDisk creates a new VirtualDisk device which can be added to a VM. +func (l VirtualDeviceList) CreateDisk(c types.BaseVirtualController, name string) *types.VirtualDisk { + // If name is not specified, one will be chosen for you. + // But if when given, make sure it ends in .vmdk, otherwise it will be treated as a directory. + if len(name) > 0 && filepath.Ext(name) != ".vmdk" { + name += ".vmdk" + } + + device := &types.VirtualDisk{ + VirtualDevice: types.VirtualDevice{ + Backing: &types.VirtualDiskFlatVer2BackingInfo{ + DiskMode: string(types.VirtualDiskModePersistent), + ThinProvisioned: types.NewBool(true), + VirtualDeviceFileBackingInfo: types.VirtualDeviceFileBackingInfo{ + FileName: name, + }, + }, + }, + } + + l.AssignController(device, c) + + if device.UnitNumber == 0 { + device.UnitNumber = -1 // TODO: this field is annotated as omitempty + } + + return device +} + +// ChildDisk creates a new VirtualDisk device, linked to the given parent disk, which can be added to a VM. +func (l VirtualDeviceList) ChildDisk(parent *types.VirtualDisk) *types.VirtualDisk { + disk := *parent + backing := disk.Backing.(*types.VirtualDiskFlatVer2BackingInfo) + ds := strings.SplitN(backing.FileName[1:], "]", 2) + + // Use specified disk as parent backing to a new disk. + disk.Backing = &types.VirtualDiskFlatVer2BackingInfo{ + VirtualDeviceFileBackingInfo: types.VirtualDeviceFileBackingInfo{ + FileName: fmt.Sprintf("[%s]", ds[0]), + }, + Parent: backing, + DiskMode: backing.DiskMode, + ThinProvisioned: backing.ThinProvisioned, + } + + return &disk +} + +func (l VirtualDeviceList) connectivity(device types.BaseVirtualDevice, v bool) error { + c := device.GetVirtualDevice().Connectable + if c == nil { + return fmt.Errorf("%s is not connectable", l.Name(device)) + } + + c.Connected = v + c.StartConnected = v + + return nil +} + +// Connect changes the device to connected, returns an error if the device is not connectable. +func (l VirtualDeviceList) Connect(device types.BaseVirtualDevice) error { + return l.connectivity(device, true) +} + +// Disconnect changes the device to disconnected, returns an error if the device is not connectable. +func (l VirtualDeviceList) Disconnect(device types.BaseVirtualDevice) error { + return l.connectivity(device, false) +} + +// FindCdrom finds a cdrom device with the given name, defaulting to the first cdrom device if any. 
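+//
+// A minimal sketch, assuming "devices" is a VirtualDeviceList (the datastore
+// path is hypothetical):
+//
+//	cdrom, err := devices.FindCdrom("")
+//	if err != nil {
+//		return err
+//	}
+//	devices.InsertIso(cdrom, "[datastore1] iso/example.iso")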
+func (l VirtualDeviceList) FindCdrom(name string) (*types.VirtualCdrom, error) { + if name != "" { + d := l.Find(name) + if d == nil { + return nil, fmt.Errorf("device '%s' not found", name) + } + if c, ok := d.(*types.VirtualCdrom); ok { + return c, nil + } + return nil, fmt.Errorf("%s is not a cdrom device", name) + } + + c := l.SelectByType((*types.VirtualCdrom)(nil)) + if len(c) == 0 { + return nil, errors.New("no cdrom device found") + } + + return c[0].(*types.VirtualCdrom), nil +} + +// CreateCdrom creates a new VirtualCdrom device which can be added to a VM. +func (l VirtualDeviceList) CreateCdrom(c *types.VirtualIDEController) (*types.VirtualCdrom, error) { + device := &types.VirtualCdrom{} + + l.AssignController(device, c) + + l.setDefaultCdromBacking(device) + + device.Connectable = &types.VirtualDeviceConnectInfo{ + AllowGuestControl: true, + Connected: true, + StartConnected: true, + } + + return device, nil +} + +// InsertIso changes the cdrom device backing to use the given iso file. +func (l VirtualDeviceList) InsertIso(device *types.VirtualCdrom, iso string) *types.VirtualCdrom { + device.Backing = &types.VirtualCdromIsoBackingInfo{ + VirtualDeviceFileBackingInfo: types.VirtualDeviceFileBackingInfo{ + FileName: iso, + }, + } + + return device +} + +// EjectIso removes the iso file based backing and replaces with the default cdrom backing. +func (l VirtualDeviceList) EjectIso(device *types.VirtualCdrom) *types.VirtualCdrom { + l.setDefaultCdromBacking(device) + return device +} + +func (l VirtualDeviceList) setDefaultCdromBacking(device *types.VirtualCdrom) { + device.Backing = &types.VirtualCdromAtapiBackingInfo{ + VirtualDeviceDeviceBackingInfo: types.VirtualDeviceDeviceBackingInfo{ + DeviceName: fmt.Sprintf("%s-%d-%d", DeviceTypeCdrom, device.ControllerKey, device.UnitNumber), + UseAutoDetect: types.NewBool(false), + }, + } +} + +// FindFloppy finds a floppy device with the given name, defaulting to the first floppy device if any. +func (l VirtualDeviceList) FindFloppy(name string) (*types.VirtualFloppy, error) { + if name != "" { + d := l.Find(name) + if d == nil { + return nil, fmt.Errorf("device '%s' not found", name) + } + if c, ok := d.(*types.VirtualFloppy); ok { + return c, nil + } + return nil, fmt.Errorf("%s is not a floppy device", name) + } + + c := l.SelectByType((*types.VirtualFloppy)(nil)) + if len(c) == 0 { + return nil, errors.New("no floppy device found") + } + + return c[0].(*types.VirtualFloppy), nil +} + +// CreateFloppy creates a new VirtualFloppy device which can be added to a VM. +func (l VirtualDeviceList) CreateFloppy() (*types.VirtualFloppy, error) { + device := &types.VirtualFloppy{} + + c := l.PickController((*types.VirtualSIOController)(nil)) + if c == nil { + return nil, errors.New("no available SIO controller") + } + + l.AssignController(device, c) + + l.setDefaultFloppyBacking(device) + + device.Connectable = &types.VirtualDeviceConnectInfo{ + AllowGuestControl: true, + Connected: true, + StartConnected: true, + } + + return device, nil +} + +// InsertImg changes the floppy device backing to use the given img file. +func (l VirtualDeviceList) InsertImg(device *types.VirtualFloppy, img string) *types.VirtualFloppy { + device.Backing = &types.VirtualFloppyImageBackingInfo{ + VirtualDeviceFileBackingInfo: types.VirtualDeviceFileBackingInfo{ + FileName: img, + }, + } + + return device +} + +// EjectImg removes the img file based backing and replaces with the default floppy backing. 
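+//
+// A minimal sketch, assuming "devices" is a VirtualDeviceList and "floppy"
+// came from FindFloppy (the image path is hypothetical):
+//
+//	devices.InsertImg(floppy, "[datastore1] img/boot.img")
+//	devices.EjectImg(floppy) // restore the default device backing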
+func (l VirtualDeviceList) EjectImg(device *types.VirtualFloppy) *types.VirtualFloppy {
+	l.setDefaultFloppyBacking(device)
+	return device
+}
+
+func (l VirtualDeviceList) setDefaultFloppyBacking(device *types.VirtualFloppy) {
+	device.Backing = &types.VirtualFloppyDeviceBackingInfo{
+		VirtualDeviceDeviceBackingInfo: types.VirtualDeviceDeviceBackingInfo{
+			DeviceName:    fmt.Sprintf("%s-%d", DeviceTypeFloppy, device.UnitNumber),
+			UseAutoDetect: types.NewBool(false),
+		},
+	}
+}
+
+// FindSerialPort finds a serial port device with the given name, defaulting to the first serial port device if any.
+func (l VirtualDeviceList) FindSerialPort(name string) (*types.VirtualSerialPort, error) {
+	if name != "" {
+		d := l.Find(name)
+		if d == nil {
+			return nil, fmt.Errorf("device '%s' not found", name)
+		}
+		if c, ok := d.(*types.VirtualSerialPort); ok {
+			return c, nil
+		}
+		return nil, fmt.Errorf("%s is not a serial port device", name)
+	}
+
+	c := l.SelectByType((*types.VirtualSerialPort)(nil))
+	if len(c) == 0 {
+		return nil, errors.New("no serial port device found")
+	}
+
+	return c[0].(*types.VirtualSerialPort), nil
+}
+
+// CreateSerialPort creates a new VirtualSerialPort device which can be added to a VM.
+func (l VirtualDeviceList) CreateSerialPort() (*types.VirtualSerialPort, error) {
+	device := &types.VirtualSerialPort{
+		YieldOnPoll: true,
+	}
+
+	c := l.PickController((*types.VirtualSIOController)(nil))
+	if c == nil {
+		return nil, errors.New("no available SIO controller")
+	}
+
+	l.AssignController(device, c)
+
+	l.setDefaultSerialPortBacking(device)
+
+	return device, nil
+}
+
+// ConnectSerialPort connects a serial port to a server or client uri.
+func (l VirtualDeviceList) ConnectSerialPort(device *types.VirtualSerialPort, uri string, client bool) *types.VirtualSerialPort {
+	direction := types.VirtualDeviceURIBackingOptionDirectionServer
+	if client {
+		direction = types.VirtualDeviceURIBackingOptionDirectionClient
+	}
+
+	device.Backing = &types.VirtualSerialPortURIBackingInfo{
+		VirtualDeviceURIBackingInfo: types.VirtualDeviceURIBackingInfo{
+			Direction:  string(direction),
+			ServiceURI: uri,
+		},
+	}
+
+	return device
+}
+
+// DisconnectSerialPort disconnects the serial port backing.
+func (l VirtualDeviceList) DisconnectSerialPort(device *types.VirtualSerialPort) *types.VirtualSerialPort {
+	l.setDefaultSerialPortBacking(device)
+	return device
+}
+
+func (l VirtualDeviceList) setDefaultSerialPortBacking(device *types.VirtualSerialPort) {
+	device.Backing = &types.VirtualSerialPortURIBackingInfo{
+		VirtualDeviceURIBackingInfo: types.VirtualDeviceURIBackingInfo{
+			Direction:  "client",
+			ServiceURI: "localhost:0",
+		},
+	}
+}
+
+// CreateEthernetCard creates a new VirtualEthernetCard with the given name, initialized with the given backing.
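+//
+// A minimal sketch, assuming "devices" is a VirtualDeviceList and "network"
+// implements NetworkReference:
+//
+//	backing, err := network.EthernetCardBackingInfo(ctx)
+//	if err != nil {
+//		return err
+//	}
+//	nic, err := devices.CreateEthernetCard("e1000", backing)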
+func (l VirtualDeviceList) CreateEthernetCard(name string, backing types.BaseVirtualDeviceBackingInfo) (types.BaseVirtualDevice, error) {
+	ctypes := EthernetCardTypes()
+
+	if name == "" {
+		name = ctypes.deviceName(ctypes[0])
+	}
+
+	found := ctypes.Select(func(device types.BaseVirtualDevice) bool {
+		return l.deviceName(device) == name
+	})
+
+	if len(found) == 0 {
+		return nil, fmt.Errorf("unknown ethernet card type '%s'", name)
+	}
+
+	c, ok := found[0].(types.BaseVirtualEthernetCard)
+	if !ok {
+		return nil, fmt.Errorf("invalid ethernet card type '%s'", name)
+	}
+
+	c.GetVirtualEthernetCard().Backing = backing
+
+	return c.(types.BaseVirtualDevice), nil
+}
+
+// PrimaryMacAddress returns the MacAddress field of the primary VirtualEthernetCard.
+func (l VirtualDeviceList) PrimaryMacAddress() string {
+	eth0 := l.Find("ethernet-0")
+
+	if eth0 == nil {
+		return ""
+	}
+
+	return eth0.(types.BaseVirtualEthernetCard).GetVirtualEthernetCard().MacAddress
+}
+
+// convert a BaseVirtualDevice to a BaseVirtualMachineBootOptionsBootableDevice
+var bootableDevices = map[string]func(device types.BaseVirtualDevice) types.BaseVirtualMachineBootOptionsBootableDevice{
+	DeviceTypeCdrom: func(types.BaseVirtualDevice) types.BaseVirtualMachineBootOptionsBootableDevice {
+		return &types.VirtualMachineBootOptionsBootableCdromDevice{}
+	},
+	DeviceTypeDisk: func(d types.BaseVirtualDevice) types.BaseVirtualMachineBootOptionsBootableDevice {
+		return &types.VirtualMachineBootOptionsBootableDiskDevice{
+			DeviceKey: d.GetVirtualDevice().Key,
+		}
+	},
+	DeviceTypeEthernet: func(d types.BaseVirtualDevice) types.BaseVirtualMachineBootOptionsBootableDevice {
+		return &types.VirtualMachineBootOptionsBootableEthernetDevice{
+			DeviceKey: d.GetVirtualDevice().Key,
+		}
+	},
+	DeviceTypeFloppy: func(types.BaseVirtualDevice) types.BaseVirtualMachineBootOptionsBootableDevice {
+		return &types.VirtualMachineBootOptionsBootableFloppyDevice{}
+	},
+}
+
+// BootOrder returns a list of devices which can be used to set boot order via VirtualMachine.SetBootOptions.
+// The order can be any of "ethernet", "cdrom", "floppy" or "disk", or a specific device name.
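+//
+// A sketch of typical use (assumes ctx, vm and a devices list are in scope):
+//
+//	order := devices.BootOrder([]string{DeviceTypeCdrom, DeviceTypeDisk})
+//	_ = vm.SetBootOptions(ctx, &types.VirtualMachineBootOptions{BootOrder: order})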
+func (l VirtualDeviceList) BootOrder(order []string) []types.BaseVirtualMachineBootOptionsBootableDevice { + var devices []types.BaseVirtualMachineBootOptionsBootableDevice + + for _, name := range order { + if kind, ok := bootableDevices[name]; ok { + for _, device := range l { + if l.Type(device) == name { + devices = append(devices, kind(device)) + } + + } + continue + } + + if d := l.Find(name); d != nil { + if kind, ok := bootableDevices[l.Type(d)]; ok { + devices = append(devices, kind(d)) + } + } + } + + return devices +} + +// SelectBootOrder returns an ordered list of devices matching the given bootable device order +func (l VirtualDeviceList) SelectBootOrder(order []types.BaseVirtualMachineBootOptionsBootableDevice) VirtualDeviceList { + var devices VirtualDeviceList + + for _, bd := range order { + for _, device := range l { + if kind, ok := bootableDevices[l.Type(device)]; ok { + if reflect.DeepEqual(kind(device), bd) { + devices = append(devices, device) + } + } + } + } + + return devices +} + +// TypeName returns the vmodl type name of the device +func (l VirtualDeviceList) TypeName(device types.BaseVirtualDevice) string { + return reflect.TypeOf(device).Elem().Name() +} + +var deviceNameRegexp = regexp.MustCompile(`(?:Virtual)?(?:Machine)?(\w+?)(?:Card|Device|Controller)?$`) + +func (l VirtualDeviceList) deviceName(device types.BaseVirtualDevice) string { + name := "device" + typeName := l.TypeName(device) + + m := deviceNameRegexp.FindStringSubmatch(typeName) + if len(m) == 2 { + name = strings.ToLower(m[1]) + } + + return name +} + +// Type returns a human-readable name for the given device +func (l VirtualDeviceList) Type(device types.BaseVirtualDevice) string { + switch device.(type) { + case types.BaseVirtualEthernetCard: + return DeviceTypeEthernet + case *types.ParaVirtualSCSIController: + return "pvscsi" + case *types.VirtualLsiLogicSASController: + return "lsilogic-sas" + default: + return l.deviceName(device) + } +} + +// Name returns a stable, human-readable name for the given device +func (l VirtualDeviceList) Name(device types.BaseVirtualDevice) string { + var key string + d := device.GetVirtualDevice() + dtype := l.Type(device) + + switch dtype { + case DeviceTypeEthernet: + key = fmt.Sprintf("%d", d.UnitNumber-7) + case DeviceTypeDisk: + key = fmt.Sprintf("%d-%d", d.ControllerKey, d.UnitNumber) + default: + key = fmt.Sprintf("%d", d.Key) + } + + return fmt.Sprintf("%s-%s", dtype, key) +} diff --git a/vendor/github.com/vmware/govmomi/object/virtual_device_list_test.go b/vendor/github.com/vmware/govmomi/object/virtual_device_list_test.go new file mode 100644 index 000000000..f15c0d68a --- /dev/null +++ b/vendor/github.com/vmware/govmomi/object/virtual_device_list_test.go @@ -0,0 +1,848 @@ +/* +Copyright (c) 2015 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package object + +import ( + "math/rand" + "reflect" + "testing" + + "github.com/vmware/govmomi/vim25/types" +) + +var devices = VirtualDeviceList([]types.BaseVirtualDevice{ + &types.VirtualIDEController{ + VirtualController: types.VirtualController{ + VirtualDevice: types.VirtualDevice{ + DynamicData: types.DynamicData{}, + Key: 200, + DeviceInfo: &types.Description{ + DynamicData: types.DynamicData{}, + Label: "IDE 0", + Summary: "IDE 0", + }, + Backing: nil, + Connectable: (*types.VirtualDeviceConnectInfo)(nil), + SlotInfo: nil, + ControllerKey: 0, + UnitNumber: 0, + }, + BusNumber: 0, + Device: []int{3001, 3000}, + }, + }, + &types.VirtualIDEController{ + VirtualController: types.VirtualController{ + VirtualDevice: types.VirtualDevice{ + DynamicData: types.DynamicData{}, + Key: 201, + DeviceInfo: &types.Description{ + DynamicData: types.DynamicData{}, + Label: "IDE 1", + Summary: "IDE 1", + }, + Backing: nil, + Connectable: (*types.VirtualDeviceConnectInfo)(nil), + SlotInfo: nil, + ControllerKey: 0, + UnitNumber: 0, + }, + BusNumber: 1, + Device: []int{3002}, + }, + }, + &types.VirtualPS2Controller{ + VirtualController: types.VirtualController{ + VirtualDevice: types.VirtualDevice{ + DynamicData: types.DynamicData{}, + Key: 300, + DeviceInfo: &types.Description{ + DynamicData: types.DynamicData{}, + Label: "PS2 controller 0", + Summary: "PS2 controller 0", + }, + Backing: nil, + Connectable: (*types.VirtualDeviceConnectInfo)(nil), + SlotInfo: nil, + ControllerKey: 0, + UnitNumber: 0, + }, + BusNumber: 0, + Device: []int{600, 700}, + }, + }, + &types.VirtualPCIController{ + VirtualController: types.VirtualController{ + VirtualDevice: types.VirtualDevice{ + DynamicData: types.DynamicData{}, + Key: 100, + DeviceInfo: &types.Description{ + DynamicData: types.DynamicData{}, + Label: "PCI controller 0", + Summary: "PCI controller 0", + }, + Backing: nil, + Connectable: (*types.VirtualDeviceConnectInfo)(nil), + SlotInfo: nil, + ControllerKey: 0, + UnitNumber: 0, + }, + BusNumber: 0, + Device: []int{500, 12000, 1000, 4000}, + }, + }, + &types.VirtualSIOController{ + VirtualController: types.VirtualController{ + VirtualDevice: types.VirtualDevice{ + DynamicData: types.DynamicData{}, + Key: 400, + DeviceInfo: &types.Description{ + DynamicData: types.DynamicData{}, + Label: "SIO controller 0", + Summary: "SIO controller 0", + }, + Backing: nil, + Connectable: (*types.VirtualDeviceConnectInfo)(nil), + SlotInfo: nil, + ControllerKey: 0, + UnitNumber: 0, + }, + BusNumber: 0, + Device: []int{9000}, + }, + }, + &types.VirtualKeyboard{ + VirtualDevice: types.VirtualDevice{ + DynamicData: types.DynamicData{}, + Key: 600, + DeviceInfo: &types.Description{ + DynamicData: types.DynamicData{}, + Label: "Keyboard ", + Summary: "Keyboard", + }, + Backing: nil, + Connectable: (*types.VirtualDeviceConnectInfo)(nil), + SlotInfo: nil, + ControllerKey: 300, + UnitNumber: 0, + }, + }, + &types.VirtualPointingDevice{ + VirtualDevice: types.VirtualDevice{ + DynamicData: types.DynamicData{}, + Key: 700, + DeviceInfo: &types.Description{ + DynamicData: types.DynamicData{}, + Label: "Pointing device", + Summary: "Pointing device; Device", + }, + Backing: &types.VirtualPointingDeviceDeviceBackingInfo{ + VirtualDeviceDeviceBackingInfo: types.VirtualDeviceDeviceBackingInfo{}, + HostPointingDevice: "autodetect", + }, + Connectable: (*types.VirtualDeviceConnectInfo)(nil), + SlotInfo: nil, + ControllerKey: 300, + UnitNumber: 1, + }, + }, + &types.VirtualMachineVideoCard{ + VirtualDevice: types.VirtualDevice{ + 
DynamicData: types.DynamicData{}, + Key: 500, + DeviceInfo: &types.Description{ + DynamicData: types.DynamicData{}, + Label: "Video card ", + Summary: "Video card", + }, + Backing: nil, + Connectable: (*types.VirtualDeviceConnectInfo)(nil), + SlotInfo: nil, + ControllerKey: 100, + UnitNumber: 0, + }, + VideoRamSizeInKB: 4096, + NumDisplays: 1, + UseAutoDetect: types.NewBool(false), + Enable3DSupport: types.NewBool(false), + Use3dRenderer: "automatic", + }, + &types.VirtualMachineVMCIDevice{ + VirtualDevice: types.VirtualDevice{ + DynamicData: types.DynamicData{}, + Key: 12000, + DeviceInfo: &types.Description{ + DynamicData: types.DynamicData{}, + Label: "VMCI device", + Summary: "Device on the virtual machine PCI bus that provides support for the virtual machine communication interface", + }, + Backing: nil, + Connectable: (*types.VirtualDeviceConnectInfo)(nil), + SlotInfo: &types.VirtualDevicePciBusSlotInfo{ + VirtualDeviceBusSlotInfo: types.VirtualDeviceBusSlotInfo{}, + PciSlotNumber: 33, + }, + ControllerKey: 100, + UnitNumber: 17, + }, + Id: 1754519335, + AllowUnrestrictedCommunication: types.NewBool(false), + }, + &types.VirtualLsiLogicController{ + VirtualSCSIController: types.VirtualSCSIController{ + VirtualController: types.VirtualController{ + VirtualDevice: types.VirtualDevice{ + DynamicData: types.DynamicData{}, + Key: 1000, + DeviceInfo: &types.Description{ + DynamicData: types.DynamicData{}, + Label: "SCSI controller 0", + Summary: "LSI Logic", + }, + Backing: nil, + Connectable: (*types.VirtualDeviceConnectInfo)(nil), + SlotInfo: nil, + ControllerKey: 100, + UnitNumber: 3, + }, + BusNumber: 0, + Device: nil, + }, + HotAddRemove: types.NewBool(true), + SharedBus: "noSharing", + ScsiCtlrUnitNumber: 7, + }, + }, + &types.VirtualCdrom{ + VirtualDevice: types.VirtualDevice{ + DynamicData: types.DynamicData{}, + Key: 3001, + DeviceInfo: &types.Description{ + DynamicData: types.DynamicData{}, + Label: "CD/DVD drive 1", + Summary: "ISO [datastore1] ttylinux-pc_i486-16.1.iso", + }, + Backing: &types.VirtualCdromIsoBackingInfo{ + VirtualDeviceFileBackingInfo: types.VirtualDeviceFileBackingInfo{ + VirtualDeviceBackingInfo: types.VirtualDeviceBackingInfo{}, + FileName: "[datastore1] foo.iso", + Datastore: &types.ManagedObjectReference{Type: "Datastore", Value: "53fe43cc-75dc5110-3643-000c2918dc41"}, + BackingObjectId: "", + }, + }, + Connectable: &types.VirtualDeviceConnectInfo{ + DynamicData: types.DynamicData{}, + StartConnected: true, + AllowGuestControl: true, + Connected: false, + Status: "untried", + }, + SlotInfo: nil, + ControllerKey: 200, + UnitNumber: 1, + }, + }, + &types.VirtualDisk{ + VirtualDevice: types.VirtualDevice{ + DynamicData: types.DynamicData{}, + Key: 3000, + DeviceInfo: &types.Description{ + DynamicData: types.DynamicData{}, + Label: "Hard disk 1", + Summary: "30,720 KB", + }, + Backing: &types.VirtualDiskFlatVer2BackingInfo{ + VirtualDeviceFileBackingInfo: types.VirtualDeviceFileBackingInfo{ + VirtualDeviceBackingInfo: types.VirtualDeviceBackingInfo{}, + FileName: "[datastore1] bar/bar.vmdk", + Datastore: &types.ManagedObjectReference{Type: "Datastore", Value: "53fe43cc-75dc5110-3643-000c2918dc41"}, + BackingObjectId: "3-3000-0", + }, + DiskMode: "persistent", + Split: types.NewBool(false), + WriteThrough: types.NewBool(false), + ThinProvisioned: types.NewBool(false), + EagerlyScrub: types.NewBool(true), + Uuid: "6000C296-d0af-1209-1975-10c98eae10e4", + ContentId: "d46395062e2d1b1790985bdec573b211", + ChangeId: "", + Parent: 
&types.VirtualDiskFlatVer2BackingInfo{ + VirtualDeviceFileBackingInfo: types.VirtualDeviceFileBackingInfo{ + VirtualDeviceBackingInfo: types.VirtualDeviceBackingInfo{}, + FileName: "[datastore1] ttylinux.vmdk", + Datastore: &types.ManagedObjectReference{Type: "Datastore", Value: "53fe43cc-75dc5110-3643-000c2918dc41"}, + BackingObjectId: "3-3000-1", + }, + DiskMode: "persistent", + Split: types.NewBool(false), + WriteThrough: types.NewBool(false), + ThinProvisioned: types.NewBool(false), + EagerlyScrub: types.NewBool(true), + Uuid: "6000C296-d0af-1209-1975-10c98eae10e4", + ContentId: "1c2dad9e1662219e962a620c6d238a7c", + ChangeId: "", + Parent: (*types.VirtualDiskFlatVer2BackingInfo)(nil), + DeltaDiskFormat: "", + DigestEnabled: types.NewBool(false), + DeltaGrainSize: 0, + }, + DeltaDiskFormat: "redoLogFormat", + DigestEnabled: types.NewBool(false), + DeltaGrainSize: 0, + }, + Connectable: (*types.VirtualDeviceConnectInfo)(nil), + SlotInfo: nil, + ControllerKey: 200, + UnitNumber: 0, + }, + CapacityInKB: 30720, + CapacityInBytes: 31457280, + Shares: &types.SharesInfo{ + DynamicData: types.DynamicData{}, + Shares: 1000, + Level: "normal", + }, + StorageIOAllocation: &types.StorageIOAllocationInfo{ + DynamicData: types.DynamicData{}, + Limit: -1, + Shares: &types.SharesInfo{ + DynamicData: types.DynamicData{}, + Shares: 1000, + Level: "normal", + }, + Reservation: 0, + }, + DiskObjectId: "3-3000", + VFlashCacheConfigInfo: (*types.VirtualDiskVFlashCacheConfigInfo)(nil), + }, + &types.VirtualDisk{ + VirtualDevice: types.VirtualDevice{ + DynamicData: types.DynamicData{}, + Key: 3002, + DeviceInfo: &types.Description{ + DynamicData: types.DynamicData{}, + Label: "Hard disk 2", + Summary: "10,000,000 KB", + }, + Backing: &types.VirtualDiskFlatVer2BackingInfo{ + VirtualDeviceFileBackingInfo: types.VirtualDeviceFileBackingInfo{ + VirtualDeviceBackingInfo: types.VirtualDeviceBackingInfo{}, + FileName: "[datastore1] bar/disk-201-0.vmdk", + Datastore: &types.ManagedObjectReference{Type: "Datastore", Value: "53fe43cc-75dc5110-3643-000c2918dc41"}, + BackingObjectId: "3-3002-0", + }, + DiskMode: "persistent", + Split: types.NewBool(false), + WriteThrough: types.NewBool(false), + ThinProvisioned: types.NewBool(true), + EagerlyScrub: types.NewBool(false), + Uuid: "6000C293-fde5-4457-5118-dd267ea992a7", + ContentId: "90399989b9d520eed6793ab0fffffffe", + ChangeId: "", + Parent: (*types.VirtualDiskFlatVer2BackingInfo)(nil), + DeltaDiskFormat: "", + DigestEnabled: types.NewBool(false), + DeltaGrainSize: 0, + }, + Connectable: (*types.VirtualDeviceConnectInfo)(nil), + SlotInfo: nil, + ControllerKey: 201, + UnitNumber: 0, + }, + CapacityInKB: 10000000, + CapacityInBytes: 10240000000, + Shares: &types.SharesInfo{ + DynamicData: types.DynamicData{}, + Shares: 1000, + Level: "normal", + }, + StorageIOAllocation: &types.StorageIOAllocationInfo{ + DynamicData: types.DynamicData{}, + Limit: -1, + Shares: &types.SharesInfo{ + DynamicData: types.DynamicData{}, + Shares: 1000, + Level: "normal", + }, + Reservation: 0, + }, + DiskObjectId: "3-3002", + VFlashCacheConfigInfo: (*types.VirtualDiskVFlashCacheConfigInfo)(nil), + }, + &types.VirtualE1000{ + VirtualEthernetCard: types.VirtualEthernetCard{ + VirtualDevice: types.VirtualDevice{ + DynamicData: types.DynamicData{}, + Key: 4000, + DeviceInfo: &types.Description{ + DynamicData: types.DynamicData{}, + Label: "Network adapter 1", + Summary: "VM Network", + }, + Backing: &types.VirtualEthernetCardNetworkBackingInfo{ + VirtualDeviceDeviceBackingInfo: 
types.VirtualDeviceDeviceBackingInfo{
+						VirtualDeviceBackingInfo: types.VirtualDeviceBackingInfo{},
+						DeviceName:               "VM Network",
+						UseAutoDetect:            types.NewBool(false),
+					},
+					Network:           &types.ManagedObjectReference{Type: "Network", Value: "HaNetwork-VM Network"},
+					InPassthroughMode: types.NewBool(false),
+				},
+				Connectable: &types.VirtualDeviceConnectInfo{
+					DynamicData:       types.DynamicData{},
+					StartConnected:    true,
+					AllowGuestControl: true,
+					Connected:         false,
+					Status:            "untried",
+				},
+				SlotInfo: &types.VirtualDevicePciBusSlotInfo{
+					VirtualDeviceBusSlotInfo: types.VirtualDeviceBusSlotInfo{},
+					PciSlotNumber:            32,
+				},
+				ControllerKey: 100,
+				UnitNumber:    7,
+			},
+			AddressType:      "generated",
+			MacAddress:       "00:0c:29:93:d7:27",
+			WakeOnLanEnabled: types.NewBool(true),
+		},
+	},
+	&types.VirtualSerialPort{
+		VirtualDevice: types.VirtualDevice{
+			DynamicData: types.DynamicData{},
+			Key:         9000,
+			DeviceInfo: &types.Description{
+				DynamicData: types.DynamicData{},
+				Label:       "Serial port 1",
+				Summary:     "Remote localhost:0",
+			},
+			Backing: &types.VirtualSerialPortURIBackingInfo{
+				VirtualDeviceURIBackingInfo: types.VirtualDeviceURIBackingInfo{
+					VirtualDeviceBackingInfo: types.VirtualDeviceBackingInfo{},
+					ServiceURI:               "localhost:0",
+					Direction:                "client",
+					ProxyURI:                 "",
+				},
+			},
+			Connectable: &types.VirtualDeviceConnectInfo{
+				DynamicData:       types.DynamicData{},
+				StartConnected:    true,
+				AllowGuestControl: true,
+				Connected:         false,
+				Status:            "untried",
+			},
+			SlotInfo:      nil,
+			ControllerKey: 400,
+			UnitNumber:    0,
+		},
+		YieldOnPoll: true,
+	},
+})
+
+func TestSelectByType(t *testing.T) {
+	tests := []struct {
+		dtype  types.BaseVirtualDevice
+		expect int
+	}{
+		{
+			(*types.VirtualCdrom)(nil),
+			1,
+		},
+		{
+			(*types.VirtualEthernetCard)(nil),
+			1,
+		},
+		{
+			(*types.VirtualDisk)(nil),
+			2,
+		},
+		{
+			(*types.VirtualController)(nil),
+			6,
+		},
+		{
+			(*types.VirtualIDEController)(nil),
+			2,
+		},
+		{
+			(*types.VirtualSCSIController)(nil),
+			1,
+		},
+		{
+			(*types.VirtualLsiLogicController)(nil),
+			1,
+		},
+		{
+			(*types.ParaVirtualSCSIController)(nil),
+			0,
+		},
+	}
+
+	for _, test := range tests {
+		d := devices.SelectByType(test.dtype)
+
+		if len(d) != test.expect {
+			t.Errorf("%#v has %d", test.dtype, len(d))
+		}
+	}
+}
+
+func TestSelectByBackingInfo(t *testing.T) {
+	tests := []types.BaseVirtualDeviceBackingInfo{
+		&types.VirtualEthernetCardNetworkBackingInfo{
+			VirtualDeviceDeviceBackingInfo: types.VirtualDeviceDeviceBackingInfo{
+				DeviceName: "VM Network",
+			},
+		},
+		&types.VirtualDiskFlatVer2BackingInfo{
+			VirtualDeviceFileBackingInfo: types.VirtualDeviceFileBackingInfo{
+				FileName: "[datastore1] bar/bar.vmdk",
+			},
+		},
+		&types.VirtualDiskFlatVer2BackingInfo{
+			Parent: &types.VirtualDiskFlatVer2BackingInfo{
+				VirtualDeviceFileBackingInfo: types.VirtualDeviceFileBackingInfo{
+					FileName: "[datastore1] ttylinux.vmdk",
+				},
+			},
+		},
+		&types.VirtualCdromIsoBackingInfo{
+			VirtualDeviceFileBackingInfo: types.VirtualDeviceFileBackingInfo{
+				VirtualDeviceBackingInfo: types.VirtualDeviceBackingInfo{},
+				FileName:                 "[datastore1] foo.iso",
+			},
+		},
+		(*types.VirtualCdromIsoBackingInfo)(nil),
+		&types.VirtualSerialPortURIBackingInfo{
+			VirtualDeviceURIBackingInfo: types.VirtualDeviceURIBackingInfo{
+				VirtualDeviceBackingInfo: types.VirtualDeviceBackingInfo{},
+				ServiceURI:               "localhost:0",
+				Direction:                "client",
+				ProxyURI:                 "",
+			},
+		},
+		(*types.VirtualSerialPortURIBackingInfo)(nil),
+	}
+
+	for _, test := range tests {
+		l := devices.SelectByBackingInfo(test)
+
+		if len(l) != 1 {
+			
t.Errorf("Expected 1, got %d: %#v", len(l), test) + } + } + + // test selecting by backing type + tests = []types.BaseVirtualDeviceBackingInfo{ + (*types.VirtualDiskFlatVer2BackingInfo)(nil), + } + + for _, test := range tests { + l := devices.SelectByBackingInfo(test) + + if len(l) != 2 { + t.Errorf("Expected 2, got %d: %#v", len(l), test) + } + } +} + +func TestFind(t *testing.T) { + for _, device := range devices { + name := devices.Name(device) + d := devices.Find(name) + if name != devices.Name(d) { + t.Errorf("expected name: %s, got: %s", name, devices.Name(d)) + } + } + + d := devices.Find("enoent") + if d != nil { + t.Errorf("unexpected: %#v", d) + } +} + +func TestFindController(t *testing.T) { + for _, name := range []string{"", "ide-200"} { + _, err := devices.FindIDEController(name) + if err != nil { + t.Error(err) + } + } + + for _, name := range []string{"", "lsilogic-1000"} { + _, err := devices.FindSCSIController(name) + if err != nil { + t.Error(err) + } + } + + fns := []func() error{ + func() error { + _, err := devices.FindIDEController("lsilogic-1000") + return err + }, + func() error { + _, err := devices.FindSCSIController("ide-200") + return err + }, + } + + for _, f := range fns { + err := f() + if err == nil { + t.Error("should fail") + } + } +} + +func TestPickController(t *testing.T) { + list := devices + + tests := []struct { + ctype types.BaseVirtualController + key int + unit int + }{ + { + (*types.VirtualIDEController)(nil), 201, 1, + }, + { + (*types.VirtualSCSIController)(nil), 1000, 0, + }, + { + (*types.VirtualSCSIController)(nil), 1000, 1, + }, + } + + for _, test := range tests { + c := list.PickController(test.ctype).GetVirtualController() + + key := c.Key + if key != test.key { + t.Errorf("expected controller key: %d, got: %d\n", test.key, key) + } + + unit := list.newUnitNumber(c) + if unit != test.unit { + t.Errorf("expected unit number: %d, got: %d\n", test.unit, unit) + } + + dev := &types.VirtualDevice{ + Key: rand.Int(), + UnitNumber: unit, + ControllerKey: key, + } + + list = append(list, dev) + c.Device = append(c.Device, dev.Key) + } + + if list.PickController((*types.VirtualIDEController)(nil)) != nil { + t.Error("should be nil") + } + + if list.PickController((*types.VirtualSCSIController)(nil)) == nil { + t.Errorf("should not be nil") + } +} + +func TestCreateSCSIController(t *testing.T) { + for _, l := range []VirtualDeviceList{SCSIControllerTypes(), devices} { + _, err := l.CreateSCSIController("enoent") + if err == nil { + t.Error("should fail") + } + + for _, name := range []string{"", "scsi", "pvscsi", "buslogic", "lsilogic", "lsilogic-sas"} { + _, err = l.CreateSCSIController(name) + if err != nil { + t.Error(err) + } + } + } +} + +func TestCreateEthernetCard(t *testing.T) { + _, err := EthernetCardTypes().CreateEthernetCard("enoent", nil) + if err == nil { + t.Error("should fail") + } + + for _, name := range []string{"", "e1000", "e1000e", "vmxnet3"} { + _, err := EthernetCardTypes().CreateEthernetCard(name, nil) + if err != nil { + t.Error(err) + } + } +} + +func TestCdrom(t *testing.T) { + c, err := devices.FindCdrom("") + if err != nil { + t.Error(err) + } + + d := devices.Find(devices.Name(c)) + + if c.Key != d.GetVirtualDevice().Key { + t.Error("device key mismatch") + } + + for _, name := range []string{"enoent", "ide-200"} { + c, err = devices.FindCdrom(name) + if err == nil { + t.Errorf("FindCdrom(%s) should fail", name) + } + } + + c, err = devices.Select(func(device types.BaseVirtualDevice) bool { + if _, ok := 
device.(*types.VirtualCdrom); ok { + return false + } + return true + }).FindCdrom("") + + if err == nil { + t.Error("FindCdrom('') should fail") + } +} + +func TestSerialPort(t *testing.T) { + device, err := devices.CreateSerialPort() + if err != nil { + t.Error(err) + } + devices.ConnectSerialPort(device, "telnet://:33233", false) +} + +func TestPrimaryMacAddress(t *testing.T) { + expect := "00:0c:29:93:d7:27" + mac := devices.PrimaryMacAddress() + if expect != mac { + t.Errorf("expected: %s, got: %s", expect, mac) + } +} + +func TestBootOrder(t *testing.T) { + o := []string{DeviceTypeEthernet, DeviceTypeCdrom, DeviceTypeFloppy, DeviceTypeDisk} + list := devices + + n := 4 // 1 ethernet, 1 cdrom, 2 disk + order := list.BootOrder(o) + if len(order) != n { + t.Errorf("expected %d boot devices, got: %d", n, len(order)) + } + + list = list.SelectBootOrder(order) + if len(list) != n { + t.Errorf("expected %d boot devices, got: %d", n, len(list)) + } + + // test lookup by name + var names []string + for _, x := range list { + names = append(names, list.Name(x)) + } + + order = list.BootOrder(names) + if len(order) != n { + t.Errorf("expected %d boot devices, got: %d", n, len(order)) + } + + if !reflect.DeepEqual(list, list.SelectBootOrder(order)) { + t.Error("boot order mismatch") + } + + // remove disks + list = list.Select(func(device types.BaseVirtualDevice) bool { + if _, ok := device.(*types.VirtualDisk); ok { + return false + } + return true + }) + + n = 2 // 1 ethernet, 1 cdrom + order = list.BootOrder(o) + if len(order) != n { + t.Errorf("expected %d boot devices, got: %d", n, len(order)) + } + + if !reflect.DeepEqual(list, list.SelectBootOrder(order)) { + t.Error("boot order mismatch") + } + + if len(list.BootOrder([]string{DeviceTypeDisk})) != 0 { + t.Error("expected 0 disks") + } +} + +func TestName(t *testing.T) { + tests := []struct { + device types.BaseVirtualDevice + expect string + }{ + { + &types.VirtualCdrom{}, + "cdrom-0", + }, + { + &types.VirtualDisk{}, + "disk-0-0", + }, + { + &types.VirtualFloppy{}, + "floppy-0", + }, + { + &types.VirtualIDEController{}, + "ide-0", + }, + { + &types.VirtualMachineVideoCard{}, + "video-0", + }, + { + &types.VirtualPointingDevice{}, + "pointing-0", + }, + { + &types.ParaVirtualSCSIController{}, + "pvscsi-0", + }, + { + &types.VirtualSerialPort{}, + "serialport-0", + }, + { + &types.VirtualE1000{ + VirtualEthernetCard: types.VirtualEthernetCard{ + VirtualDevice: types.VirtualDevice{ + UnitNumber: 7, + }, + }, + }, + "ethernet-0", + }, + } + + for _, test := range tests { + name := devices.Name(test.device) + if name != test.expect { + t.Errorf("expected: %s, got: %s", test.expect, name) + } + } +} diff --git a/vendor/github.com/vmware/govmomi/object/virtual_disk_manager.go b/vendor/github.com/vmware/govmomi/object/virtual_disk_manager.go new file mode 100644 index 000000000..800cfa076 --- /dev/null +++ b/vendor/github.com/vmware/govmomi/object/virtual_disk_manager.go @@ -0,0 +1,145 @@ +/* +Copyright (c) 2015 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package object + +import ( + "github.com/vmware/govmomi/vim25" + "github.com/vmware/govmomi/vim25/methods" + "github.com/vmware/govmomi/vim25/types" + "golang.org/x/net/context" +) + +type VirtualDiskManager struct { + Common +} + +func NewVirtualDiskManager(c *vim25.Client) *VirtualDiskManager { + m := VirtualDiskManager{ + Common: NewCommon(c, *c.ServiceContent.VirtualDiskManager), + } + + return &m +} + +// CopyVirtualDisk copies a virtual disk, performing conversions as specified in the spec. +func (m VirtualDiskManager) CopyVirtualDisk( + ctx context.Context, + sourceName string, sourceDatacenter *Datacenter, + destName string, destDatacenter *Datacenter, + destSpec *types.VirtualDiskSpec, force bool) (*Task, error) { + + req := types.CopyVirtualDisk_Task{ + This: m.Reference(), + SourceName: sourceName, + DestName: destName, + DestSpec: destSpec, + Force: types.NewBool(force), + } + + if sourceDatacenter != nil { + ref := sourceDatacenter.Reference() + req.SourceDatacenter = &ref + } + + if destDatacenter != nil { + ref := destDatacenter.Reference() + req.DestDatacenter = &ref + } + + res, err := methods.CopyVirtualDisk_Task(ctx, m.c, &req) + if err != nil { + return nil, err + } + + return NewTask(m.c, res.Returnval), nil +} + +// CreateVirtualDisk creates a new virtual disk. +func (m VirtualDiskManager) CreateVirtualDisk( + ctx context.Context, + name string, datacenter *Datacenter, + spec types.BaseVirtualDiskSpec) (*Task, error) { + + req := types.CreateVirtualDisk_Task{ + This: m.Reference(), + Name: name, + Spec: spec, + } + + if datacenter != nil { + ref := datacenter.Reference() + req.Datacenter = &ref + } + + res, err := methods.CreateVirtualDisk_Task(ctx, m.c, &req) + if err != nil { + return nil, err + } + + return NewTask(m.c, res.Returnval), nil +} + +// MoveVirtualDisk moves a virtual disk. +func (m VirtualDiskManager) MoveVirtualDisk( + ctx context.Context, + sourceName string, sourceDatacenter *Datacenter, + destName string, destDatacenter *Datacenter, + force bool) (*Task, error) { + req := types.MoveVirtualDisk_Task{ + This: m.Reference(), + SourceName: sourceName, + DestName: destName, + Force: types.NewBool(force), + } + + if sourceDatacenter != nil { + ref := sourceDatacenter.Reference() + req.SourceDatacenter = &ref + } + + if destDatacenter != nil { + ref := destDatacenter.Reference() + req.DestDatacenter = &ref + } + + res, err := methods.MoveVirtualDisk_Task(ctx, m.c, &req) + if err != nil { + return nil, err + } + + return NewTask(m.c, res.Returnval), nil +} + +// DeleteVirtualDisk deletes a virtual disk. +func (m VirtualDiskManager) DeleteVirtualDisk(ctx context.Context, name string, dc *Datacenter) (*Task, error) { + req := types.DeleteVirtualDisk_Task{ + This: m.Reference(), + Name: name, + } + + if dc != nil { + ref := dc.Reference() + req.Datacenter = &ref + } + + res, err := methods.DeleteVirtualDisk_Task(ctx, m.c, &req) + if err != nil { + return nil, err + } + + return NewTask(m.c, res.Returnval), nil +} diff --git a/vendor/github.com/vmware/govmomi/object/virtual_machine.go b/vendor/github.com/vmware/govmomi/object/virtual_machine.go new file mode 100644 index 000000000..701b27e4a --- /dev/null +++ b/vendor/github.com/vmware/govmomi/object/virtual_machine.go @@ -0,0 +1,468 @@ +/* +Copyright (c) 2015 VMware, Inc. All Rights Reserved. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package object + +import ( + "errors" + "fmt" + + "github.com/vmware/govmomi/property" + "github.com/vmware/govmomi/vim25" + "github.com/vmware/govmomi/vim25/methods" + "github.com/vmware/govmomi/vim25/mo" + "github.com/vmware/govmomi/vim25/types" + "golang.org/x/net/context" +) + +const ( + PropRuntimePowerState = "summary.runtime.powerState" +) + +type VirtualMachine struct { + Common + + InventoryPath string +} + +func (v VirtualMachine) String() string { + if v.InventoryPath == "" { + return v.Common.String() + } + return fmt.Sprintf("%v @ %v", v.Common, v.InventoryPath) +} + +func NewVirtualMachine(c *vim25.Client, ref types.ManagedObjectReference) *VirtualMachine { + return &VirtualMachine{ + Common: NewCommon(c, ref), + } +} + +func (v VirtualMachine) Name(ctx context.Context) (string, error) { + var o mo.VirtualMachine + + err := v.Properties(ctx, v.Reference(), []string{"name"}, &o) + if err != nil { + return "", err + } + + return o.Name, nil +} + +func (v VirtualMachine) PowerState(ctx context.Context) (types.VirtualMachinePowerState, error) { + var o mo.VirtualMachine + + err := v.Properties(ctx, v.Reference(), []string{PropRuntimePowerState}, &o) + if err != nil { + return "", err + } + + return o.Summary.Runtime.PowerState, nil +} + +func (v VirtualMachine) PowerOn(ctx context.Context) (*Task, error) { + req := types.PowerOnVM_Task{ + This: v.Reference(), + } + + res, err := methods.PowerOnVM_Task(ctx, v.c, &req) + if err != nil { + return nil, err + } + + return NewTask(v.c, res.Returnval), nil +} + +func (v VirtualMachine) PowerOff(ctx context.Context) (*Task, error) { + req := types.PowerOffVM_Task{ + This: v.Reference(), + } + + res, err := methods.PowerOffVM_Task(ctx, v.c, &req) + if err != nil { + return nil, err + } + + return NewTask(v.c, res.Returnval), nil +} + +func (v VirtualMachine) Reset(ctx context.Context) (*Task, error) { + req := types.ResetVM_Task{ + This: v.Reference(), + } + + res, err := methods.ResetVM_Task(ctx, v.c, &req) + if err != nil { + return nil, err + } + + return NewTask(v.c, res.Returnval), nil +} + +func (v VirtualMachine) Suspend(ctx context.Context) (*Task, error) { + req := types.SuspendVM_Task{ + This: v.Reference(), + } + + res, err := methods.SuspendVM_Task(ctx, v.c, &req) + if err != nil { + return nil, err + } + + return NewTask(v.c, res.Returnval), nil +} + +func (v VirtualMachine) ShutdownGuest(ctx context.Context) error { + req := types.ShutdownGuest{ + This: v.Reference(), + } + + _, err := methods.ShutdownGuest(ctx, v.c, &req) + return err +} + +func (v VirtualMachine) RebootGuest(ctx context.Context) error { + req := types.RebootGuest{ + This: v.Reference(), + } + + _, err := methods.RebootGuest(ctx, v.c, &req) + return err +} + +func (v VirtualMachine) Destroy(ctx context.Context) (*Task, error) { + req := types.Destroy_Task{ + This: v.Reference(), + } + + res, err := methods.Destroy_Task(ctx, v.c, &req) + if err != nil { + return nil, err + } + + return NewTask(v.c, res.Returnval), nil +} + 
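+// Most of the power and lifecycle methods above return a *Task rather than
+// blocking; a typical call site (a sketch, assuming ctx is in scope) waits on
+// the returned task:
+//
+//	task, err := vm.PowerOn(ctx)
+//	if err != nil {
+//		return err
+//	}
+//	return task.Wait(ctx)
+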
+func (v VirtualMachine) Clone(ctx context.Context, folder *Folder, name string, config types.VirtualMachineCloneSpec) (*Task, error) { + req := types.CloneVM_Task{ + This: v.Reference(), + Folder: folder.Reference(), + Name: name, + Spec: config, + } + + res, err := methods.CloneVM_Task(ctx, v.c, &req) + if err != nil { + return nil, err + } + + return NewTask(v.c, res.Returnval), nil +} + +func (v VirtualMachine) Customize(ctx context.Context, spec types.CustomizationSpec) (*Task, error) { + req := types.CustomizeVM_Task{ + This: v.Reference(), + Spec: spec, + } + + res, err := methods.CustomizeVM_Task(ctx, v.c, &req) + if err != nil { + return nil, err + } + + return NewTask(v.c, res.Returnval), nil +} + +func (v VirtualMachine) Relocate(ctx context.Context, config types.VirtualMachineRelocateSpec, priority types.VirtualMachineMovePriority) (*Task, error) { + req := types.RelocateVM_Task{ + This: v.Reference(), + Spec: config, + Priority: priority, + } + + res, err := methods.RelocateVM_Task(ctx, v.c, &req) + if err != nil { + return nil, err + } + + return NewTask(v.c, res.Returnval), nil +} + +func (v VirtualMachine) Reconfigure(ctx context.Context, config types.VirtualMachineConfigSpec) (*Task, error) { + req := types.ReconfigVM_Task{ + This: v.Reference(), + Spec: config, + } + + res, err := methods.ReconfigVM_Task(ctx, v.c, &req) + if err != nil { + return nil, err + } + + return NewTask(v.c, res.Returnval), nil +} + +func (v VirtualMachine) WaitForIP(ctx context.Context) (string, error) { + var ip string + + p := property.DefaultCollector(v.c) + err := property.Wait(ctx, p, v.Reference(), []string{"guest.ipAddress"}, func(pc []types.PropertyChange) bool { + for _, c := range pc { + if c.Name != "guest.ipAddress" { + continue + } + if c.Op != types.PropertyChangeOpAssign { + continue + } + if c.Val == nil { + continue + } + + ip = c.Val.(string) + return true + } + + return false + }) + + if err != nil { + return "", err + } + + return ip, nil +} + +// Device returns the VirtualMachine's config.hardware.device property. 
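+//
+// For illustration (a sketch; the iso path is an assumed example and error
+// handling is elided):
+//
+//	devices, _ := vm.Device(ctx)
+//	cdrom, _ := devices.FindCdrom("")
+//	devices.InsertIso(cdrom, "[datastore1] iso/example.iso")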
+func (v VirtualMachine) Device(ctx context.Context) (VirtualDeviceList, error) { + var o mo.VirtualMachine + + err := v.Properties(ctx, v.Reference(), []string{"config.hardware.device"}, &o) + if err != nil { + return nil, err + } + + return VirtualDeviceList(o.Config.Hardware.Device), nil +} + +func (v VirtualMachine) HostSystem(ctx context.Context) (*HostSystem, error) { + var o mo.VirtualMachine + + err := v.Properties(ctx, v.Reference(), []string{"summary"}, &o) + if err != nil { + return nil, err + } + + host := o.Summary.Runtime.Host + if host == nil { + return nil, errors.New("VM doesn't have a HostSystem") + } + + return NewHostSystem(v.c, *host), nil +} + +func (v VirtualMachine) ResourcePool(ctx context.Context) (*ResourcePool, error) { + var o mo.VirtualMachine + + err := v.Properties(ctx, v.Reference(), []string{"resourcePool"}, &o) + if err != nil { + return nil, err + } + + rp := o.ResourcePool + if rp == nil { + return nil, errors.New("VM doesn't have a resourcePool") + } + + return NewResourcePool(v.c, *rp), nil +} + +func (v VirtualMachine) configureDevice(ctx context.Context, op types.VirtualDeviceConfigSpecOperation, fop types.VirtualDeviceConfigSpecFileOperation, devices ...types.BaseVirtualDevice) error { + spec := types.VirtualMachineConfigSpec{} + + for _, device := range devices { + config := &types.VirtualDeviceConfigSpec{ + Device: device, + Operation: op, + } + + if disk, ok := device.(*types.VirtualDisk); ok { + config.FileOperation = fop + + // Special case to attach an existing disk + if op == types.VirtualDeviceConfigSpecOperationAdd && disk.CapacityInKB == 0 { + childDisk := false + if b, ok := disk.Backing.(*types.VirtualDiskFlatVer2BackingInfo); ok { + childDisk = b.Parent != nil + } + + if !childDisk { + config.FileOperation = "" // existing disk + } + } + } + + spec.DeviceChange = append(spec.DeviceChange, config) + } + + task, err := v.Reconfigure(ctx, spec) + if err != nil { + return err + } + + return task.Wait(ctx) +} + +// AddDevice adds the given devices to the VirtualMachine +func (v VirtualMachine) AddDevice(ctx context.Context, device ...types.BaseVirtualDevice) error { + return v.configureDevice(ctx, types.VirtualDeviceConfigSpecOperationAdd, types.VirtualDeviceConfigSpecFileOperationCreate, device...) +} + +// EditDevice edits the given (existing) devices on the VirtualMachine +func (v VirtualMachine) EditDevice(ctx context.Context, device ...types.BaseVirtualDevice) error { + return v.configureDevice(ctx, types.VirtualDeviceConfigSpecOperationEdit, types.VirtualDeviceConfigSpecFileOperationReplace, device...) +} + +// RemoveDevice removes the given devices on the VirtualMachine +func (v VirtualMachine) RemoveDevice(ctx context.Context, device ...types.BaseVirtualDevice) error { + return v.configureDevice(ctx, types.VirtualDeviceConfigSpecOperationRemove, types.VirtualDeviceConfigSpecFileOperationDestroy, device...) +} + +// BootOptions returns the VirtualMachine's config.bootOptions property. +func (v VirtualMachine) BootOptions(ctx context.Context) (*types.VirtualMachineBootOptions, error) { + var o mo.VirtualMachine + + err := v.Properties(ctx, v.Reference(), []string{"config.bootOptions"}, &o) + if err != nil { + return nil, err + } + + return o.Config.BootOptions, nil +} + +// SetBootOptions reconfigures the VirtualMachine with the given options. 
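+//
+// A common pattern (a sketch, assuming ctx is in scope) reads the current
+// options, adjusts them, and writes them back:
+//
+//	options, err := vm.BootOptions(ctx)
+//	if err != nil {
+//		return err
+//	}
+//	options.BootDelay = 5000 // an assumed example delay, in milliseconds
+//	return vm.SetBootOptions(ctx, options)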
+func (v VirtualMachine) SetBootOptions(ctx context.Context, options *types.VirtualMachineBootOptions) error { + spec := types.VirtualMachineConfigSpec{} + + spec.BootOptions = options + + task, err := v.Reconfigure(ctx, spec) + if err != nil { + return err + } + + return task.Wait(ctx) +} + +// Answer answers a pending question. +func (v VirtualMachine) Answer(ctx context.Context, id, answer string) error { + req := types.AnswerVM{ + This: v.Reference(), + QuestionId: id, + AnswerChoice: answer, + } + + _, err := methods.AnswerVM(ctx, v.c, &req) + if err != nil { + return err + } + + return nil +} + +// CreateSnapshot creates a new snapshot of a virtual machine. +func (v VirtualMachine) CreateSnapshot(ctx context.Context, name string, description string, memory bool, quiesce bool) (*Task, error) { + req := types.CreateSnapshot_Task{ + This: v.Reference(), + Name: name, + Description: description, + Memory: memory, + Quiesce: quiesce, + } + + res, err := methods.CreateSnapshot_Task(ctx, v.c, &req) + if err != nil { + return nil, err + } + + return NewTask(v.c, res.Returnval), nil +} + +// IsToolsRunning returns true if VMware Tools is currently running in the guest OS, and false otherwise. +func (v VirtualMachine) IsToolsRunning(ctx context.Context) (bool, error) { + var o mo.VirtualMachine + + err := v.Properties(ctx, v.Reference(), []string{"guest.toolsRunningStatus"}, &o) + if err != nil { + return false, err + } + + return o.Guest.ToolsRunningStatus == string(types.VirtualMachineToolsRunningStatusGuestToolsRunning), nil +} + +// Wait for the VirtualMachine to change to the desired power state. +func (v VirtualMachine) WaitForPowerState(ctx context.Context, state types.VirtualMachinePowerState) error { + p := property.DefaultCollector(v.c) + err := property.Wait(ctx, p, v.Reference(), []string{PropRuntimePowerState}, func(pc []types.PropertyChange) bool { + for _, c := range pc { + if c.Name != PropRuntimePowerState { + continue + } + if c.Val == nil { + continue + } + + ps := c.Val.(types.VirtualMachinePowerState) + if ps == state { + return true + } + } + return false + }) + + return err +} + +func (v VirtualMachine) MarkAsTemplate(ctx context.Context) error { + req := types.MarkAsTemplate{ + This: v.Reference(), + } + + _, err := methods.MarkAsTemplate(ctx, v.c, &req) + if err != nil { + return err + } + + return nil +} + +func (v VirtualMachine) MarkAsVirtualMachine(ctx context.Context, pool ResourcePool, host *HostSystem) error { + req := types.MarkAsVirtualMachine{ + This: v.Reference(), + Pool: pool.Reference(), + } + + if host != nil { + ref := host.Reference() + req.Host = &ref + } + + _, err := methods.MarkAsVirtualMachine(ctx, v.c, &req) + if err != nil { + return err + } + + return nil +} diff --git a/vendor/github.com/vmware/govmomi/object/virtual_machine_test.go b/vendor/github.com/vmware/govmomi/object/virtual_machine_test.go new file mode 100644 index 000000000..3cd1c8d53 --- /dev/null +++ b/vendor/github.com/vmware/govmomi/object/virtual_machine_test.go @@ -0,0 +1,20 @@ +/* +Copyright (c) 2015 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package object + +// VirtualMachine should implement the Reference interface. +var _ Reference = VirtualMachine{} diff --git a/vendor/github.com/vmware/govmomi/object/vmware_distributed_virtual_switch.go b/vendor/github.com/vmware/govmomi/object/vmware_distributed_virtual_switch.go new file mode 100644 index 000000000..f6caf9870 --- /dev/null +++ b/vendor/github.com/vmware/govmomi/object/vmware_distributed_virtual_switch.go @@ -0,0 +1,21 @@ +/* +Copyright (c) 2015 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package object + +type VmwareDistributedVirtualSwitch struct { + DistributedVirtualSwitch +} diff --git a/vendor/github.com/vmware/govmomi/ovf/cim.go b/vendor/github.com/vmware/govmomi/ovf/cim.go new file mode 100644 index 000000000..ce20bde19 --- /dev/null +++ b/vendor/github.com/vmware/govmomi/ovf/cim.go @@ -0,0 +1,78 @@ +/* +Copyright (c) 2015 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package ovf + +/* +Source: http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2.24.0/CIM_VirtualSystemSettingData.xsd +*/ + +type CIMVirtualSystemSettingData struct { + ElementName string `xml:"ElementName"` + InstanceID string `xml:"InstanceID"` + + AutomaticRecoveryAction *uint8 `xml:"AutomaticRecoveryAction"` + AutomaticShutdownAction *uint8 `xml:"AutomaticShutdownAction"` + AutomaticStartupAction *uint8 `xml:"AutomaticStartupAction"` + AutomaticStartupActionDelay *string `xml:"AutomaticStartupActionDelay>Interval"` + AutomaticStartupActionSequenceNumber *uint16 `xml:"AutomaticStartupActionSequenceNumber"` + Caption *string `xml:"Caption"` + ConfigurationDataRoot *string `xml:"ConfigurationDataRoot"` + ConfigurationFile *string `xml:"ConfigurationFile"` + ConfigurationID *string `xml:"ConfigurationID"` + CreationTime *string `xml:"CreationTime"` + Description *string `xml:"Description"` + LogDataRoot *string `xml:"LogDataRoot"` + Notes []string `xml:"Notes"` + RecoveryFile *string `xml:"RecoveryFile"` + SnapshotDataRoot *string `xml:"SnapshotDataRoot"` + SuspendDataRoot *string `xml:"SuspendDataRoot"` + SwapFileDataRoot *string `xml:"SwapFileDataRoot"` + VirtualSystemIdentifier *string `xml:"VirtualSystemIdentifier"` + VirtualSystemType *string `xml:"VirtualSystemType"` +} + +/* +Source: http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2.24.0/CIM_ResourceAllocationSettingData.xsd +*/ + +type CIMResourceAllocationSettingData struct { + ElementName string `xml:"ElementName"` + InstanceID string `xml:"InstanceID"` + + ResourceType *uint16 `xml:"ResourceType"` + OtherResourceType *string `xml:"OtherResourceType"` + ResourceSubType *string `xml:"ResourceSubType"` + + AddressOnParent *string `xml:"AddressOnParent"` + Address *string `xml:"Address"` + AllocationUnits *string `xml:"AllocationUnits"` + AutomaticAllocation *bool `xml:"AutomaticAllocation"` + AutomaticDeallocation *bool `xml:"AutomaticDeallocation"` + Caption *string `xml:"Caption"` + Connection []string `xml:"Connection"` + ConsumerVisibility *uint16 `xml:"ConsumerVisibility"` + Description *string `xml:"Description"` + HostResource []string `xml:"HostResource"` + Limit *uint64 `xml:"Limit"` + MappingBehavior *uint `xml:"MappingBehavior"` + Parent *string `xml:"Parent"` + PoolID *string `xml:"PoolID"` + Reservation *uint64 `xml:"Reservation"` + VirtualQuantity *uint `xml:"VirtualQuantity"` + VirtualQuantityUnits *string `xml:"VirtualQuantityUnits"` + Weight *uint `xml:"Weight"` +} diff --git a/vendor/github.com/vmware/govmomi/ovf/doc.go b/vendor/github.com/vmware/govmomi/ovf/doc.go new file mode 100644 index 000000000..6284b1ac5 --- /dev/null +++ b/vendor/github.com/vmware/govmomi/ovf/doc.go @@ -0,0 +1,25 @@ +/* +Copyright (c) 2015 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +/* +Package ovf provides functionality to unmarshal and inspect the structure +of an OVF file. It is not a complete implementation of the specification and +is intended to be used to import virtual infrastructure into vSphere. 
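+
+A minimal sketch of typical use (the file name is an assumed example; error
+handling elided):
+
+	f, _ := os.Open("appliance.ovf")
+	defer f.Close()
+
+	e, _ := Unmarshal(f)
+	fmt.Printf("%d file reference(s)\n", len(e.References))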
+
+For a complete specification of the OVF standard, refer to:
+https://www.dmtf.org/sites/default/files/standards/documents/DSP0243_2.1.0.pdf
+*/
+package ovf
diff --git a/vendor/github.com/vmware/govmomi/ovf/env.go b/vendor/github.com/vmware/govmomi/ovf/env.go
new file mode 100644
index 000000000..5a5fc2f62
--- /dev/null
+++ b/vendor/github.com/vmware/govmomi/ovf/env.go
@@ -0,0 +1,98 @@
+/*
+Copyright (c) 2015 VMware, Inc. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package ovf
+
+import (
+	"bytes"
+	"encoding/xml"
+	"fmt"
+)
+
+const (
+	ovfEnvHeader = `<Environment
+		xmlns="http://schemas.dmtf.org/ovf/environment/1"
+		xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+		xmlns:oe="http://schemas.dmtf.org/ovf/environment/1"
+		xmlns:ve="http://www.vmware.com/schema/ovfenv"
+		oe:id=""
+		ve:esxId="%s">`
+	ovfEnvPlatformSection = `<PlatformSection>
+		<Kind>%s</Kind>
+		<Version>%s</Version>
+		<Vendor>%s</Vendor>
+		<Locale>%s</Locale>
+		</PlatformSection>`
+	ovfEnvPropertyHeader = `<PropertySection>`
+	ovfEnvPropertyEntry  = `<Property oe:key="%s" oe:value="%s"/>`
+	ovfEnvPropertyFooter = `</PropertySection>`
+	ovfEnvFooter         = `</Environment>`
+)
+
+type Env struct {
+	XMLName xml.Name `xml:"http://schemas.dmtf.org/ovf/environment/1 Environment"`
+	ID      string   `xml:"id,attr"`
+	EsxID   string   `xml:"http://www.vmware.com/schema/ovfenv esxId,attr"`
+
+	Platform *PlatformSection `xml:"PlatformSection"`
+	Property *PropertySection `xml:"PropertySection"`
+}
+
+type PlatformSection struct {
+	Kind    string `xml:"Kind"`
+	Version string `xml:"Version"`
+	Vendor  string `xml:"Vendor"`
+	Locale  string `xml:"Locale"`
+}
+
+type PropertySection struct {
+	Properties []EnvProperty `xml:"Property"`
+}
+
+type EnvProperty struct {
+	Key   string `xml:"key,attr"`
+	Value string `xml:"value,attr"`
+}
+
+// Marshal marshals Env to xml by using xml.Marshal.
+func (e Env) Marshal() (string, error) {
+	x, err := xml.Marshal(e)
+	if err != nil {
+		return "", err
+	}
+
+	return fmt.Sprintf("%s%s", xml.Header, x), nil
+}
+
+// MarshalManual manually marshals Env to xml suitable for a vApp guest.
+// It exists to overcome the lack of expressiveness in Go's XML namespaces.
+func (e Env) MarshalManual() string {
+	var buffer bytes.Buffer
+
+	buffer.WriteString(xml.Header)
+	buffer.WriteString(fmt.Sprintf(ovfEnvHeader, e.EsxID))
+	buffer.WriteString(fmt.Sprintf(ovfEnvPlatformSection, e.Platform.Kind, e.Platform.Version, e.Platform.Vendor, e.Platform.Locale))
+
+	buffer.WriteString(fmt.Sprintf(ovfEnvPropertyHeader))
+	for _, p := range e.Property.Properties {
+		buffer.WriteString(fmt.Sprintf(ovfEnvPropertyEntry, p.Key, p.Value))
+	}
+	buffer.WriteString(fmt.Sprintf(ovfEnvPropertyFooter))
+
+	buffer.WriteString(fmt.Sprintf(ovfEnvFooter))
+
+	return buffer.String()
+}
diff --git a/vendor/github.com/vmware/govmomi/ovf/env_test.go b/vendor/github.com/vmware/govmomi/ovf/env_test.go
new file mode 100644
index 000000000..096a5c235
--- /dev/null
+++ b/vendor/github.com/vmware/govmomi/ovf/env_test.go
@@ -0,0 +1,57 @@
+/*
+Copyright (c) 2015 VMware, Inc. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ +package ovf + +import "testing" + +func testEnv() Env { + return Env{ + EsxID: "vm moref", + Platform: &PlatformSection{ + Kind: "VMware vCenter Server", + Version: "5.5.0", + Vendor: "VMware, Inc.", + Locale: "US", + }, + Property: &PropertySection{ + Properties: []EnvProperty{ + EnvProperty{"foo", "bar"}, + EnvProperty{"ham", "eggs"}}}, + } +} + +func TestMarshalEnv(t *testing.T) { + env := testEnv() + + xenv, err := env.Marshal() + if err != nil { + t.Fatal("Error marshalling environment") + } + if len(xenv) < 1 { + t.Fatal("Marshalled document is empty") + } + t.Log(xenv) +} + +func TestMarshalManualEnv(t *testing.T) { + env := testEnv() + + xenv := env.MarshalManual() + if len(xenv) < 1 { + t.Fatal("Marshalled document is empty") + } + t.Log(xenv) +} diff --git a/vendor/github.com/vmware/govmomi/ovf/envelope.go b/vendor/github.com/vmware/govmomi/ovf/envelope.go new file mode 100644 index 000000000..af96b1548 --- /dev/null +++ b/vendor/github.com/vmware/govmomi/ovf/envelope.go @@ -0,0 +1,191 @@ +/* +Copyright (c) 2015 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package ovf + +type Envelope struct { + References []File `xml:"References>File"` + + // Package level meta-data + Annotation *AnnotationSection `xml:"AnnotationSection"` + Product *ProductSection `xml:"ProductSection"` + Network *NetworkSection `xml:"NetworkSection"` + Disk *DiskSection `xml:"DiskSection"` + OperatingSystem *OperatingSystemSection `xml:"OperatingSystemSection"` + Eula *EulaSection `xml:"EulaSection"` + VirtualHardware *VirtualHardwareSection `xml:"VirtualHardwareSection"` + ResourceAllocation *ResourceAllocationSection `xml:"ResourceAllocationSection"` + DeploymentOption *DeploymentOptionSection `xml:"DeploymentOptionSection"` + + // Content: A VirtualSystem or a VirtualSystemCollection + VirtualSystem *VirtualSystem `xml:"VirtualSystem"` +} + +type VirtualSystem struct { + Content + + Annotation []AnnotationSection `xml:"AnnotationSection"` + Product []ProductSection `xml:"ProductSection"` + OperatingSystem []OperatingSystemSection `xml:"OperatingSystemSection"` + Eula []EulaSection `xml:"EulaSection"` + VirtualHardware []VirtualHardwareSection `xml:"VirtualHardwareSection"` +} + +type File struct { + ID string `xml:"id,attr"` + Href string `xml:"href,attr"` + Size uint `xml:"size,attr"` + Compression *string `xml:"compression,attr"` + ChunkSize *int `xml:"chunkSize,attr"` +} + +type Content struct { + ID string `xml:"id,attr"` + Info string `xml:"Info"` + Name *string `xml:"Name"` +} + +type Section struct { + Required *bool `xml:"required,attr"` + Info string `xml:"Info"` +} + +type AnnotationSection struct { + Section + + Annotation string `xml:"Annotation"` +} + +type ProductSection struct { + Section + + Class *string `xml:"class,attr"` + Instance *string `xml:"instance,attr"` + + Product string `xml:"Product"` + Vendor string `xml:"Vendor"` + Version string `xml:"Version"` + FullVersion string `xml:"FullVersion"` + ProductURL string `xml:"ProductUrl"` + VendorURL string `xml:"VendorUrl"` + AppURL string `xml:"AppUrl"` + Property []Property `xml:"Property"` +} + +type Property struct { + Key string `xml:"key,attr"` + Type string `xml:"type,attr"` + Qualifiers *string `xml:"qualifiers,attr"` + UserConfigurable *bool `xml:"userConfigurable,attr"` + Default *string `xml:"value,attr"` + Password *bool `xml:"password,attr"` + + Label *string `xml:"Label"` + Description *string `xml:"Description"` + + Values []PropertyConfigurationValue `xml:"Value"` +} + +type PropertyConfigurationValue struct { + Value string `xml:"value,attr"` + Configuration *string `xml:"configuration,attr"` +} + +type NetworkSection struct { + Section + + Networks []Network `xml:"Network"` +} + +type Network struct { + Name string `xml:"name,attr"` + + Description string `xml:"Description"` +} + +type DiskSection struct { + Section + + Disks []VirtualDiskDesc `xml:"Disk"` +} + +type VirtualDiskDesc struct { + DiskID string `xml:"diskId,attr"` + FileRef *string `xml:"fileRef,attr"` + Capacity string `xml:"capacity,attr"` + CapacityAllocationUnits *string `xml:"capacityAllocationUnits,attr"` + Format *string `xml:"format,attr"` + PopulatedSize *int `xml:"populatedSize,attr"` + ParentRef *string `xml:"parentRef,attr"` +} + +type OperatingSystemSection struct { + Section + + ID uint16 `xml:"id,attr"` + Version *string `xml:"version,attr"` + OSType *string `xml:"osType,attr"` + + Description *string `xml:"Description"` +} + +type EulaSection struct { + Section + + License string `xml:"License"` +} + +type VirtualHardwareSection struct { + Section + + ID *string `xml:"id,attr"` + Transport *string 
`xml:"transport,attr"` + + System *VirtualSystemSettingData `xml:"System"` + Item []ResourceAllocationSettingData `xml:"Item"` +} + +type VirtualSystemSettingData struct { + CIMVirtualSystemSettingData +} + +type ResourceAllocationSettingData struct { + CIMResourceAllocationSettingData + + Required *bool `xml:"required,attr"` + Configuration *string `xml:"configuration,attr"` + Bound *string `xml:"bound,attr"` +} + +type ResourceAllocationSection struct { + Section + + Item []ResourceAllocationSettingData `xml:"Item"` +} + +type DeploymentOptionSection struct { + Section + + Configuration []DeploymentOptionConfiguration `xml:"Configuration"` +} + +type DeploymentOptionConfiguration struct { + ID string `xml:"id,attr"` + Default *bool `xml:"default,attr"` + + Label string `xml:"Label"` + Description string `xml:"Description"` +} diff --git a/vendor/github.com/vmware/govmomi/ovf/ovf.go b/vendor/github.com/vmware/govmomi/ovf/ovf.go new file mode 100644 index 000000000..eea02d677 --- /dev/null +++ b/vendor/github.com/vmware/govmomi/ovf/ovf.go @@ -0,0 +1,34 @@ +/* +Copyright (c) 2015 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package ovf + +import ( + "encoding/xml" + "io" +) + +func Unmarshal(r io.Reader) (*Envelope, error) { + var e Envelope + + dec := xml.NewDecoder(r) + err := dec.Decode(&e) + if err != nil { + return nil, err + } + + return &e, nil +} diff --git a/vendor/github.com/vmware/govmomi/ovf/ovf_test.go b/vendor/github.com/vmware/govmomi/ovf/ovf_test.go new file mode 100644 index 000000000..40f2f08b5 --- /dev/null +++ b/vendor/github.com/vmware/govmomi/ovf/ovf_test.go @@ -0,0 +1,85 @@ +/* +Copyright (c) 2015 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package ovf + +import ( + "bytes" + "fmt" + "os" + "testing" + "text/tabwriter" +) + +func testFile(t *testing.T) *os.File { + n := os.Getenv("OVF_TEST_FILE") + if n == "" { + t.Skip("Please specify OVF_TEST_FILE") + } + + f, err := os.Open(n) + if err != nil { + t.Fatal(err) + } + + return f +} + +func testEnvelope(t *testing.T) *Envelope { + f := testFile(t) + defer f.Close() + + e, err := Unmarshal(f) + if err != nil { + t.Fatal(f) + } + + if e == nil { + t.Fatal("Empty envelope") + } + + return e +} + +func TestUnmarshal(t *testing.T) { + testEnvelope(t) +} + +func TestDeploymentOptions(t *testing.T) { + e := testEnvelope(t) + + if e.DeploymentOption == nil { + t.Fatal("Missing DeploymentOptionSection") + } + + var b bytes.Buffer + tw := tabwriter.NewWriter(&b, 2, 0, 2, ' ', 0) + fmt.Fprintf(tw, "\n") + for _, c := range e.DeploymentOption.Configuration { + fmt.Fprintf(tw, "id=%s\t", c.ID) + fmt.Fprintf(tw, "label=%s\t", c.Label) + + d := false + if c.Default != nil { + d = *c.Default + } + + fmt.Fprintf(tw, "default=%t\t", d) + fmt.Fprintf(tw, "\n") + } + tw.Flush() + t.Log(b.String()) +} diff --git a/vendor/github.com/vmware/govmomi/property/collector.go b/vendor/github.com/vmware/govmomi/property/collector.go new file mode 100644 index 000000000..50ea8d881 --- /dev/null +++ b/vendor/github.com/vmware/govmomi/property/collector.go @@ -0,0 +1,174 @@ +/* +Copyright (c) 2015 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package property + +import ( + "errors" + + "github.com/vmware/govmomi/vim25" + "github.com/vmware/govmomi/vim25/methods" + "github.com/vmware/govmomi/vim25/mo" + "github.com/vmware/govmomi/vim25/soap" + "github.com/vmware/govmomi/vim25/types" + "golang.org/x/net/context" +) + +// Collector models the PropertyCollector managed object. +// +// For more information, see: +// http://pubs.vmware.com/vsphere-55/index.jsp#com.vmware.wssdk.apiref.doc/vmodl.query.PropertyCollector.html +// +type Collector struct { + roundTripper soap.RoundTripper + reference types.ManagedObjectReference +} + +// DefaultCollector returns the session's default property collector. +func DefaultCollector(c *vim25.Client) *Collector { + p := Collector{ + roundTripper: c, + reference: c.ServiceContent.PropertyCollector, + } + + return &p +} + +func (p Collector) Reference() types.ManagedObjectReference { + return p.reference +} + +// Create creates a new session-specific Collector that can be used to +// retrieve property updates independent of any other Collector. +func (p *Collector) Create(ctx context.Context) (*Collector, error) { + req := types.CreatePropertyCollector{ + This: p.Reference(), + } + + res, err := methods.CreatePropertyCollector(ctx, p.roundTripper, &req) + if err != nil { + return nil, err + } + + newp := Collector{ + roundTripper: p.roundTripper, + reference: res.Returnval, + } + + return &newp, nil +} + +// Destroy destroys this Collector. 
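For orientation, here is a minimal sketch of driving the decoder above from ordinary code; the descriptor path "appliance.ovf" is a hypothetical placeholder, and only the vendored ovf package shown above is assumed.

package main

import (
	"fmt"
	"log"
	"os"

	"github.com/vmware/govmomi/ovf"
)

func main() {
	// Hypothetical descriptor path; any reader of OVF XML works.
	f, err := os.Open("appliance.ovf")
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	e, err := ovf.Unmarshal(f)
	if err != nil {
		log.Fatal(err)
	}

	// Package-level sections are pointers and may be nil when absent.
	if e.Network != nil {
		for _, n := range e.Network.Networks {
			fmt.Printf("network %s: %s\n", n.Name, n.Description)
		}
	}
	for _, file := range e.References {
		fmt.Printf("file %s -> %s (%d bytes)\n", file.ID, file.Href, file.Size)
	}
}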
+func (p *Collector) Destroy(ctx context.Context) error { + req := types.DestroyPropertyCollector{ + This: p.Reference(), + } + + _, err := methods.DestroyPropertyCollector(ctx, p.roundTripper, &req) + if err != nil { + return err + } + + p.reference = types.ManagedObjectReference{} + return nil +} + +func (p *Collector) CreateFilter(ctx context.Context, req types.CreateFilter) error { + req.This = p.Reference() + + _, err := methods.CreateFilter(ctx, p.roundTripper, &req) + if err != nil { + return err + } + + return nil +} + +func (p *Collector) WaitForUpdates(ctx context.Context, v string) (*types.UpdateSet, error) { + req := types.WaitForUpdatesEx{ + This: p.Reference(), + Version: v, + } + + res, err := methods.WaitForUpdatesEx(ctx, p.roundTripper, &req) + if err != nil { + return nil, err + } + + return res.Returnval, nil +} + +func (p *Collector) RetrieveProperties(ctx context.Context, req types.RetrieveProperties) (*types.RetrievePropertiesResponse, error) { + req.This = p.Reference() + return methods.RetrieveProperties(ctx, p.roundTripper, &req) +} + +// Retrieve loads properties for a slice of managed objects. The dst argument +// must be a pointer to a []interface{}, which is populated with the instances +// of the specified managed objects, with the relevant properties filled in. If +// the properties slice is nil, all properties are loaded. +func (p *Collector) Retrieve(ctx context.Context, objs []types.ManagedObjectReference, ps []string, dst interface{}) error { + var propSpec *types.PropertySpec + var objectSet []types.ObjectSpec + + for _, obj := range objs { + // Ensure that all object reference types are the same + if propSpec == nil { + propSpec = &types.PropertySpec{ + Type: obj.Type, + } + + if ps == nil { + propSpec.All = types.NewBool(true) + } else { + propSpec.PathSet = ps + } + } else { + if obj.Type != propSpec.Type { + return errors.New("object references must have the same type") + } + } + + objectSpec := types.ObjectSpec{ + Obj: obj, + Skip: types.NewBool(false), + } + + objectSet = append(objectSet, objectSpec) + } + + req := types.RetrieveProperties{ + SpecSet: []types.PropertyFilterSpec{ + { + ObjectSet: objectSet, + PropSet: []types.PropertySpec{*propSpec}, + }, + }, + } + + res, err := p.RetrieveProperties(ctx, req) + if err != nil { + return err + } + + return mo.LoadRetrievePropertiesResponse(res, dst) +} + +// RetrieveOne calls Retrieve with a single managed object reference. +func (p *Collector) RetrieveOne(ctx context.Context, obj types.ManagedObjectReference, ps []string, dst interface{}) error { + var objs = []types.ManagedObjectReference{obj} + return p.Retrieve(ctx, objs, ps, dst) +} diff --git a/vendor/github.com/vmware/govmomi/property/wait.go b/vendor/github.com/vmware/govmomi/property/wait.go new file mode 100644 index 000000000..44f73eda6 --- /dev/null +++ b/vendor/github.com/vmware/govmomi/property/wait.go @@ -0,0 +1,91 @@ +/* +Copyright (c) 2015 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License.
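A short sketch of the retrieval path just defined, assuming ctx, an authenticated *vim25.Client c, and a "VirtualMachine" reference ref (all hypothetical); it mirrors how the vendored session manager and functional-test helper below call RetrieveOne with a typed mo struct.

package example // hypothetical package name for this sketch

import (
	"fmt"

	"github.com/vmware/govmomi/property"
	"github.com/vmware/govmomi/vim25"
	"github.com/vmware/govmomi/vim25/mo"
	"github.com/vmware/govmomi/vim25/types"
	"golang.org/x/net/context"
)

// printVM loads two properties of a single VM. Passing a nil property list
// instead would load all properties, per the PropertySpec.All branch in
// Collector.Retrieve above.
func printVM(ctx context.Context, c *vim25.Client, ref types.ManagedObjectReference) error {
	var vm mo.VirtualMachine

	pc := property.DefaultCollector(c)
	if err := pc.RetrieveOne(ctx, ref, []string{"name", "runtime.powerState"}, &vm); err != nil {
		return err
	}

	fmt.Println(vm.Name, vm.Runtime.PowerState)
	return nil
}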
+*/ + +package property + +import ( + "github.com/vmware/govmomi/vim25/types" + "golang.org/x/net/context" +) + +// Wait waits for any of the specified properties of the specified managed +// object to change. It calls the specified function for every update it +// receives. If this function returns false, it continues waiting for +// subsequent updates. If this function returns true, it stops waiting and +// returns. +// +// To only receive updates for the specified managed object, the function +// creates a new property collector and calls CreateFilter. A new property +// collector is required because filters can only be added, not removed. +// +// The newly created collector is destroyed before this function returns (both +// in case of success or error). +// +func Wait(ctx context.Context, c *Collector, obj types.ManagedObjectReference, ps []string, f func([]types.PropertyChange) bool) error { + p, err := c.Create(ctx) + if err != nil { + return err + } + + // Attempt to destroy the collector using the background context, as the + // specified context may have timed out or have been cancelled. + defer p.Destroy(context.Background()) + + req := types.CreateFilter{ + Spec: types.PropertyFilterSpec{ + ObjectSet: []types.ObjectSpec{ + { + Obj: obj, + }, + }, + PropSet: []types.PropertySpec{ + { + PathSet: ps, + Type: obj.Type, + }, + }, + }, + } + + err = p.CreateFilter(ctx, req) + if err != nil { + return err + } + + for version := ""; ; { + res, err := p.WaitForUpdates(ctx, version) + if err != nil { + return err + } + + // Retry if the result came back empty + if res == nil { + continue + } + + version = res.Version + + for _, fs := range res.FilterSet { + for _, os := range fs.ObjectSet { + if os.Obj == obj { + if f(os.ChangeSet) { + return nil + } + } + } + } + } +} diff --git a/vendor/github.com/vmware/govmomi/scripts/.gitignore b/vendor/github.com/vmware/govmomi/scripts/.gitignore new file mode 100644 index 000000000..d0153ee29 --- /dev/null +++ b/vendor/github.com/vmware/govmomi/scripts/.gitignore @@ -0,0 +1 @@ +.wireshark-* diff --git a/vendor/github.com/vmware/govmomi/scripts/contributors.sh b/vendor/github.com/vmware/govmomi/scripts/contributors.sh new file mode 100644 index 000000000..609873842 --- /dev/null +++ b/vendor/github.com/vmware/govmomi/scripts/contributors.sh @@ -0,0 +1,24 @@ +#!/bin/bash + +set -e + +outfile="CONTRIBUTORS" +tmpfile="CONTRIBUTORS.tmp" +cp "${outfile}" "${tmpfile}" + +# Make sure the email address of every contributor is listed
git shortlog -sne | while read line; do + name=$(perl -pe 's/\d+\s+//' <<<"${line}") + email=$(grep -Po '(?<=<).*(?=>)' <<<"${name}") + if ! grep -q "${email}" "${outfile}"; then + echo "${name}" >> "${tmpfile}" + fi +done + +# Sort entries + ( + sed -ne '1,5p' "${tmpfile}" + sed -ne '1,5!p' "${tmpfile}" | sort +) > "${outfile}" + +rm -f "${tmpfile}" diff --git a/vendor/github.com/vmware/govmomi/scripts/debug-ls.sh b/vendor/github.com/vmware/govmomi/scripts/debug-ls.sh new file mode 100644 index 000000000..0cb51abf8 --- /dev/null +++ b/vendor/github.com/vmware/govmomi/scripts/debug-ls.sh @@ -0,0 +1,43 @@ +#!/bin/bash + +# Copyright (c) 2014 VMware, Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License.
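A sketch of a typical property.Wait call, assuming ctx, a *property.Collector pc, and a "VirtualMachine" reference vm (all hypothetical); it blocks until the power state reaches poweredOff, which is how govmomi code generally consumes this API.

package example // hypothetical package name for this sketch

import (
	"github.com/vmware/govmomi/property"
	"github.com/vmware/govmomi/vim25/types"
	"golang.org/x/net/context"
)

// waitPoweredOff blocks until the VM's runtime.powerState reports
// poweredOff, using the filter-then-WaitForUpdates loop above.
func waitPoweredOff(ctx context.Context, pc *property.Collector, vm types.ManagedObjectReference) error {
	return property.Wait(ctx, pc, vm, []string{"runtime.powerState"}, func(changes []types.PropertyChange) bool {
		for _, c := range changes {
			if c.Name != "runtime.powerState" {
				continue
			}
			if c.Val == types.VirtualMachinePowerStatePoweredOff {
				return true // done waiting
			}
		}
		return false // keep waiting for the next update set
	})
}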
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +set -e + +# This script shows for every request in a debug trace how long it took +# and the name of the request body. + +function body-name { + ( + xmllint --shell $1 < +# * Execute `govc-env --save <name>` to save GOVC_* variables. +# * Execute `govc-env <name>` to load GOVC_* variables. +# + +_govc_env_dir=$HOME/.govmomi/env +mkdir -p "${_govc_env_dir}" + +_govc-env-complete() { + local w="${COMP_WORDS[COMP_CWORD]}" + local c="$(find ${_govc_env_dir} -mindepth 1 -maxdepth 1 -type f | sort | xargs -r -L1 basename | xargs echo)" + + # Only allow completion if the preceding argument is the function itself + if [ "$3" == "govc-env" ]; then + COMPREPLY=( $(compgen -W "${c}" -- "${w}") ) + fi +} + +govc-env() { + # Print current environment + if [ -z "$1" ]; then + for VAR in $(env | grep ^GOVC_ | cut -d= -f1); do + echo "export ${VAR}='${!VAR}'" + done + + return + fi + + # Save current environment + if [ "$1" == "--save" ]; then + govc-env > ${_govc_env_dir}/$2 + return + fi + + # Load specified environment + source ${_govc_env_dir}/$1 +} + +complete -F _govc-env-complete govc-env + diff --git a/vendor/github.com/vmware/govmomi/scripts/headers/go.txt b/vendor/github.com/vmware/govmomi/scripts/headers/go.txt new file mode 100644 index 000000000..7dfe744e9 --- /dev/null +++ b/vendor/github.com/vmware/govmomi/scripts/headers/go.txt @@ -0,0 +1,15 @@ +/* +Copyright (c) ${YEARS} VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ diff --git a/vendor/github.com/vmware/govmomi/scripts/headers/rb.txt b/vendor/github.com/vmware/govmomi/scripts/headers/rb.txt new file mode 100644 index 000000000..1c3bd22d4 --- /dev/null +++ b/vendor/github.com/vmware/govmomi/scripts/headers/rb.txt @@ -0,0 +1,13 @@ +# Copyright (c) ${YEARS} VMware, Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License.
diff --git a/vendor/github.com/vmware/govmomi/scripts/license.sh b/vendor/github.com/vmware/govmomi/scripts/license.sh new file mode 100644 index 000000000..d47cad02c --- /dev/null +++ b/vendor/github.com/vmware/govmomi/scripts/license.sh @@ -0,0 +1,48 @@ +#!/bin/bash + +set -e + +header_dir=$(dirname $0)/headers + +tmpfile=$(mktemp) +trap "rm -f ${tmpfile}" EXIT + +git ls-files | while read file; do + years=( $(git log --format='%ai' $file | cut -d- -f1 | sort -u) ) + num_years=${#years[@]} + + if [ "${num_years}" == 0 ]; then + export YEARS="$(date +%Y)" + else + yearA=${years[0]} + yearB=${years[$((${num_years}-1))]} + + if [ ${yearA} == ${yearB} ]; then + export YEARS="${yearA}" + else + export YEARS="${yearA}-${yearB}" + fi + fi + + case "$file" in + vim25/xml/*) + # Ignore + ;; + *.go) + sed -e "s/\${YEARS}/${YEARS}/" ${header_dir}/go.txt > ${tmpfile} + last_header_line=$(grep -n '\*/' ${file} | head -1 | cut -d: -f1) + tail -n +$((${last_header_line} + 1)) ${file} >> ${tmpfile} + mv ${tmpfile} ${file} + ;; + *.rb) + sed -e "s/\${YEARS}/${YEARS}/" ${header_dir}/rb.txt > ${tmpfile} + last_header_line=$(grep -n '^$' ${file} | head -1 | cut -d: -f1) + tail -n +$((${last_header_line})) ${file} >> ${tmpfile} + mv ${tmpfile} ${file} + ;; + *) + echo "Unhandled file: $file" + ;; + esac +done + diff --git a/vendor/github.com/vmware/govmomi/scripts/vagrant/vcsa/.gitignore b/vendor/github.com/vmware/govmomi/scripts/vagrant/vcsa/.gitignore new file mode 100644 index 000000000..cd5d9c70b --- /dev/null +++ b/vendor/github.com/vmware/govmomi/scripts/vagrant/vcsa/.gitignore @@ -0,0 +1,3 @@ +*.box +*.ova +.vagrant diff --git a/vendor/github.com/vmware/govmomi/scripts/vagrant/vcsa/Vagrantfile b/vendor/github.com/vmware/govmomi/scripts/vagrant/vcsa/Vagrantfile new file mode 100644 index 000000000..705799b74 --- /dev/null +++ b/vendor/github.com/vmware/govmomi/scripts/vagrant/vcsa/Vagrantfile @@ -0,0 +1,13 @@ +# -*- mode: ruby -*- +# vi: set ft=ruby : + +# Using the VCSA base box, no provisioning, inventory will be empty. + +Vagrant.configure("2") do |config| + config.vm.hostname = "vcsa" + + config.vm.box = "vcsa" + config.vm.synced_folder ".", "/vagrant", disabled: true + + config.vm.network "forwarded_port", guest: 443, host: 16443 +end diff --git a/vendor/github.com/vmware/govmomi/scripts/vagrant/vcsa/create-box.sh b/vendor/github.com/vmware/govmomi/scripts/vagrant/vcsa/create-box.sh new file mode 100644 index 000000000..487253b21 --- /dev/null +++ b/vendor/github.com/vmware/govmomi/scripts/vagrant/vcsa/create-box.sh @@ -0,0 +1,81 @@ +#!/bin/sh + +set -e + +if [ "$(uname -s)" == "Darwin" ]; then + PATH="/Applications/VMware Fusion.app/Contents/Library:$PATH" + PATH="/Applications/VMware Fusion.app/Contents/Library/VMware OVF Tool:$PATH" +fi + +ovf="$1" + +if [ -z "$ovf" ]; then + ovf="./VMware-vCenter-Server-Appliance-5.5.0.10300-2000350_OVA10.ova" +fi + +# check for greadlink and gmktemp +readlink=$(type -p greadlink readlink | head -1) +mktemp=$(type -p gmktemp mktemp | head -1) + +dir=$($readlink -nf $(dirname $0)) +tmp=$($mktemp -d) +trap "rm -rf $tmp" EXIT + +cd $tmp + +echo "Converting ovf..." +ovftool \ + --noSSLVerify \ + --acceptAllEulas \ + --overwrite \ + --powerOffTarget \ + $ovf vcsa.vmx + +echo "Starting vm..." +vmrun start vcsa.vmx nogui + +echo "Waiting for vm ip..." +ip=$(vmrun getGuestIPAddress vcsa.vmx -wait) + +echo "Configuring vm for use with vagrant..." 
+vmrun -gu root -gp vmware CopyFileFromHostToGuest vcsa.vmx \ + $dir/vagrant.sh /tmp/vagrant.sh + +vmrun -gu root -gp vmware runProgramInGuest vcsa.vmx \ + /bin/sh -e /tmp/vagrant.sh + +vmrun -gu root -gp vmware deleteFileInGuest vcsa.vmx \ + /tmp/vagrant.sh + +echo "Configuring vCenter Server Appliance..." + +ssh_opts="-oStrictHostKeyChecking=no -oUserKnownHostsFile=/dev/null -T" + +ssh ${ssh_opts} -i ~/.vagrant.d/insecure_private_key vagrant@$ip < ./metadata.json + +cd $dir + +tar -C $tmp -cvzf vcsa.box . + +vagrant box add --name vcsa vcsa.box diff --git a/vendor/github.com/vmware/govmomi/scripts/vagrant/vcsa/vagrant.sh b/vendor/github.com/vmware/govmomi/scripts/vagrant/vcsa/vagrant.sh new file mode 100644 index 000000000..976cc17de --- /dev/null +++ b/vendor/github.com/vmware/govmomi/scripts/vagrant/vcsa/vagrant.sh @@ -0,0 +1,23 @@ +#!/bin/sh + +useradd vagrant -m -s /bin/bash +groupmod -A vagrant wheel + +echo "vagrant ALL=(ALL) NOPASSWD: ALL" >> /etc/sudoers + +mkdir ~vagrant/.ssh +wget --no-check-certificate \ + https://raw.githubusercontent.com/mitchellh/vagrant/master/keys/vagrant.pub \ + -O ~vagrant/.ssh/authorized_keys +chown -R vagrant ~vagrant/.ssh +chmod -R go-rwsx ~vagrant/.ssh + +sed -i -e 's/^#UseDNS yes/UseDNS no/' /etc/ssh/sshd_config +sed -i -e 's/^AllowTcpForwarding no//' /etc/ssh/sshd_config +sed -i -e 's/^PermitTunnel no//' /etc/ssh/sshd_config +sed -i -e 's/^MaxSessions 1//' /etc/ssh/sshd_config + +# disable password expiration +for uid in root vagrant; do + chage -I -1 -E -1 -m 0 -M -1 $uid +done diff --git a/vendor/github.com/vmware/govmomi/scripts/wireshark-esx.sh b/vendor/github.com/vmware/govmomi/scripts/wireshark-esx.sh new file mode 100644 index 000000000..d1b2427d5 --- /dev/null +++ b/vendor/github.com/vmware/govmomi/scripts/wireshark-esx.sh @@ -0,0 +1,68 @@ +#!/bin/bash -e +# +# Capture ESXi traffic and decrypt SOAP traffic on port 443 via wireshark + +# Device to capture +dev="${1-vmk0}" + +# Device to get the ip for wireshark ssl_keys config +if [ "$dev" = "lo0" ] ; then + ip_dev="vmk0" +else + ip_dev="$dev" +fi + +ip=$(govc host.info -k -json | \ + jq -r ".HostSystems[].Config.Network.Vnic[] | select(.Device == \"${ip_dev}\") | .Spec.Ip.IpAddress") + +scp=(scp) +ssh=(ssh) + +# Check if vagrant ssh-config applies to $ip +if [ -d ".vagrant" ] ; then + vssh_opts=($(vagrant ssh-config | awk NF | awk -v ORS=' ' '{print "-o " $1 "=" $2}')) + if grep "HostName=${ip}" >/dev/null <<<"${vssh_opts[*]}" ; then + ssh_opts=("${vssh_opts[@]}") + fi +fi + +# Otherwise, use default ssh opts + sshpass if available +if [ ${#ssh_opts[@]} -eq 0 ] ; then + userpass=$(awk -F// '{print $NF}' <<<"$GOVC_URL" | awk -F@ '{print $1}') + username=$(awk -F: '{print $1}' <<<"$userpass") + password=$(awk -F: '{print $2}' <<<"$userpass") + + ssh_opts=(-o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -o LogLevel=FATAL -o User=${username}) + + if [ -x "$(which sshpass)" ] ; then + if [ -z "$password" ] ; then + password="$GOVC_PASSWORD" + fi + scp=(sshpass -p $password scp) + ssh=(sshpass -p $password ssh) + fi +fi + +if [ "$dev" != "lo0" ] ; then + # If you change this filter, be sure to exclude the ssh port (not tcp port 22) + filter="host $ip and \(port 80 or port 443\)" + + dst="$HOME/.wireshark/rui-${ip}.key" + if [ ! -f "$dst" ] ; then + # Copy key from ESX + "${scp[@]}" "${ssh_opts[@]}" "${ip}:/etc/vmware/ssl/rui.key" "$dst" + fi + + if ! 
grep "$ip" ~/.wireshark/ssl_keys 2>/dev/null ; then + # Add key to wireshark ssl_keys config + echo "adding rui.key for $ip" + + cat <> ~/.wireshark/ssl_keys +"$ip","443","http","$dst","" +EOF + fi +fi + +echo "Capturing $dev on $ip..." + +"${ssh[@]}" "${ssh_opts[@]}" "$ip" tcpdump-uw -i "$dev" -s0 -v -w - "$filter" | wireshark -k -i - diff --git a/vendor/github.com/vmware/govmomi/scripts/wireshark-vcsa.sh b/vendor/github.com/vmware/govmomi/scripts/wireshark-vcsa.sh new file mode 100644 index 000000000..ea762ebd9 --- /dev/null +++ b/vendor/github.com/vmware/govmomi/scripts/wireshark-vcsa.sh @@ -0,0 +1,65 @@ +#!/bin/bash -e +# +# Capture SOAP traffic between web client and vpxd on 127.0.0.1:8085. +# +# Caveats: tested with VCSA 6.0, unlikely to work for other versions. +# + +set -e + +cache_deb() { + wget $1 + ar x *.deb data.tar.gz + tar zxf data.tar.gz + rm -f data.tar.gz + rm -f *.deb +} + +dirname="$(dirname $0)" +basename="$(basename $0)" +bindir="${dirname}/.${basename}" + +mkdir -p "${bindir}" + +# Cache binaries required to run tcpdump on vcsa +if [ ! -f "${bindir}/.done" ]; then + pushd ${bindir} + cache_deb https://launchpadlibrarian.net/200649143/libssl0.9.8_0.9.8k-7ubuntu8.27_amd64.deb + cache_deb https://launchpadlibrarian.net/37430984/libpcap0.8_1.0.0-6_amd64.deb + cache_deb https://launchpadlibrarian.net/41774869/tcpdump_4.0.0-6ubuntu3_amd64.deb + touch .done + popd +fi + +scp=(scp) +ssh=(ssh) + +# Extract host from GOVC_URL +userpasshost=$(awk -F/ '{print $(NF-1)}' <<<"$GOVC_URL") +host=$(awk -F@ '{print $NF}' <<<"$userpasshost") +username=root +password="$GOVC_PASSWORD" + +if [ -x "$(which sshpass)" ] ; then + scp=(sshpass -p "$password" scp) + ssh=(sshpass -p "$password" ssh) +fi + +ssh_opts=(-o UserKnownHostsFile=/dev/null + -o StrictHostKeyChecking=no + -o LogLevel=FATAL + -o User=${username} + -o ControlMaster=no) +dev="lo" +filter="port 8085" +tcpdump="env LD_LIBRARY_PATH=/tmp /tmp/tcpdump" + +echo "Capturing $dev on $host..." + +"${scp[@]}" "${ssh_opts[@]}" \ + "${bindir}/lib/libcrypto.so.0.9.8" \ + "${bindir}/usr/lib/libpcap.so.0.8" \ + "${bindir}/usr/sbin/tcpdump" \ + "${host}:/tmp" + +"${ssh[@]}" "${ssh_opts[@]}" "$host" ${tcpdump} -i "$dev" -s0 -v -w - "$filter" | wireshark -k -i - 2>/dev/null diff --git a/vendor/github.com/vmware/govmomi/session/keep_alive.go b/vendor/github.com/vmware/govmomi/session/keep_alive.go new file mode 100644 index 000000000..f157b7f6d --- /dev/null +++ b/vendor/github.com/vmware/govmomi/session/keep_alive.go @@ -0,0 +1,127 @@ +/* +Copyright (c) 2015 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package session + +import ( + "sync" + "time" + + "github.com/vmware/govmomi/vim25/methods" + "github.com/vmware/govmomi/vim25/soap" + "golang.org/x/net/context" +) + +type keepAlive struct { + sync.Mutex + + roundTripper soap.RoundTripper + idleTime time.Duration + notifyRequest chan struct{} + notifyStop chan struct{} + notifyWaitGroup sync.WaitGroup + + // keepAlive executes a request in the background with the purpose of + // keeping the session active. The response for this request is discarded. + keepAlive func(soap.RoundTripper) error +} + +func defaultKeepAlive(roundTripper soap.RoundTripper) error { + _, _ = methods.GetCurrentTime(context.Background(), roundTripper) + return nil +} + +// KeepAlive wraps the specified soap.RoundTripper and executes a meaningless +// API request in the background after the RoundTripper has been idle for the +// specified amount of idle time. The keep alive process only starts once a +// user logs in and runs until the user logs out again. +func KeepAlive(roundTripper soap.RoundTripper, idleTime time.Duration) soap.RoundTripper { + return KeepAliveHandler(roundTripper, idleTime, defaultKeepAlive) +} + +// KeepAliveHandler works as KeepAlive() does, but the handler param can decide how to handle errors. +// For example, if connectivity to ESX/VC is down long enough for a session to expire, a handler can choose to +// Login() on a types.NotAuthenticated error. If the handler returns non-nil, the keep alive goroutine will be stopped. +func KeepAliveHandler(roundTripper soap.RoundTripper, idleTime time.Duration, handler func(soap.RoundTripper) error) soap.RoundTripper { + k := &keepAlive{ + roundTripper: roundTripper, + idleTime: idleTime, + notifyRequest: make(chan struct{}), + } + + k.keepAlive = handler + + return k +} + +func (k *keepAlive) start() { + k.Lock() + defer k.Unlock() + + if k.notifyStop != nil { + return + } + + // This channel must be closed to terminate idle timer. + k.notifyStop = make(chan struct{}) + k.notifyWaitGroup.Add(1) + + go func() { + defer k.notifyWaitGroup.Done() + + for t := time.NewTimer(k.idleTime); ; { + select { + case <-k.notifyStop: + return + case <-k.notifyRequest: + t.Reset(k.idleTime) + case <-t.C: + if err := k.keepAlive(k.roundTripper); err != nil { + k.stop() + } + t = time.NewTimer(k.idleTime) + } + } + }() +} + +func (k *keepAlive) stop() { + k.Lock() + defer k.Unlock() + + if k.notifyStop != nil { + close(k.notifyStop) + k.notifyWaitGroup.Wait() + k.notifyStop = nil + } +} + +func (k *keepAlive) RoundTrip(ctx context.Context, req, res soap.HasFault) error { + err := k.roundTripper.RoundTrip(ctx, req, res) + if err != nil { + return err + } + + // Start ticker on login, stop ticker on logout. + switch req.(type) { + case *methods.LoginBody, *methods.LoginExtensionByCertificateBody: + k.start() + case *methods.LogoutBody: + k.stop() + } + + return nil +} diff --git a/vendor/github.com/vmware/govmomi/session/keep_alive_test.go b/vendor/github.com/vmware/govmomi/session/keep_alive_test.go new file mode 100644 index 000000000..69b317c7d --- /dev/null +++ b/vendor/github.com/vmware/govmomi/session/keep_alive_test.go @@ -0,0 +1,280 @@ +/* +Copyright (c) 2015 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License.
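A sketch of wiring the keep-alive round tripper into a client, mirroring the tests that follow; it assumes ctx and a parsed URL u with credentials (both hypothetical), and uses session.Manager, which is defined later in this patch. By default the background request is GetCurrentTime, per defaultKeepAlive above.

package example // hypothetical package name for this sketch

import (
	"net/url"
	"time"

	"github.com/vmware/govmomi/session"
	"github.com/vmware/govmomi/vim25"
	"github.com/vmware/govmomi/vim25/soap"
	"golang.org/x/net/context"
)

// newKeptAliveClient wraps the client's RoundTripper between client
// creation and login, so the Login request itself starts the timer.
func newKeptAliveClient(ctx context.Context, u *url.URL) (*vim25.Client, *session.Manager, error) {
	soapClient := soap.NewClient(u, true) // true: skip TLS verification
	vimClient, err := vim25.NewClient(ctx, soapClient)
	if err != nil {
		return nil, nil, err
	}

	// Fire a keep-alive request whenever the session is idle for 10 minutes.
	vimClient.RoundTripper = session.KeepAlive(vimClient.RoundTripper, 10*time.Minute)

	m := session.NewManager(vimClient)
	if err := m.Login(ctx, u.User); err != nil { // Login starts the timer
		return nil, nil, err
	}

	return vimClient, m, nil // a later m.Logout(ctx) stops the timer
}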
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package session + +import ( + "fmt" + "net/url" + "os" + "runtime" + "testing" + "time" + + "github.com/vmware/govmomi/test" + "github.com/vmware/govmomi/vim25" + "github.com/vmware/govmomi/vim25/methods" + "github.com/vmware/govmomi/vim25/soap" + "github.com/vmware/govmomi/vim25/types" + "golang.org/x/net/context" +) + +type testKeepAlive int + +func (t *testKeepAlive) Func(soap.RoundTripper) error { + *t++ + return nil +} + +func newManager(t *testing.T) (*Manager, *url.URL) { + u := test.URL() + if u == nil { + t.SkipNow() + } + + soapClient := soap.NewClient(u, true) + vimClient, err := vim25.NewClient(context.Background(), soapClient) + if err != nil { + t.Fatal(err) + } + + return NewManager(vimClient), u +} + +func TestKeepAlive(t *testing.T) { + var i testKeepAlive + var j int + + m, u := newManager(t) + k := KeepAlive(m.client.RoundTripper, time.Millisecond) + k.(*keepAlive).keepAlive = i.Func + m.client.RoundTripper = k + + // Expect keep alive to not have triggered yet + if i != 0 { + t.Errorf("Expected i == 0, got i: %d", i) + } + + // Logging in starts keep alive + err := m.Login(context.Background(), u.User) + if err != nil { + t.Error(err) + } + + time.Sleep(2 * time.Millisecond) + + // Expect keep alive to have triggered at least once + if i == 0 { + t.Errorf("Expected i != 0, got i: %d", i) + } + + j = int(i) + time.Sleep(2 * time.Millisecond) + + // Expect keep alive to have triggered at least once more + if int(i) <= j { + t.Errorf("Expected i > j, got i: %d, j: %d", i, j) + } + + // Logging out stops keep alive + err = m.Logout(context.Background()) + if err != nil { + t.Error(err) + } + + j = int(i) + time.Sleep(2 * time.Millisecond) + + // Expect keep alive to have stopped + if int(i) != j { + t.Errorf("Expected i == j, got i: %d, j: %d", i, j) + } +} + +func testSessionOK(t *testing.T, m *Manager, ok bool) { + s, err := m.UserSession(context.Background()) + if err != nil { + t.Fatal(err) + } + + _, file, line, _ := runtime.Caller(1) + prefix := fmt.Sprintf("%s:%d", file, line) + + if ok && s == nil { + t.Fatalf("%s: Expected session to be OK, but is invalid", prefix) + } + + if !ok && s != nil { + t.Fatalf("%s: Expected session to be invalid, but is OK", prefix) + } +} + +// Run with: +// +// env GOVMOMI_KEEPALIVE_TEST=1 go test -timeout=60m -run TestRealKeepAlive +// +func TestRealKeepAlive(t *testing.T) { + if os.Getenv("GOVMOMI_KEEPALIVE_TEST") != "1" { + t.SkipNow() + } + + m1, u1 := newManager(t) + m2, u2 := newManager(t) + + // Enable keepalive on m2 + k := KeepAlive(m2.client.RoundTripper, 10*time.Minute) + m2.client.RoundTripper = k + + // Expect both sessions to be invalid + testSessionOK(t, m1, false) + testSessionOK(t, m2, false) + + // Logging in starts keep alive + if err := m1.Login(context.Background(), u1.User); err != nil { + t.Error(err) + } + if err := m2.Login(context.Background(), u2.User); err != nil { + t.Error(err) + } + + // Expect both sessions to be valid + testSessionOK(t, m1, true) + testSessionOK(t, m2, true) + + // Wait for m1 to time out + delay := 31 * time.Minute + fmt.Printf("%s: Waiting %d minutes for session to time out...\n", time.Now(),
int(delay.Minutes())) + time.Sleep(delay) + + // Expect m1's session to be invalid, m2's session to be valid + testSessionOK(t, m1, false) + testSessionOK(t, m2, true) +} + +func isNotAuthenticated(err error) bool { + if soap.IsSoapFault(err) { + switch soap.ToSoapFault(err).VimFault().(type) { + case types.NotAuthenticated: + return true + } + } + return false +} + +func isInvalidLogin(err error) bool { + if soap.IsSoapFault(err) { + switch soap.ToSoapFault(err).VimFault().(type) { + case types.InvalidLogin: + return true + } + } + return false +} + +func TestKeepAliveHandler(t *testing.T) { + u := test.URL() + if u == nil { + t.SkipNow() + } + + m1, u1 := newManager(t) + m2, u2 := newManager(t) + + reauth := make(chan bool) + + // Keep alive handler that will re-login. + // Real-world case: connectivity to ESX/VC is down long enough for the session to expire + // Test-world case: we call TerminateSession below + k := KeepAliveHandler(m2.client.RoundTripper, 2*time.Second, func(roundTripper soap.RoundTripper) error { + _, err := methods.GetCurrentTime(context.Background(), roundTripper) + if err != nil { + if isNotAuthenticated(err) { + err = m2.Login(context.Background(), u2.User) + + if err != nil { + if isInvalidLogin(err) { + reauth <- false + t.Log("failed to re-authenticate, quitting keep alive handler") + return err + } + } else { + reauth <- true + } + } + } + + return nil + }) + + m2.client.RoundTripper = k + + // Logging in starts keep alive + if err := m1.Login(context.Background(), u1.User); err != nil { + t.Error(err) + } + if err := m2.Login(context.Background(), u2.User); err != nil { + t.Error(err) + } + + // Terminate session for m2. Note that self-termination fails, so we need 2 sessions for this test. + s, err := m2.UserSession(context.Background()) + if err != nil { + t.Fatal(err) + } + + err = m1.TerminateSession(context.Background(), []string{s.Key}) + if err != nil { + t.Fatal(err) + } + + _, err = methods.GetCurrentTime(context.Background(), m2.client) + if err == nil { + t.Error("expected to fail") + } + + // Wait for keepalive to re-authenticate + <-reauth + + _, err = methods.GetCurrentTime(context.Background(), m2.client) + if err != nil { + t.Fatal(err) + } + + // Clear credentials to test re-authentication failure + u2.User = nil + + s, err = m2.UserSession(context.Background()) + if err != nil { + t.Fatal(err) + } + + err = m1.TerminateSession(context.Background(), []string{s.Key}) + if err != nil { + t.Fatal(err) + } + + // Wait for the keepalive re-authentication attempt + result := <-reauth + + _, err = methods.GetCurrentTime(context.Background(), m2.client) + if err == nil { + t.Error("expected to fail") + } + + if result { + t.Errorf("expected reauth to fail") + } +} diff --git a/vendor/github.com/vmware/govmomi/session/manager.go b/vendor/github.com/vmware/govmomi/session/manager.go new file mode 100644 index 000000000..5df6ebfc4 --- /dev/null +++ b/vendor/github.com/vmware/govmomi/session/manager.go @@ -0,0 +1,163 @@ +/* +Copyright (c) 2015 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package session + +import ( + "net/url" + + "github.com/vmware/govmomi/property" + "github.com/vmware/govmomi/vim25" + "github.com/vmware/govmomi/vim25/methods" + "github.com/vmware/govmomi/vim25/mo" + "github.com/vmware/govmomi/vim25/types" + "golang.org/x/net/context" +) + +type Manager struct { + client *vim25.Client + userSession *types.UserSession +} + +func NewManager(client *vim25.Client) *Manager { + m := Manager{ + client: client, + } + + return &m +} + +func (sm Manager) Reference() types.ManagedObjectReference { + return *sm.client.ServiceContent.SessionManager +} + +func (sm *Manager) Login(ctx context.Context, u *url.Userinfo) error { + req := types.Login{ + This: sm.Reference(), + } + + if u != nil { + req.UserName = u.Username() + if pw, ok := u.Password(); ok { + req.Password = pw + } + } + + login, err := methods.Login(ctx, sm.client, &req) + if err != nil { + return err + } + + sm.userSession = &login.Returnval + return nil +} + +func (sm *Manager) LoginExtensionByCertificate(ctx context.Context, key string, locale string) error { + req := types.LoginExtensionByCertificate{ + This: sm.Reference(), + ExtensionKey: key, + Locale: locale, + } + + login, err := methods.LoginExtensionByCertificate(ctx, sm.client, &req) + if err != nil { + return err + } + + sm.userSession = &login.Returnval + return nil +} + +func (sm *Manager) Logout(ctx context.Context) error { + req := types.Logout{ + This: sm.Reference(), + } + + _, err := methods.Logout(ctx, sm.client, &req) + if err != nil { + return err + } + + sm.userSession = nil + return nil +} + +// UserSession retrieves and returns the SessionManager's CurrentSession field. +// Nil is returned if the session is not authenticated. +func (sm *Manager) UserSession(ctx context.Context) (*types.UserSession, error) { + var mgr mo.SessionManager + + pc := property.DefaultCollector(sm.client) + err := pc.RetrieveOne(ctx, sm.Reference(), []string{"currentSession"}, &mgr) + if err != nil { + // It's OK if we can't retrieve properties because we're not authenticated + if f, ok := err.(types.HasFault); ok { + switch f.Fault().(type) { + case *types.NotAuthenticated: + return nil, nil + } + } + + return nil, err + } + + return mgr.CurrentSession, nil +} + +func (sm *Manager) TerminateSession(ctx context.Context, sessionId []string) error { + req := types.TerminateSession{ + This: sm.Reference(), + SessionId: sessionId, + } + + _, err := methods.TerminateSession(ctx, sm.client, &req) + return err +} + +// SessionIsActive checks whether the session that was created at login is +// still valid. This function only works against vCenter. 
+func (sm *Manager) SessionIsActive(ctx context.Context) (bool, error) { + if sm.userSession == nil { + return false, nil + } + + req := types.SessionIsActive{ + This: sm.Reference(), + SessionID: sm.userSession.Key, + UserName: sm.userSession.UserName, + } + + active, err := methods.SessionIsActive(ctx, sm.client, &req) + if err != nil { + return false, err + } + + return active.Returnval, err +} + +func (sm *Manager) AcquireGenericServiceTicket(ctx context.Context, spec types.BaseSessionManagerServiceRequestSpec) (*types.SessionManagerGenericServiceTicket, error) { + req := types.AcquireGenericServiceTicket{ + This: sm.Reference(), + Spec: spec, + } + + res, err := methods.AcquireGenericServiceTicket(ctx, sm.client, &req) + if err != nil { + return nil, err + } + + return &res.Returnval, nil +} diff --git a/vendor/github.com/vmware/govmomi/session/manager_test.go b/vendor/github.com/vmware/govmomi/session/manager_test.go new file mode 100644 index 000000000..7ad0092e6 --- /dev/null +++ b/vendor/github.com/vmware/govmomi/session/manager_test.go @@ -0,0 +1,106 @@ +/* +Copyright (c) 2015 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package session + +import ( + "net/url" + "testing" + + "github.com/vmware/govmomi/test" + "github.com/vmware/govmomi/vim25" + "github.com/vmware/govmomi/vim25/soap" + "golang.org/x/net/context" +) + +func sessionClient(u *url.URL, t *testing.T) *Manager { + soapClient := soap.NewClient(u, true) + vimClient, err := vim25.NewClient(context.Background(), soapClient) + if err != nil { + t.Fatal(err) + } + + return NewManager(vimClient) +} + +func TestLogin(t *testing.T) { + u := test.URL() + if u == nil { + t.SkipNow() + } + + session := sessionClient(u, t) + err := session.Login(context.Background(), u.User) + if err != nil { + t.Errorf("Expected no error, got %v", err) + } +} + +func TestLogout(t *testing.T) { + u := test.URL() + if u == nil { + t.SkipNow() + } + + session := sessionClient(u, t) + err := session.Login(context.Background(), u.User) + if err != nil { + t.Error("Login Error: ", err) + } + + err = session.Logout(context.Background()) + if err != nil { + t.Errorf("Expected nil, got %v", err) + } + + err = session.Logout(context.Background()) + if err == nil { + t.Errorf("Expected NotAuthenticated, got nil") + } +} + +func TestSessionIsActive(t *testing.T) { + u := test.URL() + if u == nil { + t.SkipNow() + } + + session := sessionClient(u, t) + + // Skip test against ESXi -- SessionIsActive is not implemented + if session.client.ServiceContent.About.ApiType != "VirtualCenter" { + t.Skipf("Talking to %s instead of %s", session.client.ServiceContent.About.ApiType, "VirtualCenter") + } + + err := session.Login(context.Background(), u.User) + if err != nil { + t.Error("Login Error: ", err) + } + + active, err := session.SessionIsActive(context.Background()) + if err != nil || !active { + t.Errorf("Expected %t, got %t", true, active) + t.Errorf("Expected nil, got %v", err) + } + + session.Logout(context.Background()) + + active, err = 
session.SessionIsActive(context.Background()) + if err == nil || active { + t.Errorf("Expected %t, got %t", false, active) + t.Errorf("Expected NotAuthenticated, got %v", err) + } +} diff --git a/vendor/github.com/vmware/govmomi/task/error.go b/vendor/github.com/vmware/govmomi/task/error.go new file mode 100644 index 000000000..5f6b8503f --- /dev/null +++ b/vendor/github.com/vmware/govmomi/task/error.go @@ -0,0 +1,32 @@ +/* +Copyright (c) 2015 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package task + +import "github.com/vmware/govmomi/vim25/types" + +type Error struct { + *types.LocalizedMethodFault +} + +// Error returns the task's localized fault message. +func (e Error) Error() string { + return e.LocalizedMethodFault.LocalizedMessage +} + +func (e Error) Fault() types.BaseMethodFault { + return e.LocalizedMethodFault.Fault +} diff --git a/vendor/github.com/vmware/govmomi/task/wait.go b/vendor/github.com/vmware/govmomi/task/wait.go new file mode 100644 index 000000000..9432ee734 --- /dev/null +++ b/vendor/github.com/vmware/govmomi/task/wait.go @@ -0,0 +1,126 @@ +/* +Copyright (c) 2015 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package task + +import ( + "github.com/vmware/govmomi/property" + "github.com/vmware/govmomi/vim25/progress" + "github.com/vmware/govmomi/vim25/types" + "golang.org/x/net/context" +) + +type taskProgress struct { + info *types.TaskInfo +} + +func (t taskProgress) Percentage() float32 { + return float32(t.info.Progress) +} + +func (t taskProgress) Detail() string { + return "" +} + +func (t taskProgress) Error() error { + if t.info.Error != nil { + return Error{t.info.Error} + } + + return nil +} + +type taskCallback struct { + ch chan<- progress.Report + info *types.TaskInfo + err error +} + +func (t *taskCallback) fn(pc []types.PropertyChange) bool { + for _, c := range pc { + if c.Name != "info" { + continue + } + + if c.Op != types.PropertyChangeOpAssign { + continue + } + + if c.Val == nil { + continue + } + + ti := c.Val.(types.TaskInfo) + t.info = &ti + } + + pr := taskProgress{t.info} + + // Store copy of error, so Wait() can return it as well. 
+ t.err = pr.Error() + + switch t.info.State { + case types.TaskInfoStateQueued, types.TaskInfoStateRunning: + if t.ch != nil { + // Don't care if this is dropped + select { + case t.ch <- pr: + default: + } + } + return false + case types.TaskInfoStateSuccess, types.TaskInfoStateError: + if t.ch != nil { + // Last one must always be delivered + t.ch <- pr + } + return true + default: + panic("unknown state: " + t.info.State) + } +} + +// Wait waits for a task to finish with either success or failure. It does so +// by waiting for the "info" property of the task managed object to change. The +// function returns when it finds the task in the "success" or "error" state. +// In the former case, the return value is nil. In the latter case the return +// value is an instance of this package's Error struct. +// +// Any error returned while waiting for property changes causes the function to +// return immediately and propagate the error. +// +// If the progress.Sinker argument is specified, any progress updates for the +// task are sent here. The completion percentage is passed through directly. +// The detail for the progress update is set to an empty string. If the task +// finishes in the error state, the error instance is passed through as well. +// Note that this error is the same error that is returned by this function. +// +func Wait(ctx context.Context, ref types.ManagedObjectReference, pc *property.Collector, s progress.Sinker) (*types.TaskInfo, error) { + cb := &taskCallback{} + + // Include progress sink if specified + if s != nil { + cb.ch = s.Sink() + defer close(cb.ch) + } + + err := property.Wait(ctx, pc, ref, []string{"info"}, cb.fn) + if err != nil { + return nil, err + } + + return cb.info, cb.err +} diff --git a/vendor/github.com/vmware/govmomi/test/doc.go b/vendor/github.com/vmware/govmomi/test/doc.go new file mode 100644 index 000000000..4bcff639a --- /dev/null +++ b/vendor/github.com/vmware/govmomi/test/doc.go @@ -0,0 +1,22 @@ +/* +Copyright (c) 2015 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +/* +Package test contains functions that implement common functionality between +tests. The code (non-test) in this package intentionally does not take any +dependencies outside the vim25 tree. +*/ +package test diff --git a/vendor/github.com/vmware/govmomi/test/functional/helper.go b/vendor/github.com/vmware/govmomi/test/functional/helper.go new file mode 100644 index 000000000..1852200cf --- /dev/null +++ b/vendor/github.com/vmware/govmomi/test/functional/helper.go @@ -0,0 +1,130 @@ +/* +Copyright (c) 2015 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
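A sketch of waiting on a task with the function above, assuming ctx, an authenticated *vim25.Client c, and a "Task" reference ref (all hypothetical); no progress sink is attached, and the task.Error fault is unwrapped on failure.

package example // hypothetical package name for this sketch

import (
	"log"

	"github.com/vmware/govmomi/property"
	"github.com/vmware/govmomi/task"
	"github.com/vmware/govmomi/vim25"
	"github.com/vmware/govmomi/vim25/types"
	"golang.org/x/net/context"
)

// awaitTask blocks until the referenced task reaches success or error.
func awaitTask(ctx context.Context, c *vim25.Client, ref types.ManagedObjectReference) (*types.TaskInfo, error) {
	info, err := task.Wait(ctx, ref, property.DefaultCollector(c), nil)
	if err != nil {
		// On the error state, Wait returns this package's Error value,
		// which carries the underlying method fault.
		if terr, ok := err.(task.Error); ok {
			log.Printf("task fault: %#v", terr.Fault())
		}
		return nil, err
	}
	return info, nil
}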
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package functional + +import ( + "testing" + + "github.com/vmware/govmomi/find" + "github.com/vmware/govmomi/object" + "github.com/vmware/govmomi/property" + "github.com/vmware/govmomi/test" + "github.com/vmware/govmomi/vim25" + "github.com/vmware/govmomi/vim25/mo" + "github.com/vmware/govmomi/vim25/types" + "golang.org/x/net/context" +) + +type Helper struct { + *testing.T + + c *vim25.Client + f *find.Finder + fns []func() +} + +func NewHelper(t *testing.T) *Helper { + h := &Helper{ + T: t, + + c: test.NewAuthenticatedClient(t), + fns: make([]func(), 0), + } + + h.f = find.NewFinder(h.c, true) + + return h +} + +func (h *Helper) Defer(fn func()) { + h.fns = append(h.fns, fn) +} + +func (h *Helper) Teardown() { + for _, fn := range h.fns { + fn() + } +} + +func (h *Helper) RequireVirtualCenter() { + var expect = "VirtualCenter" + var actual = h.c.ServiceContent.About.ApiType + if actual != expect { + h.Skipf("Requires %s, running against %s", expect, actual) + } +} + +func (h *Helper) Datacenter() *object.Datacenter { + dc, err := h.f.DefaultDatacenter(context.Background()) + if err != nil { + h.Fatal(err) + } + + h.f.SetDatacenter(dc) + + return dc +} + +func (h *Helper) DatacenterFolders() *object.DatacenterFolders { + df, err := h.Datacenter().Folders(context.Background()) + if err != nil { + h.Fatal(err) + } + + return df +} + +func (h *Helper) ComputeResource() *object.ComputeResource { + cr, err := h.f.DefaultComputeResource(context.Background()) + if err != nil { + h.Fatal(err) + } + + return cr +} + +func (h *Helper) LocalDatastores(ctx context.Context, cr *object.ComputeResource) ([]*object.Datastore, error) { + // List datastores for compute resource + dss, err := cr.Datastores(ctx) + if err != nil { + return nil, err + } + + // Filter local datastores + var ldss []*object.Datastore + for _, ds := range dss { + var mds mo.Datastore + err = property.DefaultCollector(h.c).RetrieveOne(ctx, ds.Reference(), nil, &mds) + if err != nil { + return nil, err + } + + switch i := mds.Info.(type) { + case *types.VmfsDatastoreInfo: + if i.Vmfs.Local != nil && *i.Vmfs.Local == true { + break + } + default: + continue + } + + ds.InventoryPath = mds.Name + ldss = append(ldss, ds) + } + + return ldss, nil +} diff --git a/vendor/github.com/vmware/govmomi/test/functional/issue_242_test.go b/vendor/github.com/vmware/govmomi/test/functional/issue_242_test.go new file mode 100644 index 000000000..f1bba5285 --- /dev/null +++ b/vendor/github.com/vmware/govmomi/test/functional/issue_242_test.go @@ -0,0 +1,104 @@ +/* +Copyright (c) 2015 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package functional + +import ( + "fmt" + "testing" + "time" + + "github.com/vmware/govmomi/object" + "github.com/vmware/govmomi/property" + "github.com/vmware/govmomi/vim25/mo" + "github.com/vmware/govmomi/vim25/types" + "golang.org/x/net/context" +) + +func TestIssue242(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + h := NewHelper(t) + defer h.Teardown() + + h.RequireVirtualCenter() + + df, err := h.Datacenter().Folders(ctx) + if err != nil { + t.Fatal(err) + } + + cr := h.ComputeResource() + + // Get local datastores for compute resource + dss, err := h.LocalDatastores(ctx, cr) + if err != nil { + t.Fatal(err) + } + if len(dss) == 0 { + t.Fatalf("No local datastores") + } + + // Get root resource pool for compute resource + rp, err := cr.ResourcePool(ctx) + if err != nil { + t.Fatal(err) + } + + spec := types.VirtualMachineConfigSpec{ + Name: fmt.Sprintf("govmomi-test-%s", time.Now().Format(time.RFC3339)), + Files: &types.VirtualMachineFileInfo{VmPathName: fmt.Sprintf("[%s]", dss[0].Name())}, + NumCPUs: 1, + MemoryMB: 32, + } + + // Create new VM + task, err := df.VmFolder.CreateVM(context.Background(), spec, rp, nil) + if err != nil { + t.Fatal(err) + } + + info, err := task.WaitForResult(context.Background(), nil) + if err != nil { + t.Fatal(err) + } + + vm := object.NewVirtualMachine(h.c, info.Result.(types.ManagedObjectReference)) + defer func() { + task, err := vm.Destroy(context.Background()) + if err != nil { + panic(err) + } + err = task.Wait(context.Background()) + if err != nil { + panic(err) + } + }() + + // Mark VM as template + err = vm.MarkAsTemplate(context.Background()) + if err != nil { + t.Fatal(err) + } + + // Get "environmentBrowser" property for VM template + var mvm mo.VirtualMachine + err = property.DefaultCollector(h.c).RetrieveOne(ctx, vm.Reference(), []string{"environmentBrowser"}, &mvm) + if err != nil { + t.Fatal(err) + } +} diff --git a/vendor/github.com/vmware/govmomi/test/helper.go b/vendor/github.com/vmware/govmomi/test/helper.go new file mode 100644 index 000000000..f69db7500 --- /dev/null +++ b/vendor/github.com/vmware/govmomi/test/helper.go @@ -0,0 +1,73 @@ +/* +Copyright (c) 2014-2015 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package test + +import ( + "net/url" + "os" + "testing" + + "github.com/vmware/govmomi/vim25" + "github.com/vmware/govmomi/vim25/methods" + "github.com/vmware/govmomi/vim25/soap" + "github.com/vmware/govmomi/vim25/types" + "golang.org/x/net/context" +) + +// URL parses the GOVMOMI_TEST_URL environment variable if set. +func URL() *url.URL { + s := os.Getenv("GOVMOMI_TEST_URL") + if s == "" { + return nil + } + u, err := soap.ParseURL(s) + if err != nil { + panic(err) + } + return u +} + +// NewAuthenticatedClient creates a new vim25.Client, authenticates the user +// specified in the test URL, and returns it.
+func NewAuthenticatedClient(t *testing.T) *vim25.Client { + u := URL() + if u == nil { + t.SkipNow() + } + + soapClient := soap.NewClient(u, true) + vimClient, err := vim25.NewClient(context.Background(), soapClient) + if err != nil { + t.Fatal(err) + } + + req := types.Login{ + This: *vimClient.ServiceContent.SessionManager, + } + + req.UserName = u.User.Username() + if pw, ok := u.User.Password(); ok { + req.Password = pw + } + + _, err = methods.Login(context.Background(), vimClient, &req) + if err != nil { + t.Fatal(err) + } + + return vimClient +} diff --git a/vendor/github.com/vmware/govmomi/units/size.go b/vendor/github.com/vmware/govmomi/units/size.go new file mode 100644 index 000000000..48208364a --- /dev/null +++ b/vendor/github.com/vmware/govmomi/units/size.go @@ -0,0 +1,89 @@ +/* +Copyright (c) 2015 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package units + +import ( + "errors" + "fmt" + "regexp" + "strconv" +) + +type ByteSize int64 + +const ( + _ = iota + KB = 1 << (10 * iota) + MB + GB + TB + PB + EB +) + +func (b ByteSize) String() string { + switch { + case b >= EB: + return fmt.Sprintf("%.1fEB", float32(b)/EB) + case b >= PB: + return fmt.Sprintf("%.1fPB", float32(b)/PB) + case b >= TB: + return fmt.Sprintf("%.1fTB", float32(b)/TB) + case b >= GB: + return fmt.Sprintf("%.1fGB", float32(b)/GB) + case b >= MB: + return fmt.Sprintf("%.1fMB", float32(b)/MB) + case b >= KB: + return fmt.Sprintf("%.1fKB", float32(b)/KB) + } + return fmt.Sprintf("%dB", b) +} + +var bytesRegexp = regexp.MustCompile(`^(?i)(\d+)([BKMGTPE]?)(ib|b)?$`) + +func (b *ByteSize) Set(s string) error { + m := bytesRegexp.FindStringSubmatch(s) + if len(m) == 0 { + return errors.New("invalid byte value") + } + + i, err := strconv.ParseInt(m[1], 10, 64) + if err != nil { + return err + } + *b = ByteSize(i) + + switch m[2] { + case "B", "b", "": + case "K", "k": + *b *= ByteSize(KB) + case "M", "m": + *b *= ByteSize(MB) + case "G", "g": + *b *= ByteSize(GB) + case "T", "t": + *b *= ByteSize(TB) + case "P", "p": + *b *= ByteSize(PB) + case "E", "e": + *b *= ByteSize(EB) + default: + return errors.New("invalid byte suffix") + } + + return nil +} diff --git a/vendor/github.com/vmware/govmomi/units/size_test.go b/vendor/github.com/vmware/govmomi/units/size_test.go new file mode 100644 index 000000000..14772887b --- /dev/null +++ b/vendor/github.com/vmware/govmomi/units/size_test.go @@ -0,0 +1,145 @@ +/* +Copyright (c) 2015 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
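A sketch of the ByteSize round trip implemented above: Set parses 1024-based suffixes ("345M", "345MB" and "345MiB" all normalize to the same value) and String formats back. The table-driven test that follows exercises the same pairs.

package main

import (
	"fmt"
	"log"

	"github.com/vmware/govmomi/units"
)

func main() {
	var b units.ByteSize
	if err := b.Set("345M"); err != nil {
		log.Fatal(err)
	}

	fmt.Println(int64(b)) // 361758720, i.e. 345 * 1024 * 1024
	fmt.Println(b)        // 345.0MB
}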
+*/ + +package units + +import ( + "fmt" + "math" + "testing" +) + +func TestMB(t *testing.T) { + b := ByteSize(1024 * 1024) + expected := "1.0MB" + if b.String() != expected { + t.Errorf("Expected '%v' but got '%v'", expected, b) + } +} + +func TestTenMB(t *testing.T) { + b := ByteSize(10 * 1024 * 1024) + actual := fmt.Sprintf("%s", b) + expected := "10.0MB" + if actual != expected { + t.Errorf("Expected '%v' but got '%v'", expected, actual) + } +} + +func assertEquals(t *testing.T, expected string, actual ByteSize) { + if expected != actual.String() { + t.Errorf("Expected '%v' but got '%v'", expected, actual.String()) + } +} + +func TestByteSize(t *testing.T) { + assertEquals(t, "1B", ByteSize(1)) + assertEquals(t, "10B", ByteSize(10)) + assertEquals(t, "100B", ByteSize(100)) + assertEquals(t, "1000B", ByteSize(1000)) + assertEquals(t, "1.0KB", ByteSize(1024)) + assertEquals(t, "1.0MB", ByteSize(1024*1024)) + assertEquals(t, "1.0MB", ByteSize(1048576)) + assertEquals(t, "10.0MB", ByteSize(10*math.Pow(1024, 2))) + assertEquals(t, "1.0GB", ByteSize(1024*1024*1024)) + assertEquals(t, "1.0TB", ByteSize(1024*1024*1024*1024)) + assertEquals(t, "1.0TB", ByteSize(1048576*1048576)) + assertEquals(t, "1.0PB", ByteSize(1024*1024*1024*1024*1024)) + assertEquals(t, "1.0EB", ByteSize(1024*1024*1024*1024*1024*1024)) + assertEquals(t, "1.0EB", ByteSize(1048576*1048576*1048576)) +} + +func TestByteSizeSet(t *testing.T) { + var tests = []struct { + In string + OutStr string + Out ByteSize + }{ + { + In: "345", + OutStr: "345B", + Out: 345.0, + }, + { + In: "345b", + OutStr: "345B", + Out: 345.0, + }, + { + In: "345K", + OutStr: "345.0KB", + Out: 345 * KB, + }, + { + In: "345kb", + OutStr: "345.0KB", + Out: 345 * KB, + }, + { + In: "345kib", + OutStr: "345.0KB", + Out: 345 * KB, + }, + { + In: "345KiB", + OutStr: "345.0KB", + Out: 345 * KB, + }, + { + In: "345M", + OutStr: "345.0MB", + Out: 345 * MB, + }, + { + In: "345G", + OutStr: "345.0GB", + Out: 345 * GB, + }, + { + In: "345T", + OutStr: "345.0TB", + Out: 345 * TB, + }, + { + In: "345P", + OutStr: "345.0PB", + Out: 345 * PB, + }, + { + In: "3E", + OutStr: "3.0EB", + Out: 3 * EB, + }, + } + + for _, test := range tests { + var v ByteSize + err := v.Set(test.In) + if err != nil { + t.Errorf("Error: %s [%v]", err, test.In) + continue + } + + if v != test.Out { + t.Errorf("Out: expect '%v' actual '%v'", test.Out, v) + continue + } + + if v.String() != test.OutStr { + t.Errorf("String: expect '%v' actual '%v'", test.OutStr, v.String()) + } + } +} diff --git a/vendor/github.com/vmware/govmomi/vim25/client.go b/vendor/github.com/vmware/govmomi/vim25/client.go new file mode 100644 index 000000000..41cc45069 --- /dev/null +++ b/vendor/github.com/vmware/govmomi/vim25/client.go @@ -0,0 +1,123 @@ +/* +Copyright (c) 2015 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/
+
+package vim25
+
+import (
+	"encoding/json"
+
+	"github.com/vmware/govmomi/vim25/methods"
+	"github.com/vmware/govmomi/vim25/soap"
+	"github.com/vmware/govmomi/vim25/types"
+	"golang.org/x/net/context"
+)
+
+// Client is a tiny wrapper around the vim25/soap Client that stores session
+// specific state (i.e. state that only needs to be retrieved once after the
+// client has been created). This means the client can be reused after
+// serialization without performing additional requests for initialization.
+type Client struct {
+	*soap.Client
+
+	ServiceContent types.ServiceContent
+
+	// RoundTripper is a separate field such that the client's implementation of
+	// the RoundTripper interface can be wrapped by separate implementations for
+	// extra functionality (for example, reauthentication on session timeout).
+	RoundTripper soap.RoundTripper
+}
+
+// NewClient creates and returns a new client with the ServiceContent field
+// filled in.
+func NewClient(ctx context.Context, rt soap.RoundTripper) (*Client, error) {
+	serviceContent, err := methods.GetServiceContent(ctx, rt)
+	if err != nil {
+		return nil, err
+	}
+
+	c := Client{
+		ServiceContent: serviceContent,
+		RoundTripper:   rt,
+	}
+
+	// Set client if it happens to be a soap.Client
+	if sc, ok := rt.(*soap.Client); ok {
+		c.Client = sc
+	}
+
+	return &c, nil
+}
+
+// RoundTrip dispatches to the RoundTripper field.
+func (c *Client) RoundTrip(ctx context.Context, req, res soap.HasFault) error {
+	return c.RoundTripper.RoundTrip(ctx, req, res)
+}
+
+type marshaledClient struct {
+	SoapClient     *soap.Client
+	ServiceContent types.ServiceContent
+}
+
+func (c *Client) MarshalJSON() ([]byte, error) {
+	m := marshaledClient{
+		SoapClient:     c.Client,
+		ServiceContent: c.ServiceContent,
+	}
+
+	return json.Marshal(m)
+}
+
+func (c *Client) UnmarshalJSON(b []byte) error {
+	var m marshaledClient
+
+	err := json.Unmarshal(b, &m)
+	if err != nil {
+		return err
+	}
+
+	*c = Client{
+		Client:         m.SoapClient,
+		ServiceContent: m.ServiceContent,
+		RoundTripper:   m.SoapClient,
+	}
+
+	return nil
+}
+
+// Valid returns whether or not the client is valid and ready for use.
+// This should be called after unmarshalling the client.
+func (c *Client) Valid() bool {
+	if c == nil {
+		return false
+	}
+
+	if c.Client == nil {
+		return false
+	}
+
+	// Use arbitrary pointer field in the service content.
+	// Doesn't matter which one, as long as it is populated by default.
+	if c.ServiceContent.SessionManager == nil {
+		return false
+	}
+
+	return true
+}
+
+// IsVC returns true if we are connected to a vCenter
+func (c *Client) IsVC() bool {
+	return c.ServiceContent.About.ApiType == "VirtualCenter"
+}
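The serialization behavior described in the Client doc comment is exercised against a live endpoint by the vendored client_test.go that follows. As a minimal offline sketch (editorial illustration only, assuming an already-authenticated *vim25.Client named c, the encoding/json import, and eliding error handling):

	b, _ := json.Marshal(c)

	c2 := new(vim25.Client)
	_ = json.Unmarshal(b, c2)

	if c2.Valid() {
		// ServiceContent was restored from the JSON payload, so c2 can issue
		// requests without re-fetching it; UnmarshalJSON reinstates the
		// embedded soap.Client as the RoundTripper.
	}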
diff --git a/vendor/github.com/vmware/govmomi/vim25/client_test.go b/vendor/github.com/vmware/govmomi/vim25/client_test.go
new file mode 100644
index 000000000..05885c7f8
--- /dev/null
+++ b/vendor/github.com/vmware/govmomi/vim25/client_test.go
@@ -0,0 +1,97 @@
+/*
+Copyright (c) 2015 VMware, Inc. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package vim25
+
+import (
+	"encoding/json"
+	"net/url"
+	"os"
+	"testing"
+
+	"github.com/vmware/govmomi/vim25/methods"
+	"github.com/vmware/govmomi/vim25/mo"
+	"github.com/vmware/govmomi/vim25/soap"
+	"github.com/vmware/govmomi/vim25/types"
+	"golang.org/x/net/context"
+)
+
+// Duplicated to prevent cyclic dependency...
+func testURL(t *testing.T) *url.URL {
+	s := os.Getenv("GOVMOMI_TEST_URL")
+	if s == "" {
+		t.SkipNow()
+	}
+	u, err := soap.ParseURL(s)
+	if err != nil {
+		panic(err)
+	}
+	return u
+}
+
+func sessionLogin(t *testing.T, c *Client) {
+	req := types.Login{
+		This: *c.ServiceContent.SessionManager,
+	}
+
+	u := testURL(t).User
+	req.UserName = u.Username()
+	if pw, ok := u.Password(); ok {
+		req.Password = pw
+	}
+
+	_, err := methods.Login(context.Background(), c, &req)
+	if err != nil {
+		t.Fatal(err)
+	}
+}
+
+func sessionCheck(t *testing.T, c *Client) {
+	var mgr mo.SessionManager
+
+	err := mo.RetrieveProperties(context.Background(), c, c.ServiceContent.PropertyCollector, *c.ServiceContent.SessionManager, &mgr)
+	if err != nil {
+		t.Fatal(err)
+	}
+}
+
+func TestClientSerialization(t *testing.T) {
+	var c1, c2 *Client
+
+	soapClient := soap.NewClient(testURL(t), true)
+	c1, err := NewClient(context.Background(), soapClient)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// Login
+	sessionLogin(t, c1)
+	sessionCheck(t, c1)
+
+	// Serialize/deserialize
+	b, err := json.Marshal(c1)
+	if err != nil {
+		t.Fatal(err)
+	}
+	c2 = &Client{}
+	err = json.Unmarshal(b, c2)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// Check the session is still valid
+	sessionCheck(t, c2)
+}
diff --git a/vendor/github.com/vmware/govmomi/vim25/debug/debug.go b/vendor/github.com/vmware/govmomi/vim25/debug/debug.go
new file mode 100644
index 000000000..22d547178
--- /dev/null
+++ b/vendor/github.com/vmware/govmomi/vim25/debug/debug.go
@@ -0,0 +1,81 @@
+/*
+Copyright (c) 2014 VMware, Inc. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package debug
+
+import (
+	"io"
+	"os"
+	"path"
+)
+
+// Provider specifies the interface types must implement to be used as a
+// debugging sink. Having multiple such sink implementations allows the sink
+// to be changed externally (for example when running tests).
+type Provider interface {
+	NewFile(s string) io.WriteCloser
+	Flush()
+}
+
+var currentProvider Provider = nil
+
+func SetProvider(p Provider) {
+	if currentProvider != nil {
+		currentProvider.Flush()
+	}
+	currentProvider = p
+}
+
+// Enabled returns whether debugging is enabled or not.
+func Enabled() bool {
+	return currentProvider != nil
+}
+
+// NewFile dispatches to the current provider's NewFile function.
+func NewFile(s string) io.WriteCloser {
+	return currentProvider.NewFile(s)
+}
+
+// Flush dispatches to the current provider's Flush function.
+func Flush() {
+	currentProvider.Flush()
+}
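+
+// Editorial note: a hedged sketch of enabling the FileProvider defined
+// below from calling code (illustrative only; Path may be any writable
+// directory):
+//
+//	debug.SetProvider(&debug.FileProvider{Path: "."})
+//	defer debug.Flush()
+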
+// FileProvider implements a debugging provider that creates a real file for
+// every call to NewFile. It maintains a list of all files that it creates,
+// such that it can close them when its Flush function is called.
+type FileProvider struct {
+	Path string
+
+	files []*os.File
+}
+
+func (fp *FileProvider) NewFile(p string) io.WriteCloser {
+	f, err := os.Create(path.Join(fp.Path, p))
+	if err != nil {
+		panic(err)
+	}
+
+	fp.files = append(fp.files, f)
+
+	return f
+}
+
+func (fp *FileProvider) Flush() {
+	for _, f := range fp.files {
+		f.Close()
+	}
+}
diff --git a/vendor/github.com/vmware/govmomi/vim25/doc.go b/vendor/github.com/vmware/govmomi/vim25/doc.go
new file mode 100644
index 000000000..acb2c9f64
--- /dev/null
+++ b/vendor/github.com/vmware/govmomi/vim25/doc.go
@@ -0,0 +1,29 @@
+/*
+Copyright (c) 2015 VMware, Inc. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+/*
+Package vim25 provides a minimal client implementation to use with other
+packages in the vim25 tree. The code in this package intentionally does not
+take any dependencies outside the vim25 tree.
+
+The client implementation in this package embeds the soap.Client structure.
+Additionally, it stores the value of the session's ServiceContent object. This
+object stores references to a variety of subsystems, such as the root property
+collector, the session manager, and the search index. The client is fully
+functional after serialization and deserialization, without the need for
+additional requests for initialization.
+*/
+package vim25
diff --git a/vendor/github.com/vmware/govmomi/vim25/methods/fault_test.go b/vendor/github.com/vmware/govmomi/vim25/methods/fault_test.go
new file mode 100644
index 000000000..78bcaae81
--- /dev/null
+++ b/vendor/github.com/vmware/govmomi/vim25/methods/fault_test.go
@@ -0,0 +1,62 @@
+/*
+Copyright (c) 2014 VMware, Inc. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package methods
+
+import (
+	"bytes"
+	"testing"
+
+	"github.com/vmware/govmomi/vim25/soap"
+	"github.com/vmware/govmomi/vim25/types"
+	"github.com/vmware/govmomi/vim25/xml"
+)
+
+var invalidLoginFault = `
+<soapenv:Envelope xmlns:soapenc="http://schemas.xmlsoap.org/soap/encoding/"
+ xmlns:soapenv="http://schemas.xmlsoap.org/soap/envelope/" xmlns:xsd="http://www.w3.org/2001/XMLSchema" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">
+<soapenv:Body>
+<soapenv:Fault><faultcode>ServerFaultCode</faultcode><faultstring>Cannot complete login due to an incorrect user name or password.</faultstring><detail><InvalidLoginFault xmlns="urn:vim25" xsi:type="InvalidLogin"></InvalidLoginFault></detail></soapenv:Fault>
+</soapenv:Body>
+</soapenv:Envelope>`
+
+type TestBody struct {
+	Fault *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func TestFaultDetail(t *testing.T) {
+	body := TestBody{}
+	env := soap.Envelope{Body: &body}
+
+	dec := xml.NewDecoder(bytes.NewReader([]byte(invalidLoginFault)))
+	dec.TypeFunc = types.TypeFunc()
+
+	err := dec.Decode(&env)
+	if err != nil {
+		t.Fatalf("Decode: %s", err)
+	}
+
+	if body.Fault == nil {
+		t.Fatal("Expected fault")
+	}
+
+	if _, ok := body.Fault.Detail.Fault.(types.InvalidLogin); !ok {
+		t.Fatalf("Expected InvalidLogin, got: %#v", body.Fault.Detail.Fault)
+	}
+}
diff --git a/vendor/github.com/vmware/govmomi/vim25/methods/internal.go b/vendor/github.com/vmware/govmomi/vim25/methods/internal.go
new file mode 100644
index 000000000..e2fb2a22b
--- /dev/null
+++ b/vendor/github.com/vmware/govmomi/vim25/methods/internal.go
@@ -0,0 +1,123 @@
+/*
+Copyright (c) 2014-2015 VMware, Inc. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package methods
+
+import (
+	"github.com/vmware/govmomi/vim25/soap"
+	"github.com/vmware/govmomi/vim25/types"
+	"golang.org/x/net/context"
+)
+
+type RetrieveDynamicTypeManagerBody struct {
+	Req    *types.RetrieveDynamicTypeManager         `xml:"urn:vim25 RetrieveDynamicTypeManager"`
+	Res    *types.RetrieveDynamicTypeManagerResponse `xml:"urn:vim25 RetrieveDynamicTypeManagerResponse"`
+	Fault_ *soap.Fault
+}
+
+func (b *RetrieveDynamicTypeManagerBody) Fault() *soap.Fault { return b.Fault_ }
+
+func RetrieveDynamicTypeManager(ctx context.Context, r soap.RoundTripper, req *types.RetrieveDynamicTypeManager) (*types.RetrieveDynamicTypeManagerResponse, error) {
+	var reqBody, resBody RetrieveDynamicTypeManagerBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type RetrieveManagedMethodExecuterBody struct {
+	Req    *types.RetrieveManagedMethodExecuter         `xml:"urn:vim25 RetrieveManagedMethodExecuter"`
+	Res    *types.RetrieveManagedMethodExecuterResponse `xml:"urn:vim25 RetrieveManagedMethodExecuterResponse"`
+	Fault_ *soap.Fault
+}
+
+func (b *RetrieveManagedMethodExecuterBody) Fault() *soap.Fault { return b.Fault_ }
+
+func RetrieveManagedMethodExecuter(ctx context.Context, r soap.RoundTripper, req *types.RetrieveManagedMethodExecuter) (*types.RetrieveManagedMethodExecuterResponse, error) {
+	var reqBody, resBody RetrieveManagedMethodExecuterBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type DynamicTypeMgrQueryMoInstancesBody struct {
+	Req    *types.DynamicTypeMgrQueryMoInstances         `xml:"urn:vim25 DynamicTypeMgrQueryMoInstances"`
+	Res    *types.DynamicTypeMgrQueryMoInstancesResponse `xml:"urn:vim25 DynamicTypeMgrQueryMoInstancesResponse"`
+	Fault_ *soap.Fault
+}
+
+func (b *DynamicTypeMgrQueryMoInstancesBody) Fault() *soap.Fault { return b.Fault_ }
+
+func DynamicTypeMgrQueryMoInstances(ctx context.Context, r soap.RoundTripper, req
*types.DynamicTypeMgrQueryMoInstances) (*types.DynamicTypeMgrQueryMoInstancesResponse, error) { + var reqBody, resBody DynamicTypeMgrQueryMoInstancesBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type DynamicTypeMgrQueryTypeInfoBody struct { + Req *types.DynamicTypeMgrQueryTypeInfo `xml:"urn:vim25 DynamicTypeMgrQueryTypeInfo"` + Res *types.DynamicTypeMgrQueryTypeInfoResponse `xml:"urn:vim25 DynamicTypeMgrQueryTypeInfoResponse"` + Fault_ *soap.Fault +} + +func (b *DynamicTypeMgrQueryTypeInfoBody) Fault() *soap.Fault { return b.Fault_ } + +func DynamicTypeMgrQueryTypeInfo(ctx context.Context, r soap.RoundTripper, req *types.DynamicTypeMgrQueryTypeInfo) (*types.DynamicTypeMgrQueryTypeInfoResponse, error) { + var reqBody, resBody DynamicTypeMgrQueryTypeInfoBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type ExecuteSoapBody struct { + Req *types.ExecuteSoap `xml:"urn:vim25 ExecuteSoap"` + Res *types.ExecuteSoapResponse `xml:"urn:vim25 ExecuteSoapResponse"` + Fault_ *soap.Fault +} + +func (b *ExecuteSoapBody) Fault() *soap.Fault { return b.Fault_ } + +func ExecuteSoap(ctx context.Context, r soap.RoundTripper, req *types.ExecuteSoap) (*types.ExecuteSoapResponse, error) { + var reqBody, resBody ExecuteSoapBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} diff --git a/vendor/github.com/vmware/govmomi/vim25/methods/methods.go b/vendor/github.com/vmware/govmomi/vim25/methods/methods.go new file mode 100644 index 000000000..8356883bf --- /dev/null +++ b/vendor/github.com/vmware/govmomi/vim25/methods/methods.go @@ -0,0 +1,14343 @@ +/* +Copyright (c) 2014-2015 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package methods + +import ( + "github.com/vmware/govmomi/vim25/soap" + "github.com/vmware/govmomi/vim25/types" + "golang.org/x/net/context" +) + +type AbdicateDomOwnershipBody struct { + Req *types.AbdicateDomOwnership `xml:"urn:vim25 AbdicateDomOwnership,omitempty"` + Res *types.AbdicateDomOwnershipResponse `xml:"urn:vim25 AbdicateDomOwnershipResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *AbdicateDomOwnershipBody) Fault() *soap.Fault { return b.Fault_ } + +func AbdicateDomOwnership(ctx context.Context, r soap.RoundTripper, req *types.AbdicateDomOwnership) (*types.AbdicateDomOwnershipResponse, error) { + var reqBody, resBody AbdicateDomOwnershipBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type AcknowledgeAlarmBody struct { + Req *types.AcknowledgeAlarm `xml:"urn:vim25 AcknowledgeAlarm,omitempty"` + Res *types.AcknowledgeAlarmResponse `xml:"urn:vim25 AcknowledgeAlarmResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *AcknowledgeAlarmBody) Fault() *soap.Fault { return b.Fault_ } + +func AcknowledgeAlarm(ctx context.Context, r soap.RoundTripper, req *types.AcknowledgeAlarm) (*types.AcknowledgeAlarmResponse, error) { + var reqBody, resBody AcknowledgeAlarmBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type AcquireCimServicesTicketBody struct { + Req *types.AcquireCimServicesTicket `xml:"urn:vim25 AcquireCimServicesTicket,omitempty"` + Res *types.AcquireCimServicesTicketResponse `xml:"urn:vim25 AcquireCimServicesTicketResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *AcquireCimServicesTicketBody) Fault() *soap.Fault { return b.Fault_ } + +func AcquireCimServicesTicket(ctx context.Context, r soap.RoundTripper, req *types.AcquireCimServicesTicket) (*types.AcquireCimServicesTicketResponse, error) { + var reqBody, resBody AcquireCimServicesTicketBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type AcquireCloneTicketBody struct { + Req *types.AcquireCloneTicket `xml:"urn:vim25 AcquireCloneTicket,omitempty"` + Res *types.AcquireCloneTicketResponse `xml:"urn:vim25 AcquireCloneTicketResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *AcquireCloneTicketBody) Fault() *soap.Fault { return b.Fault_ } + +func AcquireCloneTicket(ctx context.Context, r soap.RoundTripper, req *types.AcquireCloneTicket) (*types.AcquireCloneTicketResponse, error) { + var reqBody, resBody AcquireCloneTicketBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type AcquireCredentialsInGuestBody struct { + Req *types.AcquireCredentialsInGuest `xml:"urn:vim25 AcquireCredentialsInGuest,omitempty"` + Res *types.AcquireCredentialsInGuestResponse `xml:"urn:vim25 AcquireCredentialsInGuestResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *AcquireCredentialsInGuestBody) Fault() *soap.Fault { return b.Fault_ } + +func AcquireCredentialsInGuest(ctx context.Context, r soap.RoundTripper, 
req *types.AcquireCredentialsInGuest) (*types.AcquireCredentialsInGuestResponse, error) { + var reqBody, resBody AcquireCredentialsInGuestBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type AcquireGenericServiceTicketBody struct { + Req *types.AcquireGenericServiceTicket `xml:"urn:vim25 AcquireGenericServiceTicket,omitempty"` + Res *types.AcquireGenericServiceTicketResponse `xml:"urn:vim25 AcquireGenericServiceTicketResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *AcquireGenericServiceTicketBody) Fault() *soap.Fault { return b.Fault_ } + +func AcquireGenericServiceTicket(ctx context.Context, r soap.RoundTripper, req *types.AcquireGenericServiceTicket) (*types.AcquireGenericServiceTicketResponse, error) { + var reqBody, resBody AcquireGenericServiceTicketBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type AcquireLocalTicketBody struct { + Req *types.AcquireLocalTicket `xml:"urn:vim25 AcquireLocalTicket,omitempty"` + Res *types.AcquireLocalTicketResponse `xml:"urn:vim25 AcquireLocalTicketResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *AcquireLocalTicketBody) Fault() *soap.Fault { return b.Fault_ } + +func AcquireLocalTicket(ctx context.Context, r soap.RoundTripper, req *types.AcquireLocalTicket) (*types.AcquireLocalTicketResponse, error) { + var reqBody, resBody AcquireLocalTicketBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type AcquireMksTicketBody struct { + Req *types.AcquireMksTicket `xml:"urn:vim25 AcquireMksTicket,omitempty"` + Res *types.AcquireMksTicketResponse `xml:"urn:vim25 AcquireMksTicketResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *AcquireMksTicketBody) Fault() *soap.Fault { return b.Fault_ } + +func AcquireMksTicket(ctx context.Context, r soap.RoundTripper, req *types.AcquireMksTicket) (*types.AcquireMksTicketResponse, error) { + var reqBody, resBody AcquireMksTicketBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type AcquireTicketBody struct { + Req *types.AcquireTicket `xml:"urn:vim25 AcquireTicket,omitempty"` + Res *types.AcquireTicketResponse `xml:"urn:vim25 AcquireTicketResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *AcquireTicketBody) Fault() *soap.Fault { return b.Fault_ } + +func AcquireTicket(ctx context.Context, r soap.RoundTripper, req *types.AcquireTicket) (*types.AcquireTicketResponse, error) { + var reqBody, resBody AcquireTicketBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type AddAuthorizationRoleBody struct { + Req *types.AddAuthorizationRole `xml:"urn:vim25 AddAuthorizationRole,omitempty"` + Res *types.AddAuthorizationRoleResponse `xml:"urn:vim25 AddAuthorizationRoleResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *AddAuthorizationRoleBody) Fault() *soap.Fault { return b.Fault_ } + +func 
AddAuthorizationRole(ctx context.Context, r soap.RoundTripper, req *types.AddAuthorizationRole) (*types.AddAuthorizationRoleResponse, error) { + var reqBody, resBody AddAuthorizationRoleBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type AddCustomFieldDefBody struct { + Req *types.AddCustomFieldDef `xml:"urn:vim25 AddCustomFieldDef,omitempty"` + Res *types.AddCustomFieldDefResponse `xml:"urn:vim25 AddCustomFieldDefResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *AddCustomFieldDefBody) Fault() *soap.Fault { return b.Fault_ } + +func AddCustomFieldDef(ctx context.Context, r soap.RoundTripper, req *types.AddCustomFieldDef) (*types.AddCustomFieldDefResponse, error) { + var reqBody, resBody AddCustomFieldDefBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type AddDVPortgroup_TaskBody struct { + Req *types.AddDVPortgroup_Task `xml:"urn:vim25 AddDVPortgroup_Task,omitempty"` + Res *types.AddDVPortgroup_TaskResponse `xml:"urn:vim25 AddDVPortgroup_TaskResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *AddDVPortgroup_TaskBody) Fault() *soap.Fault { return b.Fault_ } + +func AddDVPortgroup_Task(ctx context.Context, r soap.RoundTripper, req *types.AddDVPortgroup_Task) (*types.AddDVPortgroup_TaskResponse, error) { + var reqBody, resBody AddDVPortgroup_TaskBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type AddDisks_TaskBody struct { + Req *types.AddDisks_Task `xml:"urn:vim25 AddDisks_Task,omitempty"` + Res *types.AddDisks_TaskResponse `xml:"urn:vim25 AddDisks_TaskResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *AddDisks_TaskBody) Fault() *soap.Fault { return b.Fault_ } + +func AddDisks_Task(ctx context.Context, r soap.RoundTripper, req *types.AddDisks_Task) (*types.AddDisks_TaskResponse, error) { + var reqBody, resBody AddDisks_TaskBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type AddGuestAliasBody struct { + Req *types.AddGuestAlias `xml:"urn:vim25 AddGuestAlias,omitempty"` + Res *types.AddGuestAliasResponse `xml:"urn:vim25 AddGuestAliasResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *AddGuestAliasBody) Fault() *soap.Fault { return b.Fault_ } + +func AddGuestAlias(ctx context.Context, r soap.RoundTripper, req *types.AddGuestAlias) (*types.AddGuestAliasResponse, error) { + var reqBody, resBody AddGuestAliasBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type AddHost_TaskBody struct { + Req *types.AddHost_Task `xml:"urn:vim25 AddHost_Task,omitempty"` + Res *types.AddHost_TaskResponse `xml:"urn:vim25 AddHost_TaskResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *AddHost_TaskBody) Fault() *soap.Fault { return b.Fault_ } + +func AddHost_Task(ctx context.Context, r soap.RoundTripper, req *types.AddHost_Task) (*types.AddHost_TaskResponse, error) { + var 
reqBody, resBody AddHost_TaskBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type AddInternetScsiSendTargetsBody struct { + Req *types.AddInternetScsiSendTargets `xml:"urn:vim25 AddInternetScsiSendTargets,omitempty"` + Res *types.AddInternetScsiSendTargetsResponse `xml:"urn:vim25 AddInternetScsiSendTargetsResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *AddInternetScsiSendTargetsBody) Fault() *soap.Fault { return b.Fault_ } + +func AddInternetScsiSendTargets(ctx context.Context, r soap.RoundTripper, req *types.AddInternetScsiSendTargets) (*types.AddInternetScsiSendTargetsResponse, error) { + var reqBody, resBody AddInternetScsiSendTargetsBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type AddInternetScsiStaticTargetsBody struct { + Req *types.AddInternetScsiStaticTargets `xml:"urn:vim25 AddInternetScsiStaticTargets,omitempty"` + Res *types.AddInternetScsiStaticTargetsResponse `xml:"urn:vim25 AddInternetScsiStaticTargetsResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *AddInternetScsiStaticTargetsBody) Fault() *soap.Fault { return b.Fault_ } + +func AddInternetScsiStaticTargets(ctx context.Context, r soap.RoundTripper, req *types.AddInternetScsiStaticTargets) (*types.AddInternetScsiStaticTargetsResponse, error) { + var reqBody, resBody AddInternetScsiStaticTargetsBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type AddLicenseBody struct { + Req *types.AddLicense `xml:"urn:vim25 AddLicense,omitempty"` + Res *types.AddLicenseResponse `xml:"urn:vim25 AddLicenseResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *AddLicenseBody) Fault() *soap.Fault { return b.Fault_ } + +func AddLicense(ctx context.Context, r soap.RoundTripper, req *types.AddLicense) (*types.AddLicenseResponse, error) { + var reqBody, resBody AddLicenseBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type AddNetworkResourcePoolBody struct { + Req *types.AddNetworkResourcePool `xml:"urn:vim25 AddNetworkResourcePool,omitempty"` + Res *types.AddNetworkResourcePoolResponse `xml:"urn:vim25 AddNetworkResourcePoolResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *AddNetworkResourcePoolBody) Fault() *soap.Fault { return b.Fault_ } + +func AddNetworkResourcePool(ctx context.Context, r soap.RoundTripper, req *types.AddNetworkResourcePool) (*types.AddNetworkResourcePoolResponse, error) { + var reqBody, resBody AddNetworkResourcePoolBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type AddPortGroupBody struct { + Req *types.AddPortGroup `xml:"urn:vim25 AddPortGroup,omitempty"` + Res *types.AddPortGroupResponse `xml:"urn:vim25 AddPortGroupResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *AddPortGroupBody) Fault() *soap.Fault { return b.Fault_ } + +func AddPortGroup(ctx context.Context, r 
soap.RoundTripper, req *types.AddPortGroup) (*types.AddPortGroupResponse, error) { + var reqBody, resBody AddPortGroupBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type AddServiceConsoleVirtualNicBody struct { + Req *types.AddServiceConsoleVirtualNic `xml:"urn:vim25 AddServiceConsoleVirtualNic,omitempty"` + Res *types.AddServiceConsoleVirtualNicResponse `xml:"urn:vim25 AddServiceConsoleVirtualNicResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *AddServiceConsoleVirtualNicBody) Fault() *soap.Fault { return b.Fault_ } + +func AddServiceConsoleVirtualNic(ctx context.Context, r soap.RoundTripper, req *types.AddServiceConsoleVirtualNic) (*types.AddServiceConsoleVirtualNicResponse, error) { + var reqBody, resBody AddServiceConsoleVirtualNicBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type AddStandaloneHost_TaskBody struct { + Req *types.AddStandaloneHost_Task `xml:"urn:vim25 AddStandaloneHost_Task,omitempty"` + Res *types.AddStandaloneHost_TaskResponse `xml:"urn:vim25 AddStandaloneHost_TaskResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *AddStandaloneHost_TaskBody) Fault() *soap.Fault { return b.Fault_ } + +func AddStandaloneHost_Task(ctx context.Context, r soap.RoundTripper, req *types.AddStandaloneHost_Task) (*types.AddStandaloneHost_TaskResponse, error) { + var reqBody, resBody AddStandaloneHost_TaskBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type AddVirtualNicBody struct { + Req *types.AddVirtualNic `xml:"urn:vim25 AddVirtualNic,omitempty"` + Res *types.AddVirtualNicResponse `xml:"urn:vim25 AddVirtualNicResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *AddVirtualNicBody) Fault() *soap.Fault { return b.Fault_ } + +func AddVirtualNic(ctx context.Context, r soap.RoundTripper, req *types.AddVirtualNic) (*types.AddVirtualNicResponse, error) { + var reqBody, resBody AddVirtualNicBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type AddVirtualSwitchBody struct { + Req *types.AddVirtualSwitch `xml:"urn:vim25 AddVirtualSwitch,omitempty"` + Res *types.AddVirtualSwitchResponse `xml:"urn:vim25 AddVirtualSwitchResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *AddVirtualSwitchBody) Fault() *soap.Fault { return b.Fault_ } + +func AddVirtualSwitch(ctx context.Context, r soap.RoundTripper, req *types.AddVirtualSwitch) (*types.AddVirtualSwitchResponse, error) { + var reqBody, resBody AddVirtualSwitchBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type AllocateIpv4AddressBody struct { + Req *types.AllocateIpv4Address `xml:"urn:vim25 AllocateIpv4Address,omitempty"` + Res *types.AllocateIpv4AddressResponse `xml:"urn:vim25 AllocateIpv4AddressResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *AllocateIpv4AddressBody) Fault() *soap.Fault { return b.Fault_ } + 
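+// Editorial note: every generated wrapper in this file follows the same
+// shape as the function below: a Body struct pairs the request and
+// response payloads (plus an optional SOAP fault), and the exported
+// function round-trips it. A call looks like this hedged sketch, where
+// rt is any soap.RoundTripper such as a *vim25.Client and the request
+// fields are placeholders:
+//
+//	res, err := methods.AllocateIpv4Address(ctx, rt, &types.AllocateIpv4Address{})
+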
+func AllocateIpv4Address(ctx context.Context, r soap.RoundTripper, req *types.AllocateIpv4Address) (*types.AllocateIpv4AddressResponse, error) { + var reqBody, resBody AllocateIpv4AddressBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type AllocateIpv6AddressBody struct { + Req *types.AllocateIpv6Address `xml:"urn:vim25 AllocateIpv6Address,omitempty"` + Res *types.AllocateIpv6AddressResponse `xml:"urn:vim25 AllocateIpv6AddressResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *AllocateIpv6AddressBody) Fault() *soap.Fault { return b.Fault_ } + +func AllocateIpv6Address(ctx context.Context, r soap.RoundTripper, req *types.AllocateIpv6Address) (*types.AllocateIpv6AddressResponse, error) { + var reqBody, resBody AllocateIpv6AddressBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type AnswerVMBody struct { + Req *types.AnswerVM `xml:"urn:vim25 AnswerVM,omitempty"` + Res *types.AnswerVMResponse `xml:"urn:vim25 AnswerVMResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *AnswerVMBody) Fault() *soap.Fault { return b.Fault_ } + +func AnswerVM(ctx context.Context, r soap.RoundTripper, req *types.AnswerVM) (*types.AnswerVMResponse, error) { + var reqBody, resBody AnswerVMBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type ApplyHostConfig_TaskBody struct { + Req *types.ApplyHostConfig_Task `xml:"urn:vim25 ApplyHostConfig_Task,omitempty"` + Res *types.ApplyHostConfig_TaskResponse `xml:"urn:vim25 ApplyHostConfig_TaskResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *ApplyHostConfig_TaskBody) Fault() *soap.Fault { return b.Fault_ } + +func ApplyHostConfig_Task(ctx context.Context, r soap.RoundTripper, req *types.ApplyHostConfig_Task) (*types.ApplyHostConfig_TaskResponse, error) { + var reqBody, resBody ApplyHostConfig_TaskBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type ApplyRecommendationBody struct { + Req *types.ApplyRecommendation `xml:"urn:vim25 ApplyRecommendation,omitempty"` + Res *types.ApplyRecommendationResponse `xml:"urn:vim25 ApplyRecommendationResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *ApplyRecommendationBody) Fault() *soap.Fault { return b.Fault_ } + +func ApplyRecommendation(ctx context.Context, r soap.RoundTripper, req *types.ApplyRecommendation) (*types.ApplyRecommendationResponse, error) { + var reqBody, resBody ApplyRecommendationBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type ApplyStorageDrsRecommendationToPod_TaskBody struct { + Req *types.ApplyStorageDrsRecommendationToPod_Task `xml:"urn:vim25 ApplyStorageDrsRecommendationToPod_Task,omitempty"` + Res *types.ApplyStorageDrsRecommendationToPod_TaskResponse `xml:"urn:vim25 ApplyStorageDrsRecommendationToPod_TaskResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b 
*ApplyStorageDrsRecommendationToPod_TaskBody) Fault() *soap.Fault { return b.Fault_ } + +func ApplyStorageDrsRecommendationToPod_Task(ctx context.Context, r soap.RoundTripper, req *types.ApplyStorageDrsRecommendationToPod_Task) (*types.ApplyStorageDrsRecommendationToPod_TaskResponse, error) { + var reqBody, resBody ApplyStorageDrsRecommendationToPod_TaskBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type ApplyStorageDrsRecommendation_TaskBody struct { + Req *types.ApplyStorageDrsRecommendation_Task `xml:"urn:vim25 ApplyStorageDrsRecommendation_Task,omitempty"` + Res *types.ApplyStorageDrsRecommendation_TaskResponse `xml:"urn:vim25 ApplyStorageDrsRecommendation_TaskResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *ApplyStorageDrsRecommendation_TaskBody) Fault() *soap.Fault { return b.Fault_ } + +func ApplyStorageDrsRecommendation_Task(ctx context.Context, r soap.RoundTripper, req *types.ApplyStorageDrsRecommendation_Task) (*types.ApplyStorageDrsRecommendation_TaskResponse, error) { + var reqBody, resBody ApplyStorageDrsRecommendation_TaskBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type AreAlarmActionsEnabledBody struct { + Req *types.AreAlarmActionsEnabled `xml:"urn:vim25 AreAlarmActionsEnabled,omitempty"` + Res *types.AreAlarmActionsEnabledResponse `xml:"urn:vim25 AreAlarmActionsEnabledResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *AreAlarmActionsEnabledBody) Fault() *soap.Fault { return b.Fault_ } + +func AreAlarmActionsEnabled(ctx context.Context, r soap.RoundTripper, req *types.AreAlarmActionsEnabled) (*types.AreAlarmActionsEnabledResponse, error) { + var reqBody, resBody AreAlarmActionsEnabledBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type AssignUserToGroupBody struct { + Req *types.AssignUserToGroup `xml:"urn:vim25 AssignUserToGroup,omitempty"` + Res *types.AssignUserToGroupResponse `xml:"urn:vim25 AssignUserToGroupResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *AssignUserToGroupBody) Fault() *soap.Fault { return b.Fault_ } + +func AssignUserToGroup(ctx context.Context, r soap.RoundTripper, req *types.AssignUserToGroup) (*types.AssignUserToGroupResponse, error) { + var reqBody, resBody AssignUserToGroupBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type AssociateProfileBody struct { + Req *types.AssociateProfile `xml:"urn:vim25 AssociateProfile,omitempty"` + Res *types.AssociateProfileResponse `xml:"urn:vim25 AssociateProfileResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *AssociateProfileBody) Fault() *soap.Fault { return b.Fault_ } + +func AssociateProfile(ctx context.Context, r soap.RoundTripper, req *types.AssociateProfile) (*types.AssociateProfileResponse, error) { + var reqBody, resBody AssociateProfileBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type AttachScsiLunBody struct { + 
Req *types.AttachScsiLun `xml:"urn:vim25 AttachScsiLun,omitempty"` + Res *types.AttachScsiLunResponse `xml:"urn:vim25 AttachScsiLunResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *AttachScsiLunBody) Fault() *soap.Fault { return b.Fault_ } + +func AttachScsiLun(ctx context.Context, r soap.RoundTripper, req *types.AttachScsiLun) (*types.AttachScsiLunResponse, error) { + var reqBody, resBody AttachScsiLunBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type AttachScsiLunEx_TaskBody struct { + Req *types.AttachScsiLunEx_Task `xml:"urn:vim25 AttachScsiLunEx_Task,omitempty"` + Res *types.AttachScsiLunEx_TaskResponse `xml:"urn:vim25 AttachScsiLunEx_TaskResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *AttachScsiLunEx_TaskBody) Fault() *soap.Fault { return b.Fault_ } + +func AttachScsiLunEx_Task(ctx context.Context, r soap.RoundTripper, req *types.AttachScsiLunEx_Task) (*types.AttachScsiLunEx_TaskResponse, error) { + var reqBody, resBody AttachScsiLunEx_TaskBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type AttachVmfsExtentBody struct { + Req *types.AttachVmfsExtent `xml:"urn:vim25 AttachVmfsExtent,omitempty"` + Res *types.AttachVmfsExtentResponse `xml:"urn:vim25 AttachVmfsExtentResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *AttachVmfsExtentBody) Fault() *soap.Fault { return b.Fault_ } + +func AttachVmfsExtent(ctx context.Context, r soap.RoundTripper, req *types.AttachVmfsExtent) (*types.AttachVmfsExtentResponse, error) { + var reqBody, resBody AttachVmfsExtentBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type AutoStartPowerOffBody struct { + Req *types.AutoStartPowerOff `xml:"urn:vim25 AutoStartPowerOff,omitempty"` + Res *types.AutoStartPowerOffResponse `xml:"urn:vim25 AutoStartPowerOffResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *AutoStartPowerOffBody) Fault() *soap.Fault { return b.Fault_ } + +func AutoStartPowerOff(ctx context.Context, r soap.RoundTripper, req *types.AutoStartPowerOff) (*types.AutoStartPowerOffResponse, error) { + var reqBody, resBody AutoStartPowerOffBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type AutoStartPowerOnBody struct { + Req *types.AutoStartPowerOn `xml:"urn:vim25 AutoStartPowerOn,omitempty"` + Res *types.AutoStartPowerOnResponse `xml:"urn:vim25 AutoStartPowerOnResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *AutoStartPowerOnBody) Fault() *soap.Fault { return b.Fault_ } + +func AutoStartPowerOn(ctx context.Context, r soap.RoundTripper, req *types.AutoStartPowerOn) (*types.AutoStartPowerOnResponse, error) { + var reqBody, resBody AutoStartPowerOnBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type BackupFirmwareConfigurationBody struct { + Req *types.BackupFirmwareConfiguration `xml:"urn:vim25 
BackupFirmwareConfiguration,omitempty"` + Res *types.BackupFirmwareConfigurationResponse `xml:"urn:vim25 BackupFirmwareConfigurationResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *BackupFirmwareConfigurationBody) Fault() *soap.Fault { return b.Fault_ } + +func BackupFirmwareConfiguration(ctx context.Context, r soap.RoundTripper, req *types.BackupFirmwareConfiguration) (*types.BackupFirmwareConfigurationResponse, error) { + var reqBody, resBody BackupFirmwareConfigurationBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type BindVnicBody struct { + Req *types.BindVnic `xml:"urn:vim25 BindVnic,omitempty"` + Res *types.BindVnicResponse `xml:"urn:vim25 BindVnicResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *BindVnicBody) Fault() *soap.Fault { return b.Fault_ } + +func BindVnic(ctx context.Context, r soap.RoundTripper, req *types.BindVnic) (*types.BindVnicResponse, error) { + var reqBody, resBody BindVnicBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type BrowseDiagnosticLogBody struct { + Req *types.BrowseDiagnosticLog `xml:"urn:vim25 BrowseDiagnosticLog,omitempty"` + Res *types.BrowseDiagnosticLogResponse `xml:"urn:vim25 BrowseDiagnosticLogResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *BrowseDiagnosticLogBody) Fault() *soap.Fault { return b.Fault_ } + +func BrowseDiagnosticLog(ctx context.Context, r soap.RoundTripper, req *types.BrowseDiagnosticLog) (*types.BrowseDiagnosticLogResponse, error) { + var reqBody, resBody BrowseDiagnosticLogBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type CanProvisionObjectsBody struct { + Req *types.CanProvisionObjects `xml:"urn:vim25 CanProvisionObjects,omitempty"` + Res *types.CanProvisionObjectsResponse `xml:"urn:vim25 CanProvisionObjectsResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *CanProvisionObjectsBody) Fault() *soap.Fault { return b.Fault_ } + +func CanProvisionObjects(ctx context.Context, r soap.RoundTripper, req *types.CanProvisionObjects) (*types.CanProvisionObjectsResponse, error) { + var reqBody, resBody CanProvisionObjectsBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type CancelRecommendationBody struct { + Req *types.CancelRecommendation `xml:"urn:vim25 CancelRecommendation,omitempty"` + Res *types.CancelRecommendationResponse `xml:"urn:vim25 CancelRecommendationResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *CancelRecommendationBody) Fault() *soap.Fault { return b.Fault_ } + +func CancelRecommendation(ctx context.Context, r soap.RoundTripper, req *types.CancelRecommendation) (*types.CancelRecommendationResponse, error) { + var reqBody, resBody CancelRecommendationBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type CancelRetrievePropertiesExBody struct { + Req 
*types.CancelRetrievePropertiesEx `xml:"urn:vim25 CancelRetrievePropertiesEx,omitempty"`
+	Res *types.CancelRetrievePropertiesExResponse `xml:"urn:vim25 CancelRetrievePropertiesExResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *CancelRetrievePropertiesExBody) Fault() *soap.Fault { return b.Fault_ }
+
+func CancelRetrievePropertiesEx(ctx context.Context, r soap.RoundTripper, req *types.CancelRetrievePropertiesEx) (*types.CancelRetrievePropertiesExResponse, error) {
+	var reqBody, resBody CancelRetrievePropertiesExBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type CancelStorageDrsRecommendationBody struct {
+	Req *types.CancelStorageDrsRecommendation `xml:"urn:vim25 CancelStorageDrsRecommendation,omitempty"`
+	Res *types.CancelStorageDrsRecommendationResponse `xml:"urn:vim25 CancelStorageDrsRecommendationResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *CancelStorageDrsRecommendationBody) Fault() *soap.Fault { return b.Fault_ }
+
+func CancelStorageDrsRecommendation(ctx context.Context, r soap.RoundTripper, req *types.CancelStorageDrsRecommendation) (*types.CancelStorageDrsRecommendationResponse, error) {
+	var reqBody, resBody CancelStorageDrsRecommendationBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type CancelTaskBody struct {
+	Req *types.CancelTask `xml:"urn:vim25 CancelTask,omitempty"`
+	Res *types.CancelTaskResponse `xml:"urn:vim25 CancelTaskResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *CancelTaskBody) Fault() *soap.Fault { return b.Fault_ }
+
+func CancelTask(ctx context.Context, r soap.RoundTripper, req *types.CancelTask) (*types.CancelTaskResponse, error) {
+	var reqBody, resBody CancelTaskBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type CancelWaitForUpdatesBody struct {
+	Req *types.CancelWaitForUpdates `xml:"urn:vim25 CancelWaitForUpdates,omitempty"`
+	Res *types.CancelWaitForUpdatesResponse `xml:"urn:vim25 CancelWaitForUpdatesResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *CancelWaitForUpdatesBody) Fault() *soap.Fault { return b.Fault_ }
+
+func CancelWaitForUpdates(ctx context.Context, r soap.RoundTripper, req *types.CancelWaitForUpdates) (*types.CancelWaitForUpdatesResponse, error) {
+	var reqBody, resBody CancelWaitForUpdatesBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type CertMgrRefreshCACertificatesAndCRLs_TaskBody struct {
+	Req *types.CertMgrRefreshCACertificatesAndCRLs_Task `xml:"urn:vim25 CertMgrRefreshCACertificatesAndCRLs_Task,omitempty"`
+	Res *types.CertMgrRefreshCACertificatesAndCRLs_TaskResponse `xml:"urn:vim25 CertMgrRefreshCACertificatesAndCRLs_TaskResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *CertMgrRefreshCACertificatesAndCRLs_TaskBody) Fault() *soap.Fault { return b.Fault_ }
+
+func CertMgrRefreshCACertificatesAndCRLs_Task(ctx context.Context, r soap.RoundTripper, req *types.CertMgrRefreshCACertificatesAndCRLs_Task) (*types.CertMgrRefreshCACertificatesAndCRLs_TaskResponse, error) {
+	var reqBody, resBody CertMgrRefreshCACertificatesAndCRLs_TaskBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type CertMgrRefreshCertificates_TaskBody struct {
+	Req *types.CertMgrRefreshCertificates_Task `xml:"urn:vim25 CertMgrRefreshCertificates_Task,omitempty"`
+	Res *types.CertMgrRefreshCertificates_TaskResponse `xml:"urn:vim25 CertMgrRefreshCertificates_TaskResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *CertMgrRefreshCertificates_TaskBody) Fault() *soap.Fault { return b.Fault_ }
+
+func CertMgrRefreshCertificates_Task(ctx context.Context, r soap.RoundTripper, req *types.CertMgrRefreshCertificates_Task) (*types.CertMgrRefreshCertificates_TaskResponse, error) {
+	var reqBody, resBody CertMgrRefreshCertificates_TaskBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type CertMgrRevokeCertificates_TaskBody struct {
+	Req *types.CertMgrRevokeCertificates_Task `xml:"urn:vim25 CertMgrRevokeCertificates_Task,omitempty"`
+	Res *types.CertMgrRevokeCertificates_TaskResponse `xml:"urn:vim25 CertMgrRevokeCertificates_TaskResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *CertMgrRevokeCertificates_TaskBody) Fault() *soap.Fault { return b.Fault_ }
+
+func CertMgrRevokeCertificates_Task(ctx context.Context, r soap.RoundTripper, req *types.CertMgrRevokeCertificates_Task) (*types.CertMgrRevokeCertificates_TaskResponse, error) {
+	var reqBody, resBody CertMgrRevokeCertificates_TaskBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type ChangeAccessModeBody struct {
+	Req *types.ChangeAccessMode `xml:"urn:vim25 ChangeAccessMode,omitempty"`
+	Res *types.ChangeAccessModeResponse `xml:"urn:vim25 ChangeAccessModeResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *ChangeAccessModeBody) Fault() *soap.Fault { return b.Fault_ }
+
+func ChangeAccessMode(ctx context.Context, r soap.RoundTripper, req *types.ChangeAccessMode) (*types.ChangeAccessModeResponse, error) {
+	var reqBody, resBody ChangeAccessModeBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type ChangeFileAttributesInGuestBody struct {
+	Req *types.ChangeFileAttributesInGuest `xml:"urn:vim25 ChangeFileAttributesInGuest,omitempty"`
+	Res *types.ChangeFileAttributesInGuestResponse `xml:"urn:vim25 ChangeFileAttributesInGuestResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *ChangeFileAttributesInGuestBody) Fault() *soap.Fault { return b.Fault_ }
+
+func ChangeFileAttributesInGuest(ctx context.Context, r soap.RoundTripper, req *types.ChangeFileAttributesInGuest) (*types.ChangeFileAttributesInGuestResponse, error) {
+	var reqBody, resBody ChangeFileAttributesInGuestBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type ChangeLockdownModeBody struct {
+	Req *types.ChangeLockdownMode `xml:"urn:vim25 ChangeLockdownMode,omitempty"`
+	Res *types.ChangeLockdownModeResponse `xml:"urn:vim25 ChangeLockdownModeResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *ChangeLockdownModeBody) Fault() *soap.Fault { return b.Fault_ }
+
+func ChangeLockdownMode(ctx context.Context, r soap.RoundTripper, req *types.ChangeLockdownMode) (*types.ChangeLockdownModeResponse, error) {
+	var reqBody, resBody ChangeLockdownModeBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type ChangeNFSUserPasswordBody struct {
+	Req *types.ChangeNFSUserPassword `xml:"urn:vim25 ChangeNFSUserPassword,omitempty"`
+	Res *types.ChangeNFSUserPasswordResponse `xml:"urn:vim25 ChangeNFSUserPasswordResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *ChangeNFSUserPasswordBody) Fault() *soap.Fault { return b.Fault_ }
+
+func ChangeNFSUserPassword(ctx context.Context, r soap.RoundTripper, req *types.ChangeNFSUserPassword) (*types.ChangeNFSUserPasswordResponse, error) {
+	var reqBody, resBody ChangeNFSUserPasswordBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type ChangeOwnerBody struct {
+	Req *types.ChangeOwner `xml:"urn:vim25 ChangeOwner,omitempty"`
+	Res *types.ChangeOwnerResponse `xml:"urn:vim25 ChangeOwnerResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *ChangeOwnerBody) Fault() *soap.Fault { return b.Fault_ }
+
+func ChangeOwner(ctx context.Context, r soap.RoundTripper, req *types.ChangeOwner) (*types.ChangeOwnerResponse, error) {
+	var reqBody, resBody ChangeOwnerBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type CheckAddHostEvc_TaskBody struct {
+	Req *types.CheckAddHostEvc_Task `xml:"urn:vim25 CheckAddHostEvc_Task,omitempty"`
+	Res *types.CheckAddHostEvc_TaskResponse `xml:"urn:vim25 CheckAddHostEvc_TaskResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *CheckAddHostEvc_TaskBody) Fault() *soap.Fault { return b.Fault_ }
+
+func CheckAddHostEvc_Task(ctx context.Context, r soap.RoundTripper, req *types.CheckAddHostEvc_Task) (*types.CheckAddHostEvc_TaskResponse, error) {
+	var reqBody, resBody CheckAddHostEvc_TaskBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type CheckAnswerFileStatus_TaskBody struct {
+	Req *types.CheckAnswerFileStatus_Task `xml:"urn:vim25 CheckAnswerFileStatus_Task,omitempty"`
+	Res *types.CheckAnswerFileStatus_TaskResponse `xml:"urn:vim25 CheckAnswerFileStatus_TaskResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *CheckAnswerFileStatus_TaskBody) Fault() *soap.Fault { return b.Fault_ }
+
+func CheckAnswerFileStatus_Task(ctx context.Context, r soap.RoundTripper, req *types.CheckAnswerFileStatus_Task) (*types.CheckAnswerFileStatus_TaskResponse, error) {
+	var reqBody, resBody CheckAnswerFileStatus_TaskBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type CheckCompatibility_TaskBody struct {
+	Req *types.CheckCompatibility_Task `xml:"urn:vim25 CheckCompatibility_Task,omitempty"`
+	Res *types.CheckCompatibility_TaskResponse `xml:"urn:vim25 CheckCompatibility_TaskResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *CheckCompatibility_TaskBody) Fault() *soap.Fault { return b.Fault_ }
+
+func CheckCompatibility_Task(ctx context.Context, r soap.RoundTripper, req *types.CheckCompatibility_Task) (*types.CheckCompatibility_TaskResponse, error) {
+	var reqBody, resBody CheckCompatibility_TaskBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type CheckCompliance_TaskBody struct {
+	Req *types.CheckCompliance_Task `xml:"urn:vim25 CheckCompliance_Task,omitempty"`
+	Res *types.CheckCompliance_TaskResponse `xml:"urn:vim25 CheckCompliance_TaskResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *CheckCompliance_TaskBody) Fault() *soap.Fault { return b.Fault_ }
+
+func CheckCompliance_Task(ctx context.Context, r soap.RoundTripper, req *types.CheckCompliance_Task) (*types.CheckCompliance_TaskResponse, error) {
+	var reqBody, resBody CheckCompliance_TaskBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type CheckConfigureEvcMode_TaskBody struct {
+	Req *types.CheckConfigureEvcMode_Task `xml:"urn:vim25 CheckConfigureEvcMode_Task,omitempty"`
+	Res *types.CheckConfigureEvcMode_TaskResponse `xml:"urn:vim25 CheckConfigureEvcMode_TaskResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *CheckConfigureEvcMode_TaskBody) Fault() *soap.Fault { return b.Fault_ }
+
+func CheckConfigureEvcMode_Task(ctx context.Context, r soap.RoundTripper, req *types.CheckConfigureEvcMode_Task) (*types.CheckConfigureEvcMode_TaskResponse, error) {
+	var reqBody, resBody CheckConfigureEvcMode_TaskBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type CheckCustomizationResourcesBody struct {
+	Req *types.CheckCustomizationResources `xml:"urn:vim25 CheckCustomizationResources,omitempty"`
+	Res *types.CheckCustomizationResourcesResponse `xml:"urn:vim25 CheckCustomizationResourcesResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *CheckCustomizationResourcesBody) Fault() *soap.Fault { return b.Fault_ }
+
+func CheckCustomizationResources(ctx context.Context, r soap.RoundTripper, req *types.CheckCustomizationResources) (*types.CheckCustomizationResourcesResponse, error) {
+	var reqBody, resBody CheckCustomizationResourcesBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type CheckCustomizationSpecBody struct {
+	Req *types.CheckCustomizationSpec `xml:"urn:vim25 CheckCustomizationSpec,omitempty"`
+	Res *types.CheckCustomizationSpecResponse `xml:"urn:vim25 CheckCustomizationSpecResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *CheckCustomizationSpecBody) Fault() *soap.Fault { return b.Fault_ }
+
+func CheckCustomizationSpec(ctx context.Context, r soap.RoundTripper, req *types.CheckCustomizationSpec) (*types.CheckCustomizationSpecResponse, error) {
+	var reqBody, resBody CheckCustomizationSpecBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type CheckForUpdatesBody struct {
+	Req *types.CheckForUpdates `xml:"urn:vim25 CheckForUpdates,omitempty"`
+	Res *types.CheckForUpdatesResponse `xml:"urn:vim25 CheckForUpdatesResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *CheckForUpdatesBody) Fault() *soap.Fault { return b.Fault_ }
+
+func CheckForUpdates(ctx context.Context, r soap.RoundTripper, req *types.CheckForUpdates) (*types.CheckForUpdatesResponse, error) {
+	var reqBody, resBody CheckForUpdatesBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type CheckHostPatch_TaskBody struct {
+	Req *types.CheckHostPatch_Task `xml:"urn:vim25 CheckHostPatch_Task,omitempty"`
+	Res *types.CheckHostPatch_TaskResponse `xml:"urn:vim25 CheckHostPatch_TaskResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *CheckHostPatch_TaskBody) Fault() *soap.Fault { return b.Fault_ }
+
+func CheckHostPatch_Task(ctx context.Context, r soap.RoundTripper, req *types.CheckHostPatch_Task) (*types.CheckHostPatch_TaskResponse, error) {
+	var reqBody, resBody CheckHostPatch_TaskBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type CheckLicenseFeatureBody struct {
+	Req *types.CheckLicenseFeature `xml:"urn:vim25 CheckLicenseFeature,omitempty"`
+	Res *types.CheckLicenseFeatureResponse `xml:"urn:vim25 CheckLicenseFeatureResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *CheckLicenseFeatureBody) Fault() *soap.Fault { return b.Fault_ }
+
+func CheckLicenseFeature(ctx context.Context, r soap.RoundTripper, req *types.CheckLicenseFeature) (*types.CheckLicenseFeatureResponse, error) {
+	var reqBody, resBody CheckLicenseFeatureBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type CheckMigrate_TaskBody struct {
+	Req *types.CheckMigrate_Task `xml:"urn:vim25 CheckMigrate_Task,omitempty"`
+	Res *types.CheckMigrate_TaskResponse `xml:"urn:vim25 CheckMigrate_TaskResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *CheckMigrate_TaskBody) Fault() *soap.Fault { return b.Fault_ }
+
+func CheckMigrate_Task(ctx context.Context, r soap.RoundTripper, req *types.CheckMigrate_Task) (*types.CheckMigrate_TaskResponse, error) {
+	var reqBody, resBody CheckMigrate_TaskBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type CheckProfileCompliance_TaskBody struct {
+	Req *types.CheckProfileCompliance_Task `xml:"urn:vim25 CheckProfileCompliance_Task,omitempty"`
+	Res *types.CheckProfileCompliance_TaskResponse `xml:"urn:vim25 CheckProfileCompliance_TaskResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *CheckProfileCompliance_TaskBody) Fault() *soap.Fault { return b.Fault_ }
+
+func CheckProfileCompliance_Task(ctx context.Context, r soap.RoundTripper, req *types.CheckProfileCompliance_Task) (*types.CheckProfileCompliance_TaskResponse, error) {
+	var reqBody, resBody CheckProfileCompliance_TaskBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type CheckRelocate_TaskBody struct {
+	Req *types.CheckRelocate_Task `xml:"urn:vim25 CheckRelocate_Task,omitempty"`
+	Res *types.CheckRelocate_TaskResponse `xml:"urn:vim25 CheckRelocate_TaskResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *CheckRelocate_TaskBody) Fault() *soap.Fault { return b.Fault_ }
+
+func CheckRelocate_Task(ctx context.Context, r soap.RoundTripper, req *types.CheckRelocate_Task) (*types.CheckRelocate_TaskResponse, error) {
+	var reqBody, resBody CheckRelocate_TaskBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type ClearComplianceStatusBody struct {
+	Req *types.ClearComplianceStatus `xml:"urn:vim25 ClearComplianceStatus,omitempty"`
+	Res *types.ClearComplianceStatusResponse `xml:"urn:vim25 ClearComplianceStatusResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *ClearComplianceStatusBody) Fault() *soap.Fault { return b.Fault_ }
+
+func ClearComplianceStatus(ctx context.Context, r soap.RoundTripper, req *types.ClearComplianceStatus) (*types.ClearComplianceStatusResponse, error) {
+	var reqBody, resBody ClearComplianceStatusBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type ClearNFSUserBody struct {
+	Req *types.ClearNFSUser `xml:"urn:vim25 ClearNFSUser,omitempty"`
+	Res *types.ClearNFSUserResponse `xml:"urn:vim25 ClearNFSUserResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *ClearNFSUserBody) Fault() *soap.Fault { return b.Fault_ }
+
+func ClearNFSUser(ctx context.Context, r soap.RoundTripper, req *types.ClearNFSUser) (*types.ClearNFSUserResponse, error) {
+	var reqBody, resBody ClearNFSUserBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type CloneSessionBody struct {
+	Req *types.CloneSession `xml:"urn:vim25 CloneSession,omitempty"`
+	Res *types.CloneSessionResponse `xml:"urn:vim25 CloneSessionResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *CloneSessionBody) Fault() *soap.Fault { return b.Fault_ }
+
+func CloneSession(ctx context.Context, r soap.RoundTripper, req *types.CloneSession) (*types.CloneSessionResponse, error) {
+	var reqBody, resBody CloneSessionBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type CloneVApp_TaskBody struct {
+	Req *types.CloneVApp_Task `xml:"urn:vim25 CloneVApp_Task,omitempty"`
+	Res *types.CloneVApp_TaskResponse `xml:"urn:vim25 CloneVApp_TaskResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *CloneVApp_TaskBody) Fault() *soap.Fault { return b.Fault_ }
+
+func CloneVApp_Task(ctx context.Context, r soap.RoundTripper, req *types.CloneVApp_Task) (*types.CloneVApp_TaskResponse, error) {
+	var reqBody, resBody CloneVApp_TaskBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type CloneVM_TaskBody struct {
+	Req *types.CloneVM_Task `xml:"urn:vim25 CloneVM_Task,omitempty"`
+	Res *types.CloneVM_TaskResponse `xml:"urn:vim25 CloneVM_TaskResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *CloneVM_TaskBody) Fault() *soap.Fault { return b.Fault_ }
+
+func CloneVM_Task(ctx context.Context, r soap.RoundTripper, req *types.CloneVM_Task) (*types.CloneVM_TaskResponse, error) {
+	var reqBody, resBody CloneVM_TaskBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type CloseInventoryViewFolderBody struct {
+	Req *types.CloseInventoryViewFolder `xml:"urn:vim25 CloseInventoryViewFolder,omitempty"`
+	Res *types.CloseInventoryViewFolderResponse `xml:"urn:vim25 CloseInventoryViewFolderResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *CloseInventoryViewFolderBody) Fault() *soap.Fault { return b.Fault_ }
+
+func CloseInventoryViewFolder(ctx context.Context, r soap.RoundTripper, req *types.CloseInventoryViewFolder) (*types.CloseInventoryViewFolderResponse, error) {
+	var reqBody, resBody CloseInventoryViewFolderBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type ClusterEnterMaintenanceModeBody struct {
+	Req *types.ClusterEnterMaintenanceMode `xml:"urn:vim25 ClusterEnterMaintenanceMode,omitempty"`
+	Res *types.ClusterEnterMaintenanceModeResponse `xml:"urn:vim25 ClusterEnterMaintenanceModeResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *ClusterEnterMaintenanceModeBody) Fault() *soap.Fault { return b.Fault_ }
+
+func ClusterEnterMaintenanceMode(ctx context.Context, r soap.RoundTripper, req *types.ClusterEnterMaintenanceMode) (*types.ClusterEnterMaintenanceModeResponse, error) {
+	var reqBody, resBody ClusterEnterMaintenanceModeBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type ComputeDiskPartitionInfoBody struct {
+	Req *types.ComputeDiskPartitionInfo `xml:"urn:vim25 ComputeDiskPartitionInfo,omitempty"`
+	Res *types.ComputeDiskPartitionInfoResponse `xml:"urn:vim25 ComputeDiskPartitionInfoResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *ComputeDiskPartitionInfoBody) Fault() *soap.Fault { return b.Fault_ }
+
+func ComputeDiskPartitionInfo(ctx context.Context, r soap.RoundTripper, req *types.ComputeDiskPartitionInfo) (*types.ComputeDiskPartitionInfoResponse, error) {
+	var reqBody, resBody ComputeDiskPartitionInfoBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type ComputeDiskPartitionInfoForResizeBody struct {
+	Req *types.ComputeDiskPartitionInfoForResize `xml:"urn:vim25 ComputeDiskPartitionInfoForResize,omitempty"`
+	Res *types.ComputeDiskPartitionInfoForResizeResponse `xml:"urn:vim25 ComputeDiskPartitionInfoForResizeResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *ComputeDiskPartitionInfoForResizeBody) Fault() *soap.Fault { return b.Fault_ }
+
+func ComputeDiskPartitionInfoForResize(ctx context.Context, r soap.RoundTripper, req *types.ComputeDiskPartitionInfoForResize) (*types.ComputeDiskPartitionInfoForResizeResponse, error) {
+	var reqBody, resBody ComputeDiskPartitionInfoForResizeBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type ConfigureDatastoreIORM_TaskBody struct {
+	Req *types.ConfigureDatastoreIORM_Task `xml:"urn:vim25 ConfigureDatastoreIORM_Task,omitempty"`
+	Res *types.ConfigureDatastoreIORM_TaskResponse `xml:"urn:vim25 ConfigureDatastoreIORM_TaskResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *ConfigureDatastoreIORM_TaskBody) Fault() *soap.Fault { return b.Fault_ }
+
+func ConfigureDatastoreIORM_Task(ctx context.Context, r soap.RoundTripper, req *types.ConfigureDatastoreIORM_Task) (*types.ConfigureDatastoreIORM_TaskResponse, error) {
+	var reqBody, resBody ConfigureDatastoreIORM_TaskBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type ConfigureDatastorePrincipalBody struct {
+	Req *types.ConfigureDatastorePrincipal `xml:"urn:vim25 ConfigureDatastorePrincipal,omitempty"`
+	Res *types.ConfigureDatastorePrincipalResponse `xml:"urn:vim25 ConfigureDatastorePrincipalResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *ConfigureDatastorePrincipalBody) Fault() *soap.Fault { return b.Fault_ }
+
+func ConfigureDatastorePrincipal(ctx context.Context, r soap.RoundTripper, req *types.ConfigureDatastorePrincipal) (*types.ConfigureDatastorePrincipalResponse, error) {
+	var reqBody, resBody ConfigureDatastorePrincipalBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type ConfigureEvcMode_TaskBody struct {
+	Req *types.ConfigureEvcMode_Task `xml:"urn:vim25 ConfigureEvcMode_Task,omitempty"`
+	Res *types.ConfigureEvcMode_TaskResponse `xml:"urn:vim25 ConfigureEvcMode_TaskResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *ConfigureEvcMode_TaskBody) Fault() *soap.Fault { return b.Fault_ }
+
+func ConfigureEvcMode_Task(ctx context.Context, r soap.RoundTripper, req *types.ConfigureEvcMode_Task) (*types.ConfigureEvcMode_TaskResponse, error) {
+	var reqBody, resBody ConfigureEvcMode_TaskBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type ConfigureHostCache_TaskBody struct {
+	Req *types.ConfigureHostCache_Task `xml:"urn:vim25 ConfigureHostCache_Task,omitempty"`
+	Res *types.ConfigureHostCache_TaskResponse `xml:"urn:vim25 ConfigureHostCache_TaskResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *ConfigureHostCache_TaskBody) Fault() *soap.Fault { return b.Fault_ }
+
+func ConfigureHostCache_Task(ctx context.Context, r soap.RoundTripper, req *types.ConfigureHostCache_Task) (*types.ConfigureHostCache_TaskResponse, error) {
+	var reqBody, resBody ConfigureHostCache_TaskBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type ConfigureLicenseSourceBody struct {
+	Req *types.ConfigureLicenseSource `xml:"urn:vim25 ConfigureLicenseSource,omitempty"`
+	Res *types.ConfigureLicenseSourceResponse `xml:"urn:vim25 ConfigureLicenseSourceResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *ConfigureLicenseSourceBody) Fault() *soap.Fault { return b.Fault_ }
+
+func ConfigureLicenseSource(ctx context.Context, r soap.RoundTripper, req *types.ConfigureLicenseSource) (*types.ConfigureLicenseSourceResponse, error) {
+	var reqBody, resBody ConfigureLicenseSourceBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type ConfigurePowerPolicyBody struct {
+	Req *types.ConfigurePowerPolicy `xml:"urn:vim25 ConfigurePowerPolicy,omitempty"`
+	Res *types.ConfigurePowerPolicyResponse `xml:"urn:vim25 ConfigurePowerPolicyResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *ConfigurePowerPolicyBody) Fault() *soap.Fault { return b.Fault_ }
+
+func ConfigurePowerPolicy(ctx context.Context, r soap.RoundTripper, req *types.ConfigurePowerPolicy) (*types.ConfigurePowerPolicyResponse, error) {
+	var reqBody, resBody ConfigurePowerPolicyBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type ConfigureStorageDrsForPod_TaskBody struct {
+	Req *types.ConfigureStorageDrsForPod_Task `xml:"urn:vim25 ConfigureStorageDrsForPod_Task,omitempty"`
+	Res *types.ConfigureStorageDrsForPod_TaskResponse `xml:"urn:vim25 ConfigureStorageDrsForPod_TaskResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *ConfigureStorageDrsForPod_TaskBody) Fault() *soap.Fault { return b.Fault_ }
+
+func ConfigureStorageDrsForPod_Task(ctx context.Context, r soap.RoundTripper, req *types.ConfigureStorageDrsForPod_Task) (*types.ConfigureStorageDrsForPod_TaskResponse, error) {
+	var reqBody, resBody ConfigureStorageDrsForPod_TaskBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type ConfigureVFlashResourceEx_TaskBody struct {
+	Req *types.ConfigureVFlashResourceEx_Task `xml:"urn:vim25 ConfigureVFlashResourceEx_Task,omitempty"`
+	Res *types.ConfigureVFlashResourceEx_TaskResponse `xml:"urn:vim25 ConfigureVFlashResourceEx_TaskResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *ConfigureVFlashResourceEx_TaskBody) Fault() *soap.Fault { return b.Fault_ }
+
+func ConfigureVFlashResourceEx_Task(ctx context.Context, r soap.RoundTripper, req *types.ConfigureVFlashResourceEx_Task) (*types.ConfigureVFlashResourceEx_TaskResponse, error) {
+	var reqBody, resBody ConfigureVFlashResourceEx_TaskBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type ConsolidateVMDisks_TaskBody struct {
+	Req *types.ConsolidateVMDisks_Task `xml:"urn:vim25 ConsolidateVMDisks_Task,omitempty"`
+	Res *types.ConsolidateVMDisks_TaskResponse `xml:"urn:vim25 ConsolidateVMDisks_TaskResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *ConsolidateVMDisks_TaskBody) Fault() *soap.Fault { return b.Fault_ }
+
+func ConsolidateVMDisks_Task(ctx context.Context, r soap.RoundTripper, req *types.ConsolidateVMDisks_Task) (*types.ConsolidateVMDisks_TaskResponse, error) {
+	var reqBody, resBody ConsolidateVMDisks_TaskBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type ContinueRetrievePropertiesExBody struct {
+	Req *types.ContinueRetrievePropertiesEx `xml:"urn:vim25 ContinueRetrievePropertiesEx,omitempty"`
+	Res *types.ContinueRetrievePropertiesExResponse `xml:"urn:vim25 ContinueRetrievePropertiesExResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *ContinueRetrievePropertiesExBody) Fault() *soap.Fault { return b.Fault_ }
+
+func ContinueRetrievePropertiesEx(ctx context.Context, r soap.RoundTripper, req *types.ContinueRetrievePropertiesEx) (*types.ContinueRetrievePropertiesExResponse, error) {
+	var reqBody, resBody ContinueRetrievePropertiesExBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type CopyDatastoreFile_TaskBody struct {
+	Req *types.CopyDatastoreFile_Task `xml:"urn:vim25 CopyDatastoreFile_Task,omitempty"`
+	Res *types.CopyDatastoreFile_TaskResponse `xml:"urn:vim25 CopyDatastoreFile_TaskResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *CopyDatastoreFile_TaskBody) Fault() *soap.Fault { return b.Fault_ }
+
+func CopyDatastoreFile_Task(ctx context.Context, r soap.RoundTripper, req *types.CopyDatastoreFile_Task) (*types.CopyDatastoreFile_TaskResponse, error) {
+	var reqBody, resBody CopyDatastoreFile_TaskBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type CopyVirtualDisk_TaskBody struct {
+	Req *types.CopyVirtualDisk_Task `xml:"urn:vim25 CopyVirtualDisk_Task,omitempty"`
+	Res *types.CopyVirtualDisk_TaskResponse `xml:"urn:vim25 CopyVirtualDisk_TaskResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *CopyVirtualDisk_TaskBody) Fault() *soap.Fault { return b.Fault_ }
+
+func CopyVirtualDisk_Task(ctx context.Context, r soap.RoundTripper, req *types.CopyVirtualDisk_Task) (*types.CopyVirtualDisk_TaskResponse, error) {
+	var reqBody, resBody CopyVirtualDisk_TaskBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type CreateAlarmBody struct {
+	Req *types.CreateAlarm `xml:"urn:vim25 CreateAlarm,omitempty"`
+	Res *types.CreateAlarmResponse `xml:"urn:vim25 CreateAlarmResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *CreateAlarmBody) Fault() *soap.Fault { return b.Fault_ }
+
+func CreateAlarm(ctx context.Context, r soap.RoundTripper, req *types.CreateAlarm) (*types.CreateAlarmResponse, error) {
+	var reqBody, resBody CreateAlarmBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type CreateChildVM_TaskBody struct {
+	Req *types.CreateChildVM_Task `xml:"urn:vim25 CreateChildVM_Task,omitempty"`
+	Res *types.CreateChildVM_TaskResponse `xml:"urn:vim25 CreateChildVM_TaskResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *CreateChildVM_TaskBody) Fault() *soap.Fault { return b.Fault_ }
+
+func CreateChildVM_Task(ctx context.Context, r soap.RoundTripper, req *types.CreateChildVM_Task) (*types.CreateChildVM_TaskResponse, error) {
+	var reqBody, resBody CreateChildVM_TaskBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type CreateClusterBody struct {
+	Req *types.CreateCluster `xml:"urn:vim25 CreateCluster,omitempty"`
+	Res *types.CreateClusterResponse `xml:"urn:vim25 CreateClusterResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *CreateClusterBody) Fault() *soap.Fault { return b.Fault_ }
+
+func CreateCluster(ctx context.Context, r soap.RoundTripper, req *types.CreateCluster) (*types.CreateClusterResponse, error) {
+	var reqBody, resBody CreateClusterBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type CreateClusterExBody struct {
+	Req *types.CreateClusterEx `xml:"urn:vim25 CreateClusterEx,omitempty"`
+	Res *types.CreateClusterExResponse `xml:"urn:vim25 CreateClusterExResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *CreateClusterExBody) Fault() *soap.Fault { return b.Fault_ }
+
+func CreateClusterEx(ctx context.Context, r soap.RoundTripper, req *types.CreateClusterEx) (*types.CreateClusterExResponse, error) {
+	var reqBody, resBody CreateClusterExBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type CreateCollectorForEventsBody struct {
+	Req *types.CreateCollectorForEvents `xml:"urn:vim25 CreateCollectorForEvents,omitempty"`
+	Res *types.CreateCollectorForEventsResponse `xml:"urn:vim25 CreateCollectorForEventsResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *CreateCollectorForEventsBody) Fault() *soap.Fault { return b.Fault_ }
+
+func CreateCollectorForEvents(ctx context.Context, r soap.RoundTripper, req *types.CreateCollectorForEvents) (*types.CreateCollectorForEventsResponse, error) {
+	var reqBody, resBody CreateCollectorForEventsBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type CreateCollectorForTasksBody struct {
+	Req *types.CreateCollectorForTasks `xml:"urn:vim25 CreateCollectorForTasks,omitempty"`
+	Res *types.CreateCollectorForTasksResponse `xml:"urn:vim25 CreateCollectorForTasksResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *CreateCollectorForTasksBody) Fault() *soap.Fault { return b.Fault_ }
+
+func CreateCollectorForTasks(ctx context.Context, r soap.RoundTripper, req *types.CreateCollectorForTasks) (*types.CreateCollectorForTasksResponse, error) {
+	var reqBody, resBody CreateCollectorForTasksBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type CreateContainerViewBody struct {
+	Req *types.CreateContainerView `xml:"urn:vim25 CreateContainerView,omitempty"`
+	Res *types.CreateContainerViewResponse `xml:"urn:vim25 CreateContainerViewResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *CreateContainerViewBody) Fault() *soap.Fault { return b.Fault_ }
+
+func CreateContainerView(ctx context.Context, r soap.RoundTripper, req *types.CreateContainerView) (*types.CreateContainerViewResponse, error) {
+	var reqBody, resBody CreateContainerViewBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type CreateCustomizationSpecBody struct {
+	Req *types.CreateCustomizationSpec `xml:"urn:vim25 CreateCustomizationSpec,omitempty"`
+	Res *types.CreateCustomizationSpecResponse `xml:"urn:vim25 CreateCustomizationSpecResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *CreateCustomizationSpecBody) Fault() *soap.Fault { return b.Fault_ }
+
+func CreateCustomizationSpec(ctx context.Context, r soap.RoundTripper, req *types.CreateCustomizationSpec) (*types.CreateCustomizationSpecResponse, error) {
+	var reqBody, resBody CreateCustomizationSpecBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type CreateDVPortgroup_TaskBody struct {
+	Req *types.CreateDVPortgroup_Task `xml:"urn:vim25 CreateDVPortgroup_Task,omitempty"`
+	Res *types.CreateDVPortgroup_TaskResponse `xml:"urn:vim25 CreateDVPortgroup_TaskResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *CreateDVPortgroup_TaskBody) Fault() *soap.Fault { return b.Fault_ }
+
+func CreateDVPortgroup_Task(ctx context.Context, r soap.RoundTripper, req *types.CreateDVPortgroup_Task) (*types.CreateDVPortgroup_TaskResponse, error) {
+	var reqBody, resBody CreateDVPortgroup_TaskBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type CreateDVS_TaskBody struct {
+	Req *types.CreateDVS_Task `xml:"urn:vim25 CreateDVS_Task,omitempty"`
+	Res *types.CreateDVS_TaskResponse `xml:"urn:vim25 CreateDVS_TaskResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *CreateDVS_TaskBody) Fault() *soap.Fault { return b.Fault_ }
+
+func CreateDVS_Task(ctx context.Context, r soap.RoundTripper, req *types.CreateDVS_Task) (*types.CreateDVS_TaskResponse, error) {
+	var reqBody, resBody CreateDVS_TaskBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type CreateDatacenterBody struct {
+	Req *types.CreateDatacenter `xml:"urn:vim25 CreateDatacenter,omitempty"`
+	Res *types.CreateDatacenterResponse `xml:"urn:vim25 CreateDatacenterResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *CreateDatacenterBody) Fault() *soap.Fault { return b.Fault_ }
+
+func CreateDatacenter(ctx context.Context, r soap.RoundTripper, req *types.CreateDatacenter) (*types.CreateDatacenterResponse, error) {
+	var reqBody, resBody CreateDatacenterBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type CreateDefaultProfileBody struct {
+	Req *types.CreateDefaultProfile `xml:"urn:vim25 CreateDefaultProfile,omitempty"`
+	Res *types.CreateDefaultProfileResponse `xml:"urn:vim25 CreateDefaultProfileResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *CreateDefaultProfileBody) Fault() *soap.Fault { return b.Fault_ }
+
+func CreateDefaultProfile(ctx context.Context, r soap.RoundTripper, req *types.CreateDefaultProfile) (*types.CreateDefaultProfileResponse, error) {
+	var reqBody, resBody CreateDefaultProfileBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type CreateDescriptorBody struct {
+	Req *types.CreateDescriptor `xml:"urn:vim25 CreateDescriptor,omitempty"`
+	Res *types.CreateDescriptorResponse `xml:"urn:vim25 CreateDescriptorResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *CreateDescriptorBody) Fault() *soap.Fault { return b.Fault_ }
+
+func CreateDescriptor(ctx context.Context, r soap.RoundTripper, req *types.CreateDescriptor) (*types.CreateDescriptorResponse, error) {
+	var reqBody, resBody CreateDescriptorBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type CreateDiagnosticPartitionBody struct {
+	Req *types.CreateDiagnosticPartition `xml:"urn:vim25 CreateDiagnosticPartition,omitempty"`
+	Res *types.CreateDiagnosticPartitionResponse `xml:"urn:vim25 CreateDiagnosticPartitionResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *CreateDiagnosticPartitionBody) Fault() *soap.Fault { return b.Fault_ }
+
+func CreateDiagnosticPartition(ctx context.Context, r soap.RoundTripper, req *types.CreateDiagnosticPartition) (*types.CreateDiagnosticPartitionResponse, error) {
+	var reqBody, resBody CreateDiagnosticPartitionBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type CreateDirectoryBody struct {
+	Req *types.CreateDirectory `xml:"urn:vim25 CreateDirectory,omitempty"`
+	Res *types.CreateDirectoryResponse `xml:"urn:vim25 CreateDirectoryResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *CreateDirectoryBody) Fault() *soap.Fault { return b.Fault_ }
+
+func CreateDirectory(ctx context.Context, r soap.RoundTripper, req *types.CreateDirectory) (*types.CreateDirectoryResponse, error) {
+	var reqBody, resBody CreateDirectoryBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type CreateFilterBody struct {
+	Req *types.CreateFilter `xml:"urn:vim25 CreateFilter,omitempty"`
+	Res *types.CreateFilterResponse `xml:"urn:vim25 CreateFilterResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *CreateFilterBody) Fault() *soap.Fault { return b.Fault_ }
+
+func CreateFilter(ctx context.Context, r soap.RoundTripper, req *types.CreateFilter) (*types.CreateFilterResponse, error) {
+	var reqBody, resBody CreateFilterBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type CreateFolderBody struct {
+	Req *types.CreateFolder `xml:"urn:vim25 CreateFolder,omitempty"`
+	Res *types.CreateFolderResponse `xml:"urn:vim25 CreateFolderResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *CreateFolderBody) Fault() *soap.Fault { return b.Fault_ }
+
+func CreateFolder(ctx context.Context, r soap.RoundTripper, req *types.CreateFolder) (*types.CreateFolderResponse, error) {
+	var reqBody, resBody CreateFolderBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type CreateGroupBody struct {
+	Req *types.CreateGroup `xml:"urn:vim25 CreateGroup,omitempty"`
+	Res *types.CreateGroupResponse `xml:"urn:vim25 CreateGroupResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *CreateGroupBody) Fault() *soap.Fault { return b.Fault_ }
+
+func CreateGroup(ctx context.Context, r soap.RoundTripper, req *types.CreateGroup) (*types.CreateGroupResponse, error) {
+	var reqBody, resBody CreateGroupBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type CreateImportSpecBody struct {
+	Req *types.CreateImportSpec `xml:"urn:vim25 CreateImportSpec,omitempty"`
+	Res *types.CreateImportSpecResponse `xml:"urn:vim25 CreateImportSpecResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *CreateImportSpecBody) Fault() *soap.Fault { return b.Fault_ }
+
+func CreateImportSpec(ctx context.Context, r soap.RoundTripper, req *types.CreateImportSpec) (*types.CreateImportSpecResponse, error) {
+	var reqBody, resBody CreateImportSpecBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type CreateInventoryViewBody struct {
+	Req *types.CreateInventoryView `xml:"urn:vim25 CreateInventoryView,omitempty"`
+	Res *types.CreateInventoryViewResponse `xml:"urn:vim25 CreateInventoryViewResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *CreateInventoryViewBody) Fault() *soap.Fault { return b.Fault_ }
+
+func CreateInventoryView(ctx context.Context, r soap.RoundTripper, req *types.CreateInventoryView) (*types.CreateInventoryViewResponse, error) {
+	var reqBody, resBody CreateInventoryViewBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type CreateIpPoolBody struct {
+	Req *types.CreateIpPool `xml:"urn:vim25 CreateIpPool,omitempty"`
+	Res *types.CreateIpPoolResponse `xml:"urn:vim25 CreateIpPoolResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *CreateIpPoolBody) Fault() *soap.Fault { return b.Fault_ }
+
+func CreateIpPool(ctx context.Context, r soap.RoundTripper, req *types.CreateIpPool) (*types.CreateIpPoolResponse, error) {
+	var reqBody, resBody CreateIpPoolBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type CreateListViewBody struct {
+	Req *types.CreateListView `xml:"urn:vim25 CreateListView,omitempty"`
+	Res *types.CreateListViewResponse `xml:"urn:vim25 CreateListViewResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *CreateListViewBody) Fault() *soap.Fault { return b.Fault_ }
+
+func CreateListView(ctx context.Context, r soap.RoundTripper, req *types.CreateListView) (*types.CreateListViewResponse, error) {
+	var reqBody, resBody CreateListViewBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type CreateListViewFromViewBody struct {
+	Req *types.CreateListViewFromView `xml:"urn:vim25 CreateListViewFromView,omitempty"`
+	Res *types.CreateListViewFromViewResponse `xml:"urn:vim25 CreateListViewFromViewResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *CreateListViewFromViewBody) Fault() *soap.Fault { return b.Fault_ }
+
+func CreateListViewFromView(ctx context.Context, r soap.RoundTripper, req *types.CreateListViewFromView) (*types.CreateListViewFromViewResponse, error) {
+	var reqBody, resBody CreateListViewFromViewBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type CreateLocalDatastoreBody struct {
+	Req *types.CreateLocalDatastore `xml:"urn:vim25 CreateLocalDatastore,omitempty"`
+	Res *types.CreateLocalDatastoreResponse `xml:"urn:vim25 CreateLocalDatastoreResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *CreateLocalDatastoreBody) Fault() *soap.Fault { return b.Fault_ }
+
+func CreateLocalDatastore(ctx context.Context, r soap.RoundTripper, req *types.CreateLocalDatastore) (*types.CreateLocalDatastoreResponse, error) {
+	var reqBody, resBody CreateLocalDatastoreBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type CreateNasDatastoreBody struct {
+	Req *types.CreateNasDatastore `xml:"urn:vim25 CreateNasDatastore,omitempty"`
+	Res *types.CreateNasDatastoreResponse `xml:"urn:vim25 CreateNasDatastoreResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *CreateNasDatastoreBody) Fault() *soap.Fault { return b.Fault_ }
+
+func CreateNasDatastore(ctx context.Context, r soap.RoundTripper, req *types.CreateNasDatastore) (*types.CreateNasDatastoreResponse, error) {
+	var reqBody, resBody CreateNasDatastoreBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type CreateObjectScheduledTaskBody struct {
+	Req *types.CreateObjectScheduledTask `xml:"urn:vim25 CreateObjectScheduledTask,omitempty"`
+	Res *types.CreateObjectScheduledTaskResponse `xml:"urn:vim25 CreateObjectScheduledTaskResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *CreateObjectScheduledTaskBody) Fault() *soap.Fault { return b.Fault_ }
+
+func CreateObjectScheduledTask(ctx context.Context, r soap.RoundTripper, req *types.CreateObjectScheduledTask) (*types.CreateObjectScheduledTaskResponse, error) {
+	var reqBody, resBody CreateObjectScheduledTaskBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type CreatePerfIntervalBody struct {
+	Req *types.CreatePerfInterval `xml:"urn:vim25 CreatePerfInterval,omitempty"`
+	Res *types.CreatePerfIntervalResponse `xml:"urn:vim25 CreatePerfIntervalResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *CreatePerfIntervalBody) Fault() *soap.Fault { return b.Fault_ }
+
+func CreatePerfInterval(ctx context.Context, r soap.RoundTripper, req *types.CreatePerfInterval) (*types.CreatePerfIntervalResponse, error) {
+	var reqBody, resBody CreatePerfIntervalBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type CreateProfileBody struct {
+	Req *types.CreateProfile `xml:"urn:vim25 CreateProfile,omitempty"`
+	Res *types.CreateProfileResponse `xml:"urn:vim25 CreateProfileResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *CreateProfileBody) Fault() *soap.Fault { return b.Fault_ }
+
+func CreateProfile(ctx context.Context, r soap.RoundTripper, req *types.CreateProfile) (*types.CreateProfileResponse, error) {
+	var reqBody, resBody CreateProfileBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type CreatePropertyCollectorBody struct {
+	Req *types.CreatePropertyCollector `xml:"urn:vim25 CreatePropertyCollector,omitempty"`
+	Res *types.CreatePropertyCollectorResponse `xml:"urn:vim25 CreatePropertyCollectorResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *CreatePropertyCollectorBody) Fault() *soap.Fault { return b.Fault_ }
+
+func CreatePropertyCollector(ctx context.Context, r soap.RoundTripper, req *types.CreatePropertyCollector) (*types.CreatePropertyCollectorResponse, error) {
+	var reqBody, resBody CreatePropertyCollectorBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type CreateRegistryKeyInGuestBody struct {
+	Req *types.CreateRegistryKeyInGuest `xml:"urn:vim25 CreateRegistryKeyInGuest,omitempty"`
+	Res *types.CreateRegistryKeyInGuestResponse `xml:"urn:vim25 CreateRegistryKeyInGuestResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *CreateRegistryKeyInGuestBody) Fault() *soap.Fault { return b.Fault_ }
+
+func CreateRegistryKeyInGuest(ctx context.Context, r soap.RoundTripper, req *types.CreateRegistryKeyInGuest) (*types.CreateRegistryKeyInGuestResponse, error) {
+	var reqBody, resBody CreateRegistryKeyInGuestBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type CreateResourcePoolBody struct {
+	Req *types.CreateResourcePool `xml:"urn:vim25 CreateResourcePool,omitempty"`
+	Res *types.CreateResourcePoolResponse `xml:"urn:vim25 CreateResourcePoolResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *CreateResourcePoolBody) Fault() *soap.Fault { return b.Fault_ }
+
+func CreateResourcePool(ctx context.Context, r soap.RoundTripper, req *types.CreateResourcePool) (*types.CreateResourcePoolResponse, error) {
+	var reqBody, resBody CreateResourcePoolBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type CreateScheduledTaskBody struct {
+	Req *types.CreateScheduledTask `xml:"urn:vim25 CreateScheduledTask,omitempty"`
+	Res *types.CreateScheduledTaskResponse `xml:"urn:vim25 CreateScheduledTaskResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *CreateScheduledTaskBody) Fault() *soap.Fault { return b.Fault_ }
+
+func CreateScheduledTask(ctx context.Context, r soap.RoundTripper, req *types.CreateScheduledTask) (*types.CreateScheduledTaskResponse, error) {
+	var reqBody, resBody CreateScheduledTaskBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type CreateScreenshot_TaskBody struct {
+	Req *types.CreateScreenshot_Task `xml:"urn:vim25 CreateScreenshot_Task,omitempty"`
+	Res *types.CreateScreenshot_TaskResponse `xml:"urn:vim25 CreateScreenshot_TaskResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *CreateScreenshot_TaskBody) Fault() *soap.Fault { return b.Fault_ }
+
+func CreateScreenshot_Task(ctx context.Context, r soap.RoundTripper, req *types.CreateScreenshot_Task) (*types.CreateScreenshot_TaskResponse, error) {
+	var reqBody, resBody CreateScreenshot_TaskBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type CreateSecondaryVMEx_TaskBody struct {
+	Req *types.CreateSecondaryVMEx_Task `xml:"urn:vim25 CreateSecondaryVMEx_Task,omitempty"`
+	Res *types.CreateSecondaryVMEx_TaskResponse `xml:"urn:vim25 CreateSecondaryVMEx_TaskResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *CreateSecondaryVMEx_TaskBody) Fault() *soap.Fault { return b.Fault_ }
+
+func CreateSecondaryVMEx_Task(ctx context.Context, r soap.RoundTripper, req *types.CreateSecondaryVMEx_Task) (*types.CreateSecondaryVMEx_TaskResponse, error) {
+	var reqBody, resBody CreateSecondaryVMEx_TaskBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type CreateSecondaryVM_TaskBody struct {
+	Req *types.CreateSecondaryVM_Task `xml:"urn:vim25 CreateSecondaryVM_Task,omitempty"`
+	Res *types.CreateSecondaryVM_TaskResponse `xml:"urn:vim25 CreateSecondaryVM_TaskResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *CreateSecondaryVM_TaskBody) Fault() *soap.Fault { return b.Fault_ }
+
+func CreateSecondaryVM_Task(ctx context.Context, r soap.RoundTripper, req *types.CreateSecondaryVM_Task) (*types.CreateSecondaryVM_TaskResponse, error) {
+	var reqBody, resBody CreateSecondaryVM_TaskBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type CreateSnapshot_TaskBody struct {
+	Req *types.CreateSnapshot_Task `xml:"urn:vim25 CreateSnapshot_Task,omitempty"`
+	Res *types.CreateSnapshot_TaskResponse `xml:"urn:vim25 CreateSnapshot_TaskResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *CreateSnapshot_TaskBody) Fault() *soap.Fault { return b.Fault_ }
+
+func CreateSnapshot_Task(ctx context.Context, r soap.RoundTripper, req *types.CreateSnapshot_Task) (*types.CreateSnapshot_TaskResponse, error) {
+	var reqBody, resBody CreateSnapshot_TaskBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type CreateStoragePodBody struct {
+	Req *types.CreateStoragePod `xml:"urn:vim25 CreateStoragePod,omitempty"`
+	Res *types.CreateStoragePodResponse `xml:"urn:vim25 CreateStoragePodResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *CreateStoragePodBody) Fault() *soap.Fault { return b.Fault_ }
+
+func CreateStoragePod(ctx context.Context, r soap.RoundTripper, req *types.CreateStoragePod) (*types.CreateStoragePodResponse, error) {
+	var reqBody, resBody CreateStoragePodBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type CreateTaskBody struct {
+	Req *types.CreateTask `xml:"urn:vim25 CreateTask,omitempty"`
+	Res *types.CreateTaskResponse `xml:"urn:vim25 CreateTaskResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *CreateTaskBody) Fault() *soap.Fault { return b.Fault_ }
+
+func CreateTask(ctx context.Context, r soap.RoundTripper, req *types.CreateTask) (*types.CreateTaskResponse, error) {
+	var reqBody, resBody CreateTaskBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type CreateTemporaryDirectoryInGuestBody struct {
+	Req *types.CreateTemporaryDirectoryInGuest `xml:"urn:vim25 CreateTemporaryDirectoryInGuest,omitempty"`
+	Res *types.CreateTemporaryDirectoryInGuestResponse `xml:"urn:vim25 CreateTemporaryDirectoryInGuestResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *CreateTemporaryDirectoryInGuestBody) Fault() *soap.Fault { return b.Fault_ }
+
+func CreateTemporaryDirectoryInGuest(ctx context.Context, r soap.RoundTripper, req *types.CreateTemporaryDirectoryInGuest) (*types.CreateTemporaryDirectoryInGuestResponse, error) {
+	var reqBody, resBody CreateTemporaryDirectoryInGuestBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type CreateTemporaryFileInGuestBody struct {
+	Req *types.CreateTemporaryFileInGuest `xml:"urn:vim25 CreateTemporaryFileInGuest,omitempty"`
+	Res *types.CreateTemporaryFileInGuestResponse `xml:"urn:vim25 CreateTemporaryFileInGuestResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *CreateTemporaryFileInGuestBody) Fault() *soap.Fault { return b.Fault_ }
+
+func CreateTemporaryFileInGuest(ctx context.Context, r soap.RoundTripper, req *types.CreateTemporaryFileInGuest) (*types.CreateTemporaryFileInGuestResponse, error) {
+	var reqBody, resBody CreateTemporaryFileInGuestBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type CreateUserBody struct {
+	Req *types.CreateUser `xml:"urn:vim25 CreateUser,omitempty"`
+	Res *types.CreateUserResponse `xml:"urn:vim25 CreateUserResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *CreateUserBody) Fault() *soap.Fault { return b.Fault_ }
+
+func CreateUser(ctx context.Context, r soap.RoundTripper, req *types.CreateUser) (*types.CreateUserResponse, error) {
+	var reqBody, resBody CreateUserBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type CreateVAppBody struct {
+	Req *types.CreateVApp `xml:"urn:vim25 CreateVApp,omitempty"`
+	Res *types.CreateVAppResponse `xml:"urn:vim25 CreateVAppResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *CreateVAppBody) Fault() *soap.Fault { return b.Fault_ }
+
+func CreateVApp(ctx context.Context, r soap.RoundTripper, req *types.CreateVApp) (*types.CreateVAppResponse, error) {
+	var reqBody, resBody CreateVAppBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type CreateVM_TaskBody struct {
+	Req *types.CreateVM_Task `xml:"urn:vim25 CreateVM_Task,omitempty"`
+	Res *types.CreateVM_TaskResponse `xml:"urn:vim25 CreateVM_TaskResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *CreateVM_TaskBody) Fault() *soap.Fault { return b.Fault_ }
+
+func CreateVM_Task(ctx context.Context, r soap.RoundTripper, req *types.CreateVM_Task) (*types.CreateVM_TaskResponse, error) {
+	var reqBody, resBody CreateVM_TaskBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type CreateVRPBody struct {
+	Req *types.CreateVRP `xml:"urn:vim25 CreateVRP,omitempty"`
+	Res *types.CreateVRPResponse `xml:"urn:vim25 CreateVRPResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *CreateVRPBody) Fault() *soap.Fault { return b.Fault_ }
+
+func CreateVRP(ctx context.Context, r soap.RoundTripper, req *types.CreateVRP) (*types.CreateVRPResponse, error) {
+	var reqBody, resBody CreateVRPBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type CreateVirtualDisk_TaskBody struct {
+	Req *types.CreateVirtualDisk_Task `xml:"urn:vim25 CreateVirtualDisk_Task,omitempty"`
+	Res *types.CreateVirtualDisk_TaskResponse `xml:"urn:vim25 CreateVirtualDisk_TaskResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *CreateVirtualDisk_TaskBody) Fault() *soap.Fault { return b.Fault_ }
+
+func CreateVirtualDisk_Task(ctx context.Context, r soap.RoundTripper, req *types.CreateVirtualDisk_Task) (*types.CreateVirtualDisk_TaskResponse, error) {
+	var reqBody, resBody CreateVirtualDisk_TaskBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type CreateVmfsDatastoreBody struct {
+	Req *types.CreateVmfsDatastore `xml:"urn:vim25 CreateVmfsDatastore,omitempty"`
+	Res *types.CreateVmfsDatastoreResponse `xml:"urn:vim25 CreateVmfsDatastoreResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *CreateVmfsDatastoreBody) Fault() *soap.Fault { return b.Fault_ }
+
+func CreateVmfsDatastore(ctx context.Context, r soap.RoundTripper, req *types.CreateVmfsDatastore) (*types.CreateVmfsDatastoreResponse, error) {
+	var reqBody, resBody CreateVmfsDatastoreBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type CreateVvolDatastoreBody struct {
+	Req *types.CreateVvolDatastore `xml:"urn:vim25 CreateVvolDatastore,omitempty"`
+	Res *types.CreateVvolDatastoreResponse `xml:"urn:vim25 CreateVvolDatastoreResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *CreateVvolDatastoreBody) Fault() *soap.Fault { return b.Fault_ }
+
+func CreateVvolDatastore(ctx context.Context, r soap.RoundTripper, req *types.CreateVvolDatastore) (*types.CreateVvolDatastoreResponse, error) {
+	var reqBody, resBody CreateVvolDatastoreBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type CurrentTimeBody struct {
+	Req *types.CurrentTime `xml:"urn:vim25 CurrentTime,omitempty"`
+	Res *types.CurrentTimeResponse `xml:"urn:vim25 CurrentTimeResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *CurrentTimeBody) Fault() *soap.Fault { return b.Fault_ }
+
+func CurrentTime(ctx context.Context, r soap.RoundTripper, req *types.CurrentTime) (*types.CurrentTimeResponse, error) {
+	var reqBody, resBody CurrentTimeBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type CustomizationSpecItemToXmlBody struct {
+	Req *types.CustomizationSpecItemToXml `xml:"urn:vim25 CustomizationSpecItemToXml,omitempty"`
+	Res *types.CustomizationSpecItemToXmlResponse `xml:"urn:vim25 CustomizationSpecItemToXmlResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *CustomizationSpecItemToXmlBody) Fault() *soap.Fault { return b.Fault_ }
+
+func CustomizationSpecItemToXml(ctx context.Context, r soap.RoundTripper, req *types.CustomizationSpecItemToXml) (*types.CustomizationSpecItemToXmlResponse, error) {
+	var reqBody, resBody CustomizationSpecItemToXmlBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type CustomizeVM_TaskBody struct {
+	Req *types.CustomizeVM_Task `xml:"urn:vim25 CustomizeVM_Task,omitempty"`
+	Res *types.CustomizeVM_TaskResponse `xml:"urn:vim25 CustomizeVM_TaskResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *CustomizeVM_TaskBody) Fault() *soap.Fault { return b.Fault_ }
+
+func CustomizeVM_Task(ctx context.Context, r soap.RoundTripper, req *types.CustomizeVM_Task) (*types.CustomizeVM_TaskResponse, error) {
+	var reqBody, resBody CustomizeVM_TaskBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type DVPortgroupRollback_TaskBody struct {
+	Req *types.DVPortgroupRollback_Task `xml:"urn:vim25 DVPortgroupRollback_Task,omitempty"`
+	Res *types.DVPortgroupRollback_TaskResponse `xml:"urn:vim25 DVPortgroupRollback_TaskResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *DVPortgroupRollback_TaskBody) Fault() *soap.Fault { return b.Fault_ }
+
+func DVPortgroupRollback_Task(ctx context.Context, r soap.RoundTripper, req *types.DVPortgroupRollback_Task) (*types.DVPortgroupRollback_TaskResponse, error) {
+	var reqBody, resBody DVPortgroupRollback_TaskBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+ return resBody.Res, nil +} + +type DVSManagerExportEntity_TaskBody struct { + Req *types.DVSManagerExportEntity_Task `xml:"urn:vim25 DVSManagerExportEntity_Task,omitempty"` + Res *types.DVSManagerExportEntity_TaskResponse `xml:"urn:vim25 DVSManagerExportEntity_TaskResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *DVSManagerExportEntity_TaskBody) Fault() *soap.Fault { return b.Fault_ } + +func DVSManagerExportEntity_Task(ctx context.Context, r soap.RoundTripper, req *types.DVSManagerExportEntity_Task) (*types.DVSManagerExportEntity_TaskResponse, error) { + var reqBody, resBody DVSManagerExportEntity_TaskBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type DVSManagerImportEntity_TaskBody struct { + Req *types.DVSManagerImportEntity_Task `xml:"urn:vim25 DVSManagerImportEntity_Task,omitempty"` + Res *types.DVSManagerImportEntity_TaskResponse `xml:"urn:vim25 DVSManagerImportEntity_TaskResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *DVSManagerImportEntity_TaskBody) Fault() *soap.Fault { return b.Fault_ } + +func DVSManagerImportEntity_Task(ctx context.Context, r soap.RoundTripper, req *types.DVSManagerImportEntity_Task) (*types.DVSManagerImportEntity_TaskResponse, error) { + var reqBody, resBody DVSManagerImportEntity_TaskBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type DVSManagerLookupDvPortGroupBody struct { + Req *types.DVSManagerLookupDvPortGroup `xml:"urn:vim25 DVSManagerLookupDvPortGroup,omitempty"` + Res *types.DVSManagerLookupDvPortGroupResponse `xml:"urn:vim25 DVSManagerLookupDvPortGroupResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *DVSManagerLookupDvPortGroupBody) Fault() *soap.Fault { return b.Fault_ } + +func DVSManagerLookupDvPortGroup(ctx context.Context, r soap.RoundTripper, req *types.DVSManagerLookupDvPortGroup) (*types.DVSManagerLookupDvPortGroupResponse, error) { + var reqBody, resBody DVSManagerLookupDvPortGroupBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type DVSRollback_TaskBody struct { + Req *types.DVSRollback_Task `xml:"urn:vim25 DVSRollback_Task,omitempty"` + Res *types.DVSRollback_TaskResponse `xml:"urn:vim25 DVSRollback_TaskResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *DVSRollback_TaskBody) Fault() *soap.Fault { return b.Fault_ } + +func DVSRollback_Task(ctx context.Context, r soap.RoundTripper, req *types.DVSRollback_Task) (*types.DVSRollback_TaskResponse, error) { + var reqBody, resBody DVSRollback_TaskBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type DatastoreEnterMaintenanceModeBody struct { + Req *types.DatastoreEnterMaintenanceMode `xml:"urn:vim25 DatastoreEnterMaintenanceMode,omitempty"` + Res *types.DatastoreEnterMaintenanceModeResponse `xml:"urn:vim25 DatastoreEnterMaintenanceModeResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *DatastoreEnterMaintenanceModeBody) Fault() *soap.Fault { 
return b.Fault_ } + +func DatastoreEnterMaintenanceMode(ctx context.Context, r soap.RoundTripper, req *types.DatastoreEnterMaintenanceMode) (*types.DatastoreEnterMaintenanceModeResponse, error) { + var reqBody, resBody DatastoreEnterMaintenanceModeBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type DatastoreExitMaintenanceMode_TaskBody struct { + Req *types.DatastoreExitMaintenanceMode_Task `xml:"urn:vim25 DatastoreExitMaintenanceMode_Task,omitempty"` + Res *types.DatastoreExitMaintenanceMode_TaskResponse `xml:"urn:vim25 DatastoreExitMaintenanceMode_TaskResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *DatastoreExitMaintenanceMode_TaskBody) Fault() *soap.Fault { return b.Fault_ } + +func DatastoreExitMaintenanceMode_Task(ctx context.Context, r soap.RoundTripper, req *types.DatastoreExitMaintenanceMode_Task) (*types.DatastoreExitMaintenanceMode_TaskResponse, error) { + var reqBody, resBody DatastoreExitMaintenanceMode_TaskBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type DecodeLicenseBody struct { + Req *types.DecodeLicense `xml:"urn:vim25 DecodeLicense,omitempty"` + Res *types.DecodeLicenseResponse `xml:"urn:vim25 DecodeLicenseResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *DecodeLicenseBody) Fault() *soap.Fault { return b.Fault_ } + +func DecodeLicense(ctx context.Context, r soap.RoundTripper, req *types.DecodeLicense) (*types.DecodeLicenseResponse, error) { + var reqBody, resBody DecodeLicenseBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type DefragmentAllDisksBody struct { + Req *types.DefragmentAllDisks `xml:"urn:vim25 DefragmentAllDisks,omitempty"` + Res *types.DefragmentAllDisksResponse `xml:"urn:vim25 DefragmentAllDisksResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *DefragmentAllDisksBody) Fault() *soap.Fault { return b.Fault_ } + +func DefragmentAllDisks(ctx context.Context, r soap.RoundTripper, req *types.DefragmentAllDisks) (*types.DefragmentAllDisksResponse, error) { + var reqBody, resBody DefragmentAllDisksBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type DefragmentVirtualDisk_TaskBody struct { + Req *types.DefragmentVirtualDisk_Task `xml:"urn:vim25 DefragmentVirtualDisk_Task,omitempty"` + Res *types.DefragmentVirtualDisk_TaskResponse `xml:"urn:vim25 DefragmentVirtualDisk_TaskResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *DefragmentVirtualDisk_TaskBody) Fault() *soap.Fault { return b.Fault_ } + +func DefragmentVirtualDisk_Task(ctx context.Context, r soap.RoundTripper, req *types.DefragmentVirtualDisk_Task) (*types.DefragmentVirtualDisk_TaskResponse, error) { + var reqBody, resBody DefragmentVirtualDisk_TaskBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type DeleteCustomizationSpecBody struct { + Req *types.DeleteCustomizationSpec `xml:"urn:vim25 DeleteCustomizationSpec,omitempty"` + 
Res *types.DeleteCustomizationSpecResponse `xml:"urn:vim25 DeleteCustomizationSpecResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *DeleteCustomizationSpecBody) Fault() *soap.Fault { return b.Fault_ } + +func DeleteCustomizationSpec(ctx context.Context, r soap.RoundTripper, req *types.DeleteCustomizationSpec) (*types.DeleteCustomizationSpecResponse, error) { + var reqBody, resBody DeleteCustomizationSpecBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type DeleteDatastoreFile_TaskBody struct { + Req *types.DeleteDatastoreFile_Task `xml:"urn:vim25 DeleteDatastoreFile_Task,omitempty"` + Res *types.DeleteDatastoreFile_TaskResponse `xml:"urn:vim25 DeleteDatastoreFile_TaskResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *DeleteDatastoreFile_TaskBody) Fault() *soap.Fault { return b.Fault_ } + +func DeleteDatastoreFile_Task(ctx context.Context, r soap.RoundTripper, req *types.DeleteDatastoreFile_Task) (*types.DeleteDatastoreFile_TaskResponse, error) { + var reqBody, resBody DeleteDatastoreFile_TaskBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type DeleteDirectoryBody struct { + Req *types.DeleteDirectory `xml:"urn:vim25 DeleteDirectory,omitempty"` + Res *types.DeleteDirectoryResponse `xml:"urn:vim25 DeleteDirectoryResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *DeleteDirectoryBody) Fault() *soap.Fault { return b.Fault_ } + +func DeleteDirectory(ctx context.Context, r soap.RoundTripper, req *types.DeleteDirectory) (*types.DeleteDirectoryResponse, error) { + var reqBody, resBody DeleteDirectoryBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type DeleteDirectoryInGuestBody struct { + Req *types.DeleteDirectoryInGuest `xml:"urn:vim25 DeleteDirectoryInGuest,omitempty"` + Res *types.DeleteDirectoryInGuestResponse `xml:"urn:vim25 DeleteDirectoryInGuestResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *DeleteDirectoryInGuestBody) Fault() *soap.Fault { return b.Fault_ } + +func DeleteDirectoryInGuest(ctx context.Context, r soap.RoundTripper, req *types.DeleteDirectoryInGuest) (*types.DeleteDirectoryInGuestResponse, error) { + var reqBody, resBody DeleteDirectoryInGuestBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type DeleteFileBody struct { + Req *types.DeleteFile `xml:"urn:vim25 DeleteFile,omitempty"` + Res *types.DeleteFileResponse `xml:"urn:vim25 DeleteFileResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *DeleteFileBody) Fault() *soap.Fault { return b.Fault_ } + +func DeleteFile(ctx context.Context, r soap.RoundTripper, req *types.DeleteFile) (*types.DeleteFileResponse, error) { + var reqBody, resBody DeleteFileBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type DeleteFileInGuestBody struct { + Req *types.DeleteFileInGuest `xml:"urn:vim25 
DeleteFileInGuest,omitempty"` + Res *types.DeleteFileInGuestResponse `xml:"urn:vim25 DeleteFileInGuestResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *DeleteFileInGuestBody) Fault() *soap.Fault { return b.Fault_ } + +func DeleteFileInGuest(ctx context.Context, r soap.RoundTripper, req *types.DeleteFileInGuest) (*types.DeleteFileInGuestResponse, error) { + var reqBody, resBody DeleteFileInGuestBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type DeleteRegistryKeyInGuestBody struct { + Req *types.DeleteRegistryKeyInGuest `xml:"urn:vim25 DeleteRegistryKeyInGuest,omitempty"` + Res *types.DeleteRegistryKeyInGuestResponse `xml:"urn:vim25 DeleteRegistryKeyInGuestResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *DeleteRegistryKeyInGuestBody) Fault() *soap.Fault { return b.Fault_ } + +func DeleteRegistryKeyInGuest(ctx context.Context, r soap.RoundTripper, req *types.DeleteRegistryKeyInGuest) (*types.DeleteRegistryKeyInGuestResponse, error) { + var reqBody, resBody DeleteRegistryKeyInGuestBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type DeleteRegistryValueInGuestBody struct { + Req *types.DeleteRegistryValueInGuest `xml:"urn:vim25 DeleteRegistryValueInGuest,omitempty"` + Res *types.DeleteRegistryValueInGuestResponse `xml:"urn:vim25 DeleteRegistryValueInGuestResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *DeleteRegistryValueInGuestBody) Fault() *soap.Fault { return b.Fault_ } + +func DeleteRegistryValueInGuest(ctx context.Context, r soap.RoundTripper, req *types.DeleteRegistryValueInGuest) (*types.DeleteRegistryValueInGuestResponse, error) { + var reqBody, resBody DeleteRegistryValueInGuestBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type DeleteScsiLunStateBody struct { + Req *types.DeleteScsiLunState `xml:"urn:vim25 DeleteScsiLunState,omitempty"` + Res *types.DeleteScsiLunStateResponse `xml:"urn:vim25 DeleteScsiLunStateResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *DeleteScsiLunStateBody) Fault() *soap.Fault { return b.Fault_ } + +func DeleteScsiLunState(ctx context.Context, r soap.RoundTripper, req *types.DeleteScsiLunState) (*types.DeleteScsiLunStateResponse, error) { + var reqBody, resBody DeleteScsiLunStateBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type DeleteVRPBody struct { + Req *types.DeleteVRP `xml:"urn:vim25 DeleteVRP,omitempty"` + Res *types.DeleteVRPResponse `xml:"urn:vim25 DeleteVRPResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *DeleteVRPBody) Fault() *soap.Fault { return b.Fault_ } + +func DeleteVRP(ctx context.Context, r soap.RoundTripper, req *types.DeleteVRP) (*types.DeleteVRPResponse, error) { + var reqBody, resBody DeleteVRPBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type DeleteVffsVolumeStateBody struct { + 
Req *types.DeleteVffsVolumeState `xml:"urn:vim25 DeleteVffsVolumeState,omitempty"` + Res *types.DeleteVffsVolumeStateResponse `xml:"urn:vim25 DeleteVffsVolumeStateResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *DeleteVffsVolumeStateBody) Fault() *soap.Fault { return b.Fault_ } + +func DeleteVffsVolumeState(ctx context.Context, r soap.RoundTripper, req *types.DeleteVffsVolumeState) (*types.DeleteVffsVolumeStateResponse, error) { + var reqBody, resBody DeleteVffsVolumeStateBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type DeleteVirtualDisk_TaskBody struct { + Req *types.DeleteVirtualDisk_Task `xml:"urn:vim25 DeleteVirtualDisk_Task,omitempty"` + Res *types.DeleteVirtualDisk_TaskResponse `xml:"urn:vim25 DeleteVirtualDisk_TaskResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *DeleteVirtualDisk_TaskBody) Fault() *soap.Fault { return b.Fault_ } + +func DeleteVirtualDisk_Task(ctx context.Context, r soap.RoundTripper, req *types.DeleteVirtualDisk_Task) (*types.DeleteVirtualDisk_TaskResponse, error) { + var reqBody, resBody DeleteVirtualDisk_TaskBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type DeleteVmfsVolumeStateBody struct { + Req *types.DeleteVmfsVolumeState `xml:"urn:vim25 DeleteVmfsVolumeState,omitempty"` + Res *types.DeleteVmfsVolumeStateResponse `xml:"urn:vim25 DeleteVmfsVolumeStateResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *DeleteVmfsVolumeStateBody) Fault() *soap.Fault { return b.Fault_ } + +func DeleteVmfsVolumeState(ctx context.Context, r soap.RoundTripper, req *types.DeleteVmfsVolumeState) (*types.DeleteVmfsVolumeStateResponse, error) { + var reqBody, resBody DeleteVmfsVolumeStateBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type DeleteVsanObjectsBody struct { + Req *types.DeleteVsanObjects `xml:"urn:vim25 DeleteVsanObjects,omitempty"` + Res *types.DeleteVsanObjectsResponse `xml:"urn:vim25 DeleteVsanObjectsResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *DeleteVsanObjectsBody) Fault() *soap.Fault { return b.Fault_ } + +func DeleteVsanObjects(ctx context.Context, r soap.RoundTripper, req *types.DeleteVsanObjects) (*types.DeleteVsanObjectsResponse, error) { + var reqBody, resBody DeleteVsanObjectsBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type DeployVMBody struct { + Req *types.DeployVM `xml:"urn:vim25 DeployVM,omitempty"` + Res *types.DeployVMResponse `xml:"urn:vim25 DeployVMResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *DeployVMBody) Fault() *soap.Fault { return b.Fault_ } + +func DeployVM(ctx context.Context, r soap.RoundTripper, req *types.DeployVM) (*types.DeployVMResponse, error) { + var reqBody, resBody DeployVMBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type DeselectVnicBody struct { + Req 
*types.DeselectVnic `xml:"urn:vim25 DeselectVnic,omitempty"` + Res *types.DeselectVnicResponse `xml:"urn:vim25 DeselectVnicResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *DeselectVnicBody) Fault() *soap.Fault { return b.Fault_ } + +func DeselectVnic(ctx context.Context, r soap.RoundTripper, req *types.DeselectVnic) (*types.DeselectVnicResponse, error) { + var reqBody, resBody DeselectVnicBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type DeselectVnicForNicTypeBody struct { + Req *types.DeselectVnicForNicType `xml:"urn:vim25 DeselectVnicForNicType,omitempty"` + Res *types.DeselectVnicForNicTypeResponse `xml:"urn:vim25 DeselectVnicForNicTypeResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *DeselectVnicForNicTypeBody) Fault() *soap.Fault { return b.Fault_ } + +func DeselectVnicForNicType(ctx context.Context, r soap.RoundTripper, req *types.DeselectVnicForNicType) (*types.DeselectVnicForNicTypeResponse, error) { + var reqBody, resBody DeselectVnicForNicTypeBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type DestroyChildrenBody struct { + Req *types.DestroyChildren `xml:"urn:vim25 DestroyChildren,omitempty"` + Res *types.DestroyChildrenResponse `xml:"urn:vim25 DestroyChildrenResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *DestroyChildrenBody) Fault() *soap.Fault { return b.Fault_ } + +func DestroyChildren(ctx context.Context, r soap.RoundTripper, req *types.DestroyChildren) (*types.DestroyChildrenResponse, error) { + var reqBody, resBody DestroyChildrenBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type DestroyCollectorBody struct { + Req *types.DestroyCollector `xml:"urn:vim25 DestroyCollector,omitempty"` + Res *types.DestroyCollectorResponse `xml:"urn:vim25 DestroyCollectorResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *DestroyCollectorBody) Fault() *soap.Fault { return b.Fault_ } + +func DestroyCollector(ctx context.Context, r soap.RoundTripper, req *types.DestroyCollector) (*types.DestroyCollectorResponse, error) { + var reqBody, resBody DestroyCollectorBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type DestroyDatastoreBody struct { + Req *types.DestroyDatastore `xml:"urn:vim25 DestroyDatastore,omitempty"` + Res *types.DestroyDatastoreResponse `xml:"urn:vim25 DestroyDatastoreResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *DestroyDatastoreBody) Fault() *soap.Fault { return b.Fault_ } + +func DestroyDatastore(ctx context.Context, r soap.RoundTripper, req *types.DestroyDatastore) (*types.DestroyDatastoreResponse, error) { + var reqBody, resBody DestroyDatastoreBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type DestroyIpPoolBody struct { + Req *types.DestroyIpPool `xml:"urn:vim25 DestroyIpPool,omitempty"` + Res 
*types.DestroyIpPoolResponse `xml:"urn:vim25 DestroyIpPoolResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *DestroyIpPoolBody) Fault() *soap.Fault { return b.Fault_ } + +func DestroyIpPool(ctx context.Context, r soap.RoundTripper, req *types.DestroyIpPool) (*types.DestroyIpPoolResponse, error) { + var reqBody, resBody DestroyIpPoolBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type DestroyNetworkBody struct { + Req *types.DestroyNetwork `xml:"urn:vim25 DestroyNetwork,omitempty"` + Res *types.DestroyNetworkResponse `xml:"urn:vim25 DestroyNetworkResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *DestroyNetworkBody) Fault() *soap.Fault { return b.Fault_ } + +func DestroyNetwork(ctx context.Context, r soap.RoundTripper, req *types.DestroyNetwork) (*types.DestroyNetworkResponse, error) { + var reqBody, resBody DestroyNetworkBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type DestroyProfileBody struct { + Req *types.DestroyProfile `xml:"urn:vim25 DestroyProfile,omitempty"` + Res *types.DestroyProfileResponse `xml:"urn:vim25 DestroyProfileResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *DestroyProfileBody) Fault() *soap.Fault { return b.Fault_ } + +func DestroyProfile(ctx context.Context, r soap.RoundTripper, req *types.DestroyProfile) (*types.DestroyProfileResponse, error) { + var reqBody, resBody DestroyProfileBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type DestroyPropertyCollectorBody struct { + Req *types.DestroyPropertyCollector `xml:"urn:vim25 DestroyPropertyCollector,omitempty"` + Res *types.DestroyPropertyCollectorResponse `xml:"urn:vim25 DestroyPropertyCollectorResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *DestroyPropertyCollectorBody) Fault() *soap.Fault { return b.Fault_ } + +func DestroyPropertyCollector(ctx context.Context, r soap.RoundTripper, req *types.DestroyPropertyCollector) (*types.DestroyPropertyCollectorResponse, error) { + var reqBody, resBody DestroyPropertyCollectorBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type DestroyPropertyFilterBody struct { + Req *types.DestroyPropertyFilter `xml:"urn:vim25 DestroyPropertyFilter,omitempty"` + Res *types.DestroyPropertyFilterResponse `xml:"urn:vim25 DestroyPropertyFilterResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *DestroyPropertyFilterBody) Fault() *soap.Fault { return b.Fault_ } + +func DestroyPropertyFilter(ctx context.Context, r soap.RoundTripper, req *types.DestroyPropertyFilter) (*types.DestroyPropertyFilterResponse, error) { + var reqBody, resBody DestroyPropertyFilterBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type DestroyVffsBody struct { + Req *types.DestroyVffs `xml:"urn:vim25 DestroyVffs,omitempty"` + Res *types.DestroyVffsResponse `xml:"urn:vim25 
DestroyVffsResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *DestroyVffsBody) Fault() *soap.Fault { return b.Fault_ } + +func DestroyVffs(ctx context.Context, r soap.RoundTripper, req *types.DestroyVffs) (*types.DestroyVffsResponse, error) { + var reqBody, resBody DestroyVffsBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type DestroyViewBody struct { + Req *types.DestroyView `xml:"urn:vim25 DestroyView,omitempty"` + Res *types.DestroyViewResponse `xml:"urn:vim25 DestroyViewResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *DestroyViewBody) Fault() *soap.Fault { return b.Fault_ } + +func DestroyView(ctx context.Context, r soap.RoundTripper, req *types.DestroyView) (*types.DestroyViewResponse, error) { + var reqBody, resBody DestroyViewBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type Destroy_TaskBody struct { + Req *types.Destroy_Task `xml:"urn:vim25 Destroy_Task,omitempty"` + Res *types.Destroy_TaskResponse `xml:"urn:vim25 Destroy_TaskResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *Destroy_TaskBody) Fault() *soap.Fault { return b.Fault_ } + +func Destroy_Task(ctx context.Context, r soap.RoundTripper, req *types.Destroy_Task) (*types.Destroy_TaskResponse, error) { + var reqBody, resBody Destroy_TaskBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type DetachScsiLunBody struct { + Req *types.DetachScsiLun `xml:"urn:vim25 DetachScsiLun,omitempty"` + Res *types.DetachScsiLunResponse `xml:"urn:vim25 DetachScsiLunResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *DetachScsiLunBody) Fault() *soap.Fault { return b.Fault_ } + +func DetachScsiLun(ctx context.Context, r soap.RoundTripper, req *types.DetachScsiLun) (*types.DetachScsiLunResponse, error) { + var reqBody, resBody DetachScsiLunBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type DetachScsiLunEx_TaskBody struct { + Req *types.DetachScsiLunEx_Task `xml:"urn:vim25 DetachScsiLunEx_Task,omitempty"` + Res *types.DetachScsiLunEx_TaskResponse `xml:"urn:vim25 DetachScsiLunEx_TaskResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *DetachScsiLunEx_TaskBody) Fault() *soap.Fault { return b.Fault_ } + +func DetachScsiLunEx_Task(ctx context.Context, r soap.RoundTripper, req *types.DetachScsiLunEx_Task) (*types.DetachScsiLunEx_TaskResponse, error) { + var reqBody, resBody DetachScsiLunEx_TaskBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type DisableEvcMode_TaskBody struct { + Req *types.DisableEvcMode_Task `xml:"urn:vim25 DisableEvcMode_Task,omitempty"` + Res *types.DisableEvcMode_TaskResponse `xml:"urn:vim25 DisableEvcMode_TaskResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *DisableEvcMode_TaskBody) Fault() *soap.Fault { return 
b.Fault_ } + +func DisableEvcMode_Task(ctx context.Context, r soap.RoundTripper, req *types.DisableEvcMode_Task) (*types.DisableEvcMode_TaskResponse, error) { + var reqBody, resBody DisableEvcMode_TaskBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type DisableFeatureBody struct { + Req *types.DisableFeature `xml:"urn:vim25 DisableFeature,omitempty"` + Res *types.DisableFeatureResponse `xml:"urn:vim25 DisableFeatureResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *DisableFeatureBody) Fault() *soap.Fault { return b.Fault_ } + +func DisableFeature(ctx context.Context, r soap.RoundTripper, req *types.DisableFeature) (*types.DisableFeatureResponse, error) { + var reqBody, resBody DisableFeatureBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type DisableHyperThreadingBody struct { + Req *types.DisableHyperThreading `xml:"urn:vim25 DisableHyperThreading,omitempty"` + Res *types.DisableHyperThreadingResponse `xml:"urn:vim25 DisableHyperThreadingResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *DisableHyperThreadingBody) Fault() *soap.Fault { return b.Fault_ } + +func DisableHyperThreading(ctx context.Context, r soap.RoundTripper, req *types.DisableHyperThreading) (*types.DisableHyperThreadingResponse, error) { + var reqBody, resBody DisableHyperThreadingBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type DisableMultipathPathBody struct { + Req *types.DisableMultipathPath `xml:"urn:vim25 DisableMultipathPath,omitempty"` + Res *types.DisableMultipathPathResponse `xml:"urn:vim25 DisableMultipathPathResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *DisableMultipathPathBody) Fault() *soap.Fault { return b.Fault_ } + +func DisableMultipathPath(ctx context.Context, r soap.RoundTripper, req *types.DisableMultipathPath) (*types.DisableMultipathPathResponse, error) { + var reqBody, resBody DisableMultipathPathBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type DisableRulesetBody struct { + Req *types.DisableRuleset `xml:"urn:vim25 DisableRuleset,omitempty"` + Res *types.DisableRulesetResponse `xml:"urn:vim25 DisableRulesetResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *DisableRulesetBody) Fault() *soap.Fault { return b.Fault_ } + +func DisableRuleset(ctx context.Context, r soap.RoundTripper, req *types.DisableRuleset) (*types.DisableRulesetResponse, error) { + var reqBody, resBody DisableRulesetBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type DisableSecondaryVM_TaskBody struct { + Req *types.DisableSecondaryVM_Task `xml:"urn:vim25 DisableSecondaryVM_Task,omitempty"` + Res *types.DisableSecondaryVM_TaskResponse `xml:"urn:vim25 DisableSecondaryVM_TaskResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *DisableSecondaryVM_TaskBody) Fault() *soap.Fault { 
return b.Fault_ } + +func DisableSecondaryVM_Task(ctx context.Context, r soap.RoundTripper, req *types.DisableSecondaryVM_Task) (*types.DisableSecondaryVM_TaskResponse, error) { + var reqBody, resBody DisableSecondaryVM_TaskBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type DisableSmartCardAuthenticationBody struct { + Req *types.DisableSmartCardAuthentication `xml:"urn:vim25 DisableSmartCardAuthentication,omitempty"` + Res *types.DisableSmartCardAuthenticationResponse `xml:"urn:vim25 DisableSmartCardAuthenticationResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *DisableSmartCardAuthenticationBody) Fault() *soap.Fault { return b.Fault_ } + +func DisableSmartCardAuthentication(ctx context.Context, r soap.RoundTripper, req *types.DisableSmartCardAuthentication) (*types.DisableSmartCardAuthenticationResponse, error) { + var reqBody, resBody DisableSmartCardAuthenticationBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type DisconnectHost_TaskBody struct { + Req *types.DisconnectHost_Task `xml:"urn:vim25 DisconnectHost_Task,omitempty"` + Res *types.DisconnectHost_TaskResponse `xml:"urn:vim25 DisconnectHost_TaskResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *DisconnectHost_TaskBody) Fault() *soap.Fault { return b.Fault_ } + +func DisconnectHost_Task(ctx context.Context, r soap.RoundTripper, req *types.DisconnectHost_Task) (*types.DisconnectHost_TaskResponse, error) { + var reqBody, resBody DisconnectHost_TaskBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type DiscoverFcoeHbasBody struct { + Req *types.DiscoverFcoeHbas `xml:"urn:vim25 DiscoverFcoeHbas,omitempty"` + Res *types.DiscoverFcoeHbasResponse `xml:"urn:vim25 DiscoverFcoeHbasResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *DiscoverFcoeHbasBody) Fault() *soap.Fault { return b.Fault_ } + +func DiscoverFcoeHbas(ctx context.Context, r soap.RoundTripper, req *types.DiscoverFcoeHbas) (*types.DiscoverFcoeHbasResponse, error) { + var reqBody, resBody DiscoverFcoeHbasBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type DissociateProfileBody struct { + Req *types.DissociateProfile `xml:"urn:vim25 DissociateProfile,omitempty"` + Res *types.DissociateProfileResponse `xml:"urn:vim25 DissociateProfileResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *DissociateProfileBody) Fault() *soap.Fault { return b.Fault_ } + +func DissociateProfile(ctx context.Context, r soap.RoundTripper, req *types.DissociateProfile) (*types.DissociateProfileResponse, error) { + var reqBody, resBody DissociateProfileBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type DoesCustomizationSpecExistBody struct { + Req *types.DoesCustomizationSpecExist `xml:"urn:vim25 DoesCustomizationSpecExist,omitempty"` + Res *types.DoesCustomizationSpecExistResponse `xml:"urn:vim25 
DoesCustomizationSpecExistResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *DoesCustomizationSpecExistBody) Fault() *soap.Fault { return b.Fault_ } + +func DoesCustomizationSpecExist(ctx context.Context, r soap.RoundTripper, req *types.DoesCustomizationSpecExist) (*types.DoesCustomizationSpecExistResponse, error) { + var reqBody, resBody DoesCustomizationSpecExistBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type DuplicateCustomizationSpecBody struct { + Req *types.DuplicateCustomizationSpec `xml:"urn:vim25 DuplicateCustomizationSpec,omitempty"` + Res *types.DuplicateCustomizationSpecResponse `xml:"urn:vim25 DuplicateCustomizationSpecResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *DuplicateCustomizationSpecBody) Fault() *soap.Fault { return b.Fault_ } + +func DuplicateCustomizationSpec(ctx context.Context, r soap.RoundTripper, req *types.DuplicateCustomizationSpec) (*types.DuplicateCustomizationSpecResponse, error) { + var reqBody, resBody DuplicateCustomizationSpecBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type DvsReconfigureVmVnicNetworkResourcePool_TaskBody struct { + Req *types.DvsReconfigureVmVnicNetworkResourcePool_Task `xml:"urn:vim25 DvsReconfigureVmVnicNetworkResourcePool_Task,omitempty"` + Res *types.DvsReconfigureVmVnicNetworkResourcePool_TaskResponse `xml:"urn:vim25 DvsReconfigureVmVnicNetworkResourcePool_TaskResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *DvsReconfigureVmVnicNetworkResourcePool_TaskBody) Fault() *soap.Fault { return b.Fault_ } + +func DvsReconfigureVmVnicNetworkResourcePool_Task(ctx context.Context, r soap.RoundTripper, req *types.DvsReconfigureVmVnicNetworkResourcePool_Task) (*types.DvsReconfigureVmVnicNetworkResourcePool_TaskResponse, error) { + var reqBody, resBody DvsReconfigureVmVnicNetworkResourcePool_TaskBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type EagerZeroVirtualDisk_TaskBody struct { + Req *types.EagerZeroVirtualDisk_Task `xml:"urn:vim25 EagerZeroVirtualDisk_Task,omitempty"` + Res *types.EagerZeroVirtualDisk_TaskResponse `xml:"urn:vim25 EagerZeroVirtualDisk_TaskResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *EagerZeroVirtualDisk_TaskBody) Fault() *soap.Fault { return b.Fault_ } + +func EagerZeroVirtualDisk_Task(ctx context.Context, r soap.RoundTripper, req *types.EagerZeroVirtualDisk_Task) (*types.EagerZeroVirtualDisk_TaskResponse, error) { + var reqBody, resBody EagerZeroVirtualDisk_TaskBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type EnableAlarmActionsBody struct { + Req *types.EnableAlarmActions `xml:"urn:vim25 EnableAlarmActions,omitempty"` + Res *types.EnableAlarmActionsResponse `xml:"urn:vim25 EnableAlarmActionsResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *EnableAlarmActionsBody) Fault() *soap.Fault { return b.Fault_ } + +func EnableAlarmActions(ctx context.Context, 
r soap.RoundTripper, req *types.EnableAlarmActions) (*types.EnableAlarmActionsResponse, error) { + var reqBody, resBody EnableAlarmActionsBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type EnableFeatureBody struct { + Req *types.EnableFeature `xml:"urn:vim25 EnableFeature,omitempty"` + Res *types.EnableFeatureResponse `xml:"urn:vim25 EnableFeatureResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *EnableFeatureBody) Fault() *soap.Fault { return b.Fault_ } + +func EnableFeature(ctx context.Context, r soap.RoundTripper, req *types.EnableFeature) (*types.EnableFeatureResponse, error) { + var reqBody, resBody EnableFeatureBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type EnableHyperThreadingBody struct { + Req *types.EnableHyperThreading `xml:"urn:vim25 EnableHyperThreading,omitempty"` + Res *types.EnableHyperThreadingResponse `xml:"urn:vim25 EnableHyperThreadingResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *EnableHyperThreadingBody) Fault() *soap.Fault { return b.Fault_ } + +func EnableHyperThreading(ctx context.Context, r soap.RoundTripper, req *types.EnableHyperThreading) (*types.EnableHyperThreadingResponse, error) { + var reqBody, resBody EnableHyperThreadingBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type EnableMultipathPathBody struct { + Req *types.EnableMultipathPath `xml:"urn:vim25 EnableMultipathPath,omitempty"` + Res *types.EnableMultipathPathResponse `xml:"urn:vim25 EnableMultipathPathResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *EnableMultipathPathBody) Fault() *soap.Fault { return b.Fault_ } + +func EnableMultipathPath(ctx context.Context, r soap.RoundTripper, req *types.EnableMultipathPath) (*types.EnableMultipathPathResponse, error) { + var reqBody, resBody EnableMultipathPathBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type EnableNetworkResourceManagementBody struct { + Req *types.EnableNetworkResourceManagement `xml:"urn:vim25 EnableNetworkResourceManagement,omitempty"` + Res *types.EnableNetworkResourceManagementResponse `xml:"urn:vim25 EnableNetworkResourceManagementResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *EnableNetworkResourceManagementBody) Fault() *soap.Fault { return b.Fault_ } + +func EnableNetworkResourceManagement(ctx context.Context, r soap.RoundTripper, req *types.EnableNetworkResourceManagement) (*types.EnableNetworkResourceManagementResponse, error) { + var reqBody, resBody EnableNetworkResourceManagementBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type EnableRulesetBody struct { + Req *types.EnableRuleset `xml:"urn:vim25 EnableRuleset,omitempty"` + Res *types.EnableRulesetResponse `xml:"urn:vim25 EnableRulesetResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *EnableRulesetBody) Fault() 
*soap.Fault { return b.Fault_ } + +func EnableRuleset(ctx context.Context, r soap.RoundTripper, req *types.EnableRuleset) (*types.EnableRulesetResponse, error) { + var reqBody, resBody EnableRulesetBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type EnableSecondaryVM_TaskBody struct { + Req *types.EnableSecondaryVM_Task `xml:"urn:vim25 EnableSecondaryVM_Task,omitempty"` + Res *types.EnableSecondaryVM_TaskResponse `xml:"urn:vim25 EnableSecondaryVM_TaskResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *EnableSecondaryVM_TaskBody) Fault() *soap.Fault { return b.Fault_ } + +func EnableSecondaryVM_Task(ctx context.Context, r soap.RoundTripper, req *types.EnableSecondaryVM_Task) (*types.EnableSecondaryVM_TaskResponse, error) { + var reqBody, resBody EnableSecondaryVM_TaskBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type EnableSmartCardAuthenticationBody struct { + Req *types.EnableSmartCardAuthentication `xml:"urn:vim25 EnableSmartCardAuthentication,omitempty"` + Res *types.EnableSmartCardAuthenticationResponse `xml:"urn:vim25 EnableSmartCardAuthenticationResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *EnableSmartCardAuthenticationBody) Fault() *soap.Fault { return b.Fault_ } + +func EnableSmartCardAuthentication(ctx context.Context, r soap.RoundTripper, req *types.EnableSmartCardAuthentication) (*types.EnableSmartCardAuthenticationResponse, error) { + var reqBody, resBody EnableSmartCardAuthenticationBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type EnterLockdownModeBody struct { + Req *types.EnterLockdownMode `xml:"urn:vim25 EnterLockdownMode,omitempty"` + Res *types.EnterLockdownModeResponse `xml:"urn:vim25 EnterLockdownModeResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *EnterLockdownModeBody) Fault() *soap.Fault { return b.Fault_ } + +func EnterLockdownMode(ctx context.Context, r soap.RoundTripper, req *types.EnterLockdownMode) (*types.EnterLockdownModeResponse, error) { + var reqBody, resBody EnterLockdownModeBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type EnterMaintenanceMode_TaskBody struct { + Req *types.EnterMaintenanceMode_Task `xml:"urn:vim25 EnterMaintenanceMode_Task,omitempty"` + Res *types.EnterMaintenanceMode_TaskResponse `xml:"urn:vim25 EnterMaintenanceMode_TaskResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *EnterMaintenanceMode_TaskBody) Fault() *soap.Fault { return b.Fault_ } + +func EnterMaintenanceMode_Task(ctx context.Context, r soap.RoundTripper, req *types.EnterMaintenanceMode_Task) (*types.EnterMaintenanceMode_TaskResponse, error) { + var reqBody, resBody EnterMaintenanceMode_TaskBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type EstimateDatabaseSizeBody struct { + Req *types.EstimateDatabaseSize `xml:"urn:vim25 EstimateDatabaseSize,omitempty"` + Res 
*types.EstimateDatabaseSizeResponse `xml:"urn:vim25 EstimateDatabaseSizeResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *EstimateDatabaseSizeBody) Fault() *soap.Fault { return b.Fault_ } + +func EstimateDatabaseSize(ctx context.Context, r soap.RoundTripper, req *types.EstimateDatabaseSize) (*types.EstimateDatabaseSizeResponse, error) { + var reqBody, resBody EstimateDatabaseSizeBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type EstimateStorageForConsolidateSnapshots_TaskBody struct { + Req *types.EstimateStorageForConsolidateSnapshots_Task `xml:"urn:vim25 EstimateStorageForConsolidateSnapshots_Task,omitempty"` + Res *types.EstimateStorageForConsolidateSnapshots_TaskResponse `xml:"urn:vim25 EstimateStorageForConsolidateSnapshots_TaskResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *EstimateStorageForConsolidateSnapshots_TaskBody) Fault() *soap.Fault { return b.Fault_ } + +func EstimateStorageForConsolidateSnapshots_Task(ctx context.Context, r soap.RoundTripper, req *types.EstimateStorageForConsolidateSnapshots_Task) (*types.EstimateStorageForConsolidateSnapshots_TaskResponse, error) { + var reqBody, resBody EstimateStorageForConsolidateSnapshots_TaskBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type EsxAgentHostManagerUpdateConfigBody struct { + Req *types.EsxAgentHostManagerUpdateConfig `xml:"urn:vim25 EsxAgentHostManagerUpdateConfig,omitempty"` + Res *types.EsxAgentHostManagerUpdateConfigResponse `xml:"urn:vim25 EsxAgentHostManagerUpdateConfigResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *EsxAgentHostManagerUpdateConfigBody) Fault() *soap.Fault { return b.Fault_ } + +func EsxAgentHostManagerUpdateConfig(ctx context.Context, r soap.RoundTripper, req *types.EsxAgentHostManagerUpdateConfig) (*types.EsxAgentHostManagerUpdateConfigResponse, error) { + var reqBody, resBody EsxAgentHostManagerUpdateConfigBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type EvacuateVsanNode_TaskBody struct { + Req *types.EvacuateVsanNode_Task `xml:"urn:vim25 EvacuateVsanNode_Task,omitempty"` + Res *types.EvacuateVsanNode_TaskResponse `xml:"urn:vim25 EvacuateVsanNode_TaskResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *EvacuateVsanNode_TaskBody) Fault() *soap.Fault { return b.Fault_ } + +func EvacuateVsanNode_Task(ctx context.Context, r soap.RoundTripper, req *types.EvacuateVsanNode_Task) (*types.EvacuateVsanNode_TaskResponse, error) { + var reqBody, resBody EvacuateVsanNode_TaskBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type EvcManagerBody struct { + Req *types.EvcManager `xml:"urn:vim25 EvcManager,omitempty"` + Res *types.EvcManagerResponse `xml:"urn:vim25 EvcManagerResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *EvcManagerBody) Fault() *soap.Fault { return b.Fault_ } + +func EvcManager(ctx context.Context, r soap.RoundTripper, req 
*types.EvcManager) (*types.EvcManagerResponse, error) { + var reqBody, resBody EvcManagerBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type ExecuteHostProfileBody struct { + Req *types.ExecuteHostProfile `xml:"urn:vim25 ExecuteHostProfile,omitempty"` + Res *types.ExecuteHostProfileResponse `xml:"urn:vim25 ExecuteHostProfileResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *ExecuteHostProfileBody) Fault() *soap.Fault { return b.Fault_ } + +func ExecuteHostProfile(ctx context.Context, r soap.RoundTripper, req *types.ExecuteHostProfile) (*types.ExecuteHostProfileResponse, error) { + var reqBody, resBody ExecuteHostProfileBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type ExecuteSimpleCommandBody struct { + Req *types.ExecuteSimpleCommand `xml:"urn:vim25 ExecuteSimpleCommand,omitempty"` + Res *types.ExecuteSimpleCommandResponse `xml:"urn:vim25 ExecuteSimpleCommandResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *ExecuteSimpleCommandBody) Fault() *soap.Fault { return b.Fault_ } + +func ExecuteSimpleCommand(ctx context.Context, r soap.RoundTripper, req *types.ExecuteSimpleCommand) (*types.ExecuteSimpleCommandResponse, error) { + var reqBody, resBody ExecuteSimpleCommandBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type ExitLockdownModeBody struct { + Req *types.ExitLockdownMode `xml:"urn:vim25 ExitLockdownMode,omitempty"` + Res *types.ExitLockdownModeResponse `xml:"urn:vim25 ExitLockdownModeResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *ExitLockdownModeBody) Fault() *soap.Fault { return b.Fault_ } + +func ExitLockdownMode(ctx context.Context, r soap.RoundTripper, req *types.ExitLockdownMode) (*types.ExitLockdownModeResponse, error) { + var reqBody, resBody ExitLockdownModeBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type ExitMaintenanceMode_TaskBody struct { + Req *types.ExitMaintenanceMode_Task `xml:"urn:vim25 ExitMaintenanceMode_Task,omitempty"` + Res *types.ExitMaintenanceMode_TaskResponse `xml:"urn:vim25 ExitMaintenanceMode_TaskResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *ExitMaintenanceMode_TaskBody) Fault() *soap.Fault { return b.Fault_ } + +func ExitMaintenanceMode_Task(ctx context.Context, r soap.RoundTripper, req *types.ExitMaintenanceMode_Task) (*types.ExitMaintenanceMode_TaskResponse, error) { + var reqBody, resBody ExitMaintenanceMode_TaskBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type ExpandVmfsDatastoreBody struct { + Req *types.ExpandVmfsDatastore `xml:"urn:vim25 ExpandVmfsDatastore,omitempty"` + Res *types.ExpandVmfsDatastoreResponse `xml:"urn:vim25 ExpandVmfsDatastoreResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *ExpandVmfsDatastoreBody) Fault() *soap.Fault { return b.Fault_ } + +func 
ExpandVmfsDatastore(ctx context.Context, r soap.RoundTripper, req *types.ExpandVmfsDatastore) (*types.ExpandVmfsDatastoreResponse, error) { + var reqBody, resBody ExpandVmfsDatastoreBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type ExpandVmfsExtentBody struct { + Req *types.ExpandVmfsExtent `xml:"urn:vim25 ExpandVmfsExtent,omitempty"` + Res *types.ExpandVmfsExtentResponse `xml:"urn:vim25 ExpandVmfsExtentResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *ExpandVmfsExtentBody) Fault() *soap.Fault { return b.Fault_ } + +func ExpandVmfsExtent(ctx context.Context, r soap.RoundTripper, req *types.ExpandVmfsExtent) (*types.ExpandVmfsExtentResponse, error) { + var reqBody, resBody ExpandVmfsExtentBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type ExportAnswerFile_TaskBody struct { + Req *types.ExportAnswerFile_Task `xml:"urn:vim25 ExportAnswerFile_Task,omitempty"` + Res *types.ExportAnswerFile_TaskResponse `xml:"urn:vim25 ExportAnswerFile_TaskResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *ExportAnswerFile_TaskBody) Fault() *soap.Fault { return b.Fault_ } + +func ExportAnswerFile_Task(ctx context.Context, r soap.RoundTripper, req *types.ExportAnswerFile_Task) (*types.ExportAnswerFile_TaskResponse, error) { + var reqBody, resBody ExportAnswerFile_TaskBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type ExportProfileBody struct { + Req *types.ExportProfile `xml:"urn:vim25 ExportProfile,omitempty"` + Res *types.ExportProfileResponse `xml:"urn:vim25 ExportProfileResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *ExportProfileBody) Fault() *soap.Fault { return b.Fault_ } + +func ExportProfile(ctx context.Context, r soap.RoundTripper, req *types.ExportProfile) (*types.ExportProfileResponse, error) { + var reqBody, resBody ExportProfileBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type ExportSnapshotBody struct { + Req *types.ExportSnapshot `xml:"urn:vim25 ExportSnapshot,omitempty"` + Res *types.ExportSnapshotResponse `xml:"urn:vim25 ExportSnapshotResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *ExportSnapshotBody) Fault() *soap.Fault { return b.Fault_ } + +func ExportSnapshot(ctx context.Context, r soap.RoundTripper, req *types.ExportSnapshot) (*types.ExportSnapshotResponse, error) { + var reqBody, resBody ExportSnapshotBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type ExportVAppBody struct { + Req *types.ExportVApp `xml:"urn:vim25 ExportVApp,omitempty"` + Res *types.ExportVAppResponse `xml:"urn:vim25 ExportVAppResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *ExportVAppBody) Fault() *soap.Fault { return b.Fault_ } + +func ExportVApp(ctx context.Context, r soap.RoundTripper, req *types.ExportVApp) (*types.ExportVAppResponse, error) { + var 
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type ExportVmBody struct {
+	Req *types.ExportVm `xml:"urn:vim25 ExportVm,omitempty"`
+	Res *types.ExportVmResponse `xml:"urn:vim25 ExportVmResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *ExportVmBody) Fault() *soap.Fault { return b.Fault_ }
+
+func ExportVm(ctx context.Context, r soap.RoundTripper, req *types.ExportVm) (*types.ExportVmResponse, error) {
+	var reqBody, resBody ExportVmBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type ExtendVffsBody struct {
+	Req *types.ExtendVffs `xml:"urn:vim25 ExtendVffs,omitempty"`
+	Res *types.ExtendVffsResponse `xml:"urn:vim25 ExtendVffsResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *ExtendVffsBody) Fault() *soap.Fault { return b.Fault_ }
+
+func ExtendVffs(ctx context.Context, r soap.RoundTripper, req *types.ExtendVffs) (*types.ExtendVffsResponse, error) {
+	var reqBody, resBody ExtendVffsBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type ExtendVirtualDisk_TaskBody struct {
+	Req *types.ExtendVirtualDisk_Task `xml:"urn:vim25 ExtendVirtualDisk_Task,omitempty"`
+	Res *types.ExtendVirtualDisk_TaskResponse `xml:"urn:vim25 ExtendVirtualDisk_TaskResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *ExtendVirtualDisk_TaskBody) Fault() *soap.Fault { return b.Fault_ }
+
+func ExtendVirtualDisk_Task(ctx context.Context, r soap.RoundTripper, req *types.ExtendVirtualDisk_Task) (*types.ExtendVirtualDisk_TaskResponse, error) {
+	var reqBody, resBody ExtendVirtualDisk_TaskBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type ExtendVmfsDatastoreBody struct {
+	Req *types.ExtendVmfsDatastore `xml:"urn:vim25 ExtendVmfsDatastore,omitempty"`
+	Res *types.ExtendVmfsDatastoreResponse `xml:"urn:vim25 ExtendVmfsDatastoreResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *ExtendVmfsDatastoreBody) Fault() *soap.Fault { return b.Fault_ }
+
+func ExtendVmfsDatastore(ctx context.Context, r soap.RoundTripper, req *types.ExtendVmfsDatastore) (*types.ExtendVmfsDatastoreResponse, error) {
+	var reqBody, resBody ExtendVmfsDatastoreBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type ExtractOvfEnvironmentBody struct {
+	Req *types.ExtractOvfEnvironment `xml:"urn:vim25 ExtractOvfEnvironment,omitempty"`
+	Res *types.ExtractOvfEnvironmentResponse `xml:"urn:vim25 ExtractOvfEnvironmentResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *ExtractOvfEnvironmentBody) Fault() *soap.Fault { return b.Fault_ }
+
+func ExtractOvfEnvironment(ctx context.Context, r soap.RoundTripper, req *types.ExtractOvfEnvironment) (*types.ExtractOvfEnvironmentResponse, error) {
+	var reqBody, resBody ExtractOvfEnvironmentBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type FetchDVPortKeysBody struct {
+	Req *types.FetchDVPortKeys `xml:"urn:vim25 FetchDVPortKeys,omitempty"`
+	Res *types.FetchDVPortKeysResponse `xml:"urn:vim25 FetchDVPortKeysResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *FetchDVPortKeysBody) Fault() *soap.Fault { return b.Fault_ }
+
+func FetchDVPortKeys(ctx context.Context, r soap.RoundTripper, req *types.FetchDVPortKeys) (*types.FetchDVPortKeysResponse, error) {
+	var reqBody, resBody FetchDVPortKeysBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type FetchDVPortsBody struct {
+	Req *types.FetchDVPorts `xml:"urn:vim25 FetchDVPorts,omitempty"`
+	Res *types.FetchDVPortsResponse `xml:"urn:vim25 FetchDVPortsResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *FetchDVPortsBody) Fault() *soap.Fault { return b.Fault_ }
+
+func FetchDVPorts(ctx context.Context, r soap.RoundTripper, req *types.FetchDVPorts) (*types.FetchDVPortsResponse, error) {
+	var reqBody, resBody FetchDVPortsBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type FindAllByDnsNameBody struct {
+	Req *types.FindAllByDnsName `xml:"urn:vim25 FindAllByDnsName,omitempty"`
+	Res *types.FindAllByDnsNameResponse `xml:"urn:vim25 FindAllByDnsNameResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *FindAllByDnsNameBody) Fault() *soap.Fault { return b.Fault_ }
+
+func FindAllByDnsName(ctx context.Context, r soap.RoundTripper, req *types.FindAllByDnsName) (*types.FindAllByDnsNameResponse, error) {
+	var reqBody, resBody FindAllByDnsNameBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type FindAllByIpBody struct {
+	Req *types.FindAllByIp `xml:"urn:vim25 FindAllByIp,omitempty"`
+	Res *types.FindAllByIpResponse `xml:"urn:vim25 FindAllByIpResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *FindAllByIpBody) Fault() *soap.Fault { return b.Fault_ }
+
+func FindAllByIp(ctx context.Context, r soap.RoundTripper, req *types.FindAllByIp) (*types.FindAllByIpResponse, error) {
+	var reqBody, resBody FindAllByIpBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type FindAllByUuidBody struct {
+	Req *types.FindAllByUuid `xml:"urn:vim25 FindAllByUuid,omitempty"`
+	Res *types.FindAllByUuidResponse `xml:"urn:vim25 FindAllByUuidResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *FindAllByUuidBody) Fault() *soap.Fault { return b.Fault_ }
+
+func FindAllByUuid(ctx context.Context, r soap.RoundTripper, req *types.FindAllByUuid) (*types.FindAllByUuidResponse, error) {
+	var reqBody, resBody FindAllByUuidBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type FindAssociatedProfileBody struct {
+	Req *types.FindAssociatedProfile `xml:"urn:vim25 FindAssociatedProfile,omitempty"`
+	Res *types.FindAssociatedProfileResponse `xml:"urn:vim25 FindAssociatedProfileResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *FindAssociatedProfileBody) Fault() *soap.Fault { return b.Fault_ }
+
+func FindAssociatedProfile(ctx context.Context, r soap.RoundTripper, req *types.FindAssociatedProfile) (*types.FindAssociatedProfileResponse, error) {
+	var reqBody, resBody FindAssociatedProfileBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type FindByDatastorePathBody struct {
+	Req *types.FindByDatastorePath `xml:"urn:vim25 FindByDatastorePath,omitempty"`
+	Res *types.FindByDatastorePathResponse `xml:"urn:vim25 FindByDatastorePathResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *FindByDatastorePathBody) Fault() *soap.Fault { return b.Fault_ }
+
+func FindByDatastorePath(ctx context.Context, r soap.RoundTripper, req *types.FindByDatastorePath) (*types.FindByDatastorePathResponse, error) {
+	var reqBody, resBody FindByDatastorePathBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type FindByDnsNameBody struct {
+	Req *types.FindByDnsName `xml:"urn:vim25 FindByDnsName,omitempty"`
+	Res *types.FindByDnsNameResponse `xml:"urn:vim25 FindByDnsNameResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *FindByDnsNameBody) Fault() *soap.Fault { return b.Fault_ }
+
+func FindByDnsName(ctx context.Context, r soap.RoundTripper, req *types.FindByDnsName) (*types.FindByDnsNameResponse, error) {
+	var reqBody, resBody FindByDnsNameBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type FindByInventoryPathBody struct {
+	Req *types.FindByInventoryPath `xml:"urn:vim25 FindByInventoryPath,omitempty"`
+	Res *types.FindByInventoryPathResponse `xml:"urn:vim25 FindByInventoryPathResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *FindByInventoryPathBody) Fault() *soap.Fault { return b.Fault_ }
+
+func FindByInventoryPath(ctx context.Context, r soap.RoundTripper, req *types.FindByInventoryPath) (*types.FindByInventoryPathResponse, error) {
+	var reqBody, resBody FindByInventoryPathBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type FindByIpBody struct {
+	Req *types.FindByIp `xml:"urn:vim25 FindByIp,omitempty"`
+	Res *types.FindByIpResponse `xml:"urn:vim25 FindByIpResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *FindByIpBody) Fault() *soap.Fault { return b.Fault_ }
+
+func FindByIp(ctx context.Context, r soap.RoundTripper, req *types.FindByIp) (*types.FindByIpResponse, error) {
+	var reqBody, resBody FindByIpBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type FindByUuidBody struct {
+	Req *types.FindByUuid `xml:"urn:vim25 FindByUuid,omitempty"`
+	Res *types.FindByUuidResponse `xml:"urn:vim25 FindByUuidResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *FindByUuidBody) Fault() *soap.Fault { return b.Fault_ }
+
+func FindByUuid(ctx context.Context, r soap.RoundTripper, req *types.FindByUuid) (*types.FindByUuidResponse, error) {
+	var reqBody, resBody FindByUuidBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type FindChildBody struct {
+	Req *types.FindChild `xml:"urn:vim25 FindChild,omitempty"`
+	Res *types.FindChildResponse `xml:"urn:vim25 FindChildResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *FindChildBody) Fault() *soap.Fault { return b.Fault_ }
+
+func FindChild(ctx context.Context, r soap.RoundTripper, req *types.FindChild) (*types.FindChildResponse, error) {
+	var reqBody, resBody FindChildBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type FindExtensionBody struct {
+	Req *types.FindExtension `xml:"urn:vim25 FindExtension,omitempty"`
+	Res *types.FindExtensionResponse `xml:"urn:vim25 FindExtensionResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *FindExtensionBody) Fault() *soap.Fault { return b.Fault_ }
+
+func FindExtension(ctx context.Context, r soap.RoundTripper, req *types.FindExtension) (*types.FindExtensionResponse, error) {
+	var reqBody, resBody FindExtensionBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type FindRulesForVmBody struct {
+	Req *types.FindRulesForVm `xml:"urn:vim25 FindRulesForVm,omitempty"`
+	Res *types.FindRulesForVmResponse `xml:"urn:vim25 FindRulesForVmResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *FindRulesForVmBody) Fault() *soap.Fault { return b.Fault_ }
+
+func FindRulesForVm(ctx context.Context, r soap.RoundTripper, req *types.FindRulesForVm) (*types.FindRulesForVmResponse, error) {
+	var reqBody, resBody FindRulesForVmBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type FormatVffsBody struct {
+	Req *types.FormatVffs `xml:"urn:vim25 FormatVffs,omitempty"`
+	Res *types.FormatVffsResponse `xml:"urn:vim25 FormatVffsResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *FormatVffsBody) Fault() *soap.Fault { return b.Fault_ }
+
+func FormatVffs(ctx context.Context, r soap.RoundTripper, req *types.FormatVffs) (*types.FormatVffsResponse, error) {
+	var reqBody, resBody FormatVffsBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type FormatVmfsBody struct {
+	Req *types.FormatVmfs `xml:"urn:vim25 FormatVmfs,omitempty"`
+	Res *types.FormatVmfsResponse `xml:"urn:vim25 FormatVmfsResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *FormatVmfsBody) Fault() *soap.Fault { return b.Fault_ }
+
+func FormatVmfs(ctx context.Context, r soap.RoundTripper, req *types.FormatVmfs) (*types.FormatVmfsResponse, error) {
+	var reqBody, resBody FormatVmfsBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type GenerateCertificateSigningRequestBody struct {
+	Req *types.GenerateCertificateSigningRequest `xml:"urn:vim25 GenerateCertificateSigningRequest,omitempty"`
+	Res *types.GenerateCertificateSigningRequestResponse `xml:"urn:vim25 GenerateCertificateSigningRequestResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *GenerateCertificateSigningRequestBody) Fault() *soap.Fault { return b.Fault_ }
+
+func GenerateCertificateSigningRequest(ctx context.Context, r soap.RoundTripper, req *types.GenerateCertificateSigningRequest) (*types.GenerateCertificateSigningRequestResponse, error) {
+	var reqBody, resBody GenerateCertificateSigningRequestBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type GenerateCertificateSigningRequestByDnBody struct {
+	Req *types.GenerateCertificateSigningRequestByDn `xml:"urn:vim25 GenerateCertificateSigningRequestByDn,omitempty"`
+	Res *types.GenerateCertificateSigningRequestByDnResponse `xml:"urn:vim25 GenerateCertificateSigningRequestByDnResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *GenerateCertificateSigningRequestByDnBody) Fault() *soap.Fault { return b.Fault_ }
+
+func GenerateCertificateSigningRequestByDn(ctx context.Context, r soap.RoundTripper, req *types.GenerateCertificateSigningRequestByDn) (*types.GenerateCertificateSigningRequestByDnResponse, error) {
+	var reqBody, resBody GenerateCertificateSigningRequestByDnBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type GenerateConfigTaskListBody struct {
+	Req *types.GenerateConfigTaskList `xml:"urn:vim25 GenerateConfigTaskList,omitempty"`
+	Res *types.GenerateConfigTaskListResponse `xml:"urn:vim25 GenerateConfigTaskListResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *GenerateConfigTaskListBody) Fault() *soap.Fault { return b.Fault_ }
+
+func GenerateConfigTaskList(ctx context.Context, r soap.RoundTripper, req *types.GenerateConfigTaskList) (*types.GenerateConfigTaskListResponse, error) {
+	var reqBody, resBody GenerateConfigTaskListBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type GenerateHostProfileTaskList_TaskBody struct {
+	Req *types.GenerateHostProfileTaskList_Task `xml:"urn:vim25 GenerateHostProfileTaskList_Task,omitempty"`
+	Res *types.GenerateHostProfileTaskList_TaskResponse `xml:"urn:vim25 GenerateHostProfileTaskList_TaskResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *GenerateHostProfileTaskList_TaskBody) Fault() *soap.Fault { return b.Fault_ }
+
+func GenerateHostProfileTaskList_Task(ctx context.Context, r soap.RoundTripper, req *types.GenerateHostProfileTaskList_Task) (*types.GenerateHostProfileTaskList_TaskResponse, error) {
+	var reqBody, resBody GenerateHostProfileTaskList_TaskBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type GenerateLogBundles_TaskBody struct {
+	Req *types.GenerateLogBundles_Task `xml:"urn:vim25 GenerateLogBundles_Task,omitempty"`
+	Res *types.GenerateLogBundles_TaskResponse `xml:"urn:vim25 GenerateLogBundles_TaskResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *GenerateLogBundles_TaskBody) Fault() *soap.Fault { return b.Fault_ }
+
+func GenerateLogBundles_Task(ctx context.Context, r soap.RoundTripper, req *types.GenerateLogBundles_Task) (*types.GenerateLogBundles_TaskResponse, error) {
+	var reqBody, resBody GenerateLogBundles_TaskBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type GetAlarmBody struct {
+	Req *types.GetAlarm `xml:"urn:vim25 GetAlarm,omitempty"`
+	Res *types.GetAlarmResponse `xml:"urn:vim25 GetAlarmResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *GetAlarmBody) Fault() *soap.Fault { return b.Fault_ }
+
+func GetAlarm(ctx context.Context, r soap.RoundTripper, req *types.GetAlarm) (*types.GetAlarmResponse, error) {
+	var reqBody, resBody GetAlarmBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type GetAlarmStateBody struct {
+	Req *types.GetAlarmState `xml:"urn:vim25 GetAlarmState,omitempty"`
+	Res *types.GetAlarmStateResponse `xml:"urn:vim25 GetAlarmStateResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *GetAlarmStateBody) Fault() *soap.Fault { return b.Fault_ }
+
+func GetAlarmState(ctx context.Context, r soap.RoundTripper, req *types.GetAlarmState) (*types.GetAlarmStateResponse, error) {
+	var reqBody, resBody GetAlarmStateBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type GetAllVRPIdsBody struct {
+	Req *types.GetAllVRPIds `xml:"urn:vim25 GetAllVRPIds,omitempty"`
+	Res *types.GetAllVRPIdsResponse `xml:"urn:vim25 GetAllVRPIdsResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *GetAllVRPIdsBody) Fault() *soap.Fault { return b.Fault_ }
+
+func GetAllVRPIds(ctx context.Context, r soap.RoundTripper, req *types.GetAllVRPIds) (*types.GetAllVRPIdsResponse, error) {
+	var reqBody, resBody GetAllVRPIdsBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type GetChildRPforHubBody struct {
+	Req *types.GetChildRPforHub `xml:"urn:vim25 GetChildRPforHub,omitempty"`
+	Res *types.GetChildRPforHubResponse `xml:"urn:vim25 GetChildRPforHubResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *GetChildRPforHubBody) Fault() *soap.Fault { return b.Fault_ }
+
+func GetChildRPforHub(ctx context.Context, r soap.RoundTripper, req *types.GetChildRPforHub) (*types.GetChildRPforHubResponse, error) {
+	var reqBody, resBody GetChildRPforHubBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type GetCustomizationSpecBody struct {
+	Req *types.GetCustomizationSpec `xml:"urn:vim25 GetCustomizationSpec,omitempty"`
+	Res *types.GetCustomizationSpecResponse `xml:"urn:vim25 GetCustomizationSpecResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *GetCustomizationSpecBody) Fault() *soap.Fault { return b.Fault_ }
+
+func GetCustomizationSpec(ctx context.Context, r soap.RoundTripper, req *types.GetCustomizationSpec) (*types.GetCustomizationSpecResponse, error) {
+	var reqBody, resBody GetCustomizationSpecBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type GetPublicKeyBody struct {
+	Req *types.GetPublicKey `xml:"urn:vim25 GetPublicKey,omitempty"`
+	Res *types.GetPublicKeyResponse `xml:"urn:vim25 GetPublicKeyResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *GetPublicKeyBody) Fault() *soap.Fault { return b.Fault_ }
+
+func GetPublicKey(ctx context.Context, r soap.RoundTripper, req *types.GetPublicKey) (*types.GetPublicKeyResponse, error) {
+	var reqBody, resBody GetPublicKeyBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type GetRPSettingsBody struct {
+	Req *types.GetRPSettings `xml:"urn:vim25 GetRPSettings,omitempty"`
+	Res *types.GetRPSettingsResponse `xml:"urn:vim25 GetRPSettingsResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *GetRPSettingsBody) Fault() *soap.Fault { return b.Fault_ }
+
+func GetRPSettings(ctx context.Context, r soap.RoundTripper, req *types.GetRPSettings) (*types.GetRPSettingsResponse, error) {
+	var reqBody, resBody GetRPSettingsBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type GetResourceUsageBody struct {
+	Req *types.GetResourceUsage `xml:"urn:vim25 GetResourceUsage,omitempty"`
+	Res *types.GetResourceUsageResponse `xml:"urn:vim25 GetResourceUsageResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *GetResourceUsageBody) Fault() *soap.Fault { return b.Fault_ }
+
+func GetResourceUsage(ctx context.Context, r soap.RoundTripper, req *types.GetResourceUsage) (*types.GetResourceUsageResponse, error) {
+	var reqBody, resBody GetResourceUsageBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type GetVRPSettingsBody struct {
+	Req *types.GetVRPSettings `xml:"urn:vim25 GetVRPSettings,omitempty"`
+	Res *types.GetVRPSettingsResponse `xml:"urn:vim25 GetVRPSettingsResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *GetVRPSettingsBody) Fault() *soap.Fault { return b.Fault_ }
+
+func GetVRPSettings(ctx context.Context, r soap.RoundTripper, req *types.GetVRPSettings) (*types.GetVRPSettingsResponse, error) {
+	var reqBody, resBody GetVRPSettingsBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type GetVRPUsageBody struct {
+	Req *types.GetVRPUsage `xml:"urn:vim25 GetVRPUsage,omitempty"`
+	Res *types.GetVRPUsageResponse `xml:"urn:vim25 GetVRPUsageResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *GetVRPUsageBody) Fault() *soap.Fault { return b.Fault_ }
+
+func GetVRPUsage(ctx context.Context, r soap.RoundTripper, req *types.GetVRPUsage) (*types.GetVRPUsageResponse, error) {
+	var reqBody, resBody GetVRPUsageBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type GetVRPofVMBody struct {
+	Req *types.GetVRPofVM `xml:"urn:vim25 GetVRPofVM,omitempty"`
+	Res *types.GetVRPofVMResponse `xml:"urn:vim25 GetVRPofVMResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *GetVRPofVMBody) Fault() *soap.Fault { return b.Fault_ }
+
+func GetVRPofVM(ctx context.Context, r soap.RoundTripper, req *types.GetVRPofVM) (*types.GetVRPofVMResponse, error) {
+	var reqBody, resBody GetVRPofVMBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type GetVsanObjExtAttrsBody struct {
+	Req *types.GetVsanObjExtAttrs `xml:"urn:vim25 GetVsanObjExtAttrs,omitempty"`
+	Res *types.GetVsanObjExtAttrsResponse `xml:"urn:vim25 GetVsanObjExtAttrsResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *GetVsanObjExtAttrsBody) Fault() *soap.Fault { return b.Fault_ }
+
+func GetVsanObjExtAttrs(ctx context.Context, r soap.RoundTripper, req *types.GetVsanObjExtAttrs) (*types.GetVsanObjExtAttrsResponse, error) {
+	var reqBody, resBody GetVsanObjExtAttrsBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type HasPrivilegeOnEntitiesBody struct {
+	Req *types.HasPrivilegeOnEntities `xml:"urn:vim25 HasPrivilegeOnEntities,omitempty"`
+	Res *types.HasPrivilegeOnEntitiesResponse `xml:"urn:vim25 HasPrivilegeOnEntitiesResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *HasPrivilegeOnEntitiesBody) Fault() *soap.Fault { return b.Fault_ }
+
+func HasPrivilegeOnEntities(ctx context.Context, r soap.RoundTripper, req *types.HasPrivilegeOnEntities) (*types.HasPrivilegeOnEntitiesResponse, error) {
+	var reqBody, resBody HasPrivilegeOnEntitiesBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type HasPrivilegeOnEntityBody struct {
+	Req *types.HasPrivilegeOnEntity `xml:"urn:vim25 HasPrivilegeOnEntity,omitempty"`
+	Res *types.HasPrivilegeOnEntityResponse `xml:"urn:vim25 HasPrivilegeOnEntityResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *HasPrivilegeOnEntityBody) Fault() *soap.Fault { return b.Fault_ }
+
+func HasPrivilegeOnEntity(ctx context.Context, r soap.RoundTripper, req *types.HasPrivilegeOnEntity) (*types.HasPrivilegeOnEntityResponse, error) {
+	var reqBody, resBody HasPrivilegeOnEntityBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type HostConfigVFlashCacheBody struct {
+	Req *types.HostConfigVFlashCache `xml:"urn:vim25 HostConfigVFlashCache,omitempty"`
+	Res *types.HostConfigVFlashCacheResponse `xml:"urn:vim25 HostConfigVFlashCacheResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *HostConfigVFlashCacheBody) Fault() *soap.Fault { return b.Fault_ }
+
+func HostConfigVFlashCache(ctx context.Context, r soap.RoundTripper, req *types.HostConfigVFlashCache) (*types.HostConfigVFlashCacheResponse, error) {
+	var reqBody, resBody HostConfigVFlashCacheBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type HostConfigureVFlashResourceBody struct {
+	Req *types.HostConfigureVFlashResource `xml:"urn:vim25 HostConfigureVFlashResource,omitempty"`
+	Res *types.HostConfigureVFlashResourceResponse `xml:"urn:vim25 HostConfigureVFlashResourceResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *HostConfigureVFlashResourceBody) Fault() *soap.Fault { return b.Fault_ }
+
+func HostConfigureVFlashResource(ctx context.Context, r soap.RoundTripper, req *types.HostConfigureVFlashResource) (*types.HostConfigureVFlashResourceResponse, error) {
+	var reqBody, resBody HostConfigureVFlashResourceBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type HostGetVFlashModuleDefaultConfigBody struct {
+	Req *types.HostGetVFlashModuleDefaultConfig `xml:"urn:vim25 HostGetVFlashModuleDefaultConfig,omitempty"`
+	Res *types.HostGetVFlashModuleDefaultConfigResponse `xml:"urn:vim25 HostGetVFlashModuleDefaultConfigResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *HostGetVFlashModuleDefaultConfigBody) Fault() *soap.Fault { return b.Fault_ }
+
+func HostGetVFlashModuleDefaultConfig(ctx context.Context, r soap.RoundTripper, req *types.HostGetVFlashModuleDefaultConfig) (*types.HostGetVFlashModuleDefaultConfigResponse, error) {
+	var reqBody, resBody HostGetVFlashModuleDefaultConfigBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type HostImageConfigGetAcceptanceBody struct {
+	Req *types.HostImageConfigGetAcceptance `xml:"urn:vim25 HostImageConfigGetAcceptance,omitempty"`
+	Res *types.HostImageConfigGetAcceptanceResponse `xml:"urn:vim25 HostImageConfigGetAcceptanceResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *HostImageConfigGetAcceptanceBody) Fault() *soap.Fault { return b.Fault_ }
+
+func HostImageConfigGetAcceptance(ctx context.Context, r soap.RoundTripper, req *types.HostImageConfigGetAcceptance) (*types.HostImageConfigGetAcceptanceResponse, error) {
+	var reqBody, resBody HostImageConfigGetAcceptanceBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type HostImageConfigGetProfileBody struct {
+	Req *types.HostImageConfigGetProfile `xml:"urn:vim25 HostImageConfigGetProfile,omitempty"`
+	Res *types.HostImageConfigGetProfileResponse `xml:"urn:vim25 HostImageConfigGetProfileResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *HostImageConfigGetProfileBody) Fault() *soap.Fault { return b.Fault_ }
+
+func HostImageConfigGetProfile(ctx context.Context, r soap.RoundTripper, req *types.HostImageConfigGetProfile) (*types.HostImageConfigGetProfileResponse, error) {
+	var reqBody, resBody HostImageConfigGetProfileBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type HostRemoveVFlashResourceBody struct {
+	Req *types.HostRemoveVFlashResource `xml:"urn:vim25 HostRemoveVFlashResource,omitempty"`
+	Res *types.HostRemoveVFlashResourceResponse `xml:"urn:vim25 HostRemoveVFlashResourceResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *HostRemoveVFlashResourceBody) Fault() *soap.Fault { return b.Fault_ }
+
+func HostRemoveVFlashResource(ctx context.Context, r soap.RoundTripper, req *types.HostRemoveVFlashResource) (*types.HostRemoveVFlashResourceResponse, error) {
+	var reqBody, resBody HostRemoveVFlashResourceBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type HttpNfcLeaseAbortBody struct {
+	Req *types.HttpNfcLeaseAbort `xml:"urn:vim25 HttpNfcLeaseAbort,omitempty"`
+	Res *types.HttpNfcLeaseAbortResponse `xml:"urn:vim25 HttpNfcLeaseAbortResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *HttpNfcLeaseAbortBody) Fault() *soap.Fault { return b.Fault_ }
+
+func HttpNfcLeaseAbort(ctx context.Context, r soap.RoundTripper, req *types.HttpNfcLeaseAbort) (*types.HttpNfcLeaseAbortResponse, error) {
+	var reqBody, resBody HttpNfcLeaseAbortBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type HttpNfcLeaseCompleteBody struct {
+	Req *types.HttpNfcLeaseComplete `xml:"urn:vim25 HttpNfcLeaseComplete,omitempty"`
+	Res *types.HttpNfcLeaseCompleteResponse `xml:"urn:vim25 HttpNfcLeaseCompleteResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *HttpNfcLeaseCompleteBody) Fault() *soap.Fault { return b.Fault_ }
+
+func HttpNfcLeaseComplete(ctx context.Context, r soap.RoundTripper, req *types.HttpNfcLeaseComplete) (*types.HttpNfcLeaseCompleteResponse, error) {
+	var reqBody, resBody HttpNfcLeaseCompleteBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type HttpNfcLeaseGetManifestBody struct {
+	Req *types.HttpNfcLeaseGetManifest `xml:"urn:vim25 HttpNfcLeaseGetManifest,omitempty"`
+	Res *types.HttpNfcLeaseGetManifestResponse `xml:"urn:vim25 HttpNfcLeaseGetManifestResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *HttpNfcLeaseGetManifestBody) Fault() *soap.Fault { return b.Fault_ }
+
+func HttpNfcLeaseGetManifest(ctx context.Context, r soap.RoundTripper, req *types.HttpNfcLeaseGetManifest) (*types.HttpNfcLeaseGetManifestResponse, error) {
+	var reqBody, resBody HttpNfcLeaseGetManifestBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type HttpNfcLeaseProgressBody struct {
+	Req *types.HttpNfcLeaseProgress `xml:"urn:vim25 HttpNfcLeaseProgress,omitempty"`
+	Res *types.HttpNfcLeaseProgressResponse `xml:"urn:vim25 HttpNfcLeaseProgressResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *HttpNfcLeaseProgressBody) Fault() *soap.Fault { return b.Fault_ }
+
+func HttpNfcLeaseProgress(ctx context.Context, r soap.RoundTripper, req *types.HttpNfcLeaseProgress) (*types.HttpNfcLeaseProgressResponse, error) {
+	var reqBody, resBody HttpNfcLeaseProgressBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type ImpersonateUserBody struct {
+	Req *types.ImpersonateUser `xml:"urn:vim25 ImpersonateUser,omitempty"`
+	Res *types.ImpersonateUserResponse `xml:"urn:vim25 ImpersonateUserResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *ImpersonateUserBody) Fault() *soap.Fault { return b.Fault_ }
+
+func ImpersonateUser(ctx context.Context, r soap.RoundTripper, req *types.ImpersonateUser) (*types.ImpersonateUserResponse, error) {
+	var reqBody, resBody ImpersonateUserBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type ImportCertificateForCAM_TaskBody struct {
+	Req *types.ImportCertificateForCAM_Task `xml:"urn:vim25 ImportCertificateForCAM_Task,omitempty"`
+	Res *types.ImportCertificateForCAM_TaskResponse `xml:"urn:vim25 ImportCertificateForCAM_TaskResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *ImportCertificateForCAM_TaskBody) Fault() *soap.Fault { return b.Fault_ }
+
+func ImportCertificateForCAM_Task(ctx context.Context, r soap.RoundTripper, req *types.ImportCertificateForCAM_Task) (*types.ImportCertificateForCAM_TaskResponse, error) {
+	var reqBody, resBody ImportCertificateForCAM_TaskBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type ImportUnmanagedSnapshotBody struct {
+	Req *types.ImportUnmanagedSnapshot `xml:"urn:vim25 ImportUnmanagedSnapshot,omitempty"`
+	Res *types.ImportUnmanagedSnapshotResponse `xml:"urn:vim25 ImportUnmanagedSnapshotResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *ImportUnmanagedSnapshotBody) Fault() *soap.Fault { return b.Fault_ }
+
+func ImportUnmanagedSnapshot(ctx context.Context, r soap.RoundTripper, req *types.ImportUnmanagedSnapshot) (*types.ImportUnmanagedSnapshotResponse, error) {
+	var reqBody, resBody ImportUnmanagedSnapshotBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type ImportVAppBody struct {
+	Req *types.ImportVApp `xml:"urn:vim25 ImportVApp,omitempty"`
+	Res *types.ImportVAppResponse `xml:"urn:vim25 ImportVAppResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *ImportVAppBody) Fault() *soap.Fault { return b.Fault_ }
+
+func ImportVApp(ctx context.Context, r soap.RoundTripper, req *types.ImportVApp) (*types.ImportVAppResponse, error) {
+	var reqBody, resBody ImportVAppBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type InflateVirtualDisk_TaskBody struct {
+	Req *types.InflateVirtualDisk_Task `xml:"urn:vim25 InflateVirtualDisk_Task,omitempty"`
+	Res *types.InflateVirtualDisk_TaskResponse `xml:"urn:vim25 InflateVirtualDisk_TaskResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *InflateVirtualDisk_TaskBody) Fault() *soap.Fault { return b.Fault_ }
+
+func InflateVirtualDisk_Task(ctx context.Context, r soap.RoundTripper, req *types.InflateVirtualDisk_Task) (*types.InflateVirtualDisk_TaskResponse, error) {
+	var reqBody, resBody InflateVirtualDisk_TaskBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type InitializeDisks_TaskBody struct {
+	Req *types.InitializeDisks_Task `xml:"urn:vim25 InitializeDisks_Task,omitempty"`
+	Res *types.InitializeDisks_TaskResponse `xml:"urn:vim25 InitializeDisks_TaskResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *InitializeDisks_TaskBody) Fault() *soap.Fault { return b.Fault_ }
+
+func InitializeDisks_Task(ctx context.Context, r soap.RoundTripper, req *types.InitializeDisks_Task) (*types.InitializeDisks_TaskResponse, error) {
+	var reqBody, resBody InitializeDisks_TaskBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type InitiateFileTransferFromGuestBody struct {
+	Req *types.InitiateFileTransferFromGuest `xml:"urn:vim25 InitiateFileTransferFromGuest,omitempty"`
+	Res *types.InitiateFileTransferFromGuestResponse `xml:"urn:vim25 InitiateFileTransferFromGuestResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *InitiateFileTransferFromGuestBody) Fault() *soap.Fault { return b.Fault_ }
+
+func InitiateFileTransferFromGuest(ctx context.Context, r soap.RoundTripper, req *types.InitiateFileTransferFromGuest) (*types.InitiateFileTransferFromGuestResponse, error) {
+	var reqBody, resBody InitiateFileTransferFromGuestBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type InitiateFileTransferToGuestBody struct {
+	Req *types.InitiateFileTransferToGuest `xml:"urn:vim25 InitiateFileTransferToGuest,omitempty"`
+	Res *types.InitiateFileTransferToGuestResponse `xml:"urn:vim25 InitiateFileTransferToGuestResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *InitiateFileTransferToGuestBody) Fault() *soap.Fault { return b.Fault_ }
+
+func InitiateFileTransferToGuest(ctx context.Context, r soap.RoundTripper, req *types.InitiateFileTransferToGuest) (*types.InitiateFileTransferToGuestResponse, error) {
+	var reqBody, resBody InitiateFileTransferToGuestBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type InstallHostPatchV2_TaskBody struct {
+	Req *types.InstallHostPatchV2_Task `xml:"urn:vim25 InstallHostPatchV2_Task,omitempty"`
+	Res *types.InstallHostPatchV2_TaskResponse `xml:"urn:vim25 InstallHostPatchV2_TaskResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *InstallHostPatchV2_TaskBody) Fault() *soap.Fault { return b.Fault_ }
+
+func InstallHostPatchV2_Task(ctx context.Context, r soap.RoundTripper, req *types.InstallHostPatchV2_Task) (*types.InstallHostPatchV2_TaskResponse, error) {
+	var reqBody, resBody InstallHostPatchV2_TaskBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type InstallHostPatch_TaskBody struct {
+	Req *types.InstallHostPatch_Task `xml:"urn:vim25 InstallHostPatch_Task,omitempty"`
+	Res *types.InstallHostPatch_TaskResponse `xml:"urn:vim25 InstallHostPatch_TaskResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *InstallHostPatch_TaskBody) Fault() *soap.Fault { return b.Fault_ }
+
+func InstallHostPatch_Task(ctx context.Context, r soap.RoundTripper, req *types.InstallHostPatch_Task) (*types.InstallHostPatch_TaskResponse, error) {
+	var reqBody, resBody InstallHostPatch_TaskBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type InstallIoFilter_TaskBody struct {
+	Req *types.InstallIoFilter_Task `xml:"urn:vim25 InstallIoFilter_Task,omitempty"`
+	Res *types.InstallIoFilter_TaskResponse `xml:"urn:vim25 InstallIoFilter_TaskResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *InstallIoFilter_TaskBody) Fault() *soap.Fault { return b.Fault_ }
+
+func InstallIoFilter_Task(ctx context.Context, r soap.RoundTripper, req *types.InstallIoFilter_Task) (*types.InstallIoFilter_TaskResponse, error) {
+	var reqBody, resBody InstallIoFilter_TaskBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type InstallServerCertificateBody struct {
+	Req *types.InstallServerCertificate `xml:"urn:vim25 InstallServerCertificate,omitempty"`
+	Res *types.InstallServerCertificateResponse `xml:"urn:vim25 InstallServerCertificateResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *InstallServerCertificateBody) Fault() *soap.Fault { return b.Fault_ }
+
+func InstallServerCertificate(ctx context.Context, r soap.RoundTripper, req *types.InstallServerCertificate) (*types.InstallServerCertificateResponse, error) {
+	var reqBody, resBody InstallServerCertificateBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type InstallSmartCardTrustAnchorBody struct {
+	Req *types.InstallSmartCardTrustAnchor `xml:"urn:vim25 InstallSmartCardTrustAnchor,omitempty"`
+	Res *types.InstallSmartCardTrustAnchorResponse `xml:"urn:vim25 InstallSmartCardTrustAnchorResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *InstallSmartCardTrustAnchorBody) Fault() *soap.Fault { return b.Fault_ }
+
+func InstallSmartCardTrustAnchor(ctx context.Context, r soap.RoundTripper, req *types.InstallSmartCardTrustAnchor) (*types.InstallSmartCardTrustAnchorResponse, error) {
+	var reqBody, resBody InstallSmartCardTrustAnchorBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type IsSharedGraphicsActiveBody struct {
+	Req *types.IsSharedGraphicsActive `xml:"urn:vim25 IsSharedGraphicsActive,omitempty"`
+	Res *types.IsSharedGraphicsActiveResponse `xml:"urn:vim25 IsSharedGraphicsActiveResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *IsSharedGraphicsActiveBody) Fault() *soap.Fault { return b.Fault_ }
+
+func IsSharedGraphicsActive(ctx context.Context, r soap.RoundTripper, req *types.IsSharedGraphicsActive) (*types.IsSharedGraphicsActiveResponse, error) {
+	var reqBody, resBody IsSharedGraphicsActiveBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type JoinDomainWithCAM_TaskBody struct {
+	Req *types.JoinDomainWithCAM_Task `xml:"urn:vim25 JoinDomainWithCAM_Task,omitempty"`
+	Res *types.JoinDomainWithCAM_TaskResponse `xml:"urn:vim25 JoinDomainWithCAM_TaskResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *JoinDomainWithCAM_TaskBody) Fault() *soap.Fault { return b.Fault_ }
+
+func JoinDomainWithCAM_Task(ctx context.Context, r soap.RoundTripper, req *types.JoinDomainWithCAM_Task) (*types.JoinDomainWithCAM_TaskResponse, error) {
+	var reqBody, resBody JoinDomainWithCAM_TaskBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type JoinDomain_TaskBody struct {
+	Req *types.JoinDomain_Task `xml:"urn:vim25 JoinDomain_Task,omitempty"`
+	Res *types.JoinDomain_TaskResponse `xml:"urn:vim25 JoinDomain_TaskResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *JoinDomain_TaskBody) Fault() *soap.Fault { return b.Fault_ }
+
+func JoinDomain_Task(ctx context.Context, r soap.RoundTripper, req *types.JoinDomain_Task) (*types.JoinDomain_TaskResponse, error) {
+	var reqBody, resBody JoinDomain_TaskBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type LeaveCurrentDomain_TaskBody struct {
+	Req *types.LeaveCurrentDomain_Task `xml:"urn:vim25 LeaveCurrentDomain_Task,omitempty"`
+	Res *types.LeaveCurrentDomain_TaskResponse `xml:"urn:vim25 LeaveCurrentDomain_TaskResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *LeaveCurrentDomain_TaskBody) Fault() *soap.Fault { return b.Fault_ }
+
+func LeaveCurrentDomain_Task(ctx context.Context, r soap.RoundTripper, req *types.LeaveCurrentDomain_Task) (*types.LeaveCurrentDomain_TaskResponse, error) {
+	var reqBody, resBody LeaveCurrentDomain_TaskBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type ListCACertificateRevocationListsBody struct {
+	Req *types.ListCACertificateRevocationLists `xml:"urn:vim25 ListCACertificateRevocationLists,omitempty"`
+	Res *types.ListCACertificateRevocationListsResponse `xml:"urn:vim25 ListCACertificateRevocationListsResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *ListCACertificateRevocationListsBody) Fault() *soap.Fault { return b.Fault_ }
+
+func ListCACertificateRevocationLists(ctx context.Context, r soap.RoundTripper, req *types.ListCACertificateRevocationLists) (*types.ListCACertificateRevocationListsResponse, error) {
+	var reqBody, resBody ListCACertificateRevocationListsBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type ListCACertificatesBody struct {
+	Req *types.ListCACertificates `xml:"urn:vim25 ListCACertificates,omitempty"`
+	Res *types.ListCACertificatesResponse `xml:"urn:vim25 ListCACertificatesResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *ListCACertificatesBody) Fault() *soap.Fault { return b.Fault_ }
+
+func ListCACertificates(ctx context.Context, r soap.RoundTripper, req *types.ListCACertificates) (*types.ListCACertificatesResponse, error) {
+	var reqBody, resBody ListCACertificatesBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type ListFilesInGuestBody struct {
+	Req *types.ListFilesInGuest `xml:"urn:vim25 ListFilesInGuest,omitempty"`
+	Res *types.ListFilesInGuestResponse `xml:"urn:vim25 ListFilesInGuestResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *ListFilesInGuestBody) Fault() *soap.Fault { return b.Fault_ }
+
+func ListFilesInGuest(ctx context.Context, r soap.RoundTripper, req *types.ListFilesInGuest) (*types.ListFilesInGuestResponse, error) {
+	var reqBody, resBody ListFilesInGuestBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type ListGuestAliasesBody struct {
+	Req *types.ListGuestAliases `xml:"urn:vim25 ListGuestAliases,omitempty"`
+	Res *types.ListGuestAliasesResponse `xml:"urn:vim25 ListGuestAliasesResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *ListGuestAliasesBody) Fault() *soap.Fault { return b.Fault_ }
+
+func ListGuestAliases(ctx context.Context, r soap.RoundTripper, req *types.ListGuestAliases) (*types.ListGuestAliasesResponse, error) {
+	var reqBody, resBody ListGuestAliasesBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type ListGuestMappedAliasesBody struct {
+	Req *types.ListGuestMappedAliases `xml:"urn:vim25 ListGuestMappedAliases,omitempty"`
+	Res *types.ListGuestMappedAliasesResponse `xml:"urn:vim25 ListGuestMappedAliasesResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *ListGuestMappedAliasesBody) Fault() *soap.Fault { return b.Fault_ }
+
+func ListGuestMappedAliases(ctx context.Context, r soap.RoundTripper, req *types.ListGuestMappedAliases) (*types.ListGuestMappedAliasesResponse, error) {
+	var reqBody, resBody ListGuestMappedAliasesBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type ListProcessesInGuestBody struct {
+	Req *types.ListProcessesInGuest `xml:"urn:vim25 ListProcessesInGuest,omitempty"`
+	Res *types.ListProcessesInGuestResponse `xml:"urn:vim25 ListProcessesInGuestResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *ListProcessesInGuestBody) Fault() *soap.Fault { return b.Fault_ }
+
+func ListProcessesInGuest(ctx context.Context, r soap.RoundTripper, req *types.ListProcessesInGuest) (*types.ListProcessesInGuestResponse, error) {
+	var reqBody, resBody ListProcessesInGuestBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type ListRegistryKeysInGuestBody struct {
+	Req *types.ListRegistryKeysInGuest `xml:"urn:vim25 ListRegistryKeysInGuest,omitempty"`
+	Res *types.ListRegistryKeysInGuestResponse `xml:"urn:vim25 ListRegistryKeysInGuestResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *ListRegistryKeysInGuestBody) Fault() *soap.Fault { return b.Fault_ }
+
+func ListRegistryKeysInGuest(ctx context.Context, r soap.RoundTripper, req *types.ListRegistryKeysInGuest) (*types.ListRegistryKeysInGuestResponse, error) {
+	var reqBody, resBody ListRegistryKeysInGuestBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type ListRegistryValuesInGuestBody struct {
+	Req *types.ListRegistryValuesInGuest `xml:"urn:vim25 ListRegistryValuesInGuest,omitempty"`
+	Res *types.ListRegistryValuesInGuestResponse `xml:"urn:vim25 ListRegistryValuesInGuestResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *ListRegistryValuesInGuestBody) Fault() *soap.Fault { return b.Fault_ }
+
+func ListRegistryValuesInGuest(ctx context.Context, r soap.RoundTripper, req *types.ListRegistryValuesInGuest) (*types.ListRegistryValuesInGuestResponse, error) {
+	var reqBody, resBody ListRegistryValuesInGuestBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type ListSmartCardTrustAnchorsBody struct {
+	Req *types.ListSmartCardTrustAnchors `xml:"urn:vim25 ListSmartCardTrustAnchors,omitempty"`
+	Res *types.ListSmartCardTrustAnchorsResponse `xml:"urn:vim25 ListSmartCardTrustAnchorsResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *ListSmartCardTrustAnchorsBody) Fault() *soap.Fault { return b.Fault_ }
+
+func ListSmartCardTrustAnchors(ctx context.Context, r soap.RoundTripper, req *types.ListSmartCardTrustAnchors) (*types.ListSmartCardTrustAnchorsResponse, error) {
+	var reqBody, resBody ListSmartCardTrustAnchorsBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type LogUserEventBody struct {
+	Req *types.LogUserEvent `xml:"urn:vim25 LogUserEvent,omitempty"`
+	Res *types.LogUserEventResponse `xml:"urn:vim25 LogUserEventResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *LogUserEventBody) Fault() *soap.Fault { return b.Fault_ }
+
+func LogUserEvent(ctx context.Context, r soap.RoundTripper, req *types.LogUserEvent) (*types.LogUserEventResponse, error) {
+	var reqBody, resBody LogUserEventBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type LoginBody struct {
+	Req *types.Login `xml:"urn:vim25 Login,omitempty"`
+	Res *types.LoginResponse `xml:"urn:vim25 LoginResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *LoginBody) Fault() *soap.Fault { return b.Fault_ }
+
+func Login(ctx context.Context, r soap.RoundTripper, req *types.Login) (*types.LoginResponse, error) {
+	var reqBody, resBody LoginBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type LoginBySSPIBody struct {
+	Req *types.LoginBySSPI `xml:"urn:vim25 LoginBySSPI,omitempty"`
+	Res *types.LoginBySSPIResponse `xml:"urn:vim25 LoginBySSPIResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *LoginBySSPIBody) Fault() *soap.Fault { return b.Fault_ }
+
+func LoginBySSPI(ctx context.Context, r soap.RoundTripper, req *types.LoginBySSPI) (*types.LoginBySSPIResponse, error) {
+	var reqBody, resBody LoginBySSPIBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type LoginByTokenBody struct {
+	Req *types.LoginByToken `xml:"urn:vim25 LoginByToken,omitempty"`
+	Res *types.LoginByTokenResponse `xml:"urn:vim25 LoginByTokenResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *LoginByTokenBody) Fault() *soap.Fault { return b.Fault_ }
+
+func LoginByToken(ctx context.Context, r soap.RoundTripper, req *types.LoginByToken) (*types.LoginByTokenResponse, error) {
+	var reqBody, resBody LoginByTokenBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type LoginExtensionByCertificateBody struct {
+	Req *types.LoginExtensionByCertificate `xml:"urn:vim25 LoginExtensionByCertificate,omitempty"`
+	Res *types.LoginExtensionByCertificateResponse `xml:"urn:vim25 LoginExtensionByCertificateResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *LoginExtensionByCertificateBody) Fault() *soap.Fault { return b.Fault_ }
+
+func LoginExtensionByCertificate(ctx context.Context, r soap.RoundTripper, req *types.LoginExtensionByCertificate) (*types.LoginExtensionByCertificateResponse, error) {
+	var reqBody, resBody LoginExtensionByCertificateBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type LoginExtensionBySubjectNameBody struct {
+	Req *types.LoginExtensionBySubjectName `xml:"urn:vim25 LoginExtensionBySubjectName,omitempty"`
+	Res *types.LoginExtensionBySubjectNameResponse `xml:"urn:vim25 LoginExtensionBySubjectNameResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *LoginExtensionBySubjectNameBody) Fault() *soap.Fault { return b.Fault_ }
+
+func LoginExtensionBySubjectName(ctx context.Context, r soap.RoundTripper, req *types.LoginExtensionBySubjectName) (*types.LoginExtensionBySubjectNameResponse, error) {
+	var reqBody, resBody LoginExtensionBySubjectNameBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type LogoutBody struct {
+	Req *types.Logout `xml:"urn:vim25 Logout,omitempty"`
+	Res *types.LogoutResponse `xml:"urn:vim25 LogoutResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *LogoutBody) Fault() *soap.Fault { return b.Fault_ }
+
+func Logout(ctx context.Context, r soap.RoundTripper, req *types.Logout) (*types.LogoutResponse, error) {
+	var reqBody, resBody LogoutBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type LookupDvPortGroupBody struct {
+	Req *types.LookupDvPortGroup `xml:"urn:vim25 LookupDvPortGroup,omitempty"`
+	Res *types.LookupDvPortGroupResponse `xml:"urn:vim25 LookupDvPortGroupResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *LookupDvPortGroupBody) Fault() *soap.Fault { return b.Fault_ }
+
+func LookupDvPortGroup(ctx context.Context, r soap.RoundTripper, req *types.LookupDvPortGroup) (*types.LookupDvPortGroupResponse, error) {
+func LookupDvPortGroup(ctx context.Context, r soap.RoundTripper, req *types.LookupDvPortGroup) (*types.LookupDvPortGroupResponse, error) {
+	var reqBody, resBody LookupDvPortGroupBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type LookupVmOverheadMemoryBody struct {
+	Req *types.LookupVmOverheadMemory `xml:"urn:vim25 LookupVmOverheadMemory,omitempty"`
+	Res *types.LookupVmOverheadMemoryResponse `xml:"urn:vim25 LookupVmOverheadMemoryResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *LookupVmOverheadMemoryBody) Fault() *soap.Fault { return b.Fault_ }
+
+func LookupVmOverheadMemory(ctx context.Context, r soap.RoundTripper, req *types.LookupVmOverheadMemory) (*types.LookupVmOverheadMemoryResponse, error) {
+	var reqBody, resBody LookupVmOverheadMemoryBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type MakeDirectoryBody struct {
+	Req *types.MakeDirectory `xml:"urn:vim25 MakeDirectory,omitempty"`
+	Res *types.MakeDirectoryResponse `xml:"urn:vim25 MakeDirectoryResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *MakeDirectoryBody) Fault() *soap.Fault { return b.Fault_ }
+
+func MakeDirectory(ctx context.Context, r soap.RoundTripper, req *types.MakeDirectory) (*types.MakeDirectoryResponse, error) {
+	var reqBody, resBody MakeDirectoryBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type MakeDirectoryInGuestBody struct {
+	Req *types.MakeDirectoryInGuest `xml:"urn:vim25 MakeDirectoryInGuest,omitempty"`
+	Res *types.MakeDirectoryInGuestResponse `xml:"urn:vim25 MakeDirectoryInGuestResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *MakeDirectoryInGuestBody) Fault() *soap.Fault { return b.Fault_ }
+
+func MakeDirectoryInGuest(ctx context.Context, r soap.RoundTripper, req *types.MakeDirectoryInGuest) (*types.MakeDirectoryInGuestResponse, error) {
+	var reqBody, resBody MakeDirectoryInGuestBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type MakePrimaryVM_TaskBody struct {
+	Req *types.MakePrimaryVM_Task `xml:"urn:vim25 MakePrimaryVM_Task,omitempty"`
+	Res *types.MakePrimaryVM_TaskResponse `xml:"urn:vim25 MakePrimaryVM_TaskResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *MakePrimaryVM_TaskBody) Fault() *soap.Fault { return b.Fault_ }
+
+func MakePrimaryVM_Task(ctx context.Context, r soap.RoundTripper, req *types.MakePrimaryVM_Task) (*types.MakePrimaryVM_TaskResponse, error) {
+	var reqBody, resBody MakePrimaryVM_TaskBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type MarkAsLocal_TaskBody struct {
+	Req *types.MarkAsLocal_Task `xml:"urn:vim25 MarkAsLocal_Task,omitempty"`
+	Res *types.MarkAsLocal_TaskResponse `xml:"urn:vim25 MarkAsLocal_TaskResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *MarkAsLocal_TaskBody) Fault() *soap.Fault { return b.Fault_ }
+
+func MarkAsLocal_Task(ctx context.Context, r soap.RoundTripper, req *types.MarkAsLocal_Task) (*types.MarkAsLocal_TaskResponse, error) {
+	var reqBody, resBody MarkAsLocal_TaskBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type MarkAsNonLocal_TaskBody struct {
+	Req *types.MarkAsNonLocal_Task `xml:"urn:vim25 MarkAsNonLocal_Task,omitempty"`
+	Res *types.MarkAsNonLocal_TaskResponse `xml:"urn:vim25 MarkAsNonLocal_TaskResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *MarkAsNonLocal_TaskBody) Fault() *soap.Fault { return b.Fault_ }
+
+func MarkAsNonLocal_Task(ctx context.Context, r soap.RoundTripper, req *types.MarkAsNonLocal_Task) (*types.MarkAsNonLocal_TaskResponse, error) {
+	var reqBody, resBody MarkAsNonLocal_TaskBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type MarkAsNonSsd_TaskBody struct {
+	Req *types.MarkAsNonSsd_Task `xml:"urn:vim25 MarkAsNonSsd_Task,omitempty"`
+	Res *types.MarkAsNonSsd_TaskResponse `xml:"urn:vim25 MarkAsNonSsd_TaskResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *MarkAsNonSsd_TaskBody) Fault() *soap.Fault { return b.Fault_ }
+
+func MarkAsNonSsd_Task(ctx context.Context, r soap.RoundTripper, req *types.MarkAsNonSsd_Task) (*types.MarkAsNonSsd_TaskResponse, error) {
+	var reqBody, resBody MarkAsNonSsd_TaskBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type MarkAsSsd_TaskBody struct {
+	Req *types.MarkAsSsd_Task `xml:"urn:vim25 MarkAsSsd_Task,omitempty"`
+	Res *types.MarkAsSsd_TaskResponse `xml:"urn:vim25 MarkAsSsd_TaskResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *MarkAsSsd_TaskBody) Fault() *soap.Fault { return b.Fault_ }
+
+func MarkAsSsd_Task(ctx context.Context, r soap.RoundTripper, req *types.MarkAsSsd_Task) (*types.MarkAsSsd_TaskResponse, error) {
+	var reqBody, resBody MarkAsSsd_TaskBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type MarkAsTemplateBody struct {
+	Req *types.MarkAsTemplate `xml:"urn:vim25 MarkAsTemplate,omitempty"`
+	Res *types.MarkAsTemplateResponse `xml:"urn:vim25 MarkAsTemplateResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *MarkAsTemplateBody) Fault() *soap.Fault { return b.Fault_ }
+
+func MarkAsTemplate(ctx context.Context, r soap.RoundTripper, req *types.MarkAsTemplate) (*types.MarkAsTemplateResponse, error) {
+	var reqBody, resBody MarkAsTemplateBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type MarkAsVirtualMachineBody struct {
+	Req *types.MarkAsVirtualMachine `xml:"urn:vim25 MarkAsVirtualMachine,omitempty"`
+	Res *types.MarkAsVirtualMachineResponse `xml:"urn:vim25 MarkAsVirtualMachineResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *MarkAsVirtualMachineBody) Fault() *soap.Fault { return b.Fault_ }
+
+func MarkAsVirtualMachine(ctx context.Context, r soap.RoundTripper, req *types.MarkAsVirtualMachine) (*types.MarkAsVirtualMachineResponse, error) {
+	var reqBody, resBody MarkAsVirtualMachineBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type MarkForRemovalBody struct {
+	Req *types.MarkForRemoval `xml:"urn:vim25 MarkForRemoval,omitempty"`
+	Res *types.MarkForRemovalResponse `xml:"urn:vim25 MarkForRemovalResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *MarkForRemovalBody) Fault() *soap.Fault { return b.Fault_ }
+
+func MarkForRemoval(ctx context.Context, r soap.RoundTripper, req *types.MarkForRemoval) (*types.MarkForRemovalResponse, error) {
+	var reqBody, resBody MarkForRemovalBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type MergeDvs_TaskBody struct {
+	Req *types.MergeDvs_Task `xml:"urn:vim25 MergeDvs_Task,omitempty"`
+	Res *types.MergeDvs_TaskResponse `xml:"urn:vim25 MergeDvs_TaskResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *MergeDvs_TaskBody) Fault() *soap.Fault { return b.Fault_ }
+
+func MergeDvs_Task(ctx context.Context, r soap.RoundTripper, req *types.MergeDvs_Task) (*types.MergeDvs_TaskResponse, error) {
+	var reqBody, resBody MergeDvs_TaskBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type MergePermissionsBody struct {
+	Req *types.MergePermissions `xml:"urn:vim25 MergePermissions,omitempty"`
+	Res *types.MergePermissionsResponse `xml:"urn:vim25 MergePermissionsResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *MergePermissionsBody) Fault() *soap.Fault { return b.Fault_ }
+
+func MergePermissions(ctx context.Context, r soap.RoundTripper, req *types.MergePermissions) (*types.MergePermissionsResponse, error) {
+	var reqBody, resBody MergePermissionsBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type MigrateVM_TaskBody struct {
+	Req *types.MigrateVM_Task `xml:"urn:vim25 MigrateVM_Task,omitempty"`
+	Res *types.MigrateVM_TaskResponse `xml:"urn:vim25 MigrateVM_TaskResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *MigrateVM_TaskBody) Fault() *soap.Fault { return b.Fault_ }
+
+func MigrateVM_Task(ctx context.Context, r soap.RoundTripper, req *types.MigrateVM_Task) (*types.MigrateVM_TaskResponse, error) {
+	var reqBody, resBody MigrateVM_TaskBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type ModifyListViewBody struct {
+	Req *types.ModifyListView `xml:"urn:vim25 ModifyListView,omitempty"`
+	Res *types.ModifyListViewResponse `xml:"urn:vim25 ModifyListViewResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *ModifyListViewBody) Fault() *soap.Fault { return b.Fault_ }
+
+func ModifyListView(ctx context.Context, r soap.RoundTripper, req *types.ModifyListView) (*types.ModifyListViewResponse, error) {
+	var reqBody, resBody ModifyListViewBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type MountToolsInstallerBody struct {
+	Req *types.MountToolsInstaller `xml:"urn:vim25 MountToolsInstaller,omitempty"`
+	Res *types.MountToolsInstallerResponse `xml:"urn:vim25 MountToolsInstallerResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *MountToolsInstallerBody) Fault() *soap.Fault { return b.Fault_ }
+
+func MountToolsInstaller(ctx context.Context, r soap.RoundTripper, req *types.MountToolsInstaller) (*types.MountToolsInstallerResponse, error) {
+	var reqBody, resBody MountToolsInstallerBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type MountVffsVolumeBody struct {
+	Req *types.MountVffsVolume `xml:"urn:vim25 MountVffsVolume,omitempty"`
+	Res *types.MountVffsVolumeResponse `xml:"urn:vim25 MountVffsVolumeResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *MountVffsVolumeBody) Fault() *soap.Fault { return b.Fault_ }
+
+func MountVffsVolume(ctx context.Context, r soap.RoundTripper, req *types.MountVffsVolume) (*types.MountVffsVolumeResponse, error) {
+	var reqBody, resBody MountVffsVolumeBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type MountVmfsVolumeBody struct {
+	Req *types.MountVmfsVolume `xml:"urn:vim25 MountVmfsVolume,omitempty"`
+	Res *types.MountVmfsVolumeResponse `xml:"urn:vim25 MountVmfsVolumeResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *MountVmfsVolumeBody) Fault() *soap.Fault { return b.Fault_ }
+
+func MountVmfsVolume(ctx context.Context, r soap.RoundTripper, req *types.MountVmfsVolume) (*types.MountVmfsVolumeResponse, error) {
+	var reqBody, resBody MountVmfsVolumeBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type MountVmfsVolumeEx_TaskBody struct {
+	Req *types.MountVmfsVolumeEx_Task `xml:"urn:vim25 MountVmfsVolumeEx_Task,omitempty"`
+	Res *types.MountVmfsVolumeEx_TaskResponse `xml:"urn:vim25 MountVmfsVolumeEx_TaskResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *MountVmfsVolumeEx_TaskBody) Fault() *soap.Fault { return b.Fault_ }
+
+func MountVmfsVolumeEx_Task(ctx context.Context, r soap.RoundTripper, req *types.MountVmfsVolumeEx_Task) (*types.MountVmfsVolumeEx_TaskResponse, error) {
+	var reqBody, resBody MountVmfsVolumeEx_TaskBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type MoveDVPort_TaskBody struct {
+	Req *types.MoveDVPort_Task `xml:"urn:vim25 MoveDVPort_Task,omitempty"`
+	Res *types.MoveDVPort_TaskResponse `xml:"urn:vim25 MoveDVPort_TaskResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *MoveDVPort_TaskBody) Fault() *soap.Fault { return b.Fault_ }
+
+func MoveDVPort_Task(ctx context.Context, r soap.RoundTripper, req *types.MoveDVPort_Task) (*types.MoveDVPort_TaskResponse, error) {
+	var reqBody, resBody MoveDVPort_TaskBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type MoveDatastoreFile_TaskBody struct {
+	Req *types.MoveDatastoreFile_Task `xml:"urn:vim25 MoveDatastoreFile_Task,omitempty"`
+	Res *types.MoveDatastoreFile_TaskResponse `xml:"urn:vim25 MoveDatastoreFile_TaskResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *MoveDatastoreFile_TaskBody) Fault() *soap.Fault { return b.Fault_ }
+
+func MoveDatastoreFile_Task(ctx context.Context, r soap.RoundTripper, req *types.MoveDatastoreFile_Task) (*types.MoveDatastoreFile_TaskResponse, error) {
+	var reqBody, resBody MoveDatastoreFile_TaskBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type MoveDirectoryInGuestBody struct {
+	Req *types.MoveDirectoryInGuest `xml:"urn:vim25 MoveDirectoryInGuest,omitempty"`
+	Res *types.MoveDirectoryInGuestResponse `xml:"urn:vim25 MoveDirectoryInGuestResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *MoveDirectoryInGuestBody) Fault() *soap.Fault { return b.Fault_ }
+
+func MoveDirectoryInGuest(ctx context.Context, r soap.RoundTripper, req *types.MoveDirectoryInGuest) (*types.MoveDirectoryInGuestResponse, error) {
+	var reqBody, resBody MoveDirectoryInGuestBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type MoveFileInGuestBody struct {
+	Req *types.MoveFileInGuest `xml:"urn:vim25 MoveFileInGuest,omitempty"`
+	Res *types.MoveFileInGuestResponse `xml:"urn:vim25 MoveFileInGuestResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *MoveFileInGuestBody) Fault() *soap.Fault { return b.Fault_ }
+
+func MoveFileInGuest(ctx context.Context, r soap.RoundTripper, req *types.MoveFileInGuest) (*types.MoveFileInGuestResponse, error) {
+	var reqBody, resBody MoveFileInGuestBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type MoveHostInto_TaskBody struct {
+	Req *types.MoveHostInto_Task `xml:"urn:vim25 MoveHostInto_Task,omitempty"`
+	Res *types.MoveHostInto_TaskResponse `xml:"urn:vim25 MoveHostInto_TaskResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *MoveHostInto_TaskBody) Fault() *soap.Fault { return b.Fault_ }
+
+func MoveHostInto_Task(ctx context.Context, r soap.RoundTripper, req *types.MoveHostInto_Task) (*types.MoveHostInto_TaskResponse, error) {
+	var reqBody, resBody MoveHostInto_TaskBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type MoveIntoFolder_TaskBody struct {
+	Req *types.MoveIntoFolder_Task `xml:"urn:vim25 MoveIntoFolder_Task,omitempty"`
+	Res *types.MoveIntoFolder_TaskResponse `xml:"urn:vim25 MoveIntoFolder_TaskResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *MoveIntoFolder_TaskBody) Fault() *soap.Fault { return b.Fault_ }
+
+func MoveIntoFolder_Task(ctx context.Context, r soap.RoundTripper, req *types.MoveIntoFolder_Task) (*types.MoveIntoFolder_TaskResponse, error) {
+	var reqBody, resBody MoveIntoFolder_TaskBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type MoveIntoResourcePoolBody struct {
+	Req *types.MoveIntoResourcePool `xml:"urn:vim25 MoveIntoResourcePool,omitempty"`
+	Res *types.MoveIntoResourcePoolResponse `xml:"urn:vim25 MoveIntoResourcePoolResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *MoveIntoResourcePoolBody) Fault() *soap.Fault { return b.Fault_ }
+
+func MoveIntoResourcePool(ctx context.Context, r soap.RoundTripper, req *types.MoveIntoResourcePool) (*types.MoveIntoResourcePoolResponse, error) {
+	var reqBody, resBody MoveIntoResourcePoolBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type MoveInto_TaskBody struct {
+	Req *types.MoveInto_Task `xml:"urn:vim25 MoveInto_Task,omitempty"`
+	Res *types.MoveInto_TaskResponse `xml:"urn:vim25 MoveInto_TaskResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *MoveInto_TaskBody) Fault() *soap.Fault { return b.Fault_ }
+
+func MoveInto_Task(ctx context.Context, r soap.RoundTripper, req *types.MoveInto_Task) (*types.MoveInto_TaskResponse, error) {
+	var reqBody, resBody MoveInto_TaskBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type MoveVirtualDisk_TaskBody struct {
+	Req *types.MoveVirtualDisk_Task `xml:"urn:vim25 MoveVirtualDisk_Task,omitempty"`
+	Res *types.MoveVirtualDisk_TaskResponse `xml:"urn:vim25 MoveVirtualDisk_TaskResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *MoveVirtualDisk_TaskBody) Fault() *soap.Fault { return b.Fault_ }
+
+func MoveVirtualDisk_Task(ctx context.Context, r soap.RoundTripper, req *types.MoveVirtualDisk_Task) (*types.MoveVirtualDisk_TaskResponse, error) {
+	var reqBody, resBody MoveVirtualDisk_TaskBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type OpenInventoryViewFolderBody struct {
+	Req *types.OpenInventoryViewFolder `xml:"urn:vim25 OpenInventoryViewFolder,omitempty"`
+	Res *types.OpenInventoryViewFolderResponse `xml:"urn:vim25 OpenInventoryViewFolderResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *OpenInventoryViewFolderBody) Fault() *soap.Fault { return b.Fault_ }
+
+func OpenInventoryViewFolder(ctx context.Context, r soap.RoundTripper, req *types.OpenInventoryViewFolder) (*types.OpenInventoryViewFolderResponse, error) {
+	var reqBody, resBody OpenInventoryViewFolderBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type OverwriteCustomizationSpecBody struct {
+	Req *types.OverwriteCustomizationSpec `xml:"urn:vim25 OverwriteCustomizationSpec,omitempty"`
+	Res *types.OverwriteCustomizationSpecResponse `xml:"urn:vim25 OverwriteCustomizationSpecResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *OverwriteCustomizationSpecBody) Fault() *soap.Fault { return b.Fault_ }
+
+func OverwriteCustomizationSpec(ctx context.Context, r soap.RoundTripper, req *types.OverwriteCustomizationSpec) (*types.OverwriteCustomizationSpecResponse, error) {
+	var reqBody, resBody OverwriteCustomizationSpecBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type ParseDescriptorBody struct {
+	Req *types.ParseDescriptor `xml:"urn:vim25 ParseDescriptor,omitempty"`
+	Res *types.ParseDescriptorResponse `xml:"urn:vim25 ParseDescriptorResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *ParseDescriptorBody) Fault() *soap.Fault { return b.Fault_ }
+
+func ParseDescriptor(ctx context.Context, r soap.RoundTripper, req *types.ParseDescriptor) (*types.ParseDescriptorResponse, error) {
+	var reqBody, resBody ParseDescriptorBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type PerformDvsProductSpecOperation_TaskBody struct {
+	Req *types.PerformDvsProductSpecOperation_Task `xml:"urn:vim25 PerformDvsProductSpecOperation_Task,omitempty"`
+	Res *types.PerformDvsProductSpecOperation_TaskResponse `xml:"urn:vim25 PerformDvsProductSpecOperation_TaskResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *PerformDvsProductSpecOperation_TaskBody) Fault() *soap.Fault { return b.Fault_ }
+
+func PerformDvsProductSpecOperation_Task(ctx context.Context, r soap.RoundTripper, req *types.PerformDvsProductSpecOperation_Task) (*types.PerformDvsProductSpecOperation_TaskResponse, error) {
+	var reqBody, resBody PerformDvsProductSpecOperation_TaskBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type PerformVsanUpgradePreflightCheckBody struct {
+	Req *types.PerformVsanUpgradePreflightCheck `xml:"urn:vim25 PerformVsanUpgradePreflightCheck,omitempty"`
+	Res *types.PerformVsanUpgradePreflightCheckResponse `xml:"urn:vim25 PerformVsanUpgradePreflightCheckResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *PerformVsanUpgradePreflightCheckBody) Fault() *soap.Fault { return b.Fault_ }
+
+func PerformVsanUpgradePreflightCheck(ctx context.Context, r soap.RoundTripper, req *types.PerformVsanUpgradePreflightCheck) (*types.PerformVsanUpgradePreflightCheckResponse, error) {
+	var reqBody, resBody PerformVsanUpgradePreflightCheckBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type PerformVsanUpgrade_TaskBody struct {
+	Req *types.PerformVsanUpgrade_Task `xml:"urn:vim25 PerformVsanUpgrade_Task,omitempty"`
+	Res *types.PerformVsanUpgrade_TaskResponse `xml:"urn:vim25 PerformVsanUpgrade_TaskResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *PerformVsanUpgrade_TaskBody) Fault() *soap.Fault { return b.Fault_ }
+
+func PerformVsanUpgrade_Task(ctx context.Context, r soap.RoundTripper, req *types.PerformVsanUpgrade_Task) (*types.PerformVsanUpgrade_TaskResponse, error) {
+	var reqBody, resBody PerformVsanUpgrade_TaskBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type PlaceVmBody struct {
+	Req *types.PlaceVm `xml:"urn:vim25 PlaceVm,omitempty"`
+	Res *types.PlaceVmResponse `xml:"urn:vim25 PlaceVmResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *PlaceVmBody) Fault() *soap.Fault { return b.Fault_ }
+
+func PlaceVm(ctx context.Context, r soap.RoundTripper, req *types.PlaceVm) (*types.PlaceVmResponse, error) {
+	var reqBody, resBody PlaceVmBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type PostEventBody struct {
+	Req *types.PostEvent `xml:"urn:vim25 PostEvent,omitempty"`
+	Res *types.PostEventResponse `xml:"urn:vim25 PostEventResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *PostEventBody) Fault() *soap.Fault { return b.Fault_ }
+
+func PostEvent(ctx context.Context, r soap.RoundTripper, req *types.PostEvent) (*types.PostEventResponse, error) {
+	var reqBody, resBody PostEventBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type PowerDownHostToStandBy_TaskBody struct {
+	Req *types.PowerDownHostToStandBy_Task `xml:"urn:vim25 PowerDownHostToStandBy_Task,omitempty"`
+	Res *types.PowerDownHostToStandBy_TaskResponse `xml:"urn:vim25 PowerDownHostToStandBy_TaskResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *PowerDownHostToStandBy_TaskBody) Fault() *soap.Fault { return b.Fault_ }
+
+func PowerDownHostToStandBy_Task(ctx context.Context, r soap.RoundTripper, req *types.PowerDownHostToStandBy_Task) (*types.PowerDownHostToStandBy_TaskResponse, error) {
+	var reqBody, resBody PowerDownHostToStandBy_TaskBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type PowerOffVApp_TaskBody struct {
+	Req *types.PowerOffVApp_Task `xml:"urn:vim25 PowerOffVApp_Task,omitempty"`
+	Res *types.PowerOffVApp_TaskResponse `xml:"urn:vim25 PowerOffVApp_TaskResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *PowerOffVApp_TaskBody) Fault() *soap.Fault { return b.Fault_ }
+
+func PowerOffVApp_Task(ctx context.Context, r soap.RoundTripper, req *types.PowerOffVApp_Task) (*types.PowerOffVApp_TaskResponse, error) {
+	var reqBody, resBody PowerOffVApp_TaskBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type PowerOffVM_TaskBody struct {
+	Req *types.PowerOffVM_Task `xml:"urn:vim25 PowerOffVM_Task,omitempty"`
+	Res *types.PowerOffVM_TaskResponse `xml:"urn:vim25 PowerOffVM_TaskResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *PowerOffVM_TaskBody) Fault() *soap.Fault { return b.Fault_ }
+
+func PowerOffVM_Task(ctx context.Context, r soap.RoundTripper, req *types.PowerOffVM_Task) (*types.PowerOffVM_TaskResponse, error) {
+	var reqBody, resBody PowerOffVM_TaskBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type PowerOnMultiVM_TaskBody struct {
+	Req *types.PowerOnMultiVM_Task `xml:"urn:vim25 PowerOnMultiVM_Task,omitempty"`
+	Res *types.PowerOnMultiVM_TaskResponse `xml:"urn:vim25 PowerOnMultiVM_TaskResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *PowerOnMultiVM_TaskBody) Fault() *soap.Fault { return b.Fault_ }
+
+func PowerOnMultiVM_Task(ctx context.Context, r soap.RoundTripper, req *types.PowerOnMultiVM_Task) (*types.PowerOnMultiVM_TaskResponse, error) {
+	var reqBody, resBody PowerOnMultiVM_TaskBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type PowerOnVApp_TaskBody struct {
+	Req *types.PowerOnVApp_Task `xml:"urn:vim25 PowerOnVApp_Task,omitempty"`
+	Res *types.PowerOnVApp_TaskResponse `xml:"urn:vim25 PowerOnVApp_TaskResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *PowerOnVApp_TaskBody) Fault() *soap.Fault { return b.Fault_ }
+
+func PowerOnVApp_Task(ctx context.Context, r soap.RoundTripper, req *types.PowerOnVApp_Task) (*types.PowerOnVApp_TaskResponse, error) {
+	var reqBody, resBody PowerOnVApp_TaskBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type PowerOnVM_TaskBody struct {
+	Req *types.PowerOnVM_Task `xml:"urn:vim25 PowerOnVM_Task,omitempty"`
+	Res *types.PowerOnVM_TaskResponse `xml:"urn:vim25 PowerOnVM_TaskResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *PowerOnVM_TaskBody) Fault() *soap.Fault { return b.Fault_ }
+
+func PowerOnVM_Task(ctx context.Context, r soap.RoundTripper, req *types.PowerOnVM_Task) (*types.PowerOnVM_TaskResponse, error) {
+	var reqBody, resBody PowerOnVM_TaskBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type PowerUpHostFromStandBy_TaskBody struct {
+	Req *types.PowerUpHostFromStandBy_Task `xml:"urn:vim25 PowerUpHostFromStandBy_Task,omitempty"`
+	Res *types.PowerUpHostFromStandBy_TaskResponse `xml:"urn:vim25 PowerUpHostFromStandBy_TaskResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *PowerUpHostFromStandBy_TaskBody) Fault() *soap.Fault { return b.Fault_ }
+
+func PowerUpHostFromStandBy_Task(ctx context.Context, r soap.RoundTripper, req *types.PowerUpHostFromStandBy_Task) (*types.PowerUpHostFromStandBy_TaskResponse, error) {
+	var reqBody, resBody PowerUpHostFromStandBy_TaskBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type PromoteDisks_TaskBody struct {
+	Req *types.PromoteDisks_Task `xml:"urn:vim25 PromoteDisks_Task,omitempty"`
+	Res *types.PromoteDisks_TaskResponse `xml:"urn:vim25 PromoteDisks_TaskResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *PromoteDisks_TaskBody) Fault() *soap.Fault { return b.Fault_ }
+
+func PromoteDisks_Task(ctx context.Context, r soap.RoundTripper, req *types.PromoteDisks_Task) (*types.PromoteDisks_TaskResponse, error) {
+	var reqBody, resBody PromoteDisks_TaskBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type QueryAnswerFileStatusBody struct {
+	Req *types.QueryAnswerFileStatus `xml:"urn:vim25 QueryAnswerFileStatus,omitempty"`
+	Res *types.QueryAnswerFileStatusResponse `xml:"urn:vim25 QueryAnswerFileStatusResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *QueryAnswerFileStatusBody) Fault() *soap.Fault { return b.Fault_ }
+
+func QueryAnswerFileStatus(ctx context.Context, r soap.RoundTripper, req *types.QueryAnswerFileStatus) (*types.QueryAnswerFileStatusResponse, error) {
+	var reqBody, resBody QueryAnswerFileStatusBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type QueryAssignedLicensesBody struct {
+	Req *types.QueryAssignedLicenses `xml:"urn:vim25 QueryAssignedLicenses,omitempty"`
+	Res *types.QueryAssignedLicensesResponse `xml:"urn:vim25 QueryAssignedLicensesResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *QueryAssignedLicensesBody) Fault() *soap.Fault { return b.Fault_ }
+
+func QueryAssignedLicenses(ctx context.Context, r soap.RoundTripper, req *types.QueryAssignedLicenses) (*types.QueryAssignedLicensesResponse, error) {
+	var reqBody, resBody QueryAssignedLicensesBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type QueryAvailableDisksForVmfsBody struct {
+	Req *types.QueryAvailableDisksForVmfs `xml:"urn:vim25 QueryAvailableDisksForVmfs,omitempty"`
+	Res *types.QueryAvailableDisksForVmfsResponse `xml:"urn:vim25 QueryAvailableDisksForVmfsResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *QueryAvailableDisksForVmfsBody) Fault() *soap.Fault { return b.Fault_ }
+
+func QueryAvailableDisksForVmfs(ctx context.Context, r soap.RoundTripper, req *types.QueryAvailableDisksForVmfs) (*types.QueryAvailableDisksForVmfsResponse, error) {
+	var reqBody, resBody QueryAvailableDisksForVmfsBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type QueryAvailableDvsSpecBody struct {
+	Req *types.QueryAvailableDvsSpec `xml:"urn:vim25 QueryAvailableDvsSpec,omitempty"`
+	Res *types.QueryAvailableDvsSpecResponse `xml:"urn:vim25 QueryAvailableDvsSpecResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *QueryAvailableDvsSpecBody) Fault() *soap.Fault { return b.Fault_ }
+
+func QueryAvailableDvsSpec(ctx context.Context, r soap.RoundTripper, req *types.QueryAvailableDvsSpec) (*types.QueryAvailableDvsSpecResponse, error) {
+	var reqBody, resBody QueryAvailableDvsSpecBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type QueryAvailablePartitionBody struct {
+	Req *types.QueryAvailablePartition `xml:"urn:vim25 QueryAvailablePartition,omitempty"`
+	Res *types.QueryAvailablePartitionResponse `xml:"urn:vim25 QueryAvailablePartitionResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *QueryAvailablePartitionBody) Fault() *soap.Fault { return b.Fault_ }
+
+func QueryAvailablePartition(ctx context.Context, r soap.RoundTripper, req *types.QueryAvailablePartition) (*types.QueryAvailablePartitionResponse, error) {
+	var reqBody, resBody QueryAvailablePartitionBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type QueryAvailablePerfMetricBody struct {
+	Req *types.QueryAvailablePerfMetric `xml:"urn:vim25 QueryAvailablePerfMetric,omitempty"`
+	Res *types.QueryAvailablePerfMetricResponse `xml:"urn:vim25 QueryAvailablePerfMetricResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *QueryAvailablePerfMetricBody) Fault() *soap.Fault { return b.Fault_ }
+
+func QueryAvailablePerfMetric(ctx context.Context, r soap.RoundTripper, req *types.QueryAvailablePerfMetric) (*types.QueryAvailablePerfMetricResponse, error) {
+	var reqBody, resBody QueryAvailablePerfMetricBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type QueryAvailableSsdsBody struct {
+	Req *types.QueryAvailableSsds `xml:"urn:vim25 QueryAvailableSsds,omitempty"`
+	Res *types.QueryAvailableSsdsResponse `xml:"urn:vim25 QueryAvailableSsdsResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *QueryAvailableSsdsBody) Fault() *soap.Fault { return b.Fault_ }
+
+func QueryAvailableSsds(ctx context.Context, r soap.RoundTripper, req *types.QueryAvailableSsds) (*types.QueryAvailableSsdsResponse, error) {
+	var reqBody, resBody QueryAvailableSsdsBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type QueryAvailableTimeZonesBody struct {
+	Req *types.QueryAvailableTimeZones `xml:"urn:vim25 QueryAvailableTimeZones,omitempty"`
+	Res *types.QueryAvailableTimeZonesResponse `xml:"urn:vim25 QueryAvailableTimeZonesResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *QueryAvailableTimeZonesBody) Fault() *soap.Fault { return b.Fault_ }
+
+func QueryAvailableTimeZones(ctx context.Context, r soap.RoundTripper, req *types.QueryAvailableTimeZones) (*types.QueryAvailableTimeZonesResponse, error) {
+	var reqBody, resBody QueryAvailableTimeZonesBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type QueryBootDevicesBody struct {
+	Req *types.QueryBootDevices `xml:"urn:vim25 QueryBootDevices,omitempty"`
+	Res *types.QueryBootDevicesResponse `xml:"urn:vim25 QueryBootDevicesResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *QueryBootDevicesBody) Fault() *soap.Fault { return b.Fault_ }
+
+func QueryBootDevices(ctx context.Context, r soap.RoundTripper, req *types.QueryBootDevices) (*types.QueryBootDevicesResponse, error) {
+	var reqBody, resBody QueryBootDevicesBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type QueryBoundVnicsBody struct {
+	Req *types.QueryBoundVnics `xml:"urn:vim25 QueryBoundVnics,omitempty"`
+	Res *types.QueryBoundVnicsResponse `xml:"urn:vim25 QueryBoundVnicsResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *QueryBoundVnicsBody) Fault() *soap.Fault { return b.Fault_ }
+
+func QueryBoundVnics(ctx context.Context, r soap.RoundTripper, req *types.QueryBoundVnics) (*types.QueryBoundVnicsResponse, error) {
+	var reqBody, resBody QueryBoundVnicsBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type QueryCandidateNicsBody struct {
+	Req *types.QueryCandidateNics `xml:"urn:vim25 QueryCandidateNics,omitempty"`
+	Res *types.QueryCandidateNicsResponse `xml:"urn:vim25 QueryCandidateNicsResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *QueryCandidateNicsBody) Fault() *soap.Fault { return b.Fault_ }
+
+func QueryCandidateNics(ctx context.Context, r soap.RoundTripper, req *types.QueryCandidateNics) (*types.QueryCandidateNicsResponse, error) {
+	var reqBody, resBody QueryCandidateNicsBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type QueryChangedDiskAreasBody struct {
+	Req *types.QueryChangedDiskAreas `xml:"urn:vim25 QueryChangedDiskAreas,omitempty"`
+	Res *types.QueryChangedDiskAreasResponse `xml:"urn:vim25 QueryChangedDiskAreasResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *QueryChangedDiskAreasBody) Fault() *soap.Fault { return b.Fault_ }
+
+func QueryChangedDiskAreas(ctx context.Context, r soap.RoundTripper, req *types.QueryChangedDiskAreas) (*types.QueryChangedDiskAreasResponse, error) {
+	var reqBody, resBody QueryChangedDiskAreasBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type QueryCmmdsBody struct {
+	Req *types.QueryCmmds `xml:"urn:vim25 QueryCmmds,omitempty"`
+	Res *types.QueryCmmdsResponse `xml:"urn:vim25 QueryCmmdsResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *QueryCmmdsBody) Fault() *soap.Fault { return b.Fault_ }
+
+func QueryCmmds(ctx context.Context, r soap.RoundTripper, req *types.QueryCmmds) (*types.QueryCmmdsResponse, error) {
+	var reqBody, resBody QueryCmmdsBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type QueryCompatibleHostForExistingDvsBody struct {
+	Req *types.QueryCompatibleHostForExistingDvs `xml:"urn:vim25 QueryCompatibleHostForExistingDvs,omitempty"`
+	Res *types.QueryCompatibleHostForExistingDvsResponse `xml:"urn:vim25 QueryCompatibleHostForExistingDvsResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *QueryCompatibleHostForExistingDvsBody) Fault() *soap.Fault { return b.Fault_ }
+
+func QueryCompatibleHostForExistingDvs(ctx context.Context, r soap.RoundTripper, req *types.QueryCompatibleHostForExistingDvs) (*types.QueryCompatibleHostForExistingDvsResponse, error) {
+	var reqBody, resBody QueryCompatibleHostForExistingDvsBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type QueryCompatibleHostForNewDvsBody struct {
+	Req *types.QueryCompatibleHostForNewDvs `xml:"urn:vim25 QueryCompatibleHostForNewDvs,omitempty"`
+	Res *types.QueryCompatibleHostForNewDvsResponse `xml:"urn:vim25 QueryCompatibleHostForNewDvsResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *QueryCompatibleHostForNewDvsBody) Fault() *soap.Fault { return b.Fault_ }
+
+func QueryCompatibleHostForNewDvs(ctx context.Context, r soap.RoundTripper, req *types.QueryCompatibleHostForNewDvs) (*types.QueryCompatibleHostForNewDvsResponse, error) {
+	var reqBody, resBody QueryCompatibleHostForNewDvsBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type QueryComplianceStatusBody struct {
+	Req *types.QueryComplianceStatus `xml:"urn:vim25 QueryComplianceStatus,omitempty"`
+	Res *types.QueryComplianceStatusResponse `xml:"urn:vim25 QueryComplianceStatusResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *QueryComplianceStatusBody) Fault() *soap.Fault { return b.Fault_ }
+
+func QueryComplianceStatus(ctx context.Context, r soap.RoundTripper, req *types.QueryComplianceStatus) (*types.QueryComplianceStatusResponse, error) {
+	var reqBody, resBody QueryComplianceStatusBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type QueryConfigOptionBody struct {
+	Req *types.QueryConfigOption `xml:"urn:vim25 QueryConfigOption,omitempty"`
+	Res *types.QueryConfigOptionResponse `xml:"urn:vim25 QueryConfigOptionResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *QueryConfigOptionBody) Fault() *soap.Fault { return b.Fault_ }
+
+func QueryConfigOption(ctx context.Context, r soap.RoundTripper, req *types.QueryConfigOption) (*types.QueryConfigOptionResponse, error) {
+	var reqBody, resBody QueryConfigOptionBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type QueryConfigOptionDescriptorBody struct {
+	Req *types.QueryConfigOptionDescriptor `xml:"urn:vim25 QueryConfigOptionDescriptor,omitempty"`
+	Res *types.QueryConfigOptionDescriptorResponse `xml:"urn:vim25 QueryConfigOptionDescriptorResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *QueryConfigOptionDescriptorBody) Fault() *soap.Fault { return b.Fault_ }
+
+func QueryConfigOptionDescriptor(ctx context.Context, r soap.RoundTripper, req *types.QueryConfigOptionDescriptor) (*types.QueryConfigOptionDescriptorResponse, error) {
+	var reqBody, resBody QueryConfigOptionDescriptorBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type QueryConfigOptionExBody struct {
+	Req *types.QueryConfigOptionEx `xml:"urn:vim25 QueryConfigOptionEx,omitempty"`
+	Res *types.QueryConfigOptionExResponse `xml:"urn:vim25 QueryConfigOptionExResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *QueryConfigOptionExBody) Fault() *soap.Fault { return b.Fault_ }
+
+func QueryConfigOptionEx(ctx context.Context, r soap.RoundTripper, req *types.QueryConfigOptionEx) (*types.QueryConfigOptionExResponse, error) {
+	var reqBody, resBody QueryConfigOptionExBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type QueryConfigTargetBody struct {
+	Req *types.QueryConfigTarget `xml:"urn:vim25 QueryConfigTarget,omitempty"`
+	Res *types.QueryConfigTargetResponse `xml:"urn:vim25 QueryConfigTargetResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *QueryConfigTargetBody) Fault() *soap.Fault { return b.Fault_ }
+
+func QueryConfigTarget(ctx context.Context, r soap.RoundTripper, req *types.QueryConfigTarget) (*types.QueryConfigTargetResponse, error) {
+	var reqBody, resBody QueryConfigTargetBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type QueryConfiguredModuleOptionStringBody struct {
+	Req *types.QueryConfiguredModuleOptionString `xml:"urn:vim25 QueryConfiguredModuleOptionString,omitempty"`
+	Res *types.QueryConfiguredModuleOptionStringResponse `xml:"urn:vim25 QueryConfiguredModuleOptionStringResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *QueryConfiguredModuleOptionStringBody) Fault() *soap.Fault { return b.Fault_ }
+
+func QueryConfiguredModuleOptionString(ctx context.Context, r soap.RoundTripper, req *types.QueryConfiguredModuleOptionString) (*types.QueryConfiguredModuleOptionStringResponse, error) {
+	var reqBody, resBody QueryConfiguredModuleOptionStringBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type QueryConnectionInfoBody struct {
+	Req *types.QueryConnectionInfo `xml:"urn:vim25 QueryConnectionInfo,omitempty"`
+	Res *types.QueryConnectionInfoResponse `xml:"urn:vim25 QueryConnectionInfoResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *QueryConnectionInfoBody) Fault() *soap.Fault { return b.Fault_ }
+
+func QueryConnectionInfo(ctx context.Context, r soap.RoundTripper, req *types.QueryConnectionInfo) (*types.QueryConnectionInfoResponse, error) {
+	var reqBody, resBody QueryConnectionInfoBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type QueryConnectionInfoViaSpecBody struct {
+	Req *types.QueryConnectionInfoViaSpec `xml:"urn:vim25 QueryConnectionInfoViaSpec,omitempty"`
+	Res *types.QueryConnectionInfoViaSpecResponse `xml:"urn:vim25 QueryConnectionInfoViaSpecResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *QueryConnectionInfoViaSpecBody) Fault() *soap.Fault { return b.Fault_ }
+
+func QueryConnectionInfoViaSpec(ctx context.Context, r soap.RoundTripper, req *types.QueryConnectionInfoViaSpec) (*types.QueryConnectionInfoViaSpecResponse, error) {
+	var reqBody, resBody QueryConnectionInfoViaSpecBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type QueryDatastorePerformanceSummaryBody struct {
+	Req *types.QueryDatastorePerformanceSummary `xml:"urn:vim25 QueryDatastorePerformanceSummary,omitempty"`
+	Res *types.QueryDatastorePerformanceSummaryResponse `xml:"urn:vim25 QueryDatastorePerformanceSummaryResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *QueryDatastorePerformanceSummaryBody) Fault() *soap.Fault { return b.Fault_ }
+
+func QueryDatastorePerformanceSummary(ctx context.Context, r soap.RoundTripper, req *types.QueryDatastorePerformanceSummary) (*types.QueryDatastorePerformanceSummaryResponse, error) {
+	var reqBody, resBody QueryDatastorePerformanceSummaryBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type QueryDateTimeBody struct {
+	Req *types.QueryDateTime `xml:"urn:vim25 QueryDateTime,omitempty"`
+	Res *types.QueryDateTimeResponse `xml:"urn:vim25 QueryDateTimeResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *QueryDateTimeBody) Fault() *soap.Fault { return b.Fault_ }
+
+func QueryDateTime(ctx context.Context, r soap.RoundTripper, req *types.QueryDateTime) (*types.QueryDateTimeResponse, error) {
+	var reqBody, resBody QueryDateTimeBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type QueryDescriptionsBody struct {
+	Req *types.QueryDescriptions `xml:"urn:vim25 QueryDescriptions,omitempty"`
+	Res *types.QueryDescriptionsResponse `xml:"urn:vim25 QueryDescriptionsResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *QueryDescriptionsBody) Fault() *soap.Fault { return b.Fault_ }
+
+func QueryDescriptions(ctx context.Context, r soap.RoundTripper, req *types.QueryDescriptions) (*types.QueryDescriptionsResponse, error) {
+	var reqBody, resBody QueryDescriptionsBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type QueryDisksForVsanBody struct {
+	Req *types.QueryDisksForVsan `xml:"urn:vim25 QueryDisksForVsan,omitempty"`
+	Res *types.QueryDisksForVsanResponse `xml:"urn:vim25 QueryDisksForVsanResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *QueryDisksForVsanBody) Fault() *soap.Fault { return b.Fault_ }
+
+func QueryDisksForVsan(ctx context.Context, r soap.RoundTripper, req *types.QueryDisksForVsan) (*types.QueryDisksForVsanResponse, error) {
+	var reqBody, resBody QueryDisksForVsanBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type QueryDisksUsingFilterBody struct {
+	Req *types.QueryDisksUsingFilter `xml:"urn:vim25 QueryDisksUsingFilter,omitempty"`
+	Res *types.QueryDisksUsingFilterResponse `xml:"urn:vim25 QueryDisksUsingFilterResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *QueryDisksUsingFilterBody) Fault() *soap.Fault { return b.Fault_ }
+
+func QueryDisksUsingFilter(ctx context.Context, r soap.RoundTripper, req *types.QueryDisksUsingFilter) (*types.QueryDisksUsingFilterResponse, error) {
+	var reqBody, resBody QueryDisksUsingFilterBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type QueryDvsByUuidBody struct {
+	Req *types.QueryDvsByUuid `xml:"urn:vim25 QueryDvsByUuid,omitempty"`
+	Res *types.QueryDvsByUuidResponse `xml:"urn:vim25 QueryDvsByUuidResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *QueryDvsByUuidBody) Fault() *soap.Fault { return b.Fault_ }
+
+func QueryDvsByUuid(ctx context.Context, r soap.RoundTripper, req *types.QueryDvsByUuid) (*types.QueryDvsByUuidResponse, error) {
+	var reqBody, resBody QueryDvsByUuidBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type QueryDvsCheckCompatibilityBody struct {
+	Req *types.QueryDvsCheckCompatibility `xml:"urn:vim25 QueryDvsCheckCompatibility,omitempty"`
+	Res *types.QueryDvsCheckCompatibilityResponse `xml:"urn:vim25 QueryDvsCheckCompatibilityResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *QueryDvsCheckCompatibilityBody) Fault() *soap.Fault { return b.Fault_ }
+
+func QueryDvsCheckCompatibility(ctx context.Context, r soap.RoundTripper, req *types.QueryDvsCheckCompatibility) (*types.QueryDvsCheckCompatibilityResponse, error) {
+	var reqBody, resBody QueryDvsCheckCompatibilityBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type QueryDvsCompatibleHostSpecBody struct {
+	Req *types.QueryDvsCompatibleHostSpec `xml:"urn:vim25 QueryDvsCompatibleHostSpec,omitempty"`
+	Res *types.QueryDvsCompatibleHostSpecResponse `xml:"urn:vim25 QueryDvsCompatibleHostSpecResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *QueryDvsCompatibleHostSpecBody) Fault() *soap.Fault { return b.Fault_ }
+
+func QueryDvsCompatibleHostSpec(ctx context.Context, r soap.RoundTripper, req *types.QueryDvsCompatibleHostSpec) (*types.QueryDvsCompatibleHostSpecResponse, error) {
+	var reqBody, resBody QueryDvsCompatibleHostSpecBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type QueryDvsConfigTargetBody struct {
+	Req *types.QueryDvsConfigTarget `xml:"urn:vim25 QueryDvsConfigTarget,omitempty"`
+	Res *types.QueryDvsConfigTargetResponse `xml:"urn:vim25 QueryDvsConfigTargetResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *QueryDvsConfigTargetBody) Fault() *soap.Fault { return b.Fault_ }
+
+func QueryDvsConfigTarget(ctx context.Context, r soap.RoundTripper, req *types.QueryDvsConfigTarget) (*types.QueryDvsConfigTargetResponse, error) {
+	var reqBody, resBody QueryDvsConfigTargetBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type QueryDvsFeatureCapabilityBody struct {
+	Req *types.QueryDvsFeatureCapability `xml:"urn:vim25 QueryDvsFeatureCapability,omitempty"`
+	Res *types.QueryDvsFeatureCapabilityResponse `xml:"urn:vim25 QueryDvsFeatureCapabilityResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *QueryDvsFeatureCapabilityBody) Fault() *soap.Fault { return b.Fault_ }
+
+func QueryDvsFeatureCapability(ctx context.Context, r soap.RoundTripper, req *types.QueryDvsFeatureCapability) (*types.QueryDvsFeatureCapabilityResponse, error) {
+	var reqBody, resBody QueryDvsFeatureCapabilityBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type QueryEventsBody struct {
+	Req *types.QueryEvents `xml:"urn:vim25 QueryEvents,omitempty"`
+	Res *types.QueryEventsResponse `xml:"urn:vim25 QueryEventsResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *QueryEventsBody) Fault() *soap.Fault { return b.Fault_ }
+
+func QueryEvents(ctx context.Context, r soap.RoundTripper, req *types.QueryEvents) (*types.QueryEventsResponse, error) {
+	var reqBody, resBody QueryEventsBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type QueryExpressionMetadataBody struct {
+	Req *types.QueryExpressionMetadata `xml:"urn:vim25 QueryExpressionMetadata,omitempty"`
+	Res *types.QueryExpressionMetadataResponse `xml:"urn:vim25 QueryExpressionMetadataResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *QueryExpressionMetadataBody) Fault() *soap.Fault { return b.Fault_ }
+
+func QueryExpressionMetadata(ctx context.Context, r soap.RoundTripper, req *types.QueryExpressionMetadata) (*types.QueryExpressionMetadataResponse, error) {
+	var reqBody, resBody QueryExpressionMetadataBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type QueryExtensionIpAllocationUsageBody struct {
+	Req *types.QueryExtensionIpAllocationUsage `xml:"urn:vim25 QueryExtensionIpAllocationUsage,omitempty"`
+	Res *types.QueryExtensionIpAllocationUsageResponse `xml:"urn:vim25 QueryExtensionIpAllocationUsageResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *QueryExtensionIpAllocationUsageBody) Fault() *soap.Fault { return b.Fault_ }
+
+func QueryExtensionIpAllocationUsage(ctx context.Context, r soap.RoundTripper, req *types.QueryExtensionIpAllocationUsage) (*types.QueryExtensionIpAllocationUsageResponse, error) {
+	var reqBody, resBody QueryExtensionIpAllocationUsageBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type QueryFaultToleranceCompatibilityBody struct {
+	Req *types.QueryFaultToleranceCompatibility `xml:"urn:vim25 QueryFaultToleranceCompatibility,omitempty"`
+	Res *types.QueryFaultToleranceCompatibilityResponse `xml:"urn:vim25 QueryFaultToleranceCompatibilityResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *QueryFaultToleranceCompatibilityBody) Fault() *soap.Fault { return b.Fault_ }
+
+func QueryFaultToleranceCompatibility(ctx context.Context, r soap.RoundTripper, req *types.QueryFaultToleranceCompatibility) (*types.QueryFaultToleranceCompatibilityResponse, error) {
+	var reqBody, resBody QueryFaultToleranceCompatibilityBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type QueryFaultToleranceCompatibilityExBody struct {
+	Req *types.QueryFaultToleranceCompatibilityEx `xml:"urn:vim25 QueryFaultToleranceCompatibilityEx,omitempty"`
+	Res *types.QueryFaultToleranceCompatibilityExResponse `xml:"urn:vim25 QueryFaultToleranceCompatibilityExResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *QueryFaultToleranceCompatibilityExBody) Fault() *soap.Fault { return b.Fault_ }
+
+func QueryFaultToleranceCompatibilityEx(ctx context.Context, r soap.RoundTripper, req *types.QueryFaultToleranceCompatibilityEx) (*types.QueryFaultToleranceCompatibilityExResponse, error) {
+	var reqBody, resBody QueryFaultToleranceCompatibilityExBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type QueryFirmwareConfigUploadURLBody struct {
+	Req *types.QueryFirmwareConfigUploadURL `xml:"urn:vim25 QueryFirmwareConfigUploadURL,omitempty"`
+	Res *types.QueryFirmwareConfigUploadURLResponse `xml:"urn:vim25 QueryFirmwareConfigUploadURLResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *QueryFirmwareConfigUploadURLBody) Fault() *soap.Fault { return b.Fault_ }
+
+func QueryFirmwareConfigUploadURL(ctx context.Context, r soap.RoundTripper, req *types.QueryFirmwareConfigUploadURL) (*types.QueryFirmwareConfigUploadURLResponse, error) {
+	var reqBody, resBody QueryFirmwareConfigUploadURLBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type QueryHostConnectionInfoBody struct {
+	Req *types.QueryHostConnectionInfo `xml:"urn:vim25 QueryHostConnectionInfo,omitempty"`
+	Res *types.QueryHostConnectionInfoResponse `xml:"urn:vim25 QueryHostConnectionInfoResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *QueryHostConnectionInfoBody) Fault() *soap.Fault { return b.Fault_ }
+
+func QueryHostConnectionInfo(ctx context.Context, r soap.RoundTripper, req *types.QueryHostConnectionInfo) (*types.QueryHostConnectionInfoResponse, error) {
+	var reqBody, resBody QueryHostConnectionInfoBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type QueryHostPatch_TaskBody struct {
+	Req *types.QueryHostPatch_Task `xml:"urn:vim25 QueryHostPatch_Task,omitempty"`
+	Res *types.QueryHostPatch_TaskResponse `xml:"urn:vim25 QueryHostPatch_TaskResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *QueryHostPatch_TaskBody) Fault() *soap.Fault { return b.Fault_ }
+
+func QueryHostPatch_Task(ctx context.Context, r soap.RoundTripper, req *types.QueryHostPatch_Task) (*types.QueryHostPatch_TaskResponse, error) {
+	var reqBody, resBody QueryHostPatch_TaskBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type QueryHostProfileMetadataBody struct {
+	Req *types.QueryHostProfileMetadata `xml:"urn:vim25 QueryHostProfileMetadata,omitempty"`
+	Res *types.QueryHostProfileMetadataResponse `xml:"urn:vim25 QueryHostProfileMetadataResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *QueryHostProfileMetadataBody) Fault() *soap.Fault { return b.Fault_ }
+
+func QueryHostProfileMetadata(ctx context.Context, r soap.RoundTripper, req *types.QueryHostProfileMetadata) (*types.QueryHostProfileMetadataResponse, error) {
+	var reqBody, resBody QueryHostProfileMetadataBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type QueryHostStatusBody struct {
+	Req *types.QueryHostStatus `xml:"urn:vim25 QueryHostStatus,omitempty"`
+	Res *types.QueryHostStatusResponse `xml:"urn:vim25 QueryHostStatusResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *QueryHostStatusBody) Fault() *soap.Fault { return b.Fault_ }
+
+func QueryHostStatus(ctx context.Context, r soap.RoundTripper, req *types.QueryHostStatus) (*types.QueryHostStatusResponse, error) {
+	var reqBody, resBody QueryHostStatusBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type QueryIORMConfigOptionBody struct {
+	Req *types.QueryIORMConfigOption `xml:"urn:vim25 QueryIORMConfigOption,omitempty"`
+	Res *types.QueryIORMConfigOptionResponse `xml:"urn:vim25 QueryIORMConfigOptionResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *QueryIORMConfigOptionBody) Fault() *soap.Fault { return b.Fault_ }
+
+func QueryIORMConfigOption(ctx context.Context, r soap.RoundTripper, req *types.QueryIORMConfigOption) (*types.QueryIORMConfigOptionResponse, error) {
+	var reqBody, resBody QueryIORMConfigOptionBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type QueryIPAllocationsBody struct {
+	Req *types.QueryIPAllocations `xml:"urn:vim25 QueryIPAllocations,omitempty"`
+	Res *types.QueryIPAllocationsResponse `xml:"urn:vim25 QueryIPAllocationsResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *QueryIPAllocationsBody) Fault() *soap.Fault { return b.Fault_ }
+
+func QueryIPAllocations(ctx context.Context, r soap.RoundTripper, req *types.QueryIPAllocations) (*types.QueryIPAllocationsResponse, error) {
+	var reqBody, resBody QueryIPAllocationsBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type QueryIoFilterInfoBody struct {
+	Req *types.QueryIoFilterInfo `xml:"urn:vim25 QueryIoFilterInfo,omitempty"`
+	Res *types.QueryIoFilterInfoResponse `xml:"urn:vim25 QueryIoFilterInfoResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *QueryIoFilterInfoBody) Fault() *soap.Fault { return b.Fault_ }
+
+func QueryIoFilterInfo(ctx context.Context, r soap.RoundTripper, req *types.QueryIoFilterInfo) (*types.QueryIoFilterInfoResponse, error) {
+	var reqBody, resBody QueryIoFilterInfoBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type QueryIoFilterIssuesBody struct {
+	Req *types.QueryIoFilterIssues `xml:"urn:vim25 QueryIoFilterIssues,omitempty"`
+	Res *types.QueryIoFilterIssuesResponse `xml:"urn:vim25 QueryIoFilterIssuesResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *QueryIoFilterIssuesBody) Fault() *soap.Fault { return b.Fault_ }
+
+func QueryIoFilterIssues(ctx context.Context, r soap.RoundTripper, req *types.QueryIoFilterIssues) (*types.QueryIoFilterIssuesResponse, error) {
+	var reqBody, resBody QueryIoFilterIssuesBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type QueryIpPoolsBody struct {
+	Req *types.QueryIpPools `xml:"urn:vim25 QueryIpPools,omitempty"`
+	Res *types.QueryIpPoolsResponse `xml:"urn:vim25 QueryIpPoolsResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *QueryIpPoolsBody) Fault() *soap.Fault { return b.Fault_ }
+
+func QueryIpPools(ctx context.Context, r soap.RoundTripper, req *types.QueryIpPools) (*types.QueryIpPoolsResponse, error) {
+	var reqBody, resBody QueryIpPoolsBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type QueryLicenseSourceAvailabilityBody struct {
+	Req *types.QueryLicenseSourceAvailability `xml:"urn:vim25 QueryLicenseSourceAvailability,omitempty"`
+	Res *types.QueryLicenseSourceAvailabilityResponse `xml:"urn:vim25 QueryLicenseSourceAvailabilityResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *QueryLicenseSourceAvailabilityBody) Fault() *soap.Fault { return b.Fault_ }
+
+func QueryLicenseSourceAvailability(ctx context.Context, r soap.RoundTripper, req *types.QueryLicenseSourceAvailability) (*types.QueryLicenseSourceAvailabilityResponse, error) {
+	var reqBody, resBody QueryLicenseSourceAvailabilityBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type QueryLicenseUsageBody struct {
+	Req *types.QueryLicenseUsage `xml:"urn:vim25 QueryLicenseUsage,omitempty"`
+	Res *types.QueryLicenseUsageResponse `xml:"urn:vim25 QueryLicenseUsageResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *QueryLicenseUsageBody) Fault() *soap.Fault { return b.Fault_ }
+
+func QueryLicenseUsage(ctx context.Context, r soap.RoundTripper, req *types.QueryLicenseUsage) (*types.QueryLicenseUsageResponse, error) {
+	var reqBody, resBody QueryLicenseUsageBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type QueryLockdownExceptionsBody struct {
+	Req *types.QueryLockdownExceptions `xml:"urn:vim25 QueryLockdownExceptions,omitempty"`
+	Res *types.QueryLockdownExceptionsResponse `xml:"urn:vim25 QueryLockdownExceptionsResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *QueryLockdownExceptionsBody) Fault() *soap.Fault { return b.Fault_ }
+
+func QueryLockdownExceptions(ctx context.Context, r soap.RoundTripper, req *types.QueryLockdownExceptions) (*types.QueryLockdownExceptionsResponse, error) {
+	var reqBody, resBody QueryLockdownExceptionsBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type QueryManagedByBody struct {
+	Req *types.QueryManagedBy `xml:"urn:vim25 QueryManagedBy,omitempty"`
+	Res *types.QueryManagedByResponse `xml:"urn:vim25 QueryManagedByResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *QueryManagedByBody) Fault() *soap.Fault { return b.Fault_ }
+
+func QueryManagedBy(ctx context.Context, r soap.RoundTripper, req *types.QueryManagedBy) (*types.QueryManagedByResponse, error) {
+	var reqBody, resBody QueryManagedByBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type QueryMemoryOverheadBody struct {
+	Req *types.QueryMemoryOverhead `xml:"urn:vim25 QueryMemoryOverhead,omitempty"`
+	Res *types.QueryMemoryOverheadResponse `xml:"urn:vim25 QueryMemoryOverheadResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *QueryMemoryOverheadBody) Fault() *soap.Fault { return b.Fault_ }
+
+func QueryMemoryOverhead(ctx context.Context, r soap.RoundTripper, req *types.QueryMemoryOverhead) (*types.QueryMemoryOverheadResponse, error) {
+	var reqBody, resBody QueryMemoryOverheadBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type QueryMemoryOverheadExBody struct {
+	Req *types.QueryMemoryOverheadEx `xml:"urn:vim25 QueryMemoryOverheadEx,omitempty"`
+	Res *types.QueryMemoryOverheadExResponse `xml:"urn:vim25 QueryMemoryOverheadExResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *QueryMemoryOverheadExBody) Fault() *soap.Fault { return b.Fault_ }
+
+func QueryMemoryOverheadEx(ctx context.Context, r soap.RoundTripper, req *types.QueryMemoryOverheadEx) (*types.QueryMemoryOverheadExResponse, error) {
+	var reqBody, resBody QueryMemoryOverheadExBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type QueryMigrationDependenciesBody struct {
+	Req *types.QueryMigrationDependencies `xml:"urn:vim25 QueryMigrationDependencies,omitempty"`
+	Res *types.QueryMigrationDependenciesResponse `xml:"urn:vim25 QueryMigrationDependenciesResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *QueryMigrationDependenciesBody) Fault() *soap.Fault { return b.Fault_ }
+
+func QueryMigrationDependencies(ctx context.Context, r soap.RoundTripper, req *types.QueryMigrationDependencies) (*types.QueryMigrationDependenciesResponse, error) {
+	var reqBody, resBody QueryMigrationDependenciesBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type QueryModulesBody struct {
+	Req *types.QueryModules `xml:"urn:vim25 QueryModules,omitempty"`
+	Res *types.QueryModulesResponse `xml:"urn:vim25 QueryModulesResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *QueryModulesBody) Fault() *soap.Fault { return b.Fault_ }
+
+func QueryModules(ctx context.Context, r soap.RoundTripper, req *types.QueryModules) (*types.QueryModulesResponse, error) {
+	var reqBody, resBody QueryModulesBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type QueryNFSUserBody struct {
+	Req *types.QueryNFSUser `xml:"urn:vim25 QueryNFSUser,omitempty"`
+	Res *types.QueryNFSUserResponse `xml:"urn:vim25 QueryNFSUserResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *QueryNFSUserBody) Fault() *soap.Fault { return b.Fault_ }
+
+func QueryNFSUser(ctx context.Context, r soap.RoundTripper, req *types.QueryNFSUser) (*types.QueryNFSUserResponse, error) {
+	var reqBody, resBody QueryNFSUserBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type QueryNetConfigBody struct {
+	Req *types.QueryNetConfig `xml:"urn:vim25 QueryNetConfig,omitempty"`
+	Res *types.QueryNetConfigResponse `xml:"urn:vim25 QueryNetConfigResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *QueryNetConfigBody) Fault() *soap.Fault { return b.Fault_ }
+
+func QueryNetConfig(ctx context.Context, r soap.RoundTripper, req *types.QueryNetConfig) (*types.QueryNetConfigResponse, error) {
+	var reqBody, resBody QueryNetConfigBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type QueryNetworkHintBody struct {
+	Req *types.QueryNetworkHint `xml:"urn:vim25 QueryNetworkHint,omitempty"`
+	Res *types.QueryNetworkHintResponse `xml:"urn:vim25 QueryNetworkHintResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *QueryNetworkHintBody) Fault() *soap.Fault { return b.Fault_ }
+
+func QueryNetworkHint(ctx context.Context, r soap.RoundTripper, req *types.QueryNetworkHint) (*types.QueryNetworkHintResponse, error) {
+	var reqBody, resBody QueryNetworkHintBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type QueryObjectsOnPhysicalVsanDiskBody struct {
+	Req *types.QueryObjectsOnPhysicalVsanDisk `xml:"urn:vim25 QueryObjectsOnPhysicalVsanDisk,omitempty"`
+	Res *types.QueryObjectsOnPhysicalVsanDiskResponse `xml:"urn:vim25 QueryObjectsOnPhysicalVsanDiskResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *QueryObjectsOnPhysicalVsanDiskBody) Fault() *soap.Fault { return b.Fault_ }
+
+func QueryObjectsOnPhysicalVsanDisk(ctx context.Context, r soap.RoundTripper, req *types.QueryObjectsOnPhysicalVsanDisk) (*types.QueryObjectsOnPhysicalVsanDiskResponse, error) {
+	var reqBody, resBody QueryObjectsOnPhysicalVsanDiskBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type QueryOptionsBody struct {
+	Req *types.QueryOptions `xml:"urn:vim25 QueryOptions,omitempty"`
+	Res *types.QueryOptionsResponse `xml:"urn:vim25 QueryOptionsResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *QueryOptionsBody) Fault() *soap.Fault { return b.Fault_ }
+
+func QueryOptions(ctx context.Context, r soap.RoundTripper, req *types.QueryOptions) (*types.QueryOptionsResponse, error) {
+	var reqBody, resBody QueryOptionsBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type QueryPartitionCreateDescBody struct {
+	Req *types.QueryPartitionCreateDesc `xml:"urn:vim25 QueryPartitionCreateDesc,omitempty"`
+	Res *types.QueryPartitionCreateDescResponse `xml:"urn:vim25 QueryPartitionCreateDescResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *QueryPartitionCreateDescBody) Fault() *soap.Fault { return b.Fault_ }
+
+func QueryPartitionCreateDesc(ctx context.Context, r soap.RoundTripper, req *types.QueryPartitionCreateDesc) (*types.QueryPartitionCreateDescResponse, error) {
+	var reqBody, resBody QueryPartitionCreateDescBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type QueryPartitionCreateOptionsBody struct {
+	Req *types.QueryPartitionCreateOptions `xml:"urn:vim25 QueryPartitionCreateOptions,omitempty"`
+	Res *types.QueryPartitionCreateOptionsResponse `xml:"urn:vim25 QueryPartitionCreateOptionsResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *QueryPartitionCreateOptionsBody) Fault() *soap.Fault { return b.Fault_ }
+
+func QueryPartitionCreateOptions(ctx context.Context, r soap.RoundTripper, req *types.QueryPartitionCreateOptions) (*types.QueryPartitionCreateOptionsResponse, error) {
+	var reqBody, resBody QueryPartitionCreateOptionsBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type QueryPathSelectionPolicyOptionsBody struct {
+	Req *types.QueryPathSelectionPolicyOptions `xml:"urn:vim25 QueryPathSelectionPolicyOptions,omitempty"`
+	Res *types.QueryPathSelectionPolicyOptionsResponse `xml:"urn:vim25 QueryPathSelectionPolicyOptionsResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *QueryPathSelectionPolicyOptionsBody) Fault() *soap.Fault { return b.Fault_ }
+
+func QueryPathSelectionPolicyOptions(ctx context.Context, r soap.RoundTripper, req *types.QueryPathSelectionPolicyOptions) (*types.QueryPathSelectionPolicyOptionsResponse, error) {
+	var reqBody, resBody QueryPathSelectionPolicyOptionsBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type QueryPerfBody struct {
+	Req *types.QueryPerf `xml:"urn:vim25 QueryPerf,omitempty"`
+	Res *types.QueryPerfResponse `xml:"urn:vim25 QueryPerfResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *QueryPerfBody) Fault() *soap.Fault { return b.Fault_ }
+
+func QueryPerf(ctx context.Context, r soap.RoundTripper, req *types.QueryPerf) (*types.QueryPerfResponse, error) {
+	var reqBody, resBody QueryPerfBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type QueryPerfCompositeBody struct {
+	Req *types.QueryPerfComposite `xml:"urn:vim25 QueryPerfComposite,omitempty"`
+	Res *types.QueryPerfCompositeResponse `xml:"urn:vim25 QueryPerfCompositeResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *QueryPerfCompositeBody) Fault() *soap.Fault { return b.Fault_ }
+
+func QueryPerfComposite(ctx context.Context, r soap.RoundTripper, req *types.QueryPerfComposite) (*types.QueryPerfCompositeResponse, error) {
+	var reqBody, resBody QueryPerfCompositeBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type QueryPerfCounterBody struct {
+	Req *types.QueryPerfCounter `xml:"urn:vim25 QueryPerfCounter,omitempty"`
+	Res *types.QueryPerfCounterResponse `xml:"urn:vim25 QueryPerfCounterResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *QueryPerfCounterBody) Fault() *soap.Fault { return b.Fault_ }
+
+func QueryPerfCounter(ctx context.Context, r soap.RoundTripper, req *types.QueryPerfCounter) (*types.QueryPerfCounterResponse, error) {
+	var reqBody, resBody QueryPerfCounterBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
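All of the vendored methods in this file share one generated shape: a Body struct that can hold the request, the response, or a SOAP fault, plus a thin function that round-trips it. For orientation, a minimal sketch of how calling code could use one of these wrappers; it is not part of the vendored file, and the placeholder URL, credentials, and the choice of QueryPerfProviderSummary are illustrative assumptions only.

	package main

	import (
		"fmt"
		"log"
		"net/url"

		"github.com/vmware/govmomi"
		"github.com/vmware/govmomi/vim25/methods"
		"github.com/vmware/govmomi/vim25/types"
		"golang.org/x/net/context"
	)

	func main() {
		ctx := context.Background()

		// Placeholder endpoint; real code would take this from configuration.
		u, err := url.Parse("https://user:pass@vcenter.example.com/sdk")
		if err != nil {
			log.Fatal(err)
		}

		// govmomi.Client embeds *vim25.Client, which satisfies soap.RoundTripper.
		c, err := govmomi.NewClient(ctx, u, true)
		if err != nil {
			log.Fatal(err)
		}

		// Ask the PerfManager whether the root folder supports live statistics.
		res, err := methods.QueryPerfProviderSummary(ctx, c.Client, &types.QueryPerfProviderSummary{
			This:   *c.ServiceContent.PerfManager,
			Entity: c.ServiceContent.RootFolder,
		})
		if err != nil {
			log.Fatal(err)
		}
		fmt.Println(res.Returnval.CurrentSupported)
	}
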
+
+type QueryPerfCounterByLevelBody struct {
+	Req *types.QueryPerfCounterByLevel `xml:"urn:vim25 QueryPerfCounterByLevel,omitempty"`
+	Res *types.QueryPerfCounterByLevelResponse `xml:"urn:vim25 QueryPerfCounterByLevelResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *QueryPerfCounterByLevelBody) Fault() *soap.Fault { return b.Fault_ }
+
+func QueryPerfCounterByLevel(ctx context.Context, r soap.RoundTripper, req *types.QueryPerfCounterByLevel) (*types.QueryPerfCounterByLevelResponse, error) {
+	var reqBody, resBody QueryPerfCounterByLevelBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type QueryPerfProviderSummaryBody struct {
+	Req *types.QueryPerfProviderSummary `xml:"urn:vim25 QueryPerfProviderSummary,omitempty"`
+	Res *types.QueryPerfProviderSummaryResponse `xml:"urn:vim25 QueryPerfProviderSummaryResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *QueryPerfProviderSummaryBody) Fault() *soap.Fault { return b.Fault_ }
+
+func QueryPerfProviderSummary(ctx context.Context, r soap.RoundTripper, req *types.QueryPerfProviderSummary) (*types.QueryPerfProviderSummaryResponse, error) {
+	var reqBody, resBody QueryPerfProviderSummaryBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type QueryPhysicalVsanDisksBody struct {
+	Req *types.QueryPhysicalVsanDisks `xml:"urn:vim25 QueryPhysicalVsanDisks,omitempty"`
+	Res *types.QueryPhysicalVsanDisksResponse `xml:"urn:vim25 QueryPhysicalVsanDisksResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *QueryPhysicalVsanDisksBody) Fault() *soap.Fault { return b.Fault_ }
+
+func QueryPhysicalVsanDisks(ctx context.Context, r soap.RoundTripper, req *types.QueryPhysicalVsanDisks) (*types.QueryPhysicalVsanDisksResponse, error) {
+	var reqBody, resBody QueryPhysicalVsanDisksBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type QueryPnicStatusBody struct {
+	Req *types.QueryPnicStatus `xml:"urn:vim25 QueryPnicStatus,omitempty"`
+	Res *types.QueryPnicStatusResponse `xml:"urn:vim25 QueryPnicStatusResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *QueryPnicStatusBody) Fault() *soap.Fault { return b.Fault_ }
+
+func QueryPnicStatus(ctx context.Context, r soap.RoundTripper, req *types.QueryPnicStatus) (*types.QueryPnicStatusResponse, error) {
+	var reqBody, resBody QueryPnicStatusBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type QueryPolicyMetadataBody struct {
+	Req *types.QueryPolicyMetadata `xml:"urn:vim25 QueryPolicyMetadata,omitempty"`
+	Res *types.QueryPolicyMetadataResponse `xml:"urn:vim25 QueryPolicyMetadataResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *QueryPolicyMetadataBody) Fault() *soap.Fault { return b.Fault_ }
+
+func QueryPolicyMetadata(ctx context.Context, r soap.RoundTripper, req *types.QueryPolicyMetadata) (*types.QueryPolicyMetadataResponse, error) {
+	var reqBody, resBody QueryPolicyMetadataBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type QueryProfileStructureBody struct {
+	Req *types.QueryProfileStructure `xml:"urn:vim25 QueryProfileStructure,omitempty"`
+	Res *types.QueryProfileStructureResponse `xml:"urn:vim25 QueryProfileStructureResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *QueryProfileStructureBody) Fault() *soap.Fault { return b.Fault_ }
+
+func QueryProfileStructure(ctx context.Context, r soap.RoundTripper, req *types.QueryProfileStructure) (*types.QueryProfileStructureResponse, error) {
+	var reqBody, resBody QueryProfileStructureBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type QueryResourceConfigOptionBody struct {
+	Req *types.QueryResourceConfigOption `xml:"urn:vim25 QueryResourceConfigOption,omitempty"`
+	Res *types.QueryResourceConfigOptionResponse `xml:"urn:vim25 QueryResourceConfigOptionResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *QueryResourceConfigOptionBody) Fault() *soap.Fault { return b.Fault_ }
+
+func QueryResourceConfigOption(ctx context.Context, r soap.RoundTripper, req *types.QueryResourceConfigOption) (*types.QueryResourceConfigOptionResponse, error) {
+	var reqBody, resBody QueryResourceConfigOptionBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type QueryServiceListBody struct {
+	Req *types.QueryServiceList `xml:"urn:vim25 QueryServiceList,omitempty"`
+	Res *types.QueryServiceListResponse `xml:"urn:vim25 QueryServiceListResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *QueryServiceListBody) Fault() *soap.Fault { return b.Fault_ }
+
+func QueryServiceList(ctx context.Context, r soap.RoundTripper, req *types.QueryServiceList) (*types.QueryServiceListResponse, error) {
+	var reqBody, resBody QueryServiceListBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type QueryStorageArrayTypePolicyOptionsBody struct {
+	Req *types.QueryStorageArrayTypePolicyOptions `xml:"urn:vim25 QueryStorageArrayTypePolicyOptions,omitempty"`
+	Res *types.QueryStorageArrayTypePolicyOptionsResponse `xml:"urn:vim25 QueryStorageArrayTypePolicyOptionsResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *QueryStorageArrayTypePolicyOptionsBody) Fault() *soap.Fault { return b.Fault_ }
+
+func QueryStorageArrayTypePolicyOptions(ctx context.Context, r soap.RoundTripper, req *types.QueryStorageArrayTypePolicyOptions) (*types.QueryStorageArrayTypePolicyOptionsResponse, error) {
+	var reqBody, resBody QueryStorageArrayTypePolicyOptionsBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type QuerySupportedFeaturesBody struct {
+	Req *types.QuerySupportedFeatures `xml:"urn:vim25 QuerySupportedFeatures,omitempty"`
+	Res *types.QuerySupportedFeaturesResponse `xml:"urn:vim25 QuerySupportedFeaturesResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *QuerySupportedFeaturesBody) Fault() *soap.Fault { return b.Fault_ }
+
+func QuerySupportedFeatures(ctx context.Context, r soap.RoundTripper, req *types.QuerySupportedFeatures) (*types.QuerySupportedFeaturesResponse, error) {
+	var reqBody, resBody QuerySupportedFeaturesBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type QuerySyncingVsanObjectsBody struct {
+	Req *types.QuerySyncingVsanObjects `xml:"urn:vim25 QuerySyncingVsanObjects,omitempty"`
+	Res *types.QuerySyncingVsanObjectsResponse `xml:"urn:vim25 QuerySyncingVsanObjectsResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *QuerySyncingVsanObjectsBody) Fault() *soap.Fault { return b.Fault_ }
+
+func QuerySyncingVsanObjects(ctx context.Context, r soap.RoundTripper, req *types.QuerySyncingVsanObjects) (*types.QuerySyncingVsanObjectsResponse, error) {
+	var reqBody, resBody QuerySyncingVsanObjectsBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type QuerySystemUsersBody struct {
+	Req *types.QuerySystemUsers `xml:"urn:vim25 QuerySystemUsers,omitempty"`
+	Res *types.QuerySystemUsersResponse `xml:"urn:vim25 QuerySystemUsersResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *QuerySystemUsersBody) Fault() *soap.Fault { return b.Fault_ }
+
+func QuerySystemUsers(ctx context.Context, r soap.RoundTripper, req *types.QuerySystemUsers) (*types.QuerySystemUsersResponse, error) {
+	var reqBody, resBody QuerySystemUsersBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type QueryTargetCapabilitiesBody struct {
+	Req *types.QueryTargetCapabilities `xml:"urn:vim25 QueryTargetCapabilities,omitempty"`
+	Res *types.QueryTargetCapabilitiesResponse `xml:"urn:vim25 QueryTargetCapabilitiesResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *QueryTargetCapabilitiesBody) Fault() *soap.Fault { return b.Fault_ }
+
+func QueryTargetCapabilities(ctx context.Context, r soap.RoundTripper, req *types.QueryTargetCapabilities) (*types.QueryTargetCapabilitiesResponse, error) {
+	var reqBody, resBody QueryTargetCapabilitiesBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type QueryTpmAttestationReportBody struct {
+	Req *types.QueryTpmAttestationReport `xml:"urn:vim25 QueryTpmAttestationReport,omitempty"`
+	Res *types.QueryTpmAttestationReportResponse `xml:"urn:vim25 QueryTpmAttestationReportResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *QueryTpmAttestationReportBody) Fault() *soap.Fault { return b.Fault_ }
+
+func QueryTpmAttestationReport(ctx context.Context, r soap.RoundTripper, req *types.QueryTpmAttestationReport) (*types.QueryTpmAttestationReportResponse, error) {
+	var reqBody, resBody QueryTpmAttestationReportBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
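The Fault_ field and Fault() accessor on each Body exist so the soap package can decode a SOAP fault out of the envelope instead of a response; RoundTrip then surfaces a decoded fault as an error. A hedged sketch of telling a server-side fault apart from a transport failure, reusing ctx and c from the earlier sketch; hostRef and its value are placeholders, and the soap identifier refers to github.com/vmware/govmomi/vim25/soap:

	// hostRef is a hypothetical reference to a HostSystem managed object.
	hostRef := types.ManagedObjectReference{Type: "HostSystem", Value: "host-123"}

	res, err := methods.QueryTpmAttestationReport(ctx, c.Client, &types.QueryTpmAttestationReport{
		This: hostRef,
	})
	if err != nil {
		if soap.IsSoapFault(err) {
			// The server answered with a fault in the envelope (decoded via Fault_).
			log.Fatalf("server fault: %s", soap.ToSoapFault(err).String)
		}
		log.Fatal(err) // transport-level failure
	}
	_ = res
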
+
+type QueryUnownedFilesBody struct {
+	Req *types.QueryUnownedFiles `xml:"urn:vim25 QueryUnownedFiles,omitempty"`
+	Res *types.QueryUnownedFilesResponse `xml:"urn:vim25 QueryUnownedFilesResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *QueryUnownedFilesBody) Fault() *soap.Fault { return b.Fault_ }
+
+func QueryUnownedFiles(ctx context.Context, r soap.RoundTripper, req *types.QueryUnownedFiles) (*types.QueryUnownedFilesResponse, error) {
+	var reqBody, resBody QueryUnownedFilesBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type QueryUnresolvedVmfsVolumeBody struct {
+	Req *types.QueryUnresolvedVmfsVolume `xml:"urn:vim25 QueryUnresolvedVmfsVolume,omitempty"`
+	Res *types.QueryUnresolvedVmfsVolumeResponse `xml:"urn:vim25 QueryUnresolvedVmfsVolumeResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *QueryUnresolvedVmfsVolumeBody) Fault() *soap.Fault { return b.Fault_ }
+
+func QueryUnresolvedVmfsVolume(ctx context.Context, r soap.RoundTripper, req *types.QueryUnresolvedVmfsVolume) (*types.QueryUnresolvedVmfsVolumeResponse, error) {
+	var reqBody, resBody QueryUnresolvedVmfsVolumeBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type QueryUnresolvedVmfsVolumesBody struct {
+	Req *types.QueryUnresolvedVmfsVolumes `xml:"urn:vim25 QueryUnresolvedVmfsVolumes,omitempty"`
+	Res *types.QueryUnresolvedVmfsVolumesResponse `xml:"urn:vim25 QueryUnresolvedVmfsVolumesResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *QueryUnresolvedVmfsVolumesBody) Fault() *soap.Fault { return b.Fault_ }
+
+func QueryUnresolvedVmfsVolumes(ctx context.Context, r soap.RoundTripper, req *types.QueryUnresolvedVmfsVolumes) (*types.QueryUnresolvedVmfsVolumesResponse, error) {
+	var reqBody, resBody QueryUnresolvedVmfsVolumesBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type QueryUsedVlanIdInDvsBody struct {
+	Req *types.QueryUsedVlanIdInDvs `xml:"urn:vim25 QueryUsedVlanIdInDvs,omitempty"`
+	Res *types.QueryUsedVlanIdInDvsResponse `xml:"urn:vim25 QueryUsedVlanIdInDvsResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *QueryUsedVlanIdInDvsBody) Fault() *soap.Fault { return b.Fault_ }
+
+func QueryUsedVlanIdInDvs(ctx context.Context, r soap.RoundTripper, req *types.QueryUsedVlanIdInDvs) (*types.QueryUsedVlanIdInDvsResponse, error) {
+	var reqBody, resBody QueryUsedVlanIdInDvsBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type QueryVMotionCompatibilityBody struct {
+	Req *types.QueryVMotionCompatibility `xml:"urn:vim25 QueryVMotionCompatibility,omitempty"`
+	Res *types.QueryVMotionCompatibilityResponse `xml:"urn:vim25 QueryVMotionCompatibilityResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *QueryVMotionCompatibilityBody) Fault() *soap.Fault { return b.Fault_ }
+
+func QueryVMotionCompatibility(ctx context.Context, r soap.RoundTripper, req *types.QueryVMotionCompatibility) (*types.QueryVMotionCompatibilityResponse, error) {
+	var reqBody, resBody QueryVMotionCompatibilityBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type QueryVMotionCompatibilityEx_TaskBody struct {
+	Req *types.QueryVMotionCompatibilityEx_Task `xml:"urn:vim25 QueryVMotionCompatibilityEx_Task,omitempty"`
+	Res *types.QueryVMotionCompatibilityEx_TaskResponse `xml:"urn:vim25 QueryVMotionCompatibilityEx_TaskResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *QueryVMotionCompatibilityEx_TaskBody) Fault() *soap.Fault { return b.Fault_ }
+
+func QueryVMotionCompatibilityEx_Task(ctx context.Context, r soap.RoundTripper, req *types.QueryVMotionCompatibilityEx_Task) (*types.QueryVMotionCompatibilityEx_TaskResponse, error) {
+	var reqBody, resBody QueryVMotionCompatibilityEx_TaskBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type QueryVirtualDiskFragmentationBody struct {
+	Req *types.QueryVirtualDiskFragmentation `xml:"urn:vim25 QueryVirtualDiskFragmentation,omitempty"`
+	Res *types.QueryVirtualDiskFragmentationResponse `xml:"urn:vim25 QueryVirtualDiskFragmentationResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *QueryVirtualDiskFragmentationBody) Fault() *soap.Fault { return b.Fault_ }
+
+func QueryVirtualDiskFragmentation(ctx context.Context, r soap.RoundTripper, req *types.QueryVirtualDiskFragmentation) (*types.QueryVirtualDiskFragmentationResponse, error) {
+	var reqBody, resBody QueryVirtualDiskFragmentationBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type QueryVirtualDiskGeometryBody struct {
+	Req *types.QueryVirtualDiskGeometry `xml:"urn:vim25 QueryVirtualDiskGeometry,omitempty"`
+	Res *types.QueryVirtualDiskGeometryResponse `xml:"urn:vim25 QueryVirtualDiskGeometryResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *QueryVirtualDiskGeometryBody) Fault() *soap.Fault { return b.Fault_ }
+
+func QueryVirtualDiskGeometry(ctx context.Context, r soap.RoundTripper, req *types.QueryVirtualDiskGeometry) (*types.QueryVirtualDiskGeometryResponse, error) {
+	var reqBody, resBody QueryVirtualDiskGeometryBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type QueryVirtualDiskUuidBody struct {
+	Req *types.QueryVirtualDiskUuid `xml:"urn:vim25 QueryVirtualDiskUuid,omitempty"`
+	Res *types.QueryVirtualDiskUuidResponse `xml:"urn:vim25 QueryVirtualDiskUuidResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *QueryVirtualDiskUuidBody) Fault() *soap.Fault { return b.Fault_ }
+
+func QueryVirtualDiskUuid(ctx context.Context, r soap.RoundTripper, req *types.QueryVirtualDiskUuid) (*types.QueryVirtualDiskUuidResponse, error) {
+	var reqBody, resBody QueryVirtualDiskUuidBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type QueryVmfsDatastoreCreateOptionsBody struct {
+	Req *types.QueryVmfsDatastoreCreateOptions `xml:"urn:vim25 QueryVmfsDatastoreCreateOptions,omitempty"`
+	Res *types.QueryVmfsDatastoreCreateOptionsResponse `xml:"urn:vim25 QueryVmfsDatastoreCreateOptionsResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *QueryVmfsDatastoreCreateOptionsBody) Fault() *soap.Fault { return b.Fault_ }
+
+func QueryVmfsDatastoreCreateOptions(ctx context.Context, r soap.RoundTripper, req *types.QueryVmfsDatastoreCreateOptions) (*types.QueryVmfsDatastoreCreateOptionsResponse, error) {
+	var reqBody, resBody QueryVmfsDatastoreCreateOptionsBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type QueryVmfsDatastoreExpandOptionsBody struct {
+	Req *types.QueryVmfsDatastoreExpandOptions `xml:"urn:vim25 QueryVmfsDatastoreExpandOptions,omitempty"`
+	Res *types.QueryVmfsDatastoreExpandOptionsResponse `xml:"urn:vim25 QueryVmfsDatastoreExpandOptionsResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *QueryVmfsDatastoreExpandOptionsBody) Fault() *soap.Fault { return b.Fault_ }
+
+func QueryVmfsDatastoreExpandOptions(ctx context.Context, r soap.RoundTripper, req *types.QueryVmfsDatastoreExpandOptions) (*types.QueryVmfsDatastoreExpandOptionsResponse, error) {
+	var reqBody, resBody QueryVmfsDatastoreExpandOptionsBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type QueryVmfsDatastoreExtendOptionsBody struct {
+	Req *types.QueryVmfsDatastoreExtendOptions `xml:"urn:vim25 QueryVmfsDatastoreExtendOptions,omitempty"`
+	Res *types.QueryVmfsDatastoreExtendOptionsResponse `xml:"urn:vim25 QueryVmfsDatastoreExtendOptionsResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *QueryVmfsDatastoreExtendOptionsBody) Fault() *soap.Fault { return b.Fault_ }
+
+func QueryVmfsDatastoreExtendOptions(ctx context.Context, r soap.RoundTripper, req *types.QueryVmfsDatastoreExtendOptions) (*types.QueryVmfsDatastoreExtendOptionsResponse, error) {
+	var reqBody, resBody QueryVmfsDatastoreExtendOptionsBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type QueryVnicStatusBody struct {
+	Req *types.QueryVnicStatus `xml:"urn:vim25 QueryVnicStatus,omitempty"`
+	Res *types.QueryVnicStatusResponse `xml:"urn:vim25 QueryVnicStatusResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *QueryVnicStatusBody) Fault() *soap.Fault { return b.Fault_ }
+
+func QueryVnicStatus(ctx context.Context, r soap.RoundTripper, req *types.QueryVnicStatus) (*types.QueryVnicStatusResponse, error) {
+	var reqBody, resBody QueryVnicStatusBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type QueryVsanObjectUuidsByFilterBody struct {
+	Req *types.QueryVsanObjectUuidsByFilter `xml:"urn:vim25 QueryVsanObjectUuidsByFilter,omitempty"`
+	Res *types.QueryVsanObjectUuidsByFilterResponse `xml:"urn:vim25 QueryVsanObjectUuidsByFilterResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *QueryVsanObjectUuidsByFilterBody) Fault() *soap.Fault { return b.Fault_ }
+
+func QueryVsanObjectUuidsByFilter(ctx context.Context, r soap.RoundTripper, req *types.QueryVsanObjectUuidsByFilter) (*types.QueryVsanObjectUuidsByFilterResponse, error) {
+	var reqBody, resBody QueryVsanObjectUuidsByFilterBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type QueryVsanObjectsBody struct {
+	Req *types.QueryVsanObjects `xml:"urn:vim25 QueryVsanObjects,omitempty"`
+	Res *types.QueryVsanObjectsResponse `xml:"urn:vim25 QueryVsanObjectsResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *QueryVsanObjectsBody) Fault() *soap.Fault { return b.Fault_ }
+
+func QueryVsanObjects(ctx context.Context, r soap.RoundTripper, req *types.QueryVsanObjects) (*types.QueryVsanObjectsResponse, error) {
+	var reqBody, resBody QueryVsanObjectsBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type QueryVsanStatisticsBody struct {
+	Req *types.QueryVsanStatistics `xml:"urn:vim25 QueryVsanStatistics,omitempty"`
+	Res *types.QueryVsanStatisticsResponse `xml:"urn:vim25 QueryVsanStatisticsResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *QueryVsanStatisticsBody) Fault() *soap.Fault { return b.Fault_ }
+
+func QueryVsanStatistics(ctx context.Context, r soap.RoundTripper, req *types.QueryVsanStatistics) (*types.QueryVsanStatisticsResponse, error) {
+	var reqBody, resBody QueryVsanStatisticsBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type QueryVsanUpgradeStatusBody struct {
+	Req *types.QueryVsanUpgradeStatus `xml:"urn:vim25 QueryVsanUpgradeStatus,omitempty"`
+	Res *types.QueryVsanUpgradeStatusResponse `xml:"urn:vim25 QueryVsanUpgradeStatusResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *QueryVsanUpgradeStatusBody) Fault() *soap.Fault { return b.Fault_ }
+
+func QueryVsanUpgradeStatus(ctx context.Context, r soap.RoundTripper, req *types.QueryVsanUpgradeStatus) (*types.QueryVsanUpgradeStatusResponse, error) {
+	var reqBody, resBody QueryVsanUpgradeStatusBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type ReadEnvironmentVariableInGuestBody struct {
+	Req *types.ReadEnvironmentVariableInGuest `xml:"urn:vim25 ReadEnvironmentVariableInGuest,omitempty"`
+	Res *types.ReadEnvironmentVariableInGuestResponse `xml:"urn:vim25 ReadEnvironmentVariableInGuestResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *ReadEnvironmentVariableInGuestBody) Fault() *soap.Fault { return b.Fault_ }
+
+func ReadEnvironmentVariableInGuest(ctx context.Context, r soap.RoundTripper, req *types.ReadEnvironmentVariableInGuest) (*types.ReadEnvironmentVariableInGuestResponse, error) {
+	var reqBody, resBody ReadEnvironmentVariableInGuestBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type ReadNextEventsBody struct {
+	Req *types.ReadNextEvents `xml:"urn:vim25 ReadNextEvents,omitempty"`
+	Res *types.ReadNextEventsResponse `xml:"urn:vim25 ReadNextEventsResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *ReadNextEventsBody) Fault() *soap.Fault { return b.Fault_ }
+
+func ReadNextEvents(ctx context.Context, r soap.RoundTripper, req *types.ReadNextEvents) (*types.ReadNextEventsResponse, error) {
+	var reqBody, resBody ReadNextEventsBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type ReadNextTasksBody struct {
+	Req *types.ReadNextTasks `xml:"urn:vim25 ReadNextTasks,omitempty"`
+	Res *types.ReadNextTasksResponse `xml:"urn:vim25 ReadNextTasksResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *ReadNextTasksBody) Fault() *soap.Fault { return b.Fault_ }
+
+func ReadNextTasks(ctx context.Context, r soap.RoundTripper, req *types.ReadNextTasks) (*types.ReadNextTasksResponse, error) {
+	var reqBody, resBody ReadNextTasksBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type ReadPreviousEventsBody struct {
+	Req *types.ReadPreviousEvents `xml:"urn:vim25 ReadPreviousEvents,omitempty"`
+	Res *types.ReadPreviousEventsResponse `xml:"urn:vim25 ReadPreviousEventsResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *ReadPreviousEventsBody) Fault() *soap.Fault { return b.Fault_ }
+
+func ReadPreviousEvents(ctx context.Context, r soap.RoundTripper, req *types.ReadPreviousEvents) (*types.ReadPreviousEventsResponse, error) {
+	var reqBody, resBody ReadPreviousEventsBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type ReadPreviousTasksBody struct {
+	Req *types.ReadPreviousTasks `xml:"urn:vim25 ReadPreviousTasks,omitempty"`
+	Res *types.ReadPreviousTasksResponse `xml:"urn:vim25 ReadPreviousTasksResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *ReadPreviousTasksBody) Fault() *soap.Fault { return b.Fault_ }
+
+func ReadPreviousTasks(ctx context.Context, r soap.RoundTripper, req *types.ReadPreviousTasks) (*types.ReadPreviousTasksResponse, error) {
+	var reqBody, resBody ReadPreviousTasksBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type RebootGuestBody struct {
+	Req *types.RebootGuest `xml:"urn:vim25 RebootGuest,omitempty"`
+	Res *types.RebootGuestResponse `xml:"urn:vim25 RebootGuestResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *RebootGuestBody) Fault() *soap.Fault { return b.Fault_ }
+
+func RebootGuest(ctx context.Context, r soap.RoundTripper, req *types.RebootGuest) (*types.RebootGuestResponse, error) {
+	var reqBody, resBody RebootGuestBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type RebootHost_TaskBody struct {
+	Req *types.RebootHost_Task `xml:"urn:vim25 RebootHost_Task,omitempty"`
+	Res *types.RebootHost_TaskResponse `xml:"urn:vim25 RebootHost_TaskResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *RebootHost_TaskBody) Fault() *soap.Fault { return b.Fault_ }
+
+func RebootHost_Task(ctx context.Context, r soap.RoundTripper, req *types.RebootHost_Task) (*types.RebootHost_TaskResponse, error) {
+	var reqBody, resBody RebootHost_TaskBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type RecommendDatastoresBody struct {
+	Req *types.RecommendDatastores `xml:"urn:vim25 RecommendDatastores,omitempty"`
+	Res *types.RecommendDatastoresResponse `xml:"urn:vim25 RecommendDatastoresResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *RecommendDatastoresBody) Fault() *soap.Fault { return b.Fault_ }
+
+func RecommendDatastores(ctx context.Context, r soap.RoundTripper, req *types.RecommendDatastores) (*types.RecommendDatastoresResponse, error) {
+	var reqBody, resBody RecommendDatastoresBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type RecommendHostsForVmBody struct {
+	Req *types.RecommendHostsForVm `xml:"urn:vim25 RecommendHostsForVm,omitempty"`
+	Res *types.RecommendHostsForVmResponse `xml:"urn:vim25 RecommendHostsForVmResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *RecommendHostsForVmBody) Fault() *soap.Fault { return b.Fault_ }
+
+func RecommendHostsForVm(ctx context.Context, r soap.RoundTripper, req *types.RecommendHostsForVm) (*types.RecommendHostsForVmResponse, error) {
+	var reqBody, resBody RecommendHostsForVmBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type RecommissionVsanNode_TaskBody struct {
+	Req *types.RecommissionVsanNode_Task `xml:"urn:vim25 RecommissionVsanNode_Task,omitempty"`
+	Res *types.RecommissionVsanNode_TaskResponse `xml:"urn:vim25 RecommissionVsanNode_TaskResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *RecommissionVsanNode_TaskBody) Fault() *soap.Fault { return b.Fault_ }
+
+func RecommissionVsanNode_Task(ctx context.Context, r soap.RoundTripper, req *types.RecommissionVsanNode_Task) (*types.RecommissionVsanNode_TaskResponse, error) {
+	var reqBody, resBody RecommissionVsanNode_TaskBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type ReconfigVM_TaskBody struct {
+	Req *types.ReconfigVM_Task `xml:"urn:vim25 ReconfigVM_Task,omitempty"`
+	Res *types.ReconfigVM_TaskResponse `xml:"urn:vim25 ReconfigVM_TaskResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *ReconfigVM_TaskBody) Fault() *soap.Fault { return b.Fault_ }
+
+func ReconfigVM_Task(ctx context.Context, r soap.RoundTripper, req *types.ReconfigVM_Task) (*types.ReconfigVM_TaskResponse, error) {
+	var reqBody, resBody ReconfigVM_TaskBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type ReconfigurationSatisfiableBody struct {
+	Req *types.ReconfigurationSatisfiable `xml:"urn:vim25 ReconfigurationSatisfiable,omitempty"`
+	Res *types.ReconfigurationSatisfiableResponse `xml:"urn:vim25 ReconfigurationSatisfiableResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *ReconfigurationSatisfiableBody) Fault() *soap.Fault { return b.Fault_ }
+
+func ReconfigurationSatisfiable(ctx context.Context, r soap.RoundTripper, req *types.ReconfigurationSatisfiable) (*types.ReconfigurationSatisfiableResponse, error) {
+	var reqBody, resBody ReconfigurationSatisfiableBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type ReconfigureAlarmBody struct {
+	Req *types.ReconfigureAlarm `xml:"urn:vim25 ReconfigureAlarm,omitempty"`
+	Res *types.ReconfigureAlarmResponse `xml:"urn:vim25 ReconfigureAlarmResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *ReconfigureAlarmBody) Fault() *soap.Fault { return b.Fault_ }
+
+func ReconfigureAlarm(ctx context.Context, r soap.RoundTripper, req *types.ReconfigureAlarm) (*types.ReconfigureAlarmResponse, error) {
+	var reqBody, resBody ReconfigureAlarmBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type ReconfigureAutostartBody struct {
+	Req *types.ReconfigureAutostart `xml:"urn:vim25 ReconfigureAutostart,omitempty"`
+	Res *types.ReconfigureAutostartResponse `xml:"urn:vim25 ReconfigureAutostartResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *ReconfigureAutostartBody) Fault() *soap.Fault { return b.Fault_ }
+
+func ReconfigureAutostart(ctx context.Context, r soap.RoundTripper, req *types.ReconfigureAutostart) (*types.ReconfigureAutostartResponse, error) {
+	var reqBody, resBody ReconfigureAutostartBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type ReconfigureCluster_TaskBody struct {
+	Req *types.ReconfigureCluster_Task `xml:"urn:vim25 ReconfigureCluster_Task,omitempty"`
+	Res *types.ReconfigureCluster_TaskResponse `xml:"urn:vim25 ReconfigureCluster_TaskResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *ReconfigureCluster_TaskBody) Fault() *soap.Fault { return b.Fault_ }
+
+func ReconfigureCluster_Task(ctx context.Context, r soap.RoundTripper, req *types.ReconfigureCluster_Task) (*types.ReconfigureCluster_TaskResponse, error) {
+	var reqBody, resBody ReconfigureCluster_TaskBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type ReconfigureComputeResource_TaskBody struct {
+	Req *types.ReconfigureComputeResource_Task `xml:"urn:vim25 ReconfigureComputeResource_Task,omitempty"`
+	Res *types.ReconfigureComputeResource_TaskResponse `xml:"urn:vim25 ReconfigureComputeResource_TaskResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *ReconfigureComputeResource_TaskBody) Fault() *soap.Fault { return b.Fault_ }
+
+func ReconfigureComputeResource_Task(ctx context.Context, r soap.RoundTripper, req *types.ReconfigureComputeResource_Task) (*types.ReconfigureComputeResource_TaskResponse, error) {
+	var reqBody, resBody ReconfigureComputeResource_TaskBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type ReconfigureDVPort_TaskBody struct {
+	Req *types.ReconfigureDVPort_Task `xml:"urn:vim25 ReconfigureDVPort_Task,omitempty"`
+	Res *types.ReconfigureDVPort_TaskResponse `xml:"urn:vim25 ReconfigureDVPort_TaskResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *ReconfigureDVPort_TaskBody) Fault() *soap.Fault { return b.Fault_ }
+
+func ReconfigureDVPort_Task(ctx context.Context, r soap.RoundTripper, req *types.ReconfigureDVPort_Task) (*types.ReconfigureDVPort_TaskResponse, error) {
+	var reqBody, resBody ReconfigureDVPort_TaskBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type ReconfigureDVPortgroup_TaskBody struct {
+	Req *types.ReconfigureDVPortgroup_Task `xml:"urn:vim25 ReconfigureDVPortgroup_Task,omitempty"`
+	Res *types.ReconfigureDVPortgroup_TaskResponse `xml:"urn:vim25 ReconfigureDVPortgroup_TaskResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *ReconfigureDVPortgroup_TaskBody) Fault() *soap.Fault { return b.Fault_ }
+
+func ReconfigureDVPortgroup_Task(ctx context.Context, r soap.RoundTripper, req *types.ReconfigureDVPortgroup_Task) (*types.ReconfigureDVPortgroup_TaskResponse, error) {
+	var reqBody, resBody ReconfigureDVPortgroup_TaskBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type ReconfigureDatacenter_TaskBody struct {
+	Req *types.ReconfigureDatacenter_Task `xml:"urn:vim25 ReconfigureDatacenter_Task,omitempty"`
+	Res *types.ReconfigureDatacenter_TaskResponse `xml:"urn:vim25 ReconfigureDatacenter_TaskResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *ReconfigureDatacenter_TaskBody) Fault() *soap.Fault { return b.Fault_ }
+
+func ReconfigureDatacenter_Task(ctx context.Context, r soap.RoundTripper, req *types.ReconfigureDatacenter_Task) (*types.ReconfigureDatacenter_TaskResponse, error) {
+	var reqBody, resBody ReconfigureDatacenter_TaskBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type ReconfigureDomObjectBody struct {
+	Req *types.ReconfigureDomObject `xml:"urn:vim25 ReconfigureDomObject,omitempty"`
+	Res *types.ReconfigureDomObjectResponse `xml:"urn:vim25 ReconfigureDomObjectResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *ReconfigureDomObjectBody) Fault() *soap.Fault { return b.Fault_ }
+
+func ReconfigureDomObject(ctx context.Context, r soap.RoundTripper, req *types.ReconfigureDomObject) (*types.ReconfigureDomObjectResponse, error) {
+	var reqBody, resBody ReconfigureDomObjectBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type ReconfigureDvs_TaskBody struct {
+	Req *types.ReconfigureDvs_Task `xml:"urn:vim25 ReconfigureDvs_Task,omitempty"`
+	Res *types.ReconfigureDvs_TaskResponse `xml:"urn:vim25 ReconfigureDvs_TaskResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *ReconfigureDvs_TaskBody) Fault() *soap.Fault { return b.Fault_ }
+
+func ReconfigureDvs_Task(ctx context.Context, r soap.RoundTripper, req *types.ReconfigureDvs_Task) (*types.ReconfigureDvs_TaskResponse, error) {
+	var reqBody, resBody ReconfigureDvs_TaskBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type ReconfigureHostForDAS_TaskBody struct {
+	Req *types.ReconfigureHostForDAS_Task `xml:"urn:vim25 ReconfigureHostForDAS_Task,omitempty"`
+	Res *types.ReconfigureHostForDAS_TaskResponse `xml:"urn:vim25 ReconfigureHostForDAS_TaskResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *ReconfigureHostForDAS_TaskBody) Fault() *soap.Fault { return b.Fault_ }
*ReconfigureHostForDAS_TaskBody) Fault() *soap.Fault { return b.Fault_ } + +func ReconfigureHostForDAS_Task(ctx context.Context, r soap.RoundTripper, req *types.ReconfigureHostForDAS_Task) (*types.ReconfigureHostForDAS_TaskResponse, error) { + var reqBody, resBody ReconfigureHostForDAS_TaskBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type ReconfigureScheduledTaskBody struct { + Req *types.ReconfigureScheduledTask `xml:"urn:vim25 ReconfigureScheduledTask,omitempty"` + Res *types.ReconfigureScheduledTaskResponse `xml:"urn:vim25 ReconfigureScheduledTaskResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *ReconfigureScheduledTaskBody) Fault() *soap.Fault { return b.Fault_ } + +func ReconfigureScheduledTask(ctx context.Context, r soap.RoundTripper, req *types.ReconfigureScheduledTask) (*types.ReconfigureScheduledTaskResponse, error) { + var reqBody, resBody ReconfigureScheduledTaskBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type ReconfigureServiceConsoleReservationBody struct { + Req *types.ReconfigureServiceConsoleReservation `xml:"urn:vim25 ReconfigureServiceConsoleReservation,omitempty"` + Res *types.ReconfigureServiceConsoleReservationResponse `xml:"urn:vim25 ReconfigureServiceConsoleReservationResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *ReconfigureServiceConsoleReservationBody) Fault() *soap.Fault { return b.Fault_ } + +func ReconfigureServiceConsoleReservation(ctx context.Context, r soap.RoundTripper, req *types.ReconfigureServiceConsoleReservation) (*types.ReconfigureServiceConsoleReservationResponse, error) { + var reqBody, resBody ReconfigureServiceConsoleReservationBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type ReconfigureSnmpAgentBody struct { + Req *types.ReconfigureSnmpAgent `xml:"urn:vim25 ReconfigureSnmpAgent,omitempty"` + Res *types.ReconfigureSnmpAgentResponse `xml:"urn:vim25 ReconfigureSnmpAgentResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *ReconfigureSnmpAgentBody) Fault() *soap.Fault { return b.Fault_ } + +func ReconfigureSnmpAgent(ctx context.Context, r soap.RoundTripper, req *types.ReconfigureSnmpAgent) (*types.ReconfigureSnmpAgentResponse, error) { + var reqBody, resBody ReconfigureSnmpAgentBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type ReconfigureVirtualMachineReservationBody struct { + Req *types.ReconfigureVirtualMachineReservation `xml:"urn:vim25 ReconfigureVirtualMachineReservation,omitempty"` + Res *types.ReconfigureVirtualMachineReservationResponse `xml:"urn:vim25 ReconfigureVirtualMachineReservationResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *ReconfigureVirtualMachineReservationBody) Fault() *soap.Fault { return b.Fault_ } + +func ReconfigureVirtualMachineReservation(ctx context.Context, r soap.RoundTripper, req *types.ReconfigureVirtualMachineReservation) (*types.ReconfigureVirtualMachineReservationResponse, error) { + var reqBody, resBody 
ReconfigureVirtualMachineReservationBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type ReconnectHost_TaskBody struct { + Req *types.ReconnectHost_Task `xml:"urn:vim25 ReconnectHost_Task,omitempty"` + Res *types.ReconnectHost_TaskResponse `xml:"urn:vim25 ReconnectHost_TaskResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *ReconnectHost_TaskBody) Fault() *soap.Fault { return b.Fault_ } + +func ReconnectHost_Task(ctx context.Context, r soap.RoundTripper, req *types.ReconnectHost_Task) (*types.ReconnectHost_TaskResponse, error) { + var reqBody, resBody ReconnectHost_TaskBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type RectifyDvsHost_TaskBody struct { + Req *types.RectifyDvsHost_Task `xml:"urn:vim25 RectifyDvsHost_Task,omitempty"` + Res *types.RectifyDvsHost_TaskResponse `xml:"urn:vim25 RectifyDvsHost_TaskResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *RectifyDvsHost_TaskBody) Fault() *soap.Fault { return b.Fault_ } + +func RectifyDvsHost_Task(ctx context.Context, r soap.RoundTripper, req *types.RectifyDvsHost_Task) (*types.RectifyDvsHost_TaskResponse, error) { + var reqBody, resBody RectifyDvsHost_TaskBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type RectifyDvsOnHost_TaskBody struct { + Req *types.RectifyDvsOnHost_Task `xml:"urn:vim25 RectifyDvsOnHost_Task,omitempty"` + Res *types.RectifyDvsOnHost_TaskResponse `xml:"urn:vim25 RectifyDvsOnHost_TaskResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *RectifyDvsOnHost_TaskBody) Fault() *soap.Fault { return b.Fault_ } + +func RectifyDvsOnHost_Task(ctx context.Context, r soap.RoundTripper, req *types.RectifyDvsOnHost_Task) (*types.RectifyDvsOnHost_TaskResponse, error) { + var reqBody, resBody RectifyDvsOnHost_TaskBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type RefreshBody struct { + Req *types.Refresh `xml:"urn:vim25 Refresh,omitempty"` + Res *types.RefreshResponse `xml:"urn:vim25 RefreshResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *RefreshBody) Fault() *soap.Fault { return b.Fault_ } + +func Refresh(ctx context.Context, r soap.RoundTripper, req *types.Refresh) (*types.RefreshResponse, error) { + var reqBody, resBody RefreshBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type RefreshDVPortStateBody struct { + Req *types.RefreshDVPortState `xml:"urn:vim25 RefreshDVPortState,omitempty"` + Res *types.RefreshDVPortStateResponse `xml:"urn:vim25 RefreshDVPortStateResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *RefreshDVPortStateBody) Fault() *soap.Fault { return b.Fault_ } + +func RefreshDVPortState(ctx context.Context, r soap.RoundTripper, req *types.RefreshDVPortState) (*types.RefreshDVPortStateResponse, error) { + var reqBody, resBody RefreshDVPortStateBody + + reqBody.Req = req + + if 
err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type RefreshDatastoreBody struct { + Req *types.RefreshDatastore `xml:"urn:vim25 RefreshDatastore,omitempty"` + Res *types.RefreshDatastoreResponse `xml:"urn:vim25 RefreshDatastoreResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *RefreshDatastoreBody) Fault() *soap.Fault { return b.Fault_ } + +func RefreshDatastore(ctx context.Context, r soap.RoundTripper, req *types.RefreshDatastore) (*types.RefreshDatastoreResponse, error) { + var reqBody, resBody RefreshDatastoreBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type RefreshDatastoreStorageInfoBody struct { + Req *types.RefreshDatastoreStorageInfo `xml:"urn:vim25 RefreshDatastoreStorageInfo,omitempty"` + Res *types.RefreshDatastoreStorageInfoResponse `xml:"urn:vim25 RefreshDatastoreStorageInfoResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *RefreshDatastoreStorageInfoBody) Fault() *soap.Fault { return b.Fault_ } + +func RefreshDatastoreStorageInfo(ctx context.Context, r soap.RoundTripper, req *types.RefreshDatastoreStorageInfo) (*types.RefreshDatastoreStorageInfoResponse, error) { + var reqBody, resBody RefreshDatastoreStorageInfoBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type RefreshDateTimeSystemBody struct { + Req *types.RefreshDateTimeSystem `xml:"urn:vim25 RefreshDateTimeSystem,omitempty"` + Res *types.RefreshDateTimeSystemResponse `xml:"urn:vim25 RefreshDateTimeSystemResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *RefreshDateTimeSystemBody) Fault() *soap.Fault { return b.Fault_ } + +func RefreshDateTimeSystem(ctx context.Context, r soap.RoundTripper, req *types.RefreshDateTimeSystem) (*types.RefreshDateTimeSystemResponse, error) { + var reqBody, resBody RefreshDateTimeSystemBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type RefreshFirewallBody struct { + Req *types.RefreshFirewall `xml:"urn:vim25 RefreshFirewall,omitempty"` + Res *types.RefreshFirewallResponse `xml:"urn:vim25 RefreshFirewallResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *RefreshFirewallBody) Fault() *soap.Fault { return b.Fault_ } + +func RefreshFirewall(ctx context.Context, r soap.RoundTripper, req *types.RefreshFirewall) (*types.RefreshFirewallResponse, error) { + var reqBody, resBody RefreshFirewallBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type RefreshGraphicsManagerBody struct { + Req *types.RefreshGraphicsManager `xml:"urn:vim25 RefreshGraphicsManager,omitempty"` + Res *types.RefreshGraphicsManagerResponse `xml:"urn:vim25 RefreshGraphicsManagerResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *RefreshGraphicsManagerBody) Fault() *soap.Fault { return b.Fault_ } + +func RefreshGraphicsManager(ctx context.Context, r soap.RoundTripper, req *types.RefreshGraphicsManager) 
(*types.RefreshGraphicsManagerResponse, error) { + var reqBody, resBody RefreshGraphicsManagerBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type RefreshHealthStatusSystemBody struct { + Req *types.RefreshHealthStatusSystem `xml:"urn:vim25 RefreshHealthStatusSystem,omitempty"` + Res *types.RefreshHealthStatusSystemResponse `xml:"urn:vim25 RefreshHealthStatusSystemResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *RefreshHealthStatusSystemBody) Fault() *soap.Fault { return b.Fault_ } + +func RefreshHealthStatusSystem(ctx context.Context, r soap.RoundTripper, req *types.RefreshHealthStatusSystem) (*types.RefreshHealthStatusSystemResponse, error) { + var reqBody, resBody RefreshHealthStatusSystemBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type RefreshNetworkSystemBody struct { + Req *types.RefreshNetworkSystem `xml:"urn:vim25 RefreshNetworkSystem,omitempty"` + Res *types.RefreshNetworkSystemResponse `xml:"urn:vim25 RefreshNetworkSystemResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *RefreshNetworkSystemBody) Fault() *soap.Fault { return b.Fault_ } + +func RefreshNetworkSystem(ctx context.Context, r soap.RoundTripper, req *types.RefreshNetworkSystem) (*types.RefreshNetworkSystemResponse, error) { + var reqBody, resBody RefreshNetworkSystemBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type RefreshRecommendationBody struct { + Req *types.RefreshRecommendation `xml:"urn:vim25 RefreshRecommendation,omitempty"` + Res *types.RefreshRecommendationResponse `xml:"urn:vim25 RefreshRecommendationResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *RefreshRecommendationBody) Fault() *soap.Fault { return b.Fault_ } + +func RefreshRecommendation(ctx context.Context, r soap.RoundTripper, req *types.RefreshRecommendation) (*types.RefreshRecommendationResponse, error) { + var reqBody, resBody RefreshRecommendationBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type RefreshRuntimeBody struct { + Req *types.RefreshRuntime `xml:"urn:vim25 RefreshRuntime,omitempty"` + Res *types.RefreshRuntimeResponse `xml:"urn:vim25 RefreshRuntimeResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *RefreshRuntimeBody) Fault() *soap.Fault { return b.Fault_ } + +func RefreshRuntime(ctx context.Context, r soap.RoundTripper, req *types.RefreshRuntime) (*types.RefreshRuntimeResponse, error) { + var reqBody, resBody RefreshRuntimeBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type RefreshServicesBody struct { + Req *types.RefreshServices `xml:"urn:vim25 RefreshServices,omitempty"` + Res *types.RefreshServicesResponse `xml:"urn:vim25 RefreshServicesResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *RefreshServicesBody) Fault() *soap.Fault { return b.Fault_ } + +func RefreshServices(ctx 
context.Context, r soap.RoundTripper, req *types.RefreshServices) (*types.RefreshServicesResponse, error) { + var reqBody, resBody RefreshServicesBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type RefreshStorageDrsRecommendationBody struct { + Req *types.RefreshStorageDrsRecommendation `xml:"urn:vim25 RefreshStorageDrsRecommendation,omitempty"` + Res *types.RefreshStorageDrsRecommendationResponse `xml:"urn:vim25 RefreshStorageDrsRecommendationResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *RefreshStorageDrsRecommendationBody) Fault() *soap.Fault { return b.Fault_ } + +func RefreshStorageDrsRecommendation(ctx context.Context, r soap.RoundTripper, req *types.RefreshStorageDrsRecommendation) (*types.RefreshStorageDrsRecommendationResponse, error) { + var reqBody, resBody RefreshStorageDrsRecommendationBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type RefreshStorageInfoBody struct { + Req *types.RefreshStorageInfo `xml:"urn:vim25 RefreshStorageInfo,omitempty"` + Res *types.RefreshStorageInfoResponse `xml:"urn:vim25 RefreshStorageInfoResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *RefreshStorageInfoBody) Fault() *soap.Fault { return b.Fault_ } + +func RefreshStorageInfo(ctx context.Context, r soap.RoundTripper, req *types.RefreshStorageInfo) (*types.RefreshStorageInfoResponse, error) { + var reqBody, resBody RefreshStorageInfoBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type RefreshStorageSystemBody struct { + Req *types.RefreshStorageSystem `xml:"urn:vim25 RefreshStorageSystem,omitempty"` + Res *types.RefreshStorageSystemResponse `xml:"urn:vim25 RefreshStorageSystemResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *RefreshStorageSystemBody) Fault() *soap.Fault { return b.Fault_ } + +func RefreshStorageSystem(ctx context.Context, r soap.RoundTripper, req *types.RefreshStorageSystem) (*types.RefreshStorageSystemResponse, error) { + var reqBody, resBody RefreshStorageSystemBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type RegisterChildVM_TaskBody struct { + Req *types.RegisterChildVM_Task `xml:"urn:vim25 RegisterChildVM_Task,omitempty"` + Res *types.RegisterChildVM_TaskResponse `xml:"urn:vim25 RegisterChildVM_TaskResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *RegisterChildVM_TaskBody) Fault() *soap.Fault { return b.Fault_ } + +func RegisterChildVM_Task(ctx context.Context, r soap.RoundTripper, req *types.RegisterChildVM_Task) (*types.RegisterChildVM_TaskResponse, error) { + var reqBody, resBody RegisterChildVM_TaskBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type RegisterExtensionBody struct { + Req *types.RegisterExtension `xml:"urn:vim25 RegisterExtension,omitempty"` + Res *types.RegisterExtensionResponse `xml:"urn:vim25 RegisterExtensionResponse,omitempty"` + Fault_ *soap.Fault 
`xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *RegisterExtensionBody) Fault() *soap.Fault { return b.Fault_ } + +func RegisterExtension(ctx context.Context, r soap.RoundTripper, req *types.RegisterExtension) (*types.RegisterExtensionResponse, error) { + var reqBody, resBody RegisterExtensionBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type RegisterVM_TaskBody struct { + Req *types.RegisterVM_Task `xml:"urn:vim25 RegisterVM_Task,omitempty"` + Res *types.RegisterVM_TaskResponse `xml:"urn:vim25 RegisterVM_TaskResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *RegisterVM_TaskBody) Fault() *soap.Fault { return b.Fault_ } + +func RegisterVM_Task(ctx context.Context, r soap.RoundTripper, req *types.RegisterVM_Task) (*types.RegisterVM_TaskResponse, error) { + var reqBody, resBody RegisterVM_TaskBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type ReleaseCredentialsInGuestBody struct { + Req *types.ReleaseCredentialsInGuest `xml:"urn:vim25 ReleaseCredentialsInGuest,omitempty"` + Res *types.ReleaseCredentialsInGuestResponse `xml:"urn:vim25 ReleaseCredentialsInGuestResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *ReleaseCredentialsInGuestBody) Fault() *soap.Fault { return b.Fault_ } + +func ReleaseCredentialsInGuest(ctx context.Context, r soap.RoundTripper, req *types.ReleaseCredentialsInGuest) (*types.ReleaseCredentialsInGuestResponse, error) { + var reqBody, resBody ReleaseCredentialsInGuestBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type ReleaseIpAllocationBody struct { + Req *types.ReleaseIpAllocation `xml:"urn:vim25 ReleaseIpAllocation,omitempty"` + Res *types.ReleaseIpAllocationResponse `xml:"urn:vim25 ReleaseIpAllocationResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *ReleaseIpAllocationBody) Fault() *soap.Fault { return b.Fault_ } + +func ReleaseIpAllocation(ctx context.Context, r soap.RoundTripper, req *types.ReleaseIpAllocation) (*types.ReleaseIpAllocationResponse, error) { + var reqBody, resBody ReleaseIpAllocationBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type ReloadBody struct { + Req *types.Reload `xml:"urn:vim25 Reload,omitempty"` + Res *types.ReloadResponse `xml:"urn:vim25 ReloadResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *ReloadBody) Fault() *soap.Fault { return b.Fault_ } + +func Reload(ctx context.Context, r soap.RoundTripper, req *types.Reload) (*types.ReloadResponse, error) { + var reqBody, resBody ReloadBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type RelocateVM_TaskBody struct { + Req *types.RelocateVM_Task `xml:"urn:vim25 RelocateVM_Task,omitempty"` + Res *types.RelocateVM_TaskResponse `xml:"urn:vim25 RelocateVM_TaskResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b 
*RelocateVM_TaskBody) Fault() *soap.Fault { return b.Fault_ } + +func RelocateVM_Task(ctx context.Context, r soap.RoundTripper, req *types.RelocateVM_Task) (*types.RelocateVM_TaskResponse, error) { + var reqBody, resBody RelocateVM_TaskBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type RemoveAlarmBody struct { + Req *types.RemoveAlarm `xml:"urn:vim25 RemoveAlarm,omitempty"` + Res *types.RemoveAlarmResponse `xml:"urn:vim25 RemoveAlarmResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *RemoveAlarmBody) Fault() *soap.Fault { return b.Fault_ } + +func RemoveAlarm(ctx context.Context, r soap.RoundTripper, req *types.RemoveAlarm) (*types.RemoveAlarmResponse, error) { + var reqBody, resBody RemoveAlarmBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type RemoveAllSnapshots_TaskBody struct { + Req *types.RemoveAllSnapshots_Task `xml:"urn:vim25 RemoveAllSnapshots_Task,omitempty"` + Res *types.RemoveAllSnapshots_TaskResponse `xml:"urn:vim25 RemoveAllSnapshots_TaskResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *RemoveAllSnapshots_TaskBody) Fault() *soap.Fault { return b.Fault_ } + +func RemoveAllSnapshots_Task(ctx context.Context, r soap.RoundTripper, req *types.RemoveAllSnapshots_Task) (*types.RemoveAllSnapshots_TaskResponse, error) { + var reqBody, resBody RemoveAllSnapshots_TaskBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type RemoveAssignedLicenseBody struct { + Req *types.RemoveAssignedLicense `xml:"urn:vim25 RemoveAssignedLicense,omitempty"` + Res *types.RemoveAssignedLicenseResponse `xml:"urn:vim25 RemoveAssignedLicenseResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *RemoveAssignedLicenseBody) Fault() *soap.Fault { return b.Fault_ } + +func RemoveAssignedLicense(ctx context.Context, r soap.RoundTripper, req *types.RemoveAssignedLicense) (*types.RemoveAssignedLicenseResponse, error) { + var reqBody, resBody RemoveAssignedLicenseBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type RemoveAuthorizationRoleBody struct { + Req *types.RemoveAuthorizationRole `xml:"urn:vim25 RemoveAuthorizationRole,omitempty"` + Res *types.RemoveAuthorizationRoleResponse `xml:"urn:vim25 RemoveAuthorizationRoleResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *RemoveAuthorizationRoleBody) Fault() *soap.Fault { return b.Fault_ } + +func RemoveAuthorizationRole(ctx context.Context, r soap.RoundTripper, req *types.RemoveAuthorizationRole) (*types.RemoveAuthorizationRoleResponse, error) { + var reqBody, resBody RemoveAuthorizationRoleBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type RemoveCustomFieldDefBody struct { + Req *types.RemoveCustomFieldDef `xml:"urn:vim25 RemoveCustomFieldDef,omitempty"` + Res *types.RemoveCustomFieldDefResponse `xml:"urn:vim25 RemoveCustomFieldDefResponse,omitempty"` + Fault_ *soap.Fault 
`xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *RemoveCustomFieldDefBody) Fault() *soap.Fault { return b.Fault_ } + +func RemoveCustomFieldDef(ctx context.Context, r soap.RoundTripper, req *types.RemoveCustomFieldDef) (*types.RemoveCustomFieldDefResponse, error) { + var reqBody, resBody RemoveCustomFieldDefBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type RemoveDatastoreBody struct { + Req *types.RemoveDatastore `xml:"urn:vim25 RemoveDatastore,omitempty"` + Res *types.RemoveDatastoreResponse `xml:"urn:vim25 RemoveDatastoreResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *RemoveDatastoreBody) Fault() *soap.Fault { return b.Fault_ } + +func RemoveDatastore(ctx context.Context, r soap.RoundTripper, req *types.RemoveDatastore) (*types.RemoveDatastoreResponse, error) { + var reqBody, resBody RemoveDatastoreBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type RemoveDatastoreEx_TaskBody struct { + Req *types.RemoveDatastoreEx_Task `xml:"urn:vim25 RemoveDatastoreEx_Task,omitempty"` + Res *types.RemoveDatastoreEx_TaskResponse `xml:"urn:vim25 RemoveDatastoreEx_TaskResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *RemoveDatastoreEx_TaskBody) Fault() *soap.Fault { return b.Fault_ } + +func RemoveDatastoreEx_Task(ctx context.Context, r soap.RoundTripper, req *types.RemoveDatastoreEx_Task) (*types.RemoveDatastoreEx_TaskResponse, error) { + var reqBody, resBody RemoveDatastoreEx_TaskBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type RemoveDiskMapping_TaskBody struct { + Req *types.RemoveDiskMapping_Task `xml:"urn:vim25 RemoveDiskMapping_Task,omitempty"` + Res *types.RemoveDiskMapping_TaskResponse `xml:"urn:vim25 RemoveDiskMapping_TaskResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *RemoveDiskMapping_TaskBody) Fault() *soap.Fault { return b.Fault_ } + +func RemoveDiskMapping_Task(ctx context.Context, r soap.RoundTripper, req *types.RemoveDiskMapping_Task) (*types.RemoveDiskMapping_TaskResponse, error) { + var reqBody, resBody RemoveDiskMapping_TaskBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type RemoveDisk_TaskBody struct { + Req *types.RemoveDisk_Task `xml:"urn:vim25 RemoveDisk_Task,omitempty"` + Res *types.RemoveDisk_TaskResponse `xml:"urn:vim25 RemoveDisk_TaskResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *RemoveDisk_TaskBody) Fault() *soap.Fault { return b.Fault_ } + +func RemoveDisk_Task(ctx context.Context, r soap.RoundTripper, req *types.RemoveDisk_Task) (*types.RemoveDisk_TaskResponse, error) { + var reqBody, resBody RemoveDisk_TaskBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type RemoveEntityPermissionBody struct { + Req *types.RemoveEntityPermission `xml:"urn:vim25 RemoveEntityPermission,omitempty"` + Res *types.RemoveEntityPermissionResponse `xml:"urn:vim25 
RemoveEntityPermissionResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *RemoveEntityPermissionBody) Fault() *soap.Fault { return b.Fault_ } + +func RemoveEntityPermission(ctx context.Context, r soap.RoundTripper, req *types.RemoveEntityPermission) (*types.RemoveEntityPermissionResponse, error) { + var reqBody, resBody RemoveEntityPermissionBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type RemoveGroupBody struct { + Req *types.RemoveGroup `xml:"urn:vim25 RemoveGroup,omitempty"` + Res *types.RemoveGroupResponse `xml:"urn:vim25 RemoveGroupResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *RemoveGroupBody) Fault() *soap.Fault { return b.Fault_ } + +func RemoveGroup(ctx context.Context, r soap.RoundTripper, req *types.RemoveGroup) (*types.RemoveGroupResponse, error) { + var reqBody, resBody RemoveGroupBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type RemoveGuestAliasBody struct { + Req *types.RemoveGuestAlias `xml:"urn:vim25 RemoveGuestAlias,omitempty"` + Res *types.RemoveGuestAliasResponse `xml:"urn:vim25 RemoveGuestAliasResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *RemoveGuestAliasBody) Fault() *soap.Fault { return b.Fault_ } + +func RemoveGuestAlias(ctx context.Context, r soap.RoundTripper, req *types.RemoveGuestAlias) (*types.RemoveGuestAliasResponse, error) { + var reqBody, resBody RemoveGuestAliasBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type RemoveGuestAliasByCertBody struct { + Req *types.RemoveGuestAliasByCert `xml:"urn:vim25 RemoveGuestAliasByCert,omitempty"` + Res *types.RemoveGuestAliasByCertResponse `xml:"urn:vim25 RemoveGuestAliasByCertResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *RemoveGuestAliasByCertBody) Fault() *soap.Fault { return b.Fault_ } + +func RemoveGuestAliasByCert(ctx context.Context, r soap.RoundTripper, req *types.RemoveGuestAliasByCert) (*types.RemoveGuestAliasByCertResponse, error) { + var reqBody, resBody RemoveGuestAliasByCertBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type RemoveInternetScsiSendTargetsBody struct { + Req *types.RemoveInternetScsiSendTargets `xml:"urn:vim25 RemoveInternetScsiSendTargets,omitempty"` + Res *types.RemoveInternetScsiSendTargetsResponse `xml:"urn:vim25 RemoveInternetScsiSendTargetsResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *RemoveInternetScsiSendTargetsBody) Fault() *soap.Fault { return b.Fault_ } + +func RemoveInternetScsiSendTargets(ctx context.Context, r soap.RoundTripper, req *types.RemoveInternetScsiSendTargets) (*types.RemoveInternetScsiSendTargetsResponse, error) { + var reqBody, resBody RemoveInternetScsiSendTargetsBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type RemoveInternetScsiStaticTargetsBody struct { + Req 
*types.RemoveInternetScsiStaticTargets `xml:"urn:vim25 RemoveInternetScsiStaticTargets,omitempty"` + Res *types.RemoveInternetScsiStaticTargetsResponse `xml:"urn:vim25 RemoveInternetScsiStaticTargetsResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *RemoveInternetScsiStaticTargetsBody) Fault() *soap.Fault { return b.Fault_ } + +func RemoveInternetScsiStaticTargets(ctx context.Context, r soap.RoundTripper, req *types.RemoveInternetScsiStaticTargets) (*types.RemoveInternetScsiStaticTargetsResponse, error) { + var reqBody, resBody RemoveInternetScsiStaticTargetsBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type RemoveLicenseBody struct { + Req *types.RemoveLicense `xml:"urn:vim25 RemoveLicense,omitempty"` + Res *types.RemoveLicenseResponse `xml:"urn:vim25 RemoveLicenseResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *RemoveLicenseBody) Fault() *soap.Fault { return b.Fault_ } + +func RemoveLicense(ctx context.Context, r soap.RoundTripper, req *types.RemoveLicense) (*types.RemoveLicenseResponse, error) { + var reqBody, resBody RemoveLicenseBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type RemoveLicenseLabelBody struct { + Req *types.RemoveLicenseLabel `xml:"urn:vim25 RemoveLicenseLabel,omitempty"` + Res *types.RemoveLicenseLabelResponse `xml:"urn:vim25 RemoveLicenseLabelResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *RemoveLicenseLabelBody) Fault() *soap.Fault { return b.Fault_ } + +func RemoveLicenseLabel(ctx context.Context, r soap.RoundTripper, req *types.RemoveLicenseLabel) (*types.RemoveLicenseLabelResponse, error) { + var reqBody, resBody RemoveLicenseLabelBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type RemoveNetworkResourcePoolBody struct { + Req *types.RemoveNetworkResourcePool `xml:"urn:vim25 RemoveNetworkResourcePool,omitempty"` + Res *types.RemoveNetworkResourcePoolResponse `xml:"urn:vim25 RemoveNetworkResourcePoolResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *RemoveNetworkResourcePoolBody) Fault() *soap.Fault { return b.Fault_ } + +func RemoveNetworkResourcePool(ctx context.Context, r soap.RoundTripper, req *types.RemoveNetworkResourcePool) (*types.RemoveNetworkResourcePoolResponse, error) { + var reqBody, resBody RemoveNetworkResourcePoolBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type RemovePerfIntervalBody struct { + Req *types.RemovePerfInterval `xml:"urn:vim25 RemovePerfInterval,omitempty"` + Res *types.RemovePerfIntervalResponse `xml:"urn:vim25 RemovePerfIntervalResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *RemovePerfIntervalBody) Fault() *soap.Fault { return b.Fault_ } + +func RemovePerfInterval(ctx context.Context, r soap.RoundTripper, req *types.RemovePerfInterval) (*types.RemovePerfIntervalResponse, error) { + var reqBody, resBody RemovePerfIntervalBody + + reqBody.Req = req + + if err := 
r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type RemovePortGroupBody struct { + Req *types.RemovePortGroup `xml:"urn:vim25 RemovePortGroup,omitempty"` + Res *types.RemovePortGroupResponse `xml:"urn:vim25 RemovePortGroupResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *RemovePortGroupBody) Fault() *soap.Fault { return b.Fault_ } + +func RemovePortGroup(ctx context.Context, r soap.RoundTripper, req *types.RemovePortGroup) (*types.RemovePortGroupResponse, error) { + var reqBody, resBody RemovePortGroupBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type RemoveScheduledTaskBody struct { + Req *types.RemoveScheduledTask `xml:"urn:vim25 RemoveScheduledTask,omitempty"` + Res *types.RemoveScheduledTaskResponse `xml:"urn:vim25 RemoveScheduledTaskResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *RemoveScheduledTaskBody) Fault() *soap.Fault { return b.Fault_ } + +func RemoveScheduledTask(ctx context.Context, r soap.RoundTripper, req *types.RemoveScheduledTask) (*types.RemoveScheduledTaskResponse, error) { + var reqBody, resBody RemoveScheduledTaskBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type RemoveServiceConsoleVirtualNicBody struct { + Req *types.RemoveServiceConsoleVirtualNic `xml:"urn:vim25 RemoveServiceConsoleVirtualNic,omitempty"` + Res *types.RemoveServiceConsoleVirtualNicResponse `xml:"urn:vim25 RemoveServiceConsoleVirtualNicResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *RemoveServiceConsoleVirtualNicBody) Fault() *soap.Fault { return b.Fault_ } + +func RemoveServiceConsoleVirtualNic(ctx context.Context, r soap.RoundTripper, req *types.RemoveServiceConsoleVirtualNic) (*types.RemoveServiceConsoleVirtualNicResponse, error) { + var reqBody, resBody RemoveServiceConsoleVirtualNicBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type RemoveSmartCardTrustAnchorBody struct { + Req *types.RemoveSmartCardTrustAnchor `xml:"urn:vim25 RemoveSmartCardTrustAnchor,omitempty"` + Res *types.RemoveSmartCardTrustAnchorResponse `xml:"urn:vim25 RemoveSmartCardTrustAnchorResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *RemoveSmartCardTrustAnchorBody) Fault() *soap.Fault { return b.Fault_ } + +func RemoveSmartCardTrustAnchor(ctx context.Context, r soap.RoundTripper, req *types.RemoveSmartCardTrustAnchor) (*types.RemoveSmartCardTrustAnchorResponse, error) { + var reqBody, resBody RemoveSmartCardTrustAnchorBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type RemoveSmartCardTrustAnchorByFingerprintBody struct { + Req *types.RemoveSmartCardTrustAnchorByFingerprint `xml:"urn:vim25 RemoveSmartCardTrustAnchorByFingerprint,omitempty"` + Res *types.RemoveSmartCardTrustAnchorByFingerprintResponse `xml:"urn:vim25 RemoveSmartCardTrustAnchorByFingerprintResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b 
*RemoveSmartCardTrustAnchorByFingerprintBody) Fault() *soap.Fault { return b.Fault_ } + +func RemoveSmartCardTrustAnchorByFingerprint(ctx context.Context, r soap.RoundTripper, req *types.RemoveSmartCardTrustAnchorByFingerprint) (*types.RemoveSmartCardTrustAnchorByFingerprintResponse, error) { + var reqBody, resBody RemoveSmartCardTrustAnchorByFingerprintBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type RemoveSnapshot_TaskBody struct { + Req *types.RemoveSnapshot_Task `xml:"urn:vim25 RemoveSnapshot_Task,omitempty"` + Res *types.RemoveSnapshot_TaskResponse `xml:"urn:vim25 RemoveSnapshot_TaskResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *RemoveSnapshot_TaskBody) Fault() *soap.Fault { return b.Fault_ } + +func RemoveSnapshot_Task(ctx context.Context, r soap.RoundTripper, req *types.RemoveSnapshot_Task) (*types.RemoveSnapshot_TaskResponse, error) { + var reqBody, resBody RemoveSnapshot_TaskBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type RemoveUserBody struct { + Req *types.RemoveUser `xml:"urn:vim25 RemoveUser,omitempty"` + Res *types.RemoveUserResponse `xml:"urn:vim25 RemoveUserResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *RemoveUserBody) Fault() *soap.Fault { return b.Fault_ } + +func RemoveUser(ctx context.Context, r soap.RoundTripper, req *types.RemoveUser) (*types.RemoveUserResponse, error) { + var reqBody, resBody RemoveUserBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type RemoveVirtualNicBody struct { + Req *types.RemoveVirtualNic `xml:"urn:vim25 RemoveVirtualNic,omitempty"` + Res *types.RemoveVirtualNicResponse `xml:"urn:vim25 RemoveVirtualNicResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *RemoveVirtualNicBody) Fault() *soap.Fault { return b.Fault_ } + +func RemoveVirtualNic(ctx context.Context, r soap.RoundTripper, req *types.RemoveVirtualNic) (*types.RemoveVirtualNicResponse, error) { + var reqBody, resBody RemoveVirtualNicBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type RemoveVirtualSwitchBody struct { + Req *types.RemoveVirtualSwitch `xml:"urn:vim25 RemoveVirtualSwitch,omitempty"` + Res *types.RemoveVirtualSwitchResponse `xml:"urn:vim25 RemoveVirtualSwitchResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *RemoveVirtualSwitchBody) Fault() *soap.Fault { return b.Fault_ } + +func RemoveVirtualSwitch(ctx context.Context, r soap.RoundTripper, req *types.RemoveVirtualSwitch) (*types.RemoveVirtualSwitchResponse, error) { + var reqBody, resBody RemoveVirtualSwitchBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type RenameCustomFieldDefBody struct { + Req *types.RenameCustomFieldDef `xml:"urn:vim25 RenameCustomFieldDef,omitempty"` + Res *types.RenameCustomFieldDefResponse `xml:"urn:vim25 RenameCustomFieldDefResponse,omitempty"` + Fault_ *soap.Fault 
`xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *RenameCustomFieldDefBody) Fault() *soap.Fault { return b.Fault_ } + +func RenameCustomFieldDef(ctx context.Context, r soap.RoundTripper, req *types.RenameCustomFieldDef) (*types.RenameCustomFieldDefResponse, error) { + var reqBody, resBody RenameCustomFieldDefBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type RenameCustomizationSpecBody struct { + Req *types.RenameCustomizationSpec `xml:"urn:vim25 RenameCustomizationSpec,omitempty"` + Res *types.RenameCustomizationSpecResponse `xml:"urn:vim25 RenameCustomizationSpecResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *RenameCustomizationSpecBody) Fault() *soap.Fault { return b.Fault_ } + +func RenameCustomizationSpec(ctx context.Context, r soap.RoundTripper, req *types.RenameCustomizationSpec) (*types.RenameCustomizationSpecResponse, error) { + var reqBody, resBody RenameCustomizationSpecBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type RenameDatastoreBody struct { + Req *types.RenameDatastore `xml:"urn:vim25 RenameDatastore,omitempty"` + Res *types.RenameDatastoreResponse `xml:"urn:vim25 RenameDatastoreResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *RenameDatastoreBody) Fault() *soap.Fault { return b.Fault_ } + +func RenameDatastore(ctx context.Context, r soap.RoundTripper, req *types.RenameDatastore) (*types.RenameDatastoreResponse, error) { + var reqBody, resBody RenameDatastoreBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type RenameSnapshotBody struct { + Req *types.RenameSnapshot `xml:"urn:vim25 RenameSnapshot,omitempty"` + Res *types.RenameSnapshotResponse `xml:"urn:vim25 RenameSnapshotResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *RenameSnapshotBody) Fault() *soap.Fault { return b.Fault_ } + +func RenameSnapshot(ctx context.Context, r soap.RoundTripper, req *types.RenameSnapshot) (*types.RenameSnapshotResponse, error) { + var reqBody, resBody RenameSnapshotBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type Rename_TaskBody struct { + Req *types.Rename_Task `xml:"urn:vim25 Rename_Task,omitempty"` + Res *types.Rename_TaskResponse `xml:"urn:vim25 Rename_TaskResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *Rename_TaskBody) Fault() *soap.Fault { return b.Fault_ } + +func Rename_Task(ctx context.Context, r soap.RoundTripper, req *types.Rename_Task) (*types.Rename_TaskResponse, error) { + var reqBody, resBody Rename_TaskBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type ReplaceCACertificatesAndCRLsBody struct { + Req *types.ReplaceCACertificatesAndCRLs `xml:"urn:vim25 ReplaceCACertificatesAndCRLs,omitempty"` + Res *types.ReplaceCACertificatesAndCRLsResponse `xml:"urn:vim25 ReplaceCACertificatesAndCRLsResponse,omitempty"` + Fault_ *soap.Fault 
`xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *ReplaceCACertificatesAndCRLsBody) Fault() *soap.Fault { return b.Fault_ } + +func ReplaceCACertificatesAndCRLs(ctx context.Context, r soap.RoundTripper, req *types.ReplaceCACertificatesAndCRLs) (*types.ReplaceCACertificatesAndCRLsResponse, error) { + var reqBody, resBody ReplaceCACertificatesAndCRLsBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type ReplaceSmartCardTrustAnchorsBody struct { + Req *types.ReplaceSmartCardTrustAnchors `xml:"urn:vim25 ReplaceSmartCardTrustAnchors,omitempty"` + Res *types.ReplaceSmartCardTrustAnchorsResponse `xml:"urn:vim25 ReplaceSmartCardTrustAnchorsResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *ReplaceSmartCardTrustAnchorsBody) Fault() *soap.Fault { return b.Fault_ } + +func ReplaceSmartCardTrustAnchors(ctx context.Context, r soap.RoundTripper, req *types.ReplaceSmartCardTrustAnchors) (*types.ReplaceSmartCardTrustAnchorsResponse, error) { + var reqBody, resBody ReplaceSmartCardTrustAnchorsBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type RescanAllHbaBody struct { + Req *types.RescanAllHba `xml:"urn:vim25 RescanAllHba,omitempty"` + Res *types.RescanAllHbaResponse `xml:"urn:vim25 RescanAllHbaResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *RescanAllHbaBody) Fault() *soap.Fault { return b.Fault_ } + +func RescanAllHba(ctx context.Context, r soap.RoundTripper, req *types.RescanAllHba) (*types.RescanAllHbaResponse, error) { + var reqBody, resBody RescanAllHbaBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type RescanHbaBody struct { + Req *types.RescanHba `xml:"urn:vim25 RescanHba,omitempty"` + Res *types.RescanHbaResponse `xml:"urn:vim25 RescanHbaResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *RescanHbaBody) Fault() *soap.Fault { return b.Fault_ } + +func RescanHba(ctx context.Context, r soap.RoundTripper, req *types.RescanHba) (*types.RescanHbaResponse, error) { + var reqBody, resBody RescanHbaBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type RescanVffsBody struct { + Req *types.RescanVffs `xml:"urn:vim25 RescanVffs,omitempty"` + Res *types.RescanVffsResponse `xml:"urn:vim25 RescanVffsResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *RescanVffsBody) Fault() *soap.Fault { return b.Fault_ } + +func RescanVffs(ctx context.Context, r soap.RoundTripper, req *types.RescanVffs) (*types.RescanVffsResponse, error) { + var reqBody, resBody RescanVffsBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type RescanVmfsBody struct { + Req *types.RescanVmfs `xml:"urn:vim25 RescanVmfs,omitempty"` + Res *types.RescanVmfsResponse `xml:"urn:vim25 RescanVmfsResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *RescanVmfsBody) Fault() *soap.Fault { 
return b.Fault_ } + +func RescanVmfs(ctx context.Context, r soap.RoundTripper, req *types.RescanVmfs) (*types.RescanVmfsResponse, error) { + var reqBody, resBody RescanVmfsBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type ResetCollectorBody struct { + Req *types.ResetCollector `xml:"urn:vim25 ResetCollector,omitempty"` + Res *types.ResetCollectorResponse `xml:"urn:vim25 ResetCollectorResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *ResetCollectorBody) Fault() *soap.Fault { return b.Fault_ } + +func ResetCollector(ctx context.Context, r soap.RoundTripper, req *types.ResetCollector) (*types.ResetCollectorResponse, error) { + var reqBody, resBody ResetCollectorBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type ResetCounterLevelMappingBody struct { + Req *types.ResetCounterLevelMapping `xml:"urn:vim25 ResetCounterLevelMapping,omitempty"` + Res *types.ResetCounterLevelMappingResponse `xml:"urn:vim25 ResetCounterLevelMappingResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *ResetCounterLevelMappingBody) Fault() *soap.Fault { return b.Fault_ } + +func ResetCounterLevelMapping(ctx context.Context, r soap.RoundTripper, req *types.ResetCounterLevelMapping) (*types.ResetCounterLevelMappingResponse, error) { + var reqBody, resBody ResetCounterLevelMappingBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type ResetEntityPermissionsBody struct { + Req *types.ResetEntityPermissions `xml:"urn:vim25 ResetEntityPermissions,omitempty"` + Res *types.ResetEntityPermissionsResponse `xml:"urn:vim25 ResetEntityPermissionsResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *ResetEntityPermissionsBody) Fault() *soap.Fault { return b.Fault_ } + +func ResetEntityPermissions(ctx context.Context, r soap.RoundTripper, req *types.ResetEntityPermissions) (*types.ResetEntityPermissionsResponse, error) { + var reqBody, resBody ResetEntityPermissionsBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type ResetFirmwareToFactoryDefaultsBody struct { + Req *types.ResetFirmwareToFactoryDefaults `xml:"urn:vim25 ResetFirmwareToFactoryDefaults,omitempty"` + Res *types.ResetFirmwareToFactoryDefaultsResponse `xml:"urn:vim25 ResetFirmwareToFactoryDefaultsResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *ResetFirmwareToFactoryDefaultsBody) Fault() *soap.Fault { return b.Fault_ } + +func ResetFirmwareToFactoryDefaults(ctx context.Context, r soap.RoundTripper, req *types.ResetFirmwareToFactoryDefaults) (*types.ResetFirmwareToFactoryDefaultsResponse, error) { + var reqBody, resBody ResetFirmwareToFactoryDefaultsBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type ResetGuestInformationBody struct { + Req *types.ResetGuestInformation `xml:"urn:vim25 ResetGuestInformation,omitempty"` + Res *types.ResetGuestInformationResponse `xml:"urn:vim25 
ResetGuestInformationResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *ResetGuestInformationBody) Fault() *soap.Fault { return b.Fault_ }
+
+func ResetGuestInformation(ctx context.Context, r soap.RoundTripper, req *types.ResetGuestInformation) (*types.ResetGuestInformationResponse, error) {
+	var reqBody, resBody ResetGuestInformationBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type ResetListViewBody struct {
+	Req *types.ResetListView `xml:"urn:vim25 ResetListView,omitempty"`
+	Res *types.ResetListViewResponse `xml:"urn:vim25 ResetListViewResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *ResetListViewBody) Fault() *soap.Fault { return b.Fault_ }
+
+func ResetListView(ctx context.Context, r soap.RoundTripper, req *types.ResetListView) (*types.ResetListViewResponse, error) {
+	var reqBody, resBody ResetListViewBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type ResetListViewFromViewBody struct {
+	Req *types.ResetListViewFromView `xml:"urn:vim25 ResetListViewFromView,omitempty"`
+	Res *types.ResetListViewFromViewResponse `xml:"urn:vim25 ResetListViewFromViewResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *ResetListViewFromViewBody) Fault() *soap.Fault { return b.Fault_ }
+
+func ResetListViewFromView(ctx context.Context, r soap.RoundTripper, req *types.ResetListViewFromView) (*types.ResetListViewFromViewResponse, error) {
+	var reqBody, resBody ResetListViewFromViewBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type ResetSystemHealthInfoBody struct {
+	Req *types.ResetSystemHealthInfo `xml:"urn:vim25 ResetSystemHealthInfo,omitempty"`
+	Res *types.ResetSystemHealthInfoResponse `xml:"urn:vim25 ResetSystemHealthInfoResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *ResetSystemHealthInfoBody) Fault() *soap.Fault { return b.Fault_ }
+
+func ResetSystemHealthInfo(ctx context.Context, r soap.RoundTripper, req *types.ResetSystemHealthInfo) (*types.ResetSystemHealthInfoResponse, error) {
+	var reqBody, resBody ResetSystemHealthInfoBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type ResetVM_TaskBody struct {
+	Req *types.ResetVM_Task `xml:"urn:vim25 ResetVM_Task,omitempty"`
+	Res *types.ResetVM_TaskResponse `xml:"urn:vim25 ResetVM_TaskResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *ResetVM_TaskBody) Fault() *soap.Fault { return b.Fault_ }
+
+func ResetVM_Task(ctx context.Context, r soap.RoundTripper, req *types.ResetVM_Task) (*types.ResetVM_TaskResponse, error) {
+	var reqBody, resBody ResetVM_TaskBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type ResignatureUnresolvedVmfsVolume_TaskBody struct {
+	Req *types.ResignatureUnresolvedVmfsVolume_Task `xml:"urn:vim25 ResignatureUnresolvedVmfsVolume_Task,omitempty"`
+	Res *types.ResignatureUnresolvedVmfsVolume_TaskResponse `xml:"urn:vim25 ResignatureUnresolvedVmfsVolume_TaskResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *ResignatureUnresolvedVmfsVolume_TaskBody) Fault() *soap.Fault { return b.Fault_ }
+
+func ResignatureUnresolvedVmfsVolume_Task(ctx context.Context, r soap.RoundTripper, req *types.ResignatureUnresolvedVmfsVolume_Task) (*types.ResignatureUnresolvedVmfsVolume_TaskResponse, error) {
+	var reqBody, resBody ResignatureUnresolvedVmfsVolume_TaskBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type ResolveInstallationErrorsOnCluster_TaskBody struct {
+	Req *types.ResolveInstallationErrorsOnCluster_Task `xml:"urn:vim25 ResolveInstallationErrorsOnCluster_Task,omitempty"`
+	Res *types.ResolveInstallationErrorsOnCluster_TaskResponse `xml:"urn:vim25 ResolveInstallationErrorsOnCluster_TaskResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *ResolveInstallationErrorsOnCluster_TaskBody) Fault() *soap.Fault { return b.Fault_ }
+
+func ResolveInstallationErrorsOnCluster_Task(ctx context.Context, r soap.RoundTripper, req *types.ResolveInstallationErrorsOnCluster_Task) (*types.ResolveInstallationErrorsOnCluster_TaskResponse, error) {
+	var reqBody, resBody ResolveInstallationErrorsOnCluster_TaskBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type ResolveInstallationErrorsOnHost_TaskBody struct {
+	Req *types.ResolveInstallationErrorsOnHost_Task `xml:"urn:vim25 ResolveInstallationErrorsOnHost_Task,omitempty"`
+	Res *types.ResolveInstallationErrorsOnHost_TaskResponse `xml:"urn:vim25 ResolveInstallationErrorsOnHost_TaskResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *ResolveInstallationErrorsOnHost_TaskBody) Fault() *soap.Fault { return b.Fault_ }
+
+func ResolveInstallationErrorsOnHost_Task(ctx context.Context, r soap.RoundTripper, req *types.ResolveInstallationErrorsOnHost_Task) (*types.ResolveInstallationErrorsOnHost_TaskResponse, error) {
+	var reqBody, resBody ResolveInstallationErrorsOnHost_TaskBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type ResolveMultipleUnresolvedVmfsVolumesBody struct {
+	Req *types.ResolveMultipleUnresolvedVmfsVolumes `xml:"urn:vim25 ResolveMultipleUnresolvedVmfsVolumes,omitempty"`
+	Res *types.ResolveMultipleUnresolvedVmfsVolumesResponse `xml:"urn:vim25 ResolveMultipleUnresolvedVmfsVolumesResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *ResolveMultipleUnresolvedVmfsVolumesBody) Fault() *soap.Fault { return b.Fault_ }
+
+func ResolveMultipleUnresolvedVmfsVolumes(ctx context.Context, r soap.RoundTripper, req *types.ResolveMultipleUnresolvedVmfsVolumes) (*types.ResolveMultipleUnresolvedVmfsVolumesResponse, error) {
+	var reqBody, resBody ResolveMultipleUnresolvedVmfsVolumesBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type ResolveMultipleUnresolvedVmfsVolumesEx_TaskBody struct {
+	Req *types.ResolveMultipleUnresolvedVmfsVolumesEx_Task `xml:"urn:vim25 ResolveMultipleUnresolvedVmfsVolumesEx_Task,omitempty"`
+	Res *types.ResolveMultipleUnresolvedVmfsVolumesEx_TaskResponse `xml:"urn:vim25 ResolveMultipleUnresolvedVmfsVolumesEx_TaskResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *ResolveMultipleUnresolvedVmfsVolumesEx_TaskBody) Fault() *soap.Fault { return b.Fault_ }
+
+func ResolveMultipleUnresolvedVmfsVolumesEx_Task(ctx context.Context, r soap.RoundTripper, req *types.ResolveMultipleUnresolvedVmfsVolumesEx_Task) (*types.ResolveMultipleUnresolvedVmfsVolumesEx_TaskResponse, error) {
+	var reqBody, resBody ResolveMultipleUnresolvedVmfsVolumesEx_TaskBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type RestartServiceBody struct {
+	Req *types.RestartService `xml:"urn:vim25 RestartService,omitempty"`
+	Res *types.RestartServiceResponse `xml:"urn:vim25 RestartServiceResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *RestartServiceBody) Fault() *soap.Fault { return b.Fault_ }
+
+func RestartService(ctx context.Context, r soap.RoundTripper, req *types.RestartService) (*types.RestartServiceResponse, error) {
+	var reqBody, resBody RestartServiceBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type RestartServiceConsoleVirtualNicBody struct {
+	Req *types.RestartServiceConsoleVirtualNic `xml:"urn:vim25 RestartServiceConsoleVirtualNic,omitempty"`
+	Res *types.RestartServiceConsoleVirtualNicResponse `xml:"urn:vim25 RestartServiceConsoleVirtualNicResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *RestartServiceConsoleVirtualNicBody) Fault() *soap.Fault { return b.Fault_ }
+
+func RestartServiceConsoleVirtualNic(ctx context.Context, r soap.RoundTripper, req *types.RestartServiceConsoleVirtualNic) (*types.RestartServiceConsoleVirtualNicResponse, error) {
+	var reqBody, resBody RestartServiceConsoleVirtualNicBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type RestoreFirmwareConfigurationBody struct {
+	Req *types.RestoreFirmwareConfiguration `xml:"urn:vim25 RestoreFirmwareConfiguration,omitempty"`
+	Res *types.RestoreFirmwareConfigurationResponse `xml:"urn:vim25 RestoreFirmwareConfigurationResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *RestoreFirmwareConfigurationBody) Fault() *soap.Fault { return b.Fault_ }
+
+func RestoreFirmwareConfiguration(ctx context.Context, r soap.RoundTripper, req *types.RestoreFirmwareConfiguration) (*types.RestoreFirmwareConfigurationResponse, error) {
+	var reqBody, resBody RestoreFirmwareConfigurationBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type RetrieveAllPermissionsBody struct {
+	Req *types.RetrieveAllPermissions `xml:"urn:vim25 RetrieveAllPermissions,omitempty"`
+	Res *types.RetrieveAllPermissionsResponse `xml:"urn:vim25 RetrieveAllPermissionsResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *RetrieveAllPermissionsBody) Fault() *soap.Fault { return b.Fault_ }
+
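Every wrapper in this generated file repeats one shape: a Body struct pairing the request and response types under the urn:vim25 XML namespace, a Fault() accessor exposing any SOAP fault, and an exported function that round-trips the envelope. A minimal usage sketch follows, assuming the vendored govmomi import paths; the endpoint URL, the use of golang.org/x/net/context (which govmomi at this revision still imports rather than the standard library), and the main function are illustrative only and not part of this patch:

    package main

    import (
    	"fmt"
    	"net/url"

    	"github.com/vmware/govmomi/vim25/methods"
    	"github.com/vmware/govmomi/vim25/soap"
    	"github.com/vmware/govmomi/vim25/types"
    	"golang.org/x/net/context"
    )

    func main() {
    	u, _ := url.Parse("https://vcenter.example.com/sdk") // hypothetical endpoint
    	c := soap.NewClient(u, true)                         // soap.Client implements soap.RoundTripper

    	// Build the typed request; every vim25 request carries a `This`
    	// managed object reference naming the target object.
    	req := types.RetrieveServiceContent{
    		This: types.ManagedObjectReference{Type: "ServiceInstance", Value: "ServiceInstance"},
    	}

    	res, err := methods.RetrieveServiceContent(context.TODO(), c, &req)
    	if err != nil {
    		fmt.Println(err) // a SOAP fault surfaces here as an error
    		return
    	}

    	fmt.Println(res.Returnval.About.FullName)
    }

Any of the generated wrappers can be substituted for RetrieveServiceContent in the same way; only the request and response types change.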
+func RetrieveAllPermissions(ctx context.Context, r soap.RoundTripper, req *types.RetrieveAllPermissions) (*types.RetrieveAllPermissionsResponse, error) {
+	var reqBody, resBody RetrieveAllPermissionsBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type RetrieveAnswerFileBody struct {
+	Req *types.RetrieveAnswerFile `xml:"urn:vim25 RetrieveAnswerFile,omitempty"`
+	Res *types.RetrieveAnswerFileResponse `xml:"urn:vim25 RetrieveAnswerFileResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *RetrieveAnswerFileBody) Fault() *soap.Fault { return b.Fault_ }
+
+func RetrieveAnswerFile(ctx context.Context, r soap.RoundTripper, req *types.RetrieveAnswerFile) (*types.RetrieveAnswerFileResponse, error) {
+	var reqBody, resBody RetrieveAnswerFileBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type RetrieveAnswerFileForProfileBody struct {
+	Req *types.RetrieveAnswerFileForProfile `xml:"urn:vim25 RetrieveAnswerFileForProfile,omitempty"`
+	Res *types.RetrieveAnswerFileForProfileResponse `xml:"urn:vim25 RetrieveAnswerFileForProfileResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *RetrieveAnswerFileForProfileBody) Fault() *soap.Fault { return b.Fault_ }
+
+func RetrieveAnswerFileForProfile(ctx context.Context, r soap.RoundTripper, req *types.RetrieveAnswerFileForProfile) (*types.RetrieveAnswerFileForProfileResponse, error) {
+	var reqBody, resBody RetrieveAnswerFileForProfileBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type RetrieveArgumentDescriptionBody struct {
+	Req *types.RetrieveArgumentDescription `xml:"urn:vim25 RetrieveArgumentDescription,omitempty"`
+	Res *types.RetrieveArgumentDescriptionResponse `xml:"urn:vim25 RetrieveArgumentDescriptionResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *RetrieveArgumentDescriptionBody) Fault() *soap.Fault { return b.Fault_ }
+
+func RetrieveArgumentDescription(ctx context.Context, r soap.RoundTripper, req *types.RetrieveArgumentDescription) (*types.RetrieveArgumentDescriptionResponse, error) {
+	var reqBody, resBody RetrieveArgumentDescriptionBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type RetrieveDasAdvancedRuntimeInfoBody struct {
+	Req *types.RetrieveDasAdvancedRuntimeInfo `xml:"urn:vim25 RetrieveDasAdvancedRuntimeInfo,omitempty"`
+	Res *types.RetrieveDasAdvancedRuntimeInfoResponse `xml:"urn:vim25 RetrieveDasAdvancedRuntimeInfoResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *RetrieveDasAdvancedRuntimeInfoBody) Fault() *soap.Fault { return b.Fault_ }
+
+func RetrieveDasAdvancedRuntimeInfo(ctx context.Context, r soap.RoundTripper, req *types.RetrieveDasAdvancedRuntimeInfo) (*types.RetrieveDasAdvancedRuntimeInfoResponse, error) {
+	var reqBody, resBody RetrieveDasAdvancedRuntimeInfoBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type RetrieveDescriptionBody struct {
+	Req *types.RetrieveDescription `xml:"urn:vim25 RetrieveDescription,omitempty"`
+	Res *types.RetrieveDescriptionResponse `xml:"urn:vim25 RetrieveDescriptionResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *RetrieveDescriptionBody) Fault() *soap.Fault { return b.Fault_ }
+
+func RetrieveDescription(ctx context.Context, r soap.RoundTripper, req *types.RetrieveDescription) (*types.RetrieveDescriptionResponse, error) {
+	var reqBody, resBody RetrieveDescriptionBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type RetrieveDiskPartitionInfoBody struct {
+	Req *types.RetrieveDiskPartitionInfo `xml:"urn:vim25 RetrieveDiskPartitionInfo,omitempty"`
+	Res *types.RetrieveDiskPartitionInfoResponse `xml:"urn:vim25 RetrieveDiskPartitionInfoResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *RetrieveDiskPartitionInfoBody) Fault() *soap.Fault { return b.Fault_ }
+
+func RetrieveDiskPartitionInfo(ctx context.Context, r soap.RoundTripper, req *types.RetrieveDiskPartitionInfo) (*types.RetrieveDiskPartitionInfoResponse, error) {
+	var reqBody, resBody RetrieveDiskPartitionInfoBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type RetrieveEntityPermissionsBody struct {
+	Req *types.RetrieveEntityPermissions `xml:"urn:vim25 RetrieveEntityPermissions,omitempty"`
+	Res *types.RetrieveEntityPermissionsResponse `xml:"urn:vim25 RetrieveEntityPermissionsResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *RetrieveEntityPermissionsBody) Fault() *soap.Fault { return b.Fault_ }
+
+func RetrieveEntityPermissions(ctx context.Context, r soap.RoundTripper, req *types.RetrieveEntityPermissions) (*types.RetrieveEntityPermissionsResponse, error) {
+	var reqBody, resBody RetrieveEntityPermissionsBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type RetrieveEntityScheduledTaskBody struct {
+	Req *types.RetrieveEntityScheduledTask `xml:"urn:vim25 RetrieveEntityScheduledTask,omitempty"`
+	Res *types.RetrieveEntityScheduledTaskResponse `xml:"urn:vim25 RetrieveEntityScheduledTaskResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *RetrieveEntityScheduledTaskBody) Fault() *soap.Fault { return b.Fault_ }
+
+func RetrieveEntityScheduledTask(ctx context.Context, r soap.RoundTripper, req *types.RetrieveEntityScheduledTask) (*types.RetrieveEntityScheduledTaskResponse, error) {
+	var reqBody, resBody RetrieveEntityScheduledTaskBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type RetrieveHardwareUptimeBody struct {
+	Req *types.RetrieveHardwareUptime `xml:"urn:vim25 RetrieveHardwareUptime,omitempty"`
+	Res *types.RetrieveHardwareUptimeResponse `xml:"urn:vim25 RetrieveHardwareUptimeResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *RetrieveHardwareUptimeBody) Fault() *soap.Fault { return b.Fault_ }
+
+func RetrieveHardwareUptime(ctx context.Context, r soap.RoundTripper, req *types.RetrieveHardwareUptime) (*types.RetrieveHardwareUptimeResponse, error) {
+	var reqBody, resBody RetrieveHardwareUptimeBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type RetrieveHostAccessControlEntriesBody struct {
+	Req *types.RetrieveHostAccessControlEntries `xml:"urn:vim25 RetrieveHostAccessControlEntries,omitempty"`
+	Res *types.RetrieveHostAccessControlEntriesResponse `xml:"urn:vim25 RetrieveHostAccessControlEntriesResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *RetrieveHostAccessControlEntriesBody) Fault() *soap.Fault { return b.Fault_ }
+
+func RetrieveHostAccessControlEntries(ctx context.Context, r soap.RoundTripper, req *types.RetrieveHostAccessControlEntries) (*types.RetrieveHostAccessControlEntriesResponse, error) {
+	var reqBody, resBody RetrieveHostAccessControlEntriesBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type RetrieveObjectScheduledTaskBody struct {
+	Req *types.RetrieveObjectScheduledTask `xml:"urn:vim25 RetrieveObjectScheduledTask,omitempty"`
+	Res *types.RetrieveObjectScheduledTaskResponse `xml:"urn:vim25 RetrieveObjectScheduledTaskResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *RetrieveObjectScheduledTaskBody) Fault() *soap.Fault { return b.Fault_ }
+
+func RetrieveObjectScheduledTask(ctx context.Context, r soap.RoundTripper, req *types.RetrieveObjectScheduledTask) (*types.RetrieveObjectScheduledTaskResponse, error) {
+	var reqBody, resBody RetrieveObjectScheduledTaskBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type RetrieveProductComponentsBody struct {
+	Req *types.RetrieveProductComponents `xml:"urn:vim25 RetrieveProductComponents,omitempty"`
+	Res *types.RetrieveProductComponentsResponse `xml:"urn:vim25 RetrieveProductComponentsResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *RetrieveProductComponentsBody) Fault() *soap.Fault { return b.Fault_ }
+
+func RetrieveProductComponents(ctx context.Context, r soap.RoundTripper, req *types.RetrieveProductComponents) (*types.RetrieveProductComponentsResponse, error) {
+	var reqBody, resBody RetrieveProductComponentsBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type RetrievePropertiesBody struct {
+	Req *types.RetrieveProperties `xml:"urn:vim25 RetrieveProperties,omitempty"`
+	Res *types.RetrievePropertiesResponse `xml:"urn:vim25 RetrievePropertiesResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *RetrievePropertiesBody) Fault() *soap.Fault { return b.Fault_ }
+
+func RetrieveProperties(ctx context.Context, r soap.RoundTripper, req *types.RetrieveProperties) (*types.RetrievePropertiesResponse, error) {
+	var reqBody, resBody RetrievePropertiesBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type RetrievePropertiesExBody struct {
+	Req *types.RetrievePropertiesEx `xml:"urn:vim25 RetrievePropertiesEx,omitempty"`
+	Res *types.RetrievePropertiesExResponse `xml:"urn:vim25 RetrievePropertiesExResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *RetrievePropertiesExBody) Fault() *soap.Fault { return b.Fault_ }
+
+func RetrievePropertiesEx(ctx context.Context, r soap.RoundTripper, req *types.RetrievePropertiesEx) (*types.RetrievePropertiesExResponse, error) {
+	var reqBody, resBody RetrievePropertiesExBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type RetrieveRolePermissionsBody struct {
+	Req *types.RetrieveRolePermissions `xml:"urn:vim25 RetrieveRolePermissions,omitempty"`
+	Res *types.RetrieveRolePermissionsResponse `xml:"urn:vim25 RetrieveRolePermissionsResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *RetrieveRolePermissionsBody) Fault() *soap.Fault { return b.Fault_ }
+
+func RetrieveRolePermissions(ctx context.Context, r soap.RoundTripper, req *types.RetrieveRolePermissions) (*types.RetrieveRolePermissionsResponse, error) {
+	var reqBody, resBody RetrieveRolePermissionsBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type RetrieveServiceContentBody struct {
+	Req *types.RetrieveServiceContent `xml:"urn:vim25 RetrieveServiceContent,omitempty"`
+	Res *types.RetrieveServiceContentResponse `xml:"urn:vim25 RetrieveServiceContentResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *RetrieveServiceContentBody) Fault() *soap.Fault { return b.Fault_ }
+
+func RetrieveServiceContent(ctx context.Context, r soap.RoundTripper, req *types.RetrieveServiceContent) (*types.RetrieveServiceContentResponse, error) {
+	var reqBody, resBody RetrieveServiceContentBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type RetrieveUserGroupsBody struct {
+	Req *types.RetrieveUserGroups `xml:"urn:vim25 RetrieveUserGroups,omitempty"`
+	Res *types.RetrieveUserGroupsResponse `xml:"urn:vim25 RetrieveUserGroupsResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *RetrieveUserGroupsBody) Fault() *soap.Fault { return b.Fault_ }
+
+func RetrieveUserGroups(ctx context.Context, r soap.RoundTripper, req *types.RetrieveUserGroups) (*types.RetrieveUserGroupsResponse, error) {
+	var reqBody, resBody RetrieveUserGroupsBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type RevertToCurrentSnapshot_TaskBody struct {
+	Req *types.RevertToCurrentSnapshot_Task `xml:"urn:vim25 RevertToCurrentSnapshot_Task,omitempty"`
+	Res *types.RevertToCurrentSnapshot_TaskResponse `xml:"urn:vim25 RevertToCurrentSnapshot_TaskResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *RevertToCurrentSnapshot_TaskBody) Fault() *soap.Fault { return b.Fault_ }
+
+func RevertToCurrentSnapshot_Task(ctx context.Context, r soap.RoundTripper, req *types.RevertToCurrentSnapshot_Task) (*types.RevertToCurrentSnapshot_TaskResponse, error) {
+	var reqBody, resBody RevertToCurrentSnapshot_TaskBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type RevertToSnapshot_TaskBody struct {
+	Req *types.RevertToSnapshot_Task `xml:"urn:vim25 RevertToSnapshot_Task,omitempty"`
+	Res *types.RevertToSnapshot_TaskResponse `xml:"urn:vim25 RevertToSnapshot_TaskResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *RevertToSnapshot_TaskBody) Fault() *soap.Fault { return b.Fault_ }
+
+func RevertToSnapshot_Task(ctx context.Context, r soap.RoundTripper, req *types.RevertToSnapshot_Task) (*types.RevertToSnapshot_TaskResponse, error) {
+	var reqBody, resBody RevertToSnapshot_TaskBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type RewindCollectorBody struct {
+	Req *types.RewindCollector `xml:"urn:vim25 RewindCollector,omitempty"`
+	Res *types.RewindCollectorResponse `xml:"urn:vim25 RewindCollectorResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *RewindCollectorBody) Fault() *soap.Fault { return b.Fault_ }
+
+func RewindCollector(ctx context.Context, r soap.RoundTripper, req *types.RewindCollector) (*types.RewindCollectorResponse, error) {
+	var reqBody, resBody RewindCollectorBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type RunScheduledTaskBody struct {
+	Req *types.RunScheduledTask `xml:"urn:vim25 RunScheduledTask,omitempty"`
+	Res *types.RunScheduledTaskResponse `xml:"urn:vim25 RunScheduledTaskResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *RunScheduledTaskBody) Fault() *soap.Fault { return b.Fault_ }
+
+func RunScheduledTask(ctx context.Context, r soap.RoundTripper, req *types.RunScheduledTask) (*types.RunScheduledTaskResponse, error) {
+	var reqBody, resBody RunScheduledTaskBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type RunVsanPhysicalDiskDiagnosticsBody struct {
+	Req *types.RunVsanPhysicalDiskDiagnostics `xml:"urn:vim25 RunVsanPhysicalDiskDiagnostics,omitempty"`
+	Res *types.RunVsanPhysicalDiskDiagnosticsResponse `xml:"urn:vim25 RunVsanPhysicalDiskDiagnosticsResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *RunVsanPhysicalDiskDiagnosticsBody) Fault() *soap.Fault { return b.Fault_ }
+
+func RunVsanPhysicalDiskDiagnostics(ctx context.Context, r soap.RoundTripper, req *types.RunVsanPhysicalDiskDiagnostics) (*types.RunVsanPhysicalDiskDiagnosticsResponse, error) {
+	var reqBody, resBody RunVsanPhysicalDiskDiagnosticsBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type ScanHostPatchV2_TaskBody struct {
+	Req *types.ScanHostPatchV2_Task `xml:"urn:vim25 ScanHostPatchV2_Task,omitempty"`
+	Res *types.ScanHostPatchV2_TaskResponse `xml:"urn:vim25 ScanHostPatchV2_TaskResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *ScanHostPatchV2_TaskBody) Fault() *soap.Fault { return b.Fault_ }
+
+func ScanHostPatchV2_Task(ctx context.Context, r soap.RoundTripper, req *types.ScanHostPatchV2_Task) (*types.ScanHostPatchV2_TaskResponse, error) {
+	var reqBody, resBody ScanHostPatchV2_TaskBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type ScanHostPatch_TaskBody struct {
+	Req *types.ScanHostPatch_Task `xml:"urn:vim25 ScanHostPatch_Task,omitempty"`
+	Res *types.ScanHostPatch_TaskResponse `xml:"urn:vim25 ScanHostPatch_TaskResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *ScanHostPatch_TaskBody) Fault() *soap.Fault { return b.Fault_ }
+
+func ScanHostPatch_Task(ctx context.Context, r soap.RoundTripper, req *types.ScanHostPatch_Task) (*types.ScanHostPatch_TaskResponse, error) {
+	var reqBody, resBody ScanHostPatch_TaskBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type SearchDatastoreSubFolders_TaskBody struct {
+	Req *types.SearchDatastoreSubFolders_Task `xml:"urn:vim25 SearchDatastoreSubFolders_Task,omitempty"`
+	Res *types.SearchDatastoreSubFolders_TaskResponse `xml:"urn:vim25 SearchDatastoreSubFolders_TaskResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *SearchDatastoreSubFolders_TaskBody) Fault() *soap.Fault { return b.Fault_ }
+
+func SearchDatastoreSubFolders_Task(ctx context.Context, r soap.RoundTripper, req *types.SearchDatastoreSubFolders_Task) (*types.SearchDatastoreSubFolders_TaskResponse, error) {
+	var reqBody, resBody SearchDatastoreSubFolders_TaskBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type SearchDatastore_TaskBody struct {
+	Req *types.SearchDatastore_Task `xml:"urn:vim25 SearchDatastore_Task,omitempty"`
+	Res *types.SearchDatastore_TaskResponse `xml:"urn:vim25 SearchDatastore_TaskResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *SearchDatastore_TaskBody) Fault() *soap.Fault { return b.Fault_ }
+
+func SearchDatastore_Task(ctx context.Context, r soap.RoundTripper, req *types.SearchDatastore_Task) (*types.SearchDatastore_TaskResponse, error) {
+	var reqBody, resBody SearchDatastore_TaskBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type SelectActivePartitionBody struct {
+	Req *types.SelectActivePartition `xml:"urn:vim25 SelectActivePartition,omitempty"`
+	Res *types.SelectActivePartitionResponse `xml:"urn:vim25 SelectActivePartitionResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *SelectActivePartitionBody) Fault() *soap.Fault { return b.Fault_ }
+
+func SelectActivePartition(ctx context.Context, r soap.RoundTripper, req *types.SelectActivePartition) (*types.SelectActivePartitionResponse, error) {
+	var reqBody, resBody SelectActivePartitionBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type SelectVnicBody struct {
+	Req *types.SelectVnic `xml:"urn:vim25 SelectVnic,omitempty"`
+	Res *types.SelectVnicResponse `xml:"urn:vim25 SelectVnicResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *SelectVnicBody) Fault() *soap.Fault { return b.Fault_ }
+
+func SelectVnic(ctx context.Context, r soap.RoundTripper, req *types.SelectVnic) (*types.SelectVnicResponse, error) {
+	var reqBody, resBody SelectVnicBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type SelectVnicForNicTypeBody struct {
+	Req *types.SelectVnicForNicType `xml:"urn:vim25 SelectVnicForNicType,omitempty"`
+	Res *types.SelectVnicForNicTypeResponse `xml:"urn:vim25 SelectVnicForNicTypeResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *SelectVnicForNicTypeBody) Fault() *soap.Fault { return b.Fault_ }
+
+func SelectVnicForNicType(ctx context.Context, r soap.RoundTripper, req *types.SelectVnicForNicType) (*types.SelectVnicForNicTypeResponse, error) {
+	var reqBody, resBody SelectVnicForNicTypeBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type SendNMIBody struct {
+	Req *types.SendNMI `xml:"urn:vim25 SendNMI,omitempty"`
+	Res *types.SendNMIResponse `xml:"urn:vim25 SendNMIResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *SendNMIBody) Fault() *soap.Fault { return b.Fault_ }
+
+func SendNMI(ctx context.Context, r soap.RoundTripper, req *types.SendNMI) (*types.SendNMIResponse, error) {
+	var reqBody, resBody SendNMIBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type SendTestNotificationBody struct {
+	Req *types.SendTestNotification `xml:"urn:vim25 SendTestNotification,omitempty"`
+	Res *types.SendTestNotificationResponse `xml:"urn:vim25 SendTestNotificationResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *SendTestNotificationBody) Fault() *soap.Fault { return b.Fault_ }
+
+func SendTestNotification(ctx context.Context, r soap.RoundTripper, req *types.SendTestNotification) (*types.SendTestNotificationResponse, error) {
+	var reqBody, resBody SendTestNotificationBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type SessionIsActiveBody struct {
+	Req *types.SessionIsActive `xml:"urn:vim25 SessionIsActive,omitempty"`
+	Res *types.SessionIsActiveResponse `xml:"urn:vim25 SessionIsActiveResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *SessionIsActiveBody) Fault() *soap.Fault { return b.Fault_ }
+
+func SessionIsActive(ctx context.Context, r soap.RoundTripper, req *types.SessionIsActive) (*types.SessionIsActiveResponse, error) {
+	var reqBody, resBody SessionIsActiveBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type SetCollectorPageSizeBody struct {
+	Req *types.SetCollectorPageSize `xml:"urn:vim25 SetCollectorPageSize,omitempty"`
+	Res *types.SetCollectorPageSizeResponse `xml:"urn:vim25 SetCollectorPageSizeResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *SetCollectorPageSizeBody) Fault() *soap.Fault { return b.Fault_ }
+
+func SetCollectorPageSize(ctx context.Context, r soap.RoundTripper, req *types.SetCollectorPageSize) (*types.SetCollectorPageSizeResponse, error) {
+	var reqBody, resBody SetCollectorPageSizeBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type SetDisplayTopologyBody struct {
+	Req *types.SetDisplayTopology `xml:"urn:vim25 SetDisplayTopology,omitempty"`
+	Res *types.SetDisplayTopologyResponse `xml:"urn:vim25 SetDisplayTopologyResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *SetDisplayTopologyBody) Fault() *soap.Fault { return b.Fault_ }
+
+func SetDisplayTopology(ctx context.Context, r soap.RoundTripper, req *types.SetDisplayTopology) (*types.SetDisplayTopologyResponse, error) {
+	var reqBody, resBody SetDisplayTopologyBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type SetEntityPermissionsBody struct {
+	Req *types.SetEntityPermissions `xml:"urn:vim25 SetEntityPermissions,omitempty"`
+	Res *types.SetEntityPermissionsResponse `xml:"urn:vim25 SetEntityPermissionsResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *SetEntityPermissionsBody) Fault() *soap.Fault { return b.Fault_ }
+
+func SetEntityPermissions(ctx context.Context, r soap.RoundTripper, req *types.SetEntityPermissions) (*types.SetEntityPermissionsResponse, error) {
+	var reqBody, resBody SetEntityPermissionsBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type SetExtensionCertificateBody struct {
+	Req *types.SetExtensionCertificate `xml:"urn:vim25 SetExtensionCertificate,omitempty"`
+	Res *types.SetExtensionCertificateResponse `xml:"urn:vim25 SetExtensionCertificateResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *SetExtensionCertificateBody) Fault() *soap.Fault { return b.Fault_ }
+
+func SetExtensionCertificate(ctx context.Context, r soap.RoundTripper, req *types.SetExtensionCertificate) (*types.SetExtensionCertificateResponse, error) {
+	var reqBody, resBody SetExtensionCertificateBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type SetFieldBody struct {
+	Req *types.SetField `xml:"urn:vim25 SetField,omitempty"`
+	Res *types.SetFieldResponse `xml:"urn:vim25 SetFieldResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *SetFieldBody) Fault() *soap.Fault { return b.Fault_ }
+
+func SetField(ctx context.Context, r soap.RoundTripper, req *types.SetField) (*types.SetFieldResponse, error) {
+	var reqBody, resBody SetFieldBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type SetLicenseEditionBody struct {
+	Req *types.SetLicenseEdition `xml:"urn:vim25 SetLicenseEdition,omitempty"`
+	Res *types.SetLicenseEditionResponse `xml:"urn:vim25 SetLicenseEditionResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *SetLicenseEditionBody) Fault() *soap.Fault { return b.Fault_ }
+
+func SetLicenseEdition(ctx context.Context, r soap.RoundTripper, req *types.SetLicenseEdition) (*types.SetLicenseEditionResponse, error) {
+	var reqBody, resBody SetLicenseEditionBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type SetLocaleBody struct {
+	Req *types.SetLocale `xml:"urn:vim25 SetLocale,omitempty"`
+	Res *types.SetLocaleResponse `xml:"urn:vim25 SetLocaleResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *SetLocaleBody) Fault() *soap.Fault { return b.Fault_ }
+
+func SetLocale(ctx context.Context, r soap.RoundTripper, req *types.SetLocale) (*types.SetLocaleResponse, error) {
+	var reqBody, resBody SetLocaleBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type SetManagedByVDCBody struct {
+	Req *types.SetManagedByVDC `xml:"urn:vim25 SetManagedByVDC,omitempty"`
+	Res *types.SetManagedByVDCResponse `xml:"urn:vim25 SetManagedByVDCResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *SetManagedByVDCBody) Fault() *soap.Fault { return b.Fault_ }
+
+func SetManagedByVDC(ctx context.Context, r soap.RoundTripper, req *types.SetManagedByVDC) (*types.SetManagedByVDCResponse, error) {
+	var reqBody, resBody SetManagedByVDCBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type SetMultipathLunPolicyBody struct {
+	Req *types.SetMultipathLunPolicy `xml:"urn:vim25 SetMultipathLunPolicy,omitempty"`
+	Res *types.SetMultipathLunPolicyResponse `xml:"urn:vim25 SetMultipathLunPolicyResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *SetMultipathLunPolicyBody) Fault() *soap.Fault { return b.Fault_ }
+
+func SetMultipathLunPolicy(ctx context.Context, r soap.RoundTripper, req *types.SetMultipathLunPolicy) (*types.SetMultipathLunPolicyResponse, error) {
+	var reqBody, resBody SetMultipathLunPolicyBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type SetNFSUserBody struct {
+	Req *types.SetNFSUser `xml:"urn:vim25 SetNFSUser,omitempty"`
+	Res *types.SetNFSUserResponse `xml:"urn:vim25 SetNFSUserResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *SetNFSUserBody) Fault() *soap.Fault { return b.Fault_ }
+
+func SetNFSUser(ctx context.Context, r soap.RoundTripper, req *types.SetNFSUser) (*types.SetNFSUserResponse, error) {
+	var reqBody, resBody SetNFSUserBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type SetPublicKeyBody struct {
+	Req *types.SetPublicKey `xml:"urn:vim25 SetPublicKey,omitempty"`
+	Res *types.SetPublicKeyResponse `xml:"urn:vim25 SetPublicKeyResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *SetPublicKeyBody) Fault() *soap.Fault { return b.Fault_ }
+
+func SetPublicKey(ctx context.Context, r soap.RoundTripper, req *types.SetPublicKey) (*types.SetPublicKeyResponse, error) {
+	var reqBody, resBody SetPublicKeyBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type SetRegistryValueInGuestBody struct {
+	Req *types.SetRegistryValueInGuest `xml:"urn:vim25 SetRegistryValueInGuest,omitempty"`
+	Res *types.SetRegistryValueInGuestResponse `xml:"urn:vim25 SetRegistryValueInGuestResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *SetRegistryValueInGuestBody) Fault() *soap.Fault { return b.Fault_ }
+
+func SetRegistryValueInGuest(ctx context.Context, r soap.RoundTripper, req *types.SetRegistryValueInGuest) (*types.SetRegistryValueInGuestResponse, error) {
+	var reqBody, resBody SetRegistryValueInGuestBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type SetScreenResolutionBody struct {
+	Req *types.SetScreenResolution `xml:"urn:vim25 SetScreenResolution,omitempty"`
+	Res *types.SetScreenResolutionResponse `xml:"urn:vim25 SetScreenResolutionResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *SetScreenResolutionBody) Fault() *soap.Fault { return b.Fault_ }
+
+func SetScreenResolution(ctx context.Context, r soap.RoundTripper, req *types.SetScreenResolution) (*types.SetScreenResolutionResponse, error) {
+	var reqBody, resBody SetScreenResolutionBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type SetTaskDescriptionBody struct {
+	Req *types.SetTaskDescription `xml:"urn:vim25 SetTaskDescription,omitempty"`
+	Res *types.SetTaskDescriptionResponse `xml:"urn:vim25 SetTaskDescriptionResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *SetTaskDescriptionBody) Fault() *soap.Fault { return b.Fault_ }
+
+func SetTaskDescription(ctx context.Context, r soap.RoundTripper, req *types.SetTaskDescription) (*types.SetTaskDescriptionResponse, error) {
+	var reqBody, resBody SetTaskDescriptionBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type SetTaskStateBody struct {
+	Req *types.SetTaskState `xml:"urn:vim25 SetTaskState,omitempty"`
+	Res *types.SetTaskStateResponse `xml:"urn:vim25 SetTaskStateResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *SetTaskStateBody) Fault() *soap.Fault { return b.Fault_ }
+
+func SetTaskState(ctx context.Context, r soap.RoundTripper, req *types.SetTaskState) (*types.SetTaskStateResponse, error) {
+	var reqBody, resBody SetTaskStateBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type SetVirtualDiskUuidBody struct {
+	Req *types.SetVirtualDiskUuid `xml:"urn:vim25 SetVirtualDiskUuid,omitempty"`
+	Res *types.SetVirtualDiskUuidResponse `xml:"urn:vim25 SetVirtualDiskUuidResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *SetVirtualDiskUuidBody) Fault() *soap.Fault { return b.Fault_ }
+
+func SetVirtualDiskUuid(ctx context.Context, r soap.RoundTripper, req *types.SetVirtualDiskUuid) (*types.SetVirtualDiskUuidResponse, error) {
+	var reqBody, resBody SetVirtualDiskUuidBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type ShrinkVirtualDisk_TaskBody struct {
+	Req *types.ShrinkVirtualDisk_Task `xml:"urn:vim25 ShrinkVirtualDisk_Task,omitempty"`
+	Res *types.ShrinkVirtualDisk_TaskResponse `xml:"urn:vim25 ShrinkVirtualDisk_TaskResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *ShrinkVirtualDisk_TaskBody) Fault() *soap.Fault { return b.Fault_ }
+
+func ShrinkVirtualDisk_Task(ctx context.Context, r soap.RoundTripper, req *types.ShrinkVirtualDisk_Task) (*types.ShrinkVirtualDisk_TaskResponse, error) {
+	var reqBody, resBody ShrinkVirtualDisk_TaskBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type ShutdownGuestBody struct {
+	Req *types.ShutdownGuest `xml:"urn:vim25 ShutdownGuest,omitempty"`
+	Res *types.ShutdownGuestResponse `xml:"urn:vim25 ShutdownGuestResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *ShutdownGuestBody) Fault() *soap.Fault { return b.Fault_ }
+
+func ShutdownGuest(ctx context.Context, r soap.RoundTripper, req *types.ShutdownGuest) (*types.ShutdownGuestResponse, error) {
+	var reqBody, resBody ShutdownGuestBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type ShutdownHost_TaskBody struct {
+	Req *types.ShutdownHost_Task `xml:"urn:vim25 ShutdownHost_Task,omitempty"`
+	Res *types.ShutdownHost_TaskResponse `xml:"urn:vim25 ShutdownHost_TaskResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *ShutdownHost_TaskBody) Fault() *soap.Fault { return b.Fault_ }
+
+func ShutdownHost_Task(ctx context.Context, r soap.RoundTripper, req *types.ShutdownHost_Task) (*types.ShutdownHost_TaskResponse, error) {
+	var reqBody, resBody ShutdownHost_TaskBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type StageHostPatch_TaskBody struct {
+	Req *types.StageHostPatch_Task `xml:"urn:vim25 StageHostPatch_Task,omitempty"`
+	Res *types.StageHostPatch_TaskResponse `xml:"urn:vim25 StageHostPatch_TaskResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *StageHostPatch_TaskBody) Fault() *soap.Fault { return b.Fault_ }
+
+func StageHostPatch_Task(ctx context.Context, r soap.RoundTripper, req *types.StageHostPatch_Task) (*types.StageHostPatch_TaskResponse, error) {
+	var reqBody, resBody StageHostPatch_TaskBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type StampAllRulesWithUuid_TaskBody struct {
+	Req *types.StampAllRulesWithUuid_Task `xml:"urn:vim25 StampAllRulesWithUuid_Task,omitempty"`
+	Res *types.StampAllRulesWithUuid_TaskResponse `xml:"urn:vim25 StampAllRulesWithUuid_TaskResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *StampAllRulesWithUuid_TaskBody) Fault() *soap.Fault { return b.Fault_ }
+
+func StampAllRulesWithUuid_Task(ctx context.Context, r soap.RoundTripper, req *types.StampAllRulesWithUuid_Task) (*types.StampAllRulesWithUuid_TaskResponse, error) {
+	var reqBody, resBody StampAllRulesWithUuid_TaskBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type StandbyGuestBody struct {
+	Req *types.StandbyGuest `xml:"urn:vim25 StandbyGuest,omitempty"`
+	Res *types.StandbyGuestResponse `xml:"urn:vim25 StandbyGuestResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *StandbyGuestBody) Fault() *soap.Fault { return b.Fault_ }
+
+func StandbyGuest(ctx context.Context, r soap.RoundTripper, req *types.StandbyGuest) (*types.StandbyGuestResponse, error) {
+	var reqBody, resBody StandbyGuestBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type StartProgramInGuestBody struct {
+	Req *types.StartProgramInGuest `xml:"urn:vim25 StartProgramInGuest,omitempty"`
+	Res *types.StartProgramInGuestResponse `xml:"urn:vim25 StartProgramInGuestResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *StartProgramInGuestBody) Fault() *soap.Fault { return b.Fault_ }
+
+func StartProgramInGuest(ctx context.Context, r soap.RoundTripper, req *types.StartProgramInGuest) (*types.StartProgramInGuestResponse, error) {
+	var reqBody, resBody StartProgramInGuestBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type StartRecording_TaskBody struct {
+	Req *types.StartRecording_Task `xml:"urn:vim25 StartRecording_Task,omitempty"`
+	Res *types.StartRecording_TaskResponse `xml:"urn:vim25 StartRecording_TaskResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *StartRecording_TaskBody) Fault() *soap.Fault { return b.Fault_ }
+
+func StartRecording_Task(ctx context.Context, r soap.RoundTripper, req *types.StartRecording_Task) (*types.StartRecording_TaskResponse, error) {
+	var reqBody, resBody StartRecording_TaskBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type StartReplaying_TaskBody struct {
+	Req *types.StartReplaying_Task `xml:"urn:vim25 StartReplaying_Task,omitempty"`
+	Res *types.StartReplaying_TaskResponse `xml:"urn:vim25 StartReplaying_TaskResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *StartReplaying_TaskBody) Fault() *soap.Fault { return b.Fault_ }
+
+func StartReplaying_Task(ctx context.Context, r soap.RoundTripper, req *types.StartReplaying_Task) (*types.StartReplaying_TaskResponse, error) {
+	var reqBody, resBody StartReplaying_TaskBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type StartServiceBody struct {
+	Req *types.StartService `xml:"urn:vim25 StartService,omitempty"`
+	Res *types.StartServiceResponse `xml:"urn:vim25 StartServiceResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *StartServiceBody) Fault() *soap.Fault { return b.Fault_ }
+
+func StartService(ctx context.Context, r soap.RoundTripper, req *types.StartService) (*types.StartServiceResponse, error) {
+	var reqBody, resBody StartServiceBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type StopRecording_TaskBody struct {
+	Req *types.StopRecording_Task `xml:"urn:vim25 StopRecording_Task,omitempty"`
+	Res *types.StopRecording_TaskResponse `xml:"urn:vim25 StopRecording_TaskResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *StopRecording_TaskBody) Fault() *soap.Fault { return b.Fault_ }
+
+func StopRecording_Task(ctx context.Context, r soap.RoundTripper, req *types.StopRecording_Task) (*types.StopRecording_TaskResponse, error) {
+	var reqBody, resBody StopRecording_TaskBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type StopReplaying_TaskBody struct {
+	Req *types.StopReplaying_Task `xml:"urn:vim25 StopReplaying_Task,omitempty"`
+	Res *types.StopReplaying_TaskResponse `xml:"urn:vim25 StopReplaying_TaskResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *StopReplaying_TaskBody) Fault() *soap.Fault { return b.Fault_ }
+
+func StopReplaying_Task(ctx context.Context, r soap.RoundTripper, req *types.StopReplaying_Task) (*types.StopReplaying_TaskResponse, error) {
+	var reqBody, resBody StopReplaying_TaskBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type StopServiceBody struct {
+	Req *types.StopService `xml:"urn:vim25 StopService,omitempty"`
+	Res *types.StopServiceResponse `xml:"urn:vim25 StopServiceResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *StopServiceBody) Fault() *soap.Fault { return b.Fault_ }
+
+func StopService(ctx context.Context, r soap.RoundTripper, req *types.StopService) (*types.StopServiceResponse, error) {
+	var reqBody, resBody StopServiceBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type SuspendVApp_TaskBody struct {
+	Req *types.SuspendVApp_Task `xml:"urn:vim25 SuspendVApp_Task,omitempty"`
+	Res *types.SuspendVApp_TaskResponse `xml:"urn:vim25 SuspendVApp_TaskResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *SuspendVApp_TaskBody) Fault() *soap.Fault { return b.Fault_ }
+
+func SuspendVApp_Task(ctx context.Context, r soap.RoundTripper, req *types.SuspendVApp_Task) (*types.SuspendVApp_TaskResponse, error) {
+	var reqBody, resBody SuspendVApp_TaskBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type SuspendVM_TaskBody struct {
+	Req *types.SuspendVM_Task `xml:"urn:vim25 SuspendVM_Task,omitempty"`
+	Res *types.SuspendVM_TaskResponse `xml:"urn:vim25 SuspendVM_TaskResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *SuspendVM_TaskBody) Fault() *soap.Fault { return b.Fault_ }
+
+func SuspendVM_Task(ctx context.Context, r soap.RoundTripper, req *types.SuspendVM_Task) (*types.SuspendVM_TaskResponse, error) {
+	var reqBody, resBody SuspendVM_TaskBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type TerminateFaultTolerantVM_TaskBody struct {
+	Req *types.TerminateFaultTolerantVM_Task `xml:"urn:vim25 TerminateFaultTolerantVM_Task,omitempty"`
+	Res *types.TerminateFaultTolerantVM_TaskResponse `xml:"urn:vim25 TerminateFaultTolerantVM_TaskResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *TerminateFaultTolerantVM_TaskBody) Fault() *soap.Fault { return b.Fault_ }
+
+func TerminateFaultTolerantVM_Task(ctx context.Context, r soap.RoundTripper, req *types.TerminateFaultTolerantVM_Task) (*types.TerminateFaultTolerantVM_TaskResponse, error) {
+	var reqBody, resBody TerminateFaultTolerantVM_TaskBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type TerminateProcessInGuestBody struct {
+	Req *types.TerminateProcessInGuest `xml:"urn:vim25 TerminateProcessInGuest,omitempty"`
+	Res *types.TerminateProcessInGuestResponse `xml:"urn:vim25 TerminateProcessInGuestResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *TerminateProcessInGuestBody) Fault() *soap.Fault { return b.Fault_ }
+
+func TerminateProcessInGuest(ctx context.Context, r soap.RoundTripper, req *types.TerminateProcessInGuest) (*types.TerminateProcessInGuestResponse, error) {
+	var reqBody, resBody TerminateProcessInGuestBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type TerminateSessionBody struct {
+	Req *types.TerminateSession `xml:"urn:vim25 TerminateSession,omitempty"`
+	Res *types.TerminateSessionResponse `xml:"urn:vim25 TerminateSessionResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *TerminateSessionBody) Fault() *soap.Fault { return b.Fault_ }
+
+func TerminateSession(ctx context.Context, r soap.RoundTripper, req *types.TerminateSession) (*types.TerminateSessionResponse, error) {
+	var reqBody, resBody TerminateSessionBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type TerminateVMBody struct {
+	Req *types.TerminateVM `xml:"urn:vim25 TerminateVM,omitempty"`
+	Res *types.TerminateVMResponse `xml:"urn:vim25 TerminateVMResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *TerminateVMBody) Fault() *soap.Fault { return b.Fault_ }
+
+func TerminateVM(ctx context.Context, r soap.RoundTripper, req *types.TerminateVM) (*types.TerminateVMResponse, error) {
+	var reqBody, resBody TerminateVMBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type TurnDiskLocatorLedOff_TaskBody struct {
+	Req *types.TurnDiskLocatorLedOff_Task `xml:"urn:vim25 TurnDiskLocatorLedOff_Task,omitempty"`
+	Res *types.TurnDiskLocatorLedOff_TaskResponse `xml:"urn:vim25 TurnDiskLocatorLedOff_TaskResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *TurnDiskLocatorLedOff_TaskBody) Fault() *soap.Fault { return b.Fault_ }
+
+func TurnDiskLocatorLedOff_Task(ctx context.Context, r soap.RoundTripper, req *types.TurnDiskLocatorLedOff_Task) (*types.TurnDiskLocatorLedOff_TaskResponse, error) {
+	var reqBody, resBody TurnDiskLocatorLedOff_TaskBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type TurnDiskLocatorLedOn_TaskBody struct {
+	Req *types.TurnDiskLocatorLedOn_Task `xml:"urn:vim25 TurnDiskLocatorLedOn_Task,omitempty"`
+	Res *types.TurnDiskLocatorLedOn_TaskResponse `xml:"urn:vim25 TurnDiskLocatorLedOn_TaskResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *TurnDiskLocatorLedOn_TaskBody) Fault() *soap.Fault { return b.Fault_ }
+
+func TurnDiskLocatorLedOn_Task(ctx context.Context, r soap.RoundTripper, req *types.TurnDiskLocatorLedOn_Task) (*types.TurnDiskLocatorLedOn_TaskResponse, error) {
+	var reqBody, resBody TurnDiskLocatorLedOn_TaskBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type TurnOffFaultToleranceForVM_TaskBody struct {
+	Req *types.TurnOffFaultToleranceForVM_Task `xml:"urn:vim25 TurnOffFaultToleranceForVM_Task,omitempty"`
+	Res *types.TurnOffFaultToleranceForVM_TaskResponse `xml:"urn:vim25 TurnOffFaultToleranceForVM_TaskResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *TurnOffFaultToleranceForVM_TaskBody) Fault() *soap.Fault { return b.Fault_ }
+
+func TurnOffFaultToleranceForVM_Task(ctx context.Context, r soap.RoundTripper, req *types.TurnOffFaultToleranceForVM_Task) (*types.TurnOffFaultToleranceForVM_TaskResponse, error) {
+	var reqBody, resBody TurnOffFaultToleranceForVM_TaskBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type UnassignUserFromGroupBody struct {
+	Req *types.UnassignUserFromGroup `xml:"urn:vim25 UnassignUserFromGroup,omitempty"`
+	Res *types.UnassignUserFromGroupResponse `xml:"urn:vim25 UnassignUserFromGroupResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *UnassignUserFromGroupBody) Fault() *soap.Fault { return b.Fault_ }
+
+func UnassignUserFromGroup(ctx context.Context, r soap.RoundTripper, req *types.UnassignUserFromGroup) (*types.UnassignUserFromGroupResponse, error) {
+	var reqBody, resBody UnassignUserFromGroupBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type UnbindVnicBody struct {
+	Req *types.UnbindVnic `xml:"urn:vim25 UnbindVnic,omitempty"`
+	Res *types.UnbindVnicResponse `xml:"urn:vim25 UnbindVnicResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *UnbindVnicBody) Fault() *soap.Fault { return b.Fault_ }
+
+func UnbindVnic(ctx context.Context, r soap.RoundTripper, req *types.UnbindVnic) (*types.UnbindVnicResponse, error) {
+	var reqBody, resBody UnbindVnicBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type UndeployVMBody struct {
+	Req *types.UndeployVM `xml:"urn:vim25 UndeployVM,omitempty"`
+	Res *types.UndeployVMResponse `xml:"urn:vim25 UndeployVMResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *UndeployVMBody) Fault() *soap.Fault { return b.Fault_ }
+
+func UndeployVM(ctx context.Context, r soap.RoundTripper, req *types.UndeployVM) (*types.UndeployVMResponse, error) {
+	var reqBody, resBody UndeployVMBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type UninstallHostPatch_TaskBody struct {
+	Req *types.UninstallHostPatch_Task `xml:"urn:vim25 UninstallHostPatch_Task,omitempty"`
+	Res *types.UninstallHostPatch_TaskResponse `xml:"urn:vim25 UninstallHostPatch_TaskResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *UninstallHostPatch_TaskBody) Fault() *soap.Fault { return b.Fault_ }
+
+func UninstallHostPatch_Task(ctx context.Context, r soap.RoundTripper, req *types.UninstallHostPatch_Task) (*types.UninstallHostPatch_TaskResponse, error) {
+	var reqBody, resBody UninstallHostPatch_TaskBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type UninstallIoFilter_TaskBody struct {
+	Req *types.UninstallIoFilter_Task `xml:"urn:vim25 UninstallIoFilter_Task,omitempty"`
+	Res *types.UninstallIoFilter_TaskResponse `xml:"urn:vim25 UninstallIoFilter_TaskResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *UninstallIoFilter_TaskBody) Fault() *soap.Fault { return b.Fault_ }
+
+func UninstallIoFilter_Task(ctx context.Context, r soap.RoundTripper, req *types.UninstallIoFilter_Task) (*types.UninstallIoFilter_TaskResponse, error) {
+	var reqBody, resBody UninstallIoFilter_TaskBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type UninstallServiceBody struct {
+	Req *types.UninstallService `xml:"urn:vim25 UninstallService,omitempty"`
+	Res *types.UninstallServiceResponse `xml:"urn:vim25 UninstallServiceResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *UninstallServiceBody) Fault() *soap.Fault { return b.Fault_ }
+
+func UninstallService(ctx context.Context, r soap.RoundTripper, req *types.UninstallService) (*types.UninstallServiceResponse, error) {
+	var reqBody, resBody UninstallServiceBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type UnmapVmfsVolumeEx_TaskBody struct {
+	Req *types.UnmapVmfsVolumeEx_Task `xml:"urn:vim25 UnmapVmfsVolumeEx_Task,omitempty"`
+	Res *types.UnmapVmfsVolumeEx_TaskResponse `xml:"urn:vim25 UnmapVmfsVolumeEx_TaskResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *UnmapVmfsVolumeEx_TaskBody) Fault() *soap.Fault { return b.Fault_ }
+
+func UnmapVmfsVolumeEx_Task(ctx context.Context, r soap.RoundTripper, req *types.UnmapVmfsVolumeEx_Task) (*types.UnmapVmfsVolumeEx_TaskResponse, error) {
+	var reqBody, resBody UnmapVmfsVolumeEx_TaskBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type UnmountDiskMapping_TaskBody struct {
+	Req *types.UnmountDiskMapping_Task `xml:"urn:vim25 UnmountDiskMapping_Task,omitempty"`
+	Res *types.UnmountDiskMapping_TaskResponse `xml:"urn:vim25 UnmountDiskMapping_TaskResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *UnmountDiskMapping_TaskBody) Fault() *soap.Fault { return b.Fault_ }
+
+func UnmountDiskMapping_Task(ctx context.Context, r soap.RoundTripper, req *types.UnmountDiskMapping_Task) (*types.UnmountDiskMapping_TaskResponse, error) {
+	var reqBody, resBody UnmountDiskMapping_TaskBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type UnmountForceMountedVmfsVolumeBody struct {
+	Req *types.UnmountForceMountedVmfsVolume `xml:"urn:vim25 UnmountForceMountedVmfsVolume,omitempty"`
+	Res *types.UnmountForceMountedVmfsVolumeResponse `xml:"urn:vim25 UnmountForceMountedVmfsVolumeResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *UnmountForceMountedVmfsVolumeBody) Fault() *soap.Fault { return b.Fault_ }
+
+func UnmountForceMountedVmfsVolume(ctx context.Context, r soap.RoundTripper, req *types.UnmountForceMountedVmfsVolume) (*types.UnmountForceMountedVmfsVolumeResponse, error) {
+	var reqBody, resBody UnmountForceMountedVmfsVolumeBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type UnmountToolsInstallerBody struct {
+	Req *types.UnmountToolsInstaller `xml:"urn:vim25 UnmountToolsInstaller,omitempty"`
+	Res *types.UnmountToolsInstallerResponse `xml:"urn:vim25 UnmountToolsInstallerResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *UnmountToolsInstallerBody) Fault() *soap.Fault { return b.Fault_ }
+
+func UnmountToolsInstaller(ctx context.Context, r soap.RoundTripper, req *types.UnmountToolsInstaller) (*types.UnmountToolsInstallerResponse, error) {
+	var reqBody, resBody UnmountToolsInstallerBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type UnmountVffsVolumeBody struct {
+	Req *types.UnmountVffsVolume `xml:"urn:vim25 UnmountVffsVolume,omitempty"`
+	Res *types.UnmountVffsVolumeResponse `xml:"urn:vim25 UnmountVffsVolumeResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *UnmountVffsVolumeBody) Fault() *soap.Fault { return b.Fault_ }
+
+func UnmountVffsVolume(ctx context.Context, r soap.RoundTripper, req *types.UnmountVffsVolume) (*types.UnmountVffsVolumeResponse, error) {
+	var reqBody, resBody UnmountVffsVolumeBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type UnmountVmfsVolumeBody struct {
+	Req *types.UnmountVmfsVolume `xml:"urn:vim25 UnmountVmfsVolume,omitempty"`
+	Res *types.UnmountVmfsVolumeResponse `xml:"urn:vim25 UnmountVmfsVolumeResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *UnmountVmfsVolumeBody) Fault() *soap.Fault { return b.Fault_ }
+
+func UnmountVmfsVolume(ctx context.Context, r soap.RoundTripper, req *types.UnmountVmfsVolume) (*types.UnmountVmfsVolumeResponse, error) {
+	var reqBody, resBody UnmountVmfsVolumeBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type UnmountVmfsVolumeEx_TaskBody struct {
+	Req *types.UnmountVmfsVolumeEx_Task `xml:"urn:vim25 UnmountVmfsVolumeEx_Task,omitempty"`
+	Res *types.UnmountVmfsVolumeEx_TaskResponse `xml:"urn:vim25 UnmountVmfsVolumeEx_TaskResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *UnmountVmfsVolumeEx_TaskBody) Fault() *soap.Fault { return b.Fault_ }
+
+func UnmountVmfsVolumeEx_Task(ctx context.Context, r soap.RoundTripper, req *types.UnmountVmfsVolumeEx_Task) (*types.UnmountVmfsVolumeEx_TaskResponse, error) {
+	var reqBody, resBody UnmountVmfsVolumeEx_TaskBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type UnregisterAndDestroy_TaskBody struct {
+	Req *types.UnregisterAndDestroy_Task `xml:"urn:vim25 UnregisterAndDestroy_Task,omitempty"`
+	Res *types.UnregisterAndDestroy_TaskResponse `xml:"urn:vim25 UnregisterAndDestroy_TaskResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *UnregisterAndDestroy_TaskBody) Fault() *soap.Fault { return b.Fault_ }
+
+func UnregisterAndDestroy_Task(ctx context.Context, r soap.RoundTripper, req *types.UnregisterAndDestroy_Task) (*types.UnregisterAndDestroy_TaskResponse, error) {
+	var reqBody, resBody UnregisterAndDestroy_TaskBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type UnregisterExtensionBody struct {
+	Req *types.UnregisterExtension `xml:"urn:vim25 UnregisterExtension,omitempty"`
+	Res *types.UnregisterExtensionResponse `xml:"urn:vim25 UnregisterExtensionResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *UnregisterExtensionBody) Fault() *soap.Fault { return b.Fault_ }
+
+func UnregisterExtension(ctx context.Context, r soap.RoundTripper, req *types.UnregisterExtension) (*types.UnregisterExtensionResponse, error) {
+	var reqBody, resBody UnregisterExtensionBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type UnregisterVMBody struct {
+	Req *types.UnregisterVM `xml:"urn:vim25 UnregisterVM,omitempty"`
+	Res *types.UnregisterVMResponse `xml:"urn:vim25 UnregisterVMResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *UnregisterVMBody) Fault() *soap.Fault { return b.Fault_ }
+
+func UnregisterVM(ctx context.Context, r soap.RoundTripper, req *types.UnregisterVM) (*types.UnregisterVMResponse, error) {
+	var reqBody, resBody UnregisterVMBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type UpdateAnswerFile_TaskBody struct {
+	Req *types.UpdateAnswerFile_Task `xml:"urn:vim25 UpdateAnswerFile_Task,omitempty"`
+	Res *types.UpdateAnswerFile_TaskResponse `xml:"urn:vim25 UpdateAnswerFile_TaskResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *UpdateAnswerFile_TaskBody) Fault() *soap.Fault { return b.Fault_ }
+
+func UpdateAnswerFile_Task(ctx context.Context, r soap.RoundTripper, req *types.UpdateAnswerFile_Task) (*types.UpdateAnswerFile_TaskResponse, error) {
+	var reqBody, resBody UpdateAnswerFile_TaskBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type UpdateAssignedLicenseBody struct {
+	Req *types.UpdateAssignedLicense `xml:"urn:vim25 UpdateAssignedLicense,omitempty"`
+	Res *types.UpdateAssignedLicenseResponse `xml:"urn:vim25 UpdateAssignedLicenseResponse,omitempty"`
+
Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *UpdateAssignedLicenseBody) Fault() *soap.Fault { return b.Fault_ } + +func UpdateAssignedLicense(ctx context.Context, r soap.RoundTripper, req *types.UpdateAssignedLicense) (*types.UpdateAssignedLicenseResponse, error) { + var reqBody, resBody UpdateAssignedLicenseBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type UpdateAuthorizationRoleBody struct { + Req *types.UpdateAuthorizationRole `xml:"urn:vim25 UpdateAuthorizationRole,omitempty"` + Res *types.UpdateAuthorizationRoleResponse `xml:"urn:vim25 UpdateAuthorizationRoleResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *UpdateAuthorizationRoleBody) Fault() *soap.Fault { return b.Fault_ } + +func UpdateAuthorizationRole(ctx context.Context, r soap.RoundTripper, req *types.UpdateAuthorizationRole) (*types.UpdateAuthorizationRoleResponse, error) { + var reqBody, resBody UpdateAuthorizationRoleBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type UpdateBootDeviceBody struct { + Req *types.UpdateBootDevice `xml:"urn:vim25 UpdateBootDevice,omitempty"` + Res *types.UpdateBootDeviceResponse `xml:"urn:vim25 UpdateBootDeviceResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *UpdateBootDeviceBody) Fault() *soap.Fault { return b.Fault_ } + +func UpdateBootDevice(ctx context.Context, r soap.RoundTripper, req *types.UpdateBootDevice) (*types.UpdateBootDeviceResponse, error) { + var reqBody, resBody UpdateBootDeviceBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type UpdateChildResourceConfigurationBody struct { + Req *types.UpdateChildResourceConfiguration `xml:"urn:vim25 UpdateChildResourceConfiguration,omitempty"` + Res *types.UpdateChildResourceConfigurationResponse `xml:"urn:vim25 UpdateChildResourceConfigurationResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *UpdateChildResourceConfigurationBody) Fault() *soap.Fault { return b.Fault_ } + +func UpdateChildResourceConfiguration(ctx context.Context, r soap.RoundTripper, req *types.UpdateChildResourceConfiguration) (*types.UpdateChildResourceConfigurationResponse, error) { + var reqBody, resBody UpdateChildResourceConfigurationBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type UpdateClusterProfileBody struct { + Req *types.UpdateClusterProfile `xml:"urn:vim25 UpdateClusterProfile,omitempty"` + Res *types.UpdateClusterProfileResponse `xml:"urn:vim25 UpdateClusterProfileResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *UpdateClusterProfileBody) Fault() *soap.Fault { return b.Fault_ } + +func UpdateClusterProfile(ctx context.Context, r soap.RoundTripper, req *types.UpdateClusterProfile) (*types.UpdateClusterProfileResponse, error) { + var reqBody, resBody UpdateClusterProfileBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type 
UpdateConfigBody struct { + Req *types.UpdateConfig `xml:"urn:vim25 UpdateConfig,omitempty"` + Res *types.UpdateConfigResponse `xml:"urn:vim25 UpdateConfigResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *UpdateConfigBody) Fault() *soap.Fault { return b.Fault_ } + +func UpdateConfig(ctx context.Context, r soap.RoundTripper, req *types.UpdateConfig) (*types.UpdateConfigResponse, error) { + var reqBody, resBody UpdateConfigBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type UpdateConsoleIpRouteConfigBody struct { + Req *types.UpdateConsoleIpRouteConfig `xml:"urn:vim25 UpdateConsoleIpRouteConfig,omitempty"` + Res *types.UpdateConsoleIpRouteConfigResponse `xml:"urn:vim25 UpdateConsoleIpRouteConfigResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *UpdateConsoleIpRouteConfigBody) Fault() *soap.Fault { return b.Fault_ } + +func UpdateConsoleIpRouteConfig(ctx context.Context, r soap.RoundTripper, req *types.UpdateConsoleIpRouteConfig) (*types.UpdateConsoleIpRouteConfigResponse, error) { + var reqBody, resBody UpdateConsoleIpRouteConfigBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type UpdateCounterLevelMappingBody struct { + Req *types.UpdateCounterLevelMapping `xml:"urn:vim25 UpdateCounterLevelMapping,omitempty"` + Res *types.UpdateCounterLevelMappingResponse `xml:"urn:vim25 UpdateCounterLevelMappingResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *UpdateCounterLevelMappingBody) Fault() *soap.Fault { return b.Fault_ } + +func UpdateCounterLevelMapping(ctx context.Context, r soap.RoundTripper, req *types.UpdateCounterLevelMapping) (*types.UpdateCounterLevelMappingResponse, error) { + var reqBody, resBody UpdateCounterLevelMappingBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type UpdateDVSHealthCheckConfig_TaskBody struct { + Req *types.UpdateDVSHealthCheckConfig_Task `xml:"urn:vim25 UpdateDVSHealthCheckConfig_Task,omitempty"` + Res *types.UpdateDVSHealthCheckConfig_TaskResponse `xml:"urn:vim25 UpdateDVSHealthCheckConfig_TaskResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *UpdateDVSHealthCheckConfig_TaskBody) Fault() *soap.Fault { return b.Fault_ } + +func UpdateDVSHealthCheckConfig_Task(ctx context.Context, r soap.RoundTripper, req *types.UpdateDVSHealthCheckConfig_Task) (*types.UpdateDVSHealthCheckConfig_TaskResponse, error) { + var reqBody, resBody UpdateDVSHealthCheckConfig_TaskBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type UpdateDVSLacpGroupConfig_TaskBody struct { + Req *types.UpdateDVSLacpGroupConfig_Task `xml:"urn:vim25 UpdateDVSLacpGroupConfig_Task,omitempty"` + Res *types.UpdateDVSLacpGroupConfig_TaskResponse `xml:"urn:vim25 UpdateDVSLacpGroupConfig_TaskResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *UpdateDVSLacpGroupConfig_TaskBody) Fault() *soap.Fault { return b.Fault_ } + +func UpdateDVSLacpGroupConfig_Task(ctx 
context.Context, r soap.RoundTripper, req *types.UpdateDVSLacpGroupConfig_Task) (*types.UpdateDVSLacpGroupConfig_TaskResponse, error) { + var reqBody, resBody UpdateDVSLacpGroupConfig_TaskBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type UpdateDateTimeBody struct { + Req *types.UpdateDateTime `xml:"urn:vim25 UpdateDateTime,omitempty"` + Res *types.UpdateDateTimeResponse `xml:"urn:vim25 UpdateDateTimeResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *UpdateDateTimeBody) Fault() *soap.Fault { return b.Fault_ } + +func UpdateDateTime(ctx context.Context, r soap.RoundTripper, req *types.UpdateDateTime) (*types.UpdateDateTimeResponse, error) { + var reqBody, resBody UpdateDateTimeBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type UpdateDateTimeConfigBody struct { + Req *types.UpdateDateTimeConfig `xml:"urn:vim25 UpdateDateTimeConfig,omitempty"` + Res *types.UpdateDateTimeConfigResponse `xml:"urn:vim25 UpdateDateTimeConfigResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *UpdateDateTimeConfigBody) Fault() *soap.Fault { return b.Fault_ } + +func UpdateDateTimeConfig(ctx context.Context, r soap.RoundTripper, req *types.UpdateDateTimeConfig) (*types.UpdateDateTimeConfigResponse, error) { + var reqBody, resBody UpdateDateTimeConfigBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type UpdateDefaultPolicyBody struct { + Req *types.UpdateDefaultPolicy `xml:"urn:vim25 UpdateDefaultPolicy,omitempty"` + Res *types.UpdateDefaultPolicyResponse `xml:"urn:vim25 UpdateDefaultPolicyResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *UpdateDefaultPolicyBody) Fault() *soap.Fault { return b.Fault_ } + +func UpdateDefaultPolicy(ctx context.Context, r soap.RoundTripper, req *types.UpdateDefaultPolicy) (*types.UpdateDefaultPolicyResponse, error) { + var reqBody, resBody UpdateDefaultPolicyBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type UpdateDiskPartitionsBody struct { + Req *types.UpdateDiskPartitions `xml:"urn:vim25 UpdateDiskPartitions,omitempty"` + Res *types.UpdateDiskPartitionsResponse `xml:"urn:vim25 UpdateDiskPartitionsResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *UpdateDiskPartitionsBody) Fault() *soap.Fault { return b.Fault_ } + +func UpdateDiskPartitions(ctx context.Context, r soap.RoundTripper, req *types.UpdateDiskPartitions) (*types.UpdateDiskPartitionsResponse, error) { + var reqBody, resBody UpdateDiskPartitionsBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type UpdateDnsConfigBody struct { + Req *types.UpdateDnsConfig `xml:"urn:vim25 UpdateDnsConfig,omitempty"` + Res *types.UpdateDnsConfigResponse `xml:"urn:vim25 UpdateDnsConfigResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *UpdateDnsConfigBody) Fault() *soap.Fault { return b.Fault_ } + 
+func UpdateDnsConfig(ctx context.Context, r soap.RoundTripper, req *types.UpdateDnsConfig) (*types.UpdateDnsConfigResponse, error) {
+	var reqBody, resBody UpdateDnsConfigBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type UpdateDvsCapabilityBody struct {
+	Req *types.UpdateDvsCapability `xml:"urn:vim25 UpdateDvsCapability,omitempty"`
+	Res *types.UpdateDvsCapabilityResponse `xml:"urn:vim25 UpdateDvsCapabilityResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *UpdateDvsCapabilityBody) Fault() *soap.Fault { return b.Fault_ }
+
+func UpdateDvsCapability(ctx context.Context, r soap.RoundTripper, req *types.UpdateDvsCapability) (*types.UpdateDvsCapabilityResponse, error) {
+	var reqBody, resBody UpdateDvsCapabilityBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type UpdateExtensionBody struct {
+	Req *types.UpdateExtension `xml:"urn:vim25 UpdateExtension,omitempty"`
+	Res *types.UpdateExtensionResponse `xml:"urn:vim25 UpdateExtensionResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *UpdateExtensionBody) Fault() *soap.Fault { return b.Fault_ }
+
+func UpdateExtension(ctx context.Context, r soap.RoundTripper, req *types.UpdateExtension) (*types.UpdateExtensionResponse, error) {
+	var reqBody, resBody UpdateExtensionBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type UpdateFlagsBody struct {
+	Req *types.UpdateFlags `xml:"urn:vim25 UpdateFlags,omitempty"`
+	Res *types.UpdateFlagsResponse `xml:"urn:vim25 UpdateFlagsResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *UpdateFlagsBody) Fault() *soap.Fault { return b.Fault_ }
+
+func UpdateFlags(ctx context.Context, r soap.RoundTripper, req *types.UpdateFlags) (*types.UpdateFlagsResponse, error) {
+	var reqBody, resBody UpdateFlagsBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type UpdateHostImageAcceptanceLevelBody struct {
+	Req *types.UpdateHostImageAcceptanceLevel `xml:"urn:vim25 UpdateHostImageAcceptanceLevel,omitempty"`
+	Res *types.UpdateHostImageAcceptanceLevelResponse `xml:"urn:vim25 UpdateHostImageAcceptanceLevelResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *UpdateHostImageAcceptanceLevelBody) Fault() *soap.Fault { return b.Fault_ }
+
+func UpdateHostImageAcceptanceLevel(ctx context.Context, r soap.RoundTripper, req *types.UpdateHostImageAcceptanceLevel) (*types.UpdateHostImageAcceptanceLevelResponse, error) {
+	var reqBody, resBody UpdateHostImageAcceptanceLevelBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type UpdateHostProfileBody struct {
+	Req *types.UpdateHostProfile `xml:"urn:vim25 UpdateHostProfile,omitempty"`
+	Res *types.UpdateHostProfileResponse `xml:"urn:vim25 UpdateHostProfileResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *UpdateHostProfileBody) Fault() *soap.Fault { return b.Fault_ }
+
+func UpdateHostProfile(ctx context.Context, r soap.RoundTripper, req *types.UpdateHostProfile) (*types.UpdateHostProfileResponse, error) {
+	var reqBody, resBody UpdateHostProfileBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type UpdateInternetScsiAdvancedOptionsBody struct {
+	Req *types.UpdateInternetScsiAdvancedOptions `xml:"urn:vim25 UpdateInternetScsiAdvancedOptions,omitempty"`
+	Res *types.UpdateInternetScsiAdvancedOptionsResponse `xml:"urn:vim25 UpdateInternetScsiAdvancedOptionsResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *UpdateInternetScsiAdvancedOptionsBody) Fault() *soap.Fault { return b.Fault_ }
+
+func UpdateInternetScsiAdvancedOptions(ctx context.Context, r soap.RoundTripper, req *types.UpdateInternetScsiAdvancedOptions) (*types.UpdateInternetScsiAdvancedOptionsResponse, error) {
+	var reqBody, resBody UpdateInternetScsiAdvancedOptionsBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type UpdateInternetScsiAliasBody struct {
+	Req *types.UpdateInternetScsiAlias `xml:"urn:vim25 UpdateInternetScsiAlias,omitempty"`
+	Res *types.UpdateInternetScsiAliasResponse `xml:"urn:vim25 UpdateInternetScsiAliasResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *UpdateInternetScsiAliasBody) Fault() *soap.Fault { return b.Fault_ }
+
+func UpdateInternetScsiAlias(ctx context.Context, r soap.RoundTripper, req *types.UpdateInternetScsiAlias) (*types.UpdateInternetScsiAliasResponse, error) {
+	var reqBody, resBody UpdateInternetScsiAliasBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type UpdateInternetScsiAuthenticationPropertiesBody struct {
+	Req *types.UpdateInternetScsiAuthenticationProperties `xml:"urn:vim25 UpdateInternetScsiAuthenticationProperties,omitempty"`
+	Res *types.UpdateInternetScsiAuthenticationPropertiesResponse `xml:"urn:vim25 UpdateInternetScsiAuthenticationPropertiesResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *UpdateInternetScsiAuthenticationPropertiesBody) Fault() *soap.Fault { return b.Fault_ }
+
+func UpdateInternetScsiAuthenticationProperties(ctx context.Context, r soap.RoundTripper, req *types.UpdateInternetScsiAuthenticationProperties) (*types.UpdateInternetScsiAuthenticationPropertiesResponse, error) {
+	var reqBody, resBody UpdateInternetScsiAuthenticationPropertiesBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type UpdateInternetScsiDigestPropertiesBody struct {
+	Req *types.UpdateInternetScsiDigestProperties `xml:"urn:vim25 UpdateInternetScsiDigestProperties,omitempty"`
+	Res *types.UpdateInternetScsiDigestPropertiesResponse `xml:"urn:vim25 UpdateInternetScsiDigestPropertiesResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *UpdateInternetScsiDigestPropertiesBody) Fault() *soap.Fault { return b.Fault_ }
+
+func UpdateInternetScsiDigestProperties(ctx context.Context, r soap.RoundTripper, req *types.UpdateInternetScsiDigestProperties) (*types.UpdateInternetScsiDigestPropertiesResponse, error) {
+	var reqBody, resBody UpdateInternetScsiDigestPropertiesBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type UpdateInternetScsiDiscoveryPropertiesBody struct {
+	Req *types.UpdateInternetScsiDiscoveryProperties `xml:"urn:vim25 UpdateInternetScsiDiscoveryProperties,omitempty"`
+	Res *types.UpdateInternetScsiDiscoveryPropertiesResponse `xml:"urn:vim25 UpdateInternetScsiDiscoveryPropertiesResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *UpdateInternetScsiDiscoveryPropertiesBody) Fault() *soap.Fault { return b.Fault_ }
+
+func UpdateInternetScsiDiscoveryProperties(ctx context.Context, r soap.RoundTripper, req *types.UpdateInternetScsiDiscoveryProperties) (*types.UpdateInternetScsiDiscoveryPropertiesResponse, error) {
+	var reqBody, resBody UpdateInternetScsiDiscoveryPropertiesBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type UpdateInternetScsiIPPropertiesBody struct {
+	Req *types.UpdateInternetScsiIPProperties `xml:"urn:vim25 UpdateInternetScsiIPProperties,omitempty"`
+	Res *types.UpdateInternetScsiIPPropertiesResponse `xml:"urn:vim25 UpdateInternetScsiIPPropertiesResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *UpdateInternetScsiIPPropertiesBody) Fault() *soap.Fault { return b.Fault_ }
+
+func UpdateInternetScsiIPProperties(ctx context.Context, r soap.RoundTripper, req *types.UpdateInternetScsiIPProperties) (*types.UpdateInternetScsiIPPropertiesResponse, error) {
+	var reqBody, resBody UpdateInternetScsiIPPropertiesBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type UpdateInternetScsiNameBody struct {
+	Req *types.UpdateInternetScsiName `xml:"urn:vim25 UpdateInternetScsiName,omitempty"`
+	Res *types.UpdateInternetScsiNameResponse `xml:"urn:vim25 UpdateInternetScsiNameResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *UpdateInternetScsiNameBody) Fault() *soap.Fault { return b.Fault_ }
+
+func UpdateInternetScsiName(ctx context.Context, r soap.RoundTripper, req *types.UpdateInternetScsiName) (*types.UpdateInternetScsiNameResponse, error) {
+	var reqBody, resBody UpdateInternetScsiNameBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type UpdateIpConfigBody struct {
+	Req *types.UpdateIpConfig `xml:"urn:vim25 UpdateIpConfig,omitempty"`
+	Res *types.UpdateIpConfigResponse `xml:"urn:vim25 UpdateIpConfigResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *UpdateIpConfigBody) Fault() *soap.Fault { return b.Fault_ }
+
+func UpdateIpConfig(ctx context.Context, r soap.RoundTripper, req *types.UpdateIpConfig) (*types.UpdateIpConfigResponse, error) {
+	var reqBody, resBody UpdateIpConfigBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type UpdateIpPoolBody struct {
+	Req *types.UpdateIpPool `xml:"urn:vim25 UpdateIpPool,omitempty"`
+	Res *types.UpdateIpPoolResponse `xml:"urn:vim25 UpdateIpPoolResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *UpdateIpPoolBody) Fault() *soap.Fault { return b.Fault_ }
+
+func UpdateIpPool(ctx context.Context, r soap.RoundTripper, req *types.UpdateIpPool) (*types.UpdateIpPoolResponse, error) {
+	var reqBody, resBody UpdateIpPoolBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type UpdateIpRouteConfigBody struct {
+	Req *types.UpdateIpRouteConfig `xml:"urn:vim25 UpdateIpRouteConfig,omitempty"`
+	Res *types.UpdateIpRouteConfigResponse `xml:"urn:vim25 UpdateIpRouteConfigResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *UpdateIpRouteConfigBody) Fault() *soap.Fault { return b.Fault_ }
+
+func UpdateIpRouteConfig(ctx context.Context, r soap.RoundTripper, req *types.UpdateIpRouteConfig) (*types.UpdateIpRouteConfigResponse, error) {
+	var reqBody, resBody UpdateIpRouteConfigBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type UpdateIpRouteTableConfigBody struct {
+	Req *types.UpdateIpRouteTableConfig `xml:"urn:vim25 UpdateIpRouteTableConfig,omitempty"`
+	Res *types.UpdateIpRouteTableConfigResponse `xml:"urn:vim25 UpdateIpRouteTableConfigResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *UpdateIpRouteTableConfigBody) Fault() *soap.Fault { return b.Fault_ }
+
+func UpdateIpRouteTableConfig(ctx context.Context, r soap.RoundTripper, req *types.UpdateIpRouteTableConfig) (*types.UpdateIpRouteTableConfigResponse, error) {
+	var reqBody, resBody UpdateIpRouteTableConfigBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type UpdateIpmiBody struct {
+	Req *types.UpdateIpmi `xml:"urn:vim25 UpdateIpmi,omitempty"`
+	Res *types.UpdateIpmiResponse `xml:"urn:vim25 UpdateIpmiResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *UpdateIpmiBody) Fault() *soap.Fault { return b.Fault_ }
+
+func UpdateIpmi(ctx context.Context, r soap.RoundTripper, req *types.UpdateIpmi) (*types.UpdateIpmiResponse, error) {
+	var reqBody, resBody UpdateIpmiBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type UpdateLicenseBody struct {
+	Req *types.UpdateLicense `xml:"urn:vim25 UpdateLicense,omitempty"`
+	Res *types.UpdateLicenseResponse `xml:"urn:vim25 UpdateLicenseResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *UpdateLicenseBody) Fault() *soap.Fault { return b.Fault_ }
+
+func UpdateLicense(ctx context.Context, r soap.RoundTripper, req *types.UpdateLicense) (*types.UpdateLicenseResponse, error) {
+	var reqBody, resBody UpdateLicenseBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type UpdateLicenseLabelBody struct {
+	Req *types.UpdateLicenseLabel `xml:"urn:vim25 UpdateLicenseLabel,omitempty"`
+	Res *types.UpdateLicenseLabelResponse `xml:"urn:vim25 UpdateLicenseLabelResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *UpdateLicenseLabelBody) Fault() *soap.Fault { return b.Fault_ }
+
+func UpdateLicenseLabel(ctx context.Context, r soap.RoundTripper, req *types.UpdateLicenseLabel) (*types.UpdateLicenseLabelResponse, error) {
+	var reqBody, resBody UpdateLicenseLabelBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type UpdateLinkedChildrenBody struct {
+	Req *types.UpdateLinkedChildren `xml:"urn:vim25 UpdateLinkedChildren,omitempty"`
+	Res *types.UpdateLinkedChildrenResponse `xml:"urn:vim25 UpdateLinkedChildrenResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *UpdateLinkedChildrenBody) Fault() *soap.Fault { return b.Fault_ }
+
+func UpdateLinkedChildren(ctx context.Context, r soap.RoundTripper, req *types.UpdateLinkedChildren) (*types.UpdateLinkedChildrenResponse, error) {
+	var reqBody, resBody UpdateLinkedChildrenBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type UpdateLocalSwapDatastoreBody struct {
+	Req *types.UpdateLocalSwapDatastore `xml:"urn:vim25 UpdateLocalSwapDatastore,omitempty"`
+	Res *types.UpdateLocalSwapDatastoreResponse `xml:"urn:vim25 UpdateLocalSwapDatastoreResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *UpdateLocalSwapDatastoreBody) Fault() *soap.Fault { return b.Fault_ }
+
+func UpdateLocalSwapDatastore(ctx context.Context, r soap.RoundTripper, req *types.UpdateLocalSwapDatastore) (*types.UpdateLocalSwapDatastoreResponse, error) {
+	var reqBody, resBody UpdateLocalSwapDatastoreBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type UpdateLockdownExceptionsBody struct {
+	Req *types.UpdateLockdownExceptions `xml:"urn:vim25 UpdateLockdownExceptions,omitempty"`
+	Res *types.UpdateLockdownExceptionsResponse `xml:"urn:vim25 UpdateLockdownExceptionsResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *UpdateLockdownExceptionsBody) Fault() *soap.Fault { return b.Fault_ }
+
+func UpdateLockdownExceptions(ctx context.Context, r soap.RoundTripper, req *types.UpdateLockdownExceptions) (*types.UpdateLockdownExceptionsResponse, error) {
+	var reqBody, resBody UpdateLockdownExceptionsBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type UpdateModuleOptionStringBody struct {
+	Req *types.UpdateModuleOptionString `xml:"urn:vim25 UpdateModuleOptionString,omitempty"`
+	Res *types.UpdateModuleOptionStringResponse `xml:"urn:vim25 UpdateModuleOptionStringResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *UpdateModuleOptionStringBody) Fault() *soap.Fault { return b.Fault_ }
+
+func UpdateModuleOptionString(ctx context.Context, r soap.RoundTripper, req *types.UpdateModuleOptionString) (*types.UpdateModuleOptionStringResponse, error) {
+	var reqBody, resBody UpdateModuleOptionStringBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type UpdateNetworkConfigBody struct {
+	Req *types.UpdateNetworkConfig `xml:"urn:vim25 UpdateNetworkConfig,omitempty"`
+	Res *types.UpdateNetworkConfigResponse `xml:"urn:vim25 UpdateNetworkConfigResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *UpdateNetworkConfigBody) Fault() *soap.Fault { return b.Fault_ }
+
+func UpdateNetworkConfig(ctx context.Context, r soap.RoundTripper, req *types.UpdateNetworkConfig) (*types.UpdateNetworkConfigResponse, error) {
+	var reqBody, resBody UpdateNetworkConfigBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type UpdateNetworkResourcePoolBody struct {
+	Req *types.UpdateNetworkResourcePool `xml:"urn:vim25 UpdateNetworkResourcePool,omitempty"`
+	Res *types.UpdateNetworkResourcePoolResponse `xml:"urn:vim25 UpdateNetworkResourcePoolResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *UpdateNetworkResourcePoolBody) Fault() *soap.Fault { return b.Fault_ }
+
+func UpdateNetworkResourcePool(ctx context.Context, r soap.RoundTripper, req *types.UpdateNetworkResourcePool) (*types.UpdateNetworkResourcePoolResponse, error) {
+	var reqBody, resBody UpdateNetworkResourcePoolBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type UpdateOptionsBody struct {
+	Req *types.UpdateOptions `xml:"urn:vim25 UpdateOptions,omitempty"`
+	Res *types.UpdateOptionsResponse `xml:"urn:vim25 UpdateOptionsResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *UpdateOptionsBody) Fault() *soap.Fault { return b.Fault_ }
+
+func UpdateOptions(ctx context.Context, r soap.RoundTripper, req *types.UpdateOptions) (*types.UpdateOptionsResponse, error) {
+	var reqBody, resBody UpdateOptionsBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type UpdatePassthruConfigBody struct {
+	Req *types.UpdatePassthruConfig `xml:"urn:vim25 UpdatePassthruConfig,omitempty"`
+	Res *types.UpdatePassthruConfigResponse `xml:"urn:vim25 UpdatePassthruConfigResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *UpdatePassthruConfigBody) Fault() *soap.Fault { return b.Fault_ }
+
+func UpdatePassthruConfig(ctx context.Context, r soap.RoundTripper, req *types.UpdatePassthruConfig) (*types.UpdatePassthruConfigResponse, error) {
+	var reqBody, resBody UpdatePassthruConfigBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type UpdatePerfIntervalBody struct {
+	Req *types.UpdatePerfInterval `xml:"urn:vim25 UpdatePerfInterval,omitempty"`
+	Res *types.UpdatePerfIntervalResponse `xml:"urn:vim25 UpdatePerfIntervalResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *UpdatePerfIntervalBody) Fault() *soap.Fault { return b.Fault_ }
+
+func UpdatePerfInterval(ctx context.Context, r soap.RoundTripper, req *types.UpdatePerfInterval) (*types.UpdatePerfIntervalResponse, error) {
+	var reqBody, resBody UpdatePerfIntervalBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type UpdatePhysicalNicLinkSpeedBody struct {
+	Req *types.UpdatePhysicalNicLinkSpeed `xml:"urn:vim25 UpdatePhysicalNicLinkSpeed,omitempty"`
+	Res *types.UpdatePhysicalNicLinkSpeedResponse `xml:"urn:vim25 UpdatePhysicalNicLinkSpeedResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *UpdatePhysicalNicLinkSpeedBody) Fault() *soap.Fault { return b.Fault_ }
+
+func UpdatePhysicalNicLinkSpeed(ctx context.Context, r soap.RoundTripper, req *types.UpdatePhysicalNicLinkSpeed) (*types.UpdatePhysicalNicLinkSpeedResponse, error) {
+	var reqBody, resBody UpdatePhysicalNicLinkSpeedBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type UpdatePortGroupBody struct {
+	Req *types.UpdatePortGroup `xml:"urn:vim25 UpdatePortGroup,omitempty"`
+	Res *types.UpdatePortGroupResponse `xml:"urn:vim25 UpdatePortGroupResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *UpdatePortGroupBody) Fault() *soap.Fault { return b.Fault_ }
+
+func UpdatePortGroup(ctx context.Context, r soap.RoundTripper, req *types.UpdatePortGroup) (*types.UpdatePortGroupResponse, error) {
+	var reqBody, resBody UpdatePortGroupBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type UpdateProgressBody struct {
+	Req *types.UpdateProgress `xml:"urn:vim25 UpdateProgress,omitempty"`
+	Res *types.UpdateProgressResponse `xml:"urn:vim25 UpdateProgressResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *UpdateProgressBody) Fault() *soap.Fault { return b.Fault_ }
+
+func UpdateProgress(ctx context.Context, r soap.RoundTripper, req *types.UpdateProgress) (*types.UpdateProgressResponse, error) {
+	var reqBody, resBody UpdateProgressBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type UpdateReferenceHostBody struct {
+	Req *types.UpdateReferenceHost `xml:"urn:vim25 UpdateReferenceHost,omitempty"`
+	Res *types.UpdateReferenceHostResponse `xml:"urn:vim25 UpdateReferenceHostResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *UpdateReferenceHostBody) Fault() *soap.Fault { return b.Fault_ }
+
+func UpdateReferenceHost(ctx context.Context, r soap.RoundTripper, req *types.UpdateReferenceHost) (*types.UpdateReferenceHostResponse, error) {
+	var reqBody, resBody UpdateReferenceHostBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type UpdateRulesetBody struct {
+	Req *types.UpdateRuleset `xml:"urn:vim25 UpdateRuleset,omitempty"`
+	Res *types.UpdateRulesetResponse `xml:"urn:vim25 UpdateRulesetResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *UpdateRulesetBody) Fault() *soap.Fault { return b.Fault_ }
+
+func UpdateRuleset(ctx context.Context, r soap.RoundTripper, req *types.UpdateRuleset) (*types.UpdateRulesetResponse, error) {
+	var reqBody, resBody UpdateRulesetBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type UpdateScsiLunDisplayNameBody struct {
+	Req *types.UpdateScsiLunDisplayName `xml:"urn:vim25 UpdateScsiLunDisplayName,omitempty"`
+	Res *types.UpdateScsiLunDisplayNameResponse `xml:"urn:vim25 UpdateScsiLunDisplayNameResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *UpdateScsiLunDisplayNameBody) Fault() *soap.Fault { return b.Fault_ }
+
+func UpdateScsiLunDisplayName(ctx context.Context, r soap.RoundTripper, req *types.UpdateScsiLunDisplayName) (*types.UpdateScsiLunDisplayNameResponse, error) {
+	var reqBody, resBody UpdateScsiLunDisplayNameBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type UpdateServiceConsoleVirtualNicBody struct {
+	Req *types.UpdateServiceConsoleVirtualNic `xml:"urn:vim25 UpdateServiceConsoleVirtualNic,omitempty"`
+	Res *types.UpdateServiceConsoleVirtualNicResponse `xml:"urn:vim25 UpdateServiceConsoleVirtualNicResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *UpdateServiceConsoleVirtualNicBody) Fault() *soap.Fault { return b.Fault_ }
+
+func UpdateServiceConsoleVirtualNic(ctx context.Context, r soap.RoundTripper, req *types.UpdateServiceConsoleVirtualNic) (*types.UpdateServiceConsoleVirtualNicResponse, error) {
+	var reqBody, resBody UpdateServiceConsoleVirtualNicBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type UpdateServiceMessageBody struct {
+	Req *types.UpdateServiceMessage `xml:"urn:vim25 UpdateServiceMessage,omitempty"`
+	Res *types.UpdateServiceMessageResponse `xml:"urn:vim25 UpdateServiceMessageResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *UpdateServiceMessageBody) Fault() *soap.Fault { return b.Fault_ }
+
+func UpdateServiceMessage(ctx context.Context, r soap.RoundTripper, req *types.UpdateServiceMessage) (*types.UpdateServiceMessageResponse, error) {
+	var reqBody, resBody UpdateServiceMessageBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type UpdateServicePolicyBody struct {
+	Req *types.UpdateServicePolicy `xml:"urn:vim25 UpdateServicePolicy,omitempty"`
+	Res *types.UpdateServicePolicyResponse `xml:"urn:vim25 UpdateServicePolicyResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *UpdateServicePolicyBody) Fault() *soap.Fault { return b.Fault_ }
+
+func UpdateServicePolicy(ctx context.Context, r soap.RoundTripper, req *types.UpdateServicePolicy) (*types.UpdateServicePolicyResponse, error) {
+	var reqBody, resBody UpdateServicePolicyBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type UpdateSoftwareInternetScsiEnabledBody struct {
+	Req *types.UpdateSoftwareInternetScsiEnabled `xml:"urn:vim25 UpdateSoftwareInternetScsiEnabled,omitempty"`
+	Res *types.UpdateSoftwareInternetScsiEnabledResponse `xml:"urn:vim25 UpdateSoftwareInternetScsiEnabledResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *UpdateSoftwareInternetScsiEnabledBody) Fault() *soap.Fault { return b.Fault_ }
+
+func UpdateSoftwareInternetScsiEnabled(ctx context.Context, r soap.RoundTripper, req *types.UpdateSoftwareInternetScsiEnabled) (*types.UpdateSoftwareInternetScsiEnabledResponse, error) {
+	var reqBody, resBody UpdateSoftwareInternetScsiEnabledBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type UpdateSystemResourcesBody struct {
+	Req *types.UpdateSystemResources `xml:"urn:vim25 UpdateSystemResources,omitempty"`
+	Res *types.UpdateSystemResourcesResponse `xml:"urn:vim25 UpdateSystemResourcesResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *UpdateSystemResourcesBody) Fault() *soap.Fault { return b.Fault_ }
+
+func UpdateSystemResources(ctx context.Context, r soap.RoundTripper, req *types.UpdateSystemResources) (*types.UpdateSystemResourcesResponse, error) {
+	var reqBody, resBody UpdateSystemResourcesBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type UpdateSystemSwapConfigurationBody struct {
+	Req *types.UpdateSystemSwapConfiguration `xml:"urn:vim25 UpdateSystemSwapConfiguration,omitempty"`
+	Res *types.UpdateSystemSwapConfigurationResponse `xml:"urn:vim25 UpdateSystemSwapConfigurationResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *UpdateSystemSwapConfigurationBody) Fault() *soap.Fault { return b.Fault_ }
+
+func UpdateSystemSwapConfiguration(ctx context.Context, r soap.RoundTripper, req *types.UpdateSystemSwapConfiguration) (*types.UpdateSystemSwapConfigurationResponse, error) {
+	var reqBody, resBody UpdateSystemSwapConfigurationBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type UpdateSystemUsersBody struct {
+	Req *types.UpdateSystemUsers `xml:"urn:vim25 UpdateSystemUsers,omitempty"`
+	Res *types.UpdateSystemUsersResponse `xml:"urn:vim25 UpdateSystemUsersResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *UpdateSystemUsersBody) Fault() *soap.Fault { return b.Fault_ }
+
+func UpdateSystemUsers(ctx context.Context, r soap.RoundTripper, req *types.UpdateSystemUsers) (*types.UpdateSystemUsersResponse, error) {
+	var reqBody, resBody UpdateSystemUsersBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type UpdateUserBody struct {
+	Req *types.UpdateUser `xml:"urn:vim25 UpdateUser,omitempty"`
+	Res *types.UpdateUserResponse `xml:"urn:vim25 UpdateUserResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *UpdateUserBody) Fault() *soap.Fault { return b.Fault_ }
+
+func UpdateUser(ctx context.Context, r soap.RoundTripper, req *types.UpdateUser) (*types.UpdateUserResponse, error) {
+	var reqBody, resBody UpdateUserBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type UpdateVAppConfigBody struct {
+	Req *types.UpdateVAppConfig `xml:"urn:vim25 UpdateVAppConfig,omitempty"`
+	Res *types.UpdateVAppConfigResponse `xml:"urn:vim25 UpdateVAppConfigResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *UpdateVAppConfigBody) Fault() *soap.Fault { return b.Fault_ }
+
+func UpdateVAppConfig(ctx context.Context, r soap.RoundTripper, req *types.UpdateVAppConfig) (*types.UpdateVAppConfigResponse, error) {
+	var reqBody, resBody UpdateVAppConfigBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type UpdateVRPBody struct {
+	Req *types.UpdateVRP `xml:"urn:vim25 UpdateVRP,omitempty"`
+	Res *types.UpdateVRPResponse `xml:"urn:vim25 UpdateVRPResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *UpdateVRPBody) Fault() *soap.Fault { return b.Fault_ }
+
+func UpdateVRP(ctx context.Context, r soap.RoundTripper, req *types.UpdateVRP) (*types.UpdateVRPResponse, error) {
+	var reqBody, resBody UpdateVRPBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type UpdateVirtualMachineFiles_TaskBody struct {
+	Req *types.UpdateVirtualMachineFiles_Task `xml:"urn:vim25 UpdateVirtualMachineFiles_Task,omitempty"`
+	Res *types.UpdateVirtualMachineFiles_TaskResponse `xml:"urn:vim25 UpdateVirtualMachineFiles_TaskResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *UpdateVirtualMachineFiles_TaskBody) Fault() *soap.Fault { return b.Fault_ }
+
+func UpdateVirtualMachineFiles_Task(ctx context.Context, r soap.RoundTripper, req *types.UpdateVirtualMachineFiles_Task) (*types.UpdateVirtualMachineFiles_TaskResponse, error) {
+	var reqBody, resBody UpdateVirtualMachineFiles_TaskBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type UpdateVirtualNicBody struct {
+	Req *types.UpdateVirtualNic `xml:"urn:vim25 UpdateVirtualNic,omitempty"`
+	Res *types.UpdateVirtualNicResponse `xml:"urn:vim25 UpdateVirtualNicResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *UpdateVirtualNicBody) Fault() *soap.Fault { return b.Fault_ }
+
+func UpdateVirtualNic(ctx context.Context, r soap.RoundTripper, req *types.UpdateVirtualNic) (*types.UpdateVirtualNicResponse, error) {
+	var reqBody, resBody UpdateVirtualNicBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type UpdateVirtualSwitchBody struct {
+	Req *types.UpdateVirtualSwitch `xml:"urn:vim25 UpdateVirtualSwitch,omitempty"`
+	Res *types.UpdateVirtualSwitchResponse `xml:"urn:vim25 UpdateVirtualSwitchResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *UpdateVirtualSwitchBody) Fault() *soap.Fault { return b.Fault_ }
+
+func UpdateVirtualSwitch(ctx context.Context, r soap.RoundTripper, req *types.UpdateVirtualSwitch) (*types.UpdateVirtualSwitchResponse, error) {
+	var reqBody, resBody UpdateVirtualSwitchBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type UpdateVsan_TaskBody struct {
+	Req *types.UpdateVsan_Task `xml:"urn:vim25 UpdateVsan_Task,omitempty"`
+	Res *types.UpdateVsan_TaskResponse `xml:"urn:vim25 UpdateVsan_TaskResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *UpdateVsan_TaskBody) Fault() *soap.Fault { return b.Fault_ }
+
+func UpdateVsan_Task(ctx context.Context, r soap.RoundTripper, req *types.UpdateVsan_Task) (*types.UpdateVsan_TaskResponse, error) {
+	var reqBody, resBody UpdateVsan_TaskBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type UpgradeIoFilter_TaskBody struct {
+	Req *types.UpgradeIoFilter_Task `xml:"urn:vim25 UpgradeIoFilter_Task,omitempty"`
+	Res *types.UpgradeIoFilter_TaskResponse `xml:"urn:vim25 UpgradeIoFilter_TaskResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *UpgradeIoFilter_TaskBody) Fault() *soap.Fault { return b.Fault_ }
+
+func UpgradeIoFilter_Task(ctx context.Context, r soap.RoundTripper, req *types.UpgradeIoFilter_Task) (*types.UpgradeIoFilter_TaskResponse, error) {
+	var reqBody, resBody UpgradeIoFilter_TaskBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type UpgradeTools_TaskBody struct {
+	Req *types.UpgradeTools_Task `xml:"urn:vim25 UpgradeTools_Task,omitempty"`
+	Res *types.UpgradeTools_TaskResponse `xml:"urn:vim25 UpgradeTools_TaskResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *UpgradeTools_TaskBody) Fault() *soap.Fault { return b.Fault_ }
+
+func UpgradeTools_Task(ctx context.Context, r soap.RoundTripper, req *types.UpgradeTools_Task) (*types.UpgradeTools_TaskResponse, error) {
+	var reqBody, resBody UpgradeTools_TaskBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type UpgradeVM_TaskBody struct {
+	Req *types.UpgradeVM_Task `xml:"urn:vim25 UpgradeVM_Task,omitempty"`
+	Res *types.UpgradeVM_TaskResponse `xml:"urn:vim25 UpgradeVM_TaskResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *UpgradeVM_TaskBody) Fault() *soap.Fault { return b.Fault_ }
+
+func UpgradeVM_Task(ctx context.Context, r soap.RoundTripper, req *types.UpgradeVM_Task) (*types.UpgradeVM_TaskResponse, error) {
+	var reqBody, resBody UpgradeVM_TaskBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type UpgradeVmLayoutBody struct {
+	Req *types.UpgradeVmLayout `xml:"urn:vim25 UpgradeVmLayout,omitempty"`
+	Res *types.UpgradeVmLayoutResponse `xml:"urn:vim25 UpgradeVmLayoutResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *UpgradeVmLayoutBody) Fault() *soap.Fault { return b.Fault_ }
+
+func UpgradeVmLayout(ctx context.Context, r soap.RoundTripper, req *types.UpgradeVmLayout) (*types.UpgradeVmLayoutResponse, error) {
+	var reqBody, resBody UpgradeVmLayoutBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type UpgradeVmfsBody struct {
+	Req *types.UpgradeVmfs `xml:"urn:vim25 UpgradeVmfs,omitempty"`
+	Res *types.UpgradeVmfsResponse `xml:"urn:vim25 UpgradeVmfsResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *UpgradeVmfsBody) Fault() *soap.Fault { return b.Fault_ }
+
+func UpgradeVmfs(ctx context.Context, r soap.RoundTripper, req *types.UpgradeVmfs) (*types.UpgradeVmfsResponse, error) {
+	var reqBody, resBody UpgradeVmfsBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type UpgradeVsanObjectsBody struct {
+	Req *types.UpgradeVsanObjects `xml:"urn:vim25 UpgradeVsanObjects,omitempty"`
+	Res *types.UpgradeVsanObjectsResponse `xml:"urn:vim25 UpgradeVsanObjectsResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *UpgradeVsanObjectsBody) Fault() *soap.Fault { return b.Fault_ }
+
+func UpgradeVsanObjects(ctx context.Context, r soap.RoundTripper, req *types.UpgradeVsanObjects) (*types.UpgradeVsanObjectsResponse, error) {
+	var reqBody, resBody UpgradeVsanObjectsBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type ValidateCredentialsInGuestBody struct {
+	Req *types.ValidateCredentialsInGuest `xml:"urn:vim25 ValidateCredentialsInGuest,omitempty"`
+	Res *types.ValidateCredentialsInGuestResponse `xml:"urn:vim25 ValidateCredentialsInGuestResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *ValidateCredentialsInGuestBody) Fault() *soap.Fault { return b.Fault_ }
+
+func ValidateCredentialsInGuest(ctx context.Context, r soap.RoundTripper, req *types.ValidateCredentialsInGuest) (*types.ValidateCredentialsInGuestResponse, error) {
+	var reqBody, resBody ValidateCredentialsInGuestBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type ValidateHostBody struct {
+	Req *types.ValidateHost `xml:"urn:vim25 ValidateHost,omitempty"`
+	Res *types.ValidateHostResponse `xml:"urn:vim25 ValidateHostResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *ValidateHostBody) Fault() *soap.Fault { return b.Fault_ }
+
+func ValidateHost(ctx context.Context, r soap.RoundTripper, req *types.ValidateHost) (*types.ValidateHostResponse, error) {
+	var reqBody, resBody ValidateHostBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type ValidateMigrationBody struct {
+	Req *types.ValidateMigration `xml:"urn:vim25 ValidateMigration,omitempty"`
+	Res *types.ValidateMigrationResponse `xml:"urn:vim25 ValidateMigrationResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *ValidateMigrationBody) Fault() *soap.Fault { return b.Fault_ }
+
+func ValidateMigration(ctx context.Context, r soap.RoundTripper, req *types.ValidateMigration) (*types.ValidateMigrationResponse, error) {
+	var reqBody, resBody ValidateMigrationBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type WaitForUpdatesBody struct {
+	Req *types.WaitForUpdates `xml:"urn:vim25 WaitForUpdates,omitempty"`
+	Res *types.WaitForUpdatesResponse `xml:"urn:vim25 WaitForUpdatesResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *WaitForUpdatesBody) Fault() *soap.Fault { return b.Fault_ }
+
+func WaitForUpdates(ctx context.Context, r soap.RoundTripper, req *types.WaitForUpdates) (*types.WaitForUpdatesResponse, error) {
+	var reqBody, resBody WaitForUpdatesBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type WaitForUpdatesExBody struct {
+	Req *types.WaitForUpdatesEx `xml:"urn:vim25 WaitForUpdatesEx,omitempty"`
+	Res *types.WaitForUpdatesExResponse `xml:"urn:vim25 WaitForUpdatesExResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *WaitForUpdatesExBody) Fault() *soap.Fault { return b.Fault_ }
+
+func WaitForUpdatesEx(ctx context.Context, r soap.RoundTripper, req *types.WaitForUpdatesEx) (*types.WaitForUpdatesExResponse, error) {
+	var reqBody, resBody WaitForUpdatesExBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type XmlToCustomizationSpecItemBody struct {
+	Req *types.XmlToCustomizationSpecItem `xml:"urn:vim25 XmlToCustomizationSpecItem,omitempty"`
+	Res *types.XmlToCustomizationSpecItemResponse `xml:"urn:vim25 XmlToCustomizationSpecItemResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *XmlToCustomizationSpecItemBody) Fault() *soap.Fault { return b.Fault_ }
+
+func XmlToCustomizationSpecItem(ctx context.Context, r soap.RoundTripper, req *types.XmlToCustomizationSpecItem) (*types.XmlToCustomizationSpecItemResponse, error) {
+	var reqBody, resBody XmlToCustomizationSpecItemBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type ZeroFillVirtualDisk_TaskBody struct {
+	Req *types.ZeroFillVirtualDisk_Task `xml:"urn:vim25 ZeroFillVirtualDisk_Task,omitempty"`
+	Res *types.ZeroFillVirtualDisk_TaskResponse `xml:"urn:vim25 ZeroFillVirtualDisk_TaskResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *ZeroFillVirtualDisk_TaskBody) Fault() *soap.Fault { return b.Fault_ }
+
+func ZeroFillVirtualDisk_Task(ctx context.Context, r soap.RoundTripper, req *types.ZeroFillVirtualDisk_Task) (*types.ZeroFillVirtualDisk_TaskResponse, error) {
+	var reqBody, resBody ZeroFillVirtualDisk_TaskBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
diff --git a/vendor/github.com/vmware/govmomi/vim25/methods/service_content.go b/vendor/github.com/vmware/govmomi/vim25/methods/service_content.go
new file mode 100644
index 000000000..f685fdc2e
--- /dev/null
+++ b/vendor/github.com/vmware/govmomi/vim25/methods/service_content.go
@@ -0,0 +1,56 @@
+/*
+Copyright (c) 2015 VMware, Inc. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/ + +package methods + +import ( + "time" + + "github.com/vmware/govmomi/vim25/soap" + "github.com/vmware/govmomi/vim25/types" + "golang.org/x/net/context" +) + +var serviceInstance = types.ManagedObjectReference{ + Type: "ServiceInstance", + Value: "ServiceInstance", +} + +func GetServiceContent(ctx context.Context, r soap.RoundTripper) (types.ServiceContent, error) { + req := types.RetrieveServiceContent{ + This: serviceInstance, + } + + res, err := RetrieveServiceContent(ctx, r, &req) + if err != nil { + return types.ServiceContent{}, err + } + + return res.Returnval, nil +} + +func GetCurrentTime(ctx context.Context, r soap.RoundTripper) (*time.Time, error) { + req := types.CurrentTime{ + This: serviceInstance, + } + + res, err := CurrentTime(ctx, r, &req) + if err != nil { + return nil, err + } + + return &res.Returnval, nil +} diff --git a/vendor/github.com/vmware/govmomi/vim25/mo/ancestors.go b/vendor/github.com/vmware/govmomi/vim25/mo/ancestors.go new file mode 100644 index 000000000..4bea1552c --- /dev/null +++ b/vendor/github.com/vmware/govmomi/vim25/mo/ancestors.go @@ -0,0 +1,92 @@ +/* +Copyright (c) 2015 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package mo + +import ( + "github.com/vmware/govmomi/vim25/soap" + "github.com/vmware/govmomi/vim25/types" + "golang.org/x/net/context" +) + +// Ancestors returns the entire ancestry tree of a specified managed object. +// The return value includes the root node and the specified object itself. +func Ancestors(ctx context.Context, rt soap.RoundTripper, pc, obj types.ManagedObjectReference) ([]ManagedEntity, error) { + ospec := types.ObjectSpec{ + Obj: obj, + SelectSet: []types.BaseSelectionSpec{ + &types.TraversalSpec{ + SelectionSpec: types.SelectionSpec{Name: "traverseParent"}, + Type: "ManagedEntity", + Path: "parent", + Skip: types.NewBool(false), + SelectSet: []types.BaseSelectionSpec{ + &types.SelectionSpec{Name: "traverseParent"}, + }, + }, + }, + Skip: types.NewBool(false), + } + + pspec := types.PropertySpec{ + Type: "ManagedEntity", + PathSet: []string{"name", "parent"}, + } + + req := types.RetrieveProperties{ + This: pc, + SpecSet: []types.PropertyFilterSpec{ + { + ObjectSet: []types.ObjectSpec{ospec}, + PropSet: []types.PropertySpec{pspec}, + }, + }, + } + + var ifaces []interface{} + + err := RetrievePropertiesForRequest(ctx, rt, req, &ifaces) + if err != nil { + return nil, err + } + + var out []ManagedEntity + + // Build ancestry tree by iteratively finding a new child. + for len(out) < len(ifaces) { + var find types.ManagedObjectReference + + if len(out) > 0 { + find = out[len(out)-1].Self + } + + // Find entity we're looking for given the last entity in the current tree. 
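+ // NOTE (editorial): on the first pass, find is the zero-valued + // ManagedObjectReference, so the entity whose Parent is nil (the + // inventory root) matches; each later pass appends the entity whose + // Parent equals the one appended last, extending the chain from root + // to leaf.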
+ for _, iface := range ifaces { + me := iface.(IsManagedEntity).GetManagedEntity() + if me.Parent == nil { + out = append(out, me) + break + } + + if *me.Parent == find { + out = append(out, me) + break + } + } + } + + return out, nil +} diff --git a/vendor/github.com/vmware/govmomi/vim25/mo/extra.go b/vendor/github.com/vmware/govmomi/vim25/mo/extra.go new file mode 100644 index 000000000..36ed5ff0f --- /dev/null +++ b/vendor/github.com/vmware/govmomi/vim25/mo/extra.go @@ -0,0 +1,57 @@ +/* +Copyright (c) 2014 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package mo + +type IsManagedEntity interface { + GetManagedEntity() ManagedEntity +} + +func (m ComputeResource) GetManagedEntity() ManagedEntity { + return m.ManagedEntity +} + +func (m Datacenter) GetManagedEntity() ManagedEntity { + return m.ManagedEntity +} + +func (m Datastore) GetManagedEntity() ManagedEntity { + return m.ManagedEntity +} + +func (m DistributedVirtualSwitch) GetManagedEntity() ManagedEntity { + return m.ManagedEntity +} + +func (m Folder) GetManagedEntity() ManagedEntity { + return m.ManagedEntity +} + +func (m HostSystem) GetManagedEntity() ManagedEntity { + return m.ManagedEntity +} + +func (m Network) GetManagedEntity() ManagedEntity { + return m.ManagedEntity +} + +func (m ResourcePool) GetManagedEntity() ManagedEntity { + return m.ManagedEntity +} + +func (m VirtualMachine) GetManagedEntity() ManagedEntity { + return m.ManagedEntity +} diff --git a/vendor/github.com/vmware/govmomi/vim25/mo/fixtures/cluster_host_property.xml b/vendor/github.com/vmware/govmomi/vim25/mo/fixtures/cluster_host_property.xml new file mode 100644 index 000000000..be218c052 --- /dev/null +++ b/vendor/github.com/vmware/govmomi/vim25/mo/fixtures/cluster_host_property.xml @@ -0,0 +1,15 @@ + + + + domain-c7 + + host + + host-14 + host-17 + host-19 + host-52 + + + + diff --git a/vendor/github.com/vmware/govmomi/vim25/mo/fixtures/hostsystem_list_name_property.xml b/vendor/github.com/vmware/govmomi/vim25/mo/fixtures/hostsystem_list_name_property.xml new file mode 100644 index 000000000..05d369d85 --- /dev/null +++ b/vendor/github.com/vmware/govmomi/vim25/mo/fixtures/hostsystem_list_name_property.xml @@ -0,0 +1,17 @@ + + + + host-10 + + name + host-01.example.com + + + + host-30 + + name + host-02.example.com + + + diff --git a/vendor/github.com/vmware/govmomi/vim25/mo/fixtures/nested_property.xml b/vendor/github.com/vmware/govmomi/vim25/mo/fixtures/nested_property.xml new file mode 100644 index 000000000..ed44167f4 --- /dev/null +++ b/vendor/github.com/vmware/govmomi/vim25/mo/fixtures/nested_property.xml @@ -0,0 +1,14 @@ + + + + vm-411 + + config.name + kubernetes-master + + + config.uuid + 422ec880-ab06-06b4-23f3-beb7a052a4c9 + + + diff --git a/vendor/github.com/vmware/govmomi/vim25/mo/fixtures/not_authenticated_fault.xml b/vendor/github.com/vmware/govmomi/vim25/mo/fixtures/not_authenticated_fault.xml new file mode 100644 index 000000000..2a7fcdbdf --- /dev/null +++ 
b/vendor/github.com/vmware/govmomi/vim25/mo/fixtures/not_authenticated_fault.xml @@ -0,0 +1,42 @@ + + + + SessionManager + + defaultLocale + en + + + messageLocaleList + + ja + zh_CN + en + de + zh_TW + ko + fr + + + + message + + + group-d1 + System.View + + + + + + sessionList + + + group-d1 + Sessions.TerminateSession + + + + + + diff --git a/vendor/github.com/vmware/govmomi/vim25/mo/fixtures/pointer_property.xml b/vendor/github.com/vmware/govmomi/vim25/mo/fixtures/pointer_property.xml new file mode 100644 index 000000000..54baaf5ff --- /dev/null +++ b/vendor/github.com/vmware/govmomi/vim25/mo/fixtures/pointer_property.xml @@ -0,0 +1,15 @@ + + + + vm-411 + + config.bootOptions + + 0 + false + false + 10000 + + + + diff --git a/vendor/github.com/vmware/govmomi/vim25/mo/mo.go b/vendor/github.com/vmware/govmomi/vim25/mo/mo.go new file mode 100644 index 000000000..4ae25839d --- /dev/null +++ b/vendor/github.com/vmware/govmomi/vim25/mo/mo.go @@ -0,0 +1,1509 @@ +/* +Copyright (c) 2014 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package mo + +import ( + "reflect" + "time" + + "github.com/vmware/govmomi/vim25/types" +) + +type Alarm struct { + ExtensibleManagedObject + + Info types.AlarmInfo `mo:"info"` +} + +func init() { + t["Alarm"] = reflect.TypeOf((*Alarm)(nil)).Elem() +} + +type AlarmManager struct { + Self types.ManagedObjectReference + + DefaultExpression []types.BaseAlarmExpression `mo:"defaultExpression"` + Description types.AlarmDescription `mo:"description"` +} + +func (m AlarmManager) Reference() types.ManagedObjectReference { + return m.Self +} + +func init() { + t["AlarmManager"] = reflect.TypeOf((*AlarmManager)(nil)).Elem() +} + +type AuthorizationManager struct { + Self types.ManagedObjectReference + + PrivilegeList []types.AuthorizationPrivilege `mo:"privilegeList"` + RoleList []types.AuthorizationRole `mo:"roleList"` + Description types.AuthorizationDescription `mo:"description"` +} + +func (m AuthorizationManager) Reference() types.ManagedObjectReference { + return m.Self +} + +func init() { + t["AuthorizationManager"] = reflect.TypeOf((*AuthorizationManager)(nil)).Elem() +} + +type ClusterComputeResource struct { + ComputeResource + + Configuration types.ClusterConfigInfo `mo:"configuration"` + Recommendation []types.ClusterRecommendation `mo:"recommendation"` + DrsRecommendation []types.ClusterDrsRecommendation `mo:"drsRecommendation"` + MigrationHistory []types.ClusterDrsMigration `mo:"migrationHistory"` + ActionHistory []types.ClusterActionHistory `mo:"actionHistory"` + DrsFault []types.ClusterDrsFaults `mo:"drsFault"` +} + +func init() { + t["ClusterComputeResource"] = reflect.TypeOf((*ClusterComputeResource)(nil)).Elem() +} + +type ClusterProfile struct { + Profile +} + +func init() { + t["ClusterProfile"] = reflect.TypeOf((*ClusterProfile)(nil)).Elem() +} + +type ClusterProfileManager struct { + ProfileManager +} + +func init() { + t["ClusterProfileManager"] = reflect.TypeOf((*ClusterProfileManager)(nil)).Elem() +} + +type ComputeResource struct { 
+ ManagedEntity + + ResourcePool *types.ManagedObjectReference `mo:"resourcePool"` + Host []types.ManagedObjectReference `mo:"host"` + Datastore []types.ManagedObjectReference `mo:"datastore"` + Network []types.ManagedObjectReference `mo:"network"` + Summary types.BaseComputeResourceSummary `mo:"summary"` + EnvironmentBrowser *types.ManagedObjectReference `mo:"environmentBrowser"` + ConfigurationEx types.BaseComputeResourceConfigInfo `mo:"configurationEx"` +} + +func init() { + t["ComputeResource"] = reflect.TypeOf((*ComputeResource)(nil)).Elem() +} + +type ContainerView struct { + ManagedObjectView + + Container types.ManagedObjectReference `mo:"container"` + Type []string `mo:"type"` + Recursive bool `mo:"recursive"` +} + +func init() { + t["ContainerView"] = reflect.TypeOf((*ContainerView)(nil)).Elem() +} + +type CustomFieldsManager struct { + Self types.ManagedObjectReference + + Field []types.CustomFieldDef `mo:"field"` +} + +func (m CustomFieldsManager) Reference() types.ManagedObjectReference { + return m.Self +} + +func init() { + t["CustomFieldsManager"] = reflect.TypeOf((*CustomFieldsManager)(nil)).Elem() +} + +type CustomizationSpecManager struct { + Self types.ManagedObjectReference + + Info []types.CustomizationSpecInfo `mo:"info"` + EncryptionKey []byte `mo:"encryptionKey"` +} + +func (m CustomizationSpecManager) Reference() types.ManagedObjectReference { + return m.Self +} + +func init() { + t["CustomizationSpecManager"] = reflect.TypeOf((*CustomizationSpecManager)(nil)).Elem() +} + +type Datacenter struct { + ManagedEntity + + VmFolder types.ManagedObjectReference `mo:"vmFolder"` + HostFolder types.ManagedObjectReference `mo:"hostFolder"` + DatastoreFolder types.ManagedObjectReference `mo:"datastoreFolder"` + NetworkFolder types.ManagedObjectReference `mo:"networkFolder"` + Datastore []types.ManagedObjectReference `mo:"datastore"` + Network []types.ManagedObjectReference `mo:"network"` + Configuration types.DatacenterConfigInfo `mo:"configuration"` +} + +func init() { + t["Datacenter"] = reflect.TypeOf((*Datacenter)(nil)).Elem() +} + +type Datastore struct { + ManagedEntity + + Info types.BaseDatastoreInfo `mo:"info"` + Summary types.DatastoreSummary `mo:"summary"` + Host []types.DatastoreHostMount `mo:"host"` + Vm []types.ManagedObjectReference `mo:"vm"` + Browser types.ManagedObjectReference `mo:"browser"` + Capability types.DatastoreCapability `mo:"capability"` + IormConfiguration *types.StorageIORMInfo `mo:"iormConfiguration"` +} + +func init() { + t["Datastore"] = reflect.TypeOf((*Datastore)(nil)).Elem() +} + +type DatastoreNamespaceManager struct { + Self types.ManagedObjectReference +} + +func (m DatastoreNamespaceManager) Reference() types.ManagedObjectReference { + return m.Self +} + +func init() { + t["DatastoreNamespaceManager"] = reflect.TypeOf((*DatastoreNamespaceManager)(nil)).Elem() +} + +type DiagnosticManager struct { + Self types.ManagedObjectReference +} + +func (m DiagnosticManager) Reference() types.ManagedObjectReference { + return m.Self +} + +func init() { + t["DiagnosticManager"] = reflect.TypeOf((*DiagnosticManager)(nil)).Elem() +} + +type DistributedVirtualPortgroup struct { + Network + + Key string `mo:"key"` + Config types.DVPortgroupConfigInfo `mo:"config"` + PortKeys []string `mo:"portKeys"` +} + +func init() { + t["DistributedVirtualPortgroup"] = reflect.TypeOf((*DistributedVirtualPortgroup)(nil)).Elem() +} + +type DistributedVirtualSwitch struct { + ManagedEntity + + Uuid string `mo:"uuid"` + Capability types.DVSCapability `mo:"capability"` 
+ Summary types.DVSSummary `mo:"summary"` + Config types.BaseDVSConfigInfo `mo:"config"` + NetworkResourcePool []types.DVSNetworkResourcePool `mo:"networkResourcePool"` + Portgroup []types.ManagedObjectReference `mo:"portgroup"` + Runtime *types.DVSRuntimeInfo `mo:"runtime"` +} + +func init() { + t["DistributedVirtualSwitch"] = reflect.TypeOf((*DistributedVirtualSwitch)(nil)).Elem() +} + +type DistributedVirtualSwitchManager struct { + Self types.ManagedObjectReference +} + +func (m DistributedVirtualSwitchManager) Reference() types.ManagedObjectReference { + return m.Self +} + +func init() { + t["DistributedVirtualSwitchManager"] = reflect.TypeOf((*DistributedVirtualSwitchManager)(nil)).Elem() +} + +type EnvironmentBrowser struct { + Self types.ManagedObjectReference + + DatastoreBrowser *types.ManagedObjectReference `mo:"datastoreBrowser"` +} + +func (m EnvironmentBrowser) Reference() types.ManagedObjectReference { + return m.Self +} + +func init() { + t["EnvironmentBrowser"] = reflect.TypeOf((*EnvironmentBrowser)(nil)).Elem() +} + +type EventHistoryCollector struct { + HistoryCollector + + LatestPage []types.BaseEvent `mo:"latestPage"` +} + +func init() { + t["EventHistoryCollector"] = reflect.TypeOf((*EventHistoryCollector)(nil)).Elem() +} + +type EventManager struct { + Self types.ManagedObjectReference + + Description types.EventDescription `mo:"description"` + LatestEvent types.BaseEvent `mo:"latestEvent"` + MaxCollector int `mo:"maxCollector"` +} + +func (m EventManager) Reference() types.ManagedObjectReference { + return m.Self +} + +func init() { + t["EventManager"] = reflect.TypeOf((*EventManager)(nil)).Elem() +} + +type ExtensibleManagedObject struct { + Self types.ManagedObjectReference + + Value []types.BaseCustomFieldValue `mo:"value"` + AvailableField []types.CustomFieldDef `mo:"availableField"` +} + +func (m ExtensibleManagedObject) Reference() types.ManagedObjectReference { + return m.Self +} + +func init() { + t["ExtensibleManagedObject"] = reflect.TypeOf((*ExtensibleManagedObject)(nil)).Elem() +} + +type ExtensionManager struct { + Self types.ManagedObjectReference + + ExtensionList []types.Extension `mo:"extensionList"` +} + +func (m ExtensionManager) Reference() types.ManagedObjectReference { + return m.Self +} + +func init() { + t["ExtensionManager"] = reflect.TypeOf((*ExtensionManager)(nil)).Elem() +} + +type FileManager struct { + Self types.ManagedObjectReference +} + +func (m FileManager) Reference() types.ManagedObjectReference { + return m.Self +} + +func init() { + t["FileManager"] = reflect.TypeOf((*FileManager)(nil)).Elem() +} + +type Folder struct { + ManagedEntity + + ChildType []string `mo:"childType"` + ChildEntity []types.ManagedObjectReference `mo:"childEntity"` +} + +func init() { + t["Folder"] = reflect.TypeOf((*Folder)(nil)).Elem() +} + +type GuestAuthManager struct { + Self types.ManagedObjectReference +} + +func (m GuestAuthManager) Reference() types.ManagedObjectReference { + return m.Self +} + +func init() { + t["GuestAuthManager"] = reflect.TypeOf((*GuestAuthManager)(nil)).Elem() +} + +type GuestFileManager struct { + Self types.ManagedObjectReference +} + +func (m GuestFileManager) Reference() types.ManagedObjectReference { + return m.Self +} + +func init() { + t["GuestFileManager"] = reflect.TypeOf((*GuestFileManager)(nil)).Elem() +} + +type GuestOperationsManager struct { + Self types.ManagedObjectReference + + AuthManager *types.ManagedObjectReference `mo:"authManager"` + FileManager *types.ManagedObjectReference `mo:"fileManager"` + 
ProcessManager *types.ManagedObjectReference `mo:"processManager"` +} + +func (m GuestOperationsManager) Reference() types.ManagedObjectReference { + return m.Self +} + +func init() { + t["GuestOperationsManager"] = reflect.TypeOf((*GuestOperationsManager)(nil)).Elem() +} + +type GuestProcessManager struct { + Self types.ManagedObjectReference +} + +func (m GuestProcessManager) Reference() types.ManagedObjectReference { + return m.Self +} + +func init() { + t["GuestProcessManager"] = reflect.TypeOf((*GuestProcessManager)(nil)).Elem() +} + +type HistoryCollector struct { + Self types.ManagedObjectReference + + Filter types.AnyType `mo:"filter"` +} + +func (m HistoryCollector) Reference() types.ManagedObjectReference { + return m.Self +} + +func init() { + t["HistoryCollector"] = reflect.TypeOf((*HistoryCollector)(nil)).Elem() +} + +type HostActiveDirectoryAuthentication struct { + HostDirectoryStore +} + +func init() { + t["HostActiveDirectoryAuthentication"] = reflect.TypeOf((*HostActiveDirectoryAuthentication)(nil)).Elem() +} + +type HostAuthenticationManager struct { + Self types.ManagedObjectReference + + Info types.HostAuthenticationManagerInfo `mo:"info"` + SupportedStore []types.ManagedObjectReference `mo:"supportedStore"` +} + +func (m HostAuthenticationManager) Reference() types.ManagedObjectReference { + return m.Self +} + +func init() { + t["HostAuthenticationManager"] = reflect.TypeOf((*HostAuthenticationManager)(nil)).Elem() +} + +type HostAuthenticationStore struct { + Self types.ManagedObjectReference + + Info types.BaseHostAuthenticationStoreInfo `mo:"info"` +} + +func (m HostAuthenticationStore) Reference() types.ManagedObjectReference { + return m.Self +} + +func init() { + t["HostAuthenticationStore"] = reflect.TypeOf((*HostAuthenticationStore)(nil)).Elem() +} + +type HostAutoStartManager struct { + Self types.ManagedObjectReference + + Config types.HostAutoStartManagerConfig `mo:"config"` +} + +func (m HostAutoStartManager) Reference() types.ManagedObjectReference { + return m.Self +} + +func init() { + t["HostAutoStartManager"] = reflect.TypeOf((*HostAutoStartManager)(nil)).Elem() +} + +type HostBootDeviceSystem struct { + Self types.ManagedObjectReference +} + +func (m HostBootDeviceSystem) Reference() types.ManagedObjectReference { + return m.Self +} + +func init() { + t["HostBootDeviceSystem"] = reflect.TypeOf((*HostBootDeviceSystem)(nil)).Elem() +} + +type HostCacheConfigurationManager struct { + Self types.ManagedObjectReference + + CacheConfigurationInfo []types.HostCacheConfigurationInfo `mo:"cacheConfigurationInfo"` +} + +func (m HostCacheConfigurationManager) Reference() types.ManagedObjectReference { + return m.Self +} + +func init() { + t["HostCacheConfigurationManager"] = reflect.TypeOf((*HostCacheConfigurationManager)(nil)).Elem() +} + +type HostCpuSchedulerSystem struct { + ExtensibleManagedObject + + HyperthreadInfo *types.HostHyperThreadScheduleInfo `mo:"hyperthreadInfo"` +} + +func init() { + t["HostCpuSchedulerSystem"] = reflect.TypeOf((*HostCpuSchedulerSystem)(nil)).Elem() +} + +type HostDatastoreBrowser struct { + Self types.ManagedObjectReference + + Datastore []types.ManagedObjectReference `mo:"datastore"` + SupportedType []types.BaseFileQuery `mo:"supportedType"` +} + +func (m HostDatastoreBrowser) Reference() types.ManagedObjectReference { + return m.Self +} + +func init() { + t["HostDatastoreBrowser"] = reflect.TypeOf((*HostDatastoreBrowser)(nil)).Elem() +} + +type HostDatastoreSystem struct { + Self types.ManagedObjectReference + + Datastore 
[]types.ManagedObjectReference `mo:"datastore"` + Capabilities types.HostDatastoreSystemCapabilities `mo:"capabilities"` +} + +func (m HostDatastoreSystem) Reference() types.ManagedObjectReference { + return m.Self +} + +func init() { + t["HostDatastoreSystem"] = reflect.TypeOf((*HostDatastoreSystem)(nil)).Elem() +} + +type HostDateTimeSystem struct { + Self types.ManagedObjectReference + + DateTimeInfo types.HostDateTimeInfo `mo:"dateTimeInfo"` +} + +func (m HostDateTimeSystem) Reference() types.ManagedObjectReference { + return m.Self +} + +func init() { + t["HostDateTimeSystem"] = reflect.TypeOf((*HostDateTimeSystem)(nil)).Elem() +} + +type HostDiagnosticSystem struct { + Self types.ManagedObjectReference + + ActivePartition *types.HostDiagnosticPartition `mo:"activePartition"` +} + +func (m HostDiagnosticSystem) Reference() types.ManagedObjectReference { + return m.Self +} + +func init() { + t["HostDiagnosticSystem"] = reflect.TypeOf((*HostDiagnosticSystem)(nil)).Elem() +} + +type HostDirectoryStore struct { + HostAuthenticationStore +} + +func init() { + t["HostDirectoryStore"] = reflect.TypeOf((*HostDirectoryStore)(nil)).Elem() +} + +type HostEsxAgentHostManager struct { + Self types.ManagedObjectReference + + ConfigInfo types.HostEsxAgentHostManagerConfigInfo `mo:"configInfo"` +} + +func (m HostEsxAgentHostManager) Reference() types.ManagedObjectReference { + return m.Self +} + +func init() { + t["HostEsxAgentHostManager"] = reflect.TypeOf((*HostEsxAgentHostManager)(nil)).Elem() +} + +type HostFirewallSystem struct { + ExtensibleManagedObject + + FirewallInfo *types.HostFirewallInfo `mo:"firewallInfo"` +} + +func init() { + t["HostFirewallSystem"] = reflect.TypeOf((*HostFirewallSystem)(nil)).Elem() +} + +type HostFirmwareSystem struct { + Self types.ManagedObjectReference +} + +func (m HostFirmwareSystem) Reference() types.ManagedObjectReference { + return m.Self +} + +func init() { + t["HostFirmwareSystem"] = reflect.TypeOf((*HostFirmwareSystem)(nil)).Elem() +} + +type HostGraphicsManager struct { + ExtensibleManagedObject + + GraphicsInfo []types.HostGraphicsInfo `mo:"graphicsInfo"` +} + +func init() { + t["HostGraphicsManager"] = reflect.TypeOf((*HostGraphicsManager)(nil)).Elem() +} + +type HostHealthStatusSystem struct { + Self types.ManagedObjectReference + + Runtime types.HealthSystemRuntime `mo:"runtime"` +} + +func (m HostHealthStatusSystem) Reference() types.ManagedObjectReference { + return m.Self +} + +func init() { + t["HostHealthStatusSystem"] = reflect.TypeOf((*HostHealthStatusSystem)(nil)).Elem() +} + +type HostImageConfigManager struct { + Self types.ManagedObjectReference +} + +func (m HostImageConfigManager) Reference() types.ManagedObjectReference { + return m.Self +} + +func init() { + t["HostImageConfigManager"] = reflect.TypeOf((*HostImageConfigManager)(nil)).Elem() +} + +type HostKernelModuleSystem struct { + Self types.ManagedObjectReference +} + +func (m HostKernelModuleSystem) Reference() types.ManagedObjectReference { + return m.Self +} + +func init() { + t["HostKernelModuleSystem"] = reflect.TypeOf((*HostKernelModuleSystem)(nil)).Elem() +} + +type HostLocalAccountManager struct { + Self types.ManagedObjectReference +} + +func (m HostLocalAccountManager) Reference() types.ManagedObjectReference { + return m.Self +} + +func init() { + t["HostLocalAccountManager"] = reflect.TypeOf((*HostLocalAccountManager)(nil)).Elem() +} + +type HostLocalAuthentication struct { + HostAuthenticationStore +} + +func init() { + t["HostLocalAuthentication"] = 
reflect.TypeOf((*HostLocalAuthentication)(nil)).Elem() +} + +type HostMemorySystem struct { + ExtensibleManagedObject + + ConsoleReservationInfo *types.ServiceConsoleReservationInfo `mo:"consoleReservationInfo"` + VirtualMachineReservationInfo *types.VirtualMachineMemoryReservationInfo `mo:"virtualMachineReservationInfo"` +} + +func init() { + t["HostMemorySystem"] = reflect.TypeOf((*HostMemorySystem)(nil)).Elem() +} + +type HostNetworkSystem struct { + ExtensibleManagedObject + + Capabilities *types.HostNetCapabilities `mo:"capabilities"` + NetworkInfo *types.HostNetworkInfo `mo:"networkInfo"` + OffloadCapabilities *types.HostNetOffloadCapabilities `mo:"offloadCapabilities"` + NetworkConfig *types.HostNetworkConfig `mo:"networkConfig"` + DnsConfig types.BaseHostDnsConfig `mo:"dnsConfig"` + IpRouteConfig types.BaseHostIpRouteConfig `mo:"ipRouteConfig"` + ConsoleIpRouteConfig types.BaseHostIpRouteConfig `mo:"consoleIpRouteConfig"` +} + +func init() { + t["HostNetworkSystem"] = reflect.TypeOf((*HostNetworkSystem)(nil)).Elem() +} + +type HostPatchManager struct { + Self types.ManagedObjectReference +} + +func (m HostPatchManager) Reference() types.ManagedObjectReference { + return m.Self +} + +func init() { + t["HostPatchManager"] = reflect.TypeOf((*HostPatchManager)(nil)).Elem() +} + +type HostPciPassthruSystem struct { + ExtensibleManagedObject + + PciPassthruInfo []types.BaseHostPciPassthruInfo `mo:"pciPassthruInfo"` +} + +func init() { + t["HostPciPassthruSystem"] = reflect.TypeOf((*HostPciPassthruSystem)(nil)).Elem() +} + +type HostPowerSystem struct { + Self types.ManagedObjectReference + + Capability types.PowerSystemCapability `mo:"capability"` + Info types.PowerSystemInfo `mo:"info"` +} + +func (m HostPowerSystem) Reference() types.ManagedObjectReference { + return m.Self +} + +func init() { + t["HostPowerSystem"] = reflect.TypeOf((*HostPowerSystem)(nil)).Elem() +} + +type HostProfile struct { + Profile + + ReferenceHost *types.ManagedObjectReference `mo:"referenceHost"` +} + +func init() { + t["HostProfile"] = reflect.TypeOf((*HostProfile)(nil)).Elem() +} + +type HostProfileManager struct { + ProfileManager +} + +func init() { + t["HostProfileManager"] = reflect.TypeOf((*HostProfileManager)(nil)).Elem() +} + +type HostServiceSystem struct { + ExtensibleManagedObject + + ServiceInfo types.HostServiceInfo `mo:"serviceInfo"` +} + +func init() { + t["HostServiceSystem"] = reflect.TypeOf((*HostServiceSystem)(nil)).Elem() +} + +type HostSnmpSystem struct { + Self types.ManagedObjectReference + + Configuration types.HostSnmpConfigSpec `mo:"configuration"` + Limits types.HostSnmpSystemAgentLimits `mo:"limits"` +} + +func (m HostSnmpSystem) Reference() types.ManagedObjectReference { + return m.Self +} + +func init() { + t["HostSnmpSystem"] = reflect.TypeOf((*HostSnmpSystem)(nil)).Elem() +} + +type HostStorageSystem struct { + ExtensibleManagedObject + + StorageDeviceInfo *types.HostStorageDeviceInfo `mo:"storageDeviceInfo"` + FileSystemVolumeInfo types.HostFileSystemVolumeInfo `mo:"fileSystemVolumeInfo"` + SystemFile []string `mo:"systemFile"` + MultipathStateInfo *types.HostMultipathStateInfo `mo:"multipathStateInfo"` +} + +func init() { + t["HostStorageSystem"] = reflect.TypeOf((*HostStorageSystem)(nil)).Elem() +} + +type HostSystem struct { + ManagedEntity + + Runtime types.HostRuntimeInfo `mo:"runtime"` + Summary types.HostListSummary `mo:"summary"` + Hardware *types.HostHardwareInfo `mo:"hardware"` + Capability *types.HostCapability `mo:"capability"` + LicensableResource 
types.HostLicensableResourceInfo `mo:"licensableResource"` + ConfigManager types.HostConfigManager `mo:"configManager"` + Config *types.HostConfigInfo `mo:"config"` + Vm []types.ManagedObjectReference `mo:"vm"` + Datastore []types.ManagedObjectReference `mo:"datastore"` + Network []types.ManagedObjectReference `mo:"network"` + DatastoreBrowser types.ManagedObjectReference `mo:"datastoreBrowser"` + SystemResources *types.HostSystemResourceInfo `mo:"systemResources"` +} + +func init() { + t["HostSystem"] = reflect.TypeOf((*HostSystem)(nil)).Elem() +} + +type HostVFlashManager struct { + Self types.ManagedObjectReference + + VFlashConfigInfo *types.HostVFlashManagerVFlashConfigInfo `mo:"vFlashConfigInfo"` +} + +func (m HostVFlashManager) Reference() types.ManagedObjectReference { + return m.Self +} + +func init() { + t["HostVFlashManager"] = reflect.TypeOf((*HostVFlashManager)(nil)).Elem() +} + +type HostVMotionSystem struct { + ExtensibleManagedObject + + NetConfig *types.HostVMotionNetConfig `mo:"netConfig"` + IpConfig *types.HostIpConfig `mo:"ipConfig"` +} + +func init() { + t["HostVMotionSystem"] = reflect.TypeOf((*HostVMotionSystem)(nil)).Elem() +} + +type HostVirtualNicManager struct { + ExtensibleManagedObject + + Info types.HostVirtualNicManagerInfo `mo:"info"` +} + +func init() { + t["HostVirtualNicManager"] = reflect.TypeOf((*HostVirtualNicManager)(nil)).Elem() +} + +type HostVsanInternalSystem struct { + Self types.ManagedObjectReference +} + +func (m HostVsanInternalSystem) Reference() types.ManagedObjectReference { + return m.Self +} + +func init() { + t["HostVsanInternalSystem"] = reflect.TypeOf((*HostVsanInternalSystem)(nil)).Elem() +} + +type HostVsanSystem struct { + Self types.ManagedObjectReference + + Config types.VsanHostConfigInfo `mo:"config"` +} + +func (m HostVsanSystem) Reference() types.ManagedObjectReference { + return m.Self +} + +func init() { + t["HostVsanSystem"] = reflect.TypeOf((*HostVsanSystem)(nil)).Elem() +} + +type HttpNfcLease struct { + Self types.ManagedObjectReference + + InitializeProgress int `mo:"initializeProgress"` + Info *types.HttpNfcLeaseInfo `mo:"info"` + State types.HttpNfcLeaseState `mo:"state"` + Error *types.LocalizedMethodFault `mo:"error"` +} + +func (m HttpNfcLease) Reference() types.ManagedObjectReference { + return m.Self +} + +func init() { + t["HttpNfcLease"] = reflect.TypeOf((*HttpNfcLease)(nil)).Elem() +} + +type InternalDynamicTypeManager struct { + Self types.ManagedObjectReference +} + +func (m InternalDynamicTypeManager) Reference() types.ManagedObjectReference { + return m.Self +} + +func init() { + t["InternalDynamicTypeManager"] = reflect.TypeOf((*InternalDynamicTypeManager)(nil)).Elem() +} + +type InventoryView struct { + ManagedObjectView +} + +func init() { + t["InventoryView"] = reflect.TypeOf((*InventoryView)(nil)).Elem() +} + +type IpPoolManager struct { + Self types.ManagedObjectReference +} + +func (m IpPoolManager) Reference() types.ManagedObjectReference { + return m.Self +} + +func init() { + t["IpPoolManager"] = reflect.TypeOf((*IpPoolManager)(nil)).Elem() +} + +type IscsiManager struct { + Self types.ManagedObjectReference +} + +func (m IscsiManager) Reference() types.ManagedObjectReference { + return m.Self +} + +func init() { + t["IscsiManager"] = reflect.TypeOf((*IscsiManager)(nil)).Elem() +} + +type LicenseAssignmentManager struct { + Self types.ManagedObjectReference +} + +func (m LicenseAssignmentManager) Reference() types.ManagedObjectReference { + return m.Self +} + +func init() { + 
t["LicenseAssignmentManager"] = reflect.TypeOf((*LicenseAssignmentManager)(nil)).Elem() +} + +type LicenseManager struct { + Self types.ManagedObjectReference + + Source types.BaseLicenseSource `mo:"source"` + SourceAvailable bool `mo:"sourceAvailable"` + Diagnostics *types.LicenseDiagnostics `mo:"diagnostics"` + FeatureInfo []types.LicenseFeatureInfo `mo:"featureInfo"` + LicensedEdition string `mo:"licensedEdition"` + Licenses []types.LicenseManagerLicenseInfo `mo:"licenses"` + LicenseAssignmentManager *types.ManagedObjectReference `mo:"licenseAssignmentManager"` + Evaluation types.LicenseManagerEvaluationInfo `mo:"evaluation"` +} + +func (m LicenseManager) Reference() types.ManagedObjectReference { + return m.Self +} + +func init() { + t["LicenseManager"] = reflect.TypeOf((*LicenseManager)(nil)).Elem() +} + +type ListView struct { + ManagedObjectView +} + +func init() { + t["ListView"] = reflect.TypeOf((*ListView)(nil)).Elem() +} + +type LocalizationManager struct { + Self types.ManagedObjectReference + + Catalog []types.LocalizationManagerMessageCatalog `mo:"catalog"` +} + +func (m LocalizationManager) Reference() types.ManagedObjectReference { + return m.Self +} + +func init() { + t["LocalizationManager"] = reflect.TypeOf((*LocalizationManager)(nil)).Elem() +} + +type ManagedEntity struct { + ExtensibleManagedObject + + Parent *types.ManagedObjectReference `mo:"parent"` + CustomValue []types.BaseCustomFieldValue `mo:"customValue"` + OverallStatus types.ManagedEntityStatus `mo:"overallStatus"` + ConfigStatus types.ManagedEntityStatus `mo:"configStatus"` + ConfigIssue []types.BaseEvent `mo:"configIssue"` + EffectiveRole []int `mo:"effectiveRole"` + Permission []types.Permission `mo:"permission"` + Name string `mo:"name"` + DisabledMethod []string `mo:"disabledMethod"` + RecentTask []types.ManagedObjectReference `mo:"recentTask"` + DeclaredAlarmState []types.AlarmState `mo:"declaredAlarmState"` + TriggeredAlarmState []types.AlarmState `mo:"triggeredAlarmState"` + AlarmActionsEnabled *bool `mo:"alarmActionsEnabled"` + Tag []types.Tag `mo:"tag"` +} + +func init() { + t["ManagedEntity"] = reflect.TypeOf((*ManagedEntity)(nil)).Elem() +} + +type ManagedObjectView struct { + Self types.ManagedObjectReference + + View []types.ManagedObjectReference `mo:"view"` +} + +func (m ManagedObjectView) Reference() types.ManagedObjectReference { + return m.Self +} + +func init() { + t["ManagedObjectView"] = reflect.TypeOf((*ManagedObjectView)(nil)).Elem() +} + +type Network struct { + ManagedEntity + + Name string `mo:"name"` + Summary types.BaseNetworkSummary `mo:"summary"` + Host []types.ManagedObjectReference `mo:"host"` + Vm []types.ManagedObjectReference `mo:"vm"` +} + +func init() { + t["Network"] = reflect.TypeOf((*Network)(nil)).Elem() +} + +type OpaqueNetwork struct { + Network +} + +func init() { + t["OpaqueNetwork"] = reflect.TypeOf((*OpaqueNetwork)(nil)).Elem() +} + +type OptionManager struct { + Self types.ManagedObjectReference + + SupportedOption []types.OptionDef `mo:"supportedOption"` + Setting []types.BaseOptionValue `mo:"setting"` +} + +func (m OptionManager) Reference() types.ManagedObjectReference { + return m.Self +} + +func init() { + t["OptionManager"] = reflect.TypeOf((*OptionManager)(nil)).Elem() +} + +type OvfManager struct { + Self types.ManagedObjectReference + + OvfImportOption []types.OvfOptionInfo `mo:"ovfImportOption"` + OvfExportOption []types.OvfOptionInfo `mo:"ovfExportOption"` +} + +func (m OvfManager) Reference() types.ManagedObjectReference { + return m.Self +} + 
+func init() { + t["OvfManager"] = reflect.TypeOf((*OvfManager)(nil)).Elem() +} + +type PerformanceManager struct { + Self types.ManagedObjectReference + + Description types.PerformanceDescription `mo:"description"` + HistoricalInterval []types.PerfInterval `mo:"historicalInterval"` + PerfCounter []types.PerfCounterInfo `mo:"perfCounter"` +} + +func (m PerformanceManager) Reference() types.ManagedObjectReference { + return m.Self +} + +func init() { + t["PerformanceManager"] = reflect.TypeOf((*PerformanceManager)(nil)).Elem() +} + +type Profile struct { + Self types.ManagedObjectReference + + Config types.BaseProfileConfigInfo `mo:"config"` + Description *types.ProfileDescription `mo:"description"` + Name string `mo:"name"` + CreatedTime time.Time `mo:"createdTime"` + ModifiedTime time.Time `mo:"modifiedTime"` + Entity []types.ManagedObjectReference `mo:"entity"` + ComplianceStatus string `mo:"complianceStatus"` +} + +func (m Profile) Reference() types.ManagedObjectReference { + return m.Self +} + +func init() { + t["Profile"] = reflect.TypeOf((*Profile)(nil)).Elem() +} + +type ProfileComplianceManager struct { + Self types.ManagedObjectReference +} + +func (m ProfileComplianceManager) Reference() types.ManagedObjectReference { + return m.Self +} + +func init() { + t["ProfileComplianceManager"] = reflect.TypeOf((*ProfileComplianceManager)(nil)).Elem() +} + +type ProfileManager struct { + Self types.ManagedObjectReference + + Profile []types.ManagedObjectReference `mo:"profile"` +} + +func (m ProfileManager) Reference() types.ManagedObjectReference { + return m.Self +} + +func init() { + t["ProfileManager"] = reflect.TypeOf((*ProfileManager)(nil)).Elem() +} + +type PropertyCollector struct { + Self types.ManagedObjectReference + + Filter []types.ManagedObjectReference `mo:"filter"` +} + +func (m PropertyCollector) Reference() types.ManagedObjectReference { + return m.Self +} + +func init() { + t["PropertyCollector"] = reflect.TypeOf((*PropertyCollector)(nil)).Elem() +} + +type PropertyFilter struct { + Self types.ManagedObjectReference + + Spec types.PropertyFilterSpec `mo:"spec"` + PartialUpdates bool `mo:"partialUpdates"` +} + +func (m PropertyFilter) Reference() types.ManagedObjectReference { + return m.Self +} + +func init() { + t["PropertyFilter"] = reflect.TypeOf((*PropertyFilter)(nil)).Elem() +} + +type ReflectManagedMethodExecuter struct { + Self types.ManagedObjectReference +} + +func (m ReflectManagedMethodExecuter) Reference() types.ManagedObjectReference { + return m.Self +} + +func init() { + t["ReflectManagedMethodExecuter"] = reflect.TypeOf((*ReflectManagedMethodExecuter)(nil)).Elem() +} + +type ResourcePlanningManager struct { + Self types.ManagedObjectReference +} + +func (m ResourcePlanningManager) Reference() types.ManagedObjectReference { + return m.Self +} + +func init() { + t["ResourcePlanningManager"] = reflect.TypeOf((*ResourcePlanningManager)(nil)).Elem() +} + +type ResourcePool struct { + ManagedEntity + + Summary types.BaseResourcePoolSummary `mo:"summary"` + Runtime types.ResourcePoolRuntimeInfo `mo:"runtime"` + Owner types.ManagedObjectReference `mo:"owner"` + ResourcePool []types.ManagedObjectReference `mo:"resourcePool"` + Vm []types.ManagedObjectReference `mo:"vm"` + Config types.ResourceConfigSpec `mo:"config"` + ChildConfiguration []types.ResourceConfigSpec `mo:"childConfiguration"` +} + +func init() { + t["ResourcePool"] = reflect.TypeOf((*ResourcePool)(nil)).Elem() +} + +type ScheduledTask struct { + ExtensibleManagedObject + + Info 
types.ScheduledTaskInfo `mo:"info"` +} + +func init() { + t["ScheduledTask"] = reflect.TypeOf((*ScheduledTask)(nil)).Elem() +} + +type ScheduledTaskManager struct { + Self types.ManagedObjectReference + + ScheduledTask []types.ManagedObjectReference `mo:"scheduledTask"` + Description types.ScheduledTaskDescription `mo:"description"` +} + +func (m ScheduledTaskManager) Reference() types.ManagedObjectReference { + return m.Self +} + +func init() { + t["ScheduledTaskManager"] = reflect.TypeOf((*ScheduledTaskManager)(nil)).Elem() +} + +type SearchIndex struct { + Self types.ManagedObjectReference +} + +func (m SearchIndex) Reference() types.ManagedObjectReference { + return m.Self +} + +func init() { + t["SearchIndex"] = reflect.TypeOf((*SearchIndex)(nil)).Elem() +} + +type ServiceInstance struct { + Self types.ManagedObjectReference + + ServerClock time.Time `mo:"serverClock"` + Capability types.Capability `mo:"capability"` + Content types.ServiceContent `mo:"content"` +} + +func (m ServiceInstance) Reference() types.ManagedObjectReference { + return m.Self +} + +func init() { + t["ServiceInstance"] = reflect.TypeOf((*ServiceInstance)(nil)).Elem() +} + +type ServiceManager struct { + Self types.ManagedObjectReference + + Service []types.ServiceManagerServiceInfo `mo:"service"` +} + +func (m ServiceManager) Reference() types.ManagedObjectReference { + return m.Self +} + +func init() { + t["ServiceManager"] = reflect.TypeOf((*ServiceManager)(nil)).Elem() +} + +type SessionManager struct { + Self types.ManagedObjectReference + + SessionList []types.UserSession `mo:"sessionList"` + CurrentSession *types.UserSession `mo:"currentSession"` + Message *string `mo:"message"` + MessageLocaleList []string `mo:"messageLocaleList"` + SupportedLocaleList []string `mo:"supportedLocaleList"` + DefaultLocale string `mo:"defaultLocale"` +} + +func (m SessionManager) Reference() types.ManagedObjectReference { + return m.Self +} + +func init() { + t["SessionManager"] = reflect.TypeOf((*SessionManager)(nil)).Elem() +} + +type SimpleCommand struct { + Self types.ManagedObjectReference + + EncodingType types.SimpleCommandEncoding `mo:"encodingType"` + Entity types.ServiceManagerServiceInfo `mo:"entity"` +} + +func (m SimpleCommand) Reference() types.ManagedObjectReference { + return m.Self +} + +func init() { + t["SimpleCommand"] = reflect.TypeOf((*SimpleCommand)(nil)).Elem() +} + +type StoragePod struct { + Folder + + Summary *types.StoragePodSummary `mo:"summary"` + PodStorageDrsEntry *types.PodStorageDrsEntry `mo:"podStorageDrsEntry"` +} + +func init() { + t["StoragePod"] = reflect.TypeOf((*StoragePod)(nil)).Elem() +} + +type StorageResourceManager struct { + Self types.ManagedObjectReference +} + +func (m StorageResourceManager) Reference() types.ManagedObjectReference { + return m.Self +} + +func init() { + t["StorageResourceManager"] = reflect.TypeOf((*StorageResourceManager)(nil)).Elem() +} + +type Task struct { + ExtensibleManagedObject + + Info types.TaskInfo `mo:"info"` +} + +func init() { + t["Task"] = reflect.TypeOf((*Task)(nil)).Elem() +} + +type TaskHistoryCollector struct { + HistoryCollector + + LatestPage []types.TaskInfo `mo:"latestPage"` +} + +func init() { + t["TaskHistoryCollector"] = reflect.TypeOf((*TaskHistoryCollector)(nil)).Elem() +} + +type TaskManager struct { + Self types.ManagedObjectReference + + RecentTask []types.ManagedObjectReference `mo:"recentTask"` + Description types.TaskDescription `mo:"description"` + MaxCollector int `mo:"maxCollector"` +} + +func (m TaskManager) Reference() 
types.ManagedObjectReference { + return m.Self +} + +func init() { + t["TaskManager"] = reflect.TypeOf((*TaskManager)(nil)).Elem() +} + +type UserDirectory struct { + Self types.ManagedObjectReference + + DomainList []string `mo:"domainList"` +} + +func (m UserDirectory) Reference() types.ManagedObjectReference { + return m.Self +} + +func init() { + t["UserDirectory"] = reflect.TypeOf((*UserDirectory)(nil)).Elem() +} + +type View struct { + Self types.ManagedObjectReference +} + +func (m View) Reference() types.ManagedObjectReference { + return m.Self +} + +func init() { + t["View"] = reflect.TypeOf((*View)(nil)).Elem() +} + +type ViewManager struct { + Self types.ManagedObjectReference + + ViewList []types.ManagedObjectReference `mo:"viewList"` +} + +func (m ViewManager) Reference() types.ManagedObjectReference { + return m.Self +} + +func init() { + t["ViewManager"] = reflect.TypeOf((*ViewManager)(nil)).Elem() +} + +type VirtualApp struct { + ResourcePool + + ParentFolder *types.ManagedObjectReference `mo:"parentFolder"` + Datastore []types.ManagedObjectReference `mo:"datastore"` + Network []types.ManagedObjectReference `mo:"network"` + VAppConfig *types.VAppConfigInfo `mo:"vAppConfig"` + ParentVApp *types.ManagedObjectReference `mo:"parentVApp"` + ChildLink []types.VirtualAppLinkInfo `mo:"childLink"` +} + +func init() { + t["VirtualApp"] = reflect.TypeOf((*VirtualApp)(nil)).Elem() +} + +type VirtualDiskManager struct { + Self types.ManagedObjectReference +} + +func (m VirtualDiskManager) Reference() types.ManagedObjectReference { + return m.Self +} + +func init() { + t["VirtualDiskManager"] = reflect.TypeOf((*VirtualDiskManager)(nil)).Elem() +} + +type VirtualMachine struct { + ManagedEntity + + Capability types.VirtualMachineCapability `mo:"capability"` + Config *types.VirtualMachineConfigInfo `mo:"config"` + Layout *types.VirtualMachineFileLayout `mo:"layout"` + LayoutEx *types.VirtualMachineFileLayoutEx `mo:"layoutEx"` + Storage *types.VirtualMachineStorageInfo `mo:"storage"` + EnvironmentBrowser types.ManagedObjectReference `mo:"environmentBrowser"` + ResourcePool *types.ManagedObjectReference `mo:"resourcePool"` + ParentVApp *types.ManagedObjectReference `mo:"parentVApp"` + ResourceConfig *types.ResourceConfigSpec `mo:"resourceConfig"` + Runtime types.VirtualMachineRuntimeInfo `mo:"runtime"` + Guest *types.GuestInfo `mo:"guest"` + Summary types.VirtualMachineSummary `mo:"summary"` + Datastore []types.ManagedObjectReference `mo:"datastore"` + Network []types.ManagedObjectReference `mo:"network"` + Snapshot *types.VirtualMachineSnapshotInfo `mo:"snapshot"` + RootSnapshot []types.ManagedObjectReference `mo:"rootSnapshot"` + GuestHeartbeatStatus types.ManagedEntityStatus `mo:"guestHeartbeatStatus"` +} + +func init() { + t["VirtualMachine"] = reflect.TypeOf((*VirtualMachine)(nil)).Elem() +} + +type VirtualMachineCompatibilityChecker struct { + Self types.ManagedObjectReference +} + +func (m VirtualMachineCompatibilityChecker) Reference() types.ManagedObjectReference { + return m.Self +} + +func init() { + t["VirtualMachineCompatibilityChecker"] = reflect.TypeOf((*VirtualMachineCompatibilityChecker)(nil)).Elem() +} + +type VirtualMachineProvisioningChecker struct { + Self types.ManagedObjectReference +} + +func (m VirtualMachineProvisioningChecker) Reference() types.ManagedObjectReference { + return m.Self +} + +func init() { + t["VirtualMachineProvisioningChecker"] = reflect.TypeOf((*VirtualMachineProvisioningChecker)(nil)).Elem() +} + +type VirtualMachineSnapshot struct { + 
ExtensibleManagedObject + + Config types.VirtualMachineConfigInfo `mo:"config"` + ChildSnapshot []types.ManagedObjectReference `mo:"childSnapshot"` +} + +func init() { + t["VirtualMachineSnapshot"] = reflect.TypeOf((*VirtualMachineSnapshot)(nil)).Elem() +} + +type VirtualizationManager struct { + Self types.ManagedObjectReference +} + +func (m VirtualizationManager) Reference() types.ManagedObjectReference { + return m.Self +} + +func init() { + t["VirtualizationManager"] = reflect.TypeOf((*VirtualizationManager)(nil)).Elem() +} + +type VmwareDistributedVirtualSwitch struct { + DistributedVirtualSwitch +} + +func init() { + t["VmwareDistributedVirtualSwitch"] = reflect.TypeOf((*VmwareDistributedVirtualSwitch)(nil)).Elem() +} diff --git a/vendor/github.com/vmware/govmomi/vim25/mo/reference.go b/vendor/github.com/vmware/govmomi/vim25/mo/reference.go new file mode 100644 index 000000000..465edbe80 --- /dev/null +++ b/vendor/github.com/vmware/govmomi/vim25/mo/reference.go @@ -0,0 +1,26 @@ +/* +Copyright (c) 2015 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package mo + +import "github.com/vmware/govmomi/vim25/types" + +// Reference is the interface that is implemented by all the managed objects +// defined in this package. It specifies that these managed objects have a +// function that returns the managed object reference to themselves. +type Reference interface { + Reference() types.ManagedObjectReference +} diff --git a/vendor/github.com/vmware/govmomi/vim25/mo/registry.go b/vendor/github.com/vmware/govmomi/vim25/mo/registry.go new file mode 100644 index 000000000..deacf508b --- /dev/null +++ b/vendor/github.com/vmware/govmomi/vim25/mo/registry.go @@ -0,0 +1,21 @@ +/* +Copyright (c) 2014 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package mo + +import "reflect" + +var t = map[string]reflect.Type{} diff --git a/vendor/github.com/vmware/govmomi/vim25/mo/retrieve.go b/vendor/github.com/vmware/govmomi/vim25/mo/retrieve.go new file mode 100644 index 000000000..310f720cc --- /dev/null +++ b/vendor/github.com/vmware/govmomi/vim25/mo/retrieve.go @@ -0,0 +1,171 @@ +/* +Copyright (c) 2014-2015 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package mo + +import ( + "reflect" + + "github.com/vmware/govmomi/vim25/methods" + "github.com/vmware/govmomi/vim25/soap" + "github.com/vmware/govmomi/vim25/types" + "golang.org/x/net/context" +) + +func ignoreMissingProperty(ref types.ManagedObjectReference, p types.MissingProperty) bool { + switch ref.Type { + case "VirtualMachine": + switch p.Path { + case "environmentBrowser": + // See https://github.com/vmware/govmomi/pull/242 + return true + } + } + + return false +} + +// ObjectContentToType loads an ObjectContent value into the value it +// represents. If the ObjectContent value has a non-empty 'MissingSet' field, +// it returns the first fault it finds there as error. If the 'MissingSet' +// field is empty, it returns the loaded value as an interface{}. It handles +// nested properties, such as 'guest.ipAddress' or 'config.hardware'. +func ObjectContentToType(o types.ObjectContent) (interface{}, error) { + // Expect no properties in the missing set + for _, p := range o.MissingSet { + if ignoreMissingProperty(o.Obj, p) { + continue + } + + return nil, soap.WrapVimFault(p.Fault.Fault) + } + + ti := typeInfoForType(o.Obj.Type) + v, err := ti.LoadFromObjectContent(o) + if err != nil { + return nil, err + } + + return v.Elem().Interface(), nil +} + +// LoadRetrievePropertiesResponse converts the response of a call to +// RetrieveProperties to one or more managed objects. +func LoadRetrievePropertiesResponse(res *types.RetrievePropertiesResponse, dst interface{}) error { + rt := reflect.TypeOf(dst) + if rt == nil || rt.Kind() != reflect.Ptr { + panic("need pointer") + } + + rv := reflect.ValueOf(dst).Elem() + if !rv.CanSet() { + panic("cannot set dst") + } + + isSlice := false + switch rt.Elem().Kind() { + case reflect.Struct: + case reflect.Slice: + isSlice = true + default: + panic("unexpected type") + } + + if isSlice { + for _, p := range res.Returnval { + v, err := ObjectContentToType(p) + if err != nil { + return err + } + + vt := reflect.TypeOf(v) + + if !rv.Type().AssignableTo(vt) { + // For example: dst is []ManagedEntity, res is []HostSystem + if field, ok := vt.FieldByName(rt.Elem().Elem().Name()); ok && field.Anonymous { + rv.Set(reflect.Append(rv, reflect.ValueOf(v).FieldByIndex(field.Index))) + continue + } + } + + rv.Set(reflect.Append(rv, reflect.ValueOf(v))) + } + } else { + switch len(res.Returnval) { + case 0: + case 1: + v, err := ObjectContentToType(res.Returnval[0]) + if err != nil { + return err + } + + vt := reflect.TypeOf(v) + + if !rv.Type().AssignableTo(vt) { + // For example: dst is ComputeResource, res is ClusterComputeResource + if field, ok := vt.FieldByName(rt.Elem().Name()); ok && field.Anonymous { + rv.Set(reflect.ValueOf(v).FieldByIndex(field.Index)) + return nil + } + } + + rv.Set(reflect.ValueOf(v)) + default: + // If dst is not a slice, expect to receive 0 or 1 results + panic("more than 1 result") + } + } + + return nil +} + +// RetrievePropertiesForRequest calls the RetrieveProperties method with the +// specified request and decodes the response struct into the value pointed to +// by dst.
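+// +// A minimal usage sketch (editorial note, not part of the vendored file; it +// assumes a context.Context ctx, a connected soap.RoundTripper rt, a +// PropertyCollector reference pc, and a VirtualMachine reference vmRef): +// +// req := types.RetrieveProperties{ +// This: pc, +// SpecSet: []types.PropertyFilterSpec{{ +// ObjectSet: []types.ObjectSpec{{Obj: vmRef}}, +// PropSet: []types.PropertySpec{{Type: "VirtualMachine", PathSet: []string{"name"}}}, +// }}, +// } +// +// var vm VirtualMachine +// if err := RetrievePropertiesForRequest(ctx, rt, req, &vm); err != nil { ... }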
+func RetrievePropertiesForRequest(ctx context.Context, r soap.RoundTripper, req types.RetrieveProperties, dst interface{}) error { + res, err := methods.RetrieveProperties(ctx, r, &req) + if err != nil { + return err + } + + return LoadRetrievePropertiesResponse(res, dst) +} + +// RetrieveProperties retrieves the properties of the managed object specified +// as obj and decodes the response struct into the value pointed to by dst. +func RetrieveProperties(ctx context.Context, r soap.RoundTripper, pc, obj types.ManagedObjectReference, dst interface{}) error { + req := types.RetrieveProperties{ + This: pc, + SpecSet: []types.PropertyFilterSpec{ + { + ObjectSet: []types.ObjectSpec{ + { + Obj: obj, + Skip: types.NewBool(false), + }, + }, + PropSet: []types.PropertySpec{ + { + All: types.NewBool(true), + Type: obj.Type, + }, + }, + }, + }, + } + + return RetrievePropertiesForRequest(ctx, r, req, dst) +} diff --git a/vendor/github.com/vmware/govmomi/vim25/mo/retrieve_test.go b/vendor/github.com/vmware/govmomi/vim25/mo/retrieve_test.go new file mode 100644 index 000000000..5d6e011df --- /dev/null +++ b/vendor/github.com/vmware/govmomi/vim25/mo/retrieve_test.go @@ -0,0 +1,144 @@ +/* +Copyright (c) 2014-2015 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package mo + +import ( + "os" + "testing" + + "github.com/vmware/govmomi/vim25/soap" + "github.com/vmware/govmomi/vim25/types" + "github.com/vmware/govmomi/vim25/xml" +) + +func load(name string) *types.RetrievePropertiesResponse { + f, err := os.Open(name) + if err != nil { + panic(err) + } + + defer f.Close() + + var b types.RetrievePropertiesResponse + + dec := xml.NewDecoder(f) + dec.TypeFunc = types.TypeFunc() + if err := dec.Decode(&b); err != nil { + panic(err) + } + + return &b +} + +func TestNotAuthenticatedFault(t *testing.T) { + var s SessionManager + + err := LoadRetrievePropertiesResponse(load("fixtures/not_authenticated_fault.xml"), &s) + if !soap.IsVimFault(err) { + t.Errorf("Expected IsVimFault") + } + + fault := soap.ToVimFault(err).(*types.NotAuthenticated) + if fault.PrivilegeId != "System.View" { + t.Errorf("Expected first fault to be returned") + } +} + +func TestNestedProperty(t *testing.T) { + var vm VirtualMachine + + err := LoadRetrievePropertiesResponse(load("fixtures/nested_property.xml"), &vm) + if err != nil { + t.Fatalf("Expected no error, got: %s", err) + } + + self := types.ManagedObjectReference{ + Type: "VirtualMachine", + Value: "vm-411", + } + + if vm.Self != self { + t.Fatalf("Expected vm.Self to be set") + } + + if vm.Config == nil { + t.Fatalf("Expected vm.Config to be set") + } + + if vm.Config.Name != "kubernetes-master" { + t.Errorf("Got: %s", vm.Config.Name) + } + + if vm.Config.Uuid != "422ec880-ab06-06b4-23f3-beb7a052a4c9" { + t.Errorf("Got: %s", vm.Config.Uuid) + } +} + +func TestPointerProperty(t *testing.T) { + var vm VirtualMachine + + err := LoadRetrievePropertiesResponse(load("fixtures/pointer_property.xml"), &vm) + if err != nil { + t.Fatalf("Expected no error, got: %s", err) + } + + if vm.Config == nil { + t.Fatalf("Expected vm.Config to be set") + } + + if vm.Config.BootOptions == nil { + t.Fatalf("Expected vm.Config.BootOptions to be set") + } +} + +func TestEmbeddedTypeProperty(t *testing.T) { + // Test that we avoid in this case: + // panic: reflect.Set: value of type mo.ClusterComputeResource is not assignable to type mo.ComputeResource + var cr ComputeResource + + err := LoadRetrievePropertiesResponse(load("fixtures/cluster_host_property.xml"), &cr) + if err != nil { + t.Fatalf("Expected no error, got: %s", err) + } + + if len(cr.Host) != 4 { + t.Fatalf("Expected cr.Host to be set") + } +} + +func TestEmbeddedTypePropertySlice(t *testing.T) { + var me []ManagedEntity + + err := LoadRetrievePropertiesResponse(load("fixtures/hostsystem_list_name_property.xml"), &me) + if err != nil { + t.Fatalf("Expected no error, got: %s", err) + } + + if len(me) != 2 { + t.Fatalf("Expected 2 elements") + } + + for _, m := range me { + if m.Name == "" { + t.Fatal("Expected Name field to be set") + } + } + + if me[0].Name == me[1].Name { + t.Fatal("Name fields should not be the same") + } +} diff --git a/vendor/github.com/vmware/govmomi/vim25/mo/type_info.go b/vendor/github.com/vmware/govmomi/vim25/mo/type_info.go new file mode 100644 index 000000000..119d23740 --- /dev/null +++ b/vendor/github.com/vmware/govmomi/vim25/mo/type_info.go @@ -0,0 +1,239 @@ +/* +Copyright (c) 2014 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package mo + +import ( + "fmt" + "reflect" + "regexp" + "strings" + "sync" + + "github.com/vmware/govmomi/vim25/types" +) + +type typeInfo struct { + typ reflect.Type + + // Field indices of "Self" field. + self []int + + // Map property names to field indices. + props map[string][]int +} + +var typeInfoLock sync.RWMutex +var typeInfoMap = make(map[string]*typeInfo) + +func typeInfoForType(tname string) *typeInfo { + typeInfoLock.RLock() + ti, ok := typeInfoMap[tname] + typeInfoLock.RUnlock() + + if ok { + return ti + } + + // Create new typeInfo for type. + if typ, ok := t[tname]; !ok { + panic("unknown type: " + tname) + } else { + // Multiple routines may race to set it, but the result is the same. + typeInfoLock.Lock() + ti = newTypeInfo(typ) + typeInfoMap[tname] = ti + typeInfoLock.Unlock() + } + + return ti +} + +func newTypeInfo(typ reflect.Type) *typeInfo { + t := typeInfo{ + typ: typ, + props: make(map[string][]int), + } + + t.build(typ, "", []int{}) + + return &t +} + +var managedObjectRefType = reflect.TypeOf((*types.ManagedObjectReference)(nil)).Elem() + +func buildName(fn string, f reflect.StructField) string { + if fn != "" { + fn += "." + } + + motag := f.Tag.Get("mo") + if motag != "" { + return fn + motag + } + + xmltag := f.Tag.Get("xml") + if xmltag != "" { + tokens := strings.Split(xmltag, ",") + if tokens[0] != "" { + return fn + tokens[0] + } + } + + return "" +} + +func (t *typeInfo) build(typ reflect.Type, fn string, fi []int) { + if typ.Kind() == reflect.Ptr { + typ = typ.Elem() + } + + if typ.Kind() != reflect.Struct { + panic("need struct") + } + + for i := 0; i < typ.NumField(); i++ { + f := typ.Field(i) + ftyp := f.Type + + // Copy field indices so they can be passed along. + fic := make([]int, len(fi)+1) + copy(fic, fi) + fic[len(fi)] = i + + // Recurse into embedded field. + if f.Anonymous { + t.build(ftyp, fn, fic) + continue + } + + // Top level type has a "Self" field. + if f.Name == "Self" && ftyp == managedObjectRefType { + t.self = fic + continue + } + + fnc := buildName(fn, f) + if fnc == "" { + continue + } + + t.props[fnc] = fic + + // Dereference pointer. + if ftyp.Kind() == reflect.Ptr { + ftyp = ftyp.Elem() + } + + // Slices are not addressable by `foo.bar.qux`. + if ftyp.Kind() == reflect.Slice { + continue + } + + // Skip the managed reference type. + if ftyp == managedObjectRefType { + continue + } + + // Recurse into structs. + if ftyp.Kind() == reflect.Struct { + t.build(ftyp, fnc, fic) + } + } +} + +// assignValue assignes a value 'pv' to the struct pointed to by 'val', given a +// slice of field indices. It recurses into the struct until it finds the field +// specified by the indices. It creates new values for pointer types where +// needed. +func assignValue(val reflect.Value, fi []int, pv reflect.Value) { + // Create new value if necessary. + if val.Kind() == reflect.Ptr { + if val.IsNil() { + val.Set(reflect.New(val.Type().Elem())) + } + + val = val.Elem() + } + + rv := val.Field(fi[0]) + fi = fi[1:] + if len(fi) == 0 { + rt := rv.Type() + pt := pv.Type() + + // If type is a pointer, create new instance of type. 
+ if rt.Kind() == reflect.Ptr { + rv.Set(reflect.New(rt.Elem())) + rv = rv.Elem() + rt = rv.Type() + } + + // If type is an interface, check if pv implements it. + if rt.Kind() == reflect.Interface && !pt.Implements(rt) { + // Check if pointer to pv implements it. + if reflect.PtrTo(pt).Implements(rt) { + npv := reflect.New(pt) + npv.Elem().Set(pv) + pv = npv + } else { + panic(fmt.Sprintf("type %s doesn't implement %s", pt.Name(), rt.Name())) + } + } + + rv.Set(pv) + return + } + + assignValue(rv, fi, pv) +} + +var arrayOfRegexp = regexp.MustCompile("ArrayOf(.*)$") + +func anyTypeToValue(t interface{}) reflect.Value { + rt := reflect.TypeOf(t) + rv := reflect.ValueOf(t) + + // Dereference if ArrayOfXYZ type + m := arrayOfRegexp.FindStringSubmatch(rt.Name()) + if len(m) > 0 { + // ArrayOfXYZ type has single field named XYZ + rv = rv.FieldByName(m[1]) + if !rv.IsValid() { + panic(fmt.Sprintf("expected %s type to have field %s", m[0], m[1])) + } + } + + return rv +} + +// LoadObjectFromContent loads properties from the 'PropSet' field in the +// specified ObjectContent value into the value it represents, which is +// returned as a reflect.Value. +func (t *typeInfo) LoadFromObjectContent(o types.ObjectContent) (reflect.Value, error) { + v := reflect.New(t.typ) + assignValue(v, t.self, reflect.ValueOf(o.Obj)) + + for _, p := range o.PropSet { + rv, ok := t.props[p.Name] + if !ok { + continue + } + assignValue(v, rv, anyTypeToValue(p.Val)) + } + + return v, nil +} diff --git a/vendor/github.com/vmware/govmomi/vim25/mo/type_info_test.go b/vendor/github.com/vmware/govmomi/vim25/mo/type_info_test.go new file mode 100644 index 000000000..849d9eea4 --- /dev/null +++ b/vendor/github.com/vmware/govmomi/vim25/mo/type_info_test.go @@ -0,0 +1,37 @@ +/* +Copyright (c) 2014 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package mo + +import ( + "reflect" + "testing" +) + +func TestLoadAll(*testing.T) { + for _, typ := range t { + newTypeInfo(typ) + } +} + +// The virtual machine managed object has about 500 nested properties. +// It's likely to be indicative of the function's performance in general. +func BenchmarkLoadVirtualMachine(b *testing.B) { + vmtyp := reflect.TypeOf((*VirtualMachine)(nil)).Elem() + for i := 0; i < b.N; i++ { + newTypeInfo(vmtyp) + } +} diff --git a/vendor/github.com/vmware/govmomi/vim25/progress/aggregator.go b/vendor/github.com/vmware/govmomi/vim25/progress/aggregator.go new file mode 100644 index 000000000..24cb3d59a --- /dev/null +++ b/vendor/github.com/vmware/govmomi/vim25/progress/aggregator.go @@ -0,0 +1,73 @@ +/* +Copyright (c) 2014 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package progress + +import "sync" + +type Aggregator struct { + downstream Sinker + upstream chan (<-chan Report) + + done chan struct{} + w sync.WaitGroup +} + +func NewAggregator(s Sinker) *Aggregator { + a := &Aggregator{ + downstream: s, + upstream: make(chan (<-chan Report)), + + done: make(chan struct{}), + } + + a.w.Add(1) + go a.loop() + + return a +} + +func (a *Aggregator) loop() { + defer a.w.Done() + + dch := a.downstream.Sink() + defer close(dch) + + for { + select { + case uch := <-a.upstream: + // Drain upstream channel + for e := range uch { + dch <- e + } + case <-a.done: + return + } + } +} + +func (a *Aggregator) Sink() chan<- Report { + ch := make(chan Report) + a.upstream <- ch + return ch +} + +// Done marks the aggregator as done. No more calls to Sink() may be made and +// the downstream progress report channel will be closed when Done() returns. +func (a *Aggregator) Done() { + close(a.done) + a.w.Wait() +} diff --git a/vendor/github.com/vmware/govmomi/vim25/progress/aggregator_test.go b/vendor/github.com/vmware/govmomi/vim25/progress/aggregator_test.go new file mode 100644 index 000000000..bac959428 --- /dev/null +++ b/vendor/github.com/vmware/govmomi/vim25/progress/aggregator_test.go @@ -0,0 +1,81 @@ +/* +Copyright (c) 2014 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package progress + +import ( + "testing" + "time" +) + +func TestAggregatorNoSinks(t *testing.T) { + ch := make(chan Report) + a := NewAggregator(dummySinker{ch}) + a.Done() + + _, ok := <-ch + if ok { + t.Errorf("Expected channel to be closed") + } +} + +func TestAggregatorMultipleSinks(t *testing.T) { + ch := make(chan Report) + a := NewAggregator(dummySinker{ch}) + + for i := 0; i < 5; i++ { + go func(ch chan<- Report) { + ch <- dummyReport{} + ch <- dummyReport{} + close(ch) + }(a.Sink()) + + <-ch + <-ch + } + + a.Done() + + _, ok := <-ch + if ok { + t.Errorf("Expected channel to be closed") + } +} + +func TestAggregatorSinkInFlightOnDone(t *testing.T) { + ch := make(chan Report) + a := NewAggregator(dummySinker{ch}) + + // Simulate upstream + go func(ch chan<- Report) { + time.Sleep(1 * time.Millisecond) + ch <- dummyReport{} + close(ch) + }(a.Sink()) + + // Drain downstream + go func(ch <-chan Report) { + <-ch + }(ch) + + // This should wait for upstream to complete + a.Done() + + _, ok := <-ch + if ok { + t.Errorf("Expected channel to be closed") + } +} diff --git a/vendor/github.com/vmware/govmomi/vim25/progress/common_test.go b/vendor/github.com/vmware/govmomi/vim25/progress/common_test.go new file mode 100644 index 000000000..c0da5e418 --- /dev/null +++ b/vendor/github.com/vmware/govmomi/vim25/progress/common_test.go @@ -0,0 +1,43 @@ +/* +Copyright (c) 2014 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package progress + +type dummySinker struct { + ch chan Report +} + +func (d dummySinker) Sink() chan<- Report { + return d.ch +} + +type dummyReport struct { + p float32 + d string + e error +} + +func (p dummyReport) Percentage() float32 { + return p.p +} + +func (p dummyReport) Detail() string { + return p.d +} + +func (p dummyReport) Error() error { + return p.e +} diff --git a/vendor/github.com/vmware/govmomi/vim25/progress/doc.go b/vendor/github.com/vmware/govmomi/vim25/progress/doc.go new file mode 100644 index 000000000..a0458dd5c --- /dev/null +++ b/vendor/github.com/vmware/govmomi/vim25/progress/doc.go @@ -0,0 +1,32 @@ +/* +Copyright (c) 2014 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package progress + +/* +The progress package contains functionality to deal with progress reporting. +The functionality is built to serve progress reporting for infrastructure +operations when talking the vSphere API, but is generic enough to be used +elsewhere. + +At the core of this progress reporting API lies the Sinker interface. 
This
+interface is implemented by any object that can act as a sink for progress
+reports. Callers of the Sink() function receive a send-only channel for
+progress reports. They are responsible for closing the channel when done.
+This convention makes it easy to keep track of multiple progress report channels;
+they are only created when Sink() is called and assumed closed when any
+function that receives a Sinker parameter returns.
+*/
diff --git a/vendor/github.com/vmware/govmomi/vim25/progress/prefix.go b/vendor/github.com/vmware/govmomi/vim25/progress/prefix.go
new file mode 100644
index 000000000..4f842ad95
--- /dev/null
+++ b/vendor/github.com/vmware/govmomi/vim25/progress/prefix.go
@@ -0,0 +1,54 @@
+/*
+Copyright (c) 2014 VMware, Inc. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package progress
+
+import "fmt"
+
+type prefixedReport struct {
+	Report
+	prefix string
+}
+
+func (r prefixedReport) Detail() string {
+	if d := r.Report.Detail(); d != "" {
+		return fmt.Sprintf("%s: %s", r.prefix, d)
+	}
+
+	return r.prefix
+}
+
+func prefixLoop(upstream <-chan Report, downstream chan<- Report, prefix string) {
+	defer close(downstream)
+
+	for r := range upstream {
+		downstream <- prefixedReport{
+			Report: r,
+			prefix: prefix,
+		}
+	}
+}
+
+// Prefix returns a Sinker that prepends the specified prefix to the detail of
+// every report passing through it, before forwarding the report to s.
+func Prefix(s Sinker, prefix string) Sinker {
+	fn := func() chan<- Report {
+		upstream := make(chan Report)
+		downstream := s.Sink()
+		go prefixLoop(upstream, downstream, prefix)
+		return upstream
+	}
+
+	return SinkFunc(fn)
+}
diff --git a/vendor/github.com/vmware/govmomi/vim25/progress/prefix_test.go b/vendor/github.com/vmware/govmomi/vim25/progress/prefix_test.go
new file mode 100644
index 000000000..2438bfd79
--- /dev/null
+++ b/vendor/github.com/vmware/govmomi/vim25/progress/prefix_test.go
@@ -0,0 +1,40 @@
+/*
+Copyright (c) 2014 VMware, Inc. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/ + +package progress + +import "testing" + +func TestPrefix(t *testing.T) { + var r Report + + ch := make(chan Report, 1) + s := Prefix(dummySinker{ch}, "prefix").Sink() + + // No detail + s <- dummyReport{d: ""} + r = <-ch + if r.Detail() != "prefix" { + t.Errorf("Expected detail to be prefixed") + } + + // With detail + s <- dummyReport{d: "something"} + r = <-ch + if r.Detail() != "prefix: something" { + t.Errorf("Expected detail to be prefixed") + } +} diff --git a/vendor/github.com/vmware/govmomi/vim25/progress/reader.go b/vendor/github.com/vmware/govmomi/vim25/progress/reader.go new file mode 100644 index 000000000..a981cb4e1 --- /dev/null +++ b/vendor/github.com/vmware/govmomi/vim25/progress/reader.go @@ -0,0 +1,177 @@ +/* +Copyright (c) 2014-2015 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package progress + +import ( + "container/list" + "fmt" + "io" + "sync/atomic" + "time" +) + +type readerReport struct { + t time.Time + + pos int64 + size int64 + bps *uint64 + + err error +} + +func (r readerReport) Percentage() float32 { + return 100.0 * float32(r.pos) / float32(r.size) +} + +func (r readerReport) Detail() string { + const ( + KiB = 1024 + MiB = 1024 * KiB + GiB = 1024 * MiB + ) + + // Use the reader's bps field, so this report returns an up-to-date number. + // + // For example: if there hasn't been progress for the last 5 seconds, the + // most recent report should return "0B/s". + // + bps := atomic.LoadUint64(r.bps) + + switch { + case bps >= GiB: + return fmt.Sprintf("%.1fGiB/s", float32(bps)/float32(GiB)) + case bps >= MiB: + return fmt.Sprintf("%.1fMiB/s", float32(bps)/float32(MiB)) + case bps >= KiB: + return fmt.Sprintf("%.1fKiB/s", float32(bps)/float32(KiB)) + default: + return fmt.Sprintf("%dB/s", bps) + } +} + +func (p readerReport) Error() error { + return p.err +} + +// reader wraps an io.Reader and sends a progress report over a channel for +// every read it handles. +type reader struct { + r io.Reader + + pos int64 + size int64 + + bps uint64 + + ch chan<- Report +} + +func NewReader(s Sinker, r io.Reader, size int64) *reader { + pr := reader{ + r: r, + + size: size, + } + + // Reports must be sent downstream and to the bps computation loop. + pr.ch = Tee(s, newBpsLoop(&pr.bps)).Sink() + + return &pr +} + +// Read calls the Read function on the underlying io.Reader. Additionally, +// every read causes a progress report to be sent to the progress reader's +// underlying channel. +func (r *reader) Read(b []byte) (int, error) { + n, err := r.r.Read(b) + if err != nil { + return n, err + } + + r.pos += int64(n) + q := readerReport{ + t: time.Now(), + pos: r.pos, + size: r.size, + bps: &r.bps, + } + + r.ch <- q + + return n, err +} + +// Done marks the progress reader as done, optionally including an error in the +// progress report. After sending it, the underlying channel is closed. 
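+//
+// Editorial aside, not part of the upstream source: a minimal, hypothetical
+// sketch of wiring the reader into a copy, assuming a Sinker 'sink', an
+// io.Reader 'src' of known 'size', and an io.Writer 'dst':
+//
+//	pr := NewReader(sink, src, size)
+//	_, err := io.Copy(dst, pr)
+//	pr.Done(err) // send a final report (with err, if any) and close the channel
+//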
+func (r *reader) Done(err error) { + q := readerReport{ + t: time.Now(), + pos: r.pos, + size: r.size, + bps: &r.bps, + err: err, + } + + r.ch <- q + close(r.ch) +} + +// newBpsLoop returns a sink that monitors and stores throughput. +func newBpsLoop(dst *uint64) SinkFunc { + fn := func() chan<- Report { + sink := make(chan Report) + go bpsLoop(sink, dst) + return sink + } + + return fn +} + +func bpsLoop(ch <-chan Report, dst *uint64) { + l := list.New() + + for { + var tch <-chan time.Time + + // Setup timer for front of list to become stale. + if e := l.Front(); e != nil { + dt := time.Second - time.Now().Sub(e.Value.(readerReport).t) + tch = time.After(dt) + } + + select { + case q, ok := <-ch: + if !ok { + return + } + + l.PushBack(q) + case <-tch: + l.Remove(l.Front()) + } + + // Compute new bps + if l.Len() == 0 { + atomic.StoreUint64(dst, 0) + } else { + f := l.Front().Value.(readerReport) + b := l.Back().Value.(readerReport) + atomic.StoreUint64(dst, uint64(b.pos-f.pos)) + } + } +} diff --git a/vendor/github.com/vmware/govmomi/vim25/progress/reader_test.go b/vendor/github.com/vmware/govmomi/vim25/progress/reader_test.go new file mode 100644 index 000000000..2d6058223 --- /dev/null +++ b/vendor/github.com/vmware/govmomi/vim25/progress/reader_test.go @@ -0,0 +1,90 @@ +/* +Copyright (c) 2014 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/
+
+package progress
+
+import (
+	"io"
+	"strings"
+	"testing"
+)
+
+func TestReader(t *testing.T) {
+	s := "helloworld"
+	ch := make(chan Report, 1)
+	pr := NewReader(&dummySinker{ch}, strings.NewReader(s), int64(len(s)))
+
+	var buf [10]byte
+	var q Report
+	var n int
+	var err error
+
+	// Read first byte
+	n, err = pr.Read(buf[0:1])
+	if n != 1 {
+		t.Errorf("Expected n=1, but got: %d", n)
+	}
+
+	if err != nil {
+		t.Errorf("Error: %s", err)
+	}
+
+	q = <-ch
+	if q.Error() != nil {
+		t.Errorf("Error: %s", q.Error())
+	}
+
+	if f := q.Percentage(); f != 10.0 {
+		t.Errorf("Expected percentage after 1 byte to be 10%%, but got: %.0f%%", f)
+	}
+
+	// Read remaining bytes
+	n, err = pr.Read(buf[:])
+	if n != 9 {
+		t.Errorf("Expected n=9, but got: %d", n)
+	}
+	if err != nil {
+		t.Errorf("Error: %s", err)
+	}
+
+	q = <-ch
+	if q.Error() != nil {
+		t.Errorf("Error: %s", q.Error())
+	}
+
+	if f := q.Percentage(); f != 100.0 {
+		t.Errorf("Expected percentage after 10 bytes to be 100%%, but got: %.0f%%", f)
+	}
+
+	// Read EOF
+	_, err = pr.Read(buf[:])
+	if err != io.EOF {
+		t.Errorf("Expected io.EOF, but got: %s", err)
+	}
+
+	// Mark progress reader as done
+	pr.Done(io.EOF)
+	q = <-ch
+	if q.Error() != io.EOF {
+		t.Errorf("Expected io.EOF, but got: %s", q.Error())
+	}
+
+	// Progress channel should be closed after progress reader is marked done
+	_, ok := <-ch
+	if ok {
+		t.Errorf("Expected channel to be closed")
+	}
+}
diff --git a/vendor/github.com/vmware/govmomi/vim25/progress/report.go b/vendor/github.com/vmware/govmomi/vim25/progress/report.go
new file mode 100644
index 000000000..bf80263ff
--- /dev/null
+++ b/vendor/github.com/vmware/govmomi/vim25/progress/report.go
@@ -0,0 +1,26 @@
+/*
+Copyright (c) 2014 VMware, Inc. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package progress
+
+// Report defines the interface for types that can deliver progress reports.
+// Examples include uploads/downloads in the http client and the task info
+// field in the task managed object.
+type Report interface {
+	Percentage() float32
+	Detail() string
+	Error() error
+}
diff --git a/vendor/github.com/vmware/govmomi/vim25/progress/scale.go b/vendor/github.com/vmware/govmomi/vim25/progress/scale.go
new file mode 100644
index 000000000..988083920
--- /dev/null
+++ b/vendor/github.com/vmware/govmomi/vim25/progress/scale.go
@@ -0,0 +1,76 @@
+/*
+Copyright (c) 2014 VMware, Inc. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/ + +package progress + +type scaledReport struct { + Report + n int + i int +} + +func (r scaledReport) Percentage() float32 { + b := 100 * float32(r.i) / float32(r.n) + return b + (r.Report.Percentage() / float32(r.n)) +} + +type scaleOne struct { + s Sinker + n int + i int +} + +func (s scaleOne) Sink() chan<- Report { + upstream := make(chan Report) + downstream := s.s.Sink() + go s.loop(upstream, downstream) + return upstream +} + +func (s scaleOne) loop(upstream <-chan Report, downstream chan<- Report) { + defer close(downstream) + + for r := range upstream { + downstream <- scaledReport{ + Report: r, + n: s.n, + i: s.i, + } + } +} + +type scaleMany struct { + s Sinker + n int + i int +} + +func Scale(s Sinker, n int) Sinker { + return &scaleMany{ + s: s, + n: n, + } +} + +func (s *scaleMany) Sink() chan<- Report { + if s.i == s.n { + s.n++ + } + + ch := scaleOne{s: s.s, n: s.n, i: s.i}.Sink() + s.i++ + return ch +} diff --git a/vendor/github.com/vmware/govmomi/vim25/progress/scale_test.go b/vendor/github.com/vmware/govmomi/vim25/progress/scale_test.go new file mode 100644 index 000000000..8b722e9a9 --- /dev/null +++ b/vendor/github.com/vmware/govmomi/vim25/progress/scale_test.go @@ -0,0 +1,45 @@ +/* +Copyright (c) 2014 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package progress + +import "testing" + +func TestScaleMany(t *testing.T) { + ch := make(chan Report) + a := NewAggregator(dummySinker{ch}) + defer a.Done() + + s := Scale(a, 5) + + go func() { + for i := 0; i < 5; i++ { + go func(ch chan<- Report) { + ch <- dummyReport{p: 0.0} + ch <- dummyReport{p: 50.0} + close(ch) + }(s.Sink()) + } + }() + + // Expect percentages to be scaled across sinks + for p := float32(0.0); p < 100.0; p += 10.0 { + r := <-ch + if r.Percentage() != p { + t.Errorf("Expected percentage to be: %.0f%%", p) + } + } +} diff --git a/vendor/github.com/vmware/govmomi/vim25/progress/sinker.go b/vendor/github.com/vmware/govmomi/vim25/progress/sinker.go new file mode 100644 index 000000000..0bd35a47f --- /dev/null +++ b/vendor/github.com/vmware/govmomi/vim25/progress/sinker.go @@ -0,0 +1,33 @@ +/* +Copyright (c) 2014 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package progress + +// Sinker defines what is expected of a type that can act as a sink for +// progress reports. The semantics are as follows. If you call Sink(), you are +// responsible for closing the returned channel. Closing this channel means +// that the related task is done, or resulted in error. 
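+//
+// Editorial aside, not part of the upstream source: a minimal, hypothetical
+// Sinker that logs each report might look like the following (the receiving
+// goroutine exits when the caller closes the channel):
+//
+//	type logSinker struct{}
+//
+//	func (logSinker) Sink() chan<- Report {
+//		ch := make(chan Report)
+//		go func() {
+//			for r := range ch {
+//				fmt.Printf("%3.0f%% %s\n", r.Percentage(), r.Detail())
+//			}
+//		}()
+//		return ch
+//	}
+//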
+type Sinker interface { + Sink() chan<- Report +} + +// SinkFunc defines a function that returns a progress report channel. +type SinkFunc func() chan<- Report + +// Sink makes the SinkFunc implement the Sinker interface. +func (fn SinkFunc) Sink() chan<- Report { + return fn() +} diff --git a/vendor/github.com/vmware/govmomi/vim25/progress/tee.go b/vendor/github.com/vmware/govmomi/vim25/progress/tee.go new file mode 100644 index 000000000..ab4607842 --- /dev/null +++ b/vendor/github.com/vmware/govmomi/vim25/progress/tee.go @@ -0,0 +1,41 @@ +/* +Copyright (c) 2014 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package progress + +// Tee works like Unix tee; it forwards all progress reports it receives to the +// specified sinks +func Tee(s1, s2 Sinker) Sinker { + fn := func() chan<- Report { + d1 := s1.Sink() + d2 := s2.Sink() + u := make(chan Report) + go tee(u, d1, d2) + return u + } + + return SinkFunc(fn) +} + +func tee(u <-chan Report, d1, d2 chan<- Report) { + defer close(d1) + defer close(d2) + + for r := range u { + d1 <- r + d2 <- r + } +} diff --git a/vendor/github.com/vmware/govmomi/vim25/progress/tee_test.go b/vendor/github.com/vmware/govmomi/vim25/progress/tee_test.go new file mode 100644 index 000000000..350f6f1cb --- /dev/null +++ b/vendor/github.com/vmware/govmomi/vim25/progress/tee_test.go @@ -0,0 +1,46 @@ +/* +Copyright (c) 2014 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package progress + +import "testing" + +func TestTee(t *testing.T) { + var ok bool + + ch1 := make(chan Report) + ch2 := make(chan Report) + + s := Tee(&dummySinker{ch: ch1}, &dummySinker{ch: ch2}) + + in := s.Sink() + in <- dummyReport{} + close(in) + + // Receive dummy on both sinks + <-ch1 + <-ch2 + + _, ok = <-ch1 + if ok { + t.Errorf("Expected channel to be closed") + } + + _, ok = <-ch2 + if ok { + t.Errorf("Expected channel to be closed") + } +} diff --git a/vendor/github.com/vmware/govmomi/vim25/retry.go b/vendor/github.com/vmware/govmomi/vim25/retry.go new file mode 100644 index 000000000..b5868e179 --- /dev/null +++ b/vendor/github.com/vmware/govmomi/vim25/retry.go @@ -0,0 +1,105 @@ +/* +Copyright (c) 2015 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package vim25 + +import ( + "net" + "net/url" + "time" + + "github.com/vmware/govmomi/vim25/soap" + "golang.org/x/net/context" +) + +type RetryFunc func(err error) (retry bool, delay time.Duration) + +// TemporaryNetworkError returns a RetryFunc that retries up to a maximum of n +// times, only if the error returned by the RoundTrip function is a temporary +// network error (for example: a connect timeout). +func TemporaryNetworkError(n int) RetryFunc { + return func(err error) (retry bool, delay time.Duration) { + var nerr net.Error + var ok bool + + // Never retry if this is not a network error. + switch rerr := err.(type) { + case *url.Error: + if nerr, ok = rerr.Err.(net.Error); !ok { + return false, 0 + } + case net.Error: + nerr = rerr + default: + return false, 0 + } + + if !nerr.Temporary() { + return false, 0 + } + + // Don't retry if we're out of tries. + if n--; n <= 0 { + return false, 0 + } + + return true, 0 + } +} + +type retry struct { + roundTripper soap.RoundTripper + + // fn is a custom function that is called when an error occurs. + // It returns whether or not to retry, and if so, how long to + // delay before retrying. + fn RetryFunc +} + +// Retry wraps the specified soap.RoundTripper and invokes the +// specified RetryFunc. The RetryFunc returns whether or not to +// retry the call, and if so, how long to wait before retrying. If +// the result of this function is to not retry, the original error +// is returned from the RoundTrip function. +func Retry(roundTripper soap.RoundTripper, fn RetryFunc) soap.RoundTripper { + r := &retry{ + roundTripper: roundTripper, + fn: fn, + } + + return r +} + +func (r *retry) RoundTrip(ctx context.Context, req, res soap.HasFault) error { + var err error + + for { + err = r.roundTripper.RoundTrip(ctx, req, res) + if err == nil { + break + } + + // Invoke retry function to see if another attempt should be made. + if retry, delay := r.fn(err); retry { + time.Sleep(delay) + continue + } + + break + } + + return err +} diff --git a/vendor/github.com/vmware/govmomi/vim25/retry_test.go b/vendor/github.com/vmware/govmomi/vim25/retry_test.go new file mode 100644 index 000000000..14a636dc8 --- /dev/null +++ b/vendor/github.com/vmware/govmomi/vim25/retry_test.go @@ -0,0 +1,86 @@ +/* +Copyright (c) 2015 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package vim25 + +import ( + "testing" + + "github.com/vmware/govmomi/vim25/soap" + "golang.org/x/net/context" +) + +type tempError struct{} + +func (tempError) Error() string { return "tempError" } +func (tempError) Timeout() bool { return true } +func (tempError) Temporary() bool { return true } + +type nonTempError struct{} + +func (nonTempError) Error() string { return "nonTempError" } +func (nonTempError) Timeout() bool { return false } +func (nonTempError) Temporary() bool { return false } + +type fakeRoundTripper struct { + errs []error +} + +func (f *fakeRoundTripper) RoundTrip(ctx context.Context, req, res soap.HasFault) error { + err := f.errs[0] + f.errs = f.errs[1:] + return err +} + +func TestRetry(t *testing.T) { + var tcs = []struct { + errs []error + expected error + }{ + { + errs: []error{nil}, + expected: nil, + }, + { + errs: []error{tempError{}, nil}, + expected: nil, + }, + { + errs: []error{tempError{}, tempError{}}, + expected: tempError{}, + }, + { + errs: []error{nonTempError{}}, + expected: nonTempError{}, + }, + { + errs: []error{tempError{}, nonTempError{}}, + expected: nonTempError{}, + }, + } + + for _, tc := range tcs { + var rt soap.RoundTripper + + rt = &fakeRoundTripper{errs: tc.errs} + rt = Retry(rt, TemporaryNetworkError(2)) + + err := rt.RoundTrip(nil, nil, nil) + if err != tc.expected { + t.Errorf("Expected: %s, got: %s", tc.expected, err) + } + } +} diff --git a/vendor/github.com/vmware/govmomi/vim25/soap/client.go b/vendor/github.com/vmware/govmomi/vim25/soap/client.go new file mode 100644 index 000000000..74aed0bef --- /dev/null +++ b/vendor/github.com/vmware/govmomi/vim25/soap/client.go @@ -0,0 +1,484 @@ +/* +Copyright (c) 2014-2015 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package soap + +import ( + "bytes" + "crypto/tls" + "encoding/json" + "errors" + "io" + "net" + "net/http" + "net/http/cookiejar" + "net/url" + "os" + "regexp" + "strings" + "time" + + "github.com/vmware/govmomi/vim25/progress" + "github.com/vmware/govmomi/vim25/types" + "github.com/vmware/govmomi/vim25/xml" + "golang.org/x/net/context" +) + +type HasFault interface { + Fault() *Fault +} + +type RoundTripper interface { + RoundTrip(ctx context.Context, req, res HasFault) error +} + +type Client struct { + http.Client + + u *url.URL + k bool // Named after curl's -k flag + d *debugContainer + t *http.Transport + p *url.URL +} + +var schemeMatch = regexp.MustCompile(`^\w+://`) + +// ParseURL is wrapper around url.Parse, where Scheme defaults to "https" and Path defaults to "/sdk" +func ParseURL(s string) (*url.URL, error) { + var err error + var u *url.URL + + if s != "" { + // Default the scheme to https + if !schemeMatch.MatchString(s) { + s = "https://" + s + } + + u, err = url.Parse(s) + if err != nil { + return nil, err + } + + // Default the path to /sdk + if u.Path == "" { + u.Path = "/sdk" + } + + if u.User == nil { + u.User = url.UserPassword("", "") + } + } + + return u, nil +} + +func NewClient(u *url.URL, insecure bool) *Client { + c := Client{ + u: u, + k: insecure, + d: newDebug(), + } + + // Initialize http.RoundTripper on client, so we can customize it below + c.t = &http.Transport{ + Proxy: http.ProxyFromEnvironment, + Dial: (&net.Dialer{ + Timeout: 30 * time.Second, + KeepAlive: 30 * time.Second, + }).Dial, + } + + if c.u.Scheme == "https" { + c.t.TLSClientConfig = &tls.Config{InsecureSkipVerify: c.k} + c.t.TLSHandshakeTimeout = 10 * time.Second + } + + c.Client.Transport = c.t + c.Client.Jar, _ = cookiejar.New(nil) + + // Remove user information from a copy of the URL + c.u = c.URL() + c.u.User = nil + + return &c +} + +// splitHostPort is similar to net.SplitHostPort, +// but rather than return error if there isn't a ':port', +// return an empty string for the port. +func splitHostPort(host string) (string, string) { + ix := strings.LastIndex(host, ":") + + if ix <= strings.LastIndex(host, "]") { + return host, "" + } + + name := host[:ix] + port := host[ix+1:] + + return name, port +} + +const sdkTunnel = "sdkTunnel:8089" + +func (c *Client) SetCertificate(cert tls.Certificate) { + t := c.Client.Transport.(*http.Transport) + + // Extension certificate + t.TLSClientConfig.Certificates = []tls.Certificate{cert} + + // Proxy to vCenter host on port 80 + host, _ := splitHostPort(c.u.Host) + + // Should be no reason to change the default port other than testing + port := os.Getenv("GOVC_TUNNEL_PROXY_PORT") + if port != "" { + host += ":" + port + } + + c.p = &url.URL{ + Scheme: "http", + Host: host, + } + t.Proxy = func(r *http.Request) (*url.URL, error) { + // Only sdk requests should be proxied + if r.URL.Path == "/sdk" { + return c.p, nil + } + return http.ProxyFromEnvironment(r) + } + + // Rewrite url Host to use the sdk tunnel, required for a certificate request. 
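+	// (Editorial note, an assumption not stated in the upstream source:
+	// /sdk requests are then routed through the proxy configured above,
+	// which forwards them to the sdkTunnel endpoint on port 8089, where
+	// certificate-based authentication takes place.)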
+	c.u.Host = sdkTunnel
+}
+
+func (c *Client) URL() *url.URL {
+	urlCopy := *c.u
+	return &urlCopy
+}
+
+type marshaledClient struct {
+	Cookies  []*http.Cookie
+	URL      *url.URL
+	Insecure bool
+}
+
+func (c *Client) MarshalJSON() ([]byte, error) {
+	m := marshaledClient{
+		Cookies:  c.Jar.Cookies(c.u),
+		URL:      c.u,
+		Insecure: c.k,
+	}
+
+	return json.Marshal(m)
+}
+
+func (c *Client) UnmarshalJSON(b []byte) error {
+	var m marshaledClient
+
+	err := json.Unmarshal(b, &m)
+	if err != nil {
+		return err
+	}
+
+	*c = *NewClient(m.URL, m.Insecure)
+	c.Jar.SetCookies(m.URL, m.Cookies)
+
+	return nil
+}
+
+func (c *Client) do(ctx context.Context, req *http.Request) (*http.Response, error) {
+	if nil == ctx || nil == ctx.Done() { // a nil Done channel means the context can never expire (e.g. context.TODO())
+		return c.Client.Do(req)
+	}
+
+	var resc = make(chan *http.Response, 1)
+	var errc = make(chan error, 1)
+
+	// Perform request from separate routine.
+	go func() {
+		res, err := c.Client.Do(req)
+		if err != nil {
+			errc <- err
+		} else {
+			resc <- res
+		}
+	}()
+
+	// Wait for request completion or context expiry.
+	select {
+	case <-ctx.Done():
+		c.t.CancelRequest(req)
+		return nil, ctx.Err()
+	case err := <-errc:
+		return nil, err
+	case res := <-resc:
+		return res, nil
+	}
+}
+
+func (c *Client) RoundTrip(ctx context.Context, reqBody, resBody HasFault) error {
+	var err error
+
+	reqEnv := Envelope{Body: reqBody}
+	resEnv := Envelope{Body: resBody}
+
+	// Create debugging context for this round trip
+	d := c.d.newRoundTrip()
+	if d.enabled() {
+		defer d.done()
+	}
+
+	b, err := xml.Marshal(reqEnv)
+	if err != nil {
+		panic(err)
+	}
+
+	rawReqBody := io.MultiReader(strings.NewReader(xml.Header), bytes.NewReader(b))
+	req, err := http.NewRequest("POST", c.u.String(), rawReqBody)
+	if err != nil {
+		panic(err)
+	}
+
+	req.Header.Set(`Content-Type`, `text/xml; charset="utf-8"`)
+	req.Header.Set(`SOAPAction`, `urn:vim25/6.0`)
+
+	if d.enabled() {
+		d.debugRequest(req)
+	}
+
+	tstart := time.Now()
+	res, err := c.do(ctx, req)
+	tstop := time.Now()
+
+	if d.enabled() {
+		d.logf("%6dms (%T)", tstop.Sub(tstart)/time.Millisecond, resBody)
+	}
+
+	if err != nil {
+		return err
+	}
+
+	if d.enabled() {
+		d.debugResponse(res)
+	}
+
+	// Close response regardless of what happens next
+	defer res.Body.Close()
+
+	switch res.StatusCode {
+	case http.StatusOK:
+		// OK
+	case http.StatusInternalServerError:
+		// Error, but typically includes a body explaining the error
+	default:
+		return errors.New(res.Status)
+	}
+
+	dec := xml.NewDecoder(res.Body)
+	dec.TypeFunc = types.TypeFunc()
+	err = dec.Decode(&resEnv)
+	if err != nil {
+		return err
+	}
+
+	if f := resBody.Fault(); f != nil {
+		return WrapSoapFault(f)
+	}
+
+	return err
+}
+
+func (c *Client) CloseIdleConnections() {
+	c.t.CloseIdleConnections()
+}
+
+// ParseURL wraps url.Parse to rewrite the URL.Host field.
+// In the case of VM guest uploads or NFC lease URLs, a Host
+// field with a value of "*" is rewritten to the Client's URL.Host.
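+//
+// Editorial aside, not part of the upstream source: a hypothetical example,
+// assuming the client was created for "https://vcenter.example.com/sdk":
+//
+//	u, _ := c.ParseURL("https://*/nfc/lease-ticket/disk-0.vmdk")
+//	// u.Host is now "vcenter.example.com" (including any port)
+//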
+func (c *Client) ParseURL(urlStr string) (*url.URL, error) { + u, err := url.Parse(urlStr) + if err != nil { + return nil, err + } + + host, _ := splitHostPort(u.Host) + if host == "*" { + // Also use Client's port, to support port forwarding + u.Host = c.URL().Host + } + + return u, nil +} + +type Upload struct { + Type string + Method string + ContentLength int64 + Headers map[string]string + Ticket *http.Cookie + Progress progress.Sinker +} + +var DefaultUpload = Upload{ + Type: "application/octet-stream", + Method: "PUT", +} + +// Upload PUTs the local file to the given URL +func (c *Client) Upload(f io.Reader, u *url.URL, param *Upload) error { + var err error + + if param.Progress != nil { + pr := progress.NewReader(param.Progress, f, param.ContentLength) + f = pr + + // Mark progress reader as done when returning from this function. + defer func() { + pr.Done(err) + }() + } + + req, err := http.NewRequest(param.Method, u.String(), f) + if err != nil { + return err + } + + req.ContentLength = param.ContentLength + req.Header.Set("Content-Type", param.Type) + + for k, v := range param.Headers { + req.Header.Add(k, v) + } + + if param.Ticket != nil { + req.AddCookie(param.Ticket) + } + + res, err := c.Client.Do(req) + if err != nil { + return err + } + + switch res.StatusCode { + case http.StatusOK: + case http.StatusCreated: + default: + err = errors.New(res.Status) + } + + return err +} + +// UploadFile PUTs the local file to the given URL +func (c *Client) UploadFile(file string, u *url.URL, param *Upload) error { + if param == nil { + p := DefaultUpload // Copy since we set ContentLength + param = &p + } + + s, err := os.Stat(file) + if err != nil { + return err + } + + f, err := os.Open(file) + if err != nil { + return err + } + defer f.Close() + + param.ContentLength = s.Size() + + return c.Upload(f, u, param) +} + +type Download struct { + Method string + Ticket *http.Cookie + Progress progress.Sinker +} + +var DefaultDownload = Download{ + Method: "GET", +} + +// DownloadFile GETs the given URL to a local file +func (c *Client) DownloadFile(file string, u *url.URL, param *Download) error { + var err error + + if param == nil { + param = &DefaultDownload + } + + fh, err := os.Create(file) + if err != nil { + return err + } + defer fh.Close() + + req, err := http.NewRequest(param.Method, u.String(), nil) + if err != nil { + return err + } + + if param.Ticket != nil { + req.AddCookie(param.Ticket) + } + + res, err := c.Client.Do(req) + if err != nil { + return err + } + + defer res.Body.Close() + + switch res.StatusCode { + case http.StatusOK: + default: + err = errors.New(res.Status) + } + + if err != nil { + return err + } + + var r io.Reader = res.Body + if param.Progress != nil { + pr := progress.NewReader(param.Progress, res.Body, res.ContentLength) + r = pr + + // Mark progress reader as done when returning from this function. + defer func() { + pr.Done(err) + }() + } + + _, err = io.Copy(fh, r) + if err != nil { + return err + } + + // Assign error before returning so that it gets picked up by the deferred + // function marking the progress reader as done. + err = fh.Close() + if err != nil { + return err + } + + return nil +} diff --git a/vendor/github.com/vmware/govmomi/vim25/soap/client_test.go b/vendor/github.com/vmware/govmomi/vim25/soap/client_test.go new file mode 100644 index 000000000..0cf4729dd --- /dev/null +++ b/vendor/github.com/vmware/govmomi/vim25/soap/client_test.go @@ -0,0 +1,43 @@ +/* +Copyright (c) 2015 VMware, Inc. All Rights Reserved. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package soap + +import "testing" + +func TestSplitHostPort(t *testing.T) { + tests := []struct { + url string + host string + port string + }{ + {"127.0.0.1", "127.0.0.1", ""}, + {"*:1234", "*", "1234"}, + {"127.0.0.1:80", "127.0.0.1", "80"}, + {"[::1]:6767", "[::1]", "6767"}, + {"[::1]", "[::1]", ""}, + } + + for _, test := range tests { + host, port := splitHostPort(test.url) + if host != test.host { + t.Errorf("(%s) %s != %s", test.url, host, test.host) + } + if port != test.port { + t.Errorf("(%s) %s != %s", test.url, port, test.port) + } + } +} diff --git a/vendor/github.com/vmware/govmomi/vim25/soap/debug.go b/vendor/github.com/vmware/govmomi/vim25/soap/debug.go new file mode 100644 index 000000000..63518abca --- /dev/null +++ b/vendor/github.com/vmware/govmomi/vim25/soap/debug.go @@ -0,0 +1,149 @@ +/* +Copyright (c) 2015 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package soap + +import ( + "fmt" + "io" + "net/http" + "net/http/httputil" + "sync/atomic" + "time" + + "github.com/vmware/govmomi/vim25/debug" +) + +// teeReader wraps io.TeeReader and patches through the Close() function. +type teeReader struct { + io.Reader + io.Closer +} + +func newTeeReader(rc io.ReadCloser, w io.Writer) io.ReadCloser { + return teeReader{ + Reader: io.TeeReader(rc, w), + Closer: rc, + } +} + +// debugRoundTrip contains state and logic needed to debug a single round trip. +type debugRoundTrip struct { + cn uint64 // Client number + rn uint64 // Request number + log io.WriteCloser // Request log + cs []io.Closer // Files that need closing when done +} + +func (d *debugRoundTrip) logf(format string, a ...interface{}) { + now := time.Now().Format("2006-01-02T15-04-05.000000000") + fmt.Fprintf(d.log, "%s - %04d: ", now, d.rn) + fmt.Fprintf(d.log, format, a...) 
+ fmt.Fprintf(d.log, "\n") +} + +func (d *debugRoundTrip) enabled() bool { + return d != nil +} + +func (d *debugRoundTrip) done() { + for _, c := range d.cs { + c.Close() + } +} + +func (d *debugRoundTrip) newFile(suffix string) io.WriteCloser { + return debug.NewFile(fmt.Sprintf("%d-%04d.%s", d.cn, d.rn, suffix)) +} + +func (d *debugRoundTrip) debugRequest(req *http.Request) { + if d == nil { + return + } + + var wc io.WriteCloser + + // Capture headers + wc = d.newFile("req.headers") + b, _ := httputil.DumpRequest(req, false) + wc.Write(b) + wc.Close() + + // Capture body + wc = d.newFile("req.xml") + req.Body = newTeeReader(req.Body, wc) + + // Delay closing until marked done + d.cs = append(d.cs, wc) +} + +func (d *debugRoundTrip) debugResponse(res *http.Response) { + if d == nil { + return + } + + var wc io.WriteCloser + + // Capture headers + wc = d.newFile("res.headers") + b, _ := httputil.DumpResponse(res, false) + wc.Write(b) + wc.Close() + + // Capture body + wc = d.newFile("res.xml") + res.Body = newTeeReader(res.Body, wc) + + // Delay closing until marked done + d.cs = append(d.cs, wc) +} + +var cn uint64 // Client counter + +// debugContainer wraps the debugging state for a single client. +type debugContainer struct { + cn uint64 // Client number + rn uint64 // Request counter + log io.WriteCloser // Request log +} + +func newDebug() *debugContainer { + d := debugContainer{ + cn: atomic.AddUint64(&cn, 1), + rn: 0, + } + + if !debug.Enabled() { + return nil + } + + d.log = debug.NewFile(fmt.Sprintf("%d-client.log", d.cn)) + return &d +} + +func (d *debugContainer) newRoundTrip() *debugRoundTrip { + if d == nil { + return nil + } + + drt := debugRoundTrip{ + cn: d.cn, + rn: atomic.AddUint64(&d.rn, 1), + log: d.log, + } + + return &drt +} diff --git a/vendor/github.com/vmware/govmomi/vim25/soap/error.go b/vendor/github.com/vmware/govmomi/vim25/soap/error.go new file mode 100644 index 000000000..0408e7bf0 --- /dev/null +++ b/vendor/github.com/vmware/govmomi/vim25/soap/error.go @@ -0,0 +1,109 @@ +/* +Copyright (c) 2014 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package soap + +import ( + "fmt" + "reflect" + + "github.com/vmware/govmomi/vim25/types" +) + +type regularError struct { + err error +} + +func (r regularError) Error() string { + return r.err.Error() +} + +type soapFaultError struct { + fault *Fault +} + +func (s soapFaultError) Error() string { + return fmt.Sprintf("%s: %s", s.fault.Code, s.fault.String) +} + +type vimFaultError struct { + fault types.BaseMethodFault +} + +func (v vimFaultError) Error() string { + typ := reflect.TypeOf(v.fault) + for typ.Kind() == reflect.Ptr { + typ = typ.Elem() + } + + return typ.Name() +} + +func (v vimFaultError) Fault() types.BaseMethodFault { + return v.fault +} + +func Wrap(err error) error { + switch err.(type) { + case regularError: + return err + case soapFaultError: + return err + case vimFaultError: + return err + } + + return WrapRegularError(err) +} + +func WrapRegularError(err error) error { + return regularError{err} +} + +func IsRegularError(err error) bool { + _, ok := err.(regularError) + return ok +} + +func ToRegularError(err error) error { + return err.(regularError).err +} + +func WrapSoapFault(f *Fault) error { + return soapFaultError{f} +} + +func IsSoapFault(err error) bool { + _, ok := err.(soapFaultError) + return ok +} + +func ToSoapFault(err error) *Fault { + return err.(soapFaultError).fault +} + +func WrapVimFault(v types.BaseMethodFault) error { + return vimFaultError{v} +} + +func IsVimFault(err error) bool { + _, ok := err.(vimFaultError) + return ok +} + +func ToVimFault(err error) types.BaseMethodFault { + return err.(vimFaultError).fault +} diff --git a/vendor/github.com/vmware/govmomi/vim25/soap/soap.go b/vendor/github.com/vmware/govmomi/vim25/soap/soap.go new file mode 100644 index 000000000..e3b1a97dc --- /dev/null +++ b/vendor/github.com/vmware/govmomi/vim25/soap/soap.go @@ -0,0 +1,45 @@ +/* +Copyright (c) 2014 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package soap + +import ( + "github.com/vmware/govmomi/vim25/types" + "github.com/vmware/govmomi/vim25/xml" +) + +type Envelope struct { + XMLName xml.Name `xml:"http://schemas.xmlsoap.org/soap/envelope/ Envelope"` + Header *Header `xml:",omitempty"` + Body interface{} +} + +type Header struct { + XMLName xml.Name `xml:"http://schemas.xmlsoap.org/soap/envelope/ Header"` +} + +type Fault struct { + XMLName xml.Name `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault"` + Code string `xml:"faultcode"` + String string `xml:"faultstring"` + Detail struct { + Fault types.AnyType `xml:",any"` + } `xml:"detail"` +} + +func (f *Fault) VimFault() types.AnyType { + return f.Detail.Fault +} diff --git a/vendor/github.com/vmware/govmomi/vim25/soap/soap_test.go b/vendor/github.com/vmware/govmomi/vim25/soap/soap_test.go new file mode 100644 index 000000000..d90a17d19 --- /dev/null +++ b/vendor/github.com/vmware/govmomi/vim25/soap/soap_test.go @@ -0,0 +1,55 @@ +/* +Copyright (c) 2014 VMware, Inc. All Rights Reserved. 
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package soap
+
+import (
+	"testing"
+
+	"github.com/vmware/govmomi/vim25/xml"
+)
+
+func TestEmptyEnvelope(t *testing.T) {
+	env := Envelope{}
+
+	b, err := xml.Marshal(env)
+	if err != nil {
+		t.Errorf("error: %s", err)
+		return
+	}
+
+	expected := `<Envelope xmlns="http://schemas.xmlsoap.org/soap/envelope/"></Envelope>`
+	actual := string(b)
+	if expected != actual {
+		t.Fatalf("expected: %s, actual: %s", expected, actual)
+	}
+}
+
+func TestEmptyHeader(t *testing.T) {
+	h := Header{}
+
+	b, err := xml.Marshal(h)
+	if err != nil {
+		t.Errorf("error: %s", err)
+		return
+	}
+
+	expected := `<Header xmlns="http://schemas.xmlsoap.org/soap/envelope/"></Header>`
+	actual := string(b)
+	if expected != actual {
+		t.Fatalf("expected: %s, actual: %s", expected, actual)
+	}
+}
diff --git a/vendor/github.com/vmware/govmomi/vim25/types/base.go b/vendor/github.com/vmware/govmomi/vim25/types/base.go
new file mode 100644
index 000000000..3bb12b741
--- /dev/null
+++ b/vendor/github.com/vmware/govmomi/vim25/types/base.go
@@ -0,0 +1,19 @@
+/*
+Copyright (c) 2014 VMware, Inc. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package types
+
+type AnyType interface{}
diff --git a/vendor/github.com/vmware/govmomi/vim25/types/base_test.go b/vendor/github.com/vmware/govmomi/vim25/types/base_test.go
new file mode 100644
index 000000000..ab8b1de73
--- /dev/null
+++ b/vendor/github.com/vmware/govmomi/vim25/types/base_test.go
@@ -0,0 +1,65 @@
+/*
+Copyright (c) 2014 VMware, Inc. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package types
+
+import (
+	"bytes"
+	"reflect"
+	"testing"
+
+	"github.com/vmware/govmomi/vim25/xml"
+)
+
+func TestAnyType(t *testing.T) {
+	// Wrap the payload in a root element that declares the xsi/xsd
+	// namespaces referenced by the typeattr lookups below.
+	x := func(s string) []byte {
+		s = `<root xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns:xsd="http://www.w3.org/2001/XMLSchema">` + s
+		s += `</root>`
+		return []byte(s)
+	}
+
+	tests := []struct {
+		Input []byte
+		Value interface{}
+	}{
+		{
+			Input: x(`<name xsi:type="xsd:string">test</name>`),
+			Value: "test",
+		},
+		{
+			Input: x(`<name xsi:type="ArrayOfString"><string>AA</string><string>BB</string></name>`),
+			Value: ArrayOfString{String: []string{"AA", "BB"}},
+		},
+	}
+
+	for _, test := range tests {
+		var r struct {
+			A interface{} `xml:"name,typeattr"`
+		}
+
+		dec := xml.NewDecoder(bytes.NewReader(test.Input))
+		dec.TypeFunc = TypeFunc()
+
+		err := dec.Decode(&r)
+		if err != nil {
+			t.Fatalf("Decode: %s", err)
+		}
+
+		if !reflect.DeepEqual(r.A, test.Value) {
+			t.Errorf("Expected: %#v, actual: %#v", test.Value, r.A)
+		}
+	}
+}
diff --git a/vendor/github.com/vmware/govmomi/vim25/types/enum.go b/vendor/github.com/vmware/govmomi/vim25/types/enum.go
new file mode 100644
index 000000000..d63a1d54b
--- /dev/null
+++ b/vendor/github.com/vmware/govmomi/vim25/types/enum.go
@@ -0,0 +1,4097 @@
+/*
+Copyright (c) 2014-2015 VMware, Inc. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/ + +package types + +import "reflect" + +type ActionParameter string + +const ( + ActionParameterTargetName = ActionParameter("targetName") + ActionParameterAlarmName = ActionParameter("alarmName") + ActionParameterOldStatus = ActionParameter("oldStatus") + ActionParameterNewStatus = ActionParameter("newStatus") + ActionParameterTriggeringSummary = ActionParameter("triggeringSummary") + ActionParameterDeclaringSummary = ActionParameter("declaringSummary") + ActionParameterEventDescription = ActionParameter("eventDescription") + ActionParameterTarget = ActionParameter("target") + ActionParameterAlarm = ActionParameter("alarm") +) + +func init() { + t["ActionParameter"] = reflect.TypeOf((*ActionParameter)(nil)).Elem() +} + +type ActionType string + +const ( + ActionTypeMigrationV1 = ActionType("MigrationV1") + ActionTypeVmPowerV1 = ActionType("VmPowerV1") + ActionTypeHostPowerV1 = ActionType("HostPowerV1") + ActionTypeHostMaintenanceV1 = ActionType("HostMaintenanceV1") + ActionTypeStorageMigrationV1 = ActionType("StorageMigrationV1") + ActionTypeStoragePlacementV1 = ActionType("StoragePlacementV1") + ActionTypePlacementV1 = ActionType("PlacementV1") +) + +func init() { + t["ActionType"] = reflect.TypeOf((*ActionType)(nil)).Elem() +} + +type AffinityType string + +const ( + AffinityTypeMemory = AffinityType("memory") + AffinityTypeCpu = AffinityType("cpu") +) + +func init() { + t["AffinityType"] = reflect.TypeOf((*AffinityType)(nil)).Elem() +} + +type AgentInstallFailedReason string + +const ( + AgentInstallFailedReasonNotEnoughSpaceOnDevice = AgentInstallFailedReason("NotEnoughSpaceOnDevice") + AgentInstallFailedReasonPrepareToUpgradeFailed = AgentInstallFailedReason("PrepareToUpgradeFailed") + AgentInstallFailedReasonAgentNotRunning = AgentInstallFailedReason("AgentNotRunning") + AgentInstallFailedReasonAgentNotReachable = AgentInstallFailedReason("AgentNotReachable") + AgentInstallFailedReasonInstallTimedout = AgentInstallFailedReason("InstallTimedout") + AgentInstallFailedReasonSignatureVerificationFailed = AgentInstallFailedReason("SignatureVerificationFailed") + AgentInstallFailedReasonAgentUploadFailed = AgentInstallFailedReason("AgentUploadFailed") + AgentInstallFailedReasonAgentUploadTimedout = AgentInstallFailedReason("AgentUploadTimedout") + AgentInstallFailedReasonUnknownInstallerError = AgentInstallFailedReason("UnknownInstallerError") +) + +func init() { + t["AgentInstallFailedReason"] = reflect.TypeOf((*AgentInstallFailedReason)(nil)).Elem() +} + +type ArrayUpdateOperation string + +const ( + ArrayUpdateOperationAdd = ArrayUpdateOperation("add") + ArrayUpdateOperationRemove = ArrayUpdateOperation("remove") + ArrayUpdateOperationEdit = ArrayUpdateOperation("edit") +) + +func init() { + t["ArrayUpdateOperation"] = reflect.TypeOf((*ArrayUpdateOperation)(nil)).Elem() +} + +type AutoStartAction string + +const ( + AutoStartActionNone = AutoStartAction("none") + AutoStartActionSystemDefault = AutoStartAction("systemDefault") + AutoStartActionPowerOn = AutoStartAction("powerOn") + AutoStartActionPowerOff = AutoStartAction("powerOff") + AutoStartActionGuestShutdown = AutoStartAction("guestShutdown") + AutoStartActionSuspend = AutoStartAction("suspend") +) + +func init() { + t["AutoStartAction"] = reflect.TypeOf((*AutoStartAction)(nil)).Elem() +} + +type AutoStartWaitHeartbeatSetting string + +const ( + AutoStartWaitHeartbeatSettingYes = AutoStartWaitHeartbeatSetting("yes") + AutoStartWaitHeartbeatSettingNo = AutoStartWaitHeartbeatSetting("no") + 
AutoStartWaitHeartbeatSettingSystemDefault = AutoStartWaitHeartbeatSetting("systemDefault") +) + +func init() { + t["AutoStartWaitHeartbeatSetting"] = reflect.TypeOf((*AutoStartWaitHeartbeatSetting)(nil)).Elem() +} + +type BatchResultResult string + +const ( + BatchResultResultSuccess = BatchResultResult("success") + BatchResultResultFail = BatchResultResult("fail") +) + +func init() { + t["BatchResultResult"] = reflect.TypeOf((*BatchResultResult)(nil)).Elem() +} + +type CannotEnableVmcpForClusterReason string + +const ( + CannotEnableVmcpForClusterReasonAPDTimeoutDisabled = CannotEnableVmcpForClusterReason("APDTimeoutDisabled") + CannotEnableVmcpForClusterReasonIncompatibleHostVersion = CannotEnableVmcpForClusterReason("IncompatibleHostVersion") +) + +func init() { + t["CannotEnableVmcpForClusterReason"] = reflect.TypeOf((*CannotEnableVmcpForClusterReason)(nil)).Elem() +} + +type CannotMoveFaultToleranceVmMoveType string + +const ( + CannotMoveFaultToleranceVmMoveTypeResourcePool = CannotMoveFaultToleranceVmMoveType("resourcePool") + CannotMoveFaultToleranceVmMoveTypeCluster = CannotMoveFaultToleranceVmMoveType("cluster") +) + +func init() { + t["CannotMoveFaultToleranceVmMoveType"] = reflect.TypeOf((*CannotMoveFaultToleranceVmMoveType)(nil)).Elem() +} + +type CannotPowerOffVmInClusterOperation string + +const ( + CannotPowerOffVmInClusterOperationSuspend = CannotPowerOffVmInClusterOperation("suspend") + CannotPowerOffVmInClusterOperationPowerOff = CannotPowerOffVmInClusterOperation("powerOff") + CannotPowerOffVmInClusterOperationGuestShutdown = CannotPowerOffVmInClusterOperation("guestShutdown") + CannotPowerOffVmInClusterOperationGuestSuspend = CannotPowerOffVmInClusterOperation("guestSuspend") +) + +func init() { + t["CannotPowerOffVmInClusterOperation"] = reflect.TypeOf((*CannotPowerOffVmInClusterOperation)(nil)).Elem() +} + +type CannotUseNetworkReason string + +const ( + CannotUseNetworkReasonNetworkReservationNotSupported = CannotUseNetworkReason("NetworkReservationNotSupported") + CannotUseNetworkReasonMismatchedNetworkPolicies = CannotUseNetworkReason("MismatchedNetworkPolicies") + CannotUseNetworkReasonMismatchedDvsVersionOrVendor = CannotUseNetworkReason("MismatchedDvsVersionOrVendor") + CannotUseNetworkReasonVMotionToUnsupportedNetworkType = CannotUseNetworkReason("VMotionToUnsupportedNetworkType") +) + +func init() { + t["CannotUseNetworkReason"] = reflect.TypeOf((*CannotUseNetworkReason)(nil)).Elem() +} + +type CheckTestType string + +const ( + CheckTestTypeSourceTests = CheckTestType("sourceTests") + CheckTestTypeHostTests = CheckTestType("hostTests") + CheckTestTypeResourcePoolTests = CheckTestType("resourcePoolTests") + CheckTestTypeDatastoreTests = CheckTestType("datastoreTests") + CheckTestTypeNetworkTests = CheckTestType("networkTests") +) + +func init() { + t["CheckTestType"] = reflect.TypeOf((*CheckTestType)(nil)).Elem() +} + +type ClusterDasAamNodeStateDasState string + +const ( + ClusterDasAamNodeStateDasStateUninitialized = ClusterDasAamNodeStateDasState("uninitialized") + ClusterDasAamNodeStateDasStateInitialized = ClusterDasAamNodeStateDasState("initialized") + ClusterDasAamNodeStateDasStateConfiguring = ClusterDasAamNodeStateDasState("configuring") + ClusterDasAamNodeStateDasStateUnconfiguring = ClusterDasAamNodeStateDasState("unconfiguring") + ClusterDasAamNodeStateDasStateRunning = ClusterDasAamNodeStateDasState("running") + ClusterDasAamNodeStateDasStateError = ClusterDasAamNodeStateDasState("error") + ClusterDasAamNodeStateDasStateAgentShutdown = 
ClusterDasAamNodeStateDasState("agentShutdown") + ClusterDasAamNodeStateDasStateNodeFailed = ClusterDasAamNodeStateDasState("nodeFailed") +) + +func init() { + t["ClusterDasAamNodeStateDasState"] = reflect.TypeOf((*ClusterDasAamNodeStateDasState)(nil)).Elem() +} + +type ClusterDasConfigInfoHBDatastoreCandidate string + +const ( + ClusterDasConfigInfoHBDatastoreCandidateUserSelectedDs = ClusterDasConfigInfoHBDatastoreCandidate("userSelectedDs") + ClusterDasConfigInfoHBDatastoreCandidateAllFeasibleDs = ClusterDasConfigInfoHBDatastoreCandidate("allFeasibleDs") + ClusterDasConfigInfoHBDatastoreCandidateAllFeasibleDsWithUserPreference = ClusterDasConfigInfoHBDatastoreCandidate("allFeasibleDsWithUserPreference") +) + +func init() { + t["ClusterDasConfigInfoHBDatastoreCandidate"] = reflect.TypeOf((*ClusterDasConfigInfoHBDatastoreCandidate)(nil)).Elem() +} + +type ClusterDasConfigInfoServiceState string + +const ( + ClusterDasConfigInfoServiceStateDisabled = ClusterDasConfigInfoServiceState("disabled") + ClusterDasConfigInfoServiceStateEnabled = ClusterDasConfigInfoServiceState("enabled") +) + +func init() { + t["ClusterDasConfigInfoServiceState"] = reflect.TypeOf((*ClusterDasConfigInfoServiceState)(nil)).Elem() +} + +type ClusterDasConfigInfoVmMonitoringState string + +const ( + ClusterDasConfigInfoVmMonitoringStateVmMonitoringDisabled = ClusterDasConfigInfoVmMonitoringState("vmMonitoringDisabled") + ClusterDasConfigInfoVmMonitoringStateVmMonitoringOnly = ClusterDasConfigInfoVmMonitoringState("vmMonitoringOnly") + ClusterDasConfigInfoVmMonitoringStateVmAndAppMonitoring = ClusterDasConfigInfoVmMonitoringState("vmAndAppMonitoring") +) + +func init() { + t["ClusterDasConfigInfoVmMonitoringState"] = reflect.TypeOf((*ClusterDasConfigInfoVmMonitoringState)(nil)).Elem() +} + +type ClusterDasFdmAvailabilityState string + +const ( + ClusterDasFdmAvailabilityStateUninitialized = ClusterDasFdmAvailabilityState("uninitialized") + ClusterDasFdmAvailabilityStateElection = ClusterDasFdmAvailabilityState("election") + ClusterDasFdmAvailabilityStateMaster = ClusterDasFdmAvailabilityState("master") + ClusterDasFdmAvailabilityStateConnectedToMaster = ClusterDasFdmAvailabilityState("connectedToMaster") + ClusterDasFdmAvailabilityStateNetworkPartitionedFromMaster = ClusterDasFdmAvailabilityState("networkPartitionedFromMaster") + ClusterDasFdmAvailabilityStateNetworkIsolated = ClusterDasFdmAvailabilityState("networkIsolated") + ClusterDasFdmAvailabilityStateHostDown = ClusterDasFdmAvailabilityState("hostDown") + ClusterDasFdmAvailabilityStateInitializationError = ClusterDasFdmAvailabilityState("initializationError") + ClusterDasFdmAvailabilityStateUninitializationError = ClusterDasFdmAvailabilityState("uninitializationError") + ClusterDasFdmAvailabilityStateFdmUnreachable = ClusterDasFdmAvailabilityState("fdmUnreachable") +) + +func init() { + t["ClusterDasFdmAvailabilityState"] = reflect.TypeOf((*ClusterDasFdmAvailabilityState)(nil)).Elem() +} + +type ClusterDasVmSettingsIsolationResponse string + +const ( + ClusterDasVmSettingsIsolationResponseNone = ClusterDasVmSettingsIsolationResponse("none") + ClusterDasVmSettingsIsolationResponsePowerOff = ClusterDasVmSettingsIsolationResponse("powerOff") + ClusterDasVmSettingsIsolationResponseShutdown = ClusterDasVmSettingsIsolationResponse("shutdown") + ClusterDasVmSettingsIsolationResponseClusterIsolationResponse = ClusterDasVmSettingsIsolationResponse("clusterIsolationResponse") +) + +func init() { + t["ClusterDasVmSettingsIsolationResponse"] = 
reflect.TypeOf((*ClusterDasVmSettingsIsolationResponse)(nil)).Elem() +} + +type ClusterDasVmSettingsRestartPriority string + +const ( + ClusterDasVmSettingsRestartPriorityDisabled = ClusterDasVmSettingsRestartPriority("disabled") + ClusterDasVmSettingsRestartPriorityLow = ClusterDasVmSettingsRestartPriority("low") + ClusterDasVmSettingsRestartPriorityMedium = ClusterDasVmSettingsRestartPriority("medium") + ClusterDasVmSettingsRestartPriorityHigh = ClusterDasVmSettingsRestartPriority("high") + ClusterDasVmSettingsRestartPriorityClusterRestartPriority = ClusterDasVmSettingsRestartPriority("clusterRestartPriority") +) + +func init() { + t["ClusterDasVmSettingsRestartPriority"] = reflect.TypeOf((*ClusterDasVmSettingsRestartPriority)(nil)).Elem() +} + +type ClusterPowerOnVmOption string + +const ( + ClusterPowerOnVmOptionOverrideAutomationLevel = ClusterPowerOnVmOption("OverrideAutomationLevel") + ClusterPowerOnVmOptionReserveResources = ClusterPowerOnVmOption("ReserveResources") +) + +func init() { + t["ClusterPowerOnVmOption"] = reflect.TypeOf((*ClusterPowerOnVmOption)(nil)).Elem() +} + +type ClusterProfileServiceType string + +const ( + ClusterProfileServiceTypeDRS = ClusterProfileServiceType("DRS") + ClusterProfileServiceTypeHA = ClusterProfileServiceType("HA") + ClusterProfileServiceTypeDPM = ClusterProfileServiceType("DPM") + ClusterProfileServiceTypeFT = ClusterProfileServiceType("FT") +) + +func init() { + t["ClusterProfileServiceType"] = reflect.TypeOf((*ClusterProfileServiceType)(nil)).Elem() +} + +type ClusterVmComponentProtectionSettingsStorageVmReaction string + +const ( + ClusterVmComponentProtectionSettingsStorageVmReactionDisabled = ClusterVmComponentProtectionSettingsStorageVmReaction("disabled") + ClusterVmComponentProtectionSettingsStorageVmReactionWarning = ClusterVmComponentProtectionSettingsStorageVmReaction("warning") + ClusterVmComponentProtectionSettingsStorageVmReactionRestartConservative = ClusterVmComponentProtectionSettingsStorageVmReaction("restartConservative") + ClusterVmComponentProtectionSettingsStorageVmReactionRestartAggressive = ClusterVmComponentProtectionSettingsStorageVmReaction("restartAggressive") + ClusterVmComponentProtectionSettingsStorageVmReactionClusterDefault = ClusterVmComponentProtectionSettingsStorageVmReaction("clusterDefault") +) + +func init() { + t["ClusterVmComponentProtectionSettingsStorageVmReaction"] = reflect.TypeOf((*ClusterVmComponentProtectionSettingsStorageVmReaction)(nil)).Elem() +} + +type ClusterVmComponentProtectionSettingsVmReactionOnAPDCleared string + +const ( + ClusterVmComponentProtectionSettingsVmReactionOnAPDClearedNone = ClusterVmComponentProtectionSettingsVmReactionOnAPDCleared("none") + ClusterVmComponentProtectionSettingsVmReactionOnAPDClearedReset = ClusterVmComponentProtectionSettingsVmReactionOnAPDCleared("reset") + ClusterVmComponentProtectionSettingsVmReactionOnAPDClearedUseClusterDefault = ClusterVmComponentProtectionSettingsVmReactionOnAPDCleared("useClusterDefault") +) + +func init() { + t["ClusterVmComponentProtectionSettingsVmReactionOnAPDCleared"] = reflect.TypeOf((*ClusterVmComponentProtectionSettingsVmReactionOnAPDCleared)(nil)).Elem() +} + +type ComplianceResultStatus string + +const ( + ComplianceResultStatusCompliant = ComplianceResultStatus("compliant") + ComplianceResultStatusNonCompliant = ComplianceResultStatus("nonCompliant") + ComplianceResultStatusUnknown = ComplianceResultStatus("unknown") +) + +func init() { + t["ComplianceResultStatus"] = 
reflect.TypeOf((*ComplianceResultStatus)(nil)).Elem() +} + +type ComputeResourceHostSPBMLicenseInfoHostSPBMLicenseState string + +const ( + ComputeResourceHostSPBMLicenseInfoHostSPBMLicenseStateLicensed = ComputeResourceHostSPBMLicenseInfoHostSPBMLicenseState("licensed") + ComputeResourceHostSPBMLicenseInfoHostSPBMLicenseStateUnlicensed = ComputeResourceHostSPBMLicenseInfoHostSPBMLicenseState("unlicensed") + ComputeResourceHostSPBMLicenseInfoHostSPBMLicenseStateUnknown = ComputeResourceHostSPBMLicenseInfoHostSPBMLicenseState("unknown") +) + +func init() { + t["ComputeResourceHostSPBMLicenseInfoHostSPBMLicenseState"] = reflect.TypeOf((*ComputeResourceHostSPBMLicenseInfoHostSPBMLicenseState)(nil)).Elem() +} + +type ConfigSpecOperation string + +const ( + ConfigSpecOperationAdd = ConfigSpecOperation("add") + ConfigSpecOperationEdit = ConfigSpecOperation("edit") + ConfigSpecOperationRemove = ConfigSpecOperation("remove") +) + +func init() { + t["ConfigSpecOperation"] = reflect.TypeOf((*ConfigSpecOperation)(nil)).Elem() +} + +type CustomizationLicenseDataMode string + +const ( + CustomizationLicenseDataModePerServer = CustomizationLicenseDataMode("perServer") + CustomizationLicenseDataModePerSeat = CustomizationLicenseDataMode("perSeat") +) + +func init() { + t["CustomizationLicenseDataMode"] = reflect.TypeOf((*CustomizationLicenseDataMode)(nil)).Elem() +} + +type CustomizationNetBIOSMode string + +const ( + CustomizationNetBIOSModeEnableNetBIOSViaDhcp = CustomizationNetBIOSMode("enableNetBIOSViaDhcp") + CustomizationNetBIOSModeEnableNetBIOS = CustomizationNetBIOSMode("enableNetBIOS") + CustomizationNetBIOSModeDisableNetBIOS = CustomizationNetBIOSMode("disableNetBIOS") +) + +func init() { + t["CustomizationNetBIOSMode"] = reflect.TypeOf((*CustomizationNetBIOSMode)(nil)).Elem() +} + +type CustomizationSysprepRebootOption string + +const ( + CustomizationSysprepRebootOptionReboot = CustomizationSysprepRebootOption("reboot") + CustomizationSysprepRebootOptionNoreboot = CustomizationSysprepRebootOption("noreboot") + CustomizationSysprepRebootOptionShutdown = CustomizationSysprepRebootOption("shutdown") +) + +func init() { + t["CustomizationSysprepRebootOption"] = reflect.TypeOf((*CustomizationSysprepRebootOption)(nil)).Elem() +} + +type DVPortStatusVmDirectPathGen2InactiveReasonNetwork string + +const ( + DVPortStatusVmDirectPathGen2InactiveReasonNetworkPortNptIncompatibleDvs = DVPortStatusVmDirectPathGen2InactiveReasonNetwork("portNptIncompatibleDvs") + DVPortStatusVmDirectPathGen2InactiveReasonNetworkPortNptNoCompatibleNics = DVPortStatusVmDirectPathGen2InactiveReasonNetwork("portNptNoCompatibleNics") + DVPortStatusVmDirectPathGen2InactiveReasonNetworkPortNptNoVirtualFunctionsAvailable = DVPortStatusVmDirectPathGen2InactiveReasonNetwork("portNptNoVirtualFunctionsAvailable") + DVPortStatusVmDirectPathGen2InactiveReasonNetworkPortNptDisabledForPort = DVPortStatusVmDirectPathGen2InactiveReasonNetwork("portNptDisabledForPort") +) + +func init() { + t["DVPortStatusVmDirectPathGen2InactiveReasonNetwork"] = reflect.TypeOf((*DVPortStatusVmDirectPathGen2InactiveReasonNetwork)(nil)).Elem() +} + +type DVPortStatusVmDirectPathGen2InactiveReasonOther string + +const ( + DVPortStatusVmDirectPathGen2InactiveReasonOtherPortNptIncompatibleHost = DVPortStatusVmDirectPathGen2InactiveReasonOther("portNptIncompatibleHost") + DVPortStatusVmDirectPathGen2InactiveReasonOtherPortNptIncompatibleConnectee = DVPortStatusVmDirectPathGen2InactiveReasonOther("portNptIncompatibleConnectee") +) + +func init() { + 
t["DVPortStatusVmDirectPathGen2InactiveReasonOther"] = reflect.TypeOf((*DVPortStatusVmDirectPathGen2InactiveReasonOther)(nil)).Elem() +} + +type DasConfigFaultDasConfigFaultReason string + +const ( + DasConfigFaultDasConfigFaultReasonHostNetworkMisconfiguration = DasConfigFaultDasConfigFaultReason("HostNetworkMisconfiguration") + DasConfigFaultDasConfigFaultReasonHostMisconfiguration = DasConfigFaultDasConfigFaultReason("HostMisconfiguration") + DasConfigFaultDasConfigFaultReasonInsufficientPrivileges = DasConfigFaultDasConfigFaultReason("InsufficientPrivileges") + DasConfigFaultDasConfigFaultReasonNoPrimaryAgentAvailable = DasConfigFaultDasConfigFaultReason("NoPrimaryAgentAvailable") + DasConfigFaultDasConfigFaultReasonOther = DasConfigFaultDasConfigFaultReason("Other") + DasConfigFaultDasConfigFaultReasonNoDatastoresConfigured = DasConfigFaultDasConfigFaultReason("NoDatastoresConfigured") + DasConfigFaultDasConfigFaultReasonCreateConfigVvolFailed = DasConfigFaultDasConfigFaultReason("CreateConfigVvolFailed") + DasConfigFaultDasConfigFaultReasonVSanNotSupportedOnHost = DasConfigFaultDasConfigFaultReason("VSanNotSupportedOnHost") + DasConfigFaultDasConfigFaultReasonDasNetworkMisconfiguration = DasConfigFaultDasConfigFaultReason("DasNetworkMisconfiguration") +) + +func init() { + t["DasConfigFaultDasConfigFaultReason"] = reflect.TypeOf((*DasConfigFaultDasConfigFaultReason)(nil)).Elem() +} + +type DasVmPriority string + +const ( + DasVmPriorityDisabled = DasVmPriority("disabled") + DasVmPriorityLow = DasVmPriority("low") + DasVmPriorityMedium = DasVmPriority("medium") + DasVmPriorityHigh = DasVmPriority("high") +) + +func init() { + t["DasVmPriority"] = reflect.TypeOf((*DasVmPriority)(nil)).Elem() +} + +type DatastoreAccessible string + +const ( + DatastoreAccessibleTrue = DatastoreAccessible("True") + DatastoreAccessibleFalse = DatastoreAccessible("False") +) + +func init() { + t["DatastoreAccessible"] = reflect.TypeOf((*DatastoreAccessible)(nil)).Elem() +} + +type DatastoreSummaryMaintenanceModeState string + +const ( + DatastoreSummaryMaintenanceModeStateNormal = DatastoreSummaryMaintenanceModeState("normal") + DatastoreSummaryMaintenanceModeStateEnteringMaintenance = DatastoreSummaryMaintenanceModeState("enteringMaintenance") + DatastoreSummaryMaintenanceModeStateInMaintenance = DatastoreSummaryMaintenanceModeState("inMaintenance") +) + +func init() { + t["DatastoreSummaryMaintenanceModeState"] = reflect.TypeOf((*DatastoreSummaryMaintenanceModeState)(nil)).Elem() +} + +type DayOfWeek string + +const ( + DayOfWeekSunday = DayOfWeek("sunday") + DayOfWeekMonday = DayOfWeek("monday") + DayOfWeekTuesday = DayOfWeek("tuesday") + DayOfWeekWednesday = DayOfWeek("wednesday") + DayOfWeekThursday = DayOfWeek("thursday") + DayOfWeekFriday = DayOfWeek("friday") + DayOfWeekSaturday = DayOfWeek("saturday") +) + +func init() { + t["DayOfWeek"] = reflect.TypeOf((*DayOfWeek)(nil)).Elem() +} + +type DeviceNotSupportedReason string + +const ( + DeviceNotSupportedReasonHost = DeviceNotSupportedReason("host") + DeviceNotSupportedReasonGuest = DeviceNotSupportedReason("guest") +) + +func init() { + t["DeviceNotSupportedReason"] = reflect.TypeOf((*DeviceNotSupportedReason)(nil)).Elem() +} + +type DiagnosticManagerLogCreator string + +const ( + DiagnosticManagerLogCreatorVpxd = DiagnosticManagerLogCreator("vpxd") + DiagnosticManagerLogCreatorVpxa = DiagnosticManagerLogCreator("vpxa") + DiagnosticManagerLogCreatorHostd = DiagnosticManagerLogCreator("hostd") + DiagnosticManagerLogCreatorServerd = 
DiagnosticManagerLogCreator("serverd") + DiagnosticManagerLogCreatorInstall = DiagnosticManagerLogCreator("install") + DiagnosticManagerLogCreatorVpxClient = DiagnosticManagerLogCreator("vpxClient") + DiagnosticManagerLogCreatorRecordLog = DiagnosticManagerLogCreator("recordLog") +) + +func init() { + t["DiagnosticManagerLogCreator"] = reflect.TypeOf((*DiagnosticManagerLogCreator)(nil)).Elem() +} + +type DiagnosticManagerLogFormat string + +const ( + DiagnosticManagerLogFormatPlain = DiagnosticManagerLogFormat("plain") +) + +func init() { + t["DiagnosticManagerLogFormat"] = reflect.TypeOf((*DiagnosticManagerLogFormat)(nil)).Elem() +} + +type DiagnosticPartitionStorageType string + +const ( + DiagnosticPartitionStorageTypeDirectAttached = DiagnosticPartitionStorageType("directAttached") + DiagnosticPartitionStorageTypeNetworkAttached = DiagnosticPartitionStorageType("networkAttached") +) + +func init() { + t["DiagnosticPartitionStorageType"] = reflect.TypeOf((*DiagnosticPartitionStorageType)(nil)).Elem() +} + +type DiagnosticPartitionType string + +const ( + DiagnosticPartitionTypeSingleHost = DiagnosticPartitionType("singleHost") + DiagnosticPartitionTypeMultiHost = DiagnosticPartitionType("multiHost") +) + +func init() { + t["DiagnosticPartitionType"] = reflect.TypeOf((*DiagnosticPartitionType)(nil)).Elem() +} + +type DisallowedChangeByServiceDisallowedChange string + +const ( + DisallowedChangeByServiceDisallowedChangeHotExtendDisk = DisallowedChangeByServiceDisallowedChange("hotExtendDisk") +) + +func init() { + t["DisallowedChangeByServiceDisallowedChange"] = reflect.TypeOf((*DisallowedChangeByServiceDisallowedChange)(nil)).Elem() +} + +type DistributedVirtualPortgroupMetaTagName string + +const ( + DistributedVirtualPortgroupMetaTagNameDvsName = DistributedVirtualPortgroupMetaTagName("dvsName") + DistributedVirtualPortgroupMetaTagNamePortgroupName = DistributedVirtualPortgroupMetaTagName("portgroupName") + DistributedVirtualPortgroupMetaTagNamePortIndex = DistributedVirtualPortgroupMetaTagName("portIndex") +) + +func init() { + t["DistributedVirtualPortgroupMetaTagName"] = reflect.TypeOf((*DistributedVirtualPortgroupMetaTagName)(nil)).Elem() +} + +type DistributedVirtualPortgroupPortgroupType string + +const ( + DistributedVirtualPortgroupPortgroupTypeEarlyBinding = DistributedVirtualPortgroupPortgroupType("earlyBinding") + DistributedVirtualPortgroupPortgroupTypeLateBinding = DistributedVirtualPortgroupPortgroupType("lateBinding") + DistributedVirtualPortgroupPortgroupTypeEphemeral = DistributedVirtualPortgroupPortgroupType("ephemeral") +) + +func init() { + t["DistributedVirtualPortgroupPortgroupType"] = reflect.TypeOf((*DistributedVirtualPortgroupPortgroupType)(nil)).Elem() +} + +type DistributedVirtualSwitchHostInfrastructureTrafficClass string + +const ( + DistributedVirtualSwitchHostInfrastructureTrafficClassManagement = DistributedVirtualSwitchHostInfrastructureTrafficClass("management") + DistributedVirtualSwitchHostInfrastructureTrafficClassFaultTolerance = DistributedVirtualSwitchHostInfrastructureTrafficClass("faultTolerance") + DistributedVirtualSwitchHostInfrastructureTrafficClassVmotion = DistributedVirtualSwitchHostInfrastructureTrafficClass("vmotion") + DistributedVirtualSwitchHostInfrastructureTrafficClassVirtualMachine = DistributedVirtualSwitchHostInfrastructureTrafficClass("virtualMachine") + DistributedVirtualSwitchHostInfrastructureTrafficClassISCSI = DistributedVirtualSwitchHostInfrastructureTrafficClass("iSCSI") + 
DistributedVirtualSwitchHostInfrastructureTrafficClassNfs = DistributedVirtualSwitchHostInfrastructureTrafficClass("nfs") + DistributedVirtualSwitchHostInfrastructureTrafficClassHbr = DistributedVirtualSwitchHostInfrastructureTrafficClass("hbr") + DistributedVirtualSwitchHostInfrastructureTrafficClassVsan = DistributedVirtualSwitchHostInfrastructureTrafficClass("vsan") + DistributedVirtualSwitchHostInfrastructureTrafficClassVdp = DistributedVirtualSwitchHostInfrastructureTrafficClass("vdp") +) + +func init() { + t["DistributedVirtualSwitchHostInfrastructureTrafficClass"] = reflect.TypeOf((*DistributedVirtualSwitchHostInfrastructureTrafficClass)(nil)).Elem() +} + +type DistributedVirtualSwitchHostMemberHostComponentState string + +const ( + DistributedVirtualSwitchHostMemberHostComponentStateUp = DistributedVirtualSwitchHostMemberHostComponentState("up") + DistributedVirtualSwitchHostMemberHostComponentStatePending = DistributedVirtualSwitchHostMemberHostComponentState("pending") + DistributedVirtualSwitchHostMemberHostComponentStateOutOfSync = DistributedVirtualSwitchHostMemberHostComponentState("outOfSync") + DistributedVirtualSwitchHostMemberHostComponentStateWarning = DistributedVirtualSwitchHostMemberHostComponentState("warning") + DistributedVirtualSwitchHostMemberHostComponentStateDisconnected = DistributedVirtualSwitchHostMemberHostComponentState("disconnected") + DistributedVirtualSwitchHostMemberHostComponentStateDown = DistributedVirtualSwitchHostMemberHostComponentState("down") +) + +func init() { + t["DistributedVirtualSwitchHostMemberHostComponentState"] = reflect.TypeOf((*DistributedVirtualSwitchHostMemberHostComponentState)(nil)).Elem() +} + +type DistributedVirtualSwitchNetworkResourceControlVersion string + +const ( + DistributedVirtualSwitchNetworkResourceControlVersionVersion2 = DistributedVirtualSwitchNetworkResourceControlVersion("version2") + DistributedVirtualSwitchNetworkResourceControlVersionVersion3 = DistributedVirtualSwitchNetworkResourceControlVersion("version3") +) + +func init() { + t["DistributedVirtualSwitchNetworkResourceControlVersion"] = reflect.TypeOf((*DistributedVirtualSwitchNetworkResourceControlVersion)(nil)).Elem() +} + +type DistributedVirtualSwitchNicTeamingPolicyMode string + +const ( + DistributedVirtualSwitchNicTeamingPolicyModeLoadbalance_ip = DistributedVirtualSwitchNicTeamingPolicyMode("loadbalance_ip") + DistributedVirtualSwitchNicTeamingPolicyModeLoadbalance_srcmac = DistributedVirtualSwitchNicTeamingPolicyMode("loadbalance_srcmac") + DistributedVirtualSwitchNicTeamingPolicyModeLoadbalance_srcid = DistributedVirtualSwitchNicTeamingPolicyMode("loadbalance_srcid") + DistributedVirtualSwitchNicTeamingPolicyModeFailover_explicit = DistributedVirtualSwitchNicTeamingPolicyMode("failover_explicit") + DistributedVirtualSwitchNicTeamingPolicyModeLoadbalance_loadbased = DistributedVirtualSwitchNicTeamingPolicyMode("loadbalance_loadbased") +) + +func init() { + t["DistributedVirtualSwitchNicTeamingPolicyMode"] = reflect.TypeOf((*DistributedVirtualSwitchNicTeamingPolicyMode)(nil)).Elem() +} + +type DistributedVirtualSwitchPortConnecteeConnecteeType string + +const ( + DistributedVirtualSwitchPortConnecteeConnecteeTypePnic = DistributedVirtualSwitchPortConnecteeConnecteeType("pnic") + DistributedVirtualSwitchPortConnecteeConnecteeTypeVmVnic = DistributedVirtualSwitchPortConnecteeConnecteeType("vmVnic") + DistributedVirtualSwitchPortConnecteeConnecteeTypeHostConsoleVnic = DistributedVirtualSwitchPortConnecteeConnecteeType("hostConsoleVnic") + 
DistributedVirtualSwitchPortConnecteeConnecteeTypeHostVmkVnic = DistributedVirtualSwitchPortConnecteeConnecteeType("hostVmkVnic") +) + +func init() { + t["DistributedVirtualSwitchPortConnecteeConnecteeType"] = reflect.TypeOf((*DistributedVirtualSwitchPortConnecteeConnecteeType)(nil)).Elem() +} + +type DistributedVirtualSwitchProductSpecOperationType string + +const ( + DistributedVirtualSwitchProductSpecOperationTypePreInstall = DistributedVirtualSwitchProductSpecOperationType("preInstall") + DistributedVirtualSwitchProductSpecOperationTypeUpgrade = DistributedVirtualSwitchProductSpecOperationType("upgrade") + DistributedVirtualSwitchProductSpecOperationTypeNotifyAvailableUpgrade = DistributedVirtualSwitchProductSpecOperationType("notifyAvailableUpgrade") + DistributedVirtualSwitchProductSpecOperationTypeProceedWithUpgrade = DistributedVirtualSwitchProductSpecOperationType("proceedWithUpgrade") + DistributedVirtualSwitchProductSpecOperationTypeUpdateBundleInfo = DistributedVirtualSwitchProductSpecOperationType("updateBundleInfo") +) + +func init() { + t["DistributedVirtualSwitchProductSpecOperationType"] = reflect.TypeOf((*DistributedVirtualSwitchProductSpecOperationType)(nil)).Elem() +} + +type DpmBehavior string + +const ( + DpmBehaviorManual = DpmBehavior("manual") + DpmBehaviorAutomated = DpmBehavior("automated") +) + +func init() { + t["DpmBehavior"] = reflect.TypeOf((*DpmBehavior)(nil)).Elem() +} + +type DrsBehavior string + +const ( + DrsBehaviorManual = DrsBehavior("manual") + DrsBehaviorPartiallyAutomated = DrsBehavior("partiallyAutomated") + DrsBehaviorFullyAutomated = DrsBehavior("fullyAutomated") +) + +func init() { + t["DrsBehavior"] = reflect.TypeOf((*DrsBehavior)(nil)).Elem() +} + +type DrsInjectorWorkloadCorrelationState string + +const ( + DrsInjectorWorkloadCorrelationStateCorrelated = DrsInjectorWorkloadCorrelationState("Correlated") + DrsInjectorWorkloadCorrelationStateUncorrelated = DrsInjectorWorkloadCorrelationState("Uncorrelated") +) + +func init() { + t["DrsInjectorWorkloadCorrelationState"] = reflect.TypeOf((*DrsInjectorWorkloadCorrelationState)(nil)).Elem() +} + +type DrsRecommendationReasonCode string + +const ( + DrsRecommendationReasonCodeFairnessCpuAvg = DrsRecommendationReasonCode("fairnessCpuAvg") + DrsRecommendationReasonCodeFairnessMemAvg = DrsRecommendationReasonCode("fairnessMemAvg") + DrsRecommendationReasonCodeJointAffin = DrsRecommendationReasonCode("jointAffin") + DrsRecommendationReasonCodeAntiAffin = DrsRecommendationReasonCode("antiAffin") + DrsRecommendationReasonCodeHostMaint = DrsRecommendationReasonCode("hostMaint") +) + +func init() { + t["DrsRecommendationReasonCode"] = reflect.TypeOf((*DrsRecommendationReasonCode)(nil)).Elem() +} + +type DvsFilterOnFailure string + +const ( + DvsFilterOnFailureFailOpen = DvsFilterOnFailure("failOpen") + DvsFilterOnFailureFailClosed = DvsFilterOnFailure("failClosed") +) + +func init() { + t["DvsFilterOnFailure"] = reflect.TypeOf((*DvsFilterOnFailure)(nil)).Elem() +} + +type DvsNetworkRuleDirectionType string + +const ( + DvsNetworkRuleDirectionTypeIncomingPackets = DvsNetworkRuleDirectionType("incomingPackets") + DvsNetworkRuleDirectionTypeOutgoingPackets = DvsNetworkRuleDirectionType("outgoingPackets") + DvsNetworkRuleDirectionTypeBoth = DvsNetworkRuleDirectionType("both") +) + +func init() { + t["DvsNetworkRuleDirectionType"] = reflect.TypeOf((*DvsNetworkRuleDirectionType)(nil)).Elem() +} + +type EntityImportType string + +const ( + EntityImportTypeCreateEntityWithNewIdentifier = 
EntityImportType("createEntityWithNewIdentifier") + EntityImportTypeCreateEntityWithOriginalIdentifier = EntityImportType("createEntityWithOriginalIdentifier") + EntityImportTypeApplyToEntitySpecified = EntityImportType("applyToEntitySpecified") +) + +func init() { + t["EntityImportType"] = reflect.TypeOf((*EntityImportType)(nil)).Elem() +} + +type EntityType string + +const ( + EntityTypeDistributedVirtualSwitch = EntityType("distributedVirtualSwitch") + EntityTypeDistributedVirtualPortgroup = EntityType("distributedVirtualPortgroup") +) + +func init() { + t["EntityType"] = reflect.TypeOf((*EntityType)(nil)).Elem() +} + +type EventAlarmExpressionComparisonOperator string + +const ( + EventAlarmExpressionComparisonOperatorEquals = EventAlarmExpressionComparisonOperator("equals") + EventAlarmExpressionComparisonOperatorNotEqualTo = EventAlarmExpressionComparisonOperator("notEqualTo") + EventAlarmExpressionComparisonOperatorStartsWith = EventAlarmExpressionComparisonOperator("startsWith") + EventAlarmExpressionComparisonOperatorDoesNotStartWith = EventAlarmExpressionComparisonOperator("doesNotStartWith") + EventAlarmExpressionComparisonOperatorEndsWith = EventAlarmExpressionComparisonOperator("endsWith") + EventAlarmExpressionComparisonOperatorDoesNotEndWith = EventAlarmExpressionComparisonOperator("doesNotEndWith") +) + +func init() { + t["EventAlarmExpressionComparisonOperator"] = reflect.TypeOf((*EventAlarmExpressionComparisonOperator)(nil)).Elem() +} + +type EventCategory string + +const ( + EventCategoryInfo = EventCategory("info") + EventCategoryWarning = EventCategory("warning") + EventCategoryError = EventCategory("error") + EventCategoryUser = EventCategory("user") +) + +func init() { + t["EventCategory"] = reflect.TypeOf((*EventCategory)(nil)).Elem() +} + +type EventEventSeverity string + +const ( + EventEventSeverityError = EventEventSeverity("error") + EventEventSeverityWarning = EventEventSeverity("warning") + EventEventSeverityInfo = EventEventSeverity("info") + EventEventSeverityUser = EventEventSeverity("user") +) + +func init() { + t["EventEventSeverity"] = reflect.TypeOf((*EventEventSeverity)(nil)).Elem() +} + +type EventFilterSpecRecursionOption string + +const ( + EventFilterSpecRecursionOptionSelf = EventFilterSpecRecursionOption("self") + EventFilterSpecRecursionOptionChildren = EventFilterSpecRecursionOption("children") + EventFilterSpecRecursionOptionAll = EventFilterSpecRecursionOption("all") +) + +func init() { + t["EventFilterSpecRecursionOption"] = reflect.TypeOf((*EventFilterSpecRecursionOption)(nil)).Elem() +} + +type FibreChannelPortType string + +const ( + FibreChannelPortTypeFabric = FibreChannelPortType("fabric") + FibreChannelPortTypeLoop = FibreChannelPortType("loop") + FibreChannelPortTypePointToPoint = FibreChannelPortType("pointToPoint") + FibreChannelPortTypeUnknown = FibreChannelPortType("unknown") +) + +func init() { + t["FibreChannelPortType"] = reflect.TypeOf((*FibreChannelPortType)(nil)).Elem() +} + +type FileSystemMountInfoVStorageSupportStatus string + +const ( + FileSystemMountInfoVStorageSupportStatusVStorageSupported = FileSystemMountInfoVStorageSupportStatus("vStorageSupported") + FileSystemMountInfoVStorageSupportStatusVStorageUnsupported = FileSystemMountInfoVStorageSupportStatus("vStorageUnsupported") + FileSystemMountInfoVStorageSupportStatusVStorageUnknown = FileSystemMountInfoVStorageSupportStatus("vStorageUnknown") +) + +func init() { + t["FileSystemMountInfoVStorageSupportStatus"] = 
reflect.TypeOf((*FileSystemMountInfoVStorageSupportStatus)(nil)).Elem() +} + +type FtIssuesOnHostHostSelectionType string + +const ( + FtIssuesOnHostHostSelectionTypeUser = FtIssuesOnHostHostSelectionType("user") + FtIssuesOnHostHostSelectionTypeVc = FtIssuesOnHostHostSelectionType("vc") + FtIssuesOnHostHostSelectionTypeDrs = FtIssuesOnHostHostSelectionType("drs") +) + +func init() { + t["FtIssuesOnHostHostSelectionType"] = reflect.TypeOf((*FtIssuesOnHostHostSelectionType)(nil)).Elem() +} + +type GuestFileType string + +const ( + GuestFileTypeFile = GuestFileType("file") + GuestFileTypeDirectory = GuestFileType("directory") + GuestFileTypeSymlink = GuestFileType("symlink") +) + +func init() { + t["GuestFileType"] = reflect.TypeOf((*GuestFileType)(nil)).Elem() +} + +type GuestInfoAppStateType string + +const ( + GuestInfoAppStateTypeNone = GuestInfoAppStateType("none") + GuestInfoAppStateTypeAppStateOk = GuestInfoAppStateType("appStateOk") + GuestInfoAppStateTypeAppStateNeedReset = GuestInfoAppStateType("appStateNeedReset") +) + +func init() { + t["GuestInfoAppStateType"] = reflect.TypeOf((*GuestInfoAppStateType)(nil)).Elem() +} + +type GuestOsDescriptorFirmwareType string + +const ( + GuestOsDescriptorFirmwareTypeBios = GuestOsDescriptorFirmwareType("bios") + GuestOsDescriptorFirmwareTypeEfi = GuestOsDescriptorFirmwareType("efi") +) + +func init() { + t["GuestOsDescriptorFirmwareType"] = reflect.TypeOf((*GuestOsDescriptorFirmwareType)(nil)).Elem() +} + +type GuestOsDescriptorSupportLevel string + +const ( + GuestOsDescriptorSupportLevelExperimental = GuestOsDescriptorSupportLevel("experimental") + GuestOsDescriptorSupportLevelLegacy = GuestOsDescriptorSupportLevel("legacy") + GuestOsDescriptorSupportLevelTerminated = GuestOsDescriptorSupportLevel("terminated") + GuestOsDescriptorSupportLevelSupported = GuestOsDescriptorSupportLevel("supported") + GuestOsDescriptorSupportLevelUnsupported = GuestOsDescriptorSupportLevel("unsupported") + GuestOsDescriptorSupportLevelDeprecated = GuestOsDescriptorSupportLevel("deprecated") + GuestOsDescriptorSupportLevelTechPreview = GuestOsDescriptorSupportLevel("techPreview") +) + +func init() { + t["GuestOsDescriptorSupportLevel"] = reflect.TypeOf((*GuestOsDescriptorSupportLevel)(nil)).Elem() +} + +type GuestRegKeyWowSpec string + +const ( + GuestRegKeyWowSpecWOWNative = GuestRegKeyWowSpec("WOWNative") + GuestRegKeyWowSpecWOW32 = GuestRegKeyWowSpec("WOW32") + GuestRegKeyWowSpecWOW64 = GuestRegKeyWowSpec("WOW64") +) + +func init() { + t["GuestRegKeyWowSpec"] = reflect.TypeOf((*GuestRegKeyWowSpec)(nil)).Elem() +} + +type HostAccessMode string + +const ( + HostAccessModeAccessNone = HostAccessMode("accessNone") + HostAccessModeAccessAdmin = HostAccessMode("accessAdmin") + HostAccessModeAccessNoAccess = HostAccessMode("accessNoAccess") + HostAccessModeAccessReadOnly = HostAccessMode("accessReadOnly") + HostAccessModeAccessOther = HostAccessMode("accessOther") +) + +func init() { + t["HostAccessMode"] = reflect.TypeOf((*HostAccessMode)(nil)).Elem() +} + +type HostActiveDirectoryAuthenticationCertificateDigest string + +const ( + HostActiveDirectoryAuthenticationCertificateDigestSHA1 = HostActiveDirectoryAuthenticationCertificateDigest("SHA1") +) + +func init() { + t["HostActiveDirectoryAuthenticationCertificateDigest"] = reflect.TypeOf((*HostActiveDirectoryAuthenticationCertificateDigest)(nil)).Elem() +} + +type HostActiveDirectoryInfoDomainMembershipStatus string + +const ( + HostActiveDirectoryInfoDomainMembershipStatusUnknown = 
HostActiveDirectoryInfoDomainMembershipStatus("unknown") + HostActiveDirectoryInfoDomainMembershipStatusOk = HostActiveDirectoryInfoDomainMembershipStatus("ok") + HostActiveDirectoryInfoDomainMembershipStatusNoServers = HostActiveDirectoryInfoDomainMembershipStatus("noServers") + HostActiveDirectoryInfoDomainMembershipStatusClientTrustBroken = HostActiveDirectoryInfoDomainMembershipStatus("clientTrustBroken") + HostActiveDirectoryInfoDomainMembershipStatusServerTrustBroken = HostActiveDirectoryInfoDomainMembershipStatus("serverTrustBroken") + HostActiveDirectoryInfoDomainMembershipStatusInconsistentTrust = HostActiveDirectoryInfoDomainMembershipStatus("inconsistentTrust") + HostActiveDirectoryInfoDomainMembershipStatusOtherProblem = HostActiveDirectoryInfoDomainMembershipStatus("otherProblem") +) + +func init() { + t["HostActiveDirectoryInfoDomainMembershipStatus"] = reflect.TypeOf((*HostActiveDirectoryInfoDomainMembershipStatus)(nil)).Elem() +} + +type HostCapabilityFtUnsupportedReason string + +const ( + HostCapabilityFtUnsupportedReasonVMotionNotLicensed = HostCapabilityFtUnsupportedReason("vMotionNotLicensed") + HostCapabilityFtUnsupportedReasonMissingVMotionNic = HostCapabilityFtUnsupportedReason("missingVMotionNic") + HostCapabilityFtUnsupportedReasonMissingFTLoggingNic = HostCapabilityFtUnsupportedReason("missingFTLoggingNic") + HostCapabilityFtUnsupportedReasonFtNotLicensed = HostCapabilityFtUnsupportedReason("ftNotLicensed") + HostCapabilityFtUnsupportedReasonHaAgentIssue = HostCapabilityFtUnsupportedReason("haAgentIssue") + HostCapabilityFtUnsupportedReasonUnsupportedProduct = HostCapabilityFtUnsupportedReason("unsupportedProduct") + HostCapabilityFtUnsupportedReasonCpuHvUnsupported = HostCapabilityFtUnsupportedReason("cpuHvUnsupported") + HostCapabilityFtUnsupportedReasonCpuHwmmuUnsupported = HostCapabilityFtUnsupportedReason("cpuHwmmuUnsupported") + HostCapabilityFtUnsupportedReasonCpuHvDisabled = HostCapabilityFtUnsupportedReason("cpuHvDisabled") +) + +func init() { + t["HostCapabilityFtUnsupportedReason"] = reflect.TypeOf((*HostCapabilityFtUnsupportedReason)(nil)).Elem() +} + +type HostCapabilityVmDirectPathGen2UnsupportedReason string + +const ( + HostCapabilityVmDirectPathGen2UnsupportedReasonHostNptIncompatibleProduct = HostCapabilityVmDirectPathGen2UnsupportedReason("hostNptIncompatibleProduct") + HostCapabilityVmDirectPathGen2UnsupportedReasonHostNptIncompatibleHardware = HostCapabilityVmDirectPathGen2UnsupportedReason("hostNptIncompatibleHardware") + HostCapabilityVmDirectPathGen2UnsupportedReasonHostNptDisabled = HostCapabilityVmDirectPathGen2UnsupportedReason("hostNptDisabled") +) + +func init() { + t["HostCapabilityVmDirectPathGen2UnsupportedReason"] = reflect.TypeOf((*HostCapabilityVmDirectPathGen2UnsupportedReason)(nil)).Elem() +} + +type HostCertificateManagerCertificateInfoCertificateStatus string + +const ( + HostCertificateManagerCertificateInfoCertificateStatusUnknown = HostCertificateManagerCertificateInfoCertificateStatus("unknown") + HostCertificateManagerCertificateInfoCertificateStatusExpired = HostCertificateManagerCertificateInfoCertificateStatus("expired") + HostCertificateManagerCertificateInfoCertificateStatusExpiring = HostCertificateManagerCertificateInfoCertificateStatus("expiring") + HostCertificateManagerCertificateInfoCertificateStatusExpiringShortly = HostCertificateManagerCertificateInfoCertificateStatus("expiringShortly") + HostCertificateManagerCertificateInfoCertificateStatusExpirationImminent = 
HostCertificateManagerCertificateInfoCertificateStatus("expirationImminent") + HostCertificateManagerCertificateInfoCertificateStatusGood = HostCertificateManagerCertificateInfoCertificateStatus("good") +) + +func init() { + t["HostCertificateManagerCertificateInfoCertificateStatus"] = reflect.TypeOf((*HostCertificateManagerCertificateInfoCertificateStatus)(nil)).Elem() +} + +type HostConfigChangeMode string + +const ( + HostConfigChangeModeModify = HostConfigChangeMode("modify") + HostConfigChangeModeReplace = HostConfigChangeMode("replace") +) + +func init() { + t["HostConfigChangeMode"] = reflect.TypeOf((*HostConfigChangeMode)(nil)).Elem() +} + +type HostConfigChangeOperation string + +const ( + HostConfigChangeOperationAdd = HostConfigChangeOperation("add") + HostConfigChangeOperationRemove = HostConfigChangeOperation("remove") + HostConfigChangeOperationEdit = HostConfigChangeOperation("edit") +) + +func init() { + t["HostConfigChangeOperation"] = reflect.TypeOf((*HostConfigChangeOperation)(nil)).Elem() +} + +type HostCpuPackageVendor string + +const ( + HostCpuPackageVendorUnknown = HostCpuPackageVendor("unknown") + HostCpuPackageVendorIntel = HostCpuPackageVendor("intel") + HostCpuPackageVendorAmd = HostCpuPackageVendor("amd") +) + +func init() { + t["HostCpuPackageVendor"] = reflect.TypeOf((*HostCpuPackageVendor)(nil)).Elem() +} + +type HostCpuPowerManagementInfoPolicyType string + +const ( + HostCpuPowerManagementInfoPolicyTypeOff = HostCpuPowerManagementInfoPolicyType("off") + HostCpuPowerManagementInfoPolicyTypeStaticPolicy = HostCpuPowerManagementInfoPolicyType("staticPolicy") + HostCpuPowerManagementInfoPolicyTypeDynamicPolicy = HostCpuPowerManagementInfoPolicyType("dynamicPolicy") +) + +func init() { + t["HostCpuPowerManagementInfoPolicyType"] = reflect.TypeOf((*HostCpuPowerManagementInfoPolicyType)(nil)).Elem() +} + +type HostDasErrorEventHostDasErrorReason string + +const ( + HostDasErrorEventHostDasErrorReasonConfigFailed = HostDasErrorEventHostDasErrorReason("configFailed") + HostDasErrorEventHostDasErrorReasonTimeout = HostDasErrorEventHostDasErrorReason("timeout") + HostDasErrorEventHostDasErrorReasonCommunicationInitFailed = HostDasErrorEventHostDasErrorReason("communicationInitFailed") + HostDasErrorEventHostDasErrorReasonHealthCheckScriptFailed = HostDasErrorEventHostDasErrorReason("healthCheckScriptFailed") + HostDasErrorEventHostDasErrorReasonAgentFailed = HostDasErrorEventHostDasErrorReason("agentFailed") + HostDasErrorEventHostDasErrorReasonAgentShutdown = HostDasErrorEventHostDasErrorReason("agentShutdown") + HostDasErrorEventHostDasErrorReasonIsolationAddressUnpingable = HostDasErrorEventHostDasErrorReason("isolationAddressUnpingable") + HostDasErrorEventHostDasErrorReasonOther = HostDasErrorEventHostDasErrorReason("other") +) + +func init() { + t["HostDasErrorEventHostDasErrorReason"] = reflect.TypeOf((*HostDasErrorEventHostDasErrorReason)(nil)).Elem() +} + +type HostDigestInfoDigestMethodType string + +const ( + HostDigestInfoDigestMethodTypeSHA1 = HostDigestInfoDigestMethodType("SHA1") + HostDigestInfoDigestMethodTypeMD5 = HostDigestInfoDigestMethodType("MD5") +) + +func init() { + t["HostDigestInfoDigestMethodType"] = reflect.TypeOf((*HostDigestInfoDigestMethodType)(nil)).Elem() +} + +type HostDisconnectedEventReasonCode string + +const ( + HostDisconnectedEventReasonCodeSslThumbprintVerifyFailed = HostDisconnectedEventReasonCode("sslThumbprintVerifyFailed") + HostDisconnectedEventReasonCodeLicenseExpired = HostDisconnectedEventReasonCode("licenseExpired") 
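+	// Illustrative only: callers typically compare these typed constants
+	// against the raw reason string carried by the event, converting the
+	// constant at the comparison site (the variable below is hypothetical):
+	//
+	//	if reason == string(HostDisconnectedEventReasonCodeLicenseExpired) {
+	//		// the host was disconnected because its license expired
+	//	}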
+ HostDisconnectedEventReasonCodeAgentUpgrade = HostDisconnectedEventReasonCode("agentUpgrade") + HostDisconnectedEventReasonCodeUserRequest = HostDisconnectedEventReasonCode("userRequest") + HostDisconnectedEventReasonCodeInsufficientLicenses = HostDisconnectedEventReasonCode("insufficientLicenses") + HostDisconnectedEventReasonCodeAgentOutOfDate = HostDisconnectedEventReasonCode("agentOutOfDate") + HostDisconnectedEventReasonCodePasswordDecryptFailure = HostDisconnectedEventReasonCode("passwordDecryptFailure") + HostDisconnectedEventReasonCodeUnknown = HostDisconnectedEventReasonCode("unknown") + HostDisconnectedEventReasonCodeVcVRAMCapacityExceeded = HostDisconnectedEventReasonCode("vcVRAMCapacityExceeded") +) + +func init() { + t["HostDisconnectedEventReasonCode"] = reflect.TypeOf((*HostDisconnectedEventReasonCode)(nil)).Elem() +} + +type HostDiskPartitionInfoPartitionFormat string + +const ( + HostDiskPartitionInfoPartitionFormatGpt = HostDiskPartitionInfoPartitionFormat("gpt") + HostDiskPartitionInfoPartitionFormatMbr = HostDiskPartitionInfoPartitionFormat("mbr") + HostDiskPartitionInfoPartitionFormatUnknown = HostDiskPartitionInfoPartitionFormat("unknown") +) + +func init() { + t["HostDiskPartitionInfoPartitionFormat"] = reflect.TypeOf((*HostDiskPartitionInfoPartitionFormat)(nil)).Elem() +} + +type HostDiskPartitionInfoType string + +const ( + HostDiskPartitionInfoTypeNone = HostDiskPartitionInfoType("none") + HostDiskPartitionInfoTypeVmfs = HostDiskPartitionInfoType("vmfs") + HostDiskPartitionInfoTypeLinuxNative = HostDiskPartitionInfoType("linuxNative") + HostDiskPartitionInfoTypeLinuxSwap = HostDiskPartitionInfoType("linuxSwap") + HostDiskPartitionInfoTypeExtended = HostDiskPartitionInfoType("extended") + HostDiskPartitionInfoTypeNtfs = HostDiskPartitionInfoType("ntfs") + HostDiskPartitionInfoTypeVmkDiagnostic = HostDiskPartitionInfoType("vmkDiagnostic") + HostDiskPartitionInfoTypeVffs = HostDiskPartitionInfoType("vffs") +) + +func init() { + t["HostDiskPartitionInfoType"] = reflect.TypeOf((*HostDiskPartitionInfoType)(nil)).Elem() +} + +type HostFeatureVersionKey string + +const ( + HostFeatureVersionKeyFaultTolerance = HostFeatureVersionKey("faultTolerance") +) + +func init() { + t["HostFeatureVersionKey"] = reflect.TypeOf((*HostFeatureVersionKey)(nil)).Elem() +} + +type HostFileSystemVolumeFileSystemType string + +const ( + HostFileSystemVolumeFileSystemTypeVMFS = HostFileSystemVolumeFileSystemType("VMFS") + HostFileSystemVolumeFileSystemTypeNFS = HostFileSystemVolumeFileSystemType("NFS") + HostFileSystemVolumeFileSystemTypeNFS41 = HostFileSystemVolumeFileSystemType("NFS41") + HostFileSystemVolumeFileSystemTypeCIFS = HostFileSystemVolumeFileSystemType("CIFS") + HostFileSystemVolumeFileSystemTypeVsan = HostFileSystemVolumeFileSystemType("vsan") + HostFileSystemVolumeFileSystemTypeVFFS = HostFileSystemVolumeFileSystemType("VFFS") + HostFileSystemVolumeFileSystemTypeVVOL = HostFileSystemVolumeFileSystemType("VVOL") + HostFileSystemVolumeFileSystemTypeOTHER = HostFileSystemVolumeFileSystemType("OTHER") +) + +func init() { + t["HostFileSystemVolumeFileSystemType"] = reflect.TypeOf((*HostFileSystemVolumeFileSystemType)(nil)).Elem() +} + +type HostFirewallRuleDirection string + +const ( + HostFirewallRuleDirectionInbound = HostFirewallRuleDirection("inbound") + HostFirewallRuleDirectionOutbound = HostFirewallRuleDirection("outbound") +) + +func init() { + t["HostFirewallRuleDirection"] = reflect.TypeOf((*HostFirewallRuleDirection)(nil)).Elem() +} + +type HostFirewallRulePortType 
string + +const ( + HostFirewallRulePortTypeSrc = HostFirewallRulePortType("src") + HostFirewallRulePortTypeDst = HostFirewallRulePortType("dst") +) + +func init() { + t["HostFirewallRulePortType"] = reflect.TypeOf((*HostFirewallRulePortType)(nil)).Elem() +} + +type HostFirewallRuleProtocol string + +const ( + HostFirewallRuleProtocolTcp = HostFirewallRuleProtocol("tcp") + HostFirewallRuleProtocolUdp = HostFirewallRuleProtocol("udp") +) + +func init() { + t["HostFirewallRuleProtocol"] = reflect.TypeOf((*HostFirewallRuleProtocol)(nil)).Elem() +} + +type HostGraphicsInfoGraphicsType string + +const ( + HostGraphicsInfoGraphicsTypeBasic = HostGraphicsInfoGraphicsType("basic") + HostGraphicsInfoGraphicsTypeShared = HostGraphicsInfoGraphicsType("shared") + HostGraphicsInfoGraphicsTypeDirect = HostGraphicsInfoGraphicsType("direct") +) + +func init() { + t["HostGraphicsInfoGraphicsType"] = reflect.TypeOf((*HostGraphicsInfoGraphicsType)(nil)).Elem() +} + +type HostHardwareElementStatus string + +const ( + HostHardwareElementStatusUnknown = HostHardwareElementStatus("Unknown") + HostHardwareElementStatusGreen = HostHardwareElementStatus("Green") + HostHardwareElementStatusYellow = HostHardwareElementStatus("Yellow") + HostHardwareElementStatusRed = HostHardwareElementStatus("Red") +) + +func init() { + t["HostHardwareElementStatus"] = reflect.TypeOf((*HostHardwareElementStatus)(nil)).Elem() +} + +type HostHasComponentFailureHostComponentType string + +const ( + HostHasComponentFailureHostComponentTypeDatastore = HostHasComponentFailureHostComponentType("Datastore") +) + +func init() { + t["HostHasComponentFailureHostComponentType"] = reflect.TypeOf((*HostHasComponentFailureHostComponentType)(nil)).Elem() +} + +type HostImageAcceptanceLevel string + +const ( + HostImageAcceptanceLevelVmware_certified = HostImageAcceptanceLevel("vmware_certified") + HostImageAcceptanceLevelVmware_accepted = HostImageAcceptanceLevel("vmware_accepted") + HostImageAcceptanceLevelPartner = HostImageAcceptanceLevel("partner") + HostImageAcceptanceLevelCommunity = HostImageAcceptanceLevel("community") +) + +func init() { + t["HostImageAcceptanceLevel"] = reflect.TypeOf((*HostImageAcceptanceLevel)(nil)).Elem() +} + +type HostIncompatibleForFaultToleranceReason string + +const ( + HostIncompatibleForFaultToleranceReasonProduct = HostIncompatibleForFaultToleranceReason("product") + HostIncompatibleForFaultToleranceReasonProcessor = HostIncompatibleForFaultToleranceReason("processor") +) + +func init() { + t["HostIncompatibleForFaultToleranceReason"] = reflect.TypeOf((*HostIncompatibleForFaultToleranceReason)(nil)).Elem() +} + +type HostIncompatibleForRecordReplayReason string + +const ( + HostIncompatibleForRecordReplayReasonProduct = HostIncompatibleForRecordReplayReason("product") + HostIncompatibleForRecordReplayReasonProcessor = HostIncompatibleForRecordReplayReason("processor") +) + +func init() { + t["HostIncompatibleForRecordReplayReason"] = reflect.TypeOf((*HostIncompatibleForRecordReplayReason)(nil)).Elem() +} + +type HostInternetScsiHbaChapAuthenticationType string + +const ( + HostInternetScsiHbaChapAuthenticationTypeChapProhibited = HostInternetScsiHbaChapAuthenticationType("chapProhibited") + HostInternetScsiHbaChapAuthenticationTypeChapDiscouraged = HostInternetScsiHbaChapAuthenticationType("chapDiscouraged") + HostInternetScsiHbaChapAuthenticationTypeChapPreferred = HostInternetScsiHbaChapAuthenticationType("chapPreferred") + HostInternetScsiHbaChapAuthenticationTypeChapRequired = 
HostInternetScsiHbaChapAuthenticationType("chapRequired") +) + +func init() { + t["HostInternetScsiHbaChapAuthenticationType"] = reflect.TypeOf((*HostInternetScsiHbaChapAuthenticationType)(nil)).Elem() +} + +type HostInternetScsiHbaDigestType string + +const ( + HostInternetScsiHbaDigestTypeDigestProhibited = HostInternetScsiHbaDigestType("digestProhibited") + HostInternetScsiHbaDigestTypeDigestDiscouraged = HostInternetScsiHbaDigestType("digestDiscouraged") + HostInternetScsiHbaDigestTypeDigestPreferred = HostInternetScsiHbaDigestType("digestPreferred") + HostInternetScsiHbaDigestTypeDigestRequired = HostInternetScsiHbaDigestType("digestRequired") +) + +func init() { + t["HostInternetScsiHbaDigestType"] = reflect.TypeOf((*HostInternetScsiHbaDigestType)(nil)).Elem() +} + +type HostInternetScsiHbaIscsiIpv6AddressAddressConfigurationType string + +const ( + HostInternetScsiHbaIscsiIpv6AddressAddressConfigurationTypeDHCP = HostInternetScsiHbaIscsiIpv6AddressAddressConfigurationType("DHCP") + HostInternetScsiHbaIscsiIpv6AddressAddressConfigurationTypeAutoConfigured = HostInternetScsiHbaIscsiIpv6AddressAddressConfigurationType("AutoConfigured") + HostInternetScsiHbaIscsiIpv6AddressAddressConfigurationTypeStatic = HostInternetScsiHbaIscsiIpv6AddressAddressConfigurationType("Static") + HostInternetScsiHbaIscsiIpv6AddressAddressConfigurationTypeOther = HostInternetScsiHbaIscsiIpv6AddressAddressConfigurationType("Other") +) + +func init() { + t["HostInternetScsiHbaIscsiIpv6AddressAddressConfigurationType"] = reflect.TypeOf((*HostInternetScsiHbaIscsiIpv6AddressAddressConfigurationType)(nil)).Elem() +} + +type HostInternetScsiHbaIscsiIpv6AddressIPv6AddressOperation string + +const ( + HostInternetScsiHbaIscsiIpv6AddressIPv6AddressOperationAdd = HostInternetScsiHbaIscsiIpv6AddressIPv6AddressOperation("add") + HostInternetScsiHbaIscsiIpv6AddressIPv6AddressOperationRemove = HostInternetScsiHbaIscsiIpv6AddressIPv6AddressOperation("remove") +) + +func init() { + t["HostInternetScsiHbaIscsiIpv6AddressIPv6AddressOperation"] = reflect.TypeOf((*HostInternetScsiHbaIscsiIpv6AddressIPv6AddressOperation)(nil)).Elem() +} + +type HostInternetScsiHbaNetworkBindingSupportType string + +const ( + HostInternetScsiHbaNetworkBindingSupportTypeNotsupported = HostInternetScsiHbaNetworkBindingSupportType("notsupported") + HostInternetScsiHbaNetworkBindingSupportTypeOptional = HostInternetScsiHbaNetworkBindingSupportType("optional") + HostInternetScsiHbaNetworkBindingSupportTypeRequired = HostInternetScsiHbaNetworkBindingSupportType("required") +) + +func init() { + t["HostInternetScsiHbaNetworkBindingSupportType"] = reflect.TypeOf((*HostInternetScsiHbaNetworkBindingSupportType)(nil)).Elem() +} + +type HostInternetScsiHbaStaticTargetTargetDiscoveryMethod string + +const ( + HostInternetScsiHbaStaticTargetTargetDiscoveryMethodStaticMethod = HostInternetScsiHbaStaticTargetTargetDiscoveryMethod("staticMethod") + HostInternetScsiHbaStaticTargetTargetDiscoveryMethodSendTargetMethod = HostInternetScsiHbaStaticTargetTargetDiscoveryMethod("sendTargetMethod") + HostInternetScsiHbaStaticTargetTargetDiscoveryMethodSlpMethod = HostInternetScsiHbaStaticTargetTargetDiscoveryMethod("slpMethod") + HostInternetScsiHbaStaticTargetTargetDiscoveryMethodIsnsMethod = HostInternetScsiHbaStaticTargetTargetDiscoveryMethod("isnsMethod") + HostInternetScsiHbaStaticTargetTargetDiscoveryMethodUnknownMethod = HostInternetScsiHbaStaticTargetTargetDiscoveryMethod("unknownMethod") +) + +func init() { + 
t["HostInternetScsiHbaStaticTargetTargetDiscoveryMethod"] = reflect.TypeOf((*HostInternetScsiHbaStaticTargetTargetDiscoveryMethod)(nil)).Elem() +} + +type HostIpConfigIpV6AddressConfigType string + +const ( + HostIpConfigIpV6AddressConfigTypeOther = HostIpConfigIpV6AddressConfigType("other") + HostIpConfigIpV6AddressConfigTypeManual = HostIpConfigIpV6AddressConfigType("manual") + HostIpConfigIpV6AddressConfigTypeDhcp = HostIpConfigIpV6AddressConfigType("dhcp") + HostIpConfigIpV6AddressConfigTypeLinklayer = HostIpConfigIpV6AddressConfigType("linklayer") + HostIpConfigIpV6AddressConfigTypeRandom = HostIpConfigIpV6AddressConfigType("random") +) + +func init() { + t["HostIpConfigIpV6AddressConfigType"] = reflect.TypeOf((*HostIpConfigIpV6AddressConfigType)(nil)).Elem() +} + +type HostIpConfigIpV6AddressStatus string + +const ( + HostIpConfigIpV6AddressStatusPreferred = HostIpConfigIpV6AddressStatus("preferred") + HostIpConfigIpV6AddressStatusDeprecated = HostIpConfigIpV6AddressStatus("deprecated") + HostIpConfigIpV6AddressStatusInvalid = HostIpConfigIpV6AddressStatus("invalid") + HostIpConfigIpV6AddressStatusInaccessible = HostIpConfigIpV6AddressStatus("inaccessible") + HostIpConfigIpV6AddressStatusUnknown = HostIpConfigIpV6AddressStatus("unknown") + HostIpConfigIpV6AddressStatusTentative = HostIpConfigIpV6AddressStatus("tentative") + HostIpConfigIpV6AddressStatusDuplicate = HostIpConfigIpV6AddressStatus("duplicate") +) + +func init() { + t["HostIpConfigIpV6AddressStatus"] = reflect.TypeOf((*HostIpConfigIpV6AddressStatus)(nil)).Elem() +} + +type HostLicensableResourceKey string + +const ( + HostLicensableResourceKeyNumCpuPackages = HostLicensableResourceKey("numCpuPackages") + HostLicensableResourceKeyNumCpuCores = HostLicensableResourceKey("numCpuCores") + HostLicensableResourceKeyMemorySize = HostLicensableResourceKey("memorySize") + HostLicensableResourceKeyMemoryForVms = HostLicensableResourceKey("memoryForVms") + HostLicensableResourceKeyNumVmsStarted = HostLicensableResourceKey("numVmsStarted") + HostLicensableResourceKeyNumVmsStarting = HostLicensableResourceKey("numVmsStarting") +) + +func init() { + t["HostLicensableResourceKey"] = reflect.TypeOf((*HostLicensableResourceKey)(nil)).Elem() +} + +type HostLockdownMode string + +const ( + HostLockdownModeLockdownDisabled = HostLockdownMode("lockdownDisabled") + HostLockdownModeLockdownNormal = HostLockdownMode("lockdownNormal") + HostLockdownModeLockdownStrict = HostLockdownMode("lockdownStrict") +) + +func init() { + t["HostLockdownMode"] = reflect.TypeOf((*HostLockdownMode)(nil)).Elem() +} + +type HostLowLevelProvisioningManagerFileType string + +const ( + HostLowLevelProvisioningManagerFileTypeFile = HostLowLevelProvisioningManagerFileType("File") + HostLowLevelProvisioningManagerFileTypeVirtualDisk = HostLowLevelProvisioningManagerFileType("VirtualDisk") + HostLowLevelProvisioningManagerFileTypeDirectory = HostLowLevelProvisioningManagerFileType("Directory") +) + +func init() { + t["HostLowLevelProvisioningManagerFileType"] = reflect.TypeOf((*HostLowLevelProvisioningManagerFileType)(nil)).Elem() +} + +type HostLowLevelProvisioningManagerReloadTarget string + +const ( + HostLowLevelProvisioningManagerReloadTargetCurrentConfig = HostLowLevelProvisioningManagerReloadTarget("currentConfig") + HostLowLevelProvisioningManagerReloadTargetSnapshotConfig = HostLowLevelProvisioningManagerReloadTarget("snapshotConfig") +) + +func init() { + t["HostLowLevelProvisioningManagerReloadTarget"] = 
reflect.TypeOf((*HostLowLevelProvisioningManagerReloadTarget)(nil)).Elem() +} + +type HostMountInfoInaccessibleReason string + +const ( + HostMountInfoInaccessibleReasonAllPathsDown_Start = HostMountInfoInaccessibleReason("AllPathsDown_Start") + HostMountInfoInaccessibleReasonAllPathsDown_Timeout = HostMountInfoInaccessibleReason("AllPathsDown_Timeout") + HostMountInfoInaccessibleReasonPermanentDeviceLoss = HostMountInfoInaccessibleReason("PermanentDeviceLoss") +) + +func init() { + t["HostMountInfoInaccessibleReason"] = reflect.TypeOf((*HostMountInfoInaccessibleReason)(nil)).Elem() +} + +type HostMountMode string + +const ( + HostMountModeReadWrite = HostMountMode("readWrite") + HostMountModeReadOnly = HostMountMode("readOnly") +) + +func init() { + t["HostMountMode"] = reflect.TypeOf((*HostMountMode)(nil)).Elem() +} + +type HostNasVolumeSecurityType string + +const ( + HostNasVolumeSecurityTypeAUTH_SYS = HostNasVolumeSecurityType("AUTH_SYS") + HostNasVolumeSecurityTypeSEC_KRB5 = HostNasVolumeSecurityType("SEC_KRB5") +) + +func init() { + t["HostNasVolumeSecurityType"] = reflect.TypeOf((*HostNasVolumeSecurityType)(nil)).Elem() +} + +type HostNetStackInstanceCongestionControlAlgorithmType string + +const ( + HostNetStackInstanceCongestionControlAlgorithmTypeNewreno = HostNetStackInstanceCongestionControlAlgorithmType("newreno") + HostNetStackInstanceCongestionControlAlgorithmTypeCubic = HostNetStackInstanceCongestionControlAlgorithmType("cubic") +) + +func init() { + t["HostNetStackInstanceCongestionControlAlgorithmType"] = reflect.TypeOf((*HostNetStackInstanceCongestionControlAlgorithmType)(nil)).Elem() +} + +type HostNetStackInstanceSystemStackKey string + +const ( + HostNetStackInstanceSystemStackKeyDefaultTcpipStack = HostNetStackInstanceSystemStackKey("defaultTcpipStack") + HostNetStackInstanceSystemStackKeyVmotion = HostNetStackInstanceSystemStackKey("vmotion") + HostNetStackInstanceSystemStackKeyVSphereProvisioning = HostNetStackInstanceSystemStackKey("vSphereProvisioning") +) + +func init() { + t["HostNetStackInstanceSystemStackKey"] = reflect.TypeOf((*HostNetStackInstanceSystemStackKey)(nil)).Elem() +} + +type HostNumericSensorHealthState string + +const ( + HostNumericSensorHealthStateUnknown = HostNumericSensorHealthState("unknown") + HostNumericSensorHealthStateGreen = HostNumericSensorHealthState("green") + HostNumericSensorHealthStateYellow = HostNumericSensorHealthState("yellow") + HostNumericSensorHealthStateRed = HostNumericSensorHealthState("red") +) + +func init() { + t["HostNumericSensorHealthState"] = reflect.TypeOf((*HostNumericSensorHealthState)(nil)).Elem() +} + +type HostNumericSensorType string + +const ( + HostNumericSensorTypeFan = HostNumericSensorType("fan") + HostNumericSensorTypePower = HostNumericSensorType("power") + HostNumericSensorTypeTemperature = HostNumericSensorType("temperature") + HostNumericSensorTypeVoltage = HostNumericSensorType("voltage") + HostNumericSensorTypeOther = HostNumericSensorType("other") +) + +func init() { + t["HostNumericSensorType"] = reflect.TypeOf((*HostNumericSensorType)(nil)).Elem() +} + +type HostOpaqueSwitchOpaqueSwitchState string + +const ( + HostOpaqueSwitchOpaqueSwitchStateUp = HostOpaqueSwitchOpaqueSwitchState("up") + HostOpaqueSwitchOpaqueSwitchStateWarning = HostOpaqueSwitchOpaqueSwitchState("warning") + HostOpaqueSwitchOpaqueSwitchStateDown = HostOpaqueSwitchOpaqueSwitchState("down") +) + +func init() { + t["HostOpaqueSwitchOpaqueSwitchState"] = reflect.TypeOf((*HostOpaqueSwitchOpaqueSwitchState)(nil)).Elem() +} + 
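The vendored govmomi types above all follow one pattern: a string-backed enum type, a const block of its allowed values, and an init() that registers the type's reflect.Type in the package-level map t, presumably so the SOAP/XML decoder can recover a concrete Go type from a vSphere API type name. A minimal standalone sketch of that registry pattern, borrowing HostMountMode from the diff (the local map t and the main wrapper here are illustrative only, not the vendored package's actual wiring):

    package main

    import (
        "fmt"
        "reflect"
    )

    // String-backed enum, mirroring the vendored govmomi style.
    type HostMountMode string

    const (
        HostMountModeReadWrite = HostMountMode("readWrite")
        HostMountModeReadOnly  = HostMountMode("readOnly")
    )

    // t maps API type names to Go types, same idea as the vendored
    // package's registry.
    var t = map[string]reflect.Type{}

    func init() {
        // (*T)(nil) yields the type without constructing a value;
        // Elem() unwraps the pointer type to get T itself.
        t["HostMountMode"] = reflect.TypeOf((*HostMountMode)(nil)).Elem()
    }

    func main() {
        v := reflect.New(t["HostMountMode"]).Elem() // addressable zero value of the registered type
        v.SetString("readOnly")                     // legal: the underlying kind is string
        fmt.Printf("%v (%T)\n", v.Interface(), v.Interface()) // readOnly (main.HostMountMode)
    }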
+type HostPatchManagerInstallState string + +const ( + HostPatchManagerInstallStateHostRestarted = HostPatchManagerInstallState("hostRestarted") + HostPatchManagerInstallStateImageActive = HostPatchManagerInstallState("imageActive") +) + +func init() { + t["HostPatchManagerInstallState"] = reflect.TypeOf((*HostPatchManagerInstallState)(nil)).Elem() +} + +type HostPatchManagerIntegrityStatus string + +const ( + HostPatchManagerIntegrityStatusValidated = HostPatchManagerIntegrityStatus("validated") + HostPatchManagerIntegrityStatusKeyNotFound = HostPatchManagerIntegrityStatus("keyNotFound") + HostPatchManagerIntegrityStatusKeyRevoked = HostPatchManagerIntegrityStatus("keyRevoked") + HostPatchManagerIntegrityStatusKeyExpired = HostPatchManagerIntegrityStatus("keyExpired") + HostPatchManagerIntegrityStatusDigestMismatch = HostPatchManagerIntegrityStatus("digestMismatch") + HostPatchManagerIntegrityStatusNotEnoughSignatures = HostPatchManagerIntegrityStatus("notEnoughSignatures") + HostPatchManagerIntegrityStatusValidationError = HostPatchManagerIntegrityStatus("validationError") +) + +func init() { + t["HostPatchManagerIntegrityStatus"] = reflect.TypeOf((*HostPatchManagerIntegrityStatus)(nil)).Elem() +} + +type HostPatchManagerReason string + +const ( + HostPatchManagerReasonObsoleted = HostPatchManagerReason("obsoleted") + HostPatchManagerReasonMissingPatch = HostPatchManagerReason("missingPatch") + HostPatchManagerReasonMissingLib = HostPatchManagerReason("missingLib") + HostPatchManagerReasonHasDependentPatch = HostPatchManagerReason("hasDependentPatch") + HostPatchManagerReasonConflictPatch = HostPatchManagerReason("conflictPatch") + HostPatchManagerReasonConflictLib = HostPatchManagerReason("conflictLib") +) + +func init() { + t["HostPatchManagerReason"] = reflect.TypeOf((*HostPatchManagerReason)(nil)).Elem() +} + +type HostPowerOperationType string + +const ( + HostPowerOperationTypePowerOn = HostPowerOperationType("powerOn") + HostPowerOperationTypePowerOff = HostPowerOperationType("powerOff") +) + +func init() { + t["HostPowerOperationType"] = reflect.TypeOf((*HostPowerOperationType)(nil)).Elem() +} + +type HostProfileManagerAnswerFileStatus string + +const ( + HostProfileManagerAnswerFileStatusValid = HostProfileManagerAnswerFileStatus("valid") + HostProfileManagerAnswerFileStatusInvalid = HostProfileManagerAnswerFileStatus("invalid") + HostProfileManagerAnswerFileStatusUnknown = HostProfileManagerAnswerFileStatus("unknown") +) + +func init() { + t["HostProfileManagerAnswerFileStatus"] = reflect.TypeOf((*HostProfileManagerAnswerFileStatus)(nil)).Elem() +} + +type HostProfileManagerTaskListRequirement string + +const ( + HostProfileManagerTaskListRequirementMaintenanceModeRequired = HostProfileManagerTaskListRequirement("maintenanceModeRequired") + HostProfileManagerTaskListRequirementRebootRequired = HostProfileManagerTaskListRequirement("rebootRequired") +) + +func init() { + t["HostProfileManagerTaskListRequirement"] = reflect.TypeOf((*HostProfileManagerTaskListRequirement)(nil)).Elem() +} + +type HostProtocolEndpointPEType string + +const ( + HostProtocolEndpointPETypeBlock = HostProtocolEndpointPEType("block") + HostProtocolEndpointPETypeNas = HostProtocolEndpointPEType("nas") +) + +func init() { + t["HostProtocolEndpointPEType"] = reflect.TypeOf((*HostProtocolEndpointPEType)(nil)).Elem() +} + +type HostReplayUnsupportedReason string + +const ( + HostReplayUnsupportedReasonIncompatibleProduct = HostReplayUnsupportedReason("incompatibleProduct") + 
HostReplayUnsupportedReasonIncompatibleCpu = HostReplayUnsupportedReason("incompatibleCpu") + HostReplayUnsupportedReasonHvDisabled = HostReplayUnsupportedReason("hvDisabled") + HostReplayUnsupportedReasonCpuidLimitSet = HostReplayUnsupportedReason("cpuidLimitSet") + HostReplayUnsupportedReasonOldBIOS = HostReplayUnsupportedReason("oldBIOS") + HostReplayUnsupportedReasonUnknown = HostReplayUnsupportedReason("unknown") +) + +func init() { + t["HostReplayUnsupportedReason"] = reflect.TypeOf((*HostReplayUnsupportedReason)(nil)).Elem() +} + +type HostRuntimeInfoNetStackInstanceRuntimeInfoState string + +const ( + HostRuntimeInfoNetStackInstanceRuntimeInfoStateInactive = HostRuntimeInfoNetStackInstanceRuntimeInfoState("inactive") + HostRuntimeInfoNetStackInstanceRuntimeInfoStateActive = HostRuntimeInfoNetStackInstanceRuntimeInfoState("active") + HostRuntimeInfoNetStackInstanceRuntimeInfoStateDeactivating = HostRuntimeInfoNetStackInstanceRuntimeInfoState("deactivating") + HostRuntimeInfoNetStackInstanceRuntimeInfoStateActivating = HostRuntimeInfoNetStackInstanceRuntimeInfoState("activating") +) + +func init() { + t["HostRuntimeInfoNetStackInstanceRuntimeInfoState"] = reflect.TypeOf((*HostRuntimeInfoNetStackInstanceRuntimeInfoState)(nil)).Elem() +} + +type HostServicePolicy string + +const ( + HostServicePolicyOn = HostServicePolicy("on") + HostServicePolicyAutomatic = HostServicePolicy("automatic") + HostServicePolicyOff = HostServicePolicy("off") +) + +func init() { + t["HostServicePolicy"] = reflect.TypeOf((*HostServicePolicy)(nil)).Elem() +} + +type HostSnmpAgentCapability string + +const ( + HostSnmpAgentCapabilityCOMPLETE = HostSnmpAgentCapability("COMPLETE") + HostSnmpAgentCapabilityDIAGNOSTICS = HostSnmpAgentCapability("DIAGNOSTICS") + HostSnmpAgentCapabilityCONFIGURATION = HostSnmpAgentCapability("CONFIGURATION") +) + +func init() { + t["HostSnmpAgentCapability"] = reflect.TypeOf((*HostSnmpAgentCapability)(nil)).Elem() +} + +type HostStandbyMode string + +const ( + HostStandbyModeEntering = HostStandbyMode("entering") + HostStandbyModeExiting = HostStandbyMode("exiting") + HostStandbyModeIn = HostStandbyMode("in") + HostStandbyModeNone = HostStandbyMode("none") +) + +func init() { + t["HostStandbyMode"] = reflect.TypeOf((*HostStandbyMode)(nil)).Elem() +} + +type HostSystemConnectionState string + +const ( + HostSystemConnectionStateConnected = HostSystemConnectionState("connected") + HostSystemConnectionStateNotResponding = HostSystemConnectionState("notResponding") + HostSystemConnectionStateDisconnected = HostSystemConnectionState("disconnected") +) + +func init() { + t["HostSystemConnectionState"] = reflect.TypeOf((*HostSystemConnectionState)(nil)).Elem() +} + +type HostSystemIdentificationInfoIdentifier string + +const ( + HostSystemIdentificationInfoIdentifierAssetTag = HostSystemIdentificationInfoIdentifier("AssetTag") + HostSystemIdentificationInfoIdentifierServiceTag = HostSystemIdentificationInfoIdentifier("ServiceTag") + HostSystemIdentificationInfoIdentifierOemSpecificString = HostSystemIdentificationInfoIdentifier("OemSpecificString") +) + +func init() { + t["HostSystemIdentificationInfoIdentifier"] = reflect.TypeOf((*HostSystemIdentificationInfoIdentifier)(nil)).Elem() +} + +type HostSystemPowerState string + +const ( + HostSystemPowerStatePoweredOn = HostSystemPowerState("poweredOn") + HostSystemPowerStatePoweredOff = HostSystemPowerState("poweredOff") + HostSystemPowerStateStandBy = HostSystemPowerState("standBy") + HostSystemPowerStateUnknown = 
HostSystemPowerState("unknown") +) + +func init() { + t["HostSystemPowerState"] = reflect.TypeOf((*HostSystemPowerState)(nil)).Elem() +} + +type HostUnresolvedVmfsExtentUnresolvedReason string + +const ( + HostUnresolvedVmfsExtentUnresolvedReasonDiskIdMismatch = HostUnresolvedVmfsExtentUnresolvedReason("diskIdMismatch") + HostUnresolvedVmfsExtentUnresolvedReasonUuidConflict = HostUnresolvedVmfsExtentUnresolvedReason("uuidConflict") +) + +func init() { + t["HostUnresolvedVmfsExtentUnresolvedReason"] = reflect.TypeOf((*HostUnresolvedVmfsExtentUnresolvedReason)(nil)).Elem() +} + +type HostUnresolvedVmfsResolutionSpecVmfsUuidResolution string + +const ( + HostUnresolvedVmfsResolutionSpecVmfsUuidResolutionResignature = HostUnresolvedVmfsResolutionSpecVmfsUuidResolution("resignature") + HostUnresolvedVmfsResolutionSpecVmfsUuidResolutionForceMount = HostUnresolvedVmfsResolutionSpecVmfsUuidResolution("forceMount") +) + +func init() { + t["HostUnresolvedVmfsResolutionSpecVmfsUuidResolution"] = reflect.TypeOf((*HostUnresolvedVmfsResolutionSpecVmfsUuidResolution)(nil)).Elem() +} + +type HostVirtualNicManagerNicType string + +const ( + HostVirtualNicManagerNicTypeVmotion = HostVirtualNicManagerNicType("vmotion") + HostVirtualNicManagerNicTypeFaultToleranceLogging = HostVirtualNicManagerNicType("faultToleranceLogging") + HostVirtualNicManagerNicTypeVSphereReplication = HostVirtualNicManagerNicType("vSphereReplication") + HostVirtualNicManagerNicTypeVSphereReplicationNFC = HostVirtualNicManagerNicType("vSphereReplicationNFC") + HostVirtualNicManagerNicTypeManagement = HostVirtualNicManagerNicType("management") + HostVirtualNicManagerNicTypeVsan = HostVirtualNicManagerNicType("vsan") + HostVirtualNicManagerNicTypeVSphereProvisioning = HostVirtualNicManagerNicType("vSphereProvisioning") +) + +func init() { + t["HostVirtualNicManagerNicType"] = reflect.TypeOf((*HostVirtualNicManagerNicType)(nil)).Elem() +} + +type HostVmciAccessManagerMode string + +const ( + HostVmciAccessManagerModeGrant = HostVmciAccessManagerMode("grant") + HostVmciAccessManagerModeReplace = HostVmciAccessManagerMode("replace") + HostVmciAccessManagerModeRevoke = HostVmciAccessManagerMode("revoke") +) + +func init() { + t["HostVmciAccessManagerMode"] = reflect.TypeOf((*HostVmciAccessManagerMode)(nil)).Elem() +} + +type HttpNfcLeaseState string + +const ( + HttpNfcLeaseStateInitializing = HttpNfcLeaseState("initializing") + HttpNfcLeaseStateReady = HttpNfcLeaseState("ready") + HttpNfcLeaseStateDone = HttpNfcLeaseState("done") + HttpNfcLeaseStateError = HttpNfcLeaseState("error") +) + +func init() { + t["HttpNfcLeaseState"] = reflect.TypeOf((*HttpNfcLeaseState)(nil)).Elem() +} + +type IncompatibleHostForVmReplicationIncompatibleReason string + +const ( + IncompatibleHostForVmReplicationIncompatibleReasonRpo = IncompatibleHostForVmReplicationIncompatibleReason("rpo") + IncompatibleHostForVmReplicationIncompatibleReasonNetCompression = IncompatibleHostForVmReplicationIncompatibleReason("netCompression") +) + +func init() { + t["IncompatibleHostForVmReplicationIncompatibleReason"] = reflect.TypeOf((*IncompatibleHostForVmReplicationIncompatibleReason)(nil)).Elem() +} + +type InternetScsiSnsDiscoveryMethod string + +const ( + InternetScsiSnsDiscoveryMethodIsnsStatic = InternetScsiSnsDiscoveryMethod("isnsStatic") + InternetScsiSnsDiscoveryMethodIsnsDhcp = InternetScsiSnsDiscoveryMethod("isnsDhcp") + InternetScsiSnsDiscoveryMethodIsnsSlp = InternetScsiSnsDiscoveryMethod("isnsSlp") +) + +func init() { + t["InternetScsiSnsDiscoveryMethod"] = 
reflect.TypeOf((*InternetScsiSnsDiscoveryMethod)(nil)).Elem() +} + +type InvalidDasConfigArgumentEntryForInvalidArgument string + +const ( + InvalidDasConfigArgumentEntryForInvalidArgumentAdmissionControl = InvalidDasConfigArgumentEntryForInvalidArgument("admissionControl") + InvalidDasConfigArgumentEntryForInvalidArgumentUserHeartbeatDs = InvalidDasConfigArgumentEntryForInvalidArgument("userHeartbeatDs") + InvalidDasConfigArgumentEntryForInvalidArgumentVmConfig = InvalidDasConfigArgumentEntryForInvalidArgument("vmConfig") +) + +func init() { + t["InvalidDasConfigArgumentEntryForInvalidArgument"] = reflect.TypeOf((*InvalidDasConfigArgumentEntryForInvalidArgument)(nil)).Elem() +} + +type InvalidProfileReferenceHostReason string + +const ( + InvalidProfileReferenceHostReasonIncompatibleVersion = InvalidProfileReferenceHostReason("incompatibleVersion") + InvalidProfileReferenceHostReasonMissingReferenceHost = InvalidProfileReferenceHostReason("missingReferenceHost") +) + +func init() { + t["InvalidProfileReferenceHostReason"] = reflect.TypeOf((*InvalidProfileReferenceHostReason)(nil)).Elem() +} + +type IoFilterOperation string + +const ( + IoFilterOperationInstall = IoFilterOperation("install") + IoFilterOperationUninstall = IoFilterOperation("uninstall") + IoFilterOperationUpgrade = IoFilterOperation("upgrade") +) + +func init() { + t["IoFilterOperation"] = reflect.TypeOf((*IoFilterOperation)(nil)).Elem() +} + +type IscsiPortInfoPathStatus string + +const ( + IscsiPortInfoPathStatusNotUsed = IscsiPortInfoPathStatus("notUsed") + IscsiPortInfoPathStatusActive = IscsiPortInfoPathStatus("active") + IscsiPortInfoPathStatusStandBy = IscsiPortInfoPathStatus("standBy") + IscsiPortInfoPathStatusLastActive = IscsiPortInfoPathStatus("lastActive") +) + +func init() { + t["IscsiPortInfoPathStatus"] = reflect.TypeOf((*IscsiPortInfoPathStatus)(nil)).Elem() +} + +type LatencySensitivitySensitivityLevel string + +const ( + LatencySensitivitySensitivityLevelLow = LatencySensitivitySensitivityLevel("low") + LatencySensitivitySensitivityLevelNormal = LatencySensitivitySensitivityLevel("normal") + LatencySensitivitySensitivityLevelMedium = LatencySensitivitySensitivityLevel("medium") + LatencySensitivitySensitivityLevelHigh = LatencySensitivitySensitivityLevel("high") + LatencySensitivitySensitivityLevelCustom = LatencySensitivitySensitivityLevel("custom") +) + +func init() { + t["LatencySensitivitySensitivityLevel"] = reflect.TypeOf((*LatencySensitivitySensitivityLevel)(nil)).Elem() +} + +type LicenseAssignmentFailedReason string + +const ( + LicenseAssignmentFailedReasonKeyEntityMismatch = LicenseAssignmentFailedReason("keyEntityMismatch") + LicenseAssignmentFailedReasonDowngradeDisallowed = LicenseAssignmentFailedReason("downgradeDisallowed") + LicenseAssignmentFailedReasonInventoryNotManageableByVirtualCenter = LicenseAssignmentFailedReason("inventoryNotManageableByVirtualCenter") + LicenseAssignmentFailedReasonHostsUnmanageableByVirtualCenterWithoutLicenseServer = LicenseAssignmentFailedReason("hostsUnmanageableByVirtualCenterWithoutLicenseServer") +) + +func init() { + t["LicenseAssignmentFailedReason"] = reflect.TypeOf((*LicenseAssignmentFailedReason)(nil)).Elem() +} + +type LicenseFeatureInfoSourceRestriction string + +const ( + LicenseFeatureInfoSourceRestrictionUnrestricted = LicenseFeatureInfoSourceRestriction("unrestricted") + LicenseFeatureInfoSourceRestrictionServed = LicenseFeatureInfoSourceRestriction("served") + LicenseFeatureInfoSourceRestrictionFile = 
LicenseFeatureInfoSourceRestriction("file") +) + +func init() { + t["LicenseFeatureInfoSourceRestriction"] = reflect.TypeOf((*LicenseFeatureInfoSourceRestriction)(nil)).Elem() +} + +type LicenseFeatureInfoState string + +const ( + LicenseFeatureInfoStateEnabled = LicenseFeatureInfoState("enabled") + LicenseFeatureInfoStateDisabled = LicenseFeatureInfoState("disabled") + LicenseFeatureInfoStateOptional = LicenseFeatureInfoState("optional") +) + +func init() { + t["LicenseFeatureInfoState"] = reflect.TypeOf((*LicenseFeatureInfoState)(nil)).Elem() +} + +type LicenseFeatureInfoUnit string + +const ( + LicenseFeatureInfoUnitHost = LicenseFeatureInfoUnit("host") + LicenseFeatureInfoUnitCpuCore = LicenseFeatureInfoUnit("cpuCore") + LicenseFeatureInfoUnitCpuPackage = LicenseFeatureInfoUnit("cpuPackage") + LicenseFeatureInfoUnitServer = LicenseFeatureInfoUnit("server") + LicenseFeatureInfoUnitVm = LicenseFeatureInfoUnit("vm") +) + +func init() { + t["LicenseFeatureInfoUnit"] = reflect.TypeOf((*LicenseFeatureInfoUnit)(nil)).Elem() +} + +type LicenseManagerLicenseKey string + +const ( + LicenseManagerLicenseKeyEsxFull = LicenseManagerLicenseKey("esxFull") + LicenseManagerLicenseKeyEsxVmtn = LicenseManagerLicenseKey("esxVmtn") + LicenseManagerLicenseKeyEsxExpress = LicenseManagerLicenseKey("esxExpress") + LicenseManagerLicenseKeySan = LicenseManagerLicenseKey("san") + LicenseManagerLicenseKeyIscsi = LicenseManagerLicenseKey("iscsi") + LicenseManagerLicenseKeyNas = LicenseManagerLicenseKey("nas") + LicenseManagerLicenseKeyVsmp = LicenseManagerLicenseKey("vsmp") + LicenseManagerLicenseKeyBackup = LicenseManagerLicenseKey("backup") + LicenseManagerLicenseKeyVc = LicenseManagerLicenseKey("vc") + LicenseManagerLicenseKeyVcExpress = LicenseManagerLicenseKey("vcExpress") + LicenseManagerLicenseKeyEsxHost = LicenseManagerLicenseKey("esxHost") + LicenseManagerLicenseKeyGsxHost = LicenseManagerLicenseKey("gsxHost") + LicenseManagerLicenseKeyServerHost = LicenseManagerLicenseKey("serverHost") + LicenseManagerLicenseKeyDrsPower = LicenseManagerLicenseKey("drsPower") + LicenseManagerLicenseKeyVmotion = LicenseManagerLicenseKey("vmotion") + LicenseManagerLicenseKeyDrs = LicenseManagerLicenseKey("drs") + LicenseManagerLicenseKeyDas = LicenseManagerLicenseKey("das") +) + +func init() { + t["LicenseManagerLicenseKey"] = reflect.TypeOf((*LicenseManagerLicenseKey)(nil)).Elem() +} + +type LicenseManagerState string + +const ( + LicenseManagerStateInitializing = LicenseManagerState("initializing") + LicenseManagerStateNormal = LicenseManagerState("normal") + LicenseManagerStateMarginal = LicenseManagerState("marginal") + LicenseManagerStateFault = LicenseManagerState("fault") +) + +func init() { + t["LicenseManagerState"] = reflect.TypeOf((*LicenseManagerState)(nil)).Elem() +} + +type LicenseReservationInfoState string + +const ( + LicenseReservationInfoStateNotUsed = LicenseReservationInfoState("notUsed") + LicenseReservationInfoStateNoLicense = LicenseReservationInfoState("noLicense") + LicenseReservationInfoStateUnlicensedUse = LicenseReservationInfoState("unlicensedUse") + LicenseReservationInfoStateLicensed = LicenseReservationInfoState("licensed") +) + +func init() { + t["LicenseReservationInfoState"] = reflect.TypeOf((*LicenseReservationInfoState)(nil)).Elem() +} + +type LinkDiscoveryProtocolConfigOperationType string + +const ( + LinkDiscoveryProtocolConfigOperationTypeNone = LinkDiscoveryProtocolConfigOperationType("none") + LinkDiscoveryProtocolConfigOperationTypeListen = 
LinkDiscoveryProtocolConfigOperationType("listen") + LinkDiscoveryProtocolConfigOperationTypeAdvertise = LinkDiscoveryProtocolConfigOperationType("advertise") + LinkDiscoveryProtocolConfigOperationTypeBoth = LinkDiscoveryProtocolConfigOperationType("both") +) + +func init() { + t["LinkDiscoveryProtocolConfigOperationType"] = reflect.TypeOf((*LinkDiscoveryProtocolConfigOperationType)(nil)).Elem() +} + +type LinkDiscoveryProtocolConfigProtocolType string + +const ( + LinkDiscoveryProtocolConfigProtocolTypeCdp = LinkDiscoveryProtocolConfigProtocolType("cdp") + LinkDiscoveryProtocolConfigProtocolTypeLldp = LinkDiscoveryProtocolConfigProtocolType("lldp") +) + +func init() { + t["LinkDiscoveryProtocolConfigProtocolType"] = reflect.TypeOf((*LinkDiscoveryProtocolConfigProtocolType)(nil)).Elem() +} + +type ManagedEntityStatus string + +const ( + ManagedEntityStatusGray = ManagedEntityStatus("gray") + ManagedEntityStatusGreen = ManagedEntityStatus("green") + ManagedEntityStatusYellow = ManagedEntityStatus("yellow") + ManagedEntityStatusRed = ManagedEntityStatus("red") +) + +func init() { + t["ManagedEntityStatus"] = reflect.TypeOf((*ManagedEntityStatus)(nil)).Elem() +} + +type MetricAlarmOperator string + +const ( + MetricAlarmOperatorIsAbove = MetricAlarmOperator("isAbove") + MetricAlarmOperatorIsBelow = MetricAlarmOperator("isBelow") +) + +func init() { + t["MetricAlarmOperator"] = reflect.TypeOf((*MetricAlarmOperator)(nil)).Elem() +} + +type MultipathState string + +const ( + MultipathStateStandby = MultipathState("standby") + MultipathStateActive = MultipathState("active") + MultipathStateDisabled = MultipathState("disabled") + MultipathStateDead = MultipathState("dead") + MultipathStateUnknown = MultipathState("unknown") +) + +func init() { + t["MultipathState"] = reflect.TypeOf((*MultipathState)(nil)).Elem() +} + +type NetBIOSConfigInfoMode string + +const ( + NetBIOSConfigInfoModeUnknown = NetBIOSConfigInfoMode("unknown") + NetBIOSConfigInfoModeEnabled = NetBIOSConfigInfoMode("enabled") + NetBIOSConfigInfoModeDisabled = NetBIOSConfigInfoMode("disabled") + NetBIOSConfigInfoModeEnabledViaDHCP = NetBIOSConfigInfoMode("enabledViaDHCP") +) + +func init() { + t["NetBIOSConfigInfoMode"] = reflect.TypeOf((*NetBIOSConfigInfoMode)(nil)).Elem() +} + +type NetIpConfigInfoIpAddressOrigin string + +const ( + NetIpConfigInfoIpAddressOriginOther = NetIpConfigInfoIpAddressOrigin("other") + NetIpConfigInfoIpAddressOriginManual = NetIpConfigInfoIpAddressOrigin("manual") + NetIpConfigInfoIpAddressOriginDhcp = NetIpConfigInfoIpAddressOrigin("dhcp") + NetIpConfigInfoIpAddressOriginLinklayer = NetIpConfigInfoIpAddressOrigin("linklayer") + NetIpConfigInfoIpAddressOriginRandom = NetIpConfigInfoIpAddressOrigin("random") +) + +func init() { + t["NetIpConfigInfoIpAddressOrigin"] = reflect.TypeOf((*NetIpConfigInfoIpAddressOrigin)(nil)).Elem() +} + +type NetIpConfigInfoIpAddressStatus string + +const ( + NetIpConfigInfoIpAddressStatusPreferred = NetIpConfigInfoIpAddressStatus("preferred") + NetIpConfigInfoIpAddressStatusDeprecated = NetIpConfigInfoIpAddressStatus("deprecated") + NetIpConfigInfoIpAddressStatusInvalid = NetIpConfigInfoIpAddressStatus("invalid") + NetIpConfigInfoIpAddressStatusInaccessible = NetIpConfigInfoIpAddressStatus("inaccessible") + NetIpConfigInfoIpAddressStatusUnknown = NetIpConfigInfoIpAddressStatus("unknown") + NetIpConfigInfoIpAddressStatusTentative = NetIpConfigInfoIpAddressStatus("tentative") + NetIpConfigInfoIpAddressStatusDuplicate = NetIpConfigInfoIpAddressStatus("duplicate") +) + +func 
init() { + t["NetIpConfigInfoIpAddressStatus"] = reflect.TypeOf((*NetIpConfigInfoIpAddressStatus)(nil)).Elem() +} + +type NetIpStackInfoEntryType string + +const ( + NetIpStackInfoEntryTypeOther = NetIpStackInfoEntryType("other") + NetIpStackInfoEntryTypeInvalid = NetIpStackInfoEntryType("invalid") + NetIpStackInfoEntryTypeDynamic = NetIpStackInfoEntryType("dynamic") + NetIpStackInfoEntryTypeManual = NetIpStackInfoEntryType("manual") +) + +func init() { + t["NetIpStackInfoEntryType"] = reflect.TypeOf((*NetIpStackInfoEntryType)(nil)).Elem() +} + +type NetIpStackInfoPreference string + +const ( + NetIpStackInfoPreferenceReserved = NetIpStackInfoPreference("reserved") + NetIpStackInfoPreferenceLow = NetIpStackInfoPreference("low") + NetIpStackInfoPreferenceMedium = NetIpStackInfoPreference("medium") + NetIpStackInfoPreferenceHigh = NetIpStackInfoPreference("high") +) + +func init() { + t["NetIpStackInfoPreference"] = reflect.TypeOf((*NetIpStackInfoPreference)(nil)).Elem() +} + +type NotSupportedDeviceForFTDeviceType string + +const ( + NotSupportedDeviceForFTDeviceTypeVirtualVmxnet3 = NotSupportedDeviceForFTDeviceType("virtualVmxnet3") + NotSupportedDeviceForFTDeviceTypeParaVirtualSCSIController = NotSupportedDeviceForFTDeviceType("paraVirtualSCSIController") +) + +func init() { + t["NotSupportedDeviceForFTDeviceType"] = reflect.TypeOf((*NotSupportedDeviceForFTDeviceType)(nil)).Elem() +} + +type NumVirtualCpusIncompatibleReason string + +const ( + NumVirtualCpusIncompatibleReasonRecordReplay = NumVirtualCpusIncompatibleReason("recordReplay") + NumVirtualCpusIncompatibleReasonFaultTolerance = NumVirtualCpusIncompatibleReason("faultTolerance") +) + +func init() { + t["NumVirtualCpusIncompatibleReason"] = reflect.TypeOf((*NumVirtualCpusIncompatibleReason)(nil)).Elem() +} + +type ObjectUpdateKind string + +const ( + ObjectUpdateKindModify = ObjectUpdateKind("modify") + ObjectUpdateKindEnter = ObjectUpdateKind("enter") + ObjectUpdateKindLeave = ObjectUpdateKind("leave") +) + +func init() { + t["ObjectUpdateKind"] = reflect.TypeOf((*ObjectUpdateKind)(nil)).Elem() +} + +type OvfConsumerOstNodeType string + +const ( + OvfConsumerOstNodeTypeEnvelope = OvfConsumerOstNodeType("envelope") + OvfConsumerOstNodeTypeVirtualSystem = OvfConsumerOstNodeType("virtualSystem") + OvfConsumerOstNodeTypeVirtualSystemCollection = OvfConsumerOstNodeType("virtualSystemCollection") +) + +func init() { + t["OvfConsumerOstNodeType"] = reflect.TypeOf((*OvfConsumerOstNodeType)(nil)).Elem() +} + +type OvfCreateImportSpecParamsDiskProvisioningType string + +const ( + OvfCreateImportSpecParamsDiskProvisioningTypeMonolithicSparse = OvfCreateImportSpecParamsDiskProvisioningType("monolithicSparse") + OvfCreateImportSpecParamsDiskProvisioningTypeMonolithicFlat = OvfCreateImportSpecParamsDiskProvisioningType("monolithicFlat") + OvfCreateImportSpecParamsDiskProvisioningTypeTwoGbMaxExtentSparse = OvfCreateImportSpecParamsDiskProvisioningType("twoGbMaxExtentSparse") + OvfCreateImportSpecParamsDiskProvisioningTypeTwoGbMaxExtentFlat = OvfCreateImportSpecParamsDiskProvisioningType("twoGbMaxExtentFlat") + OvfCreateImportSpecParamsDiskProvisioningTypeThin = OvfCreateImportSpecParamsDiskProvisioningType("thin") + OvfCreateImportSpecParamsDiskProvisioningTypeThick = OvfCreateImportSpecParamsDiskProvisioningType("thick") + OvfCreateImportSpecParamsDiskProvisioningTypeSeSparse = OvfCreateImportSpecParamsDiskProvisioningType("seSparse") + OvfCreateImportSpecParamsDiskProvisioningTypeEagerZeroedThick = 
OvfCreateImportSpecParamsDiskProvisioningType("eagerZeroedThick") + OvfCreateImportSpecParamsDiskProvisioningTypeSparse = OvfCreateImportSpecParamsDiskProvisioningType("sparse") + OvfCreateImportSpecParamsDiskProvisioningTypeFlat = OvfCreateImportSpecParamsDiskProvisioningType("flat") +) + +func init() { + t["OvfCreateImportSpecParamsDiskProvisioningType"] = reflect.TypeOf((*OvfCreateImportSpecParamsDiskProvisioningType)(nil)).Elem() +} + +type PerfFormat string + +const ( + PerfFormatNormal = PerfFormat("normal") + PerfFormatCsv = PerfFormat("csv") +) + +func init() { + t["PerfFormat"] = reflect.TypeOf((*PerfFormat)(nil)).Elem() +} + +type PerfStatsType string + +const ( + PerfStatsTypeAbsolute = PerfStatsType("absolute") + PerfStatsTypeDelta = PerfStatsType("delta") + PerfStatsTypeRate = PerfStatsType("rate") +) + +func init() { + t["PerfStatsType"] = reflect.TypeOf((*PerfStatsType)(nil)).Elem() +} + +type PerfSummaryType string + +const ( + PerfSummaryTypeAverage = PerfSummaryType("average") + PerfSummaryTypeMaximum = PerfSummaryType("maximum") + PerfSummaryTypeMinimum = PerfSummaryType("minimum") + PerfSummaryTypeLatest = PerfSummaryType("latest") + PerfSummaryTypeSummation = PerfSummaryType("summation") + PerfSummaryTypeNone = PerfSummaryType("none") +) + +func init() { + t["PerfSummaryType"] = reflect.TypeOf((*PerfSummaryType)(nil)).Elem() +} + +type PerformanceManagerUnit string + +const ( + PerformanceManagerUnitPercent = PerformanceManagerUnit("percent") + PerformanceManagerUnitKiloBytes = PerformanceManagerUnit("kiloBytes") + PerformanceManagerUnitMegaBytes = PerformanceManagerUnit("megaBytes") + PerformanceManagerUnitMegaHertz = PerformanceManagerUnit("megaHertz") + PerformanceManagerUnitNumber = PerformanceManagerUnit("number") + PerformanceManagerUnitMicrosecond = PerformanceManagerUnit("microsecond") + PerformanceManagerUnitMillisecond = PerformanceManagerUnit("millisecond") + PerformanceManagerUnitSecond = PerformanceManagerUnit("second") + PerformanceManagerUnitKiloBytesPerSecond = PerformanceManagerUnit("kiloBytesPerSecond") + PerformanceManagerUnitMegaBytesPerSecond = PerformanceManagerUnit("megaBytesPerSecond") + PerformanceManagerUnitWatt = PerformanceManagerUnit("watt") + PerformanceManagerUnitJoule = PerformanceManagerUnit("joule") + PerformanceManagerUnitTeraBytes = PerformanceManagerUnit("teraBytes") +) + +func init() { + t["PerformanceManagerUnit"] = reflect.TypeOf((*PerformanceManagerUnit)(nil)).Elem() +} + +type PhysicalNicResourcePoolSchedulerDisallowedReason string + +const ( + PhysicalNicResourcePoolSchedulerDisallowedReasonUserOptOut = PhysicalNicResourcePoolSchedulerDisallowedReason("userOptOut") + PhysicalNicResourcePoolSchedulerDisallowedReasonHardwareUnsupported = PhysicalNicResourcePoolSchedulerDisallowedReason("hardwareUnsupported") +) + +func init() { + t["PhysicalNicResourcePoolSchedulerDisallowedReason"] = reflect.TypeOf((*PhysicalNicResourcePoolSchedulerDisallowedReason)(nil)).Elem() +} + +type PhysicalNicVmDirectPathGen2SupportedMode string + +const ( + PhysicalNicVmDirectPathGen2SupportedModeUpt = PhysicalNicVmDirectPathGen2SupportedMode("upt") +) + +func init() { + t["PhysicalNicVmDirectPathGen2SupportedMode"] = reflect.TypeOf((*PhysicalNicVmDirectPathGen2SupportedMode)(nil)).Elem() +} + +type PlacementAffinityRuleRuleScope string + +const ( + PlacementAffinityRuleRuleScopeCluster = PlacementAffinityRuleRuleScope("cluster") + PlacementAffinityRuleRuleScopeHost = PlacementAffinityRuleRuleScope("host") + PlacementAffinityRuleRuleScopeStoragePod = 
PlacementAffinityRuleRuleScope("storagePod") + PlacementAffinityRuleRuleScopeDatastore = PlacementAffinityRuleRuleScope("datastore") +) + +func init() { + t["PlacementAffinityRuleRuleScope"] = reflect.TypeOf((*PlacementAffinityRuleRuleScope)(nil)).Elem() +} + +type PlacementAffinityRuleRuleType string + +const ( + PlacementAffinityRuleRuleTypeAffinity = PlacementAffinityRuleRuleType("affinity") + PlacementAffinityRuleRuleTypeAntiAffinity = PlacementAffinityRuleRuleType("antiAffinity") + PlacementAffinityRuleRuleTypeSoftAffinity = PlacementAffinityRuleRuleType("softAffinity") + PlacementAffinityRuleRuleTypeSoftAntiAffinity = PlacementAffinityRuleRuleType("softAntiAffinity") +) + +func init() { + t["PlacementAffinityRuleRuleType"] = reflect.TypeOf((*PlacementAffinityRuleRuleType)(nil)).Elem() +} + +type PlacementSpecPlacementType string + +const ( + PlacementSpecPlacementTypeCreate = PlacementSpecPlacementType("create") + PlacementSpecPlacementTypeReconfigure = PlacementSpecPlacementType("reconfigure") + PlacementSpecPlacementTypeRelocate = PlacementSpecPlacementType("relocate") + PlacementSpecPlacementTypeClone = PlacementSpecPlacementType("clone") +) + +func init() { + t["PlacementSpecPlacementType"] = reflect.TypeOf((*PlacementSpecPlacementType)(nil)).Elem() +} + +type PortGroupConnecteeType string + +const ( + PortGroupConnecteeTypeVirtualMachine = PortGroupConnecteeType("virtualMachine") + PortGroupConnecteeTypeSystemManagement = PortGroupConnecteeType("systemManagement") + PortGroupConnecteeTypeHost = PortGroupConnecteeType("host") + PortGroupConnecteeTypeUnknown = PortGroupConnecteeType("unknown") +) + +func init() { + t["PortGroupConnecteeType"] = reflect.TypeOf((*PortGroupConnecteeType)(nil)).Elem() +} + +type ProfileExecuteResultStatus string + +const ( + ProfileExecuteResultStatusSuccess = ProfileExecuteResultStatus("success") + ProfileExecuteResultStatusNeedInput = ProfileExecuteResultStatus("needInput") + ProfileExecuteResultStatusError = ProfileExecuteResultStatus("error") +) + +func init() { + t["ProfileExecuteResultStatus"] = reflect.TypeOf((*ProfileExecuteResultStatus)(nil)).Elem() +} + +type ProfileNumericComparator string + +const ( + ProfileNumericComparatorLessThan = ProfileNumericComparator("lessThan") + ProfileNumericComparatorLessThanEqual = ProfileNumericComparator("lessThanEqual") + ProfileNumericComparatorEqual = ProfileNumericComparator("equal") + ProfileNumericComparatorNotEqual = ProfileNumericComparator("notEqual") + ProfileNumericComparatorGreaterThanEqual = ProfileNumericComparator("greaterThanEqual") + ProfileNumericComparatorGreaterThan = ProfileNumericComparator("greaterThan") +) + +func init() { + t["ProfileNumericComparator"] = reflect.TypeOf((*ProfileNumericComparator)(nil)).Elem() +} + +type PropertyChangeOp string + +const ( + PropertyChangeOpAdd = PropertyChangeOp("add") + PropertyChangeOpRemove = PropertyChangeOp("remove") + PropertyChangeOpAssign = PropertyChangeOp("assign") + PropertyChangeOpIndirectRemove = PropertyChangeOp("indirectRemove") +) + +func init() { + t["PropertyChangeOp"] = reflect.TypeOf((*PropertyChangeOp)(nil)).Elem() +} + +type QuiesceMode string + +const ( + QuiesceModeApplication = QuiesceMode("application") + QuiesceModeFilesystem = QuiesceMode("filesystem") + QuiesceModeNone = QuiesceMode("none") +) + +func init() { + t["QuiesceMode"] = reflect.TypeOf((*QuiesceMode)(nil)).Elem() +} + +type RecommendationReasonCode string + +const ( + RecommendationReasonCodeFairnessCpuAvg = RecommendationReasonCode("fairnessCpuAvg") + 
RecommendationReasonCodeFairnessMemAvg = RecommendationReasonCode("fairnessMemAvg") + RecommendationReasonCodeJointAffin = RecommendationReasonCode("jointAffin") + RecommendationReasonCodeAntiAffin = RecommendationReasonCode("antiAffin") + RecommendationReasonCodeHostMaint = RecommendationReasonCode("hostMaint") + RecommendationReasonCodeEnterStandby = RecommendationReasonCode("enterStandby") + RecommendationReasonCodeReservationCpu = RecommendationReasonCode("reservationCpu") + RecommendationReasonCodeReservationMem = RecommendationReasonCode("reservationMem") + RecommendationReasonCodePowerOnVm = RecommendationReasonCode("powerOnVm") + RecommendationReasonCodePowerSaving = RecommendationReasonCode("powerSaving") + RecommendationReasonCodeIncreaseCapacity = RecommendationReasonCode("increaseCapacity") + RecommendationReasonCodeCheckResource = RecommendationReasonCode("checkResource") + RecommendationReasonCodeUnreservedCapacity = RecommendationReasonCode("unreservedCapacity") + RecommendationReasonCodeVmHostHardAffinity = RecommendationReasonCode("vmHostHardAffinity") + RecommendationReasonCodeVmHostSoftAffinity = RecommendationReasonCode("vmHostSoftAffinity") + RecommendationReasonCodeBalanceDatastoreSpaceUsage = RecommendationReasonCode("balanceDatastoreSpaceUsage") + RecommendationReasonCodeBalanceDatastoreIOLoad = RecommendationReasonCode("balanceDatastoreIOLoad") + RecommendationReasonCodeBalanceDatastoreIOPSReservation = RecommendationReasonCode("balanceDatastoreIOPSReservation") + RecommendationReasonCodeDatastoreMaint = RecommendationReasonCode("datastoreMaint") + RecommendationReasonCodeVirtualDiskJointAffin = RecommendationReasonCode("virtualDiskJointAffin") + RecommendationReasonCodeVirtualDiskAntiAffin = RecommendationReasonCode("virtualDiskAntiAffin") + RecommendationReasonCodeDatastoreSpaceOutage = RecommendationReasonCode("datastoreSpaceOutage") + RecommendationReasonCodeStoragePlacement = RecommendationReasonCode("storagePlacement") + RecommendationReasonCodeIolbDisabledInternal = RecommendationReasonCode("iolbDisabledInternal") + RecommendationReasonCodeXvmotionPlacement = RecommendationReasonCode("xvmotionPlacement") + RecommendationReasonCodeNetworkBandwidthReservation = RecommendationReasonCode("networkBandwidthReservation") +) + +func init() { + t["RecommendationReasonCode"] = reflect.TypeOf((*RecommendationReasonCode)(nil)).Elem() +} + +type RecommendationType string + +const ( + RecommendationTypeV1 = RecommendationType("V1") +) + +func init() { + t["RecommendationType"] = reflect.TypeOf((*RecommendationType)(nil)).Elem() +} + +type ReplicationDiskConfigFaultReasonForFault string + +const ( + ReplicationDiskConfigFaultReasonForFaultDiskNotFound = ReplicationDiskConfigFaultReasonForFault("diskNotFound") + ReplicationDiskConfigFaultReasonForFaultDiskTypeNotSupported = ReplicationDiskConfigFaultReasonForFault("diskTypeNotSupported") + ReplicationDiskConfigFaultReasonForFaultInvalidDiskKey = ReplicationDiskConfigFaultReasonForFault("invalidDiskKey") + ReplicationDiskConfigFaultReasonForFaultInvalidDiskReplicationId = ReplicationDiskConfigFaultReasonForFault("invalidDiskReplicationId") + ReplicationDiskConfigFaultReasonForFaultDuplicateDiskReplicationId = ReplicationDiskConfigFaultReasonForFault("duplicateDiskReplicationId") + ReplicationDiskConfigFaultReasonForFaultInvalidPersistentFilePath = ReplicationDiskConfigFaultReasonForFault("invalidPersistentFilePath") + ReplicationDiskConfigFaultReasonForFaultReconfigureDiskReplicationIdNotAllowed = 
ReplicationDiskConfigFaultReasonForFault("reconfigureDiskReplicationIdNotAllowed") +) + +func init() { + t["ReplicationDiskConfigFaultReasonForFault"] = reflect.TypeOf((*ReplicationDiskConfigFaultReasonForFault)(nil)).Elem() +} + +type ReplicationVmConfigFaultReasonForFault string + +const ( + ReplicationVmConfigFaultReasonForFaultIncompatibleHwVersion = ReplicationVmConfigFaultReasonForFault("incompatibleHwVersion") + ReplicationVmConfigFaultReasonForFaultInvalidVmReplicationId = ReplicationVmConfigFaultReasonForFault("invalidVmReplicationId") + ReplicationVmConfigFaultReasonForFaultInvalidGenerationNumber = ReplicationVmConfigFaultReasonForFault("invalidGenerationNumber") + ReplicationVmConfigFaultReasonForFaultOutOfBoundsRpoValue = ReplicationVmConfigFaultReasonForFault("outOfBoundsRpoValue") + ReplicationVmConfigFaultReasonForFaultInvalidDestinationIpAddress = ReplicationVmConfigFaultReasonForFault("invalidDestinationIpAddress") + ReplicationVmConfigFaultReasonForFaultInvalidDestinationPort = ReplicationVmConfigFaultReasonForFault("invalidDestinationPort") + ReplicationVmConfigFaultReasonForFaultInvalidExtraVmOptions = ReplicationVmConfigFaultReasonForFault("invalidExtraVmOptions") + ReplicationVmConfigFaultReasonForFaultStaleGenerationNumber = ReplicationVmConfigFaultReasonForFault("staleGenerationNumber") + ReplicationVmConfigFaultReasonForFaultReconfigureVmReplicationIdNotAllowed = ReplicationVmConfigFaultReasonForFault("reconfigureVmReplicationIdNotAllowed") + ReplicationVmConfigFaultReasonForFaultCannotRetrieveVmReplicationConfiguration = ReplicationVmConfigFaultReasonForFault("cannotRetrieveVmReplicationConfiguration") + ReplicationVmConfigFaultReasonForFaultReplicationAlreadyEnabled = ReplicationVmConfigFaultReasonForFault("replicationAlreadyEnabled") + ReplicationVmConfigFaultReasonForFaultInvalidPriorConfiguration = ReplicationVmConfigFaultReasonForFault("invalidPriorConfiguration") + ReplicationVmConfigFaultReasonForFaultReplicationNotEnabled = ReplicationVmConfigFaultReasonForFault("replicationNotEnabled") + ReplicationVmConfigFaultReasonForFaultReplicationConfigurationFailed = ReplicationVmConfigFaultReasonForFault("replicationConfigurationFailed") +) + +func init() { + t["ReplicationVmConfigFaultReasonForFault"] = reflect.TypeOf((*ReplicationVmConfigFaultReasonForFault)(nil)).Elem() +} + +type ReplicationVmFaultReasonForFault string + +const ( + ReplicationVmFaultReasonForFaultNotConfigured = ReplicationVmFaultReasonForFault("notConfigured") + ReplicationVmFaultReasonForFaultPoweredOff = ReplicationVmFaultReasonForFault("poweredOff") + ReplicationVmFaultReasonForFaultSuspended = ReplicationVmFaultReasonForFault("suspended") + ReplicationVmFaultReasonForFaultPoweredOn = ReplicationVmFaultReasonForFault("poweredOn") + ReplicationVmFaultReasonForFaultOfflineReplicating = ReplicationVmFaultReasonForFault("offlineReplicating") + ReplicationVmFaultReasonForFaultInvalidState = ReplicationVmFaultReasonForFault("invalidState") + ReplicationVmFaultReasonForFaultInvalidInstanceId = ReplicationVmFaultReasonForFault("invalidInstanceId") +) + +func init() { + t["ReplicationVmFaultReasonForFault"] = reflect.TypeOf((*ReplicationVmFaultReasonForFault)(nil)).Elem() +} + +type ReplicationVmInProgressFaultActivity string + +const ( + ReplicationVmInProgressFaultActivityFullSync = ReplicationVmInProgressFaultActivity("fullSync") + ReplicationVmInProgressFaultActivityDelta = ReplicationVmInProgressFaultActivity("delta") +) + +func init() { + t["ReplicationVmInProgressFaultActivity"] = 
reflect.TypeOf((*ReplicationVmInProgressFaultActivity)(nil)).Elem() +} + +type ReplicationVmState string + +const ( + ReplicationVmStateNone = ReplicationVmState("none") + ReplicationVmStatePaused = ReplicationVmState("paused") + ReplicationVmStateSyncing = ReplicationVmState("syncing") + ReplicationVmStateIdle = ReplicationVmState("idle") + ReplicationVmStateActive = ReplicationVmState("active") + ReplicationVmStateError = ReplicationVmState("error") +) + +func init() { + t["ReplicationVmState"] = reflect.TypeOf((*ReplicationVmState)(nil)).Elem() +} + +type ScheduledHardwareUpgradeInfoHardwareUpgradePolicy string + +const ( + ScheduledHardwareUpgradeInfoHardwareUpgradePolicyNever = ScheduledHardwareUpgradeInfoHardwareUpgradePolicy("never") + ScheduledHardwareUpgradeInfoHardwareUpgradePolicyOnSoftPowerOff = ScheduledHardwareUpgradeInfoHardwareUpgradePolicy("onSoftPowerOff") + ScheduledHardwareUpgradeInfoHardwareUpgradePolicyAlways = ScheduledHardwareUpgradeInfoHardwareUpgradePolicy("always") +) + +func init() { + t["ScheduledHardwareUpgradeInfoHardwareUpgradePolicy"] = reflect.TypeOf((*ScheduledHardwareUpgradeInfoHardwareUpgradePolicy)(nil)).Elem() +} + +type ScheduledHardwareUpgradeInfoHardwareUpgradeStatus string + +const ( + ScheduledHardwareUpgradeInfoHardwareUpgradeStatusNone = ScheduledHardwareUpgradeInfoHardwareUpgradeStatus("none") + ScheduledHardwareUpgradeInfoHardwareUpgradeStatusPending = ScheduledHardwareUpgradeInfoHardwareUpgradeStatus("pending") + ScheduledHardwareUpgradeInfoHardwareUpgradeStatusSuccess = ScheduledHardwareUpgradeInfoHardwareUpgradeStatus("success") + ScheduledHardwareUpgradeInfoHardwareUpgradeStatusFailed = ScheduledHardwareUpgradeInfoHardwareUpgradeStatus("failed") +) + +func init() { + t["ScheduledHardwareUpgradeInfoHardwareUpgradeStatus"] = reflect.TypeOf((*ScheduledHardwareUpgradeInfoHardwareUpgradeStatus)(nil)).Elem() +} + +type ScsiLunDescriptorQuality string + +const ( + ScsiLunDescriptorQualityHighQuality = ScsiLunDescriptorQuality("highQuality") + ScsiLunDescriptorQualityMediumQuality = ScsiLunDescriptorQuality("mediumQuality") + ScsiLunDescriptorQualityLowQuality = ScsiLunDescriptorQuality("lowQuality") + ScsiLunDescriptorQualityUnknownQuality = ScsiLunDescriptorQuality("unknownQuality") +) + +func init() { + t["ScsiLunDescriptorQuality"] = reflect.TypeOf((*ScsiLunDescriptorQuality)(nil)).Elem() +} + +type ScsiLunState string + +const ( + ScsiLunStateUnknownState = ScsiLunState("unknownState") + ScsiLunStateOk = ScsiLunState("ok") + ScsiLunStateError = ScsiLunState("error") + ScsiLunStateOff = ScsiLunState("off") + ScsiLunStateQuiesced = ScsiLunState("quiesced") + ScsiLunStateDegraded = ScsiLunState("degraded") + ScsiLunStateLostCommunication = ScsiLunState("lostCommunication") + ScsiLunStateTimeout = ScsiLunState("timeout") +) + +func init() { + t["ScsiLunState"] = reflect.TypeOf((*ScsiLunState)(nil)).Elem() +} + +type ScsiLunType string + +const ( + ScsiLunTypeDisk = ScsiLunType("disk") + ScsiLunTypeTape = ScsiLunType("tape") + ScsiLunTypePrinter = ScsiLunType("printer") + ScsiLunTypeProcessor = ScsiLunType("processor") + ScsiLunTypeWorm = ScsiLunType("worm") + ScsiLunTypeCdrom = ScsiLunType("cdrom") + ScsiLunTypeScanner = ScsiLunType("scanner") + ScsiLunTypeOpticalDevice = ScsiLunType("opticalDevice") + ScsiLunTypeMediaChanger = ScsiLunType("mediaChanger") + ScsiLunTypeCommunications = ScsiLunType("communications") + ScsiLunTypeStorageArrayController = ScsiLunType("storageArrayController") + ScsiLunTypeEnclosure = ScsiLunType("enclosure") + 
ScsiLunTypeUnknown = ScsiLunType("unknown") +) + +func init() { + t["ScsiLunType"] = reflect.TypeOf((*ScsiLunType)(nil)).Elem() +} + +type ScsiLunVStorageSupportStatus string + +const ( + ScsiLunVStorageSupportStatusVStorageSupported = ScsiLunVStorageSupportStatus("vStorageSupported") + ScsiLunVStorageSupportStatusVStorageUnsupported = ScsiLunVStorageSupportStatus("vStorageUnsupported") + ScsiLunVStorageSupportStatusVStorageUnknown = ScsiLunVStorageSupportStatus("vStorageUnknown") +) + +func init() { + t["ScsiLunVStorageSupportStatus"] = reflect.TypeOf((*ScsiLunVStorageSupportStatus)(nil)).Elem() +} + +type SessionManagerHttpServiceRequestSpecMethod string + +const ( + SessionManagerHttpServiceRequestSpecMethodHttpOptions = SessionManagerHttpServiceRequestSpecMethod("httpOptions") + SessionManagerHttpServiceRequestSpecMethodHttpGet = SessionManagerHttpServiceRequestSpecMethod("httpGet") + SessionManagerHttpServiceRequestSpecMethodHttpHead = SessionManagerHttpServiceRequestSpecMethod("httpHead") + SessionManagerHttpServiceRequestSpecMethodHttpPost = SessionManagerHttpServiceRequestSpecMethod("httpPost") + SessionManagerHttpServiceRequestSpecMethodHttpPut = SessionManagerHttpServiceRequestSpecMethod("httpPut") + SessionManagerHttpServiceRequestSpecMethodHttpDelete = SessionManagerHttpServiceRequestSpecMethod("httpDelete") + SessionManagerHttpServiceRequestSpecMethodHttpTrace = SessionManagerHttpServiceRequestSpecMethod("httpTrace") + SessionManagerHttpServiceRequestSpecMethodHttpConnect = SessionManagerHttpServiceRequestSpecMethod("httpConnect") +) + +func init() { + t["SessionManagerHttpServiceRequestSpecMethod"] = reflect.TypeOf((*SessionManagerHttpServiceRequestSpecMethod)(nil)).Elem() +} + +type SharesLevel string + +const ( + SharesLevelLow = SharesLevel("low") + SharesLevelNormal = SharesLevel("normal") + SharesLevelHigh = SharesLevel("high") + SharesLevelCustom = SharesLevel("custom") +) + +func init() { + t["SharesLevel"] = reflect.TypeOf((*SharesLevel)(nil)).Elem() +} + +type SimpleCommandEncoding string + +const ( + SimpleCommandEncodingCSV = SimpleCommandEncoding("CSV") + SimpleCommandEncodingHEX = SimpleCommandEncoding("HEX") + SimpleCommandEncodingSTRING = SimpleCommandEncoding("STRING") +) + +func init() { + t["SimpleCommandEncoding"] = reflect.TypeOf((*SimpleCommandEncoding)(nil)).Elem() +} + +type SlpDiscoveryMethod string + +const ( + SlpDiscoveryMethodSlpDhcp = SlpDiscoveryMethod("slpDhcp") + SlpDiscoveryMethodSlpAutoUnicast = SlpDiscoveryMethod("slpAutoUnicast") + SlpDiscoveryMethodSlpAutoMulticast = SlpDiscoveryMethod("slpAutoMulticast") + SlpDiscoveryMethodSlpManual = SlpDiscoveryMethod("slpManual") +) + +func init() { + t["SlpDiscoveryMethod"] = reflect.TypeOf((*SlpDiscoveryMethod)(nil)).Elem() +} + +type StateAlarmOperator string + +const ( + StateAlarmOperatorIsEqual = StateAlarmOperator("isEqual") + StateAlarmOperatorIsUnequal = StateAlarmOperator("isUnequal") +) + +func init() { + t["StateAlarmOperator"] = reflect.TypeOf((*StateAlarmOperator)(nil)).Elem() +} + +type StorageDrsPodConfigInfoBehavior string + +const ( + StorageDrsPodConfigInfoBehaviorManual = StorageDrsPodConfigInfoBehavior("manual") + StorageDrsPodConfigInfoBehaviorAutomated = StorageDrsPodConfigInfoBehavior("automated") +) + +func init() { + t["StorageDrsPodConfigInfoBehavior"] = reflect.TypeOf((*StorageDrsPodConfigInfoBehavior)(nil)).Elem() +} + +type StorageDrsSpaceLoadBalanceConfigSpaceThresholdMode string + +const ( + StorageDrsSpaceLoadBalanceConfigSpaceThresholdModeUtilization = 
StorageDrsSpaceLoadBalanceConfigSpaceThresholdMode("utilization") + StorageDrsSpaceLoadBalanceConfigSpaceThresholdModeFreeSpace = StorageDrsSpaceLoadBalanceConfigSpaceThresholdMode("freeSpace") +) + +func init() { + t["StorageDrsSpaceLoadBalanceConfigSpaceThresholdMode"] = reflect.TypeOf((*StorageDrsSpaceLoadBalanceConfigSpaceThresholdMode)(nil)).Elem() +} + +type StorageIORMThresholdMode string + +const ( + StorageIORMThresholdModeAutomatic = StorageIORMThresholdMode("automatic") + StorageIORMThresholdModeManual = StorageIORMThresholdMode("manual") +) + +func init() { + t["StorageIORMThresholdMode"] = reflect.TypeOf((*StorageIORMThresholdMode)(nil)).Elem() +} + +type StoragePlacementSpecPlacementType string + +const ( + StoragePlacementSpecPlacementTypeCreate = StoragePlacementSpecPlacementType("create") + StoragePlacementSpecPlacementTypeReconfigure = StoragePlacementSpecPlacementType("reconfigure") + StoragePlacementSpecPlacementTypeRelocate = StoragePlacementSpecPlacementType("relocate") + StoragePlacementSpecPlacementTypeClone = StoragePlacementSpecPlacementType("clone") +) + +func init() { + t["StoragePlacementSpecPlacementType"] = reflect.TypeOf((*StoragePlacementSpecPlacementType)(nil)).Elem() +} + +type TaskFilterSpecRecursionOption string + +const ( + TaskFilterSpecRecursionOptionSelf = TaskFilterSpecRecursionOption("self") + TaskFilterSpecRecursionOptionChildren = TaskFilterSpecRecursionOption("children") + TaskFilterSpecRecursionOptionAll = TaskFilterSpecRecursionOption("all") +) + +func init() { + t["TaskFilterSpecRecursionOption"] = reflect.TypeOf((*TaskFilterSpecRecursionOption)(nil)).Elem() +} + +type TaskFilterSpecTimeOption string + +const ( + TaskFilterSpecTimeOptionQueuedTime = TaskFilterSpecTimeOption("queuedTime") + TaskFilterSpecTimeOptionStartedTime = TaskFilterSpecTimeOption("startedTime") + TaskFilterSpecTimeOptionCompletedTime = TaskFilterSpecTimeOption("completedTime") +) + +func init() { + t["TaskFilterSpecTimeOption"] = reflect.TypeOf((*TaskFilterSpecTimeOption)(nil)).Elem() +} + +type TaskInfoState string + +const ( + TaskInfoStateQueued = TaskInfoState("queued") + TaskInfoStateRunning = TaskInfoState("running") + TaskInfoStateSuccess = TaskInfoState("success") + TaskInfoStateError = TaskInfoState("error") +) + +func init() { + t["TaskInfoState"] = reflect.TypeOf((*TaskInfoState)(nil)).Elem() +} + +type ThirdPartyLicenseAssignmentFailedReason string + +const ( + ThirdPartyLicenseAssignmentFailedReasonLicenseAssignmentFailed = ThirdPartyLicenseAssignmentFailedReason("licenseAssignmentFailed") + ThirdPartyLicenseAssignmentFailedReasonModuleNotInstalled = ThirdPartyLicenseAssignmentFailedReason("moduleNotInstalled") +) + +func init() { + t["ThirdPartyLicenseAssignmentFailedReason"] = reflect.TypeOf((*ThirdPartyLicenseAssignmentFailedReason)(nil)).Elem() +} + +type UpgradePolicy string + +const ( + UpgradePolicyManual = UpgradePolicy("manual") + UpgradePolicyUpgradeAtPowerCycle = UpgradePolicy("upgradeAtPowerCycle") +) + +func init() { + t["UpgradePolicy"] = reflect.TypeOf((*UpgradePolicy)(nil)).Elem() +} + +type VAppAutoStartAction string + +const ( + VAppAutoStartActionNone = VAppAutoStartAction("none") + VAppAutoStartActionPowerOn = VAppAutoStartAction("powerOn") + VAppAutoStartActionPowerOff = VAppAutoStartAction("powerOff") + VAppAutoStartActionGuestShutdown = VAppAutoStartAction("guestShutdown") + VAppAutoStartActionSuspend = VAppAutoStartAction("suspend") +) + +func init() { + t["VAppAutoStartAction"] = reflect.TypeOf((*VAppAutoStartAction)(nil)).Elem() +} 
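Because each of these enums is a distinct named type rather than a bare string, call sites get compile-time type checking and can switch over the declared constants. A short sketch using TaskInfoState as declared in the diff (the done helper is a hypothetical consumer, not part of the govmomi API):

    package main

    import "fmt"

    // Copied from the vendored declarations above.
    type TaskInfoState string

    const (
        TaskInfoStateQueued  = TaskInfoState("queued")
        TaskInfoStateRunning = TaskInfoState("running")
        TaskInfoStateSuccess = TaskInfoState("success")
        TaskInfoStateError   = TaskInfoState("error")
    )

    // done reports whether a task has reached a terminal state.
    func done(s TaskInfoState) bool {
        switch s {
        case TaskInfoStateSuccess, TaskInfoStateError:
            return true
        default:
            return false
        }
    }

    func main() {
        fmt.Println(done(TaskInfoStateRunning)) // false
        fmt.Println(done(TaskInfoStateSuccess)) // true
    }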
+ +type VAppCloneSpecProvisioningType string + +const ( + VAppCloneSpecProvisioningTypeSameAsSource = VAppCloneSpecProvisioningType("sameAsSource") + VAppCloneSpecProvisioningTypeThin = VAppCloneSpecProvisioningType("thin") + VAppCloneSpecProvisioningTypeThick = VAppCloneSpecProvisioningType("thick") +) + +func init() { + t["VAppCloneSpecProvisioningType"] = reflect.TypeOf((*VAppCloneSpecProvisioningType)(nil)).Elem() +} + +type VAppIPAssignmentInfoAllocationSchemes string + +const ( + VAppIPAssignmentInfoAllocationSchemesDhcp = VAppIPAssignmentInfoAllocationSchemes("dhcp") + VAppIPAssignmentInfoAllocationSchemesOvfenv = VAppIPAssignmentInfoAllocationSchemes("ovfenv") +) + +func init() { + t["VAppIPAssignmentInfoAllocationSchemes"] = reflect.TypeOf((*VAppIPAssignmentInfoAllocationSchemes)(nil)).Elem() +} + +type VAppIPAssignmentInfoIpAllocationPolicy string + +const ( + VAppIPAssignmentInfoIpAllocationPolicyDhcpPolicy = VAppIPAssignmentInfoIpAllocationPolicy("dhcpPolicy") + VAppIPAssignmentInfoIpAllocationPolicyTransientPolicy = VAppIPAssignmentInfoIpAllocationPolicy("transientPolicy") + VAppIPAssignmentInfoIpAllocationPolicyFixedPolicy = VAppIPAssignmentInfoIpAllocationPolicy("fixedPolicy") + VAppIPAssignmentInfoIpAllocationPolicyFixedAllocatedPolicy = VAppIPAssignmentInfoIpAllocationPolicy("fixedAllocatedPolicy") +) + +func init() { + t["VAppIPAssignmentInfoIpAllocationPolicy"] = reflect.TypeOf((*VAppIPAssignmentInfoIpAllocationPolicy)(nil)).Elem() +} + +type VAppIPAssignmentInfoProtocols string + +const ( + VAppIPAssignmentInfoProtocolsIPv4 = VAppIPAssignmentInfoProtocols("IPv4") + VAppIPAssignmentInfoProtocolsIPv6 = VAppIPAssignmentInfoProtocols("IPv6") +) + +func init() { + t["VAppIPAssignmentInfoProtocols"] = reflect.TypeOf((*VAppIPAssignmentInfoProtocols)(nil)).Elem() +} + +type VFlashModuleNotSupportedReason string + +const ( + VFlashModuleNotSupportedReasonCacheModeNotSupported = VFlashModuleNotSupportedReason("CacheModeNotSupported") + VFlashModuleNotSupportedReasonCacheConsistencyTypeNotSupported = VFlashModuleNotSupportedReason("CacheConsistencyTypeNotSupported") + VFlashModuleNotSupportedReasonCacheBlockSizeNotSupported = VFlashModuleNotSupportedReason("CacheBlockSizeNotSupported") + VFlashModuleNotSupportedReasonCacheReservationNotSupported = VFlashModuleNotSupportedReason("CacheReservationNotSupported") + VFlashModuleNotSupportedReasonDiskSizeNotSupported = VFlashModuleNotSupportedReason("DiskSizeNotSupported") +) + +func init() { + t["VFlashModuleNotSupportedReason"] = reflect.TypeOf((*VFlashModuleNotSupportedReason)(nil)).Elem() +} + +type VMotionCompatibilityType string + +const ( + VMotionCompatibilityTypeCpu = VMotionCompatibilityType("cpu") + VMotionCompatibilityTypeSoftware = VMotionCompatibilityType("software") +) + +func init() { + t["VMotionCompatibilityType"] = reflect.TypeOf((*VMotionCompatibilityType)(nil)).Elem() +} + +type VMwareDVSTeamingMatchStatus string + +const ( + VMwareDVSTeamingMatchStatusIphashMatch = VMwareDVSTeamingMatchStatus("iphashMatch") + VMwareDVSTeamingMatchStatusNonIphashMatch = VMwareDVSTeamingMatchStatus("nonIphashMatch") + VMwareDVSTeamingMatchStatusIphashMismatch = VMwareDVSTeamingMatchStatus("iphashMismatch") + VMwareDVSTeamingMatchStatusNonIphashMismatch = VMwareDVSTeamingMatchStatus("nonIphashMismatch") +) + +func init() { + t["VMwareDVSTeamingMatchStatus"] = reflect.TypeOf((*VMwareDVSTeamingMatchStatus)(nil)).Elem() +} + +type VMwareDVSVspanSessionType string + +const ( + VMwareDVSVspanSessionTypeMixedDestMirror = 
VMwareDVSVspanSessionType("mixedDestMirror") + VMwareDVSVspanSessionTypeDvPortMirror = VMwareDVSVspanSessionType("dvPortMirror") + VMwareDVSVspanSessionTypeRemoteMirrorSource = VMwareDVSVspanSessionType("remoteMirrorSource") + VMwareDVSVspanSessionTypeRemoteMirrorDest = VMwareDVSVspanSessionType("remoteMirrorDest") + VMwareDVSVspanSessionTypeEncapsulatedRemoteMirrorSource = VMwareDVSVspanSessionType("encapsulatedRemoteMirrorSource") +) + +func init() { + t["VMwareDVSVspanSessionType"] = reflect.TypeOf((*VMwareDVSVspanSessionType)(nil)).Elem() +} + +type VMwareDvsLacpApiVersion string + +const ( + VMwareDvsLacpApiVersionSingleLag = VMwareDvsLacpApiVersion("singleLag") + VMwareDvsLacpApiVersionMultipleLag = VMwareDvsLacpApiVersion("multipleLag") +) + +func init() { + t["VMwareDvsLacpApiVersion"] = reflect.TypeOf((*VMwareDvsLacpApiVersion)(nil)).Elem() +} + +type VMwareDvsLacpLoadBalanceAlgorithm string + +const ( + VMwareDvsLacpLoadBalanceAlgorithmSrcMac = VMwareDvsLacpLoadBalanceAlgorithm("srcMac") + VMwareDvsLacpLoadBalanceAlgorithmDestMac = VMwareDvsLacpLoadBalanceAlgorithm("destMac") + VMwareDvsLacpLoadBalanceAlgorithmSrcDestMac = VMwareDvsLacpLoadBalanceAlgorithm("srcDestMac") + VMwareDvsLacpLoadBalanceAlgorithmDestIpVlan = VMwareDvsLacpLoadBalanceAlgorithm("destIpVlan") + VMwareDvsLacpLoadBalanceAlgorithmSrcIpVlan = VMwareDvsLacpLoadBalanceAlgorithm("srcIpVlan") + VMwareDvsLacpLoadBalanceAlgorithmSrcDestIpVlan = VMwareDvsLacpLoadBalanceAlgorithm("srcDestIpVlan") + VMwareDvsLacpLoadBalanceAlgorithmDestTcpUdpPort = VMwareDvsLacpLoadBalanceAlgorithm("destTcpUdpPort") + VMwareDvsLacpLoadBalanceAlgorithmSrcTcpUdpPort = VMwareDvsLacpLoadBalanceAlgorithm("srcTcpUdpPort") + VMwareDvsLacpLoadBalanceAlgorithmSrcDestTcpUdpPort = VMwareDvsLacpLoadBalanceAlgorithm("srcDestTcpUdpPort") + VMwareDvsLacpLoadBalanceAlgorithmDestIpTcpUdpPort = VMwareDvsLacpLoadBalanceAlgorithm("destIpTcpUdpPort") + VMwareDvsLacpLoadBalanceAlgorithmSrcIpTcpUdpPort = VMwareDvsLacpLoadBalanceAlgorithm("srcIpTcpUdpPort") + VMwareDvsLacpLoadBalanceAlgorithmSrcDestIpTcpUdpPort = VMwareDvsLacpLoadBalanceAlgorithm("srcDestIpTcpUdpPort") + VMwareDvsLacpLoadBalanceAlgorithmDestIpTcpUdpPortVlan = VMwareDvsLacpLoadBalanceAlgorithm("destIpTcpUdpPortVlan") + VMwareDvsLacpLoadBalanceAlgorithmSrcIpTcpUdpPortVlan = VMwareDvsLacpLoadBalanceAlgorithm("srcIpTcpUdpPortVlan") + VMwareDvsLacpLoadBalanceAlgorithmSrcDestIpTcpUdpPortVlan = VMwareDvsLacpLoadBalanceAlgorithm("srcDestIpTcpUdpPortVlan") + VMwareDvsLacpLoadBalanceAlgorithmDestIp = VMwareDvsLacpLoadBalanceAlgorithm("destIp") + VMwareDvsLacpLoadBalanceAlgorithmSrcIp = VMwareDvsLacpLoadBalanceAlgorithm("srcIp") + VMwareDvsLacpLoadBalanceAlgorithmSrcDestIp = VMwareDvsLacpLoadBalanceAlgorithm("srcDestIp") + VMwareDvsLacpLoadBalanceAlgorithmVlan = VMwareDvsLacpLoadBalanceAlgorithm("vlan") + VMwareDvsLacpLoadBalanceAlgorithmSrcPortId = VMwareDvsLacpLoadBalanceAlgorithm("srcPortId") +) + +func init() { + t["VMwareDvsLacpLoadBalanceAlgorithm"] = reflect.TypeOf((*VMwareDvsLacpLoadBalanceAlgorithm)(nil)).Elem() +} + +type VMwareDvsMulticastFilteringMode string + +const ( + VMwareDvsMulticastFilteringModeLegacyFiltering = VMwareDvsMulticastFilteringMode("legacyFiltering") + VMwareDvsMulticastFilteringModeSnooping = VMwareDvsMulticastFilteringMode("snooping") +) + +func init() { + t["VMwareDvsMulticastFilteringMode"] = reflect.TypeOf((*VMwareDvsMulticastFilteringMode)(nil)).Elem() +} + +type VMwareUplinkLacpMode string + +const ( + VMwareUplinkLacpModeActive = VMwareUplinkLacpMode("active") + 
VMwareUplinkLacpModePassive = VMwareUplinkLacpMode("passive") +) + +func init() { + t["VMwareUplinkLacpMode"] = reflect.TypeOf((*VMwareUplinkLacpMode)(nil)).Elem() +} + +type ValidateMigrationTestType string + +const ( + ValidateMigrationTestTypeSourceTests = ValidateMigrationTestType("sourceTests") + ValidateMigrationTestTypeCompatibilityTests = ValidateMigrationTestType("compatibilityTests") + ValidateMigrationTestTypeDiskAccessibilityTests = ValidateMigrationTestType("diskAccessibilityTests") + ValidateMigrationTestTypeResourceTests = ValidateMigrationTestType("resourceTests") +) + +func init() { + t["ValidateMigrationTestType"] = reflect.TypeOf((*ValidateMigrationTestType)(nil)).Elem() +} + +type VirtualAppVAppState string + +const ( + VirtualAppVAppStateStarted = VirtualAppVAppState("started") + VirtualAppVAppStateStopped = VirtualAppVAppState("stopped") + VirtualAppVAppStateStarting = VirtualAppVAppState("starting") + VirtualAppVAppStateStopping = VirtualAppVAppState("stopping") +) + +func init() { + t["VirtualAppVAppState"] = reflect.TypeOf((*VirtualAppVAppState)(nil)).Elem() +} + +type VirtualDeviceConfigSpecFileOperation string + +const ( + VirtualDeviceConfigSpecFileOperationCreate = VirtualDeviceConfigSpecFileOperation("create") + VirtualDeviceConfigSpecFileOperationDestroy = VirtualDeviceConfigSpecFileOperation("destroy") + VirtualDeviceConfigSpecFileOperationReplace = VirtualDeviceConfigSpecFileOperation("replace") +) + +func init() { + t["VirtualDeviceConfigSpecFileOperation"] = reflect.TypeOf((*VirtualDeviceConfigSpecFileOperation)(nil)).Elem() +} + +type VirtualDeviceConfigSpecOperation string + +const ( + VirtualDeviceConfigSpecOperationAdd = VirtualDeviceConfigSpecOperation("add") + VirtualDeviceConfigSpecOperationRemove = VirtualDeviceConfigSpecOperation("remove") + VirtualDeviceConfigSpecOperationEdit = VirtualDeviceConfigSpecOperation("edit") +) + +func init() { + t["VirtualDeviceConfigSpecOperation"] = reflect.TypeOf((*VirtualDeviceConfigSpecOperation)(nil)).Elem() +} + +type VirtualDeviceConnectInfoStatus string + +const ( + VirtualDeviceConnectInfoStatusOk = VirtualDeviceConnectInfoStatus("ok") + VirtualDeviceConnectInfoStatusRecoverableError = VirtualDeviceConnectInfoStatus("recoverableError") + VirtualDeviceConnectInfoStatusUnrecoverableError = VirtualDeviceConnectInfoStatus("unrecoverableError") + VirtualDeviceConnectInfoStatusUntried = VirtualDeviceConnectInfoStatus("untried") +) + +func init() { + t["VirtualDeviceConnectInfoStatus"] = reflect.TypeOf((*VirtualDeviceConnectInfoStatus)(nil)).Elem() +} + +type VirtualDeviceFileExtension string + +const ( + VirtualDeviceFileExtensionIso = VirtualDeviceFileExtension("iso") + VirtualDeviceFileExtensionFlp = VirtualDeviceFileExtension("flp") + VirtualDeviceFileExtensionVmdk = VirtualDeviceFileExtension("vmdk") + VirtualDeviceFileExtensionDsk = VirtualDeviceFileExtension("dsk") + VirtualDeviceFileExtensionRdm = VirtualDeviceFileExtension("rdm") +) + +func init() { + t["VirtualDeviceFileExtension"] = reflect.TypeOf((*VirtualDeviceFileExtension)(nil)).Elem() +} + +type VirtualDeviceURIBackingOptionDirection string + +const ( + VirtualDeviceURIBackingOptionDirectionServer = VirtualDeviceURIBackingOptionDirection("server") + VirtualDeviceURIBackingOptionDirectionClient = VirtualDeviceURIBackingOptionDirection("client") +) + +func init() { + t["VirtualDeviceURIBackingOptionDirection"] = reflect.TypeOf((*VirtualDeviceURIBackingOptionDirection)(nil)).Elem() +} + +type VirtualDiskAdapterType string + +const ( + 
VirtualDiskAdapterTypeIde = VirtualDiskAdapterType("ide") + VirtualDiskAdapterTypeBusLogic = VirtualDiskAdapterType("busLogic") + VirtualDiskAdapterTypeLsiLogic = VirtualDiskAdapterType("lsiLogic") +) + +func init() { + t["VirtualDiskAdapterType"] = reflect.TypeOf((*VirtualDiskAdapterType)(nil)).Elem() +} + +type VirtualDiskCompatibilityMode string + +const ( + VirtualDiskCompatibilityModeVirtualMode = VirtualDiskCompatibilityMode("virtualMode") + VirtualDiskCompatibilityModePhysicalMode = VirtualDiskCompatibilityMode("physicalMode") +) + +func init() { + t["VirtualDiskCompatibilityMode"] = reflect.TypeOf((*VirtualDiskCompatibilityMode)(nil)).Elem() +} + +type VirtualDiskDeltaDiskFormat string + +const ( + VirtualDiskDeltaDiskFormatRedoLogFormat = VirtualDiskDeltaDiskFormat("redoLogFormat") + VirtualDiskDeltaDiskFormatNativeFormat = VirtualDiskDeltaDiskFormat("nativeFormat") + VirtualDiskDeltaDiskFormatSeSparseFormat = VirtualDiskDeltaDiskFormat("seSparseFormat") +) + +func init() { + t["VirtualDiskDeltaDiskFormat"] = reflect.TypeOf((*VirtualDiskDeltaDiskFormat)(nil)).Elem() +} + +type VirtualDiskDeltaDiskFormatVariant string + +const ( + VirtualDiskDeltaDiskFormatVariantVmfsSparseVariant = VirtualDiskDeltaDiskFormatVariant("vmfsSparseVariant") + VirtualDiskDeltaDiskFormatVariantVsanSparseVariant = VirtualDiskDeltaDiskFormatVariant("vsanSparseVariant") +) + +func init() { + t["VirtualDiskDeltaDiskFormatVariant"] = reflect.TypeOf((*VirtualDiskDeltaDiskFormatVariant)(nil)).Elem() +} + +type VirtualDiskMode string + +const ( + VirtualDiskModePersistent = VirtualDiskMode("persistent") + VirtualDiskModeNonpersistent = VirtualDiskMode("nonpersistent") + VirtualDiskModeUndoable = VirtualDiskMode("undoable") + VirtualDiskModeIndependent_persistent = VirtualDiskMode("independent_persistent") + VirtualDiskModeIndependent_nonpersistent = VirtualDiskMode("independent_nonpersistent") + VirtualDiskModeAppend = VirtualDiskMode("append") +) + +func init() { + t["VirtualDiskMode"] = reflect.TypeOf((*VirtualDiskMode)(nil)).Elem() +} + +type VirtualDiskSharing string + +const ( + VirtualDiskSharingSharingNone = VirtualDiskSharing("sharingNone") + VirtualDiskSharingSharingMultiWriter = VirtualDiskSharing("sharingMultiWriter") +) + +func init() { + t["VirtualDiskSharing"] = reflect.TypeOf((*VirtualDiskSharing)(nil)).Elem() +} + +type VirtualDiskType string + +const ( + VirtualDiskTypePreallocated = VirtualDiskType("preallocated") + VirtualDiskTypeThin = VirtualDiskType("thin") + VirtualDiskTypeSeSparse = VirtualDiskType("seSparse") + VirtualDiskTypeRdm = VirtualDiskType("rdm") + VirtualDiskTypeRdmp = VirtualDiskType("rdmp") + VirtualDiskTypeRaw = VirtualDiskType("raw") + VirtualDiskTypeDelta = VirtualDiskType("delta") + VirtualDiskTypeSparse2Gb = VirtualDiskType("sparse2Gb") + VirtualDiskTypeThick2Gb = VirtualDiskType("thick2Gb") + VirtualDiskTypeEagerZeroedThick = VirtualDiskType("eagerZeroedThick") + VirtualDiskTypeSparseMonolithic = VirtualDiskType("sparseMonolithic") + VirtualDiskTypeFlatMonolithic = VirtualDiskType("flatMonolithic") + VirtualDiskTypeThick = VirtualDiskType("thick") +) + +func init() { + t["VirtualDiskType"] = reflect.TypeOf((*VirtualDiskType)(nil)).Elem() +} + +type VirtualDiskVFlashCacheConfigInfoCacheConsistencyType string + +const ( + VirtualDiskVFlashCacheConfigInfoCacheConsistencyTypeStrong = VirtualDiskVFlashCacheConfigInfoCacheConsistencyType("strong") + VirtualDiskVFlashCacheConfigInfoCacheConsistencyTypeWeak = VirtualDiskVFlashCacheConfigInfoCacheConsistencyType("weak") +) + 
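Since these enums are plain string types, consuming code can compare and switch on the typed constants directly, with no conversion step. A hypothetical consumer-side sketch follows; the helper and its descriptions are illustrative only and not part of govmomi.

package main

import "fmt"

// Local stand-ins for two of the VirtualDiskType values defined above.
type VirtualDiskType string

const (
	VirtualDiskTypeThin             = VirtualDiskType("thin")
	VirtualDiskTypeEagerZeroedThick = VirtualDiskType("eagerZeroedThick")
)

// describeProvisioning shows the typical consumption pattern: branch on the
// typed constant, and keep a default arm since the server may introduce
// values this client build does not know about.
func describeProvisioning(dt VirtualDiskType) string {
	switch dt {
	case VirtualDiskTypeThin:
		return "blocks are allocated lazily, on first write"
	case VirtualDiskTypeEagerZeroedThick:
		return "all space is allocated and zeroed up front"
	default:
		return fmt.Sprintf("unhandled disk type %q", dt)
	}
}

func main() {
	fmt.Println(describeProvisioning(VirtualDiskTypeThin))
}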
+func init() { + t["VirtualDiskVFlashCacheConfigInfoCacheConsistencyType"] = reflect.TypeOf((*VirtualDiskVFlashCacheConfigInfoCacheConsistencyType)(nil)).Elem() +} + +type VirtualDiskVFlashCacheConfigInfoCacheMode string + +const ( + VirtualDiskVFlashCacheConfigInfoCacheModeWrite_thru = VirtualDiskVFlashCacheConfigInfoCacheMode("write_thru") + VirtualDiskVFlashCacheConfigInfoCacheModeWrite_back = VirtualDiskVFlashCacheConfigInfoCacheMode("write_back") +) + +func init() { + t["VirtualDiskVFlashCacheConfigInfoCacheMode"] = reflect.TypeOf((*VirtualDiskVFlashCacheConfigInfoCacheMode)(nil)).Elem() +} + +type VirtualEthernetCardLegacyNetworkDeviceName string + +const ( + VirtualEthernetCardLegacyNetworkDeviceNameBridged = VirtualEthernetCardLegacyNetworkDeviceName("bridged") + VirtualEthernetCardLegacyNetworkDeviceNameNat = VirtualEthernetCardLegacyNetworkDeviceName("nat") + VirtualEthernetCardLegacyNetworkDeviceNameHostonly = VirtualEthernetCardLegacyNetworkDeviceName("hostonly") +) + +func init() { + t["VirtualEthernetCardLegacyNetworkDeviceName"] = reflect.TypeOf((*VirtualEthernetCardLegacyNetworkDeviceName)(nil)).Elem() +} + +type VirtualEthernetCardMacType string + +const ( + VirtualEthernetCardMacTypeManual = VirtualEthernetCardMacType("manual") + VirtualEthernetCardMacTypeGenerated = VirtualEthernetCardMacType("generated") + VirtualEthernetCardMacTypeAssigned = VirtualEthernetCardMacType("assigned") +) + +func init() { + t["VirtualEthernetCardMacType"] = reflect.TypeOf((*VirtualEthernetCardMacType)(nil)).Elem() +} + +type VirtualMachineAppHeartbeatStatusType string + +const ( + VirtualMachineAppHeartbeatStatusTypeAppStatusGray = VirtualMachineAppHeartbeatStatusType("appStatusGray") + VirtualMachineAppHeartbeatStatusTypeAppStatusGreen = VirtualMachineAppHeartbeatStatusType("appStatusGreen") + VirtualMachineAppHeartbeatStatusTypeAppStatusRed = VirtualMachineAppHeartbeatStatusType("appStatusRed") +) + +func init() { + t["VirtualMachineAppHeartbeatStatusType"] = reflect.TypeOf((*VirtualMachineAppHeartbeatStatusType)(nil)).Elem() +} + +type VirtualMachineBootOptionsNetworkBootProtocolType string + +const ( + VirtualMachineBootOptionsNetworkBootProtocolTypeIpv4 = VirtualMachineBootOptionsNetworkBootProtocolType("ipv4") + VirtualMachineBootOptionsNetworkBootProtocolTypeIpv6 = VirtualMachineBootOptionsNetworkBootProtocolType("ipv6") +) + +func init() { + t["VirtualMachineBootOptionsNetworkBootProtocolType"] = reflect.TypeOf((*VirtualMachineBootOptionsNetworkBootProtocolType)(nil)).Elem() +} + +type VirtualMachineConfigInfoNpivWwnType string + +const ( + VirtualMachineConfigInfoNpivWwnTypeVc = VirtualMachineConfigInfoNpivWwnType("vc") + VirtualMachineConfigInfoNpivWwnTypeHost = VirtualMachineConfigInfoNpivWwnType("host") + VirtualMachineConfigInfoNpivWwnTypeExternal = VirtualMachineConfigInfoNpivWwnType("external") +) + +func init() { + t["VirtualMachineConfigInfoNpivWwnType"] = reflect.TypeOf((*VirtualMachineConfigInfoNpivWwnType)(nil)).Elem() +} + +type VirtualMachineConfigInfoSwapPlacementType string + +const ( + VirtualMachineConfigInfoSwapPlacementTypeInherit = VirtualMachineConfigInfoSwapPlacementType("inherit") + VirtualMachineConfigInfoSwapPlacementTypeVmDirectory = VirtualMachineConfigInfoSwapPlacementType("vmDirectory") + VirtualMachineConfigInfoSwapPlacementTypeHostLocal = VirtualMachineConfigInfoSwapPlacementType("hostLocal") +) + +func init() { + t["VirtualMachineConfigInfoSwapPlacementType"] = reflect.TypeOf((*VirtualMachineConfigInfoSwapPlacementType)(nil)).Elem() +} + +type 
VirtualMachineConfigSpecNpivWwnOp string + +const ( + VirtualMachineConfigSpecNpivWwnOpGenerate = VirtualMachineConfigSpecNpivWwnOp("generate") + VirtualMachineConfigSpecNpivWwnOpSet = VirtualMachineConfigSpecNpivWwnOp("set") + VirtualMachineConfigSpecNpivWwnOpRemove = VirtualMachineConfigSpecNpivWwnOp("remove") + VirtualMachineConfigSpecNpivWwnOpExtend = VirtualMachineConfigSpecNpivWwnOp("extend") +) + +func init() { + t["VirtualMachineConfigSpecNpivWwnOp"] = reflect.TypeOf((*VirtualMachineConfigSpecNpivWwnOp)(nil)).Elem() +} + +type VirtualMachineConnectionState string + +const ( + VirtualMachineConnectionStateConnected = VirtualMachineConnectionState("connected") + VirtualMachineConnectionStateDisconnected = VirtualMachineConnectionState("disconnected") + VirtualMachineConnectionStateOrphaned = VirtualMachineConnectionState("orphaned") + VirtualMachineConnectionStateInaccessible = VirtualMachineConnectionState("inaccessible") + VirtualMachineConnectionStateInvalid = VirtualMachineConnectionState("invalid") +) + +func init() { + t["VirtualMachineConnectionState"] = reflect.TypeOf((*VirtualMachineConnectionState)(nil)).Elem() +} + +type VirtualMachineDeviceRuntimeInfoVirtualEthernetCardRuntimeStateVmDirectPathGen2InactiveReasonOther string + +const ( + VirtualMachineDeviceRuntimeInfoVirtualEthernetCardRuntimeStateVmDirectPathGen2InactiveReasonOtherVmNptIncompatibleHost = VirtualMachineDeviceRuntimeInfoVirtualEthernetCardRuntimeStateVmDirectPathGen2InactiveReasonOther("vmNptIncompatibleHost") + VirtualMachineDeviceRuntimeInfoVirtualEthernetCardRuntimeStateVmDirectPathGen2InactiveReasonOtherVmNptIncompatibleNetwork = VirtualMachineDeviceRuntimeInfoVirtualEthernetCardRuntimeStateVmDirectPathGen2InactiveReasonOther("vmNptIncompatibleNetwork") +) + +func init() { + t["VirtualMachineDeviceRuntimeInfoVirtualEthernetCardRuntimeStateVmDirectPathGen2InactiveReasonOther"] = reflect.TypeOf((*VirtualMachineDeviceRuntimeInfoVirtualEthernetCardRuntimeStateVmDirectPathGen2InactiveReasonOther)(nil)).Elem() +} + +type VirtualMachineDeviceRuntimeInfoVirtualEthernetCardRuntimeStateVmDirectPathGen2InactiveReasonVm string + +const ( + VirtualMachineDeviceRuntimeInfoVirtualEthernetCardRuntimeStateVmDirectPathGen2InactiveReasonVmVmNptIncompatibleGuest = VirtualMachineDeviceRuntimeInfoVirtualEthernetCardRuntimeStateVmDirectPathGen2InactiveReasonVm("vmNptIncompatibleGuest") + VirtualMachineDeviceRuntimeInfoVirtualEthernetCardRuntimeStateVmDirectPathGen2InactiveReasonVmVmNptIncompatibleGuestDriver = VirtualMachineDeviceRuntimeInfoVirtualEthernetCardRuntimeStateVmDirectPathGen2InactiveReasonVm("vmNptIncompatibleGuestDriver") + VirtualMachineDeviceRuntimeInfoVirtualEthernetCardRuntimeStateVmDirectPathGen2InactiveReasonVmVmNptIncompatibleAdapterType = VirtualMachineDeviceRuntimeInfoVirtualEthernetCardRuntimeStateVmDirectPathGen2InactiveReasonVm("vmNptIncompatibleAdapterType") + VirtualMachineDeviceRuntimeInfoVirtualEthernetCardRuntimeStateVmDirectPathGen2InactiveReasonVmVmNptDisabledOrDisconnectedAdapter = VirtualMachineDeviceRuntimeInfoVirtualEthernetCardRuntimeStateVmDirectPathGen2InactiveReasonVm("vmNptDisabledOrDisconnectedAdapter") + VirtualMachineDeviceRuntimeInfoVirtualEthernetCardRuntimeStateVmDirectPathGen2InactiveReasonVmVmNptIncompatibleAdapterFeatures = VirtualMachineDeviceRuntimeInfoVirtualEthernetCardRuntimeStateVmDirectPathGen2InactiveReasonVm("vmNptIncompatibleAdapterFeatures") + VirtualMachineDeviceRuntimeInfoVirtualEthernetCardRuntimeStateVmDirectPathGen2InactiveReasonVmVmNptIncompatibleBackingType = 
VirtualMachineDeviceRuntimeInfoVirtualEthernetCardRuntimeStateVmDirectPathGen2InactiveReasonVm("vmNptIncompatibleBackingType") + VirtualMachineDeviceRuntimeInfoVirtualEthernetCardRuntimeStateVmDirectPathGen2InactiveReasonVmVmNptInsufficientMemoryReservation = VirtualMachineDeviceRuntimeInfoVirtualEthernetCardRuntimeStateVmDirectPathGen2InactiveReasonVm("vmNptInsufficientMemoryReservation") + VirtualMachineDeviceRuntimeInfoVirtualEthernetCardRuntimeStateVmDirectPathGen2InactiveReasonVmVmNptFaultToleranceOrRecordReplayConfigured = VirtualMachineDeviceRuntimeInfoVirtualEthernetCardRuntimeStateVmDirectPathGen2InactiveReasonVm("vmNptFaultToleranceOrRecordReplayConfigured") + VirtualMachineDeviceRuntimeInfoVirtualEthernetCardRuntimeStateVmDirectPathGen2InactiveReasonVmVmNptConflictingIOChainConfigured = VirtualMachineDeviceRuntimeInfoVirtualEthernetCardRuntimeStateVmDirectPathGen2InactiveReasonVm("vmNptConflictingIOChainConfigured") + VirtualMachineDeviceRuntimeInfoVirtualEthernetCardRuntimeStateVmDirectPathGen2InactiveReasonVmVmNptMonitorBlocks = VirtualMachineDeviceRuntimeInfoVirtualEthernetCardRuntimeStateVmDirectPathGen2InactiveReasonVm("vmNptMonitorBlocks") + VirtualMachineDeviceRuntimeInfoVirtualEthernetCardRuntimeStateVmDirectPathGen2InactiveReasonVmVmNptConflictingOperationInProgress = VirtualMachineDeviceRuntimeInfoVirtualEthernetCardRuntimeStateVmDirectPathGen2InactiveReasonVm("vmNptConflictingOperationInProgress") + VirtualMachineDeviceRuntimeInfoVirtualEthernetCardRuntimeStateVmDirectPathGen2InactiveReasonVmVmNptRuntimeError = VirtualMachineDeviceRuntimeInfoVirtualEthernetCardRuntimeStateVmDirectPathGen2InactiveReasonVm("vmNptRuntimeError") + VirtualMachineDeviceRuntimeInfoVirtualEthernetCardRuntimeStateVmDirectPathGen2InactiveReasonVmVmNptOutOfIntrVector = VirtualMachineDeviceRuntimeInfoVirtualEthernetCardRuntimeStateVmDirectPathGen2InactiveReasonVm("vmNptOutOfIntrVector") + VirtualMachineDeviceRuntimeInfoVirtualEthernetCardRuntimeStateVmDirectPathGen2InactiveReasonVmVmNptVMCIActive = VirtualMachineDeviceRuntimeInfoVirtualEthernetCardRuntimeStateVmDirectPathGen2InactiveReasonVm("vmNptVMCIActive") +) + +func init() { + t["VirtualMachineDeviceRuntimeInfoVirtualEthernetCardRuntimeStateVmDirectPathGen2InactiveReasonVm"] = reflect.TypeOf((*VirtualMachineDeviceRuntimeInfoVirtualEthernetCardRuntimeStateVmDirectPathGen2InactiveReasonVm)(nil)).Elem() +} + +type VirtualMachineFaultToleranceState string + +const ( + VirtualMachineFaultToleranceStateNotConfigured = VirtualMachineFaultToleranceState("notConfigured") + VirtualMachineFaultToleranceStateDisabled = VirtualMachineFaultToleranceState("disabled") + VirtualMachineFaultToleranceStateEnabled = VirtualMachineFaultToleranceState("enabled") + VirtualMachineFaultToleranceStateNeedSecondary = VirtualMachineFaultToleranceState("needSecondary") + VirtualMachineFaultToleranceStateStarting = VirtualMachineFaultToleranceState("starting") + VirtualMachineFaultToleranceStateRunning = VirtualMachineFaultToleranceState("running") +) + +func init() { + t["VirtualMachineFaultToleranceState"] = reflect.TypeOf((*VirtualMachineFaultToleranceState)(nil)).Elem() +} + +type VirtualMachineFaultToleranceType string + +const ( + VirtualMachineFaultToleranceTypeUnset = VirtualMachineFaultToleranceType("unset") + VirtualMachineFaultToleranceTypeRecordReplay = VirtualMachineFaultToleranceType("recordReplay") + VirtualMachineFaultToleranceTypeCheckpointing = VirtualMachineFaultToleranceType("checkpointing") +) + +func init() { + t["VirtualMachineFaultToleranceType"] 
= reflect.TypeOf((*VirtualMachineFaultToleranceType)(nil)).Elem() +} + +type VirtualMachineFileLayoutExFileType string + +const ( + VirtualMachineFileLayoutExFileTypeConfig = VirtualMachineFileLayoutExFileType("config") + VirtualMachineFileLayoutExFileTypeExtendedConfig = VirtualMachineFileLayoutExFileType("extendedConfig") + VirtualMachineFileLayoutExFileTypeDiskDescriptor = VirtualMachineFileLayoutExFileType("diskDescriptor") + VirtualMachineFileLayoutExFileTypeDiskExtent = VirtualMachineFileLayoutExFileType("diskExtent") + VirtualMachineFileLayoutExFileTypeDigestDescriptor = VirtualMachineFileLayoutExFileType("digestDescriptor") + VirtualMachineFileLayoutExFileTypeDigestExtent = VirtualMachineFileLayoutExFileType("digestExtent") + VirtualMachineFileLayoutExFileTypeDiskReplicationState = VirtualMachineFileLayoutExFileType("diskReplicationState") + VirtualMachineFileLayoutExFileTypeLog = VirtualMachineFileLayoutExFileType("log") + VirtualMachineFileLayoutExFileTypeStat = VirtualMachineFileLayoutExFileType("stat") + VirtualMachineFileLayoutExFileTypeNamespaceData = VirtualMachineFileLayoutExFileType("namespaceData") + VirtualMachineFileLayoutExFileTypeNvram = VirtualMachineFileLayoutExFileType("nvram") + VirtualMachineFileLayoutExFileTypeSnapshotData = VirtualMachineFileLayoutExFileType("snapshotData") + VirtualMachineFileLayoutExFileTypeSnapshotMemory = VirtualMachineFileLayoutExFileType("snapshotMemory") + VirtualMachineFileLayoutExFileTypeSnapshotList = VirtualMachineFileLayoutExFileType("snapshotList") + VirtualMachineFileLayoutExFileTypeSnapshotManifestList = VirtualMachineFileLayoutExFileType("snapshotManifestList") + VirtualMachineFileLayoutExFileTypeSuspend = VirtualMachineFileLayoutExFileType("suspend") + VirtualMachineFileLayoutExFileTypeSuspendMemory = VirtualMachineFileLayoutExFileType("suspendMemory") + VirtualMachineFileLayoutExFileTypeSwap = VirtualMachineFileLayoutExFileType("swap") + VirtualMachineFileLayoutExFileTypeUwswap = VirtualMachineFileLayoutExFileType("uwswap") + VirtualMachineFileLayoutExFileTypeCore = VirtualMachineFileLayoutExFileType("core") + VirtualMachineFileLayoutExFileTypeScreenshot = VirtualMachineFileLayoutExFileType("screenshot") + VirtualMachineFileLayoutExFileTypeFtMetadata = VirtualMachineFileLayoutExFileType("ftMetadata") + VirtualMachineFileLayoutExFileTypeGuestCustomization = VirtualMachineFileLayoutExFileType("guestCustomization") +) + +func init() { + t["VirtualMachineFileLayoutExFileType"] = reflect.TypeOf((*VirtualMachineFileLayoutExFileType)(nil)).Elem() +} + +type VirtualMachineFlagInfoMonitorType string + +const ( + VirtualMachineFlagInfoMonitorTypeRelease = VirtualMachineFlagInfoMonitorType("release") + VirtualMachineFlagInfoMonitorTypeDebug = VirtualMachineFlagInfoMonitorType("debug") + VirtualMachineFlagInfoMonitorTypeStats = VirtualMachineFlagInfoMonitorType("stats") +) + +func init() { + t["VirtualMachineFlagInfoMonitorType"] = reflect.TypeOf((*VirtualMachineFlagInfoMonitorType)(nil)).Elem() +} + +type VirtualMachineFlagInfoVirtualExecUsage string + +const ( + VirtualMachineFlagInfoVirtualExecUsageHvAuto = VirtualMachineFlagInfoVirtualExecUsage("hvAuto") + VirtualMachineFlagInfoVirtualExecUsageHvOn = VirtualMachineFlagInfoVirtualExecUsage("hvOn") + VirtualMachineFlagInfoVirtualExecUsageHvOff = VirtualMachineFlagInfoVirtualExecUsage("hvOff") +) + +func init() { + t["VirtualMachineFlagInfoVirtualExecUsage"] = reflect.TypeOf((*VirtualMachineFlagInfoVirtualExecUsage)(nil)).Elem() +} + +type VirtualMachineFlagInfoVirtualMmuUsage string + 
+const ( + VirtualMachineFlagInfoVirtualMmuUsageAutomatic = VirtualMachineFlagInfoVirtualMmuUsage("automatic") + VirtualMachineFlagInfoVirtualMmuUsageOn = VirtualMachineFlagInfoVirtualMmuUsage("on") + VirtualMachineFlagInfoVirtualMmuUsageOff = VirtualMachineFlagInfoVirtualMmuUsage("off") +) + +func init() { + t["VirtualMachineFlagInfoVirtualMmuUsage"] = reflect.TypeOf((*VirtualMachineFlagInfoVirtualMmuUsage)(nil)).Elem() +} + +type VirtualMachineForkConfigInfoChildType string + +const ( + VirtualMachineForkConfigInfoChildTypeNone = VirtualMachineForkConfigInfoChildType("none") + VirtualMachineForkConfigInfoChildTypePersistent = VirtualMachineForkConfigInfoChildType("persistent") + VirtualMachineForkConfigInfoChildTypeNonpersistent = VirtualMachineForkConfigInfoChildType("nonpersistent") +) + +func init() { + t["VirtualMachineForkConfigInfoChildType"] = reflect.TypeOf((*VirtualMachineForkConfigInfoChildType)(nil)).Elem() +} + +type VirtualMachineGuestOsFamily string + +const ( + VirtualMachineGuestOsFamilyWindowsGuest = VirtualMachineGuestOsFamily("windowsGuest") + VirtualMachineGuestOsFamilyLinuxGuest = VirtualMachineGuestOsFamily("linuxGuest") + VirtualMachineGuestOsFamilyNetwareGuest = VirtualMachineGuestOsFamily("netwareGuest") + VirtualMachineGuestOsFamilySolarisGuest = VirtualMachineGuestOsFamily("solarisGuest") + VirtualMachineGuestOsFamilyDarwinGuestFamily = VirtualMachineGuestOsFamily("darwinGuestFamily") + VirtualMachineGuestOsFamilyOtherGuestFamily = VirtualMachineGuestOsFamily("otherGuestFamily") +) + +func init() { + t["VirtualMachineGuestOsFamily"] = reflect.TypeOf((*VirtualMachineGuestOsFamily)(nil)).Elem() +} + +type VirtualMachineGuestOsIdentifier string + +const ( + VirtualMachineGuestOsIdentifierDosGuest = VirtualMachineGuestOsIdentifier("dosGuest") + VirtualMachineGuestOsIdentifierWin31Guest = VirtualMachineGuestOsIdentifier("win31Guest") + VirtualMachineGuestOsIdentifierWin95Guest = VirtualMachineGuestOsIdentifier("win95Guest") + VirtualMachineGuestOsIdentifierWin98Guest = VirtualMachineGuestOsIdentifier("win98Guest") + VirtualMachineGuestOsIdentifierWinMeGuest = VirtualMachineGuestOsIdentifier("winMeGuest") + VirtualMachineGuestOsIdentifierWinNTGuest = VirtualMachineGuestOsIdentifier("winNTGuest") + VirtualMachineGuestOsIdentifierWin2000ProGuest = VirtualMachineGuestOsIdentifier("win2000ProGuest") + VirtualMachineGuestOsIdentifierWin2000ServGuest = VirtualMachineGuestOsIdentifier("win2000ServGuest") + VirtualMachineGuestOsIdentifierWin2000AdvServGuest = VirtualMachineGuestOsIdentifier("win2000AdvServGuest") + VirtualMachineGuestOsIdentifierWinXPHomeGuest = VirtualMachineGuestOsIdentifier("winXPHomeGuest") + VirtualMachineGuestOsIdentifierWinXPProGuest = VirtualMachineGuestOsIdentifier("winXPProGuest") + VirtualMachineGuestOsIdentifierWinXPPro64Guest = VirtualMachineGuestOsIdentifier("winXPPro64Guest") + VirtualMachineGuestOsIdentifierWinNetWebGuest = VirtualMachineGuestOsIdentifier("winNetWebGuest") + VirtualMachineGuestOsIdentifierWinNetStandardGuest = VirtualMachineGuestOsIdentifier("winNetStandardGuest") + VirtualMachineGuestOsIdentifierWinNetEnterpriseGuest = VirtualMachineGuestOsIdentifier("winNetEnterpriseGuest") + VirtualMachineGuestOsIdentifierWinNetDatacenterGuest = VirtualMachineGuestOsIdentifier("winNetDatacenterGuest") + VirtualMachineGuestOsIdentifierWinNetBusinessGuest = VirtualMachineGuestOsIdentifier("winNetBusinessGuest") + VirtualMachineGuestOsIdentifierWinNetStandard64Guest = VirtualMachineGuestOsIdentifier("winNetStandard64Guest") + 
VirtualMachineGuestOsIdentifierWinNetEnterprise64Guest = VirtualMachineGuestOsIdentifier("winNetEnterprise64Guest") + VirtualMachineGuestOsIdentifierWinLonghornGuest = VirtualMachineGuestOsIdentifier("winLonghornGuest") + VirtualMachineGuestOsIdentifierWinLonghorn64Guest = VirtualMachineGuestOsIdentifier("winLonghorn64Guest") + VirtualMachineGuestOsIdentifierWinNetDatacenter64Guest = VirtualMachineGuestOsIdentifier("winNetDatacenter64Guest") + VirtualMachineGuestOsIdentifierWinVistaGuest = VirtualMachineGuestOsIdentifier("winVistaGuest") + VirtualMachineGuestOsIdentifierWinVista64Guest = VirtualMachineGuestOsIdentifier("winVista64Guest") + VirtualMachineGuestOsIdentifierWindows7Guest = VirtualMachineGuestOsIdentifier("windows7Guest") + VirtualMachineGuestOsIdentifierWindows7_64Guest = VirtualMachineGuestOsIdentifier("windows7_64Guest") + VirtualMachineGuestOsIdentifierWindows7Server64Guest = VirtualMachineGuestOsIdentifier("windows7Server64Guest") + VirtualMachineGuestOsIdentifierWindows8Guest = VirtualMachineGuestOsIdentifier("windows8Guest") + VirtualMachineGuestOsIdentifierWindows8_64Guest = VirtualMachineGuestOsIdentifier("windows8_64Guest") + VirtualMachineGuestOsIdentifierWindows8Server64Guest = VirtualMachineGuestOsIdentifier("windows8Server64Guest") + VirtualMachineGuestOsIdentifierWindows9Guest = VirtualMachineGuestOsIdentifier("windows9Guest") + VirtualMachineGuestOsIdentifierWindows9_64Guest = VirtualMachineGuestOsIdentifier("windows9_64Guest") + VirtualMachineGuestOsIdentifierWindows9Server64Guest = VirtualMachineGuestOsIdentifier("windows9Server64Guest") + VirtualMachineGuestOsIdentifierWindowsHyperVGuest = VirtualMachineGuestOsIdentifier("windowsHyperVGuest") + VirtualMachineGuestOsIdentifierFreebsdGuest = VirtualMachineGuestOsIdentifier("freebsdGuest") + VirtualMachineGuestOsIdentifierFreebsd64Guest = VirtualMachineGuestOsIdentifier("freebsd64Guest") + VirtualMachineGuestOsIdentifierRedhatGuest = VirtualMachineGuestOsIdentifier("redhatGuest") + VirtualMachineGuestOsIdentifierRhel2Guest = VirtualMachineGuestOsIdentifier("rhel2Guest") + VirtualMachineGuestOsIdentifierRhel3Guest = VirtualMachineGuestOsIdentifier("rhel3Guest") + VirtualMachineGuestOsIdentifierRhel3_64Guest = VirtualMachineGuestOsIdentifier("rhel3_64Guest") + VirtualMachineGuestOsIdentifierRhel4Guest = VirtualMachineGuestOsIdentifier("rhel4Guest") + VirtualMachineGuestOsIdentifierRhel4_64Guest = VirtualMachineGuestOsIdentifier("rhel4_64Guest") + VirtualMachineGuestOsIdentifierRhel5Guest = VirtualMachineGuestOsIdentifier("rhel5Guest") + VirtualMachineGuestOsIdentifierRhel5_64Guest = VirtualMachineGuestOsIdentifier("rhel5_64Guest") + VirtualMachineGuestOsIdentifierRhel6Guest = VirtualMachineGuestOsIdentifier("rhel6Guest") + VirtualMachineGuestOsIdentifierRhel6_64Guest = VirtualMachineGuestOsIdentifier("rhel6_64Guest") + VirtualMachineGuestOsIdentifierRhel7Guest = VirtualMachineGuestOsIdentifier("rhel7Guest") + VirtualMachineGuestOsIdentifierRhel7_64Guest = VirtualMachineGuestOsIdentifier("rhel7_64Guest") + VirtualMachineGuestOsIdentifierCentosGuest = VirtualMachineGuestOsIdentifier("centosGuest") + VirtualMachineGuestOsIdentifierCentos64Guest = VirtualMachineGuestOsIdentifier("centos64Guest") + VirtualMachineGuestOsIdentifierOracleLinuxGuest = VirtualMachineGuestOsIdentifier("oracleLinuxGuest") + VirtualMachineGuestOsIdentifierOracleLinux64Guest = VirtualMachineGuestOsIdentifier("oracleLinux64Guest") + VirtualMachineGuestOsIdentifierSuseGuest = VirtualMachineGuestOsIdentifier("suseGuest") + 
VirtualMachineGuestOsIdentifierSuse64Guest = VirtualMachineGuestOsIdentifier("suse64Guest") + VirtualMachineGuestOsIdentifierSlesGuest = VirtualMachineGuestOsIdentifier("slesGuest") + VirtualMachineGuestOsIdentifierSles64Guest = VirtualMachineGuestOsIdentifier("sles64Guest") + VirtualMachineGuestOsIdentifierSles10Guest = VirtualMachineGuestOsIdentifier("sles10Guest") + VirtualMachineGuestOsIdentifierSles10_64Guest = VirtualMachineGuestOsIdentifier("sles10_64Guest") + VirtualMachineGuestOsIdentifierSles11Guest = VirtualMachineGuestOsIdentifier("sles11Guest") + VirtualMachineGuestOsIdentifierSles11_64Guest = VirtualMachineGuestOsIdentifier("sles11_64Guest") + VirtualMachineGuestOsIdentifierSles12Guest = VirtualMachineGuestOsIdentifier("sles12Guest") + VirtualMachineGuestOsIdentifierSles12_64Guest = VirtualMachineGuestOsIdentifier("sles12_64Guest") + VirtualMachineGuestOsIdentifierNld9Guest = VirtualMachineGuestOsIdentifier("nld9Guest") + VirtualMachineGuestOsIdentifierOesGuest = VirtualMachineGuestOsIdentifier("oesGuest") + VirtualMachineGuestOsIdentifierSjdsGuest = VirtualMachineGuestOsIdentifier("sjdsGuest") + VirtualMachineGuestOsIdentifierMandrakeGuest = VirtualMachineGuestOsIdentifier("mandrakeGuest") + VirtualMachineGuestOsIdentifierMandrivaGuest = VirtualMachineGuestOsIdentifier("mandrivaGuest") + VirtualMachineGuestOsIdentifierMandriva64Guest = VirtualMachineGuestOsIdentifier("mandriva64Guest") + VirtualMachineGuestOsIdentifierTurboLinuxGuest = VirtualMachineGuestOsIdentifier("turboLinuxGuest") + VirtualMachineGuestOsIdentifierTurboLinux64Guest = VirtualMachineGuestOsIdentifier("turboLinux64Guest") + VirtualMachineGuestOsIdentifierUbuntuGuest = VirtualMachineGuestOsIdentifier("ubuntuGuest") + VirtualMachineGuestOsIdentifierUbuntu64Guest = VirtualMachineGuestOsIdentifier("ubuntu64Guest") + VirtualMachineGuestOsIdentifierDebian4Guest = VirtualMachineGuestOsIdentifier("debian4Guest") + VirtualMachineGuestOsIdentifierDebian4_64Guest = VirtualMachineGuestOsIdentifier("debian4_64Guest") + VirtualMachineGuestOsIdentifierDebian5Guest = VirtualMachineGuestOsIdentifier("debian5Guest") + VirtualMachineGuestOsIdentifierDebian5_64Guest = VirtualMachineGuestOsIdentifier("debian5_64Guest") + VirtualMachineGuestOsIdentifierDebian6Guest = VirtualMachineGuestOsIdentifier("debian6Guest") + VirtualMachineGuestOsIdentifierDebian6_64Guest = VirtualMachineGuestOsIdentifier("debian6_64Guest") + VirtualMachineGuestOsIdentifierDebian7Guest = VirtualMachineGuestOsIdentifier("debian7Guest") + VirtualMachineGuestOsIdentifierDebian7_64Guest = VirtualMachineGuestOsIdentifier("debian7_64Guest") + VirtualMachineGuestOsIdentifierDebian8Guest = VirtualMachineGuestOsIdentifier("debian8Guest") + VirtualMachineGuestOsIdentifierDebian8_64Guest = VirtualMachineGuestOsIdentifier("debian8_64Guest") + VirtualMachineGuestOsIdentifierAsianux3Guest = VirtualMachineGuestOsIdentifier("asianux3Guest") + VirtualMachineGuestOsIdentifierAsianux3_64Guest = VirtualMachineGuestOsIdentifier("asianux3_64Guest") + VirtualMachineGuestOsIdentifierAsianux4Guest = VirtualMachineGuestOsIdentifier("asianux4Guest") + VirtualMachineGuestOsIdentifierAsianux4_64Guest = VirtualMachineGuestOsIdentifier("asianux4_64Guest") + VirtualMachineGuestOsIdentifierAsianux5_64Guest = VirtualMachineGuestOsIdentifier("asianux5_64Guest") + VirtualMachineGuestOsIdentifierOpensuseGuest = VirtualMachineGuestOsIdentifier("opensuseGuest") + VirtualMachineGuestOsIdentifierOpensuse64Guest = VirtualMachineGuestOsIdentifier("opensuse64Guest") + 
VirtualMachineGuestOsIdentifierFedoraGuest = VirtualMachineGuestOsIdentifier("fedoraGuest") + VirtualMachineGuestOsIdentifierFedora64Guest = VirtualMachineGuestOsIdentifier("fedora64Guest") + VirtualMachineGuestOsIdentifierCoreos64Guest = VirtualMachineGuestOsIdentifier("coreos64Guest") + VirtualMachineGuestOsIdentifierOther24xLinuxGuest = VirtualMachineGuestOsIdentifier("other24xLinuxGuest") + VirtualMachineGuestOsIdentifierOther26xLinuxGuest = VirtualMachineGuestOsIdentifier("other26xLinuxGuest") + VirtualMachineGuestOsIdentifierOtherLinuxGuest = VirtualMachineGuestOsIdentifier("otherLinuxGuest") + VirtualMachineGuestOsIdentifierOther3xLinuxGuest = VirtualMachineGuestOsIdentifier("other3xLinuxGuest") + VirtualMachineGuestOsIdentifierGenericLinuxGuest = VirtualMachineGuestOsIdentifier("genericLinuxGuest") + VirtualMachineGuestOsIdentifierOther24xLinux64Guest = VirtualMachineGuestOsIdentifier("other24xLinux64Guest") + VirtualMachineGuestOsIdentifierOther26xLinux64Guest = VirtualMachineGuestOsIdentifier("other26xLinux64Guest") + VirtualMachineGuestOsIdentifierOther3xLinux64Guest = VirtualMachineGuestOsIdentifier("other3xLinux64Guest") + VirtualMachineGuestOsIdentifierOtherLinux64Guest = VirtualMachineGuestOsIdentifier("otherLinux64Guest") + VirtualMachineGuestOsIdentifierSolaris6Guest = VirtualMachineGuestOsIdentifier("solaris6Guest") + VirtualMachineGuestOsIdentifierSolaris7Guest = VirtualMachineGuestOsIdentifier("solaris7Guest") + VirtualMachineGuestOsIdentifierSolaris8Guest = VirtualMachineGuestOsIdentifier("solaris8Guest") + VirtualMachineGuestOsIdentifierSolaris9Guest = VirtualMachineGuestOsIdentifier("solaris9Guest") + VirtualMachineGuestOsIdentifierSolaris10Guest = VirtualMachineGuestOsIdentifier("solaris10Guest") + VirtualMachineGuestOsIdentifierSolaris10_64Guest = VirtualMachineGuestOsIdentifier("solaris10_64Guest") + VirtualMachineGuestOsIdentifierSolaris11_64Guest = VirtualMachineGuestOsIdentifier("solaris11_64Guest") + VirtualMachineGuestOsIdentifierOs2Guest = VirtualMachineGuestOsIdentifier("os2Guest") + VirtualMachineGuestOsIdentifierEComStationGuest = VirtualMachineGuestOsIdentifier("eComStationGuest") + VirtualMachineGuestOsIdentifierEComStation2Guest = VirtualMachineGuestOsIdentifier("eComStation2Guest") + VirtualMachineGuestOsIdentifierNetware4Guest = VirtualMachineGuestOsIdentifier("netware4Guest") + VirtualMachineGuestOsIdentifierNetware5Guest = VirtualMachineGuestOsIdentifier("netware5Guest") + VirtualMachineGuestOsIdentifierNetware6Guest = VirtualMachineGuestOsIdentifier("netware6Guest") + VirtualMachineGuestOsIdentifierOpenServer5Guest = VirtualMachineGuestOsIdentifier("openServer5Guest") + VirtualMachineGuestOsIdentifierOpenServer6Guest = VirtualMachineGuestOsIdentifier("openServer6Guest") + VirtualMachineGuestOsIdentifierUnixWare7Guest = VirtualMachineGuestOsIdentifier("unixWare7Guest") + VirtualMachineGuestOsIdentifierDarwinGuest = VirtualMachineGuestOsIdentifier("darwinGuest") + VirtualMachineGuestOsIdentifierDarwin64Guest = VirtualMachineGuestOsIdentifier("darwin64Guest") + VirtualMachineGuestOsIdentifierDarwin10Guest = VirtualMachineGuestOsIdentifier("darwin10Guest") + VirtualMachineGuestOsIdentifierDarwin10_64Guest = VirtualMachineGuestOsIdentifier("darwin10_64Guest") + VirtualMachineGuestOsIdentifierDarwin11Guest = VirtualMachineGuestOsIdentifier("darwin11Guest") + VirtualMachineGuestOsIdentifierDarwin11_64Guest = VirtualMachineGuestOsIdentifier("darwin11_64Guest") + VirtualMachineGuestOsIdentifierDarwin12_64Guest = 
VirtualMachineGuestOsIdentifier("darwin12_64Guest") + VirtualMachineGuestOsIdentifierDarwin13_64Guest = VirtualMachineGuestOsIdentifier("darwin13_64Guest") + VirtualMachineGuestOsIdentifierDarwin14_64Guest = VirtualMachineGuestOsIdentifier("darwin14_64Guest") + VirtualMachineGuestOsIdentifierVmkernelGuest = VirtualMachineGuestOsIdentifier("vmkernelGuest") + VirtualMachineGuestOsIdentifierVmkernel5Guest = VirtualMachineGuestOsIdentifier("vmkernel5Guest") + VirtualMachineGuestOsIdentifierVmkernel6Guest = VirtualMachineGuestOsIdentifier("vmkernel6Guest") + VirtualMachineGuestOsIdentifierOtherGuest = VirtualMachineGuestOsIdentifier("otherGuest") + VirtualMachineGuestOsIdentifierOtherGuest64 = VirtualMachineGuestOsIdentifier("otherGuest64") +) + +func init() { + t["VirtualMachineGuestOsIdentifier"] = reflect.TypeOf((*VirtualMachineGuestOsIdentifier)(nil)).Elem() +} + +type VirtualMachineGuestState string + +const ( + VirtualMachineGuestStateRunning = VirtualMachineGuestState("running") + VirtualMachineGuestStateShuttingDown = VirtualMachineGuestState("shuttingDown") + VirtualMachineGuestStateResetting = VirtualMachineGuestState("resetting") + VirtualMachineGuestStateStandby = VirtualMachineGuestState("standby") + VirtualMachineGuestStateNotRunning = VirtualMachineGuestState("notRunning") + VirtualMachineGuestStateUnknown = VirtualMachineGuestState("unknown") +) + +func init() { + t["VirtualMachineGuestState"] = reflect.TypeOf((*VirtualMachineGuestState)(nil)).Elem() +} + +type VirtualMachineHtSharing string + +const ( + VirtualMachineHtSharingAny = VirtualMachineHtSharing("any") + VirtualMachineHtSharingNone = VirtualMachineHtSharing("none") + VirtualMachineHtSharingInternal = VirtualMachineHtSharing("internal") +) + +func init() { + t["VirtualMachineHtSharing"] = reflect.TypeOf((*VirtualMachineHtSharing)(nil)).Elem() +} + +type VirtualMachineMemoryAllocationPolicy string + +const ( + VirtualMachineMemoryAllocationPolicySwapNone = VirtualMachineMemoryAllocationPolicy("swapNone") + VirtualMachineMemoryAllocationPolicySwapSome = VirtualMachineMemoryAllocationPolicy("swapSome") + VirtualMachineMemoryAllocationPolicySwapMost = VirtualMachineMemoryAllocationPolicy("swapMost") +) + +func init() { + t["VirtualMachineMemoryAllocationPolicy"] = reflect.TypeOf((*VirtualMachineMemoryAllocationPolicy)(nil)).Elem() +} + +type VirtualMachineMetadataManagerVmMetadataOp string + +const ( + VirtualMachineMetadataManagerVmMetadataOpUpdate = VirtualMachineMetadataManagerVmMetadataOp("Update") + VirtualMachineMetadataManagerVmMetadataOpRemove = VirtualMachineMetadataManagerVmMetadataOp("Remove") +) + +func init() { + t["VirtualMachineMetadataManagerVmMetadataOp"] = reflect.TypeOf((*VirtualMachineMetadataManagerVmMetadataOp)(nil)).Elem() +} + +type VirtualMachineMetadataManagerVmMetadataOwnerOwner string + +const ( + VirtualMachineMetadataManagerVmMetadataOwnerOwnerComVmwareVsphereHA = VirtualMachineMetadataManagerVmMetadataOwnerOwner("ComVmwareVsphereHA") +) + +func init() { + t["VirtualMachineMetadataManagerVmMetadataOwnerOwner"] = reflect.TypeOf((*VirtualMachineMetadataManagerVmMetadataOwnerOwner)(nil)).Elem() +} + +type VirtualMachineMovePriority string + +const ( + VirtualMachineMovePriorityLowPriority = VirtualMachineMovePriority("lowPriority") + VirtualMachineMovePriorityHighPriority = VirtualMachineMovePriority("highPriority") + VirtualMachineMovePriorityDefaultPriority = VirtualMachineMovePriority("defaultPriority") +) + +func init() { + t["VirtualMachineMovePriority"] = 
reflect.TypeOf((*VirtualMachineMovePriority)(nil)).Elem() +} + +type VirtualMachineNeedSecondaryReason string + +const ( + VirtualMachineNeedSecondaryReasonInitializing = VirtualMachineNeedSecondaryReason("initializing") + VirtualMachineNeedSecondaryReasonDivergence = VirtualMachineNeedSecondaryReason("divergence") + VirtualMachineNeedSecondaryReasonLostConnection = VirtualMachineNeedSecondaryReason("lostConnection") + VirtualMachineNeedSecondaryReasonPartialHardwareFailure = VirtualMachineNeedSecondaryReason("partialHardwareFailure") + VirtualMachineNeedSecondaryReasonUserAction = VirtualMachineNeedSecondaryReason("userAction") + VirtualMachineNeedSecondaryReasonCheckpointError = VirtualMachineNeedSecondaryReason("checkpointError") + VirtualMachineNeedSecondaryReasonOther = VirtualMachineNeedSecondaryReason("other") +) + +func init() { + t["VirtualMachineNeedSecondaryReason"] = reflect.TypeOf((*VirtualMachineNeedSecondaryReason)(nil)).Elem() +} + +type VirtualMachinePowerOffBehavior string + +const ( + VirtualMachinePowerOffBehaviorPowerOff = VirtualMachinePowerOffBehavior("powerOff") + VirtualMachinePowerOffBehaviorRevert = VirtualMachinePowerOffBehavior("revert") + VirtualMachinePowerOffBehaviorPrompt = VirtualMachinePowerOffBehavior("prompt") + VirtualMachinePowerOffBehaviorTake = VirtualMachinePowerOffBehavior("take") +) + +func init() { + t["VirtualMachinePowerOffBehavior"] = reflect.TypeOf((*VirtualMachinePowerOffBehavior)(nil)).Elem() +} + +type VirtualMachinePowerOpType string + +const ( + VirtualMachinePowerOpTypeSoft = VirtualMachinePowerOpType("soft") + VirtualMachinePowerOpTypeHard = VirtualMachinePowerOpType("hard") + VirtualMachinePowerOpTypePreset = VirtualMachinePowerOpType("preset") +) + +func init() { + t["VirtualMachinePowerOpType"] = reflect.TypeOf((*VirtualMachinePowerOpType)(nil)).Elem() +} + +type VirtualMachinePowerState string + +const ( + VirtualMachinePowerStatePoweredOff = VirtualMachinePowerState("poweredOff") + VirtualMachinePowerStatePoweredOn = VirtualMachinePowerState("poweredOn") + VirtualMachinePowerStateSuspended = VirtualMachinePowerState("suspended") +) + +func init() { + t["VirtualMachinePowerState"] = reflect.TypeOf((*VirtualMachinePowerState)(nil)).Elem() +} + +type VirtualMachineRecordReplayState string + +const ( + VirtualMachineRecordReplayStateRecording = VirtualMachineRecordReplayState("recording") + VirtualMachineRecordReplayStateReplaying = VirtualMachineRecordReplayState("replaying") + VirtualMachineRecordReplayStateInactive = VirtualMachineRecordReplayState("inactive") +) + +func init() { + t["VirtualMachineRecordReplayState"] = reflect.TypeOf((*VirtualMachineRecordReplayState)(nil)).Elem() +} + +type VirtualMachineRelocateDiskMoveOptions string + +const ( + VirtualMachineRelocateDiskMoveOptionsMoveAllDiskBackingsAndAllowSharing = VirtualMachineRelocateDiskMoveOptions("moveAllDiskBackingsAndAllowSharing") + VirtualMachineRelocateDiskMoveOptionsMoveAllDiskBackingsAndDisallowSharing = VirtualMachineRelocateDiskMoveOptions("moveAllDiskBackingsAndDisallowSharing") + VirtualMachineRelocateDiskMoveOptionsMoveChildMostDiskBacking = VirtualMachineRelocateDiskMoveOptions("moveChildMostDiskBacking") + VirtualMachineRelocateDiskMoveOptionsCreateNewChildDiskBacking = VirtualMachineRelocateDiskMoveOptions("createNewChildDiskBacking") + VirtualMachineRelocateDiskMoveOptionsMoveAllDiskBackingsAndConsolidate = VirtualMachineRelocateDiskMoveOptions("moveAllDiskBackingsAndConsolidate") +) + +func init() { + t["VirtualMachineRelocateDiskMoveOptions"] = 
reflect.TypeOf((*VirtualMachineRelocateDiskMoveOptions)(nil)).Elem() +} + +type VirtualMachineRelocateTransformation string + +const ( + VirtualMachineRelocateTransformationFlat = VirtualMachineRelocateTransformation("flat") + VirtualMachineRelocateTransformationSparse = VirtualMachineRelocateTransformation("sparse") +) + +func init() { + t["VirtualMachineRelocateTransformation"] = reflect.TypeOf((*VirtualMachineRelocateTransformation)(nil)).Elem() +} + +type VirtualMachineScsiPassthroughType string + +const ( + VirtualMachineScsiPassthroughTypeDisk = VirtualMachineScsiPassthroughType("disk") + VirtualMachineScsiPassthroughTypeTape = VirtualMachineScsiPassthroughType("tape") + VirtualMachineScsiPassthroughTypePrinter = VirtualMachineScsiPassthroughType("printer") + VirtualMachineScsiPassthroughTypeProcessor = VirtualMachineScsiPassthroughType("processor") + VirtualMachineScsiPassthroughTypeWorm = VirtualMachineScsiPassthroughType("worm") + VirtualMachineScsiPassthroughTypeCdrom = VirtualMachineScsiPassthroughType("cdrom") + VirtualMachineScsiPassthroughTypeScanner = VirtualMachineScsiPassthroughType("scanner") + VirtualMachineScsiPassthroughTypeOptical = VirtualMachineScsiPassthroughType("optical") + VirtualMachineScsiPassthroughTypeMedia = VirtualMachineScsiPassthroughType("media") + VirtualMachineScsiPassthroughTypeCom = VirtualMachineScsiPassthroughType("com") + VirtualMachineScsiPassthroughTypeRaid = VirtualMachineScsiPassthroughType("raid") + VirtualMachineScsiPassthroughTypeUnknown = VirtualMachineScsiPassthroughType("unknown") +) + +func init() { + t["VirtualMachineScsiPassthroughType"] = reflect.TypeOf((*VirtualMachineScsiPassthroughType)(nil)).Elem() +} + +type VirtualMachineStandbyActionType string + +const ( + VirtualMachineStandbyActionTypeCheckpoint = VirtualMachineStandbyActionType("checkpoint") + VirtualMachineStandbyActionTypePowerOnSuspend = VirtualMachineStandbyActionType("powerOnSuspend") +) + +func init() { + t["VirtualMachineStandbyActionType"] = reflect.TypeOf((*VirtualMachineStandbyActionType)(nil)).Elem() +} + +type VirtualMachineTargetInfoConfigurationTag string + +const ( + VirtualMachineTargetInfoConfigurationTagCompliant = VirtualMachineTargetInfoConfigurationTag("compliant") + VirtualMachineTargetInfoConfigurationTagClusterWide = VirtualMachineTargetInfoConfigurationTag("clusterWide") +) + +func init() { + t["VirtualMachineTargetInfoConfigurationTag"] = reflect.TypeOf((*VirtualMachineTargetInfoConfigurationTag)(nil)).Elem() +} + +type VirtualMachineTicketType string + +const ( + VirtualMachineTicketTypeMks = VirtualMachineTicketType("mks") + VirtualMachineTicketTypeDevice = VirtualMachineTicketType("device") + VirtualMachineTicketTypeGuestControl = VirtualMachineTicketType("guestControl") + VirtualMachineTicketTypeWebmks = VirtualMachineTicketType("webmks") +) + +func init() { + t["VirtualMachineTicketType"] = reflect.TypeOf((*VirtualMachineTicketType)(nil)).Elem() +} + +type VirtualMachineToolsRunningStatus string + +const ( + VirtualMachineToolsRunningStatusGuestToolsNotRunning = VirtualMachineToolsRunningStatus("guestToolsNotRunning") + VirtualMachineToolsRunningStatusGuestToolsRunning = VirtualMachineToolsRunningStatus("guestToolsRunning") + VirtualMachineToolsRunningStatusGuestToolsExecutingScripts = VirtualMachineToolsRunningStatus("guestToolsExecutingScripts") +) + +func init() { + t["VirtualMachineToolsRunningStatus"] = reflect.TypeOf((*VirtualMachineToolsRunningStatus)(nil)).Elem() +} + +type VirtualMachineToolsStatus string + +const ( + 
VirtualMachineToolsStatusToolsNotInstalled = VirtualMachineToolsStatus("toolsNotInstalled") + VirtualMachineToolsStatusToolsNotRunning = VirtualMachineToolsStatus("toolsNotRunning") + VirtualMachineToolsStatusToolsOld = VirtualMachineToolsStatus("toolsOld") + VirtualMachineToolsStatusToolsOk = VirtualMachineToolsStatus("toolsOk") +) + +func init() { + t["VirtualMachineToolsStatus"] = reflect.TypeOf((*VirtualMachineToolsStatus)(nil)).Elem() +} + +type VirtualMachineToolsVersionStatus string + +const ( + VirtualMachineToolsVersionStatusGuestToolsNotInstalled = VirtualMachineToolsVersionStatus("guestToolsNotInstalled") + VirtualMachineToolsVersionStatusGuestToolsNeedUpgrade = VirtualMachineToolsVersionStatus("guestToolsNeedUpgrade") + VirtualMachineToolsVersionStatusGuestToolsCurrent = VirtualMachineToolsVersionStatus("guestToolsCurrent") + VirtualMachineToolsVersionStatusGuestToolsUnmanaged = VirtualMachineToolsVersionStatus("guestToolsUnmanaged") + VirtualMachineToolsVersionStatusGuestToolsTooOld = VirtualMachineToolsVersionStatus("guestToolsTooOld") + VirtualMachineToolsVersionStatusGuestToolsSupportedOld = VirtualMachineToolsVersionStatus("guestToolsSupportedOld") + VirtualMachineToolsVersionStatusGuestToolsSupportedNew = VirtualMachineToolsVersionStatus("guestToolsSupportedNew") + VirtualMachineToolsVersionStatusGuestToolsTooNew = VirtualMachineToolsVersionStatus("guestToolsTooNew") + VirtualMachineToolsVersionStatusGuestToolsBlacklisted = VirtualMachineToolsVersionStatus("guestToolsBlacklisted") +) + +func init() { + t["VirtualMachineToolsVersionStatus"] = reflect.TypeOf((*VirtualMachineToolsVersionStatus)(nil)).Elem() +} + +type VirtualMachineUsbInfoFamily string + +const ( + VirtualMachineUsbInfoFamilyAudio = VirtualMachineUsbInfoFamily("audio") + VirtualMachineUsbInfoFamilyHid = VirtualMachineUsbInfoFamily("hid") + VirtualMachineUsbInfoFamilyHid_bootable = VirtualMachineUsbInfoFamily("hid_bootable") + VirtualMachineUsbInfoFamilyPhysical = VirtualMachineUsbInfoFamily("physical") + VirtualMachineUsbInfoFamilyCommunication = VirtualMachineUsbInfoFamily("communication") + VirtualMachineUsbInfoFamilyImaging = VirtualMachineUsbInfoFamily("imaging") + VirtualMachineUsbInfoFamilyPrinter = VirtualMachineUsbInfoFamily("printer") + VirtualMachineUsbInfoFamilyStorage = VirtualMachineUsbInfoFamily("storage") + VirtualMachineUsbInfoFamilyHub = VirtualMachineUsbInfoFamily("hub") + VirtualMachineUsbInfoFamilySmart_card = VirtualMachineUsbInfoFamily("smart_card") + VirtualMachineUsbInfoFamilySecurity = VirtualMachineUsbInfoFamily("security") + VirtualMachineUsbInfoFamilyVideo = VirtualMachineUsbInfoFamily("video") + VirtualMachineUsbInfoFamilyWireless = VirtualMachineUsbInfoFamily("wireless") + VirtualMachineUsbInfoFamilyBluetooth = VirtualMachineUsbInfoFamily("bluetooth") + VirtualMachineUsbInfoFamilyWusb = VirtualMachineUsbInfoFamily("wusb") + VirtualMachineUsbInfoFamilyPda = VirtualMachineUsbInfoFamily("pda") + VirtualMachineUsbInfoFamilyVendor_specific = VirtualMachineUsbInfoFamily("vendor_specific") + VirtualMachineUsbInfoFamilyOther = VirtualMachineUsbInfoFamily("other") + VirtualMachineUsbInfoFamilyUnknownFamily = VirtualMachineUsbInfoFamily("unknownFamily") +) + +func init() { + t["VirtualMachineUsbInfoFamily"] = reflect.TypeOf((*VirtualMachineUsbInfoFamily)(nil)).Elem() +} + +type VirtualMachineUsbInfoSpeed string + +const ( + VirtualMachineUsbInfoSpeedLow = VirtualMachineUsbInfoSpeed("low") + VirtualMachineUsbInfoSpeedFull = VirtualMachineUsbInfoSpeed("full") + 
VirtualMachineUsbInfoSpeedHigh = VirtualMachineUsbInfoSpeed("high") + VirtualMachineUsbInfoSpeedSuperSpeed = VirtualMachineUsbInfoSpeed("superSpeed") + VirtualMachineUsbInfoSpeedUnknownSpeed = VirtualMachineUsbInfoSpeed("unknownSpeed") +) + +func init() { + t["VirtualMachineUsbInfoSpeed"] = reflect.TypeOf((*VirtualMachineUsbInfoSpeed)(nil)).Elem() +} + +type VirtualMachineVMCIDeviceAction string + +const ( + VirtualMachineVMCIDeviceActionAllow = VirtualMachineVMCIDeviceAction("allow") + VirtualMachineVMCIDeviceActionDeny = VirtualMachineVMCIDeviceAction("deny") +) + +func init() { + t["VirtualMachineVMCIDeviceAction"] = reflect.TypeOf((*VirtualMachineVMCIDeviceAction)(nil)).Elem() +} + +type VirtualMachineVMCIDeviceDirection string + +const ( + VirtualMachineVMCIDeviceDirectionGuest = VirtualMachineVMCIDeviceDirection("guest") + VirtualMachineVMCIDeviceDirectionHost = VirtualMachineVMCIDeviceDirection("host") + VirtualMachineVMCIDeviceDirectionAnyDirection = VirtualMachineVMCIDeviceDirection("anyDirection") +) + +func init() { + t["VirtualMachineVMCIDeviceDirection"] = reflect.TypeOf((*VirtualMachineVMCIDeviceDirection)(nil)).Elem() +} + +type VirtualMachineVMCIDeviceProtocol string + +const ( + VirtualMachineVMCIDeviceProtocolHypervisor = VirtualMachineVMCIDeviceProtocol("hypervisor") + VirtualMachineVMCIDeviceProtocolDoorbell = VirtualMachineVMCIDeviceProtocol("doorbell") + VirtualMachineVMCIDeviceProtocolQueuepair = VirtualMachineVMCIDeviceProtocol("queuepair") + VirtualMachineVMCIDeviceProtocolDatagram = VirtualMachineVMCIDeviceProtocol("datagram") + VirtualMachineVMCIDeviceProtocolStream = VirtualMachineVMCIDeviceProtocol("stream") + VirtualMachineVMCIDeviceProtocolAnyProtocol = VirtualMachineVMCIDeviceProtocol("anyProtocol") +) + +func init() { + t["VirtualMachineVMCIDeviceProtocol"] = reflect.TypeOf((*VirtualMachineVMCIDeviceProtocol)(nil)).Elem() +} + +type VirtualMachineVideoCardUse3dRenderer string + +const ( + VirtualMachineVideoCardUse3dRendererAutomatic = VirtualMachineVideoCardUse3dRenderer("automatic") + VirtualMachineVideoCardUse3dRendererSoftware = VirtualMachineVideoCardUse3dRenderer("software") + VirtualMachineVideoCardUse3dRendererHardware = VirtualMachineVideoCardUse3dRenderer("hardware") +) + +func init() { + t["VirtualMachineVideoCardUse3dRenderer"] = reflect.TypeOf((*VirtualMachineVideoCardUse3dRenderer)(nil)).Elem() +} + +type VirtualPointingDeviceHostChoice string + +const ( + VirtualPointingDeviceHostChoiceAutodetect = VirtualPointingDeviceHostChoice("autodetect") + VirtualPointingDeviceHostChoiceIntellimouseExplorer = VirtualPointingDeviceHostChoice("intellimouseExplorer") + VirtualPointingDeviceHostChoiceIntellimousePs2 = VirtualPointingDeviceHostChoice("intellimousePs2") + VirtualPointingDeviceHostChoiceLogitechMouseman = VirtualPointingDeviceHostChoice("logitechMouseman") + VirtualPointingDeviceHostChoiceMicrosoft_serial = VirtualPointingDeviceHostChoice("microsoft_serial") + VirtualPointingDeviceHostChoiceMouseSystems = VirtualPointingDeviceHostChoice("mouseSystems") + VirtualPointingDeviceHostChoiceMousemanSerial = VirtualPointingDeviceHostChoice("mousemanSerial") + VirtualPointingDeviceHostChoicePs2 = VirtualPointingDeviceHostChoice("ps2") +) + +func init() { + t["VirtualPointingDeviceHostChoice"] = reflect.TypeOf((*VirtualPointingDeviceHostChoice)(nil)).Elem() +} + +type VirtualSCSISharing string + +const ( + VirtualSCSISharingNoSharing = VirtualSCSISharing("noSharing") + VirtualSCSISharingVirtualSharing = VirtualSCSISharing("virtualSharing") + 
VirtualSCSISharingPhysicalSharing = VirtualSCSISharing("physicalSharing") +) + +func init() { + t["VirtualSCSISharing"] = reflect.TypeOf((*VirtualSCSISharing)(nil)).Elem() +} + +type VirtualSerialPortEndPoint string + +const ( + VirtualSerialPortEndPointClient = VirtualSerialPortEndPoint("client") + VirtualSerialPortEndPointServer = VirtualSerialPortEndPoint("server") +) + +func init() { + t["VirtualSerialPortEndPoint"] = reflect.TypeOf((*VirtualSerialPortEndPoint)(nil)).Elem() +} + +type VmDasBeingResetEventReasonCode string + +const ( + VmDasBeingResetEventReasonCodeVmtoolsHeartbeatFailure = VmDasBeingResetEventReasonCode("vmtoolsHeartbeatFailure") + VmDasBeingResetEventReasonCodeAppHeartbeatFailure = VmDasBeingResetEventReasonCode("appHeartbeatFailure") + VmDasBeingResetEventReasonCodeAppImmediateResetRequest = VmDasBeingResetEventReasonCode("appImmediateResetRequest") + VmDasBeingResetEventReasonCodeVmcpResetApdCleared = VmDasBeingResetEventReasonCode("vmcpResetApdCleared") +) + +func init() { + t["VmDasBeingResetEventReasonCode"] = reflect.TypeOf((*VmDasBeingResetEventReasonCode)(nil)).Elem() +} + +type VmFailedStartingSecondaryEventFailureReason string + +const ( + VmFailedStartingSecondaryEventFailureReasonIncompatibleHost = VmFailedStartingSecondaryEventFailureReason("incompatibleHost") + VmFailedStartingSecondaryEventFailureReasonLoginFailed = VmFailedStartingSecondaryEventFailureReason("loginFailed") + VmFailedStartingSecondaryEventFailureReasonRegisterVmFailed = VmFailedStartingSecondaryEventFailureReason("registerVmFailed") + VmFailedStartingSecondaryEventFailureReasonMigrateFailed = VmFailedStartingSecondaryEventFailureReason("migrateFailed") +) + +func init() { + t["VmFailedStartingSecondaryEventFailureReason"] = reflect.TypeOf((*VmFailedStartingSecondaryEventFailureReason)(nil)).Elem() +} + +type VmFaultToleranceConfigIssueReasonForIssue string + +const ( + VmFaultToleranceConfigIssueReasonForIssueHaNotEnabled = VmFaultToleranceConfigIssueReasonForIssue("haNotEnabled") + VmFaultToleranceConfigIssueReasonForIssueMoreThanOneSecondary = VmFaultToleranceConfigIssueReasonForIssue("moreThanOneSecondary") + VmFaultToleranceConfigIssueReasonForIssueRecordReplayNotSupported = VmFaultToleranceConfigIssueReasonForIssue("recordReplayNotSupported") + VmFaultToleranceConfigIssueReasonForIssueReplayNotSupported = VmFaultToleranceConfigIssueReasonForIssue("replayNotSupported") + VmFaultToleranceConfigIssueReasonForIssueTemplateVm = VmFaultToleranceConfigIssueReasonForIssue("templateVm") + VmFaultToleranceConfigIssueReasonForIssueMultipleVCPU = VmFaultToleranceConfigIssueReasonForIssue("multipleVCPU") + VmFaultToleranceConfigIssueReasonForIssueHostInactive = VmFaultToleranceConfigIssueReasonForIssue("hostInactive") + VmFaultToleranceConfigIssueReasonForIssueFtUnsupportedHardware = VmFaultToleranceConfigIssueReasonForIssue("ftUnsupportedHardware") + VmFaultToleranceConfigIssueReasonForIssueFtUnsupportedProduct = VmFaultToleranceConfigIssueReasonForIssue("ftUnsupportedProduct") + VmFaultToleranceConfigIssueReasonForIssueMissingVMotionNic = VmFaultToleranceConfigIssueReasonForIssue("missingVMotionNic") + VmFaultToleranceConfigIssueReasonForIssueMissingFTLoggingNic = VmFaultToleranceConfigIssueReasonForIssue("missingFTLoggingNic") + VmFaultToleranceConfigIssueReasonForIssueThinDisk = VmFaultToleranceConfigIssueReasonForIssue("thinDisk") + VmFaultToleranceConfigIssueReasonForIssueVerifySSLCertificateFlagNotSet = VmFaultToleranceConfigIssueReasonForIssue("verifySSLCertificateFlagNotSet") + 
VmFaultToleranceConfigIssueReasonForIssueHasSnapshots = VmFaultToleranceConfigIssueReasonForIssue("hasSnapshots") + VmFaultToleranceConfigIssueReasonForIssueNoConfig = VmFaultToleranceConfigIssueReasonForIssue("noConfig") + VmFaultToleranceConfigIssueReasonForIssueFtSecondaryVm = VmFaultToleranceConfigIssueReasonForIssue("ftSecondaryVm") + VmFaultToleranceConfigIssueReasonForIssueHasLocalDisk = VmFaultToleranceConfigIssueReasonForIssue("hasLocalDisk") + VmFaultToleranceConfigIssueReasonForIssueEsxAgentVm = VmFaultToleranceConfigIssueReasonForIssue("esxAgentVm") + VmFaultToleranceConfigIssueReasonForIssueVideo3dEnabled = VmFaultToleranceConfigIssueReasonForIssue("video3dEnabled") + VmFaultToleranceConfigIssueReasonForIssueHasUnsupportedDisk = VmFaultToleranceConfigIssueReasonForIssue("hasUnsupportedDisk") + VmFaultToleranceConfigIssueReasonForIssueInsufficientBandwidth = VmFaultToleranceConfigIssueReasonForIssue("insufficientBandwidth") + VmFaultToleranceConfigIssueReasonForIssueHasNestedHVConfiguration = VmFaultToleranceConfigIssueReasonForIssue("hasNestedHVConfiguration") + VmFaultToleranceConfigIssueReasonForIssueHasVFlashConfiguration = VmFaultToleranceConfigIssueReasonForIssue("hasVFlashConfiguration") + VmFaultToleranceConfigIssueReasonForIssueUnsupportedProduct = VmFaultToleranceConfigIssueReasonForIssue("unsupportedProduct") + VmFaultToleranceConfigIssueReasonForIssueCpuHvUnsupported = VmFaultToleranceConfigIssueReasonForIssue("cpuHvUnsupported") + VmFaultToleranceConfigIssueReasonForIssueCpuHwmmuUnsupported = VmFaultToleranceConfigIssueReasonForIssue("cpuHwmmuUnsupported") + VmFaultToleranceConfigIssueReasonForIssueCpuHvDisabled = VmFaultToleranceConfigIssueReasonForIssue("cpuHvDisabled") + VmFaultToleranceConfigIssueReasonForIssueHasEFIFirmware = VmFaultToleranceConfigIssueReasonForIssue("hasEFIFirmware") +) + +func init() { + t["VmFaultToleranceConfigIssueReasonForIssue"] = reflect.TypeOf((*VmFaultToleranceConfigIssueReasonForIssue)(nil)).Elem() +} + +type VmFaultToleranceInvalidFileBackingDeviceType string + +const ( + VmFaultToleranceInvalidFileBackingDeviceTypeVirtualFloppy = VmFaultToleranceInvalidFileBackingDeviceType("virtualFloppy") + VmFaultToleranceInvalidFileBackingDeviceTypeVirtualCdrom = VmFaultToleranceInvalidFileBackingDeviceType("virtualCdrom") + VmFaultToleranceInvalidFileBackingDeviceTypeVirtualSerialPort = VmFaultToleranceInvalidFileBackingDeviceType("virtualSerialPort") + VmFaultToleranceInvalidFileBackingDeviceTypeVirtualParallelPort = VmFaultToleranceInvalidFileBackingDeviceType("virtualParallelPort") + VmFaultToleranceInvalidFileBackingDeviceTypeVirtualDisk = VmFaultToleranceInvalidFileBackingDeviceType("virtualDisk") +) + +func init() { + t["VmFaultToleranceInvalidFileBackingDeviceType"] = reflect.TypeOf((*VmFaultToleranceInvalidFileBackingDeviceType)(nil)).Elem() +} + +type VmShutdownOnIsolationEventOperation string + +const ( + VmShutdownOnIsolationEventOperationShutdown = VmShutdownOnIsolationEventOperation("shutdown") + VmShutdownOnIsolationEventOperationPoweredOff = VmShutdownOnIsolationEventOperation("poweredOff") +) + +func init() { + t["VmShutdownOnIsolationEventOperation"] = reflect.TypeOf((*VmShutdownOnIsolationEventOperation)(nil)).Elem() +} + +type VmwareDistributedVirtualSwitchPvlanPortType string + +const ( + VmwareDistributedVirtualSwitchPvlanPortTypePromiscuous = VmwareDistributedVirtualSwitchPvlanPortType("promiscuous") + VmwareDistributedVirtualSwitchPvlanPortTypeIsolated = VmwareDistributedVirtualSwitchPvlanPortType("isolated") + 
VmwareDistributedVirtualSwitchPvlanPortTypeCommunity = VmwareDistributedVirtualSwitchPvlanPortType("community") +) + +func init() { + t["VmwareDistributedVirtualSwitchPvlanPortType"] = reflect.TypeOf((*VmwareDistributedVirtualSwitchPvlanPortType)(nil)).Elem() +} + +type VsanDiskIssueType string + +const ( + VsanDiskIssueTypeNonExist = VsanDiskIssueType("nonExist") + VsanDiskIssueTypeStampMismatch = VsanDiskIssueType("stampMismatch") + VsanDiskIssueTypeUnknown = VsanDiskIssueType("unknown") +) + +func init() { + t["VsanDiskIssueType"] = reflect.TypeOf((*VsanDiskIssueType)(nil)).Elem() +} + +type VsanHostDecommissionModeObjectAction string + +const ( + VsanHostDecommissionModeObjectActionNoAction = VsanHostDecommissionModeObjectAction("noAction") + VsanHostDecommissionModeObjectActionEnsureObjectAccessibility = VsanHostDecommissionModeObjectAction("ensureObjectAccessibility") + VsanHostDecommissionModeObjectActionEvacuateAllData = VsanHostDecommissionModeObjectAction("evacuateAllData") +) + +func init() { + t["VsanHostDecommissionModeObjectAction"] = reflect.TypeOf((*VsanHostDecommissionModeObjectAction)(nil)).Elem() +} + +type VsanHostDiskResultState string + +const ( + VsanHostDiskResultStateInUse = VsanHostDiskResultState("inUse") + VsanHostDiskResultStateEligible = VsanHostDiskResultState("eligible") + VsanHostDiskResultStateIneligible = VsanHostDiskResultState("ineligible") +) + +func init() { + t["VsanHostDiskResultState"] = reflect.TypeOf((*VsanHostDiskResultState)(nil)).Elem() +} + +type VsanHostHealthState string + +const ( + VsanHostHealthStateUnknown = VsanHostHealthState("unknown") + VsanHostHealthStateHealthy = VsanHostHealthState("healthy") + VsanHostHealthStateUnhealthy = VsanHostHealthState("unhealthy") +) + +func init() { + t["VsanHostHealthState"] = reflect.TypeOf((*VsanHostHealthState)(nil)).Elem() +} + +type VsanHostNodeState string + +const ( + VsanHostNodeStateError = VsanHostNodeState("error") + VsanHostNodeStateDisabled = VsanHostNodeState("disabled") + VsanHostNodeStateAgent = VsanHostNodeState("agent") + VsanHostNodeStateMaster = VsanHostNodeState("master") + VsanHostNodeStateBackup = VsanHostNodeState("backup") + VsanHostNodeStateStarting = VsanHostNodeState("starting") + VsanHostNodeStateStopping = VsanHostNodeState("stopping") + VsanHostNodeStateEnteringMaintenanceMode = VsanHostNodeState("enteringMaintenanceMode") + VsanHostNodeStateExitingMaintenanceMode = VsanHostNodeState("exitingMaintenanceMode") + VsanHostNodeStateDecommissioning = VsanHostNodeState("decommissioning") +) + +func init() { + t["VsanHostNodeState"] = reflect.TypeOf((*VsanHostNodeState)(nil)).Elem() +} + +type VsanUpgradeSystemUpgradeHistoryDiskGroupOpType string + +const ( + VsanUpgradeSystemUpgradeHistoryDiskGroupOpTypeAdd = VsanUpgradeSystemUpgradeHistoryDiskGroupOpType("add") + VsanUpgradeSystemUpgradeHistoryDiskGroupOpTypeRemove = VsanUpgradeSystemUpgradeHistoryDiskGroupOpType("remove") +) + +func init() { + t["VsanUpgradeSystemUpgradeHistoryDiskGroupOpType"] = reflect.TypeOf((*VsanUpgradeSystemUpgradeHistoryDiskGroupOpType)(nil)).Elem() +} + +type WeekOfMonth string + +const ( + WeekOfMonthFirst = WeekOfMonth("first") + WeekOfMonthSecond = WeekOfMonth("second") + WeekOfMonthThird = WeekOfMonth("third") + WeekOfMonthFourth = WeekOfMonth("fourth") + WeekOfMonthLast = WeekOfMonth("last") +) + +func init() { + t["WeekOfMonth"] = reflect.TypeOf((*WeekOfMonth)(nil)).Elem() +} + +type WillLoseHAProtectionResolution string + +const ( + WillLoseHAProtectionResolutionSvmotion = 
WillLoseHAProtectionResolution("svmotion") + WillLoseHAProtectionResolutionRelocate = WillLoseHAProtectionResolution("relocate") +) + +func init() { + t["WillLoseHAProtectionResolution"] = reflect.TypeOf((*WillLoseHAProtectionResolution)(nil)).Elem() +} diff --git a/vendor/github.com/vmware/govmomi/vim25/types/fault.go b/vendor/github.com/vmware/govmomi/vim25/types/fault.go new file mode 100644 index 000000000..c2503fa5c --- /dev/null +++ b/vendor/github.com/vmware/govmomi/vim25/types/fault.go @@ -0,0 +1,32 @@ +/* +Copyright (c) 2015 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package types + +type HasFault interface { + Fault() BaseMethodFault +} + +func IsFileNotFound(err error) bool { + if f, ok := err.(HasFault); ok { + switch f.Fault().(type) { + case *FileNotFound: + return true + } + } + + return false +} diff --git a/vendor/github.com/vmware/govmomi/vim25/types/helpers.go b/vendor/github.com/vmware/govmomi/vim25/types/helpers.go new file mode 100644 index 000000000..cbe577d63 --- /dev/null +++ b/vendor/github.com/vmware/govmomi/vim25/types/helpers.go @@ -0,0 +1,21 @@ +/* +Copyright (c) 2015 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package types + +func NewBool(v bool) *bool { + return &v +} diff --git a/vendor/github.com/vmware/govmomi/vim25/types/if.go b/vendor/github.com/vmware/govmomi/vim25/types/if.go new file mode 100644 index 000000000..f1102e10d --- /dev/null +++ b/vendor/github.com/vmware/govmomi/vim25/types/if.go @@ -0,0 +1,3287 @@ +/* +Copyright (c) 2014-2015 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package types + +import "reflect" + +func (b *Action) GetAction() *Action { return b } + +type BaseAction interface { + GetAction() *Action +} + +func init() { + t["BaseAction"] = reflect.TypeOf((*Action)(nil)).Elem() +} + +func (b *ActiveDirectoryFault) GetActiveDirectoryFault() *ActiveDirectoryFault { return b } + +type BaseActiveDirectoryFault interface { + GetActiveDirectoryFault() *ActiveDirectoryFault +} + +func init() { + t["BaseActiveDirectoryFault"] = reflect.TypeOf((*ActiveDirectoryFault)(nil)).Elem() +} + +func (b *AlarmAction) GetAlarmAction() *AlarmAction { return b } + +type BaseAlarmAction interface { + GetAlarmAction() *AlarmAction +} + +func init() { + t["BaseAlarmAction"] = reflect.TypeOf((*AlarmAction)(nil)).Elem() +} + +func (b *AlarmEvent) GetAlarmEvent() *AlarmEvent { return b } + +type BaseAlarmEvent interface { + GetAlarmEvent() *AlarmEvent +} + +func init() { + t["BaseAlarmEvent"] = reflect.TypeOf((*AlarmEvent)(nil)).Elem() +} + +func (b *AlarmExpression) GetAlarmExpression() *AlarmExpression { return b } + +type BaseAlarmExpression interface { + GetAlarmExpression() *AlarmExpression +} + +func init() { + t["BaseAlarmExpression"] = reflect.TypeOf((*AlarmExpression)(nil)).Elem() +} + +func (b *AlarmSpec) GetAlarmSpec() *AlarmSpec { return b } + +type BaseAlarmSpec interface { + GetAlarmSpec() *AlarmSpec +} + +func init() { + t["BaseAlarmSpec"] = reflect.TypeOf((*AlarmSpec)(nil)).Elem() +} + +func (b *AnswerFileCreateSpec) GetAnswerFileCreateSpec() *AnswerFileCreateSpec { return b } + +type BaseAnswerFileCreateSpec interface { + GetAnswerFileCreateSpec() *AnswerFileCreateSpec +} + +func init() { + t["BaseAnswerFileCreateSpec"] = reflect.TypeOf((*AnswerFileCreateSpec)(nil)).Elem() +} + +func (b *ApplyProfile) GetApplyProfile() *ApplyProfile { return b } + +type BaseApplyProfile interface { + GetApplyProfile() *ApplyProfile +} + +func init() { + t["BaseApplyProfile"] = reflect.TypeOf((*ApplyProfile)(nil)).Elem() +} + +func (b *ArrayUpdateSpec) GetArrayUpdateSpec() *ArrayUpdateSpec { return b } + +type BaseArrayUpdateSpec interface { + GetArrayUpdateSpec() *ArrayUpdateSpec +} + +func init() { + t["BaseArrayUpdateSpec"] = reflect.TypeOf((*ArrayUpdateSpec)(nil)).Elem() +} + +func (b *AuthorizationEvent) GetAuthorizationEvent() *AuthorizationEvent { return b } + +type BaseAuthorizationEvent interface { + GetAuthorizationEvent() *AuthorizationEvent +} + +func init() { + t["BaseAuthorizationEvent"] = reflect.TypeOf((*AuthorizationEvent)(nil)).Elem() +} + +func (b *CannotAccessNetwork) GetCannotAccessNetwork() *CannotAccessNetwork { return b } + +type BaseCannotAccessNetwork interface { + GetCannotAccessNetwork() *CannotAccessNetwork +} + +func init() { + t["BaseCannotAccessNetwork"] = reflect.TypeOf((*CannotAccessNetwork)(nil)).Elem() +} + +func (b *CannotAccessVmComponent) GetCannotAccessVmComponent() *CannotAccessVmComponent { return b } + +type BaseCannotAccessVmComponent interface { + GetCannotAccessVmComponent() *CannotAccessVmComponent +} + +func init() { + t["BaseCannotAccessVmComponent"] = reflect.TypeOf((*CannotAccessVmComponent)(nil)).Elem() +} + +func (b *CannotAccessVmDevice) GetCannotAccessVmDevice() *CannotAccessVmDevice { return b } + +type BaseCannotAccessVmDevice interface { + GetCannotAccessVmDevice() *CannotAccessVmDevice +} + +func init() { + t["BaseCannotAccessVmDevice"] = reflect.TypeOf((*CannotAccessVmDevice)(nil)).Elem() +} + +func (b *CannotAccessVmDisk) GetCannotAccessVmDisk() *CannotAccessVmDisk { return b } + +type BaseCannotAccessVmDisk 
interface { + GetCannotAccessVmDisk() *CannotAccessVmDisk +} + +func init() { + t["BaseCannotAccessVmDisk"] = reflect.TypeOf((*CannotAccessVmDisk)(nil)).Elem() +} + +func (b *CannotMoveVsanEnabledHost) GetCannotMoveVsanEnabledHost() *CannotMoveVsanEnabledHost { + return b +} + +type BaseCannotMoveVsanEnabledHost interface { + GetCannotMoveVsanEnabledHost() *CannotMoveVsanEnabledHost +} + +func init() { + t["BaseCannotMoveVsanEnabledHost"] = reflect.TypeOf((*CannotMoveVsanEnabledHost)(nil)).Elem() +} + +func (b *ClusterAction) GetClusterAction() *ClusterAction { return b } + +type BaseClusterAction interface { + GetClusterAction() *ClusterAction +} + +func init() { + t["BaseClusterAction"] = reflect.TypeOf((*ClusterAction)(nil)).Elem() +} + +func (b *ClusterDasAdmissionControlInfo) GetClusterDasAdmissionControlInfo() *ClusterDasAdmissionControlInfo { + return b +} + +type BaseClusterDasAdmissionControlInfo interface { + GetClusterDasAdmissionControlInfo() *ClusterDasAdmissionControlInfo +} + +func init() { + t["BaseClusterDasAdmissionControlInfo"] = reflect.TypeOf((*ClusterDasAdmissionControlInfo)(nil)).Elem() +} + +func (b *ClusterDasAdmissionControlPolicy) GetClusterDasAdmissionControlPolicy() *ClusterDasAdmissionControlPolicy { + return b +} + +type BaseClusterDasAdmissionControlPolicy interface { + GetClusterDasAdmissionControlPolicy() *ClusterDasAdmissionControlPolicy +} + +func init() { + t["BaseClusterDasAdmissionControlPolicy"] = reflect.TypeOf((*ClusterDasAdmissionControlPolicy)(nil)).Elem() +} + +func (b *ClusterDasAdvancedRuntimeInfo) GetClusterDasAdvancedRuntimeInfo() *ClusterDasAdvancedRuntimeInfo { + return b +} + +type BaseClusterDasAdvancedRuntimeInfo interface { + GetClusterDasAdvancedRuntimeInfo() *ClusterDasAdvancedRuntimeInfo +} + +func init() { + t["BaseClusterDasAdvancedRuntimeInfo"] = reflect.TypeOf((*ClusterDasAdvancedRuntimeInfo)(nil)).Elem() +} + +func (b *ClusterDasData) GetClusterDasData() *ClusterDasData { return b } + +type BaseClusterDasData interface { + GetClusterDasData() *ClusterDasData +} + +func init() { + t["BaseClusterDasData"] = reflect.TypeOf((*ClusterDasData)(nil)).Elem() +} + +func (b *ClusterDasHostInfo) GetClusterDasHostInfo() *ClusterDasHostInfo { return b } + +type BaseClusterDasHostInfo interface { + GetClusterDasHostInfo() *ClusterDasHostInfo +} + +func init() { + t["BaseClusterDasHostInfo"] = reflect.TypeOf((*ClusterDasHostInfo)(nil)).Elem() +} + +func (b *ClusterDrsFaultsFaultsByVm) GetClusterDrsFaultsFaultsByVm() *ClusterDrsFaultsFaultsByVm { + return b +} + +type BaseClusterDrsFaultsFaultsByVm interface { + GetClusterDrsFaultsFaultsByVm() *ClusterDrsFaultsFaultsByVm +} + +func init() { + t["BaseClusterDrsFaultsFaultsByVm"] = reflect.TypeOf((*ClusterDrsFaultsFaultsByVm)(nil)).Elem() +} + +func (b *ClusterEvent) GetClusterEvent() *ClusterEvent { return b } + +type BaseClusterEvent interface { + GetClusterEvent() *ClusterEvent +} + +func init() { + t["BaseClusterEvent"] = reflect.TypeOf((*ClusterEvent)(nil)).Elem() +} + +func (b *ClusterGroupInfo) GetClusterGroupInfo() *ClusterGroupInfo { return b } + +type BaseClusterGroupInfo interface { + GetClusterGroupInfo() *ClusterGroupInfo +} + +func init() { + t["BaseClusterGroupInfo"] = reflect.TypeOf((*ClusterGroupInfo)(nil)).Elem() +} + +func (b *ClusterOvercommittedEvent) GetClusterOvercommittedEvent() *ClusterOvercommittedEvent { + return b +} + +type BaseClusterOvercommittedEvent interface { + GetClusterOvercommittedEvent() *ClusterOvercommittedEvent +} + +func init() { + 
t["BaseClusterOvercommittedEvent"] = reflect.TypeOf((*ClusterOvercommittedEvent)(nil)).Elem() +} + +func (b *ClusterProfileConfigSpec) GetClusterProfileConfigSpec() *ClusterProfileConfigSpec { return b } + +type BaseClusterProfileConfigSpec interface { + GetClusterProfileConfigSpec() *ClusterProfileConfigSpec +} + +func init() { + t["BaseClusterProfileConfigSpec"] = reflect.TypeOf((*ClusterProfileConfigSpec)(nil)).Elem() +} + +func (b *ClusterProfileCreateSpec) GetClusterProfileCreateSpec() *ClusterProfileCreateSpec { return b } + +type BaseClusterProfileCreateSpec interface { + GetClusterProfileCreateSpec() *ClusterProfileCreateSpec +} + +func init() { + t["BaseClusterProfileCreateSpec"] = reflect.TypeOf((*ClusterProfileCreateSpec)(nil)).Elem() +} + +func (b *ClusterRuleInfo) GetClusterRuleInfo() *ClusterRuleInfo { return b } + +type BaseClusterRuleInfo interface { + GetClusterRuleInfo() *ClusterRuleInfo +} + +func init() { + t["BaseClusterRuleInfo"] = reflect.TypeOf((*ClusterRuleInfo)(nil)).Elem() +} + +func (b *ClusterSlotPolicy) GetClusterSlotPolicy() *ClusterSlotPolicy { return b } + +type BaseClusterSlotPolicy interface { + GetClusterSlotPolicy() *ClusterSlotPolicy +} + +func init() { + t["BaseClusterSlotPolicy"] = reflect.TypeOf((*ClusterSlotPolicy)(nil)).Elem() +} + +func (b *ClusterStatusChangedEvent) GetClusterStatusChangedEvent() *ClusterStatusChangedEvent { + return b +} + +type BaseClusterStatusChangedEvent interface { + GetClusterStatusChangedEvent() *ClusterStatusChangedEvent +} + +func init() { + t["BaseClusterStatusChangedEvent"] = reflect.TypeOf((*ClusterStatusChangedEvent)(nil)).Elem() +} + +func (b *ComputeResourceConfigInfo) GetComputeResourceConfigInfo() *ComputeResourceConfigInfo { + return b +} + +type BaseComputeResourceConfigInfo interface { + GetComputeResourceConfigInfo() *ComputeResourceConfigInfo +} + +func init() { + t["BaseComputeResourceConfigInfo"] = reflect.TypeOf((*ComputeResourceConfigInfo)(nil)).Elem() +} + +func (b *ComputeResourceConfigSpec) GetComputeResourceConfigSpec() *ComputeResourceConfigSpec { + return b +} + +type BaseComputeResourceConfigSpec interface { + GetComputeResourceConfigSpec() *ComputeResourceConfigSpec +} + +func init() { + t["BaseComputeResourceConfigSpec"] = reflect.TypeOf((*ComputeResourceConfigSpec)(nil)).Elem() +} + +func (b *ComputeResourceSummary) GetComputeResourceSummary() *ComputeResourceSummary { return b } + +type BaseComputeResourceSummary interface { + GetComputeResourceSummary() *ComputeResourceSummary +} + +func init() { + t["BaseComputeResourceSummary"] = reflect.TypeOf((*ComputeResourceSummary)(nil)).Elem() +} + +func (b *CpuIncompatible) GetCpuIncompatible() *CpuIncompatible { return b } + +type BaseCpuIncompatible interface { + GetCpuIncompatible() *CpuIncompatible +} + +func init() { + t["BaseCpuIncompatible"] = reflect.TypeOf((*CpuIncompatible)(nil)).Elem() +} + +func (b *CustomFieldDefEvent) GetCustomFieldDefEvent() *CustomFieldDefEvent { return b } + +type BaseCustomFieldDefEvent interface { + GetCustomFieldDefEvent() *CustomFieldDefEvent +} + +func init() { + t["BaseCustomFieldDefEvent"] = reflect.TypeOf((*CustomFieldDefEvent)(nil)).Elem() +} + +func (b *CustomFieldEvent) GetCustomFieldEvent() *CustomFieldEvent { return b } + +type BaseCustomFieldEvent interface { + GetCustomFieldEvent() *CustomFieldEvent +} + +func init() { + t["BaseCustomFieldEvent"] = reflect.TypeOf((*CustomFieldEvent)(nil)).Elem() +} + +func (b *CustomFieldValue) GetCustomFieldValue() *CustomFieldValue { return b } + +type 
BaseCustomFieldValue interface { + GetCustomFieldValue() *CustomFieldValue +} + +func init() { + t["BaseCustomFieldValue"] = reflect.TypeOf((*CustomFieldValue)(nil)).Elem() +} + +func (b *CustomizationEvent) GetCustomizationEvent() *CustomizationEvent { return b } + +type BaseCustomizationEvent interface { + GetCustomizationEvent() *CustomizationEvent +} + +func init() { + t["BaseCustomizationEvent"] = reflect.TypeOf((*CustomizationEvent)(nil)).Elem() +} + +func (b *CustomizationFailed) GetCustomizationFailed() *CustomizationFailed { return b } + +type BaseCustomizationFailed interface { + GetCustomizationFailed() *CustomizationFailed +} + +func init() { + t["BaseCustomizationFailed"] = reflect.TypeOf((*CustomizationFailed)(nil)).Elem() +} + +func (b *CustomizationFault) GetCustomizationFault() *CustomizationFault { return b } + +type BaseCustomizationFault interface { + GetCustomizationFault() *CustomizationFault +} + +func init() { + t["BaseCustomizationFault"] = reflect.TypeOf((*CustomizationFault)(nil)).Elem() +} + +func (b *CustomizationIdentitySettings) GetCustomizationIdentitySettings() *CustomizationIdentitySettings { + return b +} + +type BaseCustomizationIdentitySettings interface { + GetCustomizationIdentitySettings() *CustomizationIdentitySettings +} + +func init() { + t["BaseCustomizationIdentitySettings"] = reflect.TypeOf((*CustomizationIdentitySettings)(nil)).Elem() +} + +func (b *CustomizationIpGenerator) GetCustomizationIpGenerator() *CustomizationIpGenerator { return b } + +type BaseCustomizationIpGenerator interface { + GetCustomizationIpGenerator() *CustomizationIpGenerator +} + +func init() { + t["BaseCustomizationIpGenerator"] = reflect.TypeOf((*CustomizationIpGenerator)(nil)).Elem() +} + +func (b *CustomizationIpV6Generator) GetCustomizationIpV6Generator() *CustomizationIpV6Generator { + return b +} + +type BaseCustomizationIpV6Generator interface { + GetCustomizationIpV6Generator() *CustomizationIpV6Generator +} + +func init() { + t["BaseCustomizationIpV6Generator"] = reflect.TypeOf((*CustomizationIpV6Generator)(nil)).Elem() +} + +func (b *CustomizationName) GetCustomizationName() *CustomizationName { return b } + +type BaseCustomizationName interface { + GetCustomizationName() *CustomizationName +} + +func init() { + t["BaseCustomizationName"] = reflect.TypeOf((*CustomizationName)(nil)).Elem() +} + +func (b *CustomizationOptions) GetCustomizationOptions() *CustomizationOptions { return b } + +type BaseCustomizationOptions interface { + GetCustomizationOptions() *CustomizationOptions +} + +func init() { + t["BaseCustomizationOptions"] = reflect.TypeOf((*CustomizationOptions)(nil)).Elem() +} + +func (b *DVPortSetting) GetDVPortSetting() *DVPortSetting { return b } + +type BaseDVPortSetting interface { + GetDVPortSetting() *DVPortSetting +} + +func init() { + t["BaseDVPortSetting"] = reflect.TypeOf((*DVPortSetting)(nil)).Elem() +} + +func (b *DVPortgroupEvent) GetDVPortgroupEvent() *DVPortgroupEvent { return b } + +type BaseDVPortgroupEvent interface { + GetDVPortgroupEvent() *DVPortgroupEvent +} + +func init() { + t["BaseDVPortgroupEvent"] = reflect.TypeOf((*DVPortgroupEvent)(nil)).Elem() +} + +func (b *DVPortgroupPolicy) GetDVPortgroupPolicy() *DVPortgroupPolicy { return b } + +type BaseDVPortgroupPolicy interface { + GetDVPortgroupPolicy() *DVPortgroupPolicy +} + +func init() { + t["BaseDVPortgroupPolicy"] = reflect.TypeOf((*DVPortgroupPolicy)(nil)).Elem() +} + +func (b *DVSConfigInfo) GetDVSConfigInfo() *DVSConfigInfo { return b } + +type BaseDVSConfigInfo 
interface { + GetDVSConfigInfo() *DVSConfigInfo +} + +func init() { + t["BaseDVSConfigInfo"] = reflect.TypeOf((*DVSConfigInfo)(nil)).Elem() +} + +func (b *DVSConfigSpec) GetDVSConfigSpec() *DVSConfigSpec { return b } + +type BaseDVSConfigSpec interface { + GetDVSConfigSpec() *DVSConfigSpec +} + +func init() { + t["BaseDVSConfigSpec"] = reflect.TypeOf((*DVSConfigSpec)(nil)).Elem() +} + +func (b *DVSFeatureCapability) GetDVSFeatureCapability() *DVSFeatureCapability { return b } + +type BaseDVSFeatureCapability interface { + GetDVSFeatureCapability() *DVSFeatureCapability +} + +func init() { + t["BaseDVSFeatureCapability"] = reflect.TypeOf((*DVSFeatureCapability)(nil)).Elem() +} + +func (b *DVSHealthCheckCapability) GetDVSHealthCheckCapability() *DVSHealthCheckCapability { return b } + +type BaseDVSHealthCheckCapability interface { + GetDVSHealthCheckCapability() *DVSHealthCheckCapability +} + +func init() { + t["BaseDVSHealthCheckCapability"] = reflect.TypeOf((*DVSHealthCheckCapability)(nil)).Elem() +} + +func (b *DVSHealthCheckConfig) GetDVSHealthCheckConfig() *DVSHealthCheckConfig { return b } + +type BaseDVSHealthCheckConfig interface { + GetDVSHealthCheckConfig() *DVSHealthCheckConfig +} + +func init() { + t["BaseDVSHealthCheckConfig"] = reflect.TypeOf((*DVSHealthCheckConfig)(nil)).Elem() +} + +func (b *DVSUplinkPortPolicy) GetDVSUplinkPortPolicy() *DVSUplinkPortPolicy { return b } + +type BaseDVSUplinkPortPolicy interface { + GetDVSUplinkPortPolicy() *DVSUplinkPortPolicy +} + +func init() { + t["BaseDVSUplinkPortPolicy"] = reflect.TypeOf((*DVSUplinkPortPolicy)(nil)).Elem() +} + +func (b *DailyTaskScheduler) GetDailyTaskScheduler() *DailyTaskScheduler { return b } + +type BaseDailyTaskScheduler interface { + GetDailyTaskScheduler() *DailyTaskScheduler +} + +func init() { + t["BaseDailyTaskScheduler"] = reflect.TypeOf((*DailyTaskScheduler)(nil)).Elem() +} + +func (b *DatacenterEvent) GetDatacenterEvent() *DatacenterEvent { return b } + +type BaseDatacenterEvent interface { + GetDatacenterEvent() *DatacenterEvent +} + +func init() { + t["BaseDatacenterEvent"] = reflect.TypeOf((*DatacenterEvent)(nil)).Elem() +} + +func (b *DatastoreEvent) GetDatastoreEvent() *DatastoreEvent { return b } + +type BaseDatastoreEvent interface { + GetDatastoreEvent() *DatastoreEvent +} + +func init() { + t["BaseDatastoreEvent"] = reflect.TypeOf((*DatastoreEvent)(nil)).Elem() +} + +func (b *DatastoreFileEvent) GetDatastoreFileEvent() *DatastoreFileEvent { return b } + +type BaseDatastoreFileEvent interface { + GetDatastoreFileEvent() *DatastoreFileEvent +} + +func init() { + t["BaseDatastoreFileEvent"] = reflect.TypeOf((*DatastoreFileEvent)(nil)).Elem() +} + +func (b *DatastoreInfo) GetDatastoreInfo() *DatastoreInfo { return b } + +type BaseDatastoreInfo interface { + GetDatastoreInfo() *DatastoreInfo +} + +func init() { + t["BaseDatastoreInfo"] = reflect.TypeOf((*DatastoreInfo)(nil)).Elem() +} + +func (b *DatastoreNotWritableOnHost) GetDatastoreNotWritableOnHost() *DatastoreNotWritableOnHost { + return b +} + +type BaseDatastoreNotWritableOnHost interface { + GetDatastoreNotWritableOnHost() *DatastoreNotWritableOnHost +} + +func init() { + t["BaseDatastoreNotWritableOnHost"] = reflect.TypeOf((*DatastoreNotWritableOnHost)(nil)).Elem() +} + +func (b *Description) GetDescription() *Description { return b } + +type BaseDescription interface { + GetDescription() *Description +} + +func init() { + t["BaseDescription"] = reflect.TypeOf((*Description)(nil)).Elem() +} + +func (b *DeviceBackingNotSupported) 
GetDeviceBackingNotSupported() *DeviceBackingNotSupported { + return b +} + +type BaseDeviceBackingNotSupported interface { + GetDeviceBackingNotSupported() *DeviceBackingNotSupported +} + +func init() { + t["BaseDeviceBackingNotSupported"] = reflect.TypeOf((*DeviceBackingNotSupported)(nil)).Elem() +} + +func (b *DeviceNotSupported) GetDeviceNotSupported() *DeviceNotSupported { return b } + +type BaseDeviceNotSupported interface { + GetDeviceNotSupported() *DeviceNotSupported +} + +func init() { + t["BaseDeviceNotSupported"] = reflect.TypeOf((*DeviceNotSupported)(nil)).Elem() +} + +func (b *DiskNotSupported) GetDiskNotSupported() *DiskNotSupported { return b } + +type BaseDiskNotSupported interface { + GetDiskNotSupported() *DiskNotSupported +} + +func init() { + t["BaseDiskNotSupported"] = reflect.TypeOf((*DiskNotSupported)(nil)).Elem() +} + +func (b *DistributedVirtualSwitchHostMemberBacking) GetDistributedVirtualSwitchHostMemberBacking() *DistributedVirtualSwitchHostMemberBacking { + return b +} + +type BaseDistributedVirtualSwitchHostMemberBacking interface { + GetDistributedVirtualSwitchHostMemberBacking() *DistributedVirtualSwitchHostMemberBacking +} + +func init() { + t["BaseDistributedVirtualSwitchHostMemberBacking"] = reflect.TypeOf((*DistributedVirtualSwitchHostMemberBacking)(nil)).Elem() +} + +func (b *DistributedVirtualSwitchManagerHostDvsFilterSpec) GetDistributedVirtualSwitchManagerHostDvsFilterSpec() *DistributedVirtualSwitchManagerHostDvsFilterSpec { + return b +} + +type BaseDistributedVirtualSwitchManagerHostDvsFilterSpec interface { + GetDistributedVirtualSwitchManagerHostDvsFilterSpec() *DistributedVirtualSwitchManagerHostDvsFilterSpec +} + +func init() { + t["BaseDistributedVirtualSwitchManagerHostDvsFilterSpec"] = reflect.TypeOf((*DistributedVirtualSwitchManagerHostDvsFilterSpec)(nil)).Elem() +} + +func (b *DvsEvent) GetDvsEvent() *DvsEvent { return b } + +type BaseDvsEvent interface { + GetDvsEvent() *DvsEvent +} + +func init() { + t["BaseDvsEvent"] = reflect.TypeOf((*DvsEvent)(nil)).Elem() +} + +func (b *DvsFault) GetDvsFault() *DvsFault { return b } + +type BaseDvsFault interface { + GetDvsFault() *DvsFault +} + +func init() { + t["BaseDvsFault"] = reflect.TypeOf((*DvsFault)(nil)).Elem() +} + +func (b *DvsFilterConfig) GetDvsFilterConfig() *DvsFilterConfig { return b } + +type BaseDvsFilterConfig interface { + GetDvsFilterConfig() *DvsFilterConfig +} + +func init() { + t["BaseDvsFilterConfig"] = reflect.TypeOf((*DvsFilterConfig)(nil)).Elem() +} + +func (b *DvsHealthStatusChangeEvent) GetDvsHealthStatusChangeEvent() *DvsHealthStatusChangeEvent { + return b +} + +type BaseDvsHealthStatusChangeEvent interface { + GetDvsHealthStatusChangeEvent() *DvsHealthStatusChangeEvent +} + +func init() { + t["BaseDvsHealthStatusChangeEvent"] = reflect.TypeOf((*DvsHealthStatusChangeEvent)(nil)).Elem() +} + +func (b *DvsIpPort) GetDvsIpPort() *DvsIpPort { return b } + +type BaseDvsIpPort interface { + GetDvsIpPort() *DvsIpPort +} + +func init() { + t["BaseDvsIpPort"] = reflect.TypeOf((*DvsIpPort)(nil)).Elem() +} + +func (b *DvsNetworkRuleAction) GetDvsNetworkRuleAction() *DvsNetworkRuleAction { return b } + +type BaseDvsNetworkRuleAction interface { + GetDvsNetworkRuleAction() *DvsNetworkRuleAction +} + +func init() { + t["BaseDvsNetworkRuleAction"] = reflect.TypeOf((*DvsNetworkRuleAction)(nil)).Elem() +} + +func (b *DvsNetworkRuleQualifier) GetDvsNetworkRuleQualifier() *DvsNetworkRuleQualifier { return b } + +type BaseDvsNetworkRuleQualifier interface { + 
GetDvsNetworkRuleQualifier() *DvsNetworkRuleQualifier +} + +func init() { + t["BaseDvsNetworkRuleQualifier"] = reflect.TypeOf((*DvsNetworkRuleQualifier)(nil)).Elem() +} + +func (b *DvsTrafficFilterConfig) GetDvsTrafficFilterConfig() *DvsTrafficFilterConfig { return b } + +type BaseDvsTrafficFilterConfig interface { + GetDvsTrafficFilterConfig() *DvsTrafficFilterConfig +} + +func init() { + t["BaseDvsTrafficFilterConfig"] = reflect.TypeOf((*DvsTrafficFilterConfig)(nil)).Elem() +} + +func (b *DvsVNicProfile) GetDvsVNicProfile() *DvsVNicProfile { return b } + +type BaseDvsVNicProfile interface { + GetDvsVNicProfile() *DvsVNicProfile +} + +func init() { + t["BaseDvsVNicProfile"] = reflect.TypeOf((*DvsVNicProfile)(nil)).Elem() +} + +func (b *DynamicData) GetDynamicData() *DynamicData { return b } + +type BaseDynamicData interface { + GetDynamicData() *DynamicData +} + +func init() { + t["BaseDynamicData"] = reflect.TypeOf((*DynamicData)(nil)).Elem() +} + +func (b *EVCAdmissionFailed) GetEVCAdmissionFailed() *EVCAdmissionFailed { return b } + +type BaseEVCAdmissionFailed interface { + GetEVCAdmissionFailed() *EVCAdmissionFailed +} + +func init() { + t["BaseEVCAdmissionFailed"] = reflect.TypeOf((*EVCAdmissionFailed)(nil)).Elem() +} + +func (b *EVCConfigFault) GetEVCConfigFault() *EVCConfigFault { return b } + +type BaseEVCConfigFault interface { + GetEVCConfigFault() *EVCConfigFault +} + +func init() { + t["BaseEVCConfigFault"] = reflect.TypeOf((*EVCConfigFault)(nil)).Elem() +} + +func (b *ElementDescription) GetElementDescription() *ElementDescription { return b } + +type BaseElementDescription interface { + GetElementDescription() *ElementDescription +} + +func init() { + t["BaseElementDescription"] = reflect.TypeOf((*ElementDescription)(nil)).Elem() +} + +func (b *EnteredStandbyModeEvent) GetEnteredStandbyModeEvent() *EnteredStandbyModeEvent { return b } + +type BaseEnteredStandbyModeEvent interface { + GetEnteredStandbyModeEvent() *EnteredStandbyModeEvent +} + +func init() { + t["BaseEnteredStandbyModeEvent"] = reflect.TypeOf((*EnteredStandbyModeEvent)(nil)).Elem() +} + +func (b *EnteringStandbyModeEvent) GetEnteringStandbyModeEvent() *EnteringStandbyModeEvent { return b } + +type BaseEnteringStandbyModeEvent interface { + GetEnteringStandbyModeEvent() *EnteringStandbyModeEvent +} + +func init() { + t["BaseEnteringStandbyModeEvent"] = reflect.TypeOf((*EnteringStandbyModeEvent)(nil)).Elem() +} + +func (b *EntityEventArgument) GetEntityEventArgument() *EntityEventArgument { return b } + +type BaseEntityEventArgument interface { + GetEntityEventArgument() *EntityEventArgument +} + +func init() { + t["BaseEntityEventArgument"] = reflect.TypeOf((*EntityEventArgument)(nil)).Elem() +} + +func (b *Event) GetEvent() *Event { return b } + +type BaseEvent interface { + GetEvent() *Event +} + +func init() { + t["BaseEvent"] = reflect.TypeOf((*Event)(nil)).Elem() +} + +func (b *EventArgument) GetEventArgument() *EventArgument { return b } + +type BaseEventArgument interface { + GetEventArgument() *EventArgument +} + +func init() { + t["BaseEventArgument"] = reflect.TypeOf((*EventArgument)(nil)).Elem() +} + +func (b *ExitStandbyModeFailedEvent) GetExitStandbyModeFailedEvent() *ExitStandbyModeFailedEvent { + return b +} + +type BaseExitStandbyModeFailedEvent interface { + GetExitStandbyModeFailedEvent() *ExitStandbyModeFailedEvent +} + +func init() { + t["BaseExitStandbyModeFailedEvent"] = reflect.TypeOf((*ExitStandbyModeFailedEvent)(nil)).Elem() +} + +func (b *ExitedStandbyModeEvent) 
GetExitedStandbyModeEvent() *ExitedStandbyModeEvent { return b } + +type BaseExitedStandbyModeEvent interface { + GetExitedStandbyModeEvent() *ExitedStandbyModeEvent +} + +func init() { + t["BaseExitedStandbyModeEvent"] = reflect.TypeOf((*ExitedStandbyModeEvent)(nil)).Elem() +} + +func (b *ExitingStandbyModeEvent) GetExitingStandbyModeEvent() *ExitingStandbyModeEvent { return b } + +type BaseExitingStandbyModeEvent interface { + GetExitingStandbyModeEvent() *ExitingStandbyModeEvent +} + +func init() { + t["BaseExitingStandbyModeEvent"] = reflect.TypeOf((*ExitingStandbyModeEvent)(nil)).Elem() +} + +func (b *ExpiredFeatureLicense) GetExpiredFeatureLicense() *ExpiredFeatureLicense { return b } + +type BaseExpiredFeatureLicense interface { + GetExpiredFeatureLicense() *ExpiredFeatureLicense +} + +func init() { + t["BaseExpiredFeatureLicense"] = reflect.TypeOf((*ExpiredFeatureLicense)(nil)).Elem() +} + +func (b *FaultToleranceConfigInfo) GetFaultToleranceConfigInfo() *FaultToleranceConfigInfo { return b } + +type BaseFaultToleranceConfigInfo interface { + GetFaultToleranceConfigInfo() *FaultToleranceConfigInfo +} + +func init() { + t["BaseFaultToleranceConfigInfo"] = reflect.TypeOf((*FaultToleranceConfigInfo)(nil)).Elem() +} + +func (b *FcoeFault) GetFcoeFault() *FcoeFault { return b } + +type BaseFcoeFault interface { + GetFcoeFault() *FcoeFault +} + +func init() { + t["BaseFcoeFault"] = reflect.TypeOf((*FcoeFault)(nil)).Elem() +} + +func (b *FileBackedVirtualDiskSpec) GetFileBackedVirtualDiskSpec() *FileBackedVirtualDiskSpec { + return b +} + +type BaseFileBackedVirtualDiskSpec interface { + GetFileBackedVirtualDiskSpec() *FileBackedVirtualDiskSpec +} + +func init() { + t["BaseFileBackedVirtualDiskSpec"] = reflect.TypeOf((*FileBackedVirtualDiskSpec)(nil)).Elem() +} + +func (b *FileFault) GetFileFault() *FileFault { return b } + +type BaseFileFault interface { + GetFileFault() *FileFault +} + +func init() { + t["BaseFileFault"] = reflect.TypeOf((*FileFault)(nil)).Elem() +} + +func (b *FileInfo) GetFileInfo() *FileInfo { return b } + +type BaseFileInfo interface { + GetFileInfo() *FileInfo +} + +func init() { + t["BaseFileInfo"] = reflect.TypeOf((*FileInfo)(nil)).Elem() +} + +func (b *FileQuery) GetFileQuery() *FileQuery { return b } + +type BaseFileQuery interface { + GetFileQuery() *FileQuery +} + +func init() { + t["BaseFileQuery"] = reflect.TypeOf((*FileQuery)(nil)).Elem() +} + +func (b *GatewayConnectFault) GetGatewayConnectFault() *GatewayConnectFault { return b } + +type BaseGatewayConnectFault interface { + GetGatewayConnectFault() *GatewayConnectFault +} + +func init() { + t["BaseGatewayConnectFault"] = reflect.TypeOf((*GatewayConnectFault)(nil)).Elem() +} + +func (b *GatewayToHostConnectFault) GetGatewayToHostConnectFault() *GatewayToHostConnectFault { + return b +} + +type BaseGatewayToHostConnectFault interface { + GetGatewayToHostConnectFault() *GatewayToHostConnectFault +} + +func init() { + t["BaseGatewayToHostConnectFault"] = reflect.TypeOf((*GatewayToHostConnectFault)(nil)).Elem() +} + +func (b *GeneralEvent) GetGeneralEvent() *GeneralEvent { return b } + +type BaseGeneralEvent interface { + GetGeneralEvent() *GeneralEvent +} + +func init() { + t["BaseGeneralEvent"] = reflect.TypeOf((*GeneralEvent)(nil)).Elem() +} + +func (b *GuestAuthSubject) GetGuestAuthSubject() *GuestAuthSubject { return b } + +type BaseGuestAuthSubject interface { + GetGuestAuthSubject() *GuestAuthSubject +} + +func init() { + t["BaseGuestAuthSubject"] = reflect.TypeOf((*GuestAuthSubject)(nil)).Elem() +} + 
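// NOTE(editor): illustrative aside, not part of the vendored govmomi diff.
// Everything in if.go follows one pattern: each extendable vim25 type gets a
// GetX accessor, a BaseX interface that all subtypes satisfy, and a
// t["BaseX"] registry entry. The registry is what lets the SOAP decoder turn
// an xsi:type attribute on a polymorphic element into the matching concrete
// Go type. A minimal lookup sketch, assuming govmomi exposes the registry
// through types.TypeFunc:
//
//	lookup := types.TypeFunc()
//	if rt, ok := lookup("BaseMethodFault"); ok {
//		// rt is the reflect.Type registered above; reflect.New(rt)
//		// yields a value any concrete fault (e.g. *FileNotFound)
//		// can populate via its GetMethodFault accessor.
//		v := reflect.New(rt).Interface()
//		_ = v
//	}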
+func (b *GuestAuthentication) GetGuestAuthentication() *GuestAuthentication { return b } + +type BaseGuestAuthentication interface { + GetGuestAuthentication() *GuestAuthentication +} + +func init() { + t["BaseGuestAuthentication"] = reflect.TypeOf((*GuestAuthentication)(nil)).Elem() +} + +func (b *GuestFileAttributes) GetGuestFileAttributes() *GuestFileAttributes { return b } + +type BaseGuestFileAttributes interface { + GetGuestFileAttributes() *GuestFileAttributes +} + +func init() { + t["BaseGuestFileAttributes"] = reflect.TypeOf((*GuestFileAttributes)(nil)).Elem() +} + +func (b *GuestOperationsFault) GetGuestOperationsFault() *GuestOperationsFault { return b } + +type BaseGuestOperationsFault interface { + GetGuestOperationsFault() *GuestOperationsFault +} + +func init() { + t["BaseGuestOperationsFault"] = reflect.TypeOf((*GuestOperationsFault)(nil)).Elem() +} + +func (b *GuestProgramSpec) GetGuestProgramSpec() *GuestProgramSpec { return b } + +type BaseGuestProgramSpec interface { + GetGuestProgramSpec() *GuestProgramSpec +} + +func init() { + t["BaseGuestProgramSpec"] = reflect.TypeOf((*GuestProgramSpec)(nil)).Elem() +} + +func (b *GuestRegValueDataSpec) GetGuestRegValueDataSpec() *GuestRegValueDataSpec { return b } + +type BaseGuestRegValueDataSpec interface { + GetGuestRegValueDataSpec() *GuestRegValueDataSpec +} + +func init() { + t["BaseGuestRegValueDataSpec"] = reflect.TypeOf((*GuestRegValueDataSpec)(nil)).Elem() +} + +func (b *GuestRegistryFault) GetGuestRegistryFault() *GuestRegistryFault { return b } + +type BaseGuestRegistryFault interface { + GetGuestRegistryFault() *GuestRegistryFault +} + +func init() { + t["BaseGuestRegistryFault"] = reflect.TypeOf((*GuestRegistryFault)(nil)).Elem() +} + +func (b *GuestRegistryKeyFault) GetGuestRegistryKeyFault() *GuestRegistryKeyFault { return b } + +type BaseGuestRegistryKeyFault interface { + GetGuestRegistryKeyFault() *GuestRegistryKeyFault +} + +func init() { + t["BaseGuestRegistryKeyFault"] = reflect.TypeOf((*GuestRegistryKeyFault)(nil)).Elem() +} + +func (b *GuestRegistryValueFault) GetGuestRegistryValueFault() *GuestRegistryValueFault { return b } + +type BaseGuestRegistryValueFault interface { + GetGuestRegistryValueFault() *GuestRegistryValueFault +} + +func init() { + t["BaseGuestRegistryValueFault"] = reflect.TypeOf((*GuestRegistryValueFault)(nil)).Elem() +} + +func (b *HostAccountSpec) GetHostAccountSpec() *HostAccountSpec { return b } + +type BaseHostAccountSpec interface { + GetHostAccountSpec() *HostAccountSpec +} + +func init() { + t["BaseHostAccountSpec"] = reflect.TypeOf((*HostAccountSpec)(nil)).Elem() +} + +func (b *HostAuthenticationStoreInfo) GetHostAuthenticationStoreInfo() *HostAuthenticationStoreInfo { + return b +} + +type BaseHostAuthenticationStoreInfo interface { + GetHostAuthenticationStoreInfo() *HostAuthenticationStoreInfo +} + +func init() { + t["BaseHostAuthenticationStoreInfo"] = reflect.TypeOf((*HostAuthenticationStoreInfo)(nil)).Elem() +} + +func (b *HostCommunication) GetHostCommunication() *HostCommunication { return b } + +type BaseHostCommunication interface { + GetHostCommunication() *HostCommunication +} + +func init() { + t["BaseHostCommunication"] = reflect.TypeOf((*HostCommunication)(nil)).Elem() +} + +func (b *HostConfigFault) GetHostConfigFault() *HostConfigFault { return b } + +type BaseHostConfigFault interface { + GetHostConfigFault() *HostConfigFault +} + +func init() { + t["BaseHostConfigFault"] = reflect.TypeOf((*HostConfigFault)(nil)).Elem() +} + +func (b *HostConnectFault) 
GetHostConnectFault() *HostConnectFault { return b } + +type BaseHostConnectFault interface { + GetHostConnectFault() *HostConnectFault +} + +func init() { + t["BaseHostConnectFault"] = reflect.TypeOf((*HostConnectFault)(nil)).Elem() +} + +func (b *HostConnectInfoNetworkInfo) GetHostConnectInfoNetworkInfo() *HostConnectInfoNetworkInfo { + return b +} + +type BaseHostConnectInfoNetworkInfo interface { + GetHostConnectInfoNetworkInfo() *HostConnectInfoNetworkInfo +} + +func init() { + t["BaseHostConnectInfoNetworkInfo"] = reflect.TypeOf((*HostConnectInfoNetworkInfo)(nil)).Elem() +} + +func (b *HostDasEvent) GetHostDasEvent() *HostDasEvent { return b } + +type BaseHostDasEvent interface { + GetHostDasEvent() *HostDasEvent +} + +func init() { + t["BaseHostDasEvent"] = reflect.TypeOf((*HostDasEvent)(nil)).Elem() +} + +func (b *HostDatastoreConnectInfo) GetHostDatastoreConnectInfo() *HostDatastoreConnectInfo { return b } + +type BaseHostDatastoreConnectInfo interface { + GetHostDatastoreConnectInfo() *HostDatastoreConnectInfo +} + +func init() { + t["BaseHostDatastoreConnectInfo"] = reflect.TypeOf((*HostDatastoreConnectInfo)(nil)).Elem() +} + +func (b *HostDevice) GetHostDevice() *HostDevice { return b } + +type BaseHostDevice interface { + GetHostDevice() *HostDevice +} + +func init() { + t["BaseHostDevice"] = reflect.TypeOf((*HostDevice)(nil)).Elem() +} + +func (b *HostDigestInfo) GetHostDigestInfo() *HostDigestInfo { return b } + +type BaseHostDigestInfo interface { + GetHostDigestInfo() *HostDigestInfo +} + +func init() { + t["BaseHostDigestInfo"] = reflect.TypeOf((*HostDigestInfo)(nil)).Elem() +} + +func (b *HostDirectoryStoreInfo) GetHostDirectoryStoreInfo() *HostDirectoryStoreInfo { return b } + +type BaseHostDirectoryStoreInfo interface { + GetHostDirectoryStoreInfo() *HostDirectoryStoreInfo +} + +func init() { + t["BaseHostDirectoryStoreInfo"] = reflect.TypeOf((*HostDirectoryStoreInfo)(nil)).Elem() +} + +func (b *HostDnsConfig) GetHostDnsConfig() *HostDnsConfig { return b } + +type BaseHostDnsConfig interface { + GetHostDnsConfig() *HostDnsConfig +} + +func init() { + t["BaseHostDnsConfig"] = reflect.TypeOf((*HostDnsConfig)(nil)).Elem() +} + +func (b *HostEvent) GetHostEvent() *HostEvent { return b } + +type BaseHostEvent interface { + GetHostEvent() *HostEvent +} + +func init() { + t["BaseHostEvent"] = reflect.TypeOf((*HostEvent)(nil)).Elem() +} + +func (b *HostFibreChannelHba) GetHostFibreChannelHba() *HostFibreChannelHba { return b } + +type BaseHostFibreChannelHba interface { + GetHostFibreChannelHba() *HostFibreChannelHba +} + +func init() { + t["BaseHostFibreChannelHba"] = reflect.TypeOf((*HostFibreChannelHba)(nil)).Elem() +} + +func (b *HostFibreChannelTargetTransport) GetHostFibreChannelTargetTransport() *HostFibreChannelTargetTransport { + return b +} + +type BaseHostFibreChannelTargetTransport interface { + GetHostFibreChannelTargetTransport() *HostFibreChannelTargetTransport +} + +func init() { + t["BaseHostFibreChannelTargetTransport"] = reflect.TypeOf((*HostFibreChannelTargetTransport)(nil)).Elem() +} + +func (b *HostFileSystemVolume) GetHostFileSystemVolume() *HostFileSystemVolume { return b } + +type BaseHostFileSystemVolume interface { + GetHostFileSystemVolume() *HostFileSystemVolume +} + +func init() { + t["BaseHostFileSystemVolume"] = reflect.TypeOf((*HostFileSystemVolume)(nil)).Elem() +} + +func (b *HostHardwareElementInfo) GetHostHardwareElementInfo() *HostHardwareElementInfo { return b } + +type BaseHostHardwareElementInfo interface { + GetHostHardwareElementInfo() 
*HostHardwareElementInfo +} + +func init() { + t["BaseHostHardwareElementInfo"] = reflect.TypeOf((*HostHardwareElementInfo)(nil)).Elem() +} + +func (b *HostHostBusAdapter) GetHostHostBusAdapter() *HostHostBusAdapter { return b } + +type BaseHostHostBusAdapter interface { + GetHostHostBusAdapter() *HostHostBusAdapter +} + +func init() { + t["BaseHostHostBusAdapter"] = reflect.TypeOf((*HostHostBusAdapter)(nil)).Elem() +} + +func (b *HostIpRouteConfig) GetHostIpRouteConfig() *HostIpRouteConfig { return b } + +type BaseHostIpRouteConfig interface { + GetHostIpRouteConfig() *HostIpRouteConfig +} + +func init() { + t["BaseHostIpRouteConfig"] = reflect.TypeOf((*HostIpRouteConfig)(nil)).Elem() +} + +func (b *HostMemberHealthCheckResult) GetHostMemberHealthCheckResult() *HostMemberHealthCheckResult { + return b +} + +type BaseHostMemberHealthCheckResult interface { + GetHostMemberHealthCheckResult() *HostMemberHealthCheckResult +} + +func init() { + t["BaseHostMemberHealthCheckResult"] = reflect.TypeOf((*HostMemberHealthCheckResult)(nil)).Elem() +} + +func (b *HostMemberUplinkHealthCheckResult) GetHostMemberUplinkHealthCheckResult() *HostMemberUplinkHealthCheckResult { + return b +} + +type BaseHostMemberUplinkHealthCheckResult interface { + GetHostMemberUplinkHealthCheckResult() *HostMemberUplinkHealthCheckResult +} + +func init() { + t["BaseHostMemberUplinkHealthCheckResult"] = reflect.TypeOf((*HostMemberUplinkHealthCheckResult)(nil)).Elem() +} + +func (b *HostMultipathInfoLogicalUnitPolicy) GetHostMultipathInfoLogicalUnitPolicy() *HostMultipathInfoLogicalUnitPolicy { + return b +} + +type BaseHostMultipathInfoLogicalUnitPolicy interface { + GetHostMultipathInfoLogicalUnitPolicy() *HostMultipathInfoLogicalUnitPolicy +} + +func init() { + t["BaseHostMultipathInfoLogicalUnitPolicy"] = reflect.TypeOf((*HostMultipathInfoLogicalUnitPolicy)(nil)).Elem() +} + +func (b *HostPciPassthruConfig) GetHostPciPassthruConfig() *HostPciPassthruConfig { return b } + +type BaseHostPciPassthruConfig interface { + GetHostPciPassthruConfig() *HostPciPassthruConfig +} + +func init() { + t["BaseHostPciPassthruConfig"] = reflect.TypeOf((*HostPciPassthruConfig)(nil)).Elem() +} + +func (b *HostPciPassthruInfo) GetHostPciPassthruInfo() *HostPciPassthruInfo { return b } + +type BaseHostPciPassthruInfo interface { + GetHostPciPassthruInfo() *HostPciPassthruInfo +} + +func init() { + t["BaseHostPciPassthruInfo"] = reflect.TypeOf((*HostPciPassthruInfo)(nil)).Elem() +} + +func (b *HostPowerOpFailed) GetHostPowerOpFailed() *HostPowerOpFailed { return b } + +type BaseHostPowerOpFailed interface { + GetHostPowerOpFailed() *HostPowerOpFailed +} + +func init() { + t["BaseHostPowerOpFailed"] = reflect.TypeOf((*HostPowerOpFailed)(nil)).Elem() +} + +func (b *HostProfileConfigSpec) GetHostProfileConfigSpec() *HostProfileConfigSpec { return b } + +type BaseHostProfileConfigSpec interface { + GetHostProfileConfigSpec() *HostProfileConfigSpec +} + +func init() { + t["BaseHostProfileConfigSpec"] = reflect.TypeOf((*HostProfileConfigSpec)(nil)).Elem() +} + +func (b *HostSystemSwapConfigurationSystemSwapOption) GetHostSystemSwapConfigurationSystemSwapOption() *HostSystemSwapConfigurationSystemSwapOption { + return b +} + +type BaseHostSystemSwapConfigurationSystemSwapOption interface { + GetHostSystemSwapConfigurationSystemSwapOption() *HostSystemSwapConfigurationSystemSwapOption +} + +func init() { + t["BaseHostSystemSwapConfigurationSystemSwapOption"] = reflect.TypeOf((*HostSystemSwapConfigurationSystemSwapOption)(nil)).Elem() +} + +func (b 
*HostTargetTransport) GetHostTargetTransport() *HostTargetTransport { return b } + +type BaseHostTargetTransport interface { + GetHostTargetTransport() *HostTargetTransport +} + +func init() { + t["BaseHostTargetTransport"] = reflect.TypeOf((*HostTargetTransport)(nil)).Elem() +} + +func (b *HostTpmEventDetails) GetHostTpmEventDetails() *HostTpmEventDetails { return b } + +type BaseHostTpmEventDetails interface { + GetHostTpmEventDetails() *HostTpmEventDetails +} + +func init() { + t["BaseHostTpmEventDetails"] = reflect.TypeOf((*HostTpmEventDetails)(nil)).Elem() +} + +func (b *HostVirtualSwitchBridge) GetHostVirtualSwitchBridge() *HostVirtualSwitchBridge { return b } + +type BaseHostVirtualSwitchBridge interface { + GetHostVirtualSwitchBridge() *HostVirtualSwitchBridge +} + +func init() { + t["BaseHostVirtualSwitchBridge"] = reflect.TypeOf((*HostVirtualSwitchBridge)(nil)).Elem() +} + +func (b *HourlyTaskScheduler) GetHourlyTaskScheduler() *HourlyTaskScheduler { return b } + +type BaseHourlyTaskScheduler interface { + GetHourlyTaskScheduler() *HourlyTaskScheduler +} + +func init() { + t["BaseHourlyTaskScheduler"] = reflect.TypeOf((*HourlyTaskScheduler)(nil)).Elem() +} + +func (b *ImportSpec) GetImportSpec() *ImportSpec { return b } + +type BaseImportSpec interface { + GetImportSpec() *ImportSpec +} + +func init() { + t["BaseImportSpec"] = reflect.TypeOf((*ImportSpec)(nil)).Elem() +} + +func (b *InaccessibleDatastore) GetInaccessibleDatastore() *InaccessibleDatastore { return b } + +type BaseInaccessibleDatastore interface { + GetInaccessibleDatastore() *InaccessibleDatastore +} + +func init() { + t["BaseInaccessibleDatastore"] = reflect.TypeOf((*InaccessibleDatastore)(nil)).Elem() +} + +func (b *InheritablePolicy) GetInheritablePolicy() *InheritablePolicy { return b } + +type BaseInheritablePolicy interface { + GetInheritablePolicy() *InheritablePolicy +} + +func init() { + t["BaseInheritablePolicy"] = reflect.TypeOf((*InheritablePolicy)(nil)).Elem() +} + +func (b *InsufficientHostCapacityFault) GetInsufficientHostCapacityFault() *InsufficientHostCapacityFault { + return b +} + +type BaseInsufficientHostCapacityFault interface { + GetInsufficientHostCapacityFault() *InsufficientHostCapacityFault +} + +func init() { + t["BaseInsufficientHostCapacityFault"] = reflect.TypeOf((*InsufficientHostCapacityFault)(nil)).Elem() +} + +func (b *InsufficientResourcesFault) GetInsufficientResourcesFault() *InsufficientResourcesFault { + return b +} + +type BaseInsufficientResourcesFault interface { + GetInsufficientResourcesFault() *InsufficientResourcesFault +} + +func init() { + t["BaseInsufficientResourcesFault"] = reflect.TypeOf((*InsufficientResourcesFault)(nil)).Elem() +} + +func (b *InsufficientStandbyResource) GetInsufficientStandbyResource() *InsufficientStandbyResource { + return b +} + +type BaseInsufficientStandbyResource interface { + GetInsufficientStandbyResource() *InsufficientStandbyResource +} + +func init() { + t["BaseInsufficientStandbyResource"] = reflect.TypeOf((*InsufficientStandbyResource)(nil)).Elem() +} + +func (b *InvalidArgument) GetInvalidArgument() *InvalidArgument { return b } + +type BaseInvalidArgument interface { + GetInvalidArgument() *InvalidArgument +} + +func init() { + t["BaseInvalidArgument"] = reflect.TypeOf((*InvalidArgument)(nil)).Elem() +} + +func (b *InvalidCAMServer) GetInvalidCAMServer() *InvalidCAMServer { return b } + +type BaseInvalidCAMServer interface { + GetInvalidCAMServer() *InvalidCAMServer +} + +func init() { + t["BaseInvalidCAMServer"] = 
reflect.TypeOf((*InvalidCAMServer)(nil)).Elem() +} + +func (b *InvalidDatastore) GetInvalidDatastore() *InvalidDatastore { return b } + +type BaseInvalidDatastore interface { + GetInvalidDatastore() *InvalidDatastore +} + +func init() { + t["BaseInvalidDatastore"] = reflect.TypeOf((*InvalidDatastore)(nil)).Elem() +} + +func (b *InvalidDeviceSpec) GetInvalidDeviceSpec() *InvalidDeviceSpec { return b } + +type BaseInvalidDeviceSpec interface { + GetInvalidDeviceSpec() *InvalidDeviceSpec +} + +func init() { + t["BaseInvalidDeviceSpec"] = reflect.TypeOf((*InvalidDeviceSpec)(nil)).Elem() +} + +func (b *InvalidFolder) GetInvalidFolder() *InvalidFolder { return b } + +type BaseInvalidFolder interface { + GetInvalidFolder() *InvalidFolder +} + +func init() { + t["BaseInvalidFolder"] = reflect.TypeOf((*InvalidFolder)(nil)).Elem() +} + +func (b *InvalidFormat) GetInvalidFormat() *InvalidFormat { return b } + +type BaseInvalidFormat interface { + GetInvalidFormat() *InvalidFormat +} + +func init() { + t["BaseInvalidFormat"] = reflect.TypeOf((*InvalidFormat)(nil)).Elem() +} + +func (b *InvalidHostState) GetInvalidHostState() *InvalidHostState { return b } + +type BaseInvalidHostState interface { + GetInvalidHostState() *InvalidHostState +} + +func init() { + t["BaseInvalidHostState"] = reflect.TypeOf((*InvalidHostState)(nil)).Elem() +} + +func (b *InvalidLogin) GetInvalidLogin() *InvalidLogin { return b } + +type BaseInvalidLogin interface { + GetInvalidLogin() *InvalidLogin +} + +func init() { + t["BaseInvalidLogin"] = reflect.TypeOf((*InvalidLogin)(nil)).Elem() +} + +func (b *InvalidPropertyValue) GetInvalidPropertyValue() *InvalidPropertyValue { return b } + +type BaseInvalidPropertyValue interface { + GetInvalidPropertyValue() *InvalidPropertyValue +} + +func init() { + t["BaseInvalidPropertyValue"] = reflect.TypeOf((*InvalidPropertyValue)(nil)).Elem() +} + +func (b *InvalidRequest) GetInvalidRequest() *InvalidRequest { return b } + +type BaseInvalidRequest interface { + GetInvalidRequest() *InvalidRequest +} + +func init() { + t["BaseInvalidRequest"] = reflect.TypeOf((*InvalidRequest)(nil)).Elem() +} + +func (b *InvalidState) GetInvalidState() *InvalidState { return b } + +type BaseInvalidState interface { + GetInvalidState() *InvalidState +} + +func init() { + t["BaseInvalidState"] = reflect.TypeOf((*InvalidState)(nil)).Elem() +} + +func (b *InvalidVmConfig) GetInvalidVmConfig() *InvalidVmConfig { return b } + +type BaseInvalidVmConfig interface { + GetInvalidVmConfig() *InvalidVmConfig +} + +func init() { + t["BaseInvalidVmConfig"] = reflect.TypeOf((*InvalidVmConfig)(nil)).Elem() +} + +func (b *IoFilterInfo) GetIoFilterInfo() *IoFilterInfo { return b } + +type BaseIoFilterInfo interface { + GetIoFilterInfo() *IoFilterInfo +} + +func init() { + t["BaseIoFilterInfo"] = reflect.TypeOf((*IoFilterInfo)(nil)).Elem() +} + +func (b *IpAddress) GetIpAddress() *IpAddress { return b } + +type BaseIpAddress interface { + GetIpAddress() *IpAddress +} + +func init() { + t["BaseIpAddress"] = reflect.TypeOf((*IpAddress)(nil)).Elem() +} + +func (b *IscsiFault) GetIscsiFault() *IscsiFault { return b } + +type BaseIscsiFault interface { + GetIscsiFault() *IscsiFault +} + +func init() { + t["BaseIscsiFault"] = reflect.TypeOf((*IscsiFault)(nil)).Elem() +} + +func (b *LicenseEvent) GetLicenseEvent() *LicenseEvent { return b } + +type BaseLicenseEvent interface { + GetLicenseEvent() *LicenseEvent +} + +func init() { + t["BaseLicenseEvent"] = reflect.TypeOf((*LicenseEvent)(nil)).Elem() +} + +func (b *LicenseSource) 
GetLicenseSource() *LicenseSource { return b } + +type BaseLicenseSource interface { + GetLicenseSource() *LicenseSource +} + +func init() { + t["BaseLicenseSource"] = reflect.TypeOf((*LicenseSource)(nil)).Elem() +} + +func (b *MacAddress) GetMacAddress() *MacAddress { return b } + +type BaseMacAddress interface { + GetMacAddress() *MacAddress +} + +func init() { + t["BaseMacAddress"] = reflect.TypeOf((*MacAddress)(nil)).Elem() +} + +func (b *MethodFault) GetMethodFault() *MethodFault { return b } + +type BaseMethodFault interface { + GetMethodFault() *MethodFault +} + +func init() { + t["BaseMethodFault"] = reflect.TypeOf((*MethodFault)(nil)).Elem() +} + +func (b *MigrationEvent) GetMigrationEvent() *MigrationEvent { return b } + +type BaseMigrationEvent interface { + GetMigrationEvent() *MigrationEvent +} + +func init() { + t["BaseMigrationEvent"] = reflect.TypeOf((*MigrationEvent)(nil)).Elem() +} + +func (b *MigrationFault) GetMigrationFault() *MigrationFault { return b } + +type BaseMigrationFault interface { + GetMigrationFault() *MigrationFault +} + +func init() { + t["BaseMigrationFault"] = reflect.TypeOf((*MigrationFault)(nil)).Elem() +} + +func (b *MigrationFeatureNotSupported) GetMigrationFeatureNotSupported() *MigrationFeatureNotSupported { + return b +} + +type BaseMigrationFeatureNotSupported interface { + GetMigrationFeatureNotSupported() *MigrationFeatureNotSupported +} + +func init() { + t["BaseMigrationFeatureNotSupported"] = reflect.TypeOf((*MigrationFeatureNotSupported)(nil)).Elem() +} + +func (b *MonthlyTaskScheduler) GetMonthlyTaskScheduler() *MonthlyTaskScheduler { return b } + +type BaseMonthlyTaskScheduler interface { + GetMonthlyTaskScheduler() *MonthlyTaskScheduler +} + +func init() { + t["BaseMonthlyTaskScheduler"] = reflect.TypeOf((*MonthlyTaskScheduler)(nil)).Elem() +} + +func (b *NasConfigFault) GetNasConfigFault() *NasConfigFault { return b } + +type BaseNasConfigFault interface { + GetNasConfigFault() *NasConfigFault +} + +func init() { + t["BaseNasConfigFault"] = reflect.TypeOf((*NasConfigFault)(nil)).Elem() +} + +func (b *NegatableExpression) GetNegatableExpression() *NegatableExpression { return b } + +type BaseNegatableExpression interface { + GetNegatableExpression() *NegatableExpression +} + +func init() { + t["BaseNegatableExpression"] = reflect.TypeOf((*NegatableExpression)(nil)).Elem() +} + +func (b *NetBIOSConfigInfo) GetNetBIOSConfigInfo() *NetBIOSConfigInfo { return b } + +type BaseNetBIOSConfigInfo interface { + GetNetBIOSConfigInfo() *NetBIOSConfigInfo +} + +func init() { + t["BaseNetBIOSConfigInfo"] = reflect.TypeOf((*NetBIOSConfigInfo)(nil)).Elem() +} + +func (b *NetworkSummary) GetNetworkSummary() *NetworkSummary { return b } + +type BaseNetworkSummary interface { + GetNetworkSummary() *NetworkSummary +} + +func init() { + t["BaseNetworkSummary"] = reflect.TypeOf((*NetworkSummary)(nil)).Elem() +} + +func (b *NoCompatibleHost) GetNoCompatibleHost() *NoCompatibleHost { return b } + +type BaseNoCompatibleHost interface { + GetNoCompatibleHost() *NoCompatibleHost +} + +func init() { + t["BaseNoCompatibleHost"] = reflect.TypeOf((*NoCompatibleHost)(nil)).Elem() +} + +func (b *NoPermission) GetNoPermission() *NoPermission { return b } + +type BaseNoPermission interface { + GetNoPermission() *NoPermission +} + +func init() { + t["BaseNoPermission"] = reflect.TypeOf((*NoPermission)(nil)).Elem() +} + +func (b *NotEnoughCpus) GetNotEnoughCpus() *NotEnoughCpus { return b } + +type BaseNotEnoughCpus interface { + GetNotEnoughCpus() *NotEnoughCpus +} + 
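// Editor's note: an illustrative sketch, not part of the vendored govmomi
// file. Each base type in this file gets the same three-piece treatment: a
// GetX accessor on *X, a BaseX interface, and an init() registration.
// Because derived structs embed their base type, they inherit the accessor
// and satisfy the interface, so a single function can accept the whole
// hierarchy.
func commonFault(f BaseMethodFault) *MethodFault {
	// Returns the embedded base struct carrying the fields shared by every
	// fault in the hierarchy.
	return f.GetMethodFault()
}

// Assuming the upstream fault hierarchy (InvalidArgument embeds RuntimeFault,
// which embeds MethodFault -- not visible in this hunk), both of these calls
// would compile:
//
//	commonFault(&InvalidArgument{})
//	commonFault(&NotEnoughCpus{})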
+func init() { + t["BaseNotEnoughCpus"] = reflect.TypeOf((*NotEnoughCpus)(nil)).Elem() +} + +func (b *NotEnoughLicenses) GetNotEnoughLicenses() *NotEnoughLicenses { return b } + +type BaseNotEnoughLicenses interface { + GetNotEnoughLicenses() *NotEnoughLicenses +} + +func init() { + t["BaseNotEnoughLicenses"] = reflect.TypeOf((*NotEnoughLicenses)(nil)).Elem() +} + +func (b *NotSupported) GetNotSupported() *NotSupported { return b } + +type BaseNotSupported interface { + GetNotSupported() *NotSupported +} + +func init() { + t["BaseNotSupported"] = reflect.TypeOf((*NotSupported)(nil)).Elem() +} + +func (b *NotSupportedHost) GetNotSupportedHost() *NotSupportedHost { return b } + +type BaseNotSupportedHost interface { + GetNotSupportedHost() *NotSupportedHost +} + +func init() { + t["BaseNotSupportedHost"] = reflect.TypeOf((*NotSupportedHost)(nil)).Elem() +} + +func (b *NotSupportedHostInCluster) GetNotSupportedHostInCluster() *NotSupportedHostInCluster { + return b +} + +type BaseNotSupportedHostInCluster interface { + GetNotSupportedHostInCluster() *NotSupportedHostInCluster +} + +func init() { + t["BaseNotSupportedHostInCluster"] = reflect.TypeOf((*NotSupportedHostInCluster)(nil)).Elem() +} + +func (b *OptionType) GetOptionType() *OptionType { return b } + +type BaseOptionType interface { + GetOptionType() *OptionType +} + +func init() { + t["BaseOptionType"] = reflect.TypeOf((*OptionType)(nil)).Elem() +} + +func (b *OptionValue) GetOptionValue() *OptionValue { return b } + +type BaseOptionValue interface { + GetOptionValue() *OptionValue +} + +func init() { + t["BaseOptionValue"] = reflect.TypeOf((*OptionValue)(nil)).Elem() +} + +func (b *OvfAttribute) GetOvfAttribute() *OvfAttribute { return b } + +type BaseOvfAttribute interface { + GetOvfAttribute() *OvfAttribute +} + +func init() { + t["BaseOvfAttribute"] = reflect.TypeOf((*OvfAttribute)(nil)).Elem() +} + +func (b *OvfConnectedDevice) GetOvfConnectedDevice() *OvfConnectedDevice { return b } + +type BaseOvfConnectedDevice interface { + GetOvfConnectedDevice() *OvfConnectedDevice +} + +func init() { + t["BaseOvfConnectedDevice"] = reflect.TypeOf((*OvfConnectedDevice)(nil)).Elem() +} + +func (b *OvfConstraint) GetOvfConstraint() *OvfConstraint { return b } + +type BaseOvfConstraint interface { + GetOvfConstraint() *OvfConstraint +} + +func init() { + t["BaseOvfConstraint"] = reflect.TypeOf((*OvfConstraint)(nil)).Elem() +} + +func (b *OvfConsumerCallbackFault) GetOvfConsumerCallbackFault() *OvfConsumerCallbackFault { return b } + +type BaseOvfConsumerCallbackFault interface { + GetOvfConsumerCallbackFault() *OvfConsumerCallbackFault +} + +func init() { + t["BaseOvfConsumerCallbackFault"] = reflect.TypeOf((*OvfConsumerCallbackFault)(nil)).Elem() +} + +func (b *OvfElement) GetOvfElement() *OvfElement { return b } + +type BaseOvfElement interface { + GetOvfElement() *OvfElement +} + +func init() { + t["BaseOvfElement"] = reflect.TypeOf((*OvfElement)(nil)).Elem() +} + +func (b *OvfExport) GetOvfExport() *OvfExport { return b } + +type BaseOvfExport interface { + GetOvfExport() *OvfExport +} + +func init() { + t["BaseOvfExport"] = reflect.TypeOf((*OvfExport)(nil)).Elem() +} + +func (b *OvfFault) GetOvfFault() *OvfFault { return b } + +type BaseOvfFault interface { + GetOvfFault() *OvfFault +} + +func init() { + t["BaseOvfFault"] = reflect.TypeOf((*OvfFault)(nil)).Elem() +} + +func (b *OvfHardwareExport) GetOvfHardwareExport() *OvfHardwareExport { return b } + +type BaseOvfHardwareExport interface { + GetOvfHardwareExport() *OvfHardwareExport +} 
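// Editor's note: a hypothetical helper, not vendored code, showing what all
// of these init() blocks accomplish. Each one records a concrete Go type in
// the package-level map `t` (declared in registry.go later in this patch),
// keyed by its vim25 type name; govmomi's XML decoding uses TypeFunc to map
// an xsi:type attribute from the wire back to the right Go type at runtime.
func newByName(name string) (interface{}, bool) {
	typ, ok := TypeFunc()(name) // e.g. "BaseOvfHardwareExport"
	if !ok {
		return nil, false
	}
	// reflect.New yields a pointer to a fresh zero value of the registered
	// type, e.g. *OvfHardwareExport for the name above.
	return reflect.New(typ).Interface(), true
}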
+ +func init() { + t["BaseOvfHardwareExport"] = reflect.TypeOf((*OvfHardwareExport)(nil)).Elem() +} + +func (b *OvfImport) GetOvfImport() *OvfImport { return b } + +type BaseOvfImport interface { + GetOvfImport() *OvfImport +} + +func init() { + t["BaseOvfImport"] = reflect.TypeOf((*OvfImport)(nil)).Elem() +} + +func (b *OvfInvalidPackage) GetOvfInvalidPackage() *OvfInvalidPackage { return b } + +type BaseOvfInvalidPackage interface { + GetOvfInvalidPackage() *OvfInvalidPackage +} + +func init() { + t["BaseOvfInvalidPackage"] = reflect.TypeOf((*OvfInvalidPackage)(nil)).Elem() +} + +func (b *OvfInvalidValue) GetOvfInvalidValue() *OvfInvalidValue { return b } + +type BaseOvfInvalidValue interface { + GetOvfInvalidValue() *OvfInvalidValue +} + +func init() { + t["BaseOvfInvalidValue"] = reflect.TypeOf((*OvfInvalidValue)(nil)).Elem() +} + +func (b *OvfManagerCommonParams) GetOvfManagerCommonParams() *OvfManagerCommonParams { return b } + +type BaseOvfManagerCommonParams interface { + GetOvfManagerCommonParams() *OvfManagerCommonParams +} + +func init() { + t["BaseOvfManagerCommonParams"] = reflect.TypeOf((*OvfManagerCommonParams)(nil)).Elem() +} + +func (b *OvfMissingElement) GetOvfMissingElement() *OvfMissingElement { return b } + +type BaseOvfMissingElement interface { + GetOvfMissingElement() *OvfMissingElement +} + +func init() { + t["BaseOvfMissingElement"] = reflect.TypeOf((*OvfMissingElement)(nil)).Elem() +} + +func (b *OvfProperty) GetOvfProperty() *OvfProperty { return b } + +type BaseOvfProperty interface { + GetOvfProperty() *OvfProperty +} + +func init() { + t["BaseOvfProperty"] = reflect.TypeOf((*OvfProperty)(nil)).Elem() +} + +func (b *OvfSystemFault) GetOvfSystemFault() *OvfSystemFault { return b } + +type BaseOvfSystemFault interface { + GetOvfSystemFault() *OvfSystemFault +} + +func init() { + t["BaseOvfSystemFault"] = reflect.TypeOf((*OvfSystemFault)(nil)).Elem() +} + +func (b *OvfUnsupportedAttribute) GetOvfUnsupportedAttribute() *OvfUnsupportedAttribute { return b } + +type BaseOvfUnsupportedAttribute interface { + GetOvfUnsupportedAttribute() *OvfUnsupportedAttribute +} + +func init() { + t["BaseOvfUnsupportedAttribute"] = reflect.TypeOf((*OvfUnsupportedAttribute)(nil)).Elem() +} + +func (b *OvfUnsupportedElement) GetOvfUnsupportedElement() *OvfUnsupportedElement { return b } + +type BaseOvfUnsupportedElement interface { + GetOvfUnsupportedElement() *OvfUnsupportedElement +} + +func init() { + t["BaseOvfUnsupportedElement"] = reflect.TypeOf((*OvfUnsupportedElement)(nil)).Elem() +} + +func (b *OvfUnsupportedPackage) GetOvfUnsupportedPackage() *OvfUnsupportedPackage { return b } + +type BaseOvfUnsupportedPackage interface { + GetOvfUnsupportedPackage() *OvfUnsupportedPackage +} + +func init() { + t["BaseOvfUnsupportedPackage"] = reflect.TypeOf((*OvfUnsupportedPackage)(nil)).Elem() +} + +func (b *PatchMetadataInvalid) GetPatchMetadataInvalid() *PatchMetadataInvalid { return b } + +type BasePatchMetadataInvalid interface { + GetPatchMetadataInvalid() *PatchMetadataInvalid +} + +func init() { + t["BasePatchMetadataInvalid"] = reflect.TypeOf((*PatchMetadataInvalid)(nil)).Elem() +} + +func (b *PatchNotApplicable) GetPatchNotApplicable() *PatchNotApplicable { return b } + +type BasePatchNotApplicable interface { + GetPatchNotApplicable() *PatchNotApplicable +} + +func init() { + t["BasePatchNotApplicable"] = reflect.TypeOf((*PatchNotApplicable)(nil)).Elem() +} + +func (b *PerfEntityMetricBase) GetPerfEntityMetricBase() *PerfEntityMetricBase { return b } + +type 
BasePerfEntityMetricBase interface { + GetPerfEntityMetricBase() *PerfEntityMetricBase +} + +func init() { + t["BasePerfEntityMetricBase"] = reflect.TypeOf((*PerfEntityMetricBase)(nil)).Elem() +} + +func (b *PerfMetricSeries) GetPerfMetricSeries() *PerfMetricSeries { return b } + +type BasePerfMetricSeries interface { + GetPerfMetricSeries() *PerfMetricSeries +} + +func init() { + t["BasePerfMetricSeries"] = reflect.TypeOf((*PerfMetricSeries)(nil)).Elem() +} + +func (b *PermissionEvent) GetPermissionEvent() *PermissionEvent { return b } + +type BasePermissionEvent interface { + GetPermissionEvent() *PermissionEvent +} + +func init() { + t["BasePermissionEvent"] = reflect.TypeOf((*PermissionEvent)(nil)).Elem() +} + +func (b *PhysicalNicHint) GetPhysicalNicHint() *PhysicalNicHint { return b } + +type BasePhysicalNicHint interface { + GetPhysicalNicHint() *PhysicalNicHint +} + +func init() { + t["BasePhysicalNicHint"] = reflect.TypeOf((*PhysicalNicHint)(nil)).Elem() +} + +func (b *PlatformConfigFault) GetPlatformConfigFault() *PlatformConfigFault { return b } + +type BasePlatformConfigFault interface { + GetPlatformConfigFault() *PlatformConfigFault +} + +func init() { + t["BasePlatformConfigFault"] = reflect.TypeOf((*PlatformConfigFault)(nil)).Elem() +} + +func (b *PolicyOption) GetPolicyOption() *PolicyOption { return b } + +type BasePolicyOption interface { + GetPolicyOption() *PolicyOption +} + +func init() { + t["BasePolicyOption"] = reflect.TypeOf((*PolicyOption)(nil)).Elem() +} + +func (b *PortGroupProfile) GetPortGroupProfile() *PortGroupProfile { return b } + +type BasePortGroupProfile interface { + GetPortGroupProfile() *PortGroupProfile +} + +func init() { + t["BasePortGroupProfile"] = reflect.TypeOf((*PortGroupProfile)(nil)).Elem() +} + +func (b *ProfileConfigInfo) GetProfileConfigInfo() *ProfileConfigInfo { return b } + +type BaseProfileConfigInfo interface { + GetProfileConfigInfo() *ProfileConfigInfo +} + +func init() { + t["BaseProfileConfigInfo"] = reflect.TypeOf((*ProfileConfigInfo)(nil)).Elem() +} + +func (b *ProfileCreateSpec) GetProfileCreateSpec() *ProfileCreateSpec { return b } + +type BaseProfileCreateSpec interface { + GetProfileCreateSpec() *ProfileCreateSpec +} + +func init() { + t["BaseProfileCreateSpec"] = reflect.TypeOf((*ProfileCreateSpec)(nil)).Elem() +} + +func (b *ProfileEvent) GetProfileEvent() *ProfileEvent { return b } + +type BaseProfileEvent interface { + GetProfileEvent() *ProfileEvent +} + +func init() { + t["BaseProfileEvent"] = reflect.TypeOf((*ProfileEvent)(nil)).Elem() +} + +func (b *ProfileExpression) GetProfileExpression() *ProfileExpression { return b } + +type BaseProfileExpression interface { + GetProfileExpression() *ProfileExpression +} + +func init() { + t["BaseProfileExpression"] = reflect.TypeOf((*ProfileExpression)(nil)).Elem() +} + +func (b *ProfilePolicyOptionMetadata) GetProfilePolicyOptionMetadata() *ProfilePolicyOptionMetadata { + return b +} + +type BaseProfilePolicyOptionMetadata interface { + GetProfilePolicyOptionMetadata() *ProfilePolicyOptionMetadata +} + +func init() { + t["BaseProfilePolicyOptionMetadata"] = reflect.TypeOf((*ProfilePolicyOptionMetadata)(nil)).Elem() +} + +func (b *ProfileSerializedCreateSpec) GetProfileSerializedCreateSpec() *ProfileSerializedCreateSpec { + return b +} + +type BaseProfileSerializedCreateSpec interface { + GetProfileSerializedCreateSpec() *ProfileSerializedCreateSpec +} + +func init() { + t["BaseProfileSerializedCreateSpec"] = reflect.TypeOf((*ProfileSerializedCreateSpec)(nil)).Elem() +} + 
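// Editor's note: a minimal, hypothetical subtype (not part of govmomi)
// demonstrating why the Get methods are declared on the base structs:
// embedding the base is all a derived type needs to satisfy its Base*
// interface. That in turn is what lets other generated structs type their
// polymorphic fields as a Base* interface (with `typeattr` in the XML tag)
// and still round-trip concrete types through the registry.
type exampleSerializedSpec struct {
	ProfileSerializedCreateSpec // promotes GetProfileSerializedCreateSpec

	Note string // hypothetical extra field
}

// Compile-time check that embedding the base satisfies the interface.
var _ BaseProfileSerializedCreateSpec = &exampleSerializedSpec{}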
+func (b *RDMNotSupported) GetRDMNotSupported() *RDMNotSupported { return b } + +type BaseRDMNotSupported interface { + GetRDMNotSupported() *RDMNotSupported +} + +func init() { + t["BaseRDMNotSupported"] = reflect.TypeOf((*RDMNotSupported)(nil)).Elem() +} + +func (b *RecurrentTaskScheduler) GetRecurrentTaskScheduler() *RecurrentTaskScheduler { return b } + +type BaseRecurrentTaskScheduler interface { + GetRecurrentTaskScheduler() *RecurrentTaskScheduler +} + +func init() { + t["BaseRecurrentTaskScheduler"] = reflect.TypeOf((*RecurrentTaskScheduler)(nil)).Elem() +} + +func (b *ReplicationConfigFault) GetReplicationConfigFault() *ReplicationConfigFault { return b } + +type BaseReplicationConfigFault interface { + GetReplicationConfigFault() *ReplicationConfigFault +} + +func init() { + t["BaseReplicationConfigFault"] = reflect.TypeOf((*ReplicationConfigFault)(nil)).Elem() +} + +func (b *ReplicationFault) GetReplicationFault() *ReplicationFault { return b } + +type BaseReplicationFault interface { + GetReplicationFault() *ReplicationFault +} + +func init() { + t["BaseReplicationFault"] = reflect.TypeOf((*ReplicationFault)(nil)).Elem() +} + +func (b *ReplicationVmFault) GetReplicationVmFault() *ReplicationVmFault { return b } + +type BaseReplicationVmFault interface { + GetReplicationVmFault() *ReplicationVmFault +} + +func init() { + t["BaseReplicationVmFault"] = reflect.TypeOf((*ReplicationVmFault)(nil)).Elem() +} + +func (b *ResourceAllocationInfo) GetResourceAllocationInfo() *ResourceAllocationInfo { return b } + +type BaseResourceAllocationInfo interface { + GetResourceAllocationInfo() *ResourceAllocationInfo +} + +func init() { + t["BaseResourceAllocationInfo"] = reflect.TypeOf((*ResourceAllocationInfo)(nil)).Elem() +} + +func (b *ResourceInUse) GetResourceInUse() *ResourceInUse { return b } + +type BaseResourceInUse interface { + GetResourceInUse() *ResourceInUse +} + +func init() { + t["BaseResourceInUse"] = reflect.TypeOf((*ResourceInUse)(nil)).Elem() +} + +func (b *ResourcePoolEvent) GetResourcePoolEvent() *ResourcePoolEvent { return b } + +type BaseResourcePoolEvent interface { + GetResourcePoolEvent() *ResourcePoolEvent +} + +func init() { + t["BaseResourcePoolEvent"] = reflect.TypeOf((*ResourcePoolEvent)(nil)).Elem() +} + +func (b *ResourcePoolSummary) GetResourcePoolSummary() *ResourcePoolSummary { return b } + +type BaseResourcePoolSummary interface { + GetResourcePoolSummary() *ResourcePoolSummary +} + +func init() { + t["BaseResourcePoolSummary"] = reflect.TypeOf((*ResourcePoolSummary)(nil)).Elem() +} + +func (b *RoleEvent) GetRoleEvent() *RoleEvent { return b } + +type BaseRoleEvent interface { + GetRoleEvent() *RoleEvent +} + +func init() { + t["BaseRoleEvent"] = reflect.TypeOf((*RoleEvent)(nil)).Elem() +} + +func (b *RuntimeFault) GetRuntimeFault() *RuntimeFault { return b } + +type BaseRuntimeFault interface { + GetRuntimeFault() *RuntimeFault +} + +func init() { + t["BaseRuntimeFault"] = reflect.TypeOf((*RuntimeFault)(nil)).Elem() +} + +func (b *ScheduledTaskEvent) GetScheduledTaskEvent() *ScheduledTaskEvent { return b } + +type BaseScheduledTaskEvent interface { + GetScheduledTaskEvent() *ScheduledTaskEvent +} + +func init() { + t["BaseScheduledTaskEvent"] = reflect.TypeOf((*ScheduledTaskEvent)(nil)).Elem() +} + +func (b *ScheduledTaskSpec) GetScheduledTaskSpec() *ScheduledTaskSpec { return b } + +type BaseScheduledTaskSpec interface { + GetScheduledTaskSpec() *ScheduledTaskSpec +} + +func init() { + t["BaseScheduledTaskSpec"] = 
reflect.TypeOf((*ScheduledTaskSpec)(nil)).Elem() +} + +func (b *ScsiLun) GetScsiLun() *ScsiLun { return b } + +type BaseScsiLun interface { + GetScsiLun() *ScsiLun +} + +func init() { + t["BaseScsiLun"] = reflect.TypeOf((*ScsiLun)(nil)).Elem() +} + +func (b *SecurityError) GetSecurityError() *SecurityError { return b } + +type BaseSecurityError interface { + GetSecurityError() *SecurityError +} + +func init() { + t["BaseSecurityError"] = reflect.TypeOf((*SecurityError)(nil)).Elem() +} + +func (b *SelectionSet) GetSelectionSet() *SelectionSet { return b } + +type BaseSelectionSet interface { + GetSelectionSet() *SelectionSet +} + +func init() { + t["BaseSelectionSet"] = reflect.TypeOf((*SelectionSet)(nil)).Elem() +} + +func (b *SelectionSpec) GetSelectionSpec() *SelectionSpec { return b } + +type BaseSelectionSpec interface { + GetSelectionSpec() *SelectionSpec +} + +func init() { + t["BaseSelectionSpec"] = reflect.TypeOf((*SelectionSpec)(nil)).Elem() +} + +func (b *ServiceLocatorCredential) GetServiceLocatorCredential() *ServiceLocatorCredential { return b } + +type BaseServiceLocatorCredential interface { + GetServiceLocatorCredential() *ServiceLocatorCredential +} + +func init() { + t["BaseServiceLocatorCredential"] = reflect.TypeOf((*ServiceLocatorCredential)(nil)).Elem() +} + +func (b *SessionEvent) GetSessionEvent() *SessionEvent { return b } + +type BaseSessionEvent interface { + GetSessionEvent() *SessionEvent +} + +func init() { + t["BaseSessionEvent"] = reflect.TypeOf((*SessionEvent)(nil)).Elem() +} + +func (b *SessionManagerServiceRequestSpec) GetSessionManagerServiceRequestSpec() *SessionManagerServiceRequestSpec { + return b +} + +type BaseSessionManagerServiceRequestSpec interface { + GetSessionManagerServiceRequestSpec() *SessionManagerServiceRequestSpec +} + +func init() { + t["BaseSessionManagerServiceRequestSpec"] = reflect.TypeOf((*SessionManagerServiceRequestSpec)(nil)).Elem() +} + +func (b *SnapshotCopyNotSupported) GetSnapshotCopyNotSupported() *SnapshotCopyNotSupported { return b } + +type BaseSnapshotCopyNotSupported interface { + GetSnapshotCopyNotSupported() *SnapshotCopyNotSupported +} + +func init() { + t["BaseSnapshotCopyNotSupported"] = reflect.TypeOf((*SnapshotCopyNotSupported)(nil)).Elem() +} + +func (b *SnapshotFault) GetSnapshotFault() *SnapshotFault { return b } + +type BaseSnapshotFault interface { + GetSnapshotFault() *SnapshotFault +} + +func init() { + t["BaseSnapshotFault"] = reflect.TypeOf((*SnapshotFault)(nil)).Elem() +} + +func (b *TaskEvent) GetTaskEvent() *TaskEvent { return b } + +type BaseTaskEvent interface { + GetTaskEvent() *TaskEvent +} + +func init() { + t["BaseTaskEvent"] = reflect.TypeOf((*TaskEvent)(nil)).Elem() +} + +func (b *TaskInProgress) GetTaskInProgress() *TaskInProgress { return b } + +type BaseTaskInProgress interface { + GetTaskInProgress() *TaskInProgress +} + +func init() { + t["BaseTaskInProgress"] = reflect.TypeOf((*TaskInProgress)(nil)).Elem() +} + +func (b *TaskReason) GetTaskReason() *TaskReason { return b } + +type BaseTaskReason interface { + GetTaskReason() *TaskReason +} + +func init() { + t["BaseTaskReason"] = reflect.TypeOf((*TaskReason)(nil)).Elem() +} + +func (b *TaskScheduler) GetTaskScheduler() *TaskScheduler { return b } + +type BaseTaskScheduler interface { + GetTaskScheduler() *TaskScheduler +} + +func init() { + t["BaseTaskScheduler"] = reflect.TypeOf((*TaskScheduler)(nil)).Elem() +} + +func (b *TemplateUpgradeEvent) GetTemplateUpgradeEvent() *TemplateUpgradeEvent { return b } + +type 
BaseTemplateUpgradeEvent interface { + GetTemplateUpgradeEvent() *TemplateUpgradeEvent +} + +func init() { + t["BaseTemplateUpgradeEvent"] = reflect.TypeOf((*TemplateUpgradeEvent)(nil)).Elem() +} + +func (b *Timedout) GetTimedout() *Timedout { return b } + +type BaseTimedout interface { + GetTimedout() *Timedout +} + +func init() { + t["BaseTimedout"] = reflect.TypeOf((*Timedout)(nil)).Elem() +} + +func (b *TypeDescription) GetTypeDescription() *TypeDescription { return b } + +type BaseTypeDescription interface { + GetTypeDescription() *TypeDescription +} + +func init() { + t["BaseTypeDescription"] = reflect.TypeOf((*TypeDescription)(nil)).Elem() +} + +func (b *UnsupportedDatastore) GetUnsupportedDatastore() *UnsupportedDatastore { return b } + +type BaseUnsupportedDatastore interface { + GetUnsupportedDatastore() *UnsupportedDatastore +} + +func init() { + t["BaseUnsupportedDatastore"] = reflect.TypeOf((*UnsupportedDatastore)(nil)).Elem() +} + +func (b *UpgradeEvent) GetUpgradeEvent() *UpgradeEvent { return b } + +type BaseUpgradeEvent interface { + GetUpgradeEvent() *UpgradeEvent +} + +func init() { + t["BaseUpgradeEvent"] = reflect.TypeOf((*UpgradeEvent)(nil)).Elem() +} + +func (b *UserSearchResult) GetUserSearchResult() *UserSearchResult { return b } + +type BaseUserSearchResult interface { + GetUserSearchResult() *UserSearchResult +} + +func init() { + t["BaseUserSearchResult"] = reflect.TypeOf((*UserSearchResult)(nil)).Elem() +} + +func (b *VAppConfigFault) GetVAppConfigFault() *VAppConfigFault { return b } + +type BaseVAppConfigFault interface { + GetVAppConfigFault() *VAppConfigFault +} + +func init() { + t["BaseVAppConfigFault"] = reflect.TypeOf((*VAppConfigFault)(nil)).Elem() +} + +func (b *VAppPropertyFault) GetVAppPropertyFault() *VAppPropertyFault { return b } + +type BaseVAppPropertyFault interface { + GetVAppPropertyFault() *VAppPropertyFault +} + +func init() { + t["BaseVAppPropertyFault"] = reflect.TypeOf((*VAppPropertyFault)(nil)).Elem() +} + +func (b *VMotionInterfaceIssue) GetVMotionInterfaceIssue() *VMotionInterfaceIssue { return b } + +type BaseVMotionInterfaceIssue interface { + GetVMotionInterfaceIssue() *VMotionInterfaceIssue +} + +func init() { + t["BaseVMotionInterfaceIssue"] = reflect.TypeOf((*VMotionInterfaceIssue)(nil)).Elem() +} + +func (b *VMwareDVSHealthCheckConfig) GetVMwareDVSHealthCheckConfig() *VMwareDVSHealthCheckConfig { + return b +} + +type BaseVMwareDVSHealthCheckConfig interface { + GetVMwareDVSHealthCheckConfig() *VMwareDVSHealthCheckConfig +} + +func init() { + t["BaseVMwareDVSHealthCheckConfig"] = reflect.TypeOf((*VMwareDVSHealthCheckConfig)(nil)).Elem() +} + +func (b *VimFault) GetVimFault() *VimFault { return b } + +type BaseVimFault interface { + GetVimFault() *VimFault +} + +func init() { + t["BaseVimFault"] = reflect.TypeOf((*VimFault)(nil)).Elem() +} + +func (b *VirtualController) GetVirtualController() *VirtualController { return b } + +type BaseVirtualController interface { + GetVirtualController() *VirtualController +} + +func init() { + t["BaseVirtualController"] = reflect.TypeOf((*VirtualController)(nil)).Elem() +} + +func (b *VirtualControllerOption) GetVirtualControllerOption() *VirtualControllerOption { return b } + +type BaseVirtualControllerOption interface { + GetVirtualControllerOption() *VirtualControllerOption +} + +func init() { + t["BaseVirtualControllerOption"] = reflect.TypeOf((*VirtualControllerOption)(nil)).Elem() +} + +func (b *VirtualDevice) GetVirtualDevice() *VirtualDevice { return b } + +type BaseVirtualDevice 
interface { + GetVirtualDevice() *VirtualDevice +} + +func init() { + t["BaseVirtualDevice"] = reflect.TypeOf((*VirtualDevice)(nil)).Elem() +} + +func (b *VirtualDeviceBackingInfo) GetVirtualDeviceBackingInfo() *VirtualDeviceBackingInfo { return b } + +type BaseVirtualDeviceBackingInfo interface { + GetVirtualDeviceBackingInfo() *VirtualDeviceBackingInfo +} + +func init() { + t["BaseVirtualDeviceBackingInfo"] = reflect.TypeOf((*VirtualDeviceBackingInfo)(nil)).Elem() +} + +func (b *VirtualDeviceBackingOption) GetVirtualDeviceBackingOption() *VirtualDeviceBackingOption { + return b +} + +type BaseVirtualDeviceBackingOption interface { + GetVirtualDeviceBackingOption() *VirtualDeviceBackingOption +} + +func init() { + t["BaseVirtualDeviceBackingOption"] = reflect.TypeOf((*VirtualDeviceBackingOption)(nil)).Elem() +} + +func (b *VirtualDeviceBusSlotInfo) GetVirtualDeviceBusSlotInfo() *VirtualDeviceBusSlotInfo { return b } + +type BaseVirtualDeviceBusSlotInfo interface { + GetVirtualDeviceBusSlotInfo() *VirtualDeviceBusSlotInfo +} + +func init() { + t["BaseVirtualDeviceBusSlotInfo"] = reflect.TypeOf((*VirtualDeviceBusSlotInfo)(nil)).Elem() +} + +func (b *VirtualDeviceConfigSpec) GetVirtualDeviceConfigSpec() *VirtualDeviceConfigSpec { return b } + +type BaseVirtualDeviceConfigSpec interface { + GetVirtualDeviceConfigSpec() *VirtualDeviceConfigSpec +} + +func init() { + t["BaseVirtualDeviceConfigSpec"] = reflect.TypeOf((*VirtualDeviceConfigSpec)(nil)).Elem() +} + +func (b *VirtualDeviceDeviceBackingInfo) GetVirtualDeviceDeviceBackingInfo() *VirtualDeviceDeviceBackingInfo { + return b +} + +type BaseVirtualDeviceDeviceBackingInfo interface { + GetVirtualDeviceDeviceBackingInfo() *VirtualDeviceDeviceBackingInfo +} + +func init() { + t["BaseVirtualDeviceDeviceBackingInfo"] = reflect.TypeOf((*VirtualDeviceDeviceBackingInfo)(nil)).Elem() +} + +func (b *VirtualDeviceDeviceBackingOption) GetVirtualDeviceDeviceBackingOption() *VirtualDeviceDeviceBackingOption { + return b +} + +type BaseVirtualDeviceDeviceBackingOption interface { + GetVirtualDeviceDeviceBackingOption() *VirtualDeviceDeviceBackingOption +} + +func init() { + t["BaseVirtualDeviceDeviceBackingOption"] = reflect.TypeOf((*VirtualDeviceDeviceBackingOption)(nil)).Elem() +} + +func (b *VirtualDeviceFileBackingInfo) GetVirtualDeviceFileBackingInfo() *VirtualDeviceFileBackingInfo { + return b +} + +type BaseVirtualDeviceFileBackingInfo interface { + GetVirtualDeviceFileBackingInfo() *VirtualDeviceFileBackingInfo +} + +func init() { + t["BaseVirtualDeviceFileBackingInfo"] = reflect.TypeOf((*VirtualDeviceFileBackingInfo)(nil)).Elem() +} + +func (b *VirtualDeviceFileBackingOption) GetVirtualDeviceFileBackingOption() *VirtualDeviceFileBackingOption { + return b +} + +type BaseVirtualDeviceFileBackingOption interface { + GetVirtualDeviceFileBackingOption() *VirtualDeviceFileBackingOption +} + +func init() { + t["BaseVirtualDeviceFileBackingOption"] = reflect.TypeOf((*VirtualDeviceFileBackingOption)(nil)).Elem() +} + +func (b *VirtualDeviceOption) GetVirtualDeviceOption() *VirtualDeviceOption { return b } + +type BaseVirtualDeviceOption interface { + GetVirtualDeviceOption() *VirtualDeviceOption +} + +func init() { + t["BaseVirtualDeviceOption"] = reflect.TypeOf((*VirtualDeviceOption)(nil)).Elem() +} + +func (b *VirtualDevicePciBusSlotInfo) GetVirtualDevicePciBusSlotInfo() *VirtualDevicePciBusSlotInfo { + return b +} + +type BaseVirtualDevicePciBusSlotInfo interface { + GetVirtualDevicePciBusSlotInfo() *VirtualDevicePciBusSlotInfo +} + +func init() { + 
t["BaseVirtualDevicePciBusSlotInfo"] = reflect.TypeOf((*VirtualDevicePciBusSlotInfo)(nil)).Elem() +} + +func (b *VirtualDevicePipeBackingInfo) GetVirtualDevicePipeBackingInfo() *VirtualDevicePipeBackingInfo { + return b +} + +type BaseVirtualDevicePipeBackingInfo interface { + GetVirtualDevicePipeBackingInfo() *VirtualDevicePipeBackingInfo +} + +func init() { + t["BaseVirtualDevicePipeBackingInfo"] = reflect.TypeOf((*VirtualDevicePipeBackingInfo)(nil)).Elem() +} + +func (b *VirtualDevicePipeBackingOption) GetVirtualDevicePipeBackingOption() *VirtualDevicePipeBackingOption { + return b +} + +type BaseVirtualDevicePipeBackingOption interface { + GetVirtualDevicePipeBackingOption() *VirtualDevicePipeBackingOption +} + +func init() { + t["BaseVirtualDevicePipeBackingOption"] = reflect.TypeOf((*VirtualDevicePipeBackingOption)(nil)).Elem() +} + +func (b *VirtualDeviceRemoteDeviceBackingInfo) GetVirtualDeviceRemoteDeviceBackingInfo() *VirtualDeviceRemoteDeviceBackingInfo { + return b +} + +type BaseVirtualDeviceRemoteDeviceBackingInfo interface { + GetVirtualDeviceRemoteDeviceBackingInfo() *VirtualDeviceRemoteDeviceBackingInfo +} + +func init() { + t["BaseVirtualDeviceRemoteDeviceBackingInfo"] = reflect.TypeOf((*VirtualDeviceRemoteDeviceBackingInfo)(nil)).Elem() +} + +func (b *VirtualDeviceRemoteDeviceBackingOption) GetVirtualDeviceRemoteDeviceBackingOption() *VirtualDeviceRemoteDeviceBackingOption { + return b +} + +type BaseVirtualDeviceRemoteDeviceBackingOption interface { + GetVirtualDeviceRemoteDeviceBackingOption() *VirtualDeviceRemoteDeviceBackingOption +} + +func init() { + t["BaseVirtualDeviceRemoteDeviceBackingOption"] = reflect.TypeOf((*VirtualDeviceRemoteDeviceBackingOption)(nil)).Elem() +} + +func (b *VirtualDeviceURIBackingInfo) GetVirtualDeviceURIBackingInfo() *VirtualDeviceURIBackingInfo { + return b +} + +type BaseVirtualDeviceURIBackingInfo interface { + GetVirtualDeviceURIBackingInfo() *VirtualDeviceURIBackingInfo +} + +func init() { + t["BaseVirtualDeviceURIBackingInfo"] = reflect.TypeOf((*VirtualDeviceURIBackingInfo)(nil)).Elem() +} + +func (b *VirtualDeviceURIBackingOption) GetVirtualDeviceURIBackingOption() *VirtualDeviceURIBackingOption { + return b +} + +type BaseVirtualDeviceURIBackingOption interface { + GetVirtualDeviceURIBackingOption() *VirtualDeviceURIBackingOption +} + +func init() { + t["BaseVirtualDeviceURIBackingOption"] = reflect.TypeOf((*VirtualDeviceURIBackingOption)(nil)).Elem() +} + +func (b *VirtualDiskRawDiskVer2BackingInfo) GetVirtualDiskRawDiskVer2BackingInfo() *VirtualDiskRawDiskVer2BackingInfo { + return b +} + +type BaseVirtualDiskRawDiskVer2BackingInfo interface { + GetVirtualDiskRawDiskVer2BackingInfo() *VirtualDiskRawDiskVer2BackingInfo +} + +func init() { + t["BaseVirtualDiskRawDiskVer2BackingInfo"] = reflect.TypeOf((*VirtualDiskRawDiskVer2BackingInfo)(nil)).Elem() +} + +func (b *VirtualDiskRawDiskVer2BackingOption) GetVirtualDiskRawDiskVer2BackingOption() *VirtualDiskRawDiskVer2BackingOption { + return b +} + +type BaseVirtualDiskRawDiskVer2BackingOption interface { + GetVirtualDiskRawDiskVer2BackingOption() *VirtualDiskRawDiskVer2BackingOption +} + +func init() { + t["BaseVirtualDiskRawDiskVer2BackingOption"] = reflect.TypeOf((*VirtualDiskRawDiskVer2BackingOption)(nil)).Elem() +} + +func (b *VirtualDiskSpec) GetVirtualDiskSpec() *VirtualDiskSpec { return b } + +type BaseVirtualDiskSpec interface { + GetVirtualDiskSpec() *VirtualDiskSpec +} + +func init() { + t["BaseVirtualDiskSpec"] = reflect.TypeOf((*VirtualDiskSpec)(nil)).Elem() +} + +func (b 
*VirtualEthernetCard) GetVirtualEthernetCard() *VirtualEthernetCard { return b } + +type BaseVirtualEthernetCard interface { + GetVirtualEthernetCard() *VirtualEthernetCard +} + +func init() { + t["BaseVirtualEthernetCard"] = reflect.TypeOf((*VirtualEthernetCard)(nil)).Elem() +} + +func (b *VirtualEthernetCardOption) GetVirtualEthernetCardOption() *VirtualEthernetCardOption { + return b +} + +type BaseVirtualEthernetCardOption interface { + GetVirtualEthernetCardOption() *VirtualEthernetCardOption +} + +func init() { + t["BaseVirtualEthernetCardOption"] = reflect.TypeOf((*VirtualEthernetCardOption)(nil)).Elem() +} + +func (b *VirtualHardwareCompatibilityIssue) GetVirtualHardwareCompatibilityIssue() *VirtualHardwareCompatibilityIssue { + return b +} + +type BaseVirtualHardwareCompatibilityIssue interface { + GetVirtualHardwareCompatibilityIssue() *VirtualHardwareCompatibilityIssue +} + +func init() { + t["BaseVirtualHardwareCompatibilityIssue"] = reflect.TypeOf((*VirtualHardwareCompatibilityIssue)(nil)).Elem() +} + +func (b *VirtualMachineBootOptionsBootableDevice) GetVirtualMachineBootOptionsBootableDevice() *VirtualMachineBootOptionsBootableDevice { + return b +} + +type BaseVirtualMachineBootOptionsBootableDevice interface { + GetVirtualMachineBootOptionsBootableDevice() *VirtualMachineBootOptionsBootableDevice +} + +func init() { + t["BaseVirtualMachineBootOptionsBootableDevice"] = reflect.TypeOf((*VirtualMachineBootOptionsBootableDevice)(nil)).Elem() +} + +func (b *VirtualMachineDeviceRuntimeInfoDeviceRuntimeState) GetVirtualMachineDeviceRuntimeInfoDeviceRuntimeState() *VirtualMachineDeviceRuntimeInfoDeviceRuntimeState { + return b +} + +type BaseVirtualMachineDeviceRuntimeInfoDeviceRuntimeState interface { + GetVirtualMachineDeviceRuntimeInfoDeviceRuntimeState() *VirtualMachineDeviceRuntimeInfoDeviceRuntimeState +} + +func init() { + t["BaseVirtualMachineDeviceRuntimeInfoDeviceRuntimeState"] = reflect.TypeOf((*VirtualMachineDeviceRuntimeInfoDeviceRuntimeState)(nil)).Elem() +} + +func (b *VirtualMachineDiskDeviceInfo) GetVirtualMachineDiskDeviceInfo() *VirtualMachineDiskDeviceInfo { + return b +} + +type BaseVirtualMachineDiskDeviceInfo interface { + GetVirtualMachineDiskDeviceInfo() *VirtualMachineDiskDeviceInfo +} + +func init() { + t["BaseVirtualMachineDiskDeviceInfo"] = reflect.TypeOf((*VirtualMachineDiskDeviceInfo)(nil)).Elem() +} + +func (b *VirtualMachinePciPassthroughInfo) GetVirtualMachinePciPassthroughInfo() *VirtualMachinePciPassthroughInfo { + return b +} + +type BaseVirtualMachinePciPassthroughInfo interface { + GetVirtualMachinePciPassthroughInfo() *VirtualMachinePciPassthroughInfo +} + +func init() { + t["BaseVirtualMachinePciPassthroughInfo"] = reflect.TypeOf((*VirtualMachinePciPassthroughInfo)(nil)).Elem() +} + +func (b *VirtualMachineProfileSpec) GetVirtualMachineProfileSpec() *VirtualMachineProfileSpec { + return b +} + +type BaseVirtualMachineProfileSpec interface { + GetVirtualMachineProfileSpec() *VirtualMachineProfileSpec +} + +func init() { + t["BaseVirtualMachineProfileSpec"] = reflect.TypeOf((*VirtualMachineProfileSpec)(nil)).Elem() +} + +func (b *VirtualMachineTargetInfo) GetVirtualMachineTargetInfo() *VirtualMachineTargetInfo { return b } + +type BaseVirtualMachineTargetInfo interface { + GetVirtualMachineTargetInfo() *VirtualMachineTargetInfo +} + +func init() { + t["BaseVirtualMachineTargetInfo"] = reflect.TypeOf((*VirtualMachineTargetInfo)(nil)).Elem() +} + +func (b *VirtualPCIPassthroughPluginBackingInfo) GetVirtualPCIPassthroughPluginBackingInfo() 
*VirtualPCIPassthroughPluginBackingInfo { + return b +} + +type BaseVirtualPCIPassthroughPluginBackingInfo interface { + GetVirtualPCIPassthroughPluginBackingInfo() *VirtualPCIPassthroughPluginBackingInfo +} + +func init() { + t["BaseVirtualPCIPassthroughPluginBackingInfo"] = reflect.TypeOf((*VirtualPCIPassthroughPluginBackingInfo)(nil)).Elem() +} + +func (b *VirtualPCIPassthroughPluginBackingOption) GetVirtualPCIPassthroughPluginBackingOption() *VirtualPCIPassthroughPluginBackingOption { + return b +} + +type BaseVirtualPCIPassthroughPluginBackingOption interface { + GetVirtualPCIPassthroughPluginBackingOption() *VirtualPCIPassthroughPluginBackingOption +} + +func init() { + t["BaseVirtualPCIPassthroughPluginBackingOption"] = reflect.TypeOf((*VirtualPCIPassthroughPluginBackingOption)(nil)).Elem() +} + +func (b *VirtualSATAController) GetVirtualSATAController() *VirtualSATAController { return b } + +type BaseVirtualSATAController interface { + GetVirtualSATAController() *VirtualSATAController +} + +func init() { + t["BaseVirtualSATAController"] = reflect.TypeOf((*VirtualSATAController)(nil)).Elem() +} + +func (b *VirtualSATAControllerOption) GetVirtualSATAControllerOption() *VirtualSATAControllerOption { + return b +} + +type BaseVirtualSATAControllerOption interface { + GetVirtualSATAControllerOption() *VirtualSATAControllerOption +} + +func init() { + t["BaseVirtualSATAControllerOption"] = reflect.TypeOf((*VirtualSATAControllerOption)(nil)).Elem() +} + +func (b *VirtualSCSIController) GetVirtualSCSIController() *VirtualSCSIController { return b } + +type BaseVirtualSCSIController interface { + GetVirtualSCSIController() *VirtualSCSIController +} + +func init() { + t["BaseVirtualSCSIController"] = reflect.TypeOf((*VirtualSCSIController)(nil)).Elem() +} + +func (b *VirtualSCSIControllerOption) GetVirtualSCSIControllerOption() *VirtualSCSIControllerOption { + return b +} + +type BaseVirtualSCSIControllerOption interface { + GetVirtualSCSIControllerOption() *VirtualSCSIControllerOption +} + +func init() { + t["BaseVirtualSCSIControllerOption"] = reflect.TypeOf((*VirtualSCSIControllerOption)(nil)).Elem() +} + +func (b *VirtualSoundCard) GetVirtualSoundCard() *VirtualSoundCard { return b } + +type BaseVirtualSoundCard interface { + GetVirtualSoundCard() *VirtualSoundCard +} + +func init() { + t["BaseVirtualSoundCard"] = reflect.TypeOf((*VirtualSoundCard)(nil)).Elem() +} + +func (b *VirtualSoundCardOption) GetVirtualSoundCardOption() *VirtualSoundCardOption { return b } + +type BaseVirtualSoundCardOption interface { + GetVirtualSoundCardOption() *VirtualSoundCardOption +} + +func init() { + t["BaseVirtualSoundCardOption"] = reflect.TypeOf((*VirtualSoundCardOption)(nil)).Elem() +} + +func (b *VirtualVmxnet) GetVirtualVmxnet() *VirtualVmxnet { return b } + +type BaseVirtualVmxnet interface { + GetVirtualVmxnet() *VirtualVmxnet +} + +func init() { + t["BaseVirtualVmxnet"] = reflect.TypeOf((*VirtualVmxnet)(nil)).Elem() +} + +func (b *VirtualVmxnetOption) GetVirtualVmxnetOption() *VirtualVmxnetOption { return b } + +type BaseVirtualVmxnetOption interface { + GetVirtualVmxnetOption() *VirtualVmxnetOption +} + +func init() { + t["BaseVirtualVmxnetOption"] = reflect.TypeOf((*VirtualVmxnetOption)(nil)).Elem() +} + +func (b *VmCloneEvent) GetVmCloneEvent() *VmCloneEvent { return b } + +type BaseVmCloneEvent interface { + GetVmCloneEvent() *VmCloneEvent +} + +func init() { + t["BaseVmCloneEvent"] = reflect.TypeOf((*VmCloneEvent)(nil)).Elem() +} + +func (b *VmConfigFault) GetVmConfigFault() *VmConfigFault 
{ return b } + +type BaseVmConfigFault interface { + GetVmConfigFault() *VmConfigFault +} + +func init() { + t["BaseVmConfigFault"] = reflect.TypeOf((*VmConfigFault)(nil)).Elem() +} + +func (b *VmConfigFileInfo) GetVmConfigFileInfo() *VmConfigFileInfo { return b } + +type BaseVmConfigFileInfo interface { + GetVmConfigFileInfo() *VmConfigFileInfo +} + +func init() { + t["BaseVmConfigFileInfo"] = reflect.TypeOf((*VmConfigFileInfo)(nil)).Elem() +} + +func (b *VmConfigFileQuery) GetVmConfigFileQuery() *VmConfigFileQuery { return b } + +type BaseVmConfigFileQuery interface { + GetVmConfigFileQuery() *VmConfigFileQuery +} + +func init() { + t["BaseVmConfigFileQuery"] = reflect.TypeOf((*VmConfigFileQuery)(nil)).Elem() +} + +func (b *VmConfigInfo) GetVmConfigInfo() *VmConfigInfo { return b } + +type BaseVmConfigInfo interface { + GetVmConfigInfo() *VmConfigInfo +} + +func init() { + t["BaseVmConfigInfo"] = reflect.TypeOf((*VmConfigInfo)(nil)).Elem() +} + +func (b *VmConfigSpec) GetVmConfigSpec() *VmConfigSpec { return b } + +type BaseVmConfigSpec interface { + GetVmConfigSpec() *VmConfigSpec +} + +func init() { + t["BaseVmConfigSpec"] = reflect.TypeOf((*VmConfigSpec)(nil)).Elem() +} + +func (b *VmDasBeingResetEvent) GetVmDasBeingResetEvent() *VmDasBeingResetEvent { return b } + +type BaseVmDasBeingResetEvent interface { + GetVmDasBeingResetEvent() *VmDasBeingResetEvent +} + +func init() { + t["BaseVmDasBeingResetEvent"] = reflect.TypeOf((*VmDasBeingResetEvent)(nil)).Elem() +} + +func (b *VmEvent) GetVmEvent() *VmEvent { return b } + +type BaseVmEvent interface { + GetVmEvent() *VmEvent +} + +func init() { + t["BaseVmEvent"] = reflect.TypeOf((*VmEvent)(nil)).Elem() +} + +func (b *VmFaultToleranceIssue) GetVmFaultToleranceIssue() *VmFaultToleranceIssue { return b } + +type BaseVmFaultToleranceIssue interface { + GetVmFaultToleranceIssue() *VmFaultToleranceIssue +} + +func init() { + t["BaseVmFaultToleranceIssue"] = reflect.TypeOf((*VmFaultToleranceIssue)(nil)).Elem() +} + +func (b *VmMigratedEvent) GetVmMigratedEvent() *VmMigratedEvent { return b } + +type BaseVmMigratedEvent interface { + GetVmMigratedEvent() *VmMigratedEvent +} + +func init() { + t["BaseVmMigratedEvent"] = reflect.TypeOf((*VmMigratedEvent)(nil)).Elem() +} + +func (b *VmPoweredOffEvent) GetVmPoweredOffEvent() *VmPoweredOffEvent { return b } + +type BaseVmPoweredOffEvent interface { + GetVmPoweredOffEvent() *VmPoweredOffEvent +} + +func init() { + t["BaseVmPoweredOffEvent"] = reflect.TypeOf((*VmPoweredOffEvent)(nil)).Elem() +} + +func (b *VmPoweredOnEvent) GetVmPoweredOnEvent() *VmPoweredOnEvent { return b } + +type BaseVmPoweredOnEvent interface { + GetVmPoweredOnEvent() *VmPoweredOnEvent +} + +func init() { + t["BaseVmPoweredOnEvent"] = reflect.TypeOf((*VmPoweredOnEvent)(nil)).Elem() +} + +func (b *VmRelocateSpecEvent) GetVmRelocateSpecEvent() *VmRelocateSpecEvent { return b } + +type BaseVmRelocateSpecEvent interface { + GetVmRelocateSpecEvent() *VmRelocateSpecEvent +} + +func init() { + t["BaseVmRelocateSpecEvent"] = reflect.TypeOf((*VmRelocateSpecEvent)(nil)).Elem() +} + +func (b *VmStartingEvent) GetVmStartingEvent() *VmStartingEvent { return b } + +type BaseVmStartingEvent interface { + GetVmStartingEvent() *VmStartingEvent +} + +func init() { + t["BaseVmStartingEvent"] = reflect.TypeOf((*VmStartingEvent)(nil)).Elem() +} + +func (b *VmToolsUpgradeFault) GetVmToolsUpgradeFault() *VmToolsUpgradeFault { return b } + +type BaseVmToolsUpgradeFault interface { + GetVmToolsUpgradeFault() *VmToolsUpgradeFault +} + +func init() { + 
t["BaseVmToolsUpgradeFault"] = reflect.TypeOf((*VmToolsUpgradeFault)(nil)).Elem() +} + +func (b *VmfsDatastoreBaseOption) GetVmfsDatastoreBaseOption() *VmfsDatastoreBaseOption { return b } + +type BaseVmfsDatastoreBaseOption interface { + GetVmfsDatastoreBaseOption() *VmfsDatastoreBaseOption +} + +func init() { + t["BaseVmfsDatastoreBaseOption"] = reflect.TypeOf((*VmfsDatastoreBaseOption)(nil)).Elem() +} + +func (b *VmfsDatastoreSingleExtentOption) GetVmfsDatastoreSingleExtentOption() *VmfsDatastoreSingleExtentOption { + return b +} + +type BaseVmfsDatastoreSingleExtentOption interface { + GetVmfsDatastoreSingleExtentOption() *VmfsDatastoreSingleExtentOption +} + +func init() { + t["BaseVmfsDatastoreSingleExtentOption"] = reflect.TypeOf((*VmfsDatastoreSingleExtentOption)(nil)).Elem() +} + +func (b *VmfsDatastoreSpec) GetVmfsDatastoreSpec() *VmfsDatastoreSpec { return b } + +type BaseVmfsDatastoreSpec interface { + GetVmfsDatastoreSpec() *VmfsDatastoreSpec +} + +func init() { + t["BaseVmfsDatastoreSpec"] = reflect.TypeOf((*VmfsDatastoreSpec)(nil)).Elem() +} + +func (b *VmfsMountFault) GetVmfsMountFault() *VmfsMountFault { return b } + +type BaseVmfsMountFault interface { + GetVmfsMountFault() *VmfsMountFault +} + +func init() { + t["BaseVmfsMountFault"] = reflect.TypeOf((*VmfsMountFault)(nil)).Elem() +} + +func (b *VmwareDistributedVirtualSwitchVlanSpec) GetVmwareDistributedVirtualSwitchVlanSpec() *VmwareDistributedVirtualSwitchVlanSpec { + return b +} + +type BaseVmwareDistributedVirtualSwitchVlanSpec interface { + GetVmwareDistributedVirtualSwitchVlanSpec() *VmwareDistributedVirtualSwitchVlanSpec +} + +func init() { + t["BaseVmwareDistributedVirtualSwitchVlanSpec"] = reflect.TypeOf((*VmwareDistributedVirtualSwitchVlanSpec)(nil)).Elem() +} + +func (b *VsanDiskFault) GetVsanDiskFault() *VsanDiskFault { return b } + +type BaseVsanDiskFault interface { + GetVsanDiskFault() *VsanDiskFault +} + +func init() { + t["BaseVsanDiskFault"] = reflect.TypeOf((*VsanDiskFault)(nil)).Elem() +} + +func (b *VsanFault) GetVsanFault() *VsanFault { return b } + +type BaseVsanFault interface { + GetVsanFault() *VsanFault +} + +func init() { + t["BaseVsanFault"] = reflect.TypeOf((*VsanFault)(nil)).Elem() +} + +func (b *VsanUpgradeSystemPreflightCheckIssue) GetVsanUpgradeSystemPreflightCheckIssue() *VsanUpgradeSystemPreflightCheckIssue { + return b +} + +type BaseVsanUpgradeSystemPreflightCheckIssue interface { + GetVsanUpgradeSystemPreflightCheckIssue() *VsanUpgradeSystemPreflightCheckIssue +} + +func init() { + t["BaseVsanUpgradeSystemPreflightCheckIssue"] = reflect.TypeOf((*VsanUpgradeSystemPreflightCheckIssue)(nil)).Elem() +} + +func (b *VsanUpgradeSystemUpgradeHistoryItem) GetVsanUpgradeSystemUpgradeHistoryItem() *VsanUpgradeSystemUpgradeHistoryItem { + return b +} + +type BaseVsanUpgradeSystemUpgradeHistoryItem interface { + GetVsanUpgradeSystemUpgradeHistoryItem() *VsanUpgradeSystemUpgradeHistoryItem +} + +func init() { + t["BaseVsanUpgradeSystemUpgradeHistoryItem"] = reflect.TypeOf((*VsanUpgradeSystemUpgradeHistoryItem)(nil)).Elem() +} diff --git a/vendor/github.com/vmware/govmomi/vim25/types/internal.go b/vendor/github.com/vmware/govmomi/vim25/types/internal.go new file mode 100644 index 000000000..5a9e6c63e --- /dev/null +++ b/vendor/github.com/vmware/govmomi/vim25/types/internal.go @@ -0,0 +1,262 @@ +/* +Copyright (c) 2014 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package types + +import "reflect" + +type DynamicTypeMgrQueryMoInstances struct { + This ManagedObjectReference `xml:"_this"` + FilterSpec BaseDynamicTypeMgrFilterSpec `xml:"filterSpec,omitempty,typeattr"` +} + +type DynamicTypeMgrQueryMoInstancesResponse struct { + Returnval []DynamicTypeMgrMoInstance `xml:"urn:vim25 returnval"` +} + +type DynamicTypeEnumTypeInfo struct { + DynamicData + + Name string `xml:"name"` + WsdlName string `xml:"wsdlName"` + Version string `xml:"version"` + Value []string `xml:"value,omitempty"` + Annotation []DynamicTypeMgrAnnotation `xml:"annotation,omitempty"` +} + +func init() { + t["DynamicTypeEnumTypeInfo"] = reflect.TypeOf((*DynamicTypeEnumTypeInfo)(nil)).Elem() +} + +type DynamicTypeMgrAllTypeInfo struct { + DynamicData + + ManagedTypeInfo []DynamicTypeMgrManagedTypeInfo `xml:"managedTypeInfo,omitempty"` + EnumTypeInfo []DynamicTypeEnumTypeInfo `xml:"enumTypeInfo,omitempty"` + DataTypeInfo []DynamicTypeMgrDataTypeInfo `xml:"dataTypeInfo,omitempty"` +} + +func init() { + t["DynamicTypeMgrAllTypeInfo"] = reflect.TypeOf((*DynamicTypeMgrAllTypeInfo)(nil)).Elem() +} + +type DynamicTypeMgrAnnotation struct { + DynamicData + + Name string `xml:"name"` + Parameter []string `xml:"parameter,omitempty"` +} + +func init() { + t["DynamicTypeMgrAnnotation"] = reflect.TypeOf((*DynamicTypeMgrAnnotation)(nil)).Elem() +} + +type DynamicTypeMgrDataTypeInfo struct { + DynamicData + + Name string `xml:"name"` + WsdlName string `xml:"wsdlName"` + Version string `xml:"version"` + Base []string `xml:"base,omitempty"` + Property []DynamicTypeMgrPropertyTypeInfo `xml:"property,omitempty"` + Annotation []DynamicTypeMgrAnnotation `xml:"annotation,omitempty"` +} + +func init() { + t["DynamicTypeMgrDataTypeInfo"] = reflect.TypeOf((*DynamicTypeMgrDataTypeInfo)(nil)).Elem() +} + +func (b *DynamicTypeMgrFilterSpec) GetDynamicTypeMgrFilterSpec() *DynamicTypeMgrFilterSpec { return b } + +type BaseDynamicTypeMgrFilterSpec interface { + GetDynamicTypeMgrFilterSpec() *DynamicTypeMgrFilterSpec +} + +type DynamicTypeMgrFilterSpec struct { + DynamicData +} + +func init() { + t["DynamicTypeMgrFilterSpec"] = reflect.TypeOf((*DynamicTypeMgrFilterSpec)(nil)).Elem() +} + +type DynamicTypeMgrManagedTypeInfo struct { + DynamicData + + Name string `xml:"name"` + WsdlName string `xml:"wsdlName"` + Version string `xml:"version"` + Base []string `xml:"base,omitempty"` + Property []DynamicTypeMgrPropertyTypeInfo `xml:"property,omitempty"` + Method []DynamicTypeMgrMethodTypeInfo `xml:"method,omitempty"` + Annotation []DynamicTypeMgrAnnotation `xml:"annotation,omitempty"` +} + +func init() { + t["DynamicTypeMgrManagedTypeInfo"] = reflect.TypeOf((*DynamicTypeMgrManagedTypeInfo)(nil)).Elem() +} + +type DynamicTypeMgrMethodTypeInfo struct { + DynamicData + + Name string `xml:"name"` + WsdlName string `xml:"wsdlName"` + Version string `xml:"version"` + ParamTypeInfo []DynamicTypeMgrParamTypeInfo `xml:"paramTypeInfo,omitempty"` + ReturnTypeInfo *DynamicTypeMgrParamTypeInfo `xml:"returnTypeInfo,omitempty"` + Fault []string `xml:"fault,omitempty"` + PrivId string `xml:"privId,omitempty"` + Annotation 
[]DynamicTypeMgrAnnotation `xml:"annotation,omitempty"` +} + +func init() { + t["DynamicTypeMgrMethodTypeInfo"] = reflect.TypeOf((*DynamicTypeMgrMethodTypeInfo)(nil)).Elem() +} + +type DynamicTypeMgrMoFilterSpec struct { + DynamicTypeMgrFilterSpec + + Id string `xml:"id,omitempty"` + TypeSubstr string `xml:"typeSubstr,omitempty"` +} + +func init() { + t["DynamicTypeMgrMoFilterSpec"] = reflect.TypeOf((*DynamicTypeMgrMoFilterSpec)(nil)).Elem() +} + +type DynamicTypeMgrMoInstance struct { + DynamicData + + Id string `xml:"id"` + MoType string `xml:"moType"` +} + +func init() { + t["DynamicTypeMgrMoInstance"] = reflect.TypeOf((*DynamicTypeMgrMoInstance)(nil)).Elem() +} + +type DynamicTypeMgrParamTypeInfo struct { + DynamicData + + Name string `xml:"name"` + Version string `xml:"version"` + Type string `xml:"type"` + PrivId string `xml:"privId,omitempty"` + Annotation []DynamicTypeMgrAnnotation `xml:"annotation,omitempty"` +} + +func init() { + t["DynamicTypeMgrParamTypeInfo"] = reflect.TypeOf((*DynamicTypeMgrParamTypeInfo)(nil)).Elem() +} + +type DynamicTypeMgrPropertyTypeInfo struct { + DynamicData + + Name string `xml:"name"` + Version string `xml:"version"` + Type string `xml:"type"` + PrivId string `xml:"privId,omitempty"` + MsgIdFormat string `xml:"msgIdFormat,omitempty"` + Annotation []DynamicTypeMgrAnnotation `xml:"annotation,omitempty"` +} + +type DynamicTypeMgrQueryTypeInfo struct { + This ManagedObjectReference `xml:"_this"` + FilterSpec BaseDynamicTypeMgrFilterSpec `xml:"filterSpec,omitempty,typeattr"` +} + +type DynamicTypeMgrQueryTypeInfoResponse struct { + Returnval DynamicTypeMgrAllTypeInfo `xml:"urn:vim25 returnval"` +} + +func init() { + t["DynamicTypeMgrPropertyTypeInfo"] = reflect.TypeOf((*DynamicTypeMgrPropertyTypeInfo)(nil)).Elem() +} + +type DynamicTypeMgrTypeFilterSpec struct { + DynamicTypeMgrFilterSpec + + TypeSubstr string `xml:"typeSubstr,omitempty"` +} + +func init() { + t["DynamicTypeMgrTypeFilterSpec"] = reflect.TypeOf((*DynamicTypeMgrTypeFilterSpec)(nil)).Elem() +} + +type ReflectManagedMethodExecuterSoapArgument struct { + DynamicData + + Name string `xml:"name"` + Val string `xml:"val"` +} + +func init() { + t["ReflectManagedMethodExecuterSoapArgument"] = reflect.TypeOf((*ReflectManagedMethodExecuterSoapArgument)(nil)).Elem() +} + +type ReflectManagedMethodExecuterSoapFault struct { + DynamicData + + FaultMsg string `xml:"faultMsg"` + FaultDetail string `xml:"faultDetail,omitempty"` +} + +func init() { + t["ReflectManagedMethodExecuterSoapFault"] = reflect.TypeOf((*ReflectManagedMethodExecuterSoapFault)(nil)).Elem() +} + +type ReflectManagedMethodExecuterSoapResult struct { + DynamicData + + Response string `xml:"response,omitempty"` + Fault *ReflectManagedMethodExecuterSoapFault `xml:"fault,omitempty"` +} + +type RetrieveDynamicTypeManager struct { + This ManagedObjectReference `xml:"_this"` +} + +type RetrieveDynamicTypeManagerResponse struct { + Returnval *InternalDynamicTypeManager `xml:"urn:vim25 returnval"` +} + +type RetrieveManagedMethodExecuter struct { + This ManagedObjectReference `xml:"_this"` +} + +type RetrieveManagedMethodExecuterResponse struct { + Returnval *ReflectManagedMethodExecuter `xml:"urn:vim25 returnval"` +} + +type InternalDynamicTypeManager struct { + ManagedObjectReference +} + +type ReflectManagedMethodExecuter struct { + ManagedObjectReference +} + +type ExecuteSoap struct { + This ManagedObjectReference `xml:"_this"` + Moid string `xml:"moid"` + Version string `xml:"version"` + Method string `xml:"method"` + Argument 
[]ReflectManagedMethodExecuterSoapArgument `xml:"argument,omitempty"` +} + +type ExecuteSoapResponse struct { + Returnval *ReflectManagedMethodExecuterSoapResult `xml:"urn:vim25 returnval"` +} diff --git a/vendor/github.com/vmware/govmomi/vim25/types/registry.go b/vendor/github.com/vmware/govmomi/vim25/types/registry.go new file mode 100644 index 000000000..8f238088d --- /dev/null +++ b/vendor/github.com/vmware/govmomi/vim25/types/registry.go @@ -0,0 +1,30 @@ +/* +Copyright (c) 2014 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package types + +import "reflect" + +var t = map[string]reflect.Type{} + +type Func func(string) (reflect.Type, bool) + +func TypeFunc() Func { + return func(name string) (reflect.Type, bool) { + typ, ok := t[name] + return typ, ok + } +} diff --git a/vendor/github.com/vmware/govmomi/vim25/types/registry_test.go b/vendor/github.com/vmware/govmomi/vim25/types/registry_test.go new file mode 100644 index 000000000..1ea675c5c --- /dev/null +++ b/vendor/github.com/vmware/govmomi/vim25/types/registry_test.go @@ -0,0 +1,43 @@ +/* +Copyright (c) 2014 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package types + +import ( + "reflect" + "testing" +) + +func TestTypeFunc(t *testing.T) { + var ok bool + + fn := TypeFunc() + + _, ok = fn("unknown") + if ok { + t.Errorf("Expected ok==false") + } + + actual, ok := fn("UserProfile") + if !ok { + t.Errorf("Expected ok==true") + } + + expected := reflect.TypeOf(UserProfile{}) + if !reflect.DeepEqual(expected, actual) { + t.Errorf("Expected: %#v, actual: %#v", expected, actual) + } +} diff --git a/vendor/github.com/vmware/govmomi/vim25/types/types.go b/vendor/github.com/vmware/govmomi/vim25/types/types.go new file mode 100644 index 000000000..e04995e94 --- /dev/null +++ b/vendor/github.com/vmware/govmomi/vim25/types/types.go @@ -0,0 +1,49930 @@ +/* +Copyright (c) 2014-2015 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package types + +import ( + "net/url" + "reflect" + "time" +) + +type AbdicateDomOwnership AbdicateDomOwnershipRequestType + +func init() { + t["AbdicateDomOwnership"] = reflect.TypeOf((*AbdicateDomOwnership)(nil)).Elem() +} + +type AbdicateDomOwnershipRequestType struct { + This ManagedObjectReference `xml:"_this"` + Uuids []string `xml:"uuids"` +} + +func init() { + t["AbdicateDomOwnershipRequestType"] = reflect.TypeOf((*AbdicateDomOwnershipRequestType)(nil)).Elem() +} + +type AbdicateDomOwnershipResponse struct { + Returnval []string `xml:"returnval,omitempty"` +} + +type AboutInfo struct { + DynamicData + + Name string `xml:"name"` + FullName string `xml:"fullName"` + Vendor string `xml:"vendor"` + Version string `xml:"version"` + Build string `xml:"build"` + LocaleVersion string `xml:"localeVersion,omitempty"` + LocaleBuild string `xml:"localeBuild,omitempty"` + OsType string `xml:"osType"` + ProductLineId string `xml:"productLineId"` + ApiType string `xml:"apiType"` + ApiVersion string `xml:"apiVersion"` + InstanceUuid string `xml:"instanceUuid,omitempty"` + LicenseProductName string `xml:"licenseProductName,omitempty"` + LicenseProductVersion string `xml:"licenseProductVersion,omitempty"` +} + +func init() { + t["AboutInfo"] = reflect.TypeOf((*AboutInfo)(nil)).Elem() +} + +type AccountCreatedEvent struct { + HostEvent + + Spec BaseHostAccountSpec `xml:"spec,typeattr"` + Group bool `xml:"group"` +} + +func init() { + t["AccountCreatedEvent"] = reflect.TypeOf((*AccountCreatedEvent)(nil)).Elem() +} + +type AccountRemovedEvent struct { + HostEvent + + Account string `xml:"account"` + Group bool `xml:"group"` +} + +func init() { + t["AccountRemovedEvent"] = reflect.TypeOf((*AccountRemovedEvent)(nil)).Elem() +} + +type AccountUpdatedEvent struct { + HostEvent + + Spec BaseHostAccountSpec `xml:"spec,typeattr"` + Group bool `xml:"group"` +} + +func init() { + t["AccountUpdatedEvent"] = reflect.TypeOf((*AccountUpdatedEvent)(nil)).Elem() +} + +type AcknowledgeAlarm AcknowledgeAlarmRequestType + +func init() { + t["AcknowledgeAlarm"] = reflect.TypeOf((*AcknowledgeAlarm)(nil)).Elem() +} + +type AcknowledgeAlarmRequestType struct { + This ManagedObjectReference `xml:"_this"` + Alarm ManagedObjectReference `xml:"alarm"` + Entity ManagedObjectReference `xml:"entity"` +} + +func init() { + t["AcknowledgeAlarmRequestType"] = reflect.TypeOf((*AcknowledgeAlarmRequestType)(nil)).Elem() +} + +type AcknowledgeAlarmResponse struct { +} + +type AcquireCimServicesTicket AcquireCimServicesTicketRequestType + +func init() { + t["AcquireCimServicesTicket"] = reflect.TypeOf((*AcquireCimServicesTicket)(nil)).Elem() +} + +type AcquireCimServicesTicketRequestType struct { + This ManagedObjectReference `xml:"_this"` +} + +func init() { + t["AcquireCimServicesTicketRequestType"] = reflect.TypeOf((*AcquireCimServicesTicketRequestType)(nil)).Elem() +} + +type AcquireCimServicesTicketResponse struct { + Returnval HostServiceTicket `xml:"returnval"` +} + +type AcquireCloneTicket AcquireCloneTicketRequestType + +func init() { + t["AcquireCloneTicket"] = reflect.TypeOf((*AcquireCloneTicket)(nil)).Elem() +} + +type AcquireCloneTicketRequestType struct { + This ManagedObjectReference `xml:"_this"` +} + +func init() { + t["AcquireCloneTicketRequestType"] = reflect.TypeOf((*AcquireCloneTicketRequestType)(nil)).Elem() +} + +type AcquireCloneTicketResponse struct { + Returnval string `xml:"returnval"` +} + +type AcquireCredentialsInGuest AcquireCredentialsInGuestRequestType + +func init() { + 
t["AcquireCredentialsInGuest"] = reflect.TypeOf((*AcquireCredentialsInGuest)(nil)).Elem() +} + +type AcquireCredentialsInGuestRequestType struct { + This ManagedObjectReference `xml:"_this"` + Vm ManagedObjectReference `xml:"vm"` + RequestedAuth BaseGuestAuthentication `xml:"requestedAuth,typeattr"` + SessionID int64 `xml:"sessionID,omitempty"` +} + +func init() { + t["AcquireCredentialsInGuestRequestType"] = reflect.TypeOf((*AcquireCredentialsInGuestRequestType)(nil)).Elem() +} + +type AcquireCredentialsInGuestResponse struct { + Returnval BaseGuestAuthentication `xml:"returnval,typeattr"` +} + +type AcquireGenericServiceTicket AcquireGenericServiceTicketRequestType + +func init() { + t["AcquireGenericServiceTicket"] = reflect.TypeOf((*AcquireGenericServiceTicket)(nil)).Elem() +} + +type AcquireGenericServiceTicketRequestType struct { + This ManagedObjectReference `xml:"_this"` + Spec BaseSessionManagerServiceRequestSpec `xml:"spec,typeattr"` +} + +func init() { + t["AcquireGenericServiceTicketRequestType"] = reflect.TypeOf((*AcquireGenericServiceTicketRequestType)(nil)).Elem() +} + +type AcquireGenericServiceTicketResponse struct { + Returnval SessionManagerGenericServiceTicket `xml:"returnval"` +} + +type AcquireLocalTicket AcquireLocalTicketRequestType + +func init() { + t["AcquireLocalTicket"] = reflect.TypeOf((*AcquireLocalTicket)(nil)).Elem() +} + +type AcquireLocalTicketRequestType struct { + This ManagedObjectReference `xml:"_this"` + UserName string `xml:"userName"` +} + +func init() { + t["AcquireLocalTicketRequestType"] = reflect.TypeOf((*AcquireLocalTicketRequestType)(nil)).Elem() +} + +type AcquireLocalTicketResponse struct { + Returnval SessionManagerLocalTicket `xml:"returnval"` +} + +type AcquireMksTicket AcquireMksTicketRequestType + +func init() { + t["AcquireMksTicket"] = reflect.TypeOf((*AcquireMksTicket)(nil)).Elem() +} + +type AcquireMksTicketRequestType struct { + This ManagedObjectReference `xml:"_this"` +} + +func init() { + t["AcquireMksTicketRequestType"] = reflect.TypeOf((*AcquireMksTicketRequestType)(nil)).Elem() +} + +type AcquireMksTicketResponse struct { + Returnval VirtualMachineMksTicket `xml:"returnval"` +} + +type AcquireTicket AcquireTicketRequestType + +func init() { + t["AcquireTicket"] = reflect.TypeOf((*AcquireTicket)(nil)).Elem() +} + +type AcquireTicketRequestType struct { + This ManagedObjectReference `xml:"_this"` + TicketType string `xml:"ticketType"` +} + +func init() { + t["AcquireTicketRequestType"] = reflect.TypeOf((*AcquireTicketRequestType)(nil)).Elem() +} + +type AcquireTicketResponse struct { + Returnval VirtualMachineTicket `xml:"returnval"` +} + +type Action struct { + DynamicData +} + +func init() { + t["Action"] = reflect.TypeOf((*Action)(nil)).Elem() +} + +type ActiveDirectoryFault struct { + VimFault + + ErrorCode int `xml:"errorCode,omitempty"` +} + +func init() { + t["ActiveDirectoryFault"] = reflect.TypeOf((*ActiveDirectoryFault)(nil)).Elem() +} + +type ActiveDirectoryFaultFault BaseActiveDirectoryFault + +func init() { + t["ActiveDirectoryFaultFault"] = reflect.TypeOf((*ActiveDirectoryFaultFault)(nil)).Elem() +} + +type ActiveDirectoryProfile struct { + ApplyProfile +} + +func init() { + t["ActiveDirectoryProfile"] = reflect.TypeOf((*ActiveDirectoryProfile)(nil)).Elem() +} + +type ActiveVMsBlockingEVC struct { + EVCConfigFault + + EvcMode string `xml:"evcMode,omitempty"` + Host []ManagedObjectReference `xml:"host,omitempty"` + HostName []string `xml:"hostName,omitempty"` +} + +func init() { + t["ActiveVMsBlockingEVC"] = 
reflect.TypeOf((*ActiveVMsBlockingEVC)(nil)).Elem() +} + +type ActiveVMsBlockingEVCFault ActiveVMsBlockingEVC + +func init() { + t["ActiveVMsBlockingEVCFault"] = reflect.TypeOf((*ActiveVMsBlockingEVCFault)(nil)).Elem() +} + +type AddAuthorizationRole AddAuthorizationRoleRequestType + +func init() { + t["AddAuthorizationRole"] = reflect.TypeOf((*AddAuthorizationRole)(nil)).Elem() +} + +type AddAuthorizationRoleRequestType struct { + This ManagedObjectReference `xml:"_this"` + Name string `xml:"name"` + PrivIds []string `xml:"privIds,omitempty"` +} + +func init() { + t["AddAuthorizationRoleRequestType"] = reflect.TypeOf((*AddAuthorizationRoleRequestType)(nil)).Elem() +} + +type AddAuthorizationRoleResponse struct { + Returnval int `xml:"returnval"` +} + +type AddCustomFieldDef AddCustomFieldDefRequestType + +func init() { + t["AddCustomFieldDef"] = reflect.TypeOf((*AddCustomFieldDef)(nil)).Elem() +} + +type AddCustomFieldDefRequestType struct { + This ManagedObjectReference `xml:"_this"` + Name string `xml:"name"` + MoType string `xml:"moType,omitempty"` + FieldDefPolicy *PrivilegePolicyDef `xml:"fieldDefPolicy,omitempty"` + FieldPolicy *PrivilegePolicyDef `xml:"fieldPolicy,omitempty"` +} + +func init() { + t["AddCustomFieldDefRequestType"] = reflect.TypeOf((*AddCustomFieldDefRequestType)(nil)).Elem() +} + +type AddCustomFieldDefResponse struct { + Returnval CustomFieldDef `xml:"returnval"` +} + +type AddDVPortgroupRequestType struct { + This ManagedObjectReference `xml:"_this"` + Spec []DVPortgroupConfigSpec `xml:"spec"` +} + +func init() { + t["AddDVPortgroupRequestType"] = reflect.TypeOf((*AddDVPortgroupRequestType)(nil)).Elem() +} + +type AddDVPortgroup_Task AddDVPortgroupRequestType + +func init() { + t["AddDVPortgroup_Task"] = reflect.TypeOf((*AddDVPortgroup_Task)(nil)).Elem() +} + +type AddDVPortgroup_TaskResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + +type AddDisksRequestType struct { + This ManagedObjectReference `xml:"_this"` + Disk []HostScsiDisk `xml:"disk"` +} + +func init() { + t["AddDisksRequestType"] = reflect.TypeOf((*AddDisksRequestType)(nil)).Elem() +} + +type AddDisks_Task AddDisksRequestType + +func init() { + t["AddDisks_Task"] = reflect.TypeOf((*AddDisks_Task)(nil)).Elem() +} + +type AddDisks_TaskResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + +type AddGuestAlias AddGuestAliasRequestType + +func init() { + t["AddGuestAlias"] = reflect.TypeOf((*AddGuestAlias)(nil)).Elem() +} + +type AddGuestAliasRequestType struct { + This ManagedObjectReference `xml:"_this"` + Vm ManagedObjectReference `xml:"vm"` + Auth BaseGuestAuthentication `xml:"auth,typeattr"` + Username string `xml:"username"` + MapCert bool `xml:"mapCert"` + Base64Cert string `xml:"base64Cert"` + AliasInfo GuestAuthAliasInfo `xml:"aliasInfo"` +} + +func init() { + t["AddGuestAliasRequestType"] = reflect.TypeOf((*AddGuestAliasRequestType)(nil)).Elem() +} + +type AddGuestAliasResponse struct { +} + +type AddHostRequestType struct { + This ManagedObjectReference `xml:"_this"` + Spec HostConnectSpec `xml:"spec"` + AsConnected bool `xml:"asConnected"` + ResourcePool *ManagedObjectReference `xml:"resourcePool,omitempty"` + License string `xml:"license,omitempty"` +} + +func init() { + t["AddHostRequestType"] = reflect.TypeOf((*AddHostRequestType)(nil)).Elem() +} + +type AddHost_Task AddHostRequestType + +func init() { + t["AddHost_Task"] = reflect.TypeOf((*AddHost_Task)(nil)).Elem() +} + +type AddHost_TaskResponse struct { + Returnval ManagedObjectReference 
`xml:"returnval"` +} + +type AddInternetScsiSendTargets AddInternetScsiSendTargetsRequestType + +func init() { + t["AddInternetScsiSendTargets"] = reflect.TypeOf((*AddInternetScsiSendTargets)(nil)).Elem() +} + +type AddInternetScsiSendTargetsRequestType struct { + This ManagedObjectReference `xml:"_this"` + IScsiHbaDevice string `xml:"iScsiHbaDevice"` + Targets []HostInternetScsiHbaSendTarget `xml:"targets"` +} + +func init() { + t["AddInternetScsiSendTargetsRequestType"] = reflect.TypeOf((*AddInternetScsiSendTargetsRequestType)(nil)).Elem() +} + +type AddInternetScsiSendTargetsResponse struct { +} + +type AddInternetScsiStaticTargets AddInternetScsiStaticTargetsRequestType + +func init() { + t["AddInternetScsiStaticTargets"] = reflect.TypeOf((*AddInternetScsiStaticTargets)(nil)).Elem() +} + +type AddInternetScsiStaticTargetsRequestType struct { + This ManagedObjectReference `xml:"_this"` + IScsiHbaDevice string `xml:"iScsiHbaDevice"` + Targets []HostInternetScsiHbaStaticTarget `xml:"targets"` +} + +func init() { + t["AddInternetScsiStaticTargetsRequestType"] = reflect.TypeOf((*AddInternetScsiStaticTargetsRequestType)(nil)).Elem() +} + +type AddInternetScsiStaticTargetsResponse struct { +} + +type AddLicense AddLicenseRequestType + +func init() { + t["AddLicense"] = reflect.TypeOf((*AddLicense)(nil)).Elem() +} + +type AddLicenseRequestType struct { + This ManagedObjectReference `xml:"_this"` + LicenseKey string `xml:"licenseKey"` + Labels []KeyValue `xml:"labels,omitempty"` +} + +func init() { + t["AddLicenseRequestType"] = reflect.TypeOf((*AddLicenseRequestType)(nil)).Elem() +} + +type AddLicenseResponse struct { + Returnval LicenseManagerLicenseInfo `xml:"returnval"` +} + +type AddNetworkResourcePool AddNetworkResourcePoolRequestType + +func init() { + t["AddNetworkResourcePool"] = reflect.TypeOf((*AddNetworkResourcePool)(nil)).Elem() +} + +type AddNetworkResourcePoolRequestType struct { + This ManagedObjectReference `xml:"_this"` + ConfigSpec []DVSNetworkResourcePoolConfigSpec `xml:"configSpec"` +} + +func init() { + t["AddNetworkResourcePoolRequestType"] = reflect.TypeOf((*AddNetworkResourcePoolRequestType)(nil)).Elem() +} + +type AddNetworkResourcePoolResponse struct { +} + +type AddPortGroup AddPortGroupRequestType + +func init() { + t["AddPortGroup"] = reflect.TypeOf((*AddPortGroup)(nil)).Elem() +} + +type AddPortGroupRequestType struct { + This ManagedObjectReference `xml:"_this"` + Portgrp HostPortGroupSpec `xml:"portgrp"` +} + +func init() { + t["AddPortGroupRequestType"] = reflect.TypeOf((*AddPortGroupRequestType)(nil)).Elem() +} + +type AddPortGroupResponse struct { +} + +type AddServiceConsoleVirtualNic AddServiceConsoleVirtualNicRequestType + +func init() { + t["AddServiceConsoleVirtualNic"] = reflect.TypeOf((*AddServiceConsoleVirtualNic)(nil)).Elem() +} + +type AddServiceConsoleVirtualNicRequestType struct { + This ManagedObjectReference `xml:"_this"` + Portgroup string `xml:"portgroup"` + Nic HostVirtualNicSpec `xml:"nic"` +} + +func init() { + t["AddServiceConsoleVirtualNicRequestType"] = reflect.TypeOf((*AddServiceConsoleVirtualNicRequestType)(nil)).Elem() +} + +type AddServiceConsoleVirtualNicResponse struct { + Returnval string `xml:"returnval"` +} + +type AddStandaloneHostRequestType struct { + This ManagedObjectReference `xml:"_this"` + Spec HostConnectSpec `xml:"spec"` + CompResSpec BaseComputeResourceConfigSpec `xml:"compResSpec,omitempty,typeattr"` + AddConnected bool `xml:"addConnected"` + License string `xml:"license,omitempty"` +} + +func init() { + 
t["AddStandaloneHostRequestType"] = reflect.TypeOf((*AddStandaloneHostRequestType)(nil)).Elem() +} + +type AddStandaloneHost_Task AddStandaloneHostRequestType + +func init() { + t["AddStandaloneHost_Task"] = reflect.TypeOf((*AddStandaloneHost_Task)(nil)).Elem() +} + +type AddStandaloneHost_TaskResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + +type AddVirtualNic AddVirtualNicRequestType + +func init() { + t["AddVirtualNic"] = reflect.TypeOf((*AddVirtualNic)(nil)).Elem() +} + +type AddVirtualNicRequestType struct { + This ManagedObjectReference `xml:"_this"` + Portgroup string `xml:"portgroup"` + Nic HostVirtualNicSpec `xml:"nic"` +} + +func init() { + t["AddVirtualNicRequestType"] = reflect.TypeOf((*AddVirtualNicRequestType)(nil)).Elem() +} + +type AddVirtualNicResponse struct { + Returnval string `xml:"returnval"` +} + +type AddVirtualSwitch AddVirtualSwitchRequestType + +func init() { + t["AddVirtualSwitch"] = reflect.TypeOf((*AddVirtualSwitch)(nil)).Elem() +} + +type AddVirtualSwitchRequestType struct { + This ManagedObjectReference `xml:"_this"` + VswitchName string `xml:"vswitchName"` + Spec *HostVirtualSwitchSpec `xml:"spec,omitempty"` +} + +func init() { + t["AddVirtualSwitchRequestType"] = reflect.TypeOf((*AddVirtualSwitchRequestType)(nil)).Elem() +} + +type AddVirtualSwitchResponse struct { +} + +type AdminDisabled struct { + HostConfigFault +} + +func init() { + t["AdminDisabled"] = reflect.TypeOf((*AdminDisabled)(nil)).Elem() +} + +type AdminDisabledFault AdminDisabled + +func init() { + t["AdminDisabledFault"] = reflect.TypeOf((*AdminDisabledFault)(nil)).Elem() +} + +type AdminNotDisabled struct { + HostConfigFault +} + +func init() { + t["AdminNotDisabled"] = reflect.TypeOf((*AdminNotDisabled)(nil)).Elem() +} + +type AdminNotDisabledFault AdminNotDisabled + +func init() { + t["AdminNotDisabledFault"] = reflect.TypeOf((*AdminNotDisabledFault)(nil)).Elem() +} + +type AdminPasswordNotChangedEvent struct { + HostEvent +} + +func init() { + t["AdminPasswordNotChangedEvent"] = reflect.TypeOf((*AdminPasswordNotChangedEvent)(nil)).Elem() +} + +type AffinityConfigured struct { + MigrationFault + + ConfiguredAffinity []string `xml:"configuredAffinity"` +} + +func init() { + t["AffinityConfigured"] = reflect.TypeOf((*AffinityConfigured)(nil)).Elem() +} + +type AffinityConfiguredFault AffinityConfigured + +func init() { + t["AffinityConfiguredFault"] = reflect.TypeOf((*AffinityConfiguredFault)(nil)).Elem() +} + +type AfterStartupTaskScheduler struct { + TaskScheduler + + Minute int `xml:"minute"` +} + +func init() { + t["AfterStartupTaskScheduler"] = reflect.TypeOf((*AfterStartupTaskScheduler)(nil)).Elem() +} + +type AgentInstallFailed struct { + HostConnectFault + + Reason string `xml:"reason,omitempty"` + StatusCode int `xml:"statusCode,omitempty"` + InstallerOutput string `xml:"installerOutput,omitempty"` +} + +func init() { + t["AgentInstallFailed"] = reflect.TypeOf((*AgentInstallFailed)(nil)).Elem() +} + +type AgentInstallFailedFault AgentInstallFailed + +func init() { + t["AgentInstallFailedFault"] = reflect.TypeOf((*AgentInstallFailedFault)(nil)).Elem() +} + +type AlarmAcknowledgedEvent struct { + AlarmEvent + + Source ManagedEntityEventArgument `xml:"source"` + Entity ManagedEntityEventArgument `xml:"entity"` +} + +func init() { + t["AlarmAcknowledgedEvent"] = reflect.TypeOf((*AlarmAcknowledgedEvent)(nil)).Elem() +} + +type AlarmAction struct { + DynamicData +} + +func init() { + t["AlarmAction"] = reflect.TypeOf((*AlarmAction)(nil)).Elem() +} + +type 
AlarmActionTriggeredEvent struct { + AlarmEvent + + Source ManagedEntityEventArgument `xml:"source"` + Entity ManagedEntityEventArgument `xml:"entity"` +} + +func init() { + t["AlarmActionTriggeredEvent"] = reflect.TypeOf((*AlarmActionTriggeredEvent)(nil)).Elem() +} + +type AlarmClearedEvent struct { + AlarmEvent + + Source ManagedEntityEventArgument `xml:"source"` + Entity ManagedEntityEventArgument `xml:"entity"` + From string `xml:"from"` +} + +func init() { + t["AlarmClearedEvent"] = reflect.TypeOf((*AlarmClearedEvent)(nil)).Elem() +} + +type AlarmCreatedEvent struct { + AlarmEvent + + Entity ManagedEntityEventArgument `xml:"entity"` +} + +func init() { + t["AlarmCreatedEvent"] = reflect.TypeOf((*AlarmCreatedEvent)(nil)).Elem() +} + +type AlarmDescription struct { + DynamicData + + Expr []BaseTypeDescription `xml:"expr,typeattr"` + StateOperator []BaseElementDescription `xml:"stateOperator,typeattr"` + MetricOperator []BaseElementDescription `xml:"metricOperator,typeattr"` + HostSystemConnectionState []BaseElementDescription `xml:"hostSystemConnectionState,typeattr"` + VirtualMachinePowerState []BaseElementDescription `xml:"virtualMachinePowerState,typeattr"` + DatastoreConnectionState []BaseElementDescription `xml:"datastoreConnectionState,omitempty,typeattr"` + HostSystemPowerState []BaseElementDescription `xml:"hostSystemPowerState,omitempty,typeattr"` + VirtualMachineGuestHeartbeatStatus []BaseElementDescription `xml:"virtualMachineGuestHeartbeatStatus,omitempty,typeattr"` + EntityStatus []BaseElementDescription `xml:"entityStatus,typeattr"` + Action []BaseTypeDescription `xml:"action,typeattr"` +} + +func init() { + t["AlarmDescription"] = reflect.TypeOf((*AlarmDescription)(nil)).Elem() +} + +type AlarmEmailCompletedEvent struct { + AlarmEvent + + Entity ManagedEntityEventArgument `xml:"entity"` + To string `xml:"to"` +} + +func init() { + t["AlarmEmailCompletedEvent"] = reflect.TypeOf((*AlarmEmailCompletedEvent)(nil)).Elem() +} + +type AlarmEmailFailedEvent struct { + AlarmEvent + + Entity ManagedEntityEventArgument `xml:"entity"` + To string `xml:"to"` + Reason LocalizedMethodFault `xml:"reason"` +} + +func init() { + t["AlarmEmailFailedEvent"] = reflect.TypeOf((*AlarmEmailFailedEvent)(nil)).Elem() +} + +type AlarmEvent struct { + Event + + Alarm AlarmEventArgument `xml:"alarm"` +} + +func init() { + t["AlarmEvent"] = reflect.TypeOf((*AlarmEvent)(nil)).Elem() +} + +type AlarmEventArgument struct { + EntityEventArgument + + Alarm ManagedObjectReference `xml:"alarm"` +} + +func init() { + t["AlarmEventArgument"] = reflect.TypeOf((*AlarmEventArgument)(nil)).Elem() +} + +type AlarmExpression struct { + DynamicData +} + +func init() { + t["AlarmExpression"] = reflect.TypeOf((*AlarmExpression)(nil)).Elem() +} + +type AlarmInfo struct { + AlarmSpec + + Key string `xml:"key"` + Alarm ManagedObjectReference `xml:"alarm"` + Entity ManagedObjectReference `xml:"entity"` + LastModifiedTime time.Time `xml:"lastModifiedTime"` + LastModifiedUser string `xml:"lastModifiedUser"` + CreationEventId int `xml:"creationEventId"` +} + +func init() { + t["AlarmInfo"] = reflect.TypeOf((*AlarmInfo)(nil)).Elem() +} + +type AlarmReconfiguredEvent struct { + AlarmEvent + + Entity ManagedEntityEventArgument `xml:"entity"` +} + +func init() { + t["AlarmReconfiguredEvent"] = reflect.TypeOf((*AlarmReconfiguredEvent)(nil)).Elem() +} + +type AlarmRemovedEvent struct { + AlarmEvent + + Entity ManagedEntityEventArgument `xml:"entity"` +} + +func init() { + t["AlarmRemovedEvent"] = 
reflect.TypeOf((*AlarmRemovedEvent)(nil)).Elem() +} + +type AlarmScriptCompleteEvent struct { + AlarmEvent + + Entity ManagedEntityEventArgument `xml:"entity"` + Script string `xml:"script"` +} + +func init() { + t["AlarmScriptCompleteEvent"] = reflect.TypeOf((*AlarmScriptCompleteEvent)(nil)).Elem() +} + +type AlarmScriptFailedEvent struct { + AlarmEvent + + Entity ManagedEntityEventArgument `xml:"entity"` + Script string `xml:"script"` + Reason LocalizedMethodFault `xml:"reason"` +} + +func init() { + t["AlarmScriptFailedEvent"] = reflect.TypeOf((*AlarmScriptFailedEvent)(nil)).Elem() +} + +type AlarmSetting struct { + DynamicData + + ToleranceRange int `xml:"toleranceRange"` + ReportingFrequency int `xml:"reportingFrequency"` +} + +func init() { + t["AlarmSetting"] = reflect.TypeOf((*AlarmSetting)(nil)).Elem() +} + +type AlarmSnmpCompletedEvent struct { + AlarmEvent + + Entity ManagedEntityEventArgument `xml:"entity"` +} + +func init() { + t["AlarmSnmpCompletedEvent"] = reflect.TypeOf((*AlarmSnmpCompletedEvent)(nil)).Elem() +} + +type AlarmSnmpFailedEvent struct { + AlarmEvent + + Entity ManagedEntityEventArgument `xml:"entity"` + Reason LocalizedMethodFault `xml:"reason"` +} + +func init() { + t["AlarmSnmpFailedEvent"] = reflect.TypeOf((*AlarmSnmpFailedEvent)(nil)).Elem() +} + +type AlarmSpec struct { + DynamicData + + Name string `xml:"name"` + SystemName string `xml:"systemName,omitempty"` + Description string `xml:"description"` + Enabled bool `xml:"enabled"` + Expression BaseAlarmExpression `xml:"expression,typeattr"` + Action BaseAlarmAction `xml:"action,omitempty,typeattr"` + ActionFrequency int `xml:"actionFrequency,omitempty"` + Setting *AlarmSetting `xml:"setting,omitempty"` +} + +func init() { + t["AlarmSpec"] = reflect.TypeOf((*AlarmSpec)(nil)).Elem() +} + +type AlarmState struct { + DynamicData + + Key string `xml:"key"` + Entity ManagedObjectReference `xml:"entity"` + Alarm ManagedObjectReference `xml:"alarm"` + OverallStatus ManagedEntityStatus `xml:"overallStatus"` + Time time.Time `xml:"time"` + Acknowledged *bool `xml:"acknowledged"` + AcknowledgedByUser string `xml:"acknowledgedByUser,omitempty"` + AcknowledgedTime *time.Time `xml:"acknowledgedTime"` + EventKey int `xml:"eventKey,omitempty"` +} + +func init() { + t["AlarmState"] = reflect.TypeOf((*AlarmState)(nil)).Elem() +} + +type AlarmStatusChangedEvent struct { + AlarmEvent + + Source ManagedEntityEventArgument `xml:"source"` + Entity ManagedEntityEventArgument `xml:"entity"` + From string `xml:"from"` + To string `xml:"to"` +} + +func init() { + t["AlarmStatusChangedEvent"] = reflect.TypeOf((*AlarmStatusChangedEvent)(nil)).Elem() +} + +type AlarmTriggeringAction struct { + AlarmAction + + Action BaseAction `xml:"action,typeattr"` + TransitionSpecs []AlarmTriggeringActionTransitionSpec `xml:"transitionSpecs,omitempty"` + Green2yellow bool `xml:"green2yellow"` + Yellow2red bool `xml:"yellow2red"` + Red2yellow bool `xml:"red2yellow"` + Yellow2green bool `xml:"yellow2green"` +} + +func init() { + t["AlarmTriggeringAction"] = reflect.TypeOf((*AlarmTriggeringAction)(nil)).Elem() +} + +type AlarmTriggeringActionTransitionSpec struct { + DynamicData + + StartState ManagedEntityStatus `xml:"startState"` + FinalState ManagedEntityStatus `xml:"finalState"` + Repeats bool `xml:"repeats"` +} + +func init() { + t["AlarmTriggeringActionTransitionSpec"] = reflect.TypeOf((*AlarmTriggeringActionTransitionSpec)(nil)).Elem() +} + +type AllVirtualMachinesLicensedEvent struct { + LicenseEvent +} + +func init() { + 
t["AllVirtualMachinesLicensedEvent"] = reflect.TypeOf((*AllVirtualMachinesLicensedEvent)(nil)).Elem() +} + +type AllocateIpv4Address AllocateIpv4AddressRequestType + +func init() { + t["AllocateIpv4Address"] = reflect.TypeOf((*AllocateIpv4Address)(nil)).Elem() +} + +type AllocateIpv4AddressRequestType struct { + This ManagedObjectReference `xml:"_this"` + Dc ManagedObjectReference `xml:"dc"` + PoolId int `xml:"poolId"` + AllocationId string `xml:"allocationId"` +} + +func init() { + t["AllocateIpv4AddressRequestType"] = reflect.TypeOf((*AllocateIpv4AddressRequestType)(nil)).Elem() +} + +type AllocateIpv4AddressResponse struct { + Returnval string `xml:"returnval"` +} + +type AllocateIpv6Address AllocateIpv6AddressRequestType + +func init() { + t["AllocateIpv6Address"] = reflect.TypeOf((*AllocateIpv6Address)(nil)).Elem() +} + +type AllocateIpv6AddressRequestType struct { + This ManagedObjectReference `xml:"_this"` + Dc ManagedObjectReference `xml:"dc"` + PoolId int `xml:"poolId"` + AllocationId string `xml:"allocationId"` +} + +func init() { + t["AllocateIpv6AddressRequestType"] = reflect.TypeOf((*AllocateIpv6AddressRequestType)(nil)).Elem() +} + +type AllocateIpv6AddressResponse struct { + Returnval string `xml:"returnval"` +} + +type AlreadyAuthenticatedSessionEvent struct { + SessionEvent +} + +func init() { + t["AlreadyAuthenticatedSessionEvent"] = reflect.TypeOf((*AlreadyAuthenticatedSessionEvent)(nil)).Elem() +} + +type AlreadyBeingManaged struct { + HostConnectFault + + IpAddress string `xml:"ipAddress"` +} + +func init() { + t["AlreadyBeingManaged"] = reflect.TypeOf((*AlreadyBeingManaged)(nil)).Elem() +} + +type AlreadyBeingManagedFault AlreadyBeingManaged + +func init() { + t["AlreadyBeingManagedFault"] = reflect.TypeOf((*AlreadyBeingManagedFault)(nil)).Elem() +} + +type AlreadyConnected struct { + HostConnectFault + + Name string `xml:"name"` +} + +func init() { + t["AlreadyConnected"] = reflect.TypeOf((*AlreadyConnected)(nil)).Elem() +} + +type AlreadyConnectedFault AlreadyConnected + +func init() { + t["AlreadyConnectedFault"] = reflect.TypeOf((*AlreadyConnectedFault)(nil)).Elem() +} + +type AlreadyExists struct { + VimFault + + Name string `xml:"name,omitempty"` +} + +func init() { + t["AlreadyExists"] = reflect.TypeOf((*AlreadyExists)(nil)).Elem() +} + +type AlreadyExistsFault AlreadyExists + +func init() { + t["AlreadyExistsFault"] = reflect.TypeOf((*AlreadyExistsFault)(nil)).Elem() +} + +type AlreadyUpgraded struct { + VimFault +} + +func init() { + t["AlreadyUpgraded"] = reflect.TypeOf((*AlreadyUpgraded)(nil)).Elem() +} + +type AlreadyUpgradedFault AlreadyUpgraded + +func init() { + t["AlreadyUpgradedFault"] = reflect.TypeOf((*AlreadyUpgradedFault)(nil)).Elem() +} + +type AndAlarmExpression struct { + AlarmExpression + + Expression []BaseAlarmExpression `xml:"expression,typeattr"` +} + +func init() { + t["AndAlarmExpression"] = reflect.TypeOf((*AndAlarmExpression)(nil)).Elem() +} + +type AnswerFile struct { + DynamicData + + UserInput []ProfileDeferredPolicyOptionParameter `xml:"userInput,omitempty"` + CreatedTime time.Time `xml:"createdTime"` + ModifiedTime time.Time `xml:"modifiedTime"` +} + +func init() { + t["AnswerFile"] = reflect.TypeOf((*AnswerFile)(nil)).Elem() +} + +type AnswerFileCreateSpec struct { + DynamicData + + Validating *bool `xml:"validating"` +} + +func init() { + t["AnswerFileCreateSpec"] = reflect.TypeOf((*AnswerFileCreateSpec)(nil)).Elem() +} + +type AnswerFileOptionsCreateSpec struct { + AnswerFileCreateSpec + + UserInput 
[]ProfileDeferredPolicyOptionParameter `xml:"userInput,omitempty"` +} + +func init() { + t["AnswerFileOptionsCreateSpec"] = reflect.TypeOf((*AnswerFileOptionsCreateSpec)(nil)).Elem() +} + +type AnswerFileSerializedCreateSpec struct { + AnswerFileCreateSpec + + AnswerFileConfigString string `xml:"answerFileConfigString"` +} + +func init() { + t["AnswerFileSerializedCreateSpec"] = reflect.TypeOf((*AnswerFileSerializedCreateSpec)(nil)).Elem() +} + +type AnswerFileStatusError struct { + DynamicData + + UserInputPath ProfilePropertyPath `xml:"userInputPath"` + ErrMsg LocalizableMessage `xml:"errMsg"` +} + +func init() { + t["AnswerFileStatusError"] = reflect.TypeOf((*AnswerFileStatusError)(nil)).Elem() +} + +type AnswerFileStatusResult struct { + DynamicData + + CheckedTime time.Time `xml:"checkedTime"` + Host ManagedObjectReference `xml:"host"` + Status string `xml:"status"` + Error []AnswerFileStatusError `xml:"error,omitempty"` +} + +func init() { + t["AnswerFileStatusResult"] = reflect.TypeOf((*AnswerFileStatusResult)(nil)).Elem() +} + +type AnswerFileUpdateFailed struct { + VimFault + + Failure []AnswerFileUpdateFailure `xml:"failure"` +} + +func init() { + t["AnswerFileUpdateFailed"] = reflect.TypeOf((*AnswerFileUpdateFailed)(nil)).Elem() +} + +type AnswerFileUpdateFailedFault AnswerFileUpdateFailed + +func init() { + t["AnswerFileUpdateFailedFault"] = reflect.TypeOf((*AnswerFileUpdateFailedFault)(nil)).Elem() +} + +type AnswerFileUpdateFailure struct { + DynamicData + + UserInputPath ProfilePropertyPath `xml:"userInputPath"` + ErrMsg LocalizableMessage `xml:"errMsg"` +} + +func init() { + t["AnswerFileUpdateFailure"] = reflect.TypeOf((*AnswerFileUpdateFailure)(nil)).Elem() +} + +type AnswerVM AnswerVMRequestType + +func init() { + t["AnswerVM"] = reflect.TypeOf((*AnswerVM)(nil)).Elem() +} + +type AnswerVMRequestType struct { + This ManagedObjectReference `xml:"_this"` + QuestionId string `xml:"questionId"` + AnswerChoice string `xml:"answerChoice"` +} + +func init() { + t["AnswerVMRequestType"] = reflect.TypeOf((*AnswerVMRequestType)(nil)).Elem() +} + +type AnswerVMResponse struct { +} + +type ApplicationQuiesceFault struct { + SnapshotFault +} + +func init() { + t["ApplicationQuiesceFault"] = reflect.TypeOf((*ApplicationQuiesceFault)(nil)).Elem() +} + +type ApplicationQuiesceFaultFault ApplicationQuiesceFault + +func init() { + t["ApplicationQuiesceFaultFault"] = reflect.TypeOf((*ApplicationQuiesceFaultFault)(nil)).Elem() +} + +type ApplyHostConfigRequestType struct { + This ManagedObjectReference `xml:"_this"` + Host ManagedObjectReference `xml:"host"` + ConfigSpec HostConfigSpec `xml:"configSpec"` + UserInput []ProfileDeferredPolicyOptionParameter `xml:"userInput,omitempty"` +} + +func init() { + t["ApplyHostConfigRequestType"] = reflect.TypeOf((*ApplyHostConfigRequestType)(nil)).Elem() +} + +type ApplyHostConfig_Task ApplyHostConfigRequestType + +func init() { + t["ApplyHostConfig_Task"] = reflect.TypeOf((*ApplyHostConfig_Task)(nil)).Elem() +} + +type ApplyHostConfig_TaskResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + +type ApplyProfile struct { + DynamicData + + Enabled bool `xml:"enabled"` + Policy []ProfilePolicy `xml:"policy,omitempty"` + ProfileTypeName string `xml:"profileTypeName,omitempty"` + ProfileVersion string `xml:"profileVersion,omitempty"` + Property []ProfileApplyProfileProperty `xml:"property,omitempty"` +} + +func init() { + t["ApplyProfile"] = reflect.TypeOf((*ApplyProfile)(nil)).Elem() +} + +type ApplyRecommendation 
ApplyRecommendationRequestType + +func init() { + t["ApplyRecommendation"] = reflect.TypeOf((*ApplyRecommendation)(nil)).Elem() +} + +type ApplyRecommendationRequestType struct { + This ManagedObjectReference `xml:"_this"` + Key string `xml:"key"` +} + +func init() { + t["ApplyRecommendationRequestType"] = reflect.TypeOf((*ApplyRecommendationRequestType)(nil)).Elem() +} + +type ApplyRecommendationResponse struct { +} + +type ApplyStorageDrsRecommendationRequestType struct { + This ManagedObjectReference `xml:"_this"` + Key []string `xml:"key"` +} + +func init() { + t["ApplyStorageDrsRecommendationRequestType"] = reflect.TypeOf((*ApplyStorageDrsRecommendationRequestType)(nil)).Elem() +} + +type ApplyStorageDrsRecommendationToPodRequestType struct { + This ManagedObjectReference `xml:"_this"` + Pod ManagedObjectReference `xml:"pod"` + Key string `xml:"key"` +} + +func init() { + t["ApplyStorageDrsRecommendationToPodRequestType"] = reflect.TypeOf((*ApplyStorageDrsRecommendationToPodRequestType)(nil)).Elem() +} + +type ApplyStorageDrsRecommendationToPod_Task ApplyStorageDrsRecommendationToPodRequestType + +func init() { + t["ApplyStorageDrsRecommendationToPod_Task"] = reflect.TypeOf((*ApplyStorageDrsRecommendationToPod_Task)(nil)).Elem() +} + +type ApplyStorageDrsRecommendationToPod_TaskResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + +type ApplyStorageDrsRecommendation_Task ApplyStorageDrsRecommendationRequestType + +func init() { + t["ApplyStorageDrsRecommendation_Task"] = reflect.TypeOf((*ApplyStorageDrsRecommendation_Task)(nil)).Elem() +} + +type ApplyStorageDrsRecommendation_TaskResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + +type ApplyStorageRecommendationResult struct { + DynamicData + + Vm *ManagedObjectReference `xml:"vm,omitempty"` +} + +func init() { + t["ApplyStorageRecommendationResult"] = reflect.TypeOf((*ApplyStorageRecommendationResult)(nil)).Elem() +} + +type AreAlarmActionsEnabled AreAlarmActionsEnabledRequestType + +func init() { + t["AreAlarmActionsEnabled"] = reflect.TypeOf((*AreAlarmActionsEnabled)(nil)).Elem() +} + +type AreAlarmActionsEnabledRequestType struct { + This ManagedObjectReference `xml:"_this"` + Entity ManagedObjectReference `xml:"entity"` +} + +func init() { + t["AreAlarmActionsEnabledRequestType"] = reflect.TypeOf((*AreAlarmActionsEnabledRequestType)(nil)).Elem() +} + +type AreAlarmActionsEnabledResponse struct { + Returnval bool `xml:"returnval"` +} + +type ArrayOfAlarmAction struct { + AlarmAction []BaseAlarmAction `xml:"AlarmAction,omitempty,typeattr"` +} + +func init() { + t["ArrayOfAlarmAction"] = reflect.TypeOf((*ArrayOfAlarmAction)(nil)).Elem() +} + +type ArrayOfAlarmExpression struct { + AlarmExpression []BaseAlarmExpression `xml:"AlarmExpression,omitempty,typeattr"` +} + +func init() { + t["ArrayOfAlarmExpression"] = reflect.TypeOf((*ArrayOfAlarmExpression)(nil)).Elem() +} + +type ArrayOfAlarmState struct { + AlarmState []AlarmState `xml:"AlarmState,omitempty"` +} + +func init() { + t["ArrayOfAlarmState"] = reflect.TypeOf((*ArrayOfAlarmState)(nil)).Elem() +} + +type ArrayOfAlarmTriggeringActionTransitionSpec struct { + AlarmTriggeringActionTransitionSpec []AlarmTriggeringActionTransitionSpec `xml:"AlarmTriggeringActionTransitionSpec,omitempty"` +} + +func init() { + t["ArrayOfAlarmTriggeringActionTransitionSpec"] = reflect.TypeOf((*ArrayOfAlarmTriggeringActionTransitionSpec)(nil)).Elem() +} + +type ArrayOfAnswerFileStatusError struct { + AnswerFileStatusError []AnswerFileStatusError 
`xml:"AnswerFileStatusError,omitempty"` +} + +func init() { + t["ArrayOfAnswerFileStatusError"] = reflect.TypeOf((*ArrayOfAnswerFileStatusError)(nil)).Elem() +} + +type ArrayOfAnswerFileStatusResult struct { + AnswerFileStatusResult []AnswerFileStatusResult `xml:"AnswerFileStatusResult,omitempty"` +} + +func init() { + t["ArrayOfAnswerFileStatusResult"] = reflect.TypeOf((*ArrayOfAnswerFileStatusResult)(nil)).Elem() +} + +type ArrayOfAnswerFileUpdateFailure struct { + AnswerFileUpdateFailure []AnswerFileUpdateFailure `xml:"AnswerFileUpdateFailure,omitempty"` +} + +func init() { + t["ArrayOfAnswerFileUpdateFailure"] = reflect.TypeOf((*ArrayOfAnswerFileUpdateFailure)(nil)).Elem() +} + +type ArrayOfAnyType struct { + AnyType []AnyType `xml:"anyType,omitempty,typeattr"` +} + +func init() { + t["ArrayOfAnyType"] = reflect.TypeOf((*ArrayOfAnyType)(nil)).Elem() +} + +type ArrayOfAnyURI struct { + AnyURI []url.URL `xml:"anyURI,omitempty"` +} + +func init() { + t["ArrayOfAnyURI"] = reflect.TypeOf((*ArrayOfAnyURI)(nil)).Elem() +} + +type ArrayOfApplyProfile struct { + ApplyProfile []BaseApplyProfile `xml:"ApplyProfile,omitempty,typeattr"` +} + +func init() { + t["ArrayOfApplyProfile"] = reflect.TypeOf((*ArrayOfApplyProfile)(nil)).Elem() +} + +type ArrayOfAuthorizationPrivilege struct { + AuthorizationPrivilege []AuthorizationPrivilege `xml:"AuthorizationPrivilege,omitempty"` +} + +func init() { + t["ArrayOfAuthorizationPrivilege"] = reflect.TypeOf((*ArrayOfAuthorizationPrivilege)(nil)).Elem() +} + +type ArrayOfAuthorizationRole struct { + AuthorizationRole []AuthorizationRole `xml:"AuthorizationRole,omitempty"` +} + +func init() { + t["ArrayOfAuthorizationRole"] = reflect.TypeOf((*ArrayOfAuthorizationRole)(nil)).Elem() +} + +type ArrayOfAutoStartPowerInfo struct { + AutoStartPowerInfo []AutoStartPowerInfo `xml:"AutoStartPowerInfo,omitempty"` +} + +func init() { + t["ArrayOfAutoStartPowerInfo"] = reflect.TypeOf((*ArrayOfAutoStartPowerInfo)(nil)).Elem() +} + +type ArrayOfBoolean struct { + Boolean []bool `xml:"boolean,omitempty"` +} + +func init() { + t["ArrayOfBoolean"] = reflect.TypeOf((*ArrayOfBoolean)(nil)).Elem() +} + +type ArrayOfByte struct { + Byte []byte `xml:"byte,omitempty"` +} + +func init() { + t["ArrayOfByte"] = reflect.TypeOf((*ArrayOfByte)(nil)).Elem() +} + +type ArrayOfCheckResult struct { + CheckResult []CheckResult `xml:"CheckResult,omitempty"` +} + +func init() { + t["ArrayOfCheckResult"] = reflect.TypeOf((*ArrayOfCheckResult)(nil)).Elem() +} + +type ArrayOfClusterAction struct { + ClusterAction []BaseClusterAction `xml:"ClusterAction,omitempty,typeattr"` +} + +func init() { + t["ArrayOfClusterAction"] = reflect.TypeOf((*ArrayOfClusterAction)(nil)).Elem() +} + +type ArrayOfClusterActionHistory struct { + ClusterActionHistory []ClusterActionHistory `xml:"ClusterActionHistory,omitempty"` +} + +func init() { + t["ArrayOfClusterActionHistory"] = reflect.TypeOf((*ArrayOfClusterActionHistory)(nil)).Elem() +} + +type ArrayOfClusterAttemptedVmInfo struct { + ClusterAttemptedVmInfo []ClusterAttemptedVmInfo `xml:"ClusterAttemptedVmInfo,omitempty"` +} + +func init() { + t["ArrayOfClusterAttemptedVmInfo"] = reflect.TypeOf((*ArrayOfClusterAttemptedVmInfo)(nil)).Elem() +} + +type ArrayOfClusterDasAamNodeState struct { + ClusterDasAamNodeState []ClusterDasAamNodeState `xml:"ClusterDasAamNodeState,omitempty"` +} + +func init() { + t["ArrayOfClusterDasAamNodeState"] = reflect.TypeOf((*ArrayOfClusterDasAamNodeState)(nil)).Elem() +} + +type ArrayOfClusterDasFailoverLevelAdvancedRuntimeInfoHostSlots 
struct { + ClusterDasFailoverLevelAdvancedRuntimeInfoHostSlots []ClusterDasFailoverLevelAdvancedRuntimeInfoHostSlots `xml:"ClusterDasFailoverLevelAdvancedRuntimeInfoHostSlots,omitempty"` +} + +func init() { + t["ArrayOfClusterDasFailoverLevelAdvancedRuntimeInfoHostSlots"] = reflect.TypeOf((*ArrayOfClusterDasFailoverLevelAdvancedRuntimeInfoHostSlots)(nil)).Elem() +} + +type ArrayOfClusterDasFailoverLevelAdvancedRuntimeInfoVmSlots struct { + ClusterDasFailoverLevelAdvancedRuntimeInfoVmSlots []ClusterDasFailoverLevelAdvancedRuntimeInfoVmSlots `xml:"ClusterDasFailoverLevelAdvancedRuntimeInfoVmSlots,omitempty"` +} + +func init() { + t["ArrayOfClusterDasFailoverLevelAdvancedRuntimeInfoVmSlots"] = reflect.TypeOf((*ArrayOfClusterDasFailoverLevelAdvancedRuntimeInfoVmSlots)(nil)).Elem() +} + +type ArrayOfClusterDasVmConfigInfo struct { + ClusterDasVmConfigInfo []ClusterDasVmConfigInfo `xml:"ClusterDasVmConfigInfo,omitempty"` +} + +func init() { + t["ArrayOfClusterDasVmConfigInfo"] = reflect.TypeOf((*ArrayOfClusterDasVmConfigInfo)(nil)).Elem() +} + +type ArrayOfClusterDasVmConfigSpec struct { + ClusterDasVmConfigSpec []ClusterDasVmConfigSpec `xml:"ClusterDasVmConfigSpec,omitempty"` +} + +func init() { + t["ArrayOfClusterDasVmConfigSpec"] = reflect.TypeOf((*ArrayOfClusterDasVmConfigSpec)(nil)).Elem() +} + +type ArrayOfClusterDpmHostConfigInfo struct { + ClusterDpmHostConfigInfo []ClusterDpmHostConfigInfo `xml:"ClusterDpmHostConfigInfo,omitempty"` +} + +func init() { + t["ArrayOfClusterDpmHostConfigInfo"] = reflect.TypeOf((*ArrayOfClusterDpmHostConfigInfo)(nil)).Elem() +} + +type ArrayOfClusterDpmHostConfigSpec struct { + ClusterDpmHostConfigSpec []ClusterDpmHostConfigSpec `xml:"ClusterDpmHostConfigSpec,omitempty"` +} + +func init() { + t["ArrayOfClusterDpmHostConfigSpec"] = reflect.TypeOf((*ArrayOfClusterDpmHostConfigSpec)(nil)).Elem() +} + +type ArrayOfClusterDrsFaults struct { + ClusterDrsFaults []ClusterDrsFaults `xml:"ClusterDrsFaults,omitempty"` +} + +func init() { + t["ArrayOfClusterDrsFaults"] = reflect.TypeOf((*ArrayOfClusterDrsFaults)(nil)).Elem() +} + +type ArrayOfClusterDrsFaultsFaultsByVm struct { + ClusterDrsFaultsFaultsByVm []BaseClusterDrsFaultsFaultsByVm `xml:"ClusterDrsFaultsFaultsByVm,omitempty,typeattr"` +} + +func init() { + t["ArrayOfClusterDrsFaultsFaultsByVm"] = reflect.TypeOf((*ArrayOfClusterDrsFaultsFaultsByVm)(nil)).Elem() +} + +type ArrayOfClusterDrsMigration struct { + ClusterDrsMigration []ClusterDrsMigration `xml:"ClusterDrsMigration,omitempty"` +} + +func init() { + t["ArrayOfClusterDrsMigration"] = reflect.TypeOf((*ArrayOfClusterDrsMigration)(nil)).Elem() +} + +type ArrayOfClusterDrsRecommendation struct { + ClusterDrsRecommendation []ClusterDrsRecommendation `xml:"ClusterDrsRecommendation,omitempty"` +} + +func init() { + t["ArrayOfClusterDrsRecommendation"] = reflect.TypeOf((*ArrayOfClusterDrsRecommendation)(nil)).Elem() +} + +type ArrayOfClusterDrsVmConfigInfo struct { + ClusterDrsVmConfigInfo []ClusterDrsVmConfigInfo `xml:"ClusterDrsVmConfigInfo,omitempty"` +} + +func init() { + t["ArrayOfClusterDrsVmConfigInfo"] = reflect.TypeOf((*ArrayOfClusterDrsVmConfigInfo)(nil)).Elem() +} + +type ArrayOfClusterDrsVmConfigSpec struct { + ClusterDrsVmConfigSpec []ClusterDrsVmConfigSpec `xml:"ClusterDrsVmConfigSpec,omitempty"` +} + +func init() { + t["ArrayOfClusterDrsVmConfigSpec"] = reflect.TypeOf((*ArrayOfClusterDrsVmConfigSpec)(nil)).Elem() +} + +type ArrayOfClusterEVCManagerCheckResult struct { + ClusterEVCManagerCheckResult []ClusterEVCManagerCheckResult 
`xml:"ClusterEVCManagerCheckResult,omitempty"` +} + +func init() { + t["ArrayOfClusterEVCManagerCheckResult"] = reflect.TypeOf((*ArrayOfClusterEVCManagerCheckResult)(nil)).Elem() +} + +type ArrayOfClusterFailoverHostAdmissionControlInfoHostStatus struct { + ClusterFailoverHostAdmissionControlInfoHostStatus []ClusterFailoverHostAdmissionControlInfoHostStatus `xml:"ClusterFailoverHostAdmissionControlInfoHostStatus,omitempty"` +} + +func init() { + t["ArrayOfClusterFailoverHostAdmissionControlInfoHostStatus"] = reflect.TypeOf((*ArrayOfClusterFailoverHostAdmissionControlInfoHostStatus)(nil)).Elem() +} + +type ArrayOfClusterGroupInfo struct { + ClusterGroupInfo []BaseClusterGroupInfo `xml:"ClusterGroupInfo,omitempty,typeattr"` +} + +func init() { + t["ArrayOfClusterGroupInfo"] = reflect.TypeOf((*ArrayOfClusterGroupInfo)(nil)).Elem() +} + +type ArrayOfClusterGroupSpec struct { + ClusterGroupSpec []ClusterGroupSpec `xml:"ClusterGroupSpec,omitempty"` +} + +func init() { + t["ArrayOfClusterGroupSpec"] = reflect.TypeOf((*ArrayOfClusterGroupSpec)(nil)).Elem() +} + +type ArrayOfClusterHostRecommendation struct { + ClusterHostRecommendation []ClusterHostRecommendation `xml:"ClusterHostRecommendation,omitempty"` +} + +func init() { + t["ArrayOfClusterHostRecommendation"] = reflect.TypeOf((*ArrayOfClusterHostRecommendation)(nil)).Elem() +} + +type ArrayOfClusterIoFilterInfo struct { + ClusterIoFilterInfo []ClusterIoFilterInfo `xml:"ClusterIoFilterInfo,omitempty"` +} + +func init() { + t["ArrayOfClusterIoFilterInfo"] = reflect.TypeOf((*ArrayOfClusterIoFilterInfo)(nil)).Elem() +} + +type ArrayOfClusterNotAttemptedVmInfo struct { + ClusterNotAttemptedVmInfo []ClusterNotAttemptedVmInfo `xml:"ClusterNotAttemptedVmInfo,omitempty"` +} + +func init() { + t["ArrayOfClusterNotAttemptedVmInfo"] = reflect.TypeOf((*ArrayOfClusterNotAttemptedVmInfo)(nil)).Elem() +} + +type ArrayOfClusterRecommendation struct { + ClusterRecommendation []ClusterRecommendation `xml:"ClusterRecommendation,omitempty"` +} + +func init() { + t["ArrayOfClusterRecommendation"] = reflect.TypeOf((*ArrayOfClusterRecommendation)(nil)).Elem() +} + +type ArrayOfClusterRuleInfo struct { + ClusterRuleInfo []BaseClusterRuleInfo `xml:"ClusterRuleInfo,omitempty,typeattr"` +} + +func init() { + t["ArrayOfClusterRuleInfo"] = reflect.TypeOf((*ArrayOfClusterRuleInfo)(nil)).Elem() +} + +type ArrayOfClusterRuleSpec struct { + ClusterRuleSpec []ClusterRuleSpec `xml:"ClusterRuleSpec,omitempty"` +} + +func init() { + t["ArrayOfClusterRuleSpec"] = reflect.TypeOf((*ArrayOfClusterRuleSpec)(nil)).Elem() +} + +type ArrayOfComplianceFailure struct { + ComplianceFailure []ComplianceFailure `xml:"ComplianceFailure,omitempty"` +} + +func init() { + t["ArrayOfComplianceFailure"] = reflect.TypeOf((*ArrayOfComplianceFailure)(nil)).Elem() +} + +type ArrayOfComplianceLocator struct { + ComplianceLocator []ComplianceLocator `xml:"ComplianceLocator,omitempty"` +} + +func init() { + t["ArrayOfComplianceLocator"] = reflect.TypeOf((*ArrayOfComplianceLocator)(nil)).Elem() +} + +type ArrayOfComplianceResult struct { + ComplianceResult []ComplianceResult `xml:"ComplianceResult,omitempty"` +} + +func init() { + t["ArrayOfComplianceResult"] = reflect.TypeOf((*ArrayOfComplianceResult)(nil)).Elem() +} + +type ArrayOfComputeResourceHostSPBMLicenseInfo struct { + ComputeResourceHostSPBMLicenseInfo []ComputeResourceHostSPBMLicenseInfo `xml:"ComputeResourceHostSPBMLicenseInfo,omitempty"` +} + +func init() { + t["ArrayOfComputeResourceHostSPBMLicenseInfo"] = 
reflect.TypeOf((*ArrayOfComputeResourceHostSPBMLicenseInfo)(nil)).Elem() +} + +type ArrayOfConflictingConfigurationConfig struct { + ConflictingConfigurationConfig []ConflictingConfigurationConfig `xml:"ConflictingConfigurationConfig,omitempty"` +} + +func init() { + t["ArrayOfConflictingConfigurationConfig"] = reflect.TypeOf((*ArrayOfConflictingConfigurationConfig)(nil)).Elem() +} + +type ArrayOfCustomFieldDef struct { + CustomFieldDef []CustomFieldDef `xml:"CustomFieldDef,omitempty"` +} + +func init() { + t["ArrayOfCustomFieldDef"] = reflect.TypeOf((*ArrayOfCustomFieldDef)(nil)).Elem() +} + +type ArrayOfCustomFieldValue struct { + CustomFieldValue []BaseCustomFieldValue `xml:"CustomFieldValue,omitempty,typeattr"` +} + +func init() { + t["ArrayOfCustomFieldValue"] = reflect.TypeOf((*ArrayOfCustomFieldValue)(nil)).Elem() +} + +type ArrayOfCustomizationAdapterMapping struct { + CustomizationAdapterMapping []CustomizationAdapterMapping `xml:"CustomizationAdapterMapping,omitempty"` +} + +func init() { + t["ArrayOfCustomizationAdapterMapping"] = reflect.TypeOf((*ArrayOfCustomizationAdapterMapping)(nil)).Elem() +} + +type ArrayOfCustomizationIpV6Generator struct { + CustomizationIpV6Generator []BaseCustomizationIpV6Generator `xml:"CustomizationIpV6Generator,omitempty,typeattr"` +} + +func init() { + t["ArrayOfCustomizationIpV6Generator"] = reflect.TypeOf((*ArrayOfCustomizationIpV6Generator)(nil)).Elem() +} + +type ArrayOfCustomizationSpecInfo struct { + CustomizationSpecInfo []CustomizationSpecInfo `xml:"CustomizationSpecInfo,omitempty"` +} + +func init() { + t["ArrayOfCustomizationSpecInfo"] = reflect.TypeOf((*ArrayOfCustomizationSpecInfo)(nil)).Elem() +} + +type ArrayOfDVPortConfigSpec struct { + DVPortConfigSpec []DVPortConfigSpec `xml:"DVPortConfigSpec,omitempty"` +} + +func init() { + t["ArrayOfDVPortConfigSpec"] = reflect.TypeOf((*ArrayOfDVPortConfigSpec)(nil)).Elem() +} + +type ArrayOfDVPortgroupConfigSpec struct { + DVPortgroupConfigSpec []DVPortgroupConfigSpec `xml:"DVPortgroupConfigSpec,omitempty"` +} + +func init() { + t["ArrayOfDVPortgroupConfigSpec"] = reflect.TypeOf((*ArrayOfDVPortgroupConfigSpec)(nil)).Elem() +} + +type ArrayOfDVSHealthCheckConfig struct { + DVSHealthCheckConfig []BaseDVSHealthCheckConfig `xml:"DVSHealthCheckConfig,omitempty,typeattr"` +} + +func init() { + t["ArrayOfDVSHealthCheckConfig"] = reflect.TypeOf((*ArrayOfDVSHealthCheckConfig)(nil)).Elem() +} + +type ArrayOfDVSNetworkResourcePool struct { + DVSNetworkResourcePool []DVSNetworkResourcePool `xml:"DVSNetworkResourcePool,omitempty"` +} + +func init() { + t["ArrayOfDVSNetworkResourcePool"] = reflect.TypeOf((*ArrayOfDVSNetworkResourcePool)(nil)).Elem() +} + +type ArrayOfDVSNetworkResourcePoolConfigSpec struct { + DVSNetworkResourcePoolConfigSpec []DVSNetworkResourcePoolConfigSpec `xml:"DVSNetworkResourcePoolConfigSpec,omitempty"` +} + +func init() { + t["ArrayOfDVSNetworkResourcePoolConfigSpec"] = reflect.TypeOf((*ArrayOfDVSNetworkResourcePoolConfigSpec)(nil)).Elem() +} + +type ArrayOfDVSVmVnicNetworkResourcePool struct { + DVSVmVnicNetworkResourcePool []DVSVmVnicNetworkResourcePool `xml:"DVSVmVnicNetworkResourcePool,omitempty"` +} + +func init() { + t["ArrayOfDVSVmVnicNetworkResourcePool"] = reflect.TypeOf((*ArrayOfDVSVmVnicNetworkResourcePool)(nil)).Elem() +} + +type ArrayOfDasHeartbeatDatastoreInfo struct { + DasHeartbeatDatastoreInfo []DasHeartbeatDatastoreInfo `xml:"DasHeartbeatDatastoreInfo,omitempty"` +} + +func init() { + t["ArrayOfDasHeartbeatDatastoreInfo"] = 
reflect.TypeOf((*ArrayOfDasHeartbeatDatastoreInfo)(nil)).Elem() +} + +type ArrayOfDatacenterMismatchArgument struct { + DatacenterMismatchArgument []DatacenterMismatchArgument `xml:"DatacenterMismatchArgument,omitempty"` +} + +func init() { + t["ArrayOfDatacenterMismatchArgument"] = reflect.TypeOf((*ArrayOfDatacenterMismatchArgument)(nil)).Elem() +} + +type ArrayOfDatastoreHostMount struct { + DatastoreHostMount []DatastoreHostMount `xml:"DatastoreHostMount,omitempty"` +} + +func init() { + t["ArrayOfDatastoreHostMount"] = reflect.TypeOf((*ArrayOfDatastoreHostMount)(nil)).Elem() +} + +type ArrayOfDatastoreMountPathDatastorePair struct { + DatastoreMountPathDatastorePair []DatastoreMountPathDatastorePair `xml:"DatastoreMountPathDatastorePair,omitempty"` +} + +func init() { + t["ArrayOfDatastoreMountPathDatastorePair"] = reflect.TypeOf((*ArrayOfDatastoreMountPathDatastorePair)(nil)).Elem() +} + +type ArrayOfDiagnosticManagerBundleInfo struct { + DiagnosticManagerBundleInfo []DiagnosticManagerBundleInfo `xml:"DiagnosticManagerBundleInfo,omitempty"` +} + +func init() { + t["ArrayOfDiagnosticManagerBundleInfo"] = reflect.TypeOf((*ArrayOfDiagnosticManagerBundleInfo)(nil)).Elem() +} + +type ArrayOfDiagnosticManagerLogDescriptor struct { + DiagnosticManagerLogDescriptor []DiagnosticManagerLogDescriptor `xml:"DiagnosticManagerLogDescriptor,omitempty"` +} + +func init() { + t["ArrayOfDiagnosticManagerLogDescriptor"] = reflect.TypeOf((*ArrayOfDiagnosticManagerLogDescriptor)(nil)).Elem() +} + +type ArrayOfDiskChangeExtent struct { + DiskChangeExtent []DiskChangeExtent `xml:"DiskChangeExtent,omitempty"` +} + +func init() { + t["ArrayOfDiskChangeExtent"] = reflect.TypeOf((*ArrayOfDiskChangeExtent)(nil)).Elem() +} + +type ArrayOfDistributedVirtualPort struct { + DistributedVirtualPort []DistributedVirtualPort `xml:"DistributedVirtualPort,omitempty"` +} + +func init() { + t["ArrayOfDistributedVirtualPort"] = reflect.TypeOf((*ArrayOfDistributedVirtualPort)(nil)).Elem() +} + +type ArrayOfDistributedVirtualPortgroupInfo struct { + DistributedVirtualPortgroupInfo []DistributedVirtualPortgroupInfo `xml:"DistributedVirtualPortgroupInfo,omitempty"` +} + +func init() { + t["ArrayOfDistributedVirtualPortgroupInfo"] = reflect.TypeOf((*ArrayOfDistributedVirtualPortgroupInfo)(nil)).Elem() +} + +type ArrayOfDistributedVirtualSwitchHostMember struct { + DistributedVirtualSwitchHostMember []DistributedVirtualSwitchHostMember `xml:"DistributedVirtualSwitchHostMember,omitempty"` +} + +func init() { + t["ArrayOfDistributedVirtualSwitchHostMember"] = reflect.TypeOf((*ArrayOfDistributedVirtualSwitchHostMember)(nil)).Elem() +} + +type ArrayOfDistributedVirtualSwitchHostMemberConfigSpec struct { + DistributedVirtualSwitchHostMemberConfigSpec []DistributedVirtualSwitchHostMemberConfigSpec `xml:"DistributedVirtualSwitchHostMemberConfigSpec,omitempty"` +} + +func init() { + t["ArrayOfDistributedVirtualSwitchHostMemberConfigSpec"] = reflect.TypeOf((*ArrayOfDistributedVirtualSwitchHostMemberConfigSpec)(nil)).Elem() +} + +type ArrayOfDistributedVirtualSwitchHostMemberPnicSpec struct { + DistributedVirtualSwitchHostMemberPnicSpec []DistributedVirtualSwitchHostMemberPnicSpec `xml:"DistributedVirtualSwitchHostMemberPnicSpec,omitempty"` +} + +func init() { + t["ArrayOfDistributedVirtualSwitchHostMemberPnicSpec"] = reflect.TypeOf((*ArrayOfDistributedVirtualSwitchHostMemberPnicSpec)(nil)).Elem() +} + +type ArrayOfDistributedVirtualSwitchHostProductSpec struct { + DistributedVirtualSwitchHostProductSpec 
[]DistributedVirtualSwitchHostProductSpec `xml:"DistributedVirtualSwitchHostProductSpec,omitempty"` +} + +func init() { + t["ArrayOfDistributedVirtualSwitchHostProductSpec"] = reflect.TypeOf((*ArrayOfDistributedVirtualSwitchHostProductSpec)(nil)).Elem() +} + +type ArrayOfDistributedVirtualSwitchInfo struct { + DistributedVirtualSwitchInfo []DistributedVirtualSwitchInfo `xml:"DistributedVirtualSwitchInfo,omitempty"` +} + +func init() { + t["ArrayOfDistributedVirtualSwitchInfo"] = reflect.TypeOf((*ArrayOfDistributedVirtualSwitchInfo)(nil)).Elem() +} + +type ArrayOfDistributedVirtualSwitchKeyedOpaqueBlob struct { + DistributedVirtualSwitchKeyedOpaqueBlob []DistributedVirtualSwitchKeyedOpaqueBlob `xml:"DistributedVirtualSwitchKeyedOpaqueBlob,omitempty"` +} + +func init() { + t["ArrayOfDistributedVirtualSwitchKeyedOpaqueBlob"] = reflect.TypeOf((*ArrayOfDistributedVirtualSwitchKeyedOpaqueBlob)(nil)).Elem() +} + +type ArrayOfDistributedVirtualSwitchManagerCompatibilityResult struct { + DistributedVirtualSwitchManagerCompatibilityResult []DistributedVirtualSwitchManagerCompatibilityResult `xml:"DistributedVirtualSwitchManagerCompatibilityResult,omitempty"` +} + +func init() { + t["ArrayOfDistributedVirtualSwitchManagerCompatibilityResult"] = reflect.TypeOf((*ArrayOfDistributedVirtualSwitchManagerCompatibilityResult)(nil)).Elem() +} + +type ArrayOfDistributedVirtualSwitchManagerHostDvsFilterSpec struct { + DistributedVirtualSwitchManagerHostDvsFilterSpec []BaseDistributedVirtualSwitchManagerHostDvsFilterSpec `xml:"DistributedVirtualSwitchManagerHostDvsFilterSpec,omitempty,typeattr"` +} + +func init() { + t["ArrayOfDistributedVirtualSwitchManagerHostDvsFilterSpec"] = reflect.TypeOf((*ArrayOfDistributedVirtualSwitchManagerHostDvsFilterSpec)(nil)).Elem() +} + +type ArrayOfDistributedVirtualSwitchProductSpec struct { + DistributedVirtualSwitchProductSpec []DistributedVirtualSwitchProductSpec `xml:"DistributedVirtualSwitchProductSpec,omitempty"` +} + +func init() { + t["ArrayOfDistributedVirtualSwitchProductSpec"] = reflect.TypeOf((*ArrayOfDistributedVirtualSwitchProductSpec)(nil)).Elem() +} + +type ArrayOfDouble struct { + Double []float64 `xml:"double,omitempty"` +} + +func init() { + t["ArrayOfDouble"] = reflect.TypeOf((*ArrayOfDouble)(nil)).Elem() +} + +type ArrayOfDvsApplyOperationFaultFaultOnObject struct { + DvsApplyOperationFaultFaultOnObject []DvsApplyOperationFaultFaultOnObject `xml:"DvsApplyOperationFaultFaultOnObject,omitempty"` +} + +func init() { + t["ArrayOfDvsApplyOperationFaultFaultOnObject"] = reflect.TypeOf((*ArrayOfDvsApplyOperationFaultFaultOnObject)(nil)).Elem() +} + +type ArrayOfDvsFilterConfig struct { + DvsFilterConfig []BaseDvsFilterConfig `xml:"DvsFilterConfig,omitempty,typeattr"` +} + +func init() { + t["ArrayOfDvsFilterConfig"] = reflect.TypeOf((*ArrayOfDvsFilterConfig)(nil)).Elem() +} + +type ArrayOfDvsHostInfrastructureTrafficResource struct { + DvsHostInfrastructureTrafficResource []DvsHostInfrastructureTrafficResource `xml:"DvsHostInfrastructureTrafficResource,omitempty"` +} + +func init() { + t["ArrayOfDvsHostInfrastructureTrafficResource"] = reflect.TypeOf((*ArrayOfDvsHostInfrastructureTrafficResource)(nil)).Elem() +} + +type ArrayOfDvsHostVNicProfile struct { + DvsHostVNicProfile []DvsHostVNicProfile `xml:"DvsHostVNicProfile,omitempty"` +} + +func init() { + t["ArrayOfDvsHostVNicProfile"] = reflect.TypeOf((*ArrayOfDvsHostVNicProfile)(nil)).Elem() +} + +type ArrayOfDvsNetworkRuleQualifier struct { + DvsNetworkRuleQualifier []BaseDvsNetworkRuleQualifier 
`xml:"DvsNetworkRuleQualifier,omitempty,typeattr"` +} + +func init() { + t["ArrayOfDvsNetworkRuleQualifier"] = reflect.TypeOf((*ArrayOfDvsNetworkRuleQualifier)(nil)).Elem() +} + +type ArrayOfDvsOperationBulkFaultFaultOnHost struct { + DvsOperationBulkFaultFaultOnHost []DvsOperationBulkFaultFaultOnHost `xml:"DvsOperationBulkFaultFaultOnHost,omitempty"` +} + +func init() { + t["ArrayOfDvsOperationBulkFaultFaultOnHost"] = reflect.TypeOf((*ArrayOfDvsOperationBulkFaultFaultOnHost)(nil)).Elem() +} + +type ArrayOfDvsOutOfSyncHostArgument struct { + DvsOutOfSyncHostArgument []DvsOutOfSyncHostArgument `xml:"DvsOutOfSyncHostArgument,omitempty"` +} + +func init() { + t["ArrayOfDvsOutOfSyncHostArgument"] = reflect.TypeOf((*ArrayOfDvsOutOfSyncHostArgument)(nil)).Elem() +} + +type ArrayOfDvsProfile struct { + DvsProfile []DvsProfile `xml:"DvsProfile,omitempty"` +} + +func init() { + t["ArrayOfDvsProfile"] = reflect.TypeOf((*ArrayOfDvsProfile)(nil)).Elem() +} + +type ArrayOfDvsServiceConsoleVNicProfile struct { + DvsServiceConsoleVNicProfile []DvsServiceConsoleVNicProfile `xml:"DvsServiceConsoleVNicProfile,omitempty"` +} + +func init() { + t["ArrayOfDvsServiceConsoleVNicProfile"] = reflect.TypeOf((*ArrayOfDvsServiceConsoleVNicProfile)(nil)).Elem() +} + +type ArrayOfDvsTrafficRule struct { + DvsTrafficRule []DvsTrafficRule `xml:"DvsTrafficRule,omitempty"` +} + +func init() { + t["ArrayOfDvsTrafficRule"] = reflect.TypeOf((*ArrayOfDvsTrafficRule)(nil)).Elem() +} + +type ArrayOfDvsVmVnicNetworkResourcePoolRuntimeInfo struct { + DvsVmVnicNetworkResourcePoolRuntimeInfo []DvsVmVnicNetworkResourcePoolRuntimeInfo `xml:"DvsVmVnicNetworkResourcePoolRuntimeInfo,omitempty"` +} + +func init() { + t["ArrayOfDvsVmVnicNetworkResourcePoolRuntimeInfo"] = reflect.TypeOf((*ArrayOfDvsVmVnicNetworkResourcePoolRuntimeInfo)(nil)).Elem() +} + +type ArrayOfDvsVmVnicResourcePoolConfigSpec struct { + DvsVmVnicResourcePoolConfigSpec []DvsVmVnicResourcePoolConfigSpec `xml:"DvsVmVnicResourcePoolConfigSpec,omitempty"` +} + +func init() { + t["ArrayOfDvsVmVnicResourcePoolConfigSpec"] = reflect.TypeOf((*ArrayOfDvsVmVnicResourcePoolConfigSpec)(nil)).Elem() +} + +type ArrayOfDvsVnicAllocatedResource struct { + DvsVnicAllocatedResource []DvsVnicAllocatedResource `xml:"DvsVnicAllocatedResource,omitempty"` +} + +func init() { + t["ArrayOfDvsVnicAllocatedResource"] = reflect.TypeOf((*ArrayOfDvsVnicAllocatedResource)(nil)).Elem() +} + +type ArrayOfDynamicProperty struct { + DynamicProperty []DynamicProperty `xml:"DynamicProperty,omitempty"` +} + +func init() { + t["ArrayOfDynamicProperty"] = reflect.TypeOf((*ArrayOfDynamicProperty)(nil)).Elem() +} + +type ArrayOfEVCMode struct { + EVCMode []EVCMode `xml:"EVCMode,omitempty"` +} + +func init() { + t["ArrayOfEVCMode"] = reflect.TypeOf((*ArrayOfEVCMode)(nil)).Elem() +} + +type ArrayOfElementDescription struct { + ElementDescription []BaseElementDescription `xml:"ElementDescription,omitempty,typeattr"` +} + +func init() { + t["ArrayOfElementDescription"] = reflect.TypeOf((*ArrayOfElementDescription)(nil)).Elem() +} + +type ArrayOfEntityBackupConfig struct { + EntityBackupConfig []EntityBackupConfig `xml:"EntityBackupConfig,omitempty"` +} + +func init() { + t["ArrayOfEntityBackupConfig"] = reflect.TypeOf((*ArrayOfEntityBackupConfig)(nil)).Elem() +} + +type ArrayOfEntityPrivilege struct { + EntityPrivilege []EntityPrivilege `xml:"EntityPrivilege,omitempty"` +} + +func init() { + t["ArrayOfEntityPrivilege"] = reflect.TypeOf((*ArrayOfEntityPrivilege)(nil)).Elem() +} + +type ArrayOfEnumDescription struct 
{ + EnumDescription []EnumDescription `xml:"EnumDescription,omitempty"` +} + +func init() { + t["ArrayOfEnumDescription"] = reflect.TypeOf((*ArrayOfEnumDescription)(nil)).Elem() +} + +type ArrayOfEvent struct { + Event []BaseEvent `xml:"Event,omitempty,typeattr"` +} + +func init() { + t["ArrayOfEvent"] = reflect.TypeOf((*ArrayOfEvent)(nil)).Elem() +} + +type ArrayOfEventAlarmExpressionComparison struct { + EventAlarmExpressionComparison []EventAlarmExpressionComparison `xml:"EventAlarmExpressionComparison,omitempty"` +} + +func init() { + t["ArrayOfEventAlarmExpressionComparison"] = reflect.TypeOf((*ArrayOfEventAlarmExpressionComparison)(nil)).Elem() +} + +type ArrayOfEventArgDesc struct { + EventArgDesc []EventArgDesc `xml:"EventArgDesc,omitempty"` +} + +func init() { + t["ArrayOfEventArgDesc"] = reflect.TypeOf((*ArrayOfEventArgDesc)(nil)).Elem() +} + +type ArrayOfEventDescriptionEventDetail struct { + EventDescriptionEventDetail []EventDescriptionEventDetail `xml:"EventDescriptionEventDetail,omitempty"` +} + +func init() { + t["ArrayOfEventDescriptionEventDetail"] = reflect.TypeOf((*ArrayOfEventDescriptionEventDetail)(nil)).Elem() +} + +type ArrayOfExtManagedEntityInfo struct { + ExtManagedEntityInfo []ExtManagedEntityInfo `xml:"ExtManagedEntityInfo,omitempty"` +} + +func init() { + t["ArrayOfExtManagedEntityInfo"] = reflect.TypeOf((*ArrayOfExtManagedEntityInfo)(nil)).Elem() +} + +type ArrayOfExtSolutionManagerInfoTabInfo struct { + ExtSolutionManagerInfoTabInfo []ExtSolutionManagerInfoTabInfo `xml:"ExtSolutionManagerInfoTabInfo,omitempty"` +} + +func init() { + t["ArrayOfExtSolutionManagerInfoTabInfo"] = reflect.TypeOf((*ArrayOfExtSolutionManagerInfoTabInfo)(nil)).Elem() +} + +type ArrayOfExtendedEventPair struct { + ExtendedEventPair []ExtendedEventPair `xml:"ExtendedEventPair,omitempty"` +} + +func init() { + t["ArrayOfExtendedEventPair"] = reflect.TypeOf((*ArrayOfExtendedEventPair)(nil)).Elem() +} + +type ArrayOfExtension struct { + Extension []Extension `xml:"Extension,omitempty"` +} + +func init() { + t["ArrayOfExtension"] = reflect.TypeOf((*ArrayOfExtension)(nil)).Elem() +} + +type ArrayOfExtensionClientInfo struct { + ExtensionClientInfo []ExtensionClientInfo `xml:"ExtensionClientInfo,omitempty"` +} + +func init() { + t["ArrayOfExtensionClientInfo"] = reflect.TypeOf((*ArrayOfExtensionClientInfo)(nil)).Elem() +} + +type ArrayOfExtensionEventTypeInfo struct { + ExtensionEventTypeInfo []ExtensionEventTypeInfo `xml:"ExtensionEventTypeInfo,omitempty"` +} + +func init() { + t["ArrayOfExtensionEventTypeInfo"] = reflect.TypeOf((*ArrayOfExtensionEventTypeInfo)(nil)).Elem() +} + +type ArrayOfExtensionFaultTypeInfo struct { + ExtensionFaultTypeInfo []ExtensionFaultTypeInfo `xml:"ExtensionFaultTypeInfo,omitempty"` +} + +func init() { + t["ArrayOfExtensionFaultTypeInfo"] = reflect.TypeOf((*ArrayOfExtensionFaultTypeInfo)(nil)).Elem() +} + +type ArrayOfExtensionManagerIpAllocationUsage struct { + ExtensionManagerIpAllocationUsage []ExtensionManagerIpAllocationUsage `xml:"ExtensionManagerIpAllocationUsage,omitempty"` +} + +func init() { + t["ArrayOfExtensionManagerIpAllocationUsage"] = reflect.TypeOf((*ArrayOfExtensionManagerIpAllocationUsage)(nil)).Elem() +} + +type ArrayOfExtensionPrivilegeInfo struct { + ExtensionPrivilegeInfo []ExtensionPrivilegeInfo `xml:"ExtensionPrivilegeInfo,omitempty"` +} + +func init() { + t["ArrayOfExtensionPrivilegeInfo"] = reflect.TypeOf((*ArrayOfExtensionPrivilegeInfo)(nil)).Elem() +} + +type ArrayOfExtensionResourceInfo struct { + ExtensionResourceInfo 
[]ExtensionResourceInfo `xml:"ExtensionResourceInfo,omitempty"` +} + +func init() { + t["ArrayOfExtensionResourceInfo"] = reflect.TypeOf((*ArrayOfExtensionResourceInfo)(nil)).Elem() +} + +type ArrayOfExtensionServerInfo struct { + ExtensionServerInfo []ExtensionServerInfo `xml:"ExtensionServerInfo,omitempty"` +} + +func init() { + t["ArrayOfExtensionServerInfo"] = reflect.TypeOf((*ArrayOfExtensionServerInfo)(nil)).Elem() +} + +type ArrayOfExtensionTaskTypeInfo struct { + ExtensionTaskTypeInfo []ExtensionTaskTypeInfo `xml:"ExtensionTaskTypeInfo,omitempty"` +} + +func init() { + t["ArrayOfExtensionTaskTypeInfo"] = reflect.TypeOf((*ArrayOfExtensionTaskTypeInfo)(nil)).Elem() +} + +type ArrayOfFaultToleranceDiskSpec struct { + FaultToleranceDiskSpec []FaultToleranceDiskSpec `xml:"FaultToleranceDiskSpec,omitempty"` +} + +func init() { + t["ArrayOfFaultToleranceDiskSpec"] = reflect.TypeOf((*ArrayOfFaultToleranceDiskSpec)(nil)).Elem() +} + +type ArrayOfFcoeConfigVlanRange struct { + FcoeConfigVlanRange []FcoeConfigVlanRange `xml:"FcoeConfigVlanRange,omitempty"` +} + +func init() { + t["ArrayOfFcoeConfigVlanRange"] = reflect.TypeOf((*ArrayOfFcoeConfigVlanRange)(nil)).Elem() +} + +type ArrayOfFileInfo struct { + FileInfo []BaseFileInfo `xml:"FileInfo,omitempty,typeattr"` +} + +func init() { + t["ArrayOfFileInfo"] = reflect.TypeOf((*ArrayOfFileInfo)(nil)).Elem() +} + +type ArrayOfFileQuery struct { + FileQuery []BaseFileQuery `xml:"FileQuery,omitempty,typeattr"` +} + +func init() { + t["ArrayOfFileQuery"] = reflect.TypeOf((*ArrayOfFileQuery)(nil)).Elem() +} + +type ArrayOfFirewallProfileRulesetProfile struct { + FirewallProfileRulesetProfile []FirewallProfileRulesetProfile `xml:"FirewallProfileRulesetProfile,omitempty"` +} + +func init() { + t["ArrayOfFirewallProfileRulesetProfile"] = reflect.TypeOf((*ArrayOfFirewallProfileRulesetProfile)(nil)).Elem() +} + +type ArrayOfGuestAliases struct { + GuestAliases []GuestAliases `xml:"GuestAliases,omitempty"` +} + +func init() { + t["ArrayOfGuestAliases"] = reflect.TypeOf((*ArrayOfGuestAliases)(nil)).Elem() +} + +type ArrayOfGuestAuthAliasInfo struct { + GuestAuthAliasInfo []GuestAuthAliasInfo `xml:"GuestAuthAliasInfo,omitempty"` +} + +func init() { + t["ArrayOfGuestAuthAliasInfo"] = reflect.TypeOf((*ArrayOfGuestAuthAliasInfo)(nil)).Elem() +} + +type ArrayOfGuestAuthSubject struct { + GuestAuthSubject []BaseGuestAuthSubject `xml:"GuestAuthSubject,omitempty,typeattr"` +} + +func init() { + t["ArrayOfGuestAuthSubject"] = reflect.TypeOf((*ArrayOfGuestAuthSubject)(nil)).Elem() +} + +type ArrayOfGuestDiskInfo struct { + GuestDiskInfo []GuestDiskInfo `xml:"GuestDiskInfo,omitempty"` +} + +func init() { + t["ArrayOfGuestDiskInfo"] = reflect.TypeOf((*ArrayOfGuestDiskInfo)(nil)).Elem() +} + +type ArrayOfGuestFileInfo struct { + GuestFileInfo []GuestFileInfo `xml:"GuestFileInfo,omitempty"` +} + +func init() { + t["ArrayOfGuestFileInfo"] = reflect.TypeOf((*ArrayOfGuestFileInfo)(nil)).Elem() +} + +type ArrayOfGuestInfoNamespaceGenerationInfo struct { + GuestInfoNamespaceGenerationInfo []GuestInfoNamespaceGenerationInfo `xml:"GuestInfoNamespaceGenerationInfo,omitempty"` +} + +func init() { + t["ArrayOfGuestInfoNamespaceGenerationInfo"] = reflect.TypeOf((*ArrayOfGuestInfoNamespaceGenerationInfo)(nil)).Elem() +} + +type ArrayOfGuestMappedAliases struct { + GuestMappedAliases []GuestMappedAliases `xml:"GuestMappedAliases,omitempty"` +} + +func init() { + t["ArrayOfGuestMappedAliases"] = reflect.TypeOf((*ArrayOfGuestMappedAliases)(nil)).Elem() +} + +type ArrayOfGuestNicInfo 
struct { + GuestNicInfo []GuestNicInfo `xml:"GuestNicInfo,omitempty"` +} + +func init() { + t["ArrayOfGuestNicInfo"] = reflect.TypeOf((*ArrayOfGuestNicInfo)(nil)).Elem() +} + +type ArrayOfGuestOsDescriptor struct { + GuestOsDescriptor []GuestOsDescriptor `xml:"GuestOsDescriptor,omitempty"` +} + +func init() { + t["ArrayOfGuestOsDescriptor"] = reflect.TypeOf((*ArrayOfGuestOsDescriptor)(nil)).Elem() +} + +type ArrayOfGuestProcessInfo struct { + GuestProcessInfo []GuestProcessInfo `xml:"GuestProcessInfo,omitempty"` +} + +func init() { + t["ArrayOfGuestProcessInfo"] = reflect.TypeOf((*ArrayOfGuestProcessInfo)(nil)).Elem() +} + +type ArrayOfGuestRegKeyRecordSpec struct { + GuestRegKeyRecordSpec []GuestRegKeyRecordSpec `xml:"GuestRegKeyRecordSpec,omitempty"` +} + +func init() { + t["ArrayOfGuestRegKeyRecordSpec"] = reflect.TypeOf((*ArrayOfGuestRegKeyRecordSpec)(nil)).Elem() +} + +type ArrayOfGuestRegValueSpec struct { + GuestRegValueSpec []GuestRegValueSpec `xml:"GuestRegValueSpec,omitempty"` +} + +func init() { + t["ArrayOfGuestRegValueSpec"] = reflect.TypeOf((*ArrayOfGuestRegValueSpec)(nil)).Elem() +} + +type ArrayOfGuestStackInfo struct { + GuestStackInfo []GuestStackInfo `xml:"GuestStackInfo,omitempty"` +} + +func init() { + t["ArrayOfGuestStackInfo"] = reflect.TypeOf((*ArrayOfGuestStackInfo)(nil)).Elem() +} + +type ArrayOfHbrManagerVmReplicationCapability struct { + HbrManagerVmReplicationCapability []HbrManagerVmReplicationCapability `xml:"HbrManagerVmReplicationCapability,omitempty"` +} + +func init() { + t["ArrayOfHbrManagerVmReplicationCapability"] = reflect.TypeOf((*ArrayOfHbrManagerVmReplicationCapability)(nil)).Elem() +} + +type ArrayOfHostAccessControlEntry struct { + HostAccessControlEntry []HostAccessControlEntry `xml:"HostAccessControlEntry,omitempty"` +} + +func init() { + t["ArrayOfHostAccessControlEntry"] = reflect.TypeOf((*ArrayOfHostAccessControlEntry)(nil)).Elem() +} + +type ArrayOfHostAccountSpec struct { + HostAccountSpec []BaseHostAccountSpec `xml:"HostAccountSpec,omitempty,typeattr"` +} + +func init() { + t["ArrayOfHostAccountSpec"] = reflect.TypeOf((*ArrayOfHostAccountSpec)(nil)).Elem() +} + +type ArrayOfHostActiveDirectory struct { + HostActiveDirectory []HostActiveDirectory `xml:"HostActiveDirectory,omitempty"` +} + +func init() { + t["ArrayOfHostActiveDirectory"] = reflect.TypeOf((*ArrayOfHostActiveDirectory)(nil)).Elem() +} + +type ArrayOfHostAuthenticationStoreInfo struct { + HostAuthenticationStoreInfo []BaseHostAuthenticationStoreInfo `xml:"HostAuthenticationStoreInfo,omitempty,typeattr"` +} + +func init() { + t["ArrayOfHostAuthenticationStoreInfo"] = reflect.TypeOf((*ArrayOfHostAuthenticationStoreInfo)(nil)).Elem() +} + +type ArrayOfHostBootDevice struct { + HostBootDevice []HostBootDevice `xml:"HostBootDevice,omitempty"` +} + +func init() { + t["ArrayOfHostBootDevice"] = reflect.TypeOf((*ArrayOfHostBootDevice)(nil)).Elem() +} + +type ArrayOfHostCacheConfigurationInfo struct { + HostCacheConfigurationInfo []HostCacheConfigurationInfo `xml:"HostCacheConfigurationInfo,omitempty"` +} + +func init() { + t["ArrayOfHostCacheConfigurationInfo"] = reflect.TypeOf((*ArrayOfHostCacheConfigurationInfo)(nil)).Elem() +} + +type ArrayOfHostConnectInfoNetworkInfo struct { + HostConnectInfoNetworkInfo []BaseHostConnectInfoNetworkInfo `xml:"HostConnectInfoNetworkInfo,omitempty,typeattr"` +} + +func init() { + t["ArrayOfHostConnectInfoNetworkInfo"] = reflect.TypeOf((*ArrayOfHostConnectInfoNetworkInfo)(nil)).Elem() +} + +type ArrayOfHostCpuIdInfo struct { + HostCpuIdInfo 
[]HostCpuIdInfo `xml:"HostCpuIdInfo,omitempty"` +} + +func init() { + t["ArrayOfHostCpuIdInfo"] = reflect.TypeOf((*ArrayOfHostCpuIdInfo)(nil)).Elem() +} + +type ArrayOfHostCpuPackage struct { + HostCpuPackage []HostCpuPackage `xml:"HostCpuPackage,omitempty"` +} + +func init() { + t["ArrayOfHostCpuPackage"] = reflect.TypeOf((*ArrayOfHostCpuPackage)(nil)).Elem() +} + +type ArrayOfHostDatastoreBrowserSearchResults struct { + HostDatastoreBrowserSearchResults []HostDatastoreBrowserSearchResults `xml:"HostDatastoreBrowserSearchResults,omitempty"` +} + +func init() { + t["ArrayOfHostDatastoreBrowserSearchResults"] = reflect.TypeOf((*ArrayOfHostDatastoreBrowserSearchResults)(nil)).Elem() +} + +type ArrayOfHostDatastoreConnectInfo struct { + HostDatastoreConnectInfo []BaseHostDatastoreConnectInfo `xml:"HostDatastoreConnectInfo,omitempty,typeattr"` +} + +func init() { + t["ArrayOfHostDatastoreConnectInfo"] = reflect.TypeOf((*ArrayOfHostDatastoreConnectInfo)(nil)).Elem() +} + +type ArrayOfHostDatastoreSystemDatastoreResult struct { + HostDatastoreSystemDatastoreResult []HostDatastoreSystemDatastoreResult `xml:"HostDatastoreSystemDatastoreResult,omitempty"` +} + +func init() { + t["ArrayOfHostDatastoreSystemDatastoreResult"] = reflect.TypeOf((*ArrayOfHostDatastoreSystemDatastoreResult)(nil)).Elem() +} + +type ArrayOfHostDateTimeSystemTimeZone struct { + HostDateTimeSystemTimeZone []HostDateTimeSystemTimeZone `xml:"HostDateTimeSystemTimeZone,omitempty"` +} + +func init() { + t["ArrayOfHostDateTimeSystemTimeZone"] = reflect.TypeOf((*ArrayOfHostDateTimeSystemTimeZone)(nil)).Elem() +} + +type ArrayOfHostDhcpService struct { + HostDhcpService []HostDhcpService `xml:"HostDhcpService,omitempty"` +} + +func init() { + t["ArrayOfHostDhcpService"] = reflect.TypeOf((*ArrayOfHostDhcpService)(nil)).Elem() +} + +type ArrayOfHostDhcpServiceConfig struct { + HostDhcpServiceConfig []HostDhcpServiceConfig `xml:"HostDhcpServiceConfig,omitempty"` +} + +func init() { + t["ArrayOfHostDhcpServiceConfig"] = reflect.TypeOf((*ArrayOfHostDhcpServiceConfig)(nil)).Elem() +} + +type ArrayOfHostDiagnosticPartition struct { + HostDiagnosticPartition []HostDiagnosticPartition `xml:"HostDiagnosticPartition,omitempty"` +} + +func init() { + t["ArrayOfHostDiagnosticPartition"] = reflect.TypeOf((*ArrayOfHostDiagnosticPartition)(nil)).Elem() +} + +type ArrayOfHostDiagnosticPartitionCreateOption struct { + HostDiagnosticPartitionCreateOption []HostDiagnosticPartitionCreateOption `xml:"HostDiagnosticPartitionCreateOption,omitempty"` +} + +func init() { + t["ArrayOfHostDiagnosticPartitionCreateOption"] = reflect.TypeOf((*ArrayOfHostDiagnosticPartitionCreateOption)(nil)).Elem() +} + +type ArrayOfHostDiskConfigurationResult struct { + HostDiskConfigurationResult []HostDiskConfigurationResult `xml:"HostDiskConfigurationResult,omitempty"` +} + +func init() { + t["ArrayOfHostDiskConfigurationResult"] = reflect.TypeOf((*ArrayOfHostDiskConfigurationResult)(nil)).Elem() +} + +type ArrayOfHostDiskMappingPartitionOption struct { + HostDiskMappingPartitionOption []HostDiskMappingPartitionOption `xml:"HostDiskMappingPartitionOption,omitempty"` +} + +func init() { + t["ArrayOfHostDiskMappingPartitionOption"] = reflect.TypeOf((*ArrayOfHostDiskMappingPartitionOption)(nil)).Elem() +} + +type ArrayOfHostDiskPartitionAttributes struct { + HostDiskPartitionAttributes []HostDiskPartitionAttributes `xml:"HostDiskPartitionAttributes,omitempty"` +} + +func init() { + t["ArrayOfHostDiskPartitionAttributes"] = 
reflect.TypeOf((*ArrayOfHostDiskPartitionAttributes)(nil)).Elem() +} + +type ArrayOfHostDiskPartitionBlockRange struct { + HostDiskPartitionBlockRange []HostDiskPartitionBlockRange `xml:"HostDiskPartitionBlockRange,omitempty"` +} + +func init() { + t["ArrayOfHostDiskPartitionBlockRange"] = reflect.TypeOf((*ArrayOfHostDiskPartitionBlockRange)(nil)).Elem() +} + +type ArrayOfHostDiskPartitionInfo struct { + HostDiskPartitionInfo []HostDiskPartitionInfo `xml:"HostDiskPartitionInfo,omitempty"` +} + +func init() { + t["ArrayOfHostDiskPartitionInfo"] = reflect.TypeOf((*ArrayOfHostDiskPartitionInfo)(nil)).Elem() +} + +type ArrayOfHostEventArgument struct { + HostEventArgument []HostEventArgument `xml:"HostEventArgument,omitempty"` +} + +func init() { + t["ArrayOfHostEventArgument"] = reflect.TypeOf((*ArrayOfHostEventArgument)(nil)).Elem() +} + +type ArrayOfHostFeatureCapability struct { + HostFeatureCapability []HostFeatureCapability `xml:"HostFeatureCapability,omitempty"` +} + +func init() { + t["ArrayOfHostFeatureCapability"] = reflect.TypeOf((*ArrayOfHostFeatureCapability)(nil)).Elem() +} + +type ArrayOfHostFeatureMask struct { + HostFeatureMask []HostFeatureMask `xml:"HostFeatureMask,omitempty"` +} + +func init() { + t["ArrayOfHostFeatureMask"] = reflect.TypeOf((*ArrayOfHostFeatureMask)(nil)).Elem() +} + +type ArrayOfHostFeatureVersionInfo struct { + HostFeatureVersionInfo []HostFeatureVersionInfo `xml:"HostFeatureVersionInfo,omitempty"` +} + +func init() { + t["ArrayOfHostFeatureVersionInfo"] = reflect.TypeOf((*ArrayOfHostFeatureVersionInfo)(nil)).Elem() +} + +type ArrayOfHostFileSystemMountInfo struct { + HostFileSystemMountInfo []HostFileSystemMountInfo `xml:"HostFileSystemMountInfo,omitempty"` +} + +func init() { + t["ArrayOfHostFileSystemMountInfo"] = reflect.TypeOf((*ArrayOfHostFileSystemMountInfo)(nil)).Elem() +} + +type ArrayOfHostFirewallConfigRuleSetConfig struct { + HostFirewallConfigRuleSetConfig []HostFirewallConfigRuleSetConfig `xml:"HostFirewallConfigRuleSetConfig,omitempty"` +} + +func init() { + t["ArrayOfHostFirewallConfigRuleSetConfig"] = reflect.TypeOf((*ArrayOfHostFirewallConfigRuleSetConfig)(nil)).Elem() +} + +type ArrayOfHostFirewallRule struct { + HostFirewallRule []HostFirewallRule `xml:"HostFirewallRule,omitempty"` +} + +func init() { + t["ArrayOfHostFirewallRule"] = reflect.TypeOf((*ArrayOfHostFirewallRule)(nil)).Elem() +} + +type ArrayOfHostFirewallRuleset struct { + HostFirewallRuleset []HostFirewallRuleset `xml:"HostFirewallRuleset,omitempty"` +} + +func init() { + t["ArrayOfHostFirewallRuleset"] = reflect.TypeOf((*ArrayOfHostFirewallRuleset)(nil)).Elem() +} + +type ArrayOfHostFirewallRulesetIpNetwork struct { + HostFirewallRulesetIpNetwork []HostFirewallRulesetIpNetwork `xml:"HostFirewallRulesetIpNetwork,omitempty"` +} + +func init() { + t["ArrayOfHostFirewallRulesetIpNetwork"] = reflect.TypeOf((*ArrayOfHostFirewallRulesetIpNetwork)(nil)).Elem() +} + +type ArrayOfHostGraphicsInfo struct { + HostGraphicsInfo []HostGraphicsInfo `xml:"HostGraphicsInfo,omitempty"` +} + +func init() { + t["ArrayOfHostGraphicsInfo"] = reflect.TypeOf((*ArrayOfHostGraphicsInfo)(nil)).Elem() +} + +type ArrayOfHostHardwareElementInfo struct { + HostHardwareElementInfo []BaseHostHardwareElementInfo `xml:"HostHardwareElementInfo,omitempty,typeattr"` +} + +func init() { + t["ArrayOfHostHardwareElementInfo"] = reflect.TypeOf((*ArrayOfHostHardwareElementInfo)(nil)).Elem() +} + +type ArrayOfHostHostBusAdapter struct { + HostHostBusAdapter []BaseHostHostBusAdapter 
`xml:"HostHostBusAdapter,omitempty,typeattr"` +} + +func init() { + t["ArrayOfHostHostBusAdapter"] = reflect.TypeOf((*ArrayOfHostHostBusAdapter)(nil)).Elem() +} + +type ArrayOfHostInternetScsiHbaIscsiIpv6Address struct { + HostInternetScsiHbaIscsiIpv6Address []HostInternetScsiHbaIscsiIpv6Address `xml:"HostInternetScsiHbaIscsiIpv6Address,omitempty"` +} + +func init() { + t["ArrayOfHostInternetScsiHbaIscsiIpv6Address"] = reflect.TypeOf((*ArrayOfHostInternetScsiHbaIscsiIpv6Address)(nil)).Elem() +} + +type ArrayOfHostInternetScsiHbaParamValue struct { + HostInternetScsiHbaParamValue []HostInternetScsiHbaParamValue `xml:"HostInternetScsiHbaParamValue,omitempty"` +} + +func init() { + t["ArrayOfHostInternetScsiHbaParamValue"] = reflect.TypeOf((*ArrayOfHostInternetScsiHbaParamValue)(nil)).Elem() +} + +type ArrayOfHostInternetScsiHbaSendTarget struct { + HostInternetScsiHbaSendTarget []HostInternetScsiHbaSendTarget `xml:"HostInternetScsiHbaSendTarget,omitempty"` +} + +func init() { + t["ArrayOfHostInternetScsiHbaSendTarget"] = reflect.TypeOf((*ArrayOfHostInternetScsiHbaSendTarget)(nil)).Elem() +} + +type ArrayOfHostInternetScsiHbaStaticTarget struct { + HostInternetScsiHbaStaticTarget []HostInternetScsiHbaStaticTarget `xml:"HostInternetScsiHbaStaticTarget,omitempty"` +} + +func init() { + t["ArrayOfHostInternetScsiHbaStaticTarget"] = reflect.TypeOf((*ArrayOfHostInternetScsiHbaStaticTarget)(nil)).Elem() +} + +type ArrayOfHostIoFilterInfo struct { + HostIoFilterInfo []HostIoFilterInfo `xml:"HostIoFilterInfo,omitempty"` +} + +func init() { + t["ArrayOfHostIoFilterInfo"] = reflect.TypeOf((*ArrayOfHostIoFilterInfo)(nil)).Elem() +} + +type ArrayOfHostIpConfigIpV6Address struct { + HostIpConfigIpV6Address []HostIpConfigIpV6Address `xml:"HostIpConfigIpV6Address,omitempty"` +} + +func init() { + t["ArrayOfHostIpConfigIpV6Address"] = reflect.TypeOf((*ArrayOfHostIpConfigIpV6Address)(nil)).Elem() +} + +type ArrayOfHostIpRouteEntry struct { + HostIpRouteEntry []HostIpRouteEntry `xml:"HostIpRouteEntry,omitempty"` +} + +func init() { + t["ArrayOfHostIpRouteEntry"] = reflect.TypeOf((*ArrayOfHostIpRouteEntry)(nil)).Elem() +} + +type ArrayOfHostIpRouteOp struct { + HostIpRouteOp []HostIpRouteOp `xml:"HostIpRouteOp,omitempty"` +} + +func init() { + t["ArrayOfHostIpRouteOp"] = reflect.TypeOf((*ArrayOfHostIpRouteOp)(nil)).Elem() +} + +type ArrayOfHostLowLevelProvisioningManagerDiskLayoutSpec struct { + HostLowLevelProvisioningManagerDiskLayoutSpec []HostLowLevelProvisioningManagerDiskLayoutSpec `xml:"HostLowLevelProvisioningManagerDiskLayoutSpec,omitempty"` +} + +func init() { + t["ArrayOfHostLowLevelProvisioningManagerDiskLayoutSpec"] = reflect.TypeOf((*ArrayOfHostLowLevelProvisioningManagerDiskLayoutSpec)(nil)).Elem() +} + +type ArrayOfHostLowLevelProvisioningManagerFileDeleteResult struct { + HostLowLevelProvisioningManagerFileDeleteResult []HostLowLevelProvisioningManagerFileDeleteResult `xml:"HostLowLevelProvisioningManagerFileDeleteResult,omitempty"` +} + +func init() { + t["ArrayOfHostLowLevelProvisioningManagerFileDeleteResult"] = reflect.TypeOf((*ArrayOfHostLowLevelProvisioningManagerFileDeleteResult)(nil)).Elem() +} + +type ArrayOfHostLowLevelProvisioningManagerFileDeleteSpec struct { + HostLowLevelProvisioningManagerFileDeleteSpec []HostLowLevelProvisioningManagerFileDeleteSpec `xml:"HostLowLevelProvisioningManagerFileDeleteSpec,omitempty"` +} + +func init() { + t["ArrayOfHostLowLevelProvisioningManagerFileDeleteSpec"] = reflect.TypeOf((*ArrayOfHostLowLevelProvisioningManagerFileDeleteSpec)(nil)).Elem() +} 
+
+type ArrayOfHostLowLevelProvisioningManagerFileReserveResult struct {
+	HostLowLevelProvisioningManagerFileReserveResult []HostLowLevelProvisioningManagerFileReserveResult `xml:"HostLowLevelProvisioningManagerFileReserveResult,omitempty"`
+}
+
+func init() {
+	t["ArrayOfHostLowLevelProvisioningManagerFileReserveResult"] = reflect.TypeOf((*ArrayOfHostLowLevelProvisioningManagerFileReserveResult)(nil)).Elem()
+}
+
+type ArrayOfHostLowLevelProvisioningManagerFileReserveSpec struct {
+	HostLowLevelProvisioningManagerFileReserveSpec []HostLowLevelProvisioningManagerFileReserveSpec `xml:"HostLowLevelProvisioningManagerFileReserveSpec,omitempty"`
+}
+
+func init() {
+	t["ArrayOfHostLowLevelProvisioningManagerFileReserveSpec"] = reflect.TypeOf((*ArrayOfHostLowLevelProvisioningManagerFileReserveSpec)(nil)).Elem()
+}
+
+type ArrayOfHostLowLevelProvisioningManagerSnapshotLayoutSpec struct {
+	HostLowLevelProvisioningManagerSnapshotLayoutSpec []HostLowLevelProvisioningManagerSnapshotLayoutSpec `xml:"HostLowLevelProvisioningManagerSnapshotLayoutSpec,omitempty"`
+}
+
+func init() {
+	t["ArrayOfHostLowLevelProvisioningManagerSnapshotLayoutSpec"] = reflect.TypeOf((*ArrayOfHostLowLevelProvisioningManagerSnapshotLayoutSpec)(nil)).Elem()
+}
+
+type ArrayOfHostMemberHealthCheckResult struct {
+	HostMemberHealthCheckResult []BaseHostMemberHealthCheckResult `xml:"HostMemberHealthCheckResult,omitempty,typeattr"`
+}
+
+func init() {
+	t["ArrayOfHostMemberHealthCheckResult"] = reflect.TypeOf((*ArrayOfHostMemberHealthCheckResult)(nil)).Elem()
+}
+
+type ArrayOfHostMemberRuntimeInfo struct {
+	HostMemberRuntimeInfo []HostMemberRuntimeInfo `xml:"HostMemberRuntimeInfo,omitempty"`
+}
+
+func init() {
+	t["ArrayOfHostMemberRuntimeInfo"] = reflect.TypeOf((*ArrayOfHostMemberRuntimeInfo)(nil)).Elem()
+}
+
+type ArrayOfHostMultipathInfoLogicalUnit struct {
+	HostMultipathInfoLogicalUnit []HostMultipathInfoLogicalUnit `xml:"HostMultipathInfoLogicalUnit,omitempty"`
+}
+
+func init() {
+	t["ArrayOfHostMultipathInfoLogicalUnit"] = reflect.TypeOf((*ArrayOfHostMultipathInfoLogicalUnit)(nil)).Elem()
+}
+
+type ArrayOfHostMultipathInfoPath struct {
+	HostMultipathInfoPath []HostMultipathInfoPath `xml:"HostMultipathInfoPath,omitempty"`
+}
+
+func init() {
+	t["ArrayOfHostMultipathInfoPath"] = reflect.TypeOf((*ArrayOfHostMultipathInfoPath)(nil)).Elem()
+}
+
+type ArrayOfHostMultipathStateInfoPath struct {
+	HostMultipathStateInfoPath []HostMultipathStateInfoPath `xml:"HostMultipathStateInfoPath,omitempty"`
+}
+
+func init() {
+	t["ArrayOfHostMultipathStateInfoPath"] = reflect.TypeOf((*ArrayOfHostMultipathStateInfoPath)(nil)).Elem()
+}
+
+type ArrayOfHostNasVolumeConfig struct {
+	HostNasVolumeConfig []HostNasVolumeConfig `xml:"HostNasVolumeConfig,omitempty"`
+}
+
+func init() {
+	t["ArrayOfHostNasVolumeConfig"] = reflect.TypeOf((*ArrayOfHostNasVolumeConfig)(nil)).Elem()
+}
+
+type ArrayOfHostNatService struct {
+	HostNatService []HostNatService `xml:"HostNatService,omitempty"`
+}
+
+func init() {
+	t["ArrayOfHostNatService"] = reflect.TypeOf((*ArrayOfHostNatService)(nil)).Elem()
+}
+
+type ArrayOfHostNatServiceConfig struct {
+	HostNatServiceConfig []HostNatServiceConfig `xml:"HostNatServiceConfig,omitempty"`
+}
+
+func init() {
+	t["ArrayOfHostNatServiceConfig"] = reflect.TypeOf((*ArrayOfHostNatServiceConfig)(nil)).Elem()
+}
+
+type ArrayOfHostNatServicePortForwardSpec struct {
+	HostNatServicePortForwardSpec []HostNatServicePortForwardSpec `xml:"HostNatServicePortForwardSpec,omitempty"`
+}
+
+func init() {
+	t["ArrayOfHostNatServicePortForwardSpec"] = reflect.TypeOf((*ArrayOfHostNatServicePortForwardSpec)(nil)).Elem()
+}
+
+type ArrayOfHostNetStackInstance struct {
+	HostNetStackInstance []HostNetStackInstance `xml:"HostNetStackInstance,omitempty"`
+}
+
+func init() {
+	t["ArrayOfHostNetStackInstance"] = reflect.TypeOf((*ArrayOfHostNetStackInstance)(nil)).Elem()
+}
+
+type ArrayOfHostNetworkConfigNetStackSpec struct {
+	HostNetworkConfigNetStackSpec []HostNetworkConfigNetStackSpec `xml:"HostNetworkConfigNetStackSpec,omitempty"`
+}
+
+func init() {
+	t["ArrayOfHostNetworkConfigNetStackSpec"] = reflect.TypeOf((*ArrayOfHostNetworkConfigNetStackSpec)(nil)).Elem()
+}
+
+type ArrayOfHostNumaNode struct {
+	HostNumaNode []HostNumaNode `xml:"HostNumaNode,omitempty"`
+}
+
+func init() {
+	t["ArrayOfHostNumaNode"] = reflect.TypeOf((*ArrayOfHostNumaNode)(nil)).Elem()
+}
+
+type ArrayOfHostNumericSensorInfo struct {
+	HostNumericSensorInfo []HostNumericSensorInfo `xml:"HostNumericSensorInfo,omitempty"`
+}
+
+func init() {
+	t["ArrayOfHostNumericSensorInfo"] = reflect.TypeOf((*ArrayOfHostNumericSensorInfo)(nil)).Elem()
+}
+
+type ArrayOfHostOpaqueNetworkInfo struct {
+	HostOpaqueNetworkInfo []HostOpaqueNetworkInfo `xml:"HostOpaqueNetworkInfo,omitempty"`
+}
+
+func init() {
+	t["ArrayOfHostOpaqueNetworkInfo"] = reflect.TypeOf((*ArrayOfHostOpaqueNetworkInfo)(nil)).Elem()
+}
+
+type ArrayOfHostOpaqueSwitch struct {
+	HostOpaqueSwitch []HostOpaqueSwitch `xml:"HostOpaqueSwitch,omitempty"`
+}
+
+func init() {
+	t["ArrayOfHostOpaqueSwitch"] = reflect.TypeOf((*ArrayOfHostOpaqueSwitch)(nil)).Elem()
+}
+
+type ArrayOfHostOpaqueSwitchPhysicalNicZone struct {
+	HostOpaqueSwitchPhysicalNicZone []HostOpaqueSwitchPhysicalNicZone `xml:"HostOpaqueSwitchPhysicalNicZone,omitempty"`
+}
+
+func init() {
+	t["ArrayOfHostOpaqueSwitchPhysicalNicZone"] = reflect.TypeOf((*ArrayOfHostOpaqueSwitchPhysicalNicZone)(nil)).Elem()
+}
+
+type ArrayOfHostPatchManagerStatus struct {
+	HostPatchManagerStatus []HostPatchManagerStatus `xml:"HostPatchManagerStatus,omitempty"`
+}
+
+func init() {
+	t["ArrayOfHostPatchManagerStatus"] = reflect.TypeOf((*ArrayOfHostPatchManagerStatus)(nil)).Elem()
+}
+
+type ArrayOfHostPatchManagerStatusPrerequisitePatch struct {
+	HostPatchManagerStatusPrerequisitePatch []HostPatchManagerStatusPrerequisitePatch `xml:"HostPatchManagerStatusPrerequisitePatch,omitempty"`
+}
+
+func init() {
+	t["ArrayOfHostPatchManagerStatusPrerequisitePatch"] = reflect.TypeOf((*ArrayOfHostPatchManagerStatusPrerequisitePatch)(nil)).Elem()
+}
+
+type ArrayOfHostPathSelectionPolicyOption struct {
+	HostPathSelectionPolicyOption []HostPathSelectionPolicyOption `xml:"HostPathSelectionPolicyOption,omitempty"`
+}
+
+func init() {
+	t["ArrayOfHostPathSelectionPolicyOption"] = reflect.TypeOf((*ArrayOfHostPathSelectionPolicyOption)(nil)).Elem()
+}
+
+type ArrayOfHostPciDevice struct {
+	HostPciDevice []HostPciDevice `xml:"HostPciDevice,omitempty"`
+}
+
+func init() {
+	t["ArrayOfHostPciDevice"] = reflect.TypeOf((*ArrayOfHostPciDevice)(nil)).Elem()
+}
+
+type ArrayOfHostPciPassthruConfig struct {
+	HostPciPassthruConfig []BaseHostPciPassthruConfig `xml:"HostPciPassthruConfig,omitempty,typeattr"`
+}
+
+func init() {
+	t["ArrayOfHostPciPassthruConfig"] = reflect.TypeOf((*ArrayOfHostPciPassthruConfig)(nil)).Elem()
+}
+
+type ArrayOfHostPciPassthruInfo struct {
+	HostPciPassthruInfo []BaseHostPciPassthruInfo `xml:"HostPciPassthruInfo,omitempty,typeattr"`
+}
+
+func init() {
+	t["ArrayOfHostPciPassthruInfo"] = reflect.TypeOf((*ArrayOfHostPciPassthruInfo)(nil)).Elem()
+}
+
+type ArrayOfHostPlacedVirtualNicIdentifier struct {
+	HostPlacedVirtualNicIdentifier []HostPlacedVirtualNicIdentifier `xml:"HostPlacedVirtualNicIdentifier,omitempty"`
+}
+
+func init() {
+	t["ArrayOfHostPlacedVirtualNicIdentifier"] = reflect.TypeOf((*ArrayOfHostPlacedVirtualNicIdentifier)(nil)).Elem()
+}
+
+type ArrayOfHostPlugStoreTopologyAdapter struct {
+	HostPlugStoreTopologyAdapter []HostPlugStoreTopologyAdapter `xml:"HostPlugStoreTopologyAdapter,omitempty"`
+}
+
+func init() {
+	t["ArrayOfHostPlugStoreTopologyAdapter"] = reflect.TypeOf((*ArrayOfHostPlugStoreTopologyAdapter)(nil)).Elem()
+}
+
+type ArrayOfHostPlugStoreTopologyDevice struct {
+	HostPlugStoreTopologyDevice []HostPlugStoreTopologyDevice `xml:"HostPlugStoreTopologyDevice,omitempty"`
+}
+
+func init() {
+	t["ArrayOfHostPlugStoreTopologyDevice"] = reflect.TypeOf((*ArrayOfHostPlugStoreTopologyDevice)(nil)).Elem()
+}
+
+type ArrayOfHostPlugStoreTopologyPath struct {
+	HostPlugStoreTopologyPath []HostPlugStoreTopologyPath `xml:"HostPlugStoreTopologyPath,omitempty"`
+}
+
+func init() {
+	t["ArrayOfHostPlugStoreTopologyPath"] = reflect.TypeOf((*ArrayOfHostPlugStoreTopologyPath)(nil)).Elem()
+}
+
+type ArrayOfHostPlugStoreTopologyPlugin struct {
+	HostPlugStoreTopologyPlugin []HostPlugStoreTopologyPlugin `xml:"HostPlugStoreTopologyPlugin,omitempty"`
+}
+
+func init() {
+	t["ArrayOfHostPlugStoreTopologyPlugin"] = reflect.TypeOf((*ArrayOfHostPlugStoreTopologyPlugin)(nil)).Elem()
+}
+
+type ArrayOfHostPlugStoreTopologyTarget struct {
+	HostPlugStoreTopologyTarget []HostPlugStoreTopologyTarget `xml:"HostPlugStoreTopologyTarget,omitempty"`
+}
+
+func init() {
+	t["ArrayOfHostPlugStoreTopologyTarget"] = reflect.TypeOf((*ArrayOfHostPlugStoreTopologyTarget)(nil)).Elem()
+}
+
+type ArrayOfHostPnicNetworkResourceInfo struct {
+	HostPnicNetworkResourceInfo []HostPnicNetworkResourceInfo `xml:"HostPnicNetworkResourceInfo,omitempty"`
+}
+
+func init() {
+	t["ArrayOfHostPnicNetworkResourceInfo"] = reflect.TypeOf((*ArrayOfHostPnicNetworkResourceInfo)(nil)).Elem()
+}
+
+type ArrayOfHostPortGroup struct {
+	HostPortGroup []HostPortGroup `xml:"HostPortGroup,omitempty"`
+}
+
+func init() {
+	t["ArrayOfHostPortGroup"] = reflect.TypeOf((*ArrayOfHostPortGroup)(nil)).Elem()
+}
+
+type ArrayOfHostPortGroupConfig struct {
+	HostPortGroupConfig []HostPortGroupConfig `xml:"HostPortGroupConfig,omitempty"`
+}
+
+func init() {
+	t["ArrayOfHostPortGroupConfig"] = reflect.TypeOf((*ArrayOfHostPortGroupConfig)(nil)).Elem()
+}
+
+type ArrayOfHostPortGroupPort struct {
+	HostPortGroupPort []HostPortGroupPort `xml:"HostPortGroupPort,omitempty"`
+}
+
+func init() {
+	t["ArrayOfHostPortGroupPort"] = reflect.TypeOf((*ArrayOfHostPortGroupPort)(nil)).Elem()
+}
+
+type ArrayOfHostPortGroupProfile struct {
+	HostPortGroupProfile []HostPortGroupProfile `xml:"HostPortGroupProfile,omitempty"`
+}
+
+func init() {
+	t["ArrayOfHostPortGroupProfile"] = reflect.TypeOf((*ArrayOfHostPortGroupProfile)(nil)).Elem()
+}
+
+type ArrayOfHostPowerPolicy struct {
+	HostPowerPolicy []HostPowerPolicy `xml:"HostPowerPolicy,omitempty"`
+}
+
+func init() {
+	t["ArrayOfHostPowerPolicy"] = reflect.TypeOf((*ArrayOfHostPowerPolicy)(nil)).Elem()
+}
+
+type ArrayOfHostProtocolEndpoint struct {
+	HostProtocolEndpoint []HostProtocolEndpoint `xml:"HostProtocolEndpoint,omitempty"`
+}
+
+func init() {
+	t["ArrayOfHostProtocolEndpoint"] = reflect.TypeOf((*ArrayOfHostProtocolEndpoint)(nil)).Elem()
+}
+
+type ArrayOfHostProxySwitch struct {
+	HostProxySwitch []HostProxySwitch `xml:"HostProxySwitch,omitempty"`
+}
+
+func init() {
+	t["ArrayOfHostProxySwitch"] = reflect.TypeOf((*ArrayOfHostProxySwitch)(nil)).Elem()
+}
+
+type ArrayOfHostProxySwitchConfig struct {
+	HostProxySwitchConfig []HostProxySwitchConfig `xml:"HostProxySwitchConfig,omitempty"`
+}
+
+func init() {
+	t["ArrayOfHostProxySwitchConfig"] = reflect.TypeOf((*ArrayOfHostProxySwitchConfig)(nil)).Elem()
+}
+
+type ArrayOfHostProxySwitchHostLagConfig struct {
+	HostProxySwitchHostLagConfig []HostProxySwitchHostLagConfig `xml:"HostProxySwitchHostLagConfig,omitempty"`
+}
+
+func init() {
+	t["ArrayOfHostProxySwitchHostLagConfig"] = reflect.TypeOf((*ArrayOfHostProxySwitchHostLagConfig)(nil)).Elem()
+}
+
+type ArrayOfHostRuntimeInfoNetStackInstanceRuntimeInfo struct {
+	HostRuntimeInfoNetStackInstanceRuntimeInfo []HostRuntimeInfoNetStackInstanceRuntimeInfo `xml:"HostRuntimeInfoNetStackInstanceRuntimeInfo,omitempty"`
+}
+
+func init() {
+	t["ArrayOfHostRuntimeInfoNetStackInstanceRuntimeInfo"] = reflect.TypeOf((*ArrayOfHostRuntimeInfoNetStackInstanceRuntimeInfo)(nil)).Elem()
+}
+
+type ArrayOfHostScsiDisk struct {
+	HostScsiDisk []HostScsiDisk `xml:"HostScsiDisk,omitempty"`
+}
+
+func init() {
+	t["ArrayOfHostScsiDisk"] = reflect.TypeOf((*ArrayOfHostScsiDisk)(nil)).Elem()
+}
+
+type ArrayOfHostScsiDiskPartition struct {
+	HostScsiDiskPartition []HostScsiDiskPartition `xml:"HostScsiDiskPartition,omitempty"`
+}
+
+func init() {
+	t["ArrayOfHostScsiDiskPartition"] = reflect.TypeOf((*ArrayOfHostScsiDiskPartition)(nil)).Elem()
+}
+
+type ArrayOfHostScsiTopologyInterface struct {
+	HostScsiTopologyInterface []HostScsiTopologyInterface `xml:"HostScsiTopologyInterface,omitempty"`
+}
+
+func init() {
+	t["ArrayOfHostScsiTopologyInterface"] = reflect.TypeOf((*ArrayOfHostScsiTopologyInterface)(nil)).Elem()
+}
+
+type ArrayOfHostScsiTopologyLun struct {
+	HostScsiTopologyLun []HostScsiTopologyLun `xml:"HostScsiTopologyLun,omitempty"`
+}
+
+func init() {
+	t["ArrayOfHostScsiTopologyLun"] = reflect.TypeOf((*ArrayOfHostScsiTopologyLun)(nil)).Elem()
+}
+
+type ArrayOfHostScsiTopologyTarget struct {
+	HostScsiTopologyTarget []HostScsiTopologyTarget `xml:"HostScsiTopologyTarget,omitempty"`
+}
+
+func init() {
+	t["ArrayOfHostScsiTopologyTarget"] = reflect.TypeOf((*ArrayOfHostScsiTopologyTarget)(nil)).Elem()
+}
+
+type ArrayOfHostService struct {
+	HostService []HostService `xml:"HostService,omitempty"`
+}
+
+func init() {
+	t["ArrayOfHostService"] = reflect.TypeOf((*ArrayOfHostService)(nil)).Elem()
+}
+
+type ArrayOfHostServiceConfig struct {
+	HostServiceConfig []HostServiceConfig `xml:"HostServiceConfig,omitempty"`
+}
+
+func init() {
+	t["ArrayOfHostServiceConfig"] = reflect.TypeOf((*ArrayOfHostServiceConfig)(nil)).Elem()
+}
+
+type ArrayOfHostSnmpDestination struct {
+	HostSnmpDestination []HostSnmpDestination `xml:"HostSnmpDestination,omitempty"`
+}
+
+func init() {
+	t["ArrayOfHostSnmpDestination"] = reflect.TypeOf((*ArrayOfHostSnmpDestination)(nil)).Elem()
+}
+
+type ArrayOfHostSslThumbprintInfo struct {
+	HostSslThumbprintInfo []HostSslThumbprintInfo `xml:"HostSslThumbprintInfo,omitempty"`
+}
+
+func init() {
+	t["ArrayOfHostSslThumbprintInfo"] = reflect.TypeOf((*ArrayOfHostSslThumbprintInfo)(nil)).Elem()
+}
+
+type ArrayOfHostStorageArrayTypePolicyOption struct {
+	HostStorageArrayTypePolicyOption []HostStorageArrayTypePolicyOption `xml:"HostStorageArrayTypePolicyOption,omitempty"`
+}
+
+func init() {
+	t["ArrayOfHostStorageArrayTypePolicyOption"] = reflect.TypeOf((*ArrayOfHostStorageArrayTypePolicyOption)(nil)).Elem()
+}
+
+type ArrayOfHostStorageElementInfo struct {
+	HostStorageElementInfo []HostStorageElementInfo `xml:"HostStorageElementInfo,omitempty"`
+}
+
+func init() {
+	t["ArrayOfHostStorageElementInfo"] = reflect.TypeOf((*ArrayOfHostStorageElementInfo)(nil)).Elem()
+}
+
+type ArrayOfHostStorageOperationalInfo struct {
+	HostStorageOperationalInfo []HostStorageOperationalInfo `xml:"HostStorageOperationalInfo,omitempty"`
+}
+
+func init() {
+	t["ArrayOfHostStorageOperationalInfo"] = reflect.TypeOf((*ArrayOfHostStorageOperationalInfo)(nil)).Elem()
+}
+
+type ArrayOfHostStorageSystemDiskLocatorLedResult struct {
+	HostStorageSystemDiskLocatorLedResult []HostStorageSystemDiskLocatorLedResult `xml:"HostStorageSystemDiskLocatorLedResult,omitempty"`
+}
+
+func init() {
+	t["ArrayOfHostStorageSystemDiskLocatorLedResult"] = reflect.TypeOf((*ArrayOfHostStorageSystemDiskLocatorLedResult)(nil)).Elem()
+}
+
+type ArrayOfHostStorageSystemScsiLunResult struct {
+	HostStorageSystemScsiLunResult []HostStorageSystemScsiLunResult `xml:"HostStorageSystemScsiLunResult,omitempty"`
+}
+
+func init() {
+	t["ArrayOfHostStorageSystemScsiLunResult"] = reflect.TypeOf((*ArrayOfHostStorageSystemScsiLunResult)(nil)).Elem()
+}
+
+type ArrayOfHostStorageSystemVmfsVolumeResult struct {
+	HostStorageSystemVmfsVolumeResult []HostStorageSystemVmfsVolumeResult `xml:"HostStorageSystemVmfsVolumeResult,omitempty"`
+}
+
+func init() {
+	t["ArrayOfHostStorageSystemVmfsVolumeResult"] = reflect.TypeOf((*ArrayOfHostStorageSystemVmfsVolumeResult)(nil)).Elem()
+}
+
+type ArrayOfHostSystemIdentificationInfo struct {
+	HostSystemIdentificationInfo []HostSystemIdentificationInfo `xml:"HostSystemIdentificationInfo,omitempty"`
+}
+
+func init() {
+	t["ArrayOfHostSystemIdentificationInfo"] = reflect.TypeOf((*ArrayOfHostSystemIdentificationInfo)(nil)).Elem()
+}
+
+type ArrayOfHostSystemResourceInfo struct {
+	HostSystemResourceInfo []HostSystemResourceInfo `xml:"HostSystemResourceInfo,omitempty"`
+}
+
+func init() {
+	t["ArrayOfHostSystemResourceInfo"] = reflect.TypeOf((*ArrayOfHostSystemResourceInfo)(nil)).Elem()
+}
+
+type ArrayOfHostSystemSwapConfigurationSystemSwapOption struct {
+	HostSystemSwapConfigurationSystemSwapOption []BaseHostSystemSwapConfigurationSystemSwapOption `xml:"HostSystemSwapConfigurationSystemSwapOption,omitempty,typeattr"`
+}
+
+func init() {
+	t["ArrayOfHostSystemSwapConfigurationSystemSwapOption"] = reflect.TypeOf((*ArrayOfHostSystemSwapConfigurationSystemSwapOption)(nil)).Elem()
+}
+
+type ArrayOfHostTpmDigestInfo struct {
+	HostTpmDigestInfo []HostTpmDigestInfo `xml:"HostTpmDigestInfo,omitempty"`
+}
+
+func init() {
+	t["ArrayOfHostTpmDigestInfo"] = reflect.TypeOf((*ArrayOfHostTpmDigestInfo)(nil)).Elem()
+}
+
+type ArrayOfHostTpmEventLogEntry struct {
+	HostTpmEventLogEntry []HostTpmEventLogEntry `xml:"HostTpmEventLogEntry,omitempty"`
+}
+
+func init() {
+	t["ArrayOfHostTpmEventLogEntry"] = reflect.TypeOf((*ArrayOfHostTpmEventLogEntry)(nil)).Elem()
+}
+
+type ArrayOfHostUnresolvedVmfsExtent struct {
+	HostUnresolvedVmfsExtent []HostUnresolvedVmfsExtent `xml:"HostUnresolvedVmfsExtent,omitempty"`
+}
+
+func init() {
+	t["ArrayOfHostUnresolvedVmfsExtent"] = reflect.TypeOf((*ArrayOfHostUnresolvedVmfsExtent)(nil)).Elem()
+}
+
+type ArrayOfHostUnresolvedVmfsResolutionResult struct {
+	HostUnresolvedVmfsResolutionResult []HostUnresolvedVmfsResolutionResult `xml:"HostUnresolvedVmfsResolutionResult,omitempty"`
+}
+
+func init() {
+	t["ArrayOfHostUnresolvedVmfsResolutionResult"] = reflect.TypeOf((*ArrayOfHostUnresolvedVmfsResolutionResult)(nil)).Elem()
+}
+
+type ArrayOfHostUnresolvedVmfsResolutionSpec struct {
+	HostUnresolvedVmfsResolutionSpec []HostUnresolvedVmfsResolutionSpec `xml:"HostUnresolvedVmfsResolutionSpec,omitempty"`
+}
+
+func init() {
+	t["ArrayOfHostUnresolvedVmfsResolutionSpec"] = reflect.TypeOf((*ArrayOfHostUnresolvedVmfsResolutionSpec)(nil)).Elem()
+}
+
+type ArrayOfHostUnresolvedVmfsVolume struct {
+	HostUnresolvedVmfsVolume []HostUnresolvedVmfsVolume `xml:"HostUnresolvedVmfsVolume,omitempty"`
+}
+
+func init() {
+	t["ArrayOfHostUnresolvedVmfsVolume"] = reflect.TypeOf((*ArrayOfHostUnresolvedVmfsVolume)(nil)).Elem()
+}
+
+type ArrayOfHostVFlashManagerVFlashCacheConfigInfoVFlashModuleConfigOption struct {
+	HostVFlashManagerVFlashCacheConfigInfoVFlashModuleConfigOption []HostVFlashManagerVFlashCacheConfigInfoVFlashModuleConfigOption `xml:"HostVFlashManagerVFlashCacheConfigInfoVFlashModuleConfigOption,omitempty"`
+}
+
+func init() {
+	t["ArrayOfHostVFlashManagerVFlashCacheConfigInfoVFlashModuleConfigOption"] = reflect.TypeOf((*ArrayOfHostVFlashManagerVFlashCacheConfigInfoVFlashModuleConfigOption)(nil)).Elem()
+}
+
+type ArrayOfHostVMotionCompatibility struct {
+	HostVMotionCompatibility []HostVMotionCompatibility `xml:"HostVMotionCompatibility,omitempty"`
+}
+
+func init() {
+	t["ArrayOfHostVMotionCompatibility"] = reflect.TypeOf((*ArrayOfHostVMotionCompatibility)(nil)).Elem()
+}
+
+type ArrayOfHostVirtualNic struct {
+	HostVirtualNic []HostVirtualNic `xml:"HostVirtualNic,omitempty"`
+}
+
+func init() {
+	t["ArrayOfHostVirtualNic"] = reflect.TypeOf((*ArrayOfHostVirtualNic)(nil)).Elem()
+}
+
+type ArrayOfHostVirtualNicConfig struct {
+	HostVirtualNicConfig []HostVirtualNicConfig `xml:"HostVirtualNicConfig,omitempty"`
+}
+
+func init() {
+	t["ArrayOfHostVirtualNicConfig"] = reflect.TypeOf((*ArrayOfHostVirtualNicConfig)(nil)).Elem()
+}
+
+type ArrayOfHostVirtualNicManagerNicTypeSelection struct {
+	HostVirtualNicManagerNicTypeSelection []HostVirtualNicManagerNicTypeSelection `xml:"HostVirtualNicManagerNicTypeSelection,omitempty"`
+}
+
+func init() {
+	t["ArrayOfHostVirtualNicManagerNicTypeSelection"] = reflect.TypeOf((*ArrayOfHostVirtualNicManagerNicTypeSelection)(nil)).Elem()
+}
+
+type ArrayOfHostVirtualSwitch struct {
+	HostVirtualSwitch []HostVirtualSwitch `xml:"HostVirtualSwitch,omitempty"`
+}
+
+func init() {
+	t["ArrayOfHostVirtualSwitch"] = reflect.TypeOf((*ArrayOfHostVirtualSwitch)(nil)).Elem()
+}
+
+type ArrayOfHostVirtualSwitchConfig struct {
+	HostVirtualSwitchConfig []HostVirtualSwitchConfig `xml:"HostVirtualSwitchConfig,omitempty"`
+}
+
+func init() {
+	t["ArrayOfHostVirtualSwitchConfig"] = reflect.TypeOf((*ArrayOfHostVirtualSwitchConfig)(nil)).Elem()
+}
+
+type ArrayOfHostVmciAccessManagerAccessSpec struct {
+	HostVmciAccessManagerAccessSpec []HostVmciAccessManagerAccessSpec `xml:"HostVmciAccessManagerAccessSpec,omitempty"`
+}
+
+func init() {
+	t["ArrayOfHostVmciAccessManagerAccessSpec"] = reflect.TypeOf((*ArrayOfHostVmciAccessManagerAccessSpec)(nil)).Elem()
+}
+
+type ArrayOfHostVmfsRescanResult struct {
+	HostVmfsRescanResult []HostVmfsRescanResult `xml:"HostVmfsRescanResult,omitempty"`
+}
+
+func init() {
+	t["ArrayOfHostVmfsRescanResult"] = reflect.TypeOf((*ArrayOfHostVmfsRescanResult)(nil)).Elem()
+}
+
+type ArrayOfHostVsanInternalSystemCmmdsQuery struct {
+	HostVsanInternalSystemCmmdsQuery []HostVsanInternalSystemCmmdsQuery `xml:"HostVsanInternalSystemCmmdsQuery,omitempty"`
+}
+
+func init() {
+	t["ArrayOfHostVsanInternalSystemCmmdsQuery"] = reflect.TypeOf((*ArrayOfHostVsanInternalSystemCmmdsQuery)(nil)).Elem()
+}
+
+type ArrayOfHostVsanInternalSystemDeleteVsanObjectsResult struct {
+	HostVsanInternalSystemDeleteVsanObjectsResult []HostVsanInternalSystemDeleteVsanObjectsResult `xml:"HostVsanInternalSystemDeleteVsanObjectsResult,omitempty"`
+}
+
+func init() {
+	t["ArrayOfHostVsanInternalSystemDeleteVsanObjectsResult"] = reflect.TypeOf((*ArrayOfHostVsanInternalSystemDeleteVsanObjectsResult)(nil)).Elem()
+}
+
+type ArrayOfHostVsanInternalSystemVsanObjectOperationResult struct {
+	HostVsanInternalSystemVsanObjectOperationResult []HostVsanInternalSystemVsanObjectOperationResult `xml:"HostVsanInternalSystemVsanObjectOperationResult,omitempty"`
+}
+
+func init() {
+	t["ArrayOfHostVsanInternalSystemVsanObjectOperationResult"] = reflect.TypeOf((*ArrayOfHostVsanInternalSystemVsanObjectOperationResult)(nil)).Elem()
+}
+
+type ArrayOfHostVsanInternalSystemVsanPhysicalDiskDiagnosticsResult struct {
+	HostVsanInternalSystemVsanPhysicalDiskDiagnosticsResult []HostVsanInternalSystemVsanPhysicalDiskDiagnosticsResult `xml:"HostVsanInternalSystemVsanPhysicalDiskDiagnosticsResult,omitempty"`
+}
+
+func init() {
+	t["ArrayOfHostVsanInternalSystemVsanPhysicalDiskDiagnosticsResult"] = reflect.TypeOf((*ArrayOfHostVsanInternalSystemVsanPhysicalDiskDiagnosticsResult)(nil)).Elem()
+}
+
+type ArrayOfHttpNfcLeaseDatastoreLeaseInfo struct {
+	HttpNfcLeaseDatastoreLeaseInfo []HttpNfcLeaseDatastoreLeaseInfo `xml:"HttpNfcLeaseDatastoreLeaseInfo,omitempty"`
+}
+
+func init() {
+	t["ArrayOfHttpNfcLeaseDatastoreLeaseInfo"] = reflect.TypeOf((*ArrayOfHttpNfcLeaseDatastoreLeaseInfo)(nil)).Elem()
+}
+
+type ArrayOfHttpNfcLeaseDeviceUrl struct {
+	HttpNfcLeaseDeviceUrl []HttpNfcLeaseDeviceUrl `xml:"HttpNfcLeaseDeviceUrl,omitempty"`
+}
+
+func init() {
+	t["ArrayOfHttpNfcLeaseDeviceUrl"] = reflect.TypeOf((*ArrayOfHttpNfcLeaseDeviceUrl)(nil)).Elem()
+}
+
+type ArrayOfHttpNfcLeaseHostInfo struct {
+	HttpNfcLeaseHostInfo []HttpNfcLeaseHostInfo `xml:"HttpNfcLeaseHostInfo,omitempty"`
+}
+
+func init() {
+	t["ArrayOfHttpNfcLeaseHostInfo"] = reflect.TypeOf((*ArrayOfHttpNfcLeaseHostInfo)(nil)).Elem()
+}
+
+type ArrayOfHttpNfcLeaseManifestEntry struct {
+	HttpNfcLeaseManifestEntry []HttpNfcLeaseManifestEntry `xml:"HttpNfcLeaseManifestEntry,omitempty"`
+}
+
+func init() {
+	t["ArrayOfHttpNfcLeaseManifestEntry"] = reflect.TypeOf((*ArrayOfHttpNfcLeaseManifestEntry)(nil)).Elem()
+}
+
+type ArrayOfImportOperationBulkFaultFaultOnImport struct {
+	ImportOperationBulkFaultFaultOnImport []ImportOperationBulkFaultFaultOnImport `xml:"ImportOperationBulkFaultFaultOnImport,omitempty"`
+}
+
+func init() {
+	t["ArrayOfImportOperationBulkFaultFaultOnImport"] = reflect.TypeOf((*ArrayOfImportOperationBulkFaultFaultOnImport)(nil)).Elem()
+}
+
+type ArrayOfImportSpec struct {
+	ImportSpec []BaseImportSpec `xml:"ImportSpec,omitempty,typeattr"`
+}
+
+func init() {
+	t["ArrayOfImportSpec"] = reflect.TypeOf((*ArrayOfImportSpec)(nil)).Elem()
+}
+
+type ArrayOfInt struct {
+	Int []int `xml:"int,omitempty"`
+}
+
+func init() {
+	t["ArrayOfInt"] = reflect.TypeOf((*ArrayOfInt)(nil)).Elem()
+}
+
+type ArrayOfIoFilterHostIssue struct {
+	IoFilterHostIssue []IoFilterHostIssue `xml:"IoFilterHostIssue,omitempty"`
+}
+
+func init() {
+	t["ArrayOfIoFilterHostIssue"] = reflect.TypeOf((*ArrayOfIoFilterHostIssue)(nil)).Elem()
+}
+
+type ArrayOfIpPool struct {
+	IpPool []IpPool `xml:"IpPool,omitempty"`
+}
+
+func init() {
+	t["ArrayOfIpPool"] = reflect.TypeOf((*ArrayOfIpPool)(nil)).Elem()
+}
+
+type ArrayOfIpPoolAssociation struct {
+	IpPoolAssociation []IpPoolAssociation `xml:"IpPoolAssociation,omitempty"`
+}
+
+func init() {
+	t["ArrayOfIpPoolAssociation"] = reflect.TypeOf((*ArrayOfIpPoolAssociation)(nil)).Elem()
+}
+
+type ArrayOfIpPoolManagerIpAllocation struct {
+	IpPoolManagerIpAllocation []IpPoolManagerIpAllocation `xml:"IpPoolManagerIpAllocation,omitempty"`
+}
+
+func init() {
+	t["ArrayOfIpPoolManagerIpAllocation"] = reflect.TypeOf((*ArrayOfIpPoolManagerIpAllocation)(nil)).Elem()
+}
+
+type ArrayOfIscsiDependencyEntity struct {
+	IscsiDependencyEntity []IscsiDependencyEntity `xml:"IscsiDependencyEntity,omitempty"`
+}
+
+func init() {
+	t["ArrayOfIscsiDependencyEntity"] = reflect.TypeOf((*ArrayOfIscsiDependencyEntity)(nil)).Elem()
+}
+
+type ArrayOfIscsiPortInfo struct {
+	IscsiPortInfo []IscsiPortInfo `xml:"IscsiPortInfo,omitempty"`
+}
+
+func init() {
+	t["ArrayOfIscsiPortInfo"] = reflect.TypeOf((*ArrayOfIscsiPortInfo)(nil)).Elem()
+}
+
+type ArrayOfKernelModuleInfo struct {
+	KernelModuleInfo []KernelModuleInfo `xml:"KernelModuleInfo,omitempty"`
+}
+
+func init() {
+	t["ArrayOfKernelModuleInfo"] = reflect.TypeOf((*ArrayOfKernelModuleInfo)(nil)).Elem()
+}
+
+type ArrayOfKeyAnyValue struct {
+	KeyAnyValue []KeyAnyValue `xml:"KeyAnyValue,omitempty"`
+}
+
+func init() {
+	t["ArrayOfKeyAnyValue"] = reflect.TypeOf((*ArrayOfKeyAnyValue)(nil)).Elem()
+}
+
+type ArrayOfKeyValue struct {
+	KeyValue []KeyValue `xml:"KeyValue,omitempty"`
+}
+
+func init() {
+	t["ArrayOfKeyValue"] = reflect.TypeOf((*ArrayOfKeyValue)(nil)).Elem()
+}
+
+type ArrayOfLicenseAssignmentManagerLicenseAssignment struct {
+	LicenseAssignmentManagerLicenseAssignment []LicenseAssignmentManagerLicenseAssignment `xml:"LicenseAssignmentManagerLicenseAssignment,omitempty"`
+}
+
+func init() {
+	t["ArrayOfLicenseAssignmentManagerLicenseAssignment"] = reflect.TypeOf((*ArrayOfLicenseAssignmentManagerLicenseAssignment)(nil)).Elem()
+}
+
+type ArrayOfLicenseAvailabilityInfo struct {
+	LicenseAvailabilityInfo []LicenseAvailabilityInfo `xml:"LicenseAvailabilityInfo,omitempty"`
+}
+
+func init() {
+	t["ArrayOfLicenseAvailabilityInfo"] = reflect.TypeOf((*ArrayOfLicenseAvailabilityInfo)(nil)).Elem()
+}
+
+type ArrayOfLicenseFeatureInfo struct {
+	LicenseFeatureInfo []LicenseFeatureInfo `xml:"LicenseFeatureInfo,omitempty"`
+}
+
+func init() {
+	t["ArrayOfLicenseFeatureInfo"] = reflect.TypeOf((*ArrayOfLicenseFeatureInfo)(nil)).Elem()
+}
+
+type ArrayOfLicenseManagerLicenseInfo struct {
+	LicenseManagerLicenseInfo []LicenseManagerLicenseInfo `xml:"LicenseManagerLicenseInfo,omitempty"`
+}
+
+func init() {
+	t["ArrayOfLicenseManagerLicenseInfo"] = reflect.TypeOf((*ArrayOfLicenseManagerLicenseInfo)(nil)).Elem()
+}
+
+type ArrayOfLicenseReservationInfo struct {
+	LicenseReservationInfo []LicenseReservationInfo `xml:"LicenseReservationInfo,omitempty"`
+}
+
+func init() {
+	t["ArrayOfLicenseReservationInfo"] = reflect.TypeOf((*ArrayOfLicenseReservationInfo)(nil)).Elem()
+}
+
+type ArrayOfLocalizableMessage struct {
+	LocalizableMessage []LocalizableMessage `xml:"LocalizableMessage,omitempty"`
+}
+
+func init() {
+	t["ArrayOfLocalizableMessage"] = reflect.TypeOf((*ArrayOfLocalizableMessage)(nil)).Elem()
+}
+
+type ArrayOfLocalizationManagerMessageCatalog struct {
+	LocalizationManagerMessageCatalog []LocalizationManagerMessageCatalog `xml:"LocalizationManagerMessageCatalog,omitempty"`
+}
+
+func init() {
+	t["ArrayOfLocalizationManagerMessageCatalog"] = reflect.TypeOf((*ArrayOfLocalizationManagerMessageCatalog)(nil)).Elem()
+}
+
+type ArrayOfLong struct {
+	Long []int64 `xml:"long,omitempty"`
+}
+
+func init() {
+	t["ArrayOfLong"] = reflect.TypeOf((*ArrayOfLong)(nil)).Elem()
+}
+
+type ArrayOfManagedObjectReference struct {
+	ManagedObjectReference []ManagedObjectReference `xml:"ManagedObjectReference,omitempty"`
+}
+
+func init() {
+	t["ArrayOfManagedObjectReference"] = reflect.TypeOf((*ArrayOfManagedObjectReference)(nil)).Elem()
+}
+
+type ArrayOfMethodActionArgument struct {
+	MethodActionArgument []MethodActionArgument `xml:"MethodActionArgument,omitempty"`
+}
+
+func init() {
+	t["ArrayOfMethodActionArgument"] = reflect.TypeOf((*ArrayOfMethodActionArgument)(nil)).Elem()
+}
+
+type ArrayOfMethodFault struct {
+	MethodFault []BaseMethodFault `xml:"MethodFault,omitempty,typeattr"`
+}
+
+func init() {
+	t["ArrayOfMethodFault"] = reflect.TypeOf((*ArrayOfMethodFault)(nil)).Elem()
+}
+
+type ArrayOfMissingObject struct {
+	MissingObject []MissingObject `xml:"MissingObject,omitempty"`
+}
+
+func init() {
+	t["ArrayOfMissingObject"] = reflect.TypeOf((*ArrayOfMissingObject)(nil)).Elem()
+}
+
+type ArrayOfMissingProperty struct {
+	MissingProperty []MissingProperty `xml:"MissingProperty,omitempty"`
+}
+
+func init() {
+	t["ArrayOfMissingProperty"] = reflect.TypeOf((*ArrayOfMissingProperty)(nil)).Elem()
+}
+
+type ArrayOfMultipleCertificatesVerifyFaultThumbprintData struct {
+	MultipleCertificatesVerifyFaultThumbprintData []MultipleCertificatesVerifyFaultThumbprintData `xml:"MultipleCertificatesVerifyFaultThumbprintData,omitempty"`
+}
+
+func init() {
+	t["ArrayOfMultipleCertificatesVerifyFaultThumbprintData"] = reflect.TypeOf((*ArrayOfMultipleCertificatesVerifyFaultThumbprintData)(nil)).Elem()
+}
+
+type ArrayOfNasStorageProfile struct {
+	NasStorageProfile []NasStorageProfile `xml:"NasStorageProfile,omitempty"`
+}
+
+func init() {
+	t["ArrayOfNasStorageProfile"] = reflect.TypeOf((*ArrayOfNasStorageProfile)(nil)).Elem()
+}
+
+type ArrayOfNetIpConfigInfoIpAddress struct {
+	NetIpConfigInfoIpAddress []NetIpConfigInfoIpAddress `xml:"NetIpConfigInfoIpAddress,omitempty"`
+}
+
+func init() {
+	t["ArrayOfNetIpConfigInfoIpAddress"] = reflect.TypeOf((*ArrayOfNetIpConfigInfoIpAddress)(nil)).Elem()
+}
+
+type ArrayOfNetIpConfigSpecIpAddressSpec struct {
+	NetIpConfigSpecIpAddressSpec []NetIpConfigSpecIpAddressSpec `xml:"NetIpConfigSpecIpAddressSpec,omitempty"`
+}
+
+func init() {
+	t["ArrayOfNetIpConfigSpecIpAddressSpec"] = reflect.TypeOf((*ArrayOfNetIpConfigSpecIpAddressSpec)(nil)).Elem()
+}
+
+type ArrayOfNetIpRouteConfigInfoIpRoute struct {
+	NetIpRouteConfigInfoIpRoute []NetIpRouteConfigInfoIpRoute `xml:"NetIpRouteConfigInfoIpRoute,omitempty"`
+}
+
+func init() {
+	t["ArrayOfNetIpRouteConfigInfoIpRoute"] = reflect.TypeOf((*ArrayOfNetIpRouteConfigInfoIpRoute)(nil)).Elem()
+}
+
+type ArrayOfNetIpRouteConfigSpecIpRouteSpec struct {
+	NetIpRouteConfigSpecIpRouteSpec []NetIpRouteConfigSpecIpRouteSpec `xml:"NetIpRouteConfigSpecIpRouteSpec,omitempty"`
+}
+
+func init() {
+	t["ArrayOfNetIpRouteConfigSpecIpRouteSpec"] = reflect.TypeOf((*ArrayOfNetIpRouteConfigSpecIpRouteSpec)(nil)).Elem()
+}
+
+type ArrayOfNetIpStackInfoDefaultRouter struct {
+	NetIpStackInfoDefaultRouter []NetIpStackInfoDefaultRouter `xml:"NetIpStackInfoDefaultRouter,omitempty"`
+}
+
+func init() {
+	t["ArrayOfNetIpStackInfoDefaultRouter"] = reflect.TypeOf((*ArrayOfNetIpStackInfoDefaultRouter)(nil)).Elem()
+}
+
+type ArrayOfNetIpStackInfoNetToMedia struct {
+	NetIpStackInfoNetToMedia []NetIpStackInfoNetToMedia `xml:"NetIpStackInfoNetToMedia,omitempty"`
+}
+
+func init() {
+	t["ArrayOfNetIpStackInfoNetToMedia"] = reflect.TypeOf((*ArrayOfNetIpStackInfoNetToMedia)(nil)).Elem()
+}
+
+type ArrayOfNetStackInstanceProfile struct {
+	NetStackInstanceProfile []NetStackInstanceProfile `xml:"NetStackInstanceProfile,omitempty"`
+}
+
+func init() {
+	t["ArrayOfNetStackInstanceProfile"] = reflect.TypeOf((*ArrayOfNetStackInstanceProfile)(nil)).Elem()
+}
+
+type ArrayOfNumericRange struct {
+	NumericRange []NumericRange `xml:"NumericRange,omitempty"`
+}
+
+func init() {
+	t["ArrayOfNumericRange"] = reflect.TypeOf((*ArrayOfNumericRange)(nil)).Elem()
+}
+
+type ArrayOfObjectContent struct {
+	ObjectContent []ObjectContent `xml:"ObjectContent,omitempty"`
+}
+
+func init() {
+	t["ArrayOfObjectContent"] = reflect.TypeOf((*ArrayOfObjectContent)(nil)).Elem()
+}
+
+type ArrayOfObjectSpec struct {
+	ObjectSpec []ObjectSpec `xml:"ObjectSpec,omitempty"`
+}
+
+func init() {
+	t["ArrayOfObjectSpec"] = reflect.TypeOf((*ArrayOfObjectSpec)(nil)).Elem()
+}
+
+type ArrayOfObjectUpdate struct {
+	ObjectUpdate []ObjectUpdate `xml:"ObjectUpdate,omitempty"`
+}
+
+func init() {
+	t["ArrayOfObjectUpdate"] = reflect.TypeOf((*ArrayOfObjectUpdate)(nil)).Elem()
+}
+
+type ArrayOfOpaqueNetworkTargetInfo struct {
+	OpaqueNetworkTargetInfo []OpaqueNetworkTargetInfo `xml:"OpaqueNetworkTargetInfo,omitempty"`
+}
+
+func init() {
+	t["ArrayOfOpaqueNetworkTargetInfo"] = reflect.TypeOf((*ArrayOfOpaqueNetworkTargetInfo)(nil)).Elem()
+}
+
+type ArrayOfOptionDef struct {
+	OptionDef []OptionDef `xml:"OptionDef,omitempty"`
+}
+
+func init() {
+	t["ArrayOfOptionDef"] = reflect.TypeOf((*ArrayOfOptionDef)(nil)).Elem()
+}
+
+type ArrayOfOptionProfile struct {
+	OptionProfile []OptionProfile `xml:"OptionProfile,omitempty"`
+}
+
+func init() {
+	t["ArrayOfOptionProfile"] = reflect.TypeOf((*ArrayOfOptionProfile)(nil)).Elem()
+}
+
+type ArrayOfOptionValue struct {
+	OptionValue []BaseOptionValue `xml:"OptionValue,omitempty,typeattr"`
+}
+
+func init() {
+	t["ArrayOfOptionValue"] = reflect.TypeOf((*ArrayOfOptionValue)(nil)).Elem()
+}
+
+type ArrayOfOvfConsumerOstNode struct {
+	OvfConsumerOstNode []OvfConsumerOstNode `xml:"OvfConsumerOstNode,omitempty"`
+}
+
+func init() {
+	t["ArrayOfOvfConsumerOstNode"] = reflect.TypeOf((*ArrayOfOvfConsumerOstNode)(nil)).Elem()
+}
+
+type ArrayOfOvfConsumerOvfSection struct {
+	OvfConsumerOvfSection []OvfConsumerOvfSection `xml:"OvfConsumerOvfSection,omitempty"`
+}
+
+func init() {
+	t["ArrayOfOvfConsumerOvfSection"] = reflect.TypeOf((*ArrayOfOvfConsumerOvfSection)(nil)).Elem()
+}
+
+type ArrayOfOvfDeploymentOption struct {
+	OvfDeploymentOption []OvfDeploymentOption `xml:"OvfDeploymentOption,omitempty"`
+}
+
+func init() {
+	t["ArrayOfOvfDeploymentOption"] = reflect.TypeOf((*ArrayOfOvfDeploymentOption)(nil)).Elem()
+}
+
+type ArrayOfOvfFile struct {
+	OvfFile []OvfFile `xml:"OvfFile,omitempty"`
+}
+
+func init() {
+	t["ArrayOfOvfFile"] = reflect.TypeOf((*ArrayOfOvfFile)(nil)).Elem()
+}
+
+type ArrayOfOvfFileItem struct {
+	OvfFileItem []OvfFileItem `xml:"OvfFileItem,omitempty"`
+}
+
+func init() {
+	t["ArrayOfOvfFileItem"] = reflect.TypeOf((*ArrayOfOvfFileItem)(nil)).Elem()
+}
+
+type ArrayOfOvfNetworkInfo struct {
+	OvfNetworkInfo []OvfNetworkInfo `xml:"OvfNetworkInfo,omitempty"`
+}
+
+func init() {
+	t["ArrayOfOvfNetworkInfo"] = reflect.TypeOf((*ArrayOfOvfNetworkInfo)(nil)).Elem()
+}
+
+type ArrayOfOvfNetworkMapping struct {
+	OvfNetworkMapping []OvfNetworkMapping `xml:"OvfNetworkMapping,omitempty"`
+}
+
+func init() {
+	t["ArrayOfOvfNetworkMapping"] = reflect.TypeOf((*ArrayOfOvfNetworkMapping)(nil)).Elem()
+}
+
+type ArrayOfOvfOptionInfo struct {
+	OvfOptionInfo []OvfOptionInfo `xml:"OvfOptionInfo,omitempty"`
+}
+
+func init() {
+	t["ArrayOfOvfOptionInfo"] = reflect.TypeOf((*ArrayOfOvfOptionInfo)(nil)).Elem()
+}
+
+type ArrayOfOvfResourceMap struct {
+	OvfResourceMap []OvfResourceMap `xml:"OvfResourceMap,omitempty"`
+}
+
+func init() {
+	t["ArrayOfOvfResourceMap"] = reflect.TypeOf((*ArrayOfOvfResourceMap)(nil)).Elem()
+}
+
+type ArrayOfPerfCounterInfo struct {
+	PerfCounterInfo []PerfCounterInfo `xml:"PerfCounterInfo,omitempty"`
+}
+
+func init() {
+	t["ArrayOfPerfCounterInfo"] = reflect.TypeOf((*ArrayOfPerfCounterInfo)(nil)).Elem()
+}
+
+type ArrayOfPerfEntityMetricBase struct {
+	PerfEntityMetricBase []BasePerfEntityMetricBase `xml:"PerfEntityMetricBase,omitempty,typeattr"`
+}
+
+func init() {
+	t["ArrayOfPerfEntityMetricBase"] = reflect.TypeOf((*ArrayOfPerfEntityMetricBase)(nil)).Elem()
+}
+
+type ArrayOfPerfInterval struct {
+	PerfInterval []PerfInterval `xml:"PerfInterval,omitempty"`
+}
+
+func init() {
+	t["ArrayOfPerfInterval"] = reflect.TypeOf((*ArrayOfPerfInterval)(nil)).Elem()
+}
+
+type ArrayOfPerfMetricId struct {
+	PerfMetricId []PerfMetricId `xml:"PerfMetricId,omitempty"`
+}
+
+func init() {
+	t["ArrayOfPerfMetricId"] = reflect.TypeOf((*ArrayOfPerfMetricId)(nil)).Elem()
+}
+
+type ArrayOfPerfMetricSeries struct {
+	PerfMetricSeries []BasePerfMetricSeries `xml:"PerfMetricSeries,omitempty,typeattr"`
+}
+
+func init() {
+	t["ArrayOfPerfMetricSeries"] = reflect.TypeOf((*ArrayOfPerfMetricSeries)(nil)).Elem()
+}
+
+type ArrayOfPerfMetricSeriesCSV struct {
+	PerfMetricSeriesCSV []PerfMetricSeriesCSV `xml:"PerfMetricSeriesCSV,omitempty"`
+}
+
+func init() {
+	t["ArrayOfPerfMetricSeriesCSV"] = reflect.TypeOf((*ArrayOfPerfMetricSeriesCSV)(nil)).Elem()
+}
+
+type ArrayOfPerfQuerySpec struct {
+	PerfQuerySpec []PerfQuerySpec `xml:"PerfQuerySpec,omitempty"`
+}
+
+func init() {
+	t["ArrayOfPerfQuerySpec"] = reflect.TypeOf((*ArrayOfPerfQuerySpec)(nil)).Elem()
+}
+
+type ArrayOfPerfSampleInfo struct {
+	PerfSampleInfo []PerfSampleInfo `xml:"PerfSampleInfo,omitempty"`
+}
+
+func init() {
+	t["ArrayOfPerfSampleInfo"] = reflect.TypeOf((*ArrayOfPerfSampleInfo)(nil)).Elem()
+}
+
+type ArrayOfPerformanceManagerCounterLevelMapping struct {
+	PerformanceManagerCounterLevelMapping []PerformanceManagerCounterLevelMapping `xml:"PerformanceManagerCounterLevelMapping,omitempty"`
+}
+
+func init() {
+	t["ArrayOfPerformanceManagerCounterLevelMapping"] = reflect.TypeOf((*ArrayOfPerformanceManagerCounterLevelMapping)(nil)).Elem()
+}
+
+type ArrayOfPermission struct {
+	Permission []Permission `xml:"Permission,omitempty"`
+}
+
+func init() {
+	t["ArrayOfPermission"] = reflect.TypeOf((*ArrayOfPermission)(nil)).Elem()
+}
+
+type ArrayOfPermissionProfile struct {
+	PermissionProfile []PermissionProfile `xml:"PermissionProfile,omitempty"`
+}
+
+func init() {
+	t["ArrayOfPermissionProfile"] = reflect.TypeOf((*ArrayOfPermissionProfile)(nil)).Elem()
+}
+
+type ArrayOfPhysicalNic struct {
+	PhysicalNic []PhysicalNic `xml:"PhysicalNic,omitempty"`
+}
+
+func init() {
+	t["ArrayOfPhysicalNic"] = reflect.TypeOf((*ArrayOfPhysicalNic)(nil)).Elem()
+}
+
+type ArrayOfPhysicalNicConfig struct {
+	PhysicalNicConfig []PhysicalNicConfig `xml:"PhysicalNicConfig,omitempty"`
+}
+
+func init() {
+	t["ArrayOfPhysicalNicConfig"] = reflect.TypeOf((*ArrayOfPhysicalNicConfig)(nil)).Elem()
+}
+
+type ArrayOfPhysicalNicHintInfo struct {
+	PhysicalNicHintInfo []PhysicalNicHintInfo `xml:"PhysicalNicHintInfo,omitempty"`
+}
+
+func init() {
+	t["ArrayOfPhysicalNicHintInfo"] = reflect.TypeOf((*ArrayOfPhysicalNicHintInfo)(nil)).Elem()
+}
+
+type ArrayOfPhysicalNicIpHint struct {
+	PhysicalNicIpHint []PhysicalNicIpHint `xml:"PhysicalNicIpHint,omitempty"`
+}
+
+func init() {
+	t["ArrayOfPhysicalNicIpHint"] = reflect.TypeOf((*ArrayOfPhysicalNicIpHint)(nil)).Elem()
+}
+
+type ArrayOfPhysicalNicLinkInfo struct {
+	PhysicalNicLinkInfo []PhysicalNicLinkInfo `xml:"PhysicalNicLinkInfo,omitempty"`
+}
+
+func init() {
+	t["ArrayOfPhysicalNicLinkInfo"] = reflect.TypeOf((*ArrayOfPhysicalNicLinkInfo)(nil)).Elem()
+}
+
+type ArrayOfPhysicalNicNameHint struct {
+	PhysicalNicNameHint []PhysicalNicNameHint `xml:"PhysicalNicNameHint,omitempty"`
+}
+
+func init() {
+	t["ArrayOfPhysicalNicNameHint"] = reflect.TypeOf((*ArrayOfPhysicalNicNameHint)(nil)).Elem()
+}
+
+type ArrayOfPhysicalNicProfile struct {
+	PhysicalNicProfile []PhysicalNicProfile `xml:"PhysicalNicProfile,omitempty"`
+}
+
+func init() {
+	t["ArrayOfPhysicalNicProfile"] = reflect.TypeOf((*ArrayOfPhysicalNicProfile)(nil)).Elem()
+}
+
+type ArrayOfPlacementAffinityRule struct {
+	PlacementAffinityRule []PlacementAffinityRule `xml:"PlacementAffinityRule,omitempty"`
+}
+
+func init() {
+	t["ArrayOfPlacementAffinityRule"] = reflect.TypeOf((*ArrayOfPlacementAffinityRule)(nil)).Elem()
+}
+
+type ArrayOfPlacementSpec struct {
+	PlacementSpec []PlacementSpec `xml:"PlacementSpec,omitempty"`
+}
+
+func init() {
+	t["ArrayOfPlacementSpec"] = reflect.TypeOf((*ArrayOfPlacementSpec)(nil)).Elem()
+}
+
+type ArrayOfPnicUplinkProfile struct {
+	PnicUplinkProfile []PnicUplinkProfile `xml:"PnicUplinkProfile,omitempty"`
+}
+
+func init() {
+	t["ArrayOfPnicUplinkProfile"] = reflect.TypeOf((*ArrayOfPnicUplinkProfile)(nil)).Elem()
+}
+
+type ArrayOfPodDiskLocator struct {
+	PodDiskLocator []PodDiskLocator `xml:"PodDiskLocator,omitempty"`
+}
+
+func init() {
+	t["ArrayOfPodDiskLocator"] = reflect.TypeOf((*ArrayOfPodDiskLocator)(nil)).Elem()
+}
+
+type ArrayOfPolicyOption struct {
+	PolicyOption []BasePolicyOption `xml:"PolicyOption,omitempty,typeattr"`
+}
+
+func init() {
+	t["ArrayOfPolicyOption"] = reflect.TypeOf((*ArrayOfPolicyOption)(nil)).Elem()
+}
+
+type ArrayOfPrivilegeAvailability struct {
+	PrivilegeAvailability []PrivilegeAvailability `xml:"PrivilegeAvailability,omitempty"`
+}
+
+func init() {
+	t["ArrayOfPrivilegeAvailability"] = reflect.TypeOf((*ArrayOfPrivilegeAvailability)(nil)).Elem()
+}
+
+type ArrayOfProductComponentInfo struct {
+	ProductComponentInfo []ProductComponentInfo `xml:"ProductComponentInfo,omitempty"`
+}
+
+func init() {
+	t["ArrayOfProductComponentInfo"] = reflect.TypeOf((*ArrayOfProductComponentInfo)(nil)).Elem()
+}
+
+type ArrayOfProfileApplyProfileProperty struct {
+	ProfileApplyProfileProperty []ProfileApplyProfileProperty `xml:"ProfileApplyProfileProperty,omitempty"`
+}
+
+func init() {
+	t["ArrayOfProfileApplyProfileProperty"] = reflect.TypeOf((*ArrayOfProfileApplyProfileProperty)(nil)).Elem()
+}
+
+type ArrayOfProfileDeferredPolicyOptionParameter struct {
+	ProfileDeferredPolicyOptionParameter []ProfileDeferredPolicyOptionParameter `xml:"ProfileDeferredPolicyOptionParameter,omitempty"`
+}
+
+func init() {
+	t["ArrayOfProfileDeferredPolicyOptionParameter"] = reflect.TypeOf((*ArrayOfProfileDeferredPolicyOptionParameter)(nil)).Elem()
+}
+
+type ArrayOfProfileDescriptionSection struct {
+	ProfileDescriptionSection []ProfileDescriptionSection `xml:"ProfileDescriptionSection,omitempty"`
+}
+
+func init() {
+	t["ArrayOfProfileDescriptionSection"] = reflect.TypeOf((*ArrayOfProfileDescriptionSection)(nil)).Elem()
+}
+
+type ArrayOfProfileExecuteError struct {
+	ProfileExecuteError []ProfileExecuteError `xml:"ProfileExecuteError,omitempty"`
+}
+
+func init() {
+	t["ArrayOfProfileExecuteError"] = reflect.TypeOf((*ArrayOfProfileExecuteError)(nil)).Elem()
+}
+
+type ArrayOfProfileExpression struct {
+	ProfileExpression []BaseProfileExpression `xml:"ProfileExpression,omitempty,typeattr"`
+}
+
+func init() {
+	t["ArrayOfProfileExpression"] = reflect.TypeOf((*ArrayOfProfileExpression)(nil)).Elem()
+}
+
+type ArrayOfProfileExpressionMetadata struct {
+	ProfileExpressionMetadata []ProfileExpressionMetadata `xml:"ProfileExpressionMetadata,omitempty"`
+}
+
+func init() {
+	t["ArrayOfProfileExpressionMetadata"] = reflect.TypeOf((*ArrayOfProfileExpressionMetadata)(nil)).Elem()
+}
+
+type ArrayOfProfileMetadata struct {
+	ProfileMetadata []ProfileMetadata `xml:"ProfileMetadata,omitempty"`
+}
+
+func init() {
+	t["ArrayOfProfileMetadata"] = reflect.TypeOf((*ArrayOfProfileMetadata)(nil)).Elem()
+}
+
+type ArrayOfProfileMetadataProfileSortSpec struct {
+	ProfileMetadataProfileSortSpec []ProfileMetadataProfileSortSpec `xml:"ProfileMetadataProfileSortSpec,omitempty"`
+}
+
+func init() {
+	t["ArrayOfProfileMetadataProfileSortSpec"] = reflect.TypeOf((*ArrayOfProfileMetadataProfileSortSpec)(nil)).Elem()
+}
+
+type ArrayOfProfileParameterMetadata struct {
+	ProfileParameterMetadata []ProfileParameterMetadata `xml:"ProfileParameterMetadata,omitempty"`
+}
+
+func init() {
+	t["ArrayOfProfileParameterMetadata"] = reflect.TypeOf((*ArrayOfProfileParameterMetadata)(nil)).Elem()
+}
+
+type ArrayOfProfilePolicy struct {
+	ProfilePolicy []ProfilePolicy `xml:"ProfilePolicy,omitempty"`
+}
+
+func init() {
+	t["ArrayOfProfilePolicy"] = reflect.TypeOf((*ArrayOfProfilePolicy)(nil)).Elem()
+}
+
+type ArrayOfProfilePolicyMetadata struct {
+	ProfilePolicyMetadata []ProfilePolicyMetadata `xml:"ProfilePolicyMetadata,omitempty"`
+}
+
+func init() {
+	t["ArrayOfProfilePolicyMetadata"] = reflect.TypeOf((*ArrayOfProfilePolicyMetadata)(nil)).Elem()
+}
+
+type ArrayOfProfilePolicyOptionMetadata struct {
+	ProfilePolicyOptionMetadata []BaseProfilePolicyOptionMetadata `xml:"ProfilePolicyOptionMetadata,omitempty,typeattr"`
+}
+
+func init() {
+	t["ArrayOfProfilePolicyOptionMetadata"] = reflect.TypeOf((*ArrayOfProfilePolicyOptionMetadata)(nil)).Elem()
+}
+
+type ArrayOfProfileProfileStructureProperty struct {
+	ProfileProfileStructureProperty []ProfileProfileStructureProperty `xml:"ProfileProfileStructureProperty,omitempty"`
+}
+
+func init() {
+	t["ArrayOfProfileProfileStructureProperty"] = reflect.TypeOf((*ArrayOfProfileProfileStructureProperty)(nil)).Elem()
+}
+
+type ArrayOfProfilePropertyPath struct {
+	ProfilePropertyPath []ProfilePropertyPath `xml:"ProfilePropertyPath,omitempty"`
+}
+
+func init() {
+	t["ArrayOfProfilePropertyPath"] = reflect.TypeOf((*ArrayOfProfilePropertyPath)(nil)).Elem()
+}
+
+type ArrayOfProfileUpdateFailedUpdateFailure struct {
+	ProfileUpdateFailedUpdateFailure []ProfileUpdateFailedUpdateFailure `xml:"ProfileUpdateFailedUpdateFailure,omitempty"`
+}
+
+func init() {
+	t["ArrayOfProfileUpdateFailedUpdateFailure"] = reflect.TypeOf((*ArrayOfProfileUpdateFailedUpdateFailure)(nil)).Elem()
+}
+
+type ArrayOfPropertyChange struct {
+	PropertyChange []PropertyChange `xml:"PropertyChange,omitempty"`
+}
+
+func init() {
+	t["ArrayOfPropertyChange"] = reflect.TypeOf((*ArrayOfPropertyChange)(nil)).Elem()
+}
+
+type ArrayOfPropertyFilterSpec struct {
+	PropertyFilterSpec []PropertyFilterSpec `xml:"PropertyFilterSpec,omitempty"`
+}
+
+func init() {
+	t["ArrayOfPropertyFilterSpec"] = reflect.TypeOf((*ArrayOfPropertyFilterSpec)(nil)).Elem()
+}
+
+type ArrayOfPropertyFilterUpdate struct {
+	PropertyFilterUpdate []PropertyFilterUpdate `xml:"PropertyFilterUpdate,omitempty"`
+}
+
+func init() {
+	t["ArrayOfPropertyFilterUpdate"] = reflect.TypeOf((*ArrayOfPropertyFilterUpdate)(nil)).Elem()
+}
+
+type ArrayOfPropertySpec struct {
+	PropertySpec []PropertySpec `xml:"PropertySpec,omitempty"`
+}
+
+func init() {
+	t["ArrayOfPropertySpec"] = reflect.TypeOf((*ArrayOfPropertySpec)(nil)).Elem()
+}
+
+type ArrayOfReplicationInfoDiskSettings struct {
+	ReplicationInfoDiskSettings []ReplicationInfoDiskSettings `xml:"ReplicationInfoDiskSettings,omitempty"`
+}
+
+func init() {
+	t["ArrayOfReplicationInfoDiskSettings"] = reflect.TypeOf((*ArrayOfReplicationInfoDiskSettings)(nil)).Elem()
+}
+
+type ArrayOfResourceConfigSpec struct {
+	ResourceConfigSpec []ResourceConfigSpec `xml:"ResourceConfigSpec,omitempty"`
+}
+
+func init() {
+	t["ArrayOfResourceConfigSpec"] = reflect.TypeOf((*ArrayOfResourceConfigSpec)(nil)).Elem()
+}
+
+type ArrayOfScheduledTaskDetail struct {
+	ScheduledTaskDetail []ScheduledTaskDetail `xml:"ScheduledTaskDetail,omitempty"`
+}
+
+func init() {
+	t["ArrayOfScheduledTaskDetail"] = reflect.TypeOf((*ArrayOfScheduledTaskDetail)(nil)).Elem()
+}
+
+type ArrayOfScsiLun struct {
+	ScsiLun []BaseScsiLun `xml:"ScsiLun,omitempty,typeattr"`
+}
+
+func init() {
+	t["ArrayOfScsiLun"] = reflect.TypeOf((*ArrayOfScsiLun)(nil)).Elem()
+}
+
+type ArrayOfScsiLunDescriptor struct {
+	ScsiLunDescriptor []ScsiLunDescriptor `xml:"ScsiLunDescriptor,omitempty"`
+}
+
+func init() {
+	t["ArrayOfScsiLunDescriptor"] = reflect.TypeOf((*ArrayOfScsiLunDescriptor)(nil)).Elem()
+}
+
+type ArrayOfScsiLunDurableName struct {
+	ScsiLunDurableName []ScsiLunDurableName `xml:"ScsiLunDurableName,omitempty"`
+}
+
+func init() {
+	t["ArrayOfScsiLunDurableName"] = reflect.TypeOf((*ArrayOfScsiLunDurableName)(nil)).Elem()
+}
+
+type ArrayOfSelectionSet struct {
+	SelectionSet []BaseSelectionSet `xml:"SelectionSet,omitempty,typeattr"`
+}
+
+func init() {
+	t["ArrayOfSelectionSet"] = reflect.TypeOf((*ArrayOfSelectionSet)(nil)).Elem()
+}
+
+type ArrayOfSelectionSpec struct {
+	SelectionSpec []BaseSelectionSpec `xml:"SelectionSpec,omitempty,typeattr"`
+}
+
+func init() {
+	t["ArrayOfSelectionSpec"] = reflect.TypeOf((*ArrayOfSelectionSpec)(nil)).Elem()
+}
+
+type ArrayOfServiceConsolePortGroupProfile struct {
+	ServiceConsolePortGroupProfile []ServiceConsolePortGroupProfile `xml:"ServiceConsolePortGroupProfile,omitempty"`
+}
+
+func init() {
+	t["ArrayOfServiceConsolePortGroupProfile"] = reflect.TypeOf((*ArrayOfServiceConsolePortGroupProfile)(nil)).Elem()
+}
+
+type ArrayOfServiceLocator struct {
+	ServiceLocator []ServiceLocator `xml:"ServiceLocator,omitempty"`
+}
+
+func init() {
+	t["ArrayOfServiceLocator"] = reflect.TypeOf((*ArrayOfServiceLocator)(nil)).Elem()
+}
+
+type ArrayOfServiceManagerServiceInfo struct {
+	ServiceManagerServiceInfo []ServiceManagerServiceInfo `xml:"ServiceManagerServiceInfo,omitempty"`
+}
+
+func init() {
+	t["ArrayOfServiceManagerServiceInfo"] = reflect.TypeOf((*ArrayOfServiceManagerServiceInfo)(nil)).Elem()
+}
+
+type ArrayOfServiceProfile struct {
+	ServiceProfile []ServiceProfile `xml:"ServiceProfile,omitempty"`
+}
+
+func init()
{ + t["ArrayOfServiceProfile"] = reflect.TypeOf((*ArrayOfServiceProfile)(nil)).Elem() +} + +type ArrayOfShort struct { + Short []int16 `xml:"short,omitempty"` +} + +func init() { + t["ArrayOfShort"] = reflect.TypeOf((*ArrayOfShort)(nil)).Elem() +} + +type ArrayOfStaticRouteProfile struct { + StaticRouteProfile []StaticRouteProfile `xml:"StaticRouteProfile,omitempty"` +} + +func init() { + t["ArrayOfStaticRouteProfile"] = reflect.TypeOf((*ArrayOfStaticRouteProfile)(nil)).Elem() +} + +type ArrayOfStorageDrsOptionSpec struct { + StorageDrsOptionSpec []StorageDrsOptionSpec `xml:"StorageDrsOptionSpec,omitempty"` +} + +func init() { + t["ArrayOfStorageDrsOptionSpec"] = reflect.TypeOf((*ArrayOfStorageDrsOptionSpec)(nil)).Elem() +} + +type ArrayOfStorageDrsPlacementRankVmSpec struct { + StorageDrsPlacementRankVmSpec []StorageDrsPlacementRankVmSpec `xml:"StorageDrsPlacementRankVmSpec,omitempty"` +} + +func init() { + t["ArrayOfStorageDrsPlacementRankVmSpec"] = reflect.TypeOf((*ArrayOfStorageDrsPlacementRankVmSpec)(nil)).Elem() +} + +type ArrayOfStorageDrsVmConfigInfo struct { + StorageDrsVmConfigInfo []StorageDrsVmConfigInfo `xml:"StorageDrsVmConfigInfo,omitempty"` +} + +func init() { + t["ArrayOfStorageDrsVmConfigInfo"] = reflect.TypeOf((*ArrayOfStorageDrsVmConfigInfo)(nil)).Elem() +} + +type ArrayOfStorageDrsVmConfigSpec struct { + StorageDrsVmConfigSpec []StorageDrsVmConfigSpec `xml:"StorageDrsVmConfigSpec,omitempty"` +} + +func init() { + t["ArrayOfStorageDrsVmConfigSpec"] = reflect.TypeOf((*ArrayOfStorageDrsVmConfigSpec)(nil)).Elem() +} + +type ArrayOfStoragePerformanceSummary struct { + StoragePerformanceSummary []StoragePerformanceSummary `xml:"StoragePerformanceSummary,omitempty"` +} + +func init() { + t["ArrayOfStoragePerformanceSummary"] = reflect.TypeOf((*ArrayOfStoragePerformanceSummary)(nil)).Elem() +} + +type ArrayOfStorageRequirement struct { + StorageRequirement []StorageRequirement `xml:"StorageRequirement,omitempty"` +} + +func init() { + t["ArrayOfStorageRequirement"] = reflect.TypeOf((*ArrayOfStorageRequirement)(nil)).Elem() +} + +type ArrayOfString struct { + String []string `xml:"string,omitempty"` +} + +func init() { + t["ArrayOfString"] = reflect.TypeOf((*ArrayOfString)(nil)).Elem() +} + +type ArrayOfTag struct { + Tag []Tag `xml:"Tag,omitempty"` +} + +func init() { + t["ArrayOfTag"] = reflect.TypeOf((*ArrayOfTag)(nil)).Elem() +} + +type ArrayOfTaskInfo struct { + TaskInfo []TaskInfo `xml:"TaskInfo,omitempty"` +} + +func init() { + t["ArrayOfTaskInfo"] = reflect.TypeOf((*ArrayOfTaskInfo)(nil)).Elem() +} + +type ArrayOfTaskInfoState struct { + TaskInfoState []TaskInfoState `xml:"TaskInfoState,omitempty"` +} + +func init() { + t["ArrayOfTaskInfoState"] = reflect.TypeOf((*ArrayOfTaskInfoState)(nil)).Elem() +} + +type ArrayOfTypeDescription struct { + TypeDescription []BaseTypeDescription `xml:"TypeDescription,omitempty,typeattr"` +} + +func init() { + t["ArrayOfTypeDescription"] = reflect.TypeOf((*ArrayOfTypeDescription)(nil)).Elem() +} + +type ArrayOfUpdateVirtualMachineFilesResultFailedVmFileInfo struct { + UpdateVirtualMachineFilesResultFailedVmFileInfo []UpdateVirtualMachineFilesResultFailedVmFileInfo `xml:"UpdateVirtualMachineFilesResultFailedVmFileInfo,omitempty"` +} + +func init() { + t["ArrayOfUpdateVirtualMachineFilesResultFailedVmFileInfo"] = reflect.TypeOf((*ArrayOfUpdateVirtualMachineFilesResultFailedVmFileInfo)(nil)).Elem() +} + +type ArrayOfUserGroupProfile struct { + UserGroupProfile []UserGroupProfile `xml:"UserGroupProfile,omitempty"` +} + +func init() { + 
t["ArrayOfUserGroupProfile"] = reflect.TypeOf((*ArrayOfUserGroupProfile)(nil)).Elem() +} + +type ArrayOfUserProfile struct { + UserProfile []UserProfile `xml:"UserProfile,omitempty"` +} + +func init() { + t["ArrayOfUserProfile"] = reflect.TypeOf((*ArrayOfUserProfile)(nil)).Elem() +} + +type ArrayOfUserSearchResult struct { + UserSearchResult []BaseUserSearchResult `xml:"UserSearchResult,omitempty,typeattr"` +} + +func init() { + t["ArrayOfUserSearchResult"] = reflect.TypeOf((*ArrayOfUserSearchResult)(nil)).Elem() +} + +type ArrayOfUserSession struct { + UserSession []UserSession `xml:"UserSession,omitempty"` +} + +func init() { + t["ArrayOfUserSession"] = reflect.TypeOf((*ArrayOfUserSession)(nil)).Elem() +} + +type ArrayOfVASAStorageArray struct { + VASAStorageArray []VASAStorageArray `xml:"VASAStorageArray,omitempty"` +} + +func init() { + t["ArrayOfVASAStorageArray"] = reflect.TypeOf((*ArrayOfVASAStorageArray)(nil)).Elem() +} + +type ArrayOfVAppCloneSpecNetworkMappingPair struct { + VAppCloneSpecNetworkMappingPair []VAppCloneSpecNetworkMappingPair `xml:"VAppCloneSpecNetworkMappingPair,omitempty"` +} + +func init() { + t["ArrayOfVAppCloneSpecNetworkMappingPair"] = reflect.TypeOf((*ArrayOfVAppCloneSpecNetworkMappingPair)(nil)).Elem() +} + +type ArrayOfVAppCloneSpecResourceMap struct { + VAppCloneSpecResourceMap []VAppCloneSpecResourceMap `xml:"VAppCloneSpecResourceMap,omitempty"` +} + +func init() { + t["ArrayOfVAppCloneSpecResourceMap"] = reflect.TypeOf((*ArrayOfVAppCloneSpecResourceMap)(nil)).Elem() +} + +type ArrayOfVAppEntityConfigInfo struct { + VAppEntityConfigInfo []VAppEntityConfigInfo `xml:"VAppEntityConfigInfo,omitempty"` +} + +func init() { + t["ArrayOfVAppEntityConfigInfo"] = reflect.TypeOf((*ArrayOfVAppEntityConfigInfo)(nil)).Elem() +} + +type ArrayOfVAppOvfSectionInfo struct { + VAppOvfSectionInfo []VAppOvfSectionInfo `xml:"VAppOvfSectionInfo,omitempty"` +} + +func init() { + t["ArrayOfVAppOvfSectionInfo"] = reflect.TypeOf((*ArrayOfVAppOvfSectionInfo)(nil)).Elem() +} + +type ArrayOfVAppOvfSectionSpec struct { + VAppOvfSectionSpec []VAppOvfSectionSpec `xml:"VAppOvfSectionSpec,omitempty"` +} + +func init() { + t["ArrayOfVAppOvfSectionSpec"] = reflect.TypeOf((*ArrayOfVAppOvfSectionSpec)(nil)).Elem() +} + +type ArrayOfVAppProductInfo struct { + VAppProductInfo []VAppProductInfo `xml:"VAppProductInfo,omitempty"` +} + +func init() { + t["ArrayOfVAppProductInfo"] = reflect.TypeOf((*ArrayOfVAppProductInfo)(nil)).Elem() +} + +type ArrayOfVAppProductSpec struct { + VAppProductSpec []VAppProductSpec `xml:"VAppProductSpec,omitempty"` +} + +func init() { + t["ArrayOfVAppProductSpec"] = reflect.TypeOf((*ArrayOfVAppProductSpec)(nil)).Elem() +} + +type ArrayOfVAppPropertyInfo struct { + VAppPropertyInfo []VAppPropertyInfo `xml:"VAppPropertyInfo,omitempty"` +} + +func init() { + t["ArrayOfVAppPropertyInfo"] = reflect.TypeOf((*ArrayOfVAppPropertyInfo)(nil)).Elem() +} + +type ArrayOfVAppPropertySpec struct { + VAppPropertySpec []VAppPropertySpec `xml:"VAppPropertySpec,omitempty"` +} + +func init() { + t["ArrayOfVAppPropertySpec"] = reflect.TypeOf((*ArrayOfVAppPropertySpec)(nil)).Elem() +} + +type ArrayOfVMwareDVSPvlanConfigSpec struct { + VMwareDVSPvlanConfigSpec []VMwareDVSPvlanConfigSpec `xml:"VMwareDVSPvlanConfigSpec,omitempty"` +} + +func init() { + t["ArrayOfVMwareDVSPvlanConfigSpec"] = reflect.TypeOf((*ArrayOfVMwareDVSPvlanConfigSpec)(nil)).Elem() +} + +type ArrayOfVMwareDVSPvlanMapEntry struct { + VMwareDVSPvlanMapEntry []VMwareDVSPvlanMapEntry `xml:"VMwareDVSPvlanMapEntry,omitempty"` +} 
+ +func init() { + t["ArrayOfVMwareDVSPvlanMapEntry"] = reflect.TypeOf((*ArrayOfVMwareDVSPvlanMapEntry)(nil)).Elem() +} + +type ArrayOfVMwareDVSVspanConfigSpec struct { + VMwareDVSVspanConfigSpec []VMwareDVSVspanConfigSpec `xml:"VMwareDVSVspanConfigSpec,omitempty"` +} + +func init() { + t["ArrayOfVMwareDVSVspanConfigSpec"] = reflect.TypeOf((*ArrayOfVMwareDVSVspanConfigSpec)(nil)).Elem() +} + +type ArrayOfVMwareDvsLacpGroupConfig struct { + VMwareDvsLacpGroupConfig []VMwareDvsLacpGroupConfig `xml:"VMwareDvsLacpGroupConfig,omitempty"` +} + +func init() { + t["ArrayOfVMwareDvsLacpGroupConfig"] = reflect.TypeOf((*ArrayOfVMwareDvsLacpGroupConfig)(nil)).Elem() +} + +type ArrayOfVMwareDvsLacpGroupSpec struct { + VMwareDvsLacpGroupSpec []VMwareDvsLacpGroupSpec `xml:"VMwareDvsLacpGroupSpec,omitempty"` +} + +func init() { + t["ArrayOfVMwareDvsLacpGroupSpec"] = reflect.TypeOf((*ArrayOfVMwareDvsLacpGroupSpec)(nil)).Elem() +} + +type ArrayOfVMwareVspanSession struct { + VMwareVspanSession []VMwareVspanSession `xml:"VMwareVspanSession,omitempty"` +} + +func init() { + t["ArrayOfVMwareVspanSession"] = reflect.TypeOf((*ArrayOfVMwareVspanSession)(nil)).Elem() +} + +type ArrayOfVVolHostPE struct { + VVolHostPE []VVolHostPE `xml:"VVolHostPE,omitempty"` +} + +func init() { + t["ArrayOfVVolHostPE"] = reflect.TypeOf((*ArrayOfVVolHostPE)(nil)).Elem() +} + +type ArrayOfVimVasaProviderInfo struct { + VimVasaProviderInfo []VimVasaProviderInfo `xml:"VimVasaProviderInfo,omitempty"` +} + +func init() { + t["ArrayOfVimVasaProviderInfo"] = reflect.TypeOf((*ArrayOfVimVasaProviderInfo)(nil)).Elem() +} + +type ArrayOfVimVasaProviderStatePerArray struct { + VimVasaProviderStatePerArray []VimVasaProviderStatePerArray `xml:"VimVasaProviderStatePerArray,omitempty"` +} + +func init() { + t["ArrayOfVimVasaProviderStatePerArray"] = reflect.TypeOf((*ArrayOfVimVasaProviderStatePerArray)(nil)).Elem() +} + +type ArrayOfVirtualAppLinkInfo struct { + VirtualAppLinkInfo []VirtualAppLinkInfo `xml:"VirtualAppLinkInfo,omitempty"` +} + +func init() { + t["ArrayOfVirtualAppLinkInfo"] = reflect.TypeOf((*ArrayOfVirtualAppLinkInfo)(nil)).Elem() +} + +type ArrayOfVirtualDevice struct { + VirtualDevice []BaseVirtualDevice `xml:"VirtualDevice,omitempty,typeattr"` +} + +func init() { + t["ArrayOfVirtualDevice"] = reflect.TypeOf((*ArrayOfVirtualDevice)(nil)).Elem() +} + +type ArrayOfVirtualDeviceBackingOption struct { + VirtualDeviceBackingOption []BaseVirtualDeviceBackingOption `xml:"VirtualDeviceBackingOption,omitempty,typeattr"` +} + +func init() { + t["ArrayOfVirtualDeviceBackingOption"] = reflect.TypeOf((*ArrayOfVirtualDeviceBackingOption)(nil)).Elem() +} + +type ArrayOfVirtualDeviceConfigSpec struct { + VirtualDeviceConfigSpec []BaseVirtualDeviceConfigSpec `xml:"VirtualDeviceConfigSpec,omitempty,typeattr"` +} + +func init() { + t["ArrayOfVirtualDeviceConfigSpec"] = reflect.TypeOf((*ArrayOfVirtualDeviceConfigSpec)(nil)).Elem() +} + +type ArrayOfVirtualDeviceOption struct { + VirtualDeviceOption []BaseVirtualDeviceOption `xml:"VirtualDeviceOption,omitempty,typeattr"` +} + +func init() { + t["ArrayOfVirtualDeviceOption"] = reflect.TypeOf((*ArrayOfVirtualDeviceOption)(nil)).Elem() +} + +type ArrayOfVirtualDisk struct { + VirtualDisk []VirtualDisk `xml:"VirtualDisk,omitempty"` +} + +func init() { + t["ArrayOfVirtualDisk"] = reflect.TypeOf((*ArrayOfVirtualDisk)(nil)).Elem() +} + +type ArrayOfVirtualDiskDeltaDiskFormatsSupported struct { + VirtualDiskDeltaDiskFormatsSupported []VirtualDiskDeltaDiskFormatsSupported 
`xml:"VirtualDiskDeltaDiskFormatsSupported,omitempty"` +} + +func init() { + t["ArrayOfVirtualDiskDeltaDiskFormatsSupported"] = reflect.TypeOf((*ArrayOfVirtualDiskDeltaDiskFormatsSupported)(nil)).Elem() +} + +type ArrayOfVirtualDiskId struct { + VirtualDiskId []VirtualDiskId `xml:"VirtualDiskId,omitempty"` +} + +func init() { + t["ArrayOfVirtualDiskId"] = reflect.TypeOf((*ArrayOfVirtualDiskId)(nil)).Elem() +} + +type ArrayOfVirtualMachineBootOptionsBootableDevice struct { + VirtualMachineBootOptionsBootableDevice []BaseVirtualMachineBootOptionsBootableDevice `xml:"VirtualMachineBootOptionsBootableDevice,omitempty,typeattr"` +} + +func init() { + t["ArrayOfVirtualMachineBootOptionsBootableDevice"] = reflect.TypeOf((*ArrayOfVirtualMachineBootOptionsBootableDevice)(nil)).Elem() +} + +type ArrayOfVirtualMachineCdromInfo struct { + VirtualMachineCdromInfo []VirtualMachineCdromInfo `xml:"VirtualMachineCdromInfo,omitempty"` +} + +func init() { + t["ArrayOfVirtualMachineCdromInfo"] = reflect.TypeOf((*ArrayOfVirtualMachineCdromInfo)(nil)).Elem() +} + +type ArrayOfVirtualMachineConfigInfoDatastoreUrlPair struct { + VirtualMachineConfigInfoDatastoreUrlPair []VirtualMachineConfigInfoDatastoreUrlPair `xml:"VirtualMachineConfigInfoDatastoreUrlPair,omitempty"` +} + +func init() { + t["ArrayOfVirtualMachineConfigInfoDatastoreUrlPair"] = reflect.TypeOf((*ArrayOfVirtualMachineConfigInfoDatastoreUrlPair)(nil)).Elem() +} + +type ArrayOfVirtualMachineConfigOptionDescriptor struct { + VirtualMachineConfigOptionDescriptor []VirtualMachineConfigOptionDescriptor `xml:"VirtualMachineConfigOptionDescriptor,omitempty"` +} + +func init() { + t["ArrayOfVirtualMachineConfigOptionDescriptor"] = reflect.TypeOf((*ArrayOfVirtualMachineConfigOptionDescriptor)(nil)).Elem() +} + +type ArrayOfVirtualMachineCpuIdInfoSpec struct { + VirtualMachineCpuIdInfoSpec []VirtualMachineCpuIdInfoSpec `xml:"VirtualMachineCpuIdInfoSpec,omitempty"` +} + +func init() { + t["ArrayOfVirtualMachineCpuIdInfoSpec"] = reflect.TypeOf((*ArrayOfVirtualMachineCpuIdInfoSpec)(nil)).Elem() +} + +type ArrayOfVirtualMachineDatastoreInfo struct { + VirtualMachineDatastoreInfo []VirtualMachineDatastoreInfo `xml:"VirtualMachineDatastoreInfo,omitempty"` +} + +func init() { + t["ArrayOfVirtualMachineDatastoreInfo"] = reflect.TypeOf((*ArrayOfVirtualMachineDatastoreInfo)(nil)).Elem() +} + +type ArrayOfVirtualMachineDatastoreVolumeOption struct { + VirtualMachineDatastoreVolumeOption []VirtualMachineDatastoreVolumeOption `xml:"VirtualMachineDatastoreVolumeOption,omitempty"` +} + +func init() { + t["ArrayOfVirtualMachineDatastoreVolumeOption"] = reflect.TypeOf((*ArrayOfVirtualMachineDatastoreVolumeOption)(nil)).Elem() +} + +type ArrayOfVirtualMachineDeviceRuntimeInfo struct { + VirtualMachineDeviceRuntimeInfo []VirtualMachineDeviceRuntimeInfo `xml:"VirtualMachineDeviceRuntimeInfo,omitempty"` +} + +func init() { + t["ArrayOfVirtualMachineDeviceRuntimeInfo"] = reflect.TypeOf((*ArrayOfVirtualMachineDeviceRuntimeInfo)(nil)).Elem() +} + +type ArrayOfVirtualMachineDisplayTopology struct { + VirtualMachineDisplayTopology []VirtualMachineDisplayTopology `xml:"VirtualMachineDisplayTopology,omitempty"` +} + +func init() { + t["ArrayOfVirtualMachineDisplayTopology"] = reflect.TypeOf((*ArrayOfVirtualMachineDisplayTopology)(nil)).Elem() +} + +type ArrayOfVirtualMachineFeatureRequirement struct { + VirtualMachineFeatureRequirement []VirtualMachineFeatureRequirement `xml:"VirtualMachineFeatureRequirement,omitempty"` +} + +func init() { + t["ArrayOfVirtualMachineFeatureRequirement"] 
= reflect.TypeOf((*ArrayOfVirtualMachineFeatureRequirement)(nil)).Elem() +} + +type ArrayOfVirtualMachineFileLayoutDiskLayout struct { + VirtualMachineFileLayoutDiskLayout []VirtualMachineFileLayoutDiskLayout `xml:"VirtualMachineFileLayoutDiskLayout,omitempty"` +} + +func init() { + t["ArrayOfVirtualMachineFileLayoutDiskLayout"] = reflect.TypeOf((*ArrayOfVirtualMachineFileLayoutDiskLayout)(nil)).Elem() +} + +type ArrayOfVirtualMachineFileLayoutExDiskLayout struct { + VirtualMachineFileLayoutExDiskLayout []VirtualMachineFileLayoutExDiskLayout `xml:"VirtualMachineFileLayoutExDiskLayout,omitempty"` +} + +func init() { + t["ArrayOfVirtualMachineFileLayoutExDiskLayout"] = reflect.TypeOf((*ArrayOfVirtualMachineFileLayoutExDiskLayout)(nil)).Elem() +} + +type ArrayOfVirtualMachineFileLayoutExDiskUnit struct { + VirtualMachineFileLayoutExDiskUnit []VirtualMachineFileLayoutExDiskUnit `xml:"VirtualMachineFileLayoutExDiskUnit,omitempty"` +} + +func init() { + t["ArrayOfVirtualMachineFileLayoutExDiskUnit"] = reflect.TypeOf((*ArrayOfVirtualMachineFileLayoutExDiskUnit)(nil)).Elem() +} + +type ArrayOfVirtualMachineFileLayoutExFileInfo struct { + VirtualMachineFileLayoutExFileInfo []VirtualMachineFileLayoutExFileInfo `xml:"VirtualMachineFileLayoutExFileInfo,omitempty"` +} + +func init() { + t["ArrayOfVirtualMachineFileLayoutExFileInfo"] = reflect.TypeOf((*ArrayOfVirtualMachineFileLayoutExFileInfo)(nil)).Elem() +} + +type ArrayOfVirtualMachineFileLayoutExSnapshotLayout struct { + VirtualMachineFileLayoutExSnapshotLayout []VirtualMachineFileLayoutExSnapshotLayout `xml:"VirtualMachineFileLayoutExSnapshotLayout,omitempty"` +} + +func init() { + t["ArrayOfVirtualMachineFileLayoutExSnapshotLayout"] = reflect.TypeOf((*ArrayOfVirtualMachineFileLayoutExSnapshotLayout)(nil)).Elem() +} + +type ArrayOfVirtualMachineFileLayoutSnapshotLayout struct { + VirtualMachineFileLayoutSnapshotLayout []VirtualMachineFileLayoutSnapshotLayout `xml:"VirtualMachineFileLayoutSnapshotLayout,omitempty"` +} + +func init() { + t["ArrayOfVirtualMachineFileLayoutSnapshotLayout"] = reflect.TypeOf((*ArrayOfVirtualMachineFileLayoutSnapshotLayout)(nil)).Elem() +} + +type ArrayOfVirtualMachineFloppyInfo struct { + VirtualMachineFloppyInfo []VirtualMachineFloppyInfo `xml:"VirtualMachineFloppyInfo,omitempty"` +} + +func init() { + t["ArrayOfVirtualMachineFloppyInfo"] = reflect.TypeOf((*ArrayOfVirtualMachineFloppyInfo)(nil)).Elem() +} + +type ArrayOfVirtualMachineIdeDiskDeviceInfo struct { + VirtualMachineIdeDiskDeviceInfo []VirtualMachineIdeDiskDeviceInfo `xml:"VirtualMachineIdeDiskDeviceInfo,omitempty"` +} + +func init() { + t["ArrayOfVirtualMachineIdeDiskDeviceInfo"] = reflect.TypeOf((*ArrayOfVirtualMachineIdeDiskDeviceInfo)(nil)).Elem() +} + +type ArrayOfVirtualMachineIdeDiskDevicePartitionInfo struct { + VirtualMachineIdeDiskDevicePartitionInfo []VirtualMachineIdeDiskDevicePartitionInfo `xml:"VirtualMachineIdeDiskDevicePartitionInfo,omitempty"` +} + +func init() { + t["ArrayOfVirtualMachineIdeDiskDevicePartitionInfo"] = reflect.TypeOf((*ArrayOfVirtualMachineIdeDiskDevicePartitionInfo)(nil)).Elem() +} + +type ArrayOfVirtualMachineLegacyNetworkSwitchInfo struct { + VirtualMachineLegacyNetworkSwitchInfo []VirtualMachineLegacyNetworkSwitchInfo `xml:"VirtualMachineLegacyNetworkSwitchInfo,omitempty"` +} + +func init() { + t["ArrayOfVirtualMachineLegacyNetworkSwitchInfo"] = reflect.TypeOf((*ArrayOfVirtualMachineLegacyNetworkSwitchInfo)(nil)).Elem() +} + +type ArrayOfVirtualMachineMessage struct { + VirtualMachineMessage []VirtualMachineMessage 
`xml:"VirtualMachineMessage,omitempty"` +} + +func init() { + t["ArrayOfVirtualMachineMessage"] = reflect.TypeOf((*ArrayOfVirtualMachineMessage)(nil)).Elem() +} + +type ArrayOfVirtualMachineMetadataManagerVmMetadataInput struct { + VirtualMachineMetadataManagerVmMetadataInput []VirtualMachineMetadataManagerVmMetadataInput `xml:"VirtualMachineMetadataManagerVmMetadataInput,omitempty"` +} + +func init() { + t["ArrayOfVirtualMachineMetadataManagerVmMetadataInput"] = reflect.TypeOf((*ArrayOfVirtualMachineMetadataManagerVmMetadataInput)(nil)).Elem() +} + +type ArrayOfVirtualMachineMetadataManagerVmMetadataResult struct { + VirtualMachineMetadataManagerVmMetadataResult []VirtualMachineMetadataManagerVmMetadataResult `xml:"VirtualMachineMetadataManagerVmMetadataResult,omitempty"` +} + +func init() { + t["ArrayOfVirtualMachineMetadataManagerVmMetadataResult"] = reflect.TypeOf((*ArrayOfVirtualMachineMetadataManagerVmMetadataResult)(nil)).Elem() +} + +type ArrayOfVirtualMachineNetworkInfo struct { + VirtualMachineNetworkInfo []VirtualMachineNetworkInfo `xml:"VirtualMachineNetworkInfo,omitempty"` +} + +func init() { + t["ArrayOfVirtualMachineNetworkInfo"] = reflect.TypeOf((*ArrayOfVirtualMachineNetworkInfo)(nil)).Elem() +} + +type ArrayOfVirtualMachineParallelInfo struct { + VirtualMachineParallelInfo []VirtualMachineParallelInfo `xml:"VirtualMachineParallelInfo,omitempty"` +} + +func init() { + t["ArrayOfVirtualMachineParallelInfo"] = reflect.TypeOf((*ArrayOfVirtualMachineParallelInfo)(nil)).Elem() +} + +type ArrayOfVirtualMachinePciPassthroughInfo struct { + VirtualMachinePciPassthroughInfo []BaseVirtualMachinePciPassthroughInfo `xml:"VirtualMachinePciPassthroughInfo,omitempty,typeattr"` +} + +func init() { + t["ArrayOfVirtualMachinePciPassthroughInfo"] = reflect.TypeOf((*ArrayOfVirtualMachinePciPassthroughInfo)(nil)).Elem() +} + +type ArrayOfVirtualMachinePciSharedGpuPassthroughInfo struct { + VirtualMachinePciSharedGpuPassthroughInfo []VirtualMachinePciSharedGpuPassthroughInfo `xml:"VirtualMachinePciSharedGpuPassthroughInfo,omitempty"` +} + +func init() { + t["ArrayOfVirtualMachinePciSharedGpuPassthroughInfo"] = reflect.TypeOf((*ArrayOfVirtualMachinePciSharedGpuPassthroughInfo)(nil)).Elem() +} + +type ArrayOfVirtualMachineProfileSpec struct { + VirtualMachineProfileSpec []BaseVirtualMachineProfileSpec `xml:"VirtualMachineProfileSpec,omitempty,typeattr"` +} + +func init() { + t["ArrayOfVirtualMachineProfileSpec"] = reflect.TypeOf((*ArrayOfVirtualMachineProfileSpec)(nil)).Elem() +} + +type ArrayOfVirtualMachineRelocateSpecDiskLocator struct { + VirtualMachineRelocateSpecDiskLocator []VirtualMachineRelocateSpecDiskLocator `xml:"VirtualMachineRelocateSpecDiskLocator,omitempty"` +} + +func init() { + t["ArrayOfVirtualMachineRelocateSpecDiskLocator"] = reflect.TypeOf((*ArrayOfVirtualMachineRelocateSpecDiskLocator)(nil)).Elem() +} + +type ArrayOfVirtualMachineScsiDiskDeviceInfo struct { + VirtualMachineScsiDiskDeviceInfo []VirtualMachineScsiDiskDeviceInfo `xml:"VirtualMachineScsiDiskDeviceInfo,omitempty"` +} + +func init() { + t["ArrayOfVirtualMachineScsiDiskDeviceInfo"] = reflect.TypeOf((*ArrayOfVirtualMachineScsiDiskDeviceInfo)(nil)).Elem() +} + +type ArrayOfVirtualMachineScsiPassthroughInfo struct { + VirtualMachineScsiPassthroughInfo []VirtualMachineScsiPassthroughInfo `xml:"VirtualMachineScsiPassthroughInfo,omitempty"` +} + +func init() { + t["ArrayOfVirtualMachineScsiPassthroughInfo"] = reflect.TypeOf((*ArrayOfVirtualMachineScsiPassthroughInfo)(nil)).Elem() +} + +type 
ArrayOfVirtualMachineSerialInfo struct { + VirtualMachineSerialInfo []VirtualMachineSerialInfo `xml:"VirtualMachineSerialInfo,omitempty"` +} + +func init() { + t["ArrayOfVirtualMachineSerialInfo"] = reflect.TypeOf((*ArrayOfVirtualMachineSerialInfo)(nil)).Elem() +} + +type ArrayOfVirtualMachineSnapshotTree struct { + VirtualMachineSnapshotTree []VirtualMachineSnapshotTree `xml:"VirtualMachineSnapshotTree,omitempty"` +} + +func init() { + t["ArrayOfVirtualMachineSnapshotTree"] = reflect.TypeOf((*ArrayOfVirtualMachineSnapshotTree)(nil)).Elem() +} + +type ArrayOfVirtualMachineSoundInfo struct { + VirtualMachineSoundInfo []VirtualMachineSoundInfo `xml:"VirtualMachineSoundInfo,omitempty"` +} + +func init() { + t["ArrayOfVirtualMachineSoundInfo"] = reflect.TypeOf((*ArrayOfVirtualMachineSoundInfo)(nil)).Elem() +} + +type ArrayOfVirtualMachineSriovInfo struct { + VirtualMachineSriovInfo []VirtualMachineSriovInfo `xml:"VirtualMachineSriovInfo,omitempty"` +} + +func init() { + t["ArrayOfVirtualMachineSriovInfo"] = reflect.TypeOf((*ArrayOfVirtualMachineSriovInfo)(nil)).Elem() +} + +type ArrayOfVirtualMachineSummary struct { + VirtualMachineSummary []VirtualMachineSummary `xml:"VirtualMachineSummary,omitempty"` +} + +func init() { + t["ArrayOfVirtualMachineSummary"] = reflect.TypeOf((*ArrayOfVirtualMachineSummary)(nil)).Elem() +} + +type ArrayOfVirtualMachineUsageOnDatastore struct { + VirtualMachineUsageOnDatastore []VirtualMachineUsageOnDatastore `xml:"VirtualMachineUsageOnDatastore,omitempty"` +} + +func init() { + t["ArrayOfVirtualMachineUsageOnDatastore"] = reflect.TypeOf((*ArrayOfVirtualMachineUsageOnDatastore)(nil)).Elem() +} + +type ArrayOfVirtualMachineUsbInfo struct { + VirtualMachineUsbInfo []VirtualMachineUsbInfo `xml:"VirtualMachineUsbInfo,omitempty"` +} + +func init() { + t["ArrayOfVirtualMachineUsbInfo"] = reflect.TypeOf((*ArrayOfVirtualMachineUsbInfo)(nil)).Elem() +} + +type ArrayOfVirtualMachineVFlashModuleInfo struct { + VirtualMachineVFlashModuleInfo []VirtualMachineVFlashModuleInfo `xml:"VirtualMachineVFlashModuleInfo,omitempty"` +} + +func init() { + t["ArrayOfVirtualMachineVFlashModuleInfo"] = reflect.TypeOf((*ArrayOfVirtualMachineVFlashModuleInfo)(nil)).Elem() +} + +type ArrayOfVirtualMachineVMCIDeviceFilterSpec struct { + VirtualMachineVMCIDeviceFilterSpec []VirtualMachineVMCIDeviceFilterSpec `xml:"VirtualMachineVMCIDeviceFilterSpec,omitempty"` +} + +func init() { + t["ArrayOfVirtualMachineVMCIDeviceFilterSpec"] = reflect.TypeOf((*ArrayOfVirtualMachineVMCIDeviceFilterSpec)(nil)).Elem() +} + +type ArrayOfVirtualNicManagerNetConfig struct { + VirtualNicManagerNetConfig []VirtualNicManagerNetConfig `xml:"VirtualNicManagerNetConfig,omitempty"` +} + +func init() { + t["ArrayOfVirtualNicManagerNetConfig"] = reflect.TypeOf((*ArrayOfVirtualNicManagerNetConfig)(nil)).Elem() +} + +type ArrayOfVirtualSCSISharing struct { + VirtualSCSISharing []VirtualSCSISharing `xml:"VirtualSCSISharing,omitempty"` +} + +func init() { + t["ArrayOfVirtualSCSISharing"] = reflect.TypeOf((*ArrayOfVirtualSCSISharing)(nil)).Elem() +} + +type ArrayOfVirtualSwitchProfile struct { + VirtualSwitchProfile []VirtualSwitchProfile `xml:"VirtualSwitchProfile,omitempty"` +} + +func init() { + t["ArrayOfVirtualSwitchProfile"] = reflect.TypeOf((*ArrayOfVirtualSwitchProfile)(nil)).Elem() +} + +type ArrayOfVmEventArgument struct { + VmEventArgument []VmEventArgument `xml:"VmEventArgument,omitempty"` +} + +func init() { + t["ArrayOfVmEventArgument"] = reflect.TypeOf((*ArrayOfVmEventArgument)(nil)).Elem() +} + +type 
ArrayOfVmPodConfigForPlacement struct { + VmPodConfigForPlacement []VmPodConfigForPlacement `xml:"VmPodConfigForPlacement,omitempty"` +} + +func init() { + t["ArrayOfVmPodConfigForPlacement"] = reflect.TypeOf((*ArrayOfVmPodConfigForPlacement)(nil)).Elem() +} + +type ArrayOfVmPortGroupProfile struct { + VmPortGroupProfile []VmPortGroupProfile `xml:"VmPortGroupProfile,omitempty"` +} + +func init() { + t["ArrayOfVmPortGroupProfile"] = reflect.TypeOf((*ArrayOfVmPortGroupProfile)(nil)).Elem() +} + +type ArrayOfVmfsDatastoreOption struct { + VmfsDatastoreOption []VmfsDatastoreOption `xml:"VmfsDatastoreOption,omitempty"` +} + +func init() { + t["ArrayOfVmfsDatastoreOption"] = reflect.TypeOf((*ArrayOfVmfsDatastoreOption)(nil)).Elem() +} + +type ArrayOfVnicPortArgument struct { + VnicPortArgument []VnicPortArgument `xml:"VnicPortArgument,omitempty"` +} + +func init() { + t["ArrayOfVnicPortArgument"] = reflect.TypeOf((*ArrayOfVnicPortArgument)(nil)).Elem() +} + +type ArrayOfVsanHostConfigInfo struct { + VsanHostConfigInfo []VsanHostConfigInfo `xml:"VsanHostConfigInfo,omitempty"` +} + +func init() { + t["ArrayOfVsanHostConfigInfo"] = reflect.TypeOf((*ArrayOfVsanHostConfigInfo)(nil)).Elem() +} + +type ArrayOfVsanHostConfigInfoNetworkInfoPortConfig struct { + VsanHostConfigInfoNetworkInfoPortConfig []VsanHostConfigInfoNetworkInfoPortConfig `xml:"VsanHostConfigInfoNetworkInfoPortConfig,omitempty"` +} + +func init() { + t["ArrayOfVsanHostConfigInfoNetworkInfoPortConfig"] = reflect.TypeOf((*ArrayOfVsanHostConfigInfoNetworkInfoPortConfig)(nil)).Elem() +} + +type ArrayOfVsanHostDiskMapInfo struct { + VsanHostDiskMapInfo []VsanHostDiskMapInfo `xml:"VsanHostDiskMapInfo,omitempty"` +} + +func init() { + t["ArrayOfVsanHostDiskMapInfo"] = reflect.TypeOf((*ArrayOfVsanHostDiskMapInfo)(nil)).Elem() +} + +type ArrayOfVsanHostDiskMapResult struct { + VsanHostDiskMapResult []VsanHostDiskMapResult `xml:"VsanHostDiskMapResult,omitempty"` +} + +func init() { + t["ArrayOfVsanHostDiskMapResult"] = reflect.TypeOf((*ArrayOfVsanHostDiskMapResult)(nil)).Elem() +} + +type ArrayOfVsanHostDiskMapping struct { + VsanHostDiskMapping []VsanHostDiskMapping `xml:"VsanHostDiskMapping,omitempty"` +} + +func init() { + t["ArrayOfVsanHostDiskMapping"] = reflect.TypeOf((*ArrayOfVsanHostDiskMapping)(nil)).Elem() +} + +type ArrayOfVsanHostDiskResult struct { + VsanHostDiskResult []VsanHostDiskResult `xml:"VsanHostDiskResult,omitempty"` +} + +func init() { + t["ArrayOfVsanHostDiskResult"] = reflect.TypeOf((*ArrayOfVsanHostDiskResult)(nil)).Elem() +} + +type ArrayOfVsanHostMembershipInfo struct { + VsanHostMembershipInfo []VsanHostMembershipInfo `xml:"VsanHostMembershipInfo,omitempty"` +} + +func init() { + t["ArrayOfVsanHostMembershipInfo"] = reflect.TypeOf((*ArrayOfVsanHostMembershipInfo)(nil)).Elem() +} + +type ArrayOfVsanHostRuntimeInfoDiskIssue struct { + VsanHostRuntimeInfoDiskIssue []VsanHostRuntimeInfoDiskIssue `xml:"VsanHostRuntimeInfoDiskIssue,omitempty"` +} + +func init() { + t["ArrayOfVsanHostRuntimeInfoDiskIssue"] = reflect.TypeOf((*ArrayOfVsanHostRuntimeInfoDiskIssue)(nil)).Elem() +} + +type ArrayOfVsanNewPolicyBatch struct { + VsanNewPolicyBatch []VsanNewPolicyBatch `xml:"VsanNewPolicyBatch,omitempty"` +} + +func init() { + t["ArrayOfVsanNewPolicyBatch"] = reflect.TypeOf((*ArrayOfVsanNewPolicyBatch)(nil)).Elem() +} + +type ArrayOfVsanPolicyChangeBatch struct { + VsanPolicyChangeBatch []VsanPolicyChangeBatch `xml:"VsanPolicyChangeBatch,omitempty"` +} + +func init() { + t["ArrayOfVsanPolicyChangeBatch"] = 
reflect.TypeOf((*ArrayOfVsanPolicyChangeBatch)(nil)).Elem() +} + +type ArrayOfVsanPolicySatisfiability struct { + VsanPolicySatisfiability []VsanPolicySatisfiability `xml:"VsanPolicySatisfiability,omitempty"` +} + +func init() { + t["ArrayOfVsanPolicySatisfiability"] = reflect.TypeOf((*ArrayOfVsanPolicySatisfiability)(nil)).Elem() +} + +type ArrayOfVsanUpgradeSystemNetworkPartitionInfo struct { + VsanUpgradeSystemNetworkPartitionInfo []VsanUpgradeSystemNetworkPartitionInfo `xml:"VsanUpgradeSystemNetworkPartitionInfo,omitempty"` +} + +func init() { + t["ArrayOfVsanUpgradeSystemNetworkPartitionInfo"] = reflect.TypeOf((*ArrayOfVsanUpgradeSystemNetworkPartitionInfo)(nil)).Elem() +} + +type ArrayOfVsanUpgradeSystemPreflightCheckIssue struct { + VsanUpgradeSystemPreflightCheckIssue []BaseVsanUpgradeSystemPreflightCheckIssue `xml:"VsanUpgradeSystemPreflightCheckIssue,omitempty,typeattr"` +} + +func init() { + t["ArrayOfVsanUpgradeSystemPreflightCheckIssue"] = reflect.TypeOf((*ArrayOfVsanUpgradeSystemPreflightCheckIssue)(nil)).Elem() +} + +type ArrayOfVsanUpgradeSystemUpgradeHistoryItem struct { + VsanUpgradeSystemUpgradeHistoryItem []BaseVsanUpgradeSystemUpgradeHistoryItem `xml:"VsanUpgradeSystemUpgradeHistoryItem,omitempty,typeattr"` +} + +func init() { + t["ArrayOfVsanUpgradeSystemUpgradeHistoryItem"] = reflect.TypeOf((*ArrayOfVsanUpgradeSystemUpgradeHistoryItem)(nil)).Elem() +} + +type ArrayUpdateSpec struct { + DynamicData + + Operation ArrayUpdateOperation `xml:"operation"` + RemoveKey AnyType `xml:"removeKey,omitempty,typeattr"` +} + +func init() { + t["ArrayUpdateSpec"] = reflect.TypeOf((*ArrayUpdateSpec)(nil)).Elem() +} + +type AssignUserToGroup AssignUserToGroupRequestType + +func init() { + t["AssignUserToGroup"] = reflect.TypeOf((*AssignUserToGroup)(nil)).Elem() +} + +type AssignUserToGroupRequestType struct { + This ManagedObjectReference `xml:"_this"` + User string `xml:"user"` + Group string `xml:"group"` +} + +func init() { + t["AssignUserToGroupRequestType"] = reflect.TypeOf((*AssignUserToGroupRequestType)(nil)).Elem() +} + +type AssignUserToGroupResponse struct { +} + +type AssociateProfile AssociateProfileRequestType + +func init() { + t["AssociateProfile"] = reflect.TypeOf((*AssociateProfile)(nil)).Elem() +} + +type AssociateProfileRequestType struct { + This ManagedObjectReference `xml:"_this"` + Entity []ManagedObjectReference `xml:"entity"` +} + +func init() { + t["AssociateProfileRequestType"] = reflect.TypeOf((*AssociateProfileRequestType)(nil)).Elem() +} + +type AssociateProfileResponse struct { +} + +type AttachScsiLun AttachScsiLunRequestType + +func init() { + t["AttachScsiLun"] = reflect.TypeOf((*AttachScsiLun)(nil)).Elem() +} + +type AttachScsiLunExRequestType struct { + This ManagedObjectReference `xml:"_this"` + LunUuid []string `xml:"lunUuid"` +} + +func init() { + t["AttachScsiLunExRequestType"] = reflect.TypeOf((*AttachScsiLunExRequestType)(nil)).Elem() +} + +type AttachScsiLunEx_Task AttachScsiLunExRequestType + +func init() { + t["AttachScsiLunEx_Task"] = reflect.TypeOf((*AttachScsiLunEx_Task)(nil)).Elem() +} + +type AttachScsiLunEx_TaskResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + +type AttachScsiLunRequestType struct { + This ManagedObjectReference `xml:"_this"` + LunUuid string `xml:"lunUuid"` +} + +func init() { + t["AttachScsiLunRequestType"] = reflect.TypeOf((*AttachScsiLunRequestType)(nil)).Elem() +} + +type AttachScsiLunResponse struct { +} + +type AttachVmfsExtent AttachVmfsExtentRequestType + +func init() { + 
t["AttachVmfsExtent"] = reflect.TypeOf((*AttachVmfsExtent)(nil)).Elem() +} + +type AttachVmfsExtentRequestType struct { + This ManagedObjectReference `xml:"_this"` + VmfsPath string `xml:"vmfsPath"` + Extent HostScsiDiskPartition `xml:"extent"` +} + +func init() { + t["AttachVmfsExtentRequestType"] = reflect.TypeOf((*AttachVmfsExtentRequestType)(nil)).Elem() +} + +type AttachVmfsExtentResponse struct { +} + +type AuthMinimumAdminPermission struct { + VimFault +} + +func init() { + t["AuthMinimumAdminPermission"] = reflect.TypeOf((*AuthMinimumAdminPermission)(nil)).Elem() +} + +type AuthMinimumAdminPermissionFault AuthMinimumAdminPermission + +func init() { + t["AuthMinimumAdminPermissionFault"] = reflect.TypeOf((*AuthMinimumAdminPermissionFault)(nil)).Elem() +} + +type AuthenticationProfile struct { + ApplyProfile + + ActiveDirectory *ActiveDirectoryProfile `xml:"activeDirectory,omitempty"` +} + +func init() { + t["AuthenticationProfile"] = reflect.TypeOf((*AuthenticationProfile)(nil)).Elem() +} + +type AuthorizationDescription struct { + DynamicData + + Privilege []BaseElementDescription `xml:"privilege,typeattr"` + PrivilegeGroup []BaseElementDescription `xml:"privilegeGroup,typeattr"` +} + +func init() { + t["AuthorizationDescription"] = reflect.TypeOf((*AuthorizationDescription)(nil)).Elem() +} + +type AuthorizationEvent struct { + Event +} + +func init() { + t["AuthorizationEvent"] = reflect.TypeOf((*AuthorizationEvent)(nil)).Elem() +} + +type AuthorizationPrivilege struct { + DynamicData + + PrivId string `xml:"privId"` + OnParent bool `xml:"onParent"` + Name string `xml:"name"` + PrivGroupName string `xml:"privGroupName"` +} + +func init() { + t["AuthorizationPrivilege"] = reflect.TypeOf((*AuthorizationPrivilege)(nil)).Elem() +} + +type AuthorizationRole struct { + DynamicData + + RoleId int `xml:"roleId"` + System bool `xml:"system"` + Name string `xml:"name"` + Info BaseDescription `xml:"info,typeattr"` + Privilege []string `xml:"privilege,omitempty"` +} + +func init() { + t["AuthorizationRole"] = reflect.TypeOf((*AuthorizationRole)(nil)).Elem() +} + +type AutoStartDefaults struct { + DynamicData + + Enabled *bool `xml:"enabled"` + StartDelay int `xml:"startDelay,omitempty"` + StopDelay int `xml:"stopDelay,omitempty"` + WaitForHeartbeat *bool `xml:"waitForHeartbeat"` + StopAction string `xml:"stopAction,omitempty"` +} + +func init() { + t["AutoStartDefaults"] = reflect.TypeOf((*AutoStartDefaults)(nil)).Elem() +} + +type AutoStartPowerInfo struct { + DynamicData + + Key ManagedObjectReference `xml:"key"` + StartOrder int `xml:"startOrder"` + StartDelay int `xml:"startDelay"` + WaitForHeartbeat AutoStartWaitHeartbeatSetting `xml:"waitForHeartbeat"` + StartAction string `xml:"startAction"` + StopDelay int `xml:"stopDelay"` + StopAction string `xml:"stopAction"` +} + +func init() { + t["AutoStartPowerInfo"] = reflect.TypeOf((*AutoStartPowerInfo)(nil)).Elem() +} + +type AutoStartPowerOff AutoStartPowerOffRequestType + +func init() { + t["AutoStartPowerOff"] = reflect.TypeOf((*AutoStartPowerOff)(nil)).Elem() +} + +type AutoStartPowerOffRequestType struct { + This ManagedObjectReference `xml:"_this"` +} + +func init() { + t["AutoStartPowerOffRequestType"] = reflect.TypeOf((*AutoStartPowerOffRequestType)(nil)).Elem() +} + +type AutoStartPowerOffResponse struct { +} + +type AutoStartPowerOn AutoStartPowerOnRequestType + +func init() { + t["AutoStartPowerOn"] = reflect.TypeOf((*AutoStartPowerOn)(nil)).Elem() +} + +type AutoStartPowerOnRequestType struct { + This ManagedObjectReference 
`xml:"_this"` +} + +func init() { + t["AutoStartPowerOnRequestType"] = reflect.TypeOf((*AutoStartPowerOnRequestType)(nil)).Elem() +} + +type AutoStartPowerOnResponse struct { +} + +type BackupBlobReadFailure struct { + DvsFault + + EntityName string `xml:"entityName"` + EntityType string `xml:"entityType"` + Fault LocalizedMethodFault `xml:"fault"` +} + +func init() { + t["BackupBlobReadFailure"] = reflect.TypeOf((*BackupBlobReadFailure)(nil)).Elem() +} + +type BackupBlobReadFailureFault BackupBlobReadFailure + +func init() { + t["BackupBlobReadFailureFault"] = reflect.TypeOf((*BackupBlobReadFailureFault)(nil)).Elem() +} + +type BackupBlobWriteFailure struct { + DvsFault + + EntityName string `xml:"entityName"` + EntityType string `xml:"entityType"` + Fault LocalizedMethodFault `xml:"fault"` +} + +func init() { + t["BackupBlobWriteFailure"] = reflect.TypeOf((*BackupBlobWriteFailure)(nil)).Elem() +} + +type BackupBlobWriteFailureFault BackupBlobWriteFailure + +func init() { + t["BackupBlobWriteFailureFault"] = reflect.TypeOf((*BackupBlobWriteFailureFault)(nil)).Elem() +} + +type BackupFirmwareConfiguration BackupFirmwareConfigurationRequestType + +func init() { + t["BackupFirmwareConfiguration"] = reflect.TypeOf((*BackupFirmwareConfiguration)(nil)).Elem() +} + +type BackupFirmwareConfigurationRequestType struct { + This ManagedObjectReference `xml:"_this"` +} + +func init() { + t["BackupFirmwareConfigurationRequestType"] = reflect.TypeOf((*BackupFirmwareConfigurationRequestType)(nil)).Elem() +} + +type BackupFirmwareConfigurationResponse struct { + Returnval string `xml:"returnval"` +} + +type BadUsernameSessionEvent struct { + SessionEvent + + IpAddress string `xml:"ipAddress"` +} + +func init() { + t["BadUsernameSessionEvent"] = reflect.TypeOf((*BadUsernameSessionEvent)(nil)).Elem() +} + +type BatchResult struct { + DynamicData + + Result string `xml:"result"` + HostKey string `xml:"hostKey"` + Ds *ManagedObjectReference `xml:"ds,omitempty"` + Fault *LocalizedMethodFault `xml:"fault,omitempty"` +} + +func init() { + t["BatchResult"] = reflect.TypeOf((*BatchResult)(nil)).Elem() +} + +type BindVnic BindVnicRequestType + +func init() { + t["BindVnic"] = reflect.TypeOf((*BindVnic)(nil)).Elem() +} + +type BindVnicRequestType struct { + This ManagedObjectReference `xml:"_this"` + IScsiHbaName string `xml:"iScsiHbaName"` + VnicDevice string `xml:"vnicDevice"` +} + +func init() { + t["BindVnicRequestType"] = reflect.TypeOf((*BindVnicRequestType)(nil)).Elem() +} + +type BindVnicResponse struct { +} + +type BlockedByFirewall struct { + HostConfigFault +} + +func init() { + t["BlockedByFirewall"] = reflect.TypeOf((*BlockedByFirewall)(nil)).Elem() +} + +type BlockedByFirewallFault BlockedByFirewall + +func init() { + t["BlockedByFirewallFault"] = reflect.TypeOf((*BlockedByFirewallFault)(nil)).Elem() +} + +type BoolOption struct { + OptionType + + Supported bool `xml:"supported"` + DefaultValue bool `xml:"defaultValue"` +} + +func init() { + t["BoolOption"] = reflect.TypeOf((*BoolOption)(nil)).Elem() +} + +type BoolPolicy struct { + InheritablePolicy + + Value *bool `xml:"value"` +} + +func init() { + t["BoolPolicy"] = reflect.TypeOf((*BoolPolicy)(nil)).Elem() +} + +type BrowseDiagnosticLog BrowseDiagnosticLogRequestType + +func init() { + t["BrowseDiagnosticLog"] = reflect.TypeOf((*BrowseDiagnosticLog)(nil)).Elem() +} + +type BrowseDiagnosticLogRequestType struct { + This ManagedObjectReference `xml:"_this"` + Host *ManagedObjectReference `xml:"host,omitempty"` + Key string `xml:"key"` + Start int 
`xml:"start,omitempty"` + Lines int `xml:"lines,omitempty"` +} + +func init() { + t["BrowseDiagnosticLogRequestType"] = reflect.TypeOf((*BrowseDiagnosticLogRequestType)(nil)).Elem() +} + +type BrowseDiagnosticLogResponse struct { + Returnval DiagnosticManagerLogHeader `xml:"returnval"` +} + +type CAMServerRefusedConnection struct { + InvalidCAMServer +} + +func init() { + t["CAMServerRefusedConnection"] = reflect.TypeOf((*CAMServerRefusedConnection)(nil)).Elem() +} + +type CAMServerRefusedConnectionFault CAMServerRefusedConnection + +func init() { + t["CAMServerRefusedConnectionFault"] = reflect.TypeOf((*CAMServerRefusedConnectionFault)(nil)).Elem() +} + +type CanProvisionObjects CanProvisionObjectsRequestType + +func init() { + t["CanProvisionObjects"] = reflect.TypeOf((*CanProvisionObjects)(nil)).Elem() +} + +type CanProvisionObjectsRequestType struct { + This ManagedObjectReference `xml:"_this"` + Npbs []VsanNewPolicyBatch `xml:"npbs"` + IgnoreSatisfiability *bool `xml:"ignoreSatisfiability"` +} + +func init() { + t["CanProvisionObjectsRequestType"] = reflect.TypeOf((*CanProvisionObjectsRequestType)(nil)).Elem() +} + +type CanProvisionObjectsResponse struct { + Returnval []VsanPolicySatisfiability `xml:"returnval"` +} + +type CancelRecommendation CancelRecommendationRequestType + +func init() { + t["CancelRecommendation"] = reflect.TypeOf((*CancelRecommendation)(nil)).Elem() +} + +type CancelRecommendationRequestType struct { + This ManagedObjectReference `xml:"_this"` + Key string `xml:"key"` +} + +func init() { + t["CancelRecommendationRequestType"] = reflect.TypeOf((*CancelRecommendationRequestType)(nil)).Elem() +} + +type CancelRecommendationResponse struct { +} + +type CancelRetrievePropertiesEx CancelRetrievePropertiesExRequestType + +func init() { + t["CancelRetrievePropertiesEx"] = reflect.TypeOf((*CancelRetrievePropertiesEx)(nil)).Elem() +} + +type CancelRetrievePropertiesExRequestType struct { + This ManagedObjectReference `xml:"_this"` + Token string `xml:"token"` +} + +func init() { + t["CancelRetrievePropertiesExRequestType"] = reflect.TypeOf((*CancelRetrievePropertiesExRequestType)(nil)).Elem() +} + +type CancelRetrievePropertiesExResponse struct { +} + +type CancelStorageDrsRecommendation CancelStorageDrsRecommendationRequestType + +func init() { + t["CancelStorageDrsRecommendation"] = reflect.TypeOf((*CancelStorageDrsRecommendation)(nil)).Elem() +} + +type CancelStorageDrsRecommendationRequestType struct { + This ManagedObjectReference `xml:"_this"` + Key []string `xml:"key"` +} + +func init() { + t["CancelStorageDrsRecommendationRequestType"] = reflect.TypeOf((*CancelStorageDrsRecommendationRequestType)(nil)).Elem() +} + +type CancelStorageDrsRecommendationResponse struct { +} + +type CancelTask CancelTaskRequestType + +func init() { + t["CancelTask"] = reflect.TypeOf((*CancelTask)(nil)).Elem() +} + +type CancelTaskRequestType struct { + This ManagedObjectReference `xml:"_this"` +} + +func init() { + t["CancelTaskRequestType"] = reflect.TypeOf((*CancelTaskRequestType)(nil)).Elem() +} + +type CancelTaskResponse struct { +} + +type CancelWaitForUpdates CancelWaitForUpdatesRequestType + +func init() { + t["CancelWaitForUpdates"] = reflect.TypeOf((*CancelWaitForUpdates)(nil)).Elem() +} + +type CancelWaitForUpdatesRequestType struct { + This ManagedObjectReference `xml:"_this"` +} + +func init() { + t["CancelWaitForUpdatesRequestType"] = reflect.TypeOf((*CancelWaitForUpdatesRequestType)(nil)).Elem() +} + +type CancelWaitForUpdatesResponse struct { +} + +type 
CanceledHostOperationEvent struct { + HostEvent +} + +func init() { + t["CanceledHostOperationEvent"] = reflect.TypeOf((*CanceledHostOperationEvent)(nil)).Elem() +} + +type CannotAccessFile struct { + FileFault +} + +func init() { + t["CannotAccessFile"] = reflect.TypeOf((*CannotAccessFile)(nil)).Elem() +} + +type CannotAccessFileFault CannotAccessFile + +func init() { + t["CannotAccessFileFault"] = reflect.TypeOf((*CannotAccessFileFault)(nil)).Elem() +} + +type CannotAccessLocalSource struct { + VimFault +} + +func init() { + t["CannotAccessLocalSource"] = reflect.TypeOf((*CannotAccessLocalSource)(nil)).Elem() +} + +type CannotAccessLocalSourceFault CannotAccessLocalSource + +func init() { + t["CannotAccessLocalSourceFault"] = reflect.TypeOf((*CannotAccessLocalSourceFault)(nil)).Elem() +} + +type CannotAccessNetwork struct { + CannotAccessVmDevice + + Network *ManagedObjectReference `xml:"network,omitempty"` +} + +func init() { + t["CannotAccessNetwork"] = reflect.TypeOf((*CannotAccessNetwork)(nil)).Elem() +} + +type CannotAccessNetworkFault BaseCannotAccessNetwork + +func init() { + t["CannotAccessNetworkFault"] = reflect.TypeOf((*CannotAccessNetworkFault)(nil)).Elem() +} + +type CannotAccessVmComponent struct { + VmConfigFault +} + +func init() { + t["CannotAccessVmComponent"] = reflect.TypeOf((*CannotAccessVmComponent)(nil)).Elem() +} + +type CannotAccessVmComponentFault BaseCannotAccessVmComponent + +func init() { + t["CannotAccessVmComponentFault"] = reflect.TypeOf((*CannotAccessVmComponentFault)(nil)).Elem() +} + +type CannotAccessVmConfig struct { + CannotAccessVmComponent + + Reason LocalizedMethodFault `xml:"reason"` +} + +func init() { + t["CannotAccessVmConfig"] = reflect.TypeOf((*CannotAccessVmConfig)(nil)).Elem() +} + +type CannotAccessVmConfigFault CannotAccessVmConfig + +func init() { + t["CannotAccessVmConfigFault"] = reflect.TypeOf((*CannotAccessVmConfigFault)(nil)).Elem() +} + +type CannotAccessVmDevice struct { + CannotAccessVmComponent + + Device string `xml:"device"` + Backing string `xml:"backing"` + Connected bool `xml:"connected"` +} + +func init() { + t["CannotAccessVmDevice"] = reflect.TypeOf((*CannotAccessVmDevice)(nil)).Elem() +} + +type CannotAccessVmDeviceFault BaseCannotAccessVmDevice + +func init() { + t["CannotAccessVmDeviceFault"] = reflect.TypeOf((*CannotAccessVmDeviceFault)(nil)).Elem() +} + +type CannotAccessVmDisk struct { + CannotAccessVmDevice + + Fault LocalizedMethodFault `xml:"fault"` +} + +func init() { + t["CannotAccessVmDisk"] = reflect.TypeOf((*CannotAccessVmDisk)(nil)).Elem() +} + +type CannotAccessVmDiskFault BaseCannotAccessVmDisk + +func init() { + t["CannotAccessVmDiskFault"] = reflect.TypeOf((*CannotAccessVmDiskFault)(nil)).Elem() +} + +type CannotAddHostWithFTVmAsStandalone struct { + HostConnectFault +} + +func init() { + t["CannotAddHostWithFTVmAsStandalone"] = reflect.TypeOf((*CannotAddHostWithFTVmAsStandalone)(nil)).Elem() +} + +type CannotAddHostWithFTVmAsStandaloneFault CannotAddHostWithFTVmAsStandalone + +func init() { + t["CannotAddHostWithFTVmAsStandaloneFault"] = reflect.TypeOf((*CannotAddHostWithFTVmAsStandaloneFault)(nil)).Elem() +} + +type CannotAddHostWithFTVmToDifferentCluster struct { + HostConnectFault +} + +func init() { + t["CannotAddHostWithFTVmToDifferentCluster"] = reflect.TypeOf((*CannotAddHostWithFTVmToDifferentCluster)(nil)).Elem() +} + +type CannotAddHostWithFTVmToDifferentClusterFault CannotAddHostWithFTVmToDifferentCluster + +func init() { + t["CannotAddHostWithFTVmToDifferentClusterFault"] = 
reflect.TypeOf((*CannotAddHostWithFTVmToDifferentClusterFault)(nil)).Elem() +} + +type CannotAddHostWithFTVmToNonHACluster struct { + HostConnectFault +} + +func init() { + t["CannotAddHostWithFTVmToNonHACluster"] = reflect.TypeOf((*CannotAddHostWithFTVmToNonHACluster)(nil)).Elem() +} + +type CannotAddHostWithFTVmToNonHAClusterFault CannotAddHostWithFTVmToNonHACluster + +func init() { + t["CannotAddHostWithFTVmToNonHAClusterFault"] = reflect.TypeOf((*CannotAddHostWithFTVmToNonHAClusterFault)(nil)).Elem() +} + +type CannotChangeDrsBehaviorForFtSecondary struct { + VmFaultToleranceIssue + + Vm ManagedObjectReference `xml:"vm"` + VmName string `xml:"vmName"` +} + +func init() { + t["CannotChangeDrsBehaviorForFtSecondary"] = reflect.TypeOf((*CannotChangeDrsBehaviorForFtSecondary)(nil)).Elem() +} + +type CannotChangeDrsBehaviorForFtSecondaryFault CannotChangeDrsBehaviorForFtSecondary + +func init() { + t["CannotChangeDrsBehaviorForFtSecondaryFault"] = reflect.TypeOf((*CannotChangeDrsBehaviorForFtSecondaryFault)(nil)).Elem() +} + +type CannotChangeHaSettingsForFtSecondary struct { + VmFaultToleranceIssue + + Vm ManagedObjectReference `xml:"vm"` + VmName string `xml:"vmName"` +} + +func init() { + t["CannotChangeHaSettingsForFtSecondary"] = reflect.TypeOf((*CannotChangeHaSettingsForFtSecondary)(nil)).Elem() +} + +type CannotChangeHaSettingsForFtSecondaryFault CannotChangeHaSettingsForFtSecondary + +func init() { + t["CannotChangeHaSettingsForFtSecondaryFault"] = reflect.TypeOf((*CannotChangeHaSettingsForFtSecondaryFault)(nil)).Elem() +} + +type CannotChangeVsanClusterUuid struct { + VsanFault +} + +func init() { + t["CannotChangeVsanClusterUuid"] = reflect.TypeOf((*CannotChangeVsanClusterUuid)(nil)).Elem() +} + +type CannotChangeVsanClusterUuidFault CannotChangeVsanClusterUuid + +func init() { + t["CannotChangeVsanClusterUuidFault"] = reflect.TypeOf((*CannotChangeVsanClusterUuidFault)(nil)).Elem() +} + +type CannotChangeVsanNodeUuid struct { + VsanFault +} + +func init() { + t["CannotChangeVsanNodeUuid"] = reflect.TypeOf((*CannotChangeVsanNodeUuid)(nil)).Elem() +} + +type CannotChangeVsanNodeUuidFault CannotChangeVsanNodeUuid + +func init() { + t["CannotChangeVsanNodeUuidFault"] = reflect.TypeOf((*CannotChangeVsanNodeUuidFault)(nil)).Elem() +} + +type CannotComputeFTCompatibleHosts struct { + VmFaultToleranceIssue + + Vm ManagedObjectReference `xml:"vm"` + VmName string `xml:"vmName"` +} + +func init() { + t["CannotComputeFTCompatibleHosts"] = reflect.TypeOf((*CannotComputeFTCompatibleHosts)(nil)).Elem() +} + +type CannotComputeFTCompatibleHostsFault CannotComputeFTCompatibleHosts + +func init() { + t["CannotComputeFTCompatibleHostsFault"] = reflect.TypeOf((*CannotComputeFTCompatibleHostsFault)(nil)).Elem() +} + +type CannotCreateFile struct { + FileFault +} + +func init() { + t["CannotCreateFile"] = reflect.TypeOf((*CannotCreateFile)(nil)).Elem() +} + +type CannotCreateFileFault CannotCreateFile + +func init() { + t["CannotCreateFileFault"] = reflect.TypeOf((*CannotCreateFileFault)(nil)).Elem() +} + +type CannotDecryptPasswords struct { + CustomizationFault +} + +func init() { + t["CannotDecryptPasswords"] = reflect.TypeOf((*CannotDecryptPasswords)(nil)).Elem() +} + +type CannotDecryptPasswordsFault CannotDecryptPasswords + +func init() { + t["CannotDecryptPasswordsFault"] = reflect.TypeOf((*CannotDecryptPasswordsFault)(nil)).Elem() +} + +type CannotDeleteFile struct { + FileFault +} + +func init() { + t["CannotDeleteFile"] = reflect.TypeOf((*CannotDeleteFile)(nil)).Elem() +} + +type 
CannotDeleteFileFault CannotDeleteFile + +func init() { + t["CannotDeleteFileFault"] = reflect.TypeOf((*CannotDeleteFileFault)(nil)).Elem() +} + +type CannotDisableDrsOnClusterManagedByVDC struct { + RuntimeFault +} + +func init() { + t["CannotDisableDrsOnClusterManagedByVDC"] = reflect.TypeOf((*CannotDisableDrsOnClusterManagedByVDC)(nil)).Elem() +} + +type CannotDisableDrsOnClusterManagedByVDCFault CannotDisableDrsOnClusterManagedByVDC + +func init() { + t["CannotDisableDrsOnClusterManagedByVDCFault"] = reflect.TypeOf((*CannotDisableDrsOnClusterManagedByVDCFault)(nil)).Elem() +} + +type CannotDisableDrsOnClustersWithVApps struct { + RuntimeFault +} + +func init() { + t["CannotDisableDrsOnClustersWithVApps"] = reflect.TypeOf((*CannotDisableDrsOnClustersWithVApps)(nil)).Elem() +} + +type CannotDisableDrsOnClustersWithVAppsFault CannotDisableDrsOnClustersWithVApps + +func init() { + t["CannotDisableDrsOnClustersWithVAppsFault"] = reflect.TypeOf((*CannotDisableDrsOnClustersWithVAppsFault)(nil)).Elem() +} + +type CannotDisableSnapshot struct { + VmConfigFault +} + +func init() { + t["CannotDisableSnapshot"] = reflect.TypeOf((*CannotDisableSnapshot)(nil)).Elem() +} + +type CannotDisableSnapshotFault CannotDisableSnapshot + +func init() { + t["CannotDisableSnapshotFault"] = reflect.TypeOf((*CannotDisableSnapshotFault)(nil)).Elem() +} + +type CannotDisconnectHostWithFaultToleranceVm struct { + VimFault + + HostName string `xml:"hostName"` +} + +func init() { + t["CannotDisconnectHostWithFaultToleranceVm"] = reflect.TypeOf((*CannotDisconnectHostWithFaultToleranceVm)(nil)).Elem() +} + +type CannotDisconnectHostWithFaultToleranceVmFault CannotDisconnectHostWithFaultToleranceVm + +func init() { + t["CannotDisconnectHostWithFaultToleranceVmFault"] = reflect.TypeOf((*CannotDisconnectHostWithFaultToleranceVmFault)(nil)).Elem() +} + +type CannotEnableVmcpForCluster struct { + VimFault + + Host *ManagedObjectReference `xml:"host,omitempty"` + HostName string `xml:"hostName,omitempty"` + Reason string `xml:"reason,omitempty"` +} + +func init() { + t["CannotEnableVmcpForCluster"] = reflect.TypeOf((*CannotEnableVmcpForCluster)(nil)).Elem() +} + +type CannotEnableVmcpForClusterFault CannotEnableVmcpForCluster + +func init() { + t["CannotEnableVmcpForClusterFault"] = reflect.TypeOf((*CannotEnableVmcpForClusterFault)(nil)).Elem() +} + +type CannotModifyConfigCpuRequirements struct { + MigrationFault +} + +func init() { + t["CannotModifyConfigCpuRequirements"] = reflect.TypeOf((*CannotModifyConfigCpuRequirements)(nil)).Elem() +} + +type CannotModifyConfigCpuRequirementsFault CannotModifyConfigCpuRequirements + +func init() { + t["CannotModifyConfigCpuRequirementsFault"] = reflect.TypeOf((*CannotModifyConfigCpuRequirementsFault)(nil)).Elem() +} + +type CannotMoveFaultToleranceVm struct { + VimFault + + MoveType string `xml:"moveType"` + VmName string `xml:"vmName"` +} + +func init() { + t["CannotMoveFaultToleranceVm"] = reflect.TypeOf((*CannotMoveFaultToleranceVm)(nil)).Elem() +} + +type CannotMoveFaultToleranceVmFault CannotMoveFaultToleranceVm + +func init() { + t["CannotMoveFaultToleranceVmFault"] = reflect.TypeOf((*CannotMoveFaultToleranceVmFault)(nil)).Elem() +} + +type CannotMoveHostWithFaultToleranceVm struct { + VimFault +} + +func init() { + t["CannotMoveHostWithFaultToleranceVm"] = reflect.TypeOf((*CannotMoveHostWithFaultToleranceVm)(nil)).Elem() +} + +type CannotMoveHostWithFaultToleranceVmFault CannotMoveHostWithFaultToleranceVm + +func init() { + t["CannotMoveHostWithFaultToleranceVmFault"] = 
reflect.TypeOf((*CannotMoveHostWithFaultToleranceVmFault)(nil)).Elem() +} + +type CannotMoveVmWithDeltaDisk struct { + MigrationFault + + Device string `xml:"device"` +} + +func init() { + t["CannotMoveVmWithDeltaDisk"] = reflect.TypeOf((*CannotMoveVmWithDeltaDisk)(nil)).Elem() +} + +type CannotMoveVmWithDeltaDiskFault CannotMoveVmWithDeltaDisk + +func init() { + t["CannotMoveVmWithDeltaDiskFault"] = reflect.TypeOf((*CannotMoveVmWithDeltaDiskFault)(nil)).Elem() +} + +type CannotMoveVmWithNativeDeltaDisk struct { + MigrationFault +} + +func init() { + t["CannotMoveVmWithNativeDeltaDisk"] = reflect.TypeOf((*CannotMoveVmWithNativeDeltaDisk)(nil)).Elem() +} + +type CannotMoveVmWithNativeDeltaDiskFault CannotMoveVmWithNativeDeltaDisk + +func init() { + t["CannotMoveVmWithNativeDeltaDiskFault"] = reflect.TypeOf((*CannotMoveVmWithNativeDeltaDiskFault)(nil)).Elem() +} + +type CannotMoveVsanEnabledHost struct { + VsanFault +} + +func init() { + t["CannotMoveVsanEnabledHost"] = reflect.TypeOf((*CannotMoveVsanEnabledHost)(nil)).Elem() +} + +type CannotMoveVsanEnabledHostFault BaseCannotMoveVsanEnabledHost + +func init() { + t["CannotMoveVsanEnabledHostFault"] = reflect.TypeOf((*CannotMoveVsanEnabledHostFault)(nil)).Elem() +} + +type CannotPlaceWithoutPrerequisiteMoves struct { + VimFault +} + +func init() { + t["CannotPlaceWithoutPrerequisiteMoves"] = reflect.TypeOf((*CannotPlaceWithoutPrerequisiteMoves)(nil)).Elem() +} + +type CannotPlaceWithoutPrerequisiteMovesFault CannotPlaceWithoutPrerequisiteMoves + +func init() { + t["CannotPlaceWithoutPrerequisiteMovesFault"] = reflect.TypeOf((*CannotPlaceWithoutPrerequisiteMovesFault)(nil)).Elem() +} + +type CannotPowerOffVmInCluster struct { + InvalidState + + Operation string `xml:"operation"` + Vm ManagedObjectReference `xml:"vm"` + VmName string `xml:"vmName"` +} + +func init() { + t["CannotPowerOffVmInCluster"] = reflect.TypeOf((*CannotPowerOffVmInCluster)(nil)).Elem() +} + +type CannotPowerOffVmInClusterFault CannotPowerOffVmInCluster + +func init() { + t["CannotPowerOffVmInClusterFault"] = reflect.TypeOf((*CannotPowerOffVmInClusterFault)(nil)).Elem() +} + +type CannotReconfigureVsanWhenHaEnabled struct { + VsanFault +} + +func init() { + t["CannotReconfigureVsanWhenHaEnabled"] = reflect.TypeOf((*CannotReconfigureVsanWhenHaEnabled)(nil)).Elem() +} + +type CannotReconfigureVsanWhenHaEnabledFault CannotReconfigureVsanWhenHaEnabled + +func init() { + t["CannotReconfigureVsanWhenHaEnabledFault"] = reflect.TypeOf((*CannotReconfigureVsanWhenHaEnabledFault)(nil)).Elem() +} + +type CannotUseNetwork struct { + VmConfigFault + + Device string `xml:"device"` + Backing string `xml:"backing"` + Connected bool `xml:"connected"` + Reason string `xml:"reason"` + Network *ManagedObjectReference `xml:"network,omitempty"` +} + +func init() { + t["CannotUseNetwork"] = reflect.TypeOf((*CannotUseNetwork)(nil)).Elem() +} + +type CannotUseNetworkFault CannotUseNetwork + +func init() { + t["CannotUseNetworkFault"] = reflect.TypeOf((*CannotUseNetworkFault)(nil)).Elem() +} + +type Capability struct { + DynamicData + + ProvisioningSupported bool `xml:"provisioningSupported"` + MultiHostSupported bool `xml:"multiHostSupported"` + UserShellAccessSupported bool `xml:"userShellAccessSupported"` + SupportedEVCMode []EVCMode `xml:"supportedEVCMode,omitempty"` + NetworkBackupAndRestoreSupported *bool `xml:"networkBackupAndRestoreSupported"` +} + +func init() { + t["Capability"] = reflect.TypeOf((*Capability)(nil)).Elem() +} + +type CertMgrRefreshCACertificatesAndCRLsRequestType struct { 
+ This ManagedObjectReference `xml:"_this"` + Host []ManagedObjectReference `xml:"host"` +} + +func init() { + t["CertMgrRefreshCACertificatesAndCRLsRequestType"] = reflect.TypeOf((*CertMgrRefreshCACertificatesAndCRLsRequestType)(nil)).Elem() +} + +type CertMgrRefreshCACertificatesAndCRLs_Task CertMgrRefreshCACertificatesAndCRLsRequestType + +func init() { + t["CertMgrRefreshCACertificatesAndCRLs_Task"] = reflect.TypeOf((*CertMgrRefreshCACertificatesAndCRLs_Task)(nil)).Elem() +} + +type CertMgrRefreshCACertificatesAndCRLs_TaskResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + +type CertMgrRefreshCertificatesRequestType struct { + This ManagedObjectReference `xml:"_this"` + Host []ManagedObjectReference `xml:"host"` +} + +func init() { + t["CertMgrRefreshCertificatesRequestType"] = reflect.TypeOf((*CertMgrRefreshCertificatesRequestType)(nil)).Elem() +} + +type CertMgrRefreshCertificates_Task CertMgrRefreshCertificatesRequestType + +func init() { + t["CertMgrRefreshCertificates_Task"] = reflect.TypeOf((*CertMgrRefreshCertificates_Task)(nil)).Elem() +} + +type CertMgrRefreshCertificates_TaskResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + +type CertMgrRevokeCertificatesRequestType struct { + This ManagedObjectReference `xml:"_this"` + Host []ManagedObjectReference `xml:"host"` +} + +func init() { + t["CertMgrRevokeCertificatesRequestType"] = reflect.TypeOf((*CertMgrRevokeCertificatesRequestType)(nil)).Elem() +} + +type CertMgrRevokeCertificates_Task CertMgrRevokeCertificatesRequestType + +func init() { + t["CertMgrRevokeCertificates_Task"] = reflect.TypeOf((*CertMgrRevokeCertificates_Task)(nil)).Elem() +} + +type CertMgrRevokeCertificates_TaskResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + +type ChangeAccessMode ChangeAccessModeRequestType + +func init() { + t["ChangeAccessMode"] = reflect.TypeOf((*ChangeAccessMode)(nil)).Elem() +} + +type ChangeAccessModeRequestType struct { + This ManagedObjectReference `xml:"_this"` + Principal string `xml:"principal"` + IsGroup bool `xml:"isGroup"` + AccessMode HostAccessMode `xml:"accessMode"` +} + +func init() { + t["ChangeAccessModeRequestType"] = reflect.TypeOf((*ChangeAccessModeRequestType)(nil)).Elem() +} + +type ChangeAccessModeResponse struct { +} + +type ChangeFileAttributesInGuest ChangeFileAttributesInGuestRequestType + +func init() { + t["ChangeFileAttributesInGuest"] = reflect.TypeOf((*ChangeFileAttributesInGuest)(nil)).Elem() +} + +type ChangeFileAttributesInGuestRequestType struct { + This ManagedObjectReference `xml:"_this"` + Vm ManagedObjectReference `xml:"vm"` + Auth BaseGuestAuthentication `xml:"auth,typeattr"` + GuestFilePath string `xml:"guestFilePath"` + FileAttributes BaseGuestFileAttributes `xml:"fileAttributes,typeattr"` +} + +func init() { + t["ChangeFileAttributesInGuestRequestType"] = reflect.TypeOf((*ChangeFileAttributesInGuestRequestType)(nil)).Elem() +} + +type ChangeFileAttributesInGuestResponse struct { +} + +type ChangeLockdownMode ChangeLockdownModeRequestType + +func init() { + t["ChangeLockdownMode"] = reflect.TypeOf((*ChangeLockdownMode)(nil)).Elem() +} + +type ChangeLockdownModeRequestType struct { + This ManagedObjectReference `xml:"_this"` + Mode HostLockdownMode `xml:"mode"` +} + +func init() { + t["ChangeLockdownModeRequestType"] = reflect.TypeOf((*ChangeLockdownModeRequestType)(nil)).Elem() +} + +type ChangeLockdownModeResponse struct { +} + +type ChangeNFSUserPassword ChangeNFSUserPasswordRequestType + +func init() { + 
t["ChangeNFSUserPassword"] = reflect.TypeOf((*ChangeNFSUserPassword)(nil)).Elem() +} + +type ChangeNFSUserPasswordRequestType struct { + This ManagedObjectReference `xml:"_this"` + Password string `xml:"password"` +} + +func init() { + t["ChangeNFSUserPasswordRequestType"] = reflect.TypeOf((*ChangeNFSUserPasswordRequestType)(nil)).Elem() +} + +type ChangeNFSUserPasswordResponse struct { +} + +type ChangeOwner ChangeOwnerRequestType + +func init() { + t["ChangeOwner"] = reflect.TypeOf((*ChangeOwner)(nil)).Elem() +} + +type ChangeOwnerRequestType struct { + This ManagedObjectReference `xml:"_this"` + Name string `xml:"name"` + Datacenter *ManagedObjectReference `xml:"datacenter,omitempty"` + Owner string `xml:"owner"` +} + +func init() { + t["ChangeOwnerRequestType"] = reflect.TypeOf((*ChangeOwnerRequestType)(nil)).Elem() +} + +type ChangeOwnerResponse struct { +} + +type CheckAddHostEvcRequestType struct { + This ManagedObjectReference `xml:"_this"` + CnxSpec HostConnectSpec `xml:"cnxSpec"` +} + +func init() { + t["CheckAddHostEvcRequestType"] = reflect.TypeOf((*CheckAddHostEvcRequestType)(nil)).Elem() +} + +type CheckAddHostEvc_Task CheckAddHostEvcRequestType + +func init() { + t["CheckAddHostEvc_Task"] = reflect.TypeOf((*CheckAddHostEvc_Task)(nil)).Elem() +} + +type CheckAddHostEvc_TaskResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + +type CheckAnswerFileStatusRequestType struct { + This ManagedObjectReference `xml:"_this"` + Host []ManagedObjectReference `xml:"host"` +} + +func init() { + t["CheckAnswerFileStatusRequestType"] = reflect.TypeOf((*CheckAnswerFileStatusRequestType)(nil)).Elem() +} + +type CheckAnswerFileStatus_Task CheckAnswerFileStatusRequestType + +func init() { + t["CheckAnswerFileStatus_Task"] = reflect.TypeOf((*CheckAnswerFileStatus_Task)(nil)).Elem() +} + +type CheckAnswerFileStatus_TaskResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + +type CheckCompatibilityRequestType struct { + This ManagedObjectReference `xml:"_this"` + Vm ManagedObjectReference `xml:"vm"` + Host *ManagedObjectReference `xml:"host,omitempty"` + Pool *ManagedObjectReference `xml:"pool,omitempty"` + TestType []string `xml:"testType,omitempty"` +} + +func init() { + t["CheckCompatibilityRequestType"] = reflect.TypeOf((*CheckCompatibilityRequestType)(nil)).Elem() +} + +type CheckCompatibility_Task CheckCompatibilityRequestType + +func init() { + t["CheckCompatibility_Task"] = reflect.TypeOf((*CheckCompatibility_Task)(nil)).Elem() +} + +type CheckCompatibility_TaskResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + +type CheckComplianceRequestType struct { + This ManagedObjectReference `xml:"_this"` + Profile []ManagedObjectReference `xml:"profile,omitempty"` + Entity []ManagedObjectReference `xml:"entity,omitempty"` +} + +func init() { + t["CheckComplianceRequestType"] = reflect.TypeOf((*CheckComplianceRequestType)(nil)).Elem() +} + +type CheckCompliance_Task CheckComplianceRequestType + +func init() { + t["CheckCompliance_Task"] = reflect.TypeOf((*CheckCompliance_Task)(nil)).Elem() +} + +type CheckCompliance_TaskResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + +type CheckConfigureEvcModeRequestType struct { + This ManagedObjectReference `xml:"_this"` + EvcModeKey string `xml:"evcModeKey"` +} + +func init() { + t["CheckConfigureEvcModeRequestType"] = reflect.TypeOf((*CheckConfigureEvcModeRequestType)(nil)).Elem() +} + +type CheckConfigureEvcMode_Task CheckConfigureEvcModeRequestType + +func init() { 
+ t["CheckConfigureEvcMode_Task"] = reflect.TypeOf((*CheckConfigureEvcMode_Task)(nil)).Elem() +} + +type CheckConfigureEvcMode_TaskResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + +type CheckCustomizationResources CheckCustomizationResourcesRequestType + +func init() { + t["CheckCustomizationResources"] = reflect.TypeOf((*CheckCustomizationResources)(nil)).Elem() +} + +type CheckCustomizationResourcesRequestType struct { + This ManagedObjectReference `xml:"_this"` + GuestOs string `xml:"guestOs"` +} + +func init() { + t["CheckCustomizationResourcesRequestType"] = reflect.TypeOf((*CheckCustomizationResourcesRequestType)(nil)).Elem() +} + +type CheckCustomizationResourcesResponse struct { +} + +type CheckCustomizationSpec CheckCustomizationSpecRequestType + +func init() { + t["CheckCustomizationSpec"] = reflect.TypeOf((*CheckCustomizationSpec)(nil)).Elem() +} + +type CheckCustomizationSpecRequestType struct { + This ManagedObjectReference `xml:"_this"` + Spec CustomizationSpec `xml:"spec"` +} + +func init() { + t["CheckCustomizationSpecRequestType"] = reflect.TypeOf((*CheckCustomizationSpecRequestType)(nil)).Elem() +} + +type CheckCustomizationSpecResponse struct { +} + +type CheckForUpdates CheckForUpdatesRequestType + +func init() { + t["CheckForUpdates"] = reflect.TypeOf((*CheckForUpdates)(nil)).Elem() +} + +type CheckForUpdatesRequestType struct { + This ManagedObjectReference `xml:"_this"` + Version string `xml:"version,omitempty"` +} + +func init() { + t["CheckForUpdatesRequestType"] = reflect.TypeOf((*CheckForUpdatesRequestType)(nil)).Elem() +} + +type CheckForUpdatesResponse struct { + Returnval *UpdateSet `xml:"returnval,omitempty"` +} + +type CheckHostPatchRequestType struct { + This ManagedObjectReference `xml:"_this"` + MetaUrls []string `xml:"metaUrls,omitempty"` + BundleUrls []string `xml:"bundleUrls,omitempty"` + Spec *HostPatchManagerPatchManagerOperationSpec `xml:"spec,omitempty"` +} + +func init() { + t["CheckHostPatchRequestType"] = reflect.TypeOf((*CheckHostPatchRequestType)(nil)).Elem() +} + +type CheckHostPatch_Task CheckHostPatchRequestType + +func init() { + t["CheckHostPatch_Task"] = reflect.TypeOf((*CheckHostPatch_Task)(nil)).Elem() +} + +type CheckHostPatch_TaskResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + +type CheckLicenseFeature CheckLicenseFeatureRequestType + +func init() { + t["CheckLicenseFeature"] = reflect.TypeOf((*CheckLicenseFeature)(nil)).Elem() +} + +type CheckLicenseFeatureRequestType struct { + This ManagedObjectReference `xml:"_this"` + Host *ManagedObjectReference `xml:"host,omitempty"` + FeatureKey string `xml:"featureKey"` +} + +func init() { + t["CheckLicenseFeatureRequestType"] = reflect.TypeOf((*CheckLicenseFeatureRequestType)(nil)).Elem() +} + +type CheckLicenseFeatureResponse struct { + Returnval bool `xml:"returnval"` +} + +type CheckMigrateRequestType struct { + This ManagedObjectReference `xml:"_this"` + Vm ManagedObjectReference `xml:"vm"` + Host *ManagedObjectReference `xml:"host,omitempty"` + Pool *ManagedObjectReference `xml:"pool,omitempty"` + State VirtualMachinePowerState `xml:"state,omitempty"` + TestType []string `xml:"testType,omitempty"` +} + +func init() { + t["CheckMigrateRequestType"] = reflect.TypeOf((*CheckMigrateRequestType)(nil)).Elem() +} + +type CheckMigrate_Task CheckMigrateRequestType + +func init() { + t["CheckMigrate_Task"] = reflect.TypeOf((*CheckMigrate_Task)(nil)).Elem() +} + +type CheckMigrate_TaskResponse struct { + Returnval ManagedObjectReference 
`xml:"returnval"` +} + +type CheckProfileComplianceRequestType struct { + This ManagedObjectReference `xml:"_this"` + Entity []ManagedObjectReference `xml:"entity,omitempty"` +} + +func init() { + t["CheckProfileComplianceRequestType"] = reflect.TypeOf((*CheckProfileComplianceRequestType)(nil)).Elem() +} + +type CheckProfileCompliance_Task CheckProfileComplianceRequestType + +func init() { + t["CheckProfileCompliance_Task"] = reflect.TypeOf((*CheckProfileCompliance_Task)(nil)).Elem() +} + +type CheckProfileCompliance_TaskResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + +type CheckRelocateRequestType struct { + This ManagedObjectReference `xml:"_this"` + Vm ManagedObjectReference `xml:"vm"` + Spec VirtualMachineRelocateSpec `xml:"spec"` + TestType []string `xml:"testType,omitempty"` +} + +func init() { + t["CheckRelocateRequestType"] = reflect.TypeOf((*CheckRelocateRequestType)(nil)).Elem() +} + +type CheckRelocate_Task CheckRelocateRequestType + +func init() { + t["CheckRelocate_Task"] = reflect.TypeOf((*CheckRelocate_Task)(nil)).Elem() +} + +type CheckRelocate_TaskResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + +type CheckResult struct { + DynamicData + + Vm *ManagedObjectReference `xml:"vm,omitempty"` + Host *ManagedObjectReference `xml:"host,omitempty"` + Warning []LocalizedMethodFault `xml:"warning,omitempty"` + Error []LocalizedMethodFault `xml:"error,omitempty"` +} + +func init() { + t["CheckResult"] = reflect.TypeOf((*CheckResult)(nil)).Elem() +} + +type ChoiceOption struct { + OptionType + + ChoiceInfo []BaseElementDescription `xml:"choiceInfo,typeattr"` + DefaultIndex int `xml:"defaultIndex,omitempty"` +} + +func init() { + t["ChoiceOption"] = reflect.TypeOf((*ChoiceOption)(nil)).Elem() +} + +type ClearComplianceStatus ClearComplianceStatusRequestType + +func init() { + t["ClearComplianceStatus"] = reflect.TypeOf((*ClearComplianceStatus)(nil)).Elem() +} + +type ClearComplianceStatusRequestType struct { + This ManagedObjectReference `xml:"_this"` + Profile []ManagedObjectReference `xml:"profile,omitempty"` + Entity []ManagedObjectReference `xml:"entity,omitempty"` +} + +func init() { + t["ClearComplianceStatusRequestType"] = reflect.TypeOf((*ClearComplianceStatusRequestType)(nil)).Elem() +} + +type ClearComplianceStatusResponse struct { +} + +type ClearNFSUser ClearNFSUserRequestType + +func init() { + t["ClearNFSUser"] = reflect.TypeOf((*ClearNFSUser)(nil)).Elem() +} + +type ClearNFSUserRequestType struct { + This ManagedObjectReference `xml:"_this"` +} + +func init() { + t["ClearNFSUserRequestType"] = reflect.TypeOf((*ClearNFSUserRequestType)(nil)).Elem() +} + +type ClearNFSUserResponse struct { +} + +type ClockSkew struct { + HostConfigFault +} + +func init() { + t["ClockSkew"] = reflect.TypeOf((*ClockSkew)(nil)).Elem() +} + +type ClockSkewFault ClockSkew + +func init() { + t["ClockSkewFault"] = reflect.TypeOf((*ClockSkewFault)(nil)).Elem() +} + +type CloneFromSnapshotNotSupported struct { + MigrationFault +} + +func init() { + t["CloneFromSnapshotNotSupported"] = reflect.TypeOf((*CloneFromSnapshotNotSupported)(nil)).Elem() +} + +type CloneFromSnapshotNotSupportedFault CloneFromSnapshotNotSupported + +func init() { + t["CloneFromSnapshotNotSupportedFault"] = reflect.TypeOf((*CloneFromSnapshotNotSupportedFault)(nil)).Elem() +} + +type CloneSession CloneSessionRequestType + +func init() { + t["CloneSession"] = reflect.TypeOf((*CloneSession)(nil)).Elem() +} + +type CloneSessionRequestType struct { + This ManagedObjectReference 
`xml:"_this"` + CloneTicket string `xml:"cloneTicket"` +} + +func init() { + t["CloneSessionRequestType"] = reflect.TypeOf((*CloneSessionRequestType)(nil)).Elem() +} + +type CloneSessionResponse struct { + Returnval UserSession `xml:"returnval"` +} + +type CloneVAppRequestType struct { + This ManagedObjectReference `xml:"_this"` + Name string `xml:"name"` + Target ManagedObjectReference `xml:"target"` + Spec VAppCloneSpec `xml:"spec"` +} + +func init() { + t["CloneVAppRequestType"] = reflect.TypeOf((*CloneVAppRequestType)(nil)).Elem() +} + +type CloneVApp_Task CloneVAppRequestType + +func init() { + t["CloneVApp_Task"] = reflect.TypeOf((*CloneVApp_Task)(nil)).Elem() +} + +type CloneVApp_TaskResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + +type CloneVMRequestType struct { + This ManagedObjectReference `xml:"_this"` + Folder ManagedObjectReference `xml:"folder"` + Name string `xml:"name"` + Spec VirtualMachineCloneSpec `xml:"spec"` +} + +func init() { + t["CloneVMRequestType"] = reflect.TypeOf((*CloneVMRequestType)(nil)).Elem() +} + +type CloneVM_Task CloneVMRequestType + +func init() { + t["CloneVM_Task"] = reflect.TypeOf((*CloneVM_Task)(nil)).Elem() +} + +type CloneVM_TaskResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + +type CloseInventoryViewFolder CloseInventoryViewFolderRequestType + +func init() { + t["CloseInventoryViewFolder"] = reflect.TypeOf((*CloseInventoryViewFolder)(nil)).Elem() +} + +type CloseInventoryViewFolderRequestType struct { + This ManagedObjectReference `xml:"_this"` + Entity []ManagedObjectReference `xml:"entity"` +} + +func init() { + t["CloseInventoryViewFolderRequestType"] = reflect.TypeOf((*CloseInventoryViewFolderRequestType)(nil)).Elem() +} + +type CloseInventoryViewFolderResponse struct { + Returnval []ManagedObjectReference `xml:"returnval,omitempty"` +} + +type ClusterAction struct { + DynamicData + + Type string `xml:"type"` + Target *ManagedObjectReference `xml:"target,omitempty"` +} + +func init() { + t["ClusterAction"] = reflect.TypeOf((*ClusterAction)(nil)).Elem() +} + +type ClusterActionHistory struct { + DynamicData + + Action BaseClusterAction `xml:"action,typeattr"` + Time time.Time `xml:"time"` +} + +func init() { + t["ClusterActionHistory"] = reflect.TypeOf((*ClusterActionHistory)(nil)).Elem() +} + +type ClusterAffinityRuleSpec struct { + ClusterRuleInfo + + Vm []ManagedObjectReference `xml:"vm"` +} + +func init() { + t["ClusterAffinityRuleSpec"] = reflect.TypeOf((*ClusterAffinityRuleSpec)(nil)).Elem() +} + +type ClusterAntiAffinityRuleSpec struct { + ClusterRuleInfo + + Vm []ManagedObjectReference `xml:"vm"` +} + +func init() { + t["ClusterAntiAffinityRuleSpec"] = reflect.TypeOf((*ClusterAntiAffinityRuleSpec)(nil)).Elem() +} + +type ClusterAttemptedVmInfo struct { + DynamicData + + Vm ManagedObjectReference `xml:"vm"` + Task *ManagedObjectReference `xml:"task,omitempty"` +} + +func init() { + t["ClusterAttemptedVmInfo"] = reflect.TypeOf((*ClusterAttemptedVmInfo)(nil)).Elem() +} + +type ClusterComplianceCheckedEvent struct { + ClusterEvent + + Profile ProfileEventArgument `xml:"profile"` +} + +func init() { + t["ClusterComplianceCheckedEvent"] = reflect.TypeOf((*ClusterComplianceCheckedEvent)(nil)).Elem() +} + +type ClusterComputeResourceSummary struct { + ComputeResourceSummary + + CurrentFailoverLevel int `xml:"currentFailoverLevel"` + AdmissionControlInfo BaseClusterDasAdmissionControlInfo `xml:"admissionControlInfo,omitempty,typeattr"` + NumVmotions int `xml:"numVmotions"` + TargetBalance 
int `xml:"targetBalance,omitempty"` + CurrentBalance int `xml:"currentBalance,omitempty"` + UsageSummary *ClusterUsageSummary `xml:"usageSummary,omitempty"` + CurrentEVCModeKey string `xml:"currentEVCModeKey,omitempty"` + DasData BaseClusterDasData `xml:"dasData,omitempty,typeattr"` +} + +func init() { + t["ClusterComputeResourceSummary"] = reflect.TypeOf((*ClusterComputeResourceSummary)(nil)).Elem() +} + +type ClusterConfigInfo struct { + DynamicData + + DasConfig ClusterDasConfigInfo `xml:"dasConfig"` + DasVmConfig []ClusterDasVmConfigInfo `xml:"dasVmConfig,omitempty"` + DrsConfig ClusterDrsConfigInfo `xml:"drsConfig"` + DrsVmConfig []ClusterDrsVmConfigInfo `xml:"drsVmConfig,omitempty"` + Rule []BaseClusterRuleInfo `xml:"rule,omitempty,typeattr"` +} + +func init() { + t["ClusterConfigInfo"] = reflect.TypeOf((*ClusterConfigInfo)(nil)).Elem() +} + +type ClusterConfigInfoEx struct { + ComputeResourceConfigInfo + + DasConfig ClusterDasConfigInfo `xml:"dasConfig"` + DasVmConfig []ClusterDasVmConfigInfo `xml:"dasVmConfig,omitempty"` + DrsConfig ClusterDrsConfigInfo `xml:"drsConfig"` + DrsVmConfig []ClusterDrsVmConfigInfo `xml:"drsVmConfig,omitempty"` + Rule []BaseClusterRuleInfo `xml:"rule,omitempty,typeattr"` + DpmConfigInfo *ClusterDpmConfigInfo `xml:"dpmConfigInfo,omitempty"` + DpmHostConfig []ClusterDpmHostConfigInfo `xml:"dpmHostConfig,omitempty"` + VsanConfigInfo *VsanClusterConfigInfo `xml:"vsanConfigInfo,omitempty"` + VsanHostConfig []VsanHostConfigInfo `xml:"vsanHostConfig,omitempty"` + Group []BaseClusterGroupInfo `xml:"group,omitempty,typeattr"` +} + +func init() { + t["ClusterConfigInfoEx"] = reflect.TypeOf((*ClusterConfigInfoEx)(nil)).Elem() +} + +type ClusterConfigSpec struct { + DynamicData + + DasConfig *ClusterDasConfigInfo `xml:"dasConfig,omitempty"` + DasVmConfigSpec []ClusterDasVmConfigSpec `xml:"dasVmConfigSpec,omitempty"` + DrsConfig *ClusterDrsConfigInfo `xml:"drsConfig,omitempty"` + DrsVmConfigSpec []ClusterDrsVmConfigSpec `xml:"drsVmConfigSpec,omitempty"` + RulesSpec []ClusterRuleSpec `xml:"rulesSpec,omitempty"` +} + +func init() { + t["ClusterConfigSpec"] = reflect.TypeOf((*ClusterConfigSpec)(nil)).Elem() +} + +type ClusterConfigSpecEx struct { + ComputeResourceConfigSpec + + DasConfig *ClusterDasConfigInfo `xml:"dasConfig,omitempty"` + DasVmConfigSpec []ClusterDasVmConfigSpec `xml:"dasVmConfigSpec,omitempty"` + DrsConfig *ClusterDrsConfigInfo `xml:"drsConfig,omitempty"` + DrsVmConfigSpec []ClusterDrsVmConfigSpec `xml:"drsVmConfigSpec,omitempty"` + RulesSpec []ClusterRuleSpec `xml:"rulesSpec,omitempty"` + DpmConfig *ClusterDpmConfigInfo `xml:"dpmConfig,omitempty"` + DpmHostConfigSpec []ClusterDpmHostConfigSpec `xml:"dpmHostConfigSpec,omitempty"` + VsanConfig *VsanClusterConfigInfo `xml:"vsanConfig,omitempty"` + VsanHostConfigSpec []VsanHostConfigInfo `xml:"vsanHostConfigSpec,omitempty"` + GroupSpec []ClusterGroupSpec `xml:"groupSpec,omitempty"` +} + +func init() { + t["ClusterConfigSpecEx"] = reflect.TypeOf((*ClusterConfigSpecEx)(nil)).Elem() +} + +type ClusterCreatedEvent struct { + ClusterEvent + + Parent FolderEventArgument `xml:"parent"` +} + +func init() { + t["ClusterCreatedEvent"] = reflect.TypeOf((*ClusterCreatedEvent)(nil)).Elem() +} + +type ClusterDasAamHostInfo struct { + ClusterDasHostInfo + + HostDasState []ClusterDasAamNodeState `xml:"hostDasState,omitempty"` + PrimaryHosts []string `xml:"primaryHosts,omitempty"` +} + +func init() { + t["ClusterDasAamHostInfo"] = reflect.TypeOf((*ClusterDasAamHostInfo)(nil)).Elem() +} + +type ClusterDasAamNodeState 
struct { + DynamicData + + Host ManagedObjectReference `xml:"host"` + Name string `xml:"name"` + ConfigState string `xml:"configState"` + RuntimeState string `xml:"runtimeState"` +} + +func init() { + t["ClusterDasAamNodeState"] = reflect.TypeOf((*ClusterDasAamNodeState)(nil)).Elem() +} + +type ClusterDasAdmissionControlInfo struct { + DynamicData +} + +func init() { + t["ClusterDasAdmissionControlInfo"] = reflect.TypeOf((*ClusterDasAdmissionControlInfo)(nil)).Elem() +} + +type ClusterDasAdmissionControlPolicy struct { + DynamicData +} + +func init() { + t["ClusterDasAdmissionControlPolicy"] = reflect.TypeOf((*ClusterDasAdmissionControlPolicy)(nil)).Elem() +} + +type ClusterDasAdvancedRuntimeInfo struct { + DynamicData + + DasHostInfo BaseClusterDasHostInfo `xml:"dasHostInfo,omitempty,typeattr"` + VmcpSupported *ClusterDasAdvancedRuntimeInfoVmcpCapabilityInfo `xml:"vmcpSupported,omitempty"` + HeartbeatDatastoreInfo []DasHeartbeatDatastoreInfo `xml:"heartbeatDatastoreInfo,omitempty"` +} + +func init() { + t["ClusterDasAdvancedRuntimeInfo"] = reflect.TypeOf((*ClusterDasAdvancedRuntimeInfo)(nil)).Elem() +} + +type ClusterDasAdvancedRuntimeInfoVmcpCapabilityInfo struct { + DynamicData + + StorageAPDSupported bool `xml:"storageAPDSupported"` + StoragePDLSupported bool `xml:"storagePDLSupported"` +} + +func init() { + t["ClusterDasAdvancedRuntimeInfoVmcpCapabilityInfo"] = reflect.TypeOf((*ClusterDasAdvancedRuntimeInfoVmcpCapabilityInfo)(nil)).Elem() +} + +type ClusterDasConfigInfo struct { + DynamicData + + Enabled *bool `xml:"enabled"` + VmMonitoring string `xml:"vmMonitoring,omitempty"` + HostMonitoring string `xml:"hostMonitoring,omitempty"` + VmComponentProtecting string `xml:"vmComponentProtecting,omitempty"` + FailoverLevel int `xml:"failoverLevel,omitempty"` + AdmissionControlPolicy BaseClusterDasAdmissionControlPolicy `xml:"admissionControlPolicy,omitempty,typeattr"` + AdmissionControlEnabled *bool `xml:"admissionControlEnabled"` + DefaultVmSettings *ClusterDasVmSettings `xml:"defaultVmSettings,omitempty"` + Option []BaseOptionValue `xml:"option,omitempty,typeattr"` + HeartbeatDatastore []ManagedObjectReference `xml:"heartbeatDatastore,omitempty"` + HBDatastoreCandidatePolicy string `xml:"hBDatastoreCandidatePolicy,omitempty"` +} + +func init() { + t["ClusterDasConfigInfo"] = reflect.TypeOf((*ClusterDasConfigInfo)(nil)).Elem() +} + +type ClusterDasData struct { + DynamicData +} + +func init() { + t["ClusterDasData"] = reflect.TypeOf((*ClusterDasData)(nil)).Elem() +} + +type ClusterDasDataSummary struct { + ClusterDasData + + HostListVersion int64 `xml:"hostListVersion"` + ClusterConfigVersion int64 `xml:"clusterConfigVersion"` + CompatListVersion int64 `xml:"compatListVersion"` +} + +func init() { + t["ClusterDasDataSummary"] = reflect.TypeOf((*ClusterDasDataSummary)(nil)).Elem() +} + +type ClusterDasFailoverLevelAdvancedRuntimeInfo struct { + ClusterDasAdvancedRuntimeInfo + + SlotInfo ClusterDasFailoverLevelAdvancedRuntimeInfoSlotInfo `xml:"slotInfo"` + TotalSlots int `xml:"totalSlots"` + UsedSlots int `xml:"usedSlots"` + UnreservedSlots int `xml:"unreservedSlots"` + TotalVms int `xml:"totalVms"` + TotalHosts int `xml:"totalHosts"` + TotalGoodHosts int `xml:"totalGoodHosts"` + HostSlots []ClusterDasFailoverLevelAdvancedRuntimeInfoHostSlots `xml:"hostSlots,omitempty"` + VmsRequiringMultipleSlots []ClusterDasFailoverLevelAdvancedRuntimeInfoVmSlots `xml:"vmsRequiringMultipleSlots,omitempty"` +} + +func init() { + t["ClusterDasFailoverLevelAdvancedRuntimeInfo"] = 
reflect.TypeOf((*ClusterDasFailoverLevelAdvancedRuntimeInfo)(nil)).Elem() +} + +type ClusterDasFailoverLevelAdvancedRuntimeInfoHostSlots struct { + DynamicData + + Host ManagedObjectReference `xml:"host"` + Slots int `xml:"slots"` +} + +func init() { + t["ClusterDasFailoverLevelAdvancedRuntimeInfoHostSlots"] = reflect.TypeOf((*ClusterDasFailoverLevelAdvancedRuntimeInfoHostSlots)(nil)).Elem() +} + +type ClusterDasFailoverLevelAdvancedRuntimeInfoSlotInfo struct { + DynamicData + + NumVcpus int `xml:"numVcpus"` + CpuMHz int `xml:"cpuMHz"` + MemoryMB int `xml:"memoryMB"` +} + +func init() { + t["ClusterDasFailoverLevelAdvancedRuntimeInfoSlotInfo"] = reflect.TypeOf((*ClusterDasFailoverLevelAdvancedRuntimeInfoSlotInfo)(nil)).Elem() +} + +type ClusterDasFailoverLevelAdvancedRuntimeInfoVmSlots struct { + DynamicData + + Vm ManagedObjectReference `xml:"vm"` + Slots int `xml:"slots"` +} + +func init() { + t["ClusterDasFailoverLevelAdvancedRuntimeInfoVmSlots"] = reflect.TypeOf((*ClusterDasFailoverLevelAdvancedRuntimeInfoVmSlots)(nil)).Elem() +} + +type ClusterDasFdmHostState struct { + DynamicData + + State string `xml:"state"` + StateReporter *ManagedObjectReference `xml:"stateReporter,omitempty"` +} + +func init() { + t["ClusterDasFdmHostState"] = reflect.TypeOf((*ClusterDasFdmHostState)(nil)).Elem() +} + +type ClusterDasHostInfo struct { + DynamicData +} + +func init() { + t["ClusterDasHostInfo"] = reflect.TypeOf((*ClusterDasHostInfo)(nil)).Elem() +} + +type ClusterDasHostRecommendation struct { + DynamicData + + Host ManagedObjectReference `xml:"host"` + DrsRating int `xml:"drsRating,omitempty"` +} + +func init() { + t["ClusterDasHostRecommendation"] = reflect.TypeOf((*ClusterDasHostRecommendation)(nil)).Elem() +} + +type ClusterDasVmConfigInfo struct { + DynamicData + + Key ManagedObjectReference `xml:"key"` + RestartPriority DasVmPriority `xml:"restartPriority,omitempty"` + PowerOffOnIsolation *bool `xml:"powerOffOnIsolation"` + DasSettings *ClusterDasVmSettings `xml:"dasSettings,omitempty"` +} + +func init() { + t["ClusterDasVmConfigInfo"] = reflect.TypeOf((*ClusterDasVmConfigInfo)(nil)).Elem() +} + +type ClusterDasVmConfigSpec struct { + ArrayUpdateSpec + + Info *ClusterDasVmConfigInfo `xml:"info,omitempty"` +} + +func init() { + t["ClusterDasVmConfigSpec"] = reflect.TypeOf((*ClusterDasVmConfigSpec)(nil)).Elem() +} + +type ClusterDasVmSettings struct { + DynamicData + + RestartPriority string `xml:"restartPriority,omitempty"` + IsolationResponse string `xml:"isolationResponse,omitempty"` + VmToolsMonitoringSettings *ClusterVmToolsMonitoringSettings `xml:"vmToolsMonitoringSettings,omitempty"` + VmComponentProtectionSettings *ClusterVmComponentProtectionSettings `xml:"vmComponentProtectionSettings,omitempty"` +} + +func init() { + t["ClusterDasVmSettings"] = reflect.TypeOf((*ClusterDasVmSettings)(nil)).Elem() +} + +type ClusterDestroyedEvent struct { + ClusterEvent +} + +func init() { + t["ClusterDestroyedEvent"] = reflect.TypeOf((*ClusterDestroyedEvent)(nil)).Elem() +} + +type ClusterDpmConfigInfo struct { + DynamicData + + Enabled *bool `xml:"enabled"` + DefaultDpmBehavior DpmBehavior `xml:"defaultDpmBehavior,omitempty"` + HostPowerActionRate int `xml:"hostPowerActionRate,omitempty"` + Option []BaseOptionValue `xml:"option,omitempty,typeattr"` +} + +func init() { + t["ClusterDpmConfigInfo"] = reflect.TypeOf((*ClusterDpmConfigInfo)(nil)).Elem() +} + +type ClusterDpmHostConfigInfo struct { + DynamicData + + Key ManagedObjectReference `xml:"key"` + Enabled *bool `xml:"enabled"` + Behavior 
DpmBehavior `xml:"behavior,omitempty"` +} + +func init() { + t["ClusterDpmHostConfigInfo"] = reflect.TypeOf((*ClusterDpmHostConfigInfo)(nil)).Elem() +} + +type ClusterDpmHostConfigSpec struct { + ArrayUpdateSpec + + Info *ClusterDpmHostConfigInfo `xml:"info,omitempty"` +} + +func init() { + t["ClusterDpmHostConfigSpec"] = reflect.TypeOf((*ClusterDpmHostConfigSpec)(nil)).Elem() +} + +type ClusterDrsConfigInfo struct { + DynamicData + + Enabled *bool `xml:"enabled"` + EnableVmBehaviorOverrides *bool `xml:"enableVmBehaviorOverrides"` + DefaultVmBehavior DrsBehavior `xml:"defaultVmBehavior,omitempty"` + VmotionRate int `xml:"vmotionRate,omitempty"` + Option []BaseOptionValue `xml:"option,omitempty,typeattr"` +} + +func init() { + t["ClusterDrsConfigInfo"] = reflect.TypeOf((*ClusterDrsConfigInfo)(nil)).Elem() +} + +type ClusterDrsFaults struct { + DynamicData + + Reason string `xml:"reason"` + FaultsByVm []BaseClusterDrsFaultsFaultsByVm `xml:"faultsByVm,typeattr"` +} + +func init() { + t["ClusterDrsFaults"] = reflect.TypeOf((*ClusterDrsFaults)(nil)).Elem() +} + +type ClusterDrsFaultsFaultsByVirtualDisk struct { + ClusterDrsFaultsFaultsByVm + + Disk *VirtualDiskId `xml:"disk,omitempty"` +} + +func init() { + t["ClusterDrsFaultsFaultsByVirtualDisk"] = reflect.TypeOf((*ClusterDrsFaultsFaultsByVirtualDisk)(nil)).Elem() +} + +type ClusterDrsFaultsFaultsByVm struct { + DynamicData + + Vm *ManagedObjectReference `xml:"vm,omitempty"` + Fault []LocalizedMethodFault `xml:"fault"` +} + +func init() { + t["ClusterDrsFaultsFaultsByVm"] = reflect.TypeOf((*ClusterDrsFaultsFaultsByVm)(nil)).Elem() +} + +type ClusterDrsMigration struct { + DynamicData + + Key string `xml:"key"` + Time time.Time `xml:"time"` + Vm ManagedObjectReference `xml:"vm"` + CpuLoad int `xml:"cpuLoad,omitempty"` + MemoryLoad int64 `xml:"memoryLoad,omitempty"` + Source ManagedObjectReference `xml:"source"` + SourceCpuLoad int `xml:"sourceCpuLoad,omitempty"` + SourceMemoryLoad int64 `xml:"sourceMemoryLoad,omitempty"` + Destination ManagedObjectReference `xml:"destination"` + DestinationCpuLoad int `xml:"destinationCpuLoad,omitempty"` + DestinationMemoryLoad int64 `xml:"destinationMemoryLoad,omitempty"` +} + +func init() { + t["ClusterDrsMigration"] = reflect.TypeOf((*ClusterDrsMigration)(nil)).Elem() +} + +type ClusterDrsRecommendation struct { + DynamicData + + Key string `xml:"key"` + Rating int `xml:"rating"` + Reason string `xml:"reason"` + ReasonText string `xml:"reasonText"` + MigrationList []ClusterDrsMigration `xml:"migrationList"` +} + +func init() { + t["ClusterDrsRecommendation"] = reflect.TypeOf((*ClusterDrsRecommendation)(nil)).Elem() +} + +type ClusterDrsVmConfigInfo struct { + DynamicData + + Key ManagedObjectReference `xml:"key"` + Enabled *bool `xml:"enabled"` + Behavior DrsBehavior `xml:"behavior,omitempty"` +} + +func init() { + t["ClusterDrsVmConfigInfo"] = reflect.TypeOf((*ClusterDrsVmConfigInfo)(nil)).Elem() +} + +type ClusterDrsVmConfigSpec struct { + ArrayUpdateSpec + + Info *ClusterDrsVmConfigInfo `xml:"info,omitempty"` +} + +func init() { + t["ClusterDrsVmConfigSpec"] = reflect.TypeOf((*ClusterDrsVmConfigSpec)(nil)).Elem() +} + +type ClusterEVCManagerCheckResult struct { + DynamicData + + EvcModeKey string `xml:"evcModeKey"` + Error LocalizedMethodFault `xml:"error"` + Host []ManagedObjectReference `xml:"host,omitempty"` +} + +func init() { + t["ClusterEVCManagerCheckResult"] = reflect.TypeOf((*ClusterEVCManagerCheckResult)(nil)).Elem() +} + +type ClusterEVCManagerEVCState struct { + DynamicData + + 
SupportedEVCMode []EVCMode `xml:"supportedEVCMode"` + CurrentEVCModeKey string `xml:"currentEVCModeKey,omitempty"` + GuaranteedCPUFeatures []HostCpuIdInfo `xml:"guaranteedCPUFeatures,omitempty"` + FeatureCapability []HostFeatureCapability `xml:"featureCapability,omitempty"` + FeatureMask []HostFeatureMask `xml:"featureMask,omitempty"` + FeatureRequirement []VirtualMachineFeatureRequirement `xml:"featureRequirement,omitempty"` +} + +func init() { + t["ClusterEVCManagerEVCState"] = reflect.TypeOf((*ClusterEVCManagerEVCState)(nil)).Elem() +} + +type ClusterEnterMaintenanceMode ClusterEnterMaintenanceModeRequestType + +func init() { + t["ClusterEnterMaintenanceMode"] = reflect.TypeOf((*ClusterEnterMaintenanceMode)(nil)).Elem() +} + +type ClusterEnterMaintenanceModeRequestType struct { + This ManagedObjectReference `xml:"_this"` + Host []ManagedObjectReference `xml:"host"` + Option []BaseOptionValue `xml:"option,omitempty,typeattr"` +} + +func init() { + t["ClusterEnterMaintenanceModeRequestType"] = reflect.TypeOf((*ClusterEnterMaintenanceModeRequestType)(nil)).Elem() +} + +type ClusterEnterMaintenanceModeResponse struct { + Returnval ClusterEnterMaintenanceResult `xml:"returnval"` +} + +type ClusterEnterMaintenanceResult struct { + DynamicData + + Recommendations []ClusterRecommendation `xml:"recommendations,omitempty"` + Fault *ClusterDrsFaults `xml:"fault,omitempty"` +} + +func init() { + t["ClusterEnterMaintenanceResult"] = reflect.TypeOf((*ClusterEnterMaintenanceResult)(nil)).Elem() +} + +type ClusterEvent struct { + Event +} + +func init() { + t["ClusterEvent"] = reflect.TypeOf((*ClusterEvent)(nil)).Elem() +} + +type ClusterFailoverHostAdmissionControlInfo struct { + ClusterDasAdmissionControlInfo + + HostStatus []ClusterFailoverHostAdmissionControlInfoHostStatus `xml:"hostStatus,omitempty"` +} + +func init() { + t["ClusterFailoverHostAdmissionControlInfo"] = reflect.TypeOf((*ClusterFailoverHostAdmissionControlInfo)(nil)).Elem() +} + +type ClusterFailoverHostAdmissionControlInfoHostStatus struct { + DynamicData + + Host ManagedObjectReference `xml:"host"` + Status ManagedEntityStatus `xml:"status"` +} + +func init() { + t["ClusterFailoverHostAdmissionControlInfoHostStatus"] = reflect.TypeOf((*ClusterFailoverHostAdmissionControlInfoHostStatus)(nil)).Elem() +} + +type ClusterFailoverHostAdmissionControlPolicy struct { + ClusterDasAdmissionControlPolicy + + FailoverHosts []ManagedObjectReference `xml:"failoverHosts,omitempty"` +} + +func init() { + t["ClusterFailoverHostAdmissionControlPolicy"] = reflect.TypeOf((*ClusterFailoverHostAdmissionControlPolicy)(nil)).Elem() +} + +type ClusterFailoverLevelAdmissionControlInfo struct { + ClusterDasAdmissionControlInfo + + CurrentFailoverLevel int `xml:"currentFailoverLevel"` +} + +func init() { + t["ClusterFailoverLevelAdmissionControlInfo"] = reflect.TypeOf((*ClusterFailoverLevelAdmissionControlInfo)(nil)).Elem() +} + +type ClusterFailoverLevelAdmissionControlPolicy struct { + ClusterDasAdmissionControlPolicy + + FailoverLevel int `xml:"failoverLevel"` + SlotPolicy BaseClusterSlotPolicy `xml:"slotPolicy,omitempty,typeattr"` +} + +func init() { + t["ClusterFailoverLevelAdmissionControlPolicy"] = reflect.TypeOf((*ClusterFailoverLevelAdmissionControlPolicy)(nil)).Elem() +} + +type ClusterFailoverResourcesAdmissionControlInfo struct { + ClusterDasAdmissionControlInfo + + CurrentCpuFailoverResourcesPercent int `xml:"currentCpuFailoverResourcesPercent"` + CurrentMemoryFailoverResourcesPercent int `xml:"currentMemoryFailoverResourcesPercent"` +} + +func 
init() { + t["ClusterFailoverResourcesAdmissionControlInfo"] = reflect.TypeOf((*ClusterFailoverResourcesAdmissionControlInfo)(nil)).Elem() +} + +type ClusterFailoverResourcesAdmissionControlPolicy struct { + ClusterDasAdmissionControlPolicy + + CpuFailoverResourcesPercent int `xml:"cpuFailoverResourcesPercent"` + MemoryFailoverResourcesPercent int `xml:"memoryFailoverResourcesPercent"` +} + +func init() { + t["ClusterFailoverResourcesAdmissionControlPolicy"] = reflect.TypeOf((*ClusterFailoverResourcesAdmissionControlPolicy)(nil)).Elem() +} + +type ClusterFixedSizeSlotPolicy struct { + ClusterSlotPolicy + + Cpu int `xml:"cpu"` + Memory int `xml:"memory"` +} + +func init() { + t["ClusterFixedSizeSlotPolicy"] = reflect.TypeOf((*ClusterFixedSizeSlotPolicy)(nil)).Elem() +} + +type ClusterGroupInfo struct { + DynamicData + + Name string `xml:"name"` + UserCreated *bool `xml:"userCreated"` + UniqueID string `xml:"uniqueID,omitempty"` +} + +func init() { + t["ClusterGroupInfo"] = reflect.TypeOf((*ClusterGroupInfo)(nil)).Elem() +} + +type ClusterGroupSpec struct { + ArrayUpdateSpec + + Info BaseClusterGroupInfo `xml:"info,omitempty,typeattr"` +} + +func init() { + t["ClusterGroupSpec"] = reflect.TypeOf((*ClusterGroupSpec)(nil)).Elem() +} + +type ClusterHostGroup struct { + ClusterGroupInfo + + Host []ManagedObjectReference `xml:"host,omitempty"` +} + +func init() { + t["ClusterHostGroup"] = reflect.TypeOf((*ClusterHostGroup)(nil)).Elem() +} + +type ClusterHostPowerAction struct { + ClusterAction + + OperationType HostPowerOperationType `xml:"operationType"` + PowerConsumptionWatt int `xml:"powerConsumptionWatt,omitempty"` + CpuCapacityMHz int `xml:"cpuCapacityMHz,omitempty"` + MemCapacityMB int `xml:"memCapacityMB,omitempty"` +} + +func init() { + t["ClusterHostPowerAction"] = reflect.TypeOf((*ClusterHostPowerAction)(nil)).Elem() +} + +type ClusterHostRecommendation struct { + DynamicData + + Host ManagedObjectReference `xml:"host"` + Rating int `xml:"rating"` +} + +func init() { + t["ClusterHostRecommendation"] = reflect.TypeOf((*ClusterHostRecommendation)(nil)).Elem() +} + +type ClusterInitialPlacementAction struct { + ClusterAction + + TargetHost ManagedObjectReference `xml:"targetHost"` + Pool *ManagedObjectReference `xml:"pool,omitempty"` +} + +func init() { + t["ClusterInitialPlacementAction"] = reflect.TypeOf((*ClusterInitialPlacementAction)(nil)).Elem() +} + +type ClusterIoFilterInfo struct { + IoFilterInfo + + OpType string `xml:"opType"` +} + +func init() { + t["ClusterIoFilterInfo"] = reflect.TypeOf((*ClusterIoFilterInfo)(nil)).Elem() +} + +type ClusterMigrationAction struct { + ClusterAction + + DrsMigration *ClusterDrsMigration `xml:"drsMigration,omitempty"` +} + +func init() { + t["ClusterMigrationAction"] = reflect.TypeOf((*ClusterMigrationAction)(nil)).Elem() +} + +type ClusterNotAttemptedVmInfo struct { + DynamicData + + Vm ManagedObjectReference `xml:"vm"` + Fault LocalizedMethodFault `xml:"fault"` +} + +func init() { + t["ClusterNotAttemptedVmInfo"] = reflect.TypeOf((*ClusterNotAttemptedVmInfo)(nil)).Elem() +} + +type ClusterOvercommittedEvent struct { + ClusterEvent +} + +func init() { + t["ClusterOvercommittedEvent"] = reflect.TypeOf((*ClusterOvercommittedEvent)(nil)).Elem() +} + +type ClusterPowerOnVmResult struct { + DynamicData + + Attempted []ClusterAttemptedVmInfo `xml:"attempted,omitempty"` + NotAttempted []ClusterNotAttemptedVmInfo `xml:"notAttempted,omitempty"` + Recommendations []ClusterRecommendation `xml:"recommendations,omitempty"` +} + +func init() { + 
t["ClusterPowerOnVmResult"] = reflect.TypeOf((*ClusterPowerOnVmResult)(nil)).Elem() +} + +type ClusterProfileCompleteConfigSpec struct { + ClusterProfileConfigSpec + + ComplyProfile *ComplianceProfile `xml:"complyProfile,omitempty"` +} + +func init() { + t["ClusterProfileCompleteConfigSpec"] = reflect.TypeOf((*ClusterProfileCompleteConfigSpec)(nil)).Elem() +} + +type ClusterProfileConfigInfo struct { + ProfileConfigInfo + + ComplyProfile *ComplianceProfile `xml:"complyProfile,omitempty"` +} + +func init() { + t["ClusterProfileConfigInfo"] = reflect.TypeOf((*ClusterProfileConfigInfo)(nil)).Elem() +} + +type ClusterProfileConfigServiceCreateSpec struct { + ClusterProfileConfigSpec + + ServiceType []string `xml:"serviceType,omitempty"` +} + +func init() { + t["ClusterProfileConfigServiceCreateSpec"] = reflect.TypeOf((*ClusterProfileConfigServiceCreateSpec)(nil)).Elem() +} + +type ClusterProfileConfigSpec struct { + ClusterProfileCreateSpec +} + +func init() { + t["ClusterProfileConfigSpec"] = reflect.TypeOf((*ClusterProfileConfigSpec)(nil)).Elem() +} + +type ClusterProfileCreateSpec struct { + ProfileCreateSpec +} + +func init() { + t["ClusterProfileCreateSpec"] = reflect.TypeOf((*ClusterProfileCreateSpec)(nil)).Elem() +} + +type ClusterRecommendation struct { + DynamicData + + Key string `xml:"key"` + Type string `xml:"type"` + Time time.Time `xml:"time"` + Rating int `xml:"rating"` + Reason string `xml:"reason"` + ReasonText string `xml:"reasonText"` + WarningText string `xml:"warningText,omitempty"` + WarningDetails *LocalizableMessage `xml:"warningDetails,omitempty"` + Prerequisite []string `xml:"prerequisite,omitempty"` + Action []BaseClusterAction `xml:"action,omitempty,typeattr"` + Target *ManagedObjectReference `xml:"target,omitempty"` +} + +func init() { + t["ClusterRecommendation"] = reflect.TypeOf((*ClusterRecommendation)(nil)).Elem() +} + +type ClusterReconfiguredEvent struct { + ClusterEvent +} + +func init() { + t["ClusterReconfiguredEvent"] = reflect.TypeOf((*ClusterReconfiguredEvent)(nil)).Elem() +} + +type ClusterResourceUsageSummary struct { + DynamicData + + CpuUsedMHz int `xml:"cpuUsedMHz"` + CpuCapacityMHz int `xml:"cpuCapacityMHz"` + MemUsedMB int `xml:"memUsedMB"` + MemCapacityMB int `xml:"memCapacityMB"` + StorageUsedMB int64 `xml:"storageUsedMB"` + StorageCapacityMB int64 `xml:"storageCapacityMB"` +} + +func init() { + t["ClusterResourceUsageSummary"] = reflect.TypeOf((*ClusterResourceUsageSummary)(nil)).Elem() +} + +type ClusterRuleInfo struct { + DynamicData + + Key int `xml:"key,omitempty"` + Status ManagedEntityStatus `xml:"status,omitempty"` + Enabled *bool `xml:"enabled"` + Name string `xml:"name,omitempty"` + Mandatory *bool `xml:"mandatory"` + UserCreated *bool `xml:"userCreated"` + InCompliance *bool `xml:"inCompliance"` + RuleUuid string `xml:"ruleUuid,omitempty"` +} + +func init() { + t["ClusterRuleInfo"] = reflect.TypeOf((*ClusterRuleInfo)(nil)).Elem() +} + +type ClusterRuleSpec struct { + ArrayUpdateSpec + + Info BaseClusterRuleInfo `xml:"info,omitempty,typeattr"` +} + +func init() { + t["ClusterRuleSpec"] = reflect.TypeOf((*ClusterRuleSpec)(nil)).Elem() +} + +type ClusterSlotPolicy struct { + DynamicData +} + +func init() { + t["ClusterSlotPolicy"] = reflect.TypeOf((*ClusterSlotPolicy)(nil)).Elem() +} + +type ClusterStatusChangedEvent struct { + ClusterEvent + + OldStatus string `xml:"oldStatus"` + NewStatus string `xml:"newStatus"` +} + +func init() { + t["ClusterStatusChangedEvent"] = reflect.TypeOf((*ClusterStatusChangedEvent)(nil)).Elem() +} + +type 
ClusterUsageSummary struct { + DynamicData + + TotalCpuCapacityMhz int `xml:"totalCpuCapacityMhz"` + TotalMemCapacityMB int `xml:"totalMemCapacityMB"` + CpuReservationMhz int `xml:"cpuReservationMhz"` + MemReservationMB int `xml:"memReservationMB"` + PoweredOffCpuReservationMhz int `xml:"poweredOffCpuReservationMhz,omitempty"` + PoweredOffMemReservationMB int `xml:"poweredOffMemReservationMB,omitempty"` + CpuDemandMhz int `xml:"cpuDemandMhz"` + MemDemandMB int `xml:"memDemandMB"` + StatsGenNumber int64 `xml:"statsGenNumber"` + CpuEntitledMhz int `xml:"cpuEntitledMhz"` + MemEntitledMB int `xml:"memEntitledMB"` + PoweredOffVmCount int `xml:"poweredOffVmCount"` + TotalVmCount int `xml:"totalVmCount"` +} + +func init() { + t["ClusterUsageSummary"] = reflect.TypeOf((*ClusterUsageSummary)(nil)).Elem() +} + +type ClusterVmComponentProtectionSettings struct { + DynamicData + + VmStorageProtectionForAPD string `xml:"vmStorageProtectionForAPD,omitempty"` + EnableAPDTimeoutForHosts *bool `xml:"enableAPDTimeoutForHosts"` + VmTerminateDelayForAPDSec int `xml:"vmTerminateDelayForAPDSec,omitempty"` + VmReactionOnAPDCleared string `xml:"vmReactionOnAPDCleared,omitempty"` + VmStorageProtectionForPDL string `xml:"vmStorageProtectionForPDL,omitempty"` +} + +func init() { + t["ClusterVmComponentProtectionSettings"] = reflect.TypeOf((*ClusterVmComponentProtectionSettings)(nil)).Elem() +} + +type ClusterVmGroup struct { + ClusterGroupInfo + + Vm []ManagedObjectReference `xml:"vm,omitempty"` +} + +func init() { + t["ClusterVmGroup"] = reflect.TypeOf((*ClusterVmGroup)(nil)).Elem() +} + +type ClusterVmHostRuleInfo struct { + ClusterRuleInfo + + VmGroupName string `xml:"vmGroupName,omitempty"` + AffineHostGroupName string `xml:"affineHostGroupName,omitempty"` + AntiAffineHostGroupName string `xml:"antiAffineHostGroupName,omitempty"` +} + +func init() { + t["ClusterVmHostRuleInfo"] = reflect.TypeOf((*ClusterVmHostRuleInfo)(nil)).Elem() +} + +type ClusterVmToolsMonitoringSettings struct { + DynamicData + + Enabled *bool `xml:"enabled"` + VmMonitoring string `xml:"vmMonitoring,omitempty"` + ClusterSettings *bool `xml:"clusterSettings"` + FailureInterval int `xml:"failureInterval,omitempty"` + MinUpTime int `xml:"minUpTime,omitempty"` + MaxFailures int `xml:"maxFailures,omitempty"` + MaxFailureWindow int `xml:"maxFailureWindow,omitempty"` +} + +func init() { + t["ClusterVmToolsMonitoringSettings"] = reflect.TypeOf((*ClusterVmToolsMonitoringSettings)(nil)).Elem() +} + +type CollectorAddressUnset struct { + DvsFault +} + +func init() { + t["CollectorAddressUnset"] = reflect.TypeOf((*CollectorAddressUnset)(nil)).Elem() +} + +type CollectorAddressUnsetFault CollectorAddressUnset + +func init() { + t["CollectorAddressUnsetFault"] = reflect.TypeOf((*CollectorAddressUnsetFault)(nil)).Elem() +} + +type ComplianceFailure struct { + DynamicData + + FailureType string `xml:"failureType"` + Message LocalizableMessage `xml:"message"` + ExpressionName string `xml:"expressionName,omitempty"` +} + +func init() { + t["ComplianceFailure"] = reflect.TypeOf((*ComplianceFailure)(nil)).Elem() +} + +type ComplianceLocator struct { + DynamicData + + ExpressionName string `xml:"expressionName"` + ApplyPath ProfilePropertyPath `xml:"applyPath"` +} + +func init() { + t["ComplianceLocator"] = reflect.TypeOf((*ComplianceLocator)(nil)).Elem() +} + +type ComplianceProfile struct { + DynamicData + + Expression []BaseProfileExpression `xml:"expression,typeattr"` + RootExpression string `xml:"rootExpression"` +} + +func init() { + 
t["ComplianceProfile"] = reflect.TypeOf((*ComplianceProfile)(nil)).Elem() +} + +type ComplianceResult struct { + DynamicData + + Profile *ManagedObjectReference `xml:"profile,omitempty"` + ComplianceStatus string `xml:"complianceStatus"` + Entity *ManagedObjectReference `xml:"entity,omitempty"` + CheckTime *time.Time `xml:"checkTime"` + Failure []ComplianceFailure `xml:"failure,omitempty"` +} + +func init() { + t["ComplianceResult"] = reflect.TypeOf((*ComplianceResult)(nil)).Elem() +} + +type CompositePolicyOption struct { + PolicyOption + + Option []BasePolicyOption `xml:"option,omitempty,typeattr"` +} + +func init() { + t["CompositePolicyOption"] = reflect.TypeOf((*CompositePolicyOption)(nil)).Elem() +} + +type ComputeDiskPartitionInfo ComputeDiskPartitionInfoRequestType + +func init() { + t["ComputeDiskPartitionInfo"] = reflect.TypeOf((*ComputeDiskPartitionInfo)(nil)).Elem() +} + +type ComputeDiskPartitionInfoForResize ComputeDiskPartitionInfoForResizeRequestType + +func init() { + t["ComputeDiskPartitionInfoForResize"] = reflect.TypeOf((*ComputeDiskPartitionInfoForResize)(nil)).Elem() +} + +type ComputeDiskPartitionInfoForResizeRequestType struct { + This ManagedObjectReference `xml:"_this"` + Partition HostScsiDiskPartition `xml:"partition"` + BlockRange HostDiskPartitionBlockRange `xml:"blockRange"` + PartitionFormat string `xml:"partitionFormat,omitempty"` +} + +func init() { + t["ComputeDiskPartitionInfoForResizeRequestType"] = reflect.TypeOf((*ComputeDiskPartitionInfoForResizeRequestType)(nil)).Elem() +} + +type ComputeDiskPartitionInfoForResizeResponse struct { + Returnval HostDiskPartitionInfo `xml:"returnval"` +} + +type ComputeDiskPartitionInfoRequestType struct { + This ManagedObjectReference `xml:"_this"` + DevicePath string `xml:"devicePath"` + Layout HostDiskPartitionLayout `xml:"layout"` + PartitionFormat string `xml:"partitionFormat,omitempty"` +} + +func init() { + t["ComputeDiskPartitionInfoRequestType"] = reflect.TypeOf((*ComputeDiskPartitionInfoRequestType)(nil)).Elem() +} + +type ComputeDiskPartitionInfoResponse struct { + Returnval HostDiskPartitionInfo `xml:"returnval"` +} + +type ComputeResourceConfigInfo struct { + DynamicData + + VmSwapPlacement string `xml:"vmSwapPlacement"` + SpbmEnabled *bool `xml:"spbmEnabled"` + DefaultHardwareVersionKey string `xml:"defaultHardwareVersionKey,omitempty"` +} + +func init() { + t["ComputeResourceConfigInfo"] = reflect.TypeOf((*ComputeResourceConfigInfo)(nil)).Elem() +} + +type ComputeResourceConfigSpec struct { + DynamicData + + VmSwapPlacement string `xml:"vmSwapPlacement,omitempty"` + SpbmEnabled *bool `xml:"spbmEnabled"` + DefaultHardwareVersionKey string `xml:"defaultHardwareVersionKey,omitempty"` +} + +func init() { + t["ComputeResourceConfigSpec"] = reflect.TypeOf((*ComputeResourceConfigSpec)(nil)).Elem() +} + +type ComputeResourceEventArgument struct { + EntityEventArgument + + ComputeResource ManagedObjectReference `xml:"computeResource"` +} + +func init() { + t["ComputeResourceEventArgument"] = reflect.TypeOf((*ComputeResourceEventArgument)(nil)).Elem() +} + +type ComputeResourceHostSPBMLicenseInfo struct { + DynamicData + + Host ManagedObjectReference `xml:"host"` + LicenseState ComputeResourceHostSPBMLicenseInfoHostSPBMLicenseState `xml:"licenseState"` +} + +func init() { + t["ComputeResourceHostSPBMLicenseInfo"] = reflect.TypeOf((*ComputeResourceHostSPBMLicenseInfo)(nil)).Elem() +} + +type ComputeResourceSummary struct { + DynamicData + + TotalCpu int `xml:"totalCpu"` + TotalMemory int64 `xml:"totalMemory"` + 
NumCpuCores int16 `xml:"numCpuCores"` + NumCpuThreads int16 `xml:"numCpuThreads"` + EffectiveCpu int `xml:"effectiveCpu"` + EffectiveMemory int64 `xml:"effectiveMemory"` + NumHosts int `xml:"numHosts"` + NumEffectiveHosts int `xml:"numEffectiveHosts"` + OverallStatus ManagedEntityStatus `xml:"overallStatus"` +} + +func init() { + t["ComputeResourceSummary"] = reflect.TypeOf((*ComputeResourceSummary)(nil)).Elem() +} + +type ConcurrentAccess struct { + VimFault +} + +func init() { + t["ConcurrentAccess"] = reflect.TypeOf((*ConcurrentAccess)(nil)).Elem() +} + +type ConcurrentAccessFault ConcurrentAccess + +func init() { + t["ConcurrentAccessFault"] = reflect.TypeOf((*ConcurrentAccessFault)(nil)).Elem() +} + +type ConfigTarget struct { + DynamicData + + NumCpus int `xml:"numCpus"` + NumCpuCores int `xml:"numCpuCores"` + NumNumaNodes int `xml:"numNumaNodes"` + SmcPresent *bool `xml:"smcPresent"` + Datastore []VirtualMachineDatastoreInfo `xml:"datastore,omitempty"` + Network []VirtualMachineNetworkInfo `xml:"network,omitempty"` + OpaqueNetwork []OpaqueNetworkTargetInfo `xml:"opaqueNetwork,omitempty"` + DistributedVirtualPortgroup []DistributedVirtualPortgroupInfo `xml:"distributedVirtualPortgroup,omitempty"` + DistributedVirtualSwitch []DistributedVirtualSwitchInfo `xml:"distributedVirtualSwitch,omitempty"` + CdRom []VirtualMachineCdromInfo `xml:"cdRom,omitempty"` + Serial []VirtualMachineSerialInfo `xml:"serial,omitempty"` + Parallel []VirtualMachineParallelInfo `xml:"parallel,omitempty"` + Sound []VirtualMachineSoundInfo `xml:"sound,omitempty"` + Usb []VirtualMachineUsbInfo `xml:"usb,omitempty"` + Floppy []VirtualMachineFloppyInfo `xml:"floppy,omitempty"` + LegacyNetworkInfo []VirtualMachineLegacyNetworkSwitchInfo `xml:"legacyNetworkInfo,omitempty"` + ScsiPassthrough []VirtualMachineScsiPassthroughInfo `xml:"scsiPassthrough,omitempty"` + ScsiDisk []VirtualMachineScsiDiskDeviceInfo `xml:"scsiDisk,omitempty"` + IdeDisk []VirtualMachineIdeDiskDeviceInfo `xml:"ideDisk,omitempty"` + MaxMemMBOptimalPerf int `xml:"maxMemMBOptimalPerf"` + ResourcePool *ResourcePoolRuntimeInfo `xml:"resourcePool,omitempty"` + AutoVmotion *bool `xml:"autoVmotion"` + PciPassthrough []BaseVirtualMachinePciPassthroughInfo `xml:"pciPassthrough,omitempty,typeattr"` + Sriov []VirtualMachineSriovInfo `xml:"sriov,omitempty"` + VFlashModule []VirtualMachineVFlashModuleInfo `xml:"vFlashModule,omitempty"` + SharedGpuPassthroughTypes []VirtualMachinePciSharedGpuPassthroughInfo `xml:"sharedGpuPassthroughTypes,omitempty"` +} + +func init() { + t["ConfigTarget"] = reflect.TypeOf((*ConfigTarget)(nil)).Elem() +} + +type ConfigureDatastoreIORMRequestType struct { + This ManagedObjectReference `xml:"_this"` + Datastore ManagedObjectReference `xml:"datastore"` + Spec StorageIORMConfigSpec `xml:"spec"` +} + +func init() { + t["ConfigureDatastoreIORMRequestType"] = reflect.TypeOf((*ConfigureDatastoreIORMRequestType)(nil)).Elem() +} + +type ConfigureDatastoreIORM_Task ConfigureDatastoreIORMRequestType + +func init() { + t["ConfigureDatastoreIORM_Task"] = reflect.TypeOf((*ConfigureDatastoreIORM_Task)(nil)).Elem() +} + +type ConfigureDatastoreIORM_TaskResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + +type ConfigureDatastorePrincipal ConfigureDatastorePrincipalRequestType + +func init() { + t["ConfigureDatastorePrincipal"] = reflect.TypeOf((*ConfigureDatastorePrincipal)(nil)).Elem() +} + +type ConfigureDatastorePrincipalRequestType struct { + This ManagedObjectReference `xml:"_this"` + UserName string `xml:"userName"` 
+ Password string `xml:"password,omitempty"` +} + +func init() { + t["ConfigureDatastorePrincipalRequestType"] = reflect.TypeOf((*ConfigureDatastorePrincipalRequestType)(nil)).Elem() +} + +type ConfigureDatastorePrincipalResponse struct { +} + +type ConfigureEvcModeRequestType struct { + This ManagedObjectReference `xml:"_this"` + EvcModeKey string `xml:"evcModeKey"` +} + +func init() { + t["ConfigureEvcModeRequestType"] = reflect.TypeOf((*ConfigureEvcModeRequestType)(nil)).Elem() +} + +type ConfigureEvcMode_Task ConfigureEvcModeRequestType + +func init() { + t["ConfigureEvcMode_Task"] = reflect.TypeOf((*ConfigureEvcMode_Task)(nil)).Elem() +} + +type ConfigureEvcMode_TaskResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + +type ConfigureHostCacheRequestType struct { + This ManagedObjectReference `xml:"_this"` + Spec HostCacheConfigurationSpec `xml:"spec"` +} + +func init() { + t["ConfigureHostCacheRequestType"] = reflect.TypeOf((*ConfigureHostCacheRequestType)(nil)).Elem() +} + +type ConfigureHostCache_Task ConfigureHostCacheRequestType + +func init() { + t["ConfigureHostCache_Task"] = reflect.TypeOf((*ConfigureHostCache_Task)(nil)).Elem() +} + +type ConfigureHostCache_TaskResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + +type ConfigureLicenseSource ConfigureLicenseSourceRequestType + +func init() { + t["ConfigureLicenseSource"] = reflect.TypeOf((*ConfigureLicenseSource)(nil)).Elem() +} + +type ConfigureLicenseSourceRequestType struct { + This ManagedObjectReference `xml:"_this"` + Host *ManagedObjectReference `xml:"host,omitempty"` + LicenseSource BaseLicenseSource `xml:"licenseSource,typeattr"` +} + +func init() { + t["ConfigureLicenseSourceRequestType"] = reflect.TypeOf((*ConfigureLicenseSourceRequestType)(nil)).Elem() +} + +type ConfigureLicenseSourceResponse struct { +} + +type ConfigurePowerPolicy ConfigurePowerPolicyRequestType + +func init() { + t["ConfigurePowerPolicy"] = reflect.TypeOf((*ConfigurePowerPolicy)(nil)).Elem() +} + +type ConfigurePowerPolicyRequestType struct { + This ManagedObjectReference `xml:"_this"` + Key int `xml:"key"` +} + +func init() { + t["ConfigurePowerPolicyRequestType"] = reflect.TypeOf((*ConfigurePowerPolicyRequestType)(nil)).Elem() +} + +type ConfigurePowerPolicyResponse struct { +} + +type ConfigureStorageDrsForPodRequestType struct { + This ManagedObjectReference `xml:"_this"` + Pod ManagedObjectReference `xml:"pod"` + Spec StorageDrsConfigSpec `xml:"spec"` + Modify bool `xml:"modify"` +} + +func init() { + t["ConfigureStorageDrsForPodRequestType"] = reflect.TypeOf((*ConfigureStorageDrsForPodRequestType)(nil)).Elem() +} + +type ConfigureStorageDrsForPod_Task ConfigureStorageDrsForPodRequestType + +func init() { + t["ConfigureStorageDrsForPod_Task"] = reflect.TypeOf((*ConfigureStorageDrsForPod_Task)(nil)).Elem() +} + +type ConfigureStorageDrsForPod_TaskResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + +type ConfigureVFlashResourceExRequestType struct { + This ManagedObjectReference `xml:"_this"` + DevicePath []string `xml:"devicePath,omitempty"` +} + +func init() { + t["ConfigureVFlashResourceExRequestType"] = reflect.TypeOf((*ConfigureVFlashResourceExRequestType)(nil)).Elem() +} + +type ConfigureVFlashResourceEx_Task ConfigureVFlashResourceExRequestType + +func init() { + t["ConfigureVFlashResourceEx_Task"] = reflect.TypeOf((*ConfigureVFlashResourceEx_Task)(nil)).Elem() +} + +type ConfigureVFlashResourceEx_TaskResponse struct { + Returnval ManagedObjectReference 
`xml:"returnval"` +} + +type ConflictingConfiguration struct { + DvsFault + + ConfigInConflict []ConflictingConfigurationConfig `xml:"configInConflict"` +} + +func init() { + t["ConflictingConfiguration"] = reflect.TypeOf((*ConflictingConfiguration)(nil)).Elem() +} + +type ConflictingConfigurationConfig struct { + DynamicData + + Entity *ManagedObjectReference `xml:"entity,omitempty"` + PropertyPath string `xml:"propertyPath"` +} + +func init() { + t["ConflictingConfigurationConfig"] = reflect.TypeOf((*ConflictingConfigurationConfig)(nil)).Elem() +} + +type ConflictingConfigurationFault ConflictingConfiguration + +func init() { + t["ConflictingConfigurationFault"] = reflect.TypeOf((*ConflictingConfigurationFault)(nil)).Elem() +} + +type ConflictingDatastoreFound struct { + RuntimeFault + + Name string `xml:"name"` + Url string `xml:"url"` +} + +func init() { + t["ConflictingDatastoreFound"] = reflect.TypeOf((*ConflictingDatastoreFound)(nil)).Elem() +} + +type ConflictingDatastoreFoundFault ConflictingDatastoreFound + +func init() { + t["ConflictingDatastoreFoundFault"] = reflect.TypeOf((*ConflictingDatastoreFoundFault)(nil)).Elem() +} + +type ConnectedIso struct { + OvfExport + + Cdrom VirtualCdrom `xml:"cdrom"` + Filename string `xml:"filename"` +} + +func init() { + t["ConnectedIso"] = reflect.TypeOf((*ConnectedIso)(nil)).Elem() +} + +type ConnectedIsoFault ConnectedIso + +func init() { + t["ConnectedIsoFault"] = reflect.TypeOf((*ConnectedIsoFault)(nil)).Elem() +} + +type ConsolidateVMDisksRequestType struct { + This ManagedObjectReference `xml:"_this"` +} + +func init() { + t["ConsolidateVMDisksRequestType"] = reflect.TypeOf((*ConsolidateVMDisksRequestType)(nil)).Elem() +} + +type ConsolidateVMDisks_Task ConsolidateVMDisksRequestType + +func init() { + t["ConsolidateVMDisks_Task"] = reflect.TypeOf((*ConsolidateVMDisks_Task)(nil)).Elem() +} + +type ConsolidateVMDisks_TaskResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + +type ContinueRetrievePropertiesEx ContinueRetrievePropertiesExRequestType + +func init() { + t["ContinueRetrievePropertiesEx"] = reflect.TypeOf((*ContinueRetrievePropertiesEx)(nil)).Elem() +} + +type ContinueRetrievePropertiesExRequestType struct { + This ManagedObjectReference `xml:"_this"` + Token string `xml:"token"` +} + +func init() { + t["ContinueRetrievePropertiesExRequestType"] = reflect.TypeOf((*ContinueRetrievePropertiesExRequestType)(nil)).Elem() +} + +type ContinueRetrievePropertiesExResponse struct { + Returnval RetrieveResult `xml:"returnval"` +} + +type CopyDatastoreFileRequestType struct { + This ManagedObjectReference `xml:"_this"` + SourceName string `xml:"sourceName"` + SourceDatacenter *ManagedObjectReference `xml:"sourceDatacenter,omitempty"` + DestinationName string `xml:"destinationName"` + DestinationDatacenter *ManagedObjectReference `xml:"destinationDatacenter,omitempty"` + Force *bool `xml:"force"` +} + +func init() { + t["CopyDatastoreFileRequestType"] = reflect.TypeOf((*CopyDatastoreFileRequestType)(nil)).Elem() +} + +type CopyDatastoreFile_Task CopyDatastoreFileRequestType + +func init() { + t["CopyDatastoreFile_Task"] = reflect.TypeOf((*CopyDatastoreFile_Task)(nil)).Elem() +} + +type CopyDatastoreFile_TaskResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + +type CopyVirtualDiskRequestType struct { + This ManagedObjectReference `xml:"_this"` + SourceName string `xml:"sourceName"` + SourceDatacenter *ManagedObjectReference `xml:"sourceDatacenter,omitempty"` + DestName string `xml:"destName"` + 
+ DestDatacenter *ManagedObjectReference `xml:"destDatacenter,omitempty"`
+ DestSpec BaseVirtualDiskSpec `xml:"destSpec,omitempty,typeattr"`
+ Force *bool `xml:"force"`
+}
+
+func init() {
+ t["CopyVirtualDiskRequestType"] = reflect.TypeOf((*CopyVirtualDiskRequestType)(nil)).Elem()
+}
+
+type CopyVirtualDisk_Task CopyVirtualDiskRequestType
+
+func init() {
+ t["CopyVirtualDisk_Task"] = reflect.TypeOf((*CopyVirtualDisk_Task)(nil)).Elem()
+}
+
+type CopyVirtualDisk_TaskResponse struct {
+ Returnval ManagedObjectReference `xml:"returnval"`
+}
+
+type CpuCompatibilityUnknown struct {
+ CpuIncompatible
+}
+
+func init() {
+ t["CpuCompatibilityUnknown"] = reflect.TypeOf((*CpuCompatibilityUnknown)(nil)).Elem()
+}
+
+type CpuCompatibilityUnknownFault CpuCompatibilityUnknown
+
+func init() {
+ t["CpuCompatibilityUnknownFault"] = reflect.TypeOf((*CpuCompatibilityUnknownFault)(nil)).Elem()
+}
+
+type CpuHotPlugNotSupported struct {
+ VmConfigFault
+}
+
+func init() {
+ t["CpuHotPlugNotSupported"] = reflect.TypeOf((*CpuHotPlugNotSupported)(nil)).Elem()
+}
+
+type CpuHotPlugNotSupportedFault CpuHotPlugNotSupported
+
+func init() {
+ t["CpuHotPlugNotSupportedFault"] = reflect.TypeOf((*CpuHotPlugNotSupportedFault)(nil)).Elem()
+}
+
+type CpuIncompatible struct {
+ VirtualHardwareCompatibilityIssue
+
+ Level int `xml:"level"`
+ RegisterName string `xml:"registerName"`
+ RegisterBits string `xml:"registerBits,omitempty"`
+ DesiredBits string `xml:"desiredBits,omitempty"`
+ Host *ManagedObjectReference `xml:"host,omitempty"`
+}
+
+func init() {
+ t["CpuIncompatible"] = reflect.TypeOf((*CpuIncompatible)(nil)).Elem()
+}
+
+type CpuIncompatible1ECX struct {
+ CpuIncompatible
+
+ Sse3 bool `xml:"sse3"`
+ Pclmulqdq *bool `xml:"pclmulqdq"`
+ Ssse3 bool `xml:"ssse3"`
+ Sse41 bool `xml:"sse41"`
+ Sse42 bool `xml:"sse42"`
+ Aes *bool `xml:"aes"`
+ Other bool `xml:"other"`
+ OtherOnly bool `xml:"otherOnly"`
+}
+
+func init() {
+ t["CpuIncompatible1ECX"] = reflect.TypeOf((*CpuIncompatible1ECX)(nil)).Elem()
+}
+
+type CpuIncompatible1ECXFault CpuIncompatible1ECX
+
+func init() {
+ t["CpuIncompatible1ECXFault"] = reflect.TypeOf((*CpuIncompatible1ECXFault)(nil)).Elem()
+}
+
+type CpuIncompatible81EDX struct {
+ CpuIncompatible
+
+ Nx bool `xml:"nx"`
+ Ffxsr bool `xml:"ffxsr"`
+ Rdtscp bool `xml:"rdtscp"`
+ Lm bool `xml:"lm"`
+ Other bool `xml:"other"`
+ OtherOnly bool `xml:"otherOnly"`
+}
+
+func init() {
+ t["CpuIncompatible81EDX"] = reflect.TypeOf((*CpuIncompatible81EDX)(nil)).Elem()
+}
+
+type CpuIncompatible81EDXFault CpuIncompatible81EDX
+
+func init() {
+ t["CpuIncompatible81EDXFault"] = reflect.TypeOf((*CpuIncompatible81EDXFault)(nil)).Elem()
+}
+
+type CpuIncompatibleFault BaseCpuIncompatible
+
+func init() {
+ t["CpuIncompatibleFault"] = reflect.TypeOf((*CpuIncompatibleFault)(nil)).Elem()
+}
+
+type CreateAlarm CreateAlarmRequestType
+
+func init() {
+ t["CreateAlarm"] = reflect.TypeOf((*CreateAlarm)(nil)).Elem()
+}
+
+type CreateAlarmRequestType struct {
+ This ManagedObjectReference `xml:"_this"`
+ Entity ManagedObjectReference `xml:"entity"`
+ Spec BaseAlarmSpec `xml:"spec,typeattr"`
+}
+
+func init() {
+ t["CreateAlarmRequestType"] = reflect.TypeOf((*CreateAlarmRequestType)(nil)).Elem()
+}
+
+type CreateAlarmResponse struct {
+ Returnval ManagedObjectReference `xml:"returnval"`
+}
+
+type CreateChildVMRequestType struct {
+ This ManagedObjectReference `xml:"_this"`
+ Config VirtualMachineConfigSpec `xml:"config"`
+ Host *ManagedObjectReference `xml:"host,omitempty"`
+}
+
+func init() {
t["CreateChildVMRequestType"] = reflect.TypeOf((*CreateChildVMRequestType)(nil)).Elem() +} + +type CreateChildVM_Task CreateChildVMRequestType + +func init() { + t["CreateChildVM_Task"] = reflect.TypeOf((*CreateChildVM_Task)(nil)).Elem() +} + +type CreateChildVM_TaskResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + +type CreateCluster CreateClusterRequestType + +func init() { + t["CreateCluster"] = reflect.TypeOf((*CreateCluster)(nil)).Elem() +} + +type CreateClusterEx CreateClusterExRequestType + +func init() { + t["CreateClusterEx"] = reflect.TypeOf((*CreateClusterEx)(nil)).Elem() +} + +type CreateClusterExRequestType struct { + This ManagedObjectReference `xml:"_this"` + Name string `xml:"name"` + Spec ClusterConfigSpecEx `xml:"spec"` +} + +func init() { + t["CreateClusterExRequestType"] = reflect.TypeOf((*CreateClusterExRequestType)(nil)).Elem() +} + +type CreateClusterExResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + +type CreateClusterRequestType struct { + This ManagedObjectReference `xml:"_this"` + Name string `xml:"name"` + Spec ClusterConfigSpec `xml:"spec"` +} + +func init() { + t["CreateClusterRequestType"] = reflect.TypeOf((*CreateClusterRequestType)(nil)).Elem() +} + +type CreateClusterResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + +type CreateCollectorForEvents CreateCollectorForEventsRequestType + +func init() { + t["CreateCollectorForEvents"] = reflect.TypeOf((*CreateCollectorForEvents)(nil)).Elem() +} + +type CreateCollectorForEventsRequestType struct { + This ManagedObjectReference `xml:"_this"` + Filter EventFilterSpec `xml:"filter"` +} + +func init() { + t["CreateCollectorForEventsRequestType"] = reflect.TypeOf((*CreateCollectorForEventsRequestType)(nil)).Elem() +} + +type CreateCollectorForEventsResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + +type CreateCollectorForTasks CreateCollectorForTasksRequestType + +func init() { + t["CreateCollectorForTasks"] = reflect.TypeOf((*CreateCollectorForTasks)(nil)).Elem() +} + +type CreateCollectorForTasksRequestType struct { + This ManagedObjectReference `xml:"_this"` + Filter TaskFilterSpec `xml:"filter"` +} + +func init() { + t["CreateCollectorForTasksRequestType"] = reflect.TypeOf((*CreateCollectorForTasksRequestType)(nil)).Elem() +} + +type CreateCollectorForTasksResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + +type CreateContainerView CreateContainerViewRequestType + +func init() { + t["CreateContainerView"] = reflect.TypeOf((*CreateContainerView)(nil)).Elem() +} + +type CreateContainerViewRequestType struct { + This ManagedObjectReference `xml:"_this"` + Container ManagedObjectReference `xml:"container"` + Type []string `xml:"type,omitempty"` + Recursive bool `xml:"recursive"` +} + +func init() { + t["CreateContainerViewRequestType"] = reflect.TypeOf((*CreateContainerViewRequestType)(nil)).Elem() +} + +type CreateContainerViewResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + +type CreateCustomizationSpec CreateCustomizationSpecRequestType + +func init() { + t["CreateCustomizationSpec"] = reflect.TypeOf((*CreateCustomizationSpec)(nil)).Elem() +} + +type CreateCustomizationSpecRequestType struct { + This ManagedObjectReference `xml:"_this"` + Item CustomizationSpecItem `xml:"item"` +} + +func init() { + t["CreateCustomizationSpecRequestType"] = reflect.TypeOf((*CreateCustomizationSpecRequestType)(nil)).Elem() +} + +type CreateCustomizationSpecResponse struct { +} 
+
+type CreateDVPortgroupRequestType struct {
+ This ManagedObjectReference `xml:"_this"`
+ Spec DVPortgroupConfigSpec `xml:"spec"`
+}
+
+func init() {
+ t["CreateDVPortgroupRequestType"] = reflect.TypeOf((*CreateDVPortgroupRequestType)(nil)).Elem()
+}
+
+type CreateDVPortgroup_Task CreateDVPortgroupRequestType
+
+func init() {
+ t["CreateDVPortgroup_Task"] = reflect.TypeOf((*CreateDVPortgroup_Task)(nil)).Elem()
+}
+
+type CreateDVPortgroup_TaskResponse struct {
+ Returnval ManagedObjectReference `xml:"returnval"`
+}
+
+type CreateDVSRequestType struct {
+ This ManagedObjectReference `xml:"_this"`
+ Spec DVSCreateSpec `xml:"spec"`
+}
+
+func init() {
+ t["CreateDVSRequestType"] = reflect.TypeOf((*CreateDVSRequestType)(nil)).Elem()
+}
+
+type CreateDVS_Task CreateDVSRequestType
+
+func init() {
+ t["CreateDVS_Task"] = reflect.TypeOf((*CreateDVS_Task)(nil)).Elem()
+}
+
+type CreateDVS_TaskResponse struct {
+ Returnval ManagedObjectReference `xml:"returnval"`
+}
+
+type CreateDatacenter CreateDatacenterRequestType
+
+func init() {
+ t["CreateDatacenter"] = reflect.TypeOf((*CreateDatacenter)(nil)).Elem()
+}
+
+type CreateDatacenterRequestType struct {
+ This ManagedObjectReference `xml:"_this"`
+ Name string `xml:"name"`
+}
+
+func init() {
+ t["CreateDatacenterRequestType"] = reflect.TypeOf((*CreateDatacenterRequestType)(nil)).Elem()
+}
+
+type CreateDatacenterResponse struct {
+ Returnval ManagedObjectReference `xml:"returnval"`
+}
+
+type CreateDefaultProfile CreateDefaultProfileRequestType
+
+func init() {
+ t["CreateDefaultProfile"] = reflect.TypeOf((*CreateDefaultProfile)(nil)).Elem()
+}
+
+type CreateDefaultProfileRequestType struct {
+ This ManagedObjectReference `xml:"_this"`
+ ProfileType string `xml:"profileType"`
+ ProfileTypeName string `xml:"profileTypeName,omitempty"`
+ Profile *ManagedObjectReference `xml:"profile,omitempty"`
+}
+
+func init() {
+ t["CreateDefaultProfileRequestType"] = reflect.TypeOf((*CreateDefaultProfileRequestType)(nil)).Elem()
+}
+
+type CreateDefaultProfileResponse struct {
+ Returnval BaseApplyProfile `xml:"returnval,typeattr"`
+}
+
+type CreateDescriptor CreateDescriptorRequestType
+
+func init() {
+ t["CreateDescriptor"] = reflect.TypeOf((*CreateDescriptor)(nil)).Elem()
+}
+
+type CreateDescriptorRequestType struct {
+ This ManagedObjectReference `xml:"_this"`
+ Obj ManagedObjectReference `xml:"obj"`
+ Cdp OvfCreateDescriptorParams `xml:"cdp"`
+}
+
+func init() {
+ t["CreateDescriptorRequestType"] = reflect.TypeOf((*CreateDescriptorRequestType)(nil)).Elem()
+}
+
+type CreateDescriptorResponse struct {
+ Returnval OvfCreateDescriptorResult `xml:"returnval"`
+}
+
+type CreateDiagnosticPartition CreateDiagnosticPartitionRequestType
+
+func init() {
+ t["CreateDiagnosticPartition"] = reflect.TypeOf((*CreateDiagnosticPartition)(nil)).Elem()
+}
+
+type CreateDiagnosticPartitionRequestType struct {
+ This ManagedObjectReference `xml:"_this"`
+ Spec HostDiagnosticPartitionCreateSpec `xml:"spec"`
+}
+
+func init() {
+ t["CreateDiagnosticPartitionRequestType"] = reflect.TypeOf((*CreateDiagnosticPartitionRequestType)(nil)).Elem()
+}
+
+type CreateDiagnosticPartitionResponse struct {
+}
+
+type CreateDirectory CreateDirectoryRequestType
+
+func init() {
+ t["CreateDirectory"] = reflect.TypeOf((*CreateDirectory)(nil)).Elem()
+}
+
+type CreateDirectoryRequestType struct {
+ This ManagedObjectReference `xml:"_this"`
+ Datastore ManagedObjectReference `xml:"datastore"`
+ DisplayName string `xml:"displayName,omitempty"`
+ Policy string `xml:"policy,omitempty"`
+}
+
+func init() {
+ t["CreateDirectoryRequestType"] = reflect.TypeOf((*CreateDirectoryRequestType)(nil)).Elem()
+}
+
+type CreateDirectoryResponse struct {
+ Returnval string `xml:"returnval"`
+}
+
+type CreateFilter CreateFilterRequestType
+
+func init() {
+ t["CreateFilter"] = reflect.TypeOf((*CreateFilter)(nil)).Elem()
+}
+
+type CreateFilterRequestType struct {
+ This ManagedObjectReference `xml:"_this"`
+ Spec PropertyFilterSpec `xml:"spec"`
+ PartialUpdates bool `xml:"partialUpdates"`
+}
+
+func init() {
+ t["CreateFilterRequestType"] = reflect.TypeOf((*CreateFilterRequestType)(nil)).Elem()
+}
+
+type CreateFilterResponse struct {
+ Returnval ManagedObjectReference `xml:"returnval"`
+}
+
+type CreateFolder CreateFolderRequestType
+
+func init() {
+ t["CreateFolder"] = reflect.TypeOf((*CreateFolder)(nil)).Elem()
+}
+
+type CreateFolderRequestType struct {
+ This ManagedObjectReference `xml:"_this"`
+ Name string `xml:"name"`
+}
+
+func init() {
+ t["CreateFolderRequestType"] = reflect.TypeOf((*CreateFolderRequestType)(nil)).Elem()
+}
+
+type CreateFolderResponse struct {
+ Returnval ManagedObjectReference `xml:"returnval"`
+}
+
+type CreateGroup CreateGroupRequestType
+
+func init() {
+ t["CreateGroup"] = reflect.TypeOf((*CreateGroup)(nil)).Elem()
+}
+
+type CreateGroupRequestType struct {
+ This ManagedObjectReference `xml:"_this"`
+ Group BaseHostAccountSpec `xml:"group,typeattr"`
+}
+
+func init() {
+ t["CreateGroupRequestType"] = reflect.TypeOf((*CreateGroupRequestType)(nil)).Elem()
+}
+
+type CreateGroupResponse struct {
+}
+
+type CreateImportSpec CreateImportSpecRequestType
+
+func init() {
+ t["CreateImportSpec"] = reflect.TypeOf((*CreateImportSpec)(nil)).Elem()
+}
+
+type CreateImportSpecRequestType struct {
+ This ManagedObjectReference `xml:"_this"`
+ OvfDescriptor string `xml:"ovfDescriptor"`
+ ResourcePool ManagedObjectReference `xml:"resourcePool"`
+ Datastore ManagedObjectReference `xml:"datastore"`
+ Cisp OvfCreateImportSpecParams `xml:"cisp"`
+}
+
+func init() {
+ t["CreateImportSpecRequestType"] = reflect.TypeOf((*CreateImportSpecRequestType)(nil)).Elem()
+}
+
+type CreateImportSpecResponse struct {
+ Returnval OvfCreateImportSpecResult `xml:"returnval"`
+}
+
+type CreateInventoryView CreateInventoryViewRequestType
+
+func init() {
+ t["CreateInventoryView"] = reflect.TypeOf((*CreateInventoryView)(nil)).Elem()
+}
+
+type CreateInventoryViewRequestType struct {
+ This ManagedObjectReference `xml:"_this"`
+}
+
+func init() {
+ t["CreateInventoryViewRequestType"] = reflect.TypeOf((*CreateInventoryViewRequestType)(nil)).Elem()
+}
+
+type CreateInventoryViewResponse struct {
+ Returnval ManagedObjectReference `xml:"returnval"`
+}
+
+type CreateIpPool CreateIpPoolRequestType
+
+func init() {
+ t["CreateIpPool"] = reflect.TypeOf((*CreateIpPool)(nil)).Elem()
+}
+
+type CreateIpPoolRequestType struct {
+ This ManagedObjectReference `xml:"_this"`
+ Dc ManagedObjectReference `xml:"dc"`
+ Pool IpPool `xml:"pool"`
+}
+
+func init() {
+ t["CreateIpPoolRequestType"] = reflect.TypeOf((*CreateIpPoolRequestType)(nil)).Elem()
+}
+
+type CreateIpPoolResponse struct {
+ Returnval int `xml:"returnval"`
+}
+
+type CreateListView CreateListViewRequestType
+
+func init() {
+ t["CreateListView"] = reflect.TypeOf((*CreateListView)(nil)).Elem()
+}
+
+type CreateListViewFromView CreateListViewFromViewRequestType
+
+func init() {
+ t["CreateListViewFromView"] = reflect.TypeOf((*CreateListViewFromView)(nil)).Elem()
+}
+
+type CreateListViewFromViewRequestType struct {
+ This ManagedObjectReference `xml:"_this"`
`xml:"_this"` + View ManagedObjectReference `xml:"view"` +} + +func init() { + t["CreateListViewFromViewRequestType"] = reflect.TypeOf((*CreateListViewFromViewRequestType)(nil)).Elem() +} + +type CreateListViewFromViewResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + +type CreateListViewRequestType struct { + This ManagedObjectReference `xml:"_this"` + Obj []ManagedObjectReference `xml:"obj,omitempty"` +} + +func init() { + t["CreateListViewRequestType"] = reflect.TypeOf((*CreateListViewRequestType)(nil)).Elem() +} + +type CreateListViewResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + +type CreateLocalDatastore CreateLocalDatastoreRequestType + +func init() { + t["CreateLocalDatastore"] = reflect.TypeOf((*CreateLocalDatastore)(nil)).Elem() +} + +type CreateLocalDatastoreRequestType struct { + This ManagedObjectReference `xml:"_this"` + Name string `xml:"name"` + Path string `xml:"path"` +} + +func init() { + t["CreateLocalDatastoreRequestType"] = reflect.TypeOf((*CreateLocalDatastoreRequestType)(nil)).Elem() +} + +type CreateLocalDatastoreResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + +type CreateNasDatastore CreateNasDatastoreRequestType + +func init() { + t["CreateNasDatastore"] = reflect.TypeOf((*CreateNasDatastore)(nil)).Elem() +} + +type CreateNasDatastoreRequestType struct { + This ManagedObjectReference `xml:"_this"` + Spec HostNasVolumeSpec `xml:"spec"` +} + +func init() { + t["CreateNasDatastoreRequestType"] = reflect.TypeOf((*CreateNasDatastoreRequestType)(nil)).Elem() +} + +type CreateNasDatastoreResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + +type CreateObjectScheduledTask CreateObjectScheduledTaskRequestType + +func init() { + t["CreateObjectScheduledTask"] = reflect.TypeOf((*CreateObjectScheduledTask)(nil)).Elem() +} + +type CreateObjectScheduledTaskRequestType struct { + This ManagedObjectReference `xml:"_this"` + Obj ManagedObjectReference `xml:"obj"` + Spec BaseScheduledTaskSpec `xml:"spec,typeattr"` +} + +func init() { + t["CreateObjectScheduledTaskRequestType"] = reflect.TypeOf((*CreateObjectScheduledTaskRequestType)(nil)).Elem() +} + +type CreateObjectScheduledTaskResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + +type CreatePerfInterval CreatePerfIntervalRequestType + +func init() { + t["CreatePerfInterval"] = reflect.TypeOf((*CreatePerfInterval)(nil)).Elem() +} + +type CreatePerfIntervalRequestType struct { + This ManagedObjectReference `xml:"_this"` + IntervalId PerfInterval `xml:"intervalId"` +} + +func init() { + t["CreatePerfIntervalRequestType"] = reflect.TypeOf((*CreatePerfIntervalRequestType)(nil)).Elem() +} + +type CreatePerfIntervalResponse struct { +} + +type CreateProfile CreateProfileRequestType + +func init() { + t["CreateProfile"] = reflect.TypeOf((*CreateProfile)(nil)).Elem() +} + +type CreateProfileRequestType struct { + This ManagedObjectReference `xml:"_this"` + CreateSpec BaseProfileCreateSpec `xml:"createSpec,typeattr"` +} + +func init() { + t["CreateProfileRequestType"] = reflect.TypeOf((*CreateProfileRequestType)(nil)).Elem() +} + +type CreateProfileResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + +type CreatePropertyCollector CreatePropertyCollectorRequestType + +func init() { + t["CreatePropertyCollector"] = reflect.TypeOf((*CreatePropertyCollector)(nil)).Elem() +} + +type CreatePropertyCollectorRequestType struct { + This ManagedObjectReference `xml:"_this"` +} + +func init() { + 
t["CreatePropertyCollectorRequestType"] = reflect.TypeOf((*CreatePropertyCollectorRequestType)(nil)).Elem() +} + +type CreatePropertyCollectorResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + +type CreateRegistryKeyInGuest CreateRegistryKeyInGuestRequestType + +func init() { + t["CreateRegistryKeyInGuest"] = reflect.TypeOf((*CreateRegistryKeyInGuest)(nil)).Elem() +} + +type CreateRegistryKeyInGuestRequestType struct { + This ManagedObjectReference `xml:"_this"` + Vm ManagedObjectReference `xml:"vm"` + Auth BaseGuestAuthentication `xml:"auth,typeattr"` + KeyName GuestRegKeyNameSpec `xml:"keyName"` + IsVolatile bool `xml:"isVolatile"` + ClassType string `xml:"classType,omitempty"` +} + +func init() { + t["CreateRegistryKeyInGuestRequestType"] = reflect.TypeOf((*CreateRegistryKeyInGuestRequestType)(nil)).Elem() +} + +type CreateRegistryKeyInGuestResponse struct { +} + +type CreateResourcePool CreateResourcePoolRequestType + +func init() { + t["CreateResourcePool"] = reflect.TypeOf((*CreateResourcePool)(nil)).Elem() +} + +type CreateResourcePoolRequestType struct { + This ManagedObjectReference `xml:"_this"` + Name string `xml:"name"` + Spec ResourceConfigSpec `xml:"spec"` +} + +func init() { + t["CreateResourcePoolRequestType"] = reflect.TypeOf((*CreateResourcePoolRequestType)(nil)).Elem() +} + +type CreateResourcePoolResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + +type CreateScheduledTask CreateScheduledTaskRequestType + +func init() { + t["CreateScheduledTask"] = reflect.TypeOf((*CreateScheduledTask)(nil)).Elem() +} + +type CreateScheduledTaskRequestType struct { + This ManagedObjectReference `xml:"_this"` + Entity ManagedObjectReference `xml:"entity"` + Spec BaseScheduledTaskSpec `xml:"spec,typeattr"` +} + +func init() { + t["CreateScheduledTaskRequestType"] = reflect.TypeOf((*CreateScheduledTaskRequestType)(nil)).Elem() +} + +type CreateScheduledTaskResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + +type CreateScreenshotRequestType struct { + This ManagedObjectReference `xml:"_this"` +} + +func init() { + t["CreateScreenshotRequestType"] = reflect.TypeOf((*CreateScreenshotRequestType)(nil)).Elem() +} + +type CreateScreenshot_Task CreateScreenshotRequestType + +func init() { + t["CreateScreenshot_Task"] = reflect.TypeOf((*CreateScreenshot_Task)(nil)).Elem() +} + +type CreateScreenshot_TaskResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + +type CreateSecondaryVMExRequestType struct { + This ManagedObjectReference `xml:"_this"` + Host *ManagedObjectReference `xml:"host,omitempty"` + Spec *FaultToleranceConfigSpec `xml:"spec,omitempty"` +} + +func init() { + t["CreateSecondaryVMExRequestType"] = reflect.TypeOf((*CreateSecondaryVMExRequestType)(nil)).Elem() +} + +type CreateSecondaryVMEx_Task CreateSecondaryVMExRequestType + +func init() { + t["CreateSecondaryVMEx_Task"] = reflect.TypeOf((*CreateSecondaryVMEx_Task)(nil)).Elem() +} + +type CreateSecondaryVMEx_TaskResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + +type CreateSecondaryVMRequestType struct { + This ManagedObjectReference `xml:"_this"` + Host *ManagedObjectReference `xml:"host,omitempty"` +} + +func init() { + t["CreateSecondaryVMRequestType"] = reflect.TypeOf((*CreateSecondaryVMRequestType)(nil)).Elem() +} + +type CreateSecondaryVM_Task CreateSecondaryVMRequestType + +func init() { + t["CreateSecondaryVM_Task"] = reflect.TypeOf((*CreateSecondaryVM_Task)(nil)).Elem() +} + +type 
+ Returnval ManagedObjectReference `xml:"returnval"`
+}
+
+type CreateSnapshotRequestType struct {
+ This ManagedObjectReference `xml:"_this"`
+ Name string `xml:"name"`
+ Description string `xml:"description,omitempty"`
+ Memory bool `xml:"memory"`
+ Quiesce bool `xml:"quiesce"`
+}
+
+func init() {
+ t["CreateSnapshotRequestType"] = reflect.TypeOf((*CreateSnapshotRequestType)(nil)).Elem()
+}
+
+type CreateSnapshot_Task CreateSnapshotRequestType
+
+func init() {
+ t["CreateSnapshot_Task"] = reflect.TypeOf((*CreateSnapshot_Task)(nil)).Elem()
+}
+
+type CreateSnapshot_TaskResponse struct {
+ Returnval ManagedObjectReference `xml:"returnval"`
+}
+
+type CreateStoragePod CreateStoragePodRequestType
+
+func init() {
+ t["CreateStoragePod"] = reflect.TypeOf((*CreateStoragePod)(nil)).Elem()
+}
+
+type CreateStoragePodRequestType struct {
+ This ManagedObjectReference `xml:"_this"`
+ Name string `xml:"name"`
+}
+
+func init() {
+ t["CreateStoragePodRequestType"] = reflect.TypeOf((*CreateStoragePodRequestType)(nil)).Elem()
+}
+
+type CreateStoragePodResponse struct {
+ Returnval ManagedObjectReference `xml:"returnval"`
+}
+
+type CreateTask CreateTaskRequestType
+
+func init() {
+ t["CreateTask"] = reflect.TypeOf((*CreateTask)(nil)).Elem()
+}
+
+type CreateTaskAction struct {
+ Action
+
+ TaskTypeId string `xml:"taskTypeId"`
+ Cancelable bool `xml:"cancelable"`
+}
+
+func init() {
+ t["CreateTaskAction"] = reflect.TypeOf((*CreateTaskAction)(nil)).Elem()
+}
+
+type CreateTaskRequestType struct {
+ This ManagedObjectReference `xml:"_this"`
+ Obj ManagedObjectReference `xml:"obj"`
+ TaskTypeId string `xml:"taskTypeId"`
+ InitiatedBy string `xml:"initiatedBy,omitempty"`
+ Cancelable bool `xml:"cancelable"`
+ ParentTaskKey string `xml:"parentTaskKey,omitempty"`
+ ActivationId string `xml:"activationId,omitempty"`
+}
+
+func init() {
+ t["CreateTaskRequestType"] = reflect.TypeOf((*CreateTaskRequestType)(nil)).Elem()
+}
+
+type CreateTaskResponse struct {
+ Returnval TaskInfo `xml:"returnval"`
+}
+
+type CreateTemporaryDirectoryInGuest CreateTemporaryDirectoryInGuestRequestType
+
+func init() {
+ t["CreateTemporaryDirectoryInGuest"] = reflect.TypeOf((*CreateTemporaryDirectoryInGuest)(nil)).Elem()
+}
+
+type CreateTemporaryDirectoryInGuestRequestType struct {
+ This ManagedObjectReference `xml:"_this"`
+ Vm ManagedObjectReference `xml:"vm"`
+ Auth BaseGuestAuthentication `xml:"auth,typeattr"`
+ Prefix string `xml:"prefix"`
+ Suffix string `xml:"suffix"`
+ DirectoryPath string `xml:"directoryPath,omitempty"`
+}
+
+func init() {
+ t["CreateTemporaryDirectoryInGuestRequestType"] = reflect.TypeOf((*CreateTemporaryDirectoryInGuestRequestType)(nil)).Elem()
+}
+
+type CreateTemporaryDirectoryInGuestResponse struct {
+ Returnval string `xml:"returnval"`
+}
+
+type CreateTemporaryFileInGuest CreateTemporaryFileInGuestRequestType
+
+func init() {
+ t["CreateTemporaryFileInGuest"] = reflect.TypeOf((*CreateTemporaryFileInGuest)(nil)).Elem()
+}
+
+type CreateTemporaryFileInGuestRequestType struct {
+ This ManagedObjectReference `xml:"_this"`
+ Vm ManagedObjectReference `xml:"vm"`
+ Auth BaseGuestAuthentication `xml:"auth,typeattr"`
+ Prefix string `xml:"prefix"`
+ Suffix string `xml:"suffix"`
+ DirectoryPath string `xml:"directoryPath,omitempty"`
+}
+
+func init() {
+ t["CreateTemporaryFileInGuestRequestType"] = reflect.TypeOf((*CreateTemporaryFileInGuestRequestType)(nil)).Elem()
+}
+
+type CreateTemporaryFileInGuestResponse struct {
+ Returnval string `xml:"returnval"`
+}
+
+type CreateUser CreateUserRequestType
+
+func init() {
+ t["CreateUser"] = reflect.TypeOf((*CreateUser)(nil)).Elem()
+}
+
+type CreateUserRequestType struct {
+ This ManagedObjectReference `xml:"_this"`
+ User BaseHostAccountSpec `xml:"user,typeattr"`
+}
+
+func init() {
+ t["CreateUserRequestType"] = reflect.TypeOf((*CreateUserRequestType)(nil)).Elem()
+}
+
+type CreateUserResponse struct {
+}
+
+type CreateVApp CreateVAppRequestType
+
+func init() {
+ t["CreateVApp"] = reflect.TypeOf((*CreateVApp)(nil)).Elem()
+}
+
+type CreateVAppRequestType struct {
+ This ManagedObjectReference `xml:"_this"`
+ Name string `xml:"name"`
+ ResSpec ResourceConfigSpec `xml:"resSpec"`
+ ConfigSpec VAppConfigSpec `xml:"configSpec"`
+ VmFolder *ManagedObjectReference `xml:"vmFolder,omitempty"`
+}
+
+func init() {
+ t["CreateVAppRequestType"] = reflect.TypeOf((*CreateVAppRequestType)(nil)).Elem()
+}
+
+type CreateVAppResponse struct {
+ Returnval ManagedObjectReference `xml:"returnval"`
+}
+
+type CreateVMRequestType struct {
+ This ManagedObjectReference `xml:"_this"`
+ Config VirtualMachineConfigSpec `xml:"config"`
+ Pool ManagedObjectReference `xml:"pool"`
+ Host *ManagedObjectReference `xml:"host,omitempty"`
+}
+
+func init() {
+ t["CreateVMRequestType"] = reflect.TypeOf((*CreateVMRequestType)(nil)).Elem()
+}
+
+type CreateVM_Task CreateVMRequestType
+
+func init() {
+ t["CreateVM_Task"] = reflect.TypeOf((*CreateVM_Task)(nil)).Elem()
+}
+
+type CreateVM_TaskResponse struct {
+ Returnval ManagedObjectReference `xml:"returnval"`
+}
+
+type CreateVRP CreateVRPRequestType
+
+func init() {
+ t["CreateVRP"] = reflect.TypeOf((*CreateVRP)(nil)).Elem()
+}
+
+type CreateVRPRequestType struct {
+ This ManagedObjectReference `xml:"_this"`
+ Spec VirtualResourcePoolSpec `xml:"spec"`
+}
+
+func init() {
+ t["CreateVRPRequestType"] = reflect.TypeOf((*CreateVRPRequestType)(nil)).Elem()
+}
+
+type CreateVRPResponse struct {
+ Returnval string `xml:"returnval"`
+}
+
+type CreateVirtualDiskRequestType struct {
+ This ManagedObjectReference `xml:"_this"`
+ Name string `xml:"name"`
+ Datacenter *ManagedObjectReference `xml:"datacenter,omitempty"`
+ Spec BaseVirtualDiskSpec `xml:"spec,typeattr"`
+}
+
+func init() {
+ t["CreateVirtualDiskRequestType"] = reflect.TypeOf((*CreateVirtualDiskRequestType)(nil)).Elem()
+}
+
+type CreateVirtualDisk_Task CreateVirtualDiskRequestType
+
+func init() {
+ t["CreateVirtualDisk_Task"] = reflect.TypeOf((*CreateVirtualDisk_Task)(nil)).Elem()
+}
+
+type CreateVirtualDisk_TaskResponse struct {
+ Returnval ManagedObjectReference `xml:"returnval"`
+}
+
+type CreateVmfsDatastore CreateVmfsDatastoreRequestType
+
+func init() {
+ t["CreateVmfsDatastore"] = reflect.TypeOf((*CreateVmfsDatastore)(nil)).Elem()
+}
+
+type CreateVmfsDatastoreRequestType struct {
+ This ManagedObjectReference `xml:"_this"`
+ Spec VmfsDatastoreCreateSpec `xml:"spec"`
+}
+
+func init() {
+ t["CreateVmfsDatastoreRequestType"] = reflect.TypeOf((*CreateVmfsDatastoreRequestType)(nil)).Elem()
+}
+
+type CreateVmfsDatastoreResponse struct {
+ Returnval ManagedObjectReference `xml:"returnval"`
+}
+
+type CreateVvolDatastore CreateVvolDatastoreRequestType
+
+func init() {
+ t["CreateVvolDatastore"] = reflect.TypeOf((*CreateVvolDatastore)(nil)).Elem()
+}
+
+type CreateVvolDatastoreRequestType struct {
+ This ManagedObjectReference `xml:"_this"`
+ Spec HostDatastoreSystemVvolDatastoreSpec `xml:"spec"`
+}
+
+func init() {
+ t["CreateVvolDatastoreRequestType"] = reflect.TypeOf((*CreateVvolDatastoreRequestType)(nil)).Elem()
+}
+
+type CreateVvolDatastoreResponse struct {
+ Returnval ManagedObjectReference `xml:"returnval"`
+}
+
+type CurrentTime CurrentTimeRequestType
+
+func init() {
+ t["CurrentTime"] = reflect.TypeOf((*CurrentTime)(nil)).Elem()
+}
+
+type CurrentTimeRequestType struct {
+ This ManagedObjectReference `xml:"_this"`
+}
+
+func init() {
+ t["CurrentTimeRequestType"] = reflect.TypeOf((*CurrentTimeRequestType)(nil)).Elem()
+}
+
+type CurrentTimeResponse struct {
+ Returnval time.Time `xml:"returnval"`
+}
+
+type CustomFieldDef struct {
+ DynamicData
+
+ Key int `xml:"key"`
+ Name string `xml:"name"`
+ Type string `xml:"type"`
+ ManagedObjectType string `xml:"managedObjectType,omitempty"`
+ FieldDefPrivileges *PrivilegePolicyDef `xml:"fieldDefPrivileges,omitempty"`
+ FieldInstancePrivileges *PrivilegePolicyDef `xml:"fieldInstancePrivileges,omitempty"`
+}
+
+func init() {
+ t["CustomFieldDef"] = reflect.TypeOf((*CustomFieldDef)(nil)).Elem()
+}
+
+type CustomFieldDefAddedEvent struct {
+ CustomFieldDefEvent
+}
+
+func init() {
+ t["CustomFieldDefAddedEvent"] = reflect.TypeOf((*CustomFieldDefAddedEvent)(nil)).Elem()
+}
+
+type CustomFieldDefEvent struct {
+ CustomFieldEvent
+
+ FieldKey int `xml:"fieldKey"`
+ Name string `xml:"name"`
+}
+
+func init() {
+ t["CustomFieldDefEvent"] = reflect.TypeOf((*CustomFieldDefEvent)(nil)).Elem()
+}
+
+type CustomFieldDefRemovedEvent struct {
+ CustomFieldDefEvent
+}
+
+func init() {
+ t["CustomFieldDefRemovedEvent"] = reflect.TypeOf((*CustomFieldDefRemovedEvent)(nil)).Elem()
+}
+
+type CustomFieldDefRenamedEvent struct {
+ CustomFieldDefEvent
+
+ NewName string `xml:"newName"`
+}
+
+func init() {
+ t["CustomFieldDefRenamedEvent"] = reflect.TypeOf((*CustomFieldDefRenamedEvent)(nil)).Elem()
+}
+
+type CustomFieldEvent struct {
+ Event
+}
+
+func init() {
+ t["CustomFieldEvent"] = reflect.TypeOf((*CustomFieldEvent)(nil)).Elem()
+}
+
+type CustomFieldStringValue struct {
+ CustomFieldValue
+
+ Value string `xml:"value"`
+}
+
+func init() {
+ t["CustomFieldStringValue"] = reflect.TypeOf((*CustomFieldStringValue)(nil)).Elem()
+}
+
+type CustomFieldValue struct {
+ DynamicData
+
+ Key int `xml:"key"`
+}
+
+func init() {
+ t["CustomFieldValue"] = reflect.TypeOf((*CustomFieldValue)(nil)).Elem()
+}
+
+type CustomFieldValueChangedEvent struct {
+ CustomFieldEvent
+
+ Entity ManagedEntityEventArgument `xml:"entity"`
+ FieldKey int `xml:"fieldKey"`
+ Name string `xml:"name"`
+ Value string `xml:"value"`
+}
+
+func init() {
+ t["CustomFieldValueChangedEvent"] = reflect.TypeOf((*CustomFieldValueChangedEvent)(nil)).Elem()
+}
+
+type CustomizationAdapterMapping struct {
+ DynamicData
+
+ MacAddress string `xml:"macAddress,omitempty"`
+ Adapter CustomizationIPSettings `xml:"adapter"`
+}
+
+func init() {
+ t["CustomizationAdapterMapping"] = reflect.TypeOf((*CustomizationAdapterMapping)(nil)).Elem()
+}
+
+type CustomizationAutoIpV6Generator struct {
+ CustomizationIpV6Generator
+}
+
+func init() {
+ t["CustomizationAutoIpV6Generator"] = reflect.TypeOf((*CustomizationAutoIpV6Generator)(nil)).Elem()
+}
+
+type CustomizationCustomIpGenerator struct {
+ CustomizationIpGenerator
+
+ Argument string `xml:"argument,omitempty"`
+}
+
+func init() {
+ t["CustomizationCustomIpGenerator"] = reflect.TypeOf((*CustomizationCustomIpGenerator)(nil)).Elem()
+}
+
+type CustomizationCustomIpV6Generator struct {
+ CustomizationIpV6Generator
+
+ Argument string `xml:"argument,omitempty"`
+}
+
+func init() {
+ t["CustomizationCustomIpV6Generator"] = reflect.TypeOf((*CustomizationCustomIpV6Generator)(nil)).Elem()
+}
+
+type CustomizationCustomName struct {
+ CustomizationName
+
+ Argument string `xml:"argument,omitempty"`
+}
+
+func init() {
+ t["CustomizationCustomName"] = reflect.TypeOf((*CustomizationCustomName)(nil)).Elem()
+}
+
+type CustomizationDhcpIpGenerator struct {
+ CustomizationIpGenerator
+}
+
+func init() {
+ t["CustomizationDhcpIpGenerator"] = reflect.TypeOf((*CustomizationDhcpIpGenerator)(nil)).Elem()
+}
+
+type CustomizationDhcpIpV6Generator struct {
+ CustomizationIpV6Generator
+}
+
+func init() {
+ t["CustomizationDhcpIpV6Generator"] = reflect.TypeOf((*CustomizationDhcpIpV6Generator)(nil)).Elem()
+}
+
+type CustomizationEvent struct {
+ VmEvent
+
+ LogLocation string `xml:"logLocation,omitempty"`
+}
+
+func init() {
+ t["CustomizationEvent"] = reflect.TypeOf((*CustomizationEvent)(nil)).Elem()
+}
+
+type CustomizationFailed struct {
+ CustomizationEvent
+}
+
+func init() {
+ t["CustomizationFailed"] = reflect.TypeOf((*CustomizationFailed)(nil)).Elem()
+}
+
+type CustomizationFault struct {
+ VimFault
+}
+
+func init() {
+ t["CustomizationFault"] = reflect.TypeOf((*CustomizationFault)(nil)).Elem()
+}
+
+type CustomizationFaultFault BaseCustomizationFault
+
+func init() {
+ t["CustomizationFaultFault"] = reflect.TypeOf((*CustomizationFaultFault)(nil)).Elem()
+}
+
+type CustomizationFixedIp struct {
+ CustomizationIpGenerator
+
+ IpAddress string `xml:"ipAddress"`
+}
+
+func init() {
+ t["CustomizationFixedIp"] = reflect.TypeOf((*CustomizationFixedIp)(nil)).Elem()
+}
+
+type CustomizationFixedIpV6 struct {
+ CustomizationIpV6Generator
+
+ IpAddress string `xml:"ipAddress"`
+ SubnetMask int `xml:"subnetMask"`
+}
+
+func init() {
+ t["CustomizationFixedIpV6"] = reflect.TypeOf((*CustomizationFixedIpV6)(nil)).Elem()
+}
+
+type CustomizationFixedName struct {
+ CustomizationName
+
+ Name string `xml:"name"`
+}
+
+func init() {
+ t["CustomizationFixedName"] = reflect.TypeOf((*CustomizationFixedName)(nil)).Elem()
+}
+
+type CustomizationGlobalIPSettings struct {
+ DynamicData
+
+ DnsSuffixList []string `xml:"dnsSuffixList,omitempty"`
+ DnsServerList []string `xml:"dnsServerList,omitempty"`
+}
+
+func init() {
+ t["CustomizationGlobalIPSettings"] = reflect.TypeOf((*CustomizationGlobalIPSettings)(nil)).Elem()
+}
+
+type CustomizationGuiRunOnce struct {
+ DynamicData
+
+ CommandList []string `xml:"commandList"`
+}
+
+func init() {
+ t["CustomizationGuiRunOnce"] = reflect.TypeOf((*CustomizationGuiRunOnce)(nil)).Elem()
+}
+
+type CustomizationGuiUnattended struct {
+ DynamicData
+
+ Password *CustomizationPassword `xml:"password,omitempty"`
+ TimeZone int `xml:"timeZone"`
+ AutoLogon bool `xml:"autoLogon"`
+ AutoLogonCount int `xml:"autoLogonCount"`
+}
+
+func init() {
+ t["CustomizationGuiUnattended"] = reflect.TypeOf((*CustomizationGuiUnattended)(nil)).Elem()
+}
+
+type CustomizationIPSettings struct {
+ DynamicData
+
+ Ip BaseCustomizationIpGenerator `xml:"ip,typeattr"`
+ SubnetMask string `xml:"subnetMask,omitempty"`
+ Gateway []string `xml:"gateway,omitempty"`
+ IpV6Spec *CustomizationIPSettingsIpV6AddressSpec `xml:"ipV6Spec,omitempty"`
+ DnsServerList []string `xml:"dnsServerList,omitempty"`
+ DnsDomain string `xml:"dnsDomain,omitempty"`
+ PrimaryWINS string `xml:"primaryWINS,omitempty"`
+ SecondaryWINS string `xml:"secondaryWINS,omitempty"`
+ NetBIOS CustomizationNetBIOSMode `xml:"netBIOS,omitempty"`
+}
+
+func init() {
+ t["CustomizationIPSettings"] = reflect.TypeOf((*CustomizationIPSettings)(nil)).Elem()
+}
+
+type CustomizationIPSettingsIpV6AddressSpec struct {
+ DynamicData
+
+ Ip []BaseCustomizationIpV6Generator `xml:"ip,typeattr"`
+ Gateway []string `xml:"gateway,omitempty"`
+}
+
+func init() {
+ t["CustomizationIPSettingsIpV6AddressSpec"] = reflect.TypeOf((*CustomizationIPSettingsIpV6AddressSpec)(nil)).Elem()
+}
+
+type CustomizationIdentification struct {
+ DynamicData
+
+ JoinWorkgroup string `xml:"joinWorkgroup,omitempty"`
+ JoinDomain string `xml:"joinDomain,omitempty"`
+ DomainAdmin string `xml:"domainAdmin,omitempty"`
+ DomainAdminPassword *CustomizationPassword `xml:"domainAdminPassword,omitempty"`
+}
+
+func init() {
+ t["CustomizationIdentification"] = reflect.TypeOf((*CustomizationIdentification)(nil)).Elem()
+}
+
+type CustomizationIdentitySettings struct {
+ DynamicData
+}
+
+func init() {
+ t["CustomizationIdentitySettings"] = reflect.TypeOf((*CustomizationIdentitySettings)(nil)).Elem()
+}
+
+type CustomizationIpGenerator struct {
+ DynamicData
+}
+
+func init() {
+ t["CustomizationIpGenerator"] = reflect.TypeOf((*CustomizationIpGenerator)(nil)).Elem()
+}
+
+type CustomizationIpV6Generator struct {
+ DynamicData
+}
+
+func init() {
+ t["CustomizationIpV6Generator"] = reflect.TypeOf((*CustomizationIpV6Generator)(nil)).Elem()
+}
+
+type CustomizationLicenseFilePrintData struct {
+ DynamicData
+
+ AutoMode CustomizationLicenseDataMode `xml:"autoMode"`
+ AutoUsers int `xml:"autoUsers,omitempty"`
+}
+
+func init() {
+ t["CustomizationLicenseFilePrintData"] = reflect.TypeOf((*CustomizationLicenseFilePrintData)(nil)).Elem()
+}
+
+type CustomizationLinuxIdentityFailed struct {
+ CustomizationFailed
+}
+
+func init() {
+ t["CustomizationLinuxIdentityFailed"] = reflect.TypeOf((*CustomizationLinuxIdentityFailed)(nil)).Elem()
+}
+
+type CustomizationLinuxOptions struct {
+ CustomizationOptions
+}
+
+func init() {
+ t["CustomizationLinuxOptions"] = reflect.TypeOf((*CustomizationLinuxOptions)(nil)).Elem()
+}
+
+type CustomizationLinuxPrep struct {
+ CustomizationIdentitySettings
+
+ HostName BaseCustomizationName `xml:"hostName,typeattr"`
+ Domain string `xml:"domain"`
+ TimeZone string `xml:"timeZone,omitempty"`
+ HwClockUTC *bool `xml:"hwClockUTC"`
+}
+
+func init() {
+ t["CustomizationLinuxPrep"] = reflect.TypeOf((*CustomizationLinuxPrep)(nil)).Elem()
+}
+
+type CustomizationName struct {
+ DynamicData
+}
+
+func init() {
+ t["CustomizationName"] = reflect.TypeOf((*CustomizationName)(nil)).Elem()
+}
+
+type CustomizationNetworkSetupFailed struct {
+ CustomizationFailed
+}
+
+func init() {
+ t["CustomizationNetworkSetupFailed"] = reflect.TypeOf((*CustomizationNetworkSetupFailed)(nil)).Elem()
+}
+
+type CustomizationOptions struct {
+ DynamicData
+}
+
+func init() {
+ t["CustomizationOptions"] = reflect.TypeOf((*CustomizationOptions)(nil)).Elem()
+}
+
+type CustomizationPassword struct {
+ DynamicData
+
+ Value string `xml:"value"`
+ PlainText bool `xml:"plainText"`
+}
+
+func init() {
+ t["CustomizationPassword"] = reflect.TypeOf((*CustomizationPassword)(nil)).Elem()
+}
+
+type CustomizationPending struct {
+ CustomizationFault
+}
+
+func init() {
+ t["CustomizationPending"] = reflect.TypeOf((*CustomizationPending)(nil)).Elem()
+}
+
+type CustomizationPendingFault CustomizationPending
+
+func init() {
+ t["CustomizationPendingFault"] = reflect.TypeOf((*CustomizationPendingFault)(nil)).Elem()
+}
+
+type CustomizationPrefixName struct {
+ CustomizationName
+
+ Base string `xml:"base"`
+}
+
+func init() {
+ t["CustomizationPrefixName"] = reflect.TypeOf((*CustomizationPrefixName)(nil)).Elem()
+}
+
+type CustomizationSpec struct {
+ DynamicData
+
+ Options BaseCustomizationOptions `xml:"options,omitempty,typeattr"`
+ Identity BaseCustomizationIdentitySettings `xml:"identity,typeattr"`
+ GlobalIPSettings CustomizationGlobalIPSettings `xml:"globalIPSettings"`
+ NicSettingMap []CustomizationAdapterMapping `xml:"nicSettingMap,omitempty"`
+ EncryptionKey []byte `xml:"encryptionKey,omitempty"`
+}
+
+func init() {
+ t["CustomizationSpec"] = reflect.TypeOf((*CustomizationSpec)(nil)).Elem()
+}
+
+type CustomizationSpecInfo struct {
+ DynamicData
+
+ Name string `xml:"name"`
+ Description string `xml:"description"`
+ Type string `xml:"type"`
+ ChangeVersion string `xml:"changeVersion,omitempty"`
+ LastUpdateTime *time.Time `xml:"lastUpdateTime"`
+}
+
+func init() {
+ t["CustomizationSpecInfo"] = reflect.TypeOf((*CustomizationSpecInfo)(nil)).Elem()
+}
+
+type CustomizationSpecItem struct {
+ DynamicData
+
+ Info CustomizationSpecInfo `xml:"info"`
+ Spec CustomizationSpec `xml:"spec"`
+}
+
+func init() {
+ t["CustomizationSpecItem"] = reflect.TypeOf((*CustomizationSpecItem)(nil)).Elem()
+}
+
+type CustomizationSpecItemToXml CustomizationSpecItemToXmlRequestType
+
+func init() {
+ t["CustomizationSpecItemToXml"] = reflect.TypeOf((*CustomizationSpecItemToXml)(nil)).Elem()
+}
+
+type CustomizationSpecItemToXmlRequestType struct {
+ This ManagedObjectReference `xml:"_this"`
+ Item CustomizationSpecItem `xml:"item"`
+}
+
+func init() {
+ t["CustomizationSpecItemToXmlRequestType"] = reflect.TypeOf((*CustomizationSpecItemToXmlRequestType)(nil)).Elem()
+}
+
+type CustomizationSpecItemToXmlResponse struct {
+ Returnval string `xml:"returnval"`
+}
+
+type CustomizationStartedEvent struct {
+ CustomizationEvent
+}
+
+func init() {
+ t["CustomizationStartedEvent"] = reflect.TypeOf((*CustomizationStartedEvent)(nil)).Elem()
+}
+
+type CustomizationStatelessIpV6Generator struct {
+ CustomizationIpV6Generator
+}
+
+func init() {
+ t["CustomizationStatelessIpV6Generator"] = reflect.TypeOf((*CustomizationStatelessIpV6Generator)(nil)).Elem()
+}
+
+type CustomizationSucceeded struct {
+ CustomizationEvent
+}
+
+func init() {
+ t["CustomizationSucceeded"] = reflect.TypeOf((*CustomizationSucceeded)(nil)).Elem()
+}
+
+type CustomizationSysprep struct {
+ CustomizationIdentitySettings
+
+ GuiUnattended CustomizationGuiUnattended `xml:"guiUnattended"`
+ UserData CustomizationUserData `xml:"userData"`
+ GuiRunOnce *CustomizationGuiRunOnce `xml:"guiRunOnce,omitempty"`
+ Identification CustomizationIdentification `xml:"identification"`
+ LicenseFilePrintData *CustomizationLicenseFilePrintData `xml:"licenseFilePrintData,omitempty"`
+}
+
+func init() {
+ t["CustomizationSysprep"] = reflect.TypeOf((*CustomizationSysprep)(nil)).Elem()
+}
+
+type CustomizationSysprepFailed struct {
+ CustomizationFailed
+
+ SysprepVersion string `xml:"sysprepVersion"`
+ SystemVersion string `xml:"systemVersion"`
+}
+
+func init() {
+ t["CustomizationSysprepFailed"] = reflect.TypeOf((*CustomizationSysprepFailed)(nil)).Elem()
+}
+
+type CustomizationSysprepText struct {
+ CustomizationIdentitySettings
+
+ Value string `xml:"value"`
+}
+
+func init() {
+ t["CustomizationSysprepText"] = reflect.TypeOf((*CustomizationSysprepText)(nil)).Elem()
+}
+
+type CustomizationUnknownFailure struct {
+ CustomizationFailed
+}
+
+func init() {
+ t["CustomizationUnknownFailure"] = reflect.TypeOf((*CustomizationUnknownFailure)(nil)).Elem()
+}
+
+type CustomizationUnknownIpGenerator struct {
+ CustomizationIpGenerator
+}
+
+func init() {
+ t["CustomizationUnknownIpGenerator"] = reflect.TypeOf((*CustomizationUnknownIpGenerator)(nil)).Elem()
+}
+
+type CustomizationUnknownIpV6Generator struct {
+ CustomizationIpV6Generator
+}
+
+func init() {
+ t["CustomizationUnknownIpV6Generator"] = reflect.TypeOf((*CustomizationUnknownIpV6Generator)(nil)).Elem()
+}
+
+type CustomizationUnknownName struct {
+ CustomizationName
+}
+
+func init() {
+ t["CustomizationUnknownName"] = reflect.TypeOf((*CustomizationUnknownName)(nil)).Elem()
+}
+
+type CustomizationUserData struct {
+ DynamicData
+
+ FullName string `xml:"fullName"`
+ OrgName string `xml:"orgName"`
+ ComputerName BaseCustomizationName `xml:"computerName,typeattr"`
+ ProductId string `xml:"productId"`
+}
+
+func init() {
+ t["CustomizationUserData"] = reflect.TypeOf((*CustomizationUserData)(nil)).Elem()
+}
+
+type CustomizationVirtualMachineName struct {
+ CustomizationName
+}
+
+func init() {
+ t["CustomizationVirtualMachineName"] = reflect.TypeOf((*CustomizationVirtualMachineName)(nil)).Elem()
+}
+
+type CustomizationWinOptions struct {
+ CustomizationOptions
+
+ ChangeSID bool `xml:"changeSID"`
+ DeleteAccounts bool `xml:"deleteAccounts"`
+ Reboot CustomizationSysprepRebootOption `xml:"reboot,omitempty"`
+}
+
+func init() {
+ t["CustomizationWinOptions"] = reflect.TypeOf((*CustomizationWinOptions)(nil)).Elem()
+}
+
+type CustomizeVMRequestType struct {
+ This ManagedObjectReference `xml:"_this"`
+ Spec CustomizationSpec `xml:"spec"`
+}
+
+func init() {
+ t["CustomizeVMRequestType"] = reflect.TypeOf((*CustomizeVMRequestType)(nil)).Elem()
+}
+
+type CustomizeVM_Task CustomizeVMRequestType
+
+func init() {
+ t["CustomizeVM_Task"] = reflect.TypeOf((*CustomizeVM_Task)(nil)).Elem()
+}
+
+type CustomizeVM_TaskResponse struct {
+ Returnval ManagedObjectReference `xml:"returnval"`
+}
+
+type DVPortConfigInfo struct {
+ DynamicData
+
+ Name string `xml:"name,omitempty"`
+ Scope []ManagedObjectReference `xml:"scope,omitempty"`
+ Description string `xml:"description,omitempty"`
+ Setting BaseDVPortSetting `xml:"setting,omitempty,typeattr"`
+ ConfigVersion string `xml:"configVersion"`
+}
+
+func init() {
+ t["DVPortConfigInfo"] = reflect.TypeOf((*DVPortConfigInfo)(nil)).Elem()
+}
+
+type DVPortConfigSpec struct {
+ DynamicData
+
+ Operation string `xml:"operation"`
+ Key string `xml:"key,omitempty"`
+ Name string `xml:"name,omitempty"`
+ Scope []ManagedObjectReference `xml:"scope,omitempty"`
+ Description string `xml:"description,omitempty"`
+ Setting BaseDVPortSetting `xml:"setting,omitempty,typeattr"`
+ ConfigVersion string `xml:"configVersion,omitempty"`
+}
+
+func init() {
+ t["DVPortConfigSpec"] = reflect.TypeOf((*DVPortConfigSpec)(nil)).Elem()
+}
+
+type DVPortNotSupported struct {
+ DeviceBackingNotSupported
+}
+
+func init() {
+ t["DVPortNotSupported"] = reflect.TypeOf((*DVPortNotSupported)(nil)).Elem()
+}
+
+type DVPortNotSupportedFault DVPortNotSupported
+
+func init() {
+ t["DVPortNotSupportedFault"] = reflect.TypeOf((*DVPortNotSupportedFault)(nil)).Elem()
+}
+
+type DVPortSetting struct {
+ DynamicData
+
+ Blocked *BoolPolicy `xml:"blocked,omitempty"`
+ VmDirectPathGen2Allowed *BoolPolicy `xml:"vmDirectPathGen2Allowed,omitempty"`
+ InShapingPolicy *DVSTrafficShapingPolicy `xml:"inShapingPolicy,omitempty"`
+ OutShapingPolicy *DVSTrafficShapingPolicy `xml:"outShapingPolicy,omitempty"`
+ VendorSpecificConfig *DVSVendorSpecificConfig `xml:"vendorSpecificConfig,omitempty"`
+ NetworkResourcePoolKey *StringPolicy `xml:"networkResourcePoolKey,omitempty"`
+ FilterPolicy *DvsFilterPolicy `xml:"filterPolicy,omitempty"`
+}
+
+func init() {
+ t["DVPortSetting"] = reflect.TypeOf((*DVPortSetting)(nil)).Elem()
+}
+
+type DVPortState struct {
+ DynamicData
+
+ RuntimeInfo *DVPortStatus `xml:"runtimeInfo,omitempty"`
+ Stats DistributedVirtualSwitchPortStatistics `xml:"stats"`
+ VendorSpecificState []DistributedVirtualSwitchKeyedOpaqueBlob `xml:"vendorSpecificState,omitempty"`
+}
+
+func init() {
+ t["DVPortState"] = reflect.TypeOf((*DVPortState)(nil)).Elem()
+}
+
+type DVPortStatus struct {
+ DynamicData
+
+ LinkUp bool `xml:"linkUp"`
+ Blocked bool `xml:"blocked"`
+ VlanIds []NumericRange `xml:"vlanIds,omitempty"`
+ TrunkingMode *bool `xml:"trunkingMode"`
+ Mtu int `xml:"mtu,omitempty"`
+ LinkPeer string `xml:"linkPeer,omitempty"`
+ MacAddress string `xml:"macAddress,omitempty"`
+ StatusDetail string `xml:"statusDetail,omitempty"`
+ VmDirectPathGen2Active *bool `xml:"vmDirectPathGen2Active"`
+ VmDirectPathGen2InactiveReasonNetwork []string `xml:"vmDirectPathGen2InactiveReasonNetwork,omitempty"`
+ VmDirectPathGen2InactiveReasonOther []string `xml:"vmDirectPathGen2InactiveReasonOther,omitempty"`
+ VmDirectPathGen2InactiveReasonExtended string `xml:"vmDirectPathGen2InactiveReasonExtended,omitempty"`
+}
+
+func init() {
+ t["DVPortStatus"] = reflect.TypeOf((*DVPortStatus)(nil)).Elem()
+}
+
+type DVPortgroupConfigInfo struct {
+ DynamicData
+
+ Key string `xml:"key"`
+ Name string `xml:"name"`
+ NumPorts int `xml:"numPorts"`
+ DistributedVirtualSwitch *ManagedObjectReference `xml:"distributedVirtualSwitch,omitempty"`
+ DefaultPortConfig BaseDVPortSetting `xml:"defaultPortConfig,omitempty,typeattr"`
+ Description string `xml:"description,omitempty"`
+ Type string `xml:"type"`
+ Policy BaseDVPortgroupPolicy `xml:"policy,typeattr"`
+ PortNameFormat string `xml:"portNameFormat,omitempty"`
+ Scope []ManagedObjectReference `xml:"scope,omitempty"`
+ VendorSpecificConfig []DistributedVirtualSwitchKeyedOpaqueBlob `xml:"vendorSpecificConfig,omitempty"`
+ ConfigVersion string `xml:"configVersion,omitempty"`
+ AutoExpand *bool `xml:"autoExpand"`
+ VmVnicNetworkResourcePoolKey string `xml:"vmVnicNetworkResourcePoolKey,omitempty"`
+}
+
+func init() {
+ t["DVPortgroupConfigInfo"] = reflect.TypeOf((*DVPortgroupConfigInfo)(nil)).Elem()
+}
+
+type DVPortgroupConfigSpec struct {
+ DynamicData
+
+ ConfigVersion string `xml:"configVersion,omitempty"`
+ Name string `xml:"name,omitempty"`
+ NumPorts int `xml:"numPorts,omitempty"`
+ PortNameFormat string `xml:"portNameFormat,omitempty"`
+ DefaultPortConfig BaseDVPortSetting `xml:"defaultPortConfig,omitempty,typeattr"`
+ Description string `xml:"description,omitempty"`
+ Type string `xml:"type,omitempty"`
+ Scope []ManagedObjectReference `xml:"scope,omitempty"`
+ Policy BaseDVPortgroupPolicy `xml:"policy,omitempty,typeattr"`
+ VendorSpecificConfig []DistributedVirtualSwitchKeyedOpaqueBlob `xml:"vendorSpecificConfig,omitempty"`
+ AutoExpand *bool `xml:"autoExpand"`
+ VmVnicNetworkResourcePoolKey string `xml:"vmVnicNetworkResourcePoolKey,omitempty"`
+}
+
+func init() {
+ t["DVPortgroupConfigSpec"] = reflect.TypeOf((*DVPortgroupConfigSpec)(nil)).Elem()
+}
+
+type DVPortgroupCreatedEvent struct {
+ DVPortgroupEvent
+}
+
+func init() {
+ t["DVPortgroupCreatedEvent"] = reflect.TypeOf((*DVPortgroupCreatedEvent)(nil)).Elem()
+}
+
+type DVPortgroupDestroyedEvent struct {
+ DVPortgroupEvent
+}
+
+func init() {
+ t["DVPortgroupDestroyedEvent"] = reflect.TypeOf((*DVPortgroupDestroyedEvent)(nil)).Elem()
+}
+
+type DVPortgroupEvent struct {
+ Event
+}
+
+func init() {
+ t["DVPortgroupEvent"] = reflect.TypeOf((*DVPortgroupEvent)(nil)).Elem()
+}
+
+type DVPortgroupPolicy struct {
+ DynamicData
+
+ BlockOverrideAllowed bool `xml:"blockOverrideAllowed"`
+ ShapingOverrideAllowed bool `xml:"shapingOverrideAllowed"`
+ VendorConfigOverrideAllowed bool `xml:"vendorConfigOverrideAllowed"`
+ LivePortMovingAllowed bool `xml:"livePortMovingAllowed"`
+ PortConfigResetAtDisconnect bool `xml:"portConfigResetAtDisconnect"`
+ NetworkResourcePoolOverrideAllowed *bool `xml:"networkResourcePoolOverrideAllowed"`
+ TrafficFilterOverrideAllowed *bool `xml:"trafficFilterOverrideAllowed"`
+}
+
+func init() {
+ t["DVPortgroupPolicy"] = reflect.TypeOf((*DVPortgroupPolicy)(nil)).Elem()
+}
+
+type DVPortgroupReconfiguredEvent struct {
+ DVPortgroupEvent
+
+ ConfigSpec DVPortgroupConfigSpec `xml:"configSpec"`
+}
+
+func init() {
+ t["DVPortgroupReconfiguredEvent"] = reflect.TypeOf((*DVPortgroupReconfiguredEvent)(nil)).Elem()
+}
+
+type DVPortgroupRenamedEvent struct {
+ DVPortgroupEvent
+
+ OldName string `xml:"oldName"`
+ NewName string `xml:"newName"`
+}
+
+func init() {
+ t["DVPortgroupRenamedEvent"] = reflect.TypeOf((*DVPortgroupRenamedEvent)(nil)).Elem()
+}
+
+type DVPortgroupRollbackRequestType struct {
+ This ManagedObjectReference `xml:"_this"`
+ EntityBackup *EntityBackupConfig `xml:"entityBackup,omitempty"`
+}
+
+func init() {
+ t["DVPortgroupRollbackRequestType"] = reflect.TypeOf((*DVPortgroupRollbackRequestType)(nil)).Elem()
+}
+
+type DVPortgroupRollback_Task DVPortgroupRollbackRequestType
+
+func init() {
+ t["DVPortgroupRollback_Task"] = reflect.TypeOf((*DVPortgroupRollback_Task)(nil)).Elem()
+}
+
+type DVPortgroupRollback_TaskResponse struct {
+ Returnval ManagedObjectReference `xml:"returnval"`
+}
+
+type DVPortgroupSelection struct {
+ SelectionSet
+
+ DvsUuid string `xml:"dvsUuid"`
+ PortgroupKey []string `xml:"portgroupKey"`
+}
+
+func init() {
+ t["DVPortgroupSelection"] = reflect.TypeOf((*DVPortgroupSelection)(nil)).Elem()
+}
+
+type DVSBackupRestoreCapability struct {
+ DynamicData
+
+ BackupRestoreSupported bool `xml:"backupRestoreSupported"`
+}
+
+func init() {
+ t["DVSBackupRestoreCapability"] = reflect.TypeOf((*DVSBackupRestoreCapability)(nil)).Elem()
+}
+
+type DVSCapability struct {
+ DynamicData
+
+ DvsOperationSupported *bool `xml:"dvsOperationSupported"`
+ DvPortGroupOperationSupported *bool `xml:"dvPortGroupOperationSupported"`
+ DvPortOperationSupported *bool `xml:"dvPortOperationSupported"`
+ CompatibleHostComponentProductInfo []DistributedVirtualSwitchHostProductSpec `xml:"compatibleHostComponentProductInfo,omitempty"`
+ FeaturesSupported BaseDVSFeatureCapability `xml:"featuresSupported,omitempty,typeattr"`
+}
+
+func init() {
+ t["DVSCapability"] = reflect.TypeOf((*DVSCapability)(nil)).Elem()
+}
+
+type DVSConfigInfo struct {
+ DynamicData
+
+ Uuid string `xml:"uuid"`
+ Name string `xml:"name"`
+ NumStandalonePorts int `xml:"numStandalonePorts"`
+ NumPorts int `xml:"numPorts"`
+ MaxPorts int `xml:"maxPorts"`
+ UplinkPortPolicy BaseDVSUplinkPortPolicy `xml:"uplinkPortPolicy,typeattr"`
+ UplinkPortgroup []ManagedObjectReference `xml:"uplinkPortgroup,omitempty"`
+ DefaultPortConfig BaseDVPortSetting `xml:"defaultPortConfig,typeattr"`
+ Host []DistributedVirtualSwitchHostMember `xml:"host,omitempty"`
+ ProductInfo DistributedVirtualSwitchProductSpec `xml:"productInfo"`
+ TargetInfo *DistributedVirtualSwitchProductSpec `xml:"targetInfo,omitempty"`
+ ExtensionKey string `xml:"extensionKey,omitempty"`
+ VendorSpecificConfig []DistributedVirtualSwitchKeyedOpaqueBlob `xml:"vendorSpecificConfig,omitempty"`
`xml:"vendorSpecificConfig,omitempty"` + Policy *DVSPolicy `xml:"policy,omitempty"` + Description string `xml:"description,omitempty"` + ConfigVersion string `xml:"configVersion"` + Contact DVSContactInfo `xml:"contact"` + SwitchIpAddress string `xml:"switchIpAddress,omitempty"` + CreateTime time.Time `xml:"createTime"` + NetworkResourceManagementEnabled *bool `xml:"networkResourceManagementEnabled"` + DefaultProxySwitchMaxNumPorts int `xml:"defaultProxySwitchMaxNumPorts,omitempty"` + HealthCheckConfig []BaseDVSHealthCheckConfig `xml:"healthCheckConfig,omitempty,typeattr"` + InfrastructureTrafficResourceConfig []DvsHostInfrastructureTrafficResource `xml:"infrastructureTrafficResourceConfig,omitempty"` + NetworkResourceControlVersion string `xml:"networkResourceControlVersion,omitempty"` + VmVnicNetworkResourcePool []DVSVmVnicNetworkResourcePool `xml:"vmVnicNetworkResourcePool,omitempty"` + PnicCapacityRatioForReservation int `xml:"pnicCapacityRatioForReservation,omitempty"` +} + +func init() { + t["DVSConfigInfo"] = reflect.TypeOf((*DVSConfigInfo)(nil)).Elem() +} + +type DVSConfigSpec struct { + DynamicData + + ConfigVersion string `xml:"configVersion,omitempty"` + Name string `xml:"name,omitempty"` + NumStandalonePorts int `xml:"numStandalonePorts,omitempty"` + MaxPorts int `xml:"maxPorts,omitempty"` + UplinkPortPolicy BaseDVSUplinkPortPolicy `xml:"uplinkPortPolicy,omitempty,typeattr"` + UplinkPortgroup []ManagedObjectReference `xml:"uplinkPortgroup,omitempty"` + DefaultPortConfig BaseDVPortSetting `xml:"defaultPortConfig,omitempty,typeattr"` + Host []DistributedVirtualSwitchHostMemberConfigSpec `xml:"host,omitempty"` + ExtensionKey string `xml:"extensionKey,omitempty"` + Description string `xml:"description,omitempty"` + Policy *DVSPolicy `xml:"policy,omitempty"` + VendorSpecificConfig []DistributedVirtualSwitchKeyedOpaqueBlob `xml:"vendorSpecificConfig,omitempty"` + Contact *DVSContactInfo `xml:"contact,omitempty"` + SwitchIpAddress string `xml:"switchIpAddress,omitempty"` + DefaultProxySwitchMaxNumPorts int `xml:"defaultProxySwitchMaxNumPorts,omitempty"` + InfrastructureTrafficResourceConfig []DvsHostInfrastructureTrafficResource `xml:"infrastructureTrafficResourceConfig,omitempty"` + NetworkResourceControlVersion string `xml:"networkResourceControlVersion,omitempty"` +} + +func init() { + t["DVSConfigSpec"] = reflect.TypeOf((*DVSConfigSpec)(nil)).Elem() +} + +type DVSContactInfo struct { + DynamicData + + Name string `xml:"name,omitempty"` + Contact string `xml:"contact,omitempty"` +} + +func init() { + t["DVSContactInfo"] = reflect.TypeOf((*DVSContactInfo)(nil)).Elem() +} + +type DVSCreateSpec struct { + DynamicData + + ConfigSpec BaseDVSConfigSpec `xml:"configSpec,typeattr"` + ProductInfo *DistributedVirtualSwitchProductSpec `xml:"productInfo,omitempty"` + Capability *DVSCapability `xml:"capability,omitempty"` +} + +func init() { + t["DVSCreateSpec"] = reflect.TypeOf((*DVSCreateSpec)(nil)).Elem() +} + +type DVSFailureCriteria struct { + InheritablePolicy + + CheckSpeed *StringPolicy `xml:"checkSpeed,omitempty"` + Speed *IntPolicy `xml:"speed,omitempty"` + CheckDuplex *BoolPolicy `xml:"checkDuplex,omitempty"` + FullDuplex *BoolPolicy `xml:"fullDuplex,omitempty"` + CheckErrorPercent *BoolPolicy `xml:"checkErrorPercent,omitempty"` + Percentage *IntPolicy `xml:"percentage,omitempty"` + CheckBeacon *BoolPolicy `xml:"checkBeacon,omitempty"` +} + +func init() { + t["DVSFailureCriteria"] = reflect.TypeOf((*DVSFailureCriteria)(nil)).Elem() +} + +type DVSFeatureCapability struct { + 
DynamicData + + NetworkResourceManagementSupported bool `xml:"networkResourceManagementSupported"` + VmDirectPathGen2Supported bool `xml:"vmDirectPathGen2Supported"` + NicTeamingPolicy []string `xml:"nicTeamingPolicy,omitempty"` + NetworkResourcePoolHighShareValue int `xml:"networkResourcePoolHighShareValue,omitempty"` + NetworkResourceManagementCapability *DVSNetworkResourceManagementCapability `xml:"networkResourceManagementCapability,omitempty"` + HealthCheckCapability BaseDVSHealthCheckCapability `xml:"healthCheckCapability,omitempty,typeattr"` + RollbackCapability *DVSRollbackCapability `xml:"rollbackCapability,omitempty"` + BackupRestoreCapability *DVSBackupRestoreCapability `xml:"backupRestoreCapability,omitempty"` + NetworkFilterSupported *bool `xml:"networkFilterSupported"` +} + +func init() { + t["DVSFeatureCapability"] = reflect.TypeOf((*DVSFeatureCapability)(nil)).Elem() +} + +type DVSHealthCheckCapability struct { + DynamicData +} + +func init() { + t["DVSHealthCheckCapability"] = reflect.TypeOf((*DVSHealthCheckCapability)(nil)).Elem() +} + +type DVSHealthCheckConfig struct { + DynamicData + + Enable *bool `xml:"enable"` + Interval int `xml:"interval,omitempty"` +} + +func init() { + t["DVSHealthCheckConfig"] = reflect.TypeOf((*DVSHealthCheckConfig)(nil)).Elem() +} + +type DVSHostLocalPortInfo struct { + DynamicData + + SwitchUuid string `xml:"switchUuid"` + PortKey string `xml:"portKey"` + Setting BaseDVPortSetting `xml:"setting,typeattr"` + Vnic string `xml:"vnic"` +} + +func init() { + t["DVSHostLocalPortInfo"] = reflect.TypeOf((*DVSHostLocalPortInfo)(nil)).Elem() +} + +type DVSManagerDvsConfigTarget struct { + DynamicData + + DistributedVirtualPortgroup []DistributedVirtualPortgroupInfo `xml:"distributedVirtualPortgroup,omitempty"` + DistributedVirtualSwitch []DistributedVirtualSwitchInfo `xml:"distributedVirtualSwitch,omitempty"` +} + +func init() { + t["DVSManagerDvsConfigTarget"] = reflect.TypeOf((*DVSManagerDvsConfigTarget)(nil)).Elem() +} + +type DVSManagerExportEntityRequestType struct { + This ManagedObjectReference `xml:"_this"` + SelectionSet []BaseSelectionSet `xml:"selectionSet,typeattr"` +} + +func init() { + t["DVSManagerExportEntityRequestType"] = reflect.TypeOf((*DVSManagerExportEntityRequestType)(nil)).Elem() +} + +type DVSManagerExportEntity_Task DVSManagerExportEntityRequestType + +func init() { + t["DVSManagerExportEntity_Task"] = reflect.TypeOf((*DVSManagerExportEntity_Task)(nil)).Elem() +} + +type DVSManagerExportEntity_TaskResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + +type DVSManagerImportEntityRequestType struct { + This ManagedObjectReference `xml:"_this"` + EntityBackup []EntityBackupConfig `xml:"entityBackup"` + ImportType string `xml:"importType"` +} + +func init() { + t["DVSManagerImportEntityRequestType"] = reflect.TypeOf((*DVSManagerImportEntityRequestType)(nil)).Elem() +} + +type DVSManagerImportEntity_Task DVSManagerImportEntityRequestType + +func init() { + t["DVSManagerImportEntity_Task"] = reflect.TypeOf((*DVSManagerImportEntity_Task)(nil)).Elem() +} + +type DVSManagerImportEntity_TaskResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + +type DVSManagerLookupDvPortGroup DVSManagerLookupDvPortGroupRequestType + +func init() { + t["DVSManagerLookupDvPortGroup"] = reflect.TypeOf((*DVSManagerLookupDvPortGroup)(nil)).Elem() +} + +type DVSManagerLookupDvPortGroupRequestType struct { + This ManagedObjectReference `xml:"_this"` + SwitchUuid string `xml:"switchUuid"` + PortgroupKey string 
`xml:"portgroupKey"` +} + +func init() { + t["DVSManagerLookupDvPortGroupRequestType"] = reflect.TypeOf((*DVSManagerLookupDvPortGroupRequestType)(nil)).Elem() +} + +type DVSManagerLookupDvPortGroupResponse struct { + Returnval *ManagedObjectReference `xml:"returnval,omitempty"` +} + +type DVSNameArrayUplinkPortPolicy struct { + DVSUplinkPortPolicy + + UplinkPortName []string `xml:"uplinkPortName"` +} + +func init() { + t["DVSNameArrayUplinkPortPolicy"] = reflect.TypeOf((*DVSNameArrayUplinkPortPolicy)(nil)).Elem() +} + +type DVSNetworkResourceManagementCapability struct { + DynamicData + + NetworkResourceManagementSupported bool `xml:"networkResourceManagementSupported"` + NetworkResourcePoolHighShareValue int `xml:"networkResourcePoolHighShareValue"` + QosSupported bool `xml:"qosSupported"` + UserDefinedNetworkResourcePoolsSupported bool `xml:"userDefinedNetworkResourcePoolsSupported"` + NetworkResourceControlVersion3Supported *bool `xml:"networkResourceControlVersion3Supported"` +} + +func init() { + t["DVSNetworkResourceManagementCapability"] = reflect.TypeOf((*DVSNetworkResourceManagementCapability)(nil)).Elem() +} + +type DVSNetworkResourcePool struct { + DynamicData + + Key string `xml:"key"` + Name string `xml:"name,omitempty"` + Description string `xml:"description,omitempty"` + ConfigVersion string `xml:"configVersion"` + AllocationInfo DVSNetworkResourcePoolAllocationInfo `xml:"allocationInfo"` +} + +func init() { + t["DVSNetworkResourcePool"] = reflect.TypeOf((*DVSNetworkResourcePool)(nil)).Elem() +} + +type DVSNetworkResourcePoolAllocationInfo struct { + DynamicData + + Limit int64 `xml:"limit,omitempty"` + Shares *SharesInfo `xml:"shares,omitempty"` + PriorityTag int `xml:"priorityTag,omitempty"` +} + +func init() { + t["DVSNetworkResourcePoolAllocationInfo"] = reflect.TypeOf((*DVSNetworkResourcePoolAllocationInfo)(nil)).Elem() +} + +type DVSNetworkResourcePoolConfigSpec struct { + DynamicData + + Key string `xml:"key"` + ConfigVersion string `xml:"configVersion,omitempty"` + AllocationInfo *DVSNetworkResourcePoolAllocationInfo `xml:"allocationInfo,omitempty"` + Name string `xml:"name,omitempty"` + Description string `xml:"description,omitempty"` +} + +func init() { + t["DVSNetworkResourcePoolConfigSpec"] = reflect.TypeOf((*DVSNetworkResourcePoolConfigSpec)(nil)).Elem() +} + +type DVSPolicy struct { + DynamicData + + AutoPreInstallAllowed *bool `xml:"autoPreInstallAllowed"` + AutoUpgradeAllowed *bool `xml:"autoUpgradeAllowed"` + PartialUpgradeAllowed *bool `xml:"partialUpgradeAllowed"` +} + +func init() { + t["DVSPolicy"] = reflect.TypeOf((*DVSPolicy)(nil)).Elem() +} + +type DVSRollbackCapability struct { + DynamicData + + RollbackSupported bool `xml:"rollbackSupported"` +} + +func init() { + t["DVSRollbackCapability"] = reflect.TypeOf((*DVSRollbackCapability)(nil)).Elem() +} + +type DVSRollbackRequestType struct { + This ManagedObjectReference `xml:"_this"` + EntityBackup *EntityBackupConfig `xml:"entityBackup,omitempty"` +} + +func init() { + t["DVSRollbackRequestType"] = reflect.TypeOf((*DVSRollbackRequestType)(nil)).Elem() +} + +type DVSRollback_Task DVSRollbackRequestType + +func init() { + t["DVSRollback_Task"] = reflect.TypeOf((*DVSRollback_Task)(nil)).Elem() +} + +type DVSRollback_TaskResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + +type DVSRuntimeInfo struct { + DynamicData + + HostMemberRuntime []HostMemberRuntimeInfo `xml:"hostMemberRuntime,omitempty"` + ResourceRuntimeInfo *DvsResourceRuntimeInfo `xml:"resourceRuntimeInfo,omitempty"` +} + 
+func init() {
+	t["DVSRuntimeInfo"] = reflect.TypeOf((*DVSRuntimeInfo)(nil)).Elem()
+}
+
+type DVSSecurityPolicy struct {
+	InheritablePolicy
+
+	AllowPromiscuous *BoolPolicy `xml:"allowPromiscuous,omitempty"`
+	MacChanges *BoolPolicy `xml:"macChanges,omitempty"`
+	ForgedTransmits *BoolPolicy `xml:"forgedTransmits,omitempty"`
+}
+
+func init() {
+	t["DVSSecurityPolicy"] = reflect.TypeOf((*DVSSecurityPolicy)(nil)).Elem()
+}
+
+type DVSSelection struct {
+	SelectionSet
+
+	DvsUuid string `xml:"dvsUuid"`
+}
+
+func init() {
+	t["DVSSelection"] = reflect.TypeOf((*DVSSelection)(nil)).Elem()
+}
+
+type DVSSummary struct {
+	DynamicData
+
+	Name string `xml:"name"`
+	Uuid string `xml:"uuid"`
+	NumPorts int `xml:"numPorts"`
+	ProductInfo *DistributedVirtualSwitchProductSpec `xml:"productInfo,omitempty"`
+	HostMember []ManagedObjectReference `xml:"hostMember,omitempty"`
+	Vm []ManagedObjectReference `xml:"vm,omitempty"`
+	Host []ManagedObjectReference `xml:"host,omitempty"`
+	PortgroupName []string `xml:"portgroupName,omitempty"`
+	Description string `xml:"description,omitempty"`
+	Contact *DVSContactInfo `xml:"contact,omitempty"`
+	NumHosts int `xml:"numHosts,omitempty"`
+}
+
+func init() {
+	t["DVSSummary"] = reflect.TypeOf((*DVSSummary)(nil)).Elem()
+}
+
+type DVSTrafficShapingPolicy struct {
+	InheritablePolicy
+
+	Enabled *BoolPolicy `xml:"enabled,omitempty"`
+	AverageBandwidth *LongPolicy `xml:"averageBandwidth,omitempty"`
+	PeakBandwidth *LongPolicy `xml:"peakBandwidth,omitempty"`
+	BurstSize *LongPolicy `xml:"burstSize,omitempty"`
+}
+
+func init() {
+	t["DVSTrafficShapingPolicy"] = reflect.TypeOf((*DVSTrafficShapingPolicy)(nil)).Elem()
+}
+
+type DVSUplinkPortPolicy struct {
+	DynamicData
+}
+
+func init() {
+	t["DVSUplinkPortPolicy"] = reflect.TypeOf((*DVSUplinkPortPolicy)(nil)).Elem()
+}
+
+type DVSVendorSpecificConfig struct {
+	InheritablePolicy
+
+	KeyValue []DistributedVirtualSwitchKeyedOpaqueBlob `xml:"keyValue,omitempty"`
+}
+
+func init() {
+	t["DVSVendorSpecificConfig"] = reflect.TypeOf((*DVSVendorSpecificConfig)(nil)).Elem()
+}
+
+type DVSVmVnicNetworkResourcePool struct {
+	DynamicData
+
+	Key string `xml:"key"`
+	Name string `xml:"name,omitempty"`
+	Description string `xml:"description,omitempty"`
+	ConfigVersion string `xml:"configVersion"`
+	AllocationInfo *DvsVmVnicResourceAllocation `xml:"allocationInfo,omitempty"`
+}
+
+func init() {
+	t["DVSVmVnicNetworkResourcePool"] = reflect.TypeOf((*DVSVmVnicNetworkResourcePool)(nil)).Elem()
+}
+
+type DailyTaskScheduler struct {
+	HourlyTaskScheduler
+
+	Hour int `xml:"hour"`
+}
+
+func init() {
+	t["DailyTaskScheduler"] = reflect.TypeOf((*DailyTaskScheduler)(nil)).Elem()
+}
+
+type DasAdmissionControlDisabledEvent struct {
+	ClusterEvent
+}
+
+func init() {
+	t["DasAdmissionControlDisabledEvent"] = reflect.TypeOf((*DasAdmissionControlDisabledEvent)(nil)).Elem()
+}
+
+type DasAdmissionControlEnabledEvent struct {
+	ClusterEvent
+}
+
+func init() {
+	t["DasAdmissionControlEnabledEvent"] = reflect.TypeOf((*DasAdmissionControlEnabledEvent)(nil)).Elem()
+}
+
+type DasAgentFoundEvent struct {
+	ClusterEvent
+}
+
+func init() {
+	t["DasAgentFoundEvent"] = reflect.TypeOf((*DasAgentFoundEvent)(nil)).Elem()
+}
+
+type DasAgentUnavailableEvent struct {
+	ClusterEvent
+}
+
+func init() {
+	t["DasAgentUnavailableEvent"] = reflect.TypeOf((*DasAgentUnavailableEvent)(nil)).Elem()
+}
+
+type DasClusterIsolatedEvent struct {
+	ClusterEvent
+}
+
+func init() {
+	t["DasClusterIsolatedEvent"] = reflect.TypeOf((*DasClusterIsolatedEvent)(nil)).Elem()
+}
+
+type DasConfigFault struct {
+	VimFault
+
+	Reason string `xml:"reason,omitempty"`
+	Output string `xml:"output,omitempty"`
+	Event []BaseEvent `xml:"event,omitempty,typeattr"`
+}
+
+func init() {
+	t["DasConfigFault"] = reflect.TypeOf((*DasConfigFault)(nil)).Elem()
+}
+
+type DasConfigFaultFault DasConfigFault
+
+func init() {
+	t["DasConfigFaultFault"] = reflect.TypeOf((*DasConfigFaultFault)(nil)).Elem()
+}
+
+type DasDisabledEvent struct {
+	ClusterEvent
+}
+
+func init() {
+	t["DasDisabledEvent"] = reflect.TypeOf((*DasDisabledEvent)(nil)).Elem()
+}
+
+type DasEnabledEvent struct {
+	ClusterEvent
+}
+
+func init() {
+	t["DasEnabledEvent"] = reflect.TypeOf((*DasEnabledEvent)(nil)).Elem()
+}
+
+type DasHeartbeatDatastoreInfo struct {
+	DynamicData
+
+	Datastore ManagedObjectReference `xml:"datastore"`
+	Hosts []ManagedObjectReference `xml:"hosts"`
+}
+
+func init() {
+	t["DasHeartbeatDatastoreInfo"] = reflect.TypeOf((*DasHeartbeatDatastoreInfo)(nil)).Elem()
+}
+
+type DasHostFailedEvent struct {
+	ClusterEvent
+
+	FailedHost HostEventArgument `xml:"failedHost"`
+}
+
+func init() {
+	t["DasHostFailedEvent"] = reflect.TypeOf((*DasHostFailedEvent)(nil)).Elem()
+}
+
+type DasHostIsolatedEvent struct {
+	ClusterEvent
+
+	IsolatedHost HostEventArgument `xml:"isolatedHost"`
+}
+
+func init() {
+	t["DasHostIsolatedEvent"] = reflect.TypeOf((*DasHostIsolatedEvent)(nil)).Elem()
+}
+
+type DatabaseError struct {
+	RuntimeFault
+}
+
+func init() {
+	t["DatabaseError"] = reflect.TypeOf((*DatabaseError)(nil)).Elem()
+}
+
+type DatabaseErrorFault DatabaseError
+
+func init() {
+	t["DatabaseErrorFault"] = reflect.TypeOf((*DatabaseErrorFault)(nil)).Elem()
+}
+
+type DatabaseSizeEstimate struct {
+	DynamicData
+
+	Size int64 `xml:"size"`
+}
+
+func init() {
+	t["DatabaseSizeEstimate"] = reflect.TypeOf((*DatabaseSizeEstimate)(nil)).Elem()
+}
+
+type DatabaseSizeParam struct {
+	DynamicData
+
+	InventoryDesc InventoryDescription `xml:"inventoryDesc"`
+	PerfStatsDesc *PerformanceStatisticsDescription `xml:"perfStatsDesc,omitempty"`
+}
+
+func init() {
+	t["DatabaseSizeParam"] = reflect.TypeOf((*DatabaseSizeParam)(nil)).Elem()
+}
+
+type DatacenterConfigInfo struct {
+	DynamicData
+
+	DefaultHardwareVersionKey string `xml:"defaultHardwareVersionKey,omitempty"`
+}
+
+func init() {
+	t["DatacenterConfigInfo"] = reflect.TypeOf((*DatacenterConfigInfo)(nil)).Elem()
+}
+
+type DatacenterConfigSpec struct {
+	DynamicData
+
+	DefaultHardwareVersionKey string `xml:"defaultHardwareVersionKey,omitempty"`
+}
+
+func init() {
+	t["DatacenterConfigSpec"] = reflect.TypeOf((*DatacenterConfigSpec)(nil)).Elem()
+}
+
+type DatacenterCreatedEvent struct {
+	DatacenterEvent
+
+	Parent FolderEventArgument `xml:"parent"`
+}
+
+func init() {
+	t["DatacenterCreatedEvent"] = reflect.TypeOf((*DatacenterCreatedEvent)(nil)).Elem()
+}
+
+type DatacenterEvent struct {
+	Event
+}
+
+func init() {
+	t["DatacenterEvent"] = reflect.TypeOf((*DatacenterEvent)(nil)).Elem()
+}
+
+type DatacenterEventArgument struct {
+	EntityEventArgument
+
+	Datacenter ManagedObjectReference `xml:"datacenter"`
+}
+
+func init() {
+	t["DatacenterEventArgument"] = reflect.TypeOf((*DatacenterEventArgument)(nil)).Elem()
+}
+
+type DatacenterMismatch struct {
+	MigrationFault
+
+	InvalidArgument []DatacenterMismatchArgument `xml:"invalidArgument"`
+	ExpectedDatacenter ManagedObjectReference `xml:"expectedDatacenter"`
+}
+
+func init() {
+	t["DatacenterMismatch"] = reflect.TypeOf((*DatacenterMismatch)(nil)).Elem()
+}
+
+type DatacenterMismatchArgument struct {
+	DynamicData
+
+	Entity ManagedObjectReference `xml:"entity"`
+	InputDatacenter *ManagedObjectReference `xml:"inputDatacenter,omitempty"`
+}
+
+func init() {
+	t["DatacenterMismatchArgument"] = reflect.TypeOf((*DatacenterMismatchArgument)(nil)).Elem()
+}
+
+type DatacenterMismatchFault DatacenterMismatch
+
+func init() {
+	t["DatacenterMismatchFault"] = reflect.TypeOf((*DatacenterMismatchFault)(nil)).Elem()
+}
+
+type DatacenterRenamedEvent struct {
+	DatacenterEvent
+
+	OldName string `xml:"oldName"`
+	NewName string `xml:"newName"`
+}
+
+func init() {
+	t["DatacenterRenamedEvent"] = reflect.TypeOf((*DatacenterRenamedEvent)(nil)).Elem()
+}
+
+type DatastoreCapability struct {
+	DynamicData
+
+	DirectoryHierarchySupported bool `xml:"directoryHierarchySupported"`
+	RawDiskMappingsSupported bool `xml:"rawDiskMappingsSupported"`
+	PerFileThinProvisioningSupported bool `xml:"perFileThinProvisioningSupported"`
+	StorageIORMSupported *bool `xml:"storageIORMSupported"`
+	NativeSnapshotSupported *bool `xml:"nativeSnapshotSupported"`
+	TopLevelDirectoryCreateSupported *bool `xml:"topLevelDirectoryCreateSupported"`
+	SeSparseSupported *bool `xml:"seSparseSupported"`
+}
+
+func init() {
+	t["DatastoreCapability"] = reflect.TypeOf((*DatastoreCapability)(nil)).Elem()
+}
+
+type DatastoreCapacityIncreasedEvent struct {
+	DatastoreEvent
+
+	OldCapacity int64 `xml:"oldCapacity"`
+	NewCapacity int64 `xml:"newCapacity"`
+}
+
+func init() {
+	t["DatastoreCapacityIncreasedEvent"] = reflect.TypeOf((*DatastoreCapacityIncreasedEvent)(nil)).Elem()
+}
+
+type DatastoreDestroyedEvent struct {
+	DatastoreEvent
+}
+
+func init() {
+	t["DatastoreDestroyedEvent"] = reflect.TypeOf((*DatastoreDestroyedEvent)(nil)).Elem()
+}
+
+type DatastoreDiscoveredEvent struct {
+	HostEvent
+
+	Datastore DatastoreEventArgument `xml:"datastore"`
+}
+
+func init() {
+	t["DatastoreDiscoveredEvent"] = reflect.TypeOf((*DatastoreDiscoveredEvent)(nil)).Elem()
+}
+
+type DatastoreDuplicatedEvent struct {
+	DatastoreEvent
+}
+
+func init() {
+	t["DatastoreDuplicatedEvent"] = reflect.TypeOf((*DatastoreDuplicatedEvent)(nil)).Elem()
+}
+
+type DatastoreEnterMaintenanceMode DatastoreEnterMaintenanceModeRequestType
+
+func init() {
+	t["DatastoreEnterMaintenanceMode"] = reflect.TypeOf((*DatastoreEnterMaintenanceMode)(nil)).Elem()
+}
+
+type DatastoreEnterMaintenanceModeRequestType struct {
+	This ManagedObjectReference `xml:"_this"`
+}
+
+func init() {
+	t["DatastoreEnterMaintenanceModeRequestType"] = reflect.TypeOf((*DatastoreEnterMaintenanceModeRequestType)(nil)).Elem()
+}
+
+type DatastoreEnterMaintenanceModeResponse struct {
+	Returnval StoragePlacementResult `xml:"returnval"`
+}
+
+type DatastoreEvent struct {
+	Event
+
+	Datastore *DatastoreEventArgument `xml:"datastore,omitempty"`
+}
+
+func init() {
+	t["DatastoreEvent"] = reflect.TypeOf((*DatastoreEvent)(nil)).Elem()
+}
+
+type DatastoreEventArgument struct {
+	EntityEventArgument
+
+	Datastore ManagedObjectReference `xml:"datastore"`
+}
+
+func init() {
+	t["DatastoreEventArgument"] = reflect.TypeOf((*DatastoreEventArgument)(nil)).Elem()
+}
+
+type DatastoreExitMaintenanceModeRequestType struct {
+	This ManagedObjectReference `xml:"_this"`
+}
+
+func init() {
+	t["DatastoreExitMaintenanceModeRequestType"] = reflect.TypeOf((*DatastoreExitMaintenanceModeRequestType)(nil)).Elem()
+}
+
+type DatastoreExitMaintenanceMode_Task DatastoreExitMaintenanceModeRequestType
+
+func init() {
+	t["DatastoreExitMaintenanceMode_Task"] = reflect.TypeOf((*DatastoreExitMaintenanceMode_Task)(nil)).Elem()
+}
+
+type DatastoreExitMaintenanceMode_TaskResponse struct {
+	Returnval ManagedObjectReference `xml:"returnval"`
+}
+
+type DatastoreFileCopiedEvent struct {
+	DatastoreFileEvent
+
+	SourceDatastore DatastoreEventArgument `xml:"sourceDatastore"`
+	SourceFile string `xml:"sourceFile"`
+}
+
+func init() {
+	t["DatastoreFileCopiedEvent"] = reflect.TypeOf((*DatastoreFileCopiedEvent)(nil)).Elem()
+}
+
+type DatastoreFileDeletedEvent struct {
+	DatastoreFileEvent
+}
+
+func init() {
+	t["DatastoreFileDeletedEvent"] = reflect.TypeOf((*DatastoreFileDeletedEvent)(nil)).Elem()
+}
+
+type DatastoreFileEvent struct {
+	DatastoreEvent
+
+	TargetFile string `xml:"targetFile"`
+}
+
+func init() {
+	t["DatastoreFileEvent"] = reflect.TypeOf((*DatastoreFileEvent)(nil)).Elem()
+}
+
+type DatastoreFileMovedEvent struct {
+	DatastoreFileEvent
+
+	SourceDatastore DatastoreEventArgument `xml:"sourceDatastore"`
+	SourceFile string `xml:"sourceFile"`
+}
+
+func init() {
+	t["DatastoreFileMovedEvent"] = reflect.TypeOf((*DatastoreFileMovedEvent)(nil)).Elem()
+}
+
+type DatastoreHostMount struct {
+	DynamicData
+
+	Key ManagedObjectReference `xml:"key"`
+	MountInfo HostMountInfo `xml:"mountInfo"`
+}
+
+func init() {
+	t["DatastoreHostMount"] = reflect.TypeOf((*DatastoreHostMount)(nil)).Elem()
+}
+
+type DatastoreIORMReconfiguredEvent struct {
+	DatastoreEvent
+}
+
+func init() {
+	t["DatastoreIORMReconfiguredEvent"] = reflect.TypeOf((*DatastoreIORMReconfiguredEvent)(nil)).Elem()
+}
+
+type DatastoreInfo struct {
+	DynamicData
+
+	Name string `xml:"name"`
+	Url string `xml:"url"`
+	FreeSpace int64 `xml:"freeSpace"`
+	MaxFileSize int64 `xml:"maxFileSize"`
+	MaxVirtualDiskCapacity int64 `xml:"maxVirtualDiskCapacity,omitempty"`
+	MaxMemoryFileSize int64 `xml:"maxMemoryFileSize,omitempty"`
+	Timestamp *time.Time `xml:"timestamp"`
+	ContainerId string `xml:"containerId,omitempty"`
+}
+
+func init() {
+	t["DatastoreInfo"] = reflect.TypeOf((*DatastoreInfo)(nil)).Elem()
+}
+
+type DatastoreMountPathDatastorePair struct {
+	DynamicData
+
+	OldMountPath string `xml:"oldMountPath"`
+	Datastore ManagedObjectReference `xml:"datastore"`
+}
+
+func init() {
+	t["DatastoreMountPathDatastorePair"] = reflect.TypeOf((*DatastoreMountPathDatastorePair)(nil)).Elem()
+}
+
+type DatastoreNotWritableOnHost struct {
+	InvalidDatastore
+
+	Host ManagedObjectReference `xml:"host"`
+}
+
+func init() {
+	t["DatastoreNotWritableOnHost"] = reflect.TypeOf((*DatastoreNotWritableOnHost)(nil)).Elem()
+}
+
+type DatastoreNotWritableOnHostFault BaseDatastoreNotWritableOnHost
+
+func init() {
+	t["DatastoreNotWritableOnHostFault"] = reflect.TypeOf((*DatastoreNotWritableOnHostFault)(nil)).Elem()
+}
+
+type DatastoreOption struct {
+	DynamicData
+
+	UnsupportedVolumes []VirtualMachineDatastoreVolumeOption `xml:"unsupportedVolumes,omitempty"`
+}
+
+func init() {
+	t["DatastoreOption"] = reflect.TypeOf((*DatastoreOption)(nil)).Elem()
+}
+
+type DatastorePrincipalConfigured struct {
+	HostEvent
+
+	DatastorePrincipal string `xml:"datastorePrincipal"`
+}
+
+func init() {
+	t["DatastorePrincipalConfigured"] = reflect.TypeOf((*DatastorePrincipalConfigured)(nil)).Elem()
+}
+
+type DatastoreRemovedOnHostEvent struct {
+	HostEvent
+
+	Datastore DatastoreEventArgument `xml:"datastore"`
+}
+
+func init() {
+	t["DatastoreRemovedOnHostEvent"] = reflect.TypeOf((*DatastoreRemovedOnHostEvent)(nil)).Elem()
+}
+
+type DatastoreRenamedEvent struct {
+	DatastoreEvent
+
+	OldName string `xml:"oldName"`
+	NewName string `xml:"newName"`
+}
+
+func init() {
+	t["DatastoreRenamedEvent"] = reflect.TypeOf((*DatastoreRenamedEvent)(nil)).Elem()
+}
+
+type DatastoreRenamedOnHostEvent struct {
+	HostEvent
+
+	OldName string `xml:"oldName"`
+	NewName string `xml:"newName"`
+}
+
+func init() {
+	t["DatastoreRenamedOnHostEvent"] = reflect.TypeOf((*DatastoreRenamedOnHostEvent)(nil)).Elem()
+}
+
+type DatastoreSummary struct {
+	DynamicData
+
+	Datastore *ManagedObjectReference `xml:"datastore,omitempty"`
+	Name string `xml:"name"`
+	Url string `xml:"url"`
+	Capacity int64 `xml:"capacity"`
+	FreeSpace int64 `xml:"freeSpace"`
+	Uncommitted int64 `xml:"uncommitted,omitempty"`
+	Accessible bool `xml:"accessible"`
+	MultipleHostAccess *bool `xml:"multipleHostAccess"`
+	Type string `xml:"type"`
+	MaintenanceMode string `xml:"maintenanceMode,omitempty"`
+}
+
+func init() {
+	t["DatastoreSummary"] = reflect.TypeOf((*DatastoreSummary)(nil)).Elem()
+}
+
+type DateTimeProfile struct {
+	ApplyProfile
+}
+
+func init() {
+	t["DateTimeProfile"] = reflect.TypeOf((*DateTimeProfile)(nil)).Elem()
+}
+
+type DecodeLicense DecodeLicenseRequestType
+
+func init() {
+	t["DecodeLicense"] = reflect.TypeOf((*DecodeLicense)(nil)).Elem()
+}
+
+type DecodeLicenseRequestType struct {
+	This ManagedObjectReference `xml:"_this"`
+	LicenseKey string `xml:"licenseKey"`
+}
+
+func init() {
+	t["DecodeLicenseRequestType"] = reflect.TypeOf((*DecodeLicenseRequestType)(nil)).Elem()
+}
+
+type DecodeLicenseResponse struct {
+	Returnval LicenseManagerLicenseInfo `xml:"returnval"`
+}
+
+type DefragmentAllDisks DefragmentAllDisksRequestType
+
+func init() {
+	t["DefragmentAllDisks"] = reflect.TypeOf((*DefragmentAllDisks)(nil)).Elem()
+}
+
+type DefragmentAllDisksRequestType struct {
+	This ManagedObjectReference `xml:"_this"`
+}
+
+func init() {
+	t["DefragmentAllDisksRequestType"] = reflect.TypeOf((*DefragmentAllDisksRequestType)(nil)).Elem()
+}
+
+type DefragmentAllDisksResponse struct {
+}
+
+type DefragmentVirtualDiskRequestType struct {
+	This ManagedObjectReference `xml:"_this"`
+	Name string `xml:"name"`
+	Datacenter *ManagedObjectReference `xml:"datacenter,omitempty"`
+}
+
+func init() {
+	t["DefragmentVirtualDiskRequestType"] = reflect.TypeOf((*DefragmentVirtualDiskRequestType)(nil)).Elem()
+}
+
+type DefragmentVirtualDisk_Task DefragmentVirtualDiskRequestType
+
+func init() {
+	t["DefragmentVirtualDisk_Task"] = reflect.TypeOf((*DefragmentVirtualDisk_Task)(nil)).Elem()
+}
+
+type DefragmentVirtualDisk_TaskResponse struct {
+	Returnval ManagedObjectReference `xml:"returnval"`
+}
+
+type DeleteCustomizationSpec DeleteCustomizationSpecRequestType
+
+func init() {
+	t["DeleteCustomizationSpec"] = reflect.TypeOf((*DeleteCustomizationSpec)(nil)).Elem()
+}
+
+type DeleteCustomizationSpecRequestType struct {
+	This ManagedObjectReference `xml:"_this"`
+	Name string `xml:"name"`
+}
+
+func init() {
+	t["DeleteCustomizationSpecRequestType"] = reflect.TypeOf((*DeleteCustomizationSpecRequestType)(nil)).Elem()
+}
+
+type DeleteCustomizationSpecResponse struct {
+}
+
+type DeleteDatastoreFileRequestType struct {
+	This ManagedObjectReference `xml:"_this"`
+	Name string `xml:"name"`
+	Datacenter *ManagedObjectReference `xml:"datacenter,omitempty"`
+}
+
+func init() {
+	t["DeleteDatastoreFileRequestType"] = reflect.TypeOf((*DeleteDatastoreFileRequestType)(nil)).Elem()
+}
+
+type DeleteDatastoreFile_Task DeleteDatastoreFileRequestType
+
+func init() {
+	t["DeleteDatastoreFile_Task"] = reflect.TypeOf((*DeleteDatastoreFile_Task)(nil)).Elem()
+}
+
+type DeleteDatastoreFile_TaskResponse struct {
+	Returnval ManagedObjectReference `xml:"returnval"`
+}
+
+type DeleteDirectory DeleteDirectoryRequestType
+
+func init() {
+	t["DeleteDirectory"] = reflect.TypeOf((*DeleteDirectory)(nil)).Elem()
+}
+
+type DeleteDirectoryInGuest DeleteDirectoryInGuestRequestType
+
+func init() {
+	t["DeleteDirectoryInGuest"] = reflect.TypeOf((*DeleteDirectoryInGuest)(nil)).Elem()
+}
+
+type DeleteDirectoryInGuestRequestType struct {
+	This ManagedObjectReference `xml:"_this"`
+	Vm ManagedObjectReference `xml:"vm"`
+	Auth BaseGuestAuthentication `xml:"auth,typeattr"`
+	DirectoryPath string `xml:"directoryPath"`
+	Recursive bool `xml:"recursive"`
+}
+
+func init() {
+	t["DeleteDirectoryInGuestRequestType"] = reflect.TypeOf((*DeleteDirectoryInGuestRequestType)(nil)).Elem()
+}
+
+type DeleteDirectoryInGuestResponse struct {
+}
+
+type DeleteDirectoryRequestType struct {
+	This ManagedObjectReference `xml:"_this"`
+	Datacenter *ManagedObjectReference `xml:"datacenter,omitempty"`
+	DatastorePath string `xml:"datastorePath"`
+}
+
+func init() {
+	t["DeleteDirectoryRequestType"] = reflect.TypeOf((*DeleteDirectoryRequestType)(nil)).Elem()
+}
+
+type DeleteDirectoryResponse struct {
+}
+
+type DeleteFile DeleteFileRequestType
+
+func init() {
+	t["DeleteFile"] = reflect.TypeOf((*DeleteFile)(nil)).Elem()
+}
+
+type DeleteFileInGuest DeleteFileInGuestRequestType
+
+func init() {
+	t["DeleteFileInGuest"] = reflect.TypeOf((*DeleteFileInGuest)(nil)).Elem()
+}
+
+type DeleteFileInGuestRequestType struct {
+	This ManagedObjectReference `xml:"_this"`
+	Vm ManagedObjectReference `xml:"vm"`
+	Auth BaseGuestAuthentication `xml:"auth,typeattr"`
+	FilePath string `xml:"filePath"`
+}
+
+func init() {
+	t["DeleteFileInGuestRequestType"] = reflect.TypeOf((*DeleteFileInGuestRequestType)(nil)).Elem()
+}
+
+type DeleteFileInGuestResponse struct {
+}
+
+type DeleteFileRequestType struct {
+	This ManagedObjectReference `xml:"_this"`
+	DatastorePath string `xml:"datastorePath"`
+}
+
+func init() {
+	t["DeleteFileRequestType"] = reflect.TypeOf((*DeleteFileRequestType)(nil)).Elem()
+}
+
+type DeleteFileResponse struct {
+}
+
+type DeleteRegistryKeyInGuest DeleteRegistryKeyInGuestRequestType
+
+func init() {
+	t["DeleteRegistryKeyInGuest"] = reflect.TypeOf((*DeleteRegistryKeyInGuest)(nil)).Elem()
+}
+
+type DeleteRegistryKeyInGuestRequestType struct {
+	This ManagedObjectReference `xml:"_this"`
+	Vm ManagedObjectReference `xml:"vm"`
+	Auth BaseGuestAuthentication `xml:"auth,typeattr"`
+	KeyName GuestRegKeyNameSpec `xml:"keyName"`
+	Recursive bool `xml:"recursive"`
+}
+
+func init() {
+	t["DeleteRegistryKeyInGuestRequestType"] = reflect.TypeOf((*DeleteRegistryKeyInGuestRequestType)(nil)).Elem()
+}
+
+type DeleteRegistryKeyInGuestResponse struct {
+}
+
+type DeleteRegistryValueInGuest DeleteRegistryValueInGuestRequestType
+
+func init() {
+	t["DeleteRegistryValueInGuest"] = reflect.TypeOf((*DeleteRegistryValueInGuest)(nil)).Elem()
+}
+
+type DeleteRegistryValueInGuestRequestType struct {
+	This ManagedObjectReference `xml:"_this"`
+	Vm ManagedObjectReference `xml:"vm"`
+	Auth BaseGuestAuthentication `xml:"auth,typeattr"`
+	ValueName GuestRegValueNameSpec `xml:"valueName"`
+}
+
+func init() {
+	t["DeleteRegistryValueInGuestRequestType"] = reflect.TypeOf((*DeleteRegistryValueInGuestRequestType)(nil)).Elem()
+}
+
+type DeleteRegistryValueInGuestResponse struct {
+}
+
+type DeleteScsiLunState DeleteScsiLunStateRequestType
+
+func init() {
+	t["DeleteScsiLunState"] = reflect.TypeOf((*DeleteScsiLunState)(nil)).Elem()
+}
+
+type DeleteScsiLunStateRequestType struct {
+	This ManagedObjectReference `xml:"_this"`
+	LunCanonicalName string `xml:"lunCanonicalName"`
+}
+
+func init() {
+	t["DeleteScsiLunStateRequestType"] = reflect.TypeOf((*DeleteScsiLunStateRequestType)(nil)).Elem()
+}
+
+type DeleteScsiLunStateResponse struct {
+}
+
+type DeleteVRP DeleteVRPRequestType
+
+func init() {
+	t["DeleteVRP"] = reflect.TypeOf((*DeleteVRP)(nil)).Elem()
+}
+
+type DeleteVRPRequestType struct {
+	This ManagedObjectReference `xml:"_this"`
+	VrpId string `xml:"vrpId"`
+}
+
+func init() {
+	t["DeleteVRPRequestType"] = reflect.TypeOf((*DeleteVRPRequestType)(nil)).Elem()
+}
+
+type DeleteVRPResponse struct {
+}
+
+type DeleteVffsVolumeState DeleteVffsVolumeStateRequestType
+
+func init() {
+	t["DeleteVffsVolumeState"] = reflect.TypeOf((*DeleteVffsVolumeState)(nil)).Elem()
+}
+
+type DeleteVffsVolumeStateRequestType struct {
+	This ManagedObjectReference `xml:"_this"`
+	VffsUuid string `xml:"vffsUuid"`
+}
+
+func init() {
+	t["DeleteVffsVolumeStateRequestType"] = reflect.TypeOf((*DeleteVffsVolumeStateRequestType)(nil)).Elem()
+}
+
+type DeleteVffsVolumeStateResponse struct {
+}
+
+type DeleteVirtualDiskRequestType struct {
+	This ManagedObjectReference `xml:"_this"`
+	Name string `xml:"name"`
+	Datacenter *ManagedObjectReference `xml:"datacenter,omitempty"`
+}
+
+func init() {
+	t["DeleteVirtualDiskRequestType"] = reflect.TypeOf((*DeleteVirtualDiskRequestType)(nil)).Elem()
+}
+
+type DeleteVirtualDisk_Task DeleteVirtualDiskRequestType
+
+func init() {
+	t["DeleteVirtualDisk_Task"] = reflect.TypeOf((*DeleteVirtualDisk_Task)(nil)).Elem()
+}
+
+type DeleteVirtualDisk_TaskResponse struct {
+	Returnval ManagedObjectReference `xml:"returnval"`
+}
+
+type DeleteVmfsVolumeState DeleteVmfsVolumeStateRequestType
+
+func init() {
+	t["DeleteVmfsVolumeState"] = reflect.TypeOf((*DeleteVmfsVolumeState)(nil)).Elem()
+}
+
+type DeleteVmfsVolumeStateRequestType struct {
+	This ManagedObjectReference `xml:"_this"`
+	VmfsUuid string `xml:"vmfsUuid"`
+}
+
+func init() {
+	t["DeleteVmfsVolumeStateRequestType"] = reflect.TypeOf((*DeleteVmfsVolumeStateRequestType)(nil)).Elem()
+}
+
+type DeleteVmfsVolumeStateResponse struct {
+}
+
+type DeleteVsanObjects DeleteVsanObjectsRequestType
+
+func init() {
+	t["DeleteVsanObjects"] = reflect.TypeOf((*DeleteVsanObjects)(nil)).Elem()
+}
+
+type DeleteVsanObjectsRequestType struct {
+	This ManagedObjectReference `xml:"_this"`
+	Uuids []string `xml:"uuids"`
+	Force *bool `xml:"force"`
+}
+
+func init() {
+	t["DeleteVsanObjectsRequestType"] = reflect.TypeOf((*DeleteVsanObjectsRequestType)(nil)).Elem()
+}
+
+type DeleteVsanObjectsResponse struct {
+	Returnval []HostVsanInternalSystemDeleteVsanObjectsResult `xml:"returnval"`
+}
+
+type DeltaDiskFormatNotSupported struct {
+	VmConfigFault
+
+	Datastore []ManagedObjectReference `xml:"datastore,omitempty"`
+	DeltaDiskFormat string `xml:"deltaDiskFormat"`
+}
+
+func init() {
+	t["DeltaDiskFormatNotSupported"] = reflect.TypeOf((*DeltaDiskFormatNotSupported)(nil)).Elem()
+}
+
+type DeltaDiskFormatNotSupportedFault DeltaDiskFormatNotSupported
+
+func init() {
+	t["DeltaDiskFormatNotSupportedFault"] = reflect.TypeOf((*DeltaDiskFormatNotSupportedFault)(nil)).Elem()
+}
+
+type DeployVM DeployVMRequestType
+
+func init() {
+	t["DeployVM"] = reflect.TypeOf((*DeployVM)(nil)).Elem()
+}
+
+type DeployVMRequestType struct {
+	This ManagedObjectReference `xml:"_this"`
+	VrpId string `xml:"vrpId"`
+	Vm ManagedObjectReference `xml:"vm"`
+	Cluster ManagedObjectReference `xml:"cluster"`
+}
+
+func init() {
+	t["DeployVMRequestType"] = reflect.TypeOf((*DeployVMRequestType)(nil)).Elem()
+}
+
+type DeployVMResponse struct {
+}
+
+type Description struct {
+	DynamicData
+
+	Label string `xml:"label"`
+	Summary string `xml:"summary"`
+}
+
+func init() {
+	t["Description"] = reflect.TypeOf((*Description)(nil)).Elem()
+}
+
+type DeselectVnic DeselectVnicRequestType
+
+func init() {
+	t["DeselectVnic"] = reflect.TypeOf((*DeselectVnic)(nil)).Elem()
+}
+
+type DeselectVnicForNicType DeselectVnicForNicTypeRequestType
+
+func init() {
+	t["DeselectVnicForNicType"] = reflect.TypeOf((*DeselectVnicForNicType)(nil)).Elem()
+}
+
+type DeselectVnicForNicTypeRequestType struct {
+	This ManagedObjectReference `xml:"_this"`
+	NicType string `xml:"nicType"`
+	Device string `xml:"device"`
+}
+
+func init() {
+	t["DeselectVnicForNicTypeRequestType"] = reflect.TypeOf((*DeselectVnicForNicTypeRequestType)(nil)).Elem()
+}
+
+type DeselectVnicForNicTypeResponse struct {
+}
+
+type DeselectVnicRequestType struct {
+	This ManagedObjectReference `xml:"_this"`
+}
+
+func init() {
+	t["DeselectVnicRequestType"] = reflect.TypeOf((*DeselectVnicRequestType)(nil)).Elem()
+}
+
+type DeselectVnicResponse struct {
+}
+
+type DestinationSwitchFull struct {
+	CannotAccessNetwork
+}
+
+func init() {
+	t["DestinationSwitchFull"] = reflect.TypeOf((*DestinationSwitchFull)(nil)).Elem()
+}
+
+type DestinationSwitchFullFault DestinationSwitchFull
+
+func init() {
+	t["DestinationSwitchFullFault"] = reflect.TypeOf((*DestinationSwitchFullFault)(nil)).Elem()
+}
+
+type DestinationVsanDisabled struct {
+	CannotMoveVsanEnabledHost
+
+	DestinationCluster string `xml:"destinationCluster"`
+}
+
+func init() {
+	t["DestinationVsanDisabled"] = reflect.TypeOf((*DestinationVsanDisabled)(nil)).Elem()
+}
+
+type DestinationVsanDisabledFault DestinationVsanDisabled
+
+func init() {
+	t["DestinationVsanDisabledFault"] = reflect.TypeOf((*DestinationVsanDisabledFault)(nil)).Elem()
+}
+
+type DestroyChildren DestroyChildrenRequestType
+
+func init() {
+	t["DestroyChildren"] = reflect.TypeOf((*DestroyChildren)(nil)).Elem()
+}
+
+type DestroyChildrenRequestType struct {
+	This ManagedObjectReference `xml:"_this"`
+}
+
+func init() {
+	t["DestroyChildrenRequestType"] = reflect.TypeOf((*DestroyChildrenRequestType)(nil)).Elem()
+}
+
+type DestroyChildrenResponse struct {
+}
+
+type DestroyCollector DestroyCollectorRequestType
+
+func init() {
+	t["DestroyCollector"] = reflect.TypeOf((*DestroyCollector)(nil)).Elem()
+}
+
+type DestroyCollectorRequestType struct {
+	This ManagedObjectReference `xml:"_this"`
+}
+
+func init() {
+	t["DestroyCollectorRequestType"] = reflect.TypeOf((*DestroyCollectorRequestType)(nil)).Elem()
+}
+
+type DestroyCollectorResponse struct {
+}
+
+type DestroyDatastore DestroyDatastoreRequestType
+
+func init() {
+	t["DestroyDatastore"] = reflect.TypeOf((*DestroyDatastore)(nil)).Elem()
+}
+
+type DestroyDatastoreRequestType struct {
+	This ManagedObjectReference `xml:"_this"`
+}
+
+func init() {
+	t["DestroyDatastoreRequestType"] = reflect.TypeOf((*DestroyDatastoreRequestType)(nil)).Elem()
+}
+
+type DestroyDatastoreResponse struct {
+}
+
+type DestroyIpPool DestroyIpPoolRequestType
+
+func init() {
+	t["DestroyIpPool"] = reflect.TypeOf((*DestroyIpPool)(nil)).Elem()
+}
+
+type DestroyIpPoolRequestType struct {
+	This ManagedObjectReference `xml:"_this"`
+	Dc ManagedObjectReference `xml:"dc"`
+	Id int `xml:"id"`
+	Force bool `xml:"force"`
+}
+
+func init() {
+	t["DestroyIpPoolRequestType"] = reflect.TypeOf((*DestroyIpPoolRequestType)(nil)).Elem()
+}
+
+type DestroyIpPoolResponse struct {
+}
+
+type DestroyNetwork DestroyNetworkRequestType
+
+func init() {
+	t["DestroyNetwork"] = reflect.TypeOf((*DestroyNetwork)(nil)).Elem()
+}
+
+type DestroyNetworkRequestType struct {
+	This ManagedObjectReference `xml:"_this"`
+}
+
+func init() {
+	t["DestroyNetworkRequestType"] = reflect.TypeOf((*DestroyNetworkRequestType)(nil)).Elem()
+}
+
+type DestroyNetworkResponse struct {
+}
+
+type DestroyProfile DestroyProfileRequestType
+
+func init() {
+	t["DestroyProfile"] = reflect.TypeOf((*DestroyProfile)(nil)).Elem()
+}
+
+type DestroyProfileRequestType struct {
+	This ManagedObjectReference `xml:"_this"`
+}
+
+func init() {
+	t["DestroyProfileRequestType"] = reflect.TypeOf((*DestroyProfileRequestType)(nil)).Elem()
+}
+
+type DestroyProfileResponse struct {
+}
+
+type DestroyPropertyCollector DestroyPropertyCollectorRequestType
+
+func init() {
+	t["DestroyPropertyCollector"] = reflect.TypeOf((*DestroyPropertyCollector)(nil)).Elem()
+}
+
+type DestroyPropertyCollectorRequestType struct {
+	This ManagedObjectReference `xml:"_this"`
+}
+
+func init() {
+	t["DestroyPropertyCollectorRequestType"] = reflect.TypeOf((*DestroyPropertyCollectorRequestType)(nil)).Elem()
+}
+
+type DestroyPropertyCollectorResponse struct {
+}
+
+type DestroyPropertyFilter DestroyPropertyFilterRequestType
+
+func init() {
+	t["DestroyPropertyFilter"] = reflect.TypeOf((*DestroyPropertyFilter)(nil)).Elem()
+}
+
+type DestroyPropertyFilterRequestType struct {
+	This ManagedObjectReference `xml:"_this"`
+}
+
+func init() {
+	t["DestroyPropertyFilterRequestType"] = reflect.TypeOf((*DestroyPropertyFilterRequestType)(nil)).Elem()
+}
+
+type DestroyPropertyFilterResponse struct {
+}
+
+type DestroyRequestType struct {
+	This ManagedObjectReference `xml:"_this"`
+}
+
+func init() {
+	t["DestroyRequestType"] = reflect.TypeOf((*DestroyRequestType)(nil)).Elem()
+}
+
+type DestroyVffs DestroyVffsRequestType
+
+func init() {
+	t["DestroyVffs"] = reflect.TypeOf((*DestroyVffs)(nil)).Elem()
+}
+
+type DestroyVffsRequestType struct {
+	This ManagedObjectReference `xml:"_this"`
+	VffsPath string `xml:"vffsPath"`
+}
+
+func init() {
+	t["DestroyVffsRequestType"] = reflect.TypeOf((*DestroyVffsRequestType)(nil)).Elem()
+}
+
+type DestroyVffsResponse struct {
+}
+
+type DestroyView DestroyViewRequestType
+
+func init() {
+	t["DestroyView"] = reflect.TypeOf((*DestroyView)(nil)).Elem()
+}
+
+type DestroyViewRequestType struct {
+	This ManagedObjectReference `xml:"_this"`
+}
+
+func init() {
+	t["DestroyViewRequestType"] = reflect.TypeOf((*DestroyViewRequestType)(nil)).Elem()
+}
+
+type DestroyViewResponse struct {
+}
+
+type Destroy_Task DestroyRequestType
+
+func init() {
+	t["Destroy_Task"] = reflect.TypeOf((*Destroy_Task)(nil)).Elem()
+}
+
+type Destroy_TaskResponse struct {
+	Returnval ManagedObjectReference `xml:"returnval"`
+}
+
+type DetachScsiLun DetachScsiLunRequestType
+
+func init() {
+	t["DetachScsiLun"] = reflect.TypeOf((*DetachScsiLun)(nil)).Elem()
+}
+
+type DetachScsiLunExRequestType struct {
+	This ManagedObjectReference `xml:"_this"`
+	LunUuid []string `xml:"lunUuid"`
+}
+
+func init() {
+	t["DetachScsiLunExRequestType"] = reflect.TypeOf((*DetachScsiLunExRequestType)(nil)).Elem()
+}
+
+type DetachScsiLunEx_Task DetachScsiLunExRequestType
+
+func init() {
+	t["DetachScsiLunEx_Task"] = reflect.TypeOf((*DetachScsiLunEx_Task)(nil)).Elem()
+}
+
+type DetachScsiLunEx_TaskResponse struct {
+	Returnval ManagedObjectReference `xml:"returnval"`
+}
+
+type DetachScsiLunRequestType struct {
+	This ManagedObjectReference `xml:"_this"`
+	LunUuid string `xml:"lunUuid"`
+}
+
+func init() {
+	t["DetachScsiLunRequestType"] = reflect.TypeOf((*DetachScsiLunRequestType)(nil)).Elem()
+}
+
+type DetachScsiLunResponse struct {
+}
+
+type DeviceBackedVirtualDiskSpec struct {
+	VirtualDiskSpec
+
+	Device string `xml:"device"`
+}
+
+func init() {
+	t["DeviceBackedVirtualDiskSpec"] = reflect.TypeOf((*DeviceBackedVirtualDiskSpec)(nil)).Elem()
+}
+
+type DeviceBackingNotSupported struct {
+	DeviceNotSupported
+
+	Backing string `xml:"backing"`
+}
+
+func init() {
+	t["DeviceBackingNotSupported"] = reflect.TypeOf((*DeviceBackingNotSupported)(nil)).Elem()
+}
+
+type DeviceBackingNotSupportedFault BaseDeviceBackingNotSupported
+
+func init() {
+	t["DeviceBackingNotSupportedFault"] = reflect.TypeOf((*DeviceBackingNotSupportedFault)(nil)).Elem()
+}
+
+type DeviceControllerNotSupported struct {
+	DeviceNotSupported
+
+	Controller string `xml:"controller"`
+}
+
+func init() {
+	t["DeviceControllerNotSupported"] = reflect.TypeOf((*DeviceControllerNotSupported)(nil)).Elem()
+}
+
+type DeviceControllerNotSupportedFault DeviceControllerNotSupported
+
+func init() {
+	t["DeviceControllerNotSupportedFault"] = reflect.TypeOf((*DeviceControllerNotSupportedFault)(nil)).Elem()
+}
+
+type DeviceHotPlugNotSupported struct {
+	InvalidDeviceSpec
+}
+
+func init() {
+	t["DeviceHotPlugNotSupported"] = reflect.TypeOf((*DeviceHotPlugNotSupported)(nil)).Elem()
+}
+
+type DeviceHotPlugNotSupportedFault DeviceHotPlugNotSupported
+
+func init() {
+	t["DeviceHotPlugNotSupportedFault"] = reflect.TypeOf((*DeviceHotPlugNotSupportedFault)(nil)).Elem()
+}
+
+type DeviceNotFound struct {
+	InvalidDeviceSpec
+}
+
+func init() {
+	t["DeviceNotFound"] = reflect.TypeOf((*DeviceNotFound)(nil)).Elem()
+}
+
+type DeviceNotFoundFault DeviceNotFound
+
+func init() {
+	t["DeviceNotFoundFault"] = reflect.TypeOf((*DeviceNotFoundFault)(nil)).Elem()
+}
+
+type DeviceNotSupported struct {
+	VirtualHardwareCompatibilityIssue
+
+	Device string `xml:"device"`
+	Reason string `xml:"reason,omitempty"`
+}
+
+func init() {
+	t["DeviceNotSupported"] = reflect.TypeOf((*DeviceNotSupported)(nil)).Elem()
+}
+
+type DeviceNotSupportedFault BaseDeviceNotSupported
+
+func init() {
+	t["DeviceNotSupportedFault"] = reflect.TypeOf((*DeviceNotSupportedFault)(nil)).Elem()
+}
+
+type DeviceUnsupportedForVmPlatform struct {
+	InvalidDeviceSpec
+}
+
+func init() {
+	t["DeviceUnsupportedForVmPlatform"] = reflect.TypeOf((*DeviceUnsupportedForVmPlatform)(nil)).Elem()
+}
+
+type DeviceUnsupportedForVmPlatformFault DeviceUnsupportedForVmPlatform
+
+func init() {
+	t["DeviceUnsupportedForVmPlatformFault"] = reflect.TypeOf((*DeviceUnsupportedForVmPlatformFault)(nil)).Elem()
+}
+
+type DeviceUnsupportedForVmVersion struct {
+	InvalidDeviceSpec
+
+	CurrentVersion string `xml:"currentVersion"`
+	ExpectedVersion string `xml:"expectedVersion"`
+}
+
+func init() {
+	t["DeviceUnsupportedForVmVersion"] = reflect.TypeOf((*DeviceUnsupportedForVmVersion)(nil)).Elem()
+}
+
+type DeviceUnsupportedForVmVersionFault DeviceUnsupportedForVmVersion
+
+func init() {
+	t["DeviceUnsupportedForVmVersionFault"] = reflect.TypeOf((*DeviceUnsupportedForVmVersionFault)(nil)).Elem()
+}
+
+type DiagnosticManagerBundleInfo struct {
+	DynamicData
+
+	System *ManagedObjectReference `xml:"system,omitempty"`
+	Url string `xml:"url"`
+}
+
+func init() {
+	t["DiagnosticManagerBundleInfo"] = reflect.TypeOf((*DiagnosticManagerBundleInfo)(nil)).Elem()
+}
+
+type DiagnosticManagerLogDescriptor struct {
+	DynamicData
+
+	Key string `xml:"key"`
+	FileName string `xml:"fileName"`
+	Creator string `xml:"creator"`
+	Format string `xml:"format"`
+	MimeType string `xml:"mimeType"`
+	Info BaseDescription `xml:"info,typeattr"`
+}
+
+func init() {
+	t["DiagnosticManagerLogDescriptor"] = reflect.TypeOf((*DiagnosticManagerLogDescriptor)(nil)).Elem()
+}
+
+type DiagnosticManagerLogHeader struct {
+	DynamicData
+
+	LineStart int `xml:"lineStart"`
+	LineEnd int `xml:"lineEnd"`
+	LineText []string `xml:"lineText,omitempty"`
+}
+
+func init() {
+	t["DiagnosticManagerLogHeader"] = reflect.TypeOf((*DiagnosticManagerLogHeader)(nil)).Elem()
+}
+
+type DigestNotSupported struct {
+	DeviceNotSupported
+}
+
+func init() {
+	t["DigestNotSupported"] = reflect.TypeOf((*DigestNotSupported)(nil)).Elem()
+}
+
+type DigestNotSupportedFault DigestNotSupported
+
+func init() {
+	t["DigestNotSupportedFault"] = reflect.TypeOf((*DigestNotSupportedFault)(nil)).Elem()
+}
+
+type DirectoryNotEmpty struct {
+	FileFault
+}
+
+func init() {
+	t["DirectoryNotEmpty"] = reflect.TypeOf((*DirectoryNotEmpty)(nil)).Elem()
+}
+
+type DirectoryNotEmptyFault DirectoryNotEmpty
+
+func init() {
+	t["DirectoryNotEmptyFault"] = reflect.TypeOf((*DirectoryNotEmptyFault)(nil)).Elem()
+}
+
+type DisableAdminNotSupported struct {
+	HostConfigFault
+}
+
+func init() {
+	t["DisableAdminNotSupported"] = reflect.TypeOf((*DisableAdminNotSupported)(nil)).Elem()
+}
+
+type DisableAdminNotSupportedFault DisableAdminNotSupported
+
+func init() {
+	t["DisableAdminNotSupportedFault"] = reflect.TypeOf((*DisableAdminNotSupportedFault)(nil)).Elem()
+}
+
+type DisableEvcModeRequestType struct {
+	This ManagedObjectReference `xml:"_this"`
+}
+
+func init() {
+	t["DisableEvcModeRequestType"] = reflect.TypeOf((*DisableEvcModeRequestType)(nil)).Elem()
+}
+
+type DisableEvcMode_Task DisableEvcModeRequestType
+
+func init() {
+	t["DisableEvcMode_Task"] = reflect.TypeOf((*DisableEvcMode_Task)(nil)).Elem()
+}
+
+type DisableEvcMode_TaskResponse struct {
+	Returnval ManagedObjectReference `xml:"returnval"`
+}
+
+type DisableFeature DisableFeatureRequestType
+
+func init() {
+	t["DisableFeature"] = reflect.TypeOf((*DisableFeature)(nil)).Elem()
+}
+
+type DisableFeatureRequestType struct {
+	This ManagedObjectReference `xml:"_this"`
+	Host *ManagedObjectReference `xml:"host,omitempty"`
+	FeatureKey string `xml:"featureKey"`
+}
+
+func init() {
+	t["DisableFeatureRequestType"] = reflect.TypeOf((*DisableFeatureRequestType)(nil)).Elem()
+}
+
+type DisableFeatureResponse struct {
+	Returnval bool `xml:"returnval"`
+}
+
+type DisableHyperThreading DisableHyperThreadingRequestType
+
+func init() {
+	t["DisableHyperThreading"] = reflect.TypeOf((*DisableHyperThreading)(nil)).Elem()
+}
+
+type DisableHyperThreadingRequestType struct {
+	This ManagedObjectReference `xml:"_this"`
+}
+
+func init() {
+	t["DisableHyperThreadingRequestType"] = reflect.TypeOf((*DisableHyperThreadingRequestType)(nil)).Elem()
+}
+
+type DisableHyperThreadingResponse struct {
+}
+
+type DisableMultipathPath DisableMultipathPathRequestType
+
+func init() {
+	t["DisableMultipathPath"] = reflect.TypeOf((*DisableMultipathPath)(nil)).Elem()
+}
+
+type DisableMultipathPathRequestType struct {
+	This ManagedObjectReference `xml:"_this"`
+	PathName string `xml:"pathName"`
+}
+
+func init() {
+	t["DisableMultipathPathRequestType"] = reflect.TypeOf((*DisableMultipathPathRequestType)(nil)).Elem()
+}
+
+type DisableMultipathPathResponse struct {
+}
+
+type DisableRuleset DisableRulesetRequestType
+
+func init() {
+	t["DisableRuleset"] = reflect.TypeOf((*DisableRuleset)(nil)).Elem()
+}
+
+type DisableRulesetRequestType struct {
+	This ManagedObjectReference `xml:"_this"`
+	Id string `xml:"id"`
+}
+
+func init() {
+	t["DisableRulesetRequestType"] = reflect.TypeOf((*DisableRulesetRequestType)(nil)).Elem()
+}
+
+type DisableRulesetResponse struct {
+}
+
+type DisableSecondaryVMRequestType struct {
+	This ManagedObjectReference `xml:"_this"`
+	Vm ManagedObjectReference `xml:"vm"`
+}
+
+func init() {
+	t["DisableSecondaryVMRequestType"] = reflect.TypeOf((*DisableSecondaryVMRequestType)(nil)).Elem()
+}
+
+type DisableSecondaryVM_Task DisableSecondaryVMRequestType
+
+func init() {
+	t["DisableSecondaryVM_Task"] = reflect.TypeOf((*DisableSecondaryVM_Task)(nil)).Elem()
+}
+
+type DisableSecondaryVM_TaskResponse struct {
+	Returnval ManagedObjectReference `xml:"returnval"`
+}
+
+type DisableSmartCardAuthentication DisableSmartCardAuthenticationRequestType
+
+func init() {
+	t["DisableSmartCardAuthentication"] = reflect.TypeOf((*DisableSmartCardAuthentication)(nil)).Elem()
+}
+
+type DisableSmartCardAuthenticationRequestType struct {
+	This ManagedObjectReference `xml:"_this"`
+}
+
+func init() {
+	t["DisableSmartCardAuthenticationRequestType"] = reflect.TypeOf((*DisableSmartCardAuthenticationRequestType)(nil)).Elem()
+}
+
+type DisableSmartCardAuthenticationResponse struct {
+}
+
+type DisallowedChangeByService struct {
+	RuntimeFault
+
+	ServiceName string `xml:"serviceName"`
+	DisallowedChange string `xml:"disallowedChange,omitempty"`
+}
+
+func init() {
+	t["DisallowedChangeByService"] = reflect.TypeOf((*DisallowedChangeByService)(nil)).Elem()
+}
+
+type DisallowedChangeByServiceFault DisallowedChangeByService
+
+func init() {
+	t["DisallowedChangeByServiceFault"] = reflect.TypeOf((*DisallowedChangeByServiceFault)(nil)).Elem()
+}
+
+type DisallowedDiskModeChange struct {
+	InvalidDeviceSpec
+}
+
+func init() {
+	t["DisallowedDiskModeChange"] = reflect.TypeOf((*DisallowedDiskModeChange)(nil)).Elem()
+}
+
+type DisallowedDiskModeChangeFault DisallowedDiskModeChange
+
+func init() {
+	t["DisallowedDiskModeChangeFault"] = reflect.TypeOf((*DisallowedDiskModeChangeFault)(nil)).Elem()
+}
+
+type DisallowedMigrationDeviceAttached struct {
+	MigrationFault
+
+	Fault LocalizedMethodFault `xml:"fault"`
+}
+
+func init() {
+	t["DisallowedMigrationDeviceAttached"] = reflect.TypeOf((*DisallowedMigrationDeviceAttached)(nil)).Elem()
+}
+
+type DisallowedMigrationDeviceAttachedFault DisallowedMigrationDeviceAttached
+
+func init() {
+	t["DisallowedMigrationDeviceAttachedFault"] = reflect.TypeOf((*DisallowedMigrationDeviceAttachedFault)(nil)).Elem()
+}
+
+type DisallowedOperationOnFailoverHost struct {
+	RuntimeFault
+
+	Host ManagedObjectReference `xml:"host"`
+	Hostname string `xml:"hostname"`
+}
+
+func init() {
+	t["DisallowedOperationOnFailoverHost"] = reflect.TypeOf((*DisallowedOperationOnFailoverHost)(nil)).Elem()
+}
+
+type DisallowedOperationOnFailoverHostFault DisallowedOperationOnFailoverHost
+
+func init() {
+	t["DisallowedOperationOnFailoverHostFault"] = reflect.TypeOf((*DisallowedOperationOnFailoverHostFault)(nil)).Elem()
+}
+
+type DisconnectHostRequestType struct {
+	This ManagedObjectReference `xml:"_this"`
+}
+
+func init() {
+	t["DisconnectHostRequestType"] = reflect.TypeOf((*DisconnectHostRequestType)(nil)).Elem()
+}
+
+type DisconnectHost_Task DisconnectHostRequestType
+
+func init() {
+	t["DisconnectHost_Task"] = reflect.TypeOf((*DisconnectHost_Task)(nil)).Elem()
+}
+
+type DisconnectHost_TaskResponse struct {
+	Returnval ManagedObjectReference `xml:"returnval"`
+}
+
+type DisconnectedHostsBlockingEVC struct {
+	EVCConfigFault
+}
+
+func init() {
+	t["DisconnectedHostsBlockingEVC"] = reflect.TypeOf((*DisconnectedHostsBlockingEVC)(nil)).Elem()
+}
+
+type DisconnectedHostsBlockingEVCFault DisconnectedHostsBlockingEVC
+
+func init() {
+	t["DisconnectedHostsBlockingEVCFault"] = reflect.TypeOf((*DisconnectedHostsBlockingEVCFault)(nil)).Elem()
+}
+
+type DiscoverFcoeHbas DiscoverFcoeHbasRequestType
+
+func init() {
+	t["DiscoverFcoeHbas"] = reflect.TypeOf((*DiscoverFcoeHbas)(nil)).Elem()
+}
+
+type DiscoverFcoeHbasRequestType struct {
+	This ManagedObjectReference `xml:"_this"`
+	FcoeSpec FcoeConfigFcoeSpecification `xml:"fcoeSpec"`
+}
+
+func init() {
+	t["DiscoverFcoeHbasRequestType"] = reflect.TypeOf((*DiscoverFcoeHbasRequestType)(nil)).Elem()
+}
+
+type DiscoverFcoeHbasResponse struct {
+}
+
+type DiskChangeExtent struct {
+	DynamicData
+
+	Start int64 `xml:"start"`
+	Length int64 `xml:"length"`
+}
+
+func init() {
+	t["DiskChangeExtent"] = reflect.TypeOf((*DiskChangeExtent)(nil)).Elem()
+}
+
+type DiskChangeInfo struct {
+	DynamicData
+
+	StartOffset int64 `xml:"startOffset"`
+	Length int64 `xml:"length"`
+	ChangedArea []DiskChangeExtent `xml:"changedArea,omitempty"`
+}
+
+func init() {
+	t["DiskChangeInfo"] = reflect.TypeOf((*DiskChangeInfo)(nil)).Elem()
+}
+
+type DiskHasPartitions struct {
+	VsanDiskFault
+}
+
+func init() {
+	t["DiskHasPartitions"] = reflect.TypeOf((*DiskHasPartitions)(nil)).Elem()
+}
+
+type DiskHasPartitionsFault DiskHasPartitions
+
+func init() {
+	t["DiskHasPartitionsFault"] = reflect.TypeOf((*DiskHasPartitionsFault)(nil)).Elem()
+}
+
+type DiskIsLastRemainingNonSSD struct {
+	VsanDiskFault
+}
+
+func init() {
+	t["DiskIsLastRemainingNonSSD"] = reflect.TypeOf((*DiskIsLastRemainingNonSSD)(nil)).Elem()
+}
+
+type DiskIsLastRemainingNonSSDFault DiskIsLastRemainingNonSSD
+
+func init() {
+	t["DiskIsLastRemainingNonSSDFault"] = reflect.TypeOf((*DiskIsLastRemainingNonSSDFault)(nil)).Elem()
+}
+
+type DiskIsNonLocal struct {
+	VsanDiskFault
+}
+
+func init() {
+	t["DiskIsNonLocal"] = reflect.TypeOf((*DiskIsNonLocal)(nil)).Elem()
+}
+
+type DiskIsNonLocalFault DiskIsNonLocal
+
+func init() {
+	t["DiskIsNonLocalFault"] = reflect.TypeOf((*DiskIsNonLocalFault)(nil)).Elem()
+}
+
+type DiskIsUSB struct {
+	VsanDiskFault
+}
+
+func init() {
+	t["DiskIsUSB"] = reflect.TypeOf((*DiskIsUSB)(nil)).Elem()
+}
+
+type DiskIsUSBFault DiskIsUSB
+
+func init() {
+	t["DiskIsUSBFault"] = reflect.TypeOf((*DiskIsUSBFault)(nil)).Elem()
+}
+
+type DiskMoveTypeNotSupported struct {
+	MigrationFault
+}
+
+func init() {
+	t["DiskMoveTypeNotSupported"] = reflect.TypeOf((*DiskMoveTypeNotSupported)(nil)).Elem()
+}
+
+type DiskMoveTypeNotSupportedFault DiskMoveTypeNotSupported
+
+func init() {
+	t["DiskMoveTypeNotSupportedFault"] = reflect.TypeOf((*DiskMoveTypeNotSupportedFault)(nil)).Elem()
+}
+
+type DiskNotSupported struct {
+	VirtualHardwareCompatibilityIssue
+
+	Disk int `xml:"disk"`
+}
+
+func init() {
+	t["DiskNotSupported"] = reflect.TypeOf((*DiskNotSupported)(nil)).Elem()
+}
+
+type DiskNotSupportedFault BaseDiskNotSupported
+
+func init() {
+	t["DiskNotSupportedFault"] = reflect.TypeOf((*DiskNotSupportedFault)(nil)).Elem()
+}
+
+type DiskTooSmall struct {
+	VsanDiskFault
+}
+
+func init() {
+	t["DiskTooSmall"] = reflect.TypeOf((*DiskTooSmall)(nil)).Elem()
+}
+
+type DiskTooSmallFault DiskTooSmall
+
+func init() {
+	t["DiskTooSmallFault"] = reflect.TypeOf((*DiskTooSmallFault)(nil)).Elem()
+}
+
+type DissociateProfile DissociateProfileRequestType
+
+func init() {
+	t["DissociateProfile"] = reflect.TypeOf((*DissociateProfile)(nil)).Elem()
+}
+
+type DissociateProfileRequestType struct {
+	This ManagedObjectReference `xml:"_this"`
+	Entity []ManagedObjectReference `xml:"entity,omitempty"`
+}
+
+func init() {
+	t["DissociateProfileRequestType"] = reflect.TypeOf((*DissociateProfileRequestType)(nil)).Elem()
+}
+
+type DissociateProfileResponse struct {
+}
+
+type DistributedVirtualPort struct {
+	DynamicData
+
+	Key string `xml:"key"`
+	Config DVPortConfigInfo `xml:"config"`
+	DvsUuid string `xml:"dvsUuid"`
+	PortgroupKey string `xml:"portgroupKey,omitempty"`
+	ProxyHost *ManagedObjectReference `xml:"proxyHost,omitempty"`
+	Connectee *DistributedVirtualSwitchPortConnectee `xml:"connectee,omitempty"`
+	Conflict bool `xml:"conflict"`
+	ConflictPortKey string `xml:"conflictPortKey,omitempty"`
+	State *DVPortState `xml:"state,omitempty"`
+	ConnectionCookie int `xml:"connectionCookie,omitempty"`
+	LastStatusChange time.Time `xml:"lastStatusChange"`
+	HostLocalPort *bool `xml:"hostLocalPort"`
+}
+
+func init() {
+	t["DistributedVirtualPort"] = reflect.TypeOf((*DistributedVirtualPort)(nil)).Elem()
+}
+
+type DistributedVirtualPortgroupInfo struct {
+	DynamicData
+
+	SwitchName string `xml:"switchName"`
+	SwitchUuid string `xml:"switchUuid"`
+	PortgroupName string `xml:"portgroupName"`
+	PortgroupKey string `xml:"portgroupKey"`
+	PortgroupType string `xml:"portgroupType"`
+	UplinkPortgroup bool `xml:"uplinkPortgroup"`
+	Portgroup ManagedObjectReference `xml:"portgroup"`
+	NetworkReservationSupported *bool `xml:"networkReservationSupported"`
+}
+
+func init() {
+	t["DistributedVirtualPortgroupInfo"] = reflect.TypeOf((*DistributedVirtualPortgroupInfo)(nil)).Elem()
+}
+
+type DistributedVirtualSwitchHostMember struct {
+	DynamicData
+
+	RuntimeState *DistributedVirtualSwitchHostMemberRuntimeState `xml:"runtimeState,omitempty"`
+	Config DistributedVirtualSwitchHostMemberConfigInfo `xml:"config"`
+	ProductInfo *DistributedVirtualSwitchProductSpec `xml:"productInfo,omitempty"`
+	UplinkPortKey []string `xml:"uplinkPortKey,omitempty"`
+	Status string `xml:"status"`
+	StatusDetail string `xml:"statusDetail,omitempty"`
+}
+
+func init() {
+	t["DistributedVirtualSwitchHostMember"] = reflect.TypeOf((*DistributedVirtualSwitchHostMember)(nil)).Elem()
+}
+
+type DistributedVirtualSwitchHostMemberBacking struct {
+	DynamicData
+}
+
+func init() {
+	t["DistributedVirtualSwitchHostMemberBacking"] = reflect.TypeOf((*DistributedVirtualSwitchHostMemberBacking)(nil)).Elem()
+}
+
+type DistributedVirtualSwitchHostMemberConfigInfo struct {
+	DynamicData
+
+	Host *ManagedObjectReference `xml:"host,omitempty"`
+	MaxProxySwitchPorts int `xml:"maxProxySwitchPorts"`
+	VendorSpecificConfig []DistributedVirtualSwitchKeyedOpaqueBlob `xml:"vendorSpecificConfig,omitempty"`
+	Backing BaseDistributedVirtualSwitchHostMemberBacking `xml:"backing,typeattr"`
+}
+
+func init() {
+	t["DistributedVirtualSwitchHostMemberConfigInfo"] = reflect.TypeOf((*DistributedVirtualSwitchHostMemberConfigInfo)(nil)).Elem()
+}
+
+type DistributedVirtualSwitchHostMemberConfigSpec struct {
+	DynamicData
+
+	Operation string `xml:"operation"`
+	Host ManagedObjectReference `xml:"host"`
+	Backing BaseDistributedVirtualSwitchHostMemberBacking `xml:"backing,omitempty,typeattr"`
+	MaxProxySwitchPorts int `xml:"maxProxySwitchPorts,omitempty"`
+	VendorSpecificConfig []DistributedVirtualSwitchKeyedOpaqueBlob `xml:"vendorSpecificConfig,omitempty"`
+}
+
+func init() {
+	t["DistributedVirtualSwitchHostMemberConfigSpec"] = reflect.TypeOf((*DistributedVirtualSwitchHostMemberConfigSpec)(nil)).Elem()
+}
+
+type DistributedVirtualSwitchHostMemberPnicBacking struct {
+	DistributedVirtualSwitchHostMemberBacking
+
+	PnicSpec []DistributedVirtualSwitchHostMemberPnicSpec `xml:"pnicSpec,omitempty"`
+}
+
+func init() {
+	t["DistributedVirtualSwitchHostMemberPnicBacking"] = reflect.TypeOf((*DistributedVirtualSwitchHostMemberPnicBacking)(nil)).Elem()
+}
+
+type DistributedVirtualSwitchHostMemberPnicSpec struct {
+	DynamicData
+
+	PnicDevice string `xml:"pnicDevice"`
+	UplinkPortKey string `xml:"uplinkPortKey,omitempty"`
+	UplinkPortgroupKey string `xml:"uplinkPortgroupKey,omitempty"`
+	ConnectionCookie int `xml:"connectionCookie,omitempty"`
+}
+
+func init() {
+	t["DistributedVirtualSwitchHostMemberPnicSpec"] = reflect.TypeOf((*DistributedVirtualSwitchHostMemberPnicSpec)(nil)).Elem()
+}
+
+type DistributedVirtualSwitchHostMemberRuntimeState struct {
+	DynamicData
+
+	CurrentMaxProxySwitchPorts int `xml:"currentMaxProxySwitchPorts"`
+}
+
+func init() {
+	t["DistributedVirtualSwitchHostMemberRuntimeState"] = reflect.TypeOf((*DistributedVirtualSwitchHostMemberRuntimeState)(nil)).Elem()
+}
+
+type DistributedVirtualSwitchHostProductSpec struct {
+	DynamicData
+
+	ProductLineId string `xml:"productLineId,omitempty"`
+	Version string `xml:"version,omitempty"`
+}
+
+func init() {
+	t["DistributedVirtualSwitchHostProductSpec"] = reflect.TypeOf((*DistributedVirtualSwitchHostProductSpec)(nil)).Elem()
+}
+
+type DistributedVirtualSwitchInfo struct {
+	DynamicData
+
+	SwitchName string `xml:"switchName"`
+	SwitchUuid string `xml:"switchUuid"`
+	DistributedVirtualSwitch ManagedObjectReference `xml:"distributedVirtualSwitch"`
+	NetworkReservationSupported *bool `xml:"networkReservationSupported"`
+}
+
+func init() {
+	t["DistributedVirtualSwitchInfo"] = reflect.TypeOf((*DistributedVirtualSwitchInfo)(nil)).Elem()
+}
+
+type DistributedVirtualSwitchKeyedOpaqueBlob struct {
+	DynamicData
+
+	Key string `xml:"key"`
+	OpaqueData string `xml:"opaqueData"`
+}
+
+func init() {
+	t["DistributedVirtualSwitchKeyedOpaqueBlob"] = reflect.TypeOf((*DistributedVirtualSwitchKeyedOpaqueBlob)(nil)).Elem()
+}
+
+type DistributedVirtualSwitchManagerCompatibilityResult struct {
+	DynamicData
+
+	Host ManagedObjectReference `xml:"host"`
+	Error []LocalizedMethodFault `xml:"error,omitempty"`
+}
+
+func init() {
+	t["DistributedVirtualSwitchManagerCompatibilityResult"] = reflect.TypeOf((*DistributedVirtualSwitchManagerCompatibilityResult)(nil)).Elem()
+}
+
+type DistributedVirtualSwitchManagerDvsProductSpec struct {
+	DynamicData
+
+	NewSwitchProductSpec *DistributedVirtualSwitchProductSpec `xml:"newSwitchProductSpec,omitempty"`
+	DistributedVirtualSwitch *ManagedObjectReference `xml:"distributedVirtualSwitch,omitempty"`
+}
+
+func init() {
+	t["DistributedVirtualSwitchManagerDvsProductSpec"] = reflect.TypeOf((*DistributedVirtualSwitchManagerDvsProductSpec)(nil)).Elem()
+}
+
+type DistributedVirtualSwitchManagerHostArrayFilter struct {
+	DistributedVirtualSwitchManagerHostDvsFilterSpec
+
+	Host []ManagedObjectReference `xml:"host"`
+}
+
+func init() {
+	t["DistributedVirtualSwitchManagerHostArrayFilter"] = reflect.TypeOf((*DistributedVirtualSwitchManagerHostArrayFilter)(nil)).Elem()
+}
+
+type DistributedVirtualSwitchManagerHostContainer struct {
+	DynamicData
+
+	Container ManagedObjectReference `xml:"container"`
+	Recursive bool `xml:"recursive"`
+}
+
+func init() {
+	t["DistributedVirtualSwitchManagerHostContainer"] = reflect.TypeOf((*DistributedVirtualSwitchManagerHostContainer)(nil)).Elem()
+}
+
+type DistributedVirtualSwitchManagerHostContainerFilter struct {
+	DistributedVirtualSwitchManagerHostDvsFilterSpec
+
+	HostContainer DistributedVirtualSwitchManagerHostContainer `xml:"hostContainer"`
+}
+
+func init() {
+	t["DistributedVirtualSwitchManagerHostContainerFilter"] = reflect.TypeOf((*DistributedVirtualSwitchManagerHostContainerFilter)(nil)).Elem()
+}
+
+type DistributedVirtualSwitchManagerHostDvsFilterSpec struct {
+	DynamicData
+
+	Inclusive bool `xml:"inclusive"`
+}
+
+func init() {
+	t["DistributedVirtualSwitchManagerHostDvsFilterSpec"] = reflect.TypeOf((*DistributedVirtualSwitchManagerHostDvsFilterSpec)(nil)).Elem()
+}
+
+type DistributedVirtualSwitchManagerHostDvsMembershipFilter struct {
+	DistributedVirtualSwitchManagerHostDvsFilterSpec
+
+	DistributedVirtualSwitch ManagedObjectReference `xml:"distributedVirtualSwitch"`
+}
+
+func init() {
+	t["DistributedVirtualSwitchManagerHostDvsMembershipFilter"] = reflect.TypeOf((*DistributedVirtualSwitchManagerHostDvsMembershipFilter)(nil)).Elem()
+}
+
+type DistributedVirtualSwitchManagerImportResult struct {
+	DynamicData
+
+	DistributedVirtualSwitch []ManagedObjectReference `xml:"distributedVirtualSwitch,omitempty"`
+	DistributedVirtualPortgroup []ManagedObjectReference `xml:"distributedVirtualPortgroup,omitempty"`
+	ImportFault []ImportOperationBulkFaultFaultOnImport `xml:"importFault,omitempty"`
+}
+
+func init() {
+	t["DistributedVirtualSwitchManagerImportResult"] = reflect.TypeOf((*DistributedVirtualSwitchManagerImportResult)(nil)).Elem()
+}
+
+type DistributedVirtualSwitchPortConnectee struct {
+	DynamicData
+
+	ConnectedEntity *ManagedObjectReference `xml:"connectedEntity,omitempty"`
+	NicKey string `xml:"nicKey,omitempty"`
+	Type string `xml:"type,omitempty"`
+	AddressHint string `xml:"addressHint,omitempty"`
+}
+
+func init() {
+	t["DistributedVirtualSwitchPortConnectee"] = reflect.TypeOf((*DistributedVirtualSwitchPortConnectee)(nil)).Elem()
+}
+
+type DistributedVirtualSwitchPortConnection struct {
+	DynamicData
+
+	SwitchUuid string `xml:"switchUuid"`
+	PortgroupKey string `xml:"portgroupKey,omitempty"`
+	PortKey string `xml:"portKey,omitempty"`
+	ConnectionCookie int `xml:"connectionCookie,omitempty"`
+}
+
+func init() {
+	t["DistributedVirtualSwitchPortConnection"] = reflect.TypeOf((*DistributedVirtualSwitchPortConnection)(nil)).Elem()
+}
+
+type DistributedVirtualSwitchPortCriteria struct {
+	DynamicData
+
+	Connected *bool `xml:"connected"`
+	Active *bool `xml:"active"`
+	UplinkPort *bool `xml:"uplinkPort"`
+	Scope *ManagedObjectReference `xml:"scope,omitempty"`
+	PortgroupKey []string `xml:"portgroupKey,omitempty"`
+	Inside *bool `xml:"inside"`
+	PortKey []string `xml:"portKey,omitempty"`
+}
+
+func init() {
+	t["DistributedVirtualSwitchPortCriteria"] = reflect.TypeOf((*DistributedVirtualSwitchPortCriteria)(nil)).Elem()
+}
+
+type DistributedVirtualSwitchPortStatistics struct {
+	DynamicData
+
+	PacketsInMulticast int64 `xml:"packetsInMulticast"`
+	PacketsOutMulticast int64 `xml:"packetsOutMulticast"`
+	BytesInMulticast int64 `xml:"bytesInMulticast"`
+	BytesOutMulticast int64 `xml:"bytesOutMulticast"`
+	PacketsInUnicast int64 `xml:"packetsInUnicast"`
+	PacketsOutUnicast int64 `xml:"packetsOutUnicast"`
+	BytesInUnicast int64 `xml:"bytesInUnicast"`
+	BytesOutUnicast int64 `xml:"bytesOutUnicast"`
+	PacketsInBroadcast int64 `xml:"packetsInBroadcast"`
+	PacketsOutBroadcast int64 `xml:"packetsOutBroadcast"`
+	BytesInBroadcast int64 `xml:"bytesInBroadcast"`
+	BytesOutBroadcast int64 `xml:"bytesOutBroadcast"`
+	PacketsInDropped int64 `xml:"packetsInDropped"`
+	PacketsOutDropped int64 `xml:"packetsOutDropped"`
+	PacketsInException int64 `xml:"packetsInException"`
+	PacketsOutException int64 `xml:"packetsOutException"`
+}
+
+func init() {
+	t["DistributedVirtualSwitchPortStatistics"] = reflect.TypeOf((*DistributedVirtualSwitchPortStatistics)(nil)).Elem()
+}
+
+type DistributedVirtualSwitchProductSpec struct {
+	DynamicData
+
+	Name string `xml:"name,omitempty"`
+	Vendor string `xml:"vendor,omitempty"`
+	Version string `xml:"version,omitempty"`
+	Build string `xml:"build,omitempty"`
+	ForwardingClass string `xml:"forwardingClass,omitempty"`
+	BundleId string `xml:"bundleId,omitempty"`
+	BundleUrl string `xml:"bundleUrl,omitempty"`
+}
+
+func init() {
+	t["DistributedVirtualSwitchProductSpec"] = reflect.TypeOf((*DistributedVirtualSwitchProductSpec)(nil)).Elem()
+}
+
+type DoesCustomizationSpecExist DoesCustomizationSpecExistRequestType
+
+func init() {
+	t["DoesCustomizationSpecExist"] = reflect.TypeOf((*DoesCustomizationSpecExist)(nil)).Elem()
+}
+
+type DoesCustomizationSpecExistRequestType struct {
+	This ManagedObjectReference `xml:"_this"`
+	Name string `xml:"name"`
+}
+
+func init() {
+	t["DoesCustomizationSpecExistRequestType"] = reflect.TypeOf((*DoesCustomizationSpecExistRequestType)(nil)).Elem()
+}
+
+type DoesCustomizationSpecExistResponse struct {
+	Returnval bool `xml:"returnval"`
+}
+
+type DomainNotFound struct {
+	ActiveDirectoryFault
+
+	DomainName string `xml:"domainName"`
+}
+
+func init() {
+	t["DomainNotFound"] = reflect.TypeOf((*DomainNotFound)(nil)).Elem()
+}
+
+type DomainNotFoundFault DomainNotFound
+
+func init() {
+	t["DomainNotFoundFault"] = reflect.TypeOf((*DomainNotFoundFault)(nil)).Elem()
+}
+
+type DrsDisabledEvent struct {
+	ClusterEvent
+}
+
+func init() {
+	t["DrsDisabledEvent"] = reflect.TypeOf((*DrsDisabledEvent)(nil)).Elem()
+}
+
+type DrsDisabledOnVm struct {
+	VimFault
+}
+
+func init() {
+	t["DrsDisabledOnVm"] = reflect.TypeOf((*DrsDisabledOnVm)(nil)).Elem()
+}
+
+type DrsDisabledOnVmFault DrsDisabledOnVm
+
+func init() {
+	t["DrsDisabledOnVmFault"] = reflect.TypeOf((*DrsDisabledOnVmFault)(nil)).Elem()
+}
+
+type DrsEnabledEvent struct {
+	ClusterEvent
+
+	Behavior string `xml:"behavior"`
+}
+
+func init() {
+	t["DrsEnabledEvent"] = reflect.TypeOf((*DrsEnabledEvent)(nil)).Elem()
+}
+
+type DrsEnteredStandbyModeEvent struct {
+	EnteredStandbyModeEvent
+}
+
+func init() {
+	t["DrsEnteredStandbyModeEvent"] = reflect.TypeOf((*DrsEnteredStandbyModeEvent)(nil)).Elem()
+}
+
+type DrsEnteringStandbyModeEvent struct {
+	EnteringStandbyModeEvent
+}
+
+func init() {
+	t["DrsEnteringStandbyModeEvent"] = reflect.TypeOf((*DrsEnteringStandbyModeEvent)(nil)).Elem()
+}
+
+type DrsExitStandbyModeFailedEvent struct {
+	ExitStandbyModeFailedEvent
+}
+
+func init() {
+	t["DrsExitStandbyModeFailedEvent"] = reflect.TypeOf((*DrsExitStandbyModeFailedEvent)(nil)).Elem()
+}
+
+type DrsExitedStandbyModeEvent struct {
+	ExitedStandbyModeEvent
+}
+
+func init() {
+	t["DrsExitedStandbyModeEvent"] = reflect.TypeOf((*DrsExitedStandbyModeEvent)(nil)).Elem()
+}
+
+type DrsExitingStandbyModeEvent struct {
+	ExitingStandbyModeEvent
+}
+
+func init() {
+	t["DrsExitingStandbyModeEvent"] = reflect.TypeOf((*DrsExitingStandbyModeEvent)(nil)).Elem()
+}
+
+type DrsInvocationFailedEvent struct {
+	ClusterEvent
+}
+
+func init() {
+	t["DrsInvocationFailedEvent"] = reflect.TypeOf((*DrsInvocationFailedEvent)(nil)).Elem()
+}
+
+type DrsRecoveredFromFailureEvent struct {
+	ClusterEvent
+}
+
+func init() {
+	t["DrsRecoveredFromFailureEvent"] = reflect.TypeOf((*DrsRecoveredFromFailureEvent)(nil)).Elem()
+}
+
+type DrsResourceConfigureFailedEvent struct {
+	HostEvent
+
+	Reason LocalizedMethodFault `xml:"reason"`
+}
+
+func init() {
+	t["DrsResourceConfigureFailedEvent"] = reflect.TypeOf((*DrsResourceConfigureFailedEvent)(nil)).Elem()
+}
+
+type DrsResourceConfigureSyncedEvent struct {
+	HostEvent
+}
+
+func init() {
+	t["DrsResourceConfigureSyncedEvent"] = reflect.TypeOf((*DrsResourceConfigureSyncedEvent)(nil)).Elem()
+}
+
+type DrsRuleComplianceEvent struct {
+	VmEvent
+}
+
+func init() {
+	t["DrsRuleComplianceEvent"] = reflect.TypeOf((*DrsRuleComplianceEvent)(nil)).Elem()
+}
+
+type DrsRuleViolationEvent struct {
+	VmEvent
+}
+
+func init() {
+	t["DrsRuleViolationEvent"] = reflect.TypeOf((*DrsRuleViolationEvent)(nil)).Elem()
+}
+
+type DrsSoftRuleViolationEvent struct {
+	VmEvent
+}
+
+func init() {
+	t["DrsSoftRuleViolationEvent"] = reflect.TypeOf((*DrsSoftRuleViolationEvent)(nil)).Elem()
+}
+
+type DrsVmMigratedEvent struct {
+	VmMigratedEvent
+}
+
+func init() {
+	t["DrsVmMigratedEvent"] = reflect.TypeOf((*DrsVmMigratedEvent)(nil)).Elem()
+}
+
+type DrsVmPoweredOnEvent struct {
+	VmPoweredOnEvent
+}
+
+func init() {
+	t["DrsVmPoweredOnEvent"] = reflect.TypeOf((*DrsVmPoweredOnEvent)(nil)).Elem()
+}
+
+type DrsVmotionIncompatibleFault struct {
+	VirtualHardwareCompatibilityIssue
+
+	Host ManagedObjectReference `xml:"host"`
+}
+
+func init() {
+	t["DrsVmotionIncompatibleFault"] = reflect.TypeOf((*DrsVmotionIncompatibleFault)(nil)).Elem()
+}
+
+type DrsVmotionIncompatibleFaultFault DrsVmotionIncompatibleFault
+
+func init() {
+	t["DrsVmotionIncompatibleFaultFault"] = reflect.TypeOf((*DrsVmotionIncompatibleFaultFault)(nil)).Elem()
+}
+
+type DuplicateCustomizationSpec DuplicateCustomizationSpecRequestType
+
+func init() {
+	t["DuplicateCustomizationSpec"] = reflect.TypeOf((*DuplicateCustomizationSpec)(nil)).Elem()
+}
+
+type DuplicateCustomizationSpecRequestType struct {
+	This ManagedObjectReference `xml:"_this"`
+	Name string `xml:"name"`
+	NewName string `xml:"newName"`
+}
+
+func init() {
+	t["DuplicateCustomizationSpecRequestType"] = reflect.TypeOf((*DuplicateCustomizationSpecRequestType)(nil)).Elem()
+}
+
+type DuplicateCustomizationSpecResponse struct {
+}
+
+type DuplicateDisks struct {
+	VsanDiskFault
+}
+
+func init() {
+	t["DuplicateDisks"] = reflect.TypeOf((*DuplicateDisks)(nil)).Elem()
+}
+
+type DuplicateDisksFault DuplicateDisks
+
+func init() {
+	t["DuplicateDisksFault"] = reflect.TypeOf((*DuplicateDisksFault)(nil)).Elem()
+}
+
+type DuplicateIpDetectedEvent struct {
+	HostEvent
+
+	DuplicateIP string `xml:"duplicateIP"`
+	MacAddress string `xml:"macAddress"`
+}
+
+func init() {
+	t["DuplicateIpDetectedEvent"] = reflect.TypeOf((*DuplicateIpDetectedEvent)(nil)).Elem()
+}
+
+type DuplicateName struct {
+	VimFault
+
+	Name string `xml:"name"`
+	Object ManagedObjectReference `xml:"object"`
+}
+
+func init() {
+	t["DuplicateName"] = reflect.TypeOf((*DuplicateName)(nil)).Elem()
+}
+
+type DuplicateNameFault DuplicateName
+
+func init() {
+	t["DuplicateNameFault"] = reflect.TypeOf((*DuplicateNameFault)(nil)).Elem()
+}
+
+type DuplicateVsanNetworkInterface struct {
+	VsanFault
+
+	Device string `xml:"device"`
+}
+
+func init() {
+	t["DuplicateVsanNetworkInterface"] = reflect.TypeOf((*DuplicateVsanNetworkInterface)(nil)).Elem()
+}
+
+type DuplicateVsanNetworkInterfaceFault DuplicateVsanNetworkInterface
+
+func init() {
+	t["DuplicateVsanNetworkInterfaceFault"] = reflect.TypeOf((*DuplicateVsanNetworkInterfaceFault)(nil)).Elem()
+}
+
+type DvpgImportEvent struct {
+	DVPortgroupEvent
+
+	ImportType string `xml:"importType"`
+}
+
+func init() {
+	t["DvpgImportEvent"] = reflect.TypeOf((*DvpgImportEvent)(nil)).Elem()
+}
+
+type DvpgRestoreEvent struct {
+	DVPortgroupEvent
+}
+
+func init() {
+	t["DvpgRestoreEvent"] = reflect.TypeOf((*DvpgRestoreEvent)(nil)).Elem()
+}
+
+type DvsAcceptNetworkRuleAction struct {
+	DvsNetworkRuleAction
+}
+
+func init() {
+	t["DvsAcceptNetworkRuleAction"] = reflect.TypeOf((*DvsAcceptNetworkRuleAction)(nil)).Elem()
+}
+
+type DvsApplyOperationFault struct {
+	DvsFault
+
+	ObjectFault []DvsApplyOperationFaultFaultOnObject `xml:"objectFault"`
+}
+
+func init() {
+	t["DvsApplyOperationFault"] = reflect.TypeOf((*DvsApplyOperationFault)(nil)).Elem()
+}
+
+type DvsApplyOperationFaultFault DvsApplyOperationFault
+
+func init() {
+	t["DvsApplyOperationFaultFault"] = reflect.TypeOf((*DvsApplyOperationFaultFault)(nil)).Elem()
+}
+
+type DvsApplyOperationFaultFaultOnObject struct {
+	DynamicData
+
+	ObjectId string `xml:"objectId"`
+	Type string `xml:"type"`
+	Fault LocalizedMethodFault `xml:"fault"`
+}
+
+func init() {
+	t["DvsApplyOperationFaultFaultOnObject"] = reflect.TypeOf((*DvsApplyOperationFaultFaultOnObject)(nil)).Elem()
+}
+
+type DvsCopyNetworkRuleAction struct {
+	DvsNetworkRuleAction
+}
+
+func init() {
+	t["DvsCopyNetworkRuleAction"] = reflect.TypeOf((*DvsCopyNetworkRuleAction)(nil)).Elem()
+}
+
+type DvsCreatedEvent struct {
+	DvsEvent
+
+	Parent FolderEventArgument `xml:"parent"`
+}
+
+func init() {
+	t["DvsCreatedEvent"] = reflect.TypeOf((*DvsCreatedEvent)(nil)).Elem()
+}
+
+type DvsDestroyedEvent struct {
+	DvsEvent
+}
+
+func init() {
+	t["DvsDestroyedEvent"] = reflect.TypeOf((*DvsDestroyedEvent)(nil)).Elem()
+}
+
+type DvsDropNetworkRuleAction struct {
+	DvsNetworkRuleAction
+}
+
+func init() {
+	t["DvsDropNetworkRuleAction"] = reflect.TypeOf((*DvsDropNetworkRuleAction)(nil)).Elem()
+}
+
+type DvsEvent struct {
+	Event
+}
+
+func init() {
+	t["DvsEvent"] = reflect.TypeOf((*DvsEvent)(nil)).Elem()
+}
+
+type DvsEventArgument struct {
+	EntityEventArgument
+
+	Dvs ManagedObjectReference `xml:"dvs"`
+}
+
+func init() {
+	t["DvsEventArgument"] = reflect.TypeOf((*DvsEventArgument)(nil)).Elem()
+}
+
+type DvsFault struct {
+	VimFault
+}
+
+func init() {
+	t["DvsFault"] = reflect.TypeOf((*DvsFault)(nil)).Elem()
+}
+
+type DvsFaultFault BaseDvsFault
+
+func init() {
+	t["DvsFaultFault"] = reflect.TypeOf((*DvsFaultFault)(nil)).Elem()
+}
+
+type DvsFilterConfig struct {
+	InheritablePolicy
+
+	Key string `xml:"key,omitempty"`
+	AgentName string `xml:"agentName,omitempty"`
+	SlotNumber string `xml:"slotNumber,omitempty"`
+	Parameters *DvsFilterParameter `xml:"parameters,omitempty"`
+	OnFailure string `xml:"onFailure,omitempty"`
+}
+
+func init() {
+	t["DvsFilterConfig"] = reflect.TypeOf((*DvsFilterConfig)(nil)).Elem()
+}
+
+type DvsFilterConfigSpec struct {
+	DvsFilterConfig
+
+	Operation string `xml:"operation"`
+}
+
+func init() {
+	t["DvsFilterConfigSpec"] = reflect.TypeOf((*DvsFilterConfigSpec)(nil)).Elem()
+}
+
+type DvsFilterParameter struct {
+	DynamicData
+
+	Parameters []string `xml:"parameters,omitempty"`
+}
+
+func init() {
+	t["DvsFilterParameter"] = reflect.TypeOf((*DvsFilterParameter)(nil)).Elem()
+}
+
+type DvsFilterPolicy struct {
+	InheritablePolicy
+
+	FilterConfig []BaseDvsFilterConfig `xml:"filterConfig,omitempty,typeattr"`
+}
+
+func init() {
+	t["DvsFilterPolicy"] = reflect.TypeOf((*DvsFilterPolicy)(nil)).Elem()
+}
+
+type DvsGreEncapNetworkRuleAction struct {
+	DvsNetworkRuleAction
+
+	EncapsulationIp SingleIp `xml:"encapsulationIp"`
+}
+
+func init() {
+	t["DvsGreEncapNetworkRuleAction"] = reflect.TypeOf((*DvsGreEncapNetworkRuleAction)(nil)).Elem()
+}
+
+type DvsHealthStatusChangeEvent struct {
+	HostEvent
+
+	SwitchUuid string `xml:"switchUuid"`
+	HealthResult BaseHostMemberHealthCheckResult `xml:"healthResult,omitempty,typeattr"`
+}
+
+func init() {
+	t["DvsHealthStatusChangeEvent"] = reflect.TypeOf((*DvsHealthStatusChangeEvent)(nil)).Elem()
+}
+
+type DvsHostBackInSyncEvent struct {
+	DvsEvent
+
+	HostBackInSync HostEventArgument `xml:"hostBackInSync"`
+}
+
+func init() {
+	t["DvsHostBackInSyncEvent"] = reflect.TypeOf((*DvsHostBackInSyncEvent)(nil)).Elem()
+}
+
+type DvsHostInfrastructureTrafficResource struct {
+	DynamicData
+
+	Key string `xml:"key"`
+	Description string `xml:"description,omitempty"`
+	AllocationInfo DvsHostInfrastructureTrafficResourceAllocation `xml:"allocationInfo"`
+}
+
+func init() {
+	t["DvsHostInfrastructureTrafficResource"] = reflect.TypeOf((*DvsHostInfrastructureTrafficResource)(nil)).Elem()
+}
+
+type DvsHostInfrastructureTrafficResourceAllocation struct {
+	DynamicData
+
+	Limit int64 `xml:"limit,omitempty"`
+	Shares *SharesInfo `xml:"shares,omitempty"`
+	Reservation int64 `xml:"reservation,omitempty"`
+}
+
+func init() {
+	t["DvsHostInfrastructureTrafficResourceAllocation"] = reflect.TypeOf((*DvsHostInfrastructureTrafficResourceAllocation)(nil)).Elem()
+}
+
+type DvsHostJoinedEvent struct {
+	DvsEvent
+
+	HostJoined HostEventArgument `xml:"hostJoined"`
+}
+
+func init() {
+	t["DvsHostJoinedEvent"] = reflect.TypeOf((*DvsHostJoinedEvent)(nil)).Elem()
+}
+
+type DvsHostLeftEvent struct {
+	DvsEvent
+
+	HostLeft HostEventArgument `xml:"hostLeft"`
+}
+
+func init() {
+	t["DvsHostLeftEvent"] = reflect.TypeOf((*DvsHostLeftEvent)(nil)).Elem()
+}
+
+type DvsHostStatusUpdated struct {
+	DvsEvent
+
+	HostMember HostEventArgument `xml:"hostMember"`
+	OldStatus string `xml:"oldStatus,omitempty"`
+	NewStatus string `xml:"newStatus,omitempty"`
+	OldStatusDetail string `xml:"oldStatusDetail,omitempty"`
+	NewStatusDetail string `xml:"newStatusDetail,omitempty"`
+}
+
+func init() {
+	t["DvsHostStatusUpdated"] = reflect.TypeOf((*DvsHostStatusUpdated)(nil)).Elem()
+}
+
+type DvsHostVNicProfile struct {
+	DvsVNicProfile
+}
+
+func init() {
+	t["DvsHostVNicProfile"] = reflect.TypeOf((*DvsHostVNicProfile)(nil)).Elem()
+}
+
+type DvsHostWentOutOfSyncEvent struct {
+	DvsEvent
+
+	HostOutOfSync DvsOutOfSyncHostArgument `xml:"hostOutOfSync"`
+}
+
+func init() {
+	t["DvsHostWentOutOfSyncEvent"] = reflect.TypeOf((*DvsHostWentOutOfSyncEvent)(nil)).Elem()
+}
+
+type DvsImportEvent struct {
+	DvsEvent
+
+	ImportType string `xml:"importType"`
+}
+
+func init() {
+	t["DvsImportEvent"] = reflect.TypeOf((*DvsImportEvent)(nil)).Elem()
+}
+
+type DvsIpNetworkRuleQualifier struct {
+	DvsNetworkRuleQualifier
+
+	SourceAddress BaseIpAddress `xml:"sourceAddress,omitempty,typeattr"`
+	DestinationAddress BaseIpAddress `xml:"destinationAddress,omitempty,typeattr"`
+	Protocol *IntExpression `xml:"protocol,omitempty"`
+	SourceIpPort BaseDvsIpPort `xml:"sourceIpPort,omitempty,typeattr"`
+	DestinationIpPort BaseDvsIpPort `xml:"destinationIpPort,omitempty,typeattr"`
+	TcpFlags *IntExpression `xml:"tcpFlags,omitempty"`
+}
+
+func init() {
+	t["DvsIpNetworkRuleQualifier"] = reflect.TypeOf((*DvsIpNetworkRuleQualifier)(nil)).Elem()
+}
+
+type DvsIpPort struct {
+	NegatableExpression
+}
+
+func init() {
+	t["DvsIpPort"] = reflect.TypeOf((*DvsIpPort)(nil)).Elem()
+}
+
+type DvsIpPortRange struct {
+	DvsIpPort
+
+	StartPortNumber int `xml:"startPortNumber"`
+	EndPortNumber int `xml:"endPortNumber"`
+}
+
+func init() {
+	t["DvsIpPortRange"] = reflect.TypeOf((*DvsIpPortRange)(nil)).Elem()
+}
+
+type DvsLogNetworkRuleAction struct {
+	DvsNetworkRuleAction
+}
+
+func init() {
+	t["DvsLogNetworkRuleAction"] = reflect.TypeOf((*DvsLogNetworkRuleAction)(nil)).Elem()
+}
+
+type DvsMacNetworkRuleQualifier struct {
+	DvsNetworkRuleQualifier
+
+	SourceAddress BaseMacAddress `xml:"sourceAddress,omitempty,typeattr"`
+	DestinationAddress BaseMacAddress `xml:"destinationAddress,omitempty,typeattr"`
+	Protocol *IntExpression `xml:"protocol,omitempty"`
+	VlanId *IntExpression `xml:"vlanId,omitempty"`
+}
+
+func init() {
+	t["DvsMacNetworkRuleQualifier"] = reflect.TypeOf((*DvsMacNetworkRuleQualifier)(nil)).Elem()
+}
+
+type DvsMacRewriteNetworkRuleAction struct {
+	DvsNetworkRuleAction
+
+	RewriteMac string `xml:"rewriteMac"`
+}
+
+func init() {
+	t["DvsMacRewriteNetworkRuleAction"] = reflect.TypeOf((*DvsMacRewriteNetworkRuleAction)(nil)).Elem()
+}
+
+type DvsMergedEvent struct {
+	DvsEvent
+
+	SourceDvs DvsEventArgument `xml:"sourceDvs"`
+	DestinationDvs DvsEventArgument `xml:"destinationDvs"`
+}
+
+func init() {
+	t["DvsMergedEvent"] = reflect.TypeOf((*DvsMergedEvent)(nil)).Elem()
+}
+
+type DvsNetworkRuleAction struct {
+	DynamicData
+}
+
+func init() {
+	t["DvsNetworkRuleAction"] = reflect.TypeOf((*DvsNetworkRuleAction)(nil)).Elem()
+}
+
+type DvsNetworkRuleQualifier struct {
+	DynamicData
+
+	Key string `xml:"key,omitempty"`
+}
+
+func init() {
+	t["DvsNetworkRuleQualifier"] = reflect.TypeOf((*DvsNetworkRuleQualifier)(nil)).Elem()
+}
+
+type DvsNotAuthorized struct {
+	DvsFault
+
+	SessionExtensionKey string `xml:"sessionExtensionKey,omitempty"`
+	DvsExtensionKey string `xml:"dvsExtensionKey,omitempty"`
+}
+
+func init() {
+	t["DvsNotAuthorized"] = reflect.TypeOf((*DvsNotAuthorized)(nil)).Elem()
+}
+
+type DvsNotAuthorizedFault DvsNotAuthorized
+
+func init() {
+	t["DvsNotAuthorizedFault"] = reflect.TypeOf((*DvsNotAuthorizedFault)(nil)).Elem()
+}
+
+type DvsOperationBulkFault struct {
+	DvsFault
+
+	HostFault []DvsOperationBulkFaultFaultOnHost `xml:"hostFault"`
+}
+
+func init() {
+	t["DvsOperationBulkFault"] = reflect.TypeOf((*DvsOperationBulkFault)(nil)).Elem()
+}
+
+type DvsOperationBulkFaultFault DvsOperationBulkFault
+
+func init() {
+	t["DvsOperationBulkFaultFault"] = reflect.TypeOf((*DvsOperationBulkFaultFault)(nil)).Elem()
+}
+
+type DvsOperationBulkFaultFaultOnHost struct {
+	DynamicData
+
+	Host ManagedObjectReference `xml:"host"`
+	Fault LocalizedMethodFault `xml:"fault"`
+}
+
+func init() {
+	t["DvsOperationBulkFaultFaultOnHost"] = reflect.TypeOf((*DvsOperationBulkFaultFaultOnHost)(nil)).Elem()
+}
+
+type DvsOutOfSyncHostArgument struct {
+	DynamicData
+
+	OutOfSyncHost HostEventArgument `xml:"outOfSyncHost"`
+	ConfigParamters []string `xml:"configParamters"`
+}
+
+func init() {
+	t["DvsOutOfSyncHostArgument"] = reflect.TypeOf((*DvsOutOfSyncHostArgument)(nil)).Elem()
+}
+
+type DvsPortBlockedEvent struct {
+	DvsEvent
+
+	PortKey string `xml:"portKey"`
+	StatusDetail string `xml:"statusDetail,omitempty"`
+	RuntimeInfo *DVPortStatus `xml:"runtimeInfo,omitempty"`
+}
+
+func init() {
+	t["DvsPortBlockedEvent"] = reflect.TypeOf((*DvsPortBlockedEvent)(nil)).Elem()
+}
+
+type DvsPortConnectedEvent struct {
+	DvsEvent
+
+	PortKey string `xml:"portKey"`
+	Connectee *DistributedVirtualSwitchPortConnectee `xml:"connectee,omitempty"`
+}
+
+func init() {
+	t["DvsPortConnectedEvent"] = reflect.TypeOf((*DvsPortConnectedEvent)(nil)).Elem()
+}
+
+type DvsPortCreatedEvent struct {
+	DvsEvent
+
+	PortKey []string `xml:"portKey"`
+}
+
+func init() {
+	t["DvsPortCreatedEvent"] = reflect.TypeOf((*DvsPortCreatedEvent)(nil)).Elem()
+}
+
+type DvsPortDeletedEvent struct {
+	DvsEvent
+
+	PortKey []string `xml:"portKey"`
+}
+
+func init() {
+	t["DvsPortDeletedEvent"] = reflect.TypeOf((*DvsPortDeletedEvent)(nil)).Elem()
+}
+
+type DvsPortDisconnectedEvent struct {
+	DvsEvent
+
+	PortKey string `xml:"portKey"`
+	Connectee *DistributedVirtualSwitchPortConnectee `xml:"connectee,omitempty"`
+}
+
+func init() {
+	t["DvsPortDisconnectedEvent"] = reflect.TypeOf((*DvsPortDisconnectedEvent)(nil)).Elem()
+}
+
+type DvsPortEnteredPassthruEvent struct {
+	DvsEvent
+
+	PortKey string `xml:"portKey"`
+	RuntimeInfo *DVPortStatus `xml:"runtimeInfo,omitempty"`
+}
+
+func init() {
+	t["DvsPortEnteredPassthruEvent"] = reflect.TypeOf((*DvsPortEnteredPassthruEvent)(nil)).Elem()
+}
+
+type DvsPortExitedPassthruEvent struct {
+	DvsEvent
+
+	PortKey string `xml:"portKey"`
+	RuntimeInfo *DVPortStatus `xml:"runtimeInfo,omitempty"`
+}
+
+func init() {
+	t["DvsPortExitedPassthruEvent"] = reflect.TypeOf((*DvsPortExitedPassthruEvent)(nil)).Elem()
+}
+
+type DvsPortJoinPortgroupEvent struct {
+	DvsEvent
+
+	PortKey string `xml:"portKey"`
+	PortgroupKey string `xml:"portgroupKey"`
+	PortgroupName string `xml:"portgroupName"`
+}
+
+func init() {
+	t["DvsPortJoinPortgroupEvent"] = reflect.TypeOf((*DvsPortJoinPortgroupEvent)(nil)).Elem()
+}
+
+type DvsPortLeavePortgroupEvent struct {
+	DvsEvent
+
+	PortKey string `xml:"portKey"`
+	PortgroupKey string `xml:"portgroupKey"`
+	PortgroupName string `xml:"portgroupName"`
+}
+
+func init() {
+	t["DvsPortLeavePortgroupEvent"] = reflect.TypeOf((*DvsPortLeavePortgroupEvent)(nil)).Elem()
+}
+
+type DvsPortLinkDownEvent struct {
+	DvsEvent
+
+	PortKey string `xml:"portKey"`
+	RuntimeInfo *DVPortStatus `xml:"runtimeInfo,omitempty"`
+}
+
+func init() {
+	t["DvsPortLinkDownEvent"] = reflect.TypeOf((*DvsPortLinkDownEvent)(nil)).Elem()
+}
+
+type DvsPortLinkUpEvent struct {
+	DvsEvent
+
+	PortKey string `xml:"portKey"`
+	RuntimeInfo *DVPortStatus `xml:"runtimeInfo,omitempty"`
+}
+
+func init() {
+	t["DvsPortLinkUpEvent"] = reflect.TypeOf((*DvsPortLinkUpEvent)(nil)).Elem()
+}
+
+type DvsPortReconfiguredEvent struct {
+	DvsEvent
+
+	PortKey []string `xml:"portKey"`
+}
+
+func init() {
+	t["DvsPortReconfiguredEvent"] = reflect.TypeOf((*DvsPortReconfiguredEvent)(nil)).Elem()
+}
+
+type DvsPortRuntimeChangeEvent struct {
+	DvsEvent
+
+	PortKey string `xml:"portKey"`
+	RuntimeInfo DVPortStatus `xml:"runtimeInfo"`
+}
+
+func init() {
+	t["DvsPortRuntimeChangeEvent"] = reflect.TypeOf((*DvsPortRuntimeChangeEvent)(nil)).Elem()
+}
+
+type DvsPortUnblockedEvent struct {
+	DvsEvent
+
+	PortKey string `xml:"portKey"`
+	RuntimeInfo *DVPortStatus `xml:"runtimeInfo,omitempty"`
+}
+
+func init() {
+	t["DvsPortUnblockedEvent"] = reflect.TypeOf((*DvsPortUnblockedEvent)(nil)).Elem()
+}
+
+type DvsPortVendorSpecificStateChangeEvent struct {
+	DvsEvent
+
+	PortKey string `xml:"portKey"`
+}
+
+func init() {
+	t["DvsPortVendorSpecificStateChangeEvent"] = reflect.TypeOf((*DvsPortVendorSpecificStateChangeEvent)(nil)).Elem()
+}
+
+type DvsProfile struct {
+	ApplyProfile
+
+	Key string `xml:"key"`
+	Name string `xml:"name"`
+	Uplink []PnicUplinkProfile `xml:"uplink,omitempty"`
+}
+
+func init() {
+	t["DvsProfile"] = reflect.TypeOf((*DvsProfile)(nil)).Elem()
+}
+
+type DvsPuntNetworkRuleAction struct {
+	DvsNetworkRuleAction
+}
+
+func init() {
+	t["DvsPuntNetworkRuleAction"] = reflect.TypeOf((*DvsPuntNetworkRuleAction)(nil)).Elem()
+}
+
+type DvsRateLimitNetworkRuleAction struct {
+	DvsNetworkRuleAction
+
+	PacketsPerSecond int `xml:"packetsPerSecond"`
+}
+
+func init() {
+	t["DvsRateLimitNetworkRuleAction"] = reflect.TypeOf((*DvsRateLimitNetworkRuleAction)(nil)).Elem()
+}
+
+type DvsReconfigureVmVnicNetworkResourcePoolRequestType struct {
+	This ManagedObjectReference `xml:"_this"`
+	ConfigSpec []DvsVmVnicResourcePoolConfigSpec `xml:"configSpec"`
+}
+
+func init() {
+	t["DvsReconfigureVmVnicNetworkResourcePoolRequestType"] = reflect.TypeOf((*DvsReconfigureVmVnicNetworkResourcePoolRequestType)(nil)).Elem()
+}
+
+type DvsReconfigureVmVnicNetworkResourcePool_Task DvsReconfigureVmVnicNetworkResourcePoolRequestType
+
+func init() {
+	t["DvsReconfigureVmVnicNetworkResourcePool_Task"] = reflect.TypeOf((*DvsReconfigureVmVnicNetworkResourcePool_Task)(nil)).Elem()
+}
+
+type DvsReconfigureVmVnicNetworkResourcePool_TaskResponse struct {
+	Returnval ManagedObjectReference `xml:"returnval"`
+}
+
+type DvsReconfiguredEvent struct {
+	DvsEvent
+
+	ConfigSpec BaseDVSConfigSpec `xml:"configSpec,typeattr"`
+}
+
+func init() {
+	t["DvsReconfiguredEvent"] = reflect.TypeOf((*DvsReconfiguredEvent)(nil)).Elem()
+}
+
+type DvsRenamedEvent struct {
+	DvsEvent
+
+	OldName string `xml:"oldName"`
+	NewName string `xml:"newName"`
+}
+
+func init() {
+	t["DvsRenamedEvent"] = reflect.TypeOf((*DvsRenamedEvent)(nil)).Elem()
+}
+
+type DvsResourceRuntimeInfo struct {
+	DynamicData
+
+	Capacity int `xml:"capacity,omitempty"`
+	Usage int `xml:"usage,omitempty"`
+	Available int `xml:"available,omitempty"`
+	AllocatedResource []DvsVnicAllocatedResource `xml:"allocatedResource,omitempty"`
+	VmVnicNetworkResourcePoolRuntime []DvsVmVnicNetworkResourcePoolRuntimeInfo `xml:"vmVnicNetworkResourcePoolRuntime,omitempty"`
+}
+
+func init() {
+	t["DvsResourceRuntimeInfo"] = reflect.TypeOf((*DvsResourceRuntimeInfo)(nil)).Elem()
+}
+
+type DvsRestoreEvent struct {
+	DvsEvent
+}
+
+func init() {
+	t["DvsRestoreEvent"] = reflect.TypeOf((*DvsRestoreEvent)(nil)).Elem()
+}
+
+type DvsScopeViolated struct {
+	DvsFault
+
+	Scope []ManagedObjectReference `xml:"scope"`
+	Entity ManagedObjectReference `xml:"entity"`
+}
+
+func init() {
+	t["DvsScopeViolated"] = reflect.TypeOf((*DvsScopeViolated)(nil)).Elem()
+}
+
+type DvsScopeViolatedFault DvsScopeViolated
+
+func init() {
+	t["DvsScopeViolatedFault"] = reflect.TypeOf((*DvsScopeViolatedFault)(nil)).Elem()
+}
+
+type DvsServiceConsoleVNicProfile struct {
+	DvsVNicProfile
+}
+
+func init() {
+	t["DvsServiceConsoleVNicProfile"] = reflect.TypeOf((*DvsServiceConsoleVNicProfile)(nil)).Elem()
+}
+
+type DvsSingleIpPort struct {
+	DvsIpPort
+
+	PortNumber int `xml:"portNumber"`
+}
+
+func init() {
+	t["DvsSingleIpPort"] = reflect.TypeOf((*DvsSingleIpPort)(nil)).Elem()
+}
+
+type DvsSystemTrafficNetworkRuleQualifier struct {
+	DvsNetworkRuleQualifier
+
+	TypeOfSystemTraffic *StringExpression `xml:"typeOfSystemTraffic,omitempty"`
+}
+
+func init() {
+	t["DvsSystemTrafficNetworkRuleQualifier"] = reflect.TypeOf((*DvsSystemTrafficNetworkRuleQualifier)(nil)).Elem()
+}
+
+type DvsTrafficFilterConfig struct {
+	DvsFilterConfig
+
+	TrafficRuleset *DvsTrafficRuleset `xml:"trafficRuleset,omitempty"`
+}
+
+func init() {
+	t["DvsTrafficFilterConfig"] = reflect.TypeOf((*DvsTrafficFilterConfig)(nil)).Elem()
+}
+
+type DvsTrafficFilterConfigSpec struct {
+	DvsTrafficFilterConfig
+
+	Operation string `xml:"operation"`
+}
+
+func init() {
+	t["DvsTrafficFilterConfigSpec"] = reflect.TypeOf((*DvsTrafficFilterConfigSpec)(nil)).Elem()
+}
+
+type DvsTrafficRule struct {
+	DynamicData
+
+	Key string `xml:"key,omitempty"`
+	Description string `xml:"description,omitempty"`
+	Sequence int `xml:"sequence,omitempty"`
+	Qualifier []BaseDvsNetworkRuleQualifier `xml:"qualifier,omitempty,typeattr"`
+	Action BaseDvsNetworkRuleAction `xml:"action,omitempty,typeattr"`
+	Direction string `xml:"direction,omitempty"`
+}
+
+func init() {
+	t["DvsTrafficRule"] = reflect.TypeOf((*DvsTrafficRule)(nil)).Elem()
+}
+
+type DvsTrafficRuleset struct {
+	DynamicData
+
+	Key string `xml:"key,omitempty"`
+	Enabled *bool `xml:"enabled"`
+	Precedence int `xml:"precedence,omitempty"`
+	Rules []DvsTrafficRule `xml:"rules,omitempty"`
+}
+
+func init() {
+	t["DvsTrafficRuleset"] = reflect.TypeOf((*DvsTrafficRuleset)(nil)).Elem()
+}
+
+type DvsUpdateTagNetworkRuleAction struct {
+	DvsNetworkRuleAction
+
+	QosTag int `xml:"qosTag,omitempty"`
+	DscpTag int `xml:"dscpTag,omitempty"`
+}
+
+func init() {
+	t["DvsUpdateTagNetworkRuleAction"] = reflect.TypeOf((*DvsUpdateTagNetworkRuleAction)(nil)).Elem()
+}
+
+type DvsUpgradeAvailableEvent struct {
+	DvsEvent
+
+	ProductInfo DistributedVirtualSwitchProductSpec `xml:"productInfo"`
+}
+
+func init() {
+	t["DvsUpgradeAvailableEvent"] = reflect.TypeOf((*DvsUpgradeAvailableEvent)(nil)).Elem()
+}
+
+type DvsUpgradeInProgressEvent struct {
+	DvsEvent
+
+	ProductInfo DistributedVirtualSwitchProductSpec `xml:"productInfo"`
+}
+
+func init() {
+	t["DvsUpgradeInProgressEvent"] = reflect.TypeOf((*DvsUpgradeInProgressEvent)(nil)).Elem()
+}
+
+type DvsUpgradeRejectedEvent struct {
+	DvsEvent
+
+	ProductInfo DistributedVirtualSwitchProductSpec `xml:"productInfo"`
+}
+
+func init() {
+	t["DvsUpgradeRejectedEvent"] = reflect.TypeOf((*DvsUpgradeRejectedEvent)(nil)).Elem()
+}
+
+type DvsUpgradedEvent struct {
+	DvsEvent
+
+	ProductInfo DistributedVirtualSwitchProductSpec `xml:"productInfo"`
+}
+
+func init() {
+	t["DvsUpgradedEvent"] = reflect.TypeOf((*DvsUpgradedEvent)(nil)).Elem()
+}
+
+type DvsVNicProfile struct {
+	ApplyProfile
+
+	Key string `xml:"key"`
+	IpConfig IpAddressProfile `xml:"ipConfig"`
+}
+
+func init() {
+	t["DvsVNicProfile"] = reflect.TypeOf((*DvsVNicProfile)(nil)).Elem()
+}
+
+type DvsVmVnicNetworkResourcePoolRuntimeInfo struct {
+	DynamicData
+
+	Key string `xml:"key"`
+	Name string `xml:"name,omitempty"`
+	Capacity int `xml:"capacity,omitempty"`
+	Usage int `xml:"usage,omitempty"`
+	Available int `xml:"available,omitempty"`
+	Status string `xml:"status"`
+	AllocatedResource []DvsVnicAllocatedResource `xml:"allocatedResource,omitempty"`
+}
+
+func init() {
+	t["DvsVmVnicNetworkResourcePoolRuntimeInfo"] = reflect.TypeOf((*DvsVmVnicNetworkResourcePoolRuntimeInfo)(nil)).Elem()
+}
+
+type DvsVmVnicResourceAllocation struct {
+	DynamicData
+
+	ReservationQuota int64 `xml:"reservationQuota,omitempty"`
+}
+
+func init() {
+	t["DvsVmVnicResourceAllocation"] = reflect.TypeOf((*DvsVmVnicResourceAllocation)(nil)).Elem()
+}
+
+type DvsVmVnicResourcePoolConfigSpec struct {
+	DynamicData
+
+	Operation string `xml:"operation"`
+	Key string `xml:"key,omitempty"`
+	ConfigVersion string `xml:"configVersion,omitempty"`
+	AllocationInfo *DvsVmVnicResourceAllocation `xml:"allocationInfo,omitempty"`
+	Name string `xml:"name,omitempty"`
+	Description string `xml:"description,omitempty"`
+}
+
+func init() {
+	t["DvsVmVnicResourcePoolConfigSpec"] = reflect.TypeOf((*DvsVmVnicResourcePoolConfigSpec)(nil)).Elem()
+}
+
+type DvsVnicAllocatedResource struct {
+	DynamicData
+
+	Vm ManagedObjectReference `xml:"vm"`
+	VnicKey string `xml:"vnicKey"`
+	Reservation int64 `xml:"reservation,omitempty"`
+}
+
+func init() {
+	t["DvsVnicAllocatedResource"] = reflect.TypeOf((*DvsVnicAllocatedResource)(nil)).Elem()
+}
+
+type DynamicArray struct {
+	Val []AnyType `xml:"val,typeattr"`
+}
+
+func init() {
+	t["DynamicArray"] = reflect.TypeOf((*DynamicArray)(nil)).Elem()
+}
+
+type DynamicData struct {
+}
+
+func init() {
+	t["DynamicData"] = reflect.TypeOf((*DynamicData)(nil)).Elem()
+}
+
+type DynamicProperty struct {
+	Name string `xml:"name"`
+	Val AnyType `xml:"val,typeattr"`
+}
+
+func init() {
+	t["DynamicProperty"] = reflect.TypeOf((*DynamicProperty)(nil)).Elem()
+}
+
+type EVCAdmissionFailed struct {
+	NotSupportedHostInCluster
+
+	Faults []LocalizedMethodFault `xml:"faults,omitempty"`
+}
+
+func init() {
+	t["EVCAdmissionFailed"] = reflect.TypeOf((*EVCAdmissionFailed)(nil)).Elem()
+}
+
+type EVCAdmissionFailedCPUFeaturesForMode struct {
+	EVCAdmissionFailed
+
+	CurrentEVCModeKey string `xml:"currentEVCModeKey"`
+}
+
+func init() {
+	t["EVCAdmissionFailedCPUFeaturesForMode"] = reflect.TypeOf((*EVCAdmissionFailedCPUFeaturesForMode)(nil)).Elem()
+}
+
+type EVCAdmissionFailedCPUFeaturesForModeFault EVCAdmissionFailedCPUFeaturesForMode
+
+func init() {
+	t["EVCAdmissionFailedCPUFeaturesForModeFault"] = reflect.TypeOf((*EVCAdmissionFailedCPUFeaturesForModeFault)(nil)).Elem()
+}
+
+type EVCAdmissionFailedCPUModel struct {
+	EVCAdmissionFailed
+}
+
+func init() {
+	t["EVCAdmissionFailedCPUModel"] = reflect.TypeOf((*EVCAdmissionFailedCPUModel)(nil)).Elem()
+}
+
+type EVCAdmissionFailedCPUModelFault EVCAdmissionFailedCPUModel
+
+func init() {
+	t["EVCAdmissionFailedCPUModelFault"] = reflect.TypeOf((*EVCAdmissionFailedCPUModelFault)(nil)).Elem()
+}
+
+type EVCAdmissionFailedCPUModelForMode struct {
+	EVCAdmissionFailed
+
+	CurrentEVCModeKey string `xml:"currentEVCModeKey"`
+}
+
+func init() {
+	t["EVCAdmissionFailedCPUModelForMode"] = reflect.TypeOf((*EVCAdmissionFailedCPUModelForMode)(nil)).Elem()
+}
+
+type EVCAdmissionFailedCPUModelForModeFault EVCAdmissionFailedCPUModelForMode
+
+func init() {
+	t["EVCAdmissionFailedCPUModelForModeFault"] = reflect.TypeOf((*EVCAdmissionFailedCPUModelForModeFault)(nil)).Elem()
+}
+
+type EVCAdmissionFailedCPUVendor struct {
+	EVCAdmissionFailed
+
+	ClusterCPUVendor string `xml:"clusterCPUVendor"`
+	HostCPUVendor string `xml:"hostCPUVendor"`
+}
+
+func init() {
+	t["EVCAdmissionFailedCPUVendor"] = reflect.TypeOf((*EVCAdmissionFailedCPUVendor)(nil)).Elem()
+}
+
+type EVCAdmissionFailedCPUVendorFault EVCAdmissionFailedCPUVendor
+
+func init() {
+	t["EVCAdmissionFailedCPUVendorFault"] = reflect.TypeOf((*EVCAdmissionFailedCPUVendorFault)(nil)).Elem()
+}
+
+type EVCAdmissionFailedCPUVendorUnknown struct {
+	EVCAdmissionFailed
+}
+
+func init() {
+	t["EVCAdmissionFailedCPUVendorUnknown"] = reflect.TypeOf((*EVCAdmissionFailedCPUVendorUnknown)(nil)).Elem()
+}
+
+type EVCAdmissionFailedCPUVendorUnknownFault EVCAdmissionFailedCPUVendorUnknown
+
+func init() {
+	t["EVCAdmissionFailedCPUVendorUnknownFault"] = reflect.TypeOf((*EVCAdmissionFailedCPUVendorUnknownFault)(nil)).Elem()
+}
+
+type EVCAdmissionFailedFault BaseEVCAdmissionFailed
+
+func init() {
+	t["EVCAdmissionFailedFault"] = reflect.TypeOf((*EVCAdmissionFailedFault)(nil)).Elem()
+}
+
+type EVCAdmissionFailedHostDisconnected struct {
+	EVCAdmissionFailed
+}
+
+func init() {
+	t["EVCAdmissionFailedHostDisconnected"] = reflect.TypeOf((*EVCAdmissionFailedHostDisconnected)(nil)).Elem()
+}
+
+type EVCAdmissionFailedHostDisconnectedFault EVCAdmissionFailedHostDisconnected
+
+func init() {
+	t["EVCAdmissionFailedHostDisconnectedFault"] = reflect.TypeOf((*EVCAdmissionFailedHostDisconnectedFault)(nil)).Elem()
+}
+
+type EVCAdmissionFailedHostSoftware struct {
+	EVCAdmissionFailed
+}
+
+func init() {
+	t["EVCAdmissionFailedHostSoftware"] = reflect.TypeOf((*EVCAdmissionFailedHostSoftware)(nil)).Elem()
+}
+
+type EVCAdmissionFailedHostSoftwareFault EVCAdmissionFailedHostSoftware
+
+func init() {
+	t["EVCAdmissionFailedHostSoftwareFault"] = reflect.TypeOf((*EVCAdmissionFailedHostSoftwareFault)(nil)).Elem()
+}
+
+type EVCAdmissionFailedHostSoftwareForMode struct {
+	EVCAdmissionFailed
+}
+
+func init() {
+	t["EVCAdmissionFailedHostSoftwareForMode"] = reflect.TypeOf((*EVCAdmissionFailedHostSoftwareForMode)(nil)).Elem()
+}
+
+type EVCAdmissionFailedHostSoftwareForModeFault EVCAdmissionFailedHostSoftwareForMode
+
+func init() {
+	t["EVCAdmissionFailedHostSoftwareForModeFault"] = reflect.TypeOf((*EVCAdmissionFailedHostSoftwareForModeFault)(nil)).Elem()
+}
+
+type EVCAdmissionFailedVmActive struct {
+	EVCAdmissionFailed
+}
+
+func init() {
+	t["EVCAdmissionFailedVmActive"] = reflect.TypeOf((*EVCAdmissionFailedVmActive)(nil)).Elem()
+}
+
+type EVCAdmissionFailedVmActiveFault EVCAdmissionFailedVmActive
+
+func init() {
+	t["EVCAdmissionFailedVmActiveFault"] = reflect.TypeOf((*EVCAdmissionFailedVmActiveFault)(nil)).Elem()
+}
+
+type EVCConfigFault struct {
+	VimFault
+
+	Faults []LocalizedMethodFault `xml:"faults,omitempty"`
+}
+
+func init() {
+	t["EVCConfigFault"] = reflect.TypeOf((*EVCConfigFault)(nil)).Elem()
+}
+
+type EVCConfigFaultFault BaseEVCConfigFault
+
+func init() {
+	t["EVCConfigFaultFault"] = reflect.TypeOf((*EVCConfigFaultFault)(nil)).Elem()
+}
+
+type EVCMode struct {
+	ElementDescription
+
+	GuaranteedCPUFeatures []HostCpuIdInfo `xml:"guaranteedCPUFeatures,omitempty"`
+	FeatureCapability []HostFeatureCapability `xml:"featureCapability,omitempty"`
+	FeatureMask []HostFeatureMask `xml:"featureMask,omitempty"`
+	FeatureRequirement []VirtualMachineFeatureRequirement `xml:"featureRequirement,omitempty"`
+	Vendor string `xml:"vendor"`
+	Track []string `xml:"track,omitempty"`
+	VendorTier int `xml:"vendorTier"`
+}
+
+func init() {
+	t["EVCMode"] = reflect.TypeOf((*EVCMode)(nil)).Elem()
+}
+
+type EVCModeIllegalByVendor struct {
+	EVCConfigFault
+
+	ClusterCPUVendor string `xml:"clusterCPUVendor"`
+	ModeCPUVendor string `xml:"modeCPUVendor"`
+}
+
+func init() {
+	t["EVCModeIllegalByVendor"] = reflect.TypeOf((*EVCModeIllegalByVendor)(nil)).Elem()
+}
+
+type EVCModeIllegalByVendorFault EVCModeIllegalByVendor
+
+func init() {
+	t["EVCModeIllegalByVendorFault"] = reflect.TypeOf((*EVCModeIllegalByVendorFault)(nil)).Elem()
+}
+
+type EVCModeUnsupportedByHosts struct {
+	EVCConfigFault
+
+	EvcMode string `xml:"evcMode,omitempty"`
+	Host []ManagedObjectReference `xml:"host,omitempty"`
+	HostName []string `xml:"hostName,omitempty"`
+}
+
+func init() {
+	t["EVCModeUnsupportedByHosts"] = reflect.TypeOf((*EVCModeUnsupportedByHosts)(nil)).Elem()
+}
+
+type EVCModeUnsupportedByHostsFault EVCModeUnsupportedByHosts
+
+func init() {
+	t["EVCModeUnsupportedByHostsFault"] = reflect.TypeOf((*EVCModeUnsupportedByHostsFault)(nil)).Elem()
+}
+
+type EVCUnsupportedByHostHardware struct {
+	EVCConfigFault
+
+	Host []ManagedObjectReference `xml:"host"`
+	HostName []string `xml:"hostName"`
+}
+
+func init() {
+	t["EVCUnsupportedByHostHardware"] = reflect.TypeOf((*EVCUnsupportedByHostHardware)(nil)).Elem()
+}
+
+type EVCUnsupportedByHostHardwareFault EVCUnsupportedByHostHardware
+
+func init() {
+	t["EVCUnsupportedByHostHardwareFault"] = reflect.TypeOf((*EVCUnsupportedByHostHardwareFault)(nil)).Elem()
+}
+
+type EVCUnsupportedByHostSoftware struct {
+	EVCConfigFault
+
+	Host []ManagedObjectReference `xml:"host"`
+	HostName []string `xml:"hostName"`
+}
+
+func init() {
+	t["EVCUnsupportedByHostSoftware"] = reflect.TypeOf((*EVCUnsupportedByHostSoftware)(nil)).Elem()
+}
+
+type EVCUnsupportedByHostSoftwareFault EVCUnsupportedByHostSoftware
+
+func init() {
+	t["EVCUnsupportedByHostSoftwareFault"] = reflect.TypeOf((*EVCUnsupportedByHostSoftwareFault)(nil)).Elem()
+}
+
+type EagerZeroVirtualDiskRequestType struct {
+	This ManagedObjectReference `xml:"_this"`
+	Name string `xml:"name"`
+	Datacenter *ManagedObjectReference `xml:"datacenter,omitempty"`
+}
+
+func init() {
+	t["EagerZeroVirtualDiskRequestType"] = reflect.TypeOf((*EagerZeroVirtualDiskRequestType)(nil)).Elem()
+}
+
+type EagerZeroVirtualDisk_Task EagerZeroVirtualDiskRequestType
+
+func init() {
+	t["EagerZeroVirtualDisk_Task"] = reflect.TypeOf((*EagerZeroVirtualDisk_Task)(nil)).Elem()
+}
+
+type EagerZeroVirtualDisk_TaskResponse struct {
+	Returnval ManagedObjectReference `xml:"returnval"`
+}
+
+type EightHostLimitViolated struct {
+	VmConfigFault
+}
+
+func init() {
+	t["EightHostLimitViolated"] = reflect.TypeOf((*EightHostLimitViolated)(nil)).Elem()
+}
+
+type EightHostLimitViolatedFault EightHostLimitViolated
+
+func init() {
+	t["EightHostLimitViolatedFault"] = reflect.TypeOf((*EightHostLimitViolatedFault)(nil)).Elem()
+}
+
+type ElementDescription struct {
+	Description
+
+	Key string `xml:"key"`
+}
+
+func init() {
+	t["ElementDescription"] = reflect.TypeOf((*ElementDescription)(nil)).Elem()
+}
+
+type EnableAlarmActions EnableAlarmActionsRequestType
+
+func init() {
+	t["EnableAlarmActions"] = reflect.TypeOf((*EnableAlarmActions)(nil)).Elem()
+}
+
+type EnableAlarmActionsRequestType struct {
+	This ManagedObjectReference `xml:"_this"`
+	Entity ManagedObjectReference `xml:"entity"`
+	Enabled bool `xml:"enabled"`
+}
+
+func init() {
+	t["EnableAlarmActionsRequestType"] = reflect.TypeOf((*EnableAlarmActionsRequestType)(nil)).Elem()
+}
+
+type EnableAlarmActionsResponse struct {
+}
+
+type EnableFeature EnableFeatureRequestType
+
+func init() {
+	t["EnableFeature"] = reflect.TypeOf((*EnableFeature)(nil)).Elem()
+}
+
+type EnableFeatureRequestType struct {
+	This ManagedObjectReference `xml:"_this"`
+	Host *ManagedObjectReference `xml:"host,omitempty"`
+	FeatureKey string `xml:"featureKey"`
+}
+
+func init() {
+	t["EnableFeatureRequestType"] = reflect.TypeOf((*EnableFeatureRequestType)(nil)).Elem()
+}
+
+type EnableFeatureResponse struct {
+	Returnval bool `xml:"returnval"`
+}
+
+type EnableHyperThreading EnableHyperThreadingRequestType
+
+func init() {
+	t["EnableHyperThreading"] = reflect.TypeOf((*EnableHyperThreading)(nil)).Elem()
+}
+
+type EnableHyperThreadingRequestType struct {
+	This ManagedObjectReference `xml:"_this"`
+}
+
+func init() {
+	t["EnableHyperThreadingRequestType"] = reflect.TypeOf((*EnableHyperThreadingRequestType)(nil)).Elem()
+}
+
+type EnableHyperThreadingResponse struct {
+}
+
+type EnableMultipathPath EnableMultipathPathRequestType
+
+func init() {
+	t["EnableMultipathPath"] = reflect.TypeOf((*EnableMultipathPath)(nil)).Elem()
+}
+
+type EnableMultipathPathRequestType struct {
+	This ManagedObjectReference `xml:"_this"`
+	PathName string `xml:"pathName"`
+}
+
+func init() {
+	t["EnableMultipathPathRequestType"] = reflect.TypeOf((*EnableMultipathPathRequestType)(nil)).Elem()
+}
+
+type EnableMultipathPathResponse struct {
+}
+
+type EnableNetworkResourceManagement EnableNetworkResourceManagementRequestType
+
+func init() {
+	t["EnableNetworkResourceManagement"] = reflect.TypeOf((*EnableNetworkResourceManagement)(nil)).Elem()
+}
+
+type EnableNetworkResourceManagementRequestType struct {
+	This ManagedObjectReference `xml:"_this"`
+	Enable bool `xml:"enable"`
+}
+
+func init() {
+	t["EnableNetworkResourceManagementRequestType"] = reflect.TypeOf((*EnableNetworkResourceManagementRequestType)(nil)).Elem()
+}
+
+type EnableNetworkResourceManagementResponse struct {
+}
+
+type EnableRuleset EnableRulesetRequestType
+
+func init() {
+	t["EnableRuleset"] = reflect.TypeOf((*EnableRuleset)(nil)).Elem()
+}
+
+type EnableRulesetRequestType struct {
+	This ManagedObjectReference `xml:"_this"`
+	Id string `xml:"id"`
+}
+
+func init() {
+	t["EnableRulesetRequestType"] = reflect.TypeOf((*EnableRulesetRequestType)(nil)).Elem()
+}
+
+type EnableRulesetResponse struct {
+}
+
+type EnableSecondaryVMRequestType struct {
+	This ManagedObjectReference `xml:"_this"`
+	Vm ManagedObjectReference `xml:"vm"`
+	Host *ManagedObjectReference `xml:"host,omitempty"`
+}
+
+func init() {
+	t["EnableSecondaryVMRequestType"] = reflect.TypeOf((*EnableSecondaryVMRequestType)(nil)).Elem()
+}
+
+type EnableSecondaryVM_Task EnableSecondaryVMRequestType
+
+func init() {
+	t["EnableSecondaryVM_Task"] = reflect.TypeOf((*EnableSecondaryVM_Task)(nil)).Elem()
+}
+
+type EnableSecondaryVM_TaskResponse struct {
+	Returnval ManagedObjectReference `xml:"returnval"`
+}
+
+type EnableSmartCardAuthentication EnableSmartCardAuthenticationRequestType
+
+func init() {
+	t["EnableSmartCardAuthentication"] = reflect.TypeOf((*EnableSmartCardAuthentication)(nil)).Elem()
+}
+
+type EnableSmartCardAuthenticationRequestType struct {
+	This ManagedObjectReference `xml:"_this"`
+}
+
+func init() {
+	t["EnableSmartCardAuthenticationRequestType"] = reflect.TypeOf((*EnableSmartCardAuthenticationRequestType)(nil)).Elem()
+}
+
+type EnableSmartCardAuthenticationResponse struct {
+}
+
+type EnterLockdownMode EnterLockdownModeRequestType
+
+func init() {
+	t["EnterLockdownMode"] = reflect.TypeOf((*EnterLockdownMode)(nil)).Elem()
+}
+
+type EnterLockdownModeRequestType struct {
+	This ManagedObjectReference `xml:"_this"`
+}
+
+func init() {
+	t["EnterLockdownModeRequestType"] = reflect.TypeOf((*EnterLockdownModeRequestType)(nil)).Elem()
+}
+
+type EnterLockdownModeResponse struct {
+}
+
+type EnterMaintenanceModeRequestType struct {
+	This ManagedObjectReference `xml:"_this"`
+	Timeout int `xml:"timeout"`
+	EvacuatePoweredOffVms *bool `xml:"evacuatePoweredOffVms"`
+	MaintenanceSpec *HostMaintenanceSpec `xml:"maintenanceSpec,omitempty"`
+}
+
+func init() {
+	t["EnterMaintenanceModeRequestType"] = reflect.TypeOf((*EnterMaintenanceModeRequestType)(nil)).Elem()
+}
+
+type EnterMaintenanceMode_Task EnterMaintenanceModeRequestType
+
+func init() {
+	t["EnterMaintenanceMode_Task"] = reflect.TypeOf((*EnterMaintenanceMode_Task)(nil)).Elem()
+}
+
+type EnterMaintenanceMode_TaskResponse struct {
+	Returnval ManagedObjectReference `xml:"returnval"`
+}
+
+type EnteredMaintenanceModeEvent struct {
+	HostEvent
+}
+
+func init() {
+	t["EnteredMaintenanceModeEvent"] = reflect.TypeOf((*EnteredMaintenanceModeEvent)(nil)).Elem()
+}
+
+type EnteredStandbyModeEvent struct {
+	HostEvent
+}
+
+func init() {
+	t["EnteredStandbyModeEvent"] = reflect.TypeOf((*EnteredStandbyModeEvent)(nil)).Elem()
+}
+
+type EnteringMaintenanceModeEvent struct {
+	HostEvent
+}
+
+func init() {
+	t["EnteringMaintenanceModeEvent"] = reflect.TypeOf((*EnteringMaintenanceModeEvent)(nil)).Elem()
+}
+
+type EnteringStandbyModeEvent struct {
+	HostEvent
+}
+
+func init() {
+	t["EnteringStandbyModeEvent"] = reflect.TypeOf((*EnteringStandbyModeEvent)(nil)).Elem()
+}
+
+type EntityBackup struct {
+	DynamicData
+}
+
+func init() {
+	t["EntityBackup"] = reflect.TypeOf((*EntityBackup)(nil)).Elem()
+}
+
+type EntityBackupConfig struct {
+	DynamicData
+
+	EntityType string `xml:"entityType"`
+	ConfigBlob []byte `xml:"configBlob"`
+	Key string `xml:"key,omitempty"`
+	Name string `xml:"name,omitempty"`
+	Container *ManagedObjectReference `xml:"container,omitempty"`
+	ConfigVersion string `xml:"configVersion,omitempty"`
+}
+
+func init() {
+	t["EntityBackupConfig"] = reflect.TypeOf((*EntityBackupConfig)(nil)).Elem()
+}
+
+type EntityEventArgument struct {
+	EventArgument
+
+	Name string `xml:"name"`
+}
+
+func init() {
+	t["EntityEventArgument"] = reflect.TypeOf((*EntityEventArgument)(nil)).Elem()
+}
+
+type EntityPrivilege struct {
+	DynamicData
+
+	Entity ManagedObjectReference `xml:"entity"`
+	PrivAvailability []PrivilegeAvailability `xml:"privAvailability"`
+}
+
+func init() {
+	t["EntityPrivilege"] = reflect.TypeOf((*EntityPrivilege)(nil)).Elem()
+}
+
+type EnumDescription struct {
+	DynamicData
+
+	Key string `xml:"key"`
+	Tags []BaseElementDescription `xml:"tags,typeattr"`
+}
+
+func init() {
+	t["EnumDescription"] = reflect.TypeOf((*EnumDescription)(nil)).Elem()
+}
+
+type EnvironmentBrowserConfigOptionQuerySpec struct {
+	DynamicData
+
+	Key string `xml:"key,omitempty"`
+	Host *ManagedObjectReference `xml:"host,omitempty"`
+	GuestId []string `xml:"guestId,omitempty"`
+}
+
+func init() {
+	t["EnvironmentBrowserConfigOptionQuerySpec"] = reflect.TypeOf((*EnvironmentBrowserConfigOptionQuerySpec)(nil)).Elem()
+}
+
+type ErrorUpgradeEvent struct {
+	UpgradeEvent
+}
+
+func init() {
+	t["ErrorUpgradeEvent"] = reflect.TypeOf((*ErrorUpgradeEvent)(nil)).Elem()
+}
+
+type EstimateDatabaseSize EstimateDatabaseSizeRequestType
+
+func init() {
+	t["EstimateDatabaseSize"] = reflect.TypeOf((*EstimateDatabaseSize)(nil)).Elem()
+}
+
+type EstimateDatabaseSizeRequestType struct {
+	This ManagedObjectReference `xml:"_this"`
+	DbSizeParam DatabaseSizeParam `xml:"dbSizeParam"`
+}
+
+func init() {
+	t["EstimateDatabaseSizeRequestType"] = reflect.TypeOf((*EstimateDatabaseSizeRequestType)(nil)).Elem()
+}
+
+type EstimateDatabaseSizeResponse struct {
+	Returnval DatabaseSizeEstimate `xml:"returnval"`
+}
+
+type EstimateStorageForConsolidateSnapshotsRequestType struct {
+	This ManagedObjectReference `xml:"_this"`
+}
+
+func init() {
+	t["EstimateStorageForConsolidateSnapshotsRequestType"] = reflect.TypeOf((*EstimateStorageForConsolidateSnapshotsRequestType)(nil)).Elem()
+}
+
+type EstimateStorageForConsolidateSnapshots_Task EstimateStorageForConsolidateSnapshotsRequestType
+
+func init() {
+	t["EstimateStorageForConsolidateSnapshots_Task"] = reflect.TypeOf((*EstimateStorageForConsolidateSnapshots_Task)(nil)).Elem()
+}
+
+type EstimateStorageForConsolidateSnapshots_TaskResponse struct {
+	Returnval ManagedObjectReference `xml:"returnval"`
+}
+
+type EsxAgentHostManagerUpdateConfig EsxAgentHostManagerUpdateConfigRequestType
+
+func init() {
+	t["EsxAgentHostManagerUpdateConfig"] = reflect.TypeOf((*EsxAgentHostManagerUpdateConfig)(nil)).Elem()
+}
+
+type EsxAgentHostManagerUpdateConfigRequestType struct {
+	This ManagedObjectReference `xml:"_this"`
+	ConfigInfo HostEsxAgentHostManagerConfigInfo `xml:"configInfo"`
+}
+
+func init() {
+	t["EsxAgentHostManagerUpdateConfigRequestType"] = reflect.TypeOf((*EsxAgentHostManagerUpdateConfigRequestType)(nil)).Elem()
+}
+
+type EsxAgentHostManagerUpdateConfigResponse struct {
+}
+
+type EvacuateVsanNodeRequestType struct {
+	This ManagedObjectReference `xml:"_this"`
+	MaintenanceSpec HostMaintenanceSpec `xml:"maintenanceSpec"`
+	Timeout int `xml:"timeout"`
+}
+
+func init() {
+	t["EvacuateVsanNodeRequestType"] = reflect.TypeOf((*EvacuateVsanNodeRequestType)(nil)).Elem()
+}
+
+type EvacuateVsanNode_Task EvacuateVsanNodeRequestType
+
+func init() {
+	t["EvacuateVsanNode_Task"] = reflect.TypeOf((*EvacuateVsanNode_Task)(nil)).Elem()
+}
+
+type EvacuateVsanNode_TaskResponse struct {
+	Returnval ManagedObjectReference `xml:"returnval"`
+}
+
+type EvaluationLicenseSource struct {
+	LicenseSource
+
+	RemainingHours int64 `xml:"remainingHours,omitempty"`
+}
+
+func init() {
+	t["EvaluationLicenseSource"] = reflect.TypeOf((*EvaluationLicenseSource)(nil)).Elem()
+}
+
+type EvcManager EvcManagerRequestType
+
+func init() {
+	t["EvcManager"] = reflect.TypeOf((*EvcManager)(nil)).Elem()
+}
+
+type EvcManagerRequestType struct {
+	This ManagedObjectReference `xml:"_this"`
+}
+
+func init() {
+	t["EvcManagerRequestType"] = reflect.TypeOf((*EvcManagerRequestType)(nil)).Elem()
+}
+
+type EvcManagerResponse struct {
+	Returnval *ManagedObjectReference `xml:"returnval,omitempty"`
+}
+
+type Event struct {
+	DynamicData
+
+	Key int `xml:"key"`
+	ChainId int `xml:"chainId"`
+	CreatedTime time.Time `xml:"createdTime"`
+	UserName string `xml:"userName"`
+	Datacenter *DatacenterEventArgument `xml:"datacenter,omitempty"`
+	ComputeResource *ComputeResourceEventArgument `xml:"computeResource,omitempty"`
+	Host *HostEventArgument `xml:"host,omitempty"`
+	Vm *VmEventArgument `xml:"vm,omitempty"`
+	Ds *DatastoreEventArgument `xml:"ds,omitempty"`
+	Net *NetworkEventArgument `xml:"net,omitempty"`
+	Dvs *DvsEventArgument `xml:"dvs,omitempty"`
+	FullFormattedMessage string `xml:"fullFormattedMessage,omitempty"`
+	ChangeTag string `xml:"changeTag,omitempty"`
+}
+
+func init() {
+	t["Event"] = reflect.TypeOf((*Event)(nil)).Elem()
+}
+
+type EventAlarmExpression struct {
+	AlarmExpression
+
+	Comparisons []EventAlarmExpressionComparison `xml:"comparisons,omitempty"`
+	EventType string `xml:"eventType"`
+	EventTypeId string `xml:"eventTypeId,omitempty"`
+	ObjectType string `xml:"objectType,omitempty"`
+	Status ManagedEntityStatus `xml:"status,omitempty"`
+}
+
+func init() {
+	t["EventAlarmExpression"] = reflect.TypeOf((*EventAlarmExpression)(nil)).Elem()
+}
+
+type EventAlarmExpressionComparison struct {
+	DynamicData
+
+	AttributeName string `xml:"attributeName"`
+	Operator string `xml:"operator"`
+	Value string `xml:"value"`
+}
+
+func init() {
+	t["EventAlarmExpressionComparison"] = reflect.TypeOf((*EventAlarmExpressionComparison)(nil)).Elem()
+}
+
+type EventArgDesc struct {
+	DynamicData
+
+	Name string `xml:"name"`
+	Type string `xml:"type"`
+	Description BaseElementDescription `xml:"description,omitempty,typeattr"`
+}
+
+func init() {
+	t["EventArgDesc"] = reflect.TypeOf((*EventArgDesc)(nil)).Elem()
+}
+
+type EventArgument struct {
+	DynamicData
+}
+
+func init() {
+	t["EventArgument"] = reflect.TypeOf((*EventArgument)(nil)).Elem()
+}
+
+type EventDescription struct {
+	DynamicData
+
+	Category []BaseElementDescription `xml:"category,typeattr"`
+	EventInfo []EventDescriptionEventDetail `xml:"eventInfo"`
+	EnumeratedTypes []EnumDescription `xml:"enumeratedTypes,omitempty"`
+}
+
+func init() {
+	t["EventDescription"] = reflect.TypeOf((*EventDescription)(nil)).Elem()
+}
+
+type EventDescriptionEventDetail struct {
+	DynamicData
+
+	Key string `xml:"key"`
+	Description string `xml:"description,omitempty"`
+	Category string `xml:"category"`
+	FormatOnDatacenter string `xml:"formatOnDatacenter"`
+	FormatOnComputeResource string `xml:"formatOnComputeResource"`
+	FormatOnHost string `xml:"formatOnHost"`
+	FormatOnVm string `xml:"formatOnVm"`
+	FullFormat string `xml:"fullFormat"`
+	LongDescription string `xml:"longDescription,omitempty"`
+}
+
+func init() {
+	t["EventDescriptionEventDetail"] = reflect.TypeOf((*EventDescriptionEventDetail)(nil)).Elem()
+}
+
+type EventEx struct {
+	Event
+
+	EventTypeId string `xml:"eventTypeId"`
+	Severity string `xml:"severity,omitempty"`
+	Message string `xml:"message,omitempty"`
+	Arguments []KeyAnyValue `xml:"arguments,omitempty"`
+	ObjectId string `xml:"objectId,omitempty"`
+	ObjectType string `xml:"objectType,omitempty"`
+	ObjectName string `xml:"objectName,omitempty"`
+	Fault *LocalizedMethodFault `xml:"fault,omitempty"`
+}
+
+func init() {
+	t["EventEx"] = reflect.TypeOf((*EventEx)(nil)).Elem()
+}
+
+type EventFilterSpec struct {
+	DynamicData
+
+	Entity *EventFilterSpecByEntity `xml:"entity,omitempty"`
+	Time *EventFilterSpecByTime `xml:"time,omitempty"`
+	UserName *EventFilterSpecByUsername `xml:"userName,omitempty"`
+	EventChainId int `xml:"eventChainId,omitempty"`
+	Alarm *ManagedObjectReference `xml:"alarm,omitempty"`
+	ScheduledTask *ManagedObjectReference `xml:"scheduledTask,omitempty"`
+	DisableFullMessage *bool `xml:"disableFullMessage"`
+	Category []string `xml:"category,omitempty"`
+	Type []string `xml:"type,omitempty"`
+	Tag []string `xml:"tag,omitempty"`
+	EventTypeId []string `xml:"eventTypeId,omitempty"`
+}
+
+func init() {
+	t["EventFilterSpec"] = reflect.TypeOf((*EventFilterSpec)(nil)).Elem()
+}
+
+type EventFilterSpecByEntity struct {
+	DynamicData
+
+	Entity ManagedObjectReference `xml:"entity"`
+	Recursion EventFilterSpecRecursionOption `xml:"recursion"`
+}
+
+func init() {
+	t["EventFilterSpecByEntity"] = reflect.TypeOf((*EventFilterSpecByEntity)(nil)).Elem()
+}
+
+type EventFilterSpecByTime struct {
+	DynamicData
+
+	BeginTime *time.Time `xml:"beginTime"`
+	EndTime *time.Time `xml:"endTime"`
+}
+
+func init() {
+	t["EventFilterSpecByTime"] = reflect.TypeOf((*EventFilterSpecByTime)(nil)).Elem()
+}
+
+type EventFilterSpecByUsername struct {
+	DynamicData
+
+	SystemUser bool `xml:"systemUser"`
+	UserList []string `xml:"userList,omitempty"`
+}
+
+func init() {
+	t["EventFilterSpecByUsername"] = reflect.TypeOf((*EventFilterSpecByUsername)(nil)).Elem()
+}
+
+type ExecuteHostProfile ExecuteHostProfileRequestType
+
+func init() {
+	t["ExecuteHostProfile"] = reflect.TypeOf((*ExecuteHostProfile)(nil)).Elem()
+}
+
+type ExecuteHostProfileRequestType struct {
+	This ManagedObjectReference `xml:"_this"`
+	Host ManagedObjectReference `xml:"host"`
+	DeferredParam []ProfileDeferredPolicyOptionParameter `xml:"deferredParam,omitempty"`
+}
+
+func init() {
+	t["ExecuteHostProfileRequestType"] = reflect.TypeOf((*ExecuteHostProfileRequestType)(nil)).Elem()
+}
+
+type ExecuteHostProfileResponse struct {
+	Returnval ProfileExecuteResult `xml:"returnval"`
+}
+
+type ExecuteSimpleCommand ExecuteSimpleCommandRequestType
+
+func init() {
+	t["ExecuteSimpleCommand"] = reflect.TypeOf((*ExecuteSimpleCommand)(nil)).Elem()
+}
+
+type ExecuteSimpleCommandRequestType struct {
+	This ManagedObjectReference `xml:"_this"`
+	Arguments []string `xml:"arguments,omitempty"`
+}
+
+func init() {
+	t["ExecuteSimpleCommandRequestType"] = reflect.TypeOf((*ExecuteSimpleCommandRequestType)(nil)).Elem()
+}
+
+type ExecuteSimpleCommandResponse struct {
+	Returnval string `xml:"returnval"`
+}
+
+type ExitLockdownMode ExitLockdownModeRequestType
+
+func init() {
+	t["ExitLockdownMode"] = reflect.TypeOf((*ExitLockdownMode)(nil)).Elem()
+}
+
+type ExitLockdownModeRequestType struct {
+	This ManagedObjectReference `xml:"_this"`
+}
+
+func init() {
+	t["ExitLockdownModeRequestType"] = reflect.TypeOf((*ExitLockdownModeRequestType)(nil)).Elem()
+}
+
+type ExitLockdownModeResponse struct {
+}
+
+type ExitMaintenanceModeEvent struct {
+	HostEvent
+}
+
+func init() {
+	t["ExitMaintenanceModeEvent"] = reflect.TypeOf((*ExitMaintenanceModeEvent)(nil)).Elem()
+}
+
+type ExitMaintenanceModeRequestType struct {
+	This ManagedObjectReference `xml:"_this"`
+	Timeout int `xml:"timeout"`
+}
+
+func init() {
+	t["ExitMaintenanceModeRequestType"] = reflect.TypeOf((*ExitMaintenanceModeRequestType)(nil)).Elem()
+}
+
+type ExitMaintenanceMode_Task ExitMaintenanceModeRequestType
+
+func init() {
+	t["ExitMaintenanceMode_Task"] = reflect.TypeOf((*ExitMaintenanceMode_Task)(nil)).Elem()
+}
+
+type ExitMaintenanceMode_TaskResponse struct {
+	Returnval ManagedObjectReference `xml:"returnval"`
+}
+
+type ExitStandbyModeFailedEvent struct {
+	HostEvent
+}
+
+func init() {
+	t["ExitStandbyModeFailedEvent"] = reflect.TypeOf((*ExitStandbyModeFailedEvent)(nil)).Elem()
+}
+
+type ExitedStandbyModeEvent struct {
+	HostEvent
+}
+
+func init() {
+	t["ExitedStandbyModeEvent"] = reflect.TypeOf((*ExitedStandbyModeEvent)(nil)).Elem()
+}
+
+type ExitingStandbyModeEvent struct {
+	HostEvent
+}
+
+func init() {
+	t["ExitingStandbyModeEvent"] = reflect.TypeOf((*ExitingStandbyModeEvent)(nil)).Elem()
+}
+
+type ExpandVmfsDatastore ExpandVmfsDatastoreRequestType
+
+func init() {
+	t["ExpandVmfsDatastore"] = reflect.TypeOf((*ExpandVmfsDatastore)(nil)).Elem()
+}
+
+type ExpandVmfsDatastoreRequestType struct {
+	This ManagedObjectReference `xml:"_this"`
+	Datastore ManagedObjectReference `xml:"datastore"`
+	Spec VmfsDatastoreExpandSpec `xml:"spec"`
+}
+
+func init() {
+	t["ExpandVmfsDatastoreRequestType"] = reflect.TypeOf((*ExpandVmfsDatastoreRequestType)(nil)).Elem()
+}
+
+type ExpandVmfsDatastoreResponse struct {
+	Returnval ManagedObjectReference `xml:"returnval"`
+}
+
+type ExpandVmfsExtent ExpandVmfsExtentRequestType
+
+func init() {
+	t["ExpandVmfsExtent"] = reflect.TypeOf((*ExpandVmfsExtent)(nil)).Elem()
+}
+
+type ExpandVmfsExtentRequestType struct {
+	This ManagedObjectReference `xml:"_this"`
+	VmfsPath string `xml:"vmfsPath"`
+	Extent HostScsiDiskPartition `xml:"extent"`
+}
+
+func init() {
+	t["ExpandVmfsExtentRequestType"] = reflect.TypeOf((*ExpandVmfsExtentRequestType)(nil)).Elem()
+}
+
+type ExpandVmfsExtentResponse struct {
+}
+
+type ExpiredAddonLicense struct {
+	ExpiredFeatureLicense
+}
+
+func init() {
+	t["ExpiredAddonLicense"] = reflect.TypeOf((*ExpiredAddonLicense)(nil)).Elem()
+}
+
+type ExpiredAddonLicenseFault ExpiredAddonLicense
+
+func init() {
+	t["ExpiredAddonLicenseFault"] = reflect.TypeOf((*ExpiredAddonLicenseFault)(nil)).Elem()
+}
+
+type ExpiredEditionLicense struct {
+	ExpiredFeatureLicense
+}
+
+func init() {
+	t["ExpiredEditionLicense"] = reflect.TypeOf((*ExpiredEditionLicense)(nil)).Elem()
+}
+
+type ExpiredEditionLicenseFault ExpiredEditionLicense
+
+func init() {
+	t["ExpiredEditionLicenseFault"] = reflect.TypeOf((*ExpiredEditionLicenseFault)(nil)).Elem()
+}
+
+type ExpiredFeatureLicense struct {
+	NotEnoughLicenses
+
+	Feature string `xml:"feature"`
+	Count int `xml:"count"`
+	ExpirationDate time.Time `xml:"expirationDate"`
+}
+
+func init() {
+	t["ExpiredFeatureLicense"] = reflect.TypeOf((*ExpiredFeatureLicense)(nil)).Elem()
+}
+
+type ExpiredFeatureLicenseFault BaseExpiredFeatureLicense
+
+func init() {
+	t["ExpiredFeatureLicenseFault"] = reflect.TypeOf((*ExpiredFeatureLicenseFault)(nil)).Elem()
+}
+
+type ExportAnswerFileRequestType struct {
+	This ManagedObjectReference `xml:"_this"`
+	Host ManagedObjectReference `xml:"host"`
+}
+
+func init() {
+	t["ExportAnswerFileRequestType"] = reflect.TypeOf((*ExportAnswerFileRequestType)(nil)).Elem()
+}
+
+type ExportAnswerFile_Task ExportAnswerFileRequestType
+
+func init() {
+	t["ExportAnswerFile_Task"] = reflect.TypeOf((*ExportAnswerFile_Task)(nil)).Elem()
+}
+
+type ExportAnswerFile_TaskResponse struct {
+	Returnval ManagedObjectReference `xml:"returnval"`
+}
+
+type ExportProfile ExportProfileRequestType
+
+func init() {
+	t["ExportProfile"] = reflect.TypeOf((*ExportProfile)(nil)).Elem()
+}
+
+type ExportProfileRequestType struct {
+	This ManagedObjectReference `xml:"_this"`
+}
+
+func init() {
+	t["ExportProfileRequestType"] = reflect.TypeOf((*ExportProfileRequestType)(nil)).Elem()
+}
+
+type ExportProfileResponse struct {
+	Returnval string `xml:"returnval"`
+}
+
+type ExportSnapshot ExportSnapshotRequestType
+
+func init() {
+	t["ExportSnapshot"] = reflect.TypeOf((*ExportSnapshot)(nil)).Elem()
+}
+
+type ExportSnapshotRequestType struct {
+	This ManagedObjectReference `xml:"_this"`
+}
+
+func init() {
+	t["ExportSnapshotRequestType"] = reflect.TypeOf((*ExportSnapshotRequestType)(nil)).Elem()
+}
+
+type ExportSnapshotResponse struct {
+	Returnval ManagedObjectReference `xml:"returnval"`
+}
+
+type ExportVApp ExportVAppRequestType
+
+func init() {
+	t["ExportVApp"] = reflect.TypeOf((*ExportVApp)(nil)).Elem()
+}
+
+type ExportVAppRequestType struct {
+	This ManagedObjectReference `xml:"_this"`
+}
+
+func init() {
+	t["ExportVAppRequestType"] = reflect.TypeOf((*ExportVAppRequestType)(nil)).Elem()
+}
+
+type ExportVAppResponse struct {
+	Returnval ManagedObjectReference `xml:"returnval"`
+}
+
+type ExportVm ExportVmRequestType
+
+func init() {
+	t["ExportVm"] = reflect.TypeOf((*ExportVm)(nil)).Elem()
+}
+
+type ExportVmRequestType struct {
+	This ManagedObjectReference `xml:"_this"`
+}
+
+func init() {
+	t["ExportVmRequestType"] = reflect.TypeOf((*ExportVmRequestType)(nil)).Elem()
+}
+
+type ExportVmResponse struct {
+	Returnval ManagedObjectReference `xml:"returnval"`
+}
+
+type ExtExtendedProductInfo struct {
+	DynamicData
+
+	CompanyUrl string `xml:"companyUrl,omitempty"`
+	ProductUrl string `xml:"productUrl,omitempty"`
+	ManagementUrl string `xml:"managementUrl,omitempty"`
+	Self *ManagedObjectReference `xml:"self,omitempty"`
+}
+
+func init() {
+	t["ExtExtendedProductInfo"]
= reflect.TypeOf((*ExtExtendedProductInfo)(nil)).Elem() +} + +type ExtManagedEntityInfo struct { + DynamicData + + Type string `xml:"type"` + SmallIconUrl string `xml:"smallIconUrl,omitempty"` + IconUrl string `xml:"iconUrl,omitempty"` + Description string `xml:"description,omitempty"` +} + +func init() { + t["ExtManagedEntityInfo"] = reflect.TypeOf((*ExtManagedEntityInfo)(nil)).Elem() +} + +type ExtSolutionManagerInfo struct { + DynamicData + + Tab []ExtSolutionManagerInfoTabInfo `xml:"tab,omitempty"` + SmallIconUrl string `xml:"smallIconUrl,omitempty"` +} + +func init() { + t["ExtSolutionManagerInfo"] = reflect.TypeOf((*ExtSolutionManagerInfo)(nil)).Elem() +} + +type ExtSolutionManagerInfoTabInfo struct { + DynamicData + + Label string `xml:"label"` + Url string `xml:"url"` +} + +func init() { + t["ExtSolutionManagerInfoTabInfo"] = reflect.TypeOf((*ExtSolutionManagerInfoTabInfo)(nil)).Elem() +} + +type ExtendVffs ExtendVffsRequestType + +func init() { + t["ExtendVffs"] = reflect.TypeOf((*ExtendVffs)(nil)).Elem() +} + +type ExtendVffsRequestType struct { + This ManagedObjectReference `xml:"_this"` + VffsPath string `xml:"vffsPath"` + DevicePath string `xml:"devicePath"` + Spec *HostDiskPartitionSpec `xml:"spec,omitempty"` +} + +func init() { + t["ExtendVffsRequestType"] = reflect.TypeOf((*ExtendVffsRequestType)(nil)).Elem() +} + +type ExtendVffsResponse struct { +} + +type ExtendVirtualDiskRequestType struct { + This ManagedObjectReference `xml:"_this"` + Name string `xml:"name"` + Datacenter *ManagedObjectReference `xml:"datacenter,omitempty"` + NewCapacityKb int64 `xml:"newCapacityKb"` + EagerZero *bool `xml:"eagerZero"` +} + +func init() { + t["ExtendVirtualDiskRequestType"] = reflect.TypeOf((*ExtendVirtualDiskRequestType)(nil)).Elem() +} + +type ExtendVirtualDisk_Task ExtendVirtualDiskRequestType + +func init() { + t["ExtendVirtualDisk_Task"] = reflect.TypeOf((*ExtendVirtualDisk_Task)(nil)).Elem() +} + +type ExtendVirtualDisk_TaskResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + +type ExtendVmfsDatastore ExtendVmfsDatastoreRequestType + +func init() { + t["ExtendVmfsDatastore"] = reflect.TypeOf((*ExtendVmfsDatastore)(nil)).Elem() +} + +type ExtendVmfsDatastoreRequestType struct { + This ManagedObjectReference `xml:"_this"` + Datastore ManagedObjectReference `xml:"datastore"` + Spec VmfsDatastoreExtendSpec `xml:"spec"` +} + +func init() { + t["ExtendVmfsDatastoreRequestType"] = reflect.TypeOf((*ExtendVmfsDatastoreRequestType)(nil)).Elem() +} + +type ExtendVmfsDatastoreResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + +type ExtendedDescription struct { + Description + + MessageCatalogKeyPrefix string `xml:"messageCatalogKeyPrefix"` + MessageArg []KeyAnyValue `xml:"messageArg,omitempty"` +} + +func init() { + t["ExtendedDescription"] = reflect.TypeOf((*ExtendedDescription)(nil)).Elem() +} + +type ExtendedElementDescription struct { + ElementDescription + + MessageCatalogKeyPrefix string `xml:"messageCatalogKeyPrefix"` + MessageArg []KeyAnyValue `xml:"messageArg,omitempty"` +} + +func init() { + t["ExtendedElementDescription"] = reflect.TypeOf((*ExtendedElementDescription)(nil)).Elem() +} + +type ExtendedEvent struct { + GeneralEvent + + EventTypeId string `xml:"eventTypeId"` + ManagedObject ManagedObjectReference `xml:"managedObject"` + Data []ExtendedEventPair `xml:"data,omitempty"` +} + +func init() { + t["ExtendedEvent"] = reflect.TypeOf((*ExtendedEvent)(nil)).Elem() +} + +type ExtendedEventPair struct { + DynamicData + + Key string 
`xml:"key"` + Value string `xml:"value"` +} + +func init() { + t["ExtendedEventPair"] = reflect.TypeOf((*ExtendedEventPair)(nil)).Elem() +} + +type ExtendedFault struct { + VimFault + + FaultTypeId string `xml:"faultTypeId"` + Data []KeyValue `xml:"data,omitempty"` +} + +func init() { + t["ExtendedFault"] = reflect.TypeOf((*ExtendedFault)(nil)).Elem() +} + +type ExtendedFaultFault ExtendedFault + +func init() { + t["ExtendedFaultFault"] = reflect.TypeOf((*ExtendedFaultFault)(nil)).Elem() +} + +type Extension struct { + DynamicData + + Description BaseDescription `xml:"description,typeattr"` + Key string `xml:"key"` + Company string `xml:"company,omitempty"` + Type string `xml:"type,omitempty"` + Version string `xml:"version"` + SubjectName string `xml:"subjectName,omitempty"` + Server []ExtensionServerInfo `xml:"server,omitempty"` + Client []ExtensionClientInfo `xml:"client,omitempty"` + TaskList []ExtensionTaskTypeInfo `xml:"taskList,omitempty"` + EventList []ExtensionEventTypeInfo `xml:"eventList,omitempty"` + FaultList []ExtensionFaultTypeInfo `xml:"faultList,omitempty"` + PrivilegeList []ExtensionPrivilegeInfo `xml:"privilegeList,omitempty"` + ResourceList []ExtensionResourceInfo `xml:"resourceList,omitempty"` + LastHeartbeatTime time.Time `xml:"lastHeartbeatTime"` + HealthInfo *ExtensionHealthInfo `xml:"healthInfo,omitempty"` + OvfConsumerInfo *ExtensionOvfConsumerInfo `xml:"ovfConsumerInfo,omitempty"` + ExtendedProductInfo *ExtExtendedProductInfo `xml:"extendedProductInfo,omitempty"` + ManagedEntityInfo []ExtManagedEntityInfo `xml:"managedEntityInfo,omitempty"` + ShownInSolutionManager *bool `xml:"shownInSolutionManager"` + SolutionManagerInfo *ExtSolutionManagerInfo `xml:"solutionManagerInfo,omitempty"` +} + +func init() { + t["Extension"] = reflect.TypeOf((*Extension)(nil)).Elem() +} + +type ExtensionClientInfo struct { + DynamicData + + Version string `xml:"version"` + Description BaseDescription `xml:"description,typeattr"` + Company string `xml:"company"` + Type string `xml:"type"` + Url string `xml:"url"` +} + +func init() { + t["ExtensionClientInfo"] = reflect.TypeOf((*ExtensionClientInfo)(nil)).Elem() +} + +type ExtensionEventTypeInfo struct { + DynamicData + + EventID string `xml:"eventID"` + EventTypeSchema string `xml:"eventTypeSchema,omitempty"` +} + +func init() { + t["ExtensionEventTypeInfo"] = reflect.TypeOf((*ExtensionEventTypeInfo)(nil)).Elem() +} + +type ExtensionFaultTypeInfo struct { + DynamicData + + FaultID string `xml:"faultID"` +} + +func init() { + t["ExtensionFaultTypeInfo"] = reflect.TypeOf((*ExtensionFaultTypeInfo)(nil)).Elem() +} + +type ExtensionHealthInfo struct { + DynamicData + + Url string `xml:"url"` +} + +func init() { + t["ExtensionHealthInfo"] = reflect.TypeOf((*ExtensionHealthInfo)(nil)).Elem() +} + +type ExtensionManagerIpAllocationUsage struct { + DynamicData + + ExtensionKey string `xml:"extensionKey"` + NumAddresses int `xml:"numAddresses"` +} + +func init() { + t["ExtensionManagerIpAllocationUsage"] = reflect.TypeOf((*ExtensionManagerIpAllocationUsage)(nil)).Elem() +} + +type ExtensionOvfConsumerInfo struct { + DynamicData + + CallbackUrl string `xml:"callbackUrl"` + SectionType []string `xml:"sectionType"` +} + +func init() { + t["ExtensionOvfConsumerInfo"] = reflect.TypeOf((*ExtensionOvfConsumerInfo)(nil)).Elem() +} + +type ExtensionPrivilegeInfo struct { + DynamicData + + PrivID string `xml:"privID"` + PrivGroupName string `xml:"privGroupName"` +} + +func init() { + t["ExtensionPrivilegeInfo"] = 
reflect.TypeOf((*ExtensionPrivilegeInfo)(nil)).Elem() +} + +type ExtensionResourceInfo struct { + DynamicData + + Locale string `xml:"locale"` + Module string `xml:"module"` + Data []KeyValue `xml:"data"` +} + +func init() { + t["ExtensionResourceInfo"] = reflect.TypeOf((*ExtensionResourceInfo)(nil)).Elem() +} + +type ExtensionServerInfo struct { + DynamicData + + Url string `xml:"url"` + Description BaseDescription `xml:"description,typeattr"` + Company string `xml:"company"` + Type string `xml:"type"` + AdminEmail []string `xml:"adminEmail"` + ServerThumbprint string `xml:"serverThumbprint,omitempty"` +} + +func init() { + t["ExtensionServerInfo"] = reflect.TypeOf((*ExtensionServerInfo)(nil)).Elem() +} + +type ExtensionTaskTypeInfo struct { + DynamicData + + TaskID string `xml:"taskID"` +} + +func init() { + t["ExtensionTaskTypeInfo"] = reflect.TypeOf((*ExtensionTaskTypeInfo)(nil)).Elem() +} + +type ExtractOvfEnvironment ExtractOvfEnvironmentRequestType + +func init() { + t["ExtractOvfEnvironment"] = reflect.TypeOf((*ExtractOvfEnvironment)(nil)).Elem() +} + +type ExtractOvfEnvironmentRequestType struct { + This ManagedObjectReference `xml:"_this"` +} + +func init() { + t["ExtractOvfEnvironmentRequestType"] = reflect.TypeOf((*ExtractOvfEnvironmentRequestType)(nil)).Elem() +} + +type ExtractOvfEnvironmentResponse struct { + Returnval string `xml:"returnval"` +} + +type FailToEnableSPBM struct { + NotEnoughLicenses + + Cs ManagedObjectReference `xml:"cs"` + CsName string `xml:"csName"` + HostLicenseStates []ComputeResourceHostSPBMLicenseInfo `xml:"hostLicenseStates"` +} + +func init() { + t["FailToEnableSPBM"] = reflect.TypeOf((*FailToEnableSPBM)(nil)).Elem() +} + +type FailToEnableSPBMFault FailToEnableSPBM + +func init() { + t["FailToEnableSPBMFault"] = reflect.TypeOf((*FailToEnableSPBMFault)(nil)).Elem() +} + +type FailToLockFaultToleranceVMs struct { + RuntimeFault + + VmName string `xml:"vmName"` + Vm ManagedObjectReference `xml:"vm"` + AlreadyLockedVm ManagedObjectReference `xml:"alreadyLockedVm"` +} + +func init() { + t["FailToLockFaultToleranceVMs"] = reflect.TypeOf((*FailToLockFaultToleranceVMs)(nil)).Elem() +} + +type FailToLockFaultToleranceVMsFault FailToLockFaultToleranceVMs + +func init() { + t["FailToLockFaultToleranceVMsFault"] = reflect.TypeOf((*FailToLockFaultToleranceVMsFault)(nil)).Elem() +} + +type FailoverLevelRestored struct { + ClusterEvent +} + +func init() { + t["FailoverLevelRestored"] = reflect.TypeOf((*FailoverLevelRestored)(nil)).Elem() +} + +type FaultToleranceAntiAffinityViolated struct { + MigrationFault + + HostName string `xml:"hostName"` + Host ManagedObjectReference `xml:"host"` +} + +func init() { + t["FaultToleranceAntiAffinityViolated"] = reflect.TypeOf((*FaultToleranceAntiAffinityViolated)(nil)).Elem() +} + +type FaultToleranceAntiAffinityViolatedFault FaultToleranceAntiAffinityViolated + +func init() { + t["FaultToleranceAntiAffinityViolatedFault"] = reflect.TypeOf((*FaultToleranceAntiAffinityViolatedFault)(nil)).Elem() +} + +type FaultToleranceCannotEditMem struct { + VmConfigFault + + VmName string `xml:"vmName"` + Vm ManagedObjectReference `xml:"vm"` +} + +func init() { + t["FaultToleranceCannotEditMem"] = reflect.TypeOf((*FaultToleranceCannotEditMem)(nil)).Elem() +} + +type FaultToleranceCannotEditMemFault FaultToleranceCannotEditMem + +func init() { + t["FaultToleranceCannotEditMemFault"] = reflect.TypeOf((*FaultToleranceCannotEditMemFault)(nil)).Elem() +} + +type FaultToleranceConfigInfo struct { + DynamicData + + Role int `xml:"role"` + 
InstanceUuids []string `xml:"instanceUuids"` + ConfigPaths []string `xml:"configPaths"` + Orphaned *bool `xml:"orphaned"` +} + +func init() { + t["FaultToleranceConfigInfo"] = reflect.TypeOf((*FaultToleranceConfigInfo)(nil)).Elem() +} + +type FaultToleranceConfigSpec struct { + DynamicData + + MetaDataPath *FaultToleranceMetaSpec `xml:"metaDataPath,omitempty"` + SecondaryVmSpec *FaultToleranceVMConfigSpec `xml:"secondaryVmSpec,omitempty"` +} + +func init() { + t["FaultToleranceConfigSpec"] = reflect.TypeOf((*FaultToleranceConfigSpec)(nil)).Elem() +} + +type FaultToleranceCpuIncompatible struct { + CpuIncompatible + + Model bool `xml:"model"` + Family bool `xml:"family"` + Stepping bool `xml:"stepping"` +} + +func init() { + t["FaultToleranceCpuIncompatible"] = reflect.TypeOf((*FaultToleranceCpuIncompatible)(nil)).Elem() +} + +type FaultToleranceCpuIncompatibleFault FaultToleranceCpuIncompatible + +func init() { + t["FaultToleranceCpuIncompatibleFault"] = reflect.TypeOf((*FaultToleranceCpuIncompatibleFault)(nil)).Elem() +} + +type FaultToleranceDiskSpec struct { + DynamicData + + Disk BaseVirtualDevice `xml:"disk,typeattr"` + Datastore ManagedObjectReference `xml:"datastore"` +} + +func init() { + t["FaultToleranceDiskSpec"] = reflect.TypeOf((*FaultToleranceDiskSpec)(nil)).Elem() +} + +type FaultToleranceMetaSpec struct { + DynamicData + + MetaDataDatastore ManagedObjectReference `xml:"metaDataDatastore"` +} + +func init() { + t["FaultToleranceMetaSpec"] = reflect.TypeOf((*FaultToleranceMetaSpec)(nil)).Elem() +} + +type FaultToleranceNeedsThickDisk struct { + MigrationFault + + VmName string `xml:"vmName"` +} + +func init() { + t["FaultToleranceNeedsThickDisk"] = reflect.TypeOf((*FaultToleranceNeedsThickDisk)(nil)).Elem() +} + +type FaultToleranceNeedsThickDiskFault FaultToleranceNeedsThickDisk + +func init() { + t["FaultToleranceNeedsThickDiskFault"] = reflect.TypeOf((*FaultToleranceNeedsThickDiskFault)(nil)).Elem() +} + +type FaultToleranceNotLicensed struct { + VmFaultToleranceIssue + + HostName string `xml:"hostName,omitempty"` +} + +func init() { + t["FaultToleranceNotLicensed"] = reflect.TypeOf((*FaultToleranceNotLicensed)(nil)).Elem() +} + +type FaultToleranceNotLicensedFault FaultToleranceNotLicensed + +func init() { + t["FaultToleranceNotLicensedFault"] = reflect.TypeOf((*FaultToleranceNotLicensedFault)(nil)).Elem() +} + +type FaultToleranceNotSameBuild struct { + MigrationFault + + Build string `xml:"build"` +} + +func init() { + t["FaultToleranceNotSameBuild"] = reflect.TypeOf((*FaultToleranceNotSameBuild)(nil)).Elem() +} + +type FaultToleranceNotSameBuildFault FaultToleranceNotSameBuild + +func init() { + t["FaultToleranceNotSameBuildFault"] = reflect.TypeOf((*FaultToleranceNotSameBuildFault)(nil)).Elem() +} + +type FaultTolerancePrimaryConfigInfo struct { + FaultToleranceConfigInfo + + Secondaries []ManagedObjectReference `xml:"secondaries"` +} + +func init() { + t["FaultTolerancePrimaryConfigInfo"] = reflect.TypeOf((*FaultTolerancePrimaryConfigInfo)(nil)).Elem() +} + +type FaultTolerancePrimaryPowerOnNotAttempted struct { + VmFaultToleranceIssue + + SecondaryVm ManagedObjectReference `xml:"secondaryVm"` + PrimaryVm ManagedObjectReference `xml:"primaryVm"` +} + +func init() { + t["FaultTolerancePrimaryPowerOnNotAttempted"] = reflect.TypeOf((*FaultTolerancePrimaryPowerOnNotAttempted)(nil)).Elem() +} + +type FaultTolerancePrimaryPowerOnNotAttemptedFault FaultTolerancePrimaryPowerOnNotAttempted + +func init() { + t["FaultTolerancePrimaryPowerOnNotAttemptedFault"] = 
reflect.TypeOf((*FaultTolerancePrimaryPowerOnNotAttemptedFault)(nil)).Elem() +} + +type FaultToleranceSecondaryConfigInfo struct { + FaultToleranceConfigInfo + + PrimaryVM ManagedObjectReference `xml:"primaryVM"` +} + +func init() { + t["FaultToleranceSecondaryConfigInfo"] = reflect.TypeOf((*FaultToleranceSecondaryConfigInfo)(nil)).Elem() +} + +type FaultToleranceSecondaryOpResult struct { + DynamicData + + Vm ManagedObjectReference `xml:"vm"` + PowerOnAttempted bool `xml:"powerOnAttempted"` + PowerOnResult *ClusterPowerOnVmResult `xml:"powerOnResult,omitempty"` +} + +func init() { + t["FaultToleranceSecondaryOpResult"] = reflect.TypeOf((*FaultToleranceSecondaryOpResult)(nil)).Elem() +} + +type FaultToleranceVMConfigSpec struct { + DynamicData + + VmConfig *ManagedObjectReference `xml:"vmConfig,omitempty"` + Disks []FaultToleranceDiskSpec `xml:"disks,omitempty"` +} + +func init() { + t["FaultToleranceVMConfigSpec"] = reflect.TypeOf((*FaultToleranceVMConfigSpec)(nil)).Elem() +} + +type FaultToleranceVmNotDasProtected struct { + VimFault + + Vm ManagedObjectReference `xml:"vm"` + VmName string `xml:"vmName"` +} + +func init() { + t["FaultToleranceVmNotDasProtected"] = reflect.TypeOf((*FaultToleranceVmNotDasProtected)(nil)).Elem() +} + +type FaultToleranceVmNotDasProtectedFault FaultToleranceVmNotDasProtected + +func init() { + t["FaultToleranceVmNotDasProtectedFault"] = reflect.TypeOf((*FaultToleranceVmNotDasProtectedFault)(nil)).Elem() +} + +type FcoeConfig struct { + DynamicData + + PriorityClass int `xml:"priorityClass"` + SourceMac string `xml:"sourceMac"` + VlanRange []FcoeConfigVlanRange `xml:"vlanRange"` + Capabilities FcoeConfigFcoeCapabilities `xml:"capabilities"` + FcoeActive bool `xml:"fcoeActive"` +} + +func init() { + t["FcoeConfig"] = reflect.TypeOf((*FcoeConfig)(nil)).Elem() +} + +type FcoeConfigFcoeCapabilities struct { + DynamicData + + PriorityClass bool `xml:"priorityClass"` + SourceMacAddress bool `xml:"sourceMacAddress"` + VlanRange bool `xml:"vlanRange"` +} + +func init() { + t["FcoeConfigFcoeCapabilities"] = reflect.TypeOf((*FcoeConfigFcoeCapabilities)(nil)).Elem() +} + +type FcoeConfigFcoeSpecification struct { + DynamicData + + UnderlyingPnic string `xml:"underlyingPnic"` + PriorityClass int `xml:"priorityClass,omitempty"` + SourceMac string `xml:"sourceMac,omitempty"` + VlanRange []FcoeConfigVlanRange `xml:"vlanRange,omitempty"` +} + +func init() { + t["FcoeConfigFcoeSpecification"] = reflect.TypeOf((*FcoeConfigFcoeSpecification)(nil)).Elem() +} + +type FcoeConfigVlanRange struct { + DynamicData + + VlanLow int `xml:"vlanLow"` + VlanHigh int `xml:"vlanHigh"` +} + +func init() { + t["FcoeConfigVlanRange"] = reflect.TypeOf((*FcoeConfigVlanRange)(nil)).Elem() +} + +type FcoeFault struct { + VimFault +} + +func init() { + t["FcoeFault"] = reflect.TypeOf((*FcoeFault)(nil)).Elem() +} + +type FcoeFaultFault BaseFcoeFault + +func init() { + t["FcoeFaultFault"] = reflect.TypeOf((*FcoeFaultFault)(nil)).Elem() +} + +type FcoeFaultPnicHasNoPortSet struct { + FcoeFault + + NicDevice string `xml:"nicDevice"` +} + +func init() { + t["FcoeFaultPnicHasNoPortSet"] = reflect.TypeOf((*FcoeFaultPnicHasNoPortSet)(nil)).Elem() +} + +type FcoeFaultPnicHasNoPortSetFault FcoeFaultPnicHasNoPortSet + +func init() { + t["FcoeFaultPnicHasNoPortSetFault"] = reflect.TypeOf((*FcoeFaultPnicHasNoPortSetFault)(nil)).Elem() +} + +type FeatureRequirementsNotMet struct { + VirtualHardwareCompatibilityIssue + + FeatureRequirement []VirtualMachineFeatureRequirement `xml:"featureRequirement,omitempty"` + Vm 
*ManagedObjectReference `xml:"vm,omitempty"` + Host *ManagedObjectReference `xml:"host,omitempty"` +} + +func init() { + t["FeatureRequirementsNotMet"] = reflect.TypeOf((*FeatureRequirementsNotMet)(nil)).Elem() +} + +type FeatureRequirementsNotMetFault FeatureRequirementsNotMet + +func init() { + t["FeatureRequirementsNotMetFault"] = reflect.TypeOf((*FeatureRequirementsNotMetFault)(nil)).Elem() +} + +type FetchDVPortKeys FetchDVPortKeysRequestType + +func init() { + t["FetchDVPortKeys"] = reflect.TypeOf((*FetchDVPortKeys)(nil)).Elem() +} + +type FetchDVPortKeysRequestType struct { + This ManagedObjectReference `xml:"_this"` + Criteria *DistributedVirtualSwitchPortCriteria `xml:"criteria,omitempty"` +} + +func init() { + t["FetchDVPortKeysRequestType"] = reflect.TypeOf((*FetchDVPortKeysRequestType)(nil)).Elem() +} + +type FetchDVPortKeysResponse struct { + Returnval []string `xml:"returnval,omitempty"` +} + +type FetchDVPorts FetchDVPortsRequestType + +func init() { + t["FetchDVPorts"] = reflect.TypeOf((*FetchDVPorts)(nil)).Elem() +} + +type FetchDVPortsRequestType struct { + This ManagedObjectReference `xml:"_this"` + Criteria *DistributedVirtualSwitchPortCriteria `xml:"criteria,omitempty"` +} + +func init() { + t["FetchDVPortsRequestType"] = reflect.TypeOf((*FetchDVPortsRequestType)(nil)).Elem() +} + +type FetchDVPortsResponse struct { + Returnval []DistributedVirtualPort `xml:"returnval,omitempty"` +} + +type FileAlreadyExists struct { + FileFault +} + +func init() { + t["FileAlreadyExists"] = reflect.TypeOf((*FileAlreadyExists)(nil)).Elem() +} + +type FileAlreadyExistsFault FileAlreadyExists + +func init() { + t["FileAlreadyExistsFault"] = reflect.TypeOf((*FileAlreadyExistsFault)(nil)).Elem() +} + +type FileBackedPortNotSupported struct { + DeviceNotSupported +} + +func init() { + t["FileBackedPortNotSupported"] = reflect.TypeOf((*FileBackedPortNotSupported)(nil)).Elem() +} + +type FileBackedPortNotSupportedFault FileBackedPortNotSupported + +func init() { + t["FileBackedPortNotSupportedFault"] = reflect.TypeOf((*FileBackedPortNotSupportedFault)(nil)).Elem() +} + +type FileBackedVirtualDiskSpec struct { + VirtualDiskSpec + + CapacityKb int64 `xml:"capacityKb"` + Profile []BaseVirtualMachineProfileSpec `xml:"profile,omitempty,typeattr"` +} + +func init() { + t["FileBackedVirtualDiskSpec"] = reflect.TypeOf((*FileBackedVirtualDiskSpec)(nil)).Elem() +} + +type FileFault struct { + VimFault + + File string `xml:"file"` +} + +func init() { + t["FileFault"] = reflect.TypeOf((*FileFault)(nil)).Elem() +} + +type FileFaultFault BaseFileFault + +func init() { + t["FileFaultFault"] = reflect.TypeOf((*FileFaultFault)(nil)).Elem() +} + +type FileInfo struct { + DynamicData + + Path string `xml:"path"` + FileSize int64 `xml:"fileSize,omitempty"` + Modification *time.Time `xml:"modification"` + Owner string `xml:"owner,omitempty"` +} + +func init() { + t["FileInfo"] = reflect.TypeOf((*FileInfo)(nil)).Elem() +} + +type FileLocked struct { + FileFault +} + +func init() { + t["FileLocked"] = reflect.TypeOf((*FileLocked)(nil)).Elem() +} + +type FileLockedFault FileLocked + +func init() { + t["FileLockedFault"] = reflect.TypeOf((*FileLockedFault)(nil)).Elem() +} + +type FileNameTooLong struct { + FileFault +} + +func init() { + t["FileNameTooLong"] = reflect.TypeOf((*FileNameTooLong)(nil)).Elem() +} + +type FileNameTooLongFault FileNameTooLong + +func init() { + t["FileNameTooLongFault"] = reflect.TypeOf((*FileNameTooLongFault)(nil)).Elem() +} + +type FileNotFound struct { + FileFault +} + +func init() { + 
t["FileNotFound"] = reflect.TypeOf((*FileNotFound)(nil)).Elem() +} + +type FileNotFoundFault FileNotFound + +func init() { + t["FileNotFoundFault"] = reflect.TypeOf((*FileNotFoundFault)(nil)).Elem() +} + +type FileNotWritable struct { + FileFault +} + +func init() { + t["FileNotWritable"] = reflect.TypeOf((*FileNotWritable)(nil)).Elem() +} + +type FileNotWritableFault FileNotWritable + +func init() { + t["FileNotWritableFault"] = reflect.TypeOf((*FileNotWritableFault)(nil)).Elem() +} + +type FileQuery struct { + DynamicData +} + +func init() { + t["FileQuery"] = reflect.TypeOf((*FileQuery)(nil)).Elem() +} + +type FileQueryFlags struct { + DynamicData + + FileType bool `xml:"fileType"` + FileSize bool `xml:"fileSize"` + Modification bool `xml:"modification"` + FileOwner *bool `xml:"fileOwner"` +} + +func init() { + t["FileQueryFlags"] = reflect.TypeOf((*FileQueryFlags)(nil)).Elem() +} + +type FileTooLarge struct { + FileFault + + Datastore string `xml:"datastore"` + FileSize int64 `xml:"fileSize"` + MaxFileSize int64 `xml:"maxFileSize,omitempty"` +} + +func init() { + t["FileTooLarge"] = reflect.TypeOf((*FileTooLarge)(nil)).Elem() +} + +type FileTooLargeFault FileTooLarge + +func init() { + t["FileTooLargeFault"] = reflect.TypeOf((*FileTooLargeFault)(nil)).Elem() +} + +type FileTransferInformation struct { + DynamicData + + Attributes BaseGuestFileAttributes `xml:"attributes,typeattr"` + Size int64 `xml:"size"` + Url string `xml:"url"` +} + +func init() { + t["FileTransferInformation"] = reflect.TypeOf((*FileTransferInformation)(nil)).Elem() +} + +type FilesystemQuiesceFault struct { + SnapshotFault +} + +func init() { + t["FilesystemQuiesceFault"] = reflect.TypeOf((*FilesystemQuiesceFault)(nil)).Elem() +} + +type FilesystemQuiesceFaultFault FilesystemQuiesceFault + +func init() { + t["FilesystemQuiesceFaultFault"] = reflect.TypeOf((*FilesystemQuiesceFaultFault)(nil)).Elem() +} + +type FilterInUse struct { + ResourceInUse + + Disk []VirtualDiskId `xml:"disk,omitempty"` +} + +func init() { + t["FilterInUse"] = reflect.TypeOf((*FilterInUse)(nil)).Elem() +} + +type FilterInUseFault FilterInUse + +func init() { + t["FilterInUseFault"] = reflect.TypeOf((*FilterInUseFault)(nil)).Elem() +} + +type FindAllByDnsName FindAllByDnsNameRequestType + +func init() { + t["FindAllByDnsName"] = reflect.TypeOf((*FindAllByDnsName)(nil)).Elem() +} + +type FindAllByDnsNameRequestType struct { + This ManagedObjectReference `xml:"_this"` + Datacenter *ManagedObjectReference `xml:"datacenter,omitempty"` + DnsName string `xml:"dnsName"` + VmSearch bool `xml:"vmSearch"` +} + +func init() { + t["FindAllByDnsNameRequestType"] = reflect.TypeOf((*FindAllByDnsNameRequestType)(nil)).Elem() +} + +type FindAllByDnsNameResponse struct { + Returnval []ManagedObjectReference `xml:"returnval"` +} + +type FindAllByIp FindAllByIpRequestType + +func init() { + t["FindAllByIp"] = reflect.TypeOf((*FindAllByIp)(nil)).Elem() +} + +type FindAllByIpRequestType struct { + This ManagedObjectReference `xml:"_this"` + Datacenter *ManagedObjectReference `xml:"datacenter,omitempty"` + Ip string `xml:"ip"` + VmSearch bool `xml:"vmSearch"` +} + +func init() { + t["FindAllByIpRequestType"] = reflect.TypeOf((*FindAllByIpRequestType)(nil)).Elem() +} + +type FindAllByIpResponse struct { + Returnval []ManagedObjectReference `xml:"returnval"` +} + +type FindAllByUuid FindAllByUuidRequestType + +func init() { + t["FindAllByUuid"] = reflect.TypeOf((*FindAllByUuid)(nil)).Elem() +} + +type FindAllByUuidRequestType struct { + This ManagedObjectReference 
`xml:"_this"` + Datacenter *ManagedObjectReference `xml:"datacenter,omitempty"` + Uuid string `xml:"uuid"` + VmSearch bool `xml:"vmSearch"` + InstanceUuid *bool `xml:"instanceUuid"` +} + +func init() { + t["FindAllByUuidRequestType"] = reflect.TypeOf((*FindAllByUuidRequestType)(nil)).Elem() +} + +type FindAllByUuidResponse struct { + Returnval []ManagedObjectReference `xml:"returnval"` +} + +type FindAssociatedProfile FindAssociatedProfileRequestType + +func init() { + t["FindAssociatedProfile"] = reflect.TypeOf((*FindAssociatedProfile)(nil)).Elem() +} + +type FindAssociatedProfileRequestType struct { + This ManagedObjectReference `xml:"_this"` + Entity ManagedObjectReference `xml:"entity"` +} + +func init() { + t["FindAssociatedProfileRequestType"] = reflect.TypeOf((*FindAssociatedProfileRequestType)(nil)).Elem() +} + +type FindAssociatedProfileResponse struct { + Returnval []ManagedObjectReference `xml:"returnval,omitempty"` +} + +type FindByDatastorePath FindByDatastorePathRequestType + +func init() { + t["FindByDatastorePath"] = reflect.TypeOf((*FindByDatastorePath)(nil)).Elem() +} + +type FindByDatastorePathRequestType struct { + This ManagedObjectReference `xml:"_this"` + Datacenter ManagedObjectReference `xml:"datacenter"` + Path string `xml:"path"` +} + +func init() { + t["FindByDatastorePathRequestType"] = reflect.TypeOf((*FindByDatastorePathRequestType)(nil)).Elem() +} + +type FindByDatastorePathResponse struct { + Returnval *ManagedObjectReference `xml:"returnval,omitempty"` +} + +type FindByDnsName FindByDnsNameRequestType + +func init() { + t["FindByDnsName"] = reflect.TypeOf((*FindByDnsName)(nil)).Elem() +} + +type FindByDnsNameRequestType struct { + This ManagedObjectReference `xml:"_this"` + Datacenter *ManagedObjectReference `xml:"datacenter,omitempty"` + DnsName string `xml:"dnsName"` + VmSearch bool `xml:"vmSearch"` +} + +func init() { + t["FindByDnsNameRequestType"] = reflect.TypeOf((*FindByDnsNameRequestType)(nil)).Elem() +} + +type FindByDnsNameResponse struct { + Returnval *ManagedObjectReference `xml:"returnval,omitempty"` +} + +type FindByInventoryPath FindByInventoryPathRequestType + +func init() { + t["FindByInventoryPath"] = reflect.TypeOf((*FindByInventoryPath)(nil)).Elem() +} + +type FindByInventoryPathRequestType struct { + This ManagedObjectReference `xml:"_this"` + InventoryPath string `xml:"inventoryPath"` +} + +func init() { + t["FindByInventoryPathRequestType"] = reflect.TypeOf((*FindByInventoryPathRequestType)(nil)).Elem() +} + +type FindByInventoryPathResponse struct { + Returnval *ManagedObjectReference `xml:"returnval,omitempty"` +} + +type FindByIp FindByIpRequestType + +func init() { + t["FindByIp"] = reflect.TypeOf((*FindByIp)(nil)).Elem() +} + +type FindByIpRequestType struct { + This ManagedObjectReference `xml:"_this"` + Datacenter *ManagedObjectReference `xml:"datacenter,omitempty"` + Ip string `xml:"ip"` + VmSearch bool `xml:"vmSearch"` +} + +func init() { + t["FindByIpRequestType"] = reflect.TypeOf((*FindByIpRequestType)(nil)).Elem() +} + +type FindByIpResponse struct { + Returnval *ManagedObjectReference `xml:"returnval,omitempty"` +} + +type FindByUuid FindByUuidRequestType + +func init() { + t["FindByUuid"] = reflect.TypeOf((*FindByUuid)(nil)).Elem() +} + +type FindByUuidRequestType struct { + This ManagedObjectReference `xml:"_this"` + Datacenter *ManagedObjectReference `xml:"datacenter,omitempty"` + Uuid string `xml:"uuid"` + VmSearch bool `xml:"vmSearch"` + InstanceUuid *bool `xml:"instanceUuid"` +} + +func init() { + 
t["FindByUuidRequestType"] = reflect.TypeOf((*FindByUuidRequestType)(nil)).Elem() +} + +type FindByUuidResponse struct { + Returnval *ManagedObjectReference `xml:"returnval,omitempty"` +} + +type FindChild FindChildRequestType + +func init() { + t["FindChild"] = reflect.TypeOf((*FindChild)(nil)).Elem() +} + +type FindChildRequestType struct { + This ManagedObjectReference `xml:"_this"` + Entity ManagedObjectReference `xml:"entity"` + Name string `xml:"name"` +} + +func init() { + t["FindChildRequestType"] = reflect.TypeOf((*FindChildRequestType)(nil)).Elem() +} + +type FindChildResponse struct { + Returnval *ManagedObjectReference `xml:"returnval,omitempty"` +} + +type FindExtension FindExtensionRequestType + +func init() { + t["FindExtension"] = reflect.TypeOf((*FindExtension)(nil)).Elem() +} + +type FindExtensionRequestType struct { + This ManagedObjectReference `xml:"_this"` + ExtensionKey string `xml:"extensionKey"` +} + +func init() { + t["FindExtensionRequestType"] = reflect.TypeOf((*FindExtensionRequestType)(nil)).Elem() +} + +type FindExtensionResponse struct { + Returnval *Extension `xml:"returnval,omitempty"` +} + +type FindRulesForVm FindRulesForVmRequestType + +func init() { + t["FindRulesForVm"] = reflect.TypeOf((*FindRulesForVm)(nil)).Elem() +} + +type FindRulesForVmRequestType struct { + This ManagedObjectReference `xml:"_this"` + Vm ManagedObjectReference `xml:"vm"` +} + +func init() { + t["FindRulesForVmRequestType"] = reflect.TypeOf((*FindRulesForVmRequestType)(nil)).Elem() +} + +type FindRulesForVmResponse struct { + Returnval []BaseClusterRuleInfo `xml:"returnval,omitempty,typeattr"` +} + +type FirewallProfile struct { + ApplyProfile + + Ruleset []FirewallProfileRulesetProfile `xml:"ruleset,omitempty"` +} + +func init() { + t["FirewallProfile"] = reflect.TypeOf((*FirewallProfile)(nil)).Elem() +} + +type FirewallProfileRulesetProfile struct { + ApplyProfile + + Key string `xml:"key"` +} + +func init() { + t["FirewallProfileRulesetProfile"] = reflect.TypeOf((*FirewallProfileRulesetProfile)(nil)).Elem() +} + +type FloatOption struct { + OptionType + + Min float32 `xml:"min"` + Max float32 `xml:"max"` + DefaultValue float32 `xml:"defaultValue"` +} + +func init() { + t["FloatOption"] = reflect.TypeOf((*FloatOption)(nil)).Elem() +} + +type FloppyImageFileInfo struct { + FileInfo +} + +func init() { + t["FloppyImageFileInfo"] = reflect.TypeOf((*FloppyImageFileInfo)(nil)).Elem() +} + +type FloppyImageFileQuery struct { + FileQuery +} + +func init() { + t["FloppyImageFileQuery"] = reflect.TypeOf((*FloppyImageFileQuery)(nil)).Elem() +} + +type FolderEventArgument struct { + EntityEventArgument + + Folder ManagedObjectReference `xml:"folder"` +} + +func init() { + t["FolderEventArgument"] = reflect.TypeOf((*FolderEventArgument)(nil)).Elem() +} + +type FolderFileInfo struct { + FileInfo +} + +func init() { + t["FolderFileInfo"] = reflect.TypeOf((*FolderFileInfo)(nil)).Elem() +} + +type FolderFileQuery struct { + FileQuery +} + +func init() { + t["FolderFileQuery"] = reflect.TypeOf((*FolderFileQuery)(nil)).Elem() +} + +type FormatVffs FormatVffsRequestType + +func init() { + t["FormatVffs"] = reflect.TypeOf((*FormatVffs)(nil)).Elem() +} + +type FormatVffsRequestType struct { + This ManagedObjectReference `xml:"_this"` + CreateSpec HostVffsSpec `xml:"createSpec"` +} + +func init() { + t["FormatVffsRequestType"] = reflect.TypeOf((*FormatVffsRequestType)(nil)).Elem() +} + +type FormatVffsResponse struct { + Returnval HostVffsVolume `xml:"returnval"` +} + +type FormatVmfs 
FormatVmfsRequestType + +func init() { + t["FormatVmfs"] = reflect.TypeOf((*FormatVmfs)(nil)).Elem() +} + +type FormatVmfsRequestType struct { + This ManagedObjectReference `xml:"_this"` + CreateSpec HostVmfsSpec `xml:"createSpec"` +} + +func init() { + t["FormatVmfsRequestType"] = reflect.TypeOf((*FormatVmfsRequestType)(nil)).Elem() +} + +type FormatVmfsResponse struct { + Returnval HostVmfsVolume `xml:"returnval"` +} + +type FtIssuesOnHost struct { + VmFaultToleranceIssue + + Host ManagedObjectReference `xml:"host"` + HostName string `xml:"hostName"` + Errors []LocalizedMethodFault `xml:"errors,omitempty"` +} + +func init() { + t["FtIssuesOnHost"] = reflect.TypeOf((*FtIssuesOnHost)(nil)).Elem() +} + +type FtIssuesOnHostFault FtIssuesOnHost + +func init() { + t["FtIssuesOnHostFault"] = reflect.TypeOf((*FtIssuesOnHostFault)(nil)).Elem() +} + +type FullStorageVMotionNotSupported struct { + MigrationFeatureNotSupported +} + +func init() { + t["FullStorageVMotionNotSupported"] = reflect.TypeOf((*FullStorageVMotionNotSupported)(nil)).Elem() +} + +type FullStorageVMotionNotSupportedFault FullStorageVMotionNotSupported + +func init() { + t["FullStorageVMotionNotSupportedFault"] = reflect.TypeOf((*FullStorageVMotionNotSupportedFault)(nil)).Elem() +} + +type GatewayConnectFault struct { + HostConnectFault + + GatewayType string `xml:"gatewayType"` + GatewayId string `xml:"gatewayId"` + GatewayInfo string `xml:"gatewayInfo"` + Details *LocalizableMessage `xml:"details,omitempty"` +} + +func init() { + t["GatewayConnectFault"] = reflect.TypeOf((*GatewayConnectFault)(nil)).Elem() +} + +type GatewayConnectFaultFault BaseGatewayConnectFault + +func init() { + t["GatewayConnectFaultFault"] = reflect.TypeOf((*GatewayConnectFaultFault)(nil)).Elem() +} + +type GatewayHostNotReachable struct { + GatewayToHostConnectFault +} + +func init() { + t["GatewayHostNotReachable"] = reflect.TypeOf((*GatewayHostNotReachable)(nil)).Elem() +} + +type GatewayHostNotReachableFault GatewayHostNotReachable + +func init() { + t["GatewayHostNotReachableFault"] = reflect.TypeOf((*GatewayHostNotReachableFault)(nil)).Elem() +} + +type GatewayNotFound struct { + GatewayConnectFault +} + +func init() { + t["GatewayNotFound"] = reflect.TypeOf((*GatewayNotFound)(nil)).Elem() +} + +type GatewayNotFoundFault GatewayNotFound + +func init() { + t["GatewayNotFoundFault"] = reflect.TypeOf((*GatewayNotFoundFault)(nil)).Elem() +} + +type GatewayNotReachable struct { + GatewayConnectFault +} + +func init() { + t["GatewayNotReachable"] = reflect.TypeOf((*GatewayNotReachable)(nil)).Elem() +} + +type GatewayNotReachableFault GatewayNotReachable + +func init() { + t["GatewayNotReachableFault"] = reflect.TypeOf((*GatewayNotReachableFault)(nil)).Elem() +} + +type GatewayOperationRefused struct { + GatewayConnectFault +} + +func init() { + t["GatewayOperationRefused"] = reflect.TypeOf((*GatewayOperationRefused)(nil)).Elem() +} + +type GatewayOperationRefusedFault GatewayOperationRefused + +func init() { + t["GatewayOperationRefusedFault"] = reflect.TypeOf((*GatewayOperationRefusedFault)(nil)).Elem() +} + +type GatewayToHostAuthFault struct { + GatewayToHostConnectFault + + InvalidProperties []string `xml:"invalidProperties"` + MissingProperties []string `xml:"missingProperties"` +} + +func init() { + t["GatewayToHostAuthFault"] = reflect.TypeOf((*GatewayToHostAuthFault)(nil)).Elem() +} + +type GatewayToHostAuthFaultFault GatewayToHostAuthFault + +func init() { + t["GatewayToHostAuthFaultFault"] = 
reflect.TypeOf((*GatewayToHostAuthFaultFault)(nil)).Elem() +} + +type GatewayToHostConnectFault struct { + GatewayConnectFault + + Hostname string `xml:"hostname"` + Port int `xml:"port,omitempty"` +} + +func init() { + t["GatewayToHostConnectFault"] = reflect.TypeOf((*GatewayToHostConnectFault)(nil)).Elem() +} + +type GatewayToHostConnectFaultFault BaseGatewayToHostConnectFault + +func init() { + t["GatewayToHostConnectFaultFault"] = reflect.TypeOf((*GatewayToHostConnectFaultFault)(nil)).Elem() +} + +type GatewayToHostTrustVerifyFault struct { + GatewayToHostConnectFault + + VerificationToken string `xml:"verificationToken"` + PropertiesToVerify []KeyValue `xml:"propertiesToVerify"` +} + +func init() { + t["GatewayToHostTrustVerifyFault"] = reflect.TypeOf((*GatewayToHostTrustVerifyFault)(nil)).Elem() +} + +type GatewayToHostTrustVerifyFaultFault GatewayToHostTrustVerifyFault + +func init() { + t["GatewayToHostTrustVerifyFaultFault"] = reflect.TypeOf((*GatewayToHostTrustVerifyFaultFault)(nil)).Elem() +} + +type GeneralEvent struct { + Event + + Message string `xml:"message"` +} + +func init() { + t["GeneralEvent"] = reflect.TypeOf((*GeneralEvent)(nil)).Elem() +} + +type GeneralHostErrorEvent struct { + GeneralEvent +} + +func init() { + t["GeneralHostErrorEvent"] = reflect.TypeOf((*GeneralHostErrorEvent)(nil)).Elem() +} + +type GeneralHostInfoEvent struct { + GeneralEvent +} + +func init() { + t["GeneralHostInfoEvent"] = reflect.TypeOf((*GeneralHostInfoEvent)(nil)).Elem() +} + +type GeneralHostWarningEvent struct { + GeneralEvent +} + +func init() { + t["GeneralHostWarningEvent"] = reflect.TypeOf((*GeneralHostWarningEvent)(nil)).Elem() +} + +type GeneralUserEvent struct { + GeneralEvent + + Entity *ManagedEntityEventArgument `xml:"entity,omitempty"` +} + +func init() { + t["GeneralUserEvent"] = reflect.TypeOf((*GeneralUserEvent)(nil)).Elem() +} + +type GeneralVmErrorEvent struct { + GeneralEvent +} + +func init() { + t["GeneralVmErrorEvent"] = reflect.TypeOf((*GeneralVmErrorEvent)(nil)).Elem() +} + +type GeneralVmInfoEvent struct { + GeneralEvent +} + +func init() { + t["GeneralVmInfoEvent"] = reflect.TypeOf((*GeneralVmInfoEvent)(nil)).Elem() +} + +type GeneralVmWarningEvent struct { + GeneralEvent +} + +func init() { + t["GeneralVmWarningEvent"] = reflect.TypeOf((*GeneralVmWarningEvent)(nil)).Elem() +} + +type GenerateCertificateSigningRequest GenerateCertificateSigningRequestRequestType + +func init() { + t["GenerateCertificateSigningRequest"] = reflect.TypeOf((*GenerateCertificateSigningRequest)(nil)).Elem() +} + +type GenerateCertificateSigningRequestByDn GenerateCertificateSigningRequestByDnRequestType + +func init() { + t["GenerateCertificateSigningRequestByDn"] = reflect.TypeOf((*GenerateCertificateSigningRequestByDn)(nil)).Elem() +} + +type GenerateCertificateSigningRequestByDnRequestType struct { + This ManagedObjectReference `xml:"_this"` + DistinguishedName string `xml:"distinguishedName"` +} + +func init() { + t["GenerateCertificateSigningRequestByDnRequestType"] = reflect.TypeOf((*GenerateCertificateSigningRequestByDnRequestType)(nil)).Elem() +} + +type GenerateCertificateSigningRequestByDnResponse struct { + Returnval string `xml:"returnval"` +} + +type GenerateCertificateSigningRequestRequestType struct { + This ManagedObjectReference `xml:"_this"` + UseIpAddressAsCommonName bool `xml:"useIpAddressAsCommonName"` +} + +func init() { + t["GenerateCertificateSigningRequestRequestType"] = reflect.TypeOf((*GenerateCertificateSigningRequestRequestType)(nil)).Elem() +} + +type 
GenerateCertificateSigningRequestResponse struct { + Returnval string `xml:"returnval"` +} + +type GenerateConfigTaskList GenerateConfigTaskListRequestType + +func init() { + t["GenerateConfigTaskList"] = reflect.TypeOf((*GenerateConfigTaskList)(nil)).Elem() +} + +type GenerateConfigTaskListRequestType struct { + This ManagedObjectReference `xml:"_this"` + ConfigSpec HostConfigSpec `xml:"configSpec"` + Host ManagedObjectReference `xml:"host"` +} + +func init() { + t["GenerateConfigTaskListRequestType"] = reflect.TypeOf((*GenerateConfigTaskListRequestType)(nil)).Elem() +} + +type GenerateConfigTaskListResponse struct { + Returnval HostProfileManagerConfigTaskList `xml:"returnval"` +} + +type GenerateHostProfileTaskListRequestType struct { + This ManagedObjectReference `xml:"_this"` + ConfigSpec HostConfigSpec `xml:"configSpec"` + Host ManagedObjectReference `xml:"host"` +} + +func init() { + t["GenerateHostProfileTaskListRequestType"] = reflect.TypeOf((*GenerateHostProfileTaskListRequestType)(nil)).Elem() +} + +type GenerateHostProfileTaskList_Task GenerateHostProfileTaskListRequestType + +func init() { + t["GenerateHostProfileTaskList_Task"] = reflect.TypeOf((*GenerateHostProfileTaskList_Task)(nil)).Elem() +} + +type GenerateHostProfileTaskList_TaskResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + +type GenerateLogBundlesRequestType struct { + This ManagedObjectReference `xml:"_this"` + IncludeDefault bool `xml:"includeDefault"` + Host []ManagedObjectReference `xml:"host,omitempty"` +} + +func init() { + t["GenerateLogBundlesRequestType"] = reflect.TypeOf((*GenerateLogBundlesRequestType)(nil)).Elem() +} + +type GenerateLogBundles_Task GenerateLogBundlesRequestType + +func init() { + t["GenerateLogBundles_Task"] = reflect.TypeOf((*GenerateLogBundles_Task)(nil)).Elem() +} + +type GenerateLogBundles_TaskResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + +type GenericDrsFault struct { + VimFault + + HostFaults []LocalizedMethodFault `xml:"hostFaults,omitempty"` +} + +func init() { + t["GenericDrsFault"] = reflect.TypeOf((*GenericDrsFault)(nil)).Elem() +} + +type GenericDrsFaultFault GenericDrsFault + +func init() { + t["GenericDrsFaultFault"] = reflect.TypeOf((*GenericDrsFaultFault)(nil)).Elem() +} + +type GenericVmConfigFault struct { + VmConfigFault + + Reason string `xml:"reason"` +} + +func init() { + t["GenericVmConfigFault"] = reflect.TypeOf((*GenericVmConfigFault)(nil)).Elem() +} + +type GenericVmConfigFaultFault GenericVmConfigFault + +func init() { + t["GenericVmConfigFaultFault"] = reflect.TypeOf((*GenericVmConfigFaultFault)(nil)).Elem() +} + +type GetAlarm GetAlarmRequestType + +func init() { + t["GetAlarm"] = reflect.TypeOf((*GetAlarm)(nil)).Elem() +} + +type GetAlarmRequestType struct { + This ManagedObjectReference `xml:"_this"` + Entity *ManagedObjectReference `xml:"entity,omitempty"` +} + +func init() { + t["GetAlarmRequestType"] = reflect.TypeOf((*GetAlarmRequestType)(nil)).Elem() +} + +type GetAlarmResponse struct { + Returnval []ManagedObjectReference `xml:"returnval,omitempty"` +} + +type GetAlarmState GetAlarmStateRequestType + +func init() { + t["GetAlarmState"] = reflect.TypeOf((*GetAlarmState)(nil)).Elem() +} + +type GetAlarmStateRequestType struct { + This ManagedObjectReference `xml:"_this"` + Entity ManagedObjectReference `xml:"entity"` +} + +func init() { + t["GetAlarmStateRequestType"] = reflect.TypeOf((*GetAlarmStateRequestType)(nil)).Elem() +} + +type GetAlarmStateResponse struct { + Returnval []AlarmState 
`xml:"returnval,omitempty"` +} + +type GetAllVRPIds GetAllVRPIdsRequestType + +func init() { + t["GetAllVRPIds"] = reflect.TypeOf((*GetAllVRPIds)(nil)).Elem() +} + +type GetAllVRPIdsRequestType struct { + This ManagedObjectReference `xml:"_this"` +} + +func init() { + t["GetAllVRPIdsRequestType"] = reflect.TypeOf((*GetAllVRPIdsRequestType)(nil)).Elem() +} + +type GetAllVRPIdsResponse struct { + Returnval []string `xml:"returnval,omitempty"` +} + +type GetChildRPforHub GetChildRPforHubRequestType + +func init() { + t["GetChildRPforHub"] = reflect.TypeOf((*GetChildRPforHub)(nil)).Elem() +} + +type GetChildRPforHubRequestType struct { + This ManagedObjectReference `xml:"_this"` + VrpId string `xml:"vrpId"` + Hub ManagedObjectReference `xml:"hub"` +} + +func init() { + t["GetChildRPforHubRequestType"] = reflect.TypeOf((*GetChildRPforHubRequestType)(nil)).Elem() +} + +type GetChildRPforHubResponse struct { + Returnval *ManagedObjectReference `xml:"returnval,omitempty"` +} + +type GetCustomizationSpec GetCustomizationSpecRequestType + +func init() { + t["GetCustomizationSpec"] = reflect.TypeOf((*GetCustomizationSpec)(nil)).Elem() +} + +type GetCustomizationSpecRequestType struct { + This ManagedObjectReference `xml:"_this"` + Name string `xml:"name"` +} + +func init() { + t["GetCustomizationSpecRequestType"] = reflect.TypeOf((*GetCustomizationSpecRequestType)(nil)).Elem() +} + +type GetCustomizationSpecResponse struct { + Returnval CustomizationSpecItem `xml:"returnval"` +} + +type GetPublicKey GetPublicKeyRequestType + +func init() { + t["GetPublicKey"] = reflect.TypeOf((*GetPublicKey)(nil)).Elem() +} + +type GetPublicKeyRequestType struct { + This ManagedObjectReference `xml:"_this"` +} + +func init() { + t["GetPublicKeyRequestType"] = reflect.TypeOf((*GetPublicKeyRequestType)(nil)).Elem() +} + +type GetPublicKeyResponse struct { + Returnval string `xml:"returnval"` +} + +type GetRPSettings GetRPSettingsRequestType + +func init() { + t["GetRPSettings"] = reflect.TypeOf((*GetRPSettings)(nil)).Elem() +} + +type GetRPSettingsRequestType struct { + This ManagedObjectReference `xml:"_this"` + ResourcePool ManagedObjectReference `xml:"resourcePool"` +} + +func init() { + t["GetRPSettingsRequestType"] = reflect.TypeOf((*GetRPSettingsRequestType)(nil)).Elem() +} + +type GetRPSettingsResponse struct { + Returnval *ResourceConfigSpec `xml:"returnval,omitempty"` +} + +type GetResourceUsage GetResourceUsageRequestType + +func init() { + t["GetResourceUsage"] = reflect.TypeOf((*GetResourceUsage)(nil)).Elem() +} + +type GetResourceUsageRequestType struct { + This ManagedObjectReference `xml:"_this"` +} + +func init() { + t["GetResourceUsageRequestType"] = reflect.TypeOf((*GetResourceUsageRequestType)(nil)).Elem() +} + +type GetResourceUsageResponse struct { + Returnval ClusterResourceUsageSummary `xml:"returnval"` +} + +type GetVRPSettings GetVRPSettingsRequestType + +func init() { + t["GetVRPSettings"] = reflect.TypeOf((*GetVRPSettings)(nil)).Elem() +} + +type GetVRPSettingsRequestType struct { + This ManagedObjectReference `xml:"_this"` + VrpId string `xml:"vrpId"` +} + +func init() { + t["GetVRPSettingsRequestType"] = reflect.TypeOf((*GetVRPSettingsRequestType)(nil)).Elem() +} + +type GetVRPSettingsResponse struct { + Returnval VirtualResourcePoolSpec `xml:"returnval"` +} + +type GetVRPUsage GetVRPUsageRequestType + +func init() { + t["GetVRPUsage"] = reflect.TypeOf((*GetVRPUsage)(nil)).Elem() +} + +type GetVRPUsageRequestType struct { + This ManagedObjectReference `xml:"_this"` + VrpId string 
`xml:"vrpId"` +} + +func init() { + t["GetVRPUsageRequestType"] = reflect.TypeOf((*GetVRPUsageRequestType)(nil)).Elem() +} + +type GetVRPUsageResponse struct { + Returnval VirtualResourcePoolUsage `xml:"returnval"` +} + +type GetVRPofVM GetVRPofVMRequestType + +func init() { + t["GetVRPofVM"] = reflect.TypeOf((*GetVRPofVM)(nil)).Elem() +} + +type GetVRPofVMRequestType struct { + This ManagedObjectReference `xml:"_this"` + Vm ManagedObjectReference `xml:"vm"` +} + +func init() { + t["GetVRPofVMRequestType"] = reflect.TypeOf((*GetVRPofVMRequestType)(nil)).Elem() +} + +type GetVRPofVMResponse struct { + Returnval string `xml:"returnval,omitempty"` +} + +type GetVsanObjExtAttrs GetVsanObjExtAttrsRequestType + +func init() { + t["GetVsanObjExtAttrs"] = reflect.TypeOf((*GetVsanObjExtAttrs)(nil)).Elem() +} + +type GetVsanObjExtAttrsRequestType struct { + This ManagedObjectReference `xml:"_this"` + Uuids []string `xml:"uuids"` +} + +func init() { + t["GetVsanObjExtAttrsRequestType"] = reflect.TypeOf((*GetVsanObjExtAttrsRequestType)(nil)).Elem() +} + +type GetVsanObjExtAttrsResponse struct { + Returnval string `xml:"returnval"` +} + +type GhostDvsProxySwitchDetectedEvent struct { + HostEvent + + SwitchUuid []string `xml:"switchUuid"` +} + +func init() { + t["GhostDvsProxySwitchDetectedEvent"] = reflect.TypeOf((*GhostDvsProxySwitchDetectedEvent)(nil)).Elem() +} + +type GhostDvsProxySwitchRemovedEvent struct { + HostEvent + + SwitchUuid []string `xml:"switchUuid"` +} + +func init() { + t["GhostDvsProxySwitchRemovedEvent"] = reflect.TypeOf((*GhostDvsProxySwitchRemovedEvent)(nil)).Elem() +} + +type GlobalMessageChangedEvent struct { + SessionEvent + + Message string `xml:"message"` +} + +func init() { + t["GlobalMessageChangedEvent"] = reflect.TypeOf((*GlobalMessageChangedEvent)(nil)).Elem() +} + +type GroupAlarmAction struct { + AlarmAction + + Action []BaseAlarmAction `xml:"action,typeattr"` +} + +func init() { + t["GroupAlarmAction"] = reflect.TypeOf((*GroupAlarmAction)(nil)).Elem() +} + +type GuestAliases struct { + DynamicData + + Base64Cert string `xml:"base64Cert"` + Aliases []GuestAuthAliasInfo `xml:"aliases"` +} + +func init() { + t["GuestAliases"] = reflect.TypeOf((*GuestAliases)(nil)).Elem() +} + +type GuestAuthAliasInfo struct { + DynamicData + + Subject BaseGuestAuthSubject `xml:"subject,typeattr"` + Comment string `xml:"comment"` +} + +func init() { + t["GuestAuthAliasInfo"] = reflect.TypeOf((*GuestAuthAliasInfo)(nil)).Elem() +} + +type GuestAuthAnySubject struct { + GuestAuthSubject +} + +func init() { + t["GuestAuthAnySubject"] = reflect.TypeOf((*GuestAuthAnySubject)(nil)).Elem() +} + +type GuestAuthNamedSubject struct { + GuestAuthSubject + + Name string `xml:"name"` +} + +func init() { + t["GuestAuthNamedSubject"] = reflect.TypeOf((*GuestAuthNamedSubject)(nil)).Elem() +} + +type GuestAuthSubject struct { + DynamicData +} + +func init() { + t["GuestAuthSubject"] = reflect.TypeOf((*GuestAuthSubject)(nil)).Elem() +} + +type GuestAuthentication struct { + DynamicData + + InteractiveSession bool `xml:"interactiveSession"` +} + +func init() { + t["GuestAuthentication"] = reflect.TypeOf((*GuestAuthentication)(nil)).Elem() +} + +type GuestAuthenticationChallenge struct { + GuestOperationsFault + + ServerChallenge BaseGuestAuthentication `xml:"serverChallenge,typeattr"` + SessionID int64 `xml:"sessionID"` +} + +func init() { + t["GuestAuthenticationChallenge"] = reflect.TypeOf((*GuestAuthenticationChallenge)(nil)).Elem() +} + +type GuestAuthenticationChallengeFault GuestAuthenticationChallenge 
+ +func init() { + t["GuestAuthenticationChallengeFault"] = reflect.TypeOf((*GuestAuthenticationChallengeFault)(nil)).Elem() +} + +type GuestComponentsOutOfDate struct { + GuestOperationsFault +} + +func init() { + t["GuestComponentsOutOfDate"] = reflect.TypeOf((*GuestComponentsOutOfDate)(nil)).Elem() +} + +type GuestComponentsOutOfDateFault GuestComponentsOutOfDate + +func init() { + t["GuestComponentsOutOfDateFault"] = reflect.TypeOf((*GuestComponentsOutOfDateFault)(nil)).Elem() +} + +type GuestDiskInfo struct { + DynamicData + + DiskPath string `xml:"diskPath,omitempty"` + Capacity int64 `xml:"capacity,omitempty"` + FreeSpace int64 `xml:"freeSpace,omitempty"` +} + +func init() { + t["GuestDiskInfo"] = reflect.TypeOf((*GuestDiskInfo)(nil)).Elem() +} + +type GuestFileAttributes struct { + DynamicData + + ModificationTime *time.Time `xml:"modificationTime"` + AccessTime *time.Time `xml:"accessTime"` + SymlinkTarget string `xml:"symlinkTarget,omitempty"` +} + +func init() { + t["GuestFileAttributes"] = reflect.TypeOf((*GuestFileAttributes)(nil)).Elem() +} + +type GuestFileInfo struct { + DynamicData + + Path string `xml:"path"` + Type string `xml:"type"` + Size int64 `xml:"size"` + Attributes BaseGuestFileAttributes `xml:"attributes,typeattr"` +} + +func init() { + t["GuestFileInfo"] = reflect.TypeOf((*GuestFileInfo)(nil)).Elem() +} + +type GuestInfo struct { + DynamicData + + ToolsStatus VirtualMachineToolsStatus `xml:"toolsStatus,omitempty"` + ToolsVersionStatus string `xml:"toolsVersionStatus,omitempty"` + ToolsVersionStatus2 string `xml:"toolsVersionStatus2,omitempty"` + ToolsRunningStatus string `xml:"toolsRunningStatus,omitempty"` + ToolsVersion string `xml:"toolsVersion,omitempty"` + GuestId string `xml:"guestId,omitempty"` + GuestFamily string `xml:"guestFamily,omitempty"` + GuestFullName string `xml:"guestFullName,omitempty"` + HostName string `xml:"hostName,omitempty"` + IpAddress string `xml:"ipAddress,omitempty"` + Net []GuestNicInfo `xml:"net,omitempty"` + IpStack []GuestStackInfo `xml:"ipStack,omitempty"` + Disk []GuestDiskInfo `xml:"disk,omitempty"` + Screen *GuestScreenInfo `xml:"screen,omitempty"` + GuestState string `xml:"guestState"` + AppHeartbeatStatus string `xml:"appHeartbeatStatus,omitempty"` + GuestKernelCrashed *bool `xml:"guestKernelCrashed"` + AppState string `xml:"appState,omitempty"` + GuestOperationsReady *bool `xml:"guestOperationsReady"` + InteractiveGuestOperationsReady *bool `xml:"interactiveGuestOperationsReady"` + GuestStateChangeSupported *bool `xml:"guestStateChangeSupported"` + GenerationInfo []GuestInfoNamespaceGenerationInfo `xml:"generationInfo,omitempty"` +} + +func init() { + t["GuestInfo"] = reflect.TypeOf((*GuestInfo)(nil)).Elem() +} + +type GuestInfoNamespaceGenerationInfo struct { + DynamicData + + Key string `xml:"key"` + GenerationNo int `xml:"generationNo"` +} + +func init() { + t["GuestInfoNamespaceGenerationInfo"] = reflect.TypeOf((*GuestInfoNamespaceGenerationInfo)(nil)).Elem() +} + +type GuestListFileInfo struct { + DynamicData + + Files []GuestFileInfo `xml:"files,omitempty"` + Remaining int `xml:"remaining"` +} + +func init() { + t["GuestListFileInfo"] = reflect.TypeOf((*GuestListFileInfo)(nil)).Elem() +} + +type GuestMappedAliases struct { + DynamicData + + Base64Cert string `xml:"base64Cert"` + Username string `xml:"username"` + Subjects []BaseGuestAuthSubject `xml:"subjects,typeattr"` +} + +func init() { + t["GuestMappedAliases"] = reflect.TypeOf((*GuestMappedAliases)(nil)).Elem() +} + +type GuestMultipleMappings struct { + 
GuestOperationsFault +} + +func init() { + t["GuestMultipleMappings"] = reflect.TypeOf((*GuestMultipleMappings)(nil)).Elem() +} + +type GuestMultipleMappingsFault GuestMultipleMappings + +func init() { + t["GuestMultipleMappingsFault"] = reflect.TypeOf((*GuestMultipleMappingsFault)(nil)).Elem() +} + +type GuestNicInfo struct { + DynamicData + + Network string `xml:"network,omitempty"` + IpAddress []string `xml:"ipAddress,omitempty"` + MacAddress string `xml:"macAddress,omitempty"` + Connected bool `xml:"connected"` + DeviceConfigId int `xml:"deviceConfigId"` + DnsConfig *NetDnsConfigInfo `xml:"dnsConfig,omitempty"` + IpConfig *NetIpConfigInfo `xml:"ipConfig,omitempty"` + NetBIOSConfig BaseNetBIOSConfigInfo `xml:"netBIOSConfig,omitempty,typeattr"` +} + +func init() { + t["GuestNicInfo"] = reflect.TypeOf((*GuestNicInfo)(nil)).Elem() +} + +type GuestOperationsFault struct { + VimFault +} + +func init() { + t["GuestOperationsFault"] = reflect.TypeOf((*GuestOperationsFault)(nil)).Elem() +} + +type GuestOperationsFaultFault BaseGuestOperationsFault + +func init() { + t["GuestOperationsFaultFault"] = reflect.TypeOf((*GuestOperationsFaultFault)(nil)).Elem() +} + +type GuestOperationsUnavailable struct { + GuestOperationsFault +} + +func init() { + t["GuestOperationsUnavailable"] = reflect.TypeOf((*GuestOperationsUnavailable)(nil)).Elem() +} + +type GuestOperationsUnavailableFault GuestOperationsUnavailable + +func init() { + t["GuestOperationsUnavailableFault"] = reflect.TypeOf((*GuestOperationsUnavailableFault)(nil)).Elem() +} + +type GuestOsDescriptor struct { + DynamicData + + Id string `xml:"id"` + Family string `xml:"family"` + FullName string `xml:"fullName"` + SupportedMaxCPUs int `xml:"supportedMaxCPUs"` + NumSupportedPhysicalSockets int `xml:"numSupportedPhysicalSockets,omitempty"` + NumSupportedCoresPerSocket int `xml:"numSupportedCoresPerSocket,omitempty"` + SupportedMinMemMB int `xml:"supportedMinMemMB"` + SupportedMaxMemMB int `xml:"supportedMaxMemMB"` + RecommendedMemMB int `xml:"recommendedMemMB"` + RecommendedColorDepth int `xml:"recommendedColorDepth"` + SupportedDiskControllerList []string `xml:"supportedDiskControllerList"` + RecommendedSCSIController string `xml:"recommendedSCSIController,omitempty"` + RecommendedDiskController string `xml:"recommendedDiskController"` + SupportedNumDisks int `xml:"supportedNumDisks"` + RecommendedDiskSizeMB int `xml:"recommendedDiskSizeMB"` + RecommendedCdromController string `xml:"recommendedCdromController,omitempty"` + SupportedEthernetCard []string `xml:"supportedEthernetCard"` + RecommendedEthernetCard string `xml:"recommendedEthernetCard,omitempty"` + SupportsSlaveDisk *bool `xml:"supportsSlaveDisk"` + CpuFeatureMask []HostCpuIdInfo `xml:"cpuFeatureMask,omitempty"` + SmcRequired *bool `xml:"smcRequired"` + SupportsWakeOnLan bool `xml:"supportsWakeOnLan"` + SupportsVMI *bool `xml:"supportsVMI"` + SupportsMemoryHotAdd *bool `xml:"supportsMemoryHotAdd"` + SupportsCpuHotAdd *bool `xml:"supportsCpuHotAdd"` + SupportsCpuHotRemove *bool `xml:"supportsCpuHotRemove"` + SupportedFirmware []string `xml:"supportedFirmware,omitempty"` + RecommendedFirmware string `xml:"recommendedFirmware,omitempty"` + SupportedUSBControllerList []string `xml:"supportedUSBControllerList,omitempty"` + RecommendedUSBController string `xml:"recommendedUSBController,omitempty"` + Supports3D *bool `xml:"supports3D"` + Recommended3D *bool `xml:"recommended3D"` + SmcRecommended *bool `xml:"smcRecommended"` + Ich7mRecommended *bool `xml:"ich7mRecommended"` + UsbRecommended 
*bool `xml:"usbRecommended"` + SupportLevel string `xml:"supportLevel,omitempty"` + SupportedForCreate *bool `xml:"supportedForCreate"` + VRAMSizeInKB *IntOption `xml:"vRAMSizeInKB,omitempty"` + NumSupportedFloppyDevices int `xml:"numSupportedFloppyDevices,omitempty"` + WakeOnLanEthernetCard []string `xml:"wakeOnLanEthernetCard,omitempty"` + SupportsPvscsiControllerForBoot *bool `xml:"supportsPvscsiControllerForBoot"` + DiskUuidEnabled *bool `xml:"diskUuidEnabled"` + SupportsHotPlugPCI *bool `xml:"supportsHotPlugPCI"` +} + +func init() { + t["GuestOsDescriptor"] = reflect.TypeOf((*GuestOsDescriptor)(nil)).Elem() +} + +type GuestPermissionDenied struct { + GuestOperationsFault +} + +func init() { + t["GuestPermissionDenied"] = reflect.TypeOf((*GuestPermissionDenied)(nil)).Elem() +} + +type GuestPermissionDeniedFault GuestPermissionDenied + +func init() { + t["GuestPermissionDeniedFault"] = reflect.TypeOf((*GuestPermissionDeniedFault)(nil)).Elem() +} + +type GuestPosixFileAttributes struct { + GuestFileAttributes + + OwnerId int `xml:"ownerId,omitempty"` + GroupId int `xml:"groupId,omitempty"` + Permissions int64 `xml:"permissions,omitempty"` +} + +func init() { + t["GuestPosixFileAttributes"] = reflect.TypeOf((*GuestPosixFileAttributes)(nil)).Elem() +} + +type GuestProcessInfo struct { + DynamicData + + Name string `xml:"name"` + Pid int64 `xml:"pid"` + Owner string `xml:"owner"` + CmdLine string `xml:"cmdLine"` + StartTime time.Time `xml:"startTime"` + EndTime *time.Time `xml:"endTime"` + ExitCode int `xml:"exitCode,omitempty"` +} + +func init() { + t["GuestProcessInfo"] = reflect.TypeOf((*GuestProcessInfo)(nil)).Elem() +} + +type GuestProcessNotFound struct { + GuestOperationsFault + + Pid int64 `xml:"pid"` +} + +func init() { + t["GuestProcessNotFound"] = reflect.TypeOf((*GuestProcessNotFound)(nil)).Elem() +} + +type GuestProcessNotFoundFault GuestProcessNotFound + +func init() { + t["GuestProcessNotFoundFault"] = reflect.TypeOf((*GuestProcessNotFoundFault)(nil)).Elem() +} + +type GuestProgramSpec struct { + DynamicData + + ProgramPath string `xml:"programPath"` + Arguments string `xml:"arguments"` + WorkingDirectory string `xml:"workingDirectory,omitempty"` + EnvVariables []string `xml:"envVariables,omitempty"` +} + +func init() { + t["GuestProgramSpec"] = reflect.TypeOf((*GuestProgramSpec)(nil)).Elem() +} + +type GuestRegKeyNameSpec struct { + DynamicData + + RegistryPath string `xml:"registryPath"` + WowBitness string `xml:"wowBitness"` +} + +func init() { + t["GuestRegKeyNameSpec"] = reflect.TypeOf((*GuestRegKeyNameSpec)(nil)).Elem() +} + +type GuestRegKeyRecordSpec struct { + DynamicData + + Key GuestRegKeySpec `xml:"key"` + Fault *LocalizedMethodFault `xml:"fault,omitempty"` +} + +func init() { + t["GuestRegKeyRecordSpec"] = reflect.TypeOf((*GuestRegKeyRecordSpec)(nil)).Elem() +} + +type GuestRegKeySpec struct { + DynamicData + + KeyName GuestRegKeyNameSpec `xml:"keyName"` + ClassType string `xml:"classType"` + LastWritten time.Time `xml:"lastWritten"` +} + +func init() { + t["GuestRegKeySpec"] = reflect.TypeOf((*GuestRegKeySpec)(nil)).Elem() +} + +type GuestRegValueBinarySpec struct { + GuestRegValueDataSpec + + Value []byte `xml:"value,omitempty"` +} + +func init() { + t["GuestRegValueBinarySpec"] = reflect.TypeOf((*GuestRegValueBinarySpec)(nil)).Elem() +} + +type GuestRegValueDataSpec struct { + DynamicData +} + +func init() { + t["GuestRegValueDataSpec"] = reflect.TypeOf((*GuestRegValueDataSpec)(nil)).Elem() +} + +type GuestRegValueDwordSpec struct { + GuestRegValueDataSpec + + 
Value int `xml:"value"` +} + +func init() { + t["GuestRegValueDwordSpec"] = reflect.TypeOf((*GuestRegValueDwordSpec)(nil)).Elem() +} + +type GuestRegValueExpandStringSpec struct { + GuestRegValueDataSpec + + Value string `xml:"value,omitempty"` +} + +func init() { + t["GuestRegValueExpandStringSpec"] = reflect.TypeOf((*GuestRegValueExpandStringSpec)(nil)).Elem() +} + +type GuestRegValueMultiStringSpec struct { + GuestRegValueDataSpec + + Value []string `xml:"value,omitempty"` +} + +func init() { + t["GuestRegValueMultiStringSpec"] = reflect.TypeOf((*GuestRegValueMultiStringSpec)(nil)).Elem() +} + +type GuestRegValueNameSpec struct { + DynamicData + + KeyName GuestRegKeyNameSpec `xml:"keyName"` + Name string `xml:"name"` +} + +func init() { + t["GuestRegValueNameSpec"] = reflect.TypeOf((*GuestRegValueNameSpec)(nil)).Elem() +} + +type GuestRegValueQwordSpec struct { + GuestRegValueDataSpec + + Value int64 `xml:"value"` +} + +func init() { + t["GuestRegValueQwordSpec"] = reflect.TypeOf((*GuestRegValueQwordSpec)(nil)).Elem() +} + +type GuestRegValueSpec struct { + DynamicData + + Name GuestRegValueNameSpec `xml:"name"` + Data BaseGuestRegValueDataSpec `xml:"data,typeattr"` +} + +func init() { + t["GuestRegValueSpec"] = reflect.TypeOf((*GuestRegValueSpec)(nil)).Elem() +} + +type GuestRegValueStringSpec struct { + GuestRegValueDataSpec + + Value string `xml:"value,omitempty"` +} + +func init() { + t["GuestRegValueStringSpec"] = reflect.TypeOf((*GuestRegValueStringSpec)(nil)).Elem() +} + +type GuestRegistryFault struct { + GuestOperationsFault + + WindowsSystemErrorCode int64 `xml:"windowsSystemErrorCode"` +} + +func init() { + t["GuestRegistryFault"] = reflect.TypeOf((*GuestRegistryFault)(nil)).Elem() +} + +type GuestRegistryFaultFault BaseGuestRegistryFault + +func init() { + t["GuestRegistryFaultFault"] = reflect.TypeOf((*GuestRegistryFaultFault)(nil)).Elem() +} + +type GuestRegistryKeyAlreadyExists struct { + GuestRegistryKeyFault +} + +func init() { + t["GuestRegistryKeyAlreadyExists"] = reflect.TypeOf((*GuestRegistryKeyAlreadyExists)(nil)).Elem() +} + +type GuestRegistryKeyAlreadyExistsFault GuestRegistryKeyAlreadyExists + +func init() { + t["GuestRegistryKeyAlreadyExistsFault"] = reflect.TypeOf((*GuestRegistryKeyAlreadyExistsFault)(nil)).Elem() +} + +type GuestRegistryKeyFault struct { + GuestRegistryFault + + KeyName string `xml:"keyName"` +} + +func init() { + t["GuestRegistryKeyFault"] = reflect.TypeOf((*GuestRegistryKeyFault)(nil)).Elem() +} + +type GuestRegistryKeyFaultFault BaseGuestRegistryKeyFault + +func init() { + t["GuestRegistryKeyFaultFault"] = reflect.TypeOf((*GuestRegistryKeyFaultFault)(nil)).Elem() +} + +type GuestRegistryKeyHasSubkeys struct { + GuestRegistryKeyFault +} + +func init() { + t["GuestRegistryKeyHasSubkeys"] = reflect.TypeOf((*GuestRegistryKeyHasSubkeys)(nil)).Elem() +} + +type GuestRegistryKeyHasSubkeysFault GuestRegistryKeyHasSubkeys + +func init() { + t["GuestRegistryKeyHasSubkeysFault"] = reflect.TypeOf((*GuestRegistryKeyHasSubkeysFault)(nil)).Elem() +} + +type GuestRegistryKeyInvalid struct { + GuestRegistryKeyFault +} + +func init() { + t["GuestRegistryKeyInvalid"] = reflect.TypeOf((*GuestRegistryKeyInvalid)(nil)).Elem() +} + +type GuestRegistryKeyInvalidFault GuestRegistryKeyInvalid + +func init() { + t["GuestRegistryKeyInvalidFault"] = reflect.TypeOf((*GuestRegistryKeyInvalidFault)(nil)).Elem() +} + +type GuestRegistryKeyParentVolatile struct { + GuestRegistryKeyFault +} + +func init() { + t["GuestRegistryKeyParentVolatile"] = 
reflect.TypeOf((*GuestRegistryKeyParentVolatile)(nil)).Elem() +} + +type GuestRegistryKeyParentVolatileFault GuestRegistryKeyParentVolatile + +func init() { + t["GuestRegistryKeyParentVolatileFault"] = reflect.TypeOf((*GuestRegistryKeyParentVolatileFault)(nil)).Elem() +} + +type GuestRegistryValueFault struct { + GuestRegistryFault + + KeyName string `xml:"keyName"` + ValueName string `xml:"valueName"` +} + +func init() { + t["GuestRegistryValueFault"] = reflect.TypeOf((*GuestRegistryValueFault)(nil)).Elem() +} + +type GuestRegistryValueFaultFault BaseGuestRegistryValueFault + +func init() { + t["GuestRegistryValueFaultFault"] = reflect.TypeOf((*GuestRegistryValueFaultFault)(nil)).Elem() +} + +type GuestRegistryValueNotFound struct { + GuestRegistryValueFault +} + +func init() { + t["GuestRegistryValueNotFound"] = reflect.TypeOf((*GuestRegistryValueNotFound)(nil)).Elem() +} + +type GuestRegistryValueNotFoundFault GuestRegistryValueNotFound + +func init() { + t["GuestRegistryValueNotFoundFault"] = reflect.TypeOf((*GuestRegistryValueNotFoundFault)(nil)).Elem() +} + +type GuestScreenInfo struct { + DynamicData + + Width int `xml:"width"` + Height int `xml:"height"` +} + +func init() { + t["GuestScreenInfo"] = reflect.TypeOf((*GuestScreenInfo)(nil)).Elem() +} + +type GuestStackInfo struct { + DynamicData + + DnsConfig *NetDnsConfigInfo `xml:"dnsConfig,omitempty"` + IpRouteConfig *NetIpRouteConfigInfo `xml:"ipRouteConfig,omitempty"` + IpStackConfig []KeyValue `xml:"ipStackConfig,omitempty"` + DhcpConfig *NetDhcpConfigInfo `xml:"dhcpConfig,omitempty"` +} + +func init() { + t["GuestStackInfo"] = reflect.TypeOf((*GuestStackInfo)(nil)).Elem() +} + +type GuestWindowsFileAttributes struct { + GuestFileAttributes + + Hidden *bool `xml:"hidden"` + ReadOnly *bool `xml:"readOnly"` + CreateTime *time.Time `xml:"createTime"` +} + +func init() { + t["GuestWindowsFileAttributes"] = reflect.TypeOf((*GuestWindowsFileAttributes)(nil)).Elem() +} + +type GuestWindowsProgramSpec struct { + GuestProgramSpec + + StartMinimized bool `xml:"startMinimized"` +} + +func init() { + t["GuestWindowsProgramSpec"] = reflect.TypeOf((*GuestWindowsProgramSpec)(nil)).Elem() +} + +type HAErrorsAtDest struct { + MigrationFault +} + +func init() { + t["HAErrorsAtDest"] = reflect.TypeOf((*HAErrorsAtDest)(nil)).Elem() +} + +type HAErrorsAtDestFault HAErrorsAtDest + +func init() { + t["HAErrorsAtDestFault"] = reflect.TypeOf((*HAErrorsAtDestFault)(nil)).Elem() +} + +type HasPrivilegeOnEntities HasPrivilegeOnEntitiesRequestType + +func init() { + t["HasPrivilegeOnEntities"] = reflect.TypeOf((*HasPrivilegeOnEntities)(nil)).Elem() +} + +type HasPrivilegeOnEntitiesRequestType struct { + This ManagedObjectReference `xml:"_this"` + Entity []ManagedObjectReference `xml:"entity"` + SessionId string `xml:"sessionId"` + PrivId []string `xml:"privId,omitempty"` +} + +func init() { + t["HasPrivilegeOnEntitiesRequestType"] = reflect.TypeOf((*HasPrivilegeOnEntitiesRequestType)(nil)).Elem() +} + +type HasPrivilegeOnEntitiesResponse struct { + Returnval []EntityPrivilege `xml:"returnval,omitempty"` +} + +type HasPrivilegeOnEntity HasPrivilegeOnEntityRequestType + +func init() { + t["HasPrivilegeOnEntity"] = reflect.TypeOf((*HasPrivilegeOnEntity)(nil)).Elem() +} + +type HasPrivilegeOnEntityRequestType struct { + This ManagedObjectReference `xml:"_this"` + Entity ManagedObjectReference `xml:"entity"` + SessionId string `xml:"sessionId"` + PrivId []string `xml:"privId,omitempty"` +} + +func init() { + t["HasPrivilegeOnEntityRequestType"] = 
reflect.TypeOf((*HasPrivilegeOnEntityRequestType)(nil)).Elem() +} + +type HasPrivilegeOnEntityResponse struct { + Returnval []bool `xml:"returnval,omitempty"` +} + +type HbrDiskMigrationAction struct { + ClusterAction + + CollectionId string `xml:"collectionId"` + CollectionName string `xml:"collectionName"` + DiskIds []string `xml:"diskIds"` + Source ManagedObjectReference `xml:"source"` + Destination ManagedObjectReference `xml:"destination"` + SizeTransferred int64 `xml:"sizeTransferred"` + SpaceUtilSrcBefore float32 `xml:"spaceUtilSrcBefore,omitempty"` + SpaceUtilDstBefore float32 `xml:"spaceUtilDstBefore,omitempty"` + SpaceUtilSrcAfter float32 `xml:"spaceUtilSrcAfter,omitempty"` + SpaceUtilDstAfter float32 `xml:"spaceUtilDstAfter,omitempty"` + IoLatencySrcBefore float32 `xml:"ioLatencySrcBefore,omitempty"` + IoLatencyDstBefore float32 `xml:"ioLatencyDstBefore,omitempty"` +} + +func init() { + t["HbrDiskMigrationAction"] = reflect.TypeOf((*HbrDiskMigrationAction)(nil)).Elem() +} + +type HbrManagerReplicationVmInfo struct { + DynamicData + + State string `xml:"state"` + ProgressInfo *ReplicationVmProgressInfo `xml:"progressInfo,omitempty"` + ImageId string `xml:"imageId,omitempty"` + LastError *LocalizedMethodFault `xml:"lastError,omitempty"` +} + +func init() { + t["HbrManagerReplicationVmInfo"] = reflect.TypeOf((*HbrManagerReplicationVmInfo)(nil)).Elem() +} + +type HbrManagerVmReplicationCapability struct { + DynamicData + + Vm ManagedObjectReference `xml:"vm"` + SupportedQuiesceMode string `xml:"supportedQuiesceMode"` + CompressionSupported bool `xml:"compressionSupported"` + MaxSupportedSourceDiskCapacity int64 `xml:"maxSupportedSourceDiskCapacity"` + MinRpo int64 `xml:"minRpo,omitempty"` + Fault *LocalizedMethodFault `xml:"fault,omitempty"` +} + +func init() { + t["HbrManagerVmReplicationCapability"] = reflect.TypeOf((*HbrManagerVmReplicationCapability)(nil)).Elem() +} + +type HealthStatusChangedEvent struct { + Event + + ComponentId string `xml:"componentId"` + OldStatus string `xml:"oldStatus"` + NewStatus string `xml:"newStatus"` + ComponentName string `xml:"componentName"` + ServiceId string `xml:"serviceId,omitempty"` +} + +func init() { + t["HealthStatusChangedEvent"] = reflect.TypeOf((*HealthStatusChangedEvent)(nil)).Elem() +} + +type HealthSystemRuntime struct { + DynamicData + + SystemHealthInfo *HostSystemHealthInfo `xml:"systemHealthInfo,omitempty"` + HardwareStatusInfo *HostHardwareStatusInfo `xml:"hardwareStatusInfo,omitempty"` +} + +func init() { + t["HealthSystemRuntime"] = reflect.TypeOf((*HealthSystemRuntime)(nil)).Elem() +} + +type HeterogenousHostsBlockingEVC struct { + EVCConfigFault +} + +func init() { + t["HeterogenousHostsBlockingEVC"] = reflect.TypeOf((*HeterogenousHostsBlockingEVC)(nil)).Elem() +} + +type HeterogenousHostsBlockingEVCFault HeterogenousHostsBlockingEVC + +func init() { + t["HeterogenousHostsBlockingEVCFault"] = reflect.TypeOf((*HeterogenousHostsBlockingEVCFault)(nil)).Elem() +} + +type HostAccessControlEntry struct { + DynamicData + + Principal string `xml:"principal"` + Group bool `xml:"group"` + AccessMode HostAccessMode `xml:"accessMode"` +} + +func init() { + t["HostAccessControlEntry"] = reflect.TypeOf((*HostAccessControlEntry)(nil)).Elem() +} + +type HostAccessRestrictedToManagementServer struct { + NotSupported + + ManagementServer string `xml:"managementServer"` +} + +func init() { + t["HostAccessRestrictedToManagementServer"] = reflect.TypeOf((*HostAccessRestrictedToManagementServer)(nil)).Elem() +} + +type 
HostAccessRestrictedToManagementServerFault HostAccessRestrictedToManagementServer + +func init() { + t["HostAccessRestrictedToManagementServerFault"] = reflect.TypeOf((*HostAccessRestrictedToManagementServerFault)(nil)).Elem() +} + +type HostAccountSpec struct { + DynamicData + + Id string `xml:"id"` + Password string `xml:"password,omitempty"` + Description string `xml:"description,omitempty"` +} + +func init() { + t["HostAccountSpec"] = reflect.TypeOf((*HostAccountSpec)(nil)).Elem() +} + +type HostActiveDirectory struct { + DynamicData + + ChangeOperation string `xml:"changeOperation"` + Spec *HostActiveDirectorySpec `xml:"spec,omitempty"` +} + +func init() { + t["HostActiveDirectory"] = reflect.TypeOf((*HostActiveDirectory)(nil)).Elem() +} + +type HostActiveDirectoryInfo struct { + HostDirectoryStoreInfo + + JoinedDomain string `xml:"joinedDomain,omitempty"` + TrustedDomain []string `xml:"trustedDomain,omitempty"` + DomainMembershipStatus string `xml:"domainMembershipStatus,omitempty"` + SmartCardAuthenticationEnabled *bool `xml:"smartCardAuthenticationEnabled"` +} + +func init() { + t["HostActiveDirectoryInfo"] = reflect.TypeOf((*HostActiveDirectoryInfo)(nil)).Elem() +} + +type HostActiveDirectorySpec struct { + DynamicData + + DomainName string `xml:"domainName,omitempty"` + UserName string `xml:"userName,omitempty"` + Password string `xml:"password,omitempty"` + CamServer string `xml:"camServer,omitempty"` + Thumbprint string `xml:"thumbprint,omitempty"` + SmartCardAuthenticationEnabled *bool `xml:"smartCardAuthenticationEnabled"` + SmartCardTrustAnchors []string `xml:"smartCardTrustAnchors,omitempty"` +} + +func init() { + t["HostActiveDirectorySpec"] = reflect.TypeOf((*HostActiveDirectorySpec)(nil)).Elem() +} + +type HostAddFailedEvent struct { + HostEvent + + Hostname string `xml:"hostname"` +} + +func init() { + t["HostAddFailedEvent"] = reflect.TypeOf((*HostAddFailedEvent)(nil)).Elem() +} + +type HostAddedEvent struct { + HostEvent +} + +func init() { + t["HostAddedEvent"] = reflect.TypeOf((*HostAddedEvent)(nil)).Elem() +} + +type HostAdminDisableEvent struct { + HostEvent +} + +func init() { + t["HostAdminDisableEvent"] = reflect.TypeOf((*HostAdminDisableEvent)(nil)).Elem() +} + +type HostAdminEnableEvent struct { + HostEvent +} + +func init() { + t["HostAdminEnableEvent"] = reflect.TypeOf((*HostAdminEnableEvent)(nil)).Elem() +} + +type HostApplyProfile struct { + ApplyProfile + + Memory *HostMemoryProfile `xml:"memory,omitempty"` + Storage *StorageProfile `xml:"storage,omitempty"` + Network *NetworkProfile `xml:"network,omitempty"` + Datetime *DateTimeProfile `xml:"datetime,omitempty"` + Firewall *FirewallProfile `xml:"firewall,omitempty"` + Security *SecurityProfile `xml:"security,omitempty"` + Service []ServiceProfile `xml:"service,omitempty"` + Option []OptionProfile `xml:"option,omitempty"` + UserAccount []UserProfile `xml:"userAccount,omitempty"` + UsergroupAccount []UserGroupProfile `xml:"usergroupAccount,omitempty"` + Authentication *AuthenticationProfile `xml:"authentication,omitempty"` +} + +func init() { + t["HostApplyProfile"] = reflect.TypeOf((*HostApplyProfile)(nil)).Elem() +} + +type HostAuthenticationManagerInfo struct { + DynamicData + + AuthConfig []BaseHostAuthenticationStoreInfo `xml:"authConfig,typeattr"` +} + +func init() { + t["HostAuthenticationManagerInfo"] = reflect.TypeOf((*HostAuthenticationManagerInfo)(nil)).Elem() +} + +type HostAuthenticationStoreInfo struct { + DynamicData + + Enabled bool `xml:"enabled"` +} + +func init() { + 
t["HostAuthenticationStoreInfo"] = reflect.TypeOf((*HostAuthenticationStoreInfo)(nil)).Elem() +} + +type HostAutoStartManagerConfig struct { + DynamicData + + Defaults *AutoStartDefaults `xml:"defaults,omitempty"` + PowerInfo []AutoStartPowerInfo `xml:"powerInfo,omitempty"` +} + +func init() { + t["HostAutoStartManagerConfig"] = reflect.TypeOf((*HostAutoStartManagerConfig)(nil)).Elem() +} + +type HostBIOSInfo struct { + DynamicData + + BiosVersion string `xml:"biosVersion,omitempty"` + ReleaseDate *time.Time `xml:"releaseDate"` +} + +func init() { + t["HostBIOSInfo"] = reflect.TypeOf((*HostBIOSInfo)(nil)).Elem() +} + +type HostBlockAdapterTargetTransport struct { + HostTargetTransport +} + +func init() { + t["HostBlockAdapterTargetTransport"] = reflect.TypeOf((*HostBlockAdapterTargetTransport)(nil)).Elem() +} + +type HostBlockHba struct { + HostHostBusAdapter +} + +func init() { + t["HostBlockHba"] = reflect.TypeOf((*HostBlockHba)(nil)).Elem() +} + +type HostBootDevice struct { + DynamicData + + Key string `xml:"key"` + Description string `xml:"description"` +} + +func init() { + t["HostBootDevice"] = reflect.TypeOf((*HostBootDevice)(nil)).Elem() +} + +type HostBootDeviceInfo struct { + DynamicData + + BootDevices []HostBootDevice `xml:"bootDevices,omitempty"` + CurrentBootDeviceKey string `xml:"currentBootDeviceKey,omitempty"` +} + +func init() { + t["HostBootDeviceInfo"] = reflect.TypeOf((*HostBootDeviceInfo)(nil)).Elem() +} + +type HostCacheConfigurationInfo struct { + DynamicData + + Key ManagedObjectReference `xml:"key"` + SwapSize int64 `xml:"swapSize"` +} + +func init() { + t["HostCacheConfigurationInfo"] = reflect.TypeOf((*HostCacheConfigurationInfo)(nil)).Elem() +} + +type HostCacheConfigurationSpec struct { + DynamicData + + Datastore ManagedObjectReference `xml:"datastore"` + SwapSize int64 `xml:"swapSize"` +} + +func init() { + t["HostCacheConfigurationSpec"] = reflect.TypeOf((*HostCacheConfigurationSpec)(nil)).Elem() +} + +type HostCapability struct { + DynamicData + + RecursiveResourcePoolsSupported bool `xml:"recursiveResourcePoolsSupported"` + CpuMemoryResourceConfigurationSupported bool `xml:"cpuMemoryResourceConfigurationSupported"` + RebootSupported bool `xml:"rebootSupported"` + ShutdownSupported bool `xml:"shutdownSupported"` + VmotionSupported bool `xml:"vmotionSupported"` + StandbySupported bool `xml:"standbySupported"` + IpmiSupported *bool `xml:"ipmiSupported"` + MaxSupportedVMs int `xml:"maxSupportedVMs,omitempty"` + MaxRunningVMs int `xml:"maxRunningVMs,omitempty"` + MaxSupportedVcpus int `xml:"maxSupportedVcpus,omitempty"` + MaxRegisteredVMs int `xml:"maxRegisteredVMs,omitempty"` + DatastorePrincipalSupported bool `xml:"datastorePrincipalSupported"` + SanSupported bool `xml:"sanSupported"` + NfsSupported bool `xml:"nfsSupported"` + IscsiSupported bool `xml:"iscsiSupported"` + VlanTaggingSupported bool `xml:"vlanTaggingSupported"` + NicTeamingSupported bool `xml:"nicTeamingSupported"` + HighGuestMemSupported bool `xml:"highGuestMemSupported"` + MaintenanceModeSupported bool `xml:"maintenanceModeSupported"` + SuspendedRelocateSupported bool `xml:"suspendedRelocateSupported"` + RestrictedSnapshotRelocateSupported bool `xml:"restrictedSnapshotRelocateSupported"` + PerVmSwapFiles bool `xml:"perVmSwapFiles"` + LocalSwapDatastoreSupported bool `xml:"localSwapDatastoreSupported"` + UnsharedSwapVMotionSupported bool `xml:"unsharedSwapVMotionSupported"` + BackgroundSnapshotsSupported bool `xml:"backgroundSnapshotsSupported"` + PreAssignedPCIUnitNumbersSupported bool 
`xml:"preAssignedPCIUnitNumbersSupported"` + ScreenshotSupported bool `xml:"screenshotSupported"` + ScaledScreenshotSupported bool `xml:"scaledScreenshotSupported"` + StorageVMotionSupported *bool `xml:"storageVMotionSupported"` + VmotionWithStorageVMotionSupported *bool `xml:"vmotionWithStorageVMotionSupported"` + VmotionAcrossNetworkSupported *bool `xml:"vmotionAcrossNetworkSupported"` + MaxNumDisksSVMotion int `xml:"maxNumDisksSVMotion,omitempty"` + HbrNicSelectionSupported *bool `xml:"hbrNicSelectionSupported"` + VrNfcNicSelectionSupported *bool `xml:"vrNfcNicSelectionSupported"` + RecordReplaySupported *bool `xml:"recordReplaySupported"` + FtSupported *bool `xml:"ftSupported"` + ReplayUnsupportedReason string `xml:"replayUnsupportedReason,omitempty"` + ReplayCompatibilityIssues []string `xml:"replayCompatibilityIssues,omitempty"` + SmpFtSupported *bool `xml:"smpFtSupported"` + FtCompatibilityIssues []string `xml:"ftCompatibilityIssues,omitempty"` + SmpFtCompatibilityIssues []string `xml:"smpFtCompatibilityIssues,omitempty"` + MaxVcpusPerFtVm int `xml:"maxVcpusPerFtVm,omitempty"` + LoginBySSLThumbprintSupported *bool `xml:"loginBySSLThumbprintSupported"` + CloneFromSnapshotSupported *bool `xml:"cloneFromSnapshotSupported"` + DeltaDiskBackingsSupported *bool `xml:"deltaDiskBackingsSupported"` + PerVMNetworkTrafficShapingSupported *bool `xml:"perVMNetworkTrafficShapingSupported"` + TpmSupported *bool `xml:"tpmSupported"` + SupportedCpuFeature []HostCpuIdInfo `xml:"supportedCpuFeature,omitempty"` + VirtualExecUsageSupported *bool `xml:"virtualExecUsageSupported"` + StorageIORMSupported *bool `xml:"storageIORMSupported"` + VmDirectPathGen2Supported *bool `xml:"vmDirectPathGen2Supported"` + VmDirectPathGen2UnsupportedReason []string `xml:"vmDirectPathGen2UnsupportedReason,omitempty"` + VmDirectPathGen2UnsupportedReasonExtended string `xml:"vmDirectPathGen2UnsupportedReasonExtended,omitempty"` + SupportedVmfsMajorVersion []int `xml:"supportedVmfsMajorVersion,omitempty"` + VStorageCapable *bool `xml:"vStorageCapable"` + SnapshotRelayoutSupported *bool `xml:"snapshotRelayoutSupported"` + FirewallIpRulesSupported *bool `xml:"firewallIpRulesSupported"` + ServicePackageInfoSupported *bool `xml:"servicePackageInfoSupported"` + MaxHostRunningVms int `xml:"maxHostRunningVms,omitempty"` + MaxHostSupportedVcpus int `xml:"maxHostSupportedVcpus,omitempty"` + VmfsDatastoreMountCapable *bool `xml:"vmfsDatastoreMountCapable"` + EightPlusHostVmfsSharedAccessSupported *bool `xml:"eightPlusHostVmfsSharedAccessSupported"` + NestedHVSupported *bool `xml:"nestedHVSupported"` + VPMCSupported *bool `xml:"vPMCSupported"` + InterVMCommunicationThroughVMCISupported *bool `xml:"interVMCommunicationThroughVMCISupported"` + ScheduledHardwareUpgradeSupported *bool `xml:"scheduledHardwareUpgradeSupported"` + FeatureCapabilitiesSupported *bool `xml:"featureCapabilitiesSupported"` + LatencySensitivitySupported *bool `xml:"latencySensitivitySupported"` + StoragePolicySupported *bool `xml:"storagePolicySupported"` + Accel3dSupported *bool `xml:"accel3dSupported"` + ReliableMemoryAware *bool `xml:"reliableMemoryAware"` + MultipleNetworkStackInstanceSupported *bool `xml:"multipleNetworkStackInstanceSupported"` + MessageBusProxySupported *bool `xml:"messageBusProxySupported"` + VsanSupported *bool `xml:"vsanSupported"` + VFlashSupported *bool `xml:"vFlashSupported"` + HostAccessManagerSupported *bool `xml:"hostAccessManagerSupported"` + ProvisioningNicSelectionSupported *bool `xml:"provisioningNicSelectionSupported"` + 
Nfs41Supported *bool `xml:"nfs41Supported"` + TurnDiskLocatorLedSupported *bool `xml:"turnDiskLocatorLedSupported"` + VirtualVolumeDatastoreSupported *bool `xml:"virtualVolumeDatastoreSupported"` + MarkAsSsdSupported *bool `xml:"markAsSsdSupported"` + MarkAsLocalSupported *bool `xml:"markAsLocalSupported"` + SmartCardAuthenticationSupported *bool `xml:"smartCardAuthenticationSupported"` +} + +func init() { + t["HostCapability"] = reflect.TypeOf((*HostCapability)(nil)).Elem() +} + +type HostCertificateManagerCertificateInfo struct { + DynamicData + + Issuer string `xml:"issuer,omitempty"` + NotBefore *time.Time `xml:"notBefore"` + NotAfter *time.Time `xml:"notAfter"` + Subject string `xml:"subject,omitempty"` + Status string `xml:"status"` +} + +func init() { + t["HostCertificateManagerCertificateInfo"] = reflect.TypeOf((*HostCertificateManagerCertificateInfo)(nil)).Elem() +} + +type HostCnxFailedAccountFailedEvent struct { + HostEvent +} + +func init() { + t["HostCnxFailedAccountFailedEvent"] = reflect.TypeOf((*HostCnxFailedAccountFailedEvent)(nil)).Elem() +} + +type HostCnxFailedAlreadyManagedEvent struct { + HostEvent + + ServerName string `xml:"serverName"` +} + +func init() { + t["HostCnxFailedAlreadyManagedEvent"] = reflect.TypeOf((*HostCnxFailedAlreadyManagedEvent)(nil)).Elem() +} + +type HostCnxFailedBadCcagentEvent struct { + HostEvent +} + +func init() { + t["HostCnxFailedBadCcagentEvent"] = reflect.TypeOf((*HostCnxFailedBadCcagentEvent)(nil)).Elem() +} + +type HostCnxFailedBadUsernameEvent struct { + HostEvent +} + +func init() { + t["HostCnxFailedBadUsernameEvent"] = reflect.TypeOf((*HostCnxFailedBadUsernameEvent)(nil)).Elem() +} + +type HostCnxFailedBadVersionEvent struct { + HostEvent +} + +func init() { + t["HostCnxFailedBadVersionEvent"] = reflect.TypeOf((*HostCnxFailedBadVersionEvent)(nil)).Elem() +} + +type HostCnxFailedCcagentUpgradeEvent struct { + HostEvent +} + +func init() { + t["HostCnxFailedCcagentUpgradeEvent"] = reflect.TypeOf((*HostCnxFailedCcagentUpgradeEvent)(nil)).Elem() +} + +type HostCnxFailedEvent struct { + HostEvent +} + +func init() { + t["HostCnxFailedEvent"] = reflect.TypeOf((*HostCnxFailedEvent)(nil)).Elem() +} + +type HostCnxFailedNetworkErrorEvent struct { + HostEvent +} + +func init() { + t["HostCnxFailedNetworkErrorEvent"] = reflect.TypeOf((*HostCnxFailedNetworkErrorEvent)(nil)).Elem() +} + +type HostCnxFailedNoAccessEvent struct { + HostEvent +} + +func init() { + t["HostCnxFailedNoAccessEvent"] = reflect.TypeOf((*HostCnxFailedNoAccessEvent)(nil)).Elem() +} + +type HostCnxFailedNoConnectionEvent struct { + HostEvent +} + +func init() { + t["HostCnxFailedNoConnectionEvent"] = reflect.TypeOf((*HostCnxFailedNoConnectionEvent)(nil)).Elem() +} + +type HostCnxFailedNoLicenseEvent struct { + HostEvent +} + +func init() { + t["HostCnxFailedNoLicenseEvent"] = reflect.TypeOf((*HostCnxFailedNoLicenseEvent)(nil)).Elem() +} + +type HostCnxFailedNotFoundEvent struct { + HostEvent +} + +func init() { + t["HostCnxFailedNotFoundEvent"] = reflect.TypeOf((*HostCnxFailedNotFoundEvent)(nil)).Elem() +} + +type HostCnxFailedTimeoutEvent struct { + HostEvent +} + +func init() { + t["HostCnxFailedTimeoutEvent"] = reflect.TypeOf((*HostCnxFailedTimeoutEvent)(nil)).Elem() +} + +type HostCommunication struct { + RuntimeFault +} + +func init() { + t["HostCommunication"] = reflect.TypeOf((*HostCommunication)(nil)).Elem() +} + +type HostCommunicationFault BaseHostCommunication + +func init() { + t["HostCommunicationFault"] = reflect.TypeOf((*HostCommunicationFault)(nil)).Elem() 
+} + +type HostComplianceCheckedEvent struct { + HostEvent + + Profile ProfileEventArgument `xml:"profile"` +} + +func init() { + t["HostComplianceCheckedEvent"] = reflect.TypeOf((*HostComplianceCheckedEvent)(nil)).Elem() +} + +type HostCompliantEvent struct { + HostEvent +} + +func init() { + t["HostCompliantEvent"] = reflect.TypeOf((*HostCompliantEvent)(nil)).Elem() +} + +type HostConfigAppliedEvent struct { + HostEvent +} + +func init() { + t["HostConfigAppliedEvent"] = reflect.TypeOf((*HostConfigAppliedEvent)(nil)).Elem() +} + +type HostConfigChange struct { + DynamicData +} + +func init() { + t["HostConfigChange"] = reflect.TypeOf((*HostConfigChange)(nil)).Elem() +} + +type HostConfigFailed struct { + HostConfigFault + + Failure []LocalizedMethodFault `xml:"failure"` +} + +func init() { + t["HostConfigFailed"] = reflect.TypeOf((*HostConfigFailed)(nil)).Elem() +} + +type HostConfigFailedFault HostConfigFailed + +func init() { + t["HostConfigFailedFault"] = reflect.TypeOf((*HostConfigFailedFault)(nil)).Elem() +} + +type HostConfigFault struct { + VimFault +} + +func init() { + t["HostConfigFault"] = reflect.TypeOf((*HostConfigFault)(nil)).Elem() +} + +type HostConfigFaultFault BaseHostConfigFault + +func init() { + t["HostConfigFaultFault"] = reflect.TypeOf((*HostConfigFaultFault)(nil)).Elem() +} + +type HostConfigInfo struct { + DynamicData + + Host ManagedObjectReference `xml:"host"` + Product AboutInfo `xml:"product"` + HyperThread *HostHyperThreadScheduleInfo `xml:"hyperThread,omitempty"` + ConsoleReservation *ServiceConsoleReservationInfo `xml:"consoleReservation,omitempty"` + VirtualMachineReservation *VirtualMachineMemoryReservationInfo `xml:"virtualMachineReservation,omitempty"` + StorageDevice *HostStorageDeviceInfo `xml:"storageDevice,omitempty"` + MultipathState *HostMultipathStateInfo `xml:"multipathState,omitempty"` + FileSystemVolume *HostFileSystemVolumeInfo `xml:"fileSystemVolume,omitempty"` + SystemFile []string `xml:"systemFile,omitempty"` + Network *HostNetworkInfo `xml:"network,omitempty"` + Vmotion *HostVMotionInfo `xml:"vmotion,omitempty"` + VirtualNicManagerInfo *HostVirtualNicManagerInfo `xml:"virtualNicManagerInfo,omitempty"` + Capabilities *HostNetCapabilities `xml:"capabilities,omitempty"` + DatastoreCapabilities *HostDatastoreSystemCapabilities `xml:"datastoreCapabilities,omitempty"` + OffloadCapabilities *HostNetOffloadCapabilities `xml:"offloadCapabilities,omitempty"` + Service *HostServiceInfo `xml:"service,omitempty"` + Firewall *HostFirewallInfo `xml:"firewall,omitempty"` + AutoStart *HostAutoStartManagerConfig `xml:"autoStart,omitempty"` + ActiveDiagnosticPartition *HostDiagnosticPartition `xml:"activeDiagnosticPartition,omitempty"` + Option []BaseOptionValue `xml:"option,omitempty,typeattr"` + OptionDef []OptionDef `xml:"optionDef,omitempty"` + DatastorePrincipal string `xml:"datastorePrincipal,omitempty"` + LocalSwapDatastore *ManagedObjectReference `xml:"localSwapDatastore,omitempty"` + SystemSwapConfiguration *HostSystemSwapConfiguration `xml:"systemSwapConfiguration,omitempty"` + SystemResources *HostSystemResourceInfo `xml:"systemResources,omitempty"` + DateTimeInfo *HostDateTimeInfo `xml:"dateTimeInfo,omitempty"` + Flags *HostFlagInfo `xml:"flags,omitempty"` + AdminDisabled *bool `xml:"adminDisabled"` + LockdownMode HostLockdownMode `xml:"lockdownMode,omitempty"` + Ipmi *HostIpmiInfo `xml:"ipmi,omitempty"` + SslThumbprintInfo *HostSslThumbprintInfo `xml:"sslThumbprintInfo,omitempty"` + SslThumbprintData []HostSslThumbprintInfo 
`xml:"sslThumbprintData,omitempty"` + Certificate []byte `xml:"certificate,omitempty"` + PciPassthruInfo []BaseHostPciPassthruInfo `xml:"pciPassthruInfo,omitempty,typeattr"` + AuthenticationManagerInfo *HostAuthenticationManagerInfo `xml:"authenticationManagerInfo,omitempty"` + FeatureVersion []HostFeatureVersionInfo `xml:"featureVersion,omitempty"` + PowerSystemCapability *PowerSystemCapability `xml:"powerSystemCapability,omitempty"` + PowerSystemInfo *PowerSystemInfo `xml:"powerSystemInfo,omitempty"` + CacheConfigurationInfo []HostCacheConfigurationInfo `xml:"cacheConfigurationInfo,omitempty"` + WakeOnLanCapable *bool `xml:"wakeOnLanCapable"` + FeatureCapability []HostFeatureCapability `xml:"featureCapability,omitempty"` + MaskedFeatureCapability []HostFeatureCapability `xml:"maskedFeatureCapability,omitempty"` + VFlashConfigInfo *HostVFlashManagerVFlashConfigInfo `xml:"vFlashConfigInfo,omitempty"` + VsanHostConfig *VsanHostConfigInfo `xml:"vsanHostConfig,omitempty"` + DomainList []string `xml:"domainList,omitempty"` + ScriptCheckSum []byte `xml:"scriptCheckSum,omitempty"` + HostConfigCheckSum []byte `xml:"hostConfigCheckSum,omitempty"` + GraphicsInfo []HostGraphicsInfo `xml:"graphicsInfo,omitempty"` + SharedPassthruGpuTypes []string `xml:"sharedPassthruGpuTypes,omitempty"` + IoFilterInfo []HostIoFilterInfo `xml:"ioFilterInfo,omitempty"` +} + +func init() { + t["HostConfigInfo"] = reflect.TypeOf((*HostConfigInfo)(nil)).Elem() +} + +type HostConfigManager struct { + DynamicData + + CpuScheduler *ManagedObjectReference `xml:"cpuScheduler,omitempty"` + DatastoreSystem *ManagedObjectReference `xml:"datastoreSystem,omitempty"` + MemoryManager *ManagedObjectReference `xml:"memoryManager,omitempty"` + StorageSystem *ManagedObjectReference `xml:"storageSystem,omitempty"` + NetworkSystem *ManagedObjectReference `xml:"networkSystem,omitempty"` + VmotionSystem *ManagedObjectReference `xml:"vmotionSystem,omitempty"` + VirtualNicManager *ManagedObjectReference `xml:"virtualNicManager,omitempty"` + ServiceSystem *ManagedObjectReference `xml:"serviceSystem,omitempty"` + FirewallSystem *ManagedObjectReference `xml:"firewallSystem,omitempty"` + AdvancedOption *ManagedObjectReference `xml:"advancedOption,omitempty"` + DiagnosticSystem *ManagedObjectReference `xml:"diagnosticSystem,omitempty"` + AutoStartManager *ManagedObjectReference `xml:"autoStartManager,omitempty"` + SnmpSystem *ManagedObjectReference `xml:"snmpSystem,omitempty"` + DateTimeSystem *ManagedObjectReference `xml:"dateTimeSystem,omitempty"` + PatchManager *ManagedObjectReference `xml:"patchManager,omitempty"` + ImageConfigManager *ManagedObjectReference `xml:"imageConfigManager,omitempty"` + BootDeviceSystem *ManagedObjectReference `xml:"bootDeviceSystem,omitempty"` + FirmwareSystem *ManagedObjectReference `xml:"firmwareSystem,omitempty"` + HealthStatusSystem *ManagedObjectReference `xml:"healthStatusSystem,omitempty"` + PciPassthruSystem *ManagedObjectReference `xml:"pciPassthruSystem,omitempty"` + LicenseManager *ManagedObjectReference `xml:"licenseManager,omitempty"` + KernelModuleSystem *ManagedObjectReference `xml:"kernelModuleSystem,omitempty"` + AuthenticationManager *ManagedObjectReference `xml:"authenticationManager,omitempty"` + PowerSystem *ManagedObjectReference `xml:"powerSystem,omitempty"` + CacheConfigurationManager *ManagedObjectReference `xml:"cacheConfigurationManager,omitempty"` + EsxAgentHostManager *ManagedObjectReference `xml:"esxAgentHostManager,omitempty"` + IscsiManager *ManagedObjectReference 
`xml:"iscsiManager,omitempty"` + VFlashManager *ManagedObjectReference `xml:"vFlashManager,omitempty"` + VsanSystem *ManagedObjectReference `xml:"vsanSystem,omitempty"` + MessageBusProxy *ManagedObjectReference `xml:"messageBusProxy,omitempty"` + UserDirectory *ManagedObjectReference `xml:"userDirectory,omitempty"` + AccountManager *ManagedObjectReference `xml:"accountManager,omitempty"` + HostAccessManager *ManagedObjectReference `xml:"hostAccessManager,omitempty"` + GraphicsManager *ManagedObjectReference `xml:"graphicsManager,omitempty"` + VsanInternalSystem *ManagedObjectReference `xml:"vsanInternalSystem,omitempty"` + CertificateManager *ManagedObjectReference `xml:"certificateManager,omitempty"` +} + +func init() { + t["HostConfigManager"] = reflect.TypeOf((*HostConfigManager)(nil)).Elem() +} + +type HostConfigSpec struct { + DynamicData + + NasDatastore []HostNasVolumeConfig `xml:"nasDatastore,omitempty"` + Network *HostNetworkConfig `xml:"network,omitempty"` + NicTypeSelection []HostVirtualNicManagerNicTypeSelection `xml:"nicTypeSelection,omitempty"` + Service []HostServiceConfig `xml:"service,omitempty"` + Firewall *HostFirewallConfig `xml:"firewall,omitempty"` + Option []BaseOptionValue `xml:"option,omitempty,typeattr"` + DatastorePrincipal string `xml:"datastorePrincipal,omitempty"` + DatastorePrincipalPasswd string `xml:"datastorePrincipalPasswd,omitempty"` + Datetime *HostDateTimeConfig `xml:"datetime,omitempty"` + StorageDevice *HostStorageDeviceInfo `xml:"storageDevice,omitempty"` + License *HostLicenseSpec `xml:"license,omitempty"` + Security *HostSecuritySpec `xml:"security,omitempty"` + UserAccount []BaseHostAccountSpec `xml:"userAccount,omitempty,typeattr"` + UsergroupAccount []BaseHostAccountSpec `xml:"usergroupAccount,omitempty,typeattr"` + Memory *HostMemorySpec `xml:"memory,omitempty"` + ActiveDirectory []HostActiveDirectory `xml:"activeDirectory,omitempty"` + GenericConfig []KeyAnyValue `xml:"genericConfig,omitempty"` +} + +func init() { + t["HostConfigSpec"] = reflect.TypeOf((*HostConfigSpec)(nil)).Elem() +} + +type HostConfigSummary struct { + DynamicData + + Name string `xml:"name"` + Port int `xml:"port"` + SslThumbprint string `xml:"sslThumbprint,omitempty"` + Product *AboutInfo `xml:"product,omitempty"` + VmotionEnabled bool `xml:"vmotionEnabled"` + FaultToleranceEnabled *bool `xml:"faultToleranceEnabled"` + FeatureVersion []HostFeatureVersionInfo `xml:"featureVersion,omitempty"` + AgentVmDatastore *ManagedObjectReference `xml:"agentVmDatastore,omitempty"` + AgentVmNetwork *ManagedObjectReference `xml:"agentVmNetwork,omitempty"` +} + +func init() { + t["HostConfigSummary"] = reflect.TypeOf((*HostConfigSummary)(nil)).Elem() +} + +type HostConfigVFlashCache HostConfigVFlashCacheRequestType + +func init() { + t["HostConfigVFlashCache"] = reflect.TypeOf((*HostConfigVFlashCache)(nil)).Elem() +} + +type HostConfigVFlashCacheRequestType struct { + This ManagedObjectReference `xml:"_this"` + Spec HostVFlashManagerVFlashCacheConfigSpec `xml:"spec"` +} + +func init() { + t["HostConfigVFlashCacheRequestType"] = reflect.TypeOf((*HostConfigVFlashCacheRequestType)(nil)).Elem() +} + +type HostConfigVFlashCacheResponse struct { +} + +type HostConfigureVFlashResource HostConfigureVFlashResourceRequestType + +func init() { + t["HostConfigureVFlashResource"] = reflect.TypeOf((*HostConfigureVFlashResource)(nil)).Elem() +} + +type HostConfigureVFlashResourceRequestType struct { + This ManagedObjectReference `xml:"_this"` + Spec HostVFlashManagerVFlashResourceConfigSpec 
`xml:"spec"` +} + +func init() { + t["HostConfigureVFlashResourceRequestType"] = reflect.TypeOf((*HostConfigureVFlashResourceRequestType)(nil)).Elem() +} + +type HostConfigureVFlashResourceResponse struct { +} + +type HostConnectFault struct { + VimFault +} + +func init() { + t["HostConnectFault"] = reflect.TypeOf((*HostConnectFault)(nil)).Elem() +} + +type HostConnectFaultFault BaseHostConnectFault + +func init() { + t["HostConnectFaultFault"] = reflect.TypeOf((*HostConnectFaultFault)(nil)).Elem() +} + +type HostConnectInfo struct { + DynamicData + + ServerIp string `xml:"serverIp,omitempty"` + InDasCluster *bool `xml:"inDasCluster"` + Host HostListSummary `xml:"host"` + Vm []VirtualMachineSummary `xml:"vm,omitempty"` + VimAccountNameRequired *bool `xml:"vimAccountNameRequired"` + ClusterSupported *bool `xml:"clusterSupported"` + Network []BaseHostConnectInfoNetworkInfo `xml:"network,omitempty,typeattr"` + Datastore []BaseHostDatastoreConnectInfo `xml:"datastore,omitempty,typeattr"` + License *HostLicenseConnectInfo `xml:"license,omitempty"` + Capability *HostCapability `xml:"capability,omitempty"` +} + +func init() { + t["HostConnectInfo"] = reflect.TypeOf((*HostConnectInfo)(nil)).Elem() +} + +type HostConnectInfoNetworkInfo struct { + DynamicData + + Summary BaseNetworkSummary `xml:"summary,typeattr"` +} + +func init() { + t["HostConnectInfoNetworkInfo"] = reflect.TypeOf((*HostConnectInfoNetworkInfo)(nil)).Elem() +} + +type HostConnectSpec struct { + DynamicData + + HostName string `xml:"hostName,omitempty"` + Port int `xml:"port,omitempty"` + SslThumbprint string `xml:"sslThumbprint,omitempty"` + UserName string `xml:"userName,omitempty"` + Password string `xml:"password,omitempty"` + VmFolder *ManagedObjectReference `xml:"vmFolder,omitempty"` + Force bool `xml:"force"` + VimAccountName string `xml:"vimAccountName,omitempty"` + VimAccountPassword string `xml:"vimAccountPassword,omitempty"` + ManagementIp string `xml:"managementIp,omitempty"` + LockdownMode HostLockdownMode `xml:"lockdownMode,omitempty"` + HostGateway *HostGatewaySpec `xml:"hostGateway,omitempty"` +} + +func init() { + t["HostConnectSpec"] = reflect.TypeOf((*HostConnectSpec)(nil)).Elem() +} + +type HostConnectedEvent struct { + HostEvent +} + +func init() { + t["HostConnectedEvent"] = reflect.TypeOf((*HostConnectedEvent)(nil)).Elem() +} + +type HostConnectionLostEvent struct { + HostEvent +} + +func init() { + t["HostConnectionLostEvent"] = reflect.TypeOf((*HostConnectionLostEvent)(nil)).Elem() +} + +type HostCpuIdInfo struct { + DynamicData + + Level int `xml:"level"` + Vendor string `xml:"vendor,omitempty"` + Eax string `xml:"eax,omitempty"` + Ebx string `xml:"ebx,omitempty"` + Ecx string `xml:"ecx,omitempty"` + Edx string `xml:"edx,omitempty"` +} + +func init() { + t["HostCpuIdInfo"] = reflect.TypeOf((*HostCpuIdInfo)(nil)).Elem() +} + +type HostCpuInfo struct { + DynamicData + + NumCpuPackages int16 `xml:"numCpuPackages"` + NumCpuCores int16 `xml:"numCpuCores"` + NumCpuThreads int16 `xml:"numCpuThreads"` + Hz int64 `xml:"hz"` +} + +func init() { + t["HostCpuInfo"] = reflect.TypeOf((*HostCpuInfo)(nil)).Elem() +} + +type HostCpuPackage struct { + DynamicData + + Index int16 `xml:"index"` + Vendor string `xml:"vendor"` + Hz int64 `xml:"hz"` + BusHz int64 `xml:"busHz"` + Description string `xml:"description"` + ThreadId []int16 `xml:"threadId"` + CpuFeature []HostCpuIdInfo `xml:"cpuFeature,omitempty"` +} + +func init() { + t["HostCpuPackage"] = reflect.TypeOf((*HostCpuPackage)(nil)).Elem() +} + +type 
HostCpuPowerManagementInfo struct { + DynamicData + + CurrentPolicy string `xml:"currentPolicy,omitempty"` + HardwareSupport string `xml:"hardwareSupport,omitempty"` +} + +func init() { + t["HostCpuPowerManagementInfo"] = reflect.TypeOf((*HostCpuPowerManagementInfo)(nil)).Elem() +} + +type HostDasDisabledEvent struct { + HostEvent +} + +func init() { + t["HostDasDisabledEvent"] = reflect.TypeOf((*HostDasDisabledEvent)(nil)).Elem() +} + +type HostDasDisablingEvent struct { + HostEvent +} + +func init() { + t["HostDasDisablingEvent"] = reflect.TypeOf((*HostDasDisablingEvent)(nil)).Elem() +} + +type HostDasEnabledEvent struct { + HostEvent +} + +func init() { + t["HostDasEnabledEvent"] = reflect.TypeOf((*HostDasEnabledEvent)(nil)).Elem() +} + +type HostDasEnablingEvent struct { + HostEvent +} + +func init() { + t["HostDasEnablingEvent"] = reflect.TypeOf((*HostDasEnablingEvent)(nil)).Elem() +} + +type HostDasErrorEvent struct { + HostEvent + + Message string `xml:"message,omitempty"` + Reason string `xml:"reason,omitempty"` +} + +func init() { + t["HostDasErrorEvent"] = reflect.TypeOf((*HostDasErrorEvent)(nil)).Elem() +} + +type HostDasEvent struct { + HostEvent +} + +func init() { + t["HostDasEvent"] = reflect.TypeOf((*HostDasEvent)(nil)).Elem() +} + +type HostDasOkEvent struct { + HostEvent +} + +func init() { + t["HostDasOkEvent"] = reflect.TypeOf((*HostDasOkEvent)(nil)).Elem() +} + +type HostDatastoreBrowserSearchResults struct { + DynamicData + + Datastore *ManagedObjectReference `xml:"datastore,omitempty"` + FolderPath string `xml:"folderPath,omitempty"` + File []BaseFileInfo `xml:"file,omitempty,typeattr"` +} + +func init() { + t["HostDatastoreBrowserSearchResults"] = reflect.TypeOf((*HostDatastoreBrowserSearchResults)(nil)).Elem() +} + +type HostDatastoreBrowserSearchSpec struct { + DynamicData + + Query []BaseFileQuery `xml:"query,omitempty,typeattr"` + Details *FileQueryFlags `xml:"details,omitempty"` + SearchCaseInsensitive *bool `xml:"searchCaseInsensitive"` + MatchPattern []string `xml:"matchPattern,omitempty"` + SortFoldersFirst *bool `xml:"sortFoldersFirst"` +} + +func init() { + t["HostDatastoreBrowserSearchSpec"] = reflect.TypeOf((*HostDatastoreBrowserSearchSpec)(nil)).Elem() +} + +type HostDatastoreConnectInfo struct { + DynamicData + + Summary DatastoreSummary `xml:"summary"` +} + +func init() { + t["HostDatastoreConnectInfo"] = reflect.TypeOf((*HostDatastoreConnectInfo)(nil)).Elem() +} + +type HostDatastoreExistsConnectInfo struct { + HostDatastoreConnectInfo + + NewDatastoreName string `xml:"newDatastoreName"` +} + +func init() { + t["HostDatastoreExistsConnectInfo"] = reflect.TypeOf((*HostDatastoreExistsConnectInfo)(nil)).Elem() +} + +type HostDatastoreNameConflictConnectInfo struct { + HostDatastoreConnectInfo + + NewDatastoreName string `xml:"newDatastoreName"` +} + +func init() { + t["HostDatastoreNameConflictConnectInfo"] = reflect.TypeOf((*HostDatastoreNameConflictConnectInfo)(nil)).Elem() +} + +type HostDatastoreSystemCapabilities struct { + DynamicData + + NfsMountCreationRequired bool `xml:"nfsMountCreationRequired"` + NfsMountCreationSupported bool `xml:"nfsMountCreationSupported"` + LocalDatastoreSupported bool `xml:"localDatastoreSupported"` + VmfsExtentExpansionSupported *bool `xml:"vmfsExtentExpansionSupported"` +} + +func init() { + t["HostDatastoreSystemCapabilities"] = reflect.TypeOf((*HostDatastoreSystemCapabilities)(nil)).Elem() +} + +type HostDatastoreSystemDatastoreResult struct { + DynamicData + + Key ManagedObjectReference `xml:"key"` + Fault 
*LocalizedMethodFault `xml:"fault,omitempty"` +} + +func init() { + t["HostDatastoreSystemDatastoreResult"] = reflect.TypeOf((*HostDatastoreSystemDatastoreResult)(nil)).Elem() +} + +type HostDatastoreSystemVvolDatastoreSpec struct { + DynamicData + + Name string `xml:"name"` + ScId string `xml:"scId"` +} + +func init() { + t["HostDatastoreSystemVvolDatastoreSpec"] = reflect.TypeOf((*HostDatastoreSystemVvolDatastoreSpec)(nil)).Elem() +} + +type HostDateTimeConfig struct { + DynamicData + + TimeZone string `xml:"timeZone,omitempty"` + NtpConfig *HostNtpConfig `xml:"ntpConfig,omitempty"` +} + +func init() { + t["HostDateTimeConfig"] = reflect.TypeOf((*HostDateTimeConfig)(nil)).Elem() +} + +type HostDateTimeInfo struct { + DynamicData + + TimeZone HostDateTimeSystemTimeZone `xml:"timeZone"` + NtpConfig *HostNtpConfig `xml:"ntpConfig,omitempty"` +} + +func init() { + t["HostDateTimeInfo"] = reflect.TypeOf((*HostDateTimeInfo)(nil)).Elem() +} + +type HostDateTimeSystemTimeZone struct { + DynamicData + + Key string `xml:"key"` + Name string `xml:"name"` + Description string `xml:"description"` + GmtOffset int `xml:"gmtOffset"` +} + +func init() { + t["HostDateTimeSystemTimeZone"] = reflect.TypeOf((*HostDateTimeSystemTimeZone)(nil)).Elem() +} + +type HostDevice struct { + DynamicData + + DeviceName string `xml:"deviceName"` + DeviceType string `xml:"deviceType"` +} + +func init() { + t["HostDevice"] = reflect.TypeOf((*HostDevice)(nil)).Elem() +} + +type HostDhcpService struct { + DynamicData + + Key string `xml:"key"` + Spec HostDhcpServiceSpec `xml:"spec"` +} + +func init() { + t["HostDhcpService"] = reflect.TypeOf((*HostDhcpService)(nil)).Elem() +} + +type HostDhcpServiceConfig struct { + DynamicData + + ChangeOperation string `xml:"changeOperation,omitempty"` + Key string `xml:"key"` + Spec HostDhcpServiceSpec `xml:"spec"` +} + +func init() { + t["HostDhcpServiceConfig"] = reflect.TypeOf((*HostDhcpServiceConfig)(nil)).Elem() +} + +type HostDhcpServiceSpec struct { + DynamicData + + VirtualSwitch string `xml:"virtualSwitch"` + DefaultLeaseDuration int `xml:"defaultLeaseDuration"` + LeaseBeginIp string `xml:"leaseBeginIp"` + LeaseEndIp string `xml:"leaseEndIp"` + MaxLeaseDuration int `xml:"maxLeaseDuration"` + UnlimitedLease bool `xml:"unlimitedLease"` + IpSubnetAddr string `xml:"ipSubnetAddr"` + IpSubnetMask string `xml:"ipSubnetMask"` +} + +func init() { + t["HostDhcpServiceSpec"] = reflect.TypeOf((*HostDhcpServiceSpec)(nil)).Elem() +} + +type HostDiagnosticPartition struct { + DynamicData + + StorageType string `xml:"storageType"` + DiagnosticType string `xml:"diagnosticType"` + Slots int `xml:"slots"` + Id HostScsiDiskPartition `xml:"id"` +} + +func init() { + t["HostDiagnosticPartition"] = reflect.TypeOf((*HostDiagnosticPartition)(nil)).Elem() +} + +type HostDiagnosticPartitionCreateDescription struct { + DynamicData + + Layout HostDiskPartitionLayout `xml:"layout"` + DiskUuid string `xml:"diskUuid"` + Spec HostDiagnosticPartitionCreateSpec `xml:"spec"` +} + +func init() { + t["HostDiagnosticPartitionCreateDescription"] = reflect.TypeOf((*HostDiagnosticPartitionCreateDescription)(nil)).Elem() +} + +type HostDiagnosticPartitionCreateOption struct { + DynamicData + + StorageType string `xml:"storageType"` + DiagnosticType string `xml:"diagnosticType"` + Disk HostScsiDisk `xml:"disk"` +} + +func init() { + t["HostDiagnosticPartitionCreateOption"] = reflect.TypeOf((*HostDiagnosticPartitionCreateOption)(nil)).Elem() +} + +type HostDiagnosticPartitionCreateSpec struct { + DynamicData + + 
StorageType string `xml:"storageType"` + DiagnosticType string `xml:"diagnosticType"` + Id HostScsiDiskPartition `xml:"id"` + Partition HostDiskPartitionSpec `xml:"partition"` + Active *bool `xml:"active"` +} + +func init() { + t["HostDiagnosticPartitionCreateSpec"] = reflect.TypeOf((*HostDiagnosticPartitionCreateSpec)(nil)).Elem() +} + +type HostDigestInfo struct { + DynamicData + + DigestMethod string `xml:"digestMethod"` + DigestValue []byte `xml:"digestValue"` + ObjectName string `xml:"objectName,omitempty"` +} + +func init() { + t["HostDigestInfo"] = reflect.TypeOf((*HostDigestInfo)(nil)).Elem() +} + +type HostDirectoryStoreInfo struct { + HostAuthenticationStoreInfo +} + +func init() { + t["HostDirectoryStoreInfo"] = reflect.TypeOf((*HostDirectoryStoreInfo)(nil)).Elem() +} + +type HostDisconnectedEvent struct { + HostEvent + + Reason string `xml:"reason,omitempty"` +} + +func init() { + t["HostDisconnectedEvent"] = reflect.TypeOf((*HostDisconnectedEvent)(nil)).Elem() +} + +type HostDiskConfigurationResult struct { + DynamicData + + DevicePath string `xml:"devicePath,omitempty"` + Success *bool `xml:"success"` + Fault *LocalizedMethodFault `xml:"fault,omitempty"` +} + +func init() { + t["HostDiskConfigurationResult"] = reflect.TypeOf((*HostDiskConfigurationResult)(nil)).Elem() +} + +type HostDiskDimensions struct { + DynamicData +} + +func init() { + t["HostDiskDimensions"] = reflect.TypeOf((*HostDiskDimensions)(nil)).Elem() +} + +type HostDiskDimensionsChs struct { + DynamicData + + Cylinder int64 `xml:"cylinder"` + Head int `xml:"head"` + Sector int `xml:"sector"` +} + +func init() { + t["HostDiskDimensionsChs"] = reflect.TypeOf((*HostDiskDimensionsChs)(nil)).Elem() +} + +type HostDiskDimensionsLba struct { + DynamicData + + BlockSize int `xml:"blockSize"` + Block int64 `xml:"block"` +} + +func init() { + t["HostDiskDimensionsLba"] = reflect.TypeOf((*HostDiskDimensionsLba)(nil)).Elem() +} + +type HostDiskMappingInfo struct { + DynamicData + + PhysicalPartition *HostDiskMappingPartitionInfo `xml:"physicalPartition,omitempty"` + Name string `xml:"name"` + Exclusive *bool `xml:"exclusive"` +} + +func init() { + t["HostDiskMappingInfo"] = reflect.TypeOf((*HostDiskMappingInfo)(nil)).Elem() +} + +type HostDiskMappingOption struct { + DynamicData + + PhysicalPartition []HostDiskMappingPartitionOption `xml:"physicalPartition,omitempty"` + Name string `xml:"name"` +} + +func init() { + t["HostDiskMappingOption"] = reflect.TypeOf((*HostDiskMappingOption)(nil)).Elem() +} + +type HostDiskMappingPartitionInfo struct { + DynamicData + + Name string `xml:"name"` + FileSystem string `xml:"fileSystem"` + CapacityInKb int64 `xml:"capacityInKb"` +} + +func init() { + t["HostDiskMappingPartitionInfo"] = reflect.TypeOf((*HostDiskMappingPartitionInfo)(nil)).Elem() +} + +type HostDiskMappingPartitionOption struct { + DynamicData + + Name string `xml:"name"` + FileSystem string `xml:"fileSystem"` + CapacityInKb int64 `xml:"capacityInKb"` +} + +func init() { + t["HostDiskMappingPartitionOption"] = reflect.TypeOf((*HostDiskMappingPartitionOption)(nil)).Elem() +} + +type HostDiskPartitionAttributes struct { + DynamicData + + Partition int `xml:"partition"` + StartSector int64 `xml:"startSector"` + EndSector int64 `xml:"endSector"` + Type string `xml:"type"` + Guid string `xml:"guid,omitempty"` + Logical bool `xml:"logical"` + Attributes byte `xml:"attributes"` + PartitionAlignment int64 `xml:"partitionAlignment,omitempty"` +} + +func init() { + t["HostDiskPartitionAttributes"] = 
reflect.TypeOf((*HostDiskPartitionAttributes)(nil)).Elem() +} + +type HostDiskPartitionBlockRange struct { + DynamicData + + Partition int `xml:"partition,omitempty"` + Type string `xml:"type"` + Start HostDiskDimensionsLba `xml:"start"` + End HostDiskDimensionsLba `xml:"end"` +} + +func init() { + t["HostDiskPartitionBlockRange"] = reflect.TypeOf((*HostDiskPartitionBlockRange)(nil)).Elem() +} + +type HostDiskPartitionInfo struct { + DynamicData + + DeviceName string `xml:"deviceName"` + Spec HostDiskPartitionSpec `xml:"spec"` + Layout HostDiskPartitionLayout `xml:"layout"` +} + +func init() { + t["HostDiskPartitionInfo"] = reflect.TypeOf((*HostDiskPartitionInfo)(nil)).Elem() +} + +type HostDiskPartitionLayout struct { + DynamicData + + Total *HostDiskDimensionsLba `xml:"total,omitempty"` + Partition []HostDiskPartitionBlockRange `xml:"partition"` +} + +func init() { + t["HostDiskPartitionLayout"] = reflect.TypeOf((*HostDiskPartitionLayout)(nil)).Elem() +} + +type HostDiskPartitionSpec struct { + DynamicData + + PartitionFormat string `xml:"partitionFormat,omitempty"` + Chs *HostDiskDimensionsChs `xml:"chs,omitempty"` + TotalSectors int64 `xml:"totalSectors,omitempty"` + Partition []HostDiskPartitionAttributes `xml:"partition,omitempty"` +} + +func init() { + t["HostDiskPartitionSpec"] = reflect.TypeOf((*HostDiskPartitionSpec)(nil)).Elem() +} + +type HostDnsConfig struct { + DynamicData + + Dhcp bool `xml:"dhcp"` + VirtualNicDevice string `xml:"virtualNicDevice,omitempty"` + HostName string `xml:"hostName"` + DomainName string `xml:"domainName"` + Address []string `xml:"address,omitempty"` + SearchDomain []string `xml:"searchDomain,omitempty"` +} + +func init() { + t["HostDnsConfig"] = reflect.TypeOf((*HostDnsConfig)(nil)).Elem() +} + +type HostDnsConfigSpec struct { + HostDnsConfig + + VirtualNicConnection *HostVirtualNicConnection `xml:"virtualNicConnection,omitempty"` +} + +func init() { + t["HostDnsConfigSpec"] = reflect.TypeOf((*HostDnsConfigSpec)(nil)).Elem() +} + +type HostEnableAdminFailedEvent struct { + HostEvent + + Permissions []Permission `xml:"permissions"` +} + +func init() { + t["HostEnableAdminFailedEvent"] = reflect.TypeOf((*HostEnableAdminFailedEvent)(nil)).Elem() +} + +type HostEsxAgentHostManagerConfigInfo struct { + DynamicData + + AgentVmDatastore *ManagedObjectReference `xml:"agentVmDatastore,omitempty"` + AgentVmNetwork *ManagedObjectReference `xml:"agentVmNetwork,omitempty"` +} + +func init() { + t["HostEsxAgentHostManagerConfigInfo"] = reflect.TypeOf((*HostEsxAgentHostManagerConfigInfo)(nil)).Elem() +} + +type HostEvent struct { + Event +} + +func init() { + t["HostEvent"] = reflect.TypeOf((*HostEvent)(nil)).Elem() +} + +type HostEventArgument struct { + EntityEventArgument + + Host ManagedObjectReference `xml:"host"` +} + +func init() { + t["HostEventArgument"] = reflect.TypeOf((*HostEventArgument)(nil)).Elem() +} + +type HostExtraNetworksEvent struct { + HostDasEvent + + Ips string `xml:"ips,omitempty"` +} + +func init() { + t["HostExtraNetworksEvent"] = reflect.TypeOf((*HostExtraNetworksEvent)(nil)).Elem() +} + +type HostFaultToleranceManagerComponentHealthInfo struct { + DynamicData + + IsStorageHealthy bool `xml:"isStorageHealthy"` + IsNetworkHealthy bool `xml:"isNetworkHealthy"` +} + +func init() { + t["HostFaultToleranceManagerComponentHealthInfo"] = reflect.TypeOf((*HostFaultToleranceManagerComponentHealthInfo)(nil)).Elem() +} + +type HostFeatureCapability struct { + DynamicData + + Key string `xml:"key"` + FeatureName string `xml:"featureName"` + Value 
string `xml:"value"` +} + +func init() { + t["HostFeatureCapability"] = reflect.TypeOf((*HostFeatureCapability)(nil)).Elem() +} + +type HostFeatureMask struct { + DynamicData + + Key string `xml:"key"` + FeatureName string `xml:"featureName"` + Value string `xml:"value"` +} + +func init() { + t["HostFeatureMask"] = reflect.TypeOf((*HostFeatureMask)(nil)).Elem() +} + +type HostFeatureVersionInfo struct { + DynamicData + + Key string `xml:"key"` + Value string `xml:"value"` +} + +func init() { + t["HostFeatureVersionInfo"] = reflect.TypeOf((*HostFeatureVersionInfo)(nil)).Elem() +} + +type HostFibreChannelHba struct { + HostHostBusAdapter + + PortWorldWideName int64 `xml:"portWorldWideName"` + NodeWorldWideName int64 `xml:"nodeWorldWideName"` + PortType FibreChannelPortType `xml:"portType"` + Speed int64 `xml:"speed"` +} + +func init() { + t["HostFibreChannelHba"] = reflect.TypeOf((*HostFibreChannelHba)(nil)).Elem() +} + +type HostFibreChannelOverEthernetHba struct { + HostFibreChannelHba + + UnderlyingNic string `xml:"underlyingNic"` + LinkInfo HostFibreChannelOverEthernetHbaLinkInfo `xml:"linkInfo"` + IsSoftwareFcoe bool `xml:"isSoftwareFcoe"` + MarkedForRemoval bool `xml:"markedForRemoval"` +} + +func init() { + t["HostFibreChannelOverEthernetHba"] = reflect.TypeOf((*HostFibreChannelOverEthernetHba)(nil)).Elem() +} + +type HostFibreChannelOverEthernetHbaLinkInfo struct { + DynamicData + + VnportMac string `xml:"vnportMac"` + FcfMac string `xml:"fcfMac"` + VlanId int `xml:"vlanId"` +} + +func init() { + t["HostFibreChannelOverEthernetHbaLinkInfo"] = reflect.TypeOf((*HostFibreChannelOverEthernetHbaLinkInfo)(nil)).Elem() +} + +type HostFibreChannelOverEthernetTargetTransport struct { + HostFibreChannelTargetTransport + + VnportMac string `xml:"vnportMac"` + FcfMac string `xml:"fcfMac"` + VlanId int `xml:"vlanId"` +} + +func init() { + t["HostFibreChannelOverEthernetTargetTransport"] = reflect.TypeOf((*HostFibreChannelOverEthernetTargetTransport)(nil)).Elem() +} + +type HostFibreChannelTargetTransport struct { + HostTargetTransport + + PortWorldWideName int64 `xml:"portWorldWideName"` + NodeWorldWideName int64 `xml:"nodeWorldWideName"` +} + +func init() { + t["HostFibreChannelTargetTransport"] = reflect.TypeOf((*HostFibreChannelTargetTransport)(nil)).Elem() +} + +type HostFileAccess struct { + DynamicData + + Who string `xml:"who"` + What string `xml:"what"` +} + +func init() { + t["HostFileAccess"] = reflect.TypeOf((*HostFileAccess)(nil)).Elem() +} + +type HostFileSystemMountInfo struct { + DynamicData + + MountInfo HostMountInfo `xml:"mountInfo"` + Volume BaseHostFileSystemVolume `xml:"volume,typeattr"` + VStorageSupport string `xml:"vStorageSupport,omitempty"` +} + +func init() { + t["HostFileSystemMountInfo"] = reflect.TypeOf((*HostFileSystemMountInfo)(nil)).Elem() +} + +type HostFileSystemVolume struct { + DynamicData + + Type string `xml:"type"` + Name string `xml:"name"` + Capacity int64 `xml:"capacity"` +} + +func init() { + t["HostFileSystemVolume"] = reflect.TypeOf((*HostFileSystemVolume)(nil)).Elem() +} + +type HostFileSystemVolumeInfo struct { + DynamicData + + VolumeTypeList []string `xml:"volumeTypeList,omitempty"` + MountInfo []HostFileSystemMountInfo `xml:"mountInfo,omitempty"` +} + +func init() { + t["HostFileSystemVolumeInfo"] = reflect.TypeOf((*HostFileSystemVolumeInfo)(nil)).Elem() +} + +type HostFirewallConfig struct { + DynamicData + + Rule []HostFirewallConfigRuleSetConfig `xml:"rule,omitempty"` + DefaultBlockingPolicy HostFirewallDefaultPolicy 
`xml:"defaultBlockingPolicy"` +} + +func init() { + t["HostFirewallConfig"] = reflect.TypeOf((*HostFirewallConfig)(nil)).Elem() +} + +type HostFirewallConfigRuleSetConfig struct { + DynamicData + + RulesetId string `xml:"rulesetId"` + Enabled bool `xml:"enabled"` + AllowedHosts *HostFirewallRulesetIpList `xml:"allowedHosts,omitempty"` +} + +func init() { + t["HostFirewallConfigRuleSetConfig"] = reflect.TypeOf((*HostFirewallConfigRuleSetConfig)(nil)).Elem() +} + +type HostFirewallDefaultPolicy struct { + DynamicData + + IncomingBlocked *bool `xml:"incomingBlocked"` + OutgoingBlocked *bool `xml:"outgoingBlocked"` +} + +func init() { + t["HostFirewallDefaultPolicy"] = reflect.TypeOf((*HostFirewallDefaultPolicy)(nil)).Elem() +} + +type HostFirewallInfo struct { + DynamicData + + DefaultPolicy HostFirewallDefaultPolicy `xml:"defaultPolicy"` + Ruleset []HostFirewallRuleset `xml:"ruleset,omitempty"` +} + +func init() { + t["HostFirewallInfo"] = reflect.TypeOf((*HostFirewallInfo)(nil)).Elem() +} + +type HostFirewallRule struct { + DynamicData + + Port int `xml:"port"` + EndPort int `xml:"endPort,omitempty"` + Direction HostFirewallRuleDirection `xml:"direction"` + PortType HostFirewallRulePortType `xml:"portType,omitempty"` + Protocol string `xml:"protocol"` +} + +func init() { + t["HostFirewallRule"] = reflect.TypeOf((*HostFirewallRule)(nil)).Elem() +} + +type HostFirewallRuleset struct { + DynamicData + + Key string `xml:"key"` + Label string `xml:"label"` + Required bool `xml:"required"` + Rule []HostFirewallRule `xml:"rule"` + Service string `xml:"service,omitempty"` + Enabled bool `xml:"enabled"` + AllowedHosts *HostFirewallRulesetIpList `xml:"allowedHosts,omitempty"` +} + +func init() { + t["HostFirewallRuleset"] = reflect.TypeOf((*HostFirewallRuleset)(nil)).Elem() +} + +type HostFirewallRulesetIpList struct { + DynamicData + + IpAddress []string `xml:"ipAddress,omitempty"` + IpNetwork []HostFirewallRulesetIpNetwork `xml:"ipNetwork,omitempty"` + AllIp bool `xml:"allIp"` +} + +func init() { + t["HostFirewallRulesetIpList"] = reflect.TypeOf((*HostFirewallRulesetIpList)(nil)).Elem() +} + +type HostFirewallRulesetIpNetwork struct { + DynamicData + + Network string `xml:"network"` + PrefixLength int `xml:"prefixLength"` +} + +func init() { + t["HostFirewallRulesetIpNetwork"] = reflect.TypeOf((*HostFirewallRulesetIpNetwork)(nil)).Elem() +} + +type HostFirewallRulesetRulesetSpec struct { + DynamicData + + AllowedHosts HostFirewallRulesetIpList `xml:"allowedHosts"` +} + +func init() { + t["HostFirewallRulesetRulesetSpec"] = reflect.TypeOf((*HostFirewallRulesetRulesetSpec)(nil)).Elem() +} + +type HostFlagInfo struct { + DynamicData + + BackgroundSnapshotsEnabled *bool `xml:"backgroundSnapshotsEnabled"` +} + +func init() { + t["HostFlagInfo"] = reflect.TypeOf((*HostFlagInfo)(nil)).Elem() +} + +type HostForceMountedInfo struct { + DynamicData + + Persist bool `xml:"persist"` + Mounted bool `xml:"mounted"` +} + +func init() { + t["HostForceMountedInfo"] = reflect.TypeOf((*HostForceMountedInfo)(nil)).Elem() +} + +type HostGatewaySpec struct { + DynamicData + + GatewayType string `xml:"gatewayType"` + GatewayId string `xml:"gatewayId,omitempty"` + TrustVerificationToken string `xml:"trustVerificationToken,omitempty"` + HostAuthParams []KeyValue `xml:"hostAuthParams,omitempty"` +} + +func init() { + t["HostGatewaySpec"] = reflect.TypeOf((*HostGatewaySpec)(nil)).Elem() +} + +type HostGetShortNameFailedEvent struct { + HostEvent +} + +func init() { + t["HostGetShortNameFailedEvent"] = 
reflect.TypeOf((*HostGetShortNameFailedEvent)(nil)).Elem() +} + +type HostGetVFlashModuleDefaultConfig HostGetVFlashModuleDefaultConfigRequestType + +func init() { + t["HostGetVFlashModuleDefaultConfig"] = reflect.TypeOf((*HostGetVFlashModuleDefaultConfig)(nil)).Elem() +} + +type HostGetVFlashModuleDefaultConfigRequestType struct { + This ManagedObjectReference `xml:"_this"` + VFlashModule string `xml:"vFlashModule"` +} + +func init() { + t["HostGetVFlashModuleDefaultConfigRequestType"] = reflect.TypeOf((*HostGetVFlashModuleDefaultConfigRequestType)(nil)).Elem() +} + +type HostGetVFlashModuleDefaultConfigResponse struct { + Returnval VirtualDiskVFlashCacheConfigInfo `xml:"returnval"` +} + +type HostGraphicsInfo struct { + DynamicData + + DeviceName string `xml:"deviceName"` + VendorName string `xml:"vendorName"` + PciId string `xml:"pciId"` + GraphicsType string `xml:"graphicsType"` + MemorySizeInKB int64 `xml:"memorySizeInKB"` + Vm []ManagedObjectReference `xml:"vm,omitempty"` +} + +func init() { + t["HostGraphicsInfo"] = reflect.TypeOf((*HostGraphicsInfo)(nil)).Elem() +} + +type HostHardwareElementInfo struct { + DynamicData + + Name string `xml:"name"` + Status BaseElementDescription `xml:"status,typeattr"` +} + +func init() { + t["HostHardwareElementInfo"] = reflect.TypeOf((*HostHardwareElementInfo)(nil)).Elem() +} + +type HostHardwareInfo struct { + DynamicData + + SystemInfo HostSystemInfo `xml:"systemInfo"` + CpuPowerManagementInfo *HostCpuPowerManagementInfo `xml:"cpuPowerManagementInfo,omitempty"` + CpuInfo HostCpuInfo `xml:"cpuInfo"` + CpuPkg []HostCpuPackage `xml:"cpuPkg"` + MemorySize int64 `xml:"memorySize"` + NumaInfo *HostNumaInfo `xml:"numaInfo,omitempty"` + SmcPresent *bool `xml:"smcPresent"` + PciDevice []HostPciDevice `xml:"pciDevice,omitempty"` + CpuFeature []HostCpuIdInfo `xml:"cpuFeature,omitempty"` + BiosInfo *HostBIOSInfo `xml:"biosInfo,omitempty"` + ReliableMemoryInfo *HostReliableMemoryInfo `xml:"reliableMemoryInfo,omitempty"` +} + +func init() { + t["HostHardwareInfo"] = reflect.TypeOf((*HostHardwareInfo)(nil)).Elem() +} + +type HostHardwareStatusInfo struct { + DynamicData + + MemoryStatusInfo []BaseHostHardwareElementInfo `xml:"memoryStatusInfo,omitempty,typeattr"` + CpuStatusInfo []BaseHostHardwareElementInfo `xml:"cpuStatusInfo,omitempty,typeattr"` + StorageStatusInfo []HostStorageElementInfo `xml:"storageStatusInfo,omitempty"` +} + +func init() { + t["HostHardwareStatusInfo"] = reflect.TypeOf((*HostHardwareStatusInfo)(nil)).Elem() +} + +type HostHardwareSummary struct { + DynamicData + + Vendor string `xml:"vendor"` + Model string `xml:"model"` + Uuid string `xml:"uuid"` + OtherIdentifyingInfo []HostSystemIdentificationInfo `xml:"otherIdentifyingInfo,omitempty"` + MemorySize int64 `xml:"memorySize"` + CpuModel string `xml:"cpuModel"` + CpuMhz int `xml:"cpuMhz"` + NumCpuPkgs int16 `xml:"numCpuPkgs"` + NumCpuCores int16 `xml:"numCpuCores"` + NumCpuThreads int16 `xml:"numCpuThreads"` + NumNics int `xml:"numNics"` + NumHBAs int `xml:"numHBAs"` +} + +func init() { + t["HostHardwareSummary"] = reflect.TypeOf((*HostHardwareSummary)(nil)).Elem() +} + +type HostHasComponentFailure struct { + VimFault + + HostName string `xml:"hostName"` + ComponentType string `xml:"componentType"` + ComponentName string `xml:"componentName"` +} + +func init() { + t["HostHasComponentFailure"] = reflect.TypeOf((*HostHasComponentFailure)(nil)).Elem() +} + +type HostHasComponentFailureFault HostHasComponentFailure + +func init() { + t["HostHasComponentFailureFault"] = 
reflect.TypeOf((*HostHasComponentFailureFault)(nil)).Elem() +} + +type HostHostBusAdapter struct { + DynamicData + + Key string `xml:"key,omitempty"` + Device string `xml:"device"` + Bus int `xml:"bus"` + Status string `xml:"status"` + Model string `xml:"model"` + Driver string `xml:"driver,omitempty"` + Pci string `xml:"pci,omitempty"` +} + +func init() { + t["HostHostBusAdapter"] = reflect.TypeOf((*HostHostBusAdapter)(nil)).Elem() +} + +type HostHyperThreadScheduleInfo struct { + DynamicData + + Available bool `xml:"available"` + Active bool `xml:"active"` + Config bool `xml:"config"` +} + +func init() { + t["HostHyperThreadScheduleInfo"] = reflect.TypeOf((*HostHyperThreadScheduleInfo)(nil)).Elem() +} + +type HostImageConfigGetAcceptance HostImageConfigGetAcceptanceRequestType + +func init() { + t["HostImageConfigGetAcceptance"] = reflect.TypeOf((*HostImageConfigGetAcceptance)(nil)).Elem() +} + +type HostImageConfigGetAcceptanceRequestType struct { + This ManagedObjectReference `xml:"_this"` +} + +func init() { + t["HostImageConfigGetAcceptanceRequestType"] = reflect.TypeOf((*HostImageConfigGetAcceptanceRequestType)(nil)).Elem() +} + +type HostImageConfigGetAcceptanceResponse struct { + Returnval string `xml:"returnval"` +} + +type HostImageConfigGetProfile HostImageConfigGetProfileRequestType + +func init() { + t["HostImageConfigGetProfile"] = reflect.TypeOf((*HostImageConfigGetProfile)(nil)).Elem() +} + +type HostImageConfigGetProfileRequestType struct { + This ManagedObjectReference `xml:"_this"` +} + +func init() { + t["HostImageConfigGetProfileRequestType"] = reflect.TypeOf((*HostImageConfigGetProfileRequestType)(nil)).Elem() +} + +type HostImageConfigGetProfileResponse struct { + Returnval HostImageProfileSummary `xml:"returnval"` +} + +type HostImageProfileSummary struct { + DynamicData + + Name string `xml:"name"` + Vendor string `xml:"vendor"` +} + +func init() { + t["HostImageProfileSummary"] = reflect.TypeOf((*HostImageProfileSummary)(nil)).Elem() +} + +type HostInAuditModeEvent struct { + HostEvent +} + +func init() { + t["HostInAuditModeEvent"] = reflect.TypeOf((*HostInAuditModeEvent)(nil)).Elem() +} + +type HostInDomain struct { + HostConfigFault +} + +func init() { + t["HostInDomain"] = reflect.TypeOf((*HostInDomain)(nil)).Elem() +} + +type HostInDomainFault HostInDomain + +func init() { + t["HostInDomainFault"] = reflect.TypeOf((*HostInDomainFault)(nil)).Elem() +} + +type HostIncompatibleForFaultTolerance struct { + VmFaultToleranceIssue + + HostName string `xml:"hostName,omitempty"` + Reason string `xml:"reason,omitempty"` +} + +func init() { + t["HostIncompatibleForFaultTolerance"] = reflect.TypeOf((*HostIncompatibleForFaultTolerance)(nil)).Elem() +} + +type HostIncompatibleForFaultToleranceFault HostIncompatibleForFaultTolerance + +func init() { + t["HostIncompatibleForFaultToleranceFault"] = reflect.TypeOf((*HostIncompatibleForFaultToleranceFault)(nil)).Elem() +} + +type HostIncompatibleForRecordReplay struct { + VimFault + + HostName string `xml:"hostName,omitempty"` + Reason string `xml:"reason,omitempty"` +} + +func init() { + t["HostIncompatibleForRecordReplay"] = reflect.TypeOf((*HostIncompatibleForRecordReplay)(nil)).Elem() +} + +type HostIncompatibleForRecordReplayFault HostIncompatibleForRecordReplay + +func init() { + t["HostIncompatibleForRecordReplayFault"] = reflect.TypeOf((*HostIncompatibleForRecordReplayFault)(nil)).Elem() +} + +type HostInternetScsiHba struct { + HostHostBusAdapter + + IsSoftwareBased bool `xml:"isSoftwareBased"` + CanBeDisabled *bool 
`xml:"canBeDisabled"` + NetworkBindingSupport HostInternetScsiHbaNetworkBindingSupportType `xml:"networkBindingSupport,omitempty"` + DiscoveryCapabilities HostInternetScsiHbaDiscoveryCapabilities `xml:"discoveryCapabilities"` + DiscoveryProperties HostInternetScsiHbaDiscoveryProperties `xml:"discoveryProperties"` + AuthenticationCapabilities HostInternetScsiHbaAuthenticationCapabilities `xml:"authenticationCapabilities"` + AuthenticationProperties HostInternetScsiHbaAuthenticationProperties `xml:"authenticationProperties"` + DigestCapabilities *HostInternetScsiHbaDigestCapabilities `xml:"digestCapabilities,omitempty"` + DigestProperties *HostInternetScsiHbaDigestProperties `xml:"digestProperties,omitempty"` + IpCapabilities HostInternetScsiHbaIPCapabilities `xml:"ipCapabilities"` + IpProperties HostInternetScsiHbaIPProperties `xml:"ipProperties"` + SupportedAdvancedOptions []OptionDef `xml:"supportedAdvancedOptions,omitempty"` + AdvancedOptions []HostInternetScsiHbaParamValue `xml:"advancedOptions,omitempty"` + IScsiName string `xml:"iScsiName"` + IScsiAlias string `xml:"iScsiAlias,omitempty"` + ConfiguredSendTarget []HostInternetScsiHbaSendTarget `xml:"configuredSendTarget,omitempty"` + ConfiguredStaticTarget []HostInternetScsiHbaStaticTarget `xml:"configuredStaticTarget,omitempty"` + MaxSpeedMb int `xml:"maxSpeedMb,omitempty"` + CurrentSpeedMb int `xml:"currentSpeedMb,omitempty"` +} + +func init() { + t["HostInternetScsiHba"] = reflect.TypeOf((*HostInternetScsiHba)(nil)).Elem() +} + +type HostInternetScsiHbaAuthenticationCapabilities struct { + DynamicData + + ChapAuthSettable bool `xml:"chapAuthSettable"` + Krb5AuthSettable bool `xml:"krb5AuthSettable"` + SrpAuthSettable bool `xml:"srpAuthSettable"` + SpkmAuthSettable bool `xml:"spkmAuthSettable"` + MutualChapSettable *bool `xml:"mutualChapSettable"` + TargetChapSettable *bool `xml:"targetChapSettable"` + TargetMutualChapSettable *bool `xml:"targetMutualChapSettable"` +} + +func init() { + t["HostInternetScsiHbaAuthenticationCapabilities"] = reflect.TypeOf((*HostInternetScsiHbaAuthenticationCapabilities)(nil)).Elem() +} + +type HostInternetScsiHbaAuthenticationProperties struct { + DynamicData + + ChapAuthEnabled bool `xml:"chapAuthEnabled"` + ChapName string `xml:"chapName,omitempty"` + ChapSecret string `xml:"chapSecret,omitempty"` + ChapAuthenticationType string `xml:"chapAuthenticationType,omitempty"` + ChapInherited *bool `xml:"chapInherited"` + MutualChapName string `xml:"mutualChapName,omitempty"` + MutualChapSecret string `xml:"mutualChapSecret,omitempty"` + MutualChapAuthenticationType string `xml:"mutualChapAuthenticationType,omitempty"` + MutualChapInherited *bool `xml:"mutualChapInherited"` +} + +func init() { + t["HostInternetScsiHbaAuthenticationProperties"] = reflect.TypeOf((*HostInternetScsiHbaAuthenticationProperties)(nil)).Elem() +} + +type HostInternetScsiHbaDigestCapabilities struct { + DynamicData + + HeaderDigestSettable *bool `xml:"headerDigestSettable"` + DataDigestSettable *bool `xml:"dataDigestSettable"` + TargetHeaderDigestSettable *bool `xml:"targetHeaderDigestSettable"` + TargetDataDigestSettable *bool `xml:"targetDataDigestSettable"` +} + +func init() { + t["HostInternetScsiHbaDigestCapabilities"] = reflect.TypeOf((*HostInternetScsiHbaDigestCapabilities)(nil)).Elem() +} + +type HostInternetScsiHbaDigestProperties struct { + DynamicData + + HeaderDigestType string `xml:"headerDigestType,omitempty"` + HeaderDigestInherited *bool `xml:"headerDigestInherited"` + DataDigestType string 
`xml:"dataDigestType,omitempty"` + DataDigestInherited *bool `xml:"dataDigestInherited"` +} + +func init() { + t["HostInternetScsiHbaDigestProperties"] = reflect.TypeOf((*HostInternetScsiHbaDigestProperties)(nil)).Elem() +} + +type HostInternetScsiHbaDiscoveryCapabilities struct { + DynamicData + + ISnsDiscoverySettable bool `xml:"iSnsDiscoverySettable"` + SlpDiscoverySettable bool `xml:"slpDiscoverySettable"` + StaticTargetDiscoverySettable bool `xml:"staticTargetDiscoverySettable"` + SendTargetsDiscoverySettable bool `xml:"sendTargetsDiscoverySettable"` +} + +func init() { + t["HostInternetScsiHbaDiscoveryCapabilities"] = reflect.TypeOf((*HostInternetScsiHbaDiscoveryCapabilities)(nil)).Elem() +} + +type HostInternetScsiHbaDiscoveryProperties struct { + DynamicData + + ISnsDiscoveryEnabled bool `xml:"iSnsDiscoveryEnabled"` + ISnsDiscoveryMethod string `xml:"iSnsDiscoveryMethod,omitempty"` + ISnsHost string `xml:"iSnsHost,omitempty"` + SlpDiscoveryEnabled bool `xml:"slpDiscoveryEnabled"` + SlpDiscoveryMethod string `xml:"slpDiscoveryMethod,omitempty"` + SlpHost string `xml:"slpHost,omitempty"` + StaticTargetDiscoveryEnabled bool `xml:"staticTargetDiscoveryEnabled"` + SendTargetsDiscoveryEnabled bool `xml:"sendTargetsDiscoveryEnabled"` +} + +func init() { + t["HostInternetScsiHbaDiscoveryProperties"] = reflect.TypeOf((*HostInternetScsiHbaDiscoveryProperties)(nil)).Elem() +} + +type HostInternetScsiHbaIPCapabilities struct { + DynamicData + + AddressSettable bool `xml:"addressSettable"` + IpConfigurationMethodSettable bool `xml:"ipConfigurationMethodSettable"` + SubnetMaskSettable bool `xml:"subnetMaskSettable"` + DefaultGatewaySettable bool `xml:"defaultGatewaySettable"` + PrimaryDnsServerAddressSettable bool `xml:"primaryDnsServerAddressSettable"` + AlternateDnsServerAddressSettable bool `xml:"alternateDnsServerAddressSettable"` + Ipv6Supported *bool `xml:"ipv6Supported"` + ArpRedirectSettable *bool `xml:"arpRedirectSettable"` + MtuSettable *bool `xml:"mtuSettable"` + HostNameAsTargetAddress *bool `xml:"hostNameAsTargetAddress"` + NameAliasSettable *bool `xml:"nameAliasSettable"` + Ipv4EnableSettable *bool `xml:"ipv4EnableSettable"` + Ipv6EnableSettable *bool `xml:"ipv6EnableSettable"` + Ipv6PrefixLengthSettable *bool `xml:"ipv6PrefixLengthSettable"` + Ipv6PrefixLength int `xml:"ipv6PrefixLength,omitempty"` + Ipv6DhcpConfigurationSettable *bool `xml:"ipv6DhcpConfigurationSettable"` + Ipv6LinkLocalAutoConfigurationSettable *bool `xml:"ipv6LinkLocalAutoConfigurationSettable"` + Ipv6RouterAdvertisementConfigurationSettable *bool `xml:"ipv6RouterAdvertisementConfigurationSettable"` + Ipv6DefaultGatewaySettable *bool `xml:"ipv6DefaultGatewaySettable"` + Ipv6MaxStaticAddressesSupported int `xml:"ipv6MaxStaticAddressesSupported,omitempty"` +} + +func init() { + t["HostInternetScsiHbaIPCapabilities"] = reflect.TypeOf((*HostInternetScsiHbaIPCapabilities)(nil)).Elem() +} + +type HostInternetScsiHbaIPProperties struct { + DynamicData + + Mac string `xml:"mac,omitempty"` + Address string `xml:"address,omitempty"` + DhcpConfigurationEnabled bool `xml:"dhcpConfigurationEnabled"` + SubnetMask string `xml:"subnetMask,omitempty"` + DefaultGateway string `xml:"defaultGateway,omitempty"` + PrimaryDnsServerAddress string `xml:"primaryDnsServerAddress,omitempty"` + AlternateDnsServerAddress string `xml:"alternateDnsServerAddress,omitempty"` + Ipv6Address string `xml:"ipv6Address,omitempty"` + Ipv6SubnetMask string `xml:"ipv6SubnetMask,omitempty"` + Ipv6DefaultGateway string `xml:"ipv6DefaultGateway,omitempty"` 
+ ArpRedirectEnabled *bool `xml:"arpRedirectEnabled"` + Mtu int `xml:"mtu,omitempty"` + JumboFramesEnabled *bool `xml:"jumboFramesEnabled"` + Ipv4Enabled *bool `xml:"ipv4Enabled"` + Ipv6Enabled *bool `xml:"ipv6Enabled"` + Ipv6properties *HostInternetScsiHbaIPv6Properties `xml:"ipv6properties,omitempty"` +} + +func init() { + t["HostInternetScsiHbaIPProperties"] = reflect.TypeOf((*HostInternetScsiHbaIPProperties)(nil)).Elem() +} + +type HostInternetScsiHbaIPv6Properties struct { + DynamicData + + IscsiIpv6Address []HostInternetScsiHbaIscsiIpv6Address `xml:"iscsiIpv6Address,omitempty"` + Ipv6DhcpConfigurationEnabled *bool `xml:"ipv6DhcpConfigurationEnabled"` + Ipv6LinkLocalAutoConfigurationEnabled *bool `xml:"ipv6LinkLocalAutoConfigurationEnabled"` + Ipv6RouterAdvertisementConfigurationEnabled *bool `xml:"ipv6RouterAdvertisementConfigurationEnabled"` + Ipv6DefaultGateway string `xml:"ipv6DefaultGateway,omitempty"` +} + +func init() { + t["HostInternetScsiHbaIPv6Properties"] = reflect.TypeOf((*HostInternetScsiHbaIPv6Properties)(nil)).Elem() +} + +type HostInternetScsiHbaIscsiIpv6Address struct { + DynamicData + + Address string `xml:"address"` + PrefixLength int `xml:"prefixLength"` + Origin string `xml:"origin"` + Operation string `xml:"operation,omitempty"` +} + +func init() { + t["HostInternetScsiHbaIscsiIpv6Address"] = reflect.TypeOf((*HostInternetScsiHbaIscsiIpv6Address)(nil)).Elem() +} + +type HostInternetScsiHbaParamValue struct { + OptionValue + + IsInherited *bool `xml:"isInherited"` +} + +func init() { + t["HostInternetScsiHbaParamValue"] = reflect.TypeOf((*HostInternetScsiHbaParamValue)(nil)).Elem() +} + +type HostInternetScsiHbaSendTarget struct { + DynamicData + + Address string `xml:"address"` + Port int `xml:"port,omitempty"` + AuthenticationProperties *HostInternetScsiHbaAuthenticationProperties `xml:"authenticationProperties,omitempty"` + DigestProperties *HostInternetScsiHbaDigestProperties `xml:"digestProperties,omitempty"` + SupportedAdvancedOptions []OptionDef `xml:"supportedAdvancedOptions,omitempty"` + AdvancedOptions []HostInternetScsiHbaParamValue `xml:"advancedOptions,omitempty"` + Parent string `xml:"parent,omitempty"` +} + +func init() { + t["HostInternetScsiHbaSendTarget"] = reflect.TypeOf((*HostInternetScsiHbaSendTarget)(nil)).Elem() +} + +type HostInternetScsiHbaStaticTarget struct { + DynamicData + + Address string `xml:"address"` + Port int `xml:"port,omitempty"` + IScsiName string `xml:"iScsiName"` + DiscoveryMethod string `xml:"discoveryMethod,omitempty"` + AuthenticationProperties *HostInternetScsiHbaAuthenticationProperties `xml:"authenticationProperties,omitempty"` + DigestProperties *HostInternetScsiHbaDigestProperties `xml:"digestProperties,omitempty"` + SupportedAdvancedOptions []OptionDef `xml:"supportedAdvancedOptions,omitempty"` + AdvancedOptions []HostInternetScsiHbaParamValue `xml:"advancedOptions,omitempty"` + Parent string `xml:"parent,omitempty"` +} + +func init() { + t["HostInternetScsiHbaStaticTarget"] = reflect.TypeOf((*HostInternetScsiHbaStaticTarget)(nil)).Elem() +} + +type HostInternetScsiHbaTargetSet struct { + DynamicData + + StaticTargets []HostInternetScsiHbaStaticTarget `xml:"staticTargets,omitempty"` + SendTargets []HostInternetScsiHbaSendTarget `xml:"sendTargets,omitempty"` +} + +func init() { + t["HostInternetScsiHbaTargetSet"] = reflect.TypeOf((*HostInternetScsiHbaTargetSet)(nil)).Elem() +} + +type HostInternetScsiTargetTransport struct { + HostTargetTransport + + IScsiName string `xml:"iScsiName"` + IScsiAlias string 
`xml:"iScsiAlias"` + Address []string `xml:"address,omitempty"` +} + +func init() { + t["HostInternetScsiTargetTransport"] = reflect.TypeOf((*HostInternetScsiTargetTransport)(nil)).Elem() +} + +type HostInventoryFull struct { + NotEnoughLicenses + + Capacity int `xml:"capacity"` +} + +func init() { + t["HostInventoryFull"] = reflect.TypeOf((*HostInventoryFull)(nil)).Elem() +} + +type HostInventoryFullEvent struct { + LicenseEvent + + Capacity int `xml:"capacity"` +} + +func init() { + t["HostInventoryFullEvent"] = reflect.TypeOf((*HostInventoryFullEvent)(nil)).Elem() +} + +type HostInventoryFullFault HostInventoryFull + +func init() { + t["HostInventoryFullFault"] = reflect.TypeOf((*HostInventoryFullFault)(nil)).Elem() +} + +type HostInventoryUnreadableEvent struct { + Event +} + +func init() { + t["HostInventoryUnreadableEvent"] = reflect.TypeOf((*HostInventoryUnreadableEvent)(nil)).Elem() +} + +type HostIoFilterInfo struct { + IoFilterInfo + + Available bool `xml:"available"` +} + +func init() { + t["HostIoFilterInfo"] = reflect.TypeOf((*HostIoFilterInfo)(nil)).Elem() +} + +type HostIpChangedEvent struct { + HostEvent + + OldIP string `xml:"oldIP"` + NewIP string `xml:"newIP"` +} + +func init() { + t["HostIpChangedEvent"] = reflect.TypeOf((*HostIpChangedEvent)(nil)).Elem() +} + +type HostIpConfig struct { + DynamicData + + Dhcp bool `xml:"dhcp"` + IpAddress string `xml:"ipAddress,omitempty"` + SubnetMask string `xml:"subnetMask,omitempty"` + IpV6Config *HostIpConfigIpV6AddressConfiguration `xml:"ipV6Config,omitempty"` +} + +func init() { + t["HostIpConfig"] = reflect.TypeOf((*HostIpConfig)(nil)).Elem() +} + +type HostIpConfigIpV6Address struct { + DynamicData + + IpAddress string `xml:"ipAddress"` + PrefixLength int `xml:"prefixLength"` + Origin string `xml:"origin,omitempty"` + DadState string `xml:"dadState,omitempty"` + Lifetime *time.Time `xml:"lifetime"` + Operation string `xml:"operation,omitempty"` +} + +func init() { + t["HostIpConfigIpV6Address"] = reflect.TypeOf((*HostIpConfigIpV6Address)(nil)).Elem() +} + +type HostIpConfigIpV6AddressConfiguration struct { + DynamicData + + IpV6Address []HostIpConfigIpV6Address `xml:"ipV6Address,omitempty"` + AutoConfigurationEnabled *bool `xml:"autoConfigurationEnabled"` + DhcpV6Enabled *bool `xml:"dhcpV6Enabled"` +} + +func init() { + t["HostIpConfigIpV6AddressConfiguration"] = reflect.TypeOf((*HostIpConfigIpV6AddressConfiguration)(nil)).Elem() +} + +type HostIpInconsistentEvent struct { + HostEvent + + IpAddress string `xml:"ipAddress"` + IpAddress2 string `xml:"ipAddress2"` +} + +func init() { + t["HostIpInconsistentEvent"] = reflect.TypeOf((*HostIpInconsistentEvent)(nil)).Elem() +} + +type HostIpRouteConfig struct { + DynamicData + + DefaultGateway string `xml:"defaultGateway,omitempty"` + GatewayDevice string `xml:"gatewayDevice,omitempty"` + IpV6DefaultGateway string `xml:"ipV6DefaultGateway,omitempty"` + IpV6GatewayDevice string `xml:"ipV6GatewayDevice,omitempty"` +} + +func init() { + t["HostIpRouteConfig"] = reflect.TypeOf((*HostIpRouteConfig)(nil)).Elem() +} + +type HostIpRouteConfigSpec struct { + HostIpRouteConfig + + GatewayDeviceConnection *HostVirtualNicConnection `xml:"gatewayDeviceConnection,omitempty"` + IpV6GatewayDeviceConnection *HostVirtualNicConnection `xml:"ipV6GatewayDeviceConnection,omitempty"` +} + +func init() { + t["HostIpRouteConfigSpec"] = reflect.TypeOf((*HostIpRouteConfigSpec)(nil)).Elem() +} + +type HostIpRouteEntry struct { + DynamicData + + Network string `xml:"network"` + PrefixLength int 
`xml:"prefixLength"` + Gateway string `xml:"gateway"` + DeviceName string `xml:"deviceName,omitempty"` +} + +func init() { + t["HostIpRouteEntry"] = reflect.TypeOf((*HostIpRouteEntry)(nil)).Elem() +} + +type HostIpRouteOp struct { + DynamicData + + ChangeOperation string `xml:"changeOperation"` + Route HostIpRouteEntry `xml:"route"` +} + +func init() { + t["HostIpRouteOp"] = reflect.TypeOf((*HostIpRouteOp)(nil)).Elem() +} + +type HostIpRouteTableConfig struct { + DynamicData + + IpRoute []HostIpRouteOp `xml:"ipRoute,omitempty"` + Ipv6Route []HostIpRouteOp `xml:"ipv6Route,omitempty"` +} + +func init() { + t["HostIpRouteTableConfig"] = reflect.TypeOf((*HostIpRouteTableConfig)(nil)).Elem() +} + +type HostIpRouteTableInfo struct { + DynamicData + + IpRoute []HostIpRouteEntry `xml:"ipRoute,omitempty"` + Ipv6Route []HostIpRouteEntry `xml:"ipv6Route,omitempty"` +} + +func init() { + t["HostIpRouteTableInfo"] = reflect.TypeOf((*HostIpRouteTableInfo)(nil)).Elem() +} + +type HostIpToShortNameFailedEvent struct { + HostEvent +} + +func init() { + t["HostIpToShortNameFailedEvent"] = reflect.TypeOf((*HostIpToShortNameFailedEvent)(nil)).Elem() +} + +type HostIpmiInfo struct { + DynamicData + + BmcIpAddress string `xml:"bmcIpAddress,omitempty"` + BmcMacAddress string `xml:"bmcMacAddress,omitempty"` + Login string `xml:"login,omitempty"` + Password string `xml:"password,omitempty"` +} + +func init() { + t["HostIpmiInfo"] = reflect.TypeOf((*HostIpmiInfo)(nil)).Elem() +} + +type HostIsolationIpPingFailedEvent struct { + HostDasEvent + + IsolationIp string `xml:"isolationIp"` +} + +func init() { + t["HostIsolationIpPingFailedEvent"] = reflect.TypeOf((*HostIsolationIpPingFailedEvent)(nil)).Elem() +} + +type HostLicensableResourceInfo struct { + DynamicData + + Resource []KeyAnyValue `xml:"resource"` +} + +func init() { + t["HostLicensableResourceInfo"] = reflect.TypeOf((*HostLicensableResourceInfo)(nil)).Elem() +} + +type HostLicenseConnectInfo struct { + DynamicData + + License LicenseManagerLicenseInfo `xml:"license"` + Evaluation LicenseManagerEvaluationInfo `xml:"evaluation"` + Resource *HostLicensableResourceInfo `xml:"resource,omitempty"` +} + +func init() { + t["HostLicenseConnectInfo"] = reflect.TypeOf((*HostLicenseConnectInfo)(nil)).Elem() +} + +type HostLicenseExpiredEvent struct { + LicenseEvent +} + +func init() { + t["HostLicenseExpiredEvent"] = reflect.TypeOf((*HostLicenseExpiredEvent)(nil)).Elem() +} + +type HostLicenseSpec struct { + DynamicData + + Source BaseLicenseSource `xml:"source,omitempty,typeattr"` + EditionKey string `xml:"editionKey,omitempty"` + DisabledFeatureKey []string `xml:"disabledFeatureKey,omitempty"` + EnabledFeatureKey []string `xml:"enabledFeatureKey,omitempty"` +} + +func init() { + t["HostLicenseSpec"] = reflect.TypeOf((*HostLicenseSpec)(nil)).Elem() +} + +type HostListSummary struct { + DynamicData + + Host *ManagedObjectReference `xml:"host,omitempty"` + Hardware *HostHardwareSummary `xml:"hardware,omitempty"` + Runtime *HostRuntimeInfo `xml:"runtime,omitempty"` + Config HostConfigSummary `xml:"config"` + QuickStats HostListSummaryQuickStats `xml:"quickStats"` + OverallStatus ManagedEntityStatus `xml:"overallStatus"` + RebootRequired bool `xml:"rebootRequired"` + CustomValue []BaseCustomFieldValue `xml:"customValue,omitempty,typeattr"` + ManagementServerIp string `xml:"managementServerIp,omitempty"` + MaxEVCModeKey string `xml:"maxEVCModeKey,omitempty"` + CurrentEVCModeKey string `xml:"currentEVCModeKey,omitempty"` + Gateway *HostListSummaryGatewaySummary 
`xml:"gateway,omitempty"` +} + +func init() { + t["HostListSummary"] = reflect.TypeOf((*HostListSummary)(nil)).Elem() +} + +type HostListSummaryGatewaySummary struct { + DynamicData + + GatewayType string `xml:"gatewayType"` + GatewayId string `xml:"gatewayId"` +} + +func init() { + t["HostListSummaryGatewaySummary"] = reflect.TypeOf((*HostListSummaryGatewaySummary)(nil)).Elem() +} + +type HostListSummaryQuickStats struct { + DynamicData + + OverallCpuUsage int `xml:"overallCpuUsage,omitempty"` + OverallMemoryUsage int `xml:"overallMemoryUsage,omitempty"` + DistributedCpuFairness int `xml:"distributedCpuFairness,omitempty"` + DistributedMemoryFairness int `xml:"distributedMemoryFairness,omitempty"` + Uptime int `xml:"uptime,omitempty"` +} + +func init() { + t["HostListSummaryQuickStats"] = reflect.TypeOf((*HostListSummaryQuickStats)(nil)).Elem() +} + +type HostLocalAuthenticationInfo struct { + HostAuthenticationStoreInfo +} + +func init() { + t["HostLocalAuthenticationInfo"] = reflect.TypeOf((*HostLocalAuthenticationInfo)(nil)).Elem() +} + +type HostLocalFileSystemVolume struct { + HostFileSystemVolume + + Device string `xml:"device"` +} + +func init() { + t["HostLocalFileSystemVolume"] = reflect.TypeOf((*HostLocalFileSystemVolume)(nil)).Elem() +} + +type HostLocalFileSystemVolumeSpec struct { + DynamicData + + Device string `xml:"device"` + LocalPath string `xml:"localPath"` +} + +func init() { + t["HostLocalFileSystemVolumeSpec"] = reflect.TypeOf((*HostLocalFileSystemVolumeSpec)(nil)).Elem() +} + +type HostLocalPortCreatedEvent struct { + DvsEvent + + HostLocalPort DVSHostLocalPortInfo `xml:"hostLocalPort"` +} + +func init() { + t["HostLocalPortCreatedEvent"] = reflect.TypeOf((*HostLocalPortCreatedEvent)(nil)).Elem() +} + +type HostLowLevelProvisioningManagerDiskLayoutSpec struct { + DynamicData + + ControllerType string `xml:"controllerType"` + BusNumber int `xml:"busNumber"` + UnitNumber int `xml:"unitNumber"` + SrcFilename string `xml:"srcFilename"` + DstFilename string `xml:"dstFilename"` +} + +func init() { + t["HostLowLevelProvisioningManagerDiskLayoutSpec"] = reflect.TypeOf((*HostLowLevelProvisioningManagerDiskLayoutSpec)(nil)).Elem() +} + +type HostLowLevelProvisioningManagerFileDeleteResult struct { + DynamicData + + FileName string `xml:"fileName"` + Fault LocalizedMethodFault `xml:"fault"` +} + +func init() { + t["HostLowLevelProvisioningManagerFileDeleteResult"] = reflect.TypeOf((*HostLowLevelProvisioningManagerFileDeleteResult)(nil)).Elem() +} + +type HostLowLevelProvisioningManagerFileDeleteSpec struct { + DynamicData + + FileName string `xml:"fileName"` + FileType string `xml:"fileType"` +} + +func init() { + t["HostLowLevelProvisioningManagerFileDeleteSpec"] = reflect.TypeOf((*HostLowLevelProvisioningManagerFileDeleteSpec)(nil)).Elem() +} + +type HostLowLevelProvisioningManagerFileReserveResult struct { + DynamicData + + BaseName string `xml:"baseName"` + ParentDir string `xml:"parentDir"` + ReservedName string `xml:"reservedName"` +} + +func init() { + t["HostLowLevelProvisioningManagerFileReserveResult"] = reflect.TypeOf((*HostLowLevelProvisioningManagerFileReserveResult)(nil)).Elem() +} + +type HostLowLevelProvisioningManagerFileReserveSpec struct { + DynamicData + + BaseName string `xml:"baseName"` + ParentDir string `xml:"parentDir"` + FileType string `xml:"fileType"` + StorageProfile string `xml:"storageProfile"` +} + +func init() { + t["HostLowLevelProvisioningManagerFileReserveSpec"] = reflect.TypeOf((*HostLowLevelProvisioningManagerFileReserveSpec)(nil)).Elem() 
+} + +type HostLowLevelProvisioningManagerSnapshotLayoutSpec struct { + DynamicData + + Id int `xml:"id"` + SrcFilename string `xml:"srcFilename"` + DstFilename string `xml:"dstFilename"` + Disk []HostLowLevelProvisioningManagerDiskLayoutSpec `xml:"disk,omitempty"` +} + +func init() { + t["HostLowLevelProvisioningManagerSnapshotLayoutSpec"] = reflect.TypeOf((*HostLowLevelProvisioningManagerSnapshotLayoutSpec)(nil)).Elem() +} + +type HostLowLevelProvisioningManagerVmMigrationStatus struct { + DynamicData + + MigrationId int64 `xml:"migrationId"` + Type string `xml:"type"` + Source bool `xml:"source"` + ConsideredSuccessful bool `xml:"consideredSuccessful"` +} + +func init() { + t["HostLowLevelProvisioningManagerVmMigrationStatus"] = reflect.TypeOf((*HostLowLevelProvisioningManagerVmMigrationStatus)(nil)).Elem() +} + +type HostLowLevelProvisioningManagerVmRecoveryInfo struct { + DynamicData + + Version string `xml:"version"` + BiosUUID string `xml:"biosUUID"` + InstanceUUID string `xml:"instanceUUID"` + FtInfo BaseFaultToleranceConfigInfo `xml:"ftInfo,omitempty,typeattr"` +} + +func init() { + t["HostLowLevelProvisioningManagerVmRecoveryInfo"] = reflect.TypeOf((*HostLowLevelProvisioningManagerVmRecoveryInfo)(nil)).Elem() +} + +type HostMaintenanceSpec struct { + DynamicData + + VsanMode *VsanHostDecommissionMode `xml:"vsanMode,omitempty"` +} + +func init() { + t["HostMaintenanceSpec"] = reflect.TypeOf((*HostMaintenanceSpec)(nil)).Elem() +} + +type HostMemberHealthCheckResult struct { + DynamicData + + Summary string `xml:"summary,omitempty"` +} + +func init() { + t["HostMemberHealthCheckResult"] = reflect.TypeOf((*HostMemberHealthCheckResult)(nil)).Elem() +} + +type HostMemberRuntimeInfo struct { + DynamicData + + Host ManagedObjectReference `xml:"host"` + Status string `xml:"status,omitempty"` + StatusDetail string `xml:"statusDetail,omitempty"` + HealthCheckResult []BaseHostMemberHealthCheckResult `xml:"healthCheckResult,omitempty,typeattr"` +} + +func init() { + t["HostMemberRuntimeInfo"] = reflect.TypeOf((*HostMemberRuntimeInfo)(nil)).Elem() +} + +type HostMemberUplinkHealthCheckResult struct { + HostMemberHealthCheckResult + + UplinkPortKey string `xml:"uplinkPortKey"` +} + +func init() { + t["HostMemberUplinkHealthCheckResult"] = reflect.TypeOf((*HostMemberUplinkHealthCheckResult)(nil)).Elem() +} + +type HostMemoryProfile struct { + ApplyProfile +} + +func init() { + t["HostMemoryProfile"] = reflect.TypeOf((*HostMemoryProfile)(nil)).Elem() +} + +type HostMemorySpec struct { + DynamicData + + ServiceConsoleReservation int64 `xml:"serviceConsoleReservation,omitempty"` +} + +func init() { + t["HostMemorySpec"] = reflect.TypeOf((*HostMemorySpec)(nil)).Elem() +} + +type HostMissingNetworksEvent struct { + HostDasEvent + + Ips string `xml:"ips,omitempty"` +} + +func init() { + t["HostMissingNetworksEvent"] = reflect.TypeOf((*HostMissingNetworksEvent)(nil)).Elem() +} + +type HostMonitoringStateChangedEvent struct { + ClusterEvent + + State string `xml:"state"` +} + +func init() { + t["HostMonitoringStateChangedEvent"] = reflect.TypeOf((*HostMonitoringStateChangedEvent)(nil)).Elem() +} + +type HostMountInfo struct { + DynamicData + + Path string `xml:"path,omitempty"` + AccessMode string `xml:"accessMode"` + Mounted *bool `xml:"mounted"` + Accessible *bool `xml:"accessible"` + InaccessibleReason string `xml:"inaccessibleReason,omitempty"` +} + +func init() { + t["HostMountInfo"] = reflect.TypeOf((*HostMountInfo)(nil)).Elem() +} + +type HostMultipathInfo struct { + DynamicData + + Lun 
[]HostMultipathInfoLogicalUnit `xml:"lun,omitempty"` +} + +func init() { + t["HostMultipathInfo"] = reflect.TypeOf((*HostMultipathInfo)(nil)).Elem() +} + +type HostMultipathInfoFixedLogicalUnitPolicy struct { + HostMultipathInfoLogicalUnitPolicy + + Prefer string `xml:"prefer"` +} + +func init() { + t["HostMultipathInfoFixedLogicalUnitPolicy"] = reflect.TypeOf((*HostMultipathInfoFixedLogicalUnitPolicy)(nil)).Elem() +} + +type HostMultipathInfoLogicalUnit struct { + DynamicData + + Key string `xml:"key"` + Id string `xml:"id"` + Lun string `xml:"lun"` + Path []HostMultipathInfoPath `xml:"path"` + Policy BaseHostMultipathInfoLogicalUnitPolicy `xml:"policy,typeattr"` + StorageArrayTypePolicy *HostMultipathInfoLogicalUnitStorageArrayTypePolicy `xml:"storageArrayTypePolicy,omitempty"` +} + +func init() { + t["HostMultipathInfoLogicalUnit"] = reflect.TypeOf((*HostMultipathInfoLogicalUnit)(nil)).Elem() +} + +type HostMultipathInfoLogicalUnitPolicy struct { + DynamicData + + Policy string `xml:"policy"` +} + +func init() { + t["HostMultipathInfoLogicalUnitPolicy"] = reflect.TypeOf((*HostMultipathInfoLogicalUnitPolicy)(nil)).Elem() +} + +type HostMultipathInfoLogicalUnitStorageArrayTypePolicy struct { + DynamicData + + Policy string `xml:"policy"` +} + +func init() { + t["HostMultipathInfoLogicalUnitStorageArrayTypePolicy"] = reflect.TypeOf((*HostMultipathInfoLogicalUnitStorageArrayTypePolicy)(nil)).Elem() +} + +type HostMultipathInfoPath struct { + DynamicData + + Key string `xml:"key"` + Name string `xml:"name"` + PathState string `xml:"pathState"` + State string `xml:"state,omitempty"` + IsWorkingPath *bool `xml:"isWorkingPath"` + Adapter string `xml:"adapter"` + Lun string `xml:"lun"` + Transport BaseHostTargetTransport `xml:"transport,omitempty,typeattr"` +} + +func init() { + t["HostMultipathInfoPath"] = reflect.TypeOf((*HostMultipathInfoPath)(nil)).Elem() +} + +type HostMultipathStateInfo struct { + DynamicData + + Path []HostMultipathStateInfoPath `xml:"path,omitempty"` +} + +func init() { + t["HostMultipathStateInfo"] = reflect.TypeOf((*HostMultipathStateInfo)(nil)).Elem() +} + +type HostMultipathStateInfoPath struct { + DynamicData + + Name string `xml:"name"` + PathState string `xml:"pathState"` +} + +func init() { + t["HostMultipathStateInfoPath"] = reflect.TypeOf((*HostMultipathStateInfoPath)(nil)).Elem() +} + +type HostNasVolume struct { + HostFileSystemVolume + + RemoteHost string `xml:"remoteHost"` + RemotePath string `xml:"remotePath"` + UserName string `xml:"userName,omitempty"` + RemoteHostNames []string `xml:"remoteHostNames,omitempty"` + SecurityType string `xml:"securityType,omitempty"` + ProtocolEndpoint *bool `xml:"protocolEndpoint"` +} + +func init() { + t["HostNasVolume"] = reflect.TypeOf((*HostNasVolume)(nil)).Elem() +} + +type HostNasVolumeConfig struct { + DynamicData + + ChangeOperation string `xml:"changeOperation,omitempty"` + Spec *HostNasVolumeSpec `xml:"spec,omitempty"` +} + +func init() { + t["HostNasVolumeConfig"] = reflect.TypeOf((*HostNasVolumeConfig)(nil)).Elem() +} + +type HostNasVolumeSpec struct { + DynamicData + + RemoteHost string `xml:"remoteHost"` + RemotePath string `xml:"remotePath"` + LocalPath string `xml:"localPath"` + AccessMode string `xml:"accessMode"` + Type string `xml:"type,omitempty"` + UserName string `xml:"userName,omitempty"` + Password string `xml:"password,omitempty"` + RemoteHostNames []string `xml:"remoteHostNames,omitempty"` + SecurityType string `xml:"securityType,omitempty"` +} + +func init() { + t["HostNasVolumeSpec"] = 
reflect.TypeOf((*HostNasVolumeSpec)(nil)).Elem() +} + +type HostNasVolumeUserInfo struct { + DynamicData + + User string `xml:"user"` +} + +func init() { + t["HostNasVolumeUserInfo"] = reflect.TypeOf((*HostNasVolumeUserInfo)(nil)).Elem() +} + +type HostNatService struct { + DynamicData + + Key string `xml:"key"` + Spec HostNatServiceSpec `xml:"spec"` +} + +func init() { + t["HostNatService"] = reflect.TypeOf((*HostNatService)(nil)).Elem() +} + +type HostNatServiceConfig struct { + DynamicData + + ChangeOperation string `xml:"changeOperation,omitempty"` + Key string `xml:"key"` + Spec HostNatServiceSpec `xml:"spec"` +} + +func init() { + t["HostNatServiceConfig"] = reflect.TypeOf((*HostNatServiceConfig)(nil)).Elem() +} + +type HostNatServiceNameServiceSpec struct { + DynamicData + + DnsAutoDetect bool `xml:"dnsAutoDetect"` + DnsPolicy string `xml:"dnsPolicy"` + DnsRetries int `xml:"dnsRetries"` + DnsTimeout int `xml:"dnsTimeout"` + DnsNameServer []string `xml:"dnsNameServer,omitempty"` + NbdsTimeout int `xml:"nbdsTimeout"` + NbnsRetries int `xml:"nbnsRetries"` + NbnsTimeout int `xml:"nbnsTimeout"` +} + +func init() { + t["HostNatServiceNameServiceSpec"] = reflect.TypeOf((*HostNatServiceNameServiceSpec)(nil)).Elem() +} + +type HostNatServicePortForwardSpec struct { + DynamicData + + Type string `xml:"type"` + Name string `xml:"name"` + HostPort int `xml:"hostPort"` + GuestPort int `xml:"guestPort"` + GuestIpAddress string `xml:"guestIpAddress"` +} + +func init() { + t["HostNatServicePortForwardSpec"] = reflect.TypeOf((*HostNatServicePortForwardSpec)(nil)).Elem() +} + +type HostNatServiceSpec struct { + DynamicData + + VirtualSwitch string `xml:"virtualSwitch"` + ActiveFtp bool `xml:"activeFtp"` + AllowAnyOui bool `xml:"allowAnyOui"` + ConfigPort bool `xml:"configPort"` + IpGatewayAddress string `xml:"ipGatewayAddress"` + UdpTimeout int `xml:"udpTimeout"` + PortForward []HostNatServicePortForwardSpec `xml:"portForward,omitempty"` + NameService *HostNatServiceNameServiceSpec `xml:"nameService,omitempty"` +} + +func init() { + t["HostNatServiceSpec"] = reflect.TypeOf((*HostNatServiceSpec)(nil)).Elem() +} + +type HostNetCapabilities struct { + DynamicData + + CanSetPhysicalNicLinkSpeed bool `xml:"canSetPhysicalNicLinkSpeed"` + SupportsNicTeaming bool `xml:"supportsNicTeaming"` + NicTeamingPolicy []string `xml:"nicTeamingPolicy,omitempty"` + SupportsVlan bool `xml:"supportsVlan"` + UsesServiceConsoleNic bool `xml:"usesServiceConsoleNic"` + SupportsNetworkHints bool `xml:"supportsNetworkHints"` + MaxPortGroupsPerVswitch int `xml:"maxPortGroupsPerVswitch,omitempty"` + VswitchConfigSupported bool `xml:"vswitchConfigSupported"` + VnicConfigSupported bool `xml:"vnicConfigSupported"` + IpRouteConfigSupported bool `xml:"ipRouteConfigSupported"` + DnsConfigSupported bool `xml:"dnsConfigSupported"` + DhcpOnVnicSupported bool `xml:"dhcpOnVnicSupported"` + IpV6Supported *bool `xml:"ipV6Supported"` +} + +func init() { + t["HostNetCapabilities"] = reflect.TypeOf((*HostNetCapabilities)(nil)).Elem() +} + +type HostNetOffloadCapabilities struct { + DynamicData + + CsumOffload *bool `xml:"csumOffload"` + TcpSegmentation *bool `xml:"tcpSegmentation"` + ZeroCopyXmit *bool `xml:"zeroCopyXmit"` +} + +func init() { + t["HostNetOffloadCapabilities"] = reflect.TypeOf((*HostNetOffloadCapabilities)(nil)).Elem() +} + +type HostNetStackInstance struct { + DynamicData + + Key string `xml:"key,omitempty"` + Name string `xml:"name,omitempty"` + DnsConfig BaseHostDnsConfig `xml:"dnsConfig,omitempty,typeattr"` + IpRouteConfig 
BaseHostIpRouteConfig `xml:"ipRouteConfig,omitempty,typeattr"` + RequestedMaxNumberOfConnections int `xml:"requestedMaxNumberOfConnections,omitempty"` + CongestionControlAlgorithm string `xml:"congestionControlAlgorithm,omitempty"` + IpV6Enabled *bool `xml:"ipV6Enabled"` + RouteTableConfig *HostIpRouteTableConfig `xml:"routeTableConfig,omitempty"` +} + +func init() { + t["HostNetStackInstance"] = reflect.TypeOf((*HostNetStackInstance)(nil)).Elem() +} + +type HostNetworkConfig struct { + DynamicData + + Vswitch []HostVirtualSwitchConfig `xml:"vswitch,omitempty"` + ProxySwitch []HostProxySwitchConfig `xml:"proxySwitch,omitempty"` + Portgroup []HostPortGroupConfig `xml:"portgroup,omitempty"` + Pnic []PhysicalNicConfig `xml:"pnic,omitempty"` + Vnic []HostVirtualNicConfig `xml:"vnic,omitempty"` + ConsoleVnic []HostVirtualNicConfig `xml:"consoleVnic,omitempty"` + DnsConfig BaseHostDnsConfig `xml:"dnsConfig,omitempty,typeattr"` + IpRouteConfig BaseHostIpRouteConfig `xml:"ipRouteConfig,omitempty,typeattr"` + ConsoleIpRouteConfig BaseHostIpRouteConfig `xml:"consoleIpRouteConfig,omitempty,typeattr"` + RouteTableConfig *HostIpRouteTableConfig `xml:"routeTableConfig,omitempty"` + Dhcp []HostDhcpServiceConfig `xml:"dhcp,omitempty"` + Nat []HostNatServiceConfig `xml:"nat,omitempty"` + IpV6Enabled *bool `xml:"ipV6Enabled"` + NetStackSpec []HostNetworkConfigNetStackSpec `xml:"netStackSpec,omitempty"` +} + +func init() { + t["HostNetworkConfig"] = reflect.TypeOf((*HostNetworkConfig)(nil)).Elem() +} + +type HostNetworkConfigNetStackSpec struct { + DynamicData + + NetStackInstance HostNetStackInstance `xml:"netStackInstance"` + Operation string `xml:"operation,omitempty"` +} + +func init() { + t["HostNetworkConfigNetStackSpec"] = reflect.TypeOf((*HostNetworkConfigNetStackSpec)(nil)).Elem() +} + +type HostNetworkConfigResult struct { + DynamicData + + VnicDevice []string `xml:"vnicDevice,omitempty"` + ConsoleVnicDevice []string `xml:"consoleVnicDevice,omitempty"` +} + +func init() { + t["HostNetworkConfigResult"] = reflect.TypeOf((*HostNetworkConfigResult)(nil)).Elem() +} + +type HostNetworkInfo struct { + DynamicData + + Vswitch []HostVirtualSwitch `xml:"vswitch,omitempty"` + ProxySwitch []HostProxySwitch `xml:"proxySwitch,omitempty"` + Portgroup []HostPortGroup `xml:"portgroup,omitempty"` + Pnic []PhysicalNic `xml:"pnic,omitempty"` + Vnic []HostVirtualNic `xml:"vnic,omitempty"` + ConsoleVnic []HostVirtualNic `xml:"consoleVnic,omitempty"` + DnsConfig BaseHostDnsConfig `xml:"dnsConfig,omitempty,typeattr"` + IpRouteConfig BaseHostIpRouteConfig `xml:"ipRouteConfig,omitempty,typeattr"` + ConsoleIpRouteConfig BaseHostIpRouteConfig `xml:"consoleIpRouteConfig,omitempty,typeattr"` + RouteTableInfo *HostIpRouteTableInfo `xml:"routeTableInfo,omitempty"` + Dhcp []HostDhcpService `xml:"dhcp,omitempty"` + Nat []HostNatService `xml:"nat,omitempty"` + IpV6Enabled *bool `xml:"ipV6Enabled"` + AtBootIpV6Enabled *bool `xml:"atBootIpV6Enabled"` + NetStackInstance []HostNetStackInstance `xml:"netStackInstance,omitempty"` + OpaqueSwitch []HostOpaqueSwitch `xml:"opaqueSwitch,omitempty"` + OpaqueNetwork []HostOpaqueNetworkInfo `xml:"opaqueNetwork,omitempty"` +} + +func init() { + t["HostNetworkInfo"] = reflect.TypeOf((*HostNetworkInfo)(nil)).Elem() +} + +type HostNetworkPolicy struct { + DynamicData + + Security *HostNetworkSecurityPolicy `xml:"security,omitempty"` + NicTeaming *HostNicTeamingPolicy `xml:"nicTeaming,omitempty"` + OffloadPolicy *HostNetOffloadCapabilities `xml:"offloadPolicy,omitempty"` + ShapingPolicy 
*HostNetworkTrafficShapingPolicy `xml:"shapingPolicy,omitempty"` +} + +func init() { + t["HostNetworkPolicy"] = reflect.TypeOf((*HostNetworkPolicy)(nil)).Elem() +} + +type HostNetworkResourceRuntime struct { + DynamicData + + PnicResourceInfo []HostPnicNetworkResourceInfo `xml:"pnicResourceInfo"` +} + +func init() { + t["HostNetworkResourceRuntime"] = reflect.TypeOf((*HostNetworkResourceRuntime)(nil)).Elem() +} + +type HostNetworkSecurityPolicy struct { + DynamicData + + AllowPromiscuous *bool `xml:"allowPromiscuous"` + MacChanges *bool `xml:"macChanges"` + ForgedTransmits *bool `xml:"forgedTransmits"` +} + +func init() { + t["HostNetworkSecurityPolicy"] = reflect.TypeOf((*HostNetworkSecurityPolicy)(nil)).Elem() +} + +type HostNetworkTrafficShapingPolicy struct { + DynamicData + + Enabled *bool `xml:"enabled"` + AverageBandwidth int64 `xml:"averageBandwidth,omitempty"` + PeakBandwidth int64 `xml:"peakBandwidth,omitempty"` + BurstSize int64 `xml:"burstSize,omitempty"` +} + +func init() { + t["HostNetworkTrafficShapingPolicy"] = reflect.TypeOf((*HostNetworkTrafficShapingPolicy)(nil)).Elem() +} + +type HostNewNetworkConnectInfo struct { + HostConnectInfoNetworkInfo +} + +func init() { + t["HostNewNetworkConnectInfo"] = reflect.TypeOf((*HostNewNetworkConnectInfo)(nil)).Elem() +} + +type HostNicFailureCriteria struct { + DynamicData + + CheckSpeed string `xml:"checkSpeed,omitempty"` + Speed int `xml:"speed,omitempty"` + CheckDuplex *bool `xml:"checkDuplex"` + FullDuplex *bool `xml:"fullDuplex"` + CheckErrorPercent *bool `xml:"checkErrorPercent"` + Percentage int `xml:"percentage,omitempty"` + CheckBeacon *bool `xml:"checkBeacon"` +} + +func init() { + t["HostNicFailureCriteria"] = reflect.TypeOf((*HostNicFailureCriteria)(nil)).Elem() +} + +type HostNicOrderPolicy struct { + DynamicData + + ActiveNic []string `xml:"activeNic,omitempty"` + StandbyNic []string `xml:"standbyNic,omitempty"` +} + +func init() { + t["HostNicOrderPolicy"] = reflect.TypeOf((*HostNicOrderPolicy)(nil)).Elem() +} + +type HostNicTeamingPolicy struct { + DynamicData + + Policy string `xml:"policy,omitempty"` + ReversePolicy *bool `xml:"reversePolicy"` + NotifySwitches *bool `xml:"notifySwitches"` + RollingOrder *bool `xml:"rollingOrder"` + FailureCriteria *HostNicFailureCriteria `xml:"failureCriteria,omitempty"` + NicOrder *HostNicOrderPolicy `xml:"nicOrder,omitempty"` +} + +func init() { + t["HostNicTeamingPolicy"] = reflect.TypeOf((*HostNicTeamingPolicy)(nil)).Elem() +} + +type HostNoAvailableNetworksEvent struct { + HostDasEvent + + Ips string `xml:"ips,omitempty"` +} + +func init() { + t["HostNoAvailableNetworksEvent"] = reflect.TypeOf((*HostNoAvailableNetworksEvent)(nil)).Elem() +} + +type HostNoHAEnabledPortGroupsEvent struct { + HostDasEvent +} + +func init() { + t["HostNoHAEnabledPortGroupsEvent"] = reflect.TypeOf((*HostNoHAEnabledPortGroupsEvent)(nil)).Elem() +} + +type HostNoRedundantManagementNetworkEvent struct { + HostDasEvent +} + +func init() { + t["HostNoRedundantManagementNetworkEvent"] = reflect.TypeOf((*HostNoRedundantManagementNetworkEvent)(nil)).Elem() +} + +type HostNonCompliantEvent struct { + HostEvent +} + +func init() { + t["HostNonCompliantEvent"] = reflect.TypeOf((*HostNonCompliantEvent)(nil)).Elem() +} + +type HostNotConnected struct { + HostCommunication +} + +func init() { + t["HostNotConnected"] = reflect.TypeOf((*HostNotConnected)(nil)).Elem() +} + +type HostNotConnectedFault HostNotConnected + +func init() { + t["HostNotConnectedFault"] = reflect.TypeOf((*HostNotConnectedFault)(nil)).Elem() 
+} + +type HostNotInClusterEvent struct { + HostDasEvent +} + +func init() { + t["HostNotInClusterEvent"] = reflect.TypeOf((*HostNotInClusterEvent)(nil)).Elem() +} + +type HostNotReachable struct { + HostCommunication +} + +func init() { + t["HostNotReachable"] = reflect.TypeOf((*HostNotReachable)(nil)).Elem() +} + +type HostNotReachableFault HostNotReachable + +func init() { + t["HostNotReachableFault"] = reflect.TypeOf((*HostNotReachableFault)(nil)).Elem() +} + +type HostNtpConfig struct { + DynamicData + + Server []string `xml:"server,omitempty"` + ConfigFile []string `xml:"configFile,omitempty"` +} + +func init() { + t["HostNtpConfig"] = reflect.TypeOf((*HostNtpConfig)(nil)).Elem() +} + +type HostNumaInfo struct { + DynamicData + + Type string `xml:"type"` + NumNodes int `xml:"numNodes"` + NumaNode []HostNumaNode `xml:"numaNode,omitempty"` +} + +func init() { + t["HostNumaInfo"] = reflect.TypeOf((*HostNumaInfo)(nil)).Elem() +} + +type HostNumaNode struct { + DynamicData + + TypeId byte `xml:"typeId"` + CpuID []int16 `xml:"cpuID"` + MemoryRangeBegin int64 `xml:"memoryRangeBegin"` + MemoryRangeLength int64 `xml:"memoryRangeLength"` +} + +func init() { + t["HostNumaNode"] = reflect.TypeOf((*HostNumaNode)(nil)).Elem() +} + +type HostNumericSensorInfo struct { + DynamicData + + Name string `xml:"name"` + HealthState BaseElementDescription `xml:"healthState,omitempty,typeattr"` + CurrentReading int64 `xml:"currentReading"` + UnitModifier int `xml:"unitModifier"` + BaseUnits string `xml:"baseUnits"` + RateUnits string `xml:"rateUnits,omitempty"` + SensorType string `xml:"sensorType"` +} + +func init() { + t["HostNumericSensorInfo"] = reflect.TypeOf((*HostNumericSensorInfo)(nil)).Elem() +} + +type HostOpaqueNetworkInfo struct { + DynamicData + + OpaqueNetworkId string `xml:"opaqueNetworkId"` + OpaqueNetworkName string `xml:"opaqueNetworkName"` + OpaqueNetworkType string `xml:"opaqueNetworkType"` + PnicZone []string `xml:"pnicZone,omitempty"` +} + +func init() { + t["HostOpaqueNetworkInfo"] = reflect.TypeOf((*HostOpaqueNetworkInfo)(nil)).Elem() +} + +type HostOpaqueSwitch struct { + DynamicData + + Key string `xml:"key"` + Name string `xml:"name,omitempty"` + Pnic []string `xml:"pnic,omitempty"` + PnicZone []HostOpaqueSwitchPhysicalNicZone `xml:"pnicZone,omitempty"` + Status string `xml:"status,omitempty"` + Vtep []HostVirtualNic `xml:"vtep,omitempty"` +} + +func init() { + t["HostOpaqueSwitch"] = reflect.TypeOf((*HostOpaqueSwitch)(nil)).Elem() +} + +type HostOpaqueSwitchPhysicalNicZone struct { + DynamicData + + Key string `xml:"key"` + PnicDevice []string `xml:"pnicDevice,omitempty"` +} + +func init() { + t["HostOpaqueSwitchPhysicalNicZone"] = reflect.TypeOf((*HostOpaqueSwitchPhysicalNicZone)(nil)).Elem() +} + +type HostOvercommittedEvent struct { + ClusterOvercommittedEvent +} + +func init() { + t["HostOvercommittedEvent"] = reflect.TypeOf((*HostOvercommittedEvent)(nil)).Elem() +} + +type HostParallelScsiHba struct { + HostHostBusAdapter +} + +func init() { + t["HostParallelScsiHba"] = reflect.TypeOf((*HostParallelScsiHba)(nil)).Elem() +} + +type HostParallelScsiTargetTransport struct { + HostTargetTransport +} + +func init() { + t["HostParallelScsiTargetTransport"] = reflect.TypeOf((*HostParallelScsiTargetTransport)(nil)).Elem() +} + +type HostPatchManagerLocator struct { + DynamicData + + Url string `xml:"url"` + Proxy string `xml:"proxy,omitempty"` +} + +func init() { + t["HostPatchManagerLocator"] = reflect.TypeOf((*HostPatchManagerLocator)(nil)).Elem() +} + +type 
HostPatchManagerPatchManagerOperationSpec struct {
+	DynamicData
+
+	Proxy string `xml:"proxy,omitempty"`
+	Port int `xml:"port,omitempty"`
+	UserName string `xml:"userName,omitempty"`
+	Password string `xml:"password,omitempty"`
+	CmdOption string `xml:"cmdOption,omitempty"`
+}
+
+func init() {
+	t["HostPatchManagerPatchManagerOperationSpec"] = reflect.TypeOf((*HostPatchManagerPatchManagerOperationSpec)(nil)).Elem()
+}
+
+type HostPatchManagerResult struct {
+	DynamicData
+
+	Version string `xml:"version"`
+	Status []HostPatchManagerStatus `xml:"status,omitempty"`
+	XmlResult string `xml:"xmlResult,omitempty"`
+}
+
+func init() {
+	t["HostPatchManagerResult"] = reflect.TypeOf((*HostPatchManagerResult)(nil)).Elem()
+}
+
+type HostPatchManagerStatus struct {
+	DynamicData
+
+	Id string `xml:"id"`
+	Applicable bool `xml:"applicable"`
+	Reason []string `xml:"reason,omitempty"`
+	Integrity string `xml:"integrity,omitempty"`
+	Installed bool `xml:"installed"`
+	InstallState []string `xml:"installState,omitempty"`
+	PrerequisitePatch []HostPatchManagerStatusPrerequisitePatch `xml:"prerequisitePatch,omitempty"`
+	RestartRequired bool `xml:"restartRequired"`
+	ReconnectRequired bool `xml:"reconnectRequired"`
+	VmOffRequired bool `xml:"vmOffRequired"`
+	SupersededPatchIds []string `xml:"supersededPatchIds,omitempty"`
+}
+
+func init() {
+	t["HostPatchManagerStatus"] = reflect.TypeOf((*HostPatchManagerStatus)(nil)).Elem()
+}
+
+type HostPatchManagerStatusPrerequisitePatch struct {
+	DynamicData
+
+	Id string `xml:"id"`
+	InstallState []string `xml:"installState,omitempty"`
+}
+
+func init() {
+	t["HostPatchManagerStatusPrerequisitePatch"] = reflect.TypeOf((*HostPatchManagerStatusPrerequisitePatch)(nil)).Elem()
+}
+
+type HostPathSelectionPolicyOption struct {
+	DynamicData
+
+	Policy BaseElementDescription `xml:"policy,typeattr"`
+}
+
+func init() {
+	t["HostPathSelectionPolicyOption"] = reflect.TypeOf((*HostPathSelectionPolicyOption)(nil)).Elem()
+}
+
+type HostPciDevice struct {
+	DynamicData
+
+	Id string `xml:"id"`
+	ClassId int16 `xml:"classId"`
+	Bus byte `xml:"bus"`
+	Slot byte `xml:"slot"`
+	Function byte `xml:"function"`
+	VendorId int16 `xml:"vendorId"`
+	SubVendorId int16 `xml:"subVendorId"`
+	VendorName string `xml:"vendorName"`
+	DeviceId int16 `xml:"deviceId"`
+	SubDeviceId int16 `xml:"subDeviceId"`
+	ParentBridge string `xml:"parentBridge,omitempty"`
+	DeviceName string `xml:"deviceName"`
+}
+
+func init() {
+	t["HostPciDevice"] = reflect.TypeOf((*HostPciDevice)(nil)).Elem()
+}
+
+type HostPciPassthruConfig struct {
+	DynamicData
+
+	Id string `xml:"id"`
+	PassthruEnabled bool `xml:"passthruEnabled"`
+}
+
+func init() {
+	t["HostPciPassthruConfig"] = reflect.TypeOf((*HostPciPassthruConfig)(nil)).Elem()
+}
+
+type HostPciPassthruInfo struct {
+	DynamicData
+
+	Id string `xml:"id"`
+	DependentDevice string `xml:"dependentDevice"`
+	PassthruEnabled bool `xml:"passthruEnabled"`
+	PassthruCapable bool `xml:"passthruCapable"`
+	PassthruActive bool `xml:"passthruActive"`
+}
+
+func init() {
+	t["HostPciPassthruInfo"] = reflect.TypeOf((*HostPciPassthruInfo)(nil)).Elem()
+}
+
+type HostPlacedVirtualNicIdentifier struct {
+	DynamicData
+
+	Vm ManagedObjectReference `xml:"vm"`
+	VnicKey string `xml:"vnicKey"`
+	Reservation int `xml:"reservation,omitempty"`
+}
+
+func init() {
+	t["HostPlacedVirtualNicIdentifier"] = reflect.TypeOf((*HostPlacedVirtualNicIdentifier)(nil)).Elem()
+}
+
+type HostPlugStoreTopology struct {
+	DynamicData
+
+	Adapter []HostPlugStoreTopologyAdapter `xml:"adapter,omitempty"`
+	Path []HostPlugStoreTopologyPath `xml:"path,omitempty"`
+	Target []HostPlugStoreTopologyTarget `xml:"target,omitempty"`
+	Device []HostPlugStoreTopologyDevice `xml:"device,omitempty"`
+	Plugin []HostPlugStoreTopologyPlugin `xml:"plugin,omitempty"`
+}
+
+func init() {
+	t["HostPlugStoreTopology"] = reflect.TypeOf((*HostPlugStoreTopology)(nil)).Elem()
+}
+
+type HostPlugStoreTopologyAdapter struct {
+	DynamicData
+
+	Key string `xml:"key"`
+	Adapter string `xml:"adapter"`
+	Path []string `xml:"path,omitempty"`
+}
+
+func init() {
+	t["HostPlugStoreTopologyAdapter"] = reflect.TypeOf((*HostPlugStoreTopologyAdapter)(nil)).Elem()
+}
+
+type HostPlugStoreTopologyDevice struct {
+	DynamicData
+
+	Key string `xml:"key"`
+	Lun string `xml:"lun"`
+	Path []string `xml:"path,omitempty"`
+}
+
+func init() {
+	t["HostPlugStoreTopologyDevice"] = reflect.TypeOf((*HostPlugStoreTopologyDevice)(nil)).Elem()
+}
+
+type HostPlugStoreTopologyPath struct {
+	DynamicData
+
+	Key string `xml:"key"`
+	Name string `xml:"name"`
+	ChannelNumber int `xml:"channelNumber,omitempty"`
+	TargetNumber int `xml:"targetNumber,omitempty"`
+	LunNumber int `xml:"lunNumber,omitempty"`
+	Adapter string `xml:"adapter,omitempty"`
+	Target string `xml:"target,omitempty"`
+	Device string `xml:"device,omitempty"`
+}
+
+func init() {
+	t["HostPlugStoreTopologyPath"] = reflect.TypeOf((*HostPlugStoreTopologyPath)(nil)).Elem()
+}
+
+type HostPlugStoreTopologyPlugin struct {
+	DynamicData
+
+	Key string `xml:"key"`
+	Name string `xml:"name"`
+	Device []string `xml:"device,omitempty"`
+	ClaimedPath []string `xml:"claimedPath,omitempty"`
+}
+
+func init() {
+	t["HostPlugStoreTopologyPlugin"] = reflect.TypeOf((*HostPlugStoreTopologyPlugin)(nil)).Elem()
+}
+
+type HostPlugStoreTopologyTarget struct {
+	DynamicData
+
+	Key string `xml:"key"`
+	Transport BaseHostTargetTransport `xml:"transport,omitempty,typeattr"`
+}
+
+func init() {
+	t["HostPlugStoreTopologyTarget"] = reflect.TypeOf((*HostPlugStoreTopologyTarget)(nil)).Elem()
+}
+
+type HostPnicNetworkResourceInfo struct {
+	DynamicData
+
+	PnicDevice string `xml:"pnicDevice"`
+	AvailableBandwidthForVMTraffic int64 `xml:"availableBandwidthForVMTraffic,omitempty"`
+	UnusedBandwidthForVMTraffic int64 `xml:"unusedBandwidthForVMTraffic,omitempty"`
+	PlacedVirtualNics []HostPlacedVirtualNicIdentifier `xml:"placedVirtualNics,omitempty"`
+}
+
+func init() {
+	t["HostPnicNetworkResourceInfo"] = reflect.TypeOf((*HostPnicNetworkResourceInfo)(nil)).Elem()
+}
+
+type HostPortGroup struct {
+	DynamicData
+
+	Key string `xml:"key,omitempty"`
+	Port []HostPortGroupPort `xml:"port,omitempty"`
+	Vswitch string `xml:"vswitch,omitempty"`
+	ComputedPolicy HostNetworkPolicy `xml:"computedPolicy"`
+	Spec HostPortGroupSpec `xml:"spec"`
+}
+
+func init() {
+	t["HostPortGroup"] = reflect.TypeOf((*HostPortGroup)(nil)).Elem()
+}
+
+type HostPortGroupConfig struct {
+	DynamicData
+
+	ChangeOperation string `xml:"changeOperation,omitempty"`
+	Spec *HostPortGroupSpec `xml:"spec,omitempty"`
+}
+
+func init() {
+	t["HostPortGroupConfig"] = reflect.TypeOf((*HostPortGroupConfig)(nil)).Elem()
+}
+
+type HostPortGroupPort struct {
+	DynamicData
+
+	Key string `xml:"key,omitempty"`
+	Mac []string `xml:"mac,omitempty"`
+	Type string `xml:"type"`
+}
+
+func init() {
+	t["HostPortGroupPort"] = reflect.TypeOf((*HostPortGroupPort)(nil)).Elem()
+}
+
+type HostPortGroupProfile struct {
+	PortGroupProfile
+
+	IpConfig IpAddressProfile `xml:"ipConfig"`
+}
+
+func init() {
+	t["HostPortGroupProfile"] = reflect.TypeOf((*HostPortGroupProfile)(nil)).Elem()
+}
+
+type HostPortGroupSpec struct {
+	DynamicData
+
+	Name string `xml:"name"`
+	VlanId int `xml:"vlanId"`
+	VswitchName string `xml:"vswitchName"`
+	Policy HostNetworkPolicy `xml:"policy"`
+}
+
+func init() {
+	t["HostPortGroupSpec"] = reflect.TypeOf((*HostPortGroupSpec)(nil)).Elem()
+}
+
+type HostPosixAccountSpec struct {
+	HostAccountSpec
+
+	PosixId int `xml:"posixId,omitempty"`
+	ShellAccess *bool `xml:"shellAccess"`
+}
+
+func init() {
+	t["HostPosixAccountSpec"] = reflect.TypeOf((*HostPosixAccountSpec)(nil)).Elem()
+}
+
+type HostPowerOpFailed struct {
+	VimFault
+}
+
+func init() {
+	t["HostPowerOpFailed"] = reflect.TypeOf((*HostPowerOpFailed)(nil)).Elem()
+}
+
+type HostPowerOpFailedFault BaseHostPowerOpFailed
+
+func init() {
+	t["HostPowerOpFailedFault"] = reflect.TypeOf((*HostPowerOpFailedFault)(nil)).Elem()
+}
+
+type HostPowerPolicy struct {
+	DynamicData
+
+	Key int `xml:"key"`
+	Name string `xml:"name"`
+	ShortName string `xml:"shortName"`
+	Description string `xml:"description"`
+}
+
+func init() {
+	t["HostPowerPolicy"] = reflect.TypeOf((*HostPowerPolicy)(nil)).Elem()
+}
+
+type HostPrimaryAgentNotShortNameEvent struct {
+	HostDasEvent
+
+	PrimaryAgent string `xml:"primaryAgent"`
+}
+
+func init() {
+	t["HostPrimaryAgentNotShortNameEvent"] = reflect.TypeOf((*HostPrimaryAgentNotShortNameEvent)(nil)).Elem()
+}
+
+type HostProfileAppliedEvent struct {
+	HostEvent
+
+	Profile ProfileEventArgument `xml:"profile"`
+}
+
+func init() {
+	t["HostProfileAppliedEvent"] = reflect.TypeOf((*HostProfileAppliedEvent)(nil)).Elem()
+}
+
+type HostProfileCompleteConfigSpec struct {
+	HostProfileConfigSpec
+
+	ApplyProfile *HostApplyProfile `xml:"applyProfile,omitempty"`
+	CustomComplyProfile *ComplianceProfile `xml:"customComplyProfile,omitempty"`
+	DisabledExpressionListChanged bool `xml:"disabledExpressionListChanged"`
+	DisabledExpressionList []string `xml:"disabledExpressionList,omitempty"`
+	ValidatorHost *ManagedObjectReference `xml:"validatorHost,omitempty"`
+	Validating *bool `xml:"validating"`
+}
+
+func init() {
+	t["HostProfileCompleteConfigSpec"] = reflect.TypeOf((*HostProfileCompleteConfigSpec)(nil)).Elem()
+}
+
+type HostProfileConfigInfo struct {
+	ProfileConfigInfo
+
+	ApplyProfile *HostApplyProfile `xml:"applyProfile,omitempty"`
+	DefaultComplyProfile *ComplianceProfile `xml:"defaultComplyProfile,omitempty"`
+	DefaultComplyLocator []ComplianceLocator `xml:"defaultComplyLocator,omitempty"`
+	CustomComplyProfile *ComplianceProfile `xml:"customComplyProfile,omitempty"`
+	DisabledExpressionList []string `xml:"disabledExpressionList,omitempty"`
+}
+
+func init() {
+	t["HostProfileConfigInfo"] = reflect.TypeOf((*HostProfileConfigInfo)(nil)).Elem()
+}
+
+type HostProfileConfigSpec struct {
+	ProfileCreateSpec
+}
+
+func init() {
+	t["HostProfileConfigSpec"] = reflect.TypeOf((*HostProfileConfigSpec)(nil)).Elem()
+}
+
+type HostProfileHostBasedConfigSpec struct {
+	HostProfileConfigSpec
+
+	Host ManagedObjectReference `xml:"host"`
+	UseHostProfileEngine *bool `xml:"useHostProfileEngine"`
+}
+
+func init() {
+	t["HostProfileHostBasedConfigSpec"] = reflect.TypeOf((*HostProfileHostBasedConfigSpec)(nil)).Elem()
+}
+
+type HostProfileManagerConfigTaskList struct {
+	DynamicData
+
+	ConfigSpec *HostConfigSpec `xml:"configSpec,omitempty"`
+	TaskDescription []LocalizableMessage `xml:"taskDescription,omitempty"`
+	TaskListRequirement []string `xml:"taskListRequirement,omitempty"`
+}
+
+func init() {
+	t["HostProfileManagerConfigTaskList"] = reflect.TypeOf((*HostProfileManagerConfigTaskList)(nil)).Elem()
+}
+
+type HostProfileSerializedHostProfileSpec struct {
+	ProfileSerializedCreateSpec
+
+	ValidatorHost *ManagedObjectReference `xml:"validatorHost,omitempty"`
+	Validating *bool `xml:"validating"`
+}
+
+func init() {
+	t["HostProfileSerializedHostProfileSpec"] = reflect.TypeOf((*HostProfileSerializedHostProfileSpec)(nil)).Elem()
+}
+
+type HostProtocolEndpoint struct {
+	DynamicData
+
+	PeType string `xml:"peType"`
+	Uuid string `xml:"uuid"`
+	HostKey []ManagedObjectReference `xml:"hostKey,omitempty"`
+	StorageArray string `xml:"storageArray,omitempty"`
+	NfsServer string `xml:"nfsServer,omitempty"`
+	NfsDir string `xml:"nfsDir,omitempty"`
+	DeviceId string `xml:"deviceId,omitempty"`
+}
+
+func init() {
+	t["HostProtocolEndpoint"] = reflect.TypeOf((*HostProtocolEndpoint)(nil)).Elem()
+}
+
+type HostProxySwitch struct {
+	DynamicData
+
+	DvsUuid string `xml:"dvsUuid"`
+	DvsName string `xml:"dvsName"`
+	Key string `xml:"key"`
+	NumPorts int `xml:"numPorts"`
+	ConfigNumPorts int `xml:"configNumPorts,omitempty"`
+	NumPortsAvailable int `xml:"numPortsAvailable"`
+	UplinkPort []KeyValue `xml:"uplinkPort,omitempty"`
+	Mtu int `xml:"mtu,omitempty"`
+	Pnic []string `xml:"pnic,omitempty"`
+	Spec HostProxySwitchSpec `xml:"spec"`
+	HostLag []HostProxySwitchHostLagConfig `xml:"hostLag,omitempty"`
+	NetworkReservationSupported *bool `xml:"networkReservationSupported"`
+}
+
+func init() {
+	t["HostProxySwitch"] = reflect.TypeOf((*HostProxySwitch)(nil)).Elem()
+}
+
+type HostProxySwitchConfig struct {
+	DynamicData
+
+	ChangeOperation string `xml:"changeOperation,omitempty"`
+	Uuid string `xml:"uuid"`
+	Spec *HostProxySwitchSpec `xml:"spec,omitempty"`
+}
+
+func init() {
+	t["HostProxySwitchConfig"] = reflect.TypeOf((*HostProxySwitchConfig)(nil)).Elem()
+}
+
+type HostProxySwitchHostLagConfig struct {
+	DynamicData
+
+	LagKey string `xml:"lagKey"`
+	LagName string `xml:"lagName,omitempty"`
+	UplinkPort []KeyValue `xml:"uplinkPort,omitempty"`
+}
+
+func init() {
+	t["HostProxySwitchHostLagConfig"] = reflect.TypeOf((*HostProxySwitchHostLagConfig)(nil)).Elem()
+}
+
+type HostProxySwitchSpec struct {
+	DynamicData
+
+	Backing BaseDistributedVirtualSwitchHostMemberBacking `xml:"backing,omitempty,typeattr"`
+}
+
+func init() {
+	t["HostProxySwitchSpec"] = reflect.TypeOf((*HostProxySwitchSpec)(nil)).Elem()
+}
+
+type HostReconnectionFailedEvent struct {
+	HostEvent
+}
+
+func init() {
+	t["HostReconnectionFailedEvent"] = reflect.TypeOf((*HostReconnectionFailedEvent)(nil)).Elem()
+}
+
+type HostReliableMemoryInfo struct {
+	DynamicData
+
+	MemorySize int64 `xml:"memorySize"`
+}
+
+func init() {
+	t["HostReliableMemoryInfo"] = reflect.TypeOf((*HostReliableMemoryInfo)(nil)).Elem()
+}
+
+type HostRemoveVFlashResource HostRemoveVFlashResourceRequestType
+
+func init() {
+	t["HostRemoveVFlashResource"] = reflect.TypeOf((*HostRemoveVFlashResource)(nil)).Elem()
+}
+
+type HostRemoveVFlashResourceRequestType struct {
+	This ManagedObjectReference `xml:"_this"`
+}
+
+func init() {
+	t["HostRemoveVFlashResourceRequestType"] = reflect.TypeOf((*HostRemoveVFlashResourceRequestType)(nil)).Elem()
+}
+
+type HostRemoveVFlashResourceResponse struct {
+}
+
+type HostRemovedEvent struct {
+	HostEvent
+}
+
+func init() {
+	t["HostRemovedEvent"] = reflect.TypeOf((*HostRemovedEvent)(nil)).Elem()
+}
+
+type HostResignatureRescanResult struct {
+	DynamicData
+
+	Rescan []HostVmfsRescanResult `xml:"rescan,omitempty"`
+	Result ManagedObjectReference `xml:"result"`
+}
+
+func init() {
+	t["HostResignatureRescanResult"] = reflect.TypeOf((*HostResignatureRescanResult)(nil)).Elem()
+}
+
+type HostRuntimeInfo struct {
+	DynamicData
+
+	ConnectionState HostSystemConnectionState `xml:"connectionState"`
+	PowerState HostSystemPowerState `xml:"powerState"`
+	StandbyMode string `xml:"standbyMode,omitempty"`
+	InMaintenanceMode bool `xml:"inMaintenanceMode"`
+	BootTime *time.Time `xml:"bootTime"`
+	HealthSystemRuntime *HealthSystemRuntime `xml:"healthSystemRuntime,omitempty"`
+	DasHostState *ClusterDasFdmHostState `xml:"dasHostState,omitempty"`
+	TpmPcrValues []HostTpmDigestInfo `xml:"tpmPcrValues,omitempty"`
+	VsanRuntimeInfo *VsanHostRuntimeInfo `xml:"vsanRuntimeInfo,omitempty"`
+	NetworkRuntimeInfo *HostRuntimeInfoNetworkRuntimeInfo `xml:"networkRuntimeInfo,omitempty"`
+	VFlashResourceRuntimeInfo *HostVFlashManagerVFlashResourceRunTimeInfo `xml:"vFlashResourceRuntimeInfo,omitempty"`
+	HostMaxVirtualDiskCapacity int64 `xml:"hostMaxVirtualDiskCapacity,omitempty"`
+}
+
+func init() {
+	t["HostRuntimeInfo"] = reflect.TypeOf((*HostRuntimeInfo)(nil)).Elem()
+}
+
+type HostRuntimeInfoNetStackInstanceRuntimeInfo struct {
+	DynamicData
+
+	NetStackInstanceKey string `xml:"netStackInstanceKey"`
+	State string `xml:"state,omitempty"`
+	VmknicKeys []string `xml:"vmknicKeys,omitempty"`
+	MaxNumberOfConnections int `xml:"maxNumberOfConnections,omitempty"`
+	CurrentIpV6Enabled *bool `xml:"currentIpV6Enabled"`
+}
+
+func init() {
+	t["HostRuntimeInfoNetStackInstanceRuntimeInfo"] = reflect.TypeOf((*HostRuntimeInfoNetStackInstanceRuntimeInfo)(nil)).Elem()
+}
+
+type HostRuntimeInfoNetworkRuntimeInfo struct {
+	DynamicData
+
+	NetStackInstanceRuntimeInfo []HostRuntimeInfoNetStackInstanceRuntimeInfo `xml:"netStackInstanceRuntimeInfo,omitempty"`
+	NetworkResourceRuntime *HostNetworkResourceRuntime `xml:"networkResourceRuntime,omitempty"`
+}
+
+func init() {
+	t["HostRuntimeInfoNetworkRuntimeInfo"] = reflect.TypeOf((*HostRuntimeInfoNetworkRuntimeInfo)(nil)).Elem()
+}
+
+type HostScsiDisk struct {
+	ScsiLun
+
+	Capacity HostDiskDimensionsLba `xml:"capacity"`
+	DevicePath string `xml:"devicePath"`
+	Ssd *bool `xml:"ssd"`
+	LocalDisk *bool `xml:"localDisk"`
+	PhysicalLocation []string `xml:"physicalLocation,omitempty"`
+	EmulatedDIXDIFEnabled *bool `xml:"emulatedDIXDIFEnabled"`
+	VsanDiskInfo *VsanHostVsanDiskInfo `xml:"vsanDiskInfo,omitempty"`
+}
+
+func init() {
+	t["HostScsiDisk"] = reflect.TypeOf((*HostScsiDisk)(nil)).Elem()
+}
+
+type HostScsiDiskPartition struct {
+	DynamicData
+
+	DiskName string `xml:"diskName"`
+	Partition int `xml:"partition"`
+}
+
+func init() {
+	t["HostScsiDiskPartition"] = reflect.TypeOf((*HostScsiDiskPartition)(nil)).Elem()
+}
+
+type HostScsiTopology struct {
+	DynamicData
+
+	Adapter []HostScsiTopologyInterface `xml:"adapter,omitempty"`
+}
+
+func init() {
+	t["HostScsiTopology"] = reflect.TypeOf((*HostScsiTopology)(nil)).Elem()
+}
+
+type HostScsiTopologyInterface struct {
+	DynamicData
+
+	Key string `xml:"key"`
+	Adapter string `xml:"adapter"`
+	Target []HostScsiTopologyTarget `xml:"target,omitempty"`
+}
+
+func init() {
+	t["HostScsiTopologyInterface"] = reflect.TypeOf((*HostScsiTopologyInterface)(nil)).Elem()
+}
+
+type HostScsiTopologyLun struct {
+	DynamicData
+
+	Key string `xml:"key"`
+	Lun int `xml:"lun"`
+	ScsiLun string `xml:"scsiLun"`
+}
+
+func init() {
+	t["HostScsiTopologyLun"] = reflect.TypeOf((*HostScsiTopologyLun)(nil)).Elem()
+}
+
+type HostScsiTopologyTarget struct {
+	DynamicData
+
+	Key string `xml:"key"`
+	Target int `xml:"target"`
+	Lun []HostScsiTopologyLun `xml:"lun,omitempty"`
+	Transport BaseHostTargetTransport `xml:"transport,omitempty,typeattr"`
+}
+
+func init() {
+	t["HostScsiTopologyTarget"] = reflect.TypeOf((*HostScsiTopologyTarget)(nil)).Elem()
+}
+
+type HostSecuritySpec struct {
+	DynamicData
+
+	AdminPassword string `xml:"adminPassword,omitempty"`
+	RemovePermission []Permission `xml:"removePermission,omitempty"`
+	AddPermission []Permission `xml:"addPermission,omitempty"`
+}
+
+func init() {
+	t["HostSecuritySpec"] = reflect.TypeOf((*HostSecuritySpec)(nil)).Elem()
+}
+
+type HostService struct {
+	DynamicData
+
+	Key string `xml:"key"`
+	Label string `xml:"label"`
+	Required bool `xml:"required"`
+	Uninstallable bool `xml:"uninstallable"`
+	Running bool `xml:"running"`
+	Ruleset []string `xml:"ruleset,omitempty"`
+	Policy string `xml:"policy"`
+	SourcePackage *HostServiceSourcePackage `xml:"sourcePackage,omitempty"`
+}
+
+func init() {
+	t["HostService"] = reflect.TypeOf((*HostService)(nil)).Elem()
+}
+
+type HostServiceConfig struct {
+	DynamicData
+
+	ServiceId string `xml:"serviceId"`
+	StartupPolicy string `xml:"startupPolicy"`
+}
+
+func init() {
+	t["HostServiceConfig"] = reflect.TypeOf((*HostServiceConfig)(nil)).Elem()
+}
+
+type HostServiceInfo struct {
+	DynamicData
+
+	Service []HostService `xml:"service,omitempty"`
+}
+
+func init() {
+	t["HostServiceInfo"] = reflect.TypeOf((*HostServiceInfo)(nil)).Elem()
+}
+
+type HostServiceSourcePackage struct {
+	DynamicData
+
+	SourcePackageName string `xml:"sourcePackageName"`
+	Description string `xml:"description"`
+}
+
+func init() {
+	t["HostServiceSourcePackage"] = reflect.TypeOf((*HostServiceSourcePackage)(nil)).Elem()
+}
+
+type HostServiceTicket struct {
+	DynamicData
+
+	Host string `xml:"host,omitempty"`
+	Port int `xml:"port,omitempty"`
+	SslThumbprint string `xml:"sslThumbprint,omitempty"`
+	Service string `xml:"service"`
+	ServiceVersion string `xml:"serviceVersion"`
+	SessionId string `xml:"sessionId"`
+}
+
+func init() {
+	t["HostServiceTicket"] = reflect.TypeOf((*HostServiceTicket)(nil)).Elem()
+}
+
+type HostShortNameInconsistentEvent struct {
+	HostDasEvent
+
+	ShortName string `xml:"shortName"`
+	ShortName2 string `xml:"shortName2"`
+}
+
+func init() {
+	t["HostShortNameInconsistentEvent"] = reflect.TypeOf((*HostShortNameInconsistentEvent)(nil)).Elem()
+}
+
+type HostShortNameToIpFailedEvent struct {
+	HostEvent
+
+	ShortName string `xml:"shortName"`
+}
+
+func init() {
+	t["HostShortNameToIpFailedEvent"] = reflect.TypeOf((*HostShortNameToIpFailedEvent)(nil)).Elem()
+}
+
+type HostShutdownEvent struct {
+	HostEvent
+
+	Reason string `xml:"reason"`
+}
+
+func init() {
+	t["HostShutdownEvent"] = reflect.TypeOf((*HostShutdownEvent)(nil)).Elem()
+}
+
+type HostSnmpConfigSpec struct {
+	DynamicData
+
+	Enabled *bool `xml:"enabled"`
+	Port int `xml:"port,omitempty"`
+	ReadOnlyCommunities []string `xml:"readOnlyCommunities,omitempty"`
+	TrapTargets []HostSnmpDestination `xml:"trapTargets,omitempty"`
+	Option []KeyValue `xml:"option,omitempty"`
+}
+
+func init() {
+	t["HostSnmpConfigSpec"] = reflect.TypeOf((*HostSnmpConfigSpec)(nil)).Elem()
+}
+
+type HostSnmpDestination struct {
+	DynamicData
+
+	HostName string `xml:"hostName"`
+	Port int `xml:"port"`
+	Community string `xml:"community"`
+}
+
+func init() {
+	t["HostSnmpDestination"] = reflect.TypeOf((*HostSnmpDestination)(nil)).Elem()
+}
+
+type HostSnmpSystemAgentLimits struct {
+	DynamicData
+
+	MaxReadOnlyCommunities int `xml:"maxReadOnlyCommunities"`
+	MaxTrapDestinations int `xml:"maxTrapDestinations"`
+	MaxCommunityLength int `xml:"maxCommunityLength"`
+	MaxBufferSize int `xml:"maxBufferSize"`
+	Capability HostSnmpAgentCapability `xml:"capability,omitempty"`
+}
+
+func init() {
+	t["HostSnmpSystemAgentLimits"] = reflect.TypeOf((*HostSnmpSystemAgentLimits)(nil)).Elem()
+}
+
+type HostSriovConfig struct {
+	HostPciPassthruConfig
+
+	SriovEnabled bool `xml:"sriovEnabled"`
+	NumVirtualFunction int `xml:"numVirtualFunction"`
+}
+
+func init() {
+	t["HostSriovConfig"] = reflect.TypeOf((*HostSriovConfig)(nil)).Elem()
+}
+
+type HostSriovInfo struct {
+	HostPciPassthruInfo
+
+	SriovEnabled bool `xml:"sriovEnabled"`
+	SriovCapable bool `xml:"sriovCapable"`
+	SriovActive bool `xml:"sriovActive"`
+	NumVirtualFunctionRequested int `xml:"numVirtualFunctionRequested"`
+	NumVirtualFunction int `xml:"numVirtualFunction"`
+	MaxVirtualFunctionSupported int `xml:"maxVirtualFunctionSupported"`
+}
+
+func init() {
+	t["HostSriovInfo"] = reflect.TypeOf((*HostSriovInfo)(nil)).Elem()
+}
+
+type HostSslThumbprintInfo struct {
+	DynamicData
+
+	Principal string `xml:"principal"`
+	OwnerTag string `xml:"ownerTag,omitempty"`
+	SslThumbprints []string `xml:"sslThumbprints,omitempty"`
+}
+
+func init() {
+	t["HostSslThumbprintInfo"] = reflect.TypeOf((*HostSslThumbprintInfo)(nil)).Elem()
+}
+
+type HostStatusChangedEvent struct {
+	ClusterStatusChangedEvent
+}
+
+func init() {
+	t["HostStatusChangedEvent"] = reflect.TypeOf((*HostStatusChangedEvent)(nil)).Elem()
+}
+
+type HostStorageArrayTypePolicyOption struct {
+	DynamicData
+
+	Policy BaseElementDescription `xml:"policy,typeattr"`
+}
+
+func init() {
+	t["HostStorageArrayTypePolicyOption"] = reflect.TypeOf((*HostStorageArrayTypePolicyOption)(nil)).Elem()
+}
+
+type HostStorageDeviceInfo struct {
+	DynamicData
+
+	HostBusAdapter []BaseHostHostBusAdapter `xml:"hostBusAdapter,omitempty,typeattr"`
+	ScsiLun []BaseScsiLun `xml:"scsiLun,omitempty,typeattr"`
+	ScsiTopology *HostScsiTopology `xml:"scsiTopology,omitempty"`
+	MultipathInfo *HostMultipathInfo `xml:"multipathInfo,omitempty"`
+	PlugStoreTopology *HostPlugStoreTopology `xml:"plugStoreTopology,omitempty"`
+	SoftwareInternetScsiEnabled bool `xml:"softwareInternetScsiEnabled"`
+}
+
+func init() {
+	t["HostStorageDeviceInfo"] = reflect.TypeOf((*HostStorageDeviceInfo)(nil)).Elem()
+}
+
+type HostStorageElementInfo struct {
+	HostHardwareElementInfo
+
+	OperationalInfo []HostStorageOperationalInfo `xml:"operationalInfo,omitempty"`
+}
+
+func init() {
+	t["HostStorageElementInfo"] = reflect.TypeOf((*HostStorageElementInfo)(nil)).Elem()
+}
+
+type HostStorageOperationalInfo struct {
+	DynamicData
+
+	Property string `xml:"property"`
+	Value string `xml:"value"`
+}
+
+func init() {
+	t["HostStorageOperationalInfo"] = reflect.TypeOf((*HostStorageOperationalInfo)(nil)).Elem()
+}
+
+type HostStorageSystemDiskLocatorLedResult struct {
+	DynamicData
+
+	Key string `xml:"key"`
+	Fault LocalizedMethodFault `xml:"fault"`
+}
+
+func init() {
+	t["HostStorageSystemDiskLocatorLedResult"] = reflect.TypeOf((*HostStorageSystemDiskLocatorLedResult)(nil)).Elem()
+}
+
+type HostStorageSystemScsiLunResult struct {
+	DynamicData
+
+	Key string `xml:"key"`
+	Fault *LocalizedMethodFault `xml:"fault,omitempty"`
+}
+
+func init() {
+	t["HostStorageSystemScsiLunResult"] = reflect.TypeOf((*HostStorageSystemScsiLunResult)(nil)).Elem()
+}
+
+type HostStorageSystemVmfsVolumeResult struct {
+	DynamicData
+
+	Key string `xml:"key"`
+	Fault *LocalizedMethodFault `xml:"fault,omitempty"`
+}
+
+func init() {
+	t["HostStorageSystemVmfsVolumeResult"] = reflect.TypeOf((*HostStorageSystemVmfsVolumeResult)(nil)).Elem()
+}
+
+type HostSyncFailedEvent struct {
+	HostEvent
+
+	Reason LocalizedMethodFault `xml:"reason"`
+}
+
+func init() {
+	t["HostSyncFailedEvent"] = reflect.TypeOf((*HostSyncFailedEvent)(nil)).Elem()
+}
+
+type HostSystemHealthInfo struct {
+	DynamicData
+
+	NumericSensorInfo []HostNumericSensorInfo `xml:"numericSensorInfo,omitempty"`
+}
+
+func init() {
+	t["HostSystemHealthInfo"] = reflect.TypeOf((*HostSystemHealthInfo)(nil)).Elem()
+}
+
+type HostSystemIdentificationInfo struct {
+	DynamicData
+
+	IdentifierValue string `xml:"identifierValue"`
+	IdentifierType BaseElementDescription `xml:"identifierType,typeattr"`
+}
+
+func init() {
+	t["HostSystemIdentificationInfo"] = reflect.TypeOf((*HostSystemIdentificationInfo)(nil)).Elem()
+}
+
+type HostSystemInfo struct {
+	DynamicData
+
+	Vendor string `xml:"vendor"`
+	Model string `xml:"model"`
+	Uuid string `xml:"uuid"`
+	OtherIdentifyingInfo []HostSystemIdentificationInfo `xml:"otherIdentifyingInfo,omitempty"`
+}
+
+func init() {
+	t["HostSystemInfo"] = reflect.TypeOf((*HostSystemInfo)(nil)).Elem()
+}
+
+type HostSystemReconnectSpec struct {
+	DynamicData
+
+	SyncState *bool `xml:"syncState"`
+}
+
+func init() {
+	t["HostSystemReconnectSpec"] = reflect.TypeOf((*HostSystemReconnectSpec)(nil)).Elem()
+}
+
+type HostSystemResourceInfo struct {
+	DynamicData
+
+	Key string `xml:"key"`
+	Config *ResourceConfigSpec `xml:"config,omitempty"`
+	Child []HostSystemResourceInfo `xml:"child,omitempty"`
+}
+
+func init() {
+	t["HostSystemResourceInfo"] = reflect.TypeOf((*HostSystemResourceInfo)(nil)).Elem()
+}
+
+type HostSystemSwapConfiguration struct {
+	DynamicData
+
+	Option []BaseHostSystemSwapConfigurationSystemSwapOption `xml:"option,omitempty,typeattr"`
+}
+
+func init() {
+	t["HostSystemSwapConfiguration"] = reflect.TypeOf((*HostSystemSwapConfiguration)(nil)).Elem()
+}
+
+type HostSystemSwapConfigurationDatastoreOption struct {
+	HostSystemSwapConfigurationSystemSwapOption
+
+	Datastore string `xml:"datastore"`
+}
+
+func init() {
+	t["HostSystemSwapConfigurationDatastoreOption"] = reflect.TypeOf((*HostSystemSwapConfigurationDatastoreOption)(nil)).Elem()
+}
+
+type HostSystemSwapConfigurationDisabledOption struct {
+	HostSystemSwapConfigurationSystemSwapOption
+}
+
+func init() {
+	t["HostSystemSwapConfigurationDisabledOption"] = reflect.TypeOf((*HostSystemSwapConfigurationDisabledOption)(nil)).Elem()
+}
+
+type HostSystemSwapConfigurationHostCacheOption struct {
+	HostSystemSwapConfigurationSystemSwapOption
+}
+
+func init() {
+	t["HostSystemSwapConfigurationHostCacheOption"] = reflect.TypeOf((*HostSystemSwapConfigurationHostCacheOption)(nil)).Elem()
+}
+
+type HostSystemSwapConfigurationHostLocalSwapOption struct {
+	HostSystemSwapConfigurationSystemSwapOption
+}
+
+func init() {
+	t["HostSystemSwapConfigurationHostLocalSwapOption"] = reflect.TypeOf((*HostSystemSwapConfigurationHostLocalSwapOption)(nil)).Elem()
+}
+
+type HostSystemSwapConfigurationSystemSwapOption struct {
+	DynamicData
+
+	Key int `xml:"key"`
+}
+
+func init() {
+	t["HostSystemSwapConfigurationSystemSwapOption"] = reflect.TypeOf((*HostSystemSwapConfigurationSystemSwapOption)(nil)).Elem()
+}
+
+type HostTargetTransport struct {
+	DynamicData
+}
+
+func init() {
+	t["HostTargetTransport"] = reflect.TypeOf((*HostTargetTransport)(nil)).Elem()
+}
+
+type HostTpmAttestationReport struct {
+	DynamicData
+
+	TpmPcrValues []HostTpmDigestInfo `xml:"tpmPcrValues"`
+	TpmEvents []HostTpmEventLogEntry `xml:"tpmEvents"`
+	TpmLogReliable bool `xml:"tpmLogReliable"`
+}
+
+func init() {
+	t["HostTpmAttestationReport"] = reflect.TypeOf((*HostTpmAttestationReport)(nil)).Elem()
+}
+
+type HostTpmBootSecurityOptionEventDetails struct {
+	HostTpmEventDetails
+
+	BootSecurityOption string `xml:"bootSecurityOption"`
+}
+
+func init() {
+	t["HostTpmBootSecurityOptionEventDetails"] = reflect.TypeOf((*HostTpmBootSecurityOptionEventDetails)(nil)).Elem()
+}
+
+type HostTpmCommandEventDetails struct {
+	HostTpmEventDetails
+
+	CommandLine string `xml:"commandLine"`
+}
+
+func init() {
+	t["HostTpmCommandEventDetails"] = reflect.TypeOf((*HostTpmCommandEventDetails)(nil)).Elem()
+}
+
+type HostTpmDigestInfo struct {
+	HostDigestInfo
+
+	PcrNumber int `xml:"pcrNumber"`
+}
+
+func init() {
+	t["HostTpmDigestInfo"] = reflect.TypeOf((*HostTpmDigestInfo)(nil)).Elem()
+}
+
+type HostTpmEventDetails struct {
+	DynamicData
+
+	DataHash []byte `xml:"dataHash"`
+}
+
+func init() {
+	t["HostTpmEventDetails"] = reflect.TypeOf((*HostTpmEventDetails)(nil)).Elem()
+}
+
+type HostTpmEventLogEntry struct {
+	DynamicData
+
+	PcrIndex int `xml:"pcrIndex"`
+	EventDetails BaseHostTpmEventDetails `xml:"eventDetails,typeattr"`
+}
+
+func init() {
+	t["HostTpmEventLogEntry"] = reflect.TypeOf((*HostTpmEventLogEntry)(nil)).Elem()
+}
+
+type HostTpmOptionEventDetails struct {
+	HostTpmEventDetails
+
+	OptionsFileName string `xml:"optionsFileName"`
+	BootOptions []byte `xml:"bootOptions,omitempty"`
+}
+
+func init() {
+	t["HostTpmOptionEventDetails"] = reflect.TypeOf((*HostTpmOptionEventDetails)(nil)).Elem()
+}
+
+type HostTpmSoftwareComponentEventDetails struct {
+	HostTpmEventDetails
+
+	ComponentName string `xml:"componentName"`
+	VibName string `xml:"vibName"`
+	VibVersion string `xml:"vibVersion"`
+	VibVendor string `xml:"vibVendor"`
+}
+
+func init() {
+	t["HostTpmSoftwareComponentEventDetails"] = reflect.TypeOf((*HostTpmSoftwareComponentEventDetails)(nil)).Elem()
+}
+
+type HostUnresolvedVmfsExtent struct {
+	DynamicData
+
+	Device HostScsiDiskPartition `xml:"device"`
+	DevicePath string `xml:"devicePath"`
+	VmfsUuid string `xml:"vmfsUuid"`
+	IsHeadExtent bool `xml:"isHeadExtent"`
+	Ordinal int `xml:"ordinal"`
+	StartBlock int `xml:"startBlock"`
+	EndBlock int `xml:"endBlock"`
+	Reason string `xml:"reason"`
+}
+
+func init() {
+	t["HostUnresolvedVmfsExtent"] = reflect.TypeOf((*HostUnresolvedVmfsExtent)(nil)).Elem()
+}
+
+type HostUnresolvedVmfsResignatureSpec struct {
+	DynamicData
+
+	ExtentDevicePath []string `xml:"extentDevicePath"`
+}
+
+func init() {
+	t["HostUnresolvedVmfsResignatureSpec"] = reflect.TypeOf((*HostUnresolvedVmfsResignatureSpec)(nil)).Elem()
+}
+
+type HostUnresolvedVmfsResolutionResult struct {
+	DynamicData
+
+	Spec HostUnresolvedVmfsResolutionSpec `xml:"spec"`
+	Vmfs *HostVmfsVolume `xml:"vmfs,omitempty"`
+	Fault *LocalizedMethodFault `xml:"fault,omitempty"`
+}
+
+func init() {
+	t["HostUnresolvedVmfsResolutionResult"] = reflect.TypeOf((*HostUnresolvedVmfsResolutionResult)(nil)).Elem()
+}
+
+type HostUnresolvedVmfsResolutionSpec struct {
+	DynamicData
+
+	ExtentDevicePath []string `xml:"extentDevicePath"`
+	UuidResolution string `xml:"uuidResolution"`
+}
+
+func init() {
+	t["HostUnresolvedVmfsResolutionSpec"] = reflect.TypeOf((*HostUnresolvedVmfsResolutionSpec)(nil)).Elem()
+}
+
+type HostUnresolvedVmfsVolume struct {
+	DynamicData
+
+	Extent []HostUnresolvedVmfsExtent `xml:"extent"`
+	VmfsLabel string `xml:"vmfsLabel"`
+	VmfsUuid string `xml:"vmfsUuid"`
+	TotalBlocks int `xml:"totalBlocks"`
+	ResolveStatus HostUnresolvedVmfsVolumeResolveStatus `xml:"resolveStatus"`
+}
+
+func init() {
+	t["HostUnresolvedVmfsVolume"] = reflect.TypeOf((*HostUnresolvedVmfsVolume)(nil)).Elem()
+}
+
+type HostUnresolvedVmfsVolumeResolveStatus struct {
+	DynamicData
+
+	Resolvable bool `xml:"resolvable"`
+	IncompleteExtents *bool `xml:"incompleteExtents"`
+	MultipleCopies *bool `xml:"multipleCopies"`
+}
+
+func init() {
+	t["HostUnresolvedVmfsVolumeResolveStatus"] = reflect.TypeOf((*HostUnresolvedVmfsVolumeResolveStatus)(nil)).Elem()
+}
+
+type HostUpgradeFailedEvent struct {
+	HostEvent
+}
+
+func init() {
+	t["HostUpgradeFailedEvent"] = reflect.TypeOf((*HostUpgradeFailedEvent)(nil)).Elem()
+}
+
+type HostUserWorldSwapNotEnabledEvent struct {
+	HostEvent
+}
+
+func init() {
+	t["HostUserWorldSwapNotEnabledEvent"] = reflect.TypeOf((*HostUserWorldSwapNotEnabledEvent)(nil)).Elem()
+}
+
+type HostVFlashManagerVFlashCacheConfigInfo struct {
+	DynamicData
+
+	VFlashModuleConfigOption []HostVFlashManagerVFlashCacheConfigInfoVFlashModuleConfigOption `xml:"vFlashModuleConfigOption,omitempty"`
+	DefaultVFlashModule string `xml:"defaultVFlashModule,omitempty"`
+	SwapCacheReservationInGB int64 `xml:"swapCacheReservationInGB,omitempty"`
+}
+
+func init() {
+	t["HostVFlashManagerVFlashCacheConfigInfo"] = reflect.TypeOf((*HostVFlashManagerVFlashCacheConfigInfo)(nil)).Elem()
+}
+
+type HostVFlashManagerVFlashCacheConfigInfoVFlashModuleConfigOption struct {
+	DynamicData
+
+	VFlashModule string `xml:"vFlashModule"`
+	VFlashModuleVersion string `xml:"vFlashModuleVersion"`
+	MinSupportedModuleVersion string `xml:"minSupportedModuleVersion"`
+	CacheConsistencyType ChoiceOption `xml:"cacheConsistencyType"`
+	CacheMode ChoiceOption `xml:"cacheMode"`
+	BlockSizeInKBOption LongOption `xml:"blockSizeInKBOption"`
+	ReservationInMBOption LongOption `xml:"reservationInMBOption"`
+	MaxDiskSizeInKB int64 `xml:"maxDiskSizeInKB"`
+}
+
+func init() {
+	t["HostVFlashManagerVFlashCacheConfigInfoVFlashModuleConfigOption"] = reflect.TypeOf((*HostVFlashManagerVFlashCacheConfigInfoVFlashModuleConfigOption)(nil)).Elem()
+}
+
+type HostVFlashManagerVFlashCacheConfigSpec struct {
+	DynamicData
+
+	DefaultVFlashModule string `xml:"defaultVFlashModule"`
+	SwapCacheReservationInGB int64 `xml:"swapCacheReservationInGB"`
+}
+
+func init() {
+	t["HostVFlashManagerVFlashCacheConfigSpec"] = reflect.TypeOf((*HostVFlashManagerVFlashCacheConfigSpec)(nil)).Elem()
+}
+
+type HostVFlashManagerVFlashConfigInfo struct {
+	DynamicData
+
+	VFlashResourceConfigInfo *HostVFlashManagerVFlashResourceConfigInfo `xml:"vFlashResourceConfigInfo,omitempty"`
+	VFlashCacheConfigInfo *HostVFlashManagerVFlashCacheConfigInfo `xml:"vFlashCacheConfigInfo,omitempty"`
+}
+
+func init() {
+	t["HostVFlashManagerVFlashConfigInfo"] = reflect.TypeOf((*HostVFlashManagerVFlashConfigInfo)(nil)).Elem()
+}
+
+type HostVFlashManagerVFlashResourceConfigInfo struct {
+	DynamicData
+
+	Vffs *HostVffsVolume `xml:"vffs,omitempty"`
+	Capacity int64 `xml:"capacity"`
+}
+
+func init() {
+	t["HostVFlashManagerVFlashResourceConfigInfo"] = reflect.TypeOf((*HostVFlashManagerVFlashResourceConfigInfo)(nil)).Elem()
+}
+
+type HostVFlashManagerVFlashResourceConfigSpec struct {
+	DynamicData
+
+	VffsUuid string `xml:"vffsUuid"`
+}
+
+func init() {
+	t["HostVFlashManagerVFlashResourceConfigSpec"] = reflect.TypeOf((*HostVFlashManagerVFlashResourceConfigSpec)(nil)).Elem()
+}
+
+type HostVFlashManagerVFlashResourceRunTimeInfo struct {
+	DynamicData
+
+	Usage int64 `xml:"usage"`
+	Capacity int64 `xml:"capacity"`
+	Accessible bool `xml:"accessible"`
+	CapacityForVmCache int64 `xml:"capacityForVmCache"`
+	FreeForVmCache int64 `xml:"freeForVmCache"`
+}
+
+func init() {
+	t["HostVFlashManagerVFlashResourceRunTimeInfo"] = reflect.TypeOf((*HostVFlashManagerVFlashResourceRunTimeInfo)(nil)).Elem()
+}
+
+type HostVFlashResourceConfigurationResult struct {
+	DynamicData
+
+	DevicePath []string `xml:"devicePath,omitempty"`
+	Vffs *HostVffsVolume `xml:"vffs,omitempty"`
+	DiskConfigurationResult []HostDiskConfigurationResult `xml:"diskConfigurationResult,omitempty"`
+}
+
+func init() {
+	t["HostVFlashResourceConfigurationResult"] = reflect.TypeOf((*HostVFlashResourceConfigurationResult)(nil)).Elem()
+}
+
+type HostVMotionCompatibility struct {
+	DynamicData
+
+	Host ManagedObjectReference `xml:"host"`
+	Compatibility []string `xml:"compatibility,omitempty"`
+}
+
+func init() {
+	t["HostVMotionCompatibility"] = reflect.TypeOf((*HostVMotionCompatibility)(nil)).Elem()
+}
+
+type HostVMotionConfig struct {
+	DynamicData
+
+	VmotionNicKey string `xml:"vmotionNicKey,omitempty"`
+	Enabled bool `xml:"enabled"`
+}
+
+func init() {
+	t["HostVMotionConfig"] = reflect.TypeOf((*HostVMotionConfig)(nil)).Elem()
+}
+
+type HostVMotionInfo struct {
+	DynamicData
+
+	NetConfig *HostVMotionNetConfig `xml:"netConfig,omitempty"`
+	IpConfig *HostIpConfig `xml:"ipConfig,omitempty"`
+}
+
+func init() {
+	t["HostVMotionInfo"] = reflect.TypeOf((*HostVMotionInfo)(nil)).Elem()
+}
+
+type HostVMotionNetConfig struct {
+	DynamicData
+
+	CandidateVnic []HostVirtualNic `xml:"candidateVnic,omitempty"`
+	SelectedVnic string `xml:"selectedVnic,omitempty"`
+}
+
+func init() {
+	t["HostVMotionNetConfig"] = reflect.TypeOf((*HostVMotionNetConfig)(nil)).Elem()
+}
+
+type HostVfatVolume struct {
+	HostFileSystemVolume
+}
+
+func init() {
+	t["HostVfatVolume"] = reflect.TypeOf((*HostVfatVolume)(nil)).Elem()
+}
+
+type HostVffsSpec struct {
+	DynamicData
+
+	DevicePath string `xml:"devicePath"`
+	Partition *HostDiskPartitionSpec `xml:"partition,omitempty"`
+	MajorVersion int `xml:"majorVersion"`
+	VolumeName string `xml:"volumeName"`
+}
+
+func init() {
+	t["HostVffsSpec"] = reflect.TypeOf((*HostVffsSpec)(nil)).Elem()
+}
+
+type HostVffsVolume struct {
+	HostFileSystemVolume
+
+	MajorVersion int `xml:"majorVersion"`
+	Version string `xml:"version"`
+	Uuid string `xml:"uuid"`
+	Extent []HostScsiDiskPartition `xml:"extent"`
+}
+
+func init() {
+	t["HostVffsVolume"] = reflect.TypeOf((*HostVffsVolume)(nil)).Elem()
+}
+
+type HostVirtualNic struct {
+	DynamicData
+
+	Device string `xml:"device"`
+	Key string `xml:"key"`
+	Portgroup string `xml:"portgroup"`
+	Spec HostVirtualNicSpec `xml:"spec"`
+	Port string `xml:"port,omitempty"`
+}
+
+func init() {
+	t["HostVirtualNic"] = reflect.TypeOf((*HostVirtualNic)(nil)).Elem()
+}
+
+type HostVirtualNicConfig struct {
+	DynamicData
+
+	ChangeOperation string `xml:"changeOperation,omitempty"`
+	Device string `xml:"device,omitempty"`
+	Portgroup string `xml:"portgroup"`
+	Spec *HostVirtualNicSpec `xml:"spec,omitempty"`
+}
+
+func init() {
+	t["HostVirtualNicConfig"] = reflect.TypeOf((*HostVirtualNicConfig)(nil)).Elem()
+}
+
+type HostVirtualNicConnection struct {
+	DynamicData
+
+	Portgroup string `xml:"portgroup,omitempty"`
+	DvPort *DistributedVirtualSwitchPortConnection `xml:"dvPort,omitempty"`
+}
+
+func init() {
+	t["HostVirtualNicConnection"] = reflect.TypeOf((*HostVirtualNicConnection)(nil)).Elem()
+}
+
+type HostVirtualNicManagerInfo struct {
+	DynamicData
+
+	NetConfig []VirtualNicManagerNetConfig `xml:"netConfig,omitempty"`
+}
+
+func init() {
+	t["HostVirtualNicManagerInfo"] = reflect.TypeOf((*HostVirtualNicManagerInfo)(nil)).Elem()
+}
+
+type HostVirtualNicManagerNicTypeSelection struct {
+	DynamicData
+
+	Vnic HostVirtualNicConnection `xml:"vnic"`
+	NicType []string `xml:"nicType,omitempty"`
+}
+
+func init() {
+	t["HostVirtualNicManagerNicTypeSelection"] = reflect.TypeOf((*HostVirtualNicManagerNicTypeSelection)(nil)).Elem()
+}
+
+type HostVirtualNicOpaqueNetworkSpec struct {
+	DynamicData
+
+	OpaqueNetworkId string `xml:"opaqueNetworkId"`
+	OpaqueNetworkType string `xml:"opaqueNetworkType"`
+}
+
+func init() {
+	t["HostVirtualNicOpaqueNetworkSpec"] = reflect.TypeOf((*HostVirtualNicOpaqueNetworkSpec)(nil)).Elem()
+}
+
+type HostVirtualNicSpec struct {
+	DynamicData
+
+	Ip *HostIpConfig `xml:"ip,omitempty"`
+	Mac string `xml:"mac,omitempty"`
+	DistributedVirtualPort *DistributedVirtualSwitchPortConnection `xml:"distributedVirtualPort,omitempty"`
+	Portgroup string `xml:"portgroup,omitempty"`
+	Mtu int `xml:"mtu,omitempty"`
+	TsoEnabled *bool `xml:"tsoEnabled"`
+	NetStackInstanceKey string `xml:"netStackInstanceKey,omitempty"`
+	OpaqueNetwork *HostVirtualNicOpaqueNetworkSpec `xml:"opaqueNetwork,omitempty"`
+	ExternalId string `xml:"externalId,omitempty"`
+	PinnedPnic string `xml:"pinnedPnic,omitempty"`
+}
+
+func init() {
+	t["HostVirtualNicSpec"] = reflect.TypeOf((*HostVirtualNicSpec)(nil)).Elem()
+}
+
+type HostVirtualSwitch struct {
+	DynamicData
+
+	Name string `xml:"name"`
+	Key string `xml:"key"`
+	NumPorts int `xml:"numPorts"`
+	NumPortsAvailable int `xml:"numPortsAvailable"`
+	Mtu int `xml:"mtu,omitempty"`
+	Portgroup []string `xml:"portgroup,omitempty"`
+	Pnic []string `xml:"pnic,omitempty"`
+	Spec HostVirtualSwitchSpec `xml:"spec"`
+}
+
+func init() {
+	t["HostVirtualSwitch"] = reflect.TypeOf((*HostVirtualSwitch)(nil)).Elem()
+}
+
+type HostVirtualSwitchAutoBridge struct {
+	HostVirtualSwitchBridge
+
+	ExcludedNicDevice []string `xml:"excludedNicDevice,omitempty"`
+}
+
+func init() {
+	t["HostVirtualSwitchAutoBridge"] = reflect.TypeOf((*HostVirtualSwitchAutoBridge)(nil)).Elem()
+}
+
+type HostVirtualSwitchBeaconConfig struct {
+	DynamicData
+
+	Interval int `xml:"interval"`
+}
+
+func init() {
+	t["HostVirtualSwitchBeaconConfig"] = reflect.TypeOf((*HostVirtualSwitchBeaconConfig)(nil)).Elem()
+}
+
+type HostVirtualSwitchBondBridge struct {
+	HostVirtualSwitchBridge
+
+	NicDevice []string `xml:"nicDevice"`
+	Beacon *HostVirtualSwitchBeaconConfig `xml:"beacon,omitempty"`
+	LinkDiscoveryProtocolConfig *LinkDiscoveryProtocolConfig `xml:"linkDiscoveryProtocolConfig,omitempty"`
+}
+
+func init() {
+	t["HostVirtualSwitchBondBridge"] = reflect.TypeOf((*HostVirtualSwitchBondBridge)(nil)).Elem()
+}
+
+type HostVirtualSwitchBridge struct {
+	DynamicData
+}
+
+func init() {
+	t["HostVirtualSwitchBridge"] = reflect.TypeOf((*HostVirtualSwitchBridge)(nil)).Elem()
+}
+
+type HostVirtualSwitchConfig struct {
+	DynamicData
+
+	ChangeOperation string `xml:"changeOperation,omitempty"`
+	Name string `xml:"name"`
+	Spec *HostVirtualSwitchSpec `xml:"spec,omitempty"`
+}
+
+func init() {
+	t["HostVirtualSwitchConfig"] = reflect.TypeOf((*HostVirtualSwitchConfig)(nil)).Elem()
+}
+
+type HostVirtualSwitchSimpleBridge struct {
+	HostVirtualSwitchBridge
+
+	NicDevice string `xml:"nicDevice"`
+}
+
+func init() {
+	t["HostVirtualSwitchSimpleBridge"] = reflect.TypeOf((*HostVirtualSwitchSimpleBridge)(nil)).Elem()
+}
+
+type HostVirtualSwitchSpec struct {
+	DynamicData
+
+	NumPorts int `xml:"numPorts"`
+	Bridge BaseHostVirtualSwitchBridge `xml:"bridge,omitempty,typeattr"`
+	Policy *HostNetworkPolicy `xml:"policy,omitempty"`
+	Mtu int `xml:"mtu,omitempty"`
+}
+
+func init() {
+	t["HostVirtualSwitchSpec"] = reflect.TypeOf((*HostVirtualSwitchSpec)(nil)).Elem()
+}
+
+type HostVmciAccessManagerAccessSpec struct {
+	DynamicData
+
+	Vm ManagedObjectReference `xml:"vm"`
+	Services []string `xml:"services,omitempty"`
+	Mode string `xml:"mode"`
+}
+
+func init() {
+	t["HostVmciAccessManagerAccessSpec"] = reflect.TypeOf((*HostVmciAccessManagerAccessSpec)(nil)).Elem()
+}
+
+type HostVmfsRescanResult struct {
+	DynamicData
+
+	Host ManagedObjectReference `xml:"host"`
+	Fault *LocalizedMethodFault `xml:"fault,omitempty"`
+}
+
+func init() {
+	t["HostVmfsRescanResult"] = reflect.TypeOf((*HostVmfsRescanResult)(nil)).Elem()
+}
+
+type HostVmfsSpec struct {
+	DynamicData
+
+	Extent HostScsiDiskPartition `xml:"extent"`
+	BlockSizeMb int `xml:"blockSizeMb,omitempty"`
+	MajorVersion int `xml:"majorVersion"`
+	VolumeName string `xml:"volumeName"`
+}
+
+func init() {
+	t["HostVmfsSpec"] = reflect.TypeOf((*HostVmfsSpec)(nil)).Elem()
+}
+
+type HostVmfsVolume struct {
+	HostFileSystemVolume
+
+	BlockSizeMb int `xml:"blockSizeMb"`
+	MaxBlocks int `xml:"maxBlocks"`
+	MajorVersion int `xml:"majorVersion"`
+	Version string `xml:"version"`
+	Uuid string `xml:"uuid"`
+	Extent []HostScsiDiskPartition `xml:"extent"`
+	VmfsUpgradable bool `xml:"vmfsUpgradable"`
+	ForceMountedInfo *HostForceMountedInfo `xml:"forceMountedInfo,omitempty"`
+	Ssd *bool `xml:"ssd"`
+	Local *bool `xml:"local"`
+}
+
+func init() {
+	t["HostVmfsVolume"] = reflect.TypeOf((*HostVmfsVolume)(nil)).Elem()
+}
+
+type HostVnicConnectedToCustomizedDVPortEvent struct {
+	HostEvent
+
+	Vnic VnicPortArgument `xml:"vnic"`
+}
+
+func init() {
+	t["HostVnicConnectedToCustomizedDVPortEvent"] = reflect.TypeOf((*HostVnicConnectedToCustomizedDVPortEvent)(nil)).Elem()
+}
+
+type HostVsanInternalSystemCmmdsQuery struct {
+	DynamicData
+
+	Type string `xml:"type,omitempty"`
+	Uuid string `xml:"uuid,omitempty"`
+	Owner string `xml:"owner,omitempty"`
+}
+
+func init() {
+	t["HostVsanInternalSystemCmmdsQuery"] = reflect.TypeOf((*HostVsanInternalSystemCmmdsQuery)(nil)).Elem()
+}
+
+type HostVsanInternalSystemDeleteVsanObjectsResult struct {
+	DynamicData
+
+	Uuid string `xml:"uuid"`
+	Success bool `xml:"success"`
+	FailureReason []LocalizableMessage `xml:"failureReason,omitempty"`
+}
+
+func init() {
+	t["HostVsanInternalSystemDeleteVsanObjectsResult"] = reflect.TypeOf((*HostVsanInternalSystemDeleteVsanObjectsResult)(nil)).Elem()
+}
+
+type HostVsanInternalSystemVsanObjectOperationResult struct {
+	DynamicData
+
+	Uuid string `xml:"uuid"`
+	FailureReason []LocalizableMessage `xml:"failureReason,omitempty"`
+}
+
+func init() {
+	t["HostVsanInternalSystemVsanObjectOperationResult"] = reflect.TypeOf((*HostVsanInternalSystemVsanObjectOperationResult)(nil)).Elem()
+}
+
+type HostVsanInternalSystemVsanPhysicalDiskDiagnosticsResult struct {
+	DynamicData
+
+	DiskUuid string `xml:"diskUuid"`
+	Success bool `xml:"success"`
+	FailureReason string `xml:"failureReason,omitempty"`
+}
+
+func init() {
+	t["HostVsanInternalSystemVsanPhysicalDiskDiagnosticsResult"] = reflect.TypeOf((*HostVsanInternalSystemVsanPhysicalDiskDiagnosticsResult)(nil)).Elem()
+}
+
+type HostVvolVolume struct {
+	HostFileSystemVolume
+
+	ScId string `xml:"scId"`
+	HostPE []VVolHostPE `xml:"hostPE,omitempty"`
+	VasaProviderInfo []VimVasaProviderInfo `xml:"vasaProviderInfo,omitempty"`
+	StorageArray []VASAStorageArray `xml:"storageArray,omitempty"`
+}
+
+func init() {
+	t["HostVvolVolume"] = reflect.TypeOf((*HostVvolVolume)(nil)).Elem()
+}
+
+type HostVvolVolumeSpecification struct {
+	DynamicData
+
+	MaxSizeInMB int64 `xml:"maxSizeInMB"`
+	VolumeName string `xml:"volumeName"`
+	VasaProviderInfo []VimVasaProviderInfo `xml:"vasaProviderInfo,omitempty"`
+	StorageArray []VASAStorageArray `xml:"storageArray,omitempty"`
+	Uuid string `xml:"uuid"`
+}
+
+func init() {
+	t["HostVvolVolumeSpecification"] = reflect.TypeOf((*HostVvolVolumeSpecification)(nil)).Elem()
+}
+
+type HostWwnChangedEvent struct {
+	HostEvent
+
+	OldNodeWwns []int64 `xml:"oldNodeWwns,omitempty"`
+	OldPortWwns []int64 `xml:"oldPortWwns,omitempty"`
+	NewNodeWwns []int64 `xml:"newNodeWwns,omitempty"`
+	NewPortWwns []int64 `xml:"newPortWwns,omitempty"`
+}
+
+func init() {
+	t["HostWwnChangedEvent"] = reflect.TypeOf((*HostWwnChangedEvent)(nil)).Elem()
+}
+
+type HostWwnConflictEvent struct {
+	HostEvent
+
+	ConflictedVms []VmEventArgument `xml:"conflictedVms,omitempty"`
+	ConflictedHosts []HostEventArgument `xml:"conflictedHosts,omitempty"`
+	Wwn int64 `xml:"wwn"`
+}
+
+func init() {
+	t["HostWwnConflictEvent"] = reflect.TypeOf((*HostWwnConflictEvent)(nil)).Elem()
+}
+
+type HotSnapshotMoveNotSupported struct {
+	SnapshotCopyNotSupported
+}
+
+func init() {
+	t["HotSnapshotMoveNotSupported"] = reflect.TypeOf((*HotSnapshotMoveNotSupported)(nil)).Elem()
+}
+
+type HotSnapshotMoveNotSupportedFault HotSnapshotMoveNotSupported
+
+func init() {
+	t["HotSnapshotMoveNotSupportedFault"] = reflect.TypeOf((*HotSnapshotMoveNotSupportedFault)(nil)).Elem()
+}
+
+type HourlyTaskScheduler struct {
+	RecurrentTaskScheduler
+
+	Minute int `xml:"minute"`
+}
+
+func init() {
+	t["HourlyTaskScheduler"] = reflect.TypeOf((*HourlyTaskScheduler)(nil)).Elem()
+}
+
+type HttpNfcLeaseAbort HttpNfcLeaseAbortRequestType
+
+func init() {
+	t["HttpNfcLeaseAbort"] = reflect.TypeOf((*HttpNfcLeaseAbort)(nil)).Elem()
+}
+
+type HttpNfcLeaseAbortRequestType struct {
+	This ManagedObjectReference `xml:"_this"`
+	Fault *LocalizedMethodFault `xml:"fault,omitempty"`
+}
+
+func init() {
+	t["HttpNfcLeaseAbortRequestType"] = reflect.TypeOf((*HttpNfcLeaseAbortRequestType)(nil)).Elem()
+}
+
+type HttpNfcLeaseAbortResponse struct {
+}
+
+type HttpNfcLeaseComplete HttpNfcLeaseCompleteRequestType
+
+func init() {
+	t["HttpNfcLeaseComplete"] = reflect.TypeOf((*HttpNfcLeaseComplete)(nil)).Elem()
+}
+
+type HttpNfcLeaseCompleteRequestType struct {
+	This ManagedObjectReference `xml:"_this"`
+}
+
+func init() {
+	t["HttpNfcLeaseCompleteRequestType"] = reflect.TypeOf((*HttpNfcLeaseCompleteRequestType)(nil)).Elem()
+}
+
+type HttpNfcLeaseCompleteResponse struct {
+}
+
+type HttpNfcLeaseDatastoreLeaseInfo struct {
+	DynamicData
+
+	DatastoreKey string `xml:"datastoreKey"`
+	Hosts []HttpNfcLeaseHostInfo `xml:"hosts"`
+}
+
+func init() {
+	t["HttpNfcLeaseDatastoreLeaseInfo"] = reflect.TypeOf((*HttpNfcLeaseDatastoreLeaseInfo)(nil)).Elem()
+}
+
+type HttpNfcLeaseDeviceUrl struct {
+	DynamicData
+
+	Key string `xml:"key"`
+	ImportKey string `xml:"importKey"`
+	Url string `xml:"url"`
+	SslThumbprint string `xml:"sslThumbprint"`
+	Disk *bool `xml:"disk"`
+	TargetId string `xml:"targetId,omitempty"`
+	DatastoreKey string `xml:"datastoreKey,omitempty"`
+	FileSize int64 `xml:"fileSize,omitempty"`
+}
+
+func init() {
+	t["HttpNfcLeaseDeviceUrl"] = reflect.TypeOf((*HttpNfcLeaseDeviceUrl)(nil)).Elem()
+}
+
+type HttpNfcLeaseGetManifest HttpNfcLeaseGetManifestRequestType
+
+func init() {
+	t["HttpNfcLeaseGetManifest"] = reflect.TypeOf((*HttpNfcLeaseGetManifest)(nil)).Elem()
+}
+
+type HttpNfcLeaseGetManifestRequestType struct {
+	This ManagedObjectReference `xml:"_this"`
+}
+
+func init() {
+	t["HttpNfcLeaseGetManifestRequestType"] = reflect.TypeOf((*HttpNfcLeaseGetManifestRequestType)(nil)).Elem()
+}
+
+type HttpNfcLeaseGetManifestResponse struct {
+	Returnval []HttpNfcLeaseManifestEntry `xml:"returnval,omitempty"`
+}
+
+type HttpNfcLeaseHostInfo struct {
+	DynamicData
+
+	Url string `xml:"url"`
+	SslThumbprint string `xml:"sslThumbprint"`
+}
+
+func init() {
+	t["HttpNfcLeaseHostInfo"] = reflect.TypeOf((*HttpNfcLeaseHostInfo)(nil)).Elem()
+}
+
+type HttpNfcLeaseInfo struct {
+	DynamicData
+
+	Lease ManagedObjectReference `xml:"lease"`
+	Entity ManagedObjectReference `xml:"entity"`
+	DeviceUrl []HttpNfcLeaseDeviceUrl `xml:"deviceUrl,omitempty"`
+	TotalDiskCapacityInKB int64 `xml:"totalDiskCapacityInKB"`
+	LeaseTimeout int `xml:"leaseTimeout"`
+	HostMap []HttpNfcLeaseDatastoreLeaseInfo `xml:"hostMap,omitempty"`
+}
+
+func init() {
+	t["HttpNfcLeaseInfo"] = reflect.TypeOf((*HttpNfcLeaseInfo)(nil)).Elem()
+}
+
+type HttpNfcLeaseManifestEntry struct {
+	DynamicData
+
+	Key string `xml:"key"`
+	Sha1 string `xml:"sha1"`
+	Size int64 `xml:"size"`
+	Disk bool `xml:"disk"`
+	Capacity int64 `xml:"capacity,omitempty"`
+	PopulatedSize int64 `xml:"populatedSize,omitempty"`
+}
+
+func init() {
+	t["HttpNfcLeaseManifestEntry"] = reflect.TypeOf((*HttpNfcLeaseManifestEntry)(nil)).Elem()
+}
+
+type HttpNfcLeaseProgress HttpNfcLeaseProgressRequestType
+
+func init() {
+	t["HttpNfcLeaseProgress"] = reflect.TypeOf((*HttpNfcLeaseProgress)(nil)).Elem()
+}
+
+type HttpNfcLeaseProgressRequestType struct {
+	This ManagedObjectReference `xml:"_this"`
+	Percent int `xml:"percent"`
+}
+
+func init() {
+	t["HttpNfcLeaseProgressRequestType"] = reflect.TypeOf((*HttpNfcLeaseProgressRequestType)(nil)).Elem()
+}
+
+type HttpNfcLeaseProgressResponse struct {
+}
+
+type IDEDiskNotSupported struct {
+	DiskNotSupported
+}
+
+func init() {
+	t["IDEDiskNotSupported"] = reflect.TypeOf((*IDEDiskNotSupported)(nil)).Elem()
+}
+
+type IDEDiskNotSupportedFault IDEDiskNotSupported
+
+func init() {
+	t["IDEDiskNotSupportedFault"] = reflect.TypeOf((*IDEDiskNotSupportedFault)(nil)).Elem()
+}
+
+type IORMNotSupportedHostOnDatastore struct {
+	VimFault
+
+	Datastore ManagedObjectReference `xml:"datastore"`
+	DatastoreName string `xml:"datastoreName"`
+	Host []ManagedObjectReference `xml:"host,omitempty"`
+}
+
+func init() {
+	t["IORMNotSupportedHostOnDatastore"] = reflect.TypeOf((*IORMNotSupportedHostOnDatastore)(nil)).Elem()
+}
+
+type IORMNotSupportedHostOnDatastoreFault IORMNotSupportedHostOnDatastore
+
+func init() {
+	t["IORMNotSupportedHostOnDatastoreFault"] = reflect.TypeOf((*IORMNotSupportedHostOnDatastoreFault)(nil)).Elem()
+}
+
+type IScsiBootFailureEvent struct {
+	HostEvent
+}
+
+func init() {
+	t["IScsiBootFailureEvent"] = reflect.TypeOf((*IScsiBootFailureEvent)(nil)).Elem()
+}
+
+type ImpersonateUser ImpersonateUserRequestType
+
+func init() {
+	t["ImpersonateUser"] = reflect.TypeOf((*ImpersonateUser)(nil)).Elem()
+}
+
+type ImpersonateUserRequestType struct {
+	This ManagedObjectReference `xml:"_this"`
+	UserName string `xml:"userName"`
+	Locale string `xml:"locale,omitempty"`
+}
+
+func init() {
+	t["ImpersonateUserRequestType"] = reflect.TypeOf((*ImpersonateUserRequestType)(nil)).Elem()
+}
+
+type ImpersonateUserResponse struct {
+	Returnval UserSession `xml:"returnval"`
+}
+
+type ImportCertificateForCAMRequestType struct {
+	This ManagedObjectReference `xml:"_this"`
+	CertPath string `xml:"certPath"`
+	CamServer string `xml:"camServer"`
+}
+
+func init() {
+	t["ImportCertificateForCAMRequestType"] = reflect.TypeOf((*ImportCertificateForCAMRequestType)(nil)).Elem()
+}
+
+type ImportCertificateForCAM_Task ImportCertificateForCAMRequestType
+
+func init() {
+	t["ImportCertificateForCAM_Task"] = reflect.TypeOf((*ImportCertificateForCAM_Task)(nil)).Elem()
+}
+
+type ImportCertificateForCAM_TaskResponse struct {
+	Returnval ManagedObjectReference `xml:"returnval"`
+}
+
+type ImportHostAddFailure struct {
+	DvsFault
+
+	HostIp []string `xml:"hostIp"`
+}
+
+func init() {
+	t["ImportHostAddFailure"] = reflect.TypeOf((*ImportHostAddFailure)(nil)).Elem()
+}
+
+type ImportHostAddFailureFault ImportHostAddFailure
+
+func init() {
+	t["ImportHostAddFailureFault"] = reflect.TypeOf((*ImportHostAddFailureFault)(nil)).Elem()
+}
+
+type ImportOperationBulkFault struct {
+	DvsFault
+
+	ImportFaults []ImportOperationBulkFaultFaultOnImport `xml:"importFaults"`
+}
+
+func init() {
+	t["ImportOperationBulkFault"] = reflect.TypeOf((*ImportOperationBulkFault)(nil)).Elem()
+}
+
+type ImportOperationBulkFaultFault ImportOperationBulkFault
+
+func init() {
+	t["ImportOperationBulkFaultFault"] = reflect.TypeOf((*ImportOperationBulkFaultFault)(nil)).Elem()
+}
+
+type ImportOperationBulkFaultFaultOnImport struct {
+	DynamicData
+
+	EntityType string `xml:"entityType,omitempty"`
+	Key string `xml:"key,omitempty"`
+	Fault LocalizedMethodFault `xml:"fault"`
+}
+
+func init() {
+	t["ImportOperationBulkFaultFaultOnImport"] = reflect.TypeOf((*ImportOperationBulkFaultFaultOnImport)(nil)).Elem()
+}
+
+type ImportSpec struct {
+	DynamicData
+
+	EntityConfig *VAppEntityConfigInfo `xml:"entityConfig,omitempty"`
+	InstantiationOst *OvfConsumerOstNode `xml:"instantiationOst,omitempty"`
+}
+
+func init() {
+	t["ImportSpec"] = reflect.TypeOf((*ImportSpec)(nil)).Elem()
+}
+
+type ImportUnmanagedSnapshot ImportUnmanagedSnapshotRequestType
+
+func init() {
+	t["ImportUnmanagedSnapshot"] = reflect.TypeOf((*ImportUnmanagedSnapshot)(nil)).Elem()
+}
+
+type ImportUnmanagedSnapshotRequestType struct {
+	This ManagedObjectReference `xml:"_this"`
+	Vdisk string `xml:"vdisk"`
+	Datacenter *ManagedObjectReference `xml:"datacenter,omitempty"`
+	VvolId string `xml:"vvolId"`
+}
+
+func init() {
+	t["ImportUnmanagedSnapshotRequestType"] = reflect.TypeOf((*ImportUnmanagedSnapshotRequestType)(nil)).Elem()
+}
+
+type ImportUnmanagedSnapshotResponse struct {
+}
+
+type ImportVApp ImportVAppRequestType
+
+func init() {
+	t["ImportVApp"] = reflect.TypeOf((*ImportVApp)(nil)).Elem()
+}
+
+type ImportVAppRequestType struct {
+	This ManagedObjectReference `xml:"_this"`
+	Spec BaseImportSpec `xml:"spec,typeattr"`
+	Folder *ManagedObjectReference `xml:"folder,omitempty"`
+	Host *ManagedObjectReference `xml:"host,omitempty"`
+}
+
+func init() {
+	t["ImportVAppRequestType"] = reflect.TypeOf((*ImportVAppRequestType)(nil)).Elem()
+}
+
+type ImportVAppResponse struct {
+	Returnval ManagedObjectReference `xml:"returnval"`
+}
+
+type InUseFeatureManipulationDisallowed struct {
+	NotEnoughLicenses
+}
+
+func init() {
+	t["InUseFeatureManipulationDisallowed"] = reflect.TypeOf((*InUseFeatureManipulationDisallowed)(nil)).Elem()
+}
+
+type InUseFeatureManipulationDisallowedFault InUseFeatureManipulationDisallowed
+
+func init() {
+	t["InUseFeatureManipulationDisallowedFault"] = reflect.TypeOf((*InUseFeatureManipulationDisallowedFault)(nil)).Elem()
+}
+
+type InaccessibleDatastore struct {
+	InvalidDatastore
+
+	Detail string `xml:"detail,omitempty"`
+}
+
+func init() {
+	t["InaccessibleDatastore"] = reflect.TypeOf((*InaccessibleDatastore)(nil)).Elem()
+}
+
+type InaccessibleDatastoreFault BaseInaccessibleDatastore
+
+func init() {
+	t["InaccessibleDatastoreFault"] = reflect.TypeOf((*InaccessibleDatastoreFault)(nil)).Elem()
+}
+
+type InaccessibleFTMetadataDatastore struct {
+	InaccessibleDatastore
+}
+
+func init() {
+	t["InaccessibleFTMetadataDatastore"] = reflect.TypeOf((*InaccessibleFTMetadataDatastore)(nil)).Elem()
+}
+
+type InaccessibleFTMetadataDatastoreFault InaccessibleFTMetadataDatastore
+
+func init() {
+	t["InaccessibleFTMetadataDatastoreFault"] = reflect.TypeOf((*InaccessibleFTMetadataDatastoreFault)(nil)).Elem()
+}
+
+type InaccessibleVFlashSource struct {
+	VimFault
+
+	HostName string `xml:"hostName"`
+}
+
+func init() {
+	t["InaccessibleVFlashSource"] = reflect.TypeOf((*InaccessibleVFlashSource)(nil)).Elem()
+}
+
+type InaccessibleVFlashSourceFault InaccessibleVFlashSource
+
+func init() {
+	t["InaccessibleVFlashSourceFault"] = reflect.TypeOf((*InaccessibleVFlashSourceFault)(nil)).Elem()
+}
+
+type IncompatibleDefaultDevice struct {
+	MigrationFault
+
+	Device string `xml:"device"`
+}
+
+func init() {
+	t["IncompatibleDefaultDevice"] = reflect.TypeOf((*IncompatibleDefaultDevice)(nil)).Elem()
+}
+
+type IncompatibleDefaultDeviceFault IncompatibleDefaultDevice
+
+func init() {
+	t["IncompatibleDefaultDeviceFault"] = reflect.TypeOf((*IncompatibleDefaultDeviceFault)(nil)).Elem()
+}
+
+type IncompatibleHostForFtSecondary struct {
+	VmFaultToleranceIssue
+
+	Host ManagedObjectReference `xml:"host"`
+	Error []LocalizedMethodFault `xml:"error,omitempty"`
+}
+
+func init() {
+	t["IncompatibleHostForFtSecondary"] = reflect.TypeOf((*IncompatibleHostForFtSecondary)(nil)).Elem()
+}
+
+type IncompatibleHostForFtSecondaryFault IncompatibleHostForFtSecondary
+
+func init() {
+	t["IncompatibleHostForFtSecondaryFault"] = reflect.TypeOf((*IncompatibleHostForFtSecondaryFault)(nil)).Elem()
+}
+
+type IncompatibleHostForVmReplication struct {
+	ReplicationFault
+
+	VmName string `xml:"vmName"`
+	HostName string `xml:"hostName"`
+	Reason string `xml:"reason"`
+}
+
+func init() {
+	t["IncompatibleHostForVmReplication"] = reflect.TypeOf((*IncompatibleHostForVmReplication)(nil)).Elem()
+}
+
+type IncompatibleHostForVmReplicationFault IncompatibleHostForVmReplication
+
+func init() {
+	t["IncompatibleHostForVmReplicationFault"] = reflect.TypeOf((*IncompatibleHostForVmReplicationFault)(nil)).Elem()
+}
+
+type IncompatibleSetting struct {
+	InvalidArgument
+
+	ConflictingProperty string `xml:"conflictingProperty"`
+}
+
+func init() {
+	t["IncompatibleSetting"] = reflect.TypeOf((*IncompatibleSetting)(nil)).Elem()
+}
+
+type IncompatibleSettingFault IncompatibleSetting
+
+func init() {
+	t["IncompatibleSettingFault"] = reflect.TypeOf((*IncompatibleSettingFault)(nil)).Elem()
+}
+
+type IncorrectFileType struct {
+	FileFault
+}
+
+func init() {
+	t["IncorrectFileType"] = reflect.TypeOf((*IncorrectFileType)(nil)).Elem()
+}
+
+type IncorrectFileTypeFault IncorrectFileType
+
+func init() {
+	t["IncorrectFileTypeFault"] = reflect.TypeOf((*IncorrectFileTypeFault)(nil)).Elem()
+}
+
+type IncorrectHostInformation struct {
+	NotEnoughLicenses
+}
+
+func init() {
+	t["IncorrectHostInformation"] = reflect.TypeOf((*IncorrectHostInformation)(nil)).Elem()
+}
+
+type IncorrectHostInformationEvent struct {
+	LicenseEvent
+}
+
+func init() {
+	t["IncorrectHostInformationEvent"] = reflect.TypeOf((*IncorrectHostInformationEvent)(nil)).Elem()
+}
+
+type IncorrectHostInformationFault IncorrectHostInformation
+
+func init() {
+	t["IncorrectHostInformationFault"] = reflect.TypeOf((*IncorrectHostInformationFault)(nil)).Elem()
+}
+
+type IndependentDiskVMotionNotSupported struct {
+	MigrationFeatureNotSupported
+}
+
+func init() {
+	t["IndependentDiskVMotionNotSupported"] = reflect.TypeOf((*IndependentDiskVMotionNotSupported)(nil)).Elem()
+}
+
+type IndependentDiskVMotionNotSupportedFault IndependentDiskVMotionNotSupported
+
+func init() {
+	t["IndependentDiskVMotionNotSupportedFault"] = reflect.TypeOf((*IndependentDiskVMotionNotSupportedFault)(nil)).Elem()
+}
+
+type InflateVirtualDiskRequestType struct {
+	This ManagedObjectReference `xml:"_this"`
+	Name string `xml:"name"`
+	Datacenter *ManagedObjectReference `xml:"datacenter,omitempty"`
+}
+
+func init() {
+	t["InflateVirtualDiskRequestType"] = reflect.TypeOf((*InflateVirtualDiskRequestType)(nil)).Elem()
+}
+
+type InflateVirtualDisk_Task InflateVirtualDiskRequestType
+
+func init() {
+	t["InflateVirtualDisk_Task"] = reflect.TypeOf((*InflateVirtualDisk_Task)(nil)).Elem()
+}
+
+type InflateVirtualDisk_TaskResponse struct {
+	Returnval ManagedObjectReference `xml:"returnval"`
+}
+
+type InfoUpgradeEvent struct {
+	UpgradeEvent
+}
+
+func init() {
+	t["InfoUpgradeEvent"] = reflect.TypeOf((*InfoUpgradeEvent)(nil)).Elem()
+}
+
+type InheritablePolicy struct {
+	DynamicData
+
+	Inherited bool `xml:"inherited"`
+}
+
+func init() {
+	t["InheritablePolicy"] = reflect.TypeOf((*InheritablePolicy)(nil)).Elem()
+}
+
+type InitializeDisksRequestType struct {
+	This ManagedObjectReference `xml:"_this"`
+	Mapping []VsanHostDiskMapping `xml:"mapping"`
+}
+
+func init() {
+	t["InitializeDisksRequestType"] = reflect.TypeOf((*InitializeDisksRequestType)(nil)).Elem()
+}
+
+type InitializeDisks_Task InitializeDisksRequestType
+
+func init() {
+	t["InitializeDisks_Task"] = reflect.TypeOf((*InitializeDisks_Task)(nil)).Elem()
+}
+
+type InitializeDisks_TaskResponse struct {
+	Returnval ManagedObjectReference `xml:"returnval"`
+}
+
+type InitiateFileTransferFromGuest InitiateFileTransferFromGuestRequestType
+
+func init() {
+	t["InitiateFileTransferFromGuest"] = reflect.TypeOf((*InitiateFileTransferFromGuest)(nil)).Elem()
+}
+
+type InitiateFileTransferFromGuestRequestType struct {
+	This ManagedObjectReference `xml:"_this"`
+	Vm ManagedObjectReference `xml:"vm"`
+	Auth BaseGuestAuthentication `xml:"auth,typeattr"`
+	GuestFilePath string `xml:"guestFilePath"`
+}
+
+func init() {
+	t["InitiateFileTransferFromGuestRequestType"] = reflect.TypeOf((*InitiateFileTransferFromGuestRequestType)(nil)).Elem()
+}
+
+type InitiateFileTransferFromGuestResponse struct {
+	Returnval FileTransferInformation `xml:"returnval"`
+}
+
+type InitiateFileTransferToGuest InitiateFileTransferToGuestRequestType
+
+func init() {
+	t["InitiateFileTransferToGuest"] = reflect.TypeOf((*InitiateFileTransferToGuest)(nil)).Elem()
+}
+
+type InitiateFileTransferToGuestRequestType struct {
+	This ManagedObjectReference `xml:"_this"`
+	Vm ManagedObjectReference `xml:"vm"`
+	Auth BaseGuestAuthentication `xml:"auth,typeattr"`
+	GuestFilePath string `xml:"guestFilePath"`
+	FileAttributes BaseGuestFileAttributes `xml:"fileAttributes,typeattr"`
+	FileSize int64 `xml:"fileSize"`
+	Overwrite bool `xml:"overwrite"`
+}
+
+func init() {
+	t["InitiateFileTransferToGuestRequestType"] = reflect.TypeOf((*InitiateFileTransferToGuestRequestType)(nil)).Elem()
+}
+
+type InitiateFileTransferToGuestResponse struct {
+	Returnval string `xml:"returnval"`
+}
+
+type InstallHostPatchRequestType struct {
+	This ManagedObjectReference `xml:"_this"`
+	Repository HostPatchManagerLocator `xml:"repository"`
+	UpdateID string `xml:"updateID"`
+	Force *bool `xml:"force"`
+}
+
+func init() {
+	t["InstallHostPatchRequestType"] = reflect.TypeOf((*InstallHostPatchRequestType)(nil)).Elem()
+}
+
+type InstallHostPatchV2RequestType struct {
+	This ManagedObjectReference `xml:"_this"`
+	MetaUrls []string `xml:"metaUrls,omitempty"`
+	BundleUrls []string `xml:"bundleUrls,omitempty"`
+	VibUrls []string `xml:"vibUrls,omitempty"`
+	Spec *HostPatchManagerPatchManagerOperationSpec `xml:"spec,omitempty"`
+}
+
+func init() {
+	t["InstallHostPatchV2RequestType"] = reflect.TypeOf((*InstallHostPatchV2RequestType)(nil)).Elem()
+}
+
+type InstallHostPatchV2_Task InstallHostPatchV2RequestType
+
+func init() {
+	t["InstallHostPatchV2_Task"] = reflect.TypeOf((*InstallHostPatchV2_Task)(nil)).Elem()
+}
+
+type InstallHostPatchV2_TaskResponse struct {
+	Returnval ManagedObjectReference `xml:"returnval"`
+}
+
+type InstallHostPatch_Task InstallHostPatchRequestType
+
+func init() {
+	t["InstallHostPatch_Task"] = reflect.TypeOf((*InstallHostPatch_Task)(nil)).Elem()
+}
+
+type InstallHostPatch_TaskResponse struct {
+	Returnval ManagedObjectReference `xml:"returnval"`
+}
+
+type InstallIoFilterRequestType struct {
+	This ManagedObjectReference `xml:"_this"`
+	VibUrl string `xml:"vibUrl"`
+	CompRes ManagedObjectReference `xml:"compRes"`
+}
+
+func init() {
+	t["InstallIoFilterRequestType"] = reflect.TypeOf((*InstallIoFilterRequestType)(nil)).Elem()
+}
+
+type InstallIoFilter_Task InstallIoFilterRequestType
+
+func init() {
+	t["InstallIoFilter_Task"] = reflect.TypeOf((*InstallIoFilter_Task)(nil)).Elem()
+}
+
+type InstallIoFilter_TaskResponse struct {
+	Returnval ManagedObjectReference `xml:"returnval"`
+}
+
+type InstallServerCertificate InstallServerCertificateRequestType
+
+func init() {
+	t["InstallServerCertificate"] = reflect.TypeOf((*InstallServerCertificate)(nil)).Elem()
+}
+
+type InstallServerCertificateRequestType struct {
+	This ManagedObjectReference `xml:"_this"`
+	Cert string `xml:"cert"`
+}
+
+func init() {
+	t["InstallServerCertificateRequestType"] = reflect.TypeOf((*InstallServerCertificateRequestType)(nil)).Elem()
+}
+
+type InstallServerCertificateResponse struct {
+}
+
+type InstallSmartCardTrustAnchor InstallSmartCardTrustAnchorRequestType
+
+func init() {
+	t["InstallSmartCardTrustAnchor"] = reflect.TypeOf((*InstallSmartCardTrustAnchor)(nil)).Elem()
+}
+
+type InstallSmartCardTrustAnchorRequestType struct {
+	This ManagedObjectReference `xml:"_this"`
+	Cert string `xml:"cert"`
+}
+
+func init() {
+	t["InstallSmartCardTrustAnchorRequestType"] = reflect.TypeOf((*InstallSmartCardTrustAnchorRequestType)(nil)).Elem()
+}
+
+type InstallSmartCardTrustAnchorResponse struct {
+}
+
+type InsufficientAgentVmsDeployed struct {
+	InsufficientResourcesFault
+
+	HostName string `xml:"hostName"`
+	RequiredNumAgentVms int `xml:"requiredNumAgentVms"`
+	CurrentNumAgentVms int `xml:"currentNumAgentVms"`
+}
+
+func init() {
+	t["InsufficientAgentVmsDeployed"] = reflect.TypeOf((*InsufficientAgentVmsDeployed)(nil)).Elem()
+}
+
+type InsufficientAgentVmsDeployedFault InsufficientAgentVmsDeployed
+
+func init() {
+	t["InsufficientAgentVmsDeployedFault"] = reflect.TypeOf((*InsufficientAgentVmsDeployedFault)(nil)).Elem()
+}
+
+type InsufficientCpuResourcesFault struct {
+	InsufficientResourcesFault
+
+	Unreserved int64 `xml:"unreserved"`
+	Requested int64 `xml:"requested"`
+}
+
+func init() {
+	t["InsufficientCpuResourcesFault"] = reflect.TypeOf((*InsufficientCpuResourcesFault)(nil)).Elem()
+}
+
+type InsufficientCpuResourcesFaultFault InsufficientCpuResourcesFault
+
+func init() {
+	t["InsufficientCpuResourcesFaultFault"] = reflect.TypeOf((*InsufficientCpuResourcesFaultFault)(nil)).Elem()
+}
+
+type InsufficientDisks struct {
+	VsanDiskFault
+}
+
+func init() {
+	t["InsufficientDisks"] = reflect.TypeOf((*InsufficientDisks)(nil)).Elem()
+}
+
+type InsufficientDisksFault InsufficientDisks
+
+func init() {
+	t["InsufficientDisksFault"] = reflect.TypeOf((*InsufficientDisksFault)(nil)).Elem()
+}
+
+type InsufficientFailoverResourcesEvent struct {
+	ClusterEvent
+}
+
+func init() {
+	t["InsufficientFailoverResourcesEvent"] = reflect.TypeOf((*InsufficientFailoverResourcesEvent)(nil)).Elem()
+}
+
+type InsufficientFailoverResourcesFault struct {
+	InsufficientResourcesFault
+}
+
+func init() {
+	t["InsufficientFailoverResourcesFault"] = reflect.TypeOf((*InsufficientFailoverResourcesFault)(nil)).Elem()
+}
+
+type InsufficientFailoverResourcesFaultFault InsufficientFailoverResourcesFault
+
+func init() {
+	t["InsufficientFailoverResourcesFaultFault"] = reflect.TypeOf((*InsufficientFailoverResourcesFaultFault)(nil)).Elem()
+}
+
+type InsufficientGraphicsResourcesFault struct {
+	InsufficientResourcesFault
+}
+
+func init() {
+	t["InsufficientGraphicsResourcesFault"] = reflect.TypeOf((*InsufficientGraphicsResourcesFault)(nil)).Elem()
+}
+
+type InsufficientGraphicsResourcesFaultFault InsufficientGraphicsResourcesFault
+
+func init() {
+	t["InsufficientGraphicsResourcesFaultFault"] = reflect.TypeOf((*InsufficientGraphicsResourcesFaultFault)(nil)).Elem()
+}
+
+type InsufficientHostCapacityFault struct {
+	InsufficientResourcesFault
+
+	Host *ManagedObjectReference `xml:"host,omitempty"`
+}
+
+func init() {
+	t["InsufficientHostCapacityFault"] = reflect.TypeOf((*InsufficientHostCapacityFault)(nil)).Elem()
+}
+
+type InsufficientHostCapacityFaultFault BaseInsufficientHostCapacityFault
+
+func init() {
+	t["InsufficientHostCapacityFaultFault"] = reflect.TypeOf((*InsufficientHostCapacityFaultFault)(nil)).Elem()
+}
+
+type InsufficientHostCpuCapacityFault struct {
+	InsufficientHostCapacityFault
+
+	Unreserved int64 `xml:"unreserved"`
+	Requested int64 `xml:"requested"`
+}
+
+func init() {
+	t["InsufficientHostCpuCapacityFault"] = reflect.TypeOf((*InsufficientHostCpuCapacityFault)(nil)).Elem()
+}
+
+type InsufficientHostCpuCapacityFaultFault InsufficientHostCpuCapacityFault
+
+func init() {
+	t["InsufficientHostCpuCapacityFaultFault"] = reflect.TypeOf((*InsufficientHostCpuCapacityFaultFault)(nil)).Elem()
+}
+
+type InsufficientHostMemoryCapacityFault struct {
+	InsufficientHostCapacityFault
+
+	Unreserved int64 `xml:"unreserved"`
+	Requested int64 `xml:"requested"`
+}
+
+func init() {
+	t["InsufficientHostMemoryCapacityFault"] = reflect.TypeOf((*InsufficientHostMemoryCapacityFault)(nil)).Elem()
+}
+
+type InsufficientHostMemoryCapacityFaultFault InsufficientHostMemoryCapacityFault
+
+func init() {
+	t["InsufficientHostMemoryCapacityFaultFault"] = reflect.TypeOf((*InsufficientHostMemoryCapacityFaultFault)(nil)).Elem()
+}
+
+type InsufficientMemoryResourcesFault struct {
+	InsufficientResourcesFault
+
+	Unreserved int64 `xml:"unreserved"`
+	Requested int64 `xml:"requested"`
+}
+
+func init() {
+	t["InsufficientMemoryResourcesFault"] = reflect.TypeOf((*InsufficientMemoryResourcesFault)(nil)).Elem()
+}
+
+type InsufficientMemoryResourcesFaultFault InsufficientMemoryResourcesFault
+
+func init() {
+	t["InsufficientMemoryResourcesFaultFault"] = reflect.TypeOf((*InsufficientMemoryResourcesFaultFault)(nil)).Elem()
+}
+
+type InsufficientNetworkCapacity struct {
+	InsufficientResourcesFault
+}
+
+func init() {
+	t["InsufficientNetworkCapacity"] = reflect.TypeOf((*InsufficientNetworkCapacity)(nil)).Elem()
+}
+
+type InsufficientNetworkCapacityFault InsufficientNetworkCapacity
+
+func init() {
+	t["InsufficientNetworkCapacityFault"] = reflect.TypeOf((*InsufficientNetworkCapacityFault)(nil)).Elem()
+}
+
+type InsufficientNetworkResourcePoolCapacity struct {
+	InsufficientResourcesFault
+
+	DvsName string `xml:"dvsName"`
+	DvsUuid string `xml:"dvsUuid"`
+	ResourcePoolKey string `xml:"resourcePoolKey"`
+	Available int64 `xml:"available"`
+	Requested int64 `xml:"requested"`
+	Device []string `xml:"device"`
+}
+
+func init() {
+	t["InsufficientNetworkResourcePoolCapacity"] = reflect.TypeOf((*InsufficientNetworkResourcePoolCapacity)(nil)).Elem()
+}
+
+type InsufficientNetworkResourcePoolCapacityFault InsufficientNetworkResourcePoolCapacity
+
+func init() {
+	t["InsufficientNetworkResourcePoolCapacityFault"] = reflect.TypeOf((*InsufficientNetworkResourcePoolCapacityFault)(nil)).Elem()
+}
+
+type InsufficientPerCpuCapacity struct {
+	InsufficientHostCapacityFault
+}
+
+func init() {
+	t["InsufficientPerCpuCapacity"] = reflect.TypeOf((*InsufficientPerCpuCapacity)(nil)).Elem()
+}
+
+type InsufficientPerCpuCapacityFault InsufficientPerCpuCapacity
+
+func init() {
+	t["InsufficientPerCpuCapacityFault"] = reflect.TypeOf((*InsufficientPerCpuCapacityFault)(nil)).Elem()
+}
+
+type InsufficientResourcesFault struct {
+	VimFault
+}
+
+func init() {
+	t["InsufficientResourcesFault"] = reflect.TypeOf((*InsufficientResourcesFault)(nil)).Elem()
+}
+
+type InsufficientResourcesFaultFault BaseInsufficientResourcesFault
+
+func init() {
+	t["InsufficientResourcesFaultFault"] = reflect.TypeOf((*InsufficientResourcesFaultFault)(nil)).Elem()
+}
+
+type InsufficientStandbyCpuResource struct {
+	InsufficientStandbyResource
+
+	Available int64 `xml:"available"`
+	Requested int64 `xml:"requested"`
+}
+
+func init() {
+	t["InsufficientStandbyCpuResource"] = reflect.TypeOf((*InsufficientStandbyCpuResource)(nil)).Elem()
+}
+
+type InsufficientStandbyCpuResourceFault InsufficientStandbyCpuResource
+
+func init() {
+	t["InsufficientStandbyCpuResourceFault"] = reflect.TypeOf((*InsufficientStandbyCpuResourceFault)(nil)).Elem()
+}
+
+type InsufficientStandbyMemoryResource struct {
+	InsufficientStandbyResource
+
+	Available int64 `xml:"available"`
+	Requested int64 `xml:"requested"`
+}
+
+func init() {
+	t["InsufficientStandbyMemoryResource"] = reflect.TypeOf((*InsufficientStandbyMemoryResource)(nil)).Elem()
+}
+
+type InsufficientStandbyMemoryResourceFault InsufficientStandbyMemoryResource
+
+func init() {
+	t["InsufficientStandbyMemoryResourceFault"] = reflect.TypeOf((*InsufficientStandbyMemoryResourceFault)(nil)).Elem()
+}
+
+type InsufficientStandbyResource struct {
+	InsufficientResourcesFault
+}
+
+func init() {
+	t["InsufficientStandbyResource"] = reflect.TypeOf((*InsufficientStandbyResource)(nil)).Elem()
+}
+
+type InsufficientStandbyResourceFault BaseInsufficientStandbyResource
+
+func init() {
+	t["InsufficientStandbyResourceFault"] =
reflect.TypeOf((*InsufficientStandbyResourceFault)(nil)).Elem() +} + +type InsufficientStorageIops struct { + VimFault + + UnreservedIops int64 `xml:"unreservedIops"` + RequestedIops int64 `xml:"requestedIops"` + DatastoreName string `xml:"datastoreName"` +} + +func init() { + t["InsufficientStorageIops"] = reflect.TypeOf((*InsufficientStorageIops)(nil)).Elem() +} + +type InsufficientStorageIopsFault InsufficientStorageIops + +func init() { + t["InsufficientStorageIopsFault"] = reflect.TypeOf((*InsufficientStorageIopsFault)(nil)).Elem() +} + +type InsufficientStorageSpace struct { + InsufficientResourcesFault +} + +func init() { + t["InsufficientStorageSpace"] = reflect.TypeOf((*InsufficientStorageSpace)(nil)).Elem() +} + +type InsufficientStorageSpaceFault InsufficientStorageSpace + +func init() { + t["InsufficientStorageSpaceFault"] = reflect.TypeOf((*InsufficientStorageSpaceFault)(nil)).Elem() +} + +type InsufficientVFlashResourcesFault struct { + InsufficientResourcesFault + + FreeSpaceInMB int64 `xml:"freeSpaceInMB,omitempty"` + FreeSpace int64 `xml:"freeSpace"` + RequestedSpaceInMB int64 `xml:"requestedSpaceInMB,omitempty"` + RequestedSpace int64 `xml:"requestedSpace"` +} + +func init() { + t["InsufficientVFlashResourcesFault"] = reflect.TypeOf((*InsufficientVFlashResourcesFault)(nil)).Elem() +} + +type InsufficientVFlashResourcesFaultFault InsufficientVFlashResourcesFault + +func init() { + t["InsufficientVFlashResourcesFaultFault"] = reflect.TypeOf((*InsufficientVFlashResourcesFaultFault)(nil)).Elem() +} + +type IntExpression struct { + NegatableExpression + + Value int `xml:"value,omitempty"` +} + +func init() { + t["IntExpression"] = reflect.TypeOf((*IntExpression)(nil)).Elem() +} + +type IntOption struct { + OptionType + + Min int `xml:"min"` + Max int `xml:"max"` + DefaultValue int `xml:"defaultValue"` +} + +func init() { + t["IntOption"] = reflect.TypeOf((*IntOption)(nil)).Elem() +} + +type IntPolicy struct { + InheritablePolicy + + Value int `xml:"value,omitempty"` +} + +func init() { + t["IntPolicy"] = reflect.TypeOf((*IntPolicy)(nil)).Elem() +} + +type InvalidAffinitySettingFault struct { + VimFault +} + +func init() { + t["InvalidAffinitySettingFault"] = reflect.TypeOf((*InvalidAffinitySettingFault)(nil)).Elem() +} + +type InvalidAffinitySettingFaultFault InvalidAffinitySettingFault + +func init() { + t["InvalidAffinitySettingFaultFault"] = reflect.TypeOf((*InvalidAffinitySettingFaultFault)(nil)).Elem() +} + +type InvalidArgument struct { + RuntimeFault + + InvalidProperty string `xml:"invalidProperty,omitempty"` +} + +func init() { + t["InvalidArgument"] = reflect.TypeOf((*InvalidArgument)(nil)).Elem() +} + +type InvalidArgumentFault BaseInvalidArgument + +func init() { + t["InvalidArgumentFault"] = reflect.TypeOf((*InvalidArgumentFault)(nil)).Elem() +} + +type InvalidBmcRole struct { + VimFault +} + +func init() { + t["InvalidBmcRole"] = reflect.TypeOf((*InvalidBmcRole)(nil)).Elem() +} + +type InvalidBmcRoleFault InvalidBmcRole + +func init() { + t["InvalidBmcRoleFault"] = reflect.TypeOf((*InvalidBmcRoleFault)(nil)).Elem() +} + +type InvalidBundle struct { + PlatformConfigFault +} + +func init() { + t["InvalidBundle"] = reflect.TypeOf((*InvalidBundle)(nil)).Elem() +} + +type InvalidBundleFault InvalidBundle + +func init() { + t["InvalidBundleFault"] = reflect.TypeOf((*InvalidBundleFault)(nil)).Elem() +} + +type InvalidCAMCertificate struct { + InvalidCAMServer +} + +func init() { + t["InvalidCAMCertificate"] = reflect.TypeOf((*InvalidCAMCertificate)(nil)).Elem() +} + 
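+// Each init below registers the generated type in the package-level map t
+// via reflect.TypeOf, which lets the XML decoder resolve a polymorphic
+// element's xsi:type attribute (see the `typeattr` struct tags) to its
+// concrete Go type; the *Fault aliases register the fault-body encoding of
+// the same type under its own element name.
+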
+type InvalidCAMCertificateFault InvalidCAMCertificate + +func init() { + t["InvalidCAMCertificateFault"] = reflect.TypeOf((*InvalidCAMCertificateFault)(nil)).Elem() +} + +type InvalidCAMServer struct { + ActiveDirectoryFault + + CamServer string `xml:"camServer"` +} + +func init() { + t["InvalidCAMServer"] = reflect.TypeOf((*InvalidCAMServer)(nil)).Elem() +} + +type InvalidCAMServerFault BaseInvalidCAMServer + +func init() { + t["InvalidCAMServerFault"] = reflect.TypeOf((*InvalidCAMServerFault)(nil)).Elem() +} + +type InvalidClientCertificate struct { + InvalidLogin +} + +func init() { + t["InvalidClientCertificate"] = reflect.TypeOf((*InvalidClientCertificate)(nil)).Elem() +} + +type InvalidClientCertificateFault InvalidClientCertificate + +func init() { + t["InvalidClientCertificateFault"] = reflect.TypeOf((*InvalidClientCertificateFault)(nil)).Elem() +} + +type InvalidCollectorVersion struct { + MethodFault +} + +func init() { + t["InvalidCollectorVersion"] = reflect.TypeOf((*InvalidCollectorVersion)(nil)).Elem() +} + +type InvalidCollectorVersionFault InvalidCollectorVersion + +func init() { + t["InvalidCollectorVersionFault"] = reflect.TypeOf((*InvalidCollectorVersionFault)(nil)).Elem() +} + +type InvalidController struct { + InvalidDeviceSpec + + ControllerKey int `xml:"controllerKey"` +} + +func init() { + t["InvalidController"] = reflect.TypeOf((*InvalidController)(nil)).Elem() +} + +type InvalidControllerFault InvalidController + +func init() { + t["InvalidControllerFault"] = reflect.TypeOf((*InvalidControllerFault)(nil)).Elem() +} + +type InvalidDasConfigArgument struct { + InvalidArgument + + Entry string `xml:"entry,omitempty"` + ClusterName string `xml:"clusterName,omitempty"` +} + +func init() { + t["InvalidDasConfigArgument"] = reflect.TypeOf((*InvalidDasConfigArgument)(nil)).Elem() +} + +type InvalidDasConfigArgumentFault InvalidDasConfigArgument + +func init() { + t["InvalidDasConfigArgumentFault"] = reflect.TypeOf((*InvalidDasConfigArgumentFault)(nil)).Elem() +} + +type InvalidDasRestartPriorityForFtVm struct { + InvalidArgument + + Vm ManagedObjectReference `xml:"vm"` + VmName string `xml:"vmName"` +} + +func init() { + t["InvalidDasRestartPriorityForFtVm"] = reflect.TypeOf((*InvalidDasRestartPriorityForFtVm)(nil)).Elem() +} + +type InvalidDasRestartPriorityForFtVmFault InvalidDasRestartPriorityForFtVm + +func init() { + t["InvalidDasRestartPriorityForFtVmFault"] = reflect.TypeOf((*InvalidDasRestartPriorityForFtVmFault)(nil)).Elem() +} + +type InvalidDatastore struct { + VimFault + + Datastore *ManagedObjectReference `xml:"datastore,omitempty"` + Name string `xml:"name,omitempty"` +} + +func init() { + t["InvalidDatastore"] = reflect.TypeOf((*InvalidDatastore)(nil)).Elem() +} + +type InvalidDatastoreFault BaseInvalidDatastore + +func init() { + t["InvalidDatastoreFault"] = reflect.TypeOf((*InvalidDatastoreFault)(nil)).Elem() +} + +type InvalidDatastorePath struct { + InvalidDatastore + + DatastorePath string `xml:"datastorePath"` +} + +func init() { + t["InvalidDatastorePath"] = reflect.TypeOf((*InvalidDatastorePath)(nil)).Elem() +} + +type InvalidDatastorePathFault InvalidDatastorePath + +func init() { + t["InvalidDatastorePathFault"] = reflect.TypeOf((*InvalidDatastorePathFault)(nil)).Elem() +} + +type InvalidDatastoreState struct { + InvalidState + + DatastoreName string `xml:"datastoreName,omitempty"` +} + +func init() { + t["InvalidDatastoreState"] = reflect.TypeOf((*InvalidDatastoreState)(nil)).Elem() +} + +type InvalidDatastoreStateFault InvalidDatastoreState + 
+func init() { + t["InvalidDatastoreStateFault"] = reflect.TypeOf((*InvalidDatastoreStateFault)(nil)).Elem() +} + +type InvalidDeviceBacking struct { + InvalidDeviceSpec +} + +func init() { + t["InvalidDeviceBacking"] = reflect.TypeOf((*InvalidDeviceBacking)(nil)).Elem() +} + +type InvalidDeviceBackingFault InvalidDeviceBacking + +func init() { + t["InvalidDeviceBackingFault"] = reflect.TypeOf((*InvalidDeviceBackingFault)(nil)).Elem() +} + +type InvalidDeviceOperation struct { + InvalidDeviceSpec + + BadOp VirtualDeviceConfigSpecOperation `xml:"badOp,omitempty"` + BadFileOp VirtualDeviceConfigSpecFileOperation `xml:"badFileOp,omitempty"` +} + +func init() { + t["InvalidDeviceOperation"] = reflect.TypeOf((*InvalidDeviceOperation)(nil)).Elem() +} + +type InvalidDeviceOperationFault InvalidDeviceOperation + +func init() { + t["InvalidDeviceOperationFault"] = reflect.TypeOf((*InvalidDeviceOperationFault)(nil)).Elem() +} + +type InvalidDeviceSpec struct { + InvalidVmConfig + + DeviceIndex int `xml:"deviceIndex"` +} + +func init() { + t["InvalidDeviceSpec"] = reflect.TypeOf((*InvalidDeviceSpec)(nil)).Elem() +} + +type InvalidDeviceSpecFault BaseInvalidDeviceSpec + +func init() { + t["InvalidDeviceSpecFault"] = reflect.TypeOf((*InvalidDeviceSpecFault)(nil)).Elem() +} + +type InvalidDiskFormat struct { + InvalidFormat +} + +func init() { + t["InvalidDiskFormat"] = reflect.TypeOf((*InvalidDiskFormat)(nil)).Elem() +} + +type InvalidDiskFormatFault InvalidDiskFormat + +func init() { + t["InvalidDiskFormatFault"] = reflect.TypeOf((*InvalidDiskFormatFault)(nil)).Elem() +} + +type InvalidDrsBehaviorForFtVm struct { + InvalidArgument + + Vm ManagedObjectReference `xml:"vm"` + VmName string `xml:"vmName"` +} + +func init() { + t["InvalidDrsBehaviorForFtVm"] = reflect.TypeOf((*InvalidDrsBehaviorForFtVm)(nil)).Elem() +} + +type InvalidDrsBehaviorForFtVmFault InvalidDrsBehaviorForFtVm + +func init() { + t["InvalidDrsBehaviorForFtVmFault"] = reflect.TypeOf((*InvalidDrsBehaviorForFtVmFault)(nil)).Elem() +} + +type InvalidEditionEvent struct { + LicenseEvent + + Feature string `xml:"feature"` +} + +func init() { + t["InvalidEditionEvent"] = reflect.TypeOf((*InvalidEditionEvent)(nil)).Elem() +} + +type InvalidEditionLicense struct { + NotEnoughLicenses + + Feature string `xml:"feature"` +} + +func init() { + t["InvalidEditionLicense"] = reflect.TypeOf((*InvalidEditionLicense)(nil)).Elem() +} + +type InvalidEditionLicenseFault InvalidEditionLicense + +func init() { + t["InvalidEditionLicenseFault"] = reflect.TypeOf((*InvalidEditionLicenseFault)(nil)).Elem() +} + +type InvalidEvent struct { + VimFault +} + +func init() { + t["InvalidEvent"] = reflect.TypeOf((*InvalidEvent)(nil)).Elem() +} + +type InvalidEventFault InvalidEvent + +func init() { + t["InvalidEventFault"] = reflect.TypeOf((*InvalidEventFault)(nil)).Elem() +} + +type InvalidFolder struct { + VimFault + + Target ManagedObjectReference `xml:"target"` +} + +func init() { + t["InvalidFolder"] = reflect.TypeOf((*InvalidFolder)(nil)).Elem() +} + +type InvalidFolderFault BaseInvalidFolder + +func init() { + t["InvalidFolderFault"] = reflect.TypeOf((*InvalidFolderFault)(nil)).Elem() +} + +type InvalidFormat struct { + VmConfigFault +} + +func init() { + t["InvalidFormat"] = reflect.TypeOf((*InvalidFormat)(nil)).Elem() +} + +type InvalidFormatFault BaseInvalidFormat + +func init() { + t["InvalidFormatFault"] = reflect.TypeOf((*InvalidFormatFault)(nil)).Elem() +} + +type InvalidGuestLogin struct { + GuestOperationsFault +} + +func init() { + 
t["InvalidGuestLogin"] = reflect.TypeOf((*InvalidGuestLogin)(nil)).Elem() +} + +type InvalidGuestLoginFault InvalidGuestLogin + +func init() { + t["InvalidGuestLoginFault"] = reflect.TypeOf((*InvalidGuestLoginFault)(nil)).Elem() +} + +type InvalidHostConnectionState struct { + InvalidHostState +} + +func init() { + t["InvalidHostConnectionState"] = reflect.TypeOf((*InvalidHostConnectionState)(nil)).Elem() +} + +type InvalidHostConnectionStateFault InvalidHostConnectionState + +func init() { + t["InvalidHostConnectionStateFault"] = reflect.TypeOf((*InvalidHostConnectionStateFault)(nil)).Elem() +} + +type InvalidHostName struct { + HostConfigFault +} + +func init() { + t["InvalidHostName"] = reflect.TypeOf((*InvalidHostName)(nil)).Elem() +} + +type InvalidHostNameFault InvalidHostName + +func init() { + t["InvalidHostNameFault"] = reflect.TypeOf((*InvalidHostNameFault)(nil)).Elem() +} + +type InvalidHostState struct { + InvalidState + + Host *ManagedObjectReference `xml:"host,omitempty"` +} + +func init() { + t["InvalidHostState"] = reflect.TypeOf((*InvalidHostState)(nil)).Elem() +} + +type InvalidHostStateFault BaseInvalidHostState + +func init() { + t["InvalidHostStateFault"] = reflect.TypeOf((*InvalidHostStateFault)(nil)).Elem() +} + +type InvalidIndexArgument struct { + InvalidArgument + + Key string `xml:"key"` +} + +func init() { + t["InvalidIndexArgument"] = reflect.TypeOf((*InvalidIndexArgument)(nil)).Elem() +} + +type InvalidIndexArgumentFault InvalidIndexArgument + +func init() { + t["InvalidIndexArgumentFault"] = reflect.TypeOf((*InvalidIndexArgumentFault)(nil)).Elem() +} + +type InvalidIpfixConfig struct { + DvsFault + + Property string `xml:"property,omitempty"` +} + +func init() { + t["InvalidIpfixConfig"] = reflect.TypeOf((*InvalidIpfixConfig)(nil)).Elem() +} + +type InvalidIpfixConfigFault InvalidIpfixConfig + +func init() { + t["InvalidIpfixConfigFault"] = reflect.TypeOf((*InvalidIpfixConfigFault)(nil)).Elem() +} + +type InvalidIpmiLoginInfo struct { + VimFault +} + +func init() { + t["InvalidIpmiLoginInfo"] = reflect.TypeOf((*InvalidIpmiLoginInfo)(nil)).Elem() +} + +type InvalidIpmiLoginInfoFault InvalidIpmiLoginInfo + +func init() { + t["InvalidIpmiLoginInfoFault"] = reflect.TypeOf((*InvalidIpmiLoginInfoFault)(nil)).Elem() +} + +type InvalidIpmiMacAddress struct { + VimFault + + UserProvidedMacAddress string `xml:"userProvidedMacAddress"` + ObservedMacAddress string `xml:"observedMacAddress"` +} + +func init() { + t["InvalidIpmiMacAddress"] = reflect.TypeOf((*InvalidIpmiMacAddress)(nil)).Elem() +} + +type InvalidIpmiMacAddressFault InvalidIpmiMacAddress + +func init() { + t["InvalidIpmiMacAddressFault"] = reflect.TypeOf((*InvalidIpmiMacAddressFault)(nil)).Elem() +} + +type InvalidLicense struct { + VimFault + + LicenseContent string `xml:"licenseContent"` +} + +func init() { + t["InvalidLicense"] = reflect.TypeOf((*InvalidLicense)(nil)).Elem() +} + +type InvalidLicenseFault InvalidLicense + +func init() { + t["InvalidLicenseFault"] = reflect.TypeOf((*InvalidLicenseFault)(nil)).Elem() +} + +type InvalidLocale struct { + VimFault +} + +func init() { + t["InvalidLocale"] = reflect.TypeOf((*InvalidLocale)(nil)).Elem() +} + +type InvalidLocaleFault InvalidLocale + +func init() { + t["InvalidLocaleFault"] = reflect.TypeOf((*InvalidLocaleFault)(nil)).Elem() +} + +type InvalidLogin struct { + VimFault +} + +func init() { + t["InvalidLogin"] = reflect.TypeOf((*InvalidLogin)(nil)).Elem() +} + +type InvalidLoginFault BaseInvalidLogin + +func init() { + t["InvalidLoginFault"] = 
reflect.TypeOf((*InvalidLoginFault)(nil)).Elem() +} + +type InvalidName struct { + VimFault + + Name string `xml:"name"` + Entity *ManagedObjectReference `xml:"entity,omitempty"` +} + +func init() { + t["InvalidName"] = reflect.TypeOf((*InvalidName)(nil)).Elem() +} + +type InvalidNameFault InvalidName + +func init() { + t["InvalidNameFault"] = reflect.TypeOf((*InvalidNameFault)(nil)).Elem() +} + +type InvalidNasCredentials struct { + NasConfigFault + + UserName string `xml:"userName"` +} + +func init() { + t["InvalidNasCredentials"] = reflect.TypeOf((*InvalidNasCredentials)(nil)).Elem() +} + +type InvalidNasCredentialsFault InvalidNasCredentials + +func init() { + t["InvalidNasCredentialsFault"] = reflect.TypeOf((*InvalidNasCredentialsFault)(nil)).Elem() +} + +type InvalidNetworkInType struct { + VAppPropertyFault +} + +func init() { + t["InvalidNetworkInType"] = reflect.TypeOf((*InvalidNetworkInType)(nil)).Elem() +} + +type InvalidNetworkInTypeFault InvalidNetworkInType + +func init() { + t["InvalidNetworkInTypeFault"] = reflect.TypeOf((*InvalidNetworkInTypeFault)(nil)).Elem() +} + +type InvalidNetworkResource struct { + NasConfigFault + + RemoteHost string `xml:"remoteHost"` + RemotePath string `xml:"remotePath"` +} + +func init() { + t["InvalidNetworkResource"] = reflect.TypeOf((*InvalidNetworkResource)(nil)).Elem() +} + +type InvalidNetworkResourceFault InvalidNetworkResource + +func init() { + t["InvalidNetworkResourceFault"] = reflect.TypeOf((*InvalidNetworkResourceFault)(nil)).Elem() +} + +type InvalidOperationOnSecondaryVm struct { + VmFaultToleranceIssue + + InstanceUuid string `xml:"instanceUuid,omitempty"` +} + +func init() { + t["InvalidOperationOnSecondaryVm"] = reflect.TypeOf((*InvalidOperationOnSecondaryVm)(nil)).Elem() +} + +type InvalidOperationOnSecondaryVmFault InvalidOperationOnSecondaryVm + +func init() { + t["InvalidOperationOnSecondaryVmFault"] = reflect.TypeOf((*InvalidOperationOnSecondaryVmFault)(nil)).Elem() +} + +type InvalidPowerState struct { + InvalidState + + RequestedState VirtualMachinePowerState `xml:"requestedState,omitempty"` + ExistingState VirtualMachinePowerState `xml:"existingState"` +} + +func init() { + t["InvalidPowerState"] = reflect.TypeOf((*InvalidPowerState)(nil)).Elem() +} + +type InvalidPowerStateFault InvalidPowerState + +func init() { + t["InvalidPowerStateFault"] = reflect.TypeOf((*InvalidPowerStateFault)(nil)).Elem() +} + +type InvalidPrivilege struct { + VimFault + + Privilege string `xml:"privilege"` +} + +func init() { + t["InvalidPrivilege"] = reflect.TypeOf((*InvalidPrivilege)(nil)).Elem() +} + +type InvalidPrivilegeFault InvalidPrivilege + +func init() { + t["InvalidPrivilegeFault"] = reflect.TypeOf((*InvalidPrivilegeFault)(nil)).Elem() +} + +type InvalidProfileReferenceHost struct { + RuntimeFault + + Reason string `xml:"reason,omitempty"` + Host *ManagedObjectReference `xml:"host,omitempty"` + Profile *ManagedObjectReference `xml:"profile,omitempty"` +} + +func init() { + t["InvalidProfileReferenceHost"] = reflect.TypeOf((*InvalidProfileReferenceHost)(nil)).Elem() +} + +type InvalidProfileReferenceHostFault InvalidProfileReferenceHost + +func init() { + t["InvalidProfileReferenceHostFault"] = reflect.TypeOf((*InvalidProfileReferenceHostFault)(nil)).Elem() +} + +type InvalidProperty struct { + MethodFault + + Name string `xml:"name"` +} + +func init() { + t["InvalidProperty"] = reflect.TypeOf((*InvalidProperty)(nil)).Elem() +} + +type InvalidPropertyFault InvalidProperty + +func init() { + t["InvalidPropertyFault"] = 
reflect.TypeOf((*InvalidPropertyFault)(nil)).Elem() +} + +type InvalidPropertyType struct { + VAppPropertyFault +} + +func init() { + t["InvalidPropertyType"] = reflect.TypeOf((*InvalidPropertyType)(nil)).Elem() +} + +type InvalidPropertyTypeFault InvalidPropertyType + +func init() { + t["InvalidPropertyTypeFault"] = reflect.TypeOf((*InvalidPropertyTypeFault)(nil)).Elem() +} + +type InvalidPropertyValue struct { + VAppPropertyFault +} + +func init() { + t["InvalidPropertyValue"] = reflect.TypeOf((*InvalidPropertyValue)(nil)).Elem() +} + +type InvalidPropertyValueFault BaseInvalidPropertyValue + +func init() { + t["InvalidPropertyValueFault"] = reflect.TypeOf((*InvalidPropertyValueFault)(nil)).Elem() +} + +type InvalidRequest struct { + RuntimeFault +} + +func init() { + t["InvalidRequest"] = reflect.TypeOf((*InvalidRequest)(nil)).Elem() +} + +type InvalidRequestFault BaseInvalidRequest + +func init() { + t["InvalidRequestFault"] = reflect.TypeOf((*InvalidRequestFault)(nil)).Elem() +} + +type InvalidResourcePoolStructureFault struct { + InsufficientResourcesFault +} + +func init() { + t["InvalidResourcePoolStructureFault"] = reflect.TypeOf((*InvalidResourcePoolStructureFault)(nil)).Elem() +} + +type InvalidResourcePoolStructureFaultFault InvalidResourcePoolStructureFault + +func init() { + t["InvalidResourcePoolStructureFaultFault"] = reflect.TypeOf((*InvalidResourcePoolStructureFaultFault)(nil)).Elem() +} + +type InvalidSnapshotFormat struct { + InvalidFormat +} + +func init() { + t["InvalidSnapshotFormat"] = reflect.TypeOf((*InvalidSnapshotFormat)(nil)).Elem() +} + +type InvalidSnapshotFormatFault InvalidSnapshotFormat + +func init() { + t["InvalidSnapshotFormatFault"] = reflect.TypeOf((*InvalidSnapshotFormatFault)(nil)).Elem() +} + +type InvalidState struct { + VimFault +} + +func init() { + t["InvalidState"] = reflect.TypeOf((*InvalidState)(nil)).Elem() +} + +type InvalidStateFault BaseInvalidState + +func init() { + t["InvalidStateFault"] = reflect.TypeOf((*InvalidStateFault)(nil)).Elem() +} + +type InvalidType struct { + InvalidRequest + + Argument string `xml:"argument,omitempty"` +} + +func init() { + t["InvalidType"] = reflect.TypeOf((*InvalidType)(nil)).Elem() +} + +type InvalidTypeFault InvalidType + +func init() { + t["InvalidTypeFault"] = reflect.TypeOf((*InvalidTypeFault)(nil)).Elem() +} + +type InvalidVmConfig struct { + VmConfigFault + + Property string `xml:"property,omitempty"` +} + +func init() { + t["InvalidVmConfig"] = reflect.TypeOf((*InvalidVmConfig)(nil)).Elem() +} + +type InvalidVmConfigFault BaseInvalidVmConfig + +func init() { + t["InvalidVmConfigFault"] = reflect.TypeOf((*InvalidVmConfigFault)(nil)).Elem() +} + +type InventoryDescription struct { + DynamicData + + NumHosts int `xml:"numHosts"` + NumVirtualMachines int `xml:"numVirtualMachines"` + NumResourcePools int `xml:"numResourcePools,omitempty"` + NumClusters int `xml:"numClusters,omitempty"` + NumCpuDev int `xml:"numCpuDev,omitempty"` + NumNetDev int `xml:"numNetDev,omitempty"` + NumDiskDev int `xml:"numDiskDev,omitempty"` + NumvCpuDev int `xml:"numvCpuDev,omitempty"` + NumvNetDev int `xml:"numvNetDev,omitempty"` + NumvDiskDev int `xml:"numvDiskDev,omitempty"` +} + +func init() { + t["InventoryDescription"] = reflect.TypeOf((*InventoryDescription)(nil)).Elem() +} + +type InventoryHasStandardAloneHosts struct { + NotEnoughLicenses + + Hosts []string `xml:"hosts"` +} + +func init() { + t["InventoryHasStandardAloneHosts"] = reflect.TypeOf((*InventoryHasStandardAloneHosts)(nil)).Elem() +} + +type 
InventoryHasStandardAloneHostsFault InventoryHasStandardAloneHosts + +func init() { + t["InventoryHasStandardAloneHostsFault"] = reflect.TypeOf((*InventoryHasStandardAloneHostsFault)(nil)).Elem() +} + +type IoFilterHostIssue struct { + DynamicData + + Host ManagedObjectReference `xml:"host"` + Issue []LocalizedMethodFault `xml:"issue"` +} + +func init() { + t["IoFilterHostIssue"] = reflect.TypeOf((*IoFilterHostIssue)(nil)).Elem() +} + +type IoFilterInfo struct { + DynamicData + + Id string `xml:"id"` + Name string `xml:"name"` + Vendor string `xml:"vendor"` + Version string `xml:"version"` + Summary string `xml:"summary,omitempty"` + ReleaseDate string `xml:"releaseDate,omitempty"` +} + +func init() { + t["IoFilterInfo"] = reflect.TypeOf((*IoFilterInfo)(nil)).Elem() +} + +type IoFilterQueryIssueResult struct { + DynamicData + + OpType string `xml:"opType"` + HostIssue []IoFilterHostIssue `xml:"hostIssue,omitempty"` +} + +func init() { + t["IoFilterQueryIssueResult"] = reflect.TypeOf((*IoFilterQueryIssueResult)(nil)).Elem() +} + +type IpAddress struct { + NegatableExpression +} + +func init() { + t["IpAddress"] = reflect.TypeOf((*IpAddress)(nil)).Elem() +} + +type IpAddressProfile struct { + ApplyProfile +} + +func init() { + t["IpAddressProfile"] = reflect.TypeOf((*IpAddressProfile)(nil)).Elem() +} + +type IpHostnameGeneratorError struct { + CustomizationFault +} + +func init() { + t["IpHostnameGeneratorError"] = reflect.TypeOf((*IpHostnameGeneratorError)(nil)).Elem() +} + +type IpHostnameGeneratorErrorFault IpHostnameGeneratorError + +func init() { + t["IpHostnameGeneratorErrorFault"] = reflect.TypeOf((*IpHostnameGeneratorErrorFault)(nil)).Elem() +} + +type IpPool struct { + DynamicData + + Id int `xml:"id,omitempty"` + Name string `xml:"name,omitempty"` + Ipv4Config *IpPoolIpPoolConfigInfo `xml:"ipv4Config,omitempty"` + Ipv6Config *IpPoolIpPoolConfigInfo `xml:"ipv6Config,omitempty"` + DnsDomain string `xml:"dnsDomain,omitempty"` + DnsSearchPath string `xml:"dnsSearchPath,omitempty"` + HostPrefix string `xml:"hostPrefix,omitempty"` + HttpProxy string `xml:"httpProxy,omitempty"` + NetworkAssociation []IpPoolAssociation `xml:"networkAssociation,omitempty"` + AvailableIpv4Addresses int `xml:"availableIpv4Addresses,omitempty"` + AvailableIpv6Addresses int `xml:"availableIpv6Addresses,omitempty"` + AllocatedIpv4Addresses int `xml:"allocatedIpv4Addresses,omitempty"` + AllocatedIpv6Addresses int `xml:"allocatedIpv6Addresses,omitempty"` +} + +func init() { + t["IpPool"] = reflect.TypeOf((*IpPool)(nil)).Elem() +} + +type IpPoolAssociation struct { + DynamicData + + Network *ManagedObjectReference `xml:"network,omitempty"` + NetworkName string `xml:"networkName"` +} + +func init() { + t["IpPoolAssociation"] = reflect.TypeOf((*IpPoolAssociation)(nil)).Elem() +} + +type IpPoolIpPoolConfigInfo struct { + DynamicData + + SubnetAddress string `xml:"subnetAddress,omitempty"` + Netmask string `xml:"netmask,omitempty"` + Gateway string `xml:"gateway,omitempty"` + Range string `xml:"range,omitempty"` + Dns []string `xml:"dns,omitempty"` + DhcpServerAvailable *bool `xml:"dhcpServerAvailable"` + IpPoolEnabled *bool `xml:"ipPoolEnabled"` +} + +func init() { + t["IpPoolIpPoolConfigInfo"] = reflect.TypeOf((*IpPoolIpPoolConfigInfo)(nil)).Elem() +} + +type IpPoolManagerIpAllocation struct { + DynamicData + + IpAddress string `xml:"ipAddress"` + AllocationId string `xml:"allocationId"` +} + +func init() { + t["IpPoolManagerIpAllocation"] = reflect.TypeOf((*IpPoolManagerIpAllocation)(nil)).Elem() +} + +type IpRange 
struct { + IpAddress + + AddressPrefix string `xml:"addressPrefix"` + PrefixLength int `xml:"prefixLength,omitempty"` +} + +func init() { + t["IpRange"] = reflect.TypeOf((*IpRange)(nil)).Elem() +} + +type IpRouteProfile struct { + ApplyProfile + + StaticRoute []StaticRouteProfile `xml:"staticRoute,omitempty"` +} + +func init() { + t["IpRouteProfile"] = reflect.TypeOf((*IpRouteProfile)(nil)).Elem() +} + +type IsSharedGraphicsActive IsSharedGraphicsActiveRequestType + +func init() { + t["IsSharedGraphicsActive"] = reflect.TypeOf((*IsSharedGraphicsActive)(nil)).Elem() +} + +type IsSharedGraphicsActiveRequestType struct { + This ManagedObjectReference `xml:"_this"` +} + +func init() { + t["IsSharedGraphicsActiveRequestType"] = reflect.TypeOf((*IsSharedGraphicsActiveRequestType)(nil)).Elem() +} + +type IsSharedGraphicsActiveResponse struct { + Returnval bool `xml:"returnval"` +} + +type IscsiDependencyEntity struct { + DynamicData + + PnicDevice string `xml:"pnicDevice"` + VnicDevice string `xml:"vnicDevice"` + VmhbaName string `xml:"vmhbaName"` +} + +func init() { + t["IscsiDependencyEntity"] = reflect.TypeOf((*IscsiDependencyEntity)(nil)).Elem() +} + +type IscsiFault struct { + VimFault +} + +func init() { + t["IscsiFault"] = reflect.TypeOf((*IscsiFault)(nil)).Elem() +} + +type IscsiFaultFault BaseIscsiFault + +func init() { + t["IscsiFaultFault"] = reflect.TypeOf((*IscsiFaultFault)(nil)).Elem() +} + +type IscsiFaultInvalidVnic struct { + IscsiFault + + VnicDevice string `xml:"vnicDevice"` +} + +func init() { + t["IscsiFaultInvalidVnic"] = reflect.TypeOf((*IscsiFaultInvalidVnic)(nil)).Elem() +} + +type IscsiFaultInvalidVnicFault IscsiFaultInvalidVnic + +func init() { + t["IscsiFaultInvalidVnicFault"] = reflect.TypeOf((*IscsiFaultInvalidVnicFault)(nil)).Elem() +} + +type IscsiFaultPnicInUse struct { + IscsiFault + + PnicDevice string `xml:"pnicDevice"` +} + +func init() { + t["IscsiFaultPnicInUse"] = reflect.TypeOf((*IscsiFaultPnicInUse)(nil)).Elem() +} + +type IscsiFaultPnicInUseFault IscsiFaultPnicInUse + +func init() { + t["IscsiFaultPnicInUseFault"] = reflect.TypeOf((*IscsiFaultPnicInUseFault)(nil)).Elem() +} + +type IscsiFaultVnicAlreadyBound struct { + IscsiFault + + VnicDevice string `xml:"vnicDevice"` +} + +func init() { + t["IscsiFaultVnicAlreadyBound"] = reflect.TypeOf((*IscsiFaultVnicAlreadyBound)(nil)).Elem() +} + +type IscsiFaultVnicAlreadyBoundFault IscsiFaultVnicAlreadyBound + +func init() { + t["IscsiFaultVnicAlreadyBoundFault"] = reflect.TypeOf((*IscsiFaultVnicAlreadyBoundFault)(nil)).Elem() +} + +type IscsiFaultVnicHasActivePaths struct { + IscsiFault + + VnicDevice string `xml:"vnicDevice"` +} + +func init() { + t["IscsiFaultVnicHasActivePaths"] = reflect.TypeOf((*IscsiFaultVnicHasActivePaths)(nil)).Elem() +} + +type IscsiFaultVnicHasActivePathsFault IscsiFaultVnicHasActivePaths + +func init() { + t["IscsiFaultVnicHasActivePathsFault"] = reflect.TypeOf((*IscsiFaultVnicHasActivePathsFault)(nil)).Elem() +} + +type IscsiFaultVnicHasMultipleUplinks struct { + IscsiFault + + VnicDevice string `xml:"vnicDevice"` +} + +func init() { + t["IscsiFaultVnicHasMultipleUplinks"] = reflect.TypeOf((*IscsiFaultVnicHasMultipleUplinks)(nil)).Elem() +} + +type IscsiFaultVnicHasMultipleUplinksFault IscsiFaultVnicHasMultipleUplinks + +func init() { + t["IscsiFaultVnicHasMultipleUplinksFault"] = reflect.TypeOf((*IscsiFaultVnicHasMultipleUplinksFault)(nil)).Elem() +} + +type IscsiFaultVnicHasNoUplinks struct { + IscsiFault + + VnicDevice string `xml:"vnicDevice"` +} + +func init() { + 
t["IscsiFaultVnicHasNoUplinks"] = reflect.TypeOf((*IscsiFaultVnicHasNoUplinks)(nil)).Elem() +} + +type IscsiFaultVnicHasNoUplinksFault IscsiFaultVnicHasNoUplinks + +func init() { + t["IscsiFaultVnicHasNoUplinksFault"] = reflect.TypeOf((*IscsiFaultVnicHasNoUplinksFault)(nil)).Elem() +} + +type IscsiFaultVnicHasWrongUplink struct { + IscsiFault + + VnicDevice string `xml:"vnicDevice"` +} + +func init() { + t["IscsiFaultVnicHasWrongUplink"] = reflect.TypeOf((*IscsiFaultVnicHasWrongUplink)(nil)).Elem() +} + +type IscsiFaultVnicHasWrongUplinkFault IscsiFaultVnicHasWrongUplink + +func init() { + t["IscsiFaultVnicHasWrongUplinkFault"] = reflect.TypeOf((*IscsiFaultVnicHasWrongUplinkFault)(nil)).Elem() +} + +type IscsiFaultVnicInUse struct { + IscsiFault + + VnicDevice string `xml:"vnicDevice"` +} + +func init() { + t["IscsiFaultVnicInUse"] = reflect.TypeOf((*IscsiFaultVnicInUse)(nil)).Elem() +} + +type IscsiFaultVnicInUseFault IscsiFaultVnicInUse + +func init() { + t["IscsiFaultVnicInUseFault"] = reflect.TypeOf((*IscsiFaultVnicInUseFault)(nil)).Elem() +} + +type IscsiFaultVnicIsLastPath struct { + IscsiFault + + VnicDevice string `xml:"vnicDevice"` +} + +func init() { + t["IscsiFaultVnicIsLastPath"] = reflect.TypeOf((*IscsiFaultVnicIsLastPath)(nil)).Elem() +} + +type IscsiFaultVnicIsLastPathFault IscsiFaultVnicIsLastPath + +func init() { + t["IscsiFaultVnicIsLastPathFault"] = reflect.TypeOf((*IscsiFaultVnicIsLastPathFault)(nil)).Elem() +} + +type IscsiFaultVnicNotBound struct { + IscsiFault + + VnicDevice string `xml:"vnicDevice"` +} + +func init() { + t["IscsiFaultVnicNotBound"] = reflect.TypeOf((*IscsiFaultVnicNotBound)(nil)).Elem() +} + +type IscsiFaultVnicNotBoundFault IscsiFaultVnicNotBound + +func init() { + t["IscsiFaultVnicNotBoundFault"] = reflect.TypeOf((*IscsiFaultVnicNotBoundFault)(nil)).Elem() +} + +type IscsiFaultVnicNotFound struct { + IscsiFault + + VnicDevice string `xml:"vnicDevice"` +} + +func init() { + t["IscsiFaultVnicNotFound"] = reflect.TypeOf((*IscsiFaultVnicNotFound)(nil)).Elem() +} + +type IscsiFaultVnicNotFoundFault IscsiFaultVnicNotFound + +func init() { + t["IscsiFaultVnicNotFoundFault"] = reflect.TypeOf((*IscsiFaultVnicNotFoundFault)(nil)).Elem() +} + +type IscsiMigrationDependency struct { + DynamicData + + MigrationAllowed bool `xml:"migrationAllowed"` + DisallowReason *IscsiStatus `xml:"disallowReason,omitempty"` + Dependency []IscsiDependencyEntity `xml:"dependency,omitempty"` +} + +func init() { + t["IscsiMigrationDependency"] = reflect.TypeOf((*IscsiMigrationDependency)(nil)).Elem() +} + +type IscsiPortInfo struct { + DynamicData + + VnicDevice string `xml:"vnicDevice,omitempty"` + Vnic *HostVirtualNic `xml:"vnic,omitempty"` + PnicDevice string `xml:"pnicDevice,omitempty"` + Pnic *PhysicalNic `xml:"pnic,omitempty"` + SwitchName string `xml:"switchName,omitempty"` + SwitchUuid string `xml:"switchUuid,omitempty"` + PortgroupName string `xml:"portgroupName,omitempty"` + PortgroupKey string `xml:"portgroupKey,omitempty"` + PortKey string `xml:"portKey,omitempty"` + ComplianceStatus *IscsiStatus `xml:"complianceStatus,omitempty"` + PathStatus string `xml:"pathStatus,omitempty"` +} + +func init() { + t["IscsiPortInfo"] = reflect.TypeOf((*IscsiPortInfo)(nil)).Elem() +} + +type IscsiStatus struct { + DynamicData + + Reason []LocalizedMethodFault `xml:"reason,omitempty"` +} + +func init() { + t["IscsiStatus"] = reflect.TypeOf((*IscsiStatus)(nil)).Elem() +} + +type IsoImageFileInfo struct { + FileInfo +} + +func init() { + t["IsoImageFileInfo"] = 
reflect.TypeOf((*IsoImageFileInfo)(nil)).Elem() +} + +type IsoImageFileQuery struct { + FileQuery +} + +func init() { + t["IsoImageFileQuery"] = reflect.TypeOf((*IsoImageFileQuery)(nil)).Elem() +} + +type JoinDomainRequestType struct { + This ManagedObjectReference `xml:"_this"` + DomainName string `xml:"domainName"` + UserName string `xml:"userName"` + Password string `xml:"password"` +} + +func init() { + t["JoinDomainRequestType"] = reflect.TypeOf((*JoinDomainRequestType)(nil)).Elem() +} + +type JoinDomainWithCAMRequestType struct { + This ManagedObjectReference `xml:"_this"` + DomainName string `xml:"domainName"` + CamServer string `xml:"camServer"` +} + +func init() { + t["JoinDomainWithCAMRequestType"] = reflect.TypeOf((*JoinDomainWithCAMRequestType)(nil)).Elem() +} + +type JoinDomainWithCAM_Task JoinDomainWithCAMRequestType + +func init() { + t["JoinDomainWithCAM_Task"] = reflect.TypeOf((*JoinDomainWithCAM_Task)(nil)).Elem() +} + +type JoinDomainWithCAM_TaskResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + +type JoinDomain_Task JoinDomainRequestType + +func init() { + t["JoinDomain_Task"] = reflect.TypeOf((*JoinDomain_Task)(nil)).Elem() +} + +type JoinDomain_TaskResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + +type KernelModuleInfo struct { + DynamicData + + Id int `xml:"id"` + Name string `xml:"name"` + Version string `xml:"version"` + Filename string `xml:"filename"` + OptionString string `xml:"optionString"` + Loaded bool `xml:"loaded"` + Enabled bool `xml:"enabled"` + UseCount int `xml:"useCount"` + ReadOnlySection KernelModuleSectionInfo `xml:"readOnlySection"` + WritableSection KernelModuleSectionInfo `xml:"writableSection"` + TextSection KernelModuleSectionInfo `xml:"textSection"` + DataSection KernelModuleSectionInfo `xml:"dataSection"` + BssSection KernelModuleSectionInfo `xml:"bssSection"` +} + +func init() { + t["KernelModuleInfo"] = reflect.TypeOf((*KernelModuleInfo)(nil)).Elem() +} + +type KernelModuleSectionInfo struct { + DynamicData + + Address int64 `xml:"address"` + Length int `xml:"length,omitempty"` +} + +func init() { + t["KernelModuleSectionInfo"] = reflect.TypeOf((*KernelModuleSectionInfo)(nil)).Elem() +} + +type KeyAnyValue struct { + DynamicData + + Key string `xml:"key"` + Value AnyType `xml:"value,typeattr"` +} + +func init() { + t["KeyAnyValue"] = reflect.TypeOf((*KeyAnyValue)(nil)).Elem() +} + +type KeyValue struct { + DynamicData + + Key string `xml:"key"` + Value string `xml:"value"` +} + +func init() { + t["KeyValue"] = reflect.TypeOf((*KeyValue)(nil)).Elem() +} + +type LargeRDMConversionNotSupported struct { + MigrationFault + + Device string `xml:"device"` +} + +func init() { + t["LargeRDMConversionNotSupported"] = reflect.TypeOf((*LargeRDMConversionNotSupported)(nil)).Elem() +} + +type LargeRDMConversionNotSupportedFault LargeRDMConversionNotSupported + +func init() { + t["LargeRDMConversionNotSupportedFault"] = reflect.TypeOf((*LargeRDMConversionNotSupportedFault)(nil)).Elem() +} + +type LargeRDMNotSupportedOnDatastore struct { + VmConfigFault + + Device string `xml:"device"` + Datastore ManagedObjectReference `xml:"datastore"` + DatastoreName string `xml:"datastoreName"` +} + +func init() { + t["LargeRDMNotSupportedOnDatastore"] = reflect.TypeOf((*LargeRDMNotSupportedOnDatastore)(nil)).Elem() +} + +type LargeRDMNotSupportedOnDatastoreFault LargeRDMNotSupportedOnDatastore + +func init() { + t["LargeRDMNotSupportedOnDatastoreFault"] = 
reflect.TypeOf((*LargeRDMNotSupportedOnDatastoreFault)(nil)).Elem() +} + +type LatencySensitivity struct { + DynamicData + + Level LatencySensitivitySensitivityLevel `xml:"level"` + Sensitivity int `xml:"sensitivity,omitempty"` +} + +func init() { + t["LatencySensitivity"] = reflect.TypeOf((*LatencySensitivity)(nil)).Elem() +} + +type LeaveCurrentDomainRequestType struct { + This ManagedObjectReference `xml:"_this"` + Force bool `xml:"force"` +} + +func init() { + t["LeaveCurrentDomainRequestType"] = reflect.TypeOf((*LeaveCurrentDomainRequestType)(nil)).Elem() +} + +type LeaveCurrentDomain_Task LeaveCurrentDomainRequestType + +func init() { + t["LeaveCurrentDomain_Task"] = reflect.TypeOf((*LeaveCurrentDomain_Task)(nil)).Elem() +} + +type LeaveCurrentDomain_TaskResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + +type LegacyNetworkInterfaceInUse struct { + CannotAccessNetwork +} + +func init() { + t["LegacyNetworkInterfaceInUse"] = reflect.TypeOf((*LegacyNetworkInterfaceInUse)(nil)).Elem() +} + +type LegacyNetworkInterfaceInUseFault LegacyNetworkInterfaceInUse + +func init() { + t["LegacyNetworkInterfaceInUseFault"] = reflect.TypeOf((*LegacyNetworkInterfaceInUseFault)(nil)).Elem() +} + +type LicenseAssignmentFailed struct { + RuntimeFault + + Reason string `xml:"reason,omitempty"` +} + +func init() { + t["LicenseAssignmentFailed"] = reflect.TypeOf((*LicenseAssignmentFailed)(nil)).Elem() +} + +type LicenseAssignmentFailedFault LicenseAssignmentFailed + +func init() { + t["LicenseAssignmentFailedFault"] = reflect.TypeOf((*LicenseAssignmentFailedFault)(nil)).Elem() +} + +type LicenseAssignmentManagerLicenseAssignment struct { + DynamicData + + EntityId string `xml:"entityId"` + Scope string `xml:"scope,omitempty"` + EntityDisplayName string `xml:"entityDisplayName,omitempty"` + AssignedLicense LicenseManagerLicenseInfo `xml:"assignedLicense"` + Properties []KeyAnyValue `xml:"properties,omitempty"` +} + +func init() { + t["LicenseAssignmentManagerLicenseAssignment"] = reflect.TypeOf((*LicenseAssignmentManagerLicenseAssignment)(nil)).Elem() +} + +type LicenseAvailabilityInfo struct { + DynamicData + + Feature LicenseFeatureInfo `xml:"feature"` + Total int `xml:"total"` + Available int `xml:"available"` +} + +func init() { + t["LicenseAvailabilityInfo"] = reflect.TypeOf((*LicenseAvailabilityInfo)(nil)).Elem() +} + +type LicenseDiagnostics struct { + DynamicData + + SourceLastChanged time.Time `xml:"sourceLastChanged"` + SourceLost string `xml:"sourceLost"` + SourceLatency float32 `xml:"sourceLatency"` + LicenseRequests string `xml:"licenseRequests"` + LicenseRequestFailures string `xml:"licenseRequestFailures"` + LicenseFeatureUnknowns string `xml:"licenseFeatureUnknowns"` + OpState LicenseManagerState `xml:"opState"` + LastStatusUpdate time.Time `xml:"lastStatusUpdate"` + OpFailureMessage string `xml:"opFailureMessage"` +} + +func init() { + t["LicenseDiagnostics"] = reflect.TypeOf((*LicenseDiagnostics)(nil)).Elem() +} + +type LicenseDowngradeDisallowed struct { + NotEnoughLicenses + + Edition string `xml:"edition"` + EntityId string `xml:"entityId"` + Features []KeyAnyValue `xml:"features"` +} + +func init() { + t["LicenseDowngradeDisallowed"] = reflect.TypeOf((*LicenseDowngradeDisallowed)(nil)).Elem() +} + +type LicenseDowngradeDisallowedFault LicenseDowngradeDisallowed + +func init() { + t["LicenseDowngradeDisallowedFault"] = reflect.TypeOf((*LicenseDowngradeDisallowedFault)(nil)).Elem() +} + +type LicenseEntityNotFound struct { + VimFault + + EntityId string 
`xml:"entityId"` +} + +func init() { + t["LicenseEntityNotFound"] = reflect.TypeOf((*LicenseEntityNotFound)(nil)).Elem() +} + +type LicenseEntityNotFoundFault LicenseEntityNotFound + +func init() { + t["LicenseEntityNotFoundFault"] = reflect.TypeOf((*LicenseEntityNotFoundFault)(nil)).Elem() +} + +type LicenseEvent struct { + Event +} + +func init() { + t["LicenseEvent"] = reflect.TypeOf((*LicenseEvent)(nil)).Elem() +} + +type LicenseExpired struct { + NotEnoughLicenses + + LicenseKey string `xml:"licenseKey"` +} + +func init() { + t["LicenseExpired"] = reflect.TypeOf((*LicenseExpired)(nil)).Elem() +} + +type LicenseExpiredEvent struct { + Event + + Feature LicenseFeatureInfo `xml:"feature"` +} + +func init() { + t["LicenseExpiredEvent"] = reflect.TypeOf((*LicenseExpiredEvent)(nil)).Elem() +} + +type LicenseExpiredFault LicenseExpired + +func init() { + t["LicenseExpiredFault"] = reflect.TypeOf((*LicenseExpiredFault)(nil)).Elem() +} + +type LicenseFeatureInfo struct { + DynamicData + + Key string `xml:"key"` + FeatureName string `xml:"featureName"` + FeatureDescription string `xml:"featureDescription,omitempty"` + State LicenseFeatureInfoState `xml:"state,omitempty"` + CostUnit string `xml:"costUnit"` + SourceRestriction string `xml:"sourceRestriction,omitempty"` + DependentKey []string `xml:"dependentKey,omitempty"` + Edition *bool `xml:"edition"` + ExpiresOn *time.Time `xml:"expiresOn"` +} + +func init() { + t["LicenseFeatureInfo"] = reflect.TypeOf((*LicenseFeatureInfo)(nil)).Elem() +} + +type LicenseKeyEntityMismatch struct { + NotEnoughLicenses +} + +func init() { + t["LicenseKeyEntityMismatch"] = reflect.TypeOf((*LicenseKeyEntityMismatch)(nil)).Elem() +} + +type LicenseKeyEntityMismatchFault LicenseKeyEntityMismatch + +func init() { + t["LicenseKeyEntityMismatchFault"] = reflect.TypeOf((*LicenseKeyEntityMismatchFault)(nil)).Elem() +} + +type LicenseManagerEvaluationInfo struct { + DynamicData + + Properties []KeyAnyValue `xml:"properties"` +} + +func init() { + t["LicenseManagerEvaluationInfo"] = reflect.TypeOf((*LicenseManagerEvaluationInfo)(nil)).Elem() +} + +type LicenseManagerLicenseInfo struct { + DynamicData + + LicenseKey string `xml:"licenseKey"` + EditionKey string `xml:"editionKey"` + Name string `xml:"name"` + Total int `xml:"total"` + Used int `xml:"used,omitempty"` + CostUnit string `xml:"costUnit"` + Properties []KeyAnyValue `xml:"properties,omitempty"` + Labels []KeyValue `xml:"labels,omitempty"` +} + +func init() { + t["LicenseManagerLicenseInfo"] = reflect.TypeOf((*LicenseManagerLicenseInfo)(nil)).Elem() +} + +type LicenseNonComplianceEvent struct { + LicenseEvent + + Url string `xml:"url"` +} + +func init() { + t["LicenseNonComplianceEvent"] = reflect.TypeOf((*LicenseNonComplianceEvent)(nil)).Elem() +} + +type LicenseReservationInfo struct { + DynamicData + + Key string `xml:"key"` + State LicenseReservationInfoState `xml:"state"` + Required int `xml:"required"` +} + +func init() { + t["LicenseReservationInfo"] = reflect.TypeOf((*LicenseReservationInfo)(nil)).Elem() +} + +type LicenseRestricted struct { + NotEnoughLicenses +} + +func init() { + t["LicenseRestricted"] = reflect.TypeOf((*LicenseRestricted)(nil)).Elem() +} + +type LicenseRestrictedEvent struct { + LicenseEvent +} + +func init() { + t["LicenseRestrictedEvent"] = reflect.TypeOf((*LicenseRestrictedEvent)(nil)).Elem() +} + +type LicenseRestrictedFault LicenseRestricted + +func init() { + t["LicenseRestrictedFault"] = reflect.TypeOf((*LicenseRestrictedFault)(nil)).Elem() +} + +type LicenseServerAvailableEvent 
struct { + LicenseEvent + + LicenseServer string `xml:"licenseServer"` +} + +func init() { + t["LicenseServerAvailableEvent"] = reflect.TypeOf((*LicenseServerAvailableEvent)(nil)).Elem() +} + +type LicenseServerSource struct { + LicenseSource + + LicenseServer string `xml:"licenseServer"` +} + +func init() { + t["LicenseServerSource"] = reflect.TypeOf((*LicenseServerSource)(nil)).Elem() +} + +type LicenseServerUnavailable struct { + VimFault + + LicenseServer string `xml:"licenseServer"` +} + +func init() { + t["LicenseServerUnavailable"] = reflect.TypeOf((*LicenseServerUnavailable)(nil)).Elem() +} + +type LicenseServerUnavailableEvent struct { + LicenseEvent + + LicenseServer string `xml:"licenseServer"` +} + +func init() { + t["LicenseServerUnavailableEvent"] = reflect.TypeOf((*LicenseServerUnavailableEvent)(nil)).Elem() +} + +type LicenseServerUnavailableFault LicenseServerUnavailable + +func init() { + t["LicenseServerUnavailableFault"] = reflect.TypeOf((*LicenseServerUnavailableFault)(nil)).Elem() +} + +type LicenseSource struct { + DynamicData +} + +func init() { + t["LicenseSource"] = reflect.TypeOf((*LicenseSource)(nil)).Elem() +} + +type LicenseSourceUnavailable struct { + NotEnoughLicenses + + LicenseSource BaseLicenseSource `xml:"licenseSource,typeattr"` +} + +func init() { + t["LicenseSourceUnavailable"] = reflect.TypeOf((*LicenseSourceUnavailable)(nil)).Elem() +} + +type LicenseSourceUnavailableFault LicenseSourceUnavailable + +func init() { + t["LicenseSourceUnavailableFault"] = reflect.TypeOf((*LicenseSourceUnavailableFault)(nil)).Elem() +} + +type LicenseUsageInfo struct { + DynamicData + + Source BaseLicenseSource `xml:"source,typeattr"` + SourceAvailable bool `xml:"sourceAvailable"` + ReservationInfo []LicenseReservationInfo `xml:"reservationInfo,omitempty"` + FeatureInfo []LicenseFeatureInfo `xml:"featureInfo,omitempty"` +} + +func init() { + t["LicenseUsageInfo"] = reflect.TypeOf((*LicenseUsageInfo)(nil)).Elem() +} + +type LimitExceeded struct { + VimFault + + Property string `xml:"property,omitempty"` + Limit int `xml:"limit,omitempty"` +} + +func init() { + t["LimitExceeded"] = reflect.TypeOf((*LimitExceeded)(nil)).Elem() +} + +type LimitExceededFault LimitExceeded + +func init() { + t["LimitExceededFault"] = reflect.TypeOf((*LimitExceededFault)(nil)).Elem() +} + +type LinkDiscoveryProtocolConfig struct { + DynamicData + + Protocol string `xml:"protocol"` + Operation string `xml:"operation"` +} + +func init() { + t["LinkDiscoveryProtocolConfig"] = reflect.TypeOf((*LinkDiscoveryProtocolConfig)(nil)).Elem() +} + +type LinkLayerDiscoveryProtocolInfo struct { + DynamicData + + ChassisId string `xml:"chassisId"` + PortId string `xml:"portId"` + TimeToLive int `xml:"timeToLive"` + Parameter []KeyAnyValue `xml:"parameter,omitempty"` +} + +func init() { + t["LinkLayerDiscoveryProtocolInfo"] = reflect.TypeOf((*LinkLayerDiscoveryProtocolInfo)(nil)).Elem() +} + +type LinkProfile struct { + ApplyProfile +} + +func init() { + t["LinkProfile"] = reflect.TypeOf((*LinkProfile)(nil)).Elem() +} + +type LinuxVolumeNotClean struct { + CustomizationFault +} + +func init() { + t["LinuxVolumeNotClean"] = reflect.TypeOf((*LinuxVolumeNotClean)(nil)).Elem() +} + +type LinuxVolumeNotCleanFault LinuxVolumeNotClean + +func init() { + t["LinuxVolumeNotCleanFault"] = reflect.TypeOf((*LinuxVolumeNotCleanFault)(nil)).Elem() +} + +type ListCACertificateRevocationLists ListCACertificateRevocationListsRequestType + +func init() { + t["ListCACertificateRevocationLists"] = 
reflect.TypeOf((*ListCACertificateRevocationLists)(nil)).Elem()
+}
+
+type ListCACertificateRevocationListsRequestType struct {
+	This ManagedObjectReference `xml:"_this"`
+}
+
+func init() {
+	t["ListCACertificateRevocationListsRequestType"] = reflect.TypeOf((*ListCACertificateRevocationListsRequestType)(nil)).Elem()
+}
+
+type ListCACertificateRevocationListsResponse struct {
+	Returnval []string `xml:"returnval,omitempty"`
+}
+
+type ListCACertificates ListCACertificatesRequestType
+
+func init() {
+	t["ListCACertificates"] = reflect.TypeOf((*ListCACertificates)(nil)).Elem()
+}
+
+type ListCACertificatesRequestType struct {
+	This ManagedObjectReference `xml:"_this"`
+}
+
+func init() {
+	t["ListCACertificatesRequestType"] = reflect.TypeOf((*ListCACertificatesRequestType)(nil)).Elem()
+}
+
+type ListCACertificatesResponse struct {
+	Returnval []string `xml:"returnval,omitempty"`
+}
+
+type ListFilesInGuest ListFilesInGuestRequestType
+
+func init() {
+	t["ListFilesInGuest"] = reflect.TypeOf((*ListFilesInGuest)(nil)).Elem()
+}
+
+type ListFilesInGuestRequestType struct {
+	This ManagedObjectReference `xml:"_this"`
+	Vm ManagedObjectReference `xml:"vm"`
+	Auth BaseGuestAuthentication `xml:"auth,typeattr"`
+	FilePath string `xml:"filePath"`
+	Index int `xml:"index,omitempty"`
+	MaxResults int `xml:"maxResults,omitempty"`
+	MatchPattern string `xml:"matchPattern,omitempty"`
+}
+
+func init() {
+	t["ListFilesInGuestRequestType"] = reflect.TypeOf((*ListFilesInGuestRequestType)(nil)).Elem()
+}
+
+type ListFilesInGuestResponse struct {
+	Returnval GuestListFileInfo `xml:"returnval"`
+}
+
+type ListGuestAliases ListGuestAliasesRequestType
+
+func init() {
+	t["ListGuestAliases"] = reflect.TypeOf((*ListGuestAliases)(nil)).Elem()
+}
+
+type ListGuestAliasesRequestType struct {
+	This ManagedObjectReference `xml:"_this"`
+	Vm ManagedObjectReference `xml:"vm"`
+	Auth BaseGuestAuthentication `xml:"auth,typeattr"`
+	Username string `xml:"username"`
+}
+
+func init() {
+	t["ListGuestAliasesRequestType"] = reflect.TypeOf((*ListGuestAliasesRequestType)(nil)).Elem()
+}
+
+type ListGuestAliasesResponse struct {
+	Returnval []GuestAliases `xml:"returnval,omitempty"`
+}
+
+type ListGuestMappedAliases ListGuestMappedAliasesRequestType
+
+func init() {
+	t["ListGuestMappedAliases"] = reflect.TypeOf((*ListGuestMappedAliases)(nil)).Elem()
+}
+
+type ListGuestMappedAliasesRequestType struct {
+	This ManagedObjectReference `xml:"_this"`
+	Vm ManagedObjectReference `xml:"vm"`
+	Auth BaseGuestAuthentication `xml:"auth,typeattr"`
+}
+
+func init() {
+	t["ListGuestMappedAliasesRequestType"] = reflect.TypeOf((*ListGuestMappedAliasesRequestType)(nil)).Elem()
+}
+
+type ListGuestMappedAliasesResponse struct {
+	Returnval []GuestMappedAliases `xml:"returnval,omitempty"`
+}
+
+type ListProcessesInGuest ListProcessesInGuestRequestType
+
+func init() {
+	t["ListProcessesInGuest"] = reflect.TypeOf((*ListProcessesInGuest)(nil)).Elem()
+}
+
+type ListProcessesInGuestRequestType struct {
+	This ManagedObjectReference `xml:"_this"`
+	Vm ManagedObjectReference `xml:"vm"`
+	Auth BaseGuestAuthentication `xml:"auth,typeattr"`
+	Pids []int64 `xml:"pids,omitempty"`
+}
+
+func init() {
+	t["ListProcessesInGuestRequestType"] = reflect.TypeOf((*ListProcessesInGuestRequestType)(nil)).Elem()
+}
+
+type ListProcessesInGuestResponse struct {
+	Returnval []GuestProcessInfo `xml:"returnval,omitempty"`
+}
+
+type ListRegistryKeysInGuest ListRegistryKeysInGuestRequestType
+
+func init() {
+	t["ListRegistryKeysInGuest"] = reflect.TypeOf((*ListRegistryKeysInGuest)(nil)).Elem()
+}
+
+type ListRegistryKeysInGuestRequestType struct {
+	This ManagedObjectReference `xml:"_this"`
+	Vm ManagedObjectReference `xml:"vm"`
+	Auth BaseGuestAuthentication `xml:"auth,typeattr"`
+	KeyName GuestRegKeyNameSpec `xml:"keyName"`
+	Recursive bool `xml:"recursive"`
+	MatchPattern string `xml:"matchPattern,omitempty"`
+}
+
+func init() {
+	t["ListRegistryKeysInGuestRequestType"] = reflect.TypeOf((*ListRegistryKeysInGuestRequestType)(nil)).Elem()
+}
+
+type ListRegistryKeysInGuestResponse struct {
+	Returnval []GuestRegKeyRecordSpec `xml:"returnval,omitempty"`
+}
+
+type ListRegistryValuesInGuest ListRegistryValuesInGuestRequestType
+
+func init() {
+	t["ListRegistryValuesInGuest"] = reflect.TypeOf((*ListRegistryValuesInGuest)(nil)).Elem()
+}
+
+type ListRegistryValuesInGuestRequestType struct {
+	This ManagedObjectReference `xml:"_this"`
+	Vm ManagedObjectReference `xml:"vm"`
+	Auth BaseGuestAuthentication `xml:"auth,typeattr"`
+	KeyName GuestRegKeyNameSpec `xml:"keyName"`
+	ExpandStrings bool `xml:"expandStrings"`
+	MatchPattern string `xml:"matchPattern,omitempty"`
+}
+
+func init() {
+	t["ListRegistryValuesInGuestRequestType"] = reflect.TypeOf((*ListRegistryValuesInGuestRequestType)(nil)).Elem()
+}
+
+type ListRegistryValuesInGuestResponse struct {
+	Returnval []GuestRegValueSpec `xml:"returnval,omitempty"`
+}
+
+type ListSmartCardTrustAnchors ListSmartCardTrustAnchorsRequestType
+
+func init() {
+	t["ListSmartCardTrustAnchors"] = reflect.TypeOf((*ListSmartCardTrustAnchors)(nil)).Elem()
+}
+
+type ListSmartCardTrustAnchorsRequestType struct {
+	This ManagedObjectReference `xml:"_this"`
+}
+
+func init() {
+	t["ListSmartCardTrustAnchorsRequestType"] = reflect.TypeOf((*ListSmartCardTrustAnchorsRequestType)(nil)).Elem()
+}
+
+type ListSmartCardTrustAnchorsResponse struct {
+	Returnval []string `xml:"returnval,omitempty"`
+}
+
+type LocalDatastoreCreatedEvent struct {
+	HostEvent
+
+	Datastore DatastoreEventArgument `xml:"datastore"`
+}
+
+func init() {
+	t["LocalDatastoreCreatedEvent"] = reflect.TypeOf((*LocalDatastoreCreatedEvent)(nil)).Elem()
+}
+
+type LocalDatastoreInfo struct {
+	DatastoreInfo
+
+	Path string `xml:"path,omitempty"`
+}
+
+func init() {
+	t["LocalDatastoreInfo"] = reflect.TypeOf((*LocalDatastoreInfo)(nil)).Elem()
+}
+
+type LocalLicenseSource struct {
+	LicenseSource
+
+	LicenseKeys string `xml:"licenseKeys"`
+}
+
+func init() {
+	t["LocalLicenseSource"] = reflect.TypeOf((*LocalLicenseSource)(nil)).Elem()
+}
+
+type LocalTSMEnabledEvent struct {
+	HostEvent
+}
+
+func init() {
+	t["LocalTSMEnabledEvent"] = reflect.TypeOf((*LocalTSMEnabledEvent)(nil)).Elem()
+}
+
+type LocalizableMessage struct {
+	DynamicData
+
+	Key string `xml:"key"`
+	Arg []KeyAnyValue `xml:"arg,omitempty"`
+	Message string `xml:"message,omitempty"`
+}
+
+func init() {
+	t["LocalizableMessage"] = reflect.TypeOf((*LocalizableMessage)(nil)).Elem()
+}
+
+type LocalizationManagerMessageCatalog struct {
+	DynamicData
+
+	ModuleName string `xml:"moduleName"`
+	CatalogName string `xml:"catalogName"`
+	Locale string `xml:"locale"`
+	CatalogUri string `xml:"catalogUri"`
+	LastModified *time.Time `xml:"lastModified"`
+	Md5sum string `xml:"md5sum,omitempty"`
+	Version string `xml:"version,omitempty"`
+}
+
+func init() {
+	t["LocalizationManagerMessageCatalog"] = reflect.TypeOf((*LocalizationManagerMessageCatalog)(nil)).Elem()
+}
+
+type LocalizedMethodFault struct {
+	DynamicData
+
+	Fault BaseMethodFault `xml:"fault,typeattr"`
+	LocalizedMessage string `xml:"localizedMessage,omitempty"`
+}
+
+func init() {
+	t["LocalizedMethodFault"] = reflect.TypeOf((*LocalizedMethodFault)(nil)).Elem()
+}
+
+type LockerMisconfiguredEvent struct {
+	Event
+
+	Datastore DatastoreEventArgument `xml:"datastore"`
+}
+
+func init() {
+	t["LockerMisconfiguredEvent"] = reflect.TypeOf((*LockerMisconfiguredEvent)(nil)).Elem()
+}
+
+type LockerReconfiguredEvent struct {
+	Event
+
+	OldDatastore *DatastoreEventArgument `xml:"oldDatastore,omitempty"`
+	NewDatastore *DatastoreEventArgument `xml:"newDatastore,omitempty"`
+}
+
+func init() {
+	t["LockerReconfiguredEvent"] = reflect.TypeOf((*LockerReconfiguredEvent)(nil)).Elem()
+}
+
+type LogBundlingFailed struct {
+	VimFault
+}
+
+func init() {
+	t["LogBundlingFailed"] = reflect.TypeOf((*LogBundlingFailed)(nil)).Elem()
+}
+
+type LogBundlingFailedFault LogBundlingFailed
+
+func init() {
+	t["LogBundlingFailedFault"] = reflect.TypeOf((*LogBundlingFailedFault)(nil)).Elem()
+}
+
+type LogUserEvent LogUserEventRequestType
+
+func init() {
+	t["LogUserEvent"] = reflect.TypeOf((*LogUserEvent)(nil)).Elem()
+}
+
+type LogUserEventRequestType struct {
+	This ManagedObjectReference `xml:"_this"`
+	Entity ManagedObjectReference `xml:"entity"`
+	Msg string `xml:"msg"`
+}
+
+func init() {
+	t["LogUserEventRequestType"] = reflect.TypeOf((*LogUserEventRequestType)(nil)).Elem()
+}
+
+type LogUserEventResponse struct {
+}
+
+type Login LoginRequestType
+
+func init() {
+	t["Login"] = reflect.TypeOf((*Login)(nil)).Elem()
+}
+
+type LoginBySSPI LoginBySSPIRequestType
+
+func init() {
+	t["LoginBySSPI"] = reflect.TypeOf((*LoginBySSPI)(nil)).Elem()
+}
+
+type LoginBySSPIRequestType struct {
+	This ManagedObjectReference `xml:"_this"`
+	Base64Token string `xml:"base64Token"`
+	Locale string `xml:"locale,omitempty"`
+}
+
+func init() {
+	t["LoginBySSPIRequestType"] = reflect.TypeOf((*LoginBySSPIRequestType)(nil)).Elem()
+}
+
+type LoginBySSPIResponse struct {
+	Returnval UserSession `xml:"returnval"`
+}
+
+type LoginByToken LoginByTokenRequestType
+
+func init() {
+	t["LoginByToken"] = reflect.TypeOf((*LoginByToken)(nil)).Elem()
+}
+
+type LoginByTokenRequestType struct {
+	This ManagedObjectReference `xml:"_this"`
+	Locale string `xml:"locale,omitempty"`
+}
+
+func init() {
+	t["LoginByTokenRequestType"] = reflect.TypeOf((*LoginByTokenRequestType)(nil)).Elem()
+}
+
+type LoginByTokenResponse struct {
+	Returnval UserSession `xml:"returnval"`
+}
+
+type LoginExtensionByCertificate LoginExtensionByCertificateRequestType
+
+func init() {
+	t["LoginExtensionByCertificate"] = reflect.TypeOf((*LoginExtensionByCertificate)(nil)).Elem()
+}
+
+type LoginExtensionByCertificateRequestType struct {
+	This ManagedObjectReference `xml:"_this"`
+	ExtensionKey string `xml:"extensionKey"`
+	Locale string `xml:"locale,omitempty"`
+}
+
+func init() {
+	t["LoginExtensionByCertificateRequestType"] = reflect.TypeOf((*LoginExtensionByCertificateRequestType)(nil)).Elem()
+}
+
+type LoginExtensionByCertificateResponse struct {
+	Returnval UserSession `xml:"returnval"`
+}
+
+type LoginExtensionBySubjectName LoginExtensionBySubjectNameRequestType
+
+func init() {
+	t["LoginExtensionBySubjectName"] = reflect.TypeOf((*LoginExtensionBySubjectName)(nil)).Elem()
+}
+
+type LoginExtensionBySubjectNameRequestType struct {
+	This ManagedObjectReference `xml:"_this"`
+	ExtensionKey string `xml:"extensionKey"`
+	Locale string `xml:"locale,omitempty"`
+}
+
+func init() {
+	t["LoginExtensionBySubjectNameRequestType"] = reflect.TypeOf((*LoginExtensionBySubjectNameRequestType)(nil)).Elem()
+}
+
+type LoginExtensionBySubjectNameResponse struct {
+	Returnval UserSession `xml:"returnval"`
+}
+
+type LoginRequestType struct {
+	This ManagedObjectReference `xml:"_this"`
+	UserName string `xml:"userName"`
+	Password string `xml:"password"`
+	Locale string `xml:"locale,omitempty"`
+}
+
+func init() {
+	t["LoginRequestType"] = reflect.TypeOf((*LoginRequestType)(nil)).Elem()
+}
+
+type LoginResponse struct {
+	Returnval UserSession `xml:"returnval"`
+}
+
+type Logout LogoutRequestType
+
+func init() {
+	t["Logout"] = reflect.TypeOf((*Logout)(nil)).Elem()
+}
+
+type LogoutRequestType struct {
+	This ManagedObjectReference `xml:"_this"`
+}
+
+func init() {
+	t["LogoutRequestType"] = reflect.TypeOf((*LogoutRequestType)(nil)).Elem()
+}
+
+type LogoutResponse struct {
+}
+
+type LongOption struct {
+	OptionType
+
+	Min int64 `xml:"min"`
+	Max int64 `xml:"max"`
+	DefaultValue int64 `xml:"defaultValue"`
+}
+
+func init() {
+	t["LongOption"] = reflect.TypeOf((*LongOption)(nil)).Elem()
+}
+
+type LongPolicy struct {
+	InheritablePolicy
+
+	Value int64 `xml:"value,omitempty"`
+}
+
+func init() {
+	t["LongPolicy"] = reflect.TypeOf((*LongPolicy)(nil)).Elem()
+}
+
+type LookupDvPortGroup LookupDvPortGroupRequestType
+
+func init() {
+	t["LookupDvPortGroup"] = reflect.TypeOf((*LookupDvPortGroup)(nil)).Elem()
+}
+
+type LookupDvPortGroupRequestType struct {
+	This ManagedObjectReference `xml:"_this"`
+	PortgroupKey string `xml:"portgroupKey"`
+}
+
+func init() {
+	t["LookupDvPortGroupRequestType"] = reflect.TypeOf((*LookupDvPortGroupRequestType)(nil)).Elem()
+}
+
+type LookupDvPortGroupResponse struct {
+	Returnval *ManagedObjectReference `xml:"returnval,omitempty"`
+}
+
+type LookupVmOverheadMemory LookupVmOverheadMemoryRequestType
+
+func init() {
+	t["LookupVmOverheadMemory"] = reflect.TypeOf((*LookupVmOverheadMemory)(nil)).Elem()
+}
+
+type LookupVmOverheadMemoryRequestType struct {
+	This ManagedObjectReference `xml:"_this"`
+	Vm ManagedObjectReference `xml:"vm"`
+	Host ManagedObjectReference `xml:"host"`
+}
+
+func init() {
+	t["LookupVmOverheadMemoryRequestType"] = reflect.TypeOf((*LookupVmOverheadMemoryRequestType)(nil)).Elem()
+}
+
+type LookupVmOverheadMemoryResponse struct {
+	Returnval int64 `xml:"returnval"`
+}
+
+type MacAddress struct {
+	NegatableExpression
+}
+
+func init() {
+	t["MacAddress"] = reflect.TypeOf((*MacAddress)(nil)).Elem()
+}
+
+type MacRange struct {
+	MacAddress
+
+	Address string `xml:"address"`
+	Mask string `xml:"mask"`
+}
+
+func init() {
+	t["MacRange"] = reflect.TypeOf((*MacRange)(nil)).Elem()
+}
+
+type MaintenanceModeFileMove struct {
+	MigrationFault
+}
+
+func init() {
+	t["MaintenanceModeFileMove"] = reflect.TypeOf((*MaintenanceModeFileMove)(nil)).Elem()
+}
+
+type MaintenanceModeFileMoveFault MaintenanceModeFileMove
+
+func init() {
+	t["MaintenanceModeFileMoveFault"] = reflect.TypeOf((*MaintenanceModeFileMoveFault)(nil)).Elem()
+}
+
+type MakeDirectory MakeDirectoryRequestType
+
+func init() {
+	t["MakeDirectory"] = reflect.TypeOf((*MakeDirectory)(nil)).Elem()
+}
+
+type MakeDirectoryInGuest MakeDirectoryInGuestRequestType
+
+func init() {
+	t["MakeDirectoryInGuest"] = reflect.TypeOf((*MakeDirectoryInGuest)(nil)).Elem()
+}
+
+type MakeDirectoryInGuestRequestType struct {
+	This ManagedObjectReference `xml:"_this"`
+	Vm ManagedObjectReference `xml:"vm"`
+	Auth BaseGuestAuthentication `xml:"auth,typeattr"`
+	DirectoryPath string `xml:"directoryPath"`
+	CreateParentDirectories bool `xml:"createParentDirectories"`
+}
+
+func init() {
+	t["MakeDirectoryInGuestRequestType"] = reflect.TypeOf((*MakeDirectoryInGuestRequestType)(nil)).Elem()
+}
+
+type MakeDirectoryInGuestResponse struct {
+}
+
+type MakeDirectoryRequestType struct {
+	This ManagedObjectReference `xml:"_this"`
+	Name string `xml:"name"`
+	Datacenter *ManagedObjectReference `xml:"datacenter,omitempty"`
+	CreateParentDirectories *bool `xml:"createParentDirectories"`
+}
+
+func init() {
+	t["MakeDirectoryRequestType"] = reflect.TypeOf((*MakeDirectoryRequestType)(nil)).Elem()
+}
+
+type MakeDirectoryResponse struct {
+}
+
+type MakePrimaryVMRequestType struct {
+	This ManagedObjectReference `xml:"_this"`
+	Vm ManagedObjectReference `xml:"vm"`
+}
+
+func init() {
+	t["MakePrimaryVMRequestType"] = reflect.TypeOf((*MakePrimaryVMRequestType)(nil)).Elem()
+}
+
+type MakePrimaryVM_Task MakePrimaryVMRequestType
+
+func init() {
+	t["MakePrimaryVM_Task"] = reflect.TypeOf((*MakePrimaryVM_Task)(nil)).Elem()
+}
+
+type MakePrimaryVM_TaskResponse struct {
+	Returnval ManagedObjectReference `xml:"returnval"`
+}
+
+type ManagedByInfo struct {
+	DynamicData
+
+	ExtensionKey string `xml:"extensionKey"`
+	Type string `xml:"type"`
+}
+
+func init() {
+	t["ManagedByInfo"] = reflect.TypeOf((*ManagedByInfo)(nil)).Elem()
+}
+
+type ManagedEntityEventArgument struct {
+	EntityEventArgument
+
+	Entity ManagedObjectReference `xml:"entity"`
+}
+
+func init() {
+	t["ManagedEntityEventArgument"] = reflect.TypeOf((*ManagedEntityEventArgument)(nil)).Elem()
+}
+
+type ManagedObjectNotFound struct {
+	RuntimeFault
+
+	Obj ManagedObjectReference `xml:"obj"`
+}
+
+func init() {
+	t["ManagedObjectNotFound"] = reflect.TypeOf((*ManagedObjectNotFound)(nil)).Elem()
+}
+
+type ManagedObjectNotFoundFault ManagedObjectNotFound
+
+func init() {
+	t["ManagedObjectNotFoundFault"] = reflect.TypeOf((*ManagedObjectNotFoundFault)(nil)).Elem()
+}
+
+type ManagedObjectReference struct {
+	Type string `xml:"type,attr"`
+	Value string `xml:",chardata"`
+}
+
+func init() {
+	t["ManagedObjectReference"] = reflect.TypeOf((*ManagedObjectReference)(nil)).Elem()
+}
+
+type MarkAsLocalRequestType struct {
+	This ManagedObjectReference `xml:"_this"`
+	ScsiDiskUuid string `xml:"scsiDiskUuid"`
+}
+
+func init() {
+	t["MarkAsLocalRequestType"] = reflect.TypeOf((*MarkAsLocalRequestType)(nil)).Elem()
+}
+
+type MarkAsLocal_Task MarkAsLocalRequestType
+
+func init() {
+	t["MarkAsLocal_Task"] = reflect.TypeOf((*MarkAsLocal_Task)(nil)).Elem()
+}
+
+type MarkAsLocal_TaskResponse struct {
+	Returnval ManagedObjectReference `xml:"returnval"`
+}
+
+type MarkAsNonLocalRequestType struct {
+	This ManagedObjectReference `xml:"_this"`
+	ScsiDiskUuid string `xml:"scsiDiskUuid"`
+}
+
+func init() {
+	t["MarkAsNonLocalRequestType"] = reflect.TypeOf((*MarkAsNonLocalRequestType)(nil)).Elem()
+}
+
+type MarkAsNonLocal_Task MarkAsNonLocalRequestType
+
+func init() {
+	t["MarkAsNonLocal_Task"] = reflect.TypeOf((*MarkAsNonLocal_Task)(nil)).Elem()
+}
+
+type MarkAsNonLocal_TaskResponse struct {
+	Returnval ManagedObjectReference `xml:"returnval"`
+}
+
+type MarkAsNonSsdRequestType struct {
+	This ManagedObjectReference `xml:"_this"`
+	ScsiDiskUuid string `xml:"scsiDiskUuid"`
+}
+
+func init() {
+	t["MarkAsNonSsdRequestType"] = reflect.TypeOf((*MarkAsNonSsdRequestType)(nil)).Elem()
+}
+
+type MarkAsNonSsd_Task MarkAsNonSsdRequestType
+
+func init() {
+	t["MarkAsNonSsd_Task"] = reflect.TypeOf((*MarkAsNonSsd_Task)(nil)).Elem()
+}
+
+type MarkAsNonSsd_TaskResponse struct {
+	Returnval ManagedObjectReference `xml:"returnval"`
+}
+
+type MarkAsSsdRequestType struct {
+	This ManagedObjectReference `xml:"_this"`
+	ScsiDiskUuid string `xml:"scsiDiskUuid"`
+}
+
+func init() {
+	t["MarkAsSsdRequestType"] = reflect.TypeOf((*MarkAsSsdRequestType)(nil)).Elem()
+}
+
+type MarkAsSsd_Task MarkAsSsdRequestType
+
+func init() {
+	t["MarkAsSsd_Task"] = reflect.TypeOf((*MarkAsSsd_Task)(nil)).Elem()
+}
+
+type MarkAsSsd_TaskResponse struct {
+	Returnval ManagedObjectReference `xml:"returnval"`
+}
+
+type MarkAsTemplate MarkAsTemplateRequestType
+
+func init() {
+	t["MarkAsTemplate"] = reflect.TypeOf((*MarkAsTemplate)(nil)).Elem()
+}
+
+type MarkAsTemplateRequestType struct {
+	This ManagedObjectReference `xml:"_this"`
+}
+
+func init() {
+	t["MarkAsTemplateRequestType"] = reflect.TypeOf((*MarkAsTemplateRequestType)(nil)).Elem()
+}
+
+type MarkAsTemplateResponse struct {
+}
+
+type MarkAsVirtualMachine MarkAsVirtualMachineRequestType
+
+func init() {
+	t["MarkAsVirtualMachine"] = reflect.TypeOf((*MarkAsVirtualMachine)(nil)).Elem()
+}
+
+type MarkAsVirtualMachineRequestType struct {
+	This ManagedObjectReference `xml:"_this"`
+	Pool ManagedObjectReference `xml:"pool"`
+	Host *ManagedObjectReference `xml:"host,omitempty"`
+}
+
+func init() {
+	t["MarkAsVirtualMachineRequestType"] = reflect.TypeOf((*MarkAsVirtualMachineRequestType)(nil)).Elem()
+}
+
+type MarkAsVirtualMachineResponse struct {
+}
+
+type MarkForRemoval MarkForRemovalRequestType
+
+func init() {
+	t["MarkForRemoval"] = reflect.TypeOf((*MarkForRemoval)(nil)).Elem()
+}
+
+type MarkForRemovalRequestType struct {
+	This ManagedObjectReference `xml:"_this"`
+	HbaName string `xml:"hbaName"`
+	Remove bool `xml:"remove"`
+}
+
+func init() {
+	t["MarkForRemovalRequestType"] = reflect.TypeOf((*MarkForRemovalRequestType)(nil)).Elem()
+}
+
+type MarkForRemovalResponse struct {
+}
+
+type MemoryFileFormatNotSupportedByDatastore struct {
+	UnsupportedDatastore
+
+	DatastoreName string `xml:"datastoreName"`
+	Type string `xml:"type"`
+}
+
+func init() {
+	t["MemoryFileFormatNotSupportedByDatastore"] = reflect.TypeOf((*MemoryFileFormatNotSupportedByDatastore)(nil)).Elem()
+}
+
+type MemoryFileFormatNotSupportedByDatastoreFault MemoryFileFormatNotSupportedByDatastore
+
+func init() {
+	t["MemoryFileFormatNotSupportedByDatastoreFault"] = reflect.TypeOf((*MemoryFileFormatNotSupportedByDatastoreFault)(nil)).Elem()
+}
+
+type MemoryHotPlugNotSupported struct {
+	VmConfigFault
+}
+
+func init() {
+	t["MemoryHotPlugNotSupported"] = reflect.TypeOf((*MemoryHotPlugNotSupported)(nil)).Elem()
+}
+
+type MemoryHotPlugNotSupportedFault MemoryHotPlugNotSupported
+
+func init() {
+	t["MemoryHotPlugNotSupportedFault"] = reflect.TypeOf((*MemoryHotPlugNotSupportedFault)(nil)).Elem()
+}
+
+type MemorySizeNotRecommended struct {
+	VirtualHardwareCompatibilityIssue
+
+	MemorySizeMB int `xml:"memorySizeMB"`
+	MinMemorySizeMB int `xml:"minMemorySizeMB"`
+	MaxMemorySizeMB int `xml:"maxMemorySizeMB"`
+}
+
+func init() {
+	t["MemorySizeNotRecommended"] = reflect.TypeOf((*MemorySizeNotRecommended)(nil)).Elem()
+}
+
+type MemorySizeNotRecommendedFault MemorySizeNotRecommended
+
+func init() {
+	t["MemorySizeNotRecommendedFault"] = reflect.TypeOf((*MemorySizeNotRecommendedFault)(nil)).Elem()
+}
+
+type MemorySizeNotSupported struct {
+	VirtualHardwareCompatibilityIssue
+
+	MemorySizeMB int `xml:"memorySizeMB"`
+	MinMemorySizeMB int `xml:"minMemorySizeMB"`
+	MaxMemorySizeMB int `xml:"maxMemorySizeMB"`
+}
+
+func init() {
+	t["MemorySizeNotSupported"] = reflect.TypeOf((*MemorySizeNotSupported)(nil)).Elem()
+}
+
+type MemorySizeNotSupportedByDatastore struct {
+	VirtualHardwareCompatibilityIssue
+
+	Datastore ManagedObjectReference `xml:"datastore"`
+	MemorySizeMB int `xml:"memorySizeMB"`
+	MaxMemorySizeMB int `xml:"maxMemorySizeMB"`
+}
+
+func init() {
+	t["MemorySizeNotSupportedByDatastore"] = reflect.TypeOf((*MemorySizeNotSupportedByDatastore)(nil)).Elem()
+}
+
+type MemorySizeNotSupportedByDatastoreFault MemorySizeNotSupportedByDatastore
+
+func init() {
+	t["MemorySizeNotSupportedByDatastoreFault"] = reflect.TypeOf((*MemorySizeNotSupportedByDatastoreFault)(nil)).Elem()
+}
+
+type MemorySizeNotSupportedFault MemorySizeNotSupported
+
+func init() {
+	t["MemorySizeNotSupportedFault"] = reflect.TypeOf((*MemorySizeNotSupportedFault)(nil)).Elem()
+}
+
+type MemorySnapshotOnIndependentDisk struct {
+	SnapshotFault
+}
+
+func init() {
+	t["MemorySnapshotOnIndependentDisk"] = reflect.TypeOf((*MemorySnapshotOnIndependentDisk)(nil)).Elem()
+}
+
+type MemorySnapshotOnIndependentDiskFault MemorySnapshotOnIndependentDisk
+
+func init() {
+	t["MemorySnapshotOnIndependentDiskFault"] = reflect.TypeOf((*MemorySnapshotOnIndependentDiskFault)(nil)).Elem()
+}
+
+type MergeDvsRequestType struct {
+	This ManagedObjectReference `xml:"_this"`
+	Dvs ManagedObjectReference `xml:"dvs"`
+}
+
+func init() {
+	t["MergeDvsRequestType"] = reflect.TypeOf((*MergeDvsRequestType)(nil)).Elem()
+}
+
+type MergeDvs_Task MergeDvsRequestType
+
+func init() {
+	t["MergeDvs_Task"] = reflect.TypeOf((*MergeDvs_Task)(nil)).Elem()
+}
+
+type MergeDvs_TaskResponse struct {
+	Returnval ManagedObjectReference `xml:"returnval"`
+}
+
+type MergePermissions MergePermissionsRequestType
+
+func init() {
+	t["MergePermissions"] = reflect.TypeOf((*MergePermissions)(nil)).Elem()
+}
+
+type MergePermissionsRequestType struct {
+	This ManagedObjectReference `xml:"_this"`
+	SrcRoleId int `xml:"srcRoleId"`
+	DstRoleId int `xml:"dstRoleId"`
+}
+
+func init() {
+	t["MergePermissionsRequestType"] = reflect.TypeOf((*MergePermissionsRequestType)(nil)).Elem()
+}
+
+type MergePermissionsResponse struct {
+}
+
+type MethodAction struct {
+	Action
+
+	Name string `xml:"name"`
+	Argument []MethodActionArgument `xml:"argument,omitempty"`
+}
+
+func init() {
+	t["MethodAction"] = reflect.TypeOf((*MethodAction)(nil)).Elem()
+}
+
+type MethodActionArgument struct {
+	DynamicData
+
+	Value AnyType `xml:"value,omitempty,typeattr"`
+}
+
+func init() {
+	t["MethodActionArgument"] = reflect.TypeOf((*MethodActionArgument)(nil)).Elem()
+}
+
+type MethodAlreadyDisabledFault struct {
+	RuntimeFault
+
+	SourceId string `xml:"sourceId"`
+}
+
+func init() {
+	t["MethodAlreadyDisabledFault"] = reflect.TypeOf((*MethodAlreadyDisabledFault)(nil)).Elem()
+}
+
+type MethodAlreadyDisabledFaultFault MethodAlreadyDisabledFault
+
+func init() {
+	t["MethodAlreadyDisabledFaultFault"] = reflect.TypeOf((*MethodAlreadyDisabledFaultFault)(nil)).Elem()
+}
+
+type MethodDescription struct {
+	Description
+
+	Key string `xml:"key"`
+}
+
+func init() {
+	t["MethodDescription"] = reflect.TypeOf((*MethodDescription)(nil)).Elem()
+}
+
+type MethodDisabled struct {
+	RuntimeFault
+
+	Source string `xml:"source,omitempty"`
+}
+
+func init() {
+	t["MethodDisabled"] = reflect.TypeOf((*MethodDisabled)(nil)).Elem()
+}
+
+type MethodDisabledFault MethodDisabled
+
+func init() {
+	t["MethodDisabledFault"] = reflect.TypeOf((*MethodDisabledFault)(nil)).Elem()
+}
+
+type MethodFault struct {
+	FaultCause *LocalizedMethodFault `xml:"faultCause,omitempty"`
+	FaultMessage []LocalizableMessage `xml:"faultMessage,omitempty"`
+}
+
+func init() {
+	t["MethodFault"] = reflect.TypeOf((*MethodFault)(nil)).Elem()
+}
+
+type MethodFaultFault BaseMethodFault
+
+func init() {
+	t["MethodFaultFault"] = reflect.TypeOf((*MethodFaultFault)(nil)).Elem()
+}
+
+type MethodNotFound struct {
+	InvalidRequest
+
+	Receiver ManagedObjectReference `xml:"receiver"`
+	Method string `xml:"method"`
+}
+
+func init() {
+	t["MethodNotFound"] = reflect.TypeOf((*MethodNotFound)(nil)).Elem()
+}
+
+type MethodNotFoundFault MethodNotFound
+
+func init() {
+	t["MethodNotFoundFault"] = reflect.TypeOf((*MethodNotFoundFault)(nil)).Elem()
+}
+
+type MetricAlarmExpression struct {
+	AlarmExpression
+
+	Operator MetricAlarmOperator `xml:"operator"`
+	Type string `xml:"type"`
+	Metric PerfMetricId `xml:"metric"`
+	Yellow int `xml:"yellow,omitempty"`
+	YellowInterval int `xml:"yellowInterval,omitempty"`
+	Red int `xml:"red,omitempty"`
+	RedInterval int `xml:"redInterval,omitempty"`
+}
+
+func init() {
+	t["MetricAlarmExpression"] = reflect.TypeOf((*MetricAlarmExpression)(nil)).Elem()
+}
+
+type MigrateVMRequestType struct {
+	This ManagedObjectReference `xml:"_this"`
+	Pool *ManagedObjectReference `xml:"pool,omitempty"`
+	Host *ManagedObjectReference `xml:"host,omitempty"`
+	Priority VirtualMachineMovePriority `xml:"priority"`
+	State VirtualMachinePowerState `xml:"state,omitempty"`
+}
+
+func init() {
+	t["MigrateVMRequestType"] = reflect.TypeOf((*MigrateVMRequestType)(nil)).Elem()
+}
+
+type MigrateVM_Task MigrateVMRequestType
+
+func init() {
+	t["MigrateVM_Task"] = reflect.TypeOf((*MigrateVM_Task)(nil)).Elem()
+}
+
+type MigrateVM_TaskResponse struct {
+	Returnval ManagedObjectReference `xml:"returnval"`
+}
+
+type MigrationDisabled struct {
+	MigrationFault
+}
+
+func init() {
+	t["MigrationDisabled"] = reflect.TypeOf((*MigrationDisabled)(nil)).Elem()
+}
+
+type MigrationDisabledFault MigrationDisabled
+
+func init() {
+	t["MigrationDisabledFault"] = reflect.TypeOf((*MigrationDisabledFault)(nil)).Elem()
+}
+
+type MigrationErrorEvent struct {
+	MigrationEvent
+}
+
+func init() {
+	t["MigrationErrorEvent"] = reflect.TypeOf((*MigrationErrorEvent)(nil)).Elem()
+}
+
+type MigrationEvent struct {
+	VmEvent
+
+	Fault LocalizedMethodFault `xml:"fault"`
+}
+
+func init() {
+	t["MigrationEvent"] = reflect.TypeOf((*MigrationEvent)(nil)).Elem()
+}
+
+type MigrationFault struct {
+	VimFault
+}
+
+func init() {
+	t["MigrationFault"] = reflect.TypeOf((*MigrationFault)(nil)).Elem()
+}
+
+type MigrationFaultFault BaseMigrationFault
+
+func init() {
+	t["MigrationFaultFault"] = reflect.TypeOf((*MigrationFaultFault)(nil)).Elem()
+}
+
+type MigrationFeatureNotSupported struct {
+	MigrationFault
+
+	AtSourceHost bool `xml:"atSourceHost"`
+	FailedHostName string `xml:"failedHostName"`
+	FailedHost ManagedObjectReference `xml:"failedHost"`
+}
+
+func init() {
+	t["MigrationFeatureNotSupported"] = reflect.TypeOf((*MigrationFeatureNotSupported)(nil)).Elem()
+}
+
+type MigrationFeatureNotSupportedFault BaseMigrationFeatureNotSupported
+
+func init() {
+	t["MigrationFeatureNotSupportedFault"] = reflect.TypeOf((*MigrationFeatureNotSupportedFault)(nil)).Elem()
+}
+
+type MigrationHostErrorEvent struct {
+	MigrationEvent
+
+	DstHost HostEventArgument `xml:"dstHost"`
+}
+
+func init() {
+	t["MigrationHostErrorEvent"] = reflect.TypeOf((*MigrationHostErrorEvent)(nil)).Elem()
+}
+
+type MigrationHostWarningEvent struct {
+	MigrationEvent
+
+	DstHost HostEventArgument `xml:"dstHost"`
+}
+
+func init() {
+	t["MigrationHostWarningEvent"] = reflect.TypeOf((*MigrationHostWarningEvent)(nil)).Elem()
+}
+
+type MigrationNotReady struct {
+	MigrationFault
+
+	Reason string `xml:"reason"`
+}
+
+func init() {
+	t["MigrationNotReady"] = reflect.TypeOf((*MigrationNotReady)(nil)).Elem()
+}
+
+type MigrationNotReadyFault MigrationNotReady
+
+func init() {
+	t["MigrationNotReadyFault"] = reflect.TypeOf((*MigrationNotReadyFault)(nil)).Elem()
+}
+
+type MigrationResourceErrorEvent struct {
+	MigrationEvent
+
+	DstPool ResourcePoolEventArgument `xml:"dstPool"`
+	DstHost HostEventArgument `xml:"dstHost"`
+}
+
+func init() {
+	t["MigrationResourceErrorEvent"] = reflect.TypeOf((*MigrationResourceErrorEvent)(nil)).Elem()
+}
+
+type MigrationResourceWarningEvent struct {
+	MigrationEvent
+
+	DstPool ResourcePoolEventArgument `xml:"dstPool"`
+	DstHost HostEventArgument `xml:"dstHost"`
+}
+
+func init() {
+	t["MigrationResourceWarningEvent"] = reflect.TypeOf((*MigrationResourceWarningEvent)(nil)).Elem()
+}
+
+type MigrationWarningEvent struct {
+	MigrationEvent
+}
+
+func init() {
+	t["MigrationWarningEvent"] = reflect.TypeOf((*MigrationWarningEvent)(nil)).Elem()
+}
+
+type MismatchedBundle struct {
+	VimFault
+
+	BundleUuid string `xml:"bundleUuid"`
+	HostUuid string `xml:"hostUuid"`
+	BundleBuildNumber int `xml:"bundleBuildNumber"`
+	HostBuildNumber int `xml:"hostBuildNumber"`
+}
+
+func init() {
+	t["MismatchedBundle"] = reflect.TypeOf((*MismatchedBundle)(nil)).Elem()
+}
+
+type MismatchedBundleFault MismatchedBundle
+
+func init() {
+	t["MismatchedBundleFault"] = reflect.TypeOf((*MismatchedBundleFault)(nil)).Elem()
+}
+
+type MismatchedNetworkPolicies struct {
+	MigrationFault
+
+	Device string `xml:"device"`
+	Backing string `xml:"backing"`
+	Connected bool `xml:"connected"`
+}
+
+func init() {
+	t["MismatchedNetworkPolicies"] = reflect.TypeOf((*MismatchedNetworkPolicies)(nil)).Elem()
+}
+
+type MismatchedNetworkPoliciesFault MismatchedNetworkPolicies
+
+func init() {
+	t["MismatchedNetworkPoliciesFault"] = reflect.TypeOf((*MismatchedNetworkPoliciesFault)(nil)).Elem()
+}
+
+type MismatchedVMotionNetworkNames struct {
+	MigrationFault
+
+	SourceNetwork string `xml:"sourceNetwork"`
+	DestNetwork string `xml:"destNetwork"`
+}
+
+func init() {
+	t["MismatchedVMotionNetworkNames"] = reflect.TypeOf((*MismatchedVMotionNetworkNames)(nil)).Elem()
+}
+
+type MismatchedVMotionNetworkNamesFault MismatchedVMotionNetworkNames
+
+func init() {
+	t["MismatchedVMotionNetworkNamesFault"] = reflect.TypeOf((*MismatchedVMotionNetworkNamesFault)(nil)).Elem()
+}
+
+type MissingBmcSupport struct {
+	VimFault
+}
+
+func init() {
+	t["MissingBmcSupport"] = reflect.TypeOf((*MissingBmcSupport)(nil)).Elem()
+}
+
+type MissingBmcSupportFault MissingBmcSupport
+
+func init() {
+	t["MissingBmcSupportFault"] = reflect.TypeOf((*MissingBmcSupportFault)(nil)).Elem()
+}
+
+type MissingController struct {
+	InvalidDeviceSpec
+}
+
+func init() {
+	t["MissingController"] = reflect.TypeOf((*MissingController)(nil)).Elem()
+}
+
+type MissingControllerFault MissingController
+
+func init() {
+	t["MissingControllerFault"] = reflect.TypeOf((*MissingControllerFault)(nil)).Elem()
+}
+
+type MissingIpPool struct {
+	VAppPropertyFault
+}
+
+func init() {
+	t["MissingIpPool"] = reflect.TypeOf((*MissingIpPool)(nil)).Elem()
+}
+
+type MissingIpPoolFault MissingIpPool
+
+func init() {
+	t["MissingIpPoolFault"] = reflect.TypeOf((*MissingIpPoolFault)(nil)).Elem()
+}
+
+type MissingLinuxCustResources struct {
+	CustomizationFault
+}
+
+func init() {
+	t["MissingLinuxCustResources"] = reflect.TypeOf((*MissingLinuxCustResources)(nil)).Elem()
+}
+
+type MissingLinuxCustResourcesFault MissingLinuxCustResources
+
+func init() {
+	t["MissingLinuxCustResourcesFault"] = reflect.TypeOf((*MissingLinuxCustResourcesFault)(nil)).Elem()
+}
+
+type MissingNetworkIpConfig struct {
+	VAppPropertyFault
+}
+
+func init() {
+	t["MissingNetworkIpConfig"] = reflect.TypeOf((*MissingNetworkIpConfig)(nil)).Elem()
+}
+
+type MissingNetworkIpConfigFault MissingNetworkIpConfig
+
+func init() {
+	t["MissingNetworkIpConfigFault"] = reflect.TypeOf((*MissingNetworkIpConfigFault)(nil)).Elem()
+}
+
+type MissingObject struct {
+	DynamicData
+
+	Obj ManagedObjectReference `xml:"obj"`
+	Fault LocalizedMethodFault `xml:"fault"`
+}
+
+func init() {
+	t["MissingObject"] = reflect.TypeOf((*MissingObject)(nil)).Elem()
+}
+
+type MissingPowerOffConfiguration struct {
+	VAppConfigFault
+}
+
+func init() {
+	t["MissingPowerOffConfiguration"] = reflect.TypeOf((*MissingPowerOffConfiguration)(nil)).Elem()
+}
+
+type MissingPowerOffConfigurationFault MissingPowerOffConfiguration
+
+func init() {
+	t["MissingPowerOffConfigurationFault"] = reflect.TypeOf((*MissingPowerOffConfigurationFault)(nil)).Elem()
+}
+
+type MissingPowerOnConfiguration struct {
+	VAppConfigFault
+}
+
+func init() {
+	t["MissingPowerOnConfiguration"] = reflect.TypeOf((*MissingPowerOnConfiguration)(nil)).Elem()
+}
+
+type MissingPowerOnConfigurationFault MissingPowerOnConfiguration
+
+func init() {
+	t["MissingPowerOnConfigurationFault"] = reflect.TypeOf((*MissingPowerOnConfigurationFault)(nil)).Elem()
+}
+
+type MissingProperty struct {
+	DynamicData
+
+	Path string `xml:"path"`
+	Fault LocalizedMethodFault `xml:"fault"`
+}
+
+func init() {
+	t["MissingProperty"] = reflect.TypeOf((*MissingProperty)(nil)).Elem()
+}
+
+type MissingWindowsCustResources struct {
+	CustomizationFault
+}
+
+func init() {
+	t["MissingWindowsCustResources"] = reflect.TypeOf((*MissingWindowsCustResources)(nil)).Elem()
+}
+
+type MissingWindowsCustResourcesFault MissingWindowsCustResources
+
+func init() {
+	t["MissingWindowsCustResourcesFault"] = reflect.TypeOf((*MissingWindowsCustResourcesFault)(nil)).Elem()
+}
+
+type MksConnectionLimitReached struct {
+	InvalidState
+
+	ConnectionLimit int `xml:"connectionLimit"`
+}
+
+func init() {
+	t["MksConnectionLimitReached"] = reflect.TypeOf((*MksConnectionLimitReached)(nil)).Elem()
+}
+
+type MksConnectionLimitReachedFault MksConnectionLimitReached
+
+func init() {
+	t["MksConnectionLimitReachedFault"] = reflect.TypeOf((*MksConnectionLimitReachedFault)(nil)).Elem()
+}
+
+type ModeInfo struct {
+	DynamicData
+
+	Browse string `xml:"browse,omitempty"`
+	Read string `xml:"read"`
+	Modify string `xml:"modify"`
+	Use string `xml:"use"`
+	Admin string `xml:"admin,omitempty"`
+	Full string `xml:"full"`
+}
+
+func init() {
+	t["ModeInfo"] = reflect.TypeOf((*ModeInfo)(nil)).Elem()
+}
+
+type ModifyListView ModifyListViewRequestType
+
+func init() {
+	t["ModifyListView"] = reflect.TypeOf((*ModifyListView)(nil)).Elem()
+}
+
+type ModifyListViewRequestType struct {
+	This ManagedObjectReference `xml:"_this"`
+	Add []ManagedObjectReference `xml:"add,omitempty"`
+	Remove []ManagedObjectReference `xml:"remove,omitempty"`
+}
+
+func init() {
+	t["ModifyListViewRequestType"] = reflect.TypeOf((*ModifyListViewRequestType)(nil)).Elem()
+}
+
+type ModifyListViewResponse struct {
+	Returnval []ManagedObjectReference `xml:"returnval,omitempty"`
+}
+
+type MonthlyByDayTaskScheduler struct {
+	MonthlyTaskScheduler
+
+	Day int `xml:"day"`
+}
+
+func init() {
+	t["MonthlyByDayTaskScheduler"] = reflect.TypeOf((*MonthlyByDayTaskScheduler)(nil)).Elem()
+}
+
+type MonthlyByWeekdayTaskScheduler struct {
+	MonthlyTaskScheduler
+
+	Offset WeekOfMonth `xml:"offset"`
+	Weekday DayOfWeek `xml:"weekday"`
+}
+
+func init() {
+	t["MonthlyByWeekdayTaskScheduler"] = reflect.TypeOf((*MonthlyByWeekdayTaskScheduler)(nil)).Elem()
+}
+
+type MonthlyTaskScheduler struct {
+	DailyTaskScheduler
+}
+
+func init() {
+	t["MonthlyTaskScheduler"] = reflect.TypeOf((*MonthlyTaskScheduler)(nil)).Elem()
+}
+
+type MountError struct {
+	CustomizationFault
+
+	Vm ManagedObjectReference `xml:"vm"`
+	DiskIndex int `xml:"diskIndex"`
+}
+
+func init() {
+	t["MountError"] = reflect.TypeOf((*MountError)(nil)).Elem()
+}
+
+type MountErrorFault MountError
+
+func init() {
+	t["MountErrorFault"] = reflect.TypeOf((*MountErrorFault)(nil)).Elem()
+}
+
+type MountToolsInstaller MountToolsInstallerRequestType
+
+func init() {
+	t["MountToolsInstaller"] = reflect.TypeOf((*MountToolsInstaller)(nil)).Elem()
+}
+
+type MountToolsInstallerRequestType struct {
+	This ManagedObjectReference `xml:"_this"`
+}
+
+func init() {
+	t["MountToolsInstallerRequestType"] = reflect.TypeOf((*MountToolsInstallerRequestType)(nil)).Elem()
+}
+
+type MountToolsInstallerResponse struct {
+}
+
+type MountVffsVolume MountVffsVolumeRequestType
+
+func init() {
+	t["MountVffsVolume"] = reflect.TypeOf((*MountVffsVolume)(nil)).Elem()
+}
+
+type MountVffsVolumeRequestType struct {
+	This ManagedObjectReference `xml:"_this"`
+	VffsUuid string `xml:"vffsUuid"`
+}
+
+func init() {
+	t["MountVffsVolumeRequestType"] = reflect.TypeOf((*MountVffsVolumeRequestType)(nil)).Elem()
+}
+
+type MountVffsVolumeResponse struct {
+}
+
+type MountVmfsVolume MountVmfsVolumeRequestType
+
+func init() {
+	t["MountVmfsVolume"] = reflect.TypeOf((*MountVmfsVolume)(nil)).Elem()
+}
+
+type MountVmfsVolumeExRequestType struct {
+	This ManagedObjectReference `xml:"_this"`
+	VmfsUuid []string `xml:"vmfsUuid"`
+}
+
+func init() {
+	t["MountVmfsVolumeExRequestType"] = reflect.TypeOf((*MountVmfsVolumeExRequestType)(nil)).Elem()
+}
+
+type MountVmfsVolumeEx_Task MountVmfsVolumeExRequestType
+
+func init() {
+	t["MountVmfsVolumeEx_Task"] = reflect.TypeOf((*MountVmfsVolumeEx_Task)(nil)).Elem()
+}
+
+type MountVmfsVolumeEx_TaskResponse struct {
+	Returnval ManagedObjectReference `xml:"returnval"`
+}
+
+type MountVmfsVolumeRequestType struct {
+	This ManagedObjectReference `xml:"_this"`
+	VmfsUuid string `xml:"vmfsUuid"`
+}
+
+func init() {
+	t["MountVmfsVolumeRequestType"] = reflect.TypeOf((*MountVmfsVolumeRequestType)(nil)).Elem()
+}
+
+type MountVmfsVolumeResponse struct {
+}
+
+type MoveDVPortRequestType struct {
+	This ManagedObjectReference `xml:"_this"`
+	PortKey []string `xml:"portKey"`
+	DestinationPortgroupKey string `xml:"destinationPortgroupKey,omitempty"`
+}
+
+func init() {
+	t["MoveDVPortRequestType"] = reflect.TypeOf((*MoveDVPortRequestType)(nil)).Elem()
+}
+
+type MoveDVPort_Task MoveDVPortRequestType
+
+func init() {
+	t["MoveDVPort_Task"] = reflect.TypeOf((*MoveDVPort_Task)(nil)).Elem()
+}
+
+type MoveDVPort_TaskResponse struct {
+	Returnval ManagedObjectReference `xml:"returnval"`
+}
+
+type MoveDatastoreFileRequestType struct {
+	This ManagedObjectReference `xml:"_this"`
+	SourceName string `xml:"sourceName"`
+	SourceDatacenter *ManagedObjectReference `xml:"sourceDatacenter,omitempty"`
+	DestinationName string `xml:"destinationName"`
+	DestinationDatacenter *ManagedObjectReference `xml:"destinationDatacenter,omitempty"`
+	Force *bool `xml:"force"`
+}
+
+func init() {
+	t["MoveDatastoreFileRequestType"] = reflect.TypeOf((*MoveDatastoreFileRequestType)(nil)).Elem()
+}
+
+type MoveDatastoreFile_Task MoveDatastoreFileRequestType
+
+func init() {
+	t["MoveDatastoreFile_Task"] = reflect.TypeOf((*MoveDatastoreFile_Task)(nil)).Elem()
+}
+
+type MoveDatastoreFile_TaskResponse struct {
+	Returnval ManagedObjectReference `xml:"returnval"`
+}
+
+type MoveDirectoryInGuest MoveDirectoryInGuestRequestType
+
+func init() {
+	t["MoveDirectoryInGuest"] = reflect.TypeOf((*MoveDirectoryInGuest)(nil)).Elem()
+}
+
+type MoveDirectoryInGuestRequestType struct {
+	This ManagedObjectReference `xml:"_this"`
+	Vm ManagedObjectReference `xml:"vm"`
+	Auth BaseGuestAuthentication `xml:"auth,typeattr"`
+	SrcDirectoryPath string `xml:"srcDirectoryPath"`
+	DstDirectoryPath string `xml:"dstDirectoryPath"`
+}
+
+func init() {
+	t["MoveDirectoryInGuestRequestType"] = reflect.TypeOf((*MoveDirectoryInGuestRequestType)(nil)).Elem()
+}
+
+type MoveDirectoryInGuestResponse struct {
+}
+
+type MoveFileInGuest MoveFileInGuestRequestType
+
+func init() {
+	t["MoveFileInGuest"] = reflect.TypeOf((*MoveFileInGuest)(nil)).Elem()
+}
+
+type MoveFileInGuestRequestType struct {
+	This ManagedObjectReference `xml:"_this"`
+	Vm ManagedObjectReference `xml:"vm"`
+	Auth BaseGuestAuthentication `xml:"auth,typeattr"`
+	SrcFilePath string `xml:"srcFilePath"`
+	DstFilePath string `xml:"dstFilePath"`
+	Overwrite bool `xml:"overwrite"`
+}
+
+func init() {
+	t["MoveFileInGuestRequestType"] = reflect.TypeOf((*MoveFileInGuestRequestType)(nil)).Elem()
+}
+
+type MoveFileInGuestResponse struct {
+}
+
+type MoveHostIntoRequestType struct {
+	This ManagedObjectReference `xml:"_this"`
+	Host ManagedObjectReference `xml:"host"`
+	ResourcePool *ManagedObjectReference `xml:"resourcePool,omitempty"`
+}
+
+func init() {
+	t["MoveHostIntoRequestType"] = reflect.TypeOf((*MoveHostIntoRequestType)(nil)).Elem()
+}
+
+type MoveHostInto_Task MoveHostIntoRequestType
+
+func init() {
+	t["MoveHostInto_Task"] = reflect.TypeOf((*MoveHostInto_Task)(nil)).Elem()
+}
+
+type MoveHostInto_TaskResponse struct {
+	Returnval ManagedObjectReference `xml:"returnval"`
+}
+
+type MoveIntoFolderRequestType struct {
+	This ManagedObjectReference `xml:"_this"`
+	List []ManagedObjectReference `xml:"list"`
+}
+
+func init() {
+	t["MoveIntoFolderRequestType"] = reflect.TypeOf((*MoveIntoFolderRequestType)(nil)).Elem()
+}
+
+type MoveIntoFolder_Task MoveIntoFolderRequestType
+
+func init() {
+	t["MoveIntoFolder_Task"] = reflect.TypeOf((*MoveIntoFolder_Task)(nil)).Elem()
+}
+
+type MoveIntoFolder_TaskResponse struct {
+	Returnval ManagedObjectReference `xml:"returnval"`
+}
+
+type MoveIntoRequestType struct {
+	This ManagedObjectReference `xml:"_this"`
+	Host []ManagedObjectReference `xml:"host"`
+}
+
+func init() {
+	t["MoveIntoRequestType"] = reflect.TypeOf((*MoveIntoRequestType)(nil)).Elem()
+}
+
+type MoveIntoResourcePool MoveIntoResourcePoolRequestType
+
+func init() {
+	t["MoveIntoResourcePool"] = reflect.TypeOf((*MoveIntoResourcePool)(nil)).Elem()
+}
+
+type MoveIntoResourcePoolRequestType struct {
+	This ManagedObjectReference `xml:"_this"`
+	List []ManagedObjectReference `xml:"list"`
+}
+
+func init() {
+	t["MoveIntoResourcePoolRequestType"] = reflect.TypeOf((*MoveIntoResourcePoolRequestType)(nil)).Elem()
+}
+
+type MoveIntoResourcePoolResponse struct {
+}
+
+type MoveInto_Task MoveIntoRequestType
+
+func init() {
+	t["MoveInto_Task"] = reflect.TypeOf((*MoveInto_Task)(nil)).Elem()
+}
+
+type MoveInto_TaskResponse struct {
+	Returnval ManagedObjectReference `xml:"returnval"`
+}
+
+type MoveVirtualDiskRequestType struct {
+	This ManagedObjectReference `xml:"_this"`
+	SourceName string `xml:"sourceName"`
+	SourceDatacenter *ManagedObjectReference `xml:"sourceDatacenter,omitempty"`
+	DestName string `xml:"destName"`
+	DestDatacenter *ManagedObjectReference `xml:"destDatacenter,omitempty"`
+	Force *bool `xml:"force"`
+	Profile []BaseVirtualMachineProfileSpec `xml:"profile,omitempty,typeattr"`
+}
+
+func init() {
+	t["MoveVirtualDiskRequestType"] = reflect.TypeOf((*MoveVirtualDiskRequestType)(nil)).Elem()
+}
+
+type MoveVirtualDisk_Task MoveVirtualDiskRequestType
+
+func init() {
+	t["MoveVirtualDisk_Task"] = reflect.TypeOf((*MoveVirtualDisk_Task)(nil)).Elem()
+}
+
+type MoveVirtualDisk_TaskResponse struct {
+	Returnval ManagedObjectReference `xml:"returnval"`
+}
+
+type MtuMatchEvent struct {
+	DvsHealthStatusChangeEvent
+}
+
+func init() {
+	t["MtuMatchEvent"] = reflect.TypeOf((*MtuMatchEvent)(nil)).Elem()
+}
+
+type MtuMismatchEvent struct {
+	DvsHealthStatusChangeEvent
+}
+
+func init() {
+	t["MtuMismatchEvent"] = reflect.TypeOf((*MtuMismatchEvent)(nil)).Elem()
+}
+
+type MultiWriterNotSupported struct {
+	DeviceNotSupported
+}
+
+func init() {
+	t["MultiWriterNotSupported"] = reflect.TypeOf((*MultiWriterNotSupported)(nil)).Elem()
+}
+
+type MultiWriterNotSupportedFault MultiWriterNotSupported
+
+func init() {
+	t["MultiWriterNotSupportedFault"] = reflect.TypeOf((*MultiWriterNotSupportedFault)(nil)).Elem()
+}
+
+type MultipleCertificatesVerifyFault struct {
+	HostConnectFault
+
+	ThumbprintData []MultipleCertificatesVerifyFaultThumbprintData `xml:"thumbprintData"`
+}
+
+func init() {
+	t["MultipleCertificatesVerifyFault"] = reflect.TypeOf((*MultipleCertificatesVerifyFault)(nil)).Elem()
+}
+
+type MultipleCertificatesVerifyFaultFault MultipleCertificatesVerifyFault
+
+func init() {
+	t["MultipleCertificatesVerifyFaultFault"] = reflect.TypeOf((*MultipleCertificatesVerifyFaultFault)(nil)).Elem()
+}
+
+type MultipleCertificatesVerifyFaultThumbprintData struct {
+	DynamicData
+
+	Port int `xml:"port"`
+	Thumbprint string `xml:"thumbprint"`
+}
+
+func init() {
+	t["MultipleCertificatesVerifyFaultThumbprintData"] = reflect.TypeOf((*MultipleCertificatesVerifyFaultThumbprintData)(nil)).Elem()
+}
+
+type MultipleSnapshotsNotSupported struct {
+	SnapshotFault
+}
+
+func init() {
+	t["MultipleSnapshotsNotSupported"] = reflect.TypeOf((*MultipleSnapshotsNotSupported)(nil)).Elem()
+}
+
+type MultipleSnapshotsNotSupportedFault MultipleSnapshotsNotSupported
+
+func init() {
+	t["MultipleSnapshotsNotSupportedFault"] = reflect.TypeOf((*MultipleSnapshotsNotSupportedFault)(nil)).Elem()
+}
+
+type NASDatastoreCreatedEvent struct {
+	HostEvent
+
+	Datastore DatastoreEventArgument `xml:"datastore"`
+}
+
+func init() {
+	t["NASDatastoreCreatedEvent"] = reflect.TypeOf((*NASDatastoreCreatedEvent)(nil)).Elem()
+}
+
+type NamePasswordAuthentication struct {
+	GuestAuthentication
+
+	Username string `xml:"username"`
+	Password string `xml:"password"`
+}
+
+func init() {
+	t["NamePasswordAuthentication"] = reflect.TypeOf((*NamePasswordAuthentication)(nil)).Elem()
+}
+
+type NamespaceFull struct {
+	VimFault
+
+	Name string `xml:"name"`
+	CurrentMaxSize int64 `xml:"currentMaxSize"`
+	RequiredSize int64 `xml:"requiredSize,omitempty"`
+}
+
+func init() {
+	t["NamespaceFull"] = reflect.TypeOf((*NamespaceFull)(nil)).Elem()
+}
+
+type NamespaceFullFault NamespaceFull
+
+func init() {
+	t["NamespaceFullFault"] = reflect.TypeOf((*NamespaceFullFault)(nil)).Elem()
+}
+
+type NamespaceLimitReached struct {
+	VimFault
+
+	Limit int `xml:"limit,omitempty"`
+}
+
+func init() {
+	t["NamespaceLimitReached"] = reflect.TypeOf((*NamespaceLimitReached)(nil)).Elem()
+}
+
+type NamespaceLimitReachedFault NamespaceLimitReached
+
+func init() {
+	t["NamespaceLimitReachedFault"] = reflect.TypeOf((*NamespaceLimitReachedFault)(nil)).Elem()
+}
+
+type NamespaceWriteProtected struct {
+	VimFault
+
+	Name string `xml:"name"`
+}
+
+func init() {
+	t["NamespaceWriteProtected"] = reflect.TypeOf((*NamespaceWriteProtected)(nil)).Elem()
+}
+
+type NamespaceWriteProtectedFault NamespaceWriteProtected
+
+func init() {
+	t["NamespaceWriteProtectedFault"] = reflect.TypeOf((*NamespaceWriteProtectedFault)(nil)).Elem()
+}
+
+type NasConfigFault struct {
+	HostConfigFault
+
+	Name string `xml:"name"`
+}
+
+func init() {
+	t["NasConfigFault"] = reflect.TypeOf((*NasConfigFault)(nil)).Elem()
+}
+
+type NasConfigFaultFault BaseNasConfigFault
+
+func init() {
+	t["NasConfigFaultFault"] = reflect.TypeOf((*NasConfigFaultFault)(nil)).Elem()
+}
+
+type NasConnectionLimitReached struct {
+	NasConfigFault
+
+	RemoteHost string `xml:"remoteHost"`
+	RemotePath string `xml:"remotePath"`
+}
+
+func init() {
+	t["NasConnectionLimitReached"] = reflect.TypeOf((*NasConnectionLimitReached)(nil)).Elem()
+}
+
+type NasConnectionLimitReachedFault NasConnectionLimitReached
+
+func init() {
+	t["NasConnectionLimitReachedFault"] = reflect.TypeOf((*NasConnectionLimitReachedFault)(nil)).Elem()
+}
+
+type NasDatastoreInfo struct {
+	DatastoreInfo
+
+	Nas *HostNasVolume `xml:"nas,omitempty"`
+}
+
+func init() {
+	t["NasDatastoreInfo"] = reflect.TypeOf((*NasDatastoreInfo)(nil)).Elem()
+}
+
+type NasSessionCredentialConflict struct {
+	NasConfigFault
+
+	RemoteHost string `xml:"remoteHost"`
+	RemotePath string `xml:"remotePath"`
+	UserName string `xml:"userName"`
+}
+
+func init() {
+	t["NasSessionCredentialConflict"] = reflect.TypeOf((*NasSessionCredentialConflict)(nil)).Elem()
+}
+
+type NasSessionCredentialConflictFault NasSessionCredentialConflict
+
+func init() {
+	t["NasSessionCredentialConflictFault"] = reflect.TypeOf((*NasSessionCredentialConflictFault)(nil)).Elem()
+}
+
+type NasStorageProfile struct {
+	ApplyProfile
+
+	Key string `xml:"key"`
+}
+
+func init() {
+	t["NasStorageProfile"] = reflect.TypeOf((*NasStorageProfile)(nil)).Elem()
+}
+
+type NasVolumeNotMounted struct {
+	NasConfigFault
+
+	RemoteHost string `xml:"remoteHost"`
+	RemotePath string `xml:"remotePath"`
+}
+
+func init() {
+	t["NasVolumeNotMounted"] = reflect.TypeOf((*NasVolumeNotMounted)(nil)).Elem()
+}
+
+type NasVolumeNotMountedFault NasVolumeNotMounted
+
+func init() {
+	t["NasVolumeNotMountedFault"] = reflect.TypeOf((*NasVolumeNotMountedFault)(nil)).Elem()
+}
+
+type NegatableExpression struct {
+	DynamicData
+
+	Negate *bool `xml:"negate"`
+}
+
+func init() {
+	t["NegatableExpression"] = reflect.TypeOf((*NegatableExpression)(nil)).Elem()
+}
+
+type NetBIOSConfigInfo struct {
+	DynamicData
+
+	Mode string `xml:"mode"`
+}
+
+func init() {
+	t["NetBIOSConfigInfo"] = reflect.TypeOf((*NetBIOSConfigInfo)(nil)).Elem()
+}
+
+type NetDhcpConfigInfo struct {
+	DynamicData
+
+	Ipv6 *NetDhcpConfigInfoDhcpOptions `xml:"ipv6,omitempty"`
+	Ipv4 *NetDhcpConfigInfoDhcpOptions `xml:"ipv4,omitempty"`
+}
+
+func init() {
+	t["NetDhcpConfigInfo"] = reflect.TypeOf((*NetDhcpConfigInfo)(nil)).Elem()
+}
+
+type NetDhcpConfigInfoDhcpOptions struct {
+	DynamicData
+
+	Enable bool `xml:"enable"`
+	Config []KeyValue `xml:"config,omitempty"`
+}
+
+func init() {
+	t["NetDhcpConfigInfoDhcpOptions"] = reflect.TypeOf((*NetDhcpConfigInfoDhcpOptions)(nil)).Elem()
+}
+
+type NetDhcpConfigSpec struct {
+	DynamicData
+
+	Ipv6 *NetDhcpConfigSpecDhcpOptionsSpec `xml:"ipv6,omitempty"`
+	Ipv4 *NetDhcpConfigSpecDhcpOptionsSpec `xml:"ipv4,omitempty"`
+}
+
+func init() {
+	t["NetDhcpConfigSpec"] = reflect.TypeOf((*NetDhcpConfigSpec)(nil)).Elem()
+}
+
+type NetDhcpConfigSpecDhcpOptionsSpec struct {
+	DynamicData
+
+	Enable *bool `xml:"enable"`
+	Config []KeyValue `xml:"config"`
+	Operation string `xml:"operation"`
+}
+
+func init() {
+	t["NetDhcpConfigSpecDhcpOptionsSpec"] = reflect.TypeOf((*NetDhcpConfigSpecDhcpOptionsSpec)(nil)).Elem()
+}
+
+type NetDnsConfigInfo struct {
+	DynamicData
+
+	Dhcp bool `xml:"dhcp"`
+	HostName string `xml:"hostName"`
+	DomainName string `xml:"domainName"`
+	IpAddress []string `xml:"ipAddress,omitempty"`
+	SearchDomain []string `xml:"searchDomain,omitempty"`
+}
+
+func init() {
+	t["NetDnsConfigInfo"] = reflect.TypeOf((*NetDnsConfigInfo)(nil)).Elem()
+}
+
+type NetDnsConfigSpec struct {
+	DynamicData
+
+	Dhcp *bool `xml:"dhcp"`
+	HostName string `xml:"hostName,omitempty"`
+	DomainName string `xml:"domainName,omitempty"`
+	IpAddress []string `xml:"ipAddress,omitempty"`
+	SearchDomain []string `xml:"searchDomain,omitempty"`
+}
+
+func init() {
+	t["NetDnsConfigSpec"] = reflect.TypeOf((*NetDnsConfigSpec)(nil)).Elem()
+}
+
+type NetIpConfigInfo struct {
+	DynamicData
+
+	IpAddress []NetIpConfigInfoIpAddress `xml:"ipAddress,omitempty"`
+	Dhcp *NetDhcpConfigInfo `xml:"dhcp,omitempty"`
+	AutoConfigurationEnabled *bool `xml:"autoConfigurationEnabled"`
+}
+
+func init() {
+	t["NetIpConfigInfo"] = reflect.TypeOf((*NetIpConfigInfo)(nil)).Elem()
+}
+
+type NetIpConfigInfoIpAddress struct {
+	DynamicData
+
+	IpAddress string `xml:"ipAddress"`
+	PrefixLength int `xml:"prefixLength"`
+	Origin string `xml:"origin,omitempty"`
+	State string `xml:"state,omitempty"`
+	Lifetime *time.Time `xml:"lifetime"`
+}
+
+func init() {
+	t["NetIpConfigInfoIpAddress"] = reflect.TypeOf((*NetIpConfigInfoIpAddress)(nil)).Elem()
+}
+
+type NetIpConfigSpec struct {
+	DynamicData
+
+	IpAddress []NetIpConfigSpecIpAddressSpec `xml:"ipAddress,omitempty"`
+	Dhcp *NetDhcpConfigSpec `xml:"dhcp,omitempty"`
+	AutoConfigurationEnabled *bool `xml:"autoConfigurationEnabled"`
+}
+
+func init() {
+	t["NetIpConfigSpec"] = reflect.TypeOf((*NetIpConfigSpec)(nil)).Elem()
+}
+
+type NetIpConfigSpecIpAddressSpec struct {
+	DynamicData
+
+	IpAddress string `xml:"ipAddress"`
+	PrefixLength int `xml:"prefixLength"`
+	Operation string `xml:"operation"`
+}
+
+func init() {
+	t["NetIpConfigSpecIpAddressSpec"] = reflect.TypeOf((*NetIpConfigSpecIpAddressSpec)(nil)).Elem()
+}
+
+type NetIpRouteConfigInfo struct {
+	DynamicData
+
+	IpRoute []NetIpRouteConfigInfoIpRoute `xml:"ipRoute,omitempty"`
+}
+
+func init() {
+	t["NetIpRouteConfigInfo"] = reflect.TypeOf((*NetIpRouteConfigInfo)(nil)).Elem()
+}
+
+type NetIpRouteConfigInfoGateway struct {
+	DynamicData
+
+	IpAddress string `xml:"ipAddress,omitempty"`
+	Device string `xml:"device,omitempty"`
+}
+
+func init() {
+	t["NetIpRouteConfigInfoGateway"] = reflect.TypeOf((*NetIpRouteConfigInfoGateway)(nil)).Elem()
+}
+
+type NetIpRouteConfigInfoIpRoute struct {
+	DynamicData
+
+	Network string `xml:"network"`
+	PrefixLength int `xml:"prefixLength"`
+	Gateway NetIpRouteConfigInfoGateway `xml:"gateway"`
+}
+
+func init() {
+	t["NetIpRouteConfigInfoIpRoute"] = reflect.TypeOf((*NetIpRouteConfigInfoIpRoute)(nil)).Elem()
+}
+
+type NetIpRouteConfigSpec struct {
+	DynamicData
+
+	IpRoute []NetIpRouteConfigSpecIpRouteSpec `xml:"ipRoute,omitempty"`
+}
+
+func init() {
+	t["NetIpRouteConfigSpec"] = reflect.TypeOf((*NetIpRouteConfigSpec)(nil)).Elem()
+}
+
+type NetIpRouteConfigSpecGatewaySpec struct {
+	DynamicData
+
+	IpAddress string `xml:"ipAddress,omitempty"`
+	Device string `xml:"device,omitempty"`
+}
+
+func init() {
+	t["NetIpRouteConfigSpecGatewaySpec"] = reflect.TypeOf((*NetIpRouteConfigSpecGatewaySpec)(nil)).Elem()
+}
+
+type NetIpRouteConfigSpecIpRouteSpec struct {
+	DynamicData
+
+	Network string `xml:"network"`
+	PrefixLength int `xml:"prefixLength"`
+	Gateway NetIpRouteConfigSpecGatewaySpec `xml:"gateway"`
+	Operation string `xml:"operation"`
+}
+
+func init() {
+	t["NetIpRouteConfigSpecIpRouteSpec"] = reflect.TypeOf((*NetIpRouteConfigSpecIpRouteSpec)(nil)).Elem()
+}
+
+type NetIpStackInfo struct {
+	DynamicData
+
+	Neighbor []NetIpStackInfoNetToMedia `xml:"neighbor,omitempty"`
+	DefaultRouter []NetIpStackInfoDefaultRouter `xml:"defaultRouter,omitempty"`
+}
+
+func init() {
+	t["NetIpStackInfo"] = reflect.TypeOf((*NetIpStackInfo)(nil)).Elem()
+}
+
+type NetIpStackInfoDefaultRouter struct {
+	DynamicData
+
+	IpAddress string `xml:"ipAddress"`
+	Device string `xml:"device"`
+	Lifetime time.Time `xml:"lifetime"`
+	Preference string `xml:"preference"`
+}
+
+func init() {
+	t["NetIpStackInfoDefaultRouter"] = reflect.TypeOf((*NetIpStackInfoDefaultRouter)(nil)).Elem()
+}
+
+type NetIpStackInfoNetToMedia struct {
+	DynamicData
+
+	IpAddress string `xml:"ipAddress"`
+	PhysicalAddress string `xml:"physicalAddress"`
+	Device string `xml:"device"`
+	Type string `xml:"type"`
+}
+
+func init() {
+	t["NetIpStackInfoNetToMedia"] = reflect.TypeOf((*NetIpStackInfoNetToMedia)(nil)).Elem()
+}
+
+type NetStackInstanceProfile struct {
+	ApplyProfile
+
+	Key string `xml:"key"`
+	DnsConfig NetworkProfileDnsConfigProfile `xml:"dnsConfig"`
+	IpRouteConfig IpRouteProfile `xml:"ipRouteConfig"`
+}
+
+func init() {
+	t["NetStackInstanceProfile"] = reflect.TypeOf((*NetStackInstanceProfile)(nil)).Elem()
+}
+
+type NetworkCopyFault struct {
+	FileFault
+}
+
+func init() {
+	t["NetworkCopyFault"] = reflect.TypeOf((*NetworkCopyFault)(nil)).Elem()
+}
+
+type NetworkCopyFaultFault NetworkCopyFault
+
+func init() {
+	t["NetworkCopyFaultFault"] = reflect.TypeOf((*NetworkCopyFaultFault)(nil)).Elem()
+}
+
+type NetworkDisruptedAndConfigRolledBack struct {
+	VimFault
+
+	Host string `xml:"host"`
+}
+
+func init() {
+	t["NetworkDisruptedAndConfigRolledBack"] = reflect.TypeOf((*NetworkDisruptedAndConfigRolledBack)(nil)).Elem()
+}
+
+type NetworkDisruptedAndConfigRolledBackFault NetworkDisruptedAndConfigRolledBack
+
+func init() {
+	t["NetworkDisruptedAndConfigRolledBackFault"] = reflect.TypeOf((*NetworkDisruptedAndConfigRolledBackFault)(nil)).Elem()
+}
+
+type NetworkEventArgument struct {
+	EntityEventArgument
+
+	Network ManagedObjectReference `xml:"network"`
+}
+
+func init() {
+	t["NetworkEventArgument"] = reflect.TypeOf((*NetworkEventArgument)(nil)).Elem()
+}
+
+type NetworkInaccessible struct {
+	NasConfigFault
+}
+
+func init() {
+	t["NetworkInaccessible"] = reflect.TypeOf((*NetworkInaccessible)(nil)).Elem()
+}
+
+type NetworkInaccessibleFault NetworkInaccessible
+
+func init() {
+	t["NetworkInaccessibleFault"] = reflect.TypeOf((*NetworkInaccessibleFault)(nil)).Elem()
+}
+
+type NetworkPolicyProfile struct {
+	ApplyProfile
+}
+
+func init() {
+	t["NetworkPolicyProfile"] = reflect.TypeOf((*NetworkPolicyProfile)(nil)).Elem()
+}
+
+type NetworkProfile struct {
+	ApplyProfile
+
+	Vswitch []VirtualSwitchProfile `xml:"vswitch,omitempty"`
+	VmPortGroup []VmPortGroupProfile `xml:"vmPortGroup,omitempty"`
+	HostPortGroup []HostPortGroupProfile `xml:"hostPortGroup,omitempty"`
+	ServiceConsolePortGroup []ServiceConsolePortGroupProfile `xml:"serviceConsolePortGroup,omitempty"`
+	DnsConfig *NetworkProfileDnsConfigProfile `xml:"dnsConfig,omitempty"`
+	IpRouteConfig *IpRouteProfile `xml:"ipRouteConfig,omitempty"`
+	ConsoleIpRouteConfig *IpRouteProfile `xml:"consoleIpRouteConfig,omitempty"`
+	Pnic []PhysicalNicProfile `xml:"pnic,omitempty"`
+	Dvswitch []DvsProfile `xml:"dvswitch,omitempty"`
+	DvsServiceConsoleNic []DvsServiceConsoleVNicProfile `xml:"dvsServiceConsoleNic,omitempty"`
+	DvsHostNic []DvsHostVNicProfile `xml:"dvsHostNic,omitempty"`
+	NetStackInstance []NetStackInstanceProfile `xml:"netStackInstance,omitempty"`
+}
+
+func init() {
+	t["NetworkProfile"] = reflect.TypeOf((*NetworkProfile)(nil)).Elem()
+}
+
+type NetworkProfileDnsConfigProfile struct {
+	ApplyProfile
+}
+
+func init() {
+	t["NetworkProfileDnsConfigProfile"] = reflect.TypeOf((*NetworkProfileDnsConfigProfile)(nil)).Elem()
+}
+
+type NetworkRollbackEvent struct {
+	Event
+
+	MethodName string `xml:"methodName"`
+	TransactionId string `xml:"transactionId"`
+}
+
+func init() {
+	t["NetworkRollbackEvent"] = reflect.TypeOf((*NetworkRollbackEvent)(nil)).Elem()
+}
+
+type NetworkSummary struct {
+	DynamicData
+
+	Network *ManagedObjectReference `xml:"network,omitempty"`
+	Name string `xml:"name"`
+	Accessible bool `xml:"accessible"`
+	IpPoolName string `xml:"ipPoolName,omitempty"`
+	IpPoolId int `xml:"ipPoolId,omitempty"`
+}
+
+func init() {
+	t["NetworkSummary"] = reflect.TypeOf((*NetworkSummary)(nil)).Elem()
+}
+
+type NetworksMayNotBeTheSame struct {
+	MigrationFault
+
+	Name string `xml:"name,omitempty"`
+}
+
+func init() {
+	t["NetworksMayNotBeTheSame"] = reflect.TypeOf((*NetworksMayNotBeTheSame)(nil)).Elem()
+}
+
+type NetworksMayNotBeTheSameFault NetworksMayNotBeTheSame
+
+func init() {
+	t["NetworksMayNotBeTheSameFault"] = reflect.TypeOf((*NetworksMayNotBeTheSameFault)(nil)).Elem()
+}
+
+type NicSettingMismatch struct {
+	CustomizationFault
+
+	NumberOfNicsInSpec int `xml:"numberOfNicsInSpec"`
+	NumberOfNicsInVM int `xml:"numberOfNicsInVM"`
+}
+
+func init() {
+	t["NicSettingMismatch"] = reflect.TypeOf((*NicSettingMismatch)(nil)).Elem()
+}
+
+type NicSettingMismatchFault NicSettingMismatch
+
+func init() {
+	t["NicSettingMismatchFault"] = reflect.TypeOf((*NicSettingMismatchFault)(nil)).Elem()
+}
+
+type NoAccessUserEvent struct {
+	SessionEvent
+
+	IpAddress string `xml:"ipAddress"`
+}
+
+func init() {
+	t["NoAccessUserEvent"] = reflect.TypeOf((*NoAccessUserEvent)(nil)).Elem()
+}
+
+type NoActiveHostInCluster struct {
+	InvalidState
+
+	ComputeResource ManagedObjectReference `xml:"computeResource"`
+}
+
+func init() {
+	t["NoActiveHostInCluster"] = reflect.TypeOf((*NoActiveHostInCluster)(nil)).Elem()
+}
+
+type NoActiveHostInClusterFault NoActiveHostInCluster
+
+func init() {
+	t["NoActiveHostInClusterFault"] = reflect.TypeOf((*NoActiveHostInClusterFault)(nil)).Elem()
+}
+
+type NoAvailableIp struct {
+	VAppPropertyFault
+
+	Network ManagedObjectReference `xml:"network"`
+}
+
+func init() {
+	t["NoAvailableIp"] = reflect.TypeOf((*NoAvailableIp)(nil)).Elem()
+}
+
+type NoAvailableIpFault NoAvailableIp
+
+func init() {
+	t["NoAvailableIpFault"] = reflect.TypeOf((*NoAvailableIpFault)(nil)).Elem()
+}
+
+type NoClientCertificate struct {
+	VimFault
+}
+
+func init() {
+	t["NoClientCertificate"] = reflect.TypeOf((*NoClientCertificate)(nil)).Elem()
+}
+
+type NoClientCertificateFault NoClientCertificate
+
+func init() {
+	t["NoClientCertificateFault"] = reflect.TypeOf((*NoClientCertificateFault)(nil)).Elem()
+}
+
+type NoCompatibleDatastore struct {
+	VimFault
+}
+
+func init() {
+	t["NoCompatibleDatastore"] = reflect.TypeOf((*NoCompatibleDatastore)(nil)).Elem()
+}
+
+type NoCompatibleDatastoreFault NoCompatibleDatastore
+
+func init() {
+	t["NoCompatibleDatastoreFault"] = reflect.TypeOf((*NoCompatibleDatastoreFault)(nil)).Elem()
+}
+
+type NoCompatibleHardAffinityHost struct {
+	VmConfigFault
+
+	VmName string `xml:"vmName"`
+}
+
+func init() {
+	t["NoCompatibleHardAffinityHost"] = reflect.TypeOf((*NoCompatibleHardAffinityHost)(nil)).Elem()
+}
+
+type NoCompatibleHardAffinityHostFault NoCompatibleHardAffinityHost
+
+func init() {
+	t["NoCompatibleHardAffinityHostFault"] = reflect.TypeOf((*NoCompatibleHardAffinityHostFault)(nil)).Elem()
+}
+
+type NoCompatibleHost struct {
+	VimFault
+
+	Host []ManagedObjectReference `xml:"host,omitempty"`
+	Error []LocalizedMethodFault `xml:"error,omitempty"`
+}
+
+func init() {
+	t["NoCompatibleHost"] = reflect.TypeOf((*NoCompatibleHost)(nil)).Elem()
+}
+
+type NoCompatibleHostFault BaseNoCompatibleHost
+
+func init() {
+	t["NoCompatibleHostFault"] = reflect.TypeOf((*NoCompatibleHostFault)(nil)).Elem()
+}
+
+type NoCompatibleHostWithAccessToDevice struct {
+	NoCompatibleHost
+}
+
+func init() {
+	t["NoCompatibleHostWithAccessToDevice"] = reflect.TypeOf((*NoCompatibleHostWithAccessToDevice)(nil)).Elem()
+}
+
+type NoCompatibleHostWithAccessToDeviceFault NoCompatibleHostWithAccessToDevice
+
+func init() {
+	t["NoCompatibleHostWithAccessToDeviceFault"] = reflect.TypeOf((*NoCompatibleHostWithAccessToDeviceFault)(nil)).Elem()
+}
+
+type NoCompatibleSoftAffinityHost struct {
+	VmConfigFault
+
+	VmName string `xml:"vmName"`
+}
+
+func init() {
+	t["NoCompatibleSoftAffinityHost"] = reflect.TypeOf((*NoCompatibleSoftAffinityHost)(nil)).Elem()
+}
+
+type NoCompatibleSoftAffinityHostFault NoCompatibleSoftAffinityHost
+
+func init() {
+	t["NoCompatibleSoftAffinityHostFault"] = reflect.TypeOf((*NoCompatibleSoftAffinityHostFault)(nil)).Elem()
+}
+
+type NoConnectedDatastore struct {
+	VimFault
+}
+
+func init() {
+	t["NoConnectedDatastore"] = reflect.TypeOf((*NoConnectedDatastore)(nil)).Elem()
+}
+
+type NoConnectedDatastoreFault NoConnectedDatastore
+
+func init() {
+	t["NoConnectedDatastoreFault"] = reflect.TypeOf((*NoConnectedDatastoreFault)(nil)).Elem()
+}
+
+type NoDatastoresConfiguredEvent struct {
+	HostEvent
+}
+
+func init() {
+	t["NoDatastoresConfiguredEvent"] = reflect.TypeOf((*NoDatastoresConfiguredEvent)(nil)).Elem()
+}
+
+type NoDiskFound struct {
+	VimFault
+}
+
+func init() {
+	t["NoDiskFound"] = reflect.TypeOf((*NoDiskFound)(nil)).Elem()
+}
+
+type NoDiskFoundFault NoDiskFound
+
+func init() {
+	t["NoDiskFoundFault"] = reflect.TypeOf((*NoDiskFoundFault)(nil)).Elem()
+}
+
+type NoDiskSpace struct {
+	FileFault
+
+	Datastore string `xml:"datastore"`
+}
+
+func init() {
+	t["NoDiskSpace"] = reflect.TypeOf((*NoDiskSpace)(nil)).Elem()
+}
+
+type NoDiskSpaceFault NoDiskSpace
+
+func init() {
+	t["NoDiskSpaceFault"] = reflect.TypeOf((*NoDiskSpaceFault)(nil)).Elem()
+}
+
+type NoDisksToCustomize struct {
+	CustomizationFault
+}
+
+func init() {
+	t["NoDisksToCustomize"] = reflect.TypeOf((*NoDisksToCustomize)(nil)).Elem()
+}
+
+type NoDisksToCustomizeFault NoDisksToCustomize
+
+func init() {
+	t["NoDisksToCustomizeFault"] = reflect.TypeOf((*NoDisksToCustomizeFault)(nil)).Elem()
+}
+
+type NoGateway struct {
+	HostConfigFault
+}
+
+func init() {
+	t["NoGateway"] = reflect.TypeOf((*NoGateway)(nil)).Elem()
+}
+
+type NoGatewayFault NoGateway
+
+func init() {
+	t["NoGatewayFault"] = reflect.TypeOf((*NoGatewayFault)(nil)).Elem()
+}
+
+type NoGuestHeartbeat struct {
+	MigrationFault
+}
+
+func init() {
+	t["NoGuestHeartbeat"] = reflect.TypeOf((*NoGuestHeartbeat)(nil)).Elem()
+}
+
+type NoGuestHeartbeatFault NoGuestHeartbeat
+
+func init() {
+	t["NoGuestHeartbeatFault"] = reflect.TypeOf((*NoGuestHeartbeatFault)(nil)).Elem()
+}
+
+type NoHost struct {
+	HostConnectFault
+
+	Name string `xml:"name,omitempty"`
+}
+
+func init() {
+	t["NoHost"] = reflect.TypeOf((*NoHost)(nil)).Elem()
+}
+
+type NoHostFault NoHost
+
+func init() {
+	t["NoHostFault"] = reflect.TypeOf((*NoHostFault)(nil)).Elem()
+}
+
+type NoHostSuitableForFtSecondary struct {
+	VmFaultToleranceIssue
+
+	Vm ManagedObjectReference `xml:"vm"`
+	VmName string `xml:"vmName"`
+}
+
+func init() {
+	t["NoHostSuitableForFtSecondary"] = reflect.TypeOf((*NoHostSuitableForFtSecondary)(nil)).Elem()
+}
+
+type NoHostSuitableForFtSecondaryFault NoHostSuitableForFtSecondary
+
+func init() {
+	t["NoHostSuitableForFtSecondaryFault"] = reflect.TypeOf((*NoHostSuitableForFtSecondaryFault)(nil)).Elem()
+}
+
+type NoLicenseEvent struct {
+	LicenseEvent
+
+	Feature LicenseFeatureInfo `xml:"feature"`
+}
+
+func init() {
+	t["NoLicenseEvent"] = reflect.TypeOf((*NoLicenseEvent)(nil)).Elem()
+}
+
+type NoLicenseServerConfigured struct {
+	NotEnoughLicenses
+}
+
+func init() {
+	t["NoLicenseServerConfigured"] = reflect.TypeOf((*NoLicenseServerConfigured)(nil)).Elem()
+}
+
+type NoLicenseServerConfiguredFault NoLicenseServerConfigured
+
+func init() {
+	t["NoLicenseServerConfiguredFault"] = reflect.TypeOf((*NoLicenseServerConfiguredFault)(nil)).Elem()
+}
+
+type NoMaintenanceModeDrsRecommendationForVM struct {
+	VmEvent
+}
+
+func init() {
+	t["NoMaintenanceModeDrsRecommendationForVM"] = reflect.TypeOf((*NoMaintenanceModeDrsRecommendationForVM)(nil)).Elem()
+}
+
+type NoPeerHostFound struct {
+	HostPowerOpFailed
+}
+
+func init() {
+	t["NoPeerHostFound"] = reflect.TypeOf((*NoPeerHostFound)(nil)).Elem()
+}
+
+type NoPeerHostFoundFault NoPeerHostFound
+
+func init() {
+	t["NoPeerHostFoundFault"] = reflect.TypeOf((*NoPeerHostFoundFault)(nil)).Elem()
+}
+
+type NoPermission struct {
+	SecurityError
+
+	Object ManagedObjectReference `xml:"object"`
+	PrivilegeId string `xml:"privilegeId"`
+}
+
+func init() {
+	t["NoPermission"] = reflect.TypeOf((*NoPermission)(nil)).Elem()
+}
+
+type NoPermissionFault BaseNoPermission
+
+func init() {
+	t["NoPermissionFault"] = reflect.TypeOf((*NoPermissionFault)(nil)).Elem()
+}
+
+type NoPermissionOnAD struct {
+	ActiveDirectoryFault
+}
+
+func init() {
+	t["NoPermissionOnAD"] = reflect.TypeOf((*NoPermissionOnAD)(nil)).Elem()
+}
+
+type NoPermissionOnADFault NoPermissionOnAD
+
+func init() {
+	t["NoPermissionOnADFault"] = reflect.TypeOf((*NoPermissionOnADFault)(nil)).Elem()
+}
+
+type NoPermissionOnHost struct {
+	HostConnectFault
+}
+
+func init() {
+	t["NoPermissionOnHost"] = reflect.TypeOf((*NoPermissionOnHost)(nil)).Elem()
+}
+
+type NoPermissionOnHostFault NoPermissionOnHost
+
+func init() {
+	t["NoPermissionOnHostFault"] = reflect.TypeOf((*NoPermissionOnHostFault)(nil)).Elem()
+}
+
+type NoPermissionOnNasVolume struct {
+	NasConfigFault
+
+	UserName string `xml:"userName,omitempty"`
+}
+
+func init() {
+	t["NoPermissionOnNasVolume"] = reflect.TypeOf((*NoPermissionOnNasVolume)(nil)).Elem()
+}
+
+type NoPermissionOnNasVolumeFault NoPermissionOnNasVolume
+
+func init() {
+	t["NoPermissionOnNasVolumeFault"] = reflect.TypeOf((*NoPermissionOnNasVolumeFault)(nil)).Elem()
+}
+
+type NoSubjectName struct {
+	VimFault
+}
+
+func init() {
+	t["NoSubjectName"] = reflect.TypeOf((*NoSubjectName)(nil)).Elem()
+}
+
+type NoSubjectNameFault NoSubjectName
+
+func init() {
+	t["NoSubjectNameFault"] = reflect.TypeOf((*NoSubjectNameFault)(nil)).Elem()
+}
+
+type NoVcManagedIpConfigured struct {
+	VAppPropertyFault
+}
+
+func init() {
+	t["NoVcManagedIpConfigured"] = reflect.TypeOf((*NoVcManagedIpConfigured)(nil)).Elem()
+}
+
+type NoVcManagedIpConfiguredFault NoVcManagedIpConfigured
+
+func init() {
+	t["NoVcManagedIpConfiguredFault"] = reflect.TypeOf((*NoVcManagedIpConfiguredFault)(nil)).Elem()
+}
+
+type NoVirtualNic struct {
+	HostConfigFault
+}
+
+func init() {
+	t["NoVirtualNic"] = reflect.TypeOf((*NoVirtualNic)(nil)).Elem()
+}
+
+type NoVirtualNicFault NoVirtualNic
+
+func init() {
+	t["NoVirtualNicFault"] = reflect.TypeOf((*NoVirtualNicFault)(nil)).Elem()
+}
+
+type NoVmInVApp struct {
+	VAppConfigFault
+}
+
+func init() {
+	t["NoVmInVApp"] = reflect.TypeOf((*NoVmInVApp)(nil)).Elem()
+}
+
+type NoVmInVAppFault NoVmInVApp
+
+func init() {
+	t["NoVmInVAppFault"] = reflect.TypeOf((*NoVmInVAppFault)(nil)).Elem()
+}
+
+type NonADUserRequired struct {
+	ActiveDirectoryFault
+}
+
+func init() {
+	t["NonADUserRequired"] = reflect.TypeOf((*NonADUserRequired)(nil)).Elem()
+}
+
+type NonADUserRequiredFault NonADUserRequired
+
+func init() {
+	t["NonADUserRequiredFault"] = reflect.TypeOf((*NonADUserRequiredFault)(nil)).Elem()
+}
+
+type NonHomeRDMVMotionNotSupported struct {
+	MigrationFeatureNotSupported
+
+	Device string `xml:"device"`
+}
+
+func init() {
+	t["NonHomeRDMVMotionNotSupported"] = reflect.TypeOf((*NonHomeRDMVMotionNotSupported)(nil)).Elem()
+}
+
+type NonHomeRDMVMotionNotSupportedFault NonHomeRDMVMotionNotSupported
+
+func init() {
+	t["NonHomeRDMVMotionNotSupportedFault"] = reflect.TypeOf((*NonHomeRDMVMotionNotSupportedFault)(nil)).Elem()
+}
+
+type NonPersistentDisksNotSupported struct {
+	DeviceNotSupported
+}
+
+func init() {
+	t["NonPersistentDisksNotSupported"] = reflect.TypeOf((*NonPersistentDisksNotSupported)(nil)).Elem()
+}
+
+type NonPersistentDisksNotSupportedFault NonPersistentDisksNotSupported
+
+func init() {
+	t["NonPersistentDisksNotSupportedFault"] = reflect.TypeOf((*NonPersistentDisksNotSupportedFault)(nil)).Elem()
+}
+
+type NonVIWorkloadDetectedOnDatastoreEvent struct {
+	DatastoreEvent
+}
+
+func init() {
+	t["NonVIWorkloadDetectedOnDatastoreEvent"] = reflect.TypeOf((*NonVIWorkloadDetectedOnDatastoreEvent)(nil)).Elem()
+}
+
+type NonVmwareOuiMacNotSupportedHost struct {
+	NotSupportedHost
+
+	HostName string `xml:"hostName"`
+}
+
+func init() {
+	t["NonVmwareOuiMacNotSupportedHost"] = reflect.TypeOf((*NonVmwareOuiMacNotSupportedHost)(nil)).Elem()
+}
+
+type NonVmwareOuiMacNotSupportedHostFault NonVmwareOuiMacNotSupportedHost
+
+func init() {
+	t["NonVmwareOuiMacNotSupportedHostFault"] = reflect.TypeOf((*NonVmwareOuiMacNotSupportedHostFault)(nil)).Elem()
+}
+
+type NotADirectory struct {
+	FileFault
+}
+
+func init() {
+	t["NotADirectory"] = reflect.TypeOf((*NotADirectory)(nil)).Elem()
+}
+
+type NotADirectoryFault NotADirectory
+
+func init() {
+	t["NotADirectoryFault"] = reflect.TypeOf((*NotADirectoryFault)(nil)).Elem()
+}
+
+type NotAFile struct {
+	FileFault
+}
+
+func init() {
+	t["NotAFile"] = reflect.TypeOf((*NotAFile)(nil)).Elem()
+}
+
+type NotAFileFault NotAFile
+
+func init() {
+	t["NotAFileFault"] = reflect.TypeOf((*NotAFileFault)(nil)).Elem()
+}
+
+type NotAuthenticated struct {
+	NoPermission
+}
+
+func init() {
+	t["NotAuthenticated"] = reflect.TypeOf((*NotAuthenticated)(nil)).Elem()
+}
+
+type NotAuthenticatedFault NotAuthenticated
+
+func init() {
+	t["NotAuthenticatedFault"] = reflect.TypeOf((*NotAuthenticatedFault)(nil)).Elem()
+}
+
+type NotEnoughCpus struct {
+	VirtualHardwareCompatibilityIssue
+
+	NumCpuDest int `xml:"numCpuDest"`
+	NumCpuVm int `xml:"numCpuVm"`
+}
+
+func init() {
+	t["NotEnoughCpus"] = reflect.TypeOf((*NotEnoughCpus)(nil)).Elem()
+}
+
+type NotEnoughCpusFault BaseNotEnoughCpus
+
+func init() {
+	t["NotEnoughCpusFault"] = reflect.TypeOf((*NotEnoughCpusFault)(nil)).Elem()
+}
+
+type NotEnoughLicenses struct {
+	RuntimeFault
+}
+
+func init() {
+	t["NotEnoughLicenses"] = reflect.TypeOf((*NotEnoughLicenses)(nil)).Elem()
+}
+
+type NotEnoughLicensesFault BaseNotEnoughLicenses
+
+func init() {
+	t["NotEnoughLicensesFault"] = reflect.TypeOf((*NotEnoughLicensesFault)(nil)).Elem()
+}
+
+type NotEnoughLogicalCpus struct {
+	NotEnoughCpus
+
+	Host *ManagedObjectReference `xml:"host,omitempty"`
+}
+
+func init() {
+	t["NotEnoughLogicalCpus"] = reflect.TypeOf((*NotEnoughLogicalCpus)(nil)).Elem()
+}
+
+type NotEnoughLogicalCpusFault NotEnoughLogicalCpus
+
+func init() {
+	t["NotEnoughLogicalCpusFault"] = reflect.TypeOf((*NotEnoughLogicalCpusFault)(nil)).Elem()
+}
+
+type NotEnoughResourcesToStartVmEvent struct {
+	VmEvent
+
+	Reason string `xml:"reason,omitempty"`
+}
+
+func init() {
+	t["NotEnoughResourcesToStartVmEvent"] = reflect.TypeOf((*NotEnoughResourcesToStartVmEvent)(nil)).Elem()
+}
+
+type NotFound struct {
+	VimFault
+}
+
+func init() {
+	t["NotFound"] = reflect.TypeOf((*NotFound)(nil)).Elem()
+}
+
+type NotFoundFault NotFound
+
+func init() {
+	t["NotFoundFault"] = reflect.TypeOf((*NotFoundFault)(nil)).Elem()
+}
+
+type NotImplemented struct {
+	RuntimeFault
+}
+
+func init() {
+	t["NotImplemented"] = reflect.TypeOf((*NotImplemented)(nil)).Elem()
+}
+
+type NotImplementedFault NotImplemented
+
+func init() {
+	t["NotImplementedFault"] = reflect.TypeOf((*NotImplementedFault)(nil)).Elem()
+}
+
+type NotSupported struct {
+	RuntimeFault
+}
+
+func init() {
+	t["NotSupported"] = reflect.TypeOf((*NotSupported)(nil)).Elem()
+}
+
+type NotSupportedDeviceForFT struct {
+	VmFaultToleranceIssue
+
+	Host ManagedObjectReference `xml:"host"`
+	HostName string `xml:"hostName,omitempty"`
+	Vm ManagedObjectReference `xml:"vm"`
+	VmName string `xml:"vmName,omitempty"`
+	DeviceType string `xml:"deviceType"`
+	DeviceLabel string `xml:"deviceLabel,omitempty"`
+}
+
+func init() {
+	t["NotSupportedDeviceForFT"] = reflect.TypeOf((*NotSupportedDeviceForFT)(nil)).Elem()
+}
+
+type NotSupportedDeviceForFTFault NotSupportedDeviceForFT
+
+func init() {
+	t["NotSupportedDeviceForFTFault"] = reflect.TypeOf((*NotSupportedDeviceForFTFault)(nil)).Elem()
+}
+
+type NotSupportedFault BaseNotSupported
+
+func init() {
+	t["NotSupportedFault"] = reflect.TypeOf((*NotSupportedFault)(nil)).Elem()
+}
+
+type NotSupportedHost struct {
+	HostConnectFault
+
+	ProductName string `xml:"productName,omitempty"`
+	ProductVersion string `xml:"productVersion,omitempty"`
+}
+
+func init() {
+	t["NotSupportedHost"] = 
reflect.TypeOf((*NotSupportedHost)(nil)).Elem() +} + +type NotSupportedHostFault BaseNotSupportedHost + +func init() { + t["NotSupportedHostFault"] = reflect.TypeOf((*NotSupportedHostFault)(nil)).Elem() +} + +type NotSupportedHostForChecksum struct { + VimFault +} + +func init() { + t["NotSupportedHostForChecksum"] = reflect.TypeOf((*NotSupportedHostForChecksum)(nil)).Elem() +} + +type NotSupportedHostForChecksumFault NotSupportedHostForChecksum + +func init() { + t["NotSupportedHostForChecksumFault"] = reflect.TypeOf((*NotSupportedHostForChecksumFault)(nil)).Elem() +} + +type NotSupportedHostForVFlash struct { + NotSupportedHost + + HostName string `xml:"hostName"` +} + +func init() { + t["NotSupportedHostForVFlash"] = reflect.TypeOf((*NotSupportedHostForVFlash)(nil)).Elem() +} + +type NotSupportedHostForVFlashFault NotSupportedHostForVFlash + +func init() { + t["NotSupportedHostForVFlashFault"] = reflect.TypeOf((*NotSupportedHostForVFlashFault)(nil)).Elem() +} + +type NotSupportedHostForVmcp struct { + NotSupportedHost + + HostName string `xml:"hostName"` +} + +func init() { + t["NotSupportedHostForVmcp"] = reflect.TypeOf((*NotSupportedHostForVmcp)(nil)).Elem() +} + +type NotSupportedHostForVmcpFault NotSupportedHostForVmcp + +func init() { + t["NotSupportedHostForVmcpFault"] = reflect.TypeOf((*NotSupportedHostForVmcpFault)(nil)).Elem() +} + +type NotSupportedHostForVmemFile struct { + NotSupportedHost + + HostName string `xml:"hostName"` +} + +func init() { + t["NotSupportedHostForVmemFile"] = reflect.TypeOf((*NotSupportedHostForVmemFile)(nil)).Elem() +} + +type NotSupportedHostForVmemFileFault NotSupportedHostForVmemFile + +func init() { + t["NotSupportedHostForVmemFileFault"] = reflect.TypeOf((*NotSupportedHostForVmemFileFault)(nil)).Elem() +} + +type NotSupportedHostForVsan struct { + NotSupportedHost + + HostName string `xml:"hostName"` +} + +func init() { + t["NotSupportedHostForVsan"] = reflect.TypeOf((*NotSupportedHostForVsan)(nil)).Elem() +} + +type NotSupportedHostForVsanFault NotSupportedHostForVsan + +func init() { + t["NotSupportedHostForVsanFault"] = reflect.TypeOf((*NotSupportedHostForVsanFault)(nil)).Elem() +} + +type NotSupportedHostInCluster struct { + NotSupportedHost +} + +func init() { + t["NotSupportedHostInCluster"] = reflect.TypeOf((*NotSupportedHostInCluster)(nil)).Elem() +} + +type NotSupportedHostInClusterFault BaseNotSupportedHostInCluster + +func init() { + t["NotSupportedHostInClusterFault"] = reflect.TypeOf((*NotSupportedHostInClusterFault)(nil)).Elem() +} + +type NotSupportedHostInDvs struct { + NotSupportedHost + + SwitchProductSpec DistributedVirtualSwitchProductSpec `xml:"switchProductSpec"` +} + +func init() { + t["NotSupportedHostInDvs"] = reflect.TypeOf((*NotSupportedHostInDvs)(nil)).Elem() +} + +type NotSupportedHostInDvsFault NotSupportedHostInDvs + +func init() { + t["NotSupportedHostInDvsFault"] = reflect.TypeOf((*NotSupportedHostInDvsFault)(nil)).Elem() +} + +type NotSupportedHostInHACluster struct { + NotSupportedHost + + HostName string `xml:"hostName"` + Build string `xml:"build"` +} + +func init() { + t["NotSupportedHostInHACluster"] = reflect.TypeOf((*NotSupportedHostInHACluster)(nil)).Elem() +} + +type NotSupportedHostInHAClusterFault NotSupportedHostInHACluster + +func init() { + t["NotSupportedHostInHAClusterFault"] = reflect.TypeOf((*NotSupportedHostInHAClusterFault)(nil)).Elem() +} + +type NotUserConfigurableProperty struct { + VAppPropertyFault +} + +func init() { + t["NotUserConfigurableProperty"] = 
reflect.TypeOf((*NotUserConfigurableProperty)(nil)).Elem() +} + +type NotUserConfigurablePropertyFault NotUserConfigurableProperty + +func init() { + t["NotUserConfigurablePropertyFault"] = reflect.TypeOf((*NotUserConfigurablePropertyFault)(nil)).Elem() +} + +type NumPortsProfile struct { + ApplyProfile +} + +func init() { + t["NumPortsProfile"] = reflect.TypeOf((*NumPortsProfile)(nil)).Elem() +} + +type NumVirtualCoresPerSocketNotSupported struct { + VirtualHardwareCompatibilityIssue + + MaxSupportedCoresPerSocketDest int `xml:"maxSupportedCoresPerSocketDest"` + NumCoresPerSocketVm int `xml:"numCoresPerSocketVm"` +} + +func init() { + t["NumVirtualCoresPerSocketNotSupported"] = reflect.TypeOf((*NumVirtualCoresPerSocketNotSupported)(nil)).Elem() +} + +type NumVirtualCoresPerSocketNotSupportedFault NumVirtualCoresPerSocketNotSupported + +func init() { + t["NumVirtualCoresPerSocketNotSupportedFault"] = reflect.TypeOf((*NumVirtualCoresPerSocketNotSupportedFault)(nil)).Elem() +} + +type NumVirtualCpusExceedsLimit struct { + InsufficientResourcesFault + + MaxSupportedVcpus int `xml:"maxSupportedVcpus"` +} + +func init() { + t["NumVirtualCpusExceedsLimit"] = reflect.TypeOf((*NumVirtualCpusExceedsLimit)(nil)).Elem() +} + +type NumVirtualCpusExceedsLimitFault NumVirtualCpusExceedsLimit + +func init() { + t["NumVirtualCpusExceedsLimitFault"] = reflect.TypeOf((*NumVirtualCpusExceedsLimitFault)(nil)).Elem() +} + +type NumVirtualCpusIncompatible struct { + VmConfigFault + + Reason string `xml:"reason"` + NumCpu int `xml:"numCpu"` +} + +func init() { + t["NumVirtualCpusIncompatible"] = reflect.TypeOf((*NumVirtualCpusIncompatible)(nil)).Elem() +} + +type NumVirtualCpusIncompatibleFault NumVirtualCpusIncompatible + +func init() { + t["NumVirtualCpusIncompatibleFault"] = reflect.TypeOf((*NumVirtualCpusIncompatibleFault)(nil)).Elem() +} + +type NumVirtualCpusNotSupported struct { + VirtualHardwareCompatibilityIssue + + MaxSupportedVcpusDest int `xml:"maxSupportedVcpusDest"` + NumCpuVm int `xml:"numCpuVm"` +} + +func init() { + t["NumVirtualCpusNotSupported"] = reflect.TypeOf((*NumVirtualCpusNotSupported)(nil)).Elem() +} + +type NumVirtualCpusNotSupportedFault NumVirtualCpusNotSupported + +func init() { + t["NumVirtualCpusNotSupportedFault"] = reflect.TypeOf((*NumVirtualCpusNotSupportedFault)(nil)).Elem() +} + +type NumericRange struct { + DynamicData + + Start int `xml:"start"` + End int `xml:"end"` +} + +func init() { + t["NumericRange"] = reflect.TypeOf((*NumericRange)(nil)).Elem() +} + +type ObjectContent struct { + DynamicData + + Obj ManagedObjectReference `xml:"obj"` + PropSet []DynamicProperty `xml:"propSet,omitempty"` + MissingSet []MissingProperty `xml:"missingSet,omitempty"` +} + +func init() { + t["ObjectContent"] = reflect.TypeOf((*ObjectContent)(nil)).Elem() +} + +type ObjectSpec struct { + DynamicData + + Obj ManagedObjectReference `xml:"obj"` + Skip *bool `xml:"skip"` + SelectSet []BaseSelectionSpec `xml:"selectSet,omitempty,typeattr"` +} + +func init() { + t["ObjectSpec"] = reflect.TypeOf((*ObjectSpec)(nil)).Elem() +} + +type ObjectUpdate struct { + DynamicData + + Kind ObjectUpdateKind `xml:"kind"` + Obj ManagedObjectReference `xml:"obj"` + ChangeSet []PropertyChange `xml:"changeSet,omitempty"` + MissingSet []MissingProperty `xml:"missingSet,omitempty"` +} + +func init() { + t["ObjectUpdate"] = reflect.TypeOf((*ObjectUpdate)(nil)).Elem() +} + +type OnceTaskScheduler struct { + TaskScheduler + + RunAt *time.Time `xml:"runAt"` +} + +func init() { + t["OnceTaskScheduler"] = 
reflect.TypeOf((*OnceTaskScheduler)(nil)).Elem() +} + +type OpaqueNetworkSummary struct { + NetworkSummary + + OpaqueNetworkId string `xml:"opaqueNetworkId"` + OpaqueNetworkType string `xml:"opaqueNetworkType"` +} + +func init() { + t["OpaqueNetworkSummary"] = reflect.TypeOf((*OpaqueNetworkSummary)(nil)).Elem() +} + +type OpaqueNetworkTargetInfo struct { + VirtualMachineTargetInfo + + Network OpaqueNetworkSummary `xml:"network"` + NetworkReservationSupported *bool `xml:"networkReservationSupported"` +} + +func init() { + t["OpaqueNetworkTargetInfo"] = reflect.TypeOf((*OpaqueNetworkTargetInfo)(nil)).Elem() +} + +type OpenInventoryViewFolder OpenInventoryViewFolderRequestType + +func init() { + t["OpenInventoryViewFolder"] = reflect.TypeOf((*OpenInventoryViewFolder)(nil)).Elem() +} + +type OpenInventoryViewFolderRequestType struct { + This ManagedObjectReference `xml:"_this"` + Entity []ManagedObjectReference `xml:"entity"` +} + +func init() { + t["OpenInventoryViewFolderRequestType"] = reflect.TypeOf((*OpenInventoryViewFolderRequestType)(nil)).Elem() +} + +type OpenInventoryViewFolderResponse struct { + Returnval []ManagedObjectReference `xml:"returnval,omitempty"` +} + +type OperationDisabledByGuest struct { + GuestOperationsFault +} + +func init() { + t["OperationDisabledByGuest"] = reflect.TypeOf((*OperationDisabledByGuest)(nil)).Elem() +} + +type OperationDisabledByGuestFault OperationDisabledByGuest + +func init() { + t["OperationDisabledByGuestFault"] = reflect.TypeOf((*OperationDisabledByGuestFault)(nil)).Elem() +} + +type OperationDisallowedOnHost struct { + RuntimeFault +} + +func init() { + t["OperationDisallowedOnHost"] = reflect.TypeOf((*OperationDisallowedOnHost)(nil)).Elem() +} + +type OperationDisallowedOnHostFault OperationDisallowedOnHost + +func init() { + t["OperationDisallowedOnHostFault"] = reflect.TypeOf((*OperationDisallowedOnHostFault)(nil)).Elem() +} + +type OperationNotSupportedByGuest struct { + GuestOperationsFault +} + +func init() { + t["OperationNotSupportedByGuest"] = reflect.TypeOf((*OperationNotSupportedByGuest)(nil)).Elem() +} + +type OperationNotSupportedByGuestFault OperationNotSupportedByGuest + +func init() { + t["OperationNotSupportedByGuestFault"] = reflect.TypeOf((*OperationNotSupportedByGuestFault)(nil)).Elem() +} + +type OptionDef struct { + ElementDescription + + OptionType BaseOptionType `xml:"optionType,typeattr"` +} + +func init() { + t["OptionDef"] = reflect.TypeOf((*OptionDef)(nil)).Elem() +} + +type OptionProfile struct { + ApplyProfile + + Key string `xml:"key"` +} + +func init() { + t["OptionProfile"] = reflect.TypeOf((*OptionProfile)(nil)).Elem() +} + +type OptionType struct { + DynamicData + + ValueIsReadonly *bool `xml:"valueIsReadonly"` +} + +func init() { + t["OptionType"] = reflect.TypeOf((*OptionType)(nil)).Elem() +} + +type OptionValue struct { + DynamicData + + Key string `xml:"key"` + Value AnyType `xml:"value,omitempty,typeattr"` +} + +func init() { + t["OptionValue"] = reflect.TypeOf((*OptionValue)(nil)).Elem() +} + +type OrAlarmExpression struct { + AlarmExpression + + Expression []BaseAlarmExpression `xml:"expression,typeattr"` +} + +func init() { + t["OrAlarmExpression"] = reflect.TypeOf((*OrAlarmExpression)(nil)).Elem() +} + +type OutOfBounds struct { + VimFault + + ArgumentName string `xml:"argumentName"` +} + +func init() { + t["OutOfBounds"] = reflect.TypeOf((*OutOfBounds)(nil)).Elem() +} + +type OutOfBoundsFault OutOfBounds + +func init() { + t["OutOfBoundsFault"] = reflect.TypeOf((*OutOfBoundsFault)(nil)).Elem() +} + 
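Every type in this vendored file follows the same generated pattern: declare the struct (embedding its WSDL parent type), then register it from init() in the package-level map t, keyed by its wire name. Below is a minimal, self-contained sketch of why that registry exists; the map, types, and main function are illustrative stand-ins, not part of the vendored file. A SOAP decoder resolves an xsi:type attribute to a concrete Go type by looking the name up in the map, then allocates a value of that type to unmarshal into.

package main

import (
	"fmt"
	"reflect"
)

// t mirrors the package-level registry this generated file populates.
var t = map[string]reflect.Type{}

// Pared-down stand-ins for the vendored fault hierarchy.
type VimFault struct{}

type NotFound struct{ VimFault }

func init() {
	// Same registration idiom as the vendored code above.
	t["NotFound"] = reflect.TypeOf((*NotFound)(nil)).Elem()
}

func main() {
	// A decoder that sees xsi:type="NotFound" looks up the wire name
	// and allocates a fresh value of the concrete type to decode into.
	if typ, ok := t["NotFound"]; ok {
		v := reflect.New(typ).Interface() // holds a *NotFound
		fmt.Printf("%T\n", v)             // prints *main.NotFound
	}
}

The decoder wiring in govmomi proper is more involved, but the lookup step follows this shape.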
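The *RequestType/*Response pairs (OpenInventoryViewFolder above, OverwriteCustomizationSpec and ParseDescriptor below) encode SOAP method calls: the request struct marshals to the call body, with _this naming the managed object the method is invoked on, and the Response struct carries the returnval. A short sketch of that mapping, using pared-down stand-ins and made-up reference values:

package main

import (
	"encoding/xml"
	"fmt"
)

// Matches the shape of the vendored ManagedObjectReference.
type ManagedObjectReference struct {
	Type  string `xml:"type,attr"`
	Value string `xml:",chardata"`
}

type OpenInventoryViewFolderRequestType struct {
	This   ManagedObjectReference   `xml:"_this"`
	Entity []ManagedObjectReference `xml:"entity"`
}

func main() {
	// Illustrative reference values; real ones come from the server.
	req := OpenInventoryViewFolderRequestType{
		This:   ManagedObjectReference{Type: "InventoryView", Value: "view-12"},
		Entity: []ManagedObjectReference{{Type: "Folder", Value: "group-d1"}},
	}
	out, _ := xml.MarshalIndent(req, "", "  ")
	fmt.Println(string(out))
	// <OpenInventoryViewFolderRequestType>
	//   <_this type="InventoryView">view-12</_this>
	//   <entity type="Folder">group-d1</entity>
	// </OpenInventoryViewFolderRequestType>
}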
+type OutOfSyncDvsHost struct { + DvsEvent + + HostOutOfSync []DvsOutOfSyncHostArgument `xml:"hostOutOfSync"` +} + +func init() { + t["OutOfSyncDvsHost"] = reflect.TypeOf((*OutOfSyncDvsHost)(nil)).Elem() +} + +type OverwriteCustomizationSpec OverwriteCustomizationSpecRequestType + +func init() { + t["OverwriteCustomizationSpec"] = reflect.TypeOf((*OverwriteCustomizationSpec)(nil)).Elem() +} + +type OverwriteCustomizationSpecRequestType struct { + This ManagedObjectReference `xml:"_this"` + Item CustomizationSpecItem `xml:"item"` +} + +func init() { + t["OverwriteCustomizationSpecRequestType"] = reflect.TypeOf((*OverwriteCustomizationSpecRequestType)(nil)).Elem() +} + +type OverwriteCustomizationSpecResponse struct { +} + +type OvfAttribute struct { + OvfInvalidPackage + + ElementName string `xml:"elementName"` + AttributeName string `xml:"attributeName"` +} + +func init() { + t["OvfAttribute"] = reflect.TypeOf((*OvfAttribute)(nil)).Elem() +} + +type OvfAttributeFault BaseOvfAttribute + +func init() { + t["OvfAttributeFault"] = reflect.TypeOf((*OvfAttributeFault)(nil)).Elem() +} + +type OvfConnectedDevice struct { + OvfHardwareExport +} + +func init() { + t["OvfConnectedDevice"] = reflect.TypeOf((*OvfConnectedDevice)(nil)).Elem() +} + +type OvfConnectedDeviceFault BaseOvfConnectedDevice + +func init() { + t["OvfConnectedDeviceFault"] = reflect.TypeOf((*OvfConnectedDeviceFault)(nil)).Elem() +} + +type OvfConnectedDeviceFloppy struct { + OvfConnectedDevice + + Filename string `xml:"filename"` +} + +func init() { + t["OvfConnectedDeviceFloppy"] = reflect.TypeOf((*OvfConnectedDeviceFloppy)(nil)).Elem() +} + +type OvfConnectedDeviceFloppyFault OvfConnectedDeviceFloppy + +func init() { + t["OvfConnectedDeviceFloppyFault"] = reflect.TypeOf((*OvfConnectedDeviceFloppyFault)(nil)).Elem() +} + +type OvfConnectedDeviceIso struct { + OvfConnectedDevice + + Filename string `xml:"filename"` +} + +func init() { + t["OvfConnectedDeviceIso"] = reflect.TypeOf((*OvfConnectedDeviceIso)(nil)).Elem() +} + +type OvfConnectedDeviceIsoFault OvfConnectedDeviceIso + +func init() { + t["OvfConnectedDeviceIsoFault"] = reflect.TypeOf((*OvfConnectedDeviceIsoFault)(nil)).Elem() +} + +type OvfConstraint struct { + OvfInvalidPackage + + Name string `xml:"name"` +} + +func init() { + t["OvfConstraint"] = reflect.TypeOf((*OvfConstraint)(nil)).Elem() +} + +type OvfConstraintFault BaseOvfConstraint + +func init() { + t["OvfConstraintFault"] = reflect.TypeOf((*OvfConstraintFault)(nil)).Elem() +} + +type OvfConsumerCallbackFault struct { + OvfFault + + ExtensionKey string `xml:"extensionKey"` + ExtensionName string `xml:"extensionName"` +} + +func init() { + t["OvfConsumerCallbackFault"] = reflect.TypeOf((*OvfConsumerCallbackFault)(nil)).Elem() +} + +type OvfConsumerCallbackFaultFault BaseOvfConsumerCallbackFault + +func init() { + t["OvfConsumerCallbackFaultFault"] = reflect.TypeOf((*OvfConsumerCallbackFaultFault)(nil)).Elem() +} + +type OvfConsumerCommunicationError struct { + OvfConsumerCallbackFault + + Description string `xml:"description"` +} + +func init() { + t["OvfConsumerCommunicationError"] = reflect.TypeOf((*OvfConsumerCommunicationError)(nil)).Elem() +} + +type OvfConsumerCommunicationErrorFault OvfConsumerCommunicationError + +func init() { + t["OvfConsumerCommunicationErrorFault"] = reflect.TypeOf((*OvfConsumerCommunicationErrorFault)(nil)).Elem() +} + +type OvfConsumerFault struct { + OvfConsumerCallbackFault + + ErrorKey string `xml:"errorKey"` + Message string `xml:"message"` + Params []KeyValue 
`xml:"params,omitempty"` +} + +func init() { + t["OvfConsumerFault"] = reflect.TypeOf((*OvfConsumerFault)(nil)).Elem() +} + +type OvfConsumerFaultFault OvfConsumerFault + +func init() { + t["OvfConsumerFaultFault"] = reflect.TypeOf((*OvfConsumerFaultFault)(nil)).Elem() +} + +type OvfConsumerInvalidSection struct { + OvfConsumerCallbackFault + + LineNumber int `xml:"lineNumber"` + Description string `xml:"description"` +} + +func init() { + t["OvfConsumerInvalidSection"] = reflect.TypeOf((*OvfConsumerInvalidSection)(nil)).Elem() +} + +type OvfConsumerInvalidSectionFault OvfConsumerInvalidSection + +func init() { + t["OvfConsumerInvalidSectionFault"] = reflect.TypeOf((*OvfConsumerInvalidSectionFault)(nil)).Elem() +} + +type OvfConsumerOstNode struct { + DynamicData + + Id string `xml:"id"` + Type string `xml:"type"` + Section []OvfConsumerOvfSection `xml:"section,omitempty"` + Child []OvfConsumerOstNode `xml:"child,omitempty"` + Entity *ManagedObjectReference `xml:"entity,omitempty"` +} + +func init() { + t["OvfConsumerOstNode"] = reflect.TypeOf((*OvfConsumerOstNode)(nil)).Elem() +} + +type OvfConsumerOvfSection struct { + DynamicData + + LineNumber int `xml:"lineNumber"` + Xml string `xml:"xml"` +} + +func init() { + t["OvfConsumerOvfSection"] = reflect.TypeOf((*OvfConsumerOvfSection)(nil)).Elem() +} + +type OvfConsumerPowerOnFault struct { + InvalidState + + ExtensionKey string `xml:"extensionKey"` + ExtensionName string `xml:"extensionName"` + Description string `xml:"description"` +} + +func init() { + t["OvfConsumerPowerOnFault"] = reflect.TypeOf((*OvfConsumerPowerOnFault)(nil)).Elem() +} + +type OvfConsumerPowerOnFaultFault OvfConsumerPowerOnFault + +func init() { + t["OvfConsumerPowerOnFaultFault"] = reflect.TypeOf((*OvfConsumerPowerOnFaultFault)(nil)).Elem() +} + +type OvfConsumerUndeclaredSection struct { + OvfConsumerCallbackFault + + QualifiedSectionType string `xml:"qualifiedSectionType"` +} + +func init() { + t["OvfConsumerUndeclaredSection"] = reflect.TypeOf((*OvfConsumerUndeclaredSection)(nil)).Elem() +} + +type OvfConsumerUndeclaredSectionFault OvfConsumerUndeclaredSection + +func init() { + t["OvfConsumerUndeclaredSectionFault"] = reflect.TypeOf((*OvfConsumerUndeclaredSectionFault)(nil)).Elem() +} + +type OvfConsumerUndefinedPrefix struct { + OvfConsumerCallbackFault + + Prefix string `xml:"prefix"` +} + +func init() { + t["OvfConsumerUndefinedPrefix"] = reflect.TypeOf((*OvfConsumerUndefinedPrefix)(nil)).Elem() +} + +type OvfConsumerUndefinedPrefixFault OvfConsumerUndefinedPrefix + +func init() { + t["OvfConsumerUndefinedPrefixFault"] = reflect.TypeOf((*OvfConsumerUndefinedPrefixFault)(nil)).Elem() +} + +type OvfConsumerValidationFault struct { + VmConfigFault + + ExtensionKey string `xml:"extensionKey"` + ExtensionName string `xml:"extensionName"` + Message string `xml:"message"` +} + +func init() { + t["OvfConsumerValidationFault"] = reflect.TypeOf((*OvfConsumerValidationFault)(nil)).Elem() +} + +type OvfConsumerValidationFaultFault OvfConsumerValidationFault + +func init() { + t["OvfConsumerValidationFaultFault"] = reflect.TypeOf((*OvfConsumerValidationFaultFault)(nil)).Elem() +} + +type OvfCpuCompatibility struct { + OvfImport + + RegisterName string `xml:"registerName"` + Level int `xml:"level"` + RegisterValue string `xml:"registerValue"` + DesiredRegisterValue string `xml:"desiredRegisterValue"` +} + +func init() { + t["OvfCpuCompatibility"] = reflect.TypeOf((*OvfCpuCompatibility)(nil)).Elem() +} + +type OvfCpuCompatibilityCheckNotSupported struct { + OvfImport +} + 
+func init() { + t["OvfCpuCompatibilityCheckNotSupported"] = reflect.TypeOf((*OvfCpuCompatibilityCheckNotSupported)(nil)).Elem() +} + +type OvfCpuCompatibilityCheckNotSupportedFault OvfCpuCompatibilityCheckNotSupported + +func init() { + t["OvfCpuCompatibilityCheckNotSupportedFault"] = reflect.TypeOf((*OvfCpuCompatibilityCheckNotSupportedFault)(nil)).Elem() +} + +type OvfCpuCompatibilityFault OvfCpuCompatibility + +func init() { + t["OvfCpuCompatibilityFault"] = reflect.TypeOf((*OvfCpuCompatibilityFault)(nil)).Elem() +} + +type OvfCreateDescriptorParams struct { + DynamicData + + OvfFiles []OvfFile `xml:"ovfFiles,omitempty"` + Name string `xml:"name,omitempty"` + Description string `xml:"description,omitempty"` + IncludeImageFiles *bool `xml:"includeImageFiles"` + ExportOption []string `xml:"exportOption,omitempty"` + Snapshot *ManagedObjectReference `xml:"snapshot,omitempty"` +} + +func init() { + t["OvfCreateDescriptorParams"] = reflect.TypeOf((*OvfCreateDescriptorParams)(nil)).Elem() +} + +type OvfCreateDescriptorResult struct { + DynamicData + + OvfDescriptor string `xml:"ovfDescriptor"` + Error []LocalizedMethodFault `xml:"error,omitempty"` + Warning []LocalizedMethodFault `xml:"warning,omitempty"` + IncludeImageFiles *bool `xml:"includeImageFiles"` +} + +func init() { + t["OvfCreateDescriptorResult"] = reflect.TypeOf((*OvfCreateDescriptorResult)(nil)).Elem() +} + +type OvfCreateImportSpecParams struct { + OvfManagerCommonParams + + EntityName string `xml:"entityName"` + HostSystem *ManagedObjectReference `xml:"hostSystem,omitempty"` + NetworkMapping []OvfNetworkMapping `xml:"networkMapping,omitempty"` + IpAllocationPolicy string `xml:"ipAllocationPolicy,omitempty"` + IpProtocol string `xml:"ipProtocol,omitempty"` + PropertyMapping []KeyValue `xml:"propertyMapping,omitempty"` + ResourceMapping []OvfResourceMap `xml:"resourceMapping,omitempty"` + DiskProvisioning string `xml:"diskProvisioning,omitempty"` + InstantiationOst *OvfConsumerOstNode `xml:"instantiationOst,omitempty"` +} + +func init() { + t["OvfCreateImportSpecParams"] = reflect.TypeOf((*OvfCreateImportSpecParams)(nil)).Elem() +} + +type OvfCreateImportSpecResult struct { + DynamicData + + ImportSpec BaseImportSpec `xml:"importSpec,omitempty,typeattr"` + FileItem []OvfFileItem `xml:"fileItem,omitempty"` + Warning []LocalizedMethodFault `xml:"warning,omitempty"` + Error []LocalizedMethodFault `xml:"error,omitempty"` +} + +func init() { + t["OvfCreateImportSpecResult"] = reflect.TypeOf((*OvfCreateImportSpecResult)(nil)).Elem() +} + +type OvfDeploymentOption struct { + DynamicData + + Key string `xml:"key"` + Label string `xml:"label"` + Description string `xml:"description"` +} + +func init() { + t["OvfDeploymentOption"] = reflect.TypeOf((*OvfDeploymentOption)(nil)).Elem() +} + +type OvfDiskMappingNotFound struct { + OvfSystemFault + + DiskName string `xml:"diskName"` + VmName string `xml:"vmName"` +} + +func init() { + t["OvfDiskMappingNotFound"] = reflect.TypeOf((*OvfDiskMappingNotFound)(nil)).Elem() +} + +type OvfDiskMappingNotFoundFault OvfDiskMappingNotFound + +func init() { + t["OvfDiskMappingNotFoundFault"] = reflect.TypeOf((*OvfDiskMappingNotFoundFault)(nil)).Elem() +} + +type OvfDiskOrderConstraint struct { + OvfConstraint +} + +func init() { + t["OvfDiskOrderConstraint"] = reflect.TypeOf((*OvfDiskOrderConstraint)(nil)).Elem() +} + +type OvfDiskOrderConstraintFault OvfDiskOrderConstraint + +func init() { + t["OvfDiskOrderConstraintFault"] = reflect.TypeOf((*OvfDiskOrderConstraintFault)(nil)).Elem() +} + +type 
OvfDuplicateElement struct { + OvfElement +} + +func init() { + t["OvfDuplicateElement"] = reflect.TypeOf((*OvfDuplicateElement)(nil)).Elem() +} + +type OvfDuplicateElementFault OvfDuplicateElement + +func init() { + t["OvfDuplicateElementFault"] = reflect.TypeOf((*OvfDuplicateElementFault)(nil)).Elem() +} + +type OvfDuplicatedElementBoundary struct { + OvfElement + + Boundary string `xml:"boundary"` +} + +func init() { + t["OvfDuplicatedElementBoundary"] = reflect.TypeOf((*OvfDuplicatedElementBoundary)(nil)).Elem() +} + +type OvfDuplicatedElementBoundaryFault OvfDuplicatedElementBoundary + +func init() { + t["OvfDuplicatedElementBoundaryFault"] = reflect.TypeOf((*OvfDuplicatedElementBoundaryFault)(nil)).Elem() +} + +type OvfDuplicatedPropertyIdExport struct { + OvfExport + + Fqid string `xml:"fqid"` +} + +func init() { + t["OvfDuplicatedPropertyIdExport"] = reflect.TypeOf((*OvfDuplicatedPropertyIdExport)(nil)).Elem() +} + +type OvfDuplicatedPropertyIdExportFault OvfDuplicatedPropertyIdExport + +func init() { + t["OvfDuplicatedPropertyIdExportFault"] = reflect.TypeOf((*OvfDuplicatedPropertyIdExportFault)(nil)).Elem() +} + +type OvfDuplicatedPropertyIdImport struct { + OvfExport +} + +func init() { + t["OvfDuplicatedPropertyIdImport"] = reflect.TypeOf((*OvfDuplicatedPropertyIdImport)(nil)).Elem() +} + +type OvfDuplicatedPropertyIdImportFault OvfDuplicatedPropertyIdImport + +func init() { + t["OvfDuplicatedPropertyIdImportFault"] = reflect.TypeOf((*OvfDuplicatedPropertyIdImportFault)(nil)).Elem() +} + +type OvfElement struct { + OvfInvalidPackage + + Name string `xml:"name"` +} + +func init() { + t["OvfElement"] = reflect.TypeOf((*OvfElement)(nil)).Elem() +} + +type OvfElementFault BaseOvfElement + +func init() { + t["OvfElementFault"] = reflect.TypeOf((*OvfElementFault)(nil)).Elem() +} + +type OvfElementInvalidValue struct { + OvfElement + + Value string `xml:"value"` +} + +func init() { + t["OvfElementInvalidValue"] = reflect.TypeOf((*OvfElementInvalidValue)(nil)).Elem() +} + +type OvfElementInvalidValueFault OvfElementInvalidValue + +func init() { + t["OvfElementInvalidValueFault"] = reflect.TypeOf((*OvfElementInvalidValueFault)(nil)).Elem() +} + +type OvfExport struct { + OvfFault +} + +func init() { + t["OvfExport"] = reflect.TypeOf((*OvfExport)(nil)).Elem() +} + +type OvfExportFailed struct { + OvfExport +} + +func init() { + t["OvfExportFailed"] = reflect.TypeOf((*OvfExportFailed)(nil)).Elem() +} + +type OvfExportFailedFault OvfExportFailed + +func init() { + t["OvfExportFailedFault"] = reflect.TypeOf((*OvfExportFailedFault)(nil)).Elem() +} + +type OvfExportFault BaseOvfExport + +func init() { + t["OvfExportFault"] = reflect.TypeOf((*OvfExportFault)(nil)).Elem() +} + +type OvfFault struct { + VimFault +} + +func init() { + t["OvfFault"] = reflect.TypeOf((*OvfFault)(nil)).Elem() +} + +type OvfFaultFault BaseOvfFault + +func init() { + t["OvfFaultFault"] = reflect.TypeOf((*OvfFaultFault)(nil)).Elem() +} + +type OvfFile struct { + DynamicData + + DeviceId string `xml:"deviceId"` + Path string `xml:"path"` + CompressionMethod string `xml:"compressionMethod,omitempty"` + ChunkSize int64 `xml:"chunkSize,omitempty"` + Size int64 `xml:"size"` + Capacity int64 `xml:"capacity,omitempty"` + PopulatedSize int64 `xml:"populatedSize,omitempty"` +} + +func init() { + t["OvfFile"] = reflect.TypeOf((*OvfFile)(nil)).Elem() +} + +type OvfFileItem struct { + DynamicData + + DeviceId string `xml:"deviceId"` + Path string `xml:"path"` + CompressionMethod string `xml:"compressionMethod,omitempty"` + 
ChunkSize int64 `xml:"chunkSize,omitempty"` + Size int64 `xml:"size,omitempty"` + CimType int `xml:"cimType"` + Create bool `xml:"create"` +} + +func init() { + t["OvfFileItem"] = reflect.TypeOf((*OvfFileItem)(nil)).Elem() +} + +type OvfHardwareCheck struct { + OvfImport +} + +func init() { + t["OvfHardwareCheck"] = reflect.TypeOf((*OvfHardwareCheck)(nil)).Elem() +} + +type OvfHardwareCheckFault OvfHardwareCheck + +func init() { + t["OvfHardwareCheckFault"] = reflect.TypeOf((*OvfHardwareCheckFault)(nil)).Elem() +} + +type OvfHardwareExport struct { + OvfExport + + Device BaseVirtualDevice `xml:"device,omitempty,typeattr"` + VmPath string `xml:"vmPath"` +} + +func init() { + t["OvfHardwareExport"] = reflect.TypeOf((*OvfHardwareExport)(nil)).Elem() +} + +type OvfHardwareExportFault BaseOvfHardwareExport + +func init() { + t["OvfHardwareExportFault"] = reflect.TypeOf((*OvfHardwareExportFault)(nil)).Elem() +} + +type OvfHostResourceConstraint struct { + OvfConstraint + + Value string `xml:"value"` +} + +func init() { + t["OvfHostResourceConstraint"] = reflect.TypeOf((*OvfHostResourceConstraint)(nil)).Elem() +} + +type OvfHostResourceConstraintFault OvfHostResourceConstraint + +func init() { + t["OvfHostResourceConstraintFault"] = reflect.TypeOf((*OvfHostResourceConstraintFault)(nil)).Elem() +} + +type OvfHostValueNotParsed struct { + OvfSystemFault + + Property string `xml:"property"` + Value string `xml:"value"` +} + +func init() { + t["OvfHostValueNotParsed"] = reflect.TypeOf((*OvfHostValueNotParsed)(nil)).Elem() +} + +type OvfHostValueNotParsedFault OvfHostValueNotParsed + +func init() { + t["OvfHostValueNotParsedFault"] = reflect.TypeOf((*OvfHostValueNotParsedFault)(nil)).Elem() +} + +type OvfImport struct { + OvfFault +} + +func init() { + t["OvfImport"] = reflect.TypeOf((*OvfImport)(nil)).Elem() +} + +type OvfImportFailed struct { + OvfImport +} + +func init() { + t["OvfImportFailed"] = reflect.TypeOf((*OvfImportFailed)(nil)).Elem() +} + +type OvfImportFailedFault OvfImportFailed + +func init() { + t["OvfImportFailedFault"] = reflect.TypeOf((*OvfImportFailedFault)(nil)).Elem() +} + +type OvfImportFault BaseOvfImport + +func init() { + t["OvfImportFault"] = reflect.TypeOf((*OvfImportFault)(nil)).Elem() +} + +type OvfInternalError struct { + OvfSystemFault +} + +func init() { + t["OvfInternalError"] = reflect.TypeOf((*OvfInternalError)(nil)).Elem() +} + +type OvfInternalErrorFault OvfInternalError + +func init() { + t["OvfInternalErrorFault"] = reflect.TypeOf((*OvfInternalErrorFault)(nil)).Elem() +} + +type OvfInvalidPackage struct { + OvfFault + + LineNumber int `xml:"lineNumber"` +} + +func init() { + t["OvfInvalidPackage"] = reflect.TypeOf((*OvfInvalidPackage)(nil)).Elem() +} + +type OvfInvalidPackageFault BaseOvfInvalidPackage + +func init() { + t["OvfInvalidPackageFault"] = reflect.TypeOf((*OvfInvalidPackageFault)(nil)).Elem() +} + +type OvfInvalidValue struct { + OvfAttribute + + Value string `xml:"value"` +} + +func init() { + t["OvfInvalidValue"] = reflect.TypeOf((*OvfInvalidValue)(nil)).Elem() +} + +type OvfInvalidValueConfiguration struct { + OvfInvalidValue +} + +func init() { + t["OvfInvalidValueConfiguration"] = reflect.TypeOf((*OvfInvalidValueConfiguration)(nil)).Elem() +} + +type OvfInvalidValueConfigurationFault OvfInvalidValueConfiguration + +func init() { + t["OvfInvalidValueConfigurationFault"] = reflect.TypeOf((*OvfInvalidValueConfigurationFault)(nil)).Elem() +} + +type OvfInvalidValueEmpty struct { + OvfInvalidValue +} + +func init() { + t["OvfInvalidValueEmpty"] = 
reflect.TypeOf((*OvfInvalidValueEmpty)(nil)).Elem() +} + +type OvfInvalidValueEmptyFault OvfInvalidValueEmpty + +func init() { + t["OvfInvalidValueEmptyFault"] = reflect.TypeOf((*OvfInvalidValueEmptyFault)(nil)).Elem() +} + +type OvfInvalidValueFault BaseOvfInvalidValue + +func init() { + t["OvfInvalidValueFault"] = reflect.TypeOf((*OvfInvalidValueFault)(nil)).Elem() +} + +type OvfInvalidValueFormatMalformed struct { + OvfInvalidValue +} + +func init() { + t["OvfInvalidValueFormatMalformed"] = reflect.TypeOf((*OvfInvalidValueFormatMalformed)(nil)).Elem() +} + +type OvfInvalidValueFormatMalformedFault OvfInvalidValueFormatMalformed + +func init() { + t["OvfInvalidValueFormatMalformedFault"] = reflect.TypeOf((*OvfInvalidValueFormatMalformedFault)(nil)).Elem() +} + +type OvfInvalidValueReference struct { + OvfInvalidValue +} + +func init() { + t["OvfInvalidValueReference"] = reflect.TypeOf((*OvfInvalidValueReference)(nil)).Elem() +} + +type OvfInvalidValueReferenceFault OvfInvalidValueReference + +func init() { + t["OvfInvalidValueReferenceFault"] = reflect.TypeOf((*OvfInvalidValueReferenceFault)(nil)).Elem() +} + +type OvfInvalidVmName struct { + OvfUnsupportedPackage + + Name string `xml:"name"` +} + +func init() { + t["OvfInvalidVmName"] = reflect.TypeOf((*OvfInvalidVmName)(nil)).Elem() +} + +type OvfInvalidVmNameFault OvfInvalidVmName + +func init() { + t["OvfInvalidVmNameFault"] = reflect.TypeOf((*OvfInvalidVmNameFault)(nil)).Elem() +} + +type OvfManagerCommonParams struct { + DynamicData + + Locale string `xml:"locale"` + DeploymentOption string `xml:"deploymentOption"` + MsgBundle []KeyValue `xml:"msgBundle,omitempty"` + ImportOption []string `xml:"importOption,omitempty"` +} + +func init() { + t["OvfManagerCommonParams"] = reflect.TypeOf((*OvfManagerCommonParams)(nil)).Elem() +} + +type OvfMappedOsId struct { + OvfImport + + OvfId int `xml:"ovfId"` + OvfDescription string `xml:"ovfDescription"` + TargetDescription string `xml:"targetDescription"` +} + +func init() { + t["OvfMappedOsId"] = reflect.TypeOf((*OvfMappedOsId)(nil)).Elem() +} + +type OvfMappedOsIdFault OvfMappedOsId + +func init() { + t["OvfMappedOsIdFault"] = reflect.TypeOf((*OvfMappedOsIdFault)(nil)).Elem() +} + +type OvfMissingAttribute struct { + OvfAttribute +} + +func init() { + t["OvfMissingAttribute"] = reflect.TypeOf((*OvfMissingAttribute)(nil)).Elem() +} + +type OvfMissingAttributeFault OvfMissingAttribute + +func init() { + t["OvfMissingAttributeFault"] = reflect.TypeOf((*OvfMissingAttributeFault)(nil)).Elem() +} + +type OvfMissingElement struct { + OvfElement +} + +func init() { + t["OvfMissingElement"] = reflect.TypeOf((*OvfMissingElement)(nil)).Elem() +} + +type OvfMissingElementFault BaseOvfMissingElement + +func init() { + t["OvfMissingElementFault"] = reflect.TypeOf((*OvfMissingElementFault)(nil)).Elem() +} + +type OvfMissingElementNormalBoundary struct { + OvfMissingElement + + Boundary string `xml:"boundary"` +} + +func init() { + t["OvfMissingElementNormalBoundary"] = reflect.TypeOf((*OvfMissingElementNormalBoundary)(nil)).Elem() +} + +type OvfMissingElementNormalBoundaryFault OvfMissingElementNormalBoundary + +func init() { + t["OvfMissingElementNormalBoundaryFault"] = reflect.TypeOf((*OvfMissingElementNormalBoundaryFault)(nil)).Elem() +} + +type OvfMissingHardware struct { + OvfImport + + Name string `xml:"name"` + ResourceType int `xml:"resourceType"` +} + +func init() { + t["OvfMissingHardware"] = reflect.TypeOf((*OvfMissingHardware)(nil)).Elem() +} + +type OvfMissingHardwareFault OvfMissingHardware + 
+func init() { + t["OvfMissingHardwareFault"] = reflect.TypeOf((*OvfMissingHardwareFault)(nil)).Elem() +} + +type OvfNetworkInfo struct { + DynamicData + + Name string `xml:"name"` + Description string `xml:"description"` +} + +func init() { + t["OvfNetworkInfo"] = reflect.TypeOf((*OvfNetworkInfo)(nil)).Elem() +} + +type OvfNetworkMapping struct { + DynamicData + + Name string `xml:"name"` + Network ManagedObjectReference `xml:"network"` +} + +func init() { + t["OvfNetworkMapping"] = reflect.TypeOf((*OvfNetworkMapping)(nil)).Elem() +} + +type OvfNetworkMappingNotSupported struct { + OvfImport +} + +func init() { + t["OvfNetworkMappingNotSupported"] = reflect.TypeOf((*OvfNetworkMappingNotSupported)(nil)).Elem() +} + +type OvfNetworkMappingNotSupportedFault OvfNetworkMappingNotSupported + +func init() { + t["OvfNetworkMappingNotSupportedFault"] = reflect.TypeOf((*OvfNetworkMappingNotSupportedFault)(nil)).Elem() +} + +type OvfNoHostNic struct { + OvfUnsupportedPackage +} + +func init() { + t["OvfNoHostNic"] = reflect.TypeOf((*OvfNoHostNic)(nil)).Elem() +} + +type OvfNoHostNicFault OvfNoHostNic + +func init() { + t["OvfNoHostNicFault"] = reflect.TypeOf((*OvfNoHostNicFault)(nil)).Elem() +} + +type OvfNoSpaceOnController struct { + OvfUnsupportedElement + + Parent string `xml:"parent"` +} + +func init() { + t["OvfNoSpaceOnController"] = reflect.TypeOf((*OvfNoSpaceOnController)(nil)).Elem() +} + +type OvfNoSpaceOnControllerFault OvfNoSpaceOnController + +func init() { + t["OvfNoSpaceOnControllerFault"] = reflect.TypeOf((*OvfNoSpaceOnControllerFault)(nil)).Elem() +} + +type OvfNoSupportedHardwareFamily struct { + OvfUnsupportedPackage + + Version string `xml:"version"` +} + +func init() { + t["OvfNoSupportedHardwareFamily"] = reflect.TypeOf((*OvfNoSupportedHardwareFamily)(nil)).Elem() +} + +type OvfNoSupportedHardwareFamilyFault OvfNoSupportedHardwareFamily + +func init() { + t["OvfNoSupportedHardwareFamilyFault"] = reflect.TypeOf((*OvfNoSupportedHardwareFamilyFault)(nil)).Elem() +} + +type OvfOptionInfo struct { + DynamicData + + Option string `xml:"option"` + Description LocalizableMessage `xml:"description"` +} + +func init() { + t["OvfOptionInfo"] = reflect.TypeOf((*OvfOptionInfo)(nil)).Elem() +} + +type OvfParseDescriptorParams struct { + OvfManagerCommonParams +} + +func init() { + t["OvfParseDescriptorParams"] = reflect.TypeOf((*OvfParseDescriptorParams)(nil)).Elem() +} + +type OvfParseDescriptorResult struct { + DynamicData + + Eula []string `xml:"eula,omitempty"` + Network []OvfNetworkInfo `xml:"network,omitempty"` + IpAllocationScheme []string `xml:"ipAllocationScheme,omitempty"` + IpProtocols []string `xml:"ipProtocols,omitempty"` + Property []VAppPropertyInfo `xml:"property,omitempty"` + ProductInfo *VAppProductInfo `xml:"productInfo,omitempty"` + Annotation string `xml:"annotation"` + ApproximateDownloadSize int64 `xml:"approximateDownloadSize,omitempty"` + ApproximateFlatDeploymentSize int64 `xml:"approximateFlatDeploymentSize,omitempty"` + ApproximateSparseDeploymentSize int64 `xml:"approximateSparseDeploymentSize,omitempty"` + DefaultEntityName string `xml:"defaultEntityName"` + VirtualApp bool `xml:"virtualApp"` + DeploymentOption []OvfDeploymentOption `xml:"deploymentOption,omitempty"` + DefaultDeploymentOption string `xml:"defaultDeploymentOption"` + EntityName []KeyValue `xml:"entityName,omitempty"` + AnnotatedOst *OvfConsumerOstNode `xml:"annotatedOst,omitempty"` + Error []LocalizedMethodFault `xml:"error,omitempty"` + Warning []LocalizedMethodFault `xml:"warning,omitempty"` 
+} + +func init() { + t["OvfParseDescriptorResult"] = reflect.TypeOf((*OvfParseDescriptorResult)(nil)).Elem() +} + +type OvfProperty struct { + OvfInvalidPackage + + Type string `xml:"type"` + Value string `xml:"value"` +} + +func init() { + t["OvfProperty"] = reflect.TypeOf((*OvfProperty)(nil)).Elem() +} + +type OvfPropertyExport struct { + OvfExport + + Type string `xml:"type"` + Value string `xml:"value"` +} + +func init() { + t["OvfPropertyExport"] = reflect.TypeOf((*OvfPropertyExport)(nil)).Elem() +} + +type OvfPropertyExportFault OvfPropertyExport + +func init() { + t["OvfPropertyExportFault"] = reflect.TypeOf((*OvfPropertyExportFault)(nil)).Elem() +} + +type OvfPropertyFault BaseOvfProperty + +func init() { + t["OvfPropertyFault"] = reflect.TypeOf((*OvfPropertyFault)(nil)).Elem() +} + +type OvfPropertyNetwork struct { + OvfProperty +} + +func init() { + t["OvfPropertyNetwork"] = reflect.TypeOf((*OvfPropertyNetwork)(nil)).Elem() +} + +type OvfPropertyNetworkExport struct { + OvfExport + + Network string `xml:"network"` +} + +func init() { + t["OvfPropertyNetworkExport"] = reflect.TypeOf((*OvfPropertyNetworkExport)(nil)).Elem() +} + +type OvfPropertyNetworkExportFault OvfPropertyNetworkExport + +func init() { + t["OvfPropertyNetworkExportFault"] = reflect.TypeOf((*OvfPropertyNetworkExportFault)(nil)).Elem() +} + +type OvfPropertyNetworkFault OvfPropertyNetwork + +func init() { + t["OvfPropertyNetworkFault"] = reflect.TypeOf((*OvfPropertyNetworkFault)(nil)).Elem() +} + +type OvfPropertyQualifier struct { + OvfProperty + + Qualifier string `xml:"qualifier"` +} + +func init() { + t["OvfPropertyQualifier"] = reflect.TypeOf((*OvfPropertyQualifier)(nil)).Elem() +} + +type OvfPropertyQualifierDuplicate struct { + OvfProperty + + Qualifier string `xml:"qualifier"` +} + +func init() { + t["OvfPropertyQualifierDuplicate"] = reflect.TypeOf((*OvfPropertyQualifierDuplicate)(nil)).Elem() +} + +type OvfPropertyQualifierDuplicateFault OvfPropertyQualifierDuplicate + +func init() { + t["OvfPropertyQualifierDuplicateFault"] = reflect.TypeOf((*OvfPropertyQualifierDuplicateFault)(nil)).Elem() +} + +type OvfPropertyQualifierFault OvfPropertyQualifier + +func init() { + t["OvfPropertyQualifierFault"] = reflect.TypeOf((*OvfPropertyQualifierFault)(nil)).Elem() +} + +type OvfPropertyQualifierIgnored struct { + OvfProperty + + Qualifier string `xml:"qualifier"` +} + +func init() { + t["OvfPropertyQualifierIgnored"] = reflect.TypeOf((*OvfPropertyQualifierIgnored)(nil)).Elem() +} + +type OvfPropertyQualifierIgnoredFault OvfPropertyQualifierIgnored + +func init() { + t["OvfPropertyQualifierIgnoredFault"] = reflect.TypeOf((*OvfPropertyQualifierIgnoredFault)(nil)).Elem() +} + +type OvfPropertyType struct { + OvfProperty +} + +func init() { + t["OvfPropertyType"] = reflect.TypeOf((*OvfPropertyType)(nil)).Elem() +} + +type OvfPropertyTypeFault OvfPropertyType + +func init() { + t["OvfPropertyTypeFault"] = reflect.TypeOf((*OvfPropertyTypeFault)(nil)).Elem() +} + +type OvfPropertyValue struct { + OvfProperty +} + +func init() { + t["OvfPropertyValue"] = reflect.TypeOf((*OvfPropertyValue)(nil)).Elem() +} + +type OvfPropertyValueFault OvfPropertyValue + +func init() { + t["OvfPropertyValueFault"] = reflect.TypeOf((*OvfPropertyValueFault)(nil)).Elem() +} + +type OvfResourceMap struct { + DynamicData + + Source string `xml:"source"` + Parent *ManagedObjectReference `xml:"parent,omitempty"` + ResourceSpec *ResourceConfigSpec `xml:"resourceSpec,omitempty"` + Datastore *ManagedObjectReference `xml:"datastore,omitempty"` +} + 
+func init() { + t["OvfResourceMap"] = reflect.TypeOf((*OvfResourceMap)(nil)).Elem() +} + +type OvfSystemFault struct { + OvfFault +} + +func init() { + t["OvfSystemFault"] = reflect.TypeOf((*OvfSystemFault)(nil)).Elem() +} + +type OvfSystemFaultFault BaseOvfSystemFault + +func init() { + t["OvfSystemFaultFault"] = reflect.TypeOf((*OvfSystemFaultFault)(nil)).Elem() +} + +type OvfToXmlUnsupportedElement struct { + OvfSystemFault + + Name string `xml:"name,omitempty"` +} + +func init() { + t["OvfToXmlUnsupportedElement"] = reflect.TypeOf((*OvfToXmlUnsupportedElement)(nil)).Elem() +} + +type OvfToXmlUnsupportedElementFault OvfToXmlUnsupportedElement + +func init() { + t["OvfToXmlUnsupportedElementFault"] = reflect.TypeOf((*OvfToXmlUnsupportedElementFault)(nil)).Elem() +} + +type OvfUnableToExportDisk struct { + OvfHardwareExport + + DiskName string `xml:"diskName"` +} + +func init() { + t["OvfUnableToExportDisk"] = reflect.TypeOf((*OvfUnableToExportDisk)(nil)).Elem() +} + +type OvfUnableToExportDiskFault OvfUnableToExportDisk + +func init() { + t["OvfUnableToExportDiskFault"] = reflect.TypeOf((*OvfUnableToExportDiskFault)(nil)).Elem() +} + +type OvfUnexpectedElement struct { + OvfElement +} + +func init() { + t["OvfUnexpectedElement"] = reflect.TypeOf((*OvfUnexpectedElement)(nil)).Elem() +} + +type OvfUnexpectedElementFault OvfUnexpectedElement + +func init() { + t["OvfUnexpectedElementFault"] = reflect.TypeOf((*OvfUnexpectedElementFault)(nil)).Elem() +} + +type OvfUnknownDevice struct { + OvfSystemFault + + Device BaseVirtualDevice `xml:"device,omitempty,typeattr"` + VmName string `xml:"vmName"` +} + +func init() { + t["OvfUnknownDevice"] = reflect.TypeOf((*OvfUnknownDevice)(nil)).Elem() +} + +type OvfUnknownDeviceBacking struct { + OvfHardwareExport + + Backing BaseVirtualDeviceBackingInfo `xml:"backing,typeattr"` +} + +func init() { + t["OvfUnknownDeviceBacking"] = reflect.TypeOf((*OvfUnknownDeviceBacking)(nil)).Elem() +} + +type OvfUnknownDeviceBackingFault OvfUnknownDeviceBacking + +func init() { + t["OvfUnknownDeviceBackingFault"] = reflect.TypeOf((*OvfUnknownDeviceBackingFault)(nil)).Elem() +} + +type OvfUnknownDeviceFault OvfUnknownDevice + +func init() { + t["OvfUnknownDeviceFault"] = reflect.TypeOf((*OvfUnknownDeviceFault)(nil)).Elem() +} + +type OvfUnknownEntity struct { + OvfSystemFault + + LineNumber int `xml:"lineNumber"` +} + +func init() { + t["OvfUnknownEntity"] = reflect.TypeOf((*OvfUnknownEntity)(nil)).Elem() +} + +type OvfUnknownEntityFault OvfUnknownEntity + +func init() { + t["OvfUnknownEntityFault"] = reflect.TypeOf((*OvfUnknownEntityFault)(nil)).Elem() +} + +type OvfUnsupportedAttribute struct { + OvfUnsupportedPackage + + ElementName string `xml:"elementName"` + AttributeName string `xml:"attributeName"` +} + +func init() { + t["OvfUnsupportedAttribute"] = reflect.TypeOf((*OvfUnsupportedAttribute)(nil)).Elem() +} + +type OvfUnsupportedAttributeFault BaseOvfUnsupportedAttribute + +func init() { + t["OvfUnsupportedAttributeFault"] = reflect.TypeOf((*OvfUnsupportedAttributeFault)(nil)).Elem() +} + +type OvfUnsupportedAttributeValue struct { + OvfUnsupportedAttribute + + Value string `xml:"value"` +} + +func init() { + t["OvfUnsupportedAttributeValue"] = reflect.TypeOf((*OvfUnsupportedAttributeValue)(nil)).Elem() +} + +type OvfUnsupportedAttributeValueFault OvfUnsupportedAttributeValue + +func init() { + t["OvfUnsupportedAttributeValueFault"] = reflect.TypeOf((*OvfUnsupportedAttributeValueFault)(nil)).Elem() +} + +type OvfUnsupportedDeviceBackingInfo struct { + 
OvfSystemFault + + ElementName string `xml:"elementName,omitempty"` + InstanceId string `xml:"instanceId,omitempty"` + DeviceName string `xml:"deviceName"` + BackingName string `xml:"backingName,omitempty"` +} + +func init() { + t["OvfUnsupportedDeviceBackingInfo"] = reflect.TypeOf((*OvfUnsupportedDeviceBackingInfo)(nil)).Elem() +} + +type OvfUnsupportedDeviceBackingInfoFault OvfUnsupportedDeviceBackingInfo + +func init() { + t["OvfUnsupportedDeviceBackingInfoFault"] = reflect.TypeOf((*OvfUnsupportedDeviceBackingInfoFault)(nil)).Elem() +} + +type OvfUnsupportedDeviceBackingOption struct { + OvfSystemFault + + ElementName string `xml:"elementName,omitempty"` + InstanceId string `xml:"instanceId,omitempty"` + DeviceName string `xml:"deviceName"` + BackingName string `xml:"backingName,omitempty"` +} + +func init() { + t["OvfUnsupportedDeviceBackingOption"] = reflect.TypeOf((*OvfUnsupportedDeviceBackingOption)(nil)).Elem() +} + +type OvfUnsupportedDeviceBackingOptionFault OvfUnsupportedDeviceBackingOption + +func init() { + t["OvfUnsupportedDeviceBackingOptionFault"] = reflect.TypeOf((*OvfUnsupportedDeviceBackingOptionFault)(nil)).Elem() +} + +type OvfUnsupportedDeviceExport struct { + OvfHardwareExport +} + +func init() { + t["OvfUnsupportedDeviceExport"] = reflect.TypeOf((*OvfUnsupportedDeviceExport)(nil)).Elem() +} + +type OvfUnsupportedDeviceExportFault OvfUnsupportedDeviceExport + +func init() { + t["OvfUnsupportedDeviceExportFault"] = reflect.TypeOf((*OvfUnsupportedDeviceExportFault)(nil)).Elem() +} + +type OvfUnsupportedDiskProvisioning struct { + OvfImport + + DiskProvisioning string `xml:"diskProvisioning"` + SupportedDiskProvisioning string `xml:"supportedDiskProvisioning"` +} + +func init() { + t["OvfUnsupportedDiskProvisioning"] = reflect.TypeOf((*OvfUnsupportedDiskProvisioning)(nil)).Elem() +} + +type OvfUnsupportedDiskProvisioningFault OvfUnsupportedDiskProvisioning + +func init() { + t["OvfUnsupportedDiskProvisioningFault"] = reflect.TypeOf((*OvfUnsupportedDiskProvisioningFault)(nil)).Elem() +} + +type OvfUnsupportedElement struct { + OvfUnsupportedPackage + + Name string `xml:"name"` +} + +func init() { + t["OvfUnsupportedElement"] = reflect.TypeOf((*OvfUnsupportedElement)(nil)).Elem() +} + +type OvfUnsupportedElementFault BaseOvfUnsupportedElement + +func init() { + t["OvfUnsupportedElementFault"] = reflect.TypeOf((*OvfUnsupportedElementFault)(nil)).Elem() +} + +type OvfUnsupportedElementValue struct { + OvfUnsupportedElement + + Value string `xml:"value"` +} + +func init() { + t["OvfUnsupportedElementValue"] = reflect.TypeOf((*OvfUnsupportedElementValue)(nil)).Elem() +} + +type OvfUnsupportedElementValueFault OvfUnsupportedElementValue + +func init() { + t["OvfUnsupportedElementValueFault"] = reflect.TypeOf((*OvfUnsupportedElementValueFault)(nil)).Elem() +} + +type OvfUnsupportedPackage struct { + OvfFault + + LineNumber int `xml:"lineNumber,omitempty"` +} + +func init() { + t["OvfUnsupportedPackage"] = reflect.TypeOf((*OvfUnsupportedPackage)(nil)).Elem() +} + +type OvfUnsupportedPackageFault BaseOvfUnsupportedPackage + +func init() { + t["OvfUnsupportedPackageFault"] = reflect.TypeOf((*OvfUnsupportedPackageFault)(nil)).Elem() +} + +type OvfUnsupportedSection struct { + OvfUnsupportedElement + + Info string `xml:"info"` +} + +func init() { + t["OvfUnsupportedSection"] = reflect.TypeOf((*OvfUnsupportedSection)(nil)).Elem() +} + +type OvfUnsupportedSectionFault OvfUnsupportedSection + +func init() { + t["OvfUnsupportedSectionFault"] = 
reflect.TypeOf((*OvfUnsupportedSectionFault)(nil)).Elem() +} + +type OvfUnsupportedSubType struct { + OvfUnsupportedPackage + + ElementName string `xml:"elementName"` + InstanceId string `xml:"instanceId"` + DeviceType int `xml:"deviceType"` + DeviceSubType string `xml:"deviceSubType"` +} + +func init() { + t["OvfUnsupportedSubType"] = reflect.TypeOf((*OvfUnsupportedSubType)(nil)).Elem() +} + +type OvfUnsupportedSubTypeFault OvfUnsupportedSubType + +func init() { + t["OvfUnsupportedSubTypeFault"] = reflect.TypeOf((*OvfUnsupportedSubTypeFault)(nil)).Elem() +} + +type OvfUnsupportedType struct { + OvfUnsupportedPackage + + Name string `xml:"name"` + InstanceId string `xml:"instanceId"` + DeviceType int `xml:"deviceType"` +} + +func init() { + t["OvfUnsupportedType"] = reflect.TypeOf((*OvfUnsupportedType)(nil)).Elem() +} + +type OvfUnsupportedTypeFault OvfUnsupportedType + +func init() { + t["OvfUnsupportedTypeFault"] = reflect.TypeOf((*OvfUnsupportedTypeFault)(nil)).Elem() +} + +type OvfValidateHostParams struct { + OvfManagerCommonParams +} + +func init() { + t["OvfValidateHostParams"] = reflect.TypeOf((*OvfValidateHostParams)(nil)).Elem() +} + +type OvfValidateHostResult struct { + DynamicData + + DownloadSize int64 `xml:"downloadSize,omitempty"` + FlatDeploymentSize int64 `xml:"flatDeploymentSize,omitempty"` + SparseDeploymentSize int64 `xml:"sparseDeploymentSize,omitempty"` + Error []LocalizedMethodFault `xml:"error,omitempty"` + Warning []LocalizedMethodFault `xml:"warning,omitempty"` + SupportedDiskProvisioning []string `xml:"supportedDiskProvisioning,omitempty"` +} + +func init() { + t["OvfValidateHostResult"] = reflect.TypeOf((*OvfValidateHostResult)(nil)).Elem() +} + +type OvfWrongElement struct { + OvfElement +} + +func init() { + t["OvfWrongElement"] = reflect.TypeOf((*OvfWrongElement)(nil)).Elem() +} + +type OvfWrongElementFault OvfWrongElement + +func init() { + t["OvfWrongElementFault"] = reflect.TypeOf((*OvfWrongElementFault)(nil)).Elem() +} + +type OvfWrongNamespace struct { + OvfInvalidPackage + + NamespaceName string `xml:"namespaceName"` +} + +func init() { + t["OvfWrongNamespace"] = reflect.TypeOf((*OvfWrongNamespace)(nil)).Elem() +} + +type OvfWrongNamespaceFault OvfWrongNamespace + +func init() { + t["OvfWrongNamespaceFault"] = reflect.TypeOf((*OvfWrongNamespaceFault)(nil)).Elem() +} + +type OvfXmlFormat struct { + OvfInvalidPackage + + Description string `xml:"description"` +} + +func init() { + t["OvfXmlFormat"] = reflect.TypeOf((*OvfXmlFormat)(nil)).Elem() +} + +type OvfXmlFormatFault OvfXmlFormat + +func init() { + t["OvfXmlFormatFault"] = reflect.TypeOf((*OvfXmlFormatFault)(nil)).Elem() +} + +type ParaVirtualSCSIController struct { + VirtualSCSIController +} + +func init() { + t["ParaVirtualSCSIController"] = reflect.TypeOf((*ParaVirtualSCSIController)(nil)).Elem() +} + +type ParaVirtualSCSIControllerOption struct { + VirtualSCSIControllerOption +} + +func init() { + t["ParaVirtualSCSIControllerOption"] = reflect.TypeOf((*ParaVirtualSCSIControllerOption)(nil)).Elem() +} + +type ParseDescriptor ParseDescriptorRequestType + +func init() { + t["ParseDescriptor"] = reflect.TypeOf((*ParseDescriptor)(nil)).Elem() +} + +type ParseDescriptorRequestType struct { + This ManagedObjectReference `xml:"_this"` + OvfDescriptor string `xml:"ovfDescriptor"` + Pdp OvfParseDescriptorParams `xml:"pdp"` +} + +func init() { + t["ParseDescriptorRequestType"] = reflect.TypeOf((*ParseDescriptorRequestType)(nil)).Elem() +} + +type ParseDescriptorResponse struct { + Returnval 
OvfParseDescriptorResult `xml:"returnval"` +} + +type PasswordField struct { + DynamicData + + Value string `xml:"value"` +} + +func init() { + t["PasswordField"] = reflect.TypeOf((*PasswordField)(nil)).Elem() +} + +type PatchAlreadyInstalled struct { + PatchNotApplicable +} + +func init() { + t["PatchAlreadyInstalled"] = reflect.TypeOf((*PatchAlreadyInstalled)(nil)).Elem() +} + +type PatchAlreadyInstalledFault PatchAlreadyInstalled + +func init() { + t["PatchAlreadyInstalledFault"] = reflect.TypeOf((*PatchAlreadyInstalledFault)(nil)).Elem() +} + +type PatchBinariesNotFound struct { + VimFault + + PatchID string `xml:"patchID"` + Binary []string `xml:"binary,omitempty"` +} + +func init() { + t["PatchBinariesNotFound"] = reflect.TypeOf((*PatchBinariesNotFound)(nil)).Elem() +} + +type PatchBinariesNotFoundFault PatchBinariesNotFound + +func init() { + t["PatchBinariesNotFoundFault"] = reflect.TypeOf((*PatchBinariesNotFoundFault)(nil)).Elem() +} + +type PatchInstallFailed struct { + PlatformConfigFault + + RolledBack bool `xml:"rolledBack"` +} + +func init() { + t["PatchInstallFailed"] = reflect.TypeOf((*PatchInstallFailed)(nil)).Elem() +} + +type PatchInstallFailedFault PatchInstallFailed + +func init() { + t["PatchInstallFailedFault"] = reflect.TypeOf((*PatchInstallFailedFault)(nil)).Elem() +} + +type PatchIntegrityError struct { + PlatformConfigFault +} + +func init() { + t["PatchIntegrityError"] = reflect.TypeOf((*PatchIntegrityError)(nil)).Elem() +} + +type PatchIntegrityErrorFault PatchIntegrityError + +func init() { + t["PatchIntegrityErrorFault"] = reflect.TypeOf((*PatchIntegrityErrorFault)(nil)).Elem() +} + +type PatchMetadataCorrupted struct { + PatchMetadataInvalid +} + +func init() { + t["PatchMetadataCorrupted"] = reflect.TypeOf((*PatchMetadataCorrupted)(nil)).Elem() +} + +type PatchMetadataCorruptedFault PatchMetadataCorrupted + +func init() { + t["PatchMetadataCorruptedFault"] = reflect.TypeOf((*PatchMetadataCorruptedFault)(nil)).Elem() +} + +type PatchMetadataInvalid struct { + VimFault + + PatchID string `xml:"patchID"` + MetaData []string `xml:"metaData,omitempty"` +} + +func init() { + t["PatchMetadataInvalid"] = reflect.TypeOf((*PatchMetadataInvalid)(nil)).Elem() +} + +type PatchMetadataInvalidFault BasePatchMetadataInvalid + +func init() { + t["PatchMetadataInvalidFault"] = reflect.TypeOf((*PatchMetadataInvalidFault)(nil)).Elem() +} + +type PatchMetadataNotFound struct { + PatchMetadataInvalid +} + +func init() { + t["PatchMetadataNotFound"] = reflect.TypeOf((*PatchMetadataNotFound)(nil)).Elem() +} + +type PatchMetadataNotFoundFault PatchMetadataNotFound + +func init() { + t["PatchMetadataNotFoundFault"] = reflect.TypeOf((*PatchMetadataNotFoundFault)(nil)).Elem() +} + +type PatchMissingDependencies struct { + PatchNotApplicable + + PrerequisitePatch []string `xml:"prerequisitePatch,omitempty"` + PrerequisiteLib []string `xml:"prerequisiteLib,omitempty"` +} + +func init() { + t["PatchMissingDependencies"] = reflect.TypeOf((*PatchMissingDependencies)(nil)).Elem() +} + +type PatchMissingDependenciesFault PatchMissingDependencies + +func init() { + t["PatchMissingDependenciesFault"] = reflect.TypeOf((*PatchMissingDependenciesFault)(nil)).Elem() +} + +type PatchNotApplicable struct { + VimFault + + PatchID string `xml:"patchID"` +} + +func init() { + t["PatchNotApplicable"] = reflect.TypeOf((*PatchNotApplicable)(nil)).Elem() +} + +type PatchNotApplicableFault BasePatchNotApplicable + +func init() { + t["PatchNotApplicableFault"] = 
reflect.TypeOf((*PatchNotApplicableFault)(nil)).Elem() +} + +type PatchSuperseded struct { + PatchNotApplicable + + Supersede []string `xml:"supersede,omitempty"` +} + +func init() { + t["PatchSuperseded"] = reflect.TypeOf((*PatchSuperseded)(nil)).Elem() +} + +type PatchSupersededFault PatchSuperseded + +func init() { + t["PatchSupersededFault"] = reflect.TypeOf((*PatchSupersededFault)(nil)).Elem() +} + +type PerfCompositeMetric struct { + DynamicData + + Entity BasePerfEntityMetricBase `xml:"entity,omitempty,typeattr"` + ChildEntity []BasePerfEntityMetricBase `xml:"childEntity,omitempty,typeattr"` +} + +func init() { + t["PerfCompositeMetric"] = reflect.TypeOf((*PerfCompositeMetric)(nil)).Elem() +} + +type PerfCounterInfo struct { + DynamicData + + Key int `xml:"key"` + NameInfo BaseElementDescription `xml:"nameInfo,typeattr"` + GroupInfo BaseElementDescription `xml:"groupInfo,typeattr"` + UnitInfo BaseElementDescription `xml:"unitInfo,typeattr"` + RollupType PerfSummaryType `xml:"rollupType"` + StatsType PerfStatsType `xml:"statsType"` + Level int `xml:"level,omitempty"` + PerDeviceLevel int `xml:"perDeviceLevel,omitempty"` + AssociatedCounterId []int `xml:"associatedCounterId,omitempty"` +} + +func init() { + t["PerfCounterInfo"] = reflect.TypeOf((*PerfCounterInfo)(nil)).Elem() +} + +type PerfEntityMetric struct { + PerfEntityMetricBase + + SampleInfo []PerfSampleInfo `xml:"sampleInfo,omitempty"` + Value []BasePerfMetricSeries `xml:"value,omitempty,typeattr"` +} + +func init() { + t["PerfEntityMetric"] = reflect.TypeOf((*PerfEntityMetric)(nil)).Elem() +} + +type PerfEntityMetricBase struct { + DynamicData + + Entity ManagedObjectReference `xml:"entity"` +} + +func init() { + t["PerfEntityMetricBase"] = reflect.TypeOf((*PerfEntityMetricBase)(nil)).Elem() +} + +type PerfEntityMetricCSV struct { + PerfEntityMetricBase + + SampleInfoCSV string `xml:"sampleInfoCSV"` + Value []PerfMetricSeriesCSV `xml:"value,omitempty"` +} + +func init() { + t["PerfEntityMetricCSV"] = reflect.TypeOf((*PerfEntityMetricCSV)(nil)).Elem() +} + +type PerfInterval struct { + DynamicData + + Key int `xml:"key"` + SamplingPeriod int `xml:"samplingPeriod"` + Name string `xml:"name"` + Length int `xml:"length"` + Level int `xml:"level,omitempty"` + Enabled bool `xml:"enabled"` +} + +func init() { + t["PerfInterval"] = reflect.TypeOf((*PerfInterval)(nil)).Elem() +} + +type PerfMetricId struct { + DynamicData + + CounterId int `xml:"counterId"` + Instance string `xml:"instance"` +} + +func init() { + t["PerfMetricId"] = reflect.TypeOf((*PerfMetricId)(nil)).Elem() +} + +type PerfMetricIntSeries struct { + PerfMetricSeries + + Value []int64 `xml:"value,omitempty"` +} + +func init() { + t["PerfMetricIntSeries"] = reflect.TypeOf((*PerfMetricIntSeries)(nil)).Elem() +} + +type PerfMetricSeries struct { + DynamicData + + Id PerfMetricId `xml:"id"` +} + +func init() { + t["PerfMetricSeries"] = reflect.TypeOf((*PerfMetricSeries)(nil)).Elem() +} + +type PerfMetricSeriesCSV struct { + PerfMetricSeries + + Value string `xml:"value,omitempty"` +} + +func init() { + t["PerfMetricSeriesCSV"] = reflect.TypeOf((*PerfMetricSeriesCSV)(nil)).Elem() +} + +type PerfProviderSummary struct { + DynamicData + + Entity ManagedObjectReference `xml:"entity"` + CurrentSupported bool `xml:"currentSupported"` + SummarySupported bool `xml:"summarySupported"` + RefreshRate int `xml:"refreshRate,omitempty"` +} + +func init() { + t["PerfProviderSummary"] = reflect.TypeOf((*PerfProviderSummary)(nil)).Elem() +} + +type PerfQuerySpec struct { + DynamicData + 
+ Entity ManagedObjectReference `xml:"entity"` + StartTime *time.Time `xml:"startTime"` + EndTime *time.Time `xml:"endTime"` + MaxSample int `xml:"maxSample,omitempty"` + MetricId []PerfMetricId `xml:"metricId,omitempty"` + IntervalId int `xml:"intervalId,omitempty"` + Format string `xml:"format,omitempty"` +} + +func init() { + t["PerfQuerySpec"] = reflect.TypeOf((*PerfQuerySpec)(nil)).Elem() +} + +type PerfSampleInfo struct { + DynamicData + + Timestamp time.Time `xml:"timestamp"` + Interval int `xml:"interval"` +} + +func init() { + t["PerfSampleInfo"] = reflect.TypeOf((*PerfSampleInfo)(nil)).Elem() +} + +type PerformDvsProductSpecOperationRequestType struct { + This ManagedObjectReference `xml:"_this"` + Operation string `xml:"operation"` + ProductSpec *DistributedVirtualSwitchProductSpec `xml:"productSpec,omitempty"` +} + +func init() { + t["PerformDvsProductSpecOperationRequestType"] = reflect.TypeOf((*PerformDvsProductSpecOperationRequestType)(nil)).Elem() +} + +type PerformDvsProductSpecOperation_Task PerformDvsProductSpecOperationRequestType + +func init() { + t["PerformDvsProductSpecOperation_Task"] = reflect.TypeOf((*PerformDvsProductSpecOperation_Task)(nil)).Elem() +} + +type PerformDvsProductSpecOperation_TaskResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + +type PerformVsanUpgradePreflightCheck PerformVsanUpgradePreflightCheckRequestType + +func init() { + t["PerformVsanUpgradePreflightCheck"] = reflect.TypeOf((*PerformVsanUpgradePreflightCheck)(nil)).Elem() +} + +type PerformVsanUpgradePreflightCheckRequestType struct { + This ManagedObjectReference `xml:"_this"` + Cluster ManagedObjectReference `xml:"cluster"` + DowngradeFormat *bool `xml:"downgradeFormat"` +} + +func init() { + t["PerformVsanUpgradePreflightCheckRequestType"] = reflect.TypeOf((*PerformVsanUpgradePreflightCheckRequestType)(nil)).Elem() +} + +type PerformVsanUpgradePreflightCheckResponse struct { + Returnval VsanUpgradeSystemPreflightCheckResult `xml:"returnval"` +} + +type PerformVsanUpgradeRequestType struct { + This ManagedObjectReference `xml:"_this"` + Cluster ManagedObjectReference `xml:"cluster"` + PerformObjectUpgrade *bool `xml:"performObjectUpgrade"` + DowngradeFormat *bool `xml:"downgradeFormat"` + AllowReducedRedundancy *bool `xml:"allowReducedRedundancy"` + ExcludeHosts []ManagedObjectReference `xml:"excludeHosts,omitempty"` +} + +func init() { + t["PerformVsanUpgradeRequestType"] = reflect.TypeOf((*PerformVsanUpgradeRequestType)(nil)).Elem() +} + +type PerformVsanUpgrade_Task PerformVsanUpgradeRequestType + +func init() { + t["PerformVsanUpgrade_Task"] = reflect.TypeOf((*PerformVsanUpgrade_Task)(nil)).Elem() +} + +type PerformVsanUpgrade_TaskResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + +type PerformanceDescription struct { + DynamicData + + CounterType []BaseElementDescription `xml:"counterType,typeattr"` + StatsType []BaseElementDescription `xml:"statsType,typeattr"` +} + +func init() { + t["PerformanceDescription"] = reflect.TypeOf((*PerformanceDescription)(nil)).Elem() +} + +type PerformanceManagerCounterLevelMapping struct { + DynamicData + + CounterId int `xml:"counterId"` + AggregateLevel int `xml:"aggregateLevel,omitempty"` + PerDeviceLevel int `xml:"perDeviceLevel,omitempty"` +} + +func init() { + t["PerformanceManagerCounterLevelMapping"] = reflect.TypeOf((*PerformanceManagerCounterLevelMapping)(nil)).Elem() +} + +type PerformanceStatisticsDescription struct { + DynamicData + + Intervals []PerfInterval 
`xml:"intervals,omitempty"` +} + +func init() { + t["PerformanceStatisticsDescription"] = reflect.TypeOf((*PerformanceStatisticsDescription)(nil)).Elem() +} + +type Permission struct { + DynamicData + + Entity *ManagedObjectReference `xml:"entity,omitempty"` + Principal string `xml:"principal"` + Group bool `xml:"group"` + RoleId int `xml:"roleId"` + Propagate bool `xml:"propagate"` +} + +func init() { + t["Permission"] = reflect.TypeOf((*Permission)(nil)).Elem() +} + +type PermissionAddedEvent struct { + PermissionEvent + + Role RoleEventArgument `xml:"role"` + Propagate bool `xml:"propagate"` +} + +func init() { + t["PermissionAddedEvent"] = reflect.TypeOf((*PermissionAddedEvent)(nil)).Elem() +} + +type PermissionEvent struct { + AuthorizationEvent + + Entity ManagedEntityEventArgument `xml:"entity"` + Principal string `xml:"principal"` + Group bool `xml:"group"` +} + +func init() { + t["PermissionEvent"] = reflect.TypeOf((*PermissionEvent)(nil)).Elem() +} + +type PermissionProfile struct { + ApplyProfile + + Key string `xml:"key"` +} + +func init() { + t["PermissionProfile"] = reflect.TypeOf((*PermissionProfile)(nil)).Elem() +} + +type PermissionRemovedEvent struct { + PermissionEvent +} + +func init() { + t["PermissionRemovedEvent"] = reflect.TypeOf((*PermissionRemovedEvent)(nil)).Elem() +} + +type PermissionUpdatedEvent struct { + PermissionEvent + + Role RoleEventArgument `xml:"role"` + Propagate bool `xml:"propagate"` +} + +func init() { + t["PermissionUpdatedEvent"] = reflect.TypeOf((*PermissionUpdatedEvent)(nil)).Elem() +} + +type PhysCompatRDMNotSupported struct { + RDMNotSupported +} + +func init() { + t["PhysCompatRDMNotSupported"] = reflect.TypeOf((*PhysCompatRDMNotSupported)(nil)).Elem() +} + +type PhysCompatRDMNotSupportedFault PhysCompatRDMNotSupported + +func init() { + t["PhysCompatRDMNotSupportedFault"] = reflect.TypeOf((*PhysCompatRDMNotSupportedFault)(nil)).Elem() +} + +type PhysicalNic struct { + DynamicData + + Key string `xml:"key,omitempty"` + Device string `xml:"device"` + Pci string `xml:"pci"` + Driver string `xml:"driver,omitempty"` + LinkSpeed *PhysicalNicLinkInfo `xml:"linkSpeed,omitempty"` + ValidLinkSpecification []PhysicalNicLinkInfo `xml:"validLinkSpecification,omitempty"` + Spec PhysicalNicSpec `xml:"spec"` + WakeOnLanSupported bool `xml:"wakeOnLanSupported"` + Mac string `xml:"mac"` + FcoeConfiguration *FcoeConfig `xml:"fcoeConfiguration,omitempty"` + VmDirectPathGen2Supported *bool `xml:"vmDirectPathGen2Supported"` + VmDirectPathGen2SupportedMode string `xml:"vmDirectPathGen2SupportedMode,omitempty"` + ResourcePoolSchedulerAllowed *bool `xml:"resourcePoolSchedulerAllowed"` + ResourcePoolSchedulerDisallowedReason []string `xml:"resourcePoolSchedulerDisallowedReason,omitempty"` + AutoNegotiateSupported *bool `xml:"autoNegotiateSupported"` +} + +func init() { + t["PhysicalNic"] = reflect.TypeOf((*PhysicalNic)(nil)).Elem() +} + +type PhysicalNicCdpDeviceCapability struct { + DynamicData + + Router bool `xml:"router"` + TransparentBridge bool `xml:"transparentBridge"` + SourceRouteBridge bool `xml:"sourceRouteBridge"` + NetworkSwitch bool `xml:"networkSwitch"` + Host bool `xml:"host"` + IgmpEnabled bool `xml:"igmpEnabled"` + Repeater bool `xml:"repeater"` +} + +func init() { + t["PhysicalNicCdpDeviceCapability"] = reflect.TypeOf((*PhysicalNicCdpDeviceCapability)(nil)).Elem() +} + +type PhysicalNicCdpInfo struct { + DynamicData + + CdpVersion int `xml:"cdpVersion,omitempty"` + Timeout int `xml:"timeout,omitempty"` + Ttl int `xml:"ttl,omitempty"` + Samples 
int `xml:"samples,omitempty"` + DevId string `xml:"devId,omitempty"` + Address string `xml:"address,omitempty"` + PortId string `xml:"portId,omitempty"` + DeviceCapability *PhysicalNicCdpDeviceCapability `xml:"deviceCapability,omitempty"` + SoftwareVersion string `xml:"softwareVersion,omitempty"` + HardwarePlatform string `xml:"hardwarePlatform,omitempty"` + IpPrefix string `xml:"ipPrefix,omitempty"` + IpPrefixLen int `xml:"ipPrefixLen,omitempty"` + Vlan int `xml:"vlan,omitempty"` + FullDuplex *bool `xml:"fullDuplex"` + Mtu int `xml:"mtu,omitempty"` + SystemName string `xml:"systemName,omitempty"` + SystemOID string `xml:"systemOID,omitempty"` + MgmtAddr string `xml:"mgmtAddr,omitempty"` + Location string `xml:"location,omitempty"` +} + +func init() { + t["PhysicalNicCdpInfo"] = reflect.TypeOf((*PhysicalNicCdpInfo)(nil)).Elem() +} + +type PhysicalNicConfig struct { + DynamicData + + Device string `xml:"device"` + Spec PhysicalNicSpec `xml:"spec"` +} + +func init() { + t["PhysicalNicConfig"] = reflect.TypeOf((*PhysicalNicConfig)(nil)).Elem() +} + +type PhysicalNicHint struct { + DynamicData + + VlanId int `xml:"vlanId,omitempty"` +} + +func init() { + t["PhysicalNicHint"] = reflect.TypeOf((*PhysicalNicHint)(nil)).Elem() +} + +type PhysicalNicHintInfo struct { + DynamicData + + Device string `xml:"device"` + Subnet []PhysicalNicIpHint `xml:"subnet,omitempty"` + Network []PhysicalNicNameHint `xml:"network,omitempty"` + ConnectedSwitchPort *PhysicalNicCdpInfo `xml:"connectedSwitchPort,omitempty"` + LldpInfo *LinkLayerDiscoveryProtocolInfo `xml:"lldpInfo,omitempty"` +} + +func init() { + t["PhysicalNicHintInfo"] = reflect.TypeOf((*PhysicalNicHintInfo)(nil)).Elem() +} + +type PhysicalNicIpHint struct { + PhysicalNicHint + + IpSubnet string `xml:"ipSubnet"` +} + +func init() { + t["PhysicalNicIpHint"] = reflect.TypeOf((*PhysicalNicIpHint)(nil)).Elem() +} + +type PhysicalNicLinkInfo struct { + DynamicData + + SpeedMb int `xml:"speedMb"` + Duplex bool `xml:"duplex"` +} + +func init() { + t["PhysicalNicLinkInfo"] = reflect.TypeOf((*PhysicalNicLinkInfo)(nil)).Elem() +} + +type PhysicalNicNameHint struct { + PhysicalNicHint + + Network string `xml:"network"` +} + +func init() { + t["PhysicalNicNameHint"] = reflect.TypeOf((*PhysicalNicNameHint)(nil)).Elem() +} + +type PhysicalNicProfile struct { + ApplyProfile + + Key string `xml:"key"` +} + +func init() { + t["PhysicalNicProfile"] = reflect.TypeOf((*PhysicalNicProfile)(nil)).Elem() +} + +type PhysicalNicSpec struct { + DynamicData + + Ip *HostIpConfig `xml:"ip,omitempty"` + LinkSpeed *PhysicalNicLinkInfo `xml:"linkSpeed,omitempty"` +} + +func init() { + t["PhysicalNicSpec"] = reflect.TypeOf((*PhysicalNicSpec)(nil)).Elem() +} + +type PlaceVm PlaceVmRequestType + +func init() { + t["PlaceVm"] = reflect.TypeOf((*PlaceVm)(nil)).Elem() +} + +type PlaceVmRequestType struct { + This ManagedObjectReference `xml:"_this"` + PlacementSpec PlacementSpec `xml:"placementSpec"` +} + +func init() { + t["PlaceVmRequestType"] = reflect.TypeOf((*PlaceVmRequestType)(nil)).Elem() +} + +type PlaceVmResponse struct { + Returnval PlacementResult `xml:"returnval"` +} + +type PlacementAction struct { + ClusterAction + + Vm *ManagedObjectReference `xml:"vm,omitempty"` + TargetHost *ManagedObjectReference `xml:"targetHost,omitempty"` + RelocateSpec *VirtualMachineRelocateSpec `xml:"relocateSpec,omitempty"` +} + +func init() { + t["PlacementAction"] = reflect.TypeOf((*PlacementAction)(nil)).Elem() +} + +type PlacementAffinityRule struct { + DynamicData + + RuleType string 
`xml:"ruleType"` + RuleScope string `xml:"ruleScope"` + Vms []ManagedObjectReference `xml:"vms,omitempty"` + Keys []string `xml:"keys,omitempty"` +} + +func init() { + t["PlacementAffinityRule"] = reflect.TypeOf((*PlacementAffinityRule)(nil)).Elem() +} + +type PlacementRankResult struct { + DynamicData + + Key string `xml:"key"` + Candidate ManagedObjectReference `xml:"candidate"` + ReservedSpaceMB int64 `xml:"reservedSpaceMB"` + UsedSpaceMB int64 `xml:"usedSpaceMB"` + TotalSpaceMB int64 `xml:"totalSpaceMB"` + Utilization float64 `xml:"utilization"` + Faults []LocalizedMethodFault `xml:"faults,omitempty"` +} + +func init() { + t["PlacementRankResult"] = reflect.TypeOf((*PlacementRankResult)(nil)).Elem() +} + +type PlacementRankSpec struct { + DynamicData + + Specs []PlacementSpec `xml:"specs"` + Clusters []ManagedObjectReference `xml:"clusters"` + Rules []PlacementAffinityRule `xml:"rules,omitempty"` + PlacementRankByVm []StorageDrsPlacementRankVmSpec `xml:"placementRankByVm,omitempty"` +} + +func init() { + t["PlacementRankSpec"] = reflect.TypeOf((*PlacementRankSpec)(nil)).Elem() +} + +type PlacementResult struct { + DynamicData + + Recommendations []ClusterRecommendation `xml:"recommendations,omitempty"` + DrsFault *ClusterDrsFaults `xml:"drsFault,omitempty"` +} + +func init() { + t["PlacementResult"] = reflect.TypeOf((*PlacementResult)(nil)).Elem() +} + +type PlacementSpec struct { + DynamicData + + Priority VirtualMachineMovePriority `xml:"priority,omitempty"` + Vm *ManagedObjectReference `xml:"vm,omitempty"` + ConfigSpec *VirtualMachineConfigSpec `xml:"configSpec,omitempty"` + RelocateSpec *VirtualMachineRelocateSpec `xml:"relocateSpec,omitempty"` + Hosts []ManagedObjectReference `xml:"hosts,omitempty"` + Datastores []ManagedObjectReference `xml:"datastores,omitempty"` + StoragePods []ManagedObjectReference `xml:"storagePods,omitempty"` + DisallowPrerequisiteMoves *bool `xml:"disallowPrerequisiteMoves"` + Rules []BaseClusterRuleInfo `xml:"rules,omitempty,typeattr"` + Key string `xml:"key,omitempty"` + PlacementType string `xml:"placementType,omitempty"` + CloneSpec *VirtualMachineCloneSpec `xml:"cloneSpec,omitempty"` + CloneName string `xml:"cloneName,omitempty"` +} + +func init() { + t["PlacementSpec"] = reflect.TypeOf((*PlacementSpec)(nil)).Elem() +} + +type PlatformConfigFault struct { + HostConfigFault + + Text string `xml:"text"` +} + +func init() { + t["PlatformConfigFault"] = reflect.TypeOf((*PlatformConfigFault)(nil)).Elem() +} + +type PlatformConfigFaultFault BasePlatformConfigFault + +func init() { + t["PlatformConfigFaultFault"] = reflect.TypeOf((*PlatformConfigFaultFault)(nil)).Elem() +} + +type PnicUplinkProfile struct { + ApplyProfile + + Key string `xml:"key"` +} + +func init() { + t["PnicUplinkProfile"] = reflect.TypeOf((*PnicUplinkProfile)(nil)).Elem() +} + +type PodDiskLocator struct { + DynamicData + + DiskId int `xml:"diskId"` + DiskMoveType string `xml:"diskMoveType,omitempty"` + DiskBackingInfo BaseVirtualDeviceBackingInfo `xml:"diskBackingInfo,omitempty,typeattr"` + Profile []BaseVirtualMachineProfileSpec `xml:"profile,omitempty,typeattr"` +} + +func init() { + t["PodDiskLocator"] = reflect.TypeOf((*PodDiskLocator)(nil)).Elem() +} + +type PodStorageDrsEntry struct { + DynamicData + + StorageDrsConfig StorageDrsConfigInfo `xml:"storageDrsConfig"` + Recommendation []ClusterRecommendation `xml:"recommendation,omitempty"` + DrsFault []ClusterDrsFaults `xml:"drsFault,omitempty"` + ActionHistory []ClusterActionHistory `xml:"actionHistory,omitempty"` +} + +func init() 
{ + t["PodStorageDrsEntry"] = reflect.TypeOf((*PodStorageDrsEntry)(nil)).Elem() +} + +type PolicyOption struct { + DynamicData + + Id string `xml:"id"` + Parameter []KeyAnyValue `xml:"parameter,omitempty"` +} + +func init() { + t["PolicyOption"] = reflect.TypeOf((*PolicyOption)(nil)).Elem() +} + +type PortGroupProfile struct { + ApplyProfile + + Key string `xml:"key"` + Name string `xml:"name"` + Vlan VlanProfile `xml:"vlan"` + Vswitch VirtualSwitchSelectionProfile `xml:"vswitch"` + NetworkPolicy NetworkPolicyProfile `xml:"networkPolicy"` +} + +func init() { + t["PortGroupProfile"] = reflect.TypeOf((*PortGroupProfile)(nil)).Elem() +} + +type PosixUserSearchResult struct { + UserSearchResult + + Id int `xml:"id"` + ShellAccess *bool `xml:"shellAccess"` +} + +func init() { + t["PosixUserSearchResult"] = reflect.TypeOf((*PosixUserSearchResult)(nil)).Elem() +} + +type PostEvent PostEventRequestType + +func init() { + t["PostEvent"] = reflect.TypeOf((*PostEvent)(nil)).Elem() +} + +type PostEventRequestType struct { + This ManagedObjectReference `xml:"_this"` + EventToPost BaseEvent `xml:"eventToPost,typeattr"` + TaskInfo *TaskInfo `xml:"taskInfo,omitempty"` +} + +func init() { + t["PostEventRequestType"] = reflect.TypeOf((*PostEventRequestType)(nil)).Elem() +} + +type PostEventResponse struct { +} + +type PowerDownHostToStandByRequestType struct { + This ManagedObjectReference `xml:"_this"` + TimeoutSec int `xml:"timeoutSec"` + EvacuatePoweredOffVms *bool `xml:"evacuatePoweredOffVms"` +} + +func init() { + t["PowerDownHostToStandByRequestType"] = reflect.TypeOf((*PowerDownHostToStandByRequestType)(nil)).Elem() +} + +type PowerDownHostToStandBy_Task PowerDownHostToStandByRequestType + +func init() { + t["PowerDownHostToStandBy_Task"] = reflect.TypeOf((*PowerDownHostToStandBy_Task)(nil)).Elem() +} + +type PowerDownHostToStandBy_TaskResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + +type PowerOffVAppRequestType struct { + This ManagedObjectReference `xml:"_this"` + Force bool `xml:"force"` +} + +func init() { + t["PowerOffVAppRequestType"] = reflect.TypeOf((*PowerOffVAppRequestType)(nil)).Elem() +} + +type PowerOffVApp_Task PowerOffVAppRequestType + +func init() { + t["PowerOffVApp_Task"] = reflect.TypeOf((*PowerOffVApp_Task)(nil)).Elem() +} + +type PowerOffVApp_TaskResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + +type PowerOffVMRequestType struct { + This ManagedObjectReference `xml:"_this"` +} + +func init() { + t["PowerOffVMRequestType"] = reflect.TypeOf((*PowerOffVMRequestType)(nil)).Elem() +} + +type PowerOffVM_Task PowerOffVMRequestType + +func init() { + t["PowerOffVM_Task"] = reflect.TypeOf((*PowerOffVM_Task)(nil)).Elem() +} + +type PowerOffVM_TaskResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + +type PowerOnFtSecondaryFailed struct { + VmFaultToleranceIssue + + Vm ManagedObjectReference `xml:"vm"` + VmName string `xml:"vmName"` + HostSelectionBy FtIssuesOnHostHostSelectionType `xml:"hostSelectionBy"` + HostErrors []LocalizedMethodFault `xml:"hostErrors,omitempty"` + RootCause LocalizedMethodFault `xml:"rootCause"` +} + +func init() { + t["PowerOnFtSecondaryFailed"] = reflect.TypeOf((*PowerOnFtSecondaryFailed)(nil)).Elem() +} + +type PowerOnFtSecondaryFailedFault PowerOnFtSecondaryFailed + +func init() { + t["PowerOnFtSecondaryFailedFault"] = reflect.TypeOf((*PowerOnFtSecondaryFailedFault)(nil)).Elem() +} + +type PowerOnFtSecondaryTimedout struct { + Timedout + + Vm ManagedObjectReference `xml:"vm"` + VmName 
string `xml:"vmName"` + Timeout int `xml:"timeout"` +} + +func init() { + t["PowerOnFtSecondaryTimedout"] = reflect.TypeOf((*PowerOnFtSecondaryTimedout)(nil)).Elem() +} + +type PowerOnFtSecondaryTimedoutFault PowerOnFtSecondaryTimedout + +func init() { + t["PowerOnFtSecondaryTimedoutFault"] = reflect.TypeOf((*PowerOnFtSecondaryTimedoutFault)(nil)).Elem() +} + +type PowerOnMultiVMRequestType struct { + This ManagedObjectReference `xml:"_this"` + Vm []ManagedObjectReference `xml:"vm"` + Option []BaseOptionValue `xml:"option,omitempty,typeattr"` +} + +func init() { + t["PowerOnMultiVMRequestType"] = reflect.TypeOf((*PowerOnMultiVMRequestType)(nil)).Elem() +} + +type PowerOnMultiVM_Task PowerOnMultiVMRequestType + +func init() { + t["PowerOnMultiVM_Task"] = reflect.TypeOf((*PowerOnMultiVM_Task)(nil)).Elem() +} + +type PowerOnMultiVM_TaskResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + +type PowerOnVAppRequestType struct { + This ManagedObjectReference `xml:"_this"` +} + +func init() { + t["PowerOnVAppRequestType"] = reflect.TypeOf((*PowerOnVAppRequestType)(nil)).Elem() +} + +type PowerOnVApp_Task PowerOnVAppRequestType + +func init() { + t["PowerOnVApp_Task"] = reflect.TypeOf((*PowerOnVApp_Task)(nil)).Elem() +} + +type PowerOnVApp_TaskResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + +type PowerOnVMRequestType struct { + This ManagedObjectReference `xml:"_this"` + Host *ManagedObjectReference `xml:"host,omitempty"` +} + +func init() { + t["PowerOnVMRequestType"] = reflect.TypeOf((*PowerOnVMRequestType)(nil)).Elem() +} + +type PowerOnVM_Task PowerOnVMRequestType + +func init() { + t["PowerOnVM_Task"] = reflect.TypeOf((*PowerOnVM_Task)(nil)).Elem() +} + +type PowerOnVM_TaskResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + +type PowerSystemCapability struct { + DynamicData + + AvailablePolicy []HostPowerPolicy `xml:"availablePolicy"` +} + +func init() { + t["PowerSystemCapability"] = reflect.TypeOf((*PowerSystemCapability)(nil)).Elem() +} + +type PowerSystemInfo struct { + DynamicData + + CurrentPolicy HostPowerPolicy `xml:"currentPolicy"` +} + +func init() { + t["PowerSystemInfo"] = reflect.TypeOf((*PowerSystemInfo)(nil)).Elem() +} + +type PowerUpHostFromStandByRequestType struct { + This ManagedObjectReference `xml:"_this"` + TimeoutSec int `xml:"timeoutSec"` +} + +func init() { + t["PowerUpHostFromStandByRequestType"] = reflect.TypeOf((*PowerUpHostFromStandByRequestType)(nil)).Elem() +} + +type PowerUpHostFromStandBy_Task PowerUpHostFromStandByRequestType + +func init() { + t["PowerUpHostFromStandBy_Task"] = reflect.TypeOf((*PowerUpHostFromStandBy_Task)(nil)).Elem() +} + +type PowerUpHostFromStandBy_TaskResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + +type PrivilegeAvailability struct { + DynamicData + + PrivId string `xml:"privId"` + IsGranted bool `xml:"isGranted"` +} + +func init() { + t["PrivilegeAvailability"] = reflect.TypeOf((*PrivilegeAvailability)(nil)).Elem() +} + +type PrivilegePolicyDef struct { + DynamicData + + CreatePrivilege string `xml:"createPrivilege"` + ReadPrivilege string `xml:"readPrivilege"` + UpdatePrivilege string `xml:"updatePrivilege"` + DeletePrivilege string `xml:"deletePrivilege"` +} + +func init() { + t["PrivilegePolicyDef"] = reflect.TypeOf((*PrivilegePolicyDef)(nil)).Elem() +} + +type ProductComponentInfo struct { + DynamicData + + Id string `xml:"id"` + Name string `xml:"name"` + Version string `xml:"version"` + Release int `xml:"release"` +} + +func 
init() { + t["ProductComponentInfo"] = reflect.TypeOf((*ProductComponentInfo)(nil)).Elem() +} + +type ProfileApplyProfileElement struct { + ApplyProfile + + Key string `xml:"key"` +} + +func init() { + t["ProfileApplyProfileElement"] = reflect.TypeOf((*ProfileApplyProfileElement)(nil)).Elem() +} + +type ProfileApplyProfileProperty struct { + DynamicData + + PropertyName string `xml:"propertyName"` + Array bool `xml:"array"` + Profile []BaseApplyProfile `xml:"profile,omitempty,typeattr"` +} + +func init() { + t["ProfileApplyProfileProperty"] = reflect.TypeOf((*ProfileApplyProfileProperty)(nil)).Elem() +} + +type ProfileAssociatedEvent struct { + ProfileEvent +} + +func init() { + t["ProfileAssociatedEvent"] = reflect.TypeOf((*ProfileAssociatedEvent)(nil)).Elem() +} + +type ProfileChangedEvent struct { + ProfileEvent +} + +func init() { + t["ProfileChangedEvent"] = reflect.TypeOf((*ProfileChangedEvent)(nil)).Elem() +} + +type ProfileCompositeExpression struct { + ProfileExpression + + Operator string `xml:"operator"` + ExpressionName []string `xml:"expressionName"` +} + +func init() { + t["ProfileCompositeExpression"] = reflect.TypeOf((*ProfileCompositeExpression)(nil)).Elem() +} + +type ProfileCompositePolicyOptionMetadata struct { + ProfilePolicyOptionMetadata + + Option []string `xml:"option"` +} + +func init() { + t["ProfileCompositePolicyOptionMetadata"] = reflect.TypeOf((*ProfileCompositePolicyOptionMetadata)(nil)).Elem() +} + +type ProfileConfigInfo struct { + DynamicData + + Name string `xml:"name"` + Annotation string `xml:"annotation,omitempty"` + Enabled bool `xml:"enabled"` +} + +func init() { + t["ProfileConfigInfo"] = reflect.TypeOf((*ProfileConfigInfo)(nil)).Elem() +} + +type ProfileCreateSpec struct { + DynamicData + + Name string `xml:"name,omitempty"` + Annotation string `xml:"annotation,omitempty"` + Enabled *bool `xml:"enabled"` +} + +func init() { + t["ProfileCreateSpec"] = reflect.TypeOf((*ProfileCreateSpec)(nil)).Elem() +} + +type ProfileCreatedEvent struct { + ProfileEvent +} + +func init() { + t["ProfileCreatedEvent"] = reflect.TypeOf((*ProfileCreatedEvent)(nil)).Elem() +} + +type ProfileDeferredPolicyOptionParameter struct { + DynamicData + + InputPath ProfilePropertyPath `xml:"inputPath"` + Parameter []KeyAnyValue `xml:"parameter,omitempty"` +} + +func init() { + t["ProfileDeferredPolicyOptionParameter"] = reflect.TypeOf((*ProfileDeferredPolicyOptionParameter)(nil)).Elem() +} + +type ProfileDescription struct { + DynamicData + + Section []ProfileDescriptionSection `xml:"section"` +} + +func init() { + t["ProfileDescription"] = reflect.TypeOf((*ProfileDescription)(nil)).Elem() +} + +type ProfileDescriptionSection struct { + DynamicData + + Description ExtendedElementDescription `xml:"description"` + Message []LocalizableMessage `xml:"message,omitempty"` +} + +func init() { + t["ProfileDescriptionSection"] = reflect.TypeOf((*ProfileDescriptionSection)(nil)).Elem() +} + +type ProfileDissociatedEvent struct { + ProfileEvent +} + +func init() { + t["ProfileDissociatedEvent"] = reflect.TypeOf((*ProfileDissociatedEvent)(nil)).Elem() +} + +type ProfileEvent struct { + Event + + Profile ProfileEventArgument `xml:"profile"` +} + +func init() { + t["ProfileEvent"] = reflect.TypeOf((*ProfileEvent)(nil)).Elem() +} + +type ProfileEventArgument struct { + EventArgument + + Profile ManagedObjectReference `xml:"profile"` + Name string `xml:"name"` +} + +func init() { + t["ProfileEventArgument"] = reflect.TypeOf((*ProfileEventArgument)(nil)).Elem() +} + +type ProfileExecuteError 
struct { + DynamicData + + Path *ProfilePropertyPath `xml:"path,omitempty"` + Message LocalizableMessage `xml:"message"` +} + +func init() { + t["ProfileExecuteError"] = reflect.TypeOf((*ProfileExecuteError)(nil)).Elem() +} + +type ProfileExecuteResult struct { + DynamicData + + Status string `xml:"status"` + ConfigSpec *HostConfigSpec `xml:"configSpec,omitempty"` + InapplicablePath []string `xml:"inapplicablePath,omitempty"` + RequireInput []ProfileDeferredPolicyOptionParameter `xml:"requireInput,omitempty"` + Error []ProfileExecuteError `xml:"error,omitempty"` +} + +func init() { + t["ProfileExecuteResult"] = reflect.TypeOf((*ProfileExecuteResult)(nil)).Elem() +} + +type ProfileExpression struct { + DynamicData + + Id string `xml:"id"` + DisplayName string `xml:"displayName"` + Negated bool `xml:"negated"` +} + +func init() { + t["ProfileExpression"] = reflect.TypeOf((*ProfileExpression)(nil)).Elem() +} + +type ProfileExpressionMetadata struct { + DynamicData + + ExpressionId ExtendedElementDescription `xml:"expressionId"` + Parameter []ProfileParameterMetadata `xml:"parameter,omitempty"` +} + +func init() { + t["ProfileExpressionMetadata"] = reflect.TypeOf((*ProfileExpressionMetadata)(nil)).Elem() +} + +type ProfileMetadata struct { + DynamicData + + Key string `xml:"key"` + ProfileTypeName string `xml:"profileTypeName,omitempty"` + Description *ExtendedDescription `xml:"description,omitempty"` + SortSpec []ProfileMetadataProfileSortSpec `xml:"sortSpec,omitempty"` + ProfileCategory string `xml:"profileCategory,omitempty"` + ProfileComponent string `xml:"profileComponent,omitempty"` +} + +func init() { + t["ProfileMetadata"] = reflect.TypeOf((*ProfileMetadata)(nil)).Elem() +} + +type ProfileMetadataProfileSortSpec struct { + DynamicData + + PolicyId string `xml:"policyId"` + Parameter string `xml:"parameter"` +} + +func init() { + t["ProfileMetadataProfileSortSpec"] = reflect.TypeOf((*ProfileMetadataProfileSortSpec)(nil)).Elem() +} + +type ProfileParameterMetadata struct { + DynamicData + + Id ExtendedElementDescription `xml:"id"` + Type string `xml:"type"` + Optional bool `xml:"optional"` + DefaultValue AnyType `xml:"defaultValue,omitempty,typeattr"` +} + +func init() { + t["ProfileParameterMetadata"] = reflect.TypeOf((*ProfileParameterMetadata)(nil)).Elem() +} + +type ProfilePolicy struct { + DynamicData + + Id string `xml:"id"` + PolicyOption BasePolicyOption `xml:"policyOption,typeattr"` +} + +func init() { + t["ProfilePolicy"] = reflect.TypeOf((*ProfilePolicy)(nil)).Elem() +} + +type ProfilePolicyMetadata struct { + DynamicData + + Id ExtendedElementDescription `xml:"id"` + PossibleOption []BaseProfilePolicyOptionMetadata `xml:"possibleOption,typeattr"` +} + +func init() { + t["ProfilePolicyMetadata"] = reflect.TypeOf((*ProfilePolicyMetadata)(nil)).Elem() +} + +type ProfilePolicyOptionMetadata struct { + DynamicData + + Id ExtendedElementDescription `xml:"id"` + Parameter []ProfileParameterMetadata `xml:"parameter,omitempty"` +} + +func init() { + t["ProfilePolicyOptionMetadata"] = reflect.TypeOf((*ProfilePolicyOptionMetadata)(nil)).Elem() +} + +type ProfileProfileStructure struct { + DynamicData + + ProfileTypeName string `xml:"profileTypeName"` + Child []ProfileProfileStructureProperty `xml:"child,omitempty"` +} + +func init() { + t["ProfileProfileStructure"] = reflect.TypeOf((*ProfileProfileStructure)(nil)).Elem() +} + +type ProfileProfileStructureProperty struct { + DynamicData + + PropertyName string `xml:"propertyName"` + Array bool `xml:"array"` + Element 
ProfileProfileStructure `xml:"element"` +} + +func init() { + t["ProfileProfileStructureProperty"] = reflect.TypeOf((*ProfileProfileStructureProperty)(nil)).Elem() +} + +type ProfilePropertyPath struct { + DynamicData + + ProfilePath string `xml:"profilePath"` + PolicyId string `xml:"policyId,omitempty"` + ParameterId string `xml:"parameterId,omitempty"` +} + +func init() { + t["ProfilePropertyPath"] = reflect.TypeOf((*ProfilePropertyPath)(nil)).Elem() +} + +type ProfileReferenceHostChangedEvent struct { + ProfileEvent + + ReferenceHost *ManagedObjectReference `xml:"referenceHost,omitempty"` +} + +func init() { + t["ProfileReferenceHostChangedEvent"] = reflect.TypeOf((*ProfileReferenceHostChangedEvent)(nil)).Elem() +} + +type ProfileRemovedEvent struct { + ProfileEvent +} + +func init() { + t["ProfileRemovedEvent"] = reflect.TypeOf((*ProfileRemovedEvent)(nil)).Elem() +} + +type ProfileSerializedCreateSpec struct { + ProfileCreateSpec + + ProfileConfigString string `xml:"profileConfigString"` +} + +func init() { + t["ProfileSerializedCreateSpec"] = reflect.TypeOf((*ProfileSerializedCreateSpec)(nil)).Elem() +} + +type ProfileSimpleExpression struct { + ProfileExpression + + ExpressionType string `xml:"expressionType"` + Parameter []KeyAnyValue `xml:"parameter,omitempty"` +} + +func init() { + t["ProfileSimpleExpression"] = reflect.TypeOf((*ProfileSimpleExpression)(nil)).Elem() +} + +type ProfileUpdateFailed struct { + VimFault + + Failure []ProfileUpdateFailedUpdateFailure `xml:"failure"` +} + +func init() { + t["ProfileUpdateFailed"] = reflect.TypeOf((*ProfileUpdateFailed)(nil)).Elem() +} + +type ProfileUpdateFailedFault ProfileUpdateFailed + +func init() { + t["ProfileUpdateFailedFault"] = reflect.TypeOf((*ProfileUpdateFailedFault)(nil)).Elem() +} + +type ProfileUpdateFailedUpdateFailure struct { + DynamicData + + ProfilePath ProfilePropertyPath `xml:"profilePath"` + ErrMsg LocalizableMessage `xml:"errMsg"` +} + +func init() { + t["ProfileUpdateFailedUpdateFailure"] = reflect.TypeOf((*ProfileUpdateFailedUpdateFailure)(nil)).Elem() +} + +type PromoteDisksRequestType struct { + This ManagedObjectReference `xml:"_this"` + Unlink bool `xml:"unlink"` + Disks []VirtualDisk `xml:"disks,omitempty"` +} + +func init() { + t["PromoteDisksRequestType"] = reflect.TypeOf((*PromoteDisksRequestType)(nil)).Elem() +} + +type PromoteDisks_Task PromoteDisksRequestType + +func init() { + t["PromoteDisks_Task"] = reflect.TypeOf((*PromoteDisks_Task)(nil)).Elem() +} + +type PromoteDisks_TaskResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + +type PropertyChange struct { + DynamicData + + Name string `xml:"name"` + Op PropertyChangeOp `xml:"op"` + Val AnyType `xml:"val,omitempty,typeattr"` +} + +func init() { + t["PropertyChange"] = reflect.TypeOf((*PropertyChange)(nil)).Elem() +} + +type PropertyFilterSpec struct { + DynamicData + + PropSet []PropertySpec `xml:"propSet"` + ObjectSet []ObjectSpec `xml:"objectSet"` + ReportMissingObjectsInResults *bool `xml:"reportMissingObjectsInResults"` +} + +func init() { + t["PropertyFilterSpec"] = reflect.TypeOf((*PropertyFilterSpec)(nil)).Elem() +} + +type PropertyFilterUpdate struct { + DynamicData + + Filter ManagedObjectReference `xml:"filter"` + ObjectSet []ObjectUpdate `xml:"objectSet,omitempty"` + MissingSet []MissingObject `xml:"missingSet,omitempty"` +} + +func init() { + t["PropertyFilterUpdate"] = reflect.TypeOf((*PropertyFilterUpdate)(nil)).Elem() +} + +type PropertySpec struct { + DynamicData + + Type string `xml:"type"` + All *bool 
`xml:"all"` + PathSet []string `xml:"pathSet,omitempty"` +} + +func init() { + t["PropertySpec"] = reflect.TypeOf((*PropertySpec)(nil)).Elem() +} + +type QueryAnswerFileStatus QueryAnswerFileStatusRequestType + +func init() { + t["QueryAnswerFileStatus"] = reflect.TypeOf((*QueryAnswerFileStatus)(nil)).Elem() +} + +type QueryAnswerFileStatusRequestType struct { + This ManagedObjectReference `xml:"_this"` + Host []ManagedObjectReference `xml:"host"` +} + +func init() { + t["QueryAnswerFileStatusRequestType"] = reflect.TypeOf((*QueryAnswerFileStatusRequestType)(nil)).Elem() +} + +type QueryAnswerFileStatusResponse struct { + Returnval []AnswerFileStatusResult `xml:"returnval,omitempty"` +} + +type QueryAssignedLicenses QueryAssignedLicensesRequestType + +func init() { + t["QueryAssignedLicenses"] = reflect.TypeOf((*QueryAssignedLicenses)(nil)).Elem() +} + +type QueryAssignedLicensesRequestType struct { + This ManagedObjectReference `xml:"_this"` + EntityId string `xml:"entityId,omitempty"` +} + +func init() { + t["QueryAssignedLicensesRequestType"] = reflect.TypeOf((*QueryAssignedLicensesRequestType)(nil)).Elem() +} + +type QueryAssignedLicensesResponse struct { + Returnval []LicenseAssignmentManagerLicenseAssignment `xml:"returnval"` +} + +type QueryAvailableDisksForVmfs QueryAvailableDisksForVmfsRequestType + +func init() { + t["QueryAvailableDisksForVmfs"] = reflect.TypeOf((*QueryAvailableDisksForVmfs)(nil)).Elem() +} + +type QueryAvailableDisksForVmfsRequestType struct { + This ManagedObjectReference `xml:"_this"` + Datastore *ManagedObjectReference `xml:"datastore,omitempty"` +} + +func init() { + t["QueryAvailableDisksForVmfsRequestType"] = reflect.TypeOf((*QueryAvailableDisksForVmfsRequestType)(nil)).Elem() +} + +type QueryAvailableDisksForVmfsResponse struct { + Returnval []HostScsiDisk `xml:"returnval,omitempty"` +} + +type QueryAvailableDvsSpec QueryAvailableDvsSpecRequestType + +func init() { + t["QueryAvailableDvsSpec"] = reflect.TypeOf((*QueryAvailableDvsSpec)(nil)).Elem() +} + +type QueryAvailableDvsSpecRequestType struct { + This ManagedObjectReference `xml:"_this"` + Recommended *bool `xml:"recommended"` +} + +func init() { + t["QueryAvailableDvsSpecRequestType"] = reflect.TypeOf((*QueryAvailableDvsSpecRequestType)(nil)).Elem() +} + +type QueryAvailableDvsSpecResponse struct { + Returnval []DistributedVirtualSwitchProductSpec `xml:"returnval,omitempty"` +} + +type QueryAvailablePartition QueryAvailablePartitionRequestType + +func init() { + t["QueryAvailablePartition"] = reflect.TypeOf((*QueryAvailablePartition)(nil)).Elem() +} + +type QueryAvailablePartitionRequestType struct { + This ManagedObjectReference `xml:"_this"` +} + +func init() { + t["QueryAvailablePartitionRequestType"] = reflect.TypeOf((*QueryAvailablePartitionRequestType)(nil)).Elem() +} + +type QueryAvailablePartitionResponse struct { + Returnval []HostDiagnosticPartition `xml:"returnval,omitempty"` +} + +type QueryAvailablePerfMetric QueryAvailablePerfMetricRequestType + +func init() { + t["QueryAvailablePerfMetric"] = reflect.TypeOf((*QueryAvailablePerfMetric)(nil)).Elem() +} + +type QueryAvailablePerfMetricRequestType struct { + This ManagedObjectReference `xml:"_this"` + Entity ManagedObjectReference `xml:"entity"` + BeginTime *time.Time `xml:"beginTime"` + EndTime *time.Time `xml:"endTime"` + IntervalId int `xml:"intervalId,omitempty"` +} + +func init() { + t["QueryAvailablePerfMetricRequestType"] = reflect.TypeOf((*QueryAvailablePerfMetricRequestType)(nil)).Elem() +} + +type 
QueryAvailablePerfMetricResponse struct { + Returnval []PerfMetricId `xml:"returnval,omitempty"` +} + +type QueryAvailableSsds QueryAvailableSsdsRequestType + +func init() { + t["QueryAvailableSsds"] = reflect.TypeOf((*QueryAvailableSsds)(nil)).Elem() +} + +type QueryAvailableSsdsRequestType struct { + This ManagedObjectReference `xml:"_this"` + VffsPath string `xml:"vffsPath,omitempty"` +} + +func init() { + t["QueryAvailableSsdsRequestType"] = reflect.TypeOf((*QueryAvailableSsdsRequestType)(nil)).Elem() +} + +type QueryAvailableSsdsResponse struct { + Returnval []HostScsiDisk `xml:"returnval,omitempty"` +} + +type QueryAvailableTimeZones QueryAvailableTimeZonesRequestType + +func init() { + t["QueryAvailableTimeZones"] = reflect.TypeOf((*QueryAvailableTimeZones)(nil)).Elem() +} + +type QueryAvailableTimeZonesRequestType struct { + This ManagedObjectReference `xml:"_this"` +} + +func init() { + t["QueryAvailableTimeZonesRequestType"] = reflect.TypeOf((*QueryAvailableTimeZonesRequestType)(nil)).Elem() +} + +type QueryAvailableTimeZonesResponse struct { + Returnval []HostDateTimeSystemTimeZone `xml:"returnval,omitempty"` +} + +type QueryBootDevices QueryBootDevicesRequestType + +func init() { + t["QueryBootDevices"] = reflect.TypeOf((*QueryBootDevices)(nil)).Elem() +} + +type QueryBootDevicesRequestType struct { + This ManagedObjectReference `xml:"_this"` +} + +func init() { + t["QueryBootDevicesRequestType"] = reflect.TypeOf((*QueryBootDevicesRequestType)(nil)).Elem() +} + +type QueryBootDevicesResponse struct { + Returnval *HostBootDeviceInfo `xml:"returnval,omitempty"` +} + +type QueryBoundVnics QueryBoundVnicsRequestType + +func init() { + t["QueryBoundVnics"] = reflect.TypeOf((*QueryBoundVnics)(nil)).Elem() +} + +type QueryBoundVnicsRequestType struct { + This ManagedObjectReference `xml:"_this"` + IScsiHbaName string `xml:"iScsiHbaName"` +} + +func init() { + t["QueryBoundVnicsRequestType"] = reflect.TypeOf((*QueryBoundVnicsRequestType)(nil)).Elem() +} + +type QueryBoundVnicsResponse struct { + Returnval []IscsiPortInfo `xml:"returnval,omitempty"` +} + +type QueryCandidateNics QueryCandidateNicsRequestType + +func init() { + t["QueryCandidateNics"] = reflect.TypeOf((*QueryCandidateNics)(nil)).Elem() +} + +type QueryCandidateNicsRequestType struct { + This ManagedObjectReference `xml:"_this"` + IScsiHbaName string `xml:"iScsiHbaName"` +} + +func init() { + t["QueryCandidateNicsRequestType"] = reflect.TypeOf((*QueryCandidateNicsRequestType)(nil)).Elem() +} + +type QueryCandidateNicsResponse struct { + Returnval []IscsiPortInfo `xml:"returnval,omitempty"` +} + +type QueryChangedDiskAreas QueryChangedDiskAreasRequestType + +func init() { + t["QueryChangedDiskAreas"] = reflect.TypeOf((*QueryChangedDiskAreas)(nil)).Elem() +} + +type QueryChangedDiskAreasRequestType struct { + This ManagedObjectReference `xml:"_this"` + Snapshot *ManagedObjectReference `xml:"snapshot,omitempty"` + DeviceKey int `xml:"deviceKey"` + StartOffset int64 `xml:"startOffset"` + ChangeId string `xml:"changeId"` +} + +func init() { + t["QueryChangedDiskAreasRequestType"] = reflect.TypeOf((*QueryChangedDiskAreasRequestType)(nil)).Elem() +} + +type QueryChangedDiskAreasResponse struct { + Returnval DiskChangeInfo `xml:"returnval"` +} + +type QueryCmmds QueryCmmdsRequestType + +func init() { + t["QueryCmmds"] = reflect.TypeOf((*QueryCmmds)(nil)).Elem() +} + +type QueryCmmdsRequestType struct { + This ManagedObjectReference `xml:"_this"` + Queries []HostVsanInternalSystemCmmdsQuery `xml:"queries"` +} + +func init() { + 
t["QueryCmmdsRequestType"] = reflect.TypeOf((*QueryCmmdsRequestType)(nil)).Elem() +} + +type QueryCmmdsResponse struct { + Returnval string `xml:"returnval"` +} + +type QueryCompatibleHostForExistingDvs QueryCompatibleHostForExistingDvsRequestType + +func init() { + t["QueryCompatibleHostForExistingDvs"] = reflect.TypeOf((*QueryCompatibleHostForExistingDvs)(nil)).Elem() +} + +type QueryCompatibleHostForExistingDvsRequestType struct { + This ManagedObjectReference `xml:"_this"` + Container ManagedObjectReference `xml:"container"` + Recursive bool `xml:"recursive"` + Dvs ManagedObjectReference `xml:"dvs"` +} + +func init() { + t["QueryCompatibleHostForExistingDvsRequestType"] = reflect.TypeOf((*QueryCompatibleHostForExistingDvsRequestType)(nil)).Elem() +} + +type QueryCompatibleHostForExistingDvsResponse struct { + Returnval []ManagedObjectReference `xml:"returnval,omitempty"` +} + +type QueryCompatibleHostForNewDvs QueryCompatibleHostForNewDvsRequestType + +func init() { + t["QueryCompatibleHostForNewDvs"] = reflect.TypeOf((*QueryCompatibleHostForNewDvs)(nil)).Elem() +} + +type QueryCompatibleHostForNewDvsRequestType struct { + This ManagedObjectReference `xml:"_this"` + Container ManagedObjectReference `xml:"container"` + Recursive bool `xml:"recursive"` + SwitchProductSpec *DistributedVirtualSwitchProductSpec `xml:"switchProductSpec,omitempty"` +} + +func init() { + t["QueryCompatibleHostForNewDvsRequestType"] = reflect.TypeOf((*QueryCompatibleHostForNewDvsRequestType)(nil)).Elem() +} + +type QueryCompatibleHostForNewDvsResponse struct { + Returnval []ManagedObjectReference `xml:"returnval,omitempty"` +} + +type QueryComplianceStatus QueryComplianceStatusRequestType + +func init() { + t["QueryComplianceStatus"] = reflect.TypeOf((*QueryComplianceStatus)(nil)).Elem() +} + +type QueryComplianceStatusRequestType struct { + This ManagedObjectReference `xml:"_this"` + Profile []ManagedObjectReference `xml:"profile,omitempty"` + Entity []ManagedObjectReference `xml:"entity,omitempty"` +} + +func init() { + t["QueryComplianceStatusRequestType"] = reflect.TypeOf((*QueryComplianceStatusRequestType)(nil)).Elem() +} + +type QueryComplianceStatusResponse struct { + Returnval []ComplianceResult `xml:"returnval,omitempty"` +} + +type QueryConfigOption QueryConfigOptionRequestType + +func init() { + t["QueryConfigOption"] = reflect.TypeOf((*QueryConfigOption)(nil)).Elem() +} + +type QueryConfigOptionDescriptor QueryConfigOptionDescriptorRequestType + +func init() { + t["QueryConfigOptionDescriptor"] = reflect.TypeOf((*QueryConfigOptionDescriptor)(nil)).Elem() +} + +type QueryConfigOptionDescriptorRequestType struct { + This ManagedObjectReference `xml:"_this"` +} + +func init() { + t["QueryConfigOptionDescriptorRequestType"] = reflect.TypeOf((*QueryConfigOptionDescriptorRequestType)(nil)).Elem() +} + +type QueryConfigOptionDescriptorResponse struct { + Returnval []VirtualMachineConfigOptionDescriptor `xml:"returnval,omitempty"` +} + +type QueryConfigOptionEx QueryConfigOptionExRequestType + +func init() { + t["QueryConfigOptionEx"] = reflect.TypeOf((*QueryConfigOptionEx)(nil)).Elem() +} + +type QueryConfigOptionExRequestType struct { + This ManagedObjectReference `xml:"_this"` + Spec *EnvironmentBrowserConfigOptionQuerySpec `xml:"spec,omitempty"` +} + +func init() { + t["QueryConfigOptionExRequestType"] = reflect.TypeOf((*QueryConfigOptionExRequestType)(nil)).Elem() +} + +type QueryConfigOptionExResponse struct { + Returnval *VirtualMachineConfigOption `xml:"returnval,omitempty"` +} + +type 
QueryConfigOptionRequestType struct { + This ManagedObjectReference `xml:"_this"` + Key string `xml:"key,omitempty"` + Host *ManagedObjectReference `xml:"host,omitempty"` +} + +func init() { + t["QueryConfigOptionRequestType"] = reflect.TypeOf((*QueryConfigOptionRequestType)(nil)).Elem() +} + +type QueryConfigOptionResponse struct { + Returnval *VirtualMachineConfigOption `xml:"returnval,omitempty"` +} + +type QueryConfigTarget QueryConfigTargetRequestType + +func init() { + t["QueryConfigTarget"] = reflect.TypeOf((*QueryConfigTarget)(nil)).Elem() +} + +type QueryConfigTargetRequestType struct { + This ManagedObjectReference `xml:"_this"` + Host *ManagedObjectReference `xml:"host,omitempty"` +} + +func init() { + t["QueryConfigTargetRequestType"] = reflect.TypeOf((*QueryConfigTargetRequestType)(nil)).Elem() +} + +type QueryConfigTargetResponse struct { + Returnval *ConfigTarget `xml:"returnval,omitempty"` +} + +type QueryConfiguredModuleOptionString QueryConfiguredModuleOptionStringRequestType + +func init() { + t["QueryConfiguredModuleOptionString"] = reflect.TypeOf((*QueryConfiguredModuleOptionString)(nil)).Elem() +} + +type QueryConfiguredModuleOptionStringRequestType struct { + This ManagedObjectReference `xml:"_this"` + Name string `xml:"name"` +} + +func init() { + t["QueryConfiguredModuleOptionStringRequestType"] = reflect.TypeOf((*QueryConfiguredModuleOptionStringRequestType)(nil)).Elem() +} + +type QueryConfiguredModuleOptionStringResponse struct { + Returnval string `xml:"returnval"` +} + +type QueryConnectionInfo QueryConnectionInfoRequestType + +func init() { + t["QueryConnectionInfo"] = reflect.TypeOf((*QueryConnectionInfo)(nil)).Elem() +} + +type QueryConnectionInfoRequestType struct { + This ManagedObjectReference `xml:"_this"` + Hostname string `xml:"hostname"` + Port int `xml:"port"` + Username string `xml:"username"` + Password string `xml:"password"` + SslThumbprint string `xml:"sslThumbprint,omitempty"` +} + +func init() { + t["QueryConnectionInfoRequestType"] = reflect.TypeOf((*QueryConnectionInfoRequestType)(nil)).Elem() +} + +type QueryConnectionInfoResponse struct { + Returnval HostConnectInfo `xml:"returnval"` +} + +type QueryConnectionInfoViaSpec QueryConnectionInfoViaSpecRequestType + +func init() { + t["QueryConnectionInfoViaSpec"] = reflect.TypeOf((*QueryConnectionInfoViaSpec)(nil)).Elem() +} + +type QueryConnectionInfoViaSpecRequestType struct { + This ManagedObjectReference `xml:"_this"` + Spec HostConnectSpec `xml:"spec"` +} + +func init() { + t["QueryConnectionInfoViaSpecRequestType"] = reflect.TypeOf((*QueryConnectionInfoViaSpecRequestType)(nil)).Elem() +} + +type QueryConnectionInfoViaSpecResponse struct { + Returnval HostConnectInfo `xml:"returnval"` +} + +type QueryDatastorePerformanceSummary QueryDatastorePerformanceSummaryRequestType + +func init() { + t["QueryDatastorePerformanceSummary"] = reflect.TypeOf((*QueryDatastorePerformanceSummary)(nil)).Elem() +} + +type QueryDatastorePerformanceSummaryRequestType struct { + This ManagedObjectReference `xml:"_this"` + Datastore ManagedObjectReference `xml:"datastore"` +} + +func init() { + t["QueryDatastorePerformanceSummaryRequestType"] = reflect.TypeOf((*QueryDatastorePerformanceSummaryRequestType)(nil)).Elem() +} + +type QueryDatastorePerformanceSummaryResponse struct { + Returnval []StoragePerformanceSummary `xml:"returnval,omitempty"` +} + +type QueryDateTime QueryDateTimeRequestType + +func init() { + t["QueryDateTime"] = reflect.TypeOf((*QueryDateTime)(nil)).Elem() +} + +type QueryDateTimeRequestType 
struct { + This ManagedObjectReference `xml:"_this"` +} + +func init() { + t["QueryDateTimeRequestType"] = reflect.TypeOf((*QueryDateTimeRequestType)(nil)).Elem() +} + +type QueryDateTimeResponse struct { + Returnval time.Time `xml:"returnval"` +} + +type QueryDescriptions QueryDescriptionsRequestType + +func init() { + t["QueryDescriptions"] = reflect.TypeOf((*QueryDescriptions)(nil)).Elem() +} + +type QueryDescriptionsRequestType struct { + This ManagedObjectReference `xml:"_this"` + Host *ManagedObjectReference `xml:"host,omitempty"` +} + +func init() { + t["QueryDescriptionsRequestType"] = reflect.TypeOf((*QueryDescriptionsRequestType)(nil)).Elem() +} + +type QueryDescriptionsResponse struct { + Returnval []DiagnosticManagerLogDescriptor `xml:"returnval,omitempty"` +} + +type QueryDisksForVsan QueryDisksForVsanRequestType + +func init() { + t["QueryDisksForVsan"] = reflect.TypeOf((*QueryDisksForVsan)(nil)).Elem() +} + +type QueryDisksForVsanRequestType struct { + This ManagedObjectReference `xml:"_this"` + CanonicalName []string `xml:"canonicalName,omitempty"` +} + +func init() { + t["QueryDisksForVsanRequestType"] = reflect.TypeOf((*QueryDisksForVsanRequestType)(nil)).Elem() +} + +type QueryDisksForVsanResponse struct { + Returnval []VsanHostDiskResult `xml:"returnval,omitempty"` +} + +type QueryDisksUsingFilter QueryDisksUsingFilterRequestType + +func init() { + t["QueryDisksUsingFilter"] = reflect.TypeOf((*QueryDisksUsingFilter)(nil)).Elem() +} + +type QueryDisksUsingFilterRequestType struct { + This ManagedObjectReference `xml:"_this"` + FilterId string `xml:"filterId"` + CompRes ManagedObjectReference `xml:"compRes"` +} + +func init() { + t["QueryDisksUsingFilterRequestType"] = reflect.TypeOf((*QueryDisksUsingFilterRequestType)(nil)).Elem() +} + +type QueryDisksUsingFilterResponse struct { + Returnval []VirtualDiskId `xml:"returnval"` +} + +type QueryDvsByUuid QueryDvsByUuidRequestType + +func init() { + t["QueryDvsByUuid"] = reflect.TypeOf((*QueryDvsByUuid)(nil)).Elem() +} + +type QueryDvsByUuidRequestType struct { + This ManagedObjectReference `xml:"_this"` + Uuid string `xml:"uuid"` +} + +func init() { + t["QueryDvsByUuidRequestType"] = reflect.TypeOf((*QueryDvsByUuidRequestType)(nil)).Elem() +} + +type QueryDvsByUuidResponse struct { + Returnval *ManagedObjectReference `xml:"returnval,omitempty"` +} + +type QueryDvsCheckCompatibility QueryDvsCheckCompatibilityRequestType + +func init() { + t["QueryDvsCheckCompatibility"] = reflect.TypeOf((*QueryDvsCheckCompatibility)(nil)).Elem() +} + +type QueryDvsCheckCompatibilityRequestType struct { + This ManagedObjectReference `xml:"_this"` + HostContainer DistributedVirtualSwitchManagerHostContainer `xml:"hostContainer"` + DvsProductSpec *DistributedVirtualSwitchManagerDvsProductSpec `xml:"dvsProductSpec,omitempty"` + HostFilterSpec []BaseDistributedVirtualSwitchManagerHostDvsFilterSpec `xml:"hostFilterSpec,omitempty,typeattr"` +} + +func init() { + t["QueryDvsCheckCompatibilityRequestType"] = reflect.TypeOf((*QueryDvsCheckCompatibilityRequestType)(nil)).Elem() +} + +type QueryDvsCheckCompatibilityResponse struct { + Returnval []DistributedVirtualSwitchManagerCompatibilityResult `xml:"returnval,omitempty"` +} + +type QueryDvsCompatibleHostSpec QueryDvsCompatibleHostSpecRequestType + +func init() { + t["QueryDvsCompatibleHostSpec"] = reflect.TypeOf((*QueryDvsCompatibleHostSpec)(nil)).Elem() +} + +type QueryDvsCompatibleHostSpecRequestType struct { + This ManagedObjectReference `xml:"_this"` + SwitchProductSpec 
*DistributedVirtualSwitchProductSpec `xml:"switchProductSpec,omitempty"` +} + +func init() { + t["QueryDvsCompatibleHostSpecRequestType"] = reflect.TypeOf((*QueryDvsCompatibleHostSpecRequestType)(nil)).Elem() +} + +type QueryDvsCompatibleHostSpecResponse struct { + Returnval []DistributedVirtualSwitchHostProductSpec `xml:"returnval,omitempty"` +} + +type QueryDvsConfigTarget QueryDvsConfigTargetRequestType + +func init() { + t["QueryDvsConfigTarget"] = reflect.TypeOf((*QueryDvsConfigTarget)(nil)).Elem() +} + +type QueryDvsConfigTargetRequestType struct { + This ManagedObjectReference `xml:"_this"` + Host *ManagedObjectReference `xml:"host,omitempty"` + Dvs *ManagedObjectReference `xml:"dvs,omitempty"` +} + +func init() { + t["QueryDvsConfigTargetRequestType"] = reflect.TypeOf((*QueryDvsConfigTargetRequestType)(nil)).Elem() +} + +type QueryDvsConfigTargetResponse struct { + Returnval DVSManagerDvsConfigTarget `xml:"returnval"` +} + +type QueryDvsFeatureCapability QueryDvsFeatureCapabilityRequestType + +func init() { + t["QueryDvsFeatureCapability"] = reflect.TypeOf((*QueryDvsFeatureCapability)(nil)).Elem() +} + +type QueryDvsFeatureCapabilityRequestType struct { + This ManagedObjectReference `xml:"_this"` + SwitchProductSpec *DistributedVirtualSwitchProductSpec `xml:"switchProductSpec,omitempty"` +} + +func init() { + t["QueryDvsFeatureCapabilityRequestType"] = reflect.TypeOf((*QueryDvsFeatureCapabilityRequestType)(nil)).Elem() +} + +type QueryDvsFeatureCapabilityResponse struct { + Returnval BaseDVSFeatureCapability `xml:"returnval,omitempty,typeattr"` +} + +type QueryEvents QueryEventsRequestType + +func init() { + t["QueryEvents"] = reflect.TypeOf((*QueryEvents)(nil)).Elem() +} + +type QueryEventsRequestType struct { + This ManagedObjectReference `xml:"_this"` + Filter EventFilterSpec `xml:"filter"` +} + +func init() { + t["QueryEventsRequestType"] = reflect.TypeOf((*QueryEventsRequestType)(nil)).Elem() +} + +type QueryEventsResponse struct { + Returnval []BaseEvent `xml:"returnval,omitempty,typeattr"` +} + +type QueryExpressionMetadata QueryExpressionMetadataRequestType + +func init() { + t["QueryExpressionMetadata"] = reflect.TypeOf((*QueryExpressionMetadata)(nil)).Elem() +} + +type QueryExpressionMetadataRequestType struct { + This ManagedObjectReference `xml:"_this"` + ExpressionName []string `xml:"expressionName,omitempty"` + Profile *ManagedObjectReference `xml:"profile,omitempty"` +} + +func init() { + t["QueryExpressionMetadataRequestType"] = reflect.TypeOf((*QueryExpressionMetadataRequestType)(nil)).Elem() +} + +type QueryExpressionMetadataResponse struct { + Returnval []ProfileExpressionMetadata `xml:"returnval,omitempty"` +} + +type QueryExtensionIpAllocationUsage QueryExtensionIpAllocationUsageRequestType + +func init() { + t["QueryExtensionIpAllocationUsage"] = reflect.TypeOf((*QueryExtensionIpAllocationUsage)(nil)).Elem() +} + +type QueryExtensionIpAllocationUsageRequestType struct { + This ManagedObjectReference `xml:"_this"` + ExtensionKeys []string `xml:"extensionKeys,omitempty"` +} + +func init() { + t["QueryExtensionIpAllocationUsageRequestType"] = reflect.TypeOf((*QueryExtensionIpAllocationUsageRequestType)(nil)).Elem() +} + +type QueryExtensionIpAllocationUsageResponse struct { + Returnval []ExtensionManagerIpAllocationUsage `xml:"returnval,omitempty"` +} + +type QueryFaultToleranceCompatibility QueryFaultToleranceCompatibilityRequestType + +func init() { + t["QueryFaultToleranceCompatibility"] = reflect.TypeOf((*QueryFaultToleranceCompatibility)(nil)).Elem() +} + 
+type QueryFaultToleranceCompatibilityEx QueryFaultToleranceCompatibilityExRequestType + +func init() { + t["QueryFaultToleranceCompatibilityEx"] = reflect.TypeOf((*QueryFaultToleranceCompatibilityEx)(nil)).Elem() +} + +type QueryFaultToleranceCompatibilityExRequestType struct { + This ManagedObjectReference `xml:"_this"` + ForLegacyFt *bool `xml:"forLegacyFt"` +} + +func init() { + t["QueryFaultToleranceCompatibilityExRequestType"] = reflect.TypeOf((*QueryFaultToleranceCompatibilityExRequestType)(nil)).Elem() +} + +type QueryFaultToleranceCompatibilityExResponse struct { + Returnval []LocalizedMethodFault `xml:"returnval,omitempty"` +} + +type QueryFaultToleranceCompatibilityRequestType struct { + This ManagedObjectReference `xml:"_this"` +} + +func init() { + t["QueryFaultToleranceCompatibilityRequestType"] = reflect.TypeOf((*QueryFaultToleranceCompatibilityRequestType)(nil)).Elem() +} + +type QueryFaultToleranceCompatibilityResponse struct { + Returnval []LocalizedMethodFault `xml:"returnval,omitempty"` +} + +type QueryFirmwareConfigUploadURL QueryFirmwareConfigUploadURLRequestType + +func init() { + t["QueryFirmwareConfigUploadURL"] = reflect.TypeOf((*QueryFirmwareConfigUploadURL)(nil)).Elem() +} + +type QueryFirmwareConfigUploadURLRequestType struct { + This ManagedObjectReference `xml:"_this"` +} + +func init() { + t["QueryFirmwareConfigUploadURLRequestType"] = reflect.TypeOf((*QueryFirmwareConfigUploadURLRequestType)(nil)).Elem() +} + +type QueryFirmwareConfigUploadURLResponse struct { + Returnval string `xml:"returnval"` +} + +type QueryHostConnectionInfo QueryHostConnectionInfoRequestType + +func init() { + t["QueryHostConnectionInfo"] = reflect.TypeOf((*QueryHostConnectionInfo)(nil)).Elem() +} + +type QueryHostConnectionInfoRequestType struct { + This ManagedObjectReference `xml:"_this"` +} + +func init() { + t["QueryHostConnectionInfoRequestType"] = reflect.TypeOf((*QueryHostConnectionInfoRequestType)(nil)).Elem() +} + +type QueryHostConnectionInfoResponse struct { + Returnval HostConnectInfo `xml:"returnval"` +} + +type QueryHostPatchRequestType struct { + This ManagedObjectReference `xml:"_this"` + Spec *HostPatchManagerPatchManagerOperationSpec `xml:"spec,omitempty"` +} + +func init() { + t["QueryHostPatchRequestType"] = reflect.TypeOf((*QueryHostPatchRequestType)(nil)).Elem() +} + +type QueryHostPatch_Task QueryHostPatchRequestType + +func init() { + t["QueryHostPatch_Task"] = reflect.TypeOf((*QueryHostPatch_Task)(nil)).Elem() +} + +type QueryHostPatch_TaskResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + +type QueryHostProfileMetadata QueryHostProfileMetadataRequestType + +func init() { + t["QueryHostProfileMetadata"] = reflect.TypeOf((*QueryHostProfileMetadata)(nil)).Elem() +} + +type QueryHostProfileMetadataRequestType struct { + This ManagedObjectReference `xml:"_this"` + ProfileName []string `xml:"profileName,omitempty"` + Profile *ManagedObjectReference `xml:"profile,omitempty"` +} + +func init() { + t["QueryHostProfileMetadataRequestType"] = reflect.TypeOf((*QueryHostProfileMetadataRequestType)(nil)).Elem() +} + +type QueryHostProfileMetadataResponse struct { + Returnval []ProfileMetadata `xml:"returnval,omitempty"` +} + +type QueryHostStatus QueryHostStatusRequestType + +func init() { + t["QueryHostStatus"] = reflect.TypeOf((*QueryHostStatus)(nil)).Elem() +} + +type QueryHostStatusRequestType struct { + This ManagedObjectReference `xml:"_this"` +} + +func init() { + t["QueryHostStatusRequestType"] = 
reflect.TypeOf((*QueryHostStatusRequestType)(nil)).Elem() +} + +type QueryHostStatusResponse struct { + Returnval VsanHostClusterStatus `xml:"returnval"` +} + +type QueryIORMConfigOption QueryIORMConfigOptionRequestType + +func init() { + t["QueryIORMConfigOption"] = reflect.TypeOf((*QueryIORMConfigOption)(nil)).Elem() +} + +type QueryIORMConfigOptionRequestType struct { + This ManagedObjectReference `xml:"_this"` + Host ManagedObjectReference `xml:"host"` +} + +func init() { + t["QueryIORMConfigOptionRequestType"] = reflect.TypeOf((*QueryIORMConfigOptionRequestType)(nil)).Elem() +} + +type QueryIORMConfigOptionResponse struct { + Returnval StorageIORMConfigOption `xml:"returnval"` +} + +type QueryIPAllocations QueryIPAllocationsRequestType + +func init() { + t["QueryIPAllocations"] = reflect.TypeOf((*QueryIPAllocations)(nil)).Elem() +} + +type QueryIPAllocationsRequestType struct { + This ManagedObjectReference `xml:"_this"` + Dc ManagedObjectReference `xml:"dc"` + PoolId int `xml:"poolId"` + ExtensionKey string `xml:"extensionKey"` +} + +func init() { + t["QueryIPAllocationsRequestType"] = reflect.TypeOf((*QueryIPAllocationsRequestType)(nil)).Elem() +} + +type QueryIPAllocationsResponse struct { + Returnval []IpPoolManagerIpAllocation `xml:"returnval"` +} + +type QueryIoFilterInfo QueryIoFilterInfoRequestType + +func init() { + t["QueryIoFilterInfo"] = reflect.TypeOf((*QueryIoFilterInfo)(nil)).Elem() +} + +type QueryIoFilterInfoRequestType struct { + This ManagedObjectReference `xml:"_this"` + CompRes ManagedObjectReference `xml:"compRes"` +} + +func init() { + t["QueryIoFilterInfoRequestType"] = reflect.TypeOf((*QueryIoFilterInfoRequestType)(nil)).Elem() +} + +type QueryIoFilterInfoResponse struct { + Returnval []ClusterIoFilterInfo `xml:"returnval,omitempty"` +} + +type QueryIoFilterIssues QueryIoFilterIssuesRequestType + +func init() { + t["QueryIoFilterIssues"] = reflect.TypeOf((*QueryIoFilterIssues)(nil)).Elem() +} + +type QueryIoFilterIssuesRequestType struct { + This ManagedObjectReference `xml:"_this"` + FilterId string `xml:"filterId"` + CompRes ManagedObjectReference `xml:"compRes"` +} + +func init() { + t["QueryIoFilterIssuesRequestType"] = reflect.TypeOf((*QueryIoFilterIssuesRequestType)(nil)).Elem() +} + +type QueryIoFilterIssuesResponse struct { + Returnval IoFilterQueryIssueResult `xml:"returnval"` +} + +type QueryIpPools QueryIpPoolsRequestType + +func init() { + t["QueryIpPools"] = reflect.TypeOf((*QueryIpPools)(nil)).Elem() +} + +type QueryIpPoolsRequestType struct { + This ManagedObjectReference `xml:"_this"` + Dc ManagedObjectReference `xml:"dc"` +} + +func init() { + t["QueryIpPoolsRequestType"] = reflect.TypeOf((*QueryIpPoolsRequestType)(nil)).Elem() +} + +type QueryIpPoolsResponse struct { + Returnval []IpPool `xml:"returnval,omitempty"` +} + +type QueryLicenseSourceAvailability QueryLicenseSourceAvailabilityRequestType + +func init() { + t["QueryLicenseSourceAvailability"] = reflect.TypeOf((*QueryLicenseSourceAvailability)(nil)).Elem() +} + +type QueryLicenseSourceAvailabilityRequestType struct { + This ManagedObjectReference `xml:"_this"` + Host *ManagedObjectReference `xml:"host,omitempty"` +} + +func init() { + t["QueryLicenseSourceAvailabilityRequestType"] = reflect.TypeOf((*QueryLicenseSourceAvailabilityRequestType)(nil)).Elem() +} + +type QueryLicenseSourceAvailabilityResponse struct { + Returnval []LicenseAvailabilityInfo `xml:"returnval,omitempty"` +} + +type QueryLicenseUsage QueryLicenseUsageRequestType + +func init() { + t["QueryLicenseUsage"] = 
reflect.TypeOf((*QueryLicenseUsage)(nil)).Elem() +} + +type QueryLicenseUsageRequestType struct { + This ManagedObjectReference `xml:"_this"` + Host *ManagedObjectReference `xml:"host,omitempty"` +} + +func init() { + t["QueryLicenseUsageRequestType"] = reflect.TypeOf((*QueryLicenseUsageRequestType)(nil)).Elem() +} + +type QueryLicenseUsageResponse struct { + Returnval LicenseUsageInfo `xml:"returnval"` +} + +type QueryLockdownExceptions QueryLockdownExceptionsRequestType + +func init() { + t["QueryLockdownExceptions"] = reflect.TypeOf((*QueryLockdownExceptions)(nil)).Elem() +} + +type QueryLockdownExceptionsRequestType struct { + This ManagedObjectReference `xml:"_this"` +} + +func init() { + t["QueryLockdownExceptionsRequestType"] = reflect.TypeOf((*QueryLockdownExceptionsRequestType)(nil)).Elem() +} + +type QueryLockdownExceptionsResponse struct { + Returnval []string `xml:"returnval,omitempty"` +} + +type QueryManagedBy QueryManagedByRequestType + +func init() { + t["QueryManagedBy"] = reflect.TypeOf((*QueryManagedBy)(nil)).Elem() +} + +type QueryManagedByRequestType struct { + This ManagedObjectReference `xml:"_this"` + ExtensionKey string `xml:"extensionKey"` +} + +func init() { + t["QueryManagedByRequestType"] = reflect.TypeOf((*QueryManagedByRequestType)(nil)).Elem() +} + +type QueryManagedByResponse struct { + Returnval []ManagedObjectReference `xml:"returnval,omitempty"` +} + +type QueryMemoryOverhead QueryMemoryOverheadRequestType + +func init() { + t["QueryMemoryOverhead"] = reflect.TypeOf((*QueryMemoryOverhead)(nil)).Elem() +} + +type QueryMemoryOverheadEx QueryMemoryOverheadExRequestType + +func init() { + t["QueryMemoryOverheadEx"] = reflect.TypeOf((*QueryMemoryOverheadEx)(nil)).Elem() +} + +type QueryMemoryOverheadExRequestType struct { + This ManagedObjectReference `xml:"_this"` + VmConfigInfo VirtualMachineConfigInfo `xml:"vmConfigInfo"` +} + +func init() { + t["QueryMemoryOverheadExRequestType"] = reflect.TypeOf((*QueryMemoryOverheadExRequestType)(nil)).Elem() +} + +type QueryMemoryOverheadExResponse struct { + Returnval int64 `xml:"returnval"` +} + +type QueryMemoryOverheadRequestType struct { + This ManagedObjectReference `xml:"_this"` + MemorySize int64 `xml:"memorySize"` + VideoRamSize int `xml:"videoRamSize,omitempty"` + NumVcpus int `xml:"numVcpus"` +} + +func init() { + t["QueryMemoryOverheadRequestType"] = reflect.TypeOf((*QueryMemoryOverheadRequestType)(nil)).Elem() +} + +type QueryMemoryOverheadResponse struct { + Returnval int64 `xml:"returnval"` +} + +type QueryMigrationDependencies QueryMigrationDependenciesRequestType + +func init() { + t["QueryMigrationDependencies"] = reflect.TypeOf((*QueryMigrationDependencies)(nil)).Elem() +} + +type QueryMigrationDependenciesRequestType struct { + This ManagedObjectReference `xml:"_this"` + PnicDevice []string `xml:"pnicDevice"` +} + +func init() { + t["QueryMigrationDependenciesRequestType"] = reflect.TypeOf((*QueryMigrationDependenciesRequestType)(nil)).Elem() +} + +type QueryMigrationDependenciesResponse struct { + Returnval IscsiMigrationDependency `xml:"returnval"` +} + +type QueryModules QueryModulesRequestType + +func init() { + t["QueryModules"] = reflect.TypeOf((*QueryModules)(nil)).Elem() +} + +type QueryModulesRequestType struct { + This ManagedObjectReference `xml:"_this"` +} + +func init() { + t["QueryModulesRequestType"] = reflect.TypeOf((*QueryModulesRequestType)(nil)).Elem() +} + +type QueryModulesResponse struct { + Returnval []KernelModuleInfo `xml:"returnval,omitempty"` +} + +type QueryNFSUser 
QueryNFSUserRequestType + +func init() { + t["QueryNFSUser"] = reflect.TypeOf((*QueryNFSUser)(nil)).Elem() +} + +type QueryNFSUserRequestType struct { + This ManagedObjectReference `xml:"_this"` +} + +func init() { + t["QueryNFSUserRequestType"] = reflect.TypeOf((*QueryNFSUserRequestType)(nil)).Elem() +} + +type QueryNFSUserResponse struct { + Returnval *HostNasVolumeUserInfo `xml:"returnval,omitempty"` +} + +type QueryNetConfig QueryNetConfigRequestType + +func init() { + t["QueryNetConfig"] = reflect.TypeOf((*QueryNetConfig)(nil)).Elem() +} + +type QueryNetConfigRequestType struct { + This ManagedObjectReference `xml:"_this"` + NicType string `xml:"nicType"` +} + +func init() { + t["QueryNetConfigRequestType"] = reflect.TypeOf((*QueryNetConfigRequestType)(nil)).Elem() +} + +type QueryNetConfigResponse struct { + Returnval *VirtualNicManagerNetConfig `xml:"returnval,omitempty"` +} + +type QueryNetworkHint QueryNetworkHintRequestType + +func init() { + t["QueryNetworkHint"] = reflect.TypeOf((*QueryNetworkHint)(nil)).Elem() +} + +type QueryNetworkHintRequestType struct { + This ManagedObjectReference `xml:"_this"` + Device []string `xml:"device,omitempty"` +} + +func init() { + t["QueryNetworkHintRequestType"] = reflect.TypeOf((*QueryNetworkHintRequestType)(nil)).Elem() +} + +type QueryNetworkHintResponse struct { + Returnval []PhysicalNicHintInfo `xml:"returnval,omitempty"` +} + +type QueryObjectsOnPhysicalVsanDisk QueryObjectsOnPhysicalVsanDiskRequestType + +func init() { + t["QueryObjectsOnPhysicalVsanDisk"] = reflect.TypeOf((*QueryObjectsOnPhysicalVsanDisk)(nil)).Elem() +} + +type QueryObjectsOnPhysicalVsanDiskRequestType struct { + This ManagedObjectReference `xml:"_this"` + Disks []string `xml:"disks"` +} + +func init() { + t["QueryObjectsOnPhysicalVsanDiskRequestType"] = reflect.TypeOf((*QueryObjectsOnPhysicalVsanDiskRequestType)(nil)).Elem() +} + +type QueryObjectsOnPhysicalVsanDiskResponse struct { + Returnval string `xml:"returnval"` +} + +type QueryOptions QueryOptionsRequestType + +func init() { + t["QueryOptions"] = reflect.TypeOf((*QueryOptions)(nil)).Elem() +} + +type QueryOptionsRequestType struct { + This ManagedObjectReference `xml:"_this"` + Name string `xml:"name,omitempty"` +} + +func init() { + t["QueryOptionsRequestType"] = reflect.TypeOf((*QueryOptionsRequestType)(nil)).Elem() +} + +type QueryOptionsResponse struct { + Returnval []BaseOptionValue `xml:"returnval,omitempty,typeattr"` +} + +type QueryPartitionCreateDesc QueryPartitionCreateDescRequestType + +func init() { + t["QueryPartitionCreateDesc"] = reflect.TypeOf((*QueryPartitionCreateDesc)(nil)).Elem() +} + +type QueryPartitionCreateDescRequestType struct { + This ManagedObjectReference `xml:"_this"` + DiskUuid string `xml:"diskUuid"` + DiagnosticType string `xml:"diagnosticType"` +} + +func init() { + t["QueryPartitionCreateDescRequestType"] = reflect.TypeOf((*QueryPartitionCreateDescRequestType)(nil)).Elem() +} + +type QueryPartitionCreateDescResponse struct { + Returnval HostDiagnosticPartitionCreateDescription `xml:"returnval"` +} + +type QueryPartitionCreateOptions QueryPartitionCreateOptionsRequestType + +func init() { + t["QueryPartitionCreateOptions"] = reflect.TypeOf((*QueryPartitionCreateOptions)(nil)).Elem() +} + +type QueryPartitionCreateOptionsRequestType struct { + This ManagedObjectReference `xml:"_this"` + StorageType string `xml:"storageType"` + DiagnosticType string `xml:"diagnosticType"` +} + +func init() { + t["QueryPartitionCreateOptionsRequestType"] = 
reflect.TypeOf((*QueryPartitionCreateOptionsRequestType)(nil)).Elem() +} + +type QueryPartitionCreateOptionsResponse struct { + Returnval []HostDiagnosticPartitionCreateOption `xml:"returnval,omitempty"` +} + +type QueryPathSelectionPolicyOptions QueryPathSelectionPolicyOptionsRequestType + +func init() { + t["QueryPathSelectionPolicyOptions"] = reflect.TypeOf((*QueryPathSelectionPolicyOptions)(nil)).Elem() +} + +type QueryPathSelectionPolicyOptionsRequestType struct { + This ManagedObjectReference `xml:"_this"` +} + +func init() { + t["QueryPathSelectionPolicyOptionsRequestType"] = reflect.TypeOf((*QueryPathSelectionPolicyOptionsRequestType)(nil)).Elem() +} + +type QueryPathSelectionPolicyOptionsResponse struct { + Returnval []HostPathSelectionPolicyOption `xml:"returnval,omitempty"` +} + +type QueryPerf QueryPerfRequestType + +func init() { + t["QueryPerf"] = reflect.TypeOf((*QueryPerf)(nil)).Elem() +} + +type QueryPerfComposite QueryPerfCompositeRequestType + +func init() { + t["QueryPerfComposite"] = reflect.TypeOf((*QueryPerfComposite)(nil)).Elem() +} + +type QueryPerfCompositeRequestType struct { + This ManagedObjectReference `xml:"_this"` + QuerySpec PerfQuerySpec `xml:"querySpec"` +} + +func init() { + t["QueryPerfCompositeRequestType"] = reflect.TypeOf((*QueryPerfCompositeRequestType)(nil)).Elem() +} + +type QueryPerfCompositeResponse struct { + Returnval PerfCompositeMetric `xml:"returnval"` +} + +type QueryPerfCounter QueryPerfCounterRequestType + +func init() { + t["QueryPerfCounter"] = reflect.TypeOf((*QueryPerfCounter)(nil)).Elem() +} + +type QueryPerfCounterByLevel QueryPerfCounterByLevelRequestType + +func init() { + t["QueryPerfCounterByLevel"] = reflect.TypeOf((*QueryPerfCounterByLevel)(nil)).Elem() +} + +type QueryPerfCounterByLevelRequestType struct { + This ManagedObjectReference `xml:"_this"` + Level int `xml:"level"` +} + +func init() { + t["QueryPerfCounterByLevelRequestType"] = reflect.TypeOf((*QueryPerfCounterByLevelRequestType)(nil)).Elem() +} + +type QueryPerfCounterByLevelResponse struct { + Returnval []PerfCounterInfo `xml:"returnval"` +} + +type QueryPerfCounterRequestType struct { + This ManagedObjectReference `xml:"_this"` + CounterId []int `xml:"counterId"` +} + +func init() { + t["QueryPerfCounterRequestType"] = reflect.TypeOf((*QueryPerfCounterRequestType)(nil)).Elem() +} + +type QueryPerfCounterResponse struct { + Returnval []PerfCounterInfo `xml:"returnval,omitempty"` +} + +type QueryPerfProviderSummary QueryPerfProviderSummaryRequestType + +func init() { + t["QueryPerfProviderSummary"] = reflect.TypeOf((*QueryPerfProviderSummary)(nil)).Elem() +} + +type QueryPerfProviderSummaryRequestType struct { + This ManagedObjectReference `xml:"_this"` + Entity ManagedObjectReference `xml:"entity"` +} + +func init() { + t["QueryPerfProviderSummaryRequestType"] = reflect.TypeOf((*QueryPerfProviderSummaryRequestType)(nil)).Elem() +} + +type QueryPerfProviderSummaryResponse struct { + Returnval PerfProviderSummary `xml:"returnval"` +} + +type QueryPerfRequestType struct { + This ManagedObjectReference `xml:"_this"` + QuerySpec []PerfQuerySpec `xml:"querySpec"` +} + +func init() { + t["QueryPerfRequestType"] = reflect.TypeOf((*QueryPerfRequestType)(nil)).Elem() +} + +type QueryPerfResponse struct { + Returnval []BasePerfEntityMetricBase `xml:"returnval,omitempty,typeattr"` +} + +type QueryPhysicalVsanDisks QueryPhysicalVsanDisksRequestType + +func init() { + t["QueryPhysicalVsanDisks"] = reflect.TypeOf((*QueryPhysicalVsanDisks)(nil)).Elem() +} + +type 
QueryPhysicalVsanDisksRequestType struct { + This ManagedObjectReference `xml:"_this"` + Props []string `xml:"props,omitempty"` +} + +func init() { + t["QueryPhysicalVsanDisksRequestType"] = reflect.TypeOf((*QueryPhysicalVsanDisksRequestType)(nil)).Elem() +} + +type QueryPhysicalVsanDisksResponse struct { + Returnval string `xml:"returnval"` +} + +type QueryPnicStatus QueryPnicStatusRequestType + +func init() { + t["QueryPnicStatus"] = reflect.TypeOf((*QueryPnicStatus)(nil)).Elem() +} + +type QueryPnicStatusRequestType struct { + This ManagedObjectReference `xml:"_this"` + PnicDevice string `xml:"pnicDevice"` +} + +func init() { + t["QueryPnicStatusRequestType"] = reflect.TypeOf((*QueryPnicStatusRequestType)(nil)).Elem() +} + +type QueryPnicStatusResponse struct { + Returnval IscsiStatus `xml:"returnval"` +} + +type QueryPolicyMetadata QueryPolicyMetadataRequestType + +func init() { + t["QueryPolicyMetadata"] = reflect.TypeOf((*QueryPolicyMetadata)(nil)).Elem() +} + +type QueryPolicyMetadataRequestType struct { + This ManagedObjectReference `xml:"_this"` + PolicyName []string `xml:"policyName,omitempty"` + Profile *ManagedObjectReference `xml:"profile,omitempty"` +} + +func init() { + t["QueryPolicyMetadataRequestType"] = reflect.TypeOf((*QueryPolicyMetadataRequestType)(nil)).Elem() +} + +type QueryPolicyMetadataResponse struct { + Returnval []ProfilePolicyMetadata `xml:"returnval,omitempty"` +} + +type QueryProfileStructure QueryProfileStructureRequestType + +func init() { + t["QueryProfileStructure"] = reflect.TypeOf((*QueryProfileStructure)(nil)).Elem() +} + +type QueryProfileStructureRequestType struct { + This ManagedObjectReference `xml:"_this"` + Profile *ManagedObjectReference `xml:"profile,omitempty"` +} + +func init() { + t["QueryProfileStructureRequestType"] = reflect.TypeOf((*QueryProfileStructureRequestType)(nil)).Elem() +} + +type QueryProfileStructureResponse struct { + Returnval ProfileProfileStructure `xml:"returnval"` +} + +type QueryResourceConfigOption QueryResourceConfigOptionRequestType + +func init() { + t["QueryResourceConfigOption"] = reflect.TypeOf((*QueryResourceConfigOption)(nil)).Elem() +} + +type QueryResourceConfigOptionRequestType struct { + This ManagedObjectReference `xml:"_this"` +} + +func init() { + t["QueryResourceConfigOptionRequestType"] = reflect.TypeOf((*QueryResourceConfigOptionRequestType)(nil)).Elem() +} + +type QueryResourceConfigOptionResponse struct { + Returnval ResourceConfigOption `xml:"returnval"` +} + +type QueryServiceList QueryServiceListRequestType + +func init() { + t["QueryServiceList"] = reflect.TypeOf((*QueryServiceList)(nil)).Elem() +} + +type QueryServiceListRequestType struct { + This ManagedObjectReference `xml:"_this"` + ServiceName string `xml:"serviceName,omitempty"` + Location []string `xml:"location,omitempty"` +} + +func init() { + t["QueryServiceListRequestType"] = reflect.TypeOf((*QueryServiceListRequestType)(nil)).Elem() +} + +type QueryServiceListResponse struct { + Returnval []ServiceManagerServiceInfo `xml:"returnval,omitempty"` +} + +type QueryStorageArrayTypePolicyOptions QueryStorageArrayTypePolicyOptionsRequestType + +func init() { + t["QueryStorageArrayTypePolicyOptions"] = reflect.TypeOf((*QueryStorageArrayTypePolicyOptions)(nil)).Elem() +} + +type QueryStorageArrayTypePolicyOptionsRequestType struct { + This ManagedObjectReference `xml:"_this"` +} + +func init() { + t["QueryStorageArrayTypePolicyOptionsRequestType"] = reflect.TypeOf((*QueryStorageArrayTypePolicyOptionsRequestType)(nil)).Elem() +} + +type 
QueryStorageArrayTypePolicyOptionsResponse struct { + Returnval []HostStorageArrayTypePolicyOption `xml:"returnval,omitempty"` +} + +type QuerySupportedFeatures QuerySupportedFeaturesRequestType + +func init() { + t["QuerySupportedFeatures"] = reflect.TypeOf((*QuerySupportedFeatures)(nil)).Elem() +} + +type QuerySupportedFeaturesRequestType struct { + This ManagedObjectReference `xml:"_this"` + Host *ManagedObjectReference `xml:"host,omitempty"` +} + +func init() { + t["QuerySupportedFeaturesRequestType"] = reflect.TypeOf((*QuerySupportedFeaturesRequestType)(nil)).Elem() +} + +type QuerySupportedFeaturesResponse struct { + Returnval []LicenseFeatureInfo `xml:"returnval,omitempty"` +} + +type QuerySyncingVsanObjects QuerySyncingVsanObjectsRequestType + +func init() { + t["QuerySyncingVsanObjects"] = reflect.TypeOf((*QuerySyncingVsanObjects)(nil)).Elem() +} + +type QuerySyncingVsanObjectsRequestType struct { + This ManagedObjectReference `xml:"_this"` + Uuids []string `xml:"uuids,omitempty"` +} + +func init() { + t["QuerySyncingVsanObjectsRequestType"] = reflect.TypeOf((*QuerySyncingVsanObjectsRequestType)(nil)).Elem() +} + +type QuerySyncingVsanObjectsResponse struct { + Returnval string `xml:"returnval"` +} + +type QuerySystemUsers QuerySystemUsersRequestType + +func init() { + t["QuerySystemUsers"] = reflect.TypeOf((*QuerySystemUsers)(nil)).Elem() +} + +type QuerySystemUsersRequestType struct { + This ManagedObjectReference `xml:"_this"` +} + +func init() { + t["QuerySystemUsersRequestType"] = reflect.TypeOf((*QuerySystemUsersRequestType)(nil)).Elem() +} + +type QuerySystemUsersResponse struct { + Returnval []string `xml:"returnval,omitempty"` +} + +type QueryTargetCapabilities QueryTargetCapabilitiesRequestType + +func init() { + t["QueryTargetCapabilities"] = reflect.TypeOf((*QueryTargetCapabilities)(nil)).Elem() +} + +type QueryTargetCapabilitiesRequestType struct { + This ManagedObjectReference `xml:"_this"` + Host *ManagedObjectReference `xml:"host,omitempty"` +} + +func init() { + t["QueryTargetCapabilitiesRequestType"] = reflect.TypeOf((*QueryTargetCapabilitiesRequestType)(nil)).Elem() +} + +type QueryTargetCapabilitiesResponse struct { + Returnval *HostCapability `xml:"returnval,omitempty"` +} + +type QueryTpmAttestationReport QueryTpmAttestationReportRequestType + +func init() { + t["QueryTpmAttestationReport"] = reflect.TypeOf((*QueryTpmAttestationReport)(nil)).Elem() +} + +type QueryTpmAttestationReportRequestType struct { + This ManagedObjectReference `xml:"_this"` +} + +func init() { + t["QueryTpmAttestationReportRequestType"] = reflect.TypeOf((*QueryTpmAttestationReportRequestType)(nil)).Elem() +} + +type QueryTpmAttestationReportResponse struct { + Returnval *HostTpmAttestationReport `xml:"returnval,omitempty"` +} + +type QueryUnownedFiles QueryUnownedFilesRequestType + +func init() { + t["QueryUnownedFiles"] = reflect.TypeOf((*QueryUnownedFiles)(nil)).Elem() +} + +type QueryUnownedFilesRequestType struct { + This ManagedObjectReference `xml:"_this"` +} + +func init() { + t["QueryUnownedFilesRequestType"] = reflect.TypeOf((*QueryUnownedFilesRequestType)(nil)).Elem() +} + +type QueryUnownedFilesResponse struct { + Returnval []string `xml:"returnval,omitempty"` +} + +type QueryUnresolvedVmfsVolume QueryUnresolvedVmfsVolumeRequestType + +func init() { + t["QueryUnresolvedVmfsVolume"] = reflect.TypeOf((*QueryUnresolvedVmfsVolume)(nil)).Elem() +} + +type QueryUnresolvedVmfsVolumeRequestType struct { + This ManagedObjectReference `xml:"_this"` +} + +func init() { + 
t["QueryUnresolvedVmfsVolumeRequestType"] = reflect.TypeOf((*QueryUnresolvedVmfsVolumeRequestType)(nil)).Elem() +} + +type QueryUnresolvedVmfsVolumeResponse struct { + Returnval []HostUnresolvedVmfsVolume `xml:"returnval,omitempty"` +} + +type QueryUnresolvedVmfsVolumes QueryUnresolvedVmfsVolumesRequestType + +func init() { + t["QueryUnresolvedVmfsVolumes"] = reflect.TypeOf((*QueryUnresolvedVmfsVolumes)(nil)).Elem() +} + +type QueryUnresolvedVmfsVolumesRequestType struct { + This ManagedObjectReference `xml:"_this"` +} + +func init() { + t["QueryUnresolvedVmfsVolumesRequestType"] = reflect.TypeOf((*QueryUnresolvedVmfsVolumesRequestType)(nil)).Elem() +} + +type QueryUnresolvedVmfsVolumesResponse struct { + Returnval []HostUnresolvedVmfsVolume `xml:"returnval,omitempty"` +} + +type QueryUsedVlanIdInDvs QueryUsedVlanIdInDvsRequestType + +func init() { + t["QueryUsedVlanIdInDvs"] = reflect.TypeOf((*QueryUsedVlanIdInDvs)(nil)).Elem() +} + +type QueryUsedVlanIdInDvsRequestType struct { + This ManagedObjectReference `xml:"_this"` +} + +func init() { + t["QueryUsedVlanIdInDvsRequestType"] = reflect.TypeOf((*QueryUsedVlanIdInDvsRequestType)(nil)).Elem() +} + +type QueryUsedVlanIdInDvsResponse struct { + Returnval []int `xml:"returnval,omitempty"` +} + +type QueryVMotionCompatibility QueryVMotionCompatibilityRequestType + +func init() { + t["QueryVMotionCompatibility"] = reflect.TypeOf((*QueryVMotionCompatibility)(nil)).Elem() +} + +type QueryVMotionCompatibilityExRequestType struct { + This ManagedObjectReference `xml:"_this"` + Vm []ManagedObjectReference `xml:"vm"` + Host []ManagedObjectReference `xml:"host"` +} + +func init() { + t["QueryVMotionCompatibilityExRequestType"] = reflect.TypeOf((*QueryVMotionCompatibilityExRequestType)(nil)).Elem() +} + +type QueryVMotionCompatibilityEx_Task QueryVMotionCompatibilityExRequestType + +func init() { + t["QueryVMotionCompatibilityEx_Task"] = reflect.TypeOf((*QueryVMotionCompatibilityEx_Task)(nil)).Elem() +} + +type QueryVMotionCompatibilityEx_TaskResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + +type QueryVMotionCompatibilityRequestType struct { + This ManagedObjectReference `xml:"_this"` + Vm ManagedObjectReference `xml:"vm"` + Host []ManagedObjectReference `xml:"host"` + Compatibility []string `xml:"compatibility,omitempty"` +} + +func init() { + t["QueryVMotionCompatibilityRequestType"] = reflect.TypeOf((*QueryVMotionCompatibilityRequestType)(nil)).Elem() +} + +type QueryVMotionCompatibilityResponse struct { + Returnval []HostVMotionCompatibility `xml:"returnval,omitempty"` +} + +type QueryVirtualDiskFragmentation QueryVirtualDiskFragmentationRequestType + +func init() { + t["QueryVirtualDiskFragmentation"] = reflect.TypeOf((*QueryVirtualDiskFragmentation)(nil)).Elem() +} + +type QueryVirtualDiskFragmentationRequestType struct { + This ManagedObjectReference `xml:"_this"` + Name string `xml:"name"` + Datacenter *ManagedObjectReference `xml:"datacenter,omitempty"` +} + +func init() { + t["QueryVirtualDiskFragmentationRequestType"] = reflect.TypeOf((*QueryVirtualDiskFragmentationRequestType)(nil)).Elem() +} + +type QueryVirtualDiskFragmentationResponse struct { + Returnval int `xml:"returnval"` +} + +type QueryVirtualDiskGeometry QueryVirtualDiskGeometryRequestType + +func init() { + t["QueryVirtualDiskGeometry"] = reflect.TypeOf((*QueryVirtualDiskGeometry)(nil)).Elem() +} + +type QueryVirtualDiskGeometryRequestType struct { + This ManagedObjectReference `xml:"_this"` + Name string `xml:"name"` + Datacenter 
*ManagedObjectReference `xml:"datacenter,omitempty"` +} + +func init() { + t["QueryVirtualDiskGeometryRequestType"] = reflect.TypeOf((*QueryVirtualDiskGeometryRequestType)(nil)).Elem() +} + +type QueryVirtualDiskGeometryResponse struct { + Returnval HostDiskDimensionsChs `xml:"returnval"` +} + +type QueryVirtualDiskUuid QueryVirtualDiskUuidRequestType + +func init() { + t["QueryVirtualDiskUuid"] = reflect.TypeOf((*QueryVirtualDiskUuid)(nil)).Elem() +} + +type QueryVirtualDiskUuidRequestType struct { + This ManagedObjectReference `xml:"_this"` + Name string `xml:"name"` + Datacenter *ManagedObjectReference `xml:"datacenter,omitempty"` +} + +func init() { + t["QueryVirtualDiskUuidRequestType"] = reflect.TypeOf((*QueryVirtualDiskUuidRequestType)(nil)).Elem() +} + +type QueryVirtualDiskUuidResponse struct { + Returnval string `xml:"returnval"` +} + +type QueryVmfsDatastoreCreateOptions QueryVmfsDatastoreCreateOptionsRequestType + +func init() { + t["QueryVmfsDatastoreCreateOptions"] = reflect.TypeOf((*QueryVmfsDatastoreCreateOptions)(nil)).Elem() +} + +type QueryVmfsDatastoreCreateOptionsRequestType struct { + This ManagedObjectReference `xml:"_this"` + DevicePath string `xml:"devicePath"` + VmfsMajorVersion int `xml:"vmfsMajorVersion,omitempty"` +} + +func init() { + t["QueryVmfsDatastoreCreateOptionsRequestType"] = reflect.TypeOf((*QueryVmfsDatastoreCreateOptionsRequestType)(nil)).Elem() +} + +type QueryVmfsDatastoreCreateOptionsResponse struct { + Returnval []VmfsDatastoreOption `xml:"returnval,omitempty"` +} + +type QueryVmfsDatastoreExpandOptions QueryVmfsDatastoreExpandOptionsRequestType + +func init() { + t["QueryVmfsDatastoreExpandOptions"] = reflect.TypeOf((*QueryVmfsDatastoreExpandOptions)(nil)).Elem() +} + +type QueryVmfsDatastoreExpandOptionsRequestType struct { + This ManagedObjectReference `xml:"_this"` + Datastore ManagedObjectReference `xml:"datastore"` +} + +func init() { + t["QueryVmfsDatastoreExpandOptionsRequestType"] = reflect.TypeOf((*QueryVmfsDatastoreExpandOptionsRequestType)(nil)).Elem() +} + +type QueryVmfsDatastoreExpandOptionsResponse struct { + Returnval []VmfsDatastoreOption `xml:"returnval,omitempty"` +} + +type QueryVmfsDatastoreExtendOptions QueryVmfsDatastoreExtendOptionsRequestType + +func init() { + t["QueryVmfsDatastoreExtendOptions"] = reflect.TypeOf((*QueryVmfsDatastoreExtendOptions)(nil)).Elem() +} + +type QueryVmfsDatastoreExtendOptionsRequestType struct { + This ManagedObjectReference `xml:"_this"` + Datastore ManagedObjectReference `xml:"datastore"` + DevicePath string `xml:"devicePath"` + SuppressExpandCandidates *bool `xml:"suppressExpandCandidates"` +} + +func init() { + t["QueryVmfsDatastoreExtendOptionsRequestType"] = reflect.TypeOf((*QueryVmfsDatastoreExtendOptionsRequestType)(nil)).Elem() +} + +type QueryVmfsDatastoreExtendOptionsResponse struct { + Returnval []VmfsDatastoreOption `xml:"returnval,omitempty"` +} + +type QueryVnicStatus QueryVnicStatusRequestType + +func init() { + t["QueryVnicStatus"] = reflect.TypeOf((*QueryVnicStatus)(nil)).Elem() +} + +type QueryVnicStatusRequestType struct { + This ManagedObjectReference `xml:"_this"` + VnicDevice string `xml:"vnicDevice"` +} + +func init() { + t["QueryVnicStatusRequestType"] = reflect.TypeOf((*QueryVnicStatusRequestType)(nil)).Elem() +} + +type QueryVnicStatusResponse struct { + Returnval IscsiStatus `xml:"returnval"` +} + +type QueryVsanObjectUuidsByFilter QueryVsanObjectUuidsByFilterRequestType + +func init() { + t["QueryVsanObjectUuidsByFilter"] = 
reflect.TypeOf((*QueryVsanObjectUuidsByFilter)(nil)).Elem() +} + +type QueryVsanObjectUuidsByFilterRequestType struct { + This ManagedObjectReference `xml:"_this"` + Uuids []string `xml:"uuids,omitempty"` + Limit int `xml:"limit,omitempty"` + Version int `xml:"version,omitempty"` +} + +func init() { + t["QueryVsanObjectUuidsByFilterRequestType"] = reflect.TypeOf((*QueryVsanObjectUuidsByFilterRequestType)(nil)).Elem() +} + +type QueryVsanObjectUuidsByFilterResponse struct { + Returnval []string `xml:"returnval,omitempty"` +} + +type QueryVsanObjects QueryVsanObjectsRequestType + +func init() { + t["QueryVsanObjects"] = reflect.TypeOf((*QueryVsanObjects)(nil)).Elem() +} + +type QueryVsanObjectsRequestType struct { + This ManagedObjectReference `xml:"_this"` + Uuids []string `xml:"uuids,omitempty"` +} + +func init() { + t["QueryVsanObjectsRequestType"] = reflect.TypeOf((*QueryVsanObjectsRequestType)(nil)).Elem() +} + +type QueryVsanObjectsResponse struct { + Returnval string `xml:"returnval"` +} + +type QueryVsanStatistics QueryVsanStatisticsRequestType + +func init() { + t["QueryVsanStatistics"] = reflect.TypeOf((*QueryVsanStatistics)(nil)).Elem() +} + +type QueryVsanStatisticsRequestType struct { + This ManagedObjectReference `xml:"_this"` + Labels []string `xml:"labels"` +} + +func init() { + t["QueryVsanStatisticsRequestType"] = reflect.TypeOf((*QueryVsanStatisticsRequestType)(nil)).Elem() +} + +type QueryVsanStatisticsResponse struct { + Returnval string `xml:"returnval"` +} + +type QueryVsanUpgradeStatus QueryVsanUpgradeStatusRequestType + +func init() { + t["QueryVsanUpgradeStatus"] = reflect.TypeOf((*QueryVsanUpgradeStatus)(nil)).Elem() +} + +type QueryVsanUpgradeStatusRequestType struct { + This ManagedObjectReference `xml:"_this"` + Cluster ManagedObjectReference `xml:"cluster"` +} + +func init() { + t["QueryVsanUpgradeStatusRequestType"] = reflect.TypeOf((*QueryVsanUpgradeStatusRequestType)(nil)).Elem() +} + +type QueryVsanUpgradeStatusResponse struct { + Returnval VsanUpgradeSystemUpgradeStatus `xml:"returnval"` +} + +type QuestionPending struct { + InvalidState + + Text string `xml:"text"` +} + +func init() { + t["QuestionPending"] = reflect.TypeOf((*QuestionPending)(nil)).Elem() +} + +type QuestionPendingFault QuestionPending + +func init() { + t["QuestionPendingFault"] = reflect.TypeOf((*QuestionPendingFault)(nil)).Elem() +} + +type QuiesceDatastoreIOForHAFailed struct { + ResourceInUse + + Host ManagedObjectReference `xml:"host"` + HostName string `xml:"hostName"` + Ds ManagedObjectReference `xml:"ds"` + DsName string `xml:"dsName"` +} + +func init() { + t["QuiesceDatastoreIOForHAFailed"] = reflect.TypeOf((*QuiesceDatastoreIOForHAFailed)(nil)).Elem() +} + +type QuiesceDatastoreIOForHAFailedFault QuiesceDatastoreIOForHAFailed + +func init() { + t["QuiesceDatastoreIOForHAFailedFault"] = reflect.TypeOf((*QuiesceDatastoreIOForHAFailedFault)(nil)).Elem() +} + +type RDMConversionNotSupported struct { + MigrationFault + + Device string `xml:"device"` +} + +func init() { + t["RDMConversionNotSupported"] = reflect.TypeOf((*RDMConversionNotSupported)(nil)).Elem() +} + +type RDMConversionNotSupportedFault RDMConversionNotSupported + +func init() { + t["RDMConversionNotSupportedFault"] = reflect.TypeOf((*RDMConversionNotSupportedFault)(nil)).Elem() +} + +type RDMNotPreserved struct { + MigrationFault + + Device string `xml:"device"` +} + +func init() { + t["RDMNotPreserved"] = reflect.TypeOf((*RDMNotPreserved)(nil)).Elem() +} + +type RDMNotPreservedFault RDMNotPreserved + +func init() { + 
t["RDMNotPreservedFault"] = reflect.TypeOf((*RDMNotPreservedFault)(nil)).Elem() +} + +type RDMNotSupported struct { + DeviceNotSupported +} + +func init() { + t["RDMNotSupported"] = reflect.TypeOf((*RDMNotSupported)(nil)).Elem() +} + +type RDMNotSupportedFault BaseRDMNotSupported + +func init() { + t["RDMNotSupportedFault"] = reflect.TypeOf((*RDMNotSupportedFault)(nil)).Elem() +} + +type RDMNotSupportedOnDatastore struct { + VmConfigFault + + Device string `xml:"device"` + Datastore ManagedObjectReference `xml:"datastore"` + DatastoreName string `xml:"datastoreName"` +} + +func init() { + t["RDMNotSupportedOnDatastore"] = reflect.TypeOf((*RDMNotSupportedOnDatastore)(nil)).Elem() +} + +type RDMNotSupportedOnDatastoreFault RDMNotSupportedOnDatastore + +func init() { + t["RDMNotSupportedOnDatastoreFault"] = reflect.TypeOf((*RDMNotSupportedOnDatastoreFault)(nil)).Elem() +} + +type RDMPointsToInaccessibleDisk struct { + CannotAccessVmDisk +} + +func init() { + t["RDMPointsToInaccessibleDisk"] = reflect.TypeOf((*RDMPointsToInaccessibleDisk)(nil)).Elem() +} + +type RDMPointsToInaccessibleDiskFault RDMPointsToInaccessibleDisk + +func init() { + t["RDMPointsToInaccessibleDiskFault"] = reflect.TypeOf((*RDMPointsToInaccessibleDiskFault)(nil)).Elem() +} + +type RawDiskNotSupported struct { + DeviceNotSupported +} + +func init() { + t["RawDiskNotSupported"] = reflect.TypeOf((*RawDiskNotSupported)(nil)).Elem() +} + +type RawDiskNotSupportedFault RawDiskNotSupported + +func init() { + t["RawDiskNotSupportedFault"] = reflect.TypeOf((*RawDiskNotSupportedFault)(nil)).Elem() +} + +type ReadEnvironmentVariableInGuest ReadEnvironmentVariableInGuestRequestType + +func init() { + t["ReadEnvironmentVariableInGuest"] = reflect.TypeOf((*ReadEnvironmentVariableInGuest)(nil)).Elem() +} + +type ReadEnvironmentVariableInGuestRequestType struct { + This ManagedObjectReference `xml:"_this"` + Vm ManagedObjectReference `xml:"vm"` + Auth BaseGuestAuthentication `xml:"auth,typeattr"` + Names []string `xml:"names,omitempty"` +} + +func init() { + t["ReadEnvironmentVariableInGuestRequestType"] = reflect.TypeOf((*ReadEnvironmentVariableInGuestRequestType)(nil)).Elem() +} + +type ReadEnvironmentVariableInGuestResponse struct { + Returnval []string `xml:"returnval,omitempty"` +} + +type ReadHostResourcePoolTreeFailed struct { + HostConnectFault +} + +func init() { + t["ReadHostResourcePoolTreeFailed"] = reflect.TypeOf((*ReadHostResourcePoolTreeFailed)(nil)).Elem() +} + +type ReadHostResourcePoolTreeFailedFault ReadHostResourcePoolTreeFailed + +func init() { + t["ReadHostResourcePoolTreeFailedFault"] = reflect.TypeOf((*ReadHostResourcePoolTreeFailedFault)(nil)).Elem() +} + +type ReadNextEvents ReadNextEventsRequestType + +func init() { + t["ReadNextEvents"] = reflect.TypeOf((*ReadNextEvents)(nil)).Elem() +} + +type ReadNextEventsRequestType struct { + This ManagedObjectReference `xml:"_this"` + MaxCount int `xml:"maxCount"` +} + +func init() { + t["ReadNextEventsRequestType"] = reflect.TypeOf((*ReadNextEventsRequestType)(nil)).Elem() +} + +type ReadNextEventsResponse struct { + Returnval []BaseEvent `xml:"returnval,omitempty,typeattr"` +} + +type ReadNextTasks ReadNextTasksRequestType + +func init() { + t["ReadNextTasks"] = reflect.TypeOf((*ReadNextTasks)(nil)).Elem() +} + +type ReadNextTasksRequestType struct { + This ManagedObjectReference `xml:"_this"` + MaxCount int `xml:"maxCount"` +} + +func init() { + t["ReadNextTasksRequestType"] = reflect.TypeOf((*ReadNextTasksRequestType)(nil)).Elem() +} + +type ReadNextTasksResponse 
struct { + Returnval []TaskInfo `xml:"returnval,omitempty"` +} + +type ReadOnlyDisksWithLegacyDestination struct { + MigrationFault + + RoDiskCount int `xml:"roDiskCount"` + TimeoutDanger bool `xml:"timeoutDanger"` +} + +func init() { + t["ReadOnlyDisksWithLegacyDestination"] = reflect.TypeOf((*ReadOnlyDisksWithLegacyDestination)(nil)).Elem() +} + +type ReadOnlyDisksWithLegacyDestinationFault ReadOnlyDisksWithLegacyDestination + +func init() { + t["ReadOnlyDisksWithLegacyDestinationFault"] = reflect.TypeOf((*ReadOnlyDisksWithLegacyDestinationFault)(nil)).Elem() +} + +type ReadPreviousEvents ReadPreviousEventsRequestType + +func init() { + t["ReadPreviousEvents"] = reflect.TypeOf((*ReadPreviousEvents)(nil)).Elem() +} + +type ReadPreviousEventsRequestType struct { + This ManagedObjectReference `xml:"_this"` + MaxCount int `xml:"maxCount"` +} + +func init() { + t["ReadPreviousEventsRequestType"] = reflect.TypeOf((*ReadPreviousEventsRequestType)(nil)).Elem() +} + +type ReadPreviousEventsResponse struct { + Returnval []BaseEvent `xml:"returnval,omitempty,typeattr"` +} + +type ReadPreviousTasks ReadPreviousTasksRequestType + +func init() { + t["ReadPreviousTasks"] = reflect.TypeOf((*ReadPreviousTasks)(nil)).Elem() +} + +type ReadPreviousTasksRequestType struct { + This ManagedObjectReference `xml:"_this"` + MaxCount int `xml:"maxCount"` +} + +func init() { + t["ReadPreviousTasksRequestType"] = reflect.TypeOf((*ReadPreviousTasksRequestType)(nil)).Elem() +} + +type ReadPreviousTasksResponse struct { + Returnval []TaskInfo `xml:"returnval,omitempty"` +} + +type RebootGuest RebootGuestRequestType + +func init() { + t["RebootGuest"] = reflect.TypeOf((*RebootGuest)(nil)).Elem() +} + +type RebootGuestRequestType struct { + This ManagedObjectReference `xml:"_this"` +} + +func init() { + t["RebootGuestRequestType"] = reflect.TypeOf((*RebootGuestRequestType)(nil)).Elem() +} + +type RebootGuestResponse struct { +} + +type RebootHostRequestType struct { + This ManagedObjectReference `xml:"_this"` + Force bool `xml:"force"` +} + +func init() { + t["RebootHostRequestType"] = reflect.TypeOf((*RebootHostRequestType)(nil)).Elem() +} + +type RebootHost_Task RebootHostRequestType + +func init() { + t["RebootHost_Task"] = reflect.TypeOf((*RebootHost_Task)(nil)).Elem() +} + +type RebootHost_TaskResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + +type RebootRequired struct { + VimFault + + Patch string `xml:"patch,omitempty"` +} + +func init() { + t["RebootRequired"] = reflect.TypeOf((*RebootRequired)(nil)).Elem() +} + +type RebootRequiredFault RebootRequired + +func init() { + t["RebootRequiredFault"] = reflect.TypeOf((*RebootRequiredFault)(nil)).Elem() +} + +type RecommendDatastores RecommendDatastoresRequestType + +func init() { + t["RecommendDatastores"] = reflect.TypeOf((*RecommendDatastores)(nil)).Elem() +} + +type RecommendDatastoresRequestType struct { + This ManagedObjectReference `xml:"_this"` + StorageSpec StoragePlacementSpec `xml:"storageSpec"` +} + +func init() { + t["RecommendDatastoresRequestType"] = reflect.TypeOf((*RecommendDatastoresRequestType)(nil)).Elem() +} + +type RecommendDatastoresResponse struct { + Returnval StoragePlacementResult `xml:"returnval"` +} + +type RecommendHostsForVm RecommendHostsForVmRequestType + +func init() { + t["RecommendHostsForVm"] = reflect.TypeOf((*RecommendHostsForVm)(nil)).Elem() +} + +type RecommendHostsForVmRequestType struct { + This ManagedObjectReference `xml:"_this"` + Vm ManagedObjectReference `xml:"vm"` + Pool 
*ManagedObjectReference `xml:"pool,omitempty"` +} + +func init() { + t["RecommendHostsForVmRequestType"] = reflect.TypeOf((*RecommendHostsForVmRequestType)(nil)).Elem() +} + +type RecommendHostsForVmResponse struct { + Returnval []ClusterHostRecommendation `xml:"returnval,omitempty"` +} + +type RecommissionVsanNodeRequestType struct { + This ManagedObjectReference `xml:"_this"` +} + +func init() { + t["RecommissionVsanNodeRequestType"] = reflect.TypeOf((*RecommissionVsanNodeRequestType)(nil)).Elem() +} + +type RecommissionVsanNode_Task RecommissionVsanNodeRequestType + +func init() { + t["RecommissionVsanNode_Task"] = reflect.TypeOf((*RecommissionVsanNode_Task)(nil)).Elem() +} + +type RecommissionVsanNode_TaskResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + +type ReconfigVMRequestType struct { + This ManagedObjectReference `xml:"_this"` + Spec VirtualMachineConfigSpec `xml:"spec"` +} + +func init() { + t["ReconfigVMRequestType"] = reflect.TypeOf((*ReconfigVMRequestType)(nil)).Elem() +} + +type ReconfigVM_Task ReconfigVMRequestType + +func init() { + t["ReconfigVM_Task"] = reflect.TypeOf((*ReconfigVM_Task)(nil)).Elem() +} + +type ReconfigVM_TaskResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + +type ReconfigurationSatisfiable ReconfigurationSatisfiableRequestType + +func init() { + t["ReconfigurationSatisfiable"] = reflect.TypeOf((*ReconfigurationSatisfiable)(nil)).Elem() +} + +type ReconfigurationSatisfiableRequestType struct { + This ManagedObjectReference `xml:"_this"` + Pcbs []VsanPolicyChangeBatch `xml:"pcbs"` + IgnoreSatisfiability *bool `xml:"ignoreSatisfiability"` +} + +func init() { + t["ReconfigurationSatisfiableRequestType"] = reflect.TypeOf((*ReconfigurationSatisfiableRequestType)(nil)).Elem() +} + +type ReconfigurationSatisfiableResponse struct { + Returnval []VsanPolicySatisfiability `xml:"returnval"` +} + +type ReconfigureAlarm ReconfigureAlarmRequestType + +func init() { + t["ReconfigureAlarm"] = reflect.TypeOf((*ReconfigureAlarm)(nil)).Elem() +} + +type ReconfigureAlarmRequestType struct { + This ManagedObjectReference `xml:"_this"` + Spec BaseAlarmSpec `xml:"spec,typeattr"` +} + +func init() { + t["ReconfigureAlarmRequestType"] = reflect.TypeOf((*ReconfigureAlarmRequestType)(nil)).Elem() +} + +type ReconfigureAlarmResponse struct { +} + +type ReconfigureAutostart ReconfigureAutostartRequestType + +func init() { + t["ReconfigureAutostart"] = reflect.TypeOf((*ReconfigureAutostart)(nil)).Elem() +} + +type ReconfigureAutostartRequestType struct { + This ManagedObjectReference `xml:"_this"` + Spec HostAutoStartManagerConfig `xml:"spec"` +} + +func init() { + t["ReconfigureAutostartRequestType"] = reflect.TypeOf((*ReconfigureAutostartRequestType)(nil)).Elem() +} + +type ReconfigureAutostartResponse struct { +} + +type ReconfigureClusterRequestType struct { + This ManagedObjectReference `xml:"_this"` + Spec ClusterConfigSpec `xml:"spec"` + Modify bool `xml:"modify"` +} + +func init() { + t["ReconfigureClusterRequestType"] = reflect.TypeOf((*ReconfigureClusterRequestType)(nil)).Elem() +} + +type ReconfigureCluster_Task ReconfigureClusterRequestType + +func init() { + t["ReconfigureCluster_Task"] = reflect.TypeOf((*ReconfigureCluster_Task)(nil)).Elem() +} + +type ReconfigureCluster_TaskResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + +type ReconfigureComputeResourceRequestType struct { + This ManagedObjectReference `xml:"_this"` + Spec BaseComputeResourceConfigSpec `xml:"spec,typeattr"` + Modify bool 
`xml:"modify"` +} + +func init() { + t["ReconfigureComputeResourceRequestType"] = reflect.TypeOf((*ReconfigureComputeResourceRequestType)(nil)).Elem() +} + +type ReconfigureComputeResource_Task ReconfigureComputeResourceRequestType + +func init() { + t["ReconfigureComputeResource_Task"] = reflect.TypeOf((*ReconfigureComputeResource_Task)(nil)).Elem() +} + +type ReconfigureComputeResource_TaskResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + +type ReconfigureDVPortRequestType struct { + This ManagedObjectReference `xml:"_this"` + Port []DVPortConfigSpec `xml:"port"` +} + +func init() { + t["ReconfigureDVPortRequestType"] = reflect.TypeOf((*ReconfigureDVPortRequestType)(nil)).Elem() +} + +type ReconfigureDVPort_Task ReconfigureDVPortRequestType + +func init() { + t["ReconfigureDVPort_Task"] = reflect.TypeOf((*ReconfigureDVPort_Task)(nil)).Elem() +} + +type ReconfigureDVPort_TaskResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + +type ReconfigureDVPortgroupRequestType struct { + This ManagedObjectReference `xml:"_this"` + Spec DVPortgroupConfigSpec `xml:"spec"` +} + +func init() { + t["ReconfigureDVPortgroupRequestType"] = reflect.TypeOf((*ReconfigureDVPortgroupRequestType)(nil)).Elem() +} + +type ReconfigureDVPortgroup_Task ReconfigureDVPortgroupRequestType + +func init() { + t["ReconfigureDVPortgroup_Task"] = reflect.TypeOf((*ReconfigureDVPortgroup_Task)(nil)).Elem() +} + +type ReconfigureDVPortgroup_TaskResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + +type ReconfigureDatacenterRequestType struct { + This ManagedObjectReference `xml:"_this"` + Spec DatacenterConfigSpec `xml:"spec"` + Modify bool `xml:"modify"` +} + +func init() { + t["ReconfigureDatacenterRequestType"] = reflect.TypeOf((*ReconfigureDatacenterRequestType)(nil)).Elem() +} + +type ReconfigureDatacenter_Task ReconfigureDatacenterRequestType + +func init() { + t["ReconfigureDatacenter_Task"] = reflect.TypeOf((*ReconfigureDatacenter_Task)(nil)).Elem() +} + +type ReconfigureDatacenter_TaskResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + +type ReconfigureDomObject ReconfigureDomObjectRequestType + +func init() { + t["ReconfigureDomObject"] = reflect.TypeOf((*ReconfigureDomObject)(nil)).Elem() +} + +type ReconfigureDomObjectRequestType struct { + This ManagedObjectReference `xml:"_this"` + Uuid string `xml:"uuid"` + Policy string `xml:"policy"` +} + +func init() { + t["ReconfigureDomObjectRequestType"] = reflect.TypeOf((*ReconfigureDomObjectRequestType)(nil)).Elem() +} + +type ReconfigureDomObjectResponse struct { +} + +type ReconfigureDvsRequestType struct { + This ManagedObjectReference `xml:"_this"` + Spec BaseDVSConfigSpec `xml:"spec,typeattr"` +} + +func init() { + t["ReconfigureDvsRequestType"] = reflect.TypeOf((*ReconfigureDvsRequestType)(nil)).Elem() +} + +type ReconfigureDvs_Task ReconfigureDvsRequestType + +func init() { + t["ReconfigureDvs_Task"] = reflect.TypeOf((*ReconfigureDvs_Task)(nil)).Elem() +} + +type ReconfigureDvs_TaskResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + +type ReconfigureHostForDASRequestType struct { + This ManagedObjectReference `xml:"_this"` +} + +func init() { + t["ReconfigureHostForDASRequestType"] = reflect.TypeOf((*ReconfigureHostForDASRequestType)(nil)).Elem() +} + +type ReconfigureHostForDAS_Task ReconfigureHostForDASRequestType + +func init() { + t["ReconfigureHostForDAS_Task"] = reflect.TypeOf((*ReconfigureHostForDAS_Task)(nil)).Elem() +} + +type 
ReconfigureHostForDAS_TaskResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + +type ReconfigureScheduledTask ReconfigureScheduledTaskRequestType + +func init() { + t["ReconfigureScheduledTask"] = reflect.TypeOf((*ReconfigureScheduledTask)(nil)).Elem() +} + +type ReconfigureScheduledTaskRequestType struct { + This ManagedObjectReference `xml:"_this"` + Spec BaseScheduledTaskSpec `xml:"spec,typeattr"` +} + +func init() { + t["ReconfigureScheduledTaskRequestType"] = reflect.TypeOf((*ReconfigureScheduledTaskRequestType)(nil)).Elem() +} + +type ReconfigureScheduledTaskResponse struct { +} + +type ReconfigureServiceConsoleReservation ReconfigureServiceConsoleReservationRequestType + +func init() { + t["ReconfigureServiceConsoleReservation"] = reflect.TypeOf((*ReconfigureServiceConsoleReservation)(nil)).Elem() +} + +type ReconfigureServiceConsoleReservationRequestType struct { + This ManagedObjectReference `xml:"_this"` + CfgBytes int64 `xml:"cfgBytes"` +} + +func init() { + t["ReconfigureServiceConsoleReservationRequestType"] = reflect.TypeOf((*ReconfigureServiceConsoleReservationRequestType)(nil)).Elem() +} + +type ReconfigureServiceConsoleReservationResponse struct { +} + +type ReconfigureSnmpAgent ReconfigureSnmpAgentRequestType + +func init() { + t["ReconfigureSnmpAgent"] = reflect.TypeOf((*ReconfigureSnmpAgent)(nil)).Elem() +} + +type ReconfigureSnmpAgentRequestType struct { + This ManagedObjectReference `xml:"_this"` + Spec HostSnmpConfigSpec `xml:"spec"` +} + +func init() { + t["ReconfigureSnmpAgentRequestType"] = reflect.TypeOf((*ReconfigureSnmpAgentRequestType)(nil)).Elem() +} + +type ReconfigureSnmpAgentResponse struct { +} + +type ReconfigureVirtualMachineReservation ReconfigureVirtualMachineReservationRequestType + +func init() { + t["ReconfigureVirtualMachineReservation"] = reflect.TypeOf((*ReconfigureVirtualMachineReservation)(nil)).Elem() +} + +type ReconfigureVirtualMachineReservationRequestType struct { + This ManagedObjectReference `xml:"_this"` + Spec VirtualMachineMemoryReservationSpec `xml:"spec"` +} + +func init() { + t["ReconfigureVirtualMachineReservationRequestType"] = reflect.TypeOf((*ReconfigureVirtualMachineReservationRequestType)(nil)).Elem() +} + +type ReconfigureVirtualMachineReservationResponse struct { +} + +type ReconnectHostRequestType struct { + This ManagedObjectReference `xml:"_this"` + CnxSpec *HostConnectSpec `xml:"cnxSpec,omitempty"` + ReconnectSpec *HostSystemReconnectSpec `xml:"reconnectSpec,omitempty"` +} + +func init() { + t["ReconnectHostRequestType"] = reflect.TypeOf((*ReconnectHostRequestType)(nil)).Elem() +} + +type ReconnectHost_Task ReconnectHostRequestType + +func init() { + t["ReconnectHost_Task"] = reflect.TypeOf((*ReconnectHost_Task)(nil)).Elem() +} + +type ReconnectHost_TaskResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + +type RecordReplayDisabled struct { + VimFault +} + +func init() { + t["RecordReplayDisabled"] = reflect.TypeOf((*RecordReplayDisabled)(nil)).Elem() +} + +type RecordReplayDisabledFault RecordReplayDisabled + +func init() { + t["RecordReplayDisabledFault"] = reflect.TypeOf((*RecordReplayDisabledFault)(nil)).Elem() +} + +type RecoveryEvent struct { + DvsEvent + + HostName string `xml:"hostName"` + PortKey string `xml:"portKey"` + DvsUuid string `xml:"dvsUuid,omitempty"` + Vnic string `xml:"vnic,omitempty"` +} + +func init() { + t["RecoveryEvent"] = reflect.TypeOf((*RecoveryEvent)(nil)).Elem() +} + +type RectifyDvsHostRequestType struct { + This ManagedObjectReference 
`xml:"_this"` + Hosts []ManagedObjectReference `xml:"hosts,omitempty"` +} + +func init() { + t["RectifyDvsHostRequestType"] = reflect.TypeOf((*RectifyDvsHostRequestType)(nil)).Elem() +} + +type RectifyDvsHost_Task RectifyDvsHostRequestType + +func init() { + t["RectifyDvsHost_Task"] = reflect.TypeOf((*RectifyDvsHost_Task)(nil)).Elem() +} + +type RectifyDvsHost_TaskResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + +type RectifyDvsOnHostRequestType struct { + This ManagedObjectReference `xml:"_this"` + Hosts []ManagedObjectReference `xml:"hosts"` +} + +func init() { + t["RectifyDvsOnHostRequestType"] = reflect.TypeOf((*RectifyDvsOnHostRequestType)(nil)).Elem() +} + +type RectifyDvsOnHost_Task RectifyDvsOnHostRequestType + +func init() { + t["RectifyDvsOnHost_Task"] = reflect.TypeOf((*RectifyDvsOnHost_Task)(nil)).Elem() +} + +type RectifyDvsOnHost_TaskResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + +type RecurrentTaskScheduler struct { + TaskScheduler + + Interval int `xml:"interval"` +} + +func init() { + t["RecurrentTaskScheduler"] = reflect.TypeOf((*RecurrentTaskScheduler)(nil)).Elem() +} + +type Refresh RefreshRequestType + +func init() { + t["Refresh"] = reflect.TypeOf((*Refresh)(nil)).Elem() +} + +type RefreshDVPortState RefreshDVPortStateRequestType + +func init() { + t["RefreshDVPortState"] = reflect.TypeOf((*RefreshDVPortState)(nil)).Elem() +} + +type RefreshDVPortStateRequestType struct { + This ManagedObjectReference `xml:"_this"` + PortKeys []string `xml:"portKeys,omitempty"` +} + +func init() { + t["RefreshDVPortStateRequestType"] = reflect.TypeOf((*RefreshDVPortStateRequestType)(nil)).Elem() +} + +type RefreshDVPortStateResponse struct { +} + +type RefreshDatastore RefreshDatastoreRequestType + +func init() { + t["RefreshDatastore"] = reflect.TypeOf((*RefreshDatastore)(nil)).Elem() +} + +type RefreshDatastoreRequestType struct { + This ManagedObjectReference `xml:"_this"` +} + +func init() { + t["RefreshDatastoreRequestType"] = reflect.TypeOf((*RefreshDatastoreRequestType)(nil)).Elem() +} + +type RefreshDatastoreResponse struct { +} + +type RefreshDatastoreStorageInfo RefreshDatastoreStorageInfoRequestType + +func init() { + t["RefreshDatastoreStorageInfo"] = reflect.TypeOf((*RefreshDatastoreStorageInfo)(nil)).Elem() +} + +type RefreshDatastoreStorageInfoRequestType struct { + This ManagedObjectReference `xml:"_this"` +} + +func init() { + t["RefreshDatastoreStorageInfoRequestType"] = reflect.TypeOf((*RefreshDatastoreStorageInfoRequestType)(nil)).Elem() +} + +type RefreshDatastoreStorageInfoResponse struct { +} + +type RefreshDateTimeSystem RefreshDateTimeSystemRequestType + +func init() { + t["RefreshDateTimeSystem"] = reflect.TypeOf((*RefreshDateTimeSystem)(nil)).Elem() +} + +type RefreshDateTimeSystemRequestType struct { + This ManagedObjectReference `xml:"_this"` +} + +func init() { + t["RefreshDateTimeSystemRequestType"] = reflect.TypeOf((*RefreshDateTimeSystemRequestType)(nil)).Elem() +} + +type RefreshDateTimeSystemResponse struct { +} + +type RefreshFirewall RefreshFirewallRequestType + +func init() { + t["RefreshFirewall"] = reflect.TypeOf((*RefreshFirewall)(nil)).Elem() +} + +type RefreshFirewallRequestType struct { + This ManagedObjectReference `xml:"_this"` +} + +func init() { + t["RefreshFirewallRequestType"] = reflect.TypeOf((*RefreshFirewallRequestType)(nil)).Elem() +} + +type RefreshFirewallResponse struct { +} + +type RefreshGraphicsManager RefreshGraphicsManagerRequestType + +func init() { + 
t["RefreshGraphicsManager"] = reflect.TypeOf((*RefreshGraphicsManager)(nil)).Elem() +} + +type RefreshGraphicsManagerRequestType struct { + This ManagedObjectReference `xml:"_this"` +} + +func init() { + t["RefreshGraphicsManagerRequestType"] = reflect.TypeOf((*RefreshGraphicsManagerRequestType)(nil)).Elem() +} + +type RefreshGraphicsManagerResponse struct { +} + +type RefreshHealthStatusSystem RefreshHealthStatusSystemRequestType + +func init() { + t["RefreshHealthStatusSystem"] = reflect.TypeOf((*RefreshHealthStatusSystem)(nil)).Elem() +} + +type RefreshHealthStatusSystemRequestType struct { + This ManagedObjectReference `xml:"_this"` +} + +func init() { + t["RefreshHealthStatusSystemRequestType"] = reflect.TypeOf((*RefreshHealthStatusSystemRequestType)(nil)).Elem() +} + +type RefreshHealthStatusSystemResponse struct { +} + +type RefreshNetworkSystem RefreshNetworkSystemRequestType + +func init() { + t["RefreshNetworkSystem"] = reflect.TypeOf((*RefreshNetworkSystem)(nil)).Elem() +} + +type RefreshNetworkSystemRequestType struct { + This ManagedObjectReference `xml:"_this"` +} + +func init() { + t["RefreshNetworkSystemRequestType"] = reflect.TypeOf((*RefreshNetworkSystemRequestType)(nil)).Elem() +} + +type RefreshNetworkSystemResponse struct { +} + +type RefreshRecommendation RefreshRecommendationRequestType + +func init() { + t["RefreshRecommendation"] = reflect.TypeOf((*RefreshRecommendation)(nil)).Elem() +} + +type RefreshRecommendationRequestType struct { + This ManagedObjectReference `xml:"_this"` +} + +func init() { + t["RefreshRecommendationRequestType"] = reflect.TypeOf((*RefreshRecommendationRequestType)(nil)).Elem() +} + +type RefreshRecommendationResponse struct { +} + +type RefreshRequestType struct { + This ManagedObjectReference `xml:"_this"` +} + +func init() { + t["RefreshRequestType"] = reflect.TypeOf((*RefreshRequestType)(nil)).Elem() +} + +type RefreshResponse struct { +} + +type RefreshRuntime RefreshRuntimeRequestType + +func init() { + t["RefreshRuntime"] = reflect.TypeOf((*RefreshRuntime)(nil)).Elem() +} + +type RefreshRuntimeRequestType struct { + This ManagedObjectReference `xml:"_this"` +} + +func init() { + t["RefreshRuntimeRequestType"] = reflect.TypeOf((*RefreshRuntimeRequestType)(nil)).Elem() +} + +type RefreshRuntimeResponse struct { +} + +type RefreshServices RefreshServicesRequestType + +func init() { + t["RefreshServices"] = reflect.TypeOf((*RefreshServices)(nil)).Elem() +} + +type RefreshServicesRequestType struct { + This ManagedObjectReference `xml:"_this"` +} + +func init() { + t["RefreshServicesRequestType"] = reflect.TypeOf((*RefreshServicesRequestType)(nil)).Elem() +} + +type RefreshServicesResponse struct { +} + +type RefreshStorageDrsRecommendation RefreshStorageDrsRecommendationRequestType + +func init() { + t["RefreshStorageDrsRecommendation"] = reflect.TypeOf((*RefreshStorageDrsRecommendation)(nil)).Elem() +} + +type RefreshStorageDrsRecommendationRequestType struct { + This ManagedObjectReference `xml:"_this"` + Pod ManagedObjectReference `xml:"pod"` +} + +func init() { + t["RefreshStorageDrsRecommendationRequestType"] = reflect.TypeOf((*RefreshStorageDrsRecommendationRequestType)(nil)).Elem() +} + +type RefreshStorageDrsRecommendationResponse struct { +} + +type RefreshStorageInfo RefreshStorageInfoRequestType + +func init() { + t["RefreshStorageInfo"] = reflect.TypeOf((*RefreshStorageInfo)(nil)).Elem() +} + +type RefreshStorageInfoRequestType struct { + This ManagedObjectReference `xml:"_this"` +} + +func init() { + 
t["RefreshStorageInfoRequestType"] = reflect.TypeOf((*RefreshStorageInfoRequestType)(nil)).Elem() +} + +type RefreshStorageInfoResponse struct { +} + +type RefreshStorageSystem RefreshStorageSystemRequestType + +func init() { + t["RefreshStorageSystem"] = reflect.TypeOf((*RefreshStorageSystem)(nil)).Elem() +} + +type RefreshStorageSystemRequestType struct { + This ManagedObjectReference `xml:"_this"` +} + +func init() { + t["RefreshStorageSystemRequestType"] = reflect.TypeOf((*RefreshStorageSystemRequestType)(nil)).Elem() +} + +type RefreshStorageSystemResponse struct { +} + +type RegisterChildVMRequestType struct { + This ManagedObjectReference `xml:"_this"` + Path string `xml:"path"` + Name string `xml:"name,omitempty"` + Host *ManagedObjectReference `xml:"host,omitempty"` +} + +func init() { + t["RegisterChildVMRequestType"] = reflect.TypeOf((*RegisterChildVMRequestType)(nil)).Elem() +} + +type RegisterChildVM_Task RegisterChildVMRequestType + +func init() { + t["RegisterChildVM_Task"] = reflect.TypeOf((*RegisterChildVM_Task)(nil)).Elem() +} + +type RegisterChildVM_TaskResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + +type RegisterExtension RegisterExtensionRequestType + +func init() { + t["RegisterExtension"] = reflect.TypeOf((*RegisterExtension)(nil)).Elem() +} + +type RegisterExtensionRequestType struct { + This ManagedObjectReference `xml:"_this"` + Extension Extension `xml:"extension"` +} + +func init() { + t["RegisterExtensionRequestType"] = reflect.TypeOf((*RegisterExtensionRequestType)(nil)).Elem() +} + +type RegisterExtensionResponse struct { +} + +type RegisterVMRequestType struct { + This ManagedObjectReference `xml:"_this"` + Path string `xml:"path"` + Name string `xml:"name,omitempty"` + AsTemplate bool `xml:"asTemplate"` + Pool *ManagedObjectReference `xml:"pool,omitempty"` + Host *ManagedObjectReference `xml:"host,omitempty"` +} + +func init() { + t["RegisterVMRequestType"] = reflect.TypeOf((*RegisterVMRequestType)(nil)).Elem() +} + +type RegisterVM_Task RegisterVMRequestType + +func init() { + t["RegisterVM_Task"] = reflect.TypeOf((*RegisterVM_Task)(nil)).Elem() +} + +type RegisterVM_TaskResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + +type ReleaseCredentialsInGuest ReleaseCredentialsInGuestRequestType + +func init() { + t["ReleaseCredentialsInGuest"] = reflect.TypeOf((*ReleaseCredentialsInGuest)(nil)).Elem() +} + +type ReleaseCredentialsInGuestRequestType struct { + This ManagedObjectReference `xml:"_this"` + Vm ManagedObjectReference `xml:"vm"` + Auth BaseGuestAuthentication `xml:"auth,typeattr"` +} + +func init() { + t["ReleaseCredentialsInGuestRequestType"] = reflect.TypeOf((*ReleaseCredentialsInGuestRequestType)(nil)).Elem() +} + +type ReleaseCredentialsInGuestResponse struct { +} + +type ReleaseIpAllocation ReleaseIpAllocationRequestType + +func init() { + t["ReleaseIpAllocation"] = reflect.TypeOf((*ReleaseIpAllocation)(nil)).Elem() +} + +type ReleaseIpAllocationRequestType struct { + This ManagedObjectReference `xml:"_this"` + Dc ManagedObjectReference `xml:"dc"` + PoolId int `xml:"poolId"` + AllocationId string `xml:"allocationId"` +} + +func init() { + t["ReleaseIpAllocationRequestType"] = reflect.TypeOf((*ReleaseIpAllocationRequestType)(nil)).Elem() +} + +type ReleaseIpAllocationResponse struct { +} + +type Reload ReloadRequestType + +func init() { + t["Reload"] = reflect.TypeOf((*Reload)(nil)).Elem() +} + +type ReloadRequestType struct { + This ManagedObjectReference `xml:"_this"` +} + +func init() { + 
t["ReloadRequestType"] = reflect.TypeOf((*ReloadRequestType)(nil)).Elem() +} + +type ReloadResponse struct { +} + +type RelocateVMRequestType struct { + This ManagedObjectReference `xml:"_this"` + Spec VirtualMachineRelocateSpec `xml:"spec"` + Priority VirtualMachineMovePriority `xml:"priority,omitempty"` +} + +func init() { + t["RelocateVMRequestType"] = reflect.TypeOf((*RelocateVMRequestType)(nil)).Elem() +} + +type RelocateVM_Task RelocateVMRequestType + +func init() { + t["RelocateVM_Task"] = reflect.TypeOf((*RelocateVM_Task)(nil)).Elem() +} + +type RelocateVM_TaskResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + +type RemoteDeviceNotSupported struct { + DeviceNotSupported +} + +func init() { + t["RemoteDeviceNotSupported"] = reflect.TypeOf((*RemoteDeviceNotSupported)(nil)).Elem() +} + +type RemoteDeviceNotSupportedFault RemoteDeviceNotSupported + +func init() { + t["RemoteDeviceNotSupportedFault"] = reflect.TypeOf((*RemoteDeviceNotSupportedFault)(nil)).Elem() +} + +type RemoteTSMEnabledEvent struct { + HostEvent +} + +func init() { + t["RemoteTSMEnabledEvent"] = reflect.TypeOf((*RemoteTSMEnabledEvent)(nil)).Elem() +} + +type RemoveAlarm RemoveAlarmRequestType + +func init() { + t["RemoveAlarm"] = reflect.TypeOf((*RemoveAlarm)(nil)).Elem() +} + +type RemoveAlarmRequestType struct { + This ManagedObjectReference `xml:"_this"` +} + +func init() { + t["RemoveAlarmRequestType"] = reflect.TypeOf((*RemoveAlarmRequestType)(nil)).Elem() +} + +type RemoveAlarmResponse struct { +} + +type RemoveAllSnapshotsRequestType struct { + This ManagedObjectReference `xml:"_this"` + Consolidate *bool `xml:"consolidate"` +} + +func init() { + t["RemoveAllSnapshotsRequestType"] = reflect.TypeOf((*RemoveAllSnapshotsRequestType)(nil)).Elem() +} + +type RemoveAllSnapshots_Task RemoveAllSnapshotsRequestType + +func init() { + t["RemoveAllSnapshots_Task"] = reflect.TypeOf((*RemoveAllSnapshots_Task)(nil)).Elem() +} + +type RemoveAllSnapshots_TaskResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + +type RemoveAssignedLicense RemoveAssignedLicenseRequestType + +func init() { + t["RemoveAssignedLicense"] = reflect.TypeOf((*RemoveAssignedLicense)(nil)).Elem() +} + +type RemoveAssignedLicenseRequestType struct { + This ManagedObjectReference `xml:"_this"` + EntityId string `xml:"entityId"` +} + +func init() { + t["RemoveAssignedLicenseRequestType"] = reflect.TypeOf((*RemoveAssignedLicenseRequestType)(nil)).Elem() +} + +type RemoveAssignedLicenseResponse struct { +} + +type RemoveAuthorizationRole RemoveAuthorizationRoleRequestType + +func init() { + t["RemoveAuthorizationRole"] = reflect.TypeOf((*RemoveAuthorizationRole)(nil)).Elem() +} + +type RemoveAuthorizationRoleRequestType struct { + This ManagedObjectReference `xml:"_this"` + RoleId int `xml:"roleId"` + FailIfUsed bool `xml:"failIfUsed"` +} + +func init() { + t["RemoveAuthorizationRoleRequestType"] = reflect.TypeOf((*RemoveAuthorizationRoleRequestType)(nil)).Elem() +} + +type RemoveAuthorizationRoleResponse struct { +} + +type RemoveCustomFieldDef RemoveCustomFieldDefRequestType + +func init() { + t["RemoveCustomFieldDef"] = reflect.TypeOf((*RemoveCustomFieldDef)(nil)).Elem() +} + +type RemoveCustomFieldDefRequestType struct { + This ManagedObjectReference `xml:"_this"` + Key int `xml:"key"` +} + +func init() { + t["RemoveCustomFieldDefRequestType"] = reflect.TypeOf((*RemoveCustomFieldDefRequestType)(nil)).Elem() +} + +type RemoveCustomFieldDefResponse struct { +} + +type RemoveDatastore 
RemoveDatastoreRequestType + +func init() { + t["RemoveDatastore"] = reflect.TypeOf((*RemoveDatastore)(nil)).Elem() +} + +type RemoveDatastoreExRequestType struct { + This ManagedObjectReference `xml:"_this"` + Datastore []ManagedObjectReference `xml:"datastore"` +} + +func init() { + t["RemoveDatastoreExRequestType"] = reflect.TypeOf((*RemoveDatastoreExRequestType)(nil)).Elem() +} + +type RemoveDatastoreEx_Task RemoveDatastoreExRequestType + +func init() { + t["RemoveDatastoreEx_Task"] = reflect.TypeOf((*RemoveDatastoreEx_Task)(nil)).Elem() +} + +type RemoveDatastoreEx_TaskResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + +type RemoveDatastoreRequestType struct { + This ManagedObjectReference `xml:"_this"` + Datastore ManagedObjectReference `xml:"datastore"` +} + +func init() { + t["RemoveDatastoreRequestType"] = reflect.TypeOf((*RemoveDatastoreRequestType)(nil)).Elem() +} + +type RemoveDatastoreResponse struct { +} + +type RemoveDiskMappingRequestType struct { + This ManagedObjectReference `xml:"_this"` + Mapping []VsanHostDiskMapping `xml:"mapping"` + MaintenanceSpec *HostMaintenanceSpec `xml:"maintenanceSpec,omitempty"` + Timeout int `xml:"timeout,omitempty"` +} + +func init() { + t["RemoveDiskMappingRequestType"] = reflect.TypeOf((*RemoveDiskMappingRequestType)(nil)).Elem() +} + +type RemoveDiskMapping_Task RemoveDiskMappingRequestType + +func init() { + t["RemoveDiskMapping_Task"] = reflect.TypeOf((*RemoveDiskMapping_Task)(nil)).Elem() +} + +type RemoveDiskMapping_TaskResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + +type RemoveDiskRequestType struct { + This ManagedObjectReference `xml:"_this"` + Disk []HostScsiDisk `xml:"disk"` + MaintenanceSpec *HostMaintenanceSpec `xml:"maintenanceSpec,omitempty"` + Timeout int `xml:"timeout,omitempty"` +} + +func init() { + t["RemoveDiskRequestType"] = reflect.TypeOf((*RemoveDiskRequestType)(nil)).Elem() +} + +type RemoveDisk_Task RemoveDiskRequestType + +func init() { + t["RemoveDisk_Task"] = reflect.TypeOf((*RemoveDisk_Task)(nil)).Elem() +} + +type RemoveDisk_TaskResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + +type RemoveEntityPermission RemoveEntityPermissionRequestType + +func init() { + t["RemoveEntityPermission"] = reflect.TypeOf((*RemoveEntityPermission)(nil)).Elem() +} + +type RemoveEntityPermissionRequestType struct { + This ManagedObjectReference `xml:"_this"` + Entity ManagedObjectReference `xml:"entity"` + User string `xml:"user"` + IsGroup bool `xml:"isGroup"` +} + +func init() { + t["RemoveEntityPermissionRequestType"] = reflect.TypeOf((*RemoveEntityPermissionRequestType)(nil)).Elem() +} + +type RemoveEntityPermissionResponse struct { +} + +type RemoveFailed struct { + VimFault +} + +func init() { + t["RemoveFailed"] = reflect.TypeOf((*RemoveFailed)(nil)).Elem() +} + +type RemoveFailedFault RemoveFailed + +func init() { + t["RemoveFailedFault"] = reflect.TypeOf((*RemoveFailedFault)(nil)).Elem() +} + +type RemoveGroup RemoveGroupRequestType + +func init() { + t["RemoveGroup"] = reflect.TypeOf((*RemoveGroup)(nil)).Elem() +} + +type RemoveGroupRequestType struct { + This ManagedObjectReference `xml:"_this"` + GroupName string `xml:"groupName"` +} + +func init() { + t["RemoveGroupRequestType"] = reflect.TypeOf((*RemoveGroupRequestType)(nil)).Elem() +} + +type RemoveGroupResponse struct { +} + +type RemoveGuestAlias RemoveGuestAliasRequestType + +func init() { + t["RemoveGuestAlias"] = reflect.TypeOf((*RemoveGuestAlias)(nil)).Elem() +} + +type 
+type RemoveGuestAliasByCert RemoveGuestAliasByCertRequestType
+
+func init() {
+ t["RemoveGuestAliasByCert"] = reflect.TypeOf((*RemoveGuestAliasByCert)(nil)).Elem()
+}
+
+type RemoveGuestAliasByCertRequestType struct {
+ This ManagedObjectReference `xml:"_this"`
+ Vm ManagedObjectReference `xml:"vm"`
+ Auth BaseGuestAuthentication `xml:"auth,typeattr"`
+ Username string `xml:"username"`
+ Base64Cert string `xml:"base64Cert"`
+}
+
+func init() {
+ t["RemoveGuestAliasByCertRequestType"] = reflect.TypeOf((*RemoveGuestAliasByCertRequestType)(nil)).Elem()
+}
+
+type RemoveGuestAliasByCertResponse struct {
+}
+
+type RemoveGuestAliasRequestType struct {
+ This ManagedObjectReference `xml:"_this"`
+ Vm ManagedObjectReference `xml:"vm"`
+ Auth BaseGuestAuthentication `xml:"auth,typeattr"`
+ Username string `xml:"username"`
+ Base64Cert string `xml:"base64Cert"`
+ Subject BaseGuestAuthSubject `xml:"subject,typeattr"`
+}
+
+func init() {
+ t["RemoveGuestAliasRequestType"] = reflect.TypeOf((*RemoveGuestAliasRequestType)(nil)).Elem()
+}
+
+type RemoveGuestAliasResponse struct {
+}
+
+type RemoveInternetScsiSendTargets RemoveInternetScsiSendTargetsRequestType
+
+func init() {
+ t["RemoveInternetScsiSendTargets"] = reflect.TypeOf((*RemoveInternetScsiSendTargets)(nil)).Elem()
+}
+
+type RemoveInternetScsiSendTargetsRequestType struct {
+ This ManagedObjectReference `xml:"_this"`
+ IScsiHbaDevice string `xml:"iScsiHbaDevice"`
+ Targets []HostInternetScsiHbaSendTarget `xml:"targets"`
+}
+
+func init() {
+ t["RemoveInternetScsiSendTargetsRequestType"] = reflect.TypeOf((*RemoveInternetScsiSendTargetsRequestType)(nil)).Elem()
+}
+
+type RemoveInternetScsiSendTargetsResponse struct {
+}
+
+type RemoveInternetScsiStaticTargets RemoveInternetScsiStaticTargetsRequestType
+
+func init() {
+ t["RemoveInternetScsiStaticTargets"] = reflect.TypeOf((*RemoveInternetScsiStaticTargets)(nil)).Elem()
+}
+
+type RemoveInternetScsiStaticTargetsRequestType struct {
+ This ManagedObjectReference `xml:"_this"`
+ IScsiHbaDevice string `xml:"iScsiHbaDevice"`
+ Targets []HostInternetScsiHbaStaticTarget `xml:"targets"`
+}
+
+func init() {
+ t["RemoveInternetScsiStaticTargetsRequestType"] = reflect.TypeOf((*RemoveInternetScsiStaticTargetsRequestType)(nil)).Elem()
+}
+
+type RemoveInternetScsiStaticTargetsResponse struct {
+}
+
+type RemoveLicense RemoveLicenseRequestType
+
+func init() {
+ t["RemoveLicense"] = reflect.TypeOf((*RemoveLicense)(nil)).Elem()
+}
+
+type RemoveLicenseLabel RemoveLicenseLabelRequestType
+
+func init() {
+ t["RemoveLicenseLabel"] = reflect.TypeOf((*RemoveLicenseLabel)(nil)).Elem()
+}
+
+type RemoveLicenseLabelRequestType struct {
+ This ManagedObjectReference `xml:"_this"`
+ LicenseKey string `xml:"licenseKey"`
+ LabelKey string `xml:"labelKey"`
+}
+
+func init() {
+ t["RemoveLicenseLabelRequestType"] = reflect.TypeOf((*RemoveLicenseLabelRequestType)(nil)).Elem()
+}
+
+type RemoveLicenseLabelResponse struct {
+}
+
+type RemoveLicenseRequestType struct {
+ This ManagedObjectReference `xml:"_this"`
+ LicenseKey string `xml:"licenseKey"`
+}
+
+func init() {
+ t["RemoveLicenseRequestType"] = reflect.TypeOf((*RemoveLicenseRequestType)(nil)).Elem()
+}
+
+type RemoveLicenseResponse struct {
+}
+
+type RemoveNetworkResourcePool RemoveNetworkResourcePoolRequestType
+
+func init() {
+ t["RemoveNetworkResourcePool"] = reflect.TypeOf((*RemoveNetworkResourcePool)(nil)).Elem()
+}
+
+type RemoveNetworkResourcePoolRequestType struct {
+ This ManagedObjectReference `xml:"_this"`
+ Key []string `xml:"key"`
+}
+
+func init() {
+ t["RemoveNetworkResourcePoolRequestType"] = reflect.TypeOf((*RemoveNetworkResourcePoolRequestType)(nil)).Elem()
+}
+
+type RemoveNetworkResourcePoolResponse struct {
+}
+
+type RemovePerfInterval RemovePerfIntervalRequestType
+
+func init() {
+ t["RemovePerfInterval"] = reflect.TypeOf((*RemovePerfInterval)(nil)).Elem()
+}
+
+type RemovePerfIntervalRequestType struct {
+ This ManagedObjectReference `xml:"_this"`
+ SamplePeriod int `xml:"samplePeriod"`
+}
+
+func init() {
+ t["RemovePerfIntervalRequestType"] = reflect.TypeOf((*RemovePerfIntervalRequestType)(nil)).Elem()
+}
+
+type RemovePerfIntervalResponse struct {
+}
+
+type RemovePortGroup RemovePortGroupRequestType
+
+func init() {
+ t["RemovePortGroup"] = reflect.TypeOf((*RemovePortGroup)(nil)).Elem()
+}
+
+type RemovePortGroupRequestType struct {
+ This ManagedObjectReference `xml:"_this"`
+ PgName string `xml:"pgName"`
+}
+
+func init() {
+ t["RemovePortGroupRequestType"] = reflect.TypeOf((*RemovePortGroupRequestType)(nil)).Elem()
+}
+
+type RemovePortGroupResponse struct {
+}
+
+type RemoveScheduledTask RemoveScheduledTaskRequestType
+
+func init() {
+ t["RemoveScheduledTask"] = reflect.TypeOf((*RemoveScheduledTask)(nil)).Elem()
+}
+
+type RemoveScheduledTaskRequestType struct {
+ This ManagedObjectReference `xml:"_this"`
+}
+
+func init() {
+ t["RemoveScheduledTaskRequestType"] = reflect.TypeOf((*RemoveScheduledTaskRequestType)(nil)).Elem()
+}
+
+type RemoveScheduledTaskResponse struct {
+}
+
+type RemoveServiceConsoleVirtualNic RemoveServiceConsoleVirtualNicRequestType
+
+func init() {
+ t["RemoveServiceConsoleVirtualNic"] = reflect.TypeOf((*RemoveServiceConsoleVirtualNic)(nil)).Elem()
+}
+
+type RemoveServiceConsoleVirtualNicRequestType struct {
+ This ManagedObjectReference `xml:"_this"`
+ Device string `xml:"device"`
+}
+
+func init() {
+ t["RemoveServiceConsoleVirtualNicRequestType"] = reflect.TypeOf((*RemoveServiceConsoleVirtualNicRequestType)(nil)).Elem()
+}
+
+type RemoveServiceConsoleVirtualNicResponse struct {
+}
+
+type RemoveSmartCardTrustAnchor RemoveSmartCardTrustAnchorRequestType
+
+func init() {
+ t["RemoveSmartCardTrustAnchor"] = reflect.TypeOf((*RemoveSmartCardTrustAnchor)(nil)).Elem()
+}
+
+type RemoveSmartCardTrustAnchorByFingerprint RemoveSmartCardTrustAnchorByFingerprintRequestType
+
+func init() {
+ t["RemoveSmartCardTrustAnchorByFingerprint"] = reflect.TypeOf((*RemoveSmartCardTrustAnchorByFingerprint)(nil)).Elem()
+}
+
+type RemoveSmartCardTrustAnchorByFingerprintRequestType struct {
+ This ManagedObjectReference `xml:"_this"`
+ Fingerprint string `xml:"fingerprint"`
+ Digest string `xml:"digest"`
+}
+
+func init() {
+ t["RemoveSmartCardTrustAnchorByFingerprintRequestType"] = reflect.TypeOf((*RemoveSmartCardTrustAnchorByFingerprintRequestType)(nil)).Elem()
+}
+
+type RemoveSmartCardTrustAnchorByFingerprintResponse struct {
+}
+
+type RemoveSmartCardTrustAnchorRequestType struct {
+ This ManagedObjectReference `xml:"_this"`
+ Issuer string `xml:"issuer"`
+ Serial string `xml:"serial"`
+}
+
+func init() {
+ t["RemoveSmartCardTrustAnchorRequestType"] = reflect.TypeOf((*RemoveSmartCardTrustAnchorRequestType)(nil)).Elem()
+}
+
+type RemoveSmartCardTrustAnchorResponse struct {
+}
+
+type RemoveSnapshotRequestType struct {
+ This ManagedObjectReference `xml:"_this"`
+ RemoveChildren bool `xml:"removeChildren"`
+ Consolidate *bool `xml:"consolidate"`
+}
+
+func init() {
+ t["RemoveSnapshotRequestType"] = reflect.TypeOf((*RemoveSnapshotRequestType)(nil)).Elem()
+}
+
+type RemoveSnapshot_Task RemoveSnapshotRequestType
+
+func init() {
+ t["RemoveSnapshot_Task"] = reflect.TypeOf((*RemoveSnapshot_Task)(nil)).Elem()
+}
+
+type RemoveSnapshot_TaskResponse struct {
+ Returnval ManagedObjectReference `xml:"returnval"`
+}
+
+type RemoveUser RemoveUserRequestType
+
+func init() {
+ t["RemoveUser"] = reflect.TypeOf((*RemoveUser)(nil)).Elem()
+}
+
+type RemoveUserRequestType struct {
+ This ManagedObjectReference `xml:"_this"`
+ UserName string `xml:"userName"`
+}
+
+func init() {
+ t["RemoveUserRequestType"] = reflect.TypeOf((*RemoveUserRequestType)(nil)).Elem()
+}
+
+type RemoveUserResponse struct {
+}
+
+type RemoveVirtualNic RemoveVirtualNicRequestType
+
+func init() {
+ t["RemoveVirtualNic"] = reflect.TypeOf((*RemoveVirtualNic)(nil)).Elem()
+}
+
+type RemoveVirtualNicRequestType struct {
+ This ManagedObjectReference `xml:"_this"`
+ Device string `xml:"device"`
+}
+
+func init() {
+ t["RemoveVirtualNicRequestType"] = reflect.TypeOf((*RemoveVirtualNicRequestType)(nil)).Elem()
+}
+
+type RemoveVirtualNicResponse struct {
+}
+
+type RemoveVirtualSwitch RemoveVirtualSwitchRequestType
+
+func init() {
+ t["RemoveVirtualSwitch"] = reflect.TypeOf((*RemoveVirtualSwitch)(nil)).Elem()
+}
+
+type RemoveVirtualSwitchRequestType struct {
+ This ManagedObjectReference `xml:"_this"`
+ VswitchName string `xml:"vswitchName"`
+}
+
+func init() {
+ t["RemoveVirtualSwitchRequestType"] = reflect.TypeOf((*RemoveVirtualSwitchRequestType)(nil)).Elem()
+}
+
+type RemoveVirtualSwitchResponse struct {
+}
+
+type RenameCustomFieldDef RenameCustomFieldDefRequestType
+
+func init() {
+ t["RenameCustomFieldDef"] = reflect.TypeOf((*RenameCustomFieldDef)(nil)).Elem()
+}
+
+type RenameCustomFieldDefRequestType struct {
+ This ManagedObjectReference `xml:"_this"`
+ Key int `xml:"key"`
+ Name string `xml:"name"`
+}
+
+func init() {
+ t["RenameCustomFieldDefRequestType"] = reflect.TypeOf((*RenameCustomFieldDefRequestType)(nil)).Elem()
+}
+
+type RenameCustomFieldDefResponse struct {
+}
+
+type RenameCustomizationSpec RenameCustomizationSpecRequestType
+
+func init() {
+ t["RenameCustomizationSpec"] = reflect.TypeOf((*RenameCustomizationSpec)(nil)).Elem()
+}
+
+type RenameCustomizationSpecRequestType struct {
+ This ManagedObjectReference `xml:"_this"`
+ Name string `xml:"name"`
+ NewName string `xml:"newName"`
+}
+
+func init() {
+ t["RenameCustomizationSpecRequestType"] = reflect.TypeOf((*RenameCustomizationSpecRequestType)(nil)).Elem()
+}
+
+type RenameCustomizationSpecResponse struct {
+}
+
+type RenameDatastore RenameDatastoreRequestType
+
+func init() {
+ t["RenameDatastore"] = reflect.TypeOf((*RenameDatastore)(nil)).Elem()
+}
+
+type RenameDatastoreRequestType struct {
+ This ManagedObjectReference `xml:"_this"`
+ NewName string `xml:"newName"`
+}
+
+func init() {
+ t["RenameDatastoreRequestType"] = reflect.TypeOf((*RenameDatastoreRequestType)(nil)).Elem()
+}
+
+type RenameDatastoreResponse struct {
+}
+
+type RenameRequestType struct {
+ This ManagedObjectReference `xml:"_this"`
+ NewName string `xml:"newName"`
+}
+
+func init() {
+ t["RenameRequestType"] = reflect.TypeOf((*RenameRequestType)(nil)).Elem()
+}
+
+type RenameSnapshot RenameSnapshotRequestType
+
+func init() {
+ t["RenameSnapshot"] = reflect.TypeOf((*RenameSnapshot)(nil)).Elem()
+}
+
+type RenameSnapshotRequestType struct {
+ This ManagedObjectReference `xml:"_this"`
+ Name string `xml:"name,omitempty"`
+ Description string `xml:"description,omitempty"`
+}
+
+func init() {
+ t["RenameSnapshotRequestType"] = reflect.TypeOf((*RenameSnapshotRequestType)(nil)).Elem()
+}
+
+type RenameSnapshotResponse struct {
+}
+
+type Rename_Task RenameRequestType
+
+func init() {
+ t["Rename_Task"] = reflect.TypeOf((*Rename_Task)(nil)).Elem()
+}
+
+type Rename_TaskResponse struct {
+ Returnval ManagedObjectReference `xml:"returnval"`
+}
+
+type ReplaceCACertificatesAndCRLs ReplaceCACertificatesAndCRLsRequestType
+
+func init() {
+ t["ReplaceCACertificatesAndCRLs"] = reflect.TypeOf((*ReplaceCACertificatesAndCRLs)(nil)).Elem()
+}
+
+type ReplaceCACertificatesAndCRLsRequestType struct {
+ This ManagedObjectReference `xml:"_this"`
+ CaCert []string `xml:"caCert"`
+ CaCrl []string `xml:"caCrl,omitempty"`
+}
+
+func init() {
+ t["ReplaceCACertificatesAndCRLsRequestType"] = reflect.TypeOf((*ReplaceCACertificatesAndCRLsRequestType)(nil)).Elem()
+}
+
+type ReplaceCACertificatesAndCRLsResponse struct {
+}
+
+type ReplaceSmartCardTrustAnchors ReplaceSmartCardTrustAnchorsRequestType
+
+func init() {
+ t["ReplaceSmartCardTrustAnchors"] = reflect.TypeOf((*ReplaceSmartCardTrustAnchors)(nil)).Elem()
+}
+
+type ReplaceSmartCardTrustAnchorsRequestType struct {
+ This ManagedObjectReference `xml:"_this"`
+ Certs []string `xml:"certs,omitempty"`
+}
+
+func init() {
+ t["ReplaceSmartCardTrustAnchorsRequestType"] = reflect.TypeOf((*ReplaceSmartCardTrustAnchorsRequestType)(nil)).Elem()
+}
+
+type ReplaceSmartCardTrustAnchorsResponse struct {
+}
+
+type ReplicationConfigFault struct {
+ ReplicationFault
+}
+
+func init() {
+ t["ReplicationConfigFault"] = reflect.TypeOf((*ReplicationConfigFault)(nil)).Elem()
+}
+
+type ReplicationConfigFaultFault BaseReplicationConfigFault
+
+func init() {
+ t["ReplicationConfigFaultFault"] = reflect.TypeOf((*ReplicationConfigFaultFault)(nil)).Elem()
+}
+
+type ReplicationConfigSpec struct {
+ DynamicData
+
+ Generation int64 `xml:"generation"`
+ VmReplicationId string `xml:"vmReplicationId"`
+ Destination string `xml:"destination"`
+ Port int `xml:"port"`
+ Rpo int64 `xml:"rpo"`
+ QuiesceGuestEnabled bool `xml:"quiesceGuestEnabled"`
+ Paused bool `xml:"paused"`
+ OppUpdatesEnabled bool `xml:"oppUpdatesEnabled"`
+ NetCompressionEnabled *bool `xml:"netCompressionEnabled"`
+ Disk []ReplicationInfoDiskSettings `xml:"disk,omitempty"`
+}
+
+func init() {
+ t["ReplicationConfigSpec"] = reflect.TypeOf((*ReplicationConfigSpec)(nil)).Elem()
+}
+
+type ReplicationDiskConfigFault struct {
+ ReplicationConfigFault
+
+ Reason string `xml:"reason,omitempty"`
+ VmRef *ManagedObjectReference `xml:"vmRef,omitempty"`
+ Key int `xml:"key,omitempty"`
+}
+
+func init() {
+ t["ReplicationDiskConfigFault"] = reflect.TypeOf((*ReplicationDiskConfigFault)(nil)).Elem()
+}
+
+type ReplicationDiskConfigFaultFault ReplicationDiskConfigFault
+
+func init() {
+ t["ReplicationDiskConfigFaultFault"] = reflect.TypeOf((*ReplicationDiskConfigFaultFault)(nil)).Elem()
+}
+
+type ReplicationFault struct {
+ VimFault
+}
+
+func init() {
+ t["ReplicationFault"] = reflect.TypeOf((*ReplicationFault)(nil)).Elem()
+}
+
+type ReplicationFaultFault BaseReplicationFault
+
+func init() {
+ t["ReplicationFaultFault"] = reflect.TypeOf((*ReplicationFaultFault)(nil)).Elem()
+}
+
+type ReplicationIncompatibleWithFT struct {
+ ReplicationFault
+}
+
+func init() {
+ t["ReplicationIncompatibleWithFT"] = reflect.TypeOf((*ReplicationIncompatibleWithFT)(nil)).Elem()
+}
+
+type ReplicationIncompatibleWithFTFault ReplicationIncompatibleWithFT
+
+func init() {
+ t["ReplicationIncompatibleWithFTFault"] = reflect.TypeOf((*ReplicationIncompatibleWithFTFault)(nil)).Elem()
+}
+
+type ReplicationInfoDiskSettings struct {
+ DynamicData
+
+ Key int `xml:"key"`
+ DiskReplicationId string `xml:"diskReplicationId"`
+}
+
+func init() {
+ t["ReplicationInfoDiskSettings"] = reflect.TypeOf((*ReplicationInfoDiskSettings)(nil)).Elem()
+}
+
+type ReplicationInvalidOptions struct {
+ ReplicationFault
+
+ Options string `xml:"options"`
+ Entity *ManagedObjectReference `xml:"entity,omitempty"`
+}
+
+func init() {
+ t["ReplicationInvalidOptions"] = reflect.TypeOf((*ReplicationInvalidOptions)(nil)).Elem()
+}
+
+type ReplicationInvalidOptionsFault ReplicationInvalidOptions
+
+func init() {
+ t["ReplicationInvalidOptionsFault"] = reflect.TypeOf((*ReplicationInvalidOptionsFault)(nil)).Elem()
+}
+
+type ReplicationNotSupportedOnHost struct {
+ ReplicationFault
+}
+
+func init() {
+ t["ReplicationNotSupportedOnHost"] = reflect.TypeOf((*ReplicationNotSupportedOnHost)(nil)).Elem()
+}
+
+type ReplicationNotSupportedOnHostFault ReplicationNotSupportedOnHost
+
+func init() {
+ t["ReplicationNotSupportedOnHostFault"] = reflect.TypeOf((*ReplicationNotSupportedOnHostFault)(nil)).Elem()
+}
+
+type ReplicationVmConfigFault struct {
+ ReplicationConfigFault
+
+ Reason string `xml:"reason,omitempty"`
+ VmRef *ManagedObjectReference `xml:"vmRef,omitempty"`
+}
+
+func init() {
+ t["ReplicationVmConfigFault"] = reflect.TypeOf((*ReplicationVmConfigFault)(nil)).Elem()
+}
+
+type ReplicationVmConfigFaultFault ReplicationVmConfigFault
+
+func init() {
+ t["ReplicationVmConfigFaultFault"] = reflect.TypeOf((*ReplicationVmConfigFaultFault)(nil)).Elem()
+}
+
+type ReplicationVmFault struct {
+ ReplicationFault
+
+ Reason string `xml:"reason,omitempty"`
+ State string `xml:"state,omitempty"`
+ InstanceId string `xml:"instanceId,omitempty"`
+ Vm *ManagedObjectReference `xml:"vm,omitempty"`
+}
+
+func init() {
+ t["ReplicationVmFault"] = reflect.TypeOf((*ReplicationVmFault)(nil)).Elem()
+}
+
+type ReplicationVmFaultFault BaseReplicationVmFault
+
+func init() {
+ t["ReplicationVmFaultFault"] = reflect.TypeOf((*ReplicationVmFaultFault)(nil)).Elem()
+}
+
+type ReplicationVmInProgressFault struct {
+ ReplicationVmFault
+
+ RequestedActivity string `xml:"requestedActivity"`
+ InProgressActivity string `xml:"inProgressActivity"`
+}
+
+func init() {
+ t["ReplicationVmInProgressFault"] = reflect.TypeOf((*ReplicationVmInProgressFault)(nil)).Elem()
+}
+
+type ReplicationVmInProgressFaultFault ReplicationVmInProgressFault
+
+func init() {
+ t["ReplicationVmInProgressFaultFault"] = reflect.TypeOf((*ReplicationVmInProgressFaultFault)(nil)).Elem()
+}
+
+type ReplicationVmProgressInfo struct {
+ DynamicData
+
+ Progress int `xml:"progress"`
+ BytesTransferred int64 `xml:"bytesTransferred"`
+ BytesToTransfer int64 `xml:"bytesToTransfer"`
+ ChecksumTotalBytes int64 `xml:"checksumTotalBytes,omitempty"`
+ ChecksumComparedBytes int64 `xml:"checksumComparedBytes,omitempty"`
+}
+
+func init() {
+ t["ReplicationVmProgressInfo"] = reflect.TypeOf((*ReplicationVmProgressInfo)(nil)).Elem()
+}
+
+type RequestCanceled struct {
+ RuntimeFault
+}
+
+func init() {
+ t["RequestCanceled"] = reflect.TypeOf((*RequestCanceled)(nil)).Elem()
+}
+
+type RequestCanceledFault RequestCanceled
+
+func init() {
+ t["RequestCanceledFault"] = reflect.TypeOf((*RequestCanceledFault)(nil)).Elem()
+}
+
+type RescanAllHba RescanAllHbaRequestType
+
+func init() {
+ t["RescanAllHba"] = reflect.TypeOf((*RescanAllHba)(nil)).Elem()
+}
+
+type RescanAllHbaRequestType struct {
+ This ManagedObjectReference `xml:"_this"`
+}
+
+func init() {
+ t["RescanAllHbaRequestType"] = reflect.TypeOf((*RescanAllHbaRequestType)(nil)).Elem()
+}
+
+type RescanAllHbaResponse struct {
+}
+
+type RescanHba RescanHbaRequestType
+
+func init() {
+ t["RescanHba"] = reflect.TypeOf((*RescanHba)(nil)).Elem()
+}
+
+type RescanHbaRequestType struct {
+ This ManagedObjectReference `xml:"_this"`
+ HbaDevice string `xml:"hbaDevice"`
+}
+
+func init() {
+ t["RescanHbaRequestType"] = reflect.TypeOf((*RescanHbaRequestType)(nil)).Elem()
+}
+
+type RescanHbaResponse struct {
+}
+
+type RescanVffs RescanVffsRequestType
+
+func init() {
+ t["RescanVffs"] = reflect.TypeOf((*RescanVffs)(nil)).Elem()
+}
+
+type RescanVffsRequestType struct {
+ This ManagedObjectReference `xml:"_this"`
+}
+
+func init() {
+ t["RescanVffsRequestType"] = reflect.TypeOf((*RescanVffsRequestType)(nil)).Elem()
+}
+
+type RescanVffsResponse struct {
+}
+
+type RescanVmfs RescanVmfsRequestType
+
+func init() {
+ t["RescanVmfs"] = reflect.TypeOf((*RescanVmfs)(nil)).Elem()
+}
+
+type RescanVmfsRequestType struct {
+ This ManagedObjectReference `xml:"_this"`
+}
+
+func init() {
+ t["RescanVmfsRequestType"] = reflect.TypeOf((*RescanVmfsRequestType)(nil)).Elem()
+}
+
+type RescanVmfsResponse struct {
+}
+
+type ResetCollector ResetCollectorRequestType
+
+func init() {
+ t["ResetCollector"] = reflect.TypeOf((*ResetCollector)(nil)).Elem()
+}
+
+type ResetCollectorRequestType struct {
+ This ManagedObjectReference `xml:"_this"`
+}
+
+func init() {
+ t["ResetCollectorRequestType"] = reflect.TypeOf((*ResetCollectorRequestType)(nil)).Elem()
+}
+
+type ResetCollectorResponse struct {
+}
+
+type ResetCounterLevelMapping ResetCounterLevelMappingRequestType
+
+func init() {
+ t["ResetCounterLevelMapping"] = reflect.TypeOf((*ResetCounterLevelMapping)(nil)).Elem()
+}
+
+type ResetCounterLevelMappingRequestType struct {
+ This ManagedObjectReference `xml:"_this"`
+ Counters []int `xml:"counters"`
+}
+
+func init() {
+ t["ResetCounterLevelMappingRequestType"] = reflect.TypeOf((*ResetCounterLevelMappingRequestType)(nil)).Elem()
+}
+
+type ResetCounterLevelMappingResponse struct {
+}
+
+type ResetEntityPermissions ResetEntityPermissionsRequestType
+
+func init() {
+ t["ResetEntityPermissions"] = reflect.TypeOf((*ResetEntityPermissions)(nil)).Elem()
+}
+
+type ResetEntityPermissionsRequestType struct {
+ This ManagedObjectReference `xml:"_this"`
+ Entity ManagedObjectReference `xml:"entity"`
+ Permission []Permission `xml:"permission,omitempty"`
+}
+
+func init() {
+ t["ResetEntityPermissionsRequestType"] = reflect.TypeOf((*ResetEntityPermissionsRequestType)(nil)).Elem()
+}
+
+type ResetEntityPermissionsResponse struct {
+}
+
+type ResetFirmwareToFactoryDefaults ResetFirmwareToFactoryDefaultsRequestType
+
+func init() {
+ t["ResetFirmwareToFactoryDefaults"] = reflect.TypeOf((*ResetFirmwareToFactoryDefaults)(nil)).Elem()
+}
+
+type ResetFirmwareToFactoryDefaultsRequestType struct {
+ This ManagedObjectReference `xml:"_this"`
+}
+
+func init() {
+ t["ResetFirmwareToFactoryDefaultsRequestType"] = reflect.TypeOf((*ResetFirmwareToFactoryDefaultsRequestType)(nil)).Elem()
+}
+
+type ResetFirmwareToFactoryDefaultsResponse struct {
+}
+
+type ResetGuestInformation ResetGuestInformationRequestType
+
+func init() {
+ t["ResetGuestInformation"] = reflect.TypeOf((*ResetGuestInformation)(nil)).Elem()
+}
+
+type ResetGuestInformationRequestType struct {
+ This ManagedObjectReference `xml:"_this"`
+}
+
+func init() {
+ t["ResetGuestInformationRequestType"] = reflect.TypeOf((*ResetGuestInformationRequestType)(nil)).Elem()
+}
+
+type ResetGuestInformationResponse struct {
+}
+
+type ResetListView ResetListViewRequestType
+
+func init() {
+ t["ResetListView"] = reflect.TypeOf((*ResetListView)(nil)).Elem()
+}
+
+type ResetListViewFromView ResetListViewFromViewRequestType
+
+func init() {
+ t["ResetListViewFromView"] = reflect.TypeOf((*ResetListViewFromView)(nil)).Elem()
+}
+
+type ResetListViewFromViewRequestType struct {
+ This ManagedObjectReference `xml:"_this"`
+ View ManagedObjectReference `xml:"view"`
+}
+
+func init() {
+ t["ResetListViewFromViewRequestType"] = reflect.TypeOf((*ResetListViewFromViewRequestType)(nil)).Elem()
+}
+
+type ResetListViewFromViewResponse struct {
+}
+
+type ResetListViewRequestType struct {
+ This ManagedObjectReference `xml:"_this"`
+ Obj []ManagedObjectReference `xml:"obj,omitempty"`
+}
+
+func init() {
+ t["ResetListViewRequestType"] = reflect.TypeOf((*ResetListViewRequestType)(nil)).Elem()
+}
+
+type ResetListViewResponse struct {
+ Returnval []ManagedObjectReference `xml:"returnval,omitempty"`
+}
+
+type ResetSystemHealthInfo ResetSystemHealthInfoRequestType
+
+func init() {
+ t["ResetSystemHealthInfo"] = reflect.TypeOf((*ResetSystemHealthInfo)(nil)).Elem()
+}
+
+type ResetSystemHealthInfoRequestType struct {
+ This ManagedObjectReference `xml:"_this"`
+}
+
+func init() {
+ t["ResetSystemHealthInfoRequestType"] = reflect.TypeOf((*ResetSystemHealthInfoRequestType)(nil)).Elem()
+}
+
+type ResetSystemHealthInfoResponse struct {
+}
+
+type ResetVMRequestType struct {
+ This ManagedObjectReference `xml:"_this"`
+}
+
+func init() {
+ t["ResetVMRequestType"] = reflect.TypeOf((*ResetVMRequestType)(nil)).Elem()
+}
+
+type ResetVM_Task ResetVMRequestType
+
+func init() {
+ t["ResetVM_Task"] = reflect.TypeOf((*ResetVM_Task)(nil)).Elem()
+}
+
+type ResetVM_TaskResponse struct {
+ Returnval ManagedObjectReference `xml:"returnval"`
+}
+
+type ResignatureUnresolvedVmfsVolumeRequestType struct {
+ This ManagedObjectReference `xml:"_this"`
+ ResolutionSpec HostUnresolvedVmfsResignatureSpec `xml:"resolutionSpec"`
+}
+
+func init() {
+ t["ResignatureUnresolvedVmfsVolumeRequestType"] = reflect.TypeOf((*ResignatureUnresolvedVmfsVolumeRequestType)(nil)).Elem()
+}
+
+type ResignatureUnresolvedVmfsVolume_Task ResignatureUnresolvedVmfsVolumeRequestType
+
+func init() {
+ t["ResignatureUnresolvedVmfsVolume_Task"] = reflect.TypeOf((*ResignatureUnresolvedVmfsVolume_Task)(nil)).Elem()
+}
+
+type ResignatureUnresolvedVmfsVolume_TaskResponse struct {
+ Returnval ManagedObjectReference `xml:"returnval"`
+}
+
+type ResolveInstallationErrorsOnClusterRequestType struct {
+ This ManagedObjectReference `xml:"_this"`
+ FilterId string `xml:"filterId"`
+ Cluster ManagedObjectReference `xml:"cluster"`
+}
+
+func init() {
+ t["ResolveInstallationErrorsOnClusterRequestType"] = reflect.TypeOf((*ResolveInstallationErrorsOnClusterRequestType)(nil)).Elem()
+}
+
+type ResolveInstallationErrorsOnCluster_Task ResolveInstallationErrorsOnClusterRequestType
+
+func init() {
+ t["ResolveInstallationErrorsOnCluster_Task"] = reflect.TypeOf((*ResolveInstallationErrorsOnCluster_Task)(nil)).Elem()
+}
+
+type ResolveInstallationErrorsOnCluster_TaskResponse struct {
+ Returnval ManagedObjectReference `xml:"returnval"`
+}
+
+type ResolveInstallationErrorsOnHostRequestType struct {
+ This ManagedObjectReference `xml:"_this"`
+ FilterId string `xml:"filterId"`
+ Host ManagedObjectReference `xml:"host"`
+}
+
+func init() {
+ t["ResolveInstallationErrorsOnHostRequestType"] = reflect.TypeOf((*ResolveInstallationErrorsOnHostRequestType)(nil)).Elem()
+}
+
+type ResolveInstallationErrorsOnHost_Task ResolveInstallationErrorsOnHostRequestType
+
+func init() {
+ t["ResolveInstallationErrorsOnHost_Task"] = reflect.TypeOf((*ResolveInstallationErrorsOnHost_Task)(nil)).Elem()
+}
+
+type ResolveInstallationErrorsOnHost_TaskResponse struct {
+ Returnval ManagedObjectReference `xml:"returnval"`
+}
+
+type ResolveMultipleUnresolvedVmfsVolumes ResolveMultipleUnresolvedVmfsVolumesRequestType
+
+func init() {
+ t["ResolveMultipleUnresolvedVmfsVolumes"] = reflect.TypeOf((*ResolveMultipleUnresolvedVmfsVolumes)(nil)).Elem()
+}
+
+type ResolveMultipleUnresolvedVmfsVolumesExRequestType struct {
+ This ManagedObjectReference `xml:"_this"`
+ ResolutionSpec []HostUnresolvedVmfsResolutionSpec `xml:"resolutionSpec"`
+}
+
+func init() {
+ t["ResolveMultipleUnresolvedVmfsVolumesExRequestType"] = reflect.TypeOf((*ResolveMultipleUnresolvedVmfsVolumesExRequestType)(nil)).Elem()
+}
+
+type ResolveMultipleUnresolvedVmfsVolumesEx_Task ResolveMultipleUnresolvedVmfsVolumesExRequestType
+
+func init() {
+ t["ResolveMultipleUnresolvedVmfsVolumesEx_Task"] = reflect.TypeOf((*ResolveMultipleUnresolvedVmfsVolumesEx_Task)(nil)).Elem()
+}
+
+type ResolveMultipleUnresolvedVmfsVolumesEx_TaskResponse struct {
+ Returnval ManagedObjectReference `xml:"returnval"`
+}
+
+type ResolveMultipleUnresolvedVmfsVolumesRequestType struct {
+ This ManagedObjectReference `xml:"_this"`
+ ResolutionSpec []HostUnresolvedVmfsResolutionSpec `xml:"resolutionSpec"`
+}
+
+func init() {
+ t["ResolveMultipleUnresolvedVmfsVolumesRequestType"] = reflect.TypeOf((*ResolveMultipleUnresolvedVmfsVolumesRequestType)(nil)).Elem()
+}
+
+type ResolveMultipleUnresolvedVmfsVolumesResponse struct {
+ Returnval []HostUnresolvedVmfsResolutionResult `xml:"returnval,omitempty"`
+}
+
+type ResourceAllocationInfo struct {
+ DynamicData
+
+ Reservation int64 `xml:"reservation,omitempty"`
+ ExpandableReservation *bool `xml:"expandableReservation"`
+ Limit int64 `xml:"limit,omitempty"`
+ Shares *SharesInfo `xml:"shares,omitempty"`
+ OverheadLimit int64 `xml:"overheadLimit,omitempty"`
+}
+
+func init() {
+ t["ResourceAllocationInfo"] = reflect.TypeOf((*ResourceAllocationInfo)(nil)).Elem()
+}
+
+type ResourceAllocationOption struct {
+ DynamicData
+
+ SharesOption SharesOption `xml:"sharesOption"`
+}
+
+func init() {
+ t["ResourceAllocationOption"] = reflect.TypeOf((*ResourceAllocationOption)(nil)).Elem()
+}
+
+type ResourceConfigOption struct {
+ DynamicData
+
+ CpuAllocationOption ResourceAllocationOption `xml:"cpuAllocationOption"`
+ MemoryAllocationOption ResourceAllocationOption `xml:"memoryAllocationOption"`
+}
+
+func init() {
+ t["ResourceConfigOption"] = reflect.TypeOf((*ResourceConfigOption)(nil)).Elem()
+}
+
+type ResourceConfigSpec struct {
+ DynamicData
+
+ Entity *ManagedObjectReference `xml:"entity,omitempty"`
+ ChangeVersion string `xml:"changeVersion,omitempty"`
+ LastModified *time.Time `xml:"lastModified"`
+ CpuAllocation BaseResourceAllocationInfo `xml:"cpuAllocation,typeattr"`
+ MemoryAllocation BaseResourceAllocationInfo `xml:"memoryAllocation,typeattr"`
+}
+
+func init() {
+ t["ResourceConfigSpec"] = reflect.TypeOf((*ResourceConfigSpec)(nil)).Elem()
+}
+
+type ResourceInUse struct {
+ VimFault
+
+ Type string `xml:"type,omitempty"`
+ Name string `xml:"name,omitempty"`
+}
+
+func init() {
+ t["ResourceInUse"] = reflect.TypeOf((*ResourceInUse)(nil)).Elem()
+}
+
+type ResourceInUseFault BaseResourceInUse
+
+func init() {
+ t["ResourceInUseFault"] = reflect.TypeOf((*ResourceInUseFault)(nil)).Elem()
+}
+
+type ResourceNotAvailable struct {
+ VimFault
+
+ ContainerType string `xml:"containerType,omitempty"`
+ ContainerName string `xml:"containerName,omitempty"`
+ Type string `xml:"type,omitempty"`
+}
+
+func init() {
+ t["ResourceNotAvailable"] = reflect.TypeOf((*ResourceNotAvailable)(nil)).Elem()
+}
+
+type ResourceNotAvailableFault ResourceNotAvailable
+
+func init() {
+ t["ResourceNotAvailableFault"] = reflect.TypeOf((*ResourceNotAvailableFault)(nil)).Elem()
+}
+
+type ResourcePoolCreatedEvent struct {
+ ResourcePoolEvent
+
+ Parent ResourcePoolEventArgument `xml:"parent"`
+}
+
+func init() {
+ t["ResourcePoolCreatedEvent"] = reflect.TypeOf((*ResourcePoolCreatedEvent)(nil)).Elem()
+}
+
+type ResourcePoolDestroyedEvent struct {
+ ResourcePoolEvent
+}
+
+func init() {
+ t["ResourcePoolDestroyedEvent"] = reflect.TypeOf((*ResourcePoolDestroyedEvent)(nil)).Elem()
+}
+
+type ResourcePoolEvent struct {
+ Event
+
+ ResourcePool ResourcePoolEventArgument `xml:"resourcePool"`
+}
+
+func init() {
+ t["ResourcePoolEvent"] = reflect.TypeOf((*ResourcePoolEvent)(nil)).Elem()
+}
+
+type ResourcePoolEventArgument struct {
+ EntityEventArgument
+
+ ResourcePool ManagedObjectReference `xml:"resourcePool"`
+}
+
+func init() {
+ t["ResourcePoolEventArgument"] = reflect.TypeOf((*ResourcePoolEventArgument)(nil)).Elem()
+}
+
+type ResourcePoolMovedEvent struct {
+ ResourcePoolEvent
+
+ OldParent ResourcePoolEventArgument `xml:"oldParent"`
+ NewParent ResourcePoolEventArgument `xml:"newParent"`
+}
+
+func init() {
+ t["ResourcePoolMovedEvent"] = reflect.TypeOf((*ResourcePoolMovedEvent)(nil)).Elem()
+}
+
+type ResourcePoolQuickStats struct {
+ DynamicData
+
+ OverallCpuUsage int64 `xml:"overallCpuUsage,omitempty"`
+ OverallCpuDemand int64 `xml:"overallCpuDemand,omitempty"`
+ GuestMemoryUsage int64 `xml:"guestMemoryUsage,omitempty"`
+ HostMemoryUsage int64 `xml:"hostMemoryUsage,omitempty"`
+ DistributedCpuEntitlement int64 `xml:"distributedCpuEntitlement,omitempty"`
+ DistributedMemoryEntitlement int64 `xml:"distributedMemoryEntitlement,omitempty"`
+ StaticCpuEntitlement int `xml:"staticCpuEntitlement,omitempty"`
+ StaticMemoryEntitlement int `xml:"staticMemoryEntitlement,omitempty"`
+ PrivateMemory int64 `xml:"privateMemory,omitempty"`
+ SharedMemory int64 `xml:"sharedMemory,omitempty"`
+ SwappedMemory int64 `xml:"swappedMemory,omitempty"`
+ BalloonedMemory int64 `xml:"balloonedMemory,omitempty"`
+ OverheadMemory int64 `xml:"overheadMemory,omitempty"`
+ ConsumedOverheadMemory int64 `xml:"consumedOverheadMemory,omitempty"`
+ CompressedMemory int64 `xml:"compressedMemory,omitempty"`
+}
+
+func init() {
+ t["ResourcePoolQuickStats"] = reflect.TypeOf((*ResourcePoolQuickStats)(nil)).Elem()
+}
+
+type ResourcePoolReconfiguredEvent struct {
+ ResourcePoolEvent
+}
+
+func init() {
+ t["ResourcePoolReconfiguredEvent"] = reflect.TypeOf((*ResourcePoolReconfiguredEvent)(nil)).Elem()
+}
+
+type ResourcePoolResourceUsage struct {
+ DynamicData
+
+ ReservationUsed int64 `xml:"reservationUsed"`
+ ReservationUsedForVm int64 `xml:"reservationUsedForVm"`
+ UnreservedForPool int64 `xml:"unreservedForPool"`
+ UnreservedForVm int64 `xml:"unreservedForVm"`
+ OverallUsage int64 `xml:"overallUsage"`
+ MaxUsage int64 `xml:"maxUsage"`
+}
+
+func init() {
+ t["ResourcePoolResourceUsage"] = reflect.TypeOf((*ResourcePoolResourceUsage)(nil)).Elem()
+}
+
+type ResourcePoolRuntimeInfo struct {
+ DynamicData
+
+ Memory ResourcePoolResourceUsage `xml:"memory"`
+ Cpu ResourcePoolResourceUsage `xml:"cpu"`
+ OverallStatus ManagedEntityStatus `xml:"overallStatus"`
+}
+
+func init() {
+ t["ResourcePoolRuntimeInfo"] = reflect.TypeOf((*ResourcePoolRuntimeInfo)(nil)).Elem()
+}
+
+type ResourcePoolSummary struct {
+ DynamicData
+
+ Name string `xml:"name"`
+ Config ResourceConfigSpec `xml:"config"`
+ Runtime ResourcePoolRuntimeInfo `xml:"runtime"`
+ QuickStats *ResourcePoolQuickStats `xml:"quickStats,omitempty"`
+ ConfiguredMemoryMB int `xml:"configuredMemoryMB,omitempty"`
+}
+
+func init() {
+ t["ResourcePoolSummary"] = reflect.TypeOf((*ResourcePoolSummary)(nil)).Elem()
+}
+
+type ResourceViolatedEvent struct {
+ ResourcePoolEvent
+}
+
+func init() {
+ t["ResourceViolatedEvent"] = reflect.TypeOf((*ResourceViolatedEvent)(nil)).Elem()
+}
+
+type RestartService RestartServiceRequestType
+
+func init() {
+ t["RestartService"] = reflect.TypeOf((*RestartService)(nil)).Elem()
+}
+
+type RestartServiceConsoleVirtualNic RestartServiceConsoleVirtualNicRequestType
+
+func init() {
+ t["RestartServiceConsoleVirtualNic"] = reflect.TypeOf((*RestartServiceConsoleVirtualNic)(nil)).Elem()
+}
+
+type RestartServiceConsoleVirtualNicRequestType struct {
+ This ManagedObjectReference `xml:"_this"`
+ Device string `xml:"device"`
+}
+
+func init() {
+ t["RestartServiceConsoleVirtualNicRequestType"] = reflect.TypeOf((*RestartServiceConsoleVirtualNicRequestType)(nil)).Elem()
+}
+
+type RestartServiceConsoleVirtualNicResponse struct {
+}
+
+type RestartServiceRequestType struct {
+ This ManagedObjectReference `xml:"_this"`
+ Id string `xml:"id"`
+}
+
+func init() {
+ t["RestartServiceRequestType"] = reflect.TypeOf((*RestartServiceRequestType)(nil)).Elem()
+}
+
+type RestartServiceResponse struct {
+}
+
+type RestoreFirmwareConfiguration RestoreFirmwareConfigurationRequestType
+
+func init() {
+ t["RestoreFirmwareConfiguration"] = reflect.TypeOf((*RestoreFirmwareConfiguration)(nil)).Elem()
+}
+
+type RestoreFirmwareConfigurationRequestType struct {
+ This ManagedObjectReference `xml:"_this"`
+ Force bool `xml:"force"`
+}
+
+func init() {
+ t["RestoreFirmwareConfigurationRequestType"] = reflect.TypeOf((*RestoreFirmwareConfigurationRequestType)(nil)).Elem()
+}
+
+type RestoreFirmwareConfigurationResponse struct {
+}
+
+type RestrictedByAdministrator struct {
+ RuntimeFault
+
+ Details string `xml:"details"`
+}
+
+func init() {
+ t["RestrictedByAdministrator"] = reflect.TypeOf((*RestrictedByAdministrator)(nil)).Elem()
+}
+
+type RestrictedByAdministratorFault RestrictedByAdministrator
+
+func init() {
+ t["RestrictedByAdministratorFault"] = reflect.TypeOf((*RestrictedByAdministratorFault)(nil)).Elem()
+}
+
+type RestrictedVersion struct {
+ SecurityError
+}
+
+func init() {
+ t["RestrictedVersion"] = reflect.TypeOf((*RestrictedVersion)(nil)).Elem()
+}
+
+type RestrictedVersionFault RestrictedVersion
+
+func init() {
+ t["RestrictedVersionFault"] = reflect.TypeOf((*RestrictedVersionFault)(nil)).Elem()
+}
+
+type RetrieveAllPermissions RetrieveAllPermissionsRequestType
+
+func init() {
+ t["RetrieveAllPermissions"] = reflect.TypeOf((*RetrieveAllPermissions)(nil)).Elem()
+}
+
+type RetrieveAllPermissionsRequestType struct {
+ This ManagedObjectReference `xml:"_this"`
+}
+
+func init() {
+ t["RetrieveAllPermissionsRequestType"] = reflect.TypeOf((*RetrieveAllPermissionsRequestType)(nil)).Elem()
+}
+
+type RetrieveAllPermissionsResponse struct {
+ Returnval []Permission `xml:"returnval,omitempty"`
+}
+
+type RetrieveAnswerFile RetrieveAnswerFileRequestType
+
+func init() {
+ t["RetrieveAnswerFile"] = reflect.TypeOf((*RetrieveAnswerFile)(nil)).Elem()
+}
+
+type RetrieveAnswerFileForProfile RetrieveAnswerFileForProfileRequestType
+
+func init() {
+ t["RetrieveAnswerFileForProfile"] = reflect.TypeOf((*RetrieveAnswerFileForProfile)(nil)).Elem()
+}
+
+type RetrieveAnswerFileForProfileRequestType struct {
+ This ManagedObjectReference `xml:"_this"`
+ Host ManagedObjectReference `xml:"host"`
+ ApplyProfile HostApplyProfile `xml:"applyProfile"`
+}
+
+func init() {
+ t["RetrieveAnswerFileForProfileRequestType"] = reflect.TypeOf((*RetrieveAnswerFileForProfileRequestType)(nil)).Elem()
+}
+
+type RetrieveAnswerFileForProfileResponse struct {
+ Returnval *AnswerFile `xml:"returnval,omitempty"`
+}
+
+type RetrieveAnswerFileRequestType struct {
+ This ManagedObjectReference `xml:"_this"`
+ Host ManagedObjectReference `xml:"host"`
+}
+
+func init() {
+ t["RetrieveAnswerFileRequestType"] = reflect.TypeOf((*RetrieveAnswerFileRequestType)(nil)).Elem()
+}
+
+type RetrieveAnswerFileResponse struct {
+ Returnval *AnswerFile `xml:"returnval,omitempty"`
+}
+
+type RetrieveArgumentDescription RetrieveArgumentDescriptionRequestType
+
+func init() {
+ t["RetrieveArgumentDescription"] = reflect.TypeOf((*RetrieveArgumentDescription)(nil)).Elem()
+}
+
+type RetrieveArgumentDescriptionRequestType struct {
+ This ManagedObjectReference `xml:"_this"`
+ EventTypeId string `xml:"eventTypeId"`
+}
+
+func init() {
+ t["RetrieveArgumentDescriptionRequestType"] = reflect.TypeOf((*RetrieveArgumentDescriptionRequestType)(nil)).Elem()
+}
+
+type RetrieveArgumentDescriptionResponse struct {
+ Returnval []EventArgDesc `xml:"returnval,omitempty"`
+}
+
+type RetrieveDasAdvancedRuntimeInfo RetrieveDasAdvancedRuntimeInfoRequestType
+
+func init() {
+ t["RetrieveDasAdvancedRuntimeInfo"] = reflect.TypeOf((*RetrieveDasAdvancedRuntimeInfo)(nil)).Elem()
+}
+
+type RetrieveDasAdvancedRuntimeInfoRequestType struct {
+ This ManagedObjectReference `xml:"_this"`
+}
+
+func init() {
+ t["RetrieveDasAdvancedRuntimeInfoRequestType"] = reflect.TypeOf((*RetrieveDasAdvancedRuntimeInfoRequestType)(nil)).Elem()
+}
+
+type RetrieveDasAdvancedRuntimeInfoResponse struct {
+ Returnval BaseClusterDasAdvancedRuntimeInfo `xml:"returnval,omitempty,typeattr"`
+}
+
+type RetrieveDescription RetrieveDescriptionRequestType
+
+func init() {
+ t["RetrieveDescription"] = reflect.TypeOf((*RetrieveDescription)(nil)).Elem()
+}
+
+type RetrieveDescriptionRequestType struct {
+ This ManagedObjectReference `xml:"_this"`
+}
+
+func init() {
+ t["RetrieveDescriptionRequestType"] = reflect.TypeOf((*RetrieveDescriptionRequestType)(nil)).Elem()
+}
+
+type RetrieveDescriptionResponse struct {
+ Returnval *ProfileDescription `xml:"returnval,omitempty"`
+}
+
+type RetrieveDiskPartitionInfo RetrieveDiskPartitionInfoRequestType
+
+func init() {
+ t["RetrieveDiskPartitionInfo"] = reflect.TypeOf((*RetrieveDiskPartitionInfo)(nil)).Elem()
+}
+
+type RetrieveDiskPartitionInfoRequestType struct {
+ This ManagedObjectReference `xml:"_this"`
+ DevicePath []string `xml:"devicePath"`
+}
+
+func init() {
+ t["RetrieveDiskPartitionInfoRequestType"] = reflect.TypeOf((*RetrieveDiskPartitionInfoRequestType)(nil)).Elem()
+}
+
+type RetrieveDiskPartitionInfoResponse struct {
+ Returnval []HostDiskPartitionInfo `xml:"returnval,omitempty"`
+}
+
+type RetrieveEntityPermissions RetrieveEntityPermissionsRequestType
+
+func init() {
+ t["RetrieveEntityPermissions"] = reflect.TypeOf((*RetrieveEntityPermissions)(nil)).Elem()
+}
+
+type RetrieveEntityPermissionsRequestType struct {
+ This ManagedObjectReference `xml:"_this"`
+ Entity ManagedObjectReference `xml:"entity"`
+ Inherited bool `xml:"inherited"`
+}
+
+func init() {
+ t["RetrieveEntityPermissionsRequestType"] = reflect.TypeOf((*RetrieveEntityPermissionsRequestType)(nil)).Elem()
+}
+
+type RetrieveEntityPermissionsResponse struct {
+ Returnval []Permission `xml:"returnval,omitempty"`
+}
+
+type RetrieveEntityScheduledTask RetrieveEntityScheduledTaskRequestType
+
+func init() {
+ t["RetrieveEntityScheduledTask"] = reflect.TypeOf((*RetrieveEntityScheduledTask)(nil)).Elem()
+}
+
+type RetrieveEntityScheduledTaskRequestType struct {
+ This ManagedObjectReference `xml:"_this"`
+ Entity *ManagedObjectReference `xml:"entity,omitempty"`
+}
+
+func init() {
+ t["RetrieveEntityScheduledTaskRequestType"] = reflect.TypeOf((*RetrieveEntityScheduledTaskRequestType)(nil)).Elem()
+}
+
+type RetrieveEntityScheduledTaskResponse struct {
+ Returnval []ManagedObjectReference `xml:"returnval,omitempty"`
+}
+
+type RetrieveHardwareUptime RetrieveHardwareUptimeRequestType
+
+func init() {
+ t["RetrieveHardwareUptime"] = reflect.TypeOf((*RetrieveHardwareUptime)(nil)).Elem()
+}
+
+type RetrieveHardwareUptimeRequestType struct {
+ This ManagedObjectReference `xml:"_this"`
+}
+
+func init() {
+ t["RetrieveHardwareUptimeRequestType"] = reflect.TypeOf((*RetrieveHardwareUptimeRequestType)(nil)).Elem()
+}
+
+type RetrieveHardwareUptimeResponse struct {
+ Returnval int64 `xml:"returnval"`
+}
+
+type RetrieveHostAccessControlEntries RetrieveHostAccessControlEntriesRequestType
+
+func init() {
+ t["RetrieveHostAccessControlEntries"] = reflect.TypeOf((*RetrieveHostAccessControlEntries)(nil)).Elem()
+}
+
+type RetrieveHostAccessControlEntriesRequestType struct {
+ This ManagedObjectReference `xml:"_this"`
+}
+
+func init() {
+ t["RetrieveHostAccessControlEntriesRequestType"] = reflect.TypeOf((*RetrieveHostAccessControlEntriesRequestType)(nil)).Elem()
+}
+
+type RetrieveHostAccessControlEntriesResponse struct {
+ Returnval []HostAccessControlEntry `xml:"returnval,omitempty"`
+}
+
+type RetrieveObjectScheduledTask RetrieveObjectScheduledTaskRequestType
+
+func init() {
+ t["RetrieveObjectScheduledTask"] = reflect.TypeOf((*RetrieveObjectScheduledTask)(nil)).Elem()
+}
+
+type RetrieveObjectScheduledTaskRequestType struct {
+ This ManagedObjectReference `xml:"_this"`
+ Obj *ManagedObjectReference `xml:"obj,omitempty"`
+}
+
+func init() {
+ t["RetrieveObjectScheduledTaskRequestType"] = reflect.TypeOf((*RetrieveObjectScheduledTaskRequestType)(nil)).Elem()
+}
+
+type RetrieveObjectScheduledTaskResponse struct {
+ Returnval []ManagedObjectReference `xml:"returnval,omitempty"`
+}
+
+type RetrieveOptions struct {
+ DynamicData
+
+ MaxObjects int `xml:"maxObjects,omitempty"`
+}
+
+func init() {
+ t["RetrieveOptions"] = reflect.TypeOf((*RetrieveOptions)(nil)).Elem()
+}
+
+type RetrieveProductComponents RetrieveProductComponentsRequestType
+
+func init() {
+ t["RetrieveProductComponents"] = reflect.TypeOf((*RetrieveProductComponents)(nil)).Elem()
+}
+
+type RetrieveProductComponentsRequestType struct {
+ This ManagedObjectReference `xml:"_this"`
+}
+
+func init() {
+ t["RetrieveProductComponentsRequestType"] = reflect.TypeOf((*RetrieveProductComponentsRequestType)(nil)).Elem()
+}
+
+type RetrieveProductComponentsResponse struct {
+ Returnval []ProductComponentInfo `xml:"returnval,omitempty"`
+}
+
+type RetrieveProperties RetrievePropertiesRequestType
+
+func init() {
+ t["RetrieveProperties"] = reflect.TypeOf((*RetrieveProperties)(nil)).Elem()
+}
+
+type RetrievePropertiesEx RetrievePropertiesExRequestType
+
+func init() {
+ t["RetrievePropertiesEx"] = reflect.TypeOf((*RetrievePropertiesEx)(nil)).Elem()
+}
+
+type RetrievePropertiesExRequestType struct {
+ This ManagedObjectReference `xml:"_this"`
+ SpecSet []PropertyFilterSpec `xml:"specSet"`
+ Options RetrieveOptions `xml:"options"`
+}
+
+func init() {
+ t["RetrievePropertiesExRequestType"] = reflect.TypeOf((*RetrievePropertiesExRequestType)(nil)).Elem()
+}
+
+type RetrievePropertiesExResponse struct {
+ Returnval *RetrieveResult `xml:"returnval,omitempty"`
+}
+
+type RetrievePropertiesRequestType struct {
+ This ManagedObjectReference `xml:"_this"`
+ SpecSet []PropertyFilterSpec `xml:"specSet"`
+}
+
+func init() {
+ t["RetrievePropertiesRequestType"] = reflect.TypeOf((*RetrievePropertiesRequestType)(nil)).Elem()
+}
+
+type RetrievePropertiesResponse struct {
+ Returnval []ObjectContent `xml:"returnval,omitempty"`
+}
+
+type RetrieveResult struct {
+ DynamicData
+
+ Token string `xml:"token,omitempty"`
+ Objects []ObjectContent `xml:"objects"`
+}
+
+func init() {
+ t["RetrieveResult"] = reflect.TypeOf((*RetrieveResult)(nil)).Elem()
+}
+
+type RetrieveRolePermissions RetrieveRolePermissionsRequestType
+
+func init() {
+ t["RetrieveRolePermissions"] = reflect.TypeOf((*RetrieveRolePermissions)(nil)).Elem()
+}
+
+type RetrieveRolePermissionsRequestType struct {
+ This ManagedObjectReference `xml:"_this"`
+ RoleId int `xml:"roleId"`
+}
+
+func init() {
+ t["RetrieveRolePermissionsRequestType"] = reflect.TypeOf((*RetrieveRolePermissionsRequestType)(nil)).Elem()
+}
+
+type RetrieveRolePermissionsResponse struct {
+ Returnval []Permission `xml:"returnval,omitempty"`
+}
+
+type RetrieveServiceContent RetrieveServiceContentRequestType
+
+func init() {
+ t["RetrieveServiceContent"] = reflect.TypeOf((*RetrieveServiceContent)(nil)).Elem()
+}
+
+type RetrieveServiceContentRequestType struct {
+ This ManagedObjectReference `xml:"_this"`
+}
+
+func init() {
+ t["RetrieveServiceContentRequestType"] = reflect.TypeOf((*RetrieveServiceContentRequestType)(nil)).Elem()
+}
+
+type RetrieveServiceContentResponse struct {
+ Returnval ServiceContent `xml:"returnval"`
+}
+
+type RetrieveUserGroups RetrieveUserGroupsRequestType
+
+func init() {
+ t["RetrieveUserGroups"] = reflect.TypeOf((*RetrieveUserGroups)(nil)).Elem()
+}
+
+type RetrieveUserGroupsRequestType struct {
+ This ManagedObjectReference `xml:"_this"`
+ Domain string `xml:"domain,omitempty"`
+ SearchStr string `xml:"searchStr"`
+ BelongsToGroup string `xml:"belongsToGroup,omitempty"`
+ BelongsToUser string `xml:"belongsToUser,omitempty"`
+ ExactMatch bool `xml:"exactMatch"`
+ FindUsers bool `xml:"findUsers"`
+ FindGroups bool `xml:"findGroups"`
+}
+
+func init() {
+ t["RetrieveUserGroupsRequestType"] = reflect.TypeOf((*RetrieveUserGroupsRequestType)(nil)).Elem()
+}
+
+type RetrieveUserGroupsResponse struct {
+ Returnval []BaseUserSearchResult `xml:"returnval,omitempty,typeattr"`
+}
+
+type RevertToCurrentSnapshotRequestType struct {
+ This ManagedObjectReference `xml:"_this"`
+ Host *ManagedObjectReference `xml:"host,omitempty"`
+ SuppressPowerOn *bool `xml:"suppressPowerOn"`
+}
+
+func init() {
+ t["RevertToCurrentSnapshotRequestType"] = reflect.TypeOf((*RevertToCurrentSnapshotRequestType)(nil)).Elem()
+}
+
+type RevertToCurrentSnapshot_Task RevertToCurrentSnapshotRequestType
+
+func init() {
+ t["RevertToCurrentSnapshot_Task"] = reflect.TypeOf((*RevertToCurrentSnapshot_Task)(nil)).Elem()
+}
+
+type RevertToCurrentSnapshot_TaskResponse struct {
+ Returnval ManagedObjectReference `xml:"returnval"`
+}
+
+type RevertToSnapshotRequestType struct {
+ This ManagedObjectReference `xml:"_this"`
+ Host *ManagedObjectReference `xml:"host,omitempty"`
+ SuppressPowerOn *bool `xml:"suppressPowerOn"`
+}
+
+func init() {
+ t["RevertToSnapshotRequestType"] = reflect.TypeOf((*RevertToSnapshotRequestType)(nil)).Elem()
+}
+
+type RevertToSnapshot_Task RevertToSnapshotRequestType
+
+func init() {
+ t["RevertToSnapshot_Task"] = reflect.TypeOf((*RevertToSnapshot_Task)(nil)).Elem()
+}
+
+type RevertToSnapshot_TaskResponse struct {
+ Returnval ManagedObjectReference `xml:"returnval"`
+}
+
+type RewindCollector RewindCollectorRequestType
+
+func init() {
+ t["RewindCollector"] = reflect.TypeOf((*RewindCollector)(nil)).Elem()
+}
+
+type RewindCollectorRequestType struct {
+ This ManagedObjectReference `xml:"_this"`
+}
+
+func init() {
+ t["RewindCollectorRequestType"] = reflect.TypeOf((*RewindCollectorRequestType)(nil)).Elem()
+}
+
+type RewindCollectorResponse struct {
+}
+
+type RoleAddedEvent struct {
+ RoleEvent
+
+ PrivilegeList []string `xml:"privilegeList,omitempty"`
+}
+
+func init() {
+ t["RoleAddedEvent"] = reflect.TypeOf((*RoleAddedEvent)(nil)).Elem()
+}
+
+type RoleEvent struct {
+ AuthorizationEvent
+
+ Role RoleEventArgument `xml:"role"`
+}
+
+func init() {
+ t["RoleEvent"] = reflect.TypeOf((*RoleEvent)(nil)).Elem()
+}
+
+type RoleEventArgument struct {
+ EventArgument
+
+ RoleId int `xml:"roleId"`
+ Name string `xml:"name"`
+}
+
+func init() {
+ t["RoleEventArgument"] = reflect.TypeOf((*RoleEventArgument)(nil)).Elem()
+}
+
+type RoleRemovedEvent struct {
+ RoleEvent
+}
+
+func init() {
+ t["RoleRemovedEvent"] = reflect.TypeOf((*RoleRemovedEvent)(nil)).Elem()
+}
+
+type RoleUpdatedEvent struct {
+ RoleEvent
+
+ PrivilegeList []string `xml:"privilegeList,omitempty"`
+}
+
+func init() {
+ t["RoleUpdatedEvent"] = reflect.TypeOf((*RoleUpdatedEvent)(nil)).Elem()
+}
+
+type RollbackEvent struct {
+ DvsEvent
+
+ HostName string `xml:"hostName"`
+ MethodName string `xml:"methodName,omitempty"`
+}
+
+func init() {
+ t["RollbackEvent"] = reflect.TypeOf((*RollbackEvent)(nil)).Elem()
+}
+
+type RollbackFailure struct {
+ DvsFault
+
+ EntityName string `xml:"entityName"`
+ EntityType string `xml:"entityType"`
+}
+
+func init() {
+ t["RollbackFailure"] = reflect.TypeOf((*RollbackFailure)(nil)).Elem()
+}
+
+type RollbackFailureFault RollbackFailure
+
+func init() {
+ t["RollbackFailureFault"] = reflect.TypeOf((*RollbackFailureFault)(nil)).Elem()
+}
+
+type RuleViolation struct {
+ VmConfigFault
+
+ Host *ManagedObjectReference `xml:"host,omitempty"`
+ Rule BaseClusterRuleInfo `xml:"rule,omitempty,typeattr"`
+}
+
+func init() {
+ t["RuleViolation"] = reflect.TypeOf((*RuleViolation)(nil)).Elem()
+}
+
+type RuleViolationFault RuleViolation
+
+func init() {
+ t["RuleViolationFault"] = reflect.TypeOf((*RuleViolationFault)(nil)).Elem()
+}
+
+type RunScheduledTask RunScheduledTaskRequestType
+
+func init() {
+ t["RunScheduledTask"] = reflect.TypeOf((*RunScheduledTask)(nil)).Elem()
+}
+
+type RunScheduledTaskRequestType struct {
+ This ManagedObjectReference `xml:"_this"`
+}
+
+func init() {
+ t["RunScheduledTaskRequestType"] = reflect.TypeOf((*RunScheduledTaskRequestType)(nil)).Elem()
+}
+
+type RunScheduledTaskResponse struct {
+}
+
+type RunScriptAction struct {
+ Action
+
+ Script string `xml:"script"`
+}
+
+func init() {
+ t["RunScriptAction"] = reflect.TypeOf((*RunScriptAction)(nil)).Elem()
+}
+
+type RunVsanPhysicalDiskDiagnostics RunVsanPhysicalDiskDiagnosticsRequestType
+
+func init() {
reflect.TypeOf((*RunVsanPhysicalDiskDiagnostics)(nil)).Elem() +} + +type RunVsanPhysicalDiskDiagnosticsRequestType struct { + This ManagedObjectReference `xml:"_this"` + Disks []string `xml:"disks,omitempty"` +} + +func init() { + t["RunVsanPhysicalDiskDiagnosticsRequestType"] = reflect.TypeOf((*RunVsanPhysicalDiskDiagnosticsRequestType)(nil)).Elem() +} + +type RunVsanPhysicalDiskDiagnosticsResponse struct { + Returnval []HostVsanInternalSystemVsanPhysicalDiskDiagnosticsResult `xml:"returnval"` +} + +type RuntimeFault struct { + MethodFault +} + +func init() { + t["RuntimeFault"] = reflect.TypeOf((*RuntimeFault)(nil)).Elem() +} + +type RuntimeFaultFault BaseRuntimeFault + +func init() { + t["RuntimeFaultFault"] = reflect.TypeOf((*RuntimeFaultFault)(nil)).Elem() +} + +type SAMLTokenAuthentication struct { + GuestAuthentication + + Token string `xml:"token"` + Username string `xml:"username,omitempty"` +} + +func init() { + t["SAMLTokenAuthentication"] = reflect.TypeOf((*SAMLTokenAuthentication)(nil)).Elem() +} + +type SSLDisabledFault struct { + HostConnectFault +} + +func init() { + t["SSLDisabledFault"] = reflect.TypeOf((*SSLDisabledFault)(nil)).Elem() +} + +type SSLDisabledFaultFault SSLDisabledFault + +func init() { + t["SSLDisabledFaultFault"] = reflect.TypeOf((*SSLDisabledFaultFault)(nil)).Elem() +} + +type SSLVerifyFault struct { + HostConnectFault + + SelfSigned bool `xml:"selfSigned"` + Thumbprint string `xml:"thumbprint"` +} + +func init() { + t["SSLVerifyFault"] = reflect.TypeOf((*SSLVerifyFault)(nil)).Elem() +} + +type SSLVerifyFaultFault SSLVerifyFault + +func init() { + t["SSLVerifyFaultFault"] = reflect.TypeOf((*SSLVerifyFaultFault)(nil)).Elem() +} + +type SSPIAuthentication struct { + GuestAuthentication + + SspiToken string `xml:"sspiToken"` +} + +func init() { + t["SSPIAuthentication"] = reflect.TypeOf((*SSPIAuthentication)(nil)).Elem() +} + +type SSPIChallenge struct { + VimFault + + Base64Token string `xml:"base64Token"` +} + +func init() { + t["SSPIChallenge"] = reflect.TypeOf((*SSPIChallenge)(nil)).Elem() +} + +type SSPIChallengeFault SSPIChallenge + +func init() { + t["SSPIChallengeFault"] = reflect.TypeOf((*SSPIChallengeFault)(nil)).Elem() +} + +type ScanHostPatchRequestType struct { + This ManagedObjectReference `xml:"_this"` + Repository HostPatchManagerLocator `xml:"repository"` + UpdateID []string `xml:"updateID,omitempty"` +} + +func init() { + t["ScanHostPatchRequestType"] = reflect.TypeOf((*ScanHostPatchRequestType)(nil)).Elem() +} + +type ScanHostPatchV2RequestType struct { + This ManagedObjectReference `xml:"_this"` + MetaUrls []string `xml:"metaUrls,omitempty"` + BundleUrls []string `xml:"bundleUrls,omitempty"` + Spec *HostPatchManagerPatchManagerOperationSpec `xml:"spec,omitempty"` +} + +func init() { + t["ScanHostPatchV2RequestType"] = reflect.TypeOf((*ScanHostPatchV2RequestType)(nil)).Elem() +} + +type ScanHostPatchV2_Task ScanHostPatchV2RequestType + +func init() { + t["ScanHostPatchV2_Task"] = reflect.TypeOf((*ScanHostPatchV2_Task)(nil)).Elem() +} + +type ScanHostPatchV2_TaskResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + +type ScanHostPatch_Task ScanHostPatchRequestType + +func init() { + t["ScanHostPatch_Task"] = reflect.TypeOf((*ScanHostPatch_Task)(nil)).Elem() +} + +type ScanHostPatch_TaskResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + +type ScheduledHardwareUpgradeInfo struct { + DynamicData + + UpgradePolicy string `xml:"upgradePolicy,omitempty"` + VersionKey string 
`xml:"versionKey,omitempty"` + ScheduledHardwareUpgradeStatus string `xml:"scheduledHardwareUpgradeStatus,omitempty"` + Fault *LocalizedMethodFault `xml:"fault,omitempty"` +} + +func init() { + t["ScheduledHardwareUpgradeInfo"] = reflect.TypeOf((*ScheduledHardwareUpgradeInfo)(nil)).Elem() +} + +type ScheduledTaskCompletedEvent struct { + ScheduledTaskEvent +} + +func init() { + t["ScheduledTaskCompletedEvent"] = reflect.TypeOf((*ScheduledTaskCompletedEvent)(nil)).Elem() +} + +type ScheduledTaskCreatedEvent struct { + ScheduledTaskEvent +} + +func init() { + t["ScheduledTaskCreatedEvent"] = reflect.TypeOf((*ScheduledTaskCreatedEvent)(nil)).Elem() +} + +type ScheduledTaskDescription struct { + DynamicData + + Action []BaseTypeDescription `xml:"action,typeattr"` + SchedulerInfo []ScheduledTaskDetail `xml:"schedulerInfo"` + State []BaseElementDescription `xml:"state,typeattr"` + DayOfWeek []BaseElementDescription `xml:"dayOfWeek,typeattr"` + WeekOfMonth []BaseElementDescription `xml:"weekOfMonth,typeattr"` +} + +func init() { + t["ScheduledTaskDescription"] = reflect.TypeOf((*ScheduledTaskDescription)(nil)).Elem() +} + +type ScheduledTaskDetail struct { + TypeDescription + + Frequency string `xml:"frequency"` +} + +func init() { + t["ScheduledTaskDetail"] = reflect.TypeOf((*ScheduledTaskDetail)(nil)).Elem() +} + +type ScheduledTaskEmailCompletedEvent struct { + ScheduledTaskEvent + + To string `xml:"to"` +} + +func init() { + t["ScheduledTaskEmailCompletedEvent"] = reflect.TypeOf((*ScheduledTaskEmailCompletedEvent)(nil)).Elem() +} + +type ScheduledTaskEmailFailedEvent struct { + ScheduledTaskEvent + + To string `xml:"to"` + Reason LocalizedMethodFault `xml:"reason"` +} + +func init() { + t["ScheduledTaskEmailFailedEvent"] = reflect.TypeOf((*ScheduledTaskEmailFailedEvent)(nil)).Elem() +} + +type ScheduledTaskEvent struct { + Event + + ScheduledTask ScheduledTaskEventArgument `xml:"scheduledTask"` + Entity ManagedEntityEventArgument `xml:"entity"` +} + +func init() { + t["ScheduledTaskEvent"] = reflect.TypeOf((*ScheduledTaskEvent)(nil)).Elem() +} + +type ScheduledTaskEventArgument struct { + EntityEventArgument + + ScheduledTask ManagedObjectReference `xml:"scheduledTask"` +} + +func init() { + t["ScheduledTaskEventArgument"] = reflect.TypeOf((*ScheduledTaskEventArgument)(nil)).Elem() +} + +type ScheduledTaskFailedEvent struct { + ScheduledTaskEvent + + Reason LocalizedMethodFault `xml:"reason"` +} + +func init() { + t["ScheduledTaskFailedEvent"] = reflect.TypeOf((*ScheduledTaskFailedEvent)(nil)).Elem() +} + +type ScheduledTaskInfo struct { + ScheduledTaskSpec + + ScheduledTask ManagedObjectReference `xml:"scheduledTask"` + Entity ManagedObjectReference `xml:"entity"` + LastModifiedTime time.Time `xml:"lastModifiedTime"` + LastModifiedUser string `xml:"lastModifiedUser"` + NextRunTime *time.Time `xml:"nextRunTime"` + PrevRunTime *time.Time `xml:"prevRunTime"` + State TaskInfoState `xml:"state"` + Error *LocalizedMethodFault `xml:"error,omitempty"` + Result AnyType `xml:"result,omitempty,typeattr"` + Progress int `xml:"progress,omitempty"` + ActiveTask *ManagedObjectReference `xml:"activeTask,omitempty"` + TaskObject *ManagedObjectReference `xml:"taskObject,omitempty"` +} + +func init() { + t["ScheduledTaskInfo"] = reflect.TypeOf((*ScheduledTaskInfo)(nil)).Elem() +} + +type ScheduledTaskReconfiguredEvent struct { + ScheduledTaskEvent +} + +func init() { + t["ScheduledTaskReconfiguredEvent"] = reflect.TypeOf((*ScheduledTaskReconfiguredEvent)(nil)).Elem() +} + +type ScheduledTaskRemovedEvent struct 
{
+	ScheduledTaskEvent
+}
+
+func init() {
+	t["ScheduledTaskRemovedEvent"] = reflect.TypeOf((*ScheduledTaskRemovedEvent)(nil)).Elem()
+}
+
+type ScheduledTaskSpec struct {
+	DynamicData
+
+	Name string `xml:"name"`
+	Description string `xml:"description"`
+	Enabled bool `xml:"enabled"`
+	Scheduler BaseTaskScheduler `xml:"scheduler,typeattr"`
+	Action BaseAction `xml:"action,typeattr"`
+	Notification string `xml:"notification,omitempty"`
+}
+
+func init() {
+	t["ScheduledTaskSpec"] = reflect.TypeOf((*ScheduledTaskSpec)(nil)).Elem()
+}
+
+type ScheduledTaskStartedEvent struct {
+	ScheduledTaskEvent
+}
+
+func init() {
+	t["ScheduledTaskStartedEvent"] = reflect.TypeOf((*ScheduledTaskStartedEvent)(nil)).Elem()
+}
+
+type ScsiLun struct {
+	HostDevice
+
+	Key string `xml:"key,omitempty"`
+	Uuid string `xml:"uuid"`
+	Descriptor []ScsiLunDescriptor `xml:"descriptor,omitempty"`
+	CanonicalName string `xml:"canonicalName,omitempty"`
+	DisplayName string `xml:"displayName,omitempty"`
+	LunType string `xml:"lunType"`
+	Vendor string `xml:"vendor,omitempty"`
+	Model string `xml:"model,omitempty"`
+	Revision string `xml:"revision,omitempty"`
+	ScsiLevel int `xml:"scsiLevel,omitempty"`
+	SerialNumber string `xml:"serialNumber,omitempty"`
+	DurableName *ScsiLunDurableName `xml:"durableName,omitempty"`
+	AlternateName []ScsiLunDurableName `xml:"alternateName,omitempty"`
+	StandardInquiry []byte `xml:"standardInquiry,omitempty"`
+	QueueDepth int `xml:"queueDepth,omitempty"`
+	OperationalState []string `xml:"operationalState"`
+	Capabilities *ScsiLunCapabilities `xml:"capabilities,omitempty"`
+	VStorageSupport string `xml:"vStorageSupport,omitempty"`
+	ProtocolEndpoint *bool `xml:"protocolEndpoint"`
+}
+
+func init() {
+	t["ScsiLun"] = reflect.TypeOf((*ScsiLun)(nil)).Elem()
+}
+
+type ScsiLunCapabilities struct {
+	DynamicData
+
+	UpdateDisplayNameSupported bool `xml:"updateDisplayNameSupported"`
+}
+
+func init() {
+	t["ScsiLunCapabilities"] = reflect.TypeOf((*ScsiLunCapabilities)(nil)).Elem()
+}
+
+type ScsiLunDescriptor struct {
+	DynamicData
+
+	Quality string `xml:"quality"`
+	Id string `xml:"id"`
+}
+
+func init() {
+	t["ScsiLunDescriptor"] = reflect.TypeOf((*ScsiLunDescriptor)(nil)).Elem()
+}
+
+type ScsiLunDurableName struct {
+	DynamicData
+
+	Namespace string `xml:"namespace"`
+	NamespaceId byte `xml:"namespaceId"`
+	Data []byte `xml:"data,omitempty"`
+}
+
+func init() {
+	t["ScsiLunDurableName"] = reflect.TypeOf((*ScsiLunDurableName)(nil)).Elem()
+}
+
+type SeSparseVirtualDiskSpec struct {
+	FileBackedVirtualDiskSpec
+
+	GrainSizeKb int `xml:"grainSizeKb,omitempty"`
+}
+
+func init() {
+	t["SeSparseVirtualDiskSpec"] = reflect.TypeOf((*SeSparseVirtualDiskSpec)(nil)).Elem()
+}
+
+type SearchDatastoreRequestType struct {
+	This ManagedObjectReference `xml:"_this"`
+	DatastorePath string `xml:"datastorePath"`
+	SearchSpec *HostDatastoreBrowserSearchSpec `xml:"searchSpec,omitempty"`
+}
+
+func init() {
+	t["SearchDatastoreRequestType"] = reflect.TypeOf((*SearchDatastoreRequestType)(nil)).Elem()
+}
+
+type SearchDatastoreSubFoldersRequestType struct {
+	This ManagedObjectReference `xml:"_this"`
+	DatastorePath string `xml:"datastorePath"`
+	SearchSpec *HostDatastoreBrowserSearchSpec `xml:"searchSpec,omitempty"`
+}
+
+func init() {
+	t["SearchDatastoreSubFoldersRequestType"] = reflect.TypeOf((*SearchDatastoreSubFoldersRequestType)(nil)).Elem()
+}
+
+type SearchDatastoreSubFolders_Task SearchDatastoreSubFoldersRequestType
+
+func init() {
+	t["SearchDatastoreSubFolders_Task"] =
reflect.TypeOf((*SearchDatastoreSubFolders_Task)(nil)).Elem()
+}
+
+type SearchDatastoreSubFolders_TaskResponse struct {
+	Returnval ManagedObjectReference `xml:"returnval"`
+}
+
+type SearchDatastore_Task SearchDatastoreRequestType
+
+func init() {
+	t["SearchDatastore_Task"] = reflect.TypeOf((*SearchDatastore_Task)(nil)).Elem()
+}
+
+type SearchDatastore_TaskResponse struct {
+	Returnval ManagedObjectReference `xml:"returnval"`
+}
+
+type SecondaryVmAlreadyDisabled struct {
+	VmFaultToleranceIssue
+
+	InstanceUuid string `xml:"instanceUuid"`
+}
+
+func init() {
+	t["SecondaryVmAlreadyDisabled"] = reflect.TypeOf((*SecondaryVmAlreadyDisabled)(nil)).Elem()
+}
+
+type SecondaryVmAlreadyDisabledFault SecondaryVmAlreadyDisabled
+
+func init() {
+	t["SecondaryVmAlreadyDisabledFault"] = reflect.TypeOf((*SecondaryVmAlreadyDisabledFault)(nil)).Elem()
+}
+
+type SecondaryVmAlreadyEnabled struct {
+	VmFaultToleranceIssue
+
+	InstanceUuid string `xml:"instanceUuid"`
+}
+
+func init() {
+	t["SecondaryVmAlreadyEnabled"] = reflect.TypeOf((*SecondaryVmAlreadyEnabled)(nil)).Elem()
+}
+
+type SecondaryVmAlreadyEnabledFault SecondaryVmAlreadyEnabled
+
+func init() {
+	t["SecondaryVmAlreadyEnabledFault"] = reflect.TypeOf((*SecondaryVmAlreadyEnabledFault)(nil)).Elem()
+}
+
+type SecondaryVmAlreadyRegistered struct {
+	VmFaultToleranceIssue
+
+	InstanceUuid string `xml:"instanceUuid,omitempty"`
+}
+
+func init() {
+	t["SecondaryVmAlreadyRegistered"] = reflect.TypeOf((*SecondaryVmAlreadyRegistered)(nil)).Elem()
+}
+
+type SecondaryVmAlreadyRegisteredFault SecondaryVmAlreadyRegistered
+
+func init() {
+	t["SecondaryVmAlreadyRegisteredFault"] = reflect.TypeOf((*SecondaryVmAlreadyRegisteredFault)(nil)).Elem()
+}
+
+type SecondaryVmNotRegistered struct {
+	VmFaultToleranceIssue
+
+	InstanceUuid string `xml:"instanceUuid,omitempty"`
+}
+
+func init() {
+	t["SecondaryVmNotRegistered"] = reflect.TypeOf((*SecondaryVmNotRegistered)(nil)).Elem()
+}
+
+type SecondaryVmNotRegisteredFault SecondaryVmNotRegistered
+
+func init() {
+	t["SecondaryVmNotRegisteredFault"] = reflect.TypeOf((*SecondaryVmNotRegisteredFault)(nil)).Elem()
+}
+
+type SecurityError struct {
+	RuntimeFault
+}
+
+func init() {
+	t["SecurityError"] = reflect.TypeOf((*SecurityError)(nil)).Elem()
+}
+
+type SecurityErrorFault BaseSecurityError
+
+func init() {
+	t["SecurityErrorFault"] = reflect.TypeOf((*SecurityErrorFault)(nil)).Elem()
+}
+
+type SecurityProfile struct {
+	ApplyProfile
+
+	Permission []PermissionProfile `xml:"permission,omitempty"`
+}
+
+func init() {
+	t["SecurityProfile"] = reflect.TypeOf((*SecurityProfile)(nil)).Elem()
+}
+
+type SelectActivePartition SelectActivePartitionRequestType
+
+func init() {
+	t["SelectActivePartition"] = reflect.TypeOf((*SelectActivePartition)(nil)).Elem()
+}
+
+type SelectActivePartitionRequestType struct {
+	This ManagedObjectReference `xml:"_this"`
+	Partition *HostScsiDiskPartition `xml:"partition,omitempty"`
+}
+
+func init() {
+	t["SelectActivePartitionRequestType"] = reflect.TypeOf((*SelectActivePartitionRequestType)(nil)).Elem()
+}
+
+type SelectActivePartitionResponse struct {
+}
+
+type SelectVnic SelectVnicRequestType
+
+func init() {
+	t["SelectVnic"] = reflect.TypeOf((*SelectVnic)(nil)).Elem()
+}
+
+type SelectVnicForNicType SelectVnicForNicTypeRequestType
+
+func init() {
+	t["SelectVnicForNicType"] = reflect.TypeOf((*SelectVnicForNicType)(nil)).Elem()
+}
+
+type SelectVnicForNicTypeRequestType struct {
+	This ManagedObjectReference `xml:"_this"`
+	NicType string `xml:"nicType"`
+	Device string
`xml:"device"` +} + +func init() { + t["SelectVnicForNicTypeRequestType"] = reflect.TypeOf((*SelectVnicForNicTypeRequestType)(nil)).Elem() +} + +type SelectVnicForNicTypeResponse struct { +} + +type SelectVnicRequestType struct { + This ManagedObjectReference `xml:"_this"` + Device string `xml:"device"` +} + +func init() { + t["SelectVnicRequestType"] = reflect.TypeOf((*SelectVnicRequestType)(nil)).Elem() +} + +type SelectVnicResponse struct { +} + +type SelectionSet struct { + DynamicData +} + +func init() { + t["SelectionSet"] = reflect.TypeOf((*SelectionSet)(nil)).Elem() +} + +type SelectionSpec struct { + DynamicData + + Name string `xml:"name,omitempty"` +} + +func init() { + t["SelectionSpec"] = reflect.TypeOf((*SelectionSpec)(nil)).Elem() +} + +type SendEmailAction struct { + Action + + ToList string `xml:"toList"` + CcList string `xml:"ccList"` + Subject string `xml:"subject"` + Body string `xml:"body"` +} + +func init() { + t["SendEmailAction"] = reflect.TypeOf((*SendEmailAction)(nil)).Elem() +} + +type SendNMI SendNMIRequestType + +func init() { + t["SendNMI"] = reflect.TypeOf((*SendNMI)(nil)).Elem() +} + +type SendNMIRequestType struct { + This ManagedObjectReference `xml:"_this"` +} + +func init() { + t["SendNMIRequestType"] = reflect.TypeOf((*SendNMIRequestType)(nil)).Elem() +} + +type SendNMIResponse struct { +} + +type SendSNMPAction struct { + Action +} + +func init() { + t["SendSNMPAction"] = reflect.TypeOf((*SendSNMPAction)(nil)).Elem() +} + +type SendTestNotification SendTestNotificationRequestType + +func init() { + t["SendTestNotification"] = reflect.TypeOf((*SendTestNotification)(nil)).Elem() +} + +type SendTestNotificationRequestType struct { + This ManagedObjectReference `xml:"_this"` +} + +func init() { + t["SendTestNotificationRequestType"] = reflect.TypeOf((*SendTestNotificationRequestType)(nil)).Elem() +} + +type SendTestNotificationResponse struct { +} + +type ServerLicenseExpiredEvent struct { + LicenseEvent + + Product string `xml:"product"` +} + +func init() { + t["ServerLicenseExpiredEvent"] = reflect.TypeOf((*ServerLicenseExpiredEvent)(nil)).Elem() +} + +type ServerStartedSessionEvent struct { + SessionEvent +} + +func init() { + t["ServerStartedSessionEvent"] = reflect.TypeOf((*ServerStartedSessionEvent)(nil)).Elem() +} + +type ServiceConsolePortGroupProfile struct { + PortGroupProfile + + IpConfig IpAddressProfile `xml:"ipConfig"` +} + +func init() { + t["ServiceConsolePortGroupProfile"] = reflect.TypeOf((*ServiceConsolePortGroupProfile)(nil)).Elem() +} + +type ServiceConsoleReservationInfo struct { + DynamicData + + ServiceConsoleReservedCfg int64 `xml:"serviceConsoleReservedCfg"` + ServiceConsoleReserved int64 `xml:"serviceConsoleReserved"` + Unreserved int64 `xml:"unreserved"` +} + +func init() { + t["ServiceConsoleReservationInfo"] = reflect.TypeOf((*ServiceConsoleReservationInfo)(nil)).Elem() +} + +type ServiceContent struct { + DynamicData + + RootFolder ManagedObjectReference `xml:"rootFolder"` + PropertyCollector ManagedObjectReference `xml:"propertyCollector"` + ViewManager *ManagedObjectReference `xml:"viewManager,omitempty"` + About AboutInfo `xml:"about"` + Setting *ManagedObjectReference `xml:"setting,omitempty"` + UserDirectory *ManagedObjectReference `xml:"userDirectory,omitempty"` + SessionManager *ManagedObjectReference `xml:"sessionManager,omitempty"` + AuthorizationManager *ManagedObjectReference `xml:"authorizationManager,omitempty"` + ServiceManager *ManagedObjectReference `xml:"serviceManager,omitempty"` + PerfManager 
*ManagedObjectReference `xml:"perfManager,omitempty"`
+	ScheduledTaskManager *ManagedObjectReference `xml:"scheduledTaskManager,omitempty"`
+	AlarmManager *ManagedObjectReference `xml:"alarmManager,omitempty"`
+	EventManager *ManagedObjectReference `xml:"eventManager,omitempty"`
+	TaskManager *ManagedObjectReference `xml:"taskManager,omitempty"`
+	ExtensionManager *ManagedObjectReference `xml:"extensionManager,omitempty"`
+	CustomizationSpecManager *ManagedObjectReference `xml:"customizationSpecManager,omitempty"`
+	CustomFieldsManager *ManagedObjectReference `xml:"customFieldsManager,omitempty"`
+	AccountManager *ManagedObjectReference `xml:"accountManager,omitempty"`
+	DiagnosticManager *ManagedObjectReference `xml:"diagnosticManager,omitempty"`
+	LicenseManager *ManagedObjectReference `xml:"licenseManager,omitempty"`
+	SearchIndex *ManagedObjectReference `xml:"searchIndex,omitempty"`
+	FileManager *ManagedObjectReference `xml:"fileManager,omitempty"`
+	DatastoreNamespaceManager *ManagedObjectReference `xml:"datastoreNamespaceManager,omitempty"`
+	VirtualDiskManager *ManagedObjectReference `xml:"virtualDiskManager,omitempty"`
+	VirtualizationManager *ManagedObjectReference `xml:"virtualizationManager,omitempty"`
+	SnmpSystem *ManagedObjectReference `xml:"snmpSystem,omitempty"`
+	VmProvisioningChecker *ManagedObjectReference `xml:"vmProvisioningChecker,omitempty"`
+	VmCompatibilityChecker *ManagedObjectReference `xml:"vmCompatibilityChecker,omitempty"`
+	OvfManager *ManagedObjectReference `xml:"ovfManager,omitempty"`
+	IpPoolManager *ManagedObjectReference `xml:"ipPoolManager,omitempty"`
+	DvSwitchManager *ManagedObjectReference `xml:"dvSwitchManager,omitempty"`
+	HostProfileManager *ManagedObjectReference `xml:"hostProfileManager,omitempty"`
+	ClusterProfileManager *ManagedObjectReference `xml:"clusterProfileManager,omitempty"`
+	ComplianceManager *ManagedObjectReference `xml:"complianceManager,omitempty"`
+	LocalizationManager *ManagedObjectReference `xml:"localizationManager,omitempty"`
+	StorageResourceManager *ManagedObjectReference `xml:"storageResourceManager,omitempty"`
+	GuestOperationsManager *ManagedObjectReference `xml:"guestOperationsManager,omitempty"`
+	OverheadMemoryManager *ManagedObjectReference `xml:"overheadMemoryManager,omitempty"`
+	CertificateManager *ManagedObjectReference `xml:"certificateManager,omitempty"`
+	IoFilterManager *ManagedObjectReference `xml:"ioFilterManager,omitempty"`
+}
+
+func init() {
+	t["ServiceContent"] = reflect.TypeOf((*ServiceContent)(nil)).Elem()
+}
+
+type ServiceLocator struct {
+	DynamicData
+
+	InstanceUuid string `xml:"instanceUuid"`
+	Url string `xml:"url"`
+	Credential BaseServiceLocatorCredential `xml:"credential,typeattr"`
+	SslThumbprint string `xml:"sslThumbprint,omitempty"`
+}
+
+func init() {
+	t["ServiceLocator"] = reflect.TypeOf((*ServiceLocator)(nil)).Elem()
+}
+
+type ServiceLocatorCredential struct {
+	DynamicData
+}
+
+func init() {
+	t["ServiceLocatorCredential"] = reflect.TypeOf((*ServiceLocatorCredential)(nil)).Elem()
+}
+
+type ServiceLocatorNamePassword struct {
+	ServiceLocatorCredential
+
+	Username string `xml:"username"`
+	Password string `xml:"password"`
+}
+
+func init() {
+	t["ServiceLocatorNamePassword"] = reflect.TypeOf((*ServiceLocatorNamePassword)(nil)).Elem()
+}
+
+type ServiceLocatorSAMLCredential struct {
+	ServiceLocatorCredential
+
+	Token string `xml:"token,omitempty"`
+}
+
+func init() {
+	t["ServiceLocatorSAMLCredential"] = reflect.TypeOf((*ServiceLocatorSAMLCredential)(nil)).Elem()
+}
+
+type
ServiceManagerServiceInfo struct {
+	DynamicData
+
+	ServiceName string `xml:"serviceName"`
+	Location []string `xml:"location,omitempty"`
+	Service ManagedObjectReference `xml:"service"`
+	Description string `xml:"description"`
+}
+
+func init() {
+	t["ServiceManagerServiceInfo"] = reflect.TypeOf((*ServiceManagerServiceInfo)(nil)).Elem()
+}
+
+type ServiceProfile struct {
+	ApplyProfile
+
+	Key string `xml:"key"`
+}
+
+func init() {
+	t["ServiceProfile"] = reflect.TypeOf((*ServiceProfile)(nil)).Elem()
+}
+
+type SessionEvent struct {
+	Event
+}
+
+func init() {
+	t["SessionEvent"] = reflect.TypeOf((*SessionEvent)(nil)).Elem()
+}
+
+type SessionIsActive SessionIsActiveRequestType
+
+func init() {
+	t["SessionIsActive"] = reflect.TypeOf((*SessionIsActive)(nil)).Elem()
+}
+
+type SessionIsActiveRequestType struct {
+	This ManagedObjectReference `xml:"_this"`
+	SessionID string `xml:"sessionID"`
+	UserName string `xml:"userName"`
+}
+
+func init() {
+	t["SessionIsActiveRequestType"] = reflect.TypeOf((*SessionIsActiveRequestType)(nil)).Elem()
+}
+
+type SessionIsActiveResponse struct {
+	Returnval bool `xml:"returnval"`
+}
+
+type SessionManagerGenericServiceTicket struct {
+	DynamicData
+
+	Id string `xml:"id"`
+	HostName string `xml:"hostName,omitempty"`
+	SslThumbprint string `xml:"sslThumbprint,omitempty"`
+}
+
+func init() {
+	t["SessionManagerGenericServiceTicket"] = reflect.TypeOf((*SessionManagerGenericServiceTicket)(nil)).Elem()
+}
+
+type SessionManagerHttpServiceRequestSpec struct {
+	SessionManagerServiceRequestSpec
+
+	Method string `xml:"method,omitempty"`
+	Url string `xml:"url"`
+}
+
+func init() {
+	t["SessionManagerHttpServiceRequestSpec"] = reflect.TypeOf((*SessionManagerHttpServiceRequestSpec)(nil)).Elem()
+}
+
+type SessionManagerLocalTicket struct {
+	DynamicData
+
+	UserName string `xml:"userName"`
+	PasswordFilePath string `xml:"passwordFilePath"`
+}
+
+func init() {
+	t["SessionManagerLocalTicket"] = reflect.TypeOf((*SessionManagerLocalTicket)(nil)).Elem()
+}
+
+type SessionManagerServiceRequestSpec struct {
+	DynamicData
+}
+
+func init() {
+	t["SessionManagerServiceRequestSpec"] = reflect.TypeOf((*SessionManagerServiceRequestSpec)(nil)).Elem()
+}
+
+type SessionManagerVmomiServiceRequestSpec struct {
+	SessionManagerServiceRequestSpec
+
+	Method string `xml:"method"`
+}
+
+func init() {
+	t["SessionManagerVmomiServiceRequestSpec"] = reflect.TypeOf((*SessionManagerVmomiServiceRequestSpec)(nil)).Elem()
+}
+
+type SessionTerminatedEvent struct {
+	SessionEvent
+
+	SessionId string `xml:"sessionId"`
+	TerminatedUsername string `xml:"terminatedUsername"`
+}
+
+func init() {
+	t["SessionTerminatedEvent"] = reflect.TypeOf((*SessionTerminatedEvent)(nil)).Elem()
+}
+
+type SetCollectorPageSize SetCollectorPageSizeRequestType
+
+func init() {
+	t["SetCollectorPageSize"] = reflect.TypeOf((*SetCollectorPageSize)(nil)).Elem()
+}
+
+type SetCollectorPageSizeRequestType struct {
+	This ManagedObjectReference `xml:"_this"`
+	MaxCount int `xml:"maxCount"`
+}
+
+func init() {
+	t["SetCollectorPageSizeRequestType"] = reflect.TypeOf((*SetCollectorPageSizeRequestType)(nil)).Elem()
+}
+
+type SetCollectorPageSizeResponse struct {
+}
+
+type SetDisplayTopology SetDisplayTopologyRequestType
+
+func init() {
+	t["SetDisplayTopology"] = reflect.TypeOf((*SetDisplayTopology)(nil)).Elem()
+}
+
+type SetDisplayTopologyRequestType struct {
+	This ManagedObjectReference `xml:"_this"`
+	Displays []VirtualMachineDisplayTopology `xml:"displays"`
+}
+
+func init() {
+	t["SetDisplayTopologyRequestType"]
= reflect.TypeOf((*SetDisplayTopologyRequestType)(nil)).Elem()
+}
+
+type SetDisplayTopologyResponse struct {
+}
+
+type SetEntityPermissions SetEntityPermissionsRequestType
+
+func init() {
+	t["SetEntityPermissions"] = reflect.TypeOf((*SetEntityPermissions)(nil)).Elem()
+}
+
+type SetEntityPermissionsRequestType struct {
+	This ManagedObjectReference `xml:"_this"`
+	Entity ManagedObjectReference `xml:"entity"`
+	Permission []Permission `xml:"permission,omitempty"`
+}
+
+func init() {
+	t["SetEntityPermissionsRequestType"] = reflect.TypeOf((*SetEntityPermissionsRequestType)(nil)).Elem()
+}
+
+type SetEntityPermissionsResponse struct {
+}
+
+type SetExtensionCertificate SetExtensionCertificateRequestType
+
+func init() {
+	t["SetExtensionCertificate"] = reflect.TypeOf((*SetExtensionCertificate)(nil)).Elem()
+}
+
+type SetExtensionCertificateRequestType struct {
+	This ManagedObjectReference `xml:"_this"`
+	ExtensionKey string `xml:"extensionKey"`
+	CertificatePem string `xml:"certificatePem,omitempty"`
+}
+
+func init() {
+	t["SetExtensionCertificateRequestType"] = reflect.TypeOf((*SetExtensionCertificateRequestType)(nil)).Elem()
+}
+
+type SetExtensionCertificateResponse struct {
+}
+
+type SetField SetFieldRequestType
+
+func init() {
+	t["SetField"] = reflect.TypeOf((*SetField)(nil)).Elem()
+}
+
+type SetFieldRequestType struct {
+	This ManagedObjectReference `xml:"_this"`
+	Entity ManagedObjectReference `xml:"entity"`
+	Key int `xml:"key"`
+	Value string `xml:"value"`
+}
+
+func init() {
+	t["SetFieldRequestType"] = reflect.TypeOf((*SetFieldRequestType)(nil)).Elem()
+}
+
+type SetFieldResponse struct {
+}
+
+type SetLicenseEdition SetLicenseEditionRequestType
+
+func init() {
+	t["SetLicenseEdition"] = reflect.TypeOf((*SetLicenseEdition)(nil)).Elem()
+}
+
+type SetLicenseEditionRequestType struct {
+	This ManagedObjectReference `xml:"_this"`
+	Host *ManagedObjectReference `xml:"host,omitempty"`
+	FeatureKey string `xml:"featureKey,omitempty"`
+}
+
+func init() {
+	t["SetLicenseEditionRequestType"] = reflect.TypeOf((*SetLicenseEditionRequestType)(nil)).Elem()
+}
+
+type SetLicenseEditionResponse struct {
+}
+
+type SetLocale SetLocaleRequestType
+
+func init() {
+	t["SetLocale"] = reflect.TypeOf((*SetLocale)(nil)).Elem()
+}
+
+type SetLocaleRequestType struct {
+	This ManagedObjectReference `xml:"_this"`
+	Locale string `xml:"locale"`
+}
+
+func init() {
+	t["SetLocaleRequestType"] = reflect.TypeOf((*SetLocaleRequestType)(nil)).Elem()
+}
+
+type SetLocaleResponse struct {
+}
+
+type SetManagedByVDC SetManagedByVDCRequestType
+
+func init() {
+	t["SetManagedByVDC"] = reflect.TypeOf((*SetManagedByVDC)(nil)).Elem()
+}
+
+type SetManagedByVDCRequestType struct {
+	This ManagedObjectReference `xml:"_this"`
+	Cluster ManagedObjectReference `xml:"cluster"`
+	Status bool `xml:"status"`
+}
+
+func init() {
+	t["SetManagedByVDCRequestType"] = reflect.TypeOf((*SetManagedByVDCRequestType)(nil)).Elem()
+}
+
+type SetManagedByVDCResponse struct {
+}
+
+type SetMultipathLunPolicy SetMultipathLunPolicyRequestType
+
+func init() {
+	t["SetMultipathLunPolicy"] = reflect.TypeOf((*SetMultipathLunPolicy)(nil)).Elem()
+}
+
+type SetMultipathLunPolicyRequestType struct {
+	This ManagedObjectReference `xml:"_this"`
+	LunId string `xml:"lunId"`
+	Policy BaseHostMultipathInfoLogicalUnitPolicy `xml:"policy,typeattr"`
+}
+
+func init() {
+	t["SetMultipathLunPolicyRequestType"] = reflect.TypeOf((*SetMultipathLunPolicyRequestType)(nil)).Elem()
+}
+
+type SetMultipathLunPolicyResponse struct {
+}
+
+type SetNFSUser
SetNFSUserRequestType
+
+func init() {
+	t["SetNFSUser"] = reflect.TypeOf((*SetNFSUser)(nil)).Elem()
+}
+
+type SetNFSUserRequestType struct {
+	This ManagedObjectReference `xml:"_this"`
+	User string `xml:"user"`
+	Password string `xml:"password"`
+}
+
+func init() {
+	t["SetNFSUserRequestType"] = reflect.TypeOf((*SetNFSUserRequestType)(nil)).Elem()
+}
+
+type SetNFSUserResponse struct {
+}
+
+type SetPublicKey SetPublicKeyRequestType
+
+func init() {
+	t["SetPublicKey"] = reflect.TypeOf((*SetPublicKey)(nil)).Elem()
+}
+
+type SetPublicKeyRequestType struct {
+	This ManagedObjectReference `xml:"_this"`
+	ExtensionKey string `xml:"extensionKey"`
+	PublicKey string `xml:"publicKey"`
+}
+
+func init() {
+	t["SetPublicKeyRequestType"] = reflect.TypeOf((*SetPublicKeyRequestType)(nil)).Elem()
+}
+
+type SetPublicKeyResponse struct {
+}
+
+type SetRegistryValueInGuest SetRegistryValueInGuestRequestType
+
+func init() {
+	t["SetRegistryValueInGuest"] = reflect.TypeOf((*SetRegistryValueInGuest)(nil)).Elem()
+}
+
+type SetRegistryValueInGuestRequestType struct {
+	This ManagedObjectReference `xml:"_this"`
+	Vm ManagedObjectReference `xml:"vm"`
+	Auth BaseGuestAuthentication `xml:"auth,typeattr"`
+	Value GuestRegValueSpec `xml:"value"`
+}
+
+func init() {
+	t["SetRegistryValueInGuestRequestType"] = reflect.TypeOf((*SetRegistryValueInGuestRequestType)(nil)).Elem()
+}
+
+type SetRegistryValueInGuestResponse struct {
+}
+
+type SetScreenResolution SetScreenResolutionRequestType
+
+func init() {
+	t["SetScreenResolution"] = reflect.TypeOf((*SetScreenResolution)(nil)).Elem()
+}
+
+type SetScreenResolutionRequestType struct {
+	This ManagedObjectReference `xml:"_this"`
+	Width int `xml:"width"`
+	Height int `xml:"height"`
+}
+
+func init() {
+	t["SetScreenResolutionRequestType"] = reflect.TypeOf((*SetScreenResolutionRequestType)(nil)).Elem()
+}
+
+type SetScreenResolutionResponse struct {
+}
+
+type SetTaskDescription SetTaskDescriptionRequestType
+
+func init() {
+	t["SetTaskDescription"] = reflect.TypeOf((*SetTaskDescription)(nil)).Elem()
+}
+
+type SetTaskDescriptionRequestType struct {
+	This ManagedObjectReference `xml:"_this"`
+	Description LocalizableMessage `xml:"description"`
+}
+
+func init() {
+	t["SetTaskDescriptionRequestType"] = reflect.TypeOf((*SetTaskDescriptionRequestType)(nil)).Elem()
+}
+
+type SetTaskDescriptionResponse struct {
+}
+
+type SetTaskState SetTaskStateRequestType
+
+func init() {
+	t["SetTaskState"] = reflect.TypeOf((*SetTaskState)(nil)).Elem()
+}
+
+type SetTaskStateRequestType struct {
+	This ManagedObjectReference `xml:"_this"`
+	State TaskInfoState `xml:"state"`
+	Result AnyType `xml:"result,omitempty,typeattr"`
+	Fault *LocalizedMethodFault `xml:"fault,omitempty"`
+}
+
+func init() {
+	t["SetTaskStateRequestType"] = reflect.TypeOf((*SetTaskStateRequestType)(nil)).Elem()
+}
+
+type SetTaskStateResponse struct {
+}
+
+type SetVirtualDiskUuid SetVirtualDiskUuidRequestType
+
+func init() {
+	t["SetVirtualDiskUuid"] = reflect.TypeOf((*SetVirtualDiskUuid)(nil)).Elem()
+}
+
+type SetVirtualDiskUuidRequestType struct {
+	This ManagedObjectReference `xml:"_this"`
+	Name string `xml:"name"`
+	Datacenter *ManagedObjectReference `xml:"datacenter,omitempty"`
+	Uuid string `xml:"uuid"`
+}
+
+func init() {
+	t["SetVirtualDiskUuidRequestType"] = reflect.TypeOf((*SetVirtualDiskUuidRequestType)(nil)).Elem()
+}
+
+type SetVirtualDiskUuidResponse struct {
+}
+
+type SharedBusControllerNotSupported struct {
+	DeviceNotSupported
+}
+
+func init() {
+	t["SharedBusControllerNotSupported"] =
reflect.TypeOf((*SharedBusControllerNotSupported)(nil)).Elem()
+}
+
+type SharedBusControllerNotSupportedFault SharedBusControllerNotSupported
+
+func init() {
+	t["SharedBusControllerNotSupportedFault"] = reflect.TypeOf((*SharedBusControllerNotSupportedFault)(nil)).Elem()
+}
+
+type SharesInfo struct {
+	DynamicData
+
+	Shares int `xml:"shares"`
+	Level SharesLevel `xml:"level"`
+}
+
+func init() {
+	t["SharesInfo"] = reflect.TypeOf((*SharesInfo)(nil)).Elem()
+}
+
+type SharesOption struct {
+	DynamicData
+
+	SharesOption IntOption `xml:"sharesOption"`
+	DefaultLevel SharesLevel `xml:"defaultLevel"`
+}
+
+func init() {
+	t["SharesOption"] = reflect.TypeOf((*SharesOption)(nil)).Elem()
+}
+
+type ShrinkDiskFault struct {
+	VimFault
+
+	DiskId int `xml:"diskId,omitempty"`
+}
+
+func init() {
+	t["ShrinkDiskFault"] = reflect.TypeOf((*ShrinkDiskFault)(nil)).Elem()
+}
+
+type ShrinkDiskFaultFault ShrinkDiskFault
+
+func init() {
+	t["ShrinkDiskFaultFault"] = reflect.TypeOf((*ShrinkDiskFaultFault)(nil)).Elem()
+}
+
+type ShrinkVirtualDiskRequestType struct {
+	This ManagedObjectReference `xml:"_this"`
+	Name string `xml:"name"`
+	Datacenter *ManagedObjectReference `xml:"datacenter,omitempty"`
+	Copy *bool `xml:"copy"`
+}
+
+func init() {
+	t["ShrinkVirtualDiskRequestType"] = reflect.TypeOf((*ShrinkVirtualDiskRequestType)(nil)).Elem()
+}
+
+type ShrinkVirtualDisk_Task ShrinkVirtualDiskRequestType
+
+func init() {
+	t["ShrinkVirtualDisk_Task"] = reflect.TypeOf((*ShrinkVirtualDisk_Task)(nil)).Elem()
+}
+
+type ShrinkVirtualDisk_TaskResponse struct {
+	Returnval ManagedObjectReference `xml:"returnval"`
+}
+
+type ShutdownGuest ShutdownGuestRequestType
+
+func init() {
+	t["ShutdownGuest"] = reflect.TypeOf((*ShutdownGuest)(nil)).Elem()
+}
+
+type ShutdownGuestRequestType struct {
+	This ManagedObjectReference `xml:"_this"`
+}
+
+func init() {
+	t["ShutdownGuestRequestType"] = reflect.TypeOf((*ShutdownGuestRequestType)(nil)).Elem()
+}
+
+type ShutdownGuestResponse struct {
+}
+
+type ShutdownHostRequestType struct {
+	This ManagedObjectReference `xml:"_this"`
+	Force bool `xml:"force"`
+}
+
+func init() {
+	t["ShutdownHostRequestType"] = reflect.TypeOf((*ShutdownHostRequestType)(nil)).Elem()
+}
+
+type ShutdownHost_Task ShutdownHostRequestType
+
+func init() {
+	t["ShutdownHost_Task"] = reflect.TypeOf((*ShutdownHost_Task)(nil)).Elem()
+}
+
+type ShutdownHost_TaskResponse struct {
+	Returnval ManagedObjectReference `xml:"returnval"`
+}
+
+type SingleIp struct {
+	IpAddress
+
+	Address string `xml:"address"`
+}
+
+func init() {
+	t["SingleIp"] = reflect.TypeOf((*SingleIp)(nil)).Elem()
+}
+
+type SingleMac struct {
+	MacAddress
+
+	Address string `xml:"address"`
+}
+
+func init() {
+	t["SingleMac"] = reflect.TypeOf((*SingleMac)(nil)).Elem()
+}
+
+type SnapshotCloneNotSupported struct {
+	SnapshotCopyNotSupported
+}
+
+func init() {
+	t["SnapshotCloneNotSupported"] = reflect.TypeOf((*SnapshotCloneNotSupported)(nil)).Elem()
+}
+
+type SnapshotCloneNotSupportedFault SnapshotCloneNotSupported
+
+func init() {
+	t["SnapshotCloneNotSupportedFault"] = reflect.TypeOf((*SnapshotCloneNotSupportedFault)(nil)).Elem()
+}
+
+type SnapshotCopyNotSupported struct {
+	MigrationFault
+}
+
+func init() {
+	t["SnapshotCopyNotSupported"] = reflect.TypeOf((*SnapshotCopyNotSupported)(nil)).Elem()
+}
+
+type SnapshotCopyNotSupportedFault BaseSnapshotCopyNotSupported
+
+func init() {
+	t["SnapshotCopyNotSupportedFault"] = reflect.TypeOf((*SnapshotCopyNotSupportedFault)(nil)).Elem()
+}
+
+type SnapshotDisabled struct {
+
SnapshotFault
+}
+
+func init() {
+	t["SnapshotDisabled"] = reflect.TypeOf((*SnapshotDisabled)(nil)).Elem()
+}
+
+type SnapshotDisabledFault SnapshotDisabled
+
+func init() {
+	t["SnapshotDisabledFault"] = reflect.TypeOf((*SnapshotDisabledFault)(nil)).Elem()
+}
+
+type SnapshotFault struct {
+	VimFault
+}
+
+func init() {
+	t["SnapshotFault"] = reflect.TypeOf((*SnapshotFault)(nil)).Elem()
+}
+
+type SnapshotFaultFault BaseSnapshotFault
+
+func init() {
+	t["SnapshotFaultFault"] = reflect.TypeOf((*SnapshotFaultFault)(nil)).Elem()
+}
+
+type SnapshotIncompatibleDeviceInVm struct {
+	SnapshotFault
+
+	Fault LocalizedMethodFault `xml:"fault"`
+}
+
+func init() {
+	t["SnapshotIncompatibleDeviceInVm"] = reflect.TypeOf((*SnapshotIncompatibleDeviceInVm)(nil)).Elem()
+}
+
+type SnapshotIncompatibleDeviceInVmFault SnapshotIncompatibleDeviceInVm
+
+func init() {
+	t["SnapshotIncompatibleDeviceInVmFault"] = reflect.TypeOf((*SnapshotIncompatibleDeviceInVmFault)(nil)).Elem()
+}
+
+type SnapshotLocked struct {
+	SnapshotFault
+}
+
+func init() {
+	t["SnapshotLocked"] = reflect.TypeOf((*SnapshotLocked)(nil)).Elem()
+}
+
+type SnapshotLockedFault SnapshotLocked
+
+func init() {
+	t["SnapshotLockedFault"] = reflect.TypeOf((*SnapshotLockedFault)(nil)).Elem()
+}
+
+type SnapshotMoveFromNonHomeNotSupported struct {
+	SnapshotCopyNotSupported
+}
+
+func init() {
+	t["SnapshotMoveFromNonHomeNotSupported"] = reflect.TypeOf((*SnapshotMoveFromNonHomeNotSupported)(nil)).Elem()
+}
+
+type SnapshotMoveFromNonHomeNotSupportedFault SnapshotMoveFromNonHomeNotSupported
+
+func init() {
+	t["SnapshotMoveFromNonHomeNotSupportedFault"] = reflect.TypeOf((*SnapshotMoveFromNonHomeNotSupportedFault)(nil)).Elem()
+}
+
+type SnapshotMoveNotSupported struct {
+	SnapshotCopyNotSupported
+}
+
+func init() {
+	t["SnapshotMoveNotSupported"] = reflect.TypeOf((*SnapshotMoveNotSupported)(nil)).Elem()
+}
+
+type SnapshotMoveNotSupportedFault SnapshotMoveNotSupported
+
+func init() {
+	t["SnapshotMoveNotSupportedFault"] = reflect.TypeOf((*SnapshotMoveNotSupportedFault)(nil)).Elem()
+}
+
+type SnapshotMoveToNonHomeNotSupported struct {
+	SnapshotCopyNotSupported
+}
+
+func init() {
+	t["SnapshotMoveToNonHomeNotSupported"] = reflect.TypeOf((*SnapshotMoveToNonHomeNotSupported)(nil)).Elem()
+}
+
+type SnapshotMoveToNonHomeNotSupportedFault SnapshotMoveToNonHomeNotSupported
+
+func init() {
+	t["SnapshotMoveToNonHomeNotSupportedFault"] = reflect.TypeOf((*SnapshotMoveToNonHomeNotSupportedFault)(nil)).Elem()
+}
+
+type SnapshotNoChange struct {
+	SnapshotFault
+}
+
+func init() {
+	t["SnapshotNoChange"] = reflect.TypeOf((*SnapshotNoChange)(nil)).Elem()
+}
+
+type SnapshotNoChangeFault SnapshotNoChange
+
+func init() {
+	t["SnapshotNoChangeFault"] = reflect.TypeOf((*SnapshotNoChangeFault)(nil)).Elem()
+}
+
+type SnapshotRevertIssue struct {
+	MigrationFault
+
+	SnapshotName string `xml:"snapshotName,omitempty"`
+	Event []BaseEvent `xml:"event,omitempty,typeattr"`
+	Errors bool `xml:"errors"`
+}
+
+func init() {
+	t["SnapshotRevertIssue"] = reflect.TypeOf((*SnapshotRevertIssue)(nil)).Elem()
+}
+
+type SnapshotRevertIssueFault SnapshotRevertIssue
+
+func init() {
+	t["SnapshotRevertIssueFault"] = reflect.TypeOf((*SnapshotRevertIssueFault)(nil)).Elem()
+}
+
+type SoftRuleVioCorrectionDisallowed struct {
+	VmConfigFault
+
+	VmName string `xml:"vmName"`
+}
+
+func init() {
+	t["SoftRuleVioCorrectionDisallowed"] = reflect.TypeOf((*SoftRuleVioCorrectionDisallowed)(nil)).Elem()
+}
+
+type SoftRuleVioCorrectionDisallowedFault SoftRuleVioCorrectionDisallowed
+
+func init() {
+	t["SoftRuleVioCorrectionDisallowedFault"] = reflect.TypeOf((*SoftRuleVioCorrectionDisallowedFault)(nil)).Elem()
+}
+
+type SoftRuleVioCorrectionImpact struct {
+	VmConfigFault
+
+	VmName string `xml:"vmName"`
+}
+
+func init() {
+	t["SoftRuleVioCorrectionImpact"] = reflect.TypeOf((*SoftRuleVioCorrectionImpact)(nil)).Elem()
+}
+
+type SoftRuleVioCorrectionImpactFault SoftRuleVioCorrectionImpact
+
+func init() {
+	t["SoftRuleVioCorrectionImpactFault"] = reflect.TypeOf((*SoftRuleVioCorrectionImpactFault)(nil)).Elem()
+}
+
+type SsdDiskNotAvailable struct {
+	VimFault
+
+	DevicePath string `xml:"devicePath"`
+}
+
+func init() {
+	t["SsdDiskNotAvailable"] = reflect.TypeOf((*SsdDiskNotAvailable)(nil)).Elem()
+}
+
+type SsdDiskNotAvailableFault SsdDiskNotAvailable
+
+func init() {
+	t["SsdDiskNotAvailableFault"] = reflect.TypeOf((*SsdDiskNotAvailableFault)(nil)).Elem()
+}
+
+type StageHostPatchRequestType struct {
+	This ManagedObjectReference `xml:"_this"`
+	MetaUrls []string `xml:"metaUrls,omitempty"`
+	BundleUrls []string `xml:"bundleUrls,omitempty"`
+	VibUrls []string `xml:"vibUrls,omitempty"`
+	Spec *HostPatchManagerPatchManagerOperationSpec `xml:"spec,omitempty"`
+}
+
+func init() {
+	t["StageHostPatchRequestType"] = reflect.TypeOf((*StageHostPatchRequestType)(nil)).Elem()
+}
+
+type StageHostPatch_Task StageHostPatchRequestType
+
+func init() {
+	t["StageHostPatch_Task"] = reflect.TypeOf((*StageHostPatch_Task)(nil)).Elem()
+}
+
+type StageHostPatch_TaskResponse struct {
+	Returnval ManagedObjectReference `xml:"returnval"`
+}
+
+type StampAllRulesWithUuidRequestType struct {
+	This ManagedObjectReference `xml:"_this"`
+}
+
+func init() {
+	t["StampAllRulesWithUuidRequestType"] = reflect.TypeOf((*StampAllRulesWithUuidRequestType)(nil)).Elem()
+}
+
+type StampAllRulesWithUuid_Task StampAllRulesWithUuidRequestType
+
+func init() {
+	t["StampAllRulesWithUuid_Task"] = reflect.TypeOf((*StampAllRulesWithUuid_Task)(nil)).Elem()
+}
+
+type StampAllRulesWithUuid_TaskResponse struct {
+	Returnval ManagedObjectReference `xml:"returnval"`
+}
+
+type StandbyGuest StandbyGuestRequestType
+
+func init() {
+	t["StandbyGuest"] = reflect.TypeOf((*StandbyGuest)(nil)).Elem()
+}
+
+type StandbyGuestRequestType struct {
+	This ManagedObjectReference `xml:"_this"`
+}
+
+func init() {
+	t["StandbyGuestRequestType"] = reflect.TypeOf((*StandbyGuestRequestType)(nil)).Elem()
+}
+
+type StandbyGuestResponse struct {
+}
+
+type StartProgramInGuest StartProgramInGuestRequestType
+
+func init() {
+	t["StartProgramInGuest"] = reflect.TypeOf((*StartProgramInGuest)(nil)).Elem()
+}
+
+type StartProgramInGuestRequestType struct {
+	This ManagedObjectReference `xml:"_this"`
+	Vm ManagedObjectReference `xml:"vm"`
+	Auth BaseGuestAuthentication `xml:"auth,typeattr"`
+	Spec BaseGuestProgramSpec `xml:"spec,typeattr"`
+}
+
+func init() {
+	t["StartProgramInGuestRequestType"] = reflect.TypeOf((*StartProgramInGuestRequestType)(nil)).Elem()
+}
+
+type StartProgramInGuestResponse struct {
+	Returnval int64 `xml:"returnval"`
+}
+
+type StartRecordingRequestType struct {
+	This ManagedObjectReference `xml:"_this"`
+	Name string `xml:"name"`
+	Description string `xml:"description,omitempty"`
+}
+
+func init() {
+	t["StartRecordingRequestType"] = reflect.TypeOf((*StartRecordingRequestType)(nil)).Elem()
+}
+
+type StartRecording_Task StartRecordingRequestType
+
+func init() {
+	t["StartRecording_Task"] = reflect.TypeOf((*StartRecording_Task)(nil)).Elem()
+}
+
+type StartRecording_TaskResponse struct {
+	Returnval
ManagedObjectReference `xml:"returnval"`
+}
+
+type StartReplayingRequestType struct {
+	This ManagedObjectReference `xml:"_this"`
+	ReplaySnapshot ManagedObjectReference `xml:"replaySnapshot"`
+}
+
+func init() {
+	t["StartReplayingRequestType"] = reflect.TypeOf((*StartReplayingRequestType)(nil)).Elem()
+}
+
+type StartReplaying_Task StartReplayingRequestType
+
+func init() {
+	t["StartReplaying_Task"] = reflect.TypeOf((*StartReplaying_Task)(nil)).Elem()
+}
+
+type StartReplaying_TaskResponse struct {
+	Returnval ManagedObjectReference `xml:"returnval"`
+}
+
+type StartService StartServiceRequestType
+
+func init() {
+	t["StartService"] = reflect.TypeOf((*StartService)(nil)).Elem()
+}
+
+type StartServiceRequestType struct {
+	This ManagedObjectReference `xml:"_this"`
+	Id string `xml:"id"`
+}
+
+func init() {
+	t["StartServiceRequestType"] = reflect.TypeOf((*StartServiceRequestType)(nil)).Elem()
+}
+
+type StartServiceResponse struct {
+}
+
+type StateAlarmExpression struct {
+	AlarmExpression
+
+	Operator StateAlarmOperator `xml:"operator"`
+	Type string `xml:"type"`
+	StatePath string `xml:"statePath"`
+	Yellow string `xml:"yellow,omitempty"`
+	Red string `xml:"red,omitempty"`
+}
+
+func init() {
+	t["StateAlarmExpression"] = reflect.TypeOf((*StateAlarmExpression)(nil)).Elem()
+}
+
+type StaticRouteProfile struct {
+	ApplyProfile
+
+	Key string `xml:"key,omitempty"`
+}
+
+func init() {
+	t["StaticRouteProfile"] = reflect.TypeOf((*StaticRouteProfile)(nil)).Elem()
+}
+
+type StopRecordingRequestType struct {
+	This ManagedObjectReference `xml:"_this"`
+}
+
+func init() {
+	t["StopRecordingRequestType"] = reflect.TypeOf((*StopRecordingRequestType)(nil)).Elem()
+}
+
+type StopRecording_Task StopRecordingRequestType
+
+func init() {
+	t["StopRecording_Task"] = reflect.TypeOf((*StopRecording_Task)(nil)).Elem()
+}
+
+type StopRecording_TaskResponse struct {
+	Returnval ManagedObjectReference `xml:"returnval"`
+}
+
+type StopReplayingRequestType struct {
+	This ManagedObjectReference `xml:"_this"`
+}
+
+func init() {
+	t["StopReplayingRequestType"] = reflect.TypeOf((*StopReplayingRequestType)(nil)).Elem()
+}
+
+type StopReplaying_Task StopReplayingRequestType
+
+func init() {
+	t["StopReplaying_Task"] = reflect.TypeOf((*StopReplaying_Task)(nil)).Elem()
+}
+
+type StopReplaying_TaskResponse struct {
+	Returnval ManagedObjectReference `xml:"returnval"`
+}
+
+type StopService StopServiceRequestType
+
+func init() {
+	t["StopService"] = reflect.TypeOf((*StopService)(nil)).Elem()
+}
+
+type StopServiceRequestType struct {
+	This ManagedObjectReference `xml:"_this"`
+	Id string `xml:"id"`
+}
+
+func init() {
+	t["StopServiceRequestType"] = reflect.TypeOf((*StopServiceRequestType)(nil)).Elem()
+}
+
+type StopServiceResponse struct {
+}
+
+type StorageDrsAutomationConfig struct {
+	DynamicData
+
+	SpaceLoadBalanceAutomationMode string `xml:"spaceLoadBalanceAutomationMode,omitempty"`
+	IoLoadBalanceAutomationMode string `xml:"ioLoadBalanceAutomationMode,omitempty"`
+	RuleEnforcementAutomationMode string `xml:"ruleEnforcementAutomationMode,omitempty"`
+	PolicyEnforcementAutomationMode string `xml:"policyEnforcementAutomationMode,omitempty"`
+	VmEvacuationAutomationMode string `xml:"vmEvacuationAutomationMode,omitempty"`
+}
+
+func init() {
+	t["StorageDrsAutomationConfig"] = reflect.TypeOf((*StorageDrsAutomationConfig)(nil)).Elem()
+}
+
+type StorageDrsCannotMoveDiskInMultiWriterMode struct {
+	VimFault
+}
+
+func init() {
+	t["StorageDrsCannotMoveDiskInMultiWriterMode"] =
reflect.TypeOf((*StorageDrsCannotMoveDiskInMultiWriterMode)(nil)).Elem()
+}
+
+type StorageDrsCannotMoveDiskInMultiWriterModeFault StorageDrsCannotMoveDiskInMultiWriterMode
+
+func init() {
+	t["StorageDrsCannotMoveDiskInMultiWriterModeFault"] = reflect.TypeOf((*StorageDrsCannotMoveDiskInMultiWriterModeFault)(nil)).Elem()
+}
+
+type StorageDrsCannotMoveFTVm struct {
+	VimFault
+}
+
+func init() {
+	t["StorageDrsCannotMoveFTVm"] = reflect.TypeOf((*StorageDrsCannotMoveFTVm)(nil)).Elem()
+}
+
+type StorageDrsCannotMoveFTVmFault StorageDrsCannotMoveFTVm
+
+func init() {
+	t["StorageDrsCannotMoveFTVmFault"] = reflect.TypeOf((*StorageDrsCannotMoveFTVmFault)(nil)).Elem()
+}
+
+type StorageDrsCannotMoveIndependentDisk struct {
+	VimFault
+}
+
+func init() {
+	t["StorageDrsCannotMoveIndependentDisk"] = reflect.TypeOf((*StorageDrsCannotMoveIndependentDisk)(nil)).Elem()
+}
+
+type StorageDrsCannotMoveIndependentDiskFault StorageDrsCannotMoveIndependentDisk
+
+func init() {
+	t["StorageDrsCannotMoveIndependentDiskFault"] = reflect.TypeOf((*StorageDrsCannotMoveIndependentDiskFault)(nil)).Elem()
+}
+
+type StorageDrsCannotMoveManuallyPlacedSwapFile struct {
+	VimFault
+}
+
+func init() {
+	t["StorageDrsCannotMoveManuallyPlacedSwapFile"] = reflect.TypeOf((*StorageDrsCannotMoveManuallyPlacedSwapFile)(nil)).Elem()
+}
+
+type StorageDrsCannotMoveManuallyPlacedSwapFileFault StorageDrsCannotMoveManuallyPlacedSwapFile
+
+func init() {
+	t["StorageDrsCannotMoveManuallyPlacedSwapFileFault"] = reflect.TypeOf((*StorageDrsCannotMoveManuallyPlacedSwapFileFault)(nil)).Elem()
+}
+
+type StorageDrsCannotMoveManuallyPlacedVm struct {
+	VimFault
+}
+
+func init() {
+	t["StorageDrsCannotMoveManuallyPlacedVm"] = reflect.TypeOf((*StorageDrsCannotMoveManuallyPlacedVm)(nil)).Elem()
+}
+
+type StorageDrsCannotMoveManuallyPlacedVmFault StorageDrsCannotMoveManuallyPlacedVm
+
+func init() {
+	t["StorageDrsCannotMoveManuallyPlacedVmFault"] = reflect.TypeOf((*StorageDrsCannotMoveManuallyPlacedVmFault)(nil)).Elem()
+}
+
+type StorageDrsCannotMoveSharedDisk struct {
+	VimFault
+}
+
+func init() {
+	t["StorageDrsCannotMoveSharedDisk"] = reflect.TypeOf((*StorageDrsCannotMoveSharedDisk)(nil)).Elem()
+}
+
+type StorageDrsCannotMoveSharedDiskFault StorageDrsCannotMoveSharedDisk
+
+func init() {
+	t["StorageDrsCannotMoveSharedDiskFault"] = reflect.TypeOf((*StorageDrsCannotMoveSharedDiskFault)(nil)).Elem()
+}
+
+type StorageDrsCannotMoveTemplate struct {
+	VimFault
+}
+
+func init() {
+	t["StorageDrsCannotMoveTemplate"] = reflect.TypeOf((*StorageDrsCannotMoveTemplate)(nil)).Elem()
+}
+
+type StorageDrsCannotMoveTemplateFault StorageDrsCannotMoveTemplate
+
+func init() {
+	t["StorageDrsCannotMoveTemplateFault"] = reflect.TypeOf((*StorageDrsCannotMoveTemplateFault)(nil)).Elem()
+}
+
+type StorageDrsCannotMoveVmInUserFolder struct {
+	VimFault
+}
+
+func init() {
+	t["StorageDrsCannotMoveVmInUserFolder"] = reflect.TypeOf((*StorageDrsCannotMoveVmInUserFolder)(nil)).Elem()
+}
+
+type StorageDrsCannotMoveVmInUserFolderFault StorageDrsCannotMoveVmInUserFolder
+
+func init() {
+	t["StorageDrsCannotMoveVmInUserFolderFault"] = reflect.TypeOf((*StorageDrsCannotMoveVmInUserFolderFault)(nil)).Elem()
+}
+
+type StorageDrsCannotMoveVmWithMountedCDROM struct {
+	VimFault
+}
+
+func init() {
+	t["StorageDrsCannotMoveVmWithMountedCDROM"] = reflect.TypeOf((*StorageDrsCannotMoveVmWithMountedCDROM)(nil)).Elem()
+}
+
+type StorageDrsCannotMoveVmWithMountedCDROMFault StorageDrsCannotMoveVmWithMountedCDROM
+
+func init() {
+
t["StorageDrsCannotMoveVmWithMountedCDROMFault"] = reflect.TypeOf((*StorageDrsCannotMoveVmWithMountedCDROMFault)(nil)).Elem() +} + +type StorageDrsCannotMoveVmWithNoFilesInLayout struct { + VimFault +} + +func init() { + t["StorageDrsCannotMoveVmWithNoFilesInLayout"] = reflect.TypeOf((*StorageDrsCannotMoveVmWithNoFilesInLayout)(nil)).Elem() +} + +type StorageDrsCannotMoveVmWithNoFilesInLayoutFault StorageDrsCannotMoveVmWithNoFilesInLayout + +func init() { + t["StorageDrsCannotMoveVmWithNoFilesInLayoutFault"] = reflect.TypeOf((*StorageDrsCannotMoveVmWithNoFilesInLayoutFault)(nil)).Elem() +} + +type StorageDrsConfigInfo struct { + DynamicData + + PodConfig StorageDrsPodConfigInfo `xml:"podConfig"` + VmConfig []StorageDrsVmConfigInfo `xml:"vmConfig,omitempty"` +} + +func init() { + t["StorageDrsConfigInfo"] = reflect.TypeOf((*StorageDrsConfigInfo)(nil)).Elem() +} + +type StorageDrsConfigSpec struct { + DynamicData + + PodConfigSpec *StorageDrsPodConfigSpec `xml:"podConfigSpec,omitempty"` + VmConfigSpec []StorageDrsVmConfigSpec `xml:"vmConfigSpec,omitempty"` +} + +func init() { + t["StorageDrsConfigSpec"] = reflect.TypeOf((*StorageDrsConfigSpec)(nil)).Elem() +} + +type StorageDrsDatacentersCannotShareDatastore struct { + VimFault +} + +func init() { + t["StorageDrsDatacentersCannotShareDatastore"] = reflect.TypeOf((*StorageDrsDatacentersCannotShareDatastore)(nil)).Elem() +} + +type StorageDrsDatacentersCannotShareDatastoreFault StorageDrsDatacentersCannotShareDatastore + +func init() { + t["StorageDrsDatacentersCannotShareDatastoreFault"] = reflect.TypeOf((*StorageDrsDatacentersCannotShareDatastoreFault)(nil)).Elem() +} + +type StorageDrsDisabledOnVm struct { + VimFault +} + +func init() { + t["StorageDrsDisabledOnVm"] = reflect.TypeOf((*StorageDrsDisabledOnVm)(nil)).Elem() +} + +type StorageDrsDisabledOnVmFault StorageDrsDisabledOnVm + +func init() { + t["StorageDrsDisabledOnVmFault"] = reflect.TypeOf((*StorageDrsDisabledOnVmFault)(nil)).Elem() +} + +type StorageDrsHbrDiskNotMovable struct { + VimFault + + NonMovableDiskIds string `xml:"nonMovableDiskIds"` +} + +func init() { + t["StorageDrsHbrDiskNotMovable"] = reflect.TypeOf((*StorageDrsHbrDiskNotMovable)(nil)).Elem() +} + +type StorageDrsHbrDiskNotMovableFault StorageDrsHbrDiskNotMovable + +func init() { + t["StorageDrsHbrDiskNotMovableFault"] = reflect.TypeOf((*StorageDrsHbrDiskNotMovableFault)(nil)).Elem() +} + +type StorageDrsHmsMoveInProgress struct { + VimFault +} + +func init() { + t["StorageDrsHmsMoveInProgress"] = reflect.TypeOf((*StorageDrsHmsMoveInProgress)(nil)).Elem() +} + +type StorageDrsHmsMoveInProgressFault StorageDrsHmsMoveInProgress + +func init() { + t["StorageDrsHmsMoveInProgressFault"] = reflect.TypeOf((*StorageDrsHmsMoveInProgressFault)(nil)).Elem() +} + +type StorageDrsHmsUnreachable struct { + VimFault +} + +func init() { + t["StorageDrsHmsUnreachable"] = reflect.TypeOf((*StorageDrsHmsUnreachable)(nil)).Elem() +} + +type StorageDrsHmsUnreachableFault StorageDrsHmsUnreachable + +func init() { + t["StorageDrsHmsUnreachableFault"] = reflect.TypeOf((*StorageDrsHmsUnreachableFault)(nil)).Elem() +} + +type StorageDrsIoLoadBalanceConfig struct { + DynamicData + + ReservablePercentThreshold int `xml:"reservablePercentThreshold,omitempty"` + ReservableIopsThreshold int `xml:"reservableIopsThreshold,omitempty"` + ReservableThresholdMode string `xml:"reservableThresholdMode,omitempty"` + IoLatencyThreshold int `xml:"ioLatencyThreshold,omitempty"` + IoLoadImbalanceThreshold int `xml:"ioLoadImbalanceThreshold,omitempty"` +} + 
+func init() {
+	t["StorageDrsIoLoadBalanceConfig"] = reflect.TypeOf((*StorageDrsIoLoadBalanceConfig)(nil)).Elem()
+}
+
+type StorageDrsIolbDisabledInternally struct {
+	VimFault
+}
+
+func init() {
+	t["StorageDrsIolbDisabledInternally"] = reflect.TypeOf((*StorageDrsIolbDisabledInternally)(nil)).Elem()
+}
+
+type StorageDrsIolbDisabledInternallyFault StorageDrsIolbDisabledInternally
+
+func init() {
+	t["StorageDrsIolbDisabledInternallyFault"] = reflect.TypeOf((*StorageDrsIolbDisabledInternallyFault)(nil)).Elem()
+}
+
+type StorageDrsOptionSpec struct {
+	ArrayUpdateSpec
+
+	Option BaseOptionValue `xml:"option,omitempty,typeattr"`
+}
+
+func init() {
+	t["StorageDrsOptionSpec"] = reflect.TypeOf((*StorageDrsOptionSpec)(nil)).Elem()
+}
+
+type StorageDrsPlacementRankVmSpec struct {
+	DynamicData
+
+	VmPlacementSpec PlacementSpec `xml:"vmPlacementSpec"`
+	VmClusters []ManagedObjectReference `xml:"vmClusters"`
+}
+
+func init() {
+	t["StorageDrsPlacementRankVmSpec"] = reflect.TypeOf((*StorageDrsPlacementRankVmSpec)(nil)).Elem()
+}
+
+type StorageDrsPodConfigInfo struct {
+	DynamicData
+
+	Enabled bool `xml:"enabled"`
+	IoLoadBalanceEnabled bool `xml:"ioLoadBalanceEnabled"`
+	DefaultVmBehavior string `xml:"defaultVmBehavior"`
+	LoadBalanceInterval int `xml:"loadBalanceInterval,omitempty"`
+	DefaultIntraVmAffinity *bool `xml:"defaultIntraVmAffinity"`
+	SpaceLoadBalanceConfig *StorageDrsSpaceLoadBalanceConfig `xml:"spaceLoadBalanceConfig,omitempty"`
+	IoLoadBalanceConfig *StorageDrsIoLoadBalanceConfig `xml:"ioLoadBalanceConfig,omitempty"`
+	AutomationOverrides *StorageDrsAutomationConfig `xml:"automationOverrides,omitempty"`
+	Rule []BaseClusterRuleInfo `xml:"rule,omitempty,typeattr"`
+	Option []BaseOptionValue `xml:"option,omitempty,typeattr"`
+}
+
+func init() {
+	t["StorageDrsPodConfigInfo"] = reflect.TypeOf((*StorageDrsPodConfigInfo)(nil)).Elem()
+}
+
+type StorageDrsPodConfigSpec struct {
+	DynamicData
+
+	Enabled *bool `xml:"enabled"`
+	IoLoadBalanceEnabled *bool `xml:"ioLoadBalanceEnabled"`
+	DefaultVmBehavior string `xml:"defaultVmBehavior,omitempty"`
+	LoadBalanceInterval int `xml:"loadBalanceInterval,omitempty"`
+	DefaultIntraVmAffinity *bool `xml:"defaultIntraVmAffinity"`
+	SpaceLoadBalanceConfig *StorageDrsSpaceLoadBalanceConfig `xml:"spaceLoadBalanceConfig,omitempty"`
+	IoLoadBalanceConfig *StorageDrsIoLoadBalanceConfig `xml:"ioLoadBalanceConfig,omitempty"`
+	AutomationOverrides *StorageDrsAutomationConfig `xml:"automationOverrides,omitempty"`
+	Rule []ClusterRuleSpec `xml:"rule,omitempty"`
+	Option []StorageDrsOptionSpec `xml:"option,omitempty"`
+}
+
+func init() {
+	t["StorageDrsPodConfigSpec"] = reflect.TypeOf((*StorageDrsPodConfigSpec)(nil)).Elem()
+}
+
+type StorageDrsPodSelectionSpec struct {
+	DynamicData
+
+	InitialVmConfig []VmPodConfigForPlacement `xml:"initialVmConfig,omitempty"`
+	StoragePod *ManagedObjectReference `xml:"storagePod,omitempty"`
+}
+
+func init() {
+	t["StorageDrsPodSelectionSpec"] = reflect.TypeOf((*StorageDrsPodSelectionSpec)(nil)).Elem()
+}
+
+type StorageDrsRelocateDisabled struct {
+	VimFault
+}
+
+func init() {
+	t["StorageDrsRelocateDisabled"] = reflect.TypeOf((*StorageDrsRelocateDisabled)(nil)).Elem()
+}
+
+type StorageDrsRelocateDisabledFault StorageDrsRelocateDisabled
+
+func init() {
+	t["StorageDrsRelocateDisabledFault"] = reflect.TypeOf((*StorageDrsRelocateDisabledFault)(nil)).Elem()
+}
+
+type StorageDrsSpaceLoadBalanceConfig struct {
+	DynamicData
+
+	SpaceThresholdMode string `xml:"spaceThresholdMode,omitempty"`
+	SpaceUtilizationThreshold
int `xml:"spaceUtilizationThreshold,omitempty"` + FreeSpaceThresholdGB int `xml:"freeSpaceThresholdGB,omitempty"` + MinSpaceUtilizationDifference int `xml:"minSpaceUtilizationDifference,omitempty"` +} + +func init() { + t["StorageDrsSpaceLoadBalanceConfig"] = reflect.TypeOf((*StorageDrsSpaceLoadBalanceConfig)(nil)).Elem() +} + +type StorageDrsStaleHmsCollection struct { + VimFault +} + +func init() { + t["StorageDrsStaleHmsCollection"] = reflect.TypeOf((*StorageDrsStaleHmsCollection)(nil)).Elem() +} + +type StorageDrsStaleHmsCollectionFault StorageDrsStaleHmsCollection + +func init() { + t["StorageDrsStaleHmsCollectionFault"] = reflect.TypeOf((*StorageDrsStaleHmsCollectionFault)(nil)).Elem() +} + +type StorageDrsUnableToMoveFiles struct { + VimFault +} + +func init() { + t["StorageDrsUnableToMoveFiles"] = reflect.TypeOf((*StorageDrsUnableToMoveFiles)(nil)).Elem() +} + +type StorageDrsUnableToMoveFilesFault StorageDrsUnableToMoveFiles + +func init() { + t["StorageDrsUnableToMoveFilesFault"] = reflect.TypeOf((*StorageDrsUnableToMoveFilesFault)(nil)).Elem() +} + +type StorageDrsVmConfigInfo struct { + DynamicData + + Vm *ManagedObjectReference `xml:"vm,omitempty"` + Enabled *bool `xml:"enabled"` + Behavior string `xml:"behavior,omitempty"` + IntraVmAffinity *bool `xml:"intraVmAffinity"` + IntraVmAntiAffinity *VirtualDiskAntiAffinityRuleSpec `xml:"intraVmAntiAffinity,omitempty"` +} + +func init() { + t["StorageDrsVmConfigInfo"] = reflect.TypeOf((*StorageDrsVmConfigInfo)(nil)).Elem() +} + +type StorageDrsVmConfigSpec struct { + ArrayUpdateSpec + + Info *StorageDrsVmConfigInfo `xml:"info,omitempty"` +} + +func init() { + t["StorageDrsVmConfigSpec"] = reflect.TypeOf((*StorageDrsVmConfigSpec)(nil)).Elem() +} + +type StorageIOAllocationInfo struct { + DynamicData + + Limit int64 `xml:"limit,omitempty"` + Shares *SharesInfo `xml:"shares,omitempty"` + Reservation int `xml:"reservation,omitempty"` +} + +func init() { + t["StorageIOAllocationInfo"] = reflect.TypeOf((*StorageIOAllocationInfo)(nil)).Elem() +} + +type StorageIOAllocationOption struct { + DynamicData + + LimitOption LongOption `xml:"limitOption"` + SharesOption SharesOption `xml:"sharesOption"` +} + +func init() { + t["StorageIOAllocationOption"] = reflect.TypeOf((*StorageIOAllocationOption)(nil)).Elem() +} + +type StorageIORMConfigOption struct { + DynamicData + + EnabledOption BoolOption `xml:"enabledOption"` + CongestionThresholdOption IntOption `xml:"congestionThresholdOption"` + StatsCollectionEnabledOption *BoolOption `xml:"statsCollectionEnabledOption,omitempty"` + ReservationEnabledOption *BoolOption `xml:"reservationEnabledOption,omitempty"` +} + +func init() { + t["StorageIORMConfigOption"] = reflect.TypeOf((*StorageIORMConfigOption)(nil)).Elem() +} + +type StorageIORMConfigSpec struct { + DynamicData + + Enabled *bool `xml:"enabled"` + CongestionThresholdMode string `xml:"congestionThresholdMode,omitempty"` + CongestionThreshold int `xml:"congestionThreshold,omitempty"` + PercentOfPeakThroughput int `xml:"percentOfPeakThroughput,omitempty"` + StatsCollectionEnabled *bool `xml:"statsCollectionEnabled"` + ReservationEnabled *bool `xml:"reservationEnabled"` + StatsAggregationDisabled *bool `xml:"statsAggregationDisabled"` + ReservableIopsThreshold int `xml:"reservableIopsThreshold,omitempty"` +} + +func init() { + t["StorageIORMConfigSpec"] = reflect.TypeOf((*StorageIORMConfigSpec)(nil)).Elem() +} + +type StorageIORMInfo struct { + DynamicData + + Enabled bool `xml:"enabled"` + CongestionThresholdMode string 
`xml:"congestionThresholdMode,omitempty"` + CongestionThreshold int `xml:"congestionThreshold"` + PercentOfPeakThroughput int `xml:"percentOfPeakThroughput,omitempty"` + StatsCollectionEnabled *bool `xml:"statsCollectionEnabled"` + ReservationEnabled *bool `xml:"reservationEnabled"` + StatsAggregationDisabled *bool `xml:"statsAggregationDisabled"` + ReservableIopsThreshold int `xml:"reservableIopsThreshold,omitempty"` +} + +func init() { + t["StorageIORMInfo"] = reflect.TypeOf((*StorageIORMInfo)(nil)).Elem() +} + +type StorageMigrationAction struct { + ClusterAction + + Vm ManagedObjectReference `xml:"vm"` + RelocateSpec VirtualMachineRelocateSpec `xml:"relocateSpec"` + Source ManagedObjectReference `xml:"source"` + Destination ManagedObjectReference `xml:"destination"` + SizeTransferred int64 `xml:"sizeTransferred"` + SpaceUtilSrcBefore float32 `xml:"spaceUtilSrcBefore,omitempty"` + SpaceUtilDstBefore float32 `xml:"spaceUtilDstBefore,omitempty"` + SpaceUtilSrcAfter float32 `xml:"spaceUtilSrcAfter,omitempty"` + SpaceUtilDstAfter float32 `xml:"spaceUtilDstAfter,omitempty"` + IoLatencySrcBefore float32 `xml:"ioLatencySrcBefore,omitempty"` + IoLatencyDstBefore float32 `xml:"ioLatencyDstBefore,omitempty"` +} + +func init() { + t["StorageMigrationAction"] = reflect.TypeOf((*StorageMigrationAction)(nil)).Elem() +} + +type StoragePerformanceSummary struct { + DynamicData + + Interval int `xml:"interval"` + Percentile []int `xml:"percentile"` + DatastoreReadLatency []float64 `xml:"datastoreReadLatency"` + DatastoreWriteLatency []float64 `xml:"datastoreWriteLatency"` + DatastoreVmLatency []float64 `xml:"datastoreVmLatency"` + DatastoreReadIops []float64 `xml:"datastoreReadIops"` + DatastoreWriteIops []float64 `xml:"datastoreWriteIops"` + SiocActivityDuration int `xml:"siocActivityDuration"` +} + +func init() { + t["StoragePerformanceSummary"] = reflect.TypeOf((*StoragePerformanceSummary)(nil)).Elem() +} + +type StoragePlacementAction struct { + ClusterAction + + Vm *ManagedObjectReference `xml:"vm,omitempty"` + RelocateSpec VirtualMachineRelocateSpec `xml:"relocateSpec"` + Destination ManagedObjectReference `xml:"destination"` + SpaceUtilBefore float32 `xml:"spaceUtilBefore,omitempty"` + SpaceDemandBefore float32 `xml:"spaceDemandBefore,omitempty"` + SpaceUtilAfter float32 `xml:"spaceUtilAfter,omitempty"` + SpaceDemandAfter float32 `xml:"spaceDemandAfter,omitempty"` + IoLatencyBefore float32 `xml:"ioLatencyBefore,omitempty"` +} + +func init() { + t["StoragePlacementAction"] = reflect.TypeOf((*StoragePlacementAction)(nil)).Elem() +} + +type StoragePlacementResult struct { + DynamicData + + Recommendations []ClusterRecommendation `xml:"recommendations,omitempty"` + DrsFault *ClusterDrsFaults `xml:"drsFault,omitempty"` + Task *ManagedObjectReference `xml:"task,omitempty"` +} + +func init() { + t["StoragePlacementResult"] = reflect.TypeOf((*StoragePlacementResult)(nil)).Elem() +} + +type StoragePlacementSpec struct { + DynamicData + + Type string `xml:"type"` + Priority VirtualMachineMovePriority `xml:"priority,omitempty"` + Vm *ManagedObjectReference `xml:"vm,omitempty"` + PodSelectionSpec StorageDrsPodSelectionSpec `xml:"podSelectionSpec"` + CloneSpec *VirtualMachineCloneSpec `xml:"cloneSpec,omitempty"` + CloneName string `xml:"cloneName,omitempty"` + ConfigSpec *VirtualMachineConfigSpec `xml:"configSpec,omitempty"` + RelocateSpec *VirtualMachineRelocateSpec `xml:"relocateSpec,omitempty"` + ResourcePool *ManagedObjectReference `xml:"resourcePool,omitempty"` + Host *ManagedObjectReference 
`xml:"host,omitempty"` + Folder *ManagedObjectReference `xml:"folder,omitempty"` + DisallowPrerequisiteMoves *bool `xml:"disallowPrerequisiteMoves"` + ResourceLeaseDurationSec int `xml:"resourceLeaseDurationSec,omitempty"` +} + +func init() { + t["StoragePlacementSpec"] = reflect.TypeOf((*StoragePlacementSpec)(nil)).Elem() +} + +type StoragePodSummary struct { + DynamicData + + Name string `xml:"name"` + Capacity int64 `xml:"capacity"` + FreeSpace int64 `xml:"freeSpace"` +} + +func init() { + t["StoragePodSummary"] = reflect.TypeOf((*StoragePodSummary)(nil)).Elem() +} + +type StorageProfile struct { + ApplyProfile + + NasStorage []NasStorageProfile `xml:"nasStorage,omitempty"` +} + +func init() { + t["StorageProfile"] = reflect.TypeOf((*StorageProfile)(nil)).Elem() +} + +type StorageRequirement struct { + DynamicData + + Datastore ManagedObjectReference `xml:"datastore"` + FreeSpaceRequiredInKb int64 `xml:"freeSpaceRequiredInKb"` +} + +func init() { + t["StorageRequirement"] = reflect.TypeOf((*StorageRequirement)(nil)).Elem() +} + +type StorageResourceManagerStorageProfileStatistics struct { + DynamicData + + ProfileId string `xml:"profileId"` + TotalSpaceMB int64 `xml:"totalSpaceMB"` + UsedSpaceMB int64 `xml:"usedSpaceMB"` +} + +func init() { + t["StorageResourceManagerStorageProfileStatistics"] = reflect.TypeOf((*StorageResourceManagerStorageProfileStatistics)(nil)).Elem() +} + +type StorageVMotionNotSupported struct { + MigrationFeatureNotSupported +} + +func init() { + t["StorageVMotionNotSupported"] = reflect.TypeOf((*StorageVMotionNotSupported)(nil)).Elem() +} + +type StorageVMotionNotSupportedFault StorageVMotionNotSupported + +func init() { + t["StorageVMotionNotSupportedFault"] = reflect.TypeOf((*StorageVMotionNotSupportedFault)(nil)).Elem() +} + +type StorageVmotionIncompatible struct { + VirtualHardwareCompatibilityIssue + + Datastore *ManagedObjectReference `xml:"datastore,omitempty"` +} + +func init() { + t["StorageVmotionIncompatible"] = reflect.TypeOf((*StorageVmotionIncompatible)(nil)).Elem() +} + +type StorageVmotionIncompatibleFault StorageVmotionIncompatible + +func init() { + t["StorageVmotionIncompatibleFault"] = reflect.TypeOf((*StorageVmotionIncompatibleFault)(nil)).Elem() +} + +type StringExpression struct { + NegatableExpression + + Value string `xml:"value,omitempty"` +} + +func init() { + t["StringExpression"] = reflect.TypeOf((*StringExpression)(nil)).Elem() +} + +type StringOption struct { + OptionType + + DefaultValue string `xml:"defaultValue"` + ValidCharacters string `xml:"validCharacters,omitempty"` +} + +func init() { + t["StringOption"] = reflect.TypeOf((*StringOption)(nil)).Elem() +} + +type StringPolicy struct { + InheritablePolicy + + Value string `xml:"value,omitempty"` +} + +func init() { + t["StringPolicy"] = reflect.TypeOf((*StringPolicy)(nil)).Elem() +} + +type SuspendVAppRequestType struct { + This ManagedObjectReference `xml:"_this"` +} + +func init() { + t["SuspendVAppRequestType"] = reflect.TypeOf((*SuspendVAppRequestType)(nil)).Elem() +} + +type SuspendVApp_Task SuspendVAppRequestType + +func init() { + t["SuspendVApp_Task"] = reflect.TypeOf((*SuspendVApp_Task)(nil)).Elem() +} + +type SuspendVApp_TaskResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + +type SuspendVMRequestType struct { + This ManagedObjectReference `xml:"_this"` +} + +func init() { + t["SuspendVMRequestType"] = reflect.TypeOf((*SuspendVMRequestType)(nil)).Elem() +} + +type SuspendVM_Task SuspendVMRequestType + +func init() { + t["SuspendVM_Task"] = 
reflect.TypeOf((*SuspendVM_Task)(nil)).Elem() +} + +type SuspendVM_TaskResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + +type SuspendedRelocateNotSupported struct { + MigrationFault +} + +func init() { + t["SuspendedRelocateNotSupported"] = reflect.TypeOf((*SuspendedRelocateNotSupported)(nil)).Elem() +} + +type SuspendedRelocateNotSupportedFault SuspendedRelocateNotSupported + +func init() { + t["SuspendedRelocateNotSupportedFault"] = reflect.TypeOf((*SuspendedRelocateNotSupportedFault)(nil)).Elem() +} + +type SwapDatastoreNotWritableOnHost struct { + DatastoreNotWritableOnHost +} + +func init() { + t["SwapDatastoreNotWritableOnHost"] = reflect.TypeOf((*SwapDatastoreNotWritableOnHost)(nil)).Elem() +} + +type SwapDatastoreNotWritableOnHostFault SwapDatastoreNotWritableOnHost + +func init() { + t["SwapDatastoreNotWritableOnHostFault"] = reflect.TypeOf((*SwapDatastoreNotWritableOnHostFault)(nil)).Elem() +} + +type SwapDatastoreUnset struct { + VimFault +} + +func init() { + t["SwapDatastoreUnset"] = reflect.TypeOf((*SwapDatastoreUnset)(nil)).Elem() +} + +type SwapDatastoreUnsetFault SwapDatastoreUnset + +func init() { + t["SwapDatastoreUnsetFault"] = reflect.TypeOf((*SwapDatastoreUnsetFault)(nil)).Elem() +} + +type SwapPlacementOverrideNotSupported struct { + InvalidVmConfig +} + +func init() { + t["SwapPlacementOverrideNotSupported"] = reflect.TypeOf((*SwapPlacementOverrideNotSupported)(nil)).Elem() +} + +type SwapPlacementOverrideNotSupportedFault SwapPlacementOverrideNotSupported + +func init() { + t["SwapPlacementOverrideNotSupportedFault"] = reflect.TypeOf((*SwapPlacementOverrideNotSupportedFault)(nil)).Elem() +} + +type SwitchIpUnset struct { + DvsFault +} + +func init() { + t["SwitchIpUnset"] = reflect.TypeOf((*SwitchIpUnset)(nil)).Elem() +} + +type SwitchIpUnsetFault SwitchIpUnset + +func init() { + t["SwitchIpUnsetFault"] = reflect.TypeOf((*SwitchIpUnsetFault)(nil)).Elem() +} + +type SwitchNotInUpgradeMode struct { + DvsFault +} + +func init() { + t["SwitchNotInUpgradeMode"] = reflect.TypeOf((*SwitchNotInUpgradeMode)(nil)).Elem() +} + +type SwitchNotInUpgradeModeFault SwitchNotInUpgradeMode + +func init() { + t["SwitchNotInUpgradeModeFault"] = reflect.TypeOf((*SwitchNotInUpgradeModeFault)(nil)).Elem() +} + +type SystemError struct { + RuntimeFault + + Reason string `xml:"reason"` +} + +func init() { + t["SystemError"] = reflect.TypeOf((*SystemError)(nil)).Elem() +} + +type SystemErrorFault SystemError + +func init() { + t["SystemErrorFault"] = reflect.TypeOf((*SystemErrorFault)(nil)).Elem() +} + +type Tag struct { + DynamicData + + Key string `xml:"key"` +} + +func init() { + t["Tag"] = reflect.TypeOf((*Tag)(nil)).Elem() +} + +type TaskDescription struct { + DynamicData + + MethodInfo []BaseElementDescription `xml:"methodInfo,typeattr"` + State []BaseElementDescription `xml:"state,typeattr"` + Reason []BaseTypeDescription `xml:"reason,typeattr"` +} + +func init() { + t["TaskDescription"] = reflect.TypeOf((*TaskDescription)(nil)).Elem() +} + +type TaskEvent struct { + Event + + Info TaskInfo `xml:"info"` +} + +func init() { + t["TaskEvent"] = reflect.TypeOf((*TaskEvent)(nil)).Elem() +} + +type TaskFilterSpec struct { + DynamicData + + Entity *TaskFilterSpecByEntity `xml:"entity,omitempty"` + Time *TaskFilterSpecByTime `xml:"time,omitempty"` + UserName *TaskFilterSpecByUsername `xml:"userName,omitempty"` + ActivationId []string `xml:"activationId,omitempty"` + State []TaskInfoState `xml:"state,omitempty"` + Alarm *ManagedObjectReference 
`xml:"alarm,omitempty"` + ScheduledTask *ManagedObjectReference `xml:"scheduledTask,omitempty"` + EventChainId []int `xml:"eventChainId,omitempty"` + Tag []string `xml:"tag,omitempty"` + ParentTaskKey []string `xml:"parentTaskKey,omitempty"` + RootTaskKey []string `xml:"rootTaskKey,omitempty"` +} + +func init() { + t["TaskFilterSpec"] = reflect.TypeOf((*TaskFilterSpec)(nil)).Elem() +} + +type TaskFilterSpecByEntity struct { + DynamicData + + Entity ManagedObjectReference `xml:"entity"` + Recursion TaskFilterSpecRecursionOption `xml:"recursion"` +} + +func init() { + t["TaskFilterSpecByEntity"] = reflect.TypeOf((*TaskFilterSpecByEntity)(nil)).Elem() +} + +type TaskFilterSpecByTime struct { + DynamicData + + TimeType TaskFilterSpecTimeOption `xml:"timeType"` + BeginTime *time.Time `xml:"beginTime"` + EndTime *time.Time `xml:"endTime"` +} + +func init() { + t["TaskFilterSpecByTime"] = reflect.TypeOf((*TaskFilterSpecByTime)(nil)).Elem() +} + +type TaskFilterSpecByUsername struct { + DynamicData + + SystemUser bool `xml:"systemUser"` + UserList []string `xml:"userList,omitempty"` +} + +func init() { + t["TaskFilterSpecByUsername"] = reflect.TypeOf((*TaskFilterSpecByUsername)(nil)).Elem() +} + +type TaskInProgress struct { + VimFault + + Task ManagedObjectReference `xml:"task"` +} + +func init() { + t["TaskInProgress"] = reflect.TypeOf((*TaskInProgress)(nil)).Elem() +} + +type TaskInProgressFault BaseTaskInProgress + +func init() { + t["TaskInProgressFault"] = reflect.TypeOf((*TaskInProgressFault)(nil)).Elem() +} + +type TaskInfo struct { + DynamicData + + Key string `xml:"key"` + Task ManagedObjectReference `xml:"task"` + Description *LocalizableMessage `xml:"description,omitempty"` + Name string `xml:"name,omitempty"` + DescriptionId string `xml:"descriptionId"` + Entity *ManagedObjectReference `xml:"entity,omitempty"` + EntityName string `xml:"entityName,omitempty"` + Locked []ManagedObjectReference `xml:"locked,omitempty"` + State TaskInfoState `xml:"state"` + Cancelled bool `xml:"cancelled"` + Cancelable bool `xml:"cancelable"` + Error *LocalizedMethodFault `xml:"error,omitempty"` + Result AnyType `xml:"result,omitempty,typeattr"` + Progress int `xml:"progress,omitempty"` + Reason BaseTaskReason `xml:"reason,typeattr"` + QueueTime time.Time `xml:"queueTime"` + StartTime *time.Time `xml:"startTime"` + CompleteTime *time.Time `xml:"completeTime"` + EventChainId int `xml:"eventChainId"` + ChangeTag string `xml:"changeTag,omitempty"` + ParentTaskKey string `xml:"parentTaskKey,omitempty"` + RootTaskKey string `xml:"rootTaskKey,omitempty"` + ActivationId string `xml:"activationId,omitempty"` +} + +func init() { + t["TaskInfo"] = reflect.TypeOf((*TaskInfo)(nil)).Elem() +} + +type TaskReason struct { + DynamicData +} + +func init() { + t["TaskReason"] = reflect.TypeOf((*TaskReason)(nil)).Elem() +} + +type TaskReasonAlarm struct { + TaskReason + + AlarmName string `xml:"alarmName"` + Alarm ManagedObjectReference `xml:"alarm"` + EntityName string `xml:"entityName"` + Entity ManagedObjectReference `xml:"entity"` +} + +func init() { + t["TaskReasonAlarm"] = reflect.TypeOf((*TaskReasonAlarm)(nil)).Elem() +} + +type TaskReasonSchedule struct { + TaskReason + + Name string `xml:"name"` + ScheduledTask ManagedObjectReference `xml:"scheduledTask"` +} + +func init() { + t["TaskReasonSchedule"] = reflect.TypeOf((*TaskReasonSchedule)(nil)).Elem() +} + +type TaskReasonSystem struct { + TaskReason +} + +func init() { + t["TaskReasonSystem"] = reflect.TypeOf((*TaskReasonSystem)(nil)).Elem() +} + +type 
TaskReasonUser struct { + TaskReason + + UserName string `xml:"userName"` +} + +func init() { + t["TaskReasonUser"] = reflect.TypeOf((*TaskReasonUser)(nil)).Elem() +} + +type TaskScheduler struct { + DynamicData + + ActiveTime *time.Time `xml:"activeTime"` + ExpireTime *time.Time `xml:"expireTime"` +} + +func init() { + t["TaskScheduler"] = reflect.TypeOf((*TaskScheduler)(nil)).Elem() +} + +type TaskTimeoutEvent struct { + TaskEvent +} + +func init() { + t["TaskTimeoutEvent"] = reflect.TypeOf((*TaskTimeoutEvent)(nil)).Elem() +} + +type TeamingMatchEvent struct { + DvsHealthStatusChangeEvent +} + +func init() { + t["TeamingMatchEvent"] = reflect.TypeOf((*TeamingMatchEvent)(nil)).Elem() +} + +type TeamingMisMatchEvent struct { + DvsHealthStatusChangeEvent +} + +func init() { + t["TeamingMisMatchEvent"] = reflect.TypeOf((*TeamingMisMatchEvent)(nil)).Elem() +} + +type TemplateBeingUpgradedEvent struct { + TemplateUpgradeEvent +} + +func init() { + t["TemplateBeingUpgradedEvent"] = reflect.TypeOf((*TemplateBeingUpgradedEvent)(nil)).Elem() +} + +type TemplateConfigFileInfo struct { + VmConfigFileInfo +} + +func init() { + t["TemplateConfigFileInfo"] = reflect.TypeOf((*TemplateConfigFileInfo)(nil)).Elem() +} + +type TemplateConfigFileQuery struct { + VmConfigFileQuery +} + +func init() { + t["TemplateConfigFileQuery"] = reflect.TypeOf((*TemplateConfigFileQuery)(nil)).Elem() +} + +type TemplateUpgradeEvent struct { + Event + + LegacyTemplate string `xml:"legacyTemplate"` +} + +func init() { + t["TemplateUpgradeEvent"] = reflect.TypeOf((*TemplateUpgradeEvent)(nil)).Elem() +} + +type TemplateUpgradeFailedEvent struct { + TemplateUpgradeEvent + + Reason LocalizedMethodFault `xml:"reason"` +} + +func init() { + t["TemplateUpgradeFailedEvent"] = reflect.TypeOf((*TemplateUpgradeFailedEvent)(nil)).Elem() +} + +type TemplateUpgradedEvent struct { + TemplateUpgradeEvent +} + +func init() { + t["TemplateUpgradedEvent"] = reflect.TypeOf((*TemplateUpgradedEvent)(nil)).Elem() +} + +type TerminateFaultTolerantVMRequestType struct { + This ManagedObjectReference `xml:"_this"` + Vm *ManagedObjectReference `xml:"vm,omitempty"` +} + +func init() { + t["TerminateFaultTolerantVMRequestType"] = reflect.TypeOf((*TerminateFaultTolerantVMRequestType)(nil)).Elem() +} + +type TerminateFaultTolerantVM_Task TerminateFaultTolerantVMRequestType + +func init() { + t["TerminateFaultTolerantVM_Task"] = reflect.TypeOf((*TerminateFaultTolerantVM_Task)(nil)).Elem() +} + +type TerminateFaultTolerantVM_TaskResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + +type TerminateProcessInGuest TerminateProcessInGuestRequestType + +func init() { + t["TerminateProcessInGuest"] = reflect.TypeOf((*TerminateProcessInGuest)(nil)).Elem() +} + +type TerminateProcessInGuestRequestType struct { + This ManagedObjectReference `xml:"_this"` + Vm ManagedObjectReference `xml:"vm"` + Auth BaseGuestAuthentication `xml:"auth,typeattr"` + Pid int64 `xml:"pid"` +} + +func init() { + t["TerminateProcessInGuestRequestType"] = reflect.TypeOf((*TerminateProcessInGuestRequestType)(nil)).Elem() +} + +type TerminateProcessInGuestResponse struct { +} + +type TerminateSession TerminateSessionRequestType + +func init() { + t["TerminateSession"] = reflect.TypeOf((*TerminateSession)(nil)).Elem() +} + +type TerminateSessionRequestType struct { + This ManagedObjectReference `xml:"_this"` + SessionId []string `xml:"sessionId"` +} + +func init() { + t["TerminateSessionRequestType"] = reflect.TypeOf((*TerminateSessionRequestType)(nil)).Elem() +} + +type 
TerminateSessionResponse struct { +} + +type TerminateVM TerminateVMRequestType + +func init() { + t["TerminateVM"] = reflect.TypeOf((*TerminateVM)(nil)).Elem() +} + +type TerminateVMRequestType struct { + This ManagedObjectReference `xml:"_this"` +} + +func init() { + t["TerminateVMRequestType"] = reflect.TypeOf((*TerminateVMRequestType)(nil)).Elem() +} + +type TerminateVMResponse struct { +} + +type ThirdPartyLicenseAssignmentFailed struct { + RuntimeFault + + Host ManagedObjectReference `xml:"host"` + Module string `xml:"module"` + Reason string `xml:"reason,omitempty"` +} + +func init() { + t["ThirdPartyLicenseAssignmentFailed"] = reflect.TypeOf((*ThirdPartyLicenseAssignmentFailed)(nil)).Elem() +} + +type ThirdPartyLicenseAssignmentFailedFault ThirdPartyLicenseAssignmentFailed + +func init() { + t["ThirdPartyLicenseAssignmentFailedFault"] = reflect.TypeOf((*ThirdPartyLicenseAssignmentFailedFault)(nil)).Elem() +} + +type TicketedSessionAuthentication struct { + GuestAuthentication + + Ticket string `xml:"ticket"` +} + +func init() { + t["TicketedSessionAuthentication"] = reflect.TypeOf((*TicketedSessionAuthentication)(nil)).Elem() +} + +type TimedOutHostOperationEvent struct { + HostEvent +} + +func init() { + t["TimedOutHostOperationEvent"] = reflect.TypeOf((*TimedOutHostOperationEvent)(nil)).Elem() +} + +type Timedout struct { + VimFault +} + +func init() { + t["Timedout"] = reflect.TypeOf((*Timedout)(nil)).Elem() +} + +type TimedoutFault BaseTimedout + +func init() { + t["TimedoutFault"] = reflect.TypeOf((*TimedoutFault)(nil)).Elem() +} + +type TooManyConcurrentNativeClones struct { + FileFault +} + +func init() { + t["TooManyConcurrentNativeClones"] = reflect.TypeOf((*TooManyConcurrentNativeClones)(nil)).Elem() +} + +type TooManyConcurrentNativeClonesFault TooManyConcurrentNativeClones + +func init() { + t["TooManyConcurrentNativeClonesFault"] = reflect.TypeOf((*TooManyConcurrentNativeClonesFault)(nil)).Elem() +} + +type TooManyConsecutiveOverrides struct { + VimFault +} + +func init() { + t["TooManyConsecutiveOverrides"] = reflect.TypeOf((*TooManyConsecutiveOverrides)(nil)).Elem() +} + +type TooManyConsecutiveOverridesFault TooManyConsecutiveOverrides + +func init() { + t["TooManyConsecutiveOverridesFault"] = reflect.TypeOf((*TooManyConsecutiveOverridesFault)(nil)).Elem() +} + +type TooManyDevices struct { + InvalidVmConfig +} + +func init() { + t["TooManyDevices"] = reflect.TypeOf((*TooManyDevices)(nil)).Elem() +} + +type TooManyDevicesFault TooManyDevices + +func init() { + t["TooManyDevicesFault"] = reflect.TypeOf((*TooManyDevicesFault)(nil)).Elem() +} + +type TooManyDisksOnLegacyHost struct { + MigrationFault + + DiskCount int `xml:"diskCount"` + TimeoutDanger bool `xml:"timeoutDanger"` +} + +func init() { + t["TooManyDisksOnLegacyHost"] = reflect.TypeOf((*TooManyDisksOnLegacyHost)(nil)).Elem() +} + +type TooManyDisksOnLegacyHostFault TooManyDisksOnLegacyHost + +func init() { + t["TooManyDisksOnLegacyHostFault"] = reflect.TypeOf((*TooManyDisksOnLegacyHostFault)(nil)).Elem() +} + +type TooManyGuestLogons struct { + GuestOperationsFault +} + +func init() { + t["TooManyGuestLogons"] = reflect.TypeOf((*TooManyGuestLogons)(nil)).Elem() +} + +type TooManyGuestLogonsFault TooManyGuestLogons + +func init() { + t["TooManyGuestLogonsFault"] = reflect.TypeOf((*TooManyGuestLogonsFault)(nil)).Elem() +} + +type TooManyHosts struct { + HostConnectFault +} + +func init() { + t["TooManyHosts"] = reflect.TypeOf((*TooManyHosts)(nil)).Elem() +} + +type TooManyHostsFault TooManyHosts + +func init() 
{ + t["TooManyHostsFault"] = reflect.TypeOf((*TooManyHostsFault)(nil)).Elem() +} + +type TooManyNativeCloneLevels struct { + FileFault +} + +func init() { + t["TooManyNativeCloneLevels"] = reflect.TypeOf((*TooManyNativeCloneLevels)(nil)).Elem() +} + +type TooManyNativeCloneLevelsFault TooManyNativeCloneLevels + +func init() { + t["TooManyNativeCloneLevelsFault"] = reflect.TypeOf((*TooManyNativeCloneLevelsFault)(nil)).Elem() +} + +type TooManyNativeClonesOnFile struct { + FileFault +} + +func init() { + t["TooManyNativeClonesOnFile"] = reflect.TypeOf((*TooManyNativeClonesOnFile)(nil)).Elem() +} + +type TooManyNativeClonesOnFileFault TooManyNativeClonesOnFile + +func init() { + t["TooManyNativeClonesOnFileFault"] = reflect.TypeOf((*TooManyNativeClonesOnFileFault)(nil)).Elem() +} + +type TooManySnapshotLevels struct { + SnapshotFault +} + +func init() { + t["TooManySnapshotLevels"] = reflect.TypeOf((*TooManySnapshotLevels)(nil)).Elem() +} + +type TooManySnapshotLevelsFault TooManySnapshotLevels + +func init() { + t["TooManySnapshotLevelsFault"] = reflect.TypeOf((*TooManySnapshotLevelsFault)(nil)).Elem() +} + +type ToolsAlreadyUpgraded struct { + VmToolsUpgradeFault +} + +func init() { + t["ToolsAlreadyUpgraded"] = reflect.TypeOf((*ToolsAlreadyUpgraded)(nil)).Elem() +} + +type ToolsAlreadyUpgradedFault ToolsAlreadyUpgraded + +func init() { + t["ToolsAlreadyUpgradedFault"] = reflect.TypeOf((*ToolsAlreadyUpgradedFault)(nil)).Elem() +} + +type ToolsAutoUpgradeNotSupported struct { + VmToolsUpgradeFault +} + +func init() { + t["ToolsAutoUpgradeNotSupported"] = reflect.TypeOf((*ToolsAutoUpgradeNotSupported)(nil)).Elem() +} + +type ToolsAutoUpgradeNotSupportedFault ToolsAutoUpgradeNotSupported + +func init() { + t["ToolsAutoUpgradeNotSupportedFault"] = reflect.TypeOf((*ToolsAutoUpgradeNotSupportedFault)(nil)).Elem() +} + +type ToolsConfigInfo struct { + DynamicData + + ToolsVersion int `xml:"toolsVersion,omitempty"` + AfterPowerOn *bool `xml:"afterPowerOn"` + AfterResume *bool `xml:"afterResume"` + BeforeGuestStandby *bool `xml:"beforeGuestStandby"` + BeforeGuestShutdown *bool `xml:"beforeGuestShutdown"` + BeforeGuestReboot *bool `xml:"beforeGuestReboot"` + ToolsUpgradePolicy string `xml:"toolsUpgradePolicy,omitempty"` + PendingCustomization string `xml:"pendingCustomization,omitempty"` + SyncTimeWithHost *bool `xml:"syncTimeWithHost"` + LastInstallInfo *ToolsConfigInfoToolsLastInstallInfo `xml:"lastInstallInfo,omitempty"` +} + +func init() { + t["ToolsConfigInfo"] = reflect.TypeOf((*ToolsConfigInfo)(nil)).Elem() +} + +type ToolsConfigInfoToolsLastInstallInfo struct { + DynamicData + + Counter int `xml:"counter"` + Fault *LocalizedMethodFault `xml:"fault,omitempty"` +} + +func init() { + t["ToolsConfigInfoToolsLastInstallInfo"] = reflect.TypeOf((*ToolsConfigInfoToolsLastInstallInfo)(nil)).Elem() +} + +type ToolsImageCopyFailed struct { + VmToolsUpgradeFault +} + +func init() { + t["ToolsImageCopyFailed"] = reflect.TypeOf((*ToolsImageCopyFailed)(nil)).Elem() +} + +type ToolsImageCopyFailedFault ToolsImageCopyFailed + +func init() { + t["ToolsImageCopyFailedFault"] = reflect.TypeOf((*ToolsImageCopyFailedFault)(nil)).Elem() +} + +type ToolsImageNotAvailable struct { + VmToolsUpgradeFault +} + +func init() { + t["ToolsImageNotAvailable"] = reflect.TypeOf((*ToolsImageNotAvailable)(nil)).Elem() +} + +type ToolsImageNotAvailableFault ToolsImageNotAvailable + +func init() { + t["ToolsImageNotAvailableFault"] = reflect.TypeOf((*ToolsImageNotAvailableFault)(nil)).Elem() +} + +type 
ToolsImageSignatureCheckFailed struct { + VmToolsUpgradeFault +} + +func init() { + t["ToolsImageSignatureCheckFailed"] = reflect.TypeOf((*ToolsImageSignatureCheckFailed)(nil)).Elem() +} + +type ToolsImageSignatureCheckFailedFault ToolsImageSignatureCheckFailed + +func init() { + t["ToolsImageSignatureCheckFailedFault"] = reflect.TypeOf((*ToolsImageSignatureCheckFailedFault)(nil)).Elem() +} + +type ToolsInstallationInProgress struct { + MigrationFault +} + +func init() { + t["ToolsInstallationInProgress"] = reflect.TypeOf((*ToolsInstallationInProgress)(nil)).Elem() +} + +type ToolsInstallationInProgressFault ToolsInstallationInProgress + +func init() { + t["ToolsInstallationInProgressFault"] = reflect.TypeOf((*ToolsInstallationInProgressFault)(nil)).Elem() +} + +type ToolsUnavailable struct { + VimFault +} + +func init() { + t["ToolsUnavailable"] = reflect.TypeOf((*ToolsUnavailable)(nil)).Elem() +} + +type ToolsUnavailableFault ToolsUnavailable + +func init() { + t["ToolsUnavailableFault"] = reflect.TypeOf((*ToolsUnavailableFault)(nil)).Elem() +} + +type ToolsUpgradeCancelled struct { + VmToolsUpgradeFault +} + +func init() { + t["ToolsUpgradeCancelled"] = reflect.TypeOf((*ToolsUpgradeCancelled)(nil)).Elem() +} + +type ToolsUpgradeCancelledFault ToolsUpgradeCancelled + +func init() { + t["ToolsUpgradeCancelledFault"] = reflect.TypeOf((*ToolsUpgradeCancelledFault)(nil)).Elem() +} + +type TraversalSpec struct { + SelectionSpec + + Type string `xml:"type"` + Path string `xml:"path"` + Skip *bool `xml:"skip"` + SelectSet []BaseSelectionSpec `xml:"selectSet,omitempty,typeattr"` +} + +func init() { + t["TraversalSpec"] = reflect.TypeOf((*TraversalSpec)(nil)).Elem() +} + +type TurnDiskLocatorLedOffRequestType struct { + This ManagedObjectReference `xml:"_this"` + ScsiDiskUuids []string `xml:"scsiDiskUuids"` +} + +func init() { + t["TurnDiskLocatorLedOffRequestType"] = reflect.TypeOf((*TurnDiskLocatorLedOffRequestType)(nil)).Elem() +} + +type TurnDiskLocatorLedOff_Task TurnDiskLocatorLedOffRequestType + +func init() { + t["TurnDiskLocatorLedOff_Task"] = reflect.TypeOf((*TurnDiskLocatorLedOff_Task)(nil)).Elem() +} + +type TurnDiskLocatorLedOff_TaskResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + +type TurnDiskLocatorLedOnRequestType struct { + This ManagedObjectReference `xml:"_this"` + ScsiDiskUuids []string `xml:"scsiDiskUuids"` +} + +func init() { + t["TurnDiskLocatorLedOnRequestType"] = reflect.TypeOf((*TurnDiskLocatorLedOnRequestType)(nil)).Elem() +} + +type TurnDiskLocatorLedOn_Task TurnDiskLocatorLedOnRequestType + +func init() { + t["TurnDiskLocatorLedOn_Task"] = reflect.TypeOf((*TurnDiskLocatorLedOn_Task)(nil)).Elem() +} + +type TurnDiskLocatorLedOn_TaskResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + +type TurnOffFaultToleranceForVMRequestType struct { + This ManagedObjectReference `xml:"_this"` +} + +func init() { + t["TurnOffFaultToleranceForVMRequestType"] = reflect.TypeOf((*TurnOffFaultToleranceForVMRequestType)(nil)).Elem() +} + +type TurnOffFaultToleranceForVM_Task TurnOffFaultToleranceForVMRequestType + +func init() { + t["TurnOffFaultToleranceForVM_Task"] = reflect.TypeOf((*TurnOffFaultToleranceForVM_Task)(nil)).Elem() +} + +type TurnOffFaultToleranceForVM_TaskResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + +type TypeDescription struct { + Description + + Key string `xml:"key"` +} + +func init() { + t["TypeDescription"] = reflect.TypeOf((*TypeDescription)(nil)).Elem() +} + +type 
UnSupportedDatastoreForVFlash struct { + UnsupportedDatastore + + DatastoreName string `xml:"datastoreName"` + Type string `xml:"type"` +} + +func init() { + t["UnSupportedDatastoreForVFlash"] = reflect.TypeOf((*UnSupportedDatastoreForVFlash)(nil)).Elem() +} + +type UnSupportedDatastoreForVFlashFault UnSupportedDatastoreForVFlash + +func init() { + t["UnSupportedDatastoreForVFlashFault"] = reflect.TypeOf((*UnSupportedDatastoreForVFlashFault)(nil)).Elem() +} + +type UnassignUserFromGroup UnassignUserFromGroupRequestType + +func init() { + t["UnassignUserFromGroup"] = reflect.TypeOf((*UnassignUserFromGroup)(nil)).Elem() +} + +type UnassignUserFromGroupRequestType struct { + This ManagedObjectReference `xml:"_this"` + User string `xml:"user"` + Group string `xml:"group"` +} + +func init() { + t["UnassignUserFromGroupRequestType"] = reflect.TypeOf((*UnassignUserFromGroupRequestType)(nil)).Elem() +} + +type UnassignUserFromGroupResponse struct { +} + +type UnbindVnic UnbindVnicRequestType + +func init() { + t["UnbindVnic"] = reflect.TypeOf((*UnbindVnic)(nil)).Elem() +} + +type UnbindVnicRequestType struct { + This ManagedObjectReference `xml:"_this"` + IScsiHbaName string `xml:"iScsiHbaName"` + VnicDevice string `xml:"vnicDevice"` + Force bool `xml:"force"` +} + +func init() { + t["UnbindVnicRequestType"] = reflect.TypeOf((*UnbindVnicRequestType)(nil)).Elem() +} + +type UnbindVnicResponse struct { +} + +type UncommittedUndoableDisk struct { + MigrationFault +} + +func init() { + t["UncommittedUndoableDisk"] = reflect.TypeOf((*UncommittedUndoableDisk)(nil)).Elem() +} + +type UncommittedUndoableDiskFault UncommittedUndoableDisk + +func init() { + t["UncommittedUndoableDiskFault"] = reflect.TypeOf((*UncommittedUndoableDiskFault)(nil)).Elem() +} + +type UnconfiguredPropertyValue struct { + InvalidPropertyValue +} + +func init() { + t["UnconfiguredPropertyValue"] = reflect.TypeOf((*UnconfiguredPropertyValue)(nil)).Elem() +} + +type UnconfiguredPropertyValueFault UnconfiguredPropertyValue + +func init() { + t["UnconfiguredPropertyValueFault"] = reflect.TypeOf((*UnconfiguredPropertyValueFault)(nil)).Elem() +} + +type UncustomizableGuest struct { + CustomizationFault + + UncustomizableGuestOS string `xml:"uncustomizableGuestOS"` +} + +func init() { + t["UncustomizableGuest"] = reflect.TypeOf((*UncustomizableGuest)(nil)).Elem() +} + +type UncustomizableGuestFault UncustomizableGuest + +func init() { + t["UncustomizableGuestFault"] = reflect.TypeOf((*UncustomizableGuestFault)(nil)).Elem() +} + +type UndeployVM UndeployVMRequestType + +func init() { + t["UndeployVM"] = reflect.TypeOf((*UndeployVM)(nil)).Elem() +} + +type UndeployVMRequestType struct { + This ManagedObjectReference `xml:"_this"` + VrpId string `xml:"vrpId"` + Vm ManagedObjectReference `xml:"vm"` + Cluster ManagedObjectReference `xml:"cluster"` +} + +func init() { + t["UndeployVMRequestType"] = reflect.TypeOf((*UndeployVMRequestType)(nil)).Elem() +} + +type UndeployVMResponse struct { +} + +type UnexpectedCustomizationFault struct { + CustomizationFault +} + +func init() { + t["UnexpectedCustomizationFault"] = reflect.TypeOf((*UnexpectedCustomizationFault)(nil)).Elem() +} + +type UnexpectedCustomizationFaultFault UnexpectedCustomizationFault + +func init() { + t["UnexpectedCustomizationFaultFault"] = reflect.TypeOf((*UnexpectedCustomizationFaultFault)(nil)).Elem() +} + +type UnexpectedFault struct { + RuntimeFault + + FaultName string `xml:"faultName"` + Fault *LocalizedMethodFault `xml:"fault,omitempty"` +} + +func init() { + 
t["UnexpectedFault"] = reflect.TypeOf((*UnexpectedFault)(nil)).Elem() +} + +type UnexpectedFaultFault UnexpectedFault + +func init() { + t["UnexpectedFaultFault"] = reflect.TypeOf((*UnexpectedFaultFault)(nil)).Elem() +} + +type UninstallHostPatchRequestType struct { + This ManagedObjectReference `xml:"_this"` + BulletinIds []string `xml:"bulletinIds,omitempty"` + Spec *HostPatchManagerPatchManagerOperationSpec `xml:"spec,omitempty"` +} + +func init() { + t["UninstallHostPatchRequestType"] = reflect.TypeOf((*UninstallHostPatchRequestType)(nil)).Elem() +} + +type UninstallHostPatch_Task UninstallHostPatchRequestType + +func init() { + t["UninstallHostPatch_Task"] = reflect.TypeOf((*UninstallHostPatch_Task)(nil)).Elem() +} + +type UninstallHostPatch_TaskResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + +type UninstallIoFilterRequestType struct { + This ManagedObjectReference `xml:"_this"` + FilterId string `xml:"filterId"` + CompRes ManagedObjectReference `xml:"compRes"` +} + +func init() { + t["UninstallIoFilterRequestType"] = reflect.TypeOf((*UninstallIoFilterRequestType)(nil)).Elem() +} + +type UninstallIoFilter_Task UninstallIoFilterRequestType + +func init() { + t["UninstallIoFilter_Task"] = reflect.TypeOf((*UninstallIoFilter_Task)(nil)).Elem() +} + +type UninstallIoFilter_TaskResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + +type UninstallService UninstallServiceRequestType + +func init() { + t["UninstallService"] = reflect.TypeOf((*UninstallService)(nil)).Elem() +} + +type UninstallServiceRequestType struct { + This ManagedObjectReference `xml:"_this"` + Id string `xml:"id"` +} + +func init() { + t["UninstallServiceRequestType"] = reflect.TypeOf((*UninstallServiceRequestType)(nil)).Elem() +} + +type UninstallServiceResponse struct { +} + +type UnlicensedVirtualMachinesEvent struct { + LicenseEvent + + Unlicensed int `xml:"unlicensed"` + Available int `xml:"available"` +} + +func init() { + t["UnlicensedVirtualMachinesEvent"] = reflect.TypeOf((*UnlicensedVirtualMachinesEvent)(nil)).Elem() +} + +type UnlicensedVirtualMachinesFoundEvent struct { + LicenseEvent + + Available int `xml:"available"` +} + +func init() { + t["UnlicensedVirtualMachinesFoundEvent"] = reflect.TypeOf((*UnlicensedVirtualMachinesFoundEvent)(nil)).Elem() +} + +type UnmapVmfsVolumeExRequestType struct { + This ManagedObjectReference `xml:"_this"` + VmfsUuid []string `xml:"vmfsUuid"` +} + +func init() { + t["UnmapVmfsVolumeExRequestType"] = reflect.TypeOf((*UnmapVmfsVolumeExRequestType)(nil)).Elem() +} + +type UnmapVmfsVolumeEx_Task UnmapVmfsVolumeExRequestType + +func init() { + t["UnmapVmfsVolumeEx_Task"] = reflect.TypeOf((*UnmapVmfsVolumeEx_Task)(nil)).Elem() +} + +type UnmapVmfsVolumeEx_TaskResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + +type UnmountDiskMappingRequestType struct { + This ManagedObjectReference `xml:"_this"` + Mapping []VsanHostDiskMapping `xml:"mapping"` +} + +func init() { + t["UnmountDiskMappingRequestType"] = reflect.TypeOf((*UnmountDiskMappingRequestType)(nil)).Elem() +} + +type UnmountDiskMapping_Task UnmountDiskMappingRequestType + +func init() { + t["UnmountDiskMapping_Task"] = reflect.TypeOf((*UnmountDiskMapping_Task)(nil)).Elem() +} + +type UnmountDiskMapping_TaskResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + +type UnmountForceMountedVmfsVolume UnmountForceMountedVmfsVolumeRequestType + +func init() { + t["UnmountForceMountedVmfsVolume"] = 
reflect.TypeOf((*UnmountForceMountedVmfsVolume)(nil)).Elem() +} + +type UnmountForceMountedVmfsVolumeRequestType struct { + This ManagedObjectReference `xml:"_this"` + VmfsUuid string `xml:"vmfsUuid"` +} + +func init() { + t["UnmountForceMountedVmfsVolumeRequestType"] = reflect.TypeOf((*UnmountForceMountedVmfsVolumeRequestType)(nil)).Elem() +} + +type UnmountForceMountedVmfsVolumeResponse struct { +} + +type UnmountToolsInstaller UnmountToolsInstallerRequestType + +func init() { + t["UnmountToolsInstaller"] = reflect.TypeOf((*UnmountToolsInstaller)(nil)).Elem() +} + +type UnmountToolsInstallerRequestType struct { + This ManagedObjectReference `xml:"_this"` +} + +func init() { + t["UnmountToolsInstallerRequestType"] = reflect.TypeOf((*UnmountToolsInstallerRequestType)(nil)).Elem() +} + +type UnmountToolsInstallerResponse struct { +} + +type UnmountVffsVolume UnmountVffsVolumeRequestType + +func init() { + t["UnmountVffsVolume"] = reflect.TypeOf((*UnmountVffsVolume)(nil)).Elem() +} + +type UnmountVffsVolumeRequestType struct { + This ManagedObjectReference `xml:"_this"` + VffsUuid string `xml:"vffsUuid"` +} + +func init() { + t["UnmountVffsVolumeRequestType"] = reflect.TypeOf((*UnmountVffsVolumeRequestType)(nil)).Elem() +} + +type UnmountVffsVolumeResponse struct { +} + +type UnmountVmfsVolume UnmountVmfsVolumeRequestType + +func init() { + t["UnmountVmfsVolume"] = reflect.TypeOf((*UnmountVmfsVolume)(nil)).Elem() +} + +type UnmountVmfsVolumeExRequestType struct { + This ManagedObjectReference `xml:"_this"` + VmfsUuid []string `xml:"vmfsUuid"` +} + +func init() { + t["UnmountVmfsVolumeExRequestType"] = reflect.TypeOf((*UnmountVmfsVolumeExRequestType)(nil)).Elem() +} + +type UnmountVmfsVolumeEx_Task UnmountVmfsVolumeExRequestType + +func init() { + t["UnmountVmfsVolumeEx_Task"] = reflect.TypeOf((*UnmountVmfsVolumeEx_Task)(nil)).Elem() +} + +type UnmountVmfsVolumeEx_TaskResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + +type UnmountVmfsVolumeRequestType struct { + This ManagedObjectReference `xml:"_this"` + VmfsUuid string `xml:"vmfsUuid"` +} + +func init() { + t["UnmountVmfsVolumeRequestType"] = reflect.TypeOf((*UnmountVmfsVolumeRequestType)(nil)).Elem() +} + +type UnmountVmfsVolumeResponse struct { +} + +type UnrecognizedHost struct { + VimFault + + HostName string `xml:"hostName"` +} + +func init() { + t["UnrecognizedHost"] = reflect.TypeOf((*UnrecognizedHost)(nil)).Elem() +} + +type UnrecognizedHostFault UnrecognizedHost + +func init() { + t["UnrecognizedHostFault"] = reflect.TypeOf((*UnrecognizedHostFault)(nil)).Elem() +} + +type UnregisterAndDestroyRequestType struct { + This ManagedObjectReference `xml:"_this"` +} + +func init() { + t["UnregisterAndDestroyRequestType"] = reflect.TypeOf((*UnregisterAndDestroyRequestType)(nil)).Elem() +} + +type UnregisterAndDestroy_Task UnregisterAndDestroyRequestType + +func init() { + t["UnregisterAndDestroy_Task"] = reflect.TypeOf((*UnregisterAndDestroy_Task)(nil)).Elem() +} + +type UnregisterAndDestroy_TaskResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + +type UnregisterExtension UnregisterExtensionRequestType + +func init() { + t["UnregisterExtension"] = reflect.TypeOf((*UnregisterExtension)(nil)).Elem() +} + +type UnregisterExtensionRequestType struct { + This ManagedObjectReference `xml:"_this"` + ExtensionKey string `xml:"extensionKey"` +} + +func init() { + t["UnregisterExtensionRequestType"] = reflect.TypeOf((*UnregisterExtensionRequestType)(nil)).Elem() +} + +type UnregisterExtensionResponse 
struct { +} + +type UnregisterVM UnregisterVMRequestType + +func init() { + t["UnregisterVM"] = reflect.TypeOf((*UnregisterVM)(nil)).Elem() +} + +type UnregisterVMRequestType struct { + This ManagedObjectReference `xml:"_this"` +} + +func init() { + t["UnregisterVMRequestType"] = reflect.TypeOf((*UnregisterVMRequestType)(nil)).Elem() +} + +type UnregisterVMResponse struct { +} + +type UnsharedSwapVMotionNotSupported struct { + MigrationFeatureNotSupported +} + +func init() { + t["UnsharedSwapVMotionNotSupported"] = reflect.TypeOf((*UnsharedSwapVMotionNotSupported)(nil)).Elem() +} + +type UnsharedSwapVMotionNotSupportedFault UnsharedSwapVMotionNotSupported + +func init() { + t["UnsharedSwapVMotionNotSupportedFault"] = reflect.TypeOf((*UnsharedSwapVMotionNotSupportedFault)(nil)).Elem() +} + +type UnsupportedDatastore struct { + VmConfigFault + + Datastore *ManagedObjectReference `xml:"datastore,omitempty"` +} + +func init() { + t["UnsupportedDatastore"] = reflect.TypeOf((*UnsupportedDatastore)(nil)).Elem() +} + +type UnsupportedDatastoreFault BaseUnsupportedDatastore + +func init() { + t["UnsupportedDatastoreFault"] = reflect.TypeOf((*UnsupportedDatastoreFault)(nil)).Elem() +} + +type UnsupportedGuest struct { + InvalidVmConfig + + UnsupportedGuestOS string `xml:"unsupportedGuestOS"` +} + +func init() { + t["UnsupportedGuest"] = reflect.TypeOf((*UnsupportedGuest)(nil)).Elem() +} + +type UnsupportedGuestFault UnsupportedGuest + +func init() { + t["UnsupportedGuestFault"] = reflect.TypeOf((*UnsupportedGuestFault)(nil)).Elem() +} + +type UnsupportedVimApiVersion struct { + VimFault + + Version string `xml:"version,omitempty"` +} + +func init() { + t["UnsupportedVimApiVersion"] = reflect.TypeOf((*UnsupportedVimApiVersion)(nil)).Elem() +} + +type UnsupportedVimApiVersionFault UnsupportedVimApiVersion + +func init() { + t["UnsupportedVimApiVersionFault"] = reflect.TypeOf((*UnsupportedVimApiVersionFault)(nil)).Elem() +} + +type UnsupportedVmxLocation struct { + VmConfigFault +} + +func init() { + t["UnsupportedVmxLocation"] = reflect.TypeOf((*UnsupportedVmxLocation)(nil)).Elem() +} + +type UnsupportedVmxLocationFault UnsupportedVmxLocation + +func init() { + t["UnsupportedVmxLocationFault"] = reflect.TypeOf((*UnsupportedVmxLocationFault)(nil)).Elem() +} + +type UnusedVirtualDiskBlocksNotScrubbed struct { + DeviceBackingNotSupported +} + +func init() { + t["UnusedVirtualDiskBlocksNotScrubbed"] = reflect.TypeOf((*UnusedVirtualDiskBlocksNotScrubbed)(nil)).Elem() +} + +type UnusedVirtualDiskBlocksNotScrubbedFault UnusedVirtualDiskBlocksNotScrubbed + +func init() { + t["UnusedVirtualDiskBlocksNotScrubbedFault"] = reflect.TypeOf((*UnusedVirtualDiskBlocksNotScrubbedFault)(nil)).Elem() +} + +type UpdateAnswerFileRequestType struct { + This ManagedObjectReference `xml:"_this"` + Host ManagedObjectReference `xml:"host"` + ConfigSpec BaseAnswerFileCreateSpec `xml:"configSpec,typeattr"` +} + +func init() { + t["UpdateAnswerFileRequestType"] = reflect.TypeOf((*UpdateAnswerFileRequestType)(nil)).Elem() +} + +type UpdateAnswerFile_Task UpdateAnswerFileRequestType + +func init() { + t["UpdateAnswerFile_Task"] = reflect.TypeOf((*UpdateAnswerFile_Task)(nil)).Elem() +} + +type UpdateAnswerFile_TaskResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + +type UpdateAssignedLicense UpdateAssignedLicenseRequestType + +func init() { + t["UpdateAssignedLicense"] = reflect.TypeOf((*UpdateAssignedLicense)(nil)).Elem() +} + +type UpdateAssignedLicenseRequestType struct { + This ManagedObjectReference 
`xml:"_this"` + Entity string `xml:"entity"` + LicenseKey string `xml:"licenseKey"` + EntityDisplayName string `xml:"entityDisplayName,omitempty"` +} + +func init() { + t["UpdateAssignedLicenseRequestType"] = reflect.TypeOf((*UpdateAssignedLicenseRequestType)(nil)).Elem() +} + +type UpdateAssignedLicenseResponse struct { + Returnval LicenseManagerLicenseInfo `xml:"returnval"` +} + +type UpdateAuthorizationRole UpdateAuthorizationRoleRequestType + +func init() { + t["UpdateAuthorizationRole"] = reflect.TypeOf((*UpdateAuthorizationRole)(nil)).Elem() +} + +type UpdateAuthorizationRoleRequestType struct { + This ManagedObjectReference `xml:"_this"` + RoleId int `xml:"roleId"` + NewName string `xml:"newName"` + PrivIds []string `xml:"privIds,omitempty"` +} + +func init() { + t["UpdateAuthorizationRoleRequestType"] = reflect.TypeOf((*UpdateAuthorizationRoleRequestType)(nil)).Elem() +} + +type UpdateAuthorizationRoleResponse struct { +} + +type UpdateBootDevice UpdateBootDeviceRequestType + +func init() { + t["UpdateBootDevice"] = reflect.TypeOf((*UpdateBootDevice)(nil)).Elem() +} + +type UpdateBootDeviceRequestType struct { + This ManagedObjectReference `xml:"_this"` + Key string `xml:"key"` +} + +func init() { + t["UpdateBootDeviceRequestType"] = reflect.TypeOf((*UpdateBootDeviceRequestType)(nil)).Elem() +} + +type UpdateBootDeviceResponse struct { +} + +type UpdateChildResourceConfiguration UpdateChildResourceConfigurationRequestType + +func init() { + t["UpdateChildResourceConfiguration"] = reflect.TypeOf((*UpdateChildResourceConfiguration)(nil)).Elem() +} + +type UpdateChildResourceConfigurationRequestType struct { + This ManagedObjectReference `xml:"_this"` + Spec []ResourceConfigSpec `xml:"spec"` +} + +func init() { + t["UpdateChildResourceConfigurationRequestType"] = reflect.TypeOf((*UpdateChildResourceConfigurationRequestType)(nil)).Elem() +} + +type UpdateChildResourceConfigurationResponse struct { +} + +type UpdateClusterProfile UpdateClusterProfileRequestType + +func init() { + t["UpdateClusterProfile"] = reflect.TypeOf((*UpdateClusterProfile)(nil)).Elem() +} + +type UpdateClusterProfileRequestType struct { + This ManagedObjectReference `xml:"_this"` + Config BaseClusterProfileConfigSpec `xml:"config,typeattr"` +} + +func init() { + t["UpdateClusterProfileRequestType"] = reflect.TypeOf((*UpdateClusterProfileRequestType)(nil)).Elem() +} + +type UpdateClusterProfileResponse struct { +} + +type UpdateConfig UpdateConfigRequestType + +func init() { + t["UpdateConfig"] = reflect.TypeOf((*UpdateConfig)(nil)).Elem() +} + +type UpdateConfigRequestType struct { + This ManagedObjectReference `xml:"_this"` + Name string `xml:"name,omitempty"` + Config *ResourceConfigSpec `xml:"config,omitempty"` +} + +func init() { + t["UpdateConfigRequestType"] = reflect.TypeOf((*UpdateConfigRequestType)(nil)).Elem() +} + +type UpdateConfigResponse struct { +} + +type UpdateConsoleIpRouteConfig UpdateConsoleIpRouteConfigRequestType + +func init() { + t["UpdateConsoleIpRouteConfig"] = reflect.TypeOf((*UpdateConsoleIpRouteConfig)(nil)).Elem() +} + +type UpdateConsoleIpRouteConfigRequestType struct { + This ManagedObjectReference `xml:"_this"` + Config BaseHostIpRouteConfig `xml:"config,typeattr"` +} + +func init() { + t["UpdateConsoleIpRouteConfigRequestType"] = reflect.TypeOf((*UpdateConsoleIpRouteConfigRequestType)(nil)).Elem() +} + +type UpdateConsoleIpRouteConfigResponse struct { +} + +type UpdateCounterLevelMapping UpdateCounterLevelMappingRequestType + +func init() { + t["UpdateCounterLevelMapping"] = 
reflect.TypeOf((*UpdateCounterLevelMapping)(nil)).Elem() +} + +type UpdateCounterLevelMappingRequestType struct { + This ManagedObjectReference `xml:"_this"` + CounterLevelMap []PerformanceManagerCounterLevelMapping `xml:"counterLevelMap"` +} + +func init() { + t["UpdateCounterLevelMappingRequestType"] = reflect.TypeOf((*UpdateCounterLevelMappingRequestType)(nil)).Elem() +} + +type UpdateCounterLevelMappingResponse struct { +} + +type UpdateDVSHealthCheckConfigRequestType struct { + This ManagedObjectReference `xml:"_this"` + HealthCheckConfig []BaseDVSHealthCheckConfig `xml:"healthCheckConfig,typeattr"` +} + +func init() { + t["UpdateDVSHealthCheckConfigRequestType"] = reflect.TypeOf((*UpdateDVSHealthCheckConfigRequestType)(nil)).Elem() +} + +type UpdateDVSHealthCheckConfig_Task UpdateDVSHealthCheckConfigRequestType + +func init() { + t["UpdateDVSHealthCheckConfig_Task"] = reflect.TypeOf((*UpdateDVSHealthCheckConfig_Task)(nil)).Elem() +} + +type UpdateDVSHealthCheckConfig_TaskResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + +type UpdateDVSLacpGroupConfigRequestType struct { + This ManagedObjectReference `xml:"_this"` + LacpGroupSpec []VMwareDvsLacpGroupSpec `xml:"lacpGroupSpec"` +} + +func init() { + t["UpdateDVSLacpGroupConfigRequestType"] = reflect.TypeOf((*UpdateDVSLacpGroupConfigRequestType)(nil)).Elem() +} + +type UpdateDVSLacpGroupConfig_Task UpdateDVSLacpGroupConfigRequestType + +func init() { + t["UpdateDVSLacpGroupConfig_Task"] = reflect.TypeOf((*UpdateDVSLacpGroupConfig_Task)(nil)).Elem() +} + +type UpdateDVSLacpGroupConfig_TaskResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + +type UpdateDateTime UpdateDateTimeRequestType + +func init() { + t["UpdateDateTime"] = reflect.TypeOf((*UpdateDateTime)(nil)).Elem() +} + +type UpdateDateTimeConfig UpdateDateTimeConfigRequestType + +func init() { + t["UpdateDateTimeConfig"] = reflect.TypeOf((*UpdateDateTimeConfig)(nil)).Elem() +} + +type UpdateDateTimeConfigRequestType struct { + This ManagedObjectReference `xml:"_this"` + Config HostDateTimeConfig `xml:"config"` +} + +func init() { + t["UpdateDateTimeConfigRequestType"] = reflect.TypeOf((*UpdateDateTimeConfigRequestType)(nil)).Elem() +} + +type UpdateDateTimeConfigResponse struct { +} + +type UpdateDateTimeRequestType struct { + This ManagedObjectReference `xml:"_this"` + DateTime time.Time `xml:"dateTime"` +} + +func init() { + t["UpdateDateTimeRequestType"] = reflect.TypeOf((*UpdateDateTimeRequestType)(nil)).Elem() +} + +type UpdateDateTimeResponse struct { +} + +type UpdateDefaultPolicy UpdateDefaultPolicyRequestType + +func init() { + t["UpdateDefaultPolicy"] = reflect.TypeOf((*UpdateDefaultPolicy)(nil)).Elem() +} + +type UpdateDefaultPolicyRequestType struct { + This ManagedObjectReference `xml:"_this"` + DefaultPolicy HostFirewallDefaultPolicy `xml:"defaultPolicy"` +} + +func init() { + t["UpdateDefaultPolicyRequestType"] = reflect.TypeOf((*UpdateDefaultPolicyRequestType)(nil)).Elem() +} + +type UpdateDefaultPolicyResponse struct { +} + +type UpdateDiskPartitions UpdateDiskPartitionsRequestType + +func init() { + t["UpdateDiskPartitions"] = reflect.TypeOf((*UpdateDiskPartitions)(nil)).Elem() +} + +type UpdateDiskPartitionsRequestType struct { + This ManagedObjectReference `xml:"_this"` + DevicePath string `xml:"devicePath"` + Spec HostDiskPartitionSpec `xml:"spec"` +} + +func init() { + t["UpdateDiskPartitionsRequestType"] = reflect.TypeOf((*UpdateDiskPartitionsRequestType)(nil)).Elem() +} + +type UpdateDiskPartitionsResponse 
struct { +} + +type UpdateDnsConfig UpdateDnsConfigRequestType + +func init() { + t["UpdateDnsConfig"] = reflect.TypeOf((*UpdateDnsConfig)(nil)).Elem() +} + +type UpdateDnsConfigRequestType struct { + This ManagedObjectReference `xml:"_this"` + Config BaseHostDnsConfig `xml:"config,typeattr"` +} + +func init() { + t["UpdateDnsConfigRequestType"] = reflect.TypeOf((*UpdateDnsConfigRequestType)(nil)).Elem() +} + +type UpdateDnsConfigResponse struct { +} + +type UpdateDvsCapability UpdateDvsCapabilityRequestType + +func init() { + t["UpdateDvsCapability"] = reflect.TypeOf((*UpdateDvsCapability)(nil)).Elem() +} + +type UpdateDvsCapabilityRequestType struct { + This ManagedObjectReference `xml:"_this"` + Capability DVSCapability `xml:"capability"` +} + +func init() { + t["UpdateDvsCapabilityRequestType"] = reflect.TypeOf((*UpdateDvsCapabilityRequestType)(nil)).Elem() +} + +type UpdateDvsCapabilityResponse struct { +} + +type UpdateExtension UpdateExtensionRequestType + +func init() { + t["UpdateExtension"] = reflect.TypeOf((*UpdateExtension)(nil)).Elem() +} + +type UpdateExtensionRequestType struct { + This ManagedObjectReference `xml:"_this"` + Extension Extension `xml:"extension"` +} + +func init() { + t["UpdateExtensionRequestType"] = reflect.TypeOf((*UpdateExtensionRequestType)(nil)).Elem() +} + +type UpdateExtensionResponse struct { +} + +type UpdateFlags UpdateFlagsRequestType + +func init() { + t["UpdateFlags"] = reflect.TypeOf((*UpdateFlags)(nil)).Elem() +} + +type UpdateFlagsRequestType struct { + This ManagedObjectReference `xml:"_this"` + FlagInfo HostFlagInfo `xml:"flagInfo"` +} + +func init() { + t["UpdateFlagsRequestType"] = reflect.TypeOf((*UpdateFlagsRequestType)(nil)).Elem() +} + +type UpdateFlagsResponse struct { +} + +type UpdateHostImageAcceptanceLevel UpdateHostImageAcceptanceLevelRequestType + +func init() { + t["UpdateHostImageAcceptanceLevel"] = reflect.TypeOf((*UpdateHostImageAcceptanceLevel)(nil)).Elem() +} + +type UpdateHostImageAcceptanceLevelRequestType struct { + This ManagedObjectReference `xml:"_this"` + NewAcceptanceLevel string `xml:"newAcceptanceLevel"` +} + +func init() { + t["UpdateHostImageAcceptanceLevelRequestType"] = reflect.TypeOf((*UpdateHostImageAcceptanceLevelRequestType)(nil)).Elem() +} + +type UpdateHostImageAcceptanceLevelResponse struct { +} + +type UpdateHostProfile UpdateHostProfileRequestType + +func init() { + t["UpdateHostProfile"] = reflect.TypeOf((*UpdateHostProfile)(nil)).Elem() +} + +type UpdateHostProfileRequestType struct { + This ManagedObjectReference `xml:"_this"` + Config BaseHostProfileConfigSpec `xml:"config,typeattr"` +} + +func init() { + t["UpdateHostProfileRequestType"] = reflect.TypeOf((*UpdateHostProfileRequestType)(nil)).Elem() +} + +type UpdateHostProfileResponse struct { +} + +type UpdateInternetScsiAdvancedOptions UpdateInternetScsiAdvancedOptionsRequestType + +func init() { + t["UpdateInternetScsiAdvancedOptions"] = reflect.TypeOf((*UpdateInternetScsiAdvancedOptions)(nil)).Elem() +} + +type UpdateInternetScsiAdvancedOptionsRequestType struct { + This ManagedObjectReference `xml:"_this"` + IScsiHbaDevice string `xml:"iScsiHbaDevice"` + TargetSet *HostInternetScsiHbaTargetSet `xml:"targetSet,omitempty"` + Options []HostInternetScsiHbaParamValue `xml:"options"` +} + +func init() { + t["UpdateInternetScsiAdvancedOptionsRequestType"] = reflect.TypeOf((*UpdateInternetScsiAdvancedOptionsRequestType)(nil)).Elem() +} + +type UpdateInternetScsiAdvancedOptionsResponse struct { +} + +type UpdateInternetScsiAlias 
UpdateInternetScsiAliasRequestType + +func init() { + t["UpdateInternetScsiAlias"] = reflect.TypeOf((*UpdateInternetScsiAlias)(nil)).Elem() +} + +type UpdateInternetScsiAliasRequestType struct { + This ManagedObjectReference `xml:"_this"` + IScsiHbaDevice string `xml:"iScsiHbaDevice"` + IScsiAlias string `xml:"iScsiAlias"` +} + +func init() { + t["UpdateInternetScsiAliasRequestType"] = reflect.TypeOf((*UpdateInternetScsiAliasRequestType)(nil)).Elem() +} + +type UpdateInternetScsiAliasResponse struct { +} + +type UpdateInternetScsiAuthenticationProperties UpdateInternetScsiAuthenticationPropertiesRequestType + +func init() { + t["UpdateInternetScsiAuthenticationProperties"] = reflect.TypeOf((*UpdateInternetScsiAuthenticationProperties)(nil)).Elem() +} + +type UpdateInternetScsiAuthenticationPropertiesRequestType struct { + This ManagedObjectReference `xml:"_this"` + IScsiHbaDevice string `xml:"iScsiHbaDevice"` + AuthenticationProperties HostInternetScsiHbaAuthenticationProperties `xml:"authenticationProperties"` + TargetSet *HostInternetScsiHbaTargetSet `xml:"targetSet,omitempty"` +} + +func init() { + t["UpdateInternetScsiAuthenticationPropertiesRequestType"] = reflect.TypeOf((*UpdateInternetScsiAuthenticationPropertiesRequestType)(nil)).Elem() +} + +type UpdateInternetScsiAuthenticationPropertiesResponse struct { +} + +type UpdateInternetScsiDigestProperties UpdateInternetScsiDigestPropertiesRequestType + +func init() { + t["UpdateInternetScsiDigestProperties"] = reflect.TypeOf((*UpdateInternetScsiDigestProperties)(nil)).Elem() +} + +type UpdateInternetScsiDigestPropertiesRequestType struct { + This ManagedObjectReference `xml:"_this"` + IScsiHbaDevice string `xml:"iScsiHbaDevice"` + TargetSet *HostInternetScsiHbaTargetSet `xml:"targetSet,omitempty"` + DigestProperties HostInternetScsiHbaDigestProperties `xml:"digestProperties"` +} + +func init() { + t["UpdateInternetScsiDigestPropertiesRequestType"] = reflect.TypeOf((*UpdateInternetScsiDigestPropertiesRequestType)(nil)).Elem() +} + +type UpdateInternetScsiDigestPropertiesResponse struct { +} + +type UpdateInternetScsiDiscoveryProperties UpdateInternetScsiDiscoveryPropertiesRequestType + +func init() { + t["UpdateInternetScsiDiscoveryProperties"] = reflect.TypeOf((*UpdateInternetScsiDiscoveryProperties)(nil)).Elem() +} + +type UpdateInternetScsiDiscoveryPropertiesRequestType struct { + This ManagedObjectReference `xml:"_this"` + IScsiHbaDevice string `xml:"iScsiHbaDevice"` + DiscoveryProperties HostInternetScsiHbaDiscoveryProperties `xml:"discoveryProperties"` +} + +func init() { + t["UpdateInternetScsiDiscoveryPropertiesRequestType"] = reflect.TypeOf((*UpdateInternetScsiDiscoveryPropertiesRequestType)(nil)).Elem() +} + +type UpdateInternetScsiDiscoveryPropertiesResponse struct { +} + +type UpdateInternetScsiIPProperties UpdateInternetScsiIPPropertiesRequestType + +func init() { + t["UpdateInternetScsiIPProperties"] = reflect.TypeOf((*UpdateInternetScsiIPProperties)(nil)).Elem() +} + +type UpdateInternetScsiIPPropertiesRequestType struct { + This ManagedObjectReference `xml:"_this"` + IScsiHbaDevice string `xml:"iScsiHbaDevice"` + IpProperties HostInternetScsiHbaIPProperties `xml:"ipProperties"` +} + +func init() { + t["UpdateInternetScsiIPPropertiesRequestType"] = reflect.TypeOf((*UpdateInternetScsiIPPropertiesRequestType)(nil)).Elem() +} + +type UpdateInternetScsiIPPropertiesResponse struct { +} + +type UpdateInternetScsiName UpdateInternetScsiNameRequestType + +func init() { + t["UpdateInternetScsiName"] = 
reflect.TypeOf((*UpdateInternetScsiName)(nil)).Elem() +} + +type UpdateInternetScsiNameRequestType struct { + This ManagedObjectReference `xml:"_this"` + IScsiHbaDevice string `xml:"iScsiHbaDevice"` + IScsiName string `xml:"iScsiName"` +} + +func init() { + t["UpdateInternetScsiNameRequestType"] = reflect.TypeOf((*UpdateInternetScsiNameRequestType)(nil)).Elem() +} + +type UpdateInternetScsiNameResponse struct { +} + +type UpdateIpConfig UpdateIpConfigRequestType + +func init() { + t["UpdateIpConfig"] = reflect.TypeOf((*UpdateIpConfig)(nil)).Elem() +} + +type UpdateIpConfigRequestType struct { + This ManagedObjectReference `xml:"_this"` + IpConfig HostIpConfig `xml:"ipConfig"` +} + +func init() { + t["UpdateIpConfigRequestType"] = reflect.TypeOf((*UpdateIpConfigRequestType)(nil)).Elem() +} + +type UpdateIpConfigResponse struct { +} + +type UpdateIpPool UpdateIpPoolRequestType + +func init() { + t["UpdateIpPool"] = reflect.TypeOf((*UpdateIpPool)(nil)).Elem() +} + +type UpdateIpPoolRequestType struct { + This ManagedObjectReference `xml:"_this"` + Dc ManagedObjectReference `xml:"dc"` + Pool IpPool `xml:"pool"` +} + +func init() { + t["UpdateIpPoolRequestType"] = reflect.TypeOf((*UpdateIpPoolRequestType)(nil)).Elem() +} + +type UpdateIpPoolResponse struct { +} + +type UpdateIpRouteConfig UpdateIpRouteConfigRequestType + +func init() { + t["UpdateIpRouteConfig"] = reflect.TypeOf((*UpdateIpRouteConfig)(nil)).Elem() +} + +type UpdateIpRouteConfigRequestType struct { + This ManagedObjectReference `xml:"_this"` + Config BaseHostIpRouteConfig `xml:"config,typeattr"` +} + +func init() { + t["UpdateIpRouteConfigRequestType"] = reflect.TypeOf((*UpdateIpRouteConfigRequestType)(nil)).Elem() +} + +type UpdateIpRouteConfigResponse struct { +} + +type UpdateIpRouteTableConfig UpdateIpRouteTableConfigRequestType + +func init() { + t["UpdateIpRouteTableConfig"] = reflect.TypeOf((*UpdateIpRouteTableConfig)(nil)).Elem() +} + +type UpdateIpRouteTableConfigRequestType struct { + This ManagedObjectReference `xml:"_this"` + Config HostIpRouteTableConfig `xml:"config"` +} + +func init() { + t["UpdateIpRouteTableConfigRequestType"] = reflect.TypeOf((*UpdateIpRouteTableConfigRequestType)(nil)).Elem() +} + +type UpdateIpRouteTableConfigResponse struct { +} + +type UpdateIpmi UpdateIpmiRequestType + +func init() { + t["UpdateIpmi"] = reflect.TypeOf((*UpdateIpmi)(nil)).Elem() +} + +type UpdateIpmiRequestType struct { + This ManagedObjectReference `xml:"_this"` + IpmiInfo HostIpmiInfo `xml:"ipmiInfo"` +} + +func init() { + t["UpdateIpmiRequestType"] = reflect.TypeOf((*UpdateIpmiRequestType)(nil)).Elem() +} + +type UpdateIpmiResponse struct { +} + +type UpdateLicense UpdateLicenseRequestType + +func init() { + t["UpdateLicense"] = reflect.TypeOf((*UpdateLicense)(nil)).Elem() +} + +type UpdateLicenseLabel UpdateLicenseLabelRequestType + +func init() { + t["UpdateLicenseLabel"] = reflect.TypeOf((*UpdateLicenseLabel)(nil)).Elem() +} + +type UpdateLicenseLabelRequestType struct { + This ManagedObjectReference `xml:"_this"` + LicenseKey string `xml:"licenseKey"` + LabelKey string `xml:"labelKey"` + LabelValue string `xml:"labelValue"` +} + +func init() { + t["UpdateLicenseLabelRequestType"] = reflect.TypeOf((*UpdateLicenseLabelRequestType)(nil)).Elem() +} + +type UpdateLicenseLabelResponse struct { +} + +type UpdateLicenseRequestType struct { + This ManagedObjectReference `xml:"_this"` + LicenseKey string `xml:"licenseKey"` + Labels []KeyValue `xml:"labels,omitempty"` +} + +func init() { + t["UpdateLicenseRequestType"] = 
reflect.TypeOf((*UpdateLicenseRequestType)(nil)).Elem() +} + +type UpdateLicenseResponse struct { + Returnval LicenseManagerLicenseInfo `xml:"returnval"` +} + +type UpdateLinkedChildren UpdateLinkedChildrenRequestType + +func init() { + t["UpdateLinkedChildren"] = reflect.TypeOf((*UpdateLinkedChildren)(nil)).Elem() +} + +type UpdateLinkedChildrenRequestType struct { + This ManagedObjectReference `xml:"_this"` + AddChangeSet []VirtualAppLinkInfo `xml:"addChangeSet,omitempty"` + RemoveSet []ManagedObjectReference `xml:"removeSet,omitempty"` +} + +func init() { + t["UpdateLinkedChildrenRequestType"] = reflect.TypeOf((*UpdateLinkedChildrenRequestType)(nil)).Elem() +} + +type UpdateLinkedChildrenResponse struct { +} + +type UpdateLocalSwapDatastore UpdateLocalSwapDatastoreRequestType + +func init() { + t["UpdateLocalSwapDatastore"] = reflect.TypeOf((*UpdateLocalSwapDatastore)(nil)).Elem() +} + +type UpdateLocalSwapDatastoreRequestType struct { + This ManagedObjectReference `xml:"_this"` + Datastore *ManagedObjectReference `xml:"datastore,omitempty"` +} + +func init() { + t["UpdateLocalSwapDatastoreRequestType"] = reflect.TypeOf((*UpdateLocalSwapDatastoreRequestType)(nil)).Elem() +} + +type UpdateLocalSwapDatastoreResponse struct { +} + +type UpdateLockdownExceptions UpdateLockdownExceptionsRequestType + +func init() { + t["UpdateLockdownExceptions"] = reflect.TypeOf((*UpdateLockdownExceptions)(nil)).Elem() +} + +type UpdateLockdownExceptionsRequestType struct { + This ManagedObjectReference `xml:"_this"` + Users []string `xml:"users,omitempty"` +} + +func init() { + t["UpdateLockdownExceptionsRequestType"] = reflect.TypeOf((*UpdateLockdownExceptionsRequestType)(nil)).Elem() +} + +type UpdateLockdownExceptionsResponse struct { +} + +type UpdateModuleOptionString UpdateModuleOptionStringRequestType + +func init() { + t["UpdateModuleOptionString"] = reflect.TypeOf((*UpdateModuleOptionString)(nil)).Elem() +} + +type UpdateModuleOptionStringRequestType struct { + This ManagedObjectReference `xml:"_this"` + Name string `xml:"name"` + Options string `xml:"options"` +} + +func init() { + t["UpdateModuleOptionStringRequestType"] = reflect.TypeOf((*UpdateModuleOptionStringRequestType)(nil)).Elem() +} + +type UpdateModuleOptionStringResponse struct { +} + +type UpdateNetworkConfig UpdateNetworkConfigRequestType + +func init() { + t["UpdateNetworkConfig"] = reflect.TypeOf((*UpdateNetworkConfig)(nil)).Elem() +} + +type UpdateNetworkConfigRequestType struct { + This ManagedObjectReference `xml:"_this"` + Config HostNetworkConfig `xml:"config"` + ChangeMode string `xml:"changeMode"` +} + +func init() { + t["UpdateNetworkConfigRequestType"] = reflect.TypeOf((*UpdateNetworkConfigRequestType)(nil)).Elem() +} + +type UpdateNetworkConfigResponse struct { + Returnval HostNetworkConfigResult `xml:"returnval"` +} + +type UpdateNetworkResourcePool UpdateNetworkResourcePoolRequestType + +func init() { + t["UpdateNetworkResourcePool"] = reflect.TypeOf((*UpdateNetworkResourcePool)(nil)).Elem() +} + +type UpdateNetworkResourcePoolRequestType struct { + This ManagedObjectReference `xml:"_this"` + ConfigSpec []DVSNetworkResourcePoolConfigSpec `xml:"configSpec"` +} + +func init() { + t["UpdateNetworkResourcePoolRequestType"] = reflect.TypeOf((*UpdateNetworkResourcePoolRequestType)(nil)).Elem() +} + +type UpdateNetworkResourcePoolResponse struct { +} + +type UpdateOptions UpdateOptionsRequestType + +func init() { + t["UpdateOptions"] = reflect.TypeOf((*UpdateOptions)(nil)).Elem() +} + +type UpdateOptionsRequestType struct { + 
+	This ManagedObjectReference `xml:"_this"`
+	ChangedValue []BaseOptionValue `xml:"changedValue,typeattr"`
+}
+
+func init() {
+	t["UpdateOptionsRequestType"] = reflect.TypeOf((*UpdateOptionsRequestType)(nil)).Elem()
+}
+
+type UpdateOptionsResponse struct {
+}
+
+type UpdatePassthruConfig UpdatePassthruConfigRequestType
+
+func init() {
+	t["UpdatePassthruConfig"] = reflect.TypeOf((*UpdatePassthruConfig)(nil)).Elem()
+}
+
+type UpdatePassthruConfigRequestType struct {
+	This ManagedObjectReference `xml:"_this"`
+	Config []BaseHostPciPassthruConfig `xml:"config,typeattr"`
+}
+
+func init() {
+	t["UpdatePassthruConfigRequestType"] = reflect.TypeOf((*UpdatePassthruConfigRequestType)(nil)).Elem()
+}
+
+type UpdatePassthruConfigResponse struct {
+}
+
+type UpdatePerfInterval UpdatePerfIntervalRequestType
+
+func init() {
+	t["UpdatePerfInterval"] = reflect.TypeOf((*UpdatePerfInterval)(nil)).Elem()
+}
+
+type UpdatePerfIntervalRequestType struct {
+	This ManagedObjectReference `xml:"_this"`
+	Interval PerfInterval `xml:"interval"`
+}
+
+func init() {
+	t["UpdatePerfIntervalRequestType"] = reflect.TypeOf((*UpdatePerfIntervalRequestType)(nil)).Elem()
+}
+
+type UpdatePerfIntervalResponse struct {
+}
+
+type UpdatePhysicalNicLinkSpeed UpdatePhysicalNicLinkSpeedRequestType
+
+func init() {
+	t["UpdatePhysicalNicLinkSpeed"] = reflect.TypeOf((*UpdatePhysicalNicLinkSpeed)(nil)).Elem()
+}
+
+type UpdatePhysicalNicLinkSpeedRequestType struct {
+	This ManagedObjectReference `xml:"_this"`
+	Device string `xml:"device"`
+	LinkSpeed *PhysicalNicLinkInfo `xml:"linkSpeed,omitempty"`
+}
+
+func init() {
+	t["UpdatePhysicalNicLinkSpeedRequestType"] = reflect.TypeOf((*UpdatePhysicalNicLinkSpeedRequestType)(nil)).Elem()
+}
+
+type UpdatePhysicalNicLinkSpeedResponse struct {
+}
+
+type UpdatePortGroup UpdatePortGroupRequestType
+
+func init() {
+	t["UpdatePortGroup"] = reflect.TypeOf((*UpdatePortGroup)(nil)).Elem()
+}
+
+type UpdatePortGroupRequestType struct {
+	This ManagedObjectReference `xml:"_this"`
+	PgName string `xml:"pgName"`
+	Portgrp HostPortGroupSpec `xml:"portgrp"`
+}
+
+func init() {
+	t["UpdatePortGroupRequestType"] = reflect.TypeOf((*UpdatePortGroupRequestType)(nil)).Elem()
+}
+
+type UpdatePortGroupResponse struct {
+}
+
+type UpdateProgress UpdateProgressRequestType
+
+func init() {
+	t["UpdateProgress"] = reflect.TypeOf((*UpdateProgress)(nil)).Elem()
+}
+
+type UpdateProgressRequestType struct {
+	This ManagedObjectReference `xml:"_this"`
+	PercentDone int `xml:"percentDone"`
+}
+
+func init() {
+	t["UpdateProgressRequestType"] = reflect.TypeOf((*UpdateProgressRequestType)(nil)).Elem()
+}
+
+type UpdateProgressResponse struct {
+}
+
+type UpdateReferenceHost UpdateReferenceHostRequestType
+
+func init() {
+	t["UpdateReferenceHost"] = reflect.TypeOf((*UpdateReferenceHost)(nil)).Elem()
+}
+
+type UpdateReferenceHostRequestType struct {
+	This ManagedObjectReference `xml:"_this"`
+	Host *ManagedObjectReference `xml:"host,omitempty"`
+}
+
+func init() {
+	t["UpdateReferenceHostRequestType"] = reflect.TypeOf((*UpdateReferenceHostRequestType)(nil)).Elem()
+}
+
+type UpdateReferenceHostResponse struct {
+}
+
+type UpdateRuleset UpdateRulesetRequestType
+
+func init() {
+	t["UpdateRuleset"] = reflect.TypeOf((*UpdateRuleset)(nil)).Elem()
+}
+
+type UpdateRulesetRequestType struct {
+	This ManagedObjectReference `xml:"_this"`
+	Id string `xml:"id"`
+	Spec HostFirewallRulesetRulesetSpec `xml:"spec"`
+}
+
+func init() {
+	t["UpdateRulesetRequestType"] = reflect.TypeOf((*UpdateRulesetRequestType)(nil)).Elem()
+}
+
+type UpdateRulesetResponse struct {
+}
+
+type UpdateScsiLunDisplayName UpdateScsiLunDisplayNameRequestType
+
+func init() {
+	t["UpdateScsiLunDisplayName"] = reflect.TypeOf((*UpdateScsiLunDisplayName)(nil)).Elem()
+}
+
+type UpdateScsiLunDisplayNameRequestType struct {
+	This ManagedObjectReference `xml:"_this"`
+	LunUuid string `xml:"lunUuid"`
+	DisplayName string `xml:"displayName"`
+}
+
+func init() {
+	t["UpdateScsiLunDisplayNameRequestType"] = reflect.TypeOf((*UpdateScsiLunDisplayNameRequestType)(nil)).Elem()
+}
+
+type UpdateScsiLunDisplayNameResponse struct {
+}
+
+type UpdateServiceConsoleVirtualNic UpdateServiceConsoleVirtualNicRequestType
+
+func init() {
+	t["UpdateServiceConsoleVirtualNic"] = reflect.TypeOf((*UpdateServiceConsoleVirtualNic)(nil)).Elem()
+}
+
+type UpdateServiceConsoleVirtualNicRequestType struct {
+	This ManagedObjectReference `xml:"_this"`
+	Device string `xml:"device"`
+	Nic HostVirtualNicSpec `xml:"nic"`
+}
+
+func init() {
+	t["UpdateServiceConsoleVirtualNicRequestType"] = reflect.TypeOf((*UpdateServiceConsoleVirtualNicRequestType)(nil)).Elem()
+}
+
+type UpdateServiceConsoleVirtualNicResponse struct {
+}
+
+type UpdateServiceMessage UpdateServiceMessageRequestType
+
+func init() {
+	t["UpdateServiceMessage"] = reflect.TypeOf((*UpdateServiceMessage)(nil)).Elem()
+}
+
+type UpdateServiceMessageRequestType struct {
+	This ManagedObjectReference `xml:"_this"`
+	Message string `xml:"message"`
+}
+
+func init() {
+	t["UpdateServiceMessageRequestType"] = reflect.TypeOf((*UpdateServiceMessageRequestType)(nil)).Elem()
+}
+
+type UpdateServiceMessageResponse struct {
+}
+
+type UpdateServicePolicy UpdateServicePolicyRequestType
+
+func init() {
+	t["UpdateServicePolicy"] = reflect.TypeOf((*UpdateServicePolicy)(nil)).Elem()
+}
+
+type UpdateServicePolicyRequestType struct {
+	This ManagedObjectReference `xml:"_this"`
+	Id string `xml:"id"`
+	Policy string `xml:"policy"`
+}
+
+func init() {
+	t["UpdateServicePolicyRequestType"] = reflect.TypeOf((*UpdateServicePolicyRequestType)(nil)).Elem()
+}
+
+type UpdateServicePolicyResponse struct {
+}
+
+type UpdateSet struct {
+	DynamicData
+
+	Version string `xml:"version"`
+	FilterSet []PropertyFilterUpdate `xml:"filterSet,omitempty"`
+	Truncated *bool `xml:"truncated"`
+}
+
+func init() {
+	t["UpdateSet"] = reflect.TypeOf((*UpdateSet)(nil)).Elem()
+}
+
+type UpdateSoftwareInternetScsiEnabled UpdateSoftwareInternetScsiEnabledRequestType
+
+func init() {
+	t["UpdateSoftwareInternetScsiEnabled"] = reflect.TypeOf((*UpdateSoftwareInternetScsiEnabled)(nil)).Elem()
+}
+
+type UpdateSoftwareInternetScsiEnabledRequestType struct {
+	This ManagedObjectReference `xml:"_this"`
+	Enabled bool `xml:"enabled"`
+}
+
+func init() {
+	t["UpdateSoftwareInternetScsiEnabledRequestType"] = reflect.TypeOf((*UpdateSoftwareInternetScsiEnabledRequestType)(nil)).Elem()
+}
+
+type UpdateSoftwareInternetScsiEnabledResponse struct {
+}
+
+type UpdateSystemResources UpdateSystemResourcesRequestType
+
+func init() {
+	t["UpdateSystemResources"] = reflect.TypeOf((*UpdateSystemResources)(nil)).Elem()
+}
+
+type UpdateSystemResourcesRequestType struct {
+	This ManagedObjectReference `xml:"_this"`
+	ResourceInfo HostSystemResourceInfo `xml:"resourceInfo"`
+}
+
+func init() {
+	t["UpdateSystemResourcesRequestType"] = reflect.TypeOf((*UpdateSystemResourcesRequestType)(nil)).Elem()
+}
+
+type UpdateSystemResourcesResponse struct {
+}
+
+type UpdateSystemSwapConfiguration UpdateSystemSwapConfigurationRequestType
+
+func init() {
t["UpdateSystemSwapConfiguration"] = reflect.TypeOf((*UpdateSystemSwapConfiguration)(nil)).Elem() +} + +type UpdateSystemSwapConfigurationRequestType struct { + This ManagedObjectReference `xml:"_this"` + SysSwapConfig HostSystemSwapConfiguration `xml:"sysSwapConfig"` +} + +func init() { + t["UpdateSystemSwapConfigurationRequestType"] = reflect.TypeOf((*UpdateSystemSwapConfigurationRequestType)(nil)).Elem() +} + +type UpdateSystemSwapConfigurationResponse struct { +} + +type UpdateSystemUsers UpdateSystemUsersRequestType + +func init() { + t["UpdateSystemUsers"] = reflect.TypeOf((*UpdateSystemUsers)(nil)).Elem() +} + +type UpdateSystemUsersRequestType struct { + This ManagedObjectReference `xml:"_this"` + Users []string `xml:"users,omitempty"` +} + +func init() { + t["UpdateSystemUsersRequestType"] = reflect.TypeOf((*UpdateSystemUsersRequestType)(nil)).Elem() +} + +type UpdateSystemUsersResponse struct { +} + +type UpdateUser UpdateUserRequestType + +func init() { + t["UpdateUser"] = reflect.TypeOf((*UpdateUser)(nil)).Elem() +} + +type UpdateUserRequestType struct { + This ManagedObjectReference `xml:"_this"` + User BaseHostAccountSpec `xml:"user,typeattr"` +} + +func init() { + t["UpdateUserRequestType"] = reflect.TypeOf((*UpdateUserRequestType)(nil)).Elem() +} + +type UpdateUserResponse struct { +} + +type UpdateVAppConfig UpdateVAppConfigRequestType + +func init() { + t["UpdateVAppConfig"] = reflect.TypeOf((*UpdateVAppConfig)(nil)).Elem() +} + +type UpdateVAppConfigRequestType struct { + This ManagedObjectReference `xml:"_this"` + Spec VAppConfigSpec `xml:"spec"` +} + +func init() { + t["UpdateVAppConfigRequestType"] = reflect.TypeOf((*UpdateVAppConfigRequestType)(nil)).Elem() +} + +type UpdateVAppConfigResponse struct { +} + +type UpdateVRP UpdateVRPRequestType + +func init() { + t["UpdateVRP"] = reflect.TypeOf((*UpdateVRP)(nil)).Elem() +} + +type UpdateVRPRequestType struct { + This ManagedObjectReference `xml:"_this"` + Spec VRPEditSpec `xml:"spec"` +} + +func init() { + t["UpdateVRPRequestType"] = reflect.TypeOf((*UpdateVRPRequestType)(nil)).Elem() +} + +type UpdateVRPResponse struct { +} + +type UpdateVirtualMachineFilesRequestType struct { + This ManagedObjectReference `xml:"_this"` + MountPathDatastoreMapping []DatastoreMountPathDatastorePair `xml:"mountPathDatastoreMapping"` +} + +func init() { + t["UpdateVirtualMachineFilesRequestType"] = reflect.TypeOf((*UpdateVirtualMachineFilesRequestType)(nil)).Elem() +} + +type UpdateVirtualMachineFilesResult struct { + DynamicData + + FailedVmFile []UpdateVirtualMachineFilesResultFailedVmFileInfo `xml:"failedVmFile,omitempty"` +} + +func init() { + t["UpdateVirtualMachineFilesResult"] = reflect.TypeOf((*UpdateVirtualMachineFilesResult)(nil)).Elem() +} + +type UpdateVirtualMachineFilesResultFailedVmFileInfo struct { + DynamicData + + VmFile string `xml:"vmFile"` + Fault LocalizedMethodFault `xml:"fault"` +} + +func init() { + t["UpdateVirtualMachineFilesResultFailedVmFileInfo"] = reflect.TypeOf((*UpdateVirtualMachineFilesResultFailedVmFileInfo)(nil)).Elem() +} + +type UpdateVirtualMachineFiles_Task UpdateVirtualMachineFilesRequestType + +func init() { + t["UpdateVirtualMachineFiles_Task"] = reflect.TypeOf((*UpdateVirtualMachineFiles_Task)(nil)).Elem() +} + +type UpdateVirtualMachineFiles_TaskResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + +type UpdateVirtualNic UpdateVirtualNicRequestType + +func init() { + t["UpdateVirtualNic"] = reflect.TypeOf((*UpdateVirtualNic)(nil)).Elem() +} + +type 
+type UpdateVirtualNicRequestType struct {
+	This ManagedObjectReference `xml:"_this"`
+	Device string `xml:"device"`
+	Nic HostVirtualNicSpec `xml:"nic"`
+}
+
+func init() {
+	t["UpdateVirtualNicRequestType"] = reflect.TypeOf((*UpdateVirtualNicRequestType)(nil)).Elem()
+}
+
+type UpdateVirtualNicResponse struct {
+}
+
+type UpdateVirtualSwitch UpdateVirtualSwitchRequestType
+
+func init() {
+	t["UpdateVirtualSwitch"] = reflect.TypeOf((*UpdateVirtualSwitch)(nil)).Elem()
+}
+
+type UpdateVirtualSwitchRequestType struct {
+	This ManagedObjectReference `xml:"_this"`
+	VswitchName string `xml:"vswitchName"`
+	Spec HostVirtualSwitchSpec `xml:"spec"`
+}
+
+func init() {
+	t["UpdateVirtualSwitchRequestType"] = reflect.TypeOf((*UpdateVirtualSwitchRequestType)(nil)).Elem()
+}
+
+type UpdateVirtualSwitchResponse struct {
+}
+
+type UpdateVsanRequestType struct {
+	This ManagedObjectReference `xml:"_this"`
+	Config VsanHostConfigInfo `xml:"config"`
+}
+
+func init() {
+	t["UpdateVsanRequestType"] = reflect.TypeOf((*UpdateVsanRequestType)(nil)).Elem()
+}
+
+type UpdateVsan_Task UpdateVsanRequestType
+
+func init() {
+	t["UpdateVsan_Task"] = reflect.TypeOf((*UpdateVsan_Task)(nil)).Elem()
+}
+
+type UpdateVsan_TaskResponse struct {
+	Returnval ManagedObjectReference `xml:"returnval"`
+}
+
+type UpdatedAgentBeingRestartedEvent struct {
+	HostEvent
+}
+
+func init() {
+	t["UpdatedAgentBeingRestartedEvent"] = reflect.TypeOf((*UpdatedAgentBeingRestartedEvent)(nil)).Elem()
+}
+
+type UpgradeEvent struct {
+	Event
+
+	Message string `xml:"message"`
+}
+
+func init() {
+	t["UpgradeEvent"] = reflect.TypeOf((*UpgradeEvent)(nil)).Elem()
+}
+
+type UpgradeIoFilterRequestType struct {
+	This ManagedObjectReference `xml:"_this"`
+	FilterId string `xml:"filterId"`
+	CompRes ManagedObjectReference `xml:"compRes"`
+	VibUrl string `xml:"vibUrl"`
+}
+
+func init() {
+	t["UpgradeIoFilterRequestType"] = reflect.TypeOf((*UpgradeIoFilterRequestType)(nil)).Elem()
+}
+
+type UpgradeIoFilter_Task UpgradeIoFilterRequestType
+
+func init() {
+	t["UpgradeIoFilter_Task"] = reflect.TypeOf((*UpgradeIoFilter_Task)(nil)).Elem()
+}
+
+type UpgradeIoFilter_TaskResponse struct {
+	Returnval ManagedObjectReference `xml:"returnval"`
+}
+
+type UpgradeToolsRequestType struct {
+	This ManagedObjectReference `xml:"_this"`
+	InstallerOptions string `xml:"installerOptions,omitempty"`
+}
+
+func init() {
+	t["UpgradeToolsRequestType"] = reflect.TypeOf((*UpgradeToolsRequestType)(nil)).Elem()
+}
+
+type UpgradeTools_Task UpgradeToolsRequestType
+
+func init() {
+	t["UpgradeTools_Task"] = reflect.TypeOf((*UpgradeTools_Task)(nil)).Elem()
+}
+
+type UpgradeTools_TaskResponse struct {
+	Returnval ManagedObjectReference `xml:"returnval"`
+}
+
+type UpgradeVMRequestType struct {
+	This ManagedObjectReference `xml:"_this"`
+	Version string `xml:"version,omitempty"`
+}
+
+func init() {
+	t["UpgradeVMRequestType"] = reflect.TypeOf((*UpgradeVMRequestType)(nil)).Elem()
+}
+
+type UpgradeVM_Task UpgradeVMRequestType
+
+func init() {
+	t["UpgradeVM_Task"] = reflect.TypeOf((*UpgradeVM_Task)(nil)).Elem()
+}
+
+type UpgradeVM_TaskResponse struct {
+	Returnval ManagedObjectReference `xml:"returnval"`
+}
+
+type UpgradeVmLayout UpgradeVmLayoutRequestType
+
+func init() {
+	t["UpgradeVmLayout"] = reflect.TypeOf((*UpgradeVmLayout)(nil)).Elem()
+}
+
+type UpgradeVmLayoutRequestType struct {
+	This ManagedObjectReference `xml:"_this"`
+}
+
+func init() {
+	t["UpgradeVmLayoutRequestType"] = reflect.TypeOf((*UpgradeVmLayoutRequestType)(nil)).Elem()
+}
+
+type UpgradeVmLayoutResponse struct {
+}
+
+type UpgradeVmfs UpgradeVmfsRequestType
+
+func init() {
+	t["UpgradeVmfs"] = reflect.TypeOf((*UpgradeVmfs)(nil)).Elem()
+}
+
+type UpgradeVmfsRequestType struct {
+	This ManagedObjectReference `xml:"_this"`
+	VmfsPath string `xml:"vmfsPath"`
+}
+
+func init() {
+	t["UpgradeVmfsRequestType"] = reflect.TypeOf((*UpgradeVmfsRequestType)(nil)).Elem()
+}
+
+type UpgradeVmfsResponse struct {
+}
+
+type UpgradeVsanObjects UpgradeVsanObjectsRequestType
+
+func init() {
+	t["UpgradeVsanObjects"] = reflect.TypeOf((*UpgradeVsanObjects)(nil)).Elem()
+}
+
+type UpgradeVsanObjectsRequestType struct {
+	This ManagedObjectReference `xml:"_this"`
+	Uuids []string `xml:"uuids"`
+	NewVersion int `xml:"newVersion"`
+}
+
+func init() {
+	t["UpgradeVsanObjectsRequestType"] = reflect.TypeOf((*UpgradeVsanObjectsRequestType)(nil)).Elem()
+}
+
+type UpgradeVsanObjectsResponse struct {
+	Returnval []HostVsanInternalSystemVsanObjectOperationResult `xml:"returnval,omitempty"`
+}
+
+type UplinkPortMtuNotSupportEvent struct {
+	DvsHealthStatusChangeEvent
+}
+
+func init() {
+	t["UplinkPortMtuNotSupportEvent"] = reflect.TypeOf((*UplinkPortMtuNotSupportEvent)(nil)).Elem()
+}
+
+type UplinkPortMtuSupportEvent struct {
+	DvsHealthStatusChangeEvent
+}
+
+func init() {
+	t["UplinkPortMtuSupportEvent"] = reflect.TypeOf((*UplinkPortMtuSupportEvent)(nil)).Elem()
+}
+
+type UplinkPortVlanTrunkedEvent struct {
+	DvsHealthStatusChangeEvent
+}
+
+func init() {
+	t["UplinkPortVlanTrunkedEvent"] = reflect.TypeOf((*UplinkPortVlanTrunkedEvent)(nil)).Elem()
+}
+
+type UplinkPortVlanUntrunkedEvent struct {
+	DvsHealthStatusChangeEvent
+}
+
+func init() {
+	t["UplinkPortVlanUntrunkedEvent"] = reflect.TypeOf((*UplinkPortVlanUntrunkedEvent)(nil)).Elem()
+}
+
+type UserAssignedToGroup struct {
+	HostEvent
+
+	UserLogin string `xml:"userLogin"`
+	Group string `xml:"group"`
+}
+
+func init() {
+	t["UserAssignedToGroup"] = reflect.TypeOf((*UserAssignedToGroup)(nil)).Elem()
+}
+
+type UserGroupProfile struct {
+	ApplyProfile
+
+	Key string `xml:"key"`
+}
+
+func init() {
+	t["UserGroupProfile"] = reflect.TypeOf((*UserGroupProfile)(nil)).Elem()
+}
+
+type UserInputRequiredParameterMetadata struct {
+	ProfilePolicyOptionMetadata
+
+	UserInputParameter []ProfileParameterMetadata `xml:"userInputParameter,omitempty"`
+}
+
+func init() {
+	t["UserInputRequiredParameterMetadata"] = reflect.TypeOf((*UserInputRequiredParameterMetadata)(nil)).Elem()
+}
+
+type UserLoginSessionEvent struct {
+	SessionEvent
+
+	IpAddress string `xml:"ipAddress"`
+	UserAgent string `xml:"userAgent,omitempty"`
+	Locale string `xml:"locale"`
+	SessionId string `xml:"sessionId"`
+}
+
+func init() {
+	t["UserLoginSessionEvent"] = reflect.TypeOf((*UserLoginSessionEvent)(nil)).Elem()
+}
+
+type UserLogoutSessionEvent struct {
+	SessionEvent
+
+	IpAddress string `xml:"ipAddress,omitempty"`
+	UserAgent string `xml:"userAgent,omitempty"`
+	CallCount int64 `xml:"callCount,omitempty"`
+	SessionId string `xml:"sessionId,omitempty"`
+	LoginTime *time.Time `xml:"loginTime"`
+}
+
+func init() {
+	t["UserLogoutSessionEvent"] = reflect.TypeOf((*UserLogoutSessionEvent)(nil)).Elem()
+}
+
+type UserNotFound struct {
+	VimFault
+
+	Principal string `xml:"principal"`
+	Unresolved bool `xml:"unresolved"`
+}
+
+func init() {
+	t["UserNotFound"] = reflect.TypeOf((*UserNotFound)(nil)).Elem()
+}
+
+type UserNotFoundFault UserNotFound
+
+func init() {
+	t["UserNotFoundFault"] = reflect.TypeOf((*UserNotFoundFault)(nil)).Elem()
+}
+
+type UserPasswordChanged struct {
+	HostEvent
+
+	UserLogin string `xml:"userLogin"`
+}
+
+func init() {
+	t["UserPasswordChanged"] = reflect.TypeOf((*UserPasswordChanged)(nil)).Elem()
+}
+
+type UserProfile struct {
+	ApplyProfile
+
+	Key string `xml:"key"`
+}
+
+func init() {
+	t["UserProfile"] = reflect.TypeOf((*UserProfile)(nil)).Elem()
+}
+
+type UserSearchResult struct {
+	DynamicData
+
+	Principal string `xml:"principal"`
+	FullName string `xml:"fullName,omitempty"`
+	Group bool `xml:"group"`
+}
+
+func init() {
+	t["UserSearchResult"] = reflect.TypeOf((*UserSearchResult)(nil)).Elem()
+}
+
+type UserSession struct {
+	DynamicData
+
+	Key string `xml:"key"`
+	UserName string `xml:"userName"`
+	FullName string `xml:"fullName"`
+	LoginTime time.Time `xml:"loginTime"`
+	LastActiveTime time.Time `xml:"lastActiveTime"`
+	Locale string `xml:"locale"`
+	MessageLocale string `xml:"messageLocale"`
+	ExtensionSession *bool `xml:"extensionSession"`
+	IpAddress string `xml:"ipAddress,omitempty"`
+	UserAgent string `xml:"userAgent,omitempty"`
+	CallCount int64 `xml:"callCount,omitempty"`
+}
+
+func init() {
+	t["UserSession"] = reflect.TypeOf((*UserSession)(nil)).Elem()
+}
+
+type UserUnassignedFromGroup struct {
+	HostEvent
+
+	UserLogin string `xml:"userLogin"`
+	Group string `xml:"group"`
+}
+
+func init() {
+	t["UserUnassignedFromGroup"] = reflect.TypeOf((*UserUnassignedFromGroup)(nil)).Elem()
+}
+
+type UserUpgradeEvent struct {
+	UpgradeEvent
+}
+
+func init() {
+	t["UserUpgradeEvent"] = reflect.TypeOf((*UserUpgradeEvent)(nil)).Elem()
+}
+
+type VASAStorageArray struct {
+	DynamicData
+
+	Name string `xml:"name"`
+	Uuid string `xml:"uuid"`
+	VendorId string `xml:"vendorId"`
+	ModelId string `xml:"modelId"`
+}
+
+func init() {
+	t["VASAStorageArray"] = reflect.TypeOf((*VASAStorageArray)(nil)).Elem()
+}
+
+type VAppCloneSpec struct {
+	DynamicData
+
+	Location ManagedObjectReference `xml:"location"`
+	Host *ManagedObjectReference `xml:"host,omitempty"`
+	ResourceSpec *ResourceConfigSpec `xml:"resourceSpec,omitempty"`
+	VmFolder *ManagedObjectReference `xml:"vmFolder,omitempty"`
+	NetworkMapping []VAppCloneSpecNetworkMappingPair `xml:"networkMapping,omitempty"`
+	Property []KeyValue `xml:"property,omitempty"`
+	ResourceMapping []VAppCloneSpecResourceMap `xml:"resourceMapping,omitempty"`
+	Provisioning string `xml:"provisioning,omitempty"`
+}
+
+func init() {
+	t["VAppCloneSpec"] = reflect.TypeOf((*VAppCloneSpec)(nil)).Elem()
+}
+
+type VAppCloneSpecNetworkMappingPair struct {
+	DynamicData
+
+	Source ManagedObjectReference `xml:"source"`
+	Destination ManagedObjectReference `xml:"destination"`
+}
+
+func init() {
+	t["VAppCloneSpecNetworkMappingPair"] = reflect.TypeOf((*VAppCloneSpecNetworkMappingPair)(nil)).Elem()
+}
+
+type VAppCloneSpecResourceMap struct {
+	DynamicData
+
+	Source ManagedObjectReference `xml:"source"`
+	Parent *ManagedObjectReference `xml:"parent,omitempty"`
+	ResourceSpec *ResourceConfigSpec `xml:"resourceSpec,omitempty"`
+	Location *ManagedObjectReference `xml:"location,omitempty"`
+}
+
+func init() {
+	t["VAppCloneSpecResourceMap"] = reflect.TypeOf((*VAppCloneSpecResourceMap)(nil)).Elem()
+}
+
+type VAppConfigFault struct {
+	VimFault
+}
+
+func init() {
+	t["VAppConfigFault"] = reflect.TypeOf((*VAppConfigFault)(nil)).Elem()
+}
+
+type VAppConfigFaultFault BaseVAppConfigFault
+
+func init() {
+	t["VAppConfigFaultFault"] = reflect.TypeOf((*VAppConfigFaultFault)(nil)).Elem()
+}
+
+type VAppConfigInfo struct {
+	VmConfigInfo
+
+	EntityConfig []VAppEntityConfigInfo `xml:"entityConfig,omitempty"`
+	Annotation string `xml:"annotation"`
+	InstanceUuid string `xml:"instanceUuid,omitempty"`
+	ManagedBy *ManagedByInfo `xml:"managedBy,omitempty"`
+}
+
+func init() {
+	t["VAppConfigInfo"] = reflect.TypeOf((*VAppConfigInfo)(nil)).Elem()
+}
+
+type VAppConfigSpec struct {
+	VmConfigSpec
+
+	EntityConfig []VAppEntityConfigInfo `xml:"entityConfig,omitempty"`
+	Annotation string `xml:"annotation,omitempty"`
+	InstanceUuid string `xml:"instanceUuid,omitempty"`
+	ManagedBy *ManagedByInfo `xml:"managedBy,omitempty"`
+}
+
+func init() {
+	t["VAppConfigSpec"] = reflect.TypeOf((*VAppConfigSpec)(nil)).Elem()
+}
+
+type VAppEntityConfigInfo struct {
+	DynamicData
+
+	Key *ManagedObjectReference `xml:"key,omitempty"`
+	Tag string `xml:"tag,omitempty"`
+	StartOrder int `xml:"startOrder,omitempty"`
+	StartDelay int `xml:"startDelay,omitempty"`
+	WaitingForGuest *bool `xml:"waitingForGuest"`
+	StartAction string `xml:"startAction,omitempty"`
+	StopDelay int `xml:"stopDelay,omitempty"`
+	StopAction string `xml:"stopAction,omitempty"`
+	DestroyWithParent *bool `xml:"destroyWithParent"`
+}
+
+func init() {
+	t["VAppEntityConfigInfo"] = reflect.TypeOf((*VAppEntityConfigInfo)(nil)).Elem()
+}
+
+type VAppIPAssignmentInfo struct {
+	DynamicData
+
+	SupportedAllocationScheme []string `xml:"supportedAllocationScheme,omitempty"`
+	IpAllocationPolicy string `xml:"ipAllocationPolicy,omitempty"`
+	SupportedIpProtocol []string `xml:"supportedIpProtocol,omitempty"`
+	IpProtocol string `xml:"ipProtocol,omitempty"`
+}
+
+func init() {
+	t["VAppIPAssignmentInfo"] = reflect.TypeOf((*VAppIPAssignmentInfo)(nil)).Elem()
+}
+
+type VAppNotRunning struct {
+	VmConfigFault
+}
+
+func init() {
+	t["VAppNotRunning"] = reflect.TypeOf((*VAppNotRunning)(nil)).Elem()
+}
+
+type VAppNotRunningFault VAppNotRunning
+
+func init() {
+	t["VAppNotRunningFault"] = reflect.TypeOf((*VAppNotRunningFault)(nil)).Elem()
+}
+
+type VAppOperationInProgress struct {
+	RuntimeFault
+}
+
+func init() {
+	t["VAppOperationInProgress"] = reflect.TypeOf((*VAppOperationInProgress)(nil)).Elem()
+}
+
+type VAppOperationInProgressFault VAppOperationInProgress
+
+func init() {
+	t["VAppOperationInProgressFault"] = reflect.TypeOf((*VAppOperationInProgressFault)(nil)).Elem()
+}
+
+type VAppOvfSectionInfo struct {
+	DynamicData
+
+	Key int `xml:"key,omitempty"`
+	Namespace string `xml:"namespace,omitempty"`
+	Type string `xml:"type,omitempty"`
+	AtEnvelopeLevel *bool `xml:"atEnvelopeLevel"`
+	Contents string `xml:"contents,omitempty"`
+}
+
+func init() {
+	t["VAppOvfSectionInfo"] = reflect.TypeOf((*VAppOvfSectionInfo)(nil)).Elem()
+}
+
+type VAppOvfSectionSpec struct {
+	ArrayUpdateSpec
+
+	Info *VAppOvfSectionInfo `xml:"info,omitempty"`
+}
+
+func init() {
+	t["VAppOvfSectionSpec"] = reflect.TypeOf((*VAppOvfSectionSpec)(nil)).Elem()
+}
+
+type VAppProductInfo struct {
+	DynamicData
+
+	Key int `xml:"key"`
+	ClassId string `xml:"classId,omitempty"`
+	InstanceId string `xml:"instanceId,omitempty"`
+	Name string `xml:"name,omitempty"`
+	Vendor string `xml:"vendor,omitempty"`
+	Version string `xml:"version,omitempty"`
+	FullVersion string `xml:"fullVersion,omitempty"`
+	VendorUrl string `xml:"vendorUrl,omitempty"`
+	ProductUrl string `xml:"productUrl,omitempty"`
+	AppUrl string `xml:"appUrl,omitempty"`
+}
+
+func init() {
+	t["VAppProductInfo"] = reflect.TypeOf((*VAppProductInfo)(nil)).Elem()
+}
+
+type VAppProductSpec struct {
+	ArrayUpdateSpec
+
+	Info *VAppProductInfo `xml:"info,omitempty"`
+}
+
+func init() {
+	t["VAppProductSpec"] = reflect.TypeOf((*VAppProductSpec)(nil)).Elem()
+}
+
+type VAppPropertyFault struct {
+	VmConfigFault
+
+	Id string `xml:"id"`
+	Category string `xml:"category"`
+	Label string `xml:"label"`
+	Type string `xml:"type"`
+	Value string `xml:"value"`
+}
+
+func init() {
+	t["VAppPropertyFault"] = reflect.TypeOf((*VAppPropertyFault)(nil)).Elem()
+}
+
+type VAppPropertyFaultFault BaseVAppPropertyFault
+
+func init() {
+	t["VAppPropertyFaultFault"] = reflect.TypeOf((*VAppPropertyFaultFault)(nil)).Elem()
+}
+
+type VAppPropertyInfo struct {
+	DynamicData
+
+	Key int `xml:"key"`
+	ClassId string `xml:"classId,omitempty"`
+	InstanceId string `xml:"instanceId,omitempty"`
+	Id string `xml:"id,omitempty"`
+	Category string `xml:"category,omitempty"`
+	Label string `xml:"label,omitempty"`
+	Type string `xml:"type,omitempty"`
+	TypeReference string `xml:"typeReference,omitempty"`
+	UserConfigurable *bool `xml:"userConfigurable"`
+	DefaultValue string `xml:"defaultValue,omitempty"`
+	Value string `xml:"value,omitempty"`
+	Description string `xml:"description,omitempty"`
+}
+
+func init() {
+	t["VAppPropertyInfo"] = reflect.TypeOf((*VAppPropertyInfo)(nil)).Elem()
+}
+
+type VAppPropertySpec struct {
+	ArrayUpdateSpec
+
+	Info *VAppPropertyInfo `xml:"info,omitempty"`
+}
+
+func init() {
+	t["VAppPropertySpec"] = reflect.TypeOf((*VAppPropertySpec)(nil)).Elem()
+}
+
+type VAppTaskInProgress struct {
+	TaskInProgress
+}
+
+func init() {
+	t["VAppTaskInProgress"] = reflect.TypeOf((*VAppTaskInProgress)(nil)).Elem()
+}
+
+type VAppTaskInProgressFault VAppTaskInProgress
+
+func init() {
+	t["VAppTaskInProgressFault"] = reflect.TypeOf((*VAppTaskInProgressFault)(nil)).Elem()
+}
+
+type VFlashCacheHotConfigNotSupported struct {
+	VmConfigFault
+}
+
+func init() {
+	t["VFlashCacheHotConfigNotSupported"] = reflect.TypeOf((*VFlashCacheHotConfigNotSupported)(nil)).Elem()
+}
+
+type VFlashCacheHotConfigNotSupportedFault VFlashCacheHotConfigNotSupported
+
+func init() {
+	t["VFlashCacheHotConfigNotSupportedFault"] = reflect.TypeOf((*VFlashCacheHotConfigNotSupportedFault)(nil)).Elem()
+}
+
+type VFlashModuleNotSupported struct {
+	VmConfigFault
+
+	VmName string `xml:"vmName"`
+	ModuleName string `xml:"moduleName"`
+	Reason string `xml:"reason"`
+	HostName string `xml:"hostName"`
+}
+
+func init() {
+	t["VFlashModuleNotSupported"] = reflect.TypeOf((*VFlashModuleNotSupported)(nil)).Elem()
+}
+
+type VFlashModuleNotSupportedFault VFlashModuleNotSupported
+
+func init() {
+	t["VFlashModuleNotSupportedFault"] = reflect.TypeOf((*VFlashModuleNotSupportedFault)(nil)).Elem()
+}
+
+type VFlashModuleVersionIncompatible struct {
+	VimFault
+
+	ModuleName string `xml:"moduleName"`
+	VmRequestModuleVersion string `xml:"vmRequestModuleVersion"`
+	HostMinSupportedVerson string `xml:"hostMinSupportedVerson"`
+	HostModuleVersion string `xml:"hostModuleVersion"`
+}
+
+func init() {
+	t["VFlashModuleVersionIncompatible"] = reflect.TypeOf((*VFlashModuleVersionIncompatible)(nil)).Elem()
+}
+
+type VFlashModuleVersionIncompatibleFault VFlashModuleVersionIncompatible
+
+func init() {
+	t["VFlashModuleVersionIncompatibleFault"] = reflect.TypeOf((*VFlashModuleVersionIncompatibleFault)(nil)).Elem()
+}
+
+type VMFSDatastoreCreatedEvent struct {
+	HostEvent
+
+	Datastore DatastoreEventArgument `xml:"datastore"`
+}
+
+func init() {
+	t["VMFSDatastoreCreatedEvent"] = reflect.TypeOf((*VMFSDatastoreCreatedEvent)(nil)).Elem()
+}
+
+type VMFSDatastoreExpandedEvent struct {
+	HostEvent
+
+	Datastore DatastoreEventArgument `xml:"datastore"`
+}
+
+func init() {
+ t["VMFSDatastoreExpandedEvent"] = reflect.TypeOf((*VMFSDatastoreExpandedEvent)(nil)).Elem() +} + +type VMFSDatastoreExtendedEvent struct { + HostEvent + + Datastore DatastoreEventArgument `xml:"datastore"` +} + +func init() { + t["VMFSDatastoreExtendedEvent"] = reflect.TypeOf((*VMFSDatastoreExtendedEvent)(nil)).Elem() +} + +type VMINotSupported struct { + DeviceNotSupported +} + +func init() { + t["VMINotSupported"] = reflect.TypeOf((*VMINotSupported)(nil)).Elem() +} + +type VMINotSupportedFault VMINotSupported + +func init() { + t["VMINotSupportedFault"] = reflect.TypeOf((*VMINotSupportedFault)(nil)).Elem() +} + +type VMOnConflictDVPort struct { + CannotAccessNetwork +} + +func init() { + t["VMOnConflictDVPort"] = reflect.TypeOf((*VMOnConflictDVPort)(nil)).Elem() +} + +type VMOnConflictDVPortFault VMOnConflictDVPort + +func init() { + t["VMOnConflictDVPortFault"] = reflect.TypeOf((*VMOnConflictDVPortFault)(nil)).Elem() +} + +type VMOnVirtualIntranet struct { + CannotAccessNetwork +} + +func init() { + t["VMOnVirtualIntranet"] = reflect.TypeOf((*VMOnVirtualIntranet)(nil)).Elem() +} + +type VMOnVirtualIntranetFault VMOnVirtualIntranet + +func init() { + t["VMOnVirtualIntranetFault"] = reflect.TypeOf((*VMOnVirtualIntranetFault)(nil)).Elem() +} + +type VMotionAcrossNetworkNotSupported struct { + MigrationFeatureNotSupported +} + +func init() { + t["VMotionAcrossNetworkNotSupported"] = reflect.TypeOf((*VMotionAcrossNetworkNotSupported)(nil)).Elem() +} + +type VMotionAcrossNetworkNotSupportedFault VMotionAcrossNetworkNotSupported + +func init() { + t["VMotionAcrossNetworkNotSupportedFault"] = reflect.TypeOf((*VMotionAcrossNetworkNotSupportedFault)(nil)).Elem() +} + +type VMotionInterfaceIssue struct { + MigrationFault + + AtSourceHost bool `xml:"atSourceHost"` + FailedHost string `xml:"failedHost"` + FailedHostEntity *ManagedObjectReference `xml:"failedHostEntity,omitempty"` +} + +func init() { + t["VMotionInterfaceIssue"] = reflect.TypeOf((*VMotionInterfaceIssue)(nil)).Elem() +} + +type VMotionInterfaceIssueFault BaseVMotionInterfaceIssue + +func init() { + t["VMotionInterfaceIssueFault"] = reflect.TypeOf((*VMotionInterfaceIssueFault)(nil)).Elem() +} + +type VMotionLicenseExpiredEvent struct { + LicenseEvent +} + +func init() { + t["VMotionLicenseExpiredEvent"] = reflect.TypeOf((*VMotionLicenseExpiredEvent)(nil)).Elem() +} + +type VMotionLinkCapacityLow struct { + VMotionInterfaceIssue + + Network string `xml:"network"` +} + +func init() { + t["VMotionLinkCapacityLow"] = reflect.TypeOf((*VMotionLinkCapacityLow)(nil)).Elem() +} + +type VMotionLinkCapacityLowFault VMotionLinkCapacityLow + +func init() { + t["VMotionLinkCapacityLowFault"] = reflect.TypeOf((*VMotionLinkCapacityLowFault)(nil)).Elem() +} + +type VMotionLinkDown struct { + VMotionInterfaceIssue + + Network string `xml:"network"` +} + +func init() { + t["VMotionLinkDown"] = reflect.TypeOf((*VMotionLinkDown)(nil)).Elem() +} + +type VMotionLinkDownFault VMotionLinkDown + +func init() { + t["VMotionLinkDownFault"] = reflect.TypeOf((*VMotionLinkDownFault)(nil)).Elem() +} + +type VMotionNotConfigured struct { + VMotionInterfaceIssue +} + +func init() { + t["VMotionNotConfigured"] = reflect.TypeOf((*VMotionNotConfigured)(nil)).Elem() +} + +type VMotionNotConfiguredFault VMotionNotConfigured + +func init() { + t["VMotionNotConfiguredFault"] = reflect.TypeOf((*VMotionNotConfiguredFault)(nil)).Elem() +} + +type VMotionNotLicensed struct { + VMotionInterfaceIssue +} + +func init() { + t["VMotionNotLicensed"] = 
+	t["VMotionNotLicensed"] = reflect.TypeOf((*VMotionNotLicensed)(nil)).Elem()
+}
+
+type VMotionNotLicensedFault VMotionNotLicensed
+
+func init() {
+	t["VMotionNotLicensedFault"] = reflect.TypeOf((*VMotionNotLicensedFault)(nil)).Elem()
+}
+
+type VMotionNotSupported struct {
+	VMotionInterfaceIssue
+}
+
+func init() {
+	t["VMotionNotSupported"] = reflect.TypeOf((*VMotionNotSupported)(nil)).Elem()
+}
+
+type VMotionNotSupportedFault VMotionNotSupported
+
+func init() {
+	t["VMotionNotSupportedFault"] = reflect.TypeOf((*VMotionNotSupportedFault)(nil)).Elem()
+}
+
+type VMotionProtocolIncompatible struct {
+	MigrationFault
+}
+
+func init() {
+	t["VMotionProtocolIncompatible"] = reflect.TypeOf((*VMotionProtocolIncompatible)(nil)).Elem()
+}
+
+type VMotionProtocolIncompatibleFault VMotionProtocolIncompatible
+
+func init() {
+	t["VMotionProtocolIncompatibleFault"] = reflect.TypeOf((*VMotionProtocolIncompatibleFault)(nil)).Elem()
+}
+
+type VMwareDVSConfigInfo struct {
+	DVSConfigInfo
+
+	VspanSession []VMwareVspanSession `xml:"vspanSession,omitempty"`
+	PvlanConfig []VMwareDVSPvlanMapEntry `xml:"pvlanConfig,omitempty"`
+	MaxMtu int `xml:"maxMtu"`
+	LinkDiscoveryProtocolConfig *LinkDiscoveryProtocolConfig `xml:"linkDiscoveryProtocolConfig,omitempty"`
+	IpfixConfig *VMwareIpfixConfig `xml:"ipfixConfig,omitempty"`
+	LacpGroupConfig []VMwareDvsLacpGroupConfig `xml:"lacpGroupConfig,omitempty"`
+	LacpApiVersion string `xml:"lacpApiVersion,omitempty"`
+	MulticastFilteringMode string `xml:"multicastFilteringMode,omitempty"`
+}
+
+func init() {
+	t["VMwareDVSConfigInfo"] = reflect.TypeOf((*VMwareDVSConfigInfo)(nil)).Elem()
+}
+
+type VMwareDVSConfigSpec struct {
+	DVSConfigSpec
+
+	PvlanConfigSpec []VMwareDVSPvlanConfigSpec `xml:"pvlanConfigSpec,omitempty"`
+	VspanConfigSpec []VMwareDVSVspanConfigSpec `xml:"vspanConfigSpec,omitempty"`
+	MaxMtu int `xml:"maxMtu,omitempty"`
+	LinkDiscoveryProtocolConfig *LinkDiscoveryProtocolConfig `xml:"linkDiscoveryProtocolConfig,omitempty"`
+	IpfixConfig *VMwareIpfixConfig `xml:"ipfixConfig,omitempty"`
+	LacpApiVersion string `xml:"lacpApiVersion,omitempty"`
+	MulticastFilteringMode string `xml:"multicastFilteringMode,omitempty"`
+}
+
+func init() {
+	t["VMwareDVSConfigSpec"] = reflect.TypeOf((*VMwareDVSConfigSpec)(nil)).Elem()
+}
+
+type VMwareDVSFeatureCapability struct {
+	DVSFeatureCapability
+
+	VspanSupported *bool `xml:"vspanSupported"`
+	LldpSupported *bool `xml:"lldpSupported"`
+	IpfixSupported *bool `xml:"ipfixSupported"`
+	IpfixCapability *VMwareDvsIpfixCapability `xml:"ipfixCapability,omitempty"`
+	MulticastSnoopingSupported *bool `xml:"multicastSnoopingSupported"`
+	VspanCapability *VMwareDVSVspanCapability `xml:"vspanCapability,omitempty"`
+	LacpCapability *VMwareDvsLacpCapability `xml:"lacpCapability,omitempty"`
+}
+
+func init() {
+	t["VMwareDVSFeatureCapability"] = reflect.TypeOf((*VMwareDVSFeatureCapability)(nil)).Elem()
+}
+
+type VMwareDVSHealthCheckCapability struct {
+	DVSHealthCheckCapability
+
+	VlanMtuSupported bool `xml:"vlanMtuSupported"`
+	TeamingSupported bool `xml:"teamingSupported"`
+}
+
+func init() {
+	t["VMwareDVSHealthCheckCapability"] = reflect.TypeOf((*VMwareDVSHealthCheckCapability)(nil)).Elem()
+}
+
+type VMwareDVSHealthCheckConfig struct {
+	DVSHealthCheckConfig
+}
+
+func init() {
+	t["VMwareDVSHealthCheckConfig"] = reflect.TypeOf((*VMwareDVSHealthCheckConfig)(nil)).Elem()
+}
+
+type VMwareDVSMtuHealthCheckResult struct {
+	HostMemberUplinkHealthCheckResult
+
+	MtuMismatch bool `xml:"mtuMismatch"`
`xml:"vlanSupportSwitchMtu,omitempty"` + VlanNotSupportSwitchMtu []NumericRange `xml:"vlanNotSupportSwitchMtu,omitempty"` +} + +func init() { + t["VMwareDVSMtuHealthCheckResult"] = reflect.TypeOf((*VMwareDVSMtuHealthCheckResult)(nil)).Elem() +} + +type VMwareDVSPortSetting struct { + DVPortSetting + + Vlan BaseVmwareDistributedVirtualSwitchVlanSpec `xml:"vlan,omitempty,typeattr"` + QosTag *IntPolicy `xml:"qosTag,omitempty"` + UplinkTeamingPolicy *VmwareUplinkPortTeamingPolicy `xml:"uplinkTeamingPolicy,omitempty"` + SecurityPolicy *DVSSecurityPolicy `xml:"securityPolicy,omitempty"` + IpfixEnabled *BoolPolicy `xml:"ipfixEnabled,omitempty"` + TxUplink *BoolPolicy `xml:"txUplink,omitempty"` + LacpPolicy *VMwareUplinkLacpPolicy `xml:"lacpPolicy,omitempty"` +} + +func init() { + t["VMwareDVSPortSetting"] = reflect.TypeOf((*VMwareDVSPortSetting)(nil)).Elem() +} + +type VMwareDVSPortgroupPolicy struct { + DVPortgroupPolicy + + VlanOverrideAllowed bool `xml:"vlanOverrideAllowed"` + UplinkTeamingOverrideAllowed bool `xml:"uplinkTeamingOverrideAllowed"` + SecurityPolicyOverrideAllowed bool `xml:"securityPolicyOverrideAllowed"` + IpfixOverrideAllowed *bool `xml:"ipfixOverrideAllowed"` +} + +func init() { + t["VMwareDVSPortgroupPolicy"] = reflect.TypeOf((*VMwareDVSPortgroupPolicy)(nil)).Elem() +} + +type VMwareDVSPvlanConfigSpec struct { + DynamicData + + PvlanEntry VMwareDVSPvlanMapEntry `xml:"pvlanEntry"` + Operation string `xml:"operation"` +} + +func init() { + t["VMwareDVSPvlanConfigSpec"] = reflect.TypeOf((*VMwareDVSPvlanConfigSpec)(nil)).Elem() +} + +type VMwareDVSPvlanMapEntry struct { + DynamicData + + PrimaryVlanId int `xml:"primaryVlanId"` + SecondaryVlanId int `xml:"secondaryVlanId"` + PvlanType string `xml:"pvlanType"` +} + +func init() { + t["VMwareDVSPvlanMapEntry"] = reflect.TypeOf((*VMwareDVSPvlanMapEntry)(nil)).Elem() +} + +type VMwareDVSTeamingHealthCheckConfig struct { + VMwareDVSHealthCheckConfig +} + +func init() { + t["VMwareDVSTeamingHealthCheckConfig"] = reflect.TypeOf((*VMwareDVSTeamingHealthCheckConfig)(nil)).Elem() +} + +type VMwareDVSTeamingHealthCheckResult struct { + HostMemberHealthCheckResult + + TeamingStatus string `xml:"teamingStatus"` +} + +func init() { + t["VMwareDVSTeamingHealthCheckResult"] = reflect.TypeOf((*VMwareDVSTeamingHealthCheckResult)(nil)).Elem() +} + +type VMwareDVSVlanHealthCheckResult struct { + HostMemberUplinkHealthCheckResult + + TrunkedVlan []NumericRange `xml:"trunkedVlan,omitempty"` + UntrunkedVlan []NumericRange `xml:"untrunkedVlan,omitempty"` +} + +func init() { + t["VMwareDVSVlanHealthCheckResult"] = reflect.TypeOf((*VMwareDVSVlanHealthCheckResult)(nil)).Elem() +} + +type VMwareDVSVlanMtuHealthCheckConfig struct { + VMwareDVSHealthCheckConfig +} + +func init() { + t["VMwareDVSVlanMtuHealthCheckConfig"] = reflect.TypeOf((*VMwareDVSVlanMtuHealthCheckConfig)(nil)).Elem() +} + +type VMwareDVSVspanCapability struct { + DynamicData + + MixedDestSupported bool `xml:"mixedDestSupported"` + DvportSupported bool `xml:"dvportSupported"` + RemoteSourceSupported bool `xml:"remoteSourceSupported"` + RemoteDestSupported bool `xml:"remoteDestSupported"` + EncapRemoteSourceSupported bool `xml:"encapRemoteSourceSupported"` +} + +func init() { + t["VMwareDVSVspanCapability"] = reflect.TypeOf((*VMwareDVSVspanCapability)(nil)).Elem() +} + +type VMwareDVSVspanConfigSpec struct { + DynamicData + + VspanSession VMwareVspanSession `xml:"vspanSession"` + Operation string `xml:"operation"` +} + +func init() { + t["VMwareDVSVspanConfigSpec"] = 
+	t["VMwareDVSVspanConfigSpec"] = reflect.TypeOf((*VMwareDVSVspanConfigSpec)(nil)).Elem()
+}
+
+type VMwareDvsIpfixCapability struct {
+	DynamicData
+
+	IpfixSupported *bool `xml:"ipfixSupported"`
+	Ipv6ForIpfixSupported *bool `xml:"ipv6ForIpfixSupported"`
+	ObservationDomainIdSupported *bool `xml:"observationDomainIdSupported"`
+}
+
+func init() {
+	t["VMwareDvsIpfixCapability"] = reflect.TypeOf((*VMwareDvsIpfixCapability)(nil)).Elem()
+}
+
+type VMwareDvsLacpCapability struct {
+	DynamicData
+
+	LacpSupported *bool `xml:"lacpSupported"`
+	MultiLacpGroupSupported *bool `xml:"multiLacpGroupSupported"`
+}
+
+func init() {
+	t["VMwareDvsLacpCapability"] = reflect.TypeOf((*VMwareDvsLacpCapability)(nil)).Elem()
+}
+
+type VMwareDvsLacpGroupConfig struct {
+	DynamicData
+
+	Key string `xml:"key,omitempty"`
+	Name string `xml:"name,omitempty"`
+	Mode string `xml:"mode,omitempty"`
+	UplinkNum int `xml:"uplinkNum,omitempty"`
+	LoadbalanceAlgorithm string `xml:"loadbalanceAlgorithm,omitempty"`
+	Vlan *VMwareDvsLagVlanConfig `xml:"vlan,omitempty"`
+	Ipfix *VMwareDvsLagIpfixConfig `xml:"ipfix,omitempty"`
+	UplinkName []string `xml:"uplinkName,omitempty"`
+	UplinkPortKey []string `xml:"uplinkPortKey,omitempty"`
+}
+
+func init() {
+	t["VMwareDvsLacpGroupConfig"] = reflect.TypeOf((*VMwareDvsLacpGroupConfig)(nil)).Elem()
+}
+
+type VMwareDvsLacpGroupSpec struct {
+	DynamicData
+
+	LacpGroupConfig VMwareDvsLacpGroupConfig `xml:"lacpGroupConfig"`
+	Operation string `xml:"operation"`
+}
+
+func init() {
+	t["VMwareDvsLacpGroupSpec"] = reflect.TypeOf((*VMwareDvsLacpGroupSpec)(nil)).Elem()
+}
+
+type VMwareDvsLagIpfixConfig struct {
+	DynamicData
+
+	IpfixEnabled *bool `xml:"ipfixEnabled"`
+}
+
+func init() {
+	t["VMwareDvsLagIpfixConfig"] = reflect.TypeOf((*VMwareDvsLagIpfixConfig)(nil)).Elem()
+}
+
+type VMwareDvsLagVlanConfig struct {
+	DynamicData
+
+	VlanId []NumericRange `xml:"vlanId,omitempty"`
+}
+
+func init() {
+	t["VMwareDvsLagVlanConfig"] = reflect.TypeOf((*VMwareDvsLagVlanConfig)(nil)).Elem()
+}
+
+type VMwareIpfixConfig struct {
+	DynamicData
+
+	CollectorIpAddress string `xml:"collectorIpAddress,omitempty"`
+	CollectorPort int `xml:"collectorPort,omitempty"`
+	ObservationDomainId int64 `xml:"observationDomainId,omitempty"`
+	ActiveFlowTimeout int `xml:"activeFlowTimeout"`
+	IdleFlowTimeout int `xml:"idleFlowTimeout"`
+	SamplingRate int `xml:"samplingRate"`
+	InternalFlowsOnly bool `xml:"internalFlowsOnly"`
+}
+
+func init() {
+	t["VMwareIpfixConfig"] = reflect.TypeOf((*VMwareIpfixConfig)(nil)).Elem()
+}
+
+type VMwareUplinkLacpPolicy struct {
+	InheritablePolicy
+
+	Enable *BoolPolicy `xml:"enable,omitempty"`
+	Mode *StringPolicy `xml:"mode,omitempty"`
+}
+
+func init() {
+	t["VMwareUplinkLacpPolicy"] = reflect.TypeOf((*VMwareUplinkLacpPolicy)(nil)).Elem()
+}
+
+type VMwareUplinkPortOrderPolicy struct {
+	InheritablePolicy
+
+	ActiveUplinkPort []string `xml:"activeUplinkPort,omitempty"`
+	StandbyUplinkPort []string `xml:"standbyUplinkPort,omitempty"`
+}
+
+func init() {
+	t["VMwareUplinkPortOrderPolicy"] = reflect.TypeOf((*VMwareUplinkPortOrderPolicy)(nil)).Elem()
+}
+
+type VMwareVspanPort struct {
+	DynamicData
+
+	PortKey []string `xml:"portKey,omitempty"`
+	UplinkPortName []string `xml:"uplinkPortName,omitempty"`
+	WildcardPortConnecteeType []string `xml:"wildcardPortConnecteeType,omitempty"`
+	Vlans []int `xml:"vlans,omitempty"`
+	IpAddress []string `xml:"ipAddress,omitempty"`
+}
+
+func init() {
+	t["VMwareVspanPort"] = reflect.TypeOf((*VMwareVspanPort)(nil)).Elem()
+}
+
+type VMwareVspanSession struct {
+	DynamicData
+
+	Key string `xml:"key,omitempty"`
+	Name string `xml:"name,omitempty"`
+	Description string `xml:"description,omitempty"`
+	Enabled bool `xml:"enabled"`
+	SourcePortTransmitted *VMwareVspanPort `xml:"sourcePortTransmitted,omitempty"`
+	SourcePortReceived *VMwareVspanPort `xml:"sourcePortReceived,omitempty"`
+	DestinationPort *VMwareVspanPort `xml:"destinationPort,omitempty"`
+	EncapsulationVlanId int `xml:"encapsulationVlanId,omitempty"`
+	StripOriginalVlan bool `xml:"stripOriginalVlan"`
+	MirroredPacketLength int `xml:"mirroredPacketLength,omitempty"`
+	NormalTrafficAllowed bool `xml:"normalTrafficAllowed"`
+	SessionType string `xml:"sessionType,omitempty"`
+	SamplingRate int `xml:"samplingRate,omitempty"`
+}
+
+func init() {
+	t["VMwareVspanSession"] = reflect.TypeOf((*VMwareVspanSession)(nil)).Elem()
+}
+
+type VRPEditSpec struct {
+	DynamicData
+
+	VrpId string `xml:"vrpId"`
+	Description string `xml:"description,omitempty"`
+	CpuAllocation *VrpResourceAllocationInfo `xml:"cpuAllocation,omitempty"`
+	MemoryAllocation *VrpResourceAllocationInfo `xml:"memoryAllocation,omitempty"`
+	AddedHubs []ManagedObjectReference `xml:"addedHubs,omitempty"`
+	RemovedHubs []ManagedObjectReference `xml:"removedHubs,omitempty"`
+	ChangeVersion int64 `xml:"changeVersion,omitempty"`
+}
+
+func init() {
+	t["VRPEditSpec"] = reflect.TypeOf((*VRPEditSpec)(nil)).Elem()
+}
+
+type VVolHostPE struct {
+	DynamicData
+
+	Key ManagedObjectReference `xml:"key"`
+	ProtocolEndpoint []HostProtocolEndpoint `xml:"protocolEndpoint"`
+}
+
+func init() {
+	t["VVolHostPE"] = reflect.TypeOf((*VVolHostPE)(nil)).Elem()
+}
+
+type ValidateCredentialsInGuest ValidateCredentialsInGuestRequestType
+
+func init() {
+	t["ValidateCredentialsInGuest"] = reflect.TypeOf((*ValidateCredentialsInGuest)(nil)).Elem()
+}
+
+type ValidateCredentialsInGuestRequestType struct {
+	This ManagedObjectReference `xml:"_this"`
+	Vm ManagedObjectReference `xml:"vm"`
+	Auth BaseGuestAuthentication `xml:"auth,typeattr"`
+}
+
+func init() {
+	t["ValidateCredentialsInGuestRequestType"] = reflect.TypeOf((*ValidateCredentialsInGuestRequestType)(nil)).Elem()
+}
+
+type ValidateCredentialsInGuestResponse struct {
+}
+
+type ValidateHost ValidateHostRequestType
+
+func init() {
+	t["ValidateHost"] = reflect.TypeOf((*ValidateHost)(nil)).Elem()
+}
+
+type ValidateHostRequestType struct {
+	This ManagedObjectReference `xml:"_this"`
+	OvfDescriptor string `xml:"ovfDescriptor"`
+	Host ManagedObjectReference `xml:"host"`
+	Vhp OvfValidateHostParams `xml:"vhp"`
+}
+
+func init() {
+	t["ValidateHostRequestType"] = reflect.TypeOf((*ValidateHostRequestType)(nil)).Elem()
+}
+
+type ValidateHostResponse struct {
+	Returnval OvfValidateHostResult `xml:"returnval"`
+}
+
+type ValidateMigration ValidateMigrationRequestType
+
+func init() {
+	t["ValidateMigration"] = reflect.TypeOf((*ValidateMigration)(nil)).Elem()
+}
+
+type ValidateMigrationRequestType struct {
+	This ManagedObjectReference `xml:"_this"`
+	Vm []ManagedObjectReference `xml:"vm"`
+	State VirtualMachinePowerState `xml:"state,omitempty"`
+	TestType []string `xml:"testType,omitempty"`
+	Pool *ManagedObjectReference `xml:"pool,omitempty"`
+	Host *ManagedObjectReference `xml:"host,omitempty"`
+}
+
+func init() {
+	t["ValidateMigrationRequestType"] = reflect.TypeOf((*ValidateMigrationRequestType)(nil)).Elem()
+}
+
+type ValidateMigrationResponse struct {
+	Returnval []BaseEvent `xml:"returnval,omitempty,typeattr"`
+}
+
+type VasaProviderContainerSpec struct {
+	DynamicData
+
`xml:"vasaProviderInfo,omitempty"` + ScId string `xml:"scId"` + Deleted bool `xml:"deleted"` +} + +func init() { + t["VasaProviderContainerSpec"] = reflect.TypeOf((*VasaProviderContainerSpec)(nil)).Elem() +} + +type VcAgentUninstallFailedEvent struct { + HostEvent + + Reason string `xml:"reason,omitempty"` +} + +func init() { + t["VcAgentUninstallFailedEvent"] = reflect.TypeOf((*VcAgentUninstallFailedEvent)(nil)).Elem() +} + +type VcAgentUninstalledEvent struct { + HostEvent +} + +func init() { + t["VcAgentUninstalledEvent"] = reflect.TypeOf((*VcAgentUninstalledEvent)(nil)).Elem() +} + +type VcAgentUpgradeFailedEvent struct { + HostEvent + + Reason string `xml:"reason,omitempty"` +} + +func init() { + t["VcAgentUpgradeFailedEvent"] = reflect.TypeOf((*VcAgentUpgradeFailedEvent)(nil)).Elem() +} + +type VcAgentUpgradedEvent struct { + HostEvent +} + +func init() { + t["VcAgentUpgradedEvent"] = reflect.TypeOf((*VcAgentUpgradedEvent)(nil)).Elem() +} + +type VimAccountPasswordChangedEvent struct { + HostEvent +} + +func init() { + t["VimAccountPasswordChangedEvent"] = reflect.TypeOf((*VimAccountPasswordChangedEvent)(nil)).Elem() +} + +type VimFault struct { + MethodFault +} + +func init() { + t["VimFault"] = reflect.TypeOf((*VimFault)(nil)).Elem() +} + +type VimFaultFault BaseVimFault + +func init() { + t["VimFaultFault"] = reflect.TypeOf((*VimFaultFault)(nil)).Elem() +} + +type VimVasaProvider struct { + DynamicData + + Url string `xml:"url"` + Name string `xml:"name,omitempty"` + SelfSignedCertificate string `xml:"selfSignedCertificate,omitempty"` +} + +func init() { + t["VimVasaProvider"] = reflect.TypeOf((*VimVasaProvider)(nil)).Elem() +} + +type VimVasaProviderInfo struct { + DynamicData + + Provider VimVasaProvider `xml:"provider"` + ArrayState []VimVasaProviderStatePerArray `xml:"arrayState,omitempty"` +} + +func init() { + t["VimVasaProviderInfo"] = reflect.TypeOf((*VimVasaProviderInfo)(nil)).Elem() +} + +type VimVasaProviderStatePerArray struct { + DynamicData + + Priority int `xml:"priority"` + ArrayId string `xml:"arrayId"` + Active bool `xml:"active"` +} + +func init() { + t["VimVasaProviderStatePerArray"] = reflect.TypeOf((*VimVasaProviderStatePerArray)(nil)).Elem() +} + +type VirtualAHCIController struct { + VirtualSATAController +} + +func init() { + t["VirtualAHCIController"] = reflect.TypeOf((*VirtualAHCIController)(nil)).Elem() +} + +type VirtualAHCIControllerOption struct { + VirtualSATAControllerOption +} + +func init() { + t["VirtualAHCIControllerOption"] = reflect.TypeOf((*VirtualAHCIControllerOption)(nil)).Elem() +} + +type VirtualAppImportSpec struct { + ImportSpec + + Name string `xml:"name"` + VAppConfigSpec VAppConfigSpec `xml:"vAppConfigSpec"` + ResourcePoolSpec ResourceConfigSpec `xml:"resourcePoolSpec"` + Child []BaseImportSpec `xml:"child,omitempty,typeattr"` +} + +func init() { + t["VirtualAppImportSpec"] = reflect.TypeOf((*VirtualAppImportSpec)(nil)).Elem() +} + +type VirtualAppLinkInfo struct { + DynamicData + + Key ManagedObjectReference `xml:"key"` + DestroyWithParent *bool `xml:"destroyWithParent"` +} + +func init() { + t["VirtualAppLinkInfo"] = reflect.TypeOf((*VirtualAppLinkInfo)(nil)).Elem() +} + +type VirtualAppSummary struct { + ResourcePoolSummary + + Product *VAppProductInfo `xml:"product,omitempty"` + VAppState VirtualAppVAppState `xml:"vAppState,omitempty"` + Suspended *bool `xml:"suspended"` + InstallBootRequired *bool `xml:"installBootRequired"` + InstanceUuid string `xml:"instanceUuid,omitempty"` +} + +func init() { + t["VirtualAppSummary"] = 
+	t["VirtualAppSummary"] = reflect.TypeOf((*VirtualAppSummary)(nil)).Elem()
+}
+
+type VirtualBusLogicController struct {
+	VirtualSCSIController
+}
+
+func init() {
+	t["VirtualBusLogicController"] = reflect.TypeOf((*VirtualBusLogicController)(nil)).Elem()
+}
+
+type VirtualBusLogicControllerOption struct {
+	VirtualSCSIControllerOption
+}
+
+func init() {
+	t["VirtualBusLogicControllerOption"] = reflect.TypeOf((*VirtualBusLogicControllerOption)(nil)).Elem()
+}
+
+type VirtualCdrom struct {
+	VirtualDevice
+}
+
+func init() {
+	t["VirtualCdrom"] = reflect.TypeOf((*VirtualCdrom)(nil)).Elem()
+}
+
+type VirtualCdromAtapiBackingInfo struct {
+	VirtualDeviceDeviceBackingInfo
+}
+
+func init() {
+	t["VirtualCdromAtapiBackingInfo"] = reflect.TypeOf((*VirtualCdromAtapiBackingInfo)(nil)).Elem()
+}
+
+type VirtualCdromAtapiBackingOption struct {
+	VirtualDeviceDeviceBackingOption
+}
+
+func init() {
+	t["VirtualCdromAtapiBackingOption"] = reflect.TypeOf((*VirtualCdromAtapiBackingOption)(nil)).Elem()
+}
+
+type VirtualCdromIsoBackingInfo struct {
+	VirtualDeviceFileBackingInfo
+}
+
+func init() {
+	t["VirtualCdromIsoBackingInfo"] = reflect.TypeOf((*VirtualCdromIsoBackingInfo)(nil)).Elem()
+}
+
+type VirtualCdromIsoBackingOption struct {
+	VirtualDeviceFileBackingOption
+}
+
+func init() {
+	t["VirtualCdromIsoBackingOption"] = reflect.TypeOf((*VirtualCdromIsoBackingOption)(nil)).Elem()
+}
+
+type VirtualCdromOption struct {
+	VirtualDeviceOption
+}
+
+func init() {
+	t["VirtualCdromOption"] = reflect.TypeOf((*VirtualCdromOption)(nil)).Elem()
+}
+
+type VirtualCdromPassthroughBackingInfo struct {
+	VirtualDeviceDeviceBackingInfo
+
+	Exclusive bool `xml:"exclusive"`
+}
+
+func init() {
+	t["VirtualCdromPassthroughBackingInfo"] = reflect.TypeOf((*VirtualCdromPassthroughBackingInfo)(nil)).Elem()
+}
+
+type VirtualCdromPassthroughBackingOption struct {
+	VirtualDeviceDeviceBackingOption
+
+	Exclusive BoolOption `xml:"exclusive"`
+}
+
+func init() {
+	t["VirtualCdromPassthroughBackingOption"] = reflect.TypeOf((*VirtualCdromPassthroughBackingOption)(nil)).Elem()
+}
+
+type VirtualCdromRemoteAtapiBackingInfo struct {
+	VirtualDeviceRemoteDeviceBackingInfo
+}
+
+func init() {
+	t["VirtualCdromRemoteAtapiBackingInfo"] = reflect.TypeOf((*VirtualCdromRemoteAtapiBackingInfo)(nil)).Elem()
+}
+
+type VirtualCdromRemoteAtapiBackingOption struct {
+	VirtualDeviceDeviceBackingOption
+}
+
+func init() {
+	t["VirtualCdromRemoteAtapiBackingOption"] = reflect.TypeOf((*VirtualCdromRemoteAtapiBackingOption)(nil)).Elem()
+}
+
+type VirtualCdromRemotePassthroughBackingInfo struct {
+	VirtualDeviceRemoteDeviceBackingInfo
+
+	Exclusive bool `xml:"exclusive"`
+}
+
+func init() {
+	t["VirtualCdromRemotePassthroughBackingInfo"] = reflect.TypeOf((*VirtualCdromRemotePassthroughBackingInfo)(nil)).Elem()
+}
+
+type VirtualCdromRemotePassthroughBackingOption struct {
+	VirtualDeviceRemoteDeviceBackingOption
+
+	Exclusive BoolOption `xml:"exclusive"`
+}
+
+func init() {
+	t["VirtualCdromRemotePassthroughBackingOption"] = reflect.TypeOf((*VirtualCdromRemotePassthroughBackingOption)(nil)).Elem()
+}
+
+type VirtualController struct {
+	VirtualDevice
+
+	BusNumber int `xml:"busNumber"`
+	Device []int `xml:"device,omitempty"`
+}
+
+func init() {
+	t["VirtualController"] = reflect.TypeOf((*VirtualController)(nil)).Elem()
+}
+
+type VirtualControllerOption struct {
+	VirtualDeviceOption
+
+	Devices IntOption `xml:"devices"`
+	SupportedDevice []string `xml:"supportedDevice,omitempty"`
+}
+
+func init() {
+	t["VirtualControllerOption"] = reflect.TypeOf((*VirtualControllerOption)(nil)).Elem()
+}
+
+type VirtualDevice struct {
+	DynamicData
+
+	Key int `xml:"key"`
+	DeviceInfo BaseDescription `xml:"deviceInfo,omitempty,typeattr"`
+	Backing BaseVirtualDeviceBackingInfo `xml:"backing,omitempty,typeattr"`
+	Connectable *VirtualDeviceConnectInfo `xml:"connectable,omitempty"`
+	SlotInfo BaseVirtualDeviceBusSlotInfo `xml:"slotInfo,omitempty,typeattr"`
+	ControllerKey int `xml:"controllerKey,omitempty"`
+	UnitNumber int `xml:"unitNumber,omitempty"`
+}
+
+func init() {
+	t["VirtualDevice"] = reflect.TypeOf((*VirtualDevice)(nil)).Elem()
+}
+
+type VirtualDeviceBackingInfo struct {
+	DynamicData
+}
+
+func init() {
+	t["VirtualDeviceBackingInfo"] = reflect.TypeOf((*VirtualDeviceBackingInfo)(nil)).Elem()
+}
+
+type VirtualDeviceBackingOption struct {
+	DynamicData
+
+	Type string `xml:"type"`
+}
+
+func init() {
+	t["VirtualDeviceBackingOption"] = reflect.TypeOf((*VirtualDeviceBackingOption)(nil)).Elem()
+}
+
+type VirtualDeviceBusSlotInfo struct {
+	DynamicData
+}
+
+func init() {
+	t["VirtualDeviceBusSlotInfo"] = reflect.TypeOf((*VirtualDeviceBusSlotInfo)(nil)).Elem()
+}
+
+type VirtualDeviceBusSlotOption struct {
+	DynamicData
+
+	Type string `xml:"type"`
+}
+
+func init() {
+	t["VirtualDeviceBusSlotOption"] = reflect.TypeOf((*VirtualDeviceBusSlotOption)(nil)).Elem()
+}
+
+type VirtualDeviceConfigSpec struct {
+	DynamicData
+
+	Operation VirtualDeviceConfigSpecOperation `xml:"operation,omitempty"`
+	FileOperation VirtualDeviceConfigSpecFileOperation `xml:"fileOperation,omitempty"`
+	Device BaseVirtualDevice `xml:"device,typeattr"`
+	Profile []BaseVirtualMachineProfileSpec `xml:"profile,omitempty,typeattr"`
+}
+
+func init() {
+	t["VirtualDeviceConfigSpec"] = reflect.TypeOf((*VirtualDeviceConfigSpec)(nil)).Elem()
+}
+
+type VirtualDeviceConnectInfo struct {
+	DynamicData
+
+	StartConnected bool `xml:"startConnected"`
+	AllowGuestControl bool `xml:"allowGuestControl"`
+	Connected bool `xml:"connected"`
+	Status string `xml:"status,omitempty"`
+}
+
+func init() {
+	t["VirtualDeviceConnectInfo"] = reflect.TypeOf((*VirtualDeviceConnectInfo)(nil)).Elem()
+}
+
+type VirtualDeviceConnectOption struct {
+	DynamicData
+
+	StartConnected BoolOption `xml:"startConnected"`
+	AllowGuestControl BoolOption `xml:"allowGuestControl"`
+}
+
+func init() {
+	t["VirtualDeviceConnectOption"] = reflect.TypeOf((*VirtualDeviceConnectOption)(nil)).Elem()
+}
+
+type VirtualDeviceDeviceBackingInfo struct {
+	VirtualDeviceBackingInfo
+
+	DeviceName string `xml:"deviceName"`
+	UseAutoDetect *bool `xml:"useAutoDetect"`
+}
+
+func init() {
+	t["VirtualDeviceDeviceBackingInfo"] = reflect.TypeOf((*VirtualDeviceDeviceBackingInfo)(nil)).Elem()
+}
+
+type VirtualDeviceDeviceBackingOption struct {
+	VirtualDeviceBackingOption
+
+	AutoDetectAvailable BoolOption `xml:"autoDetectAvailable"`
+}
+
+func init() {
+	t["VirtualDeviceDeviceBackingOption"] = reflect.TypeOf((*VirtualDeviceDeviceBackingOption)(nil)).Elem()
+}
+
+type VirtualDeviceFileBackingInfo struct {
+	VirtualDeviceBackingInfo
+
+	FileName string `xml:"fileName"`
+	Datastore *ManagedObjectReference `xml:"datastore,omitempty"`
+	BackingObjectId string `xml:"backingObjectId,omitempty"`
+}
+
+func init() {
+	t["VirtualDeviceFileBackingInfo"] = reflect.TypeOf((*VirtualDeviceFileBackingInfo)(nil)).Elem()
+}
+
+type VirtualDeviceFileBackingOption struct {
+	VirtualDeviceBackingOption
+
+	FileNameExtensions *ChoiceOption `xml:"fileNameExtensions,omitempty"`
+}
+
+func init() {
+	t["VirtualDeviceFileBackingOption"] = reflect.TypeOf((*VirtualDeviceFileBackingOption)(nil)).Elem()
+}
+
+type VirtualDeviceOption struct {
+	DynamicData
+
+	Type string `xml:"type"`
+	ConnectOption *VirtualDeviceConnectOption `xml:"connectOption,omitempty"`
+	BusSlotOption *VirtualDeviceBusSlotOption `xml:"busSlotOption,omitempty"`
+	ControllerType string `xml:"controllerType,omitempty"`
+	AutoAssignController *BoolOption `xml:"autoAssignController,omitempty"`
+	BackingOption []BaseVirtualDeviceBackingOption `xml:"backingOption,omitempty,typeattr"`
+	DefaultBackingOptionIndex int `xml:"defaultBackingOptionIndex,omitempty"`
+	LicensingLimit []string `xml:"licensingLimit,omitempty"`
+	Deprecated bool `xml:"deprecated"`
+	PlugAndPlay bool `xml:"plugAndPlay"`
+	HotRemoveSupported *bool `xml:"hotRemoveSupported"`
+}
+
+func init() {
+	t["VirtualDeviceOption"] = reflect.TypeOf((*VirtualDeviceOption)(nil)).Elem()
+}
+
+type VirtualDevicePciBusSlotInfo struct {
+	VirtualDeviceBusSlotInfo
+
+	PciSlotNumber int `xml:"pciSlotNumber"`
+}
+
+func init() {
+	t["VirtualDevicePciBusSlotInfo"] = reflect.TypeOf((*VirtualDevicePciBusSlotInfo)(nil)).Elem()
+}
+
+type VirtualDevicePipeBackingInfo struct {
+	VirtualDeviceBackingInfo
+
+	PipeName string `xml:"pipeName"`
+}
+
+func init() {
+	t["VirtualDevicePipeBackingInfo"] = reflect.TypeOf((*VirtualDevicePipeBackingInfo)(nil)).Elem()
+}
+
+type VirtualDevicePipeBackingOption struct {
+	VirtualDeviceBackingOption
+}
+
+func init() {
+	t["VirtualDevicePipeBackingOption"] = reflect.TypeOf((*VirtualDevicePipeBackingOption)(nil)).Elem()
+}
+
+type VirtualDeviceRemoteDeviceBackingInfo struct {
+	VirtualDeviceBackingInfo
+
+	DeviceName string `xml:"deviceName"`
+	UseAutoDetect *bool `xml:"useAutoDetect"`
+}
+
+func init() {
+	t["VirtualDeviceRemoteDeviceBackingInfo"] = reflect.TypeOf((*VirtualDeviceRemoteDeviceBackingInfo)(nil)).Elem()
+}
+
+type VirtualDeviceRemoteDeviceBackingOption struct {
+	VirtualDeviceBackingOption
+
+	AutoDetectAvailable BoolOption `xml:"autoDetectAvailable"`
+}
+
+func init() {
+	t["VirtualDeviceRemoteDeviceBackingOption"] = reflect.TypeOf((*VirtualDeviceRemoteDeviceBackingOption)(nil)).Elem()
+}
+
+type VirtualDeviceURIBackingInfo struct {
+	VirtualDeviceBackingInfo
+
+	ServiceURI string `xml:"serviceURI"`
+	Direction string `xml:"direction"`
+	ProxyURI string `xml:"proxyURI,omitempty"`
+}
+
+func init() {
+	t["VirtualDeviceURIBackingInfo"] = reflect.TypeOf((*VirtualDeviceURIBackingInfo)(nil)).Elem()
+}
+
+type VirtualDeviceURIBackingOption struct {
+	VirtualDeviceBackingOption
+
+	Directions ChoiceOption `xml:"directions"`
+}
+
+func init() {
+	t["VirtualDeviceURIBackingOption"] = reflect.TypeOf((*VirtualDeviceURIBackingOption)(nil)).Elem()
+}
+
+type VirtualDisk struct {
+	VirtualDevice
+
+	CapacityInKB int64 `xml:"capacityInKB"`
+	CapacityInBytes int64 `xml:"capacityInBytes,omitempty"`
+	Shares *SharesInfo `xml:"shares,omitempty"`
+	StorageIOAllocation *StorageIOAllocationInfo `xml:"storageIOAllocation,omitempty"`
+	DiskObjectId string `xml:"diskObjectId,omitempty"`
+	VFlashCacheConfigInfo *VirtualDiskVFlashCacheConfigInfo `xml:"vFlashCacheConfigInfo,omitempty"`
+	Iofilter []string `xml:"iofilter,omitempty"`
+}
+
+func init() {
+	t["VirtualDisk"] = reflect.TypeOf((*VirtualDisk)(nil)).Elem()
+}
+
+type VirtualDiskAntiAffinityRuleSpec struct {
+	ClusterRuleInfo
+
+	DiskId []int `xml:"diskId"`
+}
+
+func init() {
+	t["VirtualDiskAntiAffinityRuleSpec"] = reflect.TypeOf((*VirtualDiskAntiAffinityRuleSpec)(nil)).Elem()
+}
+
+type VirtualDiskBlocksNotFullyProvisioned struct {
DeviceBackingNotSupported +} + +func init() { + t["VirtualDiskBlocksNotFullyProvisioned"] = reflect.TypeOf((*VirtualDiskBlocksNotFullyProvisioned)(nil)).Elem() +} + +type VirtualDiskBlocksNotFullyProvisionedFault VirtualDiskBlocksNotFullyProvisioned + +func init() { + t["VirtualDiskBlocksNotFullyProvisionedFault"] = reflect.TypeOf((*VirtualDiskBlocksNotFullyProvisionedFault)(nil)).Elem() +} + +type VirtualDiskConfigSpec struct { + VirtualDeviceConfigSpec + + DiskMoveType string `xml:"diskMoveType,omitempty"` + MigrateCache *bool `xml:"migrateCache"` +} + +func init() { + t["VirtualDiskConfigSpec"] = reflect.TypeOf((*VirtualDiskConfigSpec)(nil)).Elem() +} + +type VirtualDiskDeltaDiskFormatsSupported struct { + DynamicData + + DatastoreType string `xml:"datastoreType"` + DeltaDiskFormat ChoiceOption `xml:"deltaDiskFormat"` +} + +func init() { + t["VirtualDiskDeltaDiskFormatsSupported"] = reflect.TypeOf((*VirtualDiskDeltaDiskFormatsSupported)(nil)).Elem() +} + +type VirtualDiskFlatVer1BackingInfo struct { + VirtualDeviceFileBackingInfo + + DiskMode string `xml:"diskMode"` + Split *bool `xml:"split"` + WriteThrough *bool `xml:"writeThrough"` + ContentId string `xml:"contentId,omitempty"` + Parent *VirtualDiskFlatVer1BackingInfo `xml:"parent,omitempty"` +} + +func init() { + t["VirtualDiskFlatVer1BackingInfo"] = reflect.TypeOf((*VirtualDiskFlatVer1BackingInfo)(nil)).Elem() +} + +type VirtualDiskFlatVer1BackingOption struct { + VirtualDeviceFileBackingOption + + DiskMode ChoiceOption `xml:"diskMode"` + Split BoolOption `xml:"split"` + WriteThrough BoolOption `xml:"writeThrough"` + Growable bool `xml:"growable"` +} + +func init() { + t["VirtualDiskFlatVer1BackingOption"] = reflect.TypeOf((*VirtualDiskFlatVer1BackingOption)(nil)).Elem() +} + +type VirtualDiskFlatVer2BackingInfo struct { + VirtualDeviceFileBackingInfo + + DiskMode string `xml:"diskMode"` + Split *bool `xml:"split"` + WriteThrough *bool `xml:"writeThrough"` + ThinProvisioned *bool `xml:"thinProvisioned"` + EagerlyScrub *bool `xml:"eagerlyScrub"` + Uuid string `xml:"uuid,omitempty"` + ContentId string `xml:"contentId,omitempty"` + ChangeId string `xml:"changeId,omitempty"` + Parent *VirtualDiskFlatVer2BackingInfo `xml:"parent,omitempty"` + DeltaDiskFormat string `xml:"deltaDiskFormat,omitempty"` + DigestEnabled *bool `xml:"digestEnabled"` + DeltaGrainSize int `xml:"deltaGrainSize,omitempty"` + DeltaDiskFormatVariant string `xml:"deltaDiskFormatVariant,omitempty"` + Sharing string `xml:"sharing,omitempty"` +} + +func init() { + t["VirtualDiskFlatVer2BackingInfo"] = reflect.TypeOf((*VirtualDiskFlatVer2BackingInfo)(nil)).Elem() +} + +type VirtualDiskFlatVer2BackingOption struct { + VirtualDeviceFileBackingOption + + DiskMode ChoiceOption `xml:"diskMode"` + Split BoolOption `xml:"split"` + WriteThrough BoolOption `xml:"writeThrough"` + Growable bool `xml:"growable"` + HotGrowable bool `xml:"hotGrowable"` + Uuid bool `xml:"uuid"` + ThinProvisioned *BoolOption `xml:"thinProvisioned,omitempty"` + EagerlyScrub *BoolOption `xml:"eagerlyScrub,omitempty"` + DeltaDiskFormat *ChoiceOption `xml:"deltaDiskFormat,omitempty"` + DeltaDiskFormatsSupported []VirtualDiskDeltaDiskFormatsSupported `xml:"deltaDiskFormatsSupported,omitempty"` +} + +func init() { + t["VirtualDiskFlatVer2BackingOption"] = reflect.TypeOf((*VirtualDiskFlatVer2BackingOption)(nil)).Elem() +} + +type VirtualDiskId struct { + DynamicData + + Vm ManagedObjectReference `xml:"vm"` + DiskId int `xml:"diskId"` +} + +func init() { + t["VirtualDiskId"] = 
reflect.TypeOf((*VirtualDiskId)(nil)).Elem() +} + +type VirtualDiskModeNotSupported struct { + DeviceNotSupported + + Mode string `xml:"mode"` +} + +func init() { + t["VirtualDiskModeNotSupported"] = reflect.TypeOf((*VirtualDiskModeNotSupported)(nil)).Elem() +} + +type VirtualDiskModeNotSupportedFault VirtualDiskModeNotSupported + +func init() { + t["VirtualDiskModeNotSupportedFault"] = reflect.TypeOf((*VirtualDiskModeNotSupportedFault)(nil)).Elem() +} + +type VirtualDiskOption struct { + VirtualDeviceOption + + CapacityInKB LongOption `xml:"capacityInKB"` + IoAllocationOption *StorageIOAllocationOption `xml:"ioAllocationOption,omitempty"` + VFlashCacheConfigOption *VirtualDiskOptionVFlashCacheConfigOption `xml:"vFlashCacheConfigOption,omitempty"` +} + +func init() { + t["VirtualDiskOption"] = reflect.TypeOf((*VirtualDiskOption)(nil)).Elem() +} + +type VirtualDiskOptionVFlashCacheConfigOption struct { + DynamicData + + CacheConsistencyType ChoiceOption `xml:"cacheConsistencyType"` + CacheMode ChoiceOption `xml:"cacheMode"` + ReservationInMB LongOption `xml:"reservationInMB"` + BlockSizeInKB LongOption `xml:"blockSizeInKB"` +} + +func init() { + t["VirtualDiskOptionVFlashCacheConfigOption"] = reflect.TypeOf((*VirtualDiskOptionVFlashCacheConfigOption)(nil)).Elem() +} + +type VirtualDiskPartitionedRawDiskVer2BackingInfo struct { + VirtualDiskRawDiskVer2BackingInfo + + Partition []int `xml:"partition"` +} + +func init() { + t["VirtualDiskPartitionedRawDiskVer2BackingInfo"] = reflect.TypeOf((*VirtualDiskPartitionedRawDiskVer2BackingInfo)(nil)).Elem() +} + +type VirtualDiskPartitionedRawDiskVer2BackingOption struct { + VirtualDiskRawDiskVer2BackingOption +} + +func init() { + t["VirtualDiskPartitionedRawDiskVer2BackingOption"] = reflect.TypeOf((*VirtualDiskPartitionedRawDiskVer2BackingOption)(nil)).Elem() +} + +type VirtualDiskRawDiskMappingVer1BackingInfo struct { + VirtualDeviceFileBackingInfo + + LunUuid string `xml:"lunUuid,omitempty"` + DeviceName string `xml:"deviceName,omitempty"` + CompatibilityMode string `xml:"compatibilityMode,omitempty"` + DiskMode string `xml:"diskMode,omitempty"` + Uuid string `xml:"uuid,omitempty"` + ContentId string `xml:"contentId,omitempty"` + ChangeId string `xml:"changeId,omitempty"` + Parent *VirtualDiskRawDiskMappingVer1BackingInfo `xml:"parent,omitempty"` + Sharing string `xml:"sharing,omitempty"` +} + +func init() { + t["VirtualDiskRawDiskMappingVer1BackingInfo"] = reflect.TypeOf((*VirtualDiskRawDiskMappingVer1BackingInfo)(nil)).Elem() +} + +type VirtualDiskRawDiskMappingVer1BackingOption struct { + VirtualDeviceDeviceBackingOption + + DescriptorFileNameExtensions *ChoiceOption `xml:"descriptorFileNameExtensions,omitempty"` + CompatibilityMode ChoiceOption `xml:"compatibilityMode"` + DiskMode ChoiceOption `xml:"diskMode"` + Uuid bool `xml:"uuid"` +} + +func init() { + t["VirtualDiskRawDiskMappingVer1BackingOption"] = reflect.TypeOf((*VirtualDiskRawDiskMappingVer1BackingOption)(nil)).Elem() +} + +type VirtualDiskRawDiskVer2BackingInfo struct { + VirtualDeviceDeviceBackingInfo + + DescriptorFileName string `xml:"descriptorFileName"` + Uuid string `xml:"uuid,omitempty"` + ChangeId string `xml:"changeId,omitempty"` + Sharing string `xml:"sharing,omitempty"` +} + +func init() { + t["VirtualDiskRawDiskVer2BackingInfo"] = reflect.TypeOf((*VirtualDiskRawDiskVer2BackingInfo)(nil)).Elem() +} + +type VirtualDiskRawDiskVer2BackingOption struct { + VirtualDeviceDeviceBackingOption + + DescriptorFileNameExtensions ChoiceOption `xml:"descriptorFileNameExtensions"` + 
Uuid bool `xml:"uuid"` +} + +func init() { + t["VirtualDiskRawDiskVer2BackingOption"] = reflect.TypeOf((*VirtualDiskRawDiskVer2BackingOption)(nil)).Elem() +} + +type VirtualDiskSeSparseBackingInfo struct { + VirtualDeviceFileBackingInfo + + DiskMode string `xml:"diskMode"` + WriteThrough *bool `xml:"writeThrough"` + Uuid string `xml:"uuid,omitempty"` + ContentId string `xml:"contentId,omitempty"` + ChangeId string `xml:"changeId,omitempty"` + Parent *VirtualDiskSeSparseBackingInfo `xml:"parent,omitempty"` + DeltaDiskFormat string `xml:"deltaDiskFormat,omitempty"` + DigestEnabled *bool `xml:"digestEnabled"` + GrainSize int `xml:"grainSize,omitempty"` +} + +func init() { + t["VirtualDiskSeSparseBackingInfo"] = reflect.TypeOf((*VirtualDiskSeSparseBackingInfo)(nil)).Elem() +} + +type VirtualDiskSeSparseBackingOption struct { + VirtualDeviceFileBackingOption + + DiskMode ChoiceOption `xml:"diskMode"` + WriteThrough BoolOption `xml:"writeThrough"` + Growable bool `xml:"growable"` + HotGrowable bool `xml:"hotGrowable"` + Uuid bool `xml:"uuid"` + DeltaDiskFormatsSupported []VirtualDiskDeltaDiskFormatsSupported `xml:"deltaDiskFormatsSupported"` +} + +func init() { + t["VirtualDiskSeSparseBackingOption"] = reflect.TypeOf((*VirtualDiskSeSparseBackingOption)(nil)).Elem() +} + +type VirtualDiskSparseVer1BackingInfo struct { + VirtualDeviceFileBackingInfo + + DiskMode string `xml:"diskMode"` + Split *bool `xml:"split"` + WriteThrough *bool `xml:"writeThrough"` + SpaceUsedInKB int64 `xml:"spaceUsedInKB,omitempty"` + ContentId string `xml:"contentId,omitempty"` + Parent *VirtualDiskSparseVer1BackingInfo `xml:"parent,omitempty"` +} + +func init() { + t["VirtualDiskSparseVer1BackingInfo"] = reflect.TypeOf((*VirtualDiskSparseVer1BackingInfo)(nil)).Elem() +} + +type VirtualDiskSparseVer1BackingOption struct { + VirtualDeviceFileBackingOption + + DiskModes ChoiceOption `xml:"diskModes"` + Split BoolOption `xml:"split"` + WriteThrough BoolOption `xml:"writeThrough"` + Growable bool `xml:"growable"` +} + +func init() { + t["VirtualDiskSparseVer1BackingOption"] = reflect.TypeOf((*VirtualDiskSparseVer1BackingOption)(nil)).Elem() +} + +type VirtualDiskSparseVer2BackingInfo struct { + VirtualDeviceFileBackingInfo + + DiskMode string `xml:"diskMode"` + Split *bool `xml:"split"` + WriteThrough *bool `xml:"writeThrough"` + SpaceUsedInKB int64 `xml:"spaceUsedInKB,omitempty"` + Uuid string `xml:"uuid,omitempty"` + ContentId string `xml:"contentId,omitempty"` + ChangeId string `xml:"changeId,omitempty"` + Parent *VirtualDiskSparseVer2BackingInfo `xml:"parent,omitempty"` +} + +func init() { + t["VirtualDiskSparseVer2BackingInfo"] = reflect.TypeOf((*VirtualDiskSparseVer2BackingInfo)(nil)).Elem() +} + +type VirtualDiskSparseVer2BackingOption struct { + VirtualDeviceFileBackingOption + + DiskMode ChoiceOption `xml:"diskMode"` + Split BoolOption `xml:"split"` + WriteThrough BoolOption `xml:"writeThrough"` + Growable bool `xml:"growable"` + HotGrowable bool `xml:"hotGrowable"` + Uuid bool `xml:"uuid"` +} + +func init() { + t["VirtualDiskSparseVer2BackingOption"] = reflect.TypeOf((*VirtualDiskSparseVer2BackingOption)(nil)).Elem() +} + +type VirtualDiskSpec struct { + DynamicData + + DiskType string `xml:"diskType"` + AdapterType string `xml:"adapterType"` +} + +func init() { + t["VirtualDiskSpec"] = reflect.TypeOf((*VirtualDiskSpec)(nil)).Elem() +} + +type VirtualDiskVFlashCacheConfigInfo struct { + DynamicData + + VFlashModule string `xml:"vFlashModule,omitempty"` + ReservationInMB int64 `xml:"reservationInMB,omitempty"` + 
CacheConsistencyType string `xml:"cacheConsistencyType,omitempty"` + CacheMode string `xml:"cacheMode,omitempty"` + BlockSizeInKB int64 `xml:"blockSizeInKB,omitempty"` +} + +func init() { + t["VirtualDiskVFlashCacheConfigInfo"] = reflect.TypeOf((*VirtualDiskVFlashCacheConfigInfo)(nil)).Elem() +} + +type VirtualE1000 struct { + VirtualEthernetCard +} + +func init() { + t["VirtualE1000"] = reflect.TypeOf((*VirtualE1000)(nil)).Elem() +} + +type VirtualE1000Option struct { + VirtualEthernetCardOption +} + +func init() { + t["VirtualE1000Option"] = reflect.TypeOf((*VirtualE1000Option)(nil)).Elem() +} + +type VirtualE1000e struct { + VirtualEthernetCard +} + +func init() { + t["VirtualE1000e"] = reflect.TypeOf((*VirtualE1000e)(nil)).Elem() +} + +type VirtualE1000eOption struct { + VirtualEthernetCardOption +} + +func init() { + t["VirtualE1000eOption"] = reflect.TypeOf((*VirtualE1000eOption)(nil)).Elem() +} + +type VirtualEnsoniq1371 struct { + VirtualSoundCard +} + +func init() { + t["VirtualEnsoniq1371"] = reflect.TypeOf((*VirtualEnsoniq1371)(nil)).Elem() +} + +type VirtualEnsoniq1371Option struct { + VirtualSoundCardOption +} + +func init() { + t["VirtualEnsoniq1371Option"] = reflect.TypeOf((*VirtualEnsoniq1371Option)(nil)).Elem() +} + +type VirtualEthernetCard struct { + VirtualDevice + + AddressType string `xml:"addressType,omitempty"` + MacAddress string `xml:"macAddress,omitempty"` + WakeOnLanEnabled *bool `xml:"wakeOnLanEnabled"` + ResourceAllocation *VirtualEthernetCardResourceAllocation `xml:"resourceAllocation,omitempty"` + ExternalId string `xml:"externalId,omitempty"` + UptCompatibilityEnabled *bool `xml:"uptCompatibilityEnabled"` +} + +func init() { + t["VirtualEthernetCard"] = reflect.TypeOf((*VirtualEthernetCard)(nil)).Elem() +} + +type VirtualEthernetCardDVPortBackingOption struct { + VirtualDeviceBackingOption +} + +func init() { + t["VirtualEthernetCardDVPortBackingOption"] = reflect.TypeOf((*VirtualEthernetCardDVPortBackingOption)(nil)).Elem() +} + +type VirtualEthernetCardDistributedVirtualPortBackingInfo struct { + VirtualDeviceBackingInfo + + Port DistributedVirtualSwitchPortConnection `xml:"port"` +} + +func init() { + t["VirtualEthernetCardDistributedVirtualPortBackingInfo"] = reflect.TypeOf((*VirtualEthernetCardDistributedVirtualPortBackingInfo)(nil)).Elem() +} + +type VirtualEthernetCardLegacyNetworkBackingInfo struct { + VirtualDeviceDeviceBackingInfo +} + +func init() { + t["VirtualEthernetCardLegacyNetworkBackingInfo"] = reflect.TypeOf((*VirtualEthernetCardLegacyNetworkBackingInfo)(nil)).Elem() +} + +type VirtualEthernetCardLegacyNetworkBackingOption struct { + VirtualDeviceDeviceBackingOption +} + +func init() { + t["VirtualEthernetCardLegacyNetworkBackingOption"] = reflect.TypeOf((*VirtualEthernetCardLegacyNetworkBackingOption)(nil)).Elem() +} + +type VirtualEthernetCardNetworkBackingInfo struct { + VirtualDeviceDeviceBackingInfo + + Network *ManagedObjectReference `xml:"network,omitempty"` + InPassthroughMode *bool `xml:"inPassthroughMode"` +} + +func init() { + t["VirtualEthernetCardNetworkBackingInfo"] = reflect.TypeOf((*VirtualEthernetCardNetworkBackingInfo)(nil)).Elem() +} + +type VirtualEthernetCardNetworkBackingOption struct { + VirtualDeviceDeviceBackingOption +} + +func init() { + t["VirtualEthernetCardNetworkBackingOption"] = reflect.TypeOf((*VirtualEthernetCardNetworkBackingOption)(nil)).Elem() +} + +type VirtualEthernetCardNotSupported struct { + DeviceNotSupported +} + +func init() { + t["VirtualEthernetCardNotSupported"] = 
reflect.TypeOf((*VirtualEthernetCardNotSupported)(nil)).Elem() +} + +type VirtualEthernetCardNotSupportedFault VirtualEthernetCardNotSupported + +func init() { + t["VirtualEthernetCardNotSupportedFault"] = reflect.TypeOf((*VirtualEthernetCardNotSupportedFault)(nil)).Elem() +} + +type VirtualEthernetCardOpaqueNetworkBackingInfo struct { + VirtualDeviceBackingInfo + + OpaqueNetworkId string `xml:"opaqueNetworkId"` + OpaqueNetworkType string `xml:"opaqueNetworkType"` +} + +func init() { + t["VirtualEthernetCardOpaqueNetworkBackingInfo"] = reflect.TypeOf((*VirtualEthernetCardOpaqueNetworkBackingInfo)(nil)).Elem() +} + +type VirtualEthernetCardOpaqueNetworkBackingOption struct { + VirtualDeviceBackingOption +} + +func init() { + t["VirtualEthernetCardOpaqueNetworkBackingOption"] = reflect.TypeOf((*VirtualEthernetCardOpaqueNetworkBackingOption)(nil)).Elem() +} + +type VirtualEthernetCardOption struct { + VirtualDeviceOption + + SupportedOUI ChoiceOption `xml:"supportedOUI"` + MacType ChoiceOption `xml:"macType"` + WakeOnLanEnabled BoolOption `xml:"wakeOnLanEnabled"` + VmDirectPathGen2Supported *bool `xml:"vmDirectPathGen2Supported"` + UptCompatibilityEnabled *BoolOption `xml:"uptCompatibilityEnabled,omitempty"` +} + +func init() { + t["VirtualEthernetCardOption"] = reflect.TypeOf((*VirtualEthernetCardOption)(nil)).Elem() +} + +type VirtualEthernetCardResourceAllocation struct { + DynamicData + + Reservation int64 `xml:"reservation,omitempty"` + Share SharesInfo `xml:"share"` + Limit int64 `xml:"limit,omitempty"` +} + +func init() { + t["VirtualEthernetCardResourceAllocation"] = reflect.TypeOf((*VirtualEthernetCardResourceAllocation)(nil)).Elem() +} + +type VirtualFloppy struct { + VirtualDevice +} + +func init() { + t["VirtualFloppy"] = reflect.TypeOf((*VirtualFloppy)(nil)).Elem() +} + +type VirtualFloppyDeviceBackingInfo struct { + VirtualDeviceDeviceBackingInfo +} + +func init() { + t["VirtualFloppyDeviceBackingInfo"] = reflect.TypeOf((*VirtualFloppyDeviceBackingInfo)(nil)).Elem() +} + +type VirtualFloppyDeviceBackingOption struct { + VirtualDeviceDeviceBackingOption +} + +func init() { + t["VirtualFloppyDeviceBackingOption"] = reflect.TypeOf((*VirtualFloppyDeviceBackingOption)(nil)).Elem() +} + +type VirtualFloppyImageBackingInfo struct { + VirtualDeviceFileBackingInfo +} + +func init() { + t["VirtualFloppyImageBackingInfo"] = reflect.TypeOf((*VirtualFloppyImageBackingInfo)(nil)).Elem() +} + +type VirtualFloppyImageBackingOption struct { + VirtualDeviceFileBackingOption +} + +func init() { + t["VirtualFloppyImageBackingOption"] = reflect.TypeOf((*VirtualFloppyImageBackingOption)(nil)).Elem() +} + +type VirtualFloppyOption struct { + VirtualDeviceOption +} + +func init() { + t["VirtualFloppyOption"] = reflect.TypeOf((*VirtualFloppyOption)(nil)).Elem() +} + +type VirtualFloppyRemoteDeviceBackingInfo struct { + VirtualDeviceRemoteDeviceBackingInfo +} + +func init() { + t["VirtualFloppyRemoteDeviceBackingInfo"] = reflect.TypeOf((*VirtualFloppyRemoteDeviceBackingInfo)(nil)).Elem() +} + +type VirtualFloppyRemoteDeviceBackingOption struct { + VirtualDeviceRemoteDeviceBackingOption +} + +func init() { + t["VirtualFloppyRemoteDeviceBackingOption"] = reflect.TypeOf((*VirtualFloppyRemoteDeviceBackingOption)(nil)).Elem() +} + +type VirtualHardware struct { + DynamicData + + NumCPU int `xml:"numCPU"` + NumCoresPerSocket int `xml:"numCoresPerSocket,omitempty"` + MemoryMB int `xml:"memoryMB"` + VirtualICH7MPresent *bool `xml:"virtualICH7MPresent"` + VirtualSMCPresent *bool `xml:"virtualSMCPresent"` + Device 
[]BaseVirtualDevice `xml:"device,omitempty,typeattr"` +} + +func init() { + t["VirtualHardware"] = reflect.TypeOf((*VirtualHardware)(nil)).Elem() +} + +type VirtualHardwareCompatibilityIssue struct { + VmConfigFault +} + +func init() { + t["VirtualHardwareCompatibilityIssue"] = reflect.TypeOf((*VirtualHardwareCompatibilityIssue)(nil)).Elem() +} + +type VirtualHardwareCompatibilityIssueFault BaseVirtualHardwareCompatibilityIssue + +func init() { + t["VirtualHardwareCompatibilityIssueFault"] = reflect.TypeOf((*VirtualHardwareCompatibilityIssueFault)(nil)).Elem() +} + +type VirtualHardwareOption struct { + DynamicData + + HwVersion int `xml:"hwVersion"` + VirtualDeviceOption []BaseVirtualDeviceOption `xml:"virtualDeviceOption,typeattr"` + DeviceListReadonly bool `xml:"deviceListReadonly"` + NumCPU []int `xml:"numCPU"` + NumCoresPerSocket *IntOption `xml:"numCoresPerSocket,omitempty"` + NumCpuReadonly bool `xml:"numCpuReadonly"` + MemoryMB LongOption `xml:"memoryMB"` + NumPCIControllers IntOption `xml:"numPCIControllers"` + NumIDEControllers IntOption `xml:"numIDEControllers"` + NumUSBControllers IntOption `xml:"numUSBControllers"` + NumUSBXHCIControllers *IntOption `xml:"numUSBXHCIControllers,omitempty"` + NumSIOControllers IntOption `xml:"numSIOControllers"` + NumPS2Controllers IntOption `xml:"numPS2Controllers"` + LicensingLimit []string `xml:"licensingLimit,omitempty"` + NumSupportedWwnPorts *IntOption `xml:"numSupportedWwnPorts,omitempty"` + NumSupportedWwnNodes *IntOption `xml:"numSupportedWwnNodes,omitempty"` + ResourceConfigOption *ResourceConfigOption `xml:"resourceConfigOption,omitempty"` +} + +func init() { + t["VirtualHardwareOption"] = reflect.TypeOf((*VirtualHardwareOption)(nil)).Elem() +} + +type VirtualHardwareVersionNotSupported struct { + VirtualHardwareCompatibilityIssue + + HostName string `xml:"hostName"` + Host ManagedObjectReference `xml:"host"` +} + +func init() { + t["VirtualHardwareVersionNotSupported"] = reflect.TypeOf((*VirtualHardwareVersionNotSupported)(nil)).Elem() +} + +type VirtualHardwareVersionNotSupportedFault VirtualHardwareVersionNotSupported + +func init() { + t["VirtualHardwareVersionNotSupportedFault"] = reflect.TypeOf((*VirtualHardwareVersionNotSupportedFault)(nil)).Elem() +} + +type VirtualHdAudioCard struct { + VirtualSoundCard +} + +func init() { + t["VirtualHdAudioCard"] = reflect.TypeOf((*VirtualHdAudioCard)(nil)).Elem() +} + +type VirtualHdAudioCardOption struct { + VirtualSoundCardOption +} + +func init() { + t["VirtualHdAudioCardOption"] = reflect.TypeOf((*VirtualHdAudioCardOption)(nil)).Elem() +} + +type VirtualIDEController struct { + VirtualController +} + +func init() { + t["VirtualIDEController"] = reflect.TypeOf((*VirtualIDEController)(nil)).Elem() +} + +type VirtualIDEControllerOption struct { + VirtualControllerOption + + NumIDEDisks IntOption `xml:"numIDEDisks"` + NumIDECdroms IntOption `xml:"numIDECdroms"` +} + +func init() { + t["VirtualIDEControllerOption"] = reflect.TypeOf((*VirtualIDEControllerOption)(nil)).Elem() +} + +type VirtualKeyboard struct { + VirtualDevice +} + +func init() { + t["VirtualKeyboard"] = reflect.TypeOf((*VirtualKeyboard)(nil)).Elem() +} + +type VirtualKeyboardOption struct { + VirtualDeviceOption +} + +func init() { + t["VirtualKeyboardOption"] = reflect.TypeOf((*VirtualKeyboardOption)(nil)).Elem() +} + +type VirtualLsiLogicController struct { + VirtualSCSIController +} + +func init() { + t["VirtualLsiLogicController"] = reflect.TypeOf((*VirtualLsiLogicController)(nil)).Elem() +} + +type 
VirtualLsiLogicControllerOption struct { + VirtualSCSIControllerOption +} + +func init() { + t["VirtualLsiLogicControllerOption"] = reflect.TypeOf((*VirtualLsiLogicControllerOption)(nil)).Elem() +} + +type VirtualLsiLogicSASController struct { + VirtualSCSIController +} + +func init() { + t["VirtualLsiLogicSASController"] = reflect.TypeOf((*VirtualLsiLogicSASController)(nil)).Elem() +} + +type VirtualLsiLogicSASControllerOption struct { + VirtualSCSIControllerOption +} + +func init() { + t["VirtualLsiLogicSASControllerOption"] = reflect.TypeOf((*VirtualLsiLogicSASControllerOption)(nil)).Elem() +} + +type VirtualMachineAffinityInfo struct { + DynamicData + + AffinitySet []int `xml:"affinitySet,omitempty"` +} + +func init() { + t["VirtualMachineAffinityInfo"] = reflect.TypeOf((*VirtualMachineAffinityInfo)(nil)).Elem() +} + +type VirtualMachineBootOptions struct { + DynamicData + + BootDelay int64 `xml:"bootDelay,omitempty"` + EnterBIOSSetup *bool `xml:"enterBIOSSetup"` + BootRetryEnabled *bool `xml:"bootRetryEnabled"` + BootRetryDelay int64 `xml:"bootRetryDelay,omitempty"` + BootOrder []BaseVirtualMachineBootOptionsBootableDevice `xml:"bootOrder,omitempty,typeattr"` + NetworkBootProtocol string `xml:"networkBootProtocol,omitempty"` +} + +func init() { + t["VirtualMachineBootOptions"] = reflect.TypeOf((*VirtualMachineBootOptions)(nil)).Elem() +} + +type VirtualMachineBootOptionsBootableCdromDevice struct { + VirtualMachineBootOptionsBootableDevice +} + +func init() { + t["VirtualMachineBootOptionsBootableCdromDevice"] = reflect.TypeOf((*VirtualMachineBootOptionsBootableCdromDevice)(nil)).Elem() +} + +type VirtualMachineBootOptionsBootableDevice struct { + DynamicData +} + +func init() { + t["VirtualMachineBootOptionsBootableDevice"] = reflect.TypeOf((*VirtualMachineBootOptionsBootableDevice)(nil)).Elem() +} + +type VirtualMachineBootOptionsBootableDiskDevice struct { + VirtualMachineBootOptionsBootableDevice + + DeviceKey int `xml:"deviceKey"` +} + +func init() { + t["VirtualMachineBootOptionsBootableDiskDevice"] = reflect.TypeOf((*VirtualMachineBootOptionsBootableDiskDevice)(nil)).Elem() +} + +type VirtualMachineBootOptionsBootableEthernetDevice struct { + VirtualMachineBootOptionsBootableDevice + + DeviceKey int `xml:"deviceKey"` +} + +func init() { + t["VirtualMachineBootOptionsBootableEthernetDevice"] = reflect.TypeOf((*VirtualMachineBootOptionsBootableEthernetDevice)(nil)).Elem() +} + +type VirtualMachineBootOptionsBootableFloppyDevice struct { + VirtualMachineBootOptionsBootableDevice +} + +func init() { + t["VirtualMachineBootOptionsBootableFloppyDevice"] = reflect.TypeOf((*VirtualMachineBootOptionsBootableFloppyDevice)(nil)).Elem() +} + +type VirtualMachineCapability struct { + DynamicData + + SnapshotOperationsSupported bool `xml:"snapshotOperationsSupported"` + MultipleSnapshotsSupported bool `xml:"multipleSnapshotsSupported"` + SnapshotConfigSupported bool `xml:"snapshotConfigSupported"` + PoweredOffSnapshotsSupported bool `xml:"poweredOffSnapshotsSupported"` + MemorySnapshotsSupported bool `xml:"memorySnapshotsSupported"` + RevertToSnapshotSupported bool `xml:"revertToSnapshotSupported"` + QuiescedSnapshotsSupported bool `xml:"quiescedSnapshotsSupported"` + DisableSnapshotsSupported bool `xml:"disableSnapshotsSupported"` + LockSnapshotsSupported bool `xml:"lockSnapshotsSupported"` + ConsolePreferencesSupported bool `xml:"consolePreferencesSupported"` + CpuFeatureMaskSupported bool `xml:"cpuFeatureMaskSupported"` + S1AcpiManagementSupported bool `xml:"s1AcpiManagementSupported"` + 
SettingScreenResolutionSupported bool `xml:"settingScreenResolutionSupported"` + ToolsAutoUpdateSupported bool `xml:"toolsAutoUpdateSupported"` + VmNpivWwnSupported bool `xml:"vmNpivWwnSupported"` + NpivWwnOnNonRdmVmSupported bool `xml:"npivWwnOnNonRdmVmSupported"` + VmNpivWwnDisableSupported *bool `xml:"vmNpivWwnDisableSupported"` + VmNpivWwnUpdateSupported *bool `xml:"vmNpivWwnUpdateSupported"` + SwapPlacementSupported bool `xml:"swapPlacementSupported"` + ToolsSyncTimeSupported bool `xml:"toolsSyncTimeSupported"` + VirtualMmuUsageSupported bool `xml:"virtualMmuUsageSupported"` + DiskSharesSupported bool `xml:"diskSharesSupported"` + BootOptionsSupported bool `xml:"bootOptionsSupported"` + BootRetryOptionsSupported *bool `xml:"bootRetryOptionsSupported"` + SettingVideoRamSizeSupported bool `xml:"settingVideoRamSizeSupported"` + SettingDisplayTopologySupported *bool `xml:"settingDisplayTopologySupported"` + RecordReplaySupported *bool `xml:"recordReplaySupported"` + ChangeTrackingSupported *bool `xml:"changeTrackingSupported"` + MultipleCoresPerSocketSupported *bool `xml:"multipleCoresPerSocketSupported"` + HostBasedReplicationSupported *bool `xml:"hostBasedReplicationSupported"` + GuestAutoLockSupported *bool `xml:"guestAutoLockSupported"` + MemoryReservationLockSupported *bool `xml:"memoryReservationLockSupported"` + FeatureRequirementSupported *bool `xml:"featureRequirementSupported"` + PoweredOnMonitorTypeChangeSupported *bool `xml:"poweredOnMonitorTypeChangeSupported"` + SeSparseDiskSupported *bool `xml:"seSparseDiskSupported"` + NestedHVSupported *bool `xml:"nestedHVSupported"` + VPMCSupported *bool `xml:"vPMCSupported"` +} + +func init() { + t["VirtualMachineCapability"] = reflect.TypeOf((*VirtualMachineCapability)(nil)).Elem() +} + +type VirtualMachineCdromInfo struct { + VirtualMachineTargetInfo +} + +func init() { + t["VirtualMachineCdromInfo"] = reflect.TypeOf((*VirtualMachineCdromInfo)(nil)).Elem() +} + +type VirtualMachineCloneSpec struct { + DynamicData + + Location VirtualMachineRelocateSpec `xml:"location"` + Template bool `xml:"template"` + Config *VirtualMachineConfigSpec `xml:"config,omitempty"` + Customization *CustomizationSpec `xml:"customization,omitempty"` + PowerOn bool `xml:"powerOn"` + Snapshot *ManagedObjectReference `xml:"snapshot,omitempty"` + Memory *bool `xml:"memory"` +} + +func init() { + t["VirtualMachineCloneSpec"] = reflect.TypeOf((*VirtualMachineCloneSpec)(nil)).Elem() +} + +type VirtualMachineConfigInfo struct { + DynamicData + + ChangeVersion string `xml:"changeVersion"` + Modified time.Time `xml:"modified"` + Name string `xml:"name"` + GuestFullName string `xml:"guestFullName"` + Version string `xml:"version"` + Uuid string `xml:"uuid"` + InstanceUuid string `xml:"instanceUuid,omitempty"` + NpivNodeWorldWideName []int64 `xml:"npivNodeWorldWideName,omitempty"` + NpivPortWorldWideName []int64 `xml:"npivPortWorldWideName,omitempty"` + NpivWorldWideNameType string `xml:"npivWorldWideNameType,omitempty"` + NpivDesiredNodeWwns int16 `xml:"npivDesiredNodeWwns,omitempty"` + NpivDesiredPortWwns int16 `xml:"npivDesiredPortWwns,omitempty"` + NpivTemporaryDisabled *bool `xml:"npivTemporaryDisabled"` + NpivOnNonRdmDisks *bool `xml:"npivOnNonRdmDisks"` + LocationId string `xml:"locationId,omitempty"` + Template bool `xml:"template"` + GuestId string `xml:"guestId"` + AlternateGuestName string `xml:"alternateGuestName"` + Annotation string `xml:"annotation,omitempty"` + Files VirtualMachineFileInfo `xml:"files"` + Tools *ToolsConfigInfo `xml:"tools,omitempty"` + 
Flags VirtualMachineFlagInfo `xml:"flags"` + ConsolePreferences *VirtualMachineConsolePreferences `xml:"consolePreferences,omitempty"` + DefaultPowerOps VirtualMachineDefaultPowerOpInfo `xml:"defaultPowerOps"` + Hardware VirtualHardware `xml:"hardware"` + CpuAllocation BaseResourceAllocationInfo `xml:"cpuAllocation,omitempty,typeattr"` + MemoryAllocation BaseResourceAllocationInfo `xml:"memoryAllocation,omitempty,typeattr"` + LatencySensitivity *LatencySensitivity `xml:"latencySensitivity,omitempty"` + MemoryHotAddEnabled *bool `xml:"memoryHotAddEnabled"` + CpuHotAddEnabled *bool `xml:"cpuHotAddEnabled"` + CpuHotRemoveEnabled *bool `xml:"cpuHotRemoveEnabled"` + HotPlugMemoryLimit int64 `xml:"hotPlugMemoryLimit,omitempty"` + HotPlugMemoryIncrementSize int64 `xml:"hotPlugMemoryIncrementSize,omitempty"` + CpuAffinity *VirtualMachineAffinityInfo `xml:"cpuAffinity,omitempty"` + MemoryAffinity *VirtualMachineAffinityInfo `xml:"memoryAffinity,omitempty"` + NetworkShaper *VirtualMachineNetworkShaperInfo `xml:"networkShaper,omitempty"` + ExtraConfig []BaseOptionValue `xml:"extraConfig,omitempty,typeattr"` + CpuFeatureMask []HostCpuIdInfo `xml:"cpuFeatureMask,omitempty"` + DatastoreUrl []VirtualMachineConfigInfoDatastoreUrlPair `xml:"datastoreUrl,omitempty"` + SwapPlacement string `xml:"swapPlacement,omitempty"` + BootOptions *VirtualMachineBootOptions `xml:"bootOptions,omitempty"` + FtInfo BaseFaultToleranceConfigInfo `xml:"ftInfo,omitempty,typeattr"` + RepConfig *ReplicationConfigSpec `xml:"repConfig,omitempty"` + VAppConfig BaseVmConfigInfo `xml:"vAppConfig,omitempty,typeattr"` + VAssertsEnabled *bool `xml:"vAssertsEnabled"` + ChangeTrackingEnabled *bool `xml:"changeTrackingEnabled"` + Firmware string `xml:"firmware,omitempty"` + MaxMksConnections int `xml:"maxMksConnections,omitempty"` + GuestAutoLockEnabled *bool `xml:"guestAutoLockEnabled"` + ManagedBy *ManagedByInfo `xml:"managedBy,omitempty"` + MemoryReservationLockedToMax *bool `xml:"memoryReservationLockedToMax"` + InitialOverhead *VirtualMachineConfigInfoOverheadInfo `xml:"initialOverhead,omitempty"` + NestedHVEnabled *bool `xml:"nestedHVEnabled"` + VPMCEnabled *bool `xml:"vPMCEnabled"` + ScheduledHardwareUpgradeInfo *ScheduledHardwareUpgradeInfo `xml:"scheduledHardwareUpgradeInfo,omitempty"` + ForkConfigInfo *VirtualMachineForkConfigInfo `xml:"forkConfigInfo,omitempty"` + VFlashCacheReservation int64 `xml:"vFlashCacheReservation,omitempty"` + VmxConfigChecksum []byte `xml:"vmxConfigChecksum,omitempty"` + MessageBusTunnelEnabled *bool `xml:"messageBusTunnelEnabled"` + VmStorageObjectId string `xml:"vmStorageObjectId,omitempty"` + SwapStorageObjectId string `xml:"swapStorageObjectId,omitempty"` +} + +func init() { + t["VirtualMachineConfigInfo"] = reflect.TypeOf((*VirtualMachineConfigInfo)(nil)).Elem() +} + +type VirtualMachineConfigInfoDatastoreUrlPair struct { + DynamicData + + Name string `xml:"name"` + Url string `xml:"url"` +} + +func init() { + t["VirtualMachineConfigInfoDatastoreUrlPair"] = reflect.TypeOf((*VirtualMachineConfigInfoDatastoreUrlPair)(nil)).Elem() +} + +type VirtualMachineConfigInfoOverheadInfo struct { + DynamicData + + InitialMemoryReservation int64 `xml:"initialMemoryReservation,omitempty"` + InitialSwapReservation int64 `xml:"initialSwapReservation,omitempty"` +} + +func init() { + t["VirtualMachineConfigInfoOverheadInfo"] = reflect.TypeOf((*VirtualMachineConfigInfoOverheadInfo)(nil)).Elem() +} + +type VirtualMachineConfigOption struct { + DynamicData + + Version string `xml:"version"` + Description string 
`xml:"description"` + GuestOSDescriptor []GuestOsDescriptor `xml:"guestOSDescriptor"` + GuestOSDefaultIndex int `xml:"guestOSDefaultIndex"` + HardwareOptions VirtualHardwareOption `xml:"hardwareOptions"` + Capabilities VirtualMachineCapability `xml:"capabilities"` + Datastore DatastoreOption `xml:"datastore"` + DefaultDevice []BaseVirtualDevice `xml:"defaultDevice,omitempty,typeattr"` + SupportedMonitorType []string `xml:"supportedMonitorType"` + SupportedOvfEnvironmentTransport []string `xml:"supportedOvfEnvironmentTransport,omitempty"` + SupportedOvfInstallTransport []string `xml:"supportedOvfInstallTransport,omitempty"` +} + +func init() { + t["VirtualMachineConfigOption"] = reflect.TypeOf((*VirtualMachineConfigOption)(nil)).Elem() +} + +type VirtualMachineConfigOptionDescriptor struct { + DynamicData + + Key string `xml:"key"` + Description string `xml:"description,omitempty"` + Host []ManagedObjectReference `xml:"host,omitempty"` + CreateSupported *bool `xml:"createSupported"` + DefaultConfigOption *bool `xml:"defaultConfigOption"` + RunSupported *bool `xml:"runSupported"` + UpgradeSupported *bool `xml:"upgradeSupported"` +} + +func init() { + t["VirtualMachineConfigOptionDescriptor"] = reflect.TypeOf((*VirtualMachineConfigOptionDescriptor)(nil)).Elem() +} + +type VirtualMachineConfigSpec struct { + DynamicData + + ChangeVersion string `xml:"changeVersion,omitempty"` + Name string `xml:"name,omitempty"` + Version string `xml:"version,omitempty"` + Uuid string `xml:"uuid,omitempty"` + InstanceUuid string `xml:"instanceUuid,omitempty"` + NpivNodeWorldWideName []int64 `xml:"npivNodeWorldWideName,omitempty"` + NpivPortWorldWideName []int64 `xml:"npivPortWorldWideName,omitempty"` + NpivWorldWideNameType string `xml:"npivWorldWideNameType,omitempty"` + NpivDesiredNodeWwns int16 `xml:"npivDesiredNodeWwns,omitempty"` + NpivDesiredPortWwns int16 `xml:"npivDesiredPortWwns,omitempty"` + NpivTemporaryDisabled *bool `xml:"npivTemporaryDisabled"` + NpivOnNonRdmDisks *bool `xml:"npivOnNonRdmDisks"` + NpivWorldWideNameOp string `xml:"npivWorldWideNameOp,omitempty"` + LocationId string `xml:"locationId,omitempty"` + GuestId string `xml:"guestId,omitempty"` + AlternateGuestName string `xml:"alternateGuestName,omitempty"` + Annotation string `xml:"annotation,omitempty"` + Files *VirtualMachineFileInfo `xml:"files,omitempty"` + Tools *ToolsConfigInfo `xml:"tools,omitempty"` + Flags *VirtualMachineFlagInfo `xml:"flags,omitempty"` + ConsolePreferences *VirtualMachineConsolePreferences `xml:"consolePreferences,omitempty"` + PowerOpInfo *VirtualMachineDefaultPowerOpInfo `xml:"powerOpInfo,omitempty"` + NumCPUs int `xml:"numCPUs,omitempty"` + NumCoresPerSocket int `xml:"numCoresPerSocket,omitempty"` + MemoryMB int64 `xml:"memoryMB,omitempty"` + MemoryHotAddEnabled *bool `xml:"memoryHotAddEnabled"` + CpuHotAddEnabled *bool `xml:"cpuHotAddEnabled"` + CpuHotRemoveEnabled *bool `xml:"cpuHotRemoveEnabled"` + VirtualICH7MPresent *bool `xml:"virtualICH7MPresent"` + VirtualSMCPresent *bool `xml:"virtualSMCPresent"` + DeviceChange []BaseVirtualDeviceConfigSpec `xml:"deviceChange,omitempty,typeattr"` + CpuAllocation BaseResourceAllocationInfo `xml:"cpuAllocation,omitempty,typeattr"` + MemoryAllocation BaseResourceAllocationInfo `xml:"memoryAllocation,omitempty,typeattr"` + LatencySensitivity *LatencySensitivity `xml:"latencySensitivity,omitempty"` + CpuAffinity *VirtualMachineAffinityInfo `xml:"cpuAffinity,omitempty"` + MemoryAffinity *VirtualMachineAffinityInfo `xml:"memoryAffinity,omitempty"` + NetworkShaper 
*VirtualMachineNetworkShaperInfo `xml:"networkShaper,omitempty"` + CpuFeatureMask []VirtualMachineCpuIdInfoSpec `xml:"cpuFeatureMask,omitempty"` + ExtraConfig []BaseOptionValue `xml:"extraConfig,omitempty,typeattr"` + SwapPlacement string `xml:"swapPlacement,omitempty"` + BootOptions *VirtualMachineBootOptions `xml:"bootOptions,omitempty"` + VAppConfig BaseVmConfigSpec `xml:"vAppConfig,omitempty,typeattr"` + FtInfo BaseFaultToleranceConfigInfo `xml:"ftInfo,omitempty,typeattr"` + RepConfig *ReplicationConfigSpec `xml:"repConfig,omitempty"` + VAppConfigRemoved *bool `xml:"vAppConfigRemoved"` + VAssertsEnabled *bool `xml:"vAssertsEnabled"` + ChangeTrackingEnabled *bool `xml:"changeTrackingEnabled"` + Firmware string `xml:"firmware,omitempty"` + MaxMksConnections int `xml:"maxMksConnections,omitempty"` + GuestAutoLockEnabled *bool `xml:"guestAutoLockEnabled"` + ManagedBy *ManagedByInfo `xml:"managedBy,omitempty"` + MemoryReservationLockedToMax *bool `xml:"memoryReservationLockedToMax"` + NestedHVEnabled *bool `xml:"nestedHVEnabled"` + VPMCEnabled *bool `xml:"vPMCEnabled"` + ScheduledHardwareUpgradeInfo *ScheduledHardwareUpgradeInfo `xml:"scheduledHardwareUpgradeInfo,omitempty"` + VmProfile []BaseVirtualMachineProfileSpec `xml:"vmProfile,omitempty,typeattr"` + MessageBusTunnelEnabled *bool `xml:"messageBusTunnelEnabled"` +} + +func init() { + t["VirtualMachineConfigSpec"] = reflect.TypeOf((*VirtualMachineConfigSpec)(nil)).Elem() +} + +type VirtualMachineConfigSummary struct { + DynamicData + + Name string `xml:"name"` + Template bool `xml:"template"` + VmPathName string `xml:"vmPathName"` + MemorySizeMB int `xml:"memorySizeMB,omitempty"` + CpuReservation int `xml:"cpuReservation,omitempty"` + MemoryReservation int `xml:"memoryReservation,omitempty"` + NumCpu int `xml:"numCpu,omitempty"` + NumEthernetCards int `xml:"numEthernetCards,omitempty"` + NumVirtualDisks int `xml:"numVirtualDisks,omitempty"` + Uuid string `xml:"uuid,omitempty"` + InstanceUuid string `xml:"instanceUuid,omitempty"` + GuestId string `xml:"guestId,omitempty"` + GuestFullName string `xml:"guestFullName,omitempty"` + Annotation string `xml:"annotation,omitempty"` + Product *VAppProductInfo `xml:"product,omitempty"` + InstallBootRequired *bool `xml:"installBootRequired"` + FtInfo BaseFaultToleranceConfigInfo `xml:"ftInfo,omitempty,typeattr"` + ManagedBy *ManagedByInfo `xml:"managedBy,omitempty"` +} + +func init() { + t["VirtualMachineConfigSummary"] = reflect.TypeOf((*VirtualMachineConfigSummary)(nil)).Elem() +} + +type VirtualMachineConsolePreferences struct { + DynamicData + + PowerOnWhenOpened *bool `xml:"powerOnWhenOpened"` + EnterFullScreenOnPowerOn *bool `xml:"enterFullScreenOnPowerOn"` + CloseOnPowerOffOrSuspend *bool `xml:"closeOnPowerOffOrSuspend"` +} + +func init() { + t["VirtualMachineConsolePreferences"] = reflect.TypeOf((*VirtualMachineConsolePreferences)(nil)).Elem() +} + +type VirtualMachineCpuIdInfoSpec struct { + ArrayUpdateSpec + + Info *HostCpuIdInfo `xml:"info,omitempty"` +} + +func init() { + t["VirtualMachineCpuIdInfoSpec"] = reflect.TypeOf((*VirtualMachineCpuIdInfoSpec)(nil)).Elem() +} + +type VirtualMachineCreateChildSpec struct { + DynamicData + + Location *VirtualMachineRelocateSpec `xml:"location,omitempty"` + Persistent bool `xml:"persistent"` + ConfigParams []BaseOptionValue `xml:"configParams,omitempty,typeattr"` +} + +func init() { + t["VirtualMachineCreateChildSpec"] = reflect.TypeOf((*VirtualMachineCreateChildSpec)(nil)).Elem() +} + +type VirtualMachineDatastoreInfo struct { + 
VirtualMachineTargetInfo + + Datastore DatastoreSummary `xml:"datastore"` + Capability DatastoreCapability `xml:"capability"` + MaxFileSize int64 `xml:"maxFileSize"` + MaxVirtualDiskCapacity int64 `xml:"maxVirtualDiskCapacity,omitempty"` + MaxPhysicalRDMFileSize int64 `xml:"maxPhysicalRDMFileSize,omitempty"` + MaxVirtualRDMFileSize int64 `xml:"maxVirtualRDMFileSize,omitempty"` + Mode string `xml:"mode"` + VStorageSupport string `xml:"vStorageSupport,omitempty"` +} + +func init() { + t["VirtualMachineDatastoreInfo"] = reflect.TypeOf((*VirtualMachineDatastoreInfo)(nil)).Elem() +} + +type VirtualMachineDatastoreVolumeOption struct { + DynamicData + + FileSystemType string `xml:"fileSystemType"` + MajorVersion int `xml:"majorVersion,omitempty"` +} + +func init() { + t["VirtualMachineDatastoreVolumeOption"] = reflect.TypeOf((*VirtualMachineDatastoreVolumeOption)(nil)).Elem() +} + +type VirtualMachineDefaultPowerOpInfo struct { + DynamicData + + PowerOffType string `xml:"powerOffType,omitempty"` + SuspendType string `xml:"suspendType,omitempty"` + ResetType string `xml:"resetType,omitempty"` + DefaultPowerOffType string `xml:"defaultPowerOffType,omitempty"` + DefaultSuspendType string `xml:"defaultSuspendType,omitempty"` + DefaultResetType string `xml:"defaultResetType,omitempty"` + StandbyAction string `xml:"standbyAction,omitempty"` +} + +func init() { + t["VirtualMachineDefaultPowerOpInfo"] = reflect.TypeOf((*VirtualMachineDefaultPowerOpInfo)(nil)).Elem() +} + +type VirtualMachineDefaultProfileSpec struct { + VirtualMachineProfileSpec +} + +func init() { + t["VirtualMachineDefaultProfileSpec"] = reflect.TypeOf((*VirtualMachineDefaultProfileSpec)(nil)).Elem() +} + +type VirtualMachineDefinedProfileSpec struct { + VirtualMachineProfileSpec + + ProfileId string `xml:"profileId"` + ProfileData *VirtualMachineProfileRawData `xml:"profileData,omitempty"` +} + +func init() { + t["VirtualMachineDefinedProfileSpec"] = reflect.TypeOf((*VirtualMachineDefinedProfileSpec)(nil)).Elem() +} + +type VirtualMachineDeviceRuntimeInfo struct { + DynamicData + + RuntimeState BaseVirtualMachineDeviceRuntimeInfoDeviceRuntimeState `xml:"runtimeState,typeattr"` + Key int `xml:"key"` +} + +func init() { + t["VirtualMachineDeviceRuntimeInfo"] = reflect.TypeOf((*VirtualMachineDeviceRuntimeInfo)(nil)).Elem() +} + +type VirtualMachineDeviceRuntimeInfoDeviceRuntimeState struct { + DynamicData +} + +func init() { + t["VirtualMachineDeviceRuntimeInfoDeviceRuntimeState"] = reflect.TypeOf((*VirtualMachineDeviceRuntimeInfoDeviceRuntimeState)(nil)).Elem() +} + +type VirtualMachineDeviceRuntimeInfoVirtualEthernetCardRuntimeState struct { + VirtualMachineDeviceRuntimeInfoDeviceRuntimeState + + VmDirectPathGen2Active bool `xml:"vmDirectPathGen2Active"` + VmDirectPathGen2InactiveReasonVm []string `xml:"vmDirectPathGen2InactiveReasonVm,omitempty"` + VmDirectPathGen2InactiveReasonOther []string `xml:"vmDirectPathGen2InactiveReasonOther,omitempty"` + VmDirectPathGen2InactiveReasonExtended string `xml:"vmDirectPathGen2InactiveReasonExtended,omitempty"` + ReservationStatus string `xml:"reservationStatus,omitempty"` +} + +func init() { + t["VirtualMachineDeviceRuntimeInfoVirtualEthernetCardRuntimeState"] = reflect.TypeOf((*VirtualMachineDeviceRuntimeInfoVirtualEthernetCardRuntimeState)(nil)).Elem() +} + +type VirtualMachineDiskDeviceInfo struct { + VirtualMachineTargetInfo + + Capacity int64 `xml:"capacity,omitempty"` + Vm []ManagedObjectReference `xml:"vm,omitempty"` +} + +func init() { + t["VirtualMachineDiskDeviceInfo"] = 
reflect.TypeOf((*VirtualMachineDiskDeviceInfo)(nil)).Elem() +} + +type VirtualMachineDisplayTopology struct { + DynamicData + + X int `xml:"x"` + Y int `xml:"y"` + Width int `xml:"width"` + Height int `xml:"height"` +} + +func init() { + t["VirtualMachineDisplayTopology"] = reflect.TypeOf((*VirtualMachineDisplayTopology)(nil)).Elem() +} + +type VirtualMachineEmptyProfileSpec struct { + VirtualMachineProfileSpec +} + +func init() { + t["VirtualMachineEmptyProfileSpec"] = reflect.TypeOf((*VirtualMachineEmptyProfileSpec)(nil)).Elem() +} + +type VirtualMachineFeatureRequirement struct { + DynamicData + + Key string `xml:"key"` + FeatureName string `xml:"featureName"` + Value string `xml:"value"` +} + +func init() { + t["VirtualMachineFeatureRequirement"] = reflect.TypeOf((*VirtualMachineFeatureRequirement)(nil)).Elem() +} + +type VirtualMachineFileInfo struct { + DynamicData + + VmPathName string `xml:"vmPathName,omitempty"` + SnapshotDirectory string `xml:"snapshotDirectory,omitempty"` + SuspendDirectory string `xml:"suspendDirectory,omitempty"` + LogDirectory string `xml:"logDirectory,omitempty"` + FtMetadataDirectory string `xml:"ftMetadataDirectory,omitempty"` +} + +func init() { + t["VirtualMachineFileInfo"] = reflect.TypeOf((*VirtualMachineFileInfo)(nil)).Elem() +} + +type VirtualMachineFileLayout struct { + DynamicData + + ConfigFile []string `xml:"configFile,omitempty"` + LogFile []string `xml:"logFile,omitempty"` + Disk []VirtualMachineFileLayoutDiskLayout `xml:"disk,omitempty"` + Snapshot []VirtualMachineFileLayoutSnapshotLayout `xml:"snapshot,omitempty"` + SwapFile string `xml:"swapFile,omitempty"` +} + +func init() { + t["VirtualMachineFileLayout"] = reflect.TypeOf((*VirtualMachineFileLayout)(nil)).Elem() +} + +type VirtualMachineFileLayoutDiskLayout struct { + DynamicData + + Key int `xml:"key"` + DiskFile []string `xml:"diskFile"` +} + +func init() { + t["VirtualMachineFileLayoutDiskLayout"] = reflect.TypeOf((*VirtualMachineFileLayoutDiskLayout)(nil)).Elem() +} + +type VirtualMachineFileLayoutEx struct { + DynamicData + + File []VirtualMachineFileLayoutExFileInfo `xml:"file,omitempty"` + Disk []VirtualMachineFileLayoutExDiskLayout `xml:"disk,omitempty"` + Snapshot []VirtualMachineFileLayoutExSnapshotLayout `xml:"snapshot,omitempty"` + Timestamp time.Time `xml:"timestamp"` +} + +func init() { + t["VirtualMachineFileLayoutEx"] = reflect.TypeOf((*VirtualMachineFileLayoutEx)(nil)).Elem() +} + +type VirtualMachineFileLayoutExDiskLayout struct { + DynamicData + + Key int `xml:"key"` + Chain []VirtualMachineFileLayoutExDiskUnit `xml:"chain,omitempty"` +} + +func init() { + t["VirtualMachineFileLayoutExDiskLayout"] = reflect.TypeOf((*VirtualMachineFileLayoutExDiskLayout)(nil)).Elem() +} + +type VirtualMachineFileLayoutExDiskUnit struct { + DynamicData + + FileKey []int `xml:"fileKey"` +} + +func init() { + t["VirtualMachineFileLayoutExDiskUnit"] = reflect.TypeOf((*VirtualMachineFileLayoutExDiskUnit)(nil)).Elem() +} + +type VirtualMachineFileLayoutExFileInfo struct { + DynamicData + + Key int `xml:"key"` + Name string `xml:"name"` + Type string `xml:"type"` + Size int64 `xml:"size"` + UniqueSize int64 `xml:"uniqueSize,omitempty"` + BackingObjectId string `xml:"backingObjectId,omitempty"` + Accessible *bool `xml:"accessible"` +} + +func init() { + t["VirtualMachineFileLayoutExFileInfo"] = reflect.TypeOf((*VirtualMachineFileLayoutExFileInfo)(nil)).Elem() +} + +type VirtualMachineFileLayoutExSnapshotLayout struct { + DynamicData + + Key ManagedObjectReference `xml:"key"` + DataKey int 
`xml:"dataKey"` + MemoryKey int `xml:"memoryKey,omitempty"` + Disk []VirtualMachineFileLayoutExDiskLayout `xml:"disk,omitempty"` +} + +func init() { + t["VirtualMachineFileLayoutExSnapshotLayout"] = reflect.TypeOf((*VirtualMachineFileLayoutExSnapshotLayout)(nil)).Elem() +} + +type VirtualMachineFileLayoutSnapshotLayout struct { + DynamicData + + Key ManagedObjectReference `xml:"key"` + SnapshotFile []string `xml:"snapshotFile"` +} + +func init() { + t["VirtualMachineFileLayoutSnapshotLayout"] = reflect.TypeOf((*VirtualMachineFileLayoutSnapshotLayout)(nil)).Elem() +} + +type VirtualMachineFlagInfo struct { + DynamicData + + DisableAcceleration *bool `xml:"disableAcceleration"` + EnableLogging *bool `xml:"enableLogging"` + UseToe *bool `xml:"useToe"` + RunWithDebugInfo *bool `xml:"runWithDebugInfo"` + MonitorType string `xml:"monitorType,omitempty"` + HtSharing string `xml:"htSharing,omitempty"` + SnapshotDisabled *bool `xml:"snapshotDisabled"` + SnapshotLocked *bool `xml:"snapshotLocked"` + DiskUuidEnabled *bool `xml:"diskUuidEnabled"` + VirtualMmuUsage string `xml:"virtualMmuUsage,omitempty"` + VirtualExecUsage string `xml:"virtualExecUsage,omitempty"` + SnapshotPowerOffBehavior string `xml:"snapshotPowerOffBehavior,omitempty"` + RecordReplayEnabled *bool `xml:"recordReplayEnabled"` + FaultToleranceType string `xml:"faultToleranceType,omitempty"` +} + +func init() { + t["VirtualMachineFlagInfo"] = reflect.TypeOf((*VirtualMachineFlagInfo)(nil)).Elem() +} + +type VirtualMachineFloppyInfo struct { + VirtualMachineTargetInfo +} + +func init() { + t["VirtualMachineFloppyInfo"] = reflect.TypeOf((*VirtualMachineFloppyInfo)(nil)).Elem() +} + +type VirtualMachineForkConfigInfo struct { + DynamicData + + ParentEnabled *bool `xml:"parentEnabled"` + ChildForkGroupId string `xml:"childForkGroupId,omitempty"` + ChildType string `xml:"childType,omitempty"` +} + +func init() { + t["VirtualMachineForkConfigInfo"] = reflect.TypeOf((*VirtualMachineForkConfigInfo)(nil)).Elem() +} + +type VirtualMachineGuestSummary struct { + DynamicData + + GuestId string `xml:"guestId,omitempty"` + GuestFullName string `xml:"guestFullName,omitempty"` + ToolsStatus VirtualMachineToolsStatus `xml:"toolsStatus,omitempty"` + ToolsVersionStatus string `xml:"toolsVersionStatus,omitempty"` + ToolsVersionStatus2 string `xml:"toolsVersionStatus2,omitempty"` + ToolsRunningStatus string `xml:"toolsRunningStatus,omitempty"` + HostName string `xml:"hostName,omitempty"` + IpAddress string `xml:"ipAddress,omitempty"` +} + +func init() { + t["VirtualMachineGuestSummary"] = reflect.TypeOf((*VirtualMachineGuestSummary)(nil)).Elem() +} + +type VirtualMachineIdeDiskDeviceInfo struct { + VirtualMachineDiskDeviceInfo + + PartitionTable []VirtualMachineIdeDiskDevicePartitionInfo `xml:"partitionTable,omitempty"` +} + +func init() { + t["VirtualMachineIdeDiskDeviceInfo"] = reflect.TypeOf((*VirtualMachineIdeDiskDeviceInfo)(nil)).Elem() +} + +type VirtualMachineIdeDiskDevicePartitionInfo struct { + DynamicData + + Id int `xml:"id"` + Capacity int `xml:"capacity"` +} + +func init() { + t["VirtualMachineIdeDiskDevicePartitionInfo"] = reflect.TypeOf((*VirtualMachineIdeDiskDevicePartitionInfo)(nil)).Elem() +} + +type VirtualMachineImportSpec struct { + ImportSpec + + ConfigSpec VirtualMachineConfigSpec `xml:"configSpec"` + ResPoolEntity *ManagedObjectReference `xml:"resPoolEntity,omitempty"` +} + +func init() { + t["VirtualMachineImportSpec"] = reflect.TypeOf((*VirtualMachineImportSpec)(nil)).Elem() +} + +type VirtualMachineLegacyNetworkSwitchInfo struct 
{ + DynamicData + + Name string `xml:"name"` +} + +func init() { + t["VirtualMachineLegacyNetworkSwitchInfo"] = reflect.TypeOf((*VirtualMachineLegacyNetworkSwitchInfo)(nil)).Elem() +} + +type VirtualMachineMemoryReservationInfo struct { + DynamicData + + VirtualMachineMin int64 `xml:"virtualMachineMin"` + VirtualMachineMax int64 `xml:"virtualMachineMax"` + VirtualMachineReserved int64 `xml:"virtualMachineReserved"` + AllocationPolicy string `xml:"allocationPolicy"` +} + +func init() { + t["VirtualMachineMemoryReservationInfo"] = reflect.TypeOf((*VirtualMachineMemoryReservationInfo)(nil)).Elem() +} + +type VirtualMachineMemoryReservationSpec struct { + DynamicData + + VirtualMachineReserved int64 `xml:"virtualMachineReserved,omitempty"` + AllocationPolicy string `xml:"allocationPolicy,omitempty"` +} + +func init() { + t["VirtualMachineMemoryReservationSpec"] = reflect.TypeOf((*VirtualMachineMemoryReservationSpec)(nil)).Elem() +} + +type VirtualMachineMessage struct { + DynamicData + + Id string `xml:"id"` + Argument []AnyType `xml:"argument,omitempty,typeattr"` + Text string `xml:"text,omitempty"` +} + +func init() { + t["VirtualMachineMessage"] = reflect.TypeOf((*VirtualMachineMessage)(nil)).Elem() +} + +type VirtualMachineMetadataManagerVmMetadata struct { + DynamicData + + VmId string `xml:"vmId"` + Metadata string `xml:"metadata,omitempty"` +} + +func init() { + t["VirtualMachineMetadataManagerVmMetadata"] = reflect.TypeOf((*VirtualMachineMetadataManagerVmMetadata)(nil)).Elem() +} + +type VirtualMachineMetadataManagerVmMetadataInput struct { + DynamicData + + Operation string `xml:"operation"` + VmMetadata VirtualMachineMetadataManagerVmMetadata `xml:"vmMetadata"` +} + +func init() { + t["VirtualMachineMetadataManagerVmMetadataInput"] = reflect.TypeOf((*VirtualMachineMetadataManagerVmMetadataInput)(nil)).Elem() +} + +type VirtualMachineMetadataManagerVmMetadataOwner struct { + DynamicData + + Name string `xml:"name"` +} + +func init() { + t["VirtualMachineMetadataManagerVmMetadataOwner"] = reflect.TypeOf((*VirtualMachineMetadataManagerVmMetadataOwner)(nil)).Elem() +} + +type VirtualMachineMetadataManagerVmMetadataResult struct { + DynamicData + + VmMetadata VirtualMachineMetadataManagerVmMetadata `xml:"vmMetadata"` + Error *LocalizedMethodFault `xml:"error,omitempty"` +} + +func init() { + t["VirtualMachineMetadataManagerVmMetadataResult"] = reflect.TypeOf((*VirtualMachineMetadataManagerVmMetadataResult)(nil)).Elem() +} + +type VirtualMachineMksTicket struct { + DynamicData + + Ticket string `xml:"ticket"` + CfgFile string `xml:"cfgFile"` + Host string `xml:"host,omitempty"` + Port int `xml:"port,omitempty"` + SslThumbprint string `xml:"sslThumbprint,omitempty"` +} + +func init() { + t["VirtualMachineMksTicket"] = reflect.TypeOf((*VirtualMachineMksTicket)(nil)).Elem() +} + +type VirtualMachineNetworkInfo struct { + VirtualMachineTargetInfo + + Network BaseNetworkSummary `xml:"network,typeattr"` +} + +func init() { + t["VirtualMachineNetworkInfo"] = reflect.TypeOf((*VirtualMachineNetworkInfo)(nil)).Elem() +} + +type VirtualMachineNetworkShaperInfo struct { + DynamicData + + Enabled *bool `xml:"enabled"` + PeakBps int64 `xml:"peakBps,omitempty"` + AverageBps int64 `xml:"averageBps,omitempty"` + BurstSize int64 `xml:"burstSize,omitempty"` +} + +func init() { + t["VirtualMachineNetworkShaperInfo"] = reflect.TypeOf((*VirtualMachineNetworkShaperInfo)(nil)).Elem() +} + +type VirtualMachineParallelInfo struct { + VirtualMachineTargetInfo +} + +func init() { + t["VirtualMachineParallelInfo"] = 
reflect.TypeOf((*VirtualMachineParallelInfo)(nil)).Elem() +} + +type VirtualMachinePciPassthroughInfo struct { + VirtualMachineTargetInfo + + PciDevice HostPciDevice `xml:"pciDevice"` + SystemId string `xml:"systemId"` +} + +func init() { + t["VirtualMachinePciPassthroughInfo"] = reflect.TypeOf((*VirtualMachinePciPassthroughInfo)(nil)).Elem() +} + +type VirtualMachinePciSharedGpuPassthroughInfo struct { + VirtualMachineTargetInfo + + Vgpu string `xml:"vgpu"` +} + +func init() { + t["VirtualMachinePciSharedGpuPassthroughInfo"] = reflect.TypeOf((*VirtualMachinePciSharedGpuPassthroughInfo)(nil)).Elem() +} + +type VirtualMachineProfileRawData struct { + DynamicData + + ExtensionKey string `xml:"extensionKey"` + ObjectData string `xml:"objectData,omitempty"` +} + +func init() { + t["VirtualMachineProfileRawData"] = reflect.TypeOf((*VirtualMachineProfileRawData)(nil)).Elem() +} + +type VirtualMachineProfileSpec struct { + DynamicData +} + +func init() { + t["VirtualMachineProfileSpec"] = reflect.TypeOf((*VirtualMachineProfileSpec)(nil)).Elem() +} + +type VirtualMachineQuestionInfo struct { + DynamicData + + Id string `xml:"id"` + Text string `xml:"text"` + Choice ChoiceOption `xml:"choice"` + Message []VirtualMachineMessage `xml:"message,omitempty"` +} + +func init() { + t["VirtualMachineQuestionInfo"] = reflect.TypeOf((*VirtualMachineQuestionInfo)(nil)).Elem() +} + +type VirtualMachineQuickStats struct { + DynamicData + + OverallCpuUsage int `xml:"overallCpuUsage,omitempty"` + OverallCpuDemand int `xml:"overallCpuDemand,omitempty"` + GuestMemoryUsage int `xml:"guestMemoryUsage,omitempty"` + HostMemoryUsage int `xml:"hostMemoryUsage,omitempty"` + GuestHeartbeatStatus ManagedEntityStatus `xml:"guestHeartbeatStatus"` + DistributedCpuEntitlement int `xml:"distributedCpuEntitlement,omitempty"` + DistributedMemoryEntitlement int `xml:"distributedMemoryEntitlement,omitempty"` + StaticCpuEntitlement int `xml:"staticCpuEntitlement,omitempty"` + StaticMemoryEntitlement int `xml:"staticMemoryEntitlement,omitempty"` + PrivateMemory int `xml:"privateMemory,omitempty"` + SharedMemory int `xml:"sharedMemory,omitempty"` + SwappedMemory int `xml:"swappedMemory,omitempty"` + BalloonedMemory int `xml:"balloonedMemory,omitempty"` + ConsumedOverheadMemory int `xml:"consumedOverheadMemory,omitempty"` + FtLogBandwidth int `xml:"ftLogBandwidth,omitempty"` + FtSecondaryLatency int `xml:"ftSecondaryLatency,omitempty"` + FtLatencyStatus ManagedEntityStatus `xml:"ftLatencyStatus,omitempty"` + CompressedMemory int64 `xml:"compressedMemory,omitempty"` + UptimeSeconds int `xml:"uptimeSeconds,omitempty"` + SsdSwappedMemory int64 `xml:"ssdSwappedMemory,omitempty"` +} + +func init() { + t["VirtualMachineQuickStats"] = reflect.TypeOf((*VirtualMachineQuickStats)(nil)).Elem() +} + +type VirtualMachineRelocateSpec struct { + DynamicData + + Service *ServiceLocator `xml:"service,omitempty"` + Folder *ManagedObjectReference `xml:"folder,omitempty"` + Datastore *ManagedObjectReference `xml:"datastore,omitempty"` + DiskMoveType string `xml:"diskMoveType,omitempty"` + Pool *ManagedObjectReference `xml:"pool,omitempty"` + Host *ManagedObjectReference `xml:"host,omitempty"` + Disk []VirtualMachineRelocateSpecDiskLocator `xml:"disk,omitempty"` + Transform VirtualMachineRelocateTransformation `xml:"transform,omitempty"` + DeviceChange []BaseVirtualDeviceConfigSpec `xml:"deviceChange,omitempty,typeattr"` + Profile []BaseVirtualMachineProfileSpec `xml:"profile,omitempty,typeattr"` +} + +func init() { + t["VirtualMachineRelocateSpec"] = 
reflect.TypeOf((*VirtualMachineRelocateSpec)(nil)).Elem() +} + +type VirtualMachineRelocateSpecDiskLocator struct { + DynamicData + + DiskId int `xml:"diskId"` + Datastore ManagedObjectReference `xml:"datastore"` + DiskMoveType string `xml:"diskMoveType,omitempty"` + DiskBackingInfo BaseVirtualDeviceBackingInfo `xml:"diskBackingInfo,omitempty,typeattr"` + Profile []BaseVirtualMachineProfileSpec `xml:"profile,omitempty,typeattr"` +} + +func init() { + t["VirtualMachineRelocateSpecDiskLocator"] = reflect.TypeOf((*VirtualMachineRelocateSpecDiskLocator)(nil)).Elem() +} + +type VirtualMachineRuntimeInfo struct { + DynamicData + + Device []VirtualMachineDeviceRuntimeInfo `xml:"device,omitempty"` + Host *ManagedObjectReference `xml:"host,omitempty"` + ConnectionState VirtualMachineConnectionState `xml:"connectionState"` + PowerState VirtualMachinePowerState `xml:"powerState"` + FaultToleranceState VirtualMachineFaultToleranceState `xml:"faultToleranceState,omitempty"` + DasVmProtection *VirtualMachineRuntimeInfoDasProtectionState `xml:"dasVmProtection,omitempty"` + ToolsInstallerMounted bool `xml:"toolsInstallerMounted"` + SuspendTime *time.Time `xml:"suspendTime"` + BootTime *time.Time `xml:"bootTime"` + SuspendInterval int64 `xml:"suspendInterval,omitempty"` + Question *VirtualMachineQuestionInfo `xml:"question,omitempty"` + MemoryOverhead int64 `xml:"memoryOverhead,omitempty"` + MaxCpuUsage int `xml:"maxCpuUsage,omitempty"` + MaxMemoryUsage int `xml:"maxMemoryUsage,omitempty"` + NumMksConnections int `xml:"numMksConnections"` + RecordReplayState VirtualMachineRecordReplayState `xml:"recordReplayState,omitempty"` + CleanPowerOff *bool `xml:"cleanPowerOff"` + NeedSecondaryReason string `xml:"needSecondaryReason,omitempty"` + OnlineStandby *bool `xml:"onlineStandby"` + MinRequiredEVCModeKey string `xml:"minRequiredEVCModeKey,omitempty"` + ConsolidationNeeded *bool `xml:"consolidationNeeded"` + OfflineFeatureRequirement []VirtualMachineFeatureRequirement `xml:"offlineFeatureRequirement,omitempty"` + FeatureRequirement []VirtualMachineFeatureRequirement `xml:"featureRequirement,omitempty"` + FeatureMask []HostFeatureMask `xml:"featureMask,omitempty"` + VFlashCacheAllocation int64 `xml:"vFlashCacheAllocation,omitempty"` + Paused *bool `xml:"paused"` + SnapshotInBackground *bool `xml:"snapshotInBackground"` + QuiescedForkParent *bool `xml:"quiescedForkParent"` +} + +func init() { + t["VirtualMachineRuntimeInfo"] = reflect.TypeOf((*VirtualMachineRuntimeInfo)(nil)).Elem() +} + +type VirtualMachineRuntimeInfoDasProtectionState struct { + DynamicData + + DasProtected bool `xml:"dasProtected"` +} + +func init() { + t["VirtualMachineRuntimeInfoDasProtectionState"] = reflect.TypeOf((*VirtualMachineRuntimeInfoDasProtectionState)(nil)).Elem() +} + +type VirtualMachineScsiDiskDeviceInfo struct { + VirtualMachineDiskDeviceInfo + + Disk *HostScsiDisk `xml:"disk,omitempty"` + TransportHint string `xml:"transportHint,omitempty"` + LunNumber int `xml:"lunNumber,omitempty"` +} + +func init() { + t["VirtualMachineScsiDiskDeviceInfo"] = reflect.TypeOf((*VirtualMachineScsiDiskDeviceInfo)(nil)).Elem() +} + +type VirtualMachineScsiPassthroughInfo struct { + VirtualMachineTargetInfo + + ScsiClass string `xml:"scsiClass"` + Vendor string `xml:"vendor"` + PhysicalUnitNumber int `xml:"physicalUnitNumber"` +} + +func init() { + t["VirtualMachineScsiPassthroughInfo"] = reflect.TypeOf((*VirtualMachineScsiPassthroughInfo)(nil)).Elem() +} + +type VirtualMachineSerialInfo struct { + VirtualMachineTargetInfo +} + +func init() { + 
t["VirtualMachineSerialInfo"] = reflect.TypeOf((*VirtualMachineSerialInfo)(nil)).Elem() +} + +type VirtualMachineSnapshotInfo struct { + DynamicData + + CurrentSnapshot *ManagedObjectReference `xml:"currentSnapshot,omitempty"` + RootSnapshotList []VirtualMachineSnapshotTree `xml:"rootSnapshotList"` +} + +func init() { + t["VirtualMachineSnapshotInfo"] = reflect.TypeOf((*VirtualMachineSnapshotInfo)(nil)).Elem() +} + +type VirtualMachineSnapshotTree struct { + DynamicData + + Snapshot ManagedObjectReference `xml:"snapshot"` + Vm ManagedObjectReference `xml:"vm"` + Name string `xml:"name"` + Description string `xml:"description"` + Id int `xml:"id,omitempty"` + CreateTime time.Time `xml:"createTime"` + State VirtualMachinePowerState `xml:"state"` + Quiesced bool `xml:"quiesced"` + BackupManifest string `xml:"backupManifest,omitempty"` + ChildSnapshotList []VirtualMachineSnapshotTree `xml:"childSnapshotList,omitempty"` + ReplaySupported *bool `xml:"replaySupported"` +} + +func init() { + t["VirtualMachineSnapshotTree"] = reflect.TypeOf((*VirtualMachineSnapshotTree)(nil)).Elem() +} + +type VirtualMachineSoundInfo struct { + VirtualMachineTargetInfo +} + +func init() { + t["VirtualMachineSoundInfo"] = reflect.TypeOf((*VirtualMachineSoundInfo)(nil)).Elem() +} + +type VirtualMachineSriovInfo struct { + VirtualMachinePciPassthroughInfo + + VirtualFunction bool `xml:"virtualFunction"` + Pnic string `xml:"pnic,omitempty"` +} + +func init() { + t["VirtualMachineSriovInfo"] = reflect.TypeOf((*VirtualMachineSriovInfo)(nil)).Elem() +} + +type VirtualMachineStorageInfo struct { + DynamicData + + PerDatastoreUsage []VirtualMachineUsageOnDatastore `xml:"perDatastoreUsage,omitempty"` + Timestamp time.Time `xml:"timestamp"` +} + +func init() { + t["VirtualMachineStorageInfo"] = reflect.TypeOf((*VirtualMachineStorageInfo)(nil)).Elem() +} + +type VirtualMachineStorageSummary struct { + DynamicData + + Committed int64 `xml:"committed"` + Uncommitted int64 `xml:"uncommitted"` + Unshared int64 `xml:"unshared"` + Timestamp time.Time `xml:"timestamp"` +} + +func init() { + t["VirtualMachineStorageSummary"] = reflect.TypeOf((*VirtualMachineStorageSummary)(nil)).Elem() +} + +type VirtualMachineSummary struct { + DynamicData + + Vm *ManagedObjectReference `xml:"vm,omitempty"` + Runtime VirtualMachineRuntimeInfo `xml:"runtime"` + Guest *VirtualMachineGuestSummary `xml:"guest,omitempty"` + Config VirtualMachineConfigSummary `xml:"config"` + Storage *VirtualMachineStorageSummary `xml:"storage,omitempty"` + QuickStats VirtualMachineQuickStats `xml:"quickStats"` + OverallStatus ManagedEntityStatus `xml:"overallStatus"` + CustomValue []BaseCustomFieldValue `xml:"customValue,omitempty,typeattr"` +} + +func init() { + t["VirtualMachineSummary"] = reflect.TypeOf((*VirtualMachineSummary)(nil)).Elem() +} + +type VirtualMachineTargetInfo struct { + DynamicData + + Name string `xml:"name"` + ConfigurationTag []string `xml:"configurationTag,omitempty"` +} + +func init() { + t["VirtualMachineTargetInfo"] = reflect.TypeOf((*VirtualMachineTargetInfo)(nil)).Elem() +} + +type VirtualMachineTicket struct { + DynamicData + + Ticket string `xml:"ticket"` + CfgFile string `xml:"cfgFile"` + Host string `xml:"host,omitempty"` + Port int `xml:"port,omitempty"` + SslThumbprint string `xml:"sslThumbprint,omitempty"` +} + +func init() { + t["VirtualMachineTicket"] = reflect.TypeOf((*VirtualMachineTicket)(nil)).Elem() +} + +type VirtualMachineUsageOnDatastore struct { + DynamicData + + Datastore ManagedObjectReference `xml:"datastore"` + Committed 
int64 `xml:"committed"` + Uncommitted int64 `xml:"uncommitted"` + Unshared int64 `xml:"unshared"` +} + +func init() { + t["VirtualMachineUsageOnDatastore"] = reflect.TypeOf((*VirtualMachineUsageOnDatastore)(nil)).Elem() +} + +type VirtualMachineUsbInfo struct { + VirtualMachineTargetInfo + + Description string `xml:"description"` + Vendor int `xml:"vendor"` + Product int `xml:"product"` + PhysicalPath string `xml:"physicalPath"` + Family []string `xml:"family,omitempty"` + Speed []string `xml:"speed,omitempty"` + Summary *VirtualMachineSummary `xml:"summary,omitempty"` +} + +func init() { + t["VirtualMachineUsbInfo"] = reflect.TypeOf((*VirtualMachineUsbInfo)(nil)).Elem() +} + +type VirtualMachineVFlashModuleInfo struct { + VirtualMachineTargetInfo + + VFlashModule HostVFlashManagerVFlashCacheConfigInfoVFlashModuleConfigOption `xml:"vFlashModule"` +} + +func init() { + t["VirtualMachineVFlashModuleInfo"] = reflect.TypeOf((*VirtualMachineVFlashModuleInfo)(nil)).Elem() +} + +type VirtualMachineVMCIDevice struct { + VirtualDevice + + Id int64 `xml:"id,omitempty"` + AllowUnrestrictedCommunication *bool `xml:"allowUnrestrictedCommunication"` + FilterEnable *bool `xml:"filterEnable"` + FilterInfo *VirtualMachineVMCIDeviceFilterInfo `xml:"filterInfo,omitempty"` +} + +func init() { + t["VirtualMachineVMCIDevice"] = reflect.TypeOf((*VirtualMachineVMCIDevice)(nil)).Elem() +} + +type VirtualMachineVMCIDeviceFilterInfo struct { + DynamicData + + Filters []VirtualMachineVMCIDeviceFilterSpec `xml:"filters,omitempty"` +} + +func init() { + t["VirtualMachineVMCIDeviceFilterInfo"] = reflect.TypeOf((*VirtualMachineVMCIDeviceFilterInfo)(nil)).Elem() +} + +type VirtualMachineVMCIDeviceFilterSpec struct { + DynamicData + + Rank int64 `xml:"rank"` + Action string `xml:"action"` + Protocol string `xml:"protocol"` + Direction string `xml:"direction"` + LowerDstPortBoundary int64 `xml:"lowerDstPortBoundary,omitempty"` + UpperDstPortBoundary int64 `xml:"upperDstPortBoundary,omitempty"` +} + +func init() { + t["VirtualMachineVMCIDeviceFilterSpec"] = reflect.TypeOf((*VirtualMachineVMCIDeviceFilterSpec)(nil)).Elem() +} + +type VirtualMachineVMCIDeviceOption struct { + VirtualDeviceOption + + AllowUnrestrictedCommunication BoolOption `xml:"allowUnrestrictedCommunication"` + FilterSpecOption *VirtualMachineVMCIDeviceOptionFilterSpecOption `xml:"filterSpecOption,omitempty"` + FilterSupported *BoolOption `xml:"filterSupported,omitempty"` +} + +func init() { + t["VirtualMachineVMCIDeviceOption"] = reflect.TypeOf((*VirtualMachineVMCIDeviceOption)(nil)).Elem() +} + +type VirtualMachineVMCIDeviceOptionFilterSpecOption struct { + DynamicData + + Action ChoiceOption `xml:"action"` + Protocol ChoiceOption `xml:"protocol"` + Direction ChoiceOption `xml:"direction"` + LowerDstPortBoundary LongOption `xml:"lowerDstPortBoundary"` + UpperDstPortBoundary LongOption `xml:"upperDstPortBoundary"` +} + +func init() { + t["VirtualMachineVMCIDeviceOptionFilterSpecOption"] = reflect.TypeOf((*VirtualMachineVMCIDeviceOptionFilterSpecOption)(nil)).Elem() +} + +type VirtualMachineVMIROM struct { + VirtualDevice +} + +func init() { + t["VirtualMachineVMIROM"] = reflect.TypeOf((*VirtualMachineVMIROM)(nil)).Elem() +} + +type VirtualMachineVideoCard struct { + VirtualDevice + + VideoRamSizeInKB int64 `xml:"videoRamSizeInKB,omitempty"` + NumDisplays int `xml:"numDisplays,omitempty"` + UseAutoDetect *bool `xml:"useAutoDetect"` + Enable3DSupport *bool `xml:"enable3DSupport"` + Use3dRenderer string `xml:"use3dRenderer,omitempty"` + GraphicsMemorySizeInKB 
int64 `xml:"graphicsMemorySizeInKB,omitempty"` +} + +func init() { + t["VirtualMachineVideoCard"] = reflect.TypeOf((*VirtualMachineVideoCard)(nil)).Elem() +} + +type VirtualMachineWipeResult struct { + DynamicData + + DiskId int `xml:"diskId"` + ShrinkableDiskSpace int64 `xml:"shrinkableDiskSpace"` +} + +func init() { + t["VirtualMachineWipeResult"] = reflect.TypeOf((*VirtualMachineWipeResult)(nil)).Elem() +} + +type VirtualNicManagerNetConfig struct { + DynamicData + + NicType string `xml:"nicType"` + MultiSelectAllowed bool `xml:"multiSelectAllowed"` + CandidateVnic []HostVirtualNic `xml:"candidateVnic,omitempty"` + SelectedVnic []string `xml:"selectedVnic,omitempty"` +} + +func init() { + t["VirtualNicManagerNetConfig"] = reflect.TypeOf((*VirtualNicManagerNetConfig)(nil)).Elem() +} + +type VirtualPCIController struct { + VirtualController +} + +func init() { + t["VirtualPCIController"] = reflect.TypeOf((*VirtualPCIController)(nil)).Elem() +} + +type VirtualPCIControllerOption struct { + VirtualControllerOption + + NumSCSIControllers IntOption `xml:"numSCSIControllers"` + NumEthernetCards IntOption `xml:"numEthernetCards"` + NumVideoCards IntOption `xml:"numVideoCards"` + NumSoundCards IntOption `xml:"numSoundCards"` + NumVmiRoms IntOption `xml:"numVmiRoms"` + NumVmciDevices *IntOption `xml:"numVmciDevices,omitempty"` + NumPCIPassthroughDevices *IntOption `xml:"numPCIPassthroughDevices,omitempty"` + NumSasSCSIControllers *IntOption `xml:"numSasSCSIControllers,omitempty"` + NumVmxnet3EthernetCards *IntOption `xml:"numVmxnet3EthernetCards,omitempty"` + NumParaVirtualSCSIControllers *IntOption `xml:"numParaVirtualSCSIControllers,omitempty"` + NumSATAControllers *IntOption `xml:"numSATAControllers,omitempty"` +} + +func init() { + t["VirtualPCIControllerOption"] = reflect.TypeOf((*VirtualPCIControllerOption)(nil)).Elem() +} + +type VirtualPCIPassthrough struct { + VirtualDevice +} + +func init() { + t["VirtualPCIPassthrough"] = reflect.TypeOf((*VirtualPCIPassthrough)(nil)).Elem() +} + +type VirtualPCIPassthroughDeviceBackingInfo struct { + VirtualDeviceDeviceBackingInfo + + Id string `xml:"id"` + DeviceId string `xml:"deviceId"` + SystemId string `xml:"systemId"` + VendorId int16 `xml:"vendorId"` +} + +func init() { + t["VirtualPCIPassthroughDeviceBackingInfo"] = reflect.TypeOf((*VirtualPCIPassthroughDeviceBackingInfo)(nil)).Elem() +} + +type VirtualPCIPassthroughDeviceBackingOption struct { + VirtualDeviceDeviceBackingOption +} + +func init() { + t["VirtualPCIPassthroughDeviceBackingOption"] = reflect.TypeOf((*VirtualPCIPassthroughDeviceBackingOption)(nil)).Elem() +} + +type VirtualPCIPassthroughOption struct { + VirtualDeviceOption +} + +func init() { + t["VirtualPCIPassthroughOption"] = reflect.TypeOf((*VirtualPCIPassthroughOption)(nil)).Elem() +} + +type VirtualPCIPassthroughPluginBackingInfo struct { + VirtualDeviceBackingInfo +} + +func init() { + t["VirtualPCIPassthroughPluginBackingInfo"] = reflect.TypeOf((*VirtualPCIPassthroughPluginBackingInfo)(nil)).Elem() +} + +type VirtualPCIPassthroughPluginBackingOption struct { + VirtualDeviceBackingOption +} + +func init() { + t["VirtualPCIPassthroughPluginBackingOption"] = reflect.TypeOf((*VirtualPCIPassthroughPluginBackingOption)(nil)).Elem() +} + +type VirtualPCIPassthroughVmiopBackingInfo struct { + VirtualPCIPassthroughPluginBackingInfo + + Vgpu string `xml:"vgpu,omitempty"` +} + +func init() { + t["VirtualPCIPassthroughVmiopBackingInfo"] = reflect.TypeOf((*VirtualPCIPassthroughVmiopBackingInfo)(nil)).Elem() +} + +type 
VirtualPCIPassthroughVmiopBackingOption struct { + VirtualPCIPassthroughPluginBackingOption + + Vgpu StringOption `xml:"vgpu"` + MaxInstances int `xml:"maxInstances"` +} + +func init() { + t["VirtualPCIPassthroughVmiopBackingOption"] = reflect.TypeOf((*VirtualPCIPassthroughVmiopBackingOption)(nil)).Elem() +} + +type VirtualPCNet32 struct { + VirtualEthernetCard +} + +func init() { + t["VirtualPCNet32"] = reflect.TypeOf((*VirtualPCNet32)(nil)).Elem() +} + +type VirtualPCNet32Option struct { + VirtualEthernetCardOption + + SupportsMorphing bool `xml:"supportsMorphing"` +} + +func init() { + t["VirtualPCNet32Option"] = reflect.TypeOf((*VirtualPCNet32Option)(nil)).Elem() +} + +type VirtualPS2Controller struct { + VirtualController +} + +func init() { + t["VirtualPS2Controller"] = reflect.TypeOf((*VirtualPS2Controller)(nil)).Elem() +} + +type VirtualPS2ControllerOption struct { + VirtualControllerOption + + NumKeyboards IntOption `xml:"numKeyboards"` + NumPointingDevices IntOption `xml:"numPointingDevices"` +} + +func init() { + t["VirtualPS2ControllerOption"] = reflect.TypeOf((*VirtualPS2ControllerOption)(nil)).Elem() +} + +type VirtualParallelPort struct { + VirtualDevice +} + +func init() { + t["VirtualParallelPort"] = reflect.TypeOf((*VirtualParallelPort)(nil)).Elem() +} + +type VirtualParallelPortDeviceBackingInfo struct { + VirtualDeviceDeviceBackingInfo +} + +func init() { + t["VirtualParallelPortDeviceBackingInfo"] = reflect.TypeOf((*VirtualParallelPortDeviceBackingInfo)(nil)).Elem() +} + +type VirtualParallelPortDeviceBackingOption struct { + VirtualDeviceDeviceBackingOption +} + +func init() { + t["VirtualParallelPortDeviceBackingOption"] = reflect.TypeOf((*VirtualParallelPortDeviceBackingOption)(nil)).Elem() +} + +type VirtualParallelPortFileBackingInfo struct { + VirtualDeviceFileBackingInfo +} + +func init() { + t["VirtualParallelPortFileBackingInfo"] = reflect.TypeOf((*VirtualParallelPortFileBackingInfo)(nil)).Elem() +} + +type VirtualParallelPortFileBackingOption struct { + VirtualDeviceFileBackingOption +} + +func init() { + t["VirtualParallelPortFileBackingOption"] = reflect.TypeOf((*VirtualParallelPortFileBackingOption)(nil)).Elem() +} + +type VirtualParallelPortOption struct { + VirtualDeviceOption +} + +func init() { + t["VirtualParallelPortOption"] = reflect.TypeOf((*VirtualParallelPortOption)(nil)).Elem() +} + +type VirtualPointingDevice struct { + VirtualDevice +} + +func init() { + t["VirtualPointingDevice"] = reflect.TypeOf((*VirtualPointingDevice)(nil)).Elem() +} + +type VirtualPointingDeviceBackingOption struct { + VirtualDeviceDeviceBackingOption + + HostPointingDevice ChoiceOption `xml:"hostPointingDevice"` +} + +func init() { + t["VirtualPointingDeviceBackingOption"] = reflect.TypeOf((*VirtualPointingDeviceBackingOption)(nil)).Elem() +} + +type VirtualPointingDeviceDeviceBackingInfo struct { + VirtualDeviceDeviceBackingInfo + + HostPointingDevice string `xml:"hostPointingDevice"` +} + +func init() { + t["VirtualPointingDeviceDeviceBackingInfo"] = reflect.TypeOf((*VirtualPointingDeviceDeviceBackingInfo)(nil)).Elem() +} + +type VirtualPointingDeviceOption struct { + VirtualDeviceOption +} + +func init() { + t["VirtualPointingDeviceOption"] = reflect.TypeOf((*VirtualPointingDeviceOption)(nil)).Elem() +} + +type VirtualResourcePoolSpec struct { + DynamicData + + VrpId string `xml:"vrpId,omitempty"` + VrpName string `xml:"vrpName,omitempty"` + Description string `xml:"description,omitempty"` + CpuAllocation VrpResourceAllocationInfo `xml:"cpuAllocation"` + 
MemoryAllocation VrpResourceAllocationInfo `xml:"memoryAllocation"` + RpList []ManagedObjectReference `xml:"rpList,omitempty"` + HubList []ManagedObjectReference `xml:"hubList,omitempty"` + RootVRP *bool `xml:"rootVRP"` + StaticVRP *bool `xml:"staticVRP"` + ChangeVersion int64 `xml:"changeVersion,omitempty"` +} + +func init() { + t["VirtualResourcePoolSpec"] = reflect.TypeOf((*VirtualResourcePoolSpec)(nil)).Elem() +} + +type VirtualResourcePoolUsage struct { + DynamicData + + VrpId string `xml:"vrpId"` + CpuReservationMhz int64 `xml:"cpuReservationMhz"` + MemReservationMB int64 `xml:"memReservationMB"` + CpuReservationUsedMhz int64 `xml:"cpuReservationUsedMhz"` + MemReservationUsedMB int64 `xml:"memReservationUsedMB"` +} + +func init() { + t["VirtualResourcePoolUsage"] = reflect.TypeOf((*VirtualResourcePoolUsage)(nil)).Elem() +} + +type VirtualSATAController struct { + VirtualController +} + +func init() { + t["VirtualSATAController"] = reflect.TypeOf((*VirtualSATAController)(nil)).Elem() +} + +type VirtualSATAControllerOption struct { + VirtualControllerOption + + NumSATADisks IntOption `xml:"numSATADisks"` + NumSATACdroms IntOption `xml:"numSATACdroms"` +} + +func init() { + t["VirtualSATAControllerOption"] = reflect.TypeOf((*VirtualSATAControllerOption)(nil)).Elem() +} + +type VirtualSCSIController struct { + VirtualController + + HotAddRemove *bool `xml:"hotAddRemove"` + SharedBus VirtualSCSISharing `xml:"sharedBus"` + ScsiCtlrUnitNumber int `xml:"scsiCtlrUnitNumber,omitempty"` +} + +func init() { + t["VirtualSCSIController"] = reflect.TypeOf((*VirtualSCSIController)(nil)).Elem() +} + +type VirtualSCSIControllerOption struct { + VirtualControllerOption + + NumSCSIDisks IntOption `xml:"numSCSIDisks"` + NumSCSICdroms IntOption `xml:"numSCSICdroms"` + NumSCSIPassthrough IntOption `xml:"numSCSIPassthrough"` + Sharing []VirtualSCSISharing `xml:"sharing"` + DefaultSharedIndex int `xml:"defaultSharedIndex"` + HotAddRemove BoolOption `xml:"hotAddRemove"` + ScsiCtlrUnitNumber int `xml:"scsiCtlrUnitNumber"` +} + +func init() { + t["VirtualSCSIControllerOption"] = reflect.TypeOf((*VirtualSCSIControllerOption)(nil)).Elem() +} + +type VirtualSCSIPassthrough struct { + VirtualDevice +} + +func init() { + t["VirtualSCSIPassthrough"] = reflect.TypeOf((*VirtualSCSIPassthrough)(nil)).Elem() +} + +type VirtualSCSIPassthroughDeviceBackingInfo struct { + VirtualDeviceDeviceBackingInfo +} + +func init() { + t["VirtualSCSIPassthroughDeviceBackingInfo"] = reflect.TypeOf((*VirtualSCSIPassthroughDeviceBackingInfo)(nil)).Elem() +} + +type VirtualSCSIPassthroughDeviceBackingOption struct { + VirtualDeviceDeviceBackingOption +} + +func init() { + t["VirtualSCSIPassthroughDeviceBackingOption"] = reflect.TypeOf((*VirtualSCSIPassthroughDeviceBackingOption)(nil)).Elem() +} + +type VirtualSCSIPassthroughOption struct { + VirtualDeviceOption +} + +func init() { + t["VirtualSCSIPassthroughOption"] = reflect.TypeOf((*VirtualSCSIPassthroughOption)(nil)).Elem() +} + +type VirtualSIOController struct { + VirtualController +} + +func init() { + t["VirtualSIOController"] = reflect.TypeOf((*VirtualSIOController)(nil)).Elem() +} + +type VirtualSIOControllerOption struct { + VirtualControllerOption + + NumFloppyDrives IntOption `xml:"numFloppyDrives"` + NumSerialPorts IntOption `xml:"numSerialPorts"` + NumParallelPorts IntOption `xml:"numParallelPorts"` +} + +func init() { + t["VirtualSIOControllerOption"] = reflect.TypeOf((*VirtualSIOControllerOption)(nil)).Elem() +} + +type VirtualSerialPort struct { + VirtualDevice + + 
YieldOnPoll bool `xml:"yieldOnPoll"` +} + +func init() { + t["VirtualSerialPort"] = reflect.TypeOf((*VirtualSerialPort)(nil)).Elem() +} + +type VirtualSerialPortDeviceBackingInfo struct { + VirtualDeviceDeviceBackingInfo +} + +func init() { + t["VirtualSerialPortDeviceBackingInfo"] = reflect.TypeOf((*VirtualSerialPortDeviceBackingInfo)(nil)).Elem() +} + +type VirtualSerialPortDeviceBackingOption struct { + VirtualDeviceDeviceBackingOption +} + +func init() { + t["VirtualSerialPortDeviceBackingOption"] = reflect.TypeOf((*VirtualSerialPortDeviceBackingOption)(nil)).Elem() +} + +type VirtualSerialPortFileBackingInfo struct { + VirtualDeviceFileBackingInfo +} + +func init() { + t["VirtualSerialPortFileBackingInfo"] = reflect.TypeOf((*VirtualSerialPortFileBackingInfo)(nil)).Elem() +} + +type VirtualSerialPortFileBackingOption struct { + VirtualDeviceFileBackingOption +} + +func init() { + t["VirtualSerialPortFileBackingOption"] = reflect.TypeOf((*VirtualSerialPortFileBackingOption)(nil)).Elem() +} + +type VirtualSerialPortOption struct { + VirtualDeviceOption + + YieldOnPoll BoolOption `xml:"yieldOnPoll"` +} + +func init() { + t["VirtualSerialPortOption"] = reflect.TypeOf((*VirtualSerialPortOption)(nil)).Elem() +} + +type VirtualSerialPortPipeBackingInfo struct { + VirtualDevicePipeBackingInfo + + Endpoint string `xml:"endpoint"` + NoRxLoss *bool `xml:"noRxLoss"` +} + +func init() { + t["VirtualSerialPortPipeBackingInfo"] = reflect.TypeOf((*VirtualSerialPortPipeBackingInfo)(nil)).Elem() +} + +type VirtualSerialPortPipeBackingOption struct { + VirtualDevicePipeBackingOption + + Endpoint ChoiceOption `xml:"endpoint"` + NoRxLoss BoolOption `xml:"noRxLoss"` +} + +func init() { + t["VirtualSerialPortPipeBackingOption"] = reflect.TypeOf((*VirtualSerialPortPipeBackingOption)(nil)).Elem() +} + +type VirtualSerialPortThinPrintBackingInfo struct { + VirtualDeviceBackingInfo +} + +func init() { + t["VirtualSerialPortThinPrintBackingInfo"] = reflect.TypeOf((*VirtualSerialPortThinPrintBackingInfo)(nil)).Elem() +} + +type VirtualSerialPortThinPrintBackingOption struct { + VirtualDeviceBackingOption +} + +func init() { + t["VirtualSerialPortThinPrintBackingOption"] = reflect.TypeOf((*VirtualSerialPortThinPrintBackingOption)(nil)).Elem() +} + +type VirtualSerialPortURIBackingInfo struct { + VirtualDeviceURIBackingInfo +} + +func init() { + t["VirtualSerialPortURIBackingInfo"] = reflect.TypeOf((*VirtualSerialPortURIBackingInfo)(nil)).Elem() +} + +type VirtualSerialPortURIBackingOption struct { + VirtualDeviceURIBackingOption +} + +func init() { + t["VirtualSerialPortURIBackingOption"] = reflect.TypeOf((*VirtualSerialPortURIBackingOption)(nil)).Elem() +} + +type VirtualSoundBlaster16 struct { + VirtualSoundCard +} + +func init() { + t["VirtualSoundBlaster16"] = reflect.TypeOf((*VirtualSoundBlaster16)(nil)).Elem() +} + +type VirtualSoundBlaster16Option struct { + VirtualSoundCardOption +} + +func init() { + t["VirtualSoundBlaster16Option"] = reflect.TypeOf((*VirtualSoundBlaster16Option)(nil)).Elem() +} + +type VirtualSoundCard struct { + VirtualDevice +} + +func init() { + t["VirtualSoundCard"] = reflect.TypeOf((*VirtualSoundCard)(nil)).Elem() +} + +type VirtualSoundCardDeviceBackingInfo struct { + VirtualDeviceDeviceBackingInfo +} + +func init() { + t["VirtualSoundCardDeviceBackingInfo"] = reflect.TypeOf((*VirtualSoundCardDeviceBackingInfo)(nil)).Elem() +} + +type VirtualSoundCardDeviceBackingOption struct { + VirtualDeviceDeviceBackingOption +} + +func init() { + t["VirtualSoundCardDeviceBackingOption"] = 
reflect.TypeOf((*VirtualSoundCardDeviceBackingOption)(nil)).Elem() +} + +type VirtualSoundCardOption struct { + VirtualDeviceOption +} + +func init() { + t["VirtualSoundCardOption"] = reflect.TypeOf((*VirtualSoundCardOption)(nil)).Elem() +} + +type VirtualSriovEthernetCard struct { + VirtualEthernetCard + + AllowGuestOSMtuChange *bool `xml:"allowGuestOSMtuChange"` + SriovBacking *VirtualSriovEthernetCardSriovBackingInfo `xml:"sriovBacking,omitempty"` +} + +func init() { + t["VirtualSriovEthernetCard"] = reflect.TypeOf((*VirtualSriovEthernetCard)(nil)).Elem() +} + +type VirtualSriovEthernetCardOption struct { + VirtualEthernetCardOption +} + +func init() { + t["VirtualSriovEthernetCardOption"] = reflect.TypeOf((*VirtualSriovEthernetCardOption)(nil)).Elem() +} + +type VirtualSriovEthernetCardSriovBackingInfo struct { + VirtualDeviceBackingInfo + + PhysicalFunctionBacking *VirtualPCIPassthroughDeviceBackingInfo `xml:"physicalFunctionBacking,omitempty"` + VirtualFunctionBacking *VirtualPCIPassthroughDeviceBackingInfo `xml:"virtualFunctionBacking,omitempty"` + VirtualFunctionIndex int `xml:"virtualFunctionIndex,omitempty"` +} + +func init() { + t["VirtualSriovEthernetCardSriovBackingInfo"] = reflect.TypeOf((*VirtualSriovEthernetCardSriovBackingInfo)(nil)).Elem() +} + +type VirtualSriovEthernetCardSriovBackingOption struct { + VirtualDeviceBackingOption +} + +func init() { + t["VirtualSriovEthernetCardSriovBackingOption"] = reflect.TypeOf((*VirtualSriovEthernetCardSriovBackingOption)(nil)).Elem() +} + +type VirtualSwitchProfile struct { + ApplyProfile + + Key string `xml:"key"` + Name string `xml:"name"` + Link LinkProfile `xml:"link"` + NumPorts NumPortsProfile `xml:"numPorts"` + NetworkPolicy NetworkPolicyProfile `xml:"networkPolicy"` +} + +func init() { + t["VirtualSwitchProfile"] = reflect.TypeOf((*VirtualSwitchProfile)(nil)).Elem() +} + +type VirtualSwitchSelectionProfile struct { + ApplyProfile +} + +func init() { + t["VirtualSwitchSelectionProfile"] = reflect.TypeOf((*VirtualSwitchSelectionProfile)(nil)).Elem() +} + +type VirtualUSB struct { + VirtualDevice + + Connected bool `xml:"connected"` + Vendor int `xml:"vendor,omitempty"` + Product int `xml:"product,omitempty"` + Family []string `xml:"family,omitempty"` + Speed []string `xml:"speed,omitempty"` +} + +func init() { + t["VirtualUSB"] = reflect.TypeOf((*VirtualUSB)(nil)).Elem() +} + +type VirtualUSBController struct { + VirtualController + + AutoConnectDevices *bool `xml:"autoConnectDevices"` + EhciEnabled *bool `xml:"ehciEnabled"` +} + +func init() { + t["VirtualUSBController"] = reflect.TypeOf((*VirtualUSBController)(nil)).Elem() +} + +type VirtualUSBControllerOption struct { + VirtualControllerOption + + AutoConnectDevices BoolOption `xml:"autoConnectDevices"` + EhciSupported BoolOption `xml:"ehciSupported"` + SupportedSpeeds []string `xml:"supportedSpeeds,omitempty"` +} + +func init() { + t["VirtualUSBControllerOption"] = reflect.TypeOf((*VirtualUSBControllerOption)(nil)).Elem() +} + +type VirtualUSBControllerPciBusSlotInfo struct { + VirtualDevicePciBusSlotInfo + + EhciPciSlotNumber int `xml:"ehciPciSlotNumber,omitempty"` +} + +func init() { + t["VirtualUSBControllerPciBusSlotInfo"] = reflect.TypeOf((*VirtualUSBControllerPciBusSlotInfo)(nil)).Elem() +} + +type VirtualUSBOption struct { + VirtualDeviceOption +} + +func init() { + t["VirtualUSBOption"] = reflect.TypeOf((*VirtualUSBOption)(nil)).Elem() +} + +type VirtualUSBRemoteClientBackingInfo struct { + VirtualDeviceRemoteDeviceBackingInfo + + Hostname string `xml:"hostname"` 
+} + +func init() { + t["VirtualUSBRemoteClientBackingInfo"] = reflect.TypeOf((*VirtualUSBRemoteClientBackingInfo)(nil)).Elem() +} + +type VirtualUSBRemoteClientBackingOption struct { + VirtualDeviceRemoteDeviceBackingOption +} + +func init() { + t["VirtualUSBRemoteClientBackingOption"] = reflect.TypeOf((*VirtualUSBRemoteClientBackingOption)(nil)).Elem() +} + +type VirtualUSBRemoteHostBackingInfo struct { + VirtualDeviceDeviceBackingInfo + + Hostname string `xml:"hostname"` +} + +func init() { + t["VirtualUSBRemoteHostBackingInfo"] = reflect.TypeOf((*VirtualUSBRemoteHostBackingInfo)(nil)).Elem() +} + +type VirtualUSBRemoteHostBackingOption struct { + VirtualDeviceDeviceBackingOption +} + +func init() { + t["VirtualUSBRemoteHostBackingOption"] = reflect.TypeOf((*VirtualUSBRemoteHostBackingOption)(nil)).Elem() +} + +type VirtualUSBUSBBackingInfo struct { + VirtualDeviceDeviceBackingInfo +} + +func init() { + t["VirtualUSBUSBBackingInfo"] = reflect.TypeOf((*VirtualUSBUSBBackingInfo)(nil)).Elem() +} + +type VirtualUSBUSBBackingOption struct { + VirtualDeviceDeviceBackingOption +} + +func init() { + t["VirtualUSBUSBBackingOption"] = reflect.TypeOf((*VirtualUSBUSBBackingOption)(nil)).Elem() +} + +type VirtualUSBXHCIController struct { + VirtualController + + AutoConnectDevices *bool `xml:"autoConnectDevices"` +} + +func init() { + t["VirtualUSBXHCIController"] = reflect.TypeOf((*VirtualUSBXHCIController)(nil)).Elem() +} + +type VirtualUSBXHCIControllerOption struct { + VirtualControllerOption + + AutoConnectDevices BoolOption `xml:"autoConnectDevices"` + SupportedSpeeds []string `xml:"supportedSpeeds"` +} + +func init() { + t["VirtualUSBXHCIControllerOption"] = reflect.TypeOf((*VirtualUSBXHCIControllerOption)(nil)).Elem() +} + +type VirtualVMIROMOption struct { + VirtualDeviceOption +} + +func init() { + t["VirtualVMIROMOption"] = reflect.TypeOf((*VirtualVMIROMOption)(nil)).Elem() +} + +type VirtualVideoCardOption struct { + VirtualDeviceOption + + VideoRamSizeInKB *LongOption `xml:"videoRamSizeInKB,omitempty"` + NumDisplays *IntOption `xml:"numDisplays,omitempty"` + UseAutoDetect *BoolOption `xml:"useAutoDetect,omitempty"` + Support3D *BoolOption `xml:"support3D,omitempty"` + Use3dRendererSupported *BoolOption `xml:"use3dRendererSupported,omitempty"` + GraphicsMemorySizeInKB *LongOption `xml:"graphicsMemorySizeInKB,omitempty"` + GraphicsMemorySizeSupported *BoolOption `xml:"graphicsMemorySizeSupported,omitempty"` +} + +func init() { + t["VirtualVideoCardOption"] = reflect.TypeOf((*VirtualVideoCardOption)(nil)).Elem() +} + +type VirtualVmxnet struct { + VirtualEthernetCard +} + +func init() { + t["VirtualVmxnet"] = reflect.TypeOf((*VirtualVmxnet)(nil)).Elem() +} + +type VirtualVmxnet2 struct { + VirtualVmxnet +} + +func init() { + t["VirtualVmxnet2"] = reflect.TypeOf((*VirtualVmxnet2)(nil)).Elem() +} + +type VirtualVmxnet2Option struct { + VirtualVmxnetOption +} + +func init() { + t["VirtualVmxnet2Option"] = reflect.TypeOf((*VirtualVmxnet2Option)(nil)).Elem() +} + +type VirtualVmxnet3 struct { + VirtualVmxnet +} + +func init() { + t["VirtualVmxnet3"] = reflect.TypeOf((*VirtualVmxnet3)(nil)).Elem() +} + +type VirtualVmxnet3Option struct { + VirtualVmxnetOption +} + +func init() { + t["VirtualVmxnet3Option"] = reflect.TypeOf((*VirtualVmxnet3Option)(nil)).Elem() +} + +type VirtualVmxnetOption struct { + VirtualEthernetCardOption +} + +func init() { + t["VirtualVmxnetOption"] = reflect.TypeOf((*VirtualVmxnetOption)(nil)).Elem() +} + +type VlanProfile struct { + ApplyProfile +} + +func init() { + 
t["VlanProfile"] = reflect.TypeOf((*VlanProfile)(nil)).Elem() +} + +type VmAcquiredMksTicketEvent struct { + VmEvent +} + +func init() { + t["VmAcquiredMksTicketEvent"] = reflect.TypeOf((*VmAcquiredMksTicketEvent)(nil)).Elem() +} + +type VmAcquiredTicketEvent struct { + VmEvent + + TicketType string `xml:"ticketType"` +} + +func init() { + t["VmAcquiredTicketEvent"] = reflect.TypeOf((*VmAcquiredTicketEvent)(nil)).Elem() +} + +type VmAlreadyExistsInDatacenter struct { + InvalidFolder + + Host ManagedObjectReference `xml:"host"` + Hostname string `xml:"hostname"` + Vm []ManagedObjectReference `xml:"vm"` +} + +func init() { + t["VmAlreadyExistsInDatacenter"] = reflect.TypeOf((*VmAlreadyExistsInDatacenter)(nil)).Elem() +} + +type VmAlreadyExistsInDatacenterFault VmAlreadyExistsInDatacenter + +func init() { + t["VmAlreadyExistsInDatacenterFault"] = reflect.TypeOf((*VmAlreadyExistsInDatacenterFault)(nil)).Elem() +} + +type VmAutoRenameEvent struct { + VmEvent + + OldName string `xml:"oldName"` + NewName string `xml:"newName"` +} + +func init() { + t["VmAutoRenameEvent"] = reflect.TypeOf((*VmAutoRenameEvent)(nil)).Elem() +} + +type VmBeingClonedEvent struct { + VmCloneEvent + + DestFolder FolderEventArgument `xml:"destFolder"` + DestName string `xml:"destName"` + DestHost HostEventArgument `xml:"destHost"` +} + +func init() { + t["VmBeingClonedEvent"] = reflect.TypeOf((*VmBeingClonedEvent)(nil)).Elem() +} + +type VmBeingClonedNoFolderEvent struct { + VmCloneEvent + + DestName string `xml:"destName"` + DestHost HostEventArgument `xml:"destHost"` +} + +func init() { + t["VmBeingClonedNoFolderEvent"] = reflect.TypeOf((*VmBeingClonedNoFolderEvent)(nil)).Elem() +} + +type VmBeingCreatedEvent struct { + VmEvent + + ConfigSpec *VirtualMachineConfigSpec `xml:"configSpec,omitempty"` +} + +func init() { + t["VmBeingCreatedEvent"] = reflect.TypeOf((*VmBeingCreatedEvent)(nil)).Elem() +} + +type VmBeingDeployedEvent struct { + VmEvent + + SrcTemplate VmEventArgument `xml:"srcTemplate"` +} + +func init() { + t["VmBeingDeployedEvent"] = reflect.TypeOf((*VmBeingDeployedEvent)(nil)).Elem() +} + +type VmBeingHotMigratedEvent struct { + VmEvent + + DestHost HostEventArgument `xml:"destHost"` + DestDatacenter *DatacenterEventArgument `xml:"destDatacenter,omitempty"` + DestDatastore *DatastoreEventArgument `xml:"destDatastore,omitempty"` +} + +func init() { + t["VmBeingHotMigratedEvent"] = reflect.TypeOf((*VmBeingHotMigratedEvent)(nil)).Elem() +} + +type VmBeingMigratedEvent struct { + VmEvent + + DestHost HostEventArgument `xml:"destHost"` + DestDatacenter *DatacenterEventArgument `xml:"destDatacenter,omitempty"` + DestDatastore *DatastoreEventArgument `xml:"destDatastore,omitempty"` +} + +func init() { + t["VmBeingMigratedEvent"] = reflect.TypeOf((*VmBeingMigratedEvent)(nil)).Elem() +} + +type VmBeingRelocatedEvent struct { + VmRelocateSpecEvent + + DestHost HostEventArgument `xml:"destHost"` + DestDatacenter *DatacenterEventArgument `xml:"destDatacenter,omitempty"` + DestDatastore *DatastoreEventArgument `xml:"destDatastore,omitempty"` +} + +func init() { + t["VmBeingRelocatedEvent"] = reflect.TypeOf((*VmBeingRelocatedEvent)(nil)).Elem() +} + +type VmCloneEvent struct { + VmEvent +} + +func init() { + t["VmCloneEvent"] = reflect.TypeOf((*VmCloneEvent)(nil)).Elem() +} + +type VmCloneFailedEvent struct { + VmCloneEvent + + DestFolder FolderEventArgument `xml:"destFolder"` + DestName string `xml:"destName"` + DestHost HostEventArgument `xml:"destHost"` + Reason LocalizedMethodFault `xml:"reason"` +} + +func init() { 
+ t["VmCloneFailedEvent"] = reflect.TypeOf((*VmCloneFailedEvent)(nil)).Elem() +} + +type VmClonedEvent struct { + VmCloneEvent + + SourceVm VmEventArgument `xml:"sourceVm"` +} + +func init() { + t["VmClonedEvent"] = reflect.TypeOf((*VmClonedEvent)(nil)).Elem() +} + +type VmConfigFault struct { + VimFault +} + +func init() { + t["VmConfigFault"] = reflect.TypeOf((*VmConfigFault)(nil)).Elem() +} + +type VmConfigFaultFault BaseVmConfigFault + +func init() { + t["VmConfigFaultFault"] = reflect.TypeOf((*VmConfigFaultFault)(nil)).Elem() +} + +type VmConfigFileInfo struct { + FileInfo + + ConfigVersion int `xml:"configVersion,omitempty"` +} + +func init() { + t["VmConfigFileInfo"] = reflect.TypeOf((*VmConfigFileInfo)(nil)).Elem() +} + +type VmConfigFileQuery struct { + FileQuery + + Filter *VmConfigFileQueryFilter `xml:"filter,omitempty"` + Details *VmConfigFileQueryFlags `xml:"details,omitempty"` +} + +func init() { + t["VmConfigFileQuery"] = reflect.TypeOf((*VmConfigFileQuery)(nil)).Elem() +} + +type VmConfigFileQueryFilter struct { + DynamicData + + MatchConfigVersion []int `xml:"matchConfigVersion,omitempty"` +} + +func init() { + t["VmConfigFileQueryFilter"] = reflect.TypeOf((*VmConfigFileQueryFilter)(nil)).Elem() +} + +type VmConfigFileQueryFlags struct { + DynamicData + + ConfigVersion bool `xml:"configVersion"` +} + +func init() { + t["VmConfigFileQueryFlags"] = reflect.TypeOf((*VmConfigFileQueryFlags)(nil)).Elem() +} + +type VmConfigIncompatibleForFaultTolerance struct { + VmConfigFault + + Fault *LocalizedMethodFault `xml:"fault,omitempty"` +} + +func init() { + t["VmConfigIncompatibleForFaultTolerance"] = reflect.TypeOf((*VmConfigIncompatibleForFaultTolerance)(nil)).Elem() +} + +type VmConfigIncompatibleForFaultToleranceFault VmConfigIncompatibleForFaultTolerance + +func init() { + t["VmConfigIncompatibleForFaultToleranceFault"] = reflect.TypeOf((*VmConfigIncompatibleForFaultToleranceFault)(nil)).Elem() +} + +type VmConfigIncompatibleForRecordReplay struct { + VmConfigFault + + Fault *LocalizedMethodFault `xml:"fault,omitempty"` +} + +func init() { + t["VmConfigIncompatibleForRecordReplay"] = reflect.TypeOf((*VmConfigIncompatibleForRecordReplay)(nil)).Elem() +} + +type VmConfigIncompatibleForRecordReplayFault VmConfigIncompatibleForRecordReplay + +func init() { + t["VmConfigIncompatibleForRecordReplayFault"] = reflect.TypeOf((*VmConfigIncompatibleForRecordReplayFault)(nil)).Elem() +} + +type VmConfigInfo struct { + DynamicData + + Product []VAppProductInfo `xml:"product,omitempty"` + Property []VAppPropertyInfo `xml:"property,omitempty"` + IpAssignment VAppIPAssignmentInfo `xml:"ipAssignment"` + Eula []string `xml:"eula,omitempty"` + OvfSection []VAppOvfSectionInfo `xml:"ovfSection,omitempty"` + OvfEnvironmentTransport []string `xml:"ovfEnvironmentTransport,omitempty"` + InstallBootRequired bool `xml:"installBootRequired"` + InstallBootStopDelay int `xml:"installBootStopDelay"` +} + +func init() { + t["VmConfigInfo"] = reflect.TypeOf((*VmConfigInfo)(nil)).Elem() +} + +type VmConfigMissingEvent struct { + VmEvent +} + +func init() { + t["VmConfigMissingEvent"] = reflect.TypeOf((*VmConfigMissingEvent)(nil)).Elem() +} + +type VmConfigSpec struct { + DynamicData + + Product []VAppProductSpec `xml:"product,omitempty"` + Property []VAppPropertySpec `xml:"property,omitempty"` + IpAssignment *VAppIPAssignmentInfo `xml:"ipAssignment,omitempty"` + Eula []string `xml:"eula,omitempty"` + OvfSection []VAppOvfSectionSpec `xml:"ovfSection,omitempty"` + OvfEnvironmentTransport []string 
`xml:"ovfEnvironmentTransport,omitempty"` + InstallBootRequired *bool `xml:"installBootRequired"` + InstallBootStopDelay int `xml:"installBootStopDelay,omitempty"` +} + +func init() { + t["VmConfigSpec"] = reflect.TypeOf((*VmConfigSpec)(nil)).Elem() +} + +type VmConnectedEvent struct { + VmEvent +} + +func init() { + t["VmConnectedEvent"] = reflect.TypeOf((*VmConnectedEvent)(nil)).Elem() +} + +type VmCreatedEvent struct { + VmEvent +} + +func init() { + t["VmCreatedEvent"] = reflect.TypeOf((*VmCreatedEvent)(nil)).Elem() +} + +type VmDasBeingResetEvent struct { + VmEvent + + Reason string `xml:"reason,omitempty"` +} + +func init() { + t["VmDasBeingResetEvent"] = reflect.TypeOf((*VmDasBeingResetEvent)(nil)).Elem() +} + +type VmDasBeingResetWithScreenshotEvent struct { + VmDasBeingResetEvent + + ScreenshotFilePath string `xml:"screenshotFilePath"` +} + +func init() { + t["VmDasBeingResetWithScreenshotEvent"] = reflect.TypeOf((*VmDasBeingResetWithScreenshotEvent)(nil)).Elem() +} + +type VmDasResetFailedEvent struct { + VmEvent +} + +func init() { + t["VmDasResetFailedEvent"] = reflect.TypeOf((*VmDasResetFailedEvent)(nil)).Elem() +} + +type VmDasUpdateErrorEvent struct { + VmEvent +} + +func init() { + t["VmDasUpdateErrorEvent"] = reflect.TypeOf((*VmDasUpdateErrorEvent)(nil)).Elem() +} + +type VmDasUpdateOkEvent struct { + VmEvent +} + +func init() { + t["VmDasUpdateOkEvent"] = reflect.TypeOf((*VmDasUpdateOkEvent)(nil)).Elem() +} + +type VmDateRolledBackEvent struct { + VmEvent +} + +func init() { + t["VmDateRolledBackEvent"] = reflect.TypeOf((*VmDateRolledBackEvent)(nil)).Elem() +} + +type VmDeployFailedEvent struct { + VmEvent + + DestDatastore BaseEntityEventArgument `xml:"destDatastore,typeattr"` + Reason LocalizedMethodFault `xml:"reason"` +} + +func init() { + t["VmDeployFailedEvent"] = reflect.TypeOf((*VmDeployFailedEvent)(nil)).Elem() +} + +type VmDeployedEvent struct { + VmEvent + + SrcTemplate VmEventArgument `xml:"srcTemplate"` +} + +func init() { + t["VmDeployedEvent"] = reflect.TypeOf((*VmDeployedEvent)(nil)).Elem() +} + +type VmDisconnectedEvent struct { + VmEvent +} + +func init() { + t["VmDisconnectedEvent"] = reflect.TypeOf((*VmDisconnectedEvent)(nil)).Elem() +} + +type VmDiscoveredEvent struct { + VmEvent +} + +func init() { + t["VmDiscoveredEvent"] = reflect.TypeOf((*VmDiscoveredEvent)(nil)).Elem() +} + +type VmDiskFailedEvent struct { + VmEvent + + Disk string `xml:"disk"` + Reason LocalizedMethodFault `xml:"reason"` +} + +func init() { + t["VmDiskFailedEvent"] = reflect.TypeOf((*VmDiskFailedEvent)(nil)).Elem() +} + +type VmDiskFileInfo struct { + FileInfo + + DiskType string `xml:"diskType,omitempty"` + CapacityKb int64 `xml:"capacityKb,omitempty"` + HardwareVersion int `xml:"hardwareVersion,omitempty"` + ControllerType string `xml:"controllerType,omitempty"` + DiskExtents []string `xml:"diskExtents,omitempty"` + Thin *bool `xml:"thin"` +} + +func init() { + t["VmDiskFileInfo"] = reflect.TypeOf((*VmDiskFileInfo)(nil)).Elem() +} + +type VmDiskFileQuery struct { + FileQuery + + Filter *VmDiskFileQueryFilter `xml:"filter,omitempty"` + Details *VmDiskFileQueryFlags `xml:"details,omitempty"` +} + +func init() { + t["VmDiskFileQuery"] = reflect.TypeOf((*VmDiskFileQuery)(nil)).Elem() +} + +type VmDiskFileQueryFilter struct { + DynamicData + + DiskType []string `xml:"diskType,omitempty"` + MatchHardwareVersion []int `xml:"matchHardwareVersion,omitempty"` + ControllerType []string `xml:"controllerType,omitempty"` + Thin *bool `xml:"thin"` +} + +func init() { + 
t["VmDiskFileQueryFilter"] = reflect.TypeOf((*VmDiskFileQueryFilter)(nil)).Elem() +} + +type VmDiskFileQueryFlags struct { + DynamicData + + DiskType bool `xml:"diskType"` + CapacityKb bool `xml:"capacityKb"` + HardwareVersion bool `xml:"hardwareVersion"` + ControllerType *bool `xml:"controllerType"` + DiskExtents *bool `xml:"diskExtents"` + Thin *bool `xml:"thin"` +} + +func init() { + t["VmDiskFileQueryFlags"] = reflect.TypeOf((*VmDiskFileQueryFlags)(nil)).Elem() +} + +type VmEmigratingEvent struct { + VmEvent +} + +func init() { + t["VmEmigratingEvent"] = reflect.TypeOf((*VmEmigratingEvent)(nil)).Elem() +} + +type VmEndRecordingEvent struct { + VmEvent +} + +func init() { + t["VmEndRecordingEvent"] = reflect.TypeOf((*VmEndRecordingEvent)(nil)).Elem() +} + +type VmEndReplayingEvent struct { + VmEvent +} + +func init() { + t["VmEndReplayingEvent"] = reflect.TypeOf((*VmEndReplayingEvent)(nil)).Elem() +} + +type VmEvent struct { + Event + + Template bool `xml:"template"` +} + +func init() { + t["VmEvent"] = reflect.TypeOf((*VmEvent)(nil)).Elem() +} + +type VmEventArgument struct { + EntityEventArgument + + Vm ManagedObjectReference `xml:"vm"` +} + +func init() { + t["VmEventArgument"] = reflect.TypeOf((*VmEventArgument)(nil)).Elem() +} + +type VmFailedMigrateEvent struct { + VmEvent + + DestHost HostEventArgument `xml:"destHost"` + Reason LocalizedMethodFault `xml:"reason"` + DestDatacenter *DatacenterEventArgument `xml:"destDatacenter,omitempty"` + DestDatastore *DatastoreEventArgument `xml:"destDatastore,omitempty"` +} + +func init() { + t["VmFailedMigrateEvent"] = reflect.TypeOf((*VmFailedMigrateEvent)(nil)).Elem() +} + +type VmFailedRelayoutEvent struct { + VmEvent + + Reason LocalizedMethodFault `xml:"reason"` +} + +func init() { + t["VmFailedRelayoutEvent"] = reflect.TypeOf((*VmFailedRelayoutEvent)(nil)).Elem() +} + +type VmFailedRelayoutOnVmfs2DatastoreEvent struct { + VmEvent +} + +func init() { + t["VmFailedRelayoutOnVmfs2DatastoreEvent"] = reflect.TypeOf((*VmFailedRelayoutOnVmfs2DatastoreEvent)(nil)).Elem() +} + +type VmFailedStartingSecondaryEvent struct { + VmEvent + + Reason string `xml:"reason,omitempty"` +} + +func init() { + t["VmFailedStartingSecondaryEvent"] = reflect.TypeOf((*VmFailedStartingSecondaryEvent)(nil)).Elem() +} + +type VmFailedToPowerOffEvent struct { + VmEvent + + Reason LocalizedMethodFault `xml:"reason"` +} + +func init() { + t["VmFailedToPowerOffEvent"] = reflect.TypeOf((*VmFailedToPowerOffEvent)(nil)).Elem() +} + +type VmFailedToPowerOnEvent struct { + VmEvent + + Reason LocalizedMethodFault `xml:"reason"` +} + +func init() { + t["VmFailedToPowerOnEvent"] = reflect.TypeOf((*VmFailedToPowerOnEvent)(nil)).Elem() +} + +type VmFailedToRebootGuestEvent struct { + VmEvent + + Reason LocalizedMethodFault `xml:"reason"` +} + +func init() { + t["VmFailedToRebootGuestEvent"] = reflect.TypeOf((*VmFailedToRebootGuestEvent)(nil)).Elem() +} + +type VmFailedToResetEvent struct { + VmEvent + + Reason LocalizedMethodFault `xml:"reason"` +} + +func init() { + t["VmFailedToResetEvent"] = reflect.TypeOf((*VmFailedToResetEvent)(nil)).Elem() +} + +type VmFailedToShutdownGuestEvent struct { + VmEvent + + Reason LocalizedMethodFault `xml:"reason"` +} + +func init() { + t["VmFailedToShutdownGuestEvent"] = reflect.TypeOf((*VmFailedToShutdownGuestEvent)(nil)).Elem() +} + +type VmFailedToStandbyGuestEvent struct { + VmEvent + + Reason LocalizedMethodFault `xml:"reason"` +} + +func init() { + t["VmFailedToStandbyGuestEvent"] = 
reflect.TypeOf((*VmFailedToStandbyGuestEvent)(nil)).Elem() +} + +type VmFailedToSuspendEvent struct { + VmEvent + + Reason LocalizedMethodFault `xml:"reason"` +} + +func init() { + t["VmFailedToSuspendEvent"] = reflect.TypeOf((*VmFailedToSuspendEvent)(nil)).Elem() +} + +type VmFailedUpdatingSecondaryConfig struct { + VmEvent +} + +func init() { + t["VmFailedUpdatingSecondaryConfig"] = reflect.TypeOf((*VmFailedUpdatingSecondaryConfig)(nil)).Elem() +} + +type VmFailoverFailed struct { + VmEvent + + Reason *LocalizedMethodFault `xml:"reason,omitempty"` +} + +func init() { + t["VmFailoverFailed"] = reflect.TypeOf((*VmFailoverFailed)(nil)).Elem() +} + +type VmFaultToleranceConfigIssue struct { + VmFaultToleranceIssue + + Reason string `xml:"reason,omitempty"` + EntityName string `xml:"entityName,omitempty"` + Entity *ManagedObjectReference `xml:"entity,omitempty"` +} + +func init() { + t["VmFaultToleranceConfigIssue"] = reflect.TypeOf((*VmFaultToleranceConfigIssue)(nil)).Elem() +} + +type VmFaultToleranceConfigIssueFault VmFaultToleranceConfigIssue + +func init() { + t["VmFaultToleranceConfigIssueFault"] = reflect.TypeOf((*VmFaultToleranceConfigIssueFault)(nil)).Elem() +} + +type VmFaultToleranceConfigIssueWrapper struct { + VmFaultToleranceIssue + + EntityName string `xml:"entityName,omitempty"` + Entity *ManagedObjectReference `xml:"entity,omitempty"` + Error *LocalizedMethodFault `xml:"error,omitempty"` +} + +func init() { + t["VmFaultToleranceConfigIssueWrapper"] = reflect.TypeOf((*VmFaultToleranceConfigIssueWrapper)(nil)).Elem() +} + +type VmFaultToleranceConfigIssueWrapperFault VmFaultToleranceConfigIssueWrapper + +func init() { + t["VmFaultToleranceConfigIssueWrapperFault"] = reflect.TypeOf((*VmFaultToleranceConfigIssueWrapperFault)(nil)).Elem() +} + +type VmFaultToleranceInvalidFileBacking struct { + VmFaultToleranceIssue + + BackingType string `xml:"backingType,omitempty"` + BackingFilename string `xml:"backingFilename,omitempty"` +} + +func init() { + t["VmFaultToleranceInvalidFileBacking"] = reflect.TypeOf((*VmFaultToleranceInvalidFileBacking)(nil)).Elem() +} + +type VmFaultToleranceInvalidFileBackingFault VmFaultToleranceInvalidFileBacking + +func init() { + t["VmFaultToleranceInvalidFileBackingFault"] = reflect.TypeOf((*VmFaultToleranceInvalidFileBackingFault)(nil)).Elem() +} + +type VmFaultToleranceIssue struct { + VimFault +} + +func init() { + t["VmFaultToleranceIssue"] = reflect.TypeOf((*VmFaultToleranceIssue)(nil)).Elem() +} + +type VmFaultToleranceIssueFault BaseVmFaultToleranceIssue + +func init() { + t["VmFaultToleranceIssueFault"] = reflect.TypeOf((*VmFaultToleranceIssueFault)(nil)).Elem() +} + +type VmFaultToleranceOpIssuesList struct { + VmFaultToleranceIssue + + Errors []LocalizedMethodFault `xml:"errors,omitempty"` + Warnings []LocalizedMethodFault `xml:"warnings,omitempty"` +} + +func init() { + t["VmFaultToleranceOpIssuesList"] = reflect.TypeOf((*VmFaultToleranceOpIssuesList)(nil)).Elem() +} + +type VmFaultToleranceOpIssuesListFault VmFaultToleranceOpIssuesList + +func init() { + t["VmFaultToleranceOpIssuesListFault"] = reflect.TypeOf((*VmFaultToleranceOpIssuesListFault)(nil)).Elem() +} + +type VmFaultToleranceStateChangedEvent struct { + VmEvent + + OldState VirtualMachineFaultToleranceState `xml:"oldState"` + NewState VirtualMachineFaultToleranceState `xml:"newState"` +} + +func init() { + t["VmFaultToleranceStateChangedEvent"] = reflect.TypeOf((*VmFaultToleranceStateChangedEvent)(nil)).Elem() +} + +type VmFaultToleranceTooManyFtVcpusOnHost struct { + 
InsufficientResourcesFault + + HostName string `xml:"hostName,omitempty"` + MaxNumFtVcpus int `xml:"maxNumFtVcpus"` +} + +func init() { + t["VmFaultToleranceTooManyFtVcpusOnHost"] = reflect.TypeOf((*VmFaultToleranceTooManyFtVcpusOnHost)(nil)).Elem() +} + +type VmFaultToleranceTooManyFtVcpusOnHostFault VmFaultToleranceTooManyFtVcpusOnHost + +func init() { + t["VmFaultToleranceTooManyFtVcpusOnHostFault"] = reflect.TypeOf((*VmFaultToleranceTooManyFtVcpusOnHostFault)(nil)).Elem() +} + +type VmFaultToleranceTooManyVMsOnHost struct { + InsufficientResourcesFault + + HostName string `xml:"hostName,omitempty"` + MaxNumFtVms int `xml:"maxNumFtVms"` +} + +func init() { + t["VmFaultToleranceTooManyVMsOnHost"] = reflect.TypeOf((*VmFaultToleranceTooManyVMsOnHost)(nil)).Elem() +} + +type VmFaultToleranceTooManyVMsOnHostFault VmFaultToleranceTooManyVMsOnHost + +func init() { + t["VmFaultToleranceTooManyVMsOnHostFault"] = reflect.TypeOf((*VmFaultToleranceTooManyVMsOnHostFault)(nil)).Elem() +} + +type VmFaultToleranceTurnedOffEvent struct { + VmEvent +} + +func init() { + t["VmFaultToleranceTurnedOffEvent"] = reflect.TypeOf((*VmFaultToleranceTurnedOffEvent)(nil)).Elem() +} + +type VmFaultToleranceVmTerminatedEvent struct { + VmEvent + + Reason string `xml:"reason,omitempty"` +} + +func init() { + t["VmFaultToleranceVmTerminatedEvent"] = reflect.TypeOf((*VmFaultToleranceVmTerminatedEvent)(nil)).Elem() +} + +type VmGuestOSCrashedEvent struct { + VmEvent +} + +func init() { + t["VmGuestOSCrashedEvent"] = reflect.TypeOf((*VmGuestOSCrashedEvent)(nil)).Elem() +} + +type VmGuestRebootEvent struct { + VmEvent +} + +func init() { + t["VmGuestRebootEvent"] = reflect.TypeOf((*VmGuestRebootEvent)(nil)).Elem() +} + +type VmGuestShutdownEvent struct { + VmEvent +} + +func init() { + t["VmGuestShutdownEvent"] = reflect.TypeOf((*VmGuestShutdownEvent)(nil)).Elem() +} + +type VmGuestStandbyEvent struct { + VmEvent +} + +func init() { + t["VmGuestStandbyEvent"] = reflect.TypeOf((*VmGuestStandbyEvent)(nil)).Elem() +} + +type VmHealthMonitoringStateChangedEvent struct { + ClusterEvent + + State string `xml:"state"` +} + +func init() { + t["VmHealthMonitoringStateChangedEvent"] = reflect.TypeOf((*VmHealthMonitoringStateChangedEvent)(nil)).Elem() +} + +type VmHostAffinityRuleViolation struct { + VmConfigFault + + VmName string `xml:"vmName"` + HostName string `xml:"hostName"` +} + +func init() { + t["VmHostAffinityRuleViolation"] = reflect.TypeOf((*VmHostAffinityRuleViolation)(nil)).Elem() +} + +type VmHostAffinityRuleViolationFault VmHostAffinityRuleViolation + +func init() { + t["VmHostAffinityRuleViolationFault"] = reflect.TypeOf((*VmHostAffinityRuleViolationFault)(nil)).Elem() +} + +type VmInstanceUuidAssignedEvent struct { + VmEvent + + InstanceUuid string `xml:"instanceUuid"` +} + +func init() { + t["VmInstanceUuidAssignedEvent"] = reflect.TypeOf((*VmInstanceUuidAssignedEvent)(nil)).Elem() +} + +type VmInstanceUuidChangedEvent struct { + VmEvent + + OldInstanceUuid string `xml:"oldInstanceUuid"` + NewInstanceUuid string `xml:"newInstanceUuid"` +} + +func init() { + t["VmInstanceUuidChangedEvent"] = reflect.TypeOf((*VmInstanceUuidChangedEvent)(nil)).Elem() +} + +type VmInstanceUuidConflictEvent struct { + VmEvent + + ConflictedVm VmEventArgument `xml:"conflictedVm"` + InstanceUuid string `xml:"instanceUuid"` +} + +func init() { + t["VmInstanceUuidConflictEvent"] = reflect.TypeOf((*VmInstanceUuidConflictEvent)(nil)).Elem() +} + +type VmLimitLicense struct { + NotEnoughLicenses + + Limit int `xml:"limit"` +} + +func init() { 
+ t["VmLimitLicense"] = reflect.TypeOf((*VmLimitLicense)(nil)).Elem() +} + +type VmLimitLicenseFault VmLimitLicense + +func init() { + t["VmLimitLicenseFault"] = reflect.TypeOf((*VmLimitLicenseFault)(nil)).Elem() +} + +type VmLogFileInfo struct { + FileInfo +} + +func init() { + t["VmLogFileInfo"] = reflect.TypeOf((*VmLogFileInfo)(nil)).Elem() +} + +type VmLogFileQuery struct { + FileQuery +} + +func init() { + t["VmLogFileQuery"] = reflect.TypeOf((*VmLogFileQuery)(nil)).Elem() +} + +type VmMacAssignedEvent struct { + VmEvent + + Adapter string `xml:"adapter"` + Mac string `xml:"mac"` +} + +func init() { + t["VmMacAssignedEvent"] = reflect.TypeOf((*VmMacAssignedEvent)(nil)).Elem() +} + +type VmMacChangedEvent struct { + VmEvent + + Adapter string `xml:"adapter"` + OldMac string `xml:"oldMac"` + NewMac string `xml:"newMac"` +} + +func init() { + t["VmMacChangedEvent"] = reflect.TypeOf((*VmMacChangedEvent)(nil)).Elem() +} + +type VmMacConflictEvent struct { + VmEvent + + ConflictedVm VmEventArgument `xml:"conflictedVm"` + Mac string `xml:"mac"` +} + +func init() { + t["VmMacConflictEvent"] = reflect.TypeOf((*VmMacConflictEvent)(nil)).Elem() +} + +type VmMaxFTRestartCountReached struct { + VmEvent +} + +func init() { + t["VmMaxFTRestartCountReached"] = reflect.TypeOf((*VmMaxFTRestartCountReached)(nil)).Elem() +} + +type VmMaxRestartCountReached struct { + VmEvent +} + +func init() { + t["VmMaxRestartCountReached"] = reflect.TypeOf((*VmMaxRestartCountReached)(nil)).Elem() +} + +type VmMessageErrorEvent struct { + VmEvent + + Message string `xml:"message"` + MessageInfo []VirtualMachineMessage `xml:"messageInfo,omitempty"` +} + +func init() { + t["VmMessageErrorEvent"] = reflect.TypeOf((*VmMessageErrorEvent)(nil)).Elem() +} + +type VmMessageEvent struct { + VmEvent + + Message string `xml:"message"` + MessageInfo []VirtualMachineMessage `xml:"messageInfo,omitempty"` +} + +func init() { + t["VmMessageEvent"] = reflect.TypeOf((*VmMessageEvent)(nil)).Elem() +} + +type VmMessageWarningEvent struct { + VmEvent + + Message string `xml:"message"` + MessageInfo []VirtualMachineMessage `xml:"messageInfo,omitempty"` +} + +func init() { + t["VmMessageWarningEvent"] = reflect.TypeOf((*VmMessageWarningEvent)(nil)).Elem() +} + +type VmMetadataManagerFault struct { + VimFault +} + +func init() { + t["VmMetadataManagerFault"] = reflect.TypeOf((*VmMetadataManagerFault)(nil)).Elem() +} + +type VmMetadataManagerFaultFault VmMetadataManagerFault + +func init() { + t["VmMetadataManagerFaultFault"] = reflect.TypeOf((*VmMetadataManagerFaultFault)(nil)).Elem() +} + +type VmMigratedEvent struct { + VmEvent + + SourceHost HostEventArgument `xml:"sourceHost"` + SourceDatacenter *DatacenterEventArgument `xml:"sourceDatacenter,omitempty"` + SourceDatastore *DatastoreEventArgument `xml:"sourceDatastore,omitempty"` +} + +func init() { + t["VmMigratedEvent"] = reflect.TypeOf((*VmMigratedEvent)(nil)).Elem() +} + +type VmMonitorIncompatibleForFaultTolerance struct { + VimFault +} + +func init() { + t["VmMonitorIncompatibleForFaultTolerance"] = reflect.TypeOf((*VmMonitorIncompatibleForFaultTolerance)(nil)).Elem() +} + +type VmMonitorIncompatibleForFaultToleranceFault VmMonitorIncompatibleForFaultTolerance + +func init() { + t["VmMonitorIncompatibleForFaultToleranceFault"] = reflect.TypeOf((*VmMonitorIncompatibleForFaultToleranceFault)(nil)).Elem() +} + +type VmNoCompatibleHostForSecondaryEvent struct { + VmEvent +} + +func init() { + t["VmNoCompatibleHostForSecondaryEvent"] = 
reflect.TypeOf((*VmNoCompatibleHostForSecondaryEvent)(nil)).Elem() +} + +type VmNoNetworkAccessEvent struct { + VmEvent + + DestHost HostEventArgument `xml:"destHost"` +} + +func init() { + t["VmNoNetworkAccessEvent"] = reflect.TypeOf((*VmNoNetworkAccessEvent)(nil)).Elem() +} + +type VmNvramFileInfo struct { + FileInfo +} + +func init() { + t["VmNvramFileInfo"] = reflect.TypeOf((*VmNvramFileInfo)(nil)).Elem() +} + +type VmNvramFileQuery struct { + FileQuery +} + +func init() { + t["VmNvramFileQuery"] = reflect.TypeOf((*VmNvramFileQuery)(nil)).Elem() +} + +type VmOrphanedEvent struct { + VmEvent +} + +func init() { + t["VmOrphanedEvent"] = reflect.TypeOf((*VmOrphanedEvent)(nil)).Elem() +} + +type VmPodConfigForPlacement struct { + DynamicData + + StoragePod ManagedObjectReference `xml:"storagePod"` + Disk []PodDiskLocator `xml:"disk,omitempty"` + VmConfig *StorageDrsVmConfigInfo `xml:"vmConfig,omitempty"` + InterVmRule []BaseClusterRuleInfo `xml:"interVmRule,omitempty,typeattr"` +} + +func init() { + t["VmPodConfigForPlacement"] = reflect.TypeOf((*VmPodConfigForPlacement)(nil)).Elem() +} + +type VmPortGroupProfile struct { + PortGroupProfile +} + +func init() { + t["VmPortGroupProfile"] = reflect.TypeOf((*VmPortGroupProfile)(nil)).Elem() +} + +type VmPowerOffOnIsolationEvent struct { + VmPoweredOffEvent + + IsolatedHost HostEventArgument `xml:"isolatedHost"` +} + +func init() { + t["VmPowerOffOnIsolationEvent"] = reflect.TypeOf((*VmPowerOffOnIsolationEvent)(nil)).Elem() +} + +type VmPowerOnDisabled struct { + InvalidState +} + +func init() { + t["VmPowerOnDisabled"] = reflect.TypeOf((*VmPowerOnDisabled)(nil)).Elem() +} + +type VmPowerOnDisabledFault VmPowerOnDisabled + +func init() { + t["VmPowerOnDisabledFault"] = reflect.TypeOf((*VmPowerOnDisabledFault)(nil)).Elem() +} + +type VmPoweredOffEvent struct { + VmEvent +} + +func init() { + t["VmPoweredOffEvent"] = reflect.TypeOf((*VmPoweredOffEvent)(nil)).Elem() +} + +type VmPoweredOnEvent struct { + VmEvent +} + +func init() { + t["VmPoweredOnEvent"] = reflect.TypeOf((*VmPoweredOnEvent)(nil)).Elem() +} + +type VmPoweringOnWithCustomizedDVPortEvent struct { + VmEvent + + Vnic []VnicPortArgument `xml:"vnic"` +} + +func init() { + t["VmPoweringOnWithCustomizedDVPortEvent"] = reflect.TypeOf((*VmPoweringOnWithCustomizedDVPortEvent)(nil)).Elem() +} + +type VmPrimaryFailoverEvent struct { + VmEvent + + Reason string `xml:"reason,omitempty"` +} + +func init() { + t["VmPrimaryFailoverEvent"] = reflect.TypeOf((*VmPrimaryFailoverEvent)(nil)).Elem() +} + +type VmReconfiguredEvent struct { + VmEvent + + ConfigSpec VirtualMachineConfigSpec `xml:"configSpec"` +} + +func init() { + t["VmReconfiguredEvent"] = reflect.TypeOf((*VmReconfiguredEvent)(nil)).Elem() +} + +type VmRegisteredEvent struct { + VmEvent +} + +func init() { + t["VmRegisteredEvent"] = reflect.TypeOf((*VmRegisteredEvent)(nil)).Elem() +} + +type VmRelayoutSuccessfulEvent struct { + VmEvent +} + +func init() { + t["VmRelayoutSuccessfulEvent"] = reflect.TypeOf((*VmRelayoutSuccessfulEvent)(nil)).Elem() +} + +type VmRelayoutUpToDateEvent struct { + VmEvent +} + +func init() { + t["VmRelayoutUpToDateEvent"] = reflect.TypeOf((*VmRelayoutUpToDateEvent)(nil)).Elem() +} + +type VmReloadFromPathEvent struct { + VmEvent + + ConfigPath string `xml:"configPath"` +} + +func init() { + t["VmReloadFromPathEvent"] = reflect.TypeOf((*VmReloadFromPathEvent)(nil)).Elem() +} + +type VmReloadFromPathFailedEvent struct { + VmEvent + + ConfigPath string `xml:"configPath"` +} + +func init() { + 
t["VmReloadFromPathFailedEvent"] = reflect.TypeOf((*VmReloadFromPathFailedEvent)(nil)).Elem() +} + +type VmRelocateFailedEvent struct { + VmRelocateSpecEvent + + DestHost HostEventArgument `xml:"destHost"` + Reason LocalizedMethodFault `xml:"reason"` + DestDatacenter *DatacenterEventArgument `xml:"destDatacenter,omitempty"` + DestDatastore *DatastoreEventArgument `xml:"destDatastore,omitempty"` +} + +func init() { + t["VmRelocateFailedEvent"] = reflect.TypeOf((*VmRelocateFailedEvent)(nil)).Elem() +} + +type VmRelocateSpecEvent struct { + VmEvent +} + +func init() { + t["VmRelocateSpecEvent"] = reflect.TypeOf((*VmRelocateSpecEvent)(nil)).Elem() +} + +type VmRelocatedEvent struct { + VmRelocateSpecEvent + + SourceHost HostEventArgument `xml:"sourceHost"` + SourceDatacenter *DatacenterEventArgument `xml:"sourceDatacenter,omitempty"` + SourceDatastore *DatastoreEventArgument `xml:"sourceDatastore,omitempty"` +} + +func init() { + t["VmRelocatedEvent"] = reflect.TypeOf((*VmRelocatedEvent)(nil)).Elem() +} + +type VmRemoteConsoleConnectedEvent struct { + VmEvent +} + +func init() { + t["VmRemoteConsoleConnectedEvent"] = reflect.TypeOf((*VmRemoteConsoleConnectedEvent)(nil)).Elem() +} + +type VmRemoteConsoleDisconnectedEvent struct { + VmEvent +} + +func init() { + t["VmRemoteConsoleDisconnectedEvent"] = reflect.TypeOf((*VmRemoteConsoleDisconnectedEvent)(nil)).Elem() +} + +type VmRemovedEvent struct { + VmEvent +} + +func init() { + t["VmRemovedEvent"] = reflect.TypeOf((*VmRemovedEvent)(nil)).Elem() +} + +type VmRenamedEvent struct { + VmEvent + + OldName string `xml:"oldName"` + NewName string `xml:"newName"` +} + +func init() { + t["VmRenamedEvent"] = reflect.TypeOf((*VmRenamedEvent)(nil)).Elem() +} + +type VmRequirementsExceedCurrentEVCModeEvent struct { + VmEvent +} + +func init() { + t["VmRequirementsExceedCurrentEVCModeEvent"] = reflect.TypeOf((*VmRequirementsExceedCurrentEVCModeEvent)(nil)).Elem() +} + +type VmResettingEvent struct { + VmEvent +} + +func init() { + t["VmResettingEvent"] = reflect.TypeOf((*VmResettingEvent)(nil)).Elem() +} + +type VmResourcePoolMovedEvent struct { + VmEvent + + OldParent ResourcePoolEventArgument `xml:"oldParent"` + NewParent ResourcePoolEventArgument `xml:"newParent"` +} + +func init() { + t["VmResourcePoolMovedEvent"] = reflect.TypeOf((*VmResourcePoolMovedEvent)(nil)).Elem() +} + +type VmResourceReallocatedEvent struct { + VmEvent +} + +func init() { + t["VmResourceReallocatedEvent"] = reflect.TypeOf((*VmResourceReallocatedEvent)(nil)).Elem() +} + +type VmRestartedOnAlternateHostEvent struct { + VmPoweredOnEvent + + SourceHost HostEventArgument `xml:"sourceHost"` +} + +func init() { + t["VmRestartedOnAlternateHostEvent"] = reflect.TypeOf((*VmRestartedOnAlternateHostEvent)(nil)).Elem() +} + +type VmResumingEvent struct { + VmEvent +} + +func init() { + t["VmResumingEvent"] = reflect.TypeOf((*VmResumingEvent)(nil)).Elem() +} + +type VmSecondaryAddedEvent struct { + VmEvent +} + +func init() { + t["VmSecondaryAddedEvent"] = reflect.TypeOf((*VmSecondaryAddedEvent)(nil)).Elem() +} + +type VmSecondaryDisabledBySystemEvent struct { + VmEvent + + Reason *LocalizedMethodFault `xml:"reason,omitempty"` +} + +func init() { + t["VmSecondaryDisabledBySystemEvent"] = reflect.TypeOf((*VmSecondaryDisabledBySystemEvent)(nil)).Elem() +} + +type VmSecondaryDisabledEvent struct { + VmEvent +} + +func init() { + t["VmSecondaryDisabledEvent"] = reflect.TypeOf((*VmSecondaryDisabledEvent)(nil)).Elem() +} + +type VmSecondaryEnabledEvent struct { + VmEvent +} + +func init() { + 
t["VmSecondaryEnabledEvent"] = reflect.TypeOf((*VmSecondaryEnabledEvent)(nil)).Elem() +} + +type VmSecondaryStartedEvent struct { + VmEvent +} + +func init() { + t["VmSecondaryStartedEvent"] = reflect.TypeOf((*VmSecondaryStartedEvent)(nil)).Elem() +} + +type VmShutdownOnIsolationEvent struct { + VmPoweredOffEvent + + IsolatedHost HostEventArgument `xml:"isolatedHost"` + ShutdownResult string `xml:"shutdownResult,omitempty"` +} + +func init() { + t["VmShutdownOnIsolationEvent"] = reflect.TypeOf((*VmShutdownOnIsolationEvent)(nil)).Elem() +} + +type VmSmpFaultToleranceTooManyVMsOnHost struct { + InsufficientResourcesFault + + HostName string `xml:"hostName,omitempty"` + MaxNumSmpFtVms int `xml:"maxNumSmpFtVms"` +} + +func init() { + t["VmSmpFaultToleranceTooManyVMsOnHost"] = reflect.TypeOf((*VmSmpFaultToleranceTooManyVMsOnHost)(nil)).Elem() +} + +type VmSmpFaultToleranceTooManyVMsOnHostFault VmSmpFaultToleranceTooManyVMsOnHost + +func init() { + t["VmSmpFaultToleranceTooManyVMsOnHostFault"] = reflect.TypeOf((*VmSmpFaultToleranceTooManyVMsOnHostFault)(nil)).Elem() +} + +type VmSnapshotFileInfo struct { + FileInfo +} + +func init() { + t["VmSnapshotFileInfo"] = reflect.TypeOf((*VmSnapshotFileInfo)(nil)).Elem() +} + +type VmSnapshotFileQuery struct { + FileQuery +} + +func init() { + t["VmSnapshotFileQuery"] = reflect.TypeOf((*VmSnapshotFileQuery)(nil)).Elem() +} + +type VmStartRecordingEvent struct { + VmEvent +} + +func init() { + t["VmStartRecordingEvent"] = reflect.TypeOf((*VmStartRecordingEvent)(nil)).Elem() +} + +type VmStartReplayingEvent struct { + VmEvent +} + +func init() { + t["VmStartReplayingEvent"] = reflect.TypeOf((*VmStartReplayingEvent)(nil)).Elem() +} + +type VmStartingEvent struct { + VmEvent +} + +func init() { + t["VmStartingEvent"] = reflect.TypeOf((*VmStartingEvent)(nil)).Elem() +} + +type VmStartingSecondaryEvent struct { + VmEvent +} + +func init() { + t["VmStartingSecondaryEvent"] = reflect.TypeOf((*VmStartingSecondaryEvent)(nil)).Elem() +} + +type VmStaticMacConflictEvent struct { + VmEvent + + ConflictedVm VmEventArgument `xml:"conflictedVm"` + Mac string `xml:"mac"` +} + +func init() { + t["VmStaticMacConflictEvent"] = reflect.TypeOf((*VmStaticMacConflictEvent)(nil)).Elem() +} + +type VmStoppingEvent struct { + VmEvent +} + +func init() { + t["VmStoppingEvent"] = reflect.TypeOf((*VmStoppingEvent)(nil)).Elem() +} + +type VmSuspendedEvent struct { + VmEvent +} + +func init() { + t["VmSuspendedEvent"] = reflect.TypeOf((*VmSuspendedEvent)(nil)).Elem() +} + +type VmSuspendingEvent struct { + VmEvent +} + +func init() { + t["VmSuspendingEvent"] = reflect.TypeOf((*VmSuspendingEvent)(nil)).Elem() +} + +type VmTimedoutStartingSecondaryEvent struct { + VmEvent + + Timeout int64 `xml:"timeout,omitempty"` +} + +func init() { + t["VmTimedoutStartingSecondaryEvent"] = reflect.TypeOf((*VmTimedoutStartingSecondaryEvent)(nil)).Elem() +} + +type VmToolsUpgradeFault struct { + VimFault +} + +func init() { + t["VmToolsUpgradeFault"] = reflect.TypeOf((*VmToolsUpgradeFault)(nil)).Elem() +} + +type VmToolsUpgradeFaultFault BaseVmToolsUpgradeFault + +func init() { + t["VmToolsUpgradeFaultFault"] = reflect.TypeOf((*VmToolsUpgradeFaultFault)(nil)).Elem() +} + +type VmUnsupportedStartingEvent struct { + VmStartingEvent + + GuestId string `xml:"guestId"` +} + +func init() { + t["VmUnsupportedStartingEvent"] = reflect.TypeOf((*VmUnsupportedStartingEvent)(nil)).Elem() +} + +type VmUpgradeCompleteEvent struct { + VmEvent + + Version string `xml:"version"` +} + +func init() { + 
t["VmUpgradeCompleteEvent"] = reflect.TypeOf((*VmUpgradeCompleteEvent)(nil)).Elem() +} + +type VmUpgradeFailedEvent struct { + VmEvent +} + +func init() { + t["VmUpgradeFailedEvent"] = reflect.TypeOf((*VmUpgradeFailedEvent)(nil)).Elem() +} + +type VmUpgradingEvent struct { + VmEvent + + Version string `xml:"version"` +} + +func init() { + t["VmUpgradingEvent"] = reflect.TypeOf((*VmUpgradingEvent)(nil)).Elem() +} + +type VmUuidAssignedEvent struct { + VmEvent + + Uuid string `xml:"uuid"` +} + +func init() { + t["VmUuidAssignedEvent"] = reflect.TypeOf((*VmUuidAssignedEvent)(nil)).Elem() +} + +type VmUuidChangedEvent struct { + VmEvent + + OldUuid string `xml:"oldUuid"` + NewUuid string `xml:"newUuid"` +} + +func init() { + t["VmUuidChangedEvent"] = reflect.TypeOf((*VmUuidChangedEvent)(nil)).Elem() +} + +type VmUuidConflictEvent struct { + VmEvent + + ConflictedVm VmEventArgument `xml:"conflictedVm"` + Uuid string `xml:"uuid"` +} + +func init() { + t["VmUuidConflictEvent"] = reflect.TypeOf((*VmUuidConflictEvent)(nil)).Elem() +} + +type VmValidateMaxDevice struct { + VimFault + + Device string `xml:"device"` + Max int `xml:"max"` + Count int `xml:"count"` +} + +func init() { + t["VmValidateMaxDevice"] = reflect.TypeOf((*VmValidateMaxDevice)(nil)).Elem() +} + +type VmValidateMaxDeviceFault VmValidateMaxDevice + +func init() { + t["VmValidateMaxDeviceFault"] = reflect.TypeOf((*VmValidateMaxDeviceFault)(nil)).Elem() +} + +type VmVnicPoolReservationViolationClearEvent struct { + DvsEvent + + VmVnicResourcePoolKey string `xml:"vmVnicResourcePoolKey"` + VmVnicResourcePoolName string `xml:"vmVnicResourcePoolName,omitempty"` +} + +func init() { + t["VmVnicPoolReservationViolationClearEvent"] = reflect.TypeOf((*VmVnicPoolReservationViolationClearEvent)(nil)).Elem() +} + +type VmVnicPoolReservationViolationRaiseEvent struct { + DvsEvent + + VmVnicResourcePoolKey string `xml:"vmVnicResourcePoolKey"` + VmVnicResourcePoolName string `xml:"vmVnicResourcePoolName,omitempty"` +} + +func init() { + t["VmVnicPoolReservationViolationRaiseEvent"] = reflect.TypeOf((*VmVnicPoolReservationViolationRaiseEvent)(nil)).Elem() +} + +type VmWwnAssignedEvent struct { + VmEvent + + NodeWwns []int64 `xml:"nodeWwns"` + PortWwns []int64 `xml:"portWwns"` +} + +func init() { + t["VmWwnAssignedEvent"] = reflect.TypeOf((*VmWwnAssignedEvent)(nil)).Elem() +} + +type VmWwnChangedEvent struct { + VmEvent + + OldNodeWwns []int64 `xml:"oldNodeWwns,omitempty"` + OldPortWwns []int64 `xml:"oldPortWwns,omitempty"` + NewNodeWwns []int64 `xml:"newNodeWwns,omitempty"` + NewPortWwns []int64 `xml:"newPortWwns,omitempty"` +} + +func init() { + t["VmWwnChangedEvent"] = reflect.TypeOf((*VmWwnChangedEvent)(nil)).Elem() +} + +type VmWwnConflict struct { + InvalidVmConfig + + Vm *ManagedObjectReference `xml:"vm,omitempty"` + Host *ManagedObjectReference `xml:"host,omitempty"` + Name string `xml:"name,omitempty"` + Wwn int64 `xml:"wwn,omitempty"` +} + +func init() { + t["VmWwnConflict"] = reflect.TypeOf((*VmWwnConflict)(nil)).Elem() +} + +type VmWwnConflictEvent struct { + VmEvent + + ConflictedVms []VmEventArgument `xml:"conflictedVms,omitempty"` + ConflictedHosts []HostEventArgument `xml:"conflictedHosts,omitempty"` + Wwn int64 `xml:"wwn"` +} + +func init() { + t["VmWwnConflictEvent"] = reflect.TypeOf((*VmWwnConflictEvent)(nil)).Elem() +} + +type VmWwnConflictFault VmWwnConflict + +func init() { + t["VmWwnConflictFault"] = reflect.TypeOf((*VmWwnConflictFault)(nil)).Elem() +} + +type VmfsAlreadyMounted struct { + VmfsMountFault +} + +func init() { + 
t["VmfsAlreadyMounted"] = reflect.TypeOf((*VmfsAlreadyMounted)(nil)).Elem() +} + +type VmfsAlreadyMountedFault VmfsAlreadyMounted + +func init() { + t["VmfsAlreadyMountedFault"] = reflect.TypeOf((*VmfsAlreadyMountedFault)(nil)).Elem() +} + +type VmfsAmbiguousMount struct { + VmfsMountFault +} + +func init() { + t["VmfsAmbiguousMount"] = reflect.TypeOf((*VmfsAmbiguousMount)(nil)).Elem() +} + +type VmfsAmbiguousMountFault VmfsAmbiguousMount + +func init() { + t["VmfsAmbiguousMountFault"] = reflect.TypeOf((*VmfsAmbiguousMountFault)(nil)).Elem() +} + +type VmfsDatastoreAllExtentOption struct { + VmfsDatastoreSingleExtentOption +} + +func init() { + t["VmfsDatastoreAllExtentOption"] = reflect.TypeOf((*VmfsDatastoreAllExtentOption)(nil)).Elem() +} + +type VmfsDatastoreBaseOption struct { + DynamicData + + Layout HostDiskPartitionLayout `xml:"layout"` + PartitionFormatChange *bool `xml:"partitionFormatChange"` +} + +func init() { + t["VmfsDatastoreBaseOption"] = reflect.TypeOf((*VmfsDatastoreBaseOption)(nil)).Elem() +} + +type VmfsDatastoreCreateSpec struct { + VmfsDatastoreSpec + + Partition HostDiskPartitionSpec `xml:"partition"` + Vmfs HostVmfsSpec `xml:"vmfs"` + Extent []HostScsiDiskPartition `xml:"extent,omitempty"` +} + +func init() { + t["VmfsDatastoreCreateSpec"] = reflect.TypeOf((*VmfsDatastoreCreateSpec)(nil)).Elem() +} + +type VmfsDatastoreExpandSpec struct { + VmfsDatastoreSpec + + Partition HostDiskPartitionSpec `xml:"partition"` + Extent HostScsiDiskPartition `xml:"extent"` +} + +func init() { + t["VmfsDatastoreExpandSpec"] = reflect.TypeOf((*VmfsDatastoreExpandSpec)(nil)).Elem() +} + +type VmfsDatastoreExtendSpec struct { + VmfsDatastoreSpec + + Partition HostDiskPartitionSpec `xml:"partition"` + Extent []HostScsiDiskPartition `xml:"extent"` +} + +func init() { + t["VmfsDatastoreExtendSpec"] = reflect.TypeOf((*VmfsDatastoreExtendSpec)(nil)).Elem() +} + +type VmfsDatastoreInfo struct { + DatastoreInfo + + MaxPhysicalRDMFileSize int64 `xml:"maxPhysicalRDMFileSize,omitempty"` + MaxVirtualRDMFileSize int64 `xml:"maxVirtualRDMFileSize,omitempty"` + Vmfs *HostVmfsVolume `xml:"vmfs,omitempty"` +} + +func init() { + t["VmfsDatastoreInfo"] = reflect.TypeOf((*VmfsDatastoreInfo)(nil)).Elem() +} + +type VmfsDatastoreMultipleExtentOption struct { + VmfsDatastoreBaseOption + + VmfsExtent []HostDiskPartitionBlockRange `xml:"vmfsExtent"` +} + +func init() { + t["VmfsDatastoreMultipleExtentOption"] = reflect.TypeOf((*VmfsDatastoreMultipleExtentOption)(nil)).Elem() +} + +type VmfsDatastoreOption struct { + DynamicData + + Info BaseVmfsDatastoreBaseOption `xml:"info,typeattr"` + Spec BaseVmfsDatastoreSpec `xml:"spec,typeattr"` +} + +func init() { + t["VmfsDatastoreOption"] = reflect.TypeOf((*VmfsDatastoreOption)(nil)).Elem() +} + +type VmfsDatastoreSingleExtentOption struct { + VmfsDatastoreBaseOption + + VmfsExtent HostDiskPartitionBlockRange `xml:"vmfsExtent"` +} + +func init() { + t["VmfsDatastoreSingleExtentOption"] = reflect.TypeOf((*VmfsDatastoreSingleExtentOption)(nil)).Elem() +} + +type VmfsDatastoreSpec struct { + DynamicData + + DiskUuid string `xml:"diskUuid"` +} + +func init() { + t["VmfsDatastoreSpec"] = reflect.TypeOf((*VmfsDatastoreSpec)(nil)).Elem() +} + +type VmfsMountFault struct { + HostConfigFault + + Uuid string `xml:"uuid"` +} + +func init() { + t["VmfsMountFault"] = reflect.TypeOf((*VmfsMountFault)(nil)).Elem() +} + +type VmfsMountFaultFault BaseVmfsMountFault + +func init() { + t["VmfsMountFaultFault"] = reflect.TypeOf((*VmfsMountFaultFault)(nil)).Elem() +} + +type 
VmotionInterfaceNotEnabled struct { + HostPowerOpFailed +} + +func init() { + t["VmotionInterfaceNotEnabled"] = reflect.TypeOf((*VmotionInterfaceNotEnabled)(nil)).Elem() +} + +type VmotionInterfaceNotEnabledFault VmotionInterfaceNotEnabled + +func init() { + t["VmotionInterfaceNotEnabledFault"] = reflect.TypeOf((*VmotionInterfaceNotEnabledFault)(nil)).Elem() +} + +type VmwareDistributedVirtualSwitchPvlanSpec struct { + VmwareDistributedVirtualSwitchVlanSpec + + PvlanId int `xml:"pvlanId"` +} + +func init() { + t["VmwareDistributedVirtualSwitchPvlanSpec"] = reflect.TypeOf((*VmwareDistributedVirtualSwitchPvlanSpec)(nil)).Elem() +} + +type VmwareDistributedVirtualSwitchTrunkVlanSpec struct { + VmwareDistributedVirtualSwitchVlanSpec + + VlanId []NumericRange `xml:"vlanId"` +} + +func init() { + t["VmwareDistributedVirtualSwitchTrunkVlanSpec"] = reflect.TypeOf((*VmwareDistributedVirtualSwitchTrunkVlanSpec)(nil)).Elem() +} + +type VmwareDistributedVirtualSwitchVlanIdSpec struct { + VmwareDistributedVirtualSwitchVlanSpec + + VlanId int `xml:"vlanId"` +} + +func init() { + t["VmwareDistributedVirtualSwitchVlanIdSpec"] = reflect.TypeOf((*VmwareDistributedVirtualSwitchVlanIdSpec)(nil)).Elem() +} + +type VmwareDistributedVirtualSwitchVlanSpec struct { + InheritablePolicy +} + +func init() { + t["VmwareDistributedVirtualSwitchVlanSpec"] = reflect.TypeOf((*VmwareDistributedVirtualSwitchVlanSpec)(nil)).Elem() +} + +type VmwareUplinkPortTeamingPolicy struct { + InheritablePolicy + + Policy *StringPolicy `xml:"policy,omitempty"` + ReversePolicy *BoolPolicy `xml:"reversePolicy,omitempty"` + NotifySwitches *BoolPolicy `xml:"notifySwitches,omitempty"` + RollingOrder *BoolPolicy `xml:"rollingOrder,omitempty"` + FailureCriteria *DVSFailureCriteria `xml:"failureCriteria,omitempty"` + UplinkPortOrder *VMwareUplinkPortOrderPolicy `xml:"uplinkPortOrder,omitempty"` +} + +func init() { + t["VmwareUplinkPortTeamingPolicy"] = reflect.TypeOf((*VmwareUplinkPortTeamingPolicy)(nil)).Elem() +} + +type VnicPortArgument struct { + DynamicData + + Vnic string `xml:"vnic"` + Port DistributedVirtualSwitchPortConnection `xml:"port"` +} + +func init() { + t["VnicPortArgument"] = reflect.TypeOf((*VnicPortArgument)(nil)).Elem() +} + +type VolumeEditorError struct { + CustomizationFault +} + +func init() { + t["VolumeEditorError"] = reflect.TypeOf((*VolumeEditorError)(nil)).Elem() +} + +type VolumeEditorErrorFault VolumeEditorError + +func init() { + t["VolumeEditorErrorFault"] = reflect.TypeOf((*VolumeEditorErrorFault)(nil)).Elem() +} + +type VramLimitLicense struct { + NotEnoughLicenses + + Limit int `xml:"limit"` +} + +func init() { + t["VramLimitLicense"] = reflect.TypeOf((*VramLimitLicense)(nil)).Elem() +} + +type VramLimitLicenseFault VramLimitLicense + +func init() { + t["VramLimitLicenseFault"] = reflect.TypeOf((*VramLimitLicenseFault)(nil)).Elem() +} + +type VrpResourceAllocationInfo struct { + ResourceAllocationInfo + + ReservationLimit int64 `xml:"reservationLimit,omitempty"` +} + +func init() { + t["VrpResourceAllocationInfo"] = reflect.TypeOf((*VrpResourceAllocationInfo)(nil)).Elem() +} + +type VsanClusterConfigInfo struct { + DynamicData + + Enabled *bool `xml:"enabled"` + DefaultConfig *VsanClusterConfigInfoHostDefaultInfo `xml:"defaultConfig,omitempty"` +} + +func init() { + t["VsanClusterConfigInfo"] = reflect.TypeOf((*VsanClusterConfigInfo)(nil)).Elem() +} + +type VsanClusterConfigInfoHostDefaultInfo struct { + DynamicData + + Uuid string `xml:"uuid,omitempty"` + AutoClaimStorage *bool `xml:"autoClaimStorage"` + 
ChecksumEnabled *bool `xml:"checksumEnabled"` +} + +func init() { + t["VsanClusterConfigInfoHostDefaultInfo"] = reflect.TypeOf((*VsanClusterConfigInfoHostDefaultInfo)(nil)).Elem() +} + +type VsanClusterUuidMismatch struct { + CannotMoveVsanEnabledHost + + HostClusterUuid string `xml:"hostClusterUuid"` + DestinationClusterUuid string `xml:"destinationClusterUuid"` +} + +func init() { + t["VsanClusterUuidMismatch"] = reflect.TypeOf((*VsanClusterUuidMismatch)(nil)).Elem() +} + +type VsanClusterUuidMismatchFault VsanClusterUuidMismatch + +func init() { + t["VsanClusterUuidMismatchFault"] = reflect.TypeOf((*VsanClusterUuidMismatchFault)(nil)).Elem() +} + +type VsanDiskFault struct { + VsanFault + + Device string `xml:"device,omitempty"` +} + +func init() { + t["VsanDiskFault"] = reflect.TypeOf((*VsanDiskFault)(nil)).Elem() +} + +type VsanDiskFaultFault BaseVsanDiskFault + +func init() { + t["VsanDiskFaultFault"] = reflect.TypeOf((*VsanDiskFaultFault)(nil)).Elem() +} + +type VsanFault struct { + VimFault +} + +func init() { + t["VsanFault"] = reflect.TypeOf((*VsanFault)(nil)).Elem() +} + +type VsanFaultFault BaseVsanFault + +func init() { + t["VsanFaultFault"] = reflect.TypeOf((*VsanFaultFault)(nil)).Elem() +} + +type VsanHostClusterStatus struct { + DynamicData + + Uuid string `xml:"uuid,omitempty"` + NodeUuid string `xml:"nodeUuid,omitempty"` + Health string `xml:"health"` + NodeState VsanHostClusterStatusState `xml:"nodeState"` + MemberUuid []string `xml:"memberUuid,omitempty"` +} + +func init() { + t["VsanHostClusterStatus"] = reflect.TypeOf((*VsanHostClusterStatus)(nil)).Elem() +} + +type VsanHostClusterStatusState struct { + DynamicData + + State string `xml:"state"` + Completion *VsanHostClusterStatusStateCompletionEstimate `xml:"completion,omitempty"` +} + +func init() { + t["VsanHostClusterStatusState"] = reflect.TypeOf((*VsanHostClusterStatusState)(nil)).Elem() +} + +type VsanHostClusterStatusStateCompletionEstimate struct { + DynamicData + + CompleteTime *time.Time `xml:"completeTime"` + PercentComplete int `xml:"percentComplete,omitempty"` +} + +func init() { + t["VsanHostClusterStatusStateCompletionEstimate"] = reflect.TypeOf((*VsanHostClusterStatusStateCompletionEstimate)(nil)).Elem() +} + +type VsanHostConfigInfo struct { + DynamicData + + Enabled *bool `xml:"enabled"` + HostSystem *ManagedObjectReference `xml:"hostSystem,omitempty"` + ClusterInfo *VsanHostConfigInfoClusterInfo `xml:"clusterInfo,omitempty"` + StorageInfo *VsanHostConfigInfoStorageInfo `xml:"storageInfo,omitempty"` + NetworkInfo *VsanHostConfigInfoNetworkInfo `xml:"networkInfo,omitempty"` + FaultDomainInfo *VsanHostFaultDomainInfo `xml:"faultDomainInfo,omitempty"` +} + +func init() { + t["VsanHostConfigInfo"] = reflect.TypeOf((*VsanHostConfigInfo)(nil)).Elem() +} + +type VsanHostConfigInfoClusterInfo struct { + DynamicData + + Uuid string `xml:"uuid,omitempty"` + NodeUuid string `xml:"nodeUuid,omitempty"` +} + +func init() { + t["VsanHostConfigInfoClusterInfo"] = reflect.TypeOf((*VsanHostConfigInfoClusterInfo)(nil)).Elem() +} + +type VsanHostConfigInfoNetworkInfo struct { + DynamicData + + Port []VsanHostConfigInfoNetworkInfoPortConfig `xml:"port,omitempty"` +} + +func init() { + t["VsanHostConfigInfoNetworkInfo"] = reflect.TypeOf((*VsanHostConfigInfoNetworkInfo)(nil)).Elem() +} + +type VsanHostConfigInfoNetworkInfoPortConfig struct { + DynamicData + + IpConfig *VsanHostIpConfig `xml:"ipConfig,omitempty"` + Device string `xml:"device"` +} + +func init() { + t["VsanHostConfigInfoNetworkInfoPortConfig"] = 
reflect.TypeOf((*VsanHostConfigInfoNetworkInfoPortConfig)(nil)).Elem() +} + +type VsanHostConfigInfoStorageInfo struct { + DynamicData + + AutoClaimStorage *bool `xml:"autoClaimStorage"` + DiskMapping []VsanHostDiskMapping `xml:"diskMapping,omitempty"` + DiskMapInfo []VsanHostDiskMapInfo `xml:"diskMapInfo,omitempty"` + ChecksumEnabled *bool `xml:"checksumEnabled"` +} + +func init() { + t["VsanHostConfigInfoStorageInfo"] = reflect.TypeOf((*VsanHostConfigInfoStorageInfo)(nil)).Elem() +} + +type VsanHostDecommissionMode struct { + DynamicData + + ObjectAction string `xml:"objectAction"` +} + +func init() { + t["VsanHostDecommissionMode"] = reflect.TypeOf((*VsanHostDecommissionMode)(nil)).Elem() +} + +type VsanHostDiskMapInfo struct { + DynamicData + + Mapping VsanHostDiskMapping `xml:"mapping"` + Mounted bool `xml:"mounted"` +} + +func init() { + t["VsanHostDiskMapInfo"] = reflect.TypeOf((*VsanHostDiskMapInfo)(nil)).Elem() +} + +type VsanHostDiskMapResult struct { + DynamicData + + Mapping VsanHostDiskMapping `xml:"mapping"` + DiskResult []VsanHostDiskResult `xml:"diskResult,omitempty"` + Error *LocalizedMethodFault `xml:"error,omitempty"` +} + +func init() { + t["VsanHostDiskMapResult"] = reflect.TypeOf((*VsanHostDiskMapResult)(nil)).Elem() +} + +type VsanHostDiskMapping struct { + DynamicData + + Ssd HostScsiDisk `xml:"ssd"` + NonSsd []HostScsiDisk `xml:"nonSsd"` +} + +func init() { + t["VsanHostDiskMapping"] = reflect.TypeOf((*VsanHostDiskMapping)(nil)).Elem() +} + +type VsanHostDiskResult struct { + DynamicData + + Disk HostScsiDisk `xml:"disk"` + State string `xml:"state"` + VsanUuid string `xml:"vsanUuid,omitempty"` + Error *LocalizedMethodFault `xml:"error,omitempty"` + Degraded *bool `xml:"degraded"` +} + +func init() { + t["VsanHostDiskResult"] = reflect.TypeOf((*VsanHostDiskResult)(nil)).Elem() +} + +type VsanHostFaultDomainInfo struct { + DynamicData + + Name string `xml:"name"` +} + +func init() { + t["VsanHostFaultDomainInfo"] = reflect.TypeOf((*VsanHostFaultDomainInfo)(nil)).Elem() +} + +type VsanHostIpConfig struct { + DynamicData + + UpstreamIpAddress string `xml:"upstreamIpAddress"` + DownstreamIpAddress string `xml:"downstreamIpAddress"` +} + +func init() { + t["VsanHostIpConfig"] = reflect.TypeOf((*VsanHostIpConfig)(nil)).Elem() +} + +type VsanHostMembershipInfo struct { + DynamicData + + NodeUuid string `xml:"nodeUuid"` + Hostname string `xml:"hostname"` +} + +func init() { + t["VsanHostMembershipInfo"] = reflect.TypeOf((*VsanHostMembershipInfo)(nil)).Elem() +} + +type VsanHostRuntimeInfo struct { + DynamicData + + MembershipList []VsanHostMembershipInfo `xml:"membershipList,omitempty"` + DiskIssues []VsanHostRuntimeInfoDiskIssue `xml:"diskIssues,omitempty"` + AccessGenNo int `xml:"accessGenNo,omitempty"` +} + +func init() { + t["VsanHostRuntimeInfo"] = reflect.TypeOf((*VsanHostRuntimeInfo)(nil)).Elem() +} + +type VsanHostRuntimeInfoDiskIssue struct { + DynamicData + + DiskId string `xml:"diskId"` + Issue string `xml:"issue"` +} + +func init() { + t["VsanHostRuntimeInfoDiskIssue"] = reflect.TypeOf((*VsanHostRuntimeInfoDiskIssue)(nil)).Elem() +} + +type VsanHostVsanDiskInfo struct { + DynamicData + + VsanUuid string `xml:"vsanUuid"` + FormatVersion int `xml:"formatVersion"` +} + +func init() { + t["VsanHostVsanDiskInfo"] = reflect.TypeOf((*VsanHostVsanDiskInfo)(nil)).Elem() +} + +type VsanIncompatibleDiskMapping struct { + VsanDiskFault +} + +func init() { + t["VsanIncompatibleDiskMapping"] = reflect.TypeOf((*VsanIncompatibleDiskMapping)(nil)).Elem() +} + +type 
VsanIncompatibleDiskMappingFault VsanIncompatibleDiskMapping + +func init() { + t["VsanIncompatibleDiskMappingFault"] = reflect.TypeOf((*VsanIncompatibleDiskMappingFault)(nil)).Elem() +} + +type VsanNewPolicyBatch struct { + DynamicData + + Size []int64 `xml:"size,omitempty"` + Policy string `xml:"policy,omitempty"` +} + +func init() { + t["VsanNewPolicyBatch"] = reflect.TypeOf((*VsanNewPolicyBatch)(nil)).Elem() +} + +type VsanPolicyChangeBatch struct { + DynamicData + + Uuid []string `xml:"uuid,omitempty"` + Policy string `xml:"policy,omitempty"` +} + +func init() { + t["VsanPolicyChangeBatch"] = reflect.TypeOf((*VsanPolicyChangeBatch)(nil)).Elem() +} + +type VsanPolicyCost struct { + DynamicData + + ChangeDataSize int64 `xml:"changeDataSize,omitempty"` + CurrentDataSize int64 `xml:"currentDataSize,omitempty"` + TempDataSize int64 `xml:"tempDataSize,omitempty"` + CopyDataSize int64 `xml:"copyDataSize,omitempty"` + ChangeFlashReadCacheSize int64 `xml:"changeFlashReadCacheSize,omitempty"` + CurrentFlashReadCacheSize int64 `xml:"currentFlashReadCacheSize,omitempty"` + CurrentDiskSpaceToAddressSpaceRatio float32 `xml:"currentDiskSpaceToAddressSpaceRatio,omitempty"` + DiskSpaceToAddressSpaceRatio float32 `xml:"diskSpaceToAddressSpaceRatio,omitempty"` +} + +func init() { + t["VsanPolicyCost"] = reflect.TypeOf((*VsanPolicyCost)(nil)).Elem() +} + +type VsanPolicySatisfiability struct { + DynamicData + + Uuid string `xml:"uuid,omitempty"` + IsSatisfiable bool `xml:"isSatisfiable"` + Reason *LocalizableMessage `xml:"reason,omitempty"` + Cost *VsanPolicyCost `xml:"cost,omitempty"` +} + +func init() { + t["VsanPolicySatisfiability"] = reflect.TypeOf((*VsanPolicySatisfiability)(nil)).Elem() +} + +type VsanUpgradeSystemAPIBrokenIssue struct { + VsanUpgradeSystemPreflightCheckIssue + + Hosts []ManagedObjectReference `xml:"hosts"` +} + +func init() { + t["VsanUpgradeSystemAPIBrokenIssue"] = reflect.TypeOf((*VsanUpgradeSystemAPIBrokenIssue)(nil)).Elem() +} + +type VsanUpgradeSystemAutoClaimEnabledOnHostsIssue struct { + VsanUpgradeSystemPreflightCheckIssue + + Hosts []ManagedObjectReference `xml:"hosts"` +} + +func init() { + t["VsanUpgradeSystemAutoClaimEnabledOnHostsIssue"] = reflect.TypeOf((*VsanUpgradeSystemAutoClaimEnabledOnHostsIssue)(nil)).Elem() +} + +type VsanUpgradeSystemHostsDisconnectedIssue struct { + VsanUpgradeSystemPreflightCheckIssue + + Hosts []ManagedObjectReference `xml:"hosts"` +} + +func init() { + t["VsanUpgradeSystemHostsDisconnectedIssue"] = reflect.TypeOf((*VsanUpgradeSystemHostsDisconnectedIssue)(nil)).Elem() +} + +type VsanUpgradeSystemMissingHostsInClusterIssue struct { + VsanUpgradeSystemPreflightCheckIssue + + Hosts []ManagedObjectReference `xml:"hosts"` +} + +func init() { + t["VsanUpgradeSystemMissingHostsInClusterIssue"] = reflect.TypeOf((*VsanUpgradeSystemMissingHostsInClusterIssue)(nil)).Elem() +} + +type VsanUpgradeSystemNetworkPartitionInfo struct { + DynamicData + + Hosts []ManagedObjectReference `xml:"hosts"` +} + +func init() { + t["VsanUpgradeSystemNetworkPartitionInfo"] = reflect.TypeOf((*VsanUpgradeSystemNetworkPartitionInfo)(nil)).Elem() +} + +type VsanUpgradeSystemNetworkPartitionIssue struct { + VsanUpgradeSystemPreflightCheckIssue + + Partitions []VsanUpgradeSystemNetworkPartitionInfo `xml:"partitions"` +} + +func init() { + t["VsanUpgradeSystemNetworkPartitionIssue"] = reflect.TypeOf((*VsanUpgradeSystemNetworkPartitionIssue)(nil)).Elem() +} + +type VsanUpgradeSystemNotEnoughFreeCapacityIssue struct { + VsanUpgradeSystemPreflightCheckIssue + + 
ReducedRedundancyUpgradePossible bool `xml:"reducedRedundancyUpgradePossible"` +} + +func init() { + t["VsanUpgradeSystemNotEnoughFreeCapacityIssue"] = reflect.TypeOf((*VsanUpgradeSystemNotEnoughFreeCapacityIssue)(nil)).Elem() +} + +type VsanUpgradeSystemPreflightCheckIssue struct { + DynamicData + + Msg string `xml:"msg"` +} + +func init() { + t["VsanUpgradeSystemPreflightCheckIssue"] = reflect.TypeOf((*VsanUpgradeSystemPreflightCheckIssue)(nil)).Elem() +} + +type VsanUpgradeSystemPreflightCheckResult struct { + DynamicData + + Issues []BaseVsanUpgradeSystemPreflightCheckIssue `xml:"issues,omitempty,typeattr"` + DiskMappingToRestore *VsanHostDiskMapping `xml:"diskMappingToRestore,omitempty"` +} + +func init() { + t["VsanUpgradeSystemPreflightCheckResult"] = reflect.TypeOf((*VsanUpgradeSystemPreflightCheckResult)(nil)).Elem() +} + +type VsanUpgradeSystemRogueHostsInClusterIssue struct { + VsanUpgradeSystemPreflightCheckIssue + + Uuids []string `xml:"uuids"` +} + +func init() { + t["VsanUpgradeSystemRogueHostsInClusterIssue"] = reflect.TypeOf((*VsanUpgradeSystemRogueHostsInClusterIssue)(nil)).Elem() +} + +type VsanUpgradeSystemUpgradeHistoryDiskGroupOp struct { + VsanUpgradeSystemUpgradeHistoryItem + + Operation string `xml:"operation"` + DiskMapping VsanHostDiskMapping `xml:"diskMapping"` +} + +func init() { + t["VsanUpgradeSystemUpgradeHistoryDiskGroupOp"] = reflect.TypeOf((*VsanUpgradeSystemUpgradeHistoryDiskGroupOp)(nil)).Elem() +} + +type VsanUpgradeSystemUpgradeHistoryItem struct { + DynamicData + + Timestamp time.Time `xml:"timestamp"` + Host *ManagedObjectReference `xml:"host,omitempty"` + Message string `xml:"message"` + Task *ManagedObjectReference `xml:"task,omitempty"` +} + +func init() { + t["VsanUpgradeSystemUpgradeHistoryItem"] = reflect.TypeOf((*VsanUpgradeSystemUpgradeHistoryItem)(nil)).Elem() +} + +type VsanUpgradeSystemUpgradeHistoryPreflightFail struct { + VsanUpgradeSystemUpgradeHistoryItem + + PreflightResult VsanUpgradeSystemPreflightCheckResult `xml:"preflightResult"` +} + +func init() { + t["VsanUpgradeSystemUpgradeHistoryPreflightFail"] = reflect.TypeOf((*VsanUpgradeSystemUpgradeHistoryPreflightFail)(nil)).Elem() +} + +type VsanUpgradeSystemUpgradeStatus struct { + DynamicData + + InProgress bool `xml:"inProgress"` + History []BaseVsanUpgradeSystemUpgradeHistoryItem `xml:"history,omitempty,typeattr"` + Aborted *bool `xml:"aborted"` + Completed *bool `xml:"completed"` + Progress int `xml:"progress,omitempty"` +} + +func init() { + t["VsanUpgradeSystemUpgradeStatus"] = reflect.TypeOf((*VsanUpgradeSystemUpgradeStatus)(nil)).Elem() +} + +type VsanUpgradeSystemV2ObjectsPresentDuringDowngradeIssue struct { + VsanUpgradeSystemPreflightCheckIssue + + Uuids []string `xml:"uuids"` +} + +func init() { + t["VsanUpgradeSystemV2ObjectsPresentDuringDowngradeIssue"] = reflect.TypeOf((*VsanUpgradeSystemV2ObjectsPresentDuringDowngradeIssue)(nil)).Elem() +} + +type VsanUpgradeSystemWrongEsxVersionIssue struct { + VsanUpgradeSystemPreflightCheckIssue + + Hosts []ManagedObjectReference `xml:"hosts"` +} + +func init() { + t["VsanUpgradeSystemWrongEsxVersionIssue"] = reflect.TypeOf((*VsanUpgradeSystemWrongEsxVersionIssue)(nil)).Elem() +} + +type VspanDestPortConflict struct { + DvsFault + + VspanSessionKey1 string `xml:"vspanSessionKey1"` + VspanSessionKey2 string `xml:"vspanSessionKey2"` + PortKey string `xml:"portKey"` +} + +func init() { + t["VspanDestPortConflict"] = reflect.TypeOf((*VspanDestPortConflict)(nil)).Elem() +} + +type VspanDestPortConflictFault VspanDestPortConflict + 
+func init() { + t["VspanDestPortConflictFault"] = reflect.TypeOf((*VspanDestPortConflictFault)(nil)).Elem() +} + +type VspanPortConflict struct { + DvsFault + + VspanSessionKey1 string `xml:"vspanSessionKey1"` + VspanSessionKey2 string `xml:"vspanSessionKey2"` + PortKey string `xml:"portKey"` +} + +func init() { + t["VspanPortConflict"] = reflect.TypeOf((*VspanPortConflict)(nil)).Elem() +} + +type VspanPortConflictFault VspanPortConflict + +func init() { + t["VspanPortConflictFault"] = reflect.TypeOf((*VspanPortConflictFault)(nil)).Elem() +} + +type VspanPortMoveFault struct { + DvsFault + + SrcPortgroupName string `xml:"srcPortgroupName"` + DestPortgroupName string `xml:"destPortgroupName"` + PortKey string `xml:"portKey"` +} + +func init() { + t["VspanPortMoveFault"] = reflect.TypeOf((*VspanPortMoveFault)(nil)).Elem() +} + +type VspanPortMoveFaultFault VspanPortMoveFault + +func init() { + t["VspanPortMoveFaultFault"] = reflect.TypeOf((*VspanPortMoveFaultFault)(nil)).Elem() +} + +type VspanPortPromiscChangeFault struct { + DvsFault + + PortKey string `xml:"portKey"` +} + +func init() { + t["VspanPortPromiscChangeFault"] = reflect.TypeOf((*VspanPortPromiscChangeFault)(nil)).Elem() +} + +type VspanPortPromiscChangeFaultFault VspanPortPromiscChangeFault + +func init() { + t["VspanPortPromiscChangeFaultFault"] = reflect.TypeOf((*VspanPortPromiscChangeFaultFault)(nil)).Elem() +} + +type VspanPortgroupPromiscChangeFault struct { + DvsFault + + PortgroupName string `xml:"portgroupName"` +} + +func init() { + t["VspanPortgroupPromiscChangeFault"] = reflect.TypeOf((*VspanPortgroupPromiscChangeFault)(nil)).Elem() +} + +type VspanPortgroupPromiscChangeFaultFault VspanPortgroupPromiscChangeFault + +func init() { + t["VspanPortgroupPromiscChangeFaultFault"] = reflect.TypeOf((*VspanPortgroupPromiscChangeFaultFault)(nil)).Elem() +} + +type VspanPortgroupTypeChangeFault struct { + DvsFault + + PortgroupName string `xml:"portgroupName"` +} + +func init() { + t["VspanPortgroupTypeChangeFault"] = reflect.TypeOf((*VspanPortgroupTypeChangeFault)(nil)).Elem() +} + +type VspanPortgroupTypeChangeFaultFault VspanPortgroupTypeChangeFault + +func init() { + t["VspanPortgroupTypeChangeFaultFault"] = reflect.TypeOf((*VspanPortgroupTypeChangeFaultFault)(nil)).Elem() +} + +type VspanPromiscuousPortNotSupported struct { + DvsFault + + VspanSessionKey string `xml:"vspanSessionKey"` + PortKey string `xml:"portKey"` +} + +func init() { + t["VspanPromiscuousPortNotSupported"] = reflect.TypeOf((*VspanPromiscuousPortNotSupported)(nil)).Elem() +} + +type VspanPromiscuousPortNotSupportedFault VspanPromiscuousPortNotSupported + +func init() { + t["VspanPromiscuousPortNotSupportedFault"] = reflect.TypeOf((*VspanPromiscuousPortNotSupportedFault)(nil)).Elem() +} + +type VspanSameSessionPortConflict struct { + DvsFault + + VspanSessionKey string `xml:"vspanSessionKey"` + PortKey string `xml:"portKey"` +} + +func init() { + t["VspanSameSessionPortConflict"] = reflect.TypeOf((*VspanSameSessionPortConflict)(nil)).Elem() +} + +type VspanSameSessionPortConflictFault VspanSameSessionPortConflict + +func init() { + t["VspanSameSessionPortConflictFault"] = reflect.TypeOf((*VspanSameSessionPortConflictFault)(nil)).Elem() +} + +type VvolDatastoreInfo struct { + DatastoreInfo + + VvolDS *HostVvolVolume `xml:"vvolDS,omitempty"` +} + +func init() { + t["VvolDatastoreInfo"] = reflect.TypeOf((*VvolDatastoreInfo)(nil)).Elem() +} + +type WaitForUpdates WaitForUpdatesRequestType + +func init() { + t["WaitForUpdates"] = 
reflect.TypeOf((*WaitForUpdates)(nil)).Elem() +} + +type WaitForUpdatesEx WaitForUpdatesExRequestType + +func init() { + t["WaitForUpdatesEx"] = reflect.TypeOf((*WaitForUpdatesEx)(nil)).Elem() +} + +type WaitForUpdatesExRequestType struct { + This ManagedObjectReference `xml:"_this"` + Version string `xml:"version,omitempty"` + Options *WaitOptions `xml:"options,omitempty"` +} + +func init() { + t["WaitForUpdatesExRequestType"] = reflect.TypeOf((*WaitForUpdatesExRequestType)(nil)).Elem() +} + +type WaitForUpdatesExResponse struct { + Returnval *UpdateSet `xml:"returnval,omitempty"` +} + +type WaitForUpdatesRequestType struct { + This ManagedObjectReference `xml:"_this"` + Version string `xml:"version,omitempty"` +} + +func init() { + t["WaitForUpdatesRequestType"] = reflect.TypeOf((*WaitForUpdatesRequestType)(nil)).Elem() +} + +type WaitForUpdatesResponse struct { + Returnval UpdateSet `xml:"returnval"` +} + +type WaitOptions struct { + DynamicData + + MaxWaitSeconds int `xml:"maxWaitSeconds,omitempty"` + MaxObjectUpdates int `xml:"maxObjectUpdates,omitempty"` +} + +func init() { + t["WaitOptions"] = reflect.TypeOf((*WaitOptions)(nil)).Elem() +} + +type WakeOnLanNotSupported struct { + VirtualHardwareCompatibilityIssue +} + +func init() { + t["WakeOnLanNotSupported"] = reflect.TypeOf((*WakeOnLanNotSupported)(nil)).Elem() +} + +type WakeOnLanNotSupportedByVmotionNIC struct { + HostPowerOpFailed +} + +func init() { + t["WakeOnLanNotSupportedByVmotionNIC"] = reflect.TypeOf((*WakeOnLanNotSupportedByVmotionNIC)(nil)).Elem() +} + +type WakeOnLanNotSupportedByVmotionNICFault WakeOnLanNotSupportedByVmotionNIC + +func init() { + t["WakeOnLanNotSupportedByVmotionNICFault"] = reflect.TypeOf((*WakeOnLanNotSupportedByVmotionNICFault)(nil)).Elem() +} + +type WakeOnLanNotSupportedFault WakeOnLanNotSupported + +func init() { + t["WakeOnLanNotSupportedFault"] = reflect.TypeOf((*WakeOnLanNotSupportedFault)(nil)).Elem() +} + +type WarningUpgradeEvent struct { + UpgradeEvent +} + +func init() { + t["WarningUpgradeEvent"] = reflect.TypeOf((*WarningUpgradeEvent)(nil)).Elem() +} + +type WeeklyTaskScheduler struct { + DailyTaskScheduler + + Sunday bool `xml:"sunday"` + Monday bool `xml:"monday"` + Tuesday bool `xml:"tuesday"` + Wednesday bool `xml:"wednesday"` + Thursday bool `xml:"thursday"` + Friday bool `xml:"friday"` + Saturday bool `xml:"saturday"` +} + +func init() { + t["WeeklyTaskScheduler"] = reflect.TypeOf((*WeeklyTaskScheduler)(nil)).Elem() +} + +type WillLoseHAProtection struct { + MigrationFault + + Resolution string `xml:"resolution"` +} + +func init() { + t["WillLoseHAProtection"] = reflect.TypeOf((*WillLoseHAProtection)(nil)).Elem() +} + +type WillLoseHAProtectionFault WillLoseHAProtection + +func init() { + t["WillLoseHAProtectionFault"] = reflect.TypeOf((*WillLoseHAProtectionFault)(nil)).Elem() +} + +type WillModifyConfigCpuRequirements struct { + MigrationFault +} + +func init() { + t["WillModifyConfigCpuRequirements"] = reflect.TypeOf((*WillModifyConfigCpuRequirements)(nil)).Elem() +} + +type WillModifyConfigCpuRequirementsFault WillModifyConfigCpuRequirements + +func init() { + t["WillModifyConfigCpuRequirementsFault"] = reflect.TypeOf((*WillModifyConfigCpuRequirementsFault)(nil)).Elem() +} + +type WillResetSnapshotDirectory struct { + MigrationFault +} + +func init() { + t["WillResetSnapshotDirectory"] = reflect.TypeOf((*WillResetSnapshotDirectory)(nil)).Elem() +} + +type WillResetSnapshotDirectoryFault WillResetSnapshotDirectory + +func init() { + t["WillResetSnapshotDirectoryFault"] = 
reflect.TypeOf((*WillResetSnapshotDirectoryFault)(nil)).Elem() +} + +type WinNetBIOSConfigInfo struct { + NetBIOSConfigInfo + + PrimaryWINS string `xml:"primaryWINS"` + SecondaryWINS string `xml:"secondaryWINS,omitempty"` +} + +func init() { + t["WinNetBIOSConfigInfo"] = reflect.TypeOf((*WinNetBIOSConfigInfo)(nil)).Elem() +} + +type WipeDiskFault struct { + VimFault +} + +func init() { + t["WipeDiskFault"] = reflect.TypeOf((*WipeDiskFault)(nil)).Elem() +} + +type WipeDiskFaultFault WipeDiskFault + +func init() { + t["WipeDiskFaultFault"] = reflect.TypeOf((*WipeDiskFaultFault)(nil)).Elem() +} + +type XmlToCustomizationSpecItem XmlToCustomizationSpecItemRequestType + +func init() { + t["XmlToCustomizationSpecItem"] = reflect.TypeOf((*XmlToCustomizationSpecItem)(nil)).Elem() +} + +type XmlToCustomizationSpecItemRequestType struct { + This ManagedObjectReference `xml:"_this"` + SpecItemXml string `xml:"specItemXml"` +} + +func init() { + t["XmlToCustomizationSpecItemRequestType"] = reflect.TypeOf((*XmlToCustomizationSpecItemRequestType)(nil)).Elem() +} + +type XmlToCustomizationSpecItemResponse struct { + Returnval CustomizationSpecItem `xml:"returnval"` +} + +type ZeroFillVirtualDiskRequestType struct { + This ManagedObjectReference `xml:"_this"` + Name string `xml:"name"` + Datacenter *ManagedObjectReference `xml:"datacenter,omitempty"` +} + +func init() { + t["ZeroFillVirtualDiskRequestType"] = reflect.TypeOf((*ZeroFillVirtualDiskRequestType)(nil)).Elem() +} + +type ZeroFillVirtualDisk_Task ZeroFillVirtualDiskRequestType + +func init() { + t["ZeroFillVirtualDisk_Task"] = reflect.TypeOf((*ZeroFillVirtualDisk_Task)(nil)).Elem() +} + +type ZeroFillVirtualDisk_TaskResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} diff --git a/vendor/github.com/vmware/govmomi/vim25/types/types_test.go b/vendor/github.com/vmware/govmomi/vim25/types/types_test.go new file mode 100644 index 000000000..0bb67dc51 --- /dev/null +++ b/vendor/github.com/vmware/govmomi/vim25/types/types_test.go @@ -0,0 +1,92 @@ +/* +Copyright (c) 2014-2015 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package types + +import ( + "testing" + + "github.com/vmware/govmomi/vim25/xml" +) + +func TestVirtualMachineConfigSpec(t *testing.T) { + spec := VirtualMachineConfigSpec{ + Name: "vm-001", + GuestId: "otherGuest", + Files: &VirtualMachineFileInfo{VmPathName: "[datastore1]"}, + NumCPUs: 1, + MemoryMB: 128, + DeviceChange: []BaseVirtualDeviceConfigSpec{ + &VirtualDeviceConfigSpec{ + Operation: VirtualDeviceConfigSpecOperationAdd, + Device: &VirtualLsiLogicController{VirtualSCSIController{ + SharedBus: VirtualSCSISharingNoSharing, + VirtualController: VirtualController{ + BusNumber: 0, + VirtualDevice: VirtualDevice{ + Key: 1000, + }, + }, + }}, + }, + &VirtualDeviceConfigSpec{ + Operation: VirtualDeviceConfigSpecOperationAdd, + FileOperation: VirtualDeviceConfigSpecFileOperationCreate, + Device: &VirtualDisk{ + VirtualDevice: VirtualDevice{ + Key: 0, + ControllerKey: 1000, + UnitNumber: 0, + Backing: &VirtualDiskFlatVer2BackingInfo{ + DiskMode: string(VirtualDiskModePersistent), + ThinProvisioned: NewBool(true), + VirtualDeviceFileBackingInfo: VirtualDeviceFileBackingInfo{ + FileName: "[datastore1]", + }, + }, + }, + CapacityInKB: 4000000, + }, + }, + &VirtualDeviceConfigSpec{ + Operation: VirtualDeviceConfigSpecOperationAdd, + Device: &VirtualE1000{VirtualEthernetCard{ + VirtualDevice: VirtualDevice{ + Key: 0, + DeviceInfo: &Description{ + Label: "Network Adapter 1", + Summary: "VM Network", + }, + Backing: &VirtualEthernetCardNetworkBackingInfo{ + VirtualDeviceDeviceBackingInfo: VirtualDeviceDeviceBackingInfo{ + DeviceName: "VM Network", + }, + }, + }, + AddressType: string(VirtualEthernetCardMacTypeGenerated), + }}, + }, + }, + ExtraConfig: []BaseOptionValue{ + &OptionValue{Key: "bios.bootOrder", Value: "ethernet0"}, + }, + } + + _, err := xml.MarshalIndent(spec, "", " ") + if err != nil { + t.Fatal(err) + } +} diff --git a/vendor/github.com/vmware/govmomi/vim25/xml/LICENSE b/vendor/github.com/vmware/govmomi/vim25/xml/LICENSE new file mode 100644 index 000000000..744875676 --- /dev/null +++ b/vendor/github.com/vmware/govmomi/vim25/xml/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2012 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/vmware/govmomi/vim25/xml/atom_test.go b/vendor/github.com/vmware/govmomi/vim25/xml/atom_test.go
new file mode 100644
index 000000000..a71284312
--- /dev/null
+++ b/vendor/github.com/vmware/govmomi/vim25/xml/atom_test.go
@@ -0,0 +1,56 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package xml
+
+import "time"
+
+var atomValue = &Feed{
+	XMLName: Name{"http://www.w3.org/2005/Atom", "feed"},
+	Title:   "Example Feed",
+	Link:    []Link{{Href: "http://example.org/"}},
+	Updated: ParseTime("2003-12-13T18:30:02Z"),
+	Author:  Person{Name: "John Doe"},
+	Id:      "urn:uuid:60a76c80-d399-11d9-b93C-0003939e0af6",
+
+	Entry: []Entry{
+		{
+			Title:   "Atom-Powered Robots Run Amok",
+			Link:    []Link{{Href: "http://example.org/2003/12/13/atom03"}},
+			Id:      "urn:uuid:1225c695-cfb8-4ebb-aaaa-80da344efa6a",
+			Updated: ParseTime("2003-12-13T18:30:02Z"),
+			Summary: NewText("Some text."),
+		},
+	},
+}
+
+var atomXml = `` +
+	`<feed xmlns="http://www.w3.org/2005/Atom" updated="2003-12-13T18:30:02Z">` +
+	`<title>Example Feed</title>` +
+	`<id>urn:uuid:60a76c80-d399-11d9-b93C-0003939e0af6</id>` +
+	`<link href="http://example.org/"></link>` +
+	`<author><name>John Doe</name></author>` +
+	`<entry>` +
+	`<title>Atom-Powered Robots Run Amok</title>` +
+	`<id>urn:uuid:1225c695-cfb8-4ebb-aaaa-80da344efa6a</id>` +
+	`<link href="http://example.org/2003/12/13/atom03"></link>` +
+	`<updated>2003-12-13T18:30:02Z</updated>` +
+	`<author><name></name></author>` +
+	`<summary>Some text.</summary>` +
+	`</entry>` +
+	`</feed>`
+
+func ParseTime(str string) time.Time {
+	t, err := time.Parse(time.RFC3339, str)
+	if err != nil {
+		panic(err)
+	}
+	return t
+}
+
+func NewText(text string) Text {
+	return Text{
+		Body: text,
+	}
+}
diff --git a/vendor/github.com/vmware/govmomi/vim25/xml/example_test.go b/vendor/github.com/vmware/govmomi/vim25/xml/example_test.go
new file mode 100644
index 000000000..becedd583
--- /dev/null
+++ b/vendor/github.com/vmware/govmomi/vim25/xml/example_test.go
@@ -0,0 +1,151 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package xml_test
+
+import (
+	"encoding/xml"
+	"fmt"
+	"os"
+)
+
+func ExampleMarshalIndent() {
+	type Address struct {
+		City, State string
+	}
+	type Person struct {
+		XMLName   xml.Name `xml:"person"`
+		Id        int      `xml:"id,attr"`
+		FirstName string   `xml:"name>first"`
+		LastName  string   `xml:"name>last"`
+		Age       int      `xml:"age"`
+		Height    float32  `xml:"height,omitempty"`
+		Married   bool
+		Address
+		Comment string `xml:",comment"`
+	}
+
+	v := &Person{Id: 13, FirstName: "John", LastName: "Doe", Age: 42}
+	v.Comment = " Need more details. "
+	v.Address = Address{"Hanga Roa", "Easter Island"}
+
+	output, err := xml.MarshalIndent(v, "  ", "    ")
+	if err != nil {
+		fmt.Printf("error: %v\n", err)
+	}
+
+	os.Stdout.Write(output)
+	// Output:
+	//   <person id="13">
+	//       <name>
+	//           <first>John</first>
+	//           <last>Doe</last>
+	//       </name>
+	//       <age>42</age>
+	//       <Married>false</Married>
+	//       <City>Hanga Roa</City>
+	//       <State>Easter Island</State>
+	//       <!-- Need more details. -->
+	//   </person>
+}
+
+func ExampleEncoder() {
+	type Address struct {
+		City, State string
+	}
+	type Person struct {
+		XMLName   xml.Name `xml:"person"`
+		Id        int      `xml:"id,attr"`
+		FirstName string   `xml:"name>first"`
+		LastName  string   `xml:"name>last"`
+		Age       int      `xml:"age"`
+		Height    float32  `xml:"height,omitempty"`
+		Married   bool
+		Address
+		Comment string `xml:",comment"`
+	}
+
+	v := &Person{Id: 13, FirstName: "John", LastName: "Doe", Age: 42}
+	v.Comment = " Need more details. "
+	v.Address = Address{"Hanga Roa", "Easter Island"}
+
+	enc := xml.NewEncoder(os.Stdout)
+	enc.Indent("  ", "    ")
+	if err := enc.Encode(v); err != nil {
+		fmt.Printf("error: %v\n", err)
+	}
+
+	// Output:
+	//   <person id="13">
+	//       <name>
+	//           <first>John</first>
+	//           <last>Doe</last>
+	//       </name>
+	//       <age>42</age>
+	//       <Married>false</Married>
+	//       <City>Hanga Roa</City>
+	//       <State>Easter Island</State>
+	//       <!-- Need more details. -->
+	//   </person>
+}
+
+// This example demonstrates unmarshaling an XML excerpt into a value with
+// some preset fields. Note that the Phone field isn't modified and that
+// the XML <Company> element is ignored. Also, the Groups field is assigned
+// considering the element path provided in its tag.
+func ExampleUnmarshal() {
+	type Email struct {
+		Where string `xml:"where,attr"`
+		Addr  string
+	}
+	type Address struct {
+		City, State string
+	}
+	type Result struct {
+		XMLName xml.Name `xml:"Person"`
+		Name    string   `xml:"FullName"`
+		Phone   string
+		Email   []Email
+		Groups  []string `xml:"Group>Value"`
+		Address
+	}
+	v := Result{Name: "none", Phone: "none"}
+
+	data := `
+		<Person>
+			<FullName>Grace R. Emlin</FullName>
+			<Company>Example Inc.</Company>
+			<Email where="home">
+				<Addr>gre@example.com</Addr>
+			</Email>
+			<Email where='work'>
+				<Addr>gre@work.com</Addr>
+			</Email>
+			<Group>
+				<Value>Friends</Value>
+				<Value>Squash</Value>
+			</Group>
+			<City>Hanga Roa</City>
+			<State>Easter Island</State>
+		</Person>
+	`
+	err := xml.Unmarshal([]byte(data), &v)
+	if err != nil {
+		fmt.Printf("error: %v", err)
+		return
+	}
+	fmt.Printf("XMLName: %#v\n", v.XMLName)
+	fmt.Printf("Name: %q\n", v.Name)
+	fmt.Printf("Phone: %q\n", v.Phone)
+	fmt.Printf("Email: %v\n", v.Email)
+	fmt.Printf("Groups: %v\n", v.Groups)
+	fmt.Printf("Address: %v\n", v.Address)
+	// Output:
+	// XMLName: xml.Name{Space:"", Local:"Person"}
+	// Name: "Grace R. Emlin"
+	// Phone: "none"
+	// Email: [{home gre@example.com} {work gre@work.com}]
+	// Groups: [Friends Squash]
+	// Address: {Hanga Roa Easter Island}
+}
diff --git a/vendor/github.com/vmware/govmomi/vim25/xml/extras.go b/vendor/github.com/vmware/govmomi/vim25/xml/extras.go
new file mode 100644
index 000000000..9f1a764d5
--- /dev/null
+++ b/vendor/github.com/vmware/govmomi/vim25/xml/extras.go
@@ -0,0 +1,93 @@
+/*
+Copyright (c) 2014 VMware, Inc. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/ + +package xml + +import ( + "reflect" + "time" +) + +var xmlSchemaInstance = Name{Space: "http://www.w3.org/2001/XMLSchema-instance", Local: "type"} + +var stringToTypeMap = map[string]reflect.Type{ + "xsd:boolean": reflect.TypeOf((*bool)(nil)).Elem(), + "xsd:byte": reflect.TypeOf((*int8)(nil)).Elem(), + "xsd:short": reflect.TypeOf((*int16)(nil)).Elem(), + "xsd:int": reflect.TypeOf((*int32)(nil)).Elem(), + "xsd:long": reflect.TypeOf((*int64)(nil)).Elem(), + "xsd:unsignedByte": reflect.TypeOf((*uint8)(nil)).Elem(), + "xsd:unsignedShort": reflect.TypeOf((*uint16)(nil)).Elem(), + "xsd:unsignedInt": reflect.TypeOf((*uint32)(nil)).Elem(), + "xsd:unsignedLong": reflect.TypeOf((*uint64)(nil)).Elem(), + "xsd:float": reflect.TypeOf((*float32)(nil)).Elem(), + "xsd:double": reflect.TypeOf((*float64)(nil)).Elem(), + "xsd:string": reflect.TypeOf((*string)(nil)).Elem(), + "xsd:dateTime": reflect.TypeOf((*time.Time)(nil)).Elem(), + "xsd:base64Binary": reflect.TypeOf((*[]byte)(nil)).Elem(), +} + +// Return a reflect.Type for the specified type. Nil if unknown. +func stringToType(s string) reflect.Type { + return stringToTypeMap[s] +} + +// Return a string for the specified reflect.Type. Panic if unknown. +func typeToString(typ reflect.Type) string { + switch typ.Kind() { + case reflect.Bool: + return "xsd:boolean" + case reflect.Int8: + return "xsd:byte" + case reflect.Int16: + return "xsd:short" + case reflect.Int32: + return "xsd:int" + case reflect.Int, reflect.Int64: + return "xsd:long" + case reflect.Uint8: + return "xsd:unsignedByte" + case reflect.Uint16: + return "xsd:unsignedShort" + case reflect.Uint32: + return "xsd:unsignedInt" + case reflect.Uint, reflect.Uint64: + return "xsd:unsignedLong" + case reflect.Float32: + return "xsd:float" + case reflect.Float64: + return "xsd:double" + case reflect.String: + return "xsd:string" + case reflect.Struct: + if typ == stringToTypeMap["xsd:dateTime"] { + return "xsd:dateTime" + } + + // Expect any other struct to be handled... + return typ.Name() + case reflect.Slice: + if typ.Elem().Kind() == reflect.Uint8 { + return "xsd:base64Binary" + } + case reflect.Array: + if typ.Elem().Kind() == reflect.Uint8 { + return "xsd:base64Binary" + } + } + + panic("don't know what to do for type: " + typ.String()) +} diff --git a/vendor/github.com/vmware/govmomi/vim25/xml/extras_test.go b/vendor/github.com/vmware/govmomi/vim25/xml/extras_test.go new file mode 100644 index 000000000..8e200578a --- /dev/null +++ b/vendor/github.com/vmware/govmomi/vim25/xml/extras_test.go @@ -0,0 +1,222 @@ +/* +Copyright (c) 2014 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/
+
+package xml
+
+import (
+	"bytes"
+	"reflect"
+	"testing"
+	"time"
+)
+
+type MyType struct {
+	Value string
+}
+
+var myTypes = map[string]reflect.Type{
+	"MyType":      reflect.TypeOf(MyType{}),
+	"ValueType":   reflect.TypeOf(ValueType{}),
+	"PointerType": reflect.TypeOf(PointerType{}),
+}
+
+func MyTypes(name string) (reflect.Type, bool) {
+	t, ok := myTypes[name]
+	return t, ok
+}
+
+func TestMarshalWithEmptyInterface(t *testing.T) {
+	var r1, r2 struct {
+		XMLName Name          `xml:"root"`
+		Values  []interface{} `xml:"value,typeattr"`
+	}
+
+	var tests = []struct {
+		Value interface{}
+	}{
+		{Value: bool(true)},
+		{Value: int8(-8)},
+		{Value: int16(-16)},
+		{Value: int32(-32)},
+		{Value: int64(-64)},
+		{Value: uint8(8)},
+		{Value: uint16(16)},
+		{Value: uint32(32)},
+		{Value: uint64(64)},
+		{Value: float32(32.0)},
+		{Value: float64(64.0)},
+		{Value: string("string")},
+		{Value: time.Now()},
+		{Value: ParseTime("2009-10-04T01:35:58+00:00")},
+		{Value: []byte("bytes")},
+		{Value: MyType{Value: "v"}},
+	}
+
+	for _, test := range tests {
+		r1.XMLName.Local = "root"
+		r1.Values = []interface{}{test.Value}
+		r2.XMLName = Name{}
+		r2.Values = nil
+
+		b, err := Marshal(r1)
+		if err != nil {
+			t.Fatalf("Marshal: %s", err)
+		}
+
+		dec := NewDecoder(bytes.NewReader(b))
+		dec.TypeFunc = MyTypes
+		err = dec.Decode(&r2)
+		if err != nil {
+			t.Fatalf("Unmarshal: %s", err)
+		}
+
+		switch r1.Values[0].(type) {
+		case time.Time:
+			if !r1.Values[0].(time.Time).Equal(r2.Values[0].(time.Time)) {
+				t.Errorf("Expected: %#v, actual: %#v", r1, r2)
+			}
+		default:
+			if !reflect.DeepEqual(r1, r2) {
+				t.Errorf("Expected: %#v, actual: %#v", r1, r2)
+			}
+		}
+	}
+}
+
+type VIntf interface {
+	V() string
+}
+
+type ValueType struct {
+	Value string `xml:",chardata"`
+}
+
+type PointerType struct {
+	Value string `xml:",chardata"`
+}
+
+func (t ValueType) V() string {
+	return t.Value
+}
+
+func (t *PointerType) V() string {
+	return t.Value
+}
+
+func TestMarshalWithInterface(t *testing.T) {
+	var r1, r2 struct {
+		XMLName Name    `xml:"root"`
+		Values  []VIntf `xml:"value,typeattr"`
+	}
+
+	r1.XMLName.Local = "root"
+	r1.Values = []VIntf{
+		ValueType{"v1"},
+		&PointerType{"v2"},
+	}
+
+	b, err := Marshal(r1)
+	if err != nil {
+		t.Fatalf("Marshal: %s", err)
+	}
+
+	dec := NewDecoder(bytes.NewReader(b))
+	dec.TypeFunc = MyTypes
+	err = dec.Decode(&r2)
+	if err != nil {
+		t.Fatalf("Unmarshal: %s", err)
+	}
+
+	if !reflect.DeepEqual(r1, r2) {
+		t.Errorf("expected: %#v, actual: %#v", r1, r2)
+	}
+}
+
+type test3iface interface {
+	Value() string
+}
+
+type test3a struct {
+	V string `xml:",chardata"`
+}
+
+func (t test3a) Value() string { return t.V }
+
+type test3b struct {
+	V string `xml:",chardata"`
+}
+
+func (t test3b) Value() string { return t.V }
+
+func TestUnmarshalInterfaceWithoutTypeAttr(t *testing.T) {
+	var r struct {
+		XMLName Name         `xml:"root"`
+		Values  []test3iface `xml:"value,typeattr"`
+	}
+
+	b := `
+	<root>
+		<value xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:type="test3a">A</value>
+		<value>B</value>
+	</root>
+	`
+
+	fn := func(name string) (reflect.Type, bool) {
+		switch name {
+		case "test3a":
+			return reflect.TypeOf(test3a{}), true
+		case "test3iface":
+			return reflect.TypeOf(test3b{}), true
+		default:
+			return nil, false
+		}
+	}
+
+	dec := NewDecoder(bytes.NewReader([]byte(b)))
+	dec.TypeFunc = fn
+	err := dec.Decode(&r)
+	if err != nil {
+		t.Fatalf("Unmarshal: %s", err)
+	}
+
+	if len(r.Values) != 2 {
+		t.Errorf("Expected 2 values")
+	}
+
+	exps := []struct {
+		Typ reflect.Type
+		Val string
+	}{
+		{
+			Typ: reflect.TypeOf(test3a{}),
+			Val: "A",
+		},
+		{
+			Typ: reflect.TypeOf(test3b{}),
+			Val: "B",
+		},
+	}
+
+	for i, e := range exps {
+		if val := r.Values[i].Value(); val != e.Val {
+			t.Errorf("Expected: %s, got: %s", e.Val, val)
+		}
+
+		if typ := reflect.TypeOf(r.Values[i]); typ.Name() != e.Typ.Name() {
+			t.Errorf("Expected: %s, got: %s", e.Typ.Name(), typ.Name())
+		}
+	}
+}
diff --git a/vendor/github.com/vmware/govmomi/vim25/xml/marshal.go b/vendor/github.com/vmware/govmomi/vim25/xml/marshal.go
new file mode 100644
index 000000000..39bbac1d1
--- /dev/null
+++ b/vendor/github.com/vmware/govmomi/vim25/xml/marshal.go
@@ -0,0 +1,949 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package xml
+
+import (
+	"bufio"
+	"bytes"
+	"encoding"
+	"fmt"
+	"io"
+	"reflect"
+	"strconv"
+	"strings"
+)
+
+const (
+	// A generic XML header suitable for use with the output of Marshal.
+	// This is not automatically added to any output of this package,
+	// it is provided as a convenience.
+	Header = `<?xml version="1.0" encoding="UTF-8"?>` + "\n"
+)
+
+// Marshal returns the XML encoding of v.
+//
+// Marshal handles an array or slice by marshalling each of the elements.
+// Marshal handles a pointer by marshalling the value it points at or, if the
+// pointer is nil, by writing nothing. Marshal handles an interface value by
+// marshalling the value it contains or, if the interface value is nil, by
+// writing nothing. Marshal handles all other data by writing one or more XML
+// elements containing the data.
+//
+// The name for the XML elements is taken from, in order of preference:
+//     - the tag on the XMLName field, if the data is a struct
+//     - the value of the XMLName field of type xml.Name
+//     - the tag of the struct field used to obtain the data
+//     - the name of the struct field used to obtain the data
+//     - the name of the marshalled type
+//
+// The XML element for a struct contains marshalled elements for each of the
+// exported fields of the struct, with these exceptions:
+//     - the XMLName field, described above, is omitted.
+//     - a field with tag "-" is omitted.
+//     - a field with tag "name,attr" becomes an attribute with
+//       the given name in the XML element.
+//     - a field with tag ",attr" becomes an attribute with the
+//       field name in the XML element.
+//     - a field with tag ",chardata" is written as character data,
+//       not as an XML element.
+//     - a field with tag ",innerxml" is written verbatim, not subject
+//       to the usual marshalling procedure.
+//     - a field with tag ",comment" is written as an XML comment, not
+//       subject to the usual marshalling procedure. It must not contain
+//       the "--" string within it.
+//     - a field with a tag including the "omitempty" option is omitted
+//       if the field value is empty. The empty values are false, 0, any
+//       nil pointer or interface value, and any array, slice, map, or
+//       string of length zero.
+//     - an anonymous struct field is handled as if the fields of its
+//       value were part of the outer struct.
+//
+// If a field uses a tag "a>b>c", then the element c will be nested inside
+// parent elements a and b. Fields that appear next to each other that name
+// the same parent will be enclosed in one XML element.
+//
+// See MarshalIndent for an example.
+//
+// Marshal will return an error if asked to marshal a channel, function, or map.
+func Marshal(v interface{}) ([]byte, error) { + var b bytes.Buffer + if err := NewEncoder(&b).Encode(v); err != nil { + return nil, err + } + return b.Bytes(), nil +} + +// Marshaler is the interface implemented by objects that can marshal +// themselves into valid XML elements. +// +// MarshalXML encodes the receiver as zero or more XML elements. +// By convention, arrays or slices are typically encoded as a sequence +// of elements, one per entry. +// Using start as the element tag is not required, but doing so +// will enable Unmarshal to match the XML elements to the correct +// struct field. +// One common implementation strategy is to construct a separate +// value with a layout corresponding to the desired XML and then +// to encode it using e.EncodeElement. +// Another common strategy is to use repeated calls to e.EncodeToken +// to generate the XML output one token at a time. +// The sequence of encoded tokens must make up zero or more valid +// XML elements. +type Marshaler interface { + MarshalXML(e *Encoder, start StartElement) error +} + +// MarshalerAttr is the interface implemented by objects that can marshal +// themselves into valid XML attributes. +// +// MarshalXMLAttr returns an XML attribute with the encoded value of the receiver. +// Using name as the attribute name is not required, but doing so +// will enable Unmarshal to match the attribute to the correct +// struct field. +// If MarshalXMLAttr returns the zero attribute Attr{}, no attribute +// will be generated in the output. +// MarshalXMLAttr is used only for struct fields with the +// "attr" option in the field tag. +type MarshalerAttr interface { + MarshalXMLAttr(name Name) (Attr, error) +} + +// MarshalIndent works like Marshal, but each XML element begins on a new +// indented line that starts with prefix and is followed by one or more +// copies of indent according to the nesting depth. +func MarshalIndent(v interface{}, prefix, indent string) ([]byte, error) { + var b bytes.Buffer + enc := NewEncoder(&b) + enc.Indent(prefix, indent) + if err := enc.Encode(v); err != nil { + return nil, err + } + return b.Bytes(), nil +} + +// An Encoder writes XML data to an output stream. +type Encoder struct { + p printer +} + +// NewEncoder returns a new encoder that writes to w. +func NewEncoder(w io.Writer) *Encoder { + e := &Encoder{printer{Writer: bufio.NewWriter(w)}} + e.p.encoder = e + return e +} + +// Indent sets the encoder to generate XML in which each element +// begins on a new indented line that starts with prefix and is followed by +// one or more copies of indent according to the nesting depth. +func (enc *Encoder) Indent(prefix, indent string) { + enc.p.prefix = prefix + enc.p.indent = indent +} + +// Encode writes the XML encoding of v to the stream. +// +// See the documentation for Marshal for details about the conversion +// of Go values to XML. +// +// Encode calls Flush before returning. +func (enc *Encoder) Encode(v interface{}) error { + err := enc.p.marshalValue(reflect.ValueOf(v), nil, nil) + if err != nil { + return err + } + return enc.p.Flush() +} + +// EncodeElement writes the XML encoding of v to the stream, +// using start as the outermost tag in the encoding. +// +// See the documentation for Marshal for details about the conversion +// of Go values to XML. +// +// EncodeElement calls Flush before returning. 
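+//
+// For illustration (a sketch; "item" is an arbitrary element name):
+//
+//	var buf bytes.Buffer
+//	enc := NewEncoder(&buf)
+//	start := StartElement{Name: Name{Local: "item"}}
+//	if err := enc.EncodeElement("hello", start); err != nil {
+//		// handle the error
+//	}
+//	// buf now holds: <item>hello</item>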
+func (enc *Encoder) EncodeElement(v interface{}, start StartElement) error {
+	err := enc.p.marshalValue(reflect.ValueOf(v), nil, &start)
+	if err != nil {
+		return err
+	}
+	return enc.p.Flush()
+}
+
+var (
+	endComment   = []byte("-->")
+	endProcInst  = []byte("?>")
+	endDirective = []byte(">")
+)
+
+// EncodeToken writes the given XML token to the stream.
+// It returns an error if StartElement and EndElement tokens are not properly matched.
+//
+// EncodeToken does not call Flush, because usually it is part of a larger operation
+// such as Encode or EncodeElement (or a custom Marshaler's MarshalXML invoked
+// during those), and those will call Flush when finished.
+// Callers that create an Encoder and then invoke EncodeToken directly, without
+// using Encode or EncodeElement, need to call Flush when finished to ensure
+// that the XML is written to the underlying writer.
+//
+// EncodeToken allows writing a ProcInst with Target set to "xml" only as the first token
+// in the stream.
+func (enc *Encoder) EncodeToken(t Token) error {
+	p := &enc.p
+	switch t := t.(type) {
+	case StartElement:
+		if err := p.writeStart(&t); err != nil {
+			return err
+		}
+	case EndElement:
+		if err := p.writeEnd(t.Name); err != nil {
+			return err
+		}
+	case CharData:
+		EscapeText(p, t)
+	case Comment:
+		if bytes.Contains(t, endComment) {
+			return fmt.Errorf("xml: EncodeToken of Comment containing --> marker")
+		}
+		p.WriteString("<!--")
+		p.Write(t)
+		p.WriteString("-->")
+		return p.cachedWriteError()
+	case ProcInst:
+		// First token to be encoded which is also a ProcInst with target of xml
+		// is the xml declaration. The only ProcInst where target of xml is allowed.
+		if t.Target == "xml" && p.Buffered() != 0 {
+			return fmt.Errorf("xml: EncodeToken of ProcInst xml target only valid for xml declaration, first token encoded")
+		}
+		if !isNameString(t.Target) {
+			return fmt.Errorf("xml: EncodeToken of ProcInst with invalid Target")
+		}
+		if bytes.Contains(t.Inst, endProcInst) {
+			return fmt.Errorf("xml: EncodeToken of ProcInst containing ?> marker")
+		}
+		p.WriteString("<?")
+		p.WriteString(t.Target)
+		if len(t.Inst) > 0 {
+			p.WriteByte(' ')
+			p.Write(t.Inst)
+		}
+		p.WriteString("?>")
+	case Directive:
+		if bytes.Contains(t, endDirective) {
+			return fmt.Errorf("xml: EncodeToken of Directive containing > marker")
+		}
+		p.WriteString("<!")
+		p.Write(t)
+		p.WriteString(">")
+	}
+	return p.cachedWriteError()
+}
+
+// Flush flushes any buffered XML to the underlying writer.
+// See the EncodeToken documentation for details about when it is necessary.
+func (enc *Encoder) Flush() error {
+	return enc.p.Flush()
+}
+
+type printer struct {
+	*bufio.Writer
+	encoder    *Encoder
+	seq        int
+	indent     string
+	prefix     string
+	depth      int
+	indentedIn bool
+	putNewline bool
+	attrNS     map[string]string // map prefix -> name space
+	attrPrefix map[string]string // map name space -> prefix
+	prefixes   []string
+	tags       []Name
+}
+
+// createAttrPrefix finds the name space prefix attribute to use for the given name space,
+// defining a new prefix if necessary. It returns the prefix.
+func (p *printer) createAttrPrefix(url string) string {
+	if prefix := p.attrPrefix[url]; prefix != "" {
+		return prefix
+	}
+
+	// The "http://www.w3.org/XML/1998/namespace" name space is predefined as "xml"
+	// and must be referred to that way.
+	// (The "http://www.w3.org/2000/xmlns/" name space is also predefined as "xmlns",
+	// but users should not be trying to use that one directly - that's our job.)
+	if url == xmlURL {
+		return "xml"
+	}
+
+	// Need to define a new name space.
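+	// For example, "http://example.com/ns" yields the prefix "ns", while a
+	// URL whose final path element is not a valid XML name falls back to "_".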
+ if p.attrPrefix == nil { + p.attrPrefix = make(map[string]string) + p.attrNS = make(map[string]string) + } + + // Pick a name. We try to use the final element of the path + // but fall back to _. + prefix := strings.TrimRight(url, "/") + if i := strings.LastIndex(prefix, "/"); i >= 0 { + prefix = prefix[i+1:] + } + if prefix == "" || !isName([]byte(prefix)) || strings.Contains(prefix, ":") { + prefix = "_" + } + if strings.HasPrefix(prefix, "xml") { + // xmlanything is reserved. + prefix = "_" + prefix + } + if p.attrNS[prefix] != "" { + // Name is taken. Find a better one. + for p.seq++; ; p.seq++ { + if id := prefix + "_" + strconv.Itoa(p.seq); p.attrNS[id] == "" { + prefix = id + break + } + } + } + + p.attrPrefix[url] = prefix + p.attrNS[prefix] = url + + p.WriteString(`xmlns:`) + p.WriteString(prefix) + p.WriteString(`="`) + EscapeText(p, []byte(url)) + p.WriteString(`" `) + + p.prefixes = append(p.prefixes, prefix) + + return prefix +} + +// deleteAttrPrefix removes an attribute name space prefix. +func (p *printer) deleteAttrPrefix(prefix string) { + delete(p.attrPrefix, p.attrNS[prefix]) + delete(p.attrNS, prefix) +} + +func (p *printer) markPrefix() { + p.prefixes = append(p.prefixes, "") +} + +func (p *printer) popPrefix() { + for len(p.prefixes) > 0 { + prefix := p.prefixes[len(p.prefixes)-1] + p.prefixes = p.prefixes[:len(p.prefixes)-1] + if prefix == "" { + break + } + p.deleteAttrPrefix(prefix) + } +} + +var ( + marshalerType = reflect.TypeOf((*Marshaler)(nil)).Elem() + marshalerAttrType = reflect.TypeOf((*MarshalerAttr)(nil)).Elem() + textMarshalerType = reflect.TypeOf((*encoding.TextMarshaler)(nil)).Elem() +) + +// marshalValue writes one or more XML elements representing val. +// If val was obtained from a struct field, finfo must have its details. +func (p *printer) marshalValue(val reflect.Value, finfo *fieldInfo, startTemplate *StartElement) error { + if startTemplate != nil && startTemplate.Name.Local == "" { + return fmt.Errorf("xml: EncodeElement of StartElement with missing name") + } + + if !val.IsValid() { + return nil + } + if finfo != nil && finfo.flags&fOmitEmpty != 0 && isEmptyValue(val) { + return nil + } + + // Drill into interfaces and pointers. + // This can turn into an infinite loop given a cyclic chain, + // but it matches the Go 1 behavior. + for val.Kind() == reflect.Interface || val.Kind() == reflect.Ptr { + if val.IsNil() { + return nil + } + val = val.Elem() + } + + kind := val.Kind() + typ := val.Type() + + // Check for marshaler. + if val.CanInterface() && typ.Implements(marshalerType) { + return p.marshalInterface(val.Interface().(Marshaler), defaultStart(typ, finfo, startTemplate)) + } + if val.CanAddr() { + pv := val.Addr() + if pv.CanInterface() && pv.Type().Implements(marshalerType) { + return p.marshalInterface(pv.Interface().(Marshaler), defaultStart(pv.Type(), finfo, startTemplate)) + } + } + + // Check for text marshaler. + if val.CanInterface() && typ.Implements(textMarshalerType) { + return p.marshalTextInterface(val.Interface().(encoding.TextMarshaler), defaultStart(typ, finfo, startTemplate)) + } + if val.CanAddr() { + pv := val.Addr() + if pv.CanInterface() && pv.Type().Implements(textMarshalerType) { + return p.marshalTextInterface(pv.Interface().(encoding.TextMarshaler), defaultStart(pv.Type(), finfo, startTemplate)) + } + } + + // Slices and arrays iterate over the elements. They do not have an enclosing tag. 
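+	// For example, a []int{1, 2} field tagged `xml:"v"` is written as
+	// <v>1</v><v>2</v>; the slice itself contributes no wrapping element.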
+ if (kind == reflect.Slice || kind == reflect.Array) && typ.Elem().Kind() != reflect.Uint8 { + for i, n := 0, val.Len(); i < n; i++ { + if err := p.marshalValue(val.Index(i), finfo, startTemplate); err != nil { + return err + } + } + return nil + } + + tinfo, err := getTypeInfo(typ) + if err != nil { + return err + } + + // Create start element. + // Precedence for the XML element name is: + // 0. startTemplate + // 1. XMLName field in underlying struct; + // 2. field name/tag in the struct field; and + // 3. type name + var start StartElement + + if startTemplate != nil { + start.Name = startTemplate.Name + start.Attr = append(start.Attr, startTemplate.Attr...) + } else if tinfo.xmlname != nil { + xmlname := tinfo.xmlname + if xmlname.name != "" { + start.Name.Space, start.Name.Local = xmlname.xmlns, xmlname.name + } else if v, ok := xmlname.value(val).Interface().(Name); ok && v.Local != "" { + start.Name = v + } + } + if start.Name.Local == "" && finfo != nil { + start.Name.Space, start.Name.Local = finfo.xmlns, finfo.name + } + if start.Name.Local == "" { + name := typ.Name() + if name == "" { + return &UnsupportedTypeError{typ} + } + start.Name.Local = name + } + + // Add type attribute if necessary + if finfo != nil && finfo.flags&fTypeAttr != 0 { + start.Attr = append(start.Attr, Attr{xmlSchemaInstance, typeToString(typ)}) + } + + // Attributes + for i := range tinfo.fields { + finfo := &tinfo.fields[i] + if finfo.flags&fAttr == 0 { + continue + } + fv := finfo.value(val) + name := Name{Space: finfo.xmlns, Local: finfo.name} + + if finfo.flags&fOmitEmpty != 0 && isEmptyValue(fv) { + continue + } + + if fv.Kind() == reflect.Interface && fv.IsNil() { + continue + } + + if fv.CanInterface() && fv.Type().Implements(marshalerAttrType) { + attr, err := fv.Interface().(MarshalerAttr).MarshalXMLAttr(name) + if err != nil { + return err + } + if attr.Name.Local != "" { + start.Attr = append(start.Attr, attr) + } + continue + } + + if fv.CanAddr() { + pv := fv.Addr() + if pv.CanInterface() && pv.Type().Implements(marshalerAttrType) { + attr, err := pv.Interface().(MarshalerAttr).MarshalXMLAttr(name) + if err != nil { + return err + } + if attr.Name.Local != "" { + start.Attr = append(start.Attr, attr) + } + continue + } + } + + if fv.CanInterface() && fv.Type().Implements(textMarshalerType) { + text, err := fv.Interface().(encoding.TextMarshaler).MarshalText() + if err != nil { + return err + } + start.Attr = append(start.Attr, Attr{name, string(text)}) + continue + } + + if fv.CanAddr() { + pv := fv.Addr() + if pv.CanInterface() && pv.Type().Implements(textMarshalerType) { + text, err := pv.Interface().(encoding.TextMarshaler).MarshalText() + if err != nil { + return err + } + start.Attr = append(start.Attr, Attr{name, string(text)}) + continue + } + } + + // Dereference or skip nil pointer, interface values. 
+ switch fv.Kind() { + case reflect.Ptr, reflect.Interface: + if fv.IsNil() { + continue + } + fv = fv.Elem() + } + + s, b, err := p.marshalSimple(fv.Type(), fv) + if err != nil { + return err + } + if b != nil { + s = string(b) + } + start.Attr = append(start.Attr, Attr{name, s}) + } + + if err := p.writeStart(&start); err != nil { + return err + } + + if val.Kind() == reflect.Struct { + err = p.marshalStruct(tinfo, val) + } else { + s, b, err1 := p.marshalSimple(typ, val) + if err1 != nil { + err = err1 + } else if b != nil { + EscapeText(p, b) + } else { + p.EscapeString(s) + } + } + if err != nil { + return err + } + + if err := p.writeEnd(start.Name); err != nil { + return err + } + + return p.cachedWriteError() +} + +// defaultStart returns the default start element to use, +// given the reflect type, field info, and start template. +func defaultStart(typ reflect.Type, finfo *fieldInfo, startTemplate *StartElement) StartElement { + var start StartElement + // Precedence for the XML element name is as above, + // except that we do not look inside structs for the first field. + if startTemplate != nil { + start.Name = startTemplate.Name + start.Attr = append(start.Attr, startTemplate.Attr...) + } else if finfo != nil && finfo.name != "" { + start.Name.Local = finfo.name + start.Name.Space = finfo.xmlns + } else if typ.Name() != "" { + start.Name.Local = typ.Name() + } else { + // Must be a pointer to a named type, + // since it has the Marshaler methods. + start.Name.Local = typ.Elem().Name() + } + + // Add type attribute if necessary + if finfo != nil && finfo.flags&fTypeAttr != 0 { + start.Attr = append(start.Attr, Attr{xmlSchemaInstance, typeToString(typ)}) + } + + return start +} + +// marshalInterface marshals a Marshaler interface value. +func (p *printer) marshalInterface(val Marshaler, start StartElement) error { + // Push a marker onto the tag stack so that MarshalXML + // cannot close the XML tags that it did not open. + p.tags = append(p.tags, Name{}) + n := len(p.tags) + + err := val.MarshalXML(p.encoder, start) + if err != nil { + return err + } + + // Make sure MarshalXML closed all its tags. p.tags[n-1] is the mark. + if len(p.tags) > n { + return fmt.Errorf("xml: %s.MarshalXML wrote invalid XML: <%s> not closed", receiverType(val), p.tags[len(p.tags)-1].Local) + } + p.tags = p.tags[:n-1] + return nil +} + +// marshalTextInterface marshals a TextMarshaler interface value. +func (p *printer) marshalTextInterface(val encoding.TextMarshaler, start StartElement) error { + if err := p.writeStart(&start); err != nil { + return err + } + text, err := val.MarshalText() + if err != nil { + return err + } + EscapeText(p, text) + return p.writeEnd(start.Name) +} + +// writeStart writes the given start element. 
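+// For example, writing StartElement{Name: Name{Local: "a"}} emits <a> and
+// pushes "a" onto p.tags; the matching writeEnd(Name{Local: "a"}) emits </a>
+// and pops it, while a mismatched name is reported as an error by writeEnd.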
+func (p *printer) writeStart(start *StartElement) error {
+	if start.Name.Local == "" {
+		return fmt.Errorf("xml: start tag with no name")
+	}
+
+	p.tags = append(p.tags, start.Name)
+	p.markPrefix()
+
+	p.writeIndent(1)
+	p.WriteByte('<')
+	p.WriteString(start.Name.Local)
+
+	if start.Name.Space != "" {
+		p.WriteString(` xmlns="`)
+		p.EscapeString(start.Name.Space)
+		p.WriteByte('"')
+	}
+
+	// Attributes
+	for _, attr := range start.Attr {
+		name := attr.Name
+		if name.Local == "" {
+			continue
+		}
+		p.WriteByte(' ')
+		if name.Space != "" {
+			p.WriteString(p.createAttrPrefix(name.Space))
+			p.WriteByte(':')
+		}
+		p.WriteString(name.Local)
+		p.WriteString(`="`)
+		p.EscapeString(attr.Value)
+		p.WriteByte('"')
+	}
+	p.WriteByte('>')
+	return nil
+}
+
+func (p *printer) writeEnd(name Name) error {
+	if name.Local == "" {
+		return fmt.Errorf("xml: end tag with no name")
+	}
+	if len(p.tags) == 0 || p.tags[len(p.tags)-1].Local == "" {
+		return fmt.Errorf("xml: end tag </%s> without start tag", name.Local)
+	}
+	if top := p.tags[len(p.tags)-1]; top != name {
+		if top.Local != name.Local {
+			return fmt.Errorf("xml: end tag </%s> does not match start tag <%s>", name.Local, top.Local)
+		}
+		return fmt.Errorf("xml: end tag </%s> in namespace %s does not match start tag <%s> in namespace %s", name.Local, name.Space, top.Local, top.Space)
+	}
+	p.tags = p.tags[:len(p.tags)-1]
+
+	p.writeIndent(-1)
+	p.WriteByte('<')
+	p.WriteByte('/')
+	p.WriteString(name.Local)
+	p.WriteByte('>')
+	p.popPrefix()
+	return nil
+}
+
+func (p *printer) marshalSimple(typ reflect.Type, val reflect.Value) (string, []byte, error) {
+	switch val.Kind() {
+	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+		return strconv.FormatInt(val.Int(), 10), nil, nil
+	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+		return strconv.FormatUint(val.Uint(), 10), nil, nil
+	case reflect.Float32, reflect.Float64:
+		return strconv.FormatFloat(val.Float(), 'g', -1, val.Type().Bits()), nil, nil
+	case reflect.String:
+		return val.String(), nil, nil
+	case reflect.Bool:
+		return strconv.FormatBool(val.Bool()), nil, nil
+	case reflect.Array:
+		if typ.Elem().Kind() != reflect.Uint8 {
+			break
+		}
+		// [...]byte
+		var bytes []byte
+		if val.CanAddr() {
+			bytes = val.Slice(0, val.Len()).Bytes()
+		} else {
+			bytes = make([]byte, val.Len())
+			reflect.Copy(reflect.ValueOf(bytes), val)
+		}
+		return "", bytes, nil
+	case reflect.Slice:
+		if typ.Elem().Kind() != reflect.Uint8 {
+			break
+		}
+		// []byte
+		return "", val.Bytes(), nil
+	}
+	return "", nil, &UnsupportedTypeError{typ}
+}
+
+var ddBytes = []byte("--")
+
+func (p *printer) marshalStruct(tinfo *typeInfo, val reflect.Value) error {
+	s := parentStack{p: p}
+	for i := range tinfo.fields {
+		finfo := &tinfo.fields[i]
+		if finfo.flags&fAttr != 0 {
+			continue
+		}
+		vf := finfo.value(val)
+
+		// Dereference or skip nil pointer, interface values.
+		switch vf.Kind() {
+		case reflect.Ptr, reflect.Interface:
+			if !vf.IsNil() {
+				vf = vf.Elem()
+			}
+		}
+
+		switch finfo.flags & fMode {
+		case fCharData:
+			if vf.CanInterface() && vf.Type().Implements(textMarshalerType) {
+				data, err := vf.Interface().(encoding.TextMarshaler).MarshalText()
+				if err != nil {
+					return err
+				}
+				Escape(p, data)
+				continue
+			}
+			if vf.CanAddr() {
+				pv := vf.Addr()
+				if pv.CanInterface() && pv.Type().Implements(textMarshalerType) {
+					data, err := pv.Interface().(encoding.TextMarshaler).MarshalText()
+					if err != nil {
+						return err
+					}
+					Escape(p, data)
+					continue
+				}
+			}
+			var scratch [64]byte
+			switch vf.Kind() {
+			case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+				Escape(p, strconv.AppendInt(scratch[:0], vf.Int(), 10))
+			case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+				Escape(p, strconv.AppendUint(scratch[:0], vf.Uint(), 10))
+			case reflect.Float32, reflect.Float64:
+				Escape(p, strconv.AppendFloat(scratch[:0], vf.Float(), 'g', -1, vf.Type().Bits()))
+			case reflect.Bool:
+				Escape(p, strconv.AppendBool(scratch[:0], vf.Bool()))
+			case reflect.String:
+				if err := EscapeText(p, []byte(vf.String())); err != nil {
+					return err
+				}
+			case reflect.Slice:
+				if elem, ok := vf.Interface().([]byte); ok {
+					if err := EscapeText(p, elem); err != nil {
+						return err
+					}
+				}
+			}
+			continue
+
+		case fComment:
+			k := vf.Kind()
+			if !(k == reflect.String || k == reflect.Slice && vf.Type().Elem().Kind() == reflect.Uint8) {
+				return fmt.Errorf("xml: bad type for comment field of %s", val.Type())
+			}
+			if vf.Len() == 0 {
+				continue
+			}
+			p.writeIndent(0)
+			p.WriteString("<!--")
+			dashDash := false
+			dashLast := false
+			switch k {
+			case reflect.String:
+				s := vf.String()
+				dashDash = strings.Index(s, "--") >= 0
+				dashLast = s[len(s)-1] == '-'
+				if !dashDash {
+					p.WriteString(s)
+				}
+			case reflect.Slice:
+				b := vf.Bytes()
+				dashDash = bytes.Index(b, ddBytes) >= 0
+				dashLast = b[len(b)-1] == '-'
+				if !dashDash {
+					p.Write(b)
+				}
+			default:
+				panic("can't happen")
+			}
+			if dashDash {
+				return fmt.Errorf(`xml: comments must not contain "--"`)
+			}
+			if dashLast {
+				// "--->" is invalid grammar. Make it "- -->"
+				p.WriteByte(' ')
+			}
+			p.WriteString("-->")
+			continue
+
+		case fInnerXml:
+			iface := vf.Interface()
+			switch raw := iface.(type) {
+			case []byte:
+				p.Write(raw)
+				continue
+			case string:
+				p.WriteString(raw)
+				continue
+			}
+
+		case fElement, fElement | fAny:
+			if err := s.trim(finfo.parents); err != nil {
+				return err
+			}
+			if len(finfo.parents) > len(s.stack) {
+				if vf.Kind() != reflect.Ptr && vf.Kind() != reflect.Interface || !vf.IsNil() {
+					if err := s.push(finfo.parents[len(s.stack):]); err != nil {
+						return err
+					}
+				}
+			}
+		}
+		if err := p.marshalValue(vf, finfo, nil); err != nil {
+			return err
+		}
+	}
+	s.trim(nil)
+	return p.cachedWriteError()
+}
+
+// return the bufio Writer's cached write error
+func (p *printer) cachedWriteError() error {
+	_, err := p.Write(nil)
+	return err
+}
+
+func (p *printer) writeIndent(depthDelta int) {
+	if len(p.prefix) == 0 && len(p.indent) == 0 {
+		return
+	}
+	if depthDelta < 0 {
+		p.depth--
+		if p.indentedIn {
+			p.indentedIn = false
+			return
+		}
+		p.indentedIn = false
+	}
+	if p.putNewline {
+		p.WriteByte('\n')
+	} else {
+		p.putNewline = true
+	}
+	if len(p.prefix) > 0 {
+		p.WriteString(p.prefix)
+	}
+	if len(p.indent) > 0 {
+		for i := 0; i < p.depth; i++ {
+			p.WriteString(p.indent)
+		}
+	}
+	if depthDelta > 0 {
+		p.depth++
+		p.indentedIn = true
+	}
+}
+
+type parentStack struct {
+	p     *printer
+	stack []string
+}
+
+// trim updates the XML context to match the longest common prefix of the stack
+// and the given parents. A closing tag will be written for every parent
+// popped. Passing a zero slice or nil will close all the elements.
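+// For example, with stack ["a", "b", "c"], trim([]string{"a", "x"}) writes
+// </c></b> and leaves the stack as ["a"].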
+func (s *parentStack) trim(parents []string) error {
+	split := 0
+	for ; split < len(parents) && split < len(s.stack); split++ {
+		if parents[split] != s.stack[split] {
+			break
+		}
+	}
+	for i := len(s.stack) - 1; i >= split; i-- {
+		if err := s.p.writeEnd(Name{Local: s.stack[i]}); err != nil {
+			return err
+		}
+	}
+	s.stack = parents[:split]
+	return nil
+}
+
+// push adds parent elements to the stack and writes open tags.
+func (s *parentStack) push(parents []string) error {
+	for i := 0; i < len(parents); i++ {
+		if err := s.p.writeStart(&StartElement{Name: Name{Local: parents[i]}}); err != nil {
+			return err
+		}
+	}
+	s.stack = append(s.stack, parents...)
+	return nil
+}
+
+// An UnsupportedTypeError is returned when Marshal encounters a type
+// that cannot be converted into XML.
+type UnsupportedTypeError struct {
+	Type reflect.Type
+}
+
+func (e *UnsupportedTypeError) Error() string {
+	return "xml: unsupported type: " + e.Type.String()
+}
+
+func isEmptyValue(v reflect.Value) bool {
+	switch v.Kind() {
+	case reflect.Array, reflect.Map, reflect.Slice, reflect.String:
+		return v.Len() == 0
+	case reflect.Bool:
+		return !v.Bool()
+	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+		return v.Int() == 0
+	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+		return v.Uint() == 0
+	case reflect.Float32, reflect.Float64:
+		return v.Float() == 0
+	case reflect.Interface, reflect.Ptr:
+		return v.IsNil()
+	}
+	return false
+}
diff --git a/vendor/github.com/vmware/govmomi/vim25/xml/marshal_test.go b/vendor/github.com/vmware/govmomi/vim25/xml/marshal_test.go
new file mode 100644
index 000000000..14f73a75d
--- /dev/null
+++ b/vendor/github.com/vmware/govmomi/vim25/xml/marshal_test.go
@@ -0,0 +1,1266 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+ +package xml + +import ( + "bytes" + "errors" + "fmt" + "io" + "reflect" + "strconv" + "strings" + "testing" + "time" +) + +type DriveType int + +const ( + HyperDrive DriveType = iota + ImprobabilityDrive +) + +type Passenger struct { + Name []string `xml:"name"` + Weight float32 `xml:"weight"` +} + +type Ship struct { + XMLName struct{} `xml:"spaceship"` + + Name string `xml:"name,attr"` + Pilot string `xml:"pilot,attr"` + Drive DriveType `xml:"drive"` + Age uint `xml:"age"` + Passenger []*Passenger `xml:"passenger"` + secret string +} + +type NamedType string + +type Port struct { + XMLName struct{} `xml:"port"` + Type string `xml:"type,attr,omitempty"` + Comment string `xml:",comment"` + Number string `xml:",chardata"` +} + +type Domain struct { + XMLName struct{} `xml:"domain"` + Country string `xml:",attr,omitempty"` + Name []byte `xml:",chardata"` + Comment []byte `xml:",comment"` +} + +type Book struct { + XMLName struct{} `xml:"book"` + Title string `xml:",chardata"` +} + +type Event struct { + XMLName struct{} `xml:"event"` + Year int `xml:",chardata"` +} + +type Movie struct { + XMLName struct{} `xml:"movie"` + Length uint `xml:",chardata"` +} + +type Pi struct { + XMLName struct{} `xml:"pi"` + Approximation float32 `xml:",chardata"` +} + +type Universe struct { + XMLName struct{} `xml:"universe"` + Visible float64 `xml:",chardata"` +} + +type Particle struct { + XMLName struct{} `xml:"particle"` + HasMass bool `xml:",chardata"` +} + +type Departure struct { + XMLName struct{} `xml:"departure"` + When time.Time `xml:",chardata"` +} + +type SecretAgent struct { + XMLName struct{} `xml:"agent"` + Handle string `xml:"handle,attr"` + Identity string + Obfuscate string `xml:",innerxml"` +} + +type NestedItems struct { + XMLName struct{} `xml:"result"` + Items []string `xml:">item"` + Item1 []string `xml:"Items>item1"` +} + +type NestedOrder struct { + XMLName struct{} `xml:"result"` + Field1 string `xml:"parent>c"` + Field2 string `xml:"parent>b"` + Field3 string `xml:"parent>a"` +} + +type MixedNested struct { + XMLName struct{} `xml:"result"` + A string `xml:"parent1>a"` + B string `xml:"b"` + C string `xml:"parent1>parent2>c"` + D string `xml:"parent1>d"` +} + +type NilTest struct { + A interface{} `xml:"parent1>parent2>a"` + B interface{} `xml:"parent1>b"` + C interface{} `xml:"parent1>parent2>c"` +} + +type Service struct { + XMLName struct{} `xml:"service"` + Domain *Domain `xml:"host>domain"` + Port *Port `xml:"host>port"` + Extra1 interface{} + Extra2 interface{} `xml:"host>extra2"` +} + +var nilStruct *Ship + +type EmbedA struct { + EmbedC + EmbedB EmbedB + FieldA string +} + +type EmbedB struct { + FieldB string + *EmbedC +} + +type EmbedC struct { + FieldA1 string `xml:"FieldA>A1"` + FieldA2 string `xml:"FieldA>A2"` + FieldB string + FieldC string +} + +type NameCasing struct { + XMLName struct{} `xml:"casing"` + Xy string + XY string + XyA string `xml:"Xy,attr"` + XYA string `xml:"XY,attr"` +} + +type NamePrecedence struct { + XMLName Name `xml:"Parent"` + FromTag XMLNameWithoutTag `xml:"InTag"` + FromNameVal XMLNameWithoutTag + FromNameTag XMLNameWithTag + InFieldName string +} + +type XMLNameWithTag struct { + XMLName Name `xml:"InXMLNameTag"` + Value string `xml:",chardata"` +} + +type XMLNameWithoutTag struct { + XMLName Name + Value string `xml:",chardata"` +} + +type NameInField struct { + Foo Name `xml:"ns foo"` +} + +type AttrTest struct { + Int int `xml:",attr"` + Named int `xml:"int,attr"` + Float float64 `xml:",attr"` + Uint8 uint8 `xml:",attr"` + Bool bool 
`xml:",attr"` + Str string `xml:",attr"` + Bytes []byte `xml:",attr"` +} + +type OmitAttrTest struct { + Int int `xml:",attr,omitempty"` + Named int `xml:"int,attr,omitempty"` + Float float64 `xml:",attr,omitempty"` + Uint8 uint8 `xml:",attr,omitempty"` + Bool bool `xml:",attr,omitempty"` + Str string `xml:",attr,omitempty"` + Bytes []byte `xml:",attr,omitempty"` +} + +type OmitFieldTest struct { + Int int `xml:",omitempty"` + Named int `xml:"int,omitempty"` + Float float64 `xml:",omitempty"` + Uint8 uint8 `xml:",omitempty"` + Bool bool `xml:",omitempty"` + Str string `xml:",omitempty"` + Bytes []byte `xml:",omitempty"` + Ptr *PresenceTest `xml:",omitempty"` +} + +type AnyTest struct { + XMLName struct{} `xml:"a"` + Nested string `xml:"nested>value"` + AnyField AnyHolder `xml:",any"` +} + +type AnyOmitTest struct { + XMLName struct{} `xml:"a"` + Nested string `xml:"nested>value"` + AnyField *AnyHolder `xml:",any,omitempty"` +} + +type AnySliceTest struct { + XMLName struct{} `xml:"a"` + Nested string `xml:"nested>value"` + AnyField []AnyHolder `xml:",any"` +} + +type AnyHolder struct { + XMLName Name + XML string `xml:",innerxml"` +} + +type RecurseA struct { + A string + B *RecurseB +} + +type RecurseB struct { + A *RecurseA + B string +} + +type PresenceTest struct { + Exists *struct{} +} + +type IgnoreTest struct { + PublicSecret string `xml:"-"` +} + +type MyBytes []byte + +type Data struct { + Bytes []byte + Attr []byte `xml:",attr"` + Custom MyBytes +} + +type Plain struct { + V interface{} +} + +type MyInt int + +type EmbedInt struct { + MyInt +} + +type Strings struct { + X []string `xml:"A>B,omitempty"` +} + +type PointerFieldsTest struct { + XMLName Name `xml:"dummy"` + Name *string `xml:"name,attr"` + Age *uint `xml:"age,attr"` + Empty *string `xml:"empty,attr"` + Contents *string `xml:",chardata"` +} + +type ChardataEmptyTest struct { + XMLName Name `xml:"test"` + Contents *string `xml:",chardata"` +} + +type MyMarshalerTest struct { +} + +var _ Marshaler = (*MyMarshalerTest)(nil) + +func (m *MyMarshalerTest) MarshalXML(e *Encoder, start StartElement) error { + e.EncodeToken(start) + e.EncodeToken(CharData([]byte("hello world"))) + e.EncodeToken(EndElement{start.Name}) + return nil +} + +type MyMarshalerAttrTest struct { +} + +var _ MarshalerAttr = (*MyMarshalerAttrTest)(nil) + +func (m *MyMarshalerAttrTest) MarshalXMLAttr(name Name) (Attr, error) { + return Attr{name, "hello world"}, nil +} + +type MarshalerStruct struct { + Foo MyMarshalerAttrTest `xml:",attr"` +} + +type InnerStruct struct { + XMLName Name `xml:"testns outer"` +} + +type OuterStruct struct { + InnerStruct + IntAttr int `xml:"int,attr"` +} + +type OuterNamedStruct struct { + InnerStruct + XMLName Name `xml:"outerns test"` + IntAttr int `xml:"int,attr"` +} + +type OuterNamedOrderedStruct struct { + XMLName Name `xml:"outerns test"` + InnerStruct + IntAttr int `xml:"int,attr"` +} + +type OuterOuterStruct struct { + OuterStruct +} + +func ifaceptr(x interface{}) interface{} { + return &x +} + +var ( + nameAttr = "Sarah" + ageAttr = uint(12) + contentsAttr = "lorem ipsum" +) + +// Unless explicitly stated as such (or *Plain), all of the +// tests below are two-way tests. When introducing new tests, +// please try to make them two-way as well to ensure that +// marshalling and unmarshalling are as symmetrical as feasible. 
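+//
+// Roughly, a two-way entry is exercised in both directions:
+//
+//	data, _ := Marshal(test.Value)          // TestMarshal: data == test.ExpectXML
+//	Unmarshal([]byte(test.ExpectXML), dest) // TestUnmarshal: dest matches test.Value
+//
+// where dest stands for a freshly allocated value of test.Value's type.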
+var marshalTests = []struct {
+	Value         interface{}
+	ExpectXML     string
+	MarshalOnly   bool
+	UnmarshalOnly bool
+}{
+	// Test nil marshals to nothing
+	{Value: nil, ExpectXML: ``, MarshalOnly: true},
+	{Value: nilStruct, ExpectXML: ``, MarshalOnly: true},
+
+	// Test value types
+	{Value: &Plain{true}, ExpectXML: `<Plain><V>true</V></Plain>`},
+	{Value: &Plain{false}, ExpectXML: `<Plain><V>false</V></Plain>`},
+	{Value: &Plain{int(42)}, ExpectXML: `<Plain><V>42</V></Plain>`},
+	{Value: &Plain{int8(42)}, ExpectXML: `<Plain><V>42</V></Plain>`},
+	{Value: &Plain{int16(42)}, ExpectXML: `<Plain><V>42</V></Plain>`},
+	{Value: &Plain{int32(42)}, ExpectXML: `<Plain><V>42</V></Plain>`},
+	{Value: &Plain{uint(42)}, ExpectXML: `<Plain><V>42</V></Plain>`},
+	{Value: &Plain{uint8(42)}, ExpectXML: `<Plain><V>42</V></Plain>`},
+	{Value: &Plain{uint16(42)}, ExpectXML: `<Plain><V>42</V></Plain>`},
+	{Value: &Plain{uint32(42)}, ExpectXML: `<Plain><V>42</V></Plain>`},
+	{Value: &Plain{float32(1.25)}, ExpectXML: `<Plain><V>1.25</V></Plain>`},
+	{Value: &Plain{float64(1.25)}, ExpectXML: `<Plain><V>1.25</V></Plain>`},
+	{Value: &Plain{uintptr(0xFFDD)}, ExpectXML: `<Plain><V>65501</V></Plain>`},
+	{Value: &Plain{"gopher"}, ExpectXML: `<Plain><V>gopher</V></Plain>`},
+	{Value: &Plain{[]byte("gopher")}, ExpectXML: `<Plain><V>gopher</V></Plain>`},
+	{Value: &Plain{"</>"}, ExpectXML: `<Plain><V>&lt;/&gt;</V></Plain>`},
+	{Value: &Plain{[]byte("</>")}, ExpectXML: `<Plain><V>&lt;/&gt;</V></Plain>`},
+	{Value: &Plain{[3]byte{'<', '/', '>'}}, ExpectXML: `<Plain><V>&lt;/&gt;</V></Plain>`},
+	{Value: &Plain{NamedType("potato")}, ExpectXML: `<Plain><V>potato</V></Plain>`},
+	{Value: &Plain{[]int{1, 2, 3}}, ExpectXML: `<Plain><V>1</V><V>2</V><V>3</V></Plain>`},
+	{Value: &Plain{[3]int{1, 2, 3}}, ExpectXML: `<Plain><V>1</V><V>2</V><V>3</V></Plain>`},
+	{Value: ifaceptr(true), MarshalOnly: true, ExpectXML: `<bool>true</bool>`},
+
+	// Test time.
+	{
+		Value:     &Plain{time.Unix(1e9, 123456789).UTC()},
+		ExpectXML: `<Plain><V>2001-09-09T01:46:40.123456789Z</V></Plain>`,
+	},
+
+	// A pointer to struct{} may be used to test for an element's presence.
+	{
+		Value:     &PresenceTest{new(struct{})},
+		ExpectXML: `<PresenceTest><Exists></Exists></PresenceTest>`,
+	},
+	{
+		Value:     &PresenceTest{},
+		ExpectXML: `<PresenceTest></PresenceTest>`,
+	},
+
+	// A []byte field is only nil if the element was not found.
+	{
+		Value:         &Data{},
+		ExpectXML:     `<Data></Data>`,
+		UnmarshalOnly: true,
+	},
+	{
+		Value:         &Data{Bytes: []byte{}, Custom: MyBytes{}, Attr: []byte{}},
+		ExpectXML:     `<Data Attr=""><Bytes></Bytes><Custom></Custom></Data>`,
+		UnmarshalOnly: true,
+	},
+
+	// Check that []byte works, including named []byte types.
+	{
+		Value:     &Data{Bytes: []byte("ab"), Custom: MyBytes("cd"), Attr: []byte{'v'}},
+		ExpectXML: `<Data Attr="v"><Bytes>ab</Bytes><Custom>cd</Custom></Data>`,
+	},
+
+	// Test innerxml
+	{
+		Value: &SecretAgent{
+			Handle:    "007",
+			Identity:  "James Bond",
+			Obfuscate: "<redacted/>",
+		},
+		ExpectXML:   `<agent handle="007"><Identity>James Bond</Identity><redacted/></agent>`,
+		MarshalOnly: true,
+	},
+	{
+		Value: &SecretAgent{
+			Handle:    "007",
+			Identity:  "James Bond",
+			Obfuscate: "<Identity>James Bond</Identity><redacted/>",
+		},
+		ExpectXML:     `<agent handle="007"><Identity>James Bond</Identity><redacted/></agent>`,
+		UnmarshalOnly: true,
+	},
+
+	// Test structs
+	{Value: &Port{Type: "ssl", Number: "443"}, ExpectXML: `<port type="ssl">443</port>`},
+	{Value: &Port{Number: "443"}, ExpectXML: `<port>443</port>`},
+	{Value: &Port{Type: "<unix>"}, ExpectXML: `<port type="&lt;unix&gt;"></port>`},
+	{Value: &Port{Number: "443", Comment: "https"}, ExpectXML: `<port><!--https-->443</port>`},
+	{Value: &Port{Number: "443", Comment: "add space-"}, ExpectXML: `<port><!--add space- -->443</port>`, MarshalOnly: true},
+	{Value: &Domain{Name: []byte("google.com&friends")}, ExpectXML: `<domain>google.com&amp;friends</domain>`},
+	{Value: &Domain{Name: []byte("google.com"), Comment: []byte(" &friends ")}, ExpectXML: `<domain>google.com<!-- &friends --></domain>`},
+	{Value: &Book{Title: "Pride & Prejudice"}, ExpectXML: `<book>Pride &amp; Prejudice</book>`},
+	{Value: &Event{Year: -3114}, ExpectXML: `<event>-3114</event>`},
+	{Value: &Movie{Length: 13440}, ExpectXML: `<movie>13440</movie>`},
+	{Value: &Pi{Approximation: 3.14159265}, ExpectXML: `<pi>3.1415927</pi>`},
+	{Value: &Universe{Visible: 9.3e13}, ExpectXML: `<universe>9.3e+13</universe>`},
+	{Value: &Particle{HasMass: true}, ExpectXML: `<particle>true</particle>`},
+	{Value: &Departure{When: ParseTime("2013-01-09T00:15:00-09:00")}, ExpectXML: `<departure>2013-01-09T00:15:00-09:00</departure>`},
+	{Value: atomValue, ExpectXML: atomXml},
+	{
+		Value: &Ship{
+			Name:  "Heart of Gold",
+			Pilot: "Computer",
+			Age:   1,
+			Drive: ImprobabilityDrive,
+			Passenger: []*Passenger{
+				{
+					Name:   []string{"Zaphod", "Beeblebrox"},
+					Weight: 7.25,
+				},
+				{
+					Name:   []string{"Trisha", "McMillen"},
+					Weight: 5.5,
+				},
+				{
+					Name:   []string{"Ford", "Prefect"},
+					Weight: 7,
+				},
+				{
+					Name:   []string{"Arthur", "Dent"},
+					Weight: 6.75,
+				},
+			},
+		},
+		ExpectXML: `<spaceship name="Heart of Gold" pilot="Computer">` +
+			`<drive>` + strconv.Itoa(int(ImprobabilityDrive)) + `</drive>` +
+			`<age>1</age>` +
+			`<passenger>` +
+			`<name>Zaphod</name>` +
+			`<name>Beeblebrox</name>` +
+			`<weight>7.25</weight>` +
+			`</passenger>` +
+			`<passenger>` +
+			`<name>Trisha</name>` +
+			`<name>McMillen</name>` +
+			`<weight>5.5</weight>` +
+			`</passenger>` +
+			`<passenger>` +
+			`<name>Ford</name>` +
+			`<name>Prefect</name>` +
+			`<weight>7</weight>` +
+			`</passenger>` +
+			`<passenger>` +
+			`<name>Arthur</name>` +
+			`<name>Dent</name>` +
+			`<weight>6.75</weight>` +
+			`</passenger>` +
+			`</spaceship>`,
+	},
+
+	// Test a>b
+	{
+		Value: &NestedItems{Items: nil, Item1: nil},
+		ExpectXML: `<result>` +
+			`<Items>` +
+			`</Items>` +
+			`</result>`,
+	},
+	{
+		Value: &NestedItems{Items: []string{}, Item1: []string{}},
+		ExpectXML: `<result>` +
+			`<Items>` +
+			`</Items>` +
+			`</result>`,
+		MarshalOnly: true,
+	},
+	{
+		Value: &NestedItems{Items: nil, Item1: []string{"A"}},
+		ExpectXML: `<result>` +
+			`<Items>` +
+			`<item1>A</item1>` +
+			`</Items>` +
+			`</result>`,
+	},
+	{
+		Value: &NestedItems{Items: []string{"A", "B"}, Item1: nil},
+		ExpectXML: `<result>` +
+			`<Items>` +
+			`<item>A</item>` +
+			`<item>B</item>` +
+			`</Items>` +
+			`</result>`,
+	},
+	{
+		Value: &NestedItems{Items: []string{"A", "B"}, Item1: []string{"C"}},
+		ExpectXML: `<result>` +
+			`<Items>` +
+			`<item>A</item>` +
+			`<item>B</item>` +
+			`<item1>C</item1>` +
+			`</Items>` +
+			`</result>`,
+	},
+	{
+		Value: &NestedOrder{Field1: "C", Field2: "B", Field3: "A"},
+		ExpectXML: `<result>` +
+			`<parent>` +
+			`<c>C</c>` +
+			`<b>B</b>` +
+			`<a>A</a>` +
+			`</parent>` +
+			`</result>`,
+	},
+	{
+		Value: &NilTest{A: "A", B: nil, C: "C"},
+		ExpectXML: `<NilTest>` +
+			`<parent1>` +
+			`<parent2><a>A</a></parent2>` +
+			`<parent2><c>C</c></parent2>` +
+			`</parent1>` +
+			`</NilTest>`,
+		MarshalOnly: true, // Uses interface{}
+	},
+	{
+		Value: &MixedNested{A: "A", B: "B", C: "C", D: "D"},
+		ExpectXML: `<result>` +
+			`<parent1><a>A</a></parent1>` +
+			`<b>B</b>` +
+			`<parent1>` +
+			`<parent2><c>C</c></parent2>` +
+			`<d>D</d>` +
+			`</parent1>` +
+			`</result>`,
+	},
+	{
+		Value:     &Service{Port: &Port{Number: "80"}},
+		ExpectXML: `<service><host><port>80</port></host></service>`,
+	},
+	{
+		Value:     &Service{},
+		ExpectXML: `<service></service>`,
+	},
+	{
+		Value: &Service{Port: &Port{Number: "80"}, Extra1: "A", Extra2: "B"},
+		ExpectXML: `<service>` +
+			`<host><port>80</port></host>` +
+			`<Extra1>A</Extra1>` +
+			`<host><extra2>B</extra2></host>` +
+			`</service>`,
+		MarshalOnly: true,
+	},
+	{
+		Value: &Service{Port: &Port{Number: "80"}, Extra2: "example"},
+		ExpectXML: `<service>` +
+			`<host><port>80</port></host>` +
+			`<host><extra2>example</extra2></host>` +
+			`</service>`,
+		MarshalOnly: true,
+	},
+
+	// Test struct embedding
+	{
+		Value: &EmbedA{
+			EmbedC: EmbedC{
+				FieldA1: "", // Shadowed by A.A
+				FieldA2: "", // Shadowed by A.A
+				FieldB:  "A.C.B",
+				FieldC:  "A.C.C",
+			},
+			EmbedB: EmbedB{
+				FieldB: "A.B.B",
+				EmbedC: &EmbedC{
+					FieldA1: "A.B.C.A1",
+					FieldA2: "A.B.C.A2",
+					FieldB:  "", // Shadowed by A.B.B
+					FieldC:  "A.B.C.C",
+				},
+			},
+			FieldA: "A.A",
+		},
+		ExpectXML: `<EmbedA>` +
+			`<FieldB>A.C.B</FieldB>` +
+			`<FieldC>A.C.C</FieldC>` +
+			`<EmbedB>` +
+			`<FieldB>A.B.B</FieldB>` +
+			`<FieldA>` +
+			`<A1>A.B.C.A1</A1>` +
+			`<A2>A.B.C.A2</A2>` +
+			`</FieldA>` +
+			`<FieldC>A.B.C.C</FieldC>` +
+			`</EmbedB>` +
+			`<FieldA>A.A</FieldA>` +
+			`</EmbedA>`,
+	},
+
+	// Test that name casing matters
+	{
+		Value:     &NameCasing{Xy: "mixed", XY: "upper", XyA: "mixedA", XYA: "upperA"},
+		ExpectXML: `<casing Xy="mixedA" XY="upperA"><Xy>mixed</Xy><XY>upper</XY></casing>`,
+	},
+
+	// Test the order in which the XML element name is chosen
+	{
+		Value: &NamePrecedence{
+			FromTag:     XMLNameWithoutTag{Value: "A"},
+			FromNameVal: XMLNameWithoutTag{XMLName: Name{Local: "InXMLName"}, Value: "B"},
+			FromNameTag: XMLNameWithTag{Value: "C"},
+			InFieldName: "D",
+		},
+		ExpectXML: `<Parent>` +
+			`<InTag>A</InTag>` +
+			`<InXMLName>B</InXMLName>` +
+			`<InXMLNameTag>C</InXMLNameTag>` +
+			`<InFieldName>D</InFieldName>` +
+			`</Parent>`,
+		MarshalOnly: true,
+	},
+	{
+		Value: &NamePrecedence{
+			XMLName:     Name{Local: "Parent"},
+			FromTag:     XMLNameWithoutTag{XMLName: Name{Local: "InTag"}, Value: "A"},
+			FromNameVal: XMLNameWithoutTag{XMLName: Name{Local: "FromNameVal"}, Value: "B"},
+			FromNameTag: XMLNameWithTag{XMLName: Name{Local: "InXMLNameTag"}, Value: "C"},
+			InFieldName: "D",
+		},
+		ExpectXML: `<Parent>` +
+			`<InTag>A</InTag>` +
+			`<FromNameVal>B</FromNameVal>` +
+			`<InXMLNameTag>C</InXMLNameTag>` +
+			`<InFieldName>D</InFieldName>` +
+			`</Parent>`,
+		UnmarshalOnly: true,
+	},
+
+	// xml.Name works in a plain field as well.
+	{
+		Value:     &NameInField{Name{Space: "ns", Local: "foo"}},
+		ExpectXML: `<NameInField><foo xmlns="ns"></foo></NameInField>`,
+	},
+	{
+		Value:         &NameInField{Name{Space: "ns", Local: "foo"}},
+		ExpectXML:     `<NameInField><foo xmlns="ns"><ignore></ignore></foo></NameInField>`,
+		UnmarshalOnly: true,
+	},
+
+	// Marshaling zero xml.Name uses the tag or field name.
+	{
+		Value:       &NameInField{},
+		ExpectXML:   `<NameInField><foo xmlns="ns"></foo></NameInField>`,
+		MarshalOnly: true,
+	},
+
+	// Test attributes
+	{
+		Value: &AttrTest{
+			Int:   8,
+			Named: 9,
+			Float: 23.5,
+			Uint8: 255,
+			Bool:  true,
+			Str:   "str",
+			Bytes: []byte("byt"),
+		},
+		ExpectXML: `<AttrTest Int="8" int="9" Float="23.5" Uint8="255" Bool="true" Str="str" Bytes="byt"></AttrTest>`,
+	},
+	{
+		Value:     &AttrTest{Bytes: []byte{}},
+		ExpectXML: `<AttrTest Int="0" int="0" Float="0" Uint8="0" Bool="false" Str="" Bytes=""></AttrTest>`,
+	},
+	{
+		Value: &OmitAttrTest{
+			Int:   8,
+			Named: 9,
+			Float: 23.5,
+			Uint8: 255,
+			Bool:  true,
+			Str:   "str",
+			Bytes: []byte("byt"),
+		},
+		ExpectXML: `<OmitAttrTest Int="8" int="9" Float="23.5" Uint8="255" Bool="true" Str="str" Bytes="byt"></OmitAttrTest>`,
+	},
+	{
+		Value:     &OmitAttrTest{},
+		ExpectXML: `<OmitAttrTest></OmitAttrTest>`,
+	},
+
+	// pointer fields
+	{
+		Value:       &PointerFieldsTest{Name: &nameAttr, Age: &ageAttr, Contents: &contentsAttr},
+		ExpectXML:   `<dummy name="Sarah" age="12">lorem ipsum</dummy>`,
+		MarshalOnly: true,
+	},
+
+	// empty chardata pointer field
+	{
+		Value:       &ChardataEmptyTest{},
+		ExpectXML:   `<test></test>`,
+		MarshalOnly: true,
+	},
+
+	// omitempty on fields
+	{
+		Value: &OmitFieldTest{
+			Int:   8,
+			Named: 9,
+			Float: 23.5,
+			Uint8: 255,
+			Bool:  true,
+			Str:   "str",
+			Bytes: []byte("byt"),
+			Ptr:   &PresenceTest{},
+		},
+		ExpectXML: `<OmitFieldTest>` +
+			`<Int>8</Int>` +
+			`<int>9</int>` +
+			`<Float>23.5</Float>` +
+			`<Uint8>255</Uint8>` +
+			`<Bool>true</Bool>` +
+			`<Str>str</Str>` +
+			`<Bytes>byt</Bytes>` +
+			`<Ptr></Ptr>` +
+			`</OmitFieldTest>`,
+	},
+	{
+		Value:     &OmitFieldTest{},
+		ExpectXML: `<OmitFieldTest></OmitFieldTest>`,
+	},
+
+	// Test ",any"
+	{
+		ExpectXML: `<a><nested><value>known</value></nested><other><sub>unknown</sub></other></a>`,
+		Value: &AnyTest{
+			Nested: "known",
+			AnyField: AnyHolder{
+				XMLName: Name{Local: "other"},
+				XML:     "<sub>unknown</sub>",
+			},
+		},
+	},
+	{
+		Value: &AnyTest{Nested: "known",
+			AnyField: AnyHolder{
+				XML:     "<unknown/>",
+				XMLName: Name{Local: "AnyField"},
+			},
+		},
+		ExpectXML: `<a><nested><value>known</value></nested><AnyField><unknown/></AnyField></a>`,
+	},
+	{
+		ExpectXML: `<a><nested><value>b</value></nested></a>`,
+		Value: &AnyOmitTest{
+			Nested: "b",
+		},
+	},
+	{
+		ExpectXML: `<a><nested><value>b</value></nested><c><d>e</d></c><g xmlns="f"><h>i</h></g></a>`,
+		Value: &AnySliceTest{
+			Nested: "b",
+			AnyField: []AnyHolder{
+				{
+					XMLName: Name{Local: "c"},
+					XML:     "<d>e</d>",
+				},
+				{
+					XMLName: Name{Space: "f", Local: "g"},
+					XML:     "<h>i</h>",
+				},
+			},
+		},
+	},
+	{
+		ExpectXML: `<a><nested><value>b</value></nested></a>`,
+		Value: &AnySliceTest{
+			Nested: "b",
+		},
+	},
+
+	// Test recursive types.
+	{
+		Value: &RecurseA{
+			A: "a1",
+			B: &RecurseB{
+				A: &RecurseA{"a2", nil},
+				B: "b1",
+			},
+		},
+		ExpectXML: `<RecurseA><A>a1</A><B><A><A>a2</A></A><B>b1</B></B></RecurseA>`,
+	},
+
+	// Test ignoring fields via "-" tag
+	{
+		ExpectXML: `<IgnoreTest></IgnoreTest>`,
+		Value:     &IgnoreTest{},
+	},
+	{
+		ExpectXML:   `<IgnoreTest></IgnoreTest>`,
+		Value:       &IgnoreTest{PublicSecret: "can't tell"},
+		MarshalOnly: true,
+	},
+	{
+		ExpectXML:     `<IgnoreTest><PublicSecret>ignore me</PublicSecret></IgnoreTest>`,
+		Value:         &IgnoreTest{},
+		UnmarshalOnly: true,
+	},
+
+	// Test escaping.
+	{
+		ExpectXML: `<a><nested><value>dquote: &#34;; squote: &#39;; ampersand: &amp;; less: &lt;; greater: &gt;;</value></nested><empty></empty></a>`,
+		Value: &AnyTest{
+			Nested:   `dquote: "; squote: '; ampersand: &; less: <; greater: >;`,
+			AnyField: AnyHolder{XMLName: Name{Local: "empty"}},
+		},
+	},
+	{
+		ExpectXML: `<a><nested><value>newline: &#xA;; cr: &#xD;; tab: &#x9;;</value></nested><AnyField></AnyField></a>`,
+		Value: &AnyTest{
+			Nested:   "newline: \n; cr: \r; tab: \t;",
+			AnyField: AnyHolder{XMLName: Name{Local: "AnyField"}},
+		},
+	},
+	{
+		ExpectXML: "<a><nested><value>1\r2\r\n3\n\r4\n5</value></nested></a>",
+		Value: &AnyTest{
+			Nested: "1\n2\n3\n\n4\n5",
+		},
+		UnmarshalOnly: true,
+	},
+	{
+		ExpectXML: `<EmbedInt><MyInt>42</MyInt></EmbedInt>`,
+		Value: &EmbedInt{
+			MyInt: 42,
+		},
+	},
+	// Test omitempty with parent chain; see golang.org/issue/4168.
+	{
+		ExpectXML: `<Strings><A></A></Strings>`,
+		Value:     &Strings{},
+	},
+	// Custom marshalers.
+	{
+		ExpectXML: `<MyMarshalerTest>hello world</MyMarshalerTest>`,
+		Value:     &MyMarshalerTest{},
+	},
+	{
+		ExpectXML: `<MarshalerStruct Foo="hello world"></MarshalerStruct>`,
+		Value:     &MarshalerStruct{},
+	},
+	{
+		ExpectXML: `<outer xmlns="testns" int="10"></outer>`,
+		Value:     &OuterStruct{IntAttr: 10},
+	},
+	{
+		ExpectXML: `<test xmlns="outerns" int="10"></test>`,
+		Value:     &OuterNamedStruct{XMLName: Name{Space: "outerns", Local: "test"}, IntAttr: 10},
+	},
+	{
+		ExpectXML: `<test xmlns="outerns" int="10"></test>`,
+		Value:     &OuterNamedOrderedStruct{XMLName: Name{Space: "outerns", Local: "test"}, IntAttr: 10},
+	},
+	{
+		ExpectXML: `<outer xmlns="testns" int="10"></outer>`,
+		Value:     &OuterOuterStruct{OuterStruct{IntAttr: 10}},
+	},
+}
+
+func TestMarshal(t *testing.T) {
+	for idx, test := range marshalTests {
+		if test.UnmarshalOnly {
+			continue
+		}
+		data, err := Marshal(test.Value)
+		if err != nil {
+			t.Errorf("#%d: Error: %s", idx, err)
+			continue
+		}
+		if got, want := string(data), test.ExpectXML; got != want {
+			if strings.Contains(want, "\n") {
+				t.Errorf("#%d: marshal(%#v):\nHAVE:\n%s\nWANT:\n%s", idx, test.Value, got, want)
+			} else {
+				t.Errorf("#%d: marshal(%#v):\nhave %#q\nwant %#q", idx, test.Value, got, want)
+			}
+		}
+	}
+}
+
+type AttrParent struct {
+	X string `xml:"X>Y,attr"`
+}
+
+type BadAttr struct {
+	Name []string `xml:"name,attr"`
+}
+
+var marshalErrorTests = []struct {
+	Value interface{}
+	Err   string
+	Kind  reflect.Kind
+}{
+	{
+		Value: make(chan bool),
+		Err:   "xml: unsupported type: chan bool",
+		Kind:  reflect.Chan,
+	},
+	{
+		Value: map[string]string{
+			"question": "What do you get when you multiply six by nine?",
+			"answer":   "42",
+		},
+		Err:  "xml: unsupported type: map[string]string",
+		Kind: reflect.Map,
+	},
+	{
+		Value: map[*Ship]bool{nil: false},
+		Err:   "xml: unsupported type: map[*xml.Ship]bool",
+		Kind:  reflect.Map,
+	},
+	{
+		Value: &Domain{Comment: []byte("f--bar")},
+		Err:   `xml: comments must not contain "--"`,
+	},
+	// Reject parent chain with attr, never worked; see golang.org/issue/5033.
+	{
+		Value: &AttrParent{},
+		Err:   `xml: X>Y chain not valid with attr flag`,
+	},
+	{
+		Value: BadAttr{[]string{"X", "Y"}},
+		Err:   `xml: unsupported type: []string`,
+	},
+}
+
+var marshalIndentTests = []struct {
+	Value     interface{}
+	Prefix    string
+	Indent    string
+	ExpectXML string
+}{
+	{
+		Value: &SecretAgent{
+			Handle:    "007",
+			Identity:  "James Bond",
+			Obfuscate: "<redacted/>",
+		},
+		Prefix:    "",
+		Indent:    "\t",
+		ExpectXML: fmt.Sprintf("<agent handle=\"007\">\n\t<Identity>James Bond</Identity><redacted/>\n</agent>"),
+	},
+}
+
+func TestMarshalErrors(t *testing.T) {
+	for idx, test := range marshalErrorTests {
+		data, err := Marshal(test.Value)
+		if err == nil {
+			t.Errorf("#%d: marshal(%#v) = [success] %q, want error %v", idx, test.Value, data, test.Err)
+			continue
+		}
+		if err.Error() != test.Err {
+			t.Errorf("#%d: marshal(%#v) = [error] %v, want %v", idx, test.Value, err, test.Err)
+		}
+		if test.Kind != reflect.Invalid {
+			if kind := err.(*UnsupportedTypeError).Type.Kind(); kind != test.Kind {
+				t.Errorf("#%d: marshal(%#v) = [error kind] %s, want %s", idx, test.Value, kind, test.Kind)
+			}
+		}
+	}
+}
+
+// Do invertibility testing on the various structures that we test
+func TestUnmarshal(t *testing.T) {
+	for i, test := range marshalTests {
+		if test.MarshalOnly {
+			continue
+		}
+		if _, ok := test.Value.(*Plain); ok {
+			continue
+		}
+
+		vt := reflect.TypeOf(test.Value)
+		dest := reflect.New(vt.Elem()).Interface()
+		err := Unmarshal([]byte(test.ExpectXML), dest)
+
+		switch fix := dest.(type) {
+		case *Feed:
+			fix.Author.InnerXML = ""
+			for i := range fix.Entry {
+				fix.Entry[i].Author.InnerXML = ""
+			}
+		}
+
+		if err != nil {
+			t.Errorf("#%d: unexpected error: %#v", i, err)
+		} else if got, want := dest, test.Value; !reflect.DeepEqual(got, want) {
+			t.Errorf("#%d: unmarshal(%q):\nhave %#v\nwant %#v", i, test.ExpectXML, got, want)
+		}
+	}
+}
+
+func TestMarshalIndent(t *testing.T) {
+	for i, test := range marshalIndentTests {
+		data, err := MarshalIndent(test.Value, test.Prefix, test.Indent)
+		if err != nil {
+			t.Errorf("#%d: Error: %s", i, err)
+			continue
+		}
+		if got, want := string(data), test.ExpectXML; got != want {
+			t.Errorf("#%d: MarshalIndent:\nGot:%s\nWant:\n%s", i, got, want)
+		}
+	}
+}
+
+type limitedBytesWriter struct {
+	w      io.Writer
+	remain int // until writes fail
+}
+
+func (lw *limitedBytesWriter) Write(p []byte) (n int, err error) {
+	if lw.remain <= 0 {
+		println("error")
+		return 0, errors.New("write limit hit")
+	}
+	if len(p) > lw.remain {
+		p = p[:lw.remain]
+		n, _ = lw.w.Write(p)
+		lw.remain = 0
+		return n, errors.New("write limit hit")
+	}
+	n, err = lw.w.Write(p)
+	lw.remain -= n
+	return n, err
+}
+
+func TestMarshalWriteErrors(t *testing.T) {
+	var buf bytes.Buffer
+	const writeCap = 1024
+	w := &limitedBytesWriter{&buf, writeCap}
+	enc := NewEncoder(w)
+	var err error
+	var i int
+	const n = 4000
+	for i = 1; i <= n; i++ {
+		err = enc.Encode(&Passenger{
+			Name:   []string{"Alice", "Bob"},
+			Weight: 5,
+		})
+		if err != nil {
+			break
+		}
+	}
+	if err == nil {
+		t.Error("expected an error")
+	}
+	if i == n {
+		t.Errorf("expected to fail before the end")
+	}
+	if buf.Len() != writeCap {
+		t.Errorf("buf.Len() = %d; want %d", buf.Len(), writeCap)
+	}
+}
+
+func TestMarshalWriteIOErrors(t *testing.T) {
+	enc := NewEncoder(errWriter{})
+
+	expectErr := "unwritable"
+	err := enc.Encode(&Passenger{})
+	if err == nil || err.Error() != expectErr {
+		t.Errorf("EscapeTest = [error] %v, want %v", err, expectErr)
+	}
+}
+
+func TestMarshalFlush(t *testing.T) {
+	var buf bytes.Buffer
+	enc := NewEncoder(&buf)
+	if err := enc.EncodeToken(CharData("hello world")); err != nil {
+		t.Fatalf("enc.EncodeToken: %v", err)
+	}
+	if buf.Len() > 0 {
+		t.Fatalf("enc.EncodeToken caused actual write: %q", buf.Bytes())
+	}
+	if err := enc.Flush(); err != nil {
+		t.Fatalf("enc.Flush: %v", err)
+	}
+	if buf.String() != "hello world" {
+		t.Fatalf("after enc.Flush, buf.String() = %q, want %q", buf.String(), "hello world")
+	}
+}
+
+func BenchmarkMarshal(b *testing.B) {
+	for i := 0; i < b.N; i++ {
+		Marshal(atomValue)
+	}
+}
+
+func BenchmarkUnmarshal(b *testing.B) {
+	xml := []byte(atomXml)
+	for i := 0; i < b.N; i++ {
+		Unmarshal(xml, &Feed{})
+	}
+}
+
+// golang.org/issue/6556
+func TestStructPointerMarshal(t *testing.T) {
+	type A struct {
+		XMLName string `xml:"a"`
+		B       []interface{}
+	}
+	type C struct {
+		XMLName Name
+		Value   string `xml:"value"`
+	}
+
+	a := new(A)
+	a.B = append(a.B, &C{
+		XMLName: Name{Local: "c"},
+		Value:   "x",
+	})
+
+	b, err := Marshal(a)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if x := string(b); x != "<a><c><value>x</value></c></a>" {
+		t.Fatal(x)
+	}
+	var v A
+	err = Unmarshal(b, &v)
+	if err != nil {
+		t.Fatal(err)
+	}
+}
+
+var encodeTokenTests = []struct {
+	tok  Token
+	want string
+	ok   bool
+}{
+	{StartElement{Name{"space", "local"}, nil}, `<local xmlns="space">`, true},
+	{StartElement{Name{"space", ""}, nil}, "", false},
+	{EndElement{Name{"space", ""}}, "", false},
+	{CharData("foo"), "foo", true},
+	{Comment("foo"), "<!--foo-->", true},
+	{Comment("foo-->"), "", false},
+	{ProcInst{"Target", []byte("Instruction")}, "<?Target Instruction?>", true},
+	{ProcInst{"", []byte("Instruction")}, "", false},
+	{ProcInst{"Target", []byte("Instruction?>")}, "", false},
+	{Directive("foo"), "<!foo>", true},
+	{Directive("foo>"), "", false},
+}
+
+func TestEncodeToken(t *testing.T) {
+	for _, tt := range encodeTokenTests {
+		var buf bytes.Buffer
+		enc := NewEncoder(&buf)
+		err := enc.EncodeToken(tt.tok)
+		switch {
+		case !tt.ok && err == nil:
+			t.Errorf("enc.EncodeToken(%#v): expected error; got none", tt.tok)
+		case tt.ok && err != nil:
+			t.Fatalf("enc.EncodeToken: %v", err)
+		case !tt.ok && err != nil:
+			// expected error, got one
+		}
+		if err := enc.Flush(); err != nil {
+			t.Fatalf("enc.EncodeToken: %v", err)
+		}
+		if got := buf.String(); got != tt.want {
+			t.Errorf("enc.EncodeToken = %s; want: %s", got, tt.want)
+		}
+	}
+}
+
+func TestProcInstEncodeToken(t *testing.T) {
+	var buf bytes.Buffer
+	enc := NewEncoder(&buf)
+
+	if err := enc.EncodeToken(ProcInst{"xml", []byte("Instruction")}); err != nil {
+		t.Fatalf("enc.EncodeToken: expected to be able to encode xml target ProcInst as first token, %s", err)
+	}
+
+	if err := enc.EncodeToken(ProcInst{"Target", []byte("Instruction")}); err != nil {
+		t.Fatalf("enc.EncodeToken: expected to be able to add non-xml target ProcInst")
+	}
+
+	if err := enc.EncodeToken(ProcInst{"xml", []byte("Instruction")}); err == nil {
+		t.Fatalf("enc.EncodeToken: expected to not be allowed to encode xml target ProcInst when not first token")
+	}
+}
+
+func TestDecodeEncode(t *testing.T) {
+	var in, out bytes.Buffer
+	in.WriteString(`<?xml version="1.0" encoding="UTF-8"?>
+<?Target Instruction?>
+<root>
+</root>
+`)
+	dec := NewDecoder(&in)
+	enc := NewEncoder(&out)
+	for tok, err := dec.Token(); err == nil; tok, err = dec.Token() {
+		err = enc.EncodeToken(tok)
+		if err != nil {
+			t.Fatalf("enc.EncodeToken: Unable to encode token (%#v), %v", tok, err)
+		}
+	}
+}
diff --git a/vendor/github.com/vmware/govmomi/vim25/xml/read.go b/vendor/github.com/vmware/govmomi/vim25/xml/read.go
new file mode 100644
index 000000000..12a0ce270
--- /dev/null
+++ b/vendor/github.com/vmware/govmomi/vim25/xml/read.go
@@ -0,0 +1,769 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package xml
+
+import (
+	"bytes"
+	"encoding"
+	"errors"
+	"fmt"
+	"reflect"
+	"strconv"
+	"strings"
+)
+
+// BUG(rsc): Mapping between XML elements and data structures is inherently flawed:
+// an XML element is an order-dependent collection of anonymous
+// values, while a data structure is an order-independent collection
+// of named values.
+// See package json for a textual representation more suitable
+// to data structures.
+
+// Unmarshal parses the XML-encoded data and stores the result in
+// the value pointed to by v, which must be an arbitrary struct,
+// slice, or string. Well-formed data that does not fit into v is
+// discarded.
+//
+// Because Unmarshal uses the reflect package, it can only assign
+// to exported (upper case) fields. Unmarshal uses a case-sensitive
+// comparison to match XML element names to tag values and struct
+// field names.
+//
+// Unmarshal maps an XML element to a struct using the following rules.
+// In the rules, the tag of a field refers to the value associated with the
+// key 'xml' in the struct field's tag (see the example above).
+//
+// * If the struct has a field of type []byte or string with tag
+// ",innerxml", Unmarshal accumulates the raw XML nested inside the
+// element in that field. The rest of the rules still apply.
+//
+// * If the struct has a field named XMLName of type xml.Name,
+// Unmarshal records the element name in that field.
+//
+// * If the XMLName field has an associated tag of the form
+// "name" or "namespace-URL name", the XML element must have
+// the given name (and, optionally, name space) or else Unmarshal
+// returns an error.
+//
+// * If the XML element has an attribute whose name matches a
+// struct field name with an associated tag containing ",attr" or
+// the explicit name in a struct field tag of the form "name,attr",
+// Unmarshal records the attribute value in that field.
+//
+// * If the XML element contains character data, that data is
+// accumulated in the first struct field that has tag ",chardata".
+// The struct field may have type []byte or string.
+// If there is no such field, the character data is discarded.
+//
+// * If the XML element contains comments, they are accumulated in
+// the first struct field that has tag ",comment". The struct
+// field may have type []byte or string. If there is no such
+// field, the comments are discarded.
+//
+// * If the XML element contains a sub-element whose name matches
+// the prefix of a tag formatted as "a" or "a>b>c", unmarshal
+// will descend into the XML structure looking for elements with the
+// given names, and will map the innermost elements to that struct
+// field. A tag starting with ">" is equivalent to one starting
+// with the field name followed by ">".
+//
+// * If the XML element contains a sub-element whose name matches
+// a struct field's XMLName tag and the struct field has no
+// explicit name tag as per the previous rule, unmarshal maps
+// the sub-element to that struct field.
+//
+// * If the XML element contains a sub-element whose name matches a
+// field without any mode flags (",attr", ",chardata", etc), Unmarshal
+// maps the sub-element to that struct field.
+//
+// * If the XML element contains a sub-element that hasn't matched any
+// of the above rules and the struct has a field with tag ",any",
+// unmarshal maps the sub-element to that struct field.
+// +// * An anonymous struct field is handled as if the fields of its +// value were part of the outer struct. +// +// * A struct field with tag "-" is never unmarshalled into. +// +// Unmarshal maps an XML element to a string or []byte by saving the +// concatenation of that element's character data in the string or +// []byte. The saved []byte is never nil. +// +// Unmarshal maps an attribute value to a string or []byte by saving +// the value in the string or slice. +// +// Unmarshal maps an XML element to a slice by extending the length of +// the slice and mapping the element to the newly created value. +// +// Unmarshal maps an XML element or attribute value to a bool by +// setting it to the boolean value represented by the string. +// +// Unmarshal maps an XML element or attribute value to an integer or +// floating-point field by setting the field to the result of +// interpreting the string value in decimal. There is no check for +// overflow. +// +// Unmarshal maps an XML element to an xml.Name by recording the +// element name. +// +// Unmarshal maps an XML element to a pointer by setting the pointer +// to a freshly allocated value and then mapping the element to that value. +// +func Unmarshal(data []byte, v interface{}) error { + return NewDecoder(bytes.NewReader(data)).Decode(v) +} + +// Decode works like xml.Unmarshal, except it reads the decoder +// stream to find the start element. +func (d *Decoder) Decode(v interface{}) error { + return d.DecodeElement(v, nil) +} + +// DecodeElement works like xml.Unmarshal except that it takes +// a pointer to the start XML element to decode into v. +// It is useful when a client reads some raw XML tokens itself +// but also wants to defer to Unmarshal for some elements. +func (d *Decoder) DecodeElement(v interface{}, start *StartElement) error { + val := reflect.ValueOf(v) + if val.Kind() != reflect.Ptr { + return errors.New("non-pointer passed to Unmarshal") + } + return d.unmarshal(val.Elem(), start) +} + +// An UnmarshalError represents an error in the unmarshalling process. +type UnmarshalError string + +func (e UnmarshalError) Error() string { return string(e) } + +// Unmarshaler is the interface implemented by objects that can unmarshal +// an XML element description of themselves. +// +// UnmarshalXML decodes a single XML element +// beginning with the given start element. +// If it returns an error, the outer call to Unmarshal stops and +// returns that error. +// UnmarshalXML must consume exactly one XML element. +// One common implementation strategy is to unmarshal into +// a separate value with a layout matching the expected XML +// using d.DecodeElement, and then to copy the data from +// that value into the receiver. +// Another common strategy is to use d.Token to process the +// XML object one token at a time. +// UnmarshalXML may not use d.RawToken. +type Unmarshaler interface { + UnmarshalXML(d *Decoder, start StartElement) error +} + +// UnmarshalerAttr is the interface implemented by objects that can unmarshal +// an XML attribute description of themselves. +// +// UnmarshalXMLAttr decodes a single XML attribute. +// If it returns an error, the outer call to Unmarshal stops and +// returns that error. +// UnmarshalXMLAttr is used only for struct fields with the +// "attr" option in the field tag. +type UnmarshalerAttr interface { + UnmarshalXMLAttr(attr Attr) error +} + +// receiverType returns the receiver type to use in an expression like "%s.MethodName". 
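+// For example, a value of a named type yields its string form, such as
+// "xml.MyMarshalerTest", while a value of an unnamed type yields a
+// parenthesized form like "(struct {})".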
+func receiverType(val interface{}) string { + t := reflect.TypeOf(val) + if t.Name() != "" { + return t.String() + } + return "(" + t.String() + ")" +} + +// unmarshalInterface unmarshals a single XML element into val. +// start is the opening tag of the element. +func (p *Decoder) unmarshalInterface(val Unmarshaler, start *StartElement) error { + // Record that decoder must stop at end tag corresponding to start. + p.pushEOF() + + p.unmarshalDepth++ + err := val.UnmarshalXML(p, *start) + p.unmarshalDepth-- + if err != nil { + p.popEOF() + return err + } + + if !p.popEOF() { + return fmt.Errorf("xml: %s.UnmarshalXML did not consume entire <%s> element", receiverType(val), start.Name.Local) + } + + return nil +} + +// unmarshalTextInterface unmarshals a single XML element into val. +// The chardata contained in the element (but not its children) +// is passed to the text unmarshaler. +func (p *Decoder) unmarshalTextInterface(val encoding.TextUnmarshaler, start *StartElement) error { + var buf []byte + depth := 1 + for depth > 0 { + t, err := p.Token() + if err != nil { + return err + } + switch t := t.(type) { + case CharData: + if depth == 1 { + buf = append(buf, t...) + } + case StartElement: + depth++ + case EndElement: + depth-- + } + } + return val.UnmarshalText(buf) +} + +// unmarshalAttr unmarshals a single XML attribute into val. +func (p *Decoder) unmarshalAttr(val reflect.Value, attr Attr) error { + if val.Kind() == reflect.Ptr { + if val.IsNil() { + val.Set(reflect.New(val.Type().Elem())) + } + val = val.Elem() + } + + if val.CanInterface() && val.Type().Implements(unmarshalerAttrType) { + // This is an unmarshaler with a non-pointer receiver, + // so it's likely to be incorrect, but we do what we're told. + return val.Interface().(UnmarshalerAttr).UnmarshalXMLAttr(attr) + } + if val.CanAddr() { + pv := val.Addr() + if pv.CanInterface() && pv.Type().Implements(unmarshalerAttrType) { + return pv.Interface().(UnmarshalerAttr).UnmarshalXMLAttr(attr) + } + } + + // Not an UnmarshalerAttr; try encoding.TextUnmarshaler. + if val.CanInterface() && val.Type().Implements(textUnmarshalerType) { + // This is an unmarshaler with a non-pointer receiver, + // so it's likely to be incorrect, but we do what we're told. + return val.Interface().(encoding.TextUnmarshaler).UnmarshalText([]byte(attr.Value)) + } + if val.CanAddr() { + pv := val.Addr() + if pv.CanInterface() && pv.Type().Implements(textUnmarshalerType) { + return pv.Interface().(encoding.TextUnmarshaler).UnmarshalText([]byte(attr.Value)) + } + } + + copyValue(val, []byte(attr.Value)) + return nil +} + +var ( + unmarshalerType = reflect.TypeOf((*Unmarshaler)(nil)).Elem() + unmarshalerAttrType = reflect.TypeOf((*UnmarshalerAttr)(nil)).Elem() + textUnmarshalerType = reflect.TypeOf((*encoding.TextUnmarshaler)(nil)).Elem() +) + +// Find reflect.Type for an element's type attribute. +func (p *Decoder) typeForElement(val reflect.Value, start *StartElement) reflect.Type { + t := "" + for _, a := range start.Attr { + if a.Name == xmlSchemaInstance { + t = a.Value + break + } + } + + if t == "" { + // No type attribute; fall back to looking up type by interface name. + t = val.Type().Name() + } + + // Maybe the type is a basic xsd:* type. + typ := stringToType(t) + if typ != nil { + return typ + } + + // Maybe the type is a custom type. + if p.TypeFunc != nil { + if typ, ok := p.TypeFunc(t); ok { + return typ + } + } + + return nil +} + +// Unmarshal a single XML element into val. 
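+// The checks below run in order: pointers are allocated and interfaces
+// resolved first, then a custom Unmarshaler implementation wins, then
+// encoding.TextUnmarshaler, and only then the built-in per-kind rules.
+// As an illustrative sketch (MyTime is a hypothetical type, not part of
+// this package), a custom implementation reached by this path could be:
+//
+//	type MyTime struct{ time.Time }
+//
+//	func (m *MyTime) UnmarshalXML(d *Decoder, start StartElement) error {
+//		var s string
+//		if err := d.DecodeElement(&s, &start); err != nil {
+//			return err
+//		}
+//		t, err := time.Parse(time.RFC3339, s)
+//		if err != nil {
+//			return err
+//		}
+//		m.Time = t
+//		return nil
+//	}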
+func (p *Decoder) unmarshal(val reflect.Value, start *StartElement) error { + // Find start element if we need it. + if start == nil { + for { + tok, err := p.Token() + if err != nil { + return err + } + if t, ok := tok.(StartElement); ok { + start = &t + break + } + } + } + + // Try to figure out type for empty interface values. + if val.Kind() == reflect.Interface && val.IsNil() { + typ := p.typeForElement(val, start) + if typ != nil { + pval := reflect.New(typ).Elem() + err := p.unmarshal(pval, start) + if err != nil { + return err + } + + for i := 0; i < 2; i++ { + if typ.Implements(val.Type()) { + val.Set(pval) + return nil + } + + typ = reflect.PtrTo(typ) + pval = pval.Addr() + } + + val.Set(pval) + return nil + } + } + + // Load value from interface, but only if the result will be + // usefully addressable. + if val.Kind() == reflect.Interface && !val.IsNil() { + e := val.Elem() + if e.Kind() == reflect.Ptr && !e.IsNil() { + val = e + } + } + + if val.Kind() == reflect.Ptr { + if val.IsNil() { + val.Set(reflect.New(val.Type().Elem())) + } + val = val.Elem() + } + + if val.CanInterface() && val.Type().Implements(unmarshalerType) { + // This is an unmarshaler with a non-pointer receiver, + // so it's likely to be incorrect, but we do what we're told. + return p.unmarshalInterface(val.Interface().(Unmarshaler), start) + } + + if val.CanAddr() { + pv := val.Addr() + if pv.CanInterface() && pv.Type().Implements(unmarshalerType) { + return p.unmarshalInterface(pv.Interface().(Unmarshaler), start) + } + } + + if val.CanInterface() && val.Type().Implements(textUnmarshalerType) { + return p.unmarshalTextInterface(val.Interface().(encoding.TextUnmarshaler), start) + } + + if val.CanAddr() { + pv := val.Addr() + if pv.CanInterface() && pv.Type().Implements(textUnmarshalerType) { + return p.unmarshalTextInterface(pv.Interface().(encoding.TextUnmarshaler), start) + } + } + + var ( + data []byte + saveData reflect.Value + comment []byte + saveComment reflect.Value + saveXML reflect.Value + saveXMLIndex int + saveXMLData []byte + saveAny reflect.Value + sv reflect.Value + tinfo *typeInfo + err error + ) + + switch v := val; v.Kind() { + default: + return errors.New("unknown type " + v.Type().String()) + + case reflect.Interface: + // TODO: For now, simply ignore the field. In the near + // future we may choose to unmarshal the start + // element on it, if not nil. + return p.Skip() + + case reflect.Slice: + typ := v.Type() + if typ.Elem().Kind() == reflect.Uint8 { + // []byte + saveData = v + break + } + + // Slice of element values. + // Grow slice. + n := v.Len() + if n >= v.Cap() { + ncap := 2 * n + if ncap < 4 { + ncap = 4 + } + new := reflect.MakeSlice(typ, n, ncap) + reflect.Copy(new, v) + v.Set(new) + } + v.SetLen(n + 1) + + // Recur to read element into slice. + if err := p.unmarshal(v.Index(n), start); err != nil { + v.SetLen(n) + return err + } + return nil + + case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr, reflect.String: + saveData = v + + case reflect.Struct: + typ := v.Type() + if typ == nameType { + v.Set(reflect.ValueOf(start.Name)) + break + } + + sv = v + tinfo, err = getTypeInfo(typ) + if err != nil { + return err + } + + // Validate and assign element name. 
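+		// For example, given a struct declared as
+		//
+		//	type Feed struct {
+		//		XMLName Name `xml:"http://www.w3.org/2005/Atom feed"`
+		//	}
+		//
+		// an element named anything other than <feed>, or one in a
+		// different name space, is rejected here with an UnmarshalError.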
+ if tinfo.xmlname != nil { + finfo := tinfo.xmlname + if finfo.name != "" && finfo.name != start.Name.Local { + return UnmarshalError("expected element type <" + finfo.name + "> but have <" + start.Name.Local + ">") + } + if finfo.xmlns != "" && finfo.xmlns != start.Name.Space { + e := "expected element <" + finfo.name + "> in name space " + finfo.xmlns + " but have " + if start.Name.Space == "" { + e += "no name space" + } else { + e += start.Name.Space + } + return UnmarshalError(e) + } + fv := finfo.value(sv) + if _, ok := fv.Interface().(Name); ok { + fv.Set(reflect.ValueOf(start.Name)) + } + } + + // Assign attributes. + // Also, determine whether we need to save character data or comments. + for i := range tinfo.fields { + finfo := &tinfo.fields[i] + switch finfo.flags & fMode { + case fAttr: + strv := finfo.value(sv) + // Look for attribute. + for _, a := range start.Attr { + if a.Name.Local == finfo.name && (finfo.xmlns == "" || finfo.xmlns == a.Name.Space) { + if err := p.unmarshalAttr(strv, a); err != nil { + return err + } + break + } + } + + case fCharData: + if !saveData.IsValid() { + saveData = finfo.value(sv) + } + + case fComment: + if !saveComment.IsValid() { + saveComment = finfo.value(sv) + } + + case fAny, fAny | fElement: + if !saveAny.IsValid() { + saveAny = finfo.value(sv) + } + + case fInnerXml: + if !saveXML.IsValid() { + saveXML = finfo.value(sv) + if p.saved == nil { + saveXMLIndex = 0 + p.saved = new(bytes.Buffer) + } else { + saveXMLIndex = p.savedOffset() + } + } + } + } + } + + // Find end element. + // Process sub-elements along the way. +Loop: + for { + var savedOffset int + if saveXML.IsValid() { + savedOffset = p.savedOffset() + } + tok, err := p.Token() + if err != nil { + return err + } + switch t := tok.(type) { + case StartElement: + consumed := false + if sv.IsValid() { + consumed, err = p.unmarshalPath(tinfo, sv, nil, &t) + if err != nil { + return err + } + if !consumed && saveAny.IsValid() { + consumed = true + if err := p.unmarshal(saveAny, &t); err != nil { + return err + } + } + } + if !consumed { + if err := p.Skip(); err != nil { + return err + } + } + + case EndElement: + if saveXML.IsValid() { + saveXMLData = p.saved.Bytes()[saveXMLIndex:savedOffset] + if saveXMLIndex == 0 { + p.saved = nil + } + } + break Loop + + case CharData: + if saveData.IsValid() { + data = append(data, t...) + } + + case Comment: + if saveComment.IsValid() { + comment = append(comment, t...) 
+ } + } + } + + if saveData.IsValid() && saveData.CanInterface() && saveData.Type().Implements(textUnmarshalerType) { + if err := saveData.Interface().(encoding.TextUnmarshaler).UnmarshalText(data); err != nil { + return err + } + saveData = reflect.Value{} + } + + if saveData.IsValid() && saveData.CanAddr() { + pv := saveData.Addr() + if pv.CanInterface() && pv.Type().Implements(textUnmarshalerType) { + if err := pv.Interface().(encoding.TextUnmarshaler).UnmarshalText(data); err != nil { + return err + } + saveData = reflect.Value{} + } + } + + if err := copyValue(saveData, data); err != nil { + return err + } + + switch t := saveComment; t.Kind() { + case reflect.String: + t.SetString(string(comment)) + case reflect.Slice: + t.Set(reflect.ValueOf(comment)) + } + + switch t := saveXML; t.Kind() { + case reflect.String: + t.SetString(string(saveXMLData)) + case reflect.Slice: + t.Set(reflect.ValueOf(saveXMLData)) + } + + return nil +} + +func copyValue(dst reflect.Value, src []byte) (err error) { + dst0 := dst + + if dst.Kind() == reflect.Ptr { + if dst.IsNil() { + dst.Set(reflect.New(dst.Type().Elem())) + } + dst = dst.Elem() + } + + // Save accumulated data. + switch dst.Kind() { + case reflect.Invalid: + // Probably a comment. + default: + return errors.New("cannot unmarshal into " + dst0.Type().String()) + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + itmp, err := strconv.ParseInt(string(src), 10, dst.Type().Bits()) + if err != nil { + return err + } + dst.SetInt(itmp) + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + var utmp uint64 + if len(src) > 0 && src[0] == '-' { + // Negative value for unsigned field. + // Assume it was serialized following two's complement. + itmp, err := strconv.ParseInt(string(src), 10, dst.Type().Bits()) + if err != nil { + return err + } + // Reinterpret value based on type width. + switch dst.Type().Bits() { + case 8: + utmp = uint64(uint8(itmp)) + case 16: + utmp = uint64(uint16(itmp)) + case 32: + utmp = uint64(uint32(itmp)) + case 64: + utmp = uint64(uint64(itmp)) + } + } else { + utmp, err = strconv.ParseUint(string(src), 10, dst.Type().Bits()) + if err != nil { + return err + } + } + dst.SetUint(utmp) + case reflect.Float32, reflect.Float64: + ftmp, err := strconv.ParseFloat(string(src), dst.Type().Bits()) + if err != nil { + return err + } + dst.SetFloat(ftmp) + case reflect.Bool: + value, err := strconv.ParseBool(strings.TrimSpace(string(src))) + if err != nil { + return err + } + dst.SetBool(value) + case reflect.String: + dst.SetString(string(src)) + case reflect.Slice: + if len(src) == 0 { + // non-nil to flag presence + src = []byte{} + } + dst.SetBytes(src) + } + return nil +} + +// unmarshalPath walks down an XML structure looking for wanted +// paths, and calls unmarshal on them. +// The consumed result tells whether XML elements have been consumed +// from the Decoder until start's matching end element, or if it's +// still untouched because start is uninteresting for sv's fields. 
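+//
+// For example, a struct field tagged `xml:"Items>Item1>Value"` makes
+// unmarshalPath consume a matching <Items> start element, recurse for the
+// nested <Item1> elements, and unmarshal the innermost <Value> elements
+// into the field, while unrelated sibling elements are skipped.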
+func (p *Decoder) unmarshalPath(tinfo *typeInfo, sv reflect.Value, parents []string, start *StartElement) (consumed bool, err error) { + recurse := false +Loop: + for i := range tinfo.fields { + finfo := &tinfo.fields[i] + if finfo.flags&fElement == 0 || len(finfo.parents) < len(parents) || finfo.xmlns != "" && finfo.xmlns != start.Name.Space { + continue + } + for j := range parents { + if parents[j] != finfo.parents[j] { + continue Loop + } + } + if len(finfo.parents) == len(parents) && finfo.name == start.Name.Local { + // It's a perfect match, unmarshal the field. + return true, p.unmarshal(finfo.value(sv), start) + } + if len(finfo.parents) > len(parents) && finfo.parents[len(parents)] == start.Name.Local { + // It's a prefix for the field. Break and recurse + // since it's not ok for one field path to be itself + // the prefix for another field path. + recurse = true + + // We can reuse the same slice as long as we + // don't try to append to it. + parents = finfo.parents[:len(parents)+1] + break + } + } + if !recurse { + // We have no business with this element. + return false, nil + } + // The element is not a perfect match for any field, but one + // or more fields have the path to this element as a parent + // prefix. Recurse and attempt to match these. + for { + var tok Token + tok, err = p.Token() + if err != nil { + return true, err + } + switch t := tok.(type) { + case StartElement: + consumed2, err := p.unmarshalPath(tinfo, sv, parents, &t) + if err != nil { + return true, err + } + if !consumed2 { + if err := p.Skip(); err != nil { + return true, err + } + } + case EndElement: + return true, nil + } + } +} + +// Skip reads tokens until it has consumed the end element +// matching the most recent start element already consumed. +// It recurs if it encounters a start element, so it can be used to +// skip nested structures. +// It returns nil if it finds an end element matching the start +// element; otherwise it returns an error describing the problem. +func (d *Decoder) Skip() error { + for { + tok, err := d.Token() + if err != nil { + return err + } + switch tok.(type) { + case StartElement: + if err := d.Skip(); err != nil { + return err + } + case EndElement: + return nil + } + } +} diff --git a/vendor/github.com/vmware/govmomi/vim25/xml/read_test.go b/vendor/github.com/vmware/govmomi/vim25/xml/read_test.go new file mode 100644 index 000000000..77f43023e --- /dev/null +++ b/vendor/github.com/vmware/govmomi/vim25/xml/read_test.go @@ -0,0 +1,762 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package xml + +import ( + "io" + "reflect" + "strings" + "testing" + "time" +) + +// Stripped down Atom feed data structures. + +func TestUnmarshalFeed(t *testing.T) { + var f Feed + if err := Unmarshal([]byte(atomFeedString), &f); err != nil { + t.Fatalf("Unmarshal: %s", err) + } + if !reflect.DeepEqual(f, atomFeed) { + t.Fatalf("have %#v\nwant %#v", f, atomFeed) + } +} + +// hget http://codereview.appspot.com/rss/mine/rsc +const atomFeedString = ` + +Code Review - My issueshttp://codereview.appspot.com/rietveld<>rietveld: an attempt at pubsubhubbub +2009-10-04T01:35:58+00:00email-address-removedurn:md5:134d9179c41f806be79b3a5f7877d19a + An attempt at adding pubsubhubbub support to Rietveld. +http://code.google.com/p/pubsubhubbub +http://code.google.com/p/rietveld/issues/detail?id=155 + +The server side of the protocol is trivial: + 1. 
add a &lt;link rel=&quot;hub&quot; href=&quot;hub-server&quot;&gt; tag to all + feeds that will be pubsubhubbubbed. + 2. every time one of those feeds changes, tell the hub + with a simple POST request. + +I have tested this by adding debug prints to a local hub +server and checking that the server got the right publish +requests. + +I can&#39;t quite get the server to work, but I think the bug +is not in my code. I think that the server expects to be +able to grab the feed and see the feed&#39;s actual URL in +the link rel=&quot;self&quot;, but the default value for that drops +the :port from the URL, and I cannot for the life of me +figure out how to get the Atom generator deep inside +django not to do that, or even where it is doing that, +or even what code is running to generate the Atom feed. +(I thought I knew but I added some assert False statements +and it kept running!) + +Ignoring that particular problem, I would appreciate +feedback on the right way to get the two values at +the top of feeds.py marked NOTE(rsc). + + +rietveld: correct tab handling +2009-10-03T23:02:17+00:00email-address-removedurn:md5:0a2a4f19bb815101f0ba2904aed7c35a + This fixes the buggy tab rendering that can be seen at +http://codereview.appspot.com/116075/diff/1/2 + +The fundamental problem was that the tab code was +not being told what column the text began in, so it +didn&#39;t know where to put the tab stops. Another problem +was that some of the code assumed that string byte +offsets were the same as column offsets, which is only +true if there are no tabs. + +In the process of fixing this, I cleaned up the arguments +to Fold and ExpandTabs and renamed them Break and +_ExpandTabs so that I could be sure that I found all the +call sites. I also wanted to verify that ExpandTabs was +not being used from outside intra_region_diff.py. + + + ` + +type Feed struct { + XMLName Name `xml:"http://www.w3.org/2005/Atom feed"` + Title string `xml:"title"` + Id string `xml:"id"` + Link []Link `xml:"link"` + Updated time.Time `xml:"updated,attr"` + Author Person `xml:"author"` + Entry []Entry `xml:"entry"` +} + +type Entry struct { + Title string `xml:"title"` + Id string `xml:"id"` + Link []Link `xml:"link"` + Updated time.Time `xml:"updated"` + Author Person `xml:"author"` + Summary Text `xml:"summary"` +} + +type Link struct { + Rel string `xml:"rel,attr,omitempty"` + Href string `xml:"href,attr"` +} + +type Person struct { + Name string `xml:"name"` + URI string `xml:"uri"` + Email string `xml:"email"` + InnerXML string `xml:",innerxml"` +} + +type Text struct { + Type string `xml:"type,attr,omitempty"` + Body string `xml:",chardata"` +} + +var atomFeed = Feed{ + XMLName: Name{"http://www.w3.org/2005/Atom", "feed"}, + Title: "Code Review - My issues", + Link: []Link{ + {Rel: "alternate", Href: "http://codereview.appspot.com/"}, + {Rel: "self", Href: "http://codereview.appspot.com/rss/mine/rsc"}, + }, + Id: "http://codereview.appspot.com/", + Updated: ParseTime("2009-10-04T01:35:58+00:00"), + Author: Person{ + Name: "rietveld<>", + InnerXML: "rietveld<>", + }, + Entry: []Entry{ + { + Title: "rietveld: an attempt at pubsubhubbub\n", + Link: []Link{ + {Rel: "alternate", Href: "http://codereview.appspot.com/126085"}, + }, + Updated: ParseTime("2009-10-04T01:35:58+00:00"), + Author: Person{ + Name: "email-address-removed", + InnerXML: "email-address-removed", + }, + Id: "urn:md5:134d9179c41f806be79b3a5f7877d19a", + Summary: Text{ + Type: "html", + Body: ` + An attempt at adding pubsubhubbub support to Rietveld. 
+http://code.google.com/p/pubsubhubbub +http://code.google.com/p/rietveld/issues/detail?id=155 + +The server side of the protocol is trivial: + 1. add a <link rel="hub" href="hub-server"> tag to all + feeds that will be pubsubhubbubbed. + 2. every time one of those feeds changes, tell the hub + with a simple POST request. + +I have tested this by adding debug prints to a local hub +server and checking that the server got the right publish +requests. + +I can't quite get the server to work, but I think the bug +is not in my code. I think that the server expects to be +able to grab the feed and see the feed's actual URL in +the link rel="self", but the default value for that drops +the :port from the URL, and I cannot for the life of me +figure out how to get the Atom generator deep inside +django not to do that, or even where it is doing that, +or even what code is running to generate the Atom feed. +(I thought I knew but I added some assert False statements +and it kept running!) + +Ignoring that particular problem, I would appreciate +feedback on the right way to get the two values at +the top of feeds.py marked NOTE(rsc). + + +`, + }, + }, + { + Title: "rietveld: correct tab handling\n", + Link: []Link{ + {Rel: "alternate", Href: "http://codereview.appspot.com/124106"}, + }, + Updated: ParseTime("2009-10-03T23:02:17+00:00"), + Author: Person{ + Name: "email-address-removed", + InnerXML: "email-address-removed", + }, + Id: "urn:md5:0a2a4f19bb815101f0ba2904aed7c35a", + Summary: Text{ + Type: "html", + Body: ` + This fixes the buggy tab rendering that can be seen at +http://codereview.appspot.com/116075/diff/1/2 + +The fundamental problem was that the tab code was +not being told what column the text began in, so it +didn't know where to put the tab stops. Another problem +was that some of the code assumed that string byte +offsets were the same as column offsets, which is only +true if there are no tabs. + +In the process of fixing this, I cleaned up the arguments +to Fold and ExpandTabs and renamed them Break and +_ExpandTabs so that I could be sure that I found all the +call sites. I also wanted to verify that ExpandTabs was +not being used from outside intra_region_diff.py. 
+
+
+`,
+			},
+		},
+	},
+}
+
+const pathTestString = `
+<Result>
+    <Before>1</Before>
+    <Items>
+        <Item1>
+            <Value>A</Value>
+        </Item1>
+        <Item2>
+            <Value>B</Value>
+        </Item2>
+        <Item1>
+            <Value>C</Value>
+            <Value>D</Value>
+        </Item1>
+        <_>
+            <Value>E</Value>
+        </_>
+    </Items>
+    <After>2</After>
+</Result>
+`
+
+type PathTestItem struct {
+	Value string
+}
+
+type PathTestA struct {
+	Items         []PathTestItem `xml:">Item1"`
+	Before, After string
+}
+
+type PathTestB struct {
+	Other         []PathTestItem `xml:"Items>Item1"`
+	Before, After string
+}
+
+type PathTestC struct {
+	Values1       []string `xml:"Items>Item1>Value"`
+	Values2       []string `xml:"Items>Item2>Value"`
+	Before, After string
+}
+
+type PathTestSet struct {
+	Item1 []PathTestItem
+}
+
+type PathTestD struct {
+	Other         PathTestSet `xml:"Items"`
+	Before, After string
+}
+
+type PathTestE struct {
+	Underline     string `xml:"Items>_>Value"`
+	Before, After string
+}
+
+var pathTests = []interface{}{
+	&PathTestA{Items: []PathTestItem{{"A"}, {"D"}}, Before: "1", After: "2"},
+	&PathTestB{Other: []PathTestItem{{"A"}, {"D"}}, Before: "1", After: "2"},
+	&PathTestC{Values1: []string{"A", "C", "D"}, Values2: []string{"B"}, Before: "1", After: "2"},
+	&PathTestD{Other: PathTestSet{Item1: []PathTestItem{{"A"}, {"D"}}}, Before: "1", After: "2"},
+	&PathTestE{Underline: "E", Before: "1", After: "2"},
+}
+
+func TestUnmarshalPaths(t *testing.T) {
+	for _, pt := range pathTests {
+		v := reflect.New(reflect.TypeOf(pt).Elem()).Interface()
+		if err := Unmarshal([]byte(pathTestString), v); err != nil {
+			t.Fatalf("Unmarshal: %s", err)
+		}
+		if !reflect.DeepEqual(v, pt) {
+			t.Fatalf("have %#v\nwant %#v", v, pt)
+		}
+	}
+}
+
+type BadPathTestA struct {
+	First  string `xml:"items>item1"`
+	Other  string `xml:"items>item2"`
+	Second string `xml:"items"`
+}
+
+type BadPathTestB struct {
+	Other  string `xml:"items>item2>value"`
+	First  string `xml:"items>item1"`
+	Second string `xml:"items>item1>value"`
+}
+
+type BadPathTestC struct {
+	First  string
+	Second string `xml:"First"`
+}
+
+type BadPathTestD struct {
+	BadPathEmbeddedA
+	BadPathEmbeddedB
+}
+
+type BadPathEmbeddedA struct {
+	First string
+}
+
+type BadPathEmbeddedB struct {
+	Second string `xml:"First"`
+}
+
+var badPathTests = []struct {
+	v, e interface{}
+}{
+	{&BadPathTestA{}, &TagPathError{reflect.TypeOf(BadPathTestA{}), "First", "items>item1", "Second", "items"}},
+	{&BadPathTestB{}, &TagPathError{reflect.TypeOf(BadPathTestB{}), "First", "items>item1", "Second", "items>item1>value"}},
+	{&BadPathTestC{}, &TagPathError{reflect.TypeOf(BadPathTestC{}), "First", "", "Second", "First"}},
+	{&BadPathTestD{}, &TagPathError{reflect.TypeOf(BadPathTestD{}), "First", "", "Second", "First"}},
+}
+
+func TestUnmarshalBadPaths(t *testing.T) {
+	for _, tt := range badPathTests {
+		err := Unmarshal([]byte(pathTestString), tt.v)
+		if !reflect.DeepEqual(err, tt.e) {
+			t.Fatalf("Unmarshal with %#v didn't fail properly:\nhave %#v,\nwant %#v", tt.v, err, tt.e)
+		}
+	}
+}
+
+const OK = "OK"
+const withoutNameTypeData = `
+<?xml version="1.0" charset="utf-8"?>
+<Test3 Attr="OK" />`
+
+type TestThree struct {
+	XMLName Name   `xml:"Test3"`
+	Attr    string `xml:",attr"`
+}
+
+func TestUnmarshalWithoutNameType(t *testing.T) {
+	var x TestThree
+	if err := Unmarshal([]byte(withoutNameTypeData), &x); err != nil {
+		t.Fatalf("Unmarshal: %s", err)
+	}
+	if x.Attr != OK {
+		t.Fatalf("have %v\nwant %v", x.Attr, OK)
+	}
+}
+
+func TestUnmarshalAttr(t *testing.T) {
+	type ParamVal struct {
+		Int int `xml:"int,attr"`
+	}
+
+	type ParamPtr struct {
+		Int *int `xml:"int,attr"`
+	}
+
+	type ParamStringPtr struct {
+		Int *string `xml:"int,attr"`
+	}
+
+	x := []byte(`<Param int="1" />`)
+
+	p1 := &ParamPtr{}
+	if err := Unmarshal(x, p1); err != nil {
+		t.Fatalf("Unmarshal: %s", err)
+	}
+	if p1.Int == nil
{ + t.Fatalf("Unmarshal failed in to *int field") + } else if *p1.Int != 1 { + t.Fatalf("Unmarshal with %s failed:\nhave %#v,\n want %#v", x, p1.Int, 1) + } + + p2 := &ParamVal{} + if err := Unmarshal(x, p2); err != nil { + t.Fatalf("Unmarshal: %s", err) + } + if p2.Int != 1 { + t.Fatalf("Unmarshal with %s failed:\nhave %#v,\n want %#v", x, p2.Int, 1) + } + + p3 := &ParamStringPtr{} + if err := Unmarshal(x, p3); err != nil { + t.Fatalf("Unmarshal: %s", err) + } + if p3.Int == nil { + t.Fatalf("Unmarshal failed in to *string field") + } else if *p3.Int != "1" { + t.Fatalf("Unmarshal with %s failed:\nhave %#v,\n want %#v", x, p3.Int, 1) + } +} + +type Tables struct { + HTable string `xml:"http://www.w3.org/TR/html4/ table"` + FTable string `xml:"http://www.w3schools.com/furniture table"` +} + +var tables = []struct { + xml string + tab Tables + ns string +}{ + { + xml: `` + + `hello
` + + `world
` + + `
`, + tab: Tables{"hello", "world"}, + }, + { + xml: `` + + `world
` + + `hello
` + + `
`, + tab: Tables{"hello", "world"}, + }, + { + xml: `` + + `world` + + `hello` + + ``, + tab: Tables{"hello", "world"}, + }, + { + xml: `` + + `bogus
` + + `
`, + tab: Tables{}, + }, + { + xml: `` + + `only
` + + `
`, + tab: Tables{HTable: "only"}, + ns: "http://www.w3.org/TR/html4/", + }, + { + xml: `` + + `only
` + + `
`, + tab: Tables{FTable: "only"}, + ns: "http://www.w3schools.com/furniture", + }, + { + xml: `` + + `only
` + + `
`, + tab: Tables{}, + ns: "something else entirely", + }, +} + +func TestUnmarshalNS(t *testing.T) { + for i, tt := range tables { + var dst Tables + var err error + if tt.ns != "" { + d := NewDecoder(strings.NewReader(tt.xml)) + d.DefaultSpace = tt.ns + err = d.Decode(&dst) + } else { + err = Unmarshal([]byte(tt.xml), &dst) + } + if err != nil { + t.Errorf("#%d: Unmarshal: %v", i, err) + continue + } + want := tt.tab + if dst != want { + t.Errorf("#%d: dst=%+v, want %+v", i, dst, want) + } + } +} + +func TestMarshalNS(t *testing.T) { + dst := Tables{"hello", "world"} + data, err := Marshal(&dst) + if err != nil { + t.Fatalf("Marshal: %v", err) + } + want := `hello
world
` + str := string(data) + if str != want { + t.Errorf("have: %q\nwant: %q\n", str, want) + } +} + +type TableAttrs struct { + TAttr TAttr +} + +type TAttr struct { + HTable string `xml:"http://www.w3.org/TR/html4/ table,attr"` + FTable string `xml:"http://www.w3schools.com/furniture table,attr"` + Lang string `xml:"http://www.w3.org/XML/1998/namespace lang,attr,omitempty"` + Other1 string `xml:"http://golang.org/xml/ other,attr,omitempty"` + Other2 string `xml:"http://golang.org/xmlfoo/ other,attr,omitempty"` + Other3 string `xml:"http://golang.org/json/ other,attr,omitempty"` + Other4 string `xml:"http://golang.org/2/json/ other,attr,omitempty"` +} + +var tableAttrs = []struct { + xml string + tab TableAttrs + ns string +}{ + { + xml: ``, + tab: TableAttrs{TAttr{HTable: "hello", FTable: "world"}}, + }, + { + xml: ``, + tab: TableAttrs{TAttr{HTable: "hello", FTable: "world"}}, + }, + { + xml: ``, + tab: TableAttrs{TAttr{HTable: "hello", FTable: "world"}}, + }, + { + // Default space does not apply to attribute names. + xml: ``, + tab: TableAttrs{TAttr{HTable: "hello", FTable: ""}}, + }, + { + // Default space does not apply to attribute names. + xml: ``, + tab: TableAttrs{TAttr{HTable: "", FTable: "world"}}, + }, + { + xml: ``, + tab: TableAttrs{}, + }, + { + // Default space does not apply to attribute names. + xml: ``, + tab: TableAttrs{TAttr{HTable: "hello", FTable: ""}}, + ns: "http://www.w3schools.com/furniture", + }, + { + // Default space does not apply to attribute names. + xml: ``, + tab: TableAttrs{TAttr{HTable: "", FTable: "world"}}, + ns: "http://www.w3.org/TR/html4/", + }, + { + xml: ``, + tab: TableAttrs{}, + ns: "something else entirely", + }, +} + +func TestUnmarshalNSAttr(t *testing.T) { + for i, tt := range tableAttrs { + var dst TableAttrs + var err error + if tt.ns != "" { + d := NewDecoder(strings.NewReader(tt.xml)) + d.DefaultSpace = tt.ns + err = d.Decode(&dst) + } else { + err = Unmarshal([]byte(tt.xml), &dst) + } + if err != nil { + t.Errorf("#%d: Unmarshal: %v", i, err) + continue + } + want := tt.tab + if dst != want { + t.Errorf("#%d: dst=%+v, want %+v", i, dst, want) + } + } +} + +func TestMarshalNSAttr(t *testing.T) { + src := TableAttrs{TAttr{"hello", "world", "en_US", "other1", "other2", "other3", "other4"}} + data, err := Marshal(&src) + if err != nil { + t.Fatalf("Marshal: %v", err) + } + want := `` + str := string(data) + if str != want { + t.Errorf("Marshal:\nhave: %#q\nwant: %#q\n", str, want) + } + + var dst TableAttrs + if err := Unmarshal(data, &dst); err != nil { + t.Errorf("Unmarshal: %v", err) + } + + if dst != src { + t.Errorf("Unmarshal = %q, want %q", dst, src) + } +} + +type MyCharData struct { + body string +} + +func (m *MyCharData) UnmarshalXML(d *Decoder, start StartElement) error { + for { + t, err := d.Token() + if err == io.EOF { // found end of element + break + } + if err != nil { + return err + } + if char, ok := t.(CharData); ok { + m.body += string(char) + } + } + return nil +} + +var _ Unmarshaler = (*MyCharData)(nil) + +func (m *MyCharData) UnmarshalXMLAttr(attr Attr) error { + panic("must not call") +} + +type MyAttr struct { + attr string +} + +func (m *MyAttr) UnmarshalXMLAttr(attr Attr) error { + m.attr = attr.Value + return nil +} + +var _ UnmarshalerAttr = (*MyAttr)(nil) + +type MyStruct struct { + Data *MyCharData + Attr *MyAttr `xml:",attr"` + + Data2 MyCharData + Attr2 MyAttr `xml:",attr"` +} + +func TestUnmarshaler(t *testing.T) { + xml := ` + + hello world + howdy world + + ` + + var m MyStruct + if err := 
Unmarshal([]byte(xml), &m); err != nil { + t.Fatal(err) + } + + if m.Data == nil || m.Attr == nil || m.Data.body != "hello world" || m.Attr.attr != "attr1" || m.Data2.body != "howdy world" || m.Attr2.attr != "attr2" { + t.Errorf("m=%#+v\n", m) + } +} + +type Pea struct { + Cotelydon string +} + +type Pod struct { + Pea interface{} `xml:"Pea"` +} + +// https://code.google.com/p/go/issues/detail?id=6836 +func TestUnmarshalIntoInterface(t *testing.T) { + pod := new(Pod) + pod.Pea = new(Pea) + xml := `Green stuff` + err := Unmarshal([]byte(xml), pod) + if err != nil { + t.Fatalf("failed to unmarshal %q: %v", xml, err) + } + pea, ok := pod.Pea.(*Pea) + if !ok { + t.Fatalf("unmarshalled into wrong type: have %T want *Pea", pod.Pea) + } + have, want := pea.Cotelydon, "Green stuff" + if have != want { + t.Errorf("failed to unmarshal into interface, have %q want %q", have, want) + } +} + +// https://github.com/vmware/govmomi/issues/246 +func TestNegativeValuesUnsignedFields(t *testing.T) { + type T struct { + I string + O interface{} + U8 uint8 `xml:"u8"` + U16 uint16 `xml:"u16"` + U32 uint32 `xml:"u32"` + U64 uint64 `xml:"u64"` + } + + var tests = []T{ + {I: "-128", O: uint8(0x80)}, + {I: "-1", O: uint8(0xff)}, + {I: "-32768", O: uint16(0x8000)}, + {I: "-1", O: uint16(0xffff)}, + {I: "-2147483648", O: uint32(0x80000000)}, + {I: "-1", O: uint32(0xffffffff)}, + {I: "-9223372036854775808", O: uint64(0x8000000000000000)}, + {I: "-1", O: uint64(0xffffffffffffffff)}, + } + + for _, test := range tests { + err := Unmarshal([]byte(test.I), &test) + if err != nil { + t.Errorf("Unmarshal error: %v", err) + continue + } + + var expected = test.O + var actual interface{} + switch reflect.ValueOf(test.O).Type().Kind() { + case reflect.Uint8: + actual = test.U8 + case reflect.Uint16: + actual = test.U16 + case reflect.Uint32: + actual = test.U32 + case reflect.Uint64: + actual = test.U64 + } + + if !reflect.DeepEqual(actual, expected) { + t.Errorf("Actual: %v, expected: %v", actual, expected) + } + } +} diff --git a/vendor/github.com/vmware/govmomi/vim25/xml/typeinfo.go b/vendor/github.com/vmware/govmomi/vim25/xml/typeinfo.go new file mode 100644 index 000000000..086e83b69 --- /dev/null +++ b/vendor/github.com/vmware/govmomi/vim25/xml/typeinfo.go @@ -0,0 +1,366 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package xml + +import ( + "fmt" + "reflect" + "strings" + "sync" +) + +// typeInfo holds details for the xml representation of a type. +type typeInfo struct { + xmlname *fieldInfo + fields []fieldInfo +} + +// fieldInfo holds details for the xml representation of a single field. +type fieldInfo struct { + idx []int + name string + xmlns string + flags fieldFlags + parents []string +} + +type fieldFlags int + +const ( + fElement fieldFlags = 1 << iota + fAttr + fCharData + fInnerXml + fComment + fAny + + fOmitEmpty + fTypeAttr + + fMode = fElement | fAttr | fCharData | fInnerXml | fComment | fAny +) + +var tinfoMap = make(map[reflect.Type]*typeInfo) +var tinfoLock sync.RWMutex + +var nameType = reflect.TypeOf(Name{}) + +// getTypeInfo returns the typeInfo structure with details necessary +// for marshalling and unmarshalling typ. 
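+// Results are cached in tinfoMap under tinfoLock, so the reflection walk
+// below runs at most once per type; fields of anonymous (embedded) structs
+// are flattened into the outer type's list with their index paths prefixed.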
+func getTypeInfo(typ reflect.Type) (*typeInfo, error) { + tinfoLock.RLock() + tinfo, ok := tinfoMap[typ] + tinfoLock.RUnlock() + if ok { + return tinfo, nil + } + tinfo = &typeInfo{} + if typ.Kind() == reflect.Struct && typ != nameType { + n := typ.NumField() + for i := 0; i < n; i++ { + f := typ.Field(i) + if f.PkgPath != "" || f.Tag.Get("xml") == "-" { + continue // Private field + } + + // For embedded structs, embed its fields. + if f.Anonymous { + t := f.Type + if t.Kind() == reflect.Ptr { + t = t.Elem() + } + if t.Kind() == reflect.Struct { + inner, err := getTypeInfo(t) + if err != nil { + return nil, err + } + if tinfo.xmlname == nil { + tinfo.xmlname = inner.xmlname + } + for _, finfo := range inner.fields { + finfo.idx = append([]int{i}, finfo.idx...) + if err := addFieldInfo(typ, tinfo, &finfo); err != nil { + return nil, err + } + } + continue + } + } + + finfo, err := structFieldInfo(typ, &f) + if err != nil { + return nil, err + } + + if f.Name == "XMLName" { + tinfo.xmlname = finfo + continue + } + + // Add the field if it doesn't conflict with other fields. + if err := addFieldInfo(typ, tinfo, finfo); err != nil { + return nil, err + } + } + } + tinfoLock.Lock() + tinfoMap[typ] = tinfo + tinfoLock.Unlock() + return tinfo, nil +} + +// structFieldInfo builds and returns a fieldInfo for f. +func structFieldInfo(typ reflect.Type, f *reflect.StructField) (*fieldInfo, error) { + finfo := &fieldInfo{idx: f.Index} + + // Split the tag from the xml namespace if necessary. + tag := f.Tag.Get("xml") + if i := strings.Index(tag, " "); i >= 0 { + finfo.xmlns, tag = tag[:i], tag[i+1:] + } + + // Parse flags. + tokens := strings.Split(tag, ",") + if len(tokens) == 1 { + finfo.flags = fElement + } else { + tag = tokens[0] + for _, flag := range tokens[1:] { + switch flag { + case "attr": + finfo.flags |= fAttr + case "chardata": + finfo.flags |= fCharData + case "innerxml": + finfo.flags |= fInnerXml + case "comment": + finfo.flags |= fComment + case "any": + finfo.flags |= fAny + case "omitempty": + finfo.flags |= fOmitEmpty + case "typeattr": + finfo.flags |= fTypeAttr + } + } + + // Validate the flags used. + valid := true + switch mode := finfo.flags & fMode; mode { + case 0: + finfo.flags |= fElement + case fAttr, fCharData, fInnerXml, fComment, fAny: + if f.Name == "XMLName" || tag != "" && mode != fAttr { + valid = false + } + default: + // This will also catch multiple modes in a single field. + valid = false + } + if finfo.flags&fMode == fAny { + finfo.flags |= fElement + } + if finfo.flags&fOmitEmpty != 0 && finfo.flags&(fElement|fAttr) == 0 { + valid = false + } + if !valid { + return nil, fmt.Errorf("xml: invalid tag in field %s of type %s: %q", + f.Name, typ, f.Tag.Get("xml")) + } + } + + // Use of xmlns without a name is not allowed. + if finfo.xmlns != "" && tag == "" { + return nil, fmt.Errorf("xml: namespace without name in field %s of type %s: %q", + f.Name, typ, f.Tag.Get("xml")) + } + + if f.Name == "XMLName" { + // The XMLName field records the XML element name. Don't + // process it as usual because its name should default to + // empty rather than to the field name. + finfo.name = tag + return finfo, nil + } + + if tag == "" { + // If the name part of the tag is completely empty, get + // default from XMLName of underlying struct if feasible, + // or field name otherwise. 
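+		// For example, a field declared as
+		//
+		//	Reason string `xml:",omitempty"`
+		//
+		// has an empty name part, so its XML name defaults to "Reason"
+		// (there being no XMLName on the underlying string type).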
+ if xmlname := lookupXMLName(f.Type); xmlname != nil { + finfo.xmlns, finfo.name = xmlname.xmlns, xmlname.name + } else { + finfo.name = f.Name + } + return finfo, nil + } + + // Prepare field name and parents. + parents := strings.Split(tag, ">") + if parents[0] == "" { + parents[0] = f.Name + } + if parents[len(parents)-1] == "" { + return nil, fmt.Errorf("xml: trailing '>' in field %s of type %s", f.Name, typ) + } + finfo.name = parents[len(parents)-1] + if len(parents) > 1 { + if (finfo.flags & fElement) == 0 { + return nil, fmt.Errorf("xml: %s chain not valid with %s flag", tag, strings.Join(tokens[1:], ",")) + } + finfo.parents = parents[:len(parents)-1] + } + + // If the field type has an XMLName field, the names must match + // so that the behavior of both marshalling and unmarshalling + // is straightforward and unambiguous. + if finfo.flags&fElement != 0 { + ftyp := f.Type + xmlname := lookupXMLName(ftyp) + if xmlname != nil && xmlname.name != finfo.name { + return nil, fmt.Errorf("xml: name %q in tag of %s.%s conflicts with name %q in %s.XMLName", + finfo.name, typ, f.Name, xmlname.name, ftyp) + } + } + return finfo, nil +} + +// lookupXMLName returns the fieldInfo for typ's XMLName field +// in case it exists and has a valid xml field tag, otherwise +// it returns nil. +func lookupXMLName(typ reflect.Type) (xmlname *fieldInfo) { + for typ.Kind() == reflect.Ptr { + typ = typ.Elem() + } + if typ.Kind() != reflect.Struct { + return nil + } + for i, n := 0, typ.NumField(); i < n; i++ { + f := typ.Field(i) + if f.Name != "XMLName" { + continue + } + finfo, err := structFieldInfo(typ, &f) + if finfo.name != "" && err == nil { + return finfo + } + // Also consider errors as a non-existent field tag + // and let getTypeInfo itself report the error. + break + } + return nil +} + +func min(a, b int) int { + if a <= b { + return a + } + return b +} + +// addFieldInfo adds finfo to tinfo.fields if there are no +// conflicts, or if conflicts arise from previous fields that were +// obtained from deeper embedded structures than finfo. In the latter +// case, the conflicting entries are dropped. +// A conflict occurs when the path (parent + name) to a field is +// itself a prefix of another path, or when two paths match exactly. +// It is okay for field paths to share a common, shorter prefix. +func addFieldInfo(typ reflect.Type, tinfo *typeInfo, newf *fieldInfo) error { + var conflicts []int +Loop: + // First, figure all conflicts. Most working code will have none. + for i := range tinfo.fields { + oldf := &tinfo.fields[i] + if oldf.flags&fMode != newf.flags&fMode { + continue + } + if oldf.xmlns != "" && newf.xmlns != "" && oldf.xmlns != newf.xmlns { + continue + } + minl := min(len(newf.parents), len(oldf.parents)) + for p := 0; p < minl; p++ { + if oldf.parents[p] != newf.parents[p] { + continue Loop + } + } + if len(oldf.parents) > len(newf.parents) { + if oldf.parents[len(newf.parents)] == newf.name { + conflicts = append(conflicts, i) + } + } else if len(oldf.parents) < len(newf.parents) { + if newf.parents[len(oldf.parents)] == oldf.name { + conflicts = append(conflicts, i) + } + } else { + if newf.name == oldf.name { + conflicts = append(conflicts, i) + } + } + } + // Without conflicts, add the new field and return. + if conflicts == nil { + tinfo.fields = append(tinfo.fields, *newf) + return nil + } + + // If any conflict is shallower, ignore the new field. + // This matches the Go field resolution on embedding. 
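+	// For example, if an outer struct and a struct it embeds both declare a
+	// field that maps to the element name "id", the outer (shallower) field
+	// is kept and the embedded one is dropped, just as a plain selector on
+	// the outer struct would resolve to the shallower field.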
+ for _, i := range conflicts { + if len(tinfo.fields[i].idx) < len(newf.idx) { + return nil + } + } + + // Otherwise, if any of them is at the same depth level, it's an error. + for _, i := range conflicts { + oldf := &tinfo.fields[i] + if len(oldf.idx) == len(newf.idx) { + f1 := typ.FieldByIndex(oldf.idx) + f2 := typ.FieldByIndex(newf.idx) + return &TagPathError{typ, f1.Name, f1.Tag.Get("xml"), f2.Name, f2.Tag.Get("xml")} + } + } + + // Otherwise, the new field is shallower, and thus takes precedence, + // so drop the conflicting fields from tinfo and append the new one. + for c := len(conflicts) - 1; c >= 0; c-- { + i := conflicts[c] + copy(tinfo.fields[i:], tinfo.fields[i+1:]) + tinfo.fields = tinfo.fields[:len(tinfo.fields)-1] + } + tinfo.fields = append(tinfo.fields, *newf) + return nil +} + +// A TagPathError represents an error in the unmarshalling process +// caused by the use of field tags with conflicting paths. +type TagPathError struct { + Struct reflect.Type + Field1, Tag1 string + Field2, Tag2 string +} + +func (e *TagPathError) Error() string { + return fmt.Sprintf("%s field %q with tag %q conflicts with field %q with tag %q", e.Struct, e.Field1, e.Tag1, e.Field2, e.Tag2) +} + +// value returns v's field value corresponding to finfo. +// It's equivalent to v.FieldByIndex(finfo.idx), but initializes +// and dereferences pointers as necessary. +func (finfo *fieldInfo) value(v reflect.Value) reflect.Value { + for i, x := range finfo.idx { + if i > 0 { + t := v.Type() + if t.Kind() == reflect.Ptr && t.Elem().Kind() == reflect.Struct { + if v.IsNil() { + v.Set(reflect.New(v.Type().Elem())) + } + v = v.Elem() + } + } + v = v.Field(x) + } + return v +} diff --git a/vendor/github.com/vmware/govmomi/vim25/xml/xml.go b/vendor/github.com/vmware/govmomi/vim25/xml/xml.go new file mode 100644 index 000000000..6c6c5c821 --- /dev/null +++ b/vendor/github.com/vmware/govmomi/vim25/xml/xml.go @@ -0,0 +1,1939 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package xml implements a simple XML 1.0 parser that +// understands XML name spaces. +package xml + +// References: +// Annotated XML spec: http://www.xml.com/axml/testaxml.htm +// XML name spaces: http://www.w3.org/TR/REC-xml-names/ + +// TODO(rsc): +// Test error handling. + +import ( + "bufio" + "bytes" + "errors" + "fmt" + "io" + "reflect" + "strconv" + "strings" + "unicode" + "unicode/utf8" +) + +// A SyntaxError represents a syntax error in the XML input stream. +type SyntaxError struct { + Msg string + Line int +} + +func (e *SyntaxError) Error() string { + return "XML syntax error on line " + strconv.Itoa(e.Line) + ": " + e.Msg +} + +// A Name represents an XML name (Local) annotated +// with a name space identifier (Space). +// In tokens returned by Decoder.Token, the Space identifier +// is given as a canonical URL, not the short prefix used +// in the document being parsed. +type Name struct { + Space, Local string +} + +// An Attr represents an attribute in an XML element (Name=Value). +type Attr struct { + Name Name + Value string +} + +// A Token is an interface holding one of the token types: +// StartElement, EndElement, CharData, Comment, ProcInst, or Directive. +type Token interface{} + +// A StartElement represents an XML start element. 
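+// For example, with no name spaces in play, the element <a href="x"> is
+// delivered as
+//
+//	StartElement{Name{"", "a"}, []Attr{{Name{"", "href"}, "x"}}}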
+type StartElement struct {
+	Name Name
+	Attr []Attr
+}
+
+func (e StartElement) Copy() StartElement {
+	attrs := make([]Attr, len(e.Attr))
+	copy(attrs, e.Attr)
+	e.Attr = attrs
+	return e
+}
+
+// End returns the corresponding XML end element.
+func (e StartElement) End() EndElement {
+	return EndElement{e.Name}
+}
+
+// An EndElement represents an XML end element.
+type EndElement struct {
+	Name Name
+}
+
+// A CharData represents XML character data (raw text),
+// in which XML escape sequences have been replaced by
+// the characters they represent.
+type CharData []byte
+
+func makeCopy(b []byte) []byte {
+	b1 := make([]byte, len(b))
+	copy(b1, b)
+	return b1
+}
+
+func (c CharData) Copy() CharData { return CharData(makeCopy(c)) }
+
+// A Comment represents an XML comment of the form <!--comment-->.
+// The bytes do not include the <!-- and --> comment markers.
+type Comment []byte
+
+func (c Comment) Copy() Comment { return Comment(makeCopy(c)) }
+
+// A ProcInst represents an XML processing instruction of the form <?target inst?>
+type ProcInst struct {
+	Target string
+	Inst   []byte
+}
+
+func (p ProcInst) Copy() ProcInst {
+	p.Inst = makeCopy(p.Inst)
+	return p
+}
+
+// A Directive represents an XML directive of the form <!text>.
+// The bytes do not include the <! and > markers.
+type Directive []byte
+
+func (d Directive) Copy() Directive { return Directive(makeCopy(d)) }
+
+// CopyToken returns a copy of a Token.
+func CopyToken(t Token) Token {
+	switch v := t.(type) {
+	case CharData:
+		return v.Copy()
+	case Comment:
+		return v.Copy()
+	case Directive:
+		return v.Copy()
+	case ProcInst:
+		return v.Copy()
+	case StartElement:
+		return v.Copy()
+	}
+	return t
+}
+
+// A Decoder represents an XML parser reading a particular input stream.
+// The parser assumes that its input is encoded in UTF-8.
+type Decoder struct {
+	// Strict defaults to true, enforcing the requirements
+	// of the XML specification.
+	// If set to false, the parser allows input containing common
+	// mistakes:
+	//	* If an element is missing an end tag, the parser invents
+	//	  end tags as necessary to keep the return values from Token
+	//	  properly balanced.
+	//	* In attribute values and character data, unknown or malformed
+	//	  character entities (sequences beginning with &) are left alone.
+	//
+	// Setting:
+	//
+	//	d.Strict = false;
+	//	d.AutoClose = HTMLAutoClose;
+	//	d.Entity = HTMLEntity
+	//
+	// creates a parser that can handle typical HTML.
+	//
+	// Strict mode does not enforce the requirements of the XML name spaces TR.
+	// In particular it does not reject name space tags using undefined prefixes.
+	// Such tags are recorded with the unknown prefix as the name space URL.
+	Strict bool
+
+	// When Strict == false, AutoClose indicates a set of elements to
+	// consider closed immediately after they are opened, regardless
+	// of whether an end element is present.
+	AutoClose []string
+
+	// Entity can be used to map non-standard entity names to string replacements.
+	// The parser behaves as if these standard mappings are present in the map,
+	// regardless of the actual map content:
+	//
+	//	"lt": "<",
+	//	"gt": ">",
+	//	"amp": "&",
+	//	"apos": "'",
+	//	"quot": `"`,
+	Entity map[string]string
+
+	// CharsetReader, if non-nil, defines a function to generate
+	// charset-conversion readers, converting from the provided
+	// non-UTF-8 charset into UTF-8. If CharsetReader is nil or
+	// returns an error, parsing stops with an error. One of the
+	// CharsetReader's result values must be non-nil.
+	CharsetReader func(charset string, input io.Reader) (io.Reader, error)
+
+	// DefaultSpace sets the default name space used for unadorned tags,
+	// as if the entire XML stream were wrapped in an element containing
+	// the attribute xmlns="DefaultSpace".
+	DefaultSpace string
+
+	// TypeFunc is used to map type names to actual types.
+	TypeFunc func(string) (reflect.Type, bool)
+
+	r              io.ByteReader
+	buf            bytes.Buffer
+	saved          *bytes.Buffer
+	stk            *stack
+	free           *stack
+	needClose      bool
+	toClose        Name
+	nextToken      Token
+	nextByte       int
+	ns             map[string]string
+	err            error
+	line           int
+	unmarshalDepth int
+}
+
+// NewDecoder creates a new XML parser reading from r.
+// If r does not implement io.ByteReader, NewDecoder will
+// do its own buffering.
+func NewDecoder(r io.Reader) *Decoder {
+	d := &Decoder{
+		ns:       make(map[string]string),
+		nextByte: -1,
+		line:     1,
+		Strict:   true,
+	}
+	d.switchToReader(r)
+	return d
+}
+
+// Token returns the next XML token in the input stream.
+// At the end of the input stream, Token returns nil, io.EOF.
+//
+// Slices of bytes in the returned token data refer to the
+// parser's internal buffer and remain valid only until the next
+// call to Token. To acquire a copy of the bytes, call CopyToken
+// or the token's Copy method.
+//
+// Token expands self-closing elements such as <br/>
+// into separate start and end elements returned by successive calls. +// +// Token guarantees that the StartElement and EndElement +// tokens it returns are properly nested and matched: +// if Token encounters an unexpected end element, +// it will return an error. +// +// Token implements XML name spaces as described by +// http://www.w3.org/TR/REC-xml-names/. Each of the +// Name structures contained in the Token has the Space +// set to the URL identifying its name space when known. +// If Token encounters an unrecognized name space prefix, +// it uses the prefix as the Space rather than report an error. +func (d *Decoder) Token() (t Token, err error) { + if d.stk != nil && d.stk.kind == stkEOF { + err = io.EOF + return + } + if d.nextToken != nil { + t = d.nextToken + d.nextToken = nil + } else if t, err = d.rawToken(); err != nil { + return + } + + if !d.Strict { + if t1, ok := d.autoClose(t); ok { + d.nextToken = t + t = t1 + } + } + switch t1 := t.(type) { + case StartElement: + // In XML name spaces, the translations listed in the + // attributes apply to the element name and + // to the other attribute names, so process + // the translations first. + for _, a := range t1.Attr { + if a.Name.Space == "xmlns" { + v, ok := d.ns[a.Name.Local] + d.pushNs(a.Name.Local, v, ok) + d.ns[a.Name.Local] = a.Value + } + if a.Name.Space == "" && a.Name.Local == "xmlns" { + // Default space for untagged names + v, ok := d.ns[""] + d.pushNs("", v, ok) + d.ns[""] = a.Value + } + } + + d.translate(&t1.Name, true) + for i := range t1.Attr { + d.translate(&t1.Attr[i].Name, false) + } + d.pushElement(t1.Name) + t = t1 + + case EndElement: + d.translate(&t1.Name, true) + if !d.popElement(&t1) { + return nil, d.err + } + t = t1 + } + return +} + +const xmlURL = "http://www.w3.org/XML/1998/namespace" + +// Apply name space translation to name n. +// The default name space (for Space=="") +// applies only to element names, not to attribute names. +func (d *Decoder) translate(n *Name, isElementName bool) { + switch { + case n.Space == "xmlns": + return + case n.Space == "" && !isElementName: + return + case n.Space == "xml": + n.Space = xmlURL + case n.Space == "" && n.Local == "xmlns": + return + } + if v, ok := d.ns[n.Space]; ok { + n.Space = v + } else if n.Space == "" { + n.Space = d.DefaultSpace + } +} + +func (d *Decoder) switchToReader(r io.Reader) { + // Get efficient byte at a time reader. + // Assume that if reader has its own + // ReadByte, it's efficient enough. + // Otherwise, use bufio. + if rb, ok := r.(io.ByteReader); ok { + d.r = rb + } else { + d.r = bufio.NewReader(r) + } +} + +// Parsing state - stack holds old name space translations +// and the current set of open elements. The translations to pop when +// ending a given tag are *below* it on the stack, which is +// more work but forced on us by XML. +type stack struct { + next *stack + kind int + name Name + ok bool +} + +const ( + stkStart = iota + stkNs + stkEOF +) + +func (d *Decoder) push(kind int) *stack { + s := d.free + if s != nil { + d.free = s.next + } else { + s = new(stack) + } + s.next = d.stk + s.kind = kind + d.stk = s + return s +} + +func (d *Decoder) pop() *stack { + s := d.stk + if s != nil { + d.stk = s.next + s.next = d.free + d.free = s + } + return s +} + +// Record that after the current element is finished +// (that element is already pushed on the stack) +// Token should return EOF until popEOF is called. +func (d *Decoder) pushEOF() { + // Walk down stack to find Start. 
+	// It might not be the top, because there might be stkNs
+	// entries above it.
+	start := d.stk
+	for start.kind != stkStart {
+		start = start.next
+	}
+	// The stkNs entries below a start are associated with that
+	// element too; skip over them.
+	for start.next != nil && start.next.kind == stkNs {
+		start = start.next
+	}
+	s := d.free
+	if s != nil {
+		d.free = s.next
+	} else {
+		s = new(stack)
+	}
+	s.kind = stkEOF
+	s.next = start.next
+	start.next = s
+}
+
+// Undo a pushEOF.
+// The element must have been finished, so the EOF should be at the top of the stack.
+func (d *Decoder) popEOF() bool {
+	if d.stk == nil || d.stk.kind != stkEOF {
+		return false
+	}
+	d.pop()
+	return true
+}
+
+// Record that we are starting an element with the given name.
+func (d *Decoder) pushElement(name Name) {
+	s := d.push(stkStart)
+	s.name = name
+}
+
+// Record that we are changing the value of ns[local].
+// The old value is url, ok.
+func (d *Decoder) pushNs(local string, url string, ok bool) {
+	s := d.push(stkNs)
+	s.name.Local = local
+	s.name.Space = url
+	s.ok = ok
+}
+
+// Creates a SyntaxError with the current line number.
+func (d *Decoder) syntaxError(msg string) error {
+	return &SyntaxError{Msg: msg, Line: d.line}
+}
+
+// Record that we are ending an element with the given name.
+// The name must match the record at the top of the stack,
+// which must be a pushElement record.
+// After popping the element, apply any undo records from
+// the stack to restore the name translations that existed
+// before we saw this element.
+func (d *Decoder) popElement(t *EndElement) bool {
+	s := d.pop()
+	name := t.Name
+	switch {
+	case s == nil || s.kind != stkStart:
+		d.err = d.syntaxError("unexpected end element </" + name.Local + ">")
+		return false
+	case s.name.Local != name.Local:
+		if !d.Strict {
+			d.needClose = true
+			d.toClose = t.Name
+			t.Name = s.name
+			return true
+		}
+		d.err = d.syntaxError("element <" + s.name.Local + "> closed by </" + name.Local + ">")
+		return false
+	case s.name.Space != name.Space:
+		d.err = d.syntaxError("element <" + s.name.Local + "> in space " + s.name.Space +
+			" closed by </" + name.Local + "> in space " + name.Space)
+		return false
+	}
+
+	// Pop stack until a Start or EOF is on the top, undoing the
+	// translations that were associated with the element we just closed.
+	for d.stk != nil && d.stk.kind != stkStart && d.stk.kind != stkEOF {
+		s := d.pop()
+		if s.ok {
+			d.ns[s.name.Local] = s.name.Space
+		} else {
+			delete(d.ns, s.name.Local)
+		}
+	}
+
+	return true
+}
+
+// If the top element on the stack is autoclosing and
+// t is not the end tag, invent the end tag.
+func (d *Decoder) autoClose(t Token) (Token, bool) {
+	if d.stk == nil || d.stk.kind != stkStart {
+		return nil, false
+	}
+	name := strings.ToLower(d.stk.name.Local)
+	for _, s := range d.AutoClose {
+		if strings.ToLower(s) == name {
+			// This one should be auto closed if t doesn't close it.
+			et, ok := t.(EndElement)
+			if !ok || et.Name.Local != name {
+				return EndElement{d.stk.name}, true
+			}
+			break
+		}
+	}
+	return nil, false
+}
+
+var errRawToken = errors.New("xml: cannot use RawToken from UnmarshalXML method")
+
+// RawToken is like Token but does not verify that
+// start and end elements match and does not translate
+// name space prefixes to their corresponding URLs.
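+//
+// For example, for input <x:a xmlns:x="urn:ns"/>, RawToken reports the
+// element name as Name{"x", "a"}, where Token would report Name{"urn:ns", "a"}.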
+func (d *Decoder) RawToken() (Token, error) {
+	if d.unmarshalDepth > 0 {
+		return nil, errRawToken
+	}
+	return d.rawToken()
+}
+
+func (d *Decoder) rawToken() (Token, error) {
+	if d.err != nil {
+		return nil, d.err
+	}
+	if d.needClose {
+		// The last element we read was self-closing and
+		// we returned just the StartElement half.
+		// Return the EndElement half now.
+		d.needClose = false
+		return EndElement{d.toClose}, nil
+	}
+
+	b, ok := d.getc()
+	if !ok {
+		return nil, d.err
+	}
+
+	if b != '<' {
+		// Text section.
+		d.ungetc(b)
+		data := d.text(-1, false)
+		if data == nil {
+			return nil, d.err
+		}
+		return CharData(data), nil
+	}
+
+	if b, ok = d.mustgetc(); !ok {
+		return nil, d.err
+	}
+	switch b {
+	case '/':
+		// </: End element
+		var name Name
+		if name, ok = d.nsname(); !ok {
+			if d.err == nil {
+				d.err = d.syntaxError("expected element name after </")
+			}
+			return nil, d.err
+		}
+		d.space()
+		if b, ok = d.mustgetc(); !ok {
+			return nil, d.err
+		}
+		if b != '>' {
+			d.err = d.syntaxError("invalid characters between </" + name.Local + " and >")
+			return nil, d.err
+		}
+		return EndElement{name}, nil
+
+	case '?':
+		// <?: Processing instruction.
+		var target string
+		if target, ok = d.name(); !ok {
+			if d.err == nil {
+				d.err = d.syntaxError("expected target name after <?")
+			}
+			return nil, d.err
+		}
+		d.space()
+		d.buf.Reset()
+		var b0 byte
+		for {
+			if b, ok = d.mustgetc(); !ok {
+				return nil, d.err
+			}
+			d.buf.WriteByte(b)
+			if b0 == '?' && b == '>' {
+				break
+			}
+			b0 = b
+		}
+		data := d.buf.Bytes()
+		data = data[0 : len(data)-2] // chop ?>
+
+		if target == "xml" {
+			enc := procInstEncoding(string(data))
+			if enc != "" && enc != "utf-8" && enc != "UTF-8" {
+				if d.CharsetReader == nil {
+					d.err = fmt.Errorf("xml: encoding %q declared but Decoder.CharsetReader is nil", enc)
+					return nil, d.err
+				}
+				newr, err := d.CharsetReader(enc, d.r.(io.Reader))
+				if err != nil {
+					d.err = fmt.Errorf("xml: opening charset %q: %v", enc, err)
+					return nil, d.err
+				}
+				if newr == nil {
+					panic("CharsetReader returned a nil Reader for charset " + enc)
+				}
+				d.switchToReader(newr)
+			}
+		}
+		return ProcInst{target, data}, nil
+
+	case '!':
+		// <!: Maybe comment, maybe CDATA.
+		if b, ok = d.mustgetc(); !ok {
+			return nil, d.err
+		}
+		switch b {
+		case '-': // <!-
+			// Probably <!-- for a comment.
+			if b, ok = d.mustgetc(); !ok {
+				return nil, d.err
+			}
+			if b != '-' {
+				d.err = d.syntaxError("invalid sequence <!- not part of <!--")
+				return nil, d.err
+			}
+			// Look for terminator.
+			d.buf.Reset()
+			var b0, b1 byte
+			for {
+				if b, ok = d.mustgetc(); !ok {
+					return nil, d.err
+				}
+				d.buf.WriteByte(b)
+				if b0 == '-' && b1 == '-' && b == '>' {
+					break
+				}
+				b0, b1 = b1, b
+			}
+			data := d.buf.Bytes()
+			data = data[0 : len(data)-3] // chop -->
+			return Comment(data), nil
+
+		case '[': // <![
+			// Probably <![CDATA[.
+			for i := 0; i < 6; i++ {
+				if b, ok = d.mustgetc(); !ok {
+					return nil, d.err
+				}
+				if b != "CDATA["[i] {
+					d.err = d.syntaxError("invalid <![ sequence")
+					return nil, d.err
+				}
+			}
+			// Have <![CDATA[.  Read text until ]]>.
+			data := d.text(-1, true)
+			if data == nil {
+				return nil, d.err
+			}
+			return CharData(data), nil
+		}
+	}
+
+	// Probably a directive: <!DOCTYPE ...>, <!ENTITY ...>, etc.
+	// We don't care, but accumulate for caller. Quoted angle
+	// brackets do not count for nesting.
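+	// For example, <!DOCTYPE doc [ <!ENTITY x "y"> ]> is accumulated and
+	// returned as a single Directive token, with the nested <!ENTITY ...>
+	// tracked by the depth counter below rather than ending the directive.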
+
+	// Probably a directive: <!DOCTYPE ...>, <!ENTITY ...>, etc.
+	// We don't care, but accumulate for caller. Quoted angle
+	// brackets do not count for nesting.
+	d.buf.Reset()
+	d.buf.WriteByte(b)
+	inquote := uint8(0)
+	depth := 0
+	for {
+		if b, ok = d.mustgetc(); !ok {
+			return nil, d.err
+		}
+		if inquote == 0 && b == '>' && depth == 0 {
+			break
+		}
+	HandleB:
+		d.buf.WriteByte(b)
+		switch {
+		case b == inquote:
+			inquote = 0
+
+		case inquote != 0:
+			// in quotes, no special action
+
+		case b == '\'' || b == '"':
+			inquote = b
+
+		case b == '>' && inquote == 0:
+			depth--
+
+		case b == '<' && inquote == 0:
+			// Look for <!-- to begin comment.
+			s := "!--"
+			for i := 0; i < len(s); i++ {
+				if b, ok = d.mustgetc(); !ok {
+					return nil, d.err
+				}
+				if b != s[i] {
+					for j := 0; j < i; j++ {
+						d.buf.WriteByte(s[j])
+					}
+					depth++
+					goto HandleB
+				}
+			}
+
+			// Remove < that was written above.
+			d.buf.Truncate(d.buf.Len() - 1)
+
+			// Look for terminator.
+			var b0, b1 byte
+			for {
+				if b, ok = d.mustgetc(); !ok {
+					return nil, d.err
+				}
+				if b0 == '-' && b1 == '-' && b == '>' {
+					break
+				}
+				b0, b1 = b1, b
+			}
+		}
+	}
+	return Directive(d.buf.Bytes()), nil
+}
+
+const testInput = `
+<?xml version="1.0" encoding="UTF-8"?>
+<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN"
+	"http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
+<body xmlns:foo="ns1" xmlns="ns2" xmlns:tag="ns3" ` +
+	"\r\n\t" + `  >
+  <hello lang="en">World &lt;&gt;&apos;&quot; &#x767d;&#40300;翔</hello>
+  <query>&何; &is-it;</query>
+  <goodbye />
+  <outer foo:attr="value" xmlns:tag="ns4">
+    <inner/>
+  </outer>
+  <tag:name>
+    <![CDATA[Some text here.]]>
+  </tag:name>
+</body><!-- missing final newline -->`
+
+var testEntity = map[string]string{"何": "What", "is-it": "is it?"}
+
+var rawTokens = []Token{
+	CharData("\n"),
+	ProcInst{"xml", []byte(`version="1.0" encoding="UTF-8"`)},
+	CharData("\n"),
+	Directive(`DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN"
+	"http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd"`),
+	CharData("\n"),
+	StartElement{Name{"", "body"}, []Attr{{Name{"xmlns", "foo"}, "ns1"}, {Name{"", "xmlns"}, "ns2"}, {Name{"xmlns", "tag"}, "ns3"}}},
+	CharData("\n  "),
+	StartElement{Name{"", "hello"}, []Attr{{Name{"", "lang"}, "en"}}},
+	CharData("World <>'\" 白鵬翔"),
+	EndElement{Name{"", "hello"}},
+	CharData("\n  "),
+	StartElement{Name{"", "query"}, []Attr{}},
+	CharData("What is it?"),
+	EndElement{Name{"", "query"}},
+	CharData("\n  "),
+	StartElement{Name{"", "goodbye"}, []Attr{}},
+	EndElement{Name{"", "goodbye"}},
+	CharData("\n  "),
+	StartElement{Name{"", "outer"}, []Attr{{Name{"foo", "attr"}, "value"}, {Name{"xmlns", "tag"}, "ns4"}}},
+	CharData("\n    "),
+	StartElement{Name{"", "inner"}, []Attr{}},
+	EndElement{Name{"", "inner"}},
+	CharData("\n  "),
+	EndElement{Name{"", "outer"}},
+	CharData("\n  "),
+	StartElement{Name{"tag", "name"}, []Attr{}},
+	CharData("\n    "),
+	CharData("Some text here."),
+	CharData("\n  "),
+	EndElement{Name{"tag", "name"}},
+	CharData("\n"),
+	EndElement{Name{"", "body"}},
+	Comment(" missing final newline "),
+}
+
+var cookedTokens = []Token{
+	CharData("\n"),
+	ProcInst{"xml", []byte(`version="1.0" encoding="UTF-8"`)},
+	CharData("\n"),
+	Directive(`DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN"
+	"http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd"`),
+	CharData("\n"),
+	StartElement{Name{"ns2", "body"}, []Attr{{Name{"xmlns", "foo"}, "ns1"}, {Name{"", "xmlns"}, "ns2"}, {Name{"xmlns", "tag"}, "ns3"}}},
+	CharData("\n  "),
+	StartElement{Name{"ns2", "hello"}, []Attr{{Name{"", "lang"}, "en"}}},
+	CharData("World <>'\" 白鵬翔"),
+	EndElement{Name{"ns2", "hello"}},
+	CharData("\n  "),
+	StartElement{Name{"ns2", "query"}, []Attr{}},
+	CharData("What is it?"),
+	EndElement{Name{"ns2", "query"}},
+	CharData("\n  "),
+	StartElement{Name{"ns2", "goodbye"}, []Attr{}},
+	EndElement{Name{"ns2", "goodbye"}},
+	CharData("\n  "),
+	StartElement{Name{"ns2", "outer"}, []Attr{{Name{"ns1", "attr"}, "value"}, {Name{"xmlns", "tag"}, "ns4"}}},
+	CharData("\n    "),
+	StartElement{Name{"ns2", "inner"}, []Attr{}},
+	EndElement{Name{"ns2", "inner"}},
+	CharData("\n  "),
+	EndElement{Name{"ns2", "outer"}},
+	CharData("\n  "),
+	StartElement{Name{"ns3", "name"}, []Attr{}},
+	CharData("\n    "),
+	CharData("Some text here."),
+	CharData("\n  "),
+	EndElement{Name{"ns3", "name"}},
+	CharData("\n"),
+	EndElement{Name{"ns2", "body"}},
+	Comment(" missing final newline "),
+}
+
+const testInputAltEncoding = `
+<?xml version="1.0" encoding="x-testing-uppercase"?>
+<TAG>VALUE</TAG>`
+
+var rawTokensAltEncoding = []Token{
+	CharData("\n"),
+	ProcInst{"xml", []byte(`version="1.0" encoding="x-testing-uppercase"`)},
+	CharData("\n"),
+	StartElement{Name{"", "tag"}, []Attr{}},
+	CharData("value"),
+	EndElement{Name{"", "tag"}},
+}
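+
+// Illustrative sketch (editor's gloss, not part of the vendored source):
+// CharsetReader is the hook rawToken uses when an <?xml ...?> declaration
+// names a non-UTF-8 encoding. A caller supplies the conversion, e.g.
+//
+//	d := NewDecoder(r)
+//	d.CharsetReader = func(charset string, input io.Reader) (io.Reader, error) {
+//		// return a reader that translates input from charset to UTF-8
+//	}
+//
+// TestRawTokenAltEncoding below exercises this path with the made-up
+// "x-testing-uppercase" charset and a byte-by-byte down-casing reader.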
+
+var xmlInput = []string{
+	// unexpected EOF cases
+	"<",
+	"",
+	"",
+	"",
+	// "", // let the Token() caller handle
+	"",
+	"",
+	"",
+	"",
+	" c;",
+	"",
+	"",
+	"",
+	// "", // let the Token() caller handle
+	"",
+	"",
+	"cdata]]>",
+}
+
+func TestRawToken(t *testing.T) {
+	d := NewDecoder(strings.NewReader(testInput))
+	d.Entity = testEntity
+	testRawToken(t, d, rawTokens)
+}
+
+const nonStrictInput = `
+<tag>non&entity</tag>
+<tag>&unknown;entity</tag>
+<tag>&#123</tag>
+<tag>&#zzz;</tag>
+<tag>&なまえ3;</tag>
+<tag>&lt-gt;</tag>
+<tag>&;</tag>
+<tag>&0a;</tag>
+`
+
+var nonStringEntity = map[string]string{"": "oops!", "0a": "oops!"}
+
+var nonStrictTokens = []Token{
+	CharData("\n"),
+	StartElement{Name{"", "tag"}, []Attr{}},
+	CharData("non&entity"),
+	EndElement{Name{"", "tag"}},
+	CharData("\n"),
+	StartElement{Name{"", "tag"}, []Attr{}},
+	CharData("&unknown;entity"),
+	EndElement{Name{"", "tag"}},
+	CharData("\n"),
+	StartElement{Name{"", "tag"}, []Attr{}},
+	CharData("&#123"),
+	EndElement{Name{"", "tag"}},
+	CharData("\n"),
+	StartElement{Name{"", "tag"}, []Attr{}},
+	CharData("&#zzz;"),
+	EndElement{Name{"", "tag"}},
+	CharData("\n"),
+	StartElement{Name{"", "tag"}, []Attr{}},
+	CharData("&なまえ3;"),
+	EndElement{Name{"", "tag"}},
+	CharData("\n"),
+	StartElement{Name{"", "tag"}, []Attr{}},
+	CharData("&lt-gt;"),
+	EndElement{Name{"", "tag"}},
+	CharData("\n"),
+	StartElement{Name{"", "tag"}, []Attr{}},
+	CharData("&;"),
+	EndElement{Name{"", "tag"}},
+	CharData("\n"),
+	StartElement{Name{"", "tag"}, []Attr{}},
+	CharData("&0a;"),
+	EndElement{Name{"", "tag"}},
+	CharData("\n"),
+}
+
+func TestNonStrictRawToken(t *testing.T) {
+	d := NewDecoder(strings.NewReader(nonStrictInput))
+	d.Strict = false
+	testRawToken(t, d, nonStrictTokens)
+}
+
+type downCaser struct {
+	t *testing.T
+	r io.ByteReader
+}
+
+func (d *downCaser) ReadByte() (c byte, err error) {
+	c, err = d.r.ReadByte()
+	if c >= 'A' && c <= 'Z' {
+		c += 'a' - 'A'
+	}
+	return
+}
+
+func (d *downCaser) Read(p []byte) (int, error) {
+	d.t.Fatalf("unexpected Read call on downCaser reader")
+	panic("unreachable")
+}
+
+func TestRawTokenAltEncoding(t *testing.T) {
+	d := NewDecoder(strings.NewReader(testInputAltEncoding))
+	d.CharsetReader = func(charset string, input io.Reader) (io.Reader, error) {
+		if charset != "x-testing-uppercase" {
+			t.Fatalf("unexpected charset %q", charset)
+		}
+		return &downCaser{t, input.(io.ByteReader)}, nil
+	}
+	testRawToken(t, d, rawTokensAltEncoding)
+}
+
+func TestRawTokenAltEncodingNoConverter(t *testing.T) {
+	d := NewDecoder(strings.NewReader(testInputAltEncoding))
+	token, err := d.RawToken()
+	if token == nil {
+		t.Fatalf("expected a token on first RawToken call")
+	}
+	if err != nil {
+		t.Fatal(err)
+	}
+	token, err = d.RawToken()
+	if token != nil {
+		t.Errorf("expected a nil token; got %#v", token)
+	}
+	if err == nil {
+		t.Fatalf("expected an error on second RawToken call")
+	}
+	const encoding = "x-testing-uppercase"
+	if !strings.Contains(err.Error(), encoding) {
+		t.Errorf("expected error to contain %q; got error: %v",
+			encoding, err)
+	}
+}
+
+func testRawToken(t *testing.T, d *Decoder, rawTokens []Token) {
+	for i, want := range rawTokens {
+		have, err := d.RawToken()
+		if err != nil {
+			t.Fatalf("token %d: unexpected error: %s", i, err)
+		}
+		if !reflect.DeepEqual(have, want) {
+			var shave, swant string
+			if _, ok := have.(CharData); ok {
+				shave = fmt.Sprintf("CharData(%q)", have)
+			} else {
+				shave = fmt.Sprintf("%#v", have)
+			}
+			if _, ok := want.(CharData); ok {
+				swant = fmt.Sprintf("CharData(%q)", want)
+			} else {
+				swant = fmt.Sprintf("%#v", want)
+			}
+			t.Errorf("token %d = %s, want %s", i, shave, swant)
+		}
+	}
+}
+
+// Ensure that directives (specifically !DOCTYPE) include the complete
+// text of any nested directives, noting that < and > do not change
+// nesting depth if they are in single or double quotes.
+
+var nestedDirectivesInput = `
+<!DOCTYPE [<!ENTITY rdf "http://www.w3.org/1999/02/22-rdf-syntax-ns#">]>
+<!DOCTYPE [<!ENTITY xlt ">">]>
+<!DOCTYPE [<!ENTITY xlt "<">]>
+<!DOCTYPE [<!ENTITY xlt '>'>]>
+<!DOCTYPE [<!ENTITY xlt '<'>]>
+<!DOCTYPE [<!ENTITY xlt '">'>]>
+<!DOCTYPE [<!ENTITY xlt "'<">]>
+`
+
+var nestedDirectivesTokens = []Token{
+	CharData("\n"),
+	Directive(`DOCTYPE [<!ENTITY rdf "http://www.w3.org/1999/02/22-rdf-syntax-ns#">]`),
+	CharData("\n"),
+	Directive(`DOCTYPE [<!ENTITY xlt ">">]`),
+	CharData("\n"),
+	Directive(`DOCTYPE [<!ENTITY xlt "<">]`),
+	CharData("\n"),
+	Directive(`DOCTYPE [<!ENTITY xlt '>'>]`),
+	CharData("\n"),
+	Directive(`DOCTYPE [<!ENTITY xlt '<'>]`),
+	CharData("\n"),
+	Directive(`DOCTYPE [<!ENTITY xlt '">'>]`),
+	CharData("\n"),
+	Directive(`DOCTYPE [<!ENTITY xlt "'<">]`),
+	CharData("\n"),
+}
+
+func TestNestedDirectives(t *testing.T) {
+	d := NewDecoder(strings.NewReader(nestedDirectivesInput))
+
+	for i, want := range nestedDirectivesTokens {
+		have, err := d.Token()
+		if err != nil {
+			t.Fatalf("token %d: unexpected error: %s", i, err)
+		}
+		if !reflect.DeepEqual(have, want) {
+			t.Errorf("token %d = %#v want %#v", i, have, want)
+		}
+	}
+}
+
+func TestToken(t *testing.T) {
+	d := NewDecoder(strings.NewReader(testInput))
+	d.Entity = testEntity
+
+	for i, want := range cookedTokens {
+		have, err := d.Token()
+		if err != nil {
+			t.Fatalf("token %d: unexpected error: %s", i, err)
+		}
+		if !reflect.DeepEqual(have, want) {
+			t.Errorf("token %d = %#v want %#v", i, have, want)
+		}
+	}
+}
+
+func TestSyntax(t *testing.T) {
+	for i := range xmlInput {
+		d := NewDecoder(strings.NewReader(xmlInput[i]))
+		var err error
+		for _, err = d.Token(); err == nil; _, err = d.Token() {
+		}
+		if _, ok := err.(*SyntaxError); !ok {
+			t.Fatalf(`xmlInput "%s": expected SyntaxError not received`, xmlInput[i])
+		}
+	}
+}
+
+type allScalars struct {
+	True1     bool
+	True2     bool
+	False1    bool
+	False2    bool
+	Int       int
+	Int8      int8
+	Int16     int16
+	Int32     int32
+	Int64     int64
+	Uint      int
+	Uint8     uint8
+	Uint16    uint16
+	Uint32    uint32
+	Uint64    uint64
+	Uintptr   uintptr
+	Float32   float32
+	Float64   float64
+	String    string
+	PtrString *string
+}
+
+var all = allScalars{
+	True1:     true,
+	True2:     true,
+	False1:    false,
+	False2:    false,
+	Int:       1,
+	Int8:      -2,
+	Int16:     3,
+	Int32:     -4,
+	Int64:     5,
+	Uint:      6,
+	Uint8:     7,
+	Uint16:    8,
+	Uint32:    9,
+	Uint64:    10,
+	Uintptr:   11,
+	Float32:   13.0,
+	Float64:   14.0,
+	String:    "15",
+	PtrString: &sixteen,
+}
+
+var sixteen = "16"
+
+const testScalarsInput = `<allscalars>
+	<True1>true</True1>
+	<True2>1</True2>
+	<False1>false</False1>
+	<False2>0</False2>
+	<Int>1</Int>
+	<Int8>-2</Int8>
+	<Int16>3</Int16>
+	<Int32>-4</Int32>
+	<Int64>5</Int64>
+	<Uint>6</Uint>
+	<Uint8>7</Uint8>
+	<Uint16>8</Uint16>
+	<Uint32>9</Uint32>
+	<Uint64>10</Uint64>
+	<Uintptr>11</Uintptr>
+	<Float>12.0</Float>
+	<Float32>13.0</Float32>
+	<Float64>14.0</Float64>
+	<String>15</String>
+	<PtrString>16</PtrString>
+</allscalars>`
+
+func TestAllScalars(t *testing.T) {
+	var a allScalars
+	err := Unmarshal([]byte(testScalarsInput), &a)
+
+	if err != nil {
+		t.Fatal(err)
+	}
+	if !reflect.DeepEqual(a, all) {
+		t.Errorf("have %+v want %+v", a, all)
+	}
+}
+
+type item struct {
+	Field_a string
+}
+
+func TestIssue569(t *testing.T) {
+	data := `<item><Field_a>abcd</Field_a></item>`
+	var i item
+	err := Unmarshal([]byte(data), &i)
+
+	if err != nil || i.Field_a != "abcd" {
+		t.Fatal("Expecting abcd")
+	}
+}
+
+func TestUnquotedAttrs(t *testing.T) {
+	data := "<tag attr=azAZ09:-_\t>"
+	d := NewDecoder(strings.NewReader(data))
+	d.Strict = false
+	token, err := d.Token()
+	if _, ok := err.(*SyntaxError); ok {
+		t.Errorf("Unexpected error: %v", err)
+	}
+	if token.(StartElement).Name.Local != "tag" {
+		t.Errorf("Unexpected tag name: %v", token.(StartElement).Name.Local)
+	}
+	attr := token.(StartElement).Attr[0]
+	if attr.Value != "azAZ09:-_" {
+		t.Errorf("Unexpected attribute value: %v", attr.Value)
+	}
+	if attr.Name.Local != "attr" {
+		t.Errorf("Unexpected attribute name: %v", attr.Name.Local)
+	}
+}
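+
+// Illustrative note (editor's gloss, not upstream text): with Strict set to
+// false the decoder accepts the HTML-style shortcuts that the two tests
+// around this point rely on, e.g.
+//
+//	d := NewDecoder(strings.NewReader("<input checked>"))
+//	d.Strict = false
+//	tok, _ := d.Token()
+//	// tok.(StartElement).Attr[0] == Attr{Name{"", "checked"}, "checked"}
+//
+// that is, a valueless attribute reports its own name as its value.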
+
+func TestValuelessAttrs(t *testing.T) {
+	tests := [][3]string{
+		{"<p nowrap>", "p", "nowrap"},
+		{"<p nowrap >", "p", "nowrap"},
+		{"<input checked/>", "input", "checked"},
+		{"<input checked />", "input", "checked"},
+	}
+	for _, test := range tests {
+		d := NewDecoder(strings.NewReader(test[0]))
+		d.Strict = false
+		token, err := d.Token()
+		if _, ok := err.(*SyntaxError); ok {
+			t.Errorf("Unexpected error: %v", err)
+		}
+		if token.(StartElement).Name.Local != test[1] {
+			t.Errorf("Unexpected tag name: %v", token.(StartElement).Name.Local)
+		}
+		attr := token.(StartElement).Attr[0]
+		if attr.Value != test[2] {
+			t.Errorf("Unexpected attribute value: %v", attr.Value)
+		}
+		if attr.Name.Local != test[2] {
+			t.Errorf("Unexpected attribute name: %v", attr.Name.Local)
+		}
+	}
+}
+
+func TestCopyTokenCharData(t *testing.T) {
+	data := []byte("same data")
+	var tok1 Token = CharData(data)
+	tok2 := CopyToken(tok1)
+	if !reflect.DeepEqual(tok1, tok2) {
+		t.Error("CopyToken(CharData) != CharData")
+	}
+	data[1] = 'o'
+	if reflect.DeepEqual(tok1, tok2) {
+		t.Error("CopyToken(CharData) uses same buffer.")
+	}
+}
+
+func TestCopyTokenStartElement(t *testing.T) {
+	elt := StartElement{Name{"", "hello"}, []Attr{{Name{"", "lang"}, "en"}}}
+	var tok1 Token = elt
+	tok2 := CopyToken(tok1)
+	if tok1.(StartElement).Attr[0].Value != "en" {
+		t.Error("CopyToken overwrote Attr[0]")
+	}
+	if !reflect.DeepEqual(tok1, tok2) {
+		t.Error("CopyToken(StartElement) != StartElement")
+	}
+	tok1.(StartElement).Attr[0] = Attr{Name{"", "lang"}, "de"}
+	if reflect.DeepEqual(tok1, tok2) {
+		t.Error("CopyToken(StartElement) uses same buffer.")
+	}
+}
+
+func TestSyntaxErrorLineNum(t *testing.T) {
+	testInput := "<P>Foo<P>\n\n<P>Bar</>\n"
+	d := NewDecoder(strings.NewReader(testInput))
+	var err error
+	for _, err = d.Token(); err == nil; _, err = d.Token() {
+	}
+	synerr, ok := err.(*SyntaxError)
+	if !ok {
+		t.Error("Expected SyntaxError.")
+	}
+	if synerr.Line != 3 {
+		t.Error("SyntaxError didn't have correct line number.")
+	}
+}
+
+func TestTrailingRawToken(t *testing.T) {
+	input := `<FOO></FOO>  `
+	d := NewDecoder(strings.NewReader(input))
+	var err error
+	for _, err = d.RawToken(); err == nil; _, err = d.RawToken() {
+	}
+	if err != io.EOF {
+		t.Fatalf("d.RawToken() = _, %v, want _, io.EOF", err)
+	}
+}
+
+func TestTrailingToken(t *testing.T) {
+	input := `<FOO></FOO>  `
+	d := NewDecoder(strings.NewReader(input))
+	var err error
+	for _, err = d.Token(); err == nil; _, err = d.Token() {
+	}
+	if err != io.EOF {
+		t.Fatalf("d.Token() = _, %v, want _, io.EOF", err)
+	}
+}
+
+func TestEntityInsideCDATA(t *testing.T) {
+	input := `<test><![CDATA[ &val=foo ]]></test>`
+	d := NewDecoder(strings.NewReader(input))
+	var err error
+	for _, err = d.Token(); err == nil; _, err = d.Token() {
+	}
+	if err != io.EOF {
+		t.Fatalf("d.Token() = _, %v, want _, io.EOF", err)
+	}
+}
+
+var characterTests = []struct {
+	in  string
+	err string
+}{
+	{"\x12<doc/>", "illegal character code U+0012"},
+	{"<?xml version=\"1.0\"?>\x0b<doc/>", "illegal character code U+000B"},
+	{"\xef\xbf\xbe<doc/>", "illegal character code U+FFFE"},
+	{"<?xml version=\"1.0\"?><doc>\r\n<hiya/>\x07<toots/></doc>", "illegal character code U+0007"},
+	{"<?xml version=\"1.0\"?><doc \x12='value'>what's up</doc>", "expected attribute name in element"},
+	{"<doc>&abc\x01;</doc>", "invalid character entity &abc (no semicolon)"},
+	{"<doc>&\x01;</doc>", "invalid character entity & (no semicolon)"},
+	{"<doc>&\xef\xbf\xbe;</doc>", "invalid character entity &\uFFFE;"},
+	{"<doc>&hello;</doc>", "invalid character entity &hello;"},
+}
+
+func TestDisallowedCharacters(t *testing.T) {
+	for i, tt := range characterTests {
+		d := NewDecoder(strings.NewReader(tt.in))
+		var err error
+
+		for err == nil {
+			_, err = d.Token()
+		}
+		synerr, ok := err.(*SyntaxError)
+		if !ok {
+			t.Fatalf("input %d d.Token() = _, %v, want _, *SyntaxError", i, err)
+		}
+		if synerr.Msg != tt.err {
+			t.Fatalf("input %d synerr.Msg wrong: want %q, got %q", i, tt.err, synerr.Msg)
+		}
+	}
+}
+
+type procInstEncodingTest struct {
+	expect, got string
+}
+
+var procInstTests = []struct {
+	input, expect string
+}{
+	{`version="1.0" encoding="utf-8"`, "utf-8"},
+	{`version="1.0" encoding='utf-8'`, "utf-8"},
+	{`version="1.0" encoding='utf-8' `, "utf-8"},
+	{`version="1.0" encoding=utf-8`, ""},
+	{`encoding="FOO" `, "FOO"},
+}
+
+func TestProcInstEncoding(t *testing.T) {
+	for _, test := range procInstTests {
+		got := procInstEncoding(test.input)
+		if got != test.expect {
+			t.Errorf("procInstEncoding(%q) = %q; want %q", test.input, got, test.expect)
+		}
+	}
+}
+
+// Ensure that directives with comments include the complete
+// text of any nested directives.
+
+var directivesWithCommentsInput = `
+<!DOCTYPE [<!-- a comment --><!ENTITY rdf "http://www.w3.org/1999/02/22-rdf-syntax-ns#">]>
+<!DOCTYPE [<!ENTITY go "Golang"><!-- a comment-->]>
+<!DOCTYPE <!-> <!> <!----> <!-->--> <!--->--> [<!ENTITY go "Golang"><!-- a comment-->]>
+`
+
+var directivesWithCommentsTokens = []Token{
+	CharData("\n"),
+	Directive(`DOCTYPE [<!ENTITY rdf "http://www.w3.org/1999/02/22-rdf-syntax-ns#">]`),
+	CharData("\n"),
+	Directive(`DOCTYPE [<!ENTITY go "Golang">]`),
+	CharData("\n"),
+	Directive(`DOCTYPE <!-> <!>    [<!ENTITY go "Golang">]`),
+	CharData("\n"),
+}
+
+func TestDirectivesWithComments(t *testing.T) {
+	d := NewDecoder(strings.NewReader(directivesWithCommentsInput))
+
+	for i, want := range directivesWithCommentsTokens {
+		have, err := d.Token()
+		if err != nil {
+			t.Fatalf("token %d: unexpected error: %s", i, err)
+		}
+		if !reflect.DeepEqual(have, want) {
+			t.Errorf("token %d = %#v want %#v", i, have, want)
+		}
+	}
+}
+
+// Writer whose Write method always returns an error.
+type errWriter struct{} + +func (errWriter) Write(p []byte) (n int, err error) { return 0, fmt.Errorf("unwritable") } + +func TestEscapeTextIOErrors(t *testing.T) { + expectErr := "unwritable" + err := EscapeText(errWriter{}, []byte{'A'}) + + if err == nil || err.Error() != expectErr { + t.Errorf("have %v, want %v", err, expectErr) + } +} + +func TestEscapeTextInvalidChar(t *testing.T) { + input := []byte("A \x00 terminated string.") + expected := "A \uFFFD terminated string." + + buff := new(bytes.Buffer) + if err := EscapeText(buff, input); err != nil { + t.Fatalf("have %v, want nil", err) + } + text := buff.String() + + if text != expected { + t.Errorf("have %v, want %v", text, expected) + } +} + +func TestIssue5880(t *testing.T) { + type T []byte + data, err := Marshal(T{192, 168, 0, 1}) + if err != nil { + t.Errorf("Marshal error: %v", err) + } + if !utf8.Valid(data) { + t.Errorf("Marshal generated invalid UTF-8: %x", data) + } +} diff --git a/vendor/github.com/xanzy/go-cloudstack/cloudstack/APIDiscoveryService.go b/vendor/github.com/xanzy/go-cloudstack/cloudstack/APIDiscoveryService.go new file mode 100644 index 000000000..0802f3351 --- /dev/null +++ b/vendor/github.com/xanzy/go-cloudstack/cloudstack/APIDiscoveryService.go @@ -0,0 +1,96 @@ +// +// Copyright 2014, Sander van Harmelen +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// + +package cloudstack + +import ( + "encoding/json" + "net/url" +) + +type ListApisParams struct { + p map[string]interface{} +} + +func (p *ListApisParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["name"]; found { + u.Set("name", v.(string)) + } + return u +} + +func (p *ListApisParams) SetName(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["name"] = v + return +} + +// You should always use this function to get a new ListApisParams instance, +// as then you are sure you have configured all required params +func (s *APIDiscoveryService) NewListApisParams() *ListApisParams { + p := &ListApisParams{} + p.p = make(map[string]interface{}) + return p +} + +// lists all available apis on the server, provided by the Api Discovery plugin +func (s *APIDiscoveryService) ListApis(p *ListApisParams) (*ListApisResponse, error) { + resp, err := s.cs.newRequest("listApis", p.toURLValues()) + if err != nil { + return nil, err + } + + var r ListApisResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + return &r, nil +} + +type ListApisResponse struct { + Count int `json:"count"` + Apis []*Api `json:"api"` +} + +type Api struct { + Description string `json:"description,omitempty"` + Isasync bool `json:"isasync,omitempty"` + Name string `json:"name,omitempty"` + Params []struct { + Description string `json:"description,omitempty"` + Length int `json:"length,omitempty"` + Name string `json:"name,omitempty"` + Related string `json:"related,omitempty"` + Required bool `json:"required,omitempty"` + Since string `json:"since,omitempty"` + Type string `json:"type,omitempty"` + } `json:"params,omitempty"` + Related string `json:"related,omitempty"` + Response []struct { + Description string `json:"description,omitempty"` + Name string `json:"name,omitempty"` + Response []string `json:"response,omitempty"` + Type string `json:"type,omitempty"` + } `json:"response,omitempty"` + Since string `json:"since,omitempty"` + Type string `json:"type,omitempty"` +} diff --git a/vendor/github.com/xanzy/go-cloudstack/cloudstack/AccountService.go b/vendor/github.com/xanzy/go-cloudstack/cloudstack/AccountService.go new file mode 100644 index 000000000..3c55ac0a5 --- /dev/null +++ b/vendor/github.com/xanzy/go-cloudstack/cloudstack/AccountService.go @@ -0,0 +1,1833 @@ +// +// Copyright 2014, Sander van Harmelen +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// + +package cloudstack + +import ( + "encoding/json" + "fmt" + "net/url" + "strconv" + "strings" +) + +type CreateAccountParams struct { + p map[string]interface{} +} + +func (p *CreateAccountParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["account"]; found { + u.Set("account", v.(string)) + } + if v, found := p.p["accountdetails"]; found { + i := 0 + for k, vv := range v.(map[string]string) { + u.Set(fmt.Sprintf("accountdetails[%d].key", i), k) + u.Set(fmt.Sprintf("accountdetails[%d].value", i), vv) + i++ + } + } + if v, found := p.p["accountid"]; found { + u.Set("accountid", v.(string)) + } + if v, found := p.p["accounttype"]; found { + vv := strconv.Itoa(v.(int)) + u.Set("accounttype", vv) + } + if v, found := p.p["domainid"]; found { + u.Set("domainid", v.(string)) + } + if v, found := p.p["email"]; found { + u.Set("email", v.(string)) + } + if v, found := p.p["firstname"]; found { + u.Set("firstname", v.(string)) + } + if v, found := p.p["lastname"]; found { + u.Set("lastname", v.(string)) + } + if v, found := p.p["networkdomain"]; found { + u.Set("networkdomain", v.(string)) + } + if v, found := p.p["password"]; found { + u.Set("password", v.(string)) + } + if v, found := p.p["timezone"]; found { + u.Set("timezone", v.(string)) + } + if v, found := p.p["userid"]; found { + u.Set("userid", v.(string)) + } + if v, found := p.p["username"]; found { + u.Set("username", v.(string)) + } + return u +} + +func (p *CreateAccountParams) SetAccount(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["account"] = v + return +} + +func (p *CreateAccountParams) SetAccountdetails(v map[string]string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["accountdetails"] = v + return +} + +func (p *CreateAccountParams) SetAccountid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["accountid"] = v + return +} + +func (p *CreateAccountParams) SetAccounttype(v int) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["accounttype"] = v + return +} + +func (p *CreateAccountParams) SetDomainid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["domainid"] = v + return +} + +func (p *CreateAccountParams) SetEmail(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["email"] = v + return +} + +func (p *CreateAccountParams) SetFirstname(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["firstname"] = v + return +} + +func (p *CreateAccountParams) SetLastname(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["lastname"] = v + return +} + +func (p *CreateAccountParams) SetNetworkdomain(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["networkdomain"] = v + return +} + +func (p *CreateAccountParams) SetPassword(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["password"] = v + return +} + +func (p *CreateAccountParams) SetTimezone(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["timezone"] = v + return +} + +func (p *CreateAccountParams) SetUserid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["userid"] = v + return +} + +func (p *CreateAccountParams) SetUsername(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["username"] = v + return +} + +// You should always use this function to get a new CreateAccountParams instance, +// 
as then you are sure you have configured all required params +func (s *AccountService) NewCreateAccountParams(accounttype int, email string, firstname string, lastname string, password string, username string) *CreateAccountParams { + p := &CreateAccountParams{} + p.p = make(map[string]interface{}) + p.p["accounttype"] = accounttype + p.p["email"] = email + p.p["firstname"] = firstname + p.p["lastname"] = lastname + p.p["password"] = password + p.p["username"] = username + return p +} + +// Creates an account +func (s *AccountService) CreateAccount(p *CreateAccountParams) (*CreateAccountResponse, error) { + resp, err := s.cs.newRequest("createAccount", p.toURLValues()) + if err != nil { + return nil, err + } + + var r CreateAccountResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + return &r, nil +} + +type CreateAccountResponse struct { + Accountdetails map[string]string `json:"accountdetails,omitempty"` + Accounttype int `json:"accounttype,omitempty"` + Cpuavailable string `json:"cpuavailable,omitempty"` + Cpulimit string `json:"cpulimit,omitempty"` + Cputotal int64 `json:"cputotal,omitempty"` + Defaultzoneid string `json:"defaultzoneid,omitempty"` + Domain string `json:"domain,omitempty"` + Domainid string `json:"domainid,omitempty"` + Groups []string `json:"groups,omitempty"` + Id string `json:"id,omitempty"` + Ipavailable string `json:"ipavailable,omitempty"` + Iplimit string `json:"iplimit,omitempty"` + Iptotal int64 `json:"iptotal,omitempty"` + Iscleanuprequired bool `json:"iscleanuprequired,omitempty"` + Isdefault bool `json:"isdefault,omitempty"` + Memoryavailable string `json:"memoryavailable,omitempty"` + Memorylimit string `json:"memorylimit,omitempty"` + Memorytotal int64 `json:"memorytotal,omitempty"` + Name string `json:"name,omitempty"` + Networkavailable string `json:"networkavailable,omitempty"` + Networkdomain string `json:"networkdomain,omitempty"` + Networklimit string `json:"networklimit,omitempty"` + Networktotal int64 `json:"networktotal,omitempty"` + Primarystorageavailable string `json:"primarystorageavailable,omitempty"` + Primarystoragelimit string `json:"primarystoragelimit,omitempty"` + Primarystoragetotal int64 `json:"primarystoragetotal,omitempty"` + Projectavailable string `json:"projectavailable,omitempty"` + Projectlimit string `json:"projectlimit,omitempty"` + Projecttotal int64 `json:"projecttotal,omitempty"` + Receivedbytes int64 `json:"receivedbytes,omitempty"` + Secondarystorageavailable string `json:"secondarystorageavailable,omitempty"` + Secondarystoragelimit string `json:"secondarystoragelimit,omitempty"` + Secondarystoragetotal int64 `json:"secondarystoragetotal,omitempty"` + Sentbytes int64 `json:"sentbytes,omitempty"` + Snapshotavailable string `json:"snapshotavailable,omitempty"` + Snapshotlimit string `json:"snapshotlimit,omitempty"` + Snapshottotal int64 `json:"snapshottotal,omitempty"` + State string `json:"state,omitempty"` + Templateavailable string `json:"templateavailable,omitempty"` + Templatelimit string `json:"templatelimit,omitempty"` + Templatetotal int64 `json:"templatetotal,omitempty"` + User []struct { + Account string `json:"account,omitempty"` + Accountid string `json:"accountid,omitempty"` + Accounttype int `json:"accounttype,omitempty"` + Apikey string `json:"apikey,omitempty"` + Created string `json:"created,omitempty"` + Domain string `json:"domain,omitempty"` + Domainid string `json:"domainid,omitempty"` + Email string `json:"email,omitempty"` + Firstname string 
`json:"firstname,omitempty"` + Id string `json:"id,omitempty"` + Iscallerchilddomain bool `json:"iscallerchilddomain,omitempty"` + Isdefault bool `json:"isdefault,omitempty"` + Lastname string `json:"lastname,omitempty"` + Secretkey string `json:"secretkey,omitempty"` + State string `json:"state,omitempty"` + Timezone string `json:"timezone,omitempty"` + Username string `json:"username,omitempty"` + } `json:"user,omitempty"` + Vmavailable string `json:"vmavailable,omitempty"` + Vmlimit string `json:"vmlimit,omitempty"` + Vmrunning int `json:"vmrunning,omitempty"` + Vmstopped int `json:"vmstopped,omitempty"` + Vmtotal int64 `json:"vmtotal,omitempty"` + Volumeavailable string `json:"volumeavailable,omitempty"` + Volumelimit string `json:"volumelimit,omitempty"` + Volumetotal int64 `json:"volumetotal,omitempty"` + Vpcavailable string `json:"vpcavailable,omitempty"` + Vpclimit string `json:"vpclimit,omitempty"` + Vpctotal int64 `json:"vpctotal,omitempty"` +} + +type DeleteAccountParams struct { + p map[string]interface{} +} + +func (p *DeleteAccountParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["id"]; found { + u.Set("id", v.(string)) + } + return u +} + +func (p *DeleteAccountParams) SetId(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["id"] = v + return +} + +// You should always use this function to get a new DeleteAccountParams instance, +// as then you are sure you have configured all required params +func (s *AccountService) NewDeleteAccountParams(id string) *DeleteAccountParams { + p := &DeleteAccountParams{} + p.p = make(map[string]interface{}) + p.p["id"] = id + return p +} + +// Deletes a account, and all users associated with this account +func (s *AccountService) DeleteAccount(p *DeleteAccountParams) (*DeleteAccountResponse, error) { + resp, err := s.cs.newRequest("deleteAccount", p.toURLValues()) + if err != nil { + return nil, err + } + + var r DeleteAccountResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + + // If we have a async client, we need to wait for the async result + if s.cs.async { + b, err := s.cs.GetAsyncJobResult(r.JobID, s.cs.timeout) + if err != nil { + if err == AsyncTimeoutErr { + return &r, err + } + return nil, err + } + + if err := json.Unmarshal(b, &r); err != nil { + return nil, err + } + } + return &r, nil +} + +type DeleteAccountResponse struct { + JobID string `json:"jobid,omitempty"` + Displaytext string `json:"displaytext,omitempty"` + Success bool `json:"success,omitempty"` +} + +type UpdateAccountParams struct { + p map[string]interface{} +} + +func (p *UpdateAccountParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["account"]; found { + u.Set("account", v.(string)) + } + if v, found := p.p["accountdetails"]; found { + i := 0 + for k, vv := range v.(map[string]string) { + u.Set(fmt.Sprintf("accountdetails[%d].key", i), k) + u.Set(fmt.Sprintf("accountdetails[%d].value", i), vv) + i++ + } + } + if v, found := p.p["domainid"]; found { + u.Set("domainid", v.(string)) + } + if v, found := p.p["id"]; found { + u.Set("id", v.(string)) + } + if v, found := p.p["networkdomain"]; found { + u.Set("networkdomain", v.(string)) + } + if v, found := p.p["newname"]; found { + u.Set("newname", v.(string)) + } + return u +} + +func (p *UpdateAccountParams) SetAccount(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["account"] = v + return +} + +func (p 
*UpdateAccountParams) SetAccountdetails(v map[string]string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["accountdetails"] = v + return +} + +func (p *UpdateAccountParams) SetDomainid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["domainid"] = v + return +} + +func (p *UpdateAccountParams) SetId(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["id"] = v + return +} + +func (p *UpdateAccountParams) SetNetworkdomain(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["networkdomain"] = v + return +} + +func (p *UpdateAccountParams) SetNewname(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["newname"] = v + return +} + +// You should always use this function to get a new UpdateAccountParams instance, +// as then you are sure you have configured all required params +func (s *AccountService) NewUpdateAccountParams(newname string) *UpdateAccountParams { + p := &UpdateAccountParams{} + p.p = make(map[string]interface{}) + p.p["newname"] = newname + return p +} + +// Updates account information for the authenticated user +func (s *AccountService) UpdateAccount(p *UpdateAccountParams) (*UpdateAccountResponse, error) { + resp, err := s.cs.newRequest("updateAccount", p.toURLValues()) + if err != nil { + return nil, err + } + + var r UpdateAccountResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + return &r, nil +} + +type UpdateAccountResponse struct { + Accountdetails map[string]string `json:"accountdetails,omitempty"` + Accounttype int `json:"accounttype,omitempty"` + Cpuavailable string `json:"cpuavailable,omitempty"` + Cpulimit string `json:"cpulimit,omitempty"` + Cputotal int64 `json:"cputotal,omitempty"` + Defaultzoneid string `json:"defaultzoneid,omitempty"` + Domain string `json:"domain,omitempty"` + Domainid string `json:"domainid,omitempty"` + Groups []string `json:"groups,omitempty"` + Id string `json:"id,omitempty"` + Ipavailable string `json:"ipavailable,omitempty"` + Iplimit string `json:"iplimit,omitempty"` + Iptotal int64 `json:"iptotal,omitempty"` + Iscleanuprequired bool `json:"iscleanuprequired,omitempty"` + Isdefault bool `json:"isdefault,omitempty"` + Memoryavailable string `json:"memoryavailable,omitempty"` + Memorylimit string `json:"memorylimit,omitempty"` + Memorytotal int64 `json:"memorytotal,omitempty"` + Name string `json:"name,omitempty"` + Networkavailable string `json:"networkavailable,omitempty"` + Networkdomain string `json:"networkdomain,omitempty"` + Networklimit string `json:"networklimit,omitempty"` + Networktotal int64 `json:"networktotal,omitempty"` + Primarystorageavailable string `json:"primarystorageavailable,omitempty"` + Primarystoragelimit string `json:"primarystoragelimit,omitempty"` + Primarystoragetotal int64 `json:"primarystoragetotal,omitempty"` + Projectavailable string `json:"projectavailable,omitempty"` + Projectlimit string `json:"projectlimit,omitempty"` + Projecttotal int64 `json:"projecttotal,omitempty"` + Receivedbytes int64 `json:"receivedbytes,omitempty"` + Secondarystorageavailable string `json:"secondarystorageavailable,omitempty"` + Secondarystoragelimit string `json:"secondarystoragelimit,omitempty"` + Secondarystoragetotal int64 `json:"secondarystoragetotal,omitempty"` + Sentbytes int64 `json:"sentbytes,omitempty"` + Snapshotavailable string `json:"snapshotavailable,omitempty"` + Snapshotlimit string `json:"snapshotlimit,omitempty"` + Snapshottotal int64 
`json:"snapshottotal,omitempty"` + State string `json:"state,omitempty"` + Templateavailable string `json:"templateavailable,omitempty"` + Templatelimit string `json:"templatelimit,omitempty"` + Templatetotal int64 `json:"templatetotal,omitempty"` + User []struct { + Account string `json:"account,omitempty"` + Accountid string `json:"accountid,omitempty"` + Accounttype int `json:"accounttype,omitempty"` + Apikey string `json:"apikey,omitempty"` + Created string `json:"created,omitempty"` + Domain string `json:"domain,omitempty"` + Domainid string `json:"domainid,omitempty"` + Email string `json:"email,omitempty"` + Firstname string `json:"firstname,omitempty"` + Id string `json:"id,omitempty"` + Iscallerchilddomain bool `json:"iscallerchilddomain,omitempty"` + Isdefault bool `json:"isdefault,omitempty"` + Lastname string `json:"lastname,omitempty"` + Secretkey string `json:"secretkey,omitempty"` + State string `json:"state,omitempty"` + Timezone string `json:"timezone,omitempty"` + Username string `json:"username,omitempty"` + } `json:"user,omitempty"` + Vmavailable string `json:"vmavailable,omitempty"` + Vmlimit string `json:"vmlimit,omitempty"` + Vmrunning int `json:"vmrunning,omitempty"` + Vmstopped int `json:"vmstopped,omitempty"` + Vmtotal int64 `json:"vmtotal,omitempty"` + Volumeavailable string `json:"volumeavailable,omitempty"` + Volumelimit string `json:"volumelimit,omitempty"` + Volumetotal int64 `json:"volumetotal,omitempty"` + Vpcavailable string `json:"vpcavailable,omitempty"` + Vpclimit string `json:"vpclimit,omitempty"` + Vpctotal int64 `json:"vpctotal,omitempty"` +} + +type DisableAccountParams struct { + p map[string]interface{} +} + +func (p *DisableAccountParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["account"]; found { + u.Set("account", v.(string)) + } + if v, found := p.p["domainid"]; found { + u.Set("domainid", v.(string)) + } + if v, found := p.p["id"]; found { + u.Set("id", v.(string)) + } + if v, found := p.p["lock"]; found { + vv := strconv.FormatBool(v.(bool)) + u.Set("lock", vv) + } + return u +} + +func (p *DisableAccountParams) SetAccount(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["account"] = v + return +} + +func (p *DisableAccountParams) SetDomainid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["domainid"] = v + return +} + +func (p *DisableAccountParams) SetId(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["id"] = v + return +} + +func (p *DisableAccountParams) SetLock(v bool) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["lock"] = v + return +} + +// You should always use this function to get a new DisableAccountParams instance, +// as then you are sure you have configured all required params +func (s *AccountService) NewDisableAccountParams(lock bool) *DisableAccountParams { + p := &DisableAccountParams{} + p.p = make(map[string]interface{}) + p.p["lock"] = lock + return p +} + +// Disables an account +func (s *AccountService) DisableAccount(p *DisableAccountParams) (*DisableAccountResponse, error) { + resp, err := s.cs.newRequest("disableAccount", p.toURLValues()) + if err != nil { + return nil, err + } + + var r DisableAccountResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + + // If we have a async client, we need to wait for the async result + if s.cs.async { + b, err := s.cs.GetAsyncJobResult(r.JobID, s.cs.timeout) + if err != nil { + if 
err == AsyncTimeoutErr { + return &r, err + } + return nil, err + } + + b, err = getRawValue(b) + if err != nil { + return nil, err + } + + if err := json.Unmarshal(b, &r); err != nil { + return nil, err + } + } + return &r, nil +} + +type DisableAccountResponse struct { + JobID string `json:"jobid,omitempty"` + Accountdetails map[string]string `json:"accountdetails,omitempty"` + Accounttype int `json:"accounttype,omitempty"` + Cpuavailable string `json:"cpuavailable,omitempty"` + Cpulimit string `json:"cpulimit,omitempty"` + Cputotal int64 `json:"cputotal,omitempty"` + Defaultzoneid string `json:"defaultzoneid,omitempty"` + Domain string `json:"domain,omitempty"` + Domainid string `json:"domainid,omitempty"` + Groups []string `json:"groups,omitempty"` + Id string `json:"id,omitempty"` + Ipavailable string `json:"ipavailable,omitempty"` + Iplimit string `json:"iplimit,omitempty"` + Iptotal int64 `json:"iptotal,omitempty"` + Iscleanuprequired bool `json:"iscleanuprequired,omitempty"` + Isdefault bool `json:"isdefault,omitempty"` + Memoryavailable string `json:"memoryavailable,omitempty"` + Memorylimit string `json:"memorylimit,omitempty"` + Memorytotal int64 `json:"memorytotal,omitempty"` + Name string `json:"name,omitempty"` + Networkavailable string `json:"networkavailable,omitempty"` + Networkdomain string `json:"networkdomain,omitempty"` + Networklimit string `json:"networklimit,omitempty"` + Networktotal int64 `json:"networktotal,omitempty"` + Primarystorageavailable string `json:"primarystorageavailable,omitempty"` + Primarystoragelimit string `json:"primarystoragelimit,omitempty"` + Primarystoragetotal int64 `json:"primarystoragetotal,omitempty"` + Projectavailable string `json:"projectavailable,omitempty"` + Projectlimit string `json:"projectlimit,omitempty"` + Projecttotal int64 `json:"projecttotal,omitempty"` + Receivedbytes int64 `json:"receivedbytes,omitempty"` + Secondarystorageavailable string `json:"secondarystorageavailable,omitempty"` + Secondarystoragelimit string `json:"secondarystoragelimit,omitempty"` + Secondarystoragetotal int64 `json:"secondarystoragetotal,omitempty"` + Sentbytes int64 `json:"sentbytes,omitempty"` + Snapshotavailable string `json:"snapshotavailable,omitempty"` + Snapshotlimit string `json:"snapshotlimit,omitempty"` + Snapshottotal int64 `json:"snapshottotal,omitempty"` + State string `json:"state,omitempty"` + Templateavailable string `json:"templateavailable,omitempty"` + Templatelimit string `json:"templatelimit,omitempty"` + Templatetotal int64 `json:"templatetotal,omitempty"` + User []struct { + Account string `json:"account,omitempty"` + Accountid string `json:"accountid,omitempty"` + Accounttype int `json:"accounttype,omitempty"` + Apikey string `json:"apikey,omitempty"` + Created string `json:"created,omitempty"` + Domain string `json:"domain,omitempty"` + Domainid string `json:"domainid,omitempty"` + Email string `json:"email,omitempty"` + Firstname string `json:"firstname,omitempty"` + Id string `json:"id,omitempty"` + Iscallerchilddomain bool `json:"iscallerchilddomain,omitempty"` + Isdefault bool `json:"isdefault,omitempty"` + Lastname string `json:"lastname,omitempty"` + Secretkey string `json:"secretkey,omitempty"` + State string `json:"state,omitempty"` + Timezone string `json:"timezone,omitempty"` + Username string `json:"username,omitempty"` + } `json:"user,omitempty"` + Vmavailable string `json:"vmavailable,omitempty"` + Vmlimit string `json:"vmlimit,omitempty"` + Vmrunning int `json:"vmrunning,omitempty"` + Vmstopped int 
`json:"vmstopped,omitempty"` + Vmtotal int64 `json:"vmtotal,omitempty"` + Volumeavailable string `json:"volumeavailable,omitempty"` + Volumelimit string `json:"volumelimit,omitempty"` + Volumetotal int64 `json:"volumetotal,omitempty"` + Vpcavailable string `json:"vpcavailable,omitempty"` + Vpclimit string `json:"vpclimit,omitempty"` + Vpctotal int64 `json:"vpctotal,omitempty"` +} + +type EnableAccountParams struct { + p map[string]interface{} +} + +func (p *EnableAccountParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["account"]; found { + u.Set("account", v.(string)) + } + if v, found := p.p["domainid"]; found { + u.Set("domainid", v.(string)) + } + if v, found := p.p["id"]; found { + u.Set("id", v.(string)) + } + return u +} + +func (p *EnableAccountParams) SetAccount(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["account"] = v + return +} + +func (p *EnableAccountParams) SetDomainid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["domainid"] = v + return +} + +func (p *EnableAccountParams) SetId(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["id"] = v + return +} + +// You should always use this function to get a new EnableAccountParams instance, +// as then you are sure you have configured all required params +func (s *AccountService) NewEnableAccountParams() *EnableAccountParams { + p := &EnableAccountParams{} + p.p = make(map[string]interface{}) + return p +} + +// Enables an account +func (s *AccountService) EnableAccount(p *EnableAccountParams) (*EnableAccountResponse, error) { + resp, err := s.cs.newRequest("enableAccount", p.toURLValues()) + if err != nil { + return nil, err + } + + var r EnableAccountResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + return &r, nil +} + +type EnableAccountResponse struct { + Accountdetails map[string]string `json:"accountdetails,omitempty"` + Accounttype int `json:"accounttype,omitempty"` + Cpuavailable string `json:"cpuavailable,omitempty"` + Cpulimit string `json:"cpulimit,omitempty"` + Cputotal int64 `json:"cputotal,omitempty"` + Defaultzoneid string `json:"defaultzoneid,omitempty"` + Domain string `json:"domain,omitempty"` + Domainid string `json:"domainid,omitempty"` + Groups []string `json:"groups,omitempty"` + Id string `json:"id,omitempty"` + Ipavailable string `json:"ipavailable,omitempty"` + Iplimit string `json:"iplimit,omitempty"` + Iptotal int64 `json:"iptotal,omitempty"` + Iscleanuprequired bool `json:"iscleanuprequired,omitempty"` + Isdefault bool `json:"isdefault,omitempty"` + Memoryavailable string `json:"memoryavailable,omitempty"` + Memorylimit string `json:"memorylimit,omitempty"` + Memorytotal int64 `json:"memorytotal,omitempty"` + Name string `json:"name,omitempty"` + Networkavailable string `json:"networkavailable,omitempty"` + Networkdomain string `json:"networkdomain,omitempty"` + Networklimit string `json:"networklimit,omitempty"` + Networktotal int64 `json:"networktotal,omitempty"` + Primarystorageavailable string `json:"primarystorageavailable,omitempty"` + Primarystoragelimit string `json:"primarystoragelimit,omitempty"` + Primarystoragetotal int64 `json:"primarystoragetotal,omitempty"` + Projectavailable string `json:"projectavailable,omitempty"` + Projectlimit string `json:"projectlimit,omitempty"` + Projecttotal int64 `json:"projecttotal,omitempty"` + Receivedbytes int64 `json:"receivedbytes,omitempty"` + Secondarystorageavailable 
string `json:"secondarystorageavailable,omitempty"` + Secondarystoragelimit string `json:"secondarystoragelimit,omitempty"` + Secondarystoragetotal int64 `json:"secondarystoragetotal,omitempty"` + Sentbytes int64 `json:"sentbytes,omitempty"` + Snapshotavailable string `json:"snapshotavailable,omitempty"` + Snapshotlimit string `json:"snapshotlimit,omitempty"` + Snapshottotal int64 `json:"snapshottotal,omitempty"` + State string `json:"state,omitempty"` + Templateavailable string `json:"templateavailable,omitempty"` + Templatelimit string `json:"templatelimit,omitempty"` + Templatetotal int64 `json:"templatetotal,omitempty"` + User []struct { + Account string `json:"account,omitempty"` + Accountid string `json:"accountid,omitempty"` + Accounttype int `json:"accounttype,omitempty"` + Apikey string `json:"apikey,omitempty"` + Created string `json:"created,omitempty"` + Domain string `json:"domain,omitempty"` + Domainid string `json:"domainid,omitempty"` + Email string `json:"email,omitempty"` + Firstname string `json:"firstname,omitempty"` + Id string `json:"id,omitempty"` + Iscallerchilddomain bool `json:"iscallerchilddomain,omitempty"` + Isdefault bool `json:"isdefault,omitempty"` + Lastname string `json:"lastname,omitempty"` + Secretkey string `json:"secretkey,omitempty"` + State string `json:"state,omitempty"` + Timezone string `json:"timezone,omitempty"` + Username string `json:"username,omitempty"` + } `json:"user,omitempty"` + Vmavailable string `json:"vmavailable,omitempty"` + Vmlimit string `json:"vmlimit,omitempty"` + Vmrunning int `json:"vmrunning,omitempty"` + Vmstopped int `json:"vmstopped,omitempty"` + Vmtotal int64 `json:"vmtotal,omitempty"` + Volumeavailable string `json:"volumeavailable,omitempty"` + Volumelimit string `json:"volumelimit,omitempty"` + Volumetotal int64 `json:"volumetotal,omitempty"` + Vpcavailable string `json:"vpcavailable,omitempty"` + Vpclimit string `json:"vpclimit,omitempty"` + Vpctotal int64 `json:"vpctotal,omitempty"` +} + +type LockAccountParams struct { + p map[string]interface{} +} + +func (p *LockAccountParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["account"]; found { + u.Set("account", v.(string)) + } + if v, found := p.p["domainid"]; found { + u.Set("domainid", v.(string)) + } + return u +} + +func (p *LockAccountParams) SetAccount(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["account"] = v + return +} + +func (p *LockAccountParams) SetDomainid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["domainid"] = v + return +} + +// You should always use this function to get a new LockAccountParams instance, +// as then you are sure you have configured all required params +func (s *AccountService) NewLockAccountParams(account string, domainid string) *LockAccountParams { + p := &LockAccountParams{} + p.p = make(map[string]interface{}) + p.p["account"] = account + p.p["domainid"] = domainid + return p +} + +// Locks an account +func (s *AccountService) LockAccount(p *LockAccountParams) (*LockAccountResponse, error) { + resp, err := s.cs.newRequest("lockAccount", p.toURLValues()) + if err != nil { + return nil, err + } + + var r LockAccountResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + return &r, nil +} + +type LockAccountResponse struct { + Accountdetails map[string]string `json:"accountdetails,omitempty"` + Accounttype int `json:"accounttype,omitempty"` + Cpuavailable string 
`json:"cpuavailable,omitempty"` + Cpulimit string `json:"cpulimit,omitempty"` + Cputotal int64 `json:"cputotal,omitempty"` + Defaultzoneid string `json:"defaultzoneid,omitempty"` + Domain string `json:"domain,omitempty"` + Domainid string `json:"domainid,omitempty"` + Groups []string `json:"groups,omitempty"` + Id string `json:"id,omitempty"` + Ipavailable string `json:"ipavailable,omitempty"` + Iplimit string `json:"iplimit,omitempty"` + Iptotal int64 `json:"iptotal,omitempty"` + Iscleanuprequired bool `json:"iscleanuprequired,omitempty"` + Isdefault bool `json:"isdefault,omitempty"` + Memoryavailable string `json:"memoryavailable,omitempty"` + Memorylimit string `json:"memorylimit,omitempty"` + Memorytotal int64 `json:"memorytotal,omitempty"` + Name string `json:"name,omitempty"` + Networkavailable string `json:"networkavailable,omitempty"` + Networkdomain string `json:"networkdomain,omitempty"` + Networklimit string `json:"networklimit,omitempty"` + Networktotal int64 `json:"networktotal,omitempty"` + Primarystorageavailable string `json:"primarystorageavailable,omitempty"` + Primarystoragelimit string `json:"primarystoragelimit,omitempty"` + Primarystoragetotal int64 `json:"primarystoragetotal,omitempty"` + Projectavailable string `json:"projectavailable,omitempty"` + Projectlimit string `json:"projectlimit,omitempty"` + Projecttotal int64 `json:"projecttotal,omitempty"` + Receivedbytes int64 `json:"receivedbytes,omitempty"` + Secondarystorageavailable string `json:"secondarystorageavailable,omitempty"` + Secondarystoragelimit string `json:"secondarystoragelimit,omitempty"` + Secondarystoragetotal int64 `json:"secondarystoragetotal,omitempty"` + Sentbytes int64 `json:"sentbytes,omitempty"` + Snapshotavailable string `json:"snapshotavailable,omitempty"` + Snapshotlimit string `json:"snapshotlimit,omitempty"` + Snapshottotal int64 `json:"snapshottotal,omitempty"` + State string `json:"state,omitempty"` + Templateavailable string `json:"templateavailable,omitempty"` + Templatelimit string `json:"templatelimit,omitempty"` + Templatetotal int64 `json:"templatetotal,omitempty"` + User []struct { + Account string `json:"account,omitempty"` + Accountid string `json:"accountid,omitempty"` + Accounttype int `json:"accounttype,omitempty"` + Apikey string `json:"apikey,omitempty"` + Created string `json:"created,omitempty"` + Domain string `json:"domain,omitempty"` + Domainid string `json:"domainid,omitempty"` + Email string `json:"email,omitempty"` + Firstname string `json:"firstname,omitempty"` + Id string `json:"id,omitempty"` + Iscallerchilddomain bool `json:"iscallerchilddomain,omitempty"` + Isdefault bool `json:"isdefault,omitempty"` + Lastname string `json:"lastname,omitempty"` + Secretkey string `json:"secretkey,omitempty"` + State string `json:"state,omitempty"` + Timezone string `json:"timezone,omitempty"` + Username string `json:"username,omitempty"` + } `json:"user,omitempty"` + Vmavailable string `json:"vmavailable,omitempty"` + Vmlimit string `json:"vmlimit,omitempty"` + Vmrunning int `json:"vmrunning,omitempty"` + Vmstopped int `json:"vmstopped,omitempty"` + Vmtotal int64 `json:"vmtotal,omitempty"` + Volumeavailable string `json:"volumeavailable,omitempty"` + Volumelimit string `json:"volumelimit,omitempty"` + Volumetotal int64 `json:"volumetotal,omitempty"` + Vpcavailable string `json:"vpcavailable,omitempty"` + Vpclimit string `json:"vpclimit,omitempty"` + Vpctotal int64 `json:"vpctotal,omitempty"` +} + +type ListAccountsParams struct { + p map[string]interface{} +} + +func (p 
*ListAccountsParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["accounttype"]; found { + vv := strconv.FormatInt(v.(int64), 10) + u.Set("accounttype", vv) + } + if v, found := p.p["domainid"]; found { + u.Set("domainid", v.(string)) + } + if v, found := p.p["id"]; found { + u.Set("id", v.(string)) + } + if v, found := p.p["iscleanuprequired"]; found { + vv := strconv.FormatBool(v.(bool)) + u.Set("iscleanuprequired", vv) + } + if v, found := p.p["isrecursive"]; found { + vv := strconv.FormatBool(v.(bool)) + u.Set("isrecursive", vv) + } + if v, found := p.p["keyword"]; found { + u.Set("keyword", v.(string)) + } + if v, found := p.p["listall"]; found { + vv := strconv.FormatBool(v.(bool)) + u.Set("listall", vv) + } + if v, found := p.p["name"]; found { + u.Set("name", v.(string)) + } + if v, found := p.p["page"]; found { + vv := strconv.Itoa(v.(int)) + u.Set("page", vv) + } + if v, found := p.p["pagesize"]; found { + vv := strconv.Itoa(v.(int)) + u.Set("pagesize", vv) + } + if v, found := p.p["state"]; found { + u.Set("state", v.(string)) + } + return u +} + +func (p *ListAccountsParams) SetAccounttype(v int64) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["accounttype"] = v + return +} + +func (p *ListAccountsParams) SetDomainid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["domainid"] = v + return +} + +func (p *ListAccountsParams) SetId(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["id"] = v + return +} + +func (p *ListAccountsParams) SetIscleanuprequired(v bool) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["iscleanuprequired"] = v + return +} + +func (p *ListAccountsParams) SetIsrecursive(v bool) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["isrecursive"] = v + return +} + +func (p *ListAccountsParams) SetKeyword(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["keyword"] = v + return +} + +func (p *ListAccountsParams) SetListall(v bool) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["listall"] = v + return +} + +func (p *ListAccountsParams) SetName(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["name"] = v + return +} + +func (p *ListAccountsParams) SetPage(v int) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["page"] = v + return +} + +func (p *ListAccountsParams) SetPagesize(v int) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["pagesize"] = v + return +} + +func (p *ListAccountsParams) SetState(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["state"] = v + return +} + +// You should always use this function to get a new ListAccountsParams instance, +// as then you are sure you have configured all required params +func (s *AccountService) NewListAccountsParams() *ListAccountsParams { + p := &ListAccountsParams{} + p.p = make(map[string]interface{}) + return p +} + +// This is a courtesy helper function, which in some cases may not work as expected! 
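+//
+// Editor's sketch (illustrative; "client" below stands for any configured
+// *CloudStackClient): the intended pattern is to resolve a human-readable
+// name to an ID before making further calls, e.g.
+//
+//	id, err := client.Account.GetAccountID("some-account")
+//	if err != nil {
+//		// no match, ambiguous name, or API error
+//	}
+//
+// When several accounts are returned, the loop below settles for the first
+// exact name match, so lookups by a non-unique name can still fail.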
+func (s *AccountService) GetAccountID(name string) (string, error) { + p := &ListAccountsParams{} + p.p = make(map[string]interface{}) + + p.p["name"] = name + + l, err := s.ListAccounts(p) + if err != nil { + return "", err + } + + if l.Count == 0 { + return "", fmt.Errorf("No match found for %s: %+v", name, l) + } + + if l.Count == 1 { + return l.Accounts[0].Id, nil + } + + if l.Count > 1 { + for _, v := range l.Accounts { + if v.Name == name { + return v.Id, nil + } + } + } + return "", fmt.Errorf("Could not find an exact match for %s: %+v", name, l) +} + +// This is a courtesy helper function, which in some cases may not work as expected! +func (s *AccountService) GetAccountByName(name string) (*Account, int, error) { + id, err := s.GetAccountID(name) + if err != nil { + return nil, -1, err + } + + r, count, err := s.GetAccountByID(id) + if err != nil { + return nil, count, err + } + return r, count, nil +} + +// This is a courtesy helper function, which in some cases may not work as expected! +func (s *AccountService) GetAccountByID(id string) (*Account, int, error) { + p := &ListAccountsParams{} + p.p = make(map[string]interface{}) + + p.p["id"] = id + + l, err := s.ListAccounts(p) + if err != nil { + if strings.Contains(err.Error(), fmt.Sprintf( + "Invalid parameter id value=%s due to incorrect long value format, "+ + "or entity does not exist", id)) { + return nil, 0, fmt.Errorf("No match found for %s: %+v", id, l) + } + return nil, -1, err + } + + if l.Count == 0 { + return nil, l.Count, fmt.Errorf("No match found for %s: %+v", id, l) + } + + if l.Count == 1 { + return l.Accounts[0], l.Count, nil + } + return nil, l.Count, fmt.Errorf("There is more then one result for Account UUID: %s!", id) +} + +// Lists accounts and provides detailed account information for listed accounts +func (s *AccountService) ListAccounts(p *ListAccountsParams) (*ListAccountsResponse, error) { + resp, err := s.cs.newRequest("listAccounts", p.toURLValues()) + if err != nil { + return nil, err + } + + var r ListAccountsResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + return &r, nil +} + +type ListAccountsResponse struct { + Count int `json:"count"` + Accounts []*Account `json:"account"` +} + +type Account struct { + Accountdetails map[string]string `json:"accountdetails,omitempty"` + Accounttype int `json:"accounttype,omitempty"` + Cpuavailable string `json:"cpuavailable,omitempty"` + Cpulimit string `json:"cpulimit,omitempty"` + Cputotal int64 `json:"cputotal,omitempty"` + Defaultzoneid string `json:"defaultzoneid,omitempty"` + Domain string `json:"domain,omitempty"` + Domainid string `json:"domainid,omitempty"` + Groups []string `json:"groups,omitempty"` + Id string `json:"id,omitempty"` + Ipavailable string `json:"ipavailable,omitempty"` + Iplimit string `json:"iplimit,omitempty"` + Iptotal int64 `json:"iptotal,omitempty"` + Iscleanuprequired bool `json:"iscleanuprequired,omitempty"` + Isdefault bool `json:"isdefault,omitempty"` + Memoryavailable string `json:"memoryavailable,omitempty"` + Memorylimit string `json:"memorylimit,omitempty"` + Memorytotal int64 `json:"memorytotal,omitempty"` + Name string `json:"name,omitempty"` + Networkavailable string `json:"networkavailable,omitempty"` + Networkdomain string `json:"networkdomain,omitempty"` + Networklimit string `json:"networklimit,omitempty"` + Networktotal int64 `json:"networktotal,omitempty"` + Primarystorageavailable string `json:"primarystorageavailable,omitempty"` + Primarystoragelimit string 
`json:"primarystoragelimit,omitempty"` + Primarystoragetotal int64 `json:"primarystoragetotal,omitempty"` + Projectavailable string `json:"projectavailable,omitempty"` + Projectlimit string `json:"projectlimit,omitempty"` + Projecttotal int64 `json:"projecttotal,omitempty"` + Receivedbytes int64 `json:"receivedbytes,omitempty"` + Secondarystorageavailable string `json:"secondarystorageavailable,omitempty"` + Secondarystoragelimit string `json:"secondarystoragelimit,omitempty"` + Secondarystoragetotal int64 `json:"secondarystoragetotal,omitempty"` + Sentbytes int64 `json:"sentbytes,omitempty"` + Snapshotavailable string `json:"snapshotavailable,omitempty"` + Snapshotlimit string `json:"snapshotlimit,omitempty"` + Snapshottotal int64 `json:"snapshottotal,omitempty"` + State string `json:"state,omitempty"` + Templateavailable string `json:"templateavailable,omitempty"` + Templatelimit string `json:"templatelimit,omitempty"` + Templatetotal int64 `json:"templatetotal,omitempty"` + User []struct { + Account string `json:"account,omitempty"` + Accountid string `json:"accountid,omitempty"` + Accounttype int `json:"accounttype,omitempty"` + Apikey string `json:"apikey,omitempty"` + Created string `json:"created,omitempty"` + Domain string `json:"domain,omitempty"` + Domainid string `json:"domainid,omitempty"` + Email string `json:"email,omitempty"` + Firstname string `json:"firstname,omitempty"` + Id string `json:"id,omitempty"` + Iscallerchilddomain bool `json:"iscallerchilddomain,omitempty"` + Isdefault bool `json:"isdefault,omitempty"` + Lastname string `json:"lastname,omitempty"` + Secretkey string `json:"secretkey,omitempty"` + State string `json:"state,omitempty"` + Timezone string `json:"timezone,omitempty"` + Username string `json:"username,omitempty"` + } `json:"user,omitempty"` + Vmavailable string `json:"vmavailable,omitempty"` + Vmlimit string `json:"vmlimit,omitempty"` + Vmrunning int `json:"vmrunning,omitempty"` + Vmstopped int `json:"vmstopped,omitempty"` + Vmtotal int64 `json:"vmtotal,omitempty"` + Volumeavailable string `json:"volumeavailable,omitempty"` + Volumelimit string `json:"volumelimit,omitempty"` + Volumetotal int64 `json:"volumetotal,omitempty"` + Vpcavailable string `json:"vpcavailable,omitempty"` + Vpclimit string `json:"vpclimit,omitempty"` + Vpctotal int64 `json:"vpctotal,omitempty"` +} + +type MarkDefaultZoneForAccountParams struct { + p map[string]interface{} +} + +func (p *MarkDefaultZoneForAccountParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["account"]; found { + u.Set("account", v.(string)) + } + if v, found := p.p["domainid"]; found { + u.Set("domainid", v.(string)) + } + if v, found := p.p["zoneid"]; found { + u.Set("zoneid", v.(string)) + } + return u +} + +func (p *MarkDefaultZoneForAccountParams) SetAccount(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["account"] = v + return +} + +func (p *MarkDefaultZoneForAccountParams) SetDomainid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["domainid"] = v + return +} + +func (p *MarkDefaultZoneForAccountParams) SetZoneid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["zoneid"] = v + return +} + +// You should always use this function to get a new MarkDefaultZoneForAccountParams instance, +// as then you are sure you have configured all required params +func (s *AccountService) NewMarkDefaultZoneForAccountParams(account string, domainid string, zoneid string) 
*MarkDefaultZoneForAccountParams { + p := &MarkDefaultZoneForAccountParams{} + p.p = make(map[string]interface{}) + p.p["account"] = account + p.p["domainid"] = domainid + p.p["zoneid"] = zoneid + return p +} + +// Marks a default zone for this account +func (s *AccountService) MarkDefaultZoneForAccount(p *MarkDefaultZoneForAccountParams) (*MarkDefaultZoneForAccountResponse, error) { + resp, err := s.cs.newRequest("markDefaultZoneForAccount", p.toURLValues()) + if err != nil { + return nil, err + } + + var r MarkDefaultZoneForAccountResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + + // If we have a async client, we need to wait for the async result + if s.cs.async { + b, err := s.cs.GetAsyncJobResult(r.JobID, s.cs.timeout) + if err != nil { + if err == AsyncTimeoutErr { + return &r, err + } + return nil, err + } + + b, err = getRawValue(b) + if err != nil { + return nil, err + } + + if err := json.Unmarshal(b, &r); err != nil { + return nil, err + } + } + return &r, nil +} + +type MarkDefaultZoneForAccountResponse struct { + JobID string `json:"jobid,omitempty"` + Accountdetails map[string]string `json:"accountdetails,omitempty"` + Accounttype int `json:"accounttype,omitempty"` + Cpuavailable string `json:"cpuavailable,omitempty"` + Cpulimit string `json:"cpulimit,omitempty"` + Cputotal int64 `json:"cputotal,omitempty"` + Defaultzoneid string `json:"defaultzoneid,omitempty"` + Domain string `json:"domain,omitempty"` + Domainid string `json:"domainid,omitempty"` + Groups []string `json:"groups,omitempty"` + Id string `json:"id,omitempty"` + Ipavailable string `json:"ipavailable,omitempty"` + Iplimit string `json:"iplimit,omitempty"` + Iptotal int64 `json:"iptotal,omitempty"` + Iscleanuprequired bool `json:"iscleanuprequired,omitempty"` + Isdefault bool `json:"isdefault,omitempty"` + Memoryavailable string `json:"memoryavailable,omitempty"` + Memorylimit string `json:"memorylimit,omitempty"` + Memorytotal int64 `json:"memorytotal,omitempty"` + Name string `json:"name,omitempty"` + Networkavailable string `json:"networkavailable,omitempty"` + Networkdomain string `json:"networkdomain,omitempty"` + Networklimit string `json:"networklimit,omitempty"` + Networktotal int64 `json:"networktotal,omitempty"` + Primarystorageavailable string `json:"primarystorageavailable,omitempty"` + Primarystoragelimit string `json:"primarystoragelimit,omitempty"` + Primarystoragetotal int64 `json:"primarystoragetotal,omitempty"` + Projectavailable string `json:"projectavailable,omitempty"` + Projectlimit string `json:"projectlimit,omitempty"` + Projecttotal int64 `json:"projecttotal,omitempty"` + Receivedbytes int64 `json:"receivedbytes,omitempty"` + Secondarystorageavailable string `json:"secondarystorageavailable,omitempty"` + Secondarystoragelimit string `json:"secondarystoragelimit,omitempty"` + Secondarystoragetotal int64 `json:"secondarystoragetotal,omitempty"` + Sentbytes int64 `json:"sentbytes,omitempty"` + Snapshotavailable string `json:"snapshotavailable,omitempty"` + Snapshotlimit string `json:"snapshotlimit,omitempty"` + Snapshottotal int64 `json:"snapshottotal,omitempty"` + State string `json:"state,omitempty"` + Templateavailable string `json:"templateavailable,omitempty"` + Templatelimit string `json:"templatelimit,omitempty"` + Templatetotal int64 `json:"templatetotal,omitempty"` + User []struct { + Account string `json:"account,omitempty"` + Accountid string `json:"accountid,omitempty"` + Accounttype int `json:"accounttype,omitempty"` + Apikey string 
`json:"apikey,omitempty"` + Created string `json:"created,omitempty"` + Domain string `json:"domain,omitempty"` + Domainid string `json:"domainid,omitempty"` + Email string `json:"email,omitempty"` + Firstname string `json:"firstname,omitempty"` + Id string `json:"id,omitempty"` + Iscallerchilddomain bool `json:"iscallerchilddomain,omitempty"` + Isdefault bool `json:"isdefault,omitempty"` + Lastname string `json:"lastname,omitempty"` + Secretkey string `json:"secretkey,omitempty"` + State string `json:"state,omitempty"` + Timezone string `json:"timezone,omitempty"` + Username string `json:"username,omitempty"` + } `json:"user,omitempty"` + Vmavailable string `json:"vmavailable,omitempty"` + Vmlimit string `json:"vmlimit,omitempty"` + Vmrunning int `json:"vmrunning,omitempty"` + Vmstopped int `json:"vmstopped,omitempty"` + Vmtotal int64 `json:"vmtotal,omitempty"` + Volumeavailable string `json:"volumeavailable,omitempty"` + Volumelimit string `json:"volumelimit,omitempty"` + Volumetotal int64 `json:"volumetotal,omitempty"` + Vpcavailable string `json:"vpcavailable,omitempty"` + Vpclimit string `json:"vpclimit,omitempty"` + Vpctotal int64 `json:"vpctotal,omitempty"` +} + +type AddAccountToProjectParams struct { + p map[string]interface{} +} + +func (p *AddAccountToProjectParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["account"]; found { + u.Set("account", v.(string)) + } + if v, found := p.p["email"]; found { + u.Set("email", v.(string)) + } + if v, found := p.p["projectid"]; found { + u.Set("projectid", v.(string)) + } + return u +} + +func (p *AddAccountToProjectParams) SetAccount(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["account"] = v + return +} + +func (p *AddAccountToProjectParams) SetEmail(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["email"] = v + return +} + +func (p *AddAccountToProjectParams) SetProjectid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["projectid"] = v + return +} + +// You should always use this function to get a new AddAccountToProjectParams instance, +// as then you are sure you have configured all required params +func (s *AccountService) NewAddAccountToProjectParams(projectid string) *AddAccountToProjectParams { + p := &AddAccountToProjectParams{} + p.p = make(map[string]interface{}) + p.p["projectid"] = projectid + return p +} + +// Adds account to a project +func (s *AccountService) AddAccountToProject(p *AddAccountToProjectParams) (*AddAccountToProjectResponse, error) { + resp, err := s.cs.newRequest("addAccountToProject", p.toURLValues()) + if err != nil { + return nil, err + } + + var r AddAccountToProjectResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + + // If we have an async client, we need to wait for the async result + if s.cs.async { + b, err := s.cs.GetAsyncJobResult(r.JobID, s.cs.timeout) + if err != nil { + if err == AsyncTimeoutErr { + return &r, err + } + return nil, err + } + + if err := json.Unmarshal(b, &r); err != nil { + return nil, err + } + } + return &r, nil +} + +type AddAccountToProjectResponse struct { + JobID string `json:"jobid,omitempty"` + Displaytext string `json:"displaytext,omitempty"` + Success bool `json:"success,omitempty"` +} + +type DeleteAccountFromProjectParams struct { + p map[string]interface{} +} + +func (p *DeleteAccountFromProjectParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v,
found := p.p["account"]; found { + u.Set("account", v.(string)) + } + if v, found := p.p["projectid"]; found { + u.Set("projectid", v.(string)) + } + return u +} + +func (p *DeleteAccountFromProjectParams) SetAccount(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["account"] = v + return +} + +func (p *DeleteAccountFromProjectParams) SetProjectid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["projectid"] = v + return +} + +// You should always use this function to get a new DeleteAccountFromProjectParams instance, +// as then you are sure you have configured all required params +func (s *AccountService) NewDeleteAccountFromProjectParams(account string, projectid string) *DeleteAccountFromProjectParams { + p := &DeleteAccountFromProjectParams{} + p.p = make(map[string]interface{}) + p.p["account"] = account + p.p["projectid"] = projectid + return p +} + +// Deletes account from the project +func (s *AccountService) DeleteAccountFromProject(p *DeleteAccountFromProjectParams) (*DeleteAccountFromProjectResponse, error) { + resp, err := s.cs.newRequest("deleteAccountFromProject", p.toURLValues()) + if err != nil { + return nil, err + } + + var r DeleteAccountFromProjectResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + + // If we have a async client, we need to wait for the async result + if s.cs.async { + b, err := s.cs.GetAsyncJobResult(r.JobID, s.cs.timeout) + if err != nil { + if err == AsyncTimeoutErr { + return &r, err + } + return nil, err + } + + if err := json.Unmarshal(b, &r); err != nil { + return nil, err + } + } + return &r, nil +} + +type DeleteAccountFromProjectResponse struct { + JobID string `json:"jobid,omitempty"` + Displaytext string `json:"displaytext,omitempty"` + Success bool `json:"success,omitempty"` +} + +type ListProjectAccountsParams struct { + p map[string]interface{} +} + +func (p *ListProjectAccountsParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["account"]; found { + u.Set("account", v.(string)) + } + if v, found := p.p["keyword"]; found { + u.Set("keyword", v.(string)) + } + if v, found := p.p["page"]; found { + vv := strconv.Itoa(v.(int)) + u.Set("page", vv) + } + if v, found := p.p["pagesize"]; found { + vv := strconv.Itoa(v.(int)) + u.Set("pagesize", vv) + } + if v, found := p.p["projectid"]; found { + u.Set("projectid", v.(string)) + } + if v, found := p.p["role"]; found { + u.Set("role", v.(string)) + } + return u +} + +func (p *ListProjectAccountsParams) SetAccount(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["account"] = v + return +} + +func (p *ListProjectAccountsParams) SetKeyword(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["keyword"] = v + return +} + +func (p *ListProjectAccountsParams) SetPage(v int) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["page"] = v + return +} + +func (p *ListProjectAccountsParams) SetPagesize(v int) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["pagesize"] = v + return +} + +func (p *ListProjectAccountsParams) SetProjectid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["projectid"] = v + return +} + +func (p *ListProjectAccountsParams) SetRole(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["role"] = v + return +} + +// You should always use this function to get a new ListProjectAccountsParams instance, +// 
as then you are sure you have configured all required params +func (s *AccountService) NewListProjectAccountsParams(projectid string) *ListProjectAccountsParams { + p := &ListProjectAccountsParams{} + p.p = make(map[string]interface{}) + p.p["projectid"] = projectid + return p +} + +// This is a courtesy helper function, which in some cases may not work as expected! +func (s *AccountService) GetProjectAccountID(keyword string, projectid string) (string, error) { + p := &ListProjectAccountsParams{} + p.p = make(map[string]interface{}) + + p.p["keyword"] = keyword + p.p["projectid"] = projectid + + l, err := s.ListProjectAccounts(p) + if err != nil { + return "", err + } + + if l.Count == 0 { + // If no matches, search all projects + p.p["projectid"] = "-1" + + l, err = s.ListProjectAccounts(p) + if err != nil { + return "", err + } + } + + if l.Count == 0 { + return "", fmt.Errorf("No match found for %s: %+v", keyword, l) + } + + if l.Count == 1 { + return l.ProjectAccounts[0].Id, nil + } + + if l.Count > 1 { + for _, v := range l.ProjectAccounts { + if v.Name == keyword { + return v.Id, nil + } + } + } + return "", fmt.Errorf("Could not find an exact match for %s: %+v", keyword, l) +} + +// Lists project's accounts +func (s *AccountService) ListProjectAccounts(p *ListProjectAccountsParams) (*ListProjectAccountsResponse, error) { + resp, err := s.cs.newRequest("listProjectAccounts", p.toURLValues()) + if err != nil { + return nil, err + } + + var r ListProjectAccountsResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + return &r, nil +} + +type ListProjectAccountsResponse struct { + Count int `json:"count"` + ProjectAccounts []*ProjectAccount `json:"projectaccount"` +} + +type ProjectAccount struct { + Account string `json:"account,omitempty"` + Cpuavailable string `json:"cpuavailable,omitempty"` + Cpulimit string `json:"cpulimit,omitempty"` + Cputotal int64 `json:"cputotal,omitempty"` + Displaytext string `json:"displaytext,omitempty"` + Domain string `json:"domain,omitempty"` + Domainid string `json:"domainid,omitempty"` + Id string `json:"id,omitempty"` + Ipavailable string `json:"ipavailable,omitempty"` + Iplimit string `json:"iplimit,omitempty"` + Iptotal int64 `json:"iptotal,omitempty"` + Memoryavailable string `json:"memoryavailable,omitempty"` + Memorylimit string `json:"memorylimit,omitempty"` + Memorytotal int64 `json:"memorytotal,omitempty"` + Name string `json:"name,omitempty"` + Networkavailable string `json:"networkavailable,omitempty"` + Networklimit string `json:"networklimit,omitempty"` + Networktotal int64 `json:"networktotal,omitempty"` + Primarystorageavailable string `json:"primarystorageavailable,omitempty"` + Primarystoragelimit string `json:"primarystoragelimit,omitempty"` + Primarystoragetotal int64 `json:"primarystoragetotal,omitempty"` + Secondarystorageavailable string `json:"secondarystorageavailable,omitempty"` + Secondarystoragelimit string `json:"secondarystoragelimit,omitempty"` + Secondarystoragetotal int64 `json:"secondarystoragetotal,omitempty"` + Snapshotavailable string `json:"snapshotavailable,omitempty"` + Snapshotlimit string `json:"snapshotlimit,omitempty"` + Snapshottotal int64 `json:"snapshottotal,omitempty"` + State string `json:"state,omitempty"` + Tags []struct { + Account string `json:"account,omitempty"` + Customer string `json:"customer,omitempty"` + Domain string `json:"domain,omitempty"` + Domainid string `json:"domainid,omitempty"` + Key string `json:"key,omitempty"` + Project string 
`json:"project,omitempty"` + Projectid string `json:"projectid,omitempty"` + Resourceid string `json:"resourceid,omitempty"` + Resourcetype string `json:"resourcetype,omitempty"` + Value string `json:"value,omitempty"` + } `json:"tags,omitempty"` + Templateavailable string `json:"templateavailable,omitempty"` + Templatelimit string `json:"templatelimit,omitempty"` + Templatetotal int64 `json:"templatetotal,omitempty"` + Vmavailable string `json:"vmavailable,omitempty"` + Vmlimit string `json:"vmlimit,omitempty"` + Vmrunning int `json:"vmrunning,omitempty"` + Vmstopped int `json:"vmstopped,omitempty"` + Vmtotal int64 `json:"vmtotal,omitempty"` + Volumeavailable string `json:"volumeavailable,omitempty"` + Volumelimit string `json:"volumelimit,omitempty"` + Volumetotal int64 `json:"volumetotal,omitempty"` + Vpcavailable string `json:"vpcavailable,omitempty"` + Vpclimit string `json:"vpclimit,omitempty"` + Vpctotal int64 `json:"vpctotal,omitempty"` +} diff --git a/vendor/github.com/xanzy/go-cloudstack/cloudstack/AddressService.go b/vendor/github.com/xanzy/go-cloudstack/cloudstack/AddressService.go new file mode 100644 index 000000000..fb5c586f7 --- /dev/null +++ b/vendor/github.com/xanzy/go-cloudstack/cloudstack/AddressService.go @@ -0,0 +1,809 @@ +// +// Copyright 2014, Sander van Harmelen +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// + +package cloudstack + +import ( + "encoding/json" + "fmt" + "net/url" + "strconv" + "strings" +) + +type AssociateIpAddressParams struct { + p map[string]interface{} +} + +func (p *AssociateIpAddressParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["account"]; found { + u.Set("account", v.(string)) + } + if v, found := p.p["domainid"]; found { + u.Set("domainid", v.(string)) + } + if v, found := p.p["fordisplay"]; found { + vv := strconv.FormatBool(v.(bool)) + u.Set("fordisplay", vv) + } + if v, found := p.p["isportable"]; found { + vv := strconv.FormatBool(v.(bool)) + u.Set("isportable", vv) + } + if v, found := p.p["networkid"]; found { + u.Set("networkid", v.(string)) + } + if v, found := p.p["projectid"]; found { + u.Set("projectid", v.(string)) + } + if v, found := p.p["regionid"]; found { + vv := strconv.Itoa(v.(int)) + u.Set("regionid", vv) + } + if v, found := p.p["vpcid"]; found { + u.Set("vpcid", v.(string)) + } + if v, found := p.p["zoneid"]; found { + u.Set("zoneid", v.(string)) + } + return u +} + +func (p *AssociateIpAddressParams) SetAccount(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["account"] = v + return +} + +func (p *AssociateIpAddressParams) SetDomainid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["domainid"] = v + return +} + +func (p *AssociateIpAddressParams) SetFordisplay(v bool) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["fordisplay"] = v + return +} + +func (p *AssociateIpAddressParams) SetIsportable(v bool) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["isportable"] = v + return +} + +func (p *AssociateIpAddressParams) SetNetworkid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["networkid"] = v + return +} + +func (p *AssociateIpAddressParams) SetProjectid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["projectid"] = v + return +} + +func (p *AssociateIpAddressParams) SetRegionid(v int) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["regionid"] = v + return +} + +func (p *AssociateIpAddressParams) SetVpcid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["vpcid"] = v + return +} + +func (p *AssociateIpAddressParams) SetZoneid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["zoneid"] = v + return +} + +// You should always use this function to get a new AssociateIpAddressParams instance, +// as then you are sure you have configured all required params +func (s *AddressService) NewAssociateIpAddressParams() *AssociateIpAddressParams { + p := &AssociateIpAddressParams{} + p.p = make(map[string]interface{}) + return p +} + +// Acquires and associates a public IP to an account. 
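+//
+// A hedged usage sketch (assumptions, not part of this file: cs is a client
+// created with an async constructor such as NewAsyncClient, the service is
+// exposed as cs.Address, and "net-uuid" stands in for a real network UUID):
+//
+//	p := cs.Address.NewAssociateIpAddressParams()
+//	p.SetNetworkid("net-uuid")
+//	r, err := cs.Address.AssociateIpAddress(p)
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	fmt.Println("acquired:", r.Ipaddress)
+//
+// With an async client, the call below blocks on GetAsyncJobResult until the
+// job finishes or s.cs.timeout elapses (in which case it returns the partial
+// response together with AsyncTimeoutErr).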
+func (s *AddressService) AssociateIpAddress(p *AssociateIpAddressParams) (*AssociateIpAddressResponse, error) { + resp, err := s.cs.newRequest("associateIpAddress", p.toURLValues()) + if err != nil { + return nil, err + } + + var r AssociateIpAddressResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + + // If we have a async client, we need to wait for the async result + if s.cs.async { + b, err := s.cs.GetAsyncJobResult(r.JobID, s.cs.timeout) + if err != nil { + if err == AsyncTimeoutErr { + return &r, err + } + return nil, err + } + + b, err = getRawValue(b) + if err != nil { + return nil, err + } + + if err := json.Unmarshal(b, &r); err != nil { + return nil, err + } + } + return &r, nil +} + +type AssociateIpAddressResponse struct { + JobID string `json:"jobid,omitempty"` + Account string `json:"account,omitempty"` + Allocated string `json:"allocated,omitempty"` + Associatednetworkid string `json:"associatednetworkid,omitempty"` + Associatednetworkname string `json:"associatednetworkname,omitempty"` + Domain string `json:"domain,omitempty"` + Domainid string `json:"domainid,omitempty"` + Fordisplay bool `json:"fordisplay,omitempty"` + Forvirtualnetwork bool `json:"forvirtualnetwork,omitempty"` + Id string `json:"id,omitempty"` + Ipaddress string `json:"ipaddress,omitempty"` + Isportable bool `json:"isportable,omitempty"` + Issourcenat bool `json:"issourcenat,omitempty"` + Isstaticnat bool `json:"isstaticnat,omitempty"` + Issystem bool `json:"issystem,omitempty"` + Networkid string `json:"networkid,omitempty"` + Physicalnetworkid string `json:"physicalnetworkid,omitempty"` + Project string `json:"project,omitempty"` + Projectid string `json:"projectid,omitempty"` + Purpose string `json:"purpose,omitempty"` + State string `json:"state,omitempty"` + Tags []struct { + Account string `json:"account,omitempty"` + Customer string `json:"customer,omitempty"` + Domain string `json:"domain,omitempty"` + Domainid string `json:"domainid,omitempty"` + Key string `json:"key,omitempty"` + Project string `json:"project,omitempty"` + Projectid string `json:"projectid,omitempty"` + Resourceid string `json:"resourceid,omitempty"` + Resourcetype string `json:"resourcetype,omitempty"` + Value string `json:"value,omitempty"` + } `json:"tags,omitempty"` + Virtualmachinedisplayname string `json:"virtualmachinedisplayname,omitempty"` + Virtualmachineid string `json:"virtualmachineid,omitempty"` + Virtualmachinename string `json:"virtualmachinename,omitempty"` + Vlanid string `json:"vlanid,omitempty"` + Vlanname string `json:"vlanname,omitempty"` + Vmipaddress string `json:"vmipaddress,omitempty"` + Vpcid string `json:"vpcid,omitempty"` + Zoneid string `json:"zoneid,omitempty"` + Zonename string `json:"zonename,omitempty"` +} + +type DisassociateIpAddressParams struct { + p map[string]interface{} +} + +func (p *DisassociateIpAddressParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["id"]; found { + u.Set("id", v.(string)) + } + return u +} + +func (p *DisassociateIpAddressParams) SetId(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["id"] = v + return +} + +// You should always use this function to get a new DisassociateIpAddressParams instance, +// as then you are sure you have configured all required params +func (s *AddressService) NewDisassociateIpAddressParams(id string) *DisassociateIpAddressParams { + p := &DisassociateIpAddressParams{} + p.p = make(map[string]interface{}) + p.p["id"] = 
id + return p +} + +// Disassociates an ip address from the account. +func (s *AddressService) DisassociateIpAddress(p *DisassociateIpAddressParams) (*DisassociateIpAddressResponse, error) { + resp, err := s.cs.newRequest("disassociateIpAddress", p.toURLValues()) + if err != nil { + return nil, err + } + + var r DisassociateIpAddressResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + + // If we have a async client, we need to wait for the async result + if s.cs.async { + b, err := s.cs.GetAsyncJobResult(r.JobID, s.cs.timeout) + if err != nil { + if err == AsyncTimeoutErr { + return &r, err + } + return nil, err + } + + if err := json.Unmarshal(b, &r); err != nil { + return nil, err + } + } + return &r, nil +} + +type DisassociateIpAddressResponse struct { + JobID string `json:"jobid,omitempty"` + Displaytext string `json:"displaytext,omitempty"` + Success bool `json:"success,omitempty"` +} + +type ListPublicIpAddressesParams struct { + p map[string]interface{} +} + +func (p *ListPublicIpAddressesParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["account"]; found { + u.Set("account", v.(string)) + } + if v, found := p.p["allocatedonly"]; found { + vv := strconv.FormatBool(v.(bool)) + u.Set("allocatedonly", vv) + } + if v, found := p.p["associatednetworkid"]; found { + u.Set("associatednetworkid", v.(string)) + } + if v, found := p.p["domainid"]; found { + u.Set("domainid", v.(string)) + } + if v, found := p.p["fordisplay"]; found { + vv := strconv.FormatBool(v.(bool)) + u.Set("fordisplay", vv) + } + if v, found := p.p["forloadbalancing"]; found { + vv := strconv.FormatBool(v.(bool)) + u.Set("forloadbalancing", vv) + } + if v, found := p.p["forvirtualnetwork"]; found { + vv := strconv.FormatBool(v.(bool)) + u.Set("forvirtualnetwork", vv) + } + if v, found := p.p["id"]; found { + u.Set("id", v.(string)) + } + if v, found := p.p["ipaddress"]; found { + u.Set("ipaddress", v.(string)) + } + if v, found := p.p["isrecursive"]; found { + vv := strconv.FormatBool(v.(bool)) + u.Set("isrecursive", vv) + } + if v, found := p.p["issourcenat"]; found { + vv := strconv.FormatBool(v.(bool)) + u.Set("issourcenat", vv) + } + if v, found := p.p["isstaticnat"]; found { + vv := strconv.FormatBool(v.(bool)) + u.Set("isstaticnat", vv) + } + if v, found := p.p["keyword"]; found { + u.Set("keyword", v.(string)) + } + if v, found := p.p["listall"]; found { + vv := strconv.FormatBool(v.(bool)) + u.Set("listall", vv) + } + if v, found := p.p["page"]; found { + vv := strconv.Itoa(v.(int)) + u.Set("page", vv) + } + if v, found := p.p["pagesize"]; found { + vv := strconv.Itoa(v.(int)) + u.Set("pagesize", vv) + } + if v, found := p.p["physicalnetworkid"]; found { + u.Set("physicalnetworkid", v.(string)) + } + if v, found := p.p["projectid"]; found { + u.Set("projectid", v.(string)) + } + if v, found := p.p["tags"]; found { + i := 0 + for k, vv := range v.(map[string]string) { + u.Set(fmt.Sprintf("tags[%d].key", i), k) + u.Set(fmt.Sprintf("tags[%d].value", i), vv) + i++ + } + } + if v, found := p.p["vlanid"]; found { + u.Set("vlanid", v.(string)) + } + if v, found := p.p["vpcid"]; found { + u.Set("vpcid", v.(string)) + } + if v, found := p.p["zoneid"]; found { + u.Set("zoneid", v.(string)) + } + return u +} + +func (p *ListPublicIpAddressesParams) SetAccount(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["account"] = v + return +} + +func (p *ListPublicIpAddressesParams) SetAllocatedonly(v bool) { + if 
p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["allocatedonly"] = v + return +} + +func (p *ListPublicIpAddressesParams) SetAssociatednetworkid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["associatednetworkid"] = v + return +} + +func (p *ListPublicIpAddressesParams) SetDomainid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["domainid"] = v + return +} + +func (p *ListPublicIpAddressesParams) SetFordisplay(v bool) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["fordisplay"] = v + return +} + +func (p *ListPublicIpAddressesParams) SetForloadbalancing(v bool) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["forloadbalancing"] = v + return +} + +func (p *ListPublicIpAddressesParams) SetForvirtualnetwork(v bool) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["forvirtualnetwork"] = v + return +} + +func (p *ListPublicIpAddressesParams) SetId(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["id"] = v + return +} + +func (p *ListPublicIpAddressesParams) SetIpaddress(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["ipaddress"] = v + return +} + +func (p *ListPublicIpAddressesParams) SetIsrecursive(v bool) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["isrecursive"] = v + return +} + +func (p *ListPublicIpAddressesParams) SetIssourcenat(v bool) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["issourcenat"] = v + return +} + +func (p *ListPublicIpAddressesParams) SetIsstaticnat(v bool) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["isstaticnat"] = v + return +} + +func (p *ListPublicIpAddressesParams) SetKeyword(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["keyword"] = v + return +} + +func (p *ListPublicIpAddressesParams) SetListall(v bool) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["listall"] = v + return +} + +func (p *ListPublicIpAddressesParams) SetPage(v int) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["page"] = v + return +} + +func (p *ListPublicIpAddressesParams) SetPagesize(v int) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["pagesize"] = v + return +} + +func (p *ListPublicIpAddressesParams) SetPhysicalnetworkid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["physicalnetworkid"] = v + return +} + +func (p *ListPublicIpAddressesParams) SetProjectid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["projectid"] = v + return +} + +func (p *ListPublicIpAddressesParams) SetTags(v map[string]string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["tags"] = v + return +} + +func (p *ListPublicIpAddressesParams) SetVlanid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["vlanid"] = v + return +} + +func (p *ListPublicIpAddressesParams) SetVpcid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["vpcid"] = v + return +} + +func (p *ListPublicIpAddressesParams) SetZoneid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["zoneid"] = v + return +} + +// You should always use this function to get a new ListPublicIpAddressesParams instance, +// as then you are sure you have configured all required params +func (s *AddressService) NewListPublicIpAddressesParams() *ListPublicIpAddressesParams { + p := 
&ListPublicIpAddressesParams{} + p.p = make(map[string]interface{}) + return p +} + +// This is a courtesy helper function, which in some cases may not work as expected! +func (s *AddressService) GetPublicIpAddressByID(id string) (*PublicIpAddress, int, error) { + p := &ListPublicIpAddressesParams{} + p.p = make(map[string]interface{}) + + p.p["id"] = id + + l, err := s.ListPublicIpAddresses(p) + if err != nil { + if strings.Contains(err.Error(), fmt.Sprintf( + "Invalid parameter id value=%s due to incorrect long value format, "+ + "or entity does not exist", id)) { + return nil, 0, fmt.Errorf("No match found for %s: %+v", id, l) + } + return nil, -1, err + } + + if l.Count == 0 { + // If no matches, search all projects + p.p["projectid"] = "-1" + + l, err = s.ListPublicIpAddresses(p) + if err != nil { + if strings.Contains(err.Error(), fmt.Sprintf( + "Invalid parameter id value=%s due to incorrect long value format, "+ + "or entity does not exist", id)) { + return nil, 0, fmt.Errorf("No match found for %s: %+v", id, l) + } + return nil, -1, err + } + } + + if l.Count == 0 { + return nil, l.Count, fmt.Errorf("No match found for %s: %+v", id, l) + } + + if l.Count == 1 { + return l.PublicIpAddresses[0], l.Count, nil + } + return nil, l.Count, fmt.Errorf("There is more than one result for PublicIpAddress UUID: %s!", id) +} + +// Lists all public ip addresses +func (s *AddressService) ListPublicIpAddresses(p *ListPublicIpAddressesParams) (*ListPublicIpAddressesResponse, error) { + resp, err := s.cs.newRequest("listPublicIpAddresses", p.toURLValues()) + if err != nil { + return nil, err + } + + var r ListPublicIpAddressesResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + return &r, nil +} + +type ListPublicIpAddressesResponse struct { + Count int `json:"count"` + PublicIpAddresses []*PublicIpAddress `json:"publicipaddress"` +} + +type PublicIpAddress struct { + Account string `json:"account,omitempty"` + Allocated string `json:"allocated,omitempty"` + Associatednetworkid string `json:"associatednetworkid,omitempty"` + Associatednetworkname string `json:"associatednetworkname,omitempty"` + Domain string `json:"domain,omitempty"` + Domainid string `json:"domainid,omitempty"` + Fordisplay bool `json:"fordisplay,omitempty"` + Forvirtualnetwork bool `json:"forvirtualnetwork,omitempty"` + Id string `json:"id,omitempty"` + Ipaddress string `json:"ipaddress,omitempty"` + Isportable bool `json:"isportable,omitempty"` + Issourcenat bool `json:"issourcenat,omitempty"` + Isstaticnat bool `json:"isstaticnat,omitempty"` + Issystem bool `json:"issystem,omitempty"` + Networkid string `json:"networkid,omitempty"` + Physicalnetworkid string `json:"physicalnetworkid,omitempty"` + Project string `json:"project,omitempty"` + Projectid string `json:"projectid,omitempty"` + Purpose string `json:"purpose,omitempty"` + State string `json:"state,omitempty"` + Tags []struct { + Account string `json:"account,omitempty"` + Customer string `json:"customer,omitempty"` + Domain string `json:"domain,omitempty"` + Domainid string `json:"domainid,omitempty"` + Key string `json:"key,omitempty"` + Project string `json:"project,omitempty"` + Projectid string `json:"projectid,omitempty"` + Resourceid string `json:"resourceid,omitempty"` + Resourcetype string `json:"resourcetype,omitempty"` + Value string `json:"value,omitempty"` + } `json:"tags,omitempty"` + Virtualmachinedisplayname string `json:"virtualmachinedisplayname,omitempty"` + Virtualmachineid string `json:"virtualmachineid,omitempty"` +
Virtualmachinename string `json:"virtualmachinename,omitempty"` + Vlanid string `json:"vlanid,omitempty"` + Vlanname string `json:"vlanname,omitempty"` + Vmipaddress string `json:"vmipaddress,omitempty"` + Vpcid string `json:"vpcid,omitempty"` + Zoneid string `json:"zoneid,omitempty"` + Zonename string `json:"zonename,omitempty"` +} + +type UpdateIpAddressParams struct { + p map[string]interface{} +} + +func (p *UpdateIpAddressParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["customid"]; found { + u.Set("customid", v.(string)) + } + if v, found := p.p["fordisplay"]; found { + vv := strconv.FormatBool(v.(bool)) + u.Set("fordisplay", vv) + } + if v, found := p.p["id"]; found { + u.Set("id", v.(string)) + } + return u +} + +func (p *UpdateIpAddressParams) SetCustomid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["customid"] = v + return +} + +func (p *UpdateIpAddressParams) SetFordisplay(v bool) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["fordisplay"] = v + return +} + +func (p *UpdateIpAddressParams) SetId(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["id"] = v + return +} + +// You should always use this function to get a new UpdateIpAddressParams instance, +// as then you are sure you have configured all required params +func (s *AddressService) NewUpdateIpAddressParams(id string) *UpdateIpAddressParams { + p := &UpdateIpAddressParams{} + p.p = make(map[string]interface{}) + p.p["id"] = id + return p +} + +// Updates an ip address +func (s *AddressService) UpdateIpAddress(p *UpdateIpAddressParams) (*UpdateIpAddressResponse, error) { + resp, err := s.cs.newRequest("updateIpAddress", p.toURLValues()) + if err != nil { + return nil, err + } + + var r UpdateIpAddressResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + + // If we have a async client, we need to wait for the async result + if s.cs.async { + b, err := s.cs.GetAsyncJobResult(r.JobID, s.cs.timeout) + if err != nil { + if err == AsyncTimeoutErr { + return &r, err + } + return nil, err + } + + b, err = getRawValue(b) + if err != nil { + return nil, err + } + + if err := json.Unmarshal(b, &r); err != nil { + return nil, err + } + } + return &r, nil +} + +type UpdateIpAddressResponse struct { + JobID string `json:"jobid,omitempty"` + Account string `json:"account,omitempty"` + Allocated string `json:"allocated,omitempty"` + Associatednetworkid string `json:"associatednetworkid,omitempty"` + Associatednetworkname string `json:"associatednetworkname,omitempty"` + Domain string `json:"domain,omitempty"` + Domainid string `json:"domainid,omitempty"` + Fordisplay bool `json:"fordisplay,omitempty"` + Forvirtualnetwork bool `json:"forvirtualnetwork,omitempty"` + Id string `json:"id,omitempty"` + Ipaddress string `json:"ipaddress,omitempty"` + Isportable bool `json:"isportable,omitempty"` + Issourcenat bool `json:"issourcenat,omitempty"` + Isstaticnat bool `json:"isstaticnat,omitempty"` + Issystem bool `json:"issystem,omitempty"` + Networkid string `json:"networkid,omitempty"` + Physicalnetworkid string `json:"physicalnetworkid,omitempty"` + Project string `json:"project,omitempty"` + Projectid string `json:"projectid,omitempty"` + Purpose string `json:"purpose,omitempty"` + State string `json:"state,omitempty"` + Tags []struct { + Account string `json:"account,omitempty"` + Customer string `json:"customer,omitempty"` + Domain string `json:"domain,omitempty"` + Domainid 
string `json:"domainid,omitempty"` + Key string `json:"key,omitempty"` + Project string `json:"project,omitempty"` + Projectid string `json:"projectid,omitempty"` + Resourceid string `json:"resourceid,omitempty"` + Resourcetype string `json:"resourcetype,omitempty"` + Value string `json:"value,omitempty"` + } `json:"tags,omitempty"` + Virtualmachinedisplayname string `json:"virtualmachinedisplayname,omitempty"` + Virtualmachineid string `json:"virtualmachineid,omitempty"` + Virtualmachinename string `json:"virtualmachinename,omitempty"` + Vlanid string `json:"vlanid,omitempty"` + Vlanname string `json:"vlanname,omitempty"` + Vmipaddress string `json:"vmipaddress,omitempty"` + Vpcid string `json:"vpcid,omitempty"` + Zoneid string `json:"zoneid,omitempty"` + Zonename string `json:"zonename,omitempty"` +} diff --git a/vendor/github.com/xanzy/go-cloudstack/cloudstack/AffinityGroupService.go b/vendor/github.com/xanzy/go-cloudstack/cloudstack/AffinityGroupService.go new file mode 100644 index 000000000..fd3630590 --- /dev/null +++ b/vendor/github.com/xanzy/go-cloudstack/cloudstack/AffinityGroupService.go @@ -0,0 +1,832 @@ +// +// Copyright 2014, Sander van Harmelen +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package cloudstack + +import ( + "encoding/json" + "fmt" + "net/url" + "strconv" + "strings" +) + +type CreateAffinityGroupParams struct { + p map[string]interface{} +} + +func (p *CreateAffinityGroupParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["account"]; found { + u.Set("account", v.(string)) + } + if v, found := p.p["description"]; found { + u.Set("description", v.(string)) + } + if v, found := p.p["domainid"]; found { + u.Set("domainid", v.(string)) + } + if v, found := p.p["name"]; found { + u.Set("name", v.(string)) + } + if v, found := p.p["type"]; found { + u.Set("type", v.(string)) + } + return u +} + +func (p *CreateAffinityGroupParams) SetAccount(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["account"] = v + return +} + +func (p *CreateAffinityGroupParams) SetDescription(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["description"] = v + return +} + +func (p *CreateAffinityGroupParams) SetDomainid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["domainid"] = v + return +} + +func (p *CreateAffinityGroupParams) SetName(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["name"] = v + return +} + +func (p *CreateAffinityGroupParams) SetType(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["affinityGroupType"] = v + return +} + +// You should always use this function to get a new CreateAffinityGroupParams instance, +// as then you are sure you have configured all required params +func (s *AffinityGroupService) NewCreateAffinityGroupParams(name string, affinityGroupType string) *CreateAffinityGroupParams { + p := &CreateAffinityGroupParams{} + p.p = 
make(map[string]interface{}) + p.p["name"] = name + p.p["affinityGroupType"] = affinityGroupType + return p +} + +// Creates an affinity/anti-affinity group +func (s *AffinityGroupService) CreateAffinityGroup(p *CreateAffinityGroupParams) (*CreateAffinityGroupResponse, error) { + resp, err := s.cs.newRequest("createAffinityGroup", p.toURLValues()) + if err != nil { + return nil, err + } + + var r CreateAffinityGroupResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + + // If we have a async client, we need to wait for the async result + if s.cs.async { + b, err := s.cs.GetAsyncJobResult(r.JobID, s.cs.timeout) + if err != nil { + if err == AsyncTimeoutErr { + return &r, err + } + return nil, err + } + + b, err = getRawValue(b) + if err != nil { + return nil, err + } + + if err := json.Unmarshal(b, &r); err != nil { + return nil, err + } + } + return &r, nil +} + +type CreateAffinityGroupResponse struct { + JobID string `json:"jobid,omitempty"` + Account string `json:"account,omitempty"` + Description string `json:"description,omitempty"` + Domain string `json:"domain,omitempty"` + Domainid string `json:"domainid,omitempty"` + Id string `json:"id,omitempty"` + Name string `json:"name,omitempty"` + Type string `json:"type,omitempty"` + VirtualmachineIds []string `json:"virtualmachineIds,omitempty"` +} + +type DeleteAffinityGroupParams struct { + p map[string]interface{} +} + +func (p *DeleteAffinityGroupParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["account"]; found { + u.Set("account", v.(string)) + } + if v, found := p.p["domainid"]; found { + u.Set("domainid", v.(string)) + } + if v, found := p.p["id"]; found { + u.Set("id", v.(string)) + } + if v, found := p.p["name"]; found { + u.Set("name", v.(string)) + } + return u +} + +func (p *DeleteAffinityGroupParams) SetAccount(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["account"] = v + return +} + +func (p *DeleteAffinityGroupParams) SetDomainid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["domainid"] = v + return +} + +func (p *DeleteAffinityGroupParams) SetId(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["id"] = v + return +} + +func (p *DeleteAffinityGroupParams) SetName(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["name"] = v + return +} + +// You should always use this function to get a new DeleteAffinityGroupParams instance, +// as then you are sure you have configured all required params +func (s *AffinityGroupService) NewDeleteAffinityGroupParams() *DeleteAffinityGroupParams { + p := &DeleteAffinityGroupParams{} + p.p = make(map[string]interface{}) + return p +} + +// Deletes affinity group +func (s *AffinityGroupService) DeleteAffinityGroup(p *DeleteAffinityGroupParams) (*DeleteAffinityGroupResponse, error) { + resp, err := s.cs.newRequest("deleteAffinityGroup", p.toURLValues()) + if err != nil { + return nil, err + } + + var r DeleteAffinityGroupResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + + // If we have a async client, we need to wait for the async result + if s.cs.async { + b, err := s.cs.GetAsyncJobResult(r.JobID, s.cs.timeout) + if err != nil { + if err == AsyncTimeoutErr { + return &r, err + } + return nil, err + } + + if err := json.Unmarshal(b, &r); err != nil { + return nil, err + } + } + return &r, nil +} + +type DeleteAffinityGroupResponse struct { + JobID string 
`json:"jobid,omitempty"` + Displaytext string `json:"displaytext,omitempty"` + Success bool `json:"success,omitempty"` +} + +type ListAffinityGroupsParams struct { + p map[string]interface{} +} + +func (p *ListAffinityGroupsParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["account"]; found { + u.Set("account", v.(string)) + } + if v, found := p.p["domainid"]; found { + u.Set("domainid", v.(string)) + } + if v, found := p.p["id"]; found { + u.Set("id", v.(string)) + } + if v, found := p.p["isrecursive"]; found { + vv := strconv.FormatBool(v.(bool)) + u.Set("isrecursive", vv) + } + if v, found := p.p["keyword"]; found { + u.Set("keyword", v.(string)) + } + if v, found := p.p["listall"]; found { + vv := strconv.FormatBool(v.(bool)) + u.Set("listall", vv) + } + if v, found := p.p["name"]; found { + u.Set("name", v.(string)) + } + if v, found := p.p["page"]; found { + vv := strconv.Itoa(v.(int)) + u.Set("page", vv) + } + if v, found := p.p["pagesize"]; found { + vv := strconv.Itoa(v.(int)) + u.Set("pagesize", vv) + } + if v, found := p.p["type"]; found { + u.Set("type", v.(string)) + } + if v, found := p.p["virtualmachineid"]; found { + u.Set("virtualmachineid", v.(string)) + } + return u +} + +func (p *ListAffinityGroupsParams) SetAccount(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["account"] = v + return +} + +func (p *ListAffinityGroupsParams) SetDomainid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["domainid"] = v + return +} + +func (p *ListAffinityGroupsParams) SetId(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["id"] = v + return +} + +func (p *ListAffinityGroupsParams) SetIsrecursive(v bool) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["isrecursive"] = v + return +} + +func (p *ListAffinityGroupsParams) SetKeyword(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["keyword"] = v + return +} + +func (p *ListAffinityGroupsParams) SetListall(v bool) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["listall"] = v + return +} + +func (p *ListAffinityGroupsParams) SetName(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["name"] = v + return +} + +func (p *ListAffinityGroupsParams) SetPage(v int) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["page"] = v + return +} + +func (p *ListAffinityGroupsParams) SetPagesize(v int) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["pagesize"] = v + return +} + +func (p *ListAffinityGroupsParams) SetType(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["affinityGroupType"] = v + return +} + +func (p *ListAffinityGroupsParams) SetVirtualmachineid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["virtualmachineid"] = v + return +} + +// You should always use this function to get a new ListAffinityGroupsParams instance, +// as then you are sure you have configured all required params +func (s *AffinityGroupService) NewListAffinityGroupsParams() *ListAffinityGroupsParams { + p := &ListAffinityGroupsParams{} + p.p = make(map[string]interface{}) + return p +} + +// This is a courtesy helper function, which in some cases may not work as expected! 
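+//
+// Sketch of the lookup chain (illustrative; assumes the generated client
+// exposes this service as cs.AffinityGroup, and "web-ha" is a made-up
+// group name):
+//
+//	group, count, err := cs.AffinityGroup.GetAffinityGroupByName("web-ha")
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	fmt.Printf("%d match(es); using UUID %s\n", count, group.Id)
+//
+// GetAffinityGroupByName resolves the name via GetAffinityGroupID and then
+// fetches the full record with GetAffinityGroupByID, so an ambiguous name
+// fails unless exactly one result matches it exactly.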
+func (s *AffinityGroupService) GetAffinityGroupID(name string) (string, error) { + p := &ListAffinityGroupsParams{} + p.p = make(map[string]interface{}) + + p.p["name"] = name + + l, err := s.ListAffinityGroups(p) + if err != nil { + return "", err + } + + if l.Count == 0 { + return "", fmt.Errorf("No match found for %s: %+v", name, l) + } + + if l.Count == 1 { + return l.AffinityGroups[0].Id, nil + } + + if l.Count > 1 { + for _, v := range l.AffinityGroups { + if v.Name == name { + return v.Id, nil + } + } + } + return "", fmt.Errorf("Could not find an exact match for %s: %+v", name, l) +} + +// This is a courtesy helper function, which in some cases may not work as expected! +func (s *AffinityGroupService) GetAffinityGroupByName(name string) (*AffinityGroup, int, error) { + id, err := s.GetAffinityGroupID(name) + if err != nil { + return nil, -1, err + } + + r, count, err := s.GetAffinityGroupByID(id) + if err != nil { + return nil, count, err + } + return r, count, nil +} + +// This is a courtesy helper function, which in some cases may not work as expected! +func (s *AffinityGroupService) GetAffinityGroupByID(id string) (*AffinityGroup, int, error) { + p := &ListAffinityGroupsParams{} + p.p = make(map[string]interface{}) + + p.p["id"] = id + + l, err := s.ListAffinityGroups(p) + if err != nil { + if strings.Contains(err.Error(), fmt.Sprintf( + "Invalid parameter id value=%s due to incorrect long value format, "+ + "or entity does not exist", id)) { + return nil, 0, fmt.Errorf("No match found for %s: %+v", id, l) + } + return nil, -1, err + } + + if l.Count == 0 { + return nil, l.Count, fmt.Errorf("No match found for %s: %+v", id, l) + } + + if l.Count == 1 { + return l.AffinityGroups[0], l.Count, nil + } + return nil, l.Count, fmt.Errorf("There is more than one result for AffinityGroup UUID: %s!", id) +} + +// Lists affinity groups +func (s *AffinityGroupService) ListAffinityGroups(p *ListAffinityGroupsParams) (*ListAffinityGroupsResponse, error) { + resp, err := s.cs.newRequest("listAffinityGroups", p.toURLValues()) + if err != nil { + return nil, err + } + + var r ListAffinityGroupsResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + return &r, nil +} + +type ListAffinityGroupsResponse struct { + Count int `json:"count"` + AffinityGroups []*AffinityGroup `json:"affinitygroup"` +} + +type AffinityGroup struct { + Account string `json:"account,omitempty"` + Description string `json:"description,omitempty"` + Domain string `json:"domain,omitempty"` + Domainid string `json:"domainid,omitempty"` + Id string `json:"id,omitempty"` + Name string `json:"name,omitempty"` + Type string `json:"type,omitempty"` + VirtualmachineIds []string `json:"virtualmachineIds,omitempty"` +} + +type UpdateVMAffinityGroupParams struct { + p map[string]interface{} +} + +func (p *UpdateVMAffinityGroupParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["affinitygroupids"]; found { + vv := strings.Join(v.([]string), ",") + u.Set("affinitygroupids", vv) + } + if v, found := p.p["affinitygroupnames"]; found { + vv := strings.Join(v.([]string), ",") + u.Set("affinitygroupnames", vv) + } + if v, found := p.p["id"]; found { + u.Set("id", v.(string)) + } + return u +} + +func (p *UpdateVMAffinityGroupParams) SetAffinitygroupids(v []string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["affinitygroupids"] = v + return +} + +func (p *UpdateVMAffinityGroupParams) SetAffinitygroupnames(v []string) { + if p.p == nil
{ + p.p = make(map[string]interface{}) + } + p.p["affinitygroupnames"] = v + return +} + +func (p *UpdateVMAffinityGroupParams) SetId(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["id"] = v + return +} + +// You should always use this function to get a new UpdateVMAffinityGroupParams instance, +// as then you are sure you have configured all required params +func (s *AffinityGroupService) NewUpdateVMAffinityGroupParams(id string) *UpdateVMAffinityGroupParams { + p := &UpdateVMAffinityGroupParams{} + p.p = make(map[string]interface{}) + p.p["id"] = id + return p +} + +// Updates the affinity/anti-affinity group associations of a virtual machine. The VM has to be stopped and restarted for the new properties to take effect. +func (s *AffinityGroupService) UpdateVMAffinityGroup(p *UpdateVMAffinityGroupParams) (*UpdateVMAffinityGroupResponse, error) { + resp, err := s.cs.newRequest("updateVMAffinityGroup", p.toURLValues()) + if err != nil { + return nil, err + } + + var r UpdateVMAffinityGroupResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + + // If we have a async client, we need to wait for the async result + if s.cs.async { + b, err := s.cs.GetAsyncJobResult(r.JobID, s.cs.timeout) + if err != nil { + if err == AsyncTimeoutErr { + return &r, err + } + return nil, err + } + + b, err = getRawValue(b) + if err != nil { + return nil, err + } + + if err := json.Unmarshal(b, &r); err != nil { + return nil, err + } + } + return &r, nil +} + +type UpdateVMAffinityGroupResponse struct { + JobID string `json:"jobid,omitempty"` + Account string `json:"account,omitempty"` + Affinitygroup []struct { + Account string `json:"account,omitempty"` + Description string `json:"description,omitempty"` + Domain string `json:"domain,omitempty"` + Domainid string `json:"domainid,omitempty"` + Id string `json:"id,omitempty"` + Name string `json:"name,omitempty"` + Type string `json:"type,omitempty"` + VirtualmachineIds []string `json:"virtualmachineIds,omitempty"` + } `json:"affinitygroup,omitempty"` + Cpunumber int `json:"cpunumber,omitempty"` + Cpuspeed int `json:"cpuspeed,omitempty"` + Cpuused string `json:"cpuused,omitempty"` + Created string `json:"created,omitempty"` + Details map[string]string `json:"details,omitempty"` + Diskioread int64 `json:"diskioread,omitempty"` + Diskiowrite int64 `json:"diskiowrite,omitempty"` + Diskkbsread int64 `json:"diskkbsread,omitempty"` + Diskkbswrite int64 `json:"diskkbswrite,omitempty"` + Diskofferingid string `json:"diskofferingid,omitempty"` + Diskofferingname string `json:"diskofferingname,omitempty"` + Displayname string `json:"displayname,omitempty"` + Displayvm bool `json:"displayvm,omitempty"` + Domain string `json:"domain,omitempty"` + Domainid string `json:"domainid,omitempty"` + Forvirtualnetwork bool `json:"forvirtualnetwork,omitempty"` + Group string `json:"group,omitempty"` + Groupid string `json:"groupid,omitempty"` + Guestosid string `json:"guestosid,omitempty"` + Haenable bool `json:"haenable,omitempty"` + Hostid string `json:"hostid,omitempty"` + Hostname string `json:"hostname,omitempty"` + Hypervisor string `json:"hypervisor,omitempty"` + Id string `json:"id,omitempty"` + Instancename string `json:"instancename,omitempty"` + Isdynamicallyscalable bool `json:"isdynamicallyscalable,omitempty"` + Isodisplaytext string `json:"isodisplaytext,omitempty"` + Isoid string `json:"isoid,omitempty"` + Isoname string `json:"isoname,omitempty"` + Keypair string `json:"keypair,omitempty"` + Memory int 
`json:"memory,omitempty"` + Name string `json:"name,omitempty"` + Networkkbsread int64 `json:"networkkbsread,omitempty"` + Networkkbswrite int64 `json:"networkkbswrite,omitempty"` + Nic []struct { + Broadcasturi string `json:"broadcasturi,omitempty"` + Deviceid string `json:"deviceid,omitempty"` + Gateway string `json:"gateway,omitempty"` + Id string `json:"id,omitempty"` + Ip6address string `json:"ip6address,omitempty"` + Ip6cidr string `json:"ip6cidr,omitempty"` + Ip6gateway string `json:"ip6gateway,omitempty"` + Ipaddress string `json:"ipaddress,omitempty"` + Isdefault bool `json:"isdefault,omitempty"` + Isolationuri string `json:"isolationuri,omitempty"` + Macaddress string `json:"macaddress,omitempty"` + Netmask string `json:"netmask,omitempty"` + Networkid string `json:"networkid,omitempty"` + Networkname string `json:"networkname,omitempty"` + Secondaryip []struct { + Id string `json:"id,omitempty"` + Ipaddress string `json:"ipaddress,omitempty"` + } `json:"secondaryip,omitempty"` + Traffictype string `json:"traffictype,omitempty"` + Type string `json:"type,omitempty"` + Virtualmachineid string `json:"virtualmachineid,omitempty"` + } `json:"nic,omitempty"` + Ostypeid int64 `json:"ostypeid,omitempty"` + Password string `json:"password,omitempty"` + Passwordenabled bool `json:"passwordenabled,omitempty"` + Project string `json:"project,omitempty"` + Projectid string `json:"projectid,omitempty"` + Publicip string `json:"publicip,omitempty"` + Publicipid string `json:"publicipid,omitempty"` + Rootdeviceid int64 `json:"rootdeviceid,omitempty"` + Rootdevicetype string `json:"rootdevicetype,omitempty"` + Securitygroup []struct { + Account string `json:"account,omitempty"` + Description string `json:"description,omitempty"` + Domain string `json:"domain,omitempty"` + Domainid string `json:"domainid,omitempty"` + Egressrule []struct { + Account string `json:"account,omitempty"` + Cidr string `json:"cidr,omitempty"` + Endport int `json:"endport,omitempty"` + Icmpcode int `json:"icmpcode,omitempty"` + Icmptype int `json:"icmptype,omitempty"` + Protocol string `json:"protocol,omitempty"` + Ruleid string `json:"ruleid,omitempty"` + Securitygroupname string `json:"securitygroupname,omitempty"` + Startport int `json:"startport,omitempty"` + Tags []struct { + Account string `json:"account,omitempty"` + Customer string `json:"customer,omitempty"` + Domain string `json:"domain,omitempty"` + Domainid string `json:"domainid,omitempty"` + Key string `json:"key,omitempty"` + Project string `json:"project,omitempty"` + Projectid string `json:"projectid,omitempty"` + Resourceid string `json:"resourceid,omitempty"` + Resourcetype string `json:"resourcetype,omitempty"` + Value string `json:"value,omitempty"` + } `json:"tags,omitempty"` + } `json:"egressrule,omitempty"` + Id string `json:"id,omitempty"` + Ingressrule []struct { + Account string `json:"account,omitempty"` + Cidr string `json:"cidr,omitempty"` + Endport int `json:"endport,omitempty"` + Icmpcode int `json:"icmpcode,omitempty"` + Icmptype int `json:"icmptype,omitempty"` + Protocol string `json:"protocol,omitempty"` + Ruleid string `json:"ruleid,omitempty"` + Securitygroupname string `json:"securitygroupname,omitempty"` + Startport int `json:"startport,omitempty"` + Tags []struct { + Account string `json:"account,omitempty"` + Customer string `json:"customer,omitempty"` + Domain string `json:"domain,omitempty"` + Domainid string `json:"domainid,omitempty"` + Key string `json:"key,omitempty"` + Project string `json:"project,omitempty"` + Projectid 
string `json:"projectid,omitempty"` + Resourceid string `json:"resourceid,omitempty"` + Resourcetype string `json:"resourcetype,omitempty"` + Value string `json:"value,omitempty"` + } `json:"tags,omitempty"` + } `json:"ingressrule,omitempty"` + Name string `json:"name,omitempty"` + Project string `json:"project,omitempty"` + Projectid string `json:"projectid,omitempty"` + Tags []struct { + Account string `json:"account,omitempty"` + Customer string `json:"customer,omitempty"` + Domain string `json:"domain,omitempty"` + Domainid string `json:"domainid,omitempty"` + Key string `json:"key,omitempty"` + Project string `json:"project,omitempty"` + Projectid string `json:"projectid,omitempty"` + Resourceid string `json:"resourceid,omitempty"` + Resourcetype string `json:"resourcetype,omitempty"` + Value string `json:"value,omitempty"` + } `json:"tags,omitempty"` + } `json:"securitygroup,omitempty"` + Serviceofferingid string `json:"serviceofferingid,omitempty"` + Serviceofferingname string `json:"serviceofferingname,omitempty"` + Servicestate string `json:"servicestate,omitempty"` + State string `json:"state,omitempty"` + Tags []struct { + Account string `json:"account,omitempty"` + Customer string `json:"customer,omitempty"` + Domain string `json:"domain,omitempty"` + Domainid string `json:"domainid,omitempty"` + Key string `json:"key,omitempty"` + Project string `json:"project,omitempty"` + Projectid string `json:"projectid,omitempty"` + Resourceid string `json:"resourceid,omitempty"` + Resourcetype string `json:"resourcetype,omitempty"` + Value string `json:"value,omitempty"` + } `json:"tags,omitempty"` + Templatedisplaytext string `json:"templatedisplaytext,omitempty"` + Templateid string `json:"templateid,omitempty"` + Templatename string `json:"templatename,omitempty"` + Vgpu string `json:"vgpu,omitempty"` + Zoneid string `json:"zoneid,omitempty"` + Zonename string `json:"zonename,omitempty"` +} + +type ListAffinityGroupTypesParams struct { + p map[string]interface{} +} + +func (p *ListAffinityGroupTypesParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["keyword"]; found { + u.Set("keyword", v.(string)) + } + if v, found := p.p["page"]; found { + vv := strconv.Itoa(v.(int)) + u.Set("page", vv) + } + if v, found := p.p["pagesize"]; found { + vv := strconv.Itoa(v.(int)) + u.Set("pagesize", vv) + } + return u +} + +func (p *ListAffinityGroupTypesParams) SetKeyword(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["keyword"] = v + return +} + +func (p *ListAffinityGroupTypesParams) SetPage(v int) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["page"] = v + return +} + +func (p *ListAffinityGroupTypesParams) SetPagesize(v int) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["pagesize"] = v + return +} + +// You should always use this function to get a new ListAffinityGroupTypesParams instance, +// as then you are sure you have configured all required params +func (s *AffinityGroupService) NewListAffinityGroupTypesParams() *ListAffinityGroupTypesParams { + p := &ListAffinityGroupTypesParams{} + p.p = make(map[string]interface{}) + return p +} + +// Lists affinity group types available +func (s *AffinityGroupService) ListAffinityGroupTypes(p *ListAffinityGroupTypesParams) (*ListAffinityGroupTypesResponse, error) { + resp, err := s.cs.newRequest("listAffinityGroupTypes", p.toURLValues()) + if err != nil { + return nil, err + } + + var r ListAffinityGroupTypesResponse + if err 
:= json.Unmarshal(resp, &r); err != nil { + return nil, err + } + return &r, nil +} + +type ListAffinityGroupTypesResponse struct { + Count int `json:"count"` + AffinityGroupTypes []*AffinityGroupType `json:"affinitygrouptype"` +} + +type AffinityGroupType struct { + Type string `json:"type,omitempty"` +} diff --git a/vendor/github.com/xanzy/go-cloudstack/cloudstack/AlertService.go b/vendor/github.com/xanzy/go-cloudstack/cloudstack/AlertService.go new file mode 100644 index 000000000..2cfd4b291 --- /dev/null +++ b/vendor/github.com/xanzy/go-cloudstack/cloudstack/AlertService.go @@ -0,0 +1,493 @@ +// +// Copyright 2014, Sander van Harmelen +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package cloudstack + +import ( + "encoding/json" + "fmt" + "net/url" + "strconv" + "strings" +) + +type ListAlertsParams struct { + p map[string]interface{} +} + +func (p *ListAlertsParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["id"]; found { + u.Set("id", v.(string)) + } + if v, found := p.p["keyword"]; found { + u.Set("keyword", v.(string)) + } + if v, found := p.p["name"]; found { + u.Set("name", v.(string)) + } + if v, found := p.p["page"]; found { + vv := strconv.Itoa(v.(int)) + u.Set("page", vv) + } + if v, found := p.p["pagesize"]; found { + vv := strconv.Itoa(v.(int)) + u.Set("pagesize", vv) + } + if v, found := p.p["type"]; found { + u.Set("type", v.(string)) + } + return u +} + +func (p *ListAlertsParams) SetId(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["id"] = v + return +} + +func (p *ListAlertsParams) SetKeyword(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["keyword"] = v + return +} + +func (p *ListAlertsParams) SetName(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["name"] = v + return +} + +func (p *ListAlertsParams) SetPage(v int) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["page"] = v + return +} + +func (p *ListAlertsParams) SetPagesize(v int) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["pagesize"] = v + return +} + +func (p *ListAlertsParams) SetType(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["alertType"] = v + return +} + +// You should always use this function to get a new ListAlertsParams instance, +// as then you are sure you have configured all required params +func (s *AlertService) NewListAlertsParams() *ListAlertsParams { + p := &ListAlertsParams{} + p.p = make(map[string]interface{}) + return p +} + +// This is a courtesy helper function, which in some cases may not work as expected! 
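+//
+// It resolves an alert name to its ID by listing alerts filtered on name and,
+// when more than one result comes back, scanning for an exact name match.
+// A minimal usage sketch (the endpoint, keys, and alert name are placeholder
+// values, and it assumes the client exposes this service as cs.Alert):
+//
+//	cs := cloudstack.NewClient("https://mgmt.example.com/client/api", apiKey, secretKey, true)
+//	id, err := cs.Alert.GetAlertID("some-alert-name")
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	alert, _, err := cs.Alert.GetAlertByID(id)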
+func (s *AlertService) GetAlertID(name string) (string, error) { + p := &ListAlertsParams{} + p.p = make(map[string]interface{}) + + p.p["name"] = name + + l, err := s.ListAlerts(p) + if err != nil { + return "", err + } + + if l.Count == 0 { + return "", fmt.Errorf("No match found for %s: %+v", name, l) + } + + if l.Count == 1 { + return l.Alerts[0].Id, nil + } + + if l.Count > 1 { + for _, v := range l.Alerts { + if v.Name == name { + return v.Id, nil + } + } + } + return "", fmt.Errorf("Could not find an exact match for %s: %+v", name, l) +} + +// This is a courtesy helper function, which in some cases may not work as expected! +func (s *AlertService) GetAlertByName(name string) (*Alert, int, error) { + id, err := s.GetAlertID(name) + if err != nil { + return nil, -1, err + } + + r, count, err := s.GetAlertByID(id) + if err != nil { + return nil, count, err + } + return r, count, nil +} + +// This is a courtesy helper function, which in some cases may not work as expected! +func (s *AlertService) GetAlertByID(id string) (*Alert, int, error) { + p := &ListAlertsParams{} + p.p = make(map[string]interface{}) + + p.p["id"] = id + + l, err := s.ListAlerts(p) + if err != nil { + if strings.Contains(err.Error(), fmt.Sprintf( + "Invalid parameter id value=%s due to incorrect long value format, "+ + "or entity does not exist", id)) { + return nil, 0, fmt.Errorf("No match found for %s: %+v", id, l) + } + return nil, -1, err + } + + if l.Count == 0 { + return nil, l.Count, fmt.Errorf("No match found for %s: %+v", id, l) + } + + if l.Count == 1 { + return l.Alerts[0], l.Count, nil + } + return nil, l.Count, fmt.Errorf("There is more then one result for Alert UUID: %s!", id) +} + +// Lists all alerts. +func (s *AlertService) ListAlerts(p *ListAlertsParams) (*ListAlertsResponse, error) { + resp, err := s.cs.newRequest("listAlerts", p.toURLValues()) + if err != nil { + return nil, err + } + + var r ListAlertsResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + return &r, nil +} + +type ListAlertsResponse struct { + Count int `json:"count"` + Alerts []*Alert `json:"alert"` +} + +type Alert struct { + Description string `json:"description,omitempty"` + Id string `json:"id,omitempty"` + Name string `json:"name,omitempty"` + Sent string `json:"sent,omitempty"` + Type int `json:"type,omitempty"` +} + +type ArchiveAlertsParams struct { + p map[string]interface{} +} + +func (p *ArchiveAlertsParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["enddate"]; found { + u.Set("enddate", v.(string)) + } + if v, found := p.p["ids"]; found { + vv := strings.Join(v.([]string), ",") + u.Set("ids", vv) + } + if v, found := p.p["startdate"]; found { + u.Set("startdate", v.(string)) + } + if v, found := p.p["type"]; found { + u.Set("type", v.(string)) + } + return u +} + +func (p *ArchiveAlertsParams) SetEnddate(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["enddate"] = v + return +} + +func (p *ArchiveAlertsParams) SetIds(v []string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["ids"] = v + return +} + +func (p *ArchiveAlertsParams) SetStartdate(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["startdate"] = v + return +} + +func (p *ArchiveAlertsParams) SetType(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["alertType"] = v + return +} + +// You should always use this function to get a new ArchiveAlertsParams instance, +// 
as then you are sure you have configured all required params +func (s *AlertService) NewArchiveAlertsParams() *ArchiveAlertsParams { + p := &ArchiveAlertsParams{} + p.p = make(map[string]interface{}) + return p +} + +// Archive one or more alerts. +func (s *AlertService) ArchiveAlerts(p *ArchiveAlertsParams) (*ArchiveAlertsResponse, error) { + resp, err := s.cs.newRequest("archiveAlerts", p.toURLValues()) + if err != nil { + return nil, err + } + + var r ArchiveAlertsResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + return &r, nil +} + +type ArchiveAlertsResponse struct { + Displaytext string `json:"displaytext,omitempty"` + Success string `json:"success,omitempty"` +} + +type DeleteAlertsParams struct { + p map[string]interface{} +} + +func (p *DeleteAlertsParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["enddate"]; found { + u.Set("enddate", v.(string)) + } + if v, found := p.p["ids"]; found { + vv := strings.Join(v.([]string), ",") + u.Set("ids", vv) + } + if v, found := p.p["startdate"]; found { + u.Set("startdate", v.(string)) + } + if v, found := p.p["type"]; found { + u.Set("type", v.(string)) + } + return u +} + +func (p *DeleteAlertsParams) SetEnddate(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["enddate"] = v + return +} + +func (p *DeleteAlertsParams) SetIds(v []string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["ids"] = v + return +} + +func (p *DeleteAlertsParams) SetStartdate(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["startdate"] = v + return +} + +func (p *DeleteAlertsParams) SetType(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["alertType"] = v + return +} + +// You should always use this function to get a new DeleteAlertsParams instance, +// as then you are sure you have configured all required params +func (s *AlertService) NewDeleteAlertsParams() *DeleteAlertsParams { + p := &DeleteAlertsParams{} + p.p = make(map[string]interface{}) + return p +} + +// Delete one or more alerts. 
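+//
+// A minimal sketch (the IDs are placeholders; cs is a *CloudStackClient as in
+// the GetAlertID example above):
+//
+//	p := cs.Alert.NewDeleteAlertsParams()
+//	p.SetIds([]string{"alert-id-1", "alert-id-2"})
+//	resp, err := cs.Alert.DeleteAlerts(p)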
+func (s *AlertService) DeleteAlerts(p *DeleteAlertsParams) (*DeleteAlertsResponse, error) { + resp, err := s.cs.newRequest("deleteAlerts", p.toURLValues()) + if err != nil { + return nil, err + } + + var r DeleteAlertsResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + return &r, nil +} + +type DeleteAlertsResponse struct { + Displaytext string `json:"displaytext,omitempty"` + Success string `json:"success,omitempty"` +} + +type GenerateAlertParams struct { + p map[string]interface{} +} + +func (p *GenerateAlertParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["description"]; found { + u.Set("description", v.(string)) + } + if v, found := p.p["name"]; found { + u.Set("name", v.(string)) + } + if v, found := p.p["podid"]; found { + u.Set("podid", v.(string)) + } + if v, found := p.p["type"]; found { + vv := strconv.Itoa(v.(int)) + u.Set("type", vv) + } + if v, found := p.p["zoneid"]; found { + u.Set("zoneid", v.(string)) + } + return u +} + +func (p *GenerateAlertParams) SetDescription(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["description"] = v + return +} + +func (p *GenerateAlertParams) SetName(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["name"] = v + return +} + +func (p *GenerateAlertParams) SetPodid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["podid"] = v + return +} + +func (p *GenerateAlertParams) SetType(v int) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["alertType"] = v + return +} + +func (p *GenerateAlertParams) SetZoneid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["zoneid"] = v + return +} + +// You should always use this function to get a new GenerateAlertParams instance, +// as then you are sure you have configured all required params +func (s *AlertService) NewGenerateAlertParams(description string, name string, alertType int) *GenerateAlertParams { + p := &GenerateAlertParams{} + p.p = make(map[string]interface{}) + p.p["description"] = description + p.p["name"] = name + p.p["alertType"] = alertType + return p +} + +// Generates an alert +func (s *AlertService) GenerateAlert(p *GenerateAlertParams) (*GenerateAlertResponse, error) { + resp, err := s.cs.newRequest("generateAlert", p.toURLValues()) + if err != nil { + return nil, err + } + + var r GenerateAlertResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + + // If we have a async client, we need to wait for the async result + if s.cs.async { + b, err := s.cs.GetAsyncJobResult(r.JobID, s.cs.timeout) + if err != nil { + if err == AsyncTimeoutErr { + return &r, err + } + return nil, err + } + + if err := json.Unmarshal(b, &r); err != nil { + return nil, err + } + } + return &r, nil +} + +type GenerateAlertResponse struct { + JobID string `json:"jobid,omitempty"` + Displaytext string `json:"displaytext,omitempty"` + Success bool `json:"success,omitempty"` +} diff --git a/vendor/github.com/xanzy/go-cloudstack/cloudstack/AsyncjobService.go b/vendor/github.com/xanzy/go-cloudstack/cloudstack/AsyncjobService.go new file mode 100644 index 000000000..ca21e7f04 --- /dev/null +++ b/vendor/github.com/xanzy/go-cloudstack/cloudstack/AsyncjobService.go @@ -0,0 +1,239 @@ +// +// Copyright 2014, Sander van Harmelen +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package cloudstack + +import ( + "encoding/json" + "net/url" + "strconv" + "time" +) + +type QueryAsyncJobResultParams struct { + p map[string]interface{} +} + +func (p *QueryAsyncJobResultParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["jobid"]; found { + u.Set("jobid", v.(string)) + } + return u +} + +func (p *QueryAsyncJobResultParams) SetJobid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["jobid"] = v + return +} + +// You should always use this function to get a new QueryAsyncJobResultParams instance, +// as then you are sure you have configured all required params +func (s *AsyncjobService) NewQueryAsyncJobResultParams(jobid string) *QueryAsyncJobResultParams { + p := &QueryAsyncJobResultParams{} + p.p = make(map[string]interface{}) + p.p["jobid"] = jobid + return p +} + +// Retrieves the current status of asynchronous job. +func (s *AsyncjobService) QueryAsyncJobResult(p *QueryAsyncJobResultParams) (*QueryAsyncJobResultResponse, error) { + var resp json.RawMessage + var err error + + // We should be able to retry on failure as this call is idempotent + for i := 0; i < 3; i++ { + resp, err = s.cs.newRequest("queryAsyncJobResult", p.toURLValues()) + if err == nil { + break + } + time.Sleep(500 * time.Millisecond) + } + if err != nil { + return nil, err + } + + var r QueryAsyncJobResultResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + return &r, nil +} + +type QueryAsyncJobResultResponse struct { + Accountid string `json:"accountid,omitempty"` + Cmd string `json:"cmd,omitempty"` + Created string `json:"created,omitempty"` + Jobinstanceid string `json:"jobinstanceid,omitempty"` + Jobinstancetype string `json:"jobinstancetype,omitempty"` + Jobprocstatus int `json:"jobprocstatus,omitempty"` + Jobresult json.RawMessage `json:"jobresult,omitempty"` + Jobresultcode int `json:"jobresultcode,omitempty"` + Jobresulttype string `json:"jobresulttype,omitempty"` + Jobstatus int `json:"jobstatus,omitempty"` + Userid string `json:"userid,omitempty"` +} + +type ListAsyncJobsParams struct { + p map[string]interface{} +} + +func (p *ListAsyncJobsParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["account"]; found { + u.Set("account", v.(string)) + } + if v, found := p.p["domainid"]; found { + u.Set("domainid", v.(string)) + } + if v, found := p.p["isrecursive"]; found { + vv := strconv.FormatBool(v.(bool)) + u.Set("isrecursive", vv) + } + if v, found := p.p["keyword"]; found { + u.Set("keyword", v.(string)) + } + if v, found := p.p["listall"]; found { + vv := strconv.FormatBool(v.(bool)) + u.Set("listall", vv) + } + if v, found := p.p["page"]; found { + vv := strconv.Itoa(v.(int)) + u.Set("page", vv) + } + if v, found := p.p["pagesize"]; found { + vv := strconv.Itoa(v.(int)) + u.Set("pagesize", vv) + } + if v, found := p.p["startdate"]; found { + u.Set("startdate", v.(string)) + } + return u +} + +func (p *ListAsyncJobsParams) SetAccount(v string) { + if p.p == nil { + p.p = 
make(map[string]interface{}) + } + p.p["account"] = v + return +} + +func (p *ListAsyncJobsParams) SetDomainid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["domainid"] = v + return +} + +func (p *ListAsyncJobsParams) SetIsrecursive(v bool) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["isrecursive"] = v + return +} + +func (p *ListAsyncJobsParams) SetKeyword(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["keyword"] = v + return +} + +func (p *ListAsyncJobsParams) SetListall(v bool) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["listall"] = v + return +} + +func (p *ListAsyncJobsParams) SetPage(v int) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["page"] = v + return +} + +func (p *ListAsyncJobsParams) SetPagesize(v int) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["pagesize"] = v + return +} + +func (p *ListAsyncJobsParams) SetStartdate(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["startdate"] = v + return +} + +// You should always use this function to get a new ListAsyncJobsParams instance, +// as then you are sure you have configured all required params +func (s *AsyncjobService) NewListAsyncJobsParams() *ListAsyncJobsParams { + p := &ListAsyncJobsParams{} + p.p = make(map[string]interface{}) + return p +} + +// Lists all pending asynchronous jobs for the account. +func (s *AsyncjobService) ListAsyncJobs(p *ListAsyncJobsParams) (*ListAsyncJobsResponse, error) { + resp, err := s.cs.newRequest("listAsyncJobs", p.toURLValues()) + if err != nil { + return nil, err + } + + var r ListAsyncJobsResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + return &r, nil +} + +type ListAsyncJobsResponse struct { + Count int `json:"count"` + AsyncJobs []*AsyncJob `json:"asyncjobs"` +} + +type AsyncJob struct { + Accountid string `json:"accountid,omitempty"` + Cmd string `json:"cmd,omitempty"` + Created string `json:"created,omitempty"` + Jobinstanceid string `json:"jobinstanceid,omitempty"` + Jobinstancetype string `json:"jobinstancetype,omitempty"` + Jobprocstatus int `json:"jobprocstatus,omitempty"` + Jobresult json.RawMessage `json:"jobresult,omitempty"` + Jobresultcode int `json:"jobresultcode,omitempty"` + Jobresulttype string `json:"jobresulttype,omitempty"` + Jobstatus int `json:"jobstatus,omitempty"` + Userid string `json:"userid,omitempty"` +} diff --git a/vendor/github.com/xanzy/go-cloudstack/cloudstack/AutoScaleService.go b/vendor/github.com/xanzy/go-cloudstack/cloudstack/AutoScaleService.go new file mode 100644 index 000000000..65a1d2e05 --- /dev/null +++ b/vendor/github.com/xanzy/go-cloudstack/cloudstack/AutoScaleService.go @@ -0,0 +1,2753 @@ +// +// Copyright 2014, Sander van Harmelen +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// + +package cloudstack + +import ( + "encoding/json" + "fmt" + "net/url" + "strconv" + "strings" +) + +type CreateCounterParams struct { + p map[string]interface{} +} + +func (p *CreateCounterParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["name"]; found { + u.Set("name", v.(string)) + } + if v, found := p.p["source"]; found { + u.Set("source", v.(string)) + } + if v, found := p.p["value"]; found { + u.Set("value", v.(string)) + } + return u +} + +func (p *CreateCounterParams) SetName(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["name"] = v + return +} + +func (p *CreateCounterParams) SetSource(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["source"] = v + return +} + +func (p *CreateCounterParams) SetValue(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["value"] = v + return +} + +// You should always use this function to get a new CreateCounterParams instance, +// as then you are sure you have configured all required params +func (s *AutoScaleService) NewCreateCounterParams(name string, source string, value string) *CreateCounterParams { + p := &CreateCounterParams{} + p.p = make(map[string]interface{}) + p.p["name"] = name + p.p["source"] = source + p.p["value"] = value + return p +} + +// Adds metric counter +func (s *AutoScaleService) CreateCounter(p *CreateCounterParams) (*CreateCounterResponse, error) { + resp, err := s.cs.newRequest("createCounter", p.toURLValues()) + if err != nil { + return nil, err + } + + var r CreateCounterResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + + // If we have a async client, we need to wait for the async result + if s.cs.async { + b, err := s.cs.GetAsyncJobResult(r.JobID, s.cs.timeout) + if err != nil { + if err == AsyncTimeoutErr { + return &r, err + } + return nil, err + } + + b, err = getRawValue(b) + if err != nil { + return nil, err + } + + if err := json.Unmarshal(b, &r); err != nil { + return nil, err + } + } + return &r, nil +} + +type CreateCounterResponse struct { + JobID string `json:"jobid,omitempty"` + Id string `json:"id,omitempty"` + Name string `json:"name,omitempty"` + Source string `json:"source,omitempty"` + Value string `json:"value,omitempty"` + Zoneid string `json:"zoneid,omitempty"` +} + +type CreateConditionParams struct { + p map[string]interface{} +} + +func (p *CreateConditionParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["account"]; found { + u.Set("account", v.(string)) + } + if v, found := p.p["counterid"]; found { + u.Set("counterid", v.(string)) + } + if v, found := p.p["domainid"]; found { + u.Set("domainid", v.(string)) + } + if v, found := p.p["relationaloperator"]; found { + u.Set("relationaloperator", v.(string)) + } + if v, found := p.p["threshold"]; found { + vv := strconv.FormatInt(v.(int64), 10) + u.Set("threshold", vv) + } + return u +} + +func (p *CreateConditionParams) SetAccount(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["account"] = v + return +} + +func (p *CreateConditionParams) SetCounterid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["counterid"] = v + return +} + +func (p *CreateConditionParams) SetDomainid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["domainid"] = v + return +} + +func (p *CreateConditionParams) SetRelationaloperator(v string) { + if p.p 
== nil { + p.p = make(map[string]interface{}) + } + p.p["relationaloperator"] = v + return +} + +func (p *CreateConditionParams) SetThreshold(v int64) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["threshold"] = v + return +} + +// You should always use this function to get a new CreateConditionParams instance, +// as then you are sure you have configured all required params +func (s *AutoScaleService) NewCreateConditionParams(counterid string, relationaloperator string, threshold int64) *CreateConditionParams { + p := &CreateConditionParams{} + p.p = make(map[string]interface{}) + p.p["counterid"] = counterid + p.p["relationaloperator"] = relationaloperator + p.p["threshold"] = threshold + return p +} + +// Creates a condition +func (s *AutoScaleService) CreateCondition(p *CreateConditionParams) (*CreateConditionResponse, error) { + resp, err := s.cs.newRequest("createCondition", p.toURLValues()) + if err != nil { + return nil, err + } + + var r CreateConditionResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + + // If we have a async client, we need to wait for the async result + if s.cs.async { + b, err := s.cs.GetAsyncJobResult(r.JobID, s.cs.timeout) + if err != nil { + if err == AsyncTimeoutErr { + return &r, err + } + return nil, err + } + + b, err = getRawValue(b) + if err != nil { + return nil, err + } + + if err := json.Unmarshal(b, &r); err != nil { + return nil, err + } + } + return &r, nil +} + +type CreateConditionResponse struct { + JobID string `json:"jobid,omitempty"` + Account string `json:"account,omitempty"` + Counter []string `json:"counter,omitempty"` + Domain string `json:"domain,omitempty"` + Domainid string `json:"domainid,omitempty"` + Id string `json:"id,omitempty"` + Project string `json:"project,omitempty"` + Projectid string `json:"projectid,omitempty"` + Relationaloperator string `json:"relationaloperator,omitempty"` + Threshold int64 `json:"threshold,omitempty"` + Zoneid string `json:"zoneid,omitempty"` +} + +type CreateAutoScalePolicyParams struct { + p map[string]interface{} +} + +func (p *CreateAutoScalePolicyParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["action"]; found { + u.Set("action", v.(string)) + } + if v, found := p.p["conditionids"]; found { + vv := strings.Join(v.([]string), ",") + u.Set("conditionids", vv) + } + if v, found := p.p["duration"]; found { + vv := strconv.Itoa(v.(int)) + u.Set("duration", vv) + } + if v, found := p.p["quiettime"]; found { + vv := strconv.Itoa(v.(int)) + u.Set("quiettime", vv) + } + return u +} + +func (p *CreateAutoScalePolicyParams) SetAction(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["action"] = v + return +} + +func (p *CreateAutoScalePolicyParams) SetConditionids(v []string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["conditionids"] = v + return +} + +func (p *CreateAutoScalePolicyParams) SetDuration(v int) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["duration"] = v + return +} + +func (p *CreateAutoScalePolicyParams) SetQuiettime(v int) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["quiettime"] = v + return +} + +// You should always use this function to get a new CreateAutoScalePolicyParams instance, +// as then you are sure you have configured all required params +func (s *AutoScaleService) NewCreateAutoScalePolicyParams(action string, conditionids []string, duration int) 
*CreateAutoScalePolicyParams {
+	p := &CreateAutoScalePolicyParams{}
+	p.p = make(map[string]interface{})
+	p.p["action"] = action
+	p.p["conditionids"] = conditionids
+	p.p["duration"] = duration
+	return p
+}
+
+// Creates an autoscale policy for a provision or deprovision action. The action
+// is taken when all the conditions evaluate to true for the specified duration.
+// The policy is in effect once it is attached to an autoscale VM group.
+func (s *AutoScaleService) CreateAutoScalePolicy(p *CreateAutoScalePolicyParams) (*CreateAutoScalePolicyResponse, error) {
+	resp, err := s.cs.newRequest("createAutoScalePolicy", p.toURLValues())
+	if err != nil {
+		return nil, err
+	}
+
+	var r CreateAutoScalePolicyResponse
+	if err := json.Unmarshal(resp, &r); err != nil {
+		return nil, err
+	}
+
+	// If we have an async client, we need to wait for the async result
+	if s.cs.async {
+		b, err := s.cs.GetAsyncJobResult(r.JobID, s.cs.timeout)
+		if err != nil {
+			if err == AsyncTimeoutErr {
+				return &r, err
+			}
+			return nil, err
+		}
+
+		b, err = getRawValue(b)
+		if err != nil {
+			return nil, err
+		}
+
+		if err := json.Unmarshal(b, &r); err != nil {
+			return nil, err
+		}
+	}
+	return &r, nil
+}
+
+type CreateAutoScalePolicyResponse struct {
+	JobID      string   `json:"jobid,omitempty"`
+	Account    string   `json:"account,omitempty"`
+	Action     string   `json:"action,omitempty"`
+	Conditions []string `json:"conditions,omitempty"`
+	Domain     string   `json:"domain,omitempty"`
+	Domainid   string   `json:"domainid,omitempty"`
+	Duration   int      `json:"duration,omitempty"`
+	Id         string   `json:"id,omitempty"`
+	Project    string   `json:"project,omitempty"`
+	Projectid  string   `json:"projectid,omitempty"`
+	Quiettime  int      `json:"quiettime,omitempty"`
+}
+
+type CreateAutoScaleVmProfileParams struct {
+	p map[string]interface{}
+}
+
+func (p *CreateAutoScaleVmProfileParams) toURLValues() url.Values {
+	u := url.Values{}
+	if p.p == nil {
+		return u
+	}
+	if v, found := p.p["autoscaleuserid"]; found {
+		u.Set("autoscaleuserid", v.(string))
+	}
+	if v, found := p.p["counterparam"]; found {
+		i := 0
+		for k, vv := range v.(map[string]string) {
+			u.Set(fmt.Sprintf("counterparam[%d].key", i), k)
+			u.Set(fmt.Sprintf("counterparam[%d].value", i), vv)
+			i++
+		}
+	}
+	if v, found := p.p["destroyvmgraceperiod"]; found {
+		vv := strconv.Itoa(v.(int))
+		u.Set("destroyvmgraceperiod", vv)
+	}
+	if v, found := p.p["fordisplay"]; found {
+		vv := strconv.FormatBool(v.(bool))
+		u.Set("fordisplay", vv)
+	}
+	if v, found := p.p["otherdeployparams"]; found {
+		u.Set("otherdeployparams", v.(string))
+	}
+	if v, found := p.p["serviceofferingid"]; found {
+		u.Set("serviceofferingid", v.(string))
+	}
+	if v, found := p.p["templateid"]; found {
+		u.Set("templateid", v.(string))
+	}
+	if v, found := p.p["zoneid"]; found {
+		u.Set("zoneid", v.(string))
+	}
+	return u
+}
+
+func (p *CreateAutoScaleVmProfileParams) SetAutoscaleuserid(v string) {
+	if p.p == nil {
+		p.p = make(map[string]interface{})
+	}
+	p.p["autoscaleuserid"] = v
+	return
+}
+
+func (p *CreateAutoScaleVmProfileParams) SetCounterparam(v map[string]string) {
+	if p.p == nil {
+		p.p = make(map[string]interface{})
+	}
+	p.p["counterparam"] = v
+	return
+}
+
+func (p *CreateAutoScaleVmProfileParams) SetDestroyvmgraceperiod(v int) {
+	if p.p == nil {
+		p.p = make(map[string]interface{})
+	}
+	p.p["destroyvmgraceperiod"] = v
+	return
+}
+
+func (p *CreateAutoScaleVmProfileParams) SetFordisplay(v bool) {
+	if p.p == nil {
+		p.p = make(map[string]interface{})
+	}
+	p.p["fordisplay"] = v
+	return
+}
+
+func (p
*CreateAutoScaleVmProfileParams) SetOtherdeployparams(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["otherdeployparams"] = v + return +} + +func (p *CreateAutoScaleVmProfileParams) SetServiceofferingid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["serviceofferingid"] = v + return +} + +func (p *CreateAutoScaleVmProfileParams) SetTemplateid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["templateid"] = v + return +} + +func (p *CreateAutoScaleVmProfileParams) SetZoneid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["zoneid"] = v + return +} + +// You should always use this function to get a new CreateAutoScaleVmProfileParams instance, +// as then you are sure you have configured all required params +func (s *AutoScaleService) NewCreateAutoScaleVmProfileParams(serviceofferingid string, templateid string, zoneid string) *CreateAutoScaleVmProfileParams { + p := &CreateAutoScaleVmProfileParams{} + p.p = make(map[string]interface{}) + p.p["serviceofferingid"] = serviceofferingid + p.p["templateid"] = templateid + p.p["zoneid"] = zoneid + return p +} + +// Creates a profile that contains information about the virtual machine which will be provisioned automatically by autoscale feature. +func (s *AutoScaleService) CreateAutoScaleVmProfile(p *CreateAutoScaleVmProfileParams) (*CreateAutoScaleVmProfileResponse, error) { + resp, err := s.cs.newRequest("createAutoScaleVmProfile", p.toURLValues()) + if err != nil { + return nil, err + } + + var r CreateAutoScaleVmProfileResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + + // If we have a async client, we need to wait for the async result + if s.cs.async { + b, err := s.cs.GetAsyncJobResult(r.JobID, s.cs.timeout) + if err != nil { + if err == AsyncTimeoutErr { + return &r, err + } + return nil, err + } + + b, err = getRawValue(b) + if err != nil { + return nil, err + } + + if err := json.Unmarshal(b, &r); err != nil { + return nil, err + } + } + return &r, nil +} + +type CreateAutoScaleVmProfileResponse struct { + JobID string `json:"jobid,omitempty"` + Account string `json:"account,omitempty"` + Autoscaleuserid string `json:"autoscaleuserid,omitempty"` + Destroyvmgraceperiod int `json:"destroyvmgraceperiod,omitempty"` + Domain string `json:"domain,omitempty"` + Domainid string `json:"domainid,omitempty"` + Fordisplay bool `json:"fordisplay,omitempty"` + Id string `json:"id,omitempty"` + Otherdeployparams string `json:"otherdeployparams,omitempty"` + Project string `json:"project,omitempty"` + Projectid string `json:"projectid,omitempty"` + Serviceofferingid string `json:"serviceofferingid,omitempty"` + Templateid string `json:"templateid,omitempty"` + Zoneid string `json:"zoneid,omitempty"` +} + +type CreateAutoScaleVmGroupParams struct { + p map[string]interface{} +} + +func (p *CreateAutoScaleVmGroupParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["fordisplay"]; found { + vv := strconv.FormatBool(v.(bool)) + u.Set("fordisplay", vv) + } + if v, found := p.p["interval"]; found { + vv := strconv.Itoa(v.(int)) + u.Set("interval", vv) + } + if v, found := p.p["lbruleid"]; found { + u.Set("lbruleid", v.(string)) + } + if v, found := p.p["maxmembers"]; found { + vv := strconv.Itoa(v.(int)) + u.Set("maxmembers", vv) + } + if v, found := p.p["minmembers"]; found { + vv := strconv.Itoa(v.(int)) + u.Set("minmembers", vv) + } + if v, found := 
p.p["scaledownpolicyids"]; found {
+		vv := strings.Join(v.([]string), ",")
+		u.Set("scaledownpolicyids", vv)
+	}
+	if v, found := p.p["scaleuppolicyids"]; found {
+		vv := strings.Join(v.([]string), ",")
+		u.Set("scaleuppolicyids", vv)
+	}
+	if v, found := p.p["vmprofileid"]; found {
+		u.Set("vmprofileid", v.(string))
+	}
+	return u
+}
+
+func (p *CreateAutoScaleVmGroupParams) SetFordisplay(v bool) {
+	if p.p == nil {
+		p.p = make(map[string]interface{})
+	}
+	p.p["fordisplay"] = v
+	return
+}
+
+func (p *CreateAutoScaleVmGroupParams) SetInterval(v int) {
+	if p.p == nil {
+		p.p = make(map[string]interface{})
+	}
+	p.p["interval"] = v
+	return
+}
+
+func (p *CreateAutoScaleVmGroupParams) SetLbruleid(v string) {
+	if p.p == nil {
+		p.p = make(map[string]interface{})
+	}
+	p.p["lbruleid"] = v
+	return
+}
+
+func (p *CreateAutoScaleVmGroupParams) SetMaxmembers(v int) {
+	if p.p == nil {
+		p.p = make(map[string]interface{})
+	}
+	p.p["maxmembers"] = v
+	return
+}
+
+func (p *CreateAutoScaleVmGroupParams) SetMinmembers(v int) {
+	if p.p == nil {
+		p.p = make(map[string]interface{})
+	}
+	p.p["minmembers"] = v
+	return
+}
+
+func (p *CreateAutoScaleVmGroupParams) SetScaledownpolicyids(v []string) {
+	if p.p == nil {
+		p.p = make(map[string]interface{})
+	}
+	p.p["scaledownpolicyids"] = v
+	return
+}
+
+func (p *CreateAutoScaleVmGroupParams) SetScaleuppolicyids(v []string) {
+	if p.p == nil {
+		p.p = make(map[string]interface{})
+	}
+	p.p["scaleuppolicyids"] = v
+	return
+}
+
+func (p *CreateAutoScaleVmGroupParams) SetVmprofileid(v string) {
+	if p.p == nil {
+		p.p = make(map[string]interface{})
+	}
+	p.p["vmprofileid"] = v
+	return
+}
+
+// You should always use this function to get a new CreateAutoScaleVmGroupParams instance,
+// as then you are sure you have configured all required params
+func (s *AutoScaleService) NewCreateAutoScaleVmGroupParams(lbruleid string, maxmembers int, minmembers int, scaledownpolicyids []string, scaleuppolicyids []string, vmprofileid string) *CreateAutoScaleVmGroupParams {
+	p := &CreateAutoScaleVmGroupParams{}
+	p.p = make(map[string]interface{})
+	p.p["lbruleid"] = lbruleid
+	p.p["maxmembers"] = maxmembers
+	p.p["minmembers"] = minmembers
+	p.p["scaledownpolicyids"] = scaledownpolicyids
+	p.p["scaleuppolicyids"] = scaleuppolicyids
+	p.p["vmprofileid"] = vmprofileid
+	return p
+}
+
+// Creates an autoscale VM group, whose member virtual machines are automatically
+// created and started based on the configured VM profile and scale policies.
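+//
+// A sketch of the full wiring, from counter to running group (all IDs, the
+// relational operator, and the numeric values are placeholders; error handling
+// is elided; cs is assumed to be a *CloudStackClient created with NewAsyncClient,
+// and a real setup would use separate scale-up and scale-down conditions):
+//
+//	cond, _ := cs.AutoScale.CreateCondition(
+//		cs.AutoScale.NewCreateConditionParams(counterID, "GT", 80))
+//	up, _ := cs.AutoScale.CreateAutoScalePolicy(
+//		cs.AutoScale.NewCreateAutoScalePolicyParams("scaleup", []string{cond.Id}, 300))
+//	down, _ := cs.AutoScale.CreateAutoScalePolicy(
+//		cs.AutoScale.NewCreateAutoScalePolicyParams("scaledown", []string{cond.Id}, 300))
+//	profile, _ := cs.AutoScale.CreateAutoScaleVmProfile(
+//		cs.AutoScale.NewCreateAutoScaleVmProfileParams(serviceOfferingID, templateID, zoneID))
+//	group, err := cs.AutoScale.CreateAutoScaleVmGroup(
+//		cs.AutoScale.NewCreateAutoScaleVmGroupParams(lbRuleID, 5, 1,
+//			[]string{down.Id}, []string{up.Id}, profile.Id))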
+func (s *AutoScaleService) CreateAutoScaleVmGroup(p *CreateAutoScaleVmGroupParams) (*CreateAutoScaleVmGroupResponse, error) { + resp, err := s.cs.newRequest("createAutoScaleVmGroup", p.toURLValues()) + if err != nil { + return nil, err + } + + var r CreateAutoScaleVmGroupResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + + // If we have a async client, we need to wait for the async result + if s.cs.async { + b, err := s.cs.GetAsyncJobResult(r.JobID, s.cs.timeout) + if err != nil { + if err == AsyncTimeoutErr { + return &r, err + } + return nil, err + } + + b, err = getRawValue(b) + if err != nil { + return nil, err + } + + if err := json.Unmarshal(b, &r); err != nil { + return nil, err + } + } + return &r, nil +} + +type CreateAutoScaleVmGroupResponse struct { + JobID string `json:"jobid,omitempty"` + Account string `json:"account,omitempty"` + Domain string `json:"domain,omitempty"` + Domainid string `json:"domainid,omitempty"` + Fordisplay bool `json:"fordisplay,omitempty"` + Id string `json:"id,omitempty"` + Interval int `json:"interval,omitempty"` + Lbruleid string `json:"lbruleid,omitempty"` + Maxmembers int `json:"maxmembers,omitempty"` + Minmembers int `json:"minmembers,omitempty"` + Project string `json:"project,omitempty"` + Projectid string `json:"projectid,omitempty"` + Scaledownpolicies []string `json:"scaledownpolicies,omitempty"` + Scaleuppolicies []string `json:"scaleuppolicies,omitempty"` + State string `json:"state,omitempty"` + Vmprofileid string `json:"vmprofileid,omitempty"` +} + +type DeleteCounterParams struct { + p map[string]interface{} +} + +func (p *DeleteCounterParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["id"]; found { + u.Set("id", v.(string)) + } + return u +} + +func (p *DeleteCounterParams) SetId(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["id"] = v + return +} + +// You should always use this function to get a new DeleteCounterParams instance, +// as then you are sure you have configured all required params +func (s *AutoScaleService) NewDeleteCounterParams(id string) *DeleteCounterParams { + p := &DeleteCounterParams{} + p.p = make(map[string]interface{}) + p.p["id"] = id + return p +} + +// Deletes a counter +func (s *AutoScaleService) DeleteCounter(p *DeleteCounterParams) (*DeleteCounterResponse, error) { + resp, err := s.cs.newRequest("deleteCounter", p.toURLValues()) + if err != nil { + return nil, err + } + + var r DeleteCounterResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + + // If we have a async client, we need to wait for the async result + if s.cs.async { + b, err := s.cs.GetAsyncJobResult(r.JobID, s.cs.timeout) + if err != nil { + if err == AsyncTimeoutErr { + return &r, err + } + return nil, err + } + + if err := json.Unmarshal(b, &r); err != nil { + return nil, err + } + } + return &r, nil +} + +type DeleteCounterResponse struct { + JobID string `json:"jobid,omitempty"` + Displaytext string `json:"displaytext,omitempty"` + Success bool `json:"success,omitempty"` +} + +type DeleteConditionParams struct { + p map[string]interface{} +} + +func (p *DeleteConditionParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["id"]; found { + u.Set("id", v.(string)) + } + return u +} + +func (p *DeleteConditionParams) SetId(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["id"] = v + return +} + +// You should 
always use this function to get a new DeleteConditionParams instance,
+// as then you are sure you have configured all required params
+func (s *AutoScaleService) NewDeleteConditionParams(id string) *DeleteConditionParams {
+	p := &DeleteConditionParams{}
+	p.p = make(map[string]interface{})
+	p.p["id"] = id
+	return p
+}
+
+// Removes a condition
+func (s *AutoScaleService) DeleteCondition(p *DeleteConditionParams) (*DeleteConditionResponse, error) {
+	resp, err := s.cs.newRequest("deleteCondition", p.toURLValues())
+	if err != nil {
+		return nil, err
+	}
+
+	var r DeleteConditionResponse
+	if err := json.Unmarshal(resp, &r); err != nil {
+		return nil, err
+	}
+
+	// If we have an async client, we need to wait for the async result
+	if s.cs.async {
+		b, err := s.cs.GetAsyncJobResult(r.JobID, s.cs.timeout)
+		if err != nil {
+			if err == AsyncTimeoutErr {
+				return &r, err
+			}
+			return nil, err
+		}
+
+		if err := json.Unmarshal(b, &r); err != nil {
+			return nil, err
+		}
+	}
+	return &r, nil
+}
+
+type DeleteConditionResponse struct {
+	JobID       string `json:"jobid,omitempty"`
+	Displaytext string `json:"displaytext,omitempty"`
+	Success     bool   `json:"success,omitempty"`
+}
+
+type DeleteAutoScalePolicyParams struct {
+	p map[string]interface{}
+}
+
+func (p *DeleteAutoScalePolicyParams) toURLValues() url.Values {
+	u := url.Values{}
+	if p.p == nil {
+		return u
+	}
+	if v, found := p.p["id"]; found {
+		u.Set("id", v.(string))
+	}
+	return u
+}
+
+func (p *DeleteAutoScalePolicyParams) SetId(v string) {
+	if p.p == nil {
+		p.p = make(map[string]interface{})
+	}
+	p.p["id"] = v
+	return
+}
+
+// You should always use this function to get a new DeleteAutoScalePolicyParams instance,
+// as then you are sure you have configured all required params
+func (s *AutoScaleService) NewDeleteAutoScalePolicyParams(id string) *DeleteAutoScalePolicyParams {
+	p := &DeleteAutoScalePolicyParams{}
+	p.p = make(map[string]interface{})
+	p.p["id"] = id
+	return p
+}
+
+// Deletes an autoscale policy.
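+//
+// A minimal sketch (policyID is a placeholder). With a client created via
+// NewAsyncClient this call blocks until the async job finishes; with a plain
+// synchronous client the returned response only carries the job ID:
+//
+//	resp, err := cs.AutoScale.DeleteAutoScalePolicy(
+//		cs.AutoScale.NewDeleteAutoScalePolicyParams(policyID))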
+func (s *AutoScaleService) DeleteAutoScalePolicy(p *DeleteAutoScalePolicyParams) (*DeleteAutoScalePolicyResponse, error) {
+	resp, err := s.cs.newRequest("deleteAutoScalePolicy", p.toURLValues())
+	if err != nil {
+		return nil, err
+	}
+
+	var r DeleteAutoScalePolicyResponse
+	if err := json.Unmarshal(resp, &r); err != nil {
+		return nil, err
+	}
+
+	// If we have an async client, we need to wait for the async result
+	if s.cs.async {
+		b, err := s.cs.GetAsyncJobResult(r.JobID, s.cs.timeout)
+		if err != nil {
+			if err == AsyncTimeoutErr {
+				return &r, err
+			}
+			return nil, err
+		}
+
+		if err := json.Unmarshal(b, &r); err != nil {
+			return nil, err
+		}
+	}
+	return &r, nil
+}
+
+type DeleteAutoScalePolicyResponse struct {
+	JobID       string `json:"jobid,omitempty"`
+	Displaytext string `json:"displaytext,omitempty"`
+	Success     bool   `json:"success,omitempty"`
+}
+
+type DeleteAutoScaleVmProfileParams struct {
+	p map[string]interface{}
+}
+
+func (p *DeleteAutoScaleVmProfileParams) toURLValues() url.Values {
+	u := url.Values{}
+	if p.p == nil {
+		return u
+	}
+	if v, found := p.p["id"]; found {
+		u.Set("id", v.(string))
+	}
+	return u
+}
+
+func (p *DeleteAutoScaleVmProfileParams) SetId(v string) {
+	if p.p == nil {
+		p.p = make(map[string]interface{})
+	}
+	p.p["id"] = v
+	return
+}
+
+// You should always use this function to get a new DeleteAutoScaleVmProfileParams instance,
+// as then you are sure you have configured all required params
+func (s *AutoScaleService) NewDeleteAutoScaleVmProfileParams(id string) *DeleteAutoScaleVmProfileParams {
+	p := &DeleteAutoScaleVmProfileParams{}
+	p.p = make(map[string]interface{})
+	p.p["id"] = id
+	return p
+}
+
+// Deletes an autoscale vm profile.
+func (s *AutoScaleService) DeleteAutoScaleVmProfile(p *DeleteAutoScaleVmProfileParams) (*DeleteAutoScaleVmProfileResponse, error) {
+	resp, err := s.cs.newRequest("deleteAutoScaleVmProfile", p.toURLValues())
+	if err != nil {
+		return nil, err
+	}
+
+	var r DeleteAutoScaleVmProfileResponse
+	if err := json.Unmarshal(resp, &r); err != nil {
+		return nil, err
+	}
+
+	// If we have an async client, we need to wait for the async result
+	if s.cs.async {
+		b, err := s.cs.GetAsyncJobResult(r.JobID, s.cs.timeout)
+		if err != nil {
+			if err == AsyncTimeoutErr {
+				return &r, err
+			}
+			return nil, err
+		}
+
+		if err := json.Unmarshal(b, &r); err != nil {
+			return nil, err
+		}
+	}
+	return &r, nil
+}
+
+type DeleteAutoScaleVmProfileResponse struct {
+	JobID       string `json:"jobid,omitempty"`
+	Displaytext string `json:"displaytext,omitempty"`
+	Success     bool   `json:"success,omitempty"`
+}
+
+type DeleteAutoScaleVmGroupParams struct {
+	p map[string]interface{}
+}
+
+func (p *DeleteAutoScaleVmGroupParams) toURLValues() url.Values {
+	u := url.Values{}
+	if p.p == nil {
+		return u
+	}
+	if v, found := p.p["id"]; found {
+		u.Set("id", v.(string))
+	}
+	return u
+}
+
+func (p *DeleteAutoScaleVmGroupParams) SetId(v string) {
+	if p.p == nil {
+		p.p = make(map[string]interface{})
+	}
+	p.p["id"] = v
+	return
+}
+
+// You should always use this function to get a new DeleteAutoScaleVmGroupParams instance,
+// as then you are sure you have configured all required params
+func (s *AutoScaleService) NewDeleteAutoScaleVmGroupParams(id string) *DeleteAutoScaleVmGroupParams {
+	p := &DeleteAutoScaleVmGroupParams{}
+	p.p = make(map[string]interface{})
+	p.p["id"] = id
+	return p
+}
+
+// Deletes an autoscale vm group.
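+//
+// A minimal sketch (groupID is a placeholder). The group is typically removed
+// before deleting the profile, policies, and conditions that reference it:
+//
+//	resp, err := cs.AutoScale.DeleteAutoScaleVmGroup(
+//		cs.AutoScale.NewDeleteAutoScaleVmGroupParams(groupID))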
+func (s *AutoScaleService) DeleteAutoScaleVmGroup(p *DeleteAutoScaleVmGroupParams) (*DeleteAutoScaleVmGroupResponse, error) { + resp, err := s.cs.newRequest("deleteAutoScaleVmGroup", p.toURLValues()) + if err != nil { + return nil, err + } + + var r DeleteAutoScaleVmGroupResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + + // If we have a async client, we need to wait for the async result + if s.cs.async { + b, err := s.cs.GetAsyncJobResult(r.JobID, s.cs.timeout) + if err != nil { + if err == AsyncTimeoutErr { + return &r, err + } + return nil, err + } + + if err := json.Unmarshal(b, &r); err != nil { + return nil, err + } + } + return &r, nil +} + +type DeleteAutoScaleVmGroupResponse struct { + JobID string `json:"jobid,omitempty"` + Displaytext string `json:"displaytext,omitempty"` + Success bool `json:"success,omitempty"` +} + +type ListCountersParams struct { + p map[string]interface{} +} + +func (p *ListCountersParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["id"]; found { + u.Set("id", v.(string)) + } + if v, found := p.p["keyword"]; found { + u.Set("keyword", v.(string)) + } + if v, found := p.p["name"]; found { + u.Set("name", v.(string)) + } + if v, found := p.p["page"]; found { + vv := strconv.Itoa(v.(int)) + u.Set("page", vv) + } + if v, found := p.p["pagesize"]; found { + vv := strconv.Itoa(v.(int)) + u.Set("pagesize", vv) + } + if v, found := p.p["source"]; found { + u.Set("source", v.(string)) + } + return u +} + +func (p *ListCountersParams) SetId(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["id"] = v + return +} + +func (p *ListCountersParams) SetKeyword(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["keyword"] = v + return +} + +func (p *ListCountersParams) SetName(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["name"] = v + return +} + +func (p *ListCountersParams) SetPage(v int) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["page"] = v + return +} + +func (p *ListCountersParams) SetPagesize(v int) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["pagesize"] = v + return +} + +func (p *ListCountersParams) SetSource(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["source"] = v + return +} + +// You should always use this function to get a new ListCountersParams instance, +// as then you are sure you have configured all required params +func (s *AutoScaleService) NewListCountersParams() *ListCountersParams { + p := &ListCountersParams{} + p.p = make(map[string]interface{}) + return p +} + +// This is a courtesy helper function, which in some cases may not work as expected! +func (s *AutoScaleService) GetCounterID(name string) (string, error) { + p := &ListCountersParams{} + p.p = make(map[string]interface{}) + + p.p["name"] = name + + l, err := s.ListCounters(p) + if err != nil { + return "", err + } + + if l.Count == 0 { + return "", fmt.Errorf("No match found for %s: %+v", name, l) + } + + if l.Count == 1 { + return l.Counters[0].Id, nil + } + + if l.Count > 1 { + for _, v := range l.Counters { + if v.Name == name { + return v.Id, nil + } + } + } + return "", fmt.Errorf("Could not find an exact match for %s: %+v", name, l) +} + +// This is a courtesy helper function, which in some cases may not work as expected! 
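+//
+// It first resolves the name via GetCounterID and then fetches the counter by
+// ID, so an ambiguous name can return an error even when matching counters
+// exist. A minimal sketch (the counter name is a placeholder):
+//
+//	counter, count, err := cs.AutoScale.GetCounterByName("some-counter-name")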
+func (s *AutoScaleService) GetCounterByName(name string) (*Counter, int, error) { + id, err := s.GetCounterID(name) + if err != nil { + return nil, -1, err + } + + r, count, err := s.GetCounterByID(id) + if err != nil { + return nil, count, err + } + return r, count, nil +} + +// This is a courtesy helper function, which in some cases may not work as expected! +func (s *AutoScaleService) GetCounterByID(id string) (*Counter, int, error) { + p := &ListCountersParams{} + p.p = make(map[string]interface{}) + + p.p["id"] = id + + l, err := s.ListCounters(p) + if err != nil { + if strings.Contains(err.Error(), fmt.Sprintf( + "Invalid parameter id value=%s due to incorrect long value format, "+ + "or entity does not exist", id)) { + return nil, 0, fmt.Errorf("No match found for %s: %+v", id, l) + } + return nil, -1, err + } + + if l.Count == 0 { + return nil, l.Count, fmt.Errorf("No match found for %s: %+v", id, l) + } + + if l.Count == 1 { + return l.Counters[0], l.Count, nil + } + return nil, l.Count, fmt.Errorf("There is more then one result for Counter UUID: %s!", id) +} + +// List the counters +func (s *AutoScaleService) ListCounters(p *ListCountersParams) (*ListCountersResponse, error) { + resp, err := s.cs.newRequest("listCounters", p.toURLValues()) + if err != nil { + return nil, err + } + + var r ListCountersResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + return &r, nil +} + +type ListCountersResponse struct { + Count int `json:"count"` + Counters []*Counter `json:"counter"` +} + +type Counter struct { + Id string `json:"id,omitempty"` + Name string `json:"name,omitempty"` + Source string `json:"source,omitempty"` + Value string `json:"value,omitempty"` + Zoneid string `json:"zoneid,omitempty"` +} + +type ListConditionsParams struct { + p map[string]interface{} +} + +func (p *ListConditionsParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["account"]; found { + u.Set("account", v.(string)) + } + if v, found := p.p["counterid"]; found { + u.Set("counterid", v.(string)) + } + if v, found := p.p["domainid"]; found { + u.Set("domainid", v.(string)) + } + if v, found := p.p["id"]; found { + u.Set("id", v.(string)) + } + if v, found := p.p["isrecursive"]; found { + vv := strconv.FormatBool(v.(bool)) + u.Set("isrecursive", vv) + } + if v, found := p.p["keyword"]; found { + u.Set("keyword", v.(string)) + } + if v, found := p.p["listall"]; found { + vv := strconv.FormatBool(v.(bool)) + u.Set("listall", vv) + } + if v, found := p.p["page"]; found { + vv := strconv.Itoa(v.(int)) + u.Set("page", vv) + } + if v, found := p.p["pagesize"]; found { + vv := strconv.Itoa(v.(int)) + u.Set("pagesize", vv) + } + if v, found := p.p["policyid"]; found { + u.Set("policyid", v.(string)) + } + return u +} + +func (p *ListConditionsParams) SetAccount(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["account"] = v + return +} + +func (p *ListConditionsParams) SetCounterid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["counterid"] = v + return +} + +func (p *ListConditionsParams) SetDomainid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["domainid"] = v + return +} + +func (p *ListConditionsParams) SetId(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["id"] = v + return +} + +func (p *ListConditionsParams) SetIsrecursive(v bool) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + 
p.p["isrecursive"] = v + return +} + +func (p *ListConditionsParams) SetKeyword(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["keyword"] = v + return +} + +func (p *ListConditionsParams) SetListall(v bool) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["listall"] = v + return +} + +func (p *ListConditionsParams) SetPage(v int) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["page"] = v + return +} + +func (p *ListConditionsParams) SetPagesize(v int) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["pagesize"] = v + return +} + +func (p *ListConditionsParams) SetPolicyid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["policyid"] = v + return +} + +// You should always use this function to get a new ListConditionsParams instance, +// as then you are sure you have configured all required params +func (s *AutoScaleService) NewListConditionsParams() *ListConditionsParams { + p := &ListConditionsParams{} + p.p = make(map[string]interface{}) + return p +} + +// This is a courtesy helper function, which in some cases may not work as expected! +func (s *AutoScaleService) GetConditionByID(id string) (*Condition, int, error) { + p := &ListConditionsParams{} + p.p = make(map[string]interface{}) + + p.p["id"] = id + + l, err := s.ListConditions(p) + if err != nil { + if strings.Contains(err.Error(), fmt.Sprintf( + "Invalid parameter id value=%s due to incorrect long value format, "+ + "or entity does not exist", id)) { + return nil, 0, fmt.Errorf("No match found for %s: %+v", id, l) + } + return nil, -1, err + } + + if l.Count == 0 { + return nil, l.Count, fmt.Errorf("No match found for %s: %+v", id, l) + } + + if l.Count == 1 { + return l.Conditions[0], l.Count, nil + } + return nil, l.Count, fmt.Errorf("There is more then one result for Condition UUID: %s!", id) +} + +// List Conditions for the specific user +func (s *AutoScaleService) ListConditions(p *ListConditionsParams) (*ListConditionsResponse, error) { + resp, err := s.cs.newRequest("listConditions", p.toURLValues()) + if err != nil { + return nil, err + } + + var r ListConditionsResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + return &r, nil +} + +type ListConditionsResponse struct { + Count int `json:"count"` + Conditions []*Condition `json:"condition"` +} + +type Condition struct { + Account string `json:"account,omitempty"` + Counter []string `json:"counter,omitempty"` + Domain string `json:"domain,omitempty"` + Domainid string `json:"domainid,omitempty"` + Id string `json:"id,omitempty"` + Project string `json:"project,omitempty"` + Projectid string `json:"projectid,omitempty"` + Relationaloperator string `json:"relationaloperator,omitempty"` + Threshold int64 `json:"threshold,omitempty"` + Zoneid string `json:"zoneid,omitempty"` +} + +type ListAutoScalePoliciesParams struct { + p map[string]interface{} +} + +func (p *ListAutoScalePoliciesParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["account"]; found { + u.Set("account", v.(string)) + } + if v, found := p.p["action"]; found { + u.Set("action", v.(string)) + } + if v, found := p.p["conditionid"]; found { + u.Set("conditionid", v.(string)) + } + if v, found := p.p["domainid"]; found { + u.Set("domainid", v.(string)) + } + if v, found := p.p["id"]; found { + u.Set("id", v.(string)) + } + if v, found := p.p["isrecursive"]; found { + vv := strconv.FormatBool(v.(bool)) + 
u.Set("isrecursive", vv) + } + if v, found := p.p["keyword"]; found { + u.Set("keyword", v.(string)) + } + if v, found := p.p["listall"]; found { + vv := strconv.FormatBool(v.(bool)) + u.Set("listall", vv) + } + if v, found := p.p["page"]; found { + vv := strconv.Itoa(v.(int)) + u.Set("page", vv) + } + if v, found := p.p["pagesize"]; found { + vv := strconv.Itoa(v.(int)) + u.Set("pagesize", vv) + } + if v, found := p.p["vmgroupid"]; found { + u.Set("vmgroupid", v.(string)) + } + return u +} + +func (p *ListAutoScalePoliciesParams) SetAccount(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["account"] = v + return +} + +func (p *ListAutoScalePoliciesParams) SetAction(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["action"] = v + return +} + +func (p *ListAutoScalePoliciesParams) SetConditionid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["conditionid"] = v + return +} + +func (p *ListAutoScalePoliciesParams) SetDomainid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["domainid"] = v + return +} + +func (p *ListAutoScalePoliciesParams) SetId(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["id"] = v + return +} + +func (p *ListAutoScalePoliciesParams) SetIsrecursive(v bool) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["isrecursive"] = v + return +} + +func (p *ListAutoScalePoliciesParams) SetKeyword(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["keyword"] = v + return +} + +func (p *ListAutoScalePoliciesParams) SetListall(v bool) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["listall"] = v + return +} + +func (p *ListAutoScalePoliciesParams) SetPage(v int) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["page"] = v + return +} + +func (p *ListAutoScalePoliciesParams) SetPagesize(v int) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["pagesize"] = v + return +} + +func (p *ListAutoScalePoliciesParams) SetVmgroupid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["vmgroupid"] = v + return +} + +// You should always use this function to get a new ListAutoScalePoliciesParams instance, +// as then you are sure you have configured all required params +func (s *AutoScaleService) NewListAutoScalePoliciesParams() *ListAutoScalePoliciesParams { + p := &ListAutoScalePoliciesParams{} + p.p = make(map[string]interface{}) + return p +} + +// This is a courtesy helper function, which in some cases may not work as expected! +func (s *AutoScaleService) GetAutoScalePolicyByID(id string) (*AutoScalePolicy, int, error) { + p := &ListAutoScalePoliciesParams{} + p.p = make(map[string]interface{}) + + p.p["id"] = id + + l, err := s.ListAutoScalePolicies(p) + if err != nil { + if strings.Contains(err.Error(), fmt.Sprintf( + "Invalid parameter id value=%s due to incorrect long value format, "+ + "or entity does not exist", id)) { + return nil, 0, fmt.Errorf("No match found for %s: %+v", id, l) + } + return nil, -1, err + } + + if l.Count == 0 { + return nil, l.Count, fmt.Errorf("No match found for %s: %+v", id, l) + } + + if l.Count == 1 { + return l.AutoScalePolicies[0], l.Count, nil + } + return nil, l.Count, fmt.Errorf("There is more then one result for AutoScalePolicy UUID: %s!", id) +} + +// Lists autoscale policies. 
+func (s *AutoScaleService) ListAutoScalePolicies(p *ListAutoScalePoliciesParams) (*ListAutoScalePoliciesResponse, error) { + resp, err := s.cs.newRequest("listAutoScalePolicies", p.toURLValues()) + if err != nil { + return nil, err + } + + var r ListAutoScalePoliciesResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + return &r, nil +} + +type ListAutoScalePoliciesResponse struct { + Count int `json:"count"` + AutoScalePolicies []*AutoScalePolicy `json:"autoscalepolicy"` +} + +type AutoScalePolicy struct { + Account string `json:"account,omitempty"` + Action string `json:"action,omitempty"` + Conditions []string `json:"conditions,omitempty"` + Domain string `json:"domain,omitempty"` + Domainid string `json:"domainid,omitempty"` + Duration int `json:"duration,omitempty"` + Id string `json:"id,omitempty"` + Project string `json:"project,omitempty"` + Projectid string `json:"projectid,omitempty"` + Quiettime int `json:"quiettime,omitempty"` +} + +type ListAutoScaleVmProfilesParams struct { + p map[string]interface{} +} + +func (p *ListAutoScaleVmProfilesParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["account"]; found { + u.Set("account", v.(string)) + } + if v, found := p.p["domainid"]; found { + u.Set("domainid", v.(string)) + } + if v, found := p.p["fordisplay"]; found { + vv := strconv.FormatBool(v.(bool)) + u.Set("fordisplay", vv) + } + if v, found := p.p["id"]; found { + u.Set("id", v.(string)) + } + if v, found := p.p["isrecursive"]; found { + vv := strconv.FormatBool(v.(bool)) + u.Set("isrecursive", vv) + } + if v, found := p.p["keyword"]; found { + u.Set("keyword", v.(string)) + } + if v, found := p.p["listall"]; found { + vv := strconv.FormatBool(v.(bool)) + u.Set("listall", vv) + } + if v, found := p.p["otherdeployparams"]; found { + u.Set("otherdeployparams", v.(string)) + } + if v, found := p.p["page"]; found { + vv := strconv.Itoa(v.(int)) + u.Set("page", vv) + } + if v, found := p.p["pagesize"]; found { + vv := strconv.Itoa(v.(int)) + u.Set("pagesize", vv) + } + if v, found := p.p["projectid"]; found { + u.Set("projectid", v.(string)) + } + if v, found := p.p["serviceofferingid"]; found { + u.Set("serviceofferingid", v.(string)) + } + if v, found := p.p["templateid"]; found { + u.Set("templateid", v.(string)) + } + if v, found := p.p["zoneid"]; found { + u.Set("zoneid", v.(string)) + } + return u +} + +func (p *ListAutoScaleVmProfilesParams) SetAccount(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["account"] = v + return +} + +func (p *ListAutoScaleVmProfilesParams) SetDomainid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["domainid"] = v + return +} + +func (p *ListAutoScaleVmProfilesParams) SetFordisplay(v bool) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["fordisplay"] = v + return +} + +func (p *ListAutoScaleVmProfilesParams) SetId(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["id"] = v + return +} + +func (p *ListAutoScaleVmProfilesParams) SetIsrecursive(v bool) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["isrecursive"] = v + return +} + +func (p *ListAutoScaleVmProfilesParams) SetKeyword(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["keyword"] = v + return +} + +func (p *ListAutoScaleVmProfilesParams) SetListall(v bool) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["listall"] = v + return +} + 
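Every generated setter repeats the same nil guard before writing to the map. Condensed into a standalone sketch (exampleParams is a hypothetical stand-in for the generated *Params types, not part of the vendored file):

    // exampleParams mirrors the shape of the generated params structs.
    type exampleParams struct {
        p map[string]interface{}
    }

    // set allocates the backing map on first use, so calling a setter on a
    // zero-value params struct never panics with an assignment to a nil map.
    func (p *exampleParams) set(key string, v interface{}) {
        if p.p == nil {
            p.p = make(map[string]interface{})
        }
        p.p[key] = v
    }

The same guard explains why each toURLValues treats a nil map as "no parameters set" and returns empty url.Values.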
+func (p *ListAutoScaleVmProfilesParams) SetOtherdeployparams(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["otherdeployparams"] = v + return +} + +func (p *ListAutoScaleVmProfilesParams) SetPage(v int) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["page"] = v + return +} + +func (p *ListAutoScaleVmProfilesParams) SetPagesize(v int) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["pagesize"] = v + return +} + +func (p *ListAutoScaleVmProfilesParams) SetProjectid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["projectid"] = v + return +} + +func (p *ListAutoScaleVmProfilesParams) SetServiceofferingid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["serviceofferingid"] = v + return +} + +func (p *ListAutoScaleVmProfilesParams) SetTemplateid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["templateid"] = v + return +} + +func (p *ListAutoScaleVmProfilesParams) SetZoneid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["zoneid"] = v + return +} + +// You should always use this function to get a new ListAutoScaleVmProfilesParams instance, +// as then you are sure you have configured all required params +func (s *AutoScaleService) NewListAutoScaleVmProfilesParams() *ListAutoScaleVmProfilesParams { + p := &ListAutoScaleVmProfilesParams{} + p.p = make(map[string]interface{}) + return p +} + +// This is a courtesy helper function, which in some cases may not work as expected! +func (s *AutoScaleService) GetAutoScaleVmProfileByID(id string) (*AutoScaleVmProfile, int, error) { + p := &ListAutoScaleVmProfilesParams{} + p.p = make(map[string]interface{}) + + p.p["id"] = id + + l, err := s.ListAutoScaleVmProfiles(p) + if err != nil { + if strings.Contains(err.Error(), fmt.Sprintf( + "Invalid parameter id value=%s due to incorrect long value format, "+ + "or entity does not exist", id)) { + return nil, 0, fmt.Errorf("No match found for %s: %+v", id, l) + } + return nil, -1, err + } + + if l.Count == 0 { + // If no matches, search all projects + p.p["projectid"] = "-1" + + l, err = s.ListAutoScaleVmProfiles(p) + if err != nil { + if strings.Contains(err.Error(), fmt.Sprintf( + "Invalid parameter id value=%s due to incorrect long value format, "+ + "or entity does not exist", id)) { + return nil, 0, fmt.Errorf("No match found for %s: %+v", id, l) + } + return nil, -1, err + } + } + + if l.Count == 0 { + return nil, l.Count, fmt.Errorf("No match found for %s: %+v", id, l) + } + + if l.Count == 1 { + return l.AutoScaleVmProfiles[0], l.Count, nil + } + return nil, l.Count, fmt.Errorf("There is more then one result for AutoScaleVmProfile UUID: %s!", id) +} + +// Lists autoscale vm profiles. 
+func (s *AutoScaleService) ListAutoScaleVmProfiles(p *ListAutoScaleVmProfilesParams) (*ListAutoScaleVmProfilesResponse, error) { + resp, err := s.cs.newRequest("listAutoScaleVmProfiles", p.toURLValues()) + if err != nil { + return nil, err + } + + var r ListAutoScaleVmProfilesResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + return &r, nil +} + +type ListAutoScaleVmProfilesResponse struct { + Count int `json:"count"` + AutoScaleVmProfiles []*AutoScaleVmProfile `json:"autoscalevmprofile"` +} + +type AutoScaleVmProfile struct { + Account string `json:"account,omitempty"` + Autoscaleuserid string `json:"autoscaleuserid,omitempty"` + Destroyvmgraceperiod int `json:"destroyvmgraceperiod,omitempty"` + Domain string `json:"domain,omitempty"` + Domainid string `json:"domainid,omitempty"` + Fordisplay bool `json:"fordisplay,omitempty"` + Id string `json:"id,omitempty"` + Otherdeployparams string `json:"otherdeployparams,omitempty"` + Project string `json:"project,omitempty"` + Projectid string `json:"projectid,omitempty"` + Serviceofferingid string `json:"serviceofferingid,omitempty"` + Templateid string `json:"templateid,omitempty"` + Zoneid string `json:"zoneid,omitempty"` +} + +type ListAutoScaleVmGroupsParams struct { + p map[string]interface{} +} + +func (p *ListAutoScaleVmGroupsParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["account"]; found { + u.Set("account", v.(string)) + } + if v, found := p.p["domainid"]; found { + u.Set("domainid", v.(string)) + } + if v, found := p.p["fordisplay"]; found { + vv := strconv.FormatBool(v.(bool)) + u.Set("fordisplay", vv) + } + if v, found := p.p["id"]; found { + u.Set("id", v.(string)) + } + if v, found := p.p["isrecursive"]; found { + vv := strconv.FormatBool(v.(bool)) + u.Set("isrecursive", vv) + } + if v, found := p.p["keyword"]; found { + u.Set("keyword", v.(string)) + } + if v, found := p.p["lbruleid"]; found { + u.Set("lbruleid", v.(string)) + } + if v, found := p.p["listall"]; found { + vv := strconv.FormatBool(v.(bool)) + u.Set("listall", vv) + } + if v, found := p.p["page"]; found { + vv := strconv.Itoa(v.(int)) + u.Set("page", vv) + } + if v, found := p.p["pagesize"]; found { + vv := strconv.Itoa(v.(int)) + u.Set("pagesize", vv) + } + if v, found := p.p["policyid"]; found { + u.Set("policyid", v.(string)) + } + if v, found := p.p["projectid"]; found { + u.Set("projectid", v.(string)) + } + if v, found := p.p["vmprofileid"]; found { + u.Set("vmprofileid", v.(string)) + } + if v, found := p.p["zoneid"]; found { + u.Set("zoneid", v.(string)) + } + return u +} + +func (p *ListAutoScaleVmGroupsParams) SetAccount(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["account"] = v + return +} + +func (p *ListAutoScaleVmGroupsParams) SetDomainid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["domainid"] = v + return +} + +func (p *ListAutoScaleVmGroupsParams) SetFordisplay(v bool) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["fordisplay"] = v + return +} + +func (p *ListAutoScaleVmGroupsParams) SetId(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["id"] = v + return +} + +func (p *ListAutoScaleVmGroupsParams) SetIsrecursive(v bool) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["isrecursive"] = v + return +} + +func (p *ListAutoScaleVmGroupsParams) SetKeyword(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + 
p.p["keyword"] = v + return +} + +func (p *ListAutoScaleVmGroupsParams) SetLbruleid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["lbruleid"] = v + return +} + +func (p *ListAutoScaleVmGroupsParams) SetListall(v bool) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["listall"] = v + return +} + +func (p *ListAutoScaleVmGroupsParams) SetPage(v int) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["page"] = v + return +} + +func (p *ListAutoScaleVmGroupsParams) SetPagesize(v int) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["pagesize"] = v + return +} + +func (p *ListAutoScaleVmGroupsParams) SetPolicyid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["policyid"] = v + return +} + +func (p *ListAutoScaleVmGroupsParams) SetProjectid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["projectid"] = v + return +} + +func (p *ListAutoScaleVmGroupsParams) SetVmprofileid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["vmprofileid"] = v + return +} + +func (p *ListAutoScaleVmGroupsParams) SetZoneid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["zoneid"] = v + return +} + +// You should always use this function to get a new ListAutoScaleVmGroupsParams instance, +// as then you are sure you have configured all required params +func (s *AutoScaleService) NewListAutoScaleVmGroupsParams() *ListAutoScaleVmGroupsParams { + p := &ListAutoScaleVmGroupsParams{} + p.p = make(map[string]interface{}) + return p +} + +// This is a courtesy helper function, which in some cases may not work as expected! +func (s *AutoScaleService) GetAutoScaleVmGroupByID(id string) (*AutoScaleVmGroup, int, error) { + p := &ListAutoScaleVmGroupsParams{} + p.p = make(map[string]interface{}) + + p.p["id"] = id + + l, err := s.ListAutoScaleVmGroups(p) + if err != nil { + if strings.Contains(err.Error(), fmt.Sprintf( + "Invalid parameter id value=%s due to incorrect long value format, "+ + "or entity does not exist", id)) { + return nil, 0, fmt.Errorf("No match found for %s: %+v", id, l) + } + return nil, -1, err + } + + if l.Count == 0 { + // If no matches, search all projects + p.p["projectid"] = "-1" + + l, err = s.ListAutoScaleVmGroups(p) + if err != nil { + if strings.Contains(err.Error(), fmt.Sprintf( + "Invalid parameter id value=%s due to incorrect long value format, "+ + "or entity does not exist", id)) { + return nil, 0, fmt.Errorf("No match found for %s: %+v", id, l) + } + return nil, -1, err + } + } + + if l.Count == 0 { + return nil, l.Count, fmt.Errorf("No match found for %s: %+v", id, l) + } + + if l.Count == 1 { + return l.AutoScaleVmGroups[0], l.Count, nil + } + return nil, l.Count, fmt.Errorf("There is more then one result for AutoScaleVmGroup UUID: %s!", id) +} + +// Lists autoscale vm groups. 
+func (s *AutoScaleService) ListAutoScaleVmGroups(p *ListAutoScaleVmGroupsParams) (*ListAutoScaleVmGroupsResponse, error) { + resp, err := s.cs.newRequest("listAutoScaleVmGroups", p.toURLValues()) + if err != nil { + return nil, err + } + + var r ListAutoScaleVmGroupsResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + return &r, nil +} + +type ListAutoScaleVmGroupsResponse struct { + Count int `json:"count"` + AutoScaleVmGroups []*AutoScaleVmGroup `json:"autoscalevmgroup"` +} + +type AutoScaleVmGroup struct { + Account string `json:"account,omitempty"` + Domain string `json:"domain,omitempty"` + Domainid string `json:"domainid,omitempty"` + Fordisplay bool `json:"fordisplay,omitempty"` + Id string `json:"id,omitempty"` + Interval int `json:"interval,omitempty"` + Lbruleid string `json:"lbruleid,omitempty"` + Maxmembers int `json:"maxmembers,omitempty"` + Minmembers int `json:"minmembers,omitempty"` + Project string `json:"project,omitempty"` + Projectid string `json:"projectid,omitempty"` + Scaledownpolicies []string `json:"scaledownpolicies,omitempty"` + Scaleuppolicies []string `json:"scaleuppolicies,omitempty"` + State string `json:"state,omitempty"` + Vmprofileid string `json:"vmprofileid,omitempty"` +} + +type EnableAutoScaleVmGroupParams struct { + p map[string]interface{} +} + +func (p *EnableAutoScaleVmGroupParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["id"]; found { + u.Set("id", v.(string)) + } + return u +} + +func (p *EnableAutoScaleVmGroupParams) SetId(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["id"] = v + return +} + +// You should always use this function to get a new EnableAutoScaleVmGroupParams instance, +// as then you are sure you have configured all required params +func (s *AutoScaleService) NewEnableAutoScaleVmGroupParams(id string) *EnableAutoScaleVmGroupParams { + p := &EnableAutoScaleVmGroupParams{} + p.p = make(map[string]interface{}) + p.p["id"] = id + return p +} + +// Enables an AutoScale Vm Group +func (s *AutoScaleService) EnableAutoScaleVmGroup(p *EnableAutoScaleVmGroupParams) (*EnableAutoScaleVmGroupResponse, error) { + resp, err := s.cs.newRequest("enableAutoScaleVmGroup", p.toURLValues()) + if err != nil { + return nil, err + } + + var r EnableAutoScaleVmGroupResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + + // If we have a async client, we need to wait for the async result + if s.cs.async { + b, err := s.cs.GetAsyncJobResult(r.JobID, s.cs.timeout) + if err != nil { + if err == AsyncTimeoutErr { + return &r, err + } + return nil, err + } + + b, err = getRawValue(b) + if err != nil { + return nil, err + } + + if err := json.Unmarshal(b, &r); err != nil { + return nil, err + } + } + return &r, nil +} + +type EnableAutoScaleVmGroupResponse struct { + JobID string `json:"jobid,omitempty"` + Account string `json:"account,omitempty"` + Domain string `json:"domain,omitempty"` + Domainid string `json:"domainid,omitempty"` + Fordisplay bool `json:"fordisplay,omitempty"` + Id string `json:"id,omitempty"` + Interval int `json:"interval,omitempty"` + Lbruleid string `json:"lbruleid,omitempty"` + Maxmembers int `json:"maxmembers,omitempty"` + Minmembers int `json:"minmembers,omitempty"` + Project string `json:"project,omitempty"` + Projectid string `json:"projectid,omitempty"` + Scaledownpolicies []string `json:"scaledownpolicies,omitempty"` + Scaleuppolicies []string `json:"scaleuppolicies,omitempty"` + 
State string `json:"state,omitempty"` + Vmprofileid string `json:"vmprofileid,omitempty"` +} + +type DisableAutoScaleVmGroupParams struct { + p map[string]interface{} +} + +func (p *DisableAutoScaleVmGroupParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["id"]; found { + u.Set("id", v.(string)) + } + return u +} + +func (p *DisableAutoScaleVmGroupParams) SetId(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["id"] = v + return +} + +// You should always use this function to get a new DisableAutoScaleVmGroupParams instance, +// as then you are sure you have configured all required params +func (s *AutoScaleService) NewDisableAutoScaleVmGroupParams(id string) *DisableAutoScaleVmGroupParams { + p := &DisableAutoScaleVmGroupParams{} + p.p = make(map[string]interface{}) + p.p["id"] = id + return p +} + +// Disables an AutoScale Vm Group +func (s *AutoScaleService) DisableAutoScaleVmGroup(p *DisableAutoScaleVmGroupParams) (*DisableAutoScaleVmGroupResponse, error) { + resp, err := s.cs.newRequest("disableAutoScaleVmGroup", p.toURLValues()) + if err != nil { + return nil, err + } + + var r DisableAutoScaleVmGroupResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + + // If we have a async client, we need to wait for the async result + if s.cs.async { + b, err := s.cs.GetAsyncJobResult(r.JobID, s.cs.timeout) + if err != nil { + if err == AsyncTimeoutErr { + return &r, err + } + return nil, err + } + + b, err = getRawValue(b) + if err != nil { + return nil, err + } + + if err := json.Unmarshal(b, &r); err != nil { + return nil, err + } + } + return &r, nil +} + +type DisableAutoScaleVmGroupResponse struct { + JobID string `json:"jobid,omitempty"` + Account string `json:"account,omitempty"` + Domain string `json:"domain,omitempty"` + Domainid string `json:"domainid,omitempty"` + Fordisplay bool `json:"fordisplay,omitempty"` + Id string `json:"id,omitempty"` + Interval int `json:"interval,omitempty"` + Lbruleid string `json:"lbruleid,omitempty"` + Maxmembers int `json:"maxmembers,omitempty"` + Minmembers int `json:"minmembers,omitempty"` + Project string `json:"project,omitempty"` + Projectid string `json:"projectid,omitempty"` + Scaledownpolicies []string `json:"scaledownpolicies,omitempty"` + Scaleuppolicies []string `json:"scaleuppolicies,omitempty"` + State string `json:"state,omitempty"` + Vmprofileid string `json:"vmprofileid,omitempty"` +} + +type UpdateAutoScalePolicyParams struct { + p map[string]interface{} +} + +func (p *UpdateAutoScalePolicyParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["conditionids"]; found { + vv := strings.Join(v.([]string), ",") + u.Set("conditionids", vv) + } + if v, found := p.p["duration"]; found { + vv := strconv.Itoa(v.(int)) + u.Set("duration", vv) + } + if v, found := p.p["id"]; found { + u.Set("id", v.(string)) + } + if v, found := p.p["quiettime"]; found { + vv := strconv.Itoa(v.(int)) + u.Set("quiettime", vv) + } + return u +} + +func (p *UpdateAutoScalePolicyParams) SetConditionids(v []string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["conditionids"] = v + return +} + +func (p *UpdateAutoScalePolicyParams) SetDuration(v int) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["duration"] = v + return +} + +func (p *UpdateAutoScalePolicyParams) SetId(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["id"] = v + 
return +} + +func (p *UpdateAutoScalePolicyParams) SetQuiettime(v int) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["quiettime"] = v + return +} + +// You should always use this function to get a new UpdateAutoScalePolicyParams instance, +// as then you are sure you have configured all required params +func (s *AutoScaleService) NewUpdateAutoScalePolicyParams(id string) *UpdateAutoScalePolicyParams { + p := &UpdateAutoScalePolicyParams{} + p.p = make(map[string]interface{}) + p.p["id"] = id + return p +} + +// Updates an existing autoscale policy. +func (s *AutoScaleService) UpdateAutoScalePolicy(p *UpdateAutoScalePolicyParams) (*UpdateAutoScalePolicyResponse, error) { + resp, err := s.cs.newRequest("updateAutoScalePolicy", p.toURLValues()) + if err != nil { + return nil, err + } + + var r UpdateAutoScalePolicyResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + + // If we have a async client, we need to wait for the async result + if s.cs.async { + b, err := s.cs.GetAsyncJobResult(r.JobID, s.cs.timeout) + if err != nil { + if err == AsyncTimeoutErr { + return &r, err + } + return nil, err + } + + b, err = getRawValue(b) + if err != nil { + return nil, err + } + + if err := json.Unmarshal(b, &r); err != nil { + return nil, err + } + } + return &r, nil +} + +type UpdateAutoScalePolicyResponse struct { + JobID string `json:"jobid,omitempty"` + Account string `json:"account,omitempty"` + Action string `json:"action,omitempty"` + Conditions []string `json:"conditions,omitempty"` + Domain string `json:"domain,omitempty"` + Domainid string `json:"domainid,omitempty"` + Duration int `json:"duration,omitempty"` + Id string `json:"id,omitempty"` + Project string `json:"project,omitempty"` + Projectid string `json:"projectid,omitempty"` + Quiettime int `json:"quiettime,omitempty"` +} + +type UpdateAutoScaleVmProfileParams struct { + p map[string]interface{} +} + +func (p *UpdateAutoScaleVmProfileParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["autoscaleuserid"]; found { + u.Set("autoscaleuserid", v.(string)) + } + if v, found := p.p["counterparam"]; found { + i := 0 + for k, vv := range v.(map[string]string) { + u.Set(fmt.Sprintf("counterparam[%d].key", i), k) + u.Set(fmt.Sprintf("counterparam[%d].value", i), vv) + i++ + } + } + if v, found := p.p["customid"]; found { + u.Set("customid", v.(string)) + } + if v, found := p.p["destroyvmgraceperiod"]; found { + vv := strconv.Itoa(v.(int)) + u.Set("destroyvmgraceperiod", vv) + } + if v, found := p.p["fordisplay"]; found { + vv := strconv.FormatBool(v.(bool)) + u.Set("fordisplay", vv) + } + if v, found := p.p["id"]; found { + u.Set("id", v.(string)) + } + if v, found := p.p["templateid"]; found { + u.Set("templateid", v.(string)) + } + return u +} + +func (p *UpdateAutoScaleVmProfileParams) SetAutoscaleuserid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["autoscaleuserid"] = v + return +} + +func (p *UpdateAutoScaleVmProfileParams) SetCounterparam(v map[string]string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["counterparam"] = v + return +} + +func (p *UpdateAutoScaleVmProfileParams) SetCustomid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["customid"] = v + return +} + +func (p *UpdateAutoScaleVmProfileParams) SetDestroyvmgraceperiod(v int) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["destroyvmgraceperiod"] = v + return +} + +func (p 
*UpdateAutoScaleVmProfileParams) SetFordisplay(v bool) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["fordisplay"] = v + return +} + +func (p *UpdateAutoScaleVmProfileParams) SetId(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["id"] = v + return +} + +func (p *UpdateAutoScaleVmProfileParams) SetTemplateid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["templateid"] = v + return +} + +// You should always use this function to get a new UpdateAutoScaleVmProfileParams instance, +// as then you are sure you have configured all required params +func (s *AutoScaleService) NewUpdateAutoScaleVmProfileParams(id string) *UpdateAutoScaleVmProfileParams { + p := &UpdateAutoScaleVmProfileParams{} + p.p = make(map[string]interface{}) + p.p["id"] = id + return p +} + +// Updates an existing autoscale vm profile. +func (s *AutoScaleService) UpdateAutoScaleVmProfile(p *UpdateAutoScaleVmProfileParams) (*UpdateAutoScaleVmProfileResponse, error) { + resp, err := s.cs.newRequest("updateAutoScaleVmProfile", p.toURLValues()) + if err != nil { + return nil, err + } + + var r UpdateAutoScaleVmProfileResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + + // If we have a async client, we need to wait for the async result + if s.cs.async { + b, err := s.cs.GetAsyncJobResult(r.JobID, s.cs.timeout) + if err != nil { + if err == AsyncTimeoutErr { + return &r, err + } + return nil, err + } + + b, err = getRawValue(b) + if err != nil { + return nil, err + } + + if err := json.Unmarshal(b, &r); err != nil { + return nil, err + } + } + return &r, nil +} + +type UpdateAutoScaleVmProfileResponse struct { + JobID string `json:"jobid,omitempty"` + Account string `json:"account,omitempty"` + Autoscaleuserid string `json:"autoscaleuserid,omitempty"` + Destroyvmgraceperiod int `json:"destroyvmgraceperiod,omitempty"` + Domain string `json:"domain,omitempty"` + Domainid string `json:"domainid,omitempty"` + Fordisplay bool `json:"fordisplay,omitempty"` + Id string `json:"id,omitempty"` + Otherdeployparams string `json:"otherdeployparams,omitempty"` + Project string `json:"project,omitempty"` + Projectid string `json:"projectid,omitempty"` + Serviceofferingid string `json:"serviceofferingid,omitempty"` + Templateid string `json:"templateid,omitempty"` + Zoneid string `json:"zoneid,omitempty"` +} + +type UpdateAutoScaleVmGroupParams struct { + p map[string]interface{} +} + +func (p *UpdateAutoScaleVmGroupParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["customid"]; found { + u.Set("customid", v.(string)) + } + if v, found := p.p["fordisplay"]; found { + vv := strconv.FormatBool(v.(bool)) + u.Set("fordisplay", vv) + } + if v, found := p.p["id"]; found { + u.Set("id", v.(string)) + } + if v, found := p.p["interval"]; found { + vv := strconv.Itoa(v.(int)) + u.Set("interval", vv) + } + if v, found := p.p["maxmembers"]; found { + vv := strconv.Itoa(v.(int)) + u.Set("maxmembers", vv) + } + if v, found := p.p["minmembers"]; found { + vv := strconv.Itoa(v.(int)) + u.Set("minmembers", vv) + } + if v, found := p.p["scaledownpolicyids"]; found { + vv := strings.Join(v.([]string), ",") + u.Set("scaledownpolicyids", vv) + } + if v, found := p.p["scaleuppolicyids"]; found { + vv := strings.Join(v.([]string), ",") + u.Set("scaleuppolicyids", vv) + } + return u +} + +func (p *UpdateAutoScaleVmGroupParams) SetCustomid(v string) { + if p.p == nil { + p.p = 
make(map[string]interface{}) + } + p.p["customid"] = v + return +} + +func (p *UpdateAutoScaleVmGroupParams) SetFordisplay(v bool) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["fordisplay"] = v + return +} + +func (p *UpdateAutoScaleVmGroupParams) SetId(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["id"] = v + return +} + +func (p *UpdateAutoScaleVmGroupParams) SetInterval(v int) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["interval"] = v + return +} + +func (p *UpdateAutoScaleVmGroupParams) SetMaxmembers(v int) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["maxmembers"] = v + return +} + +func (p *UpdateAutoScaleVmGroupParams) SetMinmembers(v int) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["minmembers"] = v + return +} + +func (p *UpdateAutoScaleVmGroupParams) SetScaledownpolicyids(v []string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["scaledownpolicyids"] = v + return +} + +func (p *UpdateAutoScaleVmGroupParams) SetScaleuppolicyids(v []string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["scaleuppolicyids"] = v + return +} + +// You should always use this function to get a new UpdateAutoScaleVmGroupParams instance, +// as then you are sure you have configured all required params +func (s *AutoScaleService) NewUpdateAutoScaleVmGroupParams(id string) *UpdateAutoScaleVmGroupParams { + p := &UpdateAutoScaleVmGroupParams{} + p.p = make(map[string]interface{}) + p.p["id"] = id + return p +} + +// Updates an existing autoscale vm group. +func (s *AutoScaleService) UpdateAutoScaleVmGroup(p *UpdateAutoScaleVmGroupParams) (*UpdateAutoScaleVmGroupResponse, error) { + resp, err := s.cs.newRequest("updateAutoScaleVmGroup", p.toURLValues()) + if err != nil { + return nil, err + } + + var r UpdateAutoScaleVmGroupResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + + // If we have a async client, we need to wait for the async result + if s.cs.async { + b, err := s.cs.GetAsyncJobResult(r.JobID, s.cs.timeout) + if err != nil { + if err == AsyncTimeoutErr { + return &r, err + } + return nil, err + } + + b, err = getRawValue(b) + if err != nil { + return nil, err + } + + if err := json.Unmarshal(b, &r); err != nil { + return nil, err + } + } + return &r, nil +} + +type UpdateAutoScaleVmGroupResponse struct { + JobID string `json:"jobid,omitempty"` + Account string `json:"account,omitempty"` + Domain string `json:"domain,omitempty"` + Domainid string `json:"domainid,omitempty"` + Fordisplay bool `json:"fordisplay,omitempty"` + Id string `json:"id,omitempty"` + Interval int `json:"interval,omitempty"` + Lbruleid string `json:"lbruleid,omitempty"` + Maxmembers int `json:"maxmembers,omitempty"` + Minmembers int `json:"minmembers,omitempty"` + Project string `json:"project,omitempty"` + Projectid string `json:"projectid,omitempty"` + Scaledownpolicies []string `json:"scaledownpolicies,omitempty"` + Scaleuppolicies []string `json:"scaleuppolicies,omitempty"` + State string `json:"state,omitempty"` + Vmprofileid string `json:"vmprofileid,omitempty"` +} diff --git a/vendor/github.com/xanzy/go-cloudstack/cloudstack/BaremetalService.go b/vendor/github.com/xanzy/go-cloudstack/cloudstack/BaremetalService.go new file mode 100644 index 000000000..a37aa1c60 --- /dev/null +++ b/vendor/github.com/xanzy/go-cloudstack/cloudstack/BaremetalService.go @@ -0,0 +1,676 @@ +// +// Copyright 2014, Sander van Harmelen +// +// Licensed 
under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package cloudstack + +import ( + "encoding/json" + "net/url" + "strconv" +) + +type AddBaremetalPxeKickStartServerParams struct { + p map[string]interface{} +} + +func (p *AddBaremetalPxeKickStartServerParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["password"]; found { + u.Set("password", v.(string)) + } + if v, found := p.p["physicalnetworkid"]; found { + u.Set("physicalnetworkid", v.(string)) + } + if v, found := p.p["podid"]; found { + u.Set("podid", v.(string)) + } + if v, found := p.p["pxeservertype"]; found { + u.Set("pxeservertype", v.(string)) + } + if v, found := p.p["tftpdir"]; found { + u.Set("tftpdir", v.(string)) + } + if v, found := p.p["url"]; found { + u.Set("url", v.(string)) + } + if v, found := p.p["username"]; found { + u.Set("username", v.(string)) + } + return u +} + +func (p *AddBaremetalPxeKickStartServerParams) SetPassword(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["password"] = v + return +} + +func (p *AddBaremetalPxeKickStartServerParams) SetPhysicalnetworkid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["physicalnetworkid"] = v + return +} + +func (p *AddBaremetalPxeKickStartServerParams) SetPodid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["podid"] = v + return +} + +func (p *AddBaremetalPxeKickStartServerParams) SetPxeservertype(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["pxeservertype"] = v + return +} + +func (p *AddBaremetalPxeKickStartServerParams) SetTftpdir(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["tftpdir"] = v + return +} + +func (p *AddBaremetalPxeKickStartServerParams) SetUrl(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["url"] = v + return +} + +func (p *AddBaremetalPxeKickStartServerParams) SetUsername(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["username"] = v + return +} + +// You should always use this function to get a new AddBaremetalPxeKickStartServerParams instance, +// as then you are sure you have configured all required params +func (s *BaremetalService) NewAddBaremetalPxeKickStartServerParams(password string, physicalnetworkid string, pxeservertype string, tftpdir string, url string, username string) *AddBaremetalPxeKickStartServerParams { + p := &AddBaremetalPxeKickStartServerParams{} + p.p = make(map[string]interface{}) + p.p["password"] = password + p.p["physicalnetworkid"] = physicalnetworkid + p.p["pxeservertype"] = pxeservertype + p.p["tftpdir"] = tftpdir + p.p["url"] = url + p.p["username"] = username + return p +} + +// add a baremetal pxe server +func (s *BaremetalService) AddBaremetalPxeKickStartServer(p *AddBaremetalPxeKickStartServerParams) (*AddBaremetalPxeKickStartServerResponse, error) { + resp, err := s.cs.newRequest("addBaremetalPxeKickStartServer", p.toURLValues()) + if err != nil { + 
return nil, err + } + + var r AddBaremetalPxeKickStartServerResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + + // If we have a async client, we need to wait for the async result + if s.cs.async { + b, err := s.cs.GetAsyncJobResult(r.JobID, s.cs.timeout) + if err != nil { + if err == AsyncTimeoutErr { + return &r, err + } + return nil, err + } + + b, err = getRawValue(b) + if err != nil { + return nil, err + } + + if err := json.Unmarshal(b, &r); err != nil { + return nil, err + } + } + return &r, nil +} + +type AddBaremetalPxeKickStartServerResponse struct { + JobID string `json:"jobid,omitempty"` + Tftpdir string `json:"tftpdir,omitempty"` +} + +type AddBaremetalPxePingServerParams struct { + p map[string]interface{} +} + +func (p *AddBaremetalPxePingServerParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["password"]; found { + u.Set("password", v.(string)) + } + if v, found := p.p["physicalnetworkid"]; found { + u.Set("physicalnetworkid", v.(string)) + } + if v, found := p.p["pingcifspassword"]; found { + u.Set("pingcifspassword", v.(string)) + } + if v, found := p.p["pingcifsusername"]; found { + u.Set("pingcifsusername", v.(string)) + } + if v, found := p.p["pingdir"]; found { + u.Set("pingdir", v.(string)) + } + if v, found := p.p["pingstorageserverip"]; found { + u.Set("pingstorageserverip", v.(string)) + } + if v, found := p.p["podid"]; found { + u.Set("podid", v.(string)) + } + if v, found := p.p["pxeservertype"]; found { + u.Set("pxeservertype", v.(string)) + } + if v, found := p.p["tftpdir"]; found { + u.Set("tftpdir", v.(string)) + } + if v, found := p.p["url"]; found { + u.Set("url", v.(string)) + } + if v, found := p.p["username"]; found { + u.Set("username", v.(string)) + } + return u +} + +func (p *AddBaremetalPxePingServerParams) SetPassword(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["password"] = v + return +} + +func (p *AddBaremetalPxePingServerParams) SetPhysicalnetworkid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["physicalnetworkid"] = v + return +} + +func (p *AddBaremetalPxePingServerParams) SetPingcifspassword(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["pingcifspassword"] = v + return +} + +func (p *AddBaremetalPxePingServerParams) SetPingcifsusername(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["pingcifsusername"] = v + return +} + +func (p *AddBaremetalPxePingServerParams) SetPingdir(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["pingdir"] = v + return +} + +func (p *AddBaremetalPxePingServerParams) SetPingstorageserverip(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["pingstorageserverip"] = v + return +} + +func (p *AddBaremetalPxePingServerParams) SetPodid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["podid"] = v + return +} + +func (p *AddBaremetalPxePingServerParams) SetPxeservertype(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["pxeservertype"] = v + return +} + +func (p *AddBaremetalPxePingServerParams) SetTftpdir(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["tftpdir"] = v + return +} + +func (p *AddBaremetalPxePingServerParams) SetUrl(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["url"] = v + return +} + +func (p *AddBaremetalPxePingServerParams) 
SetUsername(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["username"] = v + return +} + +// You should always use this function to get a new AddBaremetalPxePingServerParams instance, +// as then you are sure you have configured all required params +func (s *BaremetalService) NewAddBaremetalPxePingServerParams(password string, physicalnetworkid string, pingdir string, pingstorageserverip string, pxeservertype string, tftpdir string, url string, username string) *AddBaremetalPxePingServerParams { + p := &AddBaremetalPxePingServerParams{} + p.p = make(map[string]interface{}) + p.p["password"] = password + p.p["physicalnetworkid"] = physicalnetworkid + p.p["pingdir"] = pingdir + p.p["pingstorageserverip"] = pingstorageserverip + p.p["pxeservertype"] = pxeservertype + p.p["tftpdir"] = tftpdir + p.p["url"] = url + p.p["username"] = username + return p +} + +// add a baremetal ping pxe server +func (s *BaremetalService) AddBaremetalPxePingServer(p *AddBaremetalPxePingServerParams) (*AddBaremetalPxePingServerResponse, error) { + resp, err := s.cs.newRequest("addBaremetalPxePingServer", p.toURLValues()) + if err != nil { + return nil, err + } + + var r AddBaremetalPxePingServerResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + + // If we have a async client, we need to wait for the async result + if s.cs.async { + b, err := s.cs.GetAsyncJobResult(r.JobID, s.cs.timeout) + if err != nil { + if err == AsyncTimeoutErr { + return &r, err + } + return nil, err + } + + b, err = getRawValue(b) + if err != nil { + return nil, err + } + + if err := json.Unmarshal(b, &r); err != nil { + return nil, err + } + } + return &r, nil +} + +type AddBaremetalPxePingServerResponse struct { + JobID string `json:"jobid,omitempty"` + Pingdir string `json:"pingdir,omitempty"` + Pingstorageserverip string `json:"pingstorageserverip,omitempty"` + Tftpdir string `json:"tftpdir,omitempty"` +} + +type AddBaremetalDhcpParams struct { + p map[string]interface{} +} + +func (p *AddBaremetalDhcpParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["dhcpservertype"]; found { + u.Set("dhcpservertype", v.(string)) + } + if v, found := p.p["password"]; found { + u.Set("password", v.(string)) + } + if v, found := p.p["physicalnetworkid"]; found { + u.Set("physicalnetworkid", v.(string)) + } + if v, found := p.p["url"]; found { + u.Set("url", v.(string)) + } + if v, found := p.p["username"]; found { + u.Set("username", v.(string)) + } + return u +} + +func (p *AddBaremetalDhcpParams) SetDhcpservertype(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["dhcpservertype"] = v + return +} + +func (p *AddBaremetalDhcpParams) SetPassword(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["password"] = v + return +} + +func (p *AddBaremetalDhcpParams) SetPhysicalnetworkid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["physicalnetworkid"] = v + return +} + +func (p *AddBaremetalDhcpParams) SetUrl(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["url"] = v + return +} + +func (p *AddBaremetalDhcpParams) SetUsername(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["username"] = v + return +} + +// You should always use this function to get a new AddBaremetalDhcpParams instance, +// as then you are sure you have configured all required params +func (s *BaremetalService) 
NewAddBaremetalDhcpParams(dhcpservertype string, password string, physicalnetworkid string, url string, username string) *AddBaremetalDhcpParams { + p := &AddBaremetalDhcpParams{} + p.p = make(map[string]interface{}) + p.p["dhcpservertype"] = dhcpservertype + p.p["password"] = password + p.p["physicalnetworkid"] = physicalnetworkid + p.p["url"] = url + p.p["username"] = username + return p +} + +// adds a baremetal dhcp server +func (s *BaremetalService) AddBaremetalDhcp(p *AddBaremetalDhcpParams) (*AddBaremetalDhcpResponse, error) { + resp, err := s.cs.newRequest("addBaremetalDhcp", p.toURLValues()) + if err != nil { + return nil, err + } + + var r AddBaremetalDhcpResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + + // If we have a async client, we need to wait for the async result + if s.cs.async { + b, err := s.cs.GetAsyncJobResult(r.JobID, s.cs.timeout) + if err != nil { + if err == AsyncTimeoutErr { + return &r, err + } + return nil, err + } + + b, err = getRawValue(b) + if err != nil { + return nil, err + } + + if err := json.Unmarshal(b, &r); err != nil { + return nil, err + } + } + return &r, nil +} + +type AddBaremetalDhcpResponse struct { + JobID string `json:"jobid,omitempty"` + Dhcpservertype string `json:"dhcpservertype,omitempty"` + Id string `json:"id,omitempty"` + Physicalnetworkid string `json:"physicalnetworkid,omitempty"` + Provider string `json:"provider,omitempty"` + Url string `json:"url,omitempty"` +} + +type ListBaremetalDhcpParams struct { + p map[string]interface{} +} + +func (p *ListBaremetalDhcpParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["dhcpservertype"]; found { + u.Set("dhcpservertype", v.(string)) + } + if v, found := p.p["id"]; found { + vv := strconv.FormatInt(v.(int64), 10) + u.Set("id", vv) + } + if v, found := p.p["keyword"]; found { + u.Set("keyword", v.(string)) + } + if v, found := p.p["page"]; found { + vv := strconv.Itoa(v.(int)) + u.Set("page", vv) + } + if v, found := p.p["pagesize"]; found { + vv := strconv.Itoa(v.(int)) + u.Set("pagesize", vv) + } + return u +} + +func (p *ListBaremetalDhcpParams) SetDhcpservertype(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["dhcpservertype"] = v + return +} + +func (p *ListBaremetalDhcpParams) SetId(v int64) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["id"] = v + return +} + +func (p *ListBaremetalDhcpParams) SetKeyword(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["keyword"] = v + return +} + +func (p *ListBaremetalDhcpParams) SetPage(v int) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["page"] = v + return +} + +func (p *ListBaremetalDhcpParams) SetPagesize(v int) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["pagesize"] = v + return +} + +// You should always use this function to get a new ListBaremetalDhcpParams instance, +// as then you are sure you have configured all required params +func (s *BaremetalService) NewListBaremetalDhcpParams() *ListBaremetalDhcpParams { + p := &ListBaremetalDhcpParams{} + p.p = make(map[string]interface{}) + return p +} + +// list baremetal dhcp servers +func (s *BaremetalService) ListBaremetalDhcp(p *ListBaremetalDhcpParams) (*ListBaremetalDhcpResponse, error) { + resp, err := s.cs.newRequest("listBaremetalDhcp", p.toURLValues()) + if err != nil { + return nil, err + } + + var r ListBaremetalDhcpResponse + if err := json.Unmarshal(resp, &r); err != 
nil { + return nil, err + } + return &r, nil +} + +type ListBaremetalDhcpResponse struct { + Count int `json:"count"` + BaremetalDhcp []*BaremetalDhcp `json:"baremetaldhcp"` +} + +type BaremetalDhcp struct { + Dhcpservertype string `json:"dhcpservertype,omitempty"` + Id string `json:"id,omitempty"` + Physicalnetworkid string `json:"physicalnetworkid,omitempty"` + Provider string `json:"provider,omitempty"` + Url string `json:"url,omitempty"` +} + +type ListBaremetalPxeServersParams struct { + p map[string]interface{} +} + +func (p *ListBaremetalPxeServersParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["id"]; found { + vv := strconv.FormatInt(v.(int64), 10) + u.Set("id", vv) + } + if v, found := p.p["keyword"]; found { + u.Set("keyword", v.(string)) + } + if v, found := p.p["page"]; found { + vv := strconv.Itoa(v.(int)) + u.Set("page", vv) + } + if v, found := p.p["pagesize"]; found { + vv := strconv.Itoa(v.(int)) + u.Set("pagesize", vv) + } + return u +} + +func (p *ListBaremetalPxeServersParams) SetId(v int64) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["id"] = v + return +} + +func (p *ListBaremetalPxeServersParams) SetKeyword(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["keyword"] = v + return +} + +func (p *ListBaremetalPxeServersParams) SetPage(v int) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["page"] = v + return +} + +func (p *ListBaremetalPxeServersParams) SetPagesize(v int) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["pagesize"] = v + return +} + +// You should always use this function to get a new ListBaremetalPxeServersParams instance, +// as then you are sure you have configured all required params +func (s *BaremetalService) NewListBaremetalPxeServersParams() *ListBaremetalPxeServersParams { + p := &ListBaremetalPxeServersParams{} + p.p = make(map[string]interface{}) + return p +} + +// list baremetal pxe server +func (s *BaremetalService) ListBaremetalPxeServers(p *ListBaremetalPxeServersParams) (*ListBaremetalPxeServersResponse, error) { + resp, err := s.cs.newRequest("listBaremetalPxeServers", p.toURLValues()) + if err != nil { + return nil, err + } + + var r ListBaremetalPxeServersResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + return &r, nil +} + +type ListBaremetalPxeServersResponse struct { + Count int `json:"count"` + BaremetalPxeServers []*BaremetalPxeServer `json:"baremetalpxeserver"` +} + +type BaremetalPxeServer struct { + Id string `json:"id,omitempty"` + Physicalnetworkid string `json:"physicalnetworkid,omitempty"` + Provider string `json:"provider,omitempty"` + Url string `json:"url,omitempty"` +} diff --git a/vendor/github.com/xanzy/go-cloudstack/cloudstack/BigSwitchVNSService.go b/vendor/github.com/xanzy/go-cloudstack/cloudstack/BigSwitchVNSService.go new file mode 100644 index 000000000..f6bc55106 --- /dev/null +++ b/vendor/github.com/xanzy/go-cloudstack/cloudstack/BigSwitchVNSService.go @@ -0,0 +1,281 @@ +// +// Copyright 2014, Sander van Harmelen +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package cloudstack + +import ( + "encoding/json" + "net/url" + "strconv" +) + +type AddBigSwitchVnsDeviceParams struct { + p map[string]interface{} +} + +func (p *AddBigSwitchVnsDeviceParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["hostname"]; found { + u.Set("hostname", v.(string)) + } + if v, found := p.p["physicalnetworkid"]; found { + u.Set("physicalnetworkid", v.(string)) + } + return u +} + +func (p *AddBigSwitchVnsDeviceParams) SetHostname(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["hostname"] = v + return +} + +func (p *AddBigSwitchVnsDeviceParams) SetPhysicalnetworkid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["physicalnetworkid"] = v + return +} + +// You should always use this function to get a new AddBigSwitchVnsDeviceParams instance, +// as then you are sure you have configured all required params +func (s *BigSwitchVNSService) NewAddBigSwitchVnsDeviceParams(hostname string, physicalnetworkid string) *AddBigSwitchVnsDeviceParams { + p := &AddBigSwitchVnsDeviceParams{} + p.p = make(map[string]interface{}) + p.p["hostname"] = hostname + p.p["physicalnetworkid"] = physicalnetworkid + return p +} + +// Adds a BigSwitch VNS device +func (s *BigSwitchVNSService) AddBigSwitchVnsDevice(p *AddBigSwitchVnsDeviceParams) (*AddBigSwitchVnsDeviceResponse, error) { + resp, err := s.cs.newRequest("addBigSwitchVnsDevice", p.toURLValues()) + if err != nil { + return nil, err + } + + var r AddBigSwitchVnsDeviceResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + + // If we have a async client, we need to wait for the async result + if s.cs.async { + b, err := s.cs.GetAsyncJobResult(r.JobID, s.cs.timeout) + if err != nil { + if err == AsyncTimeoutErr { + return &r, err + } + return nil, err + } + + b, err = getRawValue(b) + if err != nil { + return nil, err + } + + if err := json.Unmarshal(b, &r); err != nil { + return nil, err + } + } + return &r, nil +} + +type AddBigSwitchVnsDeviceResponse struct { + JobID string `json:"jobid,omitempty"` + Bigswitchdevicename string `json:"bigswitchdevicename,omitempty"` + Hostname string `json:"hostname,omitempty"` + Physicalnetworkid string `json:"physicalnetworkid,omitempty"` + Provider string `json:"provider,omitempty"` + Vnsdeviceid string `json:"vnsdeviceid,omitempty"` +} + +type DeleteBigSwitchVnsDeviceParams struct { + p map[string]interface{} +} + +func (p *DeleteBigSwitchVnsDeviceParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["vnsdeviceid"]; found { + u.Set("vnsdeviceid", v.(string)) + } + return u +} + +func (p *DeleteBigSwitchVnsDeviceParams) SetVnsdeviceid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["vnsdeviceid"] = v + return +} + +// You should always use this function to get a new DeleteBigSwitchVnsDeviceParams instance, +// as then you are sure you have configured all required params +func (s *BigSwitchVNSService) NewDeleteBigSwitchVnsDeviceParams(vnsdeviceid string) 
*DeleteBigSwitchVnsDeviceParams { + p := &DeleteBigSwitchVnsDeviceParams{} + p.p = make(map[string]interface{}) + p.p["vnsdeviceid"] = vnsdeviceid + return p +} + +// delete a bigswitch vns device +func (s *BigSwitchVNSService) DeleteBigSwitchVnsDevice(p *DeleteBigSwitchVnsDeviceParams) (*DeleteBigSwitchVnsDeviceResponse, error) { + resp, err := s.cs.newRequest("deleteBigSwitchVnsDevice", p.toURLValues()) + if err != nil { + return nil, err + } + + var r DeleteBigSwitchVnsDeviceResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + + // If we have a async client, we need to wait for the async result + if s.cs.async { + b, err := s.cs.GetAsyncJobResult(r.JobID, s.cs.timeout) + if err != nil { + if err == AsyncTimeoutErr { + return &r, err + } + return nil, err + } + + if err := json.Unmarshal(b, &r); err != nil { + return nil, err + } + } + return &r, nil +} + +type DeleteBigSwitchVnsDeviceResponse struct { + JobID string `json:"jobid,omitempty"` + Displaytext string `json:"displaytext,omitempty"` + Success bool `json:"success,omitempty"` +} + +type ListBigSwitchVnsDevicesParams struct { + p map[string]interface{} +} + +func (p *ListBigSwitchVnsDevicesParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["keyword"]; found { + u.Set("keyword", v.(string)) + } + if v, found := p.p["page"]; found { + vv := strconv.Itoa(v.(int)) + u.Set("page", vv) + } + if v, found := p.p["pagesize"]; found { + vv := strconv.Itoa(v.(int)) + u.Set("pagesize", vv) + } + if v, found := p.p["physicalnetworkid"]; found { + u.Set("physicalnetworkid", v.(string)) + } + if v, found := p.p["vnsdeviceid"]; found { + u.Set("vnsdeviceid", v.(string)) + } + return u +} + +func (p *ListBigSwitchVnsDevicesParams) SetKeyword(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["keyword"] = v + return +} + +func (p *ListBigSwitchVnsDevicesParams) SetPage(v int) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["page"] = v + return +} + +func (p *ListBigSwitchVnsDevicesParams) SetPagesize(v int) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["pagesize"] = v + return +} + +func (p *ListBigSwitchVnsDevicesParams) SetPhysicalnetworkid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["physicalnetworkid"] = v + return +} + +func (p *ListBigSwitchVnsDevicesParams) SetVnsdeviceid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["vnsdeviceid"] = v + return +} + +// You should always use this function to get a new ListBigSwitchVnsDevicesParams instance, +// as then you are sure you have configured all required params +func (s *BigSwitchVNSService) NewListBigSwitchVnsDevicesParams() *ListBigSwitchVnsDevicesParams { + p := &ListBigSwitchVnsDevicesParams{} + p.p = make(map[string]interface{}) + return p +} + +// Lists BigSwitch Vns devices +func (s *BigSwitchVNSService) ListBigSwitchVnsDevices(p *ListBigSwitchVnsDevicesParams) (*ListBigSwitchVnsDevicesResponse, error) { + resp, err := s.cs.newRequest("listBigSwitchVnsDevices", p.toURLValues()) + if err != nil { + return nil, err + } + + var r ListBigSwitchVnsDevicesResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + return &r, nil +} + +type ListBigSwitchVnsDevicesResponse struct { + Count int `json:"count"` + BigSwitchVnsDevices []*BigSwitchVnsDevice `json:"bigswitchvnsdevice"` +} + +type BigSwitchVnsDevice struct { + Bigswitchdevicename string 
`json:"bigswitchdevicename,omitempty"` + Hostname string `json:"hostname,omitempty"` + Physicalnetworkid string `json:"physicalnetworkid,omitempty"` + Provider string `json:"provider,omitempty"` + Vnsdeviceid string `json:"vnsdeviceid,omitempty"` +} diff --git a/vendor/github.com/xanzy/go-cloudstack/cloudstack/CertificateService.go b/vendor/github.com/xanzy/go-cloudstack/cloudstack/CertificateService.go new file mode 100644 index 000000000..dc125a02f --- /dev/null +++ b/vendor/github.com/xanzy/go-cloudstack/cloudstack/CertificateService.go @@ -0,0 +1,140 @@ +// +// Copyright 2014, Sander van Harmelen +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package cloudstack + +import ( + "encoding/json" + "net/url" + "strconv" +) + +type UploadCustomCertificateParams struct { + p map[string]interface{} +} + +func (p *UploadCustomCertificateParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["certificate"]; found { + u.Set("certificate", v.(string)) + } + if v, found := p.p["domainsuffix"]; found { + u.Set("domainsuffix", v.(string)) + } + if v, found := p.p["id"]; found { + vv := strconv.Itoa(v.(int)) + u.Set("id", vv) + } + if v, found := p.p["name"]; found { + u.Set("name", v.(string)) + } + if v, found := p.p["privatekey"]; found { + u.Set("privatekey", v.(string)) + } + return u +} + +func (p *UploadCustomCertificateParams) SetCertificate(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["certificate"] = v + return +} + +func (p *UploadCustomCertificateParams) SetDomainsuffix(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["domainsuffix"] = v + return +} + +func (p *UploadCustomCertificateParams) SetId(v int) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["id"] = v + return +} + +func (p *UploadCustomCertificateParams) SetName(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["name"] = v + return +} + +func (p *UploadCustomCertificateParams) SetPrivatekey(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["privatekey"] = v + return +} + +// You should always use this function to get a new UploadCustomCertificateParams instance, +// as then you are sure you have configured all required params +func (s *CertificateService) NewUploadCustomCertificateParams(certificate string, domainsuffix string) *UploadCustomCertificateParams { + p := &UploadCustomCertificateParams{} + p.p = make(map[string]interface{}) + p.p["certificate"] = certificate + p.p["domainsuffix"] = domainsuffix + return p +} + +// Uploads a custom certificate for the console proxy VMs to use for SSL. Can be used to upload a single certificate signed by a known CA. Can also be used, through multiple calls, to upload a chain of certificates from CA to the custom certificate itself. 
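+//
+// A minimal usage sketch (illustrative, not part of the vendored source):
+// it assumes a CloudStackClient created elsewhere, e.g. with
+// cloudstack.NewAsyncClient, exposing this service as cs.Certificate, and
+// hypothetical certPEM/keyPEM values.
+//
+//	p := cs.Certificate.NewUploadCustomCertificateParams(certPEM, "example.com")
+//	p.SetPrivatekey(keyPEM) // needed when uploading the certificate itself
+//	resp, err := cs.Certificate.UploadCustomCertificate(p)
+//	if err != nil {
+//		// err may be AsyncTimeoutErr when the async job poller times out
+//		return err
+//	}
+//	fmt.Println(resp.Message)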
+func (s *CertificateService) UploadCustomCertificate(p *UploadCustomCertificateParams) (*UploadCustomCertificateResponse, error) { + resp, err := s.cs.newRequest("uploadCustomCertificate", p.toURLValues()) + if err != nil { + return nil, err + } + + var r UploadCustomCertificateResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + + // If we have a async client, we need to wait for the async result + if s.cs.async { + b, err := s.cs.GetAsyncJobResult(r.JobID, s.cs.timeout) + if err != nil { + if err == AsyncTimeoutErr { + return &r, err + } + return nil, err + } + + b, err = getRawValue(b) + if err != nil { + return nil, err + } + + if err := json.Unmarshal(b, &r); err != nil { + return nil, err + } + } + return &r, nil +} + +type UploadCustomCertificateResponse struct { + JobID string `json:"jobid,omitempty"` + Message string `json:"message,omitempty"` +} diff --git a/vendor/github.com/xanzy/go-cloudstack/cloudstack/CloudIdentifierService.go b/vendor/github.com/xanzy/go-cloudstack/cloudstack/CloudIdentifierService.go new file mode 100644 index 000000000..0572bcdee --- /dev/null +++ b/vendor/github.com/xanzy/go-cloudstack/cloudstack/CloudIdentifierService.go @@ -0,0 +1,74 @@ +// +// Copyright 2014, Sander van Harmelen +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package cloudstack + +import ( + "encoding/json" + "net/url" +) + +type GetCloudIdentifierParams struct { + p map[string]interface{} +} + +func (p *GetCloudIdentifierParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["userid"]; found { + u.Set("userid", v.(string)) + } + return u +} + +func (p *GetCloudIdentifierParams) SetUserid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["userid"] = v + return +} + +// You should always use this function to get a new GetCloudIdentifierParams instance, +// as then you are sure you have configured all required params +func (s *CloudIdentifierService) NewGetCloudIdentifierParams(userid string) *GetCloudIdentifierParams { + p := &GetCloudIdentifierParams{} + p.p = make(map[string]interface{}) + p.p["userid"] = userid + return p +} + +// Retrieves a cloud identifier. 
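+//
+// Illustrative sketch (assumes a client cs exposing this service as
+// cs.CloudIdentifier and a hypothetical userID value):
+//
+//	p := cs.CloudIdentifier.NewGetCloudIdentifierParams(userID)
+//	r, err := cs.CloudIdentifier.GetCloudIdentifier(p)
+//	if err == nil {
+//		fmt.Println(r.Cloudidentifier, r.Signature)
+//	}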
+func (s *CloudIdentifierService) GetCloudIdentifier(p *GetCloudIdentifierParams) (*GetCloudIdentifierResponse, error) { + resp, err := s.cs.newRequest("getCloudIdentifier", p.toURLValues()) + if err != nil { + return nil, err + } + + var r GetCloudIdentifierResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + return &r, nil +} + +type GetCloudIdentifierResponse struct { + Cloudidentifier string `json:"cloudidentifier,omitempty"` + Signature string `json:"signature,omitempty"` + Userid string `json:"userid,omitempty"` +} diff --git a/vendor/github.com/xanzy/go-cloudstack/cloudstack/ClusterService.go b/vendor/github.com/xanzy/go-cloudstack/cloudstack/ClusterService.go new file mode 100644 index 000000000..24d0214ee --- /dev/null +++ b/vendor/github.com/xanzy/go-cloudstack/cloudstack/ClusterService.go @@ -0,0 +1,1011 @@ +// +// Copyright 2014, Sander van Harmelen +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package cloudstack + +import ( + "encoding/json" + "fmt" + "net/url" + "strconv" + "strings" +) + +type AddClusterParams struct { + p map[string]interface{} +} + +func (p *AddClusterParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["allocationstate"]; found { + u.Set("allocationstate", v.(string)) + } + if v, found := p.p["clustername"]; found { + u.Set("clustername", v.(string)) + } + if v, found := p.p["clustertype"]; found { + u.Set("clustertype", v.(string)) + } + if v, found := p.p["guestvswitchname"]; found { + u.Set("guestvswitchname", v.(string)) + } + if v, found := p.p["guestvswitchtype"]; found { + u.Set("guestvswitchtype", v.(string)) + } + if v, found := p.p["hypervisor"]; found { + u.Set("hypervisor", v.(string)) + } + if v, found := p.p["password"]; found { + u.Set("password", v.(string)) + } + if v, found := p.p["podid"]; found { + u.Set("podid", v.(string)) + } + if v, found := p.p["publicvswitchname"]; found { + u.Set("publicvswitchname", v.(string)) + } + if v, found := p.p["publicvswitchtype"]; found { + u.Set("publicvswitchtype", v.(string)) + } + if v, found := p.p["url"]; found { + u.Set("url", v.(string)) + } + if v, found := p.p["username"]; found { + u.Set("username", v.(string)) + } + if v, found := p.p["vsmipaddress"]; found { + u.Set("vsmipaddress", v.(string)) + } + if v, found := p.p["vsmpassword"]; found { + u.Set("vsmpassword", v.(string)) + } + if v, found := p.p["vsmusername"]; found { + u.Set("vsmusername", v.(string)) + } + if v, found := p.p["zoneid"]; found { + u.Set("zoneid", v.(string)) + } + return u +} + +func (p *AddClusterParams) SetAllocationstate(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["allocationstate"] = v + return +} + +func (p *AddClusterParams) SetClustername(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["clustername"] = v + return +} + +func (p *AddClusterParams) SetClustertype(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + 
p.p["clustertype"] = v + return +} + +func (p *AddClusterParams) SetGuestvswitchname(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["guestvswitchname"] = v + return +} + +func (p *AddClusterParams) SetGuestvswitchtype(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["guestvswitchtype"] = v + return +} + +func (p *AddClusterParams) SetHypervisor(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["hypervisor"] = v + return +} + +func (p *AddClusterParams) SetPassword(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["password"] = v + return +} + +func (p *AddClusterParams) SetPodid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["podid"] = v + return +} + +func (p *AddClusterParams) SetPublicvswitchname(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["publicvswitchname"] = v + return +} + +func (p *AddClusterParams) SetPublicvswitchtype(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["publicvswitchtype"] = v + return +} + +func (p *AddClusterParams) SetUrl(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["url"] = v + return +} + +func (p *AddClusterParams) SetUsername(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["username"] = v + return +} + +func (p *AddClusterParams) SetVsmipaddress(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["vsmipaddress"] = v + return +} + +func (p *AddClusterParams) SetVsmpassword(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["vsmpassword"] = v + return +} + +func (p *AddClusterParams) SetVsmusername(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["vsmusername"] = v + return +} + +func (p *AddClusterParams) SetZoneid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["zoneid"] = v + return +} + +// You should always use this function to get a new AddClusterParams instance, +// as then you are sure you have configured all required params +func (s *ClusterService) NewAddClusterParams(clustername string, clustertype string, hypervisor string, podid string, zoneid string) *AddClusterParams { + p := &AddClusterParams{} + p.p = make(map[string]interface{}) + p.p["clustername"] = clustername + p.p["clustertype"] = clustertype + p.p["hypervisor"] = hypervisor + p.p["podid"] = podid + p.p["zoneid"] = zoneid + return p +} + +// Adds a new cluster +func (s *ClusterService) AddCluster(p *AddClusterParams) (*AddClusterResponse, error) { + resp, err := s.cs.newRequest("addCluster", p.toURLValues()) + if err != nil { + return nil, err + } + + var r AddClusterResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + return &r, nil +} + +type AddClusterResponse struct { + Allocationstate string `json:"allocationstate,omitempty"` + Capacity []struct { + Capacitytotal int64 `json:"capacitytotal,omitempty"` + Capacityused int64 `json:"capacityused,omitempty"` + Clusterid string `json:"clusterid,omitempty"` + Clustername string `json:"clustername,omitempty"` + Percentused string `json:"percentused,omitempty"` + Podid string `json:"podid,omitempty"` + Podname string `json:"podname,omitempty"` + Type int `json:"type,omitempty"` + Zoneid string `json:"zoneid,omitempty"` + Zonename string `json:"zonename,omitempty"` + } `json:"capacity,omitempty"` + Clustertype string `json:"clustertype,omitempty"` 
+ Cpuovercommitratio string `json:"cpuovercommitratio,omitempty"` + Hypervisortype string `json:"hypervisortype,omitempty"` + Id string `json:"id,omitempty"` + Managedstate string `json:"managedstate,omitempty"` + Memoryovercommitratio string `json:"memoryovercommitratio,omitempty"` + Name string `json:"name,omitempty"` + Podid string `json:"podid,omitempty"` + Podname string `json:"podname,omitempty"` + Zoneid string `json:"zoneid,omitempty"` + Zonename string `json:"zonename,omitempty"` +} + +type DeleteClusterParams struct { + p map[string]interface{} +} + +func (p *DeleteClusterParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["id"]; found { + u.Set("id", v.(string)) + } + return u +} + +func (p *DeleteClusterParams) SetId(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["id"] = v + return +} + +// You should always use this function to get a new DeleteClusterParams instance, +// as then you are sure you have configured all required params +func (s *ClusterService) NewDeleteClusterParams(id string) *DeleteClusterParams { + p := &DeleteClusterParams{} + p.p = make(map[string]interface{}) + p.p["id"] = id + return p +} + +// Deletes a cluster. +func (s *ClusterService) DeleteCluster(p *DeleteClusterParams) (*DeleteClusterResponse, error) { + resp, err := s.cs.newRequest("deleteCluster", p.toURLValues()) + if err != nil { + return nil, err + } + + var r DeleteClusterResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + return &r, nil +} + +type DeleteClusterResponse struct { + Displaytext string `json:"displaytext,omitempty"` + Success string `json:"success,omitempty"` +} + +type UpdateClusterParams struct { + p map[string]interface{} +} + +func (p *UpdateClusterParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["allocationstate"]; found { + u.Set("allocationstate", v.(string)) + } + if v, found := p.p["clustername"]; found { + u.Set("clustername", v.(string)) + } + if v, found := p.p["clustertype"]; found { + u.Set("clustertype", v.(string)) + } + if v, found := p.p["hypervisor"]; found { + u.Set("hypervisor", v.(string)) + } + if v, found := p.p["id"]; found { + u.Set("id", v.(string)) + } + if v, found := p.p["managedstate"]; found { + u.Set("managedstate", v.(string)) + } + return u +} + +func (p *UpdateClusterParams) SetAllocationstate(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["allocationstate"] = v + return +} + +func (p *UpdateClusterParams) SetClustername(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["clustername"] = v + return +} + +func (p *UpdateClusterParams) SetClustertype(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["clustertype"] = v + return +} + +func (p *UpdateClusterParams) SetHypervisor(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["hypervisor"] = v + return +} + +func (p *UpdateClusterParams) SetId(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["id"] = v + return +} + +func (p *UpdateClusterParams) SetManagedstate(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["managedstate"] = v + return +} + +// You should always use this function to get a new UpdateClusterParams instance, +// as then you are sure you have configured all required params +func (s *ClusterService) NewUpdateClusterParams(id string) 
*UpdateClusterParams { + p := &UpdateClusterParams{} + p.p = make(map[string]interface{}) + p.p["id"] = id + return p +} + +// Updates an existing cluster +func (s *ClusterService) UpdateCluster(p *UpdateClusterParams) (*UpdateClusterResponse, error) { + resp, err := s.cs.newRequest("updateCluster", p.toURLValues()) + if err != nil { + return nil, err + } + + var r UpdateClusterResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + return &r, nil +} + +type UpdateClusterResponse struct { + Allocationstate string `json:"allocationstate,omitempty"` + Capacity []struct { + Capacitytotal int64 `json:"capacitytotal,omitempty"` + Capacityused int64 `json:"capacityused,omitempty"` + Clusterid string `json:"clusterid,omitempty"` + Clustername string `json:"clustername,omitempty"` + Percentused string `json:"percentused,omitempty"` + Podid string `json:"podid,omitempty"` + Podname string `json:"podname,omitempty"` + Type int `json:"type,omitempty"` + Zoneid string `json:"zoneid,omitempty"` + Zonename string `json:"zonename,omitempty"` + } `json:"capacity,omitempty"` + Clustertype string `json:"clustertype,omitempty"` + Cpuovercommitratio string `json:"cpuovercommitratio,omitempty"` + Hypervisortype string `json:"hypervisortype,omitempty"` + Id string `json:"id,omitempty"` + Managedstate string `json:"managedstate,omitempty"` + Memoryovercommitratio string `json:"memoryovercommitratio,omitempty"` + Name string `json:"name,omitempty"` + Podid string `json:"podid,omitempty"` + Podname string `json:"podname,omitempty"` + Zoneid string `json:"zoneid,omitempty"` + Zonename string `json:"zonename,omitempty"` +} + +type ListClustersParams struct { + p map[string]interface{} +} + +func (p *ListClustersParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["allocationstate"]; found { + u.Set("allocationstate", v.(string)) + } + if v, found := p.p["clustertype"]; found { + u.Set("clustertype", v.(string)) + } + if v, found := p.p["hypervisor"]; found { + u.Set("hypervisor", v.(string)) + } + if v, found := p.p["id"]; found { + u.Set("id", v.(string)) + } + if v, found := p.p["keyword"]; found { + u.Set("keyword", v.(string)) + } + if v, found := p.p["managedstate"]; found { + u.Set("managedstate", v.(string)) + } + if v, found := p.p["name"]; found { + u.Set("name", v.(string)) + } + if v, found := p.p["page"]; found { + vv := strconv.Itoa(v.(int)) + u.Set("page", vv) + } + if v, found := p.p["pagesize"]; found { + vv := strconv.Itoa(v.(int)) + u.Set("pagesize", vv) + } + if v, found := p.p["podid"]; found { + u.Set("podid", v.(string)) + } + if v, found := p.p["showcapacities"]; found { + vv := strconv.FormatBool(v.(bool)) + u.Set("showcapacities", vv) + } + if v, found := p.p["zoneid"]; found { + u.Set("zoneid", v.(string)) + } + return u +} + +func (p *ListClustersParams) SetAllocationstate(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["allocationstate"] = v + return +} + +func (p *ListClustersParams) SetClustertype(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["clustertype"] = v + return +} + +func (p *ListClustersParams) SetHypervisor(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["hypervisor"] = v + return +} + +func (p *ListClustersParams) SetId(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["id"] = v + return +} + +func (p *ListClustersParams) SetKeyword(v string) { + if p.p == nil { + p.p = 
make(map[string]interface{}) + } + p.p["keyword"] = v + return +} + +func (p *ListClustersParams) SetManagedstate(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["managedstate"] = v + return +} + +func (p *ListClustersParams) SetName(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["name"] = v + return +} + +func (p *ListClustersParams) SetPage(v int) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["page"] = v + return +} + +func (p *ListClustersParams) SetPagesize(v int) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["pagesize"] = v + return +} + +func (p *ListClustersParams) SetPodid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["podid"] = v + return +} + +func (p *ListClustersParams) SetShowcapacities(v bool) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["showcapacities"] = v + return +} + +func (p *ListClustersParams) SetZoneid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["zoneid"] = v + return +} + +// You should always use this function to get a new ListClustersParams instance, +// as then you are sure you have configured all required params +func (s *ClusterService) NewListClustersParams() *ListClustersParams { + p := &ListClustersParams{} + p.p = make(map[string]interface{}) + return p +} + +// This is a courtesy helper function, which in some cases may not work as expected! +func (s *ClusterService) GetClusterID(name string) (string, error) { + p := &ListClustersParams{} + p.p = make(map[string]interface{}) + + p.p["name"] = name + + l, err := s.ListClusters(p) + if err != nil { + return "", err + } + + if l.Count == 0 { + return "", fmt.Errorf("No match found for %s: %+v", name, l) + } + + if l.Count == 1 { + return l.Clusters[0].Id, nil + } + + if l.Count > 1 { + for _, v := range l.Clusters { + if v.Name == name { + return v.Id, nil + } + } + } + return "", fmt.Errorf("Could not find an exact match for %s: %+v", name, l) +} + +// This is a courtesy helper function, which in some cases may not work as expected! +func (s *ClusterService) GetClusterByName(name string) (*Cluster, int, error) { + id, err := s.GetClusterID(name) + if err != nil { + return nil, -1, err + } + + r, count, err := s.GetClusterByID(id) + if err != nil { + return nil, count, err + } + return r, count, nil +} + +// This is a courtesy helper function, which in some cases may not work as expected! +func (s *ClusterService) GetClusterByID(id string) (*Cluster, int, error) { + p := &ListClustersParams{} + p.p = make(map[string]interface{}) + + p.p["id"] = id + + l, err := s.ListClusters(p) + if err != nil { + if strings.Contains(err.Error(), fmt.Sprintf( + "Invalid parameter id value=%s due to incorrect long value format, "+ + "or entity does not exist", id)) { + return nil, 0, fmt.Errorf("No match found for %s: %+v", id, l) + } + return nil, -1, err + } + + if l.Count == 0 { + return nil, l.Count, fmt.Errorf("No match found for %s: %+v", id, l) + } + + if l.Count == 1 { + return l.Clusters[0], l.Count, nil + } + return nil, l.Count, fmt.Errorf("There is more then one result for Cluster UUID: %s!", id) +} + +// Lists clusters. 
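+//
+// Paging sketch (illustrative; assumes a client cs exposing this service
+// as cs.Cluster):
+//
+//	p := cs.Cluster.NewListClustersParams()
+//	p.SetPagesize(50)
+//	for page := 1; ; page++ {
+//		p.SetPage(page)
+//		r, err := cs.Cluster.ListClusters(p)
+//		if err != nil || len(r.Clusters) == 0 {
+//			break
+//		}
+//		for _, c := range r.Clusters {
+//			fmt.Println(c.Id, c.Name, c.Hypervisortype)
+//		}
+//	}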
+func (s *ClusterService) ListClusters(p *ListClustersParams) (*ListClustersResponse, error) { + resp, err := s.cs.newRequest("listClusters", p.toURLValues()) + if err != nil { + return nil, err + } + + var r ListClustersResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + return &r, nil +} + +type ListClustersResponse struct { + Count int `json:"count"` + Clusters []*Cluster `json:"cluster"` +} + +type Cluster struct { + Allocationstate string `json:"allocationstate,omitempty"` + Capacity []struct { + Capacitytotal int64 `json:"capacitytotal,omitempty"` + Capacityused int64 `json:"capacityused,omitempty"` + Clusterid string `json:"clusterid,omitempty"` + Clustername string `json:"clustername,omitempty"` + Percentused string `json:"percentused,omitempty"` + Podid string `json:"podid,omitempty"` + Podname string `json:"podname,omitempty"` + Type int `json:"type,omitempty"` + Zoneid string `json:"zoneid,omitempty"` + Zonename string `json:"zonename,omitempty"` + } `json:"capacity,omitempty"` + Clustertype string `json:"clustertype,omitempty"` + Cpuovercommitratio string `json:"cpuovercommitratio,omitempty"` + Hypervisortype string `json:"hypervisortype,omitempty"` + Id string `json:"id,omitempty"` + Managedstate string `json:"managedstate,omitempty"` + Memoryovercommitratio string `json:"memoryovercommitratio,omitempty"` + Name string `json:"name,omitempty"` + Podid string `json:"podid,omitempty"` + Podname string `json:"podname,omitempty"` + Zoneid string `json:"zoneid,omitempty"` + Zonename string `json:"zonename,omitempty"` +} + +type DedicateClusterParams struct { + p map[string]interface{} +} + +func (p *DedicateClusterParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["account"]; found { + u.Set("account", v.(string)) + } + if v, found := p.p["clusterid"]; found { + u.Set("clusterid", v.(string)) + } + if v, found := p.p["domainid"]; found { + u.Set("domainid", v.(string)) + } + return u +} + +func (p *DedicateClusterParams) SetAccount(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["account"] = v + return +} + +func (p *DedicateClusterParams) SetClusterid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["clusterid"] = v + return +} + +func (p *DedicateClusterParams) SetDomainid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["domainid"] = v + return +} + +// You should always use this function to get a new DedicateClusterParams instance, +// as then you are sure you have configured all required params +func (s *ClusterService) NewDedicateClusterParams(clusterid string, domainid string) *DedicateClusterParams { + p := &DedicateClusterParams{} + p.p = make(map[string]interface{}) + p.p["clusterid"] = clusterid + p.p["domainid"] = domainid + return p +} + +// Dedicate an existing cluster +func (s *ClusterService) DedicateCluster(p *DedicateClusterParams) (*DedicateClusterResponse, error) { + resp, err := s.cs.newRequest("dedicateCluster", p.toURLValues()) + if err != nil { + return nil, err + } + + var r DedicateClusterResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + + // If we have a async client, we need to wait for the async result + if s.cs.async { + b, err := s.cs.GetAsyncJobResult(r.JobID, s.cs.timeout) + if err != nil { + if err == AsyncTimeoutErr { + return &r, err + } + return nil, err + } + + b, err = getRawValue(b) + if err != nil { + return nil, err + } + + if err := 
json.Unmarshal(b, &r); err != nil { + return nil, err + } + } + return &r, nil +} + +type DedicateClusterResponse struct { + JobID string `json:"jobid,omitempty"` + Accountid string `json:"accountid,omitempty"` + Affinitygroupid string `json:"affinitygroupid,omitempty"` + Clusterid string `json:"clusterid,omitempty"` + Clustername string `json:"clustername,omitempty"` + Domainid string `json:"domainid,omitempty"` + Id string `json:"id,omitempty"` +} + +type ReleaseDedicatedClusterParams struct { + p map[string]interface{} +} + +func (p *ReleaseDedicatedClusterParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["clusterid"]; found { + u.Set("clusterid", v.(string)) + } + return u +} + +func (p *ReleaseDedicatedClusterParams) SetClusterid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["clusterid"] = v + return +} + +// You should always use this function to get a new ReleaseDedicatedClusterParams instance, +// as then you are sure you have configured all required params +func (s *ClusterService) NewReleaseDedicatedClusterParams(clusterid string) *ReleaseDedicatedClusterParams { + p := &ReleaseDedicatedClusterParams{} + p.p = make(map[string]interface{}) + p.p["clusterid"] = clusterid + return p +} + +// Release the dedication for cluster +func (s *ClusterService) ReleaseDedicatedCluster(p *ReleaseDedicatedClusterParams) (*ReleaseDedicatedClusterResponse, error) { + resp, err := s.cs.newRequest("releaseDedicatedCluster", p.toURLValues()) + if err != nil { + return nil, err + } + + var r ReleaseDedicatedClusterResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + + // If we have a async client, we need to wait for the async result + if s.cs.async { + b, err := s.cs.GetAsyncJobResult(r.JobID, s.cs.timeout) + if err != nil { + if err == AsyncTimeoutErr { + return &r, err + } + return nil, err + } + + if err := json.Unmarshal(b, &r); err != nil { + return nil, err + } + } + return &r, nil +} + +type ReleaseDedicatedClusterResponse struct { + JobID string `json:"jobid,omitempty"` + Displaytext string `json:"displaytext,omitempty"` + Success bool `json:"success,omitempty"` +} + +type ListDedicatedClustersParams struct { + p map[string]interface{} +} + +func (p *ListDedicatedClustersParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["account"]; found { + u.Set("account", v.(string)) + } + if v, found := p.p["affinitygroupid"]; found { + u.Set("affinitygroupid", v.(string)) + } + if v, found := p.p["clusterid"]; found { + u.Set("clusterid", v.(string)) + } + if v, found := p.p["domainid"]; found { + u.Set("domainid", v.(string)) + } + if v, found := p.p["keyword"]; found { + u.Set("keyword", v.(string)) + } + if v, found := p.p["page"]; found { + vv := strconv.Itoa(v.(int)) + u.Set("page", vv) + } + if v, found := p.p["pagesize"]; found { + vv := strconv.Itoa(v.(int)) + u.Set("pagesize", vv) + } + return u +} + +func (p *ListDedicatedClustersParams) SetAccount(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["account"] = v + return +} + +func (p *ListDedicatedClustersParams) SetAffinitygroupid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["affinitygroupid"] = v + return +} + +func (p *ListDedicatedClustersParams) SetClusterid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["clusterid"] = v + return +} + +func (p *ListDedicatedClustersParams) 
SetDomainid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["domainid"] = v + return +} + +func (p *ListDedicatedClustersParams) SetKeyword(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["keyword"] = v + return +} + +func (p *ListDedicatedClustersParams) SetPage(v int) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["page"] = v + return +} + +func (p *ListDedicatedClustersParams) SetPagesize(v int) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["pagesize"] = v + return +} + +// You should always use this function to get a new ListDedicatedClustersParams instance, +// as then you are sure you have configured all required params +func (s *ClusterService) NewListDedicatedClustersParams() *ListDedicatedClustersParams { + p := &ListDedicatedClustersParams{} + p.p = make(map[string]interface{}) + return p +} + +// Lists dedicated clusters. +func (s *ClusterService) ListDedicatedClusters(p *ListDedicatedClustersParams) (*ListDedicatedClustersResponse, error) { + resp, err := s.cs.newRequest("listDedicatedClusters", p.toURLValues()) + if err != nil { + return nil, err + } + + var r ListDedicatedClustersResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + return &r, nil +} + +type ListDedicatedClustersResponse struct { + Count int `json:"count"` + DedicatedClusters []*DedicatedCluster `json:"dedicatedcluster"` +} + +type DedicatedCluster struct { + Accountid string `json:"accountid,omitempty"` + Affinitygroupid string `json:"affinitygroupid,omitempty"` + Clusterid string `json:"clusterid,omitempty"` + Clustername string `json:"clustername,omitempty"` + Domainid string `json:"domainid,omitempty"` + Id string `json:"id,omitempty"` +} diff --git a/vendor/github.com/xanzy/go-cloudstack/cloudstack/ConfigurationService.go b/vendor/github.com/xanzy/go-cloudstack/cloudstack/ConfigurationService.go new file mode 100644 index 000000000..d4a2f55d7 --- /dev/null +++ b/vendor/github.com/xanzy/go-cloudstack/cloudstack/ConfigurationService.go @@ -0,0 +1,631 @@ +// +// Copyright 2014, Sander van Harmelen +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// + +package cloudstack + +import ( + "encoding/json" + "net/url" + "strconv" +) + +type UpdateConfigurationParams struct { + p map[string]interface{} +} + +func (p *UpdateConfigurationParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["accountid"]; found { + u.Set("accountid", v.(string)) + } + if v, found := p.p["clusterid"]; found { + u.Set("clusterid", v.(string)) + } + if v, found := p.p["name"]; found { + u.Set("name", v.(string)) + } + if v, found := p.p["storageid"]; found { + u.Set("storageid", v.(string)) + } + if v, found := p.p["value"]; found { + u.Set("value", v.(string)) + } + if v, found := p.p["zoneid"]; found { + u.Set("zoneid", v.(string)) + } + return u +} + +func (p *UpdateConfigurationParams) SetAccountid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["accountid"] = v + return +} + +func (p *UpdateConfigurationParams) SetClusterid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["clusterid"] = v + return +} + +func (p *UpdateConfigurationParams) SetName(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["name"] = v + return +} + +func (p *UpdateConfigurationParams) SetStorageid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["storageid"] = v + return +} + +func (p *UpdateConfigurationParams) SetValue(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["value"] = v + return +} + +func (p *UpdateConfigurationParams) SetZoneid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["zoneid"] = v + return +} + +// You should always use this function to get a new UpdateConfigurationParams instance, +// as then you are sure you have configured all required params +func (s *ConfigurationService) NewUpdateConfigurationParams(name string) *UpdateConfigurationParams { + p := &UpdateConfigurationParams{} + p.p = make(map[string]interface{}) + p.p["name"] = name + return p +} + +// Updates a configuration. 
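+//
+// Illustrative sketch (assumes a client cs exposing this service as
+// cs.Configuration; "expunge.delay" is just an example of a global
+// setting name):
+//
+//	p := cs.Configuration.NewUpdateConfigurationParams("expunge.delay")
+//	p.SetValue("120")
+//	r, err := cs.Configuration.UpdateConfiguration(p)
+//	if err == nil {
+//		fmt.Println(r.Name, "=", r.Value)
+//	}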
+func (s *ConfigurationService) UpdateConfiguration(p *UpdateConfigurationParams) (*UpdateConfigurationResponse, error) { + resp, err := s.cs.newRequest("updateConfiguration", p.toURLValues()) + if err != nil { + return nil, err + } + + var r UpdateConfigurationResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + return &r, nil +} + +type UpdateConfigurationResponse struct { + Category string `json:"category,omitempty"` + Description string `json:"description,omitempty"` + Id int64 `json:"id,omitempty"` + Name string `json:"name,omitempty"` + Scope string `json:"scope,omitempty"` + Value string `json:"value,omitempty"` +} + +type ListConfigurationsParams struct { + p map[string]interface{} +} + +func (p *ListConfigurationsParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["accountid"]; found { + u.Set("accountid", v.(string)) + } + if v, found := p.p["category"]; found { + u.Set("category", v.(string)) + } + if v, found := p.p["clusterid"]; found { + u.Set("clusterid", v.(string)) + } + if v, found := p.p["keyword"]; found { + u.Set("keyword", v.(string)) + } + if v, found := p.p["name"]; found { + u.Set("name", v.(string)) + } + if v, found := p.p["page"]; found { + vv := strconv.Itoa(v.(int)) + u.Set("page", vv) + } + if v, found := p.p["pagesize"]; found { + vv := strconv.Itoa(v.(int)) + u.Set("pagesize", vv) + } + if v, found := p.p["storageid"]; found { + u.Set("storageid", v.(string)) + } + if v, found := p.p["zoneid"]; found { + u.Set("zoneid", v.(string)) + } + return u +} + +func (p *ListConfigurationsParams) SetAccountid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["accountid"] = v + return +} + +func (p *ListConfigurationsParams) SetCategory(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["category"] = v + return +} + +func (p *ListConfigurationsParams) SetClusterid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["clusterid"] = v + return +} + +func (p *ListConfigurationsParams) SetKeyword(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["keyword"] = v + return +} + +func (p *ListConfigurationsParams) SetName(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["name"] = v + return +} + +func (p *ListConfigurationsParams) SetPage(v int) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["page"] = v + return +} + +func (p *ListConfigurationsParams) SetPagesize(v int) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["pagesize"] = v + return +} + +func (p *ListConfigurationsParams) SetStorageid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["storageid"] = v + return +} + +func (p *ListConfigurationsParams) SetZoneid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["zoneid"] = v + return +} + +// You should always use this function to get a new ListConfigurationsParams instance, +// as then you are sure you have configured all required params +func (s *ConfigurationService) NewListConfigurationsParams() *ListConfigurationsParams { + p := &ListConfigurationsParams{} + p.p = make(map[string]interface{}) + return p +} + +// Lists all configurations. 
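+//
+// Illustrative sketch (assumes cs.Configuration on an existing client):
+// list every setting in a category.
+//
+//	p := cs.Configuration.NewListConfigurationsParams()
+//	p.SetCategory("Advanced")
+//	r, err := cs.Configuration.ListConfigurations(p)
+//	if err == nil {
+//		for _, c := range r.Configurations {
+//			fmt.Printf("%s = %s\n", c.Name, c.Value)
+//		}
+//	}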
+func (s *ConfigurationService) ListConfigurations(p *ListConfigurationsParams) (*ListConfigurationsResponse, error) { + resp, err := s.cs.newRequest("listConfigurations", p.toURLValues()) + if err != nil { + return nil, err + } + + var r ListConfigurationsResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + return &r, nil +} + +type ListConfigurationsResponse struct { + Count int `json:"count"` + Configurations []*Configuration `json:"configuration"` +} + +type Configuration struct { + Category string `json:"category,omitempty"` + Description string `json:"description,omitempty"` + Id int64 `json:"id,omitempty"` + Name string `json:"name,omitempty"` + Scope string `json:"scope,omitempty"` + Value string `json:"value,omitempty"` +} + +type ListCapabilitiesParams struct { + p map[string]interface{} +} + +func (p *ListCapabilitiesParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + return u +} + +// You should always use this function to get a new ListCapabilitiesParams instance, +// as then you are sure you have configured all required params +func (s *ConfigurationService) NewListCapabilitiesParams() *ListCapabilitiesParams { + p := &ListCapabilitiesParams{} + p.p = make(map[string]interface{}) + return p +} + +// Lists capabilities +func (s *ConfigurationService) ListCapabilities(p *ListCapabilitiesParams) (*ListCapabilitiesResponse, error) { + resp, err := s.cs.newRequest("listCapabilities", p.toURLValues()) + if err != nil { + return nil, err + } + + var r ListCapabilitiesResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + return &r, nil +} + +type ListCapabilitiesResponse struct { + Count int `json:"count"` + Capabilities []*Capability `json:"capability"` +} + +type Capability struct { + Allowusercreateprojects bool `json:"allowusercreateprojects,omitempty"` + Apilimitinterval int `json:"apilimitinterval,omitempty"` + Apilimitmax int `json:"apilimitmax,omitempty"` + Cloudstackversion string `json:"cloudstackversion,omitempty"` + Customdiskofferingmaxsize int64 `json:"customdiskofferingmaxsize,omitempty"` + Customdiskofferingminsize int64 `json:"customdiskofferingminsize,omitempty"` + Kvmsnapshotenabled bool `json:"kvmsnapshotenabled,omitempty"` + Projectinviterequired bool `json:"projectinviterequired,omitempty"` + Regionsecondaryenabled bool `json:"regionsecondaryenabled,omitempty"` + Securitygroupsenabled bool `json:"securitygroupsenabled,omitempty"` + SupportELB string `json:"supportELB,omitempty"` + Userpublictemplateenabled bool `json:"userpublictemplateenabled,omitempty"` +} + +type ListDeploymentPlannersParams struct { + p map[string]interface{} +} + +func (p *ListDeploymentPlannersParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["keyword"]; found { + u.Set("keyword", v.(string)) + } + if v, found := p.p["page"]; found { + vv := strconv.Itoa(v.(int)) + u.Set("page", vv) + } + if v, found := p.p["pagesize"]; found { + vv := strconv.Itoa(v.(int)) + u.Set("pagesize", vv) + } + return u +} + +func (p *ListDeploymentPlannersParams) SetKeyword(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["keyword"] = v + return +} + +func (p *ListDeploymentPlannersParams) SetPage(v int) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["page"] = v + return +} + +func (p *ListDeploymentPlannersParams) SetPagesize(v int) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["pagesize"] 
= v + return +} + +// You should always use this function to get a new ListDeploymentPlannersParams instance, +// as then you are sure you have configured all required params +func (s *ConfigurationService) NewListDeploymentPlannersParams() *ListDeploymentPlannersParams { + p := &ListDeploymentPlannersParams{} + p.p = make(map[string]interface{}) + return p +} + +// Lists all DeploymentPlanners available. +func (s *ConfigurationService) ListDeploymentPlanners(p *ListDeploymentPlannersParams) (*ListDeploymentPlannersResponse, error) { + resp, err := s.cs.newRequest("listDeploymentPlanners", p.toURLValues()) + if err != nil { + return nil, err + } + + var r ListDeploymentPlannersResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + return &r, nil +} + +type ListDeploymentPlannersResponse struct { + Count int `json:"count"` + DeploymentPlanners []*DeploymentPlanner `json:"deploymentplanner"` +} + +type DeploymentPlanner struct { + Name string `json:"name,omitempty"` +} + +type ListLdapConfigurationsParams struct { + p map[string]interface{} +} + +func (p *ListLdapConfigurationsParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["hostname"]; found { + u.Set("hostname", v.(string)) + } + if v, found := p.p["keyword"]; found { + u.Set("keyword", v.(string)) + } + if v, found := p.p["page"]; found { + vv := strconv.Itoa(v.(int)) + u.Set("page", vv) + } + if v, found := p.p["pagesize"]; found { + vv := strconv.Itoa(v.(int)) + u.Set("pagesize", vv) + } + if v, found := p.p["port"]; found { + vv := strconv.Itoa(v.(int)) + u.Set("port", vv) + } + return u +} + +func (p *ListLdapConfigurationsParams) SetHostname(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["hostname"] = v + return +} + +func (p *ListLdapConfigurationsParams) SetKeyword(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["keyword"] = v + return +} + +func (p *ListLdapConfigurationsParams) SetPage(v int) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["page"] = v + return +} + +func (p *ListLdapConfigurationsParams) SetPagesize(v int) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["pagesize"] = v + return +} + +func (p *ListLdapConfigurationsParams) SetPort(v int) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["port"] = v + return +} + +// You should always use this function to get a new ListLdapConfigurationsParams instance, +// as then you are sure you have configured all required params +func (s *ConfigurationService) NewListLdapConfigurationsParams() *ListLdapConfigurationsParams { + p := &ListLdapConfigurationsParams{} + p.p = make(map[string]interface{}) + return p +} + +// Lists all LDAP configurations +func (s *ConfigurationService) ListLdapConfigurations(p *ListLdapConfigurationsParams) (*ListLdapConfigurationsResponse, error) { + resp, err := s.cs.newRequest("listLdapConfigurations", p.toURLValues()) + if err != nil { + return nil, err + } + + var r ListLdapConfigurationsResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + return &r, nil +} + +type ListLdapConfigurationsResponse struct { + Count int `json:"count"` + LdapConfigurations []*LdapConfiguration `json:"ldapconfiguration"` +} + +type LdapConfiguration struct { + Hostname string `json:"hostname,omitempty"` + Port int `json:"port,omitempty"` +} + +type AddLdapConfigurationParams struct { + p map[string]interface{} +} + +func (p 
*AddLdapConfigurationParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["hostname"]; found { + u.Set("hostname", v.(string)) + } + if v, found := p.p["port"]; found { + vv := strconv.Itoa(v.(int)) + u.Set("port", vv) + } + return u +} + +func (p *AddLdapConfigurationParams) SetHostname(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["hostname"] = v + return +} + +func (p *AddLdapConfigurationParams) SetPort(v int) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["port"] = v + return +} + +// You should always use this function to get a new AddLdapConfigurationParams instance, +// as then you are sure you have configured all required params +func (s *ConfigurationService) NewAddLdapConfigurationParams(hostname string, port int) *AddLdapConfigurationParams { + p := &AddLdapConfigurationParams{} + p.p = make(map[string]interface{}) + p.p["hostname"] = hostname + p.p["port"] = port + return p +} + +// Add a new Ldap Configuration +func (s *ConfigurationService) AddLdapConfiguration(p *AddLdapConfigurationParams) (*AddLdapConfigurationResponse, error) { + resp, err := s.cs.newRequest("addLdapConfiguration", p.toURLValues()) + if err != nil { + return nil, err + } + + var r AddLdapConfigurationResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + return &r, nil +} + +type AddLdapConfigurationResponse struct { + Hostname string `json:"hostname,omitempty"` + Port int `json:"port,omitempty"` +} + +type DeleteLdapConfigurationParams struct { + p map[string]interface{} +} + +func (p *DeleteLdapConfigurationParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["hostname"]; found { + u.Set("hostname", v.(string)) + } + return u +} + +func (p *DeleteLdapConfigurationParams) SetHostname(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["hostname"] = v + return +} + +// You should always use this function to get a new DeleteLdapConfigurationParams instance, +// as then you are sure you have configured all required params +func (s *ConfigurationService) NewDeleteLdapConfigurationParams(hostname string) *DeleteLdapConfigurationParams { + p := &DeleteLdapConfigurationParams{} + p.p = make(map[string]interface{}) + p.p["hostname"] = hostname + return p +} + +// Remove an Ldap Configuration +func (s *ConfigurationService) DeleteLdapConfiguration(p *DeleteLdapConfigurationParams) (*DeleteLdapConfigurationResponse, error) { + resp, err := s.cs.newRequest("deleteLdapConfiguration", p.toURLValues()) + if err != nil { + return nil, err + } + + var r DeleteLdapConfigurationResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + return &r, nil +} + +type DeleteLdapConfigurationResponse struct { + Hostname string `json:"hostname,omitempty"` + Port int `json:"port,omitempty"` +} diff --git a/vendor/github.com/xanzy/go-cloudstack/cloudstack/DiskOfferingService.go b/vendor/github.com/xanzy/go-cloudstack/cloudstack/DiskOfferingService.go new file mode 100644 index 000000000..e97c784f8 --- /dev/null +++ b/vendor/github.com/xanzy/go-cloudstack/cloudstack/DiskOfferingService.go @@ -0,0 +1,638 @@ +// +// Copyright 2014, Sander van Harmelen +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package cloudstack + +import ( + "encoding/json" + "fmt" + "net/url" + "strconv" + "strings" +) + +type CreateDiskOfferingParams struct { + p map[string]interface{} +} + +func (p *CreateDiskOfferingParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["bytesreadrate"]; found { + vv := strconv.FormatInt(v.(int64), 10) + u.Set("bytesreadrate", vv) + } + if v, found := p.p["byteswriterate"]; found { + vv := strconv.FormatInt(v.(int64), 10) + u.Set("byteswriterate", vv) + } + if v, found := p.p["customized"]; found { + vv := strconv.FormatBool(v.(bool)) + u.Set("customized", vv) + } + if v, found := p.p["customizediops"]; found { + vv := strconv.FormatBool(v.(bool)) + u.Set("customizediops", vv) + } + if v, found := p.p["disksize"]; found { + vv := strconv.FormatInt(v.(int64), 10) + u.Set("disksize", vv) + } + if v, found := p.p["displayoffering"]; found { + vv := strconv.FormatBool(v.(bool)) + u.Set("displayoffering", vv) + } + if v, found := p.p["displaytext"]; found { + u.Set("displaytext", v.(string)) + } + if v, found := p.p["domainid"]; found { + u.Set("domainid", v.(string)) + } + if v, found := p.p["hypervisorsnapshotreserve"]; found { + vv := strconv.Itoa(v.(int)) + u.Set("hypervisorsnapshotreserve", vv) + } + if v, found := p.p["iopsreadrate"]; found { + vv := strconv.FormatInt(v.(int64), 10) + u.Set("iopsreadrate", vv) + } + if v, found := p.p["iopswriterate"]; found { + vv := strconv.FormatInt(v.(int64), 10) + u.Set("iopswriterate", vv) + } + if v, found := p.p["maxiops"]; found { + vv := strconv.FormatInt(v.(int64), 10) + u.Set("maxiops", vv) + } + if v, found := p.p["miniops"]; found { + vv := strconv.FormatInt(v.(int64), 10) + u.Set("miniops", vv) + } + if v, found := p.p["name"]; found { + u.Set("name", v.(string)) + } + if v, found := p.p["storagetype"]; found { + u.Set("storagetype", v.(string)) + } + if v, found := p.p["tags"]; found { + u.Set("tags", v.(string)) + } + return u +} + +func (p *CreateDiskOfferingParams) SetBytesreadrate(v int64) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["bytesreadrate"] = v + return +} + +func (p *CreateDiskOfferingParams) SetByteswriterate(v int64) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["byteswriterate"] = v + return +} + +func (p *CreateDiskOfferingParams) SetCustomized(v bool) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["customized"] = v + return +} + +func (p *CreateDiskOfferingParams) SetCustomizediops(v bool) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["customizediops"] = v + return +} + +func (p *CreateDiskOfferingParams) SetDisksize(v int64) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["disksize"] = v + return +} + +func (p *CreateDiskOfferingParams) SetDisplayoffering(v bool) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["displayoffering"] = v + return +} + +func (p *CreateDiskOfferingParams) SetDisplaytext(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["displaytext"] = v + return +} + 
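+// All setters in this file follow the same generated pattern: they lazily
+// allocate the underlying parameter map, so the zero value of a params
+// struct is safe to use, and toURLValues serializes only the keys that
+// were explicitly set.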
+func (p *CreateDiskOfferingParams) SetDomainid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["domainid"] = v + return +} + +func (p *CreateDiskOfferingParams) SetHypervisorsnapshotreserve(v int) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["hypervisorsnapshotreserve"] = v + return +} + +func (p *CreateDiskOfferingParams) SetIopsreadrate(v int64) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["iopsreadrate"] = v + return +} + +func (p *CreateDiskOfferingParams) SetIopswriterate(v int64) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["iopswriterate"] = v + return +} + +func (p *CreateDiskOfferingParams) SetMaxiops(v int64) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["maxiops"] = v + return +} + +func (p *CreateDiskOfferingParams) SetMiniops(v int64) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["miniops"] = v + return +} + +func (p *CreateDiskOfferingParams) SetName(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["name"] = v + return +} + +func (p *CreateDiskOfferingParams) SetStoragetype(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["storagetype"] = v + return +} + +func (p *CreateDiskOfferingParams) SetTags(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["tags"] = v + return +} + +// You should always use this function to get a new CreateDiskOfferingParams instance, +// as then you are sure you have configured all required params +func (s *DiskOfferingService) NewCreateDiskOfferingParams(displaytext string, name string) *CreateDiskOfferingParams { + p := &CreateDiskOfferingParams{} + p.p = make(map[string]interface{}) + p.p["displaytext"] = displaytext + p.p["name"] = name + return p +} + +// Creates a disk offering. 
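+//
+// Illustrative sketch (assumes a client cs exposing this service as
+// cs.DiskOffering): create a fixed-size offering. Per the CloudStack API
+// the disksize parameter is in GB.
+//
+//	p := cs.DiskOffering.NewCreateDiskOfferingParams("Fast 20 GB", "fast-20g")
+//	p.SetDisksize(20)
+//	p.SetStoragetype("shared")
+//	r, err := cs.DiskOffering.CreateDiskOffering(p)
+//	if err == nil {
+//		fmt.Println("created offering", r.Id)
+//	}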
+func (s *DiskOfferingService) CreateDiskOffering(p *CreateDiskOfferingParams) (*CreateDiskOfferingResponse, error) { + resp, err := s.cs.newRequest("createDiskOffering", p.toURLValues()) + if err != nil { + return nil, err + } + + var r CreateDiskOfferingResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + return &r, nil +} + +type CreateDiskOfferingResponse struct { + CacheMode string `json:"cacheMode,omitempty"` + Created string `json:"created,omitempty"` + DiskBytesReadRate int64 `json:"diskBytesReadRate,omitempty"` + DiskBytesWriteRate int64 `json:"diskBytesWriteRate,omitempty"` + DiskIopsReadRate int64 `json:"diskIopsReadRate,omitempty"` + DiskIopsWriteRate int64 `json:"diskIopsWriteRate,omitempty"` + Disksize int64 `json:"disksize,omitempty"` + Displayoffering bool `json:"displayoffering,omitempty"` + Displaytext string `json:"displaytext,omitempty"` + Domain string `json:"domain,omitempty"` + Domainid string `json:"domainid,omitempty"` + Hypervisorsnapshotreserve int `json:"hypervisorsnapshotreserve,omitempty"` + Id string `json:"id,omitempty"` + Iscustomized bool `json:"iscustomized,omitempty"` + Iscustomizediops bool `json:"iscustomizediops,omitempty"` + Maxiops int64 `json:"maxiops,omitempty"` + Miniops int64 `json:"miniops,omitempty"` + Name string `json:"name,omitempty"` + Storagetype string `json:"storagetype,omitempty"` + Tags string `json:"tags,omitempty"` +} + +type UpdateDiskOfferingParams struct { + p map[string]interface{} +} + +func (p *UpdateDiskOfferingParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["displayoffering"]; found { + vv := strconv.FormatBool(v.(bool)) + u.Set("displayoffering", vv) + } + if v, found := p.p["displaytext"]; found { + u.Set("displaytext", v.(string)) + } + if v, found := p.p["id"]; found { + u.Set("id", v.(string)) + } + if v, found := p.p["name"]; found { + u.Set("name", v.(string)) + } + if v, found := p.p["sortkey"]; found { + vv := strconv.Itoa(v.(int)) + u.Set("sortkey", vv) + } + return u +} + +func (p *UpdateDiskOfferingParams) SetDisplayoffering(v bool) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["displayoffering"] = v + return +} + +func (p *UpdateDiskOfferingParams) SetDisplaytext(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["displaytext"] = v + return +} + +func (p *UpdateDiskOfferingParams) SetId(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["id"] = v + return +} + +func (p *UpdateDiskOfferingParams) SetName(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["name"] = v + return +} + +func (p *UpdateDiskOfferingParams) SetSortkey(v int) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["sortkey"] = v + return +} + +// You should always use this function to get a new UpdateDiskOfferingParams instance, +// as then you are sure you have configured all required params +func (s *DiskOfferingService) NewUpdateDiskOfferingParams(id string) *UpdateDiskOfferingParams { + p := &UpdateDiskOfferingParams{} + p.p = make(map[string]interface{}) + p.p["id"] = id + return p +} + +// Updates a disk offering. 
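+// Illustrative usage (editor's sketch; the UUID placeholder and new name are
+// assumed):
+//
+//	p := s.NewUpdateDiskOfferingParams("<offering-uuid>")
+//	p.SetName("custom-50gb-v2")
+//	r, err := s.UpdateDiskOffering(p)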
+func (s *DiskOfferingService) UpdateDiskOffering(p *UpdateDiskOfferingParams) (*UpdateDiskOfferingResponse, error) { + resp, err := s.cs.newRequest("updateDiskOffering", p.toURLValues()) + if err != nil { + return nil, err + } + + var r UpdateDiskOfferingResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + return &r, nil +} + +type UpdateDiskOfferingResponse struct { + CacheMode string `json:"cacheMode,omitempty"` + Created string `json:"created,omitempty"` + DiskBytesReadRate int64 `json:"diskBytesReadRate,omitempty"` + DiskBytesWriteRate int64 `json:"diskBytesWriteRate,omitempty"` + DiskIopsReadRate int64 `json:"diskIopsReadRate,omitempty"` + DiskIopsWriteRate int64 `json:"diskIopsWriteRate,omitempty"` + Disksize int64 `json:"disksize,omitempty"` + Displayoffering bool `json:"displayoffering,omitempty"` + Displaytext string `json:"displaytext,omitempty"` + Domain string `json:"domain,omitempty"` + Domainid string `json:"domainid,omitempty"` + Hypervisorsnapshotreserve int `json:"hypervisorsnapshotreserve,omitempty"` + Id string `json:"id,omitempty"` + Iscustomized bool `json:"iscustomized,omitempty"` + Iscustomizediops bool `json:"iscustomizediops,omitempty"` + Maxiops int64 `json:"maxiops,omitempty"` + Miniops int64 `json:"miniops,omitempty"` + Name string `json:"name,omitempty"` + Storagetype string `json:"storagetype,omitempty"` + Tags string `json:"tags,omitempty"` +} + +type DeleteDiskOfferingParams struct { + p map[string]interface{} +} + +func (p *DeleteDiskOfferingParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["id"]; found { + u.Set("id", v.(string)) + } + return u +} + +func (p *DeleteDiskOfferingParams) SetId(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["id"] = v + return +} + +// You should always use this function to get a new DeleteDiskOfferingParams instance, +// as then you are sure you have configured all required params +func (s *DiskOfferingService) NewDeleteDiskOfferingParams(id string) *DeleteDiskOfferingParams { + p := &DeleteDiskOfferingParams{} + p.p = make(map[string]interface{}) + p.p["id"] = id + return p +} + +// Deletes a disk offering.
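+// Illustrative usage (editor's sketch; the UUID placeholder is assumed):
+//
+//	p := s.NewDeleteDiskOfferingParams("<offering-uuid>")
+//	r, err := s.DeleteDiskOffering(p)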
+func (s *DiskOfferingService) DeleteDiskOffering(p *DeleteDiskOfferingParams) (*DeleteDiskOfferingResponse, error) { + resp, err := s.cs.newRequest("deleteDiskOffering", p.toURLValues()) + if err != nil { + return nil, err + } + + var r DeleteDiskOfferingResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + return &r, nil +} + +type DeleteDiskOfferingResponse struct { + Displaytext string `json:"displaytext,omitempty"` + Success string `json:"success,omitempty"` +} + +type ListDiskOfferingsParams struct { + p map[string]interface{} +} + +func (p *ListDiskOfferingsParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["domainid"]; found { + u.Set("domainid", v.(string)) + } + if v, found := p.p["id"]; found { + u.Set("id", v.(string)) + } + if v, found := p.p["keyword"]; found { + u.Set("keyword", v.(string)) + } + if v, found := p.p["name"]; found { + u.Set("name", v.(string)) + } + if v, found := p.p["page"]; found { + vv := strconv.Itoa(v.(int)) + u.Set("page", vv) + } + if v, found := p.p["pagesize"]; found { + vv := strconv.Itoa(v.(int)) + u.Set("pagesize", vv) + } + return u +} + +func (p *ListDiskOfferingsParams) SetDomainid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["domainid"] = v + return +} + +func (p *ListDiskOfferingsParams) SetId(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["id"] = v + return +} + +func (p *ListDiskOfferingsParams) SetKeyword(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["keyword"] = v + return +} + +func (p *ListDiskOfferingsParams) SetName(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["name"] = v + return +} + +func (p *ListDiskOfferingsParams) SetPage(v int) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["page"] = v + return +} + +func (p *ListDiskOfferingsParams) SetPagesize(v int) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["pagesize"] = v + return +} + +// You should always use this function to get a new ListDiskOfferingsParams instance, +// as then you are sure you have configured all required params +func (s *DiskOfferingService) NewListDiskOfferingsParams() *ListDiskOfferingsParams { + p := &ListDiskOfferingsParams{} + p.p = make(map[string]interface{}) + return p +} + +// This is a courtesy helper function, which in some cases may not work as expected! +func (s *DiskOfferingService) GetDiskOfferingID(name string) (string, error) { + p := &ListDiskOfferingsParams{} + p.p = make(map[string]interface{}) + + p.p["name"] = name + + l, err := s.ListDiskOfferings(p) + if err != nil { + return "", err + } + + if l.Count == 0 { + return "", fmt.Errorf("No match found for %s: %+v", name, l) + } + + if l.Count == 1 { + return l.DiskOfferings[0].Id, nil + } + + if l.Count > 1 { + for _, v := range l.DiskOfferings { + if v.Name == name { + return v.Id, nil + } + } + } + return "", fmt.Errorf("Could not find an exact match for %s: %+v", name, l) +} + +// This is a courtesy helper function, which in some cases may not work as expected! 
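+// Illustrative usage (editor's sketch; the offering name is assumed):
+//
+//	offering, count, err := s.GetDiskOfferingByName("custom-50gb")
+//	// count reports how many offerings matched; err is non-nil unless a
+//	// single unambiguous match (or a unique exact-name match) was found.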
+func (s *DiskOfferingService) GetDiskOfferingByName(name string) (*DiskOffering, int, error) { + id, err := s.GetDiskOfferingID(name) + if err != nil { + return nil, -1, err + } + + r, count, err := s.GetDiskOfferingByID(id) + if err != nil { + return nil, count, err + } + return r, count, nil +} + +// This is a courtesy helper function, which in some cases may not work as expected! +func (s *DiskOfferingService) GetDiskOfferingByID(id string) (*DiskOffering, int, error) { + p := &ListDiskOfferingsParams{} + p.p = make(map[string]interface{}) + + p.p["id"] = id + + l, err := s.ListDiskOfferings(p) + if err != nil { + if strings.Contains(err.Error(), fmt.Sprintf( + "Invalid parameter id value=%s due to incorrect long value format, "+ + "or entity does not exist", id)) { + return nil, 0, fmt.Errorf("No match found for %s: %+v", id, l) + } + return nil, -1, err + } + + if l.Count == 0 { + return nil, l.Count, fmt.Errorf("No match found for %s: %+v", id, l) + } + + if l.Count == 1 { + return l.DiskOfferings[0], l.Count, nil + } + return nil, l.Count, fmt.Errorf("There is more than one result for DiskOffering UUID: %s!", id) +} + +// Lists all available disk offerings. +func (s *DiskOfferingService) ListDiskOfferings(p *ListDiskOfferingsParams) (*ListDiskOfferingsResponse, error) { + resp, err := s.cs.newRequest("listDiskOfferings", p.toURLValues()) + if err != nil { + return nil, err + } + + var r ListDiskOfferingsResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + return &r, nil +} + +type ListDiskOfferingsResponse struct { + Count int `json:"count"` + DiskOfferings []*DiskOffering `json:"diskoffering"` +} + +type DiskOffering struct { + CacheMode string `json:"cacheMode,omitempty"` + Created string `json:"created,omitempty"` + DiskBytesReadRate int64 `json:"diskBytesReadRate,omitempty"` + DiskBytesWriteRate int64 `json:"diskBytesWriteRate,omitempty"` + DiskIopsReadRate int64 `json:"diskIopsReadRate,omitempty"` + DiskIopsWriteRate int64 `json:"diskIopsWriteRate,omitempty"` + Disksize int64 `json:"disksize,omitempty"` + Displayoffering bool `json:"displayoffering,omitempty"` + Displaytext string `json:"displaytext,omitempty"` + Domain string `json:"domain,omitempty"` + Domainid string `json:"domainid,omitempty"` + Hypervisorsnapshotreserve int `json:"hypervisorsnapshotreserve,omitempty"` + Id string `json:"id,omitempty"` + Iscustomized bool `json:"iscustomized,omitempty"` + Iscustomizediops bool `json:"iscustomizediops,omitempty"` + Maxiops int64 `json:"maxiops,omitempty"` + Miniops int64 `json:"miniops,omitempty"` + Name string `json:"name,omitempty"` + Storagetype string `json:"storagetype,omitempty"` + Tags string `json:"tags,omitempty"` +} diff --git a/vendor/github.com/xanzy/go-cloudstack/cloudstack/DomainService.go b/vendor/github.com/xanzy/go-cloudstack/cloudstack/DomainService.go new file mode 100644 index 000000000..883976499 --- /dev/null +++ b/vendor/github.com/xanzy/go-cloudstack/cloudstack/DomainService.go @@ -0,0 +1,677 @@ +// +// Copyright 2014, Sander van Harmelen +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and +// limitations under the License. +// + +package cloudstack + +import ( + "encoding/json" + "fmt" + "net/url" + "strconv" + "strings" +) + +type CreateDomainParams struct { + p map[string]interface{} +} + +func (p *CreateDomainParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["domainid"]; found { + u.Set("domainid", v.(string)) + } + if v, found := p.p["name"]; found { + u.Set("name", v.(string)) + } + if v, found := p.p["networkdomain"]; found { + u.Set("networkdomain", v.(string)) + } + if v, found := p.p["parentdomainid"]; found { + u.Set("parentdomainid", v.(string)) + } + return u +} + +func (p *CreateDomainParams) SetDomainid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["domainid"] = v + return +} + +func (p *CreateDomainParams) SetName(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["name"] = v + return +} + +func (p *CreateDomainParams) SetNetworkdomain(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["networkdomain"] = v + return +} + +func (p *CreateDomainParams) SetParentdomainid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["parentdomainid"] = v + return +} + +// You should always use this function to get a new CreateDomainParams instance, +// as then you are sure you have configured all required params +func (s *DomainService) NewCreateDomainParams(name string) *CreateDomainParams { + p := &CreateDomainParams{} + p.p = make(map[string]interface{}) + p.p["name"] = name + return p +} + +// Creates a domain +func (s *DomainService) CreateDomain(p *CreateDomainParams) (*CreateDomainResponse, error) { + resp, err := s.cs.newRequest("createDomain", p.toURLValues()) + if err != nil { + return nil, err + } + + var r CreateDomainResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + return &r, nil +} + +type CreateDomainResponse struct { + Haschild bool `json:"haschild,omitempty"` + Id string `json:"id,omitempty"` + Level int `json:"level,omitempty"` + Name string `json:"name,omitempty"` + Networkdomain string `json:"networkdomain,omitempty"` + Parentdomainid string `json:"parentdomainid,omitempty"` + Parentdomainname string `json:"parentdomainname,omitempty"` + Path string `json:"path,omitempty"` +} + +type UpdateDomainParams struct { + p map[string]interface{} +} + +func (p *UpdateDomainParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["id"]; found { + u.Set("id", v.(string)) + } + if v, found := p.p["name"]; found { + u.Set("name", v.(string)) + } + if v, found := p.p["networkdomain"]; found { + u.Set("networkdomain", v.(string)) + } + return u +} + +func (p *UpdateDomainParams) SetId(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["id"] = v + return +} + +func (p *UpdateDomainParams) SetName(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["name"] = v + return +} + +func (p *UpdateDomainParams) SetNetworkdomain(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["networkdomain"] = v + return +} + +// You should always use this function to get a new UpdateDomainParams instance, +// as then you are sure you have configured all required params +func (s *DomainService) NewUpdateDomainParams(id string) *UpdateDomainParams { + p := &UpdateDomainParams{} + p.p = 
make(map[string]interface{}) + p.p["id"] = id + return p +} + +// Updates a domain with a new name +func (s *DomainService) UpdateDomain(p *UpdateDomainParams) (*UpdateDomainResponse, error) { + resp, err := s.cs.newRequest("updateDomain", p.toURLValues()) + if err != nil { + return nil, err + } + + var r UpdateDomainResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + return &r, nil +} + +type UpdateDomainResponse struct { + Haschild bool `json:"haschild,omitempty"` + Id string `json:"id,omitempty"` + Level int `json:"level,omitempty"` + Name string `json:"name,omitempty"` + Networkdomain string `json:"networkdomain,omitempty"` + Parentdomainid string `json:"parentdomainid,omitempty"` + Parentdomainname string `json:"parentdomainname,omitempty"` + Path string `json:"path,omitempty"` +} + +type DeleteDomainParams struct { + p map[string]interface{} +} + +func (p *DeleteDomainParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["cleanup"]; found { + vv := strconv.FormatBool(v.(bool)) + u.Set("cleanup", vv) + } + if v, found := p.p["id"]; found { + u.Set("id", v.(string)) + } + return u +} + +func (p *DeleteDomainParams) SetCleanup(v bool) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["cleanup"] = v + return +} + +func (p *DeleteDomainParams) SetId(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["id"] = v + return +} + +// You should always use this function to get a new DeleteDomainParams instance, +// as then you are sure you have configured all required params +func (s *DomainService) NewDeleteDomainParams(id string) *DeleteDomainParams { + p := &DeleteDomainParams{} + p.p = make(map[string]interface{}) + p.p["id"] = id + return p +} + +// Deletes a specified domain +func (s *DomainService) DeleteDomain(p *DeleteDomainParams) (*DeleteDomainResponse, error) { + resp, err := s.cs.newRequest("deleteDomain", p.toURLValues()) + if err != nil { + return nil, err + } + + var r DeleteDomainResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + + // If we have a async client, we need to wait for the async result + if s.cs.async { + b, err := s.cs.GetAsyncJobResult(r.JobID, s.cs.timeout) + if err != nil { + if err == AsyncTimeoutErr { + return &r, err + } + return nil, err + } + + if err := json.Unmarshal(b, &r); err != nil { + return nil, err + } + } + return &r, nil +} + +type DeleteDomainResponse struct { + JobID string `json:"jobid,omitempty"` + Displaytext string `json:"displaytext,omitempty"` + Success bool `json:"success,omitempty"` +} + +type ListDomainsParams struct { + p map[string]interface{} +} + +func (p *ListDomainsParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["id"]; found { + u.Set("id", v.(string)) + } + if v, found := p.p["keyword"]; found { + u.Set("keyword", v.(string)) + } + if v, found := p.p["level"]; found { + vv := strconv.Itoa(v.(int)) + u.Set("level", vv) + } + if v, found := p.p["listall"]; found { + vv := strconv.FormatBool(v.(bool)) + u.Set("listall", vv) + } + if v, found := p.p["name"]; found { + u.Set("name", v.(string)) + } + if v, found := p.p["page"]; found { + vv := strconv.Itoa(v.(int)) + u.Set("page", vv) + } + if v, found := p.p["pagesize"]; found { + vv := strconv.Itoa(v.(int)) + u.Set("pagesize", vv) + } + return u +} + +func (p *ListDomainsParams) SetId(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + 
p.p["id"] = v + return +} + +func (p *ListDomainsParams) SetKeyword(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["keyword"] = v + return +} + +func (p *ListDomainsParams) SetLevel(v int) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["level"] = v + return +} + +func (p *ListDomainsParams) SetListall(v bool) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["listall"] = v + return +} + +func (p *ListDomainsParams) SetName(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["name"] = v + return +} + +func (p *ListDomainsParams) SetPage(v int) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["page"] = v + return +} + +func (p *ListDomainsParams) SetPagesize(v int) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["pagesize"] = v + return +} + +// You should always use this function to get a new ListDomainsParams instance, +// as then you are sure you have configured all required params +func (s *DomainService) NewListDomainsParams() *ListDomainsParams { + p := &ListDomainsParams{} + p.p = make(map[string]interface{}) + return p +} + +// This is a courtesy helper function, which in some cases may not work as expected! +func (s *DomainService) GetDomainID(name string) (string, error) { + p := &ListDomainsParams{} + p.p = make(map[string]interface{}) + + p.p["name"] = name + + l, err := s.ListDomains(p) + if err != nil { + return "", err + } + + if l.Count == 0 { + return "", fmt.Errorf("No match found for %s: %+v", name, l) + } + + if l.Count == 1 { + return l.Domains[0].Id, nil + } + + if l.Count > 1 { + for _, v := range l.Domains { + if v.Name == name { + return v.Id, nil + } + } + } + return "", fmt.Errorf("Could not find an exact match for %s: %+v", name, l) +} + +// This is a courtesy helper function, which in some cases may not work as expected! +func (s *DomainService) GetDomainByName(name string) (*Domain, int, error) { + id, err := s.GetDomainID(name) + if err != nil { + return nil, -1, err + } + + r, count, err := s.GetDomainByID(id) + if err != nil { + return nil, count, err + } + return r, count, nil +} + +// This is a courtesy helper function, which in some cases may not work as expected! 
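+// Illustrative usage (editor's sketch; the UUID placeholder is assumed):
+//
+//	domain, count, err := s.GetDomainByID("<domain-uuid>")
+//	// count is 0 when the UUID matched nothing, -1 on a request error,
+//	// and 1 on success.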
+func (s *DomainService) GetDomainByID(id string) (*Domain, int, error) { + p := &ListDomainsParams{} + p.p = make(map[string]interface{}) + + p.p["id"] = id + + l, err := s.ListDomains(p) + if err != nil { + if strings.Contains(err.Error(), fmt.Sprintf( + "Invalid parameter id value=%s due to incorrect long value format, "+ + "or entity does not exist", id)) { + return nil, 0, fmt.Errorf("No match found for %s: %+v", id, l) + } + return nil, -1, err + } + + if l.Count == 0 { + return nil, l.Count, fmt.Errorf("No match found for %s: %+v", id, l) + } + + if l.Count == 1 { + return l.Domains[0], l.Count, nil + } + return nil, l.Count, fmt.Errorf("There is more than one result for Domain UUID: %s!", id) +} + +// Lists domains and provides detailed information for listed domains +func (s *DomainService) ListDomains(p *ListDomainsParams) (*ListDomainsResponse, error) { + resp, err := s.cs.newRequest("listDomains", p.toURLValues()) + if err != nil { + return nil, err + } + + var r ListDomainsResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + return &r, nil +} + +type ListDomainsResponse struct { + Count int `json:"count"` + Domains []*Domain `json:"domain"` +} + +type Domain struct { + Haschild bool `json:"haschild,omitempty"` + Id string `json:"id,omitempty"` + Level int `json:"level,omitempty"` + Name string `json:"name,omitempty"` + Networkdomain string `json:"networkdomain,omitempty"` + Parentdomainid string `json:"parentdomainid,omitempty"` + Parentdomainname string `json:"parentdomainname,omitempty"` + Path string `json:"path,omitempty"` +} + +type ListDomainChildrenParams struct { + p map[string]interface{} +} + +func (p *ListDomainChildrenParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["id"]; found { + u.Set("id", v.(string)) + } + if v, found := p.p["isrecursive"]; found { + vv := strconv.FormatBool(v.(bool)) + u.Set("isrecursive", vv) + } + if v, found := p.p["keyword"]; found { + u.Set("keyword", v.(string)) + } + if v, found := p.p["listall"]; found { + vv := strconv.FormatBool(v.(bool)) + u.Set("listall", vv) + } + if v, found := p.p["name"]; found { + u.Set("name", v.(string)) + } + if v, found := p.p["page"]; found { + vv := strconv.Itoa(v.(int)) + u.Set("page", vv) + } + if v, found := p.p["pagesize"]; found { + vv := strconv.Itoa(v.(int)) + u.Set("pagesize", vv) + } + return u +} + +func (p *ListDomainChildrenParams) SetId(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["id"] = v + return +} + +func (p *ListDomainChildrenParams) SetIsrecursive(v bool) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["isrecursive"] = v + return +} + +func (p *ListDomainChildrenParams) SetKeyword(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["keyword"] = v + return +} + +func (p *ListDomainChildrenParams) SetListall(v bool) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["listall"] = v + return +} + +func (p *ListDomainChildrenParams) SetName(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["name"] = v + return +} + +func (p *ListDomainChildrenParams) SetPage(v int) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["page"] = v + return +} + +func (p *ListDomainChildrenParams) SetPagesize(v int) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["pagesize"] = v + return +} + +// You should always use this function to get a new ListDomainChildrenParams instance, +// as then you are sure you have configured all required params +func (s *DomainService) NewListDomainChildrenParams() *ListDomainChildrenParams { + p := &ListDomainChildrenParams{} + p.p = make(map[string]interface{}) + return p +} + +// This is a courtesy helper function, which in some cases may not work as expected! +func (s *DomainService) GetDomainChildrenID(name string) (string, error) { + p := &ListDomainChildrenParams{} + p.p = make(map[string]interface{}) + + p.p["name"] = name + + l, err := s.ListDomainChildren(p) + if err != nil { + return "", err + } + + if l.Count == 0 { + return "", fmt.Errorf("No match found for %s: %+v", name, l) + } + + if l.Count == 1 { + return l.DomainChildren[0].Id, nil + } + + if l.Count > 1 { + for _, v := range l.DomainChildren { + if v.Name == name { + return v.Id, nil + } + } + } + return "", fmt.Errorf("Could not find an exact match for %s: %+v", name, l) +} + +// This is a courtesy helper function, which in some cases may not work as expected! +func (s *DomainService) GetDomainChildrenByName(name string) (*DomainChildren, int, error) { + id, err := s.GetDomainChildrenID(name) + if err != nil { + return nil, -1, err + } + + r, count, err := s.GetDomainChildrenByID(id) + if err != nil { + return nil, count, err + } + return r, count, nil +} + +// This is a courtesy helper function, which in some cases may not work as expected! +func (s *DomainService) GetDomainChildrenByID(id string) (*DomainChildren, int, error) { + p := &ListDomainChildrenParams{} + p.p = make(map[string]interface{}) + + p.p["id"] = id + + l, err := s.ListDomainChildren(p) + if err != nil { + if strings.Contains(err.Error(), fmt.Sprintf( + "Invalid parameter id value=%s due to incorrect long value format, "+ + "or entity does not exist", id)) { + return nil, 0, fmt.Errorf("No match found for %s: %+v", id, l) + } + return nil, -1, err + } + + if l.Count == 0 { + return nil, l.Count, fmt.Errorf("No match found for %s: %+v", id, l) + } + + if l.Count == 1 { + return l.DomainChildren[0], l.Count, nil + } + return nil, l.Count, fmt.Errorf("There is more than one result for DomainChildren UUID: %s!", id) +} + +// Lists all children domains belonging to a specified domain +func (s *DomainService) ListDomainChildren(p *ListDomainChildrenParams) (*ListDomainChildrenResponse, error) { + resp, err := s.cs.newRequest("listDomainChildren", p.toURLValues()) + if err != nil { + return nil, err + } + + var r ListDomainChildrenResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + return &r, nil +} + +type ListDomainChildrenResponse struct { + Count int `json:"count"` + DomainChildren []*DomainChildren `json:"domainchildren"` +} + +type DomainChildren struct { + Haschild bool `json:"haschild,omitempty"` + Id string `json:"id,omitempty"` + Level int `json:"level,omitempty"` + Name string `json:"name,omitempty"` + Networkdomain string `json:"networkdomain,omitempty"` + Parentdomainid string `json:"parentdomainid,omitempty"` + Parentdomainname string `json:"parentdomainname,omitempty"` + Path string `json:"path,omitempty"` +} diff --git a/vendor/github.com/xanzy/go-cloudstack/cloudstack/EventService.go b/vendor/github.com/xanzy/go-cloudstack/cloudstack/EventService.go new file mode 100644 index 000000000..23e53bb54 --- /dev/null +++ b/vendor/github.com/xanzy/go-cloudstack/cloudstack/EventService.go @@ -0,0 +1,504 @@ +// +// Copyright 2014, Sander van Harmelen +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file
except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package cloudstack + +import ( + "encoding/json" + "fmt" + "net/url" + "strconv" + "strings" +) + +type ListEventsParams struct { + p map[string]interface{} +} + +func (p *ListEventsParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["account"]; found { + u.Set("account", v.(string)) + } + if v, found := p.p["domainid"]; found { + u.Set("domainid", v.(string)) + } + if v, found := p.p["duration"]; found { + vv := strconv.Itoa(v.(int)) + u.Set("duration", vv) + } + if v, found := p.p["enddate"]; found { + u.Set("enddate", v.(string)) + } + if v, found := p.p["entrytime"]; found { + vv := strconv.Itoa(v.(int)) + u.Set("entrytime", vv) + } + if v, found := p.p["id"]; found { + u.Set("id", v.(string)) + } + if v, found := p.p["isrecursive"]; found { + vv := strconv.FormatBool(v.(bool)) + u.Set("isrecursive", vv) + } + if v, found := p.p["keyword"]; found { + u.Set("keyword", v.(string)) + } + if v, found := p.p["level"]; found { + u.Set("level", v.(string)) + } + if v, found := p.p["listall"]; found { + vv := strconv.FormatBool(v.(bool)) + u.Set("listall", vv) + } + if v, found := p.p["page"]; found { + vv := strconv.Itoa(v.(int)) + u.Set("page", vv) + } + if v, found := p.p["pagesize"]; found { + vv := strconv.Itoa(v.(int)) + u.Set("pagesize", vv) + } + if v, found := p.p["projectid"]; found { + u.Set("projectid", v.(string)) + } + if v, found := p.p["startdate"]; found { + u.Set("startdate", v.(string)) + } + if v, found := p.p["type"]; found { + u.Set("type", v.(string)) + } + return u +} + +func (p *ListEventsParams) SetAccount(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["account"] = v + return +} + +func (p *ListEventsParams) SetDomainid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["domainid"] = v + return +} + +func (p *ListEventsParams) SetDuration(v int) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["duration"] = v + return +} + +func (p *ListEventsParams) SetEnddate(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["enddate"] = v + return +} + +func (p *ListEventsParams) SetEntrytime(v int) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["entrytime"] = v + return +} + +func (p *ListEventsParams) SetId(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["id"] = v + return +} + +func (p *ListEventsParams) SetIsrecursive(v bool) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["isrecursive"] = v + return +} + +func (p *ListEventsParams) SetKeyword(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["keyword"] = v + return +} + +func (p *ListEventsParams) SetLevel(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["level"] = v + return +} + +func (p *ListEventsParams) SetListall(v bool) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["listall"] = v + return +} + +func (p *ListEventsParams) SetPage(v int) { + if p.p == nil { + p.p 
= make(map[string]interface{}) + } + p.p["page"] = v + return +} + +func (p *ListEventsParams) SetPagesize(v int) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["pagesize"] = v + return +} + +func (p *ListEventsParams) SetProjectid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["projectid"] = v + return +} + +func (p *ListEventsParams) SetStartdate(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["startdate"] = v + return +} + +func (p *ListEventsParams) SetType(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["type"] = v + return +} + +// You should always use this function to get a new ListEventsParams instance, +// as then you are sure you have configured all required params +func (s *EventService) NewListEventsParams() *ListEventsParams { + p := &ListEventsParams{} + p.p = make(map[string]interface{}) + return p +} + +// This is a courtesy helper function, which in some cases may not work as expected! +func (s *EventService) GetEventByID(id string) (*Event, int, error) { + p := &ListEventsParams{} + p.p = make(map[string]interface{}) + + p.p["id"] = id + + l, err := s.ListEvents(p) + if err != nil { + if strings.Contains(err.Error(), fmt.Sprintf( + "Invalid parameter id value=%s due to incorrect long value format, "+ + "or entity does not exist", id)) { + return nil, 0, fmt.Errorf("No match found for %s: %+v", id, l) + } + return nil, -1, err + } + + if l.Count == 0 { + // If no matches, search all projects + p.p["projectid"] = "-1" + + l, err = s.ListEvents(p) + if err != nil { + if strings.Contains(err.Error(), fmt.Sprintf( + "Invalid parameter id value=%s due to incorrect long value format, "+ + "or entity does not exist", id)) { + return nil, 0, fmt.Errorf("No match found for %s: %+v", id, l) + } + return nil, -1, err + } + } + + if l.Count == 0 { + return nil, l.Count, fmt.Errorf("No match found for %s: %+v", id, l) + } + + if l.Count == 1 { + return l.Events[0], l.Count, nil + } + return nil, l.Count, fmt.Errorf("There is more than one result for Event UUID: %s!", id) +} + +// A command to list events.
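+// Illustrative usage (editor's sketch; the page values are assumptions):
+//
+//	p := s.NewListEventsParams()
+//	p.SetPage(1)
+//	p.SetPagesize(50)
+//	l, err := s.ListEvents(p)
+//	// l.Count reports how many events matched; iterate l.Events for the
+//	// returned page.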
+func (s *EventService) ListEvents(p *ListEventsParams) (*ListEventsResponse, error) { + resp, err := s.cs.newRequest("listEvents", p.toURLValues()) + if err != nil { + return nil, err + } + + var r ListEventsResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + return &r, nil +} + +type ListEventsResponse struct { + Count int `json:"count"` + Events []*Event `json:"event"` +} + +type Event struct { + Account string `json:"account,omitempty"` + Created string `json:"created,omitempty"` + Description string `json:"description,omitempty"` + Domain string `json:"domain,omitempty"` + Domainid string `json:"domainid,omitempty"` + Id string `json:"id,omitempty"` + Level string `json:"level,omitempty"` + Parentid string `json:"parentid,omitempty"` + Project string `json:"project,omitempty"` + Projectid string `json:"projectid,omitempty"` + State string `json:"state,omitempty"` + Type string `json:"type,omitempty"` + Username string `json:"username,omitempty"` +} + +type ListEventTypesParams struct { + p map[string]interface{} +} + +func (p *ListEventTypesParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + return u +} + +// You should always use this function to get a new ListEventTypesParams instance, +// as then you are sure you have configured all required params +func (s *EventService) NewListEventTypesParams() *ListEventTypesParams { + p := &ListEventTypesParams{} + p.p = make(map[string]interface{}) + return p +} + +// List Event Types +func (s *EventService) ListEventTypes(p *ListEventTypesParams) (*ListEventTypesResponse, error) { + resp, err := s.cs.newRequest("listEventTypes", p.toURLValues()) + if err != nil { + return nil, err + } + + var r ListEventTypesResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + return &r, nil +} + +type ListEventTypesResponse struct { + Count int `json:"count"` + EventTypes []*EventType `json:"eventtype"` +} + +type EventType struct { + Name string `json:"name,omitempty"` +} + +type ArchiveEventsParams struct { + p map[string]interface{} +} + +func (p *ArchiveEventsParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["enddate"]; found { + u.Set("enddate", v.(string)) + } + if v, found := p.p["ids"]; found { + vv := strings.Join(v.([]string), ",") + u.Set("ids", vv) + } + if v, found := p.p["startdate"]; found { + u.Set("startdate", v.(string)) + } + if v, found := p.p["type"]; found { + u.Set("type", v.(string)) + } + return u +} + +func (p *ArchiveEventsParams) SetEnddate(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["enddate"] = v + return +} + +func (p *ArchiveEventsParams) SetIds(v []string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["ids"] = v + return +} + +func (p *ArchiveEventsParams) SetStartdate(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["startdate"] = v + return +} + +func (p *ArchiveEventsParams) SetType(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["type"] = v + return +} + +// You should always use this function to get a new ArchiveEventsParams instance, +// as then you are sure you have configured all required params +func (s *EventService) NewArchiveEventsParams() *ArchiveEventsParams { + p := &ArchiveEventsParams{} + p.p = make(map[string]interface{}) + return p +} + +// Archive one or more events.
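+// Illustrative usage (editor's sketch; the event UUID placeholder is
+// assumed, and the date format follows the string parameters above):
+//
+//	p := s.NewArchiveEventsParams()
+//	p.SetIds([]string{"<event-uuid>"})
+//	r, err := s.ArchiveEvents(p)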
+func (s *EventService) ArchiveEvents(p *ArchiveEventsParams) (*ArchiveEventsResponse, error) { + resp, err := s.cs.newRequest("archiveEvents", p.toURLValues()) + if err != nil { + return nil, err + } + + var r ArchiveEventsResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + return &r, nil +} + +type ArchiveEventsResponse struct { + Displaytext string `json:"displaytext,omitempty"` + Success string `json:"success,omitempty"` +} + +type DeleteEventsParams struct { + p map[string]interface{} +} + +func (p *DeleteEventsParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["enddate"]; found { + u.Set("enddate", v.(string)) + } + if v, found := p.p["ids"]; found { + vv := strings.Join(v.([]string), ",") + u.Set("ids", vv) + } + if v, found := p.p["startdate"]; found { + u.Set("startdate", v.(string)) + } + if v, found := p.p["type"]; found { + u.Set("type", v.(string)) + } + return u +} + +func (p *DeleteEventsParams) SetEnddate(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["enddate"] = v + return +} + +func (p *DeleteEventsParams) SetIds(v []string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["ids"] = v + return +} + +func (p *DeleteEventsParams) SetStartdate(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["startdate"] = v + return +} + +func (p *DeleteEventsParams) SetType(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["type"] = v + return +} + +// You should always use this function to get a new DeleteEventsParams instance, +// as then you are sure you have configured all required params +func (s *EventService) NewDeleteEventsParams() *DeleteEventsParams { + p := &DeleteEventsParams{} + p.p = make(map[string]interface{}) + return p +} + +// Delete one or more events. +func (s *EventService) DeleteEvents(p *DeleteEventsParams) (*DeleteEventsResponse, error) { + resp, err := s.cs.newRequest("deleteEvents", p.toURLValues()) + if err != nil { + return nil, err + } + + var r DeleteEventsResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + return &r, nil +} + +type DeleteEventsResponse struct { + Displaytext string `json:"displaytext,omitempty"` + Success string `json:"success,omitempty"` +} diff --git a/vendor/github.com/xanzy/go-cloudstack/cloudstack/FirewallService.go b/vendor/github.com/xanzy/go-cloudstack/cloudstack/FirewallService.go new file mode 100644 index 000000000..08cb56418 --- /dev/null +++ b/vendor/github.com/xanzy/go-cloudstack/cloudstack/FirewallService.go @@ -0,0 +1,2462 @@ +// +// Copyright 2014, Sander van Harmelen +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License.
+// + +package cloudstack + +import ( + "encoding/json" + "fmt" + "net/url" + "strconv" + "strings" +) + +type ListPortForwardingRulesParams struct { + p map[string]interface{} +} + +func (p *ListPortForwardingRulesParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["account"]; found { + u.Set("account", v.(string)) + } + if v, found := p.p["domainid"]; found { + u.Set("domainid", v.(string)) + } + if v, found := p.p["fordisplay"]; found { + vv := strconv.FormatBool(v.(bool)) + u.Set("fordisplay", vv) + } + if v, found := p.p["id"]; found { + u.Set("id", v.(string)) + } + if v, found := p.p["ipaddressid"]; found { + u.Set("ipaddressid", v.(string)) + } + if v, found := p.p["isrecursive"]; found { + vv := strconv.FormatBool(v.(bool)) + u.Set("isrecursive", vv) + } + if v, found := p.p["keyword"]; found { + u.Set("keyword", v.(string)) + } + if v, found := p.p["listall"]; found { + vv := strconv.FormatBool(v.(bool)) + u.Set("listall", vv) + } + if v, found := p.p["networkid"]; found { + u.Set("networkid", v.(string)) + } + if v, found := p.p["page"]; found { + vv := strconv.Itoa(v.(int)) + u.Set("page", vv) + } + if v, found := p.p["pagesize"]; found { + vv := strconv.Itoa(v.(int)) + u.Set("pagesize", vv) + } + if v, found := p.p["projectid"]; found { + u.Set("projectid", v.(string)) + } + if v, found := p.p["tags"]; found { + i := 0 + for k, vv := range v.(map[string]string) { + u.Set(fmt.Sprintf("tags[%d].key", i), k) + u.Set(fmt.Sprintf("tags[%d].value", i), vv) + i++ + } + } + return u +} + +func (p *ListPortForwardingRulesParams) SetAccount(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["account"] = v + return +} + +func (p *ListPortForwardingRulesParams) SetDomainid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["domainid"] = v + return +} + +func (p *ListPortForwardingRulesParams) SetFordisplay(v bool) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["fordisplay"] = v + return +} + +func (p *ListPortForwardingRulesParams) SetId(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["id"] = v + return +} + +func (p *ListPortForwardingRulesParams) SetIpaddressid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["ipaddressid"] = v + return +} + +func (p *ListPortForwardingRulesParams) SetIsrecursive(v bool) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["isrecursive"] = v + return +} + +func (p *ListPortForwardingRulesParams) SetKeyword(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["keyword"] = v + return +} + +func (p *ListPortForwardingRulesParams) SetListall(v bool) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["listall"] = v + return +} + +func (p *ListPortForwardingRulesParams) SetNetworkid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["networkid"] = v + return +} + +func (p *ListPortForwardingRulesParams) SetPage(v int) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["page"] = v + return +} + +func (p *ListPortForwardingRulesParams) SetPagesize(v int) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["pagesize"] = v + return +} + +func (p *ListPortForwardingRulesParams) SetProjectid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["projectid"] = v + return +} + +func (p *ListPortForwardingRulesParams) SetTags(v map[string]string) { + if 
p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["tags"] = v + return +} + +// You should always use this function to get a new ListPortForwardingRulesParams instance, +// as then you are sure you have configured all required params +func (s *FirewallService) NewListPortForwardingRulesParams() *ListPortForwardingRulesParams { + p := &ListPortForwardingRulesParams{} + p.p = make(map[string]interface{}) + return p +} + +// This is a courtesy helper function, which in some cases may not work as expected! +func (s *FirewallService) GetPortForwardingRuleByID(id string) (*PortForwardingRule, int, error) { + p := &ListPortForwardingRulesParams{} + p.p = make(map[string]interface{}) + + p.p["id"] = id + + l, err := s.ListPortForwardingRules(p) + if err != nil { + if strings.Contains(err.Error(), fmt.Sprintf( + "Invalid parameter id value=%s due to incorrect long value format, "+ + "or entity does not exist", id)) { + return nil, 0, fmt.Errorf("No match found for %s: %+v", id, l) + } + return nil, -1, err + } + + if l.Count == 0 { + // If no matches, search all projects + p.p["projectid"] = "-1" + + l, err = s.ListPortForwardingRules(p) + if err != nil { + if strings.Contains(err.Error(), fmt.Sprintf( + "Invalid parameter id value=%s due to incorrect long value format, "+ + "or entity does not exist", id)) { + return nil, 0, fmt.Errorf("No match found for %s: %+v", id, l) + } + return nil, -1, err + } + } + + if l.Count == 0 { + return nil, l.Count, fmt.Errorf("No match found for %s: %+v", id, l) + } + + if l.Count == 1 { + return l.PortForwardingRules[0], l.Count, nil + } + return nil, l.Count, fmt.Errorf("There is more than one result for PortForwardingRule UUID: %s!", id) +} + +// Lists all port forwarding rules for an IP address. +func (s *FirewallService) ListPortForwardingRules(p *ListPortForwardingRulesParams) (*ListPortForwardingRulesResponse, error) { + resp, err := s.cs.newRequest("listPortForwardingRules", p.toURLValues()) + if err != nil { + return nil, err + } + + var r ListPortForwardingRulesResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + return &r, nil +} + +type ListPortForwardingRulesResponse struct { + Count int `json:"count"` + PortForwardingRules []*PortForwardingRule `json:"portforwardingrule"` +} + +type PortForwardingRule struct { + Cidrlist string `json:"cidrlist,omitempty"` + Fordisplay bool `json:"fordisplay,omitempty"` + Id string `json:"id,omitempty"` + Ipaddress string `json:"ipaddress,omitempty"` + Ipaddressid string `json:"ipaddressid,omitempty"` + Networkid string `json:"networkid,omitempty"` + Privateendport string `json:"privateendport,omitempty"` + Privateport string `json:"privateport,omitempty"` + Protocol string `json:"protocol,omitempty"` + Publicendport string `json:"publicendport,omitempty"` + Publicport string `json:"publicport,omitempty"` + State string `json:"state,omitempty"` + Tags []struct { + Account string `json:"account,omitempty"` + Customer string `json:"customer,omitempty"` + Domain string `json:"domain,omitempty"` + Domainid string `json:"domainid,omitempty"` + Key string `json:"key,omitempty"` + Project string `json:"project,omitempty"` + Projectid string `json:"projectid,omitempty"` + Resourceid string `json:"resourceid,omitempty"` + Resourcetype string `json:"resourcetype,omitempty"` + Value string `json:"value,omitempty"` + } `json:"tags,omitempty"` + Virtualmachinedisplayname string `json:"virtualmachinedisplayname,omitempty"` + Virtualmachineid string `json:"virtualmachineid,omitempty"` +
Virtualmachinename string `json:"virtualmachinename,omitempty"` + Vmguestip string `json:"vmguestip,omitempty"` +} + +type CreatePortForwardingRuleParams struct { + p map[string]interface{} +} + +func (p *CreatePortForwardingRuleParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["cidrlist"]; found { + vv := strings.Join(v.([]string), ",") + u.Set("cidrlist", vv) + } + if v, found := p.p["fordisplay"]; found { + vv := strconv.FormatBool(v.(bool)) + u.Set("fordisplay", vv) + } + if v, found := p.p["ipaddressid"]; found { + u.Set("ipaddressid", v.(string)) + } + if v, found := p.p["networkid"]; found { + u.Set("networkid", v.(string)) + } + if v, found := p.p["openfirewall"]; found { + vv := strconv.FormatBool(v.(bool)) + u.Set("openfirewall", vv) + } + if v, found := p.p["privateendport"]; found { + vv := strconv.Itoa(v.(int)) + u.Set("privateendport", vv) + } + if v, found := p.p["privateport"]; found { + vv := strconv.Itoa(v.(int)) + u.Set("privateport", vv) + } + if v, found := p.p["protocol"]; found { + u.Set("protocol", v.(string)) + } + if v, found := p.p["publicendport"]; found { + vv := strconv.Itoa(v.(int)) + u.Set("publicendport", vv) + } + if v, found := p.p["publicport"]; found { + vv := strconv.Itoa(v.(int)) + u.Set("publicport", vv) + } + if v, found := p.p["virtualmachineid"]; found { + u.Set("virtualmachineid", v.(string)) + } + if v, found := p.p["vmguestip"]; found { + u.Set("vmguestip", v.(string)) + } + return u +} + +func (p *CreatePortForwardingRuleParams) SetCidrlist(v []string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["cidrlist"] = v + return +} + +func (p *CreatePortForwardingRuleParams) SetFordisplay(v bool) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["fordisplay"] = v + return +} + +func (p *CreatePortForwardingRuleParams) SetIpaddressid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["ipaddressid"] = v + return +} + +func (p *CreatePortForwardingRuleParams) SetNetworkid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["networkid"] = v + return +} + +func (p *CreatePortForwardingRuleParams) SetOpenfirewall(v bool) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["openfirewall"] = v + return +} + +func (p *CreatePortForwardingRuleParams) SetPrivateendport(v int) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["privateendport"] = v + return +} + +func (p *CreatePortForwardingRuleParams) SetPrivateport(v int) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["privateport"] = v + return +} + +func (p *CreatePortForwardingRuleParams) SetProtocol(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["protocol"] = v + return +} + +func (p *CreatePortForwardingRuleParams) SetPublicendport(v int) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["publicendport"] = v + return +} + +func (p *CreatePortForwardingRuleParams) SetPublicport(v int) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["publicport"] = v + return +} + +func (p *CreatePortForwardingRuleParams) SetVirtualmachineid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["virtualmachineid"] = v + return +} + +func (p *CreatePortForwardingRuleParams) SetVmguestip(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["vmguestip"] = v + return +} + +// You should always use this function to get a 
new CreatePortForwardingRuleParams instance, +// as then you are sure you have configured all required params +func (s *FirewallService) NewCreatePortForwardingRuleParams(ipaddressid string, privateport int, protocol string, publicport int, virtualmachineid string) *CreatePortForwardingRuleParams { + p := &CreatePortForwardingRuleParams{} + p.p = make(map[string]interface{}) + p.p["ipaddressid"] = ipaddressid + p.p["privateport"] = privateport + p.p["protocol"] = protocol + p.p["publicport"] = publicport + p.p["virtualmachineid"] = virtualmachineid + return p +} + +// Creates a port forwarding rule +func (s *FirewallService) CreatePortForwardingRule(p *CreatePortForwardingRuleParams) (*CreatePortForwardingRuleResponse, error) { + resp, err := s.cs.newRequest("createPortForwardingRule", p.toURLValues()) + if err != nil { + return nil, err + } + + var r CreatePortForwardingRuleResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + + // If we have a async client, we need to wait for the async result + if s.cs.async { + b, err := s.cs.GetAsyncJobResult(r.JobID, s.cs.timeout) + if err != nil { + if err == AsyncTimeoutErr { + return &r, err + } + return nil, err + } + + b, err = getRawValue(b) + if err != nil { + return nil, err + } + + if err := json.Unmarshal(b, &r); err != nil { + return nil, err + } + } + return &r, nil +} + +type CreatePortForwardingRuleResponse struct { + JobID string `json:"jobid,omitempty"` + Cidrlist string `json:"cidrlist,omitempty"` + Fordisplay bool `json:"fordisplay,omitempty"` + Id string `json:"id,omitempty"` + Ipaddress string `json:"ipaddress,omitempty"` + Ipaddressid string `json:"ipaddressid,omitempty"` + Networkid string `json:"networkid,omitempty"` + Privateendport string `json:"privateendport,omitempty"` + Privateport string `json:"privateport,omitempty"` + Protocol string `json:"protocol,omitempty"` + Publicendport string `json:"publicendport,omitempty"` + Publicport string `json:"publicport,omitempty"` + State string `json:"state,omitempty"` + Tags []struct { + Account string `json:"account,omitempty"` + Customer string `json:"customer,omitempty"` + Domain string `json:"domain,omitempty"` + Domainid string `json:"domainid,omitempty"` + Key string `json:"key,omitempty"` + Project string `json:"project,omitempty"` + Projectid string `json:"projectid,omitempty"` + Resourceid string `json:"resourceid,omitempty"` + Resourcetype string `json:"resourcetype,omitempty"` + Value string `json:"value,omitempty"` + } `json:"tags,omitempty"` + Virtualmachinedisplayname string `json:"virtualmachinedisplayname,omitempty"` + Virtualmachineid string `json:"virtualmachineid,omitempty"` + Virtualmachinename string `json:"virtualmachinename,omitempty"` + Vmguestip string `json:"vmguestip,omitempty"` +} + +type DeletePortForwardingRuleParams struct { + p map[string]interface{} +} + +func (p *DeletePortForwardingRuleParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["id"]; found { + u.Set("id", v.(string)) + } + return u +} + +func (p *DeletePortForwardingRuleParams) SetId(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["id"] = v + return +} + +// You should always use this function to get a new DeletePortForwardingRuleParams instance, +// as then you are sure you have configured all required params +func (s *FirewallService) NewDeletePortForwardingRuleParams(id string) *DeletePortForwardingRuleParams { + p := &DeletePortForwardingRuleParams{} + p.p = 
make(map[string]interface{}) + p.p["id"] = id + return p +} + +// Deletes a port forwarding rule +func (s *FirewallService) DeletePortForwardingRule(p *DeletePortForwardingRuleParams) (*DeletePortForwardingRuleResponse, error) { + resp, err := s.cs.newRequest("deletePortForwardingRule", p.toURLValues()) + if err != nil { + return nil, err + } + + var r DeletePortForwardingRuleResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + + // If we have a async client, we need to wait for the async result + if s.cs.async { + b, err := s.cs.GetAsyncJobResult(r.JobID, s.cs.timeout) + if err != nil { + if err == AsyncTimeoutErr { + return &r, err + } + return nil, err + } + + if err := json.Unmarshal(b, &r); err != nil { + return nil, err + } + } + return &r, nil +} + +type DeletePortForwardingRuleResponse struct { + JobID string `json:"jobid,omitempty"` + Displaytext string `json:"displaytext,omitempty"` + Success bool `json:"success,omitempty"` +} + +type UpdatePortForwardingRuleParams struct { + p map[string]interface{} +} + +func (p *UpdatePortForwardingRuleParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["customid"]; found { + u.Set("customid", v.(string)) + } + if v, found := p.p["fordisplay"]; found { + vv := strconv.FormatBool(v.(bool)) + u.Set("fordisplay", vv) + } + if v, found := p.p["id"]; found { + u.Set("id", v.(string)) + } + if v, found := p.p["ipaddressid"]; found { + u.Set("ipaddressid", v.(string)) + } + if v, found := p.p["privateip"]; found { + u.Set("privateip", v.(string)) + } + if v, found := p.p["privateport"]; found { + u.Set("privateport", v.(string)) + } + if v, found := p.p["protocol"]; found { + u.Set("protocol", v.(string)) + } + if v, found := p.p["publicport"]; found { + u.Set("publicport", v.(string)) + } + if v, found := p.p["virtualmachineid"]; found { + u.Set("virtualmachineid", v.(string)) + } + return u +} + +func (p *UpdatePortForwardingRuleParams) SetCustomid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["customid"] = v + return +} + +func (p *UpdatePortForwardingRuleParams) SetFordisplay(v bool) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["fordisplay"] = v + return +} + +func (p *UpdatePortForwardingRuleParams) SetId(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["id"] = v + return +} + +func (p *UpdatePortForwardingRuleParams) SetIpaddressid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["ipaddressid"] = v + return +} + +func (p *UpdatePortForwardingRuleParams) SetPrivateip(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["privateip"] = v + return +} + +func (p *UpdatePortForwardingRuleParams) SetPrivateport(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["privateport"] = v + return +} + +func (p *UpdatePortForwardingRuleParams) SetProtocol(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["protocol"] = v + return +} + +func (p *UpdatePortForwardingRuleParams) SetPublicport(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["publicport"] = v + return +} + +func (p *UpdatePortForwardingRuleParams) SetVirtualmachineid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["virtualmachineid"] = v + return +} + +// You should always use this function to get a new UpdatePortForwardingRuleParams instance, +// as then you are sure you 
have configured all required params +func (s *FirewallService) NewUpdatePortForwardingRuleParams(id string) *UpdatePortForwardingRuleParams { + p := &UpdatePortForwardingRuleParams{} + p.p = make(map[string]interface{}) + p.p["id"] = id + return p +} + +// Updates a port forwarding rule. Only the private port and the virtual machine can be updated. +func (s *FirewallService) UpdatePortForwardingRule(p *UpdatePortForwardingRuleParams) (*UpdatePortForwardingRuleResponse, error) { + resp, err := s.cs.newRequest("updatePortForwardingRule", p.toURLValues()) + if err != nil { + return nil, err + } + + var r UpdatePortForwardingRuleResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + + // If we have a async client, we need to wait for the async result + if s.cs.async { + b, err := s.cs.GetAsyncJobResult(r.JobID, s.cs.timeout) + if err != nil { + if err == AsyncTimeoutErr { + return &r, err + } + return nil, err + } + + b, err = getRawValue(b) + if err != nil { + return nil, err + } + + if err := json.Unmarshal(b, &r); err != nil { + return nil, err + } + } + return &r, nil +} + +type UpdatePortForwardingRuleResponse struct { + JobID string `json:"jobid,omitempty"` + Cidrlist string `json:"cidrlist,omitempty"` + Fordisplay bool `json:"fordisplay,omitempty"` + Id string `json:"id,omitempty"` + Ipaddress string `json:"ipaddress,omitempty"` + Ipaddressid string `json:"ipaddressid,omitempty"` + Networkid string `json:"networkid,omitempty"` + Privateendport string `json:"privateendport,omitempty"` + Privateport string `json:"privateport,omitempty"` + Protocol string `json:"protocol,omitempty"` + Publicendport string `json:"publicendport,omitempty"` + Publicport string `json:"publicport,omitempty"` + State string `json:"state,omitempty"` + Tags []struct { + Account string `json:"account,omitempty"` + Customer string `json:"customer,omitempty"` + Domain string `json:"domain,omitempty"` + Domainid string `json:"domainid,omitempty"` + Key string `json:"key,omitempty"` + Project string `json:"project,omitempty"` + Projectid string `json:"projectid,omitempty"` + Resourceid string `json:"resourceid,omitempty"` + Resourcetype string `json:"resourcetype,omitempty"` + Value string `json:"value,omitempty"` + } `json:"tags,omitempty"` + Virtualmachinedisplayname string `json:"virtualmachinedisplayname,omitempty"` + Virtualmachineid string `json:"virtualmachineid,omitempty"` + Virtualmachinename string `json:"virtualmachinename,omitempty"` + Vmguestip string `json:"vmguestip,omitempty"` +} + +type CreateFirewallRuleParams struct { + p map[string]interface{} +} + +func (p *CreateFirewallRuleParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["cidrlist"]; found { + vv := strings.Join(v.([]string), ",") + u.Set("cidrlist", vv) + } + if v, found := p.p["endport"]; found { + vv := strconv.Itoa(v.(int)) + u.Set("endport", vv) + } + if v, found := p.p["fordisplay"]; found { + vv := strconv.FormatBool(v.(bool)) + u.Set("fordisplay", vv) + } + if v, found := p.p["icmpcode"]; found { + vv := strconv.Itoa(v.(int)) + u.Set("icmpcode", vv) + } + if v, found := p.p["icmptype"]; found { + vv := strconv.Itoa(v.(int)) + u.Set("icmptype", vv) + } + if v, found := p.p["ipaddressid"]; found { + u.Set("ipaddressid", v.(string)) + } + if v, found := p.p["protocol"]; found { + u.Set("protocol", v.(string)) + } + if v, found := p.p["startport"]; found { + vv := strconv.Itoa(v.(int)) + u.Set("startport", vv) + } + if v, found := p.p["type"]; found { + 
u.Set("type", v.(string)) + } + return u +} + +func (p *CreateFirewallRuleParams) SetCidrlist(v []string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["cidrlist"] = v + return +} + +func (p *CreateFirewallRuleParams) SetEndport(v int) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["endport"] = v + return +} + +func (p *CreateFirewallRuleParams) SetFordisplay(v bool) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["fordisplay"] = v + return +} + +func (p *CreateFirewallRuleParams) SetIcmpcode(v int) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["icmpcode"] = v + return +} + +func (p *CreateFirewallRuleParams) SetIcmptype(v int) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["icmptype"] = v + return +} + +func (p *CreateFirewallRuleParams) SetIpaddressid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["ipaddressid"] = v + return +} + +func (p *CreateFirewallRuleParams) SetProtocol(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["protocol"] = v + return +} + +func (p *CreateFirewallRuleParams) SetStartport(v int) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["startport"] = v + return +} + +func (p *CreateFirewallRuleParams) SetType(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["firewallType"] = v + return +} + +// You should always use this function to get a new CreateFirewallRuleParams instance, +// as then you are sure you have configured all required params +func (s *FirewallService) NewCreateFirewallRuleParams(ipaddressid string, protocol string) *CreateFirewallRuleParams { + p := &CreateFirewallRuleParams{} + p.p = make(map[string]interface{}) + p.p["ipaddressid"] = ipaddressid + p.p["protocol"] = protocol + return p +} + +// Creates a firewall rule for a given ip address +func (s *FirewallService) CreateFirewallRule(p *CreateFirewallRuleParams) (*CreateFirewallRuleResponse, error) { + resp, err := s.cs.newRequest("createFirewallRule", p.toURLValues()) + if err != nil { + return nil, err + } + + var r CreateFirewallRuleResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + + // If we have a async client, we need to wait for the async result + if s.cs.async { + b, err := s.cs.GetAsyncJobResult(r.JobID, s.cs.timeout) + if err != nil { + if err == AsyncTimeoutErr { + return &r, err + } + return nil, err + } + + b, err = getRawValue(b) + if err != nil { + return nil, err + } + + if err := json.Unmarshal(b, &r); err != nil { + return nil, err + } + } + return &r, nil +} + +type CreateFirewallRuleResponse struct { + JobID string `json:"jobid,omitempty"` + Cidrlist string `json:"cidrlist,omitempty"` + Endport string `json:"endport,omitempty"` + Fordisplay bool `json:"fordisplay,omitempty"` + Icmpcode int `json:"icmpcode,omitempty"` + Icmptype int `json:"icmptype,omitempty"` + Id string `json:"id,omitempty"` + Ipaddress string `json:"ipaddress,omitempty"` + Ipaddressid string `json:"ipaddressid,omitempty"` + Networkid string `json:"networkid,omitempty"` + Protocol string `json:"protocol,omitempty"` + Startport string `json:"startport,omitempty"` + State string `json:"state,omitempty"` + Tags []struct { + Account string `json:"account,omitempty"` + Customer string `json:"customer,omitempty"` + Domain string `json:"domain,omitempty"` + Domainid string `json:"domainid,omitempty"` + Key string `json:"key,omitempty"` + Project string `json:"project,omitempty"` + 
Projectid string `json:"projectid,omitempty"` + Resourceid string `json:"resourceid,omitempty"` + Resourcetype string `json:"resourcetype,omitempty"` + Value string `json:"value,omitempty"` + } `json:"tags,omitempty"` +} + +type DeleteFirewallRuleParams struct { + p map[string]interface{} +} + +func (p *DeleteFirewallRuleParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["id"]; found { + u.Set("id", v.(string)) + } + return u +} + +func (p *DeleteFirewallRuleParams) SetId(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["id"] = v + return +} + +// You should always use this function to get a new DeleteFirewallRuleParams instance, +// as then you are sure you have configured all required params +func (s *FirewallService) NewDeleteFirewallRuleParams(id string) *DeleteFirewallRuleParams { + p := &DeleteFirewallRuleParams{} + p.p = make(map[string]interface{}) + p.p["id"] = id + return p +} + +// Deletes a firewall rule +func (s *FirewallService) DeleteFirewallRule(p *DeleteFirewallRuleParams) (*DeleteFirewallRuleResponse, error) { + resp, err := s.cs.newRequest("deleteFirewallRule", p.toURLValues()) + if err != nil { + return nil, err + } + + var r DeleteFirewallRuleResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + + // If we have a async client, we need to wait for the async result + if s.cs.async { + b, err := s.cs.GetAsyncJobResult(r.JobID, s.cs.timeout) + if err != nil { + if err == AsyncTimeoutErr { + return &r, err + } + return nil, err + } + + if err := json.Unmarshal(b, &r); err != nil { + return nil, err + } + } + return &r, nil +} + +type DeleteFirewallRuleResponse struct { + JobID string `json:"jobid,omitempty"` + Displaytext string `json:"displaytext,omitempty"` + Success bool `json:"success,omitempty"` +} + +type ListFirewallRulesParams struct { + p map[string]interface{} +} + +func (p *ListFirewallRulesParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["account"]; found { + u.Set("account", v.(string)) + } + if v, found := p.p["domainid"]; found { + u.Set("domainid", v.(string)) + } + if v, found := p.p["fordisplay"]; found { + vv := strconv.FormatBool(v.(bool)) + u.Set("fordisplay", vv) + } + if v, found := p.p["id"]; found { + u.Set("id", v.(string)) + } + if v, found := p.p["ipaddressid"]; found { + u.Set("ipaddressid", v.(string)) + } + if v, found := p.p["isrecursive"]; found { + vv := strconv.FormatBool(v.(bool)) + u.Set("isrecursive", vv) + } + if v, found := p.p["keyword"]; found { + u.Set("keyword", v.(string)) + } + if v, found := p.p["listall"]; found { + vv := strconv.FormatBool(v.(bool)) + u.Set("listall", vv) + } + if v, found := p.p["networkid"]; found { + u.Set("networkid", v.(string)) + } + if v, found := p.p["page"]; found { + vv := strconv.Itoa(v.(int)) + u.Set("page", vv) + } + if v, found := p.p["pagesize"]; found { + vv := strconv.Itoa(v.(int)) + u.Set("pagesize", vv) + } + if v, found := p.p["projectid"]; found { + u.Set("projectid", v.(string)) + } + if v, found := p.p["tags"]; found { + i := 0 + for k, vv := range v.(map[string]string) { + u.Set(fmt.Sprintf("tags[%d].key", i), k) + u.Set(fmt.Sprintf("tags[%d].value", i), vv) + i++ + } + } + return u +} + +func (p *ListFirewallRulesParams) SetAccount(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["account"] = v + return +} + +func (p *ListFirewallRulesParams) SetDomainid(v string) { + if p.p == nil { 
+ p.p = make(map[string]interface{}) + } + p.p["domainid"] = v + return +} + +func (p *ListFirewallRulesParams) SetFordisplay(v bool) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["fordisplay"] = v + return +} + +func (p *ListFirewallRulesParams) SetId(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["id"] = v + return +} + +func (p *ListFirewallRulesParams) SetIpaddressid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["ipaddressid"] = v + return +} + +func (p *ListFirewallRulesParams) SetIsrecursive(v bool) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["isrecursive"] = v + return +} + +func (p *ListFirewallRulesParams) SetKeyword(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["keyword"] = v + return +} + +func (p *ListFirewallRulesParams) SetListall(v bool) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["listall"] = v + return +} + +func (p *ListFirewallRulesParams) SetNetworkid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["networkid"] = v + return +} + +func (p *ListFirewallRulesParams) SetPage(v int) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["page"] = v + return +} + +func (p *ListFirewallRulesParams) SetPagesize(v int) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["pagesize"] = v + return +} + +func (p *ListFirewallRulesParams) SetProjectid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["projectid"] = v + return +} + +func (p *ListFirewallRulesParams) SetTags(v map[string]string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["tags"] = v + return +} + +// You should always use this function to get a new ListFirewallRulesParams instance, +// as then you are sure you have configured all required params +func (s *FirewallService) NewListFirewallRulesParams() *ListFirewallRulesParams { + p := &ListFirewallRulesParams{} + p.p = make(map[string]interface{}) + return p +} + +// This is a courtesy helper function, which in some cases may not work as expected! +func (s *FirewallService) GetFirewallRuleByID(id string) (*FirewallRule, int, error) { + p := &ListFirewallRulesParams{} + p.p = make(map[string]interface{}) + + p.p["id"] = id + + l, err := s.ListFirewallRules(p) + if err != nil { + if strings.Contains(err.Error(), fmt.Sprintf( + "Invalid parameter id value=%s due to incorrect long value format, "+ + "or entity does not exist", id)) { + return nil, 0, fmt.Errorf("No match found for %s: %+v", id, l) + } + return nil, -1, err + } + + if l.Count == 0 { + // If no matches, search all projects + p.p["projectid"] = "-1" + + l, err = s.ListFirewallRules(p) + if err != nil { + if strings.Contains(err.Error(), fmt.Sprintf( + "Invalid parameter id value=%s due to incorrect long value format, "+ + "or entity does not exist", id)) { + return nil, 0, fmt.Errorf("No match found for %s: %+v", id, l) + } + return nil, -1, err + } + } + + if l.Count == 0 { + return nil, l.Count, fmt.Errorf("No match found for %s: %+v", id, l) + } + + if l.Count == 1 { + return l.FirewallRules[0], l.Count, nil + } + return nil, l.Count, fmt.Errorf("There is more than one result for FirewallRule UUID: %s!", id) +} + +// Lists all firewall rules for an IP address. 
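
For orientation, here is a minimal sketch of how a consumer drives the params-builder pattern used throughout this vendored service: required arguments go into the New...Params constructor, optional ones through the Set... methods. The endpoint, credentials, and UUID below are placeholders, and `NewAsyncClient` is assumed to be this package's asynchronous client constructor; the snippet is illustration only, not part of the vendored file.

    package main

    import (
    	"fmt"
    	"log"

    	"github.com/xanzy/go-cloudstack/cloudstack"
    )

    func main() {
    	// Placeholder endpoint and keys; the final bool toggles SSL verification.
    	cs := cloudstack.NewAsyncClient("https://cloud.example.com/client/api", "API_KEY", "SECRET_KEY", true)

    	// List the firewall rules on one public IP (the UUID is a placeholder).
    	p := cs.Firewall.NewListFirewallRulesParams()
    	p.SetIpaddressid("PUBLIC_IP_UUID")

    	l, err := cs.Firewall.ListFirewallRules(p)
    	if err != nil {
    		log.Fatal(err)
    	}
    	for _, rule := range l.FirewallRules {
    		fmt.Printf("%s %s %s-%s\n", rule.Id, rule.Protocol, rule.Startport, rule.Endport)
    	}
    }
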
+func (s *FirewallService) ListFirewallRules(p *ListFirewallRulesParams) (*ListFirewallRulesResponse, error) { + resp, err := s.cs.newRequest("listFirewallRules", p.toURLValues()) + if err != nil { + return nil, err + } + + var r ListFirewallRulesResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + return &r, nil +} + +type ListFirewallRulesResponse struct { + Count int `json:"count"` + FirewallRules []*FirewallRule `json:"firewallrule"` +} + +type FirewallRule struct { + Cidrlist string `json:"cidrlist,omitempty"` + Endport string `json:"endport,omitempty"` + Fordisplay bool `json:"fordisplay,omitempty"` + Icmpcode int `json:"icmpcode,omitempty"` + Icmptype int `json:"icmptype,omitempty"` + Id string `json:"id,omitempty"` + Ipaddress string `json:"ipaddress,omitempty"` + Ipaddressid string `json:"ipaddressid,omitempty"` + Networkid string `json:"networkid,omitempty"` + Protocol string `json:"protocol,omitempty"` + Startport string `json:"startport,omitempty"` + State string `json:"state,omitempty"` + Tags []struct { + Account string `json:"account,omitempty"` + Customer string `json:"customer,omitempty"` + Domain string `json:"domain,omitempty"` + Domainid string `json:"domainid,omitempty"` + Key string `json:"key,omitempty"` + Project string `json:"project,omitempty"` + Projectid string `json:"projectid,omitempty"` + Resourceid string `json:"resourceid,omitempty"` + Resourcetype string `json:"resourcetype,omitempty"` + Value string `json:"value,omitempty"` + } `json:"tags,omitempty"` +} + +type UpdateFirewallRuleParams struct { + p map[string]interface{} +} + +func (p *UpdateFirewallRuleParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["customid"]; found { + u.Set("customid", v.(string)) + } + if v, found := p.p["fordisplay"]; found { + vv := strconv.FormatBool(v.(bool)) + u.Set("fordisplay", vv) + } + if v, found := p.p["id"]; found { + u.Set("id", v.(string)) + } + return u +} + +func (p *UpdateFirewallRuleParams) SetCustomid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["customid"] = v + return +} + +func (p *UpdateFirewallRuleParams) SetFordisplay(v bool) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["fordisplay"] = v + return +} + +func (p *UpdateFirewallRuleParams) SetId(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["id"] = v + return +} + +// You should always use this function to get a new UpdateFirewallRuleParams instance, +// as then you are sure you have configured all required params +func (s *FirewallService) NewUpdateFirewallRuleParams(id string) *UpdateFirewallRuleParams { + p := &UpdateFirewallRuleParams{} + p.p = make(map[string]interface{}) + p.p["id"] = id + return p +} + +// Updates firewall rule +func (s *FirewallService) UpdateFirewallRule(p *UpdateFirewallRuleParams) (*UpdateFirewallRuleResponse, error) { + resp, err := s.cs.newRequest("updateFirewallRule", p.toURLValues()) + if err != nil { + return nil, err + } + + var r UpdateFirewallRuleResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + + // If we have a async client, we need to wait for the async result + if s.cs.async { + b, err := s.cs.GetAsyncJobResult(r.JobID, s.cs.timeout) + if err != nil { + if err == AsyncTimeoutErr { + return &r, err + } + return nil, err + } + + b, err = getRawValue(b) + if err != nil { + return nil, err + } + + if err := json.Unmarshal(b, &r); err != nil { + return nil, err + } + } 
+ return &r, nil +} + +type UpdateFirewallRuleResponse struct { + JobID string `json:"jobid,omitempty"` + Cidrlist string `json:"cidrlist,omitempty"` + Endport string `json:"endport,omitempty"` + Fordisplay bool `json:"fordisplay,omitempty"` + Icmpcode int `json:"icmpcode,omitempty"` + Icmptype int `json:"icmptype,omitempty"` + Id string `json:"id,omitempty"` + Ipaddress string `json:"ipaddress,omitempty"` + Ipaddressid string `json:"ipaddressid,omitempty"` + Networkid string `json:"networkid,omitempty"` + Protocol string `json:"protocol,omitempty"` + Startport string `json:"startport,omitempty"` + State string `json:"state,omitempty"` + Tags []struct { + Account string `json:"account,omitempty"` + Customer string `json:"customer,omitempty"` + Domain string `json:"domain,omitempty"` + Domainid string `json:"domainid,omitempty"` + Key string `json:"key,omitempty"` + Project string `json:"project,omitempty"` + Projectid string `json:"projectid,omitempty"` + Resourceid string `json:"resourceid,omitempty"` + Resourcetype string `json:"resourcetype,omitempty"` + Value string `json:"value,omitempty"` + } `json:"tags,omitempty"` +} + +type CreateEgressFirewallRuleParams struct { + p map[string]interface{} +} + +func (p *CreateEgressFirewallRuleParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["cidrlist"]; found { + vv := strings.Join(v.([]string), ",") + u.Set("cidrlist", vv) + } + if v, found := p.p["endport"]; found { + vv := strconv.Itoa(v.(int)) + u.Set("endport", vv) + } + if v, found := p.p["fordisplay"]; found { + vv := strconv.FormatBool(v.(bool)) + u.Set("fordisplay", vv) + } + if v, found := p.p["icmpcode"]; found { + vv := strconv.Itoa(v.(int)) + u.Set("icmpcode", vv) + } + if v, found := p.p["icmptype"]; found { + vv := strconv.Itoa(v.(int)) + u.Set("icmptype", vv) + } + if v, found := p.p["networkid"]; found { + u.Set("networkid", v.(string)) + } + if v, found := p.p["protocol"]; found { + u.Set("protocol", v.(string)) + } + if v, found := p.p["startport"]; found { + vv := strconv.Itoa(v.(int)) + u.Set("startport", vv) + } + if v, found := p.p["type"]; found { + u.Set("type", v.(string)) + } + return u +} + +func (p *CreateEgressFirewallRuleParams) SetCidrlist(v []string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["cidrlist"] = v + return +} + +func (p *CreateEgressFirewallRuleParams) SetEndport(v int) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["endport"] = v + return +} + +func (p *CreateEgressFirewallRuleParams) SetFordisplay(v bool) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["fordisplay"] = v + return +} + +func (p *CreateEgressFirewallRuleParams) SetIcmpcode(v int) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["icmpcode"] = v + return +} + +func (p *CreateEgressFirewallRuleParams) SetIcmptype(v int) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["icmptype"] = v + return +} + +func (p *CreateEgressFirewallRuleParams) SetNetworkid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["networkid"] = v + return +} + +func (p *CreateEgressFirewallRuleParams) SetProtocol(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["protocol"] = v + return +} + +func (p *CreateEgressFirewallRuleParams) SetStartport(v int) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["startport"] = v + return +} + +func (p *CreateEgressFirewallRuleParams) SetType(v string) { 
+ if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["type"] = v + return +} + +// You should always use this function to get a new CreateEgressFirewallRuleParams instance, +// as then you are sure you have configured all required params +func (s *FirewallService) NewCreateEgressFirewallRuleParams(networkid string, protocol string) *CreateEgressFirewallRuleParams { + p := &CreateEgressFirewallRuleParams{} + p.p = make(map[string]interface{}) + p.p["networkid"] = networkid + p.p["protocol"] = protocol + return p +} + +// Creates an egress firewall rule for a given network +func (s *FirewallService) CreateEgressFirewallRule(p *CreateEgressFirewallRuleParams) (*CreateEgressFirewallRuleResponse, error) { + resp, err := s.cs.newRequest("createEgressFirewallRule", p.toURLValues()) + if err != nil { + return nil, err + } + + var r CreateEgressFirewallRuleResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + + // If we have an async client, we need to wait for the async result + if s.cs.async { + b, err := s.cs.GetAsyncJobResult(r.JobID, s.cs.timeout) + if err != nil { + if err == AsyncTimeoutErr { + return &r, err + } + return nil, err + } + + b, err = getRawValue(b) + if err != nil { + return nil, err + } + + if err := json.Unmarshal(b, &r); err != nil { + return nil, err + } + } + return &r, nil +} + +type CreateEgressFirewallRuleResponse struct { + JobID string `json:"jobid,omitempty"` + Cidrlist string `json:"cidrlist,omitempty"` + Endport string `json:"endport,omitempty"` + Fordisplay bool `json:"fordisplay,omitempty"` + Icmpcode int `json:"icmpcode,omitempty"` + Icmptype int `json:"icmptype,omitempty"` + Id string `json:"id,omitempty"` + Ipaddress string `json:"ipaddress,omitempty"` + Ipaddressid string `json:"ipaddressid,omitempty"` + Networkid string `json:"networkid,omitempty"` + Protocol string `json:"protocol,omitempty"` + Startport string `json:"startport,omitempty"` + State string `json:"state,omitempty"` + Tags []struct { + Account string `json:"account,omitempty"` + Customer string `json:"customer,omitempty"` + Domain string `json:"domain,omitempty"` + Domainid string `json:"domainid,omitempty"` + Key string `json:"key,omitempty"` + Project string `json:"project,omitempty"` + Projectid string `json:"projectid,omitempty"` + Resourceid string `json:"resourceid,omitempty"` + Resourcetype string `json:"resourcetype,omitempty"` + Value string `json:"value,omitempty"` + } `json:"tags,omitempty"` +} + +type DeleteEgressFirewallRuleParams struct { + p map[string]interface{} +} + +func (p *DeleteEgressFirewallRuleParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["id"]; found { + u.Set("id", v.(string)) + } + return u +} + +func (p *DeleteEgressFirewallRuleParams) SetId(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["id"] = v + return +} + +// You should always use this function to get a new DeleteEgressFirewallRuleParams instance, +// as then you are sure you have configured all required params +func (s *FirewallService) NewDeleteEgressFirewallRuleParams(id string) *DeleteEgressFirewallRuleParams { + p := &DeleteEgressFirewallRuleParams{} + p.p = make(map[string]interface{}) + p.p["id"] = id + return p +} + +// Deletes an egress firewall rule +func (s *FirewallService) DeleteEgressFirewallRule(p *DeleteEgressFirewallRuleParams) (*DeleteEgressFirewallRuleResponse, error) { + resp, err := s.cs.newRequest("deleteEgressFirewallRule", p.toURLValues()) + if err 
!= nil { + return nil, err + } + + var r DeleteEgressFirewallRuleResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + + // If we have an async client, we need to wait for the async result + if s.cs.async { + b, err := s.cs.GetAsyncJobResult(r.JobID, s.cs.timeout) + if err != nil { + if err == AsyncTimeoutErr { + return &r, err + } + return nil, err + } + + if err := json.Unmarshal(b, &r); err != nil { + return nil, err + } + } + return &r, nil +} + +type DeleteEgressFirewallRuleResponse struct { + JobID string `json:"jobid,omitempty"` + Displaytext string `json:"displaytext,omitempty"` + Success bool `json:"success,omitempty"` +} + +type ListEgressFirewallRulesParams struct { + p map[string]interface{} +} + +func (p *ListEgressFirewallRulesParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["account"]; found { + u.Set("account", v.(string)) + } + if v, found := p.p["domainid"]; found { + u.Set("domainid", v.(string)) + } + if v, found := p.p["fordisplay"]; found { + vv := strconv.FormatBool(v.(bool)) + u.Set("fordisplay", vv) + } + if v, found := p.p["id"]; found { + u.Set("id", v.(string)) + } + if v, found := p.p["ipaddressid"]; found { + u.Set("ipaddressid", v.(string)) + } + if v, found := p.p["isrecursive"]; found { + vv := strconv.FormatBool(v.(bool)) + u.Set("isrecursive", vv) + } + if v, found := p.p["keyword"]; found { + u.Set("keyword", v.(string)) + } + if v, found := p.p["listall"]; found { + vv := strconv.FormatBool(v.(bool)) + u.Set("listall", vv) + } + if v, found := p.p["networkid"]; found { + u.Set("networkid", v.(string)) + } + if v, found := p.p["page"]; found { + vv := strconv.Itoa(v.(int)) + u.Set("page", vv) + } + if v, found := p.p["pagesize"]; found { + vv := strconv.Itoa(v.(int)) + u.Set("pagesize", vv) + } + if v, found := p.p["projectid"]; found { + u.Set("projectid", v.(string)) + } + if v, found := p.p["tags"]; found { + i := 0 + for k, vv := range v.(map[string]string) { + u.Set(fmt.Sprintf("tags[%d].key", i), k) + u.Set(fmt.Sprintf("tags[%d].value", i), vv) + i++ + } + } + return u +} + +func (p *ListEgressFirewallRulesParams) SetAccount(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["account"] = v + return +} + +func (p *ListEgressFirewallRulesParams) SetDomainid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["domainid"] = v + return +} + +func (p *ListEgressFirewallRulesParams) SetFordisplay(v bool) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["fordisplay"] = v + return +} + +func (p *ListEgressFirewallRulesParams) SetId(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["id"] = v + return +} + +func (p *ListEgressFirewallRulesParams) SetIpaddressid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["ipaddressid"] = v + return +} + +func (p *ListEgressFirewallRulesParams) SetIsrecursive(v bool) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["isrecursive"] = v + return +} + +func (p *ListEgressFirewallRulesParams) SetKeyword(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["keyword"] = v + return +} + +func (p *ListEgressFirewallRulesParams) SetListall(v bool) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["listall"] = v + return 
+} + +func (p *ListEgressFirewallRulesParams) SetNetworkid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["networkid"] = v + return +} + +func (p *ListEgressFirewallRulesParams) SetPage(v int) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["page"] = v + return +} + +func (p *ListEgressFirewallRulesParams) SetPagesize(v int) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["pagesize"] = v + return +} + +func (p *ListEgressFirewallRulesParams) SetProjectid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["projectid"] = v + return +} + +func (p *ListEgressFirewallRulesParams) SetTags(v map[string]string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["tags"] = v + return +} + +// You should always use this function to get a new ListEgressFirewallRulesParams instance, +// as then you are sure you have configured all required params +func (s *FirewallService) NewListEgressFirewallRulesParams() *ListEgressFirewallRulesParams { + p := &ListEgressFirewallRulesParams{} + p.p = make(map[string]interface{}) + return p +} + +// This is a courtesy helper function, which in some cases may not work as expected! +func (s *FirewallService) GetEgressFirewallRuleByID(id string) (*EgressFirewallRule, int, error) { + p := &ListEgressFirewallRulesParams{} + p.p = make(map[string]interface{}) + + p.p["id"] = id + + l, err := s.ListEgressFirewallRules(p) + if err != nil { + if strings.Contains(err.Error(), fmt.Sprintf( + "Invalid parameter id value=%s due to incorrect long value format, "+ + "or entity does not exist", id)) { + return nil, 0, fmt.Errorf("No match found for %s: %+v", id, l) + } + return nil, -1, err + } + + if l.Count == 0 { + // If no matches, search all projects + p.p["projectid"] = "-1" + + l, err = s.ListEgressFirewallRules(p) + if err != nil { + if strings.Contains(err.Error(), fmt.Sprintf( + "Invalid parameter id value=%s due to incorrect long value format, "+ + "or entity does not exist", id)) { + return nil, 0, fmt.Errorf("No match found for %s: %+v", id, l) + } + return nil, -1, err + } + } + + if l.Count == 0 { + return nil, l.Count, fmt.Errorf("No match found for %s: %+v", id, l) + } + + if l.Count == 1 { + return l.EgressFirewallRules[0], l.Count, nil + } + return nil, l.Count, fmt.Errorf("There is more than one result for EgressFirewallRule UUID: %s!", id) +} + +// Lists all egress firewall rules for a network ID. 
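
The egress calls follow the same builder shape. As a sketch under the same assumptions (reusing the `cs` client from the earlier example; the network UUID and CIDR are placeholders), creating an egress rule and reading back its ID looks like:

    // Allow egress TCP 80 from one guest CIDR on the given network.
    p := cs.Firewall.NewCreateEgressFirewallRuleParams("NETWORK_UUID", "tcp")
    p.SetCidrlist([]string{"10.1.1.0/24"})
    p.SetStartport(80)
    p.SetEndport(80)

    r, err := cs.Firewall.CreateEgressFirewallRule(p)
    if err != nil {
    	log.Fatal(err)
    }
    fmt.Println("created egress rule:", r.Id)
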
+func (s *FirewallService) ListEgressFirewallRules(p *ListEgressFirewallRulesParams) (*ListEgressFirewallRulesResponse, error) { + resp, err := s.cs.newRequest("listEgressFirewallRules", p.toURLValues()) + if err != nil { + return nil, err + } + + var r ListEgressFirewallRulesResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + return &r, nil +} + +type ListEgressFirewallRulesResponse struct { + Count int `json:"count"` + EgressFirewallRules []*EgressFirewallRule `json:"firewallrule"` +} + +type EgressFirewallRule struct { + Cidrlist string `json:"cidrlist,omitempty"` + Endport string `json:"endport,omitempty"` + Fordisplay bool `json:"fordisplay,omitempty"` + Icmpcode int `json:"icmpcode,omitempty"` + Icmptype int `json:"icmptype,omitempty"` + Id string `json:"id,omitempty"` + Ipaddress string `json:"ipaddress,omitempty"` + Ipaddressid string `json:"ipaddressid,omitempty"` + Networkid string `json:"networkid,omitempty"` + Protocol string `json:"protocol,omitempty"` + Startport string `json:"startport,omitempty"` + State string `json:"state,omitempty"` + Tags []struct { + Account string `json:"account,omitempty"` + Customer string `json:"customer,omitempty"` + Domain string `json:"domain,omitempty"` + Domainid string `json:"domainid,omitempty"` + Key string `json:"key,omitempty"` + Project string `json:"project,omitempty"` + Projectid string `json:"projectid,omitempty"` + Resourceid string `json:"resourceid,omitempty"` + Resourcetype string `json:"resourcetype,omitempty"` + Value string `json:"value,omitempty"` + } `json:"tags,omitempty"` +} + +type UpdateEgressFirewallRuleParams struct { + p map[string]interface{} +} + +func (p *UpdateEgressFirewallRuleParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["customid"]; found { + u.Set("customid", v.(string)) + } + if v, found := p.p["fordisplay"]; found { + vv := strconv.FormatBool(v.(bool)) + u.Set("fordisplay", vv) + } + if v, found := p.p["id"]; found { + u.Set("id", v.(string)) + } + return u +} + +func (p *UpdateEgressFirewallRuleParams) SetCustomid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["customid"] = v + return +} + +func (p *UpdateEgressFirewallRuleParams) SetFordisplay(v bool) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["fordisplay"] = v + return +} + +func (p *UpdateEgressFirewallRuleParams) SetId(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["id"] = v + return +} + +// You should always use this function to get a new UpdateEgressFirewallRuleParams instance, +// as then you are sure you have configured all required params +func (s *FirewallService) NewUpdateEgressFirewallRuleParams(id string) *UpdateEgressFirewallRuleParams { + p := &UpdateEgressFirewallRuleParams{} + p.p = make(map[string]interface{}) + p.p["id"] = id + return p +} + +// Updates egress firewall rule +func (s *FirewallService) UpdateEgressFirewallRule(p *UpdateEgressFirewallRuleParams) (*UpdateEgressFirewallRuleResponse, error) { + resp, err := s.cs.newRequest("updateEgressFirewallRule", p.toURLValues()) + if err != nil { + return nil, err + } + + var r UpdateEgressFirewallRuleResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + + // If we have a async client, we need to wait for the async result + if s.cs.async { + b, err := s.cs.GetAsyncJobResult(r.JobID, s.cs.timeout) + if err != nil { + if err == AsyncTimeoutErr { + return &r, err + } + return nil, err + 
} + + b, err = getRawValue(b) + if err != nil { + return nil, err + } + + if err := json.Unmarshal(b, &r); err != nil { + return nil, err + } + } + return &r, nil +} + +type UpdateEgressFirewallRuleResponse struct { + JobID string `json:"jobid,omitempty"` + Cidrlist string `json:"cidrlist,omitempty"` + Endport string `json:"endport,omitempty"` + Fordisplay bool `json:"fordisplay,omitempty"` + Icmpcode int `json:"icmpcode,omitempty"` + Icmptype int `json:"icmptype,omitempty"` + Id string `json:"id,omitempty"` + Ipaddress string `json:"ipaddress,omitempty"` + Ipaddressid string `json:"ipaddressid,omitempty"` + Networkid string `json:"networkid,omitempty"` + Protocol string `json:"protocol,omitempty"` + Startport string `json:"startport,omitempty"` + State string `json:"state,omitempty"` + Tags []struct { + Account string `json:"account,omitempty"` + Customer string `json:"customer,omitempty"` + Domain string `json:"domain,omitempty"` + Domainid string `json:"domainid,omitempty"` + Key string `json:"key,omitempty"` + Project string `json:"project,omitempty"` + Projectid string `json:"projectid,omitempty"` + Resourceid string `json:"resourceid,omitempty"` + Resourcetype string `json:"resourcetype,omitempty"` + Value string `json:"value,omitempty"` + } `json:"tags,omitempty"` +} + +type AddPaloAltoFirewallParams struct { + p map[string]interface{} +} + +func (p *AddPaloAltoFirewallParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["networkdevicetype"]; found { + u.Set("networkdevicetype", v.(string)) + } + if v, found := p.p["password"]; found { + u.Set("password", v.(string)) + } + if v, found := p.p["physicalnetworkid"]; found { + u.Set("physicalnetworkid", v.(string)) + } + if v, found := p.p["url"]; found { + u.Set("url", v.(string)) + } + if v, found := p.p["username"]; found { + u.Set("username", v.(string)) + } + return u +} + +func (p *AddPaloAltoFirewallParams) SetNetworkdevicetype(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["networkdevicetype"] = v + return +} + +func (p *AddPaloAltoFirewallParams) SetPassword(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["password"] = v + return +} + +func (p *AddPaloAltoFirewallParams) SetPhysicalnetworkid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["physicalnetworkid"] = v + return +} + +func (p *AddPaloAltoFirewallParams) SetUrl(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["url"] = v + return +} + +func (p *AddPaloAltoFirewallParams) SetUsername(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["username"] = v + return +} + +// You should always use this function to get a new AddPaloAltoFirewallParams instance, +// as then you are sure you have configured all required params +func (s *FirewallService) NewAddPaloAltoFirewallParams(networkdevicetype string, password string, physicalnetworkid string, url string, username string) *AddPaloAltoFirewallParams { + p := &AddPaloAltoFirewallParams{} + p.p = make(map[string]interface{}) + p.p["networkdevicetype"] = networkdevicetype + p.p["password"] = password + p.p["physicalnetworkid"] = physicalnetworkid + p.p["url"] = url + p.p["username"] = username + return p +} + +// Adds a Palo Alto firewall device +func (s *FirewallService) AddPaloAltoFirewall(p *AddPaloAltoFirewallParams) (*AddPaloAltoFirewallResponse, error) { + resp, err := s.cs.newRequest("addPaloAltoFirewall", p.toURLValues()) + if 
err != nil { + return nil, err + } + + var r AddPaloAltoFirewallResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + + // If we have a async client, we need to wait for the async result + if s.cs.async { + b, err := s.cs.GetAsyncJobResult(r.JobID, s.cs.timeout) + if err != nil { + if err == AsyncTimeoutErr { + return &r, err + } + return nil, err + } + + b, err = getRawValue(b) + if err != nil { + return nil, err + } + + if err := json.Unmarshal(b, &r); err != nil { + return nil, err + } + } + return &r, nil +} + +type AddPaloAltoFirewallResponse struct { + JobID string `json:"jobid,omitempty"` + Fwdevicecapacity int64 `json:"fwdevicecapacity,omitempty"` + Fwdeviceid string `json:"fwdeviceid,omitempty"` + Fwdevicename string `json:"fwdevicename,omitempty"` + Fwdevicestate string `json:"fwdevicestate,omitempty"` + Ipaddress string `json:"ipaddress,omitempty"` + Numretries string `json:"numretries,omitempty"` + Physicalnetworkid string `json:"physicalnetworkid,omitempty"` + Privateinterface string `json:"privateinterface,omitempty"` + Privatezone string `json:"privatezone,omitempty"` + Provider string `json:"provider,omitempty"` + Publicinterface string `json:"publicinterface,omitempty"` + Publiczone string `json:"publiczone,omitempty"` + Timeout string `json:"timeout,omitempty"` + Usageinterface string `json:"usageinterface,omitempty"` + Username string `json:"username,omitempty"` + Zoneid string `json:"zoneid,omitempty"` +} + +type DeletePaloAltoFirewallParams struct { + p map[string]interface{} +} + +func (p *DeletePaloAltoFirewallParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["fwdeviceid"]; found { + u.Set("fwdeviceid", v.(string)) + } + return u +} + +func (p *DeletePaloAltoFirewallParams) SetFwdeviceid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["fwdeviceid"] = v + return +} + +// You should always use this function to get a new DeletePaloAltoFirewallParams instance, +// as then you are sure you have configured all required params +func (s *FirewallService) NewDeletePaloAltoFirewallParams(fwdeviceid string) *DeletePaloAltoFirewallParams { + p := &DeletePaloAltoFirewallParams{} + p.p = make(map[string]interface{}) + p.p["fwdeviceid"] = fwdeviceid + return p +} + +// delete a Palo Alto firewall device +func (s *FirewallService) DeletePaloAltoFirewall(p *DeletePaloAltoFirewallParams) (*DeletePaloAltoFirewallResponse, error) { + resp, err := s.cs.newRequest("deletePaloAltoFirewall", p.toURLValues()) + if err != nil { + return nil, err + } + + var r DeletePaloAltoFirewallResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + + // If we have a async client, we need to wait for the async result + if s.cs.async { + b, err := s.cs.GetAsyncJobResult(r.JobID, s.cs.timeout) + if err != nil { + if err == AsyncTimeoutErr { + return &r, err + } + return nil, err + } + + if err := json.Unmarshal(b, &r); err != nil { + return nil, err + } + } + return &r, nil +} + +type DeletePaloAltoFirewallResponse struct { + JobID string `json:"jobid,omitempty"` + Displaytext string `json:"displaytext,omitempty"` + Success bool `json:"success,omitempty"` +} + +type ConfigurePaloAltoFirewallParams struct { + p map[string]interface{} +} + +func (p *ConfigurePaloAltoFirewallParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["fwdevicecapacity"]; found { + vv := strconv.FormatInt(v.(int64), 10) + 
u.Set("fwdevicecapacity", vv) + } + if v, found := p.p["fwdeviceid"]; found { + u.Set("fwdeviceid", v.(string)) + } + return u +} + +func (p *ConfigurePaloAltoFirewallParams) SetFwdevicecapacity(v int64) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["fwdevicecapacity"] = v + return +} + +func (p *ConfigurePaloAltoFirewallParams) SetFwdeviceid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["fwdeviceid"] = v + return +} + +// You should always use this function to get a new ConfigurePaloAltoFirewallParams instance, +// as then you are sure you have configured all required params +func (s *FirewallService) NewConfigurePaloAltoFirewallParams(fwdeviceid string) *ConfigurePaloAltoFirewallParams { + p := &ConfigurePaloAltoFirewallParams{} + p.p = make(map[string]interface{}) + p.p["fwdeviceid"] = fwdeviceid + return p +} + +// Configures a Palo Alto firewall device +func (s *FirewallService) ConfigurePaloAltoFirewall(p *ConfigurePaloAltoFirewallParams) (*ConfigurePaloAltoFirewallResponse, error) { + resp, err := s.cs.newRequest("configurePaloAltoFirewall", p.toURLValues()) + if err != nil { + return nil, err + } + + var r ConfigurePaloAltoFirewallResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + + // If we have a async client, we need to wait for the async result + if s.cs.async { + b, err := s.cs.GetAsyncJobResult(r.JobID, s.cs.timeout) + if err != nil { + if err == AsyncTimeoutErr { + return &r, err + } + return nil, err + } + + b, err = getRawValue(b) + if err != nil { + return nil, err + } + + if err := json.Unmarshal(b, &r); err != nil { + return nil, err + } + } + return &r, nil +} + +type ConfigurePaloAltoFirewallResponse struct { + JobID string `json:"jobid,omitempty"` + Fwdevicecapacity int64 `json:"fwdevicecapacity,omitempty"` + Fwdeviceid string `json:"fwdeviceid,omitempty"` + Fwdevicename string `json:"fwdevicename,omitempty"` + Fwdevicestate string `json:"fwdevicestate,omitempty"` + Ipaddress string `json:"ipaddress,omitempty"` + Numretries string `json:"numretries,omitempty"` + Physicalnetworkid string `json:"physicalnetworkid,omitempty"` + Privateinterface string `json:"privateinterface,omitempty"` + Privatezone string `json:"privatezone,omitempty"` + Provider string `json:"provider,omitempty"` + Publicinterface string `json:"publicinterface,omitempty"` + Publiczone string `json:"publiczone,omitempty"` + Timeout string `json:"timeout,omitempty"` + Usageinterface string `json:"usageinterface,omitempty"` + Username string `json:"username,omitempty"` + Zoneid string `json:"zoneid,omitempty"` +} + +type ListPaloAltoFirewallsParams struct { + p map[string]interface{} +} + +func (p *ListPaloAltoFirewallsParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["fwdeviceid"]; found { + u.Set("fwdeviceid", v.(string)) + } + if v, found := p.p["keyword"]; found { + u.Set("keyword", v.(string)) + } + if v, found := p.p["page"]; found { + vv := strconv.Itoa(v.(int)) + u.Set("page", vv) + } + if v, found := p.p["pagesize"]; found { + vv := strconv.Itoa(v.(int)) + u.Set("pagesize", vv) + } + if v, found := p.p["physicalnetworkid"]; found { + u.Set("physicalnetworkid", v.(string)) + } + return u +} + +func (p *ListPaloAltoFirewallsParams) SetFwdeviceid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["fwdeviceid"] = v + return +} + +func (p *ListPaloAltoFirewallsParams) SetKeyword(v string) { + if p.p == nil { + p.p = 
make(map[string]interface{}) + } + p.p["keyword"] = v + return +} + +func (p *ListPaloAltoFirewallsParams) SetPage(v int) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["page"] = v + return +} + +func (p *ListPaloAltoFirewallsParams) SetPagesize(v int) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["pagesize"] = v + return +} + +func (p *ListPaloAltoFirewallsParams) SetPhysicalnetworkid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["physicalnetworkid"] = v + return +} + +// You should always use this function to get a new ListPaloAltoFirewallsParams instance, +// as then you are sure you have configured all required params +func (s *FirewallService) NewListPaloAltoFirewallsParams() *ListPaloAltoFirewallsParams { + p := &ListPaloAltoFirewallsParams{} + p.p = make(map[string]interface{}) + return p +} + +// lists Palo Alto firewall devices in a physical network +func (s *FirewallService) ListPaloAltoFirewalls(p *ListPaloAltoFirewallsParams) (*ListPaloAltoFirewallsResponse, error) { + resp, err := s.cs.newRequest("listPaloAltoFirewalls", p.toURLValues()) + if err != nil { + return nil, err + } + + var r ListPaloAltoFirewallsResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + return &r, nil +} + +type ListPaloAltoFirewallsResponse struct { + Count int `json:"count"` + PaloAltoFirewalls []*PaloAltoFirewall `json:"paloaltofirewall"` +} + +type PaloAltoFirewall struct { + Fwdevicecapacity int64 `json:"fwdevicecapacity,omitempty"` + Fwdeviceid string `json:"fwdeviceid,omitempty"` + Fwdevicename string `json:"fwdevicename,omitempty"` + Fwdevicestate string `json:"fwdevicestate,omitempty"` + Ipaddress string `json:"ipaddress,omitempty"` + Numretries string `json:"numretries,omitempty"` + Physicalnetworkid string `json:"physicalnetworkid,omitempty"` + Privateinterface string `json:"privateinterface,omitempty"` + Privatezone string `json:"privatezone,omitempty"` + Provider string `json:"provider,omitempty"` + Publicinterface string `json:"publicinterface,omitempty"` + Publiczone string `json:"publiczone,omitempty"` + Timeout string `json:"timeout,omitempty"` + Usageinterface string `json:"usageinterface,omitempty"` + Username string `json:"username,omitempty"` + Zoneid string `json:"zoneid,omitempty"` +} diff --git a/vendor/github.com/xanzy/go-cloudstack/cloudstack/GuestOSService.go b/vendor/github.com/xanzy/go-cloudstack/cloudstack/GuestOSService.go new file mode 100644 index 000000000..1cbff4db0 --- /dev/null +++ b/vendor/github.com/xanzy/go-cloudstack/cloudstack/GuestOSService.go @@ -0,0 +1,1022 @@ +// +// Copyright 2014, Sander van Harmelen +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// + +package cloudstack + +import ( + "encoding/json" + "fmt" + "net/url" + "strconv" + "strings" +) + +type ListOsTypesParams struct { + p map[string]interface{} +} + +func (p *ListOsTypesParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["description"]; found { + u.Set("description", v.(string)) + } + if v, found := p.p["id"]; found { + u.Set("id", v.(string)) + } + if v, found := p.p["keyword"]; found { + u.Set("keyword", v.(string)) + } + if v, found := p.p["oscategoryid"]; found { + u.Set("oscategoryid", v.(string)) + } + if v, found := p.p["page"]; found { + vv := strconv.Itoa(v.(int)) + u.Set("page", vv) + } + if v, found := p.p["pagesize"]; found { + vv := strconv.Itoa(v.(int)) + u.Set("pagesize", vv) + } + return u +} + +func (p *ListOsTypesParams) SetDescription(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["description"] = v + return +} + +func (p *ListOsTypesParams) SetId(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["id"] = v + return +} + +func (p *ListOsTypesParams) SetKeyword(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["keyword"] = v + return +} + +func (p *ListOsTypesParams) SetOscategoryid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["oscategoryid"] = v + return +} + +func (p *ListOsTypesParams) SetPage(v int) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["page"] = v + return +} + +func (p *ListOsTypesParams) SetPagesize(v int) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["pagesize"] = v + return +} + +// You should always use this function to get a new ListOsTypesParams instance, +// as then you are sure you have configured all required params +func (s *GuestOSService) NewListOsTypesParams() *ListOsTypesParams { + p := &ListOsTypesParams{} + p.p = make(map[string]interface{}) + return p +} + +// This is a courtesy helper function, which in some cases may not work as expected! +func (s *GuestOSService) GetOsTypeByID(id string) (*OsType, int, error) { + p := &ListOsTypesParams{} + p.p = make(map[string]interface{}) + + p.p["id"] = id + + l, err := s.ListOsTypes(p) + if err != nil { + if strings.Contains(err.Error(), fmt.Sprintf( + "Invalid parameter id value=%s due to incorrect long value format, "+ + "or entity does not exist", id)) { + return nil, 0, fmt.Errorf("No match found for %s: %+v", id, l) + } + return nil, -1, err + } + + if l.Count == 0 { + return nil, l.Count, fmt.Errorf("No match found for %s: %+v", id, l) + } + + if l.Count == 1 { + return l.OsTypes[0], l.Count, nil + } + return nil, l.Count, fmt.Errorf("There is more than one result for OsType UUID: %s!", id) +} + +// Lists all supported OS types for this cloud. 
+func (s *GuestOSService) ListOsTypes(p *ListOsTypesParams) (*ListOsTypesResponse, error) { + resp, err := s.cs.newRequest("listOsTypes", p.toURLValues()) + if err != nil { + return nil, err + } + + var r ListOsTypesResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + return &r, nil +} + +type ListOsTypesResponse struct { + Count int `json:"count"` + OsTypes []*OsType `json:"ostype"` +} + +type OsType struct { + Description string `json:"description,omitempty"` + Id string `json:"id,omitempty"` + Isuserdefined string `json:"isuserdefined,omitempty"` + Oscategoryid string `json:"oscategoryid,omitempty"` +} + +type ListOsCategoriesParams struct { + p map[string]interface{} +} + +func (p *ListOsCategoriesParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["id"]; found { + u.Set("id", v.(string)) + } + if v, found := p.p["keyword"]; found { + u.Set("keyword", v.(string)) + } + if v, found := p.p["name"]; found { + u.Set("name", v.(string)) + } + if v, found := p.p["page"]; found { + vv := strconv.Itoa(v.(int)) + u.Set("page", vv) + } + if v, found := p.p["pagesize"]; found { + vv := strconv.Itoa(v.(int)) + u.Set("pagesize", vv) + } + return u +} + +func (p *ListOsCategoriesParams) SetId(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["id"] = v + return +} + +func (p *ListOsCategoriesParams) SetKeyword(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["keyword"] = v + return +} + +func (p *ListOsCategoriesParams) SetName(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["name"] = v + return +} + +func (p *ListOsCategoriesParams) SetPage(v int) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["page"] = v + return +} + +func (p *ListOsCategoriesParams) SetPagesize(v int) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["pagesize"] = v + return +} + +// You should always use this function to get a new ListOsCategoriesParams instance, +// as then you are sure you have configured all required params +func (s *GuestOSService) NewListOsCategoriesParams() *ListOsCategoriesParams { + p := &ListOsCategoriesParams{} + p.p = make(map[string]interface{}) + return p +} + +// This is a courtesy helper function, which in some cases may not work as expected! +func (s *GuestOSService) GetOsCategoryID(name string) (string, error) { + p := &ListOsCategoriesParams{} + p.p = make(map[string]interface{}) + + p.p["name"] = name + + l, err := s.ListOsCategories(p) + if err != nil { + return "", err + } + + if l.Count == 0 { + return "", fmt.Errorf("No match found for %s: %+v", name, l) + } + + if l.Count == 1 { + return l.OsCategories[0].Id, nil + } + + if l.Count > 1 { + for _, v := range l.OsCategories { + if v.Name == name { + return v.Id, nil + } + } + } + return "", fmt.Errorf("Could not find an exact match for %s: %+v", name, l) +} + +// This is a courtesy helper function, which in some cases may not work as expected! +func (s *GuestOSService) GetOsCategoryByName(name string) (*OsCategory, int, error) { + id, err := s.GetOsCategoryID(name) + if err != nil { + return nil, -1, err + } + + r, count, err := s.GetOsCategoryByID(id) + if err != nil { + return nil, count, err + } + return r, count, nil +} + +// This is a courtesy helper function, which in some cases may not work as expected! 
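
These lookup helpers compose: GetOsCategoryByName resolves a name to an ID and then delegates to GetOsCategoryByID below. A sketch of the intended use, again with the assumed `cs` client and "Ubuntu" standing in for a real category name:

    // Resolve a category ID by name, then list the OS types under it.
    catID, err := cs.GuestOS.GetOsCategoryID("Ubuntu")
    if err != nil {
    	log.Fatal(err)
    }
    p := cs.GuestOS.NewListOsTypesParams()
    p.SetOscategoryid(catID)
    l, err := cs.GuestOS.ListOsTypes(p)
    if err != nil {
    	log.Fatal(err)
    }
    for _, t := range l.OsTypes {
    	fmt.Println(t.Id, t.Description)
    }
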
+func (s *GuestOSService) GetOsCategoryByID(id string) (*OsCategory, int, error) { + p := &ListOsCategoriesParams{} + p.p = make(map[string]interface{}) + + p.p["id"] = id + + l, err := s.ListOsCategories(p) + if err != nil { + if strings.Contains(err.Error(), fmt.Sprintf( + "Invalid parameter id value=%s due to incorrect long value format, "+ + "or entity does not exist", id)) { + return nil, 0, fmt.Errorf("No match found for %s: %+v", id, l) + } + return nil, -1, err + } + + if l.Count == 0 { + return nil, l.Count, fmt.Errorf("No match found for %s: %+v", id, l) + } + + if l.Count == 1 { + return l.OsCategories[0], l.Count, nil + } + return nil, l.Count, fmt.Errorf("There is more than one result for OsCategory UUID: %s!", id) +} + +// Lists all supported OS categories for this cloud. +func (s *GuestOSService) ListOsCategories(p *ListOsCategoriesParams) (*ListOsCategoriesResponse, error) { + resp, err := s.cs.newRequest("listOsCategories", p.toURLValues()) + if err != nil { + return nil, err + } + + var r ListOsCategoriesResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + return &r, nil +} + +type ListOsCategoriesResponse struct { + Count int `json:"count"` + OsCategories []*OsCategory `json:"oscategory"` +} + +type OsCategory struct { + Id string `json:"id,omitempty"` + Name string `json:"name,omitempty"` +} + +type AddGuestOsParams struct { + p map[string]interface{} +} + +func (p *AddGuestOsParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["name"]; found { + u.Set("name", v.(string)) + } + if v, found := p.p["oscategoryid"]; found { + u.Set("oscategoryid", v.(string)) + } + if v, found := p.p["osdisplayname"]; found { + u.Set("osdisplayname", v.(string)) + } + return u +} + +func (p *AddGuestOsParams) SetName(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["name"] = v + return +} + +func (p *AddGuestOsParams) SetOscategoryid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["oscategoryid"] = v + return +} + +func (p *AddGuestOsParams) SetOsdisplayname(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["osdisplayname"] = v + return +} + +// You should always use this function to get a new AddGuestOsParams instance, +// as then you are sure you have configured all required params +func (s *GuestOSService) NewAddGuestOsParams(oscategoryid string, osdisplayname string) *AddGuestOsParams { + p := &AddGuestOsParams{} + p.p = make(map[string]interface{}) + p.p["oscategoryid"] = oscategoryid + p.p["osdisplayname"] = osdisplayname + return p +} + +// Add a new guest OS type +func (s *GuestOSService) AddGuestOs(p *AddGuestOsParams) (*AddGuestOsResponse, error) { + resp, err := s.cs.newRequest("addGuestOs", p.toURLValues()) + if err != nil { + return nil, err + } + + var r AddGuestOsResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + + // If we have an async client, we need to wait for the async result + if s.cs.async { + b, err := s.cs.GetAsyncJobResult(r.JobID, s.cs.timeout) + if err != nil { + if err == AsyncTimeoutErr { + return &r, err + } + return nil, err + } + + b, err = getRawValue(b) + if err != nil { + return nil, err + } + + if err := json.Unmarshal(b, &r); err != nil { + return nil, err + } + } + return &r, nil +} + +type AddGuestOsResponse struct { + JobID string `json:"jobid,omitempty"` + Description string `json:"description,omitempty"` + Id string `json:"id,omitempty"` + 
Isuserdefined string `json:"isuserdefined,omitempty"` + Oscategoryid string `json:"oscategoryid,omitempty"` +} + +type UpdateGuestOsParams struct { + p map[string]interface{} +} + +func (p *UpdateGuestOsParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["id"]; found { + u.Set("id", v.(string)) + } + if v, found := p.p["osdisplayname"]; found { + u.Set("osdisplayname", v.(string)) + } + return u +} + +func (p *UpdateGuestOsParams) SetId(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["id"] = v + return +} + +func (p *UpdateGuestOsParams) SetOsdisplayname(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["osdisplayname"] = v + return +} + +// You should always use this function to get a new UpdateGuestOsParams instance, +// as then you are sure you have configured all required params +func (s *GuestOSService) NewUpdateGuestOsParams(id string, osdisplayname string) *UpdateGuestOsParams { + p := &UpdateGuestOsParams{} + p.p = make(map[string]interface{}) + p.p["id"] = id + p.p["osdisplayname"] = osdisplayname + return p +} + +// Updates the information about Guest OS +func (s *GuestOSService) UpdateGuestOs(p *UpdateGuestOsParams) (*UpdateGuestOsResponse, error) { + resp, err := s.cs.newRequest("updateGuestOs", p.toURLValues()) + if err != nil { + return nil, err + } + + var r UpdateGuestOsResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + + // If we have a async client, we need to wait for the async result + if s.cs.async { + b, err := s.cs.GetAsyncJobResult(r.JobID, s.cs.timeout) + if err != nil { + if err == AsyncTimeoutErr { + return &r, err + } + return nil, err + } + + b, err = getRawValue(b) + if err != nil { + return nil, err + } + + if err := json.Unmarshal(b, &r); err != nil { + return nil, err + } + } + return &r, nil +} + +type UpdateGuestOsResponse struct { + JobID string `json:"jobid,omitempty"` + Description string `json:"description,omitempty"` + Id string `json:"id,omitempty"` + Isuserdefined string `json:"isuserdefined,omitempty"` + Oscategoryid string `json:"oscategoryid,omitempty"` +} + +type RemoveGuestOsParams struct { + p map[string]interface{} +} + +func (p *RemoveGuestOsParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["id"]; found { + u.Set("id", v.(string)) + } + return u +} + +func (p *RemoveGuestOsParams) SetId(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["id"] = v + return +} + +// You should always use this function to get a new RemoveGuestOsParams instance, +// as then you are sure you have configured all required params +func (s *GuestOSService) NewRemoveGuestOsParams(id string) *RemoveGuestOsParams { + p := &RemoveGuestOsParams{} + p.p = make(map[string]interface{}) + p.p["id"] = id + return p +} + +// Removes a Guest OS from listing. 
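+//
+// Usage sketch (osID is illustrative; cs is an already-configured client
+// exposing this service as cs.GuestOS):
+//
+//	p := cs.GuestOS.NewRemoveGuestOsParams(osID)
+//	resp, err := cs.GuestOS.RemoveGuestOs(p)
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	fmt.Println("removed:", resp.Success)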
+func (s *GuestOSService) RemoveGuestOs(p *RemoveGuestOsParams) (*RemoveGuestOsResponse, error) { + resp, err := s.cs.newRequest("removeGuestOs", p.toURLValues()) + if err != nil { + return nil, err + } + + var r RemoveGuestOsResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + + // If we have a async client, we need to wait for the async result + if s.cs.async { + b, err := s.cs.GetAsyncJobResult(r.JobID, s.cs.timeout) + if err != nil { + if err == AsyncTimeoutErr { + return &r, err + } + return nil, err + } + + if err := json.Unmarshal(b, &r); err != nil { + return nil, err + } + } + return &r, nil +} + +type RemoveGuestOsResponse struct { + JobID string `json:"jobid,omitempty"` + Displaytext string `json:"displaytext,omitempty"` + Success bool `json:"success,omitempty"` +} + +type ListGuestOsMappingParams struct { + p map[string]interface{} +} + +func (p *ListGuestOsMappingParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["hypervisor"]; found { + u.Set("hypervisor", v.(string)) + } + if v, found := p.p["hypervisorversion"]; found { + u.Set("hypervisorversion", v.(string)) + } + if v, found := p.p["id"]; found { + u.Set("id", v.(string)) + } + if v, found := p.p["keyword"]; found { + u.Set("keyword", v.(string)) + } + if v, found := p.p["ostypeid"]; found { + u.Set("ostypeid", v.(string)) + } + if v, found := p.p["page"]; found { + vv := strconv.Itoa(v.(int)) + u.Set("page", vv) + } + if v, found := p.p["pagesize"]; found { + vv := strconv.Itoa(v.(int)) + u.Set("pagesize", vv) + } + return u +} + +func (p *ListGuestOsMappingParams) SetHypervisor(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["hypervisor"] = v + return +} + +func (p *ListGuestOsMappingParams) SetHypervisorversion(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["hypervisorversion"] = v + return +} + +func (p *ListGuestOsMappingParams) SetId(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["id"] = v + return +} + +func (p *ListGuestOsMappingParams) SetKeyword(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["keyword"] = v + return +} + +func (p *ListGuestOsMappingParams) SetOstypeid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["ostypeid"] = v + return +} + +func (p *ListGuestOsMappingParams) SetPage(v int) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["page"] = v + return +} + +func (p *ListGuestOsMappingParams) SetPagesize(v int) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["pagesize"] = v + return +} + +// You should always use this function to get a new ListGuestOsMappingParams instance, +// as then you are sure you have configured all required params +func (s *GuestOSService) NewListGuestOsMappingParams() *ListGuestOsMappingParams { + p := &ListGuestOsMappingParams{} + p.p = make(map[string]interface{}) + return p +} + +// This is a courtesy helper function, which in some cases may not work as expected! 
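+//
+// Sketch of a by-ID lookup using the count contract shared by the helpers
+// in this file (mappingID is illustrative; cs is a configured client):
+//
+//	mapping, count, err := cs.GuestOS.GetGuestOsMappingByID(mappingID)
+//	if err != nil {
+//		log.Fatalf("no unique match (count=%d): %v", count, err)
+//	}
+//	fmt.Println(mapping.Osdisplayname, "->", mapping.Osnameforhypervisor)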
+func (s *GuestOSService) GetGuestOsMappingByID(id string) (*GuestOsMapping, int, error) {
+	p := &ListGuestOsMappingParams{}
+	p.p = make(map[string]interface{})
+
+	p.p["id"] = id
+
+	l, err := s.ListGuestOsMapping(p)
+	if err != nil {
+		if strings.Contains(err.Error(), fmt.Sprintf(
+			"Invalid parameter id value=%s due to incorrect long value format, "+
+				"or entity does not exist", id)) {
+			return nil, 0, fmt.Errorf("No match found for %s: %+v", id, l)
+		}
+		return nil, -1, err
+	}
+
+	if l.Count == 0 {
+		return nil, l.Count, fmt.Errorf("No match found for %s: %+v", id, l)
+	}
+
+	if l.Count == 1 {
+		return l.GuestOsMapping[0], l.Count, nil
+	}
+	return nil, l.Count, fmt.Errorf("There is more than one result for GuestOsMapping UUID: %s!", id)
+}
+
+// Lists all available OS mappings for given hypervisor
+func (s *GuestOSService) ListGuestOsMapping(p *ListGuestOsMappingParams) (*ListGuestOsMappingResponse, error) {
+	resp, err := s.cs.newRequest("listGuestOsMapping", p.toURLValues())
+	if err != nil {
+		return nil, err
+	}
+
+	var r ListGuestOsMappingResponse
+	if err := json.Unmarshal(resp, &r); err != nil {
+		return nil, err
+	}
+	return &r, nil
+}
+
+type ListGuestOsMappingResponse struct {
+	Count          int               `json:"count"`
+	GuestOsMapping []*GuestOsMapping `json:"guestosmapping"`
+}
+
+type GuestOsMapping struct {
+	Hypervisor          string `json:"hypervisor,omitempty"`
+	Hypervisorversion   string `json:"hypervisorversion,omitempty"`
+	Id                  string `json:"id,omitempty"`
+	Isuserdefined       string `json:"isuserdefined,omitempty"`
+	Osdisplayname       string `json:"osdisplayname,omitempty"`
+	Osnameforhypervisor string `json:"osnameforhypervisor,omitempty"`
+	Ostypeid            string `json:"ostypeid,omitempty"`
+}
+
+type AddGuestOsMappingParams struct {
+	p map[string]interface{}
+}
+
+func (p *AddGuestOsMappingParams) toURLValues() url.Values {
+	u := url.Values{}
+	if p.p == nil {
+		return u
+	}
+	if v, found := p.p["hypervisor"]; found {
+		u.Set("hypervisor", v.(string))
+	}
+	if v, found := p.p["hypervisorversion"]; found {
+		u.Set("hypervisorversion", v.(string))
+	}
+	if v, found := p.p["osdisplayname"]; found {
+		u.Set("osdisplayname", v.(string))
+	}
+	if v, found := p.p["osnameforhypervisor"]; found {
+		u.Set("osnameforhypervisor", v.(string))
+	}
+	if v, found := p.p["ostypeid"]; found {
+		u.Set("ostypeid", v.(string))
+	}
+	return u
+}
+
+func (p *AddGuestOsMappingParams) SetHypervisor(v string) {
+	if p.p == nil {
+		p.p = make(map[string]interface{})
+	}
+	p.p["hypervisor"] = v
+	return
+}
+
+func (p *AddGuestOsMappingParams) SetHypervisorversion(v string) {
+	if p.p == nil {
+		p.p = make(map[string]interface{})
+	}
+	p.p["hypervisorversion"] = v
+	return
+}
+
+func (p *AddGuestOsMappingParams) SetOsdisplayname(v string) {
+	if p.p == nil {
+		p.p = make(map[string]interface{})
+	}
+	p.p["osdisplayname"] = v
+	return
+}
+
+func (p *AddGuestOsMappingParams) SetOsnameforhypervisor(v string) {
+	if p.p == nil {
+		p.p = make(map[string]interface{})
+	}
+	p.p["osnameforhypervisor"] = v
+	return
+}
+
+func (p *AddGuestOsMappingParams) SetOstypeid(v string) {
+	if p.p == nil {
+		p.p = make(map[string]interface{})
+	}
+	p.p["ostypeid"] = v
+	return
+}
+
+// You should always use this function to get a new AddGuestOsMappingParams instance,
+// as then you are sure you have configured all required params
+func (s *GuestOSService) NewAddGuestOsMappingParams(hypervisor string, hypervisorversion string, osnameforhypervisor string) *AddGuestOsMappingParams {
+	p := &AddGuestOsMappingParams{}
+	p.p =
make(map[string]interface{}) + p.p["hypervisor"] = hypervisor + p.p["hypervisorversion"] = hypervisorversion + p.p["osnameforhypervisor"] = osnameforhypervisor + return p +} + +// Adds a guest OS name to hypervisor OS name mapping +func (s *GuestOSService) AddGuestOsMapping(p *AddGuestOsMappingParams) (*AddGuestOsMappingResponse, error) { + resp, err := s.cs.newRequest("addGuestOsMapping", p.toURLValues()) + if err != nil { + return nil, err + } + + var r AddGuestOsMappingResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + + // If we have a async client, we need to wait for the async result + if s.cs.async { + b, err := s.cs.GetAsyncJobResult(r.JobID, s.cs.timeout) + if err != nil { + if err == AsyncTimeoutErr { + return &r, err + } + return nil, err + } + + b, err = getRawValue(b) + if err != nil { + return nil, err + } + + if err := json.Unmarshal(b, &r); err != nil { + return nil, err + } + } + return &r, nil +} + +type AddGuestOsMappingResponse struct { + JobID string `json:"jobid,omitempty"` + Hypervisor string `json:"hypervisor,omitempty"` + Hypervisorversion string `json:"hypervisorversion,omitempty"` + Id string `json:"id,omitempty"` + Isuserdefined string `json:"isuserdefined,omitempty"` + Osdisplayname string `json:"osdisplayname,omitempty"` + Osnameforhypervisor string `json:"osnameforhypervisor,omitempty"` + Ostypeid string `json:"ostypeid,omitempty"` +} + +type UpdateGuestOsMappingParams struct { + p map[string]interface{} +} + +func (p *UpdateGuestOsMappingParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["id"]; found { + u.Set("id", v.(string)) + } + if v, found := p.p["osnameforhypervisor"]; found { + u.Set("osnameforhypervisor", v.(string)) + } + return u +} + +func (p *UpdateGuestOsMappingParams) SetId(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["id"] = v + return +} + +func (p *UpdateGuestOsMappingParams) SetOsnameforhypervisor(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["osnameforhypervisor"] = v + return +} + +// You should always use this function to get a new UpdateGuestOsMappingParams instance, +// as then you are sure you have configured all required params +func (s *GuestOSService) NewUpdateGuestOsMappingParams(id string, osnameforhypervisor string) *UpdateGuestOsMappingParams { + p := &UpdateGuestOsMappingParams{} + p.p = make(map[string]interface{}) + p.p["id"] = id + p.p["osnameforhypervisor"] = osnameforhypervisor + return p +} + +// Updates the information about Guest OS to Hypervisor specific name mapping +func (s *GuestOSService) UpdateGuestOsMapping(p *UpdateGuestOsMappingParams) (*UpdateGuestOsMappingResponse, error) { + resp, err := s.cs.newRequest("updateGuestOsMapping", p.toURLValues()) + if err != nil { + return nil, err + } + + var r UpdateGuestOsMappingResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + + // If we have a async client, we need to wait for the async result + if s.cs.async { + b, err := s.cs.GetAsyncJobResult(r.JobID, s.cs.timeout) + if err != nil { + if err == AsyncTimeoutErr { + return &r, err + } + return nil, err + } + + b, err = getRawValue(b) + if err != nil { + return nil, err + } + + if err := json.Unmarshal(b, &r); err != nil { + return nil, err + } + } + return &r, nil +} + +type UpdateGuestOsMappingResponse struct { + JobID string `json:"jobid,omitempty"` + Hypervisor string `json:"hypervisor,omitempty"` + Hypervisorversion string 
`json:"hypervisorversion,omitempty"` + Id string `json:"id,omitempty"` + Isuserdefined string `json:"isuserdefined,omitempty"` + Osdisplayname string `json:"osdisplayname,omitempty"` + Osnameforhypervisor string `json:"osnameforhypervisor,omitempty"` + Ostypeid string `json:"ostypeid,omitempty"` +} + +type RemoveGuestOsMappingParams struct { + p map[string]interface{} +} + +func (p *RemoveGuestOsMappingParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["id"]; found { + u.Set("id", v.(string)) + } + return u +} + +func (p *RemoveGuestOsMappingParams) SetId(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["id"] = v + return +} + +// You should always use this function to get a new RemoveGuestOsMappingParams instance, +// as then you are sure you have configured all required params +func (s *GuestOSService) NewRemoveGuestOsMappingParams(id string) *RemoveGuestOsMappingParams { + p := &RemoveGuestOsMappingParams{} + p.p = make(map[string]interface{}) + p.p["id"] = id + return p +} + +// Removes a Guest OS Mapping. +func (s *GuestOSService) RemoveGuestOsMapping(p *RemoveGuestOsMappingParams) (*RemoveGuestOsMappingResponse, error) { + resp, err := s.cs.newRequest("removeGuestOsMapping", p.toURLValues()) + if err != nil { + return nil, err + } + + var r RemoveGuestOsMappingResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + + // If we have a async client, we need to wait for the async result + if s.cs.async { + b, err := s.cs.GetAsyncJobResult(r.JobID, s.cs.timeout) + if err != nil { + if err == AsyncTimeoutErr { + return &r, err + } + return nil, err + } + + if err := json.Unmarshal(b, &r); err != nil { + return nil, err + } + } + return &r, nil +} + +type RemoveGuestOsMappingResponse struct { + JobID string `json:"jobid,omitempty"` + Displaytext string `json:"displaytext,omitempty"` + Success bool `json:"success,omitempty"` +} diff --git a/vendor/github.com/xanzy/go-cloudstack/cloudstack/HostService.go b/vendor/github.com/xanzy/go-cloudstack/cloudstack/HostService.go new file mode 100644 index 000000000..427f278f5 --- /dev/null +++ b/vendor/github.com/xanzy/go-cloudstack/cloudstack/HostService.go @@ -0,0 +1,2046 @@ +// +// Copyright 2014, Sander van Harmelen +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// + +package cloudstack + +import ( + "encoding/json" + "fmt" + "net/url" + "strconv" + "strings" +) + +type AddHostParams struct { + p map[string]interface{} +} + +func (p *AddHostParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["allocationstate"]; found { + u.Set("allocationstate", v.(string)) + } + if v, found := p.p["clusterid"]; found { + u.Set("clusterid", v.(string)) + } + if v, found := p.p["clustername"]; found { + u.Set("clustername", v.(string)) + } + if v, found := p.p["hosttags"]; found { + vv := strings.Join(v.([]string), ",") + u.Set("hosttags", vv) + } + if v, found := p.p["hypervisor"]; found { + u.Set("hypervisor", v.(string)) + } + if v, found := p.p["password"]; found { + u.Set("password", v.(string)) + } + if v, found := p.p["podid"]; found { + u.Set("podid", v.(string)) + } + if v, found := p.p["url"]; found { + u.Set("url", v.(string)) + } + if v, found := p.p["username"]; found { + u.Set("username", v.(string)) + } + if v, found := p.p["zoneid"]; found { + u.Set("zoneid", v.(string)) + } + return u +} + +func (p *AddHostParams) SetAllocationstate(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["allocationstate"] = v + return +} + +func (p *AddHostParams) SetClusterid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["clusterid"] = v + return +} + +func (p *AddHostParams) SetClustername(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["clustername"] = v + return +} + +func (p *AddHostParams) SetHosttags(v []string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["hosttags"] = v + return +} + +func (p *AddHostParams) SetHypervisor(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["hypervisor"] = v + return +} + +func (p *AddHostParams) SetPassword(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["password"] = v + return +} + +func (p *AddHostParams) SetPodid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["podid"] = v + return +} + +func (p *AddHostParams) SetUrl(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["url"] = v + return +} + +func (p *AddHostParams) SetUsername(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["username"] = v + return +} + +func (p *AddHostParams) SetZoneid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["zoneid"] = v + return +} + +// You should always use this function to get a new AddHostParams instance, +// as then you are sure you have configured all required params +func (s *HostService) NewAddHostParams(hypervisor string, password string, podid string, url string, username string, zoneid string) *AddHostParams { + p := &AddHostParams{} + p.p = make(map[string]interface{}) + p.p["hypervisor"] = hypervisor + p.p["password"] = password + p.p["podid"] = podid + p.p["url"] = url + p.p["username"] = username + p.p["zoneid"] = zoneid + return p +} + +// Adds a new host. 
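+//
+// Usage sketch (all values illustrative; cs is an already-configured
+// client assumed to expose this service as cs.Host). Required params go
+// through the constructor, optional ones through setters:
+//
+//	p := cs.Host.NewAddHostParams("XenServer", password, podID,
+//		"http://xenserver01.example.com", username, zoneID)
+//	p.SetClusterid(clusterID)
+//	host, err := cs.Host.AddHost(p)
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	fmt.Println("added host", host.Id)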
+func (s *HostService) AddHost(p *AddHostParams) (*AddHostResponse, error) { + resp, err := s.cs.newRequest("addHost", p.toURLValues()) + if err != nil { + return nil, err + } + + var r AddHostResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + return &r, nil +} + +type AddHostResponse struct { + Averageload int64 `json:"averageload,omitempty"` + Capabilities string `json:"capabilities,omitempty"` + Clusterid string `json:"clusterid,omitempty"` + Clustername string `json:"clustername,omitempty"` + Clustertype string `json:"clustertype,omitempty"` + Cpuallocated string `json:"cpuallocated,omitempty"` + Cpunumber int `json:"cpunumber,omitempty"` + Cpusockets int `json:"cpusockets,omitempty"` + Cpuspeed int64 `json:"cpuspeed,omitempty"` + Cpuused string `json:"cpuused,omitempty"` + Cpuwithoverprovisioning string `json:"cpuwithoverprovisioning,omitempty"` + Created string `json:"created,omitempty"` + Disconnected string `json:"disconnected,omitempty"` + Disksizeallocated int64 `json:"disksizeallocated,omitempty"` + Disksizetotal int64 `json:"disksizetotal,omitempty"` + Events string `json:"events,omitempty"` + Gpugroup []struct { + Gpugroupname string `json:"gpugroupname,omitempty"` + Vgpu []struct { + Maxcapacity int64 `json:"maxcapacity,omitempty"` + Maxheads int64 `json:"maxheads,omitempty"` + Maxresolutionx int64 `json:"maxresolutionx,omitempty"` + Maxresolutiony int64 `json:"maxresolutiony,omitempty"` + Maxvgpuperpgpu int64 `json:"maxvgpuperpgpu,omitempty"` + Remainingcapacity int64 `json:"remainingcapacity,omitempty"` + Vgputype string `json:"vgputype,omitempty"` + Videoram int64 `json:"videoram,omitempty"` + } `json:"vgpu,omitempty"` + } `json:"gpugroup,omitempty"` + Hahost bool `json:"hahost,omitempty"` + Hasenoughcapacity bool `json:"hasenoughcapacity,omitempty"` + Hosttags string `json:"hosttags,omitempty"` + Hypervisor string `json:"hypervisor,omitempty"` + Hypervisorversion string `json:"hypervisorversion,omitempty"` + Id string `json:"id,omitempty"` + Ipaddress string `json:"ipaddress,omitempty"` + Islocalstorageactive bool `json:"islocalstorageactive,omitempty"` + Lastpinged string `json:"lastpinged,omitempty"` + Managementserverid int64 `json:"managementserverid,omitempty"` + Memoryallocated int64 `json:"memoryallocated,omitempty"` + Memorytotal int64 `json:"memorytotal,omitempty"` + Memoryused int64 `json:"memoryused,omitempty"` + Name string `json:"name,omitempty"` + Networkkbsread int64 `json:"networkkbsread,omitempty"` + Networkkbswrite int64 `json:"networkkbswrite,omitempty"` + Oscategoryid string `json:"oscategoryid,omitempty"` + Oscategoryname string `json:"oscategoryname,omitempty"` + Podid string `json:"podid,omitempty"` + Podname string `json:"podname,omitempty"` + Removed string `json:"removed,omitempty"` + Resourcestate string `json:"resourcestate,omitempty"` + State string `json:"state,omitempty"` + Suitableformigration bool `json:"suitableformigration,omitempty"` + Type string `json:"type,omitempty"` + Version string `json:"version,omitempty"` + Zoneid string `json:"zoneid,omitempty"` + Zonename string `json:"zonename,omitempty"` +} + +type ReconnectHostParams struct { + p map[string]interface{} +} + +func (p *ReconnectHostParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["id"]; found { + u.Set("id", v.(string)) + } + return u +} + +func (p *ReconnectHostParams) SetId(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["id"] = v + return +} + +// You 
should always use this function to get a new ReconnectHostParams instance, +// as then you are sure you have configured all required params +func (s *HostService) NewReconnectHostParams(id string) *ReconnectHostParams { + p := &ReconnectHostParams{} + p.p = make(map[string]interface{}) + p.p["id"] = id + return p +} + +// Reconnects a host. +func (s *HostService) ReconnectHost(p *ReconnectHostParams) (*ReconnectHostResponse, error) { + resp, err := s.cs.newRequest("reconnectHost", p.toURLValues()) + if err != nil { + return nil, err + } + + var r ReconnectHostResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + + // If we have a async client, we need to wait for the async result + if s.cs.async { + b, err := s.cs.GetAsyncJobResult(r.JobID, s.cs.timeout) + if err != nil { + if err == AsyncTimeoutErr { + return &r, err + } + return nil, err + } + + b, err = getRawValue(b) + if err != nil { + return nil, err + } + + if err := json.Unmarshal(b, &r); err != nil { + return nil, err + } + } + return &r, nil +} + +type ReconnectHostResponse struct { + JobID string `json:"jobid,omitempty"` + Averageload int64 `json:"averageload,omitempty"` + Capabilities string `json:"capabilities,omitempty"` + Clusterid string `json:"clusterid,omitempty"` + Clustername string `json:"clustername,omitempty"` + Clustertype string `json:"clustertype,omitempty"` + Cpuallocated string `json:"cpuallocated,omitempty"` + Cpunumber int `json:"cpunumber,omitempty"` + Cpusockets int `json:"cpusockets,omitempty"` + Cpuspeed int64 `json:"cpuspeed,omitempty"` + Cpuused string `json:"cpuused,omitempty"` + Cpuwithoverprovisioning string `json:"cpuwithoverprovisioning,omitempty"` + Created string `json:"created,omitempty"` + Disconnected string `json:"disconnected,omitempty"` + Disksizeallocated int64 `json:"disksizeallocated,omitempty"` + Disksizetotal int64 `json:"disksizetotal,omitempty"` + Events string `json:"events,omitempty"` + Gpugroup []struct { + Gpugroupname string `json:"gpugroupname,omitempty"` + Vgpu []struct { + Maxcapacity int64 `json:"maxcapacity,omitempty"` + Maxheads int64 `json:"maxheads,omitempty"` + Maxresolutionx int64 `json:"maxresolutionx,omitempty"` + Maxresolutiony int64 `json:"maxresolutiony,omitempty"` + Maxvgpuperpgpu int64 `json:"maxvgpuperpgpu,omitempty"` + Remainingcapacity int64 `json:"remainingcapacity,omitempty"` + Vgputype string `json:"vgputype,omitempty"` + Videoram int64 `json:"videoram,omitempty"` + } `json:"vgpu,omitempty"` + } `json:"gpugroup,omitempty"` + Hahost bool `json:"hahost,omitempty"` + Hasenoughcapacity bool `json:"hasenoughcapacity,omitempty"` + Hosttags string `json:"hosttags,omitempty"` + Hypervisor string `json:"hypervisor,omitempty"` + Hypervisorversion string `json:"hypervisorversion,omitempty"` + Id string `json:"id,omitempty"` + Ipaddress string `json:"ipaddress,omitempty"` + Islocalstorageactive bool `json:"islocalstorageactive,omitempty"` + Lastpinged string `json:"lastpinged,omitempty"` + Managementserverid int64 `json:"managementserverid,omitempty"` + Memoryallocated int64 `json:"memoryallocated,omitempty"` + Memorytotal int64 `json:"memorytotal,omitempty"` + Memoryused int64 `json:"memoryused,omitempty"` + Name string `json:"name,omitempty"` + Networkkbsread int64 `json:"networkkbsread,omitempty"` + Networkkbswrite int64 `json:"networkkbswrite,omitempty"` + Oscategoryid string `json:"oscategoryid,omitempty"` + Oscategoryname string `json:"oscategoryname,omitempty"` + Podid string `json:"podid,omitempty"` + Podname string 
`json:"podname,omitempty"` + Removed string `json:"removed,omitempty"` + Resourcestate string `json:"resourcestate,omitempty"` + State string `json:"state,omitempty"` + Suitableformigration bool `json:"suitableformigration,omitempty"` + Type string `json:"type,omitempty"` + Version string `json:"version,omitempty"` + Zoneid string `json:"zoneid,omitempty"` + Zonename string `json:"zonename,omitempty"` +} + +type UpdateHostParams struct { + p map[string]interface{} +} + +func (p *UpdateHostParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["allocationstate"]; found { + u.Set("allocationstate", v.(string)) + } + if v, found := p.p["hosttags"]; found { + vv := strings.Join(v.([]string), ",") + u.Set("hosttags", vv) + } + if v, found := p.p["id"]; found { + u.Set("id", v.(string)) + } + if v, found := p.p["oscategoryid"]; found { + u.Set("oscategoryid", v.(string)) + } + if v, found := p.p["url"]; found { + u.Set("url", v.(string)) + } + return u +} + +func (p *UpdateHostParams) SetAllocationstate(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["allocationstate"] = v + return +} + +func (p *UpdateHostParams) SetHosttags(v []string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["hosttags"] = v + return +} + +func (p *UpdateHostParams) SetId(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["id"] = v + return +} + +func (p *UpdateHostParams) SetOscategoryid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["oscategoryid"] = v + return +} + +func (p *UpdateHostParams) SetUrl(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["url"] = v + return +} + +// You should always use this function to get a new UpdateHostParams instance, +// as then you are sure you have configured all required params +func (s *HostService) NewUpdateHostParams(id string) *UpdateHostParams { + p := &UpdateHostParams{} + p.p = make(map[string]interface{}) + p.p["id"] = id + return p +} + +// Updates a host. 
+func (s *HostService) UpdateHost(p *UpdateHostParams) (*UpdateHostResponse, error) { + resp, err := s.cs.newRequest("updateHost", p.toURLValues()) + if err != nil { + return nil, err + } + + var r UpdateHostResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + return &r, nil +} + +type UpdateHostResponse struct { + Averageload int64 `json:"averageload,omitempty"` + Capabilities string `json:"capabilities,omitempty"` + Clusterid string `json:"clusterid,omitempty"` + Clustername string `json:"clustername,omitempty"` + Clustertype string `json:"clustertype,omitempty"` + Cpuallocated string `json:"cpuallocated,omitempty"` + Cpunumber int `json:"cpunumber,omitempty"` + Cpusockets int `json:"cpusockets,omitempty"` + Cpuspeed int64 `json:"cpuspeed,omitempty"` + Cpuused string `json:"cpuused,omitempty"` + Cpuwithoverprovisioning string `json:"cpuwithoverprovisioning,omitempty"` + Created string `json:"created,omitempty"` + Disconnected string `json:"disconnected,omitempty"` + Disksizeallocated int64 `json:"disksizeallocated,omitempty"` + Disksizetotal int64 `json:"disksizetotal,omitempty"` + Events string `json:"events,omitempty"` + Gpugroup []struct { + Gpugroupname string `json:"gpugroupname,omitempty"` + Vgpu []struct { + Maxcapacity int64 `json:"maxcapacity,omitempty"` + Maxheads int64 `json:"maxheads,omitempty"` + Maxresolutionx int64 `json:"maxresolutionx,omitempty"` + Maxresolutiony int64 `json:"maxresolutiony,omitempty"` + Maxvgpuperpgpu int64 `json:"maxvgpuperpgpu,omitempty"` + Remainingcapacity int64 `json:"remainingcapacity,omitempty"` + Vgputype string `json:"vgputype,omitempty"` + Videoram int64 `json:"videoram,omitempty"` + } `json:"vgpu,omitempty"` + } `json:"gpugroup,omitempty"` + Hahost bool `json:"hahost,omitempty"` + Hasenoughcapacity bool `json:"hasenoughcapacity,omitempty"` + Hosttags string `json:"hosttags,omitempty"` + Hypervisor string `json:"hypervisor,omitempty"` + Hypervisorversion string `json:"hypervisorversion,omitempty"` + Id string `json:"id,omitempty"` + Ipaddress string `json:"ipaddress,omitempty"` + Islocalstorageactive bool `json:"islocalstorageactive,omitempty"` + Lastpinged string `json:"lastpinged,omitempty"` + Managementserverid int64 `json:"managementserverid,omitempty"` + Memoryallocated int64 `json:"memoryallocated,omitempty"` + Memorytotal int64 `json:"memorytotal,omitempty"` + Memoryused int64 `json:"memoryused,omitempty"` + Name string `json:"name,omitempty"` + Networkkbsread int64 `json:"networkkbsread,omitempty"` + Networkkbswrite int64 `json:"networkkbswrite,omitempty"` + Oscategoryid string `json:"oscategoryid,omitempty"` + Oscategoryname string `json:"oscategoryname,omitempty"` + Podid string `json:"podid,omitempty"` + Podname string `json:"podname,omitempty"` + Removed string `json:"removed,omitempty"` + Resourcestate string `json:"resourcestate,omitempty"` + State string `json:"state,omitempty"` + Suitableformigration bool `json:"suitableformigration,omitempty"` + Type string `json:"type,omitempty"` + Version string `json:"version,omitempty"` + Zoneid string `json:"zoneid,omitempty"` + Zonename string `json:"zonename,omitempty"` +} + +type DeleteHostParams struct { + p map[string]interface{} +} + +func (p *DeleteHostParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["forced"]; found { + vv := strconv.FormatBool(v.(bool)) + u.Set("forced", vv) + } + if v, found := p.p["forcedestroylocalstorage"]; found { + vv := strconv.FormatBool(v.(bool)) + 
u.Set("forcedestroylocalstorage", vv) + } + if v, found := p.p["id"]; found { + u.Set("id", v.(string)) + } + return u +} + +func (p *DeleteHostParams) SetForced(v bool) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["forced"] = v + return +} + +func (p *DeleteHostParams) SetForcedestroylocalstorage(v bool) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["forcedestroylocalstorage"] = v + return +} + +func (p *DeleteHostParams) SetId(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["id"] = v + return +} + +// You should always use this function to get a new DeleteHostParams instance, +// as then you are sure you have configured all required params +func (s *HostService) NewDeleteHostParams(id string) *DeleteHostParams { + p := &DeleteHostParams{} + p.p = make(map[string]interface{}) + p.p["id"] = id + return p +} + +// Deletes a host. +func (s *HostService) DeleteHost(p *DeleteHostParams) (*DeleteHostResponse, error) { + resp, err := s.cs.newRequest("deleteHost", p.toURLValues()) + if err != nil { + return nil, err + } + + var r DeleteHostResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + return &r, nil +} + +type DeleteHostResponse struct { + Displaytext string `json:"displaytext,omitempty"` + Success string `json:"success,omitempty"` +} + +type PrepareHostForMaintenanceParams struct { + p map[string]interface{} +} + +func (p *PrepareHostForMaintenanceParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["id"]; found { + u.Set("id", v.(string)) + } + return u +} + +func (p *PrepareHostForMaintenanceParams) SetId(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["id"] = v + return +} + +// You should always use this function to get a new PrepareHostForMaintenanceParams instance, +// as then you are sure you have configured all required params +func (s *HostService) NewPrepareHostForMaintenanceParams(id string) *PrepareHostForMaintenanceParams { + p := &PrepareHostForMaintenanceParams{} + p.p = make(map[string]interface{}) + p.p["id"] = id + return p +} + +// Prepares a host for maintenance. 
+func (s *HostService) PrepareHostForMaintenance(p *PrepareHostForMaintenanceParams) (*PrepareHostForMaintenanceResponse, error) { + resp, err := s.cs.newRequest("prepareHostForMaintenance", p.toURLValues()) + if err != nil { + return nil, err + } + + var r PrepareHostForMaintenanceResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + + // If we have a async client, we need to wait for the async result + if s.cs.async { + b, err := s.cs.GetAsyncJobResult(r.JobID, s.cs.timeout) + if err != nil { + if err == AsyncTimeoutErr { + return &r, err + } + return nil, err + } + + b, err = getRawValue(b) + if err != nil { + return nil, err + } + + if err := json.Unmarshal(b, &r); err != nil { + return nil, err + } + } + return &r, nil +} + +type PrepareHostForMaintenanceResponse struct { + JobID string `json:"jobid,omitempty"` + Averageload int64 `json:"averageload,omitempty"` + Capabilities string `json:"capabilities,omitempty"` + Clusterid string `json:"clusterid,omitempty"` + Clustername string `json:"clustername,omitempty"` + Clustertype string `json:"clustertype,omitempty"` + Cpuallocated string `json:"cpuallocated,omitempty"` + Cpunumber int `json:"cpunumber,omitempty"` + Cpusockets int `json:"cpusockets,omitempty"` + Cpuspeed int64 `json:"cpuspeed,omitempty"` + Cpuused string `json:"cpuused,omitempty"` + Cpuwithoverprovisioning string `json:"cpuwithoverprovisioning,omitempty"` + Created string `json:"created,omitempty"` + Disconnected string `json:"disconnected,omitempty"` + Disksizeallocated int64 `json:"disksizeallocated,omitempty"` + Disksizetotal int64 `json:"disksizetotal,omitempty"` + Events string `json:"events,omitempty"` + Gpugroup []struct { + Gpugroupname string `json:"gpugroupname,omitempty"` + Vgpu []struct { + Maxcapacity int64 `json:"maxcapacity,omitempty"` + Maxheads int64 `json:"maxheads,omitempty"` + Maxresolutionx int64 `json:"maxresolutionx,omitempty"` + Maxresolutiony int64 `json:"maxresolutiony,omitempty"` + Maxvgpuperpgpu int64 `json:"maxvgpuperpgpu,omitempty"` + Remainingcapacity int64 `json:"remainingcapacity,omitempty"` + Vgputype string `json:"vgputype,omitempty"` + Videoram int64 `json:"videoram,omitempty"` + } `json:"vgpu,omitempty"` + } `json:"gpugroup,omitempty"` + Hahost bool `json:"hahost,omitempty"` + Hasenoughcapacity bool `json:"hasenoughcapacity,omitempty"` + Hosttags string `json:"hosttags,omitempty"` + Hypervisor string `json:"hypervisor,omitempty"` + Hypervisorversion string `json:"hypervisorversion,omitempty"` + Id string `json:"id,omitempty"` + Ipaddress string `json:"ipaddress,omitempty"` + Islocalstorageactive bool `json:"islocalstorageactive,omitempty"` + Lastpinged string `json:"lastpinged,omitempty"` + Managementserverid int64 `json:"managementserverid,omitempty"` + Memoryallocated int64 `json:"memoryallocated,omitempty"` + Memorytotal int64 `json:"memorytotal,omitempty"` + Memoryused int64 `json:"memoryused,omitempty"` + Name string `json:"name,omitempty"` + Networkkbsread int64 `json:"networkkbsread,omitempty"` + Networkkbswrite int64 `json:"networkkbswrite,omitempty"` + Oscategoryid string `json:"oscategoryid,omitempty"` + Oscategoryname string `json:"oscategoryname,omitempty"` + Podid string `json:"podid,omitempty"` + Podname string `json:"podname,omitempty"` + Removed string `json:"removed,omitempty"` + Resourcestate string `json:"resourcestate,omitempty"` + State string `json:"state,omitempty"` + Suitableformigration bool `json:"suitableformigration,omitempty"` + Type string `json:"type,omitempty"` + Version 
string `json:"version,omitempty"` + Zoneid string `json:"zoneid,omitempty"` + Zonename string `json:"zonename,omitempty"` +} + +type CancelHostMaintenanceParams struct { + p map[string]interface{} +} + +func (p *CancelHostMaintenanceParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["id"]; found { + u.Set("id", v.(string)) + } + return u +} + +func (p *CancelHostMaintenanceParams) SetId(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["id"] = v + return +} + +// You should always use this function to get a new CancelHostMaintenanceParams instance, +// as then you are sure you have configured all required params +func (s *HostService) NewCancelHostMaintenanceParams(id string) *CancelHostMaintenanceParams { + p := &CancelHostMaintenanceParams{} + p.p = make(map[string]interface{}) + p.p["id"] = id + return p +} + +// Cancels host maintenance. +func (s *HostService) CancelHostMaintenance(p *CancelHostMaintenanceParams) (*CancelHostMaintenanceResponse, error) { + resp, err := s.cs.newRequest("cancelHostMaintenance", p.toURLValues()) + if err != nil { + return nil, err + } + + var r CancelHostMaintenanceResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + + // If we have a async client, we need to wait for the async result + if s.cs.async { + b, err := s.cs.GetAsyncJobResult(r.JobID, s.cs.timeout) + if err != nil { + if err == AsyncTimeoutErr { + return &r, err + } + return nil, err + } + + b, err = getRawValue(b) + if err != nil { + return nil, err + } + + if err := json.Unmarshal(b, &r); err != nil { + return nil, err + } + } + return &r, nil +} + +type CancelHostMaintenanceResponse struct { + JobID string `json:"jobid,omitempty"` + Averageload int64 `json:"averageload,omitempty"` + Capabilities string `json:"capabilities,omitempty"` + Clusterid string `json:"clusterid,omitempty"` + Clustername string `json:"clustername,omitempty"` + Clustertype string `json:"clustertype,omitempty"` + Cpuallocated string `json:"cpuallocated,omitempty"` + Cpunumber int `json:"cpunumber,omitempty"` + Cpusockets int `json:"cpusockets,omitempty"` + Cpuspeed int64 `json:"cpuspeed,omitempty"` + Cpuused string `json:"cpuused,omitempty"` + Cpuwithoverprovisioning string `json:"cpuwithoverprovisioning,omitempty"` + Created string `json:"created,omitempty"` + Disconnected string `json:"disconnected,omitempty"` + Disksizeallocated int64 `json:"disksizeallocated,omitempty"` + Disksizetotal int64 `json:"disksizetotal,omitempty"` + Events string `json:"events,omitempty"` + Gpugroup []struct { + Gpugroupname string `json:"gpugroupname,omitempty"` + Vgpu []struct { + Maxcapacity int64 `json:"maxcapacity,omitempty"` + Maxheads int64 `json:"maxheads,omitempty"` + Maxresolutionx int64 `json:"maxresolutionx,omitempty"` + Maxresolutiony int64 `json:"maxresolutiony,omitempty"` + Maxvgpuperpgpu int64 `json:"maxvgpuperpgpu,omitempty"` + Remainingcapacity int64 `json:"remainingcapacity,omitempty"` + Vgputype string `json:"vgputype,omitempty"` + Videoram int64 `json:"videoram,omitempty"` + } `json:"vgpu,omitempty"` + } `json:"gpugroup,omitempty"` + Hahost bool `json:"hahost,omitempty"` + Hasenoughcapacity bool `json:"hasenoughcapacity,omitempty"` + Hosttags string `json:"hosttags,omitempty"` + Hypervisor string `json:"hypervisor,omitempty"` + Hypervisorversion string `json:"hypervisorversion,omitempty"` + Id string `json:"id,omitempty"` + Ipaddress string `json:"ipaddress,omitempty"` + Islocalstorageactive bool 
`json:"islocalstorageactive,omitempty"` + Lastpinged string `json:"lastpinged,omitempty"` + Managementserverid int64 `json:"managementserverid,omitempty"` + Memoryallocated int64 `json:"memoryallocated,omitempty"` + Memorytotal int64 `json:"memorytotal,omitempty"` + Memoryused int64 `json:"memoryused,omitempty"` + Name string `json:"name,omitempty"` + Networkkbsread int64 `json:"networkkbsread,omitempty"` + Networkkbswrite int64 `json:"networkkbswrite,omitempty"` + Oscategoryid string `json:"oscategoryid,omitempty"` + Oscategoryname string `json:"oscategoryname,omitempty"` + Podid string `json:"podid,omitempty"` + Podname string `json:"podname,omitempty"` + Removed string `json:"removed,omitempty"` + Resourcestate string `json:"resourcestate,omitempty"` + State string `json:"state,omitempty"` + Suitableformigration bool `json:"suitableformigration,omitempty"` + Type string `json:"type,omitempty"` + Version string `json:"version,omitempty"` + Zoneid string `json:"zoneid,omitempty"` + Zonename string `json:"zonename,omitempty"` +} + +type ListHostsParams struct { + p map[string]interface{} +} + +func (p *ListHostsParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["clusterid"]; found { + u.Set("clusterid", v.(string)) + } + if v, found := p.p["details"]; found { + vv := strings.Join(v.([]string), ",") + u.Set("details", vv) + } + if v, found := p.p["hahost"]; found { + vv := strconv.FormatBool(v.(bool)) + u.Set("hahost", vv) + } + if v, found := p.p["hypervisor"]; found { + u.Set("hypervisor", v.(string)) + } + if v, found := p.p["id"]; found { + u.Set("id", v.(string)) + } + if v, found := p.p["keyword"]; found { + u.Set("keyword", v.(string)) + } + if v, found := p.p["name"]; found { + u.Set("name", v.(string)) + } + if v, found := p.p["page"]; found { + vv := strconv.Itoa(v.(int)) + u.Set("page", vv) + } + if v, found := p.p["pagesize"]; found { + vv := strconv.Itoa(v.(int)) + u.Set("pagesize", vv) + } + if v, found := p.p["podid"]; found { + u.Set("podid", v.(string)) + } + if v, found := p.p["resourcestate"]; found { + u.Set("resourcestate", v.(string)) + } + if v, found := p.p["state"]; found { + u.Set("state", v.(string)) + } + if v, found := p.p["type"]; found { + u.Set("type", v.(string)) + } + if v, found := p.p["virtualmachineid"]; found { + u.Set("virtualmachineid", v.(string)) + } + if v, found := p.p["zoneid"]; found { + u.Set("zoneid", v.(string)) + } + return u +} + +func (p *ListHostsParams) SetClusterid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["clusterid"] = v + return +} + +func (p *ListHostsParams) SetDetails(v []string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["details"] = v + return +} + +func (p *ListHostsParams) SetHahost(v bool) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["hahost"] = v + return +} + +func (p *ListHostsParams) SetHypervisor(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["hypervisor"] = v + return +} + +func (p *ListHostsParams) SetId(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["id"] = v + return +} + +func (p *ListHostsParams) SetKeyword(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["keyword"] = v + return +} + +func (p *ListHostsParams) SetName(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["name"] = v + return +} + +func (p *ListHostsParams) SetPage(v int) { + if p.p == nil { + p.p = 
make(map[string]interface{})
+	}
+	p.p["page"] = v
+	return
+}
+
+func (p *ListHostsParams) SetPagesize(v int) {
+	if p.p == nil {
+		p.p = make(map[string]interface{})
+	}
+	p.p["pagesize"] = v
+	return
+}
+
+func (p *ListHostsParams) SetPodid(v string) {
+	if p.p == nil {
+		p.p = make(map[string]interface{})
+	}
+	p.p["podid"] = v
+	return
+}
+
+func (p *ListHostsParams) SetResourcestate(v string) {
+	if p.p == nil {
+		p.p = make(map[string]interface{})
+	}
+	p.p["resourcestate"] = v
+	return
+}
+
+func (p *ListHostsParams) SetState(v string) {
+	if p.p == nil {
+		p.p = make(map[string]interface{})
+	}
+	p.p["state"] = v
+	return
+}
+
+func (p *ListHostsParams) SetType(v string) {
+	if p.p == nil {
+		p.p = make(map[string]interface{})
+	}
+	// Store under "type" so toURLValues, which reads p.p["type"], actually
+	// serializes the value.
+	p.p["type"] = v
+	return
+}
+
+func (p *ListHostsParams) SetVirtualmachineid(v string) {
+	if p.p == nil {
+		p.p = make(map[string]interface{})
+	}
+	p.p["virtualmachineid"] = v
+	return
+}
+
+func (p *ListHostsParams) SetZoneid(v string) {
+	if p.p == nil {
+		p.p = make(map[string]interface{})
+	}
+	p.p["zoneid"] = v
+	return
+}
+
+// You should always use this function to get a new ListHostsParams instance,
+// as then you are sure you have configured all required params
+func (s *HostService) NewListHostsParams() *ListHostsParams {
+	p := &ListHostsParams{}
+	p.p = make(map[string]interface{})
+	return p
+}
+
+// This is a courtesy helper function, which in some cases may not work as expected!
+func (s *HostService) GetHostID(name string) (string, error) {
+	p := &ListHostsParams{}
+	p.p = make(map[string]interface{})
+
+	p.p["name"] = name
+
+	l, err := s.ListHosts(p)
+	if err != nil {
+		return "", err
+	}
+
+	if l.Count == 0 {
+		return "", fmt.Errorf("No match found for %s: %+v", name, l)
+	}
+
+	if l.Count == 1 {
+		return l.Hosts[0].Id, nil
+	}
+
+	if l.Count > 1 {
+		for _, v := range l.Hosts {
+			if v.Name == name {
+				return v.Id, nil
+			}
+		}
+	}
+	return "", fmt.Errorf("Could not find an exact match for %s: %+v", name, l)
+}
+
+// This is a courtesy helper function, which in some cases may not work as expected!
+func (s *HostService) GetHostByName(name string) (*Host, int, error) {
+	id, err := s.GetHostID(name)
+	if err != nil {
+		return nil, -1, err
+	}
+
+	r, count, err := s.GetHostByID(id)
+	if err != nil {
+		return nil, count, err
+	}
+	return r, count, nil
+}
+
+// This is a courtesy helper function, which in some cases may not work as expected!
+func (s *HostService) GetHostByID(id string) (*Host, int, error) {
+	p := &ListHostsParams{}
+	p.p = make(map[string]interface{})
+
+	p.p["id"] = id
+
+	l, err := s.ListHosts(p)
+	if err != nil {
+		if strings.Contains(err.Error(), fmt.Sprintf(
+			"Invalid parameter id value=%s due to incorrect long value format, "+
+				"or entity does not exist", id)) {
+			return nil, 0, fmt.Errorf("No match found for %s: %+v", id, l)
+		}
+		return nil, -1, err
+	}
+
+	if l.Count == 0 {
+		return nil, l.Count, fmt.Errorf("No match found for %s: %+v", id, l)
+	}
+
+	if l.Count == 1 {
+		return l.Hosts[0], l.Count, nil
+	}
+	return nil, l.Count, fmt.Errorf("There is more than one result for Host UUID: %s!", id)
+}
+
+// Lists hosts.
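+//
+// Paging sketch (values are illustrative; cs is a configured client):
+//
+//	p := cs.Host.NewListHostsParams()
+//	p.SetPage(1)
+//	p.SetPagesize(50)
+//	l, err := cs.Host.ListHosts(p)
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	fmt.Printf("page holds %d of %d hosts\n", len(l.Hosts), l.Count)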
+func (s *HostService) ListHosts(p *ListHostsParams) (*ListHostsResponse, error) { + resp, err := s.cs.newRequest("listHosts", p.toURLValues()) + if err != nil { + return nil, err + } + + var r ListHostsResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + return &r, nil +} + +type ListHostsResponse struct { + Count int `json:"count"` + Hosts []*Host `json:"host"` +} + +type Host struct { + Averageload int64 `json:"averageload,omitempty"` + Capabilities string `json:"capabilities,omitempty"` + Clusterid string `json:"clusterid,omitempty"` + Clustername string `json:"clustername,omitempty"` + Clustertype string `json:"clustertype,omitempty"` + Cpuallocated string `json:"cpuallocated,omitempty"` + Cpunumber int `json:"cpunumber,omitempty"` + Cpusockets int `json:"cpusockets,omitempty"` + Cpuspeed int64 `json:"cpuspeed,omitempty"` + Cpuused string `json:"cpuused,omitempty"` + Cpuwithoverprovisioning string `json:"cpuwithoverprovisioning,omitempty"` + Created string `json:"created,omitempty"` + Disconnected string `json:"disconnected,omitempty"` + Disksizeallocated int64 `json:"disksizeallocated,omitempty"` + Disksizetotal int64 `json:"disksizetotal,omitempty"` + Events string `json:"events,omitempty"` + Gpugroup []struct { + Gpugroupname string `json:"gpugroupname,omitempty"` + Vgpu []struct { + Maxcapacity int64 `json:"maxcapacity,omitempty"` + Maxheads int64 `json:"maxheads,omitempty"` + Maxresolutionx int64 `json:"maxresolutionx,omitempty"` + Maxresolutiony int64 `json:"maxresolutiony,omitempty"` + Maxvgpuperpgpu int64 `json:"maxvgpuperpgpu,omitempty"` + Remainingcapacity int64 `json:"remainingcapacity,omitempty"` + Vgputype string `json:"vgputype,omitempty"` + Videoram int64 `json:"videoram,omitempty"` + } `json:"vgpu,omitempty"` + } `json:"gpugroup,omitempty"` + Hahost bool `json:"hahost,omitempty"` + Hasenoughcapacity bool `json:"hasenoughcapacity,omitempty"` + Hosttags string `json:"hosttags,omitempty"` + Hypervisor string `json:"hypervisor,omitempty"` + Hypervisorversion string `json:"hypervisorversion,omitempty"` + Id string `json:"id,omitempty"` + Ipaddress string `json:"ipaddress,omitempty"` + Islocalstorageactive bool `json:"islocalstorageactive,omitempty"` + Lastpinged string `json:"lastpinged,omitempty"` + Managementserverid int64 `json:"managementserverid,omitempty"` + Memoryallocated int64 `json:"memoryallocated,omitempty"` + Memorytotal int64 `json:"memorytotal,omitempty"` + Memoryused int64 `json:"memoryused,omitempty"` + Name string `json:"name,omitempty"` + Networkkbsread int64 `json:"networkkbsread,omitempty"` + Networkkbswrite int64 `json:"networkkbswrite,omitempty"` + Oscategoryid string `json:"oscategoryid,omitempty"` + Oscategoryname string `json:"oscategoryname,omitempty"` + Podid string `json:"podid,omitempty"` + Podname string `json:"podname,omitempty"` + Removed string `json:"removed,omitempty"` + Resourcestate string `json:"resourcestate,omitempty"` + State string `json:"state,omitempty"` + Suitableformigration bool `json:"suitableformigration,omitempty"` + Type string `json:"type,omitempty"` + Version string `json:"version,omitempty"` + Zoneid string `json:"zoneid,omitempty"` + Zonename string `json:"zonename,omitempty"` +} + +type FindHostsForMigrationParams struct { + p map[string]interface{} +} + +func (p *FindHostsForMigrationParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["keyword"]; found { + u.Set("keyword", v.(string)) + } + if v, found := p.p["page"]; found { + vv 
:= strconv.Itoa(v.(int)) + u.Set("page", vv) + } + if v, found := p.p["pagesize"]; found { + vv := strconv.Itoa(v.(int)) + u.Set("pagesize", vv) + } + if v, found := p.p["virtualmachineid"]; found { + u.Set("virtualmachineid", v.(string)) + } + return u +} + +func (p *FindHostsForMigrationParams) SetKeyword(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["keyword"] = v + return +} + +func (p *FindHostsForMigrationParams) SetPage(v int) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["page"] = v + return +} + +func (p *FindHostsForMigrationParams) SetPagesize(v int) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["pagesize"] = v + return +} + +func (p *FindHostsForMigrationParams) SetVirtualmachineid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["virtualmachineid"] = v + return +} + +// You should always use this function to get a new FindHostsForMigrationParams instance, +// as then you are sure you have configured all required params +func (s *HostService) NewFindHostsForMigrationParams(virtualmachineid string) *FindHostsForMigrationParams { + p := &FindHostsForMigrationParams{} + p.p = make(map[string]interface{}) + p.p["virtualmachineid"] = virtualmachineid + return p +} + +// Find hosts suitable for migrating a virtual machine. +func (s *HostService) FindHostsForMigration(p *FindHostsForMigrationParams) (*FindHostsForMigrationResponse, error) { + resp, err := s.cs.newRequest("findHostsForMigration", p.toURLValues()) + if err != nil { + return nil, err + } + + var r FindHostsForMigrationResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + return &r, nil +} + +type FindHostsForMigrationResponse struct { + Averageload int64 `json:"averageload,omitempty"` + Capabilities string `json:"capabilities,omitempty"` + Clusterid string `json:"clusterid,omitempty"` + Clustername string `json:"clustername,omitempty"` + Clustertype string `json:"clustertype,omitempty"` + Cpuallocated string `json:"cpuallocated,omitempty"` + Cpunumber int `json:"cpunumber,omitempty"` + Cpuspeed int64 `json:"cpuspeed,omitempty"` + Cpuused string `json:"cpuused,omitempty"` + Cpuwithoverprovisioning string `json:"cpuwithoverprovisioning,omitempty"` + Created string `json:"created,omitempty"` + Disconnected string `json:"disconnected,omitempty"` + Disksizeallocated int64 `json:"disksizeallocated,omitempty"` + Disksizetotal int64 `json:"disksizetotal,omitempty"` + Events string `json:"events,omitempty"` + Hahost bool `json:"hahost,omitempty"` + Hasenoughcapacity bool `json:"hasenoughcapacity,omitempty"` + Hosttags string `json:"hosttags,omitempty"` + Hypervisor string `json:"hypervisor,omitempty"` + Hypervisorversion string `json:"hypervisorversion,omitempty"` + Id string `json:"id,omitempty"` + Ipaddress string `json:"ipaddress,omitempty"` + Islocalstorageactive bool `json:"islocalstorageactive,omitempty"` + Lastpinged string `json:"lastpinged,omitempty"` + Managementserverid int64 `json:"managementserverid,omitempty"` + Memoryallocated int64 `json:"memoryallocated,omitempty"` + Memorytotal int64 `json:"memorytotal,omitempty"` + Memoryused int64 `json:"memoryused,omitempty"` + Name string `json:"name,omitempty"` + Networkkbsread int64 `json:"networkkbsread,omitempty"` + Networkkbswrite int64 `json:"networkkbswrite,omitempty"` + Oscategoryid string `json:"oscategoryid,omitempty"` + Oscategoryname string `json:"oscategoryname,omitempty"` + Podid string `json:"podid,omitempty"` + Podname string 
`json:"podname,omitempty"` + Removed string `json:"removed,omitempty"` + RequiresStorageMotion bool `json:"requiresStorageMotion,omitempty"` + Resourcestate string `json:"resourcestate,omitempty"` + State string `json:"state,omitempty"` + Suitableformigration bool `json:"suitableformigration,omitempty"` + Type string `json:"type,omitempty"` + Version string `json:"version,omitempty"` + Zoneid string `json:"zoneid,omitempty"` + Zonename string `json:"zonename,omitempty"` +} + +type AddSecondaryStorageParams struct { + p map[string]interface{} +} + +func (p *AddSecondaryStorageParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["url"]; found { + u.Set("url", v.(string)) + } + if v, found := p.p["zoneid"]; found { + u.Set("zoneid", v.(string)) + } + return u +} + +func (p *AddSecondaryStorageParams) SetUrl(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["url"] = v + return +} + +func (p *AddSecondaryStorageParams) SetZoneid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["zoneid"] = v + return +} + +// You should always use this function to get a new AddSecondaryStorageParams instance, +// as then you are sure you have configured all required params +func (s *HostService) NewAddSecondaryStorageParams(url string) *AddSecondaryStorageParams { + p := &AddSecondaryStorageParams{} + p.p = make(map[string]interface{}) + p.p["url"] = url + return p +} + +// Adds secondary storage. +func (s *HostService) AddSecondaryStorage(p *AddSecondaryStorageParams) (*AddSecondaryStorageResponse, error) { + resp, err := s.cs.newRequest("addSecondaryStorage", p.toURLValues()) + if err != nil { + return nil, err + } + + var r AddSecondaryStorageResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + return &r, nil +} + +type AddSecondaryStorageResponse struct { + Details []string `json:"details,omitempty"` + Id string `json:"id,omitempty"` + Name string `json:"name,omitempty"` + Protocol string `json:"protocol,omitempty"` + Providername string `json:"providername,omitempty"` + Scope string `json:"scope,omitempty"` + Url string `json:"url,omitempty"` + Zoneid string `json:"zoneid,omitempty"` + Zonename string `json:"zonename,omitempty"` +} + +type UpdateHostPasswordParams struct { + p map[string]interface{} +} + +func (p *UpdateHostPasswordParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["clusterid"]; found { + u.Set("clusterid", v.(string)) + } + if v, found := p.p["hostid"]; found { + u.Set("hostid", v.(string)) + } + if v, found := p.p["password"]; found { + u.Set("password", v.(string)) + } + if v, found := p.p["username"]; found { + u.Set("username", v.(string)) + } + return u +} + +func (p *UpdateHostPasswordParams) SetClusterid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["clusterid"] = v + return +} + +func (p *UpdateHostPasswordParams) SetHostid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["hostid"] = v + return +} + +func (p *UpdateHostPasswordParams) SetPassword(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["password"] = v + return +} + +func (p *UpdateHostPasswordParams) SetUsername(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["username"] = v + return +} + +// You should always use this function to get a new UpdateHostPasswordParams instance, +// as then you are sure you have 
configured all required params
+func (s *HostService) NewUpdateHostPasswordParams(password string, username string) *UpdateHostPasswordParams {
+	p := &UpdateHostPasswordParams{}
+	p.p = make(map[string]interface{})
+	p.p["password"] = password
+	p.p["username"] = username
+	return p
+}
+
+// Updates the password of a host/pool on the management server.
+func (s *HostService) UpdateHostPassword(p *UpdateHostPasswordParams) (*UpdateHostPasswordResponse, error) {
+	resp, err := s.cs.newRequest("updateHostPassword", p.toURLValues())
+	if err != nil {
+		return nil, err
+	}
+
+	var r UpdateHostPasswordResponse
+	if err := json.Unmarshal(resp, &r); err != nil {
+		return nil, err
+	}
+	return &r, nil
+}
+
+type UpdateHostPasswordResponse struct {
+	Displaytext string `json:"displaytext,omitempty"`
+	Success     string `json:"success,omitempty"`
+}
+
+type ReleaseHostReservationParams struct {
+	p map[string]interface{}
+}
+
+func (p *ReleaseHostReservationParams) toURLValues() url.Values {
+	u := url.Values{}
+	if p.p == nil {
+		return u
+	}
+	if v, found := p.p["id"]; found {
+		u.Set("id", v.(string))
+	}
+	return u
+}
+
+func (p *ReleaseHostReservationParams) SetId(v string) {
+	if p.p == nil {
+		p.p = make(map[string]interface{})
+	}
+	p.p["id"] = v
+	return
+}
+
+// You should always use this function to get a new ReleaseHostReservationParams instance,
+// as then you are sure you have configured all required params
+func (s *HostService) NewReleaseHostReservationParams(id string) *ReleaseHostReservationParams {
+	p := &ReleaseHostReservationParams{}
+	p.p = make(map[string]interface{})
+	p.p["id"] = id
+	return p
+}
+
+// Releases host reservation.
+func (s *HostService) ReleaseHostReservation(p *ReleaseHostReservationParams) (*ReleaseHostReservationResponse, error) {
+	resp, err := s.cs.newRequest("releaseHostReservation", p.toURLValues())
+	if err != nil {
+		return nil, err
+	}
+
+	var r ReleaseHostReservationResponse
+	if err := json.Unmarshal(resp, &r); err != nil {
+		return nil, err
+	}
+
+	// If we have an async client, we need to wait for the async result
+	if s.cs.async {
+		b, err := s.cs.GetAsyncJobResult(r.JobID, s.cs.timeout)
+		if err != nil {
+			if err == AsyncTimeoutErr {
+				return &r, err
+			}
+			return nil, err
+		}
+
+		if err := json.Unmarshal(b, &r); err != nil {
+			return nil, err
+		}
+	}
+	return &r, nil
+}
+
+type ReleaseHostReservationResponse struct {
+	JobID       string `json:"jobid,omitempty"`
+	Displaytext string `json:"displaytext,omitempty"`
+	Success     bool   `json:"success,omitempty"`
+}
+
+type AddBaremetalHostParams struct {
+	p map[string]interface{}
+}
+
+func (p *AddBaremetalHostParams) toURLValues() url.Values {
+	u := url.Values{}
+	if p.p == nil {
+		return u
+	}
+	if v, found := p.p["allocationstate"]; found {
+		u.Set("allocationstate", v.(string))
+	}
+	if v, found := p.p["clusterid"]; found {
+		u.Set("clusterid", v.(string))
+	}
+	if v, found := p.p["clustername"]; found {
+		u.Set("clustername", v.(string))
+	}
+	if v, found := p.p["hosttags"]; found {
+		vv := strings.Join(v.([]string), ",")
+		u.Set("hosttags", vv)
+	}
+	if v, found := p.p["hypervisor"]; found {
+		u.Set("hypervisor", v.(string))
+	}
+	if v, found := p.p["ipaddress"]; found {
+		u.Set("ipaddress", v.(string))
+	}
+	if v, found := p.p["password"]; found {
+		u.Set("password", v.(string))
+	}
+	if v, found := p.p["podid"]; found {
+		u.Set("podid", v.(string))
+	}
+	if v, found := p.p["url"]; found {
+		u.Set("url", v.(string))
+	}
+	if v, found := p.p["username"]; found {
+		u.Set("username", v.(string))
+	}
+	if v, found
:= p.p["zoneid"]; found { + u.Set("zoneid", v.(string)) + } + return u +} + +func (p *AddBaremetalHostParams) SetAllocationstate(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["allocationstate"] = v + return +} + +func (p *AddBaremetalHostParams) SetClusterid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["clusterid"] = v + return +} + +func (p *AddBaremetalHostParams) SetClustername(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["clustername"] = v + return +} + +func (p *AddBaremetalHostParams) SetHosttags(v []string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["hosttags"] = v + return +} + +func (p *AddBaremetalHostParams) SetHypervisor(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["hypervisor"] = v + return +} + +func (p *AddBaremetalHostParams) SetIpaddress(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["ipaddress"] = v + return +} + +func (p *AddBaremetalHostParams) SetPassword(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["password"] = v + return +} + +func (p *AddBaremetalHostParams) SetPodid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["podid"] = v + return +} + +func (p *AddBaremetalHostParams) SetUrl(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["url"] = v + return +} + +func (p *AddBaremetalHostParams) SetUsername(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["username"] = v + return +} + +func (p *AddBaremetalHostParams) SetZoneid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["zoneid"] = v + return +} + +// You should always use this function to get a new AddBaremetalHostParams instance, +// as then you are sure you have configured all required params +func (s *HostService) NewAddBaremetalHostParams(hypervisor string, password string, podid string, url string, username string, zoneid string) *AddBaremetalHostParams { + p := &AddBaremetalHostParams{} + p.p = make(map[string]interface{}) + p.p["hypervisor"] = hypervisor + p.p["password"] = password + p.p["podid"] = podid + p.p["url"] = url + p.p["username"] = username + p.p["zoneid"] = zoneid + return p +} + +// add a baremetal host +func (s *HostService) AddBaremetalHost(p *AddBaremetalHostParams) (*AddBaremetalHostResponse, error) { + resp, err := s.cs.newRequest("addBaremetalHost", p.toURLValues()) + if err != nil { + return nil, err + } + + var r AddBaremetalHostResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + return &r, nil +} + +type AddBaremetalHostResponse struct { + Averageload int64 `json:"averageload,omitempty"` + Capabilities string `json:"capabilities,omitempty"` + Clusterid string `json:"clusterid,omitempty"` + Clustername string `json:"clustername,omitempty"` + Clustertype string `json:"clustertype,omitempty"` + Cpuallocated string `json:"cpuallocated,omitempty"` + Cpunumber int `json:"cpunumber,omitempty"` + Cpusockets int `json:"cpusockets,omitempty"` + Cpuspeed int64 `json:"cpuspeed,omitempty"` + Cpuused string `json:"cpuused,omitempty"` + Cpuwithoverprovisioning string `json:"cpuwithoverprovisioning,omitempty"` + Created string `json:"created,omitempty"` + Disconnected string `json:"disconnected,omitempty"` + Disksizeallocated int64 `json:"disksizeallocated,omitempty"` + Disksizetotal int64 `json:"disksizetotal,omitempty"` + Events string 
`json:"events,omitempty"` + Gpugroup []struct { + Gpugroupname string `json:"gpugroupname,omitempty"` + Vgpu []struct { + Maxcapacity int64 `json:"maxcapacity,omitempty"` + Maxheads int64 `json:"maxheads,omitempty"` + Maxresolutionx int64 `json:"maxresolutionx,omitempty"` + Maxresolutiony int64 `json:"maxresolutiony,omitempty"` + Maxvgpuperpgpu int64 `json:"maxvgpuperpgpu,omitempty"` + Remainingcapacity int64 `json:"remainingcapacity,omitempty"` + Vgputype string `json:"vgputype,omitempty"` + Videoram int64 `json:"videoram,omitempty"` + } `json:"vgpu,omitempty"` + } `json:"gpugroup,omitempty"` + Hahost bool `json:"hahost,omitempty"` + Hasenoughcapacity bool `json:"hasenoughcapacity,omitempty"` + Hosttags string `json:"hosttags,omitempty"` + Hypervisor string `json:"hypervisor,omitempty"` + Hypervisorversion string `json:"hypervisorversion,omitempty"` + Id string `json:"id,omitempty"` + Ipaddress string `json:"ipaddress,omitempty"` + Islocalstorageactive bool `json:"islocalstorageactive,omitempty"` + Lastpinged string `json:"lastpinged,omitempty"` + Managementserverid int64 `json:"managementserverid,omitempty"` + Memoryallocated int64 `json:"memoryallocated,omitempty"` + Memorytotal int64 `json:"memorytotal,omitempty"` + Memoryused int64 `json:"memoryused,omitempty"` + Name string `json:"name,omitempty"` + Networkkbsread int64 `json:"networkkbsread,omitempty"` + Networkkbswrite int64 `json:"networkkbswrite,omitempty"` + Oscategoryid string `json:"oscategoryid,omitempty"` + Oscategoryname string `json:"oscategoryname,omitempty"` + Podid string `json:"podid,omitempty"` + Podname string `json:"podname,omitempty"` + Removed string `json:"removed,omitempty"` + Resourcestate string `json:"resourcestate,omitempty"` + State string `json:"state,omitempty"` + Suitableformigration bool `json:"suitableformigration,omitempty"` + Type string `json:"type,omitempty"` + Version string `json:"version,omitempty"` + Zoneid string `json:"zoneid,omitempty"` + Zonename string `json:"zonename,omitempty"` +} + +type DedicateHostParams struct { + p map[string]interface{} +} + +func (p *DedicateHostParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["account"]; found { + u.Set("account", v.(string)) + } + if v, found := p.p["domainid"]; found { + u.Set("domainid", v.(string)) + } + if v, found := p.p["hostid"]; found { + u.Set("hostid", v.(string)) + } + return u +} + +func (p *DedicateHostParams) SetAccount(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["account"] = v + return +} + +func (p *DedicateHostParams) SetDomainid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["domainid"] = v + return +} + +func (p *DedicateHostParams) SetHostid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["hostid"] = v + return +} + +// You should always use this function to get a new DedicateHostParams instance, +// as then you are sure you have configured all required params +func (s *HostService) NewDedicateHostParams(domainid string, hostid string) *DedicateHostParams { + p := &DedicateHostParams{} + p.p = make(map[string]interface{}) + p.p["domainid"] = domainid + p.p["hostid"] = hostid + return p +} + +// Dedicates a host. 
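+//
+// Illustrative sketch, not part of the vendored source: how this call is used
+// with this package's async client (NewAsyncClient is defined elsewhere in the
+// package; the API URL, keys, and UUIDs below are hypothetical):
+//
+//	cs := cloudstack.NewAsyncClient("https://cloud.example.com/client/api", apiKey, secretKey, true)
+//	p := cs.Host.NewDedicateHostParams("domain-uuid", "host-uuid")
+//	r, err := cs.Host.DedicateHost(p) // with an async client this waits for the job result
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	log.Printf("host %s dedicated to domain %s", r.Hostid, r.Domainid)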
+func (s *HostService) DedicateHost(p *DedicateHostParams) (*DedicateHostResponse, error) {
+	resp, err := s.cs.newRequest("dedicateHost", p.toURLValues())
+	if err != nil {
+		return nil, err
+	}
+
+	var r DedicateHostResponse
+	if err := json.Unmarshal(resp, &r); err != nil {
+		return nil, err
+	}
+
+	// If we have an async client, we need to wait for the async result
+	if s.cs.async {
+		b, err := s.cs.GetAsyncJobResult(r.JobID, s.cs.timeout)
+		if err != nil {
+			if err == AsyncTimeoutErr {
+				return &r, err
+			}
+			return nil, err
+		}
+
+		b, err = getRawValue(b)
+		if err != nil {
+			return nil, err
+		}
+
+		if err := json.Unmarshal(b, &r); err != nil {
+			return nil, err
+		}
+	}
+	return &r, nil
+}
+
+type DedicateHostResponse struct {
+	JobID           string `json:"jobid,omitempty"`
+	Accountid       string `json:"accountid,omitempty"`
+	Affinitygroupid string `json:"affinitygroupid,omitempty"`
+	Domainid        string `json:"domainid,omitempty"`
+	Hostid          string `json:"hostid,omitempty"`
+	Hostname        string `json:"hostname,omitempty"`
+	Id              string `json:"id,omitempty"`
+}
+
+type ReleaseDedicatedHostParams struct {
+	p map[string]interface{}
+}
+
+func (p *ReleaseDedicatedHostParams) toURLValues() url.Values {
+	u := url.Values{}
+	if p.p == nil {
+		return u
+	}
+	if v, found := p.p["hostid"]; found {
+		u.Set("hostid", v.(string))
+	}
+	return u
+}
+
+func (p *ReleaseDedicatedHostParams) SetHostid(v string) {
+	if p.p == nil {
+		p.p = make(map[string]interface{})
+	}
+	p.p["hostid"] = v
+	return
+}
+
+// You should always use this function to get a new ReleaseDedicatedHostParams instance,
+// as then you are sure you have configured all required params
+func (s *HostService) NewReleaseDedicatedHostParams(hostid string) *ReleaseDedicatedHostParams {
+	p := &ReleaseDedicatedHostParams{}
+	p.p = make(map[string]interface{})
+	p.p["hostid"] = hostid
+	return p
+}
+
+// Releases the dedication for a host.
+func (s *HostService) ReleaseDedicatedHost(p *ReleaseDedicatedHostParams) (*ReleaseDedicatedHostResponse, error) {
+	resp, err := s.cs.newRequest("releaseDedicatedHost", p.toURLValues())
+	if err != nil {
+		return nil, err
+	}
+
+	var r ReleaseDedicatedHostResponse
+	if err := json.Unmarshal(resp, &r); err != nil {
+		return nil, err
+	}
+
+	// If we have an async client, we need to wait for the async result
+	if s.cs.async {
+		b, err := s.cs.GetAsyncJobResult(r.JobID, s.cs.timeout)
+		if err != nil {
+			if err == AsyncTimeoutErr {
+				return &r, err
+			}
+			return nil, err
+		}
+
+		if err := json.Unmarshal(b, &r); err != nil {
+			return nil, err
+		}
+	}
+	return &r, nil
+}
+
+type ReleaseDedicatedHostResponse struct {
+	JobID       string `json:"jobid,omitempty"`
+	Displaytext string `json:"displaytext,omitempty"`
+	Success     bool   `json:"success,omitempty"`
+}
+
+type ListDedicatedHostsParams struct {
+	p map[string]interface{}
+}
+
+func (p *ListDedicatedHostsParams) toURLValues() url.Values {
+	u := url.Values{}
+	if p.p == nil {
+		return u
+	}
+	if v, found := p.p["account"]; found {
+		u.Set("account", v.(string))
+	}
+	if v, found := p.p["affinitygroupid"]; found {
+		u.Set("affinitygroupid", v.(string))
+	}
+	if v, found := p.p["domainid"]; found {
+		u.Set("domainid", v.(string))
+	}
+	if v, found := p.p["hostid"]; found {
+		u.Set("hostid", v.(string))
+	}
+	if v, found := p.p["keyword"]; found {
+		u.Set("keyword", v.(string))
+	}
+	if v, found := p.p["page"]; found {
+		vv := strconv.Itoa(v.(int))
+		u.Set("page", vv)
+	}
+	if v, found := p.p["pagesize"]; found {
+		vv := strconv.Itoa(v.(int))
+		u.Set("pagesize", vv)
+	}
+	return u
+}
+
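+// Illustrative sketch, not part of the vendored source: paging through dedicated
+// hosts using the optional page/pagesize params whose setters follow below
+// (assumes an initialized client cs; 50 is an arbitrary page size):
+//
+//	p := cs.Host.NewListDedicatedHostsParams()
+//	p.SetPagesize(50)
+//	for page := 1; ; page++ {
+//		p.SetPage(page)
+//		l, err := cs.Host.ListDedicatedHosts(p)
+//		if err != nil {
+//			log.Fatal(err)
+//		}
+//		for _, h := range l.DedicatedHosts {
+//			log.Println(h.Hostid, h.Domainid)
+//		}
+//		if len(l.DedicatedHosts) < 50 {
+//			break
+//		}
+//	}
+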
+func (p *ListDedicatedHostsParams) SetAccount(v string) {
+	if p.p == nil {
+		p.p = make(map[string]interface{})
+	}
+	p.p["account"] = v
+	return
+}
+
+func (p *ListDedicatedHostsParams) SetAffinitygroupid(v string) {
+	if p.p == nil {
+		p.p = make(map[string]interface{})
+	}
+	p.p["affinitygroupid"] = v
+	return
+}
+
+func (p *ListDedicatedHostsParams) SetDomainid(v string) {
+	if p.p == nil {
+		p.p = make(map[string]interface{})
+	}
+	p.p["domainid"] = v
+	return
+}
+
+func (p *ListDedicatedHostsParams) SetHostid(v string) {
+	if p.p == nil {
+		p.p = make(map[string]interface{})
+	}
+	p.p["hostid"] = v
+	return
+}
+
+func (p *ListDedicatedHostsParams) SetKeyword(v string) {
+	if p.p == nil {
+		p.p = make(map[string]interface{})
+	}
+	p.p["keyword"] = v
+	return
+}
+
+func (p *ListDedicatedHostsParams) SetPage(v int) {
+	if p.p == nil {
+		p.p = make(map[string]interface{})
+	}
+	p.p["page"] = v
+	return
+}
+
+func (p *ListDedicatedHostsParams) SetPagesize(v int) {
+	if p.p == nil {
+		p.p = make(map[string]interface{})
+	}
+	p.p["pagesize"] = v
+	return
+}
+
+// You should always use this function to get a new ListDedicatedHostsParams instance,
+// as then you are sure you have configured all required params
+func (s *HostService) NewListDedicatedHostsParams() *ListDedicatedHostsParams {
+	p := &ListDedicatedHostsParams{}
+	p.p = make(map[string]interface{})
+	return p
+}
+
+// Lists dedicated hosts.
+func (s *HostService) ListDedicatedHosts(p *ListDedicatedHostsParams) (*ListDedicatedHostsResponse, error) {
+	resp, err := s.cs.newRequest("listDedicatedHosts", p.toURLValues())
+	if err != nil {
+		return nil, err
+	}
+
+	var r ListDedicatedHostsResponse
+	if err := json.Unmarshal(resp, &r); err != nil {
+		return nil, err
+	}
+	return &r, nil
+}
+
+type ListDedicatedHostsResponse struct {
+	Count          int              `json:"count"`
+	DedicatedHosts []*DedicatedHost `json:"dedicatedhost"`
+}
+
+type DedicatedHost struct {
+	Accountid       string `json:"accountid,omitempty"`
+	Affinitygroupid string `json:"affinitygroupid,omitempty"`
+	Domainid        string `json:"domainid,omitempty"`
+	Hostid          string `json:"hostid,omitempty"`
+	Hostname        string `json:"hostname,omitempty"`
+	Id              string `json:"id,omitempty"`
+}
diff --git a/vendor/github.com/xanzy/go-cloudstack/cloudstack/HypervisorService.go b/vendor/github.com/xanzy/go-cloudstack/cloudstack/HypervisorService.go
new file mode 100644
index 000000000..aaabfadaa
--- /dev/null
+++ b/vendor/github.com/xanzy/go-cloudstack/cloudstack/HypervisorService.go
@@ -0,0 +1,293 @@
+//
+// Copyright 2014, Sander van Harmelen
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+package cloudstack
+
+import (
+	"encoding/json"
+	"fmt"
+	"net/url"
+	"strconv"
+	"strings"
+)
+
+type ListHypervisorsParams struct {
+	p map[string]interface{}
+}
+
+func (p *ListHypervisorsParams) toURLValues() url.Values {
+	u := url.Values{}
+	if p.p == nil {
+		return u
+	}
+	if v, found := p.p["zoneid"]; found {
+		u.Set("zoneid", v.(string))
+	}
+	return u
+}
+
+func (p *ListHypervisorsParams) SetZoneid(v string) {
+	if p.p == nil {
+		p.p = make(map[string]interface{})
+	}
+	p.p["zoneid"] = v
+	return
+}
+
+// You should always use this function to get a new ListHypervisorsParams instance,
+// as then you are sure you have configured all required params
+func (s *HypervisorService) NewListHypervisorsParams() *ListHypervisorsParams {
+	p := &ListHypervisorsParams{}
+	p.p = make(map[string]interface{})
+	return p
+}
+
+// Lists hypervisors.
+func (s *HypervisorService) ListHypervisors(p *ListHypervisorsParams) (*ListHypervisorsResponse, error) {
+	resp, err := s.cs.newRequest("listHypervisors", p.toURLValues())
+	if err != nil {
+		return nil, err
+	}
+
+	var r ListHypervisorsResponse
+	if err := json.Unmarshal(resp, &r); err != nil {
+		return nil, err
+	}
+	return &r, nil
+}
+
+type ListHypervisorsResponse struct {
+	Count       int           `json:"count"`
+	Hypervisors []*Hypervisor `json:"hypervisor"`
+}
+
+type Hypervisor struct {
+	Name string `json:"name,omitempty"`
+}
+
+type UpdateHypervisorCapabilitiesParams struct {
+	p map[string]interface{}
+}
+
+func (p *UpdateHypervisorCapabilitiesParams) toURLValues() url.Values {
+	u := url.Values{}
+	if p.p == nil {
+		return u
+	}
+	if v, found := p.p["id"]; found {
+		u.Set("id", v.(string))
+	}
+	if v, found := p.p["maxguestslimit"]; found {
+		vv := strconv.FormatInt(v.(int64), 10)
+		u.Set("maxguestslimit", vv)
+	}
+	if v, found := p.p["securitygroupenabled"]; found {
+		vv := strconv.FormatBool(v.(bool))
+		u.Set("securitygroupenabled", vv)
+	}
+	return u
+}
+
+func (p *UpdateHypervisorCapabilitiesParams) SetId(v string) {
+	if p.p == nil {
+		p.p = make(map[string]interface{})
+	}
+	p.p["id"] = v
+	return
+}
+
+func (p *UpdateHypervisorCapabilitiesParams) SetMaxguestslimit(v int64) {
+	if p.p == nil {
+		p.p = make(map[string]interface{})
+	}
+	p.p["maxguestslimit"] = v
+	return
+}
+
+func (p *UpdateHypervisorCapabilitiesParams) SetSecuritygroupenabled(v bool) {
+	if p.p == nil {
+		p.p = make(map[string]interface{})
+	}
+	p.p["securitygroupenabled"] = v
+	return
+}
+
+// You should always use this function to get a new UpdateHypervisorCapabilitiesParams instance,
+// as then you are sure you have configured all required params
+func (s *HypervisorService) NewUpdateHypervisorCapabilitiesParams() *UpdateHypervisorCapabilitiesParams {
+	p := &UpdateHypervisorCapabilitiesParams{}
+	p.p = make(map[string]interface{})
+	return p
+}
+
+// Updates hypervisor capabilities.
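+//
+// Illustrative sketch, not part of the vendored source (assumes an initialized
+// client cs; the capability UUID is hypothetical). All params are optional, so
+// the constructor takes no arguments and values are supplied via the setters:
+//
+//	p := cs.Hypervisor.NewUpdateHypervisorCapabilitiesParams()
+//	p.SetId("capability-uuid")
+//	p.SetMaxguestslimit(100)
+//	r, err := cs.Hypervisor.UpdateHypervisorCapabilities(p)
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	log.Println(r.Maxguestslimit)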
+func (s *HypervisorService) UpdateHypervisorCapabilities(p *UpdateHypervisorCapabilitiesParams) (*UpdateHypervisorCapabilitiesResponse, error) {
+	resp, err := s.cs.newRequest("updateHypervisorCapabilities", p.toURLValues())
+	if err != nil {
+		return nil, err
+	}
+
+	var r UpdateHypervisorCapabilitiesResponse
+	if err := json.Unmarshal(resp, &r); err != nil {
+		return nil, err
+	}
+	return &r, nil
+}
+
+type UpdateHypervisorCapabilitiesResponse struct {
+	Hypervisor           string `json:"hypervisor,omitempty"`
+	Hypervisorversion    string `json:"hypervisorversion,omitempty"`
+	Id                   string `json:"id,omitempty"`
+	Maxdatavolumeslimit  int    `json:"maxdatavolumeslimit,omitempty"`
+	Maxguestslimit       int64  `json:"maxguestslimit,omitempty"`
+	Maxhostspercluster   int    `json:"maxhostspercluster,omitempty"`
+	Securitygroupenabled bool   `json:"securitygroupenabled,omitempty"`
+	Storagemotionenabled bool   `json:"storagemotionenabled,omitempty"`
+}
+
+type ListHypervisorCapabilitiesParams struct {
+	p map[string]interface{}
+}
+
+func (p *ListHypervisorCapabilitiesParams) toURLValues() url.Values {
+	u := url.Values{}
+	if p.p == nil {
+		return u
+	}
+	if v, found := p.p["hypervisor"]; found {
+		u.Set("hypervisor", v.(string))
+	}
+	if v, found := p.p["id"]; found {
+		u.Set("id", v.(string))
+	}
+	if v, found := p.p["keyword"]; found {
+		u.Set("keyword", v.(string))
+	}
+	if v, found := p.p["page"]; found {
+		vv := strconv.Itoa(v.(int))
+		u.Set("page", vv)
+	}
+	if v, found := p.p["pagesize"]; found {
+		vv := strconv.Itoa(v.(int))
+		u.Set("pagesize", vv)
+	}
+	return u
+}
+
+func (p *ListHypervisorCapabilitiesParams) SetHypervisor(v string) {
+	if p.p == nil {
+		p.p = make(map[string]interface{})
+	}
+	p.p["hypervisor"] = v
+	return
+}
+
+func (p *ListHypervisorCapabilitiesParams) SetId(v string) {
+	if p.p == nil {
+		p.p = make(map[string]interface{})
+	}
+	p.p["id"] = v
+	return
+}
+
+func (p *ListHypervisorCapabilitiesParams) SetKeyword(v string) {
+	if p.p == nil {
+		p.p = make(map[string]interface{})
+	}
+	p.p["keyword"] = v
+	return
+}
+
+func (p *ListHypervisorCapabilitiesParams) SetPage(v int) {
+	if p.p == nil {
+		p.p = make(map[string]interface{})
+	}
+	p.p["page"] = v
+	return
+}
+
+func (p *ListHypervisorCapabilitiesParams) SetPagesize(v int) {
+	if p.p == nil {
+		p.p = make(map[string]interface{})
+	}
+	p.p["pagesize"] = v
+	return
+}
+
+// You should always use this function to get a new ListHypervisorCapabilitiesParams instance,
+// as then you are sure you have configured all required params
+func (s *HypervisorService) NewListHypervisorCapabilitiesParams() *ListHypervisorCapabilitiesParams {
+	p := &ListHypervisorCapabilitiesParams{}
+	p.p = make(map[string]interface{})
+	return p
+}
+
+// This is a courtesy helper function, which in some cases may not work as expected!
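+//
+// Illustrative sketch of the by-ID lookup this helper performs, not part of the
+// vendored source (assumes an initialized cs; the UUID is hypothetical). The
+// second return value is the match count, which is 0 or >1 on the error paths:
+//
+//	c, count, err := cs.Hypervisor.GetHypervisorCapabilityByID("capability-uuid")
+//	if err != nil {
+//		log.Fatalf("lookup failed (count %d): %v", count, err)
+//	}
+//	log.Println(c.Hypervisor, c.Maxguestslimit)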
+func (s *HypervisorService) GetHypervisorCapabilityByID(id string) (*HypervisorCapability, int, error) {
+	p := &ListHypervisorCapabilitiesParams{}
+	p.p = make(map[string]interface{})
+
+	p.p["id"] = id
+
+	l, err := s.ListHypervisorCapabilities(p)
+	if err != nil {
+		if strings.Contains(err.Error(), fmt.Sprintf(
+			"Invalid parameter id value=%s due to incorrect long value format, "+
+				"or entity does not exist", id)) {
+			return nil, 0, fmt.Errorf("No match found for %s: %+v", id, l)
+		}
+		return nil, -1, err
+	}
+
+	if l.Count == 0 {
+		return nil, l.Count, fmt.Errorf("No match found for %s: %+v", id, l)
+	}
+
+	if l.Count == 1 {
+		return l.HypervisorCapabilities[0], l.Count, nil
+	}
+	return nil, l.Count, fmt.Errorf("There is more than one result for HypervisorCapability UUID: %s!", id)
+}
+
+// Lists all hypervisor capabilities.
+func (s *HypervisorService) ListHypervisorCapabilities(p *ListHypervisorCapabilitiesParams) (*ListHypervisorCapabilitiesResponse, error) {
+	resp, err := s.cs.newRequest("listHypervisorCapabilities", p.toURLValues())
+	if err != nil {
+		return nil, err
+	}
+
+	var r ListHypervisorCapabilitiesResponse
+	if err := json.Unmarshal(resp, &r); err != nil {
+		return nil, err
+	}
+	return &r, nil
+}
+
+type ListHypervisorCapabilitiesResponse struct {
+	Count                  int                     `json:"count"`
+	HypervisorCapabilities []*HypervisorCapability `json:"hypervisorcapability"`
+}
+
+type HypervisorCapability struct {
+	Hypervisor           string `json:"hypervisor,omitempty"`
+	Hypervisorversion    string `json:"hypervisorversion,omitempty"`
+	Id                   string `json:"id,omitempty"`
+	Maxdatavolumeslimit  int    `json:"maxdatavolumeslimit,omitempty"`
+	Maxguestslimit       int64  `json:"maxguestslimit,omitempty"`
+	Maxhostspercluster   int    `json:"maxhostspercluster,omitempty"`
+	Securitygroupenabled bool   `json:"securitygroupenabled,omitempty"`
+	Storagemotionenabled bool   `json:"storagemotionenabled,omitempty"`
+}
diff --git a/vendor/github.com/xanzy/go-cloudstack/cloudstack/ISOService.go b/vendor/github.com/xanzy/go-cloudstack/cloudstack/ISOService.go
new file mode 100644
index 000000000..62348a7f0
--- /dev/null
+++ b/vendor/github.com/xanzy/go-cloudstack/cloudstack/ISOService.go
@@ -0,0 +1,1928 @@
+//
+// Copyright 2014, Sander van Harmelen
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+// + +package cloudstack + +import ( + "encoding/json" + "fmt" + "net/url" + "strconv" + "strings" +) + +type AttachIsoParams struct { + p map[string]interface{} +} + +func (p *AttachIsoParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["id"]; found { + u.Set("id", v.(string)) + } + if v, found := p.p["virtualmachineid"]; found { + u.Set("virtualmachineid", v.(string)) + } + return u +} + +func (p *AttachIsoParams) SetId(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["id"] = v + return +} + +func (p *AttachIsoParams) SetVirtualmachineid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["virtualmachineid"] = v + return +} + +// You should always use this function to get a new AttachIsoParams instance, +// as then you are sure you have configured all required params +func (s *ISOService) NewAttachIsoParams(id string, virtualmachineid string) *AttachIsoParams { + p := &AttachIsoParams{} + p.p = make(map[string]interface{}) + p.p["id"] = id + p.p["virtualmachineid"] = virtualmachineid + return p +} + +// Attaches an ISO to a virtual machine. +func (s *ISOService) AttachIso(p *AttachIsoParams) (*AttachIsoResponse, error) { + resp, err := s.cs.newRequest("attachIso", p.toURLValues()) + if err != nil { + return nil, err + } + + var r AttachIsoResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + + // If we have a async client, we need to wait for the async result + if s.cs.async { + b, err := s.cs.GetAsyncJobResult(r.JobID, s.cs.timeout) + if err != nil { + if err == AsyncTimeoutErr { + return &r, err + } + return nil, err + } + + b, err = getRawValue(b) + if err != nil { + return nil, err + } + + if err := json.Unmarshal(b, &r); err != nil { + return nil, err + } + } + return &r, nil +} + +type AttachIsoResponse struct { + JobID string `json:"jobid,omitempty"` + Account string `json:"account,omitempty"` + Affinitygroup []struct { + Account string `json:"account,omitempty"` + Description string `json:"description,omitempty"` + Domain string `json:"domain,omitempty"` + Domainid string `json:"domainid,omitempty"` + Id string `json:"id,omitempty"` + Name string `json:"name,omitempty"` + Type string `json:"type,omitempty"` + VirtualmachineIds []string `json:"virtualmachineIds,omitempty"` + } `json:"affinitygroup,omitempty"` + Cpunumber int `json:"cpunumber,omitempty"` + Cpuspeed int `json:"cpuspeed,omitempty"` + Cpuused string `json:"cpuused,omitempty"` + Created string `json:"created,omitempty"` + Details map[string]string `json:"details,omitempty"` + Diskioread int64 `json:"diskioread,omitempty"` + Diskiowrite int64 `json:"diskiowrite,omitempty"` + Diskkbsread int64 `json:"diskkbsread,omitempty"` + Diskkbswrite int64 `json:"diskkbswrite,omitempty"` + Diskofferingid string `json:"diskofferingid,omitempty"` + Diskofferingname string `json:"diskofferingname,omitempty"` + Displayname string `json:"displayname,omitempty"` + Displayvm bool `json:"displayvm,omitempty"` + Domain string `json:"domain,omitempty"` + Domainid string `json:"domainid,omitempty"` + Forvirtualnetwork bool `json:"forvirtualnetwork,omitempty"` + Group string `json:"group,omitempty"` + Groupid string `json:"groupid,omitempty"` + Guestosid string `json:"guestosid,omitempty"` + Haenable bool `json:"haenable,omitempty"` + Hostid string `json:"hostid,omitempty"` + Hostname string `json:"hostname,omitempty"` + Hypervisor string `json:"hypervisor,omitempty"` + Id string `json:"id,omitempty"` + 
Instancename string `json:"instancename,omitempty"` + Isdynamicallyscalable bool `json:"isdynamicallyscalable,omitempty"` + Isodisplaytext string `json:"isodisplaytext,omitempty"` + Isoid string `json:"isoid,omitempty"` + Isoname string `json:"isoname,omitempty"` + Keypair string `json:"keypair,omitempty"` + Memory int `json:"memory,omitempty"` + Name string `json:"name,omitempty"` + Networkkbsread int64 `json:"networkkbsread,omitempty"` + Networkkbswrite int64 `json:"networkkbswrite,omitempty"` + Nic []struct { + Broadcasturi string `json:"broadcasturi,omitempty"` + Deviceid string `json:"deviceid,omitempty"` + Gateway string `json:"gateway,omitempty"` + Id string `json:"id,omitempty"` + Ip6address string `json:"ip6address,omitempty"` + Ip6cidr string `json:"ip6cidr,omitempty"` + Ip6gateway string `json:"ip6gateway,omitempty"` + Ipaddress string `json:"ipaddress,omitempty"` + Isdefault bool `json:"isdefault,omitempty"` + Isolationuri string `json:"isolationuri,omitempty"` + Macaddress string `json:"macaddress,omitempty"` + Netmask string `json:"netmask,omitempty"` + Networkid string `json:"networkid,omitempty"` + Networkname string `json:"networkname,omitempty"` + Secondaryip []struct { + Id string `json:"id,omitempty"` + Ipaddress string `json:"ipaddress,omitempty"` + } `json:"secondaryip,omitempty"` + Traffictype string `json:"traffictype,omitempty"` + Type string `json:"type,omitempty"` + Virtualmachineid string `json:"virtualmachineid,omitempty"` + } `json:"nic,omitempty"` + Ostypeid int64 `json:"ostypeid,omitempty"` + Password string `json:"password,omitempty"` + Passwordenabled bool `json:"passwordenabled,omitempty"` + Project string `json:"project,omitempty"` + Projectid string `json:"projectid,omitempty"` + Publicip string `json:"publicip,omitempty"` + Publicipid string `json:"publicipid,omitempty"` + Rootdeviceid int64 `json:"rootdeviceid,omitempty"` + Rootdevicetype string `json:"rootdevicetype,omitempty"` + Securitygroup []struct { + Account string `json:"account,omitempty"` + Description string `json:"description,omitempty"` + Domain string `json:"domain,omitempty"` + Domainid string `json:"domainid,omitempty"` + Egressrule []struct { + Account string `json:"account,omitempty"` + Cidr string `json:"cidr,omitempty"` + Endport int `json:"endport,omitempty"` + Icmpcode int `json:"icmpcode,omitempty"` + Icmptype int `json:"icmptype,omitempty"` + Protocol string `json:"protocol,omitempty"` + Ruleid string `json:"ruleid,omitempty"` + Securitygroupname string `json:"securitygroupname,omitempty"` + Startport int `json:"startport,omitempty"` + Tags []struct { + Account string `json:"account,omitempty"` + Customer string `json:"customer,omitempty"` + Domain string `json:"domain,omitempty"` + Domainid string `json:"domainid,omitempty"` + Key string `json:"key,omitempty"` + Project string `json:"project,omitempty"` + Projectid string `json:"projectid,omitempty"` + Resourceid string `json:"resourceid,omitempty"` + Resourcetype string `json:"resourcetype,omitempty"` + Value string `json:"value,omitempty"` + } `json:"tags,omitempty"` + } `json:"egressrule,omitempty"` + Id string `json:"id,omitempty"` + Ingressrule []struct { + Account string `json:"account,omitempty"` + Cidr string `json:"cidr,omitempty"` + Endport int `json:"endport,omitempty"` + Icmpcode int `json:"icmpcode,omitempty"` + Icmptype int `json:"icmptype,omitempty"` + Protocol string `json:"protocol,omitempty"` + Ruleid string `json:"ruleid,omitempty"` + Securitygroupname string `json:"securitygroupname,omitempty"` + Startport 
int `json:"startport,omitempty"` + Tags []struct { + Account string `json:"account,omitempty"` + Customer string `json:"customer,omitempty"` + Domain string `json:"domain,omitempty"` + Domainid string `json:"domainid,omitempty"` + Key string `json:"key,omitempty"` + Project string `json:"project,omitempty"` + Projectid string `json:"projectid,omitempty"` + Resourceid string `json:"resourceid,omitempty"` + Resourcetype string `json:"resourcetype,omitempty"` + Value string `json:"value,omitempty"` + } `json:"tags,omitempty"` + } `json:"ingressrule,omitempty"` + Name string `json:"name,omitempty"` + Project string `json:"project,omitempty"` + Projectid string `json:"projectid,omitempty"` + Tags []struct { + Account string `json:"account,omitempty"` + Customer string `json:"customer,omitempty"` + Domain string `json:"domain,omitempty"` + Domainid string `json:"domainid,omitempty"` + Key string `json:"key,omitempty"` + Project string `json:"project,omitempty"` + Projectid string `json:"projectid,omitempty"` + Resourceid string `json:"resourceid,omitempty"` + Resourcetype string `json:"resourcetype,omitempty"` + Value string `json:"value,omitempty"` + } `json:"tags,omitempty"` + } `json:"securitygroup,omitempty"` + Serviceofferingid string `json:"serviceofferingid,omitempty"` + Serviceofferingname string `json:"serviceofferingname,omitempty"` + Servicestate string `json:"servicestate,omitempty"` + State string `json:"state,omitempty"` + Tags []struct { + Account string `json:"account,omitempty"` + Customer string `json:"customer,omitempty"` + Domain string `json:"domain,omitempty"` + Domainid string `json:"domainid,omitempty"` + Key string `json:"key,omitempty"` + Project string `json:"project,omitempty"` + Projectid string `json:"projectid,omitempty"` + Resourceid string `json:"resourceid,omitempty"` + Resourcetype string `json:"resourcetype,omitempty"` + Value string `json:"value,omitempty"` + } `json:"tags,omitempty"` + Templatedisplaytext string `json:"templatedisplaytext,omitempty"` + Templateid string `json:"templateid,omitempty"` + Templatename string `json:"templatename,omitempty"` + Vgpu string `json:"vgpu,omitempty"` + Zoneid string `json:"zoneid,omitempty"` + Zonename string `json:"zonename,omitempty"` +} + +type DetachIsoParams struct { + p map[string]interface{} +} + +func (p *DetachIsoParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["virtualmachineid"]; found { + u.Set("virtualmachineid", v.(string)) + } + return u +} + +func (p *DetachIsoParams) SetVirtualmachineid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["virtualmachineid"] = v + return +} + +// You should always use this function to get a new DetachIsoParams instance, +// as then you are sure you have configured all required params +func (s *ISOService) NewDetachIsoParams(virtualmachineid string) *DetachIsoParams { + p := &DetachIsoParams{} + p.p = make(map[string]interface{}) + p.p["virtualmachineid"] = virtualmachineid + return p +} + +// Detaches any ISO file (if any) currently attached to a virtual machine. 
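+//
+// Illustrative sketch, not part of the vendored source (assumes an initialized
+// client cs; the VM UUID is hypothetical). Only the virtualmachineid is
+// required, so it is the constructor's sole argument:
+//
+//	p := cs.ISO.NewDetachIsoParams("vm-uuid")
+//	r, err := cs.ISO.DetachIso(p) // with an async client this waits for the job result
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	log.Println(r.Name, r.State)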
+func (s *ISOService) DetachIso(p *DetachIsoParams) (*DetachIsoResponse, error) { + resp, err := s.cs.newRequest("detachIso", p.toURLValues()) + if err != nil { + return nil, err + } + + var r DetachIsoResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + + // If we have a async client, we need to wait for the async result + if s.cs.async { + b, err := s.cs.GetAsyncJobResult(r.JobID, s.cs.timeout) + if err != nil { + if err == AsyncTimeoutErr { + return &r, err + } + return nil, err + } + + b, err = getRawValue(b) + if err != nil { + return nil, err + } + + if err := json.Unmarshal(b, &r); err != nil { + return nil, err + } + } + return &r, nil +} + +type DetachIsoResponse struct { + JobID string `json:"jobid,omitempty"` + Account string `json:"account,omitempty"` + Affinitygroup []struct { + Account string `json:"account,omitempty"` + Description string `json:"description,omitempty"` + Domain string `json:"domain,omitempty"` + Domainid string `json:"domainid,omitempty"` + Id string `json:"id,omitempty"` + Name string `json:"name,omitempty"` + Type string `json:"type,omitempty"` + VirtualmachineIds []string `json:"virtualmachineIds,omitempty"` + } `json:"affinitygroup,omitempty"` + Cpunumber int `json:"cpunumber,omitempty"` + Cpuspeed int `json:"cpuspeed,omitempty"` + Cpuused string `json:"cpuused,omitempty"` + Created string `json:"created,omitempty"` + Details map[string]string `json:"details,omitempty"` + Diskioread int64 `json:"diskioread,omitempty"` + Diskiowrite int64 `json:"diskiowrite,omitempty"` + Diskkbsread int64 `json:"diskkbsread,omitempty"` + Diskkbswrite int64 `json:"diskkbswrite,omitempty"` + Diskofferingid string `json:"diskofferingid,omitempty"` + Diskofferingname string `json:"diskofferingname,omitempty"` + Displayname string `json:"displayname,omitempty"` + Displayvm bool `json:"displayvm,omitempty"` + Domain string `json:"domain,omitempty"` + Domainid string `json:"domainid,omitempty"` + Forvirtualnetwork bool `json:"forvirtualnetwork,omitempty"` + Group string `json:"group,omitempty"` + Groupid string `json:"groupid,omitempty"` + Guestosid string `json:"guestosid,omitempty"` + Haenable bool `json:"haenable,omitempty"` + Hostid string `json:"hostid,omitempty"` + Hostname string `json:"hostname,omitempty"` + Hypervisor string `json:"hypervisor,omitempty"` + Id string `json:"id,omitempty"` + Instancename string `json:"instancename,omitempty"` + Isdynamicallyscalable bool `json:"isdynamicallyscalable,omitempty"` + Isodisplaytext string `json:"isodisplaytext,omitempty"` + Isoid string `json:"isoid,omitempty"` + Isoname string `json:"isoname,omitempty"` + Keypair string `json:"keypair,omitempty"` + Memory int `json:"memory,omitempty"` + Name string `json:"name,omitempty"` + Networkkbsread int64 `json:"networkkbsread,omitempty"` + Networkkbswrite int64 `json:"networkkbswrite,omitempty"` + Nic []struct { + Broadcasturi string `json:"broadcasturi,omitempty"` + Deviceid string `json:"deviceid,omitempty"` + Gateway string `json:"gateway,omitempty"` + Id string `json:"id,omitempty"` + Ip6address string `json:"ip6address,omitempty"` + Ip6cidr string `json:"ip6cidr,omitempty"` + Ip6gateway string `json:"ip6gateway,omitempty"` + Ipaddress string `json:"ipaddress,omitempty"` + Isdefault bool `json:"isdefault,omitempty"` + Isolationuri string `json:"isolationuri,omitempty"` + Macaddress string `json:"macaddress,omitempty"` + Netmask string `json:"netmask,omitempty"` + Networkid string `json:"networkid,omitempty"` + Networkname string 
`json:"networkname,omitempty"` + Secondaryip []struct { + Id string `json:"id,omitempty"` + Ipaddress string `json:"ipaddress,omitempty"` + } `json:"secondaryip,omitempty"` + Traffictype string `json:"traffictype,omitempty"` + Type string `json:"type,omitempty"` + Virtualmachineid string `json:"virtualmachineid,omitempty"` + } `json:"nic,omitempty"` + Ostypeid int64 `json:"ostypeid,omitempty"` + Password string `json:"password,omitempty"` + Passwordenabled bool `json:"passwordenabled,omitempty"` + Project string `json:"project,omitempty"` + Projectid string `json:"projectid,omitempty"` + Publicip string `json:"publicip,omitempty"` + Publicipid string `json:"publicipid,omitempty"` + Rootdeviceid int64 `json:"rootdeviceid,omitempty"` + Rootdevicetype string `json:"rootdevicetype,omitempty"` + Securitygroup []struct { + Account string `json:"account,omitempty"` + Description string `json:"description,omitempty"` + Domain string `json:"domain,omitempty"` + Domainid string `json:"domainid,omitempty"` + Egressrule []struct { + Account string `json:"account,omitempty"` + Cidr string `json:"cidr,omitempty"` + Endport int `json:"endport,omitempty"` + Icmpcode int `json:"icmpcode,omitempty"` + Icmptype int `json:"icmptype,omitempty"` + Protocol string `json:"protocol,omitempty"` + Ruleid string `json:"ruleid,omitempty"` + Securitygroupname string `json:"securitygroupname,omitempty"` + Startport int `json:"startport,omitempty"` + Tags []struct { + Account string `json:"account,omitempty"` + Customer string `json:"customer,omitempty"` + Domain string `json:"domain,omitempty"` + Domainid string `json:"domainid,omitempty"` + Key string `json:"key,omitempty"` + Project string `json:"project,omitempty"` + Projectid string `json:"projectid,omitempty"` + Resourceid string `json:"resourceid,omitempty"` + Resourcetype string `json:"resourcetype,omitempty"` + Value string `json:"value,omitempty"` + } `json:"tags,omitempty"` + } `json:"egressrule,omitempty"` + Id string `json:"id,omitempty"` + Ingressrule []struct { + Account string `json:"account,omitempty"` + Cidr string `json:"cidr,omitempty"` + Endport int `json:"endport,omitempty"` + Icmpcode int `json:"icmpcode,omitempty"` + Icmptype int `json:"icmptype,omitempty"` + Protocol string `json:"protocol,omitempty"` + Ruleid string `json:"ruleid,omitempty"` + Securitygroupname string `json:"securitygroupname,omitempty"` + Startport int `json:"startport,omitempty"` + Tags []struct { + Account string `json:"account,omitempty"` + Customer string `json:"customer,omitempty"` + Domain string `json:"domain,omitempty"` + Domainid string `json:"domainid,omitempty"` + Key string `json:"key,omitempty"` + Project string `json:"project,omitempty"` + Projectid string `json:"projectid,omitempty"` + Resourceid string `json:"resourceid,omitempty"` + Resourcetype string `json:"resourcetype,omitempty"` + Value string `json:"value,omitempty"` + } `json:"tags,omitempty"` + } `json:"ingressrule,omitempty"` + Name string `json:"name,omitempty"` + Project string `json:"project,omitempty"` + Projectid string `json:"projectid,omitempty"` + Tags []struct { + Account string `json:"account,omitempty"` + Customer string `json:"customer,omitempty"` + Domain string `json:"domain,omitempty"` + Domainid string `json:"domainid,omitempty"` + Key string `json:"key,omitempty"` + Project string `json:"project,omitempty"` + Projectid string `json:"projectid,omitempty"` + Resourceid string `json:"resourceid,omitempty"` + Resourcetype string `json:"resourcetype,omitempty"` + Value string 
`json:"value,omitempty"` + } `json:"tags,omitempty"` + } `json:"securitygroup,omitempty"` + Serviceofferingid string `json:"serviceofferingid,omitempty"` + Serviceofferingname string `json:"serviceofferingname,omitempty"` + Servicestate string `json:"servicestate,omitempty"` + State string `json:"state,omitempty"` + Tags []struct { + Account string `json:"account,omitempty"` + Customer string `json:"customer,omitempty"` + Domain string `json:"domain,omitempty"` + Domainid string `json:"domainid,omitempty"` + Key string `json:"key,omitempty"` + Project string `json:"project,omitempty"` + Projectid string `json:"projectid,omitempty"` + Resourceid string `json:"resourceid,omitempty"` + Resourcetype string `json:"resourcetype,omitempty"` + Value string `json:"value,omitempty"` + } `json:"tags,omitempty"` + Templatedisplaytext string `json:"templatedisplaytext,omitempty"` + Templateid string `json:"templateid,omitempty"` + Templatename string `json:"templatename,omitempty"` + Vgpu string `json:"vgpu,omitempty"` + Zoneid string `json:"zoneid,omitempty"` + Zonename string `json:"zonename,omitempty"` +} + +type ListIsosParams struct { + p map[string]interface{} +} + +func (p *ListIsosParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["account"]; found { + u.Set("account", v.(string)) + } + if v, found := p.p["bootable"]; found { + vv := strconv.FormatBool(v.(bool)) + u.Set("bootable", vv) + } + if v, found := p.p["domainid"]; found { + u.Set("domainid", v.(string)) + } + if v, found := p.p["hypervisor"]; found { + u.Set("hypervisor", v.(string)) + } + if v, found := p.p["id"]; found { + u.Set("id", v.(string)) + } + if v, found := p.p["isofilter"]; found { + u.Set("isofilter", v.(string)) + } + if v, found := p.p["ispublic"]; found { + vv := strconv.FormatBool(v.(bool)) + u.Set("ispublic", vv) + } + if v, found := p.p["isready"]; found { + vv := strconv.FormatBool(v.(bool)) + u.Set("isready", vv) + } + if v, found := p.p["isrecursive"]; found { + vv := strconv.FormatBool(v.(bool)) + u.Set("isrecursive", vv) + } + if v, found := p.p["keyword"]; found { + u.Set("keyword", v.(string)) + } + if v, found := p.p["listall"]; found { + vv := strconv.FormatBool(v.(bool)) + u.Set("listall", vv) + } + if v, found := p.p["name"]; found { + u.Set("name", v.(string)) + } + if v, found := p.p["page"]; found { + vv := strconv.Itoa(v.(int)) + u.Set("page", vv) + } + if v, found := p.p["pagesize"]; found { + vv := strconv.Itoa(v.(int)) + u.Set("pagesize", vv) + } + if v, found := p.p["projectid"]; found { + u.Set("projectid", v.(string)) + } + if v, found := p.p["showremoved"]; found { + vv := strconv.FormatBool(v.(bool)) + u.Set("showremoved", vv) + } + if v, found := p.p["tags"]; found { + i := 0 + for k, vv := range v.(map[string]string) { + u.Set(fmt.Sprintf("tags[%d].key", i), k) + u.Set(fmt.Sprintf("tags[%d].value", i), vv) + i++ + } + } + if v, found := p.p["zoneid"]; found { + u.Set("zoneid", v.(string)) + } + return u +} + +func (p *ListIsosParams) SetAccount(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["account"] = v + return +} + +func (p *ListIsosParams) SetBootable(v bool) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["bootable"] = v + return +} + +func (p *ListIsosParams) SetDomainid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["domainid"] = v + return +} + +func (p *ListIsosParams) SetHypervisor(v string) { + if p.p == nil { + p.p = 
make(map[string]interface{}) + } + p.p["hypervisor"] = v + return +} + +func (p *ListIsosParams) SetId(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["id"] = v + return +} + +func (p *ListIsosParams) SetIsofilter(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["isofilter"] = v + return +} + +func (p *ListIsosParams) SetIspublic(v bool) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["ispublic"] = v + return +} + +func (p *ListIsosParams) SetIsready(v bool) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["isready"] = v + return +} + +func (p *ListIsosParams) SetIsrecursive(v bool) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["isrecursive"] = v + return +} + +func (p *ListIsosParams) SetKeyword(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["keyword"] = v + return +} + +func (p *ListIsosParams) SetListall(v bool) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["listall"] = v + return +} + +func (p *ListIsosParams) SetName(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["name"] = v + return +} + +func (p *ListIsosParams) SetPage(v int) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["page"] = v + return +} + +func (p *ListIsosParams) SetPagesize(v int) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["pagesize"] = v + return +} + +func (p *ListIsosParams) SetProjectid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["projectid"] = v + return +} + +func (p *ListIsosParams) SetShowremoved(v bool) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["showremoved"] = v + return +} + +func (p *ListIsosParams) SetTags(v map[string]string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["tags"] = v + return +} + +func (p *ListIsosParams) SetZoneid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["zoneid"] = v + return +} + +// You should always use this function to get a new ListIsosParams instance, +// as then you are sure you have configured all required params +func (s *ISOService) NewListIsosParams() *ListIsosParams { + p := &ListIsosParams{} + p.p = make(map[string]interface{}) + return p +} + +// This is a courtesy helper function, which in some cases may not work as expected! +func (s *ISOService) GetIsoID(name string, isofilter string, zoneid string) (string, error) { + p := &ListIsosParams{} + p.p = make(map[string]interface{}) + + p.p["name"] = name + p.p["isofilter"] = isofilter + p.p["zoneid"] = zoneid + + l, err := s.ListIsos(p) + if err != nil { + return "", err + } + + if l.Count == 0 { + // If no matches, search all projects + p.p["projectid"] = "-1" + + l, err = s.ListIsos(p) + if err != nil { + return "", err + } + } + + if l.Count == 0 { + return "", fmt.Errorf("No match found for %s: %+v", name, l) + } + + if l.Count == 1 { + return l.Isos[0].Id, nil + } + + if l.Count > 1 { + for _, v := range l.Isos { + if v.Name == name { + return v.Id, nil + } + } + } + return "", fmt.Errorf("Could not find an exact match for %s: %+v", name, l) +} + +// This is a courtesy helper function, which in some cases may not work as expected! 
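+//
+// Illustrative sketch, not part of the vendored source: resolving an ISO by
+// name via these lookup helpers (assumes an initialized cs; the name and zone
+// UUID are hypothetical; isofilter is one of the API's filter keywords, e.g.
+// "executable"). The count return reports how many candidates matched:
+//
+//	iso, count, err := cs.ISO.GetIsoByName("ubuntu-14.04", "executable", "zone-uuid")
+//	if err != nil {
+//		log.Fatalf("no unique match (count %d): %v", count, err)
+//	}
+//	log.Println(iso.Id, iso.Size)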
+func (s *ISOService) GetIsoByName(name string, isofilter string, zoneid string) (*Iso, int, error) { + id, err := s.GetIsoID(name, isofilter, zoneid) + if err != nil { + return nil, -1, err + } + + r, count, err := s.GetIsoByID(id) + if err != nil { + return nil, count, err + } + return r, count, nil +} + +// This is a courtesy helper function, which in some cases may not work as expected! +func (s *ISOService) GetIsoByID(id string) (*Iso, int, error) { + p := &ListIsosParams{} + p.p = make(map[string]interface{}) + + p.p["id"] = id + + l, err := s.ListIsos(p) + if err != nil { + if strings.Contains(err.Error(), fmt.Sprintf( + "Invalid parameter id value=%s due to incorrect long value format, "+ + "or entity does not exist", id)) { + return nil, 0, fmt.Errorf("No match found for %s: %+v", id, l) + } + return nil, -1, err + } + + if l.Count == 0 { + // If no matches, search all projects + p.p["projectid"] = "-1" + + l, err = s.ListIsos(p) + if err != nil { + if strings.Contains(err.Error(), fmt.Sprintf( + "Invalid parameter id value=%s due to incorrect long value format, "+ + "or entity does not exist", id)) { + return nil, 0, fmt.Errorf("No match found for %s: %+v", id, l) + } + return nil, -1, err + } + } + + if l.Count == 0 { + return nil, l.Count, fmt.Errorf("No match found for %s: %+v", id, l) + } + + if l.Count == 1 { + return l.Isos[0], l.Count, nil + } + return nil, l.Count, fmt.Errorf("There is more then one result for Iso UUID: %s!", id) +} + +// Lists all available ISO files. +func (s *ISOService) ListIsos(p *ListIsosParams) (*ListIsosResponse, error) { + resp, err := s.cs.newRequest("listIsos", p.toURLValues()) + if err != nil { + return nil, err + } + + var r ListIsosResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + return &r, nil +} + +type ListIsosResponse struct { + Count int `json:"count"` + Isos []*Iso `json:"iso"` +} + +type Iso struct { + Account string `json:"account,omitempty"` + Accountid string `json:"accountid,omitempty"` + Bootable bool `json:"bootable,omitempty"` + Checksum string `json:"checksum,omitempty"` + Created string `json:"created,omitempty"` + CrossZones bool `json:"crossZones,omitempty"` + Details map[string]string `json:"details,omitempty"` + Displaytext string `json:"displaytext,omitempty"` + Domain string `json:"domain,omitempty"` + Domainid string `json:"domainid,omitempty"` + Format string `json:"format,omitempty"` + Hostid string `json:"hostid,omitempty"` + Hostname string `json:"hostname,omitempty"` + Hypervisor string `json:"hypervisor,omitempty"` + Id string `json:"id,omitempty"` + Isdynamicallyscalable bool `json:"isdynamicallyscalable,omitempty"` + Isextractable bool `json:"isextractable,omitempty"` + Isfeatured bool `json:"isfeatured,omitempty"` + Ispublic bool `json:"ispublic,omitempty"` + Isready bool `json:"isready,omitempty"` + Name string `json:"name,omitempty"` + Ostypeid string `json:"ostypeid,omitempty"` + Ostypename string `json:"ostypename,omitempty"` + Passwordenabled bool `json:"passwordenabled,omitempty"` + Project string `json:"project,omitempty"` + Projectid string `json:"projectid,omitempty"` + Removed string `json:"removed,omitempty"` + Size int64 `json:"size,omitempty"` + Sourcetemplateid string `json:"sourcetemplateid,omitempty"` + Sshkeyenabled bool `json:"sshkeyenabled,omitempty"` + Status string `json:"status,omitempty"` + Tags []struct { + Account string `json:"account,omitempty"` + Customer string `json:"customer,omitempty"` + Domain string `json:"domain,omitempty"` + Domainid string 
`json:"domainid,omitempty"` + Key string `json:"key,omitempty"` + Project string `json:"project,omitempty"` + Projectid string `json:"projectid,omitempty"` + Resourceid string `json:"resourceid,omitempty"` + Resourcetype string `json:"resourcetype,omitempty"` + Value string `json:"value,omitempty"` + } `json:"tags,omitempty"` + Templatetag string `json:"templatetag,omitempty"` + Templatetype string `json:"templatetype,omitempty"` + Zoneid string `json:"zoneid,omitempty"` + Zonename string `json:"zonename,omitempty"` +} + +type RegisterIsoParams struct { + p map[string]interface{} +} + +func (p *RegisterIsoParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["account"]; found { + u.Set("account", v.(string)) + } + if v, found := p.p["bootable"]; found { + vv := strconv.FormatBool(v.(bool)) + u.Set("bootable", vv) + } + if v, found := p.p["checksum"]; found { + u.Set("checksum", v.(string)) + } + if v, found := p.p["displaytext"]; found { + u.Set("displaytext", v.(string)) + } + if v, found := p.p["domainid"]; found { + u.Set("domainid", v.(string)) + } + if v, found := p.p["imagestoreuuid"]; found { + u.Set("imagestoreuuid", v.(string)) + } + if v, found := p.p["isdynamicallyscalable"]; found { + vv := strconv.FormatBool(v.(bool)) + u.Set("isdynamicallyscalable", vv) + } + if v, found := p.p["isextractable"]; found { + vv := strconv.FormatBool(v.(bool)) + u.Set("isextractable", vv) + } + if v, found := p.p["isfeatured"]; found { + vv := strconv.FormatBool(v.(bool)) + u.Set("isfeatured", vv) + } + if v, found := p.p["ispublic"]; found { + vv := strconv.FormatBool(v.(bool)) + u.Set("ispublic", vv) + } + if v, found := p.p["name"]; found { + u.Set("name", v.(string)) + } + if v, found := p.p["ostypeid"]; found { + u.Set("ostypeid", v.(string)) + } + if v, found := p.p["projectid"]; found { + u.Set("projectid", v.(string)) + } + if v, found := p.p["url"]; found { + u.Set("url", v.(string)) + } + if v, found := p.p["zoneid"]; found { + u.Set("zoneid", v.(string)) + } + return u +} + +func (p *RegisterIsoParams) SetAccount(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["account"] = v + return +} + +func (p *RegisterIsoParams) SetBootable(v bool) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["bootable"] = v + return +} + +func (p *RegisterIsoParams) SetChecksum(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["checksum"] = v + return +} + +func (p *RegisterIsoParams) SetDisplaytext(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["displaytext"] = v + return +} + +func (p *RegisterIsoParams) SetDomainid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["domainid"] = v + return +} + +func (p *RegisterIsoParams) SetImagestoreuuid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["imagestoreuuid"] = v + return +} + +func (p *RegisterIsoParams) SetIsdynamicallyscalable(v bool) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["isdynamicallyscalable"] = v + return +} + +func (p *RegisterIsoParams) SetIsextractable(v bool) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["isextractable"] = v + return +} + +func (p *RegisterIsoParams) SetIsfeatured(v bool) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["isfeatured"] = v + return +} + +func (p *RegisterIsoParams) SetIspublic(v bool) { + if p.p == nil { + p.p = make(map[string]interface{}) 
+ } + p.p["ispublic"] = v + return +} + +func (p *RegisterIsoParams) SetName(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["name"] = v + return +} + +func (p *RegisterIsoParams) SetOstypeid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["ostypeid"] = v + return +} + +func (p *RegisterIsoParams) SetProjectid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["projectid"] = v + return +} + +func (p *RegisterIsoParams) SetUrl(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["url"] = v + return +} + +func (p *RegisterIsoParams) SetZoneid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["zoneid"] = v + return +} + +// You should always use this function to get a new RegisterIsoParams instance, +// as then you are sure you have configured all required params +func (s *ISOService) NewRegisterIsoParams(displaytext string, name string, url string, zoneid string) *RegisterIsoParams { + p := &RegisterIsoParams{} + p.p = make(map[string]interface{}) + p.p["displaytext"] = displaytext + p.p["name"] = name + p.p["url"] = url + p.p["zoneid"] = zoneid + return p +} + +// Registers an existing ISO into the CloudStack Cloud. +func (s *ISOService) RegisterIso(p *RegisterIsoParams) (*RegisterIsoResponse, error) { + resp, err := s.cs.newRequest("registerIso", p.toURLValues()) + if err != nil { + return nil, err + } + + var r RegisterIsoResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + return &r, nil +} + +type RegisterIsoResponse struct { + Account string `json:"account,omitempty"` + Accountid string `json:"accountid,omitempty"` + Bootable bool `json:"bootable,omitempty"` + Checksum string `json:"checksum,omitempty"` + Created string `json:"created,omitempty"` + CrossZones bool `json:"crossZones,omitempty"` + Details map[string]string `json:"details,omitempty"` + Displaytext string `json:"displaytext,omitempty"` + Domain string `json:"domain,omitempty"` + Domainid string `json:"domainid,omitempty"` + Format string `json:"format,omitempty"` + Hostid string `json:"hostid,omitempty"` + Hostname string `json:"hostname,omitempty"` + Hypervisor string `json:"hypervisor,omitempty"` + Id string `json:"id,omitempty"` + Isdynamicallyscalable bool `json:"isdynamicallyscalable,omitempty"` + Isextractable bool `json:"isextractable,omitempty"` + Isfeatured bool `json:"isfeatured,omitempty"` + Ispublic bool `json:"ispublic,omitempty"` + Isready bool `json:"isready,omitempty"` + Name string `json:"name,omitempty"` + Ostypeid string `json:"ostypeid,omitempty"` + Ostypename string `json:"ostypename,omitempty"` + Passwordenabled bool `json:"passwordenabled,omitempty"` + Project string `json:"project,omitempty"` + Projectid string `json:"projectid,omitempty"` + Removed string `json:"removed,omitempty"` + Size int64 `json:"size,omitempty"` + Sourcetemplateid string `json:"sourcetemplateid,omitempty"` + Sshkeyenabled bool `json:"sshkeyenabled,omitempty"` + Status string `json:"status,omitempty"` + Tags []struct { + Account string `json:"account,omitempty"` + Customer string `json:"customer,omitempty"` + Domain string `json:"domain,omitempty"` + Domainid string `json:"domainid,omitempty"` + Key string `json:"key,omitempty"` + Project string `json:"project,omitempty"` + Projectid string `json:"projectid,omitempty"` + Resourceid string `json:"resourceid,omitempty"` + Resourcetype string `json:"resourcetype,omitempty"` + Value string `json:"value,omitempty"` + } 
`json:"tags,omitempty"` + Templatetag string `json:"templatetag,omitempty"` + Templatetype string `json:"templatetype,omitempty"` + Zoneid string `json:"zoneid,omitempty"` + Zonename string `json:"zonename,omitempty"` +} + +type UpdateIsoParams struct { + p map[string]interface{} +} + +func (p *UpdateIsoParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["bootable"]; found { + vv := strconv.FormatBool(v.(bool)) + u.Set("bootable", vv) + } + if v, found := p.p["details"]; found { + i := 0 + for k, vv := range v.(map[string]string) { + u.Set(fmt.Sprintf("details[%d].key", i), k) + u.Set(fmt.Sprintf("details[%d].value", i), vv) + i++ + } + } + if v, found := p.p["displaytext"]; found { + u.Set("displaytext", v.(string)) + } + if v, found := p.p["format"]; found { + u.Set("format", v.(string)) + } + if v, found := p.p["id"]; found { + u.Set("id", v.(string)) + } + if v, found := p.p["isdynamicallyscalable"]; found { + vv := strconv.FormatBool(v.(bool)) + u.Set("isdynamicallyscalable", vv) + } + if v, found := p.p["isrouting"]; found { + vv := strconv.FormatBool(v.(bool)) + u.Set("isrouting", vv) + } + if v, found := p.p["name"]; found { + u.Set("name", v.(string)) + } + if v, found := p.p["ostypeid"]; found { + u.Set("ostypeid", v.(string)) + } + if v, found := p.p["passwordenabled"]; found { + vv := strconv.FormatBool(v.(bool)) + u.Set("passwordenabled", vv) + } + if v, found := p.p["sortkey"]; found { + vv := strconv.Itoa(v.(int)) + u.Set("sortkey", vv) + } + return u +} + +func (p *UpdateIsoParams) SetBootable(v bool) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["bootable"] = v + return +} + +func (p *UpdateIsoParams) SetDetails(v map[string]string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["details"] = v + return +} + +func (p *UpdateIsoParams) SetDisplaytext(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["displaytext"] = v + return +} + +func (p *UpdateIsoParams) SetFormat(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["format"] = v + return +} + +func (p *UpdateIsoParams) SetId(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["id"] = v + return +} + +func (p *UpdateIsoParams) SetIsdynamicallyscalable(v bool) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["isdynamicallyscalable"] = v + return +} + +func (p *UpdateIsoParams) SetIsrouting(v bool) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["isrouting"] = v + return +} + +func (p *UpdateIsoParams) SetName(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["name"] = v + return +} + +func (p *UpdateIsoParams) SetOstypeid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["ostypeid"] = v + return +} + +func (p *UpdateIsoParams) SetPasswordenabled(v bool) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["passwordenabled"] = v + return +} + +func (p *UpdateIsoParams) SetSortkey(v int) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["sortkey"] = v + return +} + +// You should always use this function to get a new UpdateIsoParams instance, +// as then you are sure you have configured all required params +func (s *ISOService) NewUpdateIsoParams(id string) *UpdateIsoParams { + p := &UpdateIsoParams{} + p.p = make(map[string]interface{}) + p.p["id"] = id + return p +} + +// Updates an ISO file. 
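+//
+// Example (illustrative sketch; cs is assumed to be a configured *CloudStackClient
+// and the UUID below is a placeholder):
+//
+//	p := cs.ISO.NewUpdateIsoParams("iso-uuid")
+//	p.SetDisplaytext("Ubuntu 14.04.3 installer")
+//	p.SetBootable(true)
+//	iso, err := cs.ISO.UpdateIso(p)
+//	if err != nil {
+//		log.Fatalf("UpdateIso failed: %v", err)
+//	}
+//	fmt.Printf("updated %s (ready: %t)\n", iso.Name, iso.Isready)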
+func (s *ISOService) UpdateIso(p *UpdateIsoParams) (*UpdateIsoResponse, error) { + resp, err := s.cs.newRequest("updateIso", p.toURLValues()) + if err != nil { + return nil, err + } + + var r UpdateIsoResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + return &r, nil +} + +type UpdateIsoResponse struct { + Account string `json:"account,omitempty"` + Accountid string `json:"accountid,omitempty"` + Bootable bool `json:"bootable,omitempty"` + Checksum string `json:"checksum,omitempty"` + Created string `json:"created,omitempty"` + CrossZones bool `json:"crossZones,omitempty"` + Details map[string]string `json:"details,omitempty"` + Displaytext string `json:"displaytext,omitempty"` + Domain string `json:"domain,omitempty"` + Domainid string `json:"domainid,omitempty"` + Format string `json:"format,omitempty"` + Hostid string `json:"hostid,omitempty"` + Hostname string `json:"hostname,omitempty"` + Hypervisor string `json:"hypervisor,omitempty"` + Id string `json:"id,omitempty"` + Isdynamicallyscalable bool `json:"isdynamicallyscalable,omitempty"` + Isextractable bool `json:"isextractable,omitempty"` + Isfeatured bool `json:"isfeatured,omitempty"` + Ispublic bool `json:"ispublic,omitempty"` + Isready bool `json:"isready,omitempty"` + Name string `json:"name,omitempty"` + Ostypeid string `json:"ostypeid,omitempty"` + Ostypename string `json:"ostypename,omitempty"` + Passwordenabled bool `json:"passwordenabled,omitempty"` + Project string `json:"project,omitempty"` + Projectid string `json:"projectid,omitempty"` + Removed string `json:"removed,omitempty"` + Size int64 `json:"size,omitempty"` + Sourcetemplateid string `json:"sourcetemplateid,omitempty"` + Sshkeyenabled bool `json:"sshkeyenabled,omitempty"` + Status string `json:"status,omitempty"` + Tags []struct { + Account string `json:"account,omitempty"` + Customer string `json:"customer,omitempty"` + Domain string `json:"domain,omitempty"` + Domainid string `json:"domainid,omitempty"` + Key string `json:"key,omitempty"` + Project string `json:"project,omitempty"` + Projectid string `json:"projectid,omitempty"` + Resourceid string `json:"resourceid,omitempty"` + Resourcetype string `json:"resourcetype,omitempty"` + Value string `json:"value,omitempty"` + } `json:"tags,omitempty"` + Templatetag string `json:"templatetag,omitempty"` + Templatetype string `json:"templatetype,omitempty"` + Zoneid string `json:"zoneid,omitempty"` + Zonename string `json:"zonename,omitempty"` +} + +type DeleteIsoParams struct { + p map[string]interface{} +} + +func (p *DeleteIsoParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["id"]; found { + u.Set("id", v.(string)) + } + if v, found := p.p["zoneid"]; found { + u.Set("zoneid", v.(string)) + } + return u +} + +func (p *DeleteIsoParams) SetId(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["id"] = v + return +} + +func (p *DeleteIsoParams) SetZoneid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["zoneid"] = v + return +} + +// You should always use this function to get a new DeleteIsoParams instance, +// as then you are sure you have configured all required params +func (s *ISOService) NewDeleteIsoParams(id string) *DeleteIsoParams { + p := &DeleteIsoParams{} + p.p = make(map[string]interface{}) + p.p["id"] = id + return p +} + +// Deletes an ISO file. 
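+//
+// Example (illustrative sketch; cs is assumed to be a *CloudStackClient created
+// with an async client, so the call blocks until the CloudStack job finishes or
+// AsyncTimeoutErr is returned; UUIDs are placeholders):
+//
+//	p := cs.ISO.NewDeleteIsoParams("iso-uuid")
+//	p.SetZoneid("zone-uuid") // optional: only delete the copy in one zone
+//	r, err := cs.ISO.DeleteIso(p)
+//	if err != nil {
+//		log.Fatalf("DeleteIso failed: %v", err)
+//	}
+//	fmt.Println("success:", r.Success)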
+func (s *ISOService) DeleteIso(p *DeleteIsoParams) (*DeleteIsoResponse, error) {
+	resp, err := s.cs.newRequest("deleteIso", p.toURLValues())
+	if err != nil {
+		return nil, err
+	}
+
+	var r DeleteIsoResponse
+	if err := json.Unmarshal(resp, &r); err != nil {
+		return nil, err
+	}
+
+	// If we have an async client, we need to wait for the async result
+	if s.cs.async {
+		b, err := s.cs.GetAsyncJobResult(r.JobID, s.cs.timeout)
+		if err != nil {
+			if err == AsyncTimeoutErr {
+				return &r, err
+			}
+			return nil, err
+		}
+
+		if err := json.Unmarshal(b, &r); err != nil {
+			return nil, err
+		}
+	}
+	return &r, nil
+}
+
+type DeleteIsoResponse struct {
+	JobID       string `json:"jobid,omitempty"`
+	Displaytext string `json:"displaytext,omitempty"`
+	Success     bool   `json:"success,omitempty"`
+}
+
+type CopyIsoParams struct {
+	p map[string]interface{}
+}
+
+func (p *CopyIsoParams) toURLValues() url.Values {
+	u := url.Values{}
+	if p.p == nil {
+		return u
+	}
+	if v, found := p.p["destzoneid"]; found {
+		u.Set("destzoneid", v.(string))
+	}
+	if v, found := p.p["id"]; found {
+		u.Set("id", v.(string))
+	}
+	if v, found := p.p["sourcezoneid"]; found {
+		u.Set("sourcezoneid", v.(string))
+	}
+	return u
+}
+
+func (p *CopyIsoParams) SetDestzoneid(v string) {
+	if p.p == nil {
+		p.p = make(map[string]interface{})
+	}
+	p.p["destzoneid"] = v
+	return
+}
+
+func (p *CopyIsoParams) SetId(v string) {
+	if p.p == nil {
+		p.p = make(map[string]interface{})
+	}
+	p.p["id"] = v
+	return
+}
+
+func (p *CopyIsoParams) SetSourcezoneid(v string) {
+	if p.p == nil {
+		p.p = make(map[string]interface{})
+	}
+	p.p["sourcezoneid"] = v
+	return
+}
+
+// You should always use this function to get a new CopyIsoParams instance,
+// as then you are sure you have configured all required params
+func (s *ISOService) NewCopyIsoParams(destzoneid string, id string) *CopyIsoParams {
+	p := &CopyIsoParams{}
+	p.p = make(map[string]interface{})
+	p.p["destzoneid"] = destzoneid
+	p.p["id"] = id
+	return p
+}
+
+// Copies an ISO from one zone to another.
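+//
+// Example (illustrative sketch; cs is assumed to be a configured *CloudStackClient
+// and all UUIDs are placeholders): the destination zone and ISO id are required by
+// the constructor, the source zone is optional:
+//
+//	p := cs.ISO.NewCopyIsoParams("dest-zone-uuid", "iso-uuid")
+//	p.SetSourcezoneid("source-zone-uuid")
+//	iso, err := cs.ISO.CopyIso(p)
+//	if err != nil {
+//		log.Fatalf("CopyIso failed: %v", err)
+//	}
+//	fmt.Printf("ISO now available in zone %s\n", iso.Zonename)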
+func (s *ISOService) CopyIso(p *CopyIsoParams) (*CopyIsoResponse, error) { + resp, err := s.cs.newRequest("copyIso", p.toURLValues()) + if err != nil { + return nil, err + } + + var r CopyIsoResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + + // If we have a async client, we need to wait for the async result + if s.cs.async { + b, err := s.cs.GetAsyncJobResult(r.JobID, s.cs.timeout) + if err != nil { + if err == AsyncTimeoutErr { + return &r, err + } + return nil, err + } + + b, err = getRawValue(b) + if err != nil { + return nil, err + } + + if err := json.Unmarshal(b, &r); err != nil { + return nil, err + } + } + return &r, nil +} + +type CopyIsoResponse struct { + JobID string `json:"jobid,omitempty"` + Account string `json:"account,omitempty"` + Accountid string `json:"accountid,omitempty"` + Bootable bool `json:"bootable,omitempty"` + Checksum string `json:"checksum,omitempty"` + Created string `json:"created,omitempty"` + CrossZones bool `json:"crossZones,omitempty"` + Details map[string]string `json:"details,omitempty"` + Displaytext string `json:"displaytext,omitempty"` + Domain string `json:"domain,omitempty"` + Domainid string `json:"domainid,omitempty"` + Format string `json:"format,omitempty"` + Hostid string `json:"hostid,omitempty"` + Hostname string `json:"hostname,omitempty"` + Hypervisor string `json:"hypervisor,omitempty"` + Id string `json:"id,omitempty"` + Isdynamicallyscalable bool `json:"isdynamicallyscalable,omitempty"` + Isextractable bool `json:"isextractable,omitempty"` + Isfeatured bool `json:"isfeatured,omitempty"` + Ispublic bool `json:"ispublic,omitempty"` + Isready bool `json:"isready,omitempty"` + Name string `json:"name,omitempty"` + Ostypeid string `json:"ostypeid,omitempty"` + Ostypename string `json:"ostypename,omitempty"` + Passwordenabled bool `json:"passwordenabled,omitempty"` + Project string `json:"project,omitempty"` + Projectid string `json:"projectid,omitempty"` + Removed string `json:"removed,omitempty"` + Size int64 `json:"size,omitempty"` + Sourcetemplateid string `json:"sourcetemplateid,omitempty"` + Sshkeyenabled bool `json:"sshkeyenabled,omitempty"` + Status string `json:"status,omitempty"` + Tags []struct { + Account string `json:"account,omitempty"` + Customer string `json:"customer,omitempty"` + Domain string `json:"domain,omitempty"` + Domainid string `json:"domainid,omitempty"` + Key string `json:"key,omitempty"` + Project string `json:"project,omitempty"` + Projectid string `json:"projectid,omitempty"` + Resourceid string `json:"resourceid,omitempty"` + Resourcetype string `json:"resourcetype,omitempty"` + Value string `json:"value,omitempty"` + } `json:"tags,omitempty"` + Templatetag string `json:"templatetag,omitempty"` + Templatetype string `json:"templatetype,omitempty"` + Zoneid string `json:"zoneid,omitempty"` + Zonename string `json:"zonename,omitempty"` +} + +type UpdateIsoPermissionsParams struct { + p map[string]interface{} +} + +func (p *UpdateIsoPermissionsParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["accounts"]; found { + vv := strings.Join(v.([]string), ",") + u.Set("accounts", vv) + } + if v, found := p.p["id"]; found { + u.Set("id", v.(string)) + } + if v, found := p.p["isextractable"]; found { + vv := strconv.FormatBool(v.(bool)) + u.Set("isextractable", vv) + } + if v, found := p.p["isfeatured"]; found { + vv := strconv.FormatBool(v.(bool)) + u.Set("isfeatured", vv) + } + if v, found := p.p["ispublic"]; found { + vv := 
strconv.FormatBool(v.(bool)) + u.Set("ispublic", vv) + } + if v, found := p.p["op"]; found { + u.Set("op", v.(string)) + } + if v, found := p.p["projectids"]; found { + vv := strings.Join(v.([]string), ",") + u.Set("projectids", vv) + } + return u +} + +func (p *UpdateIsoPermissionsParams) SetAccounts(v []string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["accounts"] = v + return +} + +func (p *UpdateIsoPermissionsParams) SetId(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["id"] = v + return +} + +func (p *UpdateIsoPermissionsParams) SetIsextractable(v bool) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["isextractable"] = v + return +} + +func (p *UpdateIsoPermissionsParams) SetIsfeatured(v bool) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["isfeatured"] = v + return +} + +func (p *UpdateIsoPermissionsParams) SetIspublic(v bool) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["ispublic"] = v + return +} + +func (p *UpdateIsoPermissionsParams) SetOp(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["op"] = v + return +} + +func (p *UpdateIsoPermissionsParams) SetProjectids(v []string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["projectids"] = v + return +} + +// You should always use this function to get a new UpdateIsoPermissionsParams instance, +// as then you are sure you have configured all required params +func (s *ISOService) NewUpdateIsoPermissionsParams(id string) *UpdateIsoPermissionsParams { + p := &UpdateIsoPermissionsParams{} + p.p = make(map[string]interface{}) + p.p["id"] = id + return p +} + +// Updates iso permissions +func (s *ISOService) UpdateIsoPermissions(p *UpdateIsoPermissionsParams) (*UpdateIsoPermissionsResponse, error) { + resp, err := s.cs.newRequest("updateIsoPermissions", p.toURLValues()) + if err != nil { + return nil, err + } + + var r UpdateIsoPermissionsResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + return &r, nil +} + +type UpdateIsoPermissionsResponse struct { + Displaytext string `json:"displaytext,omitempty"` + Success string `json:"success,omitempty"` +} + +type ListIsoPermissionsParams struct { + p map[string]interface{} +} + +func (p *ListIsoPermissionsParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["id"]; found { + u.Set("id", v.(string)) + } + return u +} + +func (p *ListIsoPermissionsParams) SetId(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["id"] = v + return +} + +// You should always use this function to get a new ListIsoPermissionsParams instance, +// as then you are sure you have configured all required params +func (s *ISOService) NewListIsoPermissionsParams(id string) *ListIsoPermissionsParams { + p := &ListIsoPermissionsParams{} + p.p = make(map[string]interface{}) + p.p["id"] = id + return p +} + +// This is a courtesy helper function, which in some cases may not work as expected! 
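+//
+// It resolves a single IsoPermission by UUID via ListIsoPermissions and returns an
+// error when zero or more than one results come back. Illustrative use (cs is an
+// assumed *CloudStackClient, the id a placeholder):
+//
+//	perm, count, err := cs.ISO.GetIsoPermissionByID("iso-uuid")
+//	if err != nil {
+//		log.Fatalf("lookup failed (count %d): %v", count, err)
+//	}
+//	fmt.Println("public:", perm.Ispublic)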
+func (s *ISOService) GetIsoPermissionByID(id string) (*IsoPermission, int, error) {
+	p := &ListIsoPermissionsParams{}
+	p.p = make(map[string]interface{})
+
+	p.p["id"] = id
+
+	l, err := s.ListIsoPermissions(p)
+	if err != nil {
+		if strings.Contains(err.Error(), fmt.Sprintf(
+			"Invalid parameter id value=%s due to incorrect long value format, "+
+				"or entity does not exist", id)) {
+			return nil, 0, fmt.Errorf("No match found for %s: %+v", id, l)
+		}
+		return nil, -1, err
+	}
+
+	if l.Count == 0 {
+		return nil, l.Count, fmt.Errorf("No match found for %s: %+v", id, l)
+	}
+
+	if l.Count == 1 {
+		return l.IsoPermissions[0], l.Count, nil
+	}
+	return nil, l.Count, fmt.Errorf("There is more than one result for IsoPermission UUID: %s!", id)
+}
+
+// Lists ISO visibility and all accounts that have permissions to view this ISO.
+func (s *ISOService) ListIsoPermissions(p *ListIsoPermissionsParams) (*ListIsoPermissionsResponse, error) {
+	resp, err := s.cs.newRequest("listIsoPermissions", p.toURLValues())
+	if err != nil {
+		return nil, err
+	}
+
+	var r ListIsoPermissionsResponse
+	if err := json.Unmarshal(resp, &r); err != nil {
+		return nil, err
+	}
+	return &r, nil
+}
+
+type ListIsoPermissionsResponse struct {
+	Count          int              `json:"count"`
+	IsoPermissions []*IsoPermission `json:"isopermission"`
+}
+
+type IsoPermission struct {
+	Account    []string `json:"account,omitempty"`
+	Domainid   string   `json:"domainid,omitempty"`
+	Id         string   `json:"id,omitempty"`
+	Ispublic   bool     `json:"ispublic,omitempty"`
+	Projectids []string `json:"projectids,omitempty"`
+}
+
+type ExtractIsoParams struct {
+	p map[string]interface{}
+}
+
+func (p *ExtractIsoParams) toURLValues() url.Values {
+	u := url.Values{}
+	if p.p == nil {
+		return u
+	}
+	if v, found := p.p["id"]; found {
+		u.Set("id", v.(string))
+	}
+	if v, found := p.p["mode"]; found {
+		u.Set("mode", v.(string))
+	}
+	if v, found := p.p["url"]; found {
+		u.Set("url", v.(string))
+	}
+	if v, found := p.p["zoneid"]; found {
+		u.Set("zoneid", v.(string))
+	}
+	return u
+}
+
+func (p *ExtractIsoParams) SetId(v string) {
+	if p.p == nil {
+		p.p = make(map[string]interface{})
+	}
+	p.p["id"] = v
+	return
+}
+
+func (p *ExtractIsoParams) SetMode(v string) {
+	if p.p == nil {
+		p.p = make(map[string]interface{})
+	}
+	p.p["mode"] = v
+	return
+}
+
+func (p *ExtractIsoParams) SetUrl(v string) {
+	if p.p == nil {
+		p.p = make(map[string]interface{})
+	}
+	p.p["url"] = v
+	return
+}
+
+func (p *ExtractIsoParams) SetZoneid(v string) {
+	if p.p == nil {
+		p.p = make(map[string]interface{})
+	}
+	p.p["zoneid"] = v
+	return
+}
+
+// You should always use this function to get a new ExtractIsoParams instance,
+// as then you are sure you have configured all required params
+func (s *ISOService) NewExtractIsoParams(id string, mode string) *ExtractIsoParams {
+	p := &ExtractIsoParams{}
+	p.p = make(map[string]interface{})
+	p.p["id"] = id
+	p.p["mode"] = mode
+	return p
+}
+
+// Extracts an ISO.
+func (s *ISOService) ExtractIso(p *ExtractIsoParams) (*ExtractIsoResponse, error) {
+	resp, err := s.cs.newRequest("extractIso", p.toURLValues())
+	if err != nil {
+		return nil, err
+	}
+
+	var r ExtractIsoResponse
+	if err := json.Unmarshal(resp, &r); err != nil {
+		return nil, err
+	}
+
+	// If we have an async client, we need to wait for the async result
+	if s.cs.async {
+		b, err := s.cs.GetAsyncJobResult(r.JobID, s.cs.timeout)
+		if err != nil {
+			if err == AsyncTimeoutErr {
+				return &r, err
+			}
+			return nil, err
+		}
+
+		b, err = getRawValue(b)
+		if err != nil {
+
return nil, err + } + + if err := json.Unmarshal(b, &r); err != nil { + return nil, err + } + } + return &r, nil +} + +type ExtractIsoResponse struct { + JobID string `json:"jobid,omitempty"` + Accountid string `json:"accountid,omitempty"` + Created string `json:"created,omitempty"` + ExtractId string `json:"extractId,omitempty"` + ExtractMode string `json:"extractMode,omitempty"` + Id string `json:"id,omitempty"` + Name string `json:"name,omitempty"` + Resultstring string `json:"resultstring,omitempty"` + State string `json:"state,omitempty"` + Status string `json:"status,omitempty"` + Storagetype string `json:"storagetype,omitempty"` + Uploadpercentage int `json:"uploadpercentage,omitempty"` + Url string `json:"url,omitempty"` + Zoneid string `json:"zoneid,omitempty"` + Zonename string `json:"zonename,omitempty"` +} diff --git a/vendor/github.com/xanzy/go-cloudstack/cloudstack/ImageStoreService.go b/vendor/github.com/xanzy/go-cloudstack/cloudstack/ImageStoreService.go new file mode 100644 index 000000000..83194d820 --- /dev/null +++ b/vendor/github.com/xanzy/go-cloudstack/cloudstack/ImageStoreService.go @@ -0,0 +1,861 @@ +// +// Copyright 2014, Sander van Harmelen +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package cloudstack + +import ( + "encoding/json" + "fmt" + "net/url" + "strconv" + "strings" +) + +type AddImageStoreParams struct { + p map[string]interface{} +} + +func (p *AddImageStoreParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["details"]; found { + i := 0 + for k, vv := range v.(map[string]string) { + u.Set(fmt.Sprintf("details[%d].key", i), k) + u.Set(fmt.Sprintf("details[%d].value", i), vv) + i++ + } + } + if v, found := p.p["name"]; found { + u.Set("name", v.(string)) + } + if v, found := p.p["provider"]; found { + u.Set("provider", v.(string)) + } + if v, found := p.p["url"]; found { + u.Set("url", v.(string)) + } + if v, found := p.p["zoneid"]; found { + u.Set("zoneid", v.(string)) + } + return u +} + +func (p *AddImageStoreParams) SetDetails(v map[string]string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["details"] = v + return +} + +func (p *AddImageStoreParams) SetName(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["name"] = v + return +} + +func (p *AddImageStoreParams) SetProvider(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["provider"] = v + return +} + +func (p *AddImageStoreParams) SetUrl(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["url"] = v + return +} + +func (p *AddImageStoreParams) SetZoneid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["zoneid"] = v + return +} + +// You should always use this function to get a new AddImageStoreParams instance, +// as then you are sure you have configured all required params +func (s *ImageStoreService) NewAddImageStoreParams(provider string) *AddImageStoreParams { + p := &AddImageStoreParams{} + 
p.p = make(map[string]interface{}) + p.p["provider"] = provider + return p +} + +// Adds backup image store. +func (s *ImageStoreService) AddImageStore(p *AddImageStoreParams) (*AddImageStoreResponse, error) { + resp, err := s.cs.newRequest("addImageStore", p.toURLValues()) + if err != nil { + return nil, err + } + + var r AddImageStoreResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + return &r, nil +} + +type AddImageStoreResponse struct { + Details []string `json:"details,omitempty"` + Id string `json:"id,omitempty"` + Name string `json:"name,omitempty"` + Protocol string `json:"protocol,omitempty"` + Providername string `json:"providername,omitempty"` + Scope string `json:"scope,omitempty"` + Url string `json:"url,omitempty"` + Zoneid string `json:"zoneid,omitempty"` + Zonename string `json:"zonename,omitempty"` +} + +type ListImageStoresParams struct { + p map[string]interface{} +} + +func (p *ListImageStoresParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["id"]; found { + u.Set("id", v.(string)) + } + if v, found := p.p["keyword"]; found { + u.Set("keyword", v.(string)) + } + if v, found := p.p["name"]; found { + u.Set("name", v.(string)) + } + if v, found := p.p["page"]; found { + vv := strconv.Itoa(v.(int)) + u.Set("page", vv) + } + if v, found := p.p["pagesize"]; found { + vv := strconv.Itoa(v.(int)) + u.Set("pagesize", vv) + } + if v, found := p.p["protocol"]; found { + u.Set("protocol", v.(string)) + } + if v, found := p.p["provider"]; found { + u.Set("provider", v.(string)) + } + if v, found := p.p["zoneid"]; found { + u.Set("zoneid", v.(string)) + } + return u +} + +func (p *ListImageStoresParams) SetId(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["id"] = v + return +} + +func (p *ListImageStoresParams) SetKeyword(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["keyword"] = v + return +} + +func (p *ListImageStoresParams) SetName(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["name"] = v + return +} + +func (p *ListImageStoresParams) SetPage(v int) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["page"] = v + return +} + +func (p *ListImageStoresParams) SetPagesize(v int) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["pagesize"] = v + return +} + +func (p *ListImageStoresParams) SetProtocol(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["protocol"] = v + return +} + +func (p *ListImageStoresParams) SetProvider(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["provider"] = v + return +} + +func (p *ListImageStoresParams) SetZoneid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["zoneid"] = v + return +} + +// You should always use this function to get a new ListImageStoresParams instance, +// as then you are sure you have configured all required params +func (s *ImageStoreService) NewListImageStoresParams() *ListImageStoresParams { + p := &ListImageStoresParams{} + p.p = make(map[string]interface{}) + return p +} + +// This is a courtesy helper function, which in some cases may not work as expected! 
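+//
+// It resolves an image store name to its UUID via ListImageStores; if several
+// stores match, it returns the first exact name match. Illustrative use (cs is an
+// assumed *CloudStackClient, the store name a placeholder):
+//
+//	id, err := cs.ImageStore.GetImageStoreID("secondary-nfs-01")
+//	if err != nil {
+//		log.Fatalf("could not resolve image store: %v", err)
+//	}
+//	fmt.Println("image store id:", id)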
+func (s *ImageStoreService) GetImageStoreID(name string) (string, error) {
+	p := &ListImageStoresParams{}
+	p.p = make(map[string]interface{})
+
+	p.p["name"] = name
+
+	l, err := s.ListImageStores(p)
+	if err != nil {
+		return "", err
+	}
+
+	if l.Count == 0 {
+		return "", fmt.Errorf("No match found for %s: %+v", name, l)
+	}
+
+	if l.Count == 1 {
+		return l.ImageStores[0].Id, nil
+	}
+
+	if l.Count > 1 {
+		for _, v := range l.ImageStores {
+			if v.Name == name {
+				return v.Id, nil
+			}
+		}
+	}
+	return "", fmt.Errorf("Could not find an exact match for %s: %+v", name, l)
+}
+
+// This is a courtesy helper function, which in some cases may not work as expected!
+func (s *ImageStoreService) GetImageStoreByName(name string) (*ImageStore, int, error) {
+	id, err := s.GetImageStoreID(name)
+	if err != nil {
+		return nil, -1, err
+	}
+
+	r, count, err := s.GetImageStoreByID(id)
+	if err != nil {
+		return nil, count, err
+	}
+	return r, count, nil
+}
+
+// This is a courtesy helper function, which in some cases may not work as expected!
+func (s *ImageStoreService) GetImageStoreByID(id string) (*ImageStore, int, error) {
+	p := &ListImageStoresParams{}
+	p.p = make(map[string]interface{})
+
+	p.p["id"] = id
+
+	l, err := s.ListImageStores(p)
+	if err != nil {
+		if strings.Contains(err.Error(), fmt.Sprintf(
+			"Invalid parameter id value=%s due to incorrect long value format, "+
+				"or entity does not exist", id)) {
+			return nil, 0, fmt.Errorf("No match found for %s: %+v", id, l)
+		}
+		return nil, -1, err
+	}
+
+	if l.Count == 0 {
+		return nil, l.Count, fmt.Errorf("No match found for %s: %+v", id, l)
+	}
+
+	if l.Count == 1 {
+		return l.ImageStores[0], l.Count, nil
+	}
+	return nil, l.Count, fmt.Errorf("There is more than one result for ImageStore UUID: %s!", id)
+}
+
+// Lists image stores.
+func (s *ImageStoreService) ListImageStores(p *ListImageStoresParams) (*ListImageStoresResponse, error) {
+	resp, err := s.cs.newRequest("listImageStores", p.toURLValues())
+	if err != nil {
+		return nil, err
+	}
+
+	var r ListImageStoresResponse
+	if err := json.Unmarshal(resp, &r); err != nil {
+		return nil, err
+	}
+	return &r, nil
+}
+
+type ListImageStoresResponse struct {
+	Count       int           `json:"count"`
+	ImageStores []*ImageStore `json:"imagestore"`
+}
+
+type ImageStore struct {
+	Details      []string `json:"details,omitempty"`
+	Id           string   `json:"id,omitempty"`
+	Name         string   `json:"name,omitempty"`
+	Protocol     string   `json:"protocol,omitempty"`
+	Providername string   `json:"providername,omitempty"`
+	Scope        string   `json:"scope,omitempty"`
+	Url          string   `json:"url,omitempty"`
+	Zoneid       string   `json:"zoneid,omitempty"`
+	Zonename     string   `json:"zonename,omitempty"`
+}
+
+type DeleteImageStoreParams struct {
+	p map[string]interface{}
+}
+
+func (p *DeleteImageStoreParams) toURLValues() url.Values {
+	u := url.Values{}
+	if p.p == nil {
+		return u
+	}
+	if v, found := p.p["id"]; found {
+		u.Set("id", v.(string))
+	}
+	return u
+}
+
+func (p *DeleteImageStoreParams) SetId(v string) {
+	if p.p == nil {
+		p.p = make(map[string]interface{})
+	}
+	p.p["id"] = v
+	return
+}
+
+// You should always use this function to get a new DeleteImageStoreParams instance,
+// as then you are sure you have configured all required params
+func (s *ImageStoreService) NewDeleteImageStoreParams(id string) *DeleteImageStoreParams {
+	p := &DeleteImageStoreParams{}
+	p.p = make(map[string]interface{})
+	p.p["id"] = id
+	return p
+}
+
+// Deletes an image store.
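+//
+// Example (illustrative sketch; cs is assumed to be a configured *CloudStackClient
+// and the UUID is a placeholder):
+//
+//	p := cs.ImageStore.NewDeleteImageStoreParams("imagestore-uuid")
+//	if _, err := cs.ImageStore.DeleteImageStore(p); err != nil {
+//		log.Fatalf("DeleteImageStore failed: %v", err)
+//	}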
+func (s *ImageStoreService) DeleteImageStore(p *DeleteImageStoreParams) (*DeleteImageStoreResponse, error) { + resp, err := s.cs.newRequest("deleteImageStore", p.toURLValues()) + if err != nil { + return nil, err + } + + var r DeleteImageStoreResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + return &r, nil +} + +type DeleteImageStoreResponse struct { + Displaytext string `json:"displaytext,omitempty"` + Success string `json:"success,omitempty"` +} + +type CreateSecondaryStagingStoreParams struct { + p map[string]interface{} +} + +func (p *CreateSecondaryStagingStoreParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["details"]; found { + i := 0 + for k, vv := range v.(map[string]string) { + u.Set(fmt.Sprintf("details[%d].key", i), k) + u.Set(fmt.Sprintf("details[%d].value", i), vv) + i++ + } + } + if v, found := p.p["provider"]; found { + u.Set("provider", v.(string)) + } + if v, found := p.p["scope"]; found { + u.Set("scope", v.(string)) + } + if v, found := p.p["url"]; found { + u.Set("url", v.(string)) + } + if v, found := p.p["zoneid"]; found { + u.Set("zoneid", v.(string)) + } + return u +} + +func (p *CreateSecondaryStagingStoreParams) SetDetails(v map[string]string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["details"] = v + return +} + +func (p *CreateSecondaryStagingStoreParams) SetProvider(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["provider"] = v + return +} + +func (p *CreateSecondaryStagingStoreParams) SetScope(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["scope"] = v + return +} + +func (p *CreateSecondaryStagingStoreParams) SetUrl(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["url"] = v + return +} + +func (p *CreateSecondaryStagingStoreParams) SetZoneid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["zoneid"] = v + return +} + +// You should always use this function to get a new CreateSecondaryStagingStoreParams instance, +// as then you are sure you have configured all required params +func (s *ImageStoreService) NewCreateSecondaryStagingStoreParams(url string) *CreateSecondaryStagingStoreParams { + p := &CreateSecondaryStagingStoreParams{} + p.p = make(map[string]interface{}) + p.p["url"] = url + return p +} + +// create secondary staging store. 
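+//
+// Example (illustrative sketch; cs is assumed to be a configured *CloudStackClient;
+// the NFS URL and zone UUID are placeholders):
+//
+//	p := cs.ImageStore.NewCreateSecondaryStagingStoreParams("nfs://nfs-host/staging")
+//	p.SetZoneid("zone-uuid")
+//	store, err := cs.ImageStore.CreateSecondaryStagingStore(p)
+//	if err != nil {
+//		log.Fatalf("CreateSecondaryStagingStore failed: %v", err)
+//	}
+//	fmt.Println("staging store id:", store.Id)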
+func (s *ImageStoreService) CreateSecondaryStagingStore(p *CreateSecondaryStagingStoreParams) (*CreateSecondaryStagingStoreResponse, error) { + resp, err := s.cs.newRequest("createSecondaryStagingStore", p.toURLValues()) + if err != nil { + return nil, err + } + + var r CreateSecondaryStagingStoreResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + return &r, nil +} + +type CreateSecondaryStagingStoreResponse struct { + Details []string `json:"details,omitempty"` + Id string `json:"id,omitempty"` + Name string `json:"name,omitempty"` + Protocol string `json:"protocol,omitempty"` + Providername string `json:"providername,omitempty"` + Scope string `json:"scope,omitempty"` + Url string `json:"url,omitempty"` + Zoneid string `json:"zoneid,omitempty"` + Zonename string `json:"zonename,omitempty"` +} + +type ListSecondaryStagingStoresParams struct { + p map[string]interface{} +} + +func (p *ListSecondaryStagingStoresParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["id"]; found { + u.Set("id", v.(string)) + } + if v, found := p.p["keyword"]; found { + u.Set("keyword", v.(string)) + } + if v, found := p.p["name"]; found { + u.Set("name", v.(string)) + } + if v, found := p.p["page"]; found { + vv := strconv.Itoa(v.(int)) + u.Set("page", vv) + } + if v, found := p.p["pagesize"]; found { + vv := strconv.Itoa(v.(int)) + u.Set("pagesize", vv) + } + if v, found := p.p["protocol"]; found { + u.Set("protocol", v.(string)) + } + if v, found := p.p["provider"]; found { + u.Set("provider", v.(string)) + } + if v, found := p.p["zoneid"]; found { + u.Set("zoneid", v.(string)) + } + return u +} + +func (p *ListSecondaryStagingStoresParams) SetId(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["id"] = v + return +} + +func (p *ListSecondaryStagingStoresParams) SetKeyword(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["keyword"] = v + return +} + +func (p *ListSecondaryStagingStoresParams) SetName(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["name"] = v + return +} + +func (p *ListSecondaryStagingStoresParams) SetPage(v int) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["page"] = v + return +} + +func (p *ListSecondaryStagingStoresParams) SetPagesize(v int) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["pagesize"] = v + return +} + +func (p *ListSecondaryStagingStoresParams) SetProtocol(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["protocol"] = v + return +} + +func (p *ListSecondaryStagingStoresParams) SetProvider(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["provider"] = v + return +} + +func (p *ListSecondaryStagingStoresParams) SetZoneid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["zoneid"] = v + return +} + +// You should always use this function to get a new ListSecondaryStagingStoresParams instance, +// as then you are sure you have configured all required params +func (s *ImageStoreService) NewListSecondaryStagingStoresParams() *ListSecondaryStagingStoresParams { + p := &ListSecondaryStagingStoresParams{} + p.p = make(map[string]interface{}) + return p +} + +// This is a courtesy helper function, which in some cases may not work as expected! 
+func (s *ImageStoreService) GetSecondaryStagingStoreID(name string) (string, error) {
+	p := &ListSecondaryStagingStoresParams{}
+	p.p = make(map[string]interface{})
+
+	p.p["name"] = name
+
+	l, err := s.ListSecondaryStagingStores(p)
+	if err != nil {
+		return "", err
+	}
+
+	if l.Count == 0 {
+		return "", fmt.Errorf("No match found for %s: %+v", name, l)
+	}
+
+	if l.Count == 1 {
+		return l.SecondaryStagingStores[0].Id, nil
+	}
+
+	if l.Count > 1 {
+		for _, v := range l.SecondaryStagingStores {
+			if v.Name == name {
+				return v.Id, nil
+			}
+		}
+	}
+	return "", fmt.Errorf("Could not find an exact match for %s: %+v", name, l)
+}
+
+// This is a courtesy helper function, which in some cases may not work as expected!
+func (s *ImageStoreService) GetSecondaryStagingStoreByName(name string) (*SecondaryStagingStore, int, error) {
+	id, err := s.GetSecondaryStagingStoreID(name)
+	if err != nil {
+		return nil, -1, err
+	}
+
+	r, count, err := s.GetSecondaryStagingStoreByID(id)
+	if err != nil {
+		return nil, count, err
+	}
+	return r, count, nil
+}
+
+// This is a courtesy helper function, which in some cases may not work as expected!
+func (s *ImageStoreService) GetSecondaryStagingStoreByID(id string) (*SecondaryStagingStore, int, error) {
+	p := &ListSecondaryStagingStoresParams{}
+	p.p = make(map[string]interface{})
+
+	p.p["id"] = id
+
+	l, err := s.ListSecondaryStagingStores(p)
+	if err != nil {
+		if strings.Contains(err.Error(), fmt.Sprintf(
+			"Invalid parameter id value=%s due to incorrect long value format, "+
+				"or entity does not exist", id)) {
+			return nil, 0, fmt.Errorf("No match found for %s: %+v", id, l)
+		}
+		return nil, -1, err
+	}
+
+	if l.Count == 0 {
+		return nil, l.Count, fmt.Errorf("No match found for %s: %+v", id, l)
+	}
+
+	if l.Count == 1 {
+		return l.SecondaryStagingStores[0], l.Count, nil
+	}
+	return nil, l.Count, fmt.Errorf("There is more than one result for SecondaryStagingStore UUID: %s!", id)
+}
+
+// Lists secondary staging stores.
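+//
+// Example (illustrative sketch; cs is assumed to be a configured *CloudStackClient):
+// page through the stores 20 at a time:
+//
+//	p := cs.ImageStore.NewListSecondaryStagingStoresParams()
+//	p.SetPage(1)
+//	p.SetPagesize(20)
+//	l, err := cs.ImageStore.ListSecondaryStagingStores(p)
+//	if err != nil {
+//		log.Fatalf("list failed: %v", err)
+//	}
+//	for _, store := range l.SecondaryStagingStores {
+//		fmt.Println(store.Name, store.Url)
+//	}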
+func (s *ImageStoreService) ListSecondaryStagingStores(p *ListSecondaryStagingStoresParams) (*ListSecondaryStagingStoresResponse, error) {
+	resp, err := s.cs.newRequest("listSecondaryStagingStores", p.toURLValues())
+	if err != nil {
+		return nil, err
+	}
+
+	var r ListSecondaryStagingStoresResponse
+	if err := json.Unmarshal(resp, &r); err != nil {
+		return nil, err
+	}
+	return &r, nil
+}
+
+type ListSecondaryStagingStoresResponse struct {
+	Count                  int                      `json:"count"`
+	SecondaryStagingStores []*SecondaryStagingStore `json:"secondarystagingstore"`
+}
+
+type SecondaryStagingStore struct {
+	Details      []string `json:"details,omitempty"`
+	Id           string   `json:"id,omitempty"`
+	Name         string   `json:"name,omitempty"`
+	Protocol     string   `json:"protocol,omitempty"`
+	Providername string   `json:"providername,omitempty"`
+	Scope        string   `json:"scope,omitempty"`
+	Url          string   `json:"url,omitempty"`
+	Zoneid       string   `json:"zoneid,omitempty"`
+	Zonename     string   `json:"zonename,omitempty"`
+}
+
+type DeleteSecondaryStagingStoreParams struct {
+	p map[string]interface{}
+}
+
+func (p *DeleteSecondaryStagingStoreParams) toURLValues() url.Values {
+	u := url.Values{}
+	if p.p == nil {
+		return u
+	}
+	if v, found := p.p["id"]; found {
+		u.Set("id", v.(string))
+	}
+	return u
+}
+
+func (p *DeleteSecondaryStagingStoreParams) SetId(v string) {
+	if p.p == nil {
+		p.p = make(map[string]interface{})
+	}
+	p.p["id"] = v
+	return
+}
+
+// You should always use this function to get a new DeleteSecondaryStagingStoreParams instance,
+// as then you are sure you have configured all required params
+func (s *ImageStoreService) NewDeleteSecondaryStagingStoreParams(id string) *DeleteSecondaryStagingStoreParams {
+	p := &DeleteSecondaryStagingStoreParams{}
+	p.p = make(map[string]interface{})
+	p.p["id"] = id
+	return p
+}
+
+// Deletes a secondary staging store.
+func (s *ImageStoreService) DeleteSecondaryStagingStore(p *DeleteSecondaryStagingStoreParams) (*DeleteSecondaryStagingStoreResponse, error) { + resp, err := s.cs.newRequest("deleteSecondaryStagingStore", p.toURLValues()) + if err != nil { + return nil, err + } + + var r DeleteSecondaryStagingStoreResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + return &r, nil +} + +type DeleteSecondaryStagingStoreResponse struct { + Displaytext string `json:"displaytext,omitempty"` + Success string `json:"success,omitempty"` +} + +type UpdateCloudToUseObjectStoreParams struct { + p map[string]interface{} +} + +func (p *UpdateCloudToUseObjectStoreParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["details"]; found { + i := 0 + for k, vv := range v.(map[string]string) { + u.Set(fmt.Sprintf("details[%d].key", i), k) + u.Set(fmt.Sprintf("details[%d].value", i), vv) + i++ + } + } + if v, found := p.p["name"]; found { + u.Set("name", v.(string)) + } + if v, found := p.p["provider"]; found { + u.Set("provider", v.(string)) + } + if v, found := p.p["url"]; found { + u.Set("url", v.(string)) + } + return u +} + +func (p *UpdateCloudToUseObjectStoreParams) SetDetails(v map[string]string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["details"] = v + return +} + +func (p *UpdateCloudToUseObjectStoreParams) SetName(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["name"] = v + return +} + +func (p *UpdateCloudToUseObjectStoreParams) SetProvider(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["provider"] = v + return +} + +func (p *UpdateCloudToUseObjectStoreParams) SetUrl(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["url"] = v + return +} + +// You should always use this function to get a new UpdateCloudToUseObjectStoreParams instance, +// as then you are sure you have configured all required params +func (s *ImageStoreService) NewUpdateCloudToUseObjectStoreParams(provider string) *UpdateCloudToUseObjectStoreParams { + p := &UpdateCloudToUseObjectStoreParams{} + p.p = make(map[string]interface{}) + p.p["provider"] = provider + return p +} + +// Migrate current NFS secondary storages to use object store. 
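+//
+// Example (illustrative sketch; cs is assumed to be a configured *CloudStackClient;
+// the S3 endpoint and the detail keys shown are placeholders, not a definitive list
+// of what CloudStack expects):
+//
+//	p := cs.ImageStore.NewUpdateCloudToUseObjectStoreParams("S3")
+//	p.SetUrl("http://s3.example.com")
+//	p.SetDetails(map[string]string{
+//		"accesskey": "placeholder-access-key",
+//		"secretkey": "placeholder-secret-key",
+//		"bucket":    "cloudstack-secondary",
+//	})
+//	if _, err := cs.ImageStore.UpdateCloudToUseObjectStore(p); err != nil {
+//		log.Fatalf("migration failed: %v", err)
+//	}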
+func (s *ImageStoreService) UpdateCloudToUseObjectStore(p *UpdateCloudToUseObjectStoreParams) (*UpdateCloudToUseObjectStoreResponse, error) { + resp, err := s.cs.newRequest("updateCloudToUseObjectStore", p.toURLValues()) + if err != nil { + return nil, err + } + + var r UpdateCloudToUseObjectStoreResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + return &r, nil +} + +type UpdateCloudToUseObjectStoreResponse struct { + Details []string `json:"details,omitempty"` + Id string `json:"id,omitempty"` + Name string `json:"name,omitempty"` + Protocol string `json:"protocol,omitempty"` + Providername string `json:"providername,omitempty"` + Scope string `json:"scope,omitempty"` + Url string `json:"url,omitempty"` + Zoneid string `json:"zoneid,omitempty"` + Zonename string `json:"zonename,omitempty"` +} diff --git a/vendor/github.com/xanzy/go-cloudstack/cloudstack/InternalLBService.go b/vendor/github.com/xanzy/go-cloudstack/cloudstack/InternalLBService.go new file mode 100644 index 000000000..b4fceda57 --- /dev/null +++ b/vendor/github.com/xanzy/go-cloudstack/cloudstack/InternalLBService.go @@ -0,0 +1,1002 @@ +// +// Copyright 2014, Sander van Harmelen +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package cloudstack + +import ( + "encoding/json" + "fmt" + "net/url" + "strconv" + "strings" +) + +type ConfigureInternalLoadBalancerElementParams struct { + p map[string]interface{} +} + +func (p *ConfigureInternalLoadBalancerElementParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["enabled"]; found { + vv := strconv.FormatBool(v.(bool)) + u.Set("enabled", vv) + } + if v, found := p.p["id"]; found { + u.Set("id", v.(string)) + } + return u +} + +func (p *ConfigureInternalLoadBalancerElementParams) SetEnabled(v bool) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["enabled"] = v + return +} + +func (p *ConfigureInternalLoadBalancerElementParams) SetId(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["id"] = v + return +} + +// You should always use this function to get a new ConfigureInternalLoadBalancerElementParams instance, +// as then you are sure you have configured all required params +func (s *InternalLBService) NewConfigureInternalLoadBalancerElementParams(enabled bool, id string) *ConfigureInternalLoadBalancerElementParams { + p := &ConfigureInternalLoadBalancerElementParams{} + p.p = make(map[string]interface{}) + p.p["enabled"] = enabled + p.p["id"] = id + return p +} + +// Configures an Internal Load Balancer element. 
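+//
+// Example (illustrative sketch; cs is assumed to be a configured *CloudStackClient
+// and the element UUID is a placeholder): enable the element and, with an async
+// client, wait for the job to finish:
+//
+//	p := cs.InternalLB.NewConfigureInternalLoadBalancerElementParams(true, "element-uuid")
+//	el, err := cs.InternalLB.ConfigureInternalLoadBalancerElement(p)
+//	if err != nil {
+//		log.Fatalf("configure failed: %v", err)
+//	}
+//	fmt.Println("enabled:", el.Enabled)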
+func (s *InternalLBService) ConfigureInternalLoadBalancerElement(p *ConfigureInternalLoadBalancerElementParams) (*ConfigureInternalLoadBalancerElementResponse, error) {
+	resp, err := s.cs.newRequest("configureInternalLoadBalancerElement", p.toURLValues())
+	if err != nil {
+		return nil, err
+	}
+
+	var r ConfigureInternalLoadBalancerElementResponse
+	if err := json.Unmarshal(resp, &r); err != nil {
+		return nil, err
+	}
+
+	// If we have an async client, we need to wait for the async result
+	if s.cs.async {
+		b, err := s.cs.GetAsyncJobResult(r.JobID, s.cs.timeout)
+		if err != nil {
+			if err == AsyncTimeoutErr {
+				return &r, err
+			}
+			return nil, err
+		}
+
+		b, err = getRawValue(b)
+		if err != nil {
+			return nil, err
+		}
+
+		if err := json.Unmarshal(b, &r); err != nil {
+			return nil, err
+		}
+	}
+	return &r, nil
+}
+
+type ConfigureInternalLoadBalancerElementResponse struct {
+	JobID   string `json:"jobid,omitempty"`
+	Enabled bool   `json:"enabled,omitempty"`
+	Id      string `json:"id,omitempty"`
+	Nspid   string `json:"nspid,omitempty"`
+}
+
+type CreateInternalLoadBalancerElementParams struct {
+	p map[string]interface{}
+}
+
+func (p *CreateInternalLoadBalancerElementParams) toURLValues() url.Values {
+	u := url.Values{}
+	if p.p == nil {
+		return u
+	}
+	if v, found := p.p["nspid"]; found {
+		u.Set("nspid", v.(string))
+	}
+	return u
+}
+
+func (p *CreateInternalLoadBalancerElementParams) SetNspid(v string) {
+	if p.p == nil {
+		p.p = make(map[string]interface{})
+	}
+	p.p["nspid"] = v
+	return
+}
+
+// You should always use this function to get a new CreateInternalLoadBalancerElementParams instance,
+// as then you are sure you have configured all required params
+func (s *InternalLBService) NewCreateInternalLoadBalancerElementParams(nspid string) *CreateInternalLoadBalancerElementParams {
+	p := &CreateInternalLoadBalancerElementParams{}
+	p.p = make(map[string]interface{})
+	p.p["nspid"] = nspid
+	return p
+}
+
+// Creates an Internal Load Balancer element.
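+//
+// Example (illustrative sketch; cs is assumed to be a configured *CloudStackClient
+// and the network service provider UUID is a placeholder):
+//
+//	p := cs.InternalLB.NewCreateInternalLoadBalancerElementParams("nsp-uuid")
+//	el, err := cs.InternalLB.CreateInternalLoadBalancerElement(p)
+//	if err != nil {
+//		log.Fatalf("create failed: %v", err)
+//	}
+//	fmt.Println("new element id:", el.Id)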
+func (s *InternalLBService) CreateInternalLoadBalancerElement(p *CreateInternalLoadBalancerElementParams) (*CreateInternalLoadBalancerElementResponse, error) { + resp, err := s.cs.newRequest("createInternalLoadBalancerElement", p.toURLValues()) + if err != nil { + return nil, err + } + + var r CreateInternalLoadBalancerElementResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + + // If we have a async client, we need to wait for the async result + if s.cs.async { + b, err := s.cs.GetAsyncJobResult(r.JobID, s.cs.timeout) + if err != nil { + if err == AsyncTimeoutErr { + return &r, err + } + return nil, err + } + + b, err = getRawValue(b) + if err != nil { + return nil, err + } + + if err := json.Unmarshal(b, &r); err != nil { + return nil, err + } + } + return &r, nil +} + +type CreateInternalLoadBalancerElementResponse struct { + JobID string `json:"jobid,omitempty"` + Enabled bool `json:"enabled,omitempty"` + Id string `json:"id,omitempty"` + Nspid string `json:"nspid,omitempty"` +} + +type ListInternalLoadBalancerElementsParams struct { + p map[string]interface{} +} + +func (p *ListInternalLoadBalancerElementsParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["enabled"]; found { + vv := strconv.FormatBool(v.(bool)) + u.Set("enabled", vv) + } + if v, found := p.p["id"]; found { + u.Set("id", v.(string)) + } + if v, found := p.p["keyword"]; found { + u.Set("keyword", v.(string)) + } + if v, found := p.p["nspid"]; found { + u.Set("nspid", v.(string)) + } + if v, found := p.p["page"]; found { + vv := strconv.Itoa(v.(int)) + u.Set("page", vv) + } + if v, found := p.p["pagesize"]; found { + vv := strconv.Itoa(v.(int)) + u.Set("pagesize", vv) + } + return u +} + +func (p *ListInternalLoadBalancerElementsParams) SetEnabled(v bool) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["enabled"] = v + return +} + +func (p *ListInternalLoadBalancerElementsParams) SetId(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["id"] = v + return +} + +func (p *ListInternalLoadBalancerElementsParams) SetKeyword(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["keyword"] = v + return +} + +func (p *ListInternalLoadBalancerElementsParams) SetNspid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["nspid"] = v + return +} + +func (p *ListInternalLoadBalancerElementsParams) SetPage(v int) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["page"] = v + return +} + +func (p *ListInternalLoadBalancerElementsParams) SetPagesize(v int) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["pagesize"] = v + return +} + +// You should always use this function to get a new ListInternalLoadBalancerElementsParams instance, +// as then you are sure you have configured all required params +func (s *InternalLBService) NewListInternalLoadBalancerElementsParams() *ListInternalLoadBalancerElementsParams { + p := &ListInternalLoadBalancerElementsParams{} + p.p = make(map[string]interface{}) + return p +} + +// This is a courtesy helper function, which in some cases may not work as expected! 
+func (s *InternalLBService) GetInternalLoadBalancerElementByID(id string) (*InternalLoadBalancerElement, int, error) {
+	p := &ListInternalLoadBalancerElementsParams{}
+	p.p = make(map[string]interface{})
+
+	p.p["id"] = id
+
+	l, err := s.ListInternalLoadBalancerElements(p)
+	if err != nil {
+		if strings.Contains(err.Error(), fmt.Sprintf(
+			"Invalid parameter id value=%s due to incorrect long value format, "+
+				"or entity does not exist", id)) {
+			return nil, 0, fmt.Errorf("No match found for %s: %+v", id, l)
+		}
+		return nil, -1, err
+	}
+
+	if l.Count == 0 {
+		return nil, l.Count, fmt.Errorf("No match found for %s: %+v", id, l)
+	}
+
+	if l.Count == 1 {
+		return l.InternalLoadBalancerElements[0], l.Count, nil
+	}
+	return nil, l.Count, fmt.Errorf("There is more than one result for InternalLoadBalancerElement UUID: %s!", id)
+}
+
+// Lists all available Internal Load Balancer elements.
+func (s *InternalLBService) ListInternalLoadBalancerElements(p *ListInternalLoadBalancerElementsParams) (*ListInternalLoadBalancerElementsResponse, error) {
+	resp, err := s.cs.newRequest("listInternalLoadBalancerElements", p.toURLValues())
+	if err != nil {
+		return nil, err
+	}
+
+	var r ListInternalLoadBalancerElementsResponse
+	if err := json.Unmarshal(resp, &r); err != nil {
+		return nil, err
+	}
+	return &r, nil
+}
+
+type ListInternalLoadBalancerElementsResponse struct {
+	Count                        int                            `json:"count"`
+	InternalLoadBalancerElements []*InternalLoadBalancerElement `json:"internalloadbalancerelement"`
+}
+
+type InternalLoadBalancerElement struct {
+	Enabled bool   `json:"enabled,omitempty"`
+	Id      string `json:"id,omitempty"`
+	Nspid   string `json:"nspid,omitempty"`
+}
+
+type StopInternalLoadBalancerVMParams struct {
+	p map[string]interface{}
+}
+
+func (p *StopInternalLoadBalancerVMParams) toURLValues() url.Values {
+	u := url.Values{}
+	if p.p == nil {
+		return u
+	}
+	if v, found := p.p["forced"]; found {
+		vv := strconv.FormatBool(v.(bool))
+		u.Set("forced", vv)
+	}
+	if v, found := p.p["id"]; found {
+		u.Set("id", v.(string))
+	}
+	return u
+}
+
+func (p *StopInternalLoadBalancerVMParams) SetForced(v bool) {
+	if p.p == nil {
+		p.p = make(map[string]interface{})
+	}
+	p.p["forced"] = v
+	return
+}
+
+func (p *StopInternalLoadBalancerVMParams) SetId(v string) {
+	if p.p == nil {
+		p.p = make(map[string]interface{})
+	}
+	p.p["id"] = v
+	return
+}
+
+// You should always use this function to get a new StopInternalLoadBalancerVMParams instance,
+// as then you are sure you have configured all required params
+func (s *InternalLBService) NewStopInternalLoadBalancerVMParams(id string) *StopInternalLoadBalancerVMParams {
+	p := &StopInternalLoadBalancerVMParams{}
+	p.p = make(map[string]interface{})
+	p.p["id"] = id
+	return p
+}
+
+// Stops an Internal LB vm.
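+//
+// Example (illustrative sketch; cs is assumed to be a configured *CloudStackClient
+// and the VM UUID is a placeholder): force-stop the load balancer VM and report
+// its state:
+//
+//	p := cs.InternalLB.NewStopInternalLoadBalancerVMParams("lbvm-uuid")
+//	p.SetForced(true)
+//	vm, err := cs.InternalLB.StopInternalLoadBalancerVM(p)
+//	if err != nil {
+//		log.Fatalf("stop failed: %v", err)
+//	}
+//	fmt.Println("state:", vm.State)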
+func (s *InternalLBService) StopInternalLoadBalancerVM(p *StopInternalLoadBalancerVMParams) (*StopInternalLoadBalancerVMResponse, error) { + resp, err := s.cs.newRequest("stopInternalLoadBalancerVM", p.toURLValues()) + if err != nil { + return nil, err + } + + var r StopInternalLoadBalancerVMResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + + // If we have a async client, we need to wait for the async result + if s.cs.async { + b, err := s.cs.GetAsyncJobResult(r.JobID, s.cs.timeout) + if err != nil { + if err == AsyncTimeoutErr { + return &r, err + } + return nil, err + } + + b, err = getRawValue(b) + if err != nil { + return nil, err + } + + if err := json.Unmarshal(b, &r); err != nil { + return nil, err + } + } + return &r, nil +} + +type StopInternalLoadBalancerVMResponse struct { + JobID string `json:"jobid,omitempty"` + Account string `json:"account,omitempty"` + Created string `json:"created,omitempty"` + Dns1 string `json:"dns1,omitempty"` + Dns2 string `json:"dns2,omitempty"` + Domain string `json:"domain,omitempty"` + Domainid string `json:"domainid,omitempty"` + Gateway string `json:"gateway,omitempty"` + Guestipaddress string `json:"guestipaddress,omitempty"` + Guestmacaddress string `json:"guestmacaddress,omitempty"` + Guestnetmask string `json:"guestnetmask,omitempty"` + Guestnetworkid string `json:"guestnetworkid,omitempty"` + Hostid string `json:"hostid,omitempty"` + Hostname string `json:"hostname,omitempty"` + Id string `json:"id,omitempty"` + Ip6dns1 string `json:"ip6dns1,omitempty"` + Ip6dns2 string `json:"ip6dns2,omitempty"` + Isredundantrouter bool `json:"isredundantrouter,omitempty"` + Linklocalip string `json:"linklocalip,omitempty"` + Linklocalmacaddress string `json:"linklocalmacaddress,omitempty"` + Linklocalnetmask string `json:"linklocalnetmask,omitempty"` + Linklocalnetworkid string `json:"linklocalnetworkid,omitempty"` + Name string `json:"name,omitempty"` + Networkdomain string `json:"networkdomain,omitempty"` + Nic []struct { + Broadcasturi string `json:"broadcasturi,omitempty"` + Deviceid string `json:"deviceid,omitempty"` + Gateway string `json:"gateway,omitempty"` + Id string `json:"id,omitempty"` + Ip6address string `json:"ip6address,omitempty"` + Ip6cidr string `json:"ip6cidr,omitempty"` + Ip6gateway string `json:"ip6gateway,omitempty"` + Ipaddress string `json:"ipaddress,omitempty"` + Isdefault bool `json:"isdefault,omitempty"` + Isolationuri string `json:"isolationuri,omitempty"` + Macaddress string `json:"macaddress,omitempty"` + Netmask string `json:"netmask,omitempty"` + Networkid string `json:"networkid,omitempty"` + Networkname string `json:"networkname,omitempty"` + Secondaryip []struct { + Id string `json:"id,omitempty"` + Ipaddress string `json:"ipaddress,omitempty"` + } `json:"secondaryip,omitempty"` + Traffictype string `json:"traffictype,omitempty"` + Type string `json:"type,omitempty"` + Virtualmachineid string `json:"virtualmachineid,omitempty"` + } `json:"nic,omitempty"` + Podid string `json:"podid,omitempty"` + Project string `json:"project,omitempty"` + Projectid string `json:"projectid,omitempty"` + Publicip string `json:"publicip,omitempty"` + Publicmacaddress string `json:"publicmacaddress,omitempty"` + Publicnetmask string `json:"publicnetmask,omitempty"` + Publicnetworkid string `json:"publicnetworkid,omitempty"` + Redundantstate string `json:"redundantstate,omitempty"` + Requiresupgrade bool `json:"requiresupgrade,omitempty"` + Role string `json:"role,omitempty"` + Scriptsversion string 
`json:"scriptsversion,omitempty"` + Serviceofferingid string `json:"serviceofferingid,omitempty"` + Serviceofferingname string `json:"serviceofferingname,omitempty"` + State string `json:"state,omitempty"` + Templateid string `json:"templateid,omitempty"` + Version string `json:"version,omitempty"` + Vpcid string `json:"vpcid,omitempty"` + Zoneid string `json:"zoneid,omitempty"` + Zonename string `json:"zonename,omitempty"` +} + +type StartInternalLoadBalancerVMParams struct { + p map[string]interface{} +} + +func (p *StartInternalLoadBalancerVMParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["id"]; found { + u.Set("id", v.(string)) + } + return u +} + +func (p *StartInternalLoadBalancerVMParams) SetId(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["id"] = v + return +} + +// You should always use this function to get a new StartInternalLoadBalancerVMParams instance, +// as then you are sure you have configured all required params +func (s *InternalLBService) NewStartInternalLoadBalancerVMParams(id string) *StartInternalLoadBalancerVMParams { + p := &StartInternalLoadBalancerVMParams{} + p.p = make(map[string]interface{}) + p.p["id"] = id + return p +} + +// Starts an existing internal lb vm. +func (s *InternalLBService) StartInternalLoadBalancerVM(p *StartInternalLoadBalancerVMParams) (*StartInternalLoadBalancerVMResponse, error) { + resp, err := s.cs.newRequest("startInternalLoadBalancerVM", p.toURLValues()) + if err != nil { + return nil, err + } + + var r StartInternalLoadBalancerVMResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + + // If we have a async client, we need to wait for the async result + if s.cs.async { + b, err := s.cs.GetAsyncJobResult(r.JobID, s.cs.timeout) + if err != nil { + if err == AsyncTimeoutErr { + return &r, err + } + return nil, err + } + + b, err = getRawValue(b) + if err != nil { + return nil, err + } + + if err := json.Unmarshal(b, &r); err != nil { + return nil, err + } + } + return &r, nil +} + +type StartInternalLoadBalancerVMResponse struct { + JobID string `json:"jobid,omitempty"` + Account string `json:"account,omitempty"` + Created string `json:"created,omitempty"` + Dns1 string `json:"dns1,omitempty"` + Dns2 string `json:"dns2,omitempty"` + Domain string `json:"domain,omitempty"` + Domainid string `json:"domainid,omitempty"` + Gateway string `json:"gateway,omitempty"` + Guestipaddress string `json:"guestipaddress,omitempty"` + Guestmacaddress string `json:"guestmacaddress,omitempty"` + Guestnetmask string `json:"guestnetmask,omitempty"` + Guestnetworkid string `json:"guestnetworkid,omitempty"` + Hostid string `json:"hostid,omitempty"` + Hostname string `json:"hostname,omitempty"` + Id string `json:"id,omitempty"` + Ip6dns1 string `json:"ip6dns1,omitempty"` + Ip6dns2 string `json:"ip6dns2,omitempty"` + Isredundantrouter bool `json:"isredundantrouter,omitempty"` + Linklocalip string `json:"linklocalip,omitempty"` + Linklocalmacaddress string `json:"linklocalmacaddress,omitempty"` + Linklocalnetmask string `json:"linklocalnetmask,omitempty"` + Linklocalnetworkid string `json:"linklocalnetworkid,omitempty"` + Name string `json:"name,omitempty"` + Networkdomain string `json:"networkdomain,omitempty"` + Nic []struct { + Broadcasturi string `json:"broadcasturi,omitempty"` + Deviceid string `json:"deviceid,omitempty"` + Gateway string `json:"gateway,omitempty"` + Id string `json:"id,omitempty"` + Ip6address string 
`json:"ip6address,omitempty"` + Ip6cidr string `json:"ip6cidr,omitempty"` + Ip6gateway string `json:"ip6gateway,omitempty"` + Ipaddress string `json:"ipaddress,omitempty"` + Isdefault bool `json:"isdefault,omitempty"` + Isolationuri string `json:"isolationuri,omitempty"` + Macaddress string `json:"macaddress,omitempty"` + Netmask string `json:"netmask,omitempty"` + Networkid string `json:"networkid,omitempty"` + Networkname string `json:"networkname,omitempty"` + Secondaryip []struct { + Id string `json:"id,omitempty"` + Ipaddress string `json:"ipaddress,omitempty"` + } `json:"secondaryip,omitempty"` + Traffictype string `json:"traffictype,omitempty"` + Type string `json:"type,omitempty"` + Virtualmachineid string `json:"virtualmachineid,omitempty"` + } `json:"nic,omitempty"` + Podid string `json:"podid,omitempty"` + Project string `json:"project,omitempty"` + Projectid string `json:"projectid,omitempty"` + Publicip string `json:"publicip,omitempty"` + Publicmacaddress string `json:"publicmacaddress,omitempty"` + Publicnetmask string `json:"publicnetmask,omitempty"` + Publicnetworkid string `json:"publicnetworkid,omitempty"` + Redundantstate string `json:"redundantstate,omitempty"` + Requiresupgrade bool `json:"requiresupgrade,omitempty"` + Role string `json:"role,omitempty"` + Scriptsversion string `json:"scriptsversion,omitempty"` + Serviceofferingid string `json:"serviceofferingid,omitempty"` + Serviceofferingname string `json:"serviceofferingname,omitempty"` + State string `json:"state,omitempty"` + Templateid string `json:"templateid,omitempty"` + Version string `json:"version,omitempty"` + Vpcid string `json:"vpcid,omitempty"` + Zoneid string `json:"zoneid,omitempty"` + Zonename string `json:"zonename,omitempty"` +} + +type ListInternalLoadBalancerVMsParams struct { + p map[string]interface{} +} + +func (p *ListInternalLoadBalancerVMsParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["account"]; found { + u.Set("account", v.(string)) + } + if v, found := p.p["domainid"]; found { + u.Set("domainid", v.(string)) + } + if v, found := p.p["forvpc"]; found { + vv := strconv.FormatBool(v.(bool)) + u.Set("forvpc", vv) + } + if v, found := p.p["hostid"]; found { + u.Set("hostid", v.(string)) + } + if v, found := p.p["id"]; found { + u.Set("id", v.(string)) + } + if v, found := p.p["isrecursive"]; found { + vv := strconv.FormatBool(v.(bool)) + u.Set("isrecursive", vv) + } + if v, found := p.p["keyword"]; found { + u.Set("keyword", v.(string)) + } + if v, found := p.p["listall"]; found { + vv := strconv.FormatBool(v.(bool)) + u.Set("listall", vv) + } + if v, found := p.p["name"]; found { + u.Set("name", v.(string)) + } + if v, found := p.p["networkid"]; found { + u.Set("networkid", v.(string)) + } + if v, found := p.p["page"]; found { + vv := strconv.Itoa(v.(int)) + u.Set("page", vv) + } + if v, found := p.p["pagesize"]; found { + vv := strconv.Itoa(v.(int)) + u.Set("pagesize", vv) + } + if v, found := p.p["podid"]; found { + u.Set("podid", v.(string)) + } + if v, found := p.p["projectid"]; found { + u.Set("projectid", v.(string)) + } + if v, found := p.p["state"]; found { + u.Set("state", v.(string)) + } + if v, found := p.p["vpcid"]; found { + u.Set("vpcid", v.(string)) + } + if v, found := p.p["zoneid"]; found { + u.Set("zoneid", v.(string)) + } + return u +} + +func (p *ListInternalLoadBalancerVMsParams) SetAccount(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["account"] = v + return +} + +func (p 
*ListInternalLoadBalancerVMsParams) SetDomainid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["domainid"] = v + return +} + +func (p *ListInternalLoadBalancerVMsParams) SetForvpc(v bool) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["forvpc"] = v + return +} + +func (p *ListInternalLoadBalancerVMsParams) SetHostid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["hostid"] = v + return +} + +func (p *ListInternalLoadBalancerVMsParams) SetId(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["id"] = v + return +} + +func (p *ListInternalLoadBalancerVMsParams) SetIsrecursive(v bool) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["isrecursive"] = v + return +} + +func (p *ListInternalLoadBalancerVMsParams) SetKeyword(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["keyword"] = v + return +} + +func (p *ListInternalLoadBalancerVMsParams) SetListall(v bool) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["listall"] = v + return +} + +func (p *ListInternalLoadBalancerVMsParams) SetName(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["name"] = v + return +} + +func (p *ListInternalLoadBalancerVMsParams) SetNetworkid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["networkid"] = v + return +} + +func (p *ListInternalLoadBalancerVMsParams) SetPage(v int) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["page"] = v + return +} + +func (p *ListInternalLoadBalancerVMsParams) SetPagesize(v int) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["pagesize"] = v + return +} + +func (p *ListInternalLoadBalancerVMsParams) SetPodid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["podid"] = v + return +} + +func (p *ListInternalLoadBalancerVMsParams) SetProjectid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["projectid"] = v + return +} + +func (p *ListInternalLoadBalancerVMsParams) SetState(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["state"] = v + return +} + +func (p *ListInternalLoadBalancerVMsParams) SetVpcid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["vpcid"] = v + return +} + +func (p *ListInternalLoadBalancerVMsParams) SetZoneid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["zoneid"] = v + return +} + +// You should always use this function to get a new ListInternalLoadBalancerVMsParams instance, +// as then you are sure you have configured all required params +func (s *InternalLBService) NewListInternalLoadBalancerVMsParams() *ListInternalLoadBalancerVMsParams { + p := &ListInternalLoadBalancerVMsParams{} + p.p = make(map[string]interface{}) + return p +} + +// This is a courtesy helper function, which in some cases may not work as expected! 
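+// A usage sketch (this assumes the CloudStackClient value cs exposes this
+// service as cs.InternalLB; the VM name is made up):
+//
+//    id, err := cs.InternalLB.GetInternalLoadBalancerVMID("lb-vm-1")
+//    if err != nil {
+//        return err
+//    }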
+func (s *InternalLBService) GetInternalLoadBalancerVMID(name string) (string, error) { + p := &ListInternalLoadBalancerVMsParams{} + p.p = make(map[string]interface{}) + + p.p["name"] = name + + l, err := s.ListInternalLoadBalancerVMs(p) + if err != nil { + return "", err + } + + if l.Count == 0 { + // If no matches, search all projects + p.p["projectid"] = "-1" + + l, err = s.ListInternalLoadBalancerVMs(p) + if err != nil { + return "", err + } + } + + if l.Count == 0 { + return "", fmt.Errorf("No match found for %s: %+v", name, l) + } + + if l.Count == 1 { + return l.InternalLoadBalancerVMs[0].Id, nil + } + + if l.Count > 1 { + for _, v := range l.InternalLoadBalancerVMs { + if v.Name == name { + return v.Id, nil + } + } + } + return "", fmt.Errorf("Could not find an exact match for %s: %+v", name, l) +} + +// This is a courtesy helper function, which in some cases may not work as expected! +func (s *InternalLBService) GetInternalLoadBalancerVMByName(name string) (*InternalLoadBalancerVM, int, error) { + id, err := s.GetInternalLoadBalancerVMID(name) + if err != nil { + return nil, -1, err + } + + r, count, err := s.GetInternalLoadBalancerVMByID(id) + if err != nil { + return nil, count, err + } + return r, count, nil +} + +// This is a courtesy helper function, which in some cases may not work as expected! +func (s *InternalLBService) GetInternalLoadBalancerVMByID(id string) (*InternalLoadBalancerVM, int, error) { + p := &ListInternalLoadBalancerVMsParams{} + p.p = make(map[string]interface{}) + + p.p["id"] = id + + l, err := s.ListInternalLoadBalancerVMs(p) + if err != nil { + if strings.Contains(err.Error(), fmt.Sprintf( + "Invalid parameter id value=%s due to incorrect long value format, "+ + "or entity does not exist", id)) { + return nil, 0, fmt.Errorf("No match found for %s: %+v", id, l) + } + return nil, -1, err + } + + if l.Count == 0 { + // If no matches, search all projects + p.p["projectid"] = "-1" + + l, err = s.ListInternalLoadBalancerVMs(p) + if err != nil { + if strings.Contains(err.Error(), fmt.Sprintf( + "Invalid parameter id value=%s due to incorrect long value format, "+ + "or entity does not exist", id)) { + return nil, 0, fmt.Errorf("No match found for %s: %+v", id, l) + } + return nil, -1, err + } + } + + if l.Count == 0 { + return nil, l.Count, fmt.Errorf("No match found for %s: %+v", id, l) + } + + if l.Count == 1 { + return l.InternalLoadBalancerVMs[0], l.Count, nil + } + return nil, l.Count, fmt.Errorf("There is more than one result for InternalLoadBalancerVM UUID: %s!", id) +} + +// List internal LB VMs.
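+// A minimal sketch of the params-builder pattern used throughout this
+// package (the client value cs is assumed):
+//
+//    p := cs.InternalLB.NewListInternalLoadBalancerVMsParams()
+//    p.SetListall(true)
+//    r, err := cs.InternalLB.ListInternalLoadBalancerVMs(p)
+//    if err == nil {
+//        fmt.Printf("found %d internal LB VM(s)\n", r.Count)
+//    }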
+func (s *InternalLBService) ListInternalLoadBalancerVMs(p *ListInternalLoadBalancerVMsParams) (*ListInternalLoadBalancerVMsResponse, error) { + resp, err := s.cs.newRequest("listInternalLoadBalancerVMs", p.toURLValues()) + if err != nil { + return nil, err + } + + var r ListInternalLoadBalancerVMsResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + return &r, nil +} + +type ListInternalLoadBalancerVMsResponse struct { + Count int `json:"count"` + InternalLoadBalancerVMs []*InternalLoadBalancerVM `json:"internalloadbalancervm"` +} + +type InternalLoadBalancerVM struct { + Account string `json:"account,omitempty"` + Created string `json:"created,omitempty"` + Dns1 string `json:"dns1,omitempty"` + Dns2 string `json:"dns2,omitempty"` + Domain string `json:"domain,omitempty"` + Domainid string `json:"domainid,omitempty"` + Gateway string `json:"gateway,omitempty"` + Guestipaddress string `json:"guestipaddress,omitempty"` + Guestmacaddress string `json:"guestmacaddress,omitempty"` + Guestnetmask string `json:"guestnetmask,omitempty"` + Guestnetworkid string `json:"guestnetworkid,omitempty"` + Hostid string `json:"hostid,omitempty"` + Hostname string `json:"hostname,omitempty"` + Id string `json:"id,omitempty"` + Ip6dns1 string `json:"ip6dns1,omitempty"` + Ip6dns2 string `json:"ip6dns2,omitempty"` + Isredundantrouter bool `json:"isredundantrouter,omitempty"` + Linklocalip string `json:"linklocalip,omitempty"` + Linklocalmacaddress string `json:"linklocalmacaddress,omitempty"` + Linklocalnetmask string `json:"linklocalnetmask,omitempty"` + Linklocalnetworkid string `json:"linklocalnetworkid,omitempty"` + Name string `json:"name,omitempty"` + Networkdomain string `json:"networkdomain,omitempty"` + Nic []struct { + Broadcasturi string `json:"broadcasturi,omitempty"` + Deviceid string `json:"deviceid,omitempty"` + Gateway string `json:"gateway,omitempty"` + Id string `json:"id,omitempty"` + Ip6address string `json:"ip6address,omitempty"` + Ip6cidr string `json:"ip6cidr,omitempty"` + Ip6gateway string `json:"ip6gateway,omitempty"` + Ipaddress string `json:"ipaddress,omitempty"` + Isdefault bool `json:"isdefault,omitempty"` + Isolationuri string `json:"isolationuri,omitempty"` + Macaddress string `json:"macaddress,omitempty"` + Netmask string `json:"netmask,omitempty"` + Networkid string `json:"networkid,omitempty"` + Networkname string `json:"networkname,omitempty"` + Secondaryip []struct { + Id string `json:"id,omitempty"` + Ipaddress string `json:"ipaddress,omitempty"` + } `json:"secondaryip,omitempty"` + Traffictype string `json:"traffictype,omitempty"` + Type string `json:"type,omitempty"` + Virtualmachineid string `json:"virtualmachineid,omitempty"` + } `json:"nic,omitempty"` + Podid string `json:"podid,omitempty"` + Project string `json:"project,omitempty"` + Projectid string `json:"projectid,omitempty"` + Publicip string `json:"publicip,omitempty"` + Publicmacaddress string `json:"publicmacaddress,omitempty"` + Publicnetmask string `json:"publicnetmask,omitempty"` + Publicnetworkid string `json:"publicnetworkid,omitempty"` + Redundantstate string `json:"redundantstate,omitempty"` + Requiresupgrade bool `json:"requiresupgrade,omitempty"` + Role string `json:"role,omitempty"` + Scriptsversion string `json:"scriptsversion,omitempty"` + Serviceofferingid string `json:"serviceofferingid,omitempty"` + Serviceofferingname string `json:"serviceofferingname,omitempty"` + State string `json:"state,omitempty"` + Templateid string `json:"templateid,omitempty"` + Version 
string `json:"version,omitempty"` + Vpcid string `json:"vpcid,omitempty"` + Zoneid string `json:"zoneid,omitempty"` + Zonename string `json:"zonename,omitempty"` +} diff --git a/vendor/github.com/xanzy/go-cloudstack/cloudstack/LDAPService.go b/vendor/github.com/xanzy/go-cloudstack/cloudstack/LDAPService.go new file mode 100644 index 000000000..f7890854a --- /dev/null +++ b/vendor/github.com/xanzy/go-cloudstack/cloudstack/LDAPService.go @@ -0,0 +1,239 @@ +// +// Copyright 2014, Sander van Harmelen +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package cloudstack + +import ( + "encoding/json" + "fmt" + "net/url" + "strconv" +) + +type LdapCreateAccountParams struct { + p map[string]interface{} +} + +func (p *LdapCreateAccountParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["account"]; found { + u.Set("account", v.(string)) + } + if v, found := p.p["accountdetails"]; found { + i := 0 + for k, vv := range v.(map[string]string) { + u.Set(fmt.Sprintf("accountdetails[%d].key", i), k) + u.Set(fmt.Sprintf("accountdetails[%d].value", i), vv) + i++ + } + } + if v, found := p.p["accountid"]; found { + u.Set("accountid", v.(string)) + } + if v, found := p.p["accounttype"]; found { + vv := strconv.Itoa(v.(int)) + u.Set("accounttype", vv) + } + if v, found := p.p["domainid"]; found { + u.Set("domainid", v.(string)) + } + if v, found := p.p["networkdomain"]; found { + u.Set("networkdomain", v.(string)) + } + if v, found := p.p["timezone"]; found { + u.Set("timezone", v.(string)) + } + if v, found := p.p["userid"]; found { + u.Set("userid", v.(string)) + } + if v, found := p.p["username"]; found { + u.Set("username", v.(string)) + } + return u +} + +func (p *LdapCreateAccountParams) SetAccount(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["account"] = v + return +} + +func (p *LdapCreateAccountParams) SetAccountdetails(v map[string]string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["accountdetails"] = v + return +} + +func (p *LdapCreateAccountParams) SetAccountid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["accountid"] = v + return +} + +func (p *LdapCreateAccountParams) SetAccounttype(v int) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["accounttype"] = v + return +} + +func (p *LdapCreateAccountParams) SetDomainid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["domainid"] = v + return +} + +func (p *LdapCreateAccountParams) SetNetworkdomain(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["networkdomain"] = v + return +} + +func (p *LdapCreateAccountParams) SetTimezone(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["timezone"] = v + return +} + +func (p *LdapCreateAccountParams) SetUserid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["userid"] = v + return +} + +func (p *LdapCreateAccountParams) 
SetUsername(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["username"] = v + return +} + +// You should always use this function to get a new LdapCreateAccountParams instance, +// as then you are sure you have configured all required params +func (s *LDAPService) NewLdapCreateAccountParams(accounttype int, username string) *LdapCreateAccountParams { + p := &LdapCreateAccountParams{} + p.p = make(map[string]interface{}) + p.p["accounttype"] = accounttype + p.p["username"] = username + return p +} + +// Creates an account from an LDAP user +func (s *LDAPService) LdapCreateAccount(p *LdapCreateAccountParams) (*LdapCreateAccountResponse, error) { + resp, err := s.cs.newRequest("ldapCreateAccount", p.toURLValues()) + if err != nil { + return nil, err + } + + var r LdapCreateAccountResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + return &r, nil +} + +type LdapCreateAccountResponse struct { + Accountdetails map[string]string `json:"accountdetails,omitempty"` + Accounttype int `json:"accounttype,omitempty"` + Cpuavailable string `json:"cpuavailable,omitempty"` + Cpulimit string `json:"cpulimit,omitempty"` + Cputotal int64 `json:"cputotal,omitempty"` + Defaultzoneid string `json:"defaultzoneid,omitempty"` + Domain string `json:"domain,omitempty"` + Domainid string `json:"domainid,omitempty"` + Groups []string `json:"groups,omitempty"` + Id string `json:"id,omitempty"` + Ipavailable string `json:"ipavailable,omitempty"` + Iplimit string `json:"iplimit,omitempty"` + Iptotal int64 `json:"iptotal,omitempty"` + Iscleanuprequired bool `json:"iscleanuprequired,omitempty"` + Isdefault bool `json:"isdefault,omitempty"` + Memoryavailable string `json:"memoryavailable,omitempty"` + Memorylimit string `json:"memorylimit,omitempty"` + Memorytotal int64 `json:"memorytotal,omitempty"` + Name string `json:"name,omitempty"` + Networkavailable string `json:"networkavailable,omitempty"` + Networkdomain string `json:"networkdomain,omitempty"` + Networklimit string `json:"networklimit,omitempty"` + Networktotal int64 `json:"networktotal,omitempty"` + Primarystorageavailable string `json:"primarystorageavailable,omitempty"` + Primarystoragelimit string `json:"primarystoragelimit,omitempty"` + Primarystoragetotal int64 `json:"primarystoragetotal,omitempty"` + Projectavailable string `json:"projectavailable,omitempty"` + Projectlimit string `json:"projectlimit,omitempty"` + Projecttotal int64 `json:"projecttotal,omitempty"` + Receivedbytes int64 `json:"receivedbytes,omitempty"` + Secondarystorageavailable string `json:"secondarystorageavailable,omitempty"` + Secondarystoragelimit string `json:"secondarystoragelimit,omitempty"` + Secondarystoragetotal int64 `json:"secondarystoragetotal,omitempty"` + Sentbytes int64 `json:"sentbytes,omitempty"` + Snapshotavailable string `json:"snapshotavailable,omitempty"` + Snapshotlimit string `json:"snapshotlimit,omitempty"` + Snapshottotal int64 `json:"snapshottotal,omitempty"` + State string `json:"state,omitempty"` + Templateavailable string `json:"templateavailable,omitempty"` + Templatelimit string `json:"templatelimit,omitempty"` + Templatetotal int64 `json:"templatetotal,omitempty"` + User []struct { + Account string `json:"account,omitempty"` + Accountid string `json:"accountid,omitempty"` + Accounttype int `json:"accounttype,omitempty"` + Apikey string `json:"apikey,omitempty"` + Created string `json:"created,omitempty"` + Domain string `json:"domain,omitempty"` + Domainid string `json:"domainid,omitempty"` + 
Email string `json:"email,omitempty"` + Firstname string `json:"firstname,omitempty"` + Id string `json:"id,omitempty"` + Iscallerchilddomain bool `json:"iscallerchilddomain,omitempty"` + Isdefault bool `json:"isdefault,omitempty"` + Lastname string `json:"lastname,omitempty"` + Secretkey string `json:"secretkey,omitempty"` + State string `json:"state,omitempty"` + Timezone string `json:"timezone,omitempty"` + Username string `json:"username,omitempty"` + } `json:"user,omitempty"` + Vmavailable string `json:"vmavailable,omitempty"` + Vmlimit string `json:"vmlimit,omitempty"` + Vmrunning int `json:"vmrunning,omitempty"` + Vmstopped int `json:"vmstopped,omitempty"` + Vmtotal int64 `json:"vmtotal,omitempty"` + Volumeavailable string `json:"volumeavailable,omitempty"` + Volumelimit string `json:"volumelimit,omitempty"` + Volumetotal int64 `json:"volumetotal,omitempty"` + Vpcavailable string `json:"vpcavailable,omitempty"` + Vpclimit string `json:"vpclimit,omitempty"` + Vpctotal int64 `json:"vpctotal,omitempty"` +} diff --git a/vendor/github.com/xanzy/go-cloudstack/cloudstack/LimitService.go b/vendor/github.com/xanzy/go-cloudstack/cloudstack/LimitService.go new file mode 100644 index 000000000..6aa7e7292 --- /dev/null +++ b/vendor/github.com/xanzy/go-cloudstack/cloudstack/LimitService.go @@ -0,0 +1,475 @@ +// +// Copyright 2014, Sander van Harmelen +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// + +package cloudstack + +import ( + "encoding/json" + "net/url" + "strconv" +) + +type UpdateResourceLimitParams struct { + p map[string]interface{} +} + +func (p *UpdateResourceLimitParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["account"]; found { + u.Set("account", v.(string)) + } + if v, found := p.p["domainid"]; found { + u.Set("domainid", v.(string)) + } + if v, found := p.p["max"]; found { + vv := strconv.FormatInt(v.(int64), 10) + u.Set("max", vv) + } + if v, found := p.p["projectid"]; found { + u.Set("projectid", v.(string)) + } + if v, found := p.p["resourcetype"]; found { + vv := strconv.Itoa(v.(int)) + u.Set("resourcetype", vv) + } + return u +} + +func (p *UpdateResourceLimitParams) SetAccount(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["account"] = v + return +} + +func (p *UpdateResourceLimitParams) SetDomainid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["domainid"] = v + return +} + +func (p *UpdateResourceLimitParams) SetMax(v int64) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["max"] = v + return +} + +func (p *UpdateResourceLimitParams) SetProjectid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["projectid"] = v + return +} + +func (p *UpdateResourceLimitParams) SetResourcetype(v int) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["resourcetype"] = v + return +} + +// You should always use this function to get a new UpdateResourceLimitParams instance, +// as then you are sure you have configured all required params +func (s *LimitService) NewUpdateResourceLimitParams(resourcetype int) *UpdateResourceLimitParams { + p := &UpdateResourceLimitParams{} + p.p = make(map[string]interface{}) + p.p["resourcetype"] = resourcetype + return p +} + +// Updates resource limits for an account or domain. 
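+// For example, capping a domain at 20 instances (a sketch; the client value
+// cs and the domain ID are assumed, and resource type 0 is the CloudStack
+// instance limit):
+//
+//    p := cs.Limit.NewUpdateResourceLimitParams(0)
+//    p.SetDomainid("domain-id")
+//    p.SetMax(20)
+//    _, err := cs.Limit.UpdateResourceLimit(p)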
+func (s *LimitService) UpdateResourceLimit(p *UpdateResourceLimitParams) (*UpdateResourceLimitResponse, error) { + resp, err := s.cs.newRequest("updateResourceLimit", p.toURLValues()) + if err != nil { + return nil, err + } + + var r UpdateResourceLimitResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + return &r, nil +} + +type UpdateResourceLimitResponse struct { + Account string `json:"account,omitempty"` + Domain string `json:"domain,omitempty"` + Domainid string `json:"domainid,omitempty"` + Max int64 `json:"max,omitempty"` + Project string `json:"project,omitempty"` + Projectid string `json:"projectid,omitempty"` + Resourcetype string `json:"resourcetype,omitempty"` +} + +type UpdateResourceCountParams struct { + p map[string]interface{} +} + +func (p *UpdateResourceCountParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["account"]; found { + u.Set("account", v.(string)) + } + if v, found := p.p["domainid"]; found { + u.Set("domainid", v.(string)) + } + if v, found := p.p["projectid"]; found { + u.Set("projectid", v.(string)) + } + if v, found := p.p["resourcetype"]; found { + vv := strconv.Itoa(v.(int)) + u.Set("resourcetype", vv) + } + return u +} + +func (p *UpdateResourceCountParams) SetAccount(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["account"] = v + return +} + +func (p *UpdateResourceCountParams) SetDomainid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["domainid"] = v + return +} + +func (p *UpdateResourceCountParams) SetProjectid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["projectid"] = v + return +} + +func (p *UpdateResourceCountParams) SetResourcetype(v int) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["resourcetype"] = v + return +} + +// You should always use this function to get a new UpdateResourceCountParams instance, +// as then you are sure you have configured all required params +func (s *LimitService) NewUpdateResourceCountParams(domainid string) *UpdateResourceCountParams { + p := &UpdateResourceCountParams{} + p.p = make(map[string]interface{}) + p.p["domainid"] = domainid + return p +} + +// Recalculate and update resource count for an account or domain. 
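+// For example (a sketch; the client value cs and the domain ID are assumed):
+//
+//    p := cs.Limit.NewUpdateResourceCountParams("domain-id")
+//    r, err := cs.Limit.UpdateResourceCount(p)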
+func (s *LimitService) UpdateResourceCount(p *UpdateResourceCountParams) (*UpdateResourceCountResponse, error) { + resp, err := s.cs.newRequest("updateResourceCount", p.toURLValues()) + if err != nil { + return nil, err + } + + var r UpdateResourceCountResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + return &r, nil +} + +type UpdateResourceCountResponse struct { + Account string `json:"account,omitempty"` + Domain string `json:"domain,omitempty"` + Domainid string `json:"domainid,omitempty"` + Project string `json:"project,omitempty"` + Projectid string `json:"projectid,omitempty"` + Resourcecount int64 `json:"resourcecount,omitempty"` + Resourcetype string `json:"resourcetype,omitempty"` +} + +type ListResourceLimitsParams struct { + p map[string]interface{} +} + +func (p *ListResourceLimitsParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["account"]; found { + u.Set("account", v.(string)) + } + if v, found := p.p["domainid"]; found { + u.Set("domainid", v.(string)) + } + if v, found := p.p["id"]; found { + vv := strconv.FormatInt(v.(int64), 10) + u.Set("id", vv) + } + if v, found := p.p["isrecursive"]; found { + vv := strconv.FormatBool(v.(bool)) + u.Set("isrecursive", vv) + } + if v, found := p.p["keyword"]; found { + u.Set("keyword", v.(string)) + } + if v, found := p.p["listall"]; found { + vv := strconv.FormatBool(v.(bool)) + u.Set("listall", vv) + } + if v, found := p.p["page"]; found { + vv := strconv.Itoa(v.(int)) + u.Set("page", vv) + } + if v, found := p.p["pagesize"]; found { + vv := strconv.Itoa(v.(int)) + u.Set("pagesize", vv) + } + if v, found := p.p["projectid"]; found { + u.Set("projectid", v.(string)) + } + if v, found := p.p["resourcetype"]; found { + vv := strconv.Itoa(v.(int)) + u.Set("resourcetype", vv) + } + return u +} + +func (p *ListResourceLimitsParams) SetAccount(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["account"] = v + return +} + +func (p *ListResourceLimitsParams) SetDomainid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["domainid"] = v + return +} + +func (p *ListResourceLimitsParams) SetId(v int64) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["id"] = v + return +} + +func (p *ListResourceLimitsParams) SetIsrecursive(v bool) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["isrecursive"] = v + return +} + +func (p *ListResourceLimitsParams) SetKeyword(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["keyword"] = v + return +} + +func (p *ListResourceLimitsParams) SetListall(v bool) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["listall"] = v + return +} + +func (p *ListResourceLimitsParams) SetPage(v int) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["page"] = v + return +} + +func (p *ListResourceLimitsParams) SetPagesize(v int) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["pagesize"] = v + return +} + +func (p *ListResourceLimitsParams) SetProjectid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["projectid"] = v + return +} + +func (p *ListResourceLimitsParams) SetResourcetype(v int) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["resourcetype"] = v + return +} + +// You should always use this function to get a new ListResourceLimitsParams instance, +// as then you are sure you have configured all required params +func 
(s *LimitService) NewListResourceLimitsParams() *ListResourceLimitsParams { + p := &ListResourceLimitsParams{} + p.p = make(map[string]interface{}) + return p +} + +// Lists resource limits. +func (s *LimitService) ListResourceLimits(p *ListResourceLimitsParams) (*ListResourceLimitsResponse, error) { + resp, err := s.cs.newRequest("listResourceLimits", p.toURLValues()) + if err != nil { + return nil, err + } + + var r ListResourceLimitsResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + return &r, nil +} + +type ListResourceLimitsResponse struct { + Count int `json:"count"` + ResourceLimits []*ResourceLimit `json:"resourcelimit"` +} + +type ResourceLimit struct { + Account string `json:"account,omitempty"` + Domain string `json:"domain,omitempty"` + Domainid string `json:"domainid,omitempty"` + Max int64 `json:"max,omitempty"` + Project string `json:"project,omitempty"` + Projectid string `json:"projectid,omitempty"` + Resourcetype string `json:"resourcetype,omitempty"` +} + +type GetApiLimitParams struct { + p map[string]interface{} +} + +func (p *GetApiLimitParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + return u +} + +// You should always use this function to get a new GetApiLimitParams instance, +// as then you are sure you have configured all required params +func (s *LimitService) NewGetApiLimitParams() *GetApiLimitParams { + p := &GetApiLimitParams{} + p.p = make(map[string]interface{}) + return p +} + +// Get API limit count for the caller +func (s *LimitService) GetApiLimit(p *GetApiLimitParams) (*GetApiLimitResponse, error) { + resp, err := s.cs.newRequest("getApiLimit", p.toURLValues()) + if err != nil { + return nil, err + } + + var r GetApiLimitResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + return &r, nil +} + +type GetApiLimitResponse struct { + Account string `json:"account,omitempty"` + Accountid string `json:"accountid,omitempty"` + ApiAllowed int `json:"apiAllowed,omitempty"` + ApiIssued int `json:"apiIssued,omitempty"` + ExpireAfter int64 `json:"expireAfter,omitempty"` +} + +type ResetApiLimitParams struct { + p map[string]interface{} +} + +func (p *ResetApiLimitParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["account"]; found { + u.Set("account", v.(string)) + } + return u +} + +func (p *ResetApiLimitParams) SetAccount(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["account"] = v + return +} + +// You should always use this function to get a new ResetApiLimitParams instance, +// as then you are sure you have configured all required params +func (s *LimitService) NewResetApiLimitParams() *ResetApiLimitParams { + p := &ResetApiLimitParams{} + p.p = make(map[string]interface{}) + return p +} + +// Reset api count +func (s *LimitService) ResetApiLimit(p *ResetApiLimitParams) (*ResetApiLimitResponse, error) { + resp, err := s.cs.newRequest("resetApiLimit", p.toURLValues()) + if err != nil { + return nil, err + } + + var r ResetApiLimitResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + return &r, nil +} + +type ResetApiLimitResponse struct { + Account string `json:"account,omitempty"` + Accountid string `json:"accountid,omitempty"` + ApiAllowed int `json:"apiAllowed,omitempty"` + ApiIssued int `json:"apiIssued,omitempty"` + ExpireAfter int64 `json:"expireAfter,omitempty"` +} diff --git 
a/vendor/github.com/xanzy/go-cloudstack/cloudstack/LoadBalancerService.go b/vendor/github.com/xanzy/go-cloudstack/cloudstack/LoadBalancerService.go new file mode 100644 index 000000000..03d1ec4ec --- /dev/null +++ b/vendor/github.com/xanzy/go-cloudstack/cloudstack/LoadBalancerService.go @@ -0,0 +1,4766 @@ +// +// Copyright 2014, Sander van Harmelen +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package cloudstack + +import ( + "encoding/json" + "fmt" + "net/url" + "strconv" + "strings" +) + +type CreateLoadBalancerRuleParams struct { + p map[string]interface{} +} + +func (p *CreateLoadBalancerRuleParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["account"]; found { + u.Set("account", v.(string)) + } + if v, found := p.p["algorithm"]; found { + u.Set("algorithm", v.(string)) + } + if v, found := p.p["cidrlist"]; found { + vv := strings.Join(v.([]string), ",") + u.Set("cidrlist", vv) + } + if v, found := p.p["description"]; found { + u.Set("description", v.(string)) + } + if v, found := p.p["domainid"]; found { + u.Set("domainid", v.(string)) + } + if v, found := p.p["fordisplay"]; found { + vv := strconv.FormatBool(v.(bool)) + u.Set("fordisplay", vv) + } + if v, found := p.p["name"]; found { + u.Set("name", v.(string)) + } + if v, found := p.p["networkid"]; found { + u.Set("networkid", v.(string)) + } + if v, found := p.p["openfirewall"]; found { + vv := strconv.FormatBool(v.(bool)) + u.Set("openfirewall", vv) + } + if v, found := p.p["privateport"]; found { + vv := strconv.Itoa(v.(int)) + u.Set("privateport", vv) + } + if v, found := p.p["protocol"]; found { + u.Set("protocol", v.(string)) + } + if v, found := p.p["publicipid"]; found { + u.Set("publicipid", v.(string)) + } + if v, found := p.p["publicport"]; found { + vv := strconv.Itoa(v.(int)) + u.Set("publicport", vv) + } + if v, found := p.p["zoneid"]; found { + u.Set("zoneid", v.(string)) + } + return u +} + +func (p *CreateLoadBalancerRuleParams) SetAccount(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["account"] = v + return +} + +func (p *CreateLoadBalancerRuleParams) SetAlgorithm(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["algorithm"] = v + return +} + +func (p *CreateLoadBalancerRuleParams) SetCidrlist(v []string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["cidrlist"] = v + return +} + +func (p *CreateLoadBalancerRuleParams) SetDescription(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["description"] = v + return +} + +func (p *CreateLoadBalancerRuleParams) SetDomainid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["domainid"] = v + return +} + +func (p *CreateLoadBalancerRuleParams) SetFordisplay(v bool) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["fordisplay"] = v + return +} + +func (p *CreateLoadBalancerRuleParams) SetName(v string) { + if p.p == nil { + p.p = 
make(map[string]interface{}) + } + p.p["name"] = v + return +} + +func (p *CreateLoadBalancerRuleParams) SetNetworkid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["networkid"] = v + return +} + +func (p *CreateLoadBalancerRuleParams) SetOpenfirewall(v bool) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["openfirewall"] = v + return +} + +func (p *CreateLoadBalancerRuleParams) SetPrivateport(v int) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["privateport"] = v + return +} + +func (p *CreateLoadBalancerRuleParams) SetProtocol(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["protocol"] = v + return +} + +func (p *CreateLoadBalancerRuleParams) SetPublicipid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["publicipid"] = v + return +} + +func (p *CreateLoadBalancerRuleParams) SetPublicport(v int) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["publicport"] = v + return +} + +func (p *CreateLoadBalancerRuleParams) SetZoneid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["zoneid"] = v + return +} + +// You should always use this function to get a new CreateLoadBalancerRuleParams instance, +// as then you are sure you have configured all required params +func (s *LoadBalancerService) NewCreateLoadBalancerRuleParams(algorithm string, name string, privateport int, publicport int) *CreateLoadBalancerRuleParams { + p := &CreateLoadBalancerRuleParams{} + p.p = make(map[string]interface{}) + p.p["algorithm"] = algorithm + p.p["name"] = name + p.p["privateport"] = privateport + p.p["publicport"] = publicport + return p +} + +// Creates a load balancer rule +func (s *LoadBalancerService) CreateLoadBalancerRule(p *CreateLoadBalancerRuleParams) (*CreateLoadBalancerRuleResponse, error) { + resp, err := s.cs.newRequest("createLoadBalancerRule", p.toURLValues()) + if err != nil { + return nil, err + } + + var r CreateLoadBalancerRuleResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + + // If we have an async client, we need to wait for the async result + if s.cs.async { + b, err := s.cs.GetAsyncJobResult(r.JobID, s.cs.timeout) + if err != nil { + if err == AsyncTimeoutErr { + return &r, err + } + return nil, err + } + + b, err = getRawValue(b) + if err != nil { + return nil, err + } + + if err := json.Unmarshal(b, &r); err != nil { + return nil, err + } + } + return &r, nil +} + +type CreateLoadBalancerRuleResponse struct { + JobID string `json:"jobid,omitempty"` + Account string `json:"account,omitempty"` + Algorithm string `json:"algorithm,omitempty"` + Cidrlist string `json:"cidrlist,omitempty"` + Description string `json:"description,omitempty"` + Domain string `json:"domain,omitempty"` + Domainid string `json:"domainid,omitempty"` + Fordisplay bool `json:"fordisplay,omitempty"` + Id string `json:"id,omitempty"` + Name string `json:"name,omitempty"` + Networkid string `json:"networkid,omitempty"` + Privateport string `json:"privateport,omitempty"` + Project string `json:"project,omitempty"` + Projectid string `json:"projectid,omitempty"` + Protocol string `json:"protocol,omitempty"` + Publicip string `json:"publicip,omitempty"` + Publicipid string `json:"publicipid,omitempty"` + Publicport string `json:"publicport,omitempty"` + State string `json:"state,omitempty"` + Tags []struct { + Account string `json:"account,omitempty"` + Customer string `json:"customer,omitempty"` + Domain string
`json:"domain,omitempty"` + Domainid string `json:"domainid,omitempty"` + Key string `json:"key,omitempty"` + Project string `json:"project,omitempty"` + Projectid string `json:"projectid,omitempty"` + Resourceid string `json:"resourceid,omitempty"` + Resourcetype string `json:"resourcetype,omitempty"` + Value string `json:"value,omitempty"` + } `json:"tags,omitempty"` + Zoneid string `json:"zoneid,omitempty"` +} + +type DeleteLoadBalancerRuleParams struct { + p map[string]interface{} +} + +func (p *DeleteLoadBalancerRuleParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["id"]; found { + u.Set("id", v.(string)) + } + return u +} + +func (p *DeleteLoadBalancerRuleParams) SetId(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["id"] = v + return +} + +// You should always use this function to get a new DeleteLoadBalancerRuleParams instance, +// as then you are sure you have configured all required params +func (s *LoadBalancerService) NewDeleteLoadBalancerRuleParams(id string) *DeleteLoadBalancerRuleParams { + p := &DeleteLoadBalancerRuleParams{} + p.p = make(map[string]interface{}) + p.p["id"] = id + return p +} + +// Deletes a load balancer rule. +func (s *LoadBalancerService) DeleteLoadBalancerRule(p *DeleteLoadBalancerRuleParams) (*DeleteLoadBalancerRuleResponse, error) { + resp, err := s.cs.newRequest("deleteLoadBalancerRule", p.toURLValues()) + if err != nil { + return nil, err + } + + var r DeleteLoadBalancerRuleResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + + // If we have an async client, we need to wait for the async result + if s.cs.async { + b, err := s.cs.GetAsyncJobResult(r.JobID, s.cs.timeout) + if err != nil { + if err == AsyncTimeoutErr { + return &r, err + } + return nil, err + } + + if err := json.Unmarshal(b, &r); err != nil { + return nil, err + } + } + return &r, nil +} + +type DeleteLoadBalancerRuleResponse struct { + JobID string `json:"jobid,omitempty"` + Displaytext string `json:"displaytext,omitempty"` + Success bool `json:"success,omitempty"` +} + +type RemoveFromLoadBalancerRuleParams struct { + p map[string]interface{} +} + +func (p *RemoveFromLoadBalancerRuleParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["id"]; found { + u.Set("id", v.(string)) + } + if v, found := p.p["virtualmachineids"]; found { + vv := strings.Join(v.([]string), ",") + u.Set("virtualmachineids", vv) + } + if v, found := p.p["vmidipmap"]; found { + i := 0 + for k, vv := range v.(map[string]string) { + u.Set(fmt.Sprintf("vmidipmap[%d].key", i), k) + u.Set(fmt.Sprintf("vmidipmap[%d].value", i), vv) + i++ + } + } + return u +} + +func (p *RemoveFromLoadBalancerRuleParams) SetId(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["id"] = v + return +} + +func (p *RemoveFromLoadBalancerRuleParams) SetVirtualmachineids(v []string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["virtualmachineids"] = v + return +} + +func (p *RemoveFromLoadBalancerRuleParams) SetVmidipmap(v map[string]string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["vmidipmap"] = v + return +} + +// You should always use this function to get a new RemoveFromLoadBalancerRuleParams instance, +// as then you are sure you have configured all required params +func (s *LoadBalancerService) NewRemoveFromLoadBalancerRuleParams(id string) *RemoveFromLoadBalancerRuleParams { + p :=
&RemoveFromLoadBalancerRuleParams{} + p.p = make(map[string]interface{}) + p.p["id"] = id + return p +} + +// Removes a virtual machine or a list of virtual machines from a load balancer rule. +func (s *LoadBalancerService) RemoveFromLoadBalancerRule(p *RemoveFromLoadBalancerRuleParams) (*RemoveFromLoadBalancerRuleResponse, error) { + resp, err := s.cs.newRequest("removeFromLoadBalancerRule", p.toURLValues()) + if err != nil { + return nil, err + } + + var r RemoveFromLoadBalancerRuleResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + + // If we have an async client, we need to wait for the async result + if s.cs.async { + b, err := s.cs.GetAsyncJobResult(r.JobID, s.cs.timeout) + if err != nil { + if err == AsyncTimeoutErr { + return &r, err + } + return nil, err + } + + if err := json.Unmarshal(b, &r); err != nil { + return nil, err + } + } + return &r, nil +} + +type RemoveFromLoadBalancerRuleResponse struct { + JobID string `json:"jobid,omitempty"` + Displaytext string `json:"displaytext,omitempty"` + Success bool `json:"success,omitempty"` +} + +type AssignToLoadBalancerRuleParams struct { + p map[string]interface{} +} + +func (p *AssignToLoadBalancerRuleParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["id"]; found { + u.Set("id", v.(string)) + } + if v, found := p.p["virtualmachineids"]; found { + vv := strings.Join(v.([]string), ",") + u.Set("virtualmachineids", vv) + } + if v, found := p.p["vmidipmap"]; found { + i := 0 + for k, vv := range v.(map[string]string) { + u.Set(fmt.Sprintf("vmidipmap[%d].key", i), k) + u.Set(fmt.Sprintf("vmidipmap[%d].value", i), vv) + i++ + } + } + return u +} + +func (p *AssignToLoadBalancerRuleParams) SetId(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["id"] = v + return +} + +func (p *AssignToLoadBalancerRuleParams) SetVirtualmachineids(v []string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["virtualmachineids"] = v + return +} + +func (p *AssignToLoadBalancerRuleParams) SetVmidipmap(v map[string]string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["vmidipmap"] = v + return +} + +// You should always use this function to get a new AssignToLoadBalancerRuleParams instance, +// as then you are sure you have configured all required params +func (s *LoadBalancerService) NewAssignToLoadBalancerRuleParams(id string) *AssignToLoadBalancerRuleParams { + p := &AssignToLoadBalancerRuleParams{} + p.p = make(map[string]interface{}) + p.p["id"] = id + return p +} + +// Assigns a virtual machine or a list of virtual machines to a load balancer rule.
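+// For example (a sketch; the client value cs, the rule ID, and the VM IDs
+// are assumed):
+//
+//    p := cs.LoadBalancer.NewAssignToLoadBalancerRuleParams("rule-id")
+//    p.SetVirtualmachineids([]string{"vm-1", "vm-2"})
+//    _, err := cs.LoadBalancer.AssignToLoadBalancerRule(p)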
+func (s *LoadBalancerService) AssignToLoadBalancerRule(p *AssignToLoadBalancerRuleParams) (*AssignToLoadBalancerRuleResponse, error) { + resp, err := s.cs.newRequest("assignToLoadBalancerRule", p.toURLValues()) + if err != nil { + return nil, err + } + + var r AssignToLoadBalancerRuleResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + + // If we have an async client, we need to wait for the async result + if s.cs.async { + b, err := s.cs.GetAsyncJobResult(r.JobID, s.cs.timeout) + if err != nil { + if err == AsyncTimeoutErr { + return &r, err + } + return nil, err + } + + if err := json.Unmarshal(b, &r); err != nil { + return nil, err + } + } + return &r, nil +} + +type AssignToLoadBalancerRuleResponse struct { + JobID string `json:"jobid,omitempty"` + Displaytext string `json:"displaytext,omitempty"` + Success bool `json:"success,omitempty"` +} + +type CreateLBStickinessPolicyParams struct { + p map[string]interface{} +} + +func (p *CreateLBStickinessPolicyParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["description"]; found { + u.Set("description", v.(string)) + } + if v, found := p.p["fordisplay"]; found { + vv := strconv.FormatBool(v.(bool)) + u.Set("fordisplay", vv) + } + if v, found := p.p["lbruleid"]; found { + u.Set("lbruleid", v.(string)) + } + if v, found := p.p["methodname"]; found { + u.Set("methodname", v.(string)) + } + if v, found := p.p["name"]; found { + u.Set("name", v.(string)) + } + if v, found := p.p["param"]; found { + i := 0 + for k, vv := range v.(map[string]string) { + u.Set(fmt.Sprintf("param[%d].key", i), k) + u.Set(fmt.Sprintf("param[%d].value", i), vv) + i++ + } + } + return u +} + +func (p *CreateLBStickinessPolicyParams) SetDescription(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["description"] = v + return +} + +func (p *CreateLBStickinessPolicyParams) SetFordisplay(v bool) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["fordisplay"] = v + return +} + +func (p *CreateLBStickinessPolicyParams) SetLbruleid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["lbruleid"] = v + return +} + +func (p *CreateLBStickinessPolicyParams) SetMethodname(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["methodname"] = v + return +} + +func (p *CreateLBStickinessPolicyParams) SetName(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["name"] = v + return +} + +func (p *CreateLBStickinessPolicyParams) SetParam(v map[string]string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["param"] = v + return +} + +// You should always use this function to get a new CreateLBStickinessPolicyParams instance, +// as then you are sure you have configured all required params +func (s *LoadBalancerService) NewCreateLBStickinessPolicyParams(lbruleid string, methodname string, name string) *CreateLBStickinessPolicyParams { + p := &CreateLBStickinessPolicyParams{} + p.p = make(map[string]interface{}) + p.p["lbruleid"] = lbruleid + p.p["methodname"] = methodname + p.p["name"] = name + return p +} + +// Creates a Load Balancer stickiness policy +func (s *LoadBalancerService) CreateLBStickinessPolicy(p *CreateLBStickinessPolicyParams) (*CreateLBStickinessPolicyResponse, error) { + resp, err := s.cs.newRequest("createLBStickinessPolicy", p.toURLValues()) + if err != nil { + return nil, err + } + + var r CreateLBStickinessPolicyResponse + if err :=
json.Unmarshal(resp, &r); err != nil { + return nil, err + } + + // If we have an async client, we need to wait for the async result + if s.cs.async { + b, err := s.cs.GetAsyncJobResult(r.JobID, s.cs.timeout) + if err != nil { + if err == AsyncTimeoutErr { + return &r, err + } + return nil, err + } + + b, err = getRawValue(b) + if err != nil { + return nil, err + } + + if err := json.Unmarshal(b, &r); err != nil { + return nil, err + } + } + return &r, nil +} + +type CreateLBStickinessPolicyResponse struct { + JobID string `json:"jobid,omitempty"` + Account string `json:"account,omitempty"` + Description string `json:"description,omitempty"` + Domain string `json:"domain,omitempty"` + Domainid string `json:"domainid,omitempty"` + Lbruleid string `json:"lbruleid,omitempty"` + Name string `json:"name,omitempty"` + State string `json:"state,omitempty"` + Stickinesspolicy []struct { + Description string `json:"description,omitempty"` + Fordisplay bool `json:"fordisplay,omitempty"` + Id string `json:"id,omitempty"` + Methodname string `json:"methodname,omitempty"` + Name string `json:"name,omitempty"` + Params map[string]string `json:"params,omitempty"` + State string `json:"state,omitempty"` + } `json:"stickinesspolicy,omitempty"` + Zoneid string `json:"zoneid,omitempty"` +} + +type UpdateLBStickinessPolicyParams struct { + p map[string]interface{} +} + +func (p *UpdateLBStickinessPolicyParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["customid"]; found { + u.Set("customid", v.(string)) + } + if v, found := p.p["fordisplay"]; found { + vv := strconv.FormatBool(v.(bool)) + u.Set("fordisplay", vv) + } + if v, found := p.p["id"]; found { + u.Set("id", v.(string)) + } + return u +} + +func (p *UpdateLBStickinessPolicyParams) SetCustomid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["customid"] = v + return +} + +func (p *UpdateLBStickinessPolicyParams) SetFordisplay(v bool) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["fordisplay"] = v + return +} + +func (p *UpdateLBStickinessPolicyParams) SetId(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["id"] = v + return +} + +// You should always use this function to get a new UpdateLBStickinessPolicyParams instance, +// as then you are sure you have configured all required params +func (s *LoadBalancerService) NewUpdateLBStickinessPolicyParams(id string) *UpdateLBStickinessPolicyParams { + p := &UpdateLBStickinessPolicyParams{} + p.p = make(map[string]interface{}) + p.p["id"] = id + return p +} + +// Updates an LB stickiness policy +func (s *LoadBalancerService) UpdateLBStickinessPolicy(p *UpdateLBStickinessPolicyParams) (*UpdateLBStickinessPolicyResponse, error) { + resp, err := s.cs.newRequest("updateLBStickinessPolicy", p.toURLValues()) + if err != nil { + return nil, err + } + + var r UpdateLBStickinessPolicyResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + + // If we have an async client, we need to wait for the async result + if s.cs.async { + b, err := s.cs.GetAsyncJobResult(r.JobID, s.cs.timeout) + if err != nil { + if err == AsyncTimeoutErr { + return &r, err + } + return nil, err + } + + b, err = getRawValue(b) + if err != nil { + return nil, err + } + + if err := json.Unmarshal(b, &r); err != nil { + return nil, err + } + } + return &r, nil +} + +type UpdateLBStickinessPolicyResponse struct { + JobID string `json:"jobid,omitempty"` + Account string `json:"account,omitempty"`
+ Description string `json:"description,omitempty"` + Domain string `json:"domain,omitempty"` + Domainid string `json:"domainid,omitempty"` + Lbruleid string `json:"lbruleid,omitempty"` + Name string `json:"name,omitempty"` + State string `json:"state,omitempty"` + Stickinesspolicy []struct { + Description string `json:"description,omitempty"` + Fordisplay bool `json:"fordisplay,omitempty"` + Id string `json:"id,omitempty"` + Methodname string `json:"methodname,omitempty"` + Name string `json:"name,omitempty"` + Params map[string]string `json:"params,omitempty"` + State string `json:"state,omitempty"` + } `json:"stickinesspolicy,omitempty"` + Zoneid string `json:"zoneid,omitempty"` +} + +type DeleteLBStickinessPolicyParams struct { + p map[string]interface{} +} + +func (p *DeleteLBStickinessPolicyParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["id"]; found { + u.Set("id", v.(string)) + } + return u +} + +func (p *DeleteLBStickinessPolicyParams) SetId(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["id"] = v + return +} + +// You should always use this function to get a new DeleteLBStickinessPolicyParams instance, +// as then you are sure you have configured all required params +func (s *LoadBalancerService) NewDeleteLBStickinessPolicyParams(id string) *DeleteLBStickinessPolicyParams { + p := &DeleteLBStickinessPolicyParams{} + p.p = make(map[string]interface{}) + p.p["id"] = id + return p +} + +// Deletes an LB stickiness policy. +func (s *LoadBalancerService) DeleteLBStickinessPolicy(p *DeleteLBStickinessPolicyParams) (*DeleteLBStickinessPolicyResponse, error) { + resp, err := s.cs.newRequest("deleteLBStickinessPolicy", p.toURLValues()) + if err != nil { + return nil, err + } + + var r DeleteLBStickinessPolicyResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + + // If we have an async client, we need to wait for the async result + if s.cs.async { + b, err := s.cs.GetAsyncJobResult(r.JobID, s.cs.timeout) + if err != nil { + if err == AsyncTimeoutErr { + return &r, err + } + return nil, err + } + + if err := json.Unmarshal(b, &r); err != nil { + return nil, err + } + } + return &r, nil +} + +type DeleteLBStickinessPolicyResponse struct { + JobID string `json:"jobid,omitempty"` + Displaytext string `json:"displaytext,omitempty"` + Success bool `json:"success,omitempty"` +} + +type ListLoadBalancerRulesParams struct { + p map[string]interface{} +} + +func (p *ListLoadBalancerRulesParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["account"]; found { + u.Set("account", v.(string)) + } + if v, found := p.p["domainid"]; found { + u.Set("domainid", v.(string)) + } + if v, found := p.p["fordisplay"]; found { + vv := strconv.FormatBool(v.(bool)) + u.Set("fordisplay", vv) + } + if v, found := p.p["id"]; found { + u.Set("id", v.(string)) + } + if v, found := p.p["isrecursive"]; found { + vv := strconv.FormatBool(v.(bool)) + u.Set("isrecursive", vv) + } + if v, found := p.p["keyword"]; found { + u.Set("keyword", v.(string)) + } + if v, found := p.p["listall"]; found { + vv := strconv.FormatBool(v.(bool)) + u.Set("listall", vv) + } + if v, found := p.p["name"]; found { + u.Set("name", v.(string)) + } + if v, found := p.p["networkid"]; found { + u.Set("networkid", v.(string)) + } + if v, found := p.p["page"]; found { + vv := strconv.Itoa(v.(int)) + u.Set("page", vv) + } + if v, found := p.p["pagesize"]; found { + vv :=
strconv.Itoa(v.(int)) + u.Set("pagesize", vv) + } + if v, found := p.p["projectid"]; found { + u.Set("projectid", v.(string)) + } + if v, found := p.p["publicipid"]; found { + u.Set("publicipid", v.(string)) + } + if v, found := p.p["tags"]; found { + i := 0 + for k, vv := range v.(map[string]string) { + u.Set(fmt.Sprintf("tags[%d].key", i), k) + u.Set(fmt.Sprintf("tags[%d].value", i), vv) + i++ + } + } + if v, found := p.p["virtualmachineid"]; found { + u.Set("virtualmachineid", v.(string)) + } + if v, found := p.p["zoneid"]; found { + u.Set("zoneid", v.(string)) + } + return u +} + +func (p *ListLoadBalancerRulesParams) SetAccount(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["account"] = v + return +} + +func (p *ListLoadBalancerRulesParams) SetDomainid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["domainid"] = v + return +} + +func (p *ListLoadBalancerRulesParams) SetFordisplay(v bool) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["fordisplay"] = v + return +} + +func (p *ListLoadBalancerRulesParams) SetId(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["id"] = v + return +} + +func (p *ListLoadBalancerRulesParams) SetIsrecursive(v bool) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["isrecursive"] = v + return +} + +func (p *ListLoadBalancerRulesParams) SetKeyword(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["keyword"] = v + return +} + +func (p *ListLoadBalancerRulesParams) SetListall(v bool) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["listall"] = v + return +} + +func (p *ListLoadBalancerRulesParams) SetName(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["name"] = v + return +} + +func (p *ListLoadBalancerRulesParams) SetNetworkid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["networkid"] = v + return +} + +func (p *ListLoadBalancerRulesParams) SetPage(v int) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["page"] = v + return +} + +func (p *ListLoadBalancerRulesParams) SetPagesize(v int) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["pagesize"] = v + return +} + +func (p *ListLoadBalancerRulesParams) SetProjectid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["projectid"] = v + return +} + +func (p *ListLoadBalancerRulesParams) SetPublicipid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["publicipid"] = v + return +} + +func (p *ListLoadBalancerRulesParams) SetTags(v map[string]string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["tags"] = v + return +} + +func (p *ListLoadBalancerRulesParams) SetVirtualmachineid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["virtualmachineid"] = v + return +} + +func (p *ListLoadBalancerRulesParams) SetZoneid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["zoneid"] = v + return +} + +// You should always use this function to get a new ListLoadBalancerRulesParams instance, +// as then you are sure you have configured all required params +func (s *LoadBalancerService) NewListLoadBalancerRulesParams() *ListLoadBalancerRulesParams { + p := &ListLoadBalancerRulesParams{} + p.p = make(map[string]interface{}) + return p +} + +// This is a courtesy helper function, which in some cases may not work as expected! 
+func (s *LoadBalancerService) GetLoadBalancerRuleID(name string) (string, error) { + p := &ListLoadBalancerRulesParams{} + p.p = make(map[string]interface{}) + + p.p["name"] = name + + l, err := s.ListLoadBalancerRules(p) + if err != nil { + return "", err + } + + if l.Count == 0 { + // If no matches, search all projects + p.p["projectid"] = "-1" + + l, err = s.ListLoadBalancerRules(p) + if err != nil { + return "", err + } + } + + if l.Count == 0 { + return "", fmt.Errorf("No match found for %s: %+v", name, l) + } + + if l.Count == 1 { + return l.LoadBalancerRules[0].Id, nil + } + + if l.Count > 1 { + for _, v := range l.LoadBalancerRules { + if v.Name == name { + return v.Id, nil + } + } + } + return "", fmt.Errorf("Could not find an exact match for %s: %+v", name, l) +} + +// This is a courtesy helper function, which in some cases may not work as expected! +func (s *LoadBalancerService) GetLoadBalancerRuleByName(name string) (*LoadBalancerRule, int, error) { + id, err := s.GetLoadBalancerRuleID(name) + if err != nil { + return nil, -1, err + } + + r, count, err := s.GetLoadBalancerRuleByID(id) + if err != nil { + return nil, count, err + } + return r, count, nil +} + +// This is a courtesy helper function, which in some cases may not work as expected! +func (s *LoadBalancerService) GetLoadBalancerRuleByID(id string) (*LoadBalancerRule, int, error) { + p := &ListLoadBalancerRulesParams{} + p.p = make(map[string]interface{}) + + p.p["id"] = id + + l, err := s.ListLoadBalancerRules(p) + if err != nil { + if strings.Contains(err.Error(), fmt.Sprintf( + "Invalid parameter id value=%s due to incorrect long value format, "+ + "or entity does not exist", id)) { + return nil, 0, fmt.Errorf("No match found for %s: %+v", id, l) + } + return nil, -1, err + } + + if l.Count == 0 { + // If no matches, search all projects + p.p["projectid"] = "-1" + + l, err = s.ListLoadBalancerRules(p) + if err != nil { + if strings.Contains(err.Error(), fmt.Sprintf( + "Invalid parameter id value=%s due to incorrect long value format, "+ + "or entity does not exist", id)) { + return nil, 0, fmt.Errorf("No match found for %s: %+v", id, l) + } + return nil, -1, err + } + } + + if l.Count == 0 { + return nil, l.Count, fmt.Errorf("No match found for %s: %+v", id, l) + } + + if l.Count == 1 { + return l.LoadBalancerRules[0], l.Count, nil + } + return nil, l.Count, fmt.Errorf("There is more then one result for LoadBalancerRule UUID: %s!", id) +} + +// Lists load balancer rules. 
+func (s *LoadBalancerService) ListLoadBalancerRules(p *ListLoadBalancerRulesParams) (*ListLoadBalancerRulesResponse, error) { + resp, err := s.cs.newRequest("listLoadBalancerRules", p.toURLValues()) + if err != nil { + return nil, err + } + + var r ListLoadBalancerRulesResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + return &r, nil +} + +type ListLoadBalancerRulesResponse struct { + Count int `json:"count"` + LoadBalancerRules []*LoadBalancerRule `json:"loadbalancerrule"` +} + +type LoadBalancerRule struct { + Account string `json:"account,omitempty"` + Algorithm string `json:"algorithm,omitempty"` + Cidrlist string `json:"cidrlist,omitempty"` + Description string `json:"description,omitempty"` + Domain string `json:"domain,omitempty"` + Domainid string `json:"domainid,omitempty"` + Fordisplay bool `json:"fordisplay,omitempty"` + Id string `json:"id,omitempty"` + Name string `json:"name,omitempty"` + Networkid string `json:"networkid,omitempty"` + Privateport string `json:"privateport,omitempty"` + Project string `json:"project,omitempty"` + Projectid string `json:"projectid,omitempty"` + Protocol string `json:"protocol,omitempty"` + Publicip string `json:"publicip,omitempty"` + Publicipid string `json:"publicipid,omitempty"` + Publicport string `json:"publicport,omitempty"` + State string `json:"state,omitempty"` + Tags []struct { + Account string `json:"account,omitempty"` + Customer string `json:"customer,omitempty"` + Domain string `json:"domain,omitempty"` + Domainid string `json:"domainid,omitempty"` + Key string `json:"key,omitempty"` + Project string `json:"project,omitempty"` + Projectid string `json:"projectid,omitempty"` + Resourceid string `json:"resourceid,omitempty"` + Resourcetype string `json:"resourcetype,omitempty"` + Value string `json:"value,omitempty"` + } `json:"tags,omitempty"` + Zoneid string `json:"zoneid,omitempty"` +} + +type ListLBStickinessPoliciesParams struct { + p map[string]interface{} +} + +func (p *ListLBStickinessPoliciesParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["fordisplay"]; found { + vv := strconv.FormatBool(v.(bool)) + u.Set("fordisplay", vv) + } + if v, found := p.p["id"]; found { + u.Set("id", v.(string)) + } + if v, found := p.p["keyword"]; found { + u.Set("keyword", v.(string)) + } + if v, found := p.p["lbruleid"]; found { + u.Set("lbruleid", v.(string)) + } + if v, found := p.p["page"]; found { + vv := strconv.Itoa(v.(int)) + u.Set("page", vv) + } + if v, found := p.p["pagesize"]; found { + vv := strconv.Itoa(v.(int)) + u.Set("pagesize", vv) + } + return u +} + +func (p *ListLBStickinessPoliciesParams) SetFordisplay(v bool) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["fordisplay"] = v + return +} + +func (p *ListLBStickinessPoliciesParams) SetId(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["id"] = v + return +} + +func (p *ListLBStickinessPoliciesParams) SetKeyword(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["keyword"] = v + return +} + +func (p *ListLBStickinessPoliciesParams) SetLbruleid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["lbruleid"] = v + return +} + +func (p *ListLBStickinessPoliciesParams) SetPage(v int) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["page"] = v + return +} + +func (p *ListLBStickinessPoliciesParams) SetPagesize(v int) { + if p.p == nil { + p.p = make(map[string]interface{}) 
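
The Get...ByID/ByName courtesy helpers above wrap ListLoadBalancerRules: a name lookup retries across all projects and, when several rules match, falls back to an exact name comparison before giving up. A short sketch reusing the cs client from the earlier example; the rule name and IP UUID are placeholders:

    lp := cs.LoadBalancer.NewListLoadBalancerRulesParams()
    lp.SetPublicipid("public-ip-uuid") // optional filter
    rules, err := cs.LoadBalancer.ListLoadBalancerRules(lp)
    if err != nil {
        log.Fatalf("listing load balancer rules: %v", err)
    }
    for _, rule := range rules.LoadBalancerRules {
        log.Printf("%s (%s) :%s -> :%s", rule.Name, rule.Algorithm, rule.Publicport, rule.Privateport)
    }

    // Resolve a single rule by name; errors if it is missing or ambiguous.
    id, err := cs.LoadBalancer.GetLoadBalancerRuleID("web-lb")
    if err != nil {
        log.Fatalf("resolving rule: %v", err)
    }
    _ = id
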
+ } + p.p["pagesize"] = v + return +} + +// You should always use this function to get a new ListLBStickinessPoliciesParams instance, +// as then you are sure you have configured all required params +func (s *LoadBalancerService) NewListLBStickinessPoliciesParams() *ListLBStickinessPoliciesParams { + p := &ListLBStickinessPoliciesParams{} + p.p = make(map[string]interface{}) + return p +} + +// This is a courtesy helper function, which in some cases may not work as expected! +func (s *LoadBalancerService) GetLBStickinessPolicyByID(id string) (*LBStickinessPolicy, int, error) { + p := &ListLBStickinessPoliciesParams{} + p.p = make(map[string]interface{}) + + p.p["id"] = id + + l, err := s.ListLBStickinessPolicies(p) + if err != nil { + if strings.Contains(err.Error(), fmt.Sprintf( + "Invalid parameter id value=%s due to incorrect long value format, "+ + "or entity does not exist", id)) { + return nil, 0, fmt.Errorf("No match found for %s: %+v", id, l) + } + return nil, -1, err + } + + if l.Count == 0 { + return nil, l.Count, fmt.Errorf("No match found for %s: %+v", id, l) + } + + if l.Count == 1 { + return l.LBStickinessPolicies[0], l.Count, nil + } + return nil, l.Count, fmt.Errorf("There is more then one result for LBStickinessPolicy UUID: %s!", id) +} + +// Lists LBStickiness policies. +func (s *LoadBalancerService) ListLBStickinessPolicies(p *ListLBStickinessPoliciesParams) (*ListLBStickinessPoliciesResponse, error) { + resp, err := s.cs.newRequest("listLBStickinessPolicies", p.toURLValues()) + if err != nil { + return nil, err + } + + var r ListLBStickinessPoliciesResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + return &r, nil +} + +type ListLBStickinessPoliciesResponse struct { + Count int `json:"count"` + LBStickinessPolicies []*LBStickinessPolicy `json:"lbstickinesspolicy"` +} + +type LBStickinessPolicy struct { + Account string `json:"account,omitempty"` + Description string `json:"description,omitempty"` + Domain string `json:"domain,omitempty"` + Domainid string `json:"domainid,omitempty"` + Lbruleid string `json:"lbruleid,omitempty"` + Name string `json:"name,omitempty"` + State string `json:"state,omitempty"` + Stickinesspolicy []struct { + Description string `json:"description,omitempty"` + Fordisplay bool `json:"fordisplay,omitempty"` + Id string `json:"id,omitempty"` + Methodname string `json:"methodname,omitempty"` + Name string `json:"name,omitempty"` + Params map[string]string `json:"params,omitempty"` + State string `json:"state,omitempty"` + } `json:"stickinesspolicy,omitempty"` + Zoneid string `json:"zoneid,omitempty"` +} + +type ListLBHealthCheckPoliciesParams struct { + p map[string]interface{} +} + +func (p *ListLBHealthCheckPoliciesParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["fordisplay"]; found { + vv := strconv.FormatBool(v.(bool)) + u.Set("fordisplay", vv) + } + if v, found := p.p["id"]; found { + u.Set("id", v.(string)) + } + if v, found := p.p["keyword"]; found { + u.Set("keyword", v.(string)) + } + if v, found := p.p["lbruleid"]; found { + u.Set("lbruleid", v.(string)) + } + if v, found := p.p["page"]; found { + vv := strconv.Itoa(v.(int)) + u.Set("page", vv) + } + if v, found := p.p["pagesize"]; found { + vv := strconv.Itoa(v.(int)) + u.Set("pagesize", vv) + } + return u +} + +func (p *ListLBHealthCheckPoliciesParams) SetFordisplay(v bool) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["fordisplay"] = v + return +} + +func (p 
*ListLBHealthCheckPoliciesParams) SetId(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["id"] = v + return +} + +func (p *ListLBHealthCheckPoliciesParams) SetKeyword(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["keyword"] = v + return +} + +func (p *ListLBHealthCheckPoliciesParams) SetLbruleid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["lbruleid"] = v + return +} + +func (p *ListLBHealthCheckPoliciesParams) SetPage(v int) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["page"] = v + return +} + +func (p *ListLBHealthCheckPoliciesParams) SetPagesize(v int) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["pagesize"] = v + return +} + +// You should always use this function to get a new ListLBHealthCheckPoliciesParams instance, +// as then you are sure you have configured all required params +func (s *LoadBalancerService) NewListLBHealthCheckPoliciesParams() *ListLBHealthCheckPoliciesParams { + p := &ListLBHealthCheckPoliciesParams{} + p.p = make(map[string]interface{}) + return p +} + +// This is a courtesy helper function, which in some cases may not work as expected! +func (s *LoadBalancerService) GetLBHealthCheckPolicyByID(id string) (*LBHealthCheckPolicy, int, error) { + p := &ListLBHealthCheckPoliciesParams{} + p.p = make(map[string]interface{}) + + p.p["id"] = id + + l, err := s.ListLBHealthCheckPolicies(p) + if err != nil { + if strings.Contains(err.Error(), fmt.Sprintf( + "Invalid parameter id value=%s due to incorrect long value format, "+ + "or entity does not exist", id)) { + return nil, 0, fmt.Errorf("No match found for %s: %+v", id, l) + } + return nil, -1, err + } + + if l.Count == 0 { + return nil, l.Count, fmt.Errorf("No match found for %s: %+v", id, l) + } + + if l.Count == 1 { + return l.LBHealthCheckPolicies[0], l.Count, nil + } + return nil, l.Count, fmt.Errorf("There is more then one result for LBHealthCheckPolicy UUID: %s!", id) +} + +// Lists load balancer HealthCheck policies. 
+func (s *LoadBalancerService) ListLBHealthCheckPolicies(p *ListLBHealthCheckPoliciesParams) (*ListLBHealthCheckPoliciesResponse, error) { + resp, err := s.cs.newRequest("listLBHealthCheckPolicies", p.toURLValues()) + if err != nil { + return nil, err + } + + var r ListLBHealthCheckPoliciesResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + return &r, nil +} + +type ListLBHealthCheckPoliciesResponse struct { + Count int `json:"count"` + LBHealthCheckPolicies []*LBHealthCheckPolicy `json:"lbhealthcheckpolicy"` +} + +type LBHealthCheckPolicy struct { + Account string `json:"account,omitempty"` + Domain string `json:"domain,omitempty"` + Domainid string `json:"domainid,omitempty"` + Healthcheckpolicy []struct { + Description string `json:"description,omitempty"` + Fordisplay bool `json:"fordisplay,omitempty"` + Healthcheckinterval int `json:"healthcheckinterval,omitempty"` + Healthcheckthresshold int `json:"healthcheckthresshold,omitempty"` + Id string `json:"id,omitempty"` + Pingpath string `json:"pingpath,omitempty"` + Responsetime int `json:"responsetime,omitempty"` + State string `json:"state,omitempty"` + Unhealthcheckthresshold int `json:"unhealthcheckthresshold,omitempty"` + } `json:"healthcheckpolicy,omitempty"` + Lbruleid string `json:"lbruleid,omitempty"` + Zoneid string `json:"zoneid,omitempty"` +} + +type CreateLBHealthCheckPolicyParams struct { + p map[string]interface{} +} + +func (p *CreateLBHealthCheckPolicyParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["description"]; found { + u.Set("description", v.(string)) + } + if v, found := p.p["fordisplay"]; found { + vv := strconv.FormatBool(v.(bool)) + u.Set("fordisplay", vv) + } + if v, found := p.p["healthythreshold"]; found { + vv := strconv.Itoa(v.(int)) + u.Set("healthythreshold", vv) + } + if v, found := p.p["intervaltime"]; found { + vv := strconv.Itoa(v.(int)) + u.Set("intervaltime", vv) + } + if v, found := p.p["lbruleid"]; found { + u.Set("lbruleid", v.(string)) + } + if v, found := p.p["pingpath"]; found { + u.Set("pingpath", v.(string)) + } + if v, found := p.p["responsetimeout"]; found { + vv := strconv.Itoa(v.(int)) + u.Set("responsetimeout", vv) + } + if v, found := p.p["unhealthythreshold"]; found { + vv := strconv.Itoa(v.(int)) + u.Set("unhealthythreshold", vv) + } + return u +} + +func (p *CreateLBHealthCheckPolicyParams) SetDescription(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["description"] = v + return +} + +func (p *CreateLBHealthCheckPolicyParams) SetFordisplay(v bool) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["fordisplay"] = v + return +} + +func (p *CreateLBHealthCheckPolicyParams) SetHealthythreshold(v int) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["healthythreshold"] = v + return +} + +func (p *CreateLBHealthCheckPolicyParams) SetIntervaltime(v int) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["intervaltime"] = v + return +} + +func (p *CreateLBHealthCheckPolicyParams) SetLbruleid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["lbruleid"] = v + return +} + +func (p *CreateLBHealthCheckPolicyParams) SetPingpath(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["pingpath"] = v + return +} + +func (p *CreateLBHealthCheckPolicyParams) SetResponsetimeout(v int) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["responsetimeout"] = v + return 
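
Creating a health check policy follows the same construct-set-call pattern; only the lbruleid is required (its constructor appears just below), everything else is optional. A sketch under the same assumptions as the earlier examples, with illustrative values:

    hp := cs.LoadBalancer.NewCreateLBHealthCheckPolicyParams("lb-rule-uuid")
    hp.SetPingpath("/healthz")  // URL probed by the health check
    hp.SetIntervaltime(5)       // time between checks
    hp.SetResponsetimeout(2)    // how long to wait for a reply
    hp.SetHealthythreshold(2)   // successes before a VM counts as healthy
    hp.SetUnhealthythreshold(3) // failures before it counts as unhealthy

    r, err := cs.LoadBalancer.CreateLBHealthCheckPolicy(hp)
    if err != nil {
        log.Fatalf("creating health check policy: %v", err)
    }
    for _, hc := range r.Healthcheckpolicy {
        log.Printf("health check %s probes %s", hc.Id, hc.Pingpath)
    }
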
+} + +func (p *CreateLBHealthCheckPolicyParams) SetUnhealthythreshold(v int) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["unhealthythreshold"] = v + return +} + +// You should always use this function to get a new CreateLBHealthCheckPolicyParams instance, +// as then you are sure you have configured all required params +func (s *LoadBalancerService) NewCreateLBHealthCheckPolicyParams(lbruleid string) *CreateLBHealthCheckPolicyParams { + p := &CreateLBHealthCheckPolicyParams{} + p.p = make(map[string]interface{}) + p.p["lbruleid"] = lbruleid + return p +} + +// Creates a Load Balancer healthcheck policy +func (s *LoadBalancerService) CreateLBHealthCheckPolicy(p *CreateLBHealthCheckPolicyParams) (*CreateLBHealthCheckPolicyResponse, error) { + resp, err := s.cs.newRequest("createLBHealthCheckPolicy", p.toURLValues()) + if err != nil { + return nil, err + } + + var r CreateLBHealthCheckPolicyResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + + // If we have a async client, we need to wait for the async result + if s.cs.async { + b, err := s.cs.GetAsyncJobResult(r.JobID, s.cs.timeout) + if err != nil { + if err == AsyncTimeoutErr { + return &r, err + } + return nil, err + } + + b, err = getRawValue(b) + if err != nil { + return nil, err + } + + if err := json.Unmarshal(b, &r); err != nil { + return nil, err + } + } + return &r, nil +} + +type CreateLBHealthCheckPolicyResponse struct { + JobID string `json:"jobid,omitempty"` + Account string `json:"account,omitempty"` + Domain string `json:"domain,omitempty"` + Domainid string `json:"domainid,omitempty"` + Healthcheckpolicy []struct { + Description string `json:"description,omitempty"` + Fordisplay bool `json:"fordisplay,omitempty"` + Healthcheckinterval int `json:"healthcheckinterval,omitempty"` + Healthcheckthresshold int `json:"healthcheckthresshold,omitempty"` + Id string `json:"id,omitempty"` + Pingpath string `json:"pingpath,omitempty"` + Responsetime int `json:"responsetime,omitempty"` + State string `json:"state,omitempty"` + Unhealthcheckthresshold int `json:"unhealthcheckthresshold,omitempty"` + } `json:"healthcheckpolicy,omitempty"` + Lbruleid string `json:"lbruleid,omitempty"` + Zoneid string `json:"zoneid,omitempty"` +} + +type UpdateLBHealthCheckPolicyParams struct { + p map[string]interface{} +} + +func (p *UpdateLBHealthCheckPolicyParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["customid"]; found { + u.Set("customid", v.(string)) + } + if v, found := p.p["fordisplay"]; found { + vv := strconv.FormatBool(v.(bool)) + u.Set("fordisplay", vv) + } + if v, found := p.p["id"]; found { + u.Set("id", v.(string)) + } + return u +} + +func (p *UpdateLBHealthCheckPolicyParams) SetCustomid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["customid"] = v + return +} + +func (p *UpdateLBHealthCheckPolicyParams) SetFordisplay(v bool) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["fordisplay"] = v + return +} + +func (p *UpdateLBHealthCheckPolicyParams) SetId(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["id"] = v + return +} + +// You should always use this function to get a new UpdateLBHealthCheckPolicyParams instance, +// as then you are sure you have configured all required params +func (s *LoadBalancerService) NewUpdateLBHealthCheckPolicyParams(id string) *UpdateLBHealthCheckPolicyParams { + p := &UpdateLBHealthCheckPolicyParams{} + p.p = 
make(map[string]interface{}) + p.p["id"] = id + return p +} + +// Updates LB HealthCheck policy +func (s *LoadBalancerService) UpdateLBHealthCheckPolicy(p *UpdateLBHealthCheckPolicyParams) (*UpdateLBHealthCheckPolicyResponse, error) { + resp, err := s.cs.newRequest("updateLBHealthCheckPolicy", p.toURLValues()) + if err != nil { + return nil, err + } + + var r UpdateLBHealthCheckPolicyResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + + // If we have a async client, we need to wait for the async result + if s.cs.async { + b, err := s.cs.GetAsyncJobResult(r.JobID, s.cs.timeout) + if err != nil { + if err == AsyncTimeoutErr { + return &r, err + } + return nil, err + } + + b, err = getRawValue(b) + if err != nil { + return nil, err + } + + if err := json.Unmarshal(b, &r); err != nil { + return nil, err + } + } + return &r, nil +} + +type UpdateLBHealthCheckPolicyResponse struct { + JobID string `json:"jobid,omitempty"` + Account string `json:"account,omitempty"` + Domain string `json:"domain,omitempty"` + Domainid string `json:"domainid,omitempty"` + Healthcheckpolicy []struct { + Description string `json:"description,omitempty"` + Fordisplay bool `json:"fordisplay,omitempty"` + Healthcheckinterval int `json:"healthcheckinterval,omitempty"` + Healthcheckthresshold int `json:"healthcheckthresshold,omitempty"` + Id string `json:"id,omitempty"` + Pingpath string `json:"pingpath,omitempty"` + Responsetime int `json:"responsetime,omitempty"` + State string `json:"state,omitempty"` + Unhealthcheckthresshold int `json:"unhealthcheckthresshold,omitempty"` + } `json:"healthcheckpolicy,omitempty"` + Lbruleid string `json:"lbruleid,omitempty"` + Zoneid string `json:"zoneid,omitempty"` +} + +type DeleteLBHealthCheckPolicyParams struct { + p map[string]interface{} +} + +func (p *DeleteLBHealthCheckPolicyParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["id"]; found { + u.Set("id", v.(string)) + } + return u +} + +func (p *DeleteLBHealthCheckPolicyParams) SetId(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["id"] = v + return +} + +// You should always use this function to get a new DeleteLBHealthCheckPolicyParams instance, +// as then you are sure you have configured all required params +func (s *LoadBalancerService) NewDeleteLBHealthCheckPolicyParams(id string) *DeleteLBHealthCheckPolicyParams { + p := &DeleteLBHealthCheckPolicyParams{} + p.p = make(map[string]interface{}) + p.p["id"] = id + return p +} + +// Deletes a load balancer HealthCheck policy. 
+func (s *LoadBalancerService) DeleteLBHealthCheckPolicy(p *DeleteLBHealthCheckPolicyParams) (*DeleteLBHealthCheckPolicyResponse, error) { + resp, err := s.cs.newRequest("deleteLBHealthCheckPolicy", p.toURLValues()) + if err != nil { + return nil, err + } + + var r DeleteLBHealthCheckPolicyResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + + // If we have a async client, we need to wait for the async result + if s.cs.async { + b, err := s.cs.GetAsyncJobResult(r.JobID, s.cs.timeout) + if err != nil { + if err == AsyncTimeoutErr { + return &r, err + } + return nil, err + } + + if err := json.Unmarshal(b, &r); err != nil { + return nil, err + } + } + return &r, nil +} + +type DeleteLBHealthCheckPolicyResponse struct { + JobID string `json:"jobid,omitempty"` + Displaytext string `json:"displaytext,omitempty"` + Success bool `json:"success,omitempty"` +} + +type ListLoadBalancerRuleInstancesParams struct { + p map[string]interface{} +} + +func (p *ListLoadBalancerRuleInstancesParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["applied"]; found { + vv := strconv.FormatBool(v.(bool)) + u.Set("applied", vv) + } + if v, found := p.p["id"]; found { + u.Set("id", v.(string)) + } + if v, found := p.p["keyword"]; found { + u.Set("keyword", v.(string)) + } + if v, found := p.p["lbvmips"]; found { + vv := strconv.FormatBool(v.(bool)) + u.Set("lbvmips", vv) + } + if v, found := p.p["page"]; found { + vv := strconv.Itoa(v.(int)) + u.Set("page", vv) + } + if v, found := p.p["pagesize"]; found { + vv := strconv.Itoa(v.(int)) + u.Set("pagesize", vv) + } + return u +} + +func (p *ListLoadBalancerRuleInstancesParams) SetApplied(v bool) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["applied"] = v + return +} + +func (p *ListLoadBalancerRuleInstancesParams) SetId(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["id"] = v + return +} + +func (p *ListLoadBalancerRuleInstancesParams) SetKeyword(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["keyword"] = v + return +} + +func (p *ListLoadBalancerRuleInstancesParams) SetLbvmips(v bool) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["lbvmips"] = v + return +} + +func (p *ListLoadBalancerRuleInstancesParams) SetPage(v int) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["page"] = v + return +} + +func (p *ListLoadBalancerRuleInstancesParams) SetPagesize(v int) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["pagesize"] = v + return +} + +// You should always use this function to get a new ListLoadBalancerRuleInstancesParams instance, +// as then you are sure you have configured all required params +func (s *LoadBalancerService) NewListLoadBalancerRuleInstancesParams(id string) *ListLoadBalancerRuleInstancesParams { + p := &ListLoadBalancerRuleInstancesParams{} + p.p = make(map[string]interface{}) + p.p["id"] = id + return p +} + +// This is a courtesy helper function, which in some cases may not work as expected! 
+func (s *LoadBalancerService) GetLoadBalancerRuleInstanceByID(id string) (*LoadBalancerRuleInstance, int, error) { + p := &ListLoadBalancerRuleInstancesParams{} + p.p = make(map[string]interface{}) + + p.p["id"] = id + + l, err := s.ListLoadBalancerRuleInstances(p) + if err != nil { + if strings.Contains(err.Error(), fmt.Sprintf( + "Invalid parameter id value=%s due to incorrect long value format, "+ + "or entity does not exist", id)) { + return nil, 0, fmt.Errorf("No match found for %s: %+v", id, l) + } + return nil, -1, err + } + + if l.Count == 0 { + return nil, l.Count, fmt.Errorf("No match found for %s: %+v", id, l) + } + + if l.Count == 1 { + return l.LoadBalancerRuleInstances[0], l.Count, nil + } + return nil, l.Count, fmt.Errorf("There is more than one result for LoadBalancerRuleInstance UUID: %s!", id) +} + +// List all virtual machine instances that are assigned to a load balancer rule. +func (s *LoadBalancerService) ListLoadBalancerRuleInstances(p *ListLoadBalancerRuleInstancesParams) (*ListLoadBalancerRuleInstancesResponse, error) { + resp, err := s.cs.newRequest("listLoadBalancerRuleInstances", p.toURLValues()) + if err != nil { + return nil, err + } + + var r ListLoadBalancerRuleInstancesResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + return &r, nil +} + +type ListLoadBalancerRuleInstancesResponse struct { + Count int `json:"count"` + LoadBalancerRuleInstances []*LoadBalancerRuleInstance `json:"loadbalancerruleinstance"` +} + +type LoadBalancerRuleInstance struct { + Lbvmipaddresses []string `json:"lbvmipaddresses,omitempty"` + Loadbalancerruleinstance string `json:"loadbalancerruleinstance,omitempty"` +} + +type UpdateLoadBalancerRuleParams struct { + p map[string]interface{} +} + +func (p *UpdateLoadBalancerRuleParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["algorithm"]; found { + u.Set("algorithm", v.(string)) + } + if v, found := p.p["customid"]; found { + u.Set("customid", v.(string)) + } + if v, found := p.p["description"]; found { + u.Set("description", v.(string)) + } + if v, found := p.p["fordisplay"]; found { + vv := strconv.FormatBool(v.(bool)) + u.Set("fordisplay", vv) + } + if v, found := p.p["id"]; found { + u.Set("id", v.(string)) + } + if v, found := p.p["name"]; found { + u.Set("name", v.(string)) + } + return u +} + +func (p *UpdateLoadBalancerRuleParams) SetAlgorithm(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["algorithm"] = v + return +} + +func (p *UpdateLoadBalancerRuleParams) SetCustomid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["customid"] = v + return +} + +func (p *UpdateLoadBalancerRuleParams) SetDescription(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["description"] = v + return +} + +func (p *UpdateLoadBalancerRuleParams) SetFordisplay(v bool) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["fordisplay"] = v + return +} + +func (p *UpdateLoadBalancerRuleParams) SetId(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["id"] = v + return +} + +func (p *UpdateLoadBalancerRuleParams) SetName(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["name"] = v + return +} + +// You should always use this function to get a new UpdateLoadBalancerRuleParams instance, +// as then you are sure you have configured all required params +func (s *LoadBalancerService) 
NewUpdateLoadBalancerRuleParams(id string) *UpdateLoadBalancerRuleParams { + p := &UpdateLoadBalancerRuleParams{} + p.p = make(map[string]interface{}) + p.p["id"] = id + return p +} + +// Updates load balancer +func (s *LoadBalancerService) UpdateLoadBalancerRule(p *UpdateLoadBalancerRuleParams) (*UpdateLoadBalancerRuleResponse, error) { + resp, err := s.cs.newRequest("updateLoadBalancerRule", p.toURLValues()) + if err != nil { + return nil, err + } + + var r UpdateLoadBalancerRuleResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + + // If we have a async client, we need to wait for the async result + if s.cs.async { + b, err := s.cs.GetAsyncJobResult(r.JobID, s.cs.timeout) + if err != nil { + if err == AsyncTimeoutErr { + return &r, err + } + return nil, err + } + + b, err = getRawValue(b) + if err != nil { + return nil, err + } + + if err := json.Unmarshal(b, &r); err != nil { + return nil, err + } + } + return &r, nil +} + +type UpdateLoadBalancerRuleResponse struct { + JobID string `json:"jobid,omitempty"` + Account string `json:"account,omitempty"` + Algorithm string `json:"algorithm,omitempty"` + Cidrlist string `json:"cidrlist,omitempty"` + Description string `json:"description,omitempty"` + Domain string `json:"domain,omitempty"` + Domainid string `json:"domainid,omitempty"` + Fordisplay bool `json:"fordisplay,omitempty"` + Id string `json:"id,omitempty"` + Name string `json:"name,omitempty"` + Networkid string `json:"networkid,omitempty"` + Privateport string `json:"privateport,omitempty"` + Project string `json:"project,omitempty"` + Projectid string `json:"projectid,omitempty"` + Protocol string `json:"protocol,omitempty"` + Publicip string `json:"publicip,omitempty"` + Publicipid string `json:"publicipid,omitempty"` + Publicport string `json:"publicport,omitempty"` + State string `json:"state,omitempty"` + Tags []struct { + Account string `json:"account,omitempty"` + Customer string `json:"customer,omitempty"` + Domain string `json:"domain,omitempty"` + Domainid string `json:"domainid,omitempty"` + Key string `json:"key,omitempty"` + Project string `json:"project,omitempty"` + Projectid string `json:"projectid,omitempty"` + Resourceid string `json:"resourceid,omitempty"` + Resourcetype string `json:"resourcetype,omitempty"` + Value string `json:"value,omitempty"` + } `json:"tags,omitempty"` + Zoneid string `json:"zoneid,omitempty"` +} + +type UploadSslCertParams struct { + p map[string]interface{} +} + +func (p *UploadSslCertParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["account"]; found { + u.Set("account", v.(string)) + } + if v, found := p.p["certchain"]; found { + u.Set("certchain", v.(string)) + } + if v, found := p.p["certificate"]; found { + u.Set("certificate", v.(string)) + } + if v, found := p.p["domainid"]; found { + u.Set("domainid", v.(string)) + } + if v, found := p.p["password"]; found { + u.Set("password", v.(string)) + } + if v, found := p.p["privatekey"]; found { + u.Set("privatekey", v.(string)) + } + if v, found := p.p["projectid"]; found { + u.Set("projectid", v.(string)) + } + return u +} + +func (p *UploadSslCertParams) SetAccount(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["account"] = v + return +} + +func (p *UploadSslCertParams) SetCertchain(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["certchain"] = v + return +} + +func (p *UploadSslCertParams) SetCertificate(v string) { + if p.p == nil { + 
p.p = make(map[string]interface{}) + } + p.p["certificate"] = v + return +} + +func (p *UploadSslCertParams) SetDomainid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["domainid"] = v + return +} + +func (p *UploadSslCertParams) SetPassword(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["password"] = v + return +} + +func (p *UploadSslCertParams) SetPrivatekey(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["privatekey"] = v + return +} + +func (p *UploadSslCertParams) SetProjectid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["projectid"] = v + return +} + +// You should always use this function to get a new UploadSslCertParams instance, +// as then you are sure you have configured all required params +func (s *LoadBalancerService) NewUploadSslCertParams(certificate string, privatekey string) *UploadSslCertParams { + p := &UploadSslCertParams{} + p.p = make(map[string]interface{}) + p.p["certificate"] = certificate + p.p["privatekey"] = privatekey + return p +} + +// Upload a certificate to cloudstack +func (s *LoadBalancerService) UploadSslCert(p *UploadSslCertParams) (*UploadSslCertResponse, error) { + resp, err := s.cs.newRequest("uploadSslCert", p.toURLValues()) + if err != nil { + return nil, err + } + + var r UploadSslCertResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + return &r, nil +} + +type UploadSslCertResponse struct { + Account string `json:"account,omitempty"` + Certchain string `json:"certchain,omitempty"` + Certificate string `json:"certificate,omitempty"` + Domain string `json:"domain,omitempty"` + Domainid string `json:"domainid,omitempty"` + Fingerprint string `json:"fingerprint,omitempty"` + Id string `json:"id,omitempty"` + Loadbalancerrulelist []string `json:"loadbalancerrulelist,omitempty"` + Project string `json:"project,omitempty"` + Projectid string `json:"projectid,omitempty"` +} + +type DeleteSslCertParams struct { + p map[string]interface{} +} + +func (p *DeleteSslCertParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["id"]; found { + u.Set("id", v.(string)) + } + return u +} + +func (p *DeleteSslCertParams) SetId(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["id"] = v + return +} + +// You should always use this function to get a new DeleteSslCertParams instance, +// as then you are sure you have configured all required params +func (s *LoadBalancerService) NewDeleteSslCertParams(id string) *DeleteSslCertParams { + p := &DeleteSslCertParams{} + p.p = make(map[string]interface{}) + p.p["id"] = id + return p +} + +// Delete a certificate to cloudstack +func (s *LoadBalancerService) DeleteSslCert(p *DeleteSslCertParams) (*DeleteSslCertResponse, error) { + resp, err := s.cs.newRequest("deleteSslCert", p.toURLValues()) + if err != nil { + return nil, err + } + + var r DeleteSslCertResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + return &r, nil +} + +type DeleteSslCertResponse struct { + Displaytext string `json:"displaytext,omitempty"` + Success string `json:"success,omitempty"` +} + +type ListSslCertsParams struct { + p map[string]interface{} +} + +func (p *ListSslCertsParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["accountid"]; found { + u.Set("accountid", v.(string)) + } + if v, found := p.p["certid"]; found { + 
u.Set("certid", v.(string)) + } + if v, found := p.p["lbruleid"]; found { + u.Set("lbruleid", v.(string)) + } + if v, found := p.p["projectid"]; found { + u.Set("projectid", v.(string)) + } + return u +} + +func (p *ListSslCertsParams) SetAccountid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["accountid"] = v + return +} + +func (p *ListSslCertsParams) SetCertid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["certid"] = v + return +} + +func (p *ListSslCertsParams) SetLbruleid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["lbruleid"] = v + return +} + +func (p *ListSslCertsParams) SetProjectid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["projectid"] = v + return +} + +// You should always use this function to get a new ListSslCertsParams instance, +// as then you are sure you have configured all required params +func (s *LoadBalancerService) NewListSslCertsParams() *ListSslCertsParams { + p := &ListSslCertsParams{} + p.p = make(map[string]interface{}) + return p +} + +// Lists SSL certificates +func (s *LoadBalancerService) ListSslCerts(p *ListSslCertsParams) (*ListSslCertsResponse, error) { + resp, err := s.cs.newRequest("listSslCerts", p.toURLValues()) + if err != nil { + return nil, err + } + + var r ListSslCertsResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + return &r, nil +} + +type ListSslCertsResponse struct { + Count int `json:"count"` + SslCerts []*SslCert `json:"sslcert"` +} + +type SslCert struct { + Account string `json:"account,omitempty"` + Certchain string `json:"certchain,omitempty"` + Certificate string `json:"certificate,omitempty"` + Domain string `json:"domain,omitempty"` + Domainid string `json:"domainid,omitempty"` + Fingerprint string `json:"fingerprint,omitempty"` + Id string `json:"id,omitempty"` + Loadbalancerrulelist []string `json:"loadbalancerrulelist,omitempty"` + Project string `json:"project,omitempty"` + Projectid string `json:"projectid,omitempty"` +} + +type AssignCertToLoadBalancerParams struct { + p map[string]interface{} +} + +func (p *AssignCertToLoadBalancerParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["certid"]; found { + u.Set("certid", v.(string)) + } + if v, found := p.p["lbruleid"]; found { + u.Set("lbruleid", v.(string)) + } + return u +} + +func (p *AssignCertToLoadBalancerParams) SetCertid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["certid"] = v + return +} + +func (p *AssignCertToLoadBalancerParams) SetLbruleid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["lbruleid"] = v + return +} + +// You should always use this function to get a new AssignCertToLoadBalancerParams instance, +// as then you are sure you have configured all required params +func (s *LoadBalancerService) NewAssignCertToLoadBalancerParams(certid string, lbruleid string) *AssignCertToLoadBalancerParams { + p := &AssignCertToLoadBalancerParams{} + p.p = make(map[string]interface{}) + p.p["certid"] = certid + p.p["lbruleid"] = lbruleid + return p +} + +// Assigns a certificate to a Load Balancer Rule +func (s *LoadBalancerService) AssignCertToLoadBalancer(p *AssignCertToLoadBalancerParams) (*AssignCertToLoadBalancerResponse, error) { + resp, err := s.cs.newRequest("assignCertToLoadBalancer", p.toURLValues()) + if err != nil { + return nil, err + } + + var r AssignCertToLoadBalancerResponse + 
if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + + // If we have a async client, we need to wait for the async result + if s.cs.async { + b, err := s.cs.GetAsyncJobResult(r.JobID, s.cs.timeout) + if err != nil { + if err == AsyncTimeoutErr { + return &r, err + } + return nil, err + } + + if err := json.Unmarshal(b, &r); err != nil { + return nil, err + } + } + return &r, nil +} + +type AssignCertToLoadBalancerResponse struct { + JobID string `json:"jobid,omitempty"` + Displaytext string `json:"displaytext,omitempty"` + Success bool `json:"success,omitempty"` +} + +type RemoveCertFromLoadBalancerParams struct { + p map[string]interface{} +} + +func (p *RemoveCertFromLoadBalancerParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["lbruleid"]; found { + u.Set("lbruleid", v.(string)) + } + return u +} + +func (p *RemoveCertFromLoadBalancerParams) SetLbruleid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["lbruleid"] = v + return +} + +// You should always use this function to get a new RemoveCertFromLoadBalancerParams instance, +// as then you are sure you have configured all required params +func (s *LoadBalancerService) NewRemoveCertFromLoadBalancerParams(lbruleid string) *RemoveCertFromLoadBalancerParams { + p := &RemoveCertFromLoadBalancerParams{} + p.p = make(map[string]interface{}) + p.p["lbruleid"] = lbruleid + return p +} + +// Removes a certificate from a Load Balancer Rule +func (s *LoadBalancerService) RemoveCertFromLoadBalancer(p *RemoveCertFromLoadBalancerParams) (*RemoveCertFromLoadBalancerResponse, error) { + resp, err := s.cs.newRequest("removeCertFromLoadBalancer", p.toURLValues()) + if err != nil { + return nil, err + } + + var r RemoveCertFromLoadBalancerResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + + // If we have a async client, we need to wait for the async result + if s.cs.async { + b, err := s.cs.GetAsyncJobResult(r.JobID, s.cs.timeout) + if err != nil { + if err == AsyncTimeoutErr { + return &r, err + } + return nil, err + } + + if err := json.Unmarshal(b, &r); err != nil { + return nil, err + } + } + return &r, nil +} + +type RemoveCertFromLoadBalancerResponse struct { + JobID string `json:"jobid,omitempty"` + Displaytext string `json:"displaytext,omitempty"` + Success bool `json:"success,omitempty"` +} + +type AddNetscalerLoadBalancerParams struct { + p map[string]interface{} +} + +func (p *AddNetscalerLoadBalancerParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["gslbprovider"]; found { + vv := strconv.FormatBool(v.(bool)) + u.Set("gslbprovider", vv) + } + if v, found := p.p["gslbproviderprivateip"]; found { + u.Set("gslbproviderprivateip", v.(string)) + } + if v, found := p.p["gslbproviderpublicip"]; found { + u.Set("gslbproviderpublicip", v.(string)) + } + if v, found := p.p["isexclusivegslbprovider"]; found { + vv := strconv.FormatBool(v.(bool)) + u.Set("isexclusivegslbprovider", vv) + } + if v, found := p.p["networkdevicetype"]; found { + u.Set("networkdevicetype", v.(string)) + } + if v, found := p.p["password"]; found { + u.Set("password", v.(string)) + } + if v, found := p.p["physicalnetworkid"]; found { + u.Set("physicalnetworkid", v.(string)) + } + if v, found := p.p["url"]; found { + u.Set("url", v.(string)) + } + if v, found := p.p["username"]; found { + u.Set("username", v.(string)) + } + return u +} + +func (p *AddNetscalerLoadBalancerParams) 
SetGslbprovider(v bool) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["gslbprovider"] = v + return +} + +func (p *AddNetscalerLoadBalancerParams) SetGslbproviderprivateip(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["gslbproviderprivateip"] = v + return +} + +func (p *AddNetscalerLoadBalancerParams) SetGslbproviderpublicip(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["gslbproviderpublicip"] = v + return +} + +func (p *AddNetscalerLoadBalancerParams) SetIsexclusivegslbprovider(v bool) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["isexclusivegslbprovider"] = v + return +} + +func (p *AddNetscalerLoadBalancerParams) SetNetworkdevicetype(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["networkdevicetype"] = v + return +} + +func (p *AddNetscalerLoadBalancerParams) SetPassword(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["password"] = v + return +} + +func (p *AddNetscalerLoadBalancerParams) SetPhysicalnetworkid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["physicalnetworkid"] = v + return +} + +func (p *AddNetscalerLoadBalancerParams) SetUrl(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["url"] = v + return +} + +func (p *AddNetscalerLoadBalancerParams) SetUsername(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["username"] = v + return +} + +// You should always use this function to get a new AddNetscalerLoadBalancerParams instance, +// as then you are sure you have configured all required params +func (s *LoadBalancerService) NewAddNetscalerLoadBalancerParams(networkdevicetype string, password string, physicalnetworkid string, url string, username string) *AddNetscalerLoadBalancerParams { + p := &AddNetscalerLoadBalancerParams{} + p.p = make(map[string]interface{}) + p.p["networkdevicetype"] = networkdevicetype + p.p["password"] = password + p.p["physicalnetworkid"] = physicalnetworkid + p.p["url"] = url + p.p["username"] = username + return p +} + +// Adds a netscaler load balancer device +func (s *LoadBalancerService) AddNetscalerLoadBalancer(p *AddNetscalerLoadBalancerParams) (*AddNetscalerLoadBalancerResponse, error) { + resp, err := s.cs.newRequest("addNetscalerLoadBalancer", p.toURLValues()) + if err != nil { + return nil, err + } + + var r AddNetscalerLoadBalancerResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + + // If we have a async client, we need to wait for the async result + if s.cs.async { + b, err := s.cs.GetAsyncJobResult(r.JobID, s.cs.timeout) + if err != nil { + if err == AsyncTimeoutErr { + return &r, err + } + return nil, err + } + + b, err = getRawValue(b) + if err != nil { + return nil, err + } + + if err := json.Unmarshal(b, &r); err != nil { + return nil, err + } + } + return &r, nil +} + +type AddNetscalerLoadBalancerResponse struct { + JobID string `json:"jobid,omitempty"` + Gslbprovider bool `json:"gslbprovider,omitempty"` + Gslbproviderprivateip string `json:"gslbproviderprivateip,omitempty"` + Gslbproviderpublicip string `json:"gslbproviderpublicip,omitempty"` + Ipaddress string `json:"ipaddress,omitempty"` + Isexclusivegslbprovider bool `json:"isexclusivegslbprovider,omitempty"` + Lbdevicecapacity int64 `json:"lbdevicecapacity,omitempty"` + Lbdevicededicated bool `json:"lbdevicededicated,omitempty"` + Lbdeviceid string `json:"lbdeviceid,omitempty"` + Lbdevicename string 
`json:"lbdevicename,omitempty"` + Lbdevicestate string `json:"lbdevicestate,omitempty"` + Physicalnetworkid string `json:"physicalnetworkid,omitempty"` + Podids []string `json:"podids,omitempty"` + Privateinterface string `json:"privateinterface,omitempty"` + Provider string `json:"provider,omitempty"` + Publicinterface string `json:"publicinterface,omitempty"` +} + +type DeleteNetscalerLoadBalancerParams struct { + p map[string]interface{} +} + +func (p *DeleteNetscalerLoadBalancerParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["lbdeviceid"]; found { + u.Set("lbdeviceid", v.(string)) + } + return u +} + +func (p *DeleteNetscalerLoadBalancerParams) SetLbdeviceid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["lbdeviceid"] = v + return +} + +// You should always use this function to get a new DeleteNetscalerLoadBalancerParams instance, +// as then you are sure you have configured all required params +func (s *LoadBalancerService) NewDeleteNetscalerLoadBalancerParams(lbdeviceid string) *DeleteNetscalerLoadBalancerParams { + p := &DeleteNetscalerLoadBalancerParams{} + p.p = make(map[string]interface{}) + p.p["lbdeviceid"] = lbdeviceid + return p +} + +// delete a netscaler load balancer device +func (s *LoadBalancerService) DeleteNetscalerLoadBalancer(p *DeleteNetscalerLoadBalancerParams) (*DeleteNetscalerLoadBalancerResponse, error) { + resp, err := s.cs.newRequest("deleteNetscalerLoadBalancer", p.toURLValues()) + if err != nil { + return nil, err + } + + var r DeleteNetscalerLoadBalancerResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + + // If we have a async client, we need to wait for the async result + if s.cs.async { + b, err := s.cs.GetAsyncJobResult(r.JobID, s.cs.timeout) + if err != nil { + if err == AsyncTimeoutErr { + return &r, err + } + return nil, err + } + + if err := json.Unmarshal(b, &r); err != nil { + return nil, err + } + } + return &r, nil +} + +type DeleteNetscalerLoadBalancerResponse struct { + JobID string `json:"jobid,omitempty"` + Displaytext string `json:"displaytext,omitempty"` + Success bool `json:"success,omitempty"` +} + +type ConfigureNetscalerLoadBalancerParams struct { + p map[string]interface{} +} + +func (p *ConfigureNetscalerLoadBalancerParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["inline"]; found { + vv := strconv.FormatBool(v.(bool)) + u.Set("inline", vv) + } + if v, found := p.p["lbdevicecapacity"]; found { + vv := strconv.FormatInt(v.(int64), 10) + u.Set("lbdevicecapacity", vv) + } + if v, found := p.p["lbdevicededicated"]; found { + vv := strconv.FormatBool(v.(bool)) + u.Set("lbdevicededicated", vv) + } + if v, found := p.p["lbdeviceid"]; found { + u.Set("lbdeviceid", v.(string)) + } + if v, found := p.p["podids"]; found { + vv := strings.Join(v.([]string), ",") + u.Set("podids", vv) + } + return u +} + +func (p *ConfigureNetscalerLoadBalancerParams) SetInline(v bool) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["inline"] = v + return +} + +func (p *ConfigureNetscalerLoadBalancerParams) SetLbdevicecapacity(v int64) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["lbdevicecapacity"] = v + return +} + +func (p *ConfigureNetscalerLoadBalancerParams) SetLbdevicededicated(v bool) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["lbdevicededicated"] = v + return +} + +func (p *ConfigureNetscalerLoadBalancerParams) 
SetLbdeviceid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["lbdeviceid"] = v + return +} + +func (p *ConfigureNetscalerLoadBalancerParams) SetPodids(v []string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["podids"] = v + return +} + +// You should always use this function to get a new ConfigureNetscalerLoadBalancerParams instance, +// as then you are sure you have configured all required params +func (s *LoadBalancerService) NewConfigureNetscalerLoadBalancerParams(lbdeviceid string) *ConfigureNetscalerLoadBalancerParams { + p := &ConfigureNetscalerLoadBalancerParams{} + p.p = make(map[string]interface{}) + p.p["lbdeviceid"] = lbdeviceid + return p +} + +// configures a netscaler load balancer device +func (s *LoadBalancerService) ConfigureNetscalerLoadBalancer(p *ConfigureNetscalerLoadBalancerParams) (*ConfigureNetscalerLoadBalancerResponse, error) { + resp, err := s.cs.newRequest("configureNetscalerLoadBalancer", p.toURLValues()) + if err != nil { + return nil, err + } + + var r ConfigureNetscalerLoadBalancerResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + + // If we have a async client, we need to wait for the async result + if s.cs.async { + b, err := s.cs.GetAsyncJobResult(r.JobID, s.cs.timeout) + if err != nil { + if err == AsyncTimeoutErr { + return &r, err + } + return nil, err + } + + b, err = getRawValue(b) + if err != nil { + return nil, err + } + + if err := json.Unmarshal(b, &r); err != nil { + return nil, err + } + } + return &r, nil +} + +type ConfigureNetscalerLoadBalancerResponse struct { + JobID string `json:"jobid,omitempty"` + Gslbprovider bool `json:"gslbprovider,omitempty"` + Gslbproviderprivateip string `json:"gslbproviderprivateip,omitempty"` + Gslbproviderpublicip string `json:"gslbproviderpublicip,omitempty"` + Ipaddress string `json:"ipaddress,omitempty"` + Isexclusivegslbprovider bool `json:"isexclusivegslbprovider,omitempty"` + Lbdevicecapacity int64 `json:"lbdevicecapacity,omitempty"` + Lbdevicededicated bool `json:"lbdevicededicated,omitempty"` + Lbdeviceid string `json:"lbdeviceid,omitempty"` + Lbdevicename string `json:"lbdevicename,omitempty"` + Lbdevicestate string `json:"lbdevicestate,omitempty"` + Physicalnetworkid string `json:"physicalnetworkid,omitempty"` + Podids []string `json:"podids,omitempty"` + Privateinterface string `json:"privateinterface,omitempty"` + Provider string `json:"provider,omitempty"` + Publicinterface string `json:"publicinterface,omitempty"` +} + +type ListNetscalerLoadBalancersParams struct { + p map[string]interface{} +} + +func (p *ListNetscalerLoadBalancersParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["keyword"]; found { + u.Set("keyword", v.(string)) + } + if v, found := p.p["lbdeviceid"]; found { + u.Set("lbdeviceid", v.(string)) + } + if v, found := p.p["page"]; found { + vv := strconv.Itoa(v.(int)) + u.Set("page", vv) + } + if v, found := p.p["pagesize"]; found { + vv := strconv.Itoa(v.(int)) + u.Set("pagesize", vv) + } + if v, found := p.p["physicalnetworkid"]; found { + u.Set("physicalnetworkid", v.(string)) + } + return u +} + +func (p *ListNetscalerLoadBalancersParams) SetKeyword(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["keyword"] = v + return +} + +func (p *ListNetscalerLoadBalancersParams) SetLbdeviceid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["lbdeviceid"] = v + return +} + +func (p 
*ListNetscalerLoadBalancersParams) SetPage(v int) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["page"] = v + return +} + +func (p *ListNetscalerLoadBalancersParams) SetPagesize(v int) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["pagesize"] = v + return +} + +func (p *ListNetscalerLoadBalancersParams) SetPhysicalnetworkid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["physicalnetworkid"] = v + return +} + +// You should always use this function to get a new ListNetscalerLoadBalancersParams instance, +// as then you are sure you have configured all required params +func (s *LoadBalancerService) NewListNetscalerLoadBalancersParams() *ListNetscalerLoadBalancersParams { + p := &ListNetscalerLoadBalancersParams{} + p.p = make(map[string]interface{}) + return p +} + +// lists netscaler load balancer devices +func (s *LoadBalancerService) ListNetscalerLoadBalancers(p *ListNetscalerLoadBalancersParams) (*ListNetscalerLoadBalancersResponse, error) { + resp, err := s.cs.newRequest("listNetscalerLoadBalancers", p.toURLValues()) + if err != nil { + return nil, err + } + + var r ListNetscalerLoadBalancersResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + return &r, nil +} + +type ListNetscalerLoadBalancersResponse struct { + Count int `json:"count"` + NetscalerLoadBalancers []*NetscalerLoadBalancer `json:"netscalerloadbalancer"` +} + +type NetscalerLoadBalancer struct { + Gslbprovider bool `json:"gslbprovider,omitempty"` + Gslbproviderprivateip string `json:"gslbproviderprivateip,omitempty"` + Gslbproviderpublicip string `json:"gslbproviderpublicip,omitempty"` + Ipaddress string `json:"ipaddress,omitempty"` + Isexclusivegslbprovider bool `json:"isexclusivegslbprovider,omitempty"` + Lbdevicecapacity int64 `json:"lbdevicecapacity,omitempty"` + Lbdevicededicated bool `json:"lbdevicededicated,omitempty"` + Lbdeviceid string `json:"lbdeviceid,omitempty"` + Lbdevicename string `json:"lbdevicename,omitempty"` + Lbdevicestate string `json:"lbdevicestate,omitempty"` + Physicalnetworkid string `json:"physicalnetworkid,omitempty"` + Podids []string `json:"podids,omitempty"` + Privateinterface string `json:"privateinterface,omitempty"` + Provider string `json:"provider,omitempty"` + Publicinterface string `json:"publicinterface,omitempty"` +} + +type CreateGlobalLoadBalancerRuleParams struct { + p map[string]interface{} +} + +func (p *CreateGlobalLoadBalancerRuleParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["account"]; found { + u.Set("account", v.(string)) + } + if v, found := p.p["description"]; found { + u.Set("description", v.(string)) + } + if v, found := p.p["domainid"]; found { + u.Set("domainid", v.(string)) + } + if v, found := p.p["gslbdomainname"]; found { + u.Set("gslbdomainname", v.(string)) + } + if v, found := p.p["gslblbmethod"]; found { + u.Set("gslblbmethod", v.(string)) + } + if v, found := p.p["gslbservicetype"]; found { + u.Set("gslbservicetype", v.(string)) + } + if v, found := p.p["gslbstickysessionmethodname"]; found { + u.Set("gslbstickysessionmethodname", v.(string)) + } + if v, found := p.p["name"]; found { + u.Set("name", v.(string)) + } + if v, found := p.p["regionid"]; found { + vv := strconv.Itoa(v.(int)) + u.Set("regionid", vv) + } + return u +} + +func (p *CreateGlobalLoadBalancerRuleParams) SetAccount(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["account"] = v + return +} + +func 
(p *CreateGlobalLoadBalancerRuleParams) SetDescription(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["description"] = v + return +} + +func (p *CreateGlobalLoadBalancerRuleParams) SetDomainid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["domainid"] = v + return +} + +func (p *CreateGlobalLoadBalancerRuleParams) SetGslbdomainname(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["gslbdomainname"] = v + return +} + +func (p *CreateGlobalLoadBalancerRuleParams) SetGslblbmethod(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["gslblbmethod"] = v + return +} + +func (p *CreateGlobalLoadBalancerRuleParams) SetGslbservicetype(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["gslbservicetype"] = v + return +} + +func (p *CreateGlobalLoadBalancerRuleParams) SetGslbstickysessionmethodname(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["gslbstickysessionmethodname"] = v + return +} + +func (p *CreateGlobalLoadBalancerRuleParams) SetName(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["name"] = v + return +} + +func (p *CreateGlobalLoadBalancerRuleParams) SetRegionid(v int) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["regionid"] = v + return +} + +// You should always use this function to get a new CreateGlobalLoadBalancerRuleParams instance, +// as then you are sure you have configured all required params +func (s *LoadBalancerService) NewCreateGlobalLoadBalancerRuleParams(gslbdomainname string, gslbservicetype string, name string, regionid int) *CreateGlobalLoadBalancerRuleParams { + p := &CreateGlobalLoadBalancerRuleParams{} + p.p = make(map[string]interface{}) + p.p["gslbdomainname"] = gslbdomainname + p.p["gslbservicetype"] = gslbservicetype + p.p["name"] = name + p.p["regionid"] = regionid + return p +} + +// Creates a global load balancer rule +func (s *LoadBalancerService) CreateGlobalLoadBalancerRule(p *CreateGlobalLoadBalancerRuleParams) (*CreateGlobalLoadBalancerRuleResponse, error) { + resp, err := s.cs.newRequest("createGlobalLoadBalancerRule", p.toURLValues()) + if err != nil { + return nil, err + } + + var r CreateGlobalLoadBalancerRuleResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + + // If we have a async client, we need to wait for the async result + if s.cs.async { + b, err := s.cs.GetAsyncJobResult(r.JobID, s.cs.timeout) + if err != nil { + if err == AsyncTimeoutErr { + return &r, err + } + return nil, err + } + + b, err = getRawValue(b) + if err != nil { + return nil, err + } + + if err := json.Unmarshal(b, &r); err != nil { + return nil, err + } + } + return &r, nil +} + +type CreateGlobalLoadBalancerRuleResponse struct { + JobID string `json:"jobid,omitempty"` + Account string `json:"account,omitempty"` + Description string `json:"description,omitempty"` + Domain string `json:"domain,omitempty"` + Domainid string `json:"domainid,omitempty"` + Gslbdomainname string `json:"gslbdomainname,omitempty"` + Gslblbmethod string `json:"gslblbmethod,omitempty"` + Gslbservicetype string `json:"gslbservicetype,omitempty"` + Gslbstickysessionmethodname string `json:"gslbstickysessionmethodname,omitempty"` + Id string `json:"id,omitempty"` + Loadbalancerrule []struct { + Account string `json:"account,omitempty"` + Algorithm string `json:"algorithm,omitempty"` + Cidrlist string `json:"cidrlist,omitempty"` + Description string 
`json:"description,omitempty"` + Domain string `json:"domain,omitempty"` + Domainid string `json:"domainid,omitempty"` + Fordisplay bool `json:"fordisplay,omitempty"` + Id string `json:"id,omitempty"` + Name string `json:"name,omitempty"` + Networkid string `json:"networkid,omitempty"` + Privateport string `json:"privateport,omitempty"` + Project string `json:"project,omitempty"` + Projectid string `json:"projectid,omitempty"` + Protocol string `json:"protocol,omitempty"` + Publicip string `json:"publicip,omitempty"` + Publicipid string `json:"publicipid,omitempty"` + Publicport string `json:"publicport,omitempty"` + State string `json:"state,omitempty"` + Tags []struct { + Account string `json:"account,omitempty"` + Customer string `json:"customer,omitempty"` + Domain string `json:"domain,omitempty"` + Domainid string `json:"domainid,omitempty"` + Key string `json:"key,omitempty"` + Project string `json:"project,omitempty"` + Projectid string `json:"projectid,omitempty"` + Resourceid string `json:"resourceid,omitempty"` + Resourcetype string `json:"resourcetype,omitempty"` + Value string `json:"value,omitempty"` + } `json:"tags,omitempty"` + Zoneid string `json:"zoneid,omitempty"` + } `json:"loadbalancerrule,omitempty"` + Name string `json:"name,omitempty"` + Project string `json:"project,omitempty"` + Projectid string `json:"projectid,omitempty"` + Regionid int `json:"regionid,omitempty"` +} + +type DeleteGlobalLoadBalancerRuleParams struct { + p map[string]interface{} +} + +func (p *DeleteGlobalLoadBalancerRuleParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["id"]; found { + u.Set("id", v.(string)) + } + return u +} + +func (p *DeleteGlobalLoadBalancerRuleParams) SetId(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["id"] = v + return +} + +// You should always use this function to get a new DeleteGlobalLoadBalancerRuleParams instance, +// as then you are sure you have configured all required params +func (s *LoadBalancerService) NewDeleteGlobalLoadBalancerRuleParams(id string) *DeleteGlobalLoadBalancerRuleParams { + p := &DeleteGlobalLoadBalancerRuleParams{} + p.p = make(map[string]interface{}) + p.p["id"] = id + return p +} + +// Deletes a global load balancer rule. 
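Editorial note for readers of this vendored client: every mutating call in this file follows the same shape. You build the params with the generated New*Params constructor, invoke the call, and (with an async client) let the wrapper poll the job to completion. Below is a minimal usage sketch of the delete call defined just after this note; it assumes this package's NewAsyncClient constructor and LoadBalancer service field, and the endpoint, keys, and rule UUID are placeholders:

	package main

	import (
		"log"

		"github.com/xanzy/go-cloudstack/cloudstack"
	)

	func main() {
		// An async client blocks mutating calls until the CloudStack job finishes.
		cs := cloudstack.NewAsyncClient("https://cloud.example.com/client/api", "API_KEY", "SECRET_KEY", false)

		// The constructor takes every required parameter; optional ones go through Set* methods.
		p := cs.LoadBalancer.NewDeleteGlobalLoadBalancerRuleParams("gslb-rule-uuid") // placeholder UUID

		r, err := cs.LoadBalancer.DeleteGlobalLoadBalancerRule(p)
		if err != nil {
			log.Fatal(err)
		}
		log.Printf("deleted: %t (%s)", r.Success, r.Displaytext)
	}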
+func (s *LoadBalancerService) DeleteGlobalLoadBalancerRule(p *DeleteGlobalLoadBalancerRuleParams) (*DeleteGlobalLoadBalancerRuleResponse, error) { + resp, err := s.cs.newRequest("deleteGlobalLoadBalancerRule", p.toURLValues()) + if err != nil { + return nil, err + } + + var r DeleteGlobalLoadBalancerRuleResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + + // If we have an async client, we need to wait for the async result + if s.cs.async { + b, err := s.cs.GetAsyncJobResult(r.JobID, s.cs.timeout) + if err != nil { + if err == AsyncTimeoutErr { + return &r, err + } + return nil, err + } + + if err := json.Unmarshal(b, &r); err != nil { + return nil, err + } + } + return &r, nil +} + +type DeleteGlobalLoadBalancerRuleResponse struct { + JobID string `json:"jobid,omitempty"` + Displaytext string `json:"displaytext,omitempty"` + Success bool `json:"success,omitempty"` +} + +type UpdateGlobalLoadBalancerRuleParams struct { + p map[string]interface{} +} + +func (p *UpdateGlobalLoadBalancerRuleParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["description"]; found { + u.Set("description", v.(string)) + } + if v, found := p.p["gslblbmethod"]; found { + u.Set("gslblbmethod", v.(string)) + } + if v, found := p.p["gslbstickysessionmethodname"]; found { + u.Set("gslbstickysessionmethodname", v.(string)) + } + if v, found := p.p["id"]; found { + u.Set("id", v.(string)) + } + return u +} + +func (p *UpdateGlobalLoadBalancerRuleParams) SetDescription(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["description"] = v + return +} + +func (p *UpdateGlobalLoadBalancerRuleParams) SetGslblbmethod(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["gslblbmethod"] = v + return +} + +func (p *UpdateGlobalLoadBalancerRuleParams) SetGslbstickysessionmethodname(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["gslbstickysessionmethodname"] = v + return +} + +func (p *UpdateGlobalLoadBalancerRuleParams) SetId(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["id"] = v + return +} + +// You should always use this function to get a new UpdateGlobalLoadBalancerRuleParams instance, +// as then you are sure you have configured all required params +func (s *LoadBalancerService) NewUpdateGlobalLoadBalancerRuleParams(id string) *UpdateGlobalLoadBalancerRuleParams { + p := &UpdateGlobalLoadBalancerRuleParams{} + p.p = make(map[string]interface{}) + p.p["id"] = id + return p +} + +// Updates a global load balancer rule.
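The async branch in these wrappers ("If we have an async client ...") is the part worth internalizing: a client from NewAsyncClient blocks on GetAsyncJobResult internally, while a plain NewClient returns as soon as the job is queued. Here is a hedged sketch of driving that flow by hand against the update call that follows, using the synchronous constructor; the credentials, the rule UUID, and the 300-second timeout are placeholders:

	cs := cloudstack.NewClient("https://cloud.example.com/client/api", "API_KEY", "SECRET_KEY", false)

	p := cs.LoadBalancer.NewUpdateGlobalLoadBalancerRuleParams("gslb-rule-uuid")
	p.SetDescription("updated description")

	r, err := cs.LoadBalancer.UpdateGlobalLoadBalancerRule(p)
	if err != nil {
		log.Fatal(err)
	}

	// With a sync client the response may carry little more than the job ID,
	// so wait on the job explicitly, just as the generated wrapper does.
	b, err := cs.GetAsyncJobResult(r.JobID, 300)
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("raw job result: %s", string(b))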
+func (s *LoadBalancerService) UpdateGlobalLoadBalancerRule(p *UpdateGlobalLoadBalancerRuleParams) (*UpdateGlobalLoadBalancerRuleResponse, error) { + resp, err := s.cs.newRequest("updateGlobalLoadBalancerRule", p.toURLValues()) + if err != nil { + return nil, err + } + + var r UpdateGlobalLoadBalancerRuleResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + + // If we have a async client, we need to wait for the async result + if s.cs.async { + b, err := s.cs.GetAsyncJobResult(r.JobID, s.cs.timeout) + if err != nil { + if err == AsyncTimeoutErr { + return &r, err + } + return nil, err + } + + b, err = getRawValue(b) + if err != nil { + return nil, err + } + + if err := json.Unmarshal(b, &r); err != nil { + return nil, err + } + } + return &r, nil +} + +type UpdateGlobalLoadBalancerRuleResponse struct { + JobID string `json:"jobid,omitempty"` + Account string `json:"account,omitempty"` + Description string `json:"description,omitempty"` + Domain string `json:"domain,omitempty"` + Domainid string `json:"domainid,omitempty"` + Gslbdomainname string `json:"gslbdomainname,omitempty"` + Gslblbmethod string `json:"gslblbmethod,omitempty"` + Gslbservicetype string `json:"gslbservicetype,omitempty"` + Gslbstickysessionmethodname string `json:"gslbstickysessionmethodname,omitempty"` + Id string `json:"id,omitempty"` + Loadbalancerrule []struct { + Account string `json:"account,omitempty"` + Algorithm string `json:"algorithm,omitempty"` + Cidrlist string `json:"cidrlist,omitempty"` + Description string `json:"description,omitempty"` + Domain string `json:"domain,omitempty"` + Domainid string `json:"domainid,omitempty"` + Fordisplay bool `json:"fordisplay,omitempty"` + Id string `json:"id,omitempty"` + Name string `json:"name,omitempty"` + Networkid string `json:"networkid,omitempty"` + Privateport string `json:"privateport,omitempty"` + Project string `json:"project,omitempty"` + Projectid string `json:"projectid,omitempty"` + Protocol string `json:"protocol,omitempty"` + Publicip string `json:"publicip,omitempty"` + Publicipid string `json:"publicipid,omitempty"` + Publicport string `json:"publicport,omitempty"` + State string `json:"state,omitempty"` + Tags []struct { + Account string `json:"account,omitempty"` + Customer string `json:"customer,omitempty"` + Domain string `json:"domain,omitempty"` + Domainid string `json:"domainid,omitempty"` + Key string `json:"key,omitempty"` + Project string `json:"project,omitempty"` + Projectid string `json:"projectid,omitempty"` + Resourceid string `json:"resourceid,omitempty"` + Resourcetype string `json:"resourcetype,omitempty"` + Value string `json:"value,omitempty"` + } `json:"tags,omitempty"` + Zoneid string `json:"zoneid,omitempty"` + } `json:"loadbalancerrule,omitempty"` + Name string `json:"name,omitempty"` + Project string `json:"project,omitempty"` + Projectid string `json:"projectid,omitempty"` + Regionid int `json:"regionid,omitempty"` +} + +type ListGlobalLoadBalancerRulesParams struct { + p map[string]interface{} +} + +func (p *ListGlobalLoadBalancerRulesParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["account"]; found { + u.Set("account", v.(string)) + } + if v, found := p.p["domainid"]; found { + u.Set("domainid", v.(string)) + } + if v, found := p.p["id"]; found { + u.Set("id", v.(string)) + } + if v, found := p.p["isrecursive"]; found { + vv := strconv.FormatBool(v.(bool)) + u.Set("isrecursive", vv) + } + if v, found := p.p["keyword"]; found { + 
u.Set("keyword", v.(string)) + } + if v, found := p.p["listall"]; found { + vv := strconv.FormatBool(v.(bool)) + u.Set("listall", vv) + } + if v, found := p.p["page"]; found { + vv := strconv.Itoa(v.(int)) + u.Set("page", vv) + } + if v, found := p.p["pagesize"]; found { + vv := strconv.Itoa(v.(int)) + u.Set("pagesize", vv) + } + if v, found := p.p["projectid"]; found { + u.Set("projectid", v.(string)) + } + if v, found := p.p["regionid"]; found { + vv := strconv.Itoa(v.(int)) + u.Set("regionid", vv) + } + if v, found := p.p["tags"]; found { + i := 0 + for k, vv := range v.(map[string]string) { + u.Set(fmt.Sprintf("tags[%d].key", i), k) + u.Set(fmt.Sprintf("tags[%d].value", i), vv) + i++ + } + } + return u +} + +func (p *ListGlobalLoadBalancerRulesParams) SetAccount(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["account"] = v + return +} + +func (p *ListGlobalLoadBalancerRulesParams) SetDomainid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["domainid"] = v + return +} + +func (p *ListGlobalLoadBalancerRulesParams) SetId(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["id"] = v + return +} + +func (p *ListGlobalLoadBalancerRulesParams) SetIsrecursive(v bool) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["isrecursive"] = v + return +} + +func (p *ListGlobalLoadBalancerRulesParams) SetKeyword(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["keyword"] = v + return +} + +func (p *ListGlobalLoadBalancerRulesParams) SetListall(v bool) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["listall"] = v + return +} + +func (p *ListGlobalLoadBalancerRulesParams) SetPage(v int) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["page"] = v + return +} + +func (p *ListGlobalLoadBalancerRulesParams) SetPagesize(v int) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["pagesize"] = v + return +} + +func (p *ListGlobalLoadBalancerRulesParams) SetProjectid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["projectid"] = v + return +} + +func (p *ListGlobalLoadBalancerRulesParams) SetRegionid(v int) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["regionid"] = v + return +} + +func (p *ListGlobalLoadBalancerRulesParams) SetTags(v map[string]string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["tags"] = v + return +} + +// You should always use this function to get a new ListGlobalLoadBalancerRulesParams instance, +// as then you are sure you have configured all required params +func (s *LoadBalancerService) NewListGlobalLoadBalancerRulesParams() *ListGlobalLoadBalancerRulesParams { + p := &ListGlobalLoadBalancerRulesParams{} + p.p = make(map[string]interface{}) + return p +} + +// This is a courtesy helper function, which in some cases may not work as expected! 
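That warning deserves emphasis: the ID helper defined next matches on a keyword first and only falls back to an exact-name comparison when several rules match, so substring collisions can surface as "could not find an exact match" errors. A hedged sketch of defensive use, reusing the placeholder `cs` client from the first sketch; the rule name is also a placeholder:

	id, err := cs.LoadBalancer.GetGlobalLoadBalancerRuleID("web-gslb")
	if err != nil {
		// Zero or ambiguous matches: list explicitly and pick by hand instead.
		lp := cs.LoadBalancer.NewListGlobalLoadBalancerRulesParams()
		lp.SetKeyword("web-gslb")
		l, lerr := cs.LoadBalancer.ListGlobalLoadBalancerRules(lp)
		if lerr != nil {
			log.Fatal(lerr)
		}
		for _, rule := range l.GlobalLoadBalancerRules {
			log.Printf("candidate: %s (%s)", rule.Name, rule.Id)
		}
		return
	}
	log.Printf("resolved ID: %s", id)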
+func (s *LoadBalancerService) GetGlobalLoadBalancerRuleID(keyword string) (string, error) { + p := &ListGlobalLoadBalancerRulesParams{} + p.p = make(map[string]interface{}) + + p.p["keyword"] = keyword + + l, err := s.ListGlobalLoadBalancerRules(p) + if err != nil { + return "", err + } + + if l.Count == 0 { + // If no matches, search all projects + p.p["projectid"] = "-1" + + l, err = s.ListGlobalLoadBalancerRules(p) + if err != nil { + return "", err + } + } + + if l.Count == 0 { + return "", fmt.Errorf("No match found for %s: %+v", keyword, l) + } + + if l.Count == 1 { + return l.GlobalLoadBalancerRules[0].Id, nil + } + + if l.Count > 1 { + for _, v := range l.GlobalLoadBalancerRules { + if v.Name == keyword { + return v.Id, nil + } + } + } + return "", fmt.Errorf("Could not find an exact match for %s: %+v", keyword, l) +} + +// This is a courtesy helper function, which in some cases may not work as expected! +func (s *LoadBalancerService) GetGlobalLoadBalancerRuleByName(name string) (*GlobalLoadBalancerRule, int, error) { + id, err := s.GetGlobalLoadBalancerRuleID(name) + if err != nil { + return nil, -1, err + } + + r, count, err := s.GetGlobalLoadBalancerRuleByID(id) + if err != nil { + return nil, count, err + } + return r, count, nil +} + +// This is a courtesy helper function, which in some cases may not work as expected! +func (s *LoadBalancerService) GetGlobalLoadBalancerRuleByID(id string) (*GlobalLoadBalancerRule, int, error) { + p := &ListGlobalLoadBalancerRulesParams{} + p.p = make(map[string]interface{}) + + p.p["id"] = id + + l, err := s.ListGlobalLoadBalancerRules(p) + if err != nil { + if strings.Contains(err.Error(), fmt.Sprintf( + "Invalid parameter id value=%s due to incorrect long value format, "+ + "or entity does not exist", id)) { + return nil, 0, fmt.Errorf("No match found for %s: %+v", id, l) + } + return nil, -1, err + } + + if l.Count == 0 { + // If no matches, search all projects + p.p["projectid"] = "-1" + + l, err = s.ListGlobalLoadBalancerRules(p) + if err != nil { + if strings.Contains(err.Error(), fmt.Sprintf( + "Invalid parameter id value=%s due to incorrect long value format, "+ + "or entity does not exist", id)) { + return nil, 0, fmt.Errorf("No match found for %s: %+v", id, l) + } + return nil, -1, err + } + } + + if l.Count == 0 { + return nil, l.Count, fmt.Errorf("No match found for %s: %+v", id, l) + } + + if l.Count == 1 { + return l.GlobalLoadBalancerRules[0], l.Count, nil + } + return nil, l.Count, fmt.Errorf("There is more than one result for GlobalLoadBalancerRule UUID: %s!", id) +} + +// Lists global load balancer rules.
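Like the other list calls in this client, the one that follows is paginated; as I read the CloudStack list semantics, Count is the total number of matches across all pages and page numbers start at 1. A hedged paging sketch (the page size of 50 is arbitrary, and `cs` is the placeholder client from the sketches above):

	p := cs.LoadBalancer.NewListGlobalLoadBalancerRulesParams()
	p.SetPagesize(50)

	for page := 1; ; page++ {
		p.SetPage(page)
		l, err := cs.LoadBalancer.ListGlobalLoadBalancerRules(p)
		if err != nil {
			log.Fatal(err)
		}
		for _, rule := range l.GlobalLoadBalancerRules {
			log.Printf("%s (%s)", rule.Name, rule.Id)
		}
		// Stop once we have paged past the reported total.
		if page*50 >= l.Count || len(l.GlobalLoadBalancerRules) == 0 {
			break
		}
	}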
+func (s *LoadBalancerService) ListGlobalLoadBalancerRules(p *ListGlobalLoadBalancerRulesParams) (*ListGlobalLoadBalancerRulesResponse, error) { + resp, err := s.cs.newRequest("listGlobalLoadBalancerRules", p.toURLValues()) + if err != nil { + return nil, err + } + + var r ListGlobalLoadBalancerRulesResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + return &r, nil +} + +type ListGlobalLoadBalancerRulesResponse struct { + Count int `json:"count"` + GlobalLoadBalancerRules []*GlobalLoadBalancerRule `json:"globalloadbalancerrule"` +} + +type GlobalLoadBalancerRule struct { + Account string `json:"account,omitempty"` + Description string `json:"description,omitempty"` + Domain string `json:"domain,omitempty"` + Domainid string `json:"domainid,omitempty"` + Gslbdomainname string `json:"gslbdomainname,omitempty"` + Gslblbmethod string `json:"gslblbmethod,omitempty"` + Gslbservicetype string `json:"gslbservicetype,omitempty"` + Gslbstickysessionmethodname string `json:"gslbstickysessionmethodname,omitempty"` + Id string `json:"id,omitempty"` + Loadbalancerrule []struct { + Account string `json:"account,omitempty"` + Algorithm string `json:"algorithm,omitempty"` + Cidrlist string `json:"cidrlist,omitempty"` + Description string `json:"description,omitempty"` + Domain string `json:"domain,omitempty"` + Domainid string `json:"domainid,omitempty"` + Fordisplay bool `json:"fordisplay,omitempty"` + Id string `json:"id,omitempty"` + Name string `json:"name,omitempty"` + Networkid string `json:"networkid,omitempty"` + Privateport string `json:"privateport,omitempty"` + Project string `json:"project,omitempty"` + Projectid string `json:"projectid,omitempty"` + Protocol string `json:"protocol,omitempty"` + Publicip string `json:"publicip,omitempty"` + Publicipid string `json:"publicipid,omitempty"` + Publicport string `json:"publicport,omitempty"` + State string `json:"state,omitempty"` + Tags []struct { + Account string `json:"account,omitempty"` + Customer string `json:"customer,omitempty"` + Domain string `json:"domain,omitempty"` + Domainid string `json:"domainid,omitempty"` + Key string `json:"key,omitempty"` + Project string `json:"project,omitempty"` + Projectid string `json:"projectid,omitempty"` + Resourceid string `json:"resourceid,omitempty"` + Resourcetype string `json:"resourcetype,omitempty"` + Value string `json:"value,omitempty"` + } `json:"tags,omitempty"` + Zoneid string `json:"zoneid,omitempty"` + } `json:"loadbalancerrule,omitempty"` + Name string `json:"name,omitempty"` + Project string `json:"project,omitempty"` + Projectid string `json:"projectid,omitempty"` + Regionid int `json:"regionid,omitempty"` +} + +type AssignToGlobalLoadBalancerRuleParams struct { + p map[string]interface{} +} + +func (p *AssignToGlobalLoadBalancerRuleParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["gslblbruleweightsmap"]; found { + i := 0 + for k, vv := range v.(map[string]string) { + u.Set(fmt.Sprintf("gslblbruleweightsmap[%d].key", i), k) + u.Set(fmt.Sprintf("gslblbruleweightsmap[%d].value", i), vv) + i++ + } + } + if v, found := p.p["id"]; found { + u.Set("id", v.(string)) + } + if v, found := p.p["loadbalancerrulelist"]; found { + vv := strings.Join(v.([]string), ",") + u.Set("loadbalancerrulelist", vv) + } + return u +} + +func (p *AssignToGlobalLoadBalancerRuleParams) SetGslblbruleweightsmap(v map[string]string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["gslblbruleweightsmap"] = v + 
return +} + +func (p *AssignToGlobalLoadBalancerRuleParams) SetId(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["id"] = v + return +} + +func (p *AssignToGlobalLoadBalancerRuleParams) SetLoadbalancerrulelist(v []string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["loadbalancerrulelist"] = v + return +} + +// You should always use this function to get a new AssignToGlobalLoadBalancerRuleParams instance, +// as then you are sure you have configured all required params +func (s *LoadBalancerService) NewAssignToGlobalLoadBalancerRuleParams(id string, loadbalancerrulelist []string) *AssignToGlobalLoadBalancerRuleParams { + p := &AssignToGlobalLoadBalancerRuleParams{} + p.p = make(map[string]interface{}) + p.p["id"] = id + p.p["loadbalancerrulelist"] = loadbalancerrulelist + return p +} + +// Assign load balancer rule or list of load balancer rules to a global load balancer rules. +func (s *LoadBalancerService) AssignToGlobalLoadBalancerRule(p *AssignToGlobalLoadBalancerRuleParams) (*AssignToGlobalLoadBalancerRuleResponse, error) { + resp, err := s.cs.newRequest("assignToGlobalLoadBalancerRule", p.toURLValues()) + if err != nil { + return nil, err + } + + var r AssignToGlobalLoadBalancerRuleResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + + // If we have a async client, we need to wait for the async result + if s.cs.async { + b, err := s.cs.GetAsyncJobResult(r.JobID, s.cs.timeout) + if err != nil { + if err == AsyncTimeoutErr { + return &r, err + } + return nil, err + } + + if err := json.Unmarshal(b, &r); err != nil { + return nil, err + } + } + return &r, nil +} + +type AssignToGlobalLoadBalancerRuleResponse struct { + JobID string `json:"jobid,omitempty"` + Displaytext string `json:"displaytext,omitempty"` + Success bool `json:"success,omitempty"` +} + +type RemoveFromGlobalLoadBalancerRuleParams struct { + p map[string]interface{} +} + +func (p *RemoveFromGlobalLoadBalancerRuleParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["id"]; found { + u.Set("id", v.(string)) + } + if v, found := p.p["loadbalancerrulelist"]; found { + vv := strings.Join(v.([]string), ",") + u.Set("loadbalancerrulelist", vv) + } + return u +} + +func (p *RemoveFromGlobalLoadBalancerRuleParams) SetId(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["id"] = v + return +} + +func (p *RemoveFromGlobalLoadBalancerRuleParams) SetLoadbalancerrulelist(v []string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["loadbalancerrulelist"] = v + return +} + +// You should always use this function to get a new RemoveFromGlobalLoadBalancerRuleParams instance, +// as then you are sure you have configured all required params +func (s *LoadBalancerService) NewRemoveFromGlobalLoadBalancerRuleParams(id string, loadbalancerrulelist []string) *RemoveFromGlobalLoadBalancerRuleParams { + p := &RemoveFromGlobalLoadBalancerRuleParams{} + p.p = make(map[string]interface{}) + p.p["id"] = id + p.p["loadbalancerrulelist"] = loadbalancerrulelist + return p +} + +// Removes a load balancer rule association with global load balancer rule +func (s *LoadBalancerService) RemoveFromGlobalLoadBalancerRule(p *RemoveFromGlobalLoadBalancerRuleParams) (*RemoveFromGlobalLoadBalancerRuleResponse, error) { + resp, err := s.cs.newRequest("removeFromGlobalLoadBalancerRule", p.toURLValues()) + if err != nil { + return nil, err + } + + var r 
RemoveFromGlobalLoadBalancerRuleResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + + // If we have a async client, we need to wait for the async result + if s.cs.async { + b, err := s.cs.GetAsyncJobResult(r.JobID, s.cs.timeout) + if err != nil { + if err == AsyncTimeoutErr { + return &r, err + } + return nil, err + } + + if err := json.Unmarshal(b, &r); err != nil { + return nil, err + } + } + return &r, nil +} + +type RemoveFromGlobalLoadBalancerRuleResponse struct { + JobID string `json:"jobid,omitempty"` + Displaytext string `json:"displaytext,omitempty"` + Success bool `json:"success,omitempty"` +} + +type CreateLoadBalancerParams struct { + p map[string]interface{} +} + +func (p *CreateLoadBalancerParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["algorithm"]; found { + u.Set("algorithm", v.(string)) + } + if v, found := p.p["description"]; found { + u.Set("description", v.(string)) + } + if v, found := p.p["fordisplay"]; found { + vv := strconv.FormatBool(v.(bool)) + u.Set("fordisplay", vv) + } + if v, found := p.p["instanceport"]; found { + vv := strconv.Itoa(v.(int)) + u.Set("instanceport", vv) + } + if v, found := p.p["name"]; found { + u.Set("name", v.(string)) + } + if v, found := p.p["networkid"]; found { + u.Set("networkid", v.(string)) + } + if v, found := p.p["scheme"]; found { + u.Set("scheme", v.(string)) + } + if v, found := p.p["sourceipaddress"]; found { + u.Set("sourceipaddress", v.(string)) + } + if v, found := p.p["sourceipaddressnetworkid"]; found { + u.Set("sourceipaddressnetworkid", v.(string)) + } + if v, found := p.p["sourceport"]; found { + vv := strconv.Itoa(v.(int)) + u.Set("sourceport", vv) + } + return u +} + +func (p *CreateLoadBalancerParams) SetAlgorithm(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["algorithm"] = v + return +} + +func (p *CreateLoadBalancerParams) SetDescription(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["description"] = v + return +} + +func (p *CreateLoadBalancerParams) SetFordisplay(v bool) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["fordisplay"] = v + return +} + +func (p *CreateLoadBalancerParams) SetInstanceport(v int) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["instanceport"] = v + return +} + +func (p *CreateLoadBalancerParams) SetName(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["name"] = v + return +} + +func (p *CreateLoadBalancerParams) SetNetworkid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["networkid"] = v + return +} + +func (p *CreateLoadBalancerParams) SetScheme(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["scheme"] = v + return +} + +func (p *CreateLoadBalancerParams) SetSourceipaddress(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["sourceipaddress"] = v + return +} + +func (p *CreateLoadBalancerParams) SetSourceipaddressnetworkid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["sourceipaddressnetworkid"] = v + return +} + +func (p *CreateLoadBalancerParams) SetSourceport(v int) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["sourceport"] = v + return +} + +// You should always use this function to get a new CreateLoadBalancerParams instance, +// as then you are sure you have configured all required params +func (s *LoadBalancerService) 
NewCreateLoadBalancerParams(algorithm string, instanceport int, name string, networkid string, scheme string, sourceipaddressnetworkid string, sourceport int) *CreateLoadBalancerParams { + p := &CreateLoadBalancerParams{} + p.p = make(map[string]interface{}) + p.p["algorithm"] = algorithm + p.p["instanceport"] = instanceport + p.p["name"] = name + p.p["networkid"] = networkid + p.p["scheme"] = scheme + p.p["sourceipaddressnetworkid"] = sourceipaddressnetworkid + p.p["sourceport"] = sourceport + return p +} + +// Creates a Load Balancer +func (s *LoadBalancerService) CreateLoadBalancer(p *CreateLoadBalancerParams) (*CreateLoadBalancerResponse, error) { + resp, err := s.cs.newRequest("createLoadBalancer", p.toURLValues()) + if err != nil { + return nil, err + } + + var r CreateLoadBalancerResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + + // If we have a async client, we need to wait for the async result + if s.cs.async { + b, err := s.cs.GetAsyncJobResult(r.JobID, s.cs.timeout) + if err != nil { + if err == AsyncTimeoutErr { + return &r, err + } + return nil, err + } + + b, err = getRawValue(b) + if err != nil { + return nil, err + } + + if err := json.Unmarshal(b, &r); err != nil { + return nil, err + } + } + return &r, nil +} + +type CreateLoadBalancerResponse struct { + JobID string `json:"jobid,omitempty"` + Account string `json:"account,omitempty"` + Algorithm string `json:"algorithm,omitempty"` + Description string `json:"description,omitempty"` + Domain string `json:"domain,omitempty"` + Domainid string `json:"domainid,omitempty"` + Fordisplay bool `json:"fordisplay,omitempty"` + Id string `json:"id,omitempty"` + Loadbalancerinstance []struct { + Id string `json:"id,omitempty"` + Ipaddress string `json:"ipaddress,omitempty"` + Name string `json:"name,omitempty"` + State string `json:"state,omitempty"` + } `json:"loadbalancerinstance,omitempty"` + Loadbalancerrule []struct { + Instanceport int `json:"instanceport,omitempty"` + Sourceport int `json:"sourceport,omitempty"` + State string `json:"state,omitempty"` + } `json:"loadbalancerrule,omitempty"` + Name string `json:"name,omitempty"` + Networkid string `json:"networkid,omitempty"` + Project string `json:"project,omitempty"` + Projectid string `json:"projectid,omitempty"` + Sourceipaddress string `json:"sourceipaddress,omitempty"` + Sourceipaddressnetworkid string `json:"sourceipaddressnetworkid,omitempty"` + Tags []struct { + Account string `json:"account,omitempty"` + Customer string `json:"customer,omitempty"` + Domain string `json:"domain,omitempty"` + Domainid string `json:"domainid,omitempty"` + Key string `json:"key,omitempty"` + Project string `json:"project,omitempty"` + Projectid string `json:"projectid,omitempty"` + Resourceid string `json:"resourceid,omitempty"` + Resourcetype string `json:"resourcetype,omitempty"` + Value string `json:"value,omitempty"` + } `json:"tags,omitempty"` +} + +type ListLoadBalancersParams struct { + p map[string]interface{} +} + +func (p *ListLoadBalancersParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["account"]; found { + u.Set("account", v.(string)) + } + if v, found := p.p["domainid"]; found { + u.Set("domainid", v.(string)) + } + if v, found := p.p["fordisplay"]; found { + vv := strconv.FormatBool(v.(bool)) + u.Set("fordisplay", vv) + } + if v, found := p.p["id"]; found { + u.Set("id", v.(string)) + } + if v, found := p.p["isrecursive"]; found { + vv := strconv.FormatBool(v.(bool)) + 
u.Set("isrecursive", vv) + } + if v, found := p.p["keyword"]; found { + u.Set("keyword", v.(string)) + } + if v, found := p.p["listall"]; found { + vv := strconv.FormatBool(v.(bool)) + u.Set("listall", vv) + } + if v, found := p.p["name"]; found { + u.Set("name", v.(string)) + } + if v, found := p.p["networkid"]; found { + u.Set("networkid", v.(string)) + } + if v, found := p.p["page"]; found { + vv := strconv.Itoa(v.(int)) + u.Set("page", vv) + } + if v, found := p.p["pagesize"]; found { + vv := strconv.Itoa(v.(int)) + u.Set("pagesize", vv) + } + if v, found := p.p["projectid"]; found { + u.Set("projectid", v.(string)) + } + if v, found := p.p["scheme"]; found { + u.Set("scheme", v.(string)) + } + if v, found := p.p["sourceipaddress"]; found { + u.Set("sourceipaddress", v.(string)) + } + if v, found := p.p["sourceipaddressnetworkid"]; found { + u.Set("sourceipaddressnetworkid", v.(string)) + } + if v, found := p.p["tags"]; found { + i := 0 + for k, vv := range v.(map[string]string) { + u.Set(fmt.Sprintf("tags[%d].key", i), k) + u.Set(fmt.Sprintf("tags[%d].value", i), vv) + i++ + } + } + return u +} + +func (p *ListLoadBalancersParams) SetAccount(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["account"] = v + return +} + +func (p *ListLoadBalancersParams) SetDomainid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["domainid"] = v + return +} + +func (p *ListLoadBalancersParams) SetFordisplay(v bool) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["fordisplay"] = v + return +} + +func (p *ListLoadBalancersParams) SetId(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["id"] = v + return +} + +func (p *ListLoadBalancersParams) SetIsrecursive(v bool) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["isrecursive"] = v + return +} + +func (p *ListLoadBalancersParams) SetKeyword(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["keyword"] = v + return +} + +func (p *ListLoadBalancersParams) SetListall(v bool) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["listall"] = v + return +} + +func (p *ListLoadBalancersParams) SetName(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["name"] = v + return +} + +func (p *ListLoadBalancersParams) SetNetworkid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["networkid"] = v + return +} + +func (p *ListLoadBalancersParams) SetPage(v int) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["page"] = v + return +} + +func (p *ListLoadBalancersParams) SetPagesize(v int) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["pagesize"] = v + return +} + +func (p *ListLoadBalancersParams) SetProjectid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["projectid"] = v + return +} + +func (p *ListLoadBalancersParams) SetScheme(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["scheme"] = v + return +} + +func (p *ListLoadBalancersParams) SetSourceipaddress(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["sourceipaddress"] = v + return +} + +func (p *ListLoadBalancersParams) SetSourceipaddressnetworkid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["sourceipaddressnetworkid"] = v + return +} + +func (p *ListLoadBalancersParams) SetTags(v map[string]string) { + if p.p == nil { + p.p = 
make(map[string]interface{}) + } + p.p["tags"] = v + return +} + +// You should always use this function to get a new ListLoadBalancersParams instance, +// as then you are sure you have configured all required params +func (s *LoadBalancerService) NewListLoadBalancersParams() *ListLoadBalancersParams { + p := &ListLoadBalancersParams{} + p.p = make(map[string]interface{}) + return p +} + +// This is a courtesy helper function, which in some cases may not work as expected! +func (s *LoadBalancerService) GetLoadBalancerID(name string) (string, error) { + p := &ListLoadBalancersParams{} + p.p = make(map[string]interface{}) + + p.p["name"] = name + + l, err := s.ListLoadBalancers(p) + if err != nil { + return "", err + } + + if l.Count == 0 { + // If no matches, search all projects + p.p["projectid"] = "-1" + + l, err = s.ListLoadBalancers(p) + if err != nil { + return "", err + } + } + + if l.Count == 0 { + return "", fmt.Errorf("No match found for %s: %+v", name, l) + } + + if l.Count == 1 { + return l.LoadBalancers[0].Id, nil + } + + if l.Count > 1 { + for _, v := range l.LoadBalancers { + if v.Name == name { + return v.Id, nil + } + } + } + return "", fmt.Errorf("Could not find an exact match for %s: %+v", name, l) +} + +// This is a courtesy helper function, which in some cases may not work as expected! +func (s *LoadBalancerService) GetLoadBalancerByName(name string) (*LoadBalancer, int, error) { + id, err := s.GetLoadBalancerID(name) + if err != nil { + return nil, -1, err + } + + r, count, err := s.GetLoadBalancerByID(id) + if err != nil { + return nil, count, err + } + return r, count, nil +} + +// This is a courtesy helper function, which in some cases may not work as expected! +func (s *LoadBalancerService) GetLoadBalancerByID(id string) (*LoadBalancer, int, error) { + p := &ListLoadBalancersParams{} + p.p = make(map[string]interface{}) + + p.p["id"] = id + + l, err := s.ListLoadBalancers(p) + if err != nil { + if strings.Contains(err.Error(), fmt.Sprintf( + "Invalid parameter id value=%s due to incorrect long value format, "+ + "or entity does not exist", id)) { + return nil, 0, fmt.Errorf("No match found for %s: %+v", id, l) + } + return nil, -1, err + } + + if l.Count == 0 { + // If no matches, search all projects + p.p["projectid"] = "-1" + + l, err = s.ListLoadBalancers(p) + if err != nil { + if strings.Contains(err.Error(), fmt.Sprintf( + "Invalid parameter id value=%s due to incorrect long value format, "+ + "or entity does not exist", id)) { + return nil, 0, fmt.Errorf("No match found for %s: %+v", id, l) + } + return nil, -1, err + } + } + + if l.Count == 0 { + return nil, l.Count, fmt.Errorf("No match found for %s: %+v", id, l) + } + + if l.Count == 1 { + return l.LoadBalancers[0], l.Count, nil + } + return nil, l.Count, fmt.Errorf("There is more then one result for LoadBalancer UUID: %s!", id) +} + +// Lists Load Balancers +func (s *LoadBalancerService) ListLoadBalancers(p *ListLoadBalancersParams) (*ListLoadBalancersResponse, error) { + resp, err := s.cs.newRequest("listLoadBalancers", p.toURLValues()) + if err != nil { + return nil, err + } + + var r ListLoadBalancersResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + return &r, nil +} + +type ListLoadBalancersResponse struct { + Count int `json:"count"` + LoadBalancers []*LoadBalancer `json:"loadbalancer"` +} + +type LoadBalancer struct { + Account string `json:"account,omitempty"` + Algorithm string `json:"algorithm,omitempty"` + Description string `json:"description,omitempty"` + 
Domain string `json:"domain,omitempty"` + Domainid string `json:"domainid,omitempty"` + Fordisplay bool `json:"fordisplay,omitempty"` + Id string `json:"id,omitempty"` + Loadbalancerinstance []struct { + Id string `json:"id,omitempty"` + Ipaddress string `json:"ipaddress,omitempty"` + Name string `json:"name,omitempty"` + State string `json:"state,omitempty"` + } `json:"loadbalancerinstance,omitempty"` + Loadbalancerrule []struct { + Instanceport int `json:"instanceport,omitempty"` + Sourceport int `json:"sourceport,omitempty"` + State string `json:"state,omitempty"` + } `json:"loadbalancerrule,omitempty"` + Name string `json:"name,omitempty"` + Networkid string `json:"networkid,omitempty"` + Project string `json:"project,omitempty"` + Projectid string `json:"projectid,omitempty"` + Sourceipaddress string `json:"sourceipaddress,omitempty"` + Sourceipaddressnetworkid string `json:"sourceipaddressnetworkid,omitempty"` + Tags []struct { + Account string `json:"account,omitempty"` + Customer string `json:"customer,omitempty"` + Domain string `json:"domain,omitempty"` + Domainid string `json:"domainid,omitempty"` + Key string `json:"key,omitempty"` + Project string `json:"project,omitempty"` + Projectid string `json:"projectid,omitempty"` + Resourceid string `json:"resourceid,omitempty"` + Resourcetype string `json:"resourcetype,omitempty"` + Value string `json:"value,omitempty"` + } `json:"tags,omitempty"` +} + +type DeleteLoadBalancerParams struct { + p map[string]interface{} +} + +func (p *DeleteLoadBalancerParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["id"]; found { + u.Set("id", v.(string)) + } + return u +} + +func (p *DeleteLoadBalancerParams) SetId(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["id"] = v + return +} + +// You should always use this function to get a new DeleteLoadBalancerParams instance, +// as then you are sure you have configured all required params +func (s *LoadBalancerService) NewDeleteLoadBalancerParams(id string) *DeleteLoadBalancerParams { + p := &DeleteLoadBalancerParams{} + p.p = make(map[string]interface{}) + p.p["id"] = id + return p +} + +// Deletes a load balancer +func (s *LoadBalancerService) DeleteLoadBalancer(p *DeleteLoadBalancerParams) (*DeleteLoadBalancerResponse, error) { + resp, err := s.cs.newRequest("deleteLoadBalancer", p.toURLValues()) + if err != nil { + return nil, err + } + + var r DeleteLoadBalancerResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + + // If we have a async client, we need to wait for the async result + if s.cs.async { + b, err := s.cs.GetAsyncJobResult(r.JobID, s.cs.timeout) + if err != nil { + if err == AsyncTimeoutErr { + return &r, err + } + return nil, err + } + + if err := json.Unmarshal(b, &r); err != nil { + return nil, err + } + } + return &r, nil +} + +type DeleteLoadBalancerResponse struct { + JobID string `json:"jobid,omitempty"` + Displaytext string `json:"displaytext,omitempty"` + Success bool `json:"success,omitempty"` +} + +type UpdateLoadBalancerParams struct { + p map[string]interface{} +} + +func (p *UpdateLoadBalancerParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["customid"]; found { + u.Set("customid", v.(string)) + } + if v, found := p.p["fordisplay"]; found { + vv := strconv.FormatBool(v.(bool)) + u.Set("fordisplay", vv) + } + if v, found := p.p["id"]; found { + u.Set("id", v.(string)) + } + return u +} + +func (p 
*UpdateLoadBalancerParams) SetCustomid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["customid"] = v + return +} + +func (p *UpdateLoadBalancerParams) SetFordisplay(v bool) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["fordisplay"] = v + return +} + +func (p *UpdateLoadBalancerParams) SetId(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["id"] = v + return +} + +// You should always use this function to get a new UpdateLoadBalancerParams instance, +// as then you are sure you have configured all required params +func (s *LoadBalancerService) NewUpdateLoadBalancerParams(id string) *UpdateLoadBalancerParams { + p := &UpdateLoadBalancerParams{} + p.p = make(map[string]interface{}) + p.p["id"] = id + return p +} + +// Updates a Load Balancer +func (s *LoadBalancerService) UpdateLoadBalancer(p *UpdateLoadBalancerParams) (*UpdateLoadBalancerResponse, error) { + resp, err := s.cs.newRequest("updateLoadBalancer", p.toURLValues()) + if err != nil { + return nil, err + } + + var r UpdateLoadBalancerResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + + // If we have a async client, we need to wait for the async result + if s.cs.async { + b, err := s.cs.GetAsyncJobResult(r.JobID, s.cs.timeout) + if err != nil { + if err == AsyncTimeoutErr { + return &r, err + } + return nil, err + } + + b, err = getRawValue(b) + if err != nil { + return nil, err + } + + if err := json.Unmarshal(b, &r); err != nil { + return nil, err + } + } + return &r, nil +} + +type UpdateLoadBalancerResponse struct { + JobID string `json:"jobid,omitempty"` + Account string `json:"account,omitempty"` + Algorithm string `json:"algorithm,omitempty"` + Description string `json:"description,omitempty"` + Domain string `json:"domain,omitempty"` + Domainid string `json:"domainid,omitempty"` + Fordisplay bool `json:"fordisplay,omitempty"` + Id string `json:"id,omitempty"` + Loadbalancerinstance []struct { + Id string `json:"id,omitempty"` + Ipaddress string `json:"ipaddress,omitempty"` + Name string `json:"name,omitempty"` + State string `json:"state,omitempty"` + } `json:"loadbalancerinstance,omitempty"` + Loadbalancerrule []struct { + Instanceport int `json:"instanceport,omitempty"` + Sourceport int `json:"sourceport,omitempty"` + State string `json:"state,omitempty"` + } `json:"loadbalancerrule,omitempty"` + Name string `json:"name,omitempty"` + Networkid string `json:"networkid,omitempty"` + Project string `json:"project,omitempty"` + Projectid string `json:"projectid,omitempty"` + Sourceipaddress string `json:"sourceipaddress,omitempty"` + Sourceipaddressnetworkid string `json:"sourceipaddressnetworkid,omitempty"` + Tags []struct { + Account string `json:"account,omitempty"` + Customer string `json:"customer,omitempty"` + Domain string `json:"domain,omitempty"` + Domainid string `json:"domainid,omitempty"` + Key string `json:"key,omitempty"` + Project string `json:"project,omitempty"` + Projectid string `json:"projectid,omitempty"` + Resourceid string `json:"resourceid,omitempty"` + Resourcetype string `json:"resourcetype,omitempty"` + Value string `json:"value,omitempty"` + } `json:"tags,omitempty"` +} diff --git a/vendor/github.com/xanzy/go-cloudstack/cloudstack/LoginService.go b/vendor/github.com/xanzy/go-cloudstack/cloudstack/LoginService.go new file mode 100644 index 000000000..bd6db4a97 --- /dev/null +++ b/vendor/github.com/xanzy/go-cloudstack/cloudstack/LoginService.go @@ -0,0 +1,17 @@ +// +// Copyright 2014, Sander 
van Harmelen +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package cloudstack diff --git a/vendor/github.com/xanzy/go-cloudstack/cloudstack/LogoutService.go b/vendor/github.com/xanzy/go-cloudstack/cloudstack/LogoutService.go new file mode 100644 index 000000000..bd6db4a97 --- /dev/null +++ b/vendor/github.com/xanzy/go-cloudstack/cloudstack/LogoutService.go @@ -0,0 +1,17 @@ +// +// Copyright 2014, Sander van Harmelen +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package cloudstack diff --git a/vendor/github.com/xanzy/go-cloudstack/cloudstack/NATService.go b/vendor/github.com/xanzy/go-cloudstack/cloudstack/NATService.go new file mode 100644 index 000000000..25ad9a162 --- /dev/null +++ b/vendor/github.com/xanzy/go-cloudstack/cloudstack/NATService.go @@ -0,0 +1,640 @@ +// +// Copyright 2014, Sander van Harmelen +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// + +package cloudstack + +import ( + "encoding/json" + "fmt" + "net/url" + "strconv" + "strings" +) + +type EnableStaticNatParams struct { + p map[string]interface{} +} + +func (p *EnableStaticNatParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["ipaddressid"]; found { + u.Set("ipaddressid", v.(string)) + } + if v, found := p.p["networkid"]; found { + u.Set("networkid", v.(string)) + } + if v, found := p.p["virtualmachineid"]; found { + u.Set("virtualmachineid", v.(string)) + } + if v, found := p.p["vmguestip"]; found { + u.Set("vmguestip", v.(string)) + } + return u +} + +func (p *EnableStaticNatParams) SetIpaddressid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["ipaddressid"] = v + return +} + +func (p *EnableStaticNatParams) SetNetworkid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["networkid"] = v + return +} + +func (p *EnableStaticNatParams) SetVirtualmachineid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["virtualmachineid"] = v + return +} + +func (p *EnableStaticNatParams) SetVmguestip(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["vmguestip"] = v + return +} + +// You should always use this function to get a new EnableStaticNatParams instance, +// as then you are sure you have configured all required params +func (s *NATService) NewEnableStaticNatParams(ipaddressid string, virtualmachineid string) *EnableStaticNatParams { + p := &EnableStaticNatParams{} + p.p = make(map[string]interface{}) + p.p["ipaddressid"] = ipaddressid + p.p["virtualmachineid"] = virtualmachineid + return p +} + +// Enables static nat for given ip address +func (s *NATService) EnableStaticNat(p *EnableStaticNatParams) (*EnableStaticNatResponse, error) { + resp, err := s.cs.newRequest("enableStaticNat", p.toURLValues()) + if err != nil { + return nil, err + } + + var r EnableStaticNatResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + return &r, nil +} + +type EnableStaticNatResponse struct { + Displaytext string `json:"displaytext,omitempty"` + Success string `json:"success,omitempty"` +} + +type CreateIpForwardingRuleParams struct { + p map[string]interface{} +} + +func (p *CreateIpForwardingRuleParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["cidrlist"]; found { + vv := strings.Join(v.([]string), ",") + u.Set("cidrlist", vv) + } + if v, found := p.p["endport"]; found { + vv := strconv.Itoa(v.(int)) + u.Set("endport", vv) + } + if v, found := p.p["ipaddressid"]; found { + u.Set("ipaddressid", v.(string)) + } + if v, found := p.p["openfirewall"]; found { + vv := strconv.FormatBool(v.(bool)) + u.Set("openfirewall", vv) + } + if v, found := p.p["protocol"]; found { + u.Set("protocol", v.(string)) + } + if v, found := p.p["startport"]; found { + vv := strconv.Itoa(v.(int)) + u.Set("startport", vv) + } + return u +} + +func (p *CreateIpForwardingRuleParams) SetCidrlist(v []string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["cidrlist"] = v + return +} + +func (p *CreateIpForwardingRuleParams) SetEndport(v int) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["endport"] = v + return +} + +func (p *CreateIpForwardingRuleParams) SetIpaddressid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["ipaddressid"] = v + return +} + +func (p *CreateIpForwardingRuleParams) 
SetOpenfirewall(v bool) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["openfirewall"] = v + return +} + +func (p *CreateIpForwardingRuleParams) SetProtocol(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["protocol"] = v + return +} + +func (p *CreateIpForwardingRuleParams) SetStartport(v int) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["startport"] = v + return +} + +// You should always use this function to get a new CreateIpForwardingRuleParams instance, +// as then you are sure you have configured all required params +func (s *NATService) NewCreateIpForwardingRuleParams(ipaddressid string, protocol string, startport int) *CreateIpForwardingRuleParams { + p := &CreateIpForwardingRuleParams{} + p.p = make(map[string]interface{}) + p.p["ipaddressid"] = ipaddressid + p.p["protocol"] = protocol + p.p["startport"] = startport + return p +} + +// Creates an ip forwarding rule +func (s *NATService) CreateIpForwardingRule(p *CreateIpForwardingRuleParams) (*CreateIpForwardingRuleResponse, error) { + resp, err := s.cs.newRequest("createIpForwardingRule", p.toURLValues()) + if err != nil { + return nil, err + } + + var r CreateIpForwardingRuleResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + + // If we have a async client, we need to wait for the async result + if s.cs.async { + b, err := s.cs.GetAsyncJobResult(r.JobID, s.cs.timeout) + if err != nil { + if err == AsyncTimeoutErr { + return &r, err + } + return nil, err + } + + b, err = getRawValue(b) + if err != nil { + return nil, err + } + + if err := json.Unmarshal(b, &r); err != nil { + return nil, err + } + } + return &r, nil +} + +type CreateIpForwardingRuleResponse struct { + JobID string `json:"jobid,omitempty"` + Cidrlist string `json:"cidrlist,omitempty"` + Fordisplay bool `json:"fordisplay,omitempty"` + Id string `json:"id,omitempty"` + Ipaddress string `json:"ipaddress,omitempty"` + Ipaddressid string `json:"ipaddressid,omitempty"` + Networkid string `json:"networkid,omitempty"` + Privateendport string `json:"privateendport,omitempty"` + Privateport string `json:"privateport,omitempty"` + Protocol string `json:"protocol,omitempty"` + Publicendport string `json:"publicendport,omitempty"` + Publicport string `json:"publicport,omitempty"` + State string `json:"state,omitempty"` + Tags []struct { + Account string `json:"account,omitempty"` + Customer string `json:"customer,omitempty"` + Domain string `json:"domain,omitempty"` + Domainid string `json:"domainid,omitempty"` + Key string `json:"key,omitempty"` + Project string `json:"project,omitempty"` + Projectid string `json:"projectid,omitempty"` + Resourceid string `json:"resourceid,omitempty"` + Resourcetype string `json:"resourcetype,omitempty"` + Value string `json:"value,omitempty"` + } `json:"tags,omitempty"` + Virtualmachinedisplayname string `json:"virtualmachinedisplayname,omitempty"` + Virtualmachineid string `json:"virtualmachineid,omitempty"` + Virtualmachinename string `json:"virtualmachinename,omitempty"` + Vmguestip string `json:"vmguestip,omitempty"` +} + +type DeleteIpForwardingRuleParams struct { + p map[string]interface{} +} + +func (p *DeleteIpForwardingRuleParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["id"]; found { + u.Set("id", v.(string)) + } + return u +} + +func (p *DeleteIpForwardingRuleParams) SetId(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["id"] = v + return +} 
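A note on the parameter pattern visible in all the Set* methods above: the backing map is allocated lazily, so even a zero-value params struct is safe to mutate, but only the New*Params constructors (such as the one just below) guarantee the required keys are present. A hedged sketch contrasting the two routes to the delete call that follows; the client `cs` and the rule UUID are placeholders:

	// Preferred: the constructor seeds the required "id" key.
	p := cs.NAT.NewDeleteIpForwardingRuleParams("fwd-rule-uuid")

	// Also legal, since SetId allocates the map on first use, but nothing
	// forces you to supply the parameters the API actually requires.
	var q cloudstack.DeleteIpForwardingRuleParams
	q.SetId("fwd-rule-uuid")

	r, err := cs.NAT.DeleteIpForwardingRule(p)
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("deleted: %t", r.Success)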
+ +// You should always use this function to get a new DeleteIpForwardingRuleParams instance, +// as then you are sure you have configured all required params +func (s *NATService) NewDeleteIpForwardingRuleParams(id string) *DeleteIpForwardingRuleParams { + p := &DeleteIpForwardingRuleParams{} + p.p = make(map[string]interface{}) + p.p["id"] = id + return p +} + +// Deletes an ip forwarding rule +func (s *NATService) DeleteIpForwardingRule(p *DeleteIpForwardingRuleParams) (*DeleteIpForwardingRuleResponse, error) { + resp, err := s.cs.newRequest("deleteIpForwardingRule", p.toURLValues()) + if err != nil { + return nil, err + } + + var r DeleteIpForwardingRuleResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + + // If we have a async client, we need to wait for the async result + if s.cs.async { + b, err := s.cs.GetAsyncJobResult(r.JobID, s.cs.timeout) + if err != nil { + if err == AsyncTimeoutErr { + return &r, err + } + return nil, err + } + + if err := json.Unmarshal(b, &r); err != nil { + return nil, err + } + } + return &r, nil +} + +type DeleteIpForwardingRuleResponse struct { + JobID string `json:"jobid,omitempty"` + Displaytext string `json:"displaytext,omitempty"` + Success bool `json:"success,omitempty"` +} + +type ListIpForwardingRulesParams struct { + p map[string]interface{} +} + +func (p *ListIpForwardingRulesParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["account"]; found { + u.Set("account", v.(string)) + } + if v, found := p.p["domainid"]; found { + u.Set("domainid", v.(string)) + } + if v, found := p.p["id"]; found { + u.Set("id", v.(string)) + } + if v, found := p.p["ipaddressid"]; found { + u.Set("ipaddressid", v.(string)) + } + if v, found := p.p["isrecursive"]; found { + vv := strconv.FormatBool(v.(bool)) + u.Set("isrecursive", vv) + } + if v, found := p.p["keyword"]; found { + u.Set("keyword", v.(string)) + } + if v, found := p.p["listall"]; found { + vv := strconv.FormatBool(v.(bool)) + u.Set("listall", vv) + } + if v, found := p.p["page"]; found { + vv := strconv.Itoa(v.(int)) + u.Set("page", vv) + } + if v, found := p.p["pagesize"]; found { + vv := strconv.Itoa(v.(int)) + u.Set("pagesize", vv) + } + if v, found := p.p["projectid"]; found { + u.Set("projectid", v.(string)) + } + if v, found := p.p["virtualmachineid"]; found { + u.Set("virtualmachineid", v.(string)) + } + return u +} + +func (p *ListIpForwardingRulesParams) SetAccount(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["account"] = v + return +} + +func (p *ListIpForwardingRulesParams) SetDomainid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["domainid"] = v + return +} + +func (p *ListIpForwardingRulesParams) SetId(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["id"] = v + return +} + +func (p *ListIpForwardingRulesParams) SetIpaddressid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["ipaddressid"] = v + return +} + +func (p *ListIpForwardingRulesParams) SetIsrecursive(v bool) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["isrecursive"] = v + return +} + +func (p *ListIpForwardingRulesParams) SetKeyword(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["keyword"] = v + return +} + +func (p *ListIpForwardingRulesParams) SetListall(v bool) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["listall"] = v + return +} + +func (p 
*ListIpForwardingRulesParams) SetPage(v int) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["page"] = v + return +} + +func (p *ListIpForwardingRulesParams) SetPagesize(v int) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["pagesize"] = v + return +} + +func (p *ListIpForwardingRulesParams) SetProjectid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["projectid"] = v + return +} + +func (p *ListIpForwardingRulesParams) SetVirtualmachineid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["virtualmachineid"] = v + return +} + +// You should always use this function to get a new ListIpForwardingRulesParams instance, +// as then you are sure you have configured all required params +func (s *NATService) NewListIpForwardingRulesParams() *ListIpForwardingRulesParams { + p := &ListIpForwardingRulesParams{} + p.p = make(map[string]interface{}) + return p +} + +// This is a courtesy helper function, which in some cases may not work as expected! +func (s *NATService) GetIpForwardingRuleByID(id string) (*IpForwardingRule, int, error) { + p := &ListIpForwardingRulesParams{} + p.p = make(map[string]interface{}) + + p.p["id"] = id + + l, err := s.ListIpForwardingRules(p) + if err != nil { + if strings.Contains(err.Error(), fmt.Sprintf( + "Invalid parameter id value=%s due to incorrect long value format, "+ + "or entity does not exist", id)) { + return nil, 0, fmt.Errorf("No match found for %s: %+v", id, l) + } + return nil, -1, err + } + + if l.Count == 0 { + // If no matches, search all projects + p.p["projectid"] = "-1" + + l, err = s.ListIpForwardingRules(p) + if err != nil { + if strings.Contains(err.Error(), fmt.Sprintf( + "Invalid parameter id value=%s due to incorrect long value format, "+ + "or entity does not exist", id)) { + return nil, 0, fmt.Errorf("No match found for %s: %+v", id, l) + } + return nil, -1, err + } + } + + if l.Count == 0 { + return nil, l.Count, fmt.Errorf("No match found for %s: %+v", id, l) + } + + if l.Count == 1 { + return l.IpForwardingRules[0], l.Count, nil + } + return nil, l.Count, fmt.Errorf("There is more then one result for IpForwardingRule UUID: %s!", id) +} + +// List the ip forwarding rules +func (s *NATService) ListIpForwardingRules(p *ListIpForwardingRulesParams) (*ListIpForwardingRulesResponse, error) { + resp, err := s.cs.newRequest("listIpForwardingRules", p.toURLValues()) + if err != nil { + return nil, err + } + + var r ListIpForwardingRulesResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + return &r, nil +} + +type ListIpForwardingRulesResponse struct { + Count int `json:"count"` + IpForwardingRules []*IpForwardingRule `json:"ipforwardingrule"` +} + +type IpForwardingRule struct { + Cidrlist string `json:"cidrlist,omitempty"` + Fordisplay bool `json:"fordisplay,omitempty"` + Id string `json:"id,omitempty"` + Ipaddress string `json:"ipaddress,omitempty"` + Ipaddressid string `json:"ipaddressid,omitempty"` + Networkid string `json:"networkid,omitempty"` + Privateendport string `json:"privateendport,omitempty"` + Privateport string `json:"privateport,omitempty"` + Protocol string `json:"protocol,omitempty"` + Publicendport string `json:"publicendport,omitempty"` + Publicport string `json:"publicport,omitempty"` + State string `json:"state,omitempty"` + Tags []struct { + Account string `json:"account,omitempty"` + Customer string `json:"customer,omitempty"` + Domain string `json:"domain,omitempty"` + Domainid string 
`json:"domainid,omitempty"` + Key string `json:"key,omitempty"` + Project string `json:"project,omitempty"` + Projectid string `json:"projectid,omitempty"` + Resourceid string `json:"resourceid,omitempty"` + Resourcetype string `json:"resourcetype,omitempty"` + Value string `json:"value,omitempty"` + } `json:"tags,omitempty"` + Virtualmachinedisplayname string `json:"virtualmachinedisplayname,omitempty"` + Virtualmachineid string `json:"virtualmachineid,omitempty"` + Virtualmachinename string `json:"virtualmachinename,omitempty"` + Vmguestip string `json:"vmguestip,omitempty"` +} + +type DisableStaticNatParams struct { + p map[string]interface{} +} + +func (p *DisableStaticNatParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["ipaddressid"]; found { + u.Set("ipaddressid", v.(string)) + } + return u +} + +func (p *DisableStaticNatParams) SetIpaddressid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["ipaddressid"] = v + return +} + +// You should always use this function to get a new DisableStaticNatParams instance, +// as then you are sure you have configured all required params +func (s *NATService) NewDisableStaticNatParams(ipaddressid string) *DisableStaticNatParams { + p := &DisableStaticNatParams{} + p.p = make(map[string]interface{}) + p.p["ipaddressid"] = ipaddressid + return p +} + +// Disables static rule for given ip address +func (s *NATService) DisableStaticNat(p *DisableStaticNatParams) (*DisableStaticNatResponse, error) { + resp, err := s.cs.newRequest("disableStaticNat", p.toURLValues()) + if err != nil { + return nil, err + } + + var r DisableStaticNatResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + + // If we have a async client, we need to wait for the async result + if s.cs.async { + b, err := s.cs.GetAsyncJobResult(r.JobID, s.cs.timeout) + if err != nil { + if err == AsyncTimeoutErr { + return &r, err + } + return nil, err + } + + if err := json.Unmarshal(b, &r); err != nil { + return nil, err + } + } + return &r, nil +} + +type DisableStaticNatResponse struct { + JobID string `json:"jobid,omitempty"` + Displaytext string `json:"displaytext,omitempty"` + Success bool `json:"success,omitempty"` +} diff --git a/vendor/github.com/xanzy/go-cloudstack/cloudstack/NetworkACLService.go b/vendor/github.com/xanzy/go-cloudstack/cloudstack/NetworkACLService.go new file mode 100644 index 000000000..cc748f061 --- /dev/null +++ b/vendor/github.com/xanzy/go-cloudstack/cloudstack/NetworkACLService.go @@ -0,0 +1,1480 @@ +// +// Copyright 2014, Sander van Harmelen +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// + +package cloudstack + +import ( + "encoding/json" + "fmt" + "net/url" + "strconv" + "strings" +) + +type CreateNetworkACLParams struct { + p map[string]interface{} +} + +func (p *CreateNetworkACLParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["aclid"]; found { + u.Set("aclid", v.(string)) + } + if v, found := p.p["action"]; found { + u.Set("action", v.(string)) + } + if v, found := p.p["cidrlist"]; found { + vv := strings.Join(v.([]string), ",") + u.Set("cidrlist", vv) + } + if v, found := p.p["endport"]; found { + vv := strconv.Itoa(v.(int)) + u.Set("endport", vv) + } + if v, found := p.p["fordisplay"]; found { + vv := strconv.FormatBool(v.(bool)) + u.Set("fordisplay", vv) + } + if v, found := p.p["icmpcode"]; found { + vv := strconv.Itoa(v.(int)) + u.Set("icmpcode", vv) + } + if v, found := p.p["icmptype"]; found { + vv := strconv.Itoa(v.(int)) + u.Set("icmptype", vv) + } + if v, found := p.p["networkid"]; found { + u.Set("networkid", v.(string)) + } + if v, found := p.p["number"]; found { + vv := strconv.Itoa(v.(int)) + u.Set("number", vv) + } + if v, found := p.p["protocol"]; found { + u.Set("protocol", v.(string)) + } + if v, found := p.p["startport"]; found { + vv := strconv.Itoa(v.(int)) + u.Set("startport", vv) + } + if v, found := p.p["traffictype"]; found { + u.Set("traffictype", v.(string)) + } + return u +} + +func (p *CreateNetworkACLParams) SetAclid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["aclid"] = v + return +} + +func (p *CreateNetworkACLParams) SetAction(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["action"] = v + return +} + +func (p *CreateNetworkACLParams) SetCidrlist(v []string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["cidrlist"] = v + return +} + +func (p *CreateNetworkACLParams) SetEndport(v int) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["endport"] = v + return +} + +func (p *CreateNetworkACLParams) SetFordisplay(v bool) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["fordisplay"] = v + return +} + +func (p *CreateNetworkACLParams) SetIcmpcode(v int) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["icmpcode"] = v + return +} + +func (p *CreateNetworkACLParams) SetIcmptype(v int) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["icmptype"] = v + return +} + +func (p *CreateNetworkACLParams) SetNetworkid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["networkid"] = v + return +} + +func (p *CreateNetworkACLParams) SetNumber(v int) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["number"] = v + return +} + +func (p *CreateNetworkACLParams) SetProtocol(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["protocol"] = v + return +} + +func (p *CreateNetworkACLParams) SetStartport(v int) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["startport"] = v + return +} + +func (p *CreateNetworkACLParams) SetTraffictype(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["traffictype"] = v + return +} + +// You should always use this function to get a new CreateNetworkACLParams instance, +// as then you are sure you have configured all required params +func (s *NetworkACLService) NewCreateNetworkACLParams(protocol string) *CreateNetworkACLParams { + p := &CreateNetworkACLParams{} + p.p = make(map[string]interface{}) + 
p.p["protocol"] = protocol + return p +} + +// Creates a ACL rule in the given network (the network has to belong to VPC) +func (s *NetworkACLService) CreateNetworkACL(p *CreateNetworkACLParams) (*CreateNetworkACLResponse, error) { + resp, err := s.cs.newRequest("createNetworkACL", p.toURLValues()) + if err != nil { + return nil, err + } + + var r CreateNetworkACLResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + + // If we have a async client, we need to wait for the async result + if s.cs.async { + b, err := s.cs.GetAsyncJobResult(r.JobID, s.cs.timeout) + if err != nil { + if err == AsyncTimeoutErr { + return &r, err + } + return nil, err + } + + b, err = getRawValue(b) + if err != nil { + return nil, err + } + + if err := json.Unmarshal(b, &r); err != nil { + return nil, err + } + } + return &r, nil +} + +type CreateNetworkACLResponse struct { + JobID string `json:"jobid,omitempty"` + Aclid string `json:"aclid,omitempty"` + Action string `json:"action,omitempty"` + Cidrlist string `json:"cidrlist,omitempty"` + Endport string `json:"endport,omitempty"` + Fordisplay bool `json:"fordisplay,omitempty"` + Icmpcode int `json:"icmpcode,omitempty"` + Icmptype int `json:"icmptype,omitempty"` + Id string `json:"id,omitempty"` + Number int `json:"number,omitempty"` + Protocol string `json:"protocol,omitempty"` + Startport string `json:"startport,omitempty"` + State string `json:"state,omitempty"` + Tags []struct { + Account string `json:"account,omitempty"` + Customer string `json:"customer,omitempty"` + Domain string `json:"domain,omitempty"` + Domainid string `json:"domainid,omitempty"` + Key string `json:"key,omitempty"` + Project string `json:"project,omitempty"` + Projectid string `json:"projectid,omitempty"` + Resourceid string `json:"resourceid,omitempty"` + Resourcetype string `json:"resourcetype,omitempty"` + Value string `json:"value,omitempty"` + } `json:"tags,omitempty"` + Traffictype string `json:"traffictype,omitempty"` +} + +type UpdateNetworkACLItemParams struct { + p map[string]interface{} +} + +func (p *UpdateNetworkACLItemParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["action"]; found { + u.Set("action", v.(string)) + } + if v, found := p.p["cidrlist"]; found { + vv := strings.Join(v.([]string), ",") + u.Set("cidrlist", vv) + } + if v, found := p.p["customid"]; found { + u.Set("customid", v.(string)) + } + if v, found := p.p["endport"]; found { + vv := strconv.Itoa(v.(int)) + u.Set("endport", vv) + } + if v, found := p.p["fordisplay"]; found { + vv := strconv.FormatBool(v.(bool)) + u.Set("fordisplay", vv) + } + if v, found := p.p["icmpcode"]; found { + vv := strconv.Itoa(v.(int)) + u.Set("icmpcode", vv) + } + if v, found := p.p["icmptype"]; found { + vv := strconv.Itoa(v.(int)) + u.Set("icmptype", vv) + } + if v, found := p.p["id"]; found { + u.Set("id", v.(string)) + } + if v, found := p.p["number"]; found { + vv := strconv.Itoa(v.(int)) + u.Set("number", vv) + } + if v, found := p.p["protocol"]; found { + u.Set("protocol", v.(string)) + } + if v, found := p.p["startport"]; found { + vv := strconv.Itoa(v.(int)) + u.Set("startport", vv) + } + if v, found := p.p["traffictype"]; found { + u.Set("traffictype", v.(string)) + } + return u +} + +func (p *UpdateNetworkACLItemParams) SetAction(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["action"] = v + return +} + +func (p *UpdateNetworkACLItemParams) SetCidrlist(v []string) { + if p.p == nil { + p.p = 
make(map[string]interface{}) + } + p.p["cidrlist"] = v + return +} + +func (p *UpdateNetworkACLItemParams) SetCustomid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["customid"] = v + return +} + +func (p *UpdateNetworkACLItemParams) SetEndport(v int) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["endport"] = v + return +} + +func (p *UpdateNetworkACLItemParams) SetFordisplay(v bool) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["fordisplay"] = v + return +} + +func (p *UpdateNetworkACLItemParams) SetIcmpcode(v int) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["icmpcode"] = v + return +} + +func (p *UpdateNetworkACLItemParams) SetIcmptype(v int) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["icmptype"] = v + return +} + +func (p *UpdateNetworkACLItemParams) SetId(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["id"] = v + return +} + +func (p *UpdateNetworkACLItemParams) SetNumber(v int) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["number"] = v + return +} + +func (p *UpdateNetworkACLItemParams) SetProtocol(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["protocol"] = v + return +} + +func (p *UpdateNetworkACLItemParams) SetStartport(v int) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["startport"] = v + return +} + +func (p *UpdateNetworkACLItemParams) SetTraffictype(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["traffictype"] = v + return +} + +// You should always use this function to get a new UpdateNetworkACLItemParams instance, +// as then you are sure you have configured all required params +func (s *NetworkACLService) NewUpdateNetworkACLItemParams(id string) *UpdateNetworkACLItemParams { + p := &UpdateNetworkACLItemParams{} + p.p = make(map[string]interface{}) + p.p["id"] = id + return p +} + +// Updates ACL Item with specified Id +func (s *NetworkACLService) UpdateNetworkACLItem(p *UpdateNetworkACLItemParams) (*UpdateNetworkACLItemResponse, error) { + resp, err := s.cs.newRequest("updateNetworkACLItem", p.toURLValues()) + if err != nil { + return nil, err + } + + var r UpdateNetworkACLItemResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + + // If we have a async client, we need to wait for the async result + if s.cs.async { + b, err := s.cs.GetAsyncJobResult(r.JobID, s.cs.timeout) + if err != nil { + if err == AsyncTimeoutErr { + return &r, err + } + return nil, err + } + + b, err = getRawValue(b) + if err != nil { + return nil, err + } + + if err := json.Unmarshal(b, &r); err != nil { + return nil, err + } + } + return &r, nil +} + +type UpdateNetworkACLItemResponse struct { + JobID string `json:"jobid,omitempty"` + Aclid string `json:"aclid,omitempty"` + Action string `json:"action,omitempty"` + Cidrlist string `json:"cidrlist,omitempty"` + Endport string `json:"endport,omitempty"` + Fordisplay bool `json:"fordisplay,omitempty"` + Icmpcode int `json:"icmpcode,omitempty"` + Icmptype int `json:"icmptype,omitempty"` + Id string `json:"id,omitempty"` + Number int `json:"number,omitempty"` + Protocol string `json:"protocol,omitempty"` + Startport string `json:"startport,omitempty"` + State string `json:"state,omitempty"` + Tags []struct { + Account string `json:"account,omitempty"` + Customer string `json:"customer,omitempty"` + Domain string `json:"domain,omitempty"` + Domainid string 
`json:"domainid,omitempty"` + Key string `json:"key,omitempty"` + Project string `json:"project,omitempty"` + Projectid string `json:"projectid,omitempty"` + Resourceid string `json:"resourceid,omitempty"` + Resourcetype string `json:"resourcetype,omitempty"` + Value string `json:"value,omitempty"` + } `json:"tags,omitempty"` + Traffictype string `json:"traffictype,omitempty"` +} + +type DeleteNetworkACLParams struct { + p map[string]interface{} +} + +func (p *DeleteNetworkACLParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["id"]; found { + u.Set("id", v.(string)) + } + return u +} + +func (p *DeleteNetworkACLParams) SetId(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["id"] = v + return +} + +// You should always use this function to get a new DeleteNetworkACLParams instance, +// as then you are sure you have configured all required params +func (s *NetworkACLService) NewDeleteNetworkACLParams(id string) *DeleteNetworkACLParams { + p := &DeleteNetworkACLParams{} + p.p = make(map[string]interface{}) + p.p["id"] = id + return p +} + +// Deletes a Network ACL +func (s *NetworkACLService) DeleteNetworkACL(p *DeleteNetworkACLParams) (*DeleteNetworkACLResponse, error) { + resp, err := s.cs.newRequest("deleteNetworkACL", p.toURLValues()) + if err != nil { + return nil, err + } + + var r DeleteNetworkACLResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + + // If we have a async client, we need to wait for the async result + if s.cs.async { + b, err := s.cs.GetAsyncJobResult(r.JobID, s.cs.timeout) + if err != nil { + if err == AsyncTimeoutErr { + return &r, err + } + return nil, err + } + + if err := json.Unmarshal(b, &r); err != nil { + return nil, err + } + } + return &r, nil +} + +type DeleteNetworkACLResponse struct { + JobID string `json:"jobid,omitempty"` + Displaytext string `json:"displaytext,omitempty"` + Success bool `json:"success,omitempty"` +} + +type ListNetworkACLsParams struct { + p map[string]interface{} +} + +func (p *ListNetworkACLsParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["account"]; found { + u.Set("account", v.(string)) + } + if v, found := p.p["aclid"]; found { + u.Set("aclid", v.(string)) + } + if v, found := p.p["action"]; found { + u.Set("action", v.(string)) + } + if v, found := p.p["domainid"]; found { + u.Set("domainid", v.(string)) + } + if v, found := p.p["fordisplay"]; found { + vv := strconv.FormatBool(v.(bool)) + u.Set("fordisplay", vv) + } + if v, found := p.p["id"]; found { + u.Set("id", v.(string)) + } + if v, found := p.p["isrecursive"]; found { + vv := strconv.FormatBool(v.(bool)) + u.Set("isrecursive", vv) + } + if v, found := p.p["keyword"]; found { + u.Set("keyword", v.(string)) + } + if v, found := p.p["listall"]; found { + vv := strconv.FormatBool(v.(bool)) + u.Set("listall", vv) + } + if v, found := p.p["networkid"]; found { + u.Set("networkid", v.(string)) + } + if v, found := p.p["page"]; found { + vv := strconv.Itoa(v.(int)) + u.Set("page", vv) + } + if v, found := p.p["pagesize"]; found { + vv := strconv.Itoa(v.(int)) + u.Set("pagesize", vv) + } + if v, found := p.p["projectid"]; found { + u.Set("projectid", v.(string)) + } + if v, found := p.p["protocol"]; found { + u.Set("protocol", v.(string)) + } + if v, found := p.p["tags"]; found { + i := 0 + for k, vv := range v.(map[string]string) { + u.Set(fmt.Sprintf("tags[%d].key", i), k) + 
u.Set(fmt.Sprintf("tags[%d].value", i), vv) + i++ + } + } + if v, found := p.p["traffictype"]; found { + u.Set("traffictype", v.(string)) + } + return u +} + +func (p *ListNetworkACLsParams) SetAccount(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["account"] = v + return +} + +func (p *ListNetworkACLsParams) SetAclid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["aclid"] = v + return +} + +func (p *ListNetworkACLsParams) SetAction(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["action"] = v + return +} + +func (p *ListNetworkACLsParams) SetDomainid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["domainid"] = v + return +} + +func (p *ListNetworkACLsParams) SetFordisplay(v bool) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["fordisplay"] = v + return +} + +func (p *ListNetworkACLsParams) SetId(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["id"] = v + return +} + +func (p *ListNetworkACLsParams) SetIsrecursive(v bool) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["isrecursive"] = v + return +} + +func (p *ListNetworkACLsParams) SetKeyword(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["keyword"] = v + return +} + +func (p *ListNetworkACLsParams) SetListall(v bool) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["listall"] = v + return +} + +func (p *ListNetworkACLsParams) SetNetworkid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["networkid"] = v + return +} + +func (p *ListNetworkACLsParams) SetPage(v int) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["page"] = v + return +} + +func (p *ListNetworkACLsParams) SetPagesize(v int) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["pagesize"] = v + return +} + +func (p *ListNetworkACLsParams) SetProjectid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["projectid"] = v + return +} + +func (p *ListNetworkACLsParams) SetProtocol(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["protocol"] = v + return +} + +func (p *ListNetworkACLsParams) SetTags(v map[string]string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["tags"] = v + return +} + +func (p *ListNetworkACLsParams) SetTraffictype(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["traffictype"] = v + return +} + +// You should always use this function to get a new ListNetworkACLsParams instance, +// as then you are sure you have configured all required params +func (s *NetworkACLService) NewListNetworkACLsParams() *ListNetworkACLsParams { + p := &ListNetworkACLsParams{} + p.p = make(map[string]interface{}) + return p +} + +// This is a courtesy helper function, which in some cases may not work as expected! 
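+// (Editorial note, not in the upstream file:) the int return value follows
+// the convention used throughout this package: -1 when the list call itself
+// fails, 0 when the ID matches nothing, and the match count otherwise. If
+// the first lookup comes back empty, the helper retries with projectid=-1
+// so the search spans all projects before giving up.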
+func (s *NetworkACLService) GetNetworkACLByID(id string) (*NetworkACL, int, error) { + p := &ListNetworkACLsParams{} + p.p = make(map[string]interface{}) + + p.p["id"] = id + + l, err := s.ListNetworkACLs(p) + if err != nil { + if strings.Contains(err.Error(), fmt.Sprintf( + "Invalid parameter id value=%s due to incorrect long value format, "+ + "or entity does not exist", id)) { + return nil, 0, fmt.Errorf("No match found for %s: %+v", id, l) + } + return nil, -1, err + } + + if l.Count == 0 { + // If no matches, search all projects + p.p["projectid"] = "-1" + + l, err = s.ListNetworkACLs(p) + if err != nil { + if strings.Contains(err.Error(), fmt.Sprintf( + "Invalid parameter id value=%s due to incorrect long value format, "+ + "or entity does not exist", id)) { + return nil, 0, fmt.Errorf("No match found for %s: %+v", id, l) + } + return nil, -1, err + } + } + + if l.Count == 0 { + return nil, l.Count, fmt.Errorf("No match found for %s: %+v", id, l) + } + + if l.Count == 1 { + return l.NetworkACLs[0], l.Count, nil + } + return nil, l.Count, fmt.Errorf("There is more then one result for NetworkACL UUID: %s!", id) +} + +// Lists all network ACL items +func (s *NetworkACLService) ListNetworkACLs(p *ListNetworkACLsParams) (*ListNetworkACLsResponse, error) { + resp, err := s.cs.newRequest("listNetworkACLs", p.toURLValues()) + if err != nil { + return nil, err + } + + var r ListNetworkACLsResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + return &r, nil +} + +type ListNetworkACLsResponse struct { + Count int `json:"count"` + NetworkACLs []*NetworkACL `json:"networkacl"` +} + +type NetworkACL struct { + Aclid string `json:"aclid,omitempty"` + Action string `json:"action,omitempty"` + Cidrlist string `json:"cidrlist,omitempty"` + Endport string `json:"endport,omitempty"` + Fordisplay bool `json:"fordisplay,omitempty"` + Icmpcode int `json:"icmpcode,omitempty"` + Icmptype int `json:"icmptype,omitempty"` + Id string `json:"id,omitempty"` + Number int `json:"number,omitempty"` + Protocol string `json:"protocol,omitempty"` + Startport string `json:"startport,omitempty"` + State string `json:"state,omitempty"` + Tags []struct { + Account string `json:"account,omitempty"` + Customer string `json:"customer,omitempty"` + Domain string `json:"domain,omitempty"` + Domainid string `json:"domainid,omitempty"` + Key string `json:"key,omitempty"` + Project string `json:"project,omitempty"` + Projectid string `json:"projectid,omitempty"` + Resourceid string `json:"resourceid,omitempty"` + Resourcetype string `json:"resourcetype,omitempty"` + Value string `json:"value,omitempty"` + } `json:"tags,omitempty"` + Traffictype string `json:"traffictype,omitempty"` +} + +type CreateNetworkACLListParams struct { + p map[string]interface{} +} + +func (p *CreateNetworkACLListParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["description"]; found { + u.Set("description", v.(string)) + } + if v, found := p.p["fordisplay"]; found { + vv := strconv.FormatBool(v.(bool)) + u.Set("fordisplay", vv) + } + if v, found := p.p["name"]; found { + u.Set("name", v.(string)) + } + if v, found := p.p["vpcid"]; found { + u.Set("vpcid", v.(string)) + } + return u +} + +func (p *CreateNetworkACLListParams) SetDescription(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["description"] = v + return +} + +func (p *CreateNetworkACLListParams) SetFordisplay(v bool) { + if p.p == nil { + p.p = make(map[string]interface{}) + } 
+ p.p["fordisplay"] = v + return +} + +func (p *CreateNetworkACLListParams) SetName(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["name"] = v + return +} + +func (p *CreateNetworkACLListParams) SetVpcid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["vpcid"] = v + return +} + +// You should always use this function to get a new CreateNetworkACLListParams instance, +// as then you are sure you have configured all required params +func (s *NetworkACLService) NewCreateNetworkACLListParams(name string, vpcid string) *CreateNetworkACLListParams { + p := &CreateNetworkACLListParams{} + p.p = make(map[string]interface{}) + p.p["name"] = name + p.p["vpcid"] = vpcid + return p +} + +// Creates a Network ACL for the given VPC +func (s *NetworkACLService) CreateNetworkACLList(p *CreateNetworkACLListParams) (*CreateNetworkACLListResponse, error) { + resp, err := s.cs.newRequest("createNetworkACLList", p.toURLValues()) + if err != nil { + return nil, err + } + + var r CreateNetworkACLListResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + + // If we have a async client, we need to wait for the async result + if s.cs.async { + b, err := s.cs.GetAsyncJobResult(r.JobID, s.cs.timeout) + if err != nil { + if err == AsyncTimeoutErr { + return &r, err + } + return nil, err + } + + b, err = getRawValue(b) + if err != nil { + return nil, err + } + + if err := json.Unmarshal(b, &r); err != nil { + return nil, err + } + } + return &r, nil +} + +type CreateNetworkACLListResponse struct { + JobID string `json:"jobid,omitempty"` + Description string `json:"description,omitempty"` + Fordisplay bool `json:"fordisplay,omitempty"` + Id string `json:"id,omitempty"` + Name string `json:"name,omitempty"` + Vpcid string `json:"vpcid,omitempty"` +} + +type DeleteNetworkACLListParams struct { + p map[string]interface{} +} + +func (p *DeleteNetworkACLListParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["id"]; found { + u.Set("id", v.(string)) + } + return u +} + +func (p *DeleteNetworkACLListParams) SetId(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["id"] = v + return +} + +// You should always use this function to get a new DeleteNetworkACLListParams instance, +// as then you are sure you have configured all required params +func (s *NetworkACLService) NewDeleteNetworkACLListParams(id string) *DeleteNetworkACLListParams { + p := &DeleteNetworkACLListParams{} + p.p = make(map[string]interface{}) + p.p["id"] = id + return p +} + +// Deletes a Network ACL +func (s *NetworkACLService) DeleteNetworkACLList(p *DeleteNetworkACLListParams) (*DeleteNetworkACLListResponse, error) { + resp, err := s.cs.newRequest("deleteNetworkACLList", p.toURLValues()) + if err != nil { + return nil, err + } + + var r DeleteNetworkACLListResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + + // If we have a async client, we need to wait for the async result + if s.cs.async { + b, err := s.cs.GetAsyncJobResult(r.JobID, s.cs.timeout) + if err != nil { + if err == AsyncTimeoutErr { + return &r, err + } + return nil, err + } + + if err := json.Unmarshal(b, &r); err != nil { + return nil, err + } + } + return &r, nil +} + +type DeleteNetworkACLListResponse struct { + JobID string `json:"jobid,omitempty"` + Displaytext string `json:"displaytext,omitempty"` + Success bool `json:"success,omitempty"` +} + +type ReplaceNetworkACLListParams struct { + 
p map[string]interface{} +} + +func (p *ReplaceNetworkACLListParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["aclid"]; found { + u.Set("aclid", v.(string)) + } + if v, found := p.p["gatewayid"]; found { + u.Set("gatewayid", v.(string)) + } + if v, found := p.p["networkid"]; found { + u.Set("networkid", v.(string)) + } + return u +} + +func (p *ReplaceNetworkACLListParams) SetAclid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["aclid"] = v + return +} + +func (p *ReplaceNetworkACLListParams) SetGatewayid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["gatewayid"] = v + return +} + +func (p *ReplaceNetworkACLListParams) SetNetworkid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["networkid"] = v + return +} + +// You should always use this function to get a new ReplaceNetworkACLListParams instance, +// as then you are sure you have configured all required params +func (s *NetworkACLService) NewReplaceNetworkACLListParams(aclid string) *ReplaceNetworkACLListParams { + p := &ReplaceNetworkACLListParams{} + p.p = make(map[string]interface{}) + p.p["aclid"] = aclid + return p +} + +// Replaces ACL associated with a Network or private gateway +func (s *NetworkACLService) ReplaceNetworkACLList(p *ReplaceNetworkACLListParams) (*ReplaceNetworkACLListResponse, error) { + resp, err := s.cs.newRequest("replaceNetworkACLList", p.toURLValues()) + if err != nil { + return nil, err + } + + var r ReplaceNetworkACLListResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + + // If we have a async client, we need to wait for the async result + if s.cs.async { + b, err := s.cs.GetAsyncJobResult(r.JobID, s.cs.timeout) + if err != nil { + if err == AsyncTimeoutErr { + return &r, err + } + return nil, err + } + + if err := json.Unmarshal(b, &r); err != nil { + return nil, err + } + } + return &r, nil +} + +type ReplaceNetworkACLListResponse struct { + JobID string `json:"jobid,omitempty"` + Displaytext string `json:"displaytext,omitempty"` + Success bool `json:"success,omitempty"` +} + +type ListNetworkACLListsParams struct { + p map[string]interface{} +} + +func (p *ListNetworkACLListsParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["account"]; found { + u.Set("account", v.(string)) + } + if v, found := p.p["domainid"]; found { + u.Set("domainid", v.(string)) + } + if v, found := p.p["fordisplay"]; found { + vv := strconv.FormatBool(v.(bool)) + u.Set("fordisplay", vv) + } + if v, found := p.p["id"]; found { + u.Set("id", v.(string)) + } + if v, found := p.p["isrecursive"]; found { + vv := strconv.FormatBool(v.(bool)) + u.Set("isrecursive", vv) + } + if v, found := p.p["keyword"]; found { + u.Set("keyword", v.(string)) + } + if v, found := p.p["listall"]; found { + vv := strconv.FormatBool(v.(bool)) + u.Set("listall", vv) + } + if v, found := p.p["name"]; found { + u.Set("name", v.(string)) + } + if v, found := p.p["networkid"]; found { + u.Set("networkid", v.(string)) + } + if v, found := p.p["page"]; found { + vv := strconv.Itoa(v.(int)) + u.Set("page", vv) + } + if v, found := p.p["pagesize"]; found { + vv := strconv.Itoa(v.(int)) + u.Set("pagesize", vv) + } + if v, found := p.p["projectid"]; found { + u.Set("projectid", v.(string)) + } + if v, found := p.p["vpcid"]; found { + u.Set("vpcid", v.(string)) + } + return u +} + +func (p *ListNetworkACLListsParams) SetAccount(v 
string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["account"] = v + return +} + +func (p *ListNetworkACLListsParams) SetDomainid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["domainid"] = v + return +} + +func (p *ListNetworkACLListsParams) SetFordisplay(v bool) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["fordisplay"] = v + return +} + +func (p *ListNetworkACLListsParams) SetId(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["id"] = v + return +} + +func (p *ListNetworkACLListsParams) SetIsrecursive(v bool) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["isrecursive"] = v + return +} + +func (p *ListNetworkACLListsParams) SetKeyword(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["keyword"] = v + return +} + +func (p *ListNetworkACLListsParams) SetListall(v bool) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["listall"] = v + return +} + +func (p *ListNetworkACLListsParams) SetName(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["name"] = v + return +} + +func (p *ListNetworkACLListsParams) SetNetworkid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["networkid"] = v + return +} + +func (p *ListNetworkACLListsParams) SetPage(v int) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["page"] = v + return +} + +func (p *ListNetworkACLListsParams) SetPagesize(v int) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["pagesize"] = v + return +} + +func (p *ListNetworkACLListsParams) SetProjectid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["projectid"] = v + return +} + +func (p *ListNetworkACLListsParams) SetVpcid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["vpcid"] = v + return +} + +// You should always use this function to get a new ListNetworkACLListsParams instance, +// as then you are sure you have configured all required params +func (s *NetworkACLService) NewListNetworkACLListsParams() *ListNetworkACLListsParams { + p := &ListNetworkACLListsParams{} + p.p = make(map[string]interface{}) + return p +} + +// This is a courtesy helper function, which in some cases may not work as expected! +func (s *NetworkACLService) GetNetworkACLListID(name string) (string, error) { + p := &ListNetworkACLListsParams{} + p.p = make(map[string]interface{}) + + p.p["name"] = name + + l, err := s.ListNetworkACLLists(p) + if err != nil { + return "", err + } + + if l.Count == 0 { + // If no matches, search all projects + p.p["projectid"] = "-1" + + l, err = s.ListNetworkACLLists(p) + if err != nil { + return "", err + } + } + + if l.Count == 0 { + return "", fmt.Errorf("No match found for %s: %+v", name, l) + } + + if l.Count == 1 { + return l.NetworkACLLists[0].Id, nil + } + + if l.Count > 1 { + for _, v := range l.NetworkACLLists { + if v.Name == name { + return v.Id, nil + } + } + } + return "", fmt.Errorf("Could not find an exact match for %s: %+v", name, l) +} + +// This is a courtesy helper function, which in some cases may not work as expected! 
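+// (Editorial note, not in the upstream file:) lookup by name is a two-step
+// operation: GetNetworkACLListID resolves the name to a UUID, scanning for
+// an exact name match when the keyword returns several lists, and the
+// result is then re-fetched with GetNetworkACLListByID so callers get the
+// same count semantics as a direct ID lookup.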
+func (s *NetworkACLService) GetNetworkACLListByName(name string) (*NetworkACLList, int, error) { + id, err := s.GetNetworkACLListID(name) + if err != nil { + return nil, -1, err + } + + r, count, err := s.GetNetworkACLListByID(id) + if err != nil { + return nil, count, err + } + return r, count, nil +} + +// This is a courtesy helper function, which in some cases may not work as expected! +func (s *NetworkACLService) GetNetworkACLListByID(id string) (*NetworkACLList, int, error) { + p := &ListNetworkACLListsParams{} + p.p = make(map[string]interface{}) + + p.p["id"] = id + + l, err := s.ListNetworkACLLists(p) + if err != nil { + if strings.Contains(err.Error(), fmt.Sprintf( + "Invalid parameter id value=%s due to incorrect long value format, "+ + "or entity does not exist", id)) { + return nil, 0, fmt.Errorf("No match found for %s: %+v", id, l) + } + return nil, -1, err + } + + if l.Count == 0 { + // If no matches, search all projects + p.p["projectid"] = "-1" + + l, err = s.ListNetworkACLLists(p) + if err != nil { + if strings.Contains(err.Error(), fmt.Sprintf( + "Invalid parameter id value=%s due to incorrect long value format, "+ + "or entity does not exist", id)) { + return nil, 0, fmt.Errorf("No match found for %s: %+v", id, l) + } + return nil, -1, err + } + } + + if l.Count == 0 { + return nil, l.Count, fmt.Errorf("No match found for %s: %+v", id, l) + } + + if l.Count == 1 { + return l.NetworkACLLists[0], l.Count, nil + } + return nil, l.Count, fmt.Errorf("There is more then one result for NetworkACLList UUID: %s!", id) +} + +// Lists all network ACLs +func (s *NetworkACLService) ListNetworkACLLists(p *ListNetworkACLListsParams) (*ListNetworkACLListsResponse, error) { + resp, err := s.cs.newRequest("listNetworkACLLists", p.toURLValues()) + if err != nil { + return nil, err + } + + var r ListNetworkACLListsResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + return &r, nil +} + +type ListNetworkACLListsResponse struct { + Count int `json:"count"` + NetworkACLLists []*NetworkACLList `json:"networkacllist"` +} + +type NetworkACLList struct { + Description string `json:"description,omitempty"` + Fordisplay bool `json:"fordisplay,omitempty"` + Id string `json:"id,omitempty"` + Name string `json:"name,omitempty"` + Vpcid string `json:"vpcid,omitempty"` +} + +type UpdateNetworkACLListParams struct { + p map[string]interface{} +} + +func (p *UpdateNetworkACLListParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["customid"]; found { + u.Set("customid", v.(string)) + } + if v, found := p.p["fordisplay"]; found { + vv := strconv.FormatBool(v.(bool)) + u.Set("fordisplay", vv) + } + if v, found := p.p["id"]; found { + u.Set("id", v.(string)) + } + return u +} + +func (p *UpdateNetworkACLListParams) SetCustomid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["customid"] = v + return +} + +func (p *UpdateNetworkACLListParams) SetFordisplay(v bool) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["fordisplay"] = v + return +} + +func (p *UpdateNetworkACLListParams) SetId(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["id"] = v + return +} + +// You should always use this function to get a new UpdateNetworkACLListParams instance, +// as then you are sure you have configured all required params +func (s *NetworkACLService) NewUpdateNetworkACLListParams(id string) *UpdateNetworkACLListParams { + p := &UpdateNetworkACLListParams{} + p.p 
= make(map[string]interface{}) + p.p["id"] = id + return p +} + +// Updates Network ACL list +func (s *NetworkACLService) UpdateNetworkACLList(p *UpdateNetworkACLListParams) (*UpdateNetworkACLListResponse, error) { + resp, err := s.cs.newRequest("updateNetworkACLList", p.toURLValues()) + if err != nil { + return nil, err + } + + var r UpdateNetworkACLListResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + + // If we have a async client, we need to wait for the async result + if s.cs.async { + b, err := s.cs.GetAsyncJobResult(r.JobID, s.cs.timeout) + if err != nil { + if err == AsyncTimeoutErr { + return &r, err + } + return nil, err + } + + if err := json.Unmarshal(b, &r); err != nil { + return nil, err + } + } + return &r, nil +} + +type UpdateNetworkACLListResponse struct { + JobID string `json:"jobid,omitempty"` + Displaytext string `json:"displaytext,omitempty"` + Success bool `json:"success,omitempty"` +} diff --git a/vendor/github.com/xanzy/go-cloudstack/cloudstack/NetworkDeviceService.go b/vendor/github.com/xanzy/go-cloudstack/cloudstack/NetworkDeviceService.go new file mode 100644 index 000000000..7b8e64bcc --- /dev/null +++ b/vendor/github.com/xanzy/go-cloudstack/cloudstack/NetworkDeviceService.go @@ -0,0 +1,245 @@ +// +// Copyright 2014, Sander van Harmelen +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// + +package cloudstack + +import ( + "encoding/json" + "fmt" + "net/url" + "strconv" +) + +type AddNetworkDeviceParams struct { + p map[string]interface{} +} + +func (p *AddNetworkDeviceParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["networkdeviceparameterlist"]; found { + i := 0 + for k, vv := range v.(map[string]string) { + u.Set(fmt.Sprintf("networkdeviceparameterlist[%d].key", i), k) + u.Set(fmt.Sprintf("networkdeviceparameterlist[%d].value", i), vv) + i++ + } + } + if v, found := p.p["networkdevicetype"]; found { + u.Set("networkdevicetype", v.(string)) + } + return u +} + +func (p *AddNetworkDeviceParams) SetNetworkdeviceparameterlist(v map[string]string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["networkdeviceparameterlist"] = v + return +} + +func (p *AddNetworkDeviceParams) SetNetworkdevicetype(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["networkdevicetype"] = v + return +} + +// You should always use this function to get a new AddNetworkDeviceParams instance, +// as then you are sure you have configured all required params +func (s *NetworkDeviceService) NewAddNetworkDeviceParams() *AddNetworkDeviceParams { + p := &AddNetworkDeviceParams{} + p.p = make(map[string]interface{}) + return p +} + +// Adds a network device of one of the following types: ExternalDhcp, ExternalFirewall, ExternalLoadBalancer, PxeServer +func (s *NetworkDeviceService) AddNetworkDevice(p *AddNetworkDeviceParams) (*AddNetworkDeviceResponse, error) { + resp, err := s.cs.newRequest("addNetworkDevice", p.toURLValues()) + if err != nil { + return nil, err + } + + var r AddNetworkDeviceResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + return &r, nil +} + +type AddNetworkDeviceResponse struct { + Id string `json:"id,omitempty"` +} + +type ListNetworkDeviceParams struct { + p map[string]interface{} +} + +func (p *ListNetworkDeviceParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["keyword"]; found { + u.Set("keyword", v.(string)) + } + if v, found := p.p["networkdeviceparameterlist"]; found { + i := 0 + for k, vv := range v.(map[string]string) { + u.Set(fmt.Sprintf("networkdeviceparameterlist[%d].key", i), k) + u.Set(fmt.Sprintf("networkdeviceparameterlist[%d].value", i), vv) + i++ + } + } + if v, found := p.p["networkdevicetype"]; found { + u.Set("networkdevicetype", v.(string)) + } + if v, found := p.p["page"]; found { + vv := strconv.Itoa(v.(int)) + u.Set("page", vv) + } + if v, found := p.p["pagesize"]; found { + vv := strconv.Itoa(v.(int)) + u.Set("pagesize", vv) + } + return u +} + +func (p *ListNetworkDeviceParams) SetKeyword(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["keyword"] = v + return +} + +func (p *ListNetworkDeviceParams) SetNetworkdeviceparameterlist(v map[string]string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["networkdeviceparameterlist"] = v + return +} + +func (p *ListNetworkDeviceParams) SetNetworkdevicetype(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["networkdevicetype"] = v + return +} + +func (p *ListNetworkDeviceParams) SetPage(v int) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["page"] = v + return +} + +func (p *ListNetworkDeviceParams) SetPagesize(v int) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["pagesize"] = v + return +} + +// You 
should always use this function to get a new ListNetworkDeviceParams instance, +// as then you are sure you have configured all required params +func (s *NetworkDeviceService) NewListNetworkDeviceParams() *ListNetworkDeviceParams { + p := &ListNetworkDeviceParams{} + p.p = make(map[string]interface{}) + return p +} + +// List network devices +func (s *NetworkDeviceService) ListNetworkDevice(p *ListNetworkDeviceParams) (*ListNetworkDeviceResponse, error) { + resp, err := s.cs.newRequest("listNetworkDevice", p.toURLValues()) + if err != nil { + return nil, err + } + + var r ListNetworkDeviceResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + return &r, nil +} + +type ListNetworkDeviceResponse struct { + Count int `json:"count"` + NetworkDevice []*NetworkDevice `json:"networkdevice"` +} + +type NetworkDevice struct { + Id string `json:"id,omitempty"` +} + +type DeleteNetworkDeviceParams struct { + p map[string]interface{} +} + +func (p *DeleteNetworkDeviceParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["id"]; found { + u.Set("id", v.(string)) + } + return u +} + +func (p *DeleteNetworkDeviceParams) SetId(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["id"] = v + return +} + +// You should always use this function to get a new DeleteNetworkDeviceParams instance, +// as then you are sure you have configured all required params +func (s *NetworkDeviceService) NewDeleteNetworkDeviceParams(id string) *DeleteNetworkDeviceParams { + p := &DeleteNetworkDeviceParams{} + p.p = make(map[string]interface{}) + p.p["id"] = id + return p +} + +// Deletes network device. +func (s *NetworkDeviceService) DeleteNetworkDevice(p *DeleteNetworkDeviceParams) (*DeleteNetworkDeviceResponse, error) { + resp, err := s.cs.newRequest("deleteNetworkDevice", p.toURLValues()) + if err != nil { + return nil, err + } + + var r DeleteNetworkDeviceResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + return &r, nil +} + +type DeleteNetworkDeviceResponse struct { + Displaytext string `json:"displaytext,omitempty"` + Success string `json:"success,omitempty"` +} diff --git a/vendor/github.com/xanzy/go-cloudstack/cloudstack/NetworkOfferingService.go b/vendor/github.com/xanzy/go-cloudstack/cloudstack/NetworkOfferingService.go new file mode 100644 index 000000000..0e57e8a7d --- /dev/null +++ b/vendor/github.com/xanzy/go-cloudstack/cloudstack/NetworkOfferingService.go @@ -0,0 +1,939 @@ +// +// Copyright 2014, Sander van Harmelen +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// + +package cloudstack + +import ( + "encoding/json" + "fmt" + "net/url" + "strconv" + "strings" +) + +type CreateNetworkOfferingParams struct { + p map[string]interface{} +} + +func (p *CreateNetworkOfferingParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["availability"]; found { + u.Set("availability", v.(string)) + } + if v, found := p.p["conservemode"]; found { + vv := strconv.FormatBool(v.(bool)) + u.Set("conservemode", vv) + } + if v, found := p.p["details"]; found { + i := 0 + for k, vv := range v.(map[string]string) { + u.Set(fmt.Sprintf("details[%d].key", i), k) + u.Set(fmt.Sprintf("details[%d].value", i), vv) + i++ + } + } + if v, found := p.p["displaytext"]; found { + u.Set("displaytext", v.(string)) + } + if v, found := p.p["egressdefaultpolicy"]; found { + vv := strconv.FormatBool(v.(bool)) + u.Set("egressdefaultpolicy", vv) + } + if v, found := p.p["guestiptype"]; found { + u.Set("guestiptype", v.(string)) + } + if v, found := p.p["ispersistent"]; found { + vv := strconv.FormatBool(v.(bool)) + u.Set("ispersistent", vv) + } + if v, found := p.p["keepaliveenabled"]; found { + vv := strconv.FormatBool(v.(bool)) + u.Set("keepaliveenabled", vv) + } + if v, found := p.p["maxconnections"]; found { + vv := strconv.Itoa(v.(int)) + u.Set("maxconnections", vv) + } + if v, found := p.p["name"]; found { + u.Set("name", v.(string)) + } + if v, found := p.p["networkrate"]; found { + vv := strconv.Itoa(v.(int)) + u.Set("networkrate", vv) + } + if v, found := p.p["servicecapabilitylist"]; found { + i := 0 + for k, vv := range v.(map[string]string) { + u.Set(fmt.Sprintf("servicecapabilitylist[%d].key", i), k) + u.Set(fmt.Sprintf("servicecapabilitylist[%d].value", i), vv) + i++ + } + } + if v, found := p.p["serviceofferingid"]; found { + u.Set("serviceofferingid", v.(string)) + } + if v, found := p.p["serviceproviderlist"]; found { + i := 0 + for k, vv := range v.(map[string]string) { + u.Set(fmt.Sprintf("serviceproviderlist[%d].service", i), k) + u.Set(fmt.Sprintf("serviceproviderlist[%d].provider", i), vv) + i++ + } + } + if v, found := p.p["specifyipranges"]; found { + vv := strconv.FormatBool(v.(bool)) + u.Set("specifyipranges", vv) + } + if v, found := p.p["specifyvlan"]; found { + vv := strconv.FormatBool(v.(bool)) + u.Set("specifyvlan", vv) + } + if v, found := p.p["supportedservices"]; found { + vv := strings.Join(v.([]string), ",") + u.Set("supportedservices", vv) + } + if v, found := p.p["tags"]; found { + u.Set("tags", v.(string)) + } + if v, found := p.p["traffictype"]; found { + u.Set("traffictype", v.(string)) + } + return u +} + +func (p *CreateNetworkOfferingParams) SetAvailability(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["availability"] = v + return +} + +func (p *CreateNetworkOfferingParams) SetConservemode(v bool) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["conservemode"] = v + return +} + +func (p *CreateNetworkOfferingParams) SetDetails(v map[string]string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["details"] = v + return +} + +func (p *CreateNetworkOfferingParams) SetDisplaytext(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["displaytext"] = v + return +} + +func (p *CreateNetworkOfferingParams) SetEgressdefaultpolicy(v bool) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["egressdefaultpolicy"] = v + return +} + +func (p *CreateNetworkOfferingParams) SetGuestiptype(v string) { + 
if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["guestiptype"] = v + return +} + +func (p *CreateNetworkOfferingParams) SetIspersistent(v bool) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["ispersistent"] = v + return +} + +func (p *CreateNetworkOfferingParams) SetKeepaliveenabled(v bool) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["keepaliveenabled"] = v + return +} + +func (p *CreateNetworkOfferingParams) SetMaxconnections(v int) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["maxconnections"] = v + return +} + +func (p *CreateNetworkOfferingParams) SetName(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["name"] = v + return +} + +func (p *CreateNetworkOfferingParams) SetNetworkrate(v int) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["networkrate"] = v + return +} + +func (p *CreateNetworkOfferingParams) SetServicecapabilitylist(v map[string]string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["servicecapabilitylist"] = v + return +} + +func (p *CreateNetworkOfferingParams) SetServiceofferingid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["serviceofferingid"] = v + return +} + +func (p *CreateNetworkOfferingParams) SetServiceproviderlist(v map[string]string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["serviceproviderlist"] = v + return +} + +func (p *CreateNetworkOfferingParams) SetSpecifyipranges(v bool) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["specifyipranges"] = v + return +} + +func (p *CreateNetworkOfferingParams) SetSpecifyvlan(v bool) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["specifyvlan"] = v + return +} + +func (p *CreateNetworkOfferingParams) SetSupportedservices(v []string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["supportedservices"] = v + return +} + +func (p *CreateNetworkOfferingParams) SetTags(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["tags"] = v + return +} + +func (p *CreateNetworkOfferingParams) SetTraffictype(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["traffictype"] = v + return +} + +// You should always use this function to get a new CreateNetworkOfferingParams instance, +// as then you are sure you have configured all required params +func (s *NetworkOfferingService) NewCreateNetworkOfferingParams(displaytext string, guestiptype string, name string, supportedservices []string, traffictype string) *CreateNetworkOfferingParams { + p := &CreateNetworkOfferingParams{} + p.p = make(map[string]interface{}) + p.p["displaytext"] = displaytext + p.p["guestiptype"] = guestiptype + p.p["name"] = name + p.p["supportedservices"] = supportedservices + p.p["traffictype"] = traffictype + return p +} + +// Creates a network offering. 
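+// (Editorial note, not in the upstream file:) unlike the async commands in
+// the services above, createNetworkOffering answers synchronously, so the
+// response is unwrapped with getRawValue and unmarshalled directly rather
+// than polled via GetAsyncJobResult.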
+func (s *NetworkOfferingService) CreateNetworkOffering(p *CreateNetworkOfferingParams) (*CreateNetworkOfferingResponse, error) { + resp, err := s.cs.newRequest("createNetworkOffering", p.toURLValues()) + if err != nil { + return nil, err + } + + if resp, err = getRawValue(resp); err != nil { + return nil, err + } + + var r CreateNetworkOfferingResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + return &r, nil +} + +type CreateNetworkOfferingResponse struct { + Availability string `json:"availability,omitempty"` + Conservemode bool `json:"conservemode,omitempty"` + Created string `json:"created,omitempty"` + Details map[string]string `json:"details,omitempty"` + Displaytext string `json:"displaytext,omitempty"` + Egressdefaultpolicy bool `json:"egressdefaultpolicy,omitempty"` + Forvpc bool `json:"forvpc,omitempty"` + Guestiptype string `json:"guestiptype,omitempty"` + Id string `json:"id,omitempty"` + Isdefault bool `json:"isdefault,omitempty"` + Ispersistent bool `json:"ispersistent,omitempty"` + Maxconnections int `json:"maxconnections,omitempty"` + Name string `json:"name,omitempty"` + Networkrate int `json:"networkrate,omitempty"` + Service []struct { + Capability []struct { + Canchooseservicecapability bool `json:"canchooseservicecapability,omitempty"` + Name string `json:"name,omitempty"` + Value string `json:"value,omitempty"` + } `json:"capability,omitempty"` + Name string `json:"name,omitempty"` + Provider []struct { + Canenableindividualservice bool `json:"canenableindividualservice,omitempty"` + Destinationphysicalnetworkid string `json:"destinationphysicalnetworkid,omitempty"` + Id string `json:"id,omitempty"` + Name string `json:"name,omitempty"` + Physicalnetworkid string `json:"physicalnetworkid,omitempty"` + Servicelist []string `json:"servicelist,omitempty"` + State string `json:"state,omitempty"` + } `json:"provider,omitempty"` + } `json:"service,omitempty"` + Serviceofferingid string `json:"serviceofferingid,omitempty"` + Specifyipranges bool `json:"specifyipranges,omitempty"` + Specifyvlan bool `json:"specifyvlan,omitempty"` + State string `json:"state,omitempty"` + Supportsstrechedl2subnet bool `json:"supportsstrechedl2subnet,omitempty"` + Tags string `json:"tags,omitempty"` + Traffictype string `json:"traffictype,omitempty"` +} + +type UpdateNetworkOfferingParams struct { + p map[string]interface{} +} + +func (p *UpdateNetworkOfferingParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["availability"]; found { + u.Set("availability", v.(string)) + } + if v, found := p.p["displaytext"]; found { + u.Set("displaytext", v.(string)) + } + if v, found := p.p["id"]; found { + u.Set("id", v.(string)) + } + if v, found := p.p["keepaliveenabled"]; found { + vv := strconv.FormatBool(v.(bool)) + u.Set("keepaliveenabled", vv) + } + if v, found := p.p["maxconnections"]; found { + vv := strconv.Itoa(v.(int)) + u.Set("maxconnections", vv) + } + if v, found := p.p["name"]; found { + u.Set("name", v.(string)) + } + if v, found := p.p["sortkey"]; found { + vv := strconv.Itoa(v.(int)) + u.Set("sortkey", vv) + } + if v, found := p.p["state"]; found { + u.Set("state", v.(string)) + } + return u +} + +func (p *UpdateNetworkOfferingParams) SetAvailability(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["availability"] = v + return +} + +func (p *UpdateNetworkOfferingParams) SetDisplaytext(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + 
p.p["displaytext"] = v + return +} + +func (p *UpdateNetworkOfferingParams) SetId(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["id"] = v + return +} + +func (p *UpdateNetworkOfferingParams) SetKeepaliveenabled(v bool) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["keepaliveenabled"] = v + return +} + +func (p *UpdateNetworkOfferingParams) SetMaxconnections(v int) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["maxconnections"] = v + return +} + +func (p *UpdateNetworkOfferingParams) SetName(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["name"] = v + return +} + +func (p *UpdateNetworkOfferingParams) SetSortkey(v int) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["sortkey"] = v + return +} + +func (p *UpdateNetworkOfferingParams) SetState(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["state"] = v + return +} + +// You should always use this function to get a new UpdateNetworkOfferingParams instance, +// as then you are sure you have configured all required params +func (s *NetworkOfferingService) NewUpdateNetworkOfferingParams() *UpdateNetworkOfferingParams { + p := &UpdateNetworkOfferingParams{} + p.p = make(map[string]interface{}) + return p +} + +// Updates a network offering. +func (s *NetworkOfferingService) UpdateNetworkOffering(p *UpdateNetworkOfferingParams) (*UpdateNetworkOfferingResponse, error) { + resp, err := s.cs.newRequest("updateNetworkOffering", p.toURLValues()) + if err != nil { + return nil, err + } + + var r UpdateNetworkOfferingResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + return &r, nil +} + +type UpdateNetworkOfferingResponse struct { + Availability string `json:"availability,omitempty"` + Conservemode bool `json:"conservemode,omitempty"` + Created string `json:"created,omitempty"` + Details map[string]string `json:"details,omitempty"` + Displaytext string `json:"displaytext,omitempty"` + Egressdefaultpolicy bool `json:"egressdefaultpolicy,omitempty"` + Forvpc bool `json:"forvpc,omitempty"` + Guestiptype string `json:"guestiptype,omitempty"` + Id string `json:"id,omitempty"` + Isdefault bool `json:"isdefault,omitempty"` + Ispersistent bool `json:"ispersistent,omitempty"` + Maxconnections int `json:"maxconnections,omitempty"` + Name string `json:"name,omitempty"` + Networkrate int `json:"networkrate,omitempty"` + Service []struct { + Capability []struct { + Canchooseservicecapability bool `json:"canchooseservicecapability,omitempty"` + Name string `json:"name,omitempty"` + Value string `json:"value,omitempty"` + } `json:"capability,omitempty"` + Name string `json:"name,omitempty"` + Provider []struct { + Canenableindividualservice bool `json:"canenableindividualservice,omitempty"` + Destinationphysicalnetworkid string `json:"destinationphysicalnetworkid,omitempty"` + Id string `json:"id,omitempty"` + Name string `json:"name,omitempty"` + Physicalnetworkid string `json:"physicalnetworkid,omitempty"` + Servicelist []string `json:"servicelist,omitempty"` + State string `json:"state,omitempty"` + } `json:"provider,omitempty"` + } `json:"service,omitempty"` + Serviceofferingid string `json:"serviceofferingid,omitempty"` + Specifyipranges bool `json:"specifyipranges,omitempty"` + Specifyvlan bool `json:"specifyvlan,omitempty"` + State string `json:"state,omitempty"` + Supportsstrechedl2subnet bool `json:"supportsstrechedl2subnet,omitempty"` + Tags string `json:"tags,omitempty"` + 
Traffictype string `json:"traffictype,omitempty"` +} + +type DeleteNetworkOfferingParams struct { + p map[string]interface{} +} + +func (p *DeleteNetworkOfferingParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["id"]; found { + u.Set("id", v.(string)) + } + return u +} + +func (p *DeleteNetworkOfferingParams) SetId(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["id"] = v + return +} + +// You should always use this function to get a new DeleteNetworkOfferingParams instance, +// as then you are sure you have configured all required params +func (s *NetworkOfferingService) NewDeleteNetworkOfferingParams(id string) *DeleteNetworkOfferingParams { + p := &DeleteNetworkOfferingParams{} + p.p = make(map[string]interface{}) + p.p["id"] = id + return p +} + +// Deletes a network offering. +func (s *NetworkOfferingService) DeleteNetworkOffering(p *DeleteNetworkOfferingParams) (*DeleteNetworkOfferingResponse, error) { + resp, err := s.cs.newRequest("deleteNetworkOffering", p.toURLValues()) + if err != nil { + return nil, err + } + + var r DeleteNetworkOfferingResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + return &r, nil +} + +type DeleteNetworkOfferingResponse struct { + Displaytext string `json:"displaytext,omitempty"` + Success string `json:"success,omitempty"` +} + +type ListNetworkOfferingsParams struct { + p map[string]interface{} +} + +func (p *ListNetworkOfferingsParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["availability"]; found { + u.Set("availability", v.(string)) + } + if v, found := p.p["displaytext"]; found { + u.Set("displaytext", v.(string)) + } + if v, found := p.p["forvpc"]; found { + vv := strconv.FormatBool(v.(bool)) + u.Set("forvpc", vv) + } + if v, found := p.p["guestiptype"]; found { + u.Set("guestiptype", v.(string)) + } + if v, found := p.p["id"]; found { + u.Set("id", v.(string)) + } + if v, found := p.p["isdefault"]; found { + vv := strconv.FormatBool(v.(bool)) + u.Set("isdefault", vv) + } + if v, found := p.p["istagged"]; found { + vv := strconv.FormatBool(v.(bool)) + u.Set("istagged", vv) + } + if v, found := p.p["keyword"]; found { + u.Set("keyword", v.(string)) + } + if v, found := p.p["name"]; found { + u.Set("name", v.(string)) + } + if v, found := p.p["networkid"]; found { + u.Set("networkid", v.(string)) + } + if v, found := p.p["page"]; found { + vv := strconv.Itoa(v.(int)) + u.Set("page", vv) + } + if v, found := p.p["pagesize"]; found { + vv := strconv.Itoa(v.(int)) + u.Set("pagesize", vv) + } + if v, found := p.p["sourcenatsupported"]; found { + vv := strconv.FormatBool(v.(bool)) + u.Set("sourcenatsupported", vv) + } + if v, found := p.p["specifyipranges"]; found { + vv := strconv.FormatBool(v.(bool)) + u.Set("specifyipranges", vv) + } + if v, found := p.p["specifyvlan"]; found { + vv := strconv.FormatBool(v.(bool)) + u.Set("specifyvlan", vv) + } + if v, found := p.p["state"]; found { + u.Set("state", v.(string)) + } + if v, found := p.p["supportedservices"]; found { + vv := strings.Join(v.([]string), ",") + u.Set("supportedservices", vv) + } + if v, found := p.p["tags"]; found { + u.Set("tags", v.(string)) + } + if v, found := p.p["traffictype"]; found { + u.Set("traffictype", v.(string)) + } + if v, found := p.p["zoneid"]; found { + u.Set("zoneid", v.(string)) + } + return u +} + +func (p *ListNetworkOfferingsParams) SetAvailability(v string) { + if p.p == nil { + p.p = 
make(map[string]interface{}) + } + p.p["availability"] = v + return +} + +func (p *ListNetworkOfferingsParams) SetDisplaytext(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["displaytext"] = v + return +} + +func (p *ListNetworkOfferingsParams) SetForvpc(v bool) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["forvpc"] = v + return +} + +func (p *ListNetworkOfferingsParams) SetGuestiptype(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["guestiptype"] = v + return +} + +func (p *ListNetworkOfferingsParams) SetId(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["id"] = v + return +} + +func (p *ListNetworkOfferingsParams) SetIsdefault(v bool) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["isdefault"] = v + return +} + +func (p *ListNetworkOfferingsParams) SetIstagged(v bool) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["istagged"] = v + return +} + +func (p *ListNetworkOfferingsParams) SetKeyword(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["keyword"] = v + return +} + +func (p *ListNetworkOfferingsParams) SetName(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["name"] = v + return +} + +func (p *ListNetworkOfferingsParams) SetNetworkid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["networkid"] = v + return +} + +func (p *ListNetworkOfferingsParams) SetPage(v int) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["page"] = v + return +} + +func (p *ListNetworkOfferingsParams) SetPagesize(v int) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["pagesize"] = v + return +} + +func (p *ListNetworkOfferingsParams) SetSourcenatsupported(v bool) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["sourcenatsupported"] = v + return +} + +func (p *ListNetworkOfferingsParams) SetSpecifyipranges(v bool) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["specifyipranges"] = v + return +} + +func (p *ListNetworkOfferingsParams) SetSpecifyvlan(v bool) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["specifyvlan"] = v + return +} + +func (p *ListNetworkOfferingsParams) SetState(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["state"] = v + return +} + +func (p *ListNetworkOfferingsParams) SetSupportedservices(v []string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["supportedservices"] = v + return +} + +func (p *ListNetworkOfferingsParams) SetTags(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["tags"] = v + return +} + +func (p *ListNetworkOfferingsParams) SetTraffictype(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["traffictype"] = v + return +} + +func (p *ListNetworkOfferingsParams) SetZoneid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["zoneid"] = v + return +} + +// You should always use this function to get a new ListNetworkOfferingsParams instance, +// as then you are sure you have configured all required params +func (s *NetworkOfferingService) NewListNetworkOfferingsParams() *ListNetworkOfferingsParams { + p := &ListNetworkOfferingsParams{} + p.p = make(map[string]interface{}) + return p +} + +// This is a courtesy helper function, which in some cases may not work as expected! 
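+// Example (sketch, assuming an initialized client `cs`): resolving an offering
+// UUID by name; when several offerings match the keyword, the helper returns
+// the first exact name match.
+//
+//	id, err := cs.NetworkOffering.GetNetworkOfferingID("example-offering")
+//	if err != nil {
+//		log.Fatal(err) // no match, or no exact match
+//	}
+//	fmt.Println("offering id:", id)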
+func (s *NetworkOfferingService) GetNetworkOfferingID(name string) (string, error) { + p := &ListNetworkOfferingsParams{} + p.p = make(map[string]interface{}) + + p.p["name"] = name + + l, err := s.ListNetworkOfferings(p) + if err != nil { + return "", err + } + + if l.Count == 0 { + return "", fmt.Errorf("No match found for %s: %+v", name, l) + } + + if l.Count == 1 { + return l.NetworkOfferings[0].Id, nil + } + + if l.Count > 1 { + for _, v := range l.NetworkOfferings { + if v.Name == name { + return v.Id, nil + } + } + } + return "", fmt.Errorf("Could not find an exact match for %s: %+v", name, l) +} + +// This is a courtesy helper function, which in some cases may not work as expected! +func (s *NetworkOfferingService) GetNetworkOfferingByName(name string) (*NetworkOffering, int, error) { + id, err := s.GetNetworkOfferingID(name) + if err != nil { + return nil, -1, err + } + + r, count, err := s.GetNetworkOfferingByID(id) + if err != nil { + return nil, count, err + } + return r, count, nil +} + +// This is a courtesy helper function, which in some cases may not work as expected! +func (s *NetworkOfferingService) GetNetworkOfferingByID(id string) (*NetworkOffering, int, error) { + p := &ListNetworkOfferingsParams{} + p.p = make(map[string]interface{}) + + p.p["id"] = id + + l, err := s.ListNetworkOfferings(p) + if err != nil { + if strings.Contains(err.Error(), fmt.Sprintf( + "Invalid parameter id value=%s due to incorrect long value format, "+ + "or entity does not exist", id)) { + return nil, 0, fmt.Errorf("No match found for %s: %+v", id, l) + } + return nil, -1, err + } + + if l.Count == 0 { + return nil, l.Count, fmt.Errorf("No match found for %s: %+v", id, l) + } + + if l.Count == 1 { + return l.NetworkOfferings[0], l.Count, nil + } + return nil, l.Count, fmt.Errorf("There is more than one result for NetworkOffering UUID: %s!", id) +} + +// Lists all available network offerings.
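+// Example (sketch, assuming client `cs`): paging through offerings with the
+// generated params type; CloudStack pages are 1-based, and a short page marks
+// the end of the listing.
+//
+//	p := cs.NetworkOffering.NewListNetworkOfferingsParams()
+//	p.SetPagesize(50)
+//	for page := 1; ; page++ {
+//		p.SetPage(page)
+//		r, err := cs.NetworkOffering.ListNetworkOfferings(p)
+//		if err != nil {
+//			log.Fatal(err)
+//		}
+//		for _, o := range r.NetworkOfferings {
+//			fmt.Println(o.Id, o.Name)
+//		}
+//		if len(r.NetworkOfferings) < 50 {
+//			break
+//		}
+//	}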
+func (s *NetworkOfferingService) ListNetworkOfferings(p *ListNetworkOfferingsParams) (*ListNetworkOfferingsResponse, error) { + resp, err := s.cs.newRequest("listNetworkOfferings", p.toURLValues()) + if err != nil { + return nil, err + } + + var r ListNetworkOfferingsResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + return &r, nil +} + +type ListNetworkOfferingsResponse struct { + Count int `json:"count"` + NetworkOfferings []*NetworkOffering `json:"networkoffering"` +} + +type NetworkOffering struct { + Availability string `json:"availability,omitempty"` + Conservemode bool `json:"conservemode,omitempty"` + Created string `json:"created,omitempty"` + Details map[string]string `json:"details,omitempty"` + Displaytext string `json:"displaytext,omitempty"` + Egressdefaultpolicy bool `json:"egressdefaultpolicy,omitempty"` + Forvpc bool `json:"forvpc,omitempty"` + Guestiptype string `json:"guestiptype,omitempty"` + Id string `json:"id,omitempty"` + Isdefault bool `json:"isdefault,omitempty"` + Ispersistent bool `json:"ispersistent,omitempty"` + Maxconnections int `json:"maxconnections,omitempty"` + Name string `json:"name,omitempty"` + Networkrate int `json:"networkrate,omitempty"` + Service []struct { + Capability []struct { + Canchooseservicecapability bool `json:"canchooseservicecapability,omitempty"` + Name string `json:"name,omitempty"` + Value string `json:"value,omitempty"` + } `json:"capability,omitempty"` + Name string `json:"name,omitempty"` + Provider []struct { + Canenableindividualservice bool `json:"canenableindividualservice,omitempty"` + Destinationphysicalnetworkid string `json:"destinationphysicalnetworkid,omitempty"` + Id string `json:"id,omitempty"` + Name string `json:"name,omitempty"` + Physicalnetworkid string `json:"physicalnetworkid,omitempty"` + Servicelist []string `json:"servicelist,omitempty"` + State string `json:"state,omitempty"` + } `json:"provider,omitempty"` + } `json:"service,omitempty"` + Serviceofferingid string `json:"serviceofferingid,omitempty"` + Specifyipranges bool `json:"specifyipranges,omitempty"` + Specifyvlan bool `json:"specifyvlan,omitempty"` + State string `json:"state,omitempty"` + Supportsstrechedl2subnet bool `json:"supportsstrechedl2subnet,omitempty"` + Tags string `json:"tags,omitempty"` + Traffictype string `json:"traffictype,omitempty"` +} diff --git a/vendor/github.com/xanzy/go-cloudstack/cloudstack/NetworkService.go b/vendor/github.com/xanzy/go-cloudstack/cloudstack/NetworkService.go new file mode 100644 index 000000000..1e9bda43c --- /dev/null +++ b/vendor/github.com/xanzy/go-cloudstack/cloudstack/NetworkService.go @@ -0,0 +1,3711 @@ +// +// Copyright 2014, Sander van Harmelen +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// + +package cloudstack + +import ( + "encoding/json" + "fmt" + "net/url" + "strconv" + "strings" +) + +type DedicatePublicIpRangeParams struct { + p map[string]interface{} +} + +func (p *DedicatePublicIpRangeParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["account"]; found { + u.Set("account", v.(string)) + } + if v, found := p.p["domainid"]; found { + u.Set("domainid", v.(string)) + } + if v, found := p.p["id"]; found { + u.Set("id", v.(string)) + } + if v, found := p.p["projectid"]; found { + u.Set("projectid", v.(string)) + } + return u +} + +func (p *DedicatePublicIpRangeParams) SetAccount(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["account"] = v + return +} + +func (p *DedicatePublicIpRangeParams) SetDomainid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["domainid"] = v + return +} + +func (p *DedicatePublicIpRangeParams) SetId(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["id"] = v + return +} + +func (p *DedicatePublicIpRangeParams) SetProjectid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["projectid"] = v + return +} + +// You should always use this function to get a new DedicatePublicIpRangeParams instance, +// as then you are sure you have configured all required params +func (s *NetworkService) NewDedicatePublicIpRangeParams(account string, domainid string, id string) *DedicatePublicIpRangeParams { + p := &DedicatePublicIpRangeParams{} + p.p = make(map[string]interface{}) + p.p["account"] = account + p.p["domainid"] = domainid + p.p["id"] = id + return p +} + +// Dedicates a Public IP range to an account +func (s *NetworkService) DedicatePublicIpRange(p *DedicatePublicIpRangeParams) (*DedicatePublicIpRangeResponse, error) { + resp, err := s.cs.newRequest("dedicatePublicIpRange", p.toURLValues()) + if err != nil { + return nil, err + } + + var r DedicatePublicIpRangeResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + return &r, nil +} + +type DedicatePublicIpRangeResponse struct { + Account string `json:"account,omitempty"` + Description string `json:"description,omitempty"` + Domain string `json:"domain,omitempty"` + Domainid string `json:"domainid,omitempty"` + Endip string `json:"endip,omitempty"` + Endipv6 string `json:"endipv6,omitempty"` + Forvirtualnetwork bool `json:"forvirtualnetwork,omitempty"` + Gateway string `json:"gateway,omitempty"` + Id string `json:"id,omitempty"` + Ip6cidr string `json:"ip6cidr,omitempty"` + Ip6gateway string `json:"ip6gateway,omitempty"` + Netmask string `json:"netmask,omitempty"` + Networkid string `json:"networkid,omitempty"` + Physicalnetworkid string `json:"physicalnetworkid,omitempty"` + Podid string `json:"podid,omitempty"` + Podname string `json:"podname,omitempty"` + Project string `json:"project,omitempty"` + Projectid string `json:"projectid,omitempty"` + Startip string `json:"startip,omitempty"` + Startipv6 string `json:"startipv6,omitempty"` + Vlan string `json:"vlan,omitempty"` + Zoneid string `json:"zoneid,omitempty"` +} + +type ReleasePublicIpRangeParams struct { + p map[string]interface{} +} + +func (p *ReleasePublicIpRangeParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["id"]; found { + u.Set("id", v.(string)) + } + return u +} + +func (p *ReleasePublicIpRangeParams) SetId(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + 
p.p["id"] = v + return +} + +// You should always use this function to get a new ReleasePublicIpRangeParams instance, +// as then you are sure you have configured all required params +func (s *NetworkService) NewReleasePublicIpRangeParams(id string) *ReleasePublicIpRangeParams { + p := &ReleasePublicIpRangeParams{} + p.p = make(map[string]interface{}) + p.p["id"] = id + return p +} + +// Releases a Public IP range back to the system pool +func (s *NetworkService) ReleasePublicIpRange(p *ReleasePublicIpRangeParams) (*ReleasePublicIpRangeResponse, error) { + resp, err := s.cs.newRequest("releasePublicIpRange", p.toURLValues()) + if err != nil { + return nil, err + } + + var r ReleasePublicIpRangeResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + return &r, nil +} + +type ReleasePublicIpRangeResponse struct { + Displaytext string `json:"displaytext,omitempty"` + Success string `json:"success,omitempty"` +} + +type CreateNetworkParams struct { + p map[string]interface{} +} + +func (p *CreateNetworkParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["account"]; found { + u.Set("account", v.(string)) + } + if v, found := p.p["aclid"]; found { + u.Set("aclid", v.(string)) + } + if v, found := p.p["acltype"]; found { + u.Set("acltype", v.(string)) + } + if v, found := p.p["displaynetwork"]; found { + vv := strconv.FormatBool(v.(bool)) + u.Set("displaynetwork", vv) + } + if v, found := p.p["displaytext"]; found { + u.Set("displaytext", v.(string)) + } + if v, found := p.p["domainid"]; found { + u.Set("domainid", v.(string)) + } + if v, found := p.p["endip"]; found { + u.Set("endip", v.(string)) + } + if v, found := p.p["endipv6"]; found { + u.Set("endipv6", v.(string)) + } + if v, found := p.p["gateway"]; found { + u.Set("gateway", v.(string)) + } + if v, found := p.p["ip6cidr"]; found { + u.Set("ip6cidr", v.(string)) + } + if v, found := p.p["ip6gateway"]; found { + u.Set("ip6gateway", v.(string)) + } + if v, found := p.p["isolatedpvlan"]; found { + u.Set("isolatedpvlan", v.(string)) + } + if v, found := p.p["name"]; found { + u.Set("name", v.(string)) + } + if v, found := p.p["netmask"]; found { + u.Set("netmask", v.(string)) + } + if v, found := p.p["networkdomain"]; found { + u.Set("networkdomain", v.(string)) + } + if v, found := p.p["networkofferingid"]; found { + u.Set("networkofferingid", v.(string)) + } + if v, found := p.p["physicalnetworkid"]; found { + u.Set("physicalnetworkid", v.(string)) + } + if v, found := p.p["projectid"]; found { + u.Set("projectid", v.(string)) + } + if v, found := p.p["startip"]; found { + u.Set("startip", v.(string)) + } + if v, found := p.p["startipv6"]; found { + u.Set("startipv6", v.(string)) + } + if v, found := p.p["subdomainaccess"]; found { + vv := strconv.FormatBool(v.(bool)) + u.Set("subdomainaccess", vv) + } + if v, found := p.p["vlan"]; found { + u.Set("vlan", v.(string)) + } + if v, found := p.p["vlan"]; found { + u.Set("vlan", v.(string)) + } + if v, found := p.p["vpcid"]; found { + u.Set("vpcid", v.(string)) + } + if v, found := p.p["zoneid"]; found { + u.Set("zoneid", v.(string)) + } + return u +} + +func (p *CreateNetworkParams) SetAccount(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["account"] = v + return +} + +func (p *CreateNetworkParams) SetAclid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["aclid"] = v + return +} + +func (p *CreateNetworkParams) SetAcltype(v string) { + if p.p == nil { 
+ p.p = make(map[string]interface{}) + } + p.p["acltype"] = v + return +} + +func (p *CreateNetworkParams) SetDisplaynetwork(v bool) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["displaynetwork"] = v + return +} + +func (p *CreateNetworkParams) SetDisplaytext(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["displaytext"] = v + return +} + +func (p *CreateNetworkParams) SetDomainid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["domainid"] = v + return +} + +func (p *CreateNetworkParams) SetEndip(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["endip"] = v + return +} + +func (p *CreateNetworkParams) SetEndipv6(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["endipv6"] = v + return +} + +func (p *CreateNetworkParams) SetGateway(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["gateway"] = v + return +} + +func (p *CreateNetworkParams) SetIp6cidr(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["ip6cidr"] = v + return +} + +func (p *CreateNetworkParams) SetIp6gateway(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["ip6gateway"] = v + return +} + +func (p *CreateNetworkParams) SetIsolatedpvlan(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["isolatedpvlan"] = v + return +} + +func (p *CreateNetworkParams) SetName(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["name"] = v + return +} + +func (p *CreateNetworkParams) SetNetmask(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["netmask"] = v + return +} + +func (p *CreateNetworkParams) SetNetworkdomain(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["networkdomain"] = v + return +} + +func (p *CreateNetworkParams) SetNetworkofferingid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["networkofferingid"] = v + return +} + +func (p *CreateNetworkParams) SetPhysicalnetworkid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["physicalnetworkid"] = v + return +} + +func (p *CreateNetworkParams) SetProjectid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["projectid"] = v + return +} + +func (p *CreateNetworkParams) SetStartip(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["startip"] = v + return +} + +func (p *CreateNetworkParams) SetStartipv6(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["startipv6"] = v + return +} + +func (p *CreateNetworkParams) SetSubdomainaccess(v bool) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["subdomainaccess"] = v + return +} + +func (p *CreateNetworkParams) SetVlan(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["vlan"] = v + return +} + +func (p *CreateNetworkParams) SetVpcid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["vpcid"] = v + return +} + +func (p *CreateNetworkParams) SetZoneid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["zoneid"] = v + return +} + +// You should always use this function to get a new CreateNetworkParams instance, +// as then you are sure you have configured all required params +func (s *NetworkService) NewCreateNetworkParams(displaytext string, name string, networkofferingid string, zoneid string) 
*CreateNetworkParams { + p := &CreateNetworkParams{} + p.p = make(map[string]interface{}) + p.p["displaytext"] = displaytext + p.p["name"] = name + p.p["networkofferingid"] = networkofferingid + p.p["zoneid"] = zoneid + return p +} + +// Creates a network +func (s *NetworkService) CreateNetwork(p *CreateNetworkParams) (*CreateNetworkResponse, error) { + resp, err := s.cs.newRequest("createNetwork", p.toURLValues()) + if err != nil { + return nil, err + } + + if resp, err = getRawValue(resp); err != nil { + return nil, err + } + + var r CreateNetworkResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + return &r, nil +} + +type CreateNetworkResponse struct { + Account string `json:"account,omitempty"` + Aclid string `json:"aclid,omitempty"` + Acltype string `json:"acltype,omitempty"` + Broadcastdomaintype string `json:"broadcastdomaintype,omitempty"` + Broadcasturi string `json:"broadcasturi,omitempty"` + Canusefordeploy bool `json:"canusefordeploy,omitempty"` + Cidr string `json:"cidr,omitempty"` + Displaynetwork bool `json:"displaynetwork,omitempty"` + Displaytext string `json:"displaytext,omitempty"` + Dns1 string `json:"dns1,omitempty"` + Dns2 string `json:"dns2,omitempty"` + Domain string `json:"domain,omitempty"` + Domainid string `json:"domainid,omitempty"` + Gateway string `json:"gateway,omitempty"` + Id string `json:"id,omitempty"` + Ip6cidr string `json:"ip6cidr,omitempty"` + Ip6gateway string `json:"ip6gateway,omitempty"` + Isdefault bool `json:"isdefault,omitempty"` + Ispersistent bool `json:"ispersistent,omitempty"` + Issystem bool `json:"issystem,omitempty"` + Name string `json:"name,omitempty"` + Netmask string `json:"netmask,omitempty"` + Networkcidr string `json:"networkcidr,omitempty"` + Networkdomain string `json:"networkdomain,omitempty"` + Networkofferingavailability string `json:"networkofferingavailability,omitempty"` + Networkofferingconservemode bool `json:"networkofferingconservemode,omitempty"` + Networkofferingdisplaytext string `json:"networkofferingdisplaytext,omitempty"` + Networkofferingid string `json:"networkofferingid,omitempty"` + Networkofferingname string `json:"networkofferingname,omitempty"` + Physicalnetworkid string `json:"physicalnetworkid,omitempty"` + Project string `json:"project,omitempty"` + Projectid string `json:"projectid,omitempty"` + Related string `json:"related,omitempty"` + Reservediprange string `json:"reservediprange,omitempty"` + Restartrequired bool `json:"restartrequired,omitempty"` + Service []struct { + Capability []struct { + Canchooseservicecapability bool `json:"canchooseservicecapability,omitempty"` + Name string `json:"name,omitempty"` + Value string `json:"value,omitempty"` + } `json:"capability,omitempty"` + Name string `json:"name,omitempty"` + Provider []struct { + Canenableindividualservice bool `json:"canenableindividualservice,omitempty"` + Destinationphysicalnetworkid string `json:"destinationphysicalnetworkid,omitempty"` + Id string `json:"id,omitempty"` + Name string `json:"name,omitempty"` + Physicalnetworkid string `json:"physicalnetworkid,omitempty"` + Servicelist []string `json:"servicelist,omitempty"` + State string `json:"state,omitempty"` + } `json:"provider,omitempty"` + } `json:"service,omitempty"` + Specifyipranges bool `json:"specifyipranges,omitempty"` + State string `json:"state,omitempty"` + Strechedl2subnet bool `json:"strechedl2subnet,omitempty"` + Subdomainaccess bool `json:"subdomainaccess,omitempty"` + Tags []struct { + Account string `json:"account,omitempty"` + 
Customer string `json:"customer,omitempty"` + Domain string `json:"domain,omitempty"` + Domainid string `json:"domainid,omitempty"` + Key string `json:"key,omitempty"` + Project string `json:"project,omitempty"` + Projectid string `json:"projectid,omitempty"` + Resourceid string `json:"resourceid,omitempty"` + Resourcetype string `json:"resourcetype,omitempty"` + Value string `json:"value,omitempty"` + } `json:"tags,omitempty"` + Traffictype string `json:"traffictype,omitempty"` + Type string `json:"type,omitempty"` + Vlan string `json:"vlan,omitempty"` + Vpcid string `json:"vpcid,omitempty"` + Zoneid string `json:"zoneid,omitempty"` + Zonename string `json:"zonename,omitempty"` + Zonesnetworkspans []string `json:"zonesnetworkspans,omitempty"` +} + +type DeleteNetworkParams struct { + p map[string]interface{} +} + +func (p *DeleteNetworkParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["forced"]; found { + vv := strconv.FormatBool(v.(bool)) + u.Set("forced", vv) + } + if v, found := p.p["id"]; found { + u.Set("id", v.(string)) + } + return u +} + +func (p *DeleteNetworkParams) SetForced(v bool) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["forced"] = v + return +} + +func (p *DeleteNetworkParams) SetId(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["id"] = v + return +} + +// You should always use this function to get a new DeleteNetworkParams instance, +// as then you are sure you have configured all required params +func (s *NetworkService) NewDeleteNetworkParams(id string) *DeleteNetworkParams { + p := &DeleteNetworkParams{} + p.p = make(map[string]interface{}) + p.p["id"] = id + return p +} + +// Deletes a network +func (s *NetworkService) DeleteNetwork(p *DeleteNetworkParams) (*DeleteNetworkResponse, error) { + resp, err := s.cs.newRequest("deleteNetwork", p.toURLValues()) + if err != nil { + return nil, err + } + + var r DeleteNetworkResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + + // If we have an async client, we need to wait for the async result + if s.cs.async { + b, err := s.cs.GetAsyncJobResult(r.JobID, s.cs.timeout) + if err != nil { + if err == AsyncTimeoutErr { + return &r, err + } + return nil, err + } + + if err := json.Unmarshal(b, &r); err != nil { + return nil, err + } + } + return &r, nil +} + +type DeleteNetworkResponse struct { + JobID string `json:"jobid,omitempty"` + Displaytext string `json:"displaytext,omitempty"` + Success bool `json:"success,omitempty"` +} + +type ListNetworksParams struct { + p map[string]interface{} +} + +func (p *ListNetworksParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["account"]; found { + u.Set("account", v.(string)) + } + if v, found := p.p["acltype"]; found { + u.Set("acltype", v.(string)) + } + if v, found := p.p["canusefordeploy"]; found { + vv := strconv.FormatBool(v.(bool)) + u.Set("canusefordeploy", vv) + } + if v, found := p.p["displaynetwork"]; found { + vv := strconv.FormatBool(v.(bool)) + u.Set("displaynetwork", vv) + } + if v, found := p.p["domainid"]; found { + u.Set("domainid", v.(string)) + } + if v, found := p.p["forvpc"]; found { + vv := strconv.FormatBool(v.(bool)) + u.Set("forvpc", vv) + } + if v, found := p.p["id"]; found { + u.Set("id", v.(string)) + } + if v, found := p.p["isrecursive"]; found { + vv := strconv.FormatBool(v.(bool)) + u.Set("isrecursive", vv) + } + if v, found := p.p["issystem"]; found { + vv := 
strconv.FormatBool(v.(bool)) + u.Set("issystem", vv) + } + if v, found := p.p["keyword"]; found { + u.Set("keyword", v.(string)) + } + if v, found := p.p["listall"]; found { + vv := strconv.FormatBool(v.(bool)) + u.Set("listall", vv) + } + if v, found := p.p["page"]; found { + vv := strconv.Itoa(v.(int)) + u.Set("page", vv) + } + if v, found := p.p["pagesize"]; found { + vv := strconv.Itoa(v.(int)) + u.Set("pagesize", vv) + } + if v, found := p.p["physicalnetworkid"]; found { + u.Set("physicalnetworkid", v.(string)) + } + if v, found := p.p["projectid"]; found { + u.Set("projectid", v.(string)) + } + if v, found := p.p["restartrequired"]; found { + vv := strconv.FormatBool(v.(bool)) + u.Set("restartrequired", vv) + } + if v, found := p.p["specifyipranges"]; found { + vv := strconv.FormatBool(v.(bool)) + u.Set("specifyipranges", vv) + } + if v, found := p.p["supportedservices"]; found { + vv := strings.Join(v.([]string), ",") + u.Set("supportedservices", vv) + } + if v, found := p.p["tags"]; found { + i := 0 + for k, vv := range v.(map[string]string) { + u.Set(fmt.Sprintf("tags[%d].key", i), k) + u.Set(fmt.Sprintf("tags[%d].value", i), vv) + i++ + } + } + if v, found := p.p["traffictype"]; found { + u.Set("traffictype", v.(string)) + } + if v, found := p.p["type"]; found { + u.Set("type", v.(string)) + } + if v, found := p.p["vpcid"]; found { + u.Set("vpcid", v.(string)) + } + if v, found := p.p["zoneid"]; found { + u.Set("zoneid", v.(string)) + } + return u +} + +func (p *ListNetworksParams) SetAccount(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["account"] = v + return +} + +func (p *ListNetworksParams) SetAcltype(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["acltype"] = v + return +} + +func (p *ListNetworksParams) SetCanusefordeploy(v bool) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["canusefordeploy"] = v + return +} + +func (p *ListNetworksParams) SetDisplaynetwork(v bool) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["displaynetwork"] = v + return +} + +func (p *ListNetworksParams) SetDomainid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["domainid"] = v + return +} + +func (p *ListNetworksParams) SetForvpc(v bool) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["forvpc"] = v + return +} + +func (p *ListNetworksParams) SetId(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["id"] = v + return +} + +func (p *ListNetworksParams) SetIsrecursive(v bool) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["isrecursive"] = v + return +} + +func (p *ListNetworksParams) SetIssystem(v bool) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["issystem"] = v + return +} + +func (p *ListNetworksParams) SetKeyword(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["keyword"] = v + return +} + +func (p *ListNetworksParams) SetListall(v bool) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["listall"] = v + return +} + +func (p *ListNetworksParams) SetPage(v int) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["page"] = v + return +} + +func (p *ListNetworksParams) SetPagesize(v int) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["pagesize"] = v + return +} + +func (p *ListNetworksParams) SetPhysicalnetworkid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["physicalnetworkid"] 
= v + return +} + +func (p *ListNetworksParams) SetProjectid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["projectid"] = v + return +} + +func (p *ListNetworksParams) SetRestartrequired(v bool) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["restartrequired"] = v + return +} + +func (p *ListNetworksParams) SetSpecifyipranges(v bool) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["specifyipranges"] = v + return +} + +func (p *ListNetworksParams) SetSupportedservices(v []string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["supportedservices"] = v + return +} + +func (p *ListNetworksParams) SetTags(v map[string]string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["tags"] = v + return +} + +func (p *ListNetworksParams) SetTraffictype(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["traffictype"] = v + return +} + +func (p *ListNetworksParams) SetType(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["type"] = v + return +} + +func (p *ListNetworksParams) SetVpcid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["vpcid"] = v + return +} + +func (p *ListNetworksParams) SetZoneid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["zoneid"] = v + return +} + +// You should always use this function to get a new ListNetworksParams instance, +// as then you are sure you have configured all required params +func (s *NetworkService) NewListNetworksParams() *ListNetworksParams { + p := &ListNetworksParams{} + p.p = make(map[string]interface{}) + return p +} + +// This is a courtesy helper function, which in some cases may not work as expected! +func (s *NetworkService) GetNetworkID(keyword string) (string, error) { + p := &ListNetworksParams{} + p.p = make(map[string]interface{}) + + p.p["keyword"] = keyword + + l, err := s.ListNetworks(p) + if err != nil { + return "", err + } + + if l.Count == 0 { + // If no matches, search all projects + p.p["projectid"] = "-1" + + l, err = s.ListNetworks(p) + if err != nil { + return "", err + } + } + + if l.Count == 0 { + return "", fmt.Errorf("No match found for %s: %+v", keyword, l) + } + + if l.Count == 1 { + return l.Networks[0].Id, nil + } + + if l.Count > 1 { + for _, v := range l.Networks { + if v.Name == keyword { + return v.Id, nil + } + } + } + return "", fmt.Errorf("Could not find an exact match for %s: %+v", keyword, l) +} + +// This is a courtesy helper function, which in some cases may not work as expected! +func (s *NetworkService) GetNetworkByName(name string) (*Network, int, error) { + id, err := s.GetNetworkID(name) + if err != nil { + return nil, -1, err + } + + r, count, err := s.GetNetworkByID(id) + if err != nil { + return nil, count, err + } + return r, count, nil +} + +// This is a courtesy helper function, which in some cases may not work as expected!
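+// Example (sketch, assuming client `cs` and a hypothetical UUID): fetching a
+// single network; the returned count mirrors the list response and is 1 on an
+// exact hit.
+//
+//	n, count, err := cs.Network.GetNetworkByID("3f76833a-example-uuid")
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	fmt.Println(count, n.Name, n.Cidr)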
+func (s *NetworkService) GetNetworkByID(id string) (*Network, int, error) { + p := &ListNetworksParams{} + p.p = make(map[string]interface{}) + + p.p["id"] = id + + l, err := s.ListNetworks(p) + if err != nil { + if strings.Contains(err.Error(), fmt.Sprintf( + "Invalid parameter id value=%s due to incorrect long value format, "+ + "or entity does not exist", id)) { + return nil, 0, fmt.Errorf("No match found for %s: %+v", id, l) + } + return nil, -1, err + } + + if l.Count == 0 { + // If no matches, search all projects + p.p["projectid"] = "-1" + + l, err = s.ListNetworks(p) + if err != nil { + if strings.Contains(err.Error(), fmt.Sprintf( + "Invalid parameter id value=%s due to incorrect long value format, "+ + "or entity does not exist", id)) { + return nil, 0, fmt.Errorf("No match found for %s: %+v", id, l) + } + return nil, -1, err + } + } + + if l.Count == 0 { + return nil, l.Count, fmt.Errorf("No match found for %s: %+v", id, l) + } + + if l.Count == 1 { + return l.Networks[0], l.Count, nil + } + return nil, l.Count, fmt.Errorf("There is more than one result for Network UUID: %s!", id) +} + +// Lists all available networks. +func (s *NetworkService) ListNetworks(p *ListNetworksParams) (*ListNetworksResponse, error) { + resp, err := s.cs.newRequest("listNetworks", p.toURLValues()) + if err != nil { + return nil, err + } + + var r ListNetworksResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + return &r, nil +} + +type ListNetworksResponse struct { + Count int `json:"count"` + Networks []*Network `json:"network"` +} + +type Network struct { + Account string `json:"account,omitempty"` + Aclid string `json:"aclid,omitempty"` + Acltype string `json:"acltype,omitempty"` + Broadcastdomaintype string `json:"broadcastdomaintype,omitempty"` + Broadcasturi string `json:"broadcasturi,omitempty"` + Canusefordeploy bool `json:"canusefordeploy,omitempty"` + Cidr string `json:"cidr,omitempty"` + Displaynetwork bool `json:"displaynetwork,omitempty"` + Displaytext string `json:"displaytext,omitempty"` + Dns1 string `json:"dns1,omitempty"` + Dns2 string `json:"dns2,omitempty"` + Domain string `json:"domain,omitempty"` + Domainid string `json:"domainid,omitempty"` + Gateway string `json:"gateway,omitempty"` + Id string `json:"id,omitempty"` + Ip6cidr string `json:"ip6cidr,omitempty"` + Ip6gateway string `json:"ip6gateway,omitempty"` + Isdefault bool `json:"isdefault,omitempty"` + Ispersistent bool `json:"ispersistent,omitempty"` + Issystem bool `json:"issystem,omitempty"` + Name string `json:"name,omitempty"` + Netmask string `json:"netmask,omitempty"` + Networkcidr string `json:"networkcidr,omitempty"` + Networkdomain string `json:"networkdomain,omitempty"` + Networkofferingavailability string `json:"networkofferingavailability,omitempty"` + Networkofferingconservemode bool `json:"networkofferingconservemode,omitempty"` + Networkofferingdisplaytext string `json:"networkofferingdisplaytext,omitempty"` + Networkofferingid string `json:"networkofferingid,omitempty"` + Networkofferingname string `json:"networkofferingname,omitempty"` + Physicalnetworkid string `json:"physicalnetworkid,omitempty"` + Project string `json:"project,omitempty"` + Projectid string `json:"projectid,omitempty"` + Related string `json:"related,omitempty"` + Reservediprange string `json:"reservediprange,omitempty"` + Restartrequired bool `json:"restartrequired,omitempty"` + Service []struct { + Capability []struct { + Canchooseservicecapability bool `json:"canchooseservicecapability,omitempty"` + 
Name string `json:"name,omitempty"` + Value string `json:"value,omitempty"` + } `json:"capability,omitempty"` + Name string `json:"name,omitempty"` + Provider []struct { + Canenableindividualservice bool `json:"canenableindividualservice,omitempty"` + Destinationphysicalnetworkid string `json:"destinationphysicalnetworkid,omitempty"` + Id string `json:"id,omitempty"` + Name string `json:"name,omitempty"` + Physicalnetworkid string `json:"physicalnetworkid,omitempty"` + Servicelist []string `json:"servicelist,omitempty"` + State string `json:"state,omitempty"` + } `json:"provider,omitempty"` + } `json:"service,omitempty"` + Specifyipranges bool `json:"specifyipranges,omitempty"` + State string `json:"state,omitempty"` + Strechedl2subnet bool `json:"strechedl2subnet,omitempty"` + Subdomainaccess bool `json:"subdomainaccess,omitempty"` + Tags []struct { + Account string `json:"account,omitempty"` + Customer string `json:"customer,omitempty"` + Domain string `json:"domain,omitempty"` + Domainid string `json:"domainid,omitempty"` + Key string `json:"key,omitempty"` + Project string `json:"project,omitempty"` + Projectid string `json:"projectid,omitempty"` + Resourceid string `json:"resourceid,omitempty"` + Resourcetype string `json:"resourcetype,omitempty"` + Value string `json:"value,omitempty"` + } `json:"tags,omitempty"` + Traffictype string `json:"traffictype,omitempty"` + Type string `json:"type,omitempty"` + Vlan string `json:"vlan,omitempty"` + Vpcid string `json:"vpcid,omitempty"` + Zoneid string `json:"zoneid,omitempty"` + Zonename string `json:"zonename,omitempty"` + Zonesnetworkspans []string `json:"zonesnetworkspans,omitempty"` +} + +type RestartNetworkParams struct { + p map[string]interface{} +} + +func (p *RestartNetworkParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["cleanup"]; found { + vv := strconv.FormatBool(v.(bool)) + u.Set("cleanup", vv) + } + if v, found := p.p["id"]; found { + u.Set("id", v.(string)) + } + return u +} + +func (p *RestartNetworkParams) SetCleanup(v bool) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["cleanup"] = v + return +} + +func (p *RestartNetworkParams) SetId(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["id"] = v + return +} + +// You should always use this function to get a new RestartNetworkParams instance, +// as then you are sure you have configured all required params +func (s *NetworkService) NewRestartNetworkParams(id string) *RestartNetworkParams { + p := &RestartNetworkParams{} + p.p = make(map[string]interface{}) + p.p["id"] = id + return p +} + +// Restarts the network; includes 1) restarting network elements - virtual routers, dhcp servers 2) reapplying all public ips 3) reapplying loadBalancing/portForwarding rules +func (s *NetworkService) RestartNetwork(p *RestartNetworkParams) (*RestartNetworkResponse, error) { + resp, err := s.cs.newRequest("restartNetwork", p.toURLValues()) + if err != nil { + return nil, err + } + + var r RestartNetworkResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + + // If we have an async client, we need to wait for the async result + if s.cs.async { + b, err := s.cs.GetAsyncJobResult(r.JobID, s.cs.timeout) + if err != nil { + if err == AsyncTimeoutErr { + return &r, err + } + return nil, err + } + + b, err = getRawValue(b) + if err != nil { + return nil, err + } + + if err := json.Unmarshal(b, &r); err != nil { + return nil, err + } + } + return &r, nil +} + +type 
RestartNetworkResponse struct { + JobID string `json:"jobid,omitempty"` + Account string `json:"account,omitempty"` + Allocated string `json:"allocated,omitempty"` + Associatednetworkid string `json:"associatednetworkid,omitempty"` + Associatednetworkname string `json:"associatednetworkname,omitempty"` + Domain string `json:"domain,omitempty"` + Domainid string `json:"domainid,omitempty"` + Fordisplay bool `json:"fordisplay,omitempty"` + Forvirtualnetwork bool `json:"forvirtualnetwork,omitempty"` + Id string `json:"id,omitempty"` + Ipaddress string `json:"ipaddress,omitempty"` + Isportable bool `json:"isportable,omitempty"` + Issourcenat bool `json:"issourcenat,omitempty"` + Isstaticnat bool `json:"isstaticnat,omitempty"` + Issystem bool `json:"issystem,omitempty"` + Networkid string `json:"networkid,omitempty"` + Physicalnetworkid string `json:"physicalnetworkid,omitempty"` + Project string `json:"project,omitempty"` + Projectid string `json:"projectid,omitempty"` + Purpose string `json:"purpose,omitempty"` + State string `json:"state,omitempty"` + Tags []struct { + Account string `json:"account,omitempty"` + Customer string `json:"customer,omitempty"` + Domain string `json:"domain,omitempty"` + Domainid string `json:"domainid,omitempty"` + Key string `json:"key,omitempty"` + Project string `json:"project,omitempty"` + Projectid string `json:"projectid,omitempty"` + Resourceid string `json:"resourceid,omitempty"` + Resourcetype string `json:"resourcetype,omitempty"` + Value string `json:"value,omitempty"` + } `json:"tags,omitempty"` + Virtualmachinedisplayname string `json:"virtualmachinedisplayname,omitempty"` + Virtualmachineid string `json:"virtualmachineid,omitempty"` + Virtualmachinename string `json:"virtualmachinename,omitempty"` + Vlanid string `json:"vlanid,omitempty"` + Vlanname string `json:"vlanname,omitempty"` + Vmipaddress string `json:"vmipaddress,omitempty"` + Vpcid string `json:"vpcid,omitempty"` + Zoneid string `json:"zoneid,omitempty"` + Zonename string `json:"zonename,omitempty"` +} + +type UpdateNetworkParams struct { + p map[string]interface{} +} + +func (p *UpdateNetworkParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["changecidr"]; found { + vv := strconv.FormatBool(v.(bool)) + u.Set("changecidr", vv) + } + if v, found := p.p["customid"]; found { + u.Set("customid", v.(string)) + } + if v, found := p.p["displaynetwork"]; found { + vv := strconv.FormatBool(v.(bool)) + u.Set("displaynetwork", vv) + } + if v, found := p.p["displaytext"]; found { + u.Set("displaytext", v.(string)) + } + if v, found := p.p["guestvmcidr"]; found { + u.Set("guestvmcidr", v.(string)) + } + if v, found := p.p["id"]; found { + u.Set("id", v.(string)) + } + if v, found := p.p["name"]; found { + u.Set("name", v.(string)) + } + if v, found := p.p["networkdomain"]; found { + u.Set("networkdomain", v.(string)) + } + if v, found := p.p["networkofferingid"]; found { + u.Set("networkofferingid", v.(string)) + } + return u +} + +func (p *UpdateNetworkParams) SetChangecidr(v bool) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["changecidr"] = v + return +} + +func (p *UpdateNetworkParams) SetCustomid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["customid"] = v + return +} + +func (p *UpdateNetworkParams) SetDisplaynetwork(v bool) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["displaynetwork"] = v + return +} + +func (p *UpdateNetworkParams) SetDisplaytext(v string) { + 
if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["displaytext"] = v + return +} + +func (p *UpdateNetworkParams) SetGuestvmcidr(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["guestvmcidr"] = v + return +} + +func (p *UpdateNetworkParams) SetId(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["id"] = v + return +} + +func (p *UpdateNetworkParams) SetName(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["name"] = v + return +} + +func (p *UpdateNetworkParams) SetNetworkdomain(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["networkdomain"] = v + return +} + +func (p *UpdateNetworkParams) SetNetworkofferingid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["networkofferingid"] = v + return +} + +// You should always use this function to get a new UpdateNetworkParams instance, +// as then you are sure you have configured all required params +func (s *NetworkService) NewUpdateNetworkParams(id string) *UpdateNetworkParams { + p := &UpdateNetworkParams{} + p.p = make(map[string]interface{}) + p.p["id"] = id + return p +} + +// Updates a network +func (s *NetworkService) UpdateNetwork(p *UpdateNetworkParams) (*UpdateNetworkResponse, error) { + resp, err := s.cs.newRequest("updateNetwork", p.toURLValues()) + if err != nil { + return nil, err + } + + var r UpdateNetworkResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + + // If we have an async client, we need to wait for the async result + if s.cs.async { + b, err := s.cs.GetAsyncJobResult(r.JobID, s.cs.timeout) + if err != nil { + if err == AsyncTimeoutErr { + return &r, err + } + return nil, err + } + + b, err = getRawValue(b) + if err != nil { + return nil, err + } + + if err := json.Unmarshal(b, &r); err != nil { + return nil, err + } + } + return &r, nil +} + +type UpdateNetworkResponse struct { + JobID string `json:"jobid,omitempty"` + Account string `json:"account,omitempty"` + Aclid string `json:"aclid,omitempty"` + Acltype string `json:"acltype,omitempty"` + Broadcastdomaintype string `json:"broadcastdomaintype,omitempty"` + Broadcasturi string `json:"broadcasturi,omitempty"` + Canusefordeploy bool `json:"canusefordeploy,omitempty"` + Cidr string `json:"cidr,omitempty"` + Displaynetwork bool `json:"displaynetwork,omitempty"` + Displaytext string `json:"displaytext,omitempty"` + Dns1 string `json:"dns1,omitempty"` + Dns2 string `json:"dns2,omitempty"` + Domain string `json:"domain,omitempty"` + Domainid string `json:"domainid,omitempty"` + Gateway string `json:"gateway,omitempty"` + Id string `json:"id,omitempty"` + Ip6cidr string `json:"ip6cidr,omitempty"` + Ip6gateway string `json:"ip6gateway,omitempty"` + Isdefault bool `json:"isdefault,omitempty"` + Ispersistent bool `json:"ispersistent,omitempty"` + Issystem bool `json:"issystem,omitempty"` + Name string `json:"name,omitempty"` + Netmask string `json:"netmask,omitempty"` + Networkcidr string `json:"networkcidr,omitempty"` + Networkdomain string `json:"networkdomain,omitempty"` + Networkofferingavailability string `json:"networkofferingavailability,omitempty"` + Networkofferingconservemode bool `json:"networkofferingconservemode,omitempty"` + Networkofferingdisplaytext string `json:"networkofferingdisplaytext,omitempty"` + Networkofferingid string `json:"networkofferingid,omitempty"` + Networkofferingname string `json:"networkofferingname,omitempty"` + Physicalnetworkid string 
`json:"physicalnetworkid,omitempty"` + Project string `json:"project,omitempty"` + Projectid string `json:"projectid,omitempty"` + Related string `json:"related,omitempty"` + Reservediprange string `json:"reservediprange,omitempty"` + Restartrequired bool `json:"restartrequired,omitempty"` + Service []struct { + Capability []struct { + Canchooseservicecapability bool `json:"canchooseservicecapability,omitempty"` + Name string `json:"name,omitempty"` + Value string `json:"value,omitempty"` + } `json:"capability,omitempty"` + Name string `json:"name,omitempty"` + Provider []struct { + Canenableindividualservice bool `json:"canenableindividualservice,omitempty"` + Destinationphysicalnetworkid string `json:"destinationphysicalnetworkid,omitempty"` + Id string `json:"id,omitempty"` + Name string `json:"name,omitempty"` + Physicalnetworkid string `json:"physicalnetworkid,omitempty"` + Servicelist []string `json:"servicelist,omitempty"` + State string `json:"state,omitempty"` + } `json:"provider,omitempty"` + } `json:"service,omitempty"` + Specifyipranges bool `json:"specifyipranges,omitempty"` + State string `json:"state,omitempty"` + Strechedl2subnet bool `json:"strechedl2subnet,omitempty"` + Subdomainaccess bool `json:"subdomainaccess,omitempty"` + Tags []struct { + Account string `json:"account,omitempty"` + Customer string `json:"customer,omitempty"` + Domain string `json:"domain,omitempty"` + Domainid string `json:"domainid,omitempty"` + Key string `json:"key,omitempty"` + Project string `json:"project,omitempty"` + Projectid string `json:"projectid,omitempty"` + Resourceid string `json:"resourceid,omitempty"` + Resourcetype string `json:"resourcetype,omitempty"` + Value string `json:"value,omitempty"` + } `json:"tags,omitempty"` + Traffictype string `json:"traffictype,omitempty"` + Type string `json:"type,omitempty"` + Vlan string `json:"vlan,omitempty"` + Vpcid string `json:"vpcid,omitempty"` + Zoneid string `json:"zoneid,omitempty"` + Zonename string `json:"zonename,omitempty"` + Zonesnetworkspans []string `json:"zonesnetworkspans,omitempty"` +} + +type CreatePhysicalNetworkParams struct { + p map[string]interface{} +} + +func (p *CreatePhysicalNetworkParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["broadcastdomainrange"]; found { + u.Set("broadcastdomainrange", v.(string)) + } + if v, found := p.p["domainid"]; found { + u.Set("domainid", v.(string)) + } + if v, found := p.p["isolationmethods"]; found { + vv := strings.Join(v.([]string), ",") + u.Set("isolationmethods", vv) + } + if v, found := p.p["name"]; found { + u.Set("name", v.(string)) + } + if v, found := p.p["networkspeed"]; found { + u.Set("networkspeed", v.(string)) + } + if v, found := p.p["tags"]; found { + vv := strings.Join(v.([]string), ",") + u.Set("tags", vv) + } + if v, found := p.p["vlan"]; found { + u.Set("vlan", v.(string)) + } + if v, found := p.p["zoneid"]; found { + u.Set("zoneid", v.(string)) + } + return u +} + +func (p *CreatePhysicalNetworkParams) SetBroadcastdomainrange(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["broadcastdomainrange"] = v + return +} + +func (p *CreatePhysicalNetworkParams) SetDomainid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["domainid"] = v + return +} + +func (p *CreatePhysicalNetworkParams) SetIsolationmethods(v []string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["isolationmethods"] = v + return +} + +func (p 
*CreatePhysicalNetworkParams) SetName(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["name"] = v + return +} + +func (p *CreatePhysicalNetworkParams) SetNetworkspeed(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["networkspeed"] = v + return +} + +func (p *CreatePhysicalNetworkParams) SetTags(v []string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["tags"] = v + return +} + +func (p *CreatePhysicalNetworkParams) SetVlan(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["vlan"] = v + return +} + +func (p *CreatePhysicalNetworkParams) SetZoneid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["zoneid"] = v + return +} + +// You should always use this function to get a new CreatePhysicalNetworkParams instance, +// as then you are sure you have configured all required params +func (s *NetworkService) NewCreatePhysicalNetworkParams(name string, zoneid string) *CreatePhysicalNetworkParams { + p := &CreatePhysicalNetworkParams{} + p.p = make(map[string]interface{}) + p.p["name"] = name + p.p["zoneid"] = zoneid + return p +} + +// Creates a physical network +func (s *NetworkService) CreatePhysicalNetwork(p *CreatePhysicalNetworkParams) (*CreatePhysicalNetworkResponse, error) { + resp, err := s.cs.newRequest("createPhysicalNetwork", p.toURLValues()) + if err != nil { + return nil, err + } + + var r CreatePhysicalNetworkResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + + // If we have a async client, we need to wait for the async result + if s.cs.async { + b, err := s.cs.GetAsyncJobResult(r.JobID, s.cs.timeout) + if err != nil { + if err == AsyncTimeoutErr { + return &r, err + } + return nil, err + } + + b, err = getRawValue(b) + if err != nil { + return nil, err + } + + if err := json.Unmarshal(b, &r); err != nil { + return nil, err + } + } + return &r, nil +} + +type CreatePhysicalNetworkResponse struct { + JobID string `json:"jobid,omitempty"` + Broadcastdomainrange string `json:"broadcastdomainrange,omitempty"` + Domainid string `json:"domainid,omitempty"` + Id string `json:"id,omitempty"` + Isolationmethods string `json:"isolationmethods,omitempty"` + Name string `json:"name,omitempty"` + Networkspeed string `json:"networkspeed,omitempty"` + State string `json:"state,omitempty"` + Tags string `json:"tags,omitempty"` + Vlan string `json:"vlan,omitempty"` + Zoneid string `json:"zoneid,omitempty"` +} + +type DeletePhysicalNetworkParams struct { + p map[string]interface{} +} + +func (p *DeletePhysicalNetworkParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["id"]; found { + u.Set("id", v.(string)) + } + return u +} + +func (p *DeletePhysicalNetworkParams) SetId(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["id"] = v + return +} + +// You should always use this function to get a new DeletePhysicalNetworkParams instance, +// as then you are sure you have configured all required params +func (s *NetworkService) NewDeletePhysicalNetworkParams(id string) *DeletePhysicalNetworkParams { + p := &DeletePhysicalNetworkParams{} + p.p = make(map[string]interface{}) + p.p["id"] = id + return p +} + +// Deletes a Physical Network. 
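+// A minimal usage sketch (editor's illustration, not part of the upstream
+// file); it assumes an initialized client that exposes this service as
+// cs.Network, and the id string is a placeholder:
+//
+//	p := cs.Network.NewDeletePhysicalNetworkParams("<physical-network-id>")
+//	r, err := cs.Network.DeletePhysicalNetwork(p)
+//	if err != nil {
+//		if err == AsyncTimeoutErr {
+//			// Async client timed out; r.JobID can still be polled later.
+//		}
+//		return err
+//	}
+//	fmt.Printf("deleted: %t\n", r.Success)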
+func (s *NetworkService) DeletePhysicalNetwork(p *DeletePhysicalNetworkParams) (*DeletePhysicalNetworkResponse, error) { + resp, err := s.cs.newRequest("deletePhysicalNetwork", p.toURLValues()) + if err != nil { + return nil, err + } + + var r DeletePhysicalNetworkResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + + // If we have a async client, we need to wait for the async result + if s.cs.async { + b, err := s.cs.GetAsyncJobResult(r.JobID, s.cs.timeout) + if err != nil { + if err == AsyncTimeoutErr { + return &r, err + } + return nil, err + } + + if err := json.Unmarshal(b, &r); err != nil { + return nil, err + } + } + return &r, nil +} + +type DeletePhysicalNetworkResponse struct { + JobID string `json:"jobid,omitempty"` + Displaytext string `json:"displaytext,omitempty"` + Success bool `json:"success,omitempty"` +} + +type ListPhysicalNetworksParams struct { + p map[string]interface{} +} + +func (p *ListPhysicalNetworksParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["id"]; found { + u.Set("id", v.(string)) + } + if v, found := p.p["keyword"]; found { + u.Set("keyword", v.(string)) + } + if v, found := p.p["name"]; found { + u.Set("name", v.(string)) + } + if v, found := p.p["page"]; found { + vv := strconv.Itoa(v.(int)) + u.Set("page", vv) + } + if v, found := p.p["pagesize"]; found { + vv := strconv.Itoa(v.(int)) + u.Set("pagesize", vv) + } + if v, found := p.p["zoneid"]; found { + u.Set("zoneid", v.(string)) + } + return u +} + +func (p *ListPhysicalNetworksParams) SetId(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["id"] = v + return +} + +func (p *ListPhysicalNetworksParams) SetKeyword(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["keyword"] = v + return +} + +func (p *ListPhysicalNetworksParams) SetName(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["name"] = v + return +} + +func (p *ListPhysicalNetworksParams) SetPage(v int) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["page"] = v + return +} + +func (p *ListPhysicalNetworksParams) SetPagesize(v int) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["pagesize"] = v + return +} + +func (p *ListPhysicalNetworksParams) SetZoneid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["zoneid"] = v + return +} + +// You should always use this function to get a new ListPhysicalNetworksParams instance, +// as then you are sure you have configured all required params +func (s *NetworkService) NewListPhysicalNetworksParams() *ListPhysicalNetworksParams { + p := &ListPhysicalNetworksParams{} + p.p = make(map[string]interface{}) + return p +} + +// This is a courtesy helper function, which in some cases may not work as expected! 
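+// Sketch of the intended use (editor's illustration; the network name is
+// hypothetical): resolve a human-readable name to its UUID before making
+// other calls.
+//
+//	id, err := s.Network.GetPhysicalNetworkID("physnet1")
+//	if err != nil {
+//		return err // no match, or several matches without an exact name hit
+//	}
+//	net, count, err := s.Network.GetPhysicalNetworkByID(id)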
+func (s *NetworkService) GetPhysicalNetworkID(name string) (string, error) { + p := &ListPhysicalNetworksParams{} + p.p = make(map[string]interface{}) + + p.p["name"] = name + + l, err := s.ListPhysicalNetworks(p) + if err != nil { + return "", err + } + + if l.Count == 0 { + return "", fmt.Errorf("No match found for %s: %+v", name, l) + } + + if l.Count == 1 { + return l.PhysicalNetworks[0].Id, nil + } + + if l.Count > 1 { + for _, v := range l.PhysicalNetworks { + if v.Name == name { + return v.Id, nil + } + } + } + return "", fmt.Errorf("Could not find an exact match for %s: %+v", name, l) +} + +// This is a courtesy helper function, which in some cases may not work as expected! +func (s *NetworkService) GetPhysicalNetworkByName(name string) (*PhysicalNetwork, int, error) { + id, err := s.GetPhysicalNetworkID(name) + if err != nil { + return nil, -1, err + } + + r, count, err := s.GetPhysicalNetworkByID(id) + if err != nil { + return nil, count, err + } + return r, count, nil +} + +// This is a courtesy helper function, which in some cases may not work as expected! +func (s *NetworkService) GetPhysicalNetworkByID(id string) (*PhysicalNetwork, int, error) { + p := &ListPhysicalNetworksParams{} + p.p = make(map[string]interface{}) + + p.p["id"] = id + + l, err := s.ListPhysicalNetworks(p) + if err != nil { + if strings.Contains(err.Error(), fmt.Sprintf( + "Invalid parameter id value=%s due to incorrect long value format, "+ + "or entity does not exist", id)) { + return nil, 0, fmt.Errorf("No match found for %s: %+v", id, l) + } + return nil, -1, err + } + + if l.Count == 0 { + return nil, l.Count, fmt.Errorf("No match found for %s: %+v", id, l) + } + + if l.Count == 1 { + return l.PhysicalNetworks[0], l.Count, nil + } + return nil, l.Count, fmt.Errorf("There is more then one result for PhysicalNetwork UUID: %s!", id) +} + +// Lists physical networks +func (s *NetworkService) ListPhysicalNetworks(p *ListPhysicalNetworksParams) (*ListPhysicalNetworksResponse, error) { + resp, err := s.cs.newRequest("listPhysicalNetworks", p.toURLValues()) + if err != nil { + return nil, err + } + + var r ListPhysicalNetworksResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + return &r, nil +} + +type ListPhysicalNetworksResponse struct { + Count int `json:"count"` + PhysicalNetworks []*PhysicalNetwork `json:"physicalnetwork"` +} + +type PhysicalNetwork struct { + Broadcastdomainrange string `json:"broadcastdomainrange,omitempty"` + Domainid string `json:"domainid,omitempty"` + Id string `json:"id,omitempty"` + Isolationmethods string `json:"isolationmethods,omitempty"` + Name string `json:"name,omitempty"` + Networkspeed string `json:"networkspeed,omitempty"` + State string `json:"state,omitempty"` + Tags string `json:"tags,omitempty"` + Vlan string `json:"vlan,omitempty"` + Zoneid string `json:"zoneid,omitempty"` +} + +type UpdatePhysicalNetworkParams struct { + p map[string]interface{} +} + +func (p *UpdatePhysicalNetworkParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["id"]; found { + u.Set("id", v.(string)) + } + if v, found := p.p["networkspeed"]; found { + u.Set("networkspeed", v.(string)) + } + if v, found := p.p["state"]; found { + u.Set("state", v.(string)) + } + if v, found := p.p["tags"]; found { + vv := strings.Join(v.([]string), ",") + u.Set("tags", vv) + } + if v, found := p.p["vlan"]; found { + u.Set("vlan", v.(string)) + } + return u +} + +func (p *UpdatePhysicalNetworkParams) SetId(v string) { + if 
p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["id"] = v + return +} + +func (p *UpdatePhysicalNetworkParams) SetNetworkspeed(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["networkspeed"] = v + return +} + +func (p *UpdatePhysicalNetworkParams) SetState(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["state"] = v + return +} + +func (p *UpdatePhysicalNetworkParams) SetTags(v []string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["tags"] = v + return +} + +func (p *UpdatePhysicalNetworkParams) SetVlan(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["vlan"] = v + return +} + +// You should always use this function to get a new UpdatePhysicalNetworkParams instance, +// as then you are sure you have configured all required params +func (s *NetworkService) NewUpdatePhysicalNetworkParams(id string) *UpdatePhysicalNetworkParams { + p := &UpdatePhysicalNetworkParams{} + p.p = make(map[string]interface{}) + p.p["id"] = id + return p +} + +// Updates a physical network +func (s *NetworkService) UpdatePhysicalNetwork(p *UpdatePhysicalNetworkParams) (*UpdatePhysicalNetworkResponse, error) { + resp, err := s.cs.newRequest("updatePhysicalNetwork", p.toURLValues()) + if err != nil { + return nil, err + } + + var r UpdatePhysicalNetworkResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + + // If we have a async client, we need to wait for the async result + if s.cs.async { + b, err := s.cs.GetAsyncJobResult(r.JobID, s.cs.timeout) + if err != nil { + if err == AsyncTimeoutErr { + return &r, err + } + return nil, err + } + + b, err = getRawValue(b) + if err != nil { + return nil, err + } + + if err := json.Unmarshal(b, &r); err != nil { + return nil, err + } + } + return &r, nil +} + +type UpdatePhysicalNetworkResponse struct { + JobID string `json:"jobid,omitempty"` + Broadcastdomainrange string `json:"broadcastdomainrange,omitempty"` + Domainid string `json:"domainid,omitempty"` + Id string `json:"id,omitempty"` + Isolationmethods string `json:"isolationmethods,omitempty"` + Name string `json:"name,omitempty"` + Networkspeed string `json:"networkspeed,omitempty"` + State string `json:"state,omitempty"` + Tags string `json:"tags,omitempty"` + Vlan string `json:"vlan,omitempty"` + Zoneid string `json:"zoneid,omitempty"` +} + +type ListSupportedNetworkServicesParams struct { + p map[string]interface{} +} + +func (p *ListSupportedNetworkServicesParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["keyword"]; found { + u.Set("keyword", v.(string)) + } + if v, found := p.p["page"]; found { + vv := strconv.Itoa(v.(int)) + u.Set("page", vv) + } + if v, found := p.p["pagesize"]; found { + vv := strconv.Itoa(v.(int)) + u.Set("pagesize", vv) + } + if v, found := p.p["provider"]; found { + u.Set("provider", v.(string)) + } + if v, found := p.p["service"]; found { + u.Set("service", v.(string)) + } + return u +} + +func (p *ListSupportedNetworkServicesParams) SetKeyword(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["keyword"] = v + return +} + +func (p *ListSupportedNetworkServicesParams) SetPage(v int) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["page"] = v + return +} + +func (p *ListSupportedNetworkServicesParams) SetPagesize(v int) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["pagesize"] = v + return +} + +func (p 
*ListSupportedNetworkServicesParams) SetProvider(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["provider"] = v + return +} + +func (p *ListSupportedNetworkServicesParams) SetService(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["service"] = v + return +} + +// You should always use this function to get a new ListSupportedNetworkServicesParams instance, +// as then you are sure you have configured all required params +func (s *NetworkService) NewListSupportedNetworkServicesParams() *ListSupportedNetworkServicesParams { + p := &ListSupportedNetworkServicesParams{} + p.p = make(map[string]interface{}) + return p +} + +// Lists all network services provided by CloudStack or for the given Provider. +func (s *NetworkService) ListSupportedNetworkServices(p *ListSupportedNetworkServicesParams) (*ListSupportedNetworkServicesResponse, error) { + resp, err := s.cs.newRequest("listSupportedNetworkServices", p.toURLValues()) + if err != nil { + return nil, err + } + + var r ListSupportedNetworkServicesResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + return &r, nil +} + +type ListSupportedNetworkServicesResponse struct { + Count int `json:"count"` + SupportedNetworkServices []*SupportedNetworkService `json:"supportednetworkservice"` +} + +type SupportedNetworkService struct { + Capability []struct { + Canchooseservicecapability bool `json:"canchooseservicecapability,omitempty"` + Name string `json:"name,omitempty"` + Value string `json:"value,omitempty"` + } `json:"capability,omitempty"` + Name string `json:"name,omitempty"` + Provider []struct { + Canenableindividualservice bool `json:"canenableindividualservice,omitempty"` + Destinationphysicalnetworkid string `json:"destinationphysicalnetworkid,omitempty"` + Id string `json:"id,omitempty"` + Name string `json:"name,omitempty"` + Physicalnetworkid string `json:"physicalnetworkid,omitempty"` + Servicelist []string `json:"servicelist,omitempty"` + State string `json:"state,omitempty"` + } `json:"provider,omitempty"` +} + +type AddNetworkServiceProviderParams struct { + p map[string]interface{} +} + +func (p *AddNetworkServiceProviderParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["destinationphysicalnetworkid"]; found { + u.Set("destinationphysicalnetworkid", v.(string)) + } + if v, found := p.p["name"]; found { + u.Set("name", v.(string)) + } + if v, found := p.p["physicalnetworkid"]; found { + u.Set("physicalnetworkid", v.(string)) + } + if v, found := p.p["servicelist"]; found { + vv := strings.Join(v.([]string), ",") + u.Set("servicelist", vv) + } + return u +} + +func (p *AddNetworkServiceProviderParams) SetDestinationphysicalnetworkid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["destinationphysicalnetworkid"] = v + return +} + +func (p *AddNetworkServiceProviderParams) SetName(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["name"] = v + return +} + +func (p *AddNetworkServiceProviderParams) SetPhysicalnetworkid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["physicalnetworkid"] = v + return +} + +func (p *AddNetworkServiceProviderParams) SetServicelist(v []string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["servicelist"] = v + return +} + +// You should always use this function to get a new AddNetworkServiceProviderParams instance, +// as then you are sure you have configured 
all required params +func (s *NetworkService) NewAddNetworkServiceProviderParams(name string, physicalnetworkid string) *AddNetworkServiceProviderParams { + p := &AddNetworkServiceProviderParams{} + p.p = make(map[string]interface{}) + p.p["name"] = name + p.p["physicalnetworkid"] = physicalnetworkid + return p +} + +// Adds a network serviceProvider to a physical network +func (s *NetworkService) AddNetworkServiceProvider(p *AddNetworkServiceProviderParams) (*AddNetworkServiceProviderResponse, error) { + resp, err := s.cs.newRequest("addNetworkServiceProvider", p.toURLValues()) + if err != nil { + return nil, err + } + + var r AddNetworkServiceProviderResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + + // If we have a async client, we need to wait for the async result + if s.cs.async { + b, err := s.cs.GetAsyncJobResult(r.JobID, s.cs.timeout) + if err != nil { + if err == AsyncTimeoutErr { + return &r, err + } + return nil, err + } + + b, err = getRawValue(b) + if err != nil { + return nil, err + } + + if err := json.Unmarshal(b, &r); err != nil { + return nil, err + } + } + return &r, nil +} + +type AddNetworkServiceProviderResponse struct { + JobID string `json:"jobid,omitempty"` + Canenableindividualservice bool `json:"canenableindividualservice,omitempty"` + Destinationphysicalnetworkid string `json:"destinationphysicalnetworkid,omitempty"` + Id string `json:"id,omitempty"` + Name string `json:"name,omitempty"` + Physicalnetworkid string `json:"physicalnetworkid,omitempty"` + Servicelist []string `json:"servicelist,omitempty"` + State string `json:"state,omitempty"` +} + +type DeleteNetworkServiceProviderParams struct { + p map[string]interface{} +} + +func (p *DeleteNetworkServiceProviderParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["id"]; found { + u.Set("id", v.(string)) + } + return u +} + +func (p *DeleteNetworkServiceProviderParams) SetId(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["id"] = v + return +} + +// You should always use this function to get a new DeleteNetworkServiceProviderParams instance, +// as then you are sure you have configured all required params +func (s *NetworkService) NewDeleteNetworkServiceProviderParams(id string) *DeleteNetworkServiceProviderParams { + p := &DeleteNetworkServiceProviderParams{} + p.p = make(map[string]interface{}) + p.p["id"] = id + return p +} + +// Deletes a Network Service Provider. 
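+// Typical lifecycle sketch (editor's illustration, not upstream code): a
+// provider added via AddNetworkServiceProvider above is removed again using
+// the UUID it was assigned. The provider name and physical network id are
+// placeholders.
+//
+//	ap := cs.Network.NewAddNetworkServiceProviderParams("<provider-name>", "<physical-network-id>")
+//	added, err := cs.Network.AddNetworkServiceProvider(ap)
+//	...
+//	dp := cs.Network.NewDeleteNetworkServiceProviderParams(added.Id)
+//	_, err = cs.Network.DeleteNetworkServiceProvider(dp)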
+func (s *NetworkService) DeleteNetworkServiceProvider(p *DeleteNetworkServiceProviderParams) (*DeleteNetworkServiceProviderResponse, error) { + resp, err := s.cs.newRequest("deleteNetworkServiceProvider", p.toURLValues()) + if err != nil { + return nil, err + } + + var r DeleteNetworkServiceProviderResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + + // If we have a async client, we need to wait for the async result + if s.cs.async { + b, err := s.cs.GetAsyncJobResult(r.JobID, s.cs.timeout) + if err != nil { + if err == AsyncTimeoutErr { + return &r, err + } + return nil, err + } + + if err := json.Unmarshal(b, &r); err != nil { + return nil, err + } + } + return &r, nil +} + +type DeleteNetworkServiceProviderResponse struct { + JobID string `json:"jobid,omitempty"` + Displaytext string `json:"displaytext,omitempty"` + Success bool `json:"success,omitempty"` +} + +type ListNetworkServiceProvidersParams struct { + p map[string]interface{} +} + +func (p *ListNetworkServiceProvidersParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["keyword"]; found { + u.Set("keyword", v.(string)) + } + if v, found := p.p["name"]; found { + u.Set("name", v.(string)) + } + if v, found := p.p["page"]; found { + vv := strconv.Itoa(v.(int)) + u.Set("page", vv) + } + if v, found := p.p["pagesize"]; found { + vv := strconv.Itoa(v.(int)) + u.Set("pagesize", vv) + } + if v, found := p.p["physicalnetworkid"]; found { + u.Set("physicalnetworkid", v.(string)) + } + if v, found := p.p["state"]; found { + u.Set("state", v.(string)) + } + return u +} + +func (p *ListNetworkServiceProvidersParams) SetKeyword(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["keyword"] = v + return +} + +func (p *ListNetworkServiceProvidersParams) SetName(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["name"] = v + return +} + +func (p *ListNetworkServiceProvidersParams) SetPage(v int) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["page"] = v + return +} + +func (p *ListNetworkServiceProvidersParams) SetPagesize(v int) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["pagesize"] = v + return +} + +func (p *ListNetworkServiceProvidersParams) SetPhysicalnetworkid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["physicalnetworkid"] = v + return +} + +func (p *ListNetworkServiceProvidersParams) SetState(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["state"] = v + return +} + +// You should always use this function to get a new ListNetworkServiceProvidersParams instance, +// as then you are sure you have configured all required params +func (s *NetworkService) NewListNetworkServiceProvidersParams() *ListNetworkServiceProvidersParams { + p := &ListNetworkServiceProvidersParams{} + p.p = make(map[string]interface{}) + return p +} + +// This is a courtesy helper function, which in some cases may not work as expected! 
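+// Sketch (editor's illustration): combined with UpdateNetworkServiceProvider
+// below, this helper supports a common enable-by-name flow. "VirtualRouter"
+// is only an example name, not a value mandated by this file.
+//
+//	id, err := cs.Network.GetNetworkServiceProviderID("VirtualRouter")
+//	if err == nil {
+//		up := cs.Network.NewUpdateNetworkServiceProviderParams(id)
+//		up.SetState("Enabled")
+//		_, err = cs.Network.UpdateNetworkServiceProvider(up)
+//	}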
+func (s *NetworkService) GetNetworkServiceProviderID(name string) (string, error) { + p := &ListNetworkServiceProvidersParams{} + p.p = make(map[string]interface{}) + + p.p["name"] = name + + l, err := s.ListNetworkServiceProviders(p) + if err != nil { + return "", err + } + + if l.Count == 0 { + return "", fmt.Errorf("No match found for %s: %+v", name, l) + } + + if l.Count == 1 { + return l.NetworkServiceProviders[0].Id, nil + } + + if l.Count > 1 { + for _, v := range l.NetworkServiceProviders { + if v.Name == name { + return v.Id, nil + } + } + } + return "", fmt.Errorf("Could not find an exact match for %s: %+v", name, l) +} + +// Lists network serviceproviders for a given physical network. +func (s *NetworkService) ListNetworkServiceProviders(p *ListNetworkServiceProvidersParams) (*ListNetworkServiceProvidersResponse, error) { + resp, err := s.cs.newRequest("listNetworkServiceProviders", p.toURLValues()) + if err != nil { + return nil, err + } + + var r ListNetworkServiceProvidersResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + return &r, nil +} + +type ListNetworkServiceProvidersResponse struct { + Count int `json:"count"` + NetworkServiceProviders []*NetworkServiceProvider `json:"networkserviceprovider"` +} + +type NetworkServiceProvider struct { + Canenableindividualservice bool `json:"canenableindividualservice,omitempty"` + Destinationphysicalnetworkid string `json:"destinationphysicalnetworkid,omitempty"` + Id string `json:"id,omitempty"` + Name string `json:"name,omitempty"` + Physicalnetworkid string `json:"physicalnetworkid,omitempty"` + Servicelist []string `json:"servicelist,omitempty"` + State string `json:"state,omitempty"` +} + +type UpdateNetworkServiceProviderParams struct { + p map[string]interface{} +} + +func (p *UpdateNetworkServiceProviderParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["id"]; found { + u.Set("id", v.(string)) + } + if v, found := p.p["servicelist"]; found { + vv := strings.Join(v.([]string), ",") + u.Set("servicelist", vv) + } + if v, found := p.p["state"]; found { + u.Set("state", v.(string)) + } + return u +} + +func (p *UpdateNetworkServiceProviderParams) SetId(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["id"] = v + return +} + +func (p *UpdateNetworkServiceProviderParams) SetServicelist(v []string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["servicelist"] = v + return +} + +func (p *UpdateNetworkServiceProviderParams) SetState(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["state"] = v + return +} + +// You should always use this function to get a new UpdateNetworkServiceProviderParams instance, +// as then you are sure you have configured all required params +func (s *NetworkService) NewUpdateNetworkServiceProviderParams(id string) *UpdateNetworkServiceProviderParams { + p := &UpdateNetworkServiceProviderParams{} + p.p = make(map[string]interface{}) + p.p["id"] = id + return p +} + +// Updates a network serviceProvider of a physical network +func (s *NetworkService) UpdateNetworkServiceProvider(p *UpdateNetworkServiceProviderParams) (*UpdateNetworkServiceProviderResponse, error) { + resp, err := s.cs.newRequest("updateNetworkServiceProvider", p.toURLValues()) + if err != nil { + return nil, err + } + + var r UpdateNetworkServiceProviderResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + + // If we have a async client, we need to 
wait for the async result + if s.cs.async { + b, err := s.cs.GetAsyncJobResult(r.JobID, s.cs.timeout) + if err != nil { + if err == AsyncTimeoutErr { + return &r, err + } + return nil, err + } + + b, err = getRawValue(b) + if err != nil { + return nil, err + } + + if err := json.Unmarshal(b, &r); err != nil { + return nil, err + } + } + return &r, nil +} + +type UpdateNetworkServiceProviderResponse struct { + JobID string `json:"jobid,omitempty"` + Canenableindividualservice bool `json:"canenableindividualservice,omitempty"` + Destinationphysicalnetworkid string `json:"destinationphysicalnetworkid,omitempty"` + Id string `json:"id,omitempty"` + Name string `json:"name,omitempty"` + Physicalnetworkid string `json:"physicalnetworkid,omitempty"` + Servicelist []string `json:"servicelist,omitempty"` + State string `json:"state,omitempty"` +} + +type CreateStorageNetworkIpRangeParams struct { + p map[string]interface{} +} + +func (p *CreateStorageNetworkIpRangeParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["endip"]; found { + u.Set("endip", v.(string)) + } + if v, found := p.p["gateway"]; found { + u.Set("gateway", v.(string)) + } + if v, found := p.p["netmask"]; found { + u.Set("netmask", v.(string)) + } + if v, found := p.p["podid"]; found { + u.Set("podid", v.(string)) + } + if v, found := p.p["startip"]; found { + u.Set("startip", v.(string)) + } + if v, found := p.p["vlan"]; found { + vv := strconv.Itoa(v.(int)) + u.Set("vlan", vv) + } + return u +} + +func (p *CreateStorageNetworkIpRangeParams) SetEndip(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["endip"] = v + return +} + +func (p *CreateStorageNetworkIpRangeParams) SetGateway(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["gateway"] = v + return +} + +func (p *CreateStorageNetworkIpRangeParams) SetNetmask(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["netmask"] = v + return +} + +func (p *CreateStorageNetworkIpRangeParams) SetPodid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["podid"] = v + return +} + +func (p *CreateStorageNetworkIpRangeParams) SetStartip(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["startip"] = v + return +} + +func (p *CreateStorageNetworkIpRangeParams) SetVlan(v int) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["vlan"] = v + return +} + +// You should always use this function to get a new CreateStorageNetworkIpRangeParams instance, +// as then you are sure you have configured all required params +func (s *NetworkService) NewCreateStorageNetworkIpRangeParams(gateway string, netmask string, podid string, startip string) *CreateStorageNetworkIpRangeParams { + p := &CreateStorageNetworkIpRangeParams{} + p.p = make(map[string]interface{}) + p.p["gateway"] = gateway + p.p["netmask"] = netmask + p.p["podid"] = podid + p.p["startip"] = startip + return p +} + +// Creates a Storage network IP range. 
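+// Required parameters go through the constructor; optional ones through the
+// setters. A sketch (editor's illustration; all addresses and ids are
+// placeholders):
+//
+//	p := cs.Network.NewCreateStorageNetworkIpRangeParams(
+//		"172.16.0.1", "255.255.255.0", "<pod-id>", "172.16.0.10")
+//	p.SetEndip("172.16.0.50")
+//	p.SetVlan(100)
+//	r, err := cs.Network.CreateStorageNetworkIpRange(p)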
+func (s *NetworkService) CreateStorageNetworkIpRange(p *CreateStorageNetworkIpRangeParams) (*CreateStorageNetworkIpRangeResponse, error) {
+	resp, err := s.cs.newRequest("createStorageNetworkIpRange", p.toURLValues())
+	if err != nil {
+		return nil, err
+	}
+
+	var r CreateStorageNetworkIpRangeResponse
+	if err := json.Unmarshal(resp, &r); err != nil {
+		return nil, err
+	}
+
+	// If we have an async client, we need to wait for the async result
+	if s.cs.async {
+		b, err := s.cs.GetAsyncJobResult(r.JobID, s.cs.timeout)
+		if err != nil {
+			if err == AsyncTimeoutErr {
+				return &r, err
+			}
+			return nil, err
+		}
+
+		b, err = getRawValue(b)
+		if err != nil {
+			return nil, err
+		}
+
+		if err := json.Unmarshal(b, &r); err != nil {
+			return nil, err
+		}
+	}
+	return &r, nil
+}
+
+type CreateStorageNetworkIpRangeResponse struct {
+	JobID     string `json:"jobid,omitempty"`
+	Endip     string `json:"endip,omitempty"`
+	Gateway   string `json:"gateway,omitempty"`
+	Id        string `json:"id,omitempty"`
+	Netmask   string `json:"netmask,omitempty"`
+	Networkid string `json:"networkid,omitempty"`
+	Podid     string `json:"podid,omitempty"`
+	Startip   string `json:"startip,omitempty"`
+	Vlan      int    `json:"vlan,omitempty"`
+	Zoneid    string `json:"zoneid,omitempty"`
+}
+
+type DeleteStorageNetworkIpRangeParams struct {
+	p map[string]interface{}
+}
+
+func (p *DeleteStorageNetworkIpRangeParams) toURLValues() url.Values {
+	u := url.Values{}
+	if p.p == nil {
+		return u
+	}
+	if v, found := p.p["id"]; found {
+		u.Set("id", v.(string))
+	}
+	return u
+}
+
+func (p *DeleteStorageNetworkIpRangeParams) SetId(v string) {
+	if p.p == nil {
+		p.p = make(map[string]interface{})
+	}
+	p.p["id"] = v
+	return
+}
+
+// You should always use this function to get a new DeleteStorageNetworkIpRangeParams instance,
+// as then you are sure you have configured all required params
+func (s *NetworkService) NewDeleteStorageNetworkIpRangeParams(id string) *DeleteStorageNetworkIpRangeParams {
+	p := &DeleteStorageNetworkIpRangeParams{}
+	p.p = make(map[string]interface{})
+	p.p["id"] = id
+	return p
+}
+
+// Deletes a storage network IP Range.
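+// Deletion sketch (editor's illustration), reusing the id returned by the
+// create call above:
+//
+//	dp := cs.Network.NewDeleteStorageNetworkIpRangeParams(r.Id)
+//	dr, err := cs.Network.DeleteStorageNetworkIpRange(dp)
+//	// dr.Success reports whether the range was removed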
+func (s *NetworkService) DeleteStorageNetworkIpRange(p *DeleteStorageNetworkIpRangeParams) (*DeleteStorageNetworkIpRangeResponse, error) { + resp, err := s.cs.newRequest("deleteStorageNetworkIpRange", p.toURLValues()) + if err != nil { + return nil, err + } + + var r DeleteStorageNetworkIpRangeResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + + // If we have a async client, we need to wait for the async result + if s.cs.async { + b, err := s.cs.GetAsyncJobResult(r.JobID, s.cs.timeout) + if err != nil { + if err == AsyncTimeoutErr { + return &r, err + } + return nil, err + } + + if err := json.Unmarshal(b, &r); err != nil { + return nil, err + } + } + return &r, nil +} + +type DeleteStorageNetworkIpRangeResponse struct { + JobID string `json:"jobid,omitempty"` + Displaytext string `json:"displaytext,omitempty"` + Success bool `json:"success,omitempty"` +} + +type ListStorageNetworkIpRangeParams struct { + p map[string]interface{} +} + +func (p *ListStorageNetworkIpRangeParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["id"]; found { + u.Set("id", v.(string)) + } + if v, found := p.p["keyword"]; found { + u.Set("keyword", v.(string)) + } + if v, found := p.p["page"]; found { + vv := strconv.Itoa(v.(int)) + u.Set("page", vv) + } + if v, found := p.p["pagesize"]; found { + vv := strconv.Itoa(v.(int)) + u.Set("pagesize", vv) + } + if v, found := p.p["podid"]; found { + u.Set("podid", v.(string)) + } + if v, found := p.p["zoneid"]; found { + u.Set("zoneid", v.(string)) + } + return u +} + +func (p *ListStorageNetworkIpRangeParams) SetId(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["id"] = v + return +} + +func (p *ListStorageNetworkIpRangeParams) SetKeyword(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["keyword"] = v + return +} + +func (p *ListStorageNetworkIpRangeParams) SetPage(v int) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["page"] = v + return +} + +func (p *ListStorageNetworkIpRangeParams) SetPagesize(v int) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["pagesize"] = v + return +} + +func (p *ListStorageNetworkIpRangeParams) SetPodid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["podid"] = v + return +} + +func (p *ListStorageNetworkIpRangeParams) SetZoneid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["zoneid"] = v + return +} + +// You should always use this function to get a new ListStorageNetworkIpRangeParams instance, +// as then you are sure you have configured all required params +func (s *NetworkService) NewListStorageNetworkIpRangeParams() *ListStorageNetworkIpRangeParams { + p := &ListStorageNetworkIpRangeParams{} + p.p = make(map[string]interface{}) + return p +} + +// This is a courtesy helper function, which in some cases may not work as expected! 
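+// The list call underneath supports the usual paging parameters; a sketch
+// (editor's illustration; the pod id is a placeholder):
+//
+//	lp := cs.Network.NewListStorageNetworkIpRangeParams()
+//	lp.SetPodid("<pod-id>")
+//	lp.SetPage(1)
+//	lp.SetPagesize(50)
+//	l, err := cs.Network.ListStorageNetworkIpRange(lp)
+//	// l.Count and l.StorageNetworkIpRange hold the page contents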
+func (s *NetworkService) GetStorageNetworkIpRangeByID(id string) (*StorageNetworkIpRange, int, error) { + p := &ListStorageNetworkIpRangeParams{} + p.p = make(map[string]interface{}) + + p.p["id"] = id + + l, err := s.ListStorageNetworkIpRange(p) + if err != nil { + if strings.Contains(err.Error(), fmt.Sprintf( + "Invalid parameter id value=%s due to incorrect long value format, "+ + "or entity does not exist", id)) { + return nil, 0, fmt.Errorf("No match found for %s: %+v", id, l) + } + return nil, -1, err + } + + if l.Count == 0 { + return nil, l.Count, fmt.Errorf("No match found for %s: %+v", id, l) + } + + if l.Count == 1 { + return l.StorageNetworkIpRange[0], l.Count, nil + } + return nil, l.Count, fmt.Errorf("There is more then one result for StorageNetworkIpRange UUID: %s!", id) +} + +// List a storage network IP range. +func (s *NetworkService) ListStorageNetworkIpRange(p *ListStorageNetworkIpRangeParams) (*ListStorageNetworkIpRangeResponse, error) { + resp, err := s.cs.newRequest("listStorageNetworkIpRange", p.toURLValues()) + if err != nil { + return nil, err + } + + var r ListStorageNetworkIpRangeResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + return &r, nil +} + +type ListStorageNetworkIpRangeResponse struct { + Count int `json:"count"` + StorageNetworkIpRange []*StorageNetworkIpRange `json:"storagenetworkiprange"` +} + +type StorageNetworkIpRange struct { + Endip string `json:"endip,omitempty"` + Gateway string `json:"gateway,omitempty"` + Id string `json:"id,omitempty"` + Netmask string `json:"netmask,omitempty"` + Networkid string `json:"networkid,omitempty"` + Podid string `json:"podid,omitempty"` + Startip string `json:"startip,omitempty"` + Vlan int `json:"vlan,omitempty"` + Zoneid string `json:"zoneid,omitempty"` +} + +type UpdateStorageNetworkIpRangeParams struct { + p map[string]interface{} +} + +func (p *UpdateStorageNetworkIpRangeParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["endip"]; found { + u.Set("endip", v.(string)) + } + if v, found := p.p["id"]; found { + u.Set("id", v.(string)) + } + if v, found := p.p["netmask"]; found { + u.Set("netmask", v.(string)) + } + if v, found := p.p["startip"]; found { + u.Set("startip", v.(string)) + } + if v, found := p.p["vlan"]; found { + vv := strconv.Itoa(v.(int)) + u.Set("vlan", vv) + } + return u +} + +func (p *UpdateStorageNetworkIpRangeParams) SetEndip(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["endip"] = v + return +} + +func (p *UpdateStorageNetworkIpRangeParams) SetId(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["id"] = v + return +} + +func (p *UpdateStorageNetworkIpRangeParams) SetNetmask(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["netmask"] = v + return +} + +func (p *UpdateStorageNetworkIpRangeParams) SetStartip(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["startip"] = v + return +} + +func (p *UpdateStorageNetworkIpRangeParams) SetVlan(v int) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["vlan"] = v + return +} + +// You should always use this function to get a new UpdateStorageNetworkIpRangeParams instance, +// as then you are sure you have configured all required params +func (s *NetworkService) NewUpdateStorageNetworkIpRangeParams(id string) *UpdateStorageNetworkIpRangeParams { + p := &UpdateStorageNetworkIpRangeParams{} + p.p = make(map[string]interface{}) + 
p.p["id"] = id + return p +} + +// Update a Storage network IP range, only allowed when no IPs in this range have been allocated. +func (s *NetworkService) UpdateStorageNetworkIpRange(p *UpdateStorageNetworkIpRangeParams) (*UpdateStorageNetworkIpRangeResponse, error) { + resp, err := s.cs.newRequest("updateStorageNetworkIpRange", p.toURLValues()) + if err != nil { + return nil, err + } + + var r UpdateStorageNetworkIpRangeResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + + // If we have a async client, we need to wait for the async result + if s.cs.async { + b, err := s.cs.GetAsyncJobResult(r.JobID, s.cs.timeout) + if err != nil { + if err == AsyncTimeoutErr { + return &r, err + } + return nil, err + } + + b, err = getRawValue(b) + if err != nil { + return nil, err + } + + if err := json.Unmarshal(b, &r); err != nil { + return nil, err + } + } + return &r, nil +} + +type UpdateStorageNetworkIpRangeResponse struct { + JobID string `json:"jobid,omitempty"` + Endip string `json:"endip,omitempty"` + Gateway string `json:"gateway,omitempty"` + Id string `json:"id,omitempty"` + Netmask string `json:"netmask,omitempty"` + Networkid string `json:"networkid,omitempty"` + Podid string `json:"podid,omitempty"` + Startip string `json:"startip,omitempty"` + Vlan int `json:"vlan,omitempty"` + Zoneid string `json:"zoneid,omitempty"` +} + +type ListPaloAltoFirewallNetworksParams struct { + p map[string]interface{} +} + +func (p *ListPaloAltoFirewallNetworksParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["keyword"]; found { + u.Set("keyword", v.(string)) + } + if v, found := p.p["lbdeviceid"]; found { + u.Set("lbdeviceid", v.(string)) + } + if v, found := p.p["page"]; found { + vv := strconv.Itoa(v.(int)) + u.Set("page", vv) + } + if v, found := p.p["pagesize"]; found { + vv := strconv.Itoa(v.(int)) + u.Set("pagesize", vv) + } + return u +} + +func (p *ListPaloAltoFirewallNetworksParams) SetKeyword(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["keyword"] = v + return +} + +func (p *ListPaloAltoFirewallNetworksParams) SetLbdeviceid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["lbdeviceid"] = v + return +} + +func (p *ListPaloAltoFirewallNetworksParams) SetPage(v int) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["page"] = v + return +} + +func (p *ListPaloAltoFirewallNetworksParams) SetPagesize(v int) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["pagesize"] = v + return +} + +// You should always use this function to get a new ListPaloAltoFirewallNetworksParams instance, +// as then you are sure you have configured all required params +func (s *NetworkService) NewListPaloAltoFirewallNetworksParams(lbdeviceid string) *ListPaloAltoFirewallNetworksParams { + p := &ListPaloAltoFirewallNetworksParams{} + p.p = make(map[string]interface{}) + p.p["lbdeviceid"] = lbdeviceid + return p +} + +// This is a courtesy helper function, which in some cases may not work as expected! 
+func (s *NetworkService) GetPaloAltoFirewallNetworkID(keyword string, lbdeviceid string) (string, error) { + p := &ListPaloAltoFirewallNetworksParams{} + p.p = make(map[string]interface{}) + + p.p["keyword"] = keyword + p.p["lbdeviceid"] = lbdeviceid + + l, err := s.ListPaloAltoFirewallNetworks(p) + if err != nil { + return "", err + } + + if l.Count == 0 { + return "", fmt.Errorf("No match found for %s: %+v", keyword, l) + } + + if l.Count == 1 { + return l.PaloAltoFirewallNetworks[0].Id, nil + } + + if l.Count > 1 { + for _, v := range l.PaloAltoFirewallNetworks { + if v.Name == keyword { + return v.Id, nil + } + } + } + return "", fmt.Errorf("Could not find an exact match for %s: %+v", keyword, l) +} + +// lists network that are using Palo Alto firewall device +func (s *NetworkService) ListPaloAltoFirewallNetworks(p *ListPaloAltoFirewallNetworksParams) (*ListPaloAltoFirewallNetworksResponse, error) { + resp, err := s.cs.newRequest("listPaloAltoFirewallNetworks", p.toURLValues()) + if err != nil { + return nil, err + } + + var r ListPaloAltoFirewallNetworksResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + return &r, nil +} + +type ListPaloAltoFirewallNetworksResponse struct { + Count int `json:"count"` + PaloAltoFirewallNetworks []*PaloAltoFirewallNetwork `json:"paloaltofirewallnetwork"` +} + +type PaloAltoFirewallNetwork struct { + Account string `json:"account,omitempty"` + Aclid string `json:"aclid,omitempty"` + Acltype string `json:"acltype,omitempty"` + Broadcastdomaintype string `json:"broadcastdomaintype,omitempty"` + Broadcasturi string `json:"broadcasturi,omitempty"` + Canusefordeploy bool `json:"canusefordeploy,omitempty"` + Cidr string `json:"cidr,omitempty"` + Displaynetwork bool `json:"displaynetwork,omitempty"` + Displaytext string `json:"displaytext,omitempty"` + Dns1 string `json:"dns1,omitempty"` + Dns2 string `json:"dns2,omitempty"` + Domain string `json:"domain,omitempty"` + Domainid string `json:"domainid,omitempty"` + Gateway string `json:"gateway,omitempty"` + Id string `json:"id,omitempty"` + Ip6cidr string `json:"ip6cidr,omitempty"` + Ip6gateway string `json:"ip6gateway,omitempty"` + Isdefault bool `json:"isdefault,omitempty"` + Ispersistent bool `json:"ispersistent,omitempty"` + Issystem bool `json:"issystem,omitempty"` + Name string `json:"name,omitempty"` + Netmask string `json:"netmask,omitempty"` + Networkcidr string `json:"networkcidr,omitempty"` + Networkdomain string `json:"networkdomain,omitempty"` + Networkofferingavailability string `json:"networkofferingavailability,omitempty"` + Networkofferingconservemode bool `json:"networkofferingconservemode,omitempty"` + Networkofferingdisplaytext string `json:"networkofferingdisplaytext,omitempty"` + Networkofferingid string `json:"networkofferingid,omitempty"` + Networkofferingname string `json:"networkofferingname,omitempty"` + Physicalnetworkid string `json:"physicalnetworkid,omitempty"` + Project string `json:"project,omitempty"` + Projectid string `json:"projectid,omitempty"` + Related string `json:"related,omitempty"` + Reservediprange string `json:"reservediprange,omitempty"` + Restartrequired bool `json:"restartrequired,omitempty"` + Service []struct { + Capability []struct { + Canchooseservicecapability bool `json:"canchooseservicecapability,omitempty"` + Name string `json:"name,omitempty"` + Value string `json:"value,omitempty"` + } `json:"capability,omitempty"` + Name string `json:"name,omitempty"` + Provider []struct { + Canenableindividualservice bool 
`json:"canenableindividualservice,omitempty"` + Destinationphysicalnetworkid string `json:"destinationphysicalnetworkid,omitempty"` + Id string `json:"id,omitempty"` + Name string `json:"name,omitempty"` + Physicalnetworkid string `json:"physicalnetworkid,omitempty"` + Servicelist []string `json:"servicelist,omitempty"` + State string `json:"state,omitempty"` + } `json:"provider,omitempty"` + } `json:"service,omitempty"` + Specifyipranges bool `json:"specifyipranges,omitempty"` + State string `json:"state,omitempty"` + Strechedl2subnet bool `json:"strechedl2subnet,omitempty"` + Subdomainaccess bool `json:"subdomainaccess,omitempty"` + Tags []struct { + Account string `json:"account,omitempty"` + Customer string `json:"customer,omitempty"` + Domain string `json:"domain,omitempty"` + Domainid string `json:"domainid,omitempty"` + Key string `json:"key,omitempty"` + Project string `json:"project,omitempty"` + Projectid string `json:"projectid,omitempty"` + Resourceid string `json:"resourceid,omitempty"` + Resourcetype string `json:"resourcetype,omitempty"` + Value string `json:"value,omitempty"` + } `json:"tags,omitempty"` + Traffictype string `json:"traffictype,omitempty"` + Type string `json:"type,omitempty"` + Vlan string `json:"vlan,omitempty"` + Vpcid string `json:"vpcid,omitempty"` + Zoneid string `json:"zoneid,omitempty"` + Zonename string `json:"zonename,omitempty"` + Zonesnetworkspans []string `json:"zonesnetworkspans,omitempty"` +} + +type ListNetscalerLoadBalancerNetworksParams struct { + p map[string]interface{} +} + +func (p *ListNetscalerLoadBalancerNetworksParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["keyword"]; found { + u.Set("keyword", v.(string)) + } + if v, found := p.p["lbdeviceid"]; found { + u.Set("lbdeviceid", v.(string)) + } + if v, found := p.p["page"]; found { + vv := strconv.Itoa(v.(int)) + u.Set("page", vv) + } + if v, found := p.p["pagesize"]; found { + vv := strconv.Itoa(v.(int)) + u.Set("pagesize", vv) + } + return u +} + +func (p *ListNetscalerLoadBalancerNetworksParams) SetKeyword(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["keyword"] = v + return +} + +func (p *ListNetscalerLoadBalancerNetworksParams) SetLbdeviceid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["lbdeviceid"] = v + return +} + +func (p *ListNetscalerLoadBalancerNetworksParams) SetPage(v int) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["page"] = v + return +} + +func (p *ListNetscalerLoadBalancerNetworksParams) SetPagesize(v int) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["pagesize"] = v + return +} + +// You should always use this function to get a new ListNetscalerLoadBalancerNetworksParams instance, +// as then you are sure you have configured all required params +func (s *NetworkService) NewListNetscalerLoadBalancerNetworksParams(lbdeviceid string) *ListNetscalerLoadBalancerNetworksParams { + p := &ListNetscalerLoadBalancerNetworksParams{} + p.p = make(map[string]interface{}) + p.p["lbdeviceid"] = lbdeviceid + return p +} + +// This is a courtesy helper function, which in some cases may not work as expected! 
+func (s *NetworkService) GetNetscalerLoadBalancerNetworkID(keyword string, lbdeviceid string) (string, error) { + p := &ListNetscalerLoadBalancerNetworksParams{} + p.p = make(map[string]interface{}) + + p.p["keyword"] = keyword + p.p["lbdeviceid"] = lbdeviceid + + l, err := s.ListNetscalerLoadBalancerNetworks(p) + if err != nil { + return "", err + } + + if l.Count == 0 { + return "", fmt.Errorf("No match found for %s: %+v", keyword, l) + } + + if l.Count == 1 { + return l.NetscalerLoadBalancerNetworks[0].Id, nil + } + + if l.Count > 1 { + for _, v := range l.NetscalerLoadBalancerNetworks { + if v.Name == keyword { + return v.Id, nil + } + } + } + return "", fmt.Errorf("Could not find an exact match for %s: %+v", keyword, l) +} + +// lists network that are using a netscaler load balancer device +func (s *NetworkService) ListNetscalerLoadBalancerNetworks(p *ListNetscalerLoadBalancerNetworksParams) (*ListNetscalerLoadBalancerNetworksResponse, error) { + resp, err := s.cs.newRequest("listNetscalerLoadBalancerNetworks", p.toURLValues()) + if err != nil { + return nil, err + } + + var r ListNetscalerLoadBalancerNetworksResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + return &r, nil +} + +type ListNetscalerLoadBalancerNetworksResponse struct { + Count int `json:"count"` + NetscalerLoadBalancerNetworks []*NetscalerLoadBalancerNetwork `json:"netscalerloadbalancernetwork"` +} + +type NetscalerLoadBalancerNetwork struct { + Account string `json:"account,omitempty"` + Aclid string `json:"aclid,omitempty"` + Acltype string `json:"acltype,omitempty"` + Broadcastdomaintype string `json:"broadcastdomaintype,omitempty"` + Broadcasturi string `json:"broadcasturi,omitempty"` + Canusefordeploy bool `json:"canusefordeploy,omitempty"` + Cidr string `json:"cidr,omitempty"` + Displaynetwork bool `json:"displaynetwork,omitempty"` + Displaytext string `json:"displaytext,omitempty"` + Dns1 string `json:"dns1,omitempty"` + Dns2 string `json:"dns2,omitempty"` + Domain string `json:"domain,omitempty"` + Domainid string `json:"domainid,omitempty"` + Gateway string `json:"gateway,omitempty"` + Id string `json:"id,omitempty"` + Ip6cidr string `json:"ip6cidr,omitempty"` + Ip6gateway string `json:"ip6gateway,omitempty"` + Isdefault bool `json:"isdefault,omitempty"` + Ispersistent bool `json:"ispersistent,omitempty"` + Issystem bool `json:"issystem,omitempty"` + Name string `json:"name,omitempty"` + Netmask string `json:"netmask,omitempty"` + Networkcidr string `json:"networkcidr,omitempty"` + Networkdomain string `json:"networkdomain,omitempty"` + Networkofferingavailability string `json:"networkofferingavailability,omitempty"` + Networkofferingconservemode bool `json:"networkofferingconservemode,omitempty"` + Networkofferingdisplaytext string `json:"networkofferingdisplaytext,omitempty"` + Networkofferingid string `json:"networkofferingid,omitempty"` + Networkofferingname string `json:"networkofferingname,omitempty"` + Physicalnetworkid string `json:"physicalnetworkid,omitempty"` + Project string `json:"project,omitempty"` + Projectid string `json:"projectid,omitempty"` + Related string `json:"related,omitempty"` + Reservediprange string `json:"reservediprange,omitempty"` + Restartrequired bool `json:"restartrequired,omitempty"` + Service []struct { + Capability []struct { + Canchooseservicecapability bool `json:"canchooseservicecapability,omitempty"` + Name string `json:"name,omitempty"` + Value string `json:"value,omitempty"` + } `json:"capability,omitempty"` + Name string 
`json:"name,omitempty"` + Provider []struct { + Canenableindividualservice bool `json:"canenableindividualservice,omitempty"` + Destinationphysicalnetworkid string `json:"destinationphysicalnetworkid,omitempty"` + Id string `json:"id,omitempty"` + Name string `json:"name,omitempty"` + Physicalnetworkid string `json:"physicalnetworkid,omitempty"` + Servicelist []string `json:"servicelist,omitempty"` + State string `json:"state,omitempty"` + } `json:"provider,omitempty"` + } `json:"service,omitempty"` + Specifyipranges bool `json:"specifyipranges,omitempty"` + State string `json:"state,omitempty"` + Strechedl2subnet bool `json:"strechedl2subnet,omitempty"` + Subdomainaccess bool `json:"subdomainaccess,omitempty"` + Tags []struct { + Account string `json:"account,omitempty"` + Customer string `json:"customer,omitempty"` + Domain string `json:"domain,omitempty"` + Domainid string `json:"domainid,omitempty"` + Key string `json:"key,omitempty"` + Project string `json:"project,omitempty"` + Projectid string `json:"projectid,omitempty"` + Resourceid string `json:"resourceid,omitempty"` + Resourcetype string `json:"resourcetype,omitempty"` + Value string `json:"value,omitempty"` + } `json:"tags,omitempty"` + Traffictype string `json:"traffictype,omitempty"` + Type string `json:"type,omitempty"` + Vlan string `json:"vlan,omitempty"` + Vpcid string `json:"vpcid,omitempty"` + Zoneid string `json:"zoneid,omitempty"` + Zonename string `json:"zonename,omitempty"` + Zonesnetworkspans []string `json:"zonesnetworkspans,omitempty"` +} + +type ListNiciraNvpDeviceNetworksParams struct { + p map[string]interface{} +} + +func (p *ListNiciraNvpDeviceNetworksParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["keyword"]; found { + u.Set("keyword", v.(string)) + } + if v, found := p.p["nvpdeviceid"]; found { + u.Set("nvpdeviceid", v.(string)) + } + if v, found := p.p["page"]; found { + vv := strconv.Itoa(v.(int)) + u.Set("page", vv) + } + if v, found := p.p["pagesize"]; found { + vv := strconv.Itoa(v.(int)) + u.Set("pagesize", vv) + } + return u +} + +func (p *ListNiciraNvpDeviceNetworksParams) SetKeyword(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["keyword"] = v + return +} + +func (p *ListNiciraNvpDeviceNetworksParams) SetNvpdeviceid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["nvpdeviceid"] = v + return +} + +func (p *ListNiciraNvpDeviceNetworksParams) SetPage(v int) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["page"] = v + return +} + +func (p *ListNiciraNvpDeviceNetworksParams) SetPagesize(v int) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["pagesize"] = v + return +} + +// You should always use this function to get a new ListNiciraNvpDeviceNetworksParams instance, +// as then you are sure you have configured all required params +func (s *NetworkService) NewListNiciraNvpDeviceNetworksParams(nvpdeviceid string) *ListNiciraNvpDeviceNetworksParams { + p := &ListNiciraNvpDeviceNetworksParams{} + p.p = make(map[string]interface{}) + p.p["nvpdeviceid"] = nvpdeviceid + return p +} + +// This is a courtesy helper function, which in some cases may not work as expected! 
+func (s *NetworkService) GetNiciraNvpDeviceNetworkID(keyword string, nvpdeviceid string) (string, error) { + p := &ListNiciraNvpDeviceNetworksParams{} + p.p = make(map[string]interface{}) + + p.p["keyword"] = keyword + p.p["nvpdeviceid"] = nvpdeviceid + + l, err := s.ListNiciraNvpDeviceNetworks(p) + if err != nil { + return "", err + } + + if l.Count == 0 { + return "", fmt.Errorf("No match found for %s: %+v", keyword, l) + } + + if l.Count == 1 { + return l.NiciraNvpDeviceNetworks[0].Id, nil + } + + if l.Count > 1 { + for _, v := range l.NiciraNvpDeviceNetworks { + if v.Name == keyword { + return v.Id, nil + } + } + } + return "", fmt.Errorf("Could not find an exact match for %s: %+v", keyword, l) +} + +// lists network that are using a nicira nvp device +func (s *NetworkService) ListNiciraNvpDeviceNetworks(p *ListNiciraNvpDeviceNetworksParams) (*ListNiciraNvpDeviceNetworksResponse, error) { + resp, err := s.cs.newRequest("listNiciraNvpDeviceNetworks", p.toURLValues()) + if err != nil { + return nil, err + } + + var r ListNiciraNvpDeviceNetworksResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + return &r, nil +} + +type ListNiciraNvpDeviceNetworksResponse struct { + Count int `json:"count"` + NiciraNvpDeviceNetworks []*NiciraNvpDeviceNetwork `json:"niciranvpdevicenetwork"` +} + +type NiciraNvpDeviceNetwork struct { + Account string `json:"account,omitempty"` + Aclid string `json:"aclid,omitempty"` + Acltype string `json:"acltype,omitempty"` + Broadcastdomaintype string `json:"broadcastdomaintype,omitempty"` + Broadcasturi string `json:"broadcasturi,omitempty"` + Canusefordeploy bool `json:"canusefordeploy,omitempty"` + Cidr string `json:"cidr,omitempty"` + Displaynetwork bool `json:"displaynetwork,omitempty"` + Displaytext string `json:"displaytext,omitempty"` + Dns1 string `json:"dns1,omitempty"` + Dns2 string `json:"dns2,omitempty"` + Domain string `json:"domain,omitempty"` + Domainid string `json:"domainid,omitempty"` + Gateway string `json:"gateway,omitempty"` + Id string `json:"id,omitempty"` + Ip6cidr string `json:"ip6cidr,omitempty"` + Ip6gateway string `json:"ip6gateway,omitempty"` + Isdefault bool `json:"isdefault,omitempty"` + Ispersistent bool `json:"ispersistent,omitempty"` + Issystem bool `json:"issystem,omitempty"` + Name string `json:"name,omitempty"` + Netmask string `json:"netmask,omitempty"` + Networkcidr string `json:"networkcidr,omitempty"` + Networkdomain string `json:"networkdomain,omitempty"` + Networkofferingavailability string `json:"networkofferingavailability,omitempty"` + Networkofferingconservemode bool `json:"networkofferingconservemode,omitempty"` + Networkofferingdisplaytext string `json:"networkofferingdisplaytext,omitempty"` + Networkofferingid string `json:"networkofferingid,omitempty"` + Networkofferingname string `json:"networkofferingname,omitempty"` + Physicalnetworkid string `json:"physicalnetworkid,omitempty"` + Project string `json:"project,omitempty"` + Projectid string `json:"projectid,omitempty"` + Related string `json:"related,omitempty"` + Reservediprange string `json:"reservediprange,omitempty"` + Restartrequired bool `json:"restartrequired,omitempty"` + Service []struct { + Capability []struct { + Canchooseservicecapability bool `json:"canchooseservicecapability,omitempty"` + Name string `json:"name,omitempty"` + Value string `json:"value,omitempty"` + } `json:"capability,omitempty"` + Name string `json:"name,omitempty"` + Provider []struct { + Canenableindividualservice bool 
`json:"canenableindividualservice,omitempty"` + Destinationphysicalnetworkid string `json:"destinationphysicalnetworkid,omitempty"` + Id string `json:"id,omitempty"` + Name string `json:"name,omitempty"` + Physicalnetworkid string `json:"physicalnetworkid,omitempty"` + Servicelist []string `json:"servicelist,omitempty"` + State string `json:"state,omitempty"` + } `json:"provider,omitempty"` + } `json:"service,omitempty"` + Specifyipranges bool `json:"specifyipranges,omitempty"` + State string `json:"state,omitempty"` + Strechedl2subnet bool `json:"strechedl2subnet,omitempty"` + Subdomainaccess bool `json:"subdomainaccess,omitempty"` + Tags []struct { + Account string `json:"account,omitempty"` + Customer string `json:"customer,omitempty"` + Domain string `json:"domain,omitempty"` + Domainid string `json:"domainid,omitempty"` + Key string `json:"key,omitempty"` + Project string `json:"project,omitempty"` + Projectid string `json:"projectid,omitempty"` + Resourceid string `json:"resourceid,omitempty"` + Resourcetype string `json:"resourcetype,omitempty"` + Value string `json:"value,omitempty"` + } `json:"tags,omitempty"` + Traffictype string `json:"traffictype,omitempty"` + Type string `json:"type,omitempty"` + Vlan string `json:"vlan,omitempty"` + Vpcid string `json:"vpcid,omitempty"` + Zoneid string `json:"zoneid,omitempty"` + Zonename string `json:"zonename,omitempty"` + Zonesnetworkspans []string `json:"zonesnetworkspans,omitempty"` +} + +type ListNetworkIsolationMethodsParams struct { + p map[string]interface{} +} + +func (p *ListNetworkIsolationMethodsParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["keyword"]; found { + u.Set("keyword", v.(string)) + } + if v, found := p.p["page"]; found { + vv := strconv.Itoa(v.(int)) + u.Set("page", vv) + } + if v, found := p.p["pagesize"]; found { + vv := strconv.Itoa(v.(int)) + u.Set("pagesize", vv) + } + return u +} + +func (p *ListNetworkIsolationMethodsParams) SetKeyword(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["keyword"] = v + return +} + +func (p *ListNetworkIsolationMethodsParams) SetPage(v int) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["page"] = v + return +} + +func (p *ListNetworkIsolationMethodsParams) SetPagesize(v int) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["pagesize"] = v + return +} + +// You should always use this function to get a new ListNetworkIsolationMethodsParams instance, +// as then you are sure you have configured all required params +func (s *NetworkService) NewListNetworkIsolationMethodsParams() *ListNetworkIsolationMethodsParams { + p := &ListNetworkIsolationMethodsParams{} + p.p = make(map[string]interface{}) + return p +} + +// Lists supported methods of network isolation +func (s *NetworkService) ListNetworkIsolationMethods(p *ListNetworkIsolationMethodsParams) (*ListNetworkIsolationMethodsResponse, error) { + resp, err := s.cs.newRequest("listNetworkIsolationMethods", p.toURLValues()) + if err != nil { + return nil, err + } + + var r ListNetworkIsolationMethodsResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + return &r, nil +} + +type ListNetworkIsolationMethodsResponse struct { + Count int `json:"count"` + NetworkIsolationMethods []*NetworkIsolationMethod `json:"networkisolationmethod"` +} + +type NetworkIsolationMethod struct { + Name string `json:"name,omitempty"` +} diff --git 
a/vendor/github.com/xanzy/go-cloudstack/cloudstack/NicService.go b/vendor/github.com/xanzy/go-cloudstack/cloudstack/NicService.go new file mode 100644 index 000000000..3ef49d6ad --- /dev/null +++ b/vendor/github.com/xanzy/go-cloudstack/cloudstack/NicService.go @@ -0,0 +1,320 @@ +// +// Copyright 2014, Sander van Harmelen +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package cloudstack + +import ( + "encoding/json" + "net/url" + "strconv" +) + +type AddIpToNicParams struct { + p map[string]interface{} +} + +func (p *AddIpToNicParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["ipaddress"]; found { + u.Set("ipaddress", v.(string)) + } + if v, found := p.p["nicid"]; found { + u.Set("nicid", v.(string)) + } + return u +} + +func (p *AddIpToNicParams) SetIpaddress(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["ipaddress"] = v + return +} + +func (p *AddIpToNicParams) SetNicid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["nicid"] = v + return +} + +// You should always use this function to get a new AddIpToNicParams instance, +// as then you are sure you have configured all required params +func (s *NicService) NewAddIpToNicParams(nicid string) *AddIpToNicParams { + p := &AddIpToNicParams{} + p.p = make(map[string]interface{}) + p.p["nicid"] = nicid + return p +} + +// Assigns secondary IP to NIC +func (s *NicService) AddIpToNic(p *AddIpToNicParams) (*AddIpToNicResponse, error) { + resp, err := s.cs.newRequest("addIpToNic", p.toURLValues()) + if err != nil { + return nil, err + } + + var r AddIpToNicResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + + // If we have an async client, we need to wait for the async result + if s.cs.async { + b, err := s.cs.GetAsyncJobResult(r.JobID, s.cs.timeout) + if err != nil { + if err == AsyncTimeoutErr { + return &r, err + } + return nil, err + } + + b, err = getRawValue(b) + if err != nil { + return nil, err + } + + if err := json.Unmarshal(b, &r); err != nil { + return nil, err + } + } + return &r, nil +} + +type AddIpToNicResponse struct { + JobID string `json:"jobid,omitempty"` + Id string `json:"id,omitempty"` + Ipaddress string `json:"ipaddress,omitempty"` + Networkid string `json:"networkid,omitempty"` + Nicid string `json:"nicid,omitempty"` + Virtualmachineid string `json:"virtualmachineid,omitempty"` +} + +type RemoveIpFromNicParams struct { + p map[string]interface{} +} + +func (p *RemoveIpFromNicParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["id"]; found { + u.Set("id", v.(string)) + } + return u +} + +func (p *RemoveIpFromNicParams) SetId(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["id"] = v + return +} + +// You should always use this function to get a new RemoveIpFromNicParams instance, +// as then you are sure you have configured all required params +func (s *NicService)
NewRemoveIpFromNicParams(id string) *RemoveIpFromNicParams { + p := &RemoveIpFromNicParams{} + p.p = make(map[string]interface{}) + p.p["id"] = id + return p +} + +// Removes secondary IP from the NIC. +func (s *NicService) RemoveIpFromNic(p *RemoveIpFromNicParams) (*RemoveIpFromNicResponse, error) { + resp, err := s.cs.newRequest("removeIpFromNic", p.toURLValues()) + if err != nil { + return nil, err + } + + var r RemoveIpFromNicResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + + // If we have an async client, we need to wait for the async result + if s.cs.async { + b, err := s.cs.GetAsyncJobResult(r.JobID, s.cs.timeout) + if err != nil { + if err == AsyncTimeoutErr { + return &r, err + } + return nil, err + } + + if err := json.Unmarshal(b, &r); err != nil { + return nil, err + } + } + return &r, nil +} + +type RemoveIpFromNicResponse struct { + JobID string `json:"jobid,omitempty"` + Displaytext string `json:"displaytext,omitempty"` + Success bool `json:"success,omitempty"` +} + +type ListNicsParams struct { + p map[string]interface{} +} + +func (p *ListNicsParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["fordisplay"]; found { + vv := strconv.FormatBool(v.(bool)) + u.Set("fordisplay", vv) + } + if v, found := p.p["keyword"]; found { + u.Set("keyword", v.(string)) + } + if v, found := p.p["networkid"]; found { + u.Set("networkid", v.(string)) + } + if v, found := p.p["nicid"]; found { + u.Set("nicid", v.(string)) + } + if v, found := p.p["page"]; found { + vv := strconv.Itoa(v.(int)) + u.Set("page", vv) + } + if v, found := p.p["pagesize"]; found { + vv := strconv.Itoa(v.(int)) + u.Set("pagesize", vv) + } + if v, found := p.p["virtualmachineid"]; found { + u.Set("virtualmachineid", v.(string)) + } + return u +} + +func (p *ListNicsParams) SetFordisplay(v bool) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["fordisplay"] = v + return +} + +func (p *ListNicsParams) SetKeyword(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["keyword"] = v + return +} + +func (p *ListNicsParams) SetNetworkid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["networkid"] = v + return +} + +func (p *ListNicsParams) SetNicid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["nicid"] = v + return +} + +func (p *ListNicsParams) SetPage(v int) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["page"] = v + return +} + +func (p *ListNicsParams) SetPagesize(v int) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["pagesize"] = v + return +} + +func (p *ListNicsParams) SetVirtualmachineid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["virtualmachineid"] = v + return +} + +// You should always use this function to get a new ListNicsParams instance, +// as then you are sure you have configured all required params +func (s *NicService) NewListNicsParams(virtualmachineid string) *ListNicsParams { + p := &ListNicsParams{} + p.p = make(map[string]interface{}) + p.p["virtualmachineid"] = virtualmachineid + return p +} + +// Lists the VM NICs +func (s *NicService) ListNics(p *ListNicsParams) (*ListNicsResponse, error) { + resp, err := s.cs.newRequest("listNics", p.toURLValues()) + if err != nil { + return nil, err + } + + var r ListNicsResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + return &r, nil +} + +type
ListNicsResponse struct { + Count int `json:"count"` + Nics []*Nic `json:"nic"` +} + +type Nic struct { + Broadcasturi string `json:"broadcasturi,omitempty"` + Deviceid string `json:"deviceid,omitempty"` + Gateway string `json:"gateway,omitempty"` + Id string `json:"id,omitempty"` + Ip6address string `json:"ip6address,omitempty"` + Ip6cidr string `json:"ip6cidr,omitempty"` + Ip6gateway string `json:"ip6gateway,omitempty"` + Ipaddress string `json:"ipaddress,omitempty"` + Isdefault bool `json:"isdefault,omitempty"` + Isolationuri string `json:"isolationuri,omitempty"` + Macaddress string `json:"macaddress,omitempty"` + Netmask string `json:"netmask,omitempty"` + Networkid string `json:"networkid,omitempty"` + Networkname string `json:"networkname,omitempty"` + Secondaryip []struct { + Id string `json:"id,omitempty"` + Ipaddress string `json:"ipaddress,omitempty"` + } `json:"secondaryip,omitempty"` + Traffictype string `json:"traffictype,omitempty"` + Type string `json:"type,omitempty"` + Virtualmachineid string `json:"virtualmachineid,omitempty"` +} diff --git a/vendor/github.com/xanzy/go-cloudstack/cloudstack/NiciraNVPService.go b/vendor/github.com/xanzy/go-cloudstack/cloudstack/NiciraNVPService.go new file mode 100644 index 000000000..a4cb2050b --- /dev/null +++ b/vendor/github.com/xanzy/go-cloudstack/cloudstack/NiciraNVPService.go @@ -0,0 +1,332 @@ +// +// Copyright 2014, Sander van Harmelen +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// + +package cloudstack + +import ( + "encoding/json" + "net/url" + "strconv" +) + +type AddNiciraNvpDeviceParams struct { + p map[string]interface{} +} + +func (p *AddNiciraNvpDeviceParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["hostname"]; found { + u.Set("hostname", v.(string)) + } + if v, found := p.p["l3gatewayserviceuuid"]; found { + u.Set("l3gatewayserviceuuid", v.(string)) + } + if v, found := p.p["password"]; found { + u.Set("password", v.(string)) + } + if v, found := p.p["physicalnetworkid"]; found { + u.Set("physicalnetworkid", v.(string)) + } + if v, found := p.p["transportzoneuuid"]; found { + u.Set("transportzoneuuid", v.(string)) + } + if v, found := p.p["username"]; found { + u.Set("username", v.(string)) + } + return u +} + +func (p *AddNiciraNvpDeviceParams) SetHostname(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["hostname"] = v + return +} + +func (p *AddNiciraNvpDeviceParams) SetL3gatewayserviceuuid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["l3gatewayserviceuuid"] = v + return +} + +func (p *AddNiciraNvpDeviceParams) SetPassword(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["password"] = v + return +} + +func (p *AddNiciraNvpDeviceParams) SetPhysicalnetworkid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["physicalnetworkid"] = v + return +} + +func (p *AddNiciraNvpDeviceParams) SetTransportzoneuuid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["transportzoneuuid"] = v + return +} + +func (p *AddNiciraNvpDeviceParams) SetUsername(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["username"] = v + return +} + +// You should always use this function to get a new AddNiciraNvpDeviceParams instance, +// as then you are sure you have configured all required params +func (s *NiciraNVPService) NewAddNiciraNvpDeviceParams(hostname string, password string, physicalnetworkid string, transportzoneuuid string, username string) *AddNiciraNvpDeviceParams { + p := &AddNiciraNvpDeviceParams{} + p.p = make(map[string]interface{}) + p.p["hostname"] = hostname + p.p["password"] = password + p.p["physicalnetworkid"] = physicalnetworkid + p.p["transportzoneuuid"] = transportzoneuuid + p.p["username"] = username + return p +} + +// Adds a Nicira NVP device +func (s *NiciraNVPService) AddNiciraNvpDevice(p *AddNiciraNvpDeviceParams) (*AddNiciraNvpDeviceResponse, error) { + resp, err := s.cs.newRequest("addNiciraNvpDevice", p.toURLValues()) + if err != nil { + return nil, err + } + + var r AddNiciraNvpDeviceResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + + // If we have an async client, we need to wait for the async result + if s.cs.async { + b, err := s.cs.GetAsyncJobResult(r.JobID, s.cs.timeout) + if err != nil { + if err == AsyncTimeoutErr { + return &r, err + } + return nil, err + } + + b, err = getRawValue(b) + if err != nil { + return nil, err + } + + if err := json.Unmarshal(b, &r); err != nil { + return nil, err + } + } + return &r, nil +} + +type AddNiciraNvpDeviceResponse struct { + JobID string `json:"jobid,omitempty"` + Hostname string `json:"hostname,omitempty"` + L3gatewayserviceuuid string `json:"l3gatewayserviceuuid,omitempty"` + Niciradevicename string `json:"niciradevicename,omitempty"` + Nvpdeviceid string `json:"nvpdeviceid,omitempty"` + Physicalnetworkid string
`json:"physicalnetworkid,omitempty"` + Provider string `json:"provider,omitempty"` + Transportzoneuuid string `json:"transportzoneuuid,omitempty"` +} + +type DeleteNiciraNvpDeviceParams struct { + p map[string]interface{} +} + +func (p *DeleteNiciraNvpDeviceParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["nvpdeviceid"]; found { + u.Set("nvpdeviceid", v.(string)) + } + return u +} + +func (p *DeleteNiciraNvpDeviceParams) SetNvpdeviceid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["nvpdeviceid"] = v + return +} + +// You should always use this function to get a new DeleteNiciraNvpDeviceParams instance, +// as then you are sure you have configured all required params +func (s *NiciraNVPService) NewDeleteNiciraNvpDeviceParams(nvpdeviceid string) *DeleteNiciraNvpDeviceParams { + p := &DeleteNiciraNvpDeviceParams{} + p.p = make(map[string]interface{}) + p.p["nvpdeviceid"] = nvpdeviceid + return p +} + +// delete a nicira nvp device +func (s *NiciraNVPService) DeleteNiciraNvpDevice(p *DeleteNiciraNvpDeviceParams) (*DeleteNiciraNvpDeviceResponse, error) { + resp, err := s.cs.newRequest("deleteNiciraNvpDevice", p.toURLValues()) + if err != nil { + return nil, err + } + + var r DeleteNiciraNvpDeviceResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + + // If we have a async client, we need to wait for the async result + if s.cs.async { + b, err := s.cs.GetAsyncJobResult(r.JobID, s.cs.timeout) + if err != nil { + if err == AsyncTimeoutErr { + return &r, err + } + return nil, err + } + + if err := json.Unmarshal(b, &r); err != nil { + return nil, err + } + } + return &r, nil +} + +type DeleteNiciraNvpDeviceResponse struct { + JobID string `json:"jobid,omitempty"` + Displaytext string `json:"displaytext,omitempty"` + Success bool `json:"success,omitempty"` +} + +type ListNiciraNvpDevicesParams struct { + p map[string]interface{} +} + +func (p *ListNiciraNvpDevicesParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["keyword"]; found { + u.Set("keyword", v.(string)) + } + if v, found := p.p["nvpdeviceid"]; found { + u.Set("nvpdeviceid", v.(string)) + } + if v, found := p.p["page"]; found { + vv := strconv.Itoa(v.(int)) + u.Set("page", vv) + } + if v, found := p.p["pagesize"]; found { + vv := strconv.Itoa(v.(int)) + u.Set("pagesize", vv) + } + if v, found := p.p["physicalnetworkid"]; found { + u.Set("physicalnetworkid", v.(string)) + } + return u +} + +func (p *ListNiciraNvpDevicesParams) SetKeyword(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["keyword"] = v + return +} + +func (p *ListNiciraNvpDevicesParams) SetNvpdeviceid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["nvpdeviceid"] = v + return +} + +func (p *ListNiciraNvpDevicesParams) SetPage(v int) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["page"] = v + return +} + +func (p *ListNiciraNvpDevicesParams) SetPagesize(v int) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["pagesize"] = v + return +} + +func (p *ListNiciraNvpDevicesParams) SetPhysicalnetworkid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["physicalnetworkid"] = v + return +} + +// You should always use this function to get a new ListNiciraNvpDevicesParams instance, +// as then you are sure you have configured all required params +func (s *NiciraNVPService) 
NewListNiciraNvpDevicesParams() *ListNiciraNvpDevicesParams { + p := &ListNiciraNvpDevicesParams{} + p.p = make(map[string]interface{}) + return p +} + +// Lists Nicira NVP devices +func (s *NiciraNVPService) ListNiciraNvpDevices(p *ListNiciraNvpDevicesParams) (*ListNiciraNvpDevicesResponse, error) { + resp, err := s.cs.newRequest("listNiciraNvpDevices", p.toURLValues()) + if err != nil { + return nil, err + } + + var r ListNiciraNvpDevicesResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + return &r, nil +} + +type ListNiciraNvpDevicesResponse struct { + Count int `json:"count"` + NiciraNvpDevices []*NiciraNvpDevice `json:"niciranvpdevice"` +} + +type NiciraNvpDevice struct { + Hostname string `json:"hostname,omitempty"` + L3gatewayserviceuuid string `json:"l3gatewayserviceuuid,omitempty"` + Niciradevicename string `json:"niciradevicename,omitempty"` + Nvpdeviceid string `json:"nvpdeviceid,omitempty"` + Physicalnetworkid string `json:"physicalnetworkid,omitempty"` + Provider string `json:"provider,omitempty"` + Transportzoneuuid string `json:"transportzoneuuid,omitempty"` +} diff --git a/vendor/github.com/xanzy/go-cloudstack/cloudstack/OvsElementService.go b/vendor/github.com/xanzy/go-cloudstack/cloudstack/OvsElementService.go new file mode 100644 index 000000000..3c81420b7 --- /dev/null +++ b/vendor/github.com/xanzy/go-cloudstack/cloudstack/OvsElementService.go @@ -0,0 +1,262 @@ +// +// Copyright 2014, Sander van Harmelen +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package cloudstack + +import ( + "encoding/json" + "fmt" + "net/url" + "strconv" + "strings" +) + +type ConfigureOvsElementParams struct { + p map[string]interface{} +} + +func (p *ConfigureOvsElementParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["enabled"]; found { + vv := strconv.FormatBool(v.(bool)) + u.Set("enabled", vv) + } + if v, found := p.p["id"]; found { + u.Set("id", v.(string)) + } + return u +} + +func (p *ConfigureOvsElementParams) SetEnabled(v bool) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["enabled"] = v + return +} + +func (p *ConfigureOvsElementParams) SetId(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["id"] = v + return +} + +// You should always use this function to get a new ConfigureOvsElementParams instance, +// as then you are sure you have configured all required params +func (s *OvsElementService) NewConfigureOvsElementParams(enabled bool, id string) *ConfigureOvsElementParams { + p := &ConfigureOvsElementParams{} + p.p = make(map[string]interface{}) + p.p["enabled"] = enabled + p.p["id"] = id + return p +} + +// Configures an ovs element. 
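+// With an async client the call below blocks until the CloudStack job
+// finishes (or the configured timeout is hit). A sketch, assuming cs is an
+// initialized client exposing this service as cs.OvsElement and using a
+// placeholder element UUID:
+//
+//	p := cs.OvsElement.NewConfigureOvsElementParams(true, "ovs-element-uuid")
+//	r, err := cs.OvsElement.ConfigureOvsElement(p)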
+func (s *OvsElementService) ConfigureOvsElement(p *ConfigureOvsElementParams) (*ConfigureOvsElementResponse, error) { + resp, err := s.cs.newRequest("configureOvsElement", p.toURLValues()) + if err != nil { + return nil, err + } + + var r ConfigureOvsElementResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + + // If we have an async client, we need to wait for the async result + if s.cs.async { + b, err := s.cs.GetAsyncJobResult(r.JobID, s.cs.timeout) + if err != nil { + if err == AsyncTimeoutErr { + return &r, err + } + return nil, err + } + + b, err = getRawValue(b) + if err != nil { + return nil, err + } + + if err := json.Unmarshal(b, &r); err != nil { + return nil, err + } + } + return &r, nil +} + +type ConfigureOvsElementResponse struct { + JobID string `json:"jobid,omitempty"` + Account string `json:"account,omitempty"` + Domain string `json:"domain,omitempty"` + Domainid string `json:"domainid,omitempty"` + Enabled bool `json:"enabled,omitempty"` + Id string `json:"id,omitempty"` + Nspid string `json:"nspid,omitempty"` + Project string `json:"project,omitempty"` + Projectid string `json:"projectid,omitempty"` +} + +type ListOvsElementsParams struct { + p map[string]interface{} +} + +func (p *ListOvsElementsParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["enabled"]; found { + vv := strconv.FormatBool(v.(bool)) + u.Set("enabled", vv) + } + if v, found := p.p["id"]; found { + u.Set("id", v.(string)) + } + if v, found := p.p["keyword"]; found { + u.Set("keyword", v.(string)) + } + if v, found := p.p["nspid"]; found { + u.Set("nspid", v.(string)) + } + if v, found := p.p["page"]; found { + vv := strconv.Itoa(v.(int)) + u.Set("page", vv) + } + if v, found := p.p["pagesize"]; found { + vv := strconv.Itoa(v.(int)) + u.Set("pagesize", vv) + } + return u +} + +func (p *ListOvsElementsParams) SetEnabled(v bool) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["enabled"] = v + return +} + +func (p *ListOvsElementsParams) SetId(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["id"] = v + return +} + +func (p *ListOvsElementsParams) SetKeyword(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["keyword"] = v + return +} + +func (p *ListOvsElementsParams) SetNspid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["nspid"] = v + return +} + +func (p *ListOvsElementsParams) SetPage(v int) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["page"] = v + return +} + +func (p *ListOvsElementsParams) SetPagesize(v int) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["pagesize"] = v + return +} + +// You should always use this function to get a new ListOvsElementsParams instance, +// as then you are sure you have configured all required params +func (s *OvsElementService) NewListOvsElementsParams() *ListOvsElementsParams { + p := &ListOvsElementsParams{} + p.p = make(map[string]interface{}) + return p +} + +// This is a courtesy helper function, which in some cases may not work as expected!
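+// The helper returns the match count alongside the element, so callers can
+// distinguish "not found" (count 0) from a real error. A sketch with a
+// placeholder UUID:
+//
+//	e, count, err := cs.OvsElement.GetOvsElementByID("ovs-element-uuid")
+//	if err != nil && count == 0 {
+//		// no such element; treat as a non-fatal miss
+//	}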
+func (s *OvsElementService) GetOvsElementByID(id string) (*OvsElement, int, error) { + p := &ListOvsElementsParams{} + p.p = make(map[string]interface{}) + + p.p["id"] = id + + l, err := s.ListOvsElements(p) + if err != nil { + if strings.Contains(err.Error(), fmt.Sprintf( + "Invalid parameter id value=%s due to incorrect long value format, "+ + "or entity does not exist", id)) { + return nil, 0, fmt.Errorf("No match found for %s: %+v", id, l) + } + return nil, -1, err + } + + if l.Count == 0 { + return nil, l.Count, fmt.Errorf("No match found for %s: %+v", id, l) + } + + if l.Count == 1 { + return l.OvsElements[0], l.Count, nil + } + return nil, l.Count, fmt.Errorf("There is more than one result for OvsElement UUID: %s!", id) +} + +// Lists all available ovs elements. +func (s *OvsElementService) ListOvsElements(p *ListOvsElementsParams) (*ListOvsElementsResponse, error) { + resp, err := s.cs.newRequest("listOvsElements", p.toURLValues()) + if err != nil { + return nil, err + } + + var r ListOvsElementsResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + return &r, nil +} + +type ListOvsElementsResponse struct { + Count int `json:"count"` + OvsElements []*OvsElement `json:"ovselement"` +} + +type OvsElement struct { + Account string `json:"account,omitempty"` + Domain string `json:"domain,omitempty"` + Domainid string `json:"domainid,omitempty"` + Enabled bool `json:"enabled,omitempty"` + Id string `json:"id,omitempty"` + Nspid string `json:"nspid,omitempty"` + Project string `json:"project,omitempty"` + Projectid string `json:"projectid,omitempty"` +} diff --git a/vendor/github.com/xanzy/go-cloudstack/cloudstack/PodService.go b/vendor/github.com/xanzy/go-cloudstack/cloudstack/PodService.go new file mode 100644 index 000000000..f52919095 --- /dev/null +++ b/vendor/github.com/xanzy/go-cloudstack/cloudstack/PodService.go @@ -0,0 +1,870 @@ +// +// Copyright 2014, Sander van Harmelen +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License.
+// + +package cloudstack + +import ( + "encoding/json" + "fmt" + "net/url" + "strconv" + "strings" +) + +type CreatePodParams struct { + p map[string]interface{} +} + +func (p *CreatePodParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["allocationstate"]; found { + u.Set("allocationstate", v.(string)) + } + if v, found := p.p["endip"]; found { + u.Set("endip", v.(string)) + } + if v, found := p.p["gateway"]; found { + u.Set("gateway", v.(string)) + } + if v, found := p.p["name"]; found { + u.Set("name", v.(string)) + } + if v, found := p.p["netmask"]; found { + u.Set("netmask", v.(string)) + } + if v, found := p.p["startip"]; found { + u.Set("startip", v.(string)) + } + if v, found := p.p["zoneid"]; found { + u.Set("zoneid", v.(string)) + } + return u +} + +func (p *CreatePodParams) SetAllocationstate(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["allocationstate"] = v + return +} + +func (p *CreatePodParams) SetEndip(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["endip"] = v + return +} + +func (p *CreatePodParams) SetGateway(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["gateway"] = v + return +} + +func (p *CreatePodParams) SetName(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["name"] = v + return +} + +func (p *CreatePodParams) SetNetmask(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["netmask"] = v + return +} + +func (p *CreatePodParams) SetStartip(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["startip"] = v + return +} + +func (p *CreatePodParams) SetZoneid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["zoneid"] = v + return +} + +// You should always use this function to get a new CreatePodParams instance, +// as then you are sure you have configured all required params +func (s *PodService) NewCreatePodParams(gateway string, name string, netmask string, startip string, zoneid string) *CreatePodParams { + p := &CreatePodParams{} + p.p = make(map[string]interface{}) + p.p["gateway"] = gateway + p.p["name"] = name + p.p["netmask"] = netmask + p.p["startip"] = startip + p.p["zoneid"] = zoneid + return p +} + +// Creates a new Pod. 
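+// A sketch of creating a pod with the five required parameters (all values
+// below are placeholders, and the client is assumed to expose this service
+// as cs.Pod); the end of the IP range can be widened via the optional setter:
+//
+//	p := cs.Pod.NewCreatePodParams("10.0.0.1", "pod01", "255.255.255.0", "10.0.0.10", "zone-uuid")
+//	p.SetEndip("10.0.0.250")
+//	pod, err := cs.Pod.CreatePod(p)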
+func (s *PodService) CreatePod(p *CreatePodParams) (*CreatePodResponse, error) { + resp, err := s.cs.newRequest("createPod", p.toURLValues()) + if err != nil { + return nil, err + } + + var r CreatePodResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + return &r, nil +} + +type CreatePodResponse struct { + Allocationstate string `json:"allocationstate,omitempty"` + Capacity []struct { + Capacitytotal int64 `json:"capacitytotal,omitempty"` + Capacityused int64 `json:"capacityused,omitempty"` + Clusterid string `json:"clusterid,omitempty"` + Clustername string `json:"clustername,omitempty"` + Percentused string `json:"percentused,omitempty"` + Podid string `json:"podid,omitempty"` + Podname string `json:"podname,omitempty"` + Type int `json:"type,omitempty"` + Zoneid string `json:"zoneid,omitempty"` + Zonename string `json:"zonename,omitempty"` + } `json:"capacity,omitempty"` + Endip string `json:"endip,omitempty"` + Gateway string `json:"gateway,omitempty"` + Id string `json:"id,omitempty"` + Name string `json:"name,omitempty"` + Netmask string `json:"netmask,omitempty"` + Startip string `json:"startip,omitempty"` + Zoneid string `json:"zoneid,omitempty"` + Zonename string `json:"zonename,omitempty"` +} + +type UpdatePodParams struct { + p map[string]interface{} +} + +func (p *UpdatePodParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["allocationstate"]; found { + u.Set("allocationstate", v.(string)) + } + if v, found := p.p["endip"]; found { + u.Set("endip", v.(string)) + } + if v, found := p.p["gateway"]; found { + u.Set("gateway", v.(string)) + } + if v, found := p.p["id"]; found { + u.Set("id", v.(string)) + } + if v, found := p.p["name"]; found { + u.Set("name", v.(string)) + } + if v, found := p.p["netmask"]; found { + u.Set("netmask", v.(string)) + } + if v, found := p.p["startip"]; found { + u.Set("startip", v.(string)) + } + return u +} + +func (p *UpdatePodParams) SetAllocationstate(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["allocationstate"] = v + return +} + +func (p *UpdatePodParams) SetEndip(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["endip"] = v + return +} + +func (p *UpdatePodParams) SetGateway(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["gateway"] = v + return +} + +func (p *UpdatePodParams) SetId(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["id"] = v + return +} + +func (p *UpdatePodParams) SetName(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["name"] = v + return +} + +func (p *UpdatePodParams) SetNetmask(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["netmask"] = v + return +} + +func (p *UpdatePodParams) SetStartip(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["startip"] = v + return +} + +// You should always use this function to get a new UpdatePodParams instance, +// as then you are sure you have configured all required params +func (s *PodService) NewUpdatePodParams(id string) *UpdatePodParams { + p := &UpdatePodParams{} + p.p = make(map[string]interface{}) + p.p["id"] = id + return p +} + +// Updates a Pod. 
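+// Only the pod ID is required here; every other field is optional and
+// applied via the setters. A sketch with a placeholder UUID:
+//
+//	p := cs.Pod.NewUpdatePodParams("pod-uuid")
+//	p.SetName("pod01-renamed")
+//	pod, err := cs.Pod.UpdatePod(p)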
+func (s *PodService) UpdatePod(p *UpdatePodParams) (*UpdatePodResponse, error) { + resp, err := s.cs.newRequest("updatePod", p.toURLValues()) + if err != nil { + return nil, err + } + + var r UpdatePodResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + return &r, nil +} + +type UpdatePodResponse struct { + Allocationstate string `json:"allocationstate,omitempty"` + Capacity []struct { + Capacitytotal int64 `json:"capacitytotal,omitempty"` + Capacityused int64 `json:"capacityused,omitempty"` + Clusterid string `json:"clusterid,omitempty"` + Clustername string `json:"clustername,omitempty"` + Percentused string `json:"percentused,omitempty"` + Podid string `json:"podid,omitempty"` + Podname string `json:"podname,omitempty"` + Type int `json:"type,omitempty"` + Zoneid string `json:"zoneid,omitempty"` + Zonename string `json:"zonename,omitempty"` + } `json:"capacity,omitempty"` + Endip string `json:"endip,omitempty"` + Gateway string `json:"gateway,omitempty"` + Id string `json:"id,omitempty"` + Name string `json:"name,omitempty"` + Netmask string `json:"netmask,omitempty"` + Startip string `json:"startip,omitempty"` + Zoneid string `json:"zoneid,omitempty"` + Zonename string `json:"zonename,omitempty"` +} + +type DeletePodParams struct { + p map[string]interface{} +} + +func (p *DeletePodParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["id"]; found { + u.Set("id", v.(string)) + } + return u +} + +func (p *DeletePodParams) SetId(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["id"] = v + return +} + +// You should always use this function to get a new DeletePodParams instance, +// as then you are sure you have configured all required params +func (s *PodService) NewDeletePodParams(id string) *DeletePodParams { + p := &DeletePodParams{} + p.p = make(map[string]interface{}) + p.p["id"] = id + return p +} + +// Deletes a Pod. 
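+// A sketch with a placeholder UUID; note that DeletePodResponse (below)
+// reports Success as a string, so check its value rather than assuming a bool:
+//
+//	r, err := cs.Pod.DeletePod(cs.Pod.NewDeletePodParams("pod-uuid"))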
+func (s *PodService) DeletePod(p *DeletePodParams) (*DeletePodResponse, error) { + resp, err := s.cs.newRequest("deletePod", p.toURLValues()) + if err != nil { + return nil, err + } + + var r DeletePodResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + return &r, nil +} + +type DeletePodResponse struct { + Displaytext string `json:"displaytext,omitempty"` + Success string `json:"success,omitempty"` +} + +type ListPodsParams struct { + p map[string]interface{} +} + +func (p *ListPodsParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["allocationstate"]; found { + u.Set("allocationstate", v.(string)) + } + if v, found := p.p["id"]; found { + u.Set("id", v.(string)) + } + if v, found := p.p["keyword"]; found { + u.Set("keyword", v.(string)) + } + if v, found := p.p["name"]; found { + u.Set("name", v.(string)) + } + if v, found := p.p["page"]; found { + vv := strconv.Itoa(v.(int)) + u.Set("page", vv) + } + if v, found := p.p["pagesize"]; found { + vv := strconv.Itoa(v.(int)) + u.Set("pagesize", vv) + } + if v, found := p.p["showcapacities"]; found { + vv := strconv.FormatBool(v.(bool)) + u.Set("showcapacities", vv) + } + if v, found := p.p["zoneid"]; found { + u.Set("zoneid", v.(string)) + } + return u +} + +func (p *ListPodsParams) SetAllocationstate(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["allocationstate"] = v + return +} + +func (p *ListPodsParams) SetId(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["id"] = v + return +} + +func (p *ListPodsParams) SetKeyword(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["keyword"] = v + return +} + +func (p *ListPodsParams) SetName(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["name"] = v + return +} + +func (p *ListPodsParams) SetPage(v int) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["page"] = v + return +} + +func (p *ListPodsParams) SetPagesize(v int) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["pagesize"] = v + return +} + +func (p *ListPodsParams) SetShowcapacities(v bool) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["showcapacities"] = v + return +} + +func (p *ListPodsParams) SetZoneid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["zoneid"] = v + return +} + +// You should always use this function to get a new ListPodsParams instance, +// as then you are sure you have configured all required params +func (s *PodService) NewListPodsParams() *ListPodsParams { + p := &ListPodsParams{} + p.p = make(map[string]interface{}) + return p +} + +// This is a courtesy helper function, which in some cases may not work as expected! +func (s *PodService) GetPodID(name string) (string, error) { + p := &ListPodsParams{} + p.p = make(map[string]interface{}) + + p.p["name"] = name + + l, err := s.ListPods(p) + if err != nil { + return "", err + } + + if l.Count == 0 { + return "", fmt.Errorf("No match found for %s: %+v", name, l) + } + + if l.Count == 1 { + return l.Pods[0].Id, nil + } + + if l.Count > 1 { + for _, v := range l.Pods { + if v.Name == name { + return v.Id, nil + } + } + } + return "", fmt.Errorf("Could not find an exact match for %s: %+v", name, l) +} + +// This is a courtesy helper function, which in some cases may not work as expected! 
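+// GetPodByName resolves the name to an ID with GetPodID and then fetches by
+// ID, so it inherits both helpers' caveats. A sketch (placeholder name):
+//
+//	pod, _, err := cs.Pod.GetPodByName("pod01")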
+func (s *PodService) GetPodByName(name string) (*Pod, int, error) { + id, err := s.GetPodID(name) + if err != nil { + return nil, -1, err + } + + r, count, err := s.GetPodByID(id) + if err != nil { + return nil, count, err + } + return r, count, nil +} + +// This is a courtesy helper function, which in some cases may not work as expected! +func (s *PodService) GetPodByID(id string) (*Pod, int, error) { + p := &ListPodsParams{} + p.p = make(map[string]interface{}) + + p.p["id"] = id + + l, err := s.ListPods(p) + if err != nil { + if strings.Contains(err.Error(), fmt.Sprintf( + "Invalid parameter id value=%s due to incorrect long value format, "+ + "or entity does not exist", id)) { + return nil, 0, fmt.Errorf("No match found for %s: %+v", id, l) + } + return nil, -1, err + } + + if l.Count == 0 { + return nil, l.Count, fmt.Errorf("No match found for %s: %+v", id, l) + } + + if l.Count == 1 { + return l.Pods[0], l.Count, nil + } + return nil, l.Count, fmt.Errorf("There is more than one result for Pod UUID: %s!", id) +} + +// Lists all Pods. +func (s *PodService) ListPods(p *ListPodsParams) (*ListPodsResponse, error) { + resp, err := s.cs.newRequest("listPods", p.toURLValues()) + if err != nil { + return nil, err + } + + var r ListPodsResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + return &r, nil +} + +type ListPodsResponse struct { + Count int `json:"count"` + Pods []*Pod `json:"pod"` +} + +type Pod struct { + Allocationstate string `json:"allocationstate,omitempty"` + Capacity []struct { + Capacitytotal int64 `json:"capacitytotal,omitempty"` + Capacityused int64 `json:"capacityused,omitempty"` + Clusterid string `json:"clusterid,omitempty"` + Clustername string `json:"clustername,omitempty"` + Percentused string `json:"percentused,omitempty"` + Podid string `json:"podid,omitempty"` + Podname string `json:"podname,omitempty"` + Type int `json:"type,omitempty"` + Zoneid string `json:"zoneid,omitempty"` + Zonename string `json:"zonename,omitempty"` + } `json:"capacity,omitempty"` + Endip string `json:"endip,omitempty"` + Gateway string `json:"gateway,omitempty"` + Id string `json:"id,omitempty"` + Name string `json:"name,omitempty"` + Netmask string `json:"netmask,omitempty"` + Startip string `json:"startip,omitempty"` + Zoneid string `json:"zoneid,omitempty"` + Zonename string `json:"zonename,omitempty"` +} + +type DedicatePodParams struct { + p map[string]interface{} +} + +func (p *DedicatePodParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["account"]; found { + u.Set("account", v.(string)) + } + if v, found := p.p["domainid"]; found { + u.Set("domainid", v.(string)) + } + if v, found := p.p["podid"]; found { + u.Set("podid", v.(string)) + } + return u +} + +func (p *DedicatePodParams) SetAccount(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["account"] = v + return +} + +func (p *DedicatePodParams) SetDomainid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["domainid"] = v + return +} + +func (p *DedicatePodParams) SetPodid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["podid"] = v + return +} + +// You should always use this function to get a new DedicatePodParams instance, +// as then you are sure you have configured all required params +func (s *PodService) NewDedicatePodParams(domainid string, podid string) *DedicatePodParams { + p := &DedicatePodParams{} + p.p = make(map[string]interface{}) +
p.p["domainid"] = domainid + p.p["podid"] = podid + return p +} + +// Dedicates a Pod. +func (s *PodService) DedicatePod(p *DedicatePodParams) (*DedicatePodResponse, error) { + resp, err := s.cs.newRequest("dedicatePod", p.toURLValues()) + if err != nil { + return nil, err + } + + var r DedicatePodResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + + // If we have a async client, we need to wait for the async result + if s.cs.async { + b, err := s.cs.GetAsyncJobResult(r.JobID, s.cs.timeout) + if err != nil { + if err == AsyncTimeoutErr { + return &r, err + } + return nil, err + } + + b, err = getRawValue(b) + if err != nil { + return nil, err + } + + if err := json.Unmarshal(b, &r); err != nil { + return nil, err + } + } + return &r, nil +} + +type DedicatePodResponse struct { + JobID string `json:"jobid,omitempty"` + Accountid string `json:"accountid,omitempty"` + Affinitygroupid string `json:"affinitygroupid,omitempty"` + Domainid string `json:"domainid,omitempty"` + Id string `json:"id,omitempty"` + Podid string `json:"podid,omitempty"` + Podname string `json:"podname,omitempty"` +} + +type ReleaseDedicatedPodParams struct { + p map[string]interface{} +} + +func (p *ReleaseDedicatedPodParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["podid"]; found { + u.Set("podid", v.(string)) + } + return u +} + +func (p *ReleaseDedicatedPodParams) SetPodid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["podid"] = v + return +} + +// You should always use this function to get a new ReleaseDedicatedPodParams instance, +// as then you are sure you have configured all required params +func (s *PodService) NewReleaseDedicatedPodParams(podid string) *ReleaseDedicatedPodParams { + p := &ReleaseDedicatedPodParams{} + p.p = make(map[string]interface{}) + p.p["podid"] = podid + return p +} + +// Release the dedication for the pod +func (s *PodService) ReleaseDedicatedPod(p *ReleaseDedicatedPodParams) (*ReleaseDedicatedPodResponse, error) { + resp, err := s.cs.newRequest("releaseDedicatedPod", p.toURLValues()) + if err != nil { + return nil, err + } + + var r ReleaseDedicatedPodResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + + // If we have a async client, we need to wait for the async result + if s.cs.async { + b, err := s.cs.GetAsyncJobResult(r.JobID, s.cs.timeout) + if err != nil { + if err == AsyncTimeoutErr { + return &r, err + } + return nil, err + } + + if err := json.Unmarshal(b, &r); err != nil { + return nil, err + } + } + return &r, nil +} + +type ReleaseDedicatedPodResponse struct { + JobID string `json:"jobid,omitempty"` + Displaytext string `json:"displaytext,omitempty"` + Success bool `json:"success,omitempty"` +} + +type ListDedicatedPodsParams struct { + p map[string]interface{} +} + +func (p *ListDedicatedPodsParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["account"]; found { + u.Set("account", v.(string)) + } + if v, found := p.p["affinitygroupid"]; found { + u.Set("affinitygroupid", v.(string)) + } + if v, found := p.p["domainid"]; found { + u.Set("domainid", v.(string)) + } + if v, found := p.p["keyword"]; found { + u.Set("keyword", v.(string)) + } + if v, found := p.p["page"]; found { + vv := strconv.Itoa(v.(int)) + u.Set("page", vv) + } + if v, found := p.p["pagesize"]; found { + vv := strconv.Itoa(v.(int)) + u.Set("pagesize", vv) + } + if v, found := p.p["podid"]; found { 
+ u.Set("podid", v.(string)) + } + return u +} + +func (p *ListDedicatedPodsParams) SetAccount(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["account"] = v + return +} + +func (p *ListDedicatedPodsParams) SetAffinitygroupid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["affinitygroupid"] = v + return +} + +func (p *ListDedicatedPodsParams) SetDomainid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["domainid"] = v + return +} + +func (p *ListDedicatedPodsParams) SetKeyword(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["keyword"] = v + return +} + +func (p *ListDedicatedPodsParams) SetPage(v int) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["page"] = v + return +} + +func (p *ListDedicatedPodsParams) SetPagesize(v int) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["pagesize"] = v + return +} + +func (p *ListDedicatedPodsParams) SetPodid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["podid"] = v + return +} + +// You should always use this function to get a new ListDedicatedPodsParams instance, +// as then you are sure you have configured all required params +func (s *PodService) NewListDedicatedPodsParams() *ListDedicatedPodsParams { + p := &ListDedicatedPodsParams{} + p.p = make(map[string]interface{}) + return p +} + +// Lists dedicated pods. +func (s *PodService) ListDedicatedPods(p *ListDedicatedPodsParams) (*ListDedicatedPodsResponse, error) { + resp, err := s.cs.newRequest("listDedicatedPods", p.toURLValues()) + if err != nil { + return nil, err + } + + var r ListDedicatedPodsResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + return &r, nil +} + +type ListDedicatedPodsResponse struct { + Count int `json:"count"` + DedicatedPods []*DedicatedPod `json:"dedicatedpod"` +} + +type DedicatedPod struct { + Accountid string `json:"accountid,omitempty"` + Affinitygroupid string `json:"affinitygroupid,omitempty"` + Domainid string `json:"domainid,omitempty"` + Id string `json:"id,omitempty"` + Podid string `json:"podid,omitempty"` + Podname string `json:"podname,omitempty"` +} diff --git a/vendor/github.com/xanzy/go-cloudstack/cloudstack/PoolService.go b/vendor/github.com/xanzy/go-cloudstack/cloudstack/PoolService.go new file mode 100644 index 000000000..c42224ec8 --- /dev/null +++ b/vendor/github.com/xanzy/go-cloudstack/cloudstack/PoolService.go @@ -0,0 +1,776 @@ +// +// Copyright 2014, Sander van Harmelen +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// + +package cloudstack + +import ( + "encoding/json" + "fmt" + "net/url" + "strconv" + "strings" +) + +type ListStoragePoolsParams struct { + p map[string]interface{} +} + +func (p *ListStoragePoolsParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["clusterid"]; found { + u.Set("clusterid", v.(string)) + } + if v, found := p.p["id"]; found { + u.Set("id", v.(string)) + } + if v, found := p.p["ipaddress"]; found { + u.Set("ipaddress", v.(string)) + } + if v, found := p.p["keyword"]; found { + u.Set("keyword", v.(string)) + } + if v, found := p.p["name"]; found { + u.Set("name", v.(string)) + } + if v, found := p.p["page"]; found { + vv := strconv.Itoa(v.(int)) + u.Set("page", vv) + } + if v, found := p.p["pagesize"]; found { + vv := strconv.Itoa(v.(int)) + u.Set("pagesize", vv) + } + if v, found := p.p["path"]; found { + u.Set("path", v.(string)) + } + if v, found := p.p["podid"]; found { + u.Set("podid", v.(string)) + } + if v, found := p.p["scope"]; found { + u.Set("scope", v.(string)) + } + if v, found := p.p["zoneid"]; found { + u.Set("zoneid", v.(string)) + } + return u +} + +func (p *ListStoragePoolsParams) SetClusterid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["clusterid"] = v + return +} + +func (p *ListStoragePoolsParams) SetId(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["id"] = v + return +} + +func (p *ListStoragePoolsParams) SetIpaddress(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["ipaddress"] = v + return +} + +func (p *ListStoragePoolsParams) SetKeyword(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["keyword"] = v + return +} + +func (p *ListStoragePoolsParams) SetName(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["name"] = v + return +} + +func (p *ListStoragePoolsParams) SetPage(v int) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["page"] = v + return +} + +func (p *ListStoragePoolsParams) SetPagesize(v int) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["pagesize"] = v + return +} + +func (p *ListStoragePoolsParams) SetPath(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["path"] = v + return +} + +func (p *ListStoragePoolsParams) SetPodid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["podid"] = v + return +} + +func (p *ListStoragePoolsParams) SetScope(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["scope"] = v + return +} + +func (p *ListStoragePoolsParams) SetZoneid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["zoneid"] = v + return +} + +// You should always use this function to get a new ListStoragePoolsParams instance, +// as then you are sure you have configured all required params +func (s *PoolService) NewListStoragePoolsParams() *ListStoragePoolsParams { + p := &ListStoragePoolsParams{} + p.p = make(map[string]interface{}) + return p +} + +// This is a courtesy helper function, which in some cases may not work as expected! 
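+// The helper filters by name server-side and, when several pools come back,
+// falls back to a local exact-name scan. A sketch (placeholder name, client
+// assumed to expose this service as cs.Pool):
+//
+//	id, err := cs.Pool.GetStoragePoolID("primary-pool-01")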
+func (s *PoolService) GetStoragePoolID(name string) (string, error) { + p := &ListStoragePoolsParams{} + p.p = make(map[string]interface{}) + + p.p["name"] = name + + l, err := s.ListStoragePools(p) + if err != nil { + return "", err + } + + if l.Count == 0 { + return "", fmt.Errorf("No match found for %s: %+v", name, l) + } + + if l.Count == 1 { + return l.StoragePools[0].Id, nil + } + + if l.Count > 1 { + for _, v := range l.StoragePools { + if v.Name == name { + return v.Id, nil + } + } + } + return "", fmt.Errorf("Could not find an exact match for %s: %+v", name, l) +} + +// This is a courtesy helper function, which in some cases may not work as expected! +func (s *PoolService) GetStoragePoolByName(name string) (*StoragePool, int, error) { + id, err := s.GetStoragePoolID(name) + if err != nil { + return nil, -1, err + } + + r, count, err := s.GetStoragePoolByID(id) + if err != nil { + return nil, count, err + } + return r, count, nil +} + +// This is a courtesy helper function, which in some cases may not work as expected! +func (s *PoolService) GetStoragePoolByID(id string) (*StoragePool, int, error) { + p := &ListStoragePoolsParams{} + p.p = make(map[string]interface{}) + + p.p["id"] = id + + l, err := s.ListStoragePools(p) + if err != nil { + if strings.Contains(err.Error(), fmt.Sprintf( + "Invalid parameter id value=%s due to incorrect long value format, "+ + "or entity does not exist", id)) { + return nil, 0, fmt.Errorf("No match found for %s: %+v", id, l) + } + return nil, -1, err + } + + if l.Count == 0 { + return nil, l.Count, fmt.Errorf("No match found for %s: %+v", id, l) + } + + if l.Count == 1 { + return l.StoragePools[0], l.Count, nil + } + return nil, l.Count, fmt.Errorf("There is more than one result for StoragePool UUID: %s!", id) +} + +// Lists storage pools.
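+// Results can be paged with the page/pagesize parameters. A sketch that
+// fetches the first 50 pools in a zone (placeholder zone UUID):
+//
+//	p := cs.Pool.NewListStoragePoolsParams()
+//	p.SetZoneid("zone-uuid")
+//	p.SetPage(1)
+//	p.SetPagesize(50)
+//	l, err := cs.Pool.ListStoragePools(p)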
+func (s *PoolService) ListStoragePools(p *ListStoragePoolsParams) (*ListStoragePoolsResponse, error) { + resp, err := s.cs.newRequest("listStoragePools", p.toURLValues()) + if err != nil { + return nil, err + } + + var r ListStoragePoolsResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + return &r, nil +} + +type ListStoragePoolsResponse struct { + Count int `json:"count"` + StoragePools []*StoragePool `json:"storagepool"` +} + +type StoragePool struct { + Capacityiops int64 `json:"capacityiops,omitempty"` + Clusterid string `json:"clusterid,omitempty"` + Clustername string `json:"clustername,omitempty"` + Created string `json:"created,omitempty"` + Disksizeallocated int64 `json:"disksizeallocated,omitempty"` + Disksizetotal int64 `json:"disksizetotal,omitempty"` + Disksizeused int64 `json:"disksizeused,omitempty"` + Hypervisor string `json:"hypervisor,omitempty"` + Id string `json:"id,omitempty"` + Ipaddress string `json:"ipaddress,omitempty"` + Name string `json:"name,omitempty"` + Overprovisionfactor string `json:"overprovisionfactor,omitempty"` + Path string `json:"path,omitempty"` + Podid string `json:"podid,omitempty"` + Podname string `json:"podname,omitempty"` + Scope string `json:"scope,omitempty"` + State string `json:"state,omitempty"` + Storagecapabilities map[string]string `json:"storagecapabilities,omitempty"` + Suitableformigration bool `json:"suitableformigration,omitempty"` + Tags string `json:"tags,omitempty"` + Type string `json:"type,omitempty"` + Zoneid string `json:"zoneid,omitempty"` + Zonename string `json:"zonename,omitempty"` +} + +type CreateStoragePoolParams struct { + p map[string]interface{} +} + +func (p *CreateStoragePoolParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["capacitybytes"]; found { + vv := strconv.FormatInt(v.(int64), 10) + u.Set("capacitybytes", vv) + } + if v, found := p.p["capacityiops"]; found { + vv := strconv.FormatInt(v.(int64), 10) + u.Set("capacityiops", vv) + } + if v, found := p.p["clusterid"]; found { + u.Set("clusterid", v.(string)) + } + if v, found := p.p["details"]; found { + i := 0 + for k, vv := range v.(map[string]string) { + u.Set(fmt.Sprintf("details[%d].key", i), k) + u.Set(fmt.Sprintf("details[%d].value", i), vv) + i++ + } + } + if v, found := p.p["hypervisor"]; found { + u.Set("hypervisor", v.(string)) + } + if v, found := p.p["managed"]; found { + vv := strconv.FormatBool(v.(bool)) + u.Set("managed", vv) + } + if v, found := p.p["name"]; found { + u.Set("name", v.(string)) + } + if v, found := p.p["podid"]; found { + u.Set("podid", v.(string)) + } + if v, found := p.p["provider"]; found { + u.Set("provider", v.(string)) + } + if v, found := p.p["scope"]; found { + u.Set("scope", v.(string)) + } + if v, found := p.p["tags"]; found { + u.Set("tags", v.(string)) + } + if v, found := p.p["url"]; found { + u.Set("url", v.(string)) + } + if v, found := p.p["zoneid"]; found { + u.Set("zoneid", v.(string)) + } + return u +} + +func (p *CreateStoragePoolParams) SetCapacitybytes(v int64) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["capacitybytes"] = v + return +} + +func (p *CreateStoragePoolParams) SetCapacityiops(v int64) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["capacityiops"] = v + return +} + +func (p *CreateStoragePoolParams) SetClusterid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["clusterid"] = v + return +} + +func (p *CreateStoragePoolParams) 
SetDetails(v map[string]string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["details"] = v + return +} + +func (p *CreateStoragePoolParams) SetHypervisor(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["hypervisor"] = v + return +} + +func (p *CreateStoragePoolParams) SetManaged(v bool) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["managed"] = v + return +} + +func (p *CreateStoragePoolParams) SetName(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["name"] = v + return +} + +func (p *CreateStoragePoolParams) SetPodid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["podid"] = v + return +} + +func (p *CreateStoragePoolParams) SetProvider(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["provider"] = v + return +} + +func (p *CreateStoragePoolParams) SetScope(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["scope"] = v + return +} + +func (p *CreateStoragePoolParams) SetTags(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["tags"] = v + return +} + +func (p *CreateStoragePoolParams) SetUrl(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["url"] = v + return +} + +func (p *CreateStoragePoolParams) SetZoneid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["zoneid"] = v + return +} + +// You should always use this function to get a new CreateStoragePoolParams instance, +// as then you are sure you have configured all required params +func (s *PoolService) NewCreateStoragePoolParams(name string, url string, zoneid string) *CreateStoragePoolParams { + p := &CreateStoragePoolParams{} + p.p = make(map[string]interface{}) + p.p["name"] = name + p.p["url"] = url + p.p["zoneid"] = zoneid + return p +} + +// Creates a storage pool. 
+func (s *PoolService) CreateStoragePool(p *CreateStoragePoolParams) (*CreateStoragePoolResponse, error) { + resp, err := s.cs.newRequest("createStoragePool", p.toURLValues()) + if err != nil { + return nil, err + } + + var r CreateStoragePoolResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + return &r, nil +} + +type CreateStoragePoolResponse struct { + Capacityiops int64 `json:"capacityiops,omitempty"` + Clusterid string `json:"clusterid,omitempty"` + Clustername string `json:"clustername,omitempty"` + Created string `json:"created,omitempty"` + Disksizeallocated int64 `json:"disksizeallocated,omitempty"` + Disksizetotal int64 `json:"disksizetotal,omitempty"` + Disksizeused int64 `json:"disksizeused,omitempty"` + Hypervisor string `json:"hypervisor,omitempty"` + Id string `json:"id,omitempty"` + Ipaddress string `json:"ipaddress,omitempty"` + Name string `json:"name,omitempty"` + Overprovisionfactor string `json:"overprovisionfactor,omitempty"` + Path string `json:"path,omitempty"` + Podid string `json:"podid,omitempty"` + Podname string `json:"podname,omitempty"` + Scope string `json:"scope,omitempty"` + State string `json:"state,omitempty"` + Storagecapabilities map[string]string `json:"storagecapabilities,omitempty"` + Suitableformigration bool `json:"suitableformigration,omitempty"` + Tags string `json:"tags,omitempty"` + Type string `json:"type,omitempty"` + Zoneid string `json:"zoneid,omitempty"` + Zonename string `json:"zonename,omitempty"` +} + +type UpdateStoragePoolParams struct { + p map[string]interface{} +} + +func (p *UpdateStoragePoolParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["capacitybytes"]; found { + vv := strconv.FormatInt(v.(int64), 10) + u.Set("capacitybytes", vv) + } + if v, found := p.p["capacityiops"]; found { + vv := strconv.FormatInt(v.(int64), 10) + u.Set("capacityiops", vv) + } + if v, found := p.p["id"]; found { + u.Set("id", v.(string)) + } + if v, found := p.p["tags"]; found { + vv := strings.Join(v.([]string), ",") + u.Set("tags", vv) + } + return u +} + +func (p *UpdateStoragePoolParams) SetCapacitybytes(v int64) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["capacitybytes"] = v + return +} + +func (p *UpdateStoragePoolParams) SetCapacityiops(v int64) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["capacityiops"] = v + return +} + +func (p *UpdateStoragePoolParams) SetId(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["id"] = v + return +} + +func (p *UpdateStoragePoolParams) SetTags(v []string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["tags"] = v + return +} + +// You should always use this function to get a new UpdateStoragePoolParams instance, +// as then you are sure you have configured all required params +func (s *PoolService) NewUpdateStoragePoolParams(id string) *UpdateStoragePoolParams { + p := &UpdateStoragePoolParams{} + p.p = make(map[string]interface{}) + p.p["id"] = id + return p +} + +// Updates a storage pool. 
+func (s *PoolService) UpdateStoragePool(p *UpdateStoragePoolParams) (*UpdateStoragePoolResponse, error) { + resp, err := s.cs.newRequest("updateStoragePool", p.toURLValues()) + if err != nil { + return nil, err + } + + var r UpdateStoragePoolResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + return &r, nil +} + +type UpdateStoragePoolResponse struct { + Capacityiops int64 `json:"capacityiops,omitempty"` + Clusterid string `json:"clusterid,omitempty"` + Clustername string `json:"clustername,omitempty"` + Created string `json:"created,omitempty"` + Disksizeallocated int64 `json:"disksizeallocated,omitempty"` + Disksizetotal int64 `json:"disksizetotal,omitempty"` + Disksizeused int64 `json:"disksizeused,omitempty"` + Hypervisor string `json:"hypervisor,omitempty"` + Id string `json:"id,omitempty"` + Ipaddress string `json:"ipaddress,omitempty"` + Name string `json:"name,omitempty"` + Overprovisionfactor string `json:"overprovisionfactor,omitempty"` + Path string `json:"path,omitempty"` + Podid string `json:"podid,omitempty"` + Podname string `json:"podname,omitempty"` + Scope string `json:"scope,omitempty"` + State string `json:"state,omitempty"` + Storagecapabilities map[string]string `json:"storagecapabilities,omitempty"` + Suitableformigration bool `json:"suitableformigration,omitempty"` + Tags string `json:"tags,omitempty"` + Type string `json:"type,omitempty"` + Zoneid string `json:"zoneid,omitempty"` + Zonename string `json:"zonename,omitempty"` +} + +type DeleteStoragePoolParams struct { + p map[string]interface{} +} + +func (p *DeleteStoragePoolParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["forced"]; found { + vv := strconv.FormatBool(v.(bool)) + u.Set("forced", vv) + } + if v, found := p.p["id"]; found { + u.Set("id", v.(string)) + } + return u +} + +func (p *DeleteStoragePoolParams) SetForced(v bool) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["forced"] = v + return +} + +func (p *DeleteStoragePoolParams) SetId(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["id"] = v + return +} + +// You should always use this function to get a new DeleteStoragePoolParams instance, +// as then you are sure you have configured all required params +func (s *PoolService) NewDeleteStoragePoolParams(id string) *DeleteStoragePoolParams { + p := &DeleteStoragePoolParams{} + p.p = make(map[string]interface{}) + p.p["id"] = id + return p +} + +// Deletes a storage pool. 
+func (s *PoolService) DeleteStoragePool(p *DeleteStoragePoolParams) (*DeleteStoragePoolResponse, error) { + resp, err := s.cs.newRequest("deleteStoragePool", p.toURLValues()) + if err != nil { + return nil, err + } + + var r DeleteStoragePoolResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + return &r, nil +} + +type DeleteStoragePoolResponse struct { + Displaytext string `json:"displaytext,omitempty"` + Success string `json:"success,omitempty"` +} + +type FindStoragePoolsForMigrationParams struct { + p map[string]interface{} +} + +func (p *FindStoragePoolsForMigrationParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["id"]; found { + u.Set("id", v.(string)) + } + if v, found := p.p["keyword"]; found { + u.Set("keyword", v.(string)) + } + if v, found := p.p["page"]; found { + vv := strconv.Itoa(v.(int)) + u.Set("page", vv) + } + if v, found := p.p["pagesize"]; found { + vv := strconv.Itoa(v.(int)) + u.Set("pagesize", vv) + } + return u +} + +func (p *FindStoragePoolsForMigrationParams) SetId(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["id"] = v + return +} + +func (p *FindStoragePoolsForMigrationParams) SetKeyword(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["keyword"] = v + return +} + +func (p *FindStoragePoolsForMigrationParams) SetPage(v int) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["page"] = v + return +} + +func (p *FindStoragePoolsForMigrationParams) SetPagesize(v int) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["pagesize"] = v + return +} + +// You should always use this function to get a new FindStoragePoolsForMigrationParams instance, +// as then you are sure you have configured all required params +func (s *PoolService) NewFindStoragePoolsForMigrationParams(id string) *FindStoragePoolsForMigrationParams { + p := &FindStoragePoolsForMigrationParams{} + p.p = make(map[string]interface{}) + p.p["id"] = id + return p +} + +// Lists storage pools available for migration of a volume. 
+func (s *PoolService) FindStoragePoolsForMigration(p *FindStoragePoolsForMigrationParams) (*FindStoragePoolsForMigrationResponse, error) { + resp, err := s.cs.newRequest("findStoragePoolsForMigration", p.toURLValues()) + if err != nil { + return nil, err + } + + var r FindStoragePoolsForMigrationResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + return &r, nil +} + +type FindStoragePoolsForMigrationResponse struct { + Capacityiops int64 `json:"capacityiops,omitempty"` + Clusterid string `json:"clusterid,omitempty"` + Clustername string `json:"clustername,omitempty"` + Created string `json:"created,omitempty"` + Disksizeallocated int64 `json:"disksizeallocated,omitempty"` + Disksizetotal int64 `json:"disksizetotal,omitempty"` + Disksizeused int64 `json:"disksizeused,omitempty"` + Hypervisor string `json:"hypervisor,omitempty"` + Id string `json:"id,omitempty"` + Ipaddress string `json:"ipaddress,omitempty"` + Name string `json:"name,omitempty"` + Overprovisionfactor string `json:"overprovisionfactor,omitempty"` + Path string `json:"path,omitempty"` + Podid string `json:"podid,omitempty"` + Podname string `json:"podname,omitempty"` + Scope string `json:"scope,omitempty"` + State string `json:"state,omitempty"` + Storagecapabilities map[string]string `json:"storagecapabilities,omitempty"` + Suitableformigration bool `json:"suitableformigration,omitempty"` + Tags string `json:"tags,omitempty"` + Type string `json:"type,omitempty"` + Zoneid string `json:"zoneid,omitempty"` + Zonename string `json:"zonename,omitempty"` +} diff --git a/vendor/github.com/xanzy/go-cloudstack/cloudstack/PortableIPService.go b/vendor/github.com/xanzy/go-cloudstack/cloudstack/PortableIPService.go new file mode 100644 index 000000000..3dabf38ba --- /dev/null +++ b/vendor/github.com/xanzy/go-cloudstack/cloudstack/PortableIPService.go @@ -0,0 +1,387 @@ +// +// Copyright 2014, Sander van Harmelen +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+//
+
+package cloudstack
+
+import (
+    "encoding/json"
+    "fmt"
+    "net/url"
+    "strconv"
+    "strings"
+)
+
+type CreatePortableIpRangeParams struct {
+    p map[string]interface{}
+}
+
+func (p *CreatePortableIpRangeParams) toURLValues() url.Values {
+    u := url.Values{}
+    if p.p == nil {
+        return u
+    }
+    if v, found := p.p["endip"]; found {
+        u.Set("endip", v.(string))
+    }
+    if v, found := p.p["gateway"]; found {
+        u.Set("gateway", v.(string))
+    }
+    if v, found := p.p["netmask"]; found {
+        u.Set("netmask", v.(string))
+    }
+    if v, found := p.p["regionid"]; found {
+        vv := strconv.Itoa(v.(int))
+        u.Set("regionid", vv)
+    }
+    if v, found := p.p["startip"]; found {
+        u.Set("startip", v.(string))
+    }
+    if v, found := p.p["vlan"]; found {
+        u.Set("vlan", v.(string))
+    }
+    return u
+}
+
+func (p *CreatePortableIpRangeParams) SetEndip(v string) {
+    if p.p == nil {
+        p.p = make(map[string]interface{})
+    }
+    p.p["endip"] = v
+    return
+}
+
+func (p *CreatePortableIpRangeParams) SetGateway(v string) {
+    if p.p == nil {
+        p.p = make(map[string]interface{})
+    }
+    p.p["gateway"] = v
+    return
+}
+
+func (p *CreatePortableIpRangeParams) SetNetmask(v string) {
+    if p.p == nil {
+        p.p = make(map[string]interface{})
+    }
+    p.p["netmask"] = v
+    return
+}
+
+func (p *CreatePortableIpRangeParams) SetRegionid(v int) {
+    if p.p == nil {
+        p.p = make(map[string]interface{})
+    }
+    p.p["regionid"] = v
+    return
+}
+
+func (p *CreatePortableIpRangeParams) SetStartip(v string) {
+    if p.p == nil {
+        p.p = make(map[string]interface{})
+    }
+    p.p["startip"] = v
+    return
+}
+
+func (p *CreatePortableIpRangeParams) SetVlan(v string) {
+    if p.p == nil {
+        p.p = make(map[string]interface{})
+    }
+    p.p["vlan"] = v
+    return
+}
+
+// You should always use this function to get a new CreatePortableIpRangeParams instance,
+// as then you are sure you have configured all required params
+func (s *PortableIPService) NewCreatePortableIpRangeParams(endip string, gateway string, netmask string, regionid int, startip string) *CreatePortableIpRangeParams {
+    p := &CreatePortableIpRangeParams{}
+    p.p = make(map[string]interface{})
+    p.p["endip"] = endip
+    p.p["gateway"] = gateway
+    p.p["netmask"] = netmask
+    p.p["regionid"] = regionid
+    p.p["startip"] = startip
+    return p
+}
+
+// Adds a range of portable public IPs to a region
+func (s *PortableIPService) CreatePortableIpRange(p *CreatePortableIpRangeParams) (*CreatePortableIpRangeResponse, error) {
+    resp, err := s.cs.newRequest("createPortableIpRange", p.toURLValues())
+    if err != nil {
+        return nil, err
+    }
+
+    var r CreatePortableIpRangeResponse
+    if err := json.Unmarshal(resp, &r); err != nil {
+        return nil, err
+    }
+
+    // If we have an async client, we need to wait for the async result
+    if s.cs.async {
+        b, err := s.cs.GetAsyncJobResult(r.JobID, s.cs.timeout)
+        if err != nil {
+            if err == AsyncTimeoutErr {
+                return &r, err
+            }
+            return nil, err
+        }
+
+        b, err = getRawValue(b)
+        if err != nil {
+            return nil, err
+        }
+
+        if err := json.Unmarshal(b, &r); err != nil {
+            return nil, err
+        }
+    }
+    return &r, nil
+}
+
+type CreatePortableIpRangeResponse struct {
+    JobID string `json:"jobid,omitempty"`
+    Endip string `json:"endip,omitempty"`
+    Gateway string `json:"gateway,omitempty"`
+    Id string `json:"id,omitempty"`
+    Netmask string `json:"netmask,omitempty"`
+    Portableipaddress []struct {
+        Accountid string `json:"accountid,omitempty"`
+        Allocated string `json:"allocated,omitempty"`
+        Domainid string `json:"domainid,omitempty"`
+        Ipaddress string `json:"ipaddress,omitempty"`
+        Networkid string `json:"networkid,omitempty"`
+        Physicalnetworkid string `json:"physicalnetworkid,omitempty"`
+        Regionid int `json:"regionid,omitempty"`
+        State string `json:"state,omitempty"`
+        Vpcid string `json:"vpcid,omitempty"`
+        Zoneid string `json:"zoneid,omitempty"`
+    } `json:"portableipaddress,omitempty"`
+    Regionid int `json:"regionid,omitempty"`
+    Startip string `json:"startip,omitempty"`
+    Vlan string `json:"vlan,omitempty"`
+}
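createPortableIpRange is an asynchronous CloudStack call: with an async client, the method above blocks in GetAsyncJobResult until the job finishes, and on timeout it returns both the partial response (which still carries the JobID) and AsyncTimeoutErr. A hedged sketch of handling that case, continuing from the cs client above; cs.PortableIP as the service field and all addresses are assumptions and placeholders:

    p := cs.PortableIP.NewCreatePortableIpRangeParams(
        "10.0.0.200", "10.0.0.1", "255.255.255.0", 1, "10.0.0.100")
    p.SetVlan("untagged")

    r, err := cs.PortableIP.CreatePortableIpRange(p)
    if err == cloudstack.AsyncTimeoutErr {
        // The job is still running server side; r.JobID can be polled later.
        log.Printf("timed out waiting on job %s", r.JobID)
    } else if err != nil {
        log.Fatal(err)
    }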
`json:"networkid,omitempty"` + Physicalnetworkid string `json:"physicalnetworkid,omitempty"` + Regionid int `json:"regionid,omitempty"` + State string `json:"state,omitempty"` + Vpcid string `json:"vpcid,omitempty"` + Zoneid string `json:"zoneid,omitempty"` + } `json:"portableipaddress,omitempty"` + Regionid int `json:"regionid,omitempty"` + Startip string `json:"startip,omitempty"` + Vlan string `json:"vlan,omitempty"` +} + +type DeletePortableIpRangeParams struct { + p map[string]interface{} +} + +func (p *DeletePortableIpRangeParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["id"]; found { + u.Set("id", v.(string)) + } + return u +} + +func (p *DeletePortableIpRangeParams) SetId(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["id"] = v + return +} + +// You should always use this function to get a new DeletePortableIpRangeParams instance, +// as then you are sure you have configured all required params +func (s *PortableIPService) NewDeletePortableIpRangeParams(id string) *DeletePortableIpRangeParams { + p := &DeletePortableIpRangeParams{} + p.p = make(map[string]interface{}) + p.p["id"] = id + return p +} + +// deletes a range of portable public IP's associated with a region +func (s *PortableIPService) DeletePortableIpRange(p *DeletePortableIpRangeParams) (*DeletePortableIpRangeResponse, error) { + resp, err := s.cs.newRequest("deletePortableIpRange", p.toURLValues()) + if err != nil { + return nil, err + } + + var r DeletePortableIpRangeResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + + // If we have a async client, we need to wait for the async result + if s.cs.async { + b, err := s.cs.GetAsyncJobResult(r.JobID, s.cs.timeout) + if err != nil { + if err == AsyncTimeoutErr { + return &r, err + } + return nil, err + } + + if err := json.Unmarshal(b, &r); err != nil { + return nil, err + } + } + return &r, nil +} + +type DeletePortableIpRangeResponse struct { + JobID string `json:"jobid,omitempty"` + Displaytext string `json:"displaytext,omitempty"` + Success bool `json:"success,omitempty"` +} + +type ListPortableIpRangesParams struct { + p map[string]interface{} +} + +func (p *ListPortableIpRangesParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["id"]; found { + u.Set("id", v.(string)) + } + if v, found := p.p["keyword"]; found { + u.Set("keyword", v.(string)) + } + if v, found := p.p["page"]; found { + vv := strconv.Itoa(v.(int)) + u.Set("page", vv) + } + if v, found := p.p["pagesize"]; found { + vv := strconv.Itoa(v.(int)) + u.Set("pagesize", vv) + } + if v, found := p.p["regionid"]; found { + vv := strconv.Itoa(v.(int)) + u.Set("regionid", vv) + } + return u +} + +func (p *ListPortableIpRangesParams) SetId(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["id"] = v + return +} + +func (p *ListPortableIpRangesParams) SetKeyword(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["keyword"] = v + return +} + +func (p *ListPortableIpRangesParams) SetPage(v int) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["page"] = v + return +} + +func (p *ListPortableIpRangesParams) SetPagesize(v int) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["pagesize"] = v + return +} + +func (p *ListPortableIpRangesParams) SetRegionid(v int) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["regionid"] = v + return +} + 
+// You should always use this function to get a new ListPortableIpRangesParams instance,
+// as then you are sure you have configured all required params
+func (s *PortableIPService) NewListPortableIpRangesParams() *ListPortableIpRangesParams {
+    p := &ListPortableIpRangesParams{}
+    p.p = make(map[string]interface{})
+    return p
+}
+
+// This is a courtesy helper function, which in some cases may not work as expected!
+func (s *PortableIPService) GetPortableIpRangeByID(id string) (*PortableIpRange, int, error) {
+    p := &ListPortableIpRangesParams{}
+    p.p = make(map[string]interface{})
+
+    p.p["id"] = id
+
+    l, err := s.ListPortableIpRanges(p)
+    if err != nil {
+        if strings.Contains(err.Error(), fmt.Sprintf(
+            "Invalid parameter id value=%s due to incorrect long value format, "+
+                "or entity does not exist", id)) {
+            return nil, 0, fmt.Errorf("No match found for %s: %+v", id, l)
+        }
+        return nil, -1, err
+    }
+
+    if l.Count == 0 {
+        return nil, l.Count, fmt.Errorf("No match found for %s: %+v", id, l)
+    }
+
+    if l.Count == 1 {
+        return l.PortableIpRanges[0], l.Count, nil
+    }
+    return nil, l.Count, fmt.Errorf("There is more than one result for PortableIpRange UUID: %s!", id)
+}
+
+// Lists portable IP ranges
+func (s *PortableIPService) ListPortableIpRanges(p *ListPortableIpRangesParams) (*ListPortableIpRangesResponse, error) {
+    resp, err := s.cs.newRequest("listPortableIpRanges", p.toURLValues())
+    if err != nil {
+        return nil, err
+    }
+
+    var r ListPortableIpRangesResponse
+    if err := json.Unmarshal(resp, &r); err != nil {
+        return nil, err
+    }
+    return &r, nil
+}
+
+type ListPortableIpRangesResponse struct {
+    Count int `json:"count"`
+    PortableIpRanges []*PortableIpRange `json:"portableiprange"`
+}
+
+type PortableIpRange struct {
+    Endip string `json:"endip,omitempty"`
+    Gateway string `json:"gateway,omitempty"`
+    Id string `json:"id,omitempty"`
+    Netmask string `json:"netmask,omitempty"`
+    Portableipaddress []struct {
+        Accountid string `json:"accountid,omitempty"`
+        Allocated string `json:"allocated,omitempty"`
+        Domainid string `json:"domainid,omitempty"`
+        Ipaddress string `json:"ipaddress,omitempty"`
+        Networkid string `json:"networkid,omitempty"`
+        Physicalnetworkid string `json:"physicalnetworkid,omitempty"`
+        Regionid int `json:"regionid,omitempty"`
+        State string `json:"state,omitempty"`
+        Vpcid string `json:"vpcid,omitempty"`
+        Zoneid string `json:"zoneid,omitempty"`
+    } `json:"portableipaddress,omitempty"`
+    Regionid int `json:"regionid,omitempty"`
+    Startip string `json:"startip,omitempty"`
+    Vlan string `json:"vlan,omitempty"`
+}
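The list calls in this package share the page/pagesize parameters seen above. A small sketch of walking all portable IP ranges page by page, reusing the cs client from the first sketch (the page size of 50 is arbitrary):

    p := cs.PortableIP.NewListPortableIpRangesParams()
    p.SetPagesize(50)
    for page := 1; ; page++ {
        p.SetPage(page)
        l, err := cs.PortableIP.ListPortableIpRanges(p)
        if err != nil {
            log.Fatal(err)
        }
        for _, r := range l.PortableIpRanges {
            fmt.Printf("%s: %s - %s\n", r.Id, r.Startip, r.Endip)
        }
        // A short page means we have reached the end of the listing.
        if len(l.PortableIpRanges) < 50 {
            break
        }
    }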
diff --git a/vendor/github.com/xanzy/go-cloudstack/cloudstack/ProjectService.go b/vendor/github.com/xanzy/go-cloudstack/cloudstack/ProjectService.go
new file mode 100644
index 000000000..b826868a9
--- /dev/null
+++ b/vendor/github.com/xanzy/go-cloudstack/cloudstack/ProjectService.go
@@ -0,0 +1,1338 @@
+//
+// Copyright 2014, Sander van Harmelen
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+package cloudstack
+
+import (
+    "encoding/json"
+    "fmt"
+    "net/url"
+    "strconv"
+    "strings"
+)
+
+type CreateProjectParams struct {
+    p map[string]interface{}
+}
+
+func (p *CreateProjectParams) toURLValues() url.Values {
+    u := url.Values{}
+    if p.p == nil {
+        return u
+    }
+    if v, found := p.p["account"]; found {
+        u.Set("account", v.(string))
+    }
+    if v, found := p.p["displaytext"]; found {
+        u.Set("displaytext", v.(string))
+    }
+    if v, found := p.p["domainid"]; found {
+        u.Set("domainid", v.(string))
+    }
+    if v, found := p.p["name"]; found {
+        u.Set("name", v.(string))
+    }
+    return u
+}
+
+func (p *CreateProjectParams) SetAccount(v string) {
+    if p.p == nil {
+        p.p = make(map[string]interface{})
+    }
+    p.p["account"] = v
+    return
+}
+
+func (p *CreateProjectParams) SetDisplaytext(v string) {
+    if p.p == nil {
+        p.p = make(map[string]interface{})
+    }
+    p.p["displaytext"] = v
+    return
+}
+
+func (p *CreateProjectParams) SetDomainid(v string) {
+    if p.p == nil {
+        p.p = make(map[string]interface{})
+    }
+    p.p["domainid"] = v
+    return
+}
+
+func (p *CreateProjectParams) SetName(v string) {
+    if p.p == nil {
+        p.p = make(map[string]interface{})
+    }
+    p.p["name"] = v
+    return
+}
+
+// You should always use this function to get a new CreateProjectParams instance,
+// as then you are sure you have configured all required params
+func (s *ProjectService) NewCreateProjectParams(displaytext string, name string) *CreateProjectParams {
+    p := &CreateProjectParams{}
+    p.p = make(map[string]interface{})
+    p.p["displaytext"] = displaytext
+    p.p["name"] = name
+    return p
+}
+
+// Creates a project
+func (s *ProjectService) CreateProject(p *CreateProjectParams) (*CreateProjectResponse, error) {
+    resp, err := s.cs.newRequest("createProject", p.toURLValues())
+    if err != nil {
+        return nil, err
+    }
+
+    var r CreateProjectResponse
+    if err := json.Unmarshal(resp, &r); err != nil {
+        return nil, err
+    }
+
+    // If we have an async client, we need to wait for the async result
+    if s.cs.async {
+        b, err := s.cs.GetAsyncJobResult(r.JobID, s.cs.timeout)
+        if err != nil {
+            if err == AsyncTimeoutErr {
+                return &r, err
+            }
+            return nil, err
+        }
+
+        b, err = getRawValue(b)
+        if err != nil {
+            return nil, err
+        }
+
+        if err := json.Unmarshal(b, &r); err != nil {
+            return nil, err
+        }
+    }
+    return &r, nil
+}
+
+type CreateProjectResponse struct {
+    JobID string `json:"jobid,omitempty"`
+    Account string `json:"account,omitempty"`
+    Cpuavailable string `json:"cpuavailable,omitempty"`
+    Cpulimit string `json:"cpulimit,omitempty"`
+    Cputotal int64 `json:"cputotal,omitempty"`
+    Displaytext string `json:"displaytext,omitempty"`
+    Domain string `json:"domain,omitempty"`
+    Domainid string `json:"domainid,omitempty"`
+    Id string `json:"id,omitempty"`
+    Ipavailable string `json:"ipavailable,omitempty"`
+    Iplimit string `json:"iplimit,omitempty"`
+    Iptotal int64 `json:"iptotal,omitempty"`
+    Memoryavailable string `json:"memoryavailable,omitempty"`
+    Memorylimit string `json:"memorylimit,omitempty"`
+    Memorytotal int64 `json:"memorytotal,omitempty"`
+    Name string `json:"name,omitempty"`
+    Networkavailable string `json:"networkavailable,omitempty"`
+    Networklimit string `json:"networklimit,omitempty"`
+    Networktotal int64 `json:"networktotal,omitempty"`
+    Primarystorageavailable string `json:"primarystorageavailable,omitempty"`
+    Primarystoragelimit string `json:"primarystoragelimit,omitempty"`
+    Primarystoragetotal int64 `json:"primarystoragetotal,omitempty"`
+    Secondarystorageavailable string `json:"secondarystorageavailable,omitempty"`
+    Secondarystoragelimit string `json:"secondarystoragelimit,omitempty"`
+    Secondarystoragetotal int64 `json:"secondarystoragetotal,omitempty"`
+    Snapshotavailable string `json:"snapshotavailable,omitempty"`
+    Snapshotlimit string `json:"snapshotlimit,omitempty"`
+    Snapshottotal int64 `json:"snapshottotal,omitempty"`
+    State string `json:"state,omitempty"`
+    Tags []struct {
+        Account string `json:"account,omitempty"`
+        Customer string `json:"customer,omitempty"`
+        Domain string `json:"domain,omitempty"`
+        Domainid string `json:"domainid,omitempty"`
+        Key string `json:"key,omitempty"`
+        Project string `json:"project,omitempty"`
+        Projectid string `json:"projectid,omitempty"`
+        Resourceid string `json:"resourceid,omitempty"`
+        Resourcetype string `json:"resourcetype,omitempty"`
+        Value string `json:"value,omitempty"`
+    } `json:"tags,omitempty"`
+    Templateavailable string `json:"templateavailable,omitempty"`
+    Templatelimit string `json:"templatelimit,omitempty"`
+    Templatetotal int64 `json:"templatetotal,omitempty"`
+    Vmavailable string `json:"vmavailable,omitempty"`
+    Vmlimit string `json:"vmlimit,omitempty"`
+    Vmrunning int `json:"vmrunning,omitempty"`
+    Vmstopped int `json:"vmstopped,omitempty"`
+    Vmtotal int64 `json:"vmtotal,omitempty"`
+    Volumeavailable string `json:"volumeavailable,omitempty"`
+    Volumelimit string `json:"volumelimit,omitempty"`
+    Volumetotal int64 `json:"volumetotal,omitempty"`
+    Vpcavailable string `json:"vpcavailable,omitempty"`
+    Vpclimit string `json:"vpclimit,omitempty"`
+    Vpctotal int64 `json:"vpctotal,omitempty"`
+}
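NewCreateProjectParams takes only the two required arguments (displaytext and name); everything else goes through the optional setters. A sketch, again assuming the cs.Project service field from the client code and using a placeholder domain UUID:

    p := cs.Project.NewCreateProjectParams("Staging environment", "staging")
    p.SetDomainid("<domain-uuid>") // placeholder

    r, err := cs.Project.CreateProject(p)
    if err != nil {
        log.Fatal(err)
    }
    fmt.Printf("created project %s in state %s\n", r.Id, r.State)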
`json:"secondarystorageavailable,omitempty"` + Secondarystoragelimit string `json:"secondarystoragelimit,omitempty"` + Secondarystoragetotal int64 `json:"secondarystoragetotal,omitempty"` + Snapshotavailable string `json:"snapshotavailable,omitempty"` + Snapshotlimit string `json:"snapshotlimit,omitempty"` + Snapshottotal int64 `json:"snapshottotal,omitempty"` + State string `json:"state,omitempty"` + Tags []struct { + Account string `json:"account,omitempty"` + Customer string `json:"customer,omitempty"` + Domain string `json:"domain,omitempty"` + Domainid string `json:"domainid,omitempty"` + Key string `json:"key,omitempty"` + Project string `json:"project,omitempty"` + Projectid string `json:"projectid,omitempty"` + Resourceid string `json:"resourceid,omitempty"` + Resourcetype string `json:"resourcetype,omitempty"` + Value string `json:"value,omitempty"` + } `json:"tags,omitempty"` + Templateavailable string `json:"templateavailable,omitempty"` + Templatelimit string `json:"templatelimit,omitempty"` + Templatetotal int64 `json:"templatetotal,omitempty"` + Vmavailable string `json:"vmavailable,omitempty"` + Vmlimit string `json:"vmlimit,omitempty"` + Vmrunning int `json:"vmrunning,omitempty"` + Vmstopped int `json:"vmstopped,omitempty"` + Vmtotal int64 `json:"vmtotal,omitempty"` + Volumeavailable string `json:"volumeavailable,omitempty"` + Volumelimit string `json:"volumelimit,omitempty"` + Volumetotal int64 `json:"volumetotal,omitempty"` + Vpcavailable string `json:"vpcavailable,omitempty"` + Vpclimit string `json:"vpclimit,omitempty"` + Vpctotal int64 `json:"vpctotal,omitempty"` +} + +type DeleteProjectParams struct { + p map[string]interface{} +} + +func (p *DeleteProjectParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["id"]; found { + u.Set("id", v.(string)) + } + return u +} + +func (p *DeleteProjectParams) SetId(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["id"] = v + return +} + +// You should always use this function to get a new DeleteProjectParams instance, +// as then you are sure you have configured all required params +func (s *ProjectService) NewDeleteProjectParams(id string) *DeleteProjectParams { + p := &DeleteProjectParams{} + p.p = make(map[string]interface{}) + p.p["id"] = id + return p +} + +// Deletes a project +func (s *ProjectService) DeleteProject(p *DeleteProjectParams) (*DeleteProjectResponse, error) { + resp, err := s.cs.newRequest("deleteProject", p.toURLValues()) + if err != nil { + return nil, err + } + + var r DeleteProjectResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + + // If we have a async client, we need to wait for the async result + if s.cs.async { + b, err := s.cs.GetAsyncJobResult(r.JobID, s.cs.timeout) + if err != nil { + if err == AsyncTimeoutErr { + return &r, err + } + return nil, err + } + + if err := json.Unmarshal(b, &r); err != nil { + return nil, err + } + } + return &r, nil +} + +type DeleteProjectResponse struct { + JobID string `json:"jobid,omitempty"` + Displaytext string `json:"displaytext,omitempty"` + Success bool `json:"success,omitempty"` +} + +type UpdateProjectParams struct { + p map[string]interface{} +} + +func (p *UpdateProjectParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["account"]; found { + u.Set("account", v.(string)) + } + if v, found := p.p["displaytext"]; found { + u.Set("displaytext", v.(string)) + } + if v, found := p.p["id"]; 
+
+type UpdateProjectParams struct {
+    p map[string]interface{}
+}
+
+func (p *UpdateProjectParams) toURLValues() url.Values {
+    u := url.Values{}
+    if p.p == nil {
+        return u
+    }
+    if v, found := p.p["account"]; found {
+        u.Set("account", v.(string))
+    }
+    if v, found := p.p["displaytext"]; found {
+        u.Set("displaytext", v.(string))
+    }
+    if v, found := p.p["id"]; found {
+        u.Set("id", v.(string))
+    }
+    return u
+}
+
+func (p *UpdateProjectParams) SetAccount(v string) {
+    if p.p == nil {
+        p.p = make(map[string]interface{})
+    }
+    p.p["account"] = v
+    return
+}
+
+func (p *UpdateProjectParams) SetDisplaytext(v string) {
+    if p.p == nil {
+        p.p = make(map[string]interface{})
+    }
+    p.p["displaytext"] = v
+    return
+}
+
+func (p *UpdateProjectParams) SetId(v string) {
+    if p.p == nil {
+        p.p = make(map[string]interface{})
+    }
+    p.p["id"] = v
+    return
+}
+
+// You should always use this function to get a new UpdateProjectParams instance,
+// as then you are sure you have configured all required params
+func (s *ProjectService) NewUpdateProjectParams(id string) *UpdateProjectParams {
+    p := &UpdateProjectParams{}
+    p.p = make(map[string]interface{})
+    p.p["id"] = id
+    return p
+}
+
+// Updates a project
+func (s *ProjectService) UpdateProject(p *UpdateProjectParams) (*UpdateProjectResponse, error) {
+    resp, err := s.cs.newRequest("updateProject", p.toURLValues())
+    if err != nil {
+        return nil, err
+    }
+
+    var r UpdateProjectResponse
+    if err := json.Unmarshal(resp, &r); err != nil {
+        return nil, err
+    }
+
+    // If we have an async client, we need to wait for the async result
+    if s.cs.async {
+        b, err := s.cs.GetAsyncJobResult(r.JobID, s.cs.timeout)
+        if err != nil {
+            if err == AsyncTimeoutErr {
+                return &r, err
+            }
+            return nil, err
+        }
+
+        b, err = getRawValue(b)
+        if err != nil {
+            return nil, err
+        }
+
+        if err := json.Unmarshal(b, &r); err != nil {
+            return nil, err
+        }
+    }
+    return &r, nil
+}
+
+type UpdateProjectResponse struct {
+    JobID string `json:"jobid,omitempty"`
+    Account string `json:"account,omitempty"`
+    Cpuavailable string `json:"cpuavailable,omitempty"`
+    Cpulimit string `json:"cpulimit,omitempty"`
+    Cputotal int64 `json:"cputotal,omitempty"`
+    Displaytext string `json:"displaytext,omitempty"`
+    Domain string `json:"domain,omitempty"`
+    Domainid string `json:"domainid,omitempty"`
+    Id string `json:"id,omitempty"`
+    Ipavailable string `json:"ipavailable,omitempty"`
+    Iplimit string `json:"iplimit,omitempty"`
+    Iptotal int64 `json:"iptotal,omitempty"`
+    Memoryavailable string `json:"memoryavailable,omitempty"`
+    Memorylimit string `json:"memorylimit,omitempty"`
+    Memorytotal int64 `json:"memorytotal,omitempty"`
+    Name string `json:"name,omitempty"`
+    Networkavailable string `json:"networkavailable,omitempty"`
+    Networklimit string `json:"networklimit,omitempty"`
+    Networktotal int64 `json:"networktotal,omitempty"`
+    Primarystorageavailable string `json:"primarystorageavailable,omitempty"`
+    Primarystoragelimit string `json:"primarystoragelimit,omitempty"`
+    Primarystoragetotal int64 `json:"primarystoragetotal,omitempty"`
+    Secondarystorageavailable string `json:"secondarystorageavailable,omitempty"`
+    Secondarystoragelimit string `json:"secondarystoragelimit,omitempty"`
+    Secondarystoragetotal int64 `json:"secondarystoragetotal,omitempty"`
+    Snapshotavailable string `json:"snapshotavailable,omitempty"`
+    Snapshotlimit string `json:"snapshotlimit,omitempty"`
+    Snapshottotal int64 `json:"snapshottotal,omitempty"`
+    State string `json:"state,omitempty"`
+    Tags []struct {
+        Account string `json:"account,omitempty"`
+        Customer string `json:"customer,omitempty"`
+        Domain string `json:"domain,omitempty"`
+        Domainid string `json:"domainid,omitempty"`
+        Key string `json:"key,omitempty"`
+        Project string `json:"project,omitempty"`
+        Projectid string `json:"projectid,omitempty"`
+        Resourceid string `json:"resourceid,omitempty"`
+        Resourcetype string `json:"resourcetype,omitempty"`
+        Value string `json:"value,omitempty"`
+    } `json:"tags,omitempty"`
+    Templateavailable string `json:"templateavailable,omitempty"`
+    Templatelimit string `json:"templatelimit,omitempty"`
+    Templatetotal int64 `json:"templatetotal,omitempty"`
+    Vmavailable string `json:"vmavailable,omitempty"`
+    Vmlimit string `json:"vmlimit,omitempty"`
+    Vmrunning int `json:"vmrunning,omitempty"`
+    Vmstopped int `json:"vmstopped,omitempty"`
+    Vmtotal int64 `json:"vmtotal,omitempty"`
+    Volumeavailable string `json:"volumeavailable,omitempty"`
+    Volumelimit string `json:"volumelimit,omitempty"`
+    Volumetotal int64 `json:"volumetotal,omitempty"`
+    Vpcavailable string `json:"vpcavailable,omitempty"`
+    Vpclimit string `json:"vpclimit,omitempty"`
+    Vpctotal int64 `json:"vpctotal,omitempty"`
+}
`json:"resourcetype,omitempty"` + Value string `json:"value,omitempty"` + } `json:"tags,omitempty"` + Templateavailable string `json:"templateavailable,omitempty"` + Templatelimit string `json:"templatelimit,omitempty"` + Templatetotal int64 `json:"templatetotal,omitempty"` + Vmavailable string `json:"vmavailable,omitempty"` + Vmlimit string `json:"vmlimit,omitempty"` + Vmrunning int `json:"vmrunning,omitempty"` + Vmstopped int `json:"vmstopped,omitempty"` + Vmtotal int64 `json:"vmtotal,omitempty"` + Volumeavailable string `json:"volumeavailable,omitempty"` + Volumelimit string `json:"volumelimit,omitempty"` + Volumetotal int64 `json:"volumetotal,omitempty"` + Vpcavailable string `json:"vpcavailable,omitempty"` + Vpclimit string `json:"vpclimit,omitempty"` + Vpctotal int64 `json:"vpctotal,omitempty"` +} + +type ActivateProjectParams struct { + p map[string]interface{} +} + +func (p *ActivateProjectParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["id"]; found { + u.Set("id", v.(string)) + } + return u +} + +func (p *ActivateProjectParams) SetId(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["id"] = v + return +} + +// You should always use this function to get a new ActivateProjectParams instance, +// as then you are sure you have configured all required params +func (s *ProjectService) NewActivateProjectParams(id string) *ActivateProjectParams { + p := &ActivateProjectParams{} + p.p = make(map[string]interface{}) + p.p["id"] = id + return p +} + +// Activates a project +func (s *ProjectService) ActivateProject(p *ActivateProjectParams) (*ActivateProjectResponse, error) { + resp, err := s.cs.newRequest("activateProject", p.toURLValues()) + if err != nil { + return nil, err + } + + var r ActivateProjectResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + + // If we have a async client, we need to wait for the async result + if s.cs.async { + b, err := s.cs.GetAsyncJobResult(r.JobID, s.cs.timeout) + if err != nil { + if err == AsyncTimeoutErr { + return &r, err + } + return nil, err + } + + b, err = getRawValue(b) + if err != nil { + return nil, err + } + + if err := json.Unmarshal(b, &r); err != nil { + return nil, err + } + } + return &r, nil +} + +type ActivateProjectResponse struct { + JobID string `json:"jobid,omitempty"` + Account string `json:"account,omitempty"` + Cpuavailable string `json:"cpuavailable,omitempty"` + Cpulimit string `json:"cpulimit,omitempty"` + Cputotal int64 `json:"cputotal,omitempty"` + Displaytext string `json:"displaytext,omitempty"` + Domain string `json:"domain,omitempty"` + Domainid string `json:"domainid,omitempty"` + Id string `json:"id,omitempty"` + Ipavailable string `json:"ipavailable,omitempty"` + Iplimit string `json:"iplimit,omitempty"` + Iptotal int64 `json:"iptotal,omitempty"` + Memoryavailable string `json:"memoryavailable,omitempty"` + Memorylimit string `json:"memorylimit,omitempty"` + Memorytotal int64 `json:"memorytotal,omitempty"` + Name string `json:"name,omitempty"` + Networkavailable string `json:"networkavailable,omitempty"` + Networklimit string `json:"networklimit,omitempty"` + Networktotal int64 `json:"networktotal,omitempty"` + Primarystorageavailable string `json:"primarystorageavailable,omitempty"` + Primarystoragelimit string `json:"primarystoragelimit,omitempty"` + Primarystoragetotal int64 `json:"primarystoragetotal,omitempty"` + Secondarystorageavailable string `json:"secondarystorageavailable,omitempty"` + 
+
+type SuspendProjectParams struct {
+    p map[string]interface{}
+}
+
+func (p *SuspendProjectParams) toURLValues() url.Values {
+    u := url.Values{}
+    if p.p == nil {
+        return u
+    }
+    if v, found := p.p["id"]; found {
+        u.Set("id", v.(string))
+    }
+    return u
+}
+
+func (p *SuspendProjectParams) SetId(v string) {
+    if p.p == nil {
+        p.p = make(map[string]interface{})
+    }
+    p.p["id"] = v
+    return
+}
+
+// You should always use this function to get a new SuspendProjectParams instance,
+// as then you are sure you have configured all required params
+func (s *ProjectService) NewSuspendProjectParams(id string) *SuspendProjectParams {
+    p := &SuspendProjectParams{}
+    p.p = make(map[string]interface{})
+    p.p["id"] = id
+    return p
+}
+
+// Suspends a project
+func (s *ProjectService) SuspendProject(p *SuspendProjectParams) (*SuspendProjectResponse, error) {
+    resp, err := s.cs.newRequest("suspendProject", p.toURLValues())
+    if err != nil {
+        return nil, err
+    }
+
+    var r SuspendProjectResponse
+    if err := json.Unmarshal(resp, &r); err != nil {
+        return nil, err
+    }
+
+    // If we have an async client, we need to wait for the async result
+    if s.cs.async {
+        b, err := s.cs.GetAsyncJobResult(r.JobID, s.cs.timeout)
+        if err != nil {
+            if err == AsyncTimeoutErr {
+                return &r, err
+            }
+            return nil, err
+        }
+
+        b, err = getRawValue(b)
+        if err != nil {
+            return nil, err
+        }
+
+        if err := json.Unmarshal(b, &r); err != nil {
+            return nil, err
+        }
+    }
+    return &r, nil
+}
+
+type SuspendProjectResponse struct {
+    JobID string `json:"jobid,omitempty"`
+    Account string `json:"account,omitempty"`
+    Cpuavailable string `json:"cpuavailable,omitempty"`
+    Cpulimit string `json:"cpulimit,omitempty"`
+    Cputotal int64 `json:"cputotal,omitempty"`
+    Displaytext string `json:"displaytext,omitempty"`
+    Domain string `json:"domain,omitempty"`
+    Domainid string `json:"domainid,omitempty"`
+    Id string `json:"id,omitempty"`
+    Ipavailable string `json:"ipavailable,omitempty"`
+    Iplimit string
`json:"iplimit,omitempty"` + Iptotal int64 `json:"iptotal,omitempty"` + Memoryavailable string `json:"memoryavailable,omitempty"` + Memorylimit string `json:"memorylimit,omitempty"` + Memorytotal int64 `json:"memorytotal,omitempty"` + Name string `json:"name,omitempty"` + Networkavailable string `json:"networkavailable,omitempty"` + Networklimit string `json:"networklimit,omitempty"` + Networktotal int64 `json:"networktotal,omitempty"` + Primarystorageavailable string `json:"primarystorageavailable,omitempty"` + Primarystoragelimit string `json:"primarystoragelimit,omitempty"` + Primarystoragetotal int64 `json:"primarystoragetotal,omitempty"` + Secondarystorageavailable string `json:"secondarystorageavailable,omitempty"` + Secondarystoragelimit string `json:"secondarystoragelimit,omitempty"` + Secondarystoragetotal int64 `json:"secondarystoragetotal,omitempty"` + Snapshotavailable string `json:"snapshotavailable,omitempty"` + Snapshotlimit string `json:"snapshotlimit,omitempty"` + Snapshottotal int64 `json:"snapshottotal,omitempty"` + State string `json:"state,omitempty"` + Tags []struct { + Account string `json:"account,omitempty"` + Customer string `json:"customer,omitempty"` + Domain string `json:"domain,omitempty"` + Domainid string `json:"domainid,omitempty"` + Key string `json:"key,omitempty"` + Project string `json:"project,omitempty"` + Projectid string `json:"projectid,omitempty"` + Resourceid string `json:"resourceid,omitempty"` + Resourcetype string `json:"resourcetype,omitempty"` + Value string `json:"value,omitempty"` + } `json:"tags,omitempty"` + Templateavailable string `json:"templateavailable,omitempty"` + Templatelimit string `json:"templatelimit,omitempty"` + Templatetotal int64 `json:"templatetotal,omitempty"` + Vmavailable string `json:"vmavailable,omitempty"` + Vmlimit string `json:"vmlimit,omitempty"` + Vmrunning int `json:"vmrunning,omitempty"` + Vmstopped int `json:"vmstopped,omitempty"` + Vmtotal int64 `json:"vmtotal,omitempty"` + Volumeavailable string `json:"volumeavailable,omitempty"` + Volumelimit string `json:"volumelimit,omitempty"` + Volumetotal int64 `json:"volumetotal,omitempty"` + Vpcavailable string `json:"vpcavailable,omitempty"` + Vpclimit string `json:"vpclimit,omitempty"` + Vpctotal int64 `json:"vpctotal,omitempty"` +} + +type ListProjectsParams struct { + p map[string]interface{} +} + +func (p *ListProjectsParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["account"]; found { + u.Set("account", v.(string)) + } + if v, found := p.p["displaytext"]; found { + u.Set("displaytext", v.(string)) + } + if v, found := p.p["domainid"]; found { + u.Set("domainid", v.(string)) + } + if v, found := p.p["id"]; found { + u.Set("id", v.(string)) + } + if v, found := p.p["isrecursive"]; found { + vv := strconv.FormatBool(v.(bool)) + u.Set("isrecursive", vv) + } + if v, found := p.p["keyword"]; found { + u.Set("keyword", v.(string)) + } + if v, found := p.p["listall"]; found { + vv := strconv.FormatBool(v.(bool)) + u.Set("listall", vv) + } + if v, found := p.p["name"]; found { + u.Set("name", v.(string)) + } + if v, found := p.p["page"]; found { + vv := strconv.Itoa(v.(int)) + u.Set("page", vv) + } + if v, found := p.p["pagesize"]; found { + vv := strconv.Itoa(v.(int)) + u.Set("pagesize", vv) + } + if v, found := p.p["state"]; found { + u.Set("state", v.(string)) + } + if v, found := p.p["tags"]; found { + i := 0 + for k, vv := range v.(map[string]string) { + u.Set(fmt.Sprintf("tags[%d].key", i), k) + 
u.Set(fmt.Sprintf("tags[%d].value", i), vv) + i++ + } + } + return u +} + +func (p *ListProjectsParams) SetAccount(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["account"] = v + return +} + +func (p *ListProjectsParams) SetDisplaytext(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["displaytext"] = v + return +} + +func (p *ListProjectsParams) SetDomainid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["domainid"] = v + return +} + +func (p *ListProjectsParams) SetId(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["id"] = v + return +} + +func (p *ListProjectsParams) SetIsrecursive(v bool) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["isrecursive"] = v + return +} + +func (p *ListProjectsParams) SetKeyword(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["keyword"] = v + return +} + +func (p *ListProjectsParams) SetListall(v bool) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["listall"] = v + return +} + +func (p *ListProjectsParams) SetName(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["name"] = v + return +} + +func (p *ListProjectsParams) SetPage(v int) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["page"] = v + return +} + +func (p *ListProjectsParams) SetPagesize(v int) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["pagesize"] = v + return +} + +func (p *ListProjectsParams) SetState(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["state"] = v + return +} + +func (p *ListProjectsParams) SetTags(v map[string]string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["tags"] = v + return +} + +// You should always use this function to get a new ListProjectsParams instance, +// as then you are sure you have configured all required params +func (s *ProjectService) NewListProjectsParams() *ListProjectsParams { + p := &ListProjectsParams{} + p.p = make(map[string]interface{}) + return p +} + +// This is a courtesy helper function, which in some cases may not work as expected! +func (s *ProjectService) GetProjectID(name string) (string, error) { + p := &ListProjectsParams{} + p.p = make(map[string]interface{}) + + p.p["name"] = name + + l, err := s.ListProjects(p) + if err != nil { + return "", err + } + + if l.Count == 0 { + return "", fmt.Errorf("No match found for %s: %+v", name, l) + } + + if l.Count == 1 { + return l.Projects[0].Id, nil + } + + if l.Count > 1 { + for _, v := range l.Projects { + if v.Name == name { + return v.Id, nil + } + } + } + return "", fmt.Errorf("Could not find an exact match for %s: %+v", name, l) +} + +// This is a courtesy helper function, which in some cases may not work as expected! +func (s *ProjectService) GetProjectByName(name string) (*Project, int, error) { + id, err := s.GetProjectID(name) + if err != nil { + return nil, -1, err + } + + r, count, err := s.GetProjectByID(id) + if err != nil { + return nil, count, err + } + return r, count, nil +} + +// This is a courtesy helper function, which in some cases may not work as expected! 
+func (s *ProjectService) GetProjectByID(id string) (*Project, int, error) {
+    p := &ListProjectsParams{}
+    p.p = make(map[string]interface{})
+
+    p.p["id"] = id
+
+    l, err := s.ListProjects(p)
+    if err != nil {
+        if strings.Contains(err.Error(), fmt.Sprintf(
+            "Invalid parameter id value=%s due to incorrect long value format, "+
+                "or entity does not exist", id)) {
+            return nil, 0, fmt.Errorf("No match found for %s: %+v", id, l)
+        }
+        return nil, -1, err
+    }
+
+    if l.Count == 0 {
+        return nil, l.Count, fmt.Errorf("No match found for %s: %+v", id, l)
+    }
+
+    if l.Count == 1 {
+        return l.Projects[0], l.Count, nil
+    }
+    return nil, l.Count, fmt.Errorf("There is more than one result for Project UUID: %s!", id)
+}
+
+// Lists projects and provides detailed information for listed projects
+func (s *ProjectService) ListProjects(p *ListProjectsParams) (*ListProjectsResponse, error) {
+    resp, err := s.cs.newRequest("listProjects", p.toURLValues())
+    if err != nil {
+        return nil, err
+    }
+
+    var r ListProjectsResponse
+    if err := json.Unmarshal(resp, &r); err != nil {
+        return nil, err
+    }
+    return &r, nil
+}
+
+type ListProjectsResponse struct {
+    Count int `json:"count"`
+    Projects []*Project `json:"project"`
+}
+
+type Project struct {
+    Account string `json:"account,omitempty"`
+    Cpuavailable string `json:"cpuavailable,omitempty"`
+    Cpulimit string `json:"cpulimit,omitempty"`
+    Cputotal int64 `json:"cputotal,omitempty"`
+    Displaytext string `json:"displaytext,omitempty"`
+    Domain string `json:"domain,omitempty"`
+    Domainid string `json:"domainid,omitempty"`
+    Id string `json:"id,omitempty"`
+    Ipavailable string `json:"ipavailable,omitempty"`
+    Iplimit string `json:"iplimit,omitempty"`
+    Iptotal int64 `json:"iptotal,omitempty"`
+    Memoryavailable string `json:"memoryavailable,omitempty"`
+    Memorylimit string `json:"memorylimit,omitempty"`
+    Memorytotal int64 `json:"memorytotal,omitempty"`
+    Name string `json:"name,omitempty"`
+    Networkavailable string `json:"networkavailable,omitempty"`
+    Networklimit string `json:"networklimit,omitempty"`
+    Networktotal int64 `json:"networktotal,omitempty"`
+    Primarystorageavailable string `json:"primarystorageavailable,omitempty"`
+    Primarystoragelimit string `json:"primarystoragelimit,omitempty"`
+    Primarystoragetotal int64 `json:"primarystoragetotal,omitempty"`
+    Secondarystorageavailable string `json:"secondarystorageavailable,omitempty"`
+    Secondarystoragelimit string `json:"secondarystoragelimit,omitempty"`
+    Secondarystoragetotal int64 `json:"secondarystoragetotal,omitempty"`
+    Snapshotavailable string `json:"snapshotavailable,omitempty"`
+    Snapshotlimit string `json:"snapshotlimit,omitempty"`
+    Snapshottotal int64 `json:"snapshottotal,omitempty"`
+    State string `json:"state,omitempty"`
+    Tags []struct {
+        Account string `json:"account,omitempty"`
+        Customer string `json:"customer,omitempty"`
+        Domain string `json:"domain,omitempty"`
+        Domainid string `json:"domainid,omitempty"`
+        Key string `json:"key,omitempty"`
+        Project string `json:"project,omitempty"`
+        Projectid string `json:"projectid,omitempty"`
+        Resourceid string `json:"resourceid,omitempty"`
+        Resourcetype string `json:"resourcetype,omitempty"`
+        Value string `json:"value,omitempty"`
+    } `json:"tags,omitempty"`
+    Templateavailable string `json:"templateavailable,omitempty"`
+    Templatelimit string `json:"templatelimit,omitempty"`
+    Templatetotal int64 `json:"templatetotal,omitempty"`
+    Vmavailable string `json:"vmavailable,omitempty"`
+    Vmlimit string `json:"vmlimit,omitempty"`
+    Vmrunning int
`json:"vmrunning,omitempty"` + Vmstopped int `json:"vmstopped,omitempty"` + Vmtotal int64 `json:"vmtotal,omitempty"` + Volumeavailable string `json:"volumeavailable,omitempty"` + Volumelimit string `json:"volumelimit,omitempty"` + Volumetotal int64 `json:"volumetotal,omitempty"` + Vpcavailable string `json:"vpcavailable,omitempty"` + Vpclimit string `json:"vpclimit,omitempty"` + Vpctotal int64 `json:"vpctotal,omitempty"` +} + +type ListProjectInvitationsParams struct { + p map[string]interface{} +} + +func (p *ListProjectInvitationsParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["account"]; found { + u.Set("account", v.(string)) + } + if v, found := p.p["activeonly"]; found { + vv := strconv.FormatBool(v.(bool)) + u.Set("activeonly", vv) + } + if v, found := p.p["domainid"]; found { + u.Set("domainid", v.(string)) + } + if v, found := p.p["id"]; found { + u.Set("id", v.(string)) + } + if v, found := p.p["isrecursive"]; found { + vv := strconv.FormatBool(v.(bool)) + u.Set("isrecursive", vv) + } + if v, found := p.p["keyword"]; found { + u.Set("keyword", v.(string)) + } + if v, found := p.p["listall"]; found { + vv := strconv.FormatBool(v.(bool)) + u.Set("listall", vv) + } + if v, found := p.p["page"]; found { + vv := strconv.Itoa(v.(int)) + u.Set("page", vv) + } + if v, found := p.p["pagesize"]; found { + vv := strconv.Itoa(v.(int)) + u.Set("pagesize", vv) + } + if v, found := p.p["projectid"]; found { + u.Set("projectid", v.(string)) + } + if v, found := p.p["state"]; found { + u.Set("state", v.(string)) + } + return u +} + +func (p *ListProjectInvitationsParams) SetAccount(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["account"] = v + return +} + +func (p *ListProjectInvitationsParams) SetActiveonly(v bool) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["activeonly"] = v + return +} + +func (p *ListProjectInvitationsParams) SetDomainid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["domainid"] = v + return +} + +func (p *ListProjectInvitationsParams) SetId(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["id"] = v + return +} + +func (p *ListProjectInvitationsParams) SetIsrecursive(v bool) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["isrecursive"] = v + return +} + +func (p *ListProjectInvitationsParams) SetKeyword(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["keyword"] = v + return +} + +func (p *ListProjectInvitationsParams) SetListall(v bool) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["listall"] = v + return +} + +func (p *ListProjectInvitationsParams) SetPage(v int) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["page"] = v + return +} + +func (p *ListProjectInvitationsParams) SetPagesize(v int) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["pagesize"] = v + return +} + +func (p *ListProjectInvitationsParams) SetProjectid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["projectid"] = v + return +} + +func (p *ListProjectInvitationsParams) SetState(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["state"] = v + return +} + +// You should always use this function to get a new ListProjectInvitationsParams instance, +// as then you are sure you have configured all required params +func (s *ProjectService) NewListProjectInvitationsParams() 
*ListProjectInvitationsParams {
+    p := &ListProjectInvitationsParams{}
+    p.p = make(map[string]interface{})
+    return p
+}
+
+// This is a courtesy helper function, which in some cases may not work as expected!
+func (s *ProjectService) GetProjectInvitationByID(id string) (*ProjectInvitation, int, error) {
+    p := &ListProjectInvitationsParams{}
+    p.p = make(map[string]interface{})
+
+    p.p["id"] = id
+
+    l, err := s.ListProjectInvitations(p)
+    if err != nil {
+        if strings.Contains(err.Error(), fmt.Sprintf(
+            "Invalid parameter id value=%s due to incorrect long value format, "+
+                "or entity does not exist", id)) {
+            return nil, 0, fmt.Errorf("No match found for %s: %+v", id, l)
+        }
+        return nil, -1, err
+    }
+
+    if l.Count == 0 {
+        // If no matches, search all projects
+        p.p["projectid"] = "-1"
+
+        l, err = s.ListProjectInvitations(p)
+        if err != nil {
+            if strings.Contains(err.Error(), fmt.Sprintf(
+                "Invalid parameter id value=%s due to incorrect long value format, "+
+                    "or entity does not exist", id)) {
+                return nil, 0, fmt.Errorf("No match found for %s: %+v", id, l)
+            }
+            return nil, -1, err
+        }
+    }
+
+    if l.Count == 0 {
+        return nil, l.Count, fmt.Errorf("No match found for %s: %+v", id, l)
+    }
+
+    if l.Count == 1 {
+        return l.ProjectInvitations[0], l.Count, nil
+    }
+    return nil, l.Count, fmt.Errorf("There is more than one result for ProjectInvitation UUID: %s!", id)
+}
+
+// Lists project invitations and provides detailed information for listed invitations
+func (s *ProjectService) ListProjectInvitations(p *ListProjectInvitationsParams) (*ListProjectInvitationsResponse, error) {
+    resp, err := s.cs.newRequest("listProjectInvitations", p.toURLValues())
+    if err != nil {
+        return nil, err
+    }
+
+    var r ListProjectInvitationsResponse
+    if err := json.Unmarshal(resp, &r); err != nil {
+        return nil, err
+    }
+    return &r, nil
+}
+
+type ListProjectInvitationsResponse struct {
+    Count int `json:"count"`
+    ProjectInvitations []*ProjectInvitation `json:"projectinvitation"`
+}
+
+type ProjectInvitation struct {
+    Account string `json:"account,omitempty"`
+    Domain string `json:"domain,omitempty"`
+    Domainid string `json:"domainid,omitempty"`
+    Email string `json:"email,omitempty"`
+    Id string `json:"id,omitempty"`
+    Project string `json:"project,omitempty"`
+    Projectid string `json:"projectid,omitempty"`
+    State string `json:"state,omitempty"`
+}
always use this function to get a new UpdateProjectInvitationParams instance,
+// as then you are sure you have configured all required params
+func (s *ProjectService) NewUpdateProjectInvitationParams(projectid string) *UpdateProjectInvitationParams {
+ p := &UpdateProjectInvitationParams{}
+ p.p = make(map[string]interface{})
+ p.p["projectid"] = projectid
+ return p
+}
+
+// Accepts or declines project invitation
+func (s *ProjectService) UpdateProjectInvitation(p *UpdateProjectInvitationParams) (*UpdateProjectInvitationResponse, error) {
+ resp, err := s.cs.newRequest("updateProjectInvitation", p.toURLValues())
+ if err != nil {
+ return nil, err
+ }
+
+ var r UpdateProjectInvitationResponse
+ if err := json.Unmarshal(resp, &r); err != nil {
+ return nil, err
+ }
+
+ // If we have an async client, we need to wait for the async result
+ if s.cs.async {
+ b, err := s.cs.GetAsyncJobResult(r.JobID, s.cs.timeout)
+ if err != nil {
+ if err == AsyncTimeoutErr {
+ return &r, err
+ }
+ return nil, err
+ }
+
+ if err := json.Unmarshal(b, &r); err != nil {
+ return nil, err
+ }
+ }
+ return &r, nil
+}
+
+type UpdateProjectInvitationResponse struct {
+ JobID string `json:"jobid,omitempty"`
+ Displaytext string `json:"displaytext,omitempty"`
+ Success bool `json:"success,omitempty"`
+}
+
+type DeleteProjectInvitationParams struct {
+ p map[string]interface{}
+}
+
+func (p *DeleteProjectInvitationParams) toURLValues() url.Values {
+ u := url.Values{}
+ if p.p == nil {
+ return u
+ }
+ if v, found := p.p["id"]; found {
+ u.Set("id", v.(string))
+ }
+ return u
+}
+
+func (p *DeleteProjectInvitationParams) SetId(v string) {
+ if p.p == nil {
+ p.p = make(map[string]interface{})
+ }
+ p.p["id"] = v
+ return
+}
+
+// You should always use this function to get a new DeleteProjectInvitationParams instance,
+// as then you are sure you have configured all required params
+func (s *ProjectService) NewDeleteProjectInvitationParams(id string) *DeleteProjectInvitationParams {
+ p := &DeleteProjectInvitationParams{}
+ p.p = make(map[string]interface{})
+ p.p["id"] = id
+ return p
+}
+
+// Deletes a project invitation
+func (s *ProjectService) DeleteProjectInvitation(p *DeleteProjectInvitationParams) (*DeleteProjectInvitationResponse, error) {
+ resp, err := s.cs.newRequest("deleteProjectInvitation", p.toURLValues())
+ if err != nil {
+ return nil, err
+ }
+
+ var r DeleteProjectInvitationResponse
+ if err := json.Unmarshal(resp, &r); err != nil {
+ return nil, err
+ }
+
+ // If we have an async client, we need to wait for the async result
+ if s.cs.async {
+ b, err := s.cs.GetAsyncJobResult(r.JobID, s.cs.timeout)
+ if err != nil {
+ if err == AsyncTimeoutErr {
+ return &r, err
+ }
+ return nil, err
+ }
+
+ if err := json.Unmarshal(b, &r); err != nil {
+ return nil, err
+ }
+ }
+ return &r, nil
+}
+
+type DeleteProjectInvitationResponse struct {
+ JobID string `json:"jobid,omitempty"`
+ Displaytext string `json:"displaytext,omitempty"`
+ Success bool `json:"success,omitempty"`
+}
diff --git a/vendor/github.com/xanzy/go-cloudstack/cloudstack/RegionService.go b/vendor/github.com/xanzy/go-cloudstack/cloudstack/RegionService.go
new file mode 100644
index 000000000..18cede7fd
--- /dev/null
+++ b/vendor/github.com/xanzy/go-cloudstack/cloudstack/RegionService.go
@@ -0,0 +1,336 @@
+//
+// Copyright 2014, Sander van Harmelen
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package cloudstack + +import ( + "encoding/json" + "net/url" + "strconv" +) + +type AddRegionParams struct { + p map[string]interface{} +} + +func (p *AddRegionParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["endpoint"]; found { + u.Set("endpoint", v.(string)) + } + if v, found := p.p["id"]; found { + vv := strconv.Itoa(v.(int)) + u.Set("id", vv) + } + if v, found := p.p["name"]; found { + u.Set("name", v.(string)) + } + return u +} + +func (p *AddRegionParams) SetEndpoint(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["endpoint"] = v + return +} + +func (p *AddRegionParams) SetId(v int) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["id"] = v + return +} + +func (p *AddRegionParams) SetName(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["name"] = v + return +} + +// You should always use this function to get a new AddRegionParams instance, +// as then you are sure you have configured all required params +func (s *RegionService) NewAddRegionParams(endpoint string, id int, name string) *AddRegionParams { + p := &AddRegionParams{} + p.p = make(map[string]interface{}) + p.p["endpoint"] = endpoint + p.p["id"] = id + p.p["name"] = name + return p +} + +// Adds a Region +func (s *RegionService) AddRegion(p *AddRegionParams) (*AddRegionResponse, error) { + resp, err := s.cs.newRequest("addRegion", p.toURLValues()) + if err != nil { + return nil, err + } + + var r AddRegionResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + return &r, nil +} + +type AddRegionResponse struct { + Endpoint string `json:"endpoint,omitempty"` + Gslbserviceenabled bool `json:"gslbserviceenabled,omitempty"` + Id int `json:"id,omitempty"` + Name string `json:"name,omitempty"` + Portableipserviceenabled bool `json:"portableipserviceenabled,omitempty"` +} + +type UpdateRegionParams struct { + p map[string]interface{} +} + +func (p *UpdateRegionParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["endpoint"]; found { + u.Set("endpoint", v.(string)) + } + if v, found := p.p["id"]; found { + vv := strconv.Itoa(v.(int)) + u.Set("id", vv) + } + if v, found := p.p["name"]; found { + u.Set("name", v.(string)) + } + return u +} + +func (p *UpdateRegionParams) SetEndpoint(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["endpoint"] = v + return +} + +func (p *UpdateRegionParams) SetId(v int) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["id"] = v + return +} + +func (p *UpdateRegionParams) SetName(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["name"] = v + return +} + +// You should always use this function to get a new UpdateRegionParams instance, +// as then you are sure you have configured all required params +func (s *RegionService) NewUpdateRegionParams(id int) *UpdateRegionParams { + p := &UpdateRegionParams{} + p.p = make(map[string]interface{}) + p.p["id"] = id + return p +} + +// Updates a region 
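+//
+// A minimal usage sketch, assuming a CloudStackClient value named cs (as
+// returned by cloudstack.NewAsyncClient) and a placeholder region id and name:
+//
+//	p := cs.Region.NewUpdateRegionParams(2)
+//	p.SetName("eu-west")
+//	r, err := cs.Region.UpdateRegion(p)
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	fmt.Printf("region %d renamed to %s\n", r.Id, r.Name)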
+func (s *RegionService) UpdateRegion(p *UpdateRegionParams) (*UpdateRegionResponse, error) { + resp, err := s.cs.newRequest("updateRegion", p.toURLValues()) + if err != nil { + return nil, err + } + + var r UpdateRegionResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + return &r, nil +} + +type UpdateRegionResponse struct { + Endpoint string `json:"endpoint,omitempty"` + Gslbserviceenabled bool `json:"gslbserviceenabled,omitempty"` + Id int `json:"id,omitempty"` + Name string `json:"name,omitempty"` + Portableipserviceenabled bool `json:"portableipserviceenabled,omitempty"` +} + +type RemoveRegionParams struct { + p map[string]interface{} +} + +func (p *RemoveRegionParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["id"]; found { + vv := strconv.Itoa(v.(int)) + u.Set("id", vv) + } + return u +} + +func (p *RemoveRegionParams) SetId(v int) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["id"] = v + return +} + +// You should always use this function to get a new RemoveRegionParams instance, +// as then you are sure you have configured all required params +func (s *RegionService) NewRemoveRegionParams(id int) *RemoveRegionParams { + p := &RemoveRegionParams{} + p.p = make(map[string]interface{}) + p.p["id"] = id + return p +} + +// Removes specified region +func (s *RegionService) RemoveRegion(p *RemoveRegionParams) (*RemoveRegionResponse, error) { + resp, err := s.cs.newRequest("removeRegion", p.toURLValues()) + if err != nil { + return nil, err + } + + var r RemoveRegionResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + return &r, nil +} + +type RemoveRegionResponse struct { + Displaytext string `json:"displaytext,omitempty"` + Success string `json:"success,omitempty"` +} + +type ListRegionsParams struct { + p map[string]interface{} +} + +func (p *ListRegionsParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["id"]; found { + vv := strconv.Itoa(v.(int)) + u.Set("id", vv) + } + if v, found := p.p["keyword"]; found { + u.Set("keyword", v.(string)) + } + if v, found := p.p["name"]; found { + u.Set("name", v.(string)) + } + if v, found := p.p["page"]; found { + vv := strconv.Itoa(v.(int)) + u.Set("page", vv) + } + if v, found := p.p["pagesize"]; found { + vv := strconv.Itoa(v.(int)) + u.Set("pagesize", vv) + } + return u +} + +func (p *ListRegionsParams) SetId(v int) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["id"] = v + return +} + +func (p *ListRegionsParams) SetKeyword(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["keyword"] = v + return +} + +func (p *ListRegionsParams) SetName(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["name"] = v + return +} + +func (p *ListRegionsParams) SetPage(v int) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["page"] = v + return +} + +func (p *ListRegionsParams) SetPagesize(v int) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["pagesize"] = v + return +} + +// You should always use this function to get a new ListRegionsParams instance, +// as then you are sure you have configured all required params +func (s *RegionService) NewListRegionsParams() *ListRegionsParams { + p := &ListRegionsParams{} + p.p = make(map[string]interface{}) + return p +} + +// Lists Regions +func (s *RegionService) ListRegions(p *ListRegionsParams) 
(*ListRegionsResponse, error) { + resp, err := s.cs.newRequest("listRegions", p.toURLValues()) + if err != nil { + return nil, err + } + + var r ListRegionsResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + return &r, nil +} + +type ListRegionsResponse struct { + Count int `json:"count"` + Regions []*Region `json:"region"` +} + +type Region struct { + Endpoint string `json:"endpoint,omitempty"` + Gslbserviceenabled bool `json:"gslbserviceenabled,omitempty"` + Id int `json:"id,omitempty"` + Name string `json:"name,omitempty"` + Portableipserviceenabled bool `json:"portableipserviceenabled,omitempty"` +} diff --git a/vendor/github.com/xanzy/go-cloudstack/cloudstack/ResourcemetadataService.go b/vendor/github.com/xanzy/go-cloudstack/cloudstack/ResourcemetadataService.go new file mode 100644 index 000000000..e61b0ecc0 --- /dev/null +++ b/vendor/github.com/xanzy/go-cloudstack/cloudstack/ResourcemetadataService.go @@ -0,0 +1,423 @@ +// +// Copyright 2014, Sander van Harmelen +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package cloudstack + +import ( + "encoding/json" + "fmt" + "net/url" + "strconv" +) + +type AddResourceDetailParams struct { + p map[string]interface{} +} + +func (p *AddResourceDetailParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["details"]; found { + i := 0 + for k, vv := range v.(map[string]string) { + u.Set(fmt.Sprintf("details[%d].key", i), k) + u.Set(fmt.Sprintf("details[%d].value", i), vv) + i++ + } + } + if v, found := p.p["fordisplay"]; found { + vv := strconv.FormatBool(v.(bool)) + u.Set("fordisplay", vv) + } + if v, found := p.p["resourceid"]; found { + u.Set("resourceid", v.(string)) + } + if v, found := p.p["resourcetype"]; found { + u.Set("resourcetype", v.(string)) + } + return u +} + +func (p *AddResourceDetailParams) SetDetails(v map[string]string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["details"] = v + return +} + +func (p *AddResourceDetailParams) SetFordisplay(v bool) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["fordisplay"] = v + return +} + +func (p *AddResourceDetailParams) SetResourceid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["resourceid"] = v + return +} + +func (p *AddResourceDetailParams) SetResourcetype(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["resourcetype"] = v + return +} + +// You should always use this function to get a new AddResourceDetailParams instance, +// as then you are sure you have configured all required params +func (s *ResourcemetadataService) NewAddResourceDetailParams(details map[string]string, resourceid string, resourcetype string) *AddResourceDetailParams { + p := &AddResourceDetailParams{} + p.p = make(map[string]interface{}) + p.p["details"] = details + p.p["resourceid"] = resourceid + p.p["resourcetype"] = resourcetype + return p +} + +// Adds detail for the Resource. 
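+//
+// A minimal usage sketch, assuming a CloudStackClient value named cs and a
+// placeholder virtual machine UUID; "UserVm" is assumed to be one of the
+// resource types CloudStack accepts here:
+//
+//	details := map[string]string{"owner": "team-a"}
+//	p := cs.Resourcemetadata.NewAddResourceDetailParams(details, "vm-uuid", "UserVm")
+//	r, err := cs.Resourcemetadata.AddResourceDetail(p)
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	fmt.Println("detail added:", r.Success)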
+func (s *ResourcemetadataService) AddResourceDetail(p *AddResourceDetailParams) (*AddResourceDetailResponse, error) { + resp, err := s.cs.newRequest("addResourceDetail", p.toURLValues()) + if err != nil { + return nil, err + } + + var r AddResourceDetailResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + + // If we have a async client, we need to wait for the async result + if s.cs.async { + b, err := s.cs.GetAsyncJobResult(r.JobID, s.cs.timeout) + if err != nil { + if err == AsyncTimeoutErr { + return &r, err + } + return nil, err + } + + if err := json.Unmarshal(b, &r); err != nil { + return nil, err + } + } + return &r, nil +} + +type AddResourceDetailResponse struct { + JobID string `json:"jobid,omitempty"` + Displaytext string `json:"displaytext,omitempty"` + Success bool `json:"success,omitempty"` +} + +type RemoveResourceDetailParams struct { + p map[string]interface{} +} + +func (p *RemoveResourceDetailParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["key"]; found { + u.Set("key", v.(string)) + } + if v, found := p.p["resourceid"]; found { + u.Set("resourceid", v.(string)) + } + if v, found := p.p["resourcetype"]; found { + u.Set("resourcetype", v.(string)) + } + return u +} + +func (p *RemoveResourceDetailParams) SetKey(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["key"] = v + return +} + +func (p *RemoveResourceDetailParams) SetResourceid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["resourceid"] = v + return +} + +func (p *RemoveResourceDetailParams) SetResourcetype(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["resourcetype"] = v + return +} + +// You should always use this function to get a new RemoveResourceDetailParams instance, +// as then you are sure you have configured all required params +func (s *ResourcemetadataService) NewRemoveResourceDetailParams(resourceid string, resourcetype string) *RemoveResourceDetailParams { + p := &RemoveResourceDetailParams{} + p.p = make(map[string]interface{}) + p.p["resourceid"] = resourceid + p.p["resourcetype"] = resourcetype + return p +} + +// Removes detail for the Resource. 
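+//
+// A minimal usage sketch, assuming the same cs client and placeholder UUID
+// and resource type as above; SetKey limits the removal to a single detail key:
+//
+//	p := cs.Resourcemetadata.NewRemoveResourceDetailParams("vm-uuid", "UserVm")
+//	p.SetKey("owner")
+//	if _, err := cs.Resourcemetadata.RemoveResourceDetail(p); err != nil {
+//		log.Fatal(err)
+//	}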
+func (s *ResourcemetadataService) RemoveResourceDetail(p *RemoveResourceDetailParams) (*RemoveResourceDetailResponse, error) { + resp, err := s.cs.newRequest("removeResourceDetail", p.toURLValues()) + if err != nil { + return nil, err + } + + var r RemoveResourceDetailResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + + // If we have a async client, we need to wait for the async result + if s.cs.async { + b, err := s.cs.GetAsyncJobResult(r.JobID, s.cs.timeout) + if err != nil { + if err == AsyncTimeoutErr { + return &r, err + } + return nil, err + } + + if err := json.Unmarshal(b, &r); err != nil { + return nil, err + } + } + return &r, nil +} + +type RemoveResourceDetailResponse struct { + JobID string `json:"jobid,omitempty"` + Displaytext string `json:"displaytext,omitempty"` + Success bool `json:"success,omitempty"` +} + +type ListResourceDetailsParams struct { + p map[string]interface{} +} + +func (p *ListResourceDetailsParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["account"]; found { + u.Set("account", v.(string)) + } + if v, found := p.p["domainid"]; found { + u.Set("domainid", v.(string)) + } + if v, found := p.p["fordisplay"]; found { + vv := strconv.FormatBool(v.(bool)) + u.Set("fordisplay", vv) + } + if v, found := p.p["isrecursive"]; found { + vv := strconv.FormatBool(v.(bool)) + u.Set("isrecursive", vv) + } + if v, found := p.p["key"]; found { + u.Set("key", v.(string)) + } + if v, found := p.p["keyword"]; found { + u.Set("keyword", v.(string)) + } + if v, found := p.p["listall"]; found { + vv := strconv.FormatBool(v.(bool)) + u.Set("listall", vv) + } + if v, found := p.p["page"]; found { + vv := strconv.Itoa(v.(int)) + u.Set("page", vv) + } + if v, found := p.p["pagesize"]; found { + vv := strconv.Itoa(v.(int)) + u.Set("pagesize", vv) + } + if v, found := p.p["projectid"]; found { + u.Set("projectid", v.(string)) + } + if v, found := p.p["resourceid"]; found { + u.Set("resourceid", v.(string)) + } + if v, found := p.p["resourcetype"]; found { + u.Set("resourcetype", v.(string)) + } + if v, found := p.p["value"]; found { + u.Set("value", v.(string)) + } + return u +} + +func (p *ListResourceDetailsParams) SetAccount(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["account"] = v + return +} + +func (p *ListResourceDetailsParams) SetDomainid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["domainid"] = v + return +} + +func (p *ListResourceDetailsParams) SetFordisplay(v bool) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["fordisplay"] = v + return +} + +func (p *ListResourceDetailsParams) SetIsrecursive(v bool) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["isrecursive"] = v + return +} + +func (p *ListResourceDetailsParams) SetKey(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["key"] = v + return +} + +func (p *ListResourceDetailsParams) SetKeyword(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["keyword"] = v + return +} + +func (p *ListResourceDetailsParams) SetListall(v bool) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["listall"] = v + return +} + +func (p *ListResourceDetailsParams) SetPage(v int) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["page"] = v + return +} + +func (p *ListResourceDetailsParams) SetPagesize(v int) { + if p.p == nil { + p.p = make(map[string]interface{}) 
+ } + p.p["pagesize"] = v + return +} + +func (p *ListResourceDetailsParams) SetProjectid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["projectid"] = v + return +} + +func (p *ListResourceDetailsParams) SetResourceid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["resourceid"] = v + return +} + +func (p *ListResourceDetailsParams) SetResourcetype(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["resourcetype"] = v + return +} + +func (p *ListResourceDetailsParams) SetValue(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["value"] = v + return +} + +// You should always use this function to get a new ListResourceDetailsParams instance, +// as then you are sure you have configured all required params +func (s *ResourcemetadataService) NewListResourceDetailsParams(resourcetype string) *ListResourceDetailsParams { + p := &ListResourceDetailsParams{} + p.p = make(map[string]interface{}) + p.p["resourcetype"] = resourcetype + return p +} + +// List resource detail(s) +func (s *ResourcemetadataService) ListResourceDetails(p *ListResourceDetailsParams) (*ListResourceDetailsResponse, error) { + resp, err := s.cs.newRequest("listResourceDetails", p.toURLValues()) + if err != nil { + return nil, err + } + + var r ListResourceDetailsResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + return &r, nil +} + +type ListResourceDetailsResponse struct { + Count int `json:"count"` + ResourceDetails []*ResourceDetail `json:"resourcedetail"` +} + +type ResourceDetail struct { + Account string `json:"account,omitempty"` + Customer string `json:"customer,omitempty"` + Domain string `json:"domain,omitempty"` + Domainid string `json:"domainid,omitempty"` + Key string `json:"key,omitempty"` + Project string `json:"project,omitempty"` + Projectid string `json:"projectid,omitempty"` + Resourceid string `json:"resourceid,omitempty"` + Resourcetype string `json:"resourcetype,omitempty"` + Value string `json:"value,omitempty"` +} diff --git a/vendor/github.com/xanzy/go-cloudstack/cloudstack/ResourcetagsService.go b/vendor/github.com/xanzy/go-cloudstack/cloudstack/ResourcetagsService.go new file mode 100644 index 000000000..abfd86245 --- /dev/null +++ b/vendor/github.com/xanzy/go-cloudstack/cloudstack/ResourcetagsService.go @@ -0,0 +1,428 @@ +// +// Copyright 2014, Sander van Harmelen +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// + +package cloudstack + +import ( + "encoding/json" + "fmt" + "net/url" + "strconv" + "strings" +) + +type CreateTagsParams struct { + p map[string]interface{} +} + +func (p *CreateTagsParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["customer"]; found { + u.Set("customer", v.(string)) + } + if v, found := p.p["resourceids"]; found { + vv := strings.Join(v.([]string), ",") + u.Set("resourceids", vv) + } + if v, found := p.p["resourcetype"]; found { + u.Set("resourcetype", v.(string)) + } + if v, found := p.p["tags"]; found { + i := 0 + for k, vv := range v.(map[string]string) { + u.Set(fmt.Sprintf("tags[%d].key", i), k) + u.Set(fmt.Sprintf("tags[%d].value", i), vv) + i++ + } + } + return u +} + +func (p *CreateTagsParams) SetCustomer(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["customer"] = v + return +} + +func (p *CreateTagsParams) SetResourceids(v []string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["resourceids"] = v + return +} + +func (p *CreateTagsParams) SetResourcetype(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["resourcetype"] = v + return +} + +func (p *CreateTagsParams) SetTags(v map[string]string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["tags"] = v + return +} + +// You should always use this function to get a new CreateTagsParams instance, +// as then you are sure you have configured all required params +func (s *ResourcetagsService) NewCreateTagsParams(resourceids []string, resourcetype string, tags map[string]string) *CreateTagsParams { + p := &CreateTagsParams{} + p.p = make(map[string]interface{}) + p.p["resourceids"] = resourceids + p.p["resourcetype"] = resourcetype + p.p["tags"] = tags + return p +} + +// Creates resource tag(s) +func (s *ResourcetagsService) CreateTags(p *CreateTagsParams) (*CreateTagsResponse, error) { + resp, err := s.cs.newRequest("createTags", p.toURLValues()) + if err != nil { + return nil, err + } + + var r CreateTagsResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + + // If we have a async client, we need to wait for the async result + if s.cs.async { + b, err := s.cs.GetAsyncJobResult(r.JobID, s.cs.timeout) + if err != nil { + if err == AsyncTimeoutErr { + return &r, err + } + return nil, err + } + + if err := json.Unmarshal(b, &r); err != nil { + return nil, err + } + } + return &r, nil +} + +type CreateTagsResponse struct { + JobID string `json:"jobid,omitempty"` + Displaytext string `json:"displaytext,omitempty"` + Success bool `json:"success,omitempty"` +} + +type DeleteTagsParams struct { + p map[string]interface{} +} + +func (p *DeleteTagsParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["resourceids"]; found { + vv := strings.Join(v.([]string), ",") + u.Set("resourceids", vv) + } + if v, found := p.p["resourcetype"]; found { + u.Set("resourcetype", v.(string)) + } + if v, found := p.p["tags"]; found { + i := 0 + for k, vv := range v.(map[string]string) { + u.Set(fmt.Sprintf("tags[%d].key", i), k) + u.Set(fmt.Sprintf("tags[%d].value", i), vv) + i++ + } + } + return u +} + +func (p *DeleteTagsParams) SetResourceids(v []string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["resourceids"] = v + return +} + +func (p *DeleteTagsParams) SetResourcetype(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["resourcetype"] = v + 
return +} + +func (p *DeleteTagsParams) SetTags(v map[string]string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["tags"] = v + return +} + +// You should always use this function to get a new DeleteTagsParams instance, +// as then you are sure you have configured all required params +func (s *ResourcetagsService) NewDeleteTagsParams(resourceids []string, resourcetype string) *DeleteTagsParams { + p := &DeleteTagsParams{} + p.p = make(map[string]interface{}) + p.p["resourceids"] = resourceids + p.p["resourcetype"] = resourcetype + return p +} + +// Deleting resource tag(s) +func (s *ResourcetagsService) DeleteTags(p *DeleteTagsParams) (*DeleteTagsResponse, error) { + resp, err := s.cs.newRequest("deleteTags", p.toURLValues()) + if err != nil { + return nil, err + } + + var r DeleteTagsResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + + // If we have a async client, we need to wait for the async result + if s.cs.async { + b, err := s.cs.GetAsyncJobResult(r.JobID, s.cs.timeout) + if err != nil { + if err == AsyncTimeoutErr { + return &r, err + } + return nil, err + } + + if err := json.Unmarshal(b, &r); err != nil { + return nil, err + } + } + return &r, nil +} + +type DeleteTagsResponse struct { + JobID string `json:"jobid,omitempty"` + Displaytext string `json:"displaytext,omitempty"` + Success bool `json:"success,omitempty"` +} + +type ListTagsParams struct { + p map[string]interface{} +} + +func (p *ListTagsParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["account"]; found { + u.Set("account", v.(string)) + } + if v, found := p.p["customer"]; found { + u.Set("customer", v.(string)) + } + if v, found := p.p["domainid"]; found { + u.Set("domainid", v.(string)) + } + if v, found := p.p["isrecursive"]; found { + vv := strconv.FormatBool(v.(bool)) + u.Set("isrecursive", vv) + } + if v, found := p.p["key"]; found { + u.Set("key", v.(string)) + } + if v, found := p.p["keyword"]; found { + u.Set("keyword", v.(string)) + } + if v, found := p.p["listall"]; found { + vv := strconv.FormatBool(v.(bool)) + u.Set("listall", vv) + } + if v, found := p.p["page"]; found { + vv := strconv.Itoa(v.(int)) + u.Set("page", vv) + } + if v, found := p.p["pagesize"]; found { + vv := strconv.Itoa(v.(int)) + u.Set("pagesize", vv) + } + if v, found := p.p["projectid"]; found { + u.Set("projectid", v.(string)) + } + if v, found := p.p["resourceid"]; found { + u.Set("resourceid", v.(string)) + } + if v, found := p.p["resourcetype"]; found { + u.Set("resourcetype", v.(string)) + } + if v, found := p.p["value"]; found { + u.Set("value", v.(string)) + } + return u +} + +func (p *ListTagsParams) SetAccount(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["account"] = v + return +} + +func (p *ListTagsParams) SetCustomer(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["customer"] = v + return +} + +func (p *ListTagsParams) SetDomainid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["domainid"] = v + return +} + +func (p *ListTagsParams) SetIsrecursive(v bool) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["isrecursive"] = v + return +} + +func (p *ListTagsParams) SetKey(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["key"] = v + return +} + +func (p *ListTagsParams) SetKeyword(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["keyword"] = v + return 
+} + +func (p *ListTagsParams) SetListall(v bool) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["listall"] = v + return +} + +func (p *ListTagsParams) SetPage(v int) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["page"] = v + return +} + +func (p *ListTagsParams) SetPagesize(v int) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["pagesize"] = v + return +} + +func (p *ListTagsParams) SetProjectid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["projectid"] = v + return +} + +func (p *ListTagsParams) SetResourceid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["resourceid"] = v + return +} + +func (p *ListTagsParams) SetResourcetype(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["resourcetype"] = v + return +} + +func (p *ListTagsParams) SetValue(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["value"] = v + return +} + +// You should always use this function to get a new ListTagsParams instance, +// as then you are sure you have configured all required params +func (s *ResourcetagsService) NewListTagsParams() *ListTagsParams { + p := &ListTagsParams{} + p.p = make(map[string]interface{}) + return p +} + +// List resource tag(s) +func (s *ResourcetagsService) ListTags(p *ListTagsParams) (*ListTagsResponse, error) { + resp, err := s.cs.newRequest("listTags", p.toURLValues()) + if err != nil { + return nil, err + } + + var r ListTagsResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + return &r, nil +} + +type ListTagsResponse struct { + Count int `json:"count"` + Tags []*Tag `json:"tag"` +} + +type Tag struct { + Account string `json:"account,omitempty"` + Customer string `json:"customer,omitempty"` + Domain string `json:"domain,omitempty"` + Domainid string `json:"domainid,omitempty"` + Key string `json:"key,omitempty"` + Project string `json:"project,omitempty"` + Projectid string `json:"projectid,omitempty"` + Resourceid string `json:"resourceid,omitempty"` + Resourcetype string `json:"resourcetype,omitempty"` + Value string `json:"value,omitempty"` +} diff --git a/vendor/github.com/xanzy/go-cloudstack/cloudstack/RouterService.go b/vendor/github.com/xanzy/go-cloudstack/cloudstack/RouterService.go new file mode 100644 index 000000000..6bd59e496 --- /dev/null +++ b/vendor/github.com/xanzy/go-cloudstack/cloudstack/RouterService.go @@ -0,0 +1,1446 @@ +// +// Copyright 2014, Sander van Harmelen +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// + +package cloudstack + +import ( + "encoding/json" + "fmt" + "net/url" + "strconv" + "strings" +) + +type StartRouterParams struct { + p map[string]interface{} +} + +func (p *StartRouterParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["id"]; found { + u.Set("id", v.(string)) + } + return u +} + +func (p *StartRouterParams) SetId(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["id"] = v + return +} + +// You should always use this function to get a new StartRouterParams instance, +// as then you are sure you have configured all required params +func (s *RouterService) NewStartRouterParams(id string) *StartRouterParams { + p := &StartRouterParams{} + p.p = make(map[string]interface{}) + p.p["id"] = id + return p +} + +// Starts a router. +func (s *RouterService) StartRouter(p *StartRouterParams) (*StartRouterResponse, error) { + resp, err := s.cs.newRequest("startRouter", p.toURLValues()) + if err != nil { + return nil, err + } + + var r StartRouterResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + + // If we have a async client, we need to wait for the async result + if s.cs.async { + b, err := s.cs.GetAsyncJobResult(r.JobID, s.cs.timeout) + if err != nil { + if err == AsyncTimeoutErr { + return &r, err + } + return nil, err + } + + b, err = getRawValue(b) + if err != nil { + return nil, err + } + + if err := json.Unmarshal(b, &r); err != nil { + return nil, err + } + } + return &r, nil +} + +type StartRouterResponse struct { + JobID string `json:"jobid,omitempty"` + Account string `json:"account,omitempty"` + Created string `json:"created,omitempty"` + Dns1 string `json:"dns1,omitempty"` + Dns2 string `json:"dns2,omitempty"` + Domain string `json:"domain,omitempty"` + Domainid string `json:"domainid,omitempty"` + Gateway string `json:"gateway,omitempty"` + Guestipaddress string `json:"guestipaddress,omitempty"` + Guestmacaddress string `json:"guestmacaddress,omitempty"` + Guestnetmask string `json:"guestnetmask,omitempty"` + Guestnetworkid string `json:"guestnetworkid,omitempty"` + Hostid string `json:"hostid,omitempty"` + Hostname string `json:"hostname,omitempty"` + Id string `json:"id,omitempty"` + Ip6dns1 string `json:"ip6dns1,omitempty"` + Ip6dns2 string `json:"ip6dns2,omitempty"` + Isredundantrouter bool `json:"isredundantrouter,omitempty"` + Linklocalip string `json:"linklocalip,omitempty"` + Linklocalmacaddress string `json:"linklocalmacaddress,omitempty"` + Linklocalnetmask string `json:"linklocalnetmask,omitempty"` + Linklocalnetworkid string `json:"linklocalnetworkid,omitempty"` + Name string `json:"name,omitempty"` + Networkdomain string `json:"networkdomain,omitempty"` + Nic []struct { + Broadcasturi string `json:"broadcasturi,omitempty"` + Deviceid string `json:"deviceid,omitempty"` + Gateway string `json:"gateway,omitempty"` + Id string `json:"id,omitempty"` + Ip6address string `json:"ip6address,omitempty"` + Ip6cidr string `json:"ip6cidr,omitempty"` + Ip6gateway string `json:"ip6gateway,omitempty"` + Ipaddress string `json:"ipaddress,omitempty"` + Isdefault bool `json:"isdefault,omitempty"` + Isolationuri string `json:"isolationuri,omitempty"` + Macaddress string `json:"macaddress,omitempty"` + Netmask string `json:"netmask,omitempty"` + Networkid string `json:"networkid,omitempty"` + Networkname string `json:"networkname,omitempty"` + Secondaryip []struct { + Id string `json:"id,omitempty"` + Ipaddress string `json:"ipaddress,omitempty"` + } 
`json:"secondaryip,omitempty"`
+ Traffictype string `json:"traffictype,omitempty"`
+ Type string `json:"type,omitempty"`
+ Virtualmachineid string `json:"virtualmachineid,omitempty"`
+ } `json:"nic,omitempty"`
+ Podid string `json:"podid,omitempty"`
+ Project string `json:"project,omitempty"`
+ Projectid string `json:"projectid,omitempty"`
+ Publicip string `json:"publicip,omitempty"`
+ Publicmacaddress string `json:"publicmacaddress,omitempty"`
+ Publicnetmask string `json:"publicnetmask,omitempty"`
+ Publicnetworkid string `json:"publicnetworkid,omitempty"`
+ Redundantstate string `json:"redundantstate,omitempty"`
+ Requiresupgrade bool `json:"requiresupgrade,omitempty"`
+ Role string `json:"role,omitempty"`
+ Scriptsversion string `json:"scriptsversion,omitempty"`
+ Serviceofferingid string `json:"serviceofferingid,omitempty"`
+ Serviceofferingname string `json:"serviceofferingname,omitempty"`
+ State string `json:"state,omitempty"`
+ Templateid string `json:"templateid,omitempty"`
+ Version string `json:"version,omitempty"`
+ Vpcid string `json:"vpcid,omitempty"`
+ Zoneid string `json:"zoneid,omitempty"`
+ Zonename string `json:"zonename,omitempty"`
+}
+
+type RebootRouterParams struct {
+ p map[string]interface{}
+}
+
+func (p *RebootRouterParams) toURLValues() url.Values {
+ u := url.Values{}
+ if p.p == nil {
+ return u
+ }
+ if v, found := p.p["id"]; found {
+ u.Set("id", v.(string))
+ }
+ return u
+}
+
+func (p *RebootRouterParams) SetId(v string) {
+ if p.p == nil {
+ p.p = make(map[string]interface{})
+ }
+ p.p["id"] = v
+ return
+}
+
+// You should always use this function to get a new RebootRouterParams instance,
+// as then you are sure you have configured all required params
+func (s *RouterService) NewRebootRouterParams(id string) *RebootRouterParams {
+ p := &RebootRouterParams{}
+ p.p = make(map[string]interface{})
+ p.p["id"] = id
+ return p
+}
+
+// Reboots a router.
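+//
+// A minimal usage sketch, assuming a CloudStackClient value named cs and a
+// placeholder router UUID; with an async client this waits for the job to
+// finish or time out:
+//
+//	p := cs.Router.NewRebootRouterParams("router-uuid")
+//	r, err := cs.Router.RebootRouter(p)
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	fmt.Println("router state:", r.State)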
+func (s *RouterService) RebootRouter(p *RebootRouterParams) (*RebootRouterResponse, error) { + resp, err := s.cs.newRequest("rebootRouter", p.toURLValues()) + if err != nil { + return nil, err + } + + var r RebootRouterResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + + // If we have a async client, we need to wait for the async result + if s.cs.async { + b, err := s.cs.GetAsyncJobResult(r.JobID, s.cs.timeout) + if err != nil { + if err == AsyncTimeoutErr { + return &r, err + } + return nil, err + } + + b, err = getRawValue(b) + if err != nil { + return nil, err + } + + if err := json.Unmarshal(b, &r); err != nil { + return nil, err + } + } + return &r, nil +} + +type RebootRouterResponse struct { + JobID string `json:"jobid,omitempty"` + Account string `json:"account,omitempty"` + Created string `json:"created,omitempty"` + Dns1 string `json:"dns1,omitempty"` + Dns2 string `json:"dns2,omitempty"` + Domain string `json:"domain,omitempty"` + Domainid string `json:"domainid,omitempty"` + Gateway string `json:"gateway,omitempty"` + Guestipaddress string `json:"guestipaddress,omitempty"` + Guestmacaddress string `json:"guestmacaddress,omitempty"` + Guestnetmask string `json:"guestnetmask,omitempty"` + Guestnetworkid string `json:"guestnetworkid,omitempty"` + Hostid string `json:"hostid,omitempty"` + Hostname string `json:"hostname,omitempty"` + Id string `json:"id,omitempty"` + Ip6dns1 string `json:"ip6dns1,omitempty"` + Ip6dns2 string `json:"ip6dns2,omitempty"` + Isredundantrouter bool `json:"isredundantrouter,omitempty"` + Linklocalip string `json:"linklocalip,omitempty"` + Linklocalmacaddress string `json:"linklocalmacaddress,omitempty"` + Linklocalnetmask string `json:"linklocalnetmask,omitempty"` + Linklocalnetworkid string `json:"linklocalnetworkid,omitempty"` + Name string `json:"name,omitempty"` + Networkdomain string `json:"networkdomain,omitempty"` + Nic []struct { + Broadcasturi string `json:"broadcasturi,omitempty"` + Deviceid string `json:"deviceid,omitempty"` + Gateway string `json:"gateway,omitempty"` + Id string `json:"id,omitempty"` + Ip6address string `json:"ip6address,omitempty"` + Ip6cidr string `json:"ip6cidr,omitempty"` + Ip6gateway string `json:"ip6gateway,omitempty"` + Ipaddress string `json:"ipaddress,omitempty"` + Isdefault bool `json:"isdefault,omitempty"` + Isolationuri string `json:"isolationuri,omitempty"` + Macaddress string `json:"macaddress,omitempty"` + Netmask string `json:"netmask,omitempty"` + Networkid string `json:"networkid,omitempty"` + Networkname string `json:"networkname,omitempty"` + Secondaryip []struct { + Id string `json:"id,omitempty"` + Ipaddress string `json:"ipaddress,omitempty"` + } `json:"secondaryip,omitempty"` + Traffictype string `json:"traffictype,omitempty"` + Type string `json:"type,omitempty"` + Virtualmachineid string `json:"virtualmachineid,omitempty"` + } `json:"nic,omitempty"` + Podid string `json:"podid,omitempty"` + Project string `json:"project,omitempty"` + Projectid string `json:"projectid,omitempty"` + Publicip string `json:"publicip,omitempty"` + Publicmacaddress string `json:"publicmacaddress,omitempty"` + Publicnetmask string `json:"publicnetmask,omitempty"` + Publicnetworkid string `json:"publicnetworkid,omitempty"` + Redundantstate string `json:"redundantstate,omitempty"` + Requiresupgrade bool `json:"requiresupgrade,omitempty"` + Role string `json:"role,omitempty"` + Scriptsversion string `json:"scriptsversion,omitempty"` + Serviceofferingid string 
`json:"serviceofferingid,omitempty"` + Serviceofferingname string `json:"serviceofferingname,omitempty"` + State string `json:"state,omitempty"` + Templateid string `json:"templateid,omitempty"` + Version string `json:"version,omitempty"` + Vpcid string `json:"vpcid,omitempty"` + Zoneid string `json:"zoneid,omitempty"` + Zonename string `json:"zonename,omitempty"` +} + +type StopRouterParams struct { + p map[string]interface{} +} + +func (p *StopRouterParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["forced"]; found { + vv := strconv.FormatBool(v.(bool)) + u.Set("forced", vv) + } + if v, found := p.p["id"]; found { + u.Set("id", v.(string)) + } + return u +} + +func (p *StopRouterParams) SetForced(v bool) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["forced"] = v + return +} + +func (p *StopRouterParams) SetId(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["id"] = v + return +} + +// You should always use this function to get a new StopRouterParams instance, +// as then you are sure you have configured all required params +func (s *RouterService) NewStopRouterParams(id string) *StopRouterParams { + p := &StopRouterParams{} + p.p = make(map[string]interface{}) + p.p["id"] = id + return p +} + +// Stops a router. +func (s *RouterService) StopRouter(p *StopRouterParams) (*StopRouterResponse, error) { + resp, err := s.cs.newRequest("stopRouter", p.toURLValues()) + if err != nil { + return nil, err + } + + var r StopRouterResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + + // If we have a async client, we need to wait for the async result + if s.cs.async { + b, err := s.cs.GetAsyncJobResult(r.JobID, s.cs.timeout) + if err != nil { + if err == AsyncTimeoutErr { + return &r, err + } + return nil, err + } + + b, err = getRawValue(b) + if err != nil { + return nil, err + } + + if err := json.Unmarshal(b, &r); err != nil { + return nil, err + } + } + return &r, nil +} + +type StopRouterResponse struct { + JobID string `json:"jobid,omitempty"` + Account string `json:"account,omitempty"` + Created string `json:"created,omitempty"` + Dns1 string `json:"dns1,omitempty"` + Dns2 string `json:"dns2,omitempty"` + Domain string `json:"domain,omitempty"` + Domainid string `json:"domainid,omitempty"` + Gateway string `json:"gateway,omitempty"` + Guestipaddress string `json:"guestipaddress,omitempty"` + Guestmacaddress string `json:"guestmacaddress,omitempty"` + Guestnetmask string `json:"guestnetmask,omitempty"` + Guestnetworkid string `json:"guestnetworkid,omitempty"` + Hostid string `json:"hostid,omitempty"` + Hostname string `json:"hostname,omitempty"` + Id string `json:"id,omitempty"` + Ip6dns1 string `json:"ip6dns1,omitempty"` + Ip6dns2 string `json:"ip6dns2,omitempty"` + Isredundantrouter bool `json:"isredundantrouter,omitempty"` + Linklocalip string `json:"linklocalip,omitempty"` + Linklocalmacaddress string `json:"linklocalmacaddress,omitempty"` + Linklocalnetmask string `json:"linklocalnetmask,omitempty"` + Linklocalnetworkid string `json:"linklocalnetworkid,omitempty"` + Name string `json:"name,omitempty"` + Networkdomain string `json:"networkdomain,omitempty"` + Nic []struct { + Broadcasturi string `json:"broadcasturi,omitempty"` + Deviceid string `json:"deviceid,omitempty"` + Gateway string `json:"gateway,omitempty"` + Id string `json:"id,omitempty"` + Ip6address string `json:"ip6address,omitempty"` + Ip6cidr string `json:"ip6cidr,omitempty"` + Ip6gateway 
string `json:"ip6gateway,omitempty"` + Ipaddress string `json:"ipaddress,omitempty"` + Isdefault bool `json:"isdefault,omitempty"` + Isolationuri string `json:"isolationuri,omitempty"` + Macaddress string `json:"macaddress,omitempty"` + Netmask string `json:"netmask,omitempty"` + Networkid string `json:"networkid,omitempty"` + Networkname string `json:"networkname,omitempty"` + Secondaryip []struct { + Id string `json:"id,omitempty"` + Ipaddress string `json:"ipaddress,omitempty"` + } `json:"secondaryip,omitempty"` + Traffictype string `json:"traffictype,omitempty"` + Type string `json:"type,omitempty"` + Virtualmachineid string `json:"virtualmachineid,omitempty"` + } `json:"nic,omitempty"` + Podid string `json:"podid,omitempty"` + Project string `json:"project,omitempty"` + Projectid string `json:"projectid,omitempty"` + Publicip string `json:"publicip,omitempty"` + Publicmacaddress string `json:"publicmacaddress,omitempty"` + Publicnetmask string `json:"publicnetmask,omitempty"` + Publicnetworkid string `json:"publicnetworkid,omitempty"` + Redundantstate string `json:"redundantstate,omitempty"` + Requiresupgrade bool `json:"requiresupgrade,omitempty"` + Role string `json:"role,omitempty"` + Scriptsversion string `json:"scriptsversion,omitempty"` + Serviceofferingid string `json:"serviceofferingid,omitempty"` + Serviceofferingname string `json:"serviceofferingname,omitempty"` + State string `json:"state,omitempty"` + Templateid string `json:"templateid,omitempty"` + Version string `json:"version,omitempty"` + Vpcid string `json:"vpcid,omitempty"` + Zoneid string `json:"zoneid,omitempty"` + Zonename string `json:"zonename,omitempty"` +} + +type DestroyRouterParams struct { + p map[string]interface{} +} + +func (p *DestroyRouterParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["id"]; found { + u.Set("id", v.(string)) + } + return u +} + +func (p *DestroyRouterParams) SetId(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["id"] = v + return +} + +// You should always use this function to get a new DestroyRouterParams instance, +// as then you are sure you have configured all required params +func (s *RouterService) NewDestroyRouterParams(id string) *DestroyRouterParams { + p := &DestroyRouterParams{} + p.p = make(map[string]interface{}) + p.p["id"] = id + return p +} + +// Destroys a router. 
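+//
+// A minimal usage sketch, assuming the same cs client and a placeholder
+// router UUID:
+//
+//	p := cs.Router.NewDestroyRouterParams("router-uuid")
+//	if _, err := cs.Router.DestroyRouter(p); err != nil {
+//		log.Fatal(err)
+//	}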
+func (s *RouterService) DestroyRouter(p *DestroyRouterParams) (*DestroyRouterResponse, error) { + resp, err := s.cs.newRequest("destroyRouter", p.toURLValues()) + if err != nil { + return nil, err + } + + var r DestroyRouterResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + + // If we have a async client, we need to wait for the async result + if s.cs.async { + b, err := s.cs.GetAsyncJobResult(r.JobID, s.cs.timeout) + if err != nil { + if err == AsyncTimeoutErr { + return &r, err + } + return nil, err + } + + b, err = getRawValue(b) + if err != nil { + return nil, err + } + + if err := json.Unmarshal(b, &r); err != nil { + return nil, err + } + } + return &r, nil +} + +type DestroyRouterResponse struct { + JobID string `json:"jobid,omitempty"` + Account string `json:"account,omitempty"` + Created string `json:"created,omitempty"` + Dns1 string `json:"dns1,omitempty"` + Dns2 string `json:"dns2,omitempty"` + Domain string `json:"domain,omitempty"` + Domainid string `json:"domainid,omitempty"` + Gateway string `json:"gateway,omitempty"` + Guestipaddress string `json:"guestipaddress,omitempty"` + Guestmacaddress string `json:"guestmacaddress,omitempty"` + Guestnetmask string `json:"guestnetmask,omitempty"` + Guestnetworkid string `json:"guestnetworkid,omitempty"` + Hostid string `json:"hostid,omitempty"` + Hostname string `json:"hostname,omitempty"` + Id string `json:"id,omitempty"` + Ip6dns1 string `json:"ip6dns1,omitempty"` + Ip6dns2 string `json:"ip6dns2,omitempty"` + Isredundantrouter bool `json:"isredundantrouter,omitempty"` + Linklocalip string `json:"linklocalip,omitempty"` + Linklocalmacaddress string `json:"linklocalmacaddress,omitempty"` + Linklocalnetmask string `json:"linklocalnetmask,omitempty"` + Linklocalnetworkid string `json:"linklocalnetworkid,omitempty"` + Name string `json:"name,omitempty"` + Networkdomain string `json:"networkdomain,omitempty"` + Nic []struct { + Broadcasturi string `json:"broadcasturi,omitempty"` + Deviceid string `json:"deviceid,omitempty"` + Gateway string `json:"gateway,omitempty"` + Id string `json:"id,omitempty"` + Ip6address string `json:"ip6address,omitempty"` + Ip6cidr string `json:"ip6cidr,omitempty"` + Ip6gateway string `json:"ip6gateway,omitempty"` + Ipaddress string `json:"ipaddress,omitempty"` + Isdefault bool `json:"isdefault,omitempty"` + Isolationuri string `json:"isolationuri,omitempty"` + Macaddress string `json:"macaddress,omitempty"` + Netmask string `json:"netmask,omitempty"` + Networkid string `json:"networkid,omitempty"` + Networkname string `json:"networkname,omitempty"` + Secondaryip []struct { + Id string `json:"id,omitempty"` + Ipaddress string `json:"ipaddress,omitempty"` + } `json:"secondaryip,omitempty"` + Traffictype string `json:"traffictype,omitempty"` + Type string `json:"type,omitempty"` + Virtualmachineid string `json:"virtualmachineid,omitempty"` + } `json:"nic,omitempty"` + Podid string `json:"podid,omitempty"` + Project string `json:"project,omitempty"` + Projectid string `json:"projectid,omitempty"` + Publicip string `json:"publicip,omitempty"` + Publicmacaddress string `json:"publicmacaddress,omitempty"` + Publicnetmask string `json:"publicnetmask,omitempty"` + Publicnetworkid string `json:"publicnetworkid,omitempty"` + Redundantstate string `json:"redundantstate,omitempty"` + Requiresupgrade bool `json:"requiresupgrade,omitempty"` + Role string `json:"role,omitempty"` + Scriptsversion string `json:"scriptsversion,omitempty"` + Serviceofferingid string 
`json:"serviceofferingid,omitempty"` + Serviceofferingname string `json:"serviceofferingname,omitempty"` + State string `json:"state,omitempty"` + Templateid string `json:"templateid,omitempty"` + Version string `json:"version,omitempty"` + Vpcid string `json:"vpcid,omitempty"` + Zoneid string `json:"zoneid,omitempty"` + Zonename string `json:"zonename,omitempty"` +} + +type ChangeServiceForRouterParams struct { + p map[string]interface{} +} + +func (p *ChangeServiceForRouterParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["id"]; found { + u.Set("id", v.(string)) + } + if v, found := p.p["serviceofferingid"]; found { + u.Set("serviceofferingid", v.(string)) + } + return u +} + +func (p *ChangeServiceForRouterParams) SetId(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["id"] = v + return +} + +func (p *ChangeServiceForRouterParams) SetServiceofferingid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["serviceofferingid"] = v + return +} + +// You should always use this function to get a new ChangeServiceForRouterParams instance, +// as then you are sure you have configured all required params +func (s *RouterService) NewChangeServiceForRouterParams(id string, serviceofferingid string) *ChangeServiceForRouterParams { + p := &ChangeServiceForRouterParams{} + p.p = make(map[string]interface{}) + p.p["id"] = id + p.p["serviceofferingid"] = serviceofferingid + return p +} + +// Upgrades domain router to a new service offering +func (s *RouterService) ChangeServiceForRouter(p *ChangeServiceForRouterParams) (*ChangeServiceForRouterResponse, error) { + resp, err := s.cs.newRequest("changeServiceForRouter", p.toURLValues()) + if err != nil { + return nil, err + } + + var r ChangeServiceForRouterResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + return &r, nil +} + +type ChangeServiceForRouterResponse struct { + Account string `json:"account,omitempty"` + Created string `json:"created,omitempty"` + Dns1 string `json:"dns1,omitempty"` + Dns2 string `json:"dns2,omitempty"` + Domain string `json:"domain,omitempty"` + Domainid string `json:"domainid,omitempty"` + Gateway string `json:"gateway,omitempty"` + Guestipaddress string `json:"guestipaddress,omitempty"` + Guestmacaddress string `json:"guestmacaddress,omitempty"` + Guestnetmask string `json:"guestnetmask,omitempty"` + Guestnetworkid string `json:"guestnetworkid,omitempty"` + Hostid string `json:"hostid,omitempty"` + Hostname string `json:"hostname,omitempty"` + Id string `json:"id,omitempty"` + Ip6dns1 string `json:"ip6dns1,omitempty"` + Ip6dns2 string `json:"ip6dns2,omitempty"` + Isredundantrouter bool `json:"isredundantrouter,omitempty"` + Linklocalip string `json:"linklocalip,omitempty"` + Linklocalmacaddress string `json:"linklocalmacaddress,omitempty"` + Linklocalnetmask string `json:"linklocalnetmask,omitempty"` + Linklocalnetworkid string `json:"linklocalnetworkid,omitempty"` + Name string `json:"name,omitempty"` + Networkdomain string `json:"networkdomain,omitempty"` + Nic []struct { + Broadcasturi string `json:"broadcasturi,omitempty"` + Deviceid string `json:"deviceid,omitempty"` + Gateway string `json:"gateway,omitempty"` + Id string `json:"id,omitempty"` + Ip6address string `json:"ip6address,omitempty"` + Ip6cidr string `json:"ip6cidr,omitempty"` + Ip6gateway string `json:"ip6gateway,omitempty"` + Ipaddress string `json:"ipaddress,omitempty"` + Isdefault bool `json:"isdefault,omitempty"` 
+ Isolationuri string `json:"isolationuri,omitempty"` + Macaddress string `json:"macaddress,omitempty"` + Netmask string `json:"netmask,omitempty"` + Networkid string `json:"networkid,omitempty"` + Networkname string `json:"networkname,omitempty"` + Secondaryip []struct { + Id string `json:"id,omitempty"` + Ipaddress string `json:"ipaddress,omitempty"` + } `json:"secondaryip,omitempty"` + Traffictype string `json:"traffictype,omitempty"` + Type string `json:"type,omitempty"` + Virtualmachineid string `json:"virtualmachineid,omitempty"` + } `json:"nic,omitempty"` + Podid string `json:"podid,omitempty"` + Project string `json:"project,omitempty"` + Projectid string `json:"projectid,omitempty"` + Publicip string `json:"publicip,omitempty"` + Publicmacaddress string `json:"publicmacaddress,omitempty"` + Publicnetmask string `json:"publicnetmask,omitempty"` + Publicnetworkid string `json:"publicnetworkid,omitempty"` + Redundantstate string `json:"redundantstate,omitempty"` + Requiresupgrade bool `json:"requiresupgrade,omitempty"` + Role string `json:"role,omitempty"` + Scriptsversion string `json:"scriptsversion,omitempty"` + Serviceofferingid string `json:"serviceofferingid,omitempty"` + Serviceofferingname string `json:"serviceofferingname,omitempty"` + State string `json:"state,omitempty"` + Templateid string `json:"templateid,omitempty"` + Version string `json:"version,omitempty"` + Vpcid string `json:"vpcid,omitempty"` + Zoneid string `json:"zoneid,omitempty"` + Zonename string `json:"zonename,omitempty"` +} + +type ListRoutersParams struct { + p map[string]interface{} +} + +func (p *ListRoutersParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["account"]; found { + u.Set("account", v.(string)) + } + if v, found := p.p["clusterid"]; found { + u.Set("clusterid", v.(string)) + } + if v, found := p.p["domainid"]; found { + u.Set("domainid", v.(string)) + } + if v, found := p.p["forvpc"]; found { + vv := strconv.FormatBool(v.(bool)) + u.Set("forvpc", vv) + } + if v, found := p.p["hostid"]; found { + u.Set("hostid", v.(string)) + } + if v, found := p.p["id"]; found { + u.Set("id", v.(string)) + } + if v, found := p.p["isrecursive"]; found { + vv := strconv.FormatBool(v.(bool)) + u.Set("isrecursive", vv) + } + if v, found := p.p["keyword"]; found { + u.Set("keyword", v.(string)) + } + if v, found := p.p["listall"]; found { + vv := strconv.FormatBool(v.(bool)) + u.Set("listall", vv) + } + if v, found := p.p["name"]; found { + u.Set("name", v.(string)) + } + if v, found := p.p["networkid"]; found { + u.Set("networkid", v.(string)) + } + if v, found := p.p["page"]; found { + vv := strconv.Itoa(v.(int)) + u.Set("page", vv) + } + if v, found := p.p["pagesize"]; found { + vv := strconv.Itoa(v.(int)) + u.Set("pagesize", vv) + } + if v, found := p.p["podid"]; found { + u.Set("podid", v.(string)) + } + if v, found := p.p["projectid"]; found { + u.Set("projectid", v.(string)) + } + if v, found := p.p["state"]; found { + u.Set("state", v.(string)) + } + if v, found := p.p["version"]; found { + u.Set("version", v.(string)) + } + if v, found := p.p["vpcid"]; found { + u.Set("vpcid", v.(string)) + } + if v, found := p.p["zoneid"]; found { + u.Set("zoneid", v.(string)) + } + return u +} + +func (p *ListRoutersParams) SetAccount(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["account"] = v + return +} + +func (p *ListRoutersParams) SetClusterid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + 
p.p["clusterid"] = v + return +} + +func (p *ListRoutersParams) SetDomainid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["domainid"] = v + return +} + +func (p *ListRoutersParams) SetForvpc(v bool) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["forvpc"] = v + return +} + +func (p *ListRoutersParams) SetHostid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["hostid"] = v + return +} + +func (p *ListRoutersParams) SetId(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["id"] = v + return +} + +func (p *ListRoutersParams) SetIsrecursive(v bool) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["isrecursive"] = v + return +} + +func (p *ListRoutersParams) SetKeyword(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["keyword"] = v + return +} + +func (p *ListRoutersParams) SetListall(v bool) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["listall"] = v + return +} + +func (p *ListRoutersParams) SetName(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["name"] = v + return +} + +func (p *ListRoutersParams) SetNetworkid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["networkid"] = v + return +} + +func (p *ListRoutersParams) SetPage(v int) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["page"] = v + return +} + +func (p *ListRoutersParams) SetPagesize(v int) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["pagesize"] = v + return +} + +func (p *ListRoutersParams) SetPodid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["podid"] = v + return +} + +func (p *ListRoutersParams) SetProjectid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["projectid"] = v + return +} + +func (p *ListRoutersParams) SetState(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["state"] = v + return +} + +func (p *ListRoutersParams) SetVersion(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["version"] = v + return +} + +func (p *ListRoutersParams) SetVpcid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["vpcid"] = v + return +} + +func (p *ListRoutersParams) SetZoneid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["zoneid"] = v + return +} + +// You should always use this function to get a new ListRoutersParams instance, +// as then you are sure you have configured all required params +func (s *RouterService) NewListRoutersParams() *ListRoutersParams { + p := &ListRoutersParams{} + p.p = make(map[string]interface{}) + return p +} + +// This is a courtesy helper function, which in some cases may not work as expected! 
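+//
+// An illustrative sketch of a lookup by name (editor's example; the router
+// name "r-42-VM" is hypothetical):
+//
+//	id, err := s.GetRouterID("r-42-VM")
+//	if err != nil {
+//		// no match was found, or the name was ambiguous
+//	}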
+func (s *RouterService) GetRouterID(name string) (string, error) {
+	p := &ListRoutersParams{}
+	p.p = make(map[string]interface{})
+
+	p.p["name"] = name
+
+	l, err := s.ListRouters(p)
+	if err != nil {
+		return "", err
+	}
+
+	if l.Count == 0 {
+		// If no matches, search all projects
+		p.p["projectid"] = "-1"
+
+		l, err = s.ListRouters(p)
+		if err != nil {
+			return "", err
+		}
+	}
+
+	if l.Count == 0 {
+		return "", fmt.Errorf("No match found for %s: %+v", name, l)
+	}
+
+	if l.Count == 1 {
+		return l.Routers[0].Id, nil
+	}
+
+	if l.Count > 1 {
+		for _, v := range l.Routers {
+			if v.Name == name {
+				return v.Id, nil
+			}
+		}
+	}
+	return "", fmt.Errorf("Could not find an exact match for %s: %+v", name, l)
+}
+
+// This is a courtesy helper function, which in some cases may not work as expected!
+func (s *RouterService) GetRouterByName(name string) (*Router, int, error) {
+	id, err := s.GetRouterID(name)
+	if err != nil {
+		return nil, -1, err
+	}
+
+	r, count, err := s.GetRouterByID(id)
+	if err != nil {
+		return nil, count, err
+	}
+	return r, count, nil
+}
+
+// This is a courtesy helper function, which in some cases may not work as expected!
+func (s *RouterService) GetRouterByID(id string) (*Router, int, error) {
+	p := &ListRoutersParams{}
+	p.p = make(map[string]interface{})
+
+	p.p["id"] = id
+
+	l, err := s.ListRouters(p)
+	if err != nil {
+		if strings.Contains(err.Error(), fmt.Sprintf(
+			"Invalid parameter id value=%s due to incorrect long value format, "+
+				"or entity does not exist", id)) {
+			return nil, 0, fmt.Errorf("No match found for %s: %+v", id, l)
+		}
+		return nil, -1, err
+	}
+
+	if l.Count == 0 {
+		// If no matches, search all projects
+		p.p["projectid"] = "-1"
+
+		l, err = s.ListRouters(p)
+		if err != nil {
+			if strings.Contains(err.Error(), fmt.Sprintf(
+				"Invalid parameter id value=%s due to incorrect long value format, "+
+					"or entity does not exist", id)) {
+				return nil, 0, fmt.Errorf("No match found for %s: %+v", id, l)
+			}
+			return nil, -1, err
+		}
+	}
+
+	if l.Count == 0 {
+		return nil, l.Count, fmt.Errorf("No match found for %s: %+v", id, l)
+	}
+
+	if l.Count == 1 {
+		return l.Routers[0], l.Count, nil
+	}
+	return nil, l.Count, fmt.Errorf("There is more than one result for Router UUID: %s!", id)
+}
+
+// List routers.
+func (s *RouterService) ListRouters(p *ListRoutersParams) (*ListRoutersResponse, error) { + resp, err := s.cs.newRequest("listRouters", p.toURLValues()) + if err != nil { + return nil, err + } + + var r ListRoutersResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + return &r, nil +} + +type ListRoutersResponse struct { + Count int `json:"count"` + Routers []*Router `json:"router"` +} + +type Router struct { + Account string `json:"account,omitempty"` + Created string `json:"created,omitempty"` + Dns1 string `json:"dns1,omitempty"` + Dns2 string `json:"dns2,omitempty"` + Domain string `json:"domain,omitempty"` + Domainid string `json:"domainid,omitempty"` + Gateway string `json:"gateway,omitempty"` + Guestipaddress string `json:"guestipaddress,omitempty"` + Guestmacaddress string `json:"guestmacaddress,omitempty"` + Guestnetmask string `json:"guestnetmask,omitempty"` + Guestnetworkid string `json:"guestnetworkid,omitempty"` + Hostid string `json:"hostid,omitempty"` + Hostname string `json:"hostname,omitempty"` + Id string `json:"id,omitempty"` + Ip6dns1 string `json:"ip6dns1,omitempty"` + Ip6dns2 string `json:"ip6dns2,omitempty"` + Isredundantrouter bool `json:"isredundantrouter,omitempty"` + Linklocalip string `json:"linklocalip,omitempty"` + Linklocalmacaddress string `json:"linklocalmacaddress,omitempty"` + Linklocalnetmask string `json:"linklocalnetmask,omitempty"` + Linklocalnetworkid string `json:"linklocalnetworkid,omitempty"` + Name string `json:"name,omitempty"` + Networkdomain string `json:"networkdomain,omitempty"` + Nic []struct { + Broadcasturi string `json:"broadcasturi,omitempty"` + Deviceid string `json:"deviceid,omitempty"` + Gateway string `json:"gateway,omitempty"` + Id string `json:"id,omitempty"` + Ip6address string `json:"ip6address,omitempty"` + Ip6cidr string `json:"ip6cidr,omitempty"` + Ip6gateway string `json:"ip6gateway,omitempty"` + Ipaddress string `json:"ipaddress,omitempty"` + Isdefault bool `json:"isdefault,omitempty"` + Isolationuri string `json:"isolationuri,omitempty"` + Macaddress string `json:"macaddress,omitempty"` + Netmask string `json:"netmask,omitempty"` + Networkid string `json:"networkid,omitempty"` + Networkname string `json:"networkname,omitempty"` + Secondaryip []struct { + Id string `json:"id,omitempty"` + Ipaddress string `json:"ipaddress,omitempty"` + } `json:"secondaryip,omitempty"` + Traffictype string `json:"traffictype,omitempty"` + Type string `json:"type,omitempty"` + Virtualmachineid string `json:"virtualmachineid,omitempty"` + } `json:"nic,omitempty"` + Podid string `json:"podid,omitempty"` + Project string `json:"project,omitempty"` + Projectid string `json:"projectid,omitempty"` + Publicip string `json:"publicip,omitempty"` + Publicmacaddress string `json:"publicmacaddress,omitempty"` + Publicnetmask string `json:"publicnetmask,omitempty"` + Publicnetworkid string `json:"publicnetworkid,omitempty"` + Redundantstate string `json:"redundantstate,omitempty"` + Requiresupgrade bool `json:"requiresupgrade,omitempty"` + Role string `json:"role,omitempty"` + Scriptsversion string `json:"scriptsversion,omitempty"` + Serviceofferingid string `json:"serviceofferingid,omitempty"` + Serviceofferingname string `json:"serviceofferingname,omitempty"` + State string `json:"state,omitempty"` + Templateid string `json:"templateid,omitempty"` + Version string `json:"version,omitempty"` + Vpcid string `json:"vpcid,omitempty"` + Zoneid string `json:"zoneid,omitempty"` + Zonename string `json:"zonename,omitempty"` +} + 
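+// listRunningRouters is an editor's illustrative sketch, not part of the
+// upstream go-cloudstack source. It shows the usage pattern this package
+// expects: build params with the New...Params constructor, set optional
+// filters via the Set... methods, then call the service method. The
+// "Running" state value is an assumption based on the CloudStack API.
+func listRunningRouters(s *RouterService) ([]*Router, error) {
+	p := s.NewListRoutersParams()
+	p.SetState("Running")
+	r, err := s.ListRouters(p)
+	if err != nil {
+		return nil, err
+	}
+	return r.Routers, nil
+}
+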
+type ListVirtualRouterElementsParams struct {
+	p map[string]interface{}
+}
+
+func (p *ListVirtualRouterElementsParams) toURLValues() url.Values {
+	u := url.Values{}
+	if p.p == nil {
+		return u
+	}
+	if v, found := p.p["enabled"]; found {
+		vv := strconv.FormatBool(v.(bool))
+		u.Set("enabled", vv)
+	}
+	if v, found := p.p["id"]; found {
+		u.Set("id", v.(string))
+	}
+	if v, found := p.p["keyword"]; found {
+		u.Set("keyword", v.(string))
+	}
+	if v, found := p.p["nspid"]; found {
+		u.Set("nspid", v.(string))
+	}
+	if v, found := p.p["page"]; found {
+		vv := strconv.Itoa(v.(int))
+		u.Set("page", vv)
+	}
+	if v, found := p.p["pagesize"]; found {
+		vv := strconv.Itoa(v.(int))
+		u.Set("pagesize", vv)
+	}
+	return u
+}
+
+func (p *ListVirtualRouterElementsParams) SetEnabled(v bool) {
+	if p.p == nil {
+		p.p = make(map[string]interface{})
+	}
+	p.p["enabled"] = v
+	return
+}
+
+func (p *ListVirtualRouterElementsParams) SetId(v string) {
+	if p.p == nil {
+		p.p = make(map[string]interface{})
+	}
+	p.p["id"] = v
+	return
+}
+
+func (p *ListVirtualRouterElementsParams) SetKeyword(v string) {
+	if p.p == nil {
+		p.p = make(map[string]interface{})
+	}
+	p.p["keyword"] = v
+	return
+}
+
+func (p *ListVirtualRouterElementsParams) SetNspid(v string) {
+	if p.p == nil {
+		p.p = make(map[string]interface{})
+	}
+	p.p["nspid"] = v
+	return
+}
+
+func (p *ListVirtualRouterElementsParams) SetPage(v int) {
+	if p.p == nil {
+		p.p = make(map[string]interface{})
+	}
+	p.p["page"] = v
+	return
+}
+
+func (p *ListVirtualRouterElementsParams) SetPagesize(v int) {
+	if p.p == nil {
+		p.p = make(map[string]interface{})
+	}
+	p.p["pagesize"] = v
+	return
+}
+
+// You should always use this function to get a new ListVirtualRouterElementsParams instance,
+// as then you are sure you have configured all required params
+func (s *RouterService) NewListVirtualRouterElementsParams() *ListVirtualRouterElementsParams {
+	p := &ListVirtualRouterElementsParams{}
+	p.p = make(map[string]interface{})
+	return p
+}
+
+// This is a courtesy helper function, which in some cases may not work as expected!
+func (s *RouterService) GetVirtualRouterElementByID(id string) (*VirtualRouterElement, int, error) {
+	p := &ListVirtualRouterElementsParams{}
+	p.p = make(map[string]interface{})
+
+	p.p["id"] = id
+
+	l, err := s.ListVirtualRouterElements(p)
+	if err != nil {
+		if strings.Contains(err.Error(), fmt.Sprintf(
+			"Invalid parameter id value=%s due to incorrect long value format, "+
+				"or entity does not exist", id)) {
+			return nil, 0, fmt.Errorf("No match found for %s: %+v", id, l)
+		}
+		return nil, -1, err
+	}
+
+	if l.Count == 0 {
+		return nil, l.Count, fmt.Errorf("No match found for %s: %+v", id, l)
+	}
+
+	if l.Count == 1 {
+		return l.VirtualRouterElements[0], l.Count, nil
+	}
+	return nil, l.Count, fmt.Errorf("There is more than one result for VirtualRouterElement UUID: %s!", id)
+}
+
+// Lists all available virtual router elements.
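+//
+// An illustrative sketch (editor's example, not upstream documentation):
+// listing only the enabled elements might look like:
+//
+//	p := s.NewListVirtualRouterElementsParams()
+//	p.SetEnabled(true)
+//	r, err := s.ListVirtualRouterElements(p)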
+func (s *RouterService) ListVirtualRouterElements(p *ListVirtualRouterElementsParams) (*ListVirtualRouterElementsResponse, error) { + resp, err := s.cs.newRequest("listVirtualRouterElements", p.toURLValues()) + if err != nil { + return nil, err + } + + var r ListVirtualRouterElementsResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + return &r, nil +} + +type ListVirtualRouterElementsResponse struct { + Count int `json:"count"` + VirtualRouterElements []*VirtualRouterElement `json:"virtualrouterelement"` +} + +type VirtualRouterElement struct { + Account string `json:"account,omitempty"` + Domain string `json:"domain,omitempty"` + Domainid string `json:"domainid,omitempty"` + Enabled bool `json:"enabled,omitempty"` + Id string `json:"id,omitempty"` + Nspid string `json:"nspid,omitempty"` + Project string `json:"project,omitempty"` + Projectid string `json:"projectid,omitempty"` +} + +type ConfigureVirtualRouterElementParams struct { + p map[string]interface{} +} + +func (p *ConfigureVirtualRouterElementParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["enabled"]; found { + vv := strconv.FormatBool(v.(bool)) + u.Set("enabled", vv) + } + if v, found := p.p["id"]; found { + u.Set("id", v.(string)) + } + return u +} + +func (p *ConfigureVirtualRouterElementParams) SetEnabled(v bool) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["enabled"] = v + return +} + +func (p *ConfigureVirtualRouterElementParams) SetId(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["id"] = v + return +} + +// You should always use this function to get a new ConfigureVirtualRouterElementParams instance, +// as then you are sure you have configured all required params +func (s *RouterService) NewConfigureVirtualRouterElementParams(enabled bool, id string) *ConfigureVirtualRouterElementParams { + p := &ConfigureVirtualRouterElementParams{} + p.p = make(map[string]interface{}) + p.p["enabled"] = enabled + p.p["id"] = id + return p +} + +// Configures a virtual router element. 
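+//
+// An illustrative sketch (editor's example; the element UUID is
+// hypothetical). With an async client the call below waits, up to the
+// configured timeout, for the underlying async job to finish:
+//
+//	p := s.NewConfigureVirtualRouterElementParams(true, "element-uuid")
+//	r, err := s.ConfigureVirtualRouterElement(p)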
+func (s *RouterService) ConfigureVirtualRouterElement(p *ConfigureVirtualRouterElementParams) (*ConfigureVirtualRouterElementResponse, error) {
+	resp, err := s.cs.newRequest("configureVirtualRouterElement", p.toURLValues())
+	if err != nil {
+		return nil, err
+	}
+
+	var r ConfigureVirtualRouterElementResponse
+	if err := json.Unmarshal(resp, &r); err != nil {
+		return nil, err
+	}
+
+	// If we have an async client, we need to wait for the async result
+	if s.cs.async {
+		b, err := s.cs.GetAsyncJobResult(r.JobID, s.cs.timeout)
+		if err != nil {
+			if err == AsyncTimeoutErr {
+				return &r, err
+			}
+			return nil, err
+		}
+
+		b, err = getRawValue(b)
+		if err != nil {
+			return nil, err
+		}
+
+		if err := json.Unmarshal(b, &r); err != nil {
+			return nil, err
+		}
+	}
+	return &r, nil
+}
+
+type ConfigureVirtualRouterElementResponse struct {
+	JobID     string `json:"jobid,omitempty"`
+	Account   string `json:"account,omitempty"`
+	Domain    string `json:"domain,omitempty"`
+	Domainid  string `json:"domainid,omitempty"`
+	Enabled   bool   `json:"enabled,omitempty"`
+	Id        string `json:"id,omitempty"`
+	Nspid     string `json:"nspid,omitempty"`
+	Project   string `json:"project,omitempty"`
+	Projectid string `json:"projectid,omitempty"`
+}
+
+type CreateVirtualRouterElementParams struct {
+	p map[string]interface{}
+}
+
+func (p *CreateVirtualRouterElementParams) toURLValues() url.Values {
+	u := url.Values{}
+	if p.p == nil {
+		return u
+	}
+	if v, found := p.p["nspid"]; found {
+		u.Set("nspid", v.(string))
+	}
+	if v, found := p.p["providertype"]; found {
+		u.Set("providertype", v.(string))
+	}
+	return u
+}
+
+func (p *CreateVirtualRouterElementParams) SetNspid(v string) {
+	if p.p == nil {
+		p.p = make(map[string]interface{})
+	}
+	p.p["nspid"] = v
+	return
+}
+
+func (p *CreateVirtualRouterElementParams) SetProvidertype(v string) {
+	if p.p == nil {
+		p.p = make(map[string]interface{})
+	}
+	p.p["providertype"] = v
+	return
+}
+
+// You should always use this function to get a new CreateVirtualRouterElementParams instance,
+// as then you are sure you have configured all required params
+func (s *RouterService) NewCreateVirtualRouterElementParams(nspid string) *CreateVirtualRouterElementParams {
+	p := &CreateVirtualRouterElementParams{}
+	p.p = make(map[string]interface{})
+	p.p["nspid"] = nspid
+	return p
+}
+
+// Create a virtual router element.
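+//
+// An illustrative sketch (editor's example; the nspid value is
+// hypothetical):
+//
+//	p := s.NewCreateVirtualRouterElementParams("nsp-uuid")
+//	r, err := s.CreateVirtualRouterElement(p)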
+func (s *RouterService) CreateVirtualRouterElement(p *CreateVirtualRouterElementParams) (*CreateVirtualRouterElementResponse, error) {
+	resp, err := s.cs.newRequest("createVirtualRouterElement", p.toURLValues())
+	if err != nil {
+		return nil, err
+	}
+
+	var r CreateVirtualRouterElementResponse
+	if err := json.Unmarshal(resp, &r); err != nil {
+		return nil, err
+	}
+
+	// If we have an async client, we need to wait for the async result
+	if s.cs.async {
+		b, err := s.cs.GetAsyncJobResult(r.JobID, s.cs.timeout)
+		if err != nil {
+			if err == AsyncTimeoutErr {
+				return &r, err
+			}
+			return nil, err
+		}
+
+		b, err = getRawValue(b)
+		if err != nil {
+			return nil, err
+		}
+
+		if err := json.Unmarshal(b, &r); err != nil {
+			return nil, err
+		}
+	}
+	return &r, nil
+}
+
+type CreateVirtualRouterElementResponse struct {
+	JobID     string `json:"jobid,omitempty"`
+	Account   string `json:"account,omitempty"`
+	Domain    string `json:"domain,omitempty"`
+	Domainid  string `json:"domainid,omitempty"`
+	Enabled   bool   `json:"enabled,omitempty"`
+	Id        string `json:"id,omitempty"`
+	Nspid     string `json:"nspid,omitempty"`
+	Project   string `json:"project,omitempty"`
+	Projectid string `json:"projectid,omitempty"`
+}
diff --git a/vendor/github.com/xanzy/go-cloudstack/cloudstack/S3Service.go b/vendor/github.com/xanzy/go-cloudstack/cloudstack/S3Service.go
new file mode 100644
index 000000000..d2cbb9326
--- /dev/null
+++ b/vendor/github.com/xanzy/go-cloudstack/cloudstack/S3Service.go
@@ -0,0 +1,281 @@
+//
+// Copyright 2014, Sander van Harmelen
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+// + +package cloudstack + +import ( + "encoding/json" + "fmt" + "net/url" + "strconv" +) + +type AddS3Params struct { + p map[string]interface{} +} + +func (p *AddS3Params) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["accesskey"]; found { + u.Set("accesskey", v.(string)) + } + if v, found := p.p["bucket"]; found { + u.Set("bucket", v.(string)) + } + if v, found := p.p["connectiontimeout"]; found { + vv := strconv.Itoa(v.(int)) + u.Set("connectiontimeout", vv) + } + if v, found := p.p["endpoint"]; found { + u.Set("endpoint", v.(string)) + } + if v, found := p.p["maxerrorretry"]; found { + vv := strconv.Itoa(v.(int)) + u.Set("maxerrorretry", vv) + } + if v, found := p.p["secretkey"]; found { + u.Set("secretkey", v.(string)) + } + if v, found := p.p["sockettimeout"]; found { + vv := strconv.Itoa(v.(int)) + u.Set("sockettimeout", vv) + } + if v, found := p.p["usehttps"]; found { + vv := strconv.FormatBool(v.(bool)) + u.Set("usehttps", vv) + } + return u +} + +func (p *AddS3Params) SetAccesskey(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["accesskey"] = v + return +} + +func (p *AddS3Params) SetBucket(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["bucket"] = v + return +} + +func (p *AddS3Params) SetConnectiontimeout(v int) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["connectiontimeout"] = v + return +} + +func (p *AddS3Params) SetEndpoint(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["endpoint"] = v + return +} + +func (p *AddS3Params) SetMaxerrorretry(v int) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["maxerrorretry"] = v + return +} + +func (p *AddS3Params) SetSecretkey(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["secretkey"] = v + return +} + +func (p *AddS3Params) SetSockettimeout(v int) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["sockettimeout"] = v + return +} + +func (p *AddS3Params) SetUsehttps(v bool) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["usehttps"] = v + return +} + +// You should always use this function to get a new AddS3Params instance, +// as then you are sure you have configured all required params +func (s *S3Service) NewAddS3Params(accesskey string, bucket string, secretkey string) *AddS3Params { + p := &AddS3Params{} + p.p = make(map[string]interface{}) + p.p["accesskey"] = accesskey + p.p["bucket"] = bucket + p.p["secretkey"] = secretkey + return p +} + +// Adds S3 +func (s *S3Service) AddS3(p *AddS3Params) (*AddS3Response, error) { + resp, err := s.cs.newRequest("addS3", p.toURLValues()) + if err != nil { + return nil, err + } + + var r AddS3Response + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + return &r, nil +} + +type AddS3Response struct { + Details []string `json:"details,omitempty"` + Id string `json:"id,omitempty"` + Name string `json:"name,omitempty"` + Protocol string `json:"protocol,omitempty"` + Providername string `json:"providername,omitempty"` + Scope string `json:"scope,omitempty"` + Url string `json:"url,omitempty"` + Zoneid string `json:"zoneid,omitempty"` + Zonename string `json:"zonename,omitempty"` +} + +type ListS3sParams struct { + p map[string]interface{} +} + +func (p *ListS3sParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["keyword"]; found { + u.Set("keyword", v.(string)) + } + if 
v, found := p.p["page"]; found { + vv := strconv.Itoa(v.(int)) + u.Set("page", vv) + } + if v, found := p.p["pagesize"]; found { + vv := strconv.Itoa(v.(int)) + u.Set("pagesize", vv) + } + return u +} + +func (p *ListS3sParams) SetKeyword(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["keyword"] = v + return +} + +func (p *ListS3sParams) SetPage(v int) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["page"] = v + return +} + +func (p *ListS3sParams) SetPagesize(v int) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["pagesize"] = v + return +} + +// You should always use this function to get a new ListS3sParams instance, +// as then you are sure you have configured all required params +func (s *S3Service) NewListS3sParams() *ListS3sParams { + p := &ListS3sParams{} + p.p = make(map[string]interface{}) + return p +} + +// This is a courtesy helper function, which in some cases may not work as expected! +func (s *S3Service) GetS3ID(keyword string) (string, error) { + p := &ListS3sParams{} + p.p = make(map[string]interface{}) + + p.p["keyword"] = keyword + + l, err := s.ListS3s(p) + if err != nil { + return "", err + } + + if l.Count == 0 { + return "", fmt.Errorf("No match found for %s: %+v", keyword, l) + } + + if l.Count == 1 { + return l.S3s[0].Id, nil + } + + if l.Count > 1 { + for _, v := range l.S3s { + if v.Name == keyword { + return v.Id, nil + } + } + } + return "", fmt.Errorf("Could not find an exact match for %s: %+v", keyword, l) +} + +// Lists S3s +func (s *S3Service) ListS3s(p *ListS3sParams) (*ListS3sResponse, error) { + resp, err := s.cs.newRequest("listS3s", p.toURLValues()) + if err != nil { + return nil, err + } + + var r ListS3sResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + return &r, nil +} + +type ListS3sResponse struct { + Count int `json:"count"` + S3s []*S3 `json:"s3"` +} + +type S3 struct { + Details []string `json:"details,omitempty"` + Id string `json:"id,omitempty"` + Name string `json:"name,omitempty"` + Protocol string `json:"protocol,omitempty"` + Providername string `json:"providername,omitempty"` + Scope string `json:"scope,omitempty"` + Url string `json:"url,omitempty"` + Zoneid string `json:"zoneid,omitempty"` + Zonename string `json:"zonename,omitempty"` +} diff --git a/vendor/github.com/xanzy/go-cloudstack/cloudstack/SSHService.go b/vendor/github.com/xanzy/go-cloudstack/cloudstack/SSHService.go new file mode 100644 index 000000000..eb97dfcb8 --- /dev/null +++ b/vendor/github.com/xanzy/go-cloudstack/cloudstack/SSHService.go @@ -0,0 +1,734 @@ +// +// Copyright 2014, Sander van Harmelen +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// + +package cloudstack + +import ( + "encoding/json" + "net/url" + "strconv" +) + +type ResetSSHKeyForVirtualMachineParams struct { + p map[string]interface{} +} + +func (p *ResetSSHKeyForVirtualMachineParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["account"]; found { + u.Set("account", v.(string)) + } + if v, found := p.p["domainid"]; found { + u.Set("domainid", v.(string)) + } + if v, found := p.p["id"]; found { + u.Set("id", v.(string)) + } + if v, found := p.p["keypair"]; found { + u.Set("keypair", v.(string)) + } + if v, found := p.p["projectid"]; found { + u.Set("projectid", v.(string)) + } + return u +} + +func (p *ResetSSHKeyForVirtualMachineParams) SetAccount(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["account"] = v + return +} + +func (p *ResetSSHKeyForVirtualMachineParams) SetDomainid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["domainid"] = v + return +} + +func (p *ResetSSHKeyForVirtualMachineParams) SetId(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["id"] = v + return +} + +func (p *ResetSSHKeyForVirtualMachineParams) SetKeypair(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["keypair"] = v + return +} + +func (p *ResetSSHKeyForVirtualMachineParams) SetProjectid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["projectid"] = v + return +} + +// You should always use this function to get a new ResetSSHKeyForVirtualMachineParams instance, +// as then you are sure you have configured all required params +func (s *SSHService) NewResetSSHKeyForVirtualMachineParams(id string, keypair string) *ResetSSHKeyForVirtualMachineParams { + p := &ResetSSHKeyForVirtualMachineParams{} + p.p = make(map[string]interface{}) + p.p["id"] = id + p.p["keypair"] = keypair + return p +} + +// Resets the SSH Key for virtual machine. The virtual machine must be in a "Stopped" state. 
[async]
+func (s *SSHService) ResetSSHKeyForVirtualMachine(p *ResetSSHKeyForVirtualMachineParams) (*ResetSSHKeyForVirtualMachineResponse, error) {
+	resp, err := s.cs.newRequest("resetSSHKeyForVirtualMachine", p.toURLValues())
+	if err != nil {
+		return nil, err
+	}
+
+	var r ResetSSHKeyForVirtualMachineResponse
+	if err := json.Unmarshal(resp, &r); err != nil {
+		return nil, err
+	}
+
+	// If we have an async client, we need to wait for the async result
+	if s.cs.async {
+		b, err := s.cs.GetAsyncJobResult(r.JobID, s.cs.timeout)
+		if err != nil {
+			if err == AsyncTimeoutErr {
+				return &r, err
+			}
+			return nil, err
+		}
+
+		b, err = getRawValue(b)
+		if err != nil {
+			return nil, err
+		}
+
+		if err := json.Unmarshal(b, &r); err != nil {
+			return nil, err
+		}
+	}
+	return &r, nil
+}
+
+type ResetSSHKeyForVirtualMachineResponse struct {
+	JobID         string `json:"jobid,omitempty"`
+	Account       string `json:"account,omitempty"`
+	Affinitygroup []struct {
+		Account           string   `json:"account,omitempty"`
+		Description       string   `json:"description,omitempty"`
+		Domain            string   `json:"domain,omitempty"`
+		Domainid          string   `json:"domainid,omitempty"`
+		Id                string   `json:"id,omitempty"`
+		Name              string   `json:"name,omitempty"`
+		Type              string   `json:"type,omitempty"`
+		VirtualmachineIds []string `json:"virtualmachineIds,omitempty"`
+	} `json:"affinitygroup,omitempty"`
+	Cpunumber             int               `json:"cpunumber,omitempty"`
+	Cpuspeed              int               `json:"cpuspeed,omitempty"`
+	Cpuused               string            `json:"cpuused,omitempty"`
+	Created               string            `json:"created,omitempty"`
+	Details               map[string]string `json:"details,omitempty"`
+	Diskioread            int64             `json:"diskioread,omitempty"`
+	Diskiowrite           int64             `json:"diskiowrite,omitempty"`
+	Diskkbsread           int64             `json:"diskkbsread,omitempty"`
+	Diskkbswrite          int64             `json:"diskkbswrite,omitempty"`
+	Diskofferingid        string            `json:"diskofferingid,omitempty"`
+	Diskofferingname      string            `json:"diskofferingname,omitempty"`
+	Displayname           string            `json:"displayname,omitempty"`
+	Displayvm             bool              `json:"displayvm,omitempty"`
+	Domain                string            `json:"domain,omitempty"`
+	Domainid              string            `json:"domainid,omitempty"`
+	Forvirtualnetwork     bool              `json:"forvirtualnetwork,omitempty"`
+	Group                 string            `json:"group,omitempty"`
+	Groupid               string            `json:"groupid,omitempty"`
+	Guestosid             string            `json:"guestosid,omitempty"`
+	Haenable              bool              `json:"haenable,omitempty"`
+	Hostid                string            `json:"hostid,omitempty"`
+	Hostname              string            `json:"hostname,omitempty"`
+	Hypervisor            string            `json:"hypervisor,omitempty"`
+	Id                    string            `json:"id,omitempty"`
+	Instancename          string            `json:"instancename,omitempty"`
+	Isdynamicallyscalable bool              `json:"isdynamicallyscalable,omitempty"`
+	Isodisplaytext        string            `json:"isodisplaytext,omitempty"`
+	Isoid                 string            `json:"isoid,omitempty"`
+	Isoname               string            `json:"isoname,omitempty"`
+	Keypair               string            `json:"keypair,omitempty"`
+	Memory                int               `json:"memory,omitempty"`
+	Name                  string            `json:"name,omitempty"`
+	Networkkbsread        int64             `json:"networkkbsread,omitempty"`
+	Networkkbswrite       int64             `json:"networkkbswrite,omitempty"`
+	Nic                   []struct {
+		Broadcasturi string `json:"broadcasturi,omitempty"`
+		Deviceid     string `json:"deviceid,omitempty"`
+		Gateway      string `json:"gateway,omitempty"`
+		Id           string `json:"id,omitempty"`
+		Ip6address   string `json:"ip6address,omitempty"`
+		Ip6cidr      string `json:"ip6cidr,omitempty"`
+		Ip6gateway   string `json:"ip6gateway,omitempty"`
+		Ipaddress    string `json:"ipaddress,omitempty"`
+		Isdefault    bool   `json:"isdefault,omitempty"`
+		Isolationuri string `json:"isolationuri,omitempty"`
+		Macaddress   string `json:"macaddress,omitempty"`
+		Netmask
string `json:"netmask,omitempty"` + Networkid string `json:"networkid,omitempty"` + Networkname string `json:"networkname,omitempty"` + Secondaryip []struct { + Id string `json:"id,omitempty"` + Ipaddress string `json:"ipaddress,omitempty"` + } `json:"secondaryip,omitempty"` + Traffictype string `json:"traffictype,omitempty"` + Type string `json:"type,omitempty"` + Virtualmachineid string `json:"virtualmachineid,omitempty"` + } `json:"nic,omitempty"` + Ostypeid int64 `json:"ostypeid,omitempty"` + Password string `json:"password,omitempty"` + Passwordenabled bool `json:"passwordenabled,omitempty"` + Project string `json:"project,omitempty"` + Projectid string `json:"projectid,omitempty"` + Publicip string `json:"publicip,omitempty"` + Publicipid string `json:"publicipid,omitempty"` + Rootdeviceid int64 `json:"rootdeviceid,omitempty"` + Rootdevicetype string `json:"rootdevicetype,omitempty"` + Securitygroup []struct { + Account string `json:"account,omitempty"` + Description string `json:"description,omitempty"` + Domain string `json:"domain,omitempty"` + Domainid string `json:"domainid,omitempty"` + Egressrule []struct { + Account string `json:"account,omitempty"` + Cidr string `json:"cidr,omitempty"` + Endport int `json:"endport,omitempty"` + Icmpcode int `json:"icmpcode,omitempty"` + Icmptype int `json:"icmptype,omitempty"` + Protocol string `json:"protocol,omitempty"` + Ruleid string `json:"ruleid,omitempty"` + Securitygroupname string `json:"securitygroupname,omitempty"` + Startport int `json:"startport,omitempty"` + Tags []struct { + Account string `json:"account,omitempty"` + Customer string `json:"customer,omitempty"` + Domain string `json:"domain,omitempty"` + Domainid string `json:"domainid,omitempty"` + Key string `json:"key,omitempty"` + Project string `json:"project,omitempty"` + Projectid string `json:"projectid,omitempty"` + Resourceid string `json:"resourceid,omitempty"` + Resourcetype string `json:"resourcetype,omitempty"` + Value string `json:"value,omitempty"` + } `json:"tags,omitempty"` + } `json:"egressrule,omitempty"` + Id string `json:"id,omitempty"` + Ingressrule []struct { + Account string `json:"account,omitempty"` + Cidr string `json:"cidr,omitempty"` + Endport int `json:"endport,omitempty"` + Icmpcode int `json:"icmpcode,omitempty"` + Icmptype int `json:"icmptype,omitempty"` + Protocol string `json:"protocol,omitempty"` + Ruleid string `json:"ruleid,omitempty"` + Securitygroupname string `json:"securitygroupname,omitempty"` + Startport int `json:"startport,omitempty"` + Tags []struct { + Account string `json:"account,omitempty"` + Customer string `json:"customer,omitempty"` + Domain string `json:"domain,omitempty"` + Domainid string `json:"domainid,omitempty"` + Key string `json:"key,omitempty"` + Project string `json:"project,omitempty"` + Projectid string `json:"projectid,omitempty"` + Resourceid string `json:"resourceid,omitempty"` + Resourcetype string `json:"resourcetype,omitempty"` + Value string `json:"value,omitempty"` + } `json:"tags,omitempty"` + } `json:"ingressrule,omitempty"` + Name string `json:"name,omitempty"` + Project string `json:"project,omitempty"` + Projectid string `json:"projectid,omitempty"` + Tags []struct { + Account string `json:"account,omitempty"` + Customer string `json:"customer,omitempty"` + Domain string `json:"domain,omitempty"` + Domainid string `json:"domainid,omitempty"` + Key string `json:"key,omitempty"` + Project string `json:"project,omitempty"` + Projectid string `json:"projectid,omitempty"` + Resourceid string 
`json:"resourceid,omitempty"` + Resourcetype string `json:"resourcetype,omitempty"` + Value string `json:"value,omitempty"` + } `json:"tags,omitempty"` + } `json:"securitygroup,omitempty"` + Serviceofferingid string `json:"serviceofferingid,omitempty"` + Serviceofferingname string `json:"serviceofferingname,omitempty"` + Servicestate string `json:"servicestate,omitempty"` + State string `json:"state,omitempty"` + Tags []struct { + Account string `json:"account,omitempty"` + Customer string `json:"customer,omitempty"` + Domain string `json:"domain,omitempty"` + Domainid string `json:"domainid,omitempty"` + Key string `json:"key,omitempty"` + Project string `json:"project,omitempty"` + Projectid string `json:"projectid,omitempty"` + Resourceid string `json:"resourceid,omitempty"` + Resourcetype string `json:"resourcetype,omitempty"` + Value string `json:"value,omitempty"` + } `json:"tags,omitempty"` + Templatedisplaytext string `json:"templatedisplaytext,omitempty"` + Templateid string `json:"templateid,omitempty"` + Templatename string `json:"templatename,omitempty"` + Vgpu string `json:"vgpu,omitempty"` + Zoneid string `json:"zoneid,omitempty"` + Zonename string `json:"zonename,omitempty"` +} + +type RegisterSSHKeyPairParams struct { + p map[string]interface{} +} + +func (p *RegisterSSHKeyPairParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["account"]; found { + u.Set("account", v.(string)) + } + if v, found := p.p["domainid"]; found { + u.Set("domainid", v.(string)) + } + if v, found := p.p["name"]; found { + u.Set("name", v.(string)) + } + if v, found := p.p["projectid"]; found { + u.Set("projectid", v.(string)) + } + if v, found := p.p["publickey"]; found { + u.Set("publickey", v.(string)) + } + return u +} + +func (p *RegisterSSHKeyPairParams) SetAccount(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["account"] = v + return +} + +func (p *RegisterSSHKeyPairParams) SetDomainid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["domainid"] = v + return +} + +func (p *RegisterSSHKeyPairParams) SetName(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["name"] = v + return +} + +func (p *RegisterSSHKeyPairParams) SetProjectid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["projectid"] = v + return +} + +func (p *RegisterSSHKeyPairParams) SetPublickey(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["publickey"] = v + return +} + +// You should always use this function to get a new RegisterSSHKeyPairParams instance, +// as then you are sure you have configured all required params +func (s *SSHService) NewRegisterSSHKeyPairParams(name string, publickey string) *RegisterSSHKeyPairParams { + p := &RegisterSSHKeyPairParams{} + p.p = make(map[string]interface{}) + p.p["name"] = name + p.p["publickey"] = publickey + return p +} + +// Register a public key in a keypair under a certain name +func (s *SSHService) RegisterSSHKeyPair(p *RegisterSSHKeyPairParams) (*RegisterSSHKeyPairResponse, error) { + resp, err := s.cs.newRequest("registerSSHKeyPair", p.toURLValues()) + if err != nil { + return nil, err + } + + if resp, err = getRawValue(resp); err != nil { + return nil, err + } + + var r RegisterSSHKeyPairResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + return &r, nil +} + +type RegisterSSHKeyPairResponse struct { + Fingerprint string `json:"fingerprint,omitempty"` + 
Name string `json:"name,omitempty"` +} + +type CreateSSHKeyPairParams struct { + p map[string]interface{} +} + +func (p *CreateSSHKeyPairParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["account"]; found { + u.Set("account", v.(string)) + } + if v, found := p.p["domainid"]; found { + u.Set("domainid", v.(string)) + } + if v, found := p.p["name"]; found { + u.Set("name", v.(string)) + } + if v, found := p.p["projectid"]; found { + u.Set("projectid", v.(string)) + } + return u +} + +func (p *CreateSSHKeyPairParams) SetAccount(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["account"] = v + return +} + +func (p *CreateSSHKeyPairParams) SetDomainid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["domainid"] = v + return +} + +func (p *CreateSSHKeyPairParams) SetName(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["name"] = v + return +} + +func (p *CreateSSHKeyPairParams) SetProjectid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["projectid"] = v + return +} + +// You should always use this function to get a new CreateSSHKeyPairParams instance, +// as then you are sure you have configured all required params +func (s *SSHService) NewCreateSSHKeyPairParams(name string) *CreateSSHKeyPairParams { + p := &CreateSSHKeyPairParams{} + p.p = make(map[string]interface{}) + p.p["name"] = name + return p +} + +// Create a new keypair and returns the private key +func (s *SSHService) CreateSSHKeyPair(p *CreateSSHKeyPairParams) (*CreateSSHKeyPairResponse, error) { + resp, err := s.cs.newRequest("createSSHKeyPair", p.toURLValues()) + if err != nil { + return nil, err + } + + if resp, err = getRawValue(resp); err != nil { + return nil, err + } + + var r CreateSSHKeyPairResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + return &r, nil +} + +type CreateSSHKeyPairResponse struct { + Privatekey string `json:"privatekey,omitempty"` +} + +type DeleteSSHKeyPairParams struct { + p map[string]interface{} +} + +func (p *DeleteSSHKeyPairParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["account"]; found { + u.Set("account", v.(string)) + } + if v, found := p.p["domainid"]; found { + u.Set("domainid", v.(string)) + } + if v, found := p.p["name"]; found { + u.Set("name", v.(string)) + } + if v, found := p.p["projectid"]; found { + u.Set("projectid", v.(string)) + } + return u +} + +func (p *DeleteSSHKeyPairParams) SetAccount(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["account"] = v + return +} + +func (p *DeleteSSHKeyPairParams) SetDomainid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["domainid"] = v + return +} + +func (p *DeleteSSHKeyPairParams) SetName(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["name"] = v + return +} + +func (p *DeleteSSHKeyPairParams) SetProjectid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["projectid"] = v + return +} + +// You should always use this function to get a new DeleteSSHKeyPairParams instance, +// as then you are sure you have configured all required params +func (s *SSHService) NewDeleteSSHKeyPairParams(name string) *DeleteSSHKeyPairParams { + p := &DeleteSSHKeyPairParams{} + p.p = make(map[string]interface{}) + p.p["name"] = name + return p +} + +// Deletes a keypair by name +func (s 
*SSHService) DeleteSSHKeyPair(p *DeleteSSHKeyPairParams) (*DeleteSSHKeyPairResponse, error) { + resp, err := s.cs.newRequest("deleteSSHKeyPair", p.toURLValues()) + if err != nil { + return nil, err + } + + var r DeleteSSHKeyPairResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + return &r, nil +} + +type DeleteSSHKeyPairResponse struct { + Displaytext string `json:"displaytext,omitempty"` + Success string `json:"success,omitempty"` +} + +type ListSSHKeyPairsParams struct { + p map[string]interface{} +} + +func (p *ListSSHKeyPairsParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["account"]; found { + u.Set("account", v.(string)) + } + if v, found := p.p["domainid"]; found { + u.Set("domainid", v.(string)) + } + if v, found := p.p["fingerprint"]; found { + u.Set("fingerprint", v.(string)) + } + if v, found := p.p["isrecursive"]; found { + vv := strconv.FormatBool(v.(bool)) + u.Set("isrecursive", vv) + } + if v, found := p.p["keyword"]; found { + u.Set("keyword", v.(string)) + } + if v, found := p.p["listall"]; found { + vv := strconv.FormatBool(v.(bool)) + u.Set("listall", vv) + } + if v, found := p.p["name"]; found { + u.Set("name", v.(string)) + } + if v, found := p.p["page"]; found { + vv := strconv.Itoa(v.(int)) + u.Set("page", vv) + } + if v, found := p.p["pagesize"]; found { + vv := strconv.Itoa(v.(int)) + u.Set("pagesize", vv) + } + if v, found := p.p["projectid"]; found { + u.Set("projectid", v.(string)) + } + return u +} + +func (p *ListSSHKeyPairsParams) SetAccount(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["account"] = v + return +} + +func (p *ListSSHKeyPairsParams) SetDomainid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["domainid"] = v + return +} + +func (p *ListSSHKeyPairsParams) SetFingerprint(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["fingerprint"] = v + return +} + +func (p *ListSSHKeyPairsParams) SetIsrecursive(v bool) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["isrecursive"] = v + return +} + +func (p *ListSSHKeyPairsParams) SetKeyword(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["keyword"] = v + return +} + +func (p *ListSSHKeyPairsParams) SetListall(v bool) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["listall"] = v + return +} + +func (p *ListSSHKeyPairsParams) SetName(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["name"] = v + return +} + +func (p *ListSSHKeyPairsParams) SetPage(v int) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["page"] = v + return +} + +func (p *ListSSHKeyPairsParams) SetPagesize(v int) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["pagesize"] = v + return +} + +func (p *ListSSHKeyPairsParams) SetProjectid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["projectid"] = v + return +} + +// You should always use this function to get a new ListSSHKeyPairsParams instance, +// as then you are sure you have configured all required params +func (s *SSHService) NewListSSHKeyPairsParams() *ListSSHKeyPairsParams { + p := &ListSSHKeyPairsParams{} + p.p = make(map[string]interface{}) + return p +} + +// List registered keypairs +func (s *SSHService) ListSSHKeyPairs(p *ListSSHKeyPairsParams) (*ListSSHKeyPairsResponse, error) { + resp, err := s.cs.newRequest("listSSHKeyPairs", p.toURLValues()) + 
if err != nil { + return nil, err + } + + var r ListSSHKeyPairsResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + return &r, nil +} + +type ListSSHKeyPairsResponse struct { + Count int `json:"count"` + SSHKeyPairs []*SSHKeyPair `json:"sshkeypair"` +} + +type SSHKeyPair struct { + Fingerprint string `json:"fingerprint,omitempty"` + Name string `json:"name,omitempty"` +} diff --git a/vendor/github.com/xanzy/go-cloudstack/cloudstack/SecurityGroupService.go b/vendor/github.com/xanzy/go-cloudstack/cloudstack/SecurityGroupService.go new file mode 100644 index 000000000..282cd64bd --- /dev/null +++ b/vendor/github.com/xanzy/go-cloudstack/cloudstack/SecurityGroupService.go @@ -0,0 +1,1199 @@ +// +// Copyright 2014, Sander van Harmelen +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package cloudstack + +import ( + "encoding/json" + "fmt" + "net/url" + "strconv" + "strings" +) + +type CreateSecurityGroupParams struct { + p map[string]interface{} +} + +func (p *CreateSecurityGroupParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["account"]; found { + u.Set("account", v.(string)) + } + if v, found := p.p["description"]; found { + u.Set("description", v.(string)) + } + if v, found := p.p["domainid"]; found { + u.Set("domainid", v.(string)) + } + if v, found := p.p["name"]; found { + u.Set("name", v.(string)) + } + if v, found := p.p["projectid"]; found { + u.Set("projectid", v.(string)) + } + return u +} + +func (p *CreateSecurityGroupParams) SetAccount(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["account"] = v + return +} + +func (p *CreateSecurityGroupParams) SetDescription(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["description"] = v + return +} + +func (p *CreateSecurityGroupParams) SetDomainid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["domainid"] = v + return +} + +func (p *CreateSecurityGroupParams) SetName(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["name"] = v + return +} + +func (p *CreateSecurityGroupParams) SetProjectid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["projectid"] = v + return +} + +// You should always use this function to get a new CreateSecurityGroupParams instance, +// as then you are sure you have configured all required params +func (s *SecurityGroupService) NewCreateSecurityGroupParams(name string) *CreateSecurityGroupParams { + p := &CreateSecurityGroupParams{} + p.p = make(map[string]interface{}) + p.p["name"] = name + return p +} + +// Creates a security group +func (s *SecurityGroupService) CreateSecurityGroup(p *CreateSecurityGroupParams) (*CreateSecurityGroupResponse, error) { + resp, err := s.cs.newRequest("createSecurityGroup", p.toURLValues()) + if err != nil { + return nil, err + } + + var r CreateSecurityGroupResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + 
return &r, nil +} + +type CreateSecurityGroupResponse struct { + Account string `json:"account,omitempty"` + Description string `json:"description,omitempty"` + Domain string `json:"domain,omitempty"` + Domainid string `json:"domainid,omitempty"` + Egressrule []struct { + Account string `json:"account,omitempty"` + Cidr string `json:"cidr,omitempty"` + Endport int `json:"endport,omitempty"` + Icmpcode int `json:"icmpcode,omitempty"` + Icmptype int `json:"icmptype,omitempty"` + Protocol string `json:"protocol,omitempty"` + Ruleid string `json:"ruleid,omitempty"` + Securitygroupname string `json:"securitygroupname,omitempty"` + Startport int `json:"startport,omitempty"` + Tags []struct { + Account string `json:"account,omitempty"` + Customer string `json:"customer,omitempty"` + Domain string `json:"domain,omitempty"` + Domainid string `json:"domainid,omitempty"` + Key string `json:"key,omitempty"` + Project string `json:"project,omitempty"` + Projectid string `json:"projectid,omitempty"` + Resourceid string `json:"resourceid,omitempty"` + Resourcetype string `json:"resourcetype,omitempty"` + Value string `json:"value,omitempty"` + } `json:"tags,omitempty"` + } `json:"egressrule,omitempty"` + Id string `json:"id,omitempty"` + Ingressrule []struct { + Account string `json:"account,omitempty"` + Cidr string `json:"cidr,omitempty"` + Endport int `json:"endport,omitempty"` + Icmpcode int `json:"icmpcode,omitempty"` + Icmptype int `json:"icmptype,omitempty"` + Protocol string `json:"protocol,omitempty"` + Ruleid string `json:"ruleid,omitempty"` + Securitygroupname string `json:"securitygroupname,omitempty"` + Startport int `json:"startport,omitempty"` + Tags []struct { + Account string `json:"account,omitempty"` + Customer string `json:"customer,omitempty"` + Domain string `json:"domain,omitempty"` + Domainid string `json:"domainid,omitempty"` + Key string `json:"key,omitempty"` + Project string `json:"project,omitempty"` + Projectid string `json:"projectid,omitempty"` + Resourceid string `json:"resourceid,omitempty"` + Resourcetype string `json:"resourcetype,omitempty"` + Value string `json:"value,omitempty"` + } `json:"tags,omitempty"` + } `json:"ingressrule,omitempty"` + Name string `json:"name,omitempty"` + Project string `json:"project,omitempty"` + Projectid string `json:"projectid,omitempty"` + Tags []struct { + Account string `json:"account,omitempty"` + Customer string `json:"customer,omitempty"` + Domain string `json:"domain,omitempty"` + Domainid string `json:"domainid,omitempty"` + Key string `json:"key,omitempty"` + Project string `json:"project,omitempty"` + Projectid string `json:"projectid,omitempty"` + Resourceid string `json:"resourceid,omitempty"` + Resourcetype string `json:"resourcetype,omitempty"` + Value string `json:"value,omitempty"` + } `json:"tags,omitempty"` +} + +type DeleteSecurityGroupParams struct { + p map[string]interface{} +} + +func (p *DeleteSecurityGroupParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["account"]; found { + u.Set("account", v.(string)) + } + if v, found := p.p["domainid"]; found { + u.Set("domainid", v.(string)) + } + if v, found := p.p["id"]; found { + u.Set("id", v.(string)) + } + if v, found := p.p["name"]; found { + u.Set("name", v.(string)) + } + if v, found := p.p["projectid"]; found { + u.Set("projectid", v.(string)) + } + return u +} + +func (p *DeleteSecurityGroupParams) SetAccount(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["account"] = v + 
return +} + +func (p *DeleteSecurityGroupParams) SetDomainid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["domainid"] = v + return +} + +func (p *DeleteSecurityGroupParams) SetId(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["id"] = v + return +} + +func (p *DeleteSecurityGroupParams) SetName(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["name"] = v + return +} + +func (p *DeleteSecurityGroupParams) SetProjectid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["projectid"] = v + return +} + +// You should always use this function to get a new DeleteSecurityGroupParams instance, +// as then you are sure you have configured all required params +func (s *SecurityGroupService) NewDeleteSecurityGroupParams() *DeleteSecurityGroupParams { + p := &DeleteSecurityGroupParams{} + p.p = make(map[string]interface{}) + return p +} + +// Deletes security group +func (s *SecurityGroupService) DeleteSecurityGroup(p *DeleteSecurityGroupParams) (*DeleteSecurityGroupResponse, error) { + resp, err := s.cs.newRequest("deleteSecurityGroup", p.toURLValues()) + if err != nil { + return nil, err + } + + var r DeleteSecurityGroupResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + return &r, nil +} + +type DeleteSecurityGroupResponse struct { + Displaytext string `json:"displaytext,omitempty"` + Success string `json:"success,omitempty"` +} + +type AuthorizeSecurityGroupIngressParams struct { + p map[string]interface{} +} + +func (p *AuthorizeSecurityGroupIngressParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["account"]; found { + u.Set("account", v.(string)) + } + if v, found := p.p["cidrlist"]; found { + vv := strings.Join(v.([]string), ",") + u.Set("cidrlist", vv) + } + if v, found := p.p["domainid"]; found { + u.Set("domainid", v.(string)) + } + if v, found := p.p["endport"]; found { + vv := strconv.Itoa(v.(int)) + u.Set("endport", vv) + } + if v, found := p.p["icmpcode"]; found { + vv := strconv.Itoa(v.(int)) + u.Set("icmpcode", vv) + } + if v, found := p.p["icmptype"]; found { + vv := strconv.Itoa(v.(int)) + u.Set("icmptype", vv) + } + if v, found := p.p["projectid"]; found { + u.Set("projectid", v.(string)) + } + if v, found := p.p["protocol"]; found { + u.Set("protocol", v.(string)) + } + if v, found := p.p["securitygroupid"]; found { + u.Set("securitygroupid", v.(string)) + } + if v, found := p.p["securitygroupname"]; found { + u.Set("securitygroupname", v.(string)) + } + if v, found := p.p["startport"]; found { + vv := strconv.Itoa(v.(int)) + u.Set("startport", vv) + } + if v, found := p.p["usersecuritygrouplist"]; found { + i := 0 + for k, vv := range v.(map[string]string) { + u.Set(fmt.Sprintf("usersecuritygrouplist[%d].key", i), k) + u.Set(fmt.Sprintf("usersecuritygrouplist[%d].value", i), vv) + i++ + } + } + return u +} + +func (p *AuthorizeSecurityGroupIngressParams) SetAccount(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["account"] = v + return +} + +func (p *AuthorizeSecurityGroupIngressParams) SetCidrlist(v []string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["cidrlist"] = v + return +} + +func (p *AuthorizeSecurityGroupIngressParams) SetDomainid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["domainid"] = v + return +} + +func (p *AuthorizeSecurityGroupIngressParams) SetEndport(v int) { + if p.p == nil { 
+		p.p = make(map[string]interface{})
+	}
+	p.p["endport"] = v
+	return
+}
+
+func (p *AuthorizeSecurityGroupIngressParams) SetIcmpcode(v int) {
+	if p.p == nil {
+		p.p = make(map[string]interface{})
+	}
+	p.p["icmpcode"] = v
+	return
+}
+
+func (p *AuthorizeSecurityGroupIngressParams) SetIcmptype(v int) {
+	if p.p == nil {
+		p.p = make(map[string]interface{})
+	}
+	p.p["icmptype"] = v
+	return
+}
+
+func (p *AuthorizeSecurityGroupIngressParams) SetProjectid(v string) {
+	if p.p == nil {
+		p.p = make(map[string]interface{})
+	}
+	p.p["projectid"] = v
+	return
+}
+
+func (p *AuthorizeSecurityGroupIngressParams) SetProtocol(v string) {
+	if p.p == nil {
+		p.p = make(map[string]interface{})
+	}
+	p.p["protocol"] = v
+	return
+}
+
+func (p *AuthorizeSecurityGroupIngressParams) SetSecuritygroupid(v string) {
+	if p.p == nil {
+		p.p = make(map[string]interface{})
+	}
+	p.p["securitygroupid"] = v
+	return
+}
+
+func (p *AuthorizeSecurityGroupIngressParams) SetSecuritygroupname(v string) {
+	if p.p == nil {
+		p.p = make(map[string]interface{})
+	}
+	p.p["securitygroupname"] = v
+	return
+}
+
+func (p *AuthorizeSecurityGroupIngressParams) SetStartport(v int) {
+	if p.p == nil {
+		p.p = make(map[string]interface{})
+	}
+	p.p["startport"] = v
+	return
+}
+
+func (p *AuthorizeSecurityGroupIngressParams) SetUsersecuritygrouplist(v map[string]string) {
+	if p.p == nil {
+		p.p = make(map[string]interface{})
+	}
+	p.p["usersecuritygrouplist"] = v
+	return
+}
+
+// You should always use this function to get a new AuthorizeSecurityGroupIngressParams instance,
+// as then you are sure you have configured all required params
+func (s *SecurityGroupService) NewAuthorizeSecurityGroupIngressParams() *AuthorizeSecurityGroupIngressParams {
+	p := &AuthorizeSecurityGroupIngressParams{}
+	p.p = make(map[string]interface{})
+	return p
+}
+
+// Authorizes a particular ingress rule for this security group
+func (s *SecurityGroupService) AuthorizeSecurityGroupIngress(p *AuthorizeSecurityGroupIngressParams) (*AuthorizeSecurityGroupIngressResponse, error) {
+	resp, err := s.cs.newRequest("authorizeSecurityGroupIngress", p.toURLValues())
+	if err != nil {
+		return nil, err
+	}
+
+	var r AuthorizeSecurityGroupIngressResponse
+	if err := json.Unmarshal(resp, &r); err != nil {
+		return nil, err
+	}
+
+	// If we have an async client, we need to wait for the async result
+	if s.cs.async {
+		b, err := s.cs.GetAsyncJobResult(r.JobID, s.cs.timeout)
+		if err != nil {
+			if err == AsyncTimeoutErr {
+				return &r, err
+			}
+			return nil, err
+		}
+
+		b, err = getRawValue(b)
+		if err != nil {
+			return nil, err
+		}
+
+		if err := json.Unmarshal(b, &r); err != nil {
+			return nil, err
+		}
+	}
+	return &r, nil
+}
+
+type AuthorizeSecurityGroupIngressResponse struct {
+	JobID             string `json:"jobid,omitempty"`
+	Account           string `json:"account,omitempty"`
+	Cidr              string `json:"cidr,omitempty"`
+	Endport           int    `json:"endport,omitempty"`
+	Icmpcode          int    `json:"icmpcode,omitempty"`
+	Icmptype          int    `json:"icmptype,omitempty"`
+	Protocol          string `json:"protocol,omitempty"`
+	Ruleid            string `json:"ruleid,omitempty"`
+	Securitygroupname string `json:"securitygroupname,omitempty"`
+	Startport         int    `json:"startport,omitempty"`
+	Tags              []struct {
+		Account      string `json:"account,omitempty"`
+		Customer     string `json:"customer,omitempty"`
+		Domain       string `json:"domain,omitempty"`
+		Domainid     string `json:"domainid,omitempty"`
+		Key          string `json:"key,omitempty"`
+		Project      string `json:"project,omitempty"`
+		Projectid    string `json:"projectid,omitempty"`
+		Resourceid   string `json:"resourceid,omitempty"`
`json:"resourceid,omitempty"` + Resourcetype string `json:"resourcetype,omitempty"` + Value string `json:"value,omitempty"` + } `json:"tags,omitempty"` +} + +type RevokeSecurityGroupIngressParams struct { + p map[string]interface{} +} + +func (p *RevokeSecurityGroupIngressParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["id"]; found { + u.Set("id", v.(string)) + } + return u +} + +func (p *RevokeSecurityGroupIngressParams) SetId(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["id"] = v + return +} + +// You should always use this function to get a new RevokeSecurityGroupIngressParams instance, +// as then you are sure you have configured all required params +func (s *SecurityGroupService) NewRevokeSecurityGroupIngressParams(id string) *RevokeSecurityGroupIngressParams { + p := &RevokeSecurityGroupIngressParams{} + p.p = make(map[string]interface{}) + p.p["id"] = id + return p +} + +// Deletes a particular ingress rule from this security group +func (s *SecurityGroupService) RevokeSecurityGroupIngress(p *RevokeSecurityGroupIngressParams) (*RevokeSecurityGroupIngressResponse, error) { + resp, err := s.cs.newRequest("revokeSecurityGroupIngress", p.toURLValues()) + if err != nil { + return nil, err + } + + var r RevokeSecurityGroupIngressResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + + // If we have a async client, we need to wait for the async result + if s.cs.async { + b, err := s.cs.GetAsyncJobResult(r.JobID, s.cs.timeout) + if err != nil { + if err == AsyncTimeoutErr { + return &r, err + } + return nil, err + } + + if err := json.Unmarshal(b, &r); err != nil { + return nil, err + } + } + return &r, nil +} + +type RevokeSecurityGroupIngressResponse struct { + JobID string `json:"jobid,omitempty"` + Displaytext string `json:"displaytext,omitempty"` + Success bool `json:"success,omitempty"` +} + +type AuthorizeSecurityGroupEgressParams struct { + p map[string]interface{} +} + +func (p *AuthorizeSecurityGroupEgressParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["account"]; found { + u.Set("account", v.(string)) + } + if v, found := p.p["cidrlist"]; found { + vv := strings.Join(v.([]string), ",") + u.Set("cidrlist", vv) + } + if v, found := p.p["domainid"]; found { + u.Set("domainid", v.(string)) + } + if v, found := p.p["endport"]; found { + vv := strconv.Itoa(v.(int)) + u.Set("endport", vv) + } + if v, found := p.p["icmpcode"]; found { + vv := strconv.Itoa(v.(int)) + u.Set("icmpcode", vv) + } + if v, found := p.p["icmptype"]; found { + vv := strconv.Itoa(v.(int)) + u.Set("icmptype", vv) + } + if v, found := p.p["projectid"]; found { + u.Set("projectid", v.(string)) + } + if v, found := p.p["protocol"]; found { + u.Set("protocol", v.(string)) + } + if v, found := p.p["securitygroupid"]; found { + u.Set("securitygroupid", v.(string)) + } + if v, found := p.p["securitygroupname"]; found { + u.Set("securitygroupname", v.(string)) + } + if v, found := p.p["startport"]; found { + vv := strconv.Itoa(v.(int)) + u.Set("startport", vv) + } + if v, found := p.p["usersecuritygrouplist"]; found { + i := 0 + for k, vv := range v.(map[string]string) { + u.Set(fmt.Sprintf("usersecuritygrouplist[%d].key", i), k) + u.Set(fmt.Sprintf("usersecuritygrouplist[%d].value", i), vv) + i++ + } + } + return u +} + +func (p *AuthorizeSecurityGroupEgressParams) SetAccount(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) 
+ } + p.p["account"] = v + return +} + +func (p *AuthorizeSecurityGroupEgressParams) SetCidrlist(v []string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["cidrlist"] = v + return +} + +func (p *AuthorizeSecurityGroupEgressParams) SetDomainid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["domainid"] = v + return +} + +func (p *AuthorizeSecurityGroupEgressParams) SetEndport(v int) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["endport"] = v + return +} + +func (p *AuthorizeSecurityGroupEgressParams) SetIcmpcode(v int) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["icmpcode"] = v + return +} + +func (p *AuthorizeSecurityGroupEgressParams) SetIcmptype(v int) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["icmptype"] = v + return +} + +func (p *AuthorizeSecurityGroupEgressParams) SetProjectid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["projectid"] = v + return +} + +func (p *AuthorizeSecurityGroupEgressParams) SetProtocol(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["protocol"] = v + return +} + +func (p *AuthorizeSecurityGroupEgressParams) SetSecuritygroupid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["securitygroupid"] = v + return +} + +func (p *AuthorizeSecurityGroupEgressParams) SetSecuritygroupname(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["securitygroupname"] = v + return +} + +func (p *AuthorizeSecurityGroupEgressParams) SetStartport(v int) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["startport"] = v + return +} + +func (p *AuthorizeSecurityGroupEgressParams) SetUsersecuritygrouplist(v map[string]string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["usersecuritygrouplist"] = v + return +} + +// You should always use this function to get a new AuthorizeSecurityGroupEgressParams instance, +// as then you are sure you have configured all required params +func (s *SecurityGroupService) NewAuthorizeSecurityGroupEgressParams() *AuthorizeSecurityGroupEgressParams { + p := &AuthorizeSecurityGroupEgressParams{} + p.p = make(map[string]interface{}) + return p +} + +// Authorizes a particular egress rule for this security group +func (s *SecurityGroupService) AuthorizeSecurityGroupEgress(p *AuthorizeSecurityGroupEgressParams) (*AuthorizeSecurityGroupEgressResponse, error) { + resp, err := s.cs.newRequest("authorizeSecurityGroupEgress", p.toURLValues()) + if err != nil { + return nil, err + } + + var r AuthorizeSecurityGroupEgressResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + + // If we have a async client, we need to wait for the async result + if s.cs.async { + b, err := s.cs.GetAsyncJobResult(r.JobID, s.cs.timeout) + if err != nil { + if err == AsyncTimeoutErr { + return &r, err + } + return nil, err + } + + b, err = getRawValue(b) + if err != nil { + return nil, err + } + + if err := json.Unmarshal(b, &r); err != nil { + return nil, err + } + } + return &r, nil +} + +type AuthorizeSecurityGroupEgressResponse struct { + JobID string `json:"jobid,omitempty"` + Account string `json:"account,omitempty"` + Cidr string `json:"cidr,omitempty"` + Endport int `json:"endport,omitempty"` + Icmpcode int `json:"icmpcode,omitempty"` + Icmptype int `json:"icmptype,omitempty"` + Protocol string `json:"protocol,omitempty"` + Ruleid string `json:"ruleid,omitempty"` + Securitygroupname 
string `json:"securitygroupname,omitempty"` + Startport int `json:"startport,omitempty"` + Tags []struct { + Account string `json:"account,omitempty"` + Customer string `json:"customer,omitempty"` + Domain string `json:"domain,omitempty"` + Domainid string `json:"domainid,omitempty"` + Key string `json:"key,omitempty"` + Project string `json:"project,omitempty"` + Projectid string `json:"projectid,omitempty"` + Resourceid string `json:"resourceid,omitempty"` + Resourcetype string `json:"resourcetype,omitempty"` + Value string `json:"value,omitempty"` + } `json:"tags,omitempty"` +} + +type RevokeSecurityGroupEgressParams struct { + p map[string]interface{} +} + +func (p *RevokeSecurityGroupEgressParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["id"]; found { + u.Set("id", v.(string)) + } + return u +} + +func (p *RevokeSecurityGroupEgressParams) SetId(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["id"] = v + return +} + +// You should always use this function to get a new RevokeSecurityGroupEgressParams instance, +// as then you are sure you have configured all required params +func (s *SecurityGroupService) NewRevokeSecurityGroupEgressParams(id string) *RevokeSecurityGroupEgressParams { + p := &RevokeSecurityGroupEgressParams{} + p.p = make(map[string]interface{}) + p.p["id"] = id + return p +} + +// Deletes a particular egress rule from this security group +func (s *SecurityGroupService) RevokeSecurityGroupEgress(p *RevokeSecurityGroupEgressParams) (*RevokeSecurityGroupEgressResponse, error) { + resp, err := s.cs.newRequest("revokeSecurityGroupEgress", p.toURLValues()) + if err != nil { + return nil, err + } + + var r RevokeSecurityGroupEgressResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + + // If we have a async client, we need to wait for the async result + if s.cs.async { + b, err := s.cs.GetAsyncJobResult(r.JobID, s.cs.timeout) + if err != nil { + if err == AsyncTimeoutErr { + return &r, err + } + return nil, err + } + + if err := json.Unmarshal(b, &r); err != nil { + return nil, err + } + } + return &r, nil +} + +type RevokeSecurityGroupEgressResponse struct { + JobID string `json:"jobid,omitempty"` + Displaytext string `json:"displaytext,omitempty"` + Success bool `json:"success,omitempty"` +} + +type ListSecurityGroupsParams struct { + p map[string]interface{} +} + +func (p *ListSecurityGroupsParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["account"]; found { + u.Set("account", v.(string)) + } + if v, found := p.p["domainid"]; found { + u.Set("domainid", v.(string)) + } + if v, found := p.p["id"]; found { + u.Set("id", v.(string)) + } + if v, found := p.p["isrecursive"]; found { + vv := strconv.FormatBool(v.(bool)) + u.Set("isrecursive", vv) + } + if v, found := p.p["keyword"]; found { + u.Set("keyword", v.(string)) + } + if v, found := p.p["listall"]; found { + vv := strconv.FormatBool(v.(bool)) + u.Set("listall", vv) + } + if v, found := p.p["page"]; found { + vv := strconv.Itoa(v.(int)) + u.Set("page", vv) + } + if v, found := p.p["pagesize"]; found { + vv := strconv.Itoa(v.(int)) + u.Set("pagesize", vv) + } + if v, found := p.p["projectid"]; found { + u.Set("projectid", v.(string)) + } + if v, found := p.p["securitygroupname"]; found { + u.Set("securitygroupname", v.(string)) + } + if v, found := p.p["tags"]; found { + i := 0 + for k, vv := range v.(map[string]string) { + 
u.Set(fmt.Sprintf("tags[%d].key", i), k) + u.Set(fmt.Sprintf("tags[%d].value", i), vv) + i++ + } + } + if v, found := p.p["virtualmachineid"]; found { + u.Set("virtualmachineid", v.(string)) + } + return u +} + +func (p *ListSecurityGroupsParams) SetAccount(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["account"] = v + return +} + +func (p *ListSecurityGroupsParams) SetDomainid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["domainid"] = v + return +} + +func (p *ListSecurityGroupsParams) SetId(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["id"] = v + return +} + +func (p *ListSecurityGroupsParams) SetIsrecursive(v bool) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["isrecursive"] = v + return +} + +func (p *ListSecurityGroupsParams) SetKeyword(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["keyword"] = v + return +} + +func (p *ListSecurityGroupsParams) SetListall(v bool) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["listall"] = v + return +} + +func (p *ListSecurityGroupsParams) SetPage(v int) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["page"] = v + return +} + +func (p *ListSecurityGroupsParams) SetPagesize(v int) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["pagesize"] = v + return +} + +func (p *ListSecurityGroupsParams) SetProjectid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["projectid"] = v + return +} + +func (p *ListSecurityGroupsParams) SetSecuritygroupname(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["securitygroupname"] = v + return +} + +func (p *ListSecurityGroupsParams) SetTags(v map[string]string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["tags"] = v + return +} + +func (p *ListSecurityGroupsParams) SetVirtualmachineid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["virtualmachineid"] = v + return +} + +// You should always use this function to get a new ListSecurityGroupsParams instance, +// as then you are sure you have configured all required params +func (s *SecurityGroupService) NewListSecurityGroupsParams() *ListSecurityGroupsParams { + p := &ListSecurityGroupsParams{} + p.p = make(map[string]interface{}) + return p +} + +// This is a courtesy helper function, which in some cases may not work as expected! +func (s *SecurityGroupService) GetSecurityGroupID(keyword string) (string, error) { + p := &ListSecurityGroupsParams{} + p.p = make(map[string]interface{}) + + p.p["keyword"] = keyword + + l, err := s.ListSecurityGroups(p) + if err != nil { + return "", err + } + + if l.Count == 0 { + // If no matches, search all projects + p.p["projectid"] = "-1" + + l, err = s.ListSecurityGroups(p) + if err != nil { + return "", err + } + } + + if l.Count == 0 { + return "", fmt.Errorf("No match found for %s: %+v", keyword, l) + } + + if l.Count == 1 { + return l.SecurityGroups[0].Id, nil + } + + if l.Count > 1 { + for _, v := range l.SecurityGroups { + if v.Name == keyword { + return v.Id, nil + } + } + } + return "", fmt.Errorf("Could not find an exact match for %s: %+v", keyword, l) +} + +// This is a courtesy helper function, which in some cases may not work as expected! 
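The pattern repeated throughout this vendored file is: construct a typed params object with the New*Params helper, populate optional fields through its Set* methods, then pass it to the matching service call. A minimal sketch of authorizing an ingress rule, assuming the client wiring from this package's generated client (NewAsyncClient and per-service fields such as cs.SecurityGroup, which live outside this hunk) and placeholder endpoint, keys, and group name:

package main

import (
	"fmt"
	"log"

	"github.com/xanzy/go-cloudstack/cloudstack"
)

func main() {
	// With the async client, calls such as AuthorizeSecurityGroupIngress
	// block until the CloudStack async job finishes (see the s.cs.async
	// branches in this file).
	cs := cloudstack.NewAsyncClient("https://cloud.example.com/client/api",
		"API_KEY", "SECRET_KEY", false)

	// Resolve the group ID by name via the courtesy helper above.
	id, err := cs.SecurityGroup.GetSecurityGroupID("web")
	if err != nil {
		log.Fatal(err)
	}

	// Open TCP/80 from anywhere on that group.
	p := cs.SecurityGroup.NewAuthorizeSecurityGroupIngressParams()
	p.SetSecuritygroupid(id)
	p.SetProtocol("TCP")
	p.SetStartport(80)
	p.SetEndport(80)
	p.SetCidrlist([]string{"0.0.0.0/0"})

	r, err := cs.SecurityGroup.AuthorizeSecurityGroupIngress(p)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("authorized rule %s\n", r.Ruleid)
}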
+func (s *SecurityGroupService) GetSecurityGroupByName(name string) (*SecurityGroup, int, error) { + id, err := s.GetSecurityGroupID(name) + if err != nil { + return nil, -1, err + } + + r, count, err := s.GetSecurityGroupByID(id) + if err != nil { + return nil, count, err + } + return r, count, nil +} + +// This is a courtesy helper function, which in some cases may not work as expected! +func (s *SecurityGroupService) GetSecurityGroupByID(id string) (*SecurityGroup, int, error) { + p := &ListSecurityGroupsParams{} + p.p = make(map[string]interface{}) + + p.p["id"] = id + + l, err := s.ListSecurityGroups(p) + if err != nil { + if strings.Contains(err.Error(), fmt.Sprintf( + "Invalid parameter id value=%s due to incorrect long value format, "+ + "or entity does not exist", id)) { + return nil, 0, fmt.Errorf("No match found for %s: %+v", id, l) + } + return nil, -1, err + } + + if l.Count == 0 { + // If no matches, search all projects + p.p["projectid"] = "-1" + + l, err = s.ListSecurityGroups(p) + if err != nil { + if strings.Contains(err.Error(), fmt.Sprintf( + "Invalid parameter id value=%s due to incorrect long value format, "+ + "or entity does not exist", id)) { + return nil, 0, fmt.Errorf("No match found for %s: %+v", id, l) + } + return nil, -1, err + } + } + + if l.Count == 0 { + return nil, l.Count, fmt.Errorf("No match found for %s: %+v", id, l) + } + + if l.Count == 1 { + return l.SecurityGroups[0], l.Count, nil + } + return nil, l.Count, fmt.Errorf("There is more then one result for SecurityGroup UUID: %s!", id) +} + +// Lists security groups +func (s *SecurityGroupService) ListSecurityGroups(p *ListSecurityGroupsParams) (*ListSecurityGroupsResponse, error) { + resp, err := s.cs.newRequest("listSecurityGroups", p.toURLValues()) + if err != nil { + return nil, err + } + + var r ListSecurityGroupsResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + return &r, nil +} + +type ListSecurityGroupsResponse struct { + Count int `json:"count"` + SecurityGroups []*SecurityGroup `json:"securitygroup"` +} + +type SecurityGroup struct { + Account string `json:"account,omitempty"` + Description string `json:"description,omitempty"` + Domain string `json:"domain,omitempty"` + Domainid string `json:"domainid,omitempty"` + Egressrule []struct { + Account string `json:"account,omitempty"` + Cidr string `json:"cidr,omitempty"` + Endport int `json:"endport,omitempty"` + Icmpcode int `json:"icmpcode,omitempty"` + Icmptype int `json:"icmptype,omitempty"` + Protocol string `json:"protocol,omitempty"` + Ruleid string `json:"ruleid,omitempty"` + Securitygroupname string `json:"securitygroupname,omitempty"` + Startport int `json:"startport,omitempty"` + Tags []struct { + Account string `json:"account,omitempty"` + Customer string `json:"customer,omitempty"` + Domain string `json:"domain,omitempty"` + Domainid string `json:"domainid,omitempty"` + Key string `json:"key,omitempty"` + Project string `json:"project,omitempty"` + Projectid string `json:"projectid,omitempty"` + Resourceid string `json:"resourceid,omitempty"` + Resourcetype string `json:"resourcetype,omitempty"` + Value string `json:"value,omitempty"` + } `json:"tags,omitempty"` + } `json:"egressrule,omitempty"` + Id string `json:"id,omitempty"` + Ingressrule []struct { + Account string `json:"account,omitempty"` + Cidr string `json:"cidr,omitempty"` + Endport int `json:"endport,omitempty"` + Icmpcode int `json:"icmpcode,omitempty"` + Icmptype int `json:"icmptype,omitempty"` + Protocol string 
`json:"protocol,omitempty"` + Ruleid string `json:"ruleid,omitempty"` + Securitygroupname string `json:"securitygroupname,omitempty"` + Startport int `json:"startport,omitempty"` + Tags []struct { + Account string `json:"account,omitempty"` + Customer string `json:"customer,omitempty"` + Domain string `json:"domain,omitempty"` + Domainid string `json:"domainid,omitempty"` + Key string `json:"key,omitempty"` + Project string `json:"project,omitempty"` + Projectid string `json:"projectid,omitempty"` + Resourceid string `json:"resourceid,omitempty"` + Resourcetype string `json:"resourcetype,omitempty"` + Value string `json:"value,omitempty"` + } `json:"tags,omitempty"` + } `json:"ingressrule,omitempty"` + Name string `json:"name,omitempty"` + Project string `json:"project,omitempty"` + Projectid string `json:"projectid,omitempty"` + Tags []struct { + Account string `json:"account,omitempty"` + Customer string `json:"customer,omitempty"` + Domain string `json:"domain,omitempty"` + Domainid string `json:"domainid,omitempty"` + Key string `json:"key,omitempty"` + Project string `json:"project,omitempty"` + Projectid string `json:"projectid,omitempty"` + Resourceid string `json:"resourceid,omitempty"` + Resourcetype string `json:"resourcetype,omitempty"` + Value string `json:"value,omitempty"` + } `json:"tags,omitempty"` +} diff --git a/vendor/github.com/xanzy/go-cloudstack/cloudstack/ServiceOfferingService.go b/vendor/github.com/xanzy/go-cloudstack/cloudstack/ServiceOfferingService.go new file mode 100644 index 000000000..500064d8f --- /dev/null +++ b/vendor/github.com/xanzy/go-cloudstack/cloudstack/ServiceOfferingService.go @@ -0,0 +1,803 @@ +// +// Copyright 2014, Sander van Harmelen +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// + +package cloudstack + +import ( + "encoding/json" + "fmt" + "net/url" + "strconv" + "strings" +) + +type CreateServiceOfferingParams struct { + p map[string]interface{} +} + +func (p *CreateServiceOfferingParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["bytesreadrate"]; found { + vv := strconv.FormatInt(v.(int64), 10) + u.Set("bytesreadrate", vv) + } + if v, found := p.p["byteswriterate"]; found { + vv := strconv.FormatInt(v.(int64), 10) + u.Set("byteswriterate", vv) + } + if v, found := p.p["cpunumber"]; found { + vv := strconv.Itoa(v.(int)) + u.Set("cpunumber", vv) + } + if v, found := p.p["cpuspeed"]; found { + vv := strconv.Itoa(v.(int)) + u.Set("cpuspeed", vv) + } + if v, found := p.p["customizediops"]; found { + vv := strconv.FormatBool(v.(bool)) + u.Set("customizediops", vv) + } + if v, found := p.p["deploymentplanner"]; found { + u.Set("deploymentplanner", v.(string)) + } + if v, found := p.p["displaytext"]; found { + u.Set("displaytext", v.(string)) + } + if v, found := p.p["domainid"]; found { + u.Set("domainid", v.(string)) + } + if v, found := p.p["hosttags"]; found { + u.Set("hosttags", v.(string)) + } + if v, found := p.p["hypervisorsnapshotreserve"]; found { + vv := strconv.Itoa(v.(int)) + u.Set("hypervisorsnapshotreserve", vv) + } + if v, found := p.p["iopsreadrate"]; found { + vv := strconv.FormatInt(v.(int64), 10) + u.Set("iopsreadrate", vv) + } + if v, found := p.p["iopswriterate"]; found { + vv := strconv.FormatInt(v.(int64), 10) + u.Set("iopswriterate", vv) + } + if v, found := p.p["issystem"]; found { + vv := strconv.FormatBool(v.(bool)) + u.Set("issystem", vv) + } + if v, found := p.p["isvolatile"]; found { + vv := strconv.FormatBool(v.(bool)) + u.Set("isvolatile", vv) + } + if v, found := p.p["limitcpuuse"]; found { + vv := strconv.FormatBool(v.(bool)) + u.Set("limitcpuuse", vv) + } + if v, found := p.p["maxiops"]; found { + vv := strconv.FormatInt(v.(int64), 10) + u.Set("maxiops", vv) + } + if v, found := p.p["memory"]; found { + vv := strconv.Itoa(v.(int)) + u.Set("memory", vv) + } + if v, found := p.p["miniops"]; found { + vv := strconv.FormatInt(v.(int64), 10) + u.Set("miniops", vv) + } + if v, found := p.p["name"]; found { + u.Set("name", v.(string)) + } + if v, found := p.p["networkrate"]; found { + vv := strconv.Itoa(v.(int)) + u.Set("networkrate", vv) + } + if v, found := p.p["offerha"]; found { + vv := strconv.FormatBool(v.(bool)) + u.Set("offerha", vv) + } + if v, found := p.p["serviceofferingdetails"]; found { + i := 0 + for k, vv := range v.(map[string]string) { + u.Set(fmt.Sprintf("serviceofferingdetails[%d].key", i), k) + u.Set(fmt.Sprintf("serviceofferingdetails[%d].value", i), vv) + i++ + } + } + if v, found := p.p["storagetype"]; found { + u.Set("storagetype", v.(string)) + } + if v, found := p.p["systemvmtype"]; found { + u.Set("systemvmtype", v.(string)) + } + if v, found := p.p["tags"]; found { + u.Set("tags", v.(string)) + } + return u +} + +func (p *CreateServiceOfferingParams) SetBytesreadrate(v int64) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["bytesreadrate"] = v + return +} + +func (p *CreateServiceOfferingParams) SetByteswriterate(v int64) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["byteswriterate"] = v + return +} + +func (p *CreateServiceOfferingParams) SetCpunumber(v int) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["cpunumber"] = v + return +} + +func (p *CreateServiceOfferingParams) SetCpuspeed(v 
int) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["cpuspeed"] = v + return +} + +func (p *CreateServiceOfferingParams) SetCustomizediops(v bool) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["customizediops"] = v + return +} + +func (p *CreateServiceOfferingParams) SetDeploymentplanner(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["deploymentplanner"] = v + return +} + +func (p *CreateServiceOfferingParams) SetDisplaytext(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["displaytext"] = v + return +} + +func (p *CreateServiceOfferingParams) SetDomainid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["domainid"] = v + return +} + +func (p *CreateServiceOfferingParams) SetHosttags(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["hosttags"] = v + return +} + +func (p *CreateServiceOfferingParams) SetHypervisorsnapshotreserve(v int) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["hypervisorsnapshotreserve"] = v + return +} + +func (p *CreateServiceOfferingParams) SetIopsreadrate(v int64) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["iopsreadrate"] = v + return +} + +func (p *CreateServiceOfferingParams) SetIopswriterate(v int64) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["iopswriterate"] = v + return +} + +func (p *CreateServiceOfferingParams) SetIssystem(v bool) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["issystem"] = v + return +} + +func (p *CreateServiceOfferingParams) SetIsvolatile(v bool) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["isvolatile"] = v + return +} + +func (p *CreateServiceOfferingParams) SetLimitcpuuse(v bool) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["limitcpuuse"] = v + return +} + +func (p *CreateServiceOfferingParams) SetMaxiops(v int64) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["maxiops"] = v + return +} + +func (p *CreateServiceOfferingParams) SetMemory(v int) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["memory"] = v + return +} + +func (p *CreateServiceOfferingParams) SetMiniops(v int64) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["miniops"] = v + return +} + +func (p *CreateServiceOfferingParams) SetName(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["name"] = v + return +} + +func (p *CreateServiceOfferingParams) SetNetworkrate(v int) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["networkrate"] = v + return +} + +func (p *CreateServiceOfferingParams) SetOfferha(v bool) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["offerha"] = v + return +} + +func (p *CreateServiceOfferingParams) SetServiceofferingdetails(v map[string]string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["serviceofferingdetails"] = v + return +} + +func (p *CreateServiceOfferingParams) SetStoragetype(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["storagetype"] = v + return +} + +func (p *CreateServiceOfferingParams) SetSystemvmtype(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["systemvmtype"] = v + return +} + +func (p *CreateServiceOfferingParams) SetTags(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["tags"] = v + return +} + +// You should always use 
this function to get a new CreateServiceOfferingParams instance, +// as then you are sure you have configured all required params +func (s *ServiceOfferingService) NewCreateServiceOfferingParams(displaytext string, name string) *CreateServiceOfferingParams { + p := &CreateServiceOfferingParams{} + p.p = make(map[string]interface{}) + p.p["displaytext"] = displaytext + p.p["name"] = name + return p +} + +// Creates a service offering. +func (s *ServiceOfferingService) CreateServiceOffering(p *CreateServiceOfferingParams) (*CreateServiceOfferingResponse, error) { + resp, err := s.cs.newRequest("createServiceOffering", p.toURLValues()) + if err != nil { + return nil, err + } + + if resp, err = getRawValue(resp); err != nil { + return nil, err + } + + var r CreateServiceOfferingResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + return &r, nil +} + +type CreateServiceOfferingResponse struct { + Cpunumber int `json:"cpunumber,omitempty"` + Cpuspeed int `json:"cpuspeed,omitempty"` + Created string `json:"created,omitempty"` + Defaultuse bool `json:"defaultuse,omitempty"` + Deploymentplanner string `json:"deploymentplanner,omitempty"` + DiskBytesReadRate int64 `json:"diskBytesReadRate,omitempty"` + DiskBytesWriteRate int64 `json:"diskBytesWriteRate,omitempty"` + DiskIopsReadRate int64 `json:"diskIopsReadRate,omitempty"` + DiskIopsWriteRate int64 `json:"diskIopsWriteRate,omitempty"` + Displaytext string `json:"displaytext,omitempty"` + Domain string `json:"domain,omitempty"` + Domainid string `json:"domainid,omitempty"` + Hosttags string `json:"hosttags,omitempty"` + Hypervisorsnapshotreserve int `json:"hypervisorsnapshotreserve,omitempty"` + Id string `json:"id,omitempty"` + Iscustomized bool `json:"iscustomized,omitempty"` + Iscustomizediops bool `json:"iscustomizediops,omitempty"` + Issystem bool `json:"issystem,omitempty"` + Isvolatile bool `json:"isvolatile,omitempty"` + Limitcpuuse bool `json:"limitcpuuse,omitempty"` + Maxiops int64 `json:"maxiops,omitempty"` + Memory int `json:"memory,omitempty"` + Miniops int64 `json:"miniops,omitempty"` + Name string `json:"name,omitempty"` + Networkrate int `json:"networkrate,omitempty"` + Offerha bool `json:"offerha,omitempty"` + Serviceofferingdetails map[string]string `json:"serviceofferingdetails,omitempty"` + Storagetype string `json:"storagetype,omitempty"` + Systemvmtype string `json:"systemvmtype,omitempty"` + Tags string `json:"tags,omitempty"` +} + +type DeleteServiceOfferingParams struct { + p map[string]interface{} +} + +func (p *DeleteServiceOfferingParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["id"]; found { + u.Set("id", v.(string)) + } + return u +} + +func (p *DeleteServiceOfferingParams) SetId(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["id"] = v + return +} + +// You should always use this function to get a new DeleteServiceOfferingParams instance, +// as then you are sure you have configured all required params +func (s *ServiceOfferingService) NewDeleteServiceOfferingParams(id string) *DeleteServiceOfferingParams { + p := &DeleteServiceOfferingParams{} + p.p = make(map[string]interface{}) + p.p["id"] = id + return p +} + +// Deletes a service offering. 
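Continuing the sketch above with the same assumed cs client (this service is exposed as cs.ServiceOffering under the package's usual wiring): displaytext and name are the only required fields, hence constructor arguments, and everything else is optional. Creating a small compute offering and then deleting it by ID might look like:

cp := cs.ServiceOffering.NewCreateServiceOfferingParams("Small 1x512", "small")
cp.SetCpunumber(1)
cp.SetCpuspeed(1000) // MHz
cp.SetMemory(512)    // MB
offering, err := cs.ServiceOffering.CreateServiceOffering(cp)
if err != nil {
	log.Fatal(err)
}

dp := cs.ServiceOffering.NewDeleteServiceOfferingParams(offering.Id)
if _, err := cs.ServiceOffering.DeleteServiceOffering(dp); err != nil {
	log.Fatal(err)
}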
+func (s *ServiceOfferingService) DeleteServiceOffering(p *DeleteServiceOfferingParams) (*DeleteServiceOfferingResponse, error) { + resp, err := s.cs.newRequest("deleteServiceOffering", p.toURLValues()) + if err != nil { + return nil, err + } + + var r DeleteServiceOfferingResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + return &r, nil +} + +type DeleteServiceOfferingResponse struct { + Displaytext string `json:"displaytext,omitempty"` + Success string `json:"success,omitempty"` +} + +type UpdateServiceOfferingParams struct { + p map[string]interface{} +} + +func (p *UpdateServiceOfferingParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["displaytext"]; found { + u.Set("displaytext", v.(string)) + } + if v, found := p.p["id"]; found { + u.Set("id", v.(string)) + } + if v, found := p.p["name"]; found { + u.Set("name", v.(string)) + } + if v, found := p.p["sortkey"]; found { + vv := strconv.Itoa(v.(int)) + u.Set("sortkey", vv) + } + return u +} + +func (p *UpdateServiceOfferingParams) SetDisplaytext(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["displaytext"] = v + return +} + +func (p *UpdateServiceOfferingParams) SetId(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["id"] = v + return +} + +func (p *UpdateServiceOfferingParams) SetName(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["name"] = v + return +} + +func (p *UpdateServiceOfferingParams) SetSortkey(v int) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["sortkey"] = v + return +} + +// You should always use this function to get a new UpdateServiceOfferingParams instance, +// as then you are sure you have configured all required params +func (s *ServiceOfferingService) NewUpdateServiceOfferingParams(id string) *UpdateServiceOfferingParams { + p := &UpdateServiceOfferingParams{} + p.p = make(map[string]interface{}) + p.p["id"] = id + return p +} + +// Updates a service offering. 
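Per the params type above, only displaytext, name, and sortkey can be changed on an existing offering. A sketch of renaming the offering created earlier:

up := cs.ServiceOffering.NewUpdateServiceOfferingParams(offering.Id)
up.SetDisplaytext("Small 1x512 (deprecated)")
if _, err := cs.ServiceOffering.UpdateServiceOffering(up); err != nil {
	log.Fatal(err)
}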
+func (s *ServiceOfferingService) UpdateServiceOffering(p *UpdateServiceOfferingParams) (*UpdateServiceOfferingResponse, error) { + resp, err := s.cs.newRequest("updateServiceOffering", p.toURLValues()) + if err != nil { + return nil, err + } + + var r UpdateServiceOfferingResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + return &r, nil +} + +type UpdateServiceOfferingResponse struct { + Cpunumber int `json:"cpunumber,omitempty"` + Cpuspeed int `json:"cpuspeed,omitempty"` + Created string `json:"created,omitempty"` + Defaultuse bool `json:"defaultuse,omitempty"` + Deploymentplanner string `json:"deploymentplanner,omitempty"` + DiskBytesReadRate int64 `json:"diskBytesReadRate,omitempty"` + DiskBytesWriteRate int64 `json:"diskBytesWriteRate,omitempty"` + DiskIopsReadRate int64 `json:"diskIopsReadRate,omitempty"` + DiskIopsWriteRate int64 `json:"diskIopsWriteRate,omitempty"` + Displaytext string `json:"displaytext,omitempty"` + Domain string `json:"domain,omitempty"` + Domainid string `json:"domainid,omitempty"` + Hosttags string `json:"hosttags,omitempty"` + Hypervisorsnapshotreserve int `json:"hypervisorsnapshotreserve,omitempty"` + Id string `json:"id,omitempty"` + Iscustomized bool `json:"iscustomized,omitempty"` + Iscustomizediops bool `json:"iscustomizediops,omitempty"` + Issystem bool `json:"issystem,omitempty"` + Isvolatile bool `json:"isvolatile,omitempty"` + Limitcpuuse bool `json:"limitcpuuse,omitempty"` + Maxiops int64 `json:"maxiops,omitempty"` + Memory int `json:"memory,omitempty"` + Miniops int64 `json:"miniops,omitempty"` + Name string `json:"name,omitempty"` + Networkrate int `json:"networkrate,omitempty"` + Offerha bool `json:"offerha,omitempty"` + Serviceofferingdetails map[string]string `json:"serviceofferingdetails,omitempty"` + Storagetype string `json:"storagetype,omitempty"` + Systemvmtype string `json:"systemvmtype,omitempty"` + Tags string `json:"tags,omitempty"` +} + +type ListServiceOfferingsParams struct { + p map[string]interface{} +} + +func (p *ListServiceOfferingsParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["domainid"]; found { + u.Set("domainid", v.(string)) + } + if v, found := p.p["id"]; found { + u.Set("id", v.(string)) + } + if v, found := p.p["issystem"]; found { + vv := strconv.FormatBool(v.(bool)) + u.Set("issystem", vv) + } + if v, found := p.p["keyword"]; found { + u.Set("keyword", v.(string)) + } + if v, found := p.p["name"]; found { + u.Set("name", v.(string)) + } + if v, found := p.p["page"]; found { + vv := strconv.Itoa(v.(int)) + u.Set("page", vv) + } + if v, found := p.p["pagesize"]; found { + vv := strconv.Itoa(v.(int)) + u.Set("pagesize", vv) + } + if v, found := p.p["systemvmtype"]; found { + u.Set("systemvmtype", v.(string)) + } + if v, found := p.p["virtualmachineid"]; found { + u.Set("virtualmachineid", v.(string)) + } + return u +} + +func (p *ListServiceOfferingsParams) SetDomainid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["domainid"] = v + return +} + +func (p *ListServiceOfferingsParams) SetId(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["id"] = v + return +} + +func (p *ListServiceOfferingsParams) SetIssystem(v bool) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["issystem"] = v + return +} + +func (p *ListServiceOfferingsParams) SetKeyword(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["keyword"] = v + return +} + +func 
(p *ListServiceOfferingsParams) SetName(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["name"] = v + return +} + +func (p *ListServiceOfferingsParams) SetPage(v int) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["page"] = v + return +} + +func (p *ListServiceOfferingsParams) SetPagesize(v int) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["pagesize"] = v + return +} + +func (p *ListServiceOfferingsParams) SetSystemvmtype(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["systemvmtype"] = v + return +} + +func (p *ListServiceOfferingsParams) SetVirtualmachineid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["virtualmachineid"] = v + return +} + +// You should always use this function to get a new ListServiceOfferingsParams instance, +// as then you are sure you have configured all required params +func (s *ServiceOfferingService) NewListServiceOfferingsParams() *ListServiceOfferingsParams { + p := &ListServiceOfferingsParams{} + p.p = make(map[string]interface{}) + return p +} + +// This is a courtesy helper function, which in some cases may not work as expected! +func (s *ServiceOfferingService) GetServiceOfferingID(name string) (string, error) { + p := &ListServiceOfferingsParams{} + p.p = make(map[string]interface{}) + + p.p["name"] = name + + l, err := s.ListServiceOfferings(p) + if err != nil { + return "", err + } + + if l.Count == 0 { + return "", fmt.Errorf("No match found for %s: %+v", name, l) + } + + if l.Count == 1 { + return l.ServiceOfferings[0].Id, nil + } + + if l.Count > 1 { + for _, v := range l.ServiceOfferings { + if v.Name == name { + return v.Id, nil + } + } + } + return "", fmt.Errorf("Could not find an exact match for %s: %+v", name, l) +} + +// This is a courtesy helper function, which in some cases may not work as expected! +func (s *ServiceOfferingService) GetServiceOfferingByName(name string) (*ServiceOffering, int, error) { + id, err := s.GetServiceOfferingID(name) + if err != nil { + return nil, -1, err + } + + r, count, err := s.GetServiceOfferingByID(id) + if err != nil { + return nil, count, err + } + return r, count, nil +} + +// This is a courtesy helper function, which in some cases may not work as expected! +func (s *ServiceOfferingService) GetServiceOfferingByID(id string) (*ServiceOffering, int, error) { + p := &ListServiceOfferingsParams{} + p.p = make(map[string]interface{}) + + p.p["id"] = id + + l, err := s.ListServiceOfferings(p) + if err != nil { + if strings.Contains(err.Error(), fmt.Sprintf( + "Invalid parameter id value=%s due to incorrect long value format, "+ + "or entity does not exist", id)) { + return nil, 0, fmt.Errorf("No match found for %s: %+v", id, l) + } + return nil, -1, err + } + + if l.Count == 0 { + return nil, l.Count, fmt.Errorf("No match found for %s: %+v", id, l) + } + + if l.Count == 1 { + return l.ServiceOfferings[0], l.Count, nil + } + return nil, l.Count, fmt.Errorf("There is more then one result for ServiceOffering UUID: %s!", id) +} + +// Lists all available service offerings. 
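The list call below honors the page and pagesize parameters, and per CloudStack's list semantics the response's Count field reports the total number of matches rather than the page length, so paging through every offering can be sketched as:

lp := cs.ServiceOffering.NewListServiceOfferingsParams()
lp.SetPagesize(50)
for page := 1; ; page++ {
	lp.SetPage(page)
	l, err := cs.ServiceOffering.ListServiceOfferings(lp)
	if err != nil {
		log.Fatal(err)
	}
	for _, o := range l.ServiceOfferings {
		fmt.Printf("%s\t%s\n", o.Id, o.Name)
	}
	if page*50 >= l.Count {
		break
	}
}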
+func (s *ServiceOfferingService) ListServiceOfferings(p *ListServiceOfferingsParams) (*ListServiceOfferingsResponse, error) { + resp, err := s.cs.newRequest("listServiceOfferings", p.toURLValues()) + if err != nil { + return nil, err + } + + var r ListServiceOfferingsResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + return &r, nil +} + +type ListServiceOfferingsResponse struct { + Count int `json:"count"` + ServiceOfferings []*ServiceOffering `json:"serviceoffering"` +} + +type ServiceOffering struct { + Cpunumber int `json:"cpunumber,omitempty"` + Cpuspeed int `json:"cpuspeed,omitempty"` + Created string `json:"created,omitempty"` + Defaultuse bool `json:"defaultuse,omitempty"` + Deploymentplanner string `json:"deploymentplanner,omitempty"` + DiskBytesReadRate int64 `json:"diskBytesReadRate,omitempty"` + DiskBytesWriteRate int64 `json:"diskBytesWriteRate,omitempty"` + DiskIopsReadRate int64 `json:"diskIopsReadRate,omitempty"` + DiskIopsWriteRate int64 `json:"diskIopsWriteRate,omitempty"` + Displaytext string `json:"displaytext,omitempty"` + Domain string `json:"domain,omitempty"` + Domainid string `json:"domainid,omitempty"` + Hosttags string `json:"hosttags,omitempty"` + Hypervisorsnapshotreserve int `json:"hypervisorsnapshotreserve,omitempty"` + Id string `json:"id,omitempty"` + Iscustomized bool `json:"iscustomized,omitempty"` + Iscustomizediops bool `json:"iscustomizediops,omitempty"` + Issystem bool `json:"issystem,omitempty"` + Isvolatile bool `json:"isvolatile,omitempty"` + Limitcpuuse bool `json:"limitcpuuse,omitempty"` + Maxiops int64 `json:"maxiops,omitempty"` + Memory int `json:"memory,omitempty"` + Miniops int64 `json:"miniops,omitempty"` + Name string `json:"name,omitempty"` + Networkrate int `json:"networkrate,omitempty"` + Offerha bool `json:"offerha,omitempty"` + Serviceofferingdetails map[string]string `json:"serviceofferingdetails,omitempty"` + Storagetype string `json:"storagetype,omitempty"` + Systemvmtype string `json:"systemvmtype,omitempty"` + Tags string `json:"tags,omitempty"` +} diff --git a/vendor/github.com/xanzy/go-cloudstack/cloudstack/SnapshotService.go b/vendor/github.com/xanzy/go-cloudstack/cloudstack/SnapshotService.go new file mode 100644 index 000000000..0b23ad1e7 --- /dev/null +++ b/vendor/github.com/xanzy/go-cloudstack/cloudstack/SnapshotService.go @@ -0,0 +1,1784 @@ +// +// Copyright 2014, Sander van Harmelen +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// + +package cloudstack + +import ( + "encoding/json" + "fmt" + "net/url" + "strconv" + "strings" +) + +type CreateSnapshotParams struct { + p map[string]interface{} +} + +func (p *CreateSnapshotParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["account"]; found { + u.Set("account", v.(string)) + } + if v, found := p.p["domainid"]; found { + u.Set("domainid", v.(string)) + } + if v, found := p.p["policyid"]; found { + u.Set("policyid", v.(string)) + } + if v, found := p.p["quiescevm"]; found { + vv := strconv.FormatBool(v.(bool)) + u.Set("quiescevm", vv) + } + if v, found := p.p["volumeid"]; found { + u.Set("volumeid", v.(string)) + } + return u +} + +func (p *CreateSnapshotParams) SetAccount(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["account"] = v + return +} + +func (p *CreateSnapshotParams) SetDomainid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["domainid"] = v + return +} + +func (p *CreateSnapshotParams) SetPolicyid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["policyid"] = v + return +} + +func (p *CreateSnapshotParams) SetQuiescevm(v bool) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["quiescevm"] = v + return +} + +func (p *CreateSnapshotParams) SetVolumeid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["volumeid"] = v + return +} + +// You should always use this function to get a new CreateSnapshotParams instance, +// as then you are sure you have configured all required params +func (s *SnapshotService) NewCreateSnapshotParams(volumeid string) *CreateSnapshotParams { + p := &CreateSnapshotParams{} + p.p = make(map[string]interface{}) + p.p["volumeid"] = volumeid + return p +} + +// Creates an instant snapshot of a volume. 
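A sketch of snapshotting a volume with the same assumed cs client (the volume ID is a placeholder). With the async client, the call below only returns once the snapshot job completes, or surfaces AsyncTimeoutErr alongside the partial response if the configured timeout is hit first:

sp := cs.Snapshot.NewCreateSnapshotParams("volume-uuid")
snap, err := cs.Snapshot.CreateSnapshot(sp)
if err != nil {
	log.Fatal(err)
}
fmt.Printf("snapshot %s is %s\n", snap.Id, snap.State)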
+func (s *SnapshotService) CreateSnapshot(p *CreateSnapshotParams) (*CreateSnapshotResponse, error) { + resp, err := s.cs.newRequest("createSnapshot", p.toURLValues()) + if err != nil { + return nil, err + } + + var r CreateSnapshotResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + + // If we have a async client, we need to wait for the async result + if s.cs.async { + b, err := s.cs.GetAsyncJobResult(r.JobID, s.cs.timeout) + if err != nil { + if err == AsyncTimeoutErr { + return &r, err + } + return nil, err + } + + b, err = getRawValue(b) + if err != nil { + return nil, err + } + + if err := json.Unmarshal(b, &r); err != nil { + return nil, err + } + } + return &r, nil +} + +type CreateSnapshotResponse struct { + JobID string `json:"jobid,omitempty"` + Account string `json:"account,omitempty"` + Created string `json:"created,omitempty"` + Domain string `json:"domain,omitempty"` + Domainid string `json:"domainid,omitempty"` + Id string `json:"id,omitempty"` + Intervaltype string `json:"intervaltype,omitempty"` + Name string `json:"name,omitempty"` + Project string `json:"project,omitempty"` + Projectid string `json:"projectid,omitempty"` + Revertable bool `json:"revertable,omitempty"` + Snapshottype string `json:"snapshottype,omitempty"` + State string `json:"state,omitempty"` + Tags []struct { + Account string `json:"account,omitempty"` + Customer string `json:"customer,omitempty"` + Domain string `json:"domain,omitempty"` + Domainid string `json:"domainid,omitempty"` + Key string `json:"key,omitempty"` + Project string `json:"project,omitempty"` + Projectid string `json:"projectid,omitempty"` + Resourceid string `json:"resourceid,omitempty"` + Resourcetype string `json:"resourcetype,omitempty"` + Value string `json:"value,omitempty"` + } `json:"tags,omitempty"` + Volumeid string `json:"volumeid,omitempty"` + Volumename string `json:"volumename,omitempty"` + Volumetype string `json:"volumetype,omitempty"` + Zoneid string `json:"zoneid,omitempty"` +} + +type ListSnapshotsParams struct { + p map[string]interface{} +} + +func (p *ListSnapshotsParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["account"]; found { + u.Set("account", v.(string)) + } + if v, found := p.p["domainid"]; found { + u.Set("domainid", v.(string)) + } + if v, found := p.p["id"]; found { + u.Set("id", v.(string)) + } + if v, found := p.p["intervaltype"]; found { + u.Set("intervaltype", v.(string)) + } + if v, found := p.p["isrecursive"]; found { + vv := strconv.FormatBool(v.(bool)) + u.Set("isrecursive", vv) + } + if v, found := p.p["keyword"]; found { + u.Set("keyword", v.(string)) + } + if v, found := p.p["listall"]; found { + vv := strconv.FormatBool(v.(bool)) + u.Set("listall", vv) + } + if v, found := p.p["name"]; found { + u.Set("name", v.(string)) + } + if v, found := p.p["page"]; found { + vv := strconv.Itoa(v.(int)) + u.Set("page", vv) + } + if v, found := p.p["pagesize"]; found { + vv := strconv.Itoa(v.(int)) + u.Set("pagesize", vv) + } + if v, found := p.p["projectid"]; found { + u.Set("projectid", v.(string)) + } + if v, found := p.p["snapshottype"]; found { + u.Set("snapshottype", v.(string)) + } + if v, found := p.p["tags"]; found { + i := 0 + for k, vv := range v.(map[string]string) { + u.Set(fmt.Sprintf("tags[%d].key", i), k) + u.Set(fmt.Sprintf("tags[%d].value", i), vv) + i++ + } + } + if v, found := p.p["volumeid"]; found { + u.Set("volumeid", v.(string)) + } + if v, found := p.p["zoneid"]; found { + 
u.Set("zoneid", v.(string)) + } + return u +} + +func (p *ListSnapshotsParams) SetAccount(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["account"] = v + return +} + +func (p *ListSnapshotsParams) SetDomainid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["domainid"] = v + return +} + +func (p *ListSnapshotsParams) SetId(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["id"] = v + return +} + +func (p *ListSnapshotsParams) SetIntervaltype(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["intervaltype"] = v + return +} + +func (p *ListSnapshotsParams) SetIsrecursive(v bool) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["isrecursive"] = v + return +} + +func (p *ListSnapshotsParams) SetKeyword(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["keyword"] = v + return +} + +func (p *ListSnapshotsParams) SetListall(v bool) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["listall"] = v + return +} + +func (p *ListSnapshotsParams) SetName(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["name"] = v + return +} + +func (p *ListSnapshotsParams) SetPage(v int) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["page"] = v + return +} + +func (p *ListSnapshotsParams) SetPagesize(v int) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["pagesize"] = v + return +} + +func (p *ListSnapshotsParams) SetProjectid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["projectid"] = v + return +} + +func (p *ListSnapshotsParams) SetSnapshottype(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["snapshottype"] = v + return +} + +func (p *ListSnapshotsParams) SetTags(v map[string]string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["tags"] = v + return +} + +func (p *ListSnapshotsParams) SetVolumeid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["volumeid"] = v + return +} + +func (p *ListSnapshotsParams) SetZoneid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["zoneid"] = v + return +} + +// You should always use this function to get a new ListSnapshotsParams instance, +// as then you are sure you have configured all required params +func (s *SnapshotService) NewListSnapshotsParams() *ListSnapshotsParams { + p := &ListSnapshotsParams{} + p.p = make(map[string]interface{}) + return p +} + +// This is a courtesy helper function, which in some cases may not work as expected! +func (s *SnapshotService) GetSnapshotID(name string) (string, error) { + p := &ListSnapshotsParams{} + p.p = make(map[string]interface{}) + + p.p["name"] = name + + l, err := s.ListSnapshots(p) + if err != nil { + return "", err + } + + if l.Count == 0 { + // If no matches, search all projects + p.p["projectid"] = "-1" + + l, err = s.ListSnapshots(p) + if err != nil { + return "", err + } + } + + if l.Count == 0 { + return "", fmt.Errorf("No match found for %s: %+v", name, l) + } + + if l.Count == 1 { + return l.Snapshots[0].Id, nil + } + + if l.Count > 1 { + for _, v := range l.Snapshots { + if v.Name == name { + return v.Id, nil + } + } + } + return "", fmt.Errorf("Could not find an exact match for %s: %+v", name, l) +} + +// This is a courtesy helper function, which in some cases may not work as expected! 
+func (s *SnapshotService) GetSnapshotByName(name string) (*Snapshot, int, error) { + id, err := s.GetSnapshotID(name) + if err != nil { + return nil, -1, err + } + + r, count, err := s.GetSnapshotByID(id) + if err != nil { + return nil, count, err + } + return r, count, nil +} + +// This is a courtesy helper function, which in some cases may not work as expected! +func (s *SnapshotService) GetSnapshotByID(id string) (*Snapshot, int, error) { + p := &ListSnapshotsParams{} + p.p = make(map[string]interface{}) + + p.p["id"] = id + + l, err := s.ListSnapshots(p) + if err != nil { + if strings.Contains(err.Error(), fmt.Sprintf( + "Invalid parameter id value=%s due to incorrect long value format, "+ + "or entity does not exist", id)) { + return nil, 0, fmt.Errorf("No match found for %s: %+v", id, l) + } + return nil, -1, err + } + + if l.Count == 0 { + // If no matches, search all projects + p.p["projectid"] = "-1" + + l, err = s.ListSnapshots(p) + if err != nil { + if strings.Contains(err.Error(), fmt.Sprintf( + "Invalid parameter id value=%s due to incorrect long value format, "+ + "or entity does not exist", id)) { + return nil, 0, fmt.Errorf("No match found for %s: %+v", id, l) + } + return nil, -1, err + } + } + + if l.Count == 0 { + return nil, l.Count, fmt.Errorf("No match found for %s: %+v", id, l) + } + + if l.Count == 1 { + return l.Snapshots[0], l.Count, nil + } + return nil, l.Count, fmt.Errorf("There is more then one result for Snapshot UUID: %s!", id) +} + +// Lists all available snapshots for the account. +func (s *SnapshotService) ListSnapshots(p *ListSnapshotsParams) (*ListSnapshotsResponse, error) { + resp, err := s.cs.newRequest("listSnapshots", p.toURLValues()) + if err != nil { + return nil, err + } + + var r ListSnapshotsResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + return &r, nil +} + +type ListSnapshotsResponse struct { + Count int `json:"count"` + Snapshots []*Snapshot `json:"snapshot"` +} + +type Snapshot struct { + Account string `json:"account,omitempty"` + Created string `json:"created,omitempty"` + Domain string `json:"domain,omitempty"` + Domainid string `json:"domainid,omitempty"` + Id string `json:"id,omitempty"` + Intervaltype string `json:"intervaltype,omitempty"` + Name string `json:"name,omitempty"` + Project string `json:"project,omitempty"` + Projectid string `json:"projectid,omitempty"` + Revertable bool `json:"revertable,omitempty"` + Snapshottype string `json:"snapshottype,omitempty"` + State string `json:"state,omitempty"` + Tags []struct { + Account string `json:"account,omitempty"` + Customer string `json:"customer,omitempty"` + Domain string `json:"domain,omitempty"` + Domainid string `json:"domainid,omitempty"` + Key string `json:"key,omitempty"` + Project string `json:"project,omitempty"` + Projectid string `json:"projectid,omitempty"` + Resourceid string `json:"resourceid,omitempty"` + Resourcetype string `json:"resourcetype,omitempty"` + Value string `json:"value,omitempty"` + } `json:"tags,omitempty"` + Volumeid string `json:"volumeid,omitempty"` + Volumename string `json:"volumename,omitempty"` + Volumetype string `json:"volumetype,omitempty"` + Zoneid string `json:"zoneid,omitempty"` +} + +type DeleteSnapshotParams struct { + p map[string]interface{} +} + +func (p *DeleteSnapshotParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["id"]; found { + u.Set("id", v.(string)) + } + return u +} + +func (p *DeleteSnapshotParams) SetId(v string) { + if p.p 
== nil { + p.p = make(map[string]interface{}) + } + p.p["id"] = v + return +} + +// You should always use this function to get a new DeleteSnapshotParams instance, +// as then you are sure you have configured all required params +func (s *SnapshotService) NewDeleteSnapshotParams(id string) *DeleteSnapshotParams { + p := &DeleteSnapshotParams{} + p.p = make(map[string]interface{}) + p.p["id"] = id + return p +} + +// Deletes a snapshot of a disk volume. +func (s *SnapshotService) DeleteSnapshot(p *DeleteSnapshotParams) (*DeleteSnapshotResponse, error) { + resp, err := s.cs.newRequest("deleteSnapshot", p.toURLValues()) + if err != nil { + return nil, err + } + + var r DeleteSnapshotResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + + // If we have a async client, we need to wait for the async result + if s.cs.async { + b, err := s.cs.GetAsyncJobResult(r.JobID, s.cs.timeout) + if err != nil { + if err == AsyncTimeoutErr { + return &r, err + } + return nil, err + } + + if err := json.Unmarshal(b, &r); err != nil { + return nil, err + } + } + return &r, nil +} + +type DeleteSnapshotResponse struct { + JobID string `json:"jobid,omitempty"` + Displaytext string `json:"displaytext,omitempty"` + Success bool `json:"success,omitempty"` +} + +type CreateSnapshotPolicyParams struct { + p map[string]interface{} +} + +func (p *CreateSnapshotPolicyParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["fordisplay"]; found { + vv := strconv.FormatBool(v.(bool)) + u.Set("fordisplay", vv) + } + if v, found := p.p["intervaltype"]; found { + u.Set("intervaltype", v.(string)) + } + if v, found := p.p["maxsnaps"]; found { + vv := strconv.Itoa(v.(int)) + u.Set("maxsnaps", vv) + } + if v, found := p.p["schedule"]; found { + u.Set("schedule", v.(string)) + } + if v, found := p.p["timezone"]; found { + u.Set("timezone", v.(string)) + } + if v, found := p.p["volumeid"]; found { + u.Set("volumeid", v.(string)) + } + return u +} + +func (p *CreateSnapshotPolicyParams) SetFordisplay(v bool) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["fordisplay"] = v + return +} + +func (p *CreateSnapshotPolicyParams) SetIntervaltype(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["intervaltype"] = v + return +} + +func (p *CreateSnapshotPolicyParams) SetMaxsnaps(v int) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["maxsnaps"] = v + return +} + +func (p *CreateSnapshotPolicyParams) SetSchedule(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["schedule"] = v + return +} + +func (p *CreateSnapshotPolicyParams) SetTimezone(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["timezone"] = v + return +} + +func (p *CreateSnapshotPolicyParams) SetVolumeid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["volumeid"] = v + return +} + +// You should always use this function to get a new CreateSnapshotPolicyParams instance, +// as then you are sure you have configured all required params +func (s *SnapshotService) NewCreateSnapshotPolicyParams(intervaltype string, maxsnaps int, schedule string, timezone string, volumeid string) *CreateSnapshotPolicyParams { + p := &CreateSnapshotPolicyParams{} + p.p = make(map[string]interface{}) + p.p["intervaltype"] = intervaltype + p.p["maxsnaps"] = maxsnaps + p.p["schedule"] = schedule + p.p["timezone"] = timezone + p.p["volumeid"] = volumeid + return p +} + +// Creates 
a snapshot policy for the account. +func (s *SnapshotService) CreateSnapshotPolicy(p *CreateSnapshotPolicyParams) (*CreateSnapshotPolicyResponse, error) { + resp, err := s.cs.newRequest("createSnapshotPolicy", p.toURLValues()) + if err != nil { + return nil, err + } + + var r CreateSnapshotPolicyResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + return &r, nil +} + +type CreateSnapshotPolicyResponse struct { + Fordisplay bool `json:"fordisplay,omitempty"` + Id string `json:"id,omitempty"` + Intervaltype int `json:"intervaltype,omitempty"` + Maxsnaps int `json:"maxsnaps,omitempty"` + Schedule string `json:"schedule,omitempty"` + Timezone string `json:"timezone,omitempty"` + Volumeid string `json:"volumeid,omitempty"` +} + +type UpdateSnapshotPolicyParams struct { + p map[string]interface{} +} + +func (p *UpdateSnapshotPolicyParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["customid"]; found { + u.Set("customid", v.(string)) + } + if v, found := p.p["fordisplay"]; found { + vv := strconv.FormatBool(v.(bool)) + u.Set("fordisplay", vv) + } + if v, found := p.p["id"]; found { + u.Set("id", v.(string)) + } + return u +} + +func (p *UpdateSnapshotPolicyParams) SetCustomid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["customid"] = v + return +} + +func (p *UpdateSnapshotPolicyParams) SetFordisplay(v bool) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["fordisplay"] = v + return +} + +func (p *UpdateSnapshotPolicyParams) SetId(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["id"] = v + return +} + +// You should always use this function to get a new UpdateSnapshotPolicyParams instance, +// as then you are sure you have configured all required params +func (s *SnapshotService) NewUpdateSnapshotPolicyParams() *UpdateSnapshotPolicyParams { + p := &UpdateSnapshotPolicyParams{} + p.p = make(map[string]interface{}) + return p +} + +// Updates the snapshot policy. 
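Editor's aside: to make the generated params-builder pattern above concrete, here is a minimal usage sketch for CreateSnapshotPolicy. It is illustrative only, not part of the vendored file; the endpoint, keys, volume UUID, the "MM:HH" schedule format, and the assumption that the v2 client exposes this service as cs.Snapshot (built via cloudstack.NewAsyncClient) are all placeholders or assumptions.

package main

import (
	"log"

	"github.com/xanzy/go-cloudstack/cloudstack"
)

func main() {
	// Placeholder endpoint and credentials; last argument enables SSL verification.
	cs := cloudstack.NewAsyncClient("https://cloud.example.com/client/api", "API_KEY", "SECRET_KEY", true)

	// Required params go through the constructor; optional ones via setters.
	// Schedule is assumed to be "MM:HH" for a DAILY policy (02:30 each day).
	p := cs.Snapshot.NewCreateSnapshotPolicyParams("DAILY", 8, "30:02", "UTC", "volume-uuid")
	p.SetFordisplay(true) // optional

	r, err := cs.Snapshot.CreateSnapshotPolicy(p)
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("created snapshot policy %s", r.Id)
}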
+func (s *SnapshotService) UpdateSnapshotPolicy(p *UpdateSnapshotPolicyParams) (*UpdateSnapshotPolicyResponse, error) { + resp, err := s.cs.newRequest("updateSnapshotPolicy", p.toURLValues()) + if err != nil { + return nil, err + } + + var r UpdateSnapshotPolicyResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + + // If we have a async client, we need to wait for the async result + if s.cs.async { + b, err := s.cs.GetAsyncJobResult(r.JobID, s.cs.timeout) + if err != nil { + if err == AsyncTimeoutErr { + return &r, err + } + return nil, err + } + + b, err = getRawValue(b) + if err != nil { + return nil, err + } + + if err := json.Unmarshal(b, &r); err != nil { + return nil, err + } + } + return &r, nil +} + +type UpdateSnapshotPolicyResponse struct { + JobID string `json:"jobid,omitempty"` + Fordisplay bool `json:"fordisplay,omitempty"` + Id string `json:"id,omitempty"` + Intervaltype int `json:"intervaltype,omitempty"` + Maxsnaps int `json:"maxsnaps,omitempty"` + Schedule string `json:"schedule,omitempty"` + Timezone string `json:"timezone,omitempty"` + Volumeid string `json:"volumeid,omitempty"` +} + +type DeleteSnapshotPoliciesParams struct { + p map[string]interface{} +} + +func (p *DeleteSnapshotPoliciesParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["id"]; found { + u.Set("id", v.(string)) + } + if v, found := p.p["ids"]; found { + vv := strings.Join(v.([]string), ",") + u.Set("ids", vv) + } + return u +} + +func (p *DeleteSnapshotPoliciesParams) SetId(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["id"] = v + return +} + +func (p *DeleteSnapshotPoliciesParams) SetIds(v []string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["ids"] = v + return +} + +// You should always use this function to get a new DeleteSnapshotPoliciesParams instance, +// as then you are sure you have configured all required params +func (s *SnapshotService) NewDeleteSnapshotPoliciesParams() *DeleteSnapshotPoliciesParams { + p := &DeleteSnapshotPoliciesParams{} + p.p = make(map[string]interface{}) + return p +} + +// Deletes snapshot policies for the account. 
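The async branch in UpdateSnapshotPolicy above is the pattern repeated throughout this package: when the client was constructed in async mode, the call blocks on GetAsyncJobResult, and on AsyncTimeoutErr the partially filled response (still carrying the job ID) is returned together with the error. A hedged caller-side sketch; the helper name and the cs.Snapshot accessor are assumptions:

// Assumes: import ("fmt"; "github.com/xanzy/go-cloudstack/cloudstack")
func setPolicyHidden(cs *cloudstack.CloudStackClient, policyID string) error {
	p := cs.Snapshot.NewUpdateSnapshotPolicyParams()
	p.SetId(policyID)
	p.SetFordisplay(false)

	r, err := cs.Snapshot.UpdateSnapshotPolicy(p)
	if err == cloudstack.AsyncTimeoutErr {
		// The job is still running server-side; r.JobID could be polled later.
		return fmt.Errorf("timed out waiting on job %s: %v", r.JobID, err)
	}
	return err
}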
+func (s *SnapshotService) DeleteSnapshotPolicies(p *DeleteSnapshotPoliciesParams) (*DeleteSnapshotPoliciesResponse, error) { + resp, err := s.cs.newRequest("deleteSnapshotPolicies", p.toURLValues()) + if err != nil { + return nil, err + } + + var r DeleteSnapshotPoliciesResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + return &r, nil +} + +type DeleteSnapshotPoliciesResponse struct { + Displaytext string `json:"displaytext,omitempty"` + Success string `json:"success,omitempty"` +} + +type ListSnapshotPoliciesParams struct { + p map[string]interface{} +} + +func (p *ListSnapshotPoliciesParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["fordisplay"]; found { + vv := strconv.FormatBool(v.(bool)) + u.Set("fordisplay", vv) + } + if v, found := p.p["id"]; found { + u.Set("id", v.(string)) + } + if v, found := p.p["keyword"]; found { + u.Set("keyword", v.(string)) + } + if v, found := p.p["page"]; found { + vv := strconv.Itoa(v.(int)) + u.Set("page", vv) + } + if v, found := p.p["pagesize"]; found { + vv := strconv.Itoa(v.(int)) + u.Set("pagesize", vv) + } + if v, found := p.p["volumeid"]; found { + u.Set("volumeid", v.(string)) + } + return u +} + +func (p *ListSnapshotPoliciesParams) SetFordisplay(v bool) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["fordisplay"] = v + return +} + +func (p *ListSnapshotPoliciesParams) SetId(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["id"] = v + return +} + +func (p *ListSnapshotPoliciesParams) SetKeyword(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["keyword"] = v + return +} + +func (p *ListSnapshotPoliciesParams) SetPage(v int) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["page"] = v + return +} + +func (p *ListSnapshotPoliciesParams) SetPagesize(v int) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["pagesize"] = v + return +} + +func (p *ListSnapshotPoliciesParams) SetVolumeid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["volumeid"] = v + return +} + +// You should always use this function to get a new ListSnapshotPoliciesParams instance, +// as then you are sure you have configured all required params +func (s *SnapshotService) NewListSnapshotPoliciesParams() *ListSnapshotPoliciesParams { + p := &ListSnapshotPoliciesParams{} + p.p = make(map[string]interface{}) + return p +} + +// This is a courtesy helper function, which in some cases may not work as expected! +func (s *SnapshotService) GetSnapshotPolicyByID(id string) (*SnapshotPolicy, int, error) { + p := &ListSnapshotPoliciesParams{} + p.p = make(map[string]interface{}) + + p.p["id"] = id + + l, err := s.ListSnapshotPolicies(p) + if err != nil { + if strings.Contains(err.Error(), fmt.Sprintf( + "Invalid parameter id value=%s due to incorrect long value format, "+ + "or entity does not exist", id)) { + return nil, 0, fmt.Errorf("No match found for %s: %+v", id, l) + } + return nil, -1, err + } + + if l.Count == 0 { + return nil, l.Count, fmt.Errorf("No match found for %s: %+v", id, l) + } + + if l.Count == 1 { + return l.SnapshotPolicies[0], l.Count, nil + } + return nil, l.Count, fmt.Errorf("There is more than one result for SnapshotPolicy UUID: %s!", id) +} + +// Lists snapshot policies.
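GetSnapshotPolicyByID above encodes "not found" as (nil, 0, non-nil error) and a transport/API failure as (nil, -1, err), so the returned count lets callers tell the two apart. A short sketch; the helper name is hypothetical:

// Assumes: import "github.com/xanzy/go-cloudstack/cloudstack"
func policyExists(cs *cloudstack.CloudStackClient, id string) (bool, error) {
	_, count, err := cs.Snapshot.GetSnapshotPolicyByID(id)
	if err != nil {
		if count == 0 {
			return false, nil // no match; not a hard failure
		}
		return false, err // count == -1: the API call itself failed
	}
	return true, nil
}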
+func (s *SnapshotService) ListSnapshotPolicies(p *ListSnapshotPoliciesParams) (*ListSnapshotPoliciesResponse, error) { + resp, err := s.cs.newRequest("listSnapshotPolicies", p.toURLValues()) + if err != nil { + return nil, err + } + + var r ListSnapshotPoliciesResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + return &r, nil +} + +type ListSnapshotPoliciesResponse struct { + Count int `json:"count"` + SnapshotPolicies []*SnapshotPolicy `json:"snapshotpolicy"` +} + +type SnapshotPolicy struct { + Fordisplay bool `json:"fordisplay,omitempty"` + Id string `json:"id,omitempty"` + Intervaltype int `json:"intervaltype,omitempty"` + Maxsnaps int `json:"maxsnaps,omitempty"` + Schedule string `json:"schedule,omitempty"` + Timezone string `json:"timezone,omitempty"` + Volumeid string `json:"volumeid,omitempty"` +} + +type RevertSnapshotParams struct { + p map[string]interface{} +} + +func (p *RevertSnapshotParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["id"]; found { + u.Set("id", v.(string)) + } + return u +} + +func (p *RevertSnapshotParams) SetId(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["id"] = v + return +} + +// You should always use this function to get a new RevertSnapshotParams instance, +// as then you are sure you have configured all required params +func (s *SnapshotService) NewRevertSnapshotParams(id string) *RevertSnapshotParams { + p := &RevertSnapshotParams{} + p.p = make(map[string]interface{}) + p.p["id"] = id + return p +} + +// revert a volume snapshot. +func (s *SnapshotService) RevertSnapshot(p *RevertSnapshotParams) (*RevertSnapshotResponse, error) { + resp, err := s.cs.newRequest("revertSnapshot", p.toURLValues()) + if err != nil { + return nil, err + } + + var r RevertSnapshotResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + + // If we have a async client, we need to wait for the async result + if s.cs.async { + b, err := s.cs.GetAsyncJobResult(r.JobID, s.cs.timeout) + if err != nil { + if err == AsyncTimeoutErr { + return &r, err + } + return nil, err + } + + b, err = getRawValue(b) + if err != nil { + return nil, err + } + + if err := json.Unmarshal(b, &r); err != nil { + return nil, err + } + } + return &r, nil +} + +type RevertSnapshotResponse struct { + JobID string `json:"jobid,omitempty"` + Account string `json:"account,omitempty"` + Created string `json:"created,omitempty"` + Domain string `json:"domain,omitempty"` + Domainid string `json:"domainid,omitempty"` + Id string `json:"id,omitempty"` + Intervaltype string `json:"intervaltype,omitempty"` + Name string `json:"name,omitempty"` + Project string `json:"project,omitempty"` + Projectid string `json:"projectid,omitempty"` + Revertable bool `json:"revertable,omitempty"` + Snapshottype string `json:"snapshottype,omitempty"` + State string `json:"state,omitempty"` + Tags []struct { + Account string `json:"account,omitempty"` + Customer string `json:"customer,omitempty"` + Domain string `json:"domain,omitempty"` + Domainid string `json:"domainid,omitempty"` + Key string `json:"key,omitempty"` + Project string `json:"project,omitempty"` + Projectid string `json:"projectid,omitempty"` + Resourceid string `json:"resourceid,omitempty"` + Resourcetype string `json:"resourcetype,omitempty"` + Value string `json:"value,omitempty"` + } `json:"tags,omitempty"` + Volumeid string `json:"volumeid,omitempty"` + Volumename string `json:"volumename,omitempty"` + 
Volumetype string `json:"volumetype,omitempty"` + Zoneid string `json:"zoneid,omitempty"` +} + +type ListVMSnapshotParams struct { + p map[string]interface{} +} + +func (p *ListVMSnapshotParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["account"]; found { + u.Set("account", v.(string)) + } + if v, found := p.p["domainid"]; found { + u.Set("domainid", v.(string)) + } + if v, found := p.p["isrecursive"]; found { + vv := strconv.FormatBool(v.(bool)) + u.Set("isrecursive", vv) + } + if v, found := p.p["keyword"]; found { + u.Set("keyword", v.(string)) + } + if v, found := p.p["listall"]; found { + vv := strconv.FormatBool(v.(bool)) + u.Set("listall", vv) + } + if v, found := p.p["name"]; found { + u.Set("name", v.(string)) + } + if v, found := p.p["page"]; found { + vv := strconv.Itoa(v.(int)) + u.Set("page", vv) + } + if v, found := p.p["pagesize"]; found { + vv := strconv.Itoa(v.(int)) + u.Set("pagesize", vv) + } + if v, found := p.p["projectid"]; found { + u.Set("projectid", v.(string)) + } + if v, found := p.p["state"]; found { + u.Set("state", v.(string)) + } + if v, found := p.p["tags"]; found { + i := 0 + for k, vv := range v.(map[string]string) { + u.Set(fmt.Sprintf("tags[%d].key", i), k) + u.Set(fmt.Sprintf("tags[%d].value", i), vv) + i++ + } + } + if v, found := p.p["virtualmachineid"]; found { + u.Set("virtualmachineid", v.(string)) + } + if v, found := p.p["vmsnapshotid"]; found { + u.Set("vmsnapshotid", v.(string)) + } + return u +} + +func (p *ListVMSnapshotParams) SetAccount(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["account"] = v + return +} + +func (p *ListVMSnapshotParams) SetDomainid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["domainid"] = v + return +} + +func (p *ListVMSnapshotParams) SetIsrecursive(v bool) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["isrecursive"] = v + return +} + +func (p *ListVMSnapshotParams) SetKeyword(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["keyword"] = v + return +} + +func (p *ListVMSnapshotParams) SetListall(v bool) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["listall"] = v + return +} + +func (p *ListVMSnapshotParams) SetName(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["name"] = v + return +} + +func (p *ListVMSnapshotParams) SetPage(v int) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["page"] = v + return +} + +func (p *ListVMSnapshotParams) SetPagesize(v int) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["pagesize"] = v + return +} + +func (p *ListVMSnapshotParams) SetProjectid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["projectid"] = v + return +} + +func (p *ListVMSnapshotParams) SetState(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["state"] = v + return +} + +func (p *ListVMSnapshotParams) SetTags(v map[string]string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["tags"] = v + return +} + +func (p *ListVMSnapshotParams) SetVirtualmachineid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["virtualmachineid"] = v + return +} + +func (p *ListVMSnapshotParams) SetVmsnapshotid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["vmsnapshotid"] = v + return +} + +// You should always use this function to get a new 
ListVMSnapshotParams instance, +// as then you are sure you have configured all required params +func (s *SnapshotService) NewListVMSnapshotParams() *ListVMSnapshotParams { + p := &ListVMSnapshotParams{} + p.p = make(map[string]interface{}) + return p +} + +// This is a courtesy helper function, which in some cases may not work as expected! +func (s *SnapshotService) GetVMSnapshotID(name string) (string, error) { + p := &ListVMSnapshotParams{} + p.p = make(map[string]interface{}) + + p.p["name"] = name + + l, err := s.ListVMSnapshot(p) + if err != nil { + return "", err + } + + if l.Count == 0 { + // If no matches, search all projects + p.p["projectid"] = "-1" + + l, err = s.ListVMSnapshot(p) + if err != nil { + return "", err + } + } + + if l.Count == 0 { + return "", fmt.Errorf("No match found for %s: %+v", name, l) + } + + if l.Count == 1 { + return l.VMSnapshot[0].Id, nil + } + + if l.Count > 1 { + for _, v := range l.VMSnapshot { + if v.Name == name { + return v.Id, nil + } + } + } + return "", fmt.Errorf("Could not find an exact match for %s: %+v", name, l) +} + +// List virtual machine snapshot by conditions +func (s *SnapshotService) ListVMSnapshot(p *ListVMSnapshotParams) (*ListVMSnapshotResponse, error) { + resp, err := s.cs.newRequest("listVMSnapshot", p.toURLValues()) + if err != nil { + return nil, err + } + + var r ListVMSnapshotResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + return &r, nil +} + +type ListVMSnapshotResponse struct { + Count int `json:"count"` + VMSnapshot []*VMSnapshot `json:"vmsnapshot"` +} + +type VMSnapshot struct { + Account string `json:"account,omitempty"` + Created string `json:"created,omitempty"` + Current bool `json:"current,omitempty"` + Description string `json:"description,omitempty"` + Displayname string `json:"displayname,omitempty"` + Domain string `json:"domain,omitempty"` + Domainid string `json:"domainid,omitempty"` + Id string `json:"id,omitempty"` + Name string `json:"name,omitempty"` + Parent string `json:"parent,omitempty"` + ParentName string `json:"parentName,omitempty"` + Project string `json:"project,omitempty"` + Projectid string `json:"projectid,omitempty"` + State string `json:"state,omitempty"` + Type string `json:"type,omitempty"` + Virtualmachineid string `json:"virtualmachineid,omitempty"` + Zoneid string `json:"zoneid,omitempty"` +} + +type CreateVMSnapshotParams struct { + p map[string]interface{} +} + +func (p *CreateVMSnapshotParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["description"]; found { + u.Set("description", v.(string)) + } + if v, found := p.p["name"]; found { + u.Set("name", v.(string)) + } + if v, found := p.p["quiescevm"]; found { + vv := strconv.FormatBool(v.(bool)) + u.Set("quiescevm", vv) + } + if v, found := p.p["snapshotmemory"]; found { + vv := strconv.FormatBool(v.(bool)) + u.Set("snapshotmemory", vv) + } + if v, found := p.p["virtualmachineid"]; found { + u.Set("virtualmachineid", v.(string)) + } + return u +} + +func (p *CreateVMSnapshotParams) SetDescription(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["description"] = v + return +} + +func (p *CreateVMSnapshotParams) SetName(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["name"] = v + return +} + +func (p *CreateVMSnapshotParams) SetQuiescevm(v bool) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["quiescevm"] = v + return +} + +func (p *CreateVMSnapshotParams) 
SetSnapshotmemory(v bool) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["snapshotmemory"] = v + return +} + +func (p *CreateVMSnapshotParams) SetVirtualmachineid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["virtualmachineid"] = v + return +} + +// You should always use this function to get a new CreateVMSnapshotParams instance, +// as then you are sure you have configured all required params +func (s *SnapshotService) NewCreateVMSnapshotParams(virtualmachineid string) *CreateVMSnapshotParams { + p := &CreateVMSnapshotParams{} + p.p = make(map[string]interface{}) + p.p["virtualmachineid"] = virtualmachineid + return p +} + +// Creates snapshot for a vm. +func (s *SnapshotService) CreateVMSnapshot(p *CreateVMSnapshotParams) (*CreateVMSnapshotResponse, error) { + resp, err := s.cs.newRequest("createVMSnapshot", p.toURLValues()) + if err != nil { + return nil, err + } + + var r CreateVMSnapshotResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + + // If we have a async client, we need to wait for the async result + if s.cs.async { + b, err := s.cs.GetAsyncJobResult(r.JobID, s.cs.timeout) + if err != nil { + if err == AsyncTimeoutErr { + return &r, err + } + return nil, err + } + + b, err = getRawValue(b) + if err != nil { + return nil, err + } + + if err := json.Unmarshal(b, &r); err != nil { + return nil, err + } + } + return &r, nil +} + +type CreateVMSnapshotResponse struct { + JobID string `json:"jobid,omitempty"` + Account string `json:"account,omitempty"` + Created string `json:"created,omitempty"` + Current bool `json:"current,omitempty"` + Description string `json:"description,omitempty"` + Displayname string `json:"displayname,omitempty"` + Domain string `json:"domain,omitempty"` + Domainid string `json:"domainid,omitempty"` + Id string `json:"id,omitempty"` + Name string `json:"name,omitempty"` + Parent string `json:"parent,omitempty"` + ParentName string `json:"parentName,omitempty"` + Project string `json:"project,omitempty"` + Projectid string `json:"projectid,omitempty"` + State string `json:"state,omitempty"` + Type string `json:"type,omitempty"` + Virtualmachineid string `json:"virtualmachineid,omitempty"` + Zoneid string `json:"zoneid,omitempty"` +} + +type DeleteVMSnapshotParams struct { + p map[string]interface{} +} + +func (p *DeleteVMSnapshotParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["vmsnapshotid"]; found { + u.Set("vmsnapshotid", v.(string)) + } + return u +} + +func (p *DeleteVMSnapshotParams) SetVmsnapshotid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["vmsnapshotid"] = v + return +} + +// You should always use this function to get a new DeleteVMSnapshotParams instance, +// as then you are sure you have configured all required params +func (s *SnapshotService) NewDeleteVMSnapshotParams(vmsnapshotid string) *DeleteVMSnapshotParams { + p := &DeleteVMSnapshotParams{} + p.p = make(map[string]interface{}) + p.p["vmsnapshotid"] = vmsnapshotid + return p +} + +// Deletes a vmsnapshot. 
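For CreateVMSnapshot above, a hypothetical usage sketch showing the two optional memory/quiesce flags (placeholder UUID; the cs.Snapshot accessor is an assumption):

// Assumes: import ("log"; "github.com/xanzy/go-cloudstack/cloudstack")
func snapshotVM(cs *cloudstack.CloudStackClient) {
	p := cs.Snapshot.NewCreateVMSnapshotParams("vm-uuid")
	p.SetName("pre-upgrade")
	p.SetDescription("state before the maintenance window")
	p.SetSnapshotmemory(true) // also capture RAM, not just disks
	p.SetQuiescevm(true)      // quiesce the guest before snapshotting

	r, err := cs.Snapshot.CreateVMSnapshot(p)
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("VM snapshot %s is %s", r.Id, r.State)
}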
+func (s *SnapshotService) DeleteVMSnapshot(p *DeleteVMSnapshotParams) (*DeleteVMSnapshotResponse, error) { + resp, err := s.cs.newRequest("deleteVMSnapshot", p.toURLValues()) + if err != nil { + return nil, err + } + + var r DeleteVMSnapshotResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + + // If we have a async client, we need to wait for the async result + if s.cs.async { + b, err := s.cs.GetAsyncJobResult(r.JobID, s.cs.timeout) + if err != nil { + if err == AsyncTimeoutErr { + return &r, err + } + return nil, err + } + + if err := json.Unmarshal(b, &r); err != nil { + return nil, err + } + } + return &r, nil +} + +type DeleteVMSnapshotResponse struct { + JobID string `json:"jobid,omitempty"` + Displaytext string `json:"displaytext,omitempty"` + Success bool `json:"success,omitempty"` +} + +type RevertToVMSnapshotParams struct { + p map[string]interface{} +} + +func (p *RevertToVMSnapshotParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["vmsnapshotid"]; found { + u.Set("vmsnapshotid", v.(string)) + } + return u +} + +func (p *RevertToVMSnapshotParams) SetVmsnapshotid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["vmsnapshotid"] = v + return +} + +// You should always use this function to get a new RevertToVMSnapshotParams instance, +// as then you are sure you have configured all required params +func (s *SnapshotService) NewRevertToVMSnapshotParams(vmsnapshotid string) *RevertToVMSnapshotParams { + p := &RevertToVMSnapshotParams{} + p.p = make(map[string]interface{}) + p.p["vmsnapshotid"] = vmsnapshotid + return p +} + +// Revert VM from a vmsnapshot. +func (s *SnapshotService) RevertToVMSnapshot(p *RevertToVMSnapshotParams) (*RevertToVMSnapshotResponse, error) { + resp, err := s.cs.newRequest("revertToVMSnapshot", p.toURLValues()) + if err != nil { + return nil, err + } + + var r RevertToVMSnapshotResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + + // If we have a async client, we need to wait for the async result + if s.cs.async { + b, err := s.cs.GetAsyncJobResult(r.JobID, s.cs.timeout) + if err != nil { + if err == AsyncTimeoutErr { + return &r, err + } + return nil, err + } + + b, err = getRawValue(b) + if err != nil { + return nil, err + } + + if err := json.Unmarshal(b, &r); err != nil { + return nil, err + } + } + return &r, nil +} + +type RevertToVMSnapshotResponse struct { + JobID string `json:"jobid,omitempty"` + Account string `json:"account,omitempty"` + Affinitygroup []struct { + Account string `json:"account,omitempty"` + Description string `json:"description,omitempty"` + Domain string `json:"domain,omitempty"` + Domainid string `json:"domainid,omitempty"` + Id string `json:"id,omitempty"` + Name string `json:"name,omitempty"` + Type string `json:"type,omitempty"` + VirtualmachineIds []string `json:"virtualmachineIds,omitempty"` + } `json:"affinitygroup,omitempty"` + Cpunumber int `json:"cpunumber,omitempty"` + Cpuspeed int `json:"cpuspeed,omitempty"` + Cpuused string `json:"cpuused,omitempty"` + Created string `json:"created,omitempty"` + Details map[string]string `json:"details,omitempty"` + Diskioread int64 `json:"diskioread,omitempty"` + Diskiowrite int64 `json:"diskiowrite,omitempty"` + Diskkbsread int64 `json:"diskkbsread,omitempty"` + Diskkbswrite int64 `json:"diskkbswrite,omitempty"` + Diskofferingid string `json:"diskofferingid,omitempty"` + Diskofferingname string `json:"diskofferingname,omitempty"` + 
Displayname string `json:"displayname,omitempty"` + Displayvm bool `json:"displayvm,omitempty"` + Domain string `json:"domain,omitempty"` + Domainid string `json:"domainid,omitempty"` + Forvirtualnetwork bool `json:"forvirtualnetwork,omitempty"` + Group string `json:"group,omitempty"` + Groupid string `json:"groupid,omitempty"` + Guestosid string `json:"guestosid,omitempty"` + Haenable bool `json:"haenable,omitempty"` + Hostid string `json:"hostid,omitempty"` + Hostname string `json:"hostname,omitempty"` + Hypervisor string `json:"hypervisor,omitempty"` + Id string `json:"id,omitempty"` + Instancename string `json:"instancename,omitempty"` + Isdynamicallyscalable bool `json:"isdynamicallyscalable,omitempty"` + Isodisplaytext string `json:"isodisplaytext,omitempty"` + Isoid string `json:"isoid,omitempty"` + Isoname string `json:"isoname,omitempty"` + Keypair string `json:"keypair,omitempty"` + Memory int `json:"memory,omitempty"` + Name string `json:"name,omitempty"` + Networkkbsread int64 `json:"networkkbsread,omitempty"` + Networkkbswrite int64 `json:"networkkbswrite,omitempty"` + Nic []struct { + Broadcasturi string `json:"broadcasturi,omitempty"` + Deviceid string `json:"deviceid,omitempty"` + Gateway string `json:"gateway,omitempty"` + Id string `json:"id,omitempty"` + Ip6address string `json:"ip6address,omitempty"` + Ip6cidr string `json:"ip6cidr,omitempty"` + Ip6gateway string `json:"ip6gateway,omitempty"` + Ipaddress string `json:"ipaddress,omitempty"` + Isdefault bool `json:"isdefault,omitempty"` + Isolationuri string `json:"isolationuri,omitempty"` + Macaddress string `json:"macaddress,omitempty"` + Netmask string `json:"netmask,omitempty"` + Networkid string `json:"networkid,omitempty"` + Networkname string `json:"networkname,omitempty"` + Secondaryip []struct { + Id string `json:"id,omitempty"` + Ipaddress string `json:"ipaddress,omitempty"` + } `json:"secondaryip,omitempty"` + Traffictype string `json:"traffictype,omitempty"` + Type string `json:"type,omitempty"` + Virtualmachineid string `json:"virtualmachineid,omitempty"` + } `json:"nic,omitempty"` + Ostypeid int64 `json:"ostypeid,omitempty"` + Password string `json:"password,omitempty"` + Passwordenabled bool `json:"passwordenabled,omitempty"` + Project string `json:"project,omitempty"` + Projectid string `json:"projectid,omitempty"` + Publicip string `json:"publicip,omitempty"` + Publicipid string `json:"publicipid,omitempty"` + Rootdeviceid int64 `json:"rootdeviceid,omitempty"` + Rootdevicetype string `json:"rootdevicetype,omitempty"` + Securitygroup []struct { + Account string `json:"account,omitempty"` + Description string `json:"description,omitempty"` + Domain string `json:"domain,omitempty"` + Domainid string `json:"domainid,omitempty"` + Egressrule []struct { + Account string `json:"account,omitempty"` + Cidr string `json:"cidr,omitempty"` + Endport int `json:"endport,omitempty"` + Icmpcode int `json:"icmpcode,omitempty"` + Icmptype int `json:"icmptype,omitempty"` + Protocol string `json:"protocol,omitempty"` + Ruleid string `json:"ruleid,omitempty"` + Securitygroupname string `json:"securitygroupname,omitempty"` + Startport int `json:"startport,omitempty"` + Tags []struct { + Account string `json:"account,omitempty"` + Customer string `json:"customer,omitempty"` + Domain string `json:"domain,omitempty"` + Domainid string `json:"domainid,omitempty"` + Key string `json:"key,omitempty"` + Project string `json:"project,omitempty"` + Projectid string `json:"projectid,omitempty"` + Resourceid string 
`json:"resourceid,omitempty"` + Resourcetype string `json:"resourcetype,omitempty"` + Value string `json:"value,omitempty"` + } `json:"tags,omitempty"` + } `json:"egressrule,omitempty"` + Id string `json:"id,omitempty"` + Ingressrule []struct { + Account string `json:"account,omitempty"` + Cidr string `json:"cidr,omitempty"` + Endport int `json:"endport,omitempty"` + Icmpcode int `json:"icmpcode,omitempty"` + Icmptype int `json:"icmptype,omitempty"` + Protocol string `json:"protocol,omitempty"` + Ruleid string `json:"ruleid,omitempty"` + Securitygroupname string `json:"securitygroupname,omitempty"` + Startport int `json:"startport,omitempty"` + Tags []struct { + Account string `json:"account,omitempty"` + Customer string `json:"customer,omitempty"` + Domain string `json:"domain,omitempty"` + Domainid string `json:"domainid,omitempty"` + Key string `json:"key,omitempty"` + Project string `json:"project,omitempty"` + Projectid string `json:"projectid,omitempty"` + Resourceid string `json:"resourceid,omitempty"` + Resourcetype string `json:"resourcetype,omitempty"` + Value string `json:"value,omitempty"` + } `json:"tags,omitempty"` + } `json:"ingressrule,omitempty"` + Name string `json:"name,omitempty"` + Project string `json:"project,omitempty"` + Projectid string `json:"projectid,omitempty"` + Tags []struct { + Account string `json:"account,omitempty"` + Customer string `json:"customer,omitempty"` + Domain string `json:"domain,omitempty"` + Domainid string `json:"domainid,omitempty"` + Key string `json:"key,omitempty"` + Project string `json:"project,omitempty"` + Projectid string `json:"projectid,omitempty"` + Resourceid string `json:"resourceid,omitempty"` + Resourcetype string `json:"resourcetype,omitempty"` + Value string `json:"value,omitempty"` + } `json:"tags,omitempty"` + } `json:"securitygroup,omitempty"` + Serviceofferingid string `json:"serviceofferingid,omitempty"` + Serviceofferingname string `json:"serviceofferingname,omitempty"` + Servicestate string `json:"servicestate,omitempty"` + State string `json:"state,omitempty"` + Tags []struct { + Account string `json:"account,omitempty"` + Customer string `json:"customer,omitempty"` + Domain string `json:"domain,omitempty"` + Domainid string `json:"domainid,omitempty"` + Key string `json:"key,omitempty"` + Project string `json:"project,omitempty"` + Projectid string `json:"projectid,omitempty"` + Resourceid string `json:"resourceid,omitempty"` + Resourcetype string `json:"resourcetype,omitempty"` + Value string `json:"value,omitempty"` + } `json:"tags,omitempty"` + Templatedisplaytext string `json:"templatedisplaytext,omitempty"` + Templateid string `json:"templateid,omitempty"` + Templatename string `json:"templatename,omitempty"` + Vgpu string `json:"vgpu,omitempty"` + Zoneid string `json:"zoneid,omitempty"` + Zonename string `json:"zonename,omitempty"` +} diff --git a/vendor/github.com/xanzy/go-cloudstack/cloudstack/StoragePoolService.go b/vendor/github.com/xanzy/go-cloudstack/cloudstack/StoragePoolService.go new file mode 100644 index 000000000..e5a0f6aba --- /dev/null +++ b/vendor/github.com/xanzy/go-cloudstack/cloudstack/StoragePoolService.go @@ -0,0 +1,300 @@ +// +// Copyright 2014, Sander van Harmelen +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package cloudstack + +import ( + "encoding/json" + "net/url" + "strconv" +) + +type ListStorageProvidersParams struct { + p map[string]interface{} +} + +func (p *ListStorageProvidersParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["keyword"]; found { + u.Set("keyword", v.(string)) + } + if v, found := p.p["page"]; found { + vv := strconv.Itoa(v.(int)) + u.Set("page", vv) + } + if v, found := p.p["pagesize"]; found { + vv := strconv.Itoa(v.(int)) + u.Set("pagesize", vv) + } + if v, found := p.p["type"]; found { + u.Set("type", v.(string)) + } + return u +} + +func (p *ListStorageProvidersParams) SetKeyword(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["keyword"] = v + return +} + +func (p *ListStorageProvidersParams) SetPage(v int) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["page"] = v + return +} + +func (p *ListStorageProvidersParams) SetPagesize(v int) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["pagesize"] = v + return +} + +func (p *ListStorageProvidersParams) SetType(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["storagePoolType"] = v + return +} + +// You should always use this function to get a new ListStorageProvidersParams instance, +// as then you are sure you have configured all required params +func (s *StoragePoolService) NewListStorageProvidersParams(storagePoolType string) *ListStorageProvidersParams { + p := &ListStorageProvidersParams{} + p.p = make(map[string]interface{}) + p.p["storagePoolType"] = storagePoolType + return p +} + +// Lists storage providers. 
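Reviewer note on ListStorageProvidersParams above: SetType and NewListStorageProvidersParams both store the value under the "storagePoolType" map key, while toURLValues only serializes p.p["type"], so the required type never reaches the query string. That looks like an upstream generator quirk; since this is pinned vendor code it is left as-is here, but an in-package example (hypothetical name, needs "fmt" imported) would expose it:

func ExampleListStorageProvidersParams_typeMismatch() {
	p := &ListStorageProvidersParams{p: make(map[string]interface{})}
	p.SetType("NETWORK")                            // stored under "storagePoolType"...
	fmt.Printf("%q\n", p.toURLValues().Get("type")) // ...but serialized from "type"
	// Output: ""
}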
+func (s *StoragePoolService) ListStorageProviders(p *ListStorageProvidersParams) (*ListStorageProvidersResponse, error) { + resp, err := s.cs.newRequest("listStorageProviders", p.toURLValues()) + if err != nil { + return nil, err + } + + var r ListStorageProvidersResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + return &r, nil +} + +type ListStorageProvidersResponse struct { + Count int `json:"count"` + StorageProviders []*StorageProvider `json:"storageprovider"` +} + +type StorageProvider struct { + Name string `json:"name,omitempty"` + Type string `json:"type,omitempty"` +} + +type EnableStorageMaintenanceParams struct { + p map[string]interface{} +} + +func (p *EnableStorageMaintenanceParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["id"]; found { + u.Set("id", v.(string)) + } + return u +} + +func (p *EnableStorageMaintenanceParams) SetId(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["id"] = v + return +} + +// You should always use this function to get a new EnableStorageMaintenanceParams instance, +// as then you are sure you have configured all required params +func (s *StoragePoolService) NewEnableStorageMaintenanceParams(id string) *EnableStorageMaintenanceParams { + p := &EnableStorageMaintenanceParams{} + p.p = make(map[string]interface{}) + p.p["id"] = id + return p +} + +// Puts storage pool into maintenance state +func (s *StoragePoolService) EnableStorageMaintenance(p *EnableStorageMaintenanceParams) (*EnableStorageMaintenanceResponse, error) { + resp, err := s.cs.newRequest("enableStorageMaintenance", p.toURLValues()) + if err != nil { + return nil, err + } + + var r EnableStorageMaintenanceResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + + // If we have a async client, we need to wait for the async result + if s.cs.async { + b, err := s.cs.GetAsyncJobResult(r.JobID, s.cs.timeout) + if err != nil { + if err == AsyncTimeoutErr { + return &r, err + } + return nil, err + } + + b, err = getRawValue(b) + if err != nil { + return nil, err + } + + if err := json.Unmarshal(b, &r); err != nil { + return nil, err + } + } + return &r, nil +} + +type EnableStorageMaintenanceResponse struct { + JobID string `json:"jobid,omitempty"` + Capacityiops int64 `json:"capacityiops,omitempty"` + Clusterid string `json:"clusterid,omitempty"` + Clustername string `json:"clustername,omitempty"` + Created string `json:"created,omitempty"` + Disksizeallocated int64 `json:"disksizeallocated,omitempty"` + Disksizetotal int64 `json:"disksizetotal,omitempty"` + Disksizeused int64 `json:"disksizeused,omitempty"` + Hypervisor string `json:"hypervisor,omitempty"` + Id string `json:"id,omitempty"` + Ipaddress string `json:"ipaddress,omitempty"` + Name string `json:"name,omitempty"` + Overprovisionfactor string `json:"overprovisionfactor,omitempty"` + Path string `json:"path,omitempty"` + Podid string `json:"podid,omitempty"` + Podname string `json:"podname,omitempty"` + Scope string `json:"scope,omitempty"` + State string `json:"state,omitempty"` + Storagecapabilities map[string]string `json:"storagecapabilities,omitempty"` + Suitableformigration bool `json:"suitableformigration,omitempty"` + Tags string `json:"tags,omitempty"` + Type string `json:"type,omitempty"` + Zoneid string `json:"zoneid,omitempty"` + Zonename string `json:"zonename,omitempty"` +} + +type CancelStorageMaintenanceParams struct { + p map[string]interface{} +} + +func (p 
*CancelStorageMaintenanceParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["id"]; found { + u.Set("id", v.(string)) + } + return u +} + +func (p *CancelStorageMaintenanceParams) SetId(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["id"] = v + return +} + +// You should always use this function to get a new CancelStorageMaintenanceParams instance, +// as then you are sure you have configured all required params +func (s *StoragePoolService) NewCancelStorageMaintenanceParams(id string) *CancelStorageMaintenanceParams { + p := &CancelStorageMaintenanceParams{} + p.p = make(map[string]interface{}) + p.p["id"] = id + return p +} + +// Cancels maintenance for primary storage +func (s *StoragePoolService) CancelStorageMaintenance(p *CancelStorageMaintenanceParams) (*CancelStorageMaintenanceResponse, error) { + resp, err := s.cs.newRequest("cancelStorageMaintenance", p.toURLValues()) + if err != nil { + return nil, err + } + + var r CancelStorageMaintenanceResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + + // If we have a async client, we need to wait for the async result + if s.cs.async { + b, err := s.cs.GetAsyncJobResult(r.JobID, s.cs.timeout) + if err != nil { + if err == AsyncTimeoutErr { + return &r, err + } + return nil, err + } + + b, err = getRawValue(b) + if err != nil { + return nil, err + } + + if err := json.Unmarshal(b, &r); err != nil { + return nil, err + } + } + return &r, nil +} + +type CancelStorageMaintenanceResponse struct { + JobID string `json:"jobid,omitempty"` + Capacityiops int64 `json:"capacityiops,omitempty"` + Clusterid string `json:"clusterid,omitempty"` + Clustername string `json:"clustername,omitempty"` + Created string `json:"created,omitempty"` + Disksizeallocated int64 `json:"disksizeallocated,omitempty"` + Disksizetotal int64 `json:"disksizetotal,omitempty"` + Disksizeused int64 `json:"disksizeused,omitempty"` + Hypervisor string `json:"hypervisor,omitempty"` + Id string `json:"id,omitempty"` + Ipaddress string `json:"ipaddress,omitempty"` + Name string `json:"name,omitempty"` + Overprovisionfactor string `json:"overprovisionfactor,omitempty"` + Path string `json:"path,omitempty"` + Podid string `json:"podid,omitempty"` + Podname string `json:"podname,omitempty"` + Scope string `json:"scope,omitempty"` + State string `json:"state,omitempty"` + Storagecapabilities map[string]string `json:"storagecapabilities,omitempty"` + Suitableformigration bool `json:"suitableformigration,omitempty"` + Tags string `json:"tags,omitempty"` + Type string `json:"type,omitempty"` + Zoneid string `json:"zoneid,omitempty"` + Zonename string `json:"zonename,omitempty"` +} diff --git a/vendor/github.com/xanzy/go-cloudstack/cloudstack/StratosphereSSPService.go b/vendor/github.com/xanzy/go-cloudstack/cloudstack/StratosphereSSPService.go new file mode 100644 index 000000000..4d63b8f57 --- /dev/null +++ b/vendor/github.com/xanzy/go-cloudstack/cloudstack/StratosphereSSPService.go @@ -0,0 +1,132 @@ +// +// Copyright 2014, Sander van Harmelen +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package cloudstack + +import ( + "encoding/json" + "net/url" +) + +type AddStratosphereSspParams struct { + p map[string]interface{} +} + +func (p *AddStratosphereSspParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["name"]; found { + u.Set("name", v.(string)) + } + if v, found := p.p["password"]; found { + u.Set("password", v.(string)) + } + if v, found := p.p["tenantuuid"]; found { + u.Set("tenantuuid", v.(string)) + } + if v, found := p.p["url"]; found { + u.Set("url", v.(string)) + } + if v, found := p.p["username"]; found { + u.Set("username", v.(string)) + } + if v, found := p.p["zoneid"]; found { + u.Set("zoneid", v.(string)) + } + return u +} + +func (p *AddStratosphereSspParams) SetName(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["name"] = v + return +} + +func (p *AddStratosphereSspParams) SetPassword(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["password"] = v + return +} + +func (p *AddStratosphereSspParams) SetTenantuuid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["tenantuuid"] = v + return +} + +func (p *AddStratosphereSspParams) SetUrl(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["url"] = v + return +} + +func (p *AddStratosphereSspParams) SetUsername(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["username"] = v + return +} + +func (p *AddStratosphereSspParams) SetZoneid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["zoneid"] = v + return +} + +// You should always use this function to get a new AddStratosphereSspParams instance, +// as then you are sure you have configured all required params +func (s *StratosphereSSPService) NewAddStratosphereSspParams(name string, url string, zoneid string) *AddStratosphereSspParams { + p := &AddStratosphereSspParams{} + p.p = make(map[string]interface{}) + p.p["name"] = name + p.p["url"] = url + p.p["zoneid"] = zoneid + return p +} + +// Adds stratosphere ssp server +func (s *StratosphereSSPService) AddStratosphereSsp(p *AddStratosphereSspParams) (*AddStratosphereSspResponse, error) { + resp, err := s.cs.newRequest("addStratosphereSsp", p.toURLValues()) + if err != nil { + return nil, err + } + + var r AddStratosphereSspResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + return &r, nil +} + +type AddStratosphereSspResponse struct { + Hostid string `json:"hostid,omitempty"` + Name string `json:"name,omitempty"` + Url string `json:"url,omitempty"` + Zoneid string `json:"zoneid,omitempty"` +} diff --git a/vendor/github.com/xanzy/go-cloudstack/cloudstack/SwiftService.go b/vendor/github.com/xanzy/go-cloudstack/cloudstack/SwiftService.go new file mode 100644 index 000000000..f1c743365 --- /dev/null +++ b/vendor/github.com/xanzy/go-cloudstack/cloudstack/SwiftService.go @@ -0,0 +1,243 @@ +// +// Copyright 2014, Sander van Harmelen +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in 
compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package cloudstack + +import ( + "encoding/json" + "fmt" + "net/url" + "strconv" +) + +type AddSwiftParams struct { + p map[string]interface{} +} + +func (p *AddSwiftParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["account"]; found { + u.Set("account", v.(string)) + } + if v, found := p.p["key"]; found { + u.Set("key", v.(string)) + } + if v, found := p.p["url"]; found { + u.Set("url", v.(string)) + } + if v, found := p.p["username"]; found { + u.Set("username", v.(string)) + } + return u +} + +func (p *AddSwiftParams) SetAccount(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["account"] = v + return +} + +func (p *AddSwiftParams) SetKey(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["key"] = v + return +} + +func (p *AddSwiftParams) SetUrl(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["url"] = v + return +} + +func (p *AddSwiftParams) SetUsername(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["username"] = v + return +} + +// You should always use this function to get a new AddSwiftParams instance, +// as then you are sure you have configured all required params +func (s *SwiftService) NewAddSwiftParams(url string) *AddSwiftParams { + p := &AddSwiftParams{} + p.p = make(map[string]interface{}) + p.p["url"] = url + return p +} + +// Adds Swift. 
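A hedged usage sketch for AddSwift below: only url is required, the rest are optional credentials. All values, the helper name, and the cs.Swift accessor are placeholders or assumptions.

// Assumes: import ("log"; "github.com/xanzy/go-cloudstack/cloudstack")
func addSwiftBackend(cs *cloudstack.CloudStackClient) {
	p := cs.Swift.NewAddSwiftParams("http://swift.example.com/v1/AUTH_tenant")
	p.SetAccount("tenant")  // optional
	p.SetUsername("backup") // optional
	p.SetKey("secret-key")  // optional

	r, err := cs.Swift.AddSwift(p)
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("added Swift secondary storage %s", r.Id)
}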
+func (s *SwiftService) AddSwift(p *AddSwiftParams) (*AddSwiftResponse, error) { + resp, err := s.cs.newRequest("addSwift", p.toURLValues()) + if err != nil { + return nil, err + } + + var r AddSwiftResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + return &r, nil +} + +type AddSwiftResponse struct { + Details []string `json:"details,omitempty"` + Id string `json:"id,omitempty"` + Name string `json:"name,omitempty"` + Protocol string `json:"protocol,omitempty"` + Providername string `json:"providername,omitempty"` + Scope string `json:"scope,omitempty"` + Url string `json:"url,omitempty"` + Zoneid string `json:"zoneid,omitempty"` + Zonename string `json:"zonename,omitempty"` +} + +type ListSwiftsParams struct { + p map[string]interface{} +} + +func (p *ListSwiftsParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["id"]; found { + vv := strconv.FormatInt(v.(int64), 10) + u.Set("id", vv) + } + if v, found := p.p["keyword"]; found { + u.Set("keyword", v.(string)) + } + if v, found := p.p["page"]; found { + vv := strconv.Itoa(v.(int)) + u.Set("page", vv) + } + if v, found := p.p["pagesize"]; found { + vv := strconv.Itoa(v.(int)) + u.Set("pagesize", vv) + } + return u +} + +func (p *ListSwiftsParams) SetId(v int64) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["id"] = v + return +} + +func (p *ListSwiftsParams) SetKeyword(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["keyword"] = v + return +} + +func (p *ListSwiftsParams) SetPage(v int) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["page"] = v + return +} + +func (p *ListSwiftsParams) SetPagesize(v int) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["pagesize"] = v + return +} + +// You should always use this function to get a new ListSwiftsParams instance, +// as then you are sure you have configured all required params +func (s *SwiftService) NewListSwiftsParams() *ListSwiftsParams { + p := &ListSwiftsParams{} + p.p = make(map[string]interface{}) + return p +} + +// This is a courtesy helper function, which in some cases may not work as expected! +func (s *SwiftService) GetSwiftID(keyword string) (string, error) { + p := &ListSwiftsParams{} + p.p = make(map[string]interface{}) + + p.p["keyword"] = keyword + + l, err := s.ListSwifts(p) + if err != nil { + return "", err + } + + if l.Count == 0 { + return "", fmt.Errorf("No match found for %s: %+v", keyword, l) + } + + if l.Count == 1 { + return l.Swifts[0].Id, nil + } + + if l.Count > 1 { + for _, v := range l.Swifts { + if v.Name == keyword { + return v.Id, nil + } + } + } + return "", fmt.Errorf("Could not find an exact match for %s: %+v", keyword, l) +} + +// List Swift. 
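ListSwiftsParams carries the page/pagesize pair shared by the list calls in this package. A generic pagination sketch, assuming (as is usual for CloudStack list responses, though not shown in this file) that Count is the total across all pages:

// Assumes: import "github.com/xanzy/go-cloudstack/cloudstack"
func allSwifts(cs *cloudstack.CloudStackClient) ([]*cloudstack.Swift, error) {
	p := cs.Swift.NewListSwiftsParams()
	p.SetPagesize(50)

	var all []*cloudstack.Swift
	for page := 1; ; page++ {
		p.SetPage(page)
		r, err := cs.Swift.ListSwifts(p)
		if err != nil {
			return nil, err
		}
		all = append(all, r.Swifts...)
		if len(r.Swifts) == 0 || len(all) >= r.Count {
			return all, nil
		}
	}
}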
+func (s *SwiftService) ListSwifts(p *ListSwiftsParams) (*ListSwiftsResponse, error) { + resp, err := s.cs.newRequest("listSwifts", p.toURLValues()) + if err != nil { + return nil, err + } + + var r ListSwiftsResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + return &r, nil +} + +type ListSwiftsResponse struct { + Count int `json:"count"` + Swifts []*Swift `json:"swift"` +} + +type Swift struct { + Details []string `json:"details,omitempty"` + Id string `json:"id,omitempty"` + Name string `json:"name,omitempty"` + Protocol string `json:"protocol,omitempty"` + Providername string `json:"providername,omitempty"` + Scope string `json:"scope,omitempty"` + Url string `json:"url,omitempty"` + Zoneid string `json:"zoneid,omitempty"` + Zonename string `json:"zonename,omitempty"` +} diff --git a/vendor/github.com/xanzy/go-cloudstack/cloudstack/SystemCapacityService.go b/vendor/github.com/xanzy/go-cloudstack/cloudstack/SystemCapacityService.go new file mode 100644 index 000000000..fea76c8ab --- /dev/null +++ b/vendor/github.com/xanzy/go-cloudstack/cloudstack/SystemCapacityService.go @@ -0,0 +1,178 @@ +// +// Copyright 2014, Sander van Harmelen +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package cloudstack + +import ( + "encoding/json" + "net/url" + "strconv" +) + +type ListCapacityParams struct { + p map[string]interface{} +} + +func (p *ListCapacityParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["clusterid"]; found { + u.Set("clusterid", v.(string)) + } + if v, found := p.p["fetchlatest"]; found { + vv := strconv.FormatBool(v.(bool)) + u.Set("fetchlatest", vv) + } + if v, found := p.p["keyword"]; found { + u.Set("keyword", v.(string)) + } + if v, found := p.p["page"]; found { + vv := strconv.Itoa(v.(int)) + u.Set("page", vv) + } + if v, found := p.p["pagesize"]; found { + vv := strconv.Itoa(v.(int)) + u.Set("pagesize", vv) + } + if v, found := p.p["podid"]; found { + u.Set("podid", v.(string)) + } + if v, found := p.p["sortby"]; found { + u.Set("sortby", v.(string)) + } + if v, found := p.p["type"]; found { + vv := strconv.Itoa(v.(int)) + u.Set("type", vv) + } + if v, found := p.p["zoneid"]; found { + u.Set("zoneid", v.(string)) + } + return u +} + +func (p *ListCapacityParams) SetClusterid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["clusterid"] = v + return +} + +func (p *ListCapacityParams) SetFetchlatest(v bool) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["fetchlatest"] = v + return +} + +func (p *ListCapacityParams) SetKeyword(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["keyword"] = v + return +} + +func (p *ListCapacityParams) SetPage(v int) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["page"] = v + return +} + +func (p *ListCapacityParams) SetPagesize(v int) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["pagesize"] = v + return +} + +func (p 
*ListCapacityParams) SetPodid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["podid"] = v + return +} + +func (p *ListCapacityParams) SetSortby(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["sortby"] = v + return +} + +func (p *ListCapacityParams) SetType(v int) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["systemCapacityType"] = v + return +} + +func (p *ListCapacityParams) SetZoneid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["zoneid"] = v + return +} + +// You should always use this function to get a new ListCapacityParams instance, +// as then you are sure you have configured all required params +func (s *SystemCapacityService) NewListCapacityParams() *ListCapacityParams { + p := &ListCapacityParams{} + p.p = make(map[string]interface{}) + return p +} + +// Lists all the system wide capacities. +func (s *SystemCapacityService) ListCapacity(p *ListCapacityParams) (*ListCapacityResponse, error) { + resp, err := s.cs.newRequest("listCapacity", p.toURLValues()) + if err != nil { + return nil, err + } + + var r ListCapacityResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + return &r, nil +} + +type ListCapacityResponse struct { + Count int `json:"count"` + Capacity []*Capacity `json:"capacity"` +} + +type Capacity struct { + Capacitytotal int64 `json:"capacitytotal,omitempty"` + Capacityused int64 `json:"capacityused,omitempty"` + Clusterid string `json:"clusterid,omitempty"` + Clustername string `json:"clustername,omitempty"` + Percentused string `json:"percentused,omitempty"` + Podid string `json:"podid,omitempty"` + Podname string `json:"podname,omitempty"` + Type int `json:"type,omitempty"` + Zoneid string `json:"zoneid,omitempty"` + Zonename string `json:"zonename,omitempty"` +} diff --git a/vendor/github.com/xanzy/go-cloudstack/cloudstack/SystemVMService.go b/vendor/github.com/xanzy/go-cloudstack/cloudstack/SystemVMService.go new file mode 100644 index 000000000..35369654e --- /dev/null +++ b/vendor/github.com/xanzy/go-cloudstack/cloudstack/SystemVMService.go @@ -0,0 +1,1026 @@ +// +// Copyright 2014, Sander van Harmelen +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// + +package cloudstack + +import ( + "encoding/json" + "fmt" + "net/url" + "strconv" + "strings" +) + +type StartSystemVmParams struct { + p map[string]interface{} +} + +func (p *StartSystemVmParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["id"]; found { + u.Set("id", v.(string)) + } + return u +} + +func (p *StartSystemVmParams) SetId(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["id"] = v + return +} + +// You should always use this function to get a new StartSystemVmParams instance, +// as then you are sure you have configured all required params +func (s *SystemVMService) NewStartSystemVmParams(id string) *StartSystemVmParams { + p := &StartSystemVmParams{} + p.p = make(map[string]interface{}) + p.p["id"] = id + return p +} + +// Starts a system virtual machine. +func (s *SystemVMService) StartSystemVm(p *StartSystemVmParams) (*StartSystemVmResponse, error) { + resp, err := s.cs.newRequest("startSystemVm", p.toURLValues()) + if err != nil { + return nil, err + } + + var r StartSystemVmResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + + // If we have a async client, we need to wait for the async result + if s.cs.async { + b, err := s.cs.GetAsyncJobResult(r.JobID, s.cs.timeout) + if err != nil { + if err == AsyncTimeoutErr { + return &r, err + } + return nil, err + } + + b, err = getRawValue(b) + if err != nil { + return nil, err + } + + if err := json.Unmarshal(b, &r); err != nil { + return nil, err + } + } + return &r, nil +} + +type StartSystemVmResponse struct { + JobID string `json:"jobid,omitempty"` + Activeviewersessions int `json:"activeviewersessions,omitempty"` + Created string `json:"created,omitempty"` + Dns1 string `json:"dns1,omitempty"` + Dns2 string `json:"dns2,omitempty"` + Gateway string `json:"gateway,omitempty"` + Hostid string `json:"hostid,omitempty"` + Hostname string `json:"hostname,omitempty"` + Id string `json:"id,omitempty"` + Jobid string `json:"jobid,omitempty"` + Jobstatus int `json:"jobstatus,omitempty"` + Linklocalip string `json:"linklocalip,omitempty"` + Linklocalmacaddress string `json:"linklocalmacaddress,omitempty"` + Linklocalnetmask string `json:"linklocalnetmask,omitempty"` + Name string `json:"name,omitempty"` + Networkdomain string `json:"networkdomain,omitempty"` + Podid string `json:"podid,omitempty"` + Privateip string `json:"privateip,omitempty"` + Privatemacaddress string `json:"privatemacaddress,omitempty"` + Privatenetmask string `json:"privatenetmask,omitempty"` + Publicip string `json:"publicip,omitempty"` + Publicmacaddress string `json:"publicmacaddress,omitempty"` + Publicnetmask string `json:"publicnetmask,omitempty"` + State string `json:"state,omitempty"` + Systemvmtype string `json:"systemvmtype,omitempty"` + Templateid string `json:"templateid,omitempty"` + Zoneid string `json:"zoneid,omitempty"` + Zonename string `json:"zonename,omitempty"` +} + +type RebootSystemVmParams struct { + p map[string]interface{} +} + +func (p *RebootSystemVmParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["id"]; found { + u.Set("id", v.(string)) + } + return u +} + +func (p *RebootSystemVmParams) SetId(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["id"] = v + return +} + +// You should always use this function to get a new RebootSystemVmParams instance, +// as then you are sure you have configured all required params +func (s 
*SystemVMService) NewRebootSystemVmParams(id string) *RebootSystemVmParams {
+	p := &RebootSystemVmParams{}
+	p.p = make(map[string]interface{})
+	p.p["id"] = id
+	return p
+}
+
+// Reboots a system VM.
+func (s *SystemVMService) RebootSystemVm(p *RebootSystemVmParams) (*RebootSystemVmResponse, error) {
+	resp, err := s.cs.newRequest("rebootSystemVm", p.toURLValues())
+	if err != nil {
+		return nil, err
+	}
+
+	var r RebootSystemVmResponse
+	if err := json.Unmarshal(resp, &r); err != nil {
+		return nil, err
+	}
+
+	// If we have an async client, we need to wait for the async result
+	if s.cs.async {
+		b, err := s.cs.GetAsyncJobResult(r.JobID, s.cs.timeout)
+		if err != nil {
+			if err == AsyncTimeoutErr {
+				return &r, err
+			}
+			return nil, err
+		}
+
+		b, err = getRawValue(b)
+		if err != nil {
+			return nil, err
+		}
+
+		if err := json.Unmarshal(b, &r); err != nil {
+			return nil, err
+		}
+	}
+	return &r, nil
+}
+
+type RebootSystemVmResponse struct {
+	JobID string `json:"jobid,omitempty"`
+	Activeviewersessions int `json:"activeviewersessions,omitempty"`
+	Created string `json:"created,omitempty"`
+	Dns1 string `json:"dns1,omitempty"`
+	Dns2 string `json:"dns2,omitempty"`
+	Gateway string `json:"gateway,omitempty"`
+	Hostid string `json:"hostid,omitempty"`
+	Hostname string `json:"hostname,omitempty"`
+	Id string `json:"id,omitempty"`
+	Jobid string `json:"jobid,omitempty"`
+	Jobstatus int `json:"jobstatus,omitempty"`
+	Linklocalip string `json:"linklocalip,omitempty"`
+	Linklocalmacaddress string `json:"linklocalmacaddress,omitempty"`
+	Linklocalnetmask string `json:"linklocalnetmask,omitempty"`
+	Name string `json:"name,omitempty"`
+	Networkdomain string `json:"networkdomain,omitempty"`
+	Podid string `json:"podid,omitempty"`
+	Privateip string `json:"privateip,omitempty"`
+	Privatemacaddress string `json:"privatemacaddress,omitempty"`
+	Privatenetmask string `json:"privatenetmask,omitempty"`
+	Publicip string `json:"publicip,omitempty"`
+	Publicmacaddress string `json:"publicmacaddress,omitempty"`
+	Publicnetmask string `json:"publicnetmask,omitempty"`
+	State string `json:"state,omitempty"`
+	Systemvmtype string `json:"systemvmtype,omitempty"`
+	Templateid string `json:"templateid,omitempty"`
+	Zoneid string `json:"zoneid,omitempty"`
+	Zonename string `json:"zonename,omitempty"`
+}
+
+type StopSystemVmParams struct {
+	p map[string]interface{}
+}
+
+func (p *StopSystemVmParams) toURLValues() url.Values {
+	u := url.Values{}
+	if p.p == nil {
+		return u
+	}
+	if v, found := p.p["forced"]; found {
+		vv := strconv.FormatBool(v.(bool))
+		u.Set("forced", vv)
+	}
+	if v, found := p.p["id"]; found {
+		u.Set("id", v.(string))
+	}
+	return u
+}
+
+func (p *StopSystemVmParams) SetForced(v bool) {
+	if p.p == nil {
+		p.p = make(map[string]interface{})
+	}
+	p.p["forced"] = v
+	return
+}
+
+func (p *StopSystemVmParams) SetId(v string) {
+	if p.p == nil {
+		p.p = make(map[string]interface{})
+	}
+	p.p["id"] = v
+	return
+}
+
+// You should always use this function to get a new StopSystemVmParams instance,
+// as then you are sure you have configured all required params
+func (s *SystemVMService) NewStopSystemVmParams(id string) *StopSystemVmParams {
+	p := &StopSystemVmParams{}
+	p.p = make(map[string]interface{})
+	p.p["id"] = id
+	return p
+}
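Editor's note: the generated service methods in this file only block until job completion when the client was built in async mode; otherwise the returned struct merely carries the job ID. Below is a minimal, hypothetical usage sketch, assuming go-cloudstack's package-level NewAsyncClient constructor and a SystemVM service field on the client; the endpoint, credentials, and UUID are placeholders.

```go
package main

import (
	"fmt"
	"log"

	"github.com/xanzy/go-cloudstack/cloudstack"
)

func main() {
	// An async client makes calls such as RebootSystemVm poll
	// GetAsyncJobResult until the job finishes (or times out).
	cs := cloudstack.NewAsyncClient(
		"https://cloud.example.com/client/api", // API endpoint (placeholder)
		"API_KEY", "SECRET_KEY", // credentials (placeholders)
		false, // verifyssl
	)

	// Always build params through the New...Params constructor so the
	// required fields are guaranteed to be set.
	p := cs.SystemVM.NewRebootSystemVmParams("0f1d3c7e-0000-0000-0000-placeholder")

	r, err := cs.SystemVM.RebootSystemVm(p)
	if err != nil {
		log.Fatalf("rebooting system VM: %v", err)
	}
	fmt.Printf("system VM %s is %s\n", r.Name, r.State)
}
```
+
+// Stops a system VM.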
+func (s *SystemVMService) StopSystemVm(p *StopSystemVmParams) (*StopSystemVmResponse, error) {
+	resp, err := s.cs.newRequest("stopSystemVm", p.toURLValues())
+	if err != nil {
+		return nil, err
+	}
+
+	var r StopSystemVmResponse
+	if err := json.Unmarshal(resp, &r); err != nil {
+		return nil, err
+	}
+
+	// If we have an async client, we need to wait for the async result
+	if s.cs.async {
+		b, err := s.cs.GetAsyncJobResult(r.JobID, s.cs.timeout)
+		if err != nil {
+			if err == AsyncTimeoutErr {
+				return &r, err
+			}
+			return nil, err
+		}
+
+		b, err = getRawValue(b)
+		if err != nil {
+			return nil, err
+		}
+
+		if err := json.Unmarshal(b, &r); err != nil {
+			return nil, err
+		}
+	}
+	return &r, nil
+}
+
+type StopSystemVmResponse struct {
+	JobID string `json:"jobid,omitempty"`
+	Activeviewersessions int `json:"activeviewersessions,omitempty"`
+	Created string `json:"created,omitempty"`
+	Dns1 string `json:"dns1,omitempty"`
+	Dns2 string `json:"dns2,omitempty"`
+	Gateway string `json:"gateway,omitempty"`
+	Hostid string `json:"hostid,omitempty"`
+	Hostname string `json:"hostname,omitempty"`
+	Id string `json:"id,omitempty"`
+	Jobid string `json:"jobid,omitempty"`
+	Jobstatus int `json:"jobstatus,omitempty"`
+	Linklocalip string `json:"linklocalip,omitempty"`
+	Linklocalmacaddress string `json:"linklocalmacaddress,omitempty"`
+	Linklocalnetmask string `json:"linklocalnetmask,omitempty"`
+	Name string `json:"name,omitempty"`
+	Networkdomain string `json:"networkdomain,omitempty"`
+	Podid string `json:"podid,omitempty"`
+	Privateip string `json:"privateip,omitempty"`
+	Privatemacaddress string `json:"privatemacaddress,omitempty"`
+	Privatenetmask string `json:"privatenetmask,omitempty"`
+	Publicip string `json:"publicip,omitempty"`
+	Publicmacaddress string `json:"publicmacaddress,omitempty"`
+	Publicnetmask string `json:"publicnetmask,omitempty"`
+	State string `json:"state,omitempty"`
+	Systemvmtype string `json:"systemvmtype,omitempty"`
+	Templateid string `json:"templateid,omitempty"`
+	Zoneid string `json:"zoneid,omitempty"`
+	Zonename string `json:"zonename,omitempty"`
+}
+
+type DestroySystemVmParams struct {
+	p map[string]interface{}
+}
+
+func (p *DestroySystemVmParams) toURLValues() url.Values {
+	u := url.Values{}
+	if p.p == nil {
+		return u
+	}
+	if v, found := p.p["id"]; found {
+		u.Set("id", v.(string))
+	}
+	return u
+}
+
+func (p *DestroySystemVmParams) SetId(v string) {
+	if p.p == nil {
+		p.p = make(map[string]interface{})
+	}
+	p.p["id"] = v
+	return
+}
+
+// You should always use this function to get a new DestroySystemVmParams instance,
+// as then you are sure you have configured all required params
+func (s *SystemVMService) NewDestroySystemVmParams(id string) *DestroySystemVmParams {
+	p := &DestroySystemVmParams{}
+	p.p = make(map[string]interface{})
+	p.p["id"] = id
+	return p
+}
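Building on the sketch above, a hedged example of the stop-then-destroy flow these calls support; DestroySystemVm is defined immediately below, and with an async client each call returns only after its job finishes.

```go
// stopAndDestroy force-stops a system VM, then destroys it. The cs
// argument is assumed to be the async client from the earlier sketch.
func stopAndDestroy(cs *cloudstack.CloudStackClient, id string) error {
	sp := cs.SystemVM.NewStopSystemVmParams(id)
	sp.SetForced(true) // optional: serialized as forced=true
	if _, err := cs.SystemVM.StopSystemVm(sp); err != nil {
		return err
	}
	dp := cs.SystemVM.NewDestroySystemVmParams(id)
	_, err := cs.SystemVM.DestroySystemVm(dp)
	return err
}
```
+
+// Destroys a system virtual machine.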
+func (s *SystemVMService) DestroySystemVm(p *DestroySystemVmParams) (*DestroySystemVmResponse, error) {
+	resp, err := s.cs.newRequest("destroySystemVm", p.toURLValues())
+	if err != nil {
+		return nil, err
+	}
+
+	var r DestroySystemVmResponse
+	if err := json.Unmarshal(resp, &r); err != nil {
+		return nil, err
+	}
+
+	// If we have an async client, we need to wait for the async result
+	if s.cs.async {
+		b, err := s.cs.GetAsyncJobResult(r.JobID, s.cs.timeout)
+		if err != nil {
+			if err == AsyncTimeoutErr {
+				return &r, err
+			}
+			return nil, err
+		}
+
+		b, err = getRawValue(b)
+		if err != nil {
+			return nil, err
+		}
+
+		if err := json.Unmarshal(b, &r); err != nil {
+			return nil, err
+		}
+	}
+	return &r, nil
+}
+
+type DestroySystemVmResponse struct {
+	JobID string `json:"jobid,omitempty"`
+	Activeviewersessions int `json:"activeviewersessions,omitempty"`
+	Created string `json:"created,omitempty"`
+	Dns1 string `json:"dns1,omitempty"`
+	Dns2 string `json:"dns2,omitempty"`
+	Gateway string `json:"gateway,omitempty"`
+	Hostid string `json:"hostid,omitempty"`
+	Hostname string `json:"hostname,omitempty"`
+	Id string `json:"id,omitempty"`
+	Jobid string `json:"jobid,omitempty"`
+	Jobstatus int `json:"jobstatus,omitempty"`
+	Linklocalip string `json:"linklocalip,omitempty"`
+	Linklocalmacaddress string `json:"linklocalmacaddress,omitempty"`
+	Linklocalnetmask string `json:"linklocalnetmask,omitempty"`
+	Name string `json:"name,omitempty"`
+	Networkdomain string `json:"networkdomain,omitempty"`
+	Podid string `json:"podid,omitempty"`
+	Privateip string `json:"privateip,omitempty"`
+	Privatemacaddress string `json:"privatemacaddress,omitempty"`
+	Privatenetmask string `json:"privatenetmask,omitempty"`
+	Publicip string `json:"publicip,omitempty"`
+	Publicmacaddress string `json:"publicmacaddress,omitempty"`
+	Publicnetmask string `json:"publicnetmask,omitempty"`
+	State string `json:"state,omitempty"`
+	Systemvmtype string `json:"systemvmtype,omitempty"`
+	Templateid string `json:"templateid,omitempty"`
+	Zoneid string `json:"zoneid,omitempty"`
+	Zonename string `json:"zonename,omitempty"`
+}
+
+type ListSystemVmsParams struct {
+	p map[string]interface{}
+}
+
+func (p *ListSystemVmsParams) toURLValues() url.Values {
+	u := url.Values{}
+	if p.p == nil {
+		return u
+	}
+	if v, found := p.p["hostid"]; found {
+		u.Set("hostid", v.(string))
+	}
+	if v, found := p.p["id"]; found {
+		u.Set("id", v.(string))
+	}
+	if v, found := p.p["keyword"]; found {
+		u.Set("keyword", v.(string))
+	}
+	if v, found := p.p["name"]; found {
+		u.Set("name", v.(string))
+	}
+	if v, found := p.p["page"]; found {
+		vv := strconv.Itoa(v.(int))
+		u.Set("page", vv)
+	}
+	if v, found := p.p["pagesize"]; found {
+		vv := strconv.Itoa(v.(int))
+		u.Set("pagesize", vv)
+	}
+	if v, found := p.p["podid"]; found {
+		u.Set("podid", v.(string))
+	}
+	if v, found := p.p["state"]; found {
+		u.Set("state", v.(string))
+	}
+	if v, found := p.p["storageid"]; found {
+		u.Set("storageid", v.(string))
+	}
+	if v, found := p.p["systemvmtype"]; found {
+		u.Set("systemvmtype", v.(string))
+	}
+	if v, found := p.p["zoneid"]; found {
+		u.Set("zoneid", v.(string))
+	}
+	return u
+}
+
+func (p *ListSystemVmsParams) SetHostid(v string) {
+	if p.p == nil {
+		p.p = make(map[string]interface{})
+	}
+	p.p["hostid"] = v
+	return
+}
+
+func (p *ListSystemVmsParams) SetId(v string) {
+	if p.p == nil {
+		p.p = make(map[string]interface{})
+	}
+	p.p["id"] = v
+	return
+}
+
+func (p *ListSystemVmsParams) SetKeyword(v string) {
+	if p.p == nil {
+		p.p =
make(map[string]interface{})
+	}
+	p.p["keyword"] = v
+	return
+}
+
+func (p *ListSystemVmsParams) SetName(v string) {
+	if p.p == nil {
+		p.p = make(map[string]interface{})
+	}
+	p.p["name"] = v
+	return
+}
+
+func (p *ListSystemVmsParams) SetPage(v int) {
+	if p.p == nil {
+		p.p = make(map[string]interface{})
+	}
+	p.p["page"] = v
+	return
+}
+
+func (p *ListSystemVmsParams) SetPagesize(v int) {
+	if p.p == nil {
+		p.p = make(map[string]interface{})
+	}
+	p.p["pagesize"] = v
+	return
+}
+
+func (p *ListSystemVmsParams) SetPodid(v string) {
+	if p.p == nil {
+		p.p = make(map[string]interface{})
+	}
+	p.p["podid"] = v
+	return
+}
+
+func (p *ListSystemVmsParams) SetState(v string) {
+	if p.p == nil {
+		p.p = make(map[string]interface{})
+	}
+	p.p["state"] = v
+	return
+}
+
+func (p *ListSystemVmsParams) SetStorageid(v string) {
+	if p.p == nil {
+		p.p = make(map[string]interface{})
+	}
+	p.p["storageid"] = v
+	return
+}
+
+func (p *ListSystemVmsParams) SetSystemvmtype(v string) {
+	if p.p == nil {
+		p.p = make(map[string]interface{})
+	}
+	p.p["systemvmtype"] = v
+	return
+}
+
+func (p *ListSystemVmsParams) SetZoneid(v string) {
+	if p.p == nil {
+		p.p = make(map[string]interface{})
+	}
+	p.p["zoneid"] = v
+	return
+}
+
+// You should always use this function to get a new ListSystemVmsParams instance,
+// as then you are sure you have configured all required params
+func (s *SystemVMService) NewListSystemVmsParams() *ListSystemVmsParams {
+	p := &ListSystemVmsParams{}
+	p.p = make(map[string]interface{})
+	return p
+}
+
+// This is a courtesy helper function, which in some cases may not work as expected!
+func (s *SystemVMService) GetSystemVmID(name string) (string, error) {
+	p := &ListSystemVmsParams{}
+	p.p = make(map[string]interface{})
+
+	p.p["name"] = name
+
+	l, err := s.ListSystemVms(p)
+	if err != nil {
+		return "", err
+	}
+
+	if l.Count == 0 {
+		return "", fmt.Errorf("No match found for %s: %+v", name, l)
+	}
+
+	if l.Count == 1 {
+		return l.SystemVms[0].Id, nil
+	}
+
+	if l.Count > 1 {
+		for _, v := range l.SystemVms {
+			if v.Name == name {
+				return v.Id, nil
+			}
+		}
+	}
+	return "", fmt.Errorf("Could not find an exact match for %s: %+v", name, l)
+}
+
+// This is a courtesy helper function, which in some cases may not work as expected!
+func (s *SystemVMService) GetSystemVmByName(name string) (*SystemVm, int, error) {
+	id, err := s.GetSystemVmID(name)
+	if err != nil {
+		return nil, -1, err
+	}
+
+	r, count, err := s.GetSystemVmByID(id)
+	if err != nil {
+		return nil, count, err
+	}
+	return r, count, nil
+}
+
+// This is a courtesy helper function, which in some cases may not work as expected!
+func (s *SystemVMService) GetSystemVmByID(id string) (*SystemVm, int, error) {
+	p := &ListSystemVmsParams{}
+	p.p = make(map[string]interface{})
+
+	p.p["id"] = id
+
+	l, err := s.ListSystemVms(p)
+	if err != nil {
+		if strings.Contains(err.Error(), fmt.Sprintf(
+			"Invalid parameter id value=%s due to incorrect long value format, "+
+				"or entity does not exist", id)) {
+			return nil, 0, fmt.Errorf("No match found for %s: %+v", id, l)
+		}
+		return nil, -1, err
+	}
+
+	if l.Count == 0 {
+		return nil, l.Count, fmt.Errorf("No match found for %s: %+v", id, l)
+	}
+
+	if l.Count == 1 {
+		return l.SystemVms[0], l.Count, nil
+	}
+	return nil, l.Count, fmt.Errorf("There is more than one result for SystemVm UUID: %s!", id)
+}
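The courtesy helpers above resolve names to IDs but, as their comments warn, can misfire on ambiguous names. For bulk listing, the generated page/pagesize setters drive server-side pagination against ListSystemVms, which is defined next; here is a sketch, reusing the hypothetical client from the earlier examples.

```go
// listAllSystemVms walks every page of listSystemVms output; l.Count
// is the server-side total across all pages.
func listAllSystemVms(cs *cloudstack.CloudStackClient) error {
	const pagesize = 50
	p := cs.SystemVM.NewListSystemVmsParams()
	p.SetPagesize(pagesize)
	for page := 1; ; page++ {
		p.SetPage(page)
		l, err := cs.SystemVM.ListSystemVms(p)
		if err != nil {
			return err
		}
		for _, vm := range l.SystemVms {
			fmt.Printf("%s\t%s\t%s\n", vm.Id, vm.Systemvmtype, vm.State)
		}
		if page*pagesize >= l.Count {
			return nil
		}
	}
}
```
+
+// List system virtual machines.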
+func (s *SystemVMService) ListSystemVms(p *ListSystemVmsParams) (*ListSystemVmsResponse, error) { + resp, err := s.cs.newRequest("listSystemVms", p.toURLValues()) + if err != nil { + return nil, err + } + + var r ListSystemVmsResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + return &r, nil +} + +type ListSystemVmsResponse struct { + Count int `json:"count"` + SystemVms []*SystemVm `json:"systemvm"` +} + +type SystemVm struct { + Activeviewersessions int `json:"activeviewersessions,omitempty"` + Created string `json:"created,omitempty"` + Dns1 string `json:"dns1,omitempty"` + Dns2 string `json:"dns2,omitempty"` + Gateway string `json:"gateway,omitempty"` + Hostid string `json:"hostid,omitempty"` + Hostname string `json:"hostname,omitempty"` + Id string `json:"id,omitempty"` + Jobid string `json:"jobid,omitempty"` + Jobstatus int `json:"jobstatus,omitempty"` + Linklocalip string `json:"linklocalip,omitempty"` + Linklocalmacaddress string `json:"linklocalmacaddress,omitempty"` + Linklocalnetmask string `json:"linklocalnetmask,omitempty"` + Name string `json:"name,omitempty"` + Networkdomain string `json:"networkdomain,omitempty"` + Podid string `json:"podid,omitempty"` + Privateip string `json:"privateip,omitempty"` + Privatemacaddress string `json:"privatemacaddress,omitempty"` + Privatenetmask string `json:"privatenetmask,omitempty"` + Publicip string `json:"publicip,omitempty"` + Publicmacaddress string `json:"publicmacaddress,omitempty"` + Publicnetmask string `json:"publicnetmask,omitempty"` + State string `json:"state,omitempty"` + Systemvmtype string `json:"systemvmtype,omitempty"` + Templateid string `json:"templateid,omitempty"` + Zoneid string `json:"zoneid,omitempty"` + Zonename string `json:"zonename,omitempty"` +} + +type MigrateSystemVmParams struct { + p map[string]interface{} +} + +func (p *MigrateSystemVmParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["hostid"]; found { + u.Set("hostid", v.(string)) + } + if v, found := p.p["virtualmachineid"]; found { + u.Set("virtualmachineid", v.(string)) + } + return u +} + +func (p *MigrateSystemVmParams) SetHostid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["hostid"] = v + return +} + +func (p *MigrateSystemVmParams) SetVirtualmachineid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["virtualmachineid"] = v + return +} + +// You should always use this function to get a new MigrateSystemVmParams instance, +// as then you are sure you have configured all required params +func (s *SystemVMService) NewMigrateSystemVmParams(hostid string, virtualmachineid string) *MigrateSystemVmParams { + p := &MigrateSystemVmParams{} + p.p = make(map[string]interface{}) + p.p["hostid"] = hostid + p.p["virtualmachineid"] = virtualmachineid + return p +} + +// Attempts Migration of a system virtual machine to the host specified. 
+func (s *SystemVMService) MigrateSystemVm(p *MigrateSystemVmParams) (*MigrateSystemVmResponse, error) {
+	resp, err := s.cs.newRequest("migrateSystemVm", p.toURLValues())
+	if err != nil {
+		return nil, err
+	}
+
+	var r MigrateSystemVmResponse
+	if err := json.Unmarshal(resp, &r); err != nil {
+		return nil, err
+	}
+
+	// If we have an async client, we need to wait for the async result
+	if s.cs.async {
+		b, err := s.cs.GetAsyncJobResult(r.JobID, s.cs.timeout)
+		if err != nil {
+			if err == AsyncTimeoutErr {
+				return &r, err
+			}
+			return nil, err
+		}
+
+		b, err = getRawValue(b)
+		if err != nil {
+			return nil, err
+		}
+
+		if err := json.Unmarshal(b, &r); err != nil {
+			return nil, err
+		}
+	}
+	return &r, nil
+}
+
+type MigrateSystemVmResponse struct {
+	JobID string `json:"jobid,omitempty"`
+	Activeviewersessions int `json:"activeviewersessions,omitempty"`
+	Created string `json:"created,omitempty"`
+	Dns1 string `json:"dns1,omitempty"`
+	Dns2 string `json:"dns2,omitempty"`
+	Gateway string `json:"gateway,omitempty"`
+	Hostid string `json:"hostid,omitempty"`
+	Hostname string `json:"hostname,omitempty"`
+	Id string `json:"id,omitempty"`
+	Jobid string `json:"jobid,omitempty"`
+	Jobstatus int `json:"jobstatus,omitempty"`
+	Linklocalip string `json:"linklocalip,omitempty"`
+	Linklocalmacaddress string `json:"linklocalmacaddress,omitempty"`
+	Linklocalnetmask string `json:"linklocalnetmask,omitempty"`
+	Name string `json:"name,omitempty"`
+	Networkdomain string `json:"networkdomain,omitempty"`
+	Podid string `json:"podid,omitempty"`
+	Privateip string `json:"privateip,omitempty"`
+	Privatemacaddress string `json:"privatemacaddress,omitempty"`
+	Privatenetmask string `json:"privatenetmask,omitempty"`
+	Publicip string `json:"publicip,omitempty"`
+	Publicmacaddress string `json:"publicmacaddress,omitempty"`
+	Publicnetmask string `json:"publicnetmask,omitempty"`
+	State string `json:"state,omitempty"`
+	Systemvmtype string `json:"systemvmtype,omitempty"`
+	Templateid string `json:"templateid,omitempty"`
+	Zoneid string `json:"zoneid,omitempty"`
+	Zonename string `json:"zonename,omitempty"`
+}
+
+type ChangeServiceForSystemVmParams struct {
+	p map[string]interface{}
+}
+
+func (p *ChangeServiceForSystemVmParams) toURLValues() url.Values {
+	u := url.Values{}
+	if p.p == nil {
+		return u
+	}
+	if v, found := p.p["details"]; found {
+		i := 0
+		for k, vv := range v.(map[string]string) {
+			u.Set(fmt.Sprintf("details[%d].key", i), k)
+			u.Set(fmt.Sprintf("details[%d].value", i), vv)
+			i++
+		}
+	}
+	if v, found := p.p["id"]; found {
+		u.Set("id", v.(string))
+	}
+	if v, found := p.p["serviceofferingid"]; found {
+		u.Set("serviceofferingid", v.(string))
+	}
+	return u
+}
+
+func (p *ChangeServiceForSystemVmParams) SetDetails(v map[string]string) {
+	if p.p == nil {
+		p.p = make(map[string]interface{})
+	}
+	p.p["details"] = v
+	return
+}
+
+func (p *ChangeServiceForSystemVmParams) SetId(v string) {
+	if p.p == nil {
+		p.p = make(map[string]interface{})
+	}
+	p.p["id"] = v
+	return
+}
+
+func (p *ChangeServiceForSystemVmParams) SetServiceofferingid(v string) {
+	if p.p == nil {
+		p.p = make(map[string]interface{})
+	}
+	p.p["serviceofferingid"] = v
+	return
+}
+
+// You should always use this function to get a new ChangeServiceForSystemVmParams instance,
+// as then you are sure you have configured all required params
+func (s *SystemVMService) NewChangeServiceForSystemVmParams(id string, serviceofferingid string) *ChangeServiceForSystemVmParams {
+	p := &ChangeServiceForSystemVmParams{}
+	p.p =
make(map[string]interface{}) + p.p["id"] = id + p.p["serviceofferingid"] = serviceofferingid + return p +} + +// Changes the service offering for a system vm (console proxy or secondary storage). The system vm must be in a "Stopped" state for this command to take effect. +func (s *SystemVMService) ChangeServiceForSystemVm(p *ChangeServiceForSystemVmParams) (*ChangeServiceForSystemVmResponse, error) { + resp, err := s.cs.newRequest("changeServiceForSystemVm", p.toURLValues()) + if err != nil { + return nil, err + } + + var r ChangeServiceForSystemVmResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + return &r, nil +} + +type ChangeServiceForSystemVmResponse struct { + Activeviewersessions int `json:"activeviewersessions,omitempty"` + Created string `json:"created,omitempty"` + Dns1 string `json:"dns1,omitempty"` + Dns2 string `json:"dns2,omitempty"` + Gateway string `json:"gateway,omitempty"` + Hostid string `json:"hostid,omitempty"` + Hostname string `json:"hostname,omitempty"` + Id string `json:"id,omitempty"` + Jobid string `json:"jobid,omitempty"` + Jobstatus int `json:"jobstatus,omitempty"` + Linklocalip string `json:"linklocalip,omitempty"` + Linklocalmacaddress string `json:"linklocalmacaddress,omitempty"` + Linklocalnetmask string `json:"linklocalnetmask,omitempty"` + Name string `json:"name,omitempty"` + Networkdomain string `json:"networkdomain,omitempty"` + Podid string `json:"podid,omitempty"` + Privateip string `json:"privateip,omitempty"` + Privatemacaddress string `json:"privatemacaddress,omitempty"` + Privatenetmask string `json:"privatenetmask,omitempty"` + Publicip string `json:"publicip,omitempty"` + Publicmacaddress string `json:"publicmacaddress,omitempty"` + Publicnetmask string `json:"publicnetmask,omitempty"` + State string `json:"state,omitempty"` + Systemvmtype string `json:"systemvmtype,omitempty"` + Templateid string `json:"templateid,omitempty"` + Zoneid string `json:"zoneid,omitempty"` + Zonename string `json:"zonename,omitempty"` +} + +type ScaleSystemVmParams struct { + p map[string]interface{} +} + +func (p *ScaleSystemVmParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["details"]; found { + i := 0 + for k, vv := range v.(map[string]string) { + u.Set(fmt.Sprintf("details[%d].key", i), k) + u.Set(fmt.Sprintf("details[%d].value", i), vv) + i++ + } + } + if v, found := p.p["id"]; found { + u.Set("id", v.(string)) + } + if v, found := p.p["serviceofferingid"]; found { + u.Set("serviceofferingid", v.(string)) + } + return u +} + +func (p *ScaleSystemVmParams) SetDetails(v map[string]string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["details"] = v + return +} + +func (p *ScaleSystemVmParams) SetId(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["id"] = v + return +} + +func (p *ScaleSystemVmParams) SetServiceofferingid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["serviceofferingid"] = v + return +} + +// You should always use this function to get a new ScaleSystemVmParams instance, +// as then you are sure you have configured all required params +func (s *SystemVMService) NewScaleSystemVmParams(id string, serviceofferingid string) *ScaleSystemVmParams { + p := &ScaleSystemVmParams{} + p.p = make(map[string]interface{}) + p.p["id"] = id + p.p["serviceofferingid"] = serviceofferingid + return p +} + +// Scale the service offering for a system vm (console proxy or secondary storage). 
The system vm must be in a "Stopped" state for this command to take effect.
+func (s *SystemVMService) ScaleSystemVm(p *ScaleSystemVmParams) (*ScaleSystemVmResponse, error) {
+	resp, err := s.cs.newRequest("scaleSystemVm", p.toURLValues())
+	if err != nil {
+		return nil, err
+	}
+
+	var r ScaleSystemVmResponse
+	if err := json.Unmarshal(resp, &r); err != nil {
+		return nil, err
+	}
+
+	// If we have an async client, we need to wait for the async result
+	if s.cs.async {
+		b, err := s.cs.GetAsyncJobResult(r.JobID, s.cs.timeout)
+		if err != nil {
+			if err == AsyncTimeoutErr {
+				return &r, err
+			}
+			return nil, err
+		}
+
+		b, err = getRawValue(b)
+		if err != nil {
+			return nil, err
+		}
+
+		if err := json.Unmarshal(b, &r); err != nil {
+			return nil, err
+		}
+	}
+	return &r, nil
+}
+
+type ScaleSystemVmResponse struct {
+	JobID string `json:"jobid,omitempty"`
+	Activeviewersessions int `json:"activeviewersessions,omitempty"`
+	Created string `json:"created,omitempty"`
+	Dns1 string `json:"dns1,omitempty"`
+	Dns2 string `json:"dns2,omitempty"`
+	Gateway string `json:"gateway,omitempty"`
+	Hostid string `json:"hostid,omitempty"`
+	Hostname string `json:"hostname,omitempty"`
+	Id string `json:"id,omitempty"`
+	Jobid string `json:"jobid,omitempty"`
+	Jobstatus int `json:"jobstatus,omitempty"`
+	Linklocalip string `json:"linklocalip,omitempty"`
+	Linklocalmacaddress string `json:"linklocalmacaddress,omitempty"`
+	Linklocalnetmask string `json:"linklocalnetmask,omitempty"`
+	Name string `json:"name,omitempty"`
+	Networkdomain string `json:"networkdomain,omitempty"`
+	Podid string `json:"podid,omitempty"`
+	Privateip string `json:"privateip,omitempty"`
+	Privatemacaddress string `json:"privatemacaddress,omitempty"`
+	Privatenetmask string `json:"privatenetmask,omitempty"`
+	Publicip string `json:"publicip,omitempty"`
+	Publicmacaddress string `json:"publicmacaddress,omitempty"`
+	Publicnetmask string `json:"publicnetmask,omitempty"`
+	State string `json:"state,omitempty"`
+	Systemvmtype string `json:"systemvmtype,omitempty"`
+	Templateid string `json:"templateid,omitempty"`
+	Zoneid string `json:"zoneid,omitempty"`
+	Zonename string `json:"zonename,omitempty"`
+}
diff --git a/vendor/github.com/xanzy/go-cloudstack/cloudstack/TemplateService.go b/vendor/github.com/xanzy/go-cloudstack/cloudstack/TemplateService.go
new file mode 100644
index 000000000..860e59203
--- /dev/null
+++ b/vendor/github.com/xanzy/go-cloudstack/cloudstack/TemplateService.go
@@ -0,0 +1,1999 @@
+//
+// Copyright 2014, Sander van Harmelen
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+// + +package cloudstack + +import ( + "encoding/json" + "fmt" + "net/url" + "strconv" + "strings" +) + +type CreateTemplateParams struct { + p map[string]interface{} +} + +func (p *CreateTemplateParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["bits"]; found { + vv := strconv.Itoa(v.(int)) + u.Set("bits", vv) + } + if v, found := p.p["details"]; found { + i := 0 + for k, vv := range v.(map[string]string) { + u.Set(fmt.Sprintf("details[%d].key", i), k) + u.Set(fmt.Sprintf("details[%d].value", i), vv) + i++ + } + } + if v, found := p.p["displaytext"]; found { + u.Set("displaytext", v.(string)) + } + if v, found := p.p["isdynamicallyscalable"]; found { + vv := strconv.FormatBool(v.(bool)) + u.Set("isdynamicallyscalable", vv) + } + if v, found := p.p["isfeatured"]; found { + vv := strconv.FormatBool(v.(bool)) + u.Set("isfeatured", vv) + } + if v, found := p.p["ispublic"]; found { + vv := strconv.FormatBool(v.(bool)) + u.Set("ispublic", vv) + } + if v, found := p.p["name"]; found { + u.Set("name", v.(string)) + } + if v, found := p.p["ostypeid"]; found { + u.Set("ostypeid", v.(string)) + } + if v, found := p.p["passwordenabled"]; found { + vv := strconv.FormatBool(v.(bool)) + u.Set("passwordenabled", vv) + } + if v, found := p.p["requireshvm"]; found { + vv := strconv.FormatBool(v.(bool)) + u.Set("requireshvm", vv) + } + if v, found := p.p["snapshotid"]; found { + u.Set("snapshotid", v.(string)) + } + if v, found := p.p["templatetag"]; found { + u.Set("templatetag", v.(string)) + } + if v, found := p.p["url"]; found { + u.Set("url", v.(string)) + } + if v, found := p.p["virtualmachineid"]; found { + u.Set("virtualmachineid", v.(string)) + } + if v, found := p.p["volumeid"]; found { + u.Set("volumeid", v.(string)) + } + return u +} + +func (p *CreateTemplateParams) SetBits(v int) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["bits"] = v + return +} + +func (p *CreateTemplateParams) SetDetails(v map[string]string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["details"] = v + return +} + +func (p *CreateTemplateParams) SetDisplaytext(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["displaytext"] = v + return +} + +func (p *CreateTemplateParams) SetIsdynamicallyscalable(v bool) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["isdynamicallyscalable"] = v + return +} + +func (p *CreateTemplateParams) SetIsfeatured(v bool) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["isfeatured"] = v + return +} + +func (p *CreateTemplateParams) SetIspublic(v bool) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["ispublic"] = v + return +} + +func (p *CreateTemplateParams) SetName(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["name"] = v + return +} + +func (p *CreateTemplateParams) SetOstypeid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["ostypeid"] = v + return +} + +func (p *CreateTemplateParams) SetPasswordenabled(v bool) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["passwordenabled"] = v + return +} + +func (p *CreateTemplateParams) SetRequireshvm(v bool) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["requireshvm"] = v + return +} + +func (p *CreateTemplateParams) SetSnapshotid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["snapshotid"] = v + return +} + +func (p *CreateTemplateParams) 
SetTemplatetag(v string) {
+	if p.p == nil {
+		p.p = make(map[string]interface{})
+	}
+	p.p["templatetag"] = v
+	return
+}
+
+func (p *CreateTemplateParams) SetUrl(v string) {
+	if p.p == nil {
+		p.p = make(map[string]interface{})
+	}
+	p.p["url"] = v
+	return
+}
+
+func (p *CreateTemplateParams) SetVirtualmachineid(v string) {
+	if p.p == nil {
+		p.p = make(map[string]interface{})
+	}
+	p.p["virtualmachineid"] = v
+	return
+}
+
+func (p *CreateTemplateParams) SetVolumeid(v string) {
+	if p.p == nil {
+		p.p = make(map[string]interface{})
+	}
+	p.p["volumeid"] = v
+	return
+}
+
+// You should always use this function to get a new CreateTemplateParams instance,
+// as then you are sure you have configured all required params
+func (s *TemplateService) NewCreateTemplateParams(displaytext string, name string, ostypeid string) *CreateTemplateParams {
+	p := &CreateTemplateParams{}
+	p.p = make(map[string]interface{})
+	p.p["displaytext"] = displaytext
+	p.p["name"] = name
+	p.p["ostypeid"] = ostypeid
+	return p
+}
+
+// Creates a template of a virtual machine. The virtual machine must be in a STOPPED state. A template created from this command is automatically designated as a private template visible to the account that created it.
+func (s *TemplateService) CreateTemplate(p *CreateTemplateParams) (*CreateTemplateResponse, error) {
+	resp, err := s.cs.newRequest("createTemplate", p.toURLValues())
+	if err != nil {
+		return nil, err
+	}
+
+	var r CreateTemplateResponse
+	if err := json.Unmarshal(resp, &r); err != nil {
+		return nil, err
+	}
+
+	// If we have an async client, we need to wait for the async result
+	if s.cs.async {
+		b, err := s.cs.GetAsyncJobResult(r.JobID, s.cs.timeout)
+		if err != nil {
+			if err == AsyncTimeoutErr {
+				return &r, err
+			}
+			return nil, err
+		}
+
+		b, err = getRawValue(b)
+		if err != nil {
+			return nil, err
+		}
+
+		if err := json.Unmarshal(b, &r); err != nil {
+			return nil, err
+		}
+	}
+	return &r, nil
+}
+
+type CreateTemplateResponse struct {
+	JobID string `json:"jobid,omitempty"`
+	Account string `json:"account,omitempty"`
+	Accountid string `json:"accountid,omitempty"`
+	Bootable bool `json:"bootable,omitempty"`
+	Checksum string `json:"checksum,omitempty"`
+	Created string `json:"created,omitempty"`
+	CrossZones bool `json:"crossZones,omitempty"`
+	Details map[string]string `json:"details,omitempty"`
+	Displaytext string `json:"displaytext,omitempty"`
+	Domain string `json:"domain,omitempty"`
+	Domainid string `json:"domainid,omitempty"`
+	Format string `json:"format,omitempty"`
+	Hostid string `json:"hostid,omitempty"`
+	Hostname string `json:"hostname,omitempty"`
+	Hypervisor string `json:"hypervisor,omitempty"`
+	Id string `json:"id,omitempty"`
+	Isdynamicallyscalable bool `json:"isdynamicallyscalable,omitempty"`
+	Isextractable bool `json:"isextractable,omitempty"`
+	Isfeatured bool `json:"isfeatured,omitempty"`
+	Ispublic bool `json:"ispublic,omitempty"`
+	Isready bool `json:"isready,omitempty"`
+	Name string `json:"name,omitempty"`
+	Ostypeid string `json:"ostypeid,omitempty"`
+	Ostypename string `json:"ostypename,omitempty"`
+	Passwordenabled bool `json:"passwordenabled,omitempty"`
+	Project string `json:"project,omitempty"`
+	Projectid string `json:"projectid,omitempty"`
+	Removed string `json:"removed,omitempty"`
+	Size int64 `json:"size,omitempty"`
+	Sourcetemplateid string `json:"sourcetemplateid,omitempty"`
+	Sshkeyenabled bool `json:"sshkeyenabled,omitempty"`
+	Status string `json:"status,omitempty"`
+	Tags []struct {
+		Account string
`json:"account,omitempty"` + Customer string `json:"customer,omitempty"` + Domain string `json:"domain,omitempty"` + Domainid string `json:"domainid,omitempty"` + Key string `json:"key,omitempty"` + Project string `json:"project,omitempty"` + Projectid string `json:"projectid,omitempty"` + Resourceid string `json:"resourceid,omitempty"` + Resourcetype string `json:"resourcetype,omitempty"` + Value string `json:"value,omitempty"` + } `json:"tags,omitempty"` + Templatetag string `json:"templatetag,omitempty"` + Templatetype string `json:"templatetype,omitempty"` + Zoneid string `json:"zoneid,omitempty"` + Zonename string `json:"zonename,omitempty"` +} + +type RegisterTemplateParams struct { + p map[string]interface{} +} + +func (p *RegisterTemplateParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["account"]; found { + u.Set("account", v.(string)) + } + if v, found := p.p["bits"]; found { + vv := strconv.Itoa(v.(int)) + u.Set("bits", vv) + } + if v, found := p.p["checksum"]; found { + u.Set("checksum", v.(string)) + } + if v, found := p.p["details"]; found { + i := 0 + for k, vv := range v.(map[string]string) { + u.Set(fmt.Sprintf("details[%d].key", i), k) + u.Set(fmt.Sprintf("details[%d].value", i), vv) + i++ + } + } + if v, found := p.p["displaytext"]; found { + u.Set("displaytext", v.(string)) + } + if v, found := p.p["domainid"]; found { + u.Set("domainid", v.(string)) + } + if v, found := p.p["format"]; found { + u.Set("format", v.(string)) + } + if v, found := p.p["hypervisor"]; found { + u.Set("hypervisor", v.(string)) + } + if v, found := p.p["isdynamicallyscalable"]; found { + vv := strconv.FormatBool(v.(bool)) + u.Set("isdynamicallyscalable", vv) + } + if v, found := p.p["isextractable"]; found { + vv := strconv.FormatBool(v.(bool)) + u.Set("isextractable", vv) + } + if v, found := p.p["isfeatured"]; found { + vv := strconv.FormatBool(v.(bool)) + u.Set("isfeatured", vv) + } + if v, found := p.p["ispublic"]; found { + vv := strconv.FormatBool(v.(bool)) + u.Set("ispublic", vv) + } + if v, found := p.p["isrouting"]; found { + vv := strconv.FormatBool(v.(bool)) + u.Set("isrouting", vv) + } + if v, found := p.p["name"]; found { + u.Set("name", v.(string)) + } + if v, found := p.p["ostypeid"]; found { + u.Set("ostypeid", v.(string)) + } + if v, found := p.p["passwordenabled"]; found { + vv := strconv.FormatBool(v.(bool)) + u.Set("passwordenabled", vv) + } + if v, found := p.p["projectid"]; found { + u.Set("projectid", v.(string)) + } + if v, found := p.p["requireshvm"]; found { + vv := strconv.FormatBool(v.(bool)) + u.Set("requireshvm", vv) + } + if v, found := p.p["sshkeyenabled"]; found { + vv := strconv.FormatBool(v.(bool)) + u.Set("sshkeyenabled", vv) + } + if v, found := p.p["templatetag"]; found { + u.Set("templatetag", v.(string)) + } + if v, found := p.p["url"]; found { + u.Set("url", v.(string)) + } + if v, found := p.p["zoneid"]; found { + u.Set("zoneid", v.(string)) + } + return u +} + +func (p *RegisterTemplateParams) SetAccount(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["account"] = v + return +} + +func (p *RegisterTemplateParams) SetBits(v int) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["bits"] = v + return +} + +func (p *RegisterTemplateParams) SetChecksum(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["checksum"] = v + return +} + +func (p *RegisterTemplateParams) SetDetails(v map[string]string) { + if p.p == nil { + p.p = 
make(map[string]interface{}) + } + p.p["details"] = v + return +} + +func (p *RegisterTemplateParams) SetDisplaytext(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["displaytext"] = v + return +} + +func (p *RegisterTemplateParams) SetDomainid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["domainid"] = v + return +} + +func (p *RegisterTemplateParams) SetFormat(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["format"] = v + return +} + +func (p *RegisterTemplateParams) SetHypervisor(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["hypervisor"] = v + return +} + +func (p *RegisterTemplateParams) SetIsdynamicallyscalable(v bool) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["isdynamicallyscalable"] = v + return +} + +func (p *RegisterTemplateParams) SetIsextractable(v bool) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["isextractable"] = v + return +} + +func (p *RegisterTemplateParams) SetIsfeatured(v bool) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["isfeatured"] = v + return +} + +func (p *RegisterTemplateParams) SetIspublic(v bool) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["ispublic"] = v + return +} + +func (p *RegisterTemplateParams) SetIsrouting(v bool) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["isrouting"] = v + return +} + +func (p *RegisterTemplateParams) SetName(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["name"] = v + return +} + +func (p *RegisterTemplateParams) SetOstypeid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["ostypeid"] = v + return +} + +func (p *RegisterTemplateParams) SetPasswordenabled(v bool) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["passwordenabled"] = v + return +} + +func (p *RegisterTemplateParams) SetProjectid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["projectid"] = v + return +} + +func (p *RegisterTemplateParams) SetRequireshvm(v bool) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["requireshvm"] = v + return +} + +func (p *RegisterTemplateParams) SetSshkeyenabled(v bool) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["sshkeyenabled"] = v + return +} + +func (p *RegisterTemplateParams) SetTemplatetag(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["templatetag"] = v + return +} + +func (p *RegisterTemplateParams) SetUrl(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["url"] = v + return +} + +func (p *RegisterTemplateParams) SetZoneid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["zoneid"] = v + return +} + +// You should always use this function to get a new RegisterTemplateParams instance, +// as then you are sure you have configured all required params +func (s *TemplateService) NewRegisterTemplateParams(displaytext string, format string, hypervisor string, name string, ostypeid string, url string, zoneid string) *RegisterTemplateParams { + p := &RegisterTemplateParams{} + p.p = make(map[string]interface{}) + p.p["displaytext"] = displaytext + p.p["format"] = format + p.p["hypervisor"] = hypervisor + p.p["name"] = name + p.p["ostypeid"] = ostypeid + p.p["url"] = url + p.p["zoneid"] = zoneid + return p +} + +// Registers an existing template into the CloudStack cloud. 
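Editor's note: a hedged sketch of driving the RegisterTemplate call defined just below, assuming the client exposes this service as cs.Template; the image URL, OS-type UUID, and other literals are placeholders, and the seven constructor arguments mirror the required parameters listed above.

```go
// registerTemplate registers an image already hosted at an HTTP URL
// and returns the new template's ID.
func registerTemplate(cs *cloudstack.CloudStackClient, zoneid string) (string, error) {
	p := cs.Template.NewRegisterTemplateParams(
		"Ubuntu 14.04 base image",                  // displaytext
		"VHD",                                      // format
		"XenServer",                                // hypervisor
		"ubuntu-14.04",                             // name
		"00000000-0000-0000-0000-000000000000",     // ostypeid (placeholder)
		"http://repo.example.com/ubuntu-14.04.vhd", // url (placeholder)
		zoneid,
	)
	p.SetIspublic(true) // optional: make the template visible to all accounts

	r, err := cs.Template.RegisterTemplate(p)
	if err != nil {
		return "", err
	}
	// The response wraps a list; the newly registered template comes first.
	return r.RegisterTemplate[0].Id, nil
}
```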
+func (s *TemplateService) RegisterTemplate(p *RegisterTemplateParams) (*RegisterTemplateResponse, error) { + resp, err := s.cs.newRequest("registerTemplate", p.toURLValues()) + if err != nil { + return nil, err + } + + var r RegisterTemplateResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + return &r, nil +} + +type RegisterTemplateResponse struct { + Count int `json:"count"` + RegisterTemplate []*RegisterTemplate `json:"template"` +} + +type RegisterTemplate struct { + Account string `json:"account,omitempty"` + Accountid string `json:"accountid,omitempty"` + Bootable bool `json:"bootable,omitempty"` + Checksum string `json:"checksum,omitempty"` + Created string `json:"created,omitempty"` + CrossZones bool `json:"crossZones,omitempty"` + Details map[string]string `json:"details,omitempty"` + Displaytext string `json:"displaytext,omitempty"` + Domain string `json:"domain,omitempty"` + Domainid string `json:"domainid,omitempty"` + Format string `json:"format,omitempty"` + Hostid string `json:"hostid,omitempty"` + Hostname string `json:"hostname,omitempty"` + Hypervisor string `json:"hypervisor,omitempty"` + Id string `json:"id,omitempty"` + Isdynamicallyscalable bool `json:"isdynamicallyscalable,omitempty"` + Isextractable bool `json:"isextractable,omitempty"` + Isfeatured bool `json:"isfeatured,omitempty"` + Ispublic bool `json:"ispublic,omitempty"` + Isready bool `json:"isready,omitempty"` + Name string `json:"name,omitempty"` + Ostypeid string `json:"ostypeid,omitempty"` + Ostypename string `json:"ostypename,omitempty"` + Passwordenabled bool `json:"passwordenabled,omitempty"` + Project string `json:"project,omitempty"` + Projectid string `json:"projectid,omitempty"` + Removed string `json:"removed,omitempty"` + Size int64 `json:"size,omitempty"` + Sourcetemplateid string `json:"sourcetemplateid,omitempty"` + Sshkeyenabled bool `json:"sshkeyenabled,omitempty"` + Status string `json:"status,omitempty"` + Tags []struct { + Account string `json:"account,omitempty"` + Customer string `json:"customer,omitempty"` + Domain string `json:"domain,omitempty"` + Domainid string `json:"domainid,omitempty"` + Key string `json:"key,omitempty"` + Project string `json:"project,omitempty"` + Projectid string `json:"projectid,omitempty"` + Resourceid string `json:"resourceid,omitempty"` + Resourcetype string `json:"resourcetype,omitempty"` + Value string `json:"value,omitempty"` + } `json:"tags,omitempty"` + Templatetag string `json:"templatetag,omitempty"` + Templatetype string `json:"templatetype,omitempty"` + Zoneid string `json:"zoneid,omitempty"` + Zonename string `json:"zonename,omitempty"` +} + +type UpdateTemplateParams struct { + p map[string]interface{} +} + +func (p *UpdateTemplateParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["bootable"]; found { + vv := strconv.FormatBool(v.(bool)) + u.Set("bootable", vv) + } + if v, found := p.p["details"]; found { + i := 0 + for k, vv := range v.(map[string]string) { + u.Set(fmt.Sprintf("details[%d].key", i), k) + u.Set(fmt.Sprintf("details[%d].value", i), vv) + i++ + } + } + if v, found := p.p["displaytext"]; found { + u.Set("displaytext", v.(string)) + } + if v, found := p.p["format"]; found { + u.Set("format", v.(string)) + } + if v, found := p.p["id"]; found { + u.Set("id", v.(string)) + } + if v, found := p.p["isdynamicallyscalable"]; found { + vv := strconv.FormatBool(v.(bool)) + u.Set("isdynamicallyscalable", vv) + } + if v, found := p.p["isrouting"]; found 
{ + vv := strconv.FormatBool(v.(bool)) + u.Set("isrouting", vv) + } + if v, found := p.p["name"]; found { + u.Set("name", v.(string)) + } + if v, found := p.p["ostypeid"]; found { + u.Set("ostypeid", v.(string)) + } + if v, found := p.p["passwordenabled"]; found { + vv := strconv.FormatBool(v.(bool)) + u.Set("passwordenabled", vv) + } + if v, found := p.p["sortkey"]; found { + vv := strconv.Itoa(v.(int)) + u.Set("sortkey", vv) + } + return u +} + +func (p *UpdateTemplateParams) SetBootable(v bool) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["bootable"] = v + return +} + +func (p *UpdateTemplateParams) SetDetails(v map[string]string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["details"] = v + return +} + +func (p *UpdateTemplateParams) SetDisplaytext(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["displaytext"] = v + return +} + +func (p *UpdateTemplateParams) SetFormat(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["format"] = v + return +} + +func (p *UpdateTemplateParams) SetId(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["id"] = v + return +} + +func (p *UpdateTemplateParams) SetIsdynamicallyscalable(v bool) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["isdynamicallyscalable"] = v + return +} + +func (p *UpdateTemplateParams) SetIsrouting(v bool) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["isrouting"] = v + return +} + +func (p *UpdateTemplateParams) SetName(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["name"] = v + return +} + +func (p *UpdateTemplateParams) SetOstypeid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["ostypeid"] = v + return +} + +func (p *UpdateTemplateParams) SetPasswordenabled(v bool) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["passwordenabled"] = v + return +} + +func (p *UpdateTemplateParams) SetSortkey(v int) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["sortkey"] = v + return +} + +// You should always use this function to get a new UpdateTemplateParams instance, +// as then you are sure you have configured all required params +func (s *TemplateService) NewUpdateTemplateParams(id string) *UpdateTemplateParams { + p := &UpdateTemplateParams{} + p.p = make(map[string]interface{}) + p.p["id"] = id + return p +} + +// Updates attributes of a template. 
+func (s *TemplateService) UpdateTemplate(p *UpdateTemplateParams) (*UpdateTemplateResponse, error) { + resp, err := s.cs.newRequest("updateTemplate", p.toURLValues()) + if err != nil { + return nil, err + } + + var r UpdateTemplateResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + return &r, nil +} + +type UpdateTemplateResponse struct { + Account string `json:"account,omitempty"` + Accountid string `json:"accountid,omitempty"` + Bootable bool `json:"bootable,omitempty"` + Checksum string `json:"checksum,omitempty"` + Created string `json:"created,omitempty"` + CrossZones bool `json:"crossZones,omitempty"` + Details map[string]string `json:"details,omitempty"` + Displaytext string `json:"displaytext,omitempty"` + Domain string `json:"domain,omitempty"` + Domainid string `json:"domainid,omitempty"` + Format string `json:"format,omitempty"` + Hostid string `json:"hostid,omitempty"` + Hostname string `json:"hostname,omitempty"` + Hypervisor string `json:"hypervisor,omitempty"` + Id string `json:"id,omitempty"` + Isdynamicallyscalable bool `json:"isdynamicallyscalable,omitempty"` + Isextractable bool `json:"isextractable,omitempty"` + Isfeatured bool `json:"isfeatured,omitempty"` + Ispublic bool `json:"ispublic,omitempty"` + Isready bool `json:"isready,omitempty"` + Name string `json:"name,omitempty"` + Ostypeid string `json:"ostypeid,omitempty"` + Ostypename string `json:"ostypename,omitempty"` + Passwordenabled bool `json:"passwordenabled,omitempty"` + Project string `json:"project,omitempty"` + Projectid string `json:"projectid,omitempty"` + Removed string `json:"removed,omitempty"` + Size int64 `json:"size,omitempty"` + Sourcetemplateid string `json:"sourcetemplateid,omitempty"` + Sshkeyenabled bool `json:"sshkeyenabled,omitempty"` + Status string `json:"status,omitempty"` + Tags []struct { + Account string `json:"account,omitempty"` + Customer string `json:"customer,omitempty"` + Domain string `json:"domain,omitempty"` + Domainid string `json:"domainid,omitempty"` + Key string `json:"key,omitempty"` + Project string `json:"project,omitempty"` + Projectid string `json:"projectid,omitempty"` + Resourceid string `json:"resourceid,omitempty"` + Resourcetype string `json:"resourcetype,omitempty"` + Value string `json:"value,omitempty"` + } `json:"tags,omitempty"` + Templatetag string `json:"templatetag,omitempty"` + Templatetype string `json:"templatetype,omitempty"` + Zoneid string `json:"zoneid,omitempty"` + Zonename string `json:"zonename,omitempty"` +} + +type CopyTemplateParams struct { + p map[string]interface{} +} + +func (p *CopyTemplateParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["destzoneid"]; found { + u.Set("destzoneid", v.(string)) + } + if v, found := p.p["id"]; found { + u.Set("id", v.(string)) + } + if v, found := p.p["sourcezoneid"]; found { + u.Set("sourcezoneid", v.(string)) + } + return u +} + +func (p *CopyTemplateParams) SetDestzoneid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["destzoneid"] = v + return +} + +func (p *CopyTemplateParams) SetId(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["id"] = v + return +} + +func (p *CopyTemplateParams) SetSourcezoneid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["sourcezoneid"] = v + return +} + +// You should always use this function to get a new CopyTemplateParams instance, +// as then you are sure you have configured all required params 
+func (s *TemplateService) NewCopyTemplateParams(destzoneid string, id string) *CopyTemplateParams {
+	p := &CopyTemplateParams{}
+	p.p = make(map[string]interface{})
+	p.p["destzoneid"] = destzoneid
+	p.p["id"] = id
+	return p
+}
+
+// Copies a template from one zone to another.
+func (s *TemplateService) CopyTemplate(p *CopyTemplateParams) (*CopyTemplateResponse, error) {
+	resp, err := s.cs.newRequest("copyTemplate", p.toURLValues())
+	if err != nil {
+		return nil, err
+	}
+
+	var r CopyTemplateResponse
+	if err := json.Unmarshal(resp, &r); err != nil {
+		return nil, err
+	}
+
+	// If we have an async client, we need to wait for the async result
+	if s.cs.async {
+		b, err := s.cs.GetAsyncJobResult(r.JobID, s.cs.timeout)
+		if err != nil {
+			if err == AsyncTimeoutErr {
+				return &r, err
+			}
+			return nil, err
+		}
+
+		b, err = getRawValue(b)
+		if err != nil {
+			return nil, err
+		}
+
+		if err := json.Unmarshal(b, &r); err != nil {
+			return nil, err
+		}
+	}
+	return &r, nil
+}
+
+type CopyTemplateResponse struct {
+	JobID string `json:"jobid,omitempty"`
+	Account string `json:"account,omitempty"`
+	Accountid string `json:"accountid,omitempty"`
+	Bootable bool `json:"bootable,omitempty"`
+	Checksum string `json:"checksum,omitempty"`
+	Created string `json:"created,omitempty"`
+	CrossZones bool `json:"crossZones,omitempty"`
+	Details map[string]string `json:"details,omitempty"`
+	Displaytext string `json:"displaytext,omitempty"`
+	Domain string `json:"domain,omitempty"`
+	Domainid string `json:"domainid,omitempty"`
+	Format string `json:"format,omitempty"`
+	Hostid string `json:"hostid,omitempty"`
+	Hostname string `json:"hostname,omitempty"`
+	Hypervisor string `json:"hypervisor,omitempty"`
+	Id string `json:"id,omitempty"`
+	Isdynamicallyscalable bool `json:"isdynamicallyscalable,omitempty"`
+	Isextractable bool `json:"isextractable,omitempty"`
+	Isfeatured bool `json:"isfeatured,omitempty"`
+	Ispublic bool `json:"ispublic,omitempty"`
+	Isready bool `json:"isready,omitempty"`
+	Name string `json:"name,omitempty"`
+	Ostypeid string `json:"ostypeid,omitempty"`
+	Ostypename string `json:"ostypename,omitempty"`
+	Passwordenabled bool `json:"passwordenabled,omitempty"`
+	Project string `json:"project,omitempty"`
+	Projectid string `json:"projectid,omitempty"`
+	Removed string `json:"removed,omitempty"`
+	Size int64 `json:"size,omitempty"`
+	Sourcetemplateid string `json:"sourcetemplateid,omitempty"`
+	Sshkeyenabled bool `json:"sshkeyenabled,omitempty"`
+	Status string `json:"status,omitempty"`
+	Tags []struct {
+		Account string `json:"account,omitempty"`
+		Customer string `json:"customer,omitempty"`
+		Domain string `json:"domain,omitempty"`
+		Domainid string `json:"domainid,omitempty"`
+		Key string `json:"key,omitempty"`
+		Project string `json:"project,omitempty"`
+		Projectid string `json:"projectid,omitempty"`
+		Resourceid string `json:"resourceid,omitempty"`
+		Resourcetype string `json:"resourcetype,omitempty"`
+		Value string `json:"value,omitempty"`
+	} `json:"tags,omitempty"`
+	Templatetag string `json:"templatetag,omitempty"`
+	Templatetype string `json:"templatetype,omitempty"`
+	Zoneid string `json:"zoneid,omitempty"`
+	Zonename string `json:"zonename,omitempty"`
+}
+
+type DeleteTemplateParams struct {
+	p map[string]interface{}
+}
+
+func (p *DeleteTemplateParams) toURLValues() url.Values {
+	u := url.Values{}
+	if p.p == nil {
+		return u
+	}
+	if v, found := p.p["id"]; found {
+		u.Set("id", v.(string))
+	}
+	if v, found := p.p["zoneid"]; found {
+		u.Set("zoneid", v.(string))
+	}
+	return u
+}
+
+func (p *DeleteTemplateParams) SetId(v string) {
+	if p.p == nil {
+		p.p = make(map[string]interface{})
+	}
+	p.p["id"] = v
+	return
+}
+
+func (p *DeleteTemplateParams) SetZoneid(v string) {
+	if p.p == nil {
+		p.p = make(map[string]interface{})
+	}
+	p.p["zoneid"] = v
+	return
+}
+
+// You should always use this function to get a new DeleteTemplateParams instance,
+// as then you are sure you have configured all required params
+func (s *TemplateService) NewDeleteTemplateParams(id string) *DeleteTemplateParams {
+	p := &DeleteTemplateParams{}
+	p.p = make(map[string]interface{})
+	p.p["id"] = id
+	return p
+}
+
+// Deletes a template from the system. Virtual machines using the deleted template are not affected.
+func (s *TemplateService) DeleteTemplate(p *DeleteTemplateParams) (*DeleteTemplateResponse, error) {
+	resp, err := s.cs.newRequest("deleteTemplate", p.toURLValues())
+	if err != nil {
+		return nil, err
+	}
+
+	var r DeleteTemplateResponse
+	if err := json.Unmarshal(resp, &r); err != nil {
+		return nil, err
+	}
+
+	// If we have an async client, we need to wait for the async result
+	if s.cs.async {
+		b, err := s.cs.GetAsyncJobResult(r.JobID, s.cs.timeout)
+		if err != nil {
+			if err == AsyncTimeoutErr {
+				return &r, err
+			}
+			return nil, err
+		}
+
+		if err := json.Unmarshal(b, &r); err != nil {
+			return nil, err
+		}
+	}
+	return &r, nil
+}
+
+type DeleteTemplateResponse struct {
+	JobID string `json:"jobid,omitempty"`
+	Displaytext string `json:"displaytext,omitempty"`
+	Success bool `json:"success,omitempty"`
+}
+
+type ListTemplatesParams struct {
+	p map[string]interface{}
+}
+
+func (p *ListTemplatesParams) toURLValues() url.Values {
+	u := url.Values{}
+	if p.p == nil {
+		return u
+	}
+	if v, found := p.p["account"]; found {
+		u.Set("account", v.(string))
+	}
+	if v, found := p.p["domainid"]; found {
+		u.Set("domainid", v.(string))
+	}
+	if v, found := p.p["hypervisor"]; found {
+		u.Set("hypervisor", v.(string))
+	}
+	if v, found := p.p["id"]; found {
+		u.Set("id", v.(string))
+	}
+	if v, found := p.p["isrecursive"]; found {
+		vv := strconv.FormatBool(v.(bool))
+		u.Set("isrecursive", vv)
+	}
+	if v, found := p.p["keyword"]; found {
+		u.Set("keyword", v.(string))
+	}
+	if v, found := p.p["listall"]; found {
+		vv := strconv.FormatBool(v.(bool))
+		u.Set("listall", vv)
+	}
+	if v, found := p.p["name"]; found {
+		u.Set("name", v.(string))
+	}
+	if v, found := p.p["page"]; found {
+		vv := strconv.Itoa(v.(int))
+		u.Set("page", vv)
+	}
+	if v, found := p.p["pagesize"]; found {
+		vv := strconv.Itoa(v.(int))
+		u.Set("pagesize", vv)
+	}
+	if v, found := p.p["projectid"]; found {
+		u.Set("projectid", v.(string))
+	}
+	if v, found := p.p["showremoved"]; found {
+		vv := strconv.FormatBool(v.(bool))
+		u.Set("showremoved", vv)
+	}
+	if v, found := p.p["tags"]; found {
+		i := 0
+		for k, vv := range v.(map[string]string) {
+			u.Set(fmt.Sprintf("tags[%d].key", i), k)
+			u.Set(fmt.Sprintf("tags[%d].value", i), vv)
+			i++
+		}
+	}
+	if v, found := p.p["templatefilter"]; found {
+		u.Set("templatefilter", v.(string))
+	}
+	if v, found := p.p["zoneid"]; found {
+		u.Set("zoneid", v.(string))
+	}
+	return u
+}
+
+func (p *ListTemplatesParams) SetAccount(v string) {
+	if p.p == nil {
+		p.p = make(map[string]interface{})
+	}
+	p.p["account"] = v
+	return
+}
+
+func (p *ListTemplatesParams) SetDomainid(v string) {
+	if p.p == nil {
+		p.p = make(map[string]interface{})
+	}
+	p.p["domainid"] = v
+	return
+}
+
+func (p *ListTemplatesParams) SetHypervisor(v string) {
+	if p.p == nil {
+		p.p =
make(map[string]interface{}) + } + p.p["hypervisor"] = v + return +} + +func (p *ListTemplatesParams) SetId(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["id"] = v + return +} + +func (p *ListTemplatesParams) SetIsrecursive(v bool) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["isrecursive"] = v + return +} + +func (p *ListTemplatesParams) SetKeyword(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["keyword"] = v + return +} + +func (p *ListTemplatesParams) SetListall(v bool) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["listall"] = v + return +} + +func (p *ListTemplatesParams) SetName(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["name"] = v + return +} + +func (p *ListTemplatesParams) SetPage(v int) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["page"] = v + return +} + +func (p *ListTemplatesParams) SetPagesize(v int) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["pagesize"] = v + return +} + +func (p *ListTemplatesParams) SetProjectid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["projectid"] = v + return +} + +func (p *ListTemplatesParams) SetShowremoved(v bool) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["showremoved"] = v + return +} + +func (p *ListTemplatesParams) SetTags(v map[string]string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["tags"] = v + return +} + +func (p *ListTemplatesParams) SetTemplatefilter(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["templatefilter"] = v + return +} + +func (p *ListTemplatesParams) SetZoneid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["zoneid"] = v + return +} + +// You should always use this function to get a new ListTemplatesParams instance, +// as then you are sure you have configured all required params +func (s *TemplateService) NewListTemplatesParams(templatefilter string) *ListTemplatesParams { + p := &ListTemplatesParams{} + p.p = make(map[string]interface{}) + p.p["templatefilter"] = templatefilter + return p +} + +// This is a courtesy helper function, which in some cases may not work as expected! +func (s *TemplateService) GetTemplateID(name string, templatefilter string, zoneid string) (string, error) { + p := &ListTemplatesParams{} + p.p = make(map[string]interface{}) + + p.p["name"] = name + p.p["templatefilter"] = templatefilter + p.p["zoneid"] = zoneid + + l, err := s.ListTemplates(p) + if err != nil { + return "", err + } + + if l.Count == 0 { + // If no matches, search all projects + p.p["projectid"] = "-1" + + l, err = s.ListTemplates(p) + if err != nil { + return "", err + } + } + + if l.Count == 0 { + return "", fmt.Errorf("No match found for %s: %+v", name, l) + } + + if l.Count == 1 { + return l.Templates[0].Id, nil + } + + if l.Count > 1 { + for _, v := range l.Templates { + if v.Name == name { + return v.Id, nil + } + } + } + return "", fmt.Errorf("Could not find an exact match for %s: %+v", name, l) +} + +// This is a courtesy helper function, which in some cases may not work as expected! 
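+//
+// For example (a sketch; svc is an assumed, configured *TemplateService and the
+// name, filter, and zone values are placeholders):
+//
+//	tmpl, count, err := svc.GetTemplateByName("CentOS-7-x64", "featured", "ZONE-UUID")
+//	if err != nil {
+//		log.Fatalf("lookup failed (%d matches): %v", count, err)
+//	}
+//	fmt.Println(tmpl.Id)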
+func (s *TemplateService) GetTemplateByName(name string, templatefilter string, zoneid string) (*Template, int, error) {
+	id, err := s.GetTemplateID(name, templatefilter, zoneid)
+	if err != nil {
+		return nil, -1, err
+	}
+
+	r, count, err := s.GetTemplateByID(id, templatefilter)
+	if err != nil {
+		return nil, count, err
+	}
+	return r, count, nil
+}
+
+// This is a courtesy helper function, which in some cases may not work as expected!
+func (s *TemplateService) GetTemplateByID(id string, templatefilter string) (*Template, int, error) {
+	p := &ListTemplatesParams{}
+	p.p = make(map[string]interface{})
+
+	p.p["id"] = id
+	p.p["templatefilter"] = templatefilter
+
+	l, err := s.ListTemplates(p)
+	if err != nil {
+		if strings.Contains(err.Error(), fmt.Sprintf(
+			"Invalid parameter id value=%s due to incorrect long value format, "+
+				"or entity does not exist", id)) {
+			return nil, 0, fmt.Errorf("No match found for %s: %+v", id, l)
+		}
+		return nil, -1, err
+	}
+
+	if l.Count == 0 {
+		// If no matches, search all projects
+		p.p["projectid"] = "-1"
+
+		l, err = s.ListTemplates(p)
+		if err != nil {
+			if strings.Contains(err.Error(), fmt.Sprintf(
+				"Invalid parameter id value=%s due to incorrect long value format, "+
+					"or entity does not exist", id)) {
+				return nil, 0, fmt.Errorf("No match found for %s: %+v", id, l)
+			}
+			return nil, -1, err
+		}
+	}
+
+	if l.Count == 0 {
+		return nil, l.Count, fmt.Errorf("No match found for %s: %+v", id, l)
+	}
+
+	if l.Count == 1 {
+		return l.Templates[0], l.Count, nil
+	}
+	return nil, l.Count, fmt.Errorf("There is more than one result for Template UUID: %s!", id)
+}
+
+// List all public, private, and privileged templates.
+func (s *TemplateService) ListTemplates(p *ListTemplatesParams) (*ListTemplatesResponse, error) {
+	resp, err := s.cs.newRequest("listTemplates", p.toURLValues())
+	if err != nil {
+		return nil, err
+	}
+
+	var r ListTemplatesResponse
+	if err := json.Unmarshal(resp, &r); err != nil {
+		return nil, err
+	}
+	return &r, nil
+}
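+
+// Example for ListTemplates (a sketch; svc is an assumed, configured
+// *TemplateService; the filter and paging values are placeholders):
+//
+//	p := svc.NewListTemplatesParams("featured")
+//	p.SetPage(1)
+//	p.SetPagesize(50)
+//	l, err := svc.ListTemplates(p)
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	fmt.Printf("got %d of %d templates\n", len(l.Templates), l.Count)
+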
`json:"sshkeyenabled,omitempty"` + Status string `json:"status,omitempty"` + Tags []struct { + Account string `json:"account,omitempty"` + Customer string `json:"customer,omitempty"` + Domain string `json:"domain,omitempty"` + Domainid string `json:"domainid,omitempty"` + Key string `json:"key,omitempty"` + Project string `json:"project,omitempty"` + Projectid string `json:"projectid,omitempty"` + Resourceid string `json:"resourceid,omitempty"` + Resourcetype string `json:"resourcetype,omitempty"` + Value string `json:"value,omitempty"` + } `json:"tags,omitempty"` + Templatetag string `json:"templatetag,omitempty"` + Templatetype string `json:"templatetype,omitempty"` + Zoneid string `json:"zoneid,omitempty"` + Zonename string `json:"zonename,omitempty"` +} + +type UpdateTemplatePermissionsParams struct { + p map[string]interface{} +} + +func (p *UpdateTemplatePermissionsParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["accounts"]; found { + vv := strings.Join(v.([]string), ",") + u.Set("accounts", vv) + } + if v, found := p.p["id"]; found { + u.Set("id", v.(string)) + } + if v, found := p.p["isextractable"]; found { + vv := strconv.FormatBool(v.(bool)) + u.Set("isextractable", vv) + } + if v, found := p.p["isfeatured"]; found { + vv := strconv.FormatBool(v.(bool)) + u.Set("isfeatured", vv) + } + if v, found := p.p["ispublic"]; found { + vv := strconv.FormatBool(v.(bool)) + u.Set("ispublic", vv) + } + if v, found := p.p["op"]; found { + u.Set("op", v.(string)) + } + if v, found := p.p["projectids"]; found { + vv := strings.Join(v.([]string), ",") + u.Set("projectids", vv) + } + return u +} + +func (p *UpdateTemplatePermissionsParams) SetAccounts(v []string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["accounts"] = v + return +} + +func (p *UpdateTemplatePermissionsParams) SetId(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["id"] = v + return +} + +func (p *UpdateTemplatePermissionsParams) SetIsextractable(v bool) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["isextractable"] = v + return +} + +func (p *UpdateTemplatePermissionsParams) SetIsfeatured(v bool) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["isfeatured"] = v + return +} + +func (p *UpdateTemplatePermissionsParams) SetIspublic(v bool) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["ispublic"] = v + return +} + +func (p *UpdateTemplatePermissionsParams) SetOp(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["op"] = v + return +} + +func (p *UpdateTemplatePermissionsParams) SetProjectids(v []string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["projectids"] = v + return +} + +// You should always use this function to get a new UpdateTemplatePermissionsParams instance, +// as then you are sure you have configured all required params +func (s *TemplateService) NewUpdateTemplatePermissionsParams(id string) *UpdateTemplatePermissionsParams { + p := &UpdateTemplatePermissionsParams{} + p.p = make(map[string]interface{}) + p.p["id"] = id + return p +} + +// Updates a template visibility permissions. A public template is visible to all accounts within the same domain. A private template is visible only to the owner of the template. A priviledged template is a private template with account permissions added. Only accounts specified under the template permissions are visible to them. 
+func (s *TemplateService) UpdateTemplatePermissions(p *UpdateTemplatePermissionsParams) (*UpdateTemplatePermissionsResponse, error) {
+	resp, err := s.cs.newRequest("updateTemplatePermissions", p.toURLValues())
+	if err != nil {
+		return nil, err
+	}
+
+	var r UpdateTemplatePermissionsResponse
+	if err := json.Unmarshal(resp, &r); err != nil {
+		return nil, err
+	}
+	return &r, nil
+}
+
+type UpdateTemplatePermissionsResponse struct {
+	Displaytext string `json:"displaytext,omitempty"`
+	Success     string `json:"success,omitempty"`
+}
+
+type ListTemplatePermissionsParams struct {
+	p map[string]interface{}
+}
+
+func (p *ListTemplatePermissionsParams) toURLValues() url.Values {
+	u := url.Values{}
+	if p.p == nil {
+		return u
+	}
+	if v, found := p.p["id"]; found {
+		u.Set("id", v.(string))
+	}
+	return u
+}
+
+func (p *ListTemplatePermissionsParams) SetId(v string) {
+	if p.p == nil {
+		p.p = make(map[string]interface{})
+	}
+	p.p["id"] = v
+	return
+}
+
+// You should always use this function to get a new ListTemplatePermissionsParams instance,
+// as then you are sure you have configured all required params
+func (s *TemplateService) NewListTemplatePermissionsParams(id string) *ListTemplatePermissionsParams {
+	p := &ListTemplatePermissionsParams{}
+	p.p = make(map[string]interface{})
+	p.p["id"] = id
+	return p
+}
+
+// This is a courtesy helper function, which in some cases may not work as expected!
+func (s *TemplateService) GetTemplatePermissionByID(id string) (*TemplatePermission, int, error) {
+	p := &ListTemplatePermissionsParams{}
+	p.p = make(map[string]interface{})
+
+	p.p["id"] = id
+
+	l, err := s.ListTemplatePermissions(p)
+	if err != nil {
+		if strings.Contains(err.Error(), fmt.Sprintf(
+			"Invalid parameter id value=%s due to incorrect long value format, "+
+				"or entity does not exist", id)) {
+			return nil, 0, fmt.Errorf("No match found for %s: %+v", id, l)
+		}
+		return nil, -1, err
+	}
+
+	if l.Count == 0 {
+		return nil, l.Count, fmt.Errorf("No match found for %s: %+v", id, l)
+	}
+
+	if l.Count == 1 {
+		return l.TemplatePermissions[0], l.Count, nil
+	}
+	return nil, l.Count, fmt.Errorf("There is more than one result for TemplatePermission UUID: %s!", id)
+}
+
+// List template visibility and all accounts that have permissions to view this template.
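+//
+// Example (a sketch; the UUID is a placeholder):
+//
+//	p := svc.NewListTemplatePermissionsParams("TEMPLATE-UUID")
+//	l, err := svc.ListTemplatePermissions(p)
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	for _, tp := range l.TemplatePermissions {
+//		fmt.Println(tp.Id, tp.Ispublic)
+//	}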
+func (s *TemplateService) ListTemplatePermissions(p *ListTemplatePermissionsParams) (*ListTemplatePermissionsResponse, error) {
+	resp, err := s.cs.newRequest("listTemplatePermissions", p.toURLValues())
+	if err != nil {
+		return nil, err
+	}
+
+	var r ListTemplatePermissionsResponse
+	if err := json.Unmarshal(resp, &r); err != nil {
+		return nil, err
+	}
+	return &r, nil
+}
+
+type ListTemplatePermissionsResponse struct {
+	Count               int                   `json:"count"`
+	TemplatePermissions []*TemplatePermission `json:"templatepermission"`
+}
+
+type TemplatePermission struct {
+	Account    []string `json:"account,omitempty"`
+	Domainid   string   `json:"domainid,omitempty"`
+	Id         string   `json:"id,omitempty"`
+	Ispublic   bool     `json:"ispublic,omitempty"`
+	Projectids []string `json:"projectids,omitempty"`
+}
+
+type ExtractTemplateParams struct {
+	p map[string]interface{}
+}
+
+func (p *ExtractTemplateParams) toURLValues() url.Values {
+	u := url.Values{}
+	if p.p == nil {
+		return u
+	}
+	if v, found := p.p["id"]; found {
+		u.Set("id", v.(string))
+	}
+	if v, found := p.p["mode"]; found {
+		u.Set("mode", v.(string))
+	}
+	if v, found := p.p["url"]; found {
+		u.Set("url", v.(string))
+	}
+	if v, found := p.p["zoneid"]; found {
+		u.Set("zoneid", v.(string))
+	}
+	return u
+}
+
+func (p *ExtractTemplateParams) SetId(v string) {
+	if p.p == nil {
+		p.p = make(map[string]interface{})
+	}
+	p.p["id"] = v
+	return
+}
+
+func (p *ExtractTemplateParams) SetMode(v string) {
+	if p.p == nil {
+		p.p = make(map[string]interface{})
+	}
+	p.p["mode"] = v
+	return
+}
+
+func (p *ExtractTemplateParams) SetUrl(v string) {
+	if p.p == nil {
+		p.p = make(map[string]interface{})
+	}
+	p.p["url"] = v
+	return
+}
+
+func (p *ExtractTemplateParams) SetZoneid(v string) {
+	if p.p == nil {
+		p.p = make(map[string]interface{})
+	}
+	p.p["zoneid"] = v
+	return
+}
+
+// You should always use this function to get a new ExtractTemplateParams instance,
+// as then you are sure you have configured all required params
+func (s *TemplateService) NewExtractTemplateParams(id string, mode string) *ExtractTemplateParams {
+	p := &ExtractTemplateParams{}
+	p.p = make(map[string]interface{})
+	p.p["id"] = id
+	p.p["mode"] = mode
+	return p
+}
+
+// Extracts a template
+func (s *TemplateService) ExtractTemplate(p *ExtractTemplateParams) (*ExtractTemplateResponse, error) {
+	resp, err := s.cs.newRequest("extractTemplate", p.toURLValues())
+	if err != nil {
+		return nil, err
+	}
+
+	var r ExtractTemplateResponse
+	if err := json.Unmarshal(resp, &r); err != nil {
+		return nil, err
+	}
+
+	// If we have an async client, we need to wait for the async result
+	if s.cs.async {
+		b, err := s.cs.GetAsyncJobResult(r.JobID, s.cs.timeout)
+		if err != nil {
+			if err == AsyncTimeoutErr {
+				return &r, err
+			}
+			return nil, err
+		}
+
+		b, err = getRawValue(b)
+		if err != nil {
+			return nil, err
+		}
+
+		if err := json.Unmarshal(b, &r); err != nil {
+			return nil, err
+		}
+	}
+	return &r, nil
+}
+
+type ExtractTemplateResponse struct {
+	JobID            string `json:"jobid,omitempty"`
+	Accountid        string `json:"accountid,omitempty"`
+	Created          string `json:"created,omitempty"`
+	ExtractId        string `json:"extractId,omitempty"`
+	ExtractMode      string `json:"extractMode,omitempty"`
+	Id               string `json:"id,omitempty"`
+	Name             string `json:"name,omitempty"`
+	Resultstring     string `json:"resultstring,omitempty"`
+	State            string `json:"state,omitempty"`
+	Status           string `json:"status,omitempty"`
+	Storagetype      string `json:"storagetype,omitempty"`
+	Uploadpercentage int    `json:"uploadpercentage,omitempty"`
+	Url              string `json:"url,omitempty"`
+	Zoneid           string `json:"zoneid,omitempty"`
+	Zonename         string `json:"zonename,omitempty"`
+}
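+
+// Example for ExtractTemplate (a sketch; the mode and UUID are placeholders):
+//
+//	p := svc.NewExtractTemplateParams("TEMPLATE-UUID", "HTTP_DOWNLOAD")
+//	r, err := svc.ExtractTemplate(p)
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	fmt.Println(r.Url)
+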
+type PrepareTemplateParams struct {
+	p map[string]interface{}
+}
+
+func (p *PrepareTemplateParams) toURLValues() url.Values {
+	u := url.Values{}
+	if p.p == nil {
+		return u
+	}
+	if v, found := p.p["templateid"]; found {
+		u.Set("templateid", v.(string))
+	}
+	if v, found := p.p["zoneid"]; found {
+		u.Set("zoneid", v.(string))
+	}
+	return u
+}
+
+func (p *PrepareTemplateParams) SetTemplateid(v string) {
+	if p.p == nil {
+		p.p = make(map[string]interface{})
+	}
+	p.p["templateid"] = v
+	return
+}
+
+func (p *PrepareTemplateParams) SetZoneid(v string) {
+	if p.p == nil {
+		p.p = make(map[string]interface{})
+	}
+	p.p["zoneid"] = v
+	return
+}
+
+// You should always use this function to get a new PrepareTemplateParams instance,
+// as then you are sure you have configured all required params
+func (s *TemplateService) NewPrepareTemplateParams(templateid string, zoneid string) *PrepareTemplateParams {
+	p := &PrepareTemplateParams{}
+	p.p = make(map[string]interface{})
+	p.p["templateid"] = templateid
+	p.p["zoneid"] = zoneid
+	return p
+}
+
+// Loads a template into primary storage
+func (s *TemplateService) PrepareTemplate(p *PrepareTemplateParams) (*PrepareTemplateResponse, error) {
+	resp, err := s.cs.newRequest("prepareTemplate", p.toURLValues())
+	if err != nil {
+		return nil, err
+	}
+
+	var r PrepareTemplateResponse
+	if err := json.Unmarshal(resp, &r); err != nil {
+		return nil, err
+	}
+	return &r, nil
+}
+
+type PrepareTemplateResponse struct {
+	Account               string            `json:"account,omitempty"`
+	Accountid             string            `json:"accountid,omitempty"`
+	Bootable              bool              `json:"bootable,omitempty"`
+	Checksum              string            `json:"checksum,omitempty"`
+	Created               string            `json:"created,omitempty"`
+	CrossZones            bool              `json:"crossZones,omitempty"`
+	Details               map[string]string `json:"details,omitempty"`
+	Displaytext           string            `json:"displaytext,omitempty"`
+	Domain                string            `json:"domain,omitempty"`
+	Domainid              string            `json:"domainid,omitempty"`
+	Format                string            `json:"format,omitempty"`
+	Hostid                string            `json:"hostid,omitempty"`
+	Hostname              string            `json:"hostname,omitempty"`
+	Hypervisor            string            `json:"hypervisor,omitempty"`
+	Id                    string            `json:"id,omitempty"`
+	Isdynamicallyscalable bool              `json:"isdynamicallyscalable,omitempty"`
+	Isextractable         bool              `json:"isextractable,omitempty"`
+	Isfeatured            bool              `json:"isfeatured,omitempty"`
+	Ispublic              bool              `json:"ispublic,omitempty"`
+	Isready               bool              `json:"isready,omitempty"`
+	Name                  string            `json:"name,omitempty"`
+	Ostypeid              string            `json:"ostypeid,omitempty"`
+	Ostypename            string            `json:"ostypename,omitempty"`
+	Passwordenabled       bool              `json:"passwordenabled,omitempty"`
+	Project               string            `json:"project,omitempty"`
+	Projectid             string            `json:"projectid,omitempty"`
+	Removed               string            `json:"removed,omitempty"`
+	Size                  int64             `json:"size,omitempty"`
+	Sourcetemplateid      string            `json:"sourcetemplateid,omitempty"`
+	Sshkeyenabled         bool              `json:"sshkeyenabled,omitempty"`
+	Status                string            `json:"status,omitempty"`
+	Tags                  []struct {
+		Account      string `json:"account,omitempty"`
+		Customer     string `json:"customer,omitempty"`
+		Domain       string `json:"domain,omitempty"`
+		Domainid     string `json:"domainid,omitempty"`
+		Key          string `json:"key,omitempty"`
+		Project      string `json:"project,omitempty"`
+		Projectid    string `json:"projectid,omitempty"`
+		Resourceid   string `json:"resourceid,omitempty"`
+		Resourcetype string `json:"resourcetype,omitempty"`
+		Value        string `json:"value,omitempty"`
+	} `json:"tags,omitempty"`
+	Templatetag  string `json:"templatetag,omitempty"`
+	Templatetype string `json:"templatetype,omitempty"`
+	Zoneid       string `json:"zoneid,omitempty"`
+	Zonename     string `json:"zonename,omitempty"`
+}
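+
+// Example for PrepareTemplate (a sketch; the UUIDs are placeholders):
+//
+//	p := svc.NewPrepareTemplateParams("TEMPLATE-UUID", "ZONE-UUID")
+//	r, err := svc.PrepareTemplate(p)
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	fmt.Println(r.Isready)
+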
`json:"tags,omitempty"` + Templatetag string `json:"templatetag,omitempty"` + Templatetype string `json:"templatetype,omitempty"` + Zoneid string `json:"zoneid,omitempty"` + Zonename string `json:"zonename,omitempty"` +} + +type UpgradeRouterTemplateParams struct { + p map[string]interface{} +} + +func (p *UpgradeRouterTemplateParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["account"]; found { + u.Set("account", v.(string)) + } + if v, found := p.p["clusterid"]; found { + u.Set("clusterid", v.(string)) + } + if v, found := p.p["domainid"]; found { + u.Set("domainid", v.(string)) + } + if v, found := p.p["id"]; found { + u.Set("id", v.(string)) + } + if v, found := p.p["podid"]; found { + u.Set("podid", v.(string)) + } + if v, found := p.p["zoneid"]; found { + u.Set("zoneid", v.(string)) + } + return u +} + +func (p *UpgradeRouterTemplateParams) SetAccount(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["account"] = v + return +} + +func (p *UpgradeRouterTemplateParams) SetClusterid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["clusterid"] = v + return +} + +func (p *UpgradeRouterTemplateParams) SetDomainid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["domainid"] = v + return +} + +func (p *UpgradeRouterTemplateParams) SetId(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["id"] = v + return +} + +func (p *UpgradeRouterTemplateParams) SetPodid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["podid"] = v + return +} + +func (p *UpgradeRouterTemplateParams) SetZoneid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["zoneid"] = v + return +} + +// You should always use this function to get a new UpgradeRouterTemplateParams instance, +// as then you are sure you have configured all required params +func (s *TemplateService) NewUpgradeRouterTemplateParams() *UpgradeRouterTemplateParams { + p := &UpgradeRouterTemplateParams{} + p.p = make(map[string]interface{}) + return p +} + +// Upgrades router to use newer template +func (s *TemplateService) UpgradeRouterTemplate(p *UpgradeRouterTemplateParams) (*UpgradeRouterTemplateResponse, error) { + resp, err := s.cs.newRequest("upgradeRouterTemplate", p.toURLValues()) + if err != nil { + return nil, err + } + + var r UpgradeRouterTemplateResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + return &r, nil +} + +type UpgradeRouterTemplateResponse struct { + Jobid string `json:"jobid,omitempty"` + Jobstatus int `json:"jobstatus,omitempty"` +} diff --git a/vendor/github.com/xanzy/go-cloudstack/cloudstack/UCSService.go b/vendor/github.com/xanzy/go-cloudstack/cloudstack/UCSService.go new file mode 100644 index 000000000..5bf79c5e7 --- /dev/null +++ b/vendor/github.com/xanzy/go-cloudstack/cloudstack/UCSService.go @@ -0,0 +1,582 @@ +// +// Copyright 2014, Sander van Harmelen +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. +// + +package cloudstack + +import ( + "encoding/json" + "fmt" + "net/url" + "strconv" + "strings" +) + +type AddUcsManagerParams struct { + p map[string]interface{} +} + +func (p *AddUcsManagerParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["name"]; found { + u.Set("name", v.(string)) + } + if v, found := p.p["password"]; found { + u.Set("password", v.(string)) + } + if v, found := p.p["url"]; found { + u.Set("url", v.(string)) + } + if v, found := p.p["username"]; found { + u.Set("username", v.(string)) + } + if v, found := p.p["zoneid"]; found { + u.Set("zoneid", v.(string)) + } + return u +} + +func (p *AddUcsManagerParams) SetName(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["name"] = v + return +} + +func (p *AddUcsManagerParams) SetPassword(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["password"] = v + return +} + +func (p *AddUcsManagerParams) SetUrl(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["url"] = v + return +} + +func (p *AddUcsManagerParams) SetUsername(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["username"] = v + return +} + +func (p *AddUcsManagerParams) SetZoneid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["zoneid"] = v + return +} + +// You should always use this function to get a new AddUcsManagerParams instance, +// as then you are sure you have configured all required params +func (s *UCSService) NewAddUcsManagerParams(password string, url string, username string, zoneid string) *AddUcsManagerParams { + p := &AddUcsManagerParams{} + p.p = make(map[string]interface{}) + p.p["password"] = password + p.p["url"] = url + p.p["username"] = username + p.p["zoneid"] = zoneid + return p +} + +// Adds a Ucs manager +func (s *UCSService) AddUcsManager(p *AddUcsManagerParams) (*AddUcsManagerResponse, error) { + resp, err := s.cs.newRequest("addUcsManager", p.toURLValues()) + if err != nil { + return nil, err + } + + var r AddUcsManagerResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + return &r, nil +} + +type AddUcsManagerResponse struct { + Id string `json:"id,omitempty"` + Name string `json:"name,omitempty"` + Url string `json:"url,omitempty"` + Zoneid string `json:"zoneid,omitempty"` +} + +type ListUcsManagersParams struct { + p map[string]interface{} +} + +func (p *ListUcsManagersParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["id"]; found { + u.Set("id", v.(string)) + } + if v, found := p.p["keyword"]; found { + u.Set("keyword", v.(string)) + } + if v, found := p.p["page"]; found { + vv := strconv.Itoa(v.(int)) + u.Set("page", vv) + } + if v, found := p.p["pagesize"]; found { + vv := strconv.Itoa(v.(int)) + u.Set("pagesize", vv) + } + if v, found := p.p["zoneid"]; found { + u.Set("zoneid", v.(string)) + } + return u +} + +func (p *ListUcsManagersParams) SetId(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["id"] = v + return +} + +func (p *ListUcsManagersParams) SetKeyword(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["keyword"] = v + return +} + +func (p *ListUcsManagersParams) SetPage(v int) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["page"] = v + return +} + +func (p 
*ListUcsManagersParams) SetPagesize(v int) {
+	if p.p == nil {
+		p.p = make(map[string]interface{})
+	}
+	p.p["pagesize"] = v
+	return
+}
+
+func (p *ListUcsManagersParams) SetZoneid(v string) {
+	if p.p == nil {
+		p.p = make(map[string]interface{})
+	}
+	p.p["zoneid"] = v
+	return
+}
+
+// You should always use this function to get a new ListUcsManagersParams instance,
+// as then you are sure you have configured all required params
+func (s *UCSService) NewListUcsManagersParams() *ListUcsManagersParams {
+	p := &ListUcsManagersParams{}
+	p.p = make(map[string]interface{})
+	return p
+}
+
+// This is a courtesy helper function, which in some cases may not work as expected!
+func (s *UCSService) GetUcsManagerID(keyword string) (string, error) {
+	p := &ListUcsManagersParams{}
+	p.p = make(map[string]interface{})
+
+	p.p["keyword"] = keyword
+
+	l, err := s.ListUcsManagers(p)
+	if err != nil {
+		return "", err
+	}
+
+	if l.Count == 0 {
+		return "", fmt.Errorf("No match found for %s: %+v", keyword, l)
+	}
+
+	if l.Count == 1 {
+		return l.UcsManagers[0].Id, nil
+	}
+
+	if l.Count > 1 {
+		for _, v := range l.UcsManagers {
+			if v.Name == keyword {
+				return v.Id, nil
+			}
+		}
+	}
+	return "", fmt.Errorf("Could not find an exact match for %s: %+v", keyword, l)
+}
+
+// This is a courtesy helper function, which in some cases may not work as expected!
+func (s *UCSService) GetUcsManagerByName(name string) (*UcsManager, int, error) {
+	id, err := s.GetUcsManagerID(name)
+	if err != nil {
+		return nil, -1, err
+	}
+
+	r, count, err := s.GetUcsManagerByID(id)
+	if err != nil {
+		return nil, count, err
+	}
+	return r, count, nil
+}
+
+// This is a courtesy helper function, which in some cases may not work as expected!
+func (s *UCSService) GetUcsManagerByID(id string) (*UcsManager, int, error) {
+	p := &ListUcsManagersParams{}
+	p.p = make(map[string]interface{})
+
+	p.p["id"] = id
+
+	l, err := s.ListUcsManagers(p)
+	if err != nil {
+		if strings.Contains(err.Error(), fmt.Sprintf(
+			"Invalid parameter id value=%s due to incorrect long value format, "+
+				"or entity does not exist", id)) {
+			return nil, 0, fmt.Errorf("No match found for %s: %+v", id, l)
+		}
+		return nil, -1, err
+	}
+
+	if l.Count == 0 {
+		return nil, l.Count, fmt.Errorf("No match found for %s: %+v", id, l)
+	}
+
+	if l.Count == 1 {
+		return l.UcsManagers[0], l.Count, nil
+	}
+	return nil, l.Count, fmt.Errorf("There is more than one result for UcsManager UUID: %s!", id)
+}
+
+// Lists UCS managers
+func (s *UCSService) ListUcsManagers(p *ListUcsManagersParams) (*ListUcsManagersResponse, error) {
+	resp, err := s.cs.newRequest("listUcsManagers", p.toURLValues())
+	if err != nil {
+		return nil, err
+	}
+
+	var r ListUcsManagersResponse
+	if err := json.Unmarshal(resp, &r); err != nil {
+		return nil, err
+	}
+	return &r, nil
+}
+
+type ListUcsManagersResponse struct {
+	Count       int           `json:"count"`
+	UcsManagers []*UcsManager `json:"ucsmanager"`
+}
+
+type UcsManager struct {
+	Id     string `json:"id,omitempty"`
+	Name   string `json:"name,omitempty"`
+	Url    string `json:"url,omitempty"`
+	Zoneid string `json:"zoneid,omitempty"`
+}
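+
+// Example (a sketch; svc is an assumed, configured *UCSService and the
+// manager name is a placeholder):
+//
+//	mgr, _, err := svc.GetUcsManagerByName("ucs-manager-01")
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	fmt.Println(mgr.Id, mgr.Url)
+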
u.Set("pagesize", vv) + } + if v, found := p.p["ucsmanagerid"]; found { + u.Set("ucsmanagerid", v.(string)) + } + return u +} + +func (p *ListUcsProfilesParams) SetKeyword(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["keyword"] = v + return +} + +func (p *ListUcsProfilesParams) SetPage(v int) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["page"] = v + return +} + +func (p *ListUcsProfilesParams) SetPagesize(v int) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["pagesize"] = v + return +} + +func (p *ListUcsProfilesParams) SetUcsmanagerid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["ucsmanagerid"] = v + return +} + +// You should always use this function to get a new ListUcsProfilesParams instance, +// as then you are sure you have configured all required params +func (s *UCSService) NewListUcsProfilesParams(ucsmanagerid string) *ListUcsProfilesParams { + p := &ListUcsProfilesParams{} + p.p = make(map[string]interface{}) + p.p["ucsmanagerid"] = ucsmanagerid + return p +} + +// List profile in ucs manager +func (s *UCSService) ListUcsProfiles(p *ListUcsProfilesParams) (*ListUcsProfilesResponse, error) { + resp, err := s.cs.newRequest("listUcsProfiles", p.toURLValues()) + if err != nil { + return nil, err + } + + var r ListUcsProfilesResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + return &r, nil +} + +type ListUcsProfilesResponse struct { + Count int `json:"count"` + UcsProfiles []*UcsProfile `json:"ucsprofile"` +} + +type UcsProfile struct { + Ucsdn string `json:"ucsdn,omitempty"` +} + +type ListUcsBladesParams struct { + p map[string]interface{} +} + +func (p *ListUcsBladesParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["keyword"]; found { + u.Set("keyword", v.(string)) + } + if v, found := p.p["page"]; found { + vv := strconv.Itoa(v.(int)) + u.Set("page", vv) + } + if v, found := p.p["pagesize"]; found { + vv := strconv.Itoa(v.(int)) + u.Set("pagesize", vv) + } + if v, found := p.p["ucsmanagerid"]; found { + u.Set("ucsmanagerid", v.(string)) + } + return u +} + +func (p *ListUcsBladesParams) SetKeyword(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["keyword"] = v + return +} + +func (p *ListUcsBladesParams) SetPage(v int) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["page"] = v + return +} + +func (p *ListUcsBladesParams) SetPagesize(v int) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["pagesize"] = v + return +} + +func (p *ListUcsBladesParams) SetUcsmanagerid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["ucsmanagerid"] = v + return +} + +// You should always use this function to get a new ListUcsBladesParams instance, +// as then you are sure you have configured all required params +func (s *UCSService) NewListUcsBladesParams(ucsmanagerid string) *ListUcsBladesParams { + p := &ListUcsBladesParams{} + p.p = make(map[string]interface{}) + p.p["ucsmanagerid"] = ucsmanagerid + return p +} + +// List ucs blades +func (s *UCSService) ListUcsBlades(p *ListUcsBladesParams) (*ListUcsBladesResponse, error) { + resp, err := s.cs.newRequest("listUcsBlades", p.toURLValues()) + if err != nil { + return nil, err + } + + var r ListUcsBladesResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + return &r, nil +} + +type ListUcsBladesResponse struct { + Count int 
`json:"count"` + UcsBlades []*UcsBlade `json:"ucsblade"` +} + +type UcsBlade struct { + Bladedn string `json:"bladedn,omitempty"` + Hostid string `json:"hostid,omitempty"` + Id string `json:"id,omitempty"` + Profiledn string `json:"profiledn,omitempty"` + Ucsmanagerid string `json:"ucsmanagerid,omitempty"` +} + +type AssociateUcsProfileToBladeParams struct { + p map[string]interface{} +} + +func (p *AssociateUcsProfileToBladeParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["bladeid"]; found { + u.Set("bladeid", v.(string)) + } + if v, found := p.p["profiledn"]; found { + u.Set("profiledn", v.(string)) + } + if v, found := p.p["ucsmanagerid"]; found { + u.Set("ucsmanagerid", v.(string)) + } + return u +} + +func (p *AssociateUcsProfileToBladeParams) SetBladeid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["bladeid"] = v + return +} + +func (p *AssociateUcsProfileToBladeParams) SetProfiledn(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["profiledn"] = v + return +} + +func (p *AssociateUcsProfileToBladeParams) SetUcsmanagerid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["ucsmanagerid"] = v + return +} + +// You should always use this function to get a new AssociateUcsProfileToBladeParams instance, +// as then you are sure you have configured all required params +func (s *UCSService) NewAssociateUcsProfileToBladeParams(bladeid string, profiledn string, ucsmanagerid string) *AssociateUcsProfileToBladeParams { + p := &AssociateUcsProfileToBladeParams{} + p.p = make(map[string]interface{}) + p.p["bladeid"] = bladeid + p.p["profiledn"] = profiledn + p.p["ucsmanagerid"] = ucsmanagerid + return p +} + +// associate a profile to a blade +func (s *UCSService) AssociateUcsProfileToBlade(p *AssociateUcsProfileToBladeParams) (*AssociateUcsProfileToBladeResponse, error) { + resp, err := s.cs.newRequest("associateUcsProfileToBlade", p.toURLValues()) + if err != nil { + return nil, err + } + + var r AssociateUcsProfileToBladeResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + + // If we have a async client, we need to wait for the async result + if s.cs.async { + b, err := s.cs.GetAsyncJobResult(r.JobID, s.cs.timeout) + if err != nil { + if err == AsyncTimeoutErr { + return &r, err + } + return nil, err + } + + b, err = getRawValue(b) + if err != nil { + return nil, err + } + + if err := json.Unmarshal(b, &r); err != nil { + return nil, err + } + } + return &r, nil +} + +type AssociateUcsProfileToBladeResponse struct { + JobID string `json:"jobid,omitempty"` + Bladedn string `json:"bladedn,omitempty"` + Hostid string `json:"hostid,omitempty"` + Id string `json:"id,omitempty"` + Profiledn string `json:"profiledn,omitempty"` + Ucsmanagerid string `json:"ucsmanagerid,omitempty"` +} diff --git a/vendor/github.com/xanzy/go-cloudstack/cloudstack/UsageService.go b/vendor/github.com/xanzy/go-cloudstack/cloudstack/UsageService.go new file mode 100644 index 000000000..ea468e1cb --- /dev/null +++ b/vendor/github.com/xanzy/go-cloudstack/cloudstack/UsageService.go @@ -0,0 +1,1120 @@ +// +// Copyright 2014, Sander van Harmelen +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
diff --git a/vendor/github.com/xanzy/go-cloudstack/cloudstack/UsageService.go b/vendor/github.com/xanzy/go-cloudstack/cloudstack/UsageService.go
new file mode 100644
index 000000000..ea468e1cb
--- /dev/null
+++ b/vendor/github.com/xanzy/go-cloudstack/cloudstack/UsageService.go
@@ -0,0 +1,1120 @@
+//
+// Copyright 2014, Sander van Harmelen
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+package cloudstack
+
+import (
+	"encoding/json"
+	"fmt"
+	"net/url"
+	"strconv"
+)
+
+type AddTrafficTypeParams struct {
+	p map[string]interface{}
+}
+
+func (p *AddTrafficTypeParams) toURLValues() url.Values {
+	u := url.Values{}
+	if p.p == nil {
+		return u
+	}
+	if v, found := p.p["hypervnetworklabel"]; found {
+		u.Set("hypervnetworklabel", v.(string))
+	}
+	if v, found := p.p["isolationmethod"]; found {
+		u.Set("isolationmethod", v.(string))
+	}
+	if v, found := p.p["kvmnetworklabel"]; found {
+		u.Set("kvmnetworklabel", v.(string))
+	}
+	if v, found := p.p["physicalnetworkid"]; found {
+		u.Set("physicalnetworkid", v.(string))
+	}
+	if v, found := p.p["traffictype"]; found {
+		u.Set("traffictype", v.(string))
+	}
+	if v, found := p.p["vlan"]; found {
+		u.Set("vlan", v.(string))
+	}
+	if v, found := p.p["vmwarenetworklabel"]; found {
+		u.Set("vmwarenetworklabel", v.(string))
+	}
+	if v, found := p.p["xennetworklabel"]; found {
+		u.Set("xennetworklabel", v.(string))
+	}
+	return u
+}
+
+func (p *AddTrafficTypeParams) SetHypervnetworklabel(v string) {
+	if p.p == nil {
+		p.p = make(map[string]interface{})
+	}
+	p.p["hypervnetworklabel"] = v
+	return
+}
+
+func (p *AddTrafficTypeParams) SetIsolationmethod(v string) {
+	if p.p == nil {
+		p.p = make(map[string]interface{})
+	}
+	p.p["isolationmethod"] = v
+	return
+}
+
+func (p *AddTrafficTypeParams) SetKvmnetworklabel(v string) {
+	if p.p == nil {
+		p.p = make(map[string]interface{})
+	}
+	p.p["kvmnetworklabel"] = v
+	return
+}
+
+func (p *AddTrafficTypeParams) SetPhysicalnetworkid(v string) {
+	if p.p == nil {
+		p.p = make(map[string]interface{})
+	}
+	p.p["physicalnetworkid"] = v
+	return
+}
+
+func (p *AddTrafficTypeParams) SetTraffictype(v string) {
+	if p.p == nil {
+		p.p = make(map[string]interface{})
+	}
+	p.p["traffictype"] = v
+	return
+}
+
+func (p *AddTrafficTypeParams) SetVlan(v string) {
+	if p.p == nil {
+		p.p = make(map[string]interface{})
+	}
+	p.p["vlan"] = v
+	return
+}
+
+func (p *AddTrafficTypeParams) SetVmwarenetworklabel(v string) {
+	if p.p == nil {
+		p.p = make(map[string]interface{})
+	}
+	p.p["vmwarenetworklabel"] = v
+	return
+}
+
+func (p *AddTrafficTypeParams) SetXennetworklabel(v string) {
+	if p.p == nil {
+		p.p = make(map[string]interface{})
+	}
+	p.p["xennetworklabel"] = v
+	return
+}
+
+// You should always use this function to get a new AddTrafficTypeParams instance,
+// as then you are sure you have configured all required params
+func (s *UsageService) NewAddTrafficTypeParams(physicalnetworkid string, traffictype string) *AddTrafficTypeParams {
+	p := &AddTrafficTypeParams{}
+	p.p = make(map[string]interface{})
+	p.p["physicalnetworkid"] = physicalnetworkid
+	p.p["traffictype"] = traffictype
+	return p
+}
+
+// Adds a traffic type to a physical network
+func (s *UsageService) AddTrafficType(p *AddTrafficTypeParams) (*AddTrafficTypeResponse, error) {
+	resp, err := s.cs.newRequest("addTrafficType", p.toURLValues())
+	if err != nil {
+		return nil, err
+	}
+
+	var r AddTrafficTypeResponse
+	if err := json.Unmarshal(resp, &r); err != nil {
+		return nil, err
+	}
+
+	// If we have an async client, we need to wait for the async result
+	if s.cs.async {
+		b, err := s.cs.GetAsyncJobResult(r.JobID, s.cs.timeout)
+		if err != nil {
+			if err == AsyncTimeoutErr {
+				return &r, err
+			}
+			return nil, err
+		}
+
+		b, err = getRawValue(b)
+		if err != nil {
+			return nil, err
+		}
+
+		if err := json.Unmarshal(b, &r); err != nil {
+			return nil, err
+		}
+	}
+	return &r, nil
+}
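+
+// Example for AddTrafficType (a sketch; svc is an assumed, configured
+// *UsageService, and the network UUID and traffic type are placeholders):
+//
+//	p := svc.NewAddTrafficTypeParams("PHYSICAL-NETWORK-UUID", "Guest")
+//	r, err := svc.AddTrafficType(p)
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	fmt.Println(r.Id)
+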
+type AddTrafficTypeResponse struct {
+	JobID              string `json:"jobid,omitempty"`
+	Hypervnetworklabel string `json:"hypervnetworklabel,omitempty"`
+	Id                 string `json:"id,omitempty"`
+	Kvmnetworklabel    string `json:"kvmnetworklabel,omitempty"`
+	Physicalnetworkid  string `json:"physicalnetworkid,omitempty"`
+	Traffictype        string `json:"traffictype,omitempty"`
+	Vmwarenetworklabel string `json:"vmwarenetworklabel,omitempty"`
+	Xennetworklabel    string `json:"xennetworklabel,omitempty"`
+}
+
+type DeleteTrafficTypeParams struct {
+	p map[string]interface{}
+}
+
+func (p *DeleteTrafficTypeParams) toURLValues() url.Values {
+	u := url.Values{}
+	if p.p == nil {
+		return u
+	}
+	if v, found := p.p["id"]; found {
+		u.Set("id", v.(string))
+	}
+	return u
+}
+
+func (p *DeleteTrafficTypeParams) SetId(v string) {
+	if p.p == nil {
+		p.p = make(map[string]interface{})
+	}
+	p.p["id"] = v
+	return
+}
+
+// You should always use this function to get a new DeleteTrafficTypeParams instance,
+// as then you are sure you have configured all required params
+func (s *UsageService) NewDeleteTrafficTypeParams(id string) *DeleteTrafficTypeParams {
+	p := &DeleteTrafficTypeParams{}
+	p.p = make(map[string]interface{})
+	p.p["id"] = id
+	return p
+}
+
+// Deletes a traffic type of a physical network
+func (s *UsageService) DeleteTrafficType(p *DeleteTrafficTypeParams) (*DeleteTrafficTypeResponse, error) {
+	resp, err := s.cs.newRequest("deleteTrafficType", p.toURLValues())
+	if err != nil {
+		return nil, err
+	}
+
+	var r DeleteTrafficTypeResponse
+	if err := json.Unmarshal(resp, &r); err != nil {
+		return nil, err
+	}
+
+	// If we have an async client, we need to wait for the async result
+	if s.cs.async {
+		b, err := s.cs.GetAsyncJobResult(r.JobID, s.cs.timeout)
+		if err != nil {
+			if err == AsyncTimeoutErr {
+				return &r, err
+			}
+			return nil, err
+		}
+
+		if err := json.Unmarshal(b, &r); err != nil {
+			return nil, err
+		}
+	}
+	return &r, nil
+}
+
+type DeleteTrafficTypeResponse struct {
+	JobID       string `json:"jobid,omitempty"`
+	Displaytext string `json:"displaytext,omitempty"`
+	Success     bool   `json:"success,omitempty"`
+}
+
+type ListTrafficTypesParams struct {
+	p map[string]interface{}
+}
+
+func (p *ListTrafficTypesParams) toURLValues() url.Values {
+	u := url.Values{}
+	if p.p == nil {
+		return u
+	}
+	if v, found := p.p["keyword"]; found {
+		u.Set("keyword", v.(string))
+	}
+	if v, found := p.p["page"]; found {
+		vv := strconv.Itoa(v.(int))
+		u.Set("page", vv)
+	}
+	if v, found := p.p["pagesize"]; found {
+		vv := strconv.Itoa(v.(int))
+		u.Set("pagesize", vv)
+	}
+	if v, found := p.p["physicalnetworkid"]; found {
+		u.Set("physicalnetworkid", v.(string))
+	}
+	return u
+}
+
+func (p *ListTrafficTypesParams) SetKeyword(v string) {
+	if p.p == nil {
+		p.p = make(map[string]interface{})
+	}
+	p.p["keyword"] = v
+	return
+}
+
+func (p *ListTrafficTypesParams) SetPage(v int) {
+	if p.p == nil {
+		p.p = make(map[string]interface{})
+	}
+	p.p["page"] = v
+	return
+}
+
+func (p *ListTrafficTypesParams) SetPagesize(v int) {
+	if p.p == nil {
+		p.p = make(map[string]interface{})
+	}
+	p.p["pagesize"] = v
+	return
+}
+
+func (p *ListTrafficTypesParams) 
SetPhysicalnetworkid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["physicalnetworkid"] = v + return +} + +// You should always use this function to get a new ListTrafficTypesParams instance, +// as then you are sure you have configured all required params +func (s *UsageService) NewListTrafficTypesParams(physicalnetworkid string) *ListTrafficTypesParams { + p := &ListTrafficTypesParams{} + p.p = make(map[string]interface{}) + p.p["physicalnetworkid"] = physicalnetworkid + return p +} + +// This is a courtesy helper function, which in some cases may not work as expected! +func (s *UsageService) GetTrafficTypeID(keyword string, physicalnetworkid string) (string, error) { + p := &ListTrafficTypesParams{} + p.p = make(map[string]interface{}) + + p.p["keyword"] = keyword + p.p["physicalnetworkid"] = physicalnetworkid + + l, err := s.ListTrafficTypes(p) + if err != nil { + return "", err + } + + if l.Count == 0 { + return "", fmt.Errorf("No match found for %s: %+v", keyword, l) + } + + if l.Count == 1 { + return l.TrafficTypes[0].Id, nil + } + + if l.Count > 1 { + for _, v := range l.TrafficTypes { + if v.Name == keyword { + return v.Id, nil + } + } + } + return "", fmt.Errorf("Could not find an exact match for %s: %+v", keyword, l) +} + +// Lists traffic types of a given physical network. +func (s *UsageService) ListTrafficTypes(p *ListTrafficTypesParams) (*ListTrafficTypesResponse, error) { + resp, err := s.cs.newRequest("listTrafficTypes", p.toURLValues()) + if err != nil { + return nil, err + } + + var r ListTrafficTypesResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + return &r, nil +} + +type ListTrafficTypesResponse struct { + Count int `json:"count"` + TrafficTypes []*TrafficType `json:"traffictype"` +} + +type TrafficType struct { + Canenableindividualservice bool `json:"canenableindividualservice,omitempty"` + Destinationphysicalnetworkid string `json:"destinationphysicalnetworkid,omitempty"` + Id string `json:"id,omitempty"` + Name string `json:"name,omitempty"` + Physicalnetworkid string `json:"physicalnetworkid,omitempty"` + Servicelist []string `json:"servicelist,omitempty"` + State string `json:"state,omitempty"` +} + +type UpdateTrafficTypeParams struct { + p map[string]interface{} +} + +func (p *UpdateTrafficTypeParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["hypervnetworklabel"]; found { + u.Set("hypervnetworklabel", v.(string)) + } + if v, found := p.p["id"]; found { + u.Set("id", v.(string)) + } + if v, found := p.p["kvmnetworklabel"]; found { + u.Set("kvmnetworklabel", v.(string)) + } + if v, found := p.p["vmwarenetworklabel"]; found { + u.Set("vmwarenetworklabel", v.(string)) + } + if v, found := p.p["xennetworklabel"]; found { + u.Set("xennetworklabel", v.(string)) + } + return u +} + +func (p *UpdateTrafficTypeParams) SetHypervnetworklabel(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["hypervnetworklabel"] = v + return +} + +func (p *UpdateTrafficTypeParams) SetId(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["id"] = v + return +} + +func (p *UpdateTrafficTypeParams) SetKvmnetworklabel(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["kvmnetworklabel"] = v + return +} + +func (p *UpdateTrafficTypeParams) SetVmwarenetworklabel(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["vmwarenetworklabel"] = v + return +} + +func (p 
*UpdateTrafficTypeParams) SetXennetworklabel(v string) {
+	if p.p == nil {
+		p.p = make(map[string]interface{})
+	}
+	p.p["xennetworklabel"] = v
+	return
+}
+
+// You should always use this function to get a new UpdateTrafficTypeParams instance,
+// as then you are sure you have configured all required params
+func (s *UsageService) NewUpdateTrafficTypeParams(id string) *UpdateTrafficTypeParams {
+	p := &UpdateTrafficTypeParams{}
+	p.p = make(map[string]interface{})
+	p.p["id"] = id
+	return p
+}
+
+// Updates a traffic type of a physical network
+func (s *UsageService) UpdateTrafficType(p *UpdateTrafficTypeParams) (*UpdateTrafficTypeResponse, error) {
+	resp, err := s.cs.newRequest("updateTrafficType", p.toURLValues())
+	if err != nil {
+		return nil, err
+	}
+
+	var r UpdateTrafficTypeResponse
+	if err := json.Unmarshal(resp, &r); err != nil {
+		return nil, err
+	}
+
+	// If we have an async client, we need to wait for the async result
+	if s.cs.async {
+		b, err := s.cs.GetAsyncJobResult(r.JobID, s.cs.timeout)
+		if err != nil {
+			if err == AsyncTimeoutErr {
+				return &r, err
+			}
+			return nil, err
+		}
+
+		b, err = getRawValue(b)
+		if err != nil {
+			return nil, err
+		}
+
+		if err := json.Unmarshal(b, &r); err != nil {
+			return nil, err
+		}
+	}
+	return &r, nil
+}
+
+type UpdateTrafficTypeResponse struct {
+	JobID              string `json:"jobid,omitempty"`
+	Hypervnetworklabel string `json:"hypervnetworklabel,omitempty"`
+	Id                 string `json:"id,omitempty"`
+	Kvmnetworklabel    string `json:"kvmnetworklabel,omitempty"`
+	Physicalnetworkid  string `json:"physicalnetworkid,omitempty"`
+	Traffictype        string `json:"traffictype,omitempty"`
+	Vmwarenetworklabel string `json:"vmwarenetworklabel,omitempty"`
+	Xennetworklabel    string `json:"xennetworklabel,omitempty"`
+}
+
+type ListTrafficTypeImplementorsParams struct {
+	p map[string]interface{}
+}
+
+func (p *ListTrafficTypeImplementorsParams) toURLValues() url.Values {
+	u := url.Values{}
+	if p.p == nil {
+		return u
+	}
+	if v, found := p.p["keyword"]; found {
+		u.Set("keyword", v.(string))
+	}
+	if v, found := p.p["page"]; found {
+		vv := strconv.Itoa(v.(int))
+		u.Set("page", vv)
+	}
+	if v, found := p.p["pagesize"]; found {
+		vv := strconv.Itoa(v.(int))
+		u.Set("pagesize", vv)
+	}
+	if v, found := p.p["traffictype"]; found {
+		u.Set("traffictype", v.(string))
+	}
+	return u
+}
+
+func (p *ListTrafficTypeImplementorsParams) SetKeyword(v string) {
+	if p.p == nil {
+		p.p = make(map[string]interface{})
+	}
+	p.p["keyword"] = v
+	return
+}
+
+func (p *ListTrafficTypeImplementorsParams) SetPage(v int) {
+	if p.p == nil {
+		p.p = make(map[string]interface{})
+	}
+	p.p["page"] = v
+	return
+}
+
+func (p *ListTrafficTypeImplementorsParams) SetPagesize(v int) {
+	if p.p == nil {
+		p.p = make(map[string]interface{})
+	}
+	p.p["pagesize"] = v
+	return
+}
+
+func (p *ListTrafficTypeImplementorsParams) SetTraffictype(v string) {
+	if p.p == nil {
+		p.p = make(map[string]interface{})
+	}
+	p.p["traffictype"] = v
+	return
+}
+
+// You should always use this function to get a new ListTrafficTypeImplementorsParams instance,
+// as then you are sure you have configured all required params
+func (s *UsageService) NewListTrafficTypeImplementorsParams() *ListTrafficTypeImplementorsParams {
+	p := &ListTrafficTypeImplementorsParams{}
+	p.p = make(map[string]interface{})
+	return p
+}
+
+// Lists implementors of a network traffic type, or implementors of all network traffic types
+func (s *UsageService) ListTrafficTypeImplementors(p *ListTrafficTypeImplementorsParams) 
(*ListTrafficTypeImplementorsResponse, error) { + resp, err := s.cs.newRequest("listTrafficTypeImplementors", p.toURLValues()) + if err != nil { + return nil, err + } + + var r ListTrafficTypeImplementorsResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + return &r, nil +} + +type ListTrafficTypeImplementorsResponse struct { + Count int `json:"count"` + TrafficTypeImplementors []*TrafficTypeImplementor `json:"traffictypeimplementor"` +} + +type TrafficTypeImplementor struct { + Traffictype string `json:"traffictype,omitempty"` + Traffictypeimplementor string `json:"traffictypeimplementor,omitempty"` +} + +type GenerateUsageRecordsParams struct { + p map[string]interface{} +} + +func (p *GenerateUsageRecordsParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["domainid"]; found { + u.Set("domainid", v.(string)) + } + if v, found := p.p["enddate"]; found { + u.Set("enddate", v.(string)) + } + if v, found := p.p["startdate"]; found { + u.Set("startdate", v.(string)) + } + return u +} + +func (p *GenerateUsageRecordsParams) SetDomainid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["domainid"] = v + return +} + +func (p *GenerateUsageRecordsParams) SetEnddate(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["enddate"] = v + return +} + +func (p *GenerateUsageRecordsParams) SetStartdate(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["startdate"] = v + return +} + +// You should always use this function to get a new GenerateUsageRecordsParams instance, +// as then you are sure you have configured all required params +func (s *UsageService) NewGenerateUsageRecordsParams(enddate string, startdate string) *GenerateUsageRecordsParams { + p := &GenerateUsageRecordsParams{} + p.p = make(map[string]interface{}) + p.p["enddate"] = enddate + p.p["startdate"] = startdate + return p +} + +// Generates usage records. 
This will generate records only if there are any records to be generated, i.e. if the scheduled usage job did not run or failed
+func (s *UsageService) GenerateUsageRecords(p *GenerateUsageRecordsParams) (*GenerateUsageRecordsResponse, error) {
+	resp, err := s.cs.newRequest("generateUsageRecords", p.toURLValues())
+	if err != nil {
+		return nil, err
+	}
+
+	var r GenerateUsageRecordsResponse
+	if err := json.Unmarshal(resp, &r); err != nil {
+		return nil, err
+	}
+	return &r, nil
+}
+
+type GenerateUsageRecordsResponse struct {
+	Displaytext string `json:"displaytext,omitempty"`
+	Success     string `json:"success,omitempty"`
+}
+
+type ListUsageRecordsParams struct {
+	p map[string]interface{}
+}
+
+func (p *ListUsageRecordsParams) toURLValues() url.Values {
+	u := url.Values{}
+	if p.p == nil {
+		return u
+	}
+	if v, found := p.p["account"]; found {
+		u.Set("account", v.(string))
+	}
+	if v, found := p.p["accountid"]; found {
+		u.Set("accountid", v.(string))
+	}
+	if v, found := p.p["domainid"]; found {
+		u.Set("domainid", v.(string))
+	}
+	if v, found := p.p["enddate"]; found {
+		u.Set("enddate", v.(string))
+	}
+	if v, found := p.p["keyword"]; found {
+		u.Set("keyword", v.(string))
+	}
+	if v, found := p.p["page"]; found {
+		vv := strconv.Itoa(v.(int))
+		u.Set("page", vv)
+	}
+	if v, found := p.p["pagesize"]; found {
+		vv := strconv.Itoa(v.(int))
+		u.Set("pagesize", vv)
+	}
+	if v, found := p.p["projectid"]; found {
+		u.Set("projectid", v.(string))
+	}
+	if v, found := p.p["startdate"]; found {
+		u.Set("startdate", v.(string))
+	}
+	if v, found := p.p["type"]; found {
+		vv := strconv.FormatInt(v.(int64), 10)
+		u.Set("type", vv)
+	}
+	return u
+}
+
+func (p *ListUsageRecordsParams) SetAccount(v string) {
+	if p.p == nil {
+		p.p = make(map[string]interface{})
+	}
+	p.p["account"] = v
+	return
+}
+
+func (p *ListUsageRecordsParams) SetAccountid(v string) {
+	if p.p == nil {
+		p.p = make(map[string]interface{})
+	}
+	p.p["accountid"] = v
+	return
+}
+
+func (p *ListUsageRecordsParams) SetDomainid(v string) {
+	if p.p == nil {
+		p.p = make(map[string]interface{})
+	}
+	p.p["domainid"] = v
+	return
+}
+
+func (p *ListUsageRecordsParams) SetEnddate(v string) {
+	if p.p == nil {
+		p.p = make(map[string]interface{})
+	}
+	p.p["enddate"] = v
+	return
+}
+
+func (p *ListUsageRecordsParams) SetKeyword(v string) {
+	if p.p == nil {
+		p.p = make(map[string]interface{})
+	}
+	p.p["keyword"] = v
+	return
+}
+
+func (p *ListUsageRecordsParams) SetPage(v int) {
+	if p.p == nil {
+		p.p = make(map[string]interface{})
+	}
+	p.p["page"] = v
+	return
+}
+
+func (p *ListUsageRecordsParams) SetPagesize(v int) {
+	if p.p == nil {
+		p.p = make(map[string]interface{})
+	}
+	p.p["pagesize"] = v
+	return
+}
+
+func (p *ListUsageRecordsParams) SetProjectid(v string) {
+	if p.p == nil {
+		p.p = make(map[string]interface{})
+	}
+	p.p["projectid"] = v
+	return
+}
+
+func (p *ListUsageRecordsParams) SetStartdate(v string) {
+	if p.p == nil {
+		p.p = make(map[string]interface{})
+	}
+	p.p["startdate"] = v
+	return
+}
+
+func (p *ListUsageRecordsParams) SetType(v int64) {
+	if p.p == nil {
+		p.p = make(map[string]interface{})
+	}
+	// Keyed as "type" so that toURLValues actually picks the value up.
+	p.p["type"] = v
+	return
+}
+
+// You should always use this function to get a new ListUsageRecordsParams instance,
+// as then you are sure you have configured all required params
+func (s *UsageService) NewListUsageRecordsParams(enddate string, startdate string) *ListUsageRecordsParams {
+	p := &ListUsageRecordsParams{}
+	p.p = make(map[string]interface{})
+	p.p["enddate"] = enddate
+	
p.p["startdate"] = startdate + return p +} + +// Lists usage records for accounts +func (s *UsageService) ListUsageRecords(p *ListUsageRecordsParams) (*ListUsageRecordsResponse, error) { + resp, err := s.cs.newRequest("listUsageRecords", p.toURLValues()) + if err != nil { + return nil, err + } + + var r ListUsageRecordsResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + return &r, nil +} + +type ListUsageRecordsResponse struct { + Count int `json:"count"` + UsageRecords []*UsageRecord `json:"usagerecord"` +} + +type UsageRecord struct { + Account string `json:"account,omitempty"` + Accountid string `json:"accountid,omitempty"` + Cpunumber int64 `json:"cpunumber,omitempty"` + Cpuspeed int64 `json:"cpuspeed,omitempty"` + Description string `json:"description,omitempty"` + Domain string `json:"domain,omitempty"` + Domainid string `json:"domainid,omitempty"` + Enddate string `json:"enddate,omitempty"` + Isdefault bool `json:"isdefault,omitempty"` + Issourcenat bool `json:"issourcenat,omitempty"` + Issystem bool `json:"issystem,omitempty"` + Memory int64 `json:"memory,omitempty"` + Name string `json:"name,omitempty"` + Networkid string `json:"networkid,omitempty"` + Offeringid string `json:"offeringid,omitempty"` + Project string `json:"project,omitempty"` + Projectid string `json:"projectid,omitempty"` + Rawusage string `json:"rawusage,omitempty"` + Size int64 `json:"size,omitempty"` + Startdate string `json:"startdate,omitempty"` + Templateid string `json:"templateid,omitempty"` + Type string `json:"type,omitempty"` + Usage string `json:"usage,omitempty"` + Usageid string `json:"usageid,omitempty"` + Usagetype int `json:"usagetype,omitempty"` + Virtualmachineid string `json:"virtualmachineid,omitempty"` + Virtualsize int64 `json:"virtualsize,omitempty"` + Zoneid string `json:"zoneid,omitempty"` +} + +type ListUsageTypesParams struct { + p map[string]interface{} +} + +func (p *ListUsageTypesParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + return u +} + +// You should always use this function to get a new ListUsageTypesParams instance, +// as then you are sure you have configured all required params +func (s *UsageService) NewListUsageTypesParams() *ListUsageTypesParams { + p := &ListUsageTypesParams{} + p.p = make(map[string]interface{}) + return p +} + +// List Usage Types +func (s *UsageService) ListUsageTypes(p *ListUsageTypesParams) (*ListUsageTypesResponse, error) { + resp, err := s.cs.newRequest("listUsageTypes", p.toURLValues()) + if err != nil { + return nil, err + } + + var r ListUsageTypesResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + return &r, nil +} + +type ListUsageTypesResponse struct { + Count int `json:"count"` + UsageTypes []*UsageType `json:"usagetype"` +} + +type UsageType struct { + Description string `json:"description,omitempty"` + Usagetypeid int `json:"usagetypeid,omitempty"` +} + +type AddTrafficMonitorParams struct { + p map[string]interface{} +} + +func (p *AddTrafficMonitorParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["excludezones"]; found { + u.Set("excludezones", v.(string)) + } + if v, found := p.p["includezones"]; found { + u.Set("includezones", v.(string)) + } + if v, found := p.p["url"]; found { + u.Set("url", v.(string)) + } + if v, found := p.p["zoneid"]; found { + u.Set("zoneid", v.(string)) + } + return u +} + +func (p *AddTrafficMonitorParams) SetExcludezones(v string) { + if p.p == 
nil {
+ p.p = make(map[string]interface{})
+ }
+ p.p["excludezones"] = v
+ return
+}
+
+func (p *AddTrafficMonitorParams) SetIncludezones(v string) {
+ if p.p == nil {
+ p.p = make(map[string]interface{})
+ }
+ p.p["includezones"] = v
+ return
+}
+
+func (p *AddTrafficMonitorParams) SetUrl(v string) {
+ if p.p == nil {
+ p.p = make(map[string]interface{})
+ }
+ p.p["url"] = v
+ return
+}
+
+func (p *AddTrafficMonitorParams) SetZoneid(v string) {
+ if p.p == nil {
+ p.p = make(map[string]interface{})
+ }
+ p.p["zoneid"] = v
+ return
+}
+
+// You should always use this function to get a new AddTrafficMonitorParams instance,
+// as then you are sure you have configured all required params
+func (s *UsageService) NewAddTrafficMonitorParams(url string, zoneid string) *AddTrafficMonitorParams {
+ p := &AddTrafficMonitorParams{}
+ p.p = make(map[string]interface{})
+ p.p["url"] = url
+ p.p["zoneid"] = zoneid
+ return p
+}
+
+// Adds Traffic Monitor Host for Direct Network Usage
+func (s *UsageService) AddTrafficMonitor(p *AddTrafficMonitorParams) (*AddTrafficMonitorResponse, error) {
+ resp, err := s.cs.newRequest("addTrafficMonitor", p.toURLValues())
+ if err != nil {
+ return nil, err
+ }
+
+ var r AddTrafficMonitorResponse
+ if err := json.Unmarshal(resp, &r); err != nil {
+ return nil, err
+ }
+ return &r, nil
+}
+
+type AddTrafficMonitorResponse struct {
+ Id string `json:"id,omitempty"`
+ Ipaddress string `json:"ipaddress,omitempty"`
+ Numretries string `json:"numretries,omitempty"`
+ Timeout string `json:"timeout,omitempty"`
+ Zoneid string `json:"zoneid,omitempty"`
+}
+
+type DeleteTrafficMonitorParams struct {
+ p map[string]interface{}
+}
+
+func (p *DeleteTrafficMonitorParams) toURLValues() url.Values {
+ u := url.Values{}
+ if p.p == nil {
+ return u
+ }
+ if v, found := p.p["id"]; found {
+ u.Set("id", v.(string))
+ }
+ return u
+}
+
+func (p *DeleteTrafficMonitorParams) SetId(v string) {
+ if p.p == nil {
+ p.p = make(map[string]interface{})
+ }
+ p.p["id"] = v
+ return
+}
+
+// You should always use this function to get a new DeleteTrafficMonitorParams instance,
+// as then you are sure you have configured all required params
+func (s *UsageService) NewDeleteTrafficMonitorParams(id string) *DeleteTrafficMonitorParams {
+ p := &DeleteTrafficMonitorParams{}
+ p.p = make(map[string]interface{})
+ p.p["id"] = id
+ return p
+}
+
+// Deletes a traffic monitor host.
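+// Example (illustrative sketch only, not part of the generated bindings): given
+// a configured client — construction is not shown in this file — and assuming
+// it exposes this service as cs.Usage, the call pattern would be:
+//
+//	p := cs.Usage.NewDeleteTrafficMonitorParams("host-uuid") // placeholder ID
+//	r, err := cs.Usage.DeleteTrafficMonitor(p)
+//	if err != nil {
+//		return err
+//	}
+//	fmt.Println(r.Displaytext)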
+func (s *UsageService) DeleteTrafficMonitor(p *DeleteTrafficMonitorParams) (*DeleteTrafficMonitorResponse, error) { + resp, err := s.cs.newRequest("deleteTrafficMonitor", p.toURLValues()) + if err != nil { + return nil, err + } + + var r DeleteTrafficMonitorResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + return &r, nil +} + +type DeleteTrafficMonitorResponse struct { + Displaytext string `json:"displaytext,omitempty"` + Success string `json:"success,omitempty"` +} + +type ListTrafficMonitorsParams struct { + p map[string]interface{} +} + +func (p *ListTrafficMonitorsParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["keyword"]; found { + u.Set("keyword", v.(string)) + } + if v, found := p.p["page"]; found { + vv := strconv.Itoa(v.(int)) + u.Set("page", vv) + } + if v, found := p.p["pagesize"]; found { + vv := strconv.Itoa(v.(int)) + u.Set("pagesize", vv) + } + if v, found := p.p["zoneid"]; found { + u.Set("zoneid", v.(string)) + } + return u +} + +func (p *ListTrafficMonitorsParams) SetKeyword(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["keyword"] = v + return +} + +func (p *ListTrafficMonitorsParams) SetPage(v int) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["page"] = v + return +} + +func (p *ListTrafficMonitorsParams) SetPagesize(v int) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["pagesize"] = v + return +} + +func (p *ListTrafficMonitorsParams) SetZoneid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["zoneid"] = v + return +} + +// You should always use this function to get a new ListTrafficMonitorsParams instance, +// as then you are sure you have configured all required params +func (s *UsageService) NewListTrafficMonitorsParams(zoneid string) *ListTrafficMonitorsParams { + p := &ListTrafficMonitorsParams{} + p.p = make(map[string]interface{}) + p.p["zoneid"] = zoneid + return p +} + +// List traffic monitor Hosts. +func (s *UsageService) ListTrafficMonitors(p *ListTrafficMonitorsParams) (*ListTrafficMonitorsResponse, error) { + resp, err := s.cs.newRequest("listTrafficMonitors", p.toURLValues()) + if err != nil { + return nil, err + } + + var r ListTrafficMonitorsResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + return &r, nil +} + +type ListTrafficMonitorsResponse struct { + Count int `json:"count"` + TrafficMonitors []*TrafficMonitor `json:"trafficmonitor"` +} + +type TrafficMonitor struct { + Id string `json:"id,omitempty"` + Ipaddress string `json:"ipaddress,omitempty"` + Numretries string `json:"numretries,omitempty"` + Timeout string `json:"timeout,omitempty"` + Zoneid string `json:"zoneid,omitempty"` +} diff --git a/vendor/github.com/xanzy/go-cloudstack/cloudstack/UserService.go b/vendor/github.com/xanzy/go-cloudstack/cloudstack/UserService.go new file mode 100644 index 000000000..e11e25f61 --- /dev/null +++ b/vendor/github.com/xanzy/go-cloudstack/cloudstack/UserService.go @@ -0,0 +1,1233 @@ +// +// Copyright 2014, Sander van Harmelen +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package cloudstack + +import ( + "encoding/json" + "fmt" + "net/url" + "strconv" + "strings" +) + +type CreateUserParams struct { + p map[string]interface{} +} + +func (p *CreateUserParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["account"]; found { + u.Set("account", v.(string)) + } + if v, found := p.p["domainid"]; found { + u.Set("domainid", v.(string)) + } + if v, found := p.p["email"]; found { + u.Set("email", v.(string)) + } + if v, found := p.p["firstname"]; found { + u.Set("firstname", v.(string)) + } + if v, found := p.p["lastname"]; found { + u.Set("lastname", v.(string)) + } + if v, found := p.p["password"]; found { + u.Set("password", v.(string)) + } + if v, found := p.p["timezone"]; found { + u.Set("timezone", v.(string)) + } + if v, found := p.p["userid"]; found { + u.Set("userid", v.(string)) + } + if v, found := p.p["username"]; found { + u.Set("username", v.(string)) + } + return u +} + +func (p *CreateUserParams) SetAccount(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["account"] = v + return +} + +func (p *CreateUserParams) SetDomainid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["domainid"] = v + return +} + +func (p *CreateUserParams) SetEmail(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["email"] = v + return +} + +func (p *CreateUserParams) SetFirstname(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["firstname"] = v + return +} + +func (p *CreateUserParams) SetLastname(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["lastname"] = v + return +} + +func (p *CreateUserParams) SetPassword(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["password"] = v + return +} + +func (p *CreateUserParams) SetTimezone(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["timezone"] = v + return +} + +func (p *CreateUserParams) SetUserid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["userid"] = v + return +} + +func (p *CreateUserParams) SetUsername(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["username"] = v + return +} + +// You should always use this function to get a new CreateUserParams instance, +// as then you are sure you have configured all required params +func (s *UserService) NewCreateUserParams(account string, email string, firstname string, lastname string, password string, username string) *CreateUserParams { + p := &CreateUserParams{} + p.p = make(map[string]interface{}) + p.p["account"] = account + p.p["email"] = email + p.p["firstname"] = firstname + p.p["lastname"] = lastname + p.p["password"] = password + p.p["username"] = username + return p +} + +// Creates a user for an account that already exists +func (s *UserService) CreateUser(p *CreateUserParams) (*CreateUserResponse, error) { + resp, err := s.cs.newRequest("createUser", p.toURLValues()) + if err != nil { + return nil, err + } + + var r CreateUserResponse + if 
err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + return &r, nil +} + +type CreateUserResponse struct { + Account string `json:"account,omitempty"` + Accountid string `json:"accountid,omitempty"` + Accounttype int `json:"accounttype,omitempty"` + Apikey string `json:"apikey,omitempty"` + Created string `json:"created,omitempty"` + Domain string `json:"domain,omitempty"` + Domainid string `json:"domainid,omitempty"` + Email string `json:"email,omitempty"` + Firstname string `json:"firstname,omitempty"` + Id string `json:"id,omitempty"` + Iscallerchilddomain bool `json:"iscallerchilddomain,omitempty"` + Isdefault bool `json:"isdefault,omitempty"` + Lastname string `json:"lastname,omitempty"` + Secretkey string `json:"secretkey,omitempty"` + State string `json:"state,omitempty"` + Timezone string `json:"timezone,omitempty"` + Username string `json:"username,omitempty"` +} + +type DeleteUserParams struct { + p map[string]interface{} +} + +func (p *DeleteUserParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["id"]; found { + u.Set("id", v.(string)) + } + return u +} + +func (p *DeleteUserParams) SetId(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["id"] = v + return +} + +// You should always use this function to get a new DeleteUserParams instance, +// as then you are sure you have configured all required params +func (s *UserService) NewDeleteUserParams(id string) *DeleteUserParams { + p := &DeleteUserParams{} + p.p = make(map[string]interface{}) + p.p["id"] = id + return p +} + +// Deletes a user for an account +func (s *UserService) DeleteUser(p *DeleteUserParams) (*DeleteUserResponse, error) { + resp, err := s.cs.newRequest("deleteUser", p.toURLValues()) + if err != nil { + return nil, err + } + + var r DeleteUserResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + return &r, nil +} + +type DeleteUserResponse struct { + Displaytext string `json:"displaytext,omitempty"` + Success string `json:"success,omitempty"` +} + +type UpdateUserParams struct { + p map[string]interface{} +} + +func (p *UpdateUserParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["email"]; found { + u.Set("email", v.(string)) + } + if v, found := p.p["firstname"]; found { + u.Set("firstname", v.(string)) + } + if v, found := p.p["id"]; found { + u.Set("id", v.(string)) + } + if v, found := p.p["lastname"]; found { + u.Set("lastname", v.(string)) + } + if v, found := p.p["password"]; found { + u.Set("password", v.(string)) + } + if v, found := p.p["timezone"]; found { + u.Set("timezone", v.(string)) + } + if v, found := p.p["userapikey"]; found { + u.Set("userapikey", v.(string)) + } + if v, found := p.p["username"]; found { + u.Set("username", v.(string)) + } + if v, found := p.p["usersecretkey"]; found { + u.Set("usersecretkey", v.(string)) + } + return u +} + +func (p *UpdateUserParams) SetEmail(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["email"] = v + return +} + +func (p *UpdateUserParams) SetFirstname(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["firstname"] = v + return +} + +func (p *UpdateUserParams) SetId(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["id"] = v + return +} + +func (p *UpdateUserParams) SetLastname(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["lastname"] = v + return +} + 
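+// Note on the Set* methods above and below: each one lazily allocates the
+// backing map, so a zero-value params struct is safe to mutate, and
+// toURLValues serializes only the keys that were explicitly set. An
+// illustrative sketch (the values are placeholders):
+//
+//	p := &UpdateUserParams{}  // p.p is nil at this point
+//	p.SetLastname("Doe")      // allocates the map, then stores the value
+//	_ = p.toURLValues()       // yields just lastname=Doe
+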
+func (p *UpdateUserParams) SetPassword(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["password"] = v + return +} + +func (p *UpdateUserParams) SetTimezone(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["timezone"] = v + return +} + +func (p *UpdateUserParams) SetUserapikey(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["userapikey"] = v + return +} + +func (p *UpdateUserParams) SetUsername(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["username"] = v + return +} + +func (p *UpdateUserParams) SetUsersecretkey(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["usersecretkey"] = v + return +} + +// You should always use this function to get a new UpdateUserParams instance, +// as then you are sure you have configured all required params +func (s *UserService) NewUpdateUserParams(id string) *UpdateUserParams { + p := &UpdateUserParams{} + p.p = make(map[string]interface{}) + p.p["id"] = id + return p +} + +// Updates a user account +func (s *UserService) UpdateUser(p *UpdateUserParams) (*UpdateUserResponse, error) { + resp, err := s.cs.newRequest("updateUser", p.toURLValues()) + if err != nil { + return nil, err + } + + var r UpdateUserResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + return &r, nil +} + +type UpdateUserResponse struct { + Account string `json:"account,omitempty"` + Accountid string `json:"accountid,omitempty"` + Accounttype int `json:"accounttype,omitempty"` + Apikey string `json:"apikey,omitempty"` + Created string `json:"created,omitempty"` + Domain string `json:"domain,omitempty"` + Domainid string `json:"domainid,omitempty"` + Email string `json:"email,omitempty"` + Firstname string `json:"firstname,omitempty"` + Id string `json:"id,omitempty"` + Iscallerchilddomain bool `json:"iscallerchilddomain,omitempty"` + Isdefault bool `json:"isdefault,omitempty"` + Lastname string `json:"lastname,omitempty"` + Secretkey string `json:"secretkey,omitempty"` + State string `json:"state,omitempty"` + Timezone string `json:"timezone,omitempty"` + Username string `json:"username,omitempty"` +} + +type ListUsersParams struct { + p map[string]interface{} +} + +func (p *ListUsersParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["account"]; found { + u.Set("account", v.(string)) + } + if v, found := p.p["accounttype"]; found { + vv := strconv.FormatInt(v.(int64), 10) + u.Set("accounttype", vv) + } + if v, found := p.p["domainid"]; found { + u.Set("domainid", v.(string)) + } + if v, found := p.p["id"]; found { + u.Set("id", v.(string)) + } + if v, found := p.p["isrecursive"]; found { + vv := strconv.FormatBool(v.(bool)) + u.Set("isrecursive", vv) + } + if v, found := p.p["keyword"]; found { + u.Set("keyword", v.(string)) + } + if v, found := p.p["listall"]; found { + vv := strconv.FormatBool(v.(bool)) + u.Set("listall", vv) + } + if v, found := p.p["page"]; found { + vv := strconv.Itoa(v.(int)) + u.Set("page", vv) + } + if v, found := p.p["pagesize"]; found { + vv := strconv.Itoa(v.(int)) + u.Set("pagesize", vv) + } + if v, found := p.p["state"]; found { + u.Set("state", v.(string)) + } + if v, found := p.p["username"]; found { + u.Set("username", v.(string)) + } + return u +} + +func (p *ListUsersParams) SetAccount(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["account"] = v + return +} + +func (p *ListUsersParams) 
SetAccounttype(v int64) {
+ if p.p == nil {
+ p.p = make(map[string]interface{})
+ }
+ p.p["accounttype"] = v
+ return
+}
+
+func (p *ListUsersParams) SetDomainid(v string) {
+ if p.p == nil {
+ p.p = make(map[string]interface{})
+ }
+ p.p["domainid"] = v
+ return
+}
+
+func (p *ListUsersParams) SetId(v string) {
+ if p.p == nil {
+ p.p = make(map[string]interface{})
+ }
+ p.p["id"] = v
+ return
+}
+
+func (p *ListUsersParams) SetIsrecursive(v bool) {
+ if p.p == nil {
+ p.p = make(map[string]interface{})
+ }
+ p.p["isrecursive"] = v
+ return
+}
+
+func (p *ListUsersParams) SetKeyword(v string) {
+ if p.p == nil {
+ p.p = make(map[string]interface{})
+ }
+ p.p["keyword"] = v
+ return
+}
+
+func (p *ListUsersParams) SetListall(v bool) {
+ if p.p == nil {
+ p.p = make(map[string]interface{})
+ }
+ p.p["listall"] = v
+ return
+}
+
+func (p *ListUsersParams) SetPage(v int) {
+ if p.p == nil {
+ p.p = make(map[string]interface{})
+ }
+ p.p["page"] = v
+ return
+}
+
+func (p *ListUsersParams) SetPagesize(v int) {
+ if p.p == nil {
+ p.p = make(map[string]interface{})
+ }
+ p.p["pagesize"] = v
+ return
+}
+
+func (p *ListUsersParams) SetState(v string) {
+ if p.p == nil {
+ p.p = make(map[string]interface{})
+ }
+ p.p["state"] = v
+ return
+}
+
+func (p *ListUsersParams) SetUsername(v string) {
+ if p.p == nil {
+ p.p = make(map[string]interface{})
+ }
+ p.p["username"] = v
+ return
+}
+
+// You should always use this function to get a new ListUsersParams instance,
+// as then you are sure you have configured all required params
+func (s *UserService) NewListUsersParams() *ListUsersParams {
+ p := &ListUsersParams{}
+ p.p = make(map[string]interface{})
+ return p
+}
+
+// This is a courtesy helper function, which in some cases may not work as expected!
+func (s *UserService) GetUserByID(id string) (*User, int, error) {
+ p := &ListUsersParams{}
+ p.p = make(map[string]interface{})
+
+ p.p["id"] = id
+
+ l, err := s.ListUsers(p)
+ if err != nil {
+ if strings.Contains(err.Error(), fmt.Sprintf(
+ "Invalid parameter id value=%s due to incorrect long value format, "+
+ "or entity does not exist", id)) {
+ return nil, 0, fmt.Errorf("No match found for %s: %+v", id, l)
+ }
+ return nil, -1, err
+ }
+
+ if l.Count == 0 {
+ return nil, l.Count, fmt.Errorf("No match found for %s: %+v", id, l)
+ }
+
+ if l.Count == 1 {
+ return l.Users[0], l.Count, nil
+ }
+ return nil, l.Count, fmt.Errorf("There is more than one result for User UUID: %s!", id)
+}
+
+// Lists user accounts
+func (s *UserService) ListUsers(p *ListUsersParams) (*ListUsersResponse, error) {
+ resp, err := s.cs.newRequest("listUsers", p.toURLValues())
+ if err != nil {
+ return nil, err
+ }
+
+ var r ListUsersResponse
+ if err := json.Unmarshal(resp, &r); err != nil {
+ return nil, err
+ }
+ return &r, nil
+}
+
+type ListUsersResponse struct {
+ Count int `json:"count"`
+ Users []*User `json:"user"`
+}
+
+type User struct {
+ Account string `json:"account,omitempty"`
+ Accountid string `json:"accountid,omitempty"`
+ Accounttype int `json:"accounttype,omitempty"`
+ Apikey string `json:"apikey,omitempty"`
+ Created string `json:"created,omitempty"`
+ Domain string `json:"domain,omitempty"`
+ Domainid string `json:"domainid,omitempty"`
+ Email string `json:"email,omitempty"`
+ Firstname string `json:"firstname,omitempty"`
+ Id string `json:"id,omitempty"`
+ Iscallerchilddomain bool `json:"iscallerchilddomain,omitempty"`
+ Isdefault bool `json:"isdefault,omitempty"`
+ Lastname string `json:"lastname,omitempty"`
+ Secretkey string `json:"secretkey,omitempty"`
+ State string `json:"state,omitempty"`
+ Timezone string `json:"timezone,omitempty"`
+ Username string `json:"username,omitempty"`
+}
+
+type LockUserParams struct {
+ p map[string]interface{}
+}
+
+func (p *LockUserParams) toURLValues() url.Values {
+ u := url.Values{}
+ if p.p == nil {
+ return u
+ }
+ if v, found := p.p["id"]; found {
+ u.Set("id", v.(string))
+ }
+ return u
+}
+
+func (p *LockUserParams) SetId(v string) {
+ if p.p == nil {
+ p.p = make(map[string]interface{})
+ }
+ p.p["id"] = v
+ return
+}
+
+// You should always use this function to get a new LockUserParams instance,
+// as then you are sure you have configured all required params
+func (s *UserService) NewLockUserParams(id string) *LockUserParams {
+ p := &LockUserParams{}
+ p.p = make(map[string]interface{})
+ p.p["id"] = id
+ return p
+}
+
+// Locks a user account
+func (s *UserService) LockUser(p *LockUserParams) (*LockUserResponse, error) {
+ resp, err := s.cs.newRequest("lockUser", p.toURLValues())
+ if err != nil {
+ return nil, err
+ }
+
+ var r LockUserResponse
+ if err := json.Unmarshal(resp, &r); err != nil {
+ return nil, err
+ }
+ return &r, nil
+}
+
+type LockUserResponse struct {
+ Account string `json:"account,omitempty"`
+ Accountid string `json:"accountid,omitempty"`
+ Accounttype int `json:"accounttype,omitempty"`
+ Apikey string `json:"apikey,omitempty"`
+ Created string `json:"created,omitempty"`
+ Domain string `json:"domain,omitempty"`
+ Domainid string `json:"domainid,omitempty"`
+ Email string `json:"email,omitempty"`
+ Firstname string `json:"firstname,omitempty"`
+ Id string `json:"id,omitempty"`
+ Iscallerchilddomain bool `json:"iscallerchilddomain,omitempty"`
+ Isdefault bool `json:"isdefault,omitempty"`
+ Lastname string `json:"lastname,omitempty"`
+ Secretkey string `json:"secretkey,omitempty"`
+ State string `json:"state,omitempty"`
+ Timezone string `json:"timezone,omitempty"`
+ Username string `json:"username,omitempty"`
+}
+
+type DisableUserParams struct {
+ p map[string]interface{}
+}
+
+func (p *DisableUserParams) toURLValues() url.Values {
+ u := url.Values{}
+ if p.p == nil {
+ return u
+ }
+ if v, found := p.p["id"]; found {
+ u.Set("id", v.(string))
+ }
+ return u
+}
+
+func (p *DisableUserParams) SetId(v string) {
+ if p.p == nil {
+ p.p = make(map[string]interface{})
+ }
+ p.p["id"] = v
+ return
+}
+
+// You should always use this function to get a new DisableUserParams instance,
+// as then you are sure you have configured all required params
+func (s *UserService) NewDisableUserParams(id string) *DisableUserParams {
+ p := &DisableUserParams{}
+ p.p = make(map[string]interface{})
+ p.p["id"] = id
+ return p
+}
+
+// Disables a user account
+func (s *UserService) DisableUser(p *DisableUserParams) (*DisableUserResponse, error) {
+ resp, err := s.cs.newRequest("disableUser", p.toURLValues())
+ if err != nil {
+ return nil, err
+ }
+
+ var r DisableUserResponse
+ if err := json.Unmarshal(resp, &r); err != nil {
+ return nil, err
+ }
+
+ // If we have an async client, we need to wait for the async result
+ if s.cs.async {
+ b, err := s.cs.GetAsyncJobResult(r.JobID, s.cs.timeout)
+ if err != nil {
+ if err == AsyncTimeoutErr {
+ return &r, err
+ }
+ return nil, err
+ }
+
+ b, err = getRawValue(b)
+ if err != nil {
+ return nil, err
+ }
+
+ if err := json.Unmarshal(b, &r); err != nil {
+ return nil, err
+ }
+ }
+ return &r, nil
+}
+
+type DisableUserResponse struct {
+ JobID string `json:"jobid,omitempty"`
+ Account string `json:"account,omitempty"`
+ Accountid string `json:"accountid,omitempty"` + Accounttype int `json:"accounttype,omitempty"` + Apikey string `json:"apikey,omitempty"` + Created string `json:"created,omitempty"` + Domain string `json:"domain,omitempty"` + Domainid string `json:"domainid,omitempty"` + Email string `json:"email,omitempty"` + Firstname string `json:"firstname,omitempty"` + Id string `json:"id,omitempty"` + Iscallerchilddomain bool `json:"iscallerchilddomain,omitempty"` + Isdefault bool `json:"isdefault,omitempty"` + Lastname string `json:"lastname,omitempty"` + Secretkey string `json:"secretkey,omitempty"` + State string `json:"state,omitempty"` + Timezone string `json:"timezone,omitempty"` + Username string `json:"username,omitempty"` +} + +type EnableUserParams struct { + p map[string]interface{} +} + +func (p *EnableUserParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["id"]; found { + u.Set("id", v.(string)) + } + return u +} + +func (p *EnableUserParams) SetId(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["id"] = v + return +} + +// You should always use this function to get a new EnableUserParams instance, +// as then you are sure you have configured all required params +func (s *UserService) NewEnableUserParams(id string) *EnableUserParams { + p := &EnableUserParams{} + p.p = make(map[string]interface{}) + p.p["id"] = id + return p +} + +// Enables a user account +func (s *UserService) EnableUser(p *EnableUserParams) (*EnableUserResponse, error) { + resp, err := s.cs.newRequest("enableUser", p.toURLValues()) + if err != nil { + return nil, err + } + + var r EnableUserResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + return &r, nil +} + +type EnableUserResponse struct { + Account string `json:"account,omitempty"` + Accountid string `json:"accountid,omitempty"` + Accounttype int `json:"accounttype,omitempty"` + Apikey string `json:"apikey,omitempty"` + Created string `json:"created,omitempty"` + Domain string `json:"domain,omitempty"` + Domainid string `json:"domainid,omitempty"` + Email string `json:"email,omitempty"` + Firstname string `json:"firstname,omitempty"` + Id string `json:"id,omitempty"` + Iscallerchilddomain bool `json:"iscallerchilddomain,omitempty"` + Isdefault bool `json:"isdefault,omitempty"` + Lastname string `json:"lastname,omitempty"` + Secretkey string `json:"secretkey,omitempty"` + State string `json:"state,omitempty"` + Timezone string `json:"timezone,omitempty"` + Username string `json:"username,omitempty"` +} + +type GetUserParams struct { + p map[string]interface{} +} + +func (p *GetUserParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["userapikey"]; found { + u.Set("userapikey", v.(string)) + } + return u +} + +func (p *GetUserParams) SetUserapikey(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["userapikey"] = v + return +} + +// You should always use this function to get a new GetUserParams instance, +// as then you are sure you have configured all required params +func (s *UserService) NewGetUserParams(userapikey string) *GetUserParams { + p := &GetUserParams{} + p.p = make(map[string]interface{}) + p.p["userapikey"] = userapikey + return p +} + +// Find user account by API key +func (s *UserService) GetUser(p *GetUserParams) (*GetUserResponse, error) { + resp, err := s.cs.newRequest("getUser", p.toURLValues()) + if err != nil { + return nil, err + } + + var r 
GetUserResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + return &r, nil +} + +type GetUserResponse struct { + Account string `json:"account,omitempty"` + Accountid string `json:"accountid,omitempty"` + Accounttype int `json:"accounttype,omitempty"` + Apikey string `json:"apikey,omitempty"` + Created string `json:"created,omitempty"` + Domain string `json:"domain,omitempty"` + Domainid string `json:"domainid,omitempty"` + Email string `json:"email,omitempty"` + Firstname string `json:"firstname,omitempty"` + Id string `json:"id,omitempty"` + Iscallerchilddomain bool `json:"iscallerchilddomain,omitempty"` + Isdefault bool `json:"isdefault,omitempty"` + Lastname string `json:"lastname,omitempty"` + Secretkey string `json:"secretkey,omitempty"` + State string `json:"state,omitempty"` + Timezone string `json:"timezone,omitempty"` + Username string `json:"username,omitempty"` +} + +type GetVirtualMachineUserDataParams struct { + p map[string]interface{} +} + +func (p *GetVirtualMachineUserDataParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["virtualmachineid"]; found { + u.Set("virtualmachineid", v.(string)) + } + return u +} + +func (p *GetVirtualMachineUserDataParams) SetVirtualmachineid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["virtualmachineid"] = v + return +} + +// You should always use this function to get a new GetVirtualMachineUserDataParams instance, +// as then you are sure you have configured all required params +func (s *UserService) NewGetVirtualMachineUserDataParams(virtualmachineid string) *GetVirtualMachineUserDataParams { + p := &GetVirtualMachineUserDataParams{} + p.p = make(map[string]interface{}) + p.p["virtualmachineid"] = virtualmachineid + return p +} + +// Returns user data associated with the VM +func (s *UserService) GetVirtualMachineUserData(p *GetVirtualMachineUserDataParams) (*GetVirtualMachineUserDataResponse, error) { + resp, err := s.cs.newRequest("getVirtualMachineUserData", p.toURLValues()) + if err != nil { + return nil, err + } + + var r GetVirtualMachineUserDataResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + return &r, nil +} + +type GetVirtualMachineUserDataResponse struct { + Userdata string `json:"userdata,omitempty"` + Virtualmachineid string `json:"virtualmachineid,omitempty"` +} + +type RegisterUserKeysParams struct { + p map[string]interface{} +} + +func (p *RegisterUserKeysParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["id"]; found { + u.Set("id", v.(string)) + } + return u +} + +func (p *RegisterUserKeysParams) SetId(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["id"] = v + return +} + +// You should always use this function to get a new RegisterUserKeysParams instance, +// as then you are sure you have configured all required params +func (s *UserService) NewRegisterUserKeysParams(id string) *RegisterUserKeysParams { + p := &RegisterUserKeysParams{} + p.p = make(map[string]interface{}) + p.p["id"] = id + return p +} + +// This command allows a user to register for the developer API, returning a secret key and an API key. This request is made through the integration API port, so it is a privileged command and must be made on behalf of a user. It is up to the implementer just how the username and password are entered, and then how that translates to an integration API request. 
Both secret key and API key should be returned to the user +func (s *UserService) RegisterUserKeys(p *RegisterUserKeysParams) (*RegisterUserKeysResponse, error) { + resp, err := s.cs.newRequest("registerUserKeys", p.toURLValues()) + if err != nil { + return nil, err + } + + var r RegisterUserKeysResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + return &r, nil +} + +type RegisterUserKeysResponse struct { + Apikey string `json:"apikey,omitempty"` + Secretkey string `json:"secretkey,omitempty"` +} + +type ListLdapUsersParams struct { + p map[string]interface{} +} + +func (p *ListLdapUsersParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["keyword"]; found { + u.Set("keyword", v.(string)) + } + if v, found := p.p["listtype"]; found { + u.Set("listtype", v.(string)) + } + if v, found := p.p["page"]; found { + vv := strconv.Itoa(v.(int)) + u.Set("page", vv) + } + if v, found := p.p["pagesize"]; found { + vv := strconv.Itoa(v.(int)) + u.Set("pagesize", vv) + } + return u +} + +func (p *ListLdapUsersParams) SetKeyword(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["keyword"] = v + return +} + +func (p *ListLdapUsersParams) SetListtype(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["listtype"] = v + return +} + +func (p *ListLdapUsersParams) SetPage(v int) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["page"] = v + return +} + +func (p *ListLdapUsersParams) SetPagesize(v int) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["pagesize"] = v + return +} + +// You should always use this function to get a new ListLdapUsersParams instance, +// as then you are sure you have configured all required params +func (s *UserService) NewListLdapUsersParams() *ListLdapUsersParams { + p := &ListLdapUsersParams{} + p.p = make(map[string]interface{}) + return p +} + +// Lists all LDAP Users +func (s *UserService) ListLdapUsers(p *ListLdapUsersParams) (*ListLdapUsersResponse, error) { + resp, err := s.cs.newRequest("listLdapUsers", p.toURLValues()) + if err != nil { + return nil, err + } + + var r ListLdapUsersResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + return &r, nil +} + +type ListLdapUsersResponse struct { + Count int `json:"count"` + LdapUsers []*LdapUser `json:"ldapuser"` +} + +type LdapUser struct { + Domain string `json:"domain,omitempty"` + Email string `json:"email,omitempty"` + Firstname string `json:"firstname,omitempty"` + Lastname string `json:"lastname,omitempty"` + Principal string `json:"principal,omitempty"` + Username string `json:"username,omitempty"` +} + +type ImportLdapUsersParams struct { + p map[string]interface{} +} + +func (p *ImportLdapUsersParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["account"]; found { + u.Set("account", v.(string)) + } + if v, found := p.p["accountdetails"]; found { + i := 0 + for k, vv := range v.(map[string]string) { + u.Set(fmt.Sprintf("accountdetails[%d].key", i), k) + u.Set(fmt.Sprintf("accountdetails[%d].value", i), vv) + i++ + } + } + if v, found := p.p["accounttype"]; found { + vv := strconv.Itoa(v.(int)) + u.Set("accounttype", vv) + } + if v, found := p.p["domainid"]; found { + u.Set("domainid", v.(string)) + } + if v, found := p.p["group"]; found { + u.Set("group", v.(string)) + } + if v, found := p.p["keyword"]; found { + u.Set("keyword", v.(string)) + } + if v, found 
:= p.p["page"]; found { + vv := strconv.Itoa(v.(int)) + u.Set("page", vv) + } + if v, found := p.p["pagesize"]; found { + vv := strconv.Itoa(v.(int)) + u.Set("pagesize", vv) + } + if v, found := p.p["timezone"]; found { + u.Set("timezone", v.(string)) + } + return u +} + +func (p *ImportLdapUsersParams) SetAccount(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["account"] = v + return +} + +func (p *ImportLdapUsersParams) SetAccountdetails(v map[string]string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["accountdetails"] = v + return +} + +func (p *ImportLdapUsersParams) SetAccounttype(v int) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["accounttype"] = v + return +} + +func (p *ImportLdapUsersParams) SetDomainid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["domainid"] = v + return +} + +func (p *ImportLdapUsersParams) SetGroup(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["group"] = v + return +} + +func (p *ImportLdapUsersParams) SetKeyword(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["keyword"] = v + return +} + +func (p *ImportLdapUsersParams) SetPage(v int) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["page"] = v + return +} + +func (p *ImportLdapUsersParams) SetPagesize(v int) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["pagesize"] = v + return +} + +func (p *ImportLdapUsersParams) SetTimezone(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["timezone"] = v + return +} + +// You should always use this function to get a new ImportLdapUsersParams instance, +// as then you are sure you have configured all required params +func (s *UserService) NewImportLdapUsersParams(accounttype int) *ImportLdapUsersParams { + p := &ImportLdapUsersParams{} + p.p = make(map[string]interface{}) + p.p["accounttype"] = accounttype + return p +} + +// Import LDAP users +func (s *UserService) ImportLdapUsers(p *ImportLdapUsersParams) (*ImportLdapUsersResponse, error) { + resp, err := s.cs.newRequest("importLdapUsers", p.toURLValues()) + if err != nil { + return nil, err + } + + var r ImportLdapUsersResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + return &r, nil +} + +type ImportLdapUsersResponse struct { + Domain string `json:"domain,omitempty"` + Email string `json:"email,omitempty"` + Firstname string `json:"firstname,omitempty"` + Lastname string `json:"lastname,omitempty"` + Principal string `json:"principal,omitempty"` + Username string `json:"username,omitempty"` +} diff --git a/vendor/github.com/xanzy/go-cloudstack/cloudstack/VLANService.go b/vendor/github.com/xanzy/go-cloudstack/cloudstack/VLANService.go new file mode 100644 index 000000000..38ebb0136 --- /dev/null +++ b/vendor/github.com/xanzy/go-cloudstack/cloudstack/VLANService.go @@ -0,0 +1,952 @@ +// +// Copyright 2014, Sander van Harmelen +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. +// + +package cloudstack + +import ( + "encoding/json" + "fmt" + "net/url" + "strconv" + "strings" +) + +type CreateVlanIpRangeParams struct { + p map[string]interface{} +} + +func (p *CreateVlanIpRangeParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["account"]; found { + u.Set("account", v.(string)) + } + if v, found := p.p["domainid"]; found { + u.Set("domainid", v.(string)) + } + if v, found := p.p["endip"]; found { + u.Set("endip", v.(string)) + } + if v, found := p.p["endipv6"]; found { + u.Set("endipv6", v.(string)) + } + if v, found := p.p["forvirtualnetwork"]; found { + vv := strconv.FormatBool(v.(bool)) + u.Set("forvirtualnetwork", vv) + } + if v, found := p.p["gateway"]; found { + u.Set("gateway", v.(string)) + } + if v, found := p.p["ip6cidr"]; found { + u.Set("ip6cidr", v.(string)) + } + if v, found := p.p["ip6gateway"]; found { + u.Set("ip6gateway", v.(string)) + } + if v, found := p.p["netmask"]; found { + u.Set("netmask", v.(string)) + } + if v, found := p.p["networkid"]; found { + u.Set("networkid", v.(string)) + } + if v, found := p.p["physicalnetworkid"]; found { + u.Set("physicalnetworkid", v.(string)) + } + if v, found := p.p["podid"]; found { + u.Set("podid", v.(string)) + } + if v, found := p.p["projectid"]; found { + u.Set("projectid", v.(string)) + } + if v, found := p.p["startip"]; found { + u.Set("startip", v.(string)) + } + if v, found := p.p["startipv6"]; found { + u.Set("startipv6", v.(string)) + } + if v, found := p.p["vlan"]; found { + u.Set("vlan", v.(string)) + } + if v, found := p.p["zoneid"]; found { + u.Set("zoneid", v.(string)) + } + return u +} + +func (p *CreateVlanIpRangeParams) SetAccount(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["account"] = v + return +} + +func (p *CreateVlanIpRangeParams) SetDomainid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["domainid"] = v + return +} + +func (p *CreateVlanIpRangeParams) SetEndip(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["endip"] = v + return +} + +func (p *CreateVlanIpRangeParams) SetEndipv6(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["endipv6"] = v + return +} + +func (p *CreateVlanIpRangeParams) SetForvirtualnetwork(v bool) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["forvirtualnetwork"] = v + return +} + +func (p *CreateVlanIpRangeParams) SetGateway(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["gateway"] = v + return +} + +func (p *CreateVlanIpRangeParams) SetIp6cidr(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["ip6cidr"] = v + return +} + +func (p *CreateVlanIpRangeParams) SetIp6gateway(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["ip6gateway"] = v + return +} + +func (p *CreateVlanIpRangeParams) SetNetmask(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["netmask"] = v + return +} + +func (p *CreateVlanIpRangeParams) SetNetworkid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["networkid"] = v + return +} + +func (p *CreateVlanIpRangeParams) SetPhysicalnetworkid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["physicalnetworkid"] = v + return +} + +func (p *CreateVlanIpRangeParams) SetPodid(v string) { + 
if p.p == nil {
+ p.p = make(map[string]interface{})
+ }
+ p.p["podid"] = v
+ return
+}
+
+func (p *CreateVlanIpRangeParams) SetProjectid(v string) {
+ if p.p == nil {
+ p.p = make(map[string]interface{})
+ }
+ p.p["projectid"] = v
+ return
+}
+
+func (p *CreateVlanIpRangeParams) SetStartip(v string) {
+ if p.p == nil {
+ p.p = make(map[string]interface{})
+ }
+ p.p["startip"] = v
+ return
+}
+
+func (p *CreateVlanIpRangeParams) SetStartipv6(v string) {
+ if p.p == nil {
+ p.p = make(map[string]interface{})
+ }
+ p.p["startipv6"] = v
+ return
+}
+
+func (p *CreateVlanIpRangeParams) SetVlan(v string) {
+ if p.p == nil {
+ p.p = make(map[string]interface{})
+ }
+ p.p["vlan"] = v
+ return
+}
+
+func (p *CreateVlanIpRangeParams) SetZoneid(v string) {
+ if p.p == nil {
+ p.p = make(map[string]interface{})
+ }
+ p.p["zoneid"] = v
+ return
+}
+
+// You should always use this function to get a new CreateVlanIpRangeParams instance,
+// as then you are sure you have configured all required params
+func (s *VLANService) NewCreateVlanIpRangeParams() *CreateVlanIpRangeParams {
+ p := &CreateVlanIpRangeParams{}
+ p.p = make(map[string]interface{})
+ return p
+}
+
+// Creates a VLAN IP range.
+func (s *VLANService) CreateVlanIpRange(p *CreateVlanIpRangeParams) (*CreateVlanIpRangeResponse, error) {
+ resp, err := s.cs.newRequest("createVlanIpRange", p.toURLValues())
+ if err != nil {
+ return nil, err
+ }
+
+ var r CreateVlanIpRangeResponse
+ if err := json.Unmarshal(resp, &r); err != nil {
+ return nil, err
+ }
+ return &r, nil
+}
+
+type CreateVlanIpRangeResponse struct {
+ Account string `json:"account,omitempty"`
+ Description string `json:"description,omitempty"`
+ Domain string `json:"domain,omitempty"`
+ Domainid string `json:"domainid,omitempty"`
+ Endip string `json:"endip,omitempty"`
+ Endipv6 string `json:"endipv6,omitempty"`
+ Forvirtualnetwork bool `json:"forvirtualnetwork,omitempty"`
+ Gateway string `json:"gateway,omitempty"`
+ Id string `json:"id,omitempty"`
+ Ip6cidr string `json:"ip6cidr,omitempty"`
+ Ip6gateway string `json:"ip6gateway,omitempty"`
+ Netmask string `json:"netmask,omitempty"`
+ Networkid string `json:"networkid,omitempty"`
+ Physicalnetworkid string `json:"physicalnetworkid,omitempty"`
+ Podid string `json:"podid,omitempty"`
+ Podname string `json:"podname,omitempty"`
+ Project string `json:"project,omitempty"`
+ Projectid string `json:"projectid,omitempty"`
+ Startip string `json:"startip,omitempty"`
+ Startipv6 string `json:"startipv6,omitempty"`
+ Vlan string `json:"vlan,omitempty"`
+ Zoneid string `json:"zoneid,omitempty"`
+}
+
+type DeleteVlanIpRangeParams struct {
+ p map[string]interface{}
+}
+
+func (p *DeleteVlanIpRangeParams) toURLValues() url.Values {
+ u := url.Values{}
+ if p.p == nil {
+ return u
+ }
+ if v, found := p.p["id"]; found {
+ u.Set("id", v.(string))
+ }
+ return u
+}
+
+func (p *DeleteVlanIpRangeParams) SetId(v string) {
+ if p.p == nil {
+ p.p = make(map[string]interface{})
+ }
+ p.p["id"] = v
+ return
+}
+
+// You should always use this function to get a new DeleteVlanIpRangeParams instance,
+// as then you are sure you have configured all required params
+func (s *VLANService) NewDeleteVlanIpRangeParams(id string) *DeleteVlanIpRangeParams {
+ p := &DeleteVlanIpRangeParams{}
+ p.p = make(map[string]interface{})
+ p.p["id"] = id
+ return p
+}
+
+// Deletes a VLAN IP range.
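+// Example (illustrative sketch; cs.VLAN is an assumption about how the client
+// exposes this service, and the UUID is a placeholder):
+//
+//	p := cs.VLAN.NewDeleteVlanIpRangeParams("range-uuid")
+//	if _, err := cs.VLAN.DeleteVlanIpRange(p); err != nil {
+//		return err
+//	}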
+func (s *VLANService) DeleteVlanIpRange(p *DeleteVlanIpRangeParams) (*DeleteVlanIpRangeResponse, error) { + resp, err := s.cs.newRequest("deleteVlanIpRange", p.toURLValues()) + if err != nil { + return nil, err + } + + var r DeleteVlanIpRangeResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + return &r, nil +} + +type DeleteVlanIpRangeResponse struct { + Displaytext string `json:"displaytext,omitempty"` + Success string `json:"success,omitempty"` +} + +type ListVlanIpRangesParams struct { + p map[string]interface{} +} + +func (p *ListVlanIpRangesParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["account"]; found { + u.Set("account", v.(string)) + } + if v, found := p.p["domainid"]; found { + u.Set("domainid", v.(string)) + } + if v, found := p.p["forvirtualnetwork"]; found { + vv := strconv.FormatBool(v.(bool)) + u.Set("forvirtualnetwork", vv) + } + if v, found := p.p["id"]; found { + u.Set("id", v.(string)) + } + if v, found := p.p["keyword"]; found { + u.Set("keyword", v.(string)) + } + if v, found := p.p["networkid"]; found { + u.Set("networkid", v.(string)) + } + if v, found := p.p["page"]; found { + vv := strconv.Itoa(v.(int)) + u.Set("page", vv) + } + if v, found := p.p["pagesize"]; found { + vv := strconv.Itoa(v.(int)) + u.Set("pagesize", vv) + } + if v, found := p.p["physicalnetworkid"]; found { + u.Set("physicalnetworkid", v.(string)) + } + if v, found := p.p["podid"]; found { + u.Set("podid", v.(string)) + } + if v, found := p.p["projectid"]; found { + u.Set("projectid", v.(string)) + } + if v, found := p.p["vlan"]; found { + u.Set("vlan", v.(string)) + } + if v, found := p.p["zoneid"]; found { + u.Set("zoneid", v.(string)) + } + return u +} + +func (p *ListVlanIpRangesParams) SetAccount(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["account"] = v + return +} + +func (p *ListVlanIpRangesParams) SetDomainid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["domainid"] = v + return +} + +func (p *ListVlanIpRangesParams) SetForvirtualnetwork(v bool) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["forvirtualnetwork"] = v + return +} + +func (p *ListVlanIpRangesParams) SetId(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["id"] = v + return +} + +func (p *ListVlanIpRangesParams) SetKeyword(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["keyword"] = v + return +} + +func (p *ListVlanIpRangesParams) SetNetworkid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["networkid"] = v + return +} + +func (p *ListVlanIpRangesParams) SetPage(v int) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["page"] = v + return +} + +func (p *ListVlanIpRangesParams) SetPagesize(v int) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["pagesize"] = v + return +} + +func (p *ListVlanIpRangesParams) SetPhysicalnetworkid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["physicalnetworkid"] = v + return +} + +func (p *ListVlanIpRangesParams) SetPodid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["podid"] = v + return +} + +func (p *ListVlanIpRangesParams) SetProjectid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["projectid"] = v + return +} + +func (p *ListVlanIpRangesParams) SetVlan(v string) { + if p.p == nil { + p.p = 
make(map[string]interface{})
+ }
+ p.p["vlan"] = v
+ return
+}
+
+func (p *ListVlanIpRangesParams) SetZoneid(v string) {
+ if p.p == nil {
+ p.p = make(map[string]interface{})
+ }
+ p.p["zoneid"] = v
+ return
+}
+
+// You should always use this function to get a new ListVlanIpRangesParams instance,
+// as then you are sure you have configured all required params
+func (s *VLANService) NewListVlanIpRangesParams() *ListVlanIpRangesParams {
+ p := &ListVlanIpRangesParams{}
+ p.p = make(map[string]interface{})
+ return p
+}
+
+// This is a courtesy helper function, which in some cases may not work as expected!
+func (s *VLANService) GetVlanIpRangeByID(id string) (*VlanIpRange, int, error) {
+ p := &ListVlanIpRangesParams{}
+ p.p = make(map[string]interface{})
+
+ p.p["id"] = id
+
+ l, err := s.ListVlanIpRanges(p)
+ if err != nil {
+ if strings.Contains(err.Error(), fmt.Sprintf(
+ "Invalid parameter id value=%s due to incorrect long value format, "+
+ "or entity does not exist", id)) {
+ return nil, 0, fmt.Errorf("No match found for %s: %+v", id, l)
+ }
+ return nil, -1, err
+ }
+
+ if l.Count == 0 {
+ // If no matches, search all projects
+ p.p["projectid"] = "-1"
+
+ l, err = s.ListVlanIpRanges(p)
+ if err != nil {
+ if strings.Contains(err.Error(), fmt.Sprintf(
+ "Invalid parameter id value=%s due to incorrect long value format, "+
+ "or entity does not exist", id)) {
+ return nil, 0, fmt.Errorf("No match found for %s: %+v", id, l)
+ }
+ return nil, -1, err
+ }
+ }
+
+ if l.Count == 0 {
+ return nil, l.Count, fmt.Errorf("No match found for %s: %+v", id, l)
+ }
+
+ if l.Count == 1 {
+ return l.VlanIpRanges[0], l.Count, nil
+ }
+ return nil, l.Count, fmt.Errorf("There is more than one result for VlanIpRange UUID: %s!", id)
+}
+
+// Lists all VLAN IP ranges.
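+// Example with paging (illustrative sketch; cs.VLAN and the zone UUID are
+// assumptions, not part of this file):
+//
+//	p := cs.VLAN.NewListVlanIpRangesParams()
+//	p.SetZoneid("zone-uuid")
+//	p.SetPage(1)
+//	p.SetPagesize(500)
+//	l, err := cs.VLAN.ListVlanIpRanges(p)
+//	if err != nil {
+//		return err
+//	}
+//	for _, r := range l.VlanIpRanges {
+//		fmt.Println(r.Vlan, r.Startip, r.Endip)
+//	}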
+func (s *VLANService) ListVlanIpRanges(p *ListVlanIpRangesParams) (*ListVlanIpRangesResponse, error) { + resp, err := s.cs.newRequest("listVlanIpRanges", p.toURLValues()) + if err != nil { + return nil, err + } + + var r ListVlanIpRangesResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + return &r, nil +} + +type ListVlanIpRangesResponse struct { + Count int `json:"count"` + VlanIpRanges []*VlanIpRange `json:"vlaniprange"` +} + +type VlanIpRange struct { + Account string `json:"account,omitempty"` + Description string `json:"description,omitempty"` + Domain string `json:"domain,omitempty"` + Domainid string `json:"domainid,omitempty"` + Endip string `json:"endip,omitempty"` + Endipv6 string `json:"endipv6,omitempty"` + Forvirtualnetwork bool `json:"forvirtualnetwork,omitempty"` + Gateway string `json:"gateway,omitempty"` + Id string `json:"id,omitempty"` + Ip6cidr string `json:"ip6cidr,omitempty"` + Ip6gateway string `json:"ip6gateway,omitempty"` + Netmask string `json:"netmask,omitempty"` + Networkid string `json:"networkid,omitempty"` + Physicalnetworkid string `json:"physicalnetworkid,omitempty"` + Podid string `json:"podid,omitempty"` + Podname string `json:"podname,omitempty"` + Project string `json:"project,omitempty"` + Projectid string `json:"projectid,omitempty"` + Startip string `json:"startip,omitempty"` + Startipv6 string `json:"startipv6,omitempty"` + Vlan string `json:"vlan,omitempty"` + Zoneid string `json:"zoneid,omitempty"` +} + +type DedicateGuestVlanRangeParams struct { + p map[string]interface{} +} + +func (p *DedicateGuestVlanRangeParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["account"]; found { + u.Set("account", v.(string)) + } + if v, found := p.p["domainid"]; found { + u.Set("domainid", v.(string)) + } + if v, found := p.p["physicalnetworkid"]; found { + u.Set("physicalnetworkid", v.(string)) + } + if v, found := p.p["projectid"]; found { + u.Set("projectid", v.(string)) + } + if v, found := p.p["vlanrange"]; found { + u.Set("vlanrange", v.(string)) + } + return u +} + +func (p *DedicateGuestVlanRangeParams) SetAccount(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["account"] = v + return +} + +func (p *DedicateGuestVlanRangeParams) SetDomainid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["domainid"] = v + return +} + +func (p *DedicateGuestVlanRangeParams) SetPhysicalnetworkid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["physicalnetworkid"] = v + return +} + +func (p *DedicateGuestVlanRangeParams) SetProjectid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["projectid"] = v + return +} + +func (p *DedicateGuestVlanRangeParams) SetVlanrange(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["vlanrange"] = v + return +} + +// You should always use this function to get a new DedicateGuestVlanRangeParams instance, +// as then you are sure you have configured all required params +func (s *VLANService) NewDedicateGuestVlanRangeParams(account string, domainid string, physicalnetworkid string, vlanrange string) *DedicateGuestVlanRangeParams { + p := &DedicateGuestVlanRangeParams{} + p.p = make(map[string]interface{}) + p.p["account"] = account + p.p["domainid"] = domainid + p.p["physicalnetworkid"] = physicalnetworkid + p.p["vlanrange"] = vlanrange + return p +} + +// Dedicates a guest vlan range to an account +func (s 
*VLANService) DedicateGuestVlanRange(p *DedicateGuestVlanRangeParams) (*DedicateGuestVlanRangeResponse, error) {
+	resp, err := s.cs.newRequest("dedicateGuestVlanRange", p.toURLValues())
+	if err != nil {
+		return nil, err
+	}
+
+	var r DedicateGuestVlanRangeResponse
+	if err := json.Unmarshal(resp, &r); err != nil {
+		return nil, err
+	}
+	return &r, nil
+}
+
+type DedicateGuestVlanRangeResponse struct {
+	Account string `json:"account,omitempty"`
+	Domain string `json:"domain,omitempty"`
+	Domainid string `json:"domainid,omitempty"`
+	Guestvlanrange string `json:"guestvlanrange,omitempty"`
+	Id string `json:"id,omitempty"`
+	Physicalnetworkid int64 `json:"physicalnetworkid,omitempty"`
+	Project string `json:"project,omitempty"`
+	Projectid string `json:"projectid,omitempty"`
+	Zoneid int64 `json:"zoneid,omitempty"`
+}
+
+type ReleaseDedicatedGuestVlanRangeParams struct {
+	p map[string]interface{}
+}
+
+func (p *ReleaseDedicatedGuestVlanRangeParams) toURLValues() url.Values {
+	u := url.Values{}
+	if p.p == nil {
+		return u
+	}
+	if v, found := p.p["id"]; found {
+		u.Set("id", v.(string))
+	}
+	return u
+}
+
+func (p *ReleaseDedicatedGuestVlanRangeParams) SetId(v string) {
+	if p.p == nil {
+		p.p = make(map[string]interface{})
+	}
+	p.p["id"] = v
+	return
+}
+
+// You should always use this function to get a new ReleaseDedicatedGuestVlanRangeParams instance,
+// as then you are sure you have configured all required params
+func (s *VLANService) NewReleaseDedicatedGuestVlanRangeParams(id string) *ReleaseDedicatedGuestVlanRangeParams {
+	p := &ReleaseDedicatedGuestVlanRangeParams{}
+	p.p = make(map[string]interface{})
+	p.p["id"] = id
+	return p
+}
+
+// Releases a dedicated guest vlan range to the system
+func (s *VLANService) ReleaseDedicatedGuestVlanRange(p *ReleaseDedicatedGuestVlanRangeParams) (*ReleaseDedicatedGuestVlanRangeResponse, error) {
+	resp, err := s.cs.newRequest("releaseDedicatedGuestVlanRange", p.toURLValues())
+	if err != nil {
+		return nil, err
+	}
+
+	var r ReleaseDedicatedGuestVlanRangeResponse
+	if err := json.Unmarshal(resp, &r); err != nil {
+		return nil, err
+	}
+
+	// If we have an async client, we need to wait for the async result
+	if s.cs.async {
+		b, err := s.cs.GetAsyncJobResult(r.JobID, s.cs.timeout)
+		if err != nil {
+			if err == AsyncTimeoutErr {
+				return &r, err
+			}
+			return nil, err
+		}
+
+		if err := json.Unmarshal(b, &r); err != nil {
+			return nil, err
+		}
+	}
+	return &r, nil
+}
+
+type ReleaseDedicatedGuestVlanRangeResponse struct {
+	JobID string `json:"jobid,omitempty"`
+	Displaytext string `json:"displaytext,omitempty"`
+	Success bool `json:"success,omitempty"`
+}
+
+type ListDedicatedGuestVlanRangesParams struct {
+	p map[string]interface{}
+}
+
+func (p *ListDedicatedGuestVlanRangesParams) toURLValues() url.Values {
+	u := url.Values{}
+	if p.p == nil {
+		return u
+	}
+	if v, found := p.p["account"]; found {
+		u.Set("account", v.(string))
+	}
+	if v, found := p.p["domainid"]; found {
+		u.Set("domainid", v.(string))
+	}
+	if v, found := p.p["guestvlanrange"]; found {
+		u.Set("guestvlanrange", v.(string))
+	}
+	if v, found := p.p["id"]; found {
+		u.Set("id", v.(string))
+	}
+	if v, found := p.p["keyword"]; found {
+		u.Set("keyword", v.(string))
+	}
+	if v, found := p.p["page"]; found {
+		vv := strconv.Itoa(v.(int))
+		u.Set("page", vv)
+	}
+	if v, found := p.p["pagesize"]; found {
+		vv := strconv.Itoa(v.(int))
+		u.Set("pagesize", vv)
+	}
+	if v, found := p.p["physicalnetworkid"]; found {
+		u.Set("physicalnetworkid", v.(string))
+	}
+	if v, found :=
p.p["projectid"]; found { + u.Set("projectid", v.(string)) + } + if v, found := p.p["zoneid"]; found { + u.Set("zoneid", v.(string)) + } + return u +} + +func (p *ListDedicatedGuestVlanRangesParams) SetAccount(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["account"] = v + return +} + +func (p *ListDedicatedGuestVlanRangesParams) SetDomainid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["domainid"] = v + return +} + +func (p *ListDedicatedGuestVlanRangesParams) SetGuestvlanrange(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["guestvlanrange"] = v + return +} + +func (p *ListDedicatedGuestVlanRangesParams) SetId(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["id"] = v + return +} + +func (p *ListDedicatedGuestVlanRangesParams) SetKeyword(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["keyword"] = v + return +} + +func (p *ListDedicatedGuestVlanRangesParams) SetPage(v int) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["page"] = v + return +} + +func (p *ListDedicatedGuestVlanRangesParams) SetPagesize(v int) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["pagesize"] = v + return +} + +func (p *ListDedicatedGuestVlanRangesParams) SetPhysicalnetworkid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["physicalnetworkid"] = v + return +} + +func (p *ListDedicatedGuestVlanRangesParams) SetProjectid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["projectid"] = v + return +} + +func (p *ListDedicatedGuestVlanRangesParams) SetZoneid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["zoneid"] = v + return +} + +// You should always use this function to get a new ListDedicatedGuestVlanRangesParams instance, +// as then you are sure you have configured all required params +func (s *VLANService) NewListDedicatedGuestVlanRangesParams() *ListDedicatedGuestVlanRangesParams { + p := &ListDedicatedGuestVlanRangesParams{} + p.p = make(map[string]interface{}) + return p +} + +// This is a courtesy helper function, which in some cases may not work as expected! 
+func (s *VLANService) GetDedicatedGuestVlanRangeByID(id string) (*DedicatedGuestVlanRange, int, error) {
+	p := &ListDedicatedGuestVlanRangesParams{}
+	p.p = make(map[string]interface{})
+
+	p.p["id"] = id
+
+	l, err := s.ListDedicatedGuestVlanRanges(p)
+	if err != nil {
+		if strings.Contains(err.Error(), fmt.Sprintf(
+			"Invalid parameter id value=%s due to incorrect long value format, "+
+				"or entity does not exist", id)) {
+			return nil, 0, fmt.Errorf("No match found for %s: %+v", id, l)
+		}
+		return nil, -1, err
+	}
+
+	if l.Count == 0 {
+		// If no matches, search all projects
+		p.p["projectid"] = "-1"
+
+		l, err = s.ListDedicatedGuestVlanRanges(p)
+		if err != nil {
+			if strings.Contains(err.Error(), fmt.Sprintf(
+				"Invalid parameter id value=%s due to incorrect long value format, "+
+					"or entity does not exist", id)) {
+				return nil, 0, fmt.Errorf("No match found for %s: %+v", id, l)
+			}
+			return nil, -1, err
+		}
+	}
+
+	if l.Count == 0 {
+		return nil, l.Count, fmt.Errorf("No match found for %s: %+v", id, l)
+	}
+
+	if l.Count == 1 {
+		return l.DedicatedGuestVlanRanges[0], l.Count, nil
+	}
+	return nil, l.Count, fmt.Errorf("There is more than one result for DedicatedGuestVlanRange UUID: %s!", id)
+}
+
+// Lists dedicated guest vlan ranges
+func (s *VLANService) ListDedicatedGuestVlanRanges(p *ListDedicatedGuestVlanRangesParams) (*ListDedicatedGuestVlanRangesResponse, error) {
+	resp, err := s.cs.newRequest("listDedicatedGuestVlanRanges", p.toURLValues())
+	if err != nil {
+		return nil, err
+	}
+
+	var r ListDedicatedGuestVlanRangesResponse
+	if err := json.Unmarshal(resp, &r); err != nil {
+		return nil, err
+	}
+	return &r, nil
+}
+
+type ListDedicatedGuestVlanRangesResponse struct {
+	Count int `json:"count"`
+	DedicatedGuestVlanRanges []*DedicatedGuestVlanRange `json:"dedicatedguestvlanrange"`
+}
+
+type DedicatedGuestVlanRange struct {
+	Account string `json:"account,omitempty"`
+	Domain string `json:"domain,omitempty"`
+	Domainid string `json:"domainid,omitempty"`
+	Guestvlanrange string `json:"guestvlanrange,omitempty"`
+	Id string `json:"id,omitempty"`
+	Physicalnetworkid int64 `json:"physicalnetworkid,omitempty"`
+	Project string `json:"project,omitempty"`
+	Projectid string `json:"projectid,omitempty"`
+	Zoneid int64 `json:"zoneid,omitempty"`
+}
diff --git a/vendor/github.com/xanzy/go-cloudstack/cloudstack/VMGroupService.go b/vendor/github.com/xanzy/go-cloudstack/cloudstack/VMGroupService.go
new file mode 100644
index 000000000..da57bef01
--- /dev/null
+++ b/vendor/github.com/xanzy/go-cloudstack/cloudstack/VMGroupService.go
@@ -0,0 +1,494 @@
+//
+// Copyright 2014, Sander van Harmelen
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+// + +package cloudstack + +import ( + "encoding/json" + "fmt" + "net/url" + "strconv" + "strings" +) + +type CreateInstanceGroupParams struct { + p map[string]interface{} +} + +func (p *CreateInstanceGroupParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["account"]; found { + u.Set("account", v.(string)) + } + if v, found := p.p["domainid"]; found { + u.Set("domainid", v.(string)) + } + if v, found := p.p["name"]; found { + u.Set("name", v.(string)) + } + if v, found := p.p["projectid"]; found { + u.Set("projectid", v.(string)) + } + return u +} + +func (p *CreateInstanceGroupParams) SetAccount(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["account"] = v + return +} + +func (p *CreateInstanceGroupParams) SetDomainid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["domainid"] = v + return +} + +func (p *CreateInstanceGroupParams) SetName(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["name"] = v + return +} + +func (p *CreateInstanceGroupParams) SetProjectid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["projectid"] = v + return +} + +// You should always use this function to get a new CreateInstanceGroupParams instance, +// as then you are sure you have configured all required params +func (s *VMGroupService) NewCreateInstanceGroupParams(name string) *CreateInstanceGroupParams { + p := &CreateInstanceGroupParams{} + p.p = make(map[string]interface{}) + p.p["name"] = name + return p +} + +// Creates a vm group +func (s *VMGroupService) CreateInstanceGroup(p *CreateInstanceGroupParams) (*CreateInstanceGroupResponse, error) { + resp, err := s.cs.newRequest("createInstanceGroup", p.toURLValues()) + if err != nil { + return nil, err + } + + var r CreateInstanceGroupResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + return &r, nil +} + +type CreateInstanceGroupResponse struct { + Account string `json:"account,omitempty"` + Created string `json:"created,omitempty"` + Domain string `json:"domain,omitempty"` + Domainid string `json:"domainid,omitempty"` + Id string `json:"id,omitempty"` + Name string `json:"name,omitempty"` + Project string `json:"project,omitempty"` + Projectid string `json:"projectid,omitempty"` +} + +type DeleteInstanceGroupParams struct { + p map[string]interface{} +} + +func (p *DeleteInstanceGroupParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["id"]; found { + u.Set("id", v.(string)) + } + return u +} + +func (p *DeleteInstanceGroupParams) SetId(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["id"] = v + return +} + +// You should always use this function to get a new DeleteInstanceGroupParams instance, +// as then you are sure you have configured all required params +func (s *VMGroupService) NewDeleteInstanceGroupParams(id string) *DeleteInstanceGroupParams { + p := &DeleteInstanceGroupParams{} + p.p = make(map[string]interface{}) + p.p["id"] = id + return p +} + +// Deletes a vm group +func (s *VMGroupService) DeleteInstanceGroup(p *DeleteInstanceGroupParams) (*DeleteInstanceGroupResponse, error) { + resp, err := s.cs.newRequest("deleteInstanceGroup", p.toURLValues()) + if err != nil { + return nil, err + } + + var r DeleteInstanceGroupResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + return &r, nil +} + +type DeleteInstanceGroupResponse 
struct { + Displaytext string `json:"displaytext,omitempty"` + Success string `json:"success,omitempty"` +} + +type UpdateInstanceGroupParams struct { + p map[string]interface{} +} + +func (p *UpdateInstanceGroupParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["id"]; found { + u.Set("id", v.(string)) + } + if v, found := p.p["name"]; found { + u.Set("name", v.(string)) + } + return u +} + +func (p *UpdateInstanceGroupParams) SetId(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["id"] = v + return +} + +func (p *UpdateInstanceGroupParams) SetName(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["name"] = v + return +} + +// You should always use this function to get a new UpdateInstanceGroupParams instance, +// as then you are sure you have configured all required params +func (s *VMGroupService) NewUpdateInstanceGroupParams(id string) *UpdateInstanceGroupParams { + p := &UpdateInstanceGroupParams{} + p.p = make(map[string]interface{}) + p.p["id"] = id + return p +} + +// Updates a vm group +func (s *VMGroupService) UpdateInstanceGroup(p *UpdateInstanceGroupParams) (*UpdateInstanceGroupResponse, error) { + resp, err := s.cs.newRequest("updateInstanceGroup", p.toURLValues()) + if err != nil { + return nil, err + } + + var r UpdateInstanceGroupResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + return &r, nil +} + +type UpdateInstanceGroupResponse struct { + Account string `json:"account,omitempty"` + Created string `json:"created,omitempty"` + Domain string `json:"domain,omitempty"` + Domainid string `json:"domainid,omitempty"` + Id string `json:"id,omitempty"` + Name string `json:"name,omitempty"` + Project string `json:"project,omitempty"` + Projectid string `json:"projectid,omitempty"` +} + +type ListInstanceGroupsParams struct { + p map[string]interface{} +} + +func (p *ListInstanceGroupsParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["account"]; found { + u.Set("account", v.(string)) + } + if v, found := p.p["domainid"]; found { + u.Set("domainid", v.(string)) + } + if v, found := p.p["id"]; found { + u.Set("id", v.(string)) + } + if v, found := p.p["isrecursive"]; found { + vv := strconv.FormatBool(v.(bool)) + u.Set("isrecursive", vv) + } + if v, found := p.p["keyword"]; found { + u.Set("keyword", v.(string)) + } + if v, found := p.p["listall"]; found { + vv := strconv.FormatBool(v.(bool)) + u.Set("listall", vv) + } + if v, found := p.p["name"]; found { + u.Set("name", v.(string)) + } + if v, found := p.p["page"]; found { + vv := strconv.Itoa(v.(int)) + u.Set("page", vv) + } + if v, found := p.p["pagesize"]; found { + vv := strconv.Itoa(v.(int)) + u.Set("pagesize", vv) + } + if v, found := p.p["projectid"]; found { + u.Set("projectid", v.(string)) + } + return u +} + +func (p *ListInstanceGroupsParams) SetAccount(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["account"] = v + return +} + +func (p *ListInstanceGroupsParams) SetDomainid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["domainid"] = v + return +} + +func (p *ListInstanceGroupsParams) SetId(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["id"] = v + return +} + +func (p *ListInstanceGroupsParams) SetIsrecursive(v bool) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["isrecursive"] = v + return +} + +func (p 
*ListInstanceGroupsParams) SetKeyword(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["keyword"] = v + return +} + +func (p *ListInstanceGroupsParams) SetListall(v bool) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["listall"] = v + return +} + +func (p *ListInstanceGroupsParams) SetName(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["name"] = v + return +} + +func (p *ListInstanceGroupsParams) SetPage(v int) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["page"] = v + return +} + +func (p *ListInstanceGroupsParams) SetPagesize(v int) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["pagesize"] = v + return +} + +func (p *ListInstanceGroupsParams) SetProjectid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["projectid"] = v + return +} + +// You should always use this function to get a new ListInstanceGroupsParams instance, +// as then you are sure you have configured all required params +func (s *VMGroupService) NewListInstanceGroupsParams() *ListInstanceGroupsParams { + p := &ListInstanceGroupsParams{} + p.p = make(map[string]interface{}) + return p +} + +// This is a courtesy helper function, which in some cases may not work as expected! +func (s *VMGroupService) GetInstanceGroupID(name string) (string, error) { + p := &ListInstanceGroupsParams{} + p.p = make(map[string]interface{}) + + p.p["name"] = name + + l, err := s.ListInstanceGroups(p) + if err != nil { + return "", err + } + + if l.Count == 0 { + // If no matches, search all projects + p.p["projectid"] = "-1" + + l, err = s.ListInstanceGroups(p) + if err != nil { + return "", err + } + } + + if l.Count == 0 { + return "", fmt.Errorf("No match found for %s: %+v", name, l) + } + + if l.Count == 1 { + return l.InstanceGroups[0].Id, nil + } + + if l.Count > 1 { + for _, v := range l.InstanceGroups { + if v.Name == name { + return v.Id, nil + } + } + } + return "", fmt.Errorf("Could not find an exact match for %s: %+v", name, l) +} + +// This is a courtesy helper function, which in some cases may not work as expected! +func (s *VMGroupService) GetInstanceGroupByName(name string) (*InstanceGroup, int, error) { + id, err := s.GetInstanceGroupID(name) + if err != nil { + return nil, -1, err + } + + r, count, err := s.GetInstanceGroupByID(id) + if err != nil { + return nil, count, err + } + return r, count, nil +} + +// This is a courtesy helper function, which in some cases may not work as expected! 
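+// GetInstanceGroupByName above first resolves the name to an ID via
+// GetInstanceGroupID and then delegates to the function below, so a by-name
+// lookup performs at least two API calls. Sketch (assumes a configured
+// client cs; the group name is hypothetical):
+//
+//	g, _, err := cs.VMGroup.GetInstanceGroupByName("web-servers")
+//	if err == nil {
+//		fmt.Println(g.Id, g.Created)
+//	}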
+func (s *VMGroupService) GetInstanceGroupByID(id string) (*InstanceGroup, int, error) {
+	p := &ListInstanceGroupsParams{}
+	p.p = make(map[string]interface{})
+
+	p.p["id"] = id
+
+	l, err := s.ListInstanceGroups(p)
+	if err != nil {
+		if strings.Contains(err.Error(), fmt.Sprintf(
+			"Invalid parameter id value=%s due to incorrect long value format, "+
+				"or entity does not exist", id)) {
+			return nil, 0, fmt.Errorf("No match found for %s: %+v", id, l)
+		}
+		return nil, -1, err
+	}
+
+	if l.Count == 0 {
+		// If no matches, search all projects
+		p.p["projectid"] = "-1"
+
+		l, err = s.ListInstanceGroups(p)
+		if err != nil {
+			if strings.Contains(err.Error(), fmt.Sprintf(
+				"Invalid parameter id value=%s due to incorrect long value format, "+
+					"or entity does not exist", id)) {
+				return nil, 0, fmt.Errorf("No match found for %s: %+v", id, l)
+			}
+			return nil, -1, err
+		}
+	}
+
+	if l.Count == 0 {
+		return nil, l.Count, fmt.Errorf("No match found for %s: %+v", id, l)
+	}
+
+	if l.Count == 1 {
+		return l.InstanceGroups[0], l.Count, nil
+	}
+	return nil, l.Count, fmt.Errorf("There is more than one result for InstanceGroup UUID: %s!", id)
+}
+
+// Lists vm groups
+func (s *VMGroupService) ListInstanceGroups(p *ListInstanceGroupsParams) (*ListInstanceGroupsResponse, error) {
+	resp, err := s.cs.newRequest("listInstanceGroups", p.toURLValues())
+	if err != nil {
+		return nil, err
+	}
+
+	var r ListInstanceGroupsResponse
+	if err := json.Unmarshal(resp, &r); err != nil {
+		return nil, err
+	}
+	return &r, nil
+}
+
+type ListInstanceGroupsResponse struct {
+	Count int `json:"count"`
+	InstanceGroups []*InstanceGroup `json:"instancegroup"`
+}
+
+type InstanceGroup struct {
+	Account string `json:"account,omitempty"`
+	Created string `json:"created,omitempty"`
+	Domain string `json:"domain,omitempty"`
+	Domainid string `json:"domainid,omitempty"`
+	Id string `json:"id,omitempty"`
+	Name string `json:"name,omitempty"`
+	Project string `json:"project,omitempty"`
+	Projectid string `json:"projectid,omitempty"`
+}
diff --git a/vendor/github.com/xanzy/go-cloudstack/cloudstack/VPCService.go b/vendor/github.com/xanzy/go-cloudstack/cloudstack/VPCService.go
new file mode 100644
index 000000000..956bbbc46
--- /dev/null
+++ b/vendor/github.com/xanzy/go-cloudstack/cloudstack/VPCService.go
@@ -0,0 +1,2834 @@
+//
+// Copyright 2014, Sander van Harmelen
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+// + +package cloudstack + +import ( + "encoding/json" + "fmt" + "net/url" + "strconv" + "strings" +) + +type CreateVPCParams struct { + p map[string]interface{} +} + +func (p *CreateVPCParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["account"]; found { + u.Set("account", v.(string)) + } + if v, found := p.p["cidr"]; found { + u.Set("cidr", v.(string)) + } + if v, found := p.p["displaytext"]; found { + u.Set("displaytext", v.(string)) + } + if v, found := p.p["domainid"]; found { + u.Set("domainid", v.(string)) + } + if v, found := p.p["fordisplay"]; found { + vv := strconv.FormatBool(v.(bool)) + u.Set("fordisplay", vv) + } + if v, found := p.p["name"]; found { + u.Set("name", v.(string)) + } + if v, found := p.p["networkdomain"]; found { + u.Set("networkdomain", v.(string)) + } + if v, found := p.p["projectid"]; found { + u.Set("projectid", v.(string)) + } + if v, found := p.p["start"]; found { + vv := strconv.FormatBool(v.(bool)) + u.Set("start", vv) + } + if v, found := p.p["vpcofferingid"]; found { + u.Set("vpcofferingid", v.(string)) + } + if v, found := p.p["zoneid"]; found { + u.Set("zoneid", v.(string)) + } + return u +} + +func (p *CreateVPCParams) SetAccount(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["account"] = v + return +} + +func (p *CreateVPCParams) SetCidr(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["cidr"] = v + return +} + +func (p *CreateVPCParams) SetDisplaytext(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["displaytext"] = v + return +} + +func (p *CreateVPCParams) SetDomainid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["domainid"] = v + return +} + +func (p *CreateVPCParams) SetFordisplay(v bool) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["fordisplay"] = v + return +} + +func (p *CreateVPCParams) SetName(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["name"] = v + return +} + +func (p *CreateVPCParams) SetNetworkdomain(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["networkdomain"] = v + return +} + +func (p *CreateVPCParams) SetProjectid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["projectid"] = v + return +} + +func (p *CreateVPCParams) SetStart(v bool) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["start"] = v + return +} + +func (p *CreateVPCParams) SetVpcofferingid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["vpcofferingid"] = v + return +} + +func (p *CreateVPCParams) SetZoneid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["zoneid"] = v + return +} + +// You should always use this function to get a new CreateVPCParams instance, +// as then you are sure you have configured all required params +func (s *VPCService) NewCreateVPCParams(cidr string, displaytext string, name string, vpcofferingid string, zoneid string) *CreateVPCParams { + p := &CreateVPCParams{} + p.p = make(map[string]interface{}) + p.p["cidr"] = cidr + p.p["displaytext"] = displaytext + p.p["name"] = name + p.p["vpcofferingid"] = vpcofferingid + p.p["zoneid"] = zoneid + return p +} + +// Creates a VPC +func (s *VPCService) CreateVPC(p *CreateVPCParams) (*CreateVPCResponse, error) { + resp, err := s.cs.newRequest("createVPC", p.toURLValues()) + if err != nil { + return nil, err + } + + var r CreateVPCResponse + 
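// The raw response decoded just below carries the ID of the asynchronous
+	// job; for an async client the job result is then polled and decoded into
+	// the same struct, with getRawValue unwrapping the nested job payload.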
+	if err := json.Unmarshal(resp, &r); err != nil {
+		return nil, err
+	}
+
+	// If we have an async client, we need to wait for the async result
+	if s.cs.async {
+		b, err := s.cs.GetAsyncJobResult(r.JobID, s.cs.timeout)
+		if err != nil {
+			if err == AsyncTimeoutErr {
+				return &r, err
+			}
+			return nil, err
+		}
+
+		b, err = getRawValue(b)
+		if err != nil {
+			return nil, err
+		}
+
+		if err := json.Unmarshal(b, &r); err != nil {
+			return nil, err
+		}
+	}
+	return &r, nil
+}
+
+type CreateVPCResponse struct {
+	JobID string `json:"jobid,omitempty"`
+	Account string `json:"account,omitempty"`
+	Cidr string `json:"cidr,omitempty"`
+	Created string `json:"created,omitempty"`
+	Displaytext string `json:"displaytext,omitempty"`
+	Distributedvpcrouter bool `json:"distributedvpcrouter,omitempty"`
+	Domain string `json:"domain,omitempty"`
+	Domainid string `json:"domainid,omitempty"`
+	Fordisplay bool `json:"fordisplay,omitempty"`
+	Id string `json:"id,omitempty"`
+	Name string `json:"name,omitempty"`
+	Network []struct {
+		Account string `json:"account,omitempty"`
+		Aclid string `json:"aclid,omitempty"`
+		Acltype string `json:"acltype,omitempty"`
+		Broadcastdomaintype string `json:"broadcastdomaintype,omitempty"`
+		Broadcasturi string `json:"broadcasturi,omitempty"`
+		Canusefordeploy bool `json:"canusefordeploy,omitempty"`
+		Cidr string `json:"cidr,omitempty"`
+		Displaynetwork bool `json:"displaynetwork,omitempty"`
+		Displaytext string `json:"displaytext,omitempty"`
+		Dns1 string `json:"dns1,omitempty"`
+		Dns2 string `json:"dns2,omitempty"`
+		Domain string `json:"domain,omitempty"`
+		Domainid string `json:"domainid,omitempty"`
+		Gateway string `json:"gateway,omitempty"`
+		Id string `json:"id,omitempty"`
+		Ip6cidr string `json:"ip6cidr,omitempty"`
+		Ip6gateway string `json:"ip6gateway,omitempty"`
+		Isdefault bool `json:"isdefault,omitempty"`
+		Ispersistent bool `json:"ispersistent,omitempty"`
+		Issystem bool `json:"issystem,omitempty"`
+		Name string `json:"name,omitempty"`
+		Netmask string `json:"netmask,omitempty"`
+		Networkcidr string `json:"networkcidr,omitempty"`
+		Networkdomain string `json:"networkdomain,omitempty"`
+		Networkofferingavailability string `json:"networkofferingavailability,omitempty"`
+		Networkofferingconservemode bool `json:"networkofferingconservemode,omitempty"`
+		Networkofferingdisplaytext string `json:"networkofferingdisplaytext,omitempty"`
+		Networkofferingid string `json:"networkofferingid,omitempty"`
+		Networkofferingname string `json:"networkofferingname,omitempty"`
+		Physicalnetworkid string `json:"physicalnetworkid,omitempty"`
+		Project string `json:"project,omitempty"`
+		Projectid string `json:"projectid,omitempty"`
+		Related string `json:"related,omitempty"`
+		Reservediprange string `json:"reservediprange,omitempty"`
+		Restartrequired bool `json:"restartrequired,omitempty"`
+		Service []struct {
+			Capability []struct {
+				Canchooseservicecapability bool `json:"canchooseservicecapability,omitempty"`
+				Name string `json:"name,omitempty"`
+				Value string `json:"value,omitempty"`
+			} `json:"capability,omitempty"`
+			Name string `json:"name,omitempty"`
+			Provider []struct {
+				Canenableindividualservice bool `json:"canenableindividualservice,omitempty"`
+				Destinationphysicalnetworkid string `json:"destinationphysicalnetworkid,omitempty"`
+				Id string `json:"id,omitempty"`
+				Name string `json:"name,omitempty"`
+				Physicalnetworkid string `json:"physicalnetworkid,omitempty"`
+				Servicelist []string `json:"servicelist,omitempty"`
+				State string `json:"state,omitempty"`
+			}
`json:"provider,omitempty"` + } `json:"service,omitempty"` + Specifyipranges bool `json:"specifyipranges,omitempty"` + State string `json:"state,omitempty"` + Strechedl2subnet bool `json:"strechedl2subnet,omitempty"` + Subdomainaccess bool `json:"subdomainaccess,omitempty"` + Tags []struct { + Account string `json:"account,omitempty"` + Customer string `json:"customer,omitempty"` + Domain string `json:"domain,omitempty"` + Domainid string `json:"domainid,omitempty"` + Key string `json:"key,omitempty"` + Project string `json:"project,omitempty"` + Projectid string `json:"projectid,omitempty"` + Resourceid string `json:"resourceid,omitempty"` + Resourcetype string `json:"resourcetype,omitempty"` + Value string `json:"value,omitempty"` + } `json:"tags,omitempty"` + Traffictype string `json:"traffictype,omitempty"` + Type string `json:"type,omitempty"` + Vlan string `json:"vlan,omitempty"` + Vpcid string `json:"vpcid,omitempty"` + Zoneid string `json:"zoneid,omitempty"` + Zonename string `json:"zonename,omitempty"` + Zonesnetworkspans []string `json:"zonesnetworkspans,omitempty"` + } `json:"network,omitempty"` + Networkdomain string `json:"networkdomain,omitempty"` + Project string `json:"project,omitempty"` + Projectid string `json:"projectid,omitempty"` + Regionlevelvpc bool `json:"regionlevelvpc,omitempty"` + Restartrequired bool `json:"restartrequired,omitempty"` + Service []struct { + Capability []struct { + Canchooseservicecapability bool `json:"canchooseservicecapability,omitempty"` + Name string `json:"name,omitempty"` + Value string `json:"value,omitempty"` + } `json:"capability,omitempty"` + Name string `json:"name,omitempty"` + Provider []struct { + Canenableindividualservice bool `json:"canenableindividualservice,omitempty"` + Destinationphysicalnetworkid string `json:"destinationphysicalnetworkid,omitempty"` + Id string `json:"id,omitempty"` + Name string `json:"name,omitempty"` + Physicalnetworkid string `json:"physicalnetworkid,omitempty"` + Servicelist []string `json:"servicelist,omitempty"` + State string `json:"state,omitempty"` + } `json:"provider,omitempty"` + } `json:"service,omitempty"` + State string `json:"state,omitempty"` + Tags []struct { + Account string `json:"account,omitempty"` + Customer string `json:"customer,omitempty"` + Domain string `json:"domain,omitempty"` + Domainid string `json:"domainid,omitempty"` + Key string `json:"key,omitempty"` + Project string `json:"project,omitempty"` + Projectid string `json:"projectid,omitempty"` + Resourceid string `json:"resourceid,omitempty"` + Resourcetype string `json:"resourcetype,omitempty"` + Value string `json:"value,omitempty"` + } `json:"tags,omitempty"` + Vpcofferingid string `json:"vpcofferingid,omitempty"` + Zoneid string `json:"zoneid,omitempty"` + Zonename string `json:"zonename,omitempty"` +} + +type ListVPCsParams struct { + p map[string]interface{} +} + +func (p *ListVPCsParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["account"]; found { + u.Set("account", v.(string)) + } + if v, found := p.p["cidr"]; found { + u.Set("cidr", v.(string)) + } + if v, found := p.p["displaytext"]; found { + u.Set("displaytext", v.(string)) + } + if v, found := p.p["domainid"]; found { + u.Set("domainid", v.(string)) + } + if v, found := p.p["fordisplay"]; found { + vv := strconv.FormatBool(v.(bool)) + u.Set("fordisplay", vv) + } + if v, found := p.p["id"]; found { + u.Set("id", v.(string)) + } + if v, found := p.p["isrecursive"]; found { + vv := 
strconv.FormatBool(v.(bool)) + u.Set("isrecursive", vv) + } + if v, found := p.p["keyword"]; found { + u.Set("keyword", v.(string)) + } + if v, found := p.p["listall"]; found { + vv := strconv.FormatBool(v.(bool)) + u.Set("listall", vv) + } + if v, found := p.p["name"]; found { + u.Set("name", v.(string)) + } + if v, found := p.p["page"]; found { + vv := strconv.Itoa(v.(int)) + u.Set("page", vv) + } + if v, found := p.p["pagesize"]; found { + vv := strconv.Itoa(v.(int)) + u.Set("pagesize", vv) + } + if v, found := p.p["projectid"]; found { + u.Set("projectid", v.(string)) + } + if v, found := p.p["restartrequired"]; found { + vv := strconv.FormatBool(v.(bool)) + u.Set("restartrequired", vv) + } + if v, found := p.p["state"]; found { + u.Set("state", v.(string)) + } + if v, found := p.p["supportedservices"]; found { + vv := strings.Join(v.([]string), ",") + u.Set("supportedservices", vv) + } + if v, found := p.p["tags"]; found { + i := 0 + for k, vv := range v.(map[string]string) { + u.Set(fmt.Sprintf("tags[%d].key", i), k) + u.Set(fmt.Sprintf("tags[%d].value", i), vv) + i++ + } + } + if v, found := p.p["vpcofferingid"]; found { + u.Set("vpcofferingid", v.(string)) + } + if v, found := p.p["zoneid"]; found { + u.Set("zoneid", v.(string)) + } + return u +} + +func (p *ListVPCsParams) SetAccount(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["account"] = v + return +} + +func (p *ListVPCsParams) SetCidr(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["cidr"] = v + return +} + +func (p *ListVPCsParams) SetDisplaytext(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["displaytext"] = v + return +} + +func (p *ListVPCsParams) SetDomainid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["domainid"] = v + return +} + +func (p *ListVPCsParams) SetFordisplay(v bool) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["fordisplay"] = v + return +} + +func (p *ListVPCsParams) SetId(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["id"] = v + return +} + +func (p *ListVPCsParams) SetIsrecursive(v bool) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["isrecursive"] = v + return +} + +func (p *ListVPCsParams) SetKeyword(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["keyword"] = v + return +} + +func (p *ListVPCsParams) SetListall(v bool) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["listall"] = v + return +} + +func (p *ListVPCsParams) SetName(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["name"] = v + return +} + +func (p *ListVPCsParams) SetPage(v int) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["page"] = v + return +} + +func (p *ListVPCsParams) SetPagesize(v int) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["pagesize"] = v + return +} + +func (p *ListVPCsParams) SetProjectid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["projectid"] = v + return +} + +func (p *ListVPCsParams) SetRestartrequired(v bool) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["restartrequired"] = v + return +} + +func (p *ListVPCsParams) SetState(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["state"] = v + return +} + +func (p *ListVPCsParams) SetSupportedservices(v []string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + 
p.p["supportedservices"] = v + return +} + +func (p *ListVPCsParams) SetTags(v map[string]string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["tags"] = v + return +} + +func (p *ListVPCsParams) SetVpcofferingid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["vpcofferingid"] = v + return +} + +func (p *ListVPCsParams) SetZoneid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["zoneid"] = v + return +} + +// You should always use this function to get a new ListVPCsParams instance, +// as then you are sure you have configured all required params +func (s *VPCService) NewListVPCsParams() *ListVPCsParams { + p := &ListVPCsParams{} + p.p = make(map[string]interface{}) + return p +} + +// This is a courtesy helper function, which in some cases may not work as expected! +func (s *VPCService) GetVPCID(name string) (string, error) { + p := &ListVPCsParams{} + p.p = make(map[string]interface{}) + + p.p["name"] = name + + l, err := s.ListVPCs(p) + if err != nil { + return "", err + } + + if l.Count == 0 { + // If no matches, search all projects + p.p["projectid"] = "-1" + + l, err = s.ListVPCs(p) + if err != nil { + return "", err + } + } + + if l.Count == 0 { + return "", fmt.Errorf("No match found for %s: %+v", name, l) + } + + if l.Count == 1 { + return l.VPCs[0].Id, nil + } + + if l.Count > 1 { + for _, v := range l.VPCs { + if v.Name == name { + return v.Id, nil + } + } + } + return "", fmt.Errorf("Could not find an exact match for %s: %+v", name, l) +} + +// This is a courtesy helper function, which in some cases may not work as expected! +func (s *VPCService) GetVPCByName(name string) (*VPC, int, error) { + id, err := s.GetVPCID(name) + if err != nil { + return nil, -1, err + } + + r, count, err := s.GetVPCByID(id) + if err != nil { + return nil, count, err + } + return r, count, nil +} + +// This is a courtesy helper function, which in some cases may not work as expected! 
+func (s *VPCService) GetVPCByID(id string) (*VPC, int, error) {
+	p := &ListVPCsParams{}
+	p.p = make(map[string]interface{})
+
+	p.p["id"] = id
+
+	l, err := s.ListVPCs(p)
+	if err != nil {
+		if strings.Contains(err.Error(), fmt.Sprintf(
+			"Invalid parameter id value=%s due to incorrect long value format, "+
+				"or entity does not exist", id)) {
+			return nil, 0, fmt.Errorf("No match found for %s: %+v", id, l)
+		}
+		return nil, -1, err
+	}
+
+	if l.Count == 0 {
+		// If no matches, search all projects
+		p.p["projectid"] = "-1"
+
+		l, err = s.ListVPCs(p)
+		if err != nil {
+			if strings.Contains(err.Error(), fmt.Sprintf(
+				"Invalid parameter id value=%s due to incorrect long value format, "+
+					"or entity does not exist", id)) {
+				return nil, 0, fmt.Errorf("No match found for %s: %+v", id, l)
+			}
+			return nil, -1, err
+		}
+	}
+
+	if l.Count == 0 {
+		return nil, l.Count, fmt.Errorf("No match found for %s: %+v", id, l)
+	}
+
+	if l.Count == 1 {
+		return l.VPCs[0], l.Count, nil
+	}
+	return nil, l.Count, fmt.Errorf("There is more than one result for VPC UUID: %s!", id)
+}
+
+// Lists VPCs
+func (s *VPCService) ListVPCs(p *ListVPCsParams) (*ListVPCsResponse, error) {
+	resp, err := s.cs.newRequest("listVPCs", p.toURLValues())
+	if err != nil {
+		return nil, err
+	}
+
+	var r ListVPCsResponse
+	if err := json.Unmarshal(resp, &r); err != nil {
+		return nil, err
+	}
+	return &r, nil
+}
+
+type ListVPCsResponse struct {
+	Count int `json:"count"`
+	VPCs []*VPC `json:"vpc"`
+}
+
+type VPC struct {
+	Account string `json:"account,omitempty"`
+	Cidr string `json:"cidr,omitempty"`
+	Created string `json:"created,omitempty"`
+	Displaytext string `json:"displaytext,omitempty"`
+	Distributedvpcrouter bool `json:"distributedvpcrouter,omitempty"`
+	Domain string `json:"domain,omitempty"`
+	Domainid string `json:"domainid,omitempty"`
+	Fordisplay bool `json:"fordisplay,omitempty"`
+	Id string `json:"id,omitempty"`
+	Name string `json:"name,omitempty"`
+	Network []struct {
+		Account string `json:"account,omitempty"`
+		Aclid string `json:"aclid,omitempty"`
+		Acltype string `json:"acltype,omitempty"`
+		Broadcastdomaintype string `json:"broadcastdomaintype,omitempty"`
+		Broadcasturi string `json:"broadcasturi,omitempty"`
+		Canusefordeploy bool `json:"canusefordeploy,omitempty"`
+		Cidr string `json:"cidr,omitempty"`
+		Displaynetwork bool `json:"displaynetwork,omitempty"`
+		Displaytext string `json:"displaytext,omitempty"`
+		Dns1 string `json:"dns1,omitempty"`
+		Dns2 string `json:"dns2,omitempty"`
+		Domain string `json:"domain,omitempty"`
+		Domainid string `json:"domainid,omitempty"`
+		Gateway string `json:"gateway,omitempty"`
+		Id string `json:"id,omitempty"`
+		Ip6cidr string `json:"ip6cidr,omitempty"`
+		Ip6gateway string `json:"ip6gateway,omitempty"`
+		Isdefault bool `json:"isdefault,omitempty"`
+		Ispersistent bool `json:"ispersistent,omitempty"`
+		Issystem bool `json:"issystem,omitempty"`
+		Name string `json:"name,omitempty"`
+		Netmask string `json:"netmask,omitempty"`
+		Networkcidr string `json:"networkcidr,omitempty"`
+		Networkdomain string `json:"networkdomain,omitempty"`
+		Networkofferingavailability string `json:"networkofferingavailability,omitempty"`
+		Networkofferingconservemode bool `json:"networkofferingconservemode,omitempty"`
+		Networkofferingdisplaytext string `json:"networkofferingdisplaytext,omitempty"`
+		Networkofferingid string `json:"networkofferingid,omitempty"`
+		Networkofferingname string `json:"networkofferingname,omitempty"`
+		Physicalnetworkid string `json:"physicalnetworkid,omitempty"`
+
Project string `json:"project,omitempty"` + Projectid string `json:"projectid,omitempty"` + Related string `json:"related,omitempty"` + Reservediprange string `json:"reservediprange,omitempty"` + Restartrequired bool `json:"restartrequired,omitempty"` + Service []struct { + Capability []struct { + Canchooseservicecapability bool `json:"canchooseservicecapability,omitempty"` + Name string `json:"name,omitempty"` + Value string `json:"value,omitempty"` + } `json:"capability,omitempty"` + Name string `json:"name,omitempty"` + Provider []struct { + Canenableindividualservice bool `json:"canenableindividualservice,omitempty"` + Destinationphysicalnetworkid string `json:"destinationphysicalnetworkid,omitempty"` + Id string `json:"id,omitempty"` + Name string `json:"name,omitempty"` + Physicalnetworkid string `json:"physicalnetworkid,omitempty"` + Servicelist []string `json:"servicelist,omitempty"` + State string `json:"state,omitempty"` + } `json:"provider,omitempty"` + } `json:"service,omitempty"` + Specifyipranges bool `json:"specifyipranges,omitempty"` + State string `json:"state,omitempty"` + Strechedl2subnet bool `json:"strechedl2subnet,omitempty"` + Subdomainaccess bool `json:"subdomainaccess,omitempty"` + Tags []struct { + Account string `json:"account,omitempty"` + Customer string `json:"customer,omitempty"` + Domain string `json:"domain,omitempty"` + Domainid string `json:"domainid,omitempty"` + Key string `json:"key,omitempty"` + Project string `json:"project,omitempty"` + Projectid string `json:"projectid,omitempty"` + Resourceid string `json:"resourceid,omitempty"` + Resourcetype string `json:"resourcetype,omitempty"` + Value string `json:"value,omitempty"` + } `json:"tags,omitempty"` + Traffictype string `json:"traffictype,omitempty"` + Type string `json:"type,omitempty"` + Vlan string `json:"vlan,omitempty"` + Vpcid string `json:"vpcid,omitempty"` + Zoneid string `json:"zoneid,omitempty"` + Zonename string `json:"zonename,omitempty"` + Zonesnetworkspans []string `json:"zonesnetworkspans,omitempty"` + } `json:"network,omitempty"` + Networkdomain string `json:"networkdomain,omitempty"` + Project string `json:"project,omitempty"` + Projectid string `json:"projectid,omitempty"` + Regionlevelvpc bool `json:"regionlevelvpc,omitempty"` + Restartrequired bool `json:"restartrequired,omitempty"` + Service []struct { + Capability []struct { + Canchooseservicecapability bool `json:"canchooseservicecapability,omitempty"` + Name string `json:"name,omitempty"` + Value string `json:"value,omitempty"` + } `json:"capability,omitempty"` + Name string `json:"name,omitempty"` + Provider []struct { + Canenableindividualservice bool `json:"canenableindividualservice,omitempty"` + Destinationphysicalnetworkid string `json:"destinationphysicalnetworkid,omitempty"` + Id string `json:"id,omitempty"` + Name string `json:"name,omitempty"` + Physicalnetworkid string `json:"physicalnetworkid,omitempty"` + Servicelist []string `json:"servicelist,omitempty"` + State string `json:"state,omitempty"` + } `json:"provider,omitempty"` + } `json:"service,omitempty"` + State string `json:"state,omitempty"` + Tags []struct { + Account string `json:"account,omitempty"` + Customer string `json:"customer,omitempty"` + Domain string `json:"domain,omitempty"` + Domainid string `json:"domainid,omitempty"` + Key string `json:"key,omitempty"` + Project string `json:"project,omitempty"` + Projectid string `json:"projectid,omitempty"` + Resourceid string `json:"resourceid,omitempty"` + Resourcetype string 
`json:"resourcetype,omitempty"` + Value string `json:"value,omitempty"` + } `json:"tags,omitempty"` + Vpcofferingid string `json:"vpcofferingid,omitempty"` + Zoneid string `json:"zoneid,omitempty"` + Zonename string `json:"zonename,omitempty"` +} + +type DeleteVPCParams struct { + p map[string]interface{} +} + +func (p *DeleteVPCParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["id"]; found { + u.Set("id", v.(string)) + } + return u +} + +func (p *DeleteVPCParams) SetId(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["id"] = v + return +} + +// You should always use this function to get a new DeleteVPCParams instance, +// as then you are sure you have configured all required params +func (s *VPCService) NewDeleteVPCParams(id string) *DeleteVPCParams { + p := &DeleteVPCParams{} + p.p = make(map[string]interface{}) + p.p["id"] = id + return p +} + +// Deletes a VPC +func (s *VPCService) DeleteVPC(p *DeleteVPCParams) (*DeleteVPCResponse, error) { + resp, err := s.cs.newRequest("deleteVPC", p.toURLValues()) + if err != nil { + return nil, err + } + + var r DeleteVPCResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + + // If we have a async client, we need to wait for the async result + if s.cs.async { + b, err := s.cs.GetAsyncJobResult(r.JobID, s.cs.timeout) + if err != nil { + if err == AsyncTimeoutErr { + return &r, err + } + return nil, err + } + + if err := json.Unmarshal(b, &r); err != nil { + return nil, err + } + } + return &r, nil +} + +type DeleteVPCResponse struct { + JobID string `json:"jobid,omitempty"` + Displaytext string `json:"displaytext,omitempty"` + Success bool `json:"success,omitempty"` +} + +type UpdateVPCParams struct { + p map[string]interface{} +} + +func (p *UpdateVPCParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["customid"]; found { + u.Set("customid", v.(string)) + } + if v, found := p.p["displaytext"]; found { + u.Set("displaytext", v.(string)) + } + if v, found := p.p["fordisplay"]; found { + vv := strconv.FormatBool(v.(bool)) + u.Set("fordisplay", vv) + } + if v, found := p.p["id"]; found { + u.Set("id", v.(string)) + } + if v, found := p.p["name"]; found { + u.Set("name", v.(string)) + } + return u +} + +func (p *UpdateVPCParams) SetCustomid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["customid"] = v + return +} + +func (p *UpdateVPCParams) SetDisplaytext(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["displaytext"] = v + return +} + +func (p *UpdateVPCParams) SetFordisplay(v bool) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["fordisplay"] = v + return +} + +func (p *UpdateVPCParams) SetId(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["id"] = v + return +} + +func (p *UpdateVPCParams) SetName(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["name"] = v + return +} + +// You should always use this function to get a new UpdateVPCParams instance, +// as then you are sure you have configured all required params +func (s *VPCService) NewUpdateVPCParams(id string) *UpdateVPCParams { + p := &UpdateVPCParams{} + p.p = make(map[string]interface{}) + p.p["id"] = id + return p +} + +// Updates a VPC +func (s *VPCService) UpdateVPC(p *UpdateVPCParams) (*UpdateVPCResponse, error) { + resp, err := s.cs.newRequest("updateVPC", p.toURLValues()) + if err != 
+		return nil, err
+	}
+
+	var r UpdateVPCResponse
+	if err := json.Unmarshal(resp, &r); err != nil {
+		return nil, err
+	}
+
+	// If we have an async client, we need to wait for the async result
+	if s.cs.async {
+		b, err := s.cs.GetAsyncJobResult(r.JobID, s.cs.timeout)
+		if err != nil {
+			if err == AsyncTimeoutErr {
+				return &r, err
+			}
+			return nil, err
+		}
+
+		b, err = getRawValue(b)
+		if err != nil {
+			return nil, err
+		}
+
+		if err := json.Unmarshal(b, &r); err != nil {
+			return nil, err
+		}
+	}
+	return &r, nil
+}
+
+type UpdateVPCResponse struct {
+	JobID string `json:"jobid,omitempty"`
+	Account string `json:"account,omitempty"`
+	Cidr string `json:"cidr,omitempty"`
+	Created string `json:"created,omitempty"`
+	Displaytext string `json:"displaytext,omitempty"`
+	Distributedvpcrouter bool `json:"distributedvpcrouter,omitempty"`
+	Domain string `json:"domain,omitempty"`
+	Domainid string `json:"domainid,omitempty"`
+	Fordisplay bool `json:"fordisplay,omitempty"`
+	Id string `json:"id,omitempty"`
+	Name string `json:"name,omitempty"`
+	Network []struct {
+		Account string `json:"account,omitempty"`
+		Aclid string `json:"aclid,omitempty"`
+		Acltype string `json:"acltype,omitempty"`
+		Broadcastdomaintype string `json:"broadcastdomaintype,omitempty"`
+		Broadcasturi string `json:"broadcasturi,omitempty"`
+		Canusefordeploy bool `json:"canusefordeploy,omitempty"`
+		Cidr string `json:"cidr,omitempty"`
+		Displaynetwork bool `json:"displaynetwork,omitempty"`
+		Displaytext string `json:"displaytext,omitempty"`
+		Dns1 string `json:"dns1,omitempty"`
+		Dns2 string `json:"dns2,omitempty"`
+		Domain string `json:"domain,omitempty"`
+		Domainid string `json:"domainid,omitempty"`
+		Gateway string `json:"gateway,omitempty"`
+		Id string `json:"id,omitempty"`
+		Ip6cidr string `json:"ip6cidr,omitempty"`
+		Ip6gateway string `json:"ip6gateway,omitempty"`
+		Isdefault bool `json:"isdefault,omitempty"`
+		Ispersistent bool `json:"ispersistent,omitempty"`
+		Issystem bool `json:"issystem,omitempty"`
+		Name string `json:"name,omitempty"`
+		Netmask string `json:"netmask,omitempty"`
+		Networkcidr string `json:"networkcidr,omitempty"`
+		Networkdomain string `json:"networkdomain,omitempty"`
+		Networkofferingavailability string `json:"networkofferingavailability,omitempty"`
+		Networkofferingconservemode bool `json:"networkofferingconservemode,omitempty"`
+		Networkofferingdisplaytext string `json:"networkofferingdisplaytext,omitempty"`
+		Networkofferingid string `json:"networkofferingid,omitempty"`
+		Networkofferingname string `json:"networkofferingname,omitempty"`
+		Physicalnetworkid string `json:"physicalnetworkid,omitempty"`
+		Project string `json:"project,omitempty"`
+		Projectid string `json:"projectid,omitempty"`
+		Related string `json:"related,omitempty"`
+		Reservediprange string `json:"reservediprange,omitempty"`
+		Restartrequired bool `json:"restartrequired,omitempty"`
+		Service []struct {
+			Capability []struct {
+				Canchooseservicecapability bool `json:"canchooseservicecapability,omitempty"`
+				Name string `json:"name,omitempty"`
+				Value string `json:"value,omitempty"`
+			} `json:"capability,omitempty"`
+			Name string `json:"name,omitempty"`
+			Provider []struct {
+				Canenableindividualservice bool `json:"canenableindividualservice,omitempty"`
+				Destinationphysicalnetworkid string `json:"destinationphysicalnetworkid,omitempty"`
+				Id string `json:"id,omitempty"`
+				Name string `json:"name,omitempty"`
+				Physicalnetworkid string `json:"physicalnetworkid,omitempty"`
+				Servicelist []string `json:"servicelist,omitempty"`
+ State string `json:"state,omitempty"` + } `json:"provider,omitempty"` + } `json:"service,omitempty"` + Specifyipranges bool `json:"specifyipranges,omitempty"` + State string `json:"state,omitempty"` + Strechedl2subnet bool `json:"strechedl2subnet,omitempty"` + Subdomainaccess bool `json:"subdomainaccess,omitempty"` + Tags []struct { + Account string `json:"account,omitempty"` + Customer string `json:"customer,omitempty"` + Domain string `json:"domain,omitempty"` + Domainid string `json:"domainid,omitempty"` + Key string `json:"key,omitempty"` + Project string `json:"project,omitempty"` + Projectid string `json:"projectid,omitempty"` + Resourceid string `json:"resourceid,omitempty"` + Resourcetype string `json:"resourcetype,omitempty"` + Value string `json:"value,omitempty"` + } `json:"tags,omitempty"` + Traffictype string `json:"traffictype,omitempty"` + Type string `json:"type,omitempty"` + Vlan string `json:"vlan,omitempty"` + Vpcid string `json:"vpcid,omitempty"` + Zoneid string `json:"zoneid,omitempty"` + Zonename string `json:"zonename,omitempty"` + Zonesnetworkspans []string `json:"zonesnetworkspans,omitempty"` + } `json:"network,omitempty"` + Networkdomain string `json:"networkdomain,omitempty"` + Project string `json:"project,omitempty"` + Projectid string `json:"projectid,omitempty"` + Regionlevelvpc bool `json:"regionlevelvpc,omitempty"` + Restartrequired bool `json:"restartrequired,omitempty"` + Service []struct { + Capability []struct { + Canchooseservicecapability bool `json:"canchooseservicecapability,omitempty"` + Name string `json:"name,omitempty"` + Value string `json:"value,omitempty"` + } `json:"capability,omitempty"` + Name string `json:"name,omitempty"` + Provider []struct { + Canenableindividualservice bool `json:"canenableindividualservice,omitempty"` + Destinationphysicalnetworkid string `json:"destinationphysicalnetworkid,omitempty"` + Id string `json:"id,omitempty"` + Name string `json:"name,omitempty"` + Physicalnetworkid string `json:"physicalnetworkid,omitempty"` + Servicelist []string `json:"servicelist,omitempty"` + State string `json:"state,omitempty"` + } `json:"provider,omitempty"` + } `json:"service,omitempty"` + State string `json:"state,omitempty"` + Tags []struct { + Account string `json:"account,omitempty"` + Customer string `json:"customer,omitempty"` + Domain string `json:"domain,omitempty"` + Domainid string `json:"domainid,omitempty"` + Key string `json:"key,omitempty"` + Project string `json:"project,omitempty"` + Projectid string `json:"projectid,omitempty"` + Resourceid string `json:"resourceid,omitempty"` + Resourcetype string `json:"resourcetype,omitempty"` + Value string `json:"value,omitempty"` + } `json:"tags,omitempty"` + Vpcofferingid string `json:"vpcofferingid,omitempty"` + Zoneid string `json:"zoneid,omitempty"` + Zonename string `json:"zonename,omitempty"` +} + +type RestartVPCParams struct { + p map[string]interface{} +} + +func (p *RestartVPCParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["id"]; found { + u.Set("id", v.(string)) + } + return u +} + +func (p *RestartVPCParams) SetId(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["id"] = v + return +} + +// You should always use this function to get a new RestartVPCParams instance, +// as then you are sure you have configured all required params +func (s *VPCService) NewRestartVPCParams(id string) *RestartVPCParams { + p := &RestartVPCParams{} + p.p = make(map[string]interface{}) + p.p["id"] 
= id
+	return p
+}
+
+// Restarts a VPC
+func (s *VPCService) RestartVPC(p *RestartVPCParams) (*RestartVPCResponse, error) {
+	resp, err := s.cs.newRequest("restartVPC", p.toURLValues())
+	if err != nil {
+		return nil, err
+	}
+
+	var r RestartVPCResponse
+	if err := json.Unmarshal(resp, &r); err != nil {
+		return nil, err
+	}
+
+	// If we have an async client, we need to wait for the async result
+	if s.cs.async {
+		b, err := s.cs.GetAsyncJobResult(r.JobID, s.cs.timeout)
+		if err != nil {
+			if err == AsyncTimeoutErr {
+				return &r, err
+			}
+			return nil, err
+		}
+
+		b, err = getRawValue(b)
+		if err != nil {
+			return nil, err
+		}
+
+		if err := json.Unmarshal(b, &r); err != nil {
+			return nil, err
+		}
+	}
+	return &r, nil
+}
+
+type RestartVPCResponse struct {
+	JobID string `json:"jobid,omitempty"`
+	Account string `json:"account,omitempty"`
+	Cidr string `json:"cidr,omitempty"`
+	Created string `json:"created,omitempty"`
+	Displaytext string `json:"displaytext,omitempty"`
+	Distributedvpcrouter bool `json:"distributedvpcrouter,omitempty"`
+	Domain string `json:"domain,omitempty"`
+	Domainid string `json:"domainid,omitempty"`
+	Fordisplay bool `json:"fordisplay,omitempty"`
+	Id string `json:"id,omitempty"`
+	Name string `json:"name,omitempty"`
+	Network []struct {
+		Account string `json:"account,omitempty"`
+		Aclid string `json:"aclid,omitempty"`
+		Acltype string `json:"acltype,omitempty"`
+		Broadcastdomaintype string `json:"broadcastdomaintype,omitempty"`
+		Broadcasturi string `json:"broadcasturi,omitempty"`
+		Canusefordeploy bool `json:"canusefordeploy,omitempty"`
+		Cidr string `json:"cidr,omitempty"`
+		Displaynetwork bool `json:"displaynetwork,omitempty"`
+		Displaytext string `json:"displaytext,omitempty"`
+		Dns1 string `json:"dns1,omitempty"`
+		Dns2 string `json:"dns2,omitempty"`
+		Domain string `json:"domain,omitempty"`
+		Domainid string `json:"domainid,omitempty"`
+		Gateway string `json:"gateway,omitempty"`
+		Id string `json:"id,omitempty"`
+		Ip6cidr string `json:"ip6cidr,omitempty"`
+		Ip6gateway string `json:"ip6gateway,omitempty"`
+		Isdefault bool `json:"isdefault,omitempty"`
+		Ispersistent bool `json:"ispersistent,omitempty"`
+		Issystem bool `json:"issystem,omitempty"`
+		Name string `json:"name,omitempty"`
+		Netmask string `json:"netmask,omitempty"`
+		Networkcidr string `json:"networkcidr,omitempty"`
+		Networkdomain string `json:"networkdomain,omitempty"`
+		Networkofferingavailability string `json:"networkofferingavailability,omitempty"`
+		Networkofferingconservemode bool `json:"networkofferingconservemode,omitempty"`
+		Networkofferingdisplaytext string `json:"networkofferingdisplaytext,omitempty"`
+		Networkofferingid string `json:"networkofferingid,omitempty"`
+		Networkofferingname string `json:"networkofferingname,omitempty"`
+		Physicalnetworkid string `json:"physicalnetworkid,omitempty"`
+		Project string `json:"project,omitempty"`
+		Projectid string `json:"projectid,omitempty"`
+		Related string `json:"related,omitempty"`
+		Reservediprange string `json:"reservediprange,omitempty"`
+		Restartrequired bool `json:"restartrequired,omitempty"`
+		Service []struct {
+			Capability []struct {
+				Canchooseservicecapability bool `json:"canchooseservicecapability,omitempty"`
+				Name string `json:"name,omitempty"`
+				Value string `json:"value,omitempty"`
+			} `json:"capability,omitempty"`
+			Name string `json:"name,omitempty"`
+			Provider []struct {
+				Canenableindividualservice bool `json:"canenableindividualservice,omitempty"`
+				Destinationphysicalnetworkid string
`json:"destinationphysicalnetworkid,omitempty"` + Id string `json:"id,omitempty"` + Name string `json:"name,omitempty"` + Physicalnetworkid string `json:"physicalnetworkid,omitempty"` + Servicelist []string `json:"servicelist,omitempty"` + State string `json:"state,omitempty"` + } `json:"provider,omitempty"` + } `json:"service,omitempty"` + Specifyipranges bool `json:"specifyipranges,omitempty"` + State string `json:"state,omitempty"` + Strechedl2subnet bool `json:"strechedl2subnet,omitempty"` + Subdomainaccess bool `json:"subdomainaccess,omitempty"` + Tags []struct { + Account string `json:"account,omitempty"` + Customer string `json:"customer,omitempty"` + Domain string `json:"domain,omitempty"` + Domainid string `json:"domainid,omitempty"` + Key string `json:"key,omitempty"` + Project string `json:"project,omitempty"` + Projectid string `json:"projectid,omitempty"` + Resourceid string `json:"resourceid,omitempty"` + Resourcetype string `json:"resourcetype,omitempty"` + Value string `json:"value,omitempty"` + } `json:"tags,omitempty"` + Traffictype string `json:"traffictype,omitempty"` + Type string `json:"type,omitempty"` + Vlan string `json:"vlan,omitempty"` + Vpcid string `json:"vpcid,omitempty"` + Zoneid string `json:"zoneid,omitempty"` + Zonename string `json:"zonename,omitempty"` + Zonesnetworkspans []string `json:"zonesnetworkspans,omitempty"` + } `json:"network,omitempty"` + Networkdomain string `json:"networkdomain,omitempty"` + Project string `json:"project,omitempty"` + Projectid string `json:"projectid,omitempty"` + Regionlevelvpc bool `json:"regionlevelvpc,omitempty"` + Restartrequired bool `json:"restartrequired,omitempty"` + Service []struct { + Capability []struct { + Canchooseservicecapability bool `json:"canchooseservicecapability,omitempty"` + Name string `json:"name,omitempty"` + Value string `json:"value,omitempty"` + } `json:"capability,omitempty"` + Name string `json:"name,omitempty"` + Provider []struct { + Canenableindividualservice bool `json:"canenableindividualservice,omitempty"` + Destinationphysicalnetworkid string `json:"destinationphysicalnetworkid,omitempty"` + Id string `json:"id,omitempty"` + Name string `json:"name,omitempty"` + Physicalnetworkid string `json:"physicalnetworkid,omitempty"` + Servicelist []string `json:"servicelist,omitempty"` + State string `json:"state,omitempty"` + } `json:"provider,omitempty"` + } `json:"service,omitempty"` + State string `json:"state,omitempty"` + Tags []struct { + Account string `json:"account,omitempty"` + Customer string `json:"customer,omitempty"` + Domain string `json:"domain,omitempty"` + Domainid string `json:"domainid,omitempty"` + Key string `json:"key,omitempty"` + Project string `json:"project,omitempty"` + Projectid string `json:"projectid,omitempty"` + Resourceid string `json:"resourceid,omitempty"` + Resourcetype string `json:"resourcetype,omitempty"` + Value string `json:"value,omitempty"` + } `json:"tags,omitempty"` + Vpcofferingid string `json:"vpcofferingid,omitempty"` + Zoneid string `json:"zoneid,omitempty"` + Zonename string `json:"zonename,omitempty"` +} + +type CreateVPCOfferingParams struct { + p map[string]interface{} +} + +func (p *CreateVPCOfferingParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["displaytext"]; found { + u.Set("displaytext", v.(string)) + } + if v, found := p.p["name"]; found { + u.Set("name", v.(string)) + } + if v, found := p.p["servicecapabilitylist"]; found { + i := 0 + for k, vv := range 
v.(map[string]string) { + u.Set(fmt.Sprintf("servicecapabilitylist[%d].key", i), k) + u.Set(fmt.Sprintf("servicecapabilitylist[%d].value", i), vv) + i++ + } + } + if v, found := p.p["serviceofferingid"]; found { + u.Set("serviceofferingid", v.(string)) + } + if v, found := p.p["serviceproviderlist"]; found { + i := 0 + for k, vv := range v.(map[string]string) { + u.Set(fmt.Sprintf("serviceproviderlist[%d].service", i), k) + u.Set(fmt.Sprintf("serviceproviderlist[%d].provider", i), vv) + i++ + } + } + if v, found := p.p["supportedservices"]; found { + vv := strings.Join(v.([]string), ",") + u.Set("supportedservices", vv) + } + return u +} + +func (p *CreateVPCOfferingParams) SetDisplaytext(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["displaytext"] = v + return +} + +func (p *CreateVPCOfferingParams) SetName(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["name"] = v + return +} + +func (p *CreateVPCOfferingParams) SetServicecapabilitylist(v map[string]string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["servicecapabilitylist"] = v + return +} + +func (p *CreateVPCOfferingParams) SetServiceofferingid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["serviceofferingid"] = v + return +} + +func (p *CreateVPCOfferingParams) SetServiceproviderlist(v map[string]string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["serviceproviderlist"] = v + return +} + +func (p *CreateVPCOfferingParams) SetSupportedservices(v []string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["supportedservices"] = v + return +} + +// You should always use this function to get a new CreateVPCOfferingParams instance, +// as then you are sure you have configured all required params +func (s *VPCService) NewCreateVPCOfferingParams(displaytext string, name string, supportedservices []string) *CreateVPCOfferingParams { + p := &CreateVPCOfferingParams{} + p.p = make(map[string]interface{}) + p.p["displaytext"] = displaytext + p.p["name"] = name + p.p["supportedservices"] = supportedservices + return p +} + +// Creates VPC offering +func (s *VPCService) CreateVPCOffering(p *CreateVPCOfferingParams) (*CreateVPCOfferingResponse, error) { + resp, err := s.cs.newRequest("createVPCOffering", p.toURLValues()) + if err != nil { + return nil, err + } + + var r CreateVPCOfferingResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + + // If we have an async client, we need to wait for the async result + if s.cs.async { + b, err := s.cs.GetAsyncJobResult(r.JobID, s.cs.timeout) + if err != nil { + if err == AsyncTimeoutErr { + return &r, err + } + return nil, err + } + + b, err = getRawValue(b) + if err != nil { + return nil, err + } + + if err := json.Unmarshal(b, &r); err != nil { + return nil, err + } + } + return &r, nil +} + +type CreateVPCOfferingResponse struct { + JobID string `json:"jobid,omitempty"` + Created string `json:"created,omitempty"` + Displaytext string `json:"displaytext,omitempty"` + Distributedvpcrouter bool `json:"distributedvpcrouter,omitempty"` + Id string `json:"id,omitempty"` + Isdefault bool `json:"isdefault,omitempty"` + Name string `json:"name,omitempty"` + Service []struct { + Capability []struct { + Canchooseservicecapability bool `json:"canchooseservicecapability,omitempty"` + Name string `json:"name,omitempty"` + Value string `json:"value,omitempty"` + } `json:"capability,omitempty"` + Name string `json:"name,omitempty"` +
Provider []struct { + Canenableindividualservice bool `json:"canenableindividualservice,omitempty"` + Destinationphysicalnetworkid string `json:"destinationphysicalnetworkid,omitempty"` + Id string `json:"id,omitempty"` + Name string `json:"name,omitempty"` + Physicalnetworkid string `json:"physicalnetworkid,omitempty"` + Servicelist []string `json:"servicelist,omitempty"` + State string `json:"state,omitempty"` + } `json:"provider,omitempty"` + } `json:"service,omitempty"` + State string `json:"state,omitempty"` + SupportsregionLevelvpc bool `json:"supportsregionLevelvpc,omitempty"` +} + +type UpdateVPCOfferingParams struct { + p map[string]interface{} +} + +func (p *UpdateVPCOfferingParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["displaytext"]; found { + u.Set("displaytext", v.(string)) + } + if v, found := p.p["id"]; found { + u.Set("id", v.(string)) + } + if v, found := p.p["name"]; found { + u.Set("name", v.(string)) + } + if v, found := p.p["state"]; found { + u.Set("state", v.(string)) + } + return u +} + +func (p *UpdateVPCOfferingParams) SetDisplaytext(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["displaytext"] = v + return +} + +func (p *UpdateVPCOfferingParams) SetId(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["id"] = v + return +} + +func (p *UpdateVPCOfferingParams) SetName(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["name"] = v + return +} + +func (p *UpdateVPCOfferingParams) SetState(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["state"] = v + return +} + +// You should always use this function to get a new UpdateVPCOfferingParams instance, +// as then you are sure you have configured all required params +func (s *VPCService) NewUpdateVPCOfferingParams(id string) *UpdateVPCOfferingParams { + p := &UpdateVPCOfferingParams{} + p.p = make(map[string]interface{}) + p.p["id"] = id + return p +} + +// Updates VPC offering +func (s *VPCService) UpdateVPCOffering(p *UpdateVPCOfferingParams) (*UpdateVPCOfferingResponse, error) { + resp, err := s.cs.newRequest("updateVPCOffering", p.toURLValues()) + if err != nil { + return nil, err + } + + var r UpdateVPCOfferingResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + + // If we have an async client, we need to wait for the async result + if s.cs.async { + b, err := s.cs.GetAsyncJobResult(r.JobID, s.cs.timeout) + if err != nil { + if err == AsyncTimeoutErr { + return &r, err + } + return nil, err + } + + b, err = getRawValue(b) + if err != nil { + return nil, err + } + + if err := json.Unmarshal(b, &r); err != nil { + return nil, err + } + } + return &r, nil +} + +type UpdateVPCOfferingResponse struct { + JobID string `json:"jobid,omitempty"` + Created string `json:"created,omitempty"` + Displaytext string `json:"displaytext,omitempty"` + Distributedvpcrouter bool `json:"distributedvpcrouter,omitempty"` + Id string `json:"id,omitempty"` + Isdefault bool `json:"isdefault,omitempty"` + Name string `json:"name,omitempty"` + Service []struct { + Capability []struct { + Canchooseservicecapability bool `json:"canchooseservicecapability,omitempty"` + Name string `json:"name,omitempty"` + Value string `json:"value,omitempty"` + } `json:"capability,omitempty"` + Name string `json:"name,omitempty"` + Provider []struct { + Canenableindividualservice bool `json:"canenableindividualservice,omitempty"` + Destinationphysicalnetworkid
string `json:"destinationphysicalnetworkid,omitempty"` + Id string `json:"id,omitempty"` + Name string `json:"name,omitempty"` + Physicalnetworkid string `json:"physicalnetworkid,omitempty"` + Servicelist []string `json:"servicelist,omitempty"` + State string `json:"state,omitempty"` + } `json:"provider,omitempty"` + } `json:"service,omitempty"` + State string `json:"state,omitempty"` + SupportsregionLevelvpc bool `json:"supportsregionLevelvpc,omitempty"` +} + +type DeleteVPCOfferingParams struct { + p map[string]interface{} +} + +func (p *DeleteVPCOfferingParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["id"]; found { + u.Set("id", v.(string)) + } + return u +} + +func (p *DeleteVPCOfferingParams) SetId(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["id"] = v + return +} + +// You should always use this function to get a new DeleteVPCOfferingParams instance, +// as then you are sure you have configured all required params +func (s *VPCService) NewDeleteVPCOfferingParams(id string) *DeleteVPCOfferingParams { + p := &DeleteVPCOfferingParams{} + p.p = make(map[string]interface{}) + p.p["id"] = id + return p +} + +// Deletes VPC offering +func (s *VPCService) DeleteVPCOffering(p *DeleteVPCOfferingParams) (*DeleteVPCOfferingResponse, error) { + resp, err := s.cs.newRequest("deleteVPCOffering", p.toURLValues()) + if err != nil { + return nil, err + } + + var r DeleteVPCOfferingResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + + // If we have an async client, we need to wait for the async result + if s.cs.async { + b, err := s.cs.GetAsyncJobResult(r.JobID, s.cs.timeout) + if err != nil { + if err == AsyncTimeoutErr { + return &r, err + } + return nil, err + } + + if err := json.Unmarshal(b, &r); err != nil { + return nil, err + } + } + return &r, nil +} + +type DeleteVPCOfferingResponse struct { + JobID string `json:"jobid,omitempty"` + Displaytext string `json:"displaytext,omitempty"` + Success bool `json:"success,omitempty"` +} + +type ListVPCOfferingsParams struct { + p map[string]interface{} +} + +func (p *ListVPCOfferingsParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["displaytext"]; found { + u.Set("displaytext", v.(string)) + } + if v, found := p.p["id"]; found { + u.Set("id", v.(string)) + } + if v, found := p.p["isdefault"]; found { + vv := strconv.FormatBool(v.(bool)) + u.Set("isdefault", vv) + } + if v, found := p.p["keyword"]; found { + u.Set("keyword", v.(string)) + } + if v, found := p.p["name"]; found { + u.Set("name", v.(string)) + } + if v, found := p.p["page"]; found { + vv := strconv.Itoa(v.(int)) + u.Set("page", vv) + } + if v, found := p.p["pagesize"]; found { + vv := strconv.Itoa(v.(int)) + u.Set("pagesize", vv) + } + if v, found := p.p["state"]; found { + u.Set("state", v.(string)) + } + if v, found := p.p["supportedservices"]; found { + vv := strings.Join(v.([]string), ",") + u.Set("supportedservices", vv) + } + return u +} + +func (p *ListVPCOfferingsParams) SetDisplaytext(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["displaytext"] = v + return +} + +func (p *ListVPCOfferingsParams) SetId(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["id"] = v + return +} + +func (p *ListVPCOfferingsParams) SetIsdefault(v bool) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["isdefault"] = v + return +} + +func (p
*ListVPCOfferingsParams) SetKeyword(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["keyword"] = v + return +} + +func (p *ListVPCOfferingsParams) SetName(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["name"] = v + return +} + +func (p *ListVPCOfferingsParams) SetPage(v int) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["page"] = v + return +} + +func (p *ListVPCOfferingsParams) SetPagesize(v int) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["pagesize"] = v + return +} + +func (p *ListVPCOfferingsParams) SetState(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["state"] = v + return +} + +func (p *ListVPCOfferingsParams) SetSupportedservices(v []string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["supportedservices"] = v + return +} + +// You should always use this function to get a new ListVPCOfferingsParams instance, +// as then you are sure you have configured all required params +func (s *VPCService) NewListVPCOfferingsParams() *ListVPCOfferingsParams { + p := &ListVPCOfferingsParams{} + p.p = make(map[string]interface{}) + return p +} + +// This is a courtesy helper function, which in some cases may not work as expected! +func (s *VPCService) GetVPCOfferingID(name string) (string, error) { + p := &ListVPCOfferingsParams{} + p.p = make(map[string]interface{}) + + p.p["name"] = name + + l, err := s.ListVPCOfferings(p) + if err != nil { + return "", err + } + + if l.Count == 0 { + return "", fmt.Errorf("No match found for %s: %+v", name, l) + } + + if l.Count == 1 { + return l.VPCOfferings[0].Id, nil + } + + if l.Count > 1 { + for _, v := range l.VPCOfferings { + if v.Name == name { + return v.Id, nil + } + } + } + return "", fmt.Errorf("Could not find an exact match for %s: %+v", name, l) +} + +// This is a courtesy helper function, which in some cases may not work as expected! +func (s *VPCService) GetVPCOfferingByName(name string) (*VPCOffering, int, error) { + id, err := s.GetVPCOfferingID(name) + if err != nil { + return nil, -1, err + } + + r, count, err := s.GetVPCOfferingByID(id) + if err != nil { + return nil, count, err + } + return r, count, nil +} + +// This is a courtesy helper function, which in some cases may not work as expected! 
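[Usage sketch, editorial: the courtesy helpers above resolve names to IDs by calling ListVPCOfferings and disambiguating on the result count. A minimal example, assuming the package's NewAsyncClient constructor and its VPC service handle; the endpoint and keys below are placeholders.]

package main

import (
	"fmt"
	"log"

	"github.com/xanzy/go-cloudstack/cloudstack"
)

func main() {
	// Placeholder endpoint and credentials; the final argument enables TLS verification.
	cs := cloudstack.NewAsyncClient("https://cloud.example.com/client/api", "API_KEY", "SECRET_KEY", true)

	// Best-effort lookup: lists offerings by name, then scans for an exact match
	// if more than one is returned (see GetVPCOfferingID above).
	id, err := cs.VPC.GetVPCOfferingID("Default VPC offering")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("VPC offering ID:", id)
}
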
+func (s *VPCService) GetVPCOfferingByID(id string) (*VPCOffering, int, error) { + p := &ListVPCOfferingsParams{} + p.p = make(map[string]interface{}) + + p.p["id"] = id + + l, err := s.ListVPCOfferings(p) + if err != nil { + if strings.Contains(err.Error(), fmt.Sprintf( + "Invalid parameter id value=%s due to incorrect long value format, "+ + "or entity does not exist", id)) { + return nil, 0, fmt.Errorf("No match found for %s: %+v", id, l) + } + return nil, -1, err + } + + if l.Count == 0 { + return nil, l.Count, fmt.Errorf("No match found for %s: %+v", id, l) + } + + if l.Count == 1 { + return l.VPCOfferings[0], l.Count, nil + } + return nil, l.Count, fmt.Errorf("There is more than one result for VPCOffering UUID: %s!", id) +} + +// Lists VPC offerings +func (s *VPCService) ListVPCOfferings(p *ListVPCOfferingsParams) (*ListVPCOfferingsResponse, error) { + resp, err := s.cs.newRequest("listVPCOfferings", p.toURLValues()) + if err != nil { + return nil, err + } + + var r ListVPCOfferingsResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + return &r, nil +} + +type ListVPCOfferingsResponse struct { + Count int `json:"count"` + VPCOfferings []*VPCOffering `json:"vpcoffering"` +} + +type VPCOffering struct { + Created string `json:"created,omitempty"` + Displaytext string `json:"displaytext,omitempty"` + Distributedvpcrouter bool `json:"distributedvpcrouter,omitempty"` + Id string `json:"id,omitempty"` + Isdefault bool `json:"isdefault,omitempty"` + Name string `json:"name,omitempty"` + Service []struct { + Capability []struct { + Canchooseservicecapability bool `json:"canchooseservicecapability,omitempty"` + Name string `json:"name,omitempty"` + Value string `json:"value,omitempty"` + } `json:"capability,omitempty"` + Name string `json:"name,omitempty"` + Provider []struct { + Canenableindividualservice bool `json:"canenableindividualservice,omitempty"` + Destinationphysicalnetworkid string `json:"destinationphysicalnetworkid,omitempty"` + Id string `json:"id,omitempty"` + Name string `json:"name,omitempty"` + Physicalnetworkid string `json:"physicalnetworkid,omitempty"` + Servicelist []string `json:"servicelist,omitempty"` + State string `json:"state,omitempty"` + } `json:"provider,omitempty"` + } `json:"service,omitempty"` + State string `json:"state,omitempty"` + SupportsregionLevelvpc bool `json:"supportsregionLevelvpc,omitempty"` +} + +type CreatePrivateGatewayParams struct { + p map[string]interface{} +} + +func (p *CreatePrivateGatewayParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["aclid"]; found { + u.Set("aclid", v.(string)) + } + if v, found := p.p["gateway"]; found { + u.Set("gateway", v.(string)) + } + if v, found := p.p["ipaddress"]; found { + u.Set("ipaddress", v.(string)) + } + if v, found := p.p["netmask"]; found { + u.Set("netmask", v.(string)) + } + if v, found := p.p["networkofferingid"]; found { + u.Set("networkofferingid", v.(string)) + } + if v, found := p.p["physicalnetworkid"]; found { + u.Set("physicalnetworkid", v.(string)) + } + if v, found := p.p["sourcenatsupported"]; found { + vv := strconv.FormatBool(v.(bool)) + u.Set("sourcenatsupported", vv) + } + if v, found := p.p["vlan"]; found { + u.Set("vlan", v.(string)) + } + if v, found := p.p["vpcid"]; found { + u.Set("vpcid", v.(string)) + } + return u +} + +func (p *CreatePrivateGatewayParams) SetAclid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["aclid"] = v + return +} + +func (p
*CreatePrivateGatewayParams) SetGateway(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["gateway"] = v + return +} + +func (p *CreatePrivateGatewayParams) SetIpaddress(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["ipaddress"] = v + return +} + +func (p *CreatePrivateGatewayParams) SetNetmask(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["netmask"] = v + return +} + +func (p *CreatePrivateGatewayParams) SetNetworkofferingid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["networkofferingid"] = v + return +} + +func (p *CreatePrivateGatewayParams) SetPhysicalnetworkid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["physicalnetworkid"] = v + return +} + +func (p *CreatePrivateGatewayParams) SetSourcenatsupported(v bool) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["sourcenatsupported"] = v + return +} + +func (p *CreatePrivateGatewayParams) SetVlan(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["vlan"] = v + return +} + +func (p *CreatePrivateGatewayParams) SetVpcid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["vpcid"] = v + return +} + +// You should always use this function to get a new CreatePrivateGatewayParams instance, +// as then you are sure you have configured all required params +func (s *VPCService) NewCreatePrivateGatewayParams(gateway string, ipaddress string, netmask string, vlan string, vpcid string) *CreatePrivateGatewayParams { + p := &CreatePrivateGatewayParams{} + p.p = make(map[string]interface{}) + p.p["gateway"] = gateway + p.p["ipaddress"] = ipaddress + p.p["netmask"] = netmask + p.p["vlan"] = vlan + p.p["vpcid"] = vpcid + return p +} + +// Creates a private gateway +func (s *VPCService) CreatePrivateGateway(p *CreatePrivateGatewayParams) (*CreatePrivateGatewayResponse, error) { + resp, err := s.cs.newRequest("createPrivateGateway", p.toURLValues()) + if err != nil { + return nil, err + } + + var r CreatePrivateGatewayResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + + // If we have an async client, we need to wait for the async result + if s.cs.async { + b, err := s.cs.GetAsyncJobResult(r.JobID, s.cs.timeout) + if err != nil { + if err == AsyncTimeoutErr { + return &r, err + } + return nil, err + } + + b, err = getRawValue(b) + if err != nil { + return nil, err + } + + if err := json.Unmarshal(b, &r); err != nil { + return nil, err + } + } + return &r, nil +} + +type CreatePrivateGatewayResponse struct { + JobID string `json:"jobid,omitempty"` + Account string `json:"account,omitempty"` + Aclid string `json:"aclid,omitempty"` + Domain string `json:"domain,omitempty"` + Domainid string `json:"domainid,omitempty"` + Gateway string `json:"gateway,omitempty"` + Id string `json:"id,omitempty"` + Ipaddress string `json:"ipaddress,omitempty"` + Netmask string `json:"netmask,omitempty"` + Physicalnetworkid string `json:"physicalnetworkid,omitempty"` + Project string `json:"project,omitempty"` + Projectid string `json:"projectid,omitempty"` + Sourcenatsupported bool `json:"sourcenatsupported,omitempty"` + State string `json:"state,omitempty"` + Vlan string `json:"vlan,omitempty"` + Vpcid string `json:"vpcid,omitempty"` + Zoneid string `json:"zoneid,omitempty"` + Zonename string `json:"zonename,omitempty"` +} + +type ListPrivateGatewaysParams struct { + p map[string]interface{} +} + +func (p
*ListPrivateGatewaysParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["account"]; found { + u.Set("account", v.(string)) + } + if v, found := p.p["domainid"]; found { + u.Set("domainid", v.(string)) + } + if v, found := p.p["id"]; found { + u.Set("id", v.(string)) + } + if v, found := p.p["ipaddress"]; found { + u.Set("ipaddress", v.(string)) + } + if v, found := p.p["isrecursive"]; found { + vv := strconv.FormatBool(v.(bool)) + u.Set("isrecursive", vv) + } + if v, found := p.p["keyword"]; found { + u.Set("keyword", v.(string)) + } + if v, found := p.p["listall"]; found { + vv := strconv.FormatBool(v.(bool)) + u.Set("listall", vv) + } + if v, found := p.p["page"]; found { + vv := strconv.Itoa(v.(int)) + u.Set("page", vv) + } + if v, found := p.p["pagesize"]; found { + vv := strconv.Itoa(v.(int)) + u.Set("pagesize", vv) + } + if v, found := p.p["projectid"]; found { + u.Set("projectid", v.(string)) + } + if v, found := p.p["state"]; found { + u.Set("state", v.(string)) + } + if v, found := p.p["vlan"]; found { + u.Set("vlan", v.(string)) + } + if v, found := p.p["vpcid"]; found { + u.Set("vpcid", v.(string)) + } + return u +} + +func (p *ListPrivateGatewaysParams) SetAccount(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["account"] = v + return +} + +func (p *ListPrivateGatewaysParams) SetDomainid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["domainid"] = v + return +} + +func (p *ListPrivateGatewaysParams) SetId(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["id"] = v + return +} + +func (p *ListPrivateGatewaysParams) SetIpaddress(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["ipaddress"] = v + return +} + +func (p *ListPrivateGatewaysParams) SetIsrecursive(v bool) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["isrecursive"] = v + return +} + +func (p *ListPrivateGatewaysParams) SetKeyword(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["keyword"] = v + return +} + +func (p *ListPrivateGatewaysParams) SetListall(v bool) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["listall"] = v + return +} + +func (p *ListPrivateGatewaysParams) SetPage(v int) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["page"] = v + return +} + +func (p *ListPrivateGatewaysParams) SetPagesize(v int) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["pagesize"] = v + return +} + +func (p *ListPrivateGatewaysParams) SetProjectid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["projectid"] = v + return +} + +func (p *ListPrivateGatewaysParams) SetState(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["state"] = v + return +} + +func (p *ListPrivateGatewaysParams) SetVlan(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["vlan"] = v + return +} + +func (p *ListPrivateGatewaysParams) SetVpcid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["vpcid"] = v + return +} + +// You should always use this function to get a new ListPrivateGatewaysParams instance, +// as then you are sure you have configured all required params +func (s *VPCService) NewListPrivateGatewaysParams() *ListPrivateGatewaysParams { + p := &ListPrivateGatewaysParams{} + p.p = make(map[string]interface{}) + return p +} + +// This is a courtesy helper function, 
which in some cases may not work as expected! +func (s *VPCService) GetPrivateGatewayByID(id string) (*PrivateGateway, int, error) { + p := &ListPrivateGatewaysParams{} + p.p = make(map[string]interface{}) + + p.p["id"] = id + + l, err := s.ListPrivateGateways(p) + if err != nil { + if strings.Contains(err.Error(), fmt.Sprintf( + "Invalid parameter id value=%s due to incorrect long value format, "+ + "or entity does not exist", id)) { + return nil, 0, fmt.Errorf("No match found for %s: %+v", id, l) + } + return nil, -1, err + } + + if l.Count == 0 { + // If no matches, search all projects + p.p["projectid"] = "-1" + + l, err = s.ListPrivateGateways(p) + if err != nil { + if strings.Contains(err.Error(), fmt.Sprintf( + "Invalid parameter id value=%s due to incorrect long value format, "+ + "or entity does not exist", id)) { + return nil, 0, fmt.Errorf("No match found for %s: %+v", id, l) + } + return nil, -1, err + } + } + + if l.Count == 0 { + return nil, l.Count, fmt.Errorf("No match found for %s: %+v", id, l) + } + + if l.Count == 1 { + return l.PrivateGateways[0], l.Count, nil + } + return nil, l.Count, fmt.Errorf("There is more than one result for PrivateGateway UUID: %s!", id) +} + +// List private gateways +func (s *VPCService) ListPrivateGateways(p *ListPrivateGatewaysParams) (*ListPrivateGatewaysResponse, error) { + resp, err := s.cs.newRequest("listPrivateGateways", p.toURLValues()) + if err != nil { + return nil, err + } + + var r ListPrivateGatewaysResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + return &r, nil +} + +type ListPrivateGatewaysResponse struct { + Count int `json:"count"` + PrivateGateways []*PrivateGateway `json:"privategateway"` +} + +type PrivateGateway struct { + Account string `json:"account,omitempty"` + Aclid string `json:"aclid,omitempty"` + Domain string `json:"domain,omitempty"` + Domainid string `json:"domainid,omitempty"` + Gateway string `json:"gateway,omitempty"` + Id string `json:"id,omitempty"` + Ipaddress string `json:"ipaddress,omitempty"` + Netmask string `json:"netmask,omitempty"` + Physicalnetworkid string `json:"physicalnetworkid,omitempty"` + Project string `json:"project,omitempty"` + Projectid string `json:"projectid,omitempty"` + Sourcenatsupported bool `json:"sourcenatsupported,omitempty"` + State string `json:"state,omitempty"` + Vlan string `json:"vlan,omitempty"` + Vpcid string `json:"vpcid,omitempty"` + Zoneid string `json:"zoneid,omitempty"` + Zonename string `json:"zonename,omitempty"` +} + +type DeletePrivateGatewayParams struct { + p map[string]interface{} +} + +func (p *DeletePrivateGatewayParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["id"]; found { + u.Set("id", v.(string)) + } + return u +} + +func (p *DeletePrivateGatewayParams) SetId(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["id"] = v + return +} + +// You should always use this function to get a new DeletePrivateGatewayParams instance, +// as then you are sure you have configured all required params +func (s *VPCService) NewDeletePrivateGatewayParams(id string) *DeletePrivateGatewayParams { + p := &DeletePrivateGatewayParams{} + p.p = make(map[string]interface{}) + p.p["id"] = id + return p +} + +// Deletes a private gateway +func (s *VPCService) DeletePrivateGateway(p *DeletePrivateGatewayParams) (*DeletePrivateGatewayResponse, error) { + resp, err := s.cs.newRequest("deletePrivateGateway", p.toURLValues()) + if err != nil { + return nil, err +
} + + var r DeletePrivateGatewayResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + + // If we have an async client, we need to wait for the async result + if s.cs.async { + b, err := s.cs.GetAsyncJobResult(r.JobID, s.cs.timeout) + if err != nil { + if err == AsyncTimeoutErr { + return &r, err + } + return nil, err + } + + if err := json.Unmarshal(b, &r); err != nil { + return nil, err + } + } + return &r, nil +} + +type DeletePrivateGatewayResponse struct { + JobID string `json:"jobid,omitempty"` + Displaytext string `json:"displaytext,omitempty"` + Success bool `json:"success,omitempty"` +} + +type CreateStaticRouteParams struct { + p map[string]interface{} +} + +func (p *CreateStaticRouteParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["cidr"]; found { + u.Set("cidr", v.(string)) + } + if v, found := p.p["gatewayid"]; found { + u.Set("gatewayid", v.(string)) + } + return u +} + +func (p *CreateStaticRouteParams) SetCidr(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["cidr"] = v + return +} + +func (p *CreateStaticRouteParams) SetGatewayid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["gatewayid"] = v + return +} + +// You should always use this function to get a new CreateStaticRouteParams instance, +// as then you are sure you have configured all required params +func (s *VPCService) NewCreateStaticRouteParams(cidr string, gatewayid string) *CreateStaticRouteParams { + p := &CreateStaticRouteParams{} + p.p = make(map[string]interface{}) + p.p["cidr"] = cidr + p.p["gatewayid"] = gatewayid + return p +} + +// Creates a static route +func (s *VPCService) CreateStaticRoute(p *CreateStaticRouteParams) (*CreateStaticRouteResponse, error) { + resp, err := s.cs.newRequest("createStaticRoute", p.toURLValues()) + if err != nil { + return nil, err + } + + var r CreateStaticRouteResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + + // If we have an async client, we need to wait for the async result + if s.cs.async { + b, err := s.cs.GetAsyncJobResult(r.JobID, s.cs.timeout) + if err != nil { + if err == AsyncTimeoutErr { + return &r, err + } + return nil, err + } + + b, err = getRawValue(b) + if err != nil { + return nil, err + } + + if err := json.Unmarshal(b, &r); err != nil { + return nil, err + } + } + return &r, nil +} + +type CreateStaticRouteResponse struct { + JobID string `json:"jobid,omitempty"` + Account string `json:"account,omitempty"` + Cidr string `json:"cidr,omitempty"` + Domain string `json:"domain,omitempty"` + Domainid string `json:"domainid,omitempty"` + Gatewayid string `json:"gatewayid,omitempty"` + Id string `json:"id,omitempty"` + Project string `json:"project,omitempty"` + Projectid string `json:"projectid,omitempty"` + State string `json:"state,omitempty"` + Tags []struct { + Account string `json:"account,omitempty"` + Customer string `json:"customer,omitempty"` + Domain string `json:"domain,omitempty"` + Domainid string `json:"domainid,omitempty"` + Key string `json:"key,omitempty"` + Project string `json:"project,omitempty"` + Projectid string `json:"projectid,omitempty"` + Resourceid string `json:"resourceid,omitempty"` + Resourcetype string `json:"resourcetype,omitempty"` + Value string `json:"value,omitempty"` + } `json:"tags,omitempty"` + Vpcid string `json:"vpcid,omitempty"` +} + +type DeleteStaticRouteParams struct { + p map[string]interface{} +} + +func (p *DeleteStaticRouteParams)
toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["id"]; found { + u.Set("id", v.(string)) + } + return u +} + +func (p *DeleteStaticRouteParams) SetId(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["id"] = v + return +} + +// You should always use this function to get a new DeleteStaticRouteParams instance, +// as then you are sure you have configured all required params +func (s *VPCService) NewDeleteStaticRouteParams(id string) *DeleteStaticRouteParams { + p := &DeleteStaticRouteParams{} + p.p = make(map[string]interface{}) + p.p["id"] = id + return p +} + +// Deletes a static route +func (s *VPCService) DeleteStaticRoute(p *DeleteStaticRouteParams) (*DeleteStaticRouteResponse, error) { + resp, err := s.cs.newRequest("deleteStaticRoute", p.toURLValues()) + if err != nil { + return nil, err + } + + var r DeleteStaticRouteResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + + // If we have an async client, we need to wait for the async result + if s.cs.async { + b, err := s.cs.GetAsyncJobResult(r.JobID, s.cs.timeout) + if err != nil { + if err == AsyncTimeoutErr { + return &r, err + } + return nil, err + } + + if err := json.Unmarshal(b, &r); err != nil { + return nil, err + } + } + return &r, nil +} + +type DeleteStaticRouteResponse struct { + JobID string `json:"jobid,omitempty"` + Displaytext string `json:"displaytext,omitempty"` + Success bool `json:"success,omitempty"` +} + +type ListStaticRoutesParams struct { + p map[string]interface{} +} + +func (p *ListStaticRoutesParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["account"]; found { + u.Set("account", v.(string)) + } + if v, found := p.p["domainid"]; found { + u.Set("domainid", v.(string)) + } + if v, found := p.p["gatewayid"]; found { + u.Set("gatewayid", v.(string)) + } + if v, found := p.p["id"]; found { + u.Set("id", v.(string)) + } + if v, found := p.p["isrecursive"]; found { + vv := strconv.FormatBool(v.(bool)) + u.Set("isrecursive", vv) + } + if v, found := p.p["keyword"]; found { + u.Set("keyword", v.(string)) + } + if v, found := p.p["listall"]; found { + vv := strconv.FormatBool(v.(bool)) + u.Set("listall", vv) + } + if v, found := p.p["page"]; found { + vv := strconv.Itoa(v.(int)) + u.Set("page", vv) + } + if v, found := p.p["pagesize"]; found { + vv := strconv.Itoa(v.(int)) + u.Set("pagesize", vv) + } + if v, found := p.p["projectid"]; found { + u.Set("projectid", v.(string)) + } + if v, found := p.p["tags"]; found { + i := 0 + for k, vv := range v.(map[string]string) { + u.Set(fmt.Sprintf("tags[%d].key", i), k) + u.Set(fmt.Sprintf("tags[%d].value", i), vv) + i++ + } + } + if v, found := p.p["vpcid"]; found { + u.Set("vpcid", v.(string)) + } + return u +} + +func (p *ListStaticRoutesParams) SetAccount(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["account"] = v + return +} + +func (p *ListStaticRoutesParams) SetDomainid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["domainid"] = v + return +} + +func (p *ListStaticRoutesParams) SetGatewayid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["gatewayid"] = v + return +} + +func (p *ListStaticRoutesParams) SetId(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["id"] = v + return +} + +func (p *ListStaticRoutesParams) SetIsrecursive(v bool) { + if p.p == nil { + p.p =
make(map[string]interface{}) + } + p.p["isrecursive"] = v + return +} + +func (p *ListStaticRoutesParams) SetKeyword(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["keyword"] = v + return +} + +func (p *ListStaticRoutesParams) SetListall(v bool) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["listall"] = v + return +} + +func (p *ListStaticRoutesParams) SetPage(v int) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["page"] = v + return +} + +func (p *ListStaticRoutesParams) SetPagesize(v int) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["pagesize"] = v + return +} + +func (p *ListStaticRoutesParams) SetProjectid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["projectid"] = v + return +} + +func (p *ListStaticRoutesParams) SetTags(v map[string]string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["tags"] = v + return +} + +func (p *ListStaticRoutesParams) SetVpcid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["vpcid"] = v + return +} + +// You should always use this function to get a new ListStaticRoutesParams instance, +// as then you are sure you have configured all required params +func (s *VPCService) NewListStaticRoutesParams() *ListStaticRoutesParams { + p := &ListStaticRoutesParams{} + p.p = make(map[string]interface{}) + return p +} + +// This is a courtesy helper function, which in some cases may not work as expected! +func (s *VPCService) GetStaticRouteByID(id string) (*StaticRoute, int, error) { + p := &ListStaticRoutesParams{} + p.p = make(map[string]interface{}) + + p.p["id"] = id + + l, err := s.ListStaticRoutes(p) + if err != nil { + if strings.Contains(err.Error(), fmt.Sprintf( + "Invalid parameter id value=%s due to incorrect long value format, "+ + "or entity does not exist", id)) { + return nil, 0, fmt.Errorf("No match found for %s: %+v", id, l) + } + return nil, -1, err + } + + if l.Count == 0 { + // If no matches, search all projects + p.p["projectid"] = "-1" + + l, err = s.ListStaticRoutes(p) + if err != nil { + if strings.Contains(err.Error(), fmt.Sprintf( + "Invalid parameter id value=%s due to incorrect long value format, "+ + "or entity does not exist", id)) { + return nil, 0, fmt.Errorf("No match found for %s: %+v", id, l) + } + return nil, -1, err + } + } + + if l.Count == 0 { + return nil, l.Count, fmt.Errorf("No match found for %s: %+v", id, l) + } + + if l.Count == 1 { + return l.StaticRoutes[0], l.Count, nil + } + return nil, l.Count, fmt.Errorf("There is more than one result for StaticRoute UUID: %s!", id) +} + +// Lists all static routes +func (s *VPCService) ListStaticRoutes(p *ListStaticRoutesParams) (*ListStaticRoutesResponse, error) { + resp, err := s.cs.newRequest("listStaticRoutes", p.toURLValues()) + if err != nil { + return nil, err + } + + var r ListStaticRoutesResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + return &r, nil +} + +type ListStaticRoutesResponse struct { + Count int `json:"count"` + StaticRoutes []*StaticRoute `json:"staticroute"` +} + +type StaticRoute struct { + Account string `json:"account,omitempty"` + Cidr string `json:"cidr,omitempty"` + Domain string `json:"domain,omitempty"` + Domainid string `json:"domainid,omitempty"` + Gatewayid string `json:"gatewayid,omitempty"` + Id string `json:"id,omitempty"` + Project string `json:"project,omitempty"` + Projectid string `json:"projectid,omitempty"` + State string
`json:"state,omitempty"` + Tags []struct { + Account string `json:"account,omitempty"` + Customer string `json:"customer,omitempty"` + Domain string `json:"domain,omitempty"` + Domainid string `json:"domainid,omitempty"` + Key string `json:"key,omitempty"` + Project string `json:"project,omitempty"` + Projectid string `json:"projectid,omitempty"` + Resourceid string `json:"resourceid,omitempty"` + Resourcetype string `json:"resourcetype,omitempty"` + Value string `json:"value,omitempty"` + } `json:"tags,omitempty"` + Vpcid string `json:"vpcid,omitempty"` +} diff --git a/vendor/github.com/xanzy/go-cloudstack/cloudstack/VPNService.go b/vendor/github.com/xanzy/go-cloudstack/cloudstack/VPNService.go new file mode 100644 index 000000000..37550c4eb --- /dev/null +++ b/vendor/github.com/xanzy/go-cloudstack/cloudstack/VPNService.go @@ -0,0 +1,2881 @@ +// +// Copyright 2014, Sander van Harmelen +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package cloudstack + +import ( + "encoding/json" + "fmt" + "net/url" + "strconv" + "strings" +) + +type CreateRemoteAccessVpnParams struct { + p map[string]interface{} +} + +func (p *CreateRemoteAccessVpnParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["account"]; found { + u.Set("account", v.(string)) + } + if v, found := p.p["domainid"]; found { + u.Set("domainid", v.(string)) + } + if v, found := p.p["fordisplay"]; found { + vv := strconv.FormatBool(v.(bool)) + u.Set("fordisplay", vv) + } + if v, found := p.p["iprange"]; found { + u.Set("iprange", v.(string)) + } + if v, found := p.p["openfirewall"]; found { + vv := strconv.FormatBool(v.(bool)) + u.Set("openfirewall", vv) + } + if v, found := p.p["publicipid"]; found { + u.Set("publicipid", v.(string)) + } + return u +} + +func (p *CreateRemoteAccessVpnParams) SetAccount(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["account"] = v + return +} + +func (p *CreateRemoteAccessVpnParams) SetDomainid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["domainid"] = v + return +} + +func (p *CreateRemoteAccessVpnParams) SetFordisplay(v bool) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["fordisplay"] = v + return +} + +func (p *CreateRemoteAccessVpnParams) SetIprange(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["iprange"] = v + return +} + +func (p *CreateRemoteAccessVpnParams) SetOpenfirewall(v bool) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["openfirewall"] = v + return +} + +func (p *CreateRemoteAccessVpnParams) SetPublicipid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["publicipid"] = v + return +} + +// You should always use this function to get a new CreateRemoteAccessVpnParams instance, +// as then you are sure you have configured all required params +func (s *VPNService) NewCreateRemoteAccessVpnParams(publicipid string) *CreateRemoteAccessVpnParams { + p := 
&CreateRemoteAccessVpnParams{} + p.p = make(map[string]interface{}) + p.p["publicipid"] = publicipid + return p +} + +// Creates an l2tp/ipsec remote access vpn +func (s *VPNService) CreateRemoteAccessVpn(p *CreateRemoteAccessVpnParams) (*CreateRemoteAccessVpnResponse, error) { + resp, err := s.cs.newRequest("createRemoteAccessVpn", p.toURLValues()) + if err != nil { + return nil, err + } + + var r CreateRemoteAccessVpnResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + + // If we have an async client, we need to wait for the async result + if s.cs.async { + b, err := s.cs.GetAsyncJobResult(r.JobID, s.cs.timeout) + if err != nil { + if err == AsyncTimeoutErr { + return &r, err + } + return nil, err + } + + b, err = getRawValue(b) + if err != nil { + return nil, err + } + + if err := json.Unmarshal(b, &r); err != nil { + return nil, err + } + } + return &r, nil +} + +type CreateRemoteAccessVpnResponse struct { + JobID string `json:"jobid,omitempty"` + Account string `json:"account,omitempty"` + Domain string `json:"domain,omitempty"` + Domainid string `json:"domainid,omitempty"` + Fordisplay bool `json:"fordisplay,omitempty"` + Id string `json:"id,omitempty"` + Iprange string `json:"iprange,omitempty"` + Presharedkey string `json:"presharedkey,omitempty"` + Project string `json:"project,omitempty"` + Projectid string `json:"projectid,omitempty"` + Publicip string `json:"publicip,omitempty"` + Publicipid string `json:"publicipid,omitempty"` + State string `json:"state,omitempty"` +} + +type DeleteRemoteAccessVpnParams struct { + p map[string]interface{} +} + +func (p *DeleteRemoteAccessVpnParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["publicipid"]; found { + u.Set("publicipid", v.(string)) + } + return u +} + +func (p *DeleteRemoteAccessVpnParams) SetPublicipid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["publicipid"] = v + return +} + +// You should always use this function to get a new DeleteRemoteAccessVpnParams instance, +// as then you are sure you have configured all required params +func (s *VPNService) NewDeleteRemoteAccessVpnParams(publicipid string) *DeleteRemoteAccessVpnParams { + p := &DeleteRemoteAccessVpnParams{} + p.p = make(map[string]interface{}) + p.p["publicipid"] = publicipid + return p +} + +// Destroys an l2tp/ipsec remote access vpn +func (s *VPNService) DeleteRemoteAccessVpn(p *DeleteRemoteAccessVpnParams) (*DeleteRemoteAccessVpnResponse, error) { + resp, err := s.cs.newRequest("deleteRemoteAccessVpn", p.toURLValues()) + if err != nil { + return nil, err + } + + var r DeleteRemoteAccessVpnResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + + // If we have an async client, we need to wait for the async result + if s.cs.async { + b, err := s.cs.GetAsyncJobResult(r.JobID, s.cs.timeout) + if err != nil { + if err == AsyncTimeoutErr { + return &r, err + } + return nil, err + } + + if err := json.Unmarshal(b, &r); err != nil { + return nil, err + } + } + return &r, nil +} + +type DeleteRemoteAccessVpnResponse struct { + JobID string `json:"jobid,omitempty"` + Displaytext string `json:"displaytext,omitempty"` + Success bool `json:"success,omitempty"` +} + +type ListRemoteAccessVpnsParams struct { + p map[string]interface{} +} + +func (p *ListRemoteAccessVpnsParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["account"]; found { + u.Set("account", v.(string)) + } +
if v, found := p.p["domainid"]; found { + u.Set("domainid", v.(string)) + } + if v, found := p.p["fordisplay"]; found { + vv := strconv.FormatBool(v.(bool)) + u.Set("fordisplay", vv) + } + if v, found := p.p["id"]; found { + u.Set("id", v.(string)) + } + if v, found := p.p["isrecursive"]; found { + vv := strconv.FormatBool(v.(bool)) + u.Set("isrecursive", vv) + } + if v, found := p.p["keyword"]; found { + u.Set("keyword", v.(string)) + } + if v, found := p.p["listall"]; found { + vv := strconv.FormatBool(v.(bool)) + u.Set("listall", vv) + } + if v, found := p.p["networkid"]; found { + u.Set("networkid", v.(string)) + } + if v, found := p.p["page"]; found { + vv := strconv.Itoa(v.(int)) + u.Set("page", vv) + } + if v, found := p.p["pagesize"]; found { + vv := strconv.Itoa(v.(int)) + u.Set("pagesize", vv) + } + if v, found := p.p["projectid"]; found { + u.Set("projectid", v.(string)) + } + if v, found := p.p["publicipid"]; found { + u.Set("publicipid", v.(string)) + } + return u +} + +func (p *ListRemoteAccessVpnsParams) SetAccount(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["account"] = v + return +} + +func (p *ListRemoteAccessVpnsParams) SetDomainid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["domainid"] = v + return +} + +func (p *ListRemoteAccessVpnsParams) SetFordisplay(v bool) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["fordisplay"] = v + return +} + +func (p *ListRemoteAccessVpnsParams) SetId(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["id"] = v + return +} + +func (p *ListRemoteAccessVpnsParams) SetIsrecursive(v bool) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["isrecursive"] = v + return +} + +func (p *ListRemoteAccessVpnsParams) SetKeyword(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["keyword"] = v + return +} + +func (p *ListRemoteAccessVpnsParams) SetListall(v bool) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["listall"] = v + return +} + +func (p *ListRemoteAccessVpnsParams) SetNetworkid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["networkid"] = v + return +} + +func (p *ListRemoteAccessVpnsParams) SetPage(v int) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["page"] = v + return +} + +func (p *ListRemoteAccessVpnsParams) SetPagesize(v int) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["pagesize"] = v + return +} + +func (p *ListRemoteAccessVpnsParams) SetProjectid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["projectid"] = v + return +} + +func (p *ListRemoteAccessVpnsParams) SetPublicipid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["publicipid"] = v + return +} + +// You should always use this function to get a new ListRemoteAccessVpnsParams instance, +// as then you are sure you have configured all required params +func (s *VPNService) NewListRemoteAccessVpnsParams() *ListRemoteAccessVpnsParams { + p := &ListRemoteAccessVpnsParams{} + p.p = make(map[string]interface{}) + return p +} + +// This is a courtesy helper function, which in some cases may not work as expected! 
+func (s *VPNService) GetRemoteAccessVpnByID(id string) (*RemoteAccessVpn, int, error) { + p := &ListRemoteAccessVpnsParams{} + p.p = make(map[string]interface{}) + + p.p["id"] = id + + l, err := s.ListRemoteAccessVpns(p) + if err != nil { + if strings.Contains(err.Error(), fmt.Sprintf( + "Invalid parameter id value=%s due to incorrect long value format, "+ + "or entity does not exist", id)) { + return nil, 0, fmt.Errorf("No match found for %s: %+v", id, l) + } + return nil, -1, err + } + + if l.Count == 0 { + // If no matches, search all projects + p.p["projectid"] = "-1" + + l, err = s.ListRemoteAccessVpns(p) + if err != nil { + if strings.Contains(err.Error(), fmt.Sprintf( + "Invalid parameter id value=%s due to incorrect long value format, "+ + "or entity does not exist", id)) { + return nil, 0, fmt.Errorf("No match found for %s: %+v", id, l) + } + return nil, -1, err + } + } + + if l.Count == 0 { + return nil, l.Count, fmt.Errorf("No match found for %s: %+v", id, l) + } + + if l.Count == 1 { + return l.RemoteAccessVpns[0], l.Count, nil + } + return nil, l.Count, fmt.Errorf("There is more than one result for RemoteAccessVpn UUID: %s!", id) +} + +// Lists remote access vpns +func (s *VPNService) ListRemoteAccessVpns(p *ListRemoteAccessVpnsParams) (*ListRemoteAccessVpnsResponse, error) { + resp, err := s.cs.newRequest("listRemoteAccessVpns", p.toURLValues()) + if err != nil { + return nil, err + } + + var r ListRemoteAccessVpnsResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + return &r, nil +} + +type ListRemoteAccessVpnsResponse struct { + Count int `json:"count"` + RemoteAccessVpns []*RemoteAccessVpn `json:"remoteaccessvpn"` +} + +type RemoteAccessVpn struct { + Account string `json:"account,omitempty"` + Domain string `json:"domain,omitempty"` + Domainid string `json:"domainid,omitempty"` + Fordisplay bool `json:"fordisplay,omitempty"` + Id string `json:"id,omitempty"` + Iprange string `json:"iprange,omitempty"` + Presharedkey string `json:"presharedkey,omitempty"` + Project string `json:"project,omitempty"` + Projectid string `json:"projectid,omitempty"` + Publicip string `json:"publicip,omitempty"` + Publicipid string `json:"publicipid,omitempty"` + State string `json:"state,omitempty"` +} + +type UpdateRemoteAccessVpnParams struct { + p map[string]interface{} +} + +func (p *UpdateRemoteAccessVpnParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["customid"]; found { + u.Set("customid", v.(string)) + } + if v, found := p.p["fordisplay"]; found { + vv := strconv.FormatBool(v.(bool)) + u.Set("fordisplay", vv) + } + if v, found := p.p["id"]; found { + u.Set("id", v.(string)) + } + return u +} + +func (p *UpdateRemoteAccessVpnParams) SetCustomid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["customid"] = v + return +} + +func (p *UpdateRemoteAccessVpnParams) SetFordisplay(v bool) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["fordisplay"] = v + return +} + +func (p *UpdateRemoteAccessVpnParams) SetId(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["id"] = v + return +} + +// You should always use this function to get a new UpdateRemoteAccessVpnParams instance, +// as then you are sure you have configured all required params +func (s *VPNService) NewUpdateRemoteAccessVpnParams(id string) *UpdateRemoteAccessVpnParams { + p := &UpdateRemoteAccessVpnParams{} + p.p = make(map[string]interface{}) + p.p["id"] = id + return p
+} + +// Updates remote access vpn +func (s *VPNService) UpdateRemoteAccessVpn(p *UpdateRemoteAccessVpnParams) (*UpdateRemoteAccessVpnResponse, error) { + resp, err := s.cs.newRequest("updateRemoteAccessVpn", p.toURLValues()) + if err != nil { + return nil, err + } + + var r UpdateRemoteAccessVpnResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + + // If we have an async client, we need to wait for the async result + if s.cs.async { + b, err := s.cs.GetAsyncJobResult(r.JobID, s.cs.timeout) + if err != nil { + if err == AsyncTimeoutErr { + return &r, err + } + return nil, err + } + + b, err = getRawValue(b) + if err != nil { + return nil, err + } + + if err := json.Unmarshal(b, &r); err != nil { + return nil, err + } + } + return &r, nil +} + +type UpdateRemoteAccessVpnResponse struct { + JobID string `json:"jobid,omitempty"` + Account string `json:"account,omitempty"` + Domain string `json:"domain,omitempty"` + Domainid string `json:"domainid,omitempty"` + Fordisplay bool `json:"fordisplay,omitempty"` + Id string `json:"id,omitempty"` + Iprange string `json:"iprange,omitempty"` + Presharedkey string `json:"presharedkey,omitempty"` + Project string `json:"project,omitempty"` + Projectid string `json:"projectid,omitempty"` + Publicip string `json:"publicip,omitempty"` + Publicipid string `json:"publicipid,omitempty"` + State string `json:"state,omitempty"` +} + +type AddVpnUserParams struct { + p map[string]interface{} +} + +func (p *AddVpnUserParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["account"]; found { + u.Set("account", v.(string)) + } + if v, found := p.p["domainid"]; found { + u.Set("domainid", v.(string)) + } + if v, found := p.p["password"]; found { + u.Set("password", v.(string)) + } + if v, found := p.p["projectid"]; found { + u.Set("projectid", v.(string)) + } + if v, found := p.p["username"]; found { + u.Set("username", v.(string)) + } + return u +} + +func (p *AddVpnUserParams) SetAccount(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["account"] = v + return +} + +func (p *AddVpnUserParams) SetDomainid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["domainid"] = v + return +} + +func (p *AddVpnUserParams) SetPassword(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["password"] = v + return +} + +func (p *AddVpnUserParams) SetProjectid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["projectid"] = v + return +} + +func (p *AddVpnUserParams) SetUsername(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["username"] = v + return +} + +// You should always use this function to get a new AddVpnUserParams instance, +// as then you are sure you have configured all required params +func (s *VPNService) NewAddVpnUserParams(password string, username string) *AddVpnUserParams { + p := &AddVpnUserParams{} + p.p = make(map[string]interface{}) + p.p["password"] = password + p.p["username"] = username + return p +} + +// Adds vpn users +func (s *VPNService) AddVpnUser(p *AddVpnUserParams) (*AddVpnUserResponse, error) { + resp, err := s.cs.newRequest("addVpnUser", p.toURLValues()) + if err != nil { + return nil, err + } + + var r AddVpnUserResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + + // If we have an async client, we need to wait for the async result + if s.cs.async { + b, err := s.cs.GetAsyncJobResult(r.JobID,
s.cs.timeout) + if err != nil { + if err == AsyncTimeoutErr { + return &r, err + } + return nil, err + } + + b, err = getRawValue(b) + if err != nil { + return nil, err + } + + if err := json.Unmarshal(b, &r); err != nil { + return nil, err + } + } + return &r, nil +} + +type AddVpnUserResponse struct { + JobID string `json:"jobid,omitempty"` + Account string `json:"account,omitempty"` + Domain string `json:"domain,omitempty"` + Domainid string `json:"domainid,omitempty"` + Id string `json:"id,omitempty"` + Project string `json:"project,omitempty"` + Projectid string `json:"projectid,omitempty"` + State string `json:"state,omitempty"` + Username string `json:"username,omitempty"` +} + +type RemoveVpnUserParams struct { + p map[string]interface{} +} + +func (p *RemoveVpnUserParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["account"]; found { + u.Set("account", v.(string)) + } + if v, found := p.p["domainid"]; found { + u.Set("domainid", v.(string)) + } + if v, found := p.p["projectid"]; found { + u.Set("projectid", v.(string)) + } + if v, found := p.p["username"]; found { + u.Set("username", v.(string)) + } + return u +} + +func (p *RemoveVpnUserParams) SetAccount(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["account"] = v + return +} + +func (p *RemoveVpnUserParams) SetDomainid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["domainid"] = v + return +} + +func (p *RemoveVpnUserParams) SetProjectid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["projectid"] = v + return +} + +func (p *RemoveVpnUserParams) SetUsername(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["username"] = v + return +} + +// You should always use this function to get a new RemoveVpnUserParams instance, +// as then you are sure you have configured all required params +func (s *VPNService) NewRemoveVpnUserParams(username string) *RemoveVpnUserParams { + p := &RemoveVpnUserParams{} + p.p = make(map[string]interface{}) + p.p["username"] = username + return p +} + +// Removes vpn user +func (s *VPNService) RemoveVpnUser(p *RemoveVpnUserParams) (*RemoveVpnUserResponse, error) { + resp, err := s.cs.newRequest("removeVpnUser", p.toURLValues()) + if err != nil { + return nil, err + } + + var r RemoveVpnUserResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + + // If we have an async client, we need to wait for the async result + if s.cs.async { + b, err := s.cs.GetAsyncJobResult(r.JobID, s.cs.timeout) + if err != nil { + if err == AsyncTimeoutErr { + return &r, err + } + return nil, err + } + + if err := json.Unmarshal(b, &r); err != nil { + return nil, err + } + } + return &r, nil +} + +type RemoveVpnUserResponse struct { + JobID string `json:"jobid,omitempty"` + Displaytext string `json:"displaytext,omitempty"` + Success bool `json:"success,omitempty"` +} + +type ListVpnUsersParams struct { + p map[string]interface{} +} + +func (p *ListVpnUsersParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["account"]; found { + u.Set("account", v.(string)) + } + if v, found := p.p["domainid"]; found { + u.Set("domainid", v.(string)) + } + if v, found := p.p["id"]; found { + u.Set("id", v.(string)) + } + if v, found := p.p["isrecursive"]; found { + vv := strconv.FormatBool(v.(bool)) + u.Set("isrecursive", vv) + } + if v, found := p.p["keyword"]; found { + u.Set("keyword",
v.(string)) + } + if v, found := p.p["listall"]; found { + vv := strconv.FormatBool(v.(bool)) + u.Set("listall", vv) + } + if v, found := p.p["page"]; found { + vv := strconv.Itoa(v.(int)) + u.Set("page", vv) + } + if v, found := p.p["pagesize"]; found { + vv := strconv.Itoa(v.(int)) + u.Set("pagesize", vv) + } + if v, found := p.p["projectid"]; found { + u.Set("projectid", v.(string)) + } + if v, found := p.p["username"]; found { + u.Set("username", v.(string)) + } + return u +} + +func (p *ListVpnUsersParams) SetAccount(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["account"] = v + return +} + +func (p *ListVpnUsersParams) SetDomainid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["domainid"] = v + return +} + +func (p *ListVpnUsersParams) SetId(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["id"] = v + return +} + +func (p *ListVpnUsersParams) SetIsrecursive(v bool) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["isrecursive"] = v + return +} + +func (p *ListVpnUsersParams) SetKeyword(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["keyword"] = v + return +} + +func (p *ListVpnUsersParams) SetListall(v bool) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["listall"] = v + return +} + +func (p *ListVpnUsersParams) SetPage(v int) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["page"] = v + return +} + +func (p *ListVpnUsersParams) SetPagesize(v int) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["pagesize"] = v + return +} + +func (p *ListVpnUsersParams) SetProjectid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["projectid"] = v + return +} + +func (p *ListVpnUsersParams) SetUsername(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["username"] = v + return +} + +// You should always use this function to get a new ListVpnUsersParams instance, +// as then you are sure you have configured all required params +func (s *VPNService) NewListVpnUsersParams() *ListVpnUsersParams { + p := &ListVpnUsersParams{} + p.p = make(map[string]interface{}) + return p +} + +// This is a courtesy helper function, which in some cases may not work as expected! 
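+// It looks the user up by UUID, retrying across all projects on a miss, and
+// the returned count disambiguates the outcome: -1 on a request error, 0 when
+// nothing matched, 1 on success, and greater than 1 if the ID is unexpectedly
+// ambiguous.
+//
+// A hypothetical usage sketch (it assumes a *CloudStackClient named cs,
+// created elsewhere with this package's client constructor, that exposes this
+// service as cs.VPN; the UUID is a placeholder):
+//
+//	user, count, err := cs.VPN.GetVpnUserByID("0e9f4f21-...")
+//	if err != nil {
+//		if count == 0 {
+//			// No such VPN user; treat as already removed.
+//		}
+//		return err
+//	}
+//	fmt.Println(user.Username, user.State)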
+func (s *VPNService) GetVpnUserByID(id string) (*VpnUser, int, error) {
+	p := &ListVpnUsersParams{}
+	p.p = make(map[string]interface{})
+
+	p.p["id"] = id
+
+	l, err := s.ListVpnUsers(p)
+	if err != nil {
+		if strings.Contains(err.Error(), fmt.Sprintf(
+			"Invalid parameter id value=%s due to incorrect long value format, "+
+				"or entity does not exist", id)) {
+			return nil, 0, fmt.Errorf("No match found for %s: %+v", id, l)
+		}
+		return nil, -1, err
+	}
+
+	if l.Count == 0 {
+		// If no matches, search all projects
+		p.p["projectid"] = "-1"
+
+		l, err = s.ListVpnUsers(p)
+		if err != nil {
+			if strings.Contains(err.Error(), fmt.Sprintf(
+				"Invalid parameter id value=%s due to incorrect long value format, "+
+					"or entity does not exist", id)) {
+				return nil, 0, fmt.Errorf("No match found for %s: %+v", id, l)
+			}
+			return nil, -1, err
+		}
+	}
+
+	if l.Count == 0 {
+		return nil, l.Count, fmt.Errorf("No match found for %s: %+v", id, l)
+	}
+
+	if l.Count == 1 {
+		return l.VpnUsers[0], l.Count, nil
+	}
+	return nil, l.Count, fmt.Errorf("There is more than one result for VpnUser UUID: %s!", id)
+}
+
+// Lists vpn users
+func (s *VPNService) ListVpnUsers(p *ListVpnUsersParams) (*ListVpnUsersResponse, error) {
+	resp, err := s.cs.newRequest("listVpnUsers", p.toURLValues())
+	if err != nil {
+		return nil, err
+	}
+
+	var r ListVpnUsersResponse
+	if err := json.Unmarshal(resp, &r); err != nil {
+		return nil, err
+	}
+	return &r, nil
+}
+
+type ListVpnUsersResponse struct {
+	Count    int        `json:"count"`
+	VpnUsers []*VpnUser `json:"vpnuser"`
+}
+
+type VpnUser struct {
+	Account   string `json:"account,omitempty"`
+	Domain    string `json:"domain,omitempty"`
+	Domainid  string `json:"domainid,omitempty"`
+	Id        string `json:"id,omitempty"`
+	Project   string `json:"project,omitempty"`
+	Projectid string `json:"projectid,omitempty"`
+	State     string `json:"state,omitempty"`
+	Username  string `json:"username,omitempty"`
+}
+
+type CreateVpnCustomerGatewayParams struct {
+	p map[string]interface{}
+}
+
+func (p *CreateVpnCustomerGatewayParams) toURLValues() url.Values {
+	u := url.Values{}
+	if p.p == nil {
+		return u
+	}
+	if v, found := p.p["account"]; found {
+		u.Set("account", v.(string))
+	}
+	if v, found := p.p["cidrlist"]; found {
+		u.Set("cidrlist", v.(string))
+	}
+	if v, found := p.p["domainid"]; found {
+		u.Set("domainid", v.(string))
+	}
+	if v, found := p.p["dpd"]; found {
+		vv := strconv.FormatBool(v.(bool))
+		u.Set("dpd", vv)
+	}
+	if v, found := p.p["esplifetime"]; found {
+		vv := strconv.FormatInt(v.(int64), 10)
+		u.Set("esplifetime", vv)
+	}
+	if v, found := p.p["esppolicy"]; found {
+		u.Set("esppolicy", v.(string))
+	}
+	if v, found := p.p["gateway"]; found {
+		u.Set("gateway", v.(string))
+	}
+	if v, found := p.p["ikelifetime"]; found {
+		vv := strconv.FormatInt(v.(int64), 10)
+		u.Set("ikelifetime", vv)
+	}
+	if v, found := p.p["ikepolicy"]; found {
+		u.Set("ikepolicy", v.(string))
+	}
+	if v, found := p.p["ipsecpsk"]; found {
+		u.Set("ipsecpsk", v.(string))
+	}
+	if v, found := p.p["name"]; found {
+		u.Set("name", v.(string))
+	}
+	return u
+}
+
+func (p *CreateVpnCustomerGatewayParams) SetAccount(v string) {
+	if p.p == nil {
+		p.p = make(map[string]interface{})
+	}
+	p.p["account"] = v
+	return
+}
+
+func (p *CreateVpnCustomerGatewayParams) SetCidrlist(v string) {
+	if p.p == nil {
+		p.p = make(map[string]interface{})
+	}
+	p.p["cidrlist"] = v
+	return
+}
+
+func (p *CreateVpnCustomerGatewayParams) SetDomainid(v string) {
+	if p.p == nil {
+		p.p = make(map[string]interface{})
+	}
+
p.p["domainid"] = v + return +} + +func (p *CreateVpnCustomerGatewayParams) SetDpd(v bool) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["dpd"] = v + return +} + +func (p *CreateVpnCustomerGatewayParams) SetEsplifetime(v int64) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["esplifetime"] = v + return +} + +func (p *CreateVpnCustomerGatewayParams) SetEsppolicy(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["esppolicy"] = v + return +} + +func (p *CreateVpnCustomerGatewayParams) SetGateway(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["gateway"] = v + return +} + +func (p *CreateVpnCustomerGatewayParams) SetIkelifetime(v int64) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["ikelifetime"] = v + return +} + +func (p *CreateVpnCustomerGatewayParams) SetIkepolicy(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["ikepolicy"] = v + return +} + +func (p *CreateVpnCustomerGatewayParams) SetIpsecpsk(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["ipsecpsk"] = v + return +} + +func (p *CreateVpnCustomerGatewayParams) SetName(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["name"] = v + return +} + +// You should always use this function to get a new CreateVpnCustomerGatewayParams instance, +// as then you are sure you have configured all required params +func (s *VPNService) NewCreateVpnCustomerGatewayParams(cidrlist string, esppolicy string, gateway string, ikepolicy string, ipsecpsk string) *CreateVpnCustomerGatewayParams { + p := &CreateVpnCustomerGatewayParams{} + p.p = make(map[string]interface{}) + p.p["cidrlist"] = cidrlist + p.p["esppolicy"] = esppolicy + p.p["gateway"] = gateway + p.p["ikepolicy"] = ikepolicy + p.p["ipsecpsk"] = ipsecpsk + return p +} + +// Creates site to site vpn customer gateway +func (s *VPNService) CreateVpnCustomerGateway(p *CreateVpnCustomerGatewayParams) (*CreateVpnCustomerGatewayResponse, error) { + resp, err := s.cs.newRequest("createVpnCustomerGateway", p.toURLValues()) + if err != nil { + return nil, err + } + + var r CreateVpnCustomerGatewayResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + + // If we have a async client, we need to wait for the async result + if s.cs.async { + b, err := s.cs.GetAsyncJobResult(r.JobID, s.cs.timeout) + if err != nil { + if err == AsyncTimeoutErr { + return &r, err + } + return nil, err + } + + b, err = getRawValue(b) + if err != nil { + return nil, err + } + + if err := json.Unmarshal(b, &r); err != nil { + return nil, err + } + } + return &r, nil +} + +type CreateVpnCustomerGatewayResponse struct { + JobID string `json:"jobid,omitempty"` + Account string `json:"account,omitempty"` + Cidrlist string `json:"cidrlist,omitempty"` + Domain string `json:"domain,omitempty"` + Domainid string `json:"domainid,omitempty"` + Dpd bool `json:"dpd,omitempty"` + Esplifetime int64 `json:"esplifetime,omitempty"` + Esppolicy string `json:"esppolicy,omitempty"` + Gateway string `json:"gateway,omitempty"` + Id string `json:"id,omitempty"` + Ikelifetime int64 `json:"ikelifetime,omitempty"` + Ikepolicy string `json:"ikepolicy,omitempty"` + Ipaddress string `json:"ipaddress,omitempty"` + Ipsecpsk string `json:"ipsecpsk,omitempty"` + Name string `json:"name,omitempty"` + Project string `json:"project,omitempty"` + Projectid string `json:"projectid,omitempty"` + Removed string `json:"removed,omitempty"` +} + +type 
CreateVpnGatewayParams struct { + p map[string]interface{} +} + +func (p *CreateVpnGatewayParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["fordisplay"]; found { + vv := strconv.FormatBool(v.(bool)) + u.Set("fordisplay", vv) + } + if v, found := p.p["vpcid"]; found { + u.Set("vpcid", v.(string)) + } + return u +} + +func (p *CreateVpnGatewayParams) SetFordisplay(v bool) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["fordisplay"] = v + return +} + +func (p *CreateVpnGatewayParams) SetVpcid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["vpcid"] = v + return +} + +// You should always use this function to get a new CreateVpnGatewayParams instance, +// as then you are sure you have configured all required params +func (s *VPNService) NewCreateVpnGatewayParams(vpcid string) *CreateVpnGatewayParams { + p := &CreateVpnGatewayParams{} + p.p = make(map[string]interface{}) + p.p["vpcid"] = vpcid + return p +} + +// Creates site to site vpn local gateway +func (s *VPNService) CreateVpnGateway(p *CreateVpnGatewayParams) (*CreateVpnGatewayResponse, error) { + resp, err := s.cs.newRequest("createVpnGateway", p.toURLValues()) + if err != nil { + return nil, err + } + + var r CreateVpnGatewayResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + + // If we have a async client, we need to wait for the async result + if s.cs.async { + b, err := s.cs.GetAsyncJobResult(r.JobID, s.cs.timeout) + if err != nil { + if err == AsyncTimeoutErr { + return &r, err + } + return nil, err + } + + b, err = getRawValue(b) + if err != nil { + return nil, err + } + + if err := json.Unmarshal(b, &r); err != nil { + return nil, err + } + } + return &r, nil +} + +type CreateVpnGatewayResponse struct { + JobID string `json:"jobid,omitempty"` + Account string `json:"account,omitempty"` + Domain string `json:"domain,omitempty"` + Domainid string `json:"domainid,omitempty"` + Fordisplay bool `json:"fordisplay,omitempty"` + Id string `json:"id,omitempty"` + Project string `json:"project,omitempty"` + Projectid string `json:"projectid,omitempty"` + Publicip string `json:"publicip,omitempty"` + Removed string `json:"removed,omitempty"` + Vpcid string `json:"vpcid,omitempty"` +} + +type CreateVpnConnectionParams struct { + p map[string]interface{} +} + +func (p *CreateVpnConnectionParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["fordisplay"]; found { + vv := strconv.FormatBool(v.(bool)) + u.Set("fordisplay", vv) + } + if v, found := p.p["passive"]; found { + vv := strconv.FormatBool(v.(bool)) + u.Set("passive", vv) + } + if v, found := p.p["s2scustomergatewayid"]; found { + u.Set("s2scustomergatewayid", v.(string)) + } + if v, found := p.p["s2svpngatewayid"]; found { + u.Set("s2svpngatewayid", v.(string)) + } + return u +} + +func (p *CreateVpnConnectionParams) SetFordisplay(v bool) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["fordisplay"] = v + return +} + +func (p *CreateVpnConnectionParams) SetPassive(v bool) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["passive"] = v + return +} + +func (p *CreateVpnConnectionParams) SetS2scustomergatewayid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["s2scustomergatewayid"] = v + return +} + +func (p *CreateVpnConnectionParams) SetS2svpngatewayid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + 
p.p["s2svpngatewayid"] = v + return +} + +// You should always use this function to get a new CreateVpnConnectionParams instance, +// as then you are sure you have configured all required params +func (s *VPNService) NewCreateVpnConnectionParams(s2scustomergatewayid string, s2svpngatewayid string) *CreateVpnConnectionParams { + p := &CreateVpnConnectionParams{} + p.p = make(map[string]interface{}) + p.p["s2scustomergatewayid"] = s2scustomergatewayid + p.p["s2svpngatewayid"] = s2svpngatewayid + return p +} + +// Create site to site vpn connection +func (s *VPNService) CreateVpnConnection(p *CreateVpnConnectionParams) (*CreateVpnConnectionResponse, error) { + resp, err := s.cs.newRequest("createVpnConnection", p.toURLValues()) + if err != nil { + return nil, err + } + + var r CreateVpnConnectionResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + + // If we have a async client, we need to wait for the async result + if s.cs.async { + b, err := s.cs.GetAsyncJobResult(r.JobID, s.cs.timeout) + if err != nil { + if err == AsyncTimeoutErr { + return &r, err + } + return nil, err + } + + b, err = getRawValue(b) + if err != nil { + return nil, err + } + + if err := json.Unmarshal(b, &r); err != nil { + return nil, err + } + } + return &r, nil +} + +type CreateVpnConnectionResponse struct { + JobID string `json:"jobid,omitempty"` + Account string `json:"account,omitempty"` + Cidrlist string `json:"cidrlist,omitempty"` + Created string `json:"created,omitempty"` + Domain string `json:"domain,omitempty"` + Domainid string `json:"domainid,omitempty"` + Dpd bool `json:"dpd,omitempty"` + Esplifetime int64 `json:"esplifetime,omitempty"` + Esppolicy string `json:"esppolicy,omitempty"` + Fordisplay bool `json:"fordisplay,omitempty"` + Gateway string `json:"gateway,omitempty"` + Id string `json:"id,omitempty"` + Ikelifetime int64 `json:"ikelifetime,omitempty"` + Ikepolicy string `json:"ikepolicy,omitempty"` + Ipsecpsk string `json:"ipsecpsk,omitempty"` + Passive bool `json:"passive,omitempty"` + Project string `json:"project,omitempty"` + Projectid string `json:"projectid,omitempty"` + Publicip string `json:"publicip,omitempty"` + Removed string `json:"removed,omitempty"` + S2scustomergatewayid string `json:"s2scustomergatewayid,omitempty"` + S2svpngatewayid string `json:"s2svpngatewayid,omitempty"` + State string `json:"state,omitempty"` +} + +type DeleteVpnCustomerGatewayParams struct { + p map[string]interface{} +} + +func (p *DeleteVpnCustomerGatewayParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["id"]; found { + u.Set("id", v.(string)) + } + return u +} + +func (p *DeleteVpnCustomerGatewayParams) SetId(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["id"] = v + return +} + +// You should always use this function to get a new DeleteVpnCustomerGatewayParams instance, +// as then you are sure you have configured all required params +func (s *VPNService) NewDeleteVpnCustomerGatewayParams(id string) *DeleteVpnCustomerGatewayParams { + p := &DeleteVpnCustomerGatewayParams{} + p.p = make(map[string]interface{}) + p.p["id"] = id + return p +} + +// Delete site to site vpn customer gateway +func (s *VPNService) DeleteVpnCustomerGateway(p *DeleteVpnCustomerGatewayParams) (*DeleteVpnCustomerGatewayResponse, error) { + resp, err := s.cs.newRequest("deleteVpnCustomerGateway", p.toURLValues()) + if err != nil { + return nil, err + } + + var r DeleteVpnCustomerGatewayResponse + if err := 
json.Unmarshal(resp, &r); err != nil {
+		return nil, err
+	}
+
+	// If we have an async client, we need to wait for the async result
+	if s.cs.async {
+		b, err := s.cs.GetAsyncJobResult(r.JobID, s.cs.timeout)
+		if err != nil {
+			if err == AsyncTimeoutErr {
+				return &r, err
+			}
+			return nil, err
+		}
+
+		if err := json.Unmarshal(b, &r); err != nil {
+			return nil, err
+		}
+	}
+	return &r, nil
+}
+
+type DeleteVpnCustomerGatewayResponse struct {
+	JobID       string `json:"jobid,omitempty"`
+	Displaytext string `json:"displaytext,omitempty"`
+	Success     bool   `json:"success,omitempty"`
+}
+
+type DeleteVpnGatewayParams struct {
+	p map[string]interface{}
+}
+
+func (p *DeleteVpnGatewayParams) toURLValues() url.Values {
+	u := url.Values{}
+	if p.p == nil {
+		return u
+	}
+	if v, found := p.p["id"]; found {
+		u.Set("id", v.(string))
+	}
+	return u
+}
+
+func (p *DeleteVpnGatewayParams) SetId(v string) {
+	if p.p == nil {
+		p.p = make(map[string]interface{})
+	}
+	p.p["id"] = v
+	return
+}
+
+// You should always use this function to get a new DeleteVpnGatewayParams instance,
+// as then you are sure you have configured all required params
+func (s *VPNService) NewDeleteVpnGatewayParams(id string) *DeleteVpnGatewayParams {
+	p := &DeleteVpnGatewayParams{}
+	p.p = make(map[string]interface{})
+	p.p["id"] = id
+	return p
+}
+
+// Delete site to site vpn gateway
+func (s *VPNService) DeleteVpnGateway(p *DeleteVpnGatewayParams) (*DeleteVpnGatewayResponse, error) {
+	resp, err := s.cs.newRequest("deleteVpnGateway", p.toURLValues())
+	if err != nil {
+		return nil, err
+	}
+
+	var r DeleteVpnGatewayResponse
+	if err := json.Unmarshal(resp, &r); err != nil {
+		return nil, err
+	}
+
+	// If we have an async client, we need to wait for the async result
+	if s.cs.async {
+		b, err := s.cs.GetAsyncJobResult(r.JobID, s.cs.timeout)
+		if err != nil {
+			if err == AsyncTimeoutErr {
+				return &r, err
+			}
+			return nil, err
+		}
+
+		if err := json.Unmarshal(b, &r); err != nil {
+			return nil, err
+		}
+	}
+	return &r, nil
+}
+
+type DeleteVpnGatewayResponse struct {
+	JobID       string `json:"jobid,omitempty"`
+	Displaytext string `json:"displaytext,omitempty"`
+	Success     bool   `json:"success,omitempty"`
+}
+
+type DeleteVpnConnectionParams struct {
+	p map[string]interface{}
+}
+
+func (p *DeleteVpnConnectionParams) toURLValues() url.Values {
+	u := url.Values{}
+	if p.p == nil {
+		return u
+	}
+	if v, found := p.p["id"]; found {
+		u.Set("id", v.(string))
+	}
+	return u
+}
+
+func (p *DeleteVpnConnectionParams) SetId(v string) {
+	if p.p == nil {
+		p.p = make(map[string]interface{})
+	}
+	p.p["id"] = v
+	return
+}
+
+// You should always use this function to get a new DeleteVpnConnectionParams instance,
+// as then you are sure you have configured all required params
+func (s *VPNService) NewDeleteVpnConnectionParams(id string) *DeleteVpnConnectionParams {
+	p := &DeleteVpnConnectionParams{}
+	p.p = make(map[string]interface{})
+	p.p["id"] = id
+	return p
+}
+
+// Delete site to site vpn connection
+func (s *VPNService) DeleteVpnConnection(p *DeleteVpnConnectionParams) (*DeleteVpnConnectionResponse, error) {
+	resp, err := s.cs.newRequest("deleteVpnConnection", p.toURLValues())
+	if err != nil {
+		return nil, err
+	}
+
+	var r DeleteVpnConnectionResponse
+	if err := json.Unmarshal(resp, &r); err != nil {
+		return nil, err
+	}
+
+	// If we have an async client, we need to wait for the async result
+	if s.cs.async {
+		b, err := s.cs.GetAsyncJobResult(r.JobID, s.cs.timeout)
+		if err != nil {
+			if err == AsyncTimeoutErr {
+				return &r, err
+
} + return nil, err + } + + if err := json.Unmarshal(b, &r); err != nil { + return nil, err + } + } + return &r, nil +} + +type DeleteVpnConnectionResponse struct { + JobID string `json:"jobid,omitempty"` + Displaytext string `json:"displaytext,omitempty"` + Success bool `json:"success,omitempty"` +} + +type UpdateVpnCustomerGatewayParams struct { + p map[string]interface{} +} + +func (p *UpdateVpnCustomerGatewayParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["account"]; found { + u.Set("account", v.(string)) + } + if v, found := p.p["cidrlist"]; found { + u.Set("cidrlist", v.(string)) + } + if v, found := p.p["domainid"]; found { + u.Set("domainid", v.(string)) + } + if v, found := p.p["dpd"]; found { + vv := strconv.FormatBool(v.(bool)) + u.Set("dpd", vv) + } + if v, found := p.p["esplifetime"]; found { + vv := strconv.FormatInt(v.(int64), 10) + u.Set("esplifetime", vv) + } + if v, found := p.p["esppolicy"]; found { + u.Set("esppolicy", v.(string)) + } + if v, found := p.p["gateway"]; found { + u.Set("gateway", v.(string)) + } + if v, found := p.p["id"]; found { + u.Set("id", v.(string)) + } + if v, found := p.p["ikelifetime"]; found { + vv := strconv.FormatInt(v.(int64), 10) + u.Set("ikelifetime", vv) + } + if v, found := p.p["ikepolicy"]; found { + u.Set("ikepolicy", v.(string)) + } + if v, found := p.p["ipsecpsk"]; found { + u.Set("ipsecpsk", v.(string)) + } + if v, found := p.p["name"]; found { + u.Set("name", v.(string)) + } + return u +} + +func (p *UpdateVpnCustomerGatewayParams) SetAccount(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["account"] = v + return +} + +func (p *UpdateVpnCustomerGatewayParams) SetCidrlist(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["cidrlist"] = v + return +} + +func (p *UpdateVpnCustomerGatewayParams) SetDomainid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["domainid"] = v + return +} + +func (p *UpdateVpnCustomerGatewayParams) SetDpd(v bool) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["dpd"] = v + return +} + +func (p *UpdateVpnCustomerGatewayParams) SetEsplifetime(v int64) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["esplifetime"] = v + return +} + +func (p *UpdateVpnCustomerGatewayParams) SetEsppolicy(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["esppolicy"] = v + return +} + +func (p *UpdateVpnCustomerGatewayParams) SetGateway(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["gateway"] = v + return +} + +func (p *UpdateVpnCustomerGatewayParams) SetId(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["id"] = v + return +} + +func (p *UpdateVpnCustomerGatewayParams) SetIkelifetime(v int64) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["ikelifetime"] = v + return +} + +func (p *UpdateVpnCustomerGatewayParams) SetIkepolicy(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["ikepolicy"] = v + return +} + +func (p *UpdateVpnCustomerGatewayParams) SetIpsecpsk(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["ipsecpsk"] = v + return +} + +func (p *UpdateVpnCustomerGatewayParams) SetName(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["name"] = v + return +} + +// You should always use this function to get a new UpdateVpnCustomerGatewayParams instance, +// as then you 
are sure you have configured all required params +func (s *VPNService) NewUpdateVpnCustomerGatewayParams(cidrlist string, esppolicy string, gateway string, id string, ikepolicy string, ipsecpsk string) *UpdateVpnCustomerGatewayParams { + p := &UpdateVpnCustomerGatewayParams{} + p.p = make(map[string]interface{}) + p.p["cidrlist"] = cidrlist + p.p["esppolicy"] = esppolicy + p.p["gateway"] = gateway + p.p["id"] = id + p.p["ikepolicy"] = ikepolicy + p.p["ipsecpsk"] = ipsecpsk + return p +} + +// Update site to site vpn customer gateway +func (s *VPNService) UpdateVpnCustomerGateway(p *UpdateVpnCustomerGatewayParams) (*UpdateVpnCustomerGatewayResponse, error) { + resp, err := s.cs.newRequest("updateVpnCustomerGateway", p.toURLValues()) + if err != nil { + return nil, err + } + + var r UpdateVpnCustomerGatewayResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + + // If we have a async client, we need to wait for the async result + if s.cs.async { + b, err := s.cs.GetAsyncJobResult(r.JobID, s.cs.timeout) + if err != nil { + if err == AsyncTimeoutErr { + return &r, err + } + return nil, err + } + + b, err = getRawValue(b) + if err != nil { + return nil, err + } + + if err := json.Unmarshal(b, &r); err != nil { + return nil, err + } + } + return &r, nil +} + +type UpdateVpnCustomerGatewayResponse struct { + JobID string `json:"jobid,omitempty"` + Account string `json:"account,omitempty"` + Cidrlist string `json:"cidrlist,omitempty"` + Domain string `json:"domain,omitempty"` + Domainid string `json:"domainid,omitempty"` + Dpd bool `json:"dpd,omitempty"` + Esplifetime int64 `json:"esplifetime,omitempty"` + Esppolicy string `json:"esppolicy,omitempty"` + Gateway string `json:"gateway,omitempty"` + Id string `json:"id,omitempty"` + Ikelifetime int64 `json:"ikelifetime,omitempty"` + Ikepolicy string `json:"ikepolicy,omitempty"` + Ipaddress string `json:"ipaddress,omitempty"` + Ipsecpsk string `json:"ipsecpsk,omitempty"` + Name string `json:"name,omitempty"` + Project string `json:"project,omitempty"` + Projectid string `json:"projectid,omitempty"` + Removed string `json:"removed,omitempty"` +} + +type ResetVpnConnectionParams struct { + p map[string]interface{} +} + +func (p *ResetVpnConnectionParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["account"]; found { + u.Set("account", v.(string)) + } + if v, found := p.p["domainid"]; found { + u.Set("domainid", v.(string)) + } + if v, found := p.p["id"]; found { + u.Set("id", v.(string)) + } + return u +} + +func (p *ResetVpnConnectionParams) SetAccount(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["account"] = v + return +} + +func (p *ResetVpnConnectionParams) SetDomainid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["domainid"] = v + return +} + +func (p *ResetVpnConnectionParams) SetId(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["id"] = v + return +} + +// You should always use this function to get a new ResetVpnConnectionParams instance, +// as then you are sure you have configured all required params +func (s *VPNService) NewResetVpnConnectionParams(id string) *ResetVpnConnectionParams { + p := &ResetVpnConnectionParams{} + p.p = make(map[string]interface{}) + p.p["id"] = id + return p +} + +// Reset site to site vpn connection +func (s *VPNService) ResetVpnConnection(p *ResetVpnConnectionParams) (*ResetVpnConnectionResponse, error) { + resp, err := 
s.cs.newRequest("resetVpnConnection", p.toURLValues()) + if err != nil { + return nil, err + } + + var r ResetVpnConnectionResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + + // If we have a async client, we need to wait for the async result + if s.cs.async { + b, err := s.cs.GetAsyncJobResult(r.JobID, s.cs.timeout) + if err != nil { + if err == AsyncTimeoutErr { + return &r, err + } + return nil, err + } + + b, err = getRawValue(b) + if err != nil { + return nil, err + } + + if err := json.Unmarshal(b, &r); err != nil { + return nil, err + } + } + return &r, nil +} + +type ResetVpnConnectionResponse struct { + JobID string `json:"jobid,omitempty"` + Account string `json:"account,omitempty"` + Cidrlist string `json:"cidrlist,omitempty"` + Created string `json:"created,omitempty"` + Domain string `json:"domain,omitempty"` + Domainid string `json:"domainid,omitempty"` + Dpd bool `json:"dpd,omitempty"` + Esplifetime int64 `json:"esplifetime,omitempty"` + Esppolicy string `json:"esppolicy,omitempty"` + Fordisplay bool `json:"fordisplay,omitempty"` + Gateway string `json:"gateway,omitempty"` + Id string `json:"id,omitempty"` + Ikelifetime int64 `json:"ikelifetime,omitempty"` + Ikepolicy string `json:"ikepolicy,omitempty"` + Ipsecpsk string `json:"ipsecpsk,omitempty"` + Passive bool `json:"passive,omitempty"` + Project string `json:"project,omitempty"` + Projectid string `json:"projectid,omitempty"` + Publicip string `json:"publicip,omitempty"` + Removed string `json:"removed,omitempty"` + S2scustomergatewayid string `json:"s2scustomergatewayid,omitempty"` + S2svpngatewayid string `json:"s2svpngatewayid,omitempty"` + State string `json:"state,omitempty"` +} + +type ListVpnCustomerGatewaysParams struct { + p map[string]interface{} +} + +func (p *ListVpnCustomerGatewaysParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["account"]; found { + u.Set("account", v.(string)) + } + if v, found := p.p["domainid"]; found { + u.Set("domainid", v.(string)) + } + if v, found := p.p["id"]; found { + u.Set("id", v.(string)) + } + if v, found := p.p["isrecursive"]; found { + vv := strconv.FormatBool(v.(bool)) + u.Set("isrecursive", vv) + } + if v, found := p.p["keyword"]; found { + u.Set("keyword", v.(string)) + } + if v, found := p.p["listall"]; found { + vv := strconv.FormatBool(v.(bool)) + u.Set("listall", vv) + } + if v, found := p.p["page"]; found { + vv := strconv.Itoa(v.(int)) + u.Set("page", vv) + } + if v, found := p.p["pagesize"]; found { + vv := strconv.Itoa(v.(int)) + u.Set("pagesize", vv) + } + if v, found := p.p["projectid"]; found { + u.Set("projectid", v.(string)) + } + return u +} + +func (p *ListVpnCustomerGatewaysParams) SetAccount(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["account"] = v + return +} + +func (p *ListVpnCustomerGatewaysParams) SetDomainid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["domainid"] = v + return +} + +func (p *ListVpnCustomerGatewaysParams) SetId(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["id"] = v + return +} + +func (p *ListVpnCustomerGatewaysParams) SetIsrecursive(v bool) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["isrecursive"] = v + return +} + +func (p *ListVpnCustomerGatewaysParams) SetKeyword(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["keyword"] = v + return +} + +func (p *ListVpnCustomerGatewaysParams) 
SetListall(v bool) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["listall"] = v + return +} + +func (p *ListVpnCustomerGatewaysParams) SetPage(v int) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["page"] = v + return +} + +func (p *ListVpnCustomerGatewaysParams) SetPagesize(v int) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["pagesize"] = v + return +} + +func (p *ListVpnCustomerGatewaysParams) SetProjectid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["projectid"] = v + return +} + +// You should always use this function to get a new ListVpnCustomerGatewaysParams instance, +// as then you are sure you have configured all required params +func (s *VPNService) NewListVpnCustomerGatewaysParams() *ListVpnCustomerGatewaysParams { + p := &ListVpnCustomerGatewaysParams{} + p.p = make(map[string]interface{}) + return p +} + +// This is a courtesy helper function, which in some cases may not work as expected! +func (s *VPNService) GetVpnCustomerGatewayID(keyword string) (string, error) { + p := &ListVpnCustomerGatewaysParams{} + p.p = make(map[string]interface{}) + + p.p["keyword"] = keyword + + l, err := s.ListVpnCustomerGateways(p) + if err != nil { + return "", err + } + + if l.Count == 0 { + // If no matches, search all projects + p.p["projectid"] = "-1" + + l, err = s.ListVpnCustomerGateways(p) + if err != nil { + return "", err + } + } + + if l.Count == 0 { + return "", fmt.Errorf("No match found for %s: %+v", keyword, l) + } + + if l.Count == 1 { + return l.VpnCustomerGateways[0].Id, nil + } + + if l.Count > 1 { + for _, v := range l.VpnCustomerGateways { + if v.Name == keyword { + return v.Id, nil + } + } + } + return "", fmt.Errorf("Could not find an exact match for %s: %+v", keyword, l) +} + +// This is a courtesy helper function, which in some cases may not work as expected! +func (s *VPNService) GetVpnCustomerGatewayByName(name string) (*VpnCustomerGateway, int, error) { + id, err := s.GetVpnCustomerGatewayID(name) + if err != nil { + return nil, -1, err + } + + r, count, err := s.GetVpnCustomerGatewayByID(id) + if err != nil { + return nil, count, err + } + return r, count, nil +} + +// This is a courtesy helper function, which in some cases may not work as expected! 
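+// GetVpnCustomerGatewayByName above resolves a name to a UUID (via the
+// keyword-based GetVpnCustomerGatewayID) and then delegates to this function.
+// A hypothetical sketch (it assumes a *CloudStackClient named cs, created
+// elsewhere, that exposes this service as cs.VPN; the gateway name is a
+// placeholder):
+//
+//	gw, _, err := cs.VPN.GetVpnCustomerGatewayByName("office-gw")
+//	if err != nil {
+//		return err
+//	}
+//	fmt.Println(gw.Gateway, gw.Cidrlist, gw.Ipaddress)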
+func (s *VPNService) GetVpnCustomerGatewayByID(id string) (*VpnCustomerGateway, int, error) {
+	p := &ListVpnCustomerGatewaysParams{}
+	p.p = make(map[string]interface{})
+
+	p.p["id"] = id
+
+	l, err := s.ListVpnCustomerGateways(p)
+	if err != nil {
+		if strings.Contains(err.Error(), fmt.Sprintf(
+			"Invalid parameter id value=%s due to incorrect long value format, "+
+				"or entity does not exist", id)) {
+			return nil, 0, fmt.Errorf("No match found for %s: %+v", id, l)
+		}
+		return nil, -1, err
+	}
+
+	if l.Count == 0 {
+		// If no matches, search all projects
+		p.p["projectid"] = "-1"
+
+		l, err = s.ListVpnCustomerGateways(p)
+		if err != nil {
+			if strings.Contains(err.Error(), fmt.Sprintf(
+				"Invalid parameter id value=%s due to incorrect long value format, "+
+					"or entity does not exist", id)) {
+				return nil, 0, fmt.Errorf("No match found for %s: %+v", id, l)
+			}
+			return nil, -1, err
+		}
+	}
+
+	if l.Count == 0 {
+		return nil, l.Count, fmt.Errorf("No match found for %s: %+v", id, l)
+	}
+
+	if l.Count == 1 {
+		return l.VpnCustomerGateways[0], l.Count, nil
+	}
+	return nil, l.Count, fmt.Errorf("There is more than one result for VpnCustomerGateway UUID: %s!", id)
+}
+
+// Lists site to site vpn customer gateways
+func (s *VPNService) ListVpnCustomerGateways(p *ListVpnCustomerGatewaysParams) (*ListVpnCustomerGatewaysResponse, error) {
+	resp, err := s.cs.newRequest("listVpnCustomerGateways", p.toURLValues())
+	if err != nil {
+		return nil, err
+	}
+
+	var r ListVpnCustomerGatewaysResponse
+	if err := json.Unmarshal(resp, &r); err != nil {
+		return nil, err
+	}
+	return &r, nil
+}
+
+type ListVpnCustomerGatewaysResponse struct {
+	Count               int                   `json:"count"`
+	VpnCustomerGateways []*VpnCustomerGateway `json:"vpncustomergateway"`
+}
+
+type VpnCustomerGateway struct {
+	Account     string `json:"account,omitempty"`
+	Cidrlist    string `json:"cidrlist,omitempty"`
+	Domain      string `json:"domain,omitempty"`
+	Domainid    string `json:"domainid,omitempty"`
+	Dpd         bool   `json:"dpd,omitempty"`
+	Esplifetime int64  `json:"esplifetime,omitempty"`
+	Esppolicy   string `json:"esppolicy,omitempty"`
+	Gateway     string `json:"gateway,omitempty"`
+	Id          string `json:"id,omitempty"`
+	Ikelifetime int64  `json:"ikelifetime,omitempty"`
+	Ikepolicy   string `json:"ikepolicy,omitempty"`
+	Ipaddress   string `json:"ipaddress,omitempty"`
+	Ipsecpsk    string `json:"ipsecpsk,omitempty"`
+	Name        string `json:"name,omitempty"`
+	Project     string `json:"project,omitempty"`
+	Projectid   string `json:"projectid,omitempty"`
+	Removed     string `json:"removed,omitempty"`
+}
+
+type ListVpnGatewaysParams struct {
+	p map[string]interface{}
+}
+
+func (p *ListVpnGatewaysParams) toURLValues() url.Values {
+	u := url.Values{}
+	if p.p == nil {
+		return u
+	}
+	if v, found := p.p["account"]; found {
+		u.Set("account", v.(string))
+	}
+	if v, found := p.p["domainid"]; found {
+		u.Set("domainid", v.(string))
+	}
+	if v, found := p.p["fordisplay"]; found {
+		vv := strconv.FormatBool(v.(bool))
+		u.Set("fordisplay", vv)
+	}
+	if v, found := p.p["id"]; found {
+		u.Set("id", v.(string))
+	}
+	if v, found := p.p["isrecursive"]; found {
+		vv := strconv.FormatBool(v.(bool))
+		u.Set("isrecursive", vv)
+	}
+	if v, found := p.p["keyword"]; found {
+		u.Set("keyword", v.(string))
+	}
+	if v, found := p.p["listall"]; found {
+		vv := strconv.FormatBool(v.(bool))
+		u.Set("listall", vv)
+	}
+	if v, found := p.p["page"]; found {
+		vv := strconv.Itoa(v.(int))
+		u.Set("page", vv)
+	}
+	if v, found := p.p["pagesize"]; found {
+		vv := strconv.Itoa(v.(int))
+		u.Set("pagesize", vv)
+	}
+ if v, found := p.p["projectid"]; found { + u.Set("projectid", v.(string)) + } + if v, found := p.p["vpcid"]; found { + u.Set("vpcid", v.(string)) + } + return u +} + +func (p *ListVpnGatewaysParams) SetAccount(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["account"] = v + return +} + +func (p *ListVpnGatewaysParams) SetDomainid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["domainid"] = v + return +} + +func (p *ListVpnGatewaysParams) SetFordisplay(v bool) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["fordisplay"] = v + return +} + +func (p *ListVpnGatewaysParams) SetId(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["id"] = v + return +} + +func (p *ListVpnGatewaysParams) SetIsrecursive(v bool) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["isrecursive"] = v + return +} + +func (p *ListVpnGatewaysParams) SetKeyword(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["keyword"] = v + return +} + +func (p *ListVpnGatewaysParams) SetListall(v bool) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["listall"] = v + return +} + +func (p *ListVpnGatewaysParams) SetPage(v int) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["page"] = v + return +} + +func (p *ListVpnGatewaysParams) SetPagesize(v int) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["pagesize"] = v + return +} + +func (p *ListVpnGatewaysParams) SetProjectid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["projectid"] = v + return +} + +func (p *ListVpnGatewaysParams) SetVpcid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["vpcid"] = v + return +} + +// You should always use this function to get a new ListVpnGatewaysParams instance, +// as then you are sure you have configured all required params +func (s *VPNService) NewListVpnGatewaysParams() *ListVpnGatewaysParams { + p := &ListVpnGatewaysParams{} + p.p = make(map[string]interface{}) + return p +} + +// This is a courtesy helper function, which in some cases may not work as expected! 
+func (s *VPNService) GetVpnGatewayByID(id string) (*VpnGateway, int, error) {
+	p := &ListVpnGatewaysParams{}
+	p.p = make(map[string]interface{})
+
+	p.p["id"] = id
+
+	l, err := s.ListVpnGateways(p)
+	if err != nil {
+		if strings.Contains(err.Error(), fmt.Sprintf(
+			"Invalid parameter id value=%s due to incorrect long value format, "+
+				"or entity does not exist", id)) {
+			return nil, 0, fmt.Errorf("No match found for %s: %+v", id, l)
+		}
+		return nil, -1, err
+	}
+
+	if l.Count == 0 {
+		// If no matches, search all projects
+		p.p["projectid"] = "-1"
+
+		l, err = s.ListVpnGateways(p)
+		if err != nil {
+			if strings.Contains(err.Error(), fmt.Sprintf(
+				"Invalid parameter id value=%s due to incorrect long value format, "+
+					"or entity does not exist", id)) {
+				return nil, 0, fmt.Errorf("No match found for %s: %+v", id, l)
+			}
+			return nil, -1, err
+		}
+	}
+
+	if l.Count == 0 {
+		return nil, l.Count, fmt.Errorf("No match found for %s: %+v", id, l)
+	}
+
+	if l.Count == 1 {
+		return l.VpnGateways[0], l.Count, nil
+	}
+	return nil, l.Count, fmt.Errorf("There is more than one result for VpnGateway UUID: %s!", id)
+}
+
+// Lists site to site vpn gateways
+func (s *VPNService) ListVpnGateways(p *ListVpnGatewaysParams) (*ListVpnGatewaysResponse, error) {
+	resp, err := s.cs.newRequest("listVpnGateways", p.toURLValues())
+	if err != nil {
+		return nil, err
+	}
+
+	var r ListVpnGatewaysResponse
+	if err := json.Unmarshal(resp, &r); err != nil {
+		return nil, err
+	}
+	return &r, nil
+}
+
+type ListVpnGatewaysResponse struct {
+	Count       int           `json:"count"`
+	VpnGateways []*VpnGateway `json:"vpngateway"`
+}
+
+type VpnGateway struct {
+	Account    string `json:"account,omitempty"`
+	Domain     string `json:"domain,omitempty"`
+	Domainid   string `json:"domainid,omitempty"`
+	Fordisplay bool   `json:"fordisplay,omitempty"`
+	Id         string `json:"id,omitempty"`
+	Project    string `json:"project,omitempty"`
+	Projectid  string `json:"projectid,omitempty"`
+	Publicip   string `json:"publicip,omitempty"`
+	Removed    string `json:"removed,omitempty"`
+	Vpcid      string `json:"vpcid,omitempty"`
+}
+
+type ListVpnConnectionsParams struct {
+	p map[string]interface{}
+}
+
+func (p *ListVpnConnectionsParams) toURLValues() url.Values {
+	u := url.Values{}
+	if p.p == nil {
+		return u
+	}
+	if v, found := p.p["account"]; found {
+		u.Set("account", v.(string))
+	}
+	if v, found := p.p["domainid"]; found {
+		u.Set("domainid", v.(string))
+	}
+	if v, found := p.p["fordisplay"]; found {
+		vv := strconv.FormatBool(v.(bool))
+		u.Set("fordisplay", vv)
+	}
+	if v, found := p.p["id"]; found {
+		u.Set("id", v.(string))
+	}
+	if v, found := p.p["isrecursive"]; found {
+		vv := strconv.FormatBool(v.(bool))
+		u.Set("isrecursive", vv)
+	}
+	if v, found := p.p["keyword"]; found {
+		u.Set("keyword", v.(string))
+	}
+	if v, found := p.p["listall"]; found {
+		vv := strconv.FormatBool(v.(bool))
+		u.Set("listall", vv)
+	}
+	if v, found := p.p["page"]; found {
+		vv := strconv.Itoa(v.(int))
+		u.Set("page", vv)
+	}
+	if v, found := p.p["pagesize"]; found {
+		vv := strconv.Itoa(v.(int))
+		u.Set("pagesize", vv)
+	}
+	if v, found := p.p["projectid"]; found {
+		u.Set("projectid", v.(string))
+	}
+	if v, found := p.p["vpcid"]; found {
+		u.Set("vpcid", v.(string))
+	}
+	return u
+}
+
+func (p *ListVpnConnectionsParams) SetAccount(v string) {
+	if p.p == nil {
+		p.p = make(map[string]interface{})
+	}
+	p.p["account"] = v
+	return
+}
+
+func (p *ListVpnConnectionsParams) SetDomainid(v string) {
+	if p.p == nil {
+		p.p = make(map[string]interface{})
+	}
+	p.p["domainid"] =
v + return +} + +func (p *ListVpnConnectionsParams) SetFordisplay(v bool) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["fordisplay"] = v + return +} + +func (p *ListVpnConnectionsParams) SetId(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["id"] = v + return +} + +func (p *ListVpnConnectionsParams) SetIsrecursive(v bool) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["isrecursive"] = v + return +} + +func (p *ListVpnConnectionsParams) SetKeyword(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["keyword"] = v + return +} + +func (p *ListVpnConnectionsParams) SetListall(v bool) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["listall"] = v + return +} + +func (p *ListVpnConnectionsParams) SetPage(v int) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["page"] = v + return +} + +func (p *ListVpnConnectionsParams) SetPagesize(v int) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["pagesize"] = v + return +} + +func (p *ListVpnConnectionsParams) SetProjectid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["projectid"] = v + return +} + +func (p *ListVpnConnectionsParams) SetVpcid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["vpcid"] = v + return +} + +// You should always use this function to get a new ListVpnConnectionsParams instance, +// as then you are sure you have configured all required params +func (s *VPNService) NewListVpnConnectionsParams() *ListVpnConnectionsParams { + p := &ListVpnConnectionsParams{} + p.p = make(map[string]interface{}) + return p +} + +// This is a courtesy helper function, which in some cases may not work as expected! 
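+// A hypothetical sketch of checking a connection's health by UUID and
+// resetting it when it has dropped (it assumes a *CloudStackClient named cs,
+// created elsewhere, that exposes this service as cs.VPN; the UUID is a
+// placeholder, and the comparison against "Connected" reflects the State
+// strings CloudStack reports, e.g. Pending/Connected/Disconnected/Error):
+//
+//	conn, count, err := cs.VPN.GetVpnConnectionByID("3f8c2d11-...")
+//	if err != nil {
+//		return fmt.Errorf("lookup failed (count %d): %v", count, err)
+//	}
+//	if conn.State != "Connected" {
+//		_, err = cs.VPN.ResetVpnConnection(cs.VPN.NewResetVpnConnectionParams(conn.Id))
+//	}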
+func (s *VPNService) GetVpnConnectionByID(id string) (*VpnConnection, int, error) {
+	p := &ListVpnConnectionsParams{}
+	p.p = make(map[string]interface{})
+
+	p.p["id"] = id
+
+	l, err := s.ListVpnConnections(p)
+	if err != nil {
+		if strings.Contains(err.Error(), fmt.Sprintf(
+			"Invalid parameter id value=%s due to incorrect long value format, "+
+				"or entity does not exist", id)) {
+			return nil, 0, fmt.Errorf("No match found for %s: %+v", id, l)
+		}
+		return nil, -1, err
+	}
+
+	if l.Count == 0 {
+		// If no matches, search all projects
+		p.p["projectid"] = "-1"
+
+		l, err = s.ListVpnConnections(p)
+		if err != nil {
+			if strings.Contains(err.Error(), fmt.Sprintf(
+				"Invalid parameter id value=%s due to incorrect long value format, "+
+					"or entity does not exist", id)) {
+				return nil, 0, fmt.Errorf("No match found for %s: %+v", id, l)
+			}
+			return nil, -1, err
+		}
+	}
+
+	if l.Count == 0 {
+		return nil, l.Count, fmt.Errorf("No match found for %s: %+v", id, l)
+	}
+
+	if l.Count == 1 {
+		return l.VpnConnections[0], l.Count, nil
+	}
+	return nil, l.Count, fmt.Errorf("There is more than one result for VpnConnection UUID: %s!", id)
+}
+
+// Lists site to site vpn connections
+func (s *VPNService) ListVpnConnections(p *ListVpnConnectionsParams) (*ListVpnConnectionsResponse, error) {
+	resp, err := s.cs.newRequest("listVpnConnections", p.toURLValues())
+	if err != nil {
+		return nil, err
+	}
+
+	var r ListVpnConnectionsResponse
+	if err := json.Unmarshal(resp, &r); err != nil {
+		return nil, err
+	}
+	return &r, nil
+}
+
+type ListVpnConnectionsResponse struct {
+	Count          int              `json:"count"`
+	VpnConnections []*VpnConnection `json:"vpnconnection"`
+}
+
+type VpnConnection struct {
+	Account              string `json:"account,omitempty"`
+	Cidrlist             string `json:"cidrlist,omitempty"`
+	Created              string `json:"created,omitempty"`
+	Domain               string `json:"domain,omitempty"`
+	Domainid             string `json:"domainid,omitempty"`
+	Dpd                  bool   `json:"dpd,omitempty"`
+	Esplifetime          int64  `json:"esplifetime,omitempty"`
+	Esppolicy            string `json:"esppolicy,omitempty"`
+	Fordisplay           bool   `json:"fordisplay,omitempty"`
+	Gateway              string `json:"gateway,omitempty"`
+	Id                   string `json:"id,omitempty"`
+	Ikelifetime          int64  `json:"ikelifetime,omitempty"`
+	Ikepolicy            string `json:"ikepolicy,omitempty"`
+	Ipsecpsk             string `json:"ipsecpsk,omitempty"`
+	Passive              bool   `json:"passive,omitempty"`
+	Project              string `json:"project,omitempty"`
+	Projectid            string `json:"projectid,omitempty"`
+	Publicip             string `json:"publicip,omitempty"`
+	Removed              string `json:"removed,omitempty"`
+	S2scustomergatewayid string `json:"s2scustomergatewayid,omitempty"`
+	S2svpngatewayid      string `json:"s2svpngatewayid,omitempty"`
+	State                string `json:"state,omitempty"`
+}
+
+type UpdateVpnConnectionParams struct {
+	p map[string]interface{}
+}
+
+func (p *UpdateVpnConnectionParams) toURLValues() url.Values {
+	u := url.Values{}
+	if p.p == nil {
+		return u
+	}
+	if v, found := p.p["customid"]; found {
+		u.Set("customid", v.(string))
+	}
+	if v, found := p.p["fordisplay"]; found {
+		vv := strconv.FormatBool(v.(bool))
+		u.Set("fordisplay", vv)
+	}
+	if v, found := p.p["id"]; found {
+		u.Set("id", v.(string))
+	}
+	return u
+}
+
+func (p *UpdateVpnConnectionParams) SetCustomid(v string) {
+	if p.p == nil {
+		p.p = make(map[string]interface{})
+	}
+	p.p["customid"] = v
+	return
+}
+
+func (p *UpdateVpnConnectionParams) SetFordisplay(v bool) {
+	if p.p == nil {
+		p.p = make(map[string]interface{})
+	}
+	p.p["fordisplay"] = v
+	return
+}
+
+func (p *UpdateVpnConnectionParams)
SetId(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["id"] = v + return +} + +// You should always use this function to get a new UpdateVpnConnectionParams instance, +// as then you are sure you have configured all required params +func (s *VPNService) NewUpdateVpnConnectionParams(id string) *UpdateVpnConnectionParams { + p := &UpdateVpnConnectionParams{} + p.p = make(map[string]interface{}) + p.p["id"] = id + return p +} + +// Updates site to site vpn connection +func (s *VPNService) UpdateVpnConnection(p *UpdateVpnConnectionParams) (*UpdateVpnConnectionResponse, error) { + resp, err := s.cs.newRequest("updateVpnConnection", p.toURLValues()) + if err != nil { + return nil, err + } + + var r UpdateVpnConnectionResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + + // If we have a async client, we need to wait for the async result + if s.cs.async { + b, err := s.cs.GetAsyncJobResult(r.JobID, s.cs.timeout) + if err != nil { + if err == AsyncTimeoutErr { + return &r, err + } + return nil, err + } + + b, err = getRawValue(b) + if err != nil { + return nil, err + } + + if err := json.Unmarshal(b, &r); err != nil { + return nil, err + } + } + return &r, nil +} + +type UpdateVpnConnectionResponse struct { + JobID string `json:"jobid,omitempty"` + Account string `json:"account,omitempty"` + Cidrlist string `json:"cidrlist,omitempty"` + Created string `json:"created,omitempty"` + Domain string `json:"domain,omitempty"` + Domainid string `json:"domainid,omitempty"` + Dpd bool `json:"dpd,omitempty"` + Esplifetime int64 `json:"esplifetime,omitempty"` + Esppolicy string `json:"esppolicy,omitempty"` + Fordisplay bool `json:"fordisplay,omitempty"` + Gateway string `json:"gateway,omitempty"` + Id string `json:"id,omitempty"` + Ikelifetime int64 `json:"ikelifetime,omitempty"` + Ikepolicy string `json:"ikepolicy,omitempty"` + Ipsecpsk string `json:"ipsecpsk,omitempty"` + Passive bool `json:"passive,omitempty"` + Project string `json:"project,omitempty"` + Projectid string `json:"projectid,omitempty"` + Publicip string `json:"publicip,omitempty"` + Removed string `json:"removed,omitempty"` + S2scustomergatewayid string `json:"s2scustomergatewayid,omitempty"` + S2svpngatewayid string `json:"s2svpngatewayid,omitempty"` + State string `json:"state,omitempty"` +} + +type UpdateVpnGatewayParams struct { + p map[string]interface{} +} + +func (p *UpdateVpnGatewayParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["customid"]; found { + u.Set("customid", v.(string)) + } + if v, found := p.p["fordisplay"]; found { + vv := strconv.FormatBool(v.(bool)) + u.Set("fordisplay", vv) + } + if v, found := p.p["id"]; found { + u.Set("id", v.(string)) + } + return u +} + +func (p *UpdateVpnGatewayParams) SetCustomid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["customid"] = v + return +} + +func (p *UpdateVpnGatewayParams) SetFordisplay(v bool) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["fordisplay"] = v + return +} + +func (p *UpdateVpnGatewayParams) SetId(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["id"] = v + return +} + +// You should always use this function to get a new UpdateVpnGatewayParams instance, +// as then you are sure you have configured all required params +func (s *VPNService) NewUpdateVpnGatewayParams(id string) *UpdateVpnGatewayParams { + p := &UpdateVpnGatewayParams{} + p.p = make(map[string]interface{}) 
+ p.p["id"] = id + return p +} + +// Updates site to site vpn local gateway +func (s *VPNService) UpdateVpnGateway(p *UpdateVpnGatewayParams) (*UpdateVpnGatewayResponse, error) { + resp, err := s.cs.newRequest("updateVpnGateway", p.toURLValues()) + if err != nil { + return nil, err + } + + var r UpdateVpnGatewayResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + + // If we have a async client, we need to wait for the async result + if s.cs.async { + b, err := s.cs.GetAsyncJobResult(r.JobID, s.cs.timeout) + if err != nil { + if err == AsyncTimeoutErr { + return &r, err + } + return nil, err + } + + b, err = getRawValue(b) + if err != nil { + return nil, err + } + + if err := json.Unmarshal(b, &r); err != nil { + return nil, err + } + } + return &r, nil +} + +type UpdateVpnGatewayResponse struct { + JobID string `json:"jobid,omitempty"` + Account string `json:"account,omitempty"` + Domain string `json:"domain,omitempty"` + Domainid string `json:"domainid,omitempty"` + Fordisplay bool `json:"fordisplay,omitempty"` + Id string `json:"id,omitempty"` + Project string `json:"project,omitempty"` + Projectid string `json:"projectid,omitempty"` + Publicip string `json:"publicip,omitempty"` + Removed string `json:"removed,omitempty"` + Vpcid string `json:"vpcid,omitempty"` +} diff --git a/vendor/github.com/xanzy/go-cloudstack/cloudstack/VirtualMachineService.go b/vendor/github.com/xanzy/go-cloudstack/cloudstack/VirtualMachineService.go new file mode 100644 index 000000000..2265e0b91 --- /dev/null +++ b/vendor/github.com/xanzy/go-cloudstack/cloudstack/VirtualMachineService.go @@ -0,0 +1,5323 @@ +// +// Copyright 2014, Sander van Harmelen +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// + +package cloudstack + +import ( + "encoding/json" + "fmt" + "net/url" + "strconv" + "strings" +) + +type DeployVirtualMachineParams struct { + p map[string]interface{} +} + +func (p *DeployVirtualMachineParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["account"]; found { + u.Set("account", v.(string)) + } + if v, found := p.p["affinitygroupids"]; found { + vv := strings.Join(v.([]string), ",") + u.Set("affinitygroupids", vv) + } + if v, found := p.p["affinitygroupnames"]; found { + vv := strings.Join(v.([]string), ",") + u.Set("affinitygroupnames", vv) + } + if v, found := p.p["customid"]; found { + u.Set("customid", v.(string)) + } + if v, found := p.p["deploymentplanner"]; found { + u.Set("deploymentplanner", v.(string)) + } + if v, found := p.p["details"]; found { + i := 0 + for k, vv := range v.(map[string]string) { + u.Set(fmt.Sprintf("details[%d].key", i), k) + u.Set(fmt.Sprintf("details[%d].value", i), vv) + i++ + } + } + if v, found := p.p["diskofferingid"]; found { + u.Set("diskofferingid", v.(string)) + } + if v, found := p.p["displayname"]; found { + u.Set("displayname", v.(string)) + } + if v, found := p.p["displayvm"]; found { + vv := strconv.FormatBool(v.(bool)) + u.Set("displayvm", vv) + } + if v, found := p.p["domainid"]; found { + u.Set("domainid", v.(string)) + } + if v, found := p.p["group"]; found { + u.Set("group", v.(string)) + } + if v, found := p.p["hostid"]; found { + u.Set("hostid", v.(string)) + } + if v, found := p.p["hypervisor"]; found { + u.Set("hypervisor", v.(string)) + } + if v, found := p.p["ip6address"]; found { + u.Set("ip6address", v.(string)) + } + if v, found := p.p["ipaddress"]; found { + u.Set("ipaddress", v.(string)) + } + if v, found := p.p["iptonetworklist"]; found { + i := 0 + for k, vv := range v.(map[string]string) { + u.Set(fmt.Sprintf("iptonetworklist[%d].key", i), k) + u.Set(fmt.Sprintf("iptonetworklist[%d].value", i), vv) + i++ + } + } + if v, found := p.p["keyboard"]; found { + u.Set("keyboard", v.(string)) + } + if v, found := p.p["keypair"]; found { + u.Set("keypair", v.(string)) + } + if v, found := p.p["name"]; found { + u.Set("name", v.(string)) + } + if v, found := p.p["networkids"]; found { + vv := strings.Join(v.([]string), ",") + u.Set("networkids", vv) + } + if v, found := p.p["projectid"]; found { + u.Set("projectid", v.(string)) + } + if v, found := p.p["rootdisksize"]; found { + vv := strconv.FormatInt(v.(int64), 10) + u.Set("rootdisksize", vv) + } + if v, found := p.p["securitygroupids"]; found { + vv := strings.Join(v.([]string), ",") + u.Set("securitygroupids", vv) + } + if v, found := p.p["securitygroupnames"]; found { + vv := strings.Join(v.([]string), ",") + u.Set("securitygroupnames", vv) + } + if v, found := p.p["serviceofferingid"]; found { + u.Set("serviceofferingid", v.(string)) + } + if v, found := p.p["size"]; found { + vv := strconv.FormatInt(v.(int64), 10) + u.Set("size", vv) + } + if v, found := p.p["startvm"]; found { + vv := strconv.FormatBool(v.(bool)) + u.Set("startvm", vv) + } + if v, found := p.p["templateid"]; found { + u.Set("templateid", v.(string)) + } + if v, found := p.p["userdata"]; found { + u.Set("userdata", v.(string)) + } + if v, found := p.p["zoneid"]; found { + u.Set("zoneid", v.(string)) + } + return u +} + +func (p *DeployVirtualMachineParams) SetAccount(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["account"] = v + return +} + +func (p *DeployVirtualMachineParams) SetAffinitygroupids(v 
[]string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["affinitygroupids"] = v + return +} + +func (p *DeployVirtualMachineParams) SetAffinitygroupnames(v []string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["affinitygroupnames"] = v + return +} + +func (p *DeployVirtualMachineParams) SetCustomid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["customid"] = v + return +} + +func (p *DeployVirtualMachineParams) SetDeploymentplanner(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["deploymentplanner"] = v + return +} + +func (p *DeployVirtualMachineParams) SetDetails(v map[string]string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["details"] = v + return +} + +func (p *DeployVirtualMachineParams) SetDiskofferingid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["diskofferingid"] = v + return +} + +func (p *DeployVirtualMachineParams) SetDisplayname(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["displayname"] = v + return +} + +func (p *DeployVirtualMachineParams) SetDisplayvm(v bool) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["displayvm"] = v + return +} + +func (p *DeployVirtualMachineParams) SetDomainid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["domainid"] = v + return +} + +func (p *DeployVirtualMachineParams) SetGroup(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["group"] = v + return +} + +func (p *DeployVirtualMachineParams) SetHostid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["hostid"] = v + return +} + +func (p *DeployVirtualMachineParams) SetHypervisor(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["hypervisor"] = v + return +} + +func (p *DeployVirtualMachineParams) SetIp6address(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["ip6address"] = v + return +} + +func (p *DeployVirtualMachineParams) SetIpaddress(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["ipaddress"] = v + return +} + +func (p *DeployVirtualMachineParams) SetIptonetworklist(v map[string]string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["iptonetworklist"] = v + return +} + +func (p *DeployVirtualMachineParams) SetKeyboard(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["keyboard"] = v + return +} + +func (p *DeployVirtualMachineParams) SetKeypair(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["keypair"] = v + return +} + +func (p *DeployVirtualMachineParams) SetName(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["name"] = v + return +} + +func (p *DeployVirtualMachineParams) SetNetworkids(v []string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["networkids"] = v + return +} + +func (p *DeployVirtualMachineParams) SetProjectid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["projectid"] = v + return +} + +func (p *DeployVirtualMachineParams) SetRootdisksize(v int64) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["rootdisksize"] = v + return +} + +func (p *DeployVirtualMachineParams) SetSecuritygroupids(v []string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["securitygroupids"] = v + return +} + +func (p 
*DeployVirtualMachineParams) SetSecuritygroupnames(v []string) {
+	if p.p == nil {
+		p.p = make(map[string]interface{})
+	}
+	p.p["securitygroupnames"] = v
+	return
+}
+
+func (p *DeployVirtualMachineParams) SetServiceofferingid(v string) {
+	if p.p == nil {
+		p.p = make(map[string]interface{})
+	}
+	p.p["serviceofferingid"] = v
+	return
+}
+
+func (p *DeployVirtualMachineParams) SetSize(v int64) {
+	if p.p == nil {
+		p.p = make(map[string]interface{})
+	}
+	p.p["size"] = v
+	return
+}
+
+func (p *DeployVirtualMachineParams) SetStartvm(v bool) {
+	if p.p == nil {
+		p.p = make(map[string]interface{})
+	}
+	p.p["startvm"] = v
+	return
+}
+
+func (p *DeployVirtualMachineParams) SetTemplateid(v string) {
+	if p.p == nil {
+		p.p = make(map[string]interface{})
+	}
+	p.p["templateid"] = v
+	return
+}
+
+func (p *DeployVirtualMachineParams) SetUserdata(v string) {
+	if p.p == nil {
+		p.p = make(map[string]interface{})
+	}
+	p.p["userdata"] = v
+	return
+}
+
+func (p *DeployVirtualMachineParams) SetZoneid(v string) {
+	if p.p == nil {
+		p.p = make(map[string]interface{})
+	}
+	p.p["zoneid"] = v
+	return
+}
+
+// You should always use this function to get a new DeployVirtualMachineParams instance,
+// as then you are sure you have configured all required params
+func (s *VirtualMachineService) NewDeployVirtualMachineParams(serviceofferingid string, templateid string, zoneid string) *DeployVirtualMachineParams {
+	p := &DeployVirtualMachineParams{}
+	p.p = make(map[string]interface{})
+	p.p["serviceofferingid"] = serviceofferingid
+	p.p["templateid"] = templateid
+	p.p["zoneid"] = zoneid
+	return p
+}
+
+// Creates and automatically starts a virtual machine based on a service offering, disk offering, and template.
+func (s *VirtualMachineService) DeployVirtualMachine(p *DeployVirtualMachineParams) (*DeployVirtualMachineResponse, error) {
+	resp, err := s.cs.newRequest("deployVirtualMachine", p.toURLValues())
+	if err != nil {
+		return nil, err
+	}
+
+	var r DeployVirtualMachineResponse
+	if err := json.Unmarshal(resp, &r); err != nil {
+		return nil, err
+	}
+
+	// If we have an async client, we need to wait for the async result
+	if s.cs.async {
+		b, err := s.cs.GetAsyncJobResult(r.JobID, s.cs.timeout)
+		if err != nil {
+			if err == AsyncTimeoutErr {
+				return &r, err
+			}
+			return nil, err
+		}
+
+		b, err = getRawValue(b)
+		if err != nil {
+			return nil, err
+		}
+
+		if err := json.Unmarshal(b, &r); err != nil {
+			return nil, err
+		}
+	}
+	return &r, nil
+}
+
+type DeployVirtualMachineResponse struct {
+	JobID string `json:"jobid,omitempty"`
+	Account string `json:"account,omitempty"`
+	Affinitygroup []struct {
+		Account string `json:"account,omitempty"`
+		Description string `json:"description,omitempty"`
+		Domain string `json:"domain,omitempty"`
+		Domainid string `json:"domainid,omitempty"`
+		Id string `json:"id,omitempty"`
+		Name string `json:"name,omitempty"`
+		Type string `json:"type,omitempty"`
+		VirtualmachineIds []string `json:"virtualmachineIds,omitempty"`
+	} `json:"affinitygroup,omitempty"`
+	Cpunumber int `json:"cpunumber,omitempty"`
+	Cpuspeed int `json:"cpuspeed,omitempty"`
+	Cpuused string `json:"cpuused,omitempty"`
+	Created string `json:"created,omitempty"`
+	Details map[string]string `json:"details,omitempty"`
+	Diskioread int64 `json:"diskioread,omitempty"`
+	Diskiowrite int64 `json:"diskiowrite,omitempty"`
+	Diskkbsread int64 `json:"diskkbsread,omitempty"`
+	Diskkbswrite int64 `json:"diskkbswrite,omitempty"`
+	Diskofferingid string `json:"diskofferingid,omitempty"`
+	Diskofferingname
string `json:"diskofferingname,omitempty"` + Displayname string `json:"displayname,omitempty"` + Displayvm bool `json:"displayvm,omitempty"` + Domain string `json:"domain,omitempty"` + Domainid string `json:"domainid,omitempty"` + Forvirtualnetwork bool `json:"forvirtualnetwork,omitempty"` + Group string `json:"group,omitempty"` + Groupid string `json:"groupid,omitempty"` + Guestosid string `json:"guestosid,omitempty"` + Haenable bool `json:"haenable,omitempty"` + Hostid string `json:"hostid,omitempty"` + Hostname string `json:"hostname,omitempty"` + Hypervisor string `json:"hypervisor,omitempty"` + Id string `json:"id,omitempty"` + Instancename string `json:"instancename,omitempty"` + Isdynamicallyscalable bool `json:"isdynamicallyscalable,omitempty"` + Isodisplaytext string `json:"isodisplaytext,omitempty"` + Isoid string `json:"isoid,omitempty"` + Isoname string `json:"isoname,omitempty"` + Keypair string `json:"keypair,omitempty"` + Memory int `json:"memory,omitempty"` + Name string `json:"name,omitempty"` + Networkkbsread int64 `json:"networkkbsread,omitempty"` + Networkkbswrite int64 `json:"networkkbswrite,omitempty"` + Nic []struct { + Broadcasturi string `json:"broadcasturi,omitempty"` + Deviceid string `json:"deviceid,omitempty"` + Gateway string `json:"gateway,omitempty"` + Id string `json:"id,omitempty"` + Ip6address string `json:"ip6address,omitempty"` + Ip6cidr string `json:"ip6cidr,omitempty"` + Ip6gateway string `json:"ip6gateway,omitempty"` + Ipaddress string `json:"ipaddress,omitempty"` + Isdefault bool `json:"isdefault,omitempty"` + Isolationuri string `json:"isolationuri,omitempty"` + Macaddress string `json:"macaddress,omitempty"` + Netmask string `json:"netmask,omitempty"` + Networkid string `json:"networkid,omitempty"` + Networkname string `json:"networkname,omitempty"` + Secondaryip []struct { + Id string `json:"id,omitempty"` + Ipaddress string `json:"ipaddress,omitempty"` + } `json:"secondaryip,omitempty"` + Traffictype string `json:"traffictype,omitempty"` + Type string `json:"type,omitempty"` + Virtualmachineid string `json:"virtualmachineid,omitempty"` + } `json:"nic,omitempty"` + Ostypeid int64 `json:"ostypeid,omitempty"` + Password string `json:"password,omitempty"` + Passwordenabled bool `json:"passwordenabled,omitempty"` + Project string `json:"project,omitempty"` + Projectid string `json:"projectid,omitempty"` + Publicip string `json:"publicip,omitempty"` + Publicipid string `json:"publicipid,omitempty"` + Rootdeviceid int64 `json:"rootdeviceid,omitempty"` + Rootdevicetype string `json:"rootdevicetype,omitempty"` + Securitygroup []struct { + Account string `json:"account,omitempty"` + Description string `json:"description,omitempty"` + Domain string `json:"domain,omitempty"` + Domainid string `json:"domainid,omitempty"` + Egressrule []struct { + Account string `json:"account,omitempty"` + Cidr string `json:"cidr,omitempty"` + Endport int `json:"endport,omitempty"` + Icmpcode int `json:"icmpcode,omitempty"` + Icmptype int `json:"icmptype,omitempty"` + Protocol string `json:"protocol,omitempty"` + Ruleid string `json:"ruleid,omitempty"` + Securitygroupname string `json:"securitygroupname,omitempty"` + Startport int `json:"startport,omitempty"` + Tags []struct { + Account string `json:"account,omitempty"` + Customer string `json:"customer,omitempty"` + Domain string `json:"domain,omitempty"` + Domainid string `json:"domainid,omitempty"` + Key string `json:"key,omitempty"` + Project string `json:"project,omitempty"` + Projectid string 
`json:"projectid,omitempty"` + Resourceid string `json:"resourceid,omitempty"` + Resourcetype string `json:"resourcetype,omitempty"` + Value string `json:"value,omitempty"` + } `json:"tags,omitempty"` + } `json:"egressrule,omitempty"` + Id string `json:"id,omitempty"` + Ingressrule []struct { + Account string `json:"account,omitempty"` + Cidr string `json:"cidr,omitempty"` + Endport int `json:"endport,omitempty"` + Icmpcode int `json:"icmpcode,omitempty"` + Icmptype int `json:"icmptype,omitempty"` + Protocol string `json:"protocol,omitempty"` + Ruleid string `json:"ruleid,omitempty"` + Securitygroupname string `json:"securitygroupname,omitempty"` + Startport int `json:"startport,omitempty"` + Tags []struct { + Account string `json:"account,omitempty"` + Customer string `json:"customer,omitempty"` + Domain string `json:"domain,omitempty"` + Domainid string `json:"domainid,omitempty"` + Key string `json:"key,omitempty"` + Project string `json:"project,omitempty"` + Projectid string `json:"projectid,omitempty"` + Resourceid string `json:"resourceid,omitempty"` + Resourcetype string `json:"resourcetype,omitempty"` + Value string `json:"value,omitempty"` + } `json:"tags,omitempty"` + } `json:"ingressrule,omitempty"` + Name string `json:"name,omitempty"` + Project string `json:"project,omitempty"` + Projectid string `json:"projectid,omitempty"` + Tags []struct { + Account string `json:"account,omitempty"` + Customer string `json:"customer,omitempty"` + Domain string `json:"domain,omitempty"` + Domainid string `json:"domainid,omitempty"` + Key string `json:"key,omitempty"` + Project string `json:"project,omitempty"` + Projectid string `json:"projectid,omitempty"` + Resourceid string `json:"resourceid,omitempty"` + Resourcetype string `json:"resourcetype,omitempty"` + Value string `json:"value,omitempty"` + } `json:"tags,omitempty"` + } `json:"securitygroup,omitempty"` + Serviceofferingid string `json:"serviceofferingid,omitempty"` + Serviceofferingname string `json:"serviceofferingname,omitempty"` + Servicestate string `json:"servicestate,omitempty"` + State string `json:"state,omitempty"` + Tags []struct { + Account string `json:"account,omitempty"` + Customer string `json:"customer,omitempty"` + Domain string `json:"domain,omitempty"` + Domainid string `json:"domainid,omitempty"` + Key string `json:"key,omitempty"` + Project string `json:"project,omitempty"` + Projectid string `json:"projectid,omitempty"` + Resourceid string `json:"resourceid,omitempty"` + Resourcetype string `json:"resourcetype,omitempty"` + Value string `json:"value,omitempty"` + } `json:"tags,omitempty"` + Templatedisplaytext string `json:"templatedisplaytext,omitempty"` + Templateid string `json:"templateid,omitempty"` + Templatename string `json:"templatename,omitempty"` + Vgpu string `json:"vgpu,omitempty"` + Zoneid string `json:"zoneid,omitempty"` + Zonename string `json:"zonename,omitempty"` +} + +type DestroyVirtualMachineParams struct { + p map[string]interface{} +} + +func (p *DestroyVirtualMachineParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["expunge"]; found { + vv := strconv.FormatBool(v.(bool)) + u.Set("expunge", vv) + } + if v, found := p.p["id"]; found { + u.Set("id", v.(string)) + } + return u +} + +func (p *DestroyVirtualMachineParams) SetExpunge(v bool) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["expunge"] = v + return +} + +func (p *DestroyVirtualMachineParams) SetId(v string) { + if p.p == nil { + p.p = 
make(map[string]interface{})
+	}
+	p.p["id"] = v
+	return
+}
+
+// You should always use this function to get a new DestroyVirtualMachineParams instance,
+// as then you are sure you have configured all required params
+func (s *VirtualMachineService) NewDestroyVirtualMachineParams(id string) *DestroyVirtualMachineParams {
+	p := &DestroyVirtualMachineParams{}
+	p.p = make(map[string]interface{})
+	p.p["id"] = id
+	return p
+}
+
+// Destroys a virtual machine. Once destroyed, only the administrator can recover it.
+func (s *VirtualMachineService) DestroyVirtualMachine(p *DestroyVirtualMachineParams) (*DestroyVirtualMachineResponse, error) {
+	resp, err := s.cs.newRequest("destroyVirtualMachine", p.toURLValues())
+	if err != nil {
+		return nil, err
+	}
+
+	var r DestroyVirtualMachineResponse
+	if err := json.Unmarshal(resp, &r); err != nil {
+		return nil, err
+	}
+
+	// If we have an async client, we need to wait for the async result
+	if s.cs.async {
+		b, err := s.cs.GetAsyncJobResult(r.JobID, s.cs.timeout)
+		if err != nil {
+			if err == AsyncTimeoutErr {
+				return &r, err
+			}
+			return nil, err
+		}
+
+		b, err = getRawValue(b)
+		if err != nil {
+			return nil, err
+		}
+
+		if err := json.Unmarshal(b, &r); err != nil {
+			return nil, err
+		}
+	}
+	return &r, nil
+}
+
+type DestroyVirtualMachineResponse struct {
+	JobID string `json:"jobid,omitempty"`
+	Account string `json:"account,omitempty"`
+	Affinitygroup []struct {
+		Account string `json:"account,omitempty"`
+		Description string `json:"description,omitempty"`
+		Domain string `json:"domain,omitempty"`
+		Domainid string `json:"domainid,omitempty"`
+		Id string `json:"id,omitempty"`
+		Name string `json:"name,omitempty"`
+		Type string `json:"type,omitempty"`
+		VirtualmachineIds []string `json:"virtualmachineIds,omitempty"`
+	} `json:"affinitygroup,omitempty"`
+	Cpunumber int `json:"cpunumber,omitempty"`
+	Cpuspeed int `json:"cpuspeed,omitempty"`
+	Cpuused string `json:"cpuused,omitempty"`
+	Created string `json:"created,omitempty"`
+	Details map[string]string `json:"details,omitempty"`
+	Diskioread int64 `json:"diskioread,omitempty"`
+	Diskiowrite int64 `json:"diskiowrite,omitempty"`
+	Diskkbsread int64 `json:"diskkbsread,omitempty"`
+	Diskkbswrite int64 `json:"diskkbswrite,omitempty"`
+	Diskofferingid string `json:"diskofferingid,omitempty"`
+	Diskofferingname string `json:"diskofferingname,omitempty"`
+	Displayname string `json:"displayname,omitempty"`
+	Displayvm bool `json:"displayvm,omitempty"`
+	Domain string `json:"domain,omitempty"`
+	Domainid string `json:"domainid,omitempty"`
+	Forvirtualnetwork bool `json:"forvirtualnetwork,omitempty"`
+	Group string `json:"group,omitempty"`
+	Groupid string `json:"groupid,omitempty"`
+	Guestosid string `json:"guestosid,omitempty"`
+	Haenable bool `json:"haenable,omitempty"`
+	Hostid string `json:"hostid,omitempty"`
+	Hostname string `json:"hostname,omitempty"`
+	Hypervisor string `json:"hypervisor,omitempty"`
+	Id string `json:"id,omitempty"`
+	Instancename string `json:"instancename,omitempty"`
+	Isdynamicallyscalable bool `json:"isdynamicallyscalable,omitempty"`
+	Isodisplaytext string `json:"isodisplaytext,omitempty"`
+	Isoid string `json:"isoid,omitempty"`
+	Isoname string `json:"isoname,omitempty"`
+	Keypair string `json:"keypair,omitempty"`
+	Memory int `json:"memory,omitempty"`
+	Name string `json:"name,omitempty"`
+	Networkkbsread int64 `json:"networkkbsread,omitempty"`
+	Networkkbswrite int64 `json:"networkkbswrite,omitempty"`
+	Nic []struct {
+		Broadcasturi string
`json:"broadcasturi,omitempty"` + Deviceid string `json:"deviceid,omitempty"` + Gateway string `json:"gateway,omitempty"` + Id string `json:"id,omitempty"` + Ip6address string `json:"ip6address,omitempty"` + Ip6cidr string `json:"ip6cidr,omitempty"` + Ip6gateway string `json:"ip6gateway,omitempty"` + Ipaddress string `json:"ipaddress,omitempty"` + Isdefault bool `json:"isdefault,omitempty"` + Isolationuri string `json:"isolationuri,omitempty"` + Macaddress string `json:"macaddress,omitempty"` + Netmask string `json:"netmask,omitempty"` + Networkid string `json:"networkid,omitempty"` + Networkname string `json:"networkname,omitempty"` + Secondaryip []struct { + Id string `json:"id,omitempty"` + Ipaddress string `json:"ipaddress,omitempty"` + } `json:"secondaryip,omitempty"` + Traffictype string `json:"traffictype,omitempty"` + Type string `json:"type,omitempty"` + Virtualmachineid string `json:"virtualmachineid,omitempty"` + } `json:"nic,omitempty"` + Ostypeid int64 `json:"ostypeid,omitempty"` + Password string `json:"password,omitempty"` + Passwordenabled bool `json:"passwordenabled,omitempty"` + Project string `json:"project,omitempty"` + Projectid string `json:"projectid,omitempty"` + Publicip string `json:"publicip,omitempty"` + Publicipid string `json:"publicipid,omitempty"` + Rootdeviceid int64 `json:"rootdeviceid,omitempty"` + Rootdevicetype string `json:"rootdevicetype,omitempty"` + Securitygroup []struct { + Account string `json:"account,omitempty"` + Description string `json:"description,omitempty"` + Domain string `json:"domain,omitempty"` + Domainid string `json:"domainid,omitempty"` + Egressrule []struct { + Account string `json:"account,omitempty"` + Cidr string `json:"cidr,omitempty"` + Endport int `json:"endport,omitempty"` + Icmpcode int `json:"icmpcode,omitempty"` + Icmptype int `json:"icmptype,omitempty"` + Protocol string `json:"protocol,omitempty"` + Ruleid string `json:"ruleid,omitempty"` + Securitygroupname string `json:"securitygroupname,omitempty"` + Startport int `json:"startport,omitempty"` + Tags []struct { + Account string `json:"account,omitempty"` + Customer string `json:"customer,omitempty"` + Domain string `json:"domain,omitempty"` + Domainid string `json:"domainid,omitempty"` + Key string `json:"key,omitempty"` + Project string `json:"project,omitempty"` + Projectid string `json:"projectid,omitempty"` + Resourceid string `json:"resourceid,omitempty"` + Resourcetype string `json:"resourcetype,omitempty"` + Value string `json:"value,omitempty"` + } `json:"tags,omitempty"` + } `json:"egressrule,omitempty"` + Id string `json:"id,omitempty"` + Ingressrule []struct { + Account string `json:"account,omitempty"` + Cidr string `json:"cidr,omitempty"` + Endport int `json:"endport,omitempty"` + Icmpcode int `json:"icmpcode,omitempty"` + Icmptype int `json:"icmptype,omitempty"` + Protocol string `json:"protocol,omitempty"` + Ruleid string `json:"ruleid,omitempty"` + Securitygroupname string `json:"securitygroupname,omitempty"` + Startport int `json:"startport,omitempty"` + Tags []struct { + Account string `json:"account,omitempty"` + Customer string `json:"customer,omitempty"` + Domain string `json:"domain,omitempty"` + Domainid string `json:"domainid,omitempty"` + Key string `json:"key,omitempty"` + Project string `json:"project,omitempty"` + Projectid string `json:"projectid,omitempty"` + Resourceid string `json:"resourceid,omitempty"` + Resourcetype string `json:"resourcetype,omitempty"` + Value string `json:"value,omitempty"` + } `json:"tags,omitempty"` + } 
`json:"ingressrule,omitempty"` + Name string `json:"name,omitempty"` + Project string `json:"project,omitempty"` + Projectid string `json:"projectid,omitempty"` + Tags []struct { + Account string `json:"account,omitempty"` + Customer string `json:"customer,omitempty"` + Domain string `json:"domain,omitempty"` + Domainid string `json:"domainid,omitempty"` + Key string `json:"key,omitempty"` + Project string `json:"project,omitempty"` + Projectid string `json:"projectid,omitempty"` + Resourceid string `json:"resourceid,omitempty"` + Resourcetype string `json:"resourcetype,omitempty"` + Value string `json:"value,omitempty"` + } `json:"tags,omitempty"` + } `json:"securitygroup,omitempty"` + Serviceofferingid string `json:"serviceofferingid,omitempty"` + Serviceofferingname string `json:"serviceofferingname,omitempty"` + Servicestate string `json:"servicestate,omitempty"` + State string `json:"state,omitempty"` + Tags []struct { + Account string `json:"account,omitempty"` + Customer string `json:"customer,omitempty"` + Domain string `json:"domain,omitempty"` + Domainid string `json:"domainid,omitempty"` + Key string `json:"key,omitempty"` + Project string `json:"project,omitempty"` + Projectid string `json:"projectid,omitempty"` + Resourceid string `json:"resourceid,omitempty"` + Resourcetype string `json:"resourcetype,omitempty"` + Value string `json:"value,omitempty"` + } `json:"tags,omitempty"` + Templatedisplaytext string `json:"templatedisplaytext,omitempty"` + Templateid string `json:"templateid,omitempty"` + Templatename string `json:"templatename,omitempty"` + Vgpu string `json:"vgpu,omitempty"` + Zoneid string `json:"zoneid,omitempty"` + Zonename string `json:"zonename,omitempty"` +} + +type RebootVirtualMachineParams struct { + p map[string]interface{} +} + +func (p *RebootVirtualMachineParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["id"]; found { + u.Set("id", v.(string)) + } + return u +} + +func (p *RebootVirtualMachineParams) SetId(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["id"] = v + return +} + +// You should always use this function to get a new RebootVirtualMachineParams instance, +// as then you are sure you have configured all required params +func (s *VirtualMachineService) NewRebootVirtualMachineParams(id string) *RebootVirtualMachineParams { + p := &RebootVirtualMachineParams{} + p.p = make(map[string]interface{}) + p.p["id"] = id + return p +} + +// Reboots a virtual machine. 
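+//
+// A minimal usage sketch (an illustrative addition, not upstream code), assuming
+// a client built with cloudstack.NewAsyncClient; the endpoint, keys and VM UUID
+// below are placeholders:
+//
+//	cs := cloudstack.NewAsyncClient("https://cloud.example.com/client/api", apiKey, secretKey, true)
+//	p := cs.VirtualMachine.NewRebootVirtualMachineParams("vm-uuid")
+//	r, err := cs.VirtualMachine.RebootVirtualMachine(p)
+//	if err != nil {
+//		log.Fatal(err) // with an async client the call above waits for the reboot job to finish
+//	}
+//	fmt.Println("VM state:", r.State)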
+func (s *VirtualMachineService) RebootVirtualMachine(p *RebootVirtualMachineParams) (*RebootVirtualMachineResponse, error) {
+	resp, err := s.cs.newRequest("rebootVirtualMachine", p.toURLValues())
+	if err != nil {
+		return nil, err
+	}
+
+	var r RebootVirtualMachineResponse
+	if err := json.Unmarshal(resp, &r); err != nil {
+		return nil, err
+	}
+
+	// If we have an async client, we need to wait for the async result
+	if s.cs.async {
+		b, err := s.cs.GetAsyncJobResult(r.JobID, s.cs.timeout)
+		if err != nil {
+			if err == AsyncTimeoutErr {
+				return &r, err
+			}
+			return nil, err
+		}
+
+		b, err = getRawValue(b)
+		if err != nil {
+			return nil, err
+		}
+
+		if err := json.Unmarshal(b, &r); err != nil {
+			return nil, err
+		}
+	}
+	return &r, nil
+}
+
+type RebootVirtualMachineResponse struct {
+	JobID string `json:"jobid,omitempty"`
+	Account string `json:"account,omitempty"`
+	Affinitygroup []struct {
+		Account string `json:"account,omitempty"`
+		Description string `json:"description,omitempty"`
+		Domain string `json:"domain,omitempty"`
+		Domainid string `json:"domainid,omitempty"`
+		Id string `json:"id,omitempty"`
+		Name string `json:"name,omitempty"`
+		Type string `json:"type,omitempty"`
+		VirtualmachineIds []string `json:"virtualmachineIds,omitempty"`
+	} `json:"affinitygroup,omitempty"`
+	Cpunumber int `json:"cpunumber,omitempty"`
+	Cpuspeed int `json:"cpuspeed,omitempty"`
+	Cpuused string `json:"cpuused,omitempty"`
+	Created string `json:"created,omitempty"`
+	Details map[string]string `json:"details,omitempty"`
+	Diskioread int64 `json:"diskioread,omitempty"`
+	Diskiowrite int64 `json:"diskiowrite,omitempty"`
+	Diskkbsread int64 `json:"diskkbsread,omitempty"`
+	Diskkbswrite int64 `json:"diskkbswrite,omitempty"`
+	Diskofferingid string `json:"diskofferingid,omitempty"`
+	Diskofferingname string `json:"diskofferingname,omitempty"`
+	Displayname string `json:"displayname,omitempty"`
+	Displayvm bool `json:"displayvm,omitempty"`
+	Domain string `json:"domain,omitempty"`
+	Domainid string `json:"domainid,omitempty"`
+	Forvirtualnetwork bool `json:"forvirtualnetwork,omitempty"`
+	Group string `json:"group,omitempty"`
+	Groupid string `json:"groupid,omitempty"`
+	Guestosid string `json:"guestosid,omitempty"`
+	Haenable bool `json:"haenable,omitempty"`
+	Hostid string `json:"hostid,omitempty"`
+	Hostname string `json:"hostname,omitempty"`
+	Hypervisor string `json:"hypervisor,omitempty"`
+	Id string `json:"id,omitempty"`
+	Instancename string `json:"instancename,omitempty"`
+	Isdynamicallyscalable bool `json:"isdynamicallyscalable,omitempty"`
+	Isodisplaytext string `json:"isodisplaytext,omitempty"`
+	Isoid string `json:"isoid,omitempty"`
+	Isoname string `json:"isoname,omitempty"`
+	Keypair string `json:"keypair,omitempty"`
+	Memory int `json:"memory,omitempty"`
+	Name string `json:"name,omitempty"`
+	Networkkbsread int64 `json:"networkkbsread,omitempty"`
+	Networkkbswrite int64 `json:"networkkbswrite,omitempty"`
+	Nic []struct {
+		Broadcasturi string `json:"broadcasturi,omitempty"`
+		Deviceid string `json:"deviceid,omitempty"`
+		Gateway string `json:"gateway,omitempty"`
+		Id string `json:"id,omitempty"`
+		Ip6address string `json:"ip6address,omitempty"`
+		Ip6cidr string `json:"ip6cidr,omitempty"`
+		Ip6gateway string `json:"ip6gateway,omitempty"`
+		Ipaddress string `json:"ipaddress,omitempty"`
+		Isdefault bool `json:"isdefault,omitempty"`
+		Isolationuri string `json:"isolationuri,omitempty"`
+		Macaddress string `json:"macaddress,omitempty"`
+		Netmask string `json:"netmask,omitempty"`
+		Networkid
string `json:"networkid,omitempty"` + Networkname string `json:"networkname,omitempty"` + Secondaryip []struct { + Id string `json:"id,omitempty"` + Ipaddress string `json:"ipaddress,omitempty"` + } `json:"secondaryip,omitempty"` + Traffictype string `json:"traffictype,omitempty"` + Type string `json:"type,omitempty"` + Virtualmachineid string `json:"virtualmachineid,omitempty"` + } `json:"nic,omitempty"` + Ostypeid int64 `json:"ostypeid,omitempty"` + Password string `json:"password,omitempty"` + Passwordenabled bool `json:"passwordenabled,omitempty"` + Project string `json:"project,omitempty"` + Projectid string `json:"projectid,omitempty"` + Publicip string `json:"publicip,omitempty"` + Publicipid string `json:"publicipid,omitempty"` + Rootdeviceid int64 `json:"rootdeviceid,omitempty"` + Rootdevicetype string `json:"rootdevicetype,omitempty"` + Securitygroup []struct { + Account string `json:"account,omitempty"` + Description string `json:"description,omitempty"` + Domain string `json:"domain,omitempty"` + Domainid string `json:"domainid,omitempty"` + Egressrule []struct { + Account string `json:"account,omitempty"` + Cidr string `json:"cidr,omitempty"` + Endport int `json:"endport,omitempty"` + Icmpcode int `json:"icmpcode,omitempty"` + Icmptype int `json:"icmptype,omitempty"` + Protocol string `json:"protocol,omitempty"` + Ruleid string `json:"ruleid,omitempty"` + Securitygroupname string `json:"securitygroupname,omitempty"` + Startport int `json:"startport,omitempty"` + Tags []struct { + Account string `json:"account,omitempty"` + Customer string `json:"customer,omitempty"` + Domain string `json:"domain,omitempty"` + Domainid string `json:"domainid,omitempty"` + Key string `json:"key,omitempty"` + Project string `json:"project,omitempty"` + Projectid string `json:"projectid,omitempty"` + Resourceid string `json:"resourceid,omitempty"` + Resourcetype string `json:"resourcetype,omitempty"` + Value string `json:"value,omitempty"` + } `json:"tags,omitempty"` + } `json:"egressrule,omitempty"` + Id string `json:"id,omitempty"` + Ingressrule []struct { + Account string `json:"account,omitempty"` + Cidr string `json:"cidr,omitempty"` + Endport int `json:"endport,omitempty"` + Icmpcode int `json:"icmpcode,omitempty"` + Icmptype int `json:"icmptype,omitempty"` + Protocol string `json:"protocol,omitempty"` + Ruleid string `json:"ruleid,omitempty"` + Securitygroupname string `json:"securitygroupname,omitempty"` + Startport int `json:"startport,omitempty"` + Tags []struct { + Account string `json:"account,omitempty"` + Customer string `json:"customer,omitempty"` + Domain string `json:"domain,omitempty"` + Domainid string `json:"domainid,omitempty"` + Key string `json:"key,omitempty"` + Project string `json:"project,omitempty"` + Projectid string `json:"projectid,omitempty"` + Resourceid string `json:"resourceid,omitempty"` + Resourcetype string `json:"resourcetype,omitempty"` + Value string `json:"value,omitempty"` + } `json:"tags,omitempty"` + } `json:"ingressrule,omitempty"` + Name string `json:"name,omitempty"` + Project string `json:"project,omitempty"` + Projectid string `json:"projectid,omitempty"` + Tags []struct { + Account string `json:"account,omitempty"` + Customer string `json:"customer,omitempty"` + Domain string `json:"domain,omitempty"` + Domainid string `json:"domainid,omitempty"` + Key string `json:"key,omitempty"` + Project string `json:"project,omitempty"` + Projectid string `json:"projectid,omitempty"` + Resourceid string `json:"resourceid,omitempty"` + Resourcetype string 
`json:"resourcetype,omitempty"` + Value string `json:"value,omitempty"` + } `json:"tags,omitempty"` + } `json:"securitygroup,omitempty"` + Serviceofferingid string `json:"serviceofferingid,omitempty"` + Serviceofferingname string `json:"serviceofferingname,omitempty"` + Servicestate string `json:"servicestate,omitempty"` + State string `json:"state,omitempty"` + Tags []struct { + Account string `json:"account,omitempty"` + Customer string `json:"customer,omitempty"` + Domain string `json:"domain,omitempty"` + Domainid string `json:"domainid,omitempty"` + Key string `json:"key,omitempty"` + Project string `json:"project,omitempty"` + Projectid string `json:"projectid,omitempty"` + Resourceid string `json:"resourceid,omitempty"` + Resourcetype string `json:"resourcetype,omitempty"` + Value string `json:"value,omitempty"` + } `json:"tags,omitempty"` + Templatedisplaytext string `json:"templatedisplaytext,omitempty"` + Templateid string `json:"templateid,omitempty"` + Templatename string `json:"templatename,omitempty"` + Vgpu string `json:"vgpu,omitempty"` + Zoneid string `json:"zoneid,omitempty"` + Zonename string `json:"zonename,omitempty"` +} + +type StartVirtualMachineParams struct { + p map[string]interface{} +} + +func (p *StartVirtualMachineParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["deploymentplanner"]; found { + u.Set("deploymentplanner", v.(string)) + } + if v, found := p.p["hostid"]; found { + u.Set("hostid", v.(string)) + } + if v, found := p.p["id"]; found { + u.Set("id", v.(string)) + } + return u +} + +func (p *StartVirtualMachineParams) SetDeploymentplanner(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["deploymentplanner"] = v + return +} + +func (p *StartVirtualMachineParams) SetHostid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["hostid"] = v + return +} + +func (p *StartVirtualMachineParams) SetId(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["id"] = v + return +} + +// You should always use this function to get a new StartVirtualMachineParams instance, +// as then you are sure you have configured all required params +func (s *VirtualMachineService) NewStartVirtualMachineParams(id string) *StartVirtualMachineParams { + p := &StartVirtualMachineParams{} + p.p = make(map[string]interface{}) + p.p["id"] = id + return p +} + +// Starts a virtual machine. 
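+//
+// A minimal usage sketch (an illustrative addition, not upstream code), assuming
+// a configured async client cs as in the RebootVirtualMachine example; the UUIDs
+// below are placeholders:
+//
+//	p := cs.VirtualMachine.NewStartVirtualMachineParams("vm-uuid")
+//	p.SetHostid("host-uuid") // optional: request a specific destination host
+//	r, err := cs.VirtualMachine.StartVirtualMachine(p)
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	fmt.Println("VM state:", r.State)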
+func (s *VirtualMachineService) StartVirtualMachine(p *StartVirtualMachineParams) (*StartVirtualMachineResponse, error) {
+	resp, err := s.cs.newRequest("startVirtualMachine", p.toURLValues())
+	if err != nil {
+		return nil, err
+	}
+
+	var r StartVirtualMachineResponse
+	if err := json.Unmarshal(resp, &r); err != nil {
+		return nil, err
+	}
+
+	// If we have an async client, we need to wait for the async result
+	if s.cs.async {
+		b, err := s.cs.GetAsyncJobResult(r.JobID, s.cs.timeout)
+		if err != nil {
+			if err == AsyncTimeoutErr {
+				return &r, err
+			}
+			return nil, err
+		}
+
+		b, err = getRawValue(b)
+		if err != nil {
+			return nil, err
+		}
+
+		if err := json.Unmarshal(b, &r); err != nil {
+			return nil, err
+		}
+	}
+	return &r, nil
+}
+
+type StartVirtualMachineResponse struct {
+	JobID string `json:"jobid,omitempty"`
+	Account string `json:"account,omitempty"`
+	Affinitygroup []struct {
+		Account string `json:"account,omitempty"`
+		Description string `json:"description,omitempty"`
+		Domain string `json:"domain,omitempty"`
+		Domainid string `json:"domainid,omitempty"`
+		Id string `json:"id,omitempty"`
+		Name string `json:"name,omitempty"`
+		Type string `json:"type,omitempty"`
+		VirtualmachineIds []string `json:"virtualmachineIds,omitempty"`
+	} `json:"affinitygroup,omitempty"`
+	Cpunumber int `json:"cpunumber,omitempty"`
+	Cpuspeed int `json:"cpuspeed,omitempty"`
+	Cpuused string `json:"cpuused,omitempty"`
+	Created string `json:"created,omitempty"`
+	Details map[string]string `json:"details,omitempty"`
+	Diskioread int64 `json:"diskioread,omitempty"`
+	Diskiowrite int64 `json:"diskiowrite,omitempty"`
+	Diskkbsread int64 `json:"diskkbsread,omitempty"`
+	Diskkbswrite int64 `json:"diskkbswrite,omitempty"`
+	Diskofferingid string `json:"diskofferingid,omitempty"`
+	Diskofferingname string `json:"diskofferingname,omitempty"`
+	Displayname string `json:"displayname,omitempty"`
+	Displayvm bool `json:"displayvm,omitempty"`
+	Domain string `json:"domain,omitempty"`
+	Domainid string `json:"domainid,omitempty"`
+	Forvirtualnetwork bool `json:"forvirtualnetwork,omitempty"`
+	Group string `json:"group,omitempty"`
+	Groupid string `json:"groupid,omitempty"`
+	Guestosid string `json:"guestosid,omitempty"`
+	Haenable bool `json:"haenable,omitempty"`
+	Hostid string `json:"hostid,omitempty"`
+	Hostname string `json:"hostname,omitempty"`
+	Hypervisor string `json:"hypervisor,omitempty"`
+	Id string `json:"id,omitempty"`
+	Instancename string `json:"instancename,omitempty"`
+	Isdynamicallyscalable bool `json:"isdynamicallyscalable,omitempty"`
+	Isodisplaytext string `json:"isodisplaytext,omitempty"`
+	Isoid string `json:"isoid,omitempty"`
+	Isoname string `json:"isoname,omitempty"`
+	Keypair string `json:"keypair,omitempty"`
+	Memory int `json:"memory,omitempty"`
+	Name string `json:"name,omitempty"`
+	Networkkbsread int64 `json:"networkkbsread,omitempty"`
+	Networkkbswrite int64 `json:"networkkbswrite,omitempty"`
+	Nic []struct {
+		Broadcasturi string `json:"broadcasturi,omitempty"`
+		Deviceid string `json:"deviceid,omitempty"`
+		Gateway string `json:"gateway,omitempty"`
+		Id string `json:"id,omitempty"`
+		Ip6address string `json:"ip6address,omitempty"`
+		Ip6cidr string `json:"ip6cidr,omitempty"`
+		Ip6gateway string `json:"ip6gateway,omitempty"`
+		Ipaddress string `json:"ipaddress,omitempty"`
+		Isdefault bool `json:"isdefault,omitempty"`
+		Isolationuri string `json:"isolationuri,omitempty"`
+		Macaddress string `json:"macaddress,omitempty"`
+		Netmask string `json:"netmask,omitempty"`
+		Networkid string
`json:"networkid,omitempty"` + Networkname string `json:"networkname,omitempty"` + Secondaryip []struct { + Id string `json:"id,omitempty"` + Ipaddress string `json:"ipaddress,omitempty"` + } `json:"secondaryip,omitempty"` + Traffictype string `json:"traffictype,omitempty"` + Type string `json:"type,omitempty"` + Virtualmachineid string `json:"virtualmachineid,omitempty"` + } `json:"nic,omitempty"` + Ostypeid int64 `json:"ostypeid,omitempty"` + Password string `json:"password,omitempty"` + Passwordenabled bool `json:"passwordenabled,omitempty"` + Project string `json:"project,omitempty"` + Projectid string `json:"projectid,omitempty"` + Publicip string `json:"publicip,omitempty"` + Publicipid string `json:"publicipid,omitempty"` + Rootdeviceid int64 `json:"rootdeviceid,omitempty"` + Rootdevicetype string `json:"rootdevicetype,omitempty"` + Securitygroup []struct { + Account string `json:"account,omitempty"` + Description string `json:"description,omitempty"` + Domain string `json:"domain,omitempty"` + Domainid string `json:"domainid,omitempty"` + Egressrule []struct { + Account string `json:"account,omitempty"` + Cidr string `json:"cidr,omitempty"` + Endport int `json:"endport,omitempty"` + Icmpcode int `json:"icmpcode,omitempty"` + Icmptype int `json:"icmptype,omitempty"` + Protocol string `json:"protocol,omitempty"` + Ruleid string `json:"ruleid,omitempty"` + Securitygroupname string `json:"securitygroupname,omitempty"` + Startport int `json:"startport,omitempty"` + Tags []struct { + Account string `json:"account,omitempty"` + Customer string `json:"customer,omitempty"` + Domain string `json:"domain,omitempty"` + Domainid string `json:"domainid,omitempty"` + Key string `json:"key,omitempty"` + Project string `json:"project,omitempty"` + Projectid string `json:"projectid,omitempty"` + Resourceid string `json:"resourceid,omitempty"` + Resourcetype string `json:"resourcetype,omitempty"` + Value string `json:"value,omitempty"` + } `json:"tags,omitempty"` + } `json:"egressrule,omitempty"` + Id string `json:"id,omitempty"` + Ingressrule []struct { + Account string `json:"account,omitempty"` + Cidr string `json:"cidr,omitempty"` + Endport int `json:"endport,omitempty"` + Icmpcode int `json:"icmpcode,omitempty"` + Icmptype int `json:"icmptype,omitempty"` + Protocol string `json:"protocol,omitempty"` + Ruleid string `json:"ruleid,omitempty"` + Securitygroupname string `json:"securitygroupname,omitempty"` + Startport int `json:"startport,omitempty"` + Tags []struct { + Account string `json:"account,omitempty"` + Customer string `json:"customer,omitempty"` + Domain string `json:"domain,omitempty"` + Domainid string `json:"domainid,omitempty"` + Key string `json:"key,omitempty"` + Project string `json:"project,omitempty"` + Projectid string `json:"projectid,omitempty"` + Resourceid string `json:"resourceid,omitempty"` + Resourcetype string `json:"resourcetype,omitempty"` + Value string `json:"value,omitempty"` + } `json:"tags,omitempty"` + } `json:"ingressrule,omitempty"` + Name string `json:"name,omitempty"` + Project string `json:"project,omitempty"` + Projectid string `json:"projectid,omitempty"` + Tags []struct { + Account string `json:"account,omitempty"` + Customer string `json:"customer,omitempty"` + Domain string `json:"domain,omitempty"` + Domainid string `json:"domainid,omitempty"` + Key string `json:"key,omitempty"` + Project string `json:"project,omitempty"` + Projectid string `json:"projectid,omitempty"` + Resourceid string `json:"resourceid,omitempty"` + Resourcetype string 
`json:"resourcetype,omitempty"` + Value string `json:"value,omitempty"` + } `json:"tags,omitempty"` + } `json:"securitygroup,omitempty"` + Serviceofferingid string `json:"serviceofferingid,omitempty"` + Serviceofferingname string `json:"serviceofferingname,omitempty"` + Servicestate string `json:"servicestate,omitempty"` + State string `json:"state,omitempty"` + Tags []struct { + Account string `json:"account,omitempty"` + Customer string `json:"customer,omitempty"` + Domain string `json:"domain,omitempty"` + Domainid string `json:"domainid,omitempty"` + Key string `json:"key,omitempty"` + Project string `json:"project,omitempty"` + Projectid string `json:"projectid,omitempty"` + Resourceid string `json:"resourceid,omitempty"` + Resourcetype string `json:"resourcetype,omitempty"` + Value string `json:"value,omitempty"` + } `json:"tags,omitempty"` + Templatedisplaytext string `json:"templatedisplaytext,omitempty"` + Templateid string `json:"templateid,omitempty"` + Templatename string `json:"templatename,omitempty"` + Vgpu string `json:"vgpu,omitempty"` + Zoneid string `json:"zoneid,omitempty"` + Zonename string `json:"zonename,omitempty"` +} + +type StopVirtualMachineParams struct { + p map[string]interface{} +} + +func (p *StopVirtualMachineParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["forced"]; found { + vv := strconv.FormatBool(v.(bool)) + u.Set("forced", vv) + } + if v, found := p.p["id"]; found { + u.Set("id", v.(string)) + } + return u +} + +func (p *StopVirtualMachineParams) SetForced(v bool) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["forced"] = v + return +} + +func (p *StopVirtualMachineParams) SetId(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["id"] = v + return +} + +// You should always use this function to get a new StopVirtualMachineParams instance, +// as then you are sure you have configured all required params +func (s *VirtualMachineService) NewStopVirtualMachineParams(id string) *StopVirtualMachineParams { + p := &StopVirtualMachineParams{} + p.p = make(map[string]interface{}) + p.p["id"] = id + return p +} + +// Stops a virtual machine. 
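+//
+// A minimal usage sketch (an illustrative addition, not upstream code), assuming
+// a configured async client cs as in the RebootVirtualMachine example; "vm-uuid"
+// is a placeholder:
+//
+//	p := cs.VirtualMachine.NewStopVirtualMachineParams("vm-uuid")
+//	p.SetForced(true) // optional: hard-stop instead of a clean guest shutdown
+//	r, err := cs.VirtualMachine.StopVirtualMachine(p)
+//	if err == cloudstack.AsyncTimeoutErr {
+//		// the stop job is still running server-side; r.JobID identifies it
+//	}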
+func (s *VirtualMachineService) StopVirtualMachine(p *StopVirtualMachineParams) (*StopVirtualMachineResponse, error) {
+	resp, err := s.cs.newRequest("stopVirtualMachine", p.toURLValues())
+	if err != nil {
+		return nil, err
+	}
+
+	var r StopVirtualMachineResponse
+	if err := json.Unmarshal(resp, &r); err != nil {
+		return nil, err
+	}
+
+	// If we have an async client, we need to wait for the async result
+	if s.cs.async {
+		b, err := s.cs.GetAsyncJobResult(r.JobID, s.cs.timeout)
+		if err != nil {
+			if err == AsyncTimeoutErr {
+				return &r, err
+			}
+			return nil, err
+		}
+
+		b, err = getRawValue(b)
+		if err != nil {
+			return nil, err
+		}
+
+		if err := json.Unmarshal(b, &r); err != nil {
+			return nil, err
+		}
+	}
+	return &r, nil
+}
+
+type StopVirtualMachineResponse struct {
+	JobID string `json:"jobid,omitempty"`
+	Account string `json:"account,omitempty"`
+	Affinitygroup []struct {
+		Account string `json:"account,omitempty"`
+		Description string `json:"description,omitempty"`
+		Domain string `json:"domain,omitempty"`
+		Domainid string `json:"domainid,omitempty"`
+		Id string `json:"id,omitempty"`
+		Name string `json:"name,omitempty"`
+		Type string `json:"type,omitempty"`
+		VirtualmachineIds []string `json:"virtualmachineIds,omitempty"`
+	} `json:"affinitygroup,omitempty"`
+	Cpunumber int `json:"cpunumber,omitempty"`
+	Cpuspeed int `json:"cpuspeed,omitempty"`
+	Cpuused string `json:"cpuused,omitempty"`
+	Created string `json:"created,omitempty"`
+	Details map[string]string `json:"details,omitempty"`
+	Diskioread int64 `json:"diskioread,omitempty"`
+	Diskiowrite int64 `json:"diskiowrite,omitempty"`
+	Diskkbsread int64 `json:"diskkbsread,omitempty"`
+	Diskkbswrite int64 `json:"diskkbswrite,omitempty"`
+	Diskofferingid string `json:"diskofferingid,omitempty"`
+	Diskofferingname string `json:"diskofferingname,omitempty"`
+	Displayname string `json:"displayname,omitempty"`
+	Displayvm bool `json:"displayvm,omitempty"`
+	Domain string `json:"domain,omitempty"`
+	Domainid string `json:"domainid,omitempty"`
+	Forvirtualnetwork bool `json:"forvirtualnetwork,omitempty"`
+	Group string `json:"group,omitempty"`
+	Groupid string `json:"groupid,omitempty"`
+	Guestosid string `json:"guestosid,omitempty"`
+	Haenable bool `json:"haenable,omitempty"`
+	Hostid string `json:"hostid,omitempty"`
+	Hostname string `json:"hostname,omitempty"`
+	Hypervisor string `json:"hypervisor,omitempty"`
+	Id string `json:"id,omitempty"`
+	Instancename string `json:"instancename,omitempty"`
+	Isdynamicallyscalable bool `json:"isdynamicallyscalable,omitempty"`
+	Isodisplaytext string `json:"isodisplaytext,omitempty"`
+	Isoid string `json:"isoid,omitempty"`
+	Isoname string `json:"isoname,omitempty"`
+	Keypair string `json:"keypair,omitempty"`
+	Memory int `json:"memory,omitempty"`
+	Name string `json:"name,omitempty"`
+	Networkkbsread int64 `json:"networkkbsread,omitempty"`
+	Networkkbswrite int64 `json:"networkkbswrite,omitempty"`
+	Nic []struct {
+		Broadcasturi string `json:"broadcasturi,omitempty"`
+		Deviceid string `json:"deviceid,omitempty"`
+		Gateway string `json:"gateway,omitempty"`
+		Id string `json:"id,omitempty"`
+		Ip6address string `json:"ip6address,omitempty"`
+		Ip6cidr string `json:"ip6cidr,omitempty"`
+		Ip6gateway string `json:"ip6gateway,omitempty"`
+		Ipaddress string `json:"ipaddress,omitempty"`
+		Isdefault bool `json:"isdefault,omitempty"`
+		Isolationuri string `json:"isolationuri,omitempty"`
+		Macaddress string `json:"macaddress,omitempty"`
+		Netmask string `json:"netmask,omitempty"`
+		Networkid string
`json:"networkid,omitempty"` + Networkname string `json:"networkname,omitempty"` + Secondaryip []struct { + Id string `json:"id,omitempty"` + Ipaddress string `json:"ipaddress,omitempty"` + } `json:"secondaryip,omitempty"` + Traffictype string `json:"traffictype,omitempty"` + Type string `json:"type,omitempty"` + Virtualmachineid string `json:"virtualmachineid,omitempty"` + } `json:"nic,omitempty"` + Ostypeid int64 `json:"ostypeid,omitempty"` + Password string `json:"password,omitempty"` + Passwordenabled bool `json:"passwordenabled,omitempty"` + Project string `json:"project,omitempty"` + Projectid string `json:"projectid,omitempty"` + Publicip string `json:"publicip,omitempty"` + Publicipid string `json:"publicipid,omitempty"` + Rootdeviceid int64 `json:"rootdeviceid,omitempty"` + Rootdevicetype string `json:"rootdevicetype,omitempty"` + Securitygroup []struct { + Account string `json:"account,omitempty"` + Description string `json:"description,omitempty"` + Domain string `json:"domain,omitempty"` + Domainid string `json:"domainid,omitempty"` + Egressrule []struct { + Account string `json:"account,omitempty"` + Cidr string `json:"cidr,omitempty"` + Endport int `json:"endport,omitempty"` + Icmpcode int `json:"icmpcode,omitempty"` + Icmptype int `json:"icmptype,omitempty"` + Protocol string `json:"protocol,omitempty"` + Ruleid string `json:"ruleid,omitempty"` + Securitygroupname string `json:"securitygroupname,omitempty"` + Startport int `json:"startport,omitempty"` + Tags []struct { + Account string `json:"account,omitempty"` + Customer string `json:"customer,omitempty"` + Domain string `json:"domain,omitempty"` + Domainid string `json:"domainid,omitempty"` + Key string `json:"key,omitempty"` + Project string `json:"project,omitempty"` + Projectid string `json:"projectid,omitempty"` + Resourceid string `json:"resourceid,omitempty"` + Resourcetype string `json:"resourcetype,omitempty"` + Value string `json:"value,omitempty"` + } `json:"tags,omitempty"` + } `json:"egressrule,omitempty"` + Id string `json:"id,omitempty"` + Ingressrule []struct { + Account string `json:"account,omitempty"` + Cidr string `json:"cidr,omitempty"` + Endport int `json:"endport,omitempty"` + Icmpcode int `json:"icmpcode,omitempty"` + Icmptype int `json:"icmptype,omitempty"` + Protocol string `json:"protocol,omitempty"` + Ruleid string `json:"ruleid,omitempty"` + Securitygroupname string `json:"securitygroupname,omitempty"` + Startport int `json:"startport,omitempty"` + Tags []struct { + Account string `json:"account,omitempty"` + Customer string `json:"customer,omitempty"` + Domain string `json:"domain,omitempty"` + Domainid string `json:"domainid,omitempty"` + Key string `json:"key,omitempty"` + Project string `json:"project,omitempty"` + Projectid string `json:"projectid,omitempty"` + Resourceid string `json:"resourceid,omitempty"` + Resourcetype string `json:"resourcetype,omitempty"` + Value string `json:"value,omitempty"` + } `json:"tags,omitempty"` + } `json:"ingressrule,omitempty"` + Name string `json:"name,omitempty"` + Project string `json:"project,omitempty"` + Projectid string `json:"projectid,omitempty"` + Tags []struct { + Account string `json:"account,omitempty"` + Customer string `json:"customer,omitempty"` + Domain string `json:"domain,omitempty"` + Domainid string `json:"domainid,omitempty"` + Key string `json:"key,omitempty"` + Project string `json:"project,omitempty"` + Projectid string `json:"projectid,omitempty"` + Resourceid string `json:"resourceid,omitempty"` + Resourcetype string 
`json:"resourcetype,omitempty"` + Value string `json:"value,omitempty"` + } `json:"tags,omitempty"` + } `json:"securitygroup,omitempty"` + Serviceofferingid string `json:"serviceofferingid,omitempty"` + Serviceofferingname string `json:"serviceofferingname,omitempty"` + Servicestate string `json:"servicestate,omitempty"` + State string `json:"state,omitempty"` + Tags []struct { + Account string `json:"account,omitempty"` + Customer string `json:"customer,omitempty"` + Domain string `json:"domain,omitempty"` + Domainid string `json:"domainid,omitempty"` + Key string `json:"key,omitempty"` + Project string `json:"project,omitempty"` + Projectid string `json:"projectid,omitempty"` + Resourceid string `json:"resourceid,omitempty"` + Resourcetype string `json:"resourcetype,omitempty"` + Value string `json:"value,omitempty"` + } `json:"tags,omitempty"` + Templatedisplaytext string `json:"templatedisplaytext,omitempty"` + Templateid string `json:"templateid,omitempty"` + Templatename string `json:"templatename,omitempty"` + Vgpu string `json:"vgpu,omitempty"` + Zoneid string `json:"zoneid,omitempty"` + Zonename string `json:"zonename,omitempty"` +} + +type ResetPasswordForVirtualMachineParams struct { + p map[string]interface{} +} + +func (p *ResetPasswordForVirtualMachineParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["id"]; found { + u.Set("id", v.(string)) + } + return u +} + +func (p *ResetPasswordForVirtualMachineParams) SetId(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["id"] = v + return +} + +// You should always use this function to get a new ResetPasswordForVirtualMachineParams instance, +// as then you are sure you have configured all required params +func (s *VirtualMachineService) NewResetPasswordForVirtualMachineParams(id string) *ResetPasswordForVirtualMachineParams { + p := &ResetPasswordForVirtualMachineParams{} + p.p = make(map[string]interface{}) + p.p["id"] = id + return p +} + +// Resets the password for virtual machine. The virtual machine must be in a "Stopped" state and the template must already support this feature for this command to take effect. 
[async]
+func (s *VirtualMachineService) ResetPasswordForVirtualMachine(p *ResetPasswordForVirtualMachineParams) (*ResetPasswordForVirtualMachineResponse, error) {
+	resp, err := s.cs.newRequest("resetPasswordForVirtualMachine", p.toURLValues())
+	if err != nil {
+		return nil, err
+	}
+
+	var r ResetPasswordForVirtualMachineResponse
+	if err := json.Unmarshal(resp, &r); err != nil {
+		return nil, err
+	}
+
+	// If we have an async client, we need to wait for the async result
+	if s.cs.async {
+		b, err := s.cs.GetAsyncJobResult(r.JobID, s.cs.timeout)
+		if err != nil {
+			if err == AsyncTimeoutErr {
+				return &r, err
+			}
+			return nil, err
+		}
+
+		b, err = getRawValue(b)
+		if err != nil {
+			return nil, err
+		}
+
+		if err := json.Unmarshal(b, &r); err != nil {
+			return nil, err
+		}
+	}
+	return &r, nil
+}
+
+type ResetPasswordForVirtualMachineResponse struct {
+	JobID string `json:"jobid,omitempty"`
+	Account string `json:"account,omitempty"`
+	Affinitygroup []struct {
+		Account string `json:"account,omitempty"`
+		Description string `json:"description,omitempty"`
+		Domain string `json:"domain,omitempty"`
+		Domainid string `json:"domainid,omitempty"`
+		Id string `json:"id,omitempty"`
+		Name string `json:"name,omitempty"`
+		Type string `json:"type,omitempty"`
+		VirtualmachineIds []string `json:"virtualmachineIds,omitempty"`
+	} `json:"affinitygroup,omitempty"`
+	Cpunumber int `json:"cpunumber,omitempty"`
+	Cpuspeed int `json:"cpuspeed,omitempty"`
+	Cpuused string `json:"cpuused,omitempty"`
+	Created string `json:"created,omitempty"`
+	Details map[string]string `json:"details,omitempty"`
+	Diskioread int64 `json:"diskioread,omitempty"`
+	Diskiowrite int64 `json:"diskiowrite,omitempty"`
+	Diskkbsread int64 `json:"diskkbsread,omitempty"`
+	Diskkbswrite int64 `json:"diskkbswrite,omitempty"`
+	Diskofferingid string `json:"diskofferingid,omitempty"`
+	Diskofferingname string `json:"diskofferingname,omitempty"`
+	Displayname string `json:"displayname,omitempty"`
+	Displayvm bool `json:"displayvm,omitempty"`
+	Domain string `json:"domain,omitempty"`
+	Domainid string `json:"domainid,omitempty"`
+	Forvirtualnetwork bool `json:"forvirtualnetwork,omitempty"`
+	Group string `json:"group,omitempty"`
+	Groupid string `json:"groupid,omitempty"`
+	Guestosid string `json:"guestosid,omitempty"`
+	Haenable bool `json:"haenable,omitempty"`
+	Hostid string `json:"hostid,omitempty"`
+	Hostname string `json:"hostname,omitempty"`
+	Hypervisor string `json:"hypervisor,omitempty"`
+	Id string `json:"id,omitempty"`
+	Instancename string `json:"instancename,omitempty"`
+	Isdynamicallyscalable bool `json:"isdynamicallyscalable,omitempty"`
+	Isodisplaytext string `json:"isodisplaytext,omitempty"`
+	Isoid string `json:"isoid,omitempty"`
+	Isoname string `json:"isoname,omitempty"`
+	Keypair string `json:"keypair,omitempty"`
+	Memory int `json:"memory,omitempty"`
+	Name string `json:"name,omitempty"`
+	Networkkbsread int64 `json:"networkkbsread,omitempty"`
+	Networkkbswrite int64 `json:"networkkbswrite,omitempty"`
+	Nic []struct {
+		Broadcasturi string `json:"broadcasturi,omitempty"`
+		Deviceid string `json:"deviceid,omitempty"`
+		Gateway string `json:"gateway,omitempty"`
+		Id string `json:"id,omitempty"`
+		Ip6address string `json:"ip6address,omitempty"`
+		Ip6cidr string `json:"ip6cidr,omitempty"`
+		Ip6gateway string `json:"ip6gateway,omitempty"`
+		Ipaddress string `json:"ipaddress,omitempty"`
+		Isdefault bool `json:"isdefault,omitempty"`
+		Isolationuri string `json:"isolationuri,omitempty"`
+		Macaddress string
`json:"macaddress,omitempty"` + Netmask string `json:"netmask,omitempty"` + Networkid string `json:"networkid,omitempty"` + Networkname string `json:"networkname,omitempty"` + Secondaryip []struct { + Id string `json:"id,omitempty"` + Ipaddress string `json:"ipaddress,omitempty"` + } `json:"secondaryip,omitempty"` + Traffictype string `json:"traffictype,omitempty"` + Type string `json:"type,omitempty"` + Virtualmachineid string `json:"virtualmachineid,omitempty"` + } `json:"nic,omitempty"` + Ostypeid int64 `json:"ostypeid,omitempty"` + Password string `json:"password,omitempty"` + Passwordenabled bool `json:"passwordenabled,omitempty"` + Project string `json:"project,omitempty"` + Projectid string `json:"projectid,omitempty"` + Publicip string `json:"publicip,omitempty"` + Publicipid string `json:"publicipid,omitempty"` + Rootdeviceid int64 `json:"rootdeviceid,omitempty"` + Rootdevicetype string `json:"rootdevicetype,omitempty"` + Securitygroup []struct { + Account string `json:"account,omitempty"` + Description string `json:"description,omitempty"` + Domain string `json:"domain,omitempty"` + Domainid string `json:"domainid,omitempty"` + Egressrule []struct { + Account string `json:"account,omitempty"` + Cidr string `json:"cidr,omitempty"` + Endport int `json:"endport,omitempty"` + Icmpcode int `json:"icmpcode,omitempty"` + Icmptype int `json:"icmptype,omitempty"` + Protocol string `json:"protocol,omitempty"` + Ruleid string `json:"ruleid,omitempty"` + Securitygroupname string `json:"securitygroupname,omitempty"` + Startport int `json:"startport,omitempty"` + Tags []struct { + Account string `json:"account,omitempty"` + Customer string `json:"customer,omitempty"` + Domain string `json:"domain,omitempty"` + Domainid string `json:"domainid,omitempty"` + Key string `json:"key,omitempty"` + Project string `json:"project,omitempty"` + Projectid string `json:"projectid,omitempty"` + Resourceid string `json:"resourceid,omitempty"` + Resourcetype string `json:"resourcetype,omitempty"` + Value string `json:"value,omitempty"` + } `json:"tags,omitempty"` + } `json:"egressrule,omitempty"` + Id string `json:"id,omitempty"` + Ingressrule []struct { + Account string `json:"account,omitempty"` + Cidr string `json:"cidr,omitempty"` + Endport int `json:"endport,omitempty"` + Icmpcode int `json:"icmpcode,omitempty"` + Icmptype int `json:"icmptype,omitempty"` + Protocol string `json:"protocol,omitempty"` + Ruleid string `json:"ruleid,omitempty"` + Securitygroupname string `json:"securitygroupname,omitempty"` + Startport int `json:"startport,omitempty"` + Tags []struct { + Account string `json:"account,omitempty"` + Customer string `json:"customer,omitempty"` + Domain string `json:"domain,omitempty"` + Domainid string `json:"domainid,omitempty"` + Key string `json:"key,omitempty"` + Project string `json:"project,omitempty"` + Projectid string `json:"projectid,omitempty"` + Resourceid string `json:"resourceid,omitempty"` + Resourcetype string `json:"resourcetype,omitempty"` + Value string `json:"value,omitempty"` + } `json:"tags,omitempty"` + } `json:"ingressrule,omitempty"` + Name string `json:"name,omitempty"` + Project string `json:"project,omitempty"` + Projectid string `json:"projectid,omitempty"` + Tags []struct { + Account string `json:"account,omitempty"` + Customer string `json:"customer,omitempty"` + Domain string `json:"domain,omitempty"` + Domainid string `json:"domainid,omitempty"` + Key string `json:"key,omitempty"` + Project string `json:"project,omitempty"` + Projectid string 
`json:"projectid,omitempty"` + Resourceid string `json:"resourceid,omitempty"` + Resourcetype string `json:"resourcetype,omitempty"` + Value string `json:"value,omitempty"` + } `json:"tags,omitempty"` + } `json:"securitygroup,omitempty"` + Serviceofferingid string `json:"serviceofferingid,omitempty"` + Serviceofferingname string `json:"serviceofferingname,omitempty"` + Servicestate string `json:"servicestate,omitempty"` + State string `json:"state,omitempty"` + Tags []struct { + Account string `json:"account,omitempty"` + Customer string `json:"customer,omitempty"` + Domain string `json:"domain,omitempty"` + Domainid string `json:"domainid,omitempty"` + Key string `json:"key,omitempty"` + Project string `json:"project,omitempty"` + Projectid string `json:"projectid,omitempty"` + Resourceid string `json:"resourceid,omitempty"` + Resourcetype string `json:"resourcetype,omitempty"` + Value string `json:"value,omitempty"` + } `json:"tags,omitempty"` + Templatedisplaytext string `json:"templatedisplaytext,omitempty"` + Templateid string `json:"templateid,omitempty"` + Templatename string `json:"templatename,omitempty"` + Vgpu string `json:"vgpu,omitempty"` + Zoneid string `json:"zoneid,omitempty"` + Zonename string `json:"zonename,omitempty"` +} + +type UpdateVirtualMachineParams struct { + p map[string]interface{} +} + +func (p *UpdateVirtualMachineParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["customid"]; found { + u.Set("customid", v.(string)) + } + if v, found := p.p["details"]; found { + i := 0 + for k, vv := range v.(map[string]string) { + u.Set(fmt.Sprintf("details[%d].key", i), k) + u.Set(fmt.Sprintf("details[%d].value", i), vv) + i++ + } + } + if v, found := p.p["displayname"]; found { + u.Set("displayname", v.(string)) + } + if v, found := p.p["displayvm"]; found { + vv := strconv.FormatBool(v.(bool)) + u.Set("displayvm", vv) + } + if v, found := p.p["group"]; found { + u.Set("group", v.(string)) + } + if v, found := p.p["haenable"]; found { + vv := strconv.FormatBool(v.(bool)) + u.Set("haenable", vv) + } + if v, found := p.p["id"]; found { + u.Set("id", v.(string)) + } + if v, found := p.p["isdynamicallyscalable"]; found { + vv := strconv.FormatBool(v.(bool)) + u.Set("isdynamicallyscalable", vv) + } + if v, found := p.p["name"]; found { + u.Set("name", v.(string)) + } + if v, found := p.p["ostypeid"]; found { + u.Set("ostypeid", v.(string)) + } + if v, found := p.p["userdata"]; found { + u.Set("userdata", v.(string)) + } + return u +} + +func (p *UpdateVirtualMachineParams) SetCustomid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["customid"] = v + return +} + +func (p *UpdateVirtualMachineParams) SetDetails(v map[string]string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["details"] = v + return +} + +func (p *UpdateVirtualMachineParams) SetDisplayname(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["displayname"] = v + return +} + +func (p *UpdateVirtualMachineParams) SetDisplayvm(v bool) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["displayvm"] = v + return +} + +func (p *UpdateVirtualMachineParams) SetGroup(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["group"] = v + return +} + +func (p *UpdateVirtualMachineParams) SetHaenable(v bool) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["haenable"] = v + return +} + +func (p *UpdateVirtualMachineParams) SetId(v string) { 
+ if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["id"] = v + return +} + +func (p *UpdateVirtualMachineParams) SetIsdynamicallyscalable(v bool) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["isdynamicallyscalable"] = v + return +} + +func (p *UpdateVirtualMachineParams) SetName(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["name"] = v + return +} + +func (p *UpdateVirtualMachineParams) SetOstypeid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["ostypeid"] = v + return +} + +func (p *UpdateVirtualMachineParams) SetUserdata(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["userdata"] = v + return +} + +// You should always use this function to get a new UpdateVirtualMachineParams instance, +// as then you are sure you have configured all required params +func (s *VirtualMachineService) NewUpdateVirtualMachineParams(id string) *UpdateVirtualMachineParams { + p := &UpdateVirtualMachineParams{} + p.p = make(map[string]interface{}) + p.p["id"] = id + return p +} + +// Updates properties of a virtual machine. The VM has to be stopped and restarted for the new properties to take effect. UpdateVirtualMachine does not first check whether the VM is stopped. Therefore, stop the VM manually before issuing this call. +func (s *VirtualMachineService) UpdateVirtualMachine(p *UpdateVirtualMachineParams) (*UpdateVirtualMachineResponse, error) { + resp, err := s.cs.newRequest("updateVirtualMachine", p.toURLValues()) + if err != nil { + return nil, err + } + + var r UpdateVirtualMachineResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + return &r, nil +} + +type UpdateVirtualMachineResponse struct { + Account string `json:"account,omitempty"` + Affinitygroup []struct { + Account string `json:"account,omitempty"` + Description string `json:"description,omitempty"` + Domain string `json:"domain,omitempty"` + Domainid string `json:"domainid,omitempty"` + Id string `json:"id,omitempty"` + Name string `json:"name,omitempty"` + Type string `json:"type,omitempty"` + VirtualmachineIds []string `json:"virtualmachineIds,omitempty"` + } `json:"affinitygroup,omitempty"` + Cpunumber int `json:"cpunumber,omitempty"` + Cpuspeed int `json:"cpuspeed,omitempty"` + Cpuused string `json:"cpuused,omitempty"` + Created string `json:"created,omitempty"` + Details map[string]string `json:"details,omitempty"` + Diskioread int64 `json:"diskioread,omitempty"` + Diskiowrite int64 `json:"diskiowrite,omitempty"` + Diskkbsread int64 `json:"diskkbsread,omitempty"` + Diskkbswrite int64 `json:"diskkbswrite,omitempty"` + Diskofferingid string `json:"diskofferingid,omitempty"` + Diskofferingname string `json:"diskofferingname,omitempty"` + Displayname string `json:"displayname,omitempty"` + Displayvm bool `json:"displayvm,omitempty"` + Domain string `json:"domain,omitempty"` + Domainid string `json:"domainid,omitempty"` + Forvirtualnetwork bool `json:"forvirtualnetwork,omitempty"` + Group string `json:"group,omitempty"` + Groupid string `json:"groupid,omitempty"` + Guestosid string `json:"guestosid,omitempty"` + Haenable bool `json:"haenable,omitempty"` + Hostid string `json:"hostid,omitempty"` + Hostname string `json:"hostname,omitempty"` + Hypervisor string `json:"hypervisor,omitempty"` + Id string `json:"id,omitempty"` + Instancename string `json:"instancename,omitempty"` + Isdynamicallyscalable bool `json:"isdynamicallyscalable,omitempty"` + Isodisplaytext string 
`json:"isodisplaytext,omitempty"` + Isoid string `json:"isoid,omitempty"` + Isoname string `json:"isoname,omitempty"` + Keypair string `json:"keypair,omitempty"` + Memory int `json:"memory,omitempty"` + Name string `json:"name,omitempty"` + Networkkbsread int64 `json:"networkkbsread,omitempty"` + Networkkbswrite int64 `json:"networkkbswrite,omitempty"` + Nic []struct { + Broadcasturi string `json:"broadcasturi,omitempty"` + Deviceid string `json:"deviceid,omitempty"` + Gateway string `json:"gateway,omitempty"` + Id string `json:"id,omitempty"` + Ip6address string `json:"ip6address,omitempty"` + Ip6cidr string `json:"ip6cidr,omitempty"` + Ip6gateway string `json:"ip6gateway,omitempty"` + Ipaddress string `json:"ipaddress,omitempty"` + Isdefault bool `json:"isdefault,omitempty"` + Isolationuri string `json:"isolationuri,omitempty"` + Macaddress string `json:"macaddress,omitempty"` + Netmask string `json:"netmask,omitempty"` + Networkid string `json:"networkid,omitempty"` + Networkname string `json:"networkname,omitempty"` + Secondaryip []struct { + Id string `json:"id,omitempty"` + Ipaddress string `json:"ipaddress,omitempty"` + } `json:"secondaryip,omitempty"` + Traffictype string `json:"traffictype,omitempty"` + Type string `json:"type,omitempty"` + Virtualmachineid string `json:"virtualmachineid,omitempty"` + } `json:"nic,omitempty"` + Ostypeid int64 `json:"ostypeid,omitempty"` + Password string `json:"password,omitempty"` + Passwordenabled bool `json:"passwordenabled,omitempty"` + Project string `json:"project,omitempty"` + Projectid string `json:"projectid,omitempty"` + Publicip string `json:"publicip,omitempty"` + Publicipid string `json:"publicipid,omitempty"` + Rootdeviceid int64 `json:"rootdeviceid,omitempty"` + Rootdevicetype string `json:"rootdevicetype,omitempty"` + Securitygroup []struct { + Account string `json:"account,omitempty"` + Description string `json:"description,omitempty"` + Domain string `json:"domain,omitempty"` + Domainid string `json:"domainid,omitempty"` + Egressrule []struct { + Account string `json:"account,omitempty"` + Cidr string `json:"cidr,omitempty"` + Endport int `json:"endport,omitempty"` + Icmpcode int `json:"icmpcode,omitempty"` + Icmptype int `json:"icmptype,omitempty"` + Protocol string `json:"protocol,omitempty"` + Ruleid string `json:"ruleid,omitempty"` + Securitygroupname string `json:"securitygroupname,omitempty"` + Startport int `json:"startport,omitempty"` + Tags []struct { + Account string `json:"account,omitempty"` + Customer string `json:"customer,omitempty"` + Domain string `json:"domain,omitempty"` + Domainid string `json:"domainid,omitempty"` + Key string `json:"key,omitempty"` + Project string `json:"project,omitempty"` + Projectid string `json:"projectid,omitempty"` + Resourceid string `json:"resourceid,omitempty"` + Resourcetype string `json:"resourcetype,omitempty"` + Value string `json:"value,omitempty"` + } `json:"tags,omitempty"` + } `json:"egressrule,omitempty"` + Id string `json:"id,omitempty"` + Ingressrule []struct { + Account string `json:"account,omitempty"` + Cidr string `json:"cidr,omitempty"` + Endport int `json:"endport,omitempty"` + Icmpcode int `json:"icmpcode,omitempty"` + Icmptype int `json:"icmptype,omitempty"` + Protocol string `json:"protocol,omitempty"` + Ruleid string `json:"ruleid,omitempty"` + Securitygroupname string `json:"securitygroupname,omitempty"` + Startport int `json:"startport,omitempty"` + Tags []struct { + Account string `json:"account,omitempty"` + Customer string `json:"customer,omitempty"` + 
Domain string `json:"domain,omitempty"`
+				Domainid string `json:"domainid,omitempty"`
+				Key string `json:"key,omitempty"`
+				Project string `json:"project,omitempty"`
+				Projectid string `json:"projectid,omitempty"`
+				Resourceid string `json:"resourceid,omitempty"`
+				Resourcetype string `json:"resourcetype,omitempty"`
+				Value string `json:"value,omitempty"`
+			} `json:"tags,omitempty"`
+		} `json:"ingressrule,omitempty"`
+		Name string `json:"name,omitempty"`
+		Project string `json:"project,omitempty"`
+		Projectid string `json:"projectid,omitempty"`
+		Tags []struct {
+			Account string `json:"account,omitempty"`
+			Customer string `json:"customer,omitempty"`
+			Domain string `json:"domain,omitempty"`
+			Domainid string `json:"domainid,omitempty"`
+			Key string `json:"key,omitempty"`
+			Project string `json:"project,omitempty"`
+			Projectid string `json:"projectid,omitempty"`
+			Resourceid string `json:"resourceid,omitempty"`
+			Resourcetype string `json:"resourcetype,omitempty"`
+			Value string `json:"value,omitempty"`
+		} `json:"tags,omitempty"`
+	} `json:"securitygroup,omitempty"`
+	Serviceofferingid string `json:"serviceofferingid,omitempty"`
+	Serviceofferingname string `json:"serviceofferingname,omitempty"`
+	Servicestate string `json:"servicestate,omitempty"`
+	State string `json:"state,omitempty"`
+	Tags []struct {
+		Account string `json:"account,omitempty"`
+		Customer string `json:"customer,omitempty"`
+		Domain string `json:"domain,omitempty"`
+		Domainid string `json:"domainid,omitempty"`
+		Key string `json:"key,omitempty"`
+		Project string `json:"project,omitempty"`
+		Projectid string `json:"projectid,omitempty"`
+		Resourceid string `json:"resourceid,omitempty"`
+		Resourcetype string `json:"resourcetype,omitempty"`
+		Value string `json:"value,omitempty"`
+	} `json:"tags,omitempty"`
+	Templatedisplaytext string `json:"templatedisplaytext,omitempty"`
+	Templateid string `json:"templateid,omitempty"`
+	Templatename string `json:"templatename,omitempty"`
+	Vgpu string `json:"vgpu,omitempty"`
+	Zoneid string `json:"zoneid,omitempty"`
+	Zonename string `json:"zonename,omitempty"`
+}
+
+type ListVirtualMachinesParams struct {
+	p map[string]interface{}
+}
+
+func (p *ListVirtualMachinesParams) toURLValues() url.Values {
+	u := url.Values{}
+	if p.p == nil {
+		return u
+	}
+	if v, found := p.p["account"]; found {
+		u.Set("account", v.(string))
+	}
+	if v, found := p.p["affinitygroupid"]; found {
+		u.Set("affinitygroupid", v.(string))
+	}
+	if v, found := p.p["details"]; found {
+		vv := strings.Join(v.([]string), ",")
+		u.Set("details", vv)
+	}
+	if v, found := p.p["displayvm"]; found {
+		vv := strconv.FormatBool(v.(bool))
+		u.Set("displayvm", vv)
+	}
+	if v, found := p.p["domainid"]; found {
+		u.Set("domainid", v.(string))
+	}
+	if v, found := p.p["forvirtualnetwork"]; found {
+		vv := strconv.FormatBool(v.(bool))
+		u.Set("forvirtualnetwork", vv)
+	}
+	if v, found := p.p["groupid"]; found {
+		u.Set("groupid", v.(string))
+	}
+	if v, found := p.p["hostid"]; found {
+		u.Set("hostid", v.(string))
+	}
+	if v, found := p.p["hypervisor"]; found {
+		u.Set("hypervisor", v.(string))
+	}
+	if v, found := p.p["id"]; found {
+		u.Set("id", v.(string))
+	}
+	if v, found := p.p["ids"]; found {
+		vv := strings.Join(v.([]string), ",")
+		u.Set("ids", vv)
+	}
+	if v, found := p.p["isoid"]; found {
+		u.Set("isoid", v.(string))
+	}
+	if v, found := p.p["isrecursive"]; found {
+		vv := strconv.FormatBool(v.(bool))
+		u.Set("isrecursive", vv)
+	}
+	if v, found := p.p["keyword"]; found {
+		u.Set("keyword", v.(string))
+	}
+	if v, found := p.p["listall"]; found {
+		vv := strconv.FormatBool(v.(bool))
+		u.Set("listall", vv)
+	}
+	if v, found := p.p["name"]; found {
+		u.Set("name", v.(string))
+	}
+	if v, found := p.p["networkid"]; found {
+		u.Set("networkid", v.(string))
+	}
+	if v, found := p.p["page"]; found {
+		vv := strconv.Itoa(v.(int))
+		u.Set("page", vv)
+	}
+	if v, found := p.p["pagesize"]; found {
+		vv := strconv.Itoa(v.(int))
+		u.Set("pagesize", vv)
+	}
+	if v, found := p.p["podid"]; found {
+		u.Set("podid", v.(string))
+	}
+	if v, found := p.p["projectid"]; found {
+		u.Set("projectid", v.(string))
+	}
+	if v, found := p.p["serviceofferingid"]; found {
+		u.Set("serviceofferingid", v.(string))
+	}
+	if v, found := p.p["state"]; found {
+		u.Set("state", v.(string))
+	}
+	if v, found := p.p["storageid"]; found {
+		u.Set("storageid", v.(string))
+	}
+	if v, found := p.p["tags"]; found {
+		i := 0
+		for k, vv := range v.(map[string]string) {
+			u.Set(fmt.Sprintf("tags[%d].key", i), k)
+			u.Set(fmt.Sprintf("tags[%d].value", i), vv)
+			i++
+		}
+	}
+	if v, found := p.p["templateid"]; found {
+		u.Set("templateid", v.(string))
+	}
+	if v, found := p.p["vpcid"]; found {
+		u.Set("vpcid", v.(string))
+	}
+	if v, found := p.p["zoneid"]; found {
+		u.Set("zoneid", v.(string))
+	}
+	return u
+}
+
+func (p *ListVirtualMachinesParams) SetAccount(v string) {
+	if p.p == nil {
+		p.p = make(map[string]interface{})
+	}
+	p.p["account"] = v
+	return
+}
+
+func (p *ListVirtualMachinesParams) SetAffinitygroupid(v string) {
+	if p.p == nil {
+		p.p = make(map[string]interface{})
+	}
+	p.p["affinitygroupid"] = v
+	return
+}
+
+func (p *ListVirtualMachinesParams) SetDetails(v []string) {
+	if p.p == nil {
+		p.p = make(map[string]interface{})
+	}
+	p.p["details"] = v
+	return
+}
+
+func (p *ListVirtualMachinesParams) SetDisplayvm(v bool) {
+	if p.p == nil {
+		p.p = make(map[string]interface{})
+	}
+	p.p["displayvm"] = v
+	return
+}
+
+func (p *ListVirtualMachinesParams) SetDomainid(v string) {
+	if p.p == nil {
+		p.p = make(map[string]interface{})
+	}
+	p.p["domainid"] = v
+	return
+}
+
+func (p *ListVirtualMachinesParams) SetForvirtualnetwork(v bool) {
+	if p.p == nil {
+		p.p = make(map[string]interface{})
+	}
+	p.p["forvirtualnetwork"] = v
+	return
+}
+
+func (p *ListVirtualMachinesParams) SetGroupid(v string) {
+	if p.p == nil {
+		p.p = make(map[string]interface{})
+	}
+	p.p["groupid"] = v
+	return
+}
+
+func (p *ListVirtualMachinesParams) SetHostid(v string) {
+	if p.p == nil {
+		p.p = make(map[string]interface{})
+	}
+	p.p["hostid"] = v
+	return
+}
+
+func (p *ListVirtualMachinesParams) SetHypervisor(v string) {
+	if p.p == nil {
+		p.p = make(map[string]interface{})
+	}
+	p.p["hypervisor"] = v
+	return
+}
+
+func (p *ListVirtualMachinesParams) SetId(v string) {
+	if p.p == nil {
+		p.p = make(map[string]interface{})
+	}
+	p.p["id"] = v
+	return
+}
+
+func (p *ListVirtualMachinesParams) SetIds(v []string) {
+	if p.p == nil {
+		p.p = make(map[string]interface{})
+	}
+	p.p["ids"] = v
+	return
+}
+
+func (p *ListVirtualMachinesParams) SetIsoid(v string) {
+	if p.p == nil {
+		p.p = make(map[string]interface{})
+	}
+	p.p["isoid"] = v
+	return
+}
+
+func (p *ListVirtualMachinesParams) SetIsrecursive(v bool) {
+	if p.p == nil {
+		p.p = make(map[string]interface{})
+	}
+	p.p["isrecursive"] = v
+	return
+}
+
+func (p
*ListVirtualMachinesParams) SetKeyword(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["keyword"] = v + return +} + +func (p *ListVirtualMachinesParams) SetListall(v bool) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["listall"] = v + return +} + +func (p *ListVirtualMachinesParams) SetName(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["name"] = v + return +} + +func (p *ListVirtualMachinesParams) SetNetworkid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["networkid"] = v + return +} + +func (p *ListVirtualMachinesParams) SetPage(v int) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["page"] = v + return +} + +func (p *ListVirtualMachinesParams) SetPagesize(v int) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["pagesize"] = v + return +} + +func (p *ListVirtualMachinesParams) SetPodid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["podid"] = v + return +} + +func (p *ListVirtualMachinesParams) SetProjectid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["projectid"] = v + return +} + +func (p *ListVirtualMachinesParams) SetServiceofferingid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["serviceofferingid"] = v + return +} + +func (p *ListVirtualMachinesParams) SetState(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["state"] = v + return +} + +func (p *ListVirtualMachinesParams) SetStorageid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["storageid"] = v + return +} + +func (p *ListVirtualMachinesParams) SetTags(v map[string]string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["tags"] = v + return +} + +func (p *ListVirtualMachinesParams) SetTemplateid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["templateid"] = v + return +} + +func (p *ListVirtualMachinesParams) SetVpcid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["vpcid"] = v + return +} + +func (p *ListVirtualMachinesParams) SetZoneid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["zoneid"] = v + return +} + +// You should always use this function to get a new ListVirtualMachinesParams instance, +// as then you are sure you have configured all required params +func (s *VirtualMachineService) NewListVirtualMachinesParams() *ListVirtualMachinesParams { + p := &ListVirtualMachinesParams{} + p.p = make(map[string]interface{}) + return p +} + +// This is a courtesy helper function, which in some cases may not work as expected! 
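The three "courtesy helper" lookups that follow (GetVirtualMachineID, GetVirtualMachineByName, GetVirtualMachineByID) layer name/UUID resolution on top of listVirtualMachines, falling back to a cross-project search; when several VMs match a name, they look for an exact match and error out otherwise. A hedged sketch of caller-side use, reusing a cs client like the one constructed in the earlier sketch:

func mustFindVM(cs *cloudstack.CloudStackClient, name string) (*cloudstack.VirtualMachine, error) {
	// Returns the match plus the raw result count; missing or ambiguous
	// names surface as errors rather than an arbitrary pick.
	vm, count, err := cs.VirtualMachine.GetVirtualMachineByName(name)
	if err != nil {
		return nil, fmt.Errorf("lookup of %q failed (candidates: %d): %v", name, count, err)
	}
	return vm, nil
}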
+func (s *VirtualMachineService) GetVirtualMachineID(name string) (string, error) {
+	p := &ListVirtualMachinesParams{}
+	p.p = make(map[string]interface{})
+
+	p.p["name"] = name
+
+	l, err := s.ListVirtualMachines(p)
+	if err != nil {
+		return "", err
+	}
+
+	if l.Count == 0 {
+		// If no matches, search all projects
+		p.p["projectid"] = "-1"
+
+		l, err = s.ListVirtualMachines(p)
+		if err != nil {
+			return "", err
+		}
+	}
+
+	if l.Count == 0 {
+		return "", fmt.Errorf("No match found for %s: %+v", name, l)
+	}
+
+	if l.Count == 1 {
+		return l.VirtualMachines[0].Id, nil
+	}
+
+	if l.Count > 1 {
+		for _, v := range l.VirtualMachines {
+			if v.Name == name {
+				return v.Id, nil
+			}
+		}
+	}
+	return "", fmt.Errorf("Could not find an exact match for %s: %+v", name, l)
+}
+
+// This is a courtesy helper function, which in some cases may not work as expected!
+func (s *VirtualMachineService) GetVirtualMachineByName(name string) (*VirtualMachine, int, error) {
+	id, err := s.GetVirtualMachineID(name)
+	if err != nil {
+		return nil, -1, err
+	}
+
+	r, count, err := s.GetVirtualMachineByID(id)
+	if err != nil {
+		return nil, count, err
+	}
+	return r, count, nil
+}
+
+// This is a courtesy helper function, which in some cases may not work as expected!
+func (s *VirtualMachineService) GetVirtualMachineByID(id string) (*VirtualMachine, int, error) {
+	p := &ListVirtualMachinesParams{}
+	p.p = make(map[string]interface{})
+
+	p.p["id"] = id
+
+	l, err := s.ListVirtualMachines(p)
+	if err != nil {
+		if strings.Contains(err.Error(), fmt.Sprintf(
+			"Invalid parameter id value=%s due to incorrect long value format, "+
+				"or entity does not exist", id)) {
+			return nil, 0, fmt.Errorf("No match found for %s: %+v", id, l)
+		}
+		return nil, -1, err
+	}
+
+	if l.Count == 0 {
+		// If no matches, search all projects
+		p.p["projectid"] = "-1"
+
+		l, err = s.ListVirtualMachines(p)
+		if err != nil {
+			if strings.Contains(err.Error(), fmt.Sprintf(
+				"Invalid parameter id value=%s due to incorrect long value format, "+
+					"or entity does not exist", id)) {
+				return nil, 0, fmt.Errorf("No match found for %s: %+v", id, l)
+			}
+			return nil, -1, err
+		}
+	}
+
+	if l.Count == 0 {
+		return nil, l.Count, fmt.Errorf("No match found for %s: %+v", id, l)
+	}
+
+	if l.Count == 1 {
+		return l.VirtualMachines[0], l.Count, nil
+	}
+	return nil, l.Count, fmt.Errorf("There is more than one result for VirtualMachine UUID: %s!", id)
+}
+
+// List the virtual machines owned by the account.
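Since listVirtualMachines is paginated server-side, a caller that wants every match needs a page loop around the function below. A sketch under the same assumptions as the earlier examples (in the CloudStack API, page numbering starts at 1 and l.Count is the total across all pages):

func runningVMsInZone(cs *cloudstack.CloudStackClient, zoneid string) ([]*cloudstack.VirtualMachine, error) {
	p := cs.VirtualMachine.NewListVirtualMachinesParams()
	p.SetZoneid(zoneid)
	p.SetState("Running")
	p.SetPagesize(50)

	var vms []*cloudstack.VirtualMachine
	for page := 1; ; page++ {
		p.SetPage(page)
		l, err := cs.VirtualMachine.ListVirtualMachines(p)
		if err != nil {
			return nil, err
		}
		vms = append(vms, l.VirtualMachines...)
		// Stop once the server has no more pages to hand back.
		if len(l.VirtualMachines) == 0 || len(vms) >= l.Count {
			break
		}
	}
	return vms, nil
}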
+func (s *VirtualMachineService) ListVirtualMachines(p *ListVirtualMachinesParams) (*ListVirtualMachinesResponse, error) { + resp, err := s.cs.newRequest("listVirtualMachines", p.toURLValues()) + if err != nil { + return nil, err + } + + var r ListVirtualMachinesResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + return &r, nil +} + +type ListVirtualMachinesResponse struct { + Count int `json:"count"` + VirtualMachines []*VirtualMachine `json:"virtualmachine"` +} + +type VirtualMachine struct { + Account string `json:"account,omitempty"` + Affinitygroup []struct { + Account string `json:"account,omitempty"` + Description string `json:"description,omitempty"` + Domain string `json:"domain,omitempty"` + Domainid string `json:"domainid,omitempty"` + Id string `json:"id,omitempty"` + Name string `json:"name,omitempty"` + Type string `json:"type,omitempty"` + VirtualmachineIds []string `json:"virtualmachineIds,omitempty"` + } `json:"affinitygroup,omitempty"` + Cpunumber int `json:"cpunumber,omitempty"` + Cpuspeed int `json:"cpuspeed,omitempty"` + Cpuused string `json:"cpuused,omitempty"` + Created string `json:"created,omitempty"` + Details map[string]string `json:"details,omitempty"` + Diskioread int64 `json:"diskioread,omitempty"` + Diskiowrite int64 `json:"diskiowrite,omitempty"` + Diskkbsread int64 `json:"diskkbsread,omitempty"` + Diskkbswrite int64 `json:"diskkbswrite,omitempty"` + Diskofferingid string `json:"diskofferingid,omitempty"` + Diskofferingname string `json:"diskofferingname,omitempty"` + Displayname string `json:"displayname,omitempty"` + Displayvm bool `json:"displayvm,omitempty"` + Domain string `json:"domain,omitempty"` + Domainid string `json:"domainid,omitempty"` + Forvirtualnetwork bool `json:"forvirtualnetwork,omitempty"` + Group string `json:"group,omitempty"` + Groupid string `json:"groupid,omitempty"` + Guestosid string `json:"guestosid,omitempty"` + Haenable bool `json:"haenable,omitempty"` + Hostid string `json:"hostid,omitempty"` + Hostname string `json:"hostname,omitempty"` + Hypervisor string `json:"hypervisor,omitempty"` + Id string `json:"id,omitempty"` + Instancename string `json:"instancename,omitempty"` + Isdynamicallyscalable bool `json:"isdynamicallyscalable,omitempty"` + Isodisplaytext string `json:"isodisplaytext,omitempty"` + Isoid string `json:"isoid,omitempty"` + Isoname string `json:"isoname,omitempty"` + Keypair string `json:"keypair,omitempty"` + Memory int `json:"memory,omitempty"` + Name string `json:"name,omitempty"` + Networkkbsread int64 `json:"networkkbsread,omitempty"` + Networkkbswrite int64 `json:"networkkbswrite,omitempty"` + Nic []struct { + Broadcasturi string `json:"broadcasturi,omitempty"` + Deviceid string `json:"deviceid,omitempty"` + Gateway string `json:"gateway,omitempty"` + Id string `json:"id,omitempty"` + Ip6address string `json:"ip6address,omitempty"` + Ip6cidr string `json:"ip6cidr,omitempty"` + Ip6gateway string `json:"ip6gateway,omitempty"` + Ipaddress string `json:"ipaddress,omitempty"` + Isdefault bool `json:"isdefault,omitempty"` + Isolationuri string `json:"isolationuri,omitempty"` + Macaddress string `json:"macaddress,omitempty"` + Netmask string `json:"netmask,omitempty"` + Networkid string `json:"networkid,omitempty"` + Networkname string `json:"networkname,omitempty"` + Secondaryip []struct { + Id string `json:"id,omitempty"` + Ipaddress string `json:"ipaddress,omitempty"` + } `json:"secondaryip,omitempty"` + Traffictype string `json:"traffictype,omitempty"` + Type string 
`json:"type,omitempty"` + Virtualmachineid string `json:"virtualmachineid,omitempty"` + } `json:"nic,omitempty"` + Ostypeid int64 `json:"ostypeid,omitempty"` + Password string `json:"password,omitempty"` + Passwordenabled bool `json:"passwordenabled,omitempty"` + Project string `json:"project,omitempty"` + Projectid string `json:"projectid,omitempty"` + Publicip string `json:"publicip,omitempty"` + Publicipid string `json:"publicipid,omitempty"` + Rootdeviceid int64 `json:"rootdeviceid,omitempty"` + Rootdevicetype string `json:"rootdevicetype,omitempty"` + Securitygroup []struct { + Account string `json:"account,omitempty"` + Description string `json:"description,omitempty"` + Domain string `json:"domain,omitempty"` + Domainid string `json:"domainid,omitempty"` + Egressrule []struct { + Account string `json:"account,omitempty"` + Cidr string `json:"cidr,omitempty"` + Endport int `json:"endport,omitempty"` + Icmpcode int `json:"icmpcode,omitempty"` + Icmptype int `json:"icmptype,omitempty"` + Protocol string `json:"protocol,omitempty"` + Ruleid string `json:"ruleid,omitempty"` + Securitygroupname string `json:"securitygroupname,omitempty"` + Startport int `json:"startport,omitempty"` + Tags []struct { + Account string `json:"account,omitempty"` + Customer string `json:"customer,omitempty"` + Domain string `json:"domain,omitempty"` + Domainid string `json:"domainid,omitempty"` + Key string `json:"key,omitempty"` + Project string `json:"project,omitempty"` + Projectid string `json:"projectid,omitempty"` + Resourceid string `json:"resourceid,omitempty"` + Resourcetype string `json:"resourcetype,omitempty"` + Value string `json:"value,omitempty"` + } `json:"tags,omitempty"` + } `json:"egressrule,omitempty"` + Id string `json:"id,omitempty"` + Ingressrule []struct { + Account string `json:"account,omitempty"` + Cidr string `json:"cidr,omitempty"` + Endport int `json:"endport,omitempty"` + Icmpcode int `json:"icmpcode,omitempty"` + Icmptype int `json:"icmptype,omitempty"` + Protocol string `json:"protocol,omitempty"` + Ruleid string `json:"ruleid,omitempty"` + Securitygroupname string `json:"securitygroupname,omitempty"` + Startport int `json:"startport,omitempty"` + Tags []struct { + Account string `json:"account,omitempty"` + Customer string `json:"customer,omitempty"` + Domain string `json:"domain,omitempty"` + Domainid string `json:"domainid,omitempty"` + Key string `json:"key,omitempty"` + Project string `json:"project,omitempty"` + Projectid string `json:"projectid,omitempty"` + Resourceid string `json:"resourceid,omitempty"` + Resourcetype string `json:"resourcetype,omitempty"` + Value string `json:"value,omitempty"` + } `json:"tags,omitempty"` + } `json:"ingressrule,omitempty"` + Name string `json:"name,omitempty"` + Project string `json:"project,omitempty"` + Projectid string `json:"projectid,omitempty"` + Tags []struct { + Account string `json:"account,omitempty"` + Customer string `json:"customer,omitempty"` + Domain string `json:"domain,omitempty"` + Domainid string `json:"domainid,omitempty"` + Key string `json:"key,omitempty"` + Project string `json:"project,omitempty"` + Projectid string `json:"projectid,omitempty"` + Resourceid string `json:"resourceid,omitempty"` + Resourcetype string `json:"resourcetype,omitempty"` + Value string `json:"value,omitempty"` + } `json:"tags,omitempty"` + } `json:"securitygroup,omitempty"` + Serviceofferingid string `json:"serviceofferingid,omitempty"` + Serviceofferingname string `json:"serviceofferingname,omitempty"` + Servicestate string 
`json:"servicestate,omitempty"` + State string `json:"state,omitempty"` + Tags []struct { + Account string `json:"account,omitempty"` + Customer string `json:"customer,omitempty"` + Domain string `json:"domain,omitempty"` + Domainid string `json:"domainid,omitempty"` + Key string `json:"key,omitempty"` + Project string `json:"project,omitempty"` + Projectid string `json:"projectid,omitempty"` + Resourceid string `json:"resourceid,omitempty"` + Resourcetype string `json:"resourcetype,omitempty"` + Value string `json:"value,omitempty"` + } `json:"tags,omitempty"` + Templatedisplaytext string `json:"templatedisplaytext,omitempty"` + Templateid string `json:"templateid,omitempty"` + Templatename string `json:"templatename,omitempty"` + Vgpu string `json:"vgpu,omitempty"` + Zoneid string `json:"zoneid,omitempty"` + Zonename string `json:"zonename,omitempty"` +} + +type GetVMPasswordParams struct { + p map[string]interface{} +} + +func (p *GetVMPasswordParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["id"]; found { + u.Set("id", v.(string)) + } + return u +} + +func (p *GetVMPasswordParams) SetId(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["id"] = v + return +} + +// You should always use this function to get a new GetVMPasswordParams instance, +// as then you are sure you have configured all required params +func (s *VirtualMachineService) NewGetVMPasswordParams(id string) *GetVMPasswordParams { + p := &GetVMPasswordParams{} + p.p = make(map[string]interface{}) + p.p["id"] = id + return p +} + +// Returns an encrypted password for the VM +func (s *VirtualMachineService) GetVMPassword(p *GetVMPasswordParams) (*GetVMPasswordResponse, error) { + resp, err := s.cs.newRequest("getVMPassword", p.toURLValues()) + if err != nil { + return nil, err + } + + var r GetVMPasswordResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + return &r, nil +} + +type GetVMPasswordResponse struct { + Encryptedpassword string `json:"encryptedpassword,omitempty"` +} + +type RestoreVirtualMachineParams struct { + p map[string]interface{} +} + +func (p *RestoreVirtualMachineParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["templateid"]; found { + u.Set("templateid", v.(string)) + } + if v, found := p.p["virtualmachineid"]; found { + u.Set("virtualmachineid", v.(string)) + } + return u +} + +func (p *RestoreVirtualMachineParams) SetTemplateid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["templateid"] = v + return +} + +func (p *RestoreVirtualMachineParams) SetVirtualmachineid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["virtualmachineid"] = v + return +} + +// You should always use this function to get a new RestoreVirtualMachineParams instance, +// as then you are sure you have configured all required params +func (s *VirtualMachineService) NewRestoreVirtualMachineParams(virtualmachineid string) *RestoreVirtualMachineParams { + p := &RestoreVirtualMachineParams{} + p.p = make(map[string]interface{}) + p.p["virtualmachineid"] = virtualmachineid + return p +} + +// Restore a VM to original template/ISO or new template/ISO +func (s *VirtualMachineService) RestoreVirtualMachine(p *RestoreVirtualMachineParams) (*RestoreVirtualMachineResponse, error) { + resp, err := s.cs.newRequest("restoreVirtualMachine", p.toURLValues()) + if err != nil { + return nil, err + } + + var r 
RestoreVirtualMachineResponse
+	if err := json.Unmarshal(resp, &r); err != nil {
+		return nil, err
+	}
+
+	// If we have an async client, we need to wait for the async result
+	if s.cs.async {
+		b, err := s.cs.GetAsyncJobResult(r.JobID, s.cs.timeout)
+		if err != nil {
+			if err == AsyncTimeoutErr {
+				return &r, err
+			}
+			return nil, err
+		}
+
+		b, err = getRawValue(b)
+		if err != nil {
+			return nil, err
+		}
+
+		if err := json.Unmarshal(b, &r); err != nil {
+			return nil, err
+		}
+	}
+	return &r, nil
+}
+
+type RestoreVirtualMachineResponse struct {
+	JobID string `json:"jobid,omitempty"`
+	Account string `json:"account,omitempty"`
+	Affinitygroup []struct {
+		Account string `json:"account,omitempty"`
+		Description string `json:"description,omitempty"`
+		Domain string `json:"domain,omitempty"`
+		Domainid string `json:"domainid,omitempty"`
+		Id string `json:"id,omitempty"`
+		Name string `json:"name,omitempty"`
+		Type string `json:"type,omitempty"`
+		VirtualmachineIds []string `json:"virtualmachineIds,omitempty"`
+	} `json:"affinitygroup,omitempty"`
+	Cpunumber int `json:"cpunumber,omitempty"`
+	Cpuspeed int `json:"cpuspeed,omitempty"`
+	Cpuused string `json:"cpuused,omitempty"`
+	Created string `json:"created,omitempty"`
+	Details map[string]string `json:"details,omitempty"`
+	Diskioread int64 `json:"diskioread,omitempty"`
+	Diskiowrite int64 `json:"diskiowrite,omitempty"`
+	Diskkbsread int64 `json:"diskkbsread,omitempty"`
+	Diskkbswrite int64 `json:"diskkbswrite,omitempty"`
+	Diskofferingid string `json:"diskofferingid,omitempty"`
+	Diskofferingname string `json:"diskofferingname,omitempty"`
+	Displayname string `json:"displayname,omitempty"`
+	Displayvm bool `json:"displayvm,omitempty"`
+	Domain string `json:"domain,omitempty"`
+	Domainid string `json:"domainid,omitempty"`
+	Forvirtualnetwork bool `json:"forvirtualnetwork,omitempty"`
+	Group string `json:"group,omitempty"`
+	Groupid string `json:"groupid,omitempty"`
+	Guestosid string `json:"guestosid,omitempty"`
+	Haenable bool `json:"haenable,omitempty"`
+	Hostid string `json:"hostid,omitempty"`
+	Hostname string `json:"hostname,omitempty"`
+	Hypervisor string `json:"hypervisor,omitempty"`
+	Id string `json:"id,omitempty"`
+	Instancename string `json:"instancename,omitempty"`
+	Isdynamicallyscalable bool `json:"isdynamicallyscalable,omitempty"`
+	Isodisplaytext string `json:"isodisplaytext,omitempty"`
+	Isoid string `json:"isoid,omitempty"`
+	Isoname string `json:"isoname,omitempty"`
+	Keypair string `json:"keypair,omitempty"`
+	Memory int `json:"memory,omitempty"`
+	Name string `json:"name,omitempty"`
+	Networkkbsread int64 `json:"networkkbsread,omitempty"`
+	Networkkbswrite int64 `json:"networkkbswrite,omitempty"`
+	Nic []struct {
+		Broadcasturi string `json:"broadcasturi,omitempty"`
+		Deviceid string `json:"deviceid,omitempty"`
+		Gateway string `json:"gateway,omitempty"`
+		Id string `json:"id,omitempty"`
+		Ip6address string `json:"ip6address,omitempty"`
+		Ip6cidr string `json:"ip6cidr,omitempty"`
+		Ip6gateway string `json:"ip6gateway,omitempty"`
+		Ipaddress string `json:"ipaddress,omitempty"`
+		Isdefault bool `json:"isdefault,omitempty"`
+		Isolationuri string `json:"isolationuri,omitempty"`
+		Macaddress string `json:"macaddress,omitempty"`
+		Netmask string `json:"netmask,omitempty"`
+		Networkid string `json:"networkid,omitempty"`
+		Networkname string `json:"networkname,omitempty"`
+		Secondaryip []struct {
+			Id string `json:"id,omitempty"`
+			Ipaddress string `json:"ipaddress,omitempty"`
+		} `json:"secondaryip,omitempty"`
+		Traffictype
string `json:"traffictype,omitempty"` + Type string `json:"type,omitempty"` + Virtualmachineid string `json:"virtualmachineid,omitempty"` + } `json:"nic,omitempty"` + Ostypeid int64 `json:"ostypeid,omitempty"` + Password string `json:"password,omitempty"` + Passwordenabled bool `json:"passwordenabled,omitempty"` + Project string `json:"project,omitempty"` + Projectid string `json:"projectid,omitempty"` + Publicip string `json:"publicip,omitempty"` + Publicipid string `json:"publicipid,omitempty"` + Rootdeviceid int64 `json:"rootdeviceid,omitempty"` + Rootdevicetype string `json:"rootdevicetype,omitempty"` + Securitygroup []struct { + Account string `json:"account,omitempty"` + Description string `json:"description,omitempty"` + Domain string `json:"domain,omitempty"` + Domainid string `json:"domainid,omitempty"` + Egressrule []struct { + Account string `json:"account,omitempty"` + Cidr string `json:"cidr,omitempty"` + Endport int `json:"endport,omitempty"` + Icmpcode int `json:"icmpcode,omitempty"` + Icmptype int `json:"icmptype,omitempty"` + Protocol string `json:"protocol,omitempty"` + Ruleid string `json:"ruleid,omitempty"` + Securitygroupname string `json:"securitygroupname,omitempty"` + Startport int `json:"startport,omitempty"` + Tags []struct { + Account string `json:"account,omitempty"` + Customer string `json:"customer,omitempty"` + Domain string `json:"domain,omitempty"` + Domainid string `json:"domainid,omitempty"` + Key string `json:"key,omitempty"` + Project string `json:"project,omitempty"` + Projectid string `json:"projectid,omitempty"` + Resourceid string `json:"resourceid,omitempty"` + Resourcetype string `json:"resourcetype,omitempty"` + Value string `json:"value,omitempty"` + } `json:"tags,omitempty"` + } `json:"egressrule,omitempty"` + Id string `json:"id,omitempty"` + Ingressrule []struct { + Account string `json:"account,omitempty"` + Cidr string `json:"cidr,omitempty"` + Endport int `json:"endport,omitempty"` + Icmpcode int `json:"icmpcode,omitempty"` + Icmptype int `json:"icmptype,omitempty"` + Protocol string `json:"protocol,omitempty"` + Ruleid string `json:"ruleid,omitempty"` + Securitygroupname string `json:"securitygroupname,omitempty"` + Startport int `json:"startport,omitempty"` + Tags []struct { + Account string `json:"account,omitempty"` + Customer string `json:"customer,omitempty"` + Domain string `json:"domain,omitempty"` + Domainid string `json:"domainid,omitempty"` + Key string `json:"key,omitempty"` + Project string `json:"project,omitempty"` + Projectid string `json:"projectid,omitempty"` + Resourceid string `json:"resourceid,omitempty"` + Resourcetype string `json:"resourcetype,omitempty"` + Value string `json:"value,omitempty"` + } `json:"tags,omitempty"` + } `json:"ingressrule,omitempty"` + Name string `json:"name,omitempty"` + Project string `json:"project,omitempty"` + Projectid string `json:"projectid,omitempty"` + Tags []struct { + Account string `json:"account,omitempty"` + Customer string `json:"customer,omitempty"` + Domain string `json:"domain,omitempty"` + Domainid string `json:"domainid,omitempty"` + Key string `json:"key,omitempty"` + Project string `json:"project,omitempty"` + Projectid string `json:"projectid,omitempty"` + Resourceid string `json:"resourceid,omitempty"` + Resourcetype string `json:"resourcetype,omitempty"` + Value string `json:"value,omitempty"` + } `json:"tags,omitempty"` + } `json:"securitygroup,omitempty"` + Serviceofferingid string `json:"serviceofferingid,omitempty"` + Serviceofferingname string 
`json:"serviceofferingname,omitempty"` + Servicestate string `json:"servicestate,omitempty"` + State string `json:"state,omitempty"` + Tags []struct { + Account string `json:"account,omitempty"` + Customer string `json:"customer,omitempty"` + Domain string `json:"domain,omitempty"` + Domainid string `json:"domainid,omitempty"` + Key string `json:"key,omitempty"` + Project string `json:"project,omitempty"` + Projectid string `json:"projectid,omitempty"` + Resourceid string `json:"resourceid,omitempty"` + Resourcetype string `json:"resourcetype,omitempty"` + Value string `json:"value,omitempty"` + } `json:"tags,omitempty"` + Templatedisplaytext string `json:"templatedisplaytext,omitempty"` + Templateid string `json:"templateid,omitempty"` + Templatename string `json:"templatename,omitempty"` + Vgpu string `json:"vgpu,omitempty"` + Zoneid string `json:"zoneid,omitempty"` + Zonename string `json:"zonename,omitempty"` +} + +type ChangeServiceForVirtualMachineParams struct { + p map[string]interface{} +} + +func (p *ChangeServiceForVirtualMachineParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["details"]; found { + i := 0 + for k, vv := range v.(map[string]string) { + u.Set(fmt.Sprintf("details[%d].key", i), k) + u.Set(fmt.Sprintf("details[%d].value", i), vv) + i++ + } + } + if v, found := p.p["id"]; found { + u.Set("id", v.(string)) + } + if v, found := p.p["serviceofferingid"]; found { + u.Set("serviceofferingid", v.(string)) + } + return u +} + +func (p *ChangeServiceForVirtualMachineParams) SetDetails(v map[string]string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["details"] = v + return +} + +func (p *ChangeServiceForVirtualMachineParams) SetId(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["id"] = v + return +} + +func (p *ChangeServiceForVirtualMachineParams) SetServiceofferingid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["serviceofferingid"] = v + return +} + +// You should always use this function to get a new ChangeServiceForVirtualMachineParams instance, +// as then you are sure you have configured all required params +func (s *VirtualMachineService) NewChangeServiceForVirtualMachineParams(id string, serviceofferingid string) *ChangeServiceForVirtualMachineParams { + p := &ChangeServiceForVirtualMachineParams{} + p.p = make(map[string]interface{}) + p.p["id"] = id + p.p["serviceofferingid"] = serviceofferingid + return p +} + +// Changes the service offering for a virtual machine. The virtual machine must be in a "Stopped" state for this command to take effect. 
+func (s *VirtualMachineService) ChangeServiceForVirtualMachine(p *ChangeServiceForVirtualMachineParams) (*ChangeServiceForVirtualMachineResponse, error) { + resp, err := s.cs.newRequest("changeServiceForVirtualMachine", p.toURLValues()) + if err != nil { + return nil, err + } + + var r ChangeServiceForVirtualMachineResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + return &r, nil +} + +type ChangeServiceForVirtualMachineResponse struct { + Account string `json:"account,omitempty"` + Affinitygroup []struct { + Account string `json:"account,omitempty"` + Description string `json:"description,omitempty"` + Domain string `json:"domain,omitempty"` + Domainid string `json:"domainid,omitempty"` + Id string `json:"id,omitempty"` + Name string `json:"name,omitempty"` + Type string `json:"type,omitempty"` + VirtualmachineIds []string `json:"virtualmachineIds,omitempty"` + } `json:"affinitygroup,omitempty"` + Cpunumber int `json:"cpunumber,omitempty"` + Cpuspeed int `json:"cpuspeed,omitempty"` + Cpuused string `json:"cpuused,omitempty"` + Created string `json:"created,omitempty"` + Details map[string]string `json:"details,omitempty"` + Diskioread int64 `json:"diskioread,omitempty"` + Diskiowrite int64 `json:"diskiowrite,omitempty"` + Diskkbsread int64 `json:"diskkbsread,omitempty"` + Diskkbswrite int64 `json:"diskkbswrite,omitempty"` + Diskofferingid string `json:"diskofferingid,omitempty"` + Diskofferingname string `json:"diskofferingname,omitempty"` + Displayname string `json:"displayname,omitempty"` + Displayvm bool `json:"displayvm,omitempty"` + Domain string `json:"domain,omitempty"` + Domainid string `json:"domainid,omitempty"` + Forvirtualnetwork bool `json:"forvirtualnetwork,omitempty"` + Group string `json:"group,omitempty"` + Groupid string `json:"groupid,omitempty"` + Guestosid string `json:"guestosid,omitempty"` + Haenable bool `json:"haenable,omitempty"` + Hostid string `json:"hostid,omitempty"` + Hostname string `json:"hostname,omitempty"` + Hypervisor string `json:"hypervisor,omitempty"` + Id string `json:"id,omitempty"` + Instancename string `json:"instancename,omitempty"` + Isdynamicallyscalable bool `json:"isdynamicallyscalable,omitempty"` + Isodisplaytext string `json:"isodisplaytext,omitempty"` + Isoid string `json:"isoid,omitempty"` + Isoname string `json:"isoname,omitempty"` + Keypair string `json:"keypair,omitempty"` + Memory int `json:"memory,omitempty"` + Name string `json:"name,omitempty"` + Networkkbsread int64 `json:"networkkbsread,omitempty"` + Networkkbswrite int64 `json:"networkkbswrite,omitempty"` + Nic []struct { + Broadcasturi string `json:"broadcasturi,omitempty"` + Deviceid string `json:"deviceid,omitempty"` + Gateway string `json:"gateway,omitempty"` + Id string `json:"id,omitempty"` + Ip6address string `json:"ip6address,omitempty"` + Ip6cidr string `json:"ip6cidr,omitempty"` + Ip6gateway string `json:"ip6gateway,omitempty"` + Ipaddress string `json:"ipaddress,omitempty"` + Isdefault bool `json:"isdefault,omitempty"` + Isolationuri string `json:"isolationuri,omitempty"` + Macaddress string `json:"macaddress,omitempty"` + Netmask string `json:"netmask,omitempty"` + Networkid string `json:"networkid,omitempty"` + Networkname string `json:"networkname,omitempty"` + Secondaryip []struct { + Id string `json:"id,omitempty"` + Ipaddress string `json:"ipaddress,omitempty"` + } `json:"secondaryip,omitempty"` + Traffictype string `json:"traffictype,omitempty"` + Type string `json:"type,omitempty"` + Virtualmachineid string 
`json:"virtualmachineid,omitempty"` + } `json:"nic,omitempty"` + Ostypeid int64 `json:"ostypeid,omitempty"` + Password string `json:"password,omitempty"` + Passwordenabled bool `json:"passwordenabled,omitempty"` + Project string `json:"project,omitempty"` + Projectid string `json:"projectid,omitempty"` + Publicip string `json:"publicip,omitempty"` + Publicipid string `json:"publicipid,omitempty"` + Rootdeviceid int64 `json:"rootdeviceid,omitempty"` + Rootdevicetype string `json:"rootdevicetype,omitempty"` + Securitygroup []struct { + Account string `json:"account,omitempty"` + Description string `json:"description,omitempty"` + Domain string `json:"domain,omitempty"` + Domainid string `json:"domainid,omitempty"` + Egressrule []struct { + Account string `json:"account,omitempty"` + Cidr string `json:"cidr,omitempty"` + Endport int `json:"endport,omitempty"` + Icmpcode int `json:"icmpcode,omitempty"` + Icmptype int `json:"icmptype,omitempty"` + Protocol string `json:"protocol,omitempty"` + Ruleid string `json:"ruleid,omitempty"` + Securitygroupname string `json:"securitygroupname,omitempty"` + Startport int `json:"startport,omitempty"` + Tags []struct { + Account string `json:"account,omitempty"` + Customer string `json:"customer,omitempty"` + Domain string `json:"domain,omitempty"` + Domainid string `json:"domainid,omitempty"` + Key string `json:"key,omitempty"` + Project string `json:"project,omitempty"` + Projectid string `json:"projectid,omitempty"` + Resourceid string `json:"resourceid,omitempty"` + Resourcetype string `json:"resourcetype,omitempty"` + Value string `json:"value,omitempty"` + } `json:"tags,omitempty"` + } `json:"egressrule,omitempty"` + Id string `json:"id,omitempty"` + Ingressrule []struct { + Account string `json:"account,omitempty"` + Cidr string `json:"cidr,omitempty"` + Endport int `json:"endport,omitempty"` + Icmpcode int `json:"icmpcode,omitempty"` + Icmptype int `json:"icmptype,omitempty"` + Protocol string `json:"protocol,omitempty"` + Ruleid string `json:"ruleid,omitempty"` + Securitygroupname string `json:"securitygroupname,omitempty"` + Startport int `json:"startport,omitempty"` + Tags []struct { + Account string `json:"account,omitempty"` + Customer string `json:"customer,omitempty"` + Domain string `json:"domain,omitempty"` + Domainid string `json:"domainid,omitempty"` + Key string `json:"key,omitempty"` + Project string `json:"project,omitempty"` + Projectid string `json:"projectid,omitempty"` + Resourceid string `json:"resourceid,omitempty"` + Resourcetype string `json:"resourcetype,omitempty"` + Value string `json:"value,omitempty"` + } `json:"tags,omitempty"` + } `json:"ingressrule,omitempty"` + Name string `json:"name,omitempty"` + Project string `json:"project,omitempty"` + Projectid string `json:"projectid,omitempty"` + Tags []struct { + Account string `json:"account,omitempty"` + Customer string `json:"customer,omitempty"` + Domain string `json:"domain,omitempty"` + Domainid string `json:"domainid,omitempty"` + Key string `json:"key,omitempty"` + Project string `json:"project,omitempty"` + Projectid string `json:"projectid,omitempty"` + Resourceid string `json:"resourceid,omitempty"` + Resourcetype string `json:"resourcetype,omitempty"` + Value string `json:"value,omitempty"` + } `json:"tags,omitempty"` + } `json:"securitygroup,omitempty"` + Serviceofferingid string `json:"serviceofferingid,omitempty"` + Serviceofferingname string `json:"serviceofferingname,omitempty"` + Servicestate string `json:"servicestate,omitempty"` + State string 
`json:"state,omitempty"` + Tags []struct { + Account string `json:"account,omitempty"` + Customer string `json:"customer,omitempty"` + Domain string `json:"domain,omitempty"` + Domainid string `json:"domainid,omitempty"` + Key string `json:"key,omitempty"` + Project string `json:"project,omitempty"` + Projectid string `json:"projectid,omitempty"` + Resourceid string `json:"resourceid,omitempty"` + Resourcetype string `json:"resourcetype,omitempty"` + Value string `json:"value,omitempty"` + } `json:"tags,omitempty"` + Templatedisplaytext string `json:"templatedisplaytext,omitempty"` + Templateid string `json:"templateid,omitempty"` + Templatename string `json:"templatename,omitempty"` + Vgpu string `json:"vgpu,omitempty"` + Zoneid string `json:"zoneid,omitempty"` + Zonename string `json:"zonename,omitempty"` +} + +type ScaleVirtualMachineParams struct { + p map[string]interface{} +} + +func (p *ScaleVirtualMachineParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["details"]; found { + i := 0 + for k, vv := range v.(map[string]string) { + u.Set(fmt.Sprintf("details[%d].key", i), k) + u.Set(fmt.Sprintf("details[%d].value", i), vv) + i++ + } + } + if v, found := p.p["id"]; found { + u.Set("id", v.(string)) + } + if v, found := p.p["serviceofferingid"]; found { + u.Set("serviceofferingid", v.(string)) + } + return u +} + +func (p *ScaleVirtualMachineParams) SetDetails(v map[string]string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["details"] = v + return +} + +func (p *ScaleVirtualMachineParams) SetId(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["id"] = v + return +} + +func (p *ScaleVirtualMachineParams) SetServiceofferingid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["serviceofferingid"] = v + return +} + +// You should always use this function to get a new ScaleVirtualMachineParams instance, +// as then you are sure you have configured all required params +func (s *VirtualMachineService) NewScaleVirtualMachineParams(id string, serviceofferingid string) *ScaleVirtualMachineParams { + p := &ScaleVirtualMachineParams{} + p.p = make(map[string]interface{}) + p.p["id"] = id + p.p["serviceofferingid"] = serviceofferingid + return p +} + +// Scales the virtual machine to a new service offering. 
+func (s *VirtualMachineService) ScaleVirtualMachine(p *ScaleVirtualMachineParams) (*ScaleVirtualMachineResponse, error) {
+	resp, err := s.cs.newRequest("scaleVirtualMachine", p.toURLValues())
+	if err != nil {
+		return nil, err
+	}
+
+	var r ScaleVirtualMachineResponse
+	if err := json.Unmarshal(resp, &r); err != nil {
+		return nil, err
+	}
+
+	// If we have an async client, we need to wait for the async result
+	if s.cs.async {
+		b, err := s.cs.GetAsyncJobResult(r.JobID, s.cs.timeout)
+		if err != nil {
+			if err == AsyncTimeoutErr {
+				return &r, err
+			}
+			return nil, err
+		}
+
+		if err := json.Unmarshal(b, &r); err != nil {
+			return nil, err
+		}
+	}
+	return &r, nil
+}
+
+type ScaleVirtualMachineResponse struct {
+	JobID string `json:"jobid,omitempty"`
+	Displaytext string `json:"displaytext,omitempty"`
+	Success bool `json:"success,omitempty"`
+}
+
+type AssignVirtualMachineParams struct {
+	p map[string]interface{}
+}
+
+func (p *AssignVirtualMachineParams) toURLValues() url.Values {
+	u := url.Values{}
+	if p.p == nil {
+		return u
+	}
+	if v, found := p.p["account"]; found {
+		u.Set("account", v.(string))
+	}
+	if v, found := p.p["domainid"]; found {
+		u.Set("domainid", v.(string))
+	}
+	if v, found := p.p["networkids"]; found {
+		vv := strings.Join(v.([]string), ",")
+		u.Set("networkids", vv)
+	}
+	if v, found := p.p["securitygroupids"]; found {
+		vv := strings.Join(v.([]string), ",")
+		u.Set("securitygroupids", vv)
+	}
+	if v, found := p.p["virtualmachineid"]; found {
+		u.Set("virtualmachineid", v.(string))
+	}
+	return u
+}
+
+func (p *AssignVirtualMachineParams) SetAccount(v string) {
+	if p.p == nil {
+		p.p = make(map[string]interface{})
+	}
+	p.p["account"] = v
+	return
+}
+
+func (p *AssignVirtualMachineParams) SetDomainid(v string) {
+	if p.p == nil {
+		p.p = make(map[string]interface{})
+	}
+	p.p["domainid"] = v
+	return
+}
+
+func (p *AssignVirtualMachineParams) SetNetworkids(v []string) {
+	if p.p == nil {
+		p.p = make(map[string]interface{})
+	}
+	p.p["networkids"] = v
+	return
+}
+
+func (p *AssignVirtualMachineParams) SetSecuritygroupids(v []string) {
+	if p.p == nil {
+		p.p = make(map[string]interface{})
+	}
+	p.p["securitygroupids"] = v
+	return
+}
+
+func (p *AssignVirtualMachineParams) SetVirtualmachineid(v string) {
+	if p.p == nil {
+		p.p = make(map[string]interface{})
+	}
+	p.p["virtualmachineid"] = v
+	return
+}
+
+// You should always use this function to get a new AssignVirtualMachineParams instance,
+// as then you are sure you have configured all required params
+func (s *VirtualMachineService) NewAssignVirtualMachineParams(account string, domainid string, virtualmachineid string) *AssignVirtualMachineParams {
+	p := &AssignVirtualMachineParams{}
+	p.p = make(map[string]interface{})
+	p.p["account"] = account
+	p.p["domainid"] = domainid
+	p.p["virtualmachineid"] = virtualmachineid
+	return p
+}
+
+// Change ownership of a VM from one account to another. This API is available for Basic zones with security groups and Advanced zones with guest networks. A root administrator can reassign a VM from any account to any other account in any domain. A domain administrator can reassign a VM to any account in the same domain.
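A sketch of the ownership transfer implemented below, under the same assumed client; note that CloudStack expects the VM to be stopped before it can be reassigned, a precondition not restated in this hunk:

func reassignVM(cs *cloudstack.CloudStackClient, vmID, account, domainID string) error {
	p := cs.VirtualMachine.NewAssignVirtualMachineParams(account, domainID, vmID)
	// Optionally remap networks or security groups for the new owner, e.g.
	// p.SetNetworkids([]string{"net-uuid"}) or p.SetSecuritygroupids(...).
	_, err := cs.VirtualMachine.AssignVirtualMachine(p)
	return err
}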
+func (s *VirtualMachineService) AssignVirtualMachine(p *AssignVirtualMachineParams) (*AssignVirtualMachineResponse, error) { + resp, err := s.cs.newRequest("assignVirtualMachine", p.toURLValues()) + if err != nil { + return nil, err + } + + var r AssignVirtualMachineResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + return &r, nil +} + +type AssignVirtualMachineResponse struct { + Account string `json:"account,omitempty"` + Affinitygroup []struct { + Account string `json:"account,omitempty"` + Description string `json:"description,omitempty"` + Domain string `json:"domain,omitempty"` + Domainid string `json:"domainid,omitempty"` + Id string `json:"id,omitempty"` + Name string `json:"name,omitempty"` + Type string `json:"type,omitempty"` + VirtualmachineIds []string `json:"virtualmachineIds,omitempty"` + } `json:"affinitygroup,omitempty"` + Cpunumber int `json:"cpunumber,omitempty"` + Cpuspeed int `json:"cpuspeed,omitempty"` + Cpuused string `json:"cpuused,omitempty"` + Created string `json:"created,omitempty"` + Details map[string]string `json:"details,omitempty"` + Diskioread int64 `json:"diskioread,omitempty"` + Diskiowrite int64 `json:"diskiowrite,omitempty"` + Diskkbsread int64 `json:"diskkbsread,omitempty"` + Diskkbswrite int64 `json:"diskkbswrite,omitempty"` + Diskofferingid string `json:"diskofferingid,omitempty"` + Diskofferingname string `json:"diskofferingname,omitempty"` + Displayname string `json:"displayname,omitempty"` + Displayvm bool `json:"displayvm,omitempty"` + Domain string `json:"domain,omitempty"` + Domainid string `json:"domainid,omitempty"` + Forvirtualnetwork bool `json:"forvirtualnetwork,omitempty"` + Group string `json:"group,omitempty"` + Groupid string `json:"groupid,omitempty"` + Guestosid string `json:"guestosid,omitempty"` + Haenable bool `json:"haenable,omitempty"` + Hostid string `json:"hostid,omitempty"` + Hostname string `json:"hostname,omitempty"` + Hypervisor string `json:"hypervisor,omitempty"` + Id string `json:"id,omitempty"` + Instancename string `json:"instancename,omitempty"` + Isdynamicallyscalable bool `json:"isdynamicallyscalable,omitempty"` + Isodisplaytext string `json:"isodisplaytext,omitempty"` + Isoid string `json:"isoid,omitempty"` + Isoname string `json:"isoname,omitempty"` + Keypair string `json:"keypair,omitempty"` + Memory int `json:"memory,omitempty"` + Name string `json:"name,omitempty"` + Networkkbsread int64 `json:"networkkbsread,omitempty"` + Networkkbswrite int64 `json:"networkkbswrite,omitempty"` + Nic []struct { + Broadcasturi string `json:"broadcasturi,omitempty"` + Deviceid string `json:"deviceid,omitempty"` + Gateway string `json:"gateway,omitempty"` + Id string `json:"id,omitempty"` + Ip6address string `json:"ip6address,omitempty"` + Ip6cidr string `json:"ip6cidr,omitempty"` + Ip6gateway string `json:"ip6gateway,omitempty"` + Ipaddress string `json:"ipaddress,omitempty"` + Isdefault bool `json:"isdefault,omitempty"` + Isolationuri string `json:"isolationuri,omitempty"` + Macaddress string `json:"macaddress,omitempty"` + Netmask string `json:"netmask,omitempty"` + Networkid string `json:"networkid,omitempty"` + Networkname string `json:"networkname,omitempty"` + Secondaryip []struct { + Id string `json:"id,omitempty"` + Ipaddress string `json:"ipaddress,omitempty"` + } `json:"secondaryip,omitempty"` + Traffictype string `json:"traffictype,omitempty"` + Type string `json:"type,omitempty"` + Virtualmachineid string `json:"virtualmachineid,omitempty"` + } `json:"nic,omitempty"` + Ostypeid 
int64 `json:"ostypeid,omitempty"` + Password string `json:"password,omitempty"` + Passwordenabled bool `json:"passwordenabled,omitempty"` + Project string `json:"project,omitempty"` + Projectid string `json:"projectid,omitempty"` + Publicip string `json:"publicip,omitempty"` + Publicipid string `json:"publicipid,omitempty"` + Rootdeviceid int64 `json:"rootdeviceid,omitempty"` + Rootdevicetype string `json:"rootdevicetype,omitempty"` + Securitygroup []struct { + Account string `json:"account,omitempty"` + Description string `json:"description,omitempty"` + Domain string `json:"domain,omitempty"` + Domainid string `json:"domainid,omitempty"` + Egressrule []struct { + Account string `json:"account,omitempty"` + Cidr string `json:"cidr,omitempty"` + Endport int `json:"endport,omitempty"` + Icmpcode int `json:"icmpcode,omitempty"` + Icmptype int `json:"icmptype,omitempty"` + Protocol string `json:"protocol,omitempty"` + Ruleid string `json:"ruleid,omitempty"` + Securitygroupname string `json:"securitygroupname,omitempty"` + Startport int `json:"startport,omitempty"` + Tags []struct { + Account string `json:"account,omitempty"` + Customer string `json:"customer,omitempty"` + Domain string `json:"domain,omitempty"` + Domainid string `json:"domainid,omitempty"` + Key string `json:"key,omitempty"` + Project string `json:"project,omitempty"` + Projectid string `json:"projectid,omitempty"` + Resourceid string `json:"resourceid,omitempty"` + Resourcetype string `json:"resourcetype,omitempty"` + Value string `json:"value,omitempty"` + } `json:"tags,omitempty"` + } `json:"egressrule,omitempty"` + Id string `json:"id,omitempty"` + Ingressrule []struct { + Account string `json:"account,omitempty"` + Cidr string `json:"cidr,omitempty"` + Endport int `json:"endport,omitempty"` + Icmpcode int `json:"icmpcode,omitempty"` + Icmptype int `json:"icmptype,omitempty"` + Protocol string `json:"protocol,omitempty"` + Ruleid string `json:"ruleid,omitempty"` + Securitygroupname string `json:"securitygroupname,omitempty"` + Startport int `json:"startport,omitempty"` + Tags []struct { + Account string `json:"account,omitempty"` + Customer string `json:"customer,omitempty"` + Domain string `json:"domain,omitempty"` + Domainid string `json:"domainid,omitempty"` + Key string `json:"key,omitempty"` + Project string `json:"project,omitempty"` + Projectid string `json:"projectid,omitempty"` + Resourceid string `json:"resourceid,omitempty"` + Resourcetype string `json:"resourcetype,omitempty"` + Value string `json:"value,omitempty"` + } `json:"tags,omitempty"` + } `json:"ingressrule,omitempty"` + Name string `json:"name,omitempty"` + Project string `json:"project,omitempty"` + Projectid string `json:"projectid,omitempty"` + Tags []struct { + Account string `json:"account,omitempty"` + Customer string `json:"customer,omitempty"` + Domain string `json:"domain,omitempty"` + Domainid string `json:"domainid,omitempty"` + Key string `json:"key,omitempty"` + Project string `json:"project,omitempty"` + Projectid string `json:"projectid,omitempty"` + Resourceid string `json:"resourceid,omitempty"` + Resourcetype string `json:"resourcetype,omitempty"` + Value string `json:"value,omitempty"` + } `json:"tags,omitempty"` + } `json:"securitygroup,omitempty"` + Serviceofferingid string `json:"serviceofferingid,omitempty"` + Serviceofferingname string `json:"serviceofferingname,omitempty"` + Servicestate string `json:"servicestate,omitempty"` + State string `json:"state,omitempty"` + Tags []struct { + Account string `json:"account,omitempty"` 
+		Customer     string `json:"customer,omitempty"`
+		Domain       string `json:"domain,omitempty"`
+		Domainid     string `json:"domainid,omitempty"`
+		Key          string `json:"key,omitempty"`
+		Project      string `json:"project,omitempty"`
+		Projectid    string `json:"projectid,omitempty"`
+		Resourceid   string `json:"resourceid,omitempty"`
+		Resourcetype string `json:"resourcetype,omitempty"`
+		Value        string `json:"value,omitempty"`
+	} `json:"tags,omitempty"`
+	Templatedisplaytext string `json:"templatedisplaytext,omitempty"`
+	Templateid          string `json:"templateid,omitempty"`
+	Templatename        string `json:"templatename,omitempty"`
+	Vgpu                string `json:"vgpu,omitempty"`
+	Zoneid              string `json:"zoneid,omitempty"`
+	Zonename            string `json:"zonename,omitempty"`
+}
+
+type MigrateVirtualMachineParams struct {
+	p map[string]interface{}
+}
+
+func (p *MigrateVirtualMachineParams) toURLValues() url.Values {
+	u := url.Values{}
+	if p.p == nil {
+		return u
+	}
+	if v, found := p.p["hostid"]; found {
+		u.Set("hostid", v.(string))
+	}
+	if v, found := p.p["storageid"]; found {
+		u.Set("storageid", v.(string))
+	}
+	if v, found := p.p["virtualmachineid"]; found {
+		u.Set("virtualmachineid", v.(string))
+	}
+	return u
+}
+
+func (p *MigrateVirtualMachineParams) SetHostid(v string) {
+	if p.p == nil {
+		p.p = make(map[string]interface{})
+	}
+	p.p["hostid"] = v
+	return
+}
+
+func (p *MigrateVirtualMachineParams) SetStorageid(v string) {
+	if p.p == nil {
+		p.p = make(map[string]interface{})
+	}
+	p.p["storageid"] = v
+	return
+}
+
+func (p *MigrateVirtualMachineParams) SetVirtualmachineid(v string) {
+	if p.p == nil {
+		p.p = make(map[string]interface{})
+	}
+	p.p["virtualmachineid"] = v
+	return
+}
+
+// You should always use this function to get a new MigrateVirtualMachineParams instance,
+// as then you are sure you have configured all required params
+func (s *VirtualMachineService) NewMigrateVirtualMachineParams(virtualmachineid string) *MigrateVirtualMachineParams {
+	p := &MigrateVirtualMachineParams{}
+	p.p = make(map[string]interface{})
+	p.p["virtualmachineid"] = virtualmachineid
+	return p
+}
+
+// Attempts migration of a VM to a different host, or of the VM's root volume to a different storage pool
+func (s *VirtualMachineService) MigrateVirtualMachine(p *MigrateVirtualMachineParams) (*MigrateVirtualMachineResponse, error) {
+	resp, err := s.cs.newRequest("migrateVirtualMachine", p.toURLValues())
+	if err != nil {
+		return nil, err
+	}
+
+	var r MigrateVirtualMachineResponse
+	if err := json.Unmarshal(resp, &r); err != nil {
+		return nil, err
+	}
+
+	// If we have an async client, we need to wait for the async result
+	if s.cs.async {
+		b, err := s.cs.GetAsyncJobResult(r.JobID, s.cs.timeout)
+		if err != nil {
+			if err == AsyncTimeoutErr {
+				return &r, err
+			}
+			return nil, err
+		}
+
+		b, err = getRawValue(b)
+		if err != nil {
+			return nil, err
+		}
+
+		if err := json.Unmarshal(b, &r); err != nil {
+			return nil, err
+		}
+	}
+	return &r, nil
+}
+
+type MigrateVirtualMachineResponse struct {
+	JobID         string `json:"jobid,omitempty"`
+	Account       string `json:"account,omitempty"`
+	Affinitygroup []struct {
+		Account           string   `json:"account,omitempty"`
+		Description       string   `json:"description,omitempty"`
+		Domain            string   `json:"domain,omitempty"`
+		Domainid          string   `json:"domainid,omitempty"`
+		Id                string   `json:"id,omitempty"`
+		Name              string   `json:"name,omitempty"`
+		Type              string   `json:"type,omitempty"`
+		VirtualmachineIds []string `json:"virtualmachineIds,omitempty"`
+	} `json:"affinitygroup,omitempty"`
+	Cpunumber int
`json:"cpunumber,omitempty"` + Cpuspeed int `json:"cpuspeed,omitempty"` + Cpuused string `json:"cpuused,omitempty"` + Created string `json:"created,omitempty"` + Details map[string]string `json:"details,omitempty"` + Diskioread int64 `json:"diskioread,omitempty"` + Diskiowrite int64 `json:"diskiowrite,omitempty"` + Diskkbsread int64 `json:"diskkbsread,omitempty"` + Diskkbswrite int64 `json:"diskkbswrite,omitempty"` + Diskofferingid string `json:"diskofferingid,omitempty"` + Diskofferingname string `json:"diskofferingname,omitempty"` + Displayname string `json:"displayname,omitempty"` + Displayvm bool `json:"displayvm,omitempty"` + Domain string `json:"domain,omitempty"` + Domainid string `json:"domainid,omitempty"` + Forvirtualnetwork bool `json:"forvirtualnetwork,omitempty"` + Group string `json:"group,omitempty"` + Groupid string `json:"groupid,omitempty"` + Guestosid string `json:"guestosid,omitempty"` + Haenable bool `json:"haenable,omitempty"` + Hostid string `json:"hostid,omitempty"` + Hostname string `json:"hostname,omitempty"` + Hypervisor string `json:"hypervisor,omitempty"` + Id string `json:"id,omitempty"` + Instancename string `json:"instancename,omitempty"` + Isdynamicallyscalable bool `json:"isdynamicallyscalable,omitempty"` + Isodisplaytext string `json:"isodisplaytext,omitempty"` + Isoid string `json:"isoid,omitempty"` + Isoname string `json:"isoname,omitempty"` + Keypair string `json:"keypair,omitempty"` + Memory int `json:"memory,omitempty"` + Name string `json:"name,omitempty"` + Networkkbsread int64 `json:"networkkbsread,omitempty"` + Networkkbswrite int64 `json:"networkkbswrite,omitempty"` + Nic []struct { + Broadcasturi string `json:"broadcasturi,omitempty"` + Deviceid string `json:"deviceid,omitempty"` + Gateway string `json:"gateway,omitempty"` + Id string `json:"id,omitempty"` + Ip6address string `json:"ip6address,omitempty"` + Ip6cidr string `json:"ip6cidr,omitempty"` + Ip6gateway string `json:"ip6gateway,omitempty"` + Ipaddress string `json:"ipaddress,omitempty"` + Isdefault bool `json:"isdefault,omitempty"` + Isolationuri string `json:"isolationuri,omitempty"` + Macaddress string `json:"macaddress,omitempty"` + Netmask string `json:"netmask,omitempty"` + Networkid string `json:"networkid,omitempty"` + Networkname string `json:"networkname,omitempty"` + Secondaryip []struct { + Id string `json:"id,omitempty"` + Ipaddress string `json:"ipaddress,omitempty"` + } `json:"secondaryip,omitempty"` + Traffictype string `json:"traffictype,omitempty"` + Type string `json:"type,omitempty"` + Virtualmachineid string `json:"virtualmachineid,omitempty"` + } `json:"nic,omitempty"` + Ostypeid int64 `json:"ostypeid,omitempty"` + Password string `json:"password,omitempty"` + Passwordenabled bool `json:"passwordenabled,omitempty"` + Project string `json:"project,omitempty"` + Projectid string `json:"projectid,omitempty"` + Publicip string `json:"publicip,omitempty"` + Publicipid string `json:"publicipid,omitempty"` + Rootdeviceid int64 `json:"rootdeviceid,omitempty"` + Rootdevicetype string `json:"rootdevicetype,omitempty"` + Securitygroup []struct { + Account string `json:"account,omitempty"` + Description string `json:"description,omitempty"` + Domain string `json:"domain,omitempty"` + Domainid string `json:"domainid,omitempty"` + Egressrule []struct { + Account string `json:"account,omitempty"` + Cidr string `json:"cidr,omitempty"` + Endport int `json:"endport,omitempty"` + Icmpcode int `json:"icmpcode,omitempty"` + Icmptype int `json:"icmptype,omitempty"` + Protocol string 
`json:"protocol,omitempty"` + Ruleid string `json:"ruleid,omitempty"` + Securitygroupname string `json:"securitygroupname,omitempty"` + Startport int `json:"startport,omitempty"` + Tags []struct { + Account string `json:"account,omitempty"` + Customer string `json:"customer,omitempty"` + Domain string `json:"domain,omitempty"` + Domainid string `json:"domainid,omitempty"` + Key string `json:"key,omitempty"` + Project string `json:"project,omitempty"` + Projectid string `json:"projectid,omitempty"` + Resourceid string `json:"resourceid,omitempty"` + Resourcetype string `json:"resourcetype,omitempty"` + Value string `json:"value,omitempty"` + } `json:"tags,omitempty"` + } `json:"egressrule,omitempty"` + Id string `json:"id,omitempty"` + Ingressrule []struct { + Account string `json:"account,omitempty"` + Cidr string `json:"cidr,omitempty"` + Endport int `json:"endport,omitempty"` + Icmpcode int `json:"icmpcode,omitempty"` + Icmptype int `json:"icmptype,omitempty"` + Protocol string `json:"protocol,omitempty"` + Ruleid string `json:"ruleid,omitempty"` + Securitygroupname string `json:"securitygroupname,omitempty"` + Startport int `json:"startport,omitempty"` + Tags []struct { + Account string `json:"account,omitempty"` + Customer string `json:"customer,omitempty"` + Domain string `json:"domain,omitempty"` + Domainid string `json:"domainid,omitempty"` + Key string `json:"key,omitempty"` + Project string `json:"project,omitempty"` + Projectid string `json:"projectid,omitempty"` + Resourceid string `json:"resourceid,omitempty"` + Resourcetype string `json:"resourcetype,omitempty"` + Value string `json:"value,omitempty"` + } `json:"tags,omitempty"` + } `json:"ingressrule,omitempty"` + Name string `json:"name,omitempty"` + Project string `json:"project,omitempty"` + Projectid string `json:"projectid,omitempty"` + Tags []struct { + Account string `json:"account,omitempty"` + Customer string `json:"customer,omitempty"` + Domain string `json:"domain,omitempty"` + Domainid string `json:"domainid,omitempty"` + Key string `json:"key,omitempty"` + Project string `json:"project,omitempty"` + Projectid string `json:"projectid,omitempty"` + Resourceid string `json:"resourceid,omitempty"` + Resourcetype string `json:"resourcetype,omitempty"` + Value string `json:"value,omitempty"` + } `json:"tags,omitempty"` + } `json:"securitygroup,omitempty"` + Serviceofferingid string `json:"serviceofferingid,omitempty"` + Serviceofferingname string `json:"serviceofferingname,omitempty"` + Servicestate string `json:"servicestate,omitempty"` + State string `json:"state,omitempty"` + Tags []struct { + Account string `json:"account,omitempty"` + Customer string `json:"customer,omitempty"` + Domain string `json:"domain,omitempty"` + Domainid string `json:"domainid,omitempty"` + Key string `json:"key,omitempty"` + Project string `json:"project,omitempty"` + Projectid string `json:"projectid,omitempty"` + Resourceid string `json:"resourceid,omitempty"` + Resourcetype string `json:"resourcetype,omitempty"` + Value string `json:"value,omitempty"` + } `json:"tags,omitempty"` + Templatedisplaytext string `json:"templatedisplaytext,omitempty"` + Templateid string `json:"templateid,omitempty"` + Templatename string `json:"templatename,omitempty"` + Vgpu string `json:"vgpu,omitempty"` + Zoneid string `json:"zoneid,omitempty"` + Zonename string `json:"zonename,omitempty"` +} + +type MigrateVirtualMachineWithVolumeParams struct { + p map[string]interface{} +} + +func (p *MigrateVirtualMachineWithVolumeParams) toURLValues() url.Values { 
+ u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["hostid"]; found { + u.Set("hostid", v.(string)) + } + if v, found := p.p["migrateto"]; found { + i := 0 + for k, vv := range v.(map[string]string) { + u.Set(fmt.Sprintf("migrateto[%d].key", i), k) + u.Set(fmt.Sprintf("migrateto[%d].value", i), vv) + i++ + } + } + if v, found := p.p["virtualmachineid"]; found { + u.Set("virtualmachineid", v.(string)) + } + return u +} + +func (p *MigrateVirtualMachineWithVolumeParams) SetHostid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["hostid"] = v + return +} + +func (p *MigrateVirtualMachineWithVolumeParams) SetMigrateto(v map[string]string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["migrateto"] = v + return +} + +func (p *MigrateVirtualMachineWithVolumeParams) SetVirtualmachineid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["virtualmachineid"] = v + return +} + +// You should always use this function to get a new MigrateVirtualMachineWithVolumeParams instance, +// as then you are sure you have configured all required params +func (s *VirtualMachineService) NewMigrateVirtualMachineWithVolumeParams(hostid string, virtualmachineid string) *MigrateVirtualMachineWithVolumeParams { + p := &MigrateVirtualMachineWithVolumeParams{} + p.p = make(map[string]interface{}) + p.p["hostid"] = hostid + p.p["virtualmachineid"] = virtualmachineid + return p +} + +// Attempts Migration of a VM with its volumes to a different host +func (s *VirtualMachineService) MigrateVirtualMachineWithVolume(p *MigrateVirtualMachineWithVolumeParams) (*MigrateVirtualMachineWithVolumeResponse, error) { + resp, err := s.cs.newRequest("migrateVirtualMachineWithVolume", p.toURLValues()) + if err != nil { + return nil, err + } + + var r MigrateVirtualMachineWithVolumeResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + + // If we have a async client, we need to wait for the async result + if s.cs.async { + b, err := s.cs.GetAsyncJobResult(r.JobID, s.cs.timeout) + if err != nil { + if err == AsyncTimeoutErr { + return &r, err + } + return nil, err + } + + b, err = getRawValue(b) + if err != nil { + return nil, err + } + + if err := json.Unmarshal(b, &r); err != nil { + return nil, err + } + } + return &r, nil +} + +type MigrateVirtualMachineWithVolumeResponse struct { + JobID string `json:"jobid,omitempty"` + Account string `json:"account,omitempty"` + Affinitygroup []struct { + Account string `json:"account,omitempty"` + Description string `json:"description,omitempty"` + Domain string `json:"domain,omitempty"` + Domainid string `json:"domainid,omitempty"` + Id string `json:"id,omitempty"` + Name string `json:"name,omitempty"` + Type string `json:"type,omitempty"` + VirtualmachineIds []string `json:"virtualmachineIds,omitempty"` + } `json:"affinitygroup,omitempty"` + Cpunumber int `json:"cpunumber,omitempty"` + Cpuspeed int `json:"cpuspeed,omitempty"` + Cpuused string `json:"cpuused,omitempty"` + Created string `json:"created,omitempty"` + Details map[string]string `json:"details,omitempty"` + Diskioread int64 `json:"diskioread,omitempty"` + Diskiowrite int64 `json:"diskiowrite,omitempty"` + Diskkbsread int64 `json:"diskkbsread,omitempty"` + Diskkbswrite int64 `json:"diskkbswrite,omitempty"` + Diskofferingid string `json:"diskofferingid,omitempty"` + Diskofferingname string `json:"diskofferingname,omitempty"` + Displayname string `json:"displayname,omitempty"` + Displayvm bool 
`json:"displayvm,omitempty"` + Domain string `json:"domain,omitempty"` + Domainid string `json:"domainid,omitempty"` + Forvirtualnetwork bool `json:"forvirtualnetwork,omitempty"` + Group string `json:"group,omitempty"` + Groupid string `json:"groupid,omitempty"` + Guestosid string `json:"guestosid,omitempty"` + Haenable bool `json:"haenable,omitempty"` + Hostid string `json:"hostid,omitempty"` + Hostname string `json:"hostname,omitempty"` + Hypervisor string `json:"hypervisor,omitempty"` + Id string `json:"id,omitempty"` + Instancename string `json:"instancename,omitempty"` + Isdynamicallyscalable bool `json:"isdynamicallyscalable,omitempty"` + Isodisplaytext string `json:"isodisplaytext,omitempty"` + Isoid string `json:"isoid,omitempty"` + Isoname string `json:"isoname,omitempty"` + Keypair string `json:"keypair,omitempty"` + Memory int `json:"memory,omitempty"` + Name string `json:"name,omitempty"` + Networkkbsread int64 `json:"networkkbsread,omitempty"` + Networkkbswrite int64 `json:"networkkbswrite,omitempty"` + Nic []struct { + Broadcasturi string `json:"broadcasturi,omitempty"` + Deviceid string `json:"deviceid,omitempty"` + Gateway string `json:"gateway,omitempty"` + Id string `json:"id,omitempty"` + Ip6address string `json:"ip6address,omitempty"` + Ip6cidr string `json:"ip6cidr,omitempty"` + Ip6gateway string `json:"ip6gateway,omitempty"` + Ipaddress string `json:"ipaddress,omitempty"` + Isdefault bool `json:"isdefault,omitempty"` + Isolationuri string `json:"isolationuri,omitempty"` + Macaddress string `json:"macaddress,omitempty"` + Netmask string `json:"netmask,omitempty"` + Networkid string `json:"networkid,omitempty"` + Networkname string `json:"networkname,omitempty"` + Secondaryip []struct { + Id string `json:"id,omitempty"` + Ipaddress string `json:"ipaddress,omitempty"` + } `json:"secondaryip,omitempty"` + Traffictype string `json:"traffictype,omitempty"` + Type string `json:"type,omitempty"` + Virtualmachineid string `json:"virtualmachineid,omitempty"` + } `json:"nic,omitempty"` + Ostypeid int64 `json:"ostypeid,omitempty"` + Password string `json:"password,omitempty"` + Passwordenabled bool `json:"passwordenabled,omitempty"` + Project string `json:"project,omitempty"` + Projectid string `json:"projectid,omitempty"` + Publicip string `json:"publicip,omitempty"` + Publicipid string `json:"publicipid,omitempty"` + Rootdeviceid int64 `json:"rootdeviceid,omitempty"` + Rootdevicetype string `json:"rootdevicetype,omitempty"` + Securitygroup []struct { + Account string `json:"account,omitempty"` + Description string `json:"description,omitempty"` + Domain string `json:"domain,omitempty"` + Domainid string `json:"domainid,omitempty"` + Egressrule []struct { + Account string `json:"account,omitempty"` + Cidr string `json:"cidr,omitempty"` + Endport int `json:"endport,omitempty"` + Icmpcode int `json:"icmpcode,omitempty"` + Icmptype int `json:"icmptype,omitempty"` + Protocol string `json:"protocol,omitempty"` + Ruleid string `json:"ruleid,omitempty"` + Securitygroupname string `json:"securitygroupname,omitempty"` + Startport int `json:"startport,omitempty"` + Tags []struct { + Account string `json:"account,omitempty"` + Customer string `json:"customer,omitempty"` + Domain string `json:"domain,omitempty"` + Domainid string `json:"domainid,omitempty"` + Key string `json:"key,omitempty"` + Project string `json:"project,omitempty"` + Projectid string `json:"projectid,omitempty"` + Resourceid string `json:"resourceid,omitempty"` + Resourcetype string `json:"resourcetype,omitempty"` + 
Value string `json:"value,omitempty"` + } `json:"tags,omitempty"` + } `json:"egressrule,omitempty"` + Id string `json:"id,omitempty"` + Ingressrule []struct { + Account string `json:"account,omitempty"` + Cidr string `json:"cidr,omitempty"` + Endport int `json:"endport,omitempty"` + Icmpcode int `json:"icmpcode,omitempty"` + Icmptype int `json:"icmptype,omitempty"` + Protocol string `json:"protocol,omitempty"` + Ruleid string `json:"ruleid,omitempty"` + Securitygroupname string `json:"securitygroupname,omitempty"` + Startport int `json:"startport,omitempty"` + Tags []struct { + Account string `json:"account,omitempty"` + Customer string `json:"customer,omitempty"` + Domain string `json:"domain,omitempty"` + Domainid string `json:"domainid,omitempty"` + Key string `json:"key,omitempty"` + Project string `json:"project,omitempty"` + Projectid string `json:"projectid,omitempty"` + Resourceid string `json:"resourceid,omitempty"` + Resourcetype string `json:"resourcetype,omitempty"` + Value string `json:"value,omitempty"` + } `json:"tags,omitempty"` + } `json:"ingressrule,omitempty"` + Name string `json:"name,omitempty"` + Project string `json:"project,omitempty"` + Projectid string `json:"projectid,omitempty"` + Tags []struct { + Account string `json:"account,omitempty"` + Customer string `json:"customer,omitempty"` + Domain string `json:"domain,omitempty"` + Domainid string `json:"domainid,omitempty"` + Key string `json:"key,omitempty"` + Project string `json:"project,omitempty"` + Projectid string `json:"projectid,omitempty"` + Resourceid string `json:"resourceid,omitempty"` + Resourcetype string `json:"resourcetype,omitempty"` + Value string `json:"value,omitempty"` + } `json:"tags,omitempty"` + } `json:"securitygroup,omitempty"` + Serviceofferingid string `json:"serviceofferingid,omitempty"` + Serviceofferingname string `json:"serviceofferingname,omitempty"` + Servicestate string `json:"servicestate,omitempty"` + State string `json:"state,omitempty"` + Tags []struct { + Account string `json:"account,omitempty"` + Customer string `json:"customer,omitempty"` + Domain string `json:"domain,omitempty"` + Domainid string `json:"domainid,omitempty"` + Key string `json:"key,omitempty"` + Project string `json:"project,omitempty"` + Projectid string `json:"projectid,omitempty"` + Resourceid string `json:"resourceid,omitempty"` + Resourcetype string `json:"resourcetype,omitempty"` + Value string `json:"value,omitempty"` + } `json:"tags,omitempty"` + Templatedisplaytext string `json:"templatedisplaytext,omitempty"` + Templateid string `json:"templateid,omitempty"` + Templatename string `json:"templatename,omitempty"` + Vgpu string `json:"vgpu,omitempty"` + Zoneid string `json:"zoneid,omitempty"` + Zonename string `json:"zonename,omitempty"` +} + +type RecoverVirtualMachineParams struct { + p map[string]interface{} +} + +func (p *RecoverVirtualMachineParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["id"]; found { + u.Set("id", v.(string)) + } + return u +} + +func (p *RecoverVirtualMachineParams) SetId(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["id"] = v + return +} + +// You should always use this function to get a new RecoverVirtualMachineParams instance, +// as then you are sure you have configured all required params +func (s *VirtualMachineService) NewRecoverVirtualMachineParams(id string) *RecoverVirtualMachineParams { + p := &RecoverVirtualMachineParams{} + p.p = make(map[string]interface{}) + p.p["id"] 
= id + return p +} + +// Recovers a virtual machine. +func (s *VirtualMachineService) RecoverVirtualMachine(p *RecoverVirtualMachineParams) (*RecoverVirtualMachineResponse, error) { + resp, err := s.cs.newRequest("recoverVirtualMachine", p.toURLValues()) + if err != nil { + return nil, err + } + + var r RecoverVirtualMachineResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + return &r, nil +} + +type RecoverVirtualMachineResponse struct { + Account string `json:"account,omitempty"` + Affinitygroup []struct { + Account string `json:"account,omitempty"` + Description string `json:"description,omitempty"` + Domain string `json:"domain,omitempty"` + Domainid string `json:"domainid,omitempty"` + Id string `json:"id,omitempty"` + Name string `json:"name,omitempty"` + Type string `json:"type,omitempty"` + VirtualmachineIds []string `json:"virtualmachineIds,omitempty"` + } `json:"affinitygroup,omitempty"` + Cpunumber int `json:"cpunumber,omitempty"` + Cpuspeed int `json:"cpuspeed,omitempty"` + Cpuused string `json:"cpuused,omitempty"` + Created string `json:"created,omitempty"` + Details map[string]string `json:"details,omitempty"` + Diskioread int64 `json:"diskioread,omitempty"` + Diskiowrite int64 `json:"diskiowrite,omitempty"` + Diskkbsread int64 `json:"diskkbsread,omitempty"` + Diskkbswrite int64 `json:"diskkbswrite,omitempty"` + Diskofferingid string `json:"diskofferingid,omitempty"` + Diskofferingname string `json:"diskofferingname,omitempty"` + Displayname string `json:"displayname,omitempty"` + Displayvm bool `json:"displayvm,omitempty"` + Domain string `json:"domain,omitempty"` + Domainid string `json:"domainid,omitempty"` + Forvirtualnetwork bool `json:"forvirtualnetwork,omitempty"` + Group string `json:"group,omitempty"` + Groupid string `json:"groupid,omitempty"` + Guestosid string `json:"guestosid,omitempty"` + Haenable bool `json:"haenable,omitempty"` + Hostid string `json:"hostid,omitempty"` + Hostname string `json:"hostname,omitempty"` + Hypervisor string `json:"hypervisor,omitempty"` + Id string `json:"id,omitempty"` + Instancename string `json:"instancename,omitempty"` + Isdynamicallyscalable bool `json:"isdynamicallyscalable,omitempty"` + Isodisplaytext string `json:"isodisplaytext,omitempty"` + Isoid string `json:"isoid,omitempty"` + Isoname string `json:"isoname,omitempty"` + Keypair string `json:"keypair,omitempty"` + Memory int `json:"memory,omitempty"` + Name string `json:"name,omitempty"` + Networkkbsread int64 `json:"networkkbsread,omitempty"` + Networkkbswrite int64 `json:"networkkbswrite,omitempty"` + Nic []struct { + Broadcasturi string `json:"broadcasturi,omitempty"` + Deviceid string `json:"deviceid,omitempty"` + Gateway string `json:"gateway,omitempty"` + Id string `json:"id,omitempty"` + Ip6address string `json:"ip6address,omitempty"` + Ip6cidr string `json:"ip6cidr,omitempty"` + Ip6gateway string `json:"ip6gateway,omitempty"` + Ipaddress string `json:"ipaddress,omitempty"` + Isdefault bool `json:"isdefault,omitempty"` + Isolationuri string `json:"isolationuri,omitempty"` + Macaddress string `json:"macaddress,omitempty"` + Netmask string `json:"netmask,omitempty"` + Networkid string `json:"networkid,omitempty"` + Networkname string `json:"networkname,omitempty"` + Secondaryip []struct { + Id string `json:"id,omitempty"` + Ipaddress string `json:"ipaddress,omitempty"` + } `json:"secondaryip,omitempty"` + Traffictype string `json:"traffictype,omitempty"` + Type string `json:"type,omitempty"` + Virtualmachineid string 
`json:"virtualmachineid,omitempty"` + } `json:"nic,omitempty"` + Ostypeid int64 `json:"ostypeid,omitempty"` + Password string `json:"password,omitempty"` + Passwordenabled bool `json:"passwordenabled,omitempty"` + Project string `json:"project,omitempty"` + Projectid string `json:"projectid,omitempty"` + Publicip string `json:"publicip,omitempty"` + Publicipid string `json:"publicipid,omitempty"` + Rootdeviceid int64 `json:"rootdeviceid,omitempty"` + Rootdevicetype string `json:"rootdevicetype,omitempty"` + Securitygroup []struct { + Account string `json:"account,omitempty"` + Description string `json:"description,omitempty"` + Domain string `json:"domain,omitempty"` + Domainid string `json:"domainid,omitempty"` + Egressrule []struct { + Account string `json:"account,omitempty"` + Cidr string `json:"cidr,omitempty"` + Endport int `json:"endport,omitempty"` + Icmpcode int `json:"icmpcode,omitempty"` + Icmptype int `json:"icmptype,omitempty"` + Protocol string `json:"protocol,omitempty"` + Ruleid string `json:"ruleid,omitempty"` + Securitygroupname string `json:"securitygroupname,omitempty"` + Startport int `json:"startport,omitempty"` + Tags []struct { + Account string `json:"account,omitempty"` + Customer string `json:"customer,omitempty"` + Domain string `json:"domain,omitempty"` + Domainid string `json:"domainid,omitempty"` + Key string `json:"key,omitempty"` + Project string `json:"project,omitempty"` + Projectid string `json:"projectid,omitempty"` + Resourceid string `json:"resourceid,omitempty"` + Resourcetype string `json:"resourcetype,omitempty"` + Value string `json:"value,omitempty"` + } `json:"tags,omitempty"` + } `json:"egressrule,omitempty"` + Id string `json:"id,omitempty"` + Ingressrule []struct { + Account string `json:"account,omitempty"` + Cidr string `json:"cidr,omitempty"` + Endport int `json:"endport,omitempty"` + Icmpcode int `json:"icmpcode,omitempty"` + Icmptype int `json:"icmptype,omitempty"` + Protocol string `json:"protocol,omitempty"` + Ruleid string `json:"ruleid,omitempty"` + Securitygroupname string `json:"securitygroupname,omitempty"` + Startport int `json:"startport,omitempty"` + Tags []struct { + Account string `json:"account,omitempty"` + Customer string `json:"customer,omitempty"` + Domain string `json:"domain,omitempty"` + Domainid string `json:"domainid,omitempty"` + Key string `json:"key,omitempty"` + Project string `json:"project,omitempty"` + Projectid string `json:"projectid,omitempty"` + Resourceid string `json:"resourceid,omitempty"` + Resourcetype string `json:"resourcetype,omitempty"` + Value string `json:"value,omitempty"` + } `json:"tags,omitempty"` + } `json:"ingressrule,omitempty"` + Name string `json:"name,omitempty"` + Project string `json:"project,omitempty"` + Projectid string `json:"projectid,omitempty"` + Tags []struct { + Account string `json:"account,omitempty"` + Customer string `json:"customer,omitempty"` + Domain string `json:"domain,omitempty"` + Domainid string `json:"domainid,omitempty"` + Key string `json:"key,omitempty"` + Project string `json:"project,omitempty"` + Projectid string `json:"projectid,omitempty"` + Resourceid string `json:"resourceid,omitempty"` + Resourcetype string `json:"resourcetype,omitempty"` + Value string `json:"value,omitempty"` + } `json:"tags,omitempty"` + } `json:"securitygroup,omitempty"` + Serviceofferingid string `json:"serviceofferingid,omitempty"` + Serviceofferingname string `json:"serviceofferingname,omitempty"` + Servicestate string `json:"servicestate,omitempty"` + State string 
`json:"state,omitempty"`
+	Tags []struct {
+		Account      string `json:"account,omitempty"`
+		Customer     string `json:"customer,omitempty"`
+		Domain       string `json:"domain,omitempty"`
+		Domainid     string `json:"domainid,omitempty"`
+		Key          string `json:"key,omitempty"`
+		Project      string `json:"project,omitempty"`
+		Projectid    string `json:"projectid,omitempty"`
+		Resourceid   string `json:"resourceid,omitempty"`
+		Resourcetype string `json:"resourcetype,omitempty"`
+		Value        string `json:"value,omitempty"`
+	} `json:"tags,omitempty"`
+	Templatedisplaytext string `json:"templatedisplaytext,omitempty"`
+	Templateid          string `json:"templateid,omitempty"`
+	Templatename        string `json:"templatename,omitempty"`
+	Vgpu                string `json:"vgpu,omitempty"`
+	Zoneid              string `json:"zoneid,omitempty"`
+	Zonename            string `json:"zonename,omitempty"`
+}
+
+type ExpungeVirtualMachineParams struct {
+	p map[string]interface{}
+}
+
+func (p *ExpungeVirtualMachineParams) toURLValues() url.Values {
+	u := url.Values{}
+	if p.p == nil {
+		return u
+	}
+	if v, found := p.p["id"]; found {
+		u.Set("id", v.(string))
+	}
+	return u
+}
+
+func (p *ExpungeVirtualMachineParams) SetId(v string) {
+	if p.p == nil {
+		p.p = make(map[string]interface{})
+	}
+	p.p["id"] = v
+	return
+}
+
+// You should always use this function to get a new ExpungeVirtualMachineParams instance,
+// as then you are sure you have configured all required params
+func (s *VirtualMachineService) NewExpungeVirtualMachineParams(id string) *ExpungeVirtualMachineParams {
+	p := &ExpungeVirtualMachineParams{}
+	p.p = make(map[string]interface{})
+	p.p["id"] = id
+	return p
+}
+
+// Expunge a virtual machine. Once expunged, it cannot be recovered.
+func (s *VirtualMachineService) ExpungeVirtualMachine(p *ExpungeVirtualMachineParams) (*ExpungeVirtualMachineResponse, error) {
+	resp, err := s.cs.newRequest("expungeVirtualMachine", p.toURLValues())
+	if err != nil {
+		return nil, err
+	}
+
+	var r ExpungeVirtualMachineResponse
+	if err := json.Unmarshal(resp, &r); err != nil {
+		return nil, err
+	}
+
+	// If we have an async client, we need to wait for the async result
+	if s.cs.async {
+		b, err := s.cs.GetAsyncJobResult(r.JobID, s.cs.timeout)
+		if err != nil {
+			if err == AsyncTimeoutErr {
+				return &r, err
+			}
+			return nil, err
+		}
+
+		if err := json.Unmarshal(b, &r); err != nil {
+			return nil, err
+		}
+	}
+	return &r, nil
+}
+
+type ExpungeVirtualMachineResponse struct {
+	JobID       string `json:"jobid,omitempty"`
+	Displaytext string `json:"displaytext,omitempty"`
+	Success     bool   `json:"success,omitempty"`
+}
+
+type CleanVMReservationsParams struct {
+	p map[string]interface{}
+}
+
+func (p *CleanVMReservationsParams) toURLValues() url.Values {
+	u := url.Values{}
+	if p.p == nil {
+		return u
+	}
+	return u
+}
+
+// You should always use this function to get a new CleanVMReservationsParams instance,
+// as then you are sure you have configured all required params
+func (s *VirtualMachineService) NewCleanVMReservationsParams() *CleanVMReservationsParams {
+	p := &CleanVMReservationsParams{}
+	p.p = make(map[string]interface{})
+	return p
+}
+
+// Cleans up VM reservations in the database.
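+// Example: an illustrative sketch (the client value `cs` is a hypothetical,
+// already-configured client). CleanVMReservations takes no parameters, so the
+// params value only needs to be constructed.
+//
+//	resp, err := cs.VirtualMachine.CleanVMReservations(cs.VirtualMachine.NewCleanVMReservationsParams())
+//	if err != nil {
+//		log.Fatalf("cleaning VM reservations: %v", err)
+//	}
+//	fmt.Println(resp.Displaytext)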
+func (s *VirtualMachineService) CleanVMReservations(p *CleanVMReservationsParams) (*CleanVMReservationsResponse, error) { + resp, err := s.cs.newRequest("cleanVMReservations", p.toURLValues()) + if err != nil { + return nil, err + } + + var r CleanVMReservationsResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + + // If we have a async client, we need to wait for the async result + if s.cs.async { + b, err := s.cs.GetAsyncJobResult(r.JobID, s.cs.timeout) + if err != nil { + if err == AsyncTimeoutErr { + return &r, err + } + return nil, err + } + + if err := json.Unmarshal(b, &r); err != nil { + return nil, err + } + } + return &r, nil +} + +type CleanVMReservationsResponse struct { + JobID string `json:"jobid,omitempty"` + Displaytext string `json:"displaytext,omitempty"` + Success bool `json:"success,omitempty"` +} + +type AddNicToVirtualMachineParams struct { + p map[string]interface{} +} + +func (p *AddNicToVirtualMachineParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["ipaddress"]; found { + u.Set("ipaddress", v.(string)) + } + if v, found := p.p["networkid"]; found { + u.Set("networkid", v.(string)) + } + if v, found := p.p["virtualmachineid"]; found { + u.Set("virtualmachineid", v.(string)) + } + return u +} + +func (p *AddNicToVirtualMachineParams) SetIpaddress(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["ipaddress"] = v + return +} + +func (p *AddNicToVirtualMachineParams) SetNetworkid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["networkid"] = v + return +} + +func (p *AddNicToVirtualMachineParams) SetVirtualmachineid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["virtualmachineid"] = v + return +} + +// You should always use this function to get a new AddNicToVirtualMachineParams instance, +// as then you are sure you have configured all required params +func (s *VirtualMachineService) NewAddNicToVirtualMachineParams(networkid string, virtualmachineid string) *AddNicToVirtualMachineParams { + p := &AddNicToVirtualMachineParams{} + p.p = make(map[string]interface{}) + p.p["networkid"] = networkid + p.p["virtualmachineid"] = virtualmachineid + return p +} + +// Adds VM to specified network by creating a NIC +func (s *VirtualMachineService) AddNicToVirtualMachine(p *AddNicToVirtualMachineParams) (*AddNicToVirtualMachineResponse, error) { + resp, err := s.cs.newRequest("addNicToVirtualMachine", p.toURLValues()) + if err != nil { + return nil, err + } + + var r AddNicToVirtualMachineResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + + // If we have a async client, we need to wait for the async result + if s.cs.async { + b, err := s.cs.GetAsyncJobResult(r.JobID, s.cs.timeout) + if err != nil { + if err == AsyncTimeoutErr { + return &r, err + } + return nil, err + } + + b, err = getRawValue(b) + if err != nil { + return nil, err + } + + if err := json.Unmarshal(b, &r); err != nil { + return nil, err + } + } + return &r, nil +} + +type AddNicToVirtualMachineResponse struct { + JobID string `json:"jobid,omitempty"` + Account string `json:"account,omitempty"` + Affinitygroup []struct { + Account string `json:"account,omitempty"` + Description string `json:"description,omitempty"` + Domain string `json:"domain,omitempty"` + Domainid string `json:"domainid,omitempty"` + Id string `json:"id,omitempty"` + Name string `json:"name,omitempty"` + Type string 
`json:"type,omitempty"` + VirtualmachineIds []string `json:"virtualmachineIds,omitempty"` + } `json:"affinitygroup,omitempty"` + Cpunumber int `json:"cpunumber,omitempty"` + Cpuspeed int `json:"cpuspeed,omitempty"` + Cpuused string `json:"cpuused,omitempty"` + Created string `json:"created,omitempty"` + Details map[string]string `json:"details,omitempty"` + Diskioread int64 `json:"diskioread,omitempty"` + Diskiowrite int64 `json:"diskiowrite,omitempty"` + Diskkbsread int64 `json:"diskkbsread,omitempty"` + Diskkbswrite int64 `json:"diskkbswrite,omitempty"` + Diskofferingid string `json:"diskofferingid,omitempty"` + Diskofferingname string `json:"diskofferingname,omitempty"` + Displayname string `json:"displayname,omitempty"` + Displayvm bool `json:"displayvm,omitempty"` + Domain string `json:"domain,omitempty"` + Domainid string `json:"domainid,omitempty"` + Forvirtualnetwork bool `json:"forvirtualnetwork,omitempty"` + Group string `json:"group,omitempty"` + Groupid string `json:"groupid,omitempty"` + Guestosid string `json:"guestosid,omitempty"` + Haenable bool `json:"haenable,omitempty"` + Hostid string `json:"hostid,omitempty"` + Hostname string `json:"hostname,omitempty"` + Hypervisor string `json:"hypervisor,omitempty"` + Id string `json:"id,omitempty"` + Instancename string `json:"instancename,omitempty"` + Isdynamicallyscalable bool `json:"isdynamicallyscalable,omitempty"` + Isodisplaytext string `json:"isodisplaytext,omitempty"` + Isoid string `json:"isoid,omitempty"` + Isoname string `json:"isoname,omitempty"` + Keypair string `json:"keypair,omitempty"` + Memory int `json:"memory,omitempty"` + Name string `json:"name,omitempty"` + Networkkbsread int64 `json:"networkkbsread,omitempty"` + Networkkbswrite int64 `json:"networkkbswrite,omitempty"` + Nic []struct { + Broadcasturi string `json:"broadcasturi,omitempty"` + Deviceid string `json:"deviceid,omitempty"` + Gateway string `json:"gateway,omitempty"` + Id string `json:"id,omitempty"` + Ip6address string `json:"ip6address,omitempty"` + Ip6cidr string `json:"ip6cidr,omitempty"` + Ip6gateway string `json:"ip6gateway,omitempty"` + Ipaddress string `json:"ipaddress,omitempty"` + Isdefault bool `json:"isdefault,omitempty"` + Isolationuri string `json:"isolationuri,omitempty"` + Macaddress string `json:"macaddress,omitempty"` + Netmask string `json:"netmask,omitempty"` + Networkid string `json:"networkid,omitempty"` + Networkname string `json:"networkname,omitempty"` + Secondaryip []struct { + Id string `json:"id,omitempty"` + Ipaddress string `json:"ipaddress,omitempty"` + } `json:"secondaryip,omitempty"` + Traffictype string `json:"traffictype,omitempty"` + Type string `json:"type,omitempty"` + Virtualmachineid string `json:"virtualmachineid,omitempty"` + } `json:"nic,omitempty"` + Ostypeid int64 `json:"ostypeid,omitempty"` + Password string `json:"password,omitempty"` + Passwordenabled bool `json:"passwordenabled,omitempty"` + Project string `json:"project,omitempty"` + Projectid string `json:"projectid,omitempty"` + Publicip string `json:"publicip,omitempty"` + Publicipid string `json:"publicipid,omitempty"` + Rootdeviceid int64 `json:"rootdeviceid,omitempty"` + Rootdevicetype string `json:"rootdevicetype,omitempty"` + Securitygroup []struct { + Account string `json:"account,omitempty"` + Description string `json:"description,omitempty"` + Domain string `json:"domain,omitempty"` + Domainid string `json:"domainid,omitempty"` + Egressrule []struct { + Account string `json:"account,omitempty"` + Cidr string `json:"cidr,omitempty"` + 
Endport int `json:"endport,omitempty"` + Icmpcode int `json:"icmpcode,omitempty"` + Icmptype int `json:"icmptype,omitempty"` + Protocol string `json:"protocol,omitempty"` + Ruleid string `json:"ruleid,omitempty"` + Securitygroupname string `json:"securitygroupname,omitempty"` + Startport int `json:"startport,omitempty"` + Tags []struct { + Account string `json:"account,omitempty"` + Customer string `json:"customer,omitempty"` + Domain string `json:"domain,omitempty"` + Domainid string `json:"domainid,omitempty"` + Key string `json:"key,omitempty"` + Project string `json:"project,omitempty"` + Projectid string `json:"projectid,omitempty"` + Resourceid string `json:"resourceid,omitempty"` + Resourcetype string `json:"resourcetype,omitempty"` + Value string `json:"value,omitempty"` + } `json:"tags,omitempty"` + } `json:"egressrule,omitempty"` + Id string `json:"id,omitempty"` + Ingressrule []struct { + Account string `json:"account,omitempty"` + Cidr string `json:"cidr,omitempty"` + Endport int `json:"endport,omitempty"` + Icmpcode int `json:"icmpcode,omitempty"` + Icmptype int `json:"icmptype,omitempty"` + Protocol string `json:"protocol,omitempty"` + Ruleid string `json:"ruleid,omitempty"` + Securitygroupname string `json:"securitygroupname,omitempty"` + Startport int `json:"startport,omitempty"` + Tags []struct { + Account string `json:"account,omitempty"` + Customer string `json:"customer,omitempty"` + Domain string `json:"domain,omitempty"` + Domainid string `json:"domainid,omitempty"` + Key string `json:"key,omitempty"` + Project string `json:"project,omitempty"` + Projectid string `json:"projectid,omitempty"` + Resourceid string `json:"resourceid,omitempty"` + Resourcetype string `json:"resourcetype,omitempty"` + Value string `json:"value,omitempty"` + } `json:"tags,omitempty"` + } `json:"ingressrule,omitempty"` + Name string `json:"name,omitempty"` + Project string `json:"project,omitempty"` + Projectid string `json:"projectid,omitempty"` + Tags []struct { + Account string `json:"account,omitempty"` + Customer string `json:"customer,omitempty"` + Domain string `json:"domain,omitempty"` + Domainid string `json:"domainid,omitempty"` + Key string `json:"key,omitempty"` + Project string `json:"project,omitempty"` + Projectid string `json:"projectid,omitempty"` + Resourceid string `json:"resourceid,omitempty"` + Resourcetype string `json:"resourcetype,omitempty"` + Value string `json:"value,omitempty"` + } `json:"tags,omitempty"` + } `json:"securitygroup,omitempty"` + Serviceofferingid string `json:"serviceofferingid,omitempty"` + Serviceofferingname string `json:"serviceofferingname,omitempty"` + Servicestate string `json:"servicestate,omitempty"` + State string `json:"state,omitempty"` + Tags []struct { + Account string `json:"account,omitempty"` + Customer string `json:"customer,omitempty"` + Domain string `json:"domain,omitempty"` + Domainid string `json:"domainid,omitempty"` + Key string `json:"key,omitempty"` + Project string `json:"project,omitempty"` + Projectid string `json:"projectid,omitempty"` + Resourceid string `json:"resourceid,omitempty"` + Resourcetype string `json:"resourcetype,omitempty"` + Value string `json:"value,omitempty"` + } `json:"tags,omitempty"` + Templatedisplaytext string `json:"templatedisplaytext,omitempty"` + Templateid string `json:"templateid,omitempty"` + Templatename string `json:"templatename,omitempty"` + Vgpu string `json:"vgpu,omitempty"` + Zoneid string `json:"zoneid,omitempty"` + Zonename string `json:"zonename,omitempty"` +} + +type 
RemoveNicFromVirtualMachineParams struct { + p map[string]interface{} +} + +func (p *RemoveNicFromVirtualMachineParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["nicid"]; found { + u.Set("nicid", v.(string)) + } + if v, found := p.p["virtualmachineid"]; found { + u.Set("virtualmachineid", v.(string)) + } + return u +} + +func (p *RemoveNicFromVirtualMachineParams) SetNicid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["nicid"] = v + return +} + +func (p *RemoveNicFromVirtualMachineParams) SetVirtualmachineid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["virtualmachineid"] = v + return +} + +// You should always use this function to get a new RemoveNicFromVirtualMachineParams instance, +// as then you are sure you have configured all required params +func (s *VirtualMachineService) NewRemoveNicFromVirtualMachineParams(nicid string, virtualmachineid string) *RemoveNicFromVirtualMachineParams { + p := &RemoveNicFromVirtualMachineParams{} + p.p = make(map[string]interface{}) + p.p["nicid"] = nicid + p.p["virtualmachineid"] = virtualmachineid + return p +} + +// Removes VM from specified network by deleting a NIC +func (s *VirtualMachineService) RemoveNicFromVirtualMachine(p *RemoveNicFromVirtualMachineParams) (*RemoveNicFromVirtualMachineResponse, error) { + resp, err := s.cs.newRequest("removeNicFromVirtualMachine", p.toURLValues()) + if err != nil { + return nil, err + } + + var r RemoveNicFromVirtualMachineResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + + // If we have a async client, we need to wait for the async result + if s.cs.async { + b, err := s.cs.GetAsyncJobResult(r.JobID, s.cs.timeout) + if err != nil { + if err == AsyncTimeoutErr { + return &r, err + } + return nil, err + } + + b, err = getRawValue(b) + if err != nil { + return nil, err + } + + if err := json.Unmarshal(b, &r); err != nil { + return nil, err + } + } + return &r, nil +} + +type RemoveNicFromVirtualMachineResponse struct { + JobID string `json:"jobid,omitempty"` + Account string `json:"account,omitempty"` + Affinitygroup []struct { + Account string `json:"account,omitempty"` + Description string `json:"description,omitempty"` + Domain string `json:"domain,omitempty"` + Domainid string `json:"domainid,omitempty"` + Id string `json:"id,omitempty"` + Name string `json:"name,omitempty"` + Type string `json:"type,omitempty"` + VirtualmachineIds []string `json:"virtualmachineIds,omitempty"` + } `json:"affinitygroup,omitempty"` + Cpunumber int `json:"cpunumber,omitempty"` + Cpuspeed int `json:"cpuspeed,omitempty"` + Cpuused string `json:"cpuused,omitempty"` + Created string `json:"created,omitempty"` + Details map[string]string `json:"details,omitempty"` + Diskioread int64 `json:"diskioread,omitempty"` + Diskiowrite int64 `json:"diskiowrite,omitempty"` + Diskkbsread int64 `json:"diskkbsread,omitempty"` + Diskkbswrite int64 `json:"diskkbswrite,omitempty"` + Diskofferingid string `json:"diskofferingid,omitempty"` + Diskofferingname string `json:"diskofferingname,omitempty"` + Displayname string `json:"displayname,omitempty"` + Displayvm bool `json:"displayvm,omitempty"` + Domain string `json:"domain,omitempty"` + Domainid string `json:"domainid,omitempty"` + Forvirtualnetwork bool `json:"forvirtualnetwork,omitempty"` + Group string `json:"group,omitempty"` + Groupid string `json:"groupid,omitempty"` + Guestosid string `json:"guestosid,omitempty"` + Haenable bool 
`json:"haenable,omitempty"` + Hostid string `json:"hostid,omitempty"` + Hostname string `json:"hostname,omitempty"` + Hypervisor string `json:"hypervisor,omitempty"` + Id string `json:"id,omitempty"` + Instancename string `json:"instancename,omitempty"` + Isdynamicallyscalable bool `json:"isdynamicallyscalable,omitempty"` + Isodisplaytext string `json:"isodisplaytext,omitempty"` + Isoid string `json:"isoid,omitempty"` + Isoname string `json:"isoname,omitempty"` + Keypair string `json:"keypair,omitempty"` + Memory int `json:"memory,omitempty"` + Name string `json:"name,omitempty"` + Networkkbsread int64 `json:"networkkbsread,omitempty"` + Networkkbswrite int64 `json:"networkkbswrite,omitempty"` + Nic []struct { + Broadcasturi string `json:"broadcasturi,omitempty"` + Deviceid string `json:"deviceid,omitempty"` + Gateway string `json:"gateway,omitempty"` + Id string `json:"id,omitempty"` + Ip6address string `json:"ip6address,omitempty"` + Ip6cidr string `json:"ip6cidr,omitempty"` + Ip6gateway string `json:"ip6gateway,omitempty"` + Ipaddress string `json:"ipaddress,omitempty"` + Isdefault bool `json:"isdefault,omitempty"` + Isolationuri string `json:"isolationuri,omitempty"` + Macaddress string `json:"macaddress,omitempty"` + Netmask string `json:"netmask,omitempty"` + Networkid string `json:"networkid,omitempty"` + Networkname string `json:"networkname,omitempty"` + Secondaryip []struct { + Id string `json:"id,omitempty"` + Ipaddress string `json:"ipaddress,omitempty"` + } `json:"secondaryip,omitempty"` + Traffictype string `json:"traffictype,omitempty"` + Type string `json:"type,omitempty"` + Virtualmachineid string `json:"virtualmachineid,omitempty"` + } `json:"nic,omitempty"` + Ostypeid int64 `json:"ostypeid,omitempty"` + Password string `json:"password,omitempty"` + Passwordenabled bool `json:"passwordenabled,omitempty"` + Project string `json:"project,omitempty"` + Projectid string `json:"projectid,omitempty"` + Publicip string `json:"publicip,omitempty"` + Publicipid string `json:"publicipid,omitempty"` + Rootdeviceid int64 `json:"rootdeviceid,omitempty"` + Rootdevicetype string `json:"rootdevicetype,omitempty"` + Securitygroup []struct { + Account string `json:"account,omitempty"` + Description string `json:"description,omitempty"` + Domain string `json:"domain,omitempty"` + Domainid string `json:"domainid,omitempty"` + Egressrule []struct { + Account string `json:"account,omitempty"` + Cidr string `json:"cidr,omitempty"` + Endport int `json:"endport,omitempty"` + Icmpcode int `json:"icmpcode,omitempty"` + Icmptype int `json:"icmptype,omitempty"` + Protocol string `json:"protocol,omitempty"` + Ruleid string `json:"ruleid,omitempty"` + Securitygroupname string `json:"securitygroupname,omitempty"` + Startport int `json:"startport,omitempty"` + Tags []struct { + Account string `json:"account,omitempty"` + Customer string `json:"customer,omitempty"` + Domain string `json:"domain,omitempty"` + Domainid string `json:"domainid,omitempty"` + Key string `json:"key,omitempty"` + Project string `json:"project,omitempty"` + Projectid string `json:"projectid,omitempty"` + Resourceid string `json:"resourceid,omitempty"` + Resourcetype string `json:"resourcetype,omitempty"` + Value string `json:"value,omitempty"` + } `json:"tags,omitempty"` + } `json:"egressrule,omitempty"` + Id string `json:"id,omitempty"` + Ingressrule []struct { + Account string `json:"account,omitempty"` + Cidr string `json:"cidr,omitempty"` + Endport int `json:"endport,omitempty"` + Icmpcode int `json:"icmpcode,omitempty"` + 
Icmptype int `json:"icmptype,omitempty"` + Protocol string `json:"protocol,omitempty"` + Ruleid string `json:"ruleid,omitempty"` + Securitygroupname string `json:"securitygroupname,omitempty"` + Startport int `json:"startport,omitempty"` + Tags []struct { + Account string `json:"account,omitempty"` + Customer string `json:"customer,omitempty"` + Domain string `json:"domain,omitempty"` + Domainid string `json:"domainid,omitempty"` + Key string `json:"key,omitempty"` + Project string `json:"project,omitempty"` + Projectid string `json:"projectid,omitempty"` + Resourceid string `json:"resourceid,omitempty"` + Resourcetype string `json:"resourcetype,omitempty"` + Value string `json:"value,omitempty"` + } `json:"tags,omitempty"` + } `json:"ingressrule,omitempty"` + Name string `json:"name,omitempty"` + Project string `json:"project,omitempty"` + Projectid string `json:"projectid,omitempty"` + Tags []struct { + Account string `json:"account,omitempty"` + Customer string `json:"customer,omitempty"` + Domain string `json:"domain,omitempty"` + Domainid string `json:"domainid,omitempty"` + Key string `json:"key,omitempty"` + Project string `json:"project,omitempty"` + Projectid string `json:"projectid,omitempty"` + Resourceid string `json:"resourceid,omitempty"` + Resourcetype string `json:"resourcetype,omitempty"` + Value string `json:"value,omitempty"` + } `json:"tags,omitempty"` + } `json:"securitygroup,omitempty"` + Serviceofferingid string `json:"serviceofferingid,omitempty"` + Serviceofferingname string `json:"serviceofferingname,omitempty"` + Servicestate string `json:"servicestate,omitempty"` + State string `json:"state,omitempty"` + Tags []struct { + Account string `json:"account,omitempty"` + Customer string `json:"customer,omitempty"` + Domain string `json:"domain,omitempty"` + Domainid string `json:"domainid,omitempty"` + Key string `json:"key,omitempty"` + Project string `json:"project,omitempty"` + Projectid string `json:"projectid,omitempty"` + Resourceid string `json:"resourceid,omitempty"` + Resourcetype string `json:"resourcetype,omitempty"` + Value string `json:"value,omitempty"` + } `json:"tags,omitempty"` + Templatedisplaytext string `json:"templatedisplaytext,omitempty"` + Templateid string `json:"templateid,omitempty"` + Templatename string `json:"templatename,omitempty"` + Vgpu string `json:"vgpu,omitempty"` + Zoneid string `json:"zoneid,omitempty"` + Zonename string `json:"zonename,omitempty"` +} + +type UpdateDefaultNicForVirtualMachineParams struct { + p map[string]interface{} +} + +func (p *UpdateDefaultNicForVirtualMachineParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["nicid"]; found { + u.Set("nicid", v.(string)) + } + if v, found := p.p["virtualmachineid"]; found { + u.Set("virtualmachineid", v.(string)) + } + return u +} + +func (p *UpdateDefaultNicForVirtualMachineParams) SetNicid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["nicid"] = v + return +} + +func (p *UpdateDefaultNicForVirtualMachineParams) SetVirtualmachineid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["virtualmachineid"] = v + return +} + +// You should always use this function to get a new UpdateDefaultNicForVirtualMachineParams instance, +// as then you are sure you have configured all required params +func (s *VirtualMachineService) NewUpdateDefaultNicForVirtualMachineParams(nicid string, virtualmachineid string) *UpdateDefaultNicForVirtualMachineParams { + p := 
&UpdateDefaultNicForVirtualMachineParams{} + p.p = make(map[string]interface{}) + p.p["nicid"] = nicid + p.p["virtualmachineid"] = virtualmachineid + return p +} + +// Changes the default NIC on a VM +func (s *VirtualMachineService) UpdateDefaultNicForVirtualMachine(p *UpdateDefaultNicForVirtualMachineParams) (*UpdateDefaultNicForVirtualMachineResponse, error) { + resp, err := s.cs.newRequest("updateDefaultNicForVirtualMachine", p.toURLValues()) + if err != nil { + return nil, err + } + + var r UpdateDefaultNicForVirtualMachineResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + + // If we have an async client, we need to wait for the async result + if s.cs.async { + b, err := s.cs.GetAsyncJobResult(r.JobID, s.cs.timeout) + if err != nil { + if err == AsyncTimeoutErr { + return &r, err + } + return nil, err + } + + b, err = getRawValue(b) + if err != nil { + return nil, err + } + + if err := json.Unmarshal(b, &r); err != nil { + return nil, err + } + } + return &r, nil +} + +type UpdateDefaultNicForVirtualMachineResponse struct { + JobID string `json:"jobid,omitempty"` + Account string `json:"account,omitempty"` + Affinitygroup []struct { + Account string `json:"account,omitempty"` + Description string `json:"description,omitempty"` + Domain string `json:"domain,omitempty"` + Domainid string `json:"domainid,omitempty"` + Id string `json:"id,omitempty"` + Name string `json:"name,omitempty"` + Type string `json:"type,omitempty"` + VirtualmachineIds []string `json:"virtualmachineIds,omitempty"` + } `json:"affinitygroup,omitempty"` + Cpunumber int `json:"cpunumber,omitempty"` + Cpuspeed int `json:"cpuspeed,omitempty"` + Cpuused string `json:"cpuused,omitempty"` + Created string `json:"created,omitempty"` + Details map[string]string `json:"details,omitempty"` + Diskioread int64 `json:"diskioread,omitempty"` + Diskiowrite int64 `json:"diskiowrite,omitempty"` + Diskkbsread int64 `json:"diskkbsread,omitempty"` + Diskkbswrite int64 `json:"diskkbswrite,omitempty"` + Diskofferingid string `json:"diskofferingid,omitempty"` + Diskofferingname string `json:"diskofferingname,omitempty"` + Displayname string `json:"displayname,omitempty"` + Displayvm bool `json:"displayvm,omitempty"` + Domain string `json:"domain,omitempty"` + Domainid string `json:"domainid,omitempty"` + Forvirtualnetwork bool `json:"forvirtualnetwork,omitempty"` + Group string `json:"group,omitempty"` + Groupid string `json:"groupid,omitempty"` + Guestosid string `json:"guestosid,omitempty"` + Haenable bool `json:"haenable,omitempty"` + Hostid string `json:"hostid,omitempty"` + Hostname string `json:"hostname,omitempty"` + Hypervisor string `json:"hypervisor,omitempty"` + Id string `json:"id,omitempty"` + Instancename string `json:"instancename,omitempty"` + Isdynamicallyscalable bool `json:"isdynamicallyscalable,omitempty"` + Isodisplaytext string `json:"isodisplaytext,omitempty"` + Isoid string `json:"isoid,omitempty"` + Isoname string `json:"isoname,omitempty"` + Keypair string `json:"keypair,omitempty"` + Memory int `json:"memory,omitempty"` + Name string `json:"name,omitempty"` + Networkkbsread int64 `json:"networkkbsread,omitempty"` + Networkkbswrite int64 `json:"networkkbswrite,omitempty"` + Nic []struct { + Broadcasturi string `json:"broadcasturi,omitempty"` + Deviceid string `json:"deviceid,omitempty"` + Gateway string `json:"gateway,omitempty"` + Id string `json:"id,omitempty"` + Ip6address string `json:"ip6address,omitempty"` + Ip6cidr string `json:"ip6cidr,omitempty"` + Ip6gateway string
`json:"ip6gateway,omitempty"` + Ipaddress string `json:"ipaddress,omitempty"` + Isdefault bool `json:"isdefault,omitempty"` + Isolationuri string `json:"isolationuri,omitempty"` + Macaddress string `json:"macaddress,omitempty"` + Netmask string `json:"netmask,omitempty"` + Networkid string `json:"networkid,omitempty"` + Networkname string `json:"networkname,omitempty"` + Secondaryip []struct { + Id string `json:"id,omitempty"` + Ipaddress string `json:"ipaddress,omitempty"` + } `json:"secondaryip,omitempty"` + Traffictype string `json:"traffictype,omitempty"` + Type string `json:"type,omitempty"` + Virtualmachineid string `json:"virtualmachineid,omitempty"` + } `json:"nic,omitempty"` + Ostypeid int64 `json:"ostypeid,omitempty"` + Password string `json:"password,omitempty"` + Passwordenabled bool `json:"passwordenabled,omitempty"` + Project string `json:"project,omitempty"` + Projectid string `json:"projectid,omitempty"` + Publicip string `json:"publicip,omitempty"` + Publicipid string `json:"publicipid,omitempty"` + Rootdeviceid int64 `json:"rootdeviceid,omitempty"` + Rootdevicetype string `json:"rootdevicetype,omitempty"` + Securitygroup []struct { + Account string `json:"account,omitempty"` + Description string `json:"description,omitempty"` + Domain string `json:"domain,omitempty"` + Domainid string `json:"domainid,omitempty"` + Egressrule []struct { + Account string `json:"account,omitempty"` + Cidr string `json:"cidr,omitempty"` + Endport int `json:"endport,omitempty"` + Icmpcode int `json:"icmpcode,omitempty"` + Icmptype int `json:"icmptype,omitempty"` + Protocol string `json:"protocol,omitempty"` + Ruleid string `json:"ruleid,omitempty"` + Securitygroupname string `json:"securitygroupname,omitempty"` + Startport int `json:"startport,omitempty"` + Tags []struct { + Account string `json:"account,omitempty"` + Customer string `json:"customer,omitempty"` + Domain string `json:"domain,omitempty"` + Domainid string `json:"domainid,omitempty"` + Key string `json:"key,omitempty"` + Project string `json:"project,omitempty"` + Projectid string `json:"projectid,omitempty"` + Resourceid string `json:"resourceid,omitempty"` + Resourcetype string `json:"resourcetype,omitempty"` + Value string `json:"value,omitempty"` + } `json:"tags,omitempty"` + } `json:"egressrule,omitempty"` + Id string `json:"id,omitempty"` + Ingressrule []struct { + Account string `json:"account,omitempty"` + Cidr string `json:"cidr,omitempty"` + Endport int `json:"endport,omitempty"` + Icmpcode int `json:"icmpcode,omitempty"` + Icmptype int `json:"icmptype,omitempty"` + Protocol string `json:"protocol,omitempty"` + Ruleid string `json:"ruleid,omitempty"` + Securitygroupname string `json:"securitygroupname,omitempty"` + Startport int `json:"startport,omitempty"` + Tags []struct { + Account string `json:"account,omitempty"` + Customer string `json:"customer,omitempty"` + Domain string `json:"domain,omitempty"` + Domainid string `json:"domainid,omitempty"` + Key string `json:"key,omitempty"` + Project string `json:"project,omitempty"` + Projectid string `json:"projectid,omitempty"` + Resourceid string `json:"resourceid,omitempty"` + Resourcetype string `json:"resourcetype,omitempty"` + Value string `json:"value,omitempty"` + } `json:"tags,omitempty"` + } `json:"ingressrule,omitempty"` + Name string `json:"name,omitempty"` + Project string `json:"project,omitempty"` + Projectid string `json:"projectid,omitempty"` + Tags []struct { + Account string `json:"account,omitempty"` + Customer string `json:"customer,omitempty"` + Domain 
string `json:"domain,omitempty"` + Domainid string `json:"domainid,omitempty"` + Key string `json:"key,omitempty"` + Project string `json:"project,omitempty"` + Projectid string `json:"projectid,omitempty"` + Resourceid string `json:"resourceid,omitempty"` + Resourcetype string `json:"resourcetype,omitempty"` + Value string `json:"value,omitempty"` + } `json:"tags,omitempty"` + } `json:"securitygroup,omitempty"` + Serviceofferingid string `json:"serviceofferingid,omitempty"` + Serviceofferingname string `json:"serviceofferingname,omitempty"` + Servicestate string `json:"servicestate,omitempty"` + State string `json:"state,omitempty"` + Tags []struct { + Account string `json:"account,omitempty"` + Customer string `json:"customer,omitempty"` + Domain string `json:"domain,omitempty"` + Domainid string `json:"domainid,omitempty"` + Key string `json:"key,omitempty"` + Project string `json:"project,omitempty"` + Projectid string `json:"projectid,omitempty"` + Resourceid string `json:"resourceid,omitempty"` + Resourcetype string `json:"resourcetype,omitempty"` + Value string `json:"value,omitempty"` + } `json:"tags,omitempty"` + Templatedisplaytext string `json:"templatedisplaytext,omitempty"` + Templateid string `json:"templateid,omitempty"` + Templatename string `json:"templatename,omitempty"` + Vgpu string `json:"vgpu,omitempty"` + Zoneid string `json:"zoneid,omitempty"` + Zonename string `json:"zonename,omitempty"` +} diff --git a/vendor/github.com/xanzy/go-cloudstack/cloudstack/VolumeService.go b/vendor/github.com/xanzy/go-cloudstack/cloudstack/VolumeService.go new file mode 100644 index 000000000..eb4a7c460 --- /dev/null +++ b/vendor/github.com/xanzy/go-cloudstack/cloudstack/VolumeService.go @@ -0,0 +1,1939 @@ +// +// Copyright 2014, Sander van Harmelen +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
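Every service in this vendored client follows the same generated pattern, and the VolumeService below is no exception: build the params with the service's New...Params helper (which pins the required arguments), fill in the rest via the Set... helpers, then invoke the method; when the client was built for async operation the call blocks on GetAsyncJobResult until the job finishes or times out. A minimal usage sketch, assuming an async client from this same package (the endpoint, keys, and UUIDs are placeholders):

    package main

    import (
    	"fmt"
    	"log"

    	"github.com/xanzy/go-cloudstack/cloudstack"
    )

    func main() {
    	// Placeholder endpoint and credentials; an async client polls
    	// the async job to completion before returning.
    	cs := cloudstack.NewAsyncClient("https://cloud.example.com/client/api", "API_KEY", "SECRET_KEY", true)

    	// Required params go through the constructor; both UUIDs are placeholders.
    	p := cs.VirtualMachine.NewUpdateDefaultNicForVirtualMachineParams("nic-uuid", "vm-uuid")
    	r, err := cs.VirtualMachine.UpdateDefaultNicForVirtualMachine(p)
    	if err != nil {
    		log.Fatal(err)
    	}
    	fmt.Printf("default NIC changed for %s (%s)\n", r.Name, r.Id)
    }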
+// + +package cloudstack + +import ( + "encoding/json" + "fmt" + "net/url" + "strconv" + "strings" +) + +type AttachVolumeParams struct { + p map[string]interface{} +} + +func (p *AttachVolumeParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["deviceid"]; found { + vv := strconv.FormatInt(v.(int64), 10) + u.Set("deviceid", vv) + } + if v, found := p.p["id"]; found { + u.Set("id", v.(string)) + } + if v, found := p.p["virtualmachineid"]; found { + u.Set("virtualmachineid", v.(string)) + } + return u +} + +func (p *AttachVolumeParams) SetDeviceid(v int64) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["deviceid"] = v + return +} + +func (p *AttachVolumeParams) SetId(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["id"] = v + return +} + +func (p *AttachVolumeParams) SetVirtualmachineid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["virtualmachineid"] = v + return +} + +// You should always use this function to get a new AttachVolumeParams instance, +// as then you are sure you have configured all required params +func (s *VolumeService) NewAttachVolumeParams(id string, virtualmachineid string) *AttachVolumeParams { + p := &AttachVolumeParams{} + p.p = make(map[string]interface{}) + p.p["id"] = id + p.p["virtualmachineid"] = virtualmachineid + return p +} + +// Attaches a disk volume to a virtual machine. +func (s *VolumeService) AttachVolume(p *AttachVolumeParams) (*AttachVolumeResponse, error) { + resp, err := s.cs.newRequest("attachVolume", p.toURLValues()) + if err != nil { + return nil, err + } + + var r AttachVolumeResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + + // If we have an async client, we need to wait for the async result + if s.cs.async { + b, err := s.cs.GetAsyncJobResult(r.JobID, s.cs.timeout) + if err != nil { + if err == AsyncTimeoutErr { + return &r, err + } + return nil, err + } + + b, err = getRawValue(b) + if err != nil { + return nil, err + } + + if err := json.Unmarshal(b, &r); err != nil { + return nil, err + } + } + return &r, nil +} + +type AttachVolumeResponse struct { + JobID string `json:"jobid,omitempty"` + Account string `json:"account,omitempty"` + Attached string `json:"attached,omitempty"` + Chaininfo string `json:"chaininfo,omitempty"` + Created string `json:"created,omitempty"` + Destroyed bool `json:"destroyed,omitempty"` + Deviceid int64 `json:"deviceid,omitempty"` + DiskBytesReadRate int64 `json:"diskBytesReadRate,omitempty"` + DiskBytesWriteRate int64 `json:"diskBytesWriteRate,omitempty"` + DiskIopsReadRate int64 `json:"diskIopsReadRate,omitempty"` + DiskIopsWriteRate int64 `json:"diskIopsWriteRate,omitempty"` + Diskofferingdisplaytext string `json:"diskofferingdisplaytext,omitempty"` + Diskofferingid string `json:"diskofferingid,omitempty"` + Diskofferingname string `json:"diskofferingname,omitempty"` + Displayvolume bool `json:"displayvolume,omitempty"` + Domain string `json:"domain,omitempty"` + Domainid string `json:"domainid,omitempty"` + Hypervisor string `json:"hypervisor,omitempty"` + Id string `json:"id,omitempty"` + Isextractable bool `json:"isextractable,omitempty"` + Isodisplaytext string `json:"isodisplaytext,omitempty"` + Isoid string `json:"isoid,omitempty"` + Isoname string `json:"isoname,omitempty"` + Maxiops int64 `json:"maxiops,omitempty"` + Miniops int64 `json:"miniops,omitempty"` + Name string `json:"name,omitempty"` + Path string `json:"path,omitempty"` +
Project string `json:"project,omitempty"` + Projectid string `json:"projectid,omitempty"` + Quiescevm bool `json:"quiescevm,omitempty"` + Serviceofferingdisplaytext string `json:"serviceofferingdisplaytext,omitempty"` + Serviceofferingid string `json:"serviceofferingid,omitempty"` + Serviceofferingname string `json:"serviceofferingname,omitempty"` + Size int64 `json:"size,omitempty"` + Snapshotid string `json:"snapshotid,omitempty"` + State string `json:"state,omitempty"` + Status string `json:"status,omitempty"` + Storage string `json:"storage,omitempty"` + Storageid string `json:"storageid,omitempty"` + Storagetype string `json:"storagetype,omitempty"` + Tags []struct { + Account string `json:"account,omitempty"` + Customer string `json:"customer,omitempty"` + Domain string `json:"domain,omitempty"` + Domainid string `json:"domainid,omitempty"` + Key string `json:"key,omitempty"` + Project string `json:"project,omitempty"` + Projectid string `json:"projectid,omitempty"` + Resourceid string `json:"resourceid,omitempty"` + Resourcetype string `json:"resourcetype,omitempty"` + Value string `json:"value,omitempty"` + } `json:"tags,omitempty"` + Templatedisplaytext string `json:"templatedisplaytext,omitempty"` + Templateid string `json:"templateid,omitempty"` + Templatename string `json:"templatename,omitempty"` + Type string `json:"type,omitempty"` + Virtualmachineid string `json:"virtualmachineid,omitempty"` + Vmdisplayname string `json:"vmdisplayname,omitempty"` + Vmname string `json:"vmname,omitempty"` + Vmstate string `json:"vmstate,omitempty"` + Zoneid string `json:"zoneid,omitempty"` + Zonename string `json:"zonename,omitempty"` +} + +type UploadVolumeParams struct { + p map[string]interface{} +} + +func (p *UploadVolumeParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["account"]; found { + u.Set("account", v.(string)) + } + if v, found := p.p["checksum"]; found { + u.Set("checksum", v.(string)) + } + if v, found := p.p["diskofferingid"]; found { + u.Set("diskofferingid", v.(string)) + } + if v, found := p.p["domainid"]; found { + u.Set("domainid", v.(string)) + } + if v, found := p.p["format"]; found { + u.Set("format", v.(string)) + } + if v, found := p.p["imagestoreuuid"]; found { + u.Set("imagestoreuuid", v.(string)) + } + if v, found := p.p["name"]; found { + u.Set("name", v.(string)) + } + if v, found := p.p["projectid"]; found { + u.Set("projectid", v.(string)) + } + if v, found := p.p["url"]; found { + u.Set("url", v.(string)) + } + if v, found := p.p["zoneid"]; found { + u.Set("zoneid", v.(string)) + } + return u +} + +func (p *UploadVolumeParams) SetAccount(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["account"] = v + return +} + +func (p *UploadVolumeParams) SetChecksum(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["checksum"] = v + return +} + +func (p *UploadVolumeParams) SetDiskofferingid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["diskofferingid"] = v + return +} + +func (p *UploadVolumeParams) SetDomainid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["domainid"] = v + return +} + +func (p *UploadVolumeParams) SetFormat(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["format"] = v + return +} + +func (p *UploadVolumeParams) SetImagestoreuuid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["imagestoreuuid"] = v + return +} + +func 
(p *UploadVolumeParams) SetName(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["name"] = v + return +} + +func (p *UploadVolumeParams) SetProjectid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["projectid"] = v + return +} + +func (p *UploadVolumeParams) SetUrl(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["url"] = v + return +} + +func (p *UploadVolumeParams) SetZoneid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["zoneid"] = v + return +} + +// You should always use this function to get a new UploadVolumeParams instance, +// as then you are sure you have configured all required params +func (s *VolumeService) NewUploadVolumeParams(format string, name string, url string, zoneid string) *UploadVolumeParams { + p := &UploadVolumeParams{} + p.p = make(map[string]interface{}) + p.p["format"] = format + p.p["name"] = name + p.p["url"] = url + p.p["zoneid"] = zoneid + return p +} + +// Uploads a data disk. +func (s *VolumeService) UploadVolume(p *UploadVolumeParams) (*UploadVolumeResponse, error) { + resp, err := s.cs.newRequest("uploadVolume", p.toURLValues()) + if err != nil { + return nil, err + } + + var r UploadVolumeResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + + // If we have an async client, we need to wait for the async result + if s.cs.async { + b, err := s.cs.GetAsyncJobResult(r.JobID, s.cs.timeout) + if err != nil { + if err == AsyncTimeoutErr { + return &r, err + } + return nil, err + } + + b, err = getRawValue(b) + if err != nil { + return nil, err + } + + if err := json.Unmarshal(b, &r); err != nil { + return nil, err + } + } + return &r, nil +} + +type UploadVolumeResponse struct { + JobID string `json:"jobid,omitempty"` + Account string `json:"account,omitempty"` + Attached string `json:"attached,omitempty"` + Chaininfo string `json:"chaininfo,omitempty"` + Created string `json:"created,omitempty"` + Destroyed bool `json:"destroyed,omitempty"` + Deviceid int64 `json:"deviceid,omitempty"` + DiskBytesReadRate int64 `json:"diskBytesReadRate,omitempty"` + DiskBytesWriteRate int64 `json:"diskBytesWriteRate,omitempty"` + DiskIopsReadRate int64 `json:"diskIopsReadRate,omitempty"` + DiskIopsWriteRate int64 `json:"diskIopsWriteRate,omitempty"` + Diskofferingdisplaytext string `json:"diskofferingdisplaytext,omitempty"` + Diskofferingid string `json:"diskofferingid,omitempty"` + Diskofferingname string `json:"diskofferingname,omitempty"` + Displayvolume bool `json:"displayvolume,omitempty"` + Domain string `json:"domain,omitempty"` + Domainid string `json:"domainid,omitempty"` + Hypervisor string `json:"hypervisor,omitempty"` + Id string `json:"id,omitempty"` + Isextractable bool `json:"isextractable,omitempty"` + Isodisplaytext string `json:"isodisplaytext,omitempty"` + Isoid string `json:"isoid,omitempty"` + Isoname string `json:"isoname,omitempty"` + Maxiops int64 `json:"maxiops,omitempty"` + Miniops int64 `json:"miniops,omitempty"` + Name string `json:"name,omitempty"` + Path string `json:"path,omitempty"` + Project string `json:"project,omitempty"` + Projectid string `json:"projectid,omitempty"` + Quiescevm bool `json:"quiescevm,omitempty"` + Serviceofferingdisplaytext string `json:"serviceofferingdisplaytext,omitempty"` + Serviceofferingid string `json:"serviceofferingid,omitempty"` + Serviceofferingname string `json:"serviceofferingname,omitempty"` + Size int64 `json:"size,omitempty"` + Snapshotid string
`json:"snapshotid,omitempty"` + State string `json:"state,omitempty"` + Status string `json:"status,omitempty"` + Storage string `json:"storage,omitempty"` + Storageid string `json:"storageid,omitempty"` + Storagetype string `json:"storagetype,omitempty"` + Tags []struct { + Account string `json:"account,omitempty"` + Customer string `json:"customer,omitempty"` + Domain string `json:"domain,omitempty"` + Domainid string `json:"domainid,omitempty"` + Key string `json:"key,omitempty"` + Project string `json:"project,omitempty"` + Projectid string `json:"projectid,omitempty"` + Resourceid string `json:"resourceid,omitempty"` + Resourcetype string `json:"resourcetype,omitempty"` + Value string `json:"value,omitempty"` + } `json:"tags,omitempty"` + Templatedisplaytext string `json:"templatedisplaytext,omitempty"` + Templateid string `json:"templateid,omitempty"` + Templatename string `json:"templatename,omitempty"` + Type string `json:"type,omitempty"` + Virtualmachineid string `json:"virtualmachineid,omitempty"` + Vmdisplayname string `json:"vmdisplayname,omitempty"` + Vmname string `json:"vmname,omitempty"` + Vmstate string `json:"vmstate,omitempty"` + Zoneid string `json:"zoneid,omitempty"` + Zonename string `json:"zonename,omitempty"` +} + +type DetachVolumeParams struct { + p map[string]interface{} +} + +func (p *DetachVolumeParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["deviceid"]; found { + vv := strconv.FormatInt(v.(int64), 10) + u.Set("deviceid", vv) + } + if v, found := p.p["id"]; found { + u.Set("id", v.(string)) + } + if v, found := p.p["virtualmachineid"]; found { + u.Set("virtualmachineid", v.(string)) + } + return u +} + +func (p *DetachVolumeParams) SetDeviceid(v int64) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["deviceid"] = v + return +} + +func (p *DetachVolumeParams) SetId(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["id"] = v + return +} + +func (p *DetachVolumeParams) SetVirtualmachineid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["virtualmachineid"] = v + return +} + +// You should always use this function to get a new DetachVolumeParams instance, +// as then you are sure you have configured all required params +func (s *VolumeService) NewDetachVolumeParams() *DetachVolumeParams { + p := &DetachVolumeParams{} + p.p = make(map[string]interface{}) + return p +} + +// Detaches a disk volume from a virtual machine. 
+func (s *VolumeService) DetachVolume(p *DetachVolumeParams) (*DetachVolumeResponse, error) { + resp, err := s.cs.newRequest("detachVolume", p.toURLValues()) + if err != nil { + return nil, err + } + + var r DetachVolumeResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + + // If we have an async client, we need to wait for the async result + if s.cs.async { + b, err := s.cs.GetAsyncJobResult(r.JobID, s.cs.timeout) + if err != nil { + if err == AsyncTimeoutErr { + return &r, err + } + return nil, err + } + + b, err = getRawValue(b) + if err != nil { + return nil, err + } + + if err := json.Unmarshal(b, &r); err != nil { + return nil, err + } + } + return &r, nil +} + +type DetachVolumeResponse struct { + JobID string `json:"jobid,omitempty"` + Account string `json:"account,omitempty"` + Attached string `json:"attached,omitempty"` + Chaininfo string `json:"chaininfo,omitempty"` + Created string `json:"created,omitempty"` + Destroyed bool `json:"destroyed,omitempty"` + Deviceid int64 `json:"deviceid,omitempty"` + DiskBytesReadRate int64 `json:"diskBytesReadRate,omitempty"` + DiskBytesWriteRate int64 `json:"diskBytesWriteRate,omitempty"` + DiskIopsReadRate int64 `json:"diskIopsReadRate,omitempty"` + DiskIopsWriteRate int64 `json:"diskIopsWriteRate,omitempty"` + Diskofferingdisplaytext string `json:"diskofferingdisplaytext,omitempty"` + Diskofferingid string `json:"diskofferingid,omitempty"` + Diskofferingname string `json:"diskofferingname,omitempty"` + Displayvolume bool `json:"displayvolume,omitempty"` + Domain string `json:"domain,omitempty"` + Domainid string `json:"domainid,omitempty"` + Hypervisor string `json:"hypervisor,omitempty"` + Id string `json:"id,omitempty"` + Isextractable bool `json:"isextractable,omitempty"` + Isodisplaytext string `json:"isodisplaytext,omitempty"` + Isoid string `json:"isoid,omitempty"` + Isoname string `json:"isoname,omitempty"` + Maxiops int64 `json:"maxiops,omitempty"` + Miniops int64 `json:"miniops,omitempty"` + Name string `json:"name,omitempty"` + Path string `json:"path,omitempty"` + Project string `json:"project,omitempty"` + Projectid string `json:"projectid,omitempty"` + Quiescevm bool `json:"quiescevm,omitempty"` + Serviceofferingdisplaytext string `json:"serviceofferingdisplaytext,omitempty"` + Serviceofferingid string `json:"serviceofferingid,omitempty"` + Serviceofferingname string `json:"serviceofferingname,omitempty"` + Size int64 `json:"size,omitempty"` + Snapshotid string `json:"snapshotid,omitempty"` + State string `json:"state,omitempty"` + Status string `json:"status,omitempty"` + Storage string `json:"storage,omitempty"` + Storageid string `json:"storageid,omitempty"` + Storagetype string `json:"storagetype,omitempty"` + Tags []struct { + Account string `json:"account,omitempty"` + Customer string `json:"customer,omitempty"` + Domain string `json:"domain,omitempty"` + Domainid string `json:"domainid,omitempty"` + Key string `json:"key,omitempty"` + Project string `json:"project,omitempty"` + Projectid string `json:"projectid,omitempty"` + Resourceid string `json:"resourceid,omitempty"` + Resourcetype string `json:"resourcetype,omitempty"` + Value string `json:"value,omitempty"` + } `json:"tags,omitempty"` + Templatedisplaytext string `json:"templatedisplaytext,omitempty"` + Templateid string `json:"templateid,omitempty"` + Templatename string `json:"templatename,omitempty"` + Type string `json:"type,omitempty"` + Virtualmachineid string `json:"virtualmachineid,omitempty"` + Vmdisplayname string
`json:"vmdisplayname,omitempty"` + Vmname string `json:"vmname,omitempty"` + Vmstate string `json:"vmstate,omitempty"` + Zoneid string `json:"zoneid,omitempty"` + Zonename string `json:"zonename,omitempty"` +} + +type CreateVolumeParams struct { + p map[string]interface{} +} + +func (p *CreateVolumeParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["account"]; found { + u.Set("account", v.(string)) + } + if v, found := p.p["customid"]; found { + u.Set("customid", v.(string)) + } + if v, found := p.p["diskofferingid"]; found { + u.Set("diskofferingid", v.(string)) + } + if v, found := p.p["displayvolume"]; found { + vv := strconv.FormatBool(v.(bool)) + u.Set("displayvolume", vv) + } + if v, found := p.p["domainid"]; found { + u.Set("domainid", v.(string)) + } + if v, found := p.p["maxiops"]; found { + vv := strconv.FormatInt(v.(int64), 10) + u.Set("maxiops", vv) + } + if v, found := p.p["miniops"]; found { + vv := strconv.FormatInt(v.(int64), 10) + u.Set("miniops", vv) + } + if v, found := p.p["name"]; found { + u.Set("name", v.(string)) + } + if v, found := p.p["projectid"]; found { + u.Set("projectid", v.(string)) + } + if v, found := p.p["size"]; found { + vv := strconv.FormatInt(v.(int64), 10) + u.Set("size", vv) + } + if v, found := p.p["snapshotid"]; found { + u.Set("snapshotid", v.(string)) + } + if v, found := p.p["virtualmachineid"]; found { + u.Set("virtualmachineid", v.(string)) + } + if v, found := p.p["zoneid"]; found { + u.Set("zoneid", v.(string)) + } + return u +} + +func (p *CreateVolumeParams) SetAccount(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["account"] = v + return +} + +func (p *CreateVolumeParams) SetCustomid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["customid"] = v + return +} + +func (p *CreateVolumeParams) SetDiskofferingid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["diskofferingid"] = v + return +} + +func (p *CreateVolumeParams) SetDisplayvolume(v bool) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["displayvolume"] = v + return +} + +func (p *CreateVolumeParams) SetDomainid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["domainid"] = v + return +} + +func (p *CreateVolumeParams) SetMaxiops(v int64) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["maxiops"] = v + return +} + +func (p *CreateVolumeParams) SetMiniops(v int64) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["miniops"] = v + return +} + +func (p *CreateVolumeParams) SetName(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["name"] = v + return +} + +func (p *CreateVolumeParams) SetProjectid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["projectid"] = v + return +} + +func (p *CreateVolumeParams) SetSize(v int64) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["size"] = v + return +} + +func (p *CreateVolumeParams) SetSnapshotid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["snapshotid"] = v + return +} + +func (p *CreateVolumeParams) SetVirtualmachineid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["virtualmachineid"] = v + return +} + +func (p *CreateVolumeParams) SetZoneid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["zoneid"] = v + return +} + +// You should always use this function 
to get a new CreateVolumeParams instance, +// as then you are sure you have configured all required params +func (s *VolumeService) NewCreateVolumeParams(name string) *CreateVolumeParams { + p := &CreateVolumeParams{} + p.p = make(map[string]interface{}) + p.p["name"] = name + return p +} + +// Creates a disk volume from a disk offering. This disk volume must still be attached to a virtual machine to make use of it. +func (s *VolumeService) CreateVolume(p *CreateVolumeParams) (*CreateVolumeResponse, error) { + resp, err := s.cs.newRequest("createVolume", p.toURLValues()) + if err != nil { + return nil, err + } + + var r CreateVolumeResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + + // If we have an async client, we need to wait for the async result + if s.cs.async { + b, err := s.cs.GetAsyncJobResult(r.JobID, s.cs.timeout) + if err != nil { + if err == AsyncTimeoutErr { + return &r, err + } + return nil, err + } + + b, err = getRawValue(b) + if err != nil { + return nil, err + } + + if err := json.Unmarshal(b, &r); err != nil { + return nil, err + } + } + return &r, nil +} + +type CreateVolumeResponse struct { + JobID string `json:"jobid,omitempty"` + Account string `json:"account,omitempty"` + Attached string `json:"attached,omitempty"` + Chaininfo string `json:"chaininfo,omitempty"` + Created string `json:"created,omitempty"` + Destroyed bool `json:"destroyed,omitempty"` + Deviceid int64 `json:"deviceid,omitempty"` + DiskBytesReadRate int64 `json:"diskBytesReadRate,omitempty"` + DiskBytesWriteRate int64 `json:"diskBytesWriteRate,omitempty"` + DiskIopsReadRate int64 `json:"diskIopsReadRate,omitempty"` + DiskIopsWriteRate int64 `json:"diskIopsWriteRate,omitempty"` + Diskofferingdisplaytext string `json:"diskofferingdisplaytext,omitempty"` + Diskofferingid string `json:"diskofferingid,omitempty"` + Diskofferingname string `json:"diskofferingname,omitempty"` + Displayvolume bool `json:"displayvolume,omitempty"` + Domain string `json:"domain,omitempty"` + Domainid string `json:"domainid,omitempty"` + Hypervisor string `json:"hypervisor,omitempty"` + Id string `json:"id,omitempty"` + Isextractable bool `json:"isextractable,omitempty"` + Isodisplaytext string `json:"isodisplaytext,omitempty"` + Isoid string `json:"isoid,omitempty"` + Isoname string `json:"isoname,omitempty"` + Maxiops int64 `json:"maxiops,omitempty"` + Miniops int64 `json:"miniops,omitempty"` + Name string `json:"name,omitempty"` + Path string `json:"path,omitempty"` + Project string `json:"project,omitempty"` + Projectid string `json:"projectid,omitempty"` + Quiescevm bool `json:"quiescevm,omitempty"` + Serviceofferingdisplaytext string `json:"serviceofferingdisplaytext,omitempty"` + Serviceofferingid string `json:"serviceofferingid,omitempty"` + Serviceofferingname string `json:"serviceofferingname,omitempty"` + Size int64 `json:"size,omitempty"` + Snapshotid string `json:"snapshotid,omitempty"` + State string `json:"state,omitempty"` + Status string `json:"status,omitempty"` + Storage string `json:"storage,omitempty"` + Storageid string `json:"storageid,omitempty"` + Storagetype string `json:"storagetype,omitempty"` + Tags []struct { + Account string `json:"account,omitempty"` + Customer string `json:"customer,omitempty"` + Domain string `json:"domain,omitempty"` + Domainid string `json:"domainid,omitempty"` + Key string `json:"key,omitempty"` + Project string `json:"project,omitempty"` + Projectid string `json:"projectid,omitempty"` + Resourceid string `json:"resourceid,omitempty"` +
Resourcetype string `json:"resourcetype,omitempty"` + Value string `json:"value,omitempty"` + } `json:"tags,omitempty"` + Templatedisplaytext string `json:"templatedisplaytext,omitempty"` + Templateid string `json:"templateid,omitempty"` + Templatename string `json:"templatename,omitempty"` + Type string `json:"type,omitempty"` + Virtualmachineid string `json:"virtualmachineid,omitempty"` + Vmdisplayname string `json:"vmdisplayname,omitempty"` + Vmname string `json:"vmname,omitempty"` + Vmstate string `json:"vmstate,omitempty"` + Zoneid string `json:"zoneid,omitempty"` + Zonename string `json:"zonename,omitempty"` +} + +type DeleteVolumeParams struct { + p map[string]interface{} +} + +func (p *DeleteVolumeParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["id"]; found { + u.Set("id", v.(string)) + } + return u +} + +func (p *DeleteVolumeParams) SetId(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["id"] = v + return +} + +// You should always use this function to get a new DeleteVolumeParams instance, +// as then you are sure you have configured all required params +func (s *VolumeService) NewDeleteVolumeParams(id string) *DeleteVolumeParams { + p := &DeleteVolumeParams{} + p.p = make(map[string]interface{}) + p.p["id"] = id + return p +} + +// Deletes a detached disk volume. +func (s *VolumeService) DeleteVolume(p *DeleteVolumeParams) (*DeleteVolumeResponse, error) { + resp, err := s.cs.newRequest("deleteVolume", p.toURLValues()) + if err != nil { + return nil, err + } + + var r DeleteVolumeResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + return &r, nil +} + +type DeleteVolumeResponse struct { + Displaytext string `json:"displaytext,omitempty"` + Success string `json:"success,omitempty"` +} + +type ListVolumesParams struct { + p map[string]interface{} +} + +func (p *ListVolumesParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["account"]; found { + u.Set("account", v.(string)) + } + if v, found := p.p["diskofferingid"]; found { + u.Set("diskofferingid", v.(string)) + } + if v, found := p.p["displayvolume"]; found { + vv := strconv.FormatBool(v.(bool)) + u.Set("displayvolume", vv) + } + if v, found := p.p["domainid"]; found { + u.Set("domainid", v.(string)) + } + if v, found := p.p["hostid"]; found { + u.Set("hostid", v.(string)) + } + if v, found := p.p["id"]; found { + u.Set("id", v.(string)) + } + if v, found := p.p["isrecursive"]; found { + vv := strconv.FormatBool(v.(bool)) + u.Set("isrecursive", vv) + } + if v, found := p.p["keyword"]; found { + u.Set("keyword", v.(string)) + } + if v, found := p.p["listall"]; found { + vv := strconv.FormatBool(v.(bool)) + u.Set("listall", vv) + } + if v, found := p.p["name"]; found { + u.Set("name", v.(string)) + } + if v, found := p.p["page"]; found { + vv := strconv.Itoa(v.(int)) + u.Set("page", vv) + } + if v, found := p.p["pagesize"]; found { + vv := strconv.Itoa(v.(int)) + u.Set("pagesize", vv) + } + if v, found := p.p["podid"]; found { + u.Set("podid", v.(string)) + } + if v, found := p.p["projectid"]; found { + u.Set("projectid", v.(string)) + } + if v, found := p.p["storageid"]; found { + u.Set("storageid", v.(string)) + } + if v, found := p.p["tags"]; found { + i := 0 + for k, vv := range v.(map[string]string) { + u.Set(fmt.Sprintf("tags[%d].key", i), k) + u.Set(fmt.Sprintf("tags[%d].value", i), vv) + i++ + } + } + if v, found := p.p["type"]; found { + 
u.Set("type", v.(string)) + } + if v, found := p.p["virtualmachineid"]; found { + u.Set("virtualmachineid", v.(string)) + } + if v, found := p.p["zoneid"]; found { + u.Set("zoneid", v.(string)) + } + return u +} + +func (p *ListVolumesParams) SetAccount(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["account"] = v + return +} + +func (p *ListVolumesParams) SetDiskofferingid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["diskofferingid"] = v + return +} + +func (p *ListVolumesParams) SetDisplayvolume(v bool) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["displayvolume"] = v + return +} + +func (p *ListVolumesParams) SetDomainid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["domainid"] = v + return +} + +func (p *ListVolumesParams) SetHostid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["hostid"] = v + return +} + +func (p *ListVolumesParams) SetId(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["id"] = v + return +} + +func (p *ListVolumesParams) SetIsrecursive(v bool) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["isrecursive"] = v + return +} + +func (p *ListVolumesParams) SetKeyword(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["keyword"] = v + return +} + +func (p *ListVolumesParams) SetListall(v bool) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["listall"] = v + return +} + +func (p *ListVolumesParams) SetName(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["name"] = v + return +} + +func (p *ListVolumesParams) SetPage(v int) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["page"] = v + return +} + +func (p *ListVolumesParams) SetPagesize(v int) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["pagesize"] = v + return +} + +func (p *ListVolumesParams) SetPodid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["podid"] = v + return +} + +func (p *ListVolumesParams) SetProjectid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["projectid"] = v + return +} + +func (p *ListVolumesParams) SetStorageid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["storageid"] = v + return +} + +func (p *ListVolumesParams) SetTags(v map[string]string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["tags"] = v + return +} + +func (p *ListVolumesParams) SetType(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["volumeType"] = v + return +} + +func (p *ListVolumesParams) SetVirtualmachineid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["virtualmachineid"] = v + return +} + +func (p *ListVolumesParams) SetZoneid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["zoneid"] = v + return +} + +// You should always use this function to get a new ListVolumesParams instance, +// as then you are sure you have configured all required params +func (s *VolumeService) NewListVolumesParams() *ListVolumesParams { + p := &ListVolumesParams{} + p.p = make(map[string]interface{}) + return p +} + +// This is a courtesy helper function, which in some cases may not work as expected! 
+func (s *VolumeService) GetVolumeID(name string) (string, error) { + p := &ListVolumesParams{} + p.p = make(map[string]interface{}) + + p.p["name"] = name + + l, err := s.ListVolumes(p) + if err != nil { + return "", err + } + + if l.Count == 0 { + // If no matches, search all projects + p.p["projectid"] = "-1" + + l, err = s.ListVolumes(p) + if err != nil { + return "", err + } + } + + if l.Count == 0 { + return "", fmt.Errorf("No match found for %s: %+v", name, l) + } + + if l.Count == 1 { + return l.Volumes[0].Id, nil + } + + if l.Count > 1 { + for _, v := range l.Volumes { + if v.Name == name { + return v.Id, nil + } + } + } + return "", fmt.Errorf("Could not find an exact match for %s: %+v", name, l) +} + +// This is a courtesy helper function, which in some cases may not work as expected! +func (s *VolumeService) GetVolumeByName(name string) (*Volume, int, error) { + id, err := s.GetVolumeID(name) + if err != nil { + return nil, -1, err + } + + r, count, err := s.GetVolumeByID(id) + if err != nil { + return nil, count, err + } + return r, count, nil +} + +// This is a courtesy helper function, which in some cases may not work as expected! +func (s *VolumeService) GetVolumeByID(id string) (*Volume, int, error) { + p := &ListVolumesParams{} + p.p = make(map[string]interface{}) + + p.p["id"] = id + + l, err := s.ListVolumes(p) + if err != nil { + if strings.Contains(err.Error(), fmt.Sprintf( + "Invalid parameter id value=%s due to incorrect long value format, "+ + "or entity does not exist", id)) { + return nil, 0, fmt.Errorf("No match found for %s: %+v", id, l) + } + return nil, -1, err + } + + if l.Count == 0 { + // If no matches, search all projects + p.p["projectid"] = "-1" + + l, err = s.ListVolumes(p) + if err != nil { + if strings.Contains(err.Error(), fmt.Sprintf( + "Invalid parameter id value=%s due to incorrect long value format, "+ + "or entity does not exist", id)) { + return nil, 0, fmt.Errorf("No match found for %s: %+v", id, l) + } + return nil, -1, err + } + } + + if l.Count == 0 { + return nil, l.Count, fmt.Errorf("No match found for %s: %+v", id, l) + } + + if l.Count == 1 { + return l.Volumes[0], l.Count, nil + } + return nil, l.Count, fmt.Errorf("There is more than one result for Volume UUID: %s!", id) +} + +// Lists all volumes.
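The courtesy helpers above resolve a volume by name or UUID, falling back to a search across all projects (projectid=-1) when the first lookup comes up empty, and refusing to guess when several volumes share a name. A short sketch of how they combine with the ListVolumes method defined next (the volume name and tag values are placeholders):

    // Resolve by name; fails cleanly when the name is missing or ambiguous.
    id, err := cs.Volume.GetVolumeID("data-disk-01")
    if err != nil {
    	log.Fatal(err)
    }
    vol, _, err := cs.Volume.GetVolumeByID(id)
    if err != nil {
    	log.Fatal(err)
    }
    fmt.Println(vol.Name, vol.Size, vol.State)

    // Or filter explicitly, e.g. on tags, with ListVolumes.
    lp := cs.Volume.NewListVolumesParams()
    lp.SetTags(map[string]string{"env": "prod"}) // placeholder tag
    l, err := cs.Volume.ListVolumes(lp)
    if err != nil {
    	log.Fatal(err)
    }
    fmt.Printf("%d matching volumes\n", l.Count)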
+func (s *VolumeService) ListVolumes(p *ListVolumesParams) (*ListVolumesResponse, error) { + resp, err := s.cs.newRequest("listVolumes", p.toURLValues()) + if err != nil { + return nil, err + } + + var r ListVolumesResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + return &r, nil +} + +type ListVolumesResponse struct { + Count int `json:"count"` + Volumes []*Volume `json:"volume"` +} + +type Volume struct { + Account string `json:"account,omitempty"` + Attached string `json:"attached,omitempty"` + Chaininfo string `json:"chaininfo,omitempty"` + Created string `json:"created,omitempty"` + Destroyed bool `json:"destroyed,omitempty"` + Deviceid int64 `json:"deviceid,omitempty"` + DiskBytesReadRate int64 `json:"diskBytesReadRate,omitempty"` + DiskBytesWriteRate int64 `json:"diskBytesWriteRate,omitempty"` + DiskIopsReadRate int64 `json:"diskIopsReadRate,omitempty"` + DiskIopsWriteRate int64 `json:"diskIopsWriteRate,omitempty"` + Diskofferingdisplaytext string `json:"diskofferingdisplaytext,omitempty"` + Diskofferingid string `json:"diskofferingid,omitempty"` + Diskofferingname string `json:"diskofferingname,omitempty"` + Displayvolume bool `json:"displayvolume,omitempty"` + Domain string `json:"domain,omitempty"` + Domainid string `json:"domainid,omitempty"` + Hypervisor string `json:"hypervisor,omitempty"` + Id string `json:"id,omitempty"` + Isextractable bool `json:"isextractable,omitempty"` + Isodisplaytext string `json:"isodisplaytext,omitempty"` + Isoid string `json:"isoid,omitempty"` + Isoname string `json:"isoname,omitempty"` + Maxiops int64 `json:"maxiops,omitempty"` + Miniops int64 `json:"miniops,omitempty"` + Name string `json:"name,omitempty"` + Path string `json:"path,omitempty"` + Project string `json:"project,omitempty"` + Projectid string `json:"projectid,omitempty"` + Quiescevm bool `json:"quiescevm,omitempty"` + Serviceofferingdisplaytext string `json:"serviceofferingdisplaytext,omitempty"` + Serviceofferingid string `json:"serviceofferingid,omitempty"` + Serviceofferingname string `json:"serviceofferingname,omitempty"` + Size int64 `json:"size,omitempty"` + Snapshotid string `json:"snapshotid,omitempty"` + State string `json:"state,omitempty"` + Status string `json:"status,omitempty"` + Storage string `json:"storage,omitempty"` + Storageid string `json:"storageid,omitempty"` + Storagetype string `json:"storagetype,omitempty"` + Tags []struct { + Account string `json:"account,omitempty"` + Customer string `json:"customer,omitempty"` + Domain string `json:"domain,omitempty"` + Domainid string `json:"domainid,omitempty"` + Key string `json:"key,omitempty"` + Project string `json:"project,omitempty"` + Projectid string `json:"projectid,omitempty"` + Resourceid string `json:"resourceid,omitempty"` + Resourcetype string `json:"resourcetype,omitempty"` + Value string `json:"value,omitempty"` + } `json:"tags,omitempty"` + Templatedisplaytext string `json:"templatedisplaytext,omitempty"` + Templateid string `json:"templateid,omitempty"` + Templatename string `json:"templatename,omitempty"` + Type string `json:"type,omitempty"` + Virtualmachineid string `json:"virtualmachineid,omitempty"` + Vmdisplayname string `json:"vmdisplayname,omitempty"` + Vmname string `json:"vmname,omitempty"` + Vmstate string `json:"vmstate,omitempty"` + Zoneid string `json:"zoneid,omitempty"` + Zonename string `json:"zonename,omitempty"` +} + +type ExtractVolumeParams struct { + p map[string]interface{} +} + +func (p *ExtractVolumeParams) toURLValues() url.Values { + u := 
url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["id"]; found { + u.Set("id", v.(string)) + } + if v, found := p.p["mode"]; found { + u.Set("mode", v.(string)) + } + if v, found := p.p["url"]; found { + u.Set("url", v.(string)) + } + if v, found := p.p["zoneid"]; found { + u.Set("zoneid", v.(string)) + } + return u +} + +func (p *ExtractVolumeParams) SetId(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["id"] = v + return +} + +func (p *ExtractVolumeParams) SetMode(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["mode"] = v + return +} + +func (p *ExtractVolumeParams) SetUrl(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["url"] = v + return +} + +func (p *ExtractVolumeParams) SetZoneid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["zoneid"] = v + return +} + +// You should always use this function to get a new ExtractVolumeParams instance, +// as then you are sure you have configured all required params +func (s *VolumeService) NewExtractVolumeParams(id string, mode string, zoneid string) *ExtractVolumeParams { + p := &ExtractVolumeParams{} + p.p = make(map[string]interface{}) + p.p["id"] = id + p.p["mode"] = mode + p.p["zoneid"] = zoneid + return p +} + +// Extracts volume +func (s *VolumeService) ExtractVolume(p *ExtractVolumeParams) (*ExtractVolumeResponse, error) { + resp, err := s.cs.newRequest("extractVolume", p.toURLValues()) + if err != nil { + return nil, err + } + + var r ExtractVolumeResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + + // If we have an async client, we need to wait for the async result + if s.cs.async { + b, err := s.cs.GetAsyncJobResult(r.JobID, s.cs.timeout) + if err != nil { + if err == AsyncTimeoutErr { + return &r, err + } + return nil, err + } + + b, err = getRawValue(b) + if err != nil { + return nil, err + } + + if err := json.Unmarshal(b, &r); err != nil { + return nil, err + } + } + return &r, nil +} + +type ExtractVolumeResponse struct { + JobID string `json:"jobid,omitempty"` + Accountid string `json:"accountid,omitempty"` + Created string `json:"created,omitempty"` + ExtractId string `json:"extractId,omitempty"` + ExtractMode string `json:"extractMode,omitempty"` + Id string `json:"id,omitempty"` + Name string `json:"name,omitempty"` + Resultstring string `json:"resultstring,omitempty"` + State string `json:"state,omitempty"` + Status string `json:"status,omitempty"` + Storagetype string `json:"storagetype,omitempty"` + Uploadpercentage int `json:"uploadpercentage,omitempty"` + Url string `json:"url,omitempty"` + Zoneid string `json:"zoneid,omitempty"` + Zonename string `json:"zonename,omitempty"` +} + +type MigrateVolumeParams struct { + p map[string]interface{} +} + +func (p *MigrateVolumeParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["livemigrate"]; found { + vv := strconv.FormatBool(v.(bool)) + u.Set("livemigrate", vv) + } + if v, found := p.p["storageid"]; found { + u.Set("storageid", v.(string)) + } + if v, found := p.p["volumeid"]; found { + u.Set("volumeid", v.(string)) + } + return u +} + +func (p *MigrateVolumeParams) SetLivemigrate(v bool) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["livemigrate"] = v + return +} + +func (p *MigrateVolumeParams) SetStorageid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["storageid"] = v + return +} + +func (p
*MigrateVolumeParams) SetVolumeid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["volumeid"] = v + return +} + +// You should always use this function to get a new MigrateVolumeParams instance, +// as then you are sure you have configured all required params +func (s *VolumeService) NewMigrateVolumeParams(storageid string, volumeid string) *MigrateVolumeParams { + p := &MigrateVolumeParams{} + p.p = make(map[string]interface{}) + p.p["storageid"] = storageid + p.p["volumeid"] = volumeid + return p +} + +// Migrate volume +func (s *VolumeService) MigrateVolume(p *MigrateVolumeParams) (*MigrateVolumeResponse, error) { + resp, err := s.cs.newRequest("migrateVolume", p.toURLValues()) + if err != nil { + return nil, err + } + + var r MigrateVolumeResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + + // If we have an async client, we need to wait for the async result + if s.cs.async { + b, err := s.cs.GetAsyncJobResult(r.JobID, s.cs.timeout) + if err != nil { + if err == AsyncTimeoutErr { + return &r, err + } + return nil, err + } + + b, err = getRawValue(b) + if err != nil { + return nil, err + } + + if err := json.Unmarshal(b, &r); err != nil { + return nil, err + } + } + return &r, nil +} + +type MigrateVolumeResponse struct { + JobID string `json:"jobid,omitempty"` + Account string `json:"account,omitempty"` + Attached string `json:"attached,omitempty"` + Chaininfo string `json:"chaininfo,omitempty"` + Created string `json:"created,omitempty"` + Destroyed bool `json:"destroyed,omitempty"` + Deviceid int64 `json:"deviceid,omitempty"` + DiskBytesReadRate int64 `json:"diskBytesReadRate,omitempty"` + DiskBytesWriteRate int64 `json:"diskBytesWriteRate,omitempty"` + DiskIopsReadRate int64 `json:"diskIopsReadRate,omitempty"` + DiskIopsWriteRate int64 `json:"diskIopsWriteRate,omitempty"` + Diskofferingdisplaytext string `json:"diskofferingdisplaytext,omitempty"` + Diskofferingid string `json:"diskofferingid,omitempty"` + Diskofferingname string `json:"diskofferingname,omitempty"` + Displayvolume bool `json:"displayvolume,omitempty"` + Domain string `json:"domain,omitempty"` + Domainid string `json:"domainid,omitempty"` + Hypervisor string `json:"hypervisor,omitempty"` + Id string `json:"id,omitempty"` + Isextractable bool `json:"isextractable,omitempty"` + Isodisplaytext string `json:"isodisplaytext,omitempty"` + Isoid string `json:"isoid,omitempty"` + Isoname string `json:"isoname,omitempty"` + Maxiops int64 `json:"maxiops,omitempty"` + Miniops int64 `json:"miniops,omitempty"` + Name string `json:"name,omitempty"` + Path string `json:"path,omitempty"` + Project string `json:"project,omitempty"` + Projectid string `json:"projectid,omitempty"` + Quiescevm bool `json:"quiescevm,omitempty"` + Serviceofferingdisplaytext string `json:"serviceofferingdisplaytext,omitempty"` + Serviceofferingid string `json:"serviceofferingid,omitempty"` + Serviceofferingname string `json:"serviceofferingname,omitempty"` + Size int64 `json:"size,omitempty"` + Snapshotid string `json:"snapshotid,omitempty"` + State string `json:"state,omitempty"` + Status string `json:"status,omitempty"` + Storage string `json:"storage,omitempty"` + Storageid string `json:"storageid,omitempty"` + Storagetype string `json:"storagetype,omitempty"` + Tags []struct { + Account string `json:"account,omitempty"` + Customer string `json:"customer,omitempty"` + Domain string `json:"domain,omitempty"` + Domainid string `json:"domainid,omitempty"` + Key string `json:"key,omitempty"` +
Project string `json:"project,omitempty"` + Projectid string `json:"projectid,omitempty"` + Resourceid string `json:"resourceid,omitempty"` + Resourcetype string `json:"resourcetype,omitempty"` + Value string `json:"value,omitempty"` + } `json:"tags,omitempty"` + Templatedisplaytext string `json:"templatedisplaytext,omitempty"` + Templateid string `json:"templateid,omitempty"` + Templatename string `json:"templatename,omitempty"` + Type string `json:"type,omitempty"` + Virtualmachineid string `json:"virtualmachineid,omitempty"` + Vmdisplayname string `json:"vmdisplayname,omitempty"` + Vmname string `json:"vmname,omitempty"` + Vmstate string `json:"vmstate,omitempty"` + Zoneid string `json:"zoneid,omitempty"` + Zonename string `json:"zonename,omitempty"` +} + +type ResizeVolumeParams struct { + p map[string]interface{} +} + +func (p *ResizeVolumeParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["diskofferingid"]; found { + u.Set("diskofferingid", v.(string)) + } + if v, found := p.p["id"]; found { + u.Set("id", v.(string)) + } + if v, found := p.p["shrinkok"]; found { + vv := strconv.FormatBool(v.(bool)) + u.Set("shrinkok", vv) + } + if v, found := p.p["size"]; found { + vv := strconv.FormatInt(v.(int64), 10) + u.Set("size", vv) + } + return u +} + +func (p *ResizeVolumeParams) SetDiskofferingid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["diskofferingid"] = v + return +} + +func (p *ResizeVolumeParams) SetId(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["id"] = v + return +} + +func (p *ResizeVolumeParams) SetShrinkok(v bool) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["shrinkok"] = v + return +} + +func (p *ResizeVolumeParams) SetSize(v int64) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["size"] = v + return +} + +// You should always use this function to get a new ResizeVolumeParams instance, +// as then you are sure you have configured all required params +func (s *VolumeService) NewResizeVolumeParams(id string) *ResizeVolumeParams { + p := &ResizeVolumeParams{} + p.p = make(map[string]interface{}) + p.p["id"] = id + return p +} + +// Resizes a volume +func (s *VolumeService) ResizeVolume(p *ResizeVolumeParams) (*ResizeVolumeResponse, error) { + resp, err := s.cs.newRequest("resizeVolume", p.toURLValues()) + if err != nil { + return nil, err + } + + var r ResizeVolumeResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + + // If we have an async client, we need to wait for the async result + if s.cs.async { + b, err := s.cs.GetAsyncJobResult(r.JobID, s.cs.timeout) + if err != nil { + if err == AsyncTimeoutErr { + return &r, err + } + return nil, err + } + + b, err = getRawValue(b) + if err != nil { + return nil, err + } + + if err := json.Unmarshal(b, &r); err != nil { + return nil, err + } + } + return &r, nil +} + +type ResizeVolumeResponse struct { + JobID string `json:"jobid,omitempty"` + Account string `json:"account,omitempty"` + Attached string `json:"attached,omitempty"` + Chaininfo string `json:"chaininfo,omitempty"` + Created string `json:"created,omitempty"` + Destroyed bool `json:"destroyed,omitempty"` + Deviceid int64 `json:"deviceid,omitempty"` + DiskBytesReadRate int64 `json:"diskBytesReadRate,omitempty"` + DiskBytesWriteRate int64 `json:"diskBytesWriteRate,omitempty"` + DiskIopsReadRate int64 `json:"diskIopsReadRate,omitempty"` + DiskIopsWriteRate int64
`json:"diskIopsWriteRate,omitempty"` + Diskofferingdisplaytext string `json:"diskofferingdisplaytext,omitempty"` + Diskofferingid string `json:"diskofferingid,omitempty"` + Diskofferingname string `json:"diskofferingname,omitempty"` + Displayvolume bool `json:"displayvolume,omitempty"` + Domain string `json:"domain,omitempty"` + Domainid string `json:"domainid,omitempty"` + Hypervisor string `json:"hypervisor,omitempty"` + Id string `json:"id,omitempty"` + Isextractable bool `json:"isextractable,omitempty"` + Isodisplaytext string `json:"isodisplaytext,omitempty"` + Isoid string `json:"isoid,omitempty"` + Isoname string `json:"isoname,omitempty"` + Maxiops int64 `json:"maxiops,omitempty"` + Miniops int64 `json:"miniops,omitempty"` + Name string `json:"name,omitempty"` + Path string `json:"path,omitempty"` + Project string `json:"project,omitempty"` + Projectid string `json:"projectid,omitempty"` + Quiescevm bool `json:"quiescevm,omitempty"` + Serviceofferingdisplaytext string `json:"serviceofferingdisplaytext,omitempty"` + Serviceofferingid string `json:"serviceofferingid,omitempty"` + Serviceofferingname string `json:"serviceofferingname,omitempty"` + Size int64 `json:"size,omitempty"` + Snapshotid string `json:"snapshotid,omitempty"` + State string `json:"state,omitempty"` + Status string `json:"status,omitempty"` + Storage string `json:"storage,omitempty"` + Storageid string `json:"storageid,omitempty"` + Storagetype string `json:"storagetype,omitempty"` + Tags []struct { + Account string `json:"account,omitempty"` + Customer string `json:"customer,omitempty"` + Domain string `json:"domain,omitempty"` + Domainid string `json:"domainid,omitempty"` + Key string `json:"key,omitempty"` + Project string `json:"project,omitempty"` + Projectid string `json:"projectid,omitempty"` + Resourceid string `json:"resourceid,omitempty"` + Resourcetype string `json:"resourcetype,omitempty"` + Value string `json:"value,omitempty"` + } `json:"tags,omitempty"` + Templatedisplaytext string `json:"templatedisplaytext,omitempty"` + Templateid string `json:"templateid,omitempty"` + Templatename string `json:"templatename,omitempty"` + Type string `json:"type,omitempty"` + Virtualmachineid string `json:"virtualmachineid,omitempty"` + Vmdisplayname string `json:"vmdisplayname,omitempty"` + Vmname string `json:"vmname,omitempty"` + Vmstate string `json:"vmstate,omitempty"` + Zoneid string `json:"zoneid,omitempty"` + Zonename string `json:"zonename,omitempty"` +} + +type UpdateVolumeParams struct { + p map[string]interface{} +} + +func (p *UpdateVolumeParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["chaininfo"]; found { + u.Set("chaininfo", v.(string)) + } + if v, found := p.p["customid"]; found { + u.Set("customid", v.(string)) + } + if v, found := p.p["displayvolume"]; found { + vv := strconv.FormatBool(v.(bool)) + u.Set("displayvolume", vv) + } + if v, found := p.p["id"]; found { + u.Set("id", v.(string)) + } + if v, found := p.p["path"]; found { + u.Set("path", v.(string)) + } + if v, found := p.p["state"]; found { + u.Set("state", v.(string)) + } + if v, found := p.p["storageid"]; found { + u.Set("storageid", v.(string)) + } + return u +} + +func (p *UpdateVolumeParams) SetChaininfo(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["chaininfo"] = v + return +} + +func (p *UpdateVolumeParams) SetCustomid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["customid"] = v + return +} + +func (p 
*UpdateVolumeParams) SetDisplayvolume(v bool) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["displayvolume"] = v + return +} + +func (p *UpdateVolumeParams) SetId(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["id"] = v + return +} + +func (p *UpdateVolumeParams) SetPath(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["path"] = v + return +} + +func (p *UpdateVolumeParams) SetState(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["state"] = v + return +} + +func (p *UpdateVolumeParams) SetStorageid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["storageid"] = v + return +} + +// You should always use this function to get a new UpdateVolumeParams instance, +// as then you are sure you have configured all required params +func (s *VolumeService) NewUpdateVolumeParams() *UpdateVolumeParams { + p := &UpdateVolumeParams{} + p.p = make(map[string]interface{}) + return p +} + +// Updates the volume. +func (s *VolumeService) UpdateVolume(p *UpdateVolumeParams) (*UpdateVolumeResponse, error) { + resp, err := s.cs.newRequest("updateVolume", p.toURLValues()) + if err != nil { + return nil, err + } + + var r UpdateVolumeResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + + // If we have an async client, we need to wait for the async result + if s.cs.async { + b, err := s.cs.GetAsyncJobResult(r.JobID, s.cs.timeout) + if err != nil { + if err == AsyncTimeoutErr { + return &r, err + } + return nil, err + } + + b, err = getRawValue(b) + if err != nil { + return nil, err + } + + if err := json.Unmarshal(b, &r); err != nil { + return nil, err + } + } + return &r, nil +} + +type UpdateVolumeResponse struct { + JobID string `json:"jobid,omitempty"` + Account string `json:"account,omitempty"` + Attached string `json:"attached,omitempty"` + Chaininfo string `json:"chaininfo,omitempty"` + Created string `json:"created,omitempty"` + Destroyed bool `json:"destroyed,omitempty"` + Deviceid int64 `json:"deviceid,omitempty"` + DiskBytesReadRate int64 `json:"diskBytesReadRate,omitempty"` + DiskBytesWriteRate int64 `json:"diskBytesWriteRate,omitempty"` + DiskIopsReadRate int64 `json:"diskIopsReadRate,omitempty"` + DiskIopsWriteRate int64 `json:"diskIopsWriteRate,omitempty"` + Diskofferingdisplaytext string `json:"diskofferingdisplaytext,omitempty"` + Diskofferingid string `json:"diskofferingid,omitempty"` + Diskofferingname string `json:"diskofferingname,omitempty"` + Displayvolume bool `json:"displayvolume,omitempty"` + Domain string `json:"domain,omitempty"` + Domainid string `json:"domainid,omitempty"` + Hypervisor string `json:"hypervisor,omitempty"` + Id string `json:"id,omitempty"` + Isextractable bool `json:"isextractable,omitempty"` + Isodisplaytext string `json:"isodisplaytext,omitempty"` + Isoid string `json:"isoid,omitempty"` + Isoname string `json:"isoname,omitempty"` + Maxiops int64 `json:"maxiops,omitempty"` + Miniops int64 `json:"miniops,omitempty"` + Name string `json:"name,omitempty"` + Path string `json:"path,omitempty"` + Project string `json:"project,omitempty"` + Projectid string `json:"projectid,omitempty"` + Quiescevm bool `json:"quiescevm,omitempty"` + Serviceofferingdisplaytext string `json:"serviceofferingdisplaytext,omitempty"` + Serviceofferingid string `json:"serviceofferingid,omitempty"` + Serviceofferingname string `json:"serviceofferingname,omitempty"` + Size int64 `json:"size,omitempty"` + Snapshotid string
`json:"snapshotid,omitempty"` + State string `json:"state,omitempty"` + Status string `json:"status,omitempty"` + Storage string `json:"storage,omitempty"` + Storageid string `json:"storageid,omitempty"` + Storagetype string `json:"storagetype,omitempty"` + Tags []struct { + Account string `json:"account,omitempty"` + Customer string `json:"customer,omitempty"` + Domain string `json:"domain,omitempty"` + Domainid string `json:"domainid,omitempty"` + Key string `json:"key,omitempty"` + Project string `json:"project,omitempty"` + Projectid string `json:"projectid,omitempty"` + Resourceid string `json:"resourceid,omitempty"` + Resourcetype string `json:"resourcetype,omitempty"` + Value string `json:"value,omitempty"` + } `json:"tags,omitempty"` + Templatedisplaytext string `json:"templatedisplaytext,omitempty"` + Templateid string `json:"templateid,omitempty"` + Templatename string `json:"templatename,omitempty"` + Type string `json:"type,omitempty"` + Virtualmachineid string `json:"virtualmachineid,omitempty"` + Vmdisplayname string `json:"vmdisplayname,omitempty"` + Vmname string `json:"vmname,omitempty"` + Vmstate string `json:"vmstate,omitempty"` + Zoneid string `json:"zoneid,omitempty"` + Zonename string `json:"zonename,omitempty"` +} diff --git a/vendor/github.com/xanzy/go-cloudstack/cloudstack/ZoneService.go b/vendor/github.com/xanzy/go-cloudstack/cloudstack/ZoneService.go new file mode 100644 index 000000000..56894dbdf --- /dev/null +++ b/vendor/github.com/xanzy/go-cloudstack/cloudstack/ZoneService.go @@ -0,0 +1,1158 @@ +// +// Copyright 2014, Sander van Harmelen +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// + +package cloudstack + +import ( + "encoding/json" + "fmt" + "net/url" + "strconv" + "strings" +) + +type CreateZoneParams struct { + p map[string]interface{} +} + +func (p *CreateZoneParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["allocationstate"]; found { + u.Set("allocationstate", v.(string)) + } + if v, found := p.p["dns1"]; found { + u.Set("dns1", v.(string)) + } + if v, found := p.p["dns2"]; found { + u.Set("dns2", v.(string)) + } + if v, found := p.p["domain"]; found { + u.Set("domain", v.(string)) + } + if v, found := p.p["domainid"]; found { + u.Set("domainid", v.(string)) + } + if v, found := p.p["guestcidraddress"]; found { + u.Set("guestcidraddress", v.(string)) + } + if v, found := p.p["internaldns1"]; found { + u.Set("internaldns1", v.(string)) + } + if v, found := p.p["internaldns2"]; found { + u.Set("internaldns2", v.(string)) + } + if v, found := p.p["ip6dns1"]; found { + u.Set("ip6dns1", v.(string)) + } + if v, found := p.p["ip6dns2"]; found { + u.Set("ip6dns2", v.(string)) + } + if v, found := p.p["localstorageenabled"]; found { + vv := strconv.FormatBool(v.(bool)) + u.Set("localstorageenabled", vv) + } + if v, found := p.p["name"]; found { + u.Set("name", v.(string)) + } + if v, found := p.p["networktype"]; found { + u.Set("networktype", v.(string)) + } + if v, found := p.p["securitygroupenabled"]; found { + vv := strconv.FormatBool(v.(bool)) + u.Set("securitygroupenabled", vv) + } + return u +} + +func (p *CreateZoneParams) SetAllocationstate(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["allocationstate"] = v + return +} + +func (p *CreateZoneParams) SetDns1(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["dns1"] = v + return +} + +func (p *CreateZoneParams) SetDns2(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["dns2"] = v + return +} + +func (p *CreateZoneParams) SetDomain(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["domain"] = v + return +} + +func (p *CreateZoneParams) SetDomainid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["domainid"] = v + return +} + +func (p *CreateZoneParams) SetGuestcidraddress(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["guestcidraddress"] = v + return +} + +func (p *CreateZoneParams) SetInternaldns1(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["internaldns1"] = v + return +} + +func (p *CreateZoneParams) SetInternaldns2(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["internaldns2"] = v + return +} + +func (p *CreateZoneParams) SetIp6dns1(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["ip6dns1"] = v + return +} + +func (p *CreateZoneParams) SetIp6dns2(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["ip6dns2"] = v + return +} + +func (p *CreateZoneParams) SetLocalstorageenabled(v bool) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["localstorageenabled"] = v + return +} + +func (p *CreateZoneParams) SetName(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["name"] = v + return +} + +func (p *CreateZoneParams) SetNetworktype(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["networktype"] = v + return +} + +func (p *CreateZoneParams) SetSecuritygroupenabled(v bool) { + if p.p == nil { + p.p = 
make(map[string]interface{}) + } + p.p["securitygroupenabled"] = v + return +} + +// You should always use this function to get a new CreateZoneParams instance, +// as then you are sure you have configured all required params +func (s *ZoneService) NewCreateZoneParams(dns1 string, internaldns1 string, name string, networktype string) *CreateZoneParams { + p := &CreateZoneParams{} + p.p = make(map[string]interface{}) + p.p["dns1"] = dns1 + p.p["internaldns1"] = internaldns1 + p.p["name"] = name + p.p["networktype"] = networktype + return p +} + +// Creates a Zone. +func (s *ZoneService) CreateZone(p *CreateZoneParams) (*CreateZoneResponse, error) { + resp, err := s.cs.newRequest("createZone", p.toURLValues()) + if err != nil { + return nil, err + } + + var r CreateZoneResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + return &r, nil +} + +type CreateZoneResponse struct { + Allocationstate string `json:"allocationstate,omitempty"` + Capacity []struct { + Capacitytotal int64 `json:"capacitytotal,omitempty"` + Capacityused int64 `json:"capacityused,omitempty"` + Clusterid string `json:"clusterid,omitempty"` + Clustername string `json:"clustername,omitempty"` + Percentused string `json:"percentused,omitempty"` + Podid string `json:"podid,omitempty"` + Podname string `json:"podname,omitempty"` + Type int `json:"type,omitempty"` + Zoneid string `json:"zoneid,omitempty"` + Zonename string `json:"zonename,omitempty"` + } `json:"capacity,omitempty"` + Description string `json:"description,omitempty"` + Dhcpprovider string `json:"dhcpprovider,omitempty"` + Displaytext string `json:"displaytext,omitempty"` + Dns1 string `json:"dns1,omitempty"` + Dns2 string `json:"dns2,omitempty"` + Domain string `json:"domain,omitempty"` + Domainid string `json:"domainid,omitempty"` + Domainname string `json:"domainname,omitempty"` + Guestcidraddress string `json:"guestcidraddress,omitempty"` + Id string `json:"id,omitempty"` + Internaldns1 string `json:"internaldns1,omitempty"` + Internaldns2 string `json:"internaldns2,omitempty"` + Ip6dns1 string `json:"ip6dns1,omitempty"` + Ip6dns2 string `json:"ip6dns2,omitempty"` + Localstorageenabled bool `json:"localstorageenabled,omitempty"` + Name string `json:"name,omitempty"` + Networktype string `json:"networktype,omitempty"` + Resourcedetails map[string]string `json:"resourcedetails,omitempty"` + Securitygroupsenabled bool `json:"securitygroupsenabled,omitempty"` + Tags []struct { + Account string `json:"account,omitempty"` + Customer string `json:"customer,omitempty"` + Domain string `json:"domain,omitempty"` + Domainid string `json:"domainid,omitempty"` + Key string `json:"key,omitempty"` + Project string `json:"project,omitempty"` + Projectid string `json:"projectid,omitempty"` + Resourceid string `json:"resourceid,omitempty"` + Resourcetype string `json:"resourcetype,omitempty"` + Value string `json:"value,omitempty"` + } `json:"tags,omitempty"` + Vlan string `json:"vlan,omitempty"` + Zonetoken string `json:"zonetoken,omitempty"` +} + +type UpdateZoneParams struct { + p map[string]interface{} +} + +func (p *UpdateZoneParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["allocationstate"]; found { + u.Set("allocationstate", v.(string)) + } + if v, found := p.p["details"]; found { + i := 0 + for k, vv := range v.(map[string]string) { + u.Set(fmt.Sprintf("details[%d].key", i), k) + u.Set(fmt.Sprintf("details[%d].value", i), vv) + i++ + } + } + if v, found := p.p["dhcpprovider"]; 
found { + u.Set("dhcpprovider", v.(string)) + } + if v, found := p.p["dns1"]; found { + u.Set("dns1", v.(string)) + } + if v, found := p.p["dns2"]; found { + u.Set("dns2", v.(string)) + } + if v, found := p.p["dnssearchorder"]; found { + vv := strings.Join(v.([]string), ",") + u.Set("dnssearchorder", vv) + } + if v, found := p.p["domain"]; found { + u.Set("domain", v.(string)) + } + if v, found := p.p["guestcidraddress"]; found { + u.Set("guestcidraddress", v.(string)) + } + if v, found := p.p["id"]; found { + u.Set("id", v.(string)) + } + if v, found := p.p["internaldns1"]; found { + u.Set("internaldns1", v.(string)) + } + if v, found := p.p["internaldns2"]; found { + u.Set("internaldns2", v.(string)) + } + if v, found := p.p["ip6dns1"]; found { + u.Set("ip6dns1", v.(string)) + } + if v, found := p.p["ip6dns2"]; found { + u.Set("ip6dns2", v.(string)) + } + if v, found := p.p["ispublic"]; found { + vv := strconv.FormatBool(v.(bool)) + u.Set("ispublic", vv) + } + if v, found := p.p["localstorageenabled"]; found { + vv := strconv.FormatBool(v.(bool)) + u.Set("localstorageenabled", vv) + } + if v, found := p.p["name"]; found { + u.Set("name", v.(string)) + } + return u +} + +func (p *UpdateZoneParams) SetAllocationstate(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["allocationstate"] = v + return +} + +func (p *UpdateZoneParams) SetDetails(v map[string]string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["details"] = v + return +} + +func (p *UpdateZoneParams) SetDhcpprovider(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["dhcpprovider"] = v + return +} + +func (p *UpdateZoneParams) SetDns1(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["dns1"] = v + return +} + +func (p *UpdateZoneParams) SetDns2(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["dns2"] = v + return +} + +func (p *UpdateZoneParams) SetDnssearchorder(v []string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["dnssearchorder"] = v + return +} + +func (p *UpdateZoneParams) SetDomain(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["domain"] = v + return +} + +func (p *UpdateZoneParams) SetGuestcidraddress(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["guestcidraddress"] = v + return +} + +func (p *UpdateZoneParams) SetId(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["id"] = v + return +} + +func (p *UpdateZoneParams) SetInternaldns1(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["internaldns1"] = v + return +} + +func (p *UpdateZoneParams) SetInternaldns2(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["internaldns2"] = v + return +} + +func (p *UpdateZoneParams) SetIp6dns1(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["ip6dns1"] = v + return +} + +func (p *UpdateZoneParams) SetIp6dns2(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["ip6dns2"] = v + return +} + +func (p *UpdateZoneParams) SetIspublic(v bool) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["ispublic"] = v + return +} + +func (p *UpdateZoneParams) SetLocalstorageenabled(v bool) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["localstorageenabled"] = v + return +} + +func (p *UpdateZoneParams) SetName(v string) { + if p.p == nil { + p.p = 
make(map[string]interface{}) + } + p.p["name"] = v + return +} + +// You should always use this function to get a new UpdateZoneParams instance, +// as then you are sure you have configured all required params +func (s *ZoneService) NewUpdateZoneParams(id string) *UpdateZoneParams { + p := &UpdateZoneParams{} + p.p = make(map[string]interface{}) + p.p["id"] = id + return p +} + +// Updates a Zone. +func (s *ZoneService) UpdateZone(p *UpdateZoneParams) (*UpdateZoneResponse, error) { + resp, err := s.cs.newRequest("updateZone", p.toURLValues()) + if err != nil { + return nil, err + } + + var r UpdateZoneResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + return &r, nil +} + +type UpdateZoneResponse struct { + Allocationstate string `json:"allocationstate,omitempty"` + Capacity []struct { + Capacitytotal int64 `json:"capacitytotal,omitempty"` + Capacityused int64 `json:"capacityused,omitempty"` + Clusterid string `json:"clusterid,omitempty"` + Clustername string `json:"clustername,omitempty"` + Percentused string `json:"percentused,omitempty"` + Podid string `json:"podid,omitempty"` + Podname string `json:"podname,omitempty"` + Type int `json:"type,omitempty"` + Zoneid string `json:"zoneid,omitempty"` + Zonename string `json:"zonename,omitempty"` + } `json:"capacity,omitempty"` + Description string `json:"description,omitempty"` + Dhcpprovider string `json:"dhcpprovider,omitempty"` + Displaytext string `json:"displaytext,omitempty"` + Dns1 string `json:"dns1,omitempty"` + Dns2 string `json:"dns2,omitempty"` + Domain string `json:"domain,omitempty"` + Domainid string `json:"domainid,omitempty"` + Domainname string `json:"domainname,omitempty"` + Guestcidraddress string `json:"guestcidraddress,omitempty"` + Id string `json:"id,omitempty"` + Internaldns1 string `json:"internaldns1,omitempty"` + Internaldns2 string `json:"internaldns2,omitempty"` + Ip6dns1 string `json:"ip6dns1,omitempty"` + Ip6dns2 string `json:"ip6dns2,omitempty"` + Localstorageenabled bool `json:"localstorageenabled,omitempty"` + Name string `json:"name,omitempty"` + Networktype string `json:"networktype,omitempty"` + Resourcedetails map[string]string `json:"resourcedetails,omitempty"` + Securitygroupsenabled bool `json:"securitygroupsenabled,omitempty"` + Tags []struct { + Account string `json:"account,omitempty"` + Customer string `json:"customer,omitempty"` + Domain string `json:"domain,omitempty"` + Domainid string `json:"domainid,omitempty"` + Key string `json:"key,omitempty"` + Project string `json:"project,omitempty"` + Projectid string `json:"projectid,omitempty"` + Resourceid string `json:"resourceid,omitempty"` + Resourcetype string `json:"resourcetype,omitempty"` + Value string `json:"value,omitempty"` + } `json:"tags,omitempty"` + Vlan string `json:"vlan,omitempty"` + Zonetoken string `json:"zonetoken,omitempty"` +} + +type DeleteZoneParams struct { + p map[string]interface{} +} + +func (p *DeleteZoneParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["id"]; found { + u.Set("id", v.(string)) + } + return u +} + +func (p *DeleteZoneParams) SetId(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["id"] = v + return +} + +// You should always use this function to get a new DeleteZoneParams instance, +// as then you are sure you have configured all required params +func (s *ZoneService) NewDeleteZoneParams(id string) *DeleteZoneParams { + p := &DeleteZoneParams{} + p.p = make(map[string]interface{}) + 
p.p["id"] = id + return p +} + +// Deletes a Zone. +func (s *ZoneService) DeleteZone(p *DeleteZoneParams) (*DeleteZoneResponse, error) { + resp, err := s.cs.newRequest("deleteZone", p.toURLValues()) + if err != nil { + return nil, err + } + + var r DeleteZoneResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + return &r, nil +} + +type DeleteZoneResponse struct { + Displaytext string `json:"displaytext,omitempty"` + Success string `json:"success,omitempty"` +} + +type ListZonesParams struct { + p map[string]interface{} +} + +func (p *ListZonesParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["available"]; found { + vv := strconv.FormatBool(v.(bool)) + u.Set("available", vv) + } + if v, found := p.p["domainid"]; found { + u.Set("domainid", v.(string)) + } + if v, found := p.p["id"]; found { + u.Set("id", v.(string)) + } + if v, found := p.p["keyword"]; found { + u.Set("keyword", v.(string)) + } + if v, found := p.p["name"]; found { + u.Set("name", v.(string)) + } + if v, found := p.p["networktype"]; found { + u.Set("networktype", v.(string)) + } + if v, found := p.p["page"]; found { + vv := strconv.Itoa(v.(int)) + u.Set("page", vv) + } + if v, found := p.p["pagesize"]; found { + vv := strconv.Itoa(v.(int)) + u.Set("pagesize", vv) + } + if v, found := p.p["showcapacities"]; found { + vv := strconv.FormatBool(v.(bool)) + u.Set("showcapacities", vv) + } + if v, found := p.p["tags"]; found { + i := 0 + for k, vv := range v.(map[string]string) { + u.Set(fmt.Sprintf("tags[%d].key", i), k) + u.Set(fmt.Sprintf("tags[%d].value", i), vv) + i++ + } + } + return u +} + +func (p *ListZonesParams) SetAvailable(v bool) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["available"] = v + return +} + +func (p *ListZonesParams) SetDomainid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["domainid"] = v + return +} + +func (p *ListZonesParams) SetId(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["id"] = v + return +} + +func (p *ListZonesParams) SetKeyword(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["keyword"] = v + return +} + +func (p *ListZonesParams) SetName(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["name"] = v + return +} + +func (p *ListZonesParams) SetNetworktype(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["networktype"] = v + return +} + +func (p *ListZonesParams) SetPage(v int) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["page"] = v + return +} + +func (p *ListZonesParams) SetPagesize(v int) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["pagesize"] = v + return +} + +func (p *ListZonesParams) SetShowcapacities(v bool) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["showcapacities"] = v + return +} + +func (p *ListZonesParams) SetTags(v map[string]string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["tags"] = v + return +} + +// You should always use this function to get a new ListZonesParams instance, +// as then you are sure you have configured all required params +func (s *ZoneService) NewListZonesParams() *ListZonesParams { + p := &ListZonesParams{} + p.p = make(map[string]interface{}) + return p +} + +// This is a courtesy helper function, which in some cases may not work as expected! 
+func (s *ZoneService) GetZoneID(name string) (string, error) { + p := &ListZonesParams{} + p.p = make(map[string]interface{}) + + p.p["name"] = name + + l, err := s.ListZones(p) + if err != nil { + return "", err + } + + if l.Count == 0 { + return "", fmt.Errorf("No match found for %s: %+v", name, l) + } + + if l.Count == 1 { + return l.Zones[0].Id, nil + } + + if l.Count > 1 { + for _, v := range l.Zones { + if v.Name == name { + return v.Id, nil + } + } + } + return "", fmt.Errorf("Could not find an exact match for %s: %+v", name, l) +} + +// This is a courtesy helper function, which in some cases may not work as expected! +func (s *ZoneService) GetZoneByName(name string) (*Zone, int, error) { + id, err := s.GetZoneID(name) + if err != nil { + return nil, -1, err + } + + r, count, err := s.GetZoneByID(id) + if err != nil { + return nil, count, err + } + return r, count, nil +} + +// This is a courtesy helper function, which in some cases may not work as expected! +func (s *ZoneService) GetZoneByID(id string) (*Zone, int, error) { + p := &ListZonesParams{} + p.p = make(map[string]interface{}) + + p.p["id"] = id + + l, err := s.ListZones(p) + if err != nil { + if strings.Contains(err.Error(), fmt.Sprintf( + "Invalid parameter id value=%s due to incorrect long value format, "+ + "or entity does not exist", id)) { + return nil, 0, fmt.Errorf("No match found for %s: %+v", id, l) + } + return nil, -1, err + } + + if l.Count == 0 { + return nil, l.Count, fmt.Errorf("No match found for %s: %+v", id, l) + } + + if l.Count == 1 { + return l.Zones[0], l.Count, nil + } + return nil, l.Count, fmt.Errorf("There is more than one result for Zone UUID: %s!", id) +} + +// Lists zones. +func (s *ZoneService) ListZones(p *ListZonesParams) (*ListZonesResponse, error) { + resp, err := s.cs.newRequest("listZones", p.toURLValues()) + if err != nil { + return nil, err + } + + var r ListZonesResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + return &r, nil +} + +type ListZonesResponse struct { + Count int `json:"count"` + Zones []*Zone `json:"zone"` +}
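A brief usage sketch of the zone lookup helpers above may help. Everything here is hypothetical: the endpoint URL, the keys, and the zone name are placeholders, and the client constructor comes from cloudstack.go further below in this patch:

```go
package main

import (
	"fmt"
	"log"

	"github.com/xanzy/go-cloudstack/cloudstack"
)

func main() {
	// Placeholder endpoint and credentials; replace with your own.
	cs := cloudstack.NewClient("https://cloud.example.com/client/api", "API_KEY", "SECRET_KEY", true)

	// Resolve a zone name to its UUID with the courtesy helper.
	zoneID, err := cs.Zone.GetZoneID("zone-1")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("zone id:", zoneID)

	// Or list zones explicitly, with optional paging.
	p := cs.Zone.NewListZonesParams()
	p.SetPagesize(20)
	p.SetPage(1)
	l, err := cs.Zone.ListZones(p)
	if err != nil {
		log.Fatal(err)
	}
	for _, z := range l.Zones {
		fmt.Printf("%s (%s)\n", z.Name, z.Id)
	}
}
```

As the comments above warn, GetZoneID is only a courtesy helper: with multiple results it falls back to an exact-name scan, so listing explicitly is the safer path in ambiguous setups.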
+ +type Zone struct { + Allocationstate string `json:"allocationstate,omitempty"` + Capacity []struct { + Capacitytotal int64 `json:"capacitytotal,omitempty"` + Capacityused int64 `json:"capacityused,omitempty"` + Clusterid string `json:"clusterid,omitempty"` + Clustername string `json:"clustername,omitempty"` + Percentused string `json:"percentused,omitempty"` + Podid string `json:"podid,omitempty"` + Podname string `json:"podname,omitempty"` + Type int `json:"type,omitempty"` + Zoneid string `json:"zoneid,omitempty"` + Zonename string `json:"zonename,omitempty"` + } `json:"capacity,omitempty"` + Description string `json:"description,omitempty"` + Dhcpprovider string `json:"dhcpprovider,omitempty"` + Displaytext string `json:"displaytext,omitempty"` + Dns1 string `json:"dns1,omitempty"` + Dns2 string `json:"dns2,omitempty"` + Domain string `json:"domain,omitempty"` + Domainid string `json:"domainid,omitempty"` + Domainname string `json:"domainname,omitempty"` + Guestcidraddress string `json:"guestcidraddress,omitempty"` + Id string `json:"id,omitempty"` + Internaldns1 string `json:"internaldns1,omitempty"` + Internaldns2 string `json:"internaldns2,omitempty"` + Ip6dns1 string `json:"ip6dns1,omitempty"` + Ip6dns2 string `json:"ip6dns2,omitempty"` + Localstorageenabled bool `json:"localstorageenabled,omitempty"` + Name string `json:"name,omitempty"` + Networktype string `json:"networktype,omitempty"` + Resourcedetails map[string]string `json:"resourcedetails,omitempty"` + Securitygroupsenabled bool `json:"securitygroupsenabled,omitempty"` + Tags []struct { + Account string `json:"account,omitempty"` + Customer string `json:"customer,omitempty"` + Domain string `json:"domain,omitempty"` + Domainid string `json:"domainid,omitempty"` + Key string `json:"key,omitempty"` + Project string `json:"project,omitempty"` + Projectid string `json:"projectid,omitempty"` + Resourceid string `json:"resourceid,omitempty"` + Resourcetype string `json:"resourcetype,omitempty"` + Value string `json:"value,omitempty"` + } `json:"tags,omitempty"` + Vlan string `json:"vlan,omitempty"` + Zonetoken string `json:"zonetoken,omitempty"` +} + +type DedicateZoneParams struct { + p map[string]interface{} +} + +func (p *DedicateZoneParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["account"]; found { + u.Set("account", v.(string)) + } + if v, found := p.p["domainid"]; found { + u.Set("domainid", v.(string)) + } + if v, found := p.p["zoneid"]; found { + u.Set("zoneid", v.(string)) + } + return u +} + +func (p *DedicateZoneParams) SetAccount(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["account"] = v + return +} + +func (p *DedicateZoneParams) SetDomainid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["domainid"] = v + return +} + +func (p *DedicateZoneParams) SetZoneid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["zoneid"] = v + return +} + +// You should always use this function to get a new DedicateZoneParams instance, +// as then you are sure you have configured all required params +func (s *ZoneService) NewDedicateZoneParams(domainid string, zoneid string) *DedicateZoneParams { + p := &DedicateZoneParams{} + p.p = make(map[string]interface{}) + p.p["domainid"] = domainid + p.p["zoneid"] = zoneid + return p +}
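The same params-builder pattern drives zone dedication. A small, hypothetical sketch of calling DedicateZone (defined just below), assuming an async client and placeholder UUIDs and account name:

```go
package main

import (
	"fmt"
	"log"

	"github.com/xanzy/go-cloudstack/cloudstack"
)

func main() {
	// Placeholder endpoint, credentials, and UUIDs.
	cs := cloudstack.NewAsyncClient("https://cloud.example.com/client/api", "API_KEY", "SECRET_KEY", true)

	// Required params go into the constructor; optional ones use setters.
	p := cs.Zone.NewDedicateZoneParams("<domain-uuid>", "<zone-uuid>")
	p.SetAccount("ops-team")

	r, err := cs.Zone.DedicateZone(p)
	if err == cloudstack.AsyncTimeoutErr {
		// The async job outlived the client timeout; r still carries the
		// job ID, which can be polled later with GetAsyncJobResult.
		log.Printf("job %s still running", r.JobID)
		return
	}
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("dedicated zone resource:", r.Id)
}
```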
+ +// Dedicates a zone. +func (s *ZoneService) DedicateZone(p *DedicateZoneParams) (*DedicateZoneResponse, error) { + resp, err := s.cs.newRequest("dedicateZone", p.toURLValues()) + if err != nil { + return nil, err + } + + var r DedicateZoneResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + + // If we have an async client, we need to wait for the async result + if s.cs.async { + b, err := s.cs.GetAsyncJobResult(r.JobID, s.cs.timeout) + if err != nil { + if err == AsyncTimeoutErr { + return &r, err + } + return nil, err + } + + b, err = getRawValue(b) + if err != nil { + return nil, err + } + + if err := json.Unmarshal(b, &r); err != nil { + return nil, err + } + } + return &r, nil +} + +type DedicateZoneResponse struct { + JobID string `json:"jobid,omitempty"` + Accountid string `json:"accountid,omitempty"` + Affinitygroupid string `json:"affinitygroupid,omitempty"` + Domainid string `json:"domainid,omitempty"` + Id string `json:"id,omitempty"` + Zoneid string `json:"zoneid,omitempty"` + Zonename string `json:"zonename,omitempty"` +} + +type ReleaseDedicatedZoneParams struct { + p map[string]interface{} +} + +func (p *ReleaseDedicatedZoneParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["zoneid"]; found { + u.Set("zoneid", v.(string)) + } + return u +} + +func (p *ReleaseDedicatedZoneParams) SetZoneid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["zoneid"] = v + return +} + +// You should always use this function to get a new ReleaseDedicatedZoneParams instance, +// as then you are sure you have configured all required params +func (s *ZoneService) NewReleaseDedicatedZoneParams(zoneid string) *ReleaseDedicatedZoneParams { + p := &ReleaseDedicatedZoneParams{} + p.p = make(map[string]interface{}) + p.p["zoneid"] = zoneid + return p +} + +// Releases the dedication of a zone. +func (s *ZoneService) ReleaseDedicatedZone(p *ReleaseDedicatedZoneParams) (*ReleaseDedicatedZoneResponse, error) { + resp, err := s.cs.newRequest("releaseDedicatedZone", p.toURLValues()) + if err != nil { + return nil, err + } + + var r ReleaseDedicatedZoneResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + + // If we have an async client, we need to wait for the async result + if s.cs.async { + b, err := s.cs.GetAsyncJobResult(r.JobID, s.cs.timeout) + if err != nil { + if err == AsyncTimeoutErr { + return &r, err + } + return nil, err + } + + if err := json.Unmarshal(b, &r); err != nil { + return nil, err + } + } + return &r, nil +} + +type ReleaseDedicatedZoneResponse struct { + JobID string `json:"jobid,omitempty"` + Displaytext string `json:"displaytext,omitempty"` + Success bool `json:"success,omitempty"` +} + +type ListDedicatedZonesParams struct { + p map[string]interface{} +} + +func (p *ListDedicatedZonesParams) toURLValues() url.Values { + u := url.Values{} + if p.p == nil { + return u + } + if v, found := p.p["account"]; found { + u.Set("account", v.(string)) + } + if v, found := p.p["affinitygroupid"]; found { + u.Set("affinitygroupid", v.(string)) + } + if v, found := p.p["domainid"]; found { + u.Set("domainid", v.(string)) + } + if v, found := p.p["keyword"]; found { + u.Set("keyword", v.(string)) + } + if v, found := p.p["page"]; found { + vv := strconv.Itoa(v.(int)) + u.Set("page", vv) + } + if v, found := p.p["pagesize"]; found { + vv := strconv.Itoa(v.(int)) + u.Set("pagesize", vv) + } + if v, found := p.p["zoneid"]; found { + u.Set("zoneid", v.(string)) + } + return u +} + +func (p
*ListDedicatedZonesParams) SetAccount(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["account"] = v + return +} + +func (p *ListDedicatedZonesParams) SetAffinitygroupid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["affinitygroupid"] = v + return +} + +func (p *ListDedicatedZonesParams) SetDomainid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["domainid"] = v + return +} + +func (p *ListDedicatedZonesParams) SetKeyword(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["keyword"] = v + return +} + +func (p *ListDedicatedZonesParams) SetPage(v int) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["page"] = v + return +} + +func (p *ListDedicatedZonesParams) SetPagesize(v int) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["pagesize"] = v + return +} + +func (p *ListDedicatedZonesParams) SetZoneid(v string) { + if p.p == nil { + p.p = make(map[string]interface{}) + } + p.p["zoneid"] = v + return +} + +// You should always use this function to get a new ListDedicatedZonesParams instance, +// as then you are sure you have configured all required params +func (s *ZoneService) NewListDedicatedZonesParams() *ListDedicatedZonesParams { + p := &ListDedicatedZonesParams{} + p.p = make(map[string]interface{}) + return p +} + +// List dedicated zones. +func (s *ZoneService) ListDedicatedZones(p *ListDedicatedZonesParams) (*ListDedicatedZonesResponse, error) { + resp, err := s.cs.newRequest("listDedicatedZones", p.toURLValues()) + if err != nil { + return nil, err + } + + var r ListDedicatedZonesResponse + if err := json.Unmarshal(resp, &r); err != nil { + return nil, err + } + return &r, nil +} + +type ListDedicatedZonesResponse struct { + Count int `json:"count"` + DedicatedZones []*DedicatedZone `json:"dedicatedzone"` +} + +type DedicatedZone struct { + Accountid string `json:"accountid,omitempty"` + Affinitygroupid string `json:"affinitygroupid,omitempty"` + Domainid string `json:"domainid,omitempty"` + Id string `json:"id,omitempty"` + Zoneid string `json:"zoneid,omitempty"` + Zonename string `json:"zonename,omitempty"` +} diff --git a/vendor/github.com/xanzy/go-cloudstack/cloudstack/cloudstack.go b/vendor/github.com/xanzy/go-cloudstack/cloudstack/cloudstack.go new file mode 100644 index 000000000..20eb3a798 --- /dev/null +++ b/vendor/github.com/xanzy/go-cloudstack/cloudstack/cloudstack.go @@ -0,0 +1,874 @@ +// +// Copyright 2014, Sander van Harmelen +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// + +package cloudstack + +import ( + "bytes" + "crypto/hmac" + "crypto/sha1" + "crypto/tls" + "encoding/base64" + "encoding/json" + "errors" + "fmt" + "io/ioutil" + "net/http" + "net/url" + "sort" + "strings" + "time" +) + +type CSError struct { + ErrorCode int `json:"errorcode"` + CSErrorCode int `json:"cserrorcode"` + ErrorText string `json:"errortext"` +} + +func (e *CSError) Error() error { + return fmt.Errorf("CloudStack API error %d (CSExceptionErrorCode: %d): %s", e.ErrorCode, e.CSErrorCode, e.ErrorText) +} + +type CloudStackClient struct { + HTTPGETOnly bool // If `true` only use HTTP GET calls + + client *http.Client // The http client for communicating + baseURL string // The base URL of the API + apiKey string // Api key + secret string // Secret key + async bool // Wait for async calls to finish + timeout int64 // Max waiting timeout in seconds for async jobs to finish; defaults to 300 seconds + + APIDiscovery *APIDiscoveryService + Account *AccountService + Address *AddressService + AffinityGroup *AffinityGroupService + Alert *AlertService + Asyncjob *AsyncjobService + AutoScale *AutoScaleService + Baremetal *BaremetalService + BigSwitchVNS *BigSwitchVNSService + Certificate *CertificateService + CloudIdentifier *CloudIdentifierService + Cluster *ClusterService + Configuration *ConfigurationService + DiskOffering *DiskOfferingService + Domain *DomainService + Event *EventService + Firewall *FirewallService + GuestOS *GuestOSService + Host *HostService + Hypervisor *HypervisorService + ISO *ISOService + ImageStore *ImageStoreService + InternalLB *InternalLBService + LDAP *LDAPService + Limit *LimitService + LoadBalancer *LoadBalancerService + NAT *NATService + NetworkACL *NetworkACLService + NetworkDevice *NetworkDeviceService + NetworkOffering *NetworkOfferingService + Network *NetworkService + Nic *NicService + NiciraNVP *NiciraNVPService + OvsElement *OvsElementService + Pod *PodService + Pool *PoolService + PortableIP *PortableIPService + Project *ProjectService + Region *RegionService + Resourcemetadata *ResourcemetadataService + Resourcetags *ResourcetagsService + Router *RouterService + S3 *S3Service + SSH *SSHService + SecurityGroup *SecurityGroupService + ServiceOffering *ServiceOfferingService + Snapshot *SnapshotService + StoragePool *StoragePoolService + StratosphereSSP *StratosphereSSPService + Swift *SwiftService + SystemCapacity *SystemCapacityService + SystemVM *SystemVMService + Template *TemplateService + UCS *UCSService + Usage *UsageService + User *UserService + VLAN *VLANService + VMGroup *VMGroupService + VPC *VPCService + VPN *VPNService + VirtualMachine *VirtualMachineService + Volume *VolumeService + Zone *ZoneService +} + +// Creates a new client for communicating with CloudStack +func newClient(apiurl string, apikey string, secret string, async bool, verifyssl bool) *CloudStackClient { + cs := &CloudStackClient{ + client: &http.Client{ + Transport: &http.Transport{ + Proxy: http.ProxyFromEnvironment, + TLSClientConfig: &tls.Config{InsecureSkipVerify: !verifyssl}, // If verifyssl is true, skipping the verify should be false and vice versa + }, + Timeout: time.Duration(60 * time.Second), + }, + baseURL: apiurl, + apiKey: apikey, + secret: secret, + async: async, + timeout: 300, + } + cs.APIDiscovery = NewAPIDiscoveryService(cs) + cs.Account = NewAccountService(cs) + cs.Address = NewAddressService(cs) + cs.AffinityGroup = NewAffinityGroupService(cs) + cs.Alert = NewAlertService(cs) + cs.Asyncjob = NewAsyncjobService(cs) + cs.AutoScale = 
NewAutoScaleService(cs) + cs.Baremetal = NewBaremetalService(cs) + cs.BigSwitchVNS = NewBigSwitchVNSService(cs) + cs.Certificate = NewCertificateService(cs) + cs.CloudIdentifier = NewCloudIdentifierService(cs) + cs.Cluster = NewClusterService(cs) + cs.Configuration = NewConfigurationService(cs) + cs.DiskOffering = NewDiskOfferingService(cs) + cs.Domain = NewDomainService(cs) + cs.Event = NewEventService(cs) + cs.Firewall = NewFirewallService(cs) + cs.GuestOS = NewGuestOSService(cs) + cs.Host = NewHostService(cs) + cs.Hypervisor = NewHypervisorService(cs) + cs.ISO = NewISOService(cs) + cs.ImageStore = NewImageStoreService(cs) + cs.InternalLB = NewInternalLBService(cs) + cs.LDAP = NewLDAPService(cs) + cs.Limit = NewLimitService(cs) + cs.LoadBalancer = NewLoadBalancerService(cs) + cs.NAT = NewNATService(cs) + cs.NetworkACL = NewNetworkACLService(cs) + cs.NetworkDevice = NewNetworkDeviceService(cs) + cs.NetworkOffering = NewNetworkOfferingService(cs) + cs.Network = NewNetworkService(cs) + cs.Nic = NewNicService(cs) + cs.NiciraNVP = NewNiciraNVPService(cs) + cs.OvsElement = NewOvsElementService(cs) + cs.Pod = NewPodService(cs) + cs.Pool = NewPoolService(cs) + cs.PortableIP = NewPortableIPService(cs) + cs.Project = NewProjectService(cs) + cs.Region = NewRegionService(cs) + cs.Resourcemetadata = NewResourcemetadataService(cs) + cs.Resourcetags = NewResourcetagsService(cs) + cs.Router = NewRouterService(cs) + cs.S3 = NewS3Service(cs) + cs.SSH = NewSSHService(cs) + cs.SecurityGroup = NewSecurityGroupService(cs) + cs.ServiceOffering = NewServiceOfferingService(cs) + cs.Snapshot = NewSnapshotService(cs) + cs.StoragePool = NewStoragePoolService(cs) + cs.StratosphereSSP = NewStratosphereSSPService(cs) + cs.Swift = NewSwiftService(cs) + cs.SystemCapacity = NewSystemCapacityService(cs) + cs.SystemVM = NewSystemVMService(cs) + cs.Template = NewTemplateService(cs) + cs.UCS = NewUCSService(cs) + cs.Usage = NewUsageService(cs) + cs.User = NewUserService(cs) + cs.VLAN = NewVLANService(cs) + cs.VMGroup = NewVMGroupService(cs) + cs.VPC = NewVPCService(cs) + cs.VPN = NewVPNService(cs) + cs.VirtualMachine = NewVirtualMachineService(cs) + cs.Volume = NewVolumeService(cs) + cs.Zone = NewZoneService(cs) + return cs +} + +// Default non-async client, so for async calls you need to implement and check the async job result yourself. When using +// HTTPS with a self-signed certificate to connect to your CloudStack API, you would probably want to set 'verifyssl' to +// false so the call ignores the SSL errors/warnings. +func NewClient(apiurl string, apikey string, secret string, verifyssl bool) *CloudStackClient { + cs := newClient(apiurl, apikey, secret, false, verifyssl) + return cs +} + +// For sync API calls this client behaves exactly the same as a standard client, but for async API calls +// this client will wait until the async job is finished or until the configured AsyncTimeout is reached. When the async +// job finishes successfully it will return the actual object received from the API and nil, but when the timeout is +// reached it will return the initial object containing the async job ID for the running job and a warning. +func NewAsyncClient(apiurl string, apikey string, secret string, verifyssl bool) *CloudStackClient { + cs := newClient(apiurl, apikey, secret, true, verifyssl) + return cs +}
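A short sketch of constructing both client flavors; the endpoint and credentials are placeholders, and AsyncTimeout, defined next, adjusts how long the async client waits:

```go
package main

import "github.com/xanzy/go-cloudstack/cloudstack"

func main() {
	// Placeholder endpoint and credentials.
	const (
		apiURL = "https://cloud.example.com/client/api"
		apiKey = "API_KEY"
		secret = "SECRET_KEY"
	)

	// Sync client: async commands return immediately with a job ID.
	cs := cloudstack.NewClient(apiURL, apiKey, secret, true)

	// Async client: async commands block until the job finishes,
	// up to the configured timeout (300 seconds by default).
	csAsync := cloudstack.NewAsyncClient(apiURL, apiKey, secret, true)
	csAsync.AsyncTimeout(600) // wait up to 10 minutes instead

	_, _ = cs, csAsync
}
```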
+ +// When using the async client an API call will wait for the async call to finish before returning. The default is to +// poll for 300 seconds to check if the async job is finished. +func (cs *CloudStackClient) AsyncTimeout(timeoutInSeconds int64) { + cs.timeout = timeoutInSeconds +} + +var AsyncTimeoutErr = errors.New("Timeout while waiting for async job to finish") + +// A helper function that you can use to get the result of a running async job. If the job is not finished within the configured +// timeout, the async job returns an AsyncTimeoutErr. +func (cs *CloudStackClient) GetAsyncJobResult(jobid string, timeout int64) (json.RawMessage, error) { + var timer time.Duration + currentTime := time.Now().Unix() + + for { + p := cs.Asyncjob.NewQueryAsyncJobResultParams(jobid) + r, err := cs.Asyncjob.QueryAsyncJobResult(p) + if err != nil { + return nil, err + } + + // Status 1 means the job is finished successfully + if r.Jobstatus == 1 { + return r.Jobresult, nil + } + + // When the status is 2, the job has failed + if r.Jobstatus == 2 { + if r.Jobresulttype == "text" { + return nil, fmt.Errorf(string(r.Jobresult)) + } else { + return nil, fmt.Errorf("Undefined error: %s", string(r.Jobresult)) + } + } + + if time.Now().Unix()-currentTime > timeout { + return nil, AsyncTimeoutErr + } + + // Add an (extremely simple) incremental backoff to prevent + // flooding the CloudStack API + if timer < 15 { + timer++ + } + + time.Sleep(timer * time.Second) + } +} + +// Execute the request against a CS API. Will return the raw JSON data returned by the API and nil if +// no error occurred. If the API returns an error the result will be nil along with the HTTP error code and CS +// error details. If a processing (code) error occurs the result will be nil and the generated error is returned. +func (cs *CloudStackClient) newRequest(api string, params url.Values) (json.RawMessage, error) { + params.Set("apiKey", cs.apiKey) + params.Set("command", api) + params.Set("response", "json") + + // Generate signature for API call + // * Serialize parameters, URL encoding only values and sort them by key, done by encodeValues + // * Convert the entire argument string to lowercase + // * Replace all instances of '+' to '%20' + // * Calculate HMAC SHA1 of argument string with CloudStack secret + // * URL encode the string and convert to base64 + s := encodeValues(params) + s2 := strings.ToLower(s) + s3 := strings.Replace(s2, "+", "%20", -1) + mac := hmac.New(sha1.New, []byte(cs.secret)) + mac.Write([]byte(s3)) + signature := base64.StdEncoding.EncodeToString(mac.Sum(nil)) + + var err error + var resp *http.Response + if !cs.HTTPGETOnly && (api == "deployVirtualMachine" || api == "updateVirtualMachine") { + // The deployVirtualMachine and updateVirtualMachine API calls should be made using POST + // so we don't have to worry about the userdata size + + // Add the unescaped signature to the POST params + params.Set("signature", signature) + + // Make a POST call + resp, err = cs.client.PostForm(cs.baseURL, params) + } else { + // Create the final URL before we issue the request + url := cs.baseURL + "?" + s + "&signature=" + url.QueryEscape(signature) + + // Make a GET call + resp, err = cs.client.Get(url) + } + if err != nil { + return nil, err + } + defer resp.Body.Close() + + b, err := ioutil.ReadAll(resp.Body) + if err != nil { + return nil, err + } + + // Need to get the raw value to make the result play nice + b, err = getRawValue(b) + if err != nil { + return nil, err + } + + if resp.StatusCode != 200 { + var e CSError + if err := json.Unmarshal(b, &e); err != nil { + return nil, err + } + return nil, e.Error() + } + return b, nil +}
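The signing steps in newRequest above can be reproduced standalone. A minimal sketch, with the caveat that it uses url.Values.Encode (which escapes keys as well) instead of the package's encodeValues below, so it only matches when the keys need no escaping:

```go
package main

import (
	"crypto/hmac"
	"crypto/sha1"
	"encoding/base64"
	"fmt"
	"net/url"
	"strings"
)

// sign computes the CloudStack request signature: sort and encode the
// parameters, lowercase the string, replace '+' with '%20', HMAC-SHA1
// it with the secret, then base64-encode the digest.
func sign(params url.Values, secret string) string {
	s := params.Encode() // sorted by key; newRequest uses encodeValues instead
	s = strings.ToLower(s)
	s = strings.Replace(s, "+", "%20", -1)
	mac := hmac.New(sha1.New, []byte(secret))
	mac.Write([]byte(s))
	return base64.StdEncoding.EncodeToString(mac.Sum(nil))
}

func main() {
	// Placeholder command and credentials.
	v := url.Values{}
	v.Set("command", "listZones")
	v.Set("apiKey", "API_KEY")
	v.Set("response", "json")
	fmt.Println(sign(v, "SECRET_KEY"))
}
```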
+ s + "&signature=" + url.QueryEscape(signature) + + // Make a GET call + resp, err = cs.client.Get(url) + } + if err != nil { + return nil, err + } + defer resp.Body.Close() + + b, err := ioutil.ReadAll(resp.Body) + if err != nil { + return nil, err + } + + // Need to get the raw value to make the result play nice + b, err = getRawValue(b) + if err != nil { + return nil, err + } + + if resp.StatusCode != 200 { + var e CSError + if err := json.Unmarshal(b, &e); err != nil { + return nil, err + } + return nil, e.Error() + } + return b, nil +} + +// Custom version of net/url Encode that only URL escapes values +// Unmodified portions here remain under BSD license of The Go Authors: https://go.googlesource.com/go/+/master/LICENSE +func encodeValues(v url.Values) string { + if v == nil { + return "" + } + var buf bytes.Buffer + keys := make([]string, 0, len(v)) + for k := range v { + keys = append(keys, k) + } + sort.Strings(keys) + for _, k := range keys { + vs := v[k] + prefix := k + "=" + for _, v := range vs { + if buf.Len() > 0 { + buf.WriteByte('&') + } + buf.WriteString(prefix) + buf.WriteString(url.QueryEscape(v)) + } + } + return buf.String() +} + +// Generic function to get the first raw value from a response as json.RawMessage +func getRawValue(b json.RawMessage) (json.RawMessage, error) { + var m map[string]json.RawMessage + if err := json.Unmarshal(b, &m); err != nil { + return nil, err + } + for _, v := range m { + return v, nil + } + return nil, fmt.Errorf("Unable to extract the raw value from:\n\n%s\n\n", string(b)) +} + +type APIDiscoveryService struct { + cs *CloudStackClient +} + +func NewAPIDiscoveryService(cs *CloudStackClient) *APIDiscoveryService { + return &APIDiscoveryService{cs: cs} +} + +type AccountService struct { + cs *CloudStackClient +} + +func NewAccountService(cs *CloudStackClient) *AccountService { + return &AccountService{cs: cs} +} + +type AddressService struct { + cs *CloudStackClient +} + +func NewAddressService(cs *CloudStackClient) *AddressService { + return &AddressService{cs: cs} +} + +type AffinityGroupService struct { + cs *CloudStackClient +} + +func NewAffinityGroupService(cs *CloudStackClient) *AffinityGroupService { + return &AffinityGroupService{cs: cs} +} + +type AlertService struct { + cs *CloudStackClient +} + +func NewAlertService(cs *CloudStackClient) *AlertService { + return &AlertService{cs: cs} +} + +type AsyncjobService struct { + cs *CloudStackClient +} + +func NewAsyncjobService(cs *CloudStackClient) *AsyncjobService { + return &AsyncjobService{cs: cs} +} + +type AutoScaleService struct { + cs *CloudStackClient +} + +func NewAutoScaleService(cs *CloudStackClient) *AutoScaleService { + return &AutoScaleService{cs: cs} +} + +type BaremetalService struct { + cs *CloudStackClient +} + +func NewBaremetalService(cs *CloudStackClient) *BaremetalService { + return &BaremetalService{cs: cs} +} + +type BigSwitchVNSService struct { + cs *CloudStackClient +} + +func NewBigSwitchVNSService(cs *CloudStackClient) *BigSwitchVNSService { + return &BigSwitchVNSService{cs: cs} +} + +type CertificateService struct { + cs *CloudStackClient +} + +func NewCertificateService(cs *CloudStackClient) *CertificateService { + return &CertificateService{cs: cs} +} + +type CloudIdentifierService struct { + cs *CloudStackClient +} + +func NewCloudIdentifierService(cs *CloudStackClient) *CloudIdentifierService { + return &CloudIdentifierService{cs: cs} +} + +type ClusterService struct { + cs *CloudStackClient +} + +func NewClusterService(cs *CloudStackClient) 
*ClusterService { + return &ClusterService{cs: cs} +} + +type ConfigurationService struct { + cs *CloudStackClient +} + +func NewConfigurationService(cs *CloudStackClient) *ConfigurationService { + return &ConfigurationService{cs: cs} +} + +type DiskOfferingService struct { + cs *CloudStackClient +} + +func NewDiskOfferingService(cs *CloudStackClient) *DiskOfferingService { + return &DiskOfferingService{cs: cs} +} + +type DomainService struct { + cs *CloudStackClient +} + +func NewDomainService(cs *CloudStackClient) *DomainService { + return &DomainService{cs: cs} +} + +type EventService struct { + cs *CloudStackClient +} + +func NewEventService(cs *CloudStackClient) *EventService { + return &EventService{cs: cs} +} + +type FirewallService struct { + cs *CloudStackClient +} + +func NewFirewallService(cs *CloudStackClient) *FirewallService { + return &FirewallService{cs: cs} +} + +type GuestOSService struct { + cs *CloudStackClient +} + +func NewGuestOSService(cs *CloudStackClient) *GuestOSService { + return &GuestOSService{cs: cs} +} + +type HostService struct { + cs *CloudStackClient +} + +func NewHostService(cs *CloudStackClient) *HostService { + return &HostService{cs: cs} +} + +type HypervisorService struct { + cs *CloudStackClient +} + +func NewHypervisorService(cs *CloudStackClient) *HypervisorService { + return &HypervisorService{cs: cs} +} + +type ISOService struct { + cs *CloudStackClient +} + +func NewISOService(cs *CloudStackClient) *ISOService { + return &ISOService{cs: cs} +} + +type ImageStoreService struct { + cs *CloudStackClient +} + +func NewImageStoreService(cs *CloudStackClient) *ImageStoreService { + return &ImageStoreService{cs: cs} +} + +type InternalLBService struct { + cs *CloudStackClient +} + +func NewInternalLBService(cs *CloudStackClient) *InternalLBService { + return &InternalLBService{cs: cs} +} + +type LDAPService struct { + cs *CloudStackClient +} + +func NewLDAPService(cs *CloudStackClient) *LDAPService { + return &LDAPService{cs: cs} +} + +type LimitService struct { + cs *CloudStackClient +} + +func NewLimitService(cs *CloudStackClient) *LimitService { + return &LimitService{cs: cs} +} + +type LoadBalancerService struct { + cs *CloudStackClient +} + +func NewLoadBalancerService(cs *CloudStackClient) *LoadBalancerService { + return &LoadBalancerService{cs: cs} +} + +type NATService struct { + cs *CloudStackClient +} + +func NewNATService(cs *CloudStackClient) *NATService { + return &NATService{cs: cs} +} + +type NetworkACLService struct { + cs *CloudStackClient +} + +func NewNetworkACLService(cs *CloudStackClient) *NetworkACLService { + return &NetworkACLService{cs: cs} +} + +type NetworkDeviceService struct { + cs *CloudStackClient +} + +func NewNetworkDeviceService(cs *CloudStackClient) *NetworkDeviceService { + return &NetworkDeviceService{cs: cs} +} + +type NetworkOfferingService struct { + cs *CloudStackClient +} + +func NewNetworkOfferingService(cs *CloudStackClient) *NetworkOfferingService { + return &NetworkOfferingService{cs: cs} +} + +type NetworkService struct { + cs *CloudStackClient +} + +func NewNetworkService(cs *CloudStackClient) *NetworkService { + return &NetworkService{cs: cs} +} + +type NicService struct { + cs *CloudStackClient +} + +func NewNicService(cs *CloudStackClient) *NicService { + return &NicService{cs: cs} +} + +type NiciraNVPService struct { + cs *CloudStackClient +} + +func NewNiciraNVPService(cs *CloudStackClient) *NiciraNVPService { + return &NiciraNVPService{cs: cs} +} + +type OvsElementService struct { + cs 
*CloudStackClient +} + +func NewOvsElementService(cs *CloudStackClient) *OvsElementService { + return &OvsElementService{cs: cs} +} + +type PodService struct { + cs *CloudStackClient +} + +func NewPodService(cs *CloudStackClient) *PodService { + return &PodService{cs: cs} +} + +type PoolService struct { + cs *CloudStackClient +} + +func NewPoolService(cs *CloudStackClient) *PoolService { + return &PoolService{cs: cs} +} + +type PortableIPService struct { + cs *CloudStackClient +} + +func NewPortableIPService(cs *CloudStackClient) *PortableIPService { + return &PortableIPService{cs: cs} +} + +type ProjectService struct { + cs *CloudStackClient +} + +func NewProjectService(cs *CloudStackClient) *ProjectService { + return &ProjectService{cs: cs} +} + +type RegionService struct { + cs *CloudStackClient +} + +func NewRegionService(cs *CloudStackClient) *RegionService { + return &RegionService{cs: cs} +} + +type ResourcemetadataService struct { + cs *CloudStackClient +} + +func NewResourcemetadataService(cs *CloudStackClient) *ResourcemetadataService { + return &ResourcemetadataService{cs: cs} +} + +type ResourcetagsService struct { + cs *CloudStackClient +} + +func NewResourcetagsService(cs *CloudStackClient) *ResourcetagsService { + return &ResourcetagsService{cs: cs} +} + +type RouterService struct { + cs *CloudStackClient +} + +func NewRouterService(cs *CloudStackClient) *RouterService { + return &RouterService{cs: cs} +} + +type S3Service struct { + cs *CloudStackClient +} + +func NewS3Service(cs *CloudStackClient) *S3Service { + return &S3Service{cs: cs} +} + +type SSHService struct { + cs *CloudStackClient +} + +func NewSSHService(cs *CloudStackClient) *SSHService { + return &SSHService{cs: cs} +} + +type SecurityGroupService struct { + cs *CloudStackClient +} + +func NewSecurityGroupService(cs *CloudStackClient) *SecurityGroupService { + return &SecurityGroupService{cs: cs} +} + +type ServiceOfferingService struct { + cs *CloudStackClient +} + +func NewServiceOfferingService(cs *CloudStackClient) *ServiceOfferingService { + return &ServiceOfferingService{cs: cs} +} + +type SnapshotService struct { + cs *CloudStackClient +} + +func NewSnapshotService(cs *CloudStackClient) *SnapshotService { + return &SnapshotService{cs: cs} +} + +type StoragePoolService struct { + cs *CloudStackClient +} + +func NewStoragePoolService(cs *CloudStackClient) *StoragePoolService { + return &StoragePoolService{cs: cs} +} + +type StratosphereSSPService struct { + cs *CloudStackClient +} + +func NewStratosphereSSPService(cs *CloudStackClient) *StratosphereSSPService { + return &StratosphereSSPService{cs: cs} +} + +type SwiftService struct { + cs *CloudStackClient +} + +func NewSwiftService(cs *CloudStackClient) *SwiftService { + return &SwiftService{cs: cs} +} + +type SystemCapacityService struct { + cs *CloudStackClient +} + +func NewSystemCapacityService(cs *CloudStackClient) *SystemCapacityService { + return &SystemCapacityService{cs: cs} +} + +type SystemVMService struct { + cs *CloudStackClient +} + +func NewSystemVMService(cs *CloudStackClient) *SystemVMService { + return &SystemVMService{cs: cs} +} + +type TemplateService struct { + cs *CloudStackClient +} + +func NewTemplateService(cs *CloudStackClient) *TemplateService { + return &TemplateService{cs: cs} +} + +type UCSService struct { + cs *CloudStackClient +} + +func NewUCSService(cs *CloudStackClient) *UCSService { + return &UCSService{cs: cs} +} + +type UsageService struct { + cs *CloudStackClient +} + +func NewUsageService(cs *CloudStackClient) 
*UsageService { + return &UsageService{cs: cs} +} + +type UserService struct { + cs *CloudStackClient +} + +func NewUserService(cs *CloudStackClient) *UserService { + return &UserService{cs: cs} +} + +type VLANService struct { + cs *CloudStackClient +} + +func NewVLANService(cs *CloudStackClient) *VLANService { + return &VLANService{cs: cs} +} + +type VMGroupService struct { + cs *CloudStackClient +} + +func NewVMGroupService(cs *CloudStackClient) *VMGroupService { + return &VMGroupService{cs: cs} +} + +type VPCService struct { + cs *CloudStackClient +} + +func NewVPCService(cs *CloudStackClient) *VPCService { + return &VPCService{cs: cs} +} + +type VPNService struct { + cs *CloudStackClient +} + +func NewVPNService(cs *CloudStackClient) *VPNService { + return &VPNService{cs: cs} +} + +type VirtualMachineService struct { + cs *CloudStackClient +} + +func NewVirtualMachineService(cs *CloudStackClient) *VirtualMachineService { + return &VirtualMachineService{cs: cs} +} + +type VolumeService struct { + cs *CloudStackClient +} + +func NewVolumeService(cs *CloudStackClient) *VolumeService { + return &VolumeService{cs: cs} +} + +type ZoneService struct { + cs *CloudStackClient +} + +func NewZoneService(cs *CloudStackClient) *ZoneService { + return &ZoneService{cs: cs} +} diff --git a/vendor/github.com/xanzy/ssh-agent/.gitignore b/vendor/github.com/xanzy/ssh-agent/.gitignore new file mode 100644 index 000000000..daf913b1b --- /dev/null +++ b/vendor/github.com/xanzy/ssh-agent/.gitignore @@ -0,0 +1,24 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe +*.test +*.prof diff --git a/vendor/github.com/xanzy/ssh-agent/LICENSE b/vendor/github.com/xanzy/ssh-agent/LICENSE new file mode 100644 index 000000000..8f71f43fe --- /dev/null +++ b/vendor/github.com/xanzy/ssh-agent/LICENSE @@ -0,0 +1,202 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. 
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + diff --git a/vendor/github.com/xanzy/ssh-agent/README.md b/vendor/github.com/xanzy/ssh-agent/README.md new file mode 100644 index 000000000..d93af40a0 --- /dev/null +++ b/vendor/github.com/xanzy/ssh-agent/README.md @@ -0,0 +1,23 @@ +# ssh-agent + +Create a new [agent.Agent](https://godoc.org/golang.org/x/crypto/ssh/agent#Agent) on any type of OS (so including Windows) from any [Go](https://golang.org) application. + +## Limitations + +When compiled for Windows, it will only support [Pageant](http://the.earth.li/~sgtatham/putty/0.66/htmldoc/Chapter9.html#pageant) as the SSH authentication agent. + +## Credits + +Big thanks to [Давид Мзареулян (David Mzareulyan)](https://github.com/davidmz) for creating the [go-pageant](https://github.com/davidmz/go-pageant) package!
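For orientation, a minimal usage sketch (an editorial illustration based on the `New` and `Available` functions vendored below; it is not part of the upstream README):

```go
package main

import (
	"fmt"
	"log"

	sshagent "github.com/xanzy/ssh-agent"
)

func main() {
	// New picks the right transport for the platform: the unix socket named
	// by SSH_AUTH_SOCK on POSIX systems, Pageant on Windows.
	ag, conn, err := sshagent.New()
	if err != nil {
		log.Fatal(err)
	}
	if conn != nil { // the Windows/Pageant path returns a nil net.Conn
		defer conn.Close()
	}

	keys, err := ag.List() // identities currently held by the agent
	if err != nil {
		log.Fatal(err)
	}
	for _, key := range keys {
		fmt.Println(key.Format, key.Comment)
	}
}
```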
+ +## Issues + +If you have an issue: report it on the [issue tracker](https://github.com/xanzy/ssh-agent/issues) + +## Author + +Sander van Harmelen () + +## License + +The files `pageant_windows.go` and `sshagent_windows.go` have their own license (see file headers). The rest of this package is licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 diff --git a/vendor/github.com/xanzy/ssh-agent/pageant_windows.go b/vendor/github.com/xanzy/ssh-agent/pageant_windows.go new file mode 100644 index 000000000..3507b0228 --- /dev/null +++ b/vendor/github.com/xanzy/ssh-agent/pageant_windows.go @@ -0,0 +1,146 @@ +// +// Copyright (c) 2014 David Mzareulyan +// +// Permission is hereby granted, free of charge, to any person obtaining a copy of this software +// and associated documentation files (the "Software"), to deal in the Software without restriction, +// including without limitation the rights to use, copy, modify, merge, publish, distribute, +// sublicense, and/or sell copies of the Software, and to permit persons to whom the Software +// is furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in all copies or substantial +// portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +// BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, +// DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +// + +// +build windows + +package sshagent + +// see https://github.com/Yasushi/putty/blob/master/windows/winpgntc.c#L155 +// see https://github.com/paramiko/paramiko/blob/master/paramiko/win_pageant.py + +import ( + "encoding/binary" + "errors" + "fmt" + "sync" + . "syscall" + . "unsafe" +) + +// Maximum size of a message that can be sent to Pageant +const MaxMessageLen = 8192 + +var ( + ErrPageantNotFound = errors.New("pageant process not found") + ErrSendMessage = errors.New("error sending message") + + ErrMessageTooLong = errors.New("message too long") + ErrInvalidMessageFormat = errors.New("invalid message format") + ErrResponseTooLong = errors.New("response too long") +) + +const ( + agentCopydataID = 0x804e50ba + wmCopydata = 74 +) + +type copyData struct { + dwData uintptr + cbData uint32 + lpData Pointer +} + +var ( + lock sync.Mutex + + winFindWindow = winAPI("user32.dll", "FindWindowW") + winGetCurrentThreadID = winAPI("kernel32.dll", "GetCurrentThreadId") + winSendMessage = winAPI("user32.dll", "SendMessageW") +) + +func winAPI(dllName, funcName string) func(...uintptr) (uintptr, uintptr, error) { + proc := MustLoadDLL(dllName).MustFindProc(funcName) + return func(a ...uintptr) (uintptr, uintptr, error) { return proc.Call(a...) } +} + +// Available returns true if Pageant is running +func Available() bool { return pageantWindow() != 0 } + +// query sends message msg to Pageant and returns the response or an error.
+// 'msg' is raw agent request with length prefix +// Response is raw agent response with length prefix +func query(msg []byte) ([]byte, error) { + if len(msg) > MaxMessageLen { + return nil, ErrMessageTooLong + } + + msgLen := binary.BigEndian.Uint32(msg[:4]) + if len(msg) != int(msgLen)+4 { + return nil, ErrInvalidMessageFormat + } + + lock.Lock() + defer lock.Unlock() + + paWin := pageantWindow() + + if paWin == 0 { + return nil, ErrPageantNotFound + } + + thID, _, _ := winGetCurrentThreadID() + mapName := fmt.Sprintf("PageantRequest%08x", thID) + pMapName, _ := UTF16PtrFromString(mapName) + + mmap, err := CreateFileMapping(InvalidHandle, nil, PAGE_READWRITE, 0, MaxMessageLen+4, pMapName) + if err != nil { + return nil, err + } + defer CloseHandle(mmap) + + ptr, err := MapViewOfFile(mmap, FILE_MAP_WRITE, 0, 0, 0) + if err != nil { + return nil, err + } + defer UnmapViewOfFile(ptr) + + mmSlice := (*(*[MaxMessageLen]byte)(Pointer(ptr)))[:] + + copy(mmSlice, msg) + + mapNameBytesZ := append([]byte(mapName), 0) + + cds := copyData{ + dwData: agentCopydataID, + cbData: uint32(len(mapNameBytesZ)), + lpData: Pointer(&(mapNameBytesZ[0])), + } + + resp, _, _ := winSendMessage(paWin, wmCopydata, 0, uintptr(Pointer(&cds))) + + if resp == 0 { + return nil, ErrSendMessage + } + + respLen := binary.BigEndian.Uint32(mmSlice[:4]) + if respLen > MaxMessageLen-4 { + return nil, ErrResponseTooLong + } + + respData := make([]byte, respLen+4) + copy(respData, mmSlice) + + return respData, nil +} + +func pageantWindow() uintptr { + nameP, _ := UTF16PtrFromString("Pageant") + h, _, _ := winFindWindow(uintptr(Pointer(nameP)), uintptr(Pointer(nameP))) + return h +} diff --git a/vendor/github.com/xanzy/ssh-agent/sshagent.go b/vendor/github.com/xanzy/ssh-agent/sshagent.go new file mode 100644 index 000000000..259fea2b6 --- /dev/null +++ b/vendor/github.com/xanzy/ssh-agent/sshagent.go @@ -0,0 +1,49 @@ +// +// Copyright 2015, Sander van Harmelen +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// + +// +build !windows + +package sshagent + +import ( + "errors" + "fmt" + "net" + "os" + + "golang.org/x/crypto/ssh/agent" +) + +// New returns a new agent.Agent that uses a unix socket +func New() (agent.Agent, net.Conn, error) { + if !Available() { + return nil, nil, errors.New("SSH agent requested but SSH_AUTH_SOCK not specified") + } + + sshAuthSock := os.Getenv("SSH_AUTH_SOCK") + + conn, err := net.Dial("unix", sshAuthSock) + if err != nil { + return nil, nil, fmt.Errorf("Error connecting to SSH_AUTH_SOCK: %v", err) + } + + return agent.NewClient(conn), conn, nil +} + +// Available returns true if an auth socket is defined +func Available() bool { + return os.Getenv("SSH_AUTH_SOCK") != "" +} diff --git a/vendor/github.com/xanzy/ssh-agent/sshagent_windows.go b/vendor/github.com/xanzy/ssh-agent/sshagent_windows.go new file mode 100644 index 000000000..c46710e88 --- /dev/null +++ b/vendor/github.com/xanzy/ssh-agent/sshagent_windows.go @@ -0,0 +1,80 @@ +// +// Copyright (c) 2014 David Mzareulyan +// +// Permission is hereby granted, free of charge, to any person obtaining a copy of this software +// and associated documentation files (the "Software"), to deal in the Software without restriction, +// including without limitation the rights to use, copy, modify, merge, publish, distribute, +// sublicense, and/or sell copies of the Software, and to permit persons to whom the Software +// is furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in all copies or substantial +// portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +// BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, +// DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +// + +// +build windows + +package sshagent + +import ( + "errors" + "io" + "net" + "sync" + + "golang.org/x/crypto/ssh/agent" +) + +// New returns a new agent.Agent and the (custom) connection it uses +// to communicate with a running pageant.exe instance (see README.md) +func New() (agent.Agent, net.Conn, error) { + if !Available() { + return nil, nil, errors.New("SSH agent requested but Pageant not running") + } + + return agent.NewClient(&conn{}), nil, nil +} + +type conn struct { + sync.Mutex + buf []byte +} + +func (c *conn) Close() { + c.Lock() + defer c.Unlock() + c.buf = nil +} + +func (c *conn) Write(p []byte) (int, error) { + c.Lock() + defer c.Unlock() + + resp, err := query(p) + if err != nil { + return 0, err + } + + c.buf = append(c.buf, resp...) + + return len(p), nil +} + +func (c *conn) Read(p []byte) (int, error) { + c.Lock() + defer c.Unlock() + + if len(c.buf) == 0 { + return 0, io.EOF + } + + n := copy(p, c.buf) + c.buf = c.buf[n:] + + return n, nil +} diff --git a/vendor/github.com/ziutek/mymysql/mysql/errors.go b/vendor/github.com/ziutek/mymysql/mysql/errors.go new file mode 100644 index 000000000..70be44b2f --- /dev/null +++ b/vendor/github.com/ziutek/mymysql/mysql/errors.go @@ -0,0 +1,533 @@ +package mysql + +import ( + "fmt" +) + +// If a function/method returns an error you may check the returned error type via +// a type assertion.
If the type assertion succeeds you can retrieve the MySQL +// error code as below. +// +// Example: +// if val, ok := err.(*mysql.Error); ok { +// fmt.Println(val.Code) +// } +type Error struct { + Code uint16 + Msg []byte +} + +func (err Error) Error() string { + return fmt.Sprintf("Received #%d error from MySQL server: \"%s\"", + err.Code, err.Msg) +} + +// MySQL error codes +const ( + ER_HASHCHK = 1000 + ER_NISAMCHK = 1001 + ER_NO = 1002 + ER_YES = 1003 + ER_CANT_CREATE_FILE = 1004 + ER_CANT_CREATE_TABLE = 1005 + ER_CANT_CREATE_DB = 1006 + ER_DB_CREATE_EXISTS = 1007 + ER_DB_DROP_EXISTS = 1008 + ER_DB_DROP_DELETE = 1009 + ER_DB_DROP_RMDIR = 1010 + ER_CANT_DELETE_FILE = 1011 + ER_CANT_FIND_SYSTEM_REC = 1012 + ER_CANT_GET_STAT = 1013 + ER_CANT_GET_WD = 1014 + ER_CANT_LOCK = 1015 + ER_CANT_OPEN_FILE = 1016 + ER_FILE_NOT_FOUND = 1017 + ER_CANT_READ_DIR = 1018 + ER_CANT_SET_WD = 1019 + ER_CHECKREAD = 1020 + ER_DISK_FULL = 1021 + ER_DUP_KEY = 1022 + ER_ERROR_ON_CLOSE = 1023 + ER_ERROR_ON_READ = 1024 + ER_ERROR_ON_RENAME = 1025 + ER_ERROR_ON_WRITE = 1026 + ER_FILE_USED = 1027 + ER_FILSORT_ABORT = 1028 + ER_FORM_NOT_FOUND = 1029 + ER_GET_ERRNO = 1030 + ER_ILLEGAL_HA = 1031 + ER_KEY_NOT_FOUND = 1032 + ER_NOT_FORM_FILE = 1033 + ER_NOT_KEYFILE = 1034 + ER_OLD_KEYFILE = 1035 + ER_OPEN_AS_READONLY = 1036 + ER_OUTOFMEMORY = 1037 + ER_OUT_OF_SORTMEMORY = 1038 + ER_UNEXPECTED_EOF = 1039 + ER_CON_COUNT_ERROR = 1040 + ER_OUT_OF_RESOURCES = 1041 + ER_BAD_HOST_ERROR = 1042 + ER_HANDSHAKE_ERROR = 1043 + ER_DBACCESS_DENIED_ERROR = 1044 + ER_ACCESS_DENIED_ERROR = 1045 + ER_NO_DB_ERROR = 1046 + ER_UNKNOWN_COM_ERROR = 1047 + ER_BAD_NULL_ERROR = 1048 + ER_BAD_DB_ERROR = 1049 + ER_TABLE_EXISTS_ERROR = 1050 + ER_BAD_TABLE_ERROR = 1051 + ER_NON_UNIQ_ERROR = 1052 + ER_SERVER_SHUTDOWN = 1053 + ER_BAD_FIELD_ERROR = 1054 + ER_WRONG_FIELD_WITH_GROUP = 1055 + ER_WRONG_GROUP_FIELD = 1056 + ER_WRONG_SUM_SELECT = 1057 + ER_WRONG_VALUE_COUNT = 1058 + ER_TOO_LONG_IDENT = 1059 + ER_DUP_FIELDNAME = 1060 + ER_DUP_KEYNAME = 1061 + ER_DUP_ENTRY = 1062 + ER_WRONG_FIELD_SPEC = 1063 + ER_PARSE_ERROR = 1064 + ER_EMPTY_QUERY = 1065 + ER_NONUNIQ_TABLE = 1066 + ER_INVALID_DEFAULT = 1067 + ER_MULTIPLE_PRI_KEY = 1068 + ER_TOO_MANY_KEYS = 1069 + ER_TOO_MANY_KEY_PARTS = 1070 + ER_TOO_LONG_KEY = 1071 + ER_KEY_COLUMN_DOES_NOT_EXITS = 1072 + ER_BLOB_USED_AS_KEY = 1073 + ER_TOO_BIG_FIELDLENGTH = 1074 + ER_WRONG_AUTO_KEY = 1075 + ER_READY = 1076 + ER_NORMAL_SHUTDOWN = 1077 + ER_GOT_SIGNAL = 1078 + ER_SHUTDOWN_COMPLETE = 1079 + ER_FORCING_CLOSE = 1080 + ER_IPSOCK_ERROR = 1081 + ER_NO_SUCH_INDEX = 1082 + ER_WRONG_FIELD_TERMINATORS = 1083 + ER_BLOBS_AND_NO_TERMINATED = 1084 + ER_TEXTFILE_NOT_READABLE = 1085 + ER_FILE_EXISTS_ERROR = 1086 + ER_LOAD_INFO = 1087 + ER_ALTER_INFO = 1088 + ER_WRONG_SUB_KEY = 1089 + ER_CANT_REMOVE_ALL_FIELDS = 1090 + ER_CANT_DROP_FIELD_OR_KEY = 1091 + ER_INSERT_INFO = 1092 + ER_UPDATE_TABLE_USED = 1093 + ER_NO_SUCH_THREAD = 1094 + ER_KILL_DENIED_ERROR = 1095 + ER_NO_TABLES_USED = 1096 + ER_TOO_BIG_SET = 1097 + ER_NO_UNIQUE_LOGFILE = 1098 + ER_TABLE_NOT_LOCKED_FOR_WRITE = 1099 + ER_TABLE_NOT_LOCKED = 1100 + ER_BLOB_CANT_HAVE_DEFAULT = 1101 + ER_WRONG_DB_NAME = 1102 + ER_WRONG_TABLE_NAME = 1103 + ER_TOO_BIG_SELECT = 1104 + ER_UNKNOWN_ERROR = 1105 + ER_UNKNOWN_PROCEDURE = 1106 + ER_WRONG_PARAMCOUNT_TO_PROCEDURE = 1107 + ER_WRONG_PARAMETERS_TO_PROCEDURE = 1108 + ER_UNKNOWN_TABLE = 1109 + ER_FIELD_SPECIFIED_TWICE = 1110 + ER_INVALID_GROUP_FUNC_USE = 1111 + ER_UNSUPPORTED_EXTENSION = 1112 + ER_TABLE_MUST_HAVE_COLUMNS = 1113 + 
ER_RECORD_FILE_FULL = 1114 + ER_UNKNOWN_CHARACTER_SET = 1115 + ER_TOO_MANY_TABLES = 1116 + ER_TOO_MANY_FIELDS = 1117 + ER_TOO_BIG_ROWSIZE = 1118 + ER_STACK_OVERRUN = 1119 + ER_WRONG_OUTER_JOIN = 1120 + ER_NULL_COLUMN_IN_INDEX = 1121 + ER_CANT_FIND_UDF = 1122 + ER_CANT_INITIALIZE_UDF = 1123 + ER_UDF_NO_PATHS = 1124 + ER_UDF_EXISTS = 1125 + ER_CANT_OPEN_LIBRARY = 1126 + ER_CANT_FIND_DL_ENTRY = 1127 + ER_FUNCTION_NOT_DEFINED = 1128 + ER_HOST_IS_BLOCKED = 1129 + ER_HOST_NOT_PRIVILEGED = 1130 + ER_PASSWORD_ANONYMOUS_USER = 1131 + ER_PASSWORD_NOT_ALLOWED = 1132 + ER_PASSWORD_NO_MATCH = 1133 + ER_UPDATE_INFO = 1134 + ER_CANT_CREATE_THREAD = 1135 + ER_WRONG_VALUE_COUNT_ON_ROW = 1136 + ER_CANT_REOPEN_TABLE = 1137 + ER_INVALID_USE_OF_NULL = 1138 + ER_REGEXP_ERROR = 1139 + ER_MIX_OF_GROUP_FUNC_AND_FIELDS = 1140 + ER_NONEXISTING_GRANT = 1141 + ER_TABLEACCESS_DENIED_ERROR = 1142 + ER_COLUMNACCESS_DENIED_ERROR = 1143 + ER_ILLEGAL_GRANT_FOR_TABLE = 1144 + ER_GRANT_WRONG_HOST_OR_USER = 1145 + ER_NO_SUCH_TABLE = 1146 + ER_NONEXISTING_TABLE_GRANT = 1147 + ER_NOT_ALLOWED_COMMAND = 1148 + ER_SYNTAX_ERROR = 1149 + ER_DELAYED_CANT_CHANGE_LOCK = 1150 + ER_TOO_MANY_DELAYED_THREADS = 1151 + ER_ABORTING_CONNECTION = 1152 + ER_NET_PACKET_TOO_LARGE = 1153 + ER_NET_READ_ERROR_FROM_PIPE = 1154 + ER_NET_FCNTL_ERROR = 1155 + ER_NET_PACKETS_OUT_OF_ORDER = 1156 + ER_NET_UNCOMPRESS_ERROR = 1157 + ER_NET_READ_ERROR = 1158 + ER_NET_READ_INTERRUPTED = 1159 + ER_NET_ERROR_ON_WRITE = 1160 + ER_NET_WRITE_INTERRUPTED = 1161 + ER_TOO_LONG_STRING = 1162 + ER_TABLE_CANT_HANDLE_BLOB = 1163 + ER_TABLE_CANT_HANDLE_AUTO_INCREMENT = 1164 + ER_DELAYED_INSERT_TABLE_LOCKED = 1165 + ER_WRONG_COLUMN_NAME = 1166 + ER_WRONG_KEY_COLUMN = 1167 + ER_WRONG_MRG_TABLE = 1168 + ER_DUP_UNIQUE = 1169 + ER_BLOB_KEY_WITHOUT_LENGTH = 1170 + ER_PRIMARY_CANT_HAVE_NULL = 1171 + ER_TOO_MANY_ROWS = 1172 + ER_REQUIRES_PRIMARY_KEY = 1173 + ER_NO_RAID_COMPILED = 1174 + ER_UPDATE_WITHOUT_KEY_IN_SAFE_MODE = 1175 + ER_KEY_DOES_NOT_EXITS = 1176 + ER_CHECK_NO_SUCH_TABLE = 1177 + ER_CHECK_NOT_IMPLEMENTED = 1178 + ER_CANT_DO_THIS_DURING_AN_TRANSACTION = 1179 + ER_ERROR_DURING_COMMIT = 1180 + ER_ERROR_DURING_ROLLBACK = 1181 + ER_ERROR_DURING_FLUSH_LOGS = 1182 + ER_ERROR_DURING_CHECKPOINT = 1183 + ER_NEW_ABORTING_CONNECTION = 1184 + ER_DUMP_NOT_IMPLEMENTED = 1185 + ER_FLUSH_MASTER_BINLOG_CLOSED = 1186 + ER_INDEX_REBUILD = 1187 + ER_MASTER = 1188 + ER_MASTER_NET_READ = 1189 + ER_MASTER_NET_WRITE = 1190 + ER_FT_MATCHING_KEY_NOT_FOUND = 1191 + ER_LOCK_OR_ACTIVE_TRANSACTION = 1192 + ER_UNKNOWN_SYSTEM_VARIABLE = 1193 + ER_CRASHED_ON_USAGE = 1194 + ER_CRASHED_ON_REPAIR = 1195 + ER_WARNING_NOT_COMPLETE_ROLLBACK = 1196 + ER_TRANS_CACHE_FULL = 1197 + ER_SLAVE_MUST_STOP = 1198 + ER_SLAVE_NOT_RUNNING = 1199 + ER_BAD_SLAVE = 1200 + ER_MASTER_INFO = 1201 + ER_SLAVE_THREAD = 1202 + ER_TOO_MANY_USER_CONNECTIONS = 1203 + ER_SET_CONSTANTS_ONLY = 1204 + ER_LOCK_WAIT_TIMEOUT = 1205 + ER_LOCK_TABLE_FULL = 1206 + ER_READ_ONLY_TRANSACTION = 1207 + ER_DROP_DB_WITH_READ_LOCK = 1208 + ER_CREATE_DB_WITH_READ_LOCK = 1209 + ER_WRONG_ARGUMENTS = 1210 + ER_NO_PERMISSION_TO_CREATE_USER = 1211 + ER_UNION_TABLES_IN_DIFFERENT_DIR = 1212 + ER_LOCK_DEADLOCK = 1213 + ER_TABLE_CANT_HANDLE_FT = 1214 + ER_CANNOT_ADD_FOREIGN = 1215 + ER_NO_REFERENCED_ROW = 1216 + ER_ROW_IS_REFERENCED = 1217 + ER_CONNECT_TO_MASTER = 1218 + ER_QUERY_ON_MASTER = 1219 + ER_ERROR_WHEN_EXECUTING_COMMAND = 1220 + ER_WRONG_USAGE = 1221 + ER_WRONG_NUMBER_OF_COLUMNS_IN_SELECT = 1222 + ER_CANT_UPDATE_WITH_READLOCK = 1223 + 
ER_MIXING_NOT_ALLOWED = 1224 + ER_DUP_ARGUMENT = 1225 + ER_USER_LIMIT_REACHED = 1226 + ER_SPECIFIC_ACCESS_DENIED_ERROR = 1227 + ER_LOCAL_VARIABLE = 1228 + ER_GLOBAL_VARIABLE = 1229 + ER_NO_DEFAULT = 1230 + ER_WRONG_VALUE_FOR_VAR = 1231 + ER_WRONG_TYPE_FOR_VAR = 1232 + ER_VAR_CANT_BE_READ = 1233 + ER_CANT_USE_OPTION_HERE = 1234 + ER_NOT_SUPPORTED_YET = 1235 + ER_MASTER_FATAL_ERROR_READING_BINLOG = 1236 + ER_SLAVE_IGNORED_TABLE = 1237 + ER_INCORRECT_GLOBAL_LOCAL_VAR = 1238 + ER_WRONG_FK_DEF = 1239 + ER_KEY_REF_DO_NOT_MATCH_TABLE_REF = 1240 + ER_OPERAND_COLUMNS = 1241 + ER_SUBQUERY_NO_1_ROW = 1242 + ER_UNKNOWN_STMT_HANDLER = 1243 + ER_CORRUPT_HELP_DB = 1244 + ER_CYCLIC_REFERENCE = 1245 + ER_AUTO_CONVERT = 1246 + ER_ILLEGAL_REFERENCE = 1247 + ER_DERIVED_MUST_HAVE_ALIAS = 1248 + ER_SELECT_REDUCED = 1249 + ER_TABLENAME_NOT_ALLOWED_HERE = 1250 + ER_NOT_SUPPORTED_AUTH_MODE = 1251 + ER_SPATIAL_CANT_HAVE_NULL = 1252 + ER_COLLATION_CHARSET_MISMATCH = 1253 + ER_SLAVE_WAS_RUNNING = 1254 + ER_SLAVE_WAS_NOT_RUNNING = 1255 + ER_TOO_BIG_FOR_UNCOMPRESS = 1256 + ER_ZLIB_Z_MEM_ERROR = 1257 + ER_ZLIB_Z_BUF_ERROR = 1258 + ER_ZLIB_Z_DATA_ERROR = 1259 + ER_CUT_VALUE_GROUP_CONCAT = 1260 + ER_WARN_TOO_FEW_RECORDS = 1261 + ER_WARN_TOO_MANY_RECORDS = 1262 + ER_WARN_NULL_TO_NOTNULL = 1263 + ER_WARN_DATA_OUT_OF_RANGE = 1264 + WARN_DATA_TRUNCATED = 1265 + ER_WARN_USING_OTHER_HANDLER = 1266 + ER_CANT_AGGREGATE_2COLLATIONS = 1267 + ER_DROP_USER = 1268 + ER_REVOKE_GRANTS = 1269 + ER_CANT_AGGREGATE_3COLLATIONS = 1270 + ER_CANT_AGGREGATE_NCOLLATIONS = 1271 + ER_VARIABLE_IS_NOT_STRUCT = 1272 + ER_UNKNOWN_COLLATION = 1273 + ER_SLAVE_IGNORED_SSL_PARAMS = 1274 + ER_SERVER_IS_IN_SECURE_AUTH_MODE = 1275 + ER_WARN_FIELD_RESOLVED = 1276 + ER_BAD_SLAVE_UNTIL_COND = 1277 + ER_MISSING_SKIP_SLAVE = 1278 + ER_UNTIL_COND_IGNORED = 1279 + ER_WRONG_NAME_FOR_INDEX = 1280 + ER_WRONG_NAME_FOR_CATALOG = 1281 + ER_WARN_QC_RESIZE = 1282 + ER_BAD_FT_COLUMN = 1283 + ER_UNKNOWN_KEY_CACHE = 1284 + ER_WARN_HOSTNAME_WONT_WORK = 1285 + ER_UNKNOWN_STORAGE_ENGINE = 1286 + ER_WARN_DEPRECATED_SYNTAX = 1287 + ER_NON_UPDATABLE_TABLE = 1288 + ER_FEATURE_DISABLED = 1289 + ER_OPTION_PREVENTS_STATEMENT = 1290 + ER_DUPLICATED_VALUE_IN_TYPE = 1291 + ER_TRUNCATED_WRONG_VALUE = 1292 + ER_TOO_MUCH_AUTO_TIMESTAMP_COLS = 1293 + ER_INVALID_ON_UPDATE = 1294 + ER_UNSUPPORTED_PS = 1295 + ER_GET_ERRMSG = 1296 + ER_GET_TEMPORARY_ERRMSG = 1297 + ER_UNKNOWN_TIME_ZONE = 1298 + ER_WARN_INVALID_TIMESTAMP = 1299 + ER_INVALID_CHARACTER_STRING = 1300 + ER_WARN_ALLOWED_PACKET_OVERFLOWED = 1301 + ER_CONFLICTING_DECLARATIONS = 1302 + ER_SP_NO_RECURSIVE_CREATE = 1303 + ER_SP_ALREADY_EXISTS = 1304 + ER_SP_DOES_NOT_EXIST = 1305 + ER_SP_DROP_FAILED = 1306 + ER_SP_STORE_FAILED = 1307 + ER_SP_LILABEL_MISMATCH = 1308 + ER_SP_LABEL_REDEFINE = 1309 + ER_SP_LABEL_MISMATCH = 1310 + ER_SP_UNINIT_VAR = 1311 + ER_SP_BADSELECT = 1312 + ER_SP_BADRETURN = 1313 + ER_SP_BADSTATEMENT = 1314 + ER_UPDATE_LOG_DEPRECATED_IGNORED = 1315 + ER_UPDATE_LOG_DEPRECATED_TRANSLATED = 1316 + ER_QUERY_INTERRUPTED = 1317 + ER_SP_WRONG_NO_OF_ARGS = 1318 + ER_SP_COND_MISMATCH = 1319 + ER_SP_NORETURN = 1320 + ER_SP_NORETURNEND = 1321 + ER_SP_BAD_CURSOR_QUERY = 1322 + ER_SP_BAD_CURSOR_SELECT = 1323 + ER_SP_CURSOR_MISMATCH = 1324 + ER_SP_CURSOR_ALREADY_OPEN = 1325 + ER_SP_CURSOR_NOT_OPEN = 1326 + ER_SP_UNDECLARED_VAR = 1327 + ER_SP_WRONG_NO_OF_FETCH_ARGS = 1328 + ER_SP_FETCH_NO_DATA = 1329 + ER_SP_DUP_PARAM = 1330 + ER_SP_DUP_VAR = 1331 + ER_SP_DUP_COND = 1332 + ER_SP_DUP_CURS = 1333 + ER_SP_CANT_ALTER = 1334 + 
ER_SP_SUBSELECT_NYI = 1335 + ER_STMT_NOT_ALLOWED_IN_SF_OR_TRG = 1336 + ER_SP_VARCOND_AFTER_CURSHNDLR = 1337 + ER_SP_CURSOR_AFTER_HANDLER = 1338 + ER_SP_CASE_NOT_FOUND = 1339 + ER_FPARSER_TOO_BIG_FILE = 1340 + ER_FPARSER_BAD_HEADER = 1341 + ER_FPARSER_EOF_IN_COMMENT = 1342 + ER_FPARSER_ERROR_IN_PARAMETER = 1343 + ER_FPARSER_EOF_IN_UNKNOWN_PARAMETER = 1344 + ER_VIEW_NO_EXPLAIN = 1345 + ER_FRM_UNKNOWN_TYPE = 1346 + ER_WRONG_OBJECT = 1347 + ER_NONUPDATEABLE_COLUMN = 1348 + ER_VIEW_SELECT_DERIVED = 1349 + ER_VIEW_SELECT_CLAUSE = 1350 + ER_VIEW_SELECT_VARIABLE = 1351 + ER_VIEW_SELECT_TMPTABLE = 1352 + ER_VIEW_WRONG_LIST = 1353 + ER_WARN_VIEW_MERGE = 1354 + ER_WARN_VIEW_WITHOUT_KEY = 1355 + ER_VIEW_INVALID = 1356 + ER_SP_NO_DROP_SP = 1357 + ER_SP_GOTO_IN_HNDLR = 1358 + ER_TRG_ALREADY_EXISTS = 1359 + ER_TRG_DOES_NOT_EXIST = 1360 + ER_TRG_ON_VIEW_OR_TEMP_TABLE = 1361 + ER_TRG_CANT_CHANGE_ROW = 1362 + ER_TRG_NO_SUCH_ROW_IN_TRG = 1363 + ER_NO_DEFAULT_FOR_FIELD = 1364 + ER_DIVISION_BY_ZERO = 1365 + ER_TRUNCATED_WRONG_VALUE_FOR_FIELD = 1366 + ER_ILLEGAL_VALUE_FOR_TYPE = 1367 + ER_VIEW_NONUPD_CHECK = 1368 + ER_VIEW_CHECK_FAILED = 1369 + ER_PROCACCESS_DENIED_ERROR = 1370 + ER_RELAY_LOG_FAIL = 1371 + ER_PASSWD_LENGTH = 1372 + ER_UNKNOWN_TARGET_BINLOG = 1373 + ER_IO_ERR_LOG_INDEX_READ = 1374 + ER_BINLOG_PURGE_PROHIBITED = 1375 + ER_FSEEK_FAIL = 1376 + ER_BINLOG_PURGE_FATAL_ERR = 1377 + ER_LOG_IN_USE = 1378 + ER_LOG_PURGE_UNKNOWN_ERR = 1379 + ER_RELAY_LOG_INIT = 1380 + ER_NO_BINARY_LOGGING = 1381 + ER_RESERVED_SYNTAX = 1382 + ER_WSAS_FAILED = 1383 + ER_DIFF_GROUPS_PROC = 1384 + ER_NO_GROUP_FOR_PROC = 1385 + ER_ORDER_WITH_PROC = 1386 + ER_LOGGING_PROHIBIT_CHANGING_OF = 1387 + ER_NO_FILE_MAPPING = 1388 + ER_WRONG_MAGIC = 1389 + ER_PS_MANY_PARAM = 1390 + ER_KEY_PART_0 = 1391 + ER_VIEW_CHECKSUM = 1392 + ER_VIEW_MULTIUPDATE = 1393 + ER_VIEW_NO_INSERT_FIELD_LIST = 1394 + ER_VIEW_DELETE_MERGE_VIEW = 1395 + ER_CANNOT_USER = 1396 + ER_XAER_NOTA = 1397 + ER_XAER_INVAL = 1398 + ER_XAER_RMFAIL = 1399 + ER_XAER_OUTSIDE = 1400 + ER_XAER_RMERR = 1401 + ER_XA_RBROLLBACK = 1402 + ER_NONEXISTING_PROC_GRANT = 1403 + ER_PROC_AUTO_GRANT_FAIL = 1404 + ER_PROC_AUTO_REVOKE_FAIL = 1405 + ER_DATA_TOO_LONG = 1406 + ER_SP_BAD_SQLSTATE = 1407 + ER_STARTUP = 1408 + ER_LOAD_FROM_FIXED_SIZE_ROWS_TO_VAR = 1409 + ER_CANT_CREATE_USER_WITH_GRANT = 1410 + ER_WRONG_VALUE_FOR_TYPE = 1411 + ER_TABLE_DEF_CHANGED = 1412 + ER_SP_DUP_HANDLER = 1413 + ER_SP_NOT_VAR_ARG = 1414 + ER_SP_NO_RETSET = 1415 + ER_CANT_CREATE_GEOMETRY_OBJECT = 1416 + ER_FAILED_ROUTINE_BREAK_BINLOG = 1417 + ER_BINLOG_UNSAFE_ROUTINE = 1418 + ER_BINLOG_CREATE_ROUTINE_NEED_SUPER = 1419 + ER_EXEC_STMT_WITH_OPEN_CURSOR = 1420 + ER_STMT_HAS_NO_OPEN_CURSOR = 1421 + ER_COMMIT_NOT_ALLOWED_IN_SF_OR_TRG = 1422 + ER_NO_DEFAULT_FOR_VIEW_FIELD = 1423 + ER_SP_NO_RECURSION = 1424 + ER_TOO_BIG_SCALE = 1425 + ER_TOO_BIG_PRECISION = 1426 + ER_M_BIGGER_THAN_D = 1427 + ER_WRONG_LOCK_OF_SYSTEM_TABLE = 1428 + ER_CONNECT_TO_FOREIGN_DATA_SOURCE = 1429 + ER_QUERY_ON_FOREIGN_DATA_SOURCE = 1430 + ER_FOREIGN_DATA_SOURCE_DOESNT_EXIST = 1431 + ER_FOREIGN_DATA_STRING_INVALID_CANT_CREATE = 1432 + ER_FOREIGN_DATA_STRING_INVALID = 1433 + ER_CANT_CREATE_FEDERATED_TABLE = 1434 + ER_TRG_IN_WRONG_SCHEMA = 1435 + ER_STACK_OVERRUN_NEED_MORE = 1436 + ER_TOO_LONG_BODY = 1437 + ER_WARN_CANT_DROP_DEFAULT_KEYCACHE = 1438 + ER_TOO_BIG_DISPLAYWIDTH = 1439 + ER_XAER_DUPID = 1440 + ER_DATETIME_FUNCTION_OVERFLOW = 1441 + ER_CANT_UPDATE_USED_TABLE_IN_SF_OR_TRG = 1442 + ER_VIEW_PREVENT_UPDATE = 1443 + ER_PS_NO_RECURSION = 1444 + 
ER_SP_CANT_SET_AUTOCOMMIT = 1445 + ER_MALFORMED_DEFINER = 1446 + ER_VIEW_FRM_NO_USER = 1447 + ER_VIEW_OTHER_USER = 1448 + ER_NO_SUCH_USER = 1449 + ER_FORBID_SCHEMA_CHANGE = 1450 + ER_ROW_IS_REFERENCED_2 = 1451 + ER_NO_REFERENCED_ROW_2 = 1452 + ER_SP_BAD_VAR_SHADOW = 1453 + ER_TRG_NO_DEFINER = 1454 + ER_OLD_FILE_FORMAT = 1455 + ER_SP_RECURSION_LIMIT = 1456 + ER_SP_PROC_TABLE_CORRUPT = 1457 + ER_SP_WRONG_NAME = 1458 + ER_TABLE_NEEDS_UPGRADE = 1459 + ER_SP_NO_AGGREGATE = 1460 + ER_MAX_PREPARED_STMT_COUNT_REACHED = 1461 + ER_VIEW_RECURSIVE = 1462 + ER_NON_GROUPING_FIELD_USED = 1463 + ER_TABLE_CANT_HANDLE_SPKEYS = 1464 + ER_NO_TRIGGERS_ON_SYSTEM_SCHEMA = 1465 + ER_REMOVED_SPACES = 1466 + ER_AUTOINC_READ_FAILED = 1467 + ER_USERNAME = 1468 + ER_HOSTNAME = 1469 + ER_WRONG_STRING_LENGTH = 1470 + ER_NON_INSERTABLE_TABLE = 1471 +) + +type ClientError string + +func (e ClientError) Error() string { + return string(e) +} + +var ( + ErrSeq = ClientError("packet sequence error") + ErrPkt = ClientError("malformed packet") + ErrPktLong = ClientError("packet too long") + ErrUnexpNullLCS = ClientError("unexpected NULL LCS") + ErrUnexpNullLCB = ClientError("unexpected NULL LCB") + ErrUnexpNullDate = ClientError("unexpected NULL DATETIME") + ErrUnexpNullTime = ClientError("unexpected NULL TIME") + ErrUnkResultPkt = ClientError("unexpected or unknown result packet") + ErrNotConn = ClientError("not connected") + ErrAlredyConn = ClientError("already connected") + ErrBadResult = ClientError("unexpected result") + ErrUnreadedReply = ClientError("reply is not completely read") + ErrBindCount = ClientError("wrong number of values for bind") + ErrBindUnkType = ClientError("unknown value type for bind") + ErrRowLength = ClientError("wrong length of row slice") + ErrBadCommand = ClientError("command isn't text SQL nor *Stmt") + ErrWrongDateLen = ClientError("wrong datetime/timestamp length") + ErrWrongTimeLen = ClientError("wrong time length") + ErrUnkMySQLType = ClientError("unknown MySQL type") + ErrWrongParamNum = ClientError("wrong parameter number") + ErrUnkDataType = ClientError("unknown data source type") + ErrSmallPktSize = ClientError("specified packet size is too small") + ErrReadAfterEOR = ClientError("previous ScanRow call returned io.EOF") + ErrOldProtocol = ClientError("server does not support 4.1 protocol") + ErrAuthentication = ClientError("authentication error") +) diff --git a/vendor/github.com/ziutek/mymysql/mysql/field.go b/vendor/github.com/ziutek/mymysql/mysql/field.go new file mode 100644 index 000000000..7ecf1b7cf --- /dev/null +++ b/vendor/github.com/ziutek/mymysql/mysql/field.go @@ -0,0 +1,15 @@ +package mysql + +type Field struct { + Catalog string + Db string + Table string + OrgTable string + Name string + OrgName string + DispLen uint32 + // Charset uint16 + Flags uint16 + Type byte + Scale byte +} diff --git a/vendor/github.com/ziutek/mymysql/mysql/interface.go b/vendor/github.com/ziutek/mymysql/mysql/interface.go new file mode 100644 index 000000000..c092c3825 --- /dev/null +++ b/vendor/github.com/ziutek/mymysql/mysql/interface.go @@ -0,0 +1,106 @@ +// MySQL Client API written entirely in Go without any external dependencies. +package mysql + +import ( + "net" + "time" +) + +// ConnCommon is a common interface to the connection.
+// See mymysql/native for method documentation +type ConnCommon interface { + Start(sql string, params ...interface{}) (Result, error) + Prepare(sql string) (Stmt, error) + + Ping() error + ThreadId() uint32 + Escape(txt string) string + + Query(sql string, params ...interface{}) ([]Row, Result, error) + QueryFirst(sql string, params ...interface{}) (Row, Result, error) + QueryLast(sql string, params ...interface{}) (Row, Result, error) +} + +// Dialer can be used to dial connections to MySQL. If Dialer returns (nil, nil) +// the hook is skipped and normal dialing proceeds. +type Dialer func(proto, laddr, raddr string, timeout time.Duration) (net.Conn, error) + +// Conn represents a connection to the MySQL server. +// See mymysql/native for method documentation +type Conn interface { + ConnCommon + + Clone() Conn + SetTimeout(time.Duration) + Connect() error + NetConn() net.Conn + SetDialer(Dialer) + Close() error + IsConnected() bool + Reconnect() error + Use(dbname string) error + Register(sql string) + SetMaxPktSize(new_size int) int + NarrowTypeSet(narrow bool) + FullFieldInfo(full bool) + Status() ConnStatus + + Begin() (Transaction, error) +} + +// Transaction represents a MySQL transaction +// See mymysql/native for method documentation +type Transaction interface { + ConnCommon + + Commit() error + Rollback() error + Do(st Stmt) Stmt + IsValid() bool +} + +// Stmt represents a MySQL prepared statement. +// See mymysql/native for method documentation +type Stmt interface { + Bind(params ...interface{}) + Run(params ...interface{}) (Result, error) + Delete() error + Reset() error + SendLongData(pnum int, data interface{}, pkt_size int) error + + Fields() []*Field + NumParam() int + WarnCount() int + + Exec(params ...interface{}) ([]Row, Result, error) + ExecFirst(params ...interface{}) (Row, Result, error) + ExecLast(params ...interface{}) (Row, Result, error) +} + +// Result represents one MySQL result set. +// See mymysql/native for method documentation +type Result interface { + StatusOnly() bool + ScanRow(Row) error + GetRow() (Row, error) + + MoreResults() bool + NextResult() (Result, error) + + Fields() []*Field + Map(string) int + Message() string + AffectedRows() uint64 + InsertId() uint64 + WarnCount() int + + MakeRow() Row + GetRows() ([]Row, error) + End() error + GetFirstRow() (Row, error) + GetLastRow() (Row, error) +} + +// New can be used to establish a connection. It is set by the imported engine +// (see mymysql/native, mymysql/thrsafe) +var New func(proto, laddr, raddr, user, passwd string, db ...string) Conn diff --git a/vendor/github.com/ziutek/mymysql/mysql/row.go b/vendor/github.com/ziutek/mymysql/mysql/row.go new file mode 100644 index 000000000..25c1651d9 --- /dev/null +++ b/vendor/github.com/ziutek/mymysql/mysql/row.go @@ -0,0 +1,475 @@ +package mysql + +import ( + "bytes" + "fmt" + "math" + "os" + "reflect" + "strconv" + "time" +) + +// Result row - contains a value for each column of the received row. +// +// If the row is a result of an ordinary text query, its elements can be +// []byte slices containing the result text, or nil if NULL is returned.
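+// +// A short example (an editorial sketch, not upstream documentation; it assumes a +// row obtained from a text-query Result): +// +// name := row.Str(1) // "" when the column is NULL +// id, err := row.IntErr(0) // err != nil when conversion is impossible +// raw := row.Bin(2) // the raw bytes of the column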
+// +// If it is result of prepared statement execution, its element field can be: +// intX, uintX, floatX, []byte, Date, Time, time.Time (in Local location) or nil +type Row []interface{} + +// Get the nn-th value and return it as []byte ([]byte{} if NULL) +func (tr Row) Bin(nn int) (bin []byte) { + switch data := tr[nn].(type) { + case nil: + // bin = []byte{} + case []byte: + bin = data + default: + buf := new(bytes.Buffer) + fmt.Fprint(buf, data) + bin = buf.Bytes() + } + return +} + +// Get the nn-th value and return it as string ("" if NULL) +func (tr Row) Str(nn int) (str string) { + switch data := tr[nn].(type) { + case nil: + // str = "" + case []byte: + str = string(data) + case time.Time: + str = TimeString(data) + case time.Duration: + str = DurationString(data) + default: + str = fmt.Sprint(data) + } + return +} + +const _MAX_INT = int64(int(^uint(0) >> 1)) +const _MIN_INT = -_MAX_INT - 1 + +// Get the nn-th value and return it as int (0 if NULL). Return error if +// conversion is impossible. +func (tr Row) IntErr(nn int) (val int, err error) { + switch data := tr[nn].(type) { + case nil: + // nop + case int32: + val = int(data) + case int16: + val = int(data) + case uint16: + val = int(data) + case int8: + val = int(data) + case uint8: + val = int(data) + case []byte: + val, err = strconv.Atoi(string(data)) + case int64: + if data >= _MIN_INT && data <= _MAX_INT { + val = int(data) + } else { + err = strconv.ErrRange + } + case uint32: + if int64(data) <= _MAX_INT { + val = int(data) + } else { + err = strconv.ErrRange + } + case uint64: + if data <= uint64(_MAX_INT) { + val = int(data) + } else { + err = strconv.ErrRange + } + default: + err = os.ErrInvalid + } + return +} + +// Get the nn-th value and return it as int (0 if NULL). Panic if conversion is +// impossible. +func (tr Row) Int(nn int) (val int) { + val, err := tr.IntErr(nn) + if err != nil { + panic(err) + } + return +} + +// Get the nn-th value and return it as int. Return 0 if value is NULL or +// conversion is impossible. +func (tr Row) ForceInt(nn int) (val int) { + val, _ = tr.IntErr(nn) + return +} + +const _MAX_UINT = uint64(^uint(0)) + +// Get the nn-th value and return it as uint (0 if NULL). Return error if +// conversion is impossible. +func (tr Row) UintErr(nn int) (val uint, err error) { + switch data := tr[nn].(type) { + case nil: + // nop + case uint32: + val = uint(data) + case uint16: + val = uint(data) + case uint8: + val = uint(data) + case []byte: + var v uint64 + v, err = strconv.ParseUint(string(data), 0, 0) + val = uint(v) + case uint64: + if data <= _MAX_UINT { + val = uint(data) + } else { + err = strconv.ErrRange + } + case int8, int16, int32, int64: + v := reflect.ValueOf(data).Int() + if v >= 0 && uint64(v) <= _MAX_UINT { + val = uint(v) + } else { + err = strconv.ErrRange + } + default: + err = os.ErrInvalid + } + return +} + +// Get the nn-th value and return it as uint (0 if NULL). Panic if conversion is +// impossible. +func (tr Row) Uint(nn int) (val uint) { + val, err := tr.UintErr(nn) + if err != nil { + panic(err) + } + return +} + +// Get the nn-th value and return it as uint. Return 0 if value is NULL or +// conversion is impossible. +func (tr Row) ForceUint(nn int) (val uint) { + val, _ = tr.UintErr(nn) + return +} + +// Get the nn-th value and return it as Date (0000-00-00 if NULL). Return error +// if conversion is impossible. 
+func (tr Row) DateErr(nn int) (val Date, err error) { + switch data := tr[nn].(type) { + case nil: + // nop + case Date: + val = data + case []byte: + val, err = ParseDate(string(data)) + } + return +} + +// It is like DateErr but panics if conversion is impossible. +func (tr Row) Date(nn int) (val Date) { + val, err := tr.DateErr(nn) + if err != nil { + panic(err) + } + return +} + +// It is like DateErr but return 0000-00-00 if conversion is impossible. +func (tr Row) ForceDate(nn int) (val Date) { + val, _ = tr.DateErr(nn) + return +} + +// Get the nn-th value and return it as time.Time in loc location (zero if NULL). +// Returns error if conversion is impossible. It can convert Date to time.Time. +func (tr Row) TimeErr(nn int, loc *time.Location) (t time.Time, err error) { + switch data := tr[nn].(type) { + case nil: + // nop + case time.Time: + if loc == time.Local { + t = data + } else { + y, mon, d := data.Date() + h, m, s := data.Clock() + // use data's nanoseconds; t is still the zero time at this point + t = time.Date(y, mon, d, h, m, s, data.Nanosecond(), loc) + } + case Date: + t = data.Time(loc) + case []byte: + t, err = ParseTime(string(data), loc) + } + return +} + +// As TimeErr but panics if conversion is impossible. +func (tr Row) Time(nn int, loc *time.Location) (val time.Time) { + val, err := tr.TimeErr(nn, loc) + if err != nil { + panic(err) + } + return +} + +// It is like TimeErr but returns 0000-00-00 00:00:00 if conversion is +// impossible. +func (tr Row) ForceTime(nn int, loc *time.Location) (val time.Time) { + val, _ = tr.TimeErr(nn, loc) + return +} + +// Get the nn-th value and return it as time.Time in Local location +// (zero if NULL). Returns error if conversion is impossible. +// It can convert Date to time.Time. +func (tr Row) LocaltimeErr(nn int) (t time.Time, err error) { + switch data := tr[nn].(type) { + case nil: + // nop + case time.Time: + t = data + case Date: + t = data.Time(time.Local) + case []byte: + t, err = ParseTime(string(data), time.Local) + } + return +} + +// As LocaltimeErr but panics if conversion is impossible. +func (tr Row) Localtime(nn int) (val time.Time) { + val, err := tr.LocaltimeErr(nn) + if err != nil { + panic(err) + } + return +} + +// It is like LocaltimeErr but returns 0000-00-00 00:00:00 if conversion is +// impossible. +func (tr Row) ForceLocaltime(nn int) (val time.Time) { + val, _ = tr.LocaltimeErr(nn) + return +} + +// Get the nn-th value and return it as time.Duration (0 if NULL). Return error +// if conversion is impossible. +func (tr Row) DurationErr(nn int) (val time.Duration, err error) { + switch data := tr[nn].(type) { + case nil: + case time.Duration: + val = data + case []byte: + val, err = ParseDuration(string(data)) + default: + err = fmt.Errorf("can't convert `%v` to time.Duration", data) + } + return +} + +// It is like DurationErr but panics if conversion is impossible. +func (tr Row) Duration(nn int) (val time.Duration) { + val, err := tr.DurationErr(nn) + if err != nil { + panic(err) + } + return +} + +// It is like DurationErr but return 0 if conversion is impossible. +func (tr Row) ForceDuration(nn int) (val time.Duration) { + val, _ = tr.DurationErr(nn) + return +} + +// Get the nn-th value and return it as bool. Return error +// if conversion is impossible.
+func (tr Row) BoolErr(nn int) (val bool, err error) { + switch data := tr[nn].(type) { + case nil: + // nop + case int8: + val = (data != 0) + case int32: + val = (data != 0) + case int16: + val = (data != 0) + case int64: + val = (data != 0) + case uint8: + val = (data != 0) + case uint32: + val = (data != 0) + case uint16: + val = (data != 0) + case uint64: + val = (data != 0) + case []byte: + var v int64 + v, err = strconv.ParseInt(string(data), 0, 64) + val = (v != 0) + default: + err = os.ErrInvalid + } + return +} + +// It is like BoolErr but panics if conversion is impossible. +func (tr Row) Bool(nn int) (val bool) { + val, err := tr.BoolErr(nn) + if err != nil { + panic(err) + } + return +} + +// It is like BoolErr but return false if conversion is impossible. +func (tr Row) ForceBool(nn int) (val bool) { + val, _ = tr.BoolErr(nn) + return +} + +// Get the nn-th value and return it as int64 (0 if NULL). Return error if +// conversion is impossible. +func (tr Row) Int64Err(nn int) (val int64, err error) { + switch data := tr[nn].(type) { + case nil: + // nop + case int64, int32, int16, int8: + val = reflect.ValueOf(data).Int() + case uint64, uint32, uint16, uint8: + u := reflect.ValueOf(data).Uint() + if u > math.MaxInt64 { + err = strconv.ErrRange + } else { + val = int64(u) + } + case []byte: + val, err = strconv.ParseInt(string(data), 10, 64) + default: + err = os.ErrInvalid + } + return +} + +// Get the nn-th value and return it as int64 (0 if NULL). +// Panic if conversion is impossible. +func (tr Row) Int64(nn int) (val int64) { + val, err := tr.Int64Err(nn) + if err != nil { + panic(err) + } + return +} + +// Get the nn-th value and return it as int64. Return 0 if value is NULL or +// conversion is impossible. +func (tr Row) ForceInt64(nn int) (val int64) { + val, _ = tr.Int64Err(nn) + return +} + +// Get the nn-th value and return it as uint64 (0 if NULL). Return error if +// conversion is impossible. +func (tr Row) Uint64Err(nn int) (val uint64, err error) { + switch data := tr[nn].(type) { + case nil: + // nop + case uint64, uint32, uint16, uint8: + val = reflect.ValueOf(data).Uint() + case int64, int32, int16, int8: + i := reflect.ValueOf(data).Int() + if i < 0 { + err = strconv.ErrRange + } else { + val = uint64(i) + } + case []byte: + val, err = strconv.ParseUint(string(data), 10, 64) + default: + err = os.ErrInvalid + } + return +} + +// Get the nn-th value and return it as uint64 (0 if NULL). +// Panic if conversion is impossible. +func (tr Row) Uint64(nn int) (val uint64) { + val, err := tr.Uint64Err(nn) + if err != nil { + panic(err) + } + return +} + +// Get the nn-th value and return it as uint64. Return 0 if value is NULL or +// conversion is impossible. +func (tr Row) ForceUint64(nn int) (val uint64) { + val, _ = tr.Uint64Err(nn) + return +} + +// Get the nn-th value and return it as float64 (0 if NULL). Return error if +// conversion is impossible. 
+func (tr Row) FloatErr(nn int) (val float64, err error) { + switch data := tr[nn].(type) { + case nil: + // nop + case float64, float32: + val = reflect.ValueOf(data).Float() + case int64, int32, int16, int8: + i := reflect.ValueOf(data).Int() + if i >= 2<<53 || i <= -(2<<53) { + err = strconv.ErrRange + } else { + val = float64(i) + } + case uint64, uint32, uint16, uint8: + u := reflect.ValueOf(data).Uint() + if u >= 2<<53 { + err = strconv.ErrRange + } else { + val = float64(u) + } + case []byte: + val, err = strconv.ParseFloat(string(data), 64) + default: + err = os.ErrInvalid + } + return +} + +// Get the nn-th value and return it as float64 (0 if NULL). +// Panic if conversion is impossible. +func (tr Row) Float(nn int) (val float64) { + val, err := tr.FloatErr(nn) + if err != nil { + panic(err) + } + return +} + +// Get the nn-th value and return it as float64. Return 0 if value is NULL or +// if conversion is impossible. +func (tr Row) ForceFloat(nn int) (val float64) { + val, _ = tr.FloatErr(nn) + return +} diff --git a/vendor/github.com/ziutek/mymysql/mysql/status.go b/vendor/github.com/ziutek/mymysql/mysql/status.go new file mode 100644 index 000000000..35cabab13 --- /dev/null +++ b/vendor/github.com/ziutek/mymysql/mysql/status.go @@ -0,0 +1,18 @@ +package mysql + +type ConnStatus uint16 + +// Status of server connection +const ( + SERVER_STATUS_IN_TRANS ConnStatus = 0x01 // Transaction has started + SERVER_STATUS_AUTOCOMMIT ConnStatus = 0x02 // Server in auto_commit mode + SERVER_STATUS_MORE_RESULTS ConnStatus = 0x04 + SERVER_MORE_RESULTS_EXISTS ConnStatus = 0x08 // Multi query - next query exists + SERVER_QUERY_NO_GOOD_INDEX_USED ConnStatus = 0x10 + SERVER_QUERY_NO_INDEX_USED ConnStatus = 0x20 + SERVER_STATUS_CURSOR_EXISTS ConnStatus = 0x40 // Server opened a read-only non-scrollable cursor for a query + SERVER_STATUS_LAST_ROW_SENT ConnStatus = 0x80 + + SERVER_STATUS_DB_DROPPED ConnStatus = 0x100 + SERVER_STATUS_NO_BACKSLASH_ESCAPES ConnStatus = 0x200 +) diff --git a/vendor/github.com/ziutek/mymysql/mysql/types.go b/vendor/github.com/ziutek/mymysql/mysql/types.go new file mode 100644 index 000000000..bbac379f7 --- /dev/null +++ b/vendor/github.com/ziutek/mymysql/mysql/types.go @@ -0,0 +1,225 @@ +package mysql + +import ( + "errors" + "fmt" + "strconv" + "strings" + "time" +) + +// For MySQL DATE type +type Date struct { + Year int16 + Month, Day byte +} + +func (dd Date) String() string { + return fmt.Sprintf("%04d-%02d-%02d", dd.Year, dd.Month, dd.Day) +} + +// True if date is 0000-00-00 +func (dd Date) IsZero() bool { + return dd.Day == 0 && dd.Month == 0 && dd.Year == 0 +} + +// Converts Date to time.Time using loc location. +// Converts MySQL zero to time.Time zero. +func (dd Date) Time(loc *time.Location) (t time.Time) { + if !dd.IsZero() { + t = time.Date( + int(dd.Year), time.Month(dd.Month), int(dd.Day), + 0, 0, 0, 0, + loc, + ) + } + return +} + +// Converts Date to time.Time using Local location. +// Converts MySQL zero to time.Time zero. +func (dd Date) Localtime() time.Time { + return dd.Time(time.Local) +} + +// Convert string date in format YYYY-MM-DD to Date. +// Leading and trailing spaces are ignored. 
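+// +// For example (an editorial sketch, not upstream documentation): +// +// d, err := ParseDate("2015-11-22") // Date{Year: 2015, Month: 11, Day: 22} +// z, _ := ParseDate(" 0000-00-00 ") // zero Date: z.IsZero() == true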
+func ParseDate(str string) (dd Date, err error) { + str = strings.TrimSpace(str) + if str == "0000-00-00" { + return + } + var ( + y, m, d int + ) + if len(str) != 10 || str[4] != '-' || str[7] != '-' { + goto invalid + } + if y, err = strconv.Atoi(str[0:4]); err != nil { + return + } + if m, err = strconv.Atoi(str[5:7]); err != nil { + return + } + if m < 0 || m > 12 { // MySQL permits month == 0 + goto invalid + } + if d, err = strconv.Atoi(str[8:10]); err != nil { + return + } + if d < 0 { // MySQL permits day == 0 + goto invalid + } + switch m { + case 1, 3, 5, 7, 8, 10, 12: + if d > 31 { + goto invalid + } + case 4, 6, 9, 11: + if d > 30 { + goto invalid + } + case 2: + if d > 29 { + goto invalid + } + } + dd.Year = int16(y) + dd.Month = byte(m) + dd.Day = byte(d) + return + +invalid: + err = errors.New("invalid MySQL DATE string: " + str) + return +} + +// Standard MySQL datetime format +const TimeFormat = "2006-01-02 15:04:05.000000000" + +// Returns t as string in MySQL format. Converts time.Time zero to MySQL zero. +func TimeString(t time.Time) string { + if t.IsZero() { + return "0000-00-00 00:00:00" + } + if t.Nanosecond() == 0 { + return t.Format(TimeFormat[:19]) + } + return t.Format(TimeFormat) +} + +// Parses string datetime in TimeFormat using loc location. +// Converts MySQL zero to time.Time zero. +func ParseTime(str string, loc *time.Location) (t time.Time, err error) { + str = strings.TrimSpace(str) + format := TimeFormat[:19] + switch len(str) { + case 10: + if str == "0000-00-00" { + return + } + format = format[:10] + case 19: + if str == "0000-00-00 00:00:00" { + return + } + } + // Don't expect 0000-00-00 00:00:00.0+ + t, err = time.ParseInLocation(format, str, loc) + return +} + +// Convert time.Duration to string representation of mysql.TIME +func DurationString(d time.Duration) string { + sign := 1 + if d < 0 { + sign = -1 + d = -d + } + ns := int(d % 1e9) + d /= 1e9 + sec := int(d % 60) + d /= 60 + min := int(d % 60) + hour := int(d/60) * sign + if ns == 0 { + return fmt.Sprintf("%d:%02d:%02d", hour, min, sec) + } + return fmt.Sprintf("%d:%02d:%02d.%09d", hour, min, sec, ns) +} + +// Parse duration from MySQL string format [+-]H+:MM:SS[.UUUUUUUUU]. +// Leading and trailing spaces are ignored. If the format is invalid an error +// is returned. +func ParseDuration(str string) (dur time.Duration, err error) { + str = strings.TrimSpace(str) + orig := str + if len(str) == 0 { + // guard: str[0] below would panic on an empty string + err = errors.New("invalid MySQL TIME string: " + orig) + return + } + // Check sign + sign := int64(1) + switch str[0] { + case '-': + sign = -1 + fallthrough + case '+': + str = str[1:] + } + var i, d int64 + // Find hours + if nn := strings.IndexRune(str, ':'); nn != -1 { + if i, err = strconv.ParseInt(str[0:nn], 10, 64); err != nil { + return + } + d = i * 3600 + str = str[nn+1:] + } else { + goto invalid + } + if len(str) != 5 && len(str) != 15 || str[2] != ':' { + goto invalid + } + if i, err = strconv.ParseInt(str[0:2], 10, 64); err != nil { + return + } + if i < 0 || i > 59 { + goto invalid + } + d += i * 60 + if i, err = strconv.ParseInt(str[3:5], 10, 64); err != nil { + return + } + if i < 0 || i > 59 { + goto invalid + } + d += i + d *= 1e9 + if len(str) == 15 { + if str[5] != '.'
{ + goto invalid + } + if i, err = strconv.ParseInt(str[6:15], 10, 64); err != nil { + return + } + d += i + } + dur = time.Duration(d * sign) + return + +invalid: + err = errors.New("invalid MySQL TIME string: " + orig) + return + +} + +type Blob []byte + +type Raw struct { + Typ uint16 + Val *[]byte +} + +type Timestamp struct { + time.Time +} + +func (t Timestamp) String() string { + return TimeString(t.Time) +} diff --git a/vendor/github.com/ziutek/mymysql/mysql/types_test.go b/vendor/github.com/ziutek/mymysql/mysql/types_test.go new file mode 100644 index 000000000..63d479eea --- /dev/null +++ b/vendor/github.com/ziutek/mymysql/mysql/types_test.go @@ -0,0 +1,100 @@ +package mysql + +import ( + "testing" + "time" +) + +type sio struct { + in, out string +} + +func checkRow(t *testing.T, examples []sio, conv func(string) interface{}) { + row := make(Row, 1) + for _, ex := range examples { + row[0] = conv(ex.in) + str := row.Str(0) + if str != ex.out { + t.Fatalf("Wrong conversion: '%s' != '%s'", str, ex.out) + } + } +} + +var dates = []sio{ + sio{"2121-11-22", "2121-11-22"}, + sio{"0000-00-00", "0000-00-00"}, + sio{" 1234-12-18 ", "1234-12-18"}, + sio{"\t1234-12-18 \r\n", "1234-12-18"}, +} + +func TestConvDate(t *testing.T) { + conv := func(str string) interface{} { + d, err := ParseDate(str) + if err != nil { + return err + } + return d + } + checkRow(t, dates, conv) +} + +var datetimes = []sio{ + sio{"2121-11-22 11:22:32", "2121-11-22 11:22:32"}, + sio{" 1234-12-18 22:11:22 ", "1234-12-18 22:11:22"}, + sio{"\t 1234-12-18 22:11:22 \r\n", "1234-12-18 22:11:22"}, + sio{"2000-11-11", "2000-11-11 00:00:00"}, + sio{"0000-00-00 00:00:00", "0000-00-00 00:00:00"}, + sio{"0000-00-00", "0000-00-00 00:00:00"}, + sio{"2000-11-22 11:11:11.000111222", "2000-11-22 11:11:11.000111222"}, +} + +func TestConvTime(t *testing.T) { + conv := func(str string) interface{} { + d, err := ParseTime(str, time.Local) + if err != nil { + return err + } + return d + } + checkRow(t, datetimes, conv) +} + +var times = []sio{ + sio{"1:23:45", "1:23:45"}, + sio{"-112:23:45", "-112:23:45"}, + sio{"+112:23:45", "112:23:45"}, + sio{"1:60:00", "invalid MySQL TIME string: 1:60:00"}, + sio{"1:00:60", "invalid MySQL TIME string: 1:00:60"}, + sio{"1:23:45.000111333", "1:23:45.000111333"}, + sio{"-1:23:45.000111333", "-1:23:45.000111333"}, +} + +func TestConvDuration(t *testing.T) { + conv := func(str string) interface{} { + d, err := ParseDuration(str) + if err != nil { + return err + } + return d + + } + checkRow(t, times, conv) +} + +func TestEscapeString(t *testing.T) { + txt := " \000 \n \r \\ ' \" \032 " + exp := ` \0 \n \r \\ \' \" \Z ` + out := escapeString(txt) + if out != exp { + t.Fatalf("escapeString: ret='%s' exp='%s'", out, exp) + } +} + +func TestEscapeQuotes(t *testing.T) { + txt := " '' '' ' ' ' " + exp := ` '''' '''' '' '' '' ` + out := escapeQuotes(txt) + if out != exp { + t.Fatalf("escapeQuotes: ret='%s' exp='%s'", out, exp) + } +} diff --git a/vendor/github.com/ziutek/mymysql/mysql/utils.go b/vendor/github.com/ziutek/mymysql/mysql/utils.go new file mode 100644 index 000000000..11b369e66 --- /dev/null +++ b/vendor/github.com/ziutek/mymysql/mysql/utils.go @@ -0,0 +1,302 @@ +package mysql + +import ( + "bufio" + "bytes" + "errors" + "fmt" + "io" + "os" + "strings" + "time" + "unicode" +) + +// Version returns the mymysql version string +func Version() string { + return "1.5.3" +} + +func syntaxError(ln int) error { + return fmt.Errorf("syntax error at line: %d", ln) +} + +// Creates a new connection handler using
configuration in cfgFile. Returns +// connection handler and map contains unknown options. +// +// Config file format(example): +// +// # mymysql options (if some option isn't specified it defaults to "") +// +// DbRaddr 127.0.0.1:3306 +// # DbRaddr /var/run/mysqld/mysqld.sock +// DbUser testuser +// DbPass TestPasswd9 +// # optional: DbName test +// # optional: DbEncd utf8 +// # optional: DbLaddr 127.0.0.1:0 +// # optional: DbTimeout 15s +// +// # Your options (returned in unk) +// +// MyOpt some text +func NewFromCF(cfgFile string) (con Conn, unk map[string]string, err error) { + var cf *os.File + cf, err = os.Open(cfgFile) + if err != nil { + return + } + br := bufio.NewReader(cf) + um := make(map[string]string) + var proto, laddr, raddr, user, pass, name, encd, to string + for i := 1; ; i++ { + buf, isPrefix, e := br.ReadLine() + if e != nil { + if e == io.EOF { + break + } + err = e + return + } + l := string(buf) + if isPrefix { + err = fmt.Errorf("line %d is too long", i) + return + } + l = strings.TrimFunc(l, unicode.IsSpace) + if len(l) == 0 || l[0] == '#' { + continue + } + n := strings.IndexFunc(l, unicode.IsSpace) + if n == -1 { + err = fmt.Errorf("syntax error at line: %d", i) + return + } + v := l[:n] + l = strings.TrimLeftFunc(l[n:], unicode.IsSpace) + switch v { + case "DbLaddr": + laddr = l + case "DbRaddr": + raddr = l + proto = "tcp" + if strings.IndexRune(l, ':') == -1 { + proto = "unix" + } + case "DbUser": + user = l + case "DbPass": + pass = l + case "DbName": + name = l + case "DbEncd": + encd = l + case "DbTimeout": + to = l + default: + um[v] = l + } + } + if raddr == "" { + err = errors.New("DbRaddr option is empty") + return + } + unk = um + if name != "" { + con = New(proto, laddr, raddr, user, pass, name) + } else { + con = New(proto, laddr, raddr, user, pass) + } + if encd != "" { + con.Register(fmt.Sprintf("SET NAMES %s", encd)) + } + if to != "" { + var timeout time.Duration + timeout, err = time.ParseDuration(to) + if err != nil { + return + } + con.SetTimeout(timeout) + } + return +} + +// Calls Start and next calls GetRow as long as it reads all rows from the +// result. Next it returns all readed rows as the slice of rows. +func Query(c Conn, sql string, params ...interface{}) (rows []Row, res Result, err error) { + res, err = c.Start(sql, params...) + if err != nil { + return + } + rows, err = GetRows(res) + return +} + +// Calls Start and next calls GetFirstRow +func QueryFirst(c Conn, sql string, params ...interface{}) (row Row, res Result, err error) { + res, err = c.Start(sql, params...) + if err != nil { + return + } + row, err = GetFirstRow(res) + return +} + +// Calls Start and next calls GetLastRow +func QueryLast(c Conn, sql string, params ...interface{}) (row Row, res Result, err error) { + res, err = c.Start(sql, params...) + if err != nil { + return + } + row, err = GetLastRow(res) + return +} + +// Calls Run and next call GetRow as long as it reads all rows from the +// result. Next it returns all readed rows as the slice of rows. +func Exec(s Stmt, params ...interface{}) (rows []Row, res Result, err error) { + res, err = s.Run(params...) + if err != nil { + return + } + rows, err = GetRows(res) + return +} + +// Calls Run and next call GetFirstRow +func ExecFirst(s Stmt, params ...interface{}) (row Row, res Result, err error) { + res, err = s.Run(params...) 
+ if err != nil { + return + } + row, err = GetFirstRow(res) + return +} + +// Calls Run and next call GetLastRow +func ExecLast(s Stmt, params ...interface{}) (row Row, res Result, err error) { + res, err = s.Run(params...) + if err != nil { + return + } + row, err = GetLastRow(res) + return +} + +// Calls r.MakeRow and next r.ScanRow. Doesn't return io.EOF error (returns nil +// row insted). +func GetRow(r Result) (Row, error) { + row := r.MakeRow() + err := r.ScanRow(row) + if err != nil { + if err == io.EOF { + return nil, nil + } + return nil, err + } + return row, nil +} + +// Reads all rows from result and returns them as slice. +func GetRows(r Result) (rows []Row, err error) { + var row Row + for { + row, err = r.GetRow() + if err != nil || row == nil { + break + } + rows = append(rows, row) + } + return +} + +// Returns last row and discard others +func GetLastRow(r Result) (Row, error) { + row := r.MakeRow() + err := r.ScanRow(row) + if err == io.EOF { + return nil, nil + } + for err == nil { + err = r.ScanRow(row) + } + if err == io.EOF { + return row, nil + } + return nil, err +} + +// Read all unreaded rows and discard them. This function is useful if you +// don't want to use the remaining rows. It has an impact only on current +// result. If there is multi result query, you must use NextResult method and +// read/discard all rows in this result, before use other method that sends +// data to the server. You can't use this function if last GetRow returned nil. +func End(r Result) error { + _, err := GetLastRow(r) + return err +} + +// Returns first row and discard others +func GetFirstRow(r Result) (row Row, err error) { + row, err = r.GetRow() + if err == nil && row != nil { + err = r.End() + } + return +} + +func escapeString(txt string) string { + var ( + esc string + buf bytes.Buffer + ) + last := 0 + for ii, bb := range txt { + switch bb { + case 0: + esc = `\0` + case '\n': + esc = `\n` + case '\r': + esc = `\r` + case '\\': + esc = `\\` + case '\'': + esc = `\'` + case '"': + esc = `\"` + case '\032': + esc = `\Z` + default: + continue + } + io.WriteString(&buf, txt[last:ii]) + io.WriteString(&buf, esc) + last = ii + 1 + } + io.WriteString(&buf, txt[last:]) + return buf.String() +} + +func escapeQuotes(txt string) string { + var buf bytes.Buffer + last := 0 + for ii, bb := range txt { + if bb == '\'' { + io.WriteString(&buf, txt[last:ii]) + io.WriteString(&buf, `''`) + last = ii + 1 + } + } + io.WriteString(&buf, txt[last:]) + return buf.String() +} + +// Escapes special characters in the txt, so it is safe to place returned string +// to Query method. +func Escape(c Conn, txt string) string { + if c.Status()&SERVER_STATUS_NO_BACKSLASH_ESCAPES != 0 { + return escapeQuotes(txt) + } + return escapeString(txt) +} diff --git a/vendor/github.com/ziutek/mymysql/native/LICENSE b/vendor/github.com/ziutek/mymysql/native/LICENSE new file mode 100644 index 000000000..8eab9a6ec --- /dev/null +++ b/vendor/github.com/ziutek/mymysql/native/LICENSE @@ -0,0 +1,24 @@ +Copyright (c) 2010, Michal Derkacz +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions +are met: +1. Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. +2. 
Redistributions in binary form must reproduce the above copyright
+   notice, this list of conditions and the following disclaimer in the
+   documentation and/or other materials provided with the distribution.
+3. The name of the author may not be used to endorse or promote products
+   derived from this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/ziutek/mymysql/native/addons.go b/vendor/github.com/ziutek/mymysql/native/addons.go
new file mode 100644
index 000000000..82ebe9028
--- /dev/null
+++ b/vendor/github.com/ziutek/mymysql/native/addons.go
@@ -0,0 +1,17 @@
+package native
+
+func NbinToNstr(nbin *[]byte) *string {
+	if nbin == nil {
+		return nil
+	}
+	str := string(*nbin)
+	return &str
+}
+
+func NstrToNbin(nstr *string) *[]byte {
+	if nstr == nil {
+		return nil
+	}
+	bin := []byte(*nstr)
+	return &bin
+}
diff --git a/vendor/github.com/ziutek/mymysql/native/bind_test.go b/vendor/github.com/ziutek/mymysql/native/bind_test.go
new file mode 100644
index 000000000..218cd1ee3
--- /dev/null
+++ b/vendor/github.com/ziutek/mymysql/native/bind_test.go
@@ -0,0 +1,386 @@
+package native
+
+import (
+	"bufio"
+	"bytes"
+	"github.com/ziutek/mymysql/mysql"
+	"math"
+	"reflect"
+	"strconv"
+	"testing"
+	"time"
+)
+
+var (
+	Bytes  = []byte("Ala ma Kota!")
+	String = "ssss" //"A kot ma Alę!"
+ blob = mysql.Blob{1, 2, 3} + dateT = time.Date(2010, 12, 30, 17, 21, 01, 0, time.Local) + tstamp = mysql.Timestamp{dateT.Add(1e9)} + date = mysql.Date{Year: 2011, Month: 2, Day: 3} + tim = -time.Duration((5*24*3600+4*3600+3*60+2)*1e9 + 1) + bol = true + + pBytes *[]byte + pString *string + pBlob *mysql.Blob + pDateT *time.Time + pTstamp *mysql.Timestamp + pDate *mysql.Date + pTim *time.Duration + pBol *bool + + raw = mysql.Raw{MYSQL_TYPE_INT24, &[]byte{3, 2, 1, 0}} + + Int8 = int8(1) + Uint8 = uint8(2) + Int16 = int16(3) + Uint16 = uint16(4) + Int32 = int32(5) + Uint32 = uint32(6) + Int64 = int64(0x7000100020003001) + Uint64 = uint64(0xffff0000ffff0000) + Int = int(7) + Uint = uint(8) + + Float32 = float32(1e10) + Float64 = 256e256 + + pInt8 *int8 + pUint8 *uint8 + pInt16 *int16 + pUint16 *uint16 + pInt32 *int32 + pUint32 *uint32 + pInt64 *int64 + pUint64 *uint64 + pInt *int + pUint *uint + pFloat32 *float32 + pFloat64 *float64 +) + +type BindTest struct { + val interface{} + typ uint16 + length int +} + +func intSize() int { + switch strconv.IntSize { + case 32: + return 4 + case 64: + return 8 + } + panic("bad int size") +} + +func intType() uint16 { + switch strconv.IntSize { + case 32: + return MYSQL_TYPE_LONG + case 64: + return MYSQL_TYPE_LONGLONG + } + panic("bad int size") + +} + +var bindTests = []BindTest{ + BindTest{nil, MYSQL_TYPE_NULL, 0}, + + BindTest{Bytes, MYSQL_TYPE_VAR_STRING, -1}, + BindTest{String, MYSQL_TYPE_STRING, -1}, + BindTest{blob, MYSQL_TYPE_BLOB, -1}, + BindTest{dateT, MYSQL_TYPE_DATETIME, -1}, + BindTest{tstamp, MYSQL_TYPE_TIMESTAMP, -1}, + BindTest{date, MYSQL_TYPE_DATE, -1}, + BindTest{tim, MYSQL_TYPE_TIME, -1}, + BindTest{bol, MYSQL_TYPE_TINY, -1}, + + BindTest{&Bytes, MYSQL_TYPE_VAR_STRING, -1}, + BindTest{&String, MYSQL_TYPE_STRING, -1}, + BindTest{&blob, MYSQL_TYPE_BLOB, -1}, + BindTest{&dateT, MYSQL_TYPE_DATETIME, -1}, + BindTest{&tstamp, MYSQL_TYPE_TIMESTAMP, -1}, + BindTest{&date, MYSQL_TYPE_DATE, -1}, + BindTest{&tim, MYSQL_TYPE_TIME, -1}, + + BindTest{pBytes, MYSQL_TYPE_VAR_STRING, -1}, + BindTest{pString, MYSQL_TYPE_STRING, -1}, + BindTest{pBlob, MYSQL_TYPE_BLOB, -1}, + BindTest{pDateT, MYSQL_TYPE_DATETIME, -1}, + BindTest{pTstamp, MYSQL_TYPE_TIMESTAMP, -1}, + BindTest{pDate, MYSQL_TYPE_DATE, -1}, + BindTest{pTim, MYSQL_TYPE_TIME, -1}, + BindTest{pBol, MYSQL_TYPE_TINY, -1}, + + BindTest{raw, MYSQL_TYPE_INT24, -1}, + + BindTest{Int8, MYSQL_TYPE_TINY, 1}, + BindTest{Int16, MYSQL_TYPE_SHORT, 2}, + BindTest{Int32, MYSQL_TYPE_LONG, 4}, + BindTest{Int64, MYSQL_TYPE_LONGLONG, 8}, + BindTest{Int, intType(), intSize()}, + + BindTest{&Int8, MYSQL_TYPE_TINY, 1}, + BindTest{&Int16, MYSQL_TYPE_SHORT, 2}, + BindTest{&Int32, MYSQL_TYPE_LONG, 4}, + BindTest{&Int64, MYSQL_TYPE_LONGLONG, 8}, + BindTest{&Int, intType(), intSize()}, + + BindTest{pInt8, MYSQL_TYPE_TINY, 1}, + BindTest{pInt16, MYSQL_TYPE_SHORT, 2}, + BindTest{pInt32, MYSQL_TYPE_LONG, 4}, + BindTest{pInt64, MYSQL_TYPE_LONGLONG, 8}, + BindTest{pInt, intType(), intSize()}, + + BindTest{Uint8, MYSQL_TYPE_TINY | MYSQL_UNSIGNED_MASK, 1}, + BindTest{Uint16, MYSQL_TYPE_SHORT | MYSQL_UNSIGNED_MASK, 2}, + BindTest{Uint32, MYSQL_TYPE_LONG | MYSQL_UNSIGNED_MASK, 4}, + BindTest{Uint64, MYSQL_TYPE_LONGLONG | MYSQL_UNSIGNED_MASK, 8}, + BindTest{Uint, intType() | MYSQL_UNSIGNED_MASK, intSize()}, + + BindTest{&Uint8, MYSQL_TYPE_TINY | MYSQL_UNSIGNED_MASK, 1}, + BindTest{&Uint16, MYSQL_TYPE_SHORT | MYSQL_UNSIGNED_MASK, 2}, + BindTest{&Uint32, MYSQL_TYPE_LONG | MYSQL_UNSIGNED_MASK, 4}, + BindTest{&Uint64, 
MYSQL_TYPE_LONGLONG | MYSQL_UNSIGNED_MASK, 8}, + BindTest{&Uint, intType() | MYSQL_UNSIGNED_MASK, intSize()}, + + BindTest{pUint8, MYSQL_TYPE_TINY | MYSQL_UNSIGNED_MASK, 1}, + BindTest{pUint16, MYSQL_TYPE_SHORT | MYSQL_UNSIGNED_MASK, 2}, + BindTest{pUint32, MYSQL_TYPE_LONG | MYSQL_UNSIGNED_MASK, 4}, + BindTest{pUint64, MYSQL_TYPE_LONGLONG | MYSQL_UNSIGNED_MASK, 8}, + BindTest{pUint, intType() | MYSQL_UNSIGNED_MASK, intSize()}, + + BindTest{Float32, MYSQL_TYPE_FLOAT, 4}, + BindTest{Float64, MYSQL_TYPE_DOUBLE, 8}, + + BindTest{&Float32, MYSQL_TYPE_FLOAT, 4}, + BindTest{&Float64, MYSQL_TYPE_DOUBLE, 8}, +} + +func makeAddressable(v reflect.Value) reflect.Value { + if v.IsValid() { + // Make an addresable value + av := reflect.New(v.Type()).Elem() + av.Set(v) + v = av + } + return v +} + +func TestBind(t *testing.T) { + for _, test := range bindTests { + v := makeAddressable(reflect.ValueOf(test.val)) + val := bindValue(v) + if val.typ != test.typ || val.length != test.length { + t.Errorf( + "Type: %s exp=0x%x res=0x%x Len: exp=%d res=%d", + reflect.TypeOf(test.val), test.typ, val.typ, test.length, + val.length, + ) + } + } +} + +type WriteTest struct { + val interface{} + exp []byte +} + +var writeTest []WriteTest + +func encodeU16(v uint16) []byte { + buf := make([]byte, 2) + EncodeU16(buf, v) + return buf +} + +func encodeU24(v uint32) []byte { + buf := make([]byte, 3) + EncodeU24(buf, v) + return buf +} + +func encodeU32(v uint32) []byte { + buf := make([]byte, 4) + EncodeU32(buf, v) + return buf +} + +func encodeU64(v uint64) []byte { + buf := make([]byte, 8) + EncodeU64(buf, v) + return buf +} + +func encodeDuration(d time.Duration) []byte { + buf := make([]byte, 13) + n := EncodeDuration(buf, d) + return buf[:n] +} + +func encodeTime(t time.Time) []byte { + buf := make([]byte, 12) + n := EncodeTime(buf, t) + return buf[:n] +} + +func encodeDate(d mysql.Date) []byte { + buf := make([]byte, 5) + n := EncodeDate(buf, d) + return buf[:n] +} + +func encodeUint(u uint) []byte { + switch strconv.IntSize { + case 32: + return encodeU32(uint32(u)) + case 64: + return encodeU64(uint64(u)) + } + panic("bad int size") + +} + +func init() { + b := make([]byte, 64*1024) + for ii := range b { + b[ii] = byte(ii) + } + blob = mysql.Blob(b) + + writeTest = []WriteTest{ + WriteTest{Bytes, append([]byte{byte(len(Bytes))}, Bytes...)}, + WriteTest{String, append([]byte{byte(len(String))}, []byte(String)...)}, + WriteTest{pBytes, nil}, + WriteTest{pString, nil}, + WriteTest{ + blob, + append( + append([]byte{253}, byte(len(blob)), byte(len(blob)>>8), byte(len(blob)>>16)), + []byte(blob)...), + }, + WriteTest{ + dateT, + []byte{ + 7, byte(dateT.Year()), byte(dateT.Year() >> 8), + byte(dateT.Month()), + byte(dateT.Day()), byte(dateT.Hour()), byte(dateT.Minute()), + byte(dateT.Second()), + }, + }, + WriteTest{ + &dateT, + []byte{ + 7, byte(dateT.Year()), byte(dateT.Year() >> 8), + byte(dateT.Month()), + byte(dateT.Day()), byte(dateT.Hour()), byte(dateT.Minute()), + byte(dateT.Second()), + }, + }, + WriteTest{ + date, + []byte{ + 4, byte(date.Year), byte(date.Year >> 8), byte(date.Month), + byte(date.Day), + }, + }, + WriteTest{ + &date, + []byte{ + 4, byte(date.Year), byte(date.Year >> 8), byte(date.Month), + byte(date.Day), + }, + }, + WriteTest{ + tim, + []byte{12, 1, 5, 0, 0, 0, 4, 3, 2, 1, 0, 0, 0}, + }, + WriteTest{ + &tim, + []byte{12, 1, 5, 0, 0, 0, 4, 3, 2, 1, 0, 0, 0}, + }, + WriteTest{bol, []byte{1}}, + WriteTest{&bol, []byte{1}}, + WriteTest{pBol, nil}, + + WriteTest{dateT, encodeTime(dateT)}, + 
WriteTest{&dateT, encodeTime(dateT)}, + WriteTest{pDateT, nil}, + + WriteTest{tstamp, encodeTime(tstamp.Time)}, + WriteTest{&tstamp, encodeTime(tstamp.Time)}, + WriteTest{pTstamp, nil}, + + WriteTest{date, encodeDate(date)}, + WriteTest{&date, encodeDate(date)}, + WriteTest{pDate, nil}, + + WriteTest{tim, encodeDuration(tim)}, + WriteTest{&tim, encodeDuration(tim)}, + WriteTest{pTim, nil}, + + WriteTest{Int, encodeUint(uint(Int))}, + WriteTest{Int16, encodeU16(uint16(Int16))}, + WriteTest{Int32, encodeU32(uint32(Int32))}, + WriteTest{Int64, encodeU64(uint64(Int64))}, + + WriteTest{Uint, encodeUint(Uint)}, + WriteTest{Uint16, encodeU16(Uint16)}, + WriteTest{Uint32, encodeU32(Uint32)}, + WriteTest{Uint64, encodeU64(Uint64)}, + + WriteTest{&Int, encodeUint(uint(Int))}, + WriteTest{&Int16, encodeU16(uint16(Int16))}, + WriteTest{&Int32, encodeU32(uint32(Int32))}, + WriteTest{&Int64, encodeU64(uint64(Int64))}, + + WriteTest{&Uint, encodeUint(Uint)}, + WriteTest{&Uint16, encodeU16(Uint16)}, + WriteTest{&Uint32, encodeU32(Uint32)}, + WriteTest{&Uint64, encodeU64(Uint64)}, + + WriteTest{pInt, nil}, + WriteTest{pInt16, nil}, + WriteTest{pInt32, nil}, + WriteTest{pInt64, nil}, + + WriteTest{Float32, encodeU32(math.Float32bits(Float32))}, + WriteTest{Float64, encodeU64(math.Float64bits(Float64))}, + + WriteTest{&Float32, encodeU32(math.Float32bits(Float32))}, + WriteTest{&Float64, encodeU64(math.Float64bits(Float64))}, + + WriteTest{pFloat32, nil}, + WriteTest{pFloat64, nil}, + } +} + +func TestWrite(t *testing.T) { + buf := new(bytes.Buffer) + for _, test := range writeTest { + buf.Reset() + var seq byte + pw := &pktWriter{ + wr: bufio.NewWriter(buf), + seq: &seq, + to_write: len(test.exp), + } + v := makeAddressable(reflect.ValueOf(test.val)) + val := bindValue(v) + pw.writeValue(&val) + if !reflect.Indirect(v).IsValid() && len(buf.Bytes()) == 0 { + // writeValue writes nothing for nil + continue + } + if len(buf.Bytes()) != len(test.exp)+4 || !bytes.Equal(buf.Bytes()[4:], test.exp) || val.Len() != len(test.exp) { + t.Fatalf("%s - exp_len=%d res_len=%d exp: %v res: %v", + reflect.TypeOf(test.val), len(test.exp), val.Len(), + test.exp, buf.Bytes(), + ) + } + } +} diff --git a/vendor/github.com/ziutek/mymysql/native/binding.go b/vendor/github.com/ziutek/mymysql/native/binding.go new file mode 100644 index 000000000..c1ae07ea4 --- /dev/null +++ b/vendor/github.com/ziutek/mymysql/native/binding.go @@ -0,0 +1,150 @@ +package native + +import ( + "github.com/ziutek/mymysql/mysql" + "reflect" + "time" +) + +var ( + timeType = reflect.TypeOf(time.Time{}) + timestampType = reflect.TypeOf(mysql.Timestamp{}) + dateType = reflect.TypeOf(mysql.Date{}) + durationType = reflect.TypeOf(time.Duration(0)) + blobType = reflect.TypeOf(mysql.Blob{}) + rawType = reflect.TypeOf(mysql.Raw{}) +) + +// val should be an addressable value +func bindValue(val reflect.Value) (out paramValue) { + if !val.IsValid() { + out.typ = MYSQL_TYPE_NULL + return + } + typ := val.Type() + if typ.Kind() == reflect.Ptr { + // We have addressable pointer + out.addr = val.Addr() + // Dereference pointer for next operation on its value + typ = typ.Elem() + val = val.Elem() + } else { + // We have addressable value. 
Create a pointer to it + pv := val.Addr() + // This pointer is unaddressable so copy it and return an address + out.addr = reflect.New(pv.Type()) + out.addr.Elem().Set(pv) + } + + // Obtain value type + switch typ.Kind() { + case reflect.String: + out.typ = MYSQL_TYPE_STRING + out.length = -1 + return + + case reflect.Int: + out.typ = _INT_TYPE + out.length = _SIZE_OF_INT + return + + case reflect.Int8: + out.typ = MYSQL_TYPE_TINY + out.length = 1 + return + + case reflect.Int16: + out.typ = MYSQL_TYPE_SHORT + out.length = 2 + return + + case reflect.Int32: + out.typ = MYSQL_TYPE_LONG + out.length = 4 + return + + case reflect.Int64: + if typ == durationType { + out.typ = MYSQL_TYPE_TIME + out.length = -1 + return + } + out.typ = MYSQL_TYPE_LONGLONG + out.length = 8 + return + + case reflect.Uint: + out.typ = _INT_TYPE | MYSQL_UNSIGNED_MASK + out.length = _SIZE_OF_INT + return + + case reflect.Uint8: + out.typ = MYSQL_TYPE_TINY | MYSQL_UNSIGNED_MASK + out.length = 1 + return + + case reflect.Uint16: + out.typ = MYSQL_TYPE_SHORT | MYSQL_UNSIGNED_MASK + out.length = 2 + return + + case reflect.Uint32: + out.typ = MYSQL_TYPE_LONG | MYSQL_UNSIGNED_MASK + out.length = 4 + return + + case reflect.Uint64: + out.typ = MYSQL_TYPE_LONGLONG | MYSQL_UNSIGNED_MASK + out.length = 8 + return + + case reflect.Float32: + out.typ = MYSQL_TYPE_FLOAT + out.length = 4 + return + + case reflect.Float64: + out.typ = MYSQL_TYPE_DOUBLE + out.length = 8 + return + + case reflect.Slice: + out.length = -1 + if typ == blobType { + out.typ = MYSQL_TYPE_BLOB + return + } + if typ.Elem().Kind() == reflect.Uint8 { + out.typ = MYSQL_TYPE_VAR_STRING + return + } + + case reflect.Struct: + out.length = -1 + if typ == timeType { + out.typ = MYSQL_TYPE_DATETIME + return + } + if typ == dateType { + out.typ = MYSQL_TYPE_DATE + return + } + if typ == timestampType { + out.typ = MYSQL_TYPE_TIMESTAMP + return + } + if typ == rawType { + out.typ = val.FieldByName("Typ").Interface().(uint16) + out.addr = val.FieldByName("Val").Addr() + out.raw = true + return + } + + case reflect.Bool: + out.typ = MYSQL_TYPE_TINY + // bool implementation isn't documented so we treat it in special way + out.length = -1 + return + } + panic(mysql.ErrBindUnkType) +} diff --git a/vendor/github.com/ziutek/mymysql/native/codecs.go b/vendor/github.com/ziutek/mymysql/native/codecs.go new file mode 100644 index 000000000..ddec54b5a --- /dev/null +++ b/vendor/github.com/ziutek/mymysql/native/codecs.go @@ -0,0 +1,458 @@ +package native + +import ( + "github.com/ziutek/mymysql/mysql" + "time" +) + +// Integers + +func DecodeU16(buf []byte) uint16 { + return uint16(buf[1])<<8 | uint16(buf[0]) +} +func (pr *pktReader) readU16() uint16 { + buf := pr.buf[:2] + pr.readFull(buf) + return DecodeU16(buf) +} + +func DecodeU24(buf []byte) uint32 { + return (uint32(buf[2])<<8|uint32(buf[1]))<<8 | uint32(buf[0]) +} +func (pr *pktReader) readU24() uint32 { + buf := pr.buf[:3] + pr.readFull(buf) + return DecodeU24(buf) +} + +func DecodeU32(buf []byte) uint32 { + return ((uint32(buf[3])<<8|uint32(buf[2]))<<8| + uint32(buf[1]))<<8 | uint32(buf[0]) +} +func (pr *pktReader) readU32() uint32 { + buf := pr.buf[:4] + pr.readFull(buf) + return DecodeU32(buf) +} + +func DecodeU64(buf []byte) (rv uint64) { + for ii, vv := range buf { + rv |= uint64(vv) << uint(ii*8) + } + return +} +func (pr *pktReader) readU64() (rv uint64) { + buf := pr.buf[:8] + pr.readFull(buf) + return DecodeU64(buf) +} + +func EncodeU16(buf []byte, val uint16) { + buf[0] = byte(val) + buf[1] = byte(val >> 8) +} 
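+
+// A minimal round-trip sketch for the integer codecs above (the values are
+// only illustrative). EncodeU16 and DecodeU16 use little-endian byte order,
+// matching the MySQL wire protocol:
+//
+//	buf := make([]byte, 2)
+//	EncodeU16(buf, 0x1234) // buf == []byte{0x34, 0x12}
+//	v := DecodeU16(buf)    // v == 0x1234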
+func (pw *pktWriter) writeU16(val uint16) { + buf := pw.buf[:2] + EncodeU16(buf, val) + pw.write(buf) +} + +func EncodeU24(buf []byte, val uint32) { + buf[0] = byte(val) + buf[1] = byte(val >> 8) + buf[2] = byte(val >> 16) +} +func (pw *pktWriter) writeU24(val uint32) { + buf := pw.buf[:3] + EncodeU24(buf, val) + pw.write(buf) +} + +func EncodeU32(buf []byte, val uint32) { + buf[0] = byte(val) + buf[1] = byte(val >> 8) + buf[2] = byte(val >> 16) + buf[3] = byte(val >> 24) +} +func (pw *pktWriter) writeU32(val uint32) { + buf := pw.buf[:4] + EncodeU32(buf, val) + pw.write(buf) +} + +func EncodeU64(buf []byte, val uint64) { + buf[0] = byte(val) + buf[1] = byte(val >> 8) + buf[2] = byte(val >> 16) + buf[3] = byte(val >> 24) + buf[4] = byte(val >> 32) + buf[5] = byte(val >> 40) + buf[6] = byte(val >> 48) + buf[7] = byte(val >> 56) +} +func (pw *pktWriter) writeU64(val uint64) { + buf := pw.buf[:8] + EncodeU64(buf, val) + pw.write(buf) +} + +// Variable length values + +func (pr *pktReader) readNullLCB() (lcb uint64, null bool) { + bb := pr.readByte() + switch bb { + case 251: + null = true + case 252: + lcb = uint64(pr.readU16()) + case 253: + lcb = uint64(pr.readU24()) + case 254: + lcb = pr.readU64() + default: + lcb = uint64(bb) + } + return +} + +func (pr *pktReader) readLCB() uint64 { + lcb, null := pr.readNullLCB() + if null { + panic(mysql.ErrUnexpNullLCB) + } + return lcb +} + +func (pw *pktWriter) writeLCB(val uint64) { + switch { + case val <= 250: + pw.writeByte(byte(val)) + + case val <= 0xffff: + pw.writeByte(252) + pw.writeU16(uint16(val)) + + case val <= 0xffffff: + pw.writeByte(253) + pw.writeU24(uint32(val)) + + default: + pw.writeByte(254) + pw.writeU64(val) + } +} + +func lenLCB(val uint64) int { + switch { + case val <= 250: + return 1 + + case val <= 0xffff: + return 3 + + case val <= 0xffffff: + return 4 + } + return 9 +} + +func (pr *pktReader) readNullBin() (buf []byte, null bool) { + var l uint64 + l, null = pr.readNullLCB() + if null { + return + } + buf = make([]byte, l) + pr.readFull(buf) + return +} + +func (pr *pktReader) readBin() []byte { + buf, null := pr.readNullBin() + if null { + panic(mysql.ErrUnexpNullLCS) + } + return buf +} + +func (pr *pktReader) skipBin() { + n, _ := pr.readNullLCB() + pr.skipN(int(n)) +} + +func (pw *pktWriter) writeBin(buf []byte) { + pw.writeLCB(uint64(len(buf))) + pw.write(buf) +} + +func lenBin(buf []byte) int { + return lenLCB(uint64(len(buf))) + len(buf) +} + +func lenStr(str string) int { + return lenLCB(uint64(len(str))) + len(str) +} + +func (pw *pktWriter) writeLC(v interface{}) { + switch val := v.(type) { + case []byte: + pw.writeBin(val) + case *[]byte: + pw.writeBin(*val) + case string: + pw.writeBin([]byte(val)) + case *string: + pw.writeBin([]byte(*val)) + default: + panic("Unknown data type for write as length coded string") + } +} + +func lenLC(v interface{}) int { + switch val := v.(type) { + case []byte: + return lenBin(val) + case *[]byte: + return lenBin(*val) + case string: + return lenStr(val) + case *string: + return lenStr(*val) + } + panic("Unknown data type for write as length coded string") +} + +func (pr *pktReader) readNTB() (buf []byte) { + for { + ch := pr.readByte() + if ch == 0 { + break + } + buf = append(buf, ch) + } + return +} + +func (pw *pktWriter) writeNTB(buf []byte) { + pw.write(buf) + pw.writeByte(0) +} + +func (pw *pktWriter) writeNT(v interface{}) { + switch val := v.(type) { + case []byte: + pw.writeNTB(val) + case string: + pw.writeNTB([]byte(val)) + default: + panic("Unknown type for 
write as null terminated data") + } +} + +// Date and time + +func (pr *pktReader) readDuration() time.Duration { + dlen := pr.readByte() + switch dlen { + case 251: + // Null + panic(mysql.ErrUnexpNullTime) + case 0: + // 00:00:00 + return 0 + case 5, 8, 12: + // Properly time length + default: + panic(mysql.ErrWrongDateLen) + } + buf := pr.buf[:dlen] + pr.readFull(buf) + tt := int64(0) + switch dlen { + case 12: + // Nanosecond part + tt += int64(DecodeU32(buf[8:])) + fallthrough + case 8: + // HH:MM:SS part + tt += int64(int(buf[5])*3600+int(buf[6])*60+int(buf[7])) * 1e9 + fallthrough + case 5: + // Day part + tt += int64(DecodeU32(buf[1:5])) * (24 * 3600 * 1e9) + } + if buf[0] != 0 { + tt = -tt + } + return time.Duration(tt) +} + +func EncodeDuration(buf []byte, d time.Duration) int { + buf[0] = 0 + if d < 0 { + buf[1] = 1 + d = -d + } + if ns := uint32(d % 1e9); ns != 0 { + EncodeU32(buf[9:13], ns) // nanosecond + buf[0] += 4 + } + d /= 1e9 + if hms := int(d % (24 * 3600)); buf[0] != 0 || hms != 0 { + buf[8] = byte(hms % 60) // second + hms /= 60 + buf[7] = byte(hms % 60) // minute + buf[6] = byte(hms / 60) // hour + buf[0] += 3 + } + if day := uint32(d / (24 * 3600)); buf[0] != 0 || day != 0 { + EncodeU32(buf[2:6], day) // day + buf[0] += 4 + } + buf[0]++ // For sign byte + return int(buf[0] + 1) +} + +func (pw *pktWriter) writeDuration(d time.Duration) { + buf := pw.buf[:13] + n := EncodeDuration(buf, d) + pw.write(buf[:n]) +} + +func lenDuration(d time.Duration) int { + if d == 0 { + return 2 + } + if d%1e9 != 0 { + return 13 + } + d /= 1e9 + if d%(24*3600) != 0 { + return 9 + } + return 6 +} + +func (pr *pktReader) readTime() time.Time { + dlen := pr.readByte() + switch dlen { + case 251: + // Null + panic(mysql.ErrUnexpNullDate) + case 0: + // return 0000-00-00 converted to time.Time zero + return time.Time{} + case 4, 7, 11: + // Properly datetime length + default: + panic(mysql.ErrWrongDateLen) + } + + buf := pr.buf[:dlen] + pr.readFull(buf) + var y, mon, d, h, m, s, u int + switch dlen { + case 11: + // 2006-01-02 15:04:05.001004005 + u = int(DecodeU32(buf[7:])) + fallthrough + case 7: + // 2006-01-02 15:04:05 + h = int(buf[4]) + m = int(buf[5]) + s = int(buf[6]) + fallthrough + case 4: + // 2006-01-02 + y = int(DecodeU16(buf[0:2])) + mon = int(buf[2]) + d = int(buf[3]) + } + n := u * int(time.Microsecond) + return time.Date(y, time.Month(mon), d, h, m, s, n, time.Local) +} + +func encodeNonzeroTime(buf []byte, y int16, mon, d, h, m, s byte, u uint32) int { + buf[0] = 0 + switch { + case u != 0: + EncodeU32(buf[8:12], u) + buf[0] += 4 + fallthrough + case s != 0 || m != 0 || h != 0: + buf[7] = s + buf[6] = m + buf[5] = h + buf[0] += 3 + } + buf[4] = d + buf[3] = mon + EncodeU16(buf[1:3], uint16(y)) + buf[0] += 4 + return int(buf[0] + 1) +} + +func getTimeMicroseconds(t time.Time) int { + return (t.Nanosecond() + int(time.Microsecond/2)) / int(time.Microsecond) +} + +func EncodeTime(buf []byte, t time.Time) int { + if t.IsZero() { + // MySQL zero + buf[0] = 0 + return 1 // MySQL zero + } + y, mon, d := t.Date() + h, m, s := t.Clock() + u:= getTimeMicroseconds(t) + return encodeNonzeroTime( + buf, + int16(y), byte(mon), byte(d), + byte(h), byte(m), byte(s), uint32(u), + ) +} + +func (pw *pktWriter) writeTime(t time.Time) { + buf := pw.buf[:12] + n := EncodeTime(buf, t) + pw.write(buf[:n]) +} + +func lenTime(t time.Time) int { + switch { + case t.IsZero(): + return 1 + case getTimeMicroseconds(t) != 0: + return 12 + case t.Second() != 0 || t.Minute() != 0 || t.Hour() != 0: + 
return 8 + } + return 5 +} + +func (pr *pktReader) readDate() mysql.Date { + y, m, d := pr.readTime().Date() + return mysql.Date{int16(y), byte(m), byte(d)} +} + +func EncodeDate(buf []byte, d mysql.Date) int { + if d.IsZero() { + // MySQL zero + buf[0] = 0 + return 1 + } + return encodeNonzeroTime(buf, d.Year, d.Month, d.Day, 0, 0, 0, 0) +} + +func (pw *pktWriter) writeDate(d mysql.Date) { + buf := pw.buf[:5] + n := EncodeDate(buf, d) + pw.write(buf[:n]) +} + +func lenDate(d mysql.Date) int { + if d.IsZero() { + return 1 + } + return 5 +} diff --git a/vendor/github.com/ziutek/mymysql/native/command.go b/vendor/github.com/ziutek/mymysql/native/command.go new file mode 100644 index 000000000..2f89ff20b --- /dev/null +++ b/vendor/github.com/ziutek/mymysql/native/command.go @@ -0,0 +1,149 @@ +package native + +import ( + "log" +) + +//import "log" + +// _COM_QUIT, _COM_STATISTICS, _COM_PROCESS_INFO, _COM_DEBUG, _COM_PING: +func (my *Conn) sendCmd(cmd byte) { + my.seq = 0 + pw := my.newPktWriter(1) + pw.writeByte(cmd) + if my.Debug { + log.Printf("[%2d <-] Command packet: Cmd=0x%x", my.seq-1, cmd) + } +} + +// _COM_QUERY, _COM_INIT_DB, _COM_CREATE_DB, _COM_DROP_DB, _COM_STMT_PREPARE: +func (my *Conn) sendCmdStr(cmd byte, s string) { + my.seq = 0 + pw := my.newPktWriter(1 + len(s)) + pw.writeByte(cmd) + pw.write([]byte(s)) + if my.Debug { + log.Printf("[%2d <-] Command packet: Cmd=0x%x %s", my.seq-1, cmd, s) + } +} + +// _COM_PROCESS_KILL, _COM_STMT_CLOSE, _COM_STMT_RESET: +func (my *Conn) sendCmdU32(cmd byte, u uint32) { + my.seq = 0 + pw := my.newPktWriter(1 + 4) + pw.writeByte(cmd) + pw.writeU32(u) + if my.Debug { + log.Printf("[%2d <-] Command packet: Cmd=0x%x %d", my.seq-1, cmd, u) + } +} + +func (my *Conn) sendLongData(stmtid uint32, pnum uint16, data []byte) { + my.seq = 0 + pw := my.newPktWriter(1 + 4 + 2 + len(data)) + pw.writeByte(_COM_STMT_SEND_LONG_DATA) + pw.writeU32(stmtid) // Statement ID + pw.writeU16(pnum) // Parameter number + pw.write(data) // payload + if my.Debug { + log.Printf("[%2d <-] SendLongData packet: pnum=%d", my.seq-1, pnum) + } +} + +/*func (my *Conn) sendCmd(cmd byte, argv ...interface{}) { + // Reset sequence number + my.seq = 0 + // Write command + switch cmd { + case _COM_QUERY, _COM_INIT_DB, _COM_CREATE_DB, _COM_DROP_DB, + _COM_STMT_PREPARE: + pw := my.newPktWriter(1 + lenBS(argv[0])) + writeByte(pw, cmd) + writeBS(pw, argv[0]) + + case _COM_STMT_SEND_LONG_DATA: + pw := my.newPktWriter(1 + 4 + 2 + lenBS(argv[2])) + writeByte(pw, cmd) + writeU32(pw, argv[0].(uint32)) // Statement ID + writeU16(pw, argv[1].(uint16)) // Parameter number + writeBS(pw, argv[2]) // payload + + case _COM_QUIT, _COM_STATISTICS, _COM_PROCESS_INFO, _COM_DEBUG, _COM_PING: + pw := my.newPktWriter(1) + writeByte(pw, cmd) + + case _COM_FIELD_LIST: + pay_len := 1 + lenBS(argv[0]) + 1 + if len(argv) > 1 { + pay_len += lenBS(argv[1]) + } + + pw := my.newPktWriter(pay_len) + writeByte(pw, cmd) + writeNT(pw, argv[0]) + if len(argv) > 1 { + writeBS(pw, argv[1]) + } + + case _COM_TABLE_DUMP: + pw := my.newPktWriter(1 + lenLC(argv[0]) + lenLC(argv[1])) + writeByte(pw, cmd) + writeLC(pw, argv[0]) + writeLC(pw, argv[1]) + + case _COM_REFRESH, _COM_SHUTDOWN: + pw := my.newPktWriter(1 + 1) + writeByte(pw, cmd) + writeByte(pw, argv[0].(byte)) + + case _COM_STMT_FETCH: + pw := my.newPktWriter(1 + 4 + 4) + writeByte(pw, cmd) + writeU32(pw, argv[0].(uint32)) + writeU32(pw, argv[1].(uint32)) + + case _COM_PROCESS_KILL, _COM_STMT_CLOSE, _COM_STMT_RESET: + pw := my.newPktWriter(1 + 4) + writeByte(pw, cmd) + 
writeU32(pw, argv[0].(uint32)) + + case _COM_SET_OPTION: + pw := my.newPktWriter(1 + 2) + writeByte(pw, cmd) + writeU16(pw, argv[0].(uint16)) + + case _COM_CHANGE_USER: + pw := my.newPktWriter( + 1 + lenBS(argv[0]) + 1 + lenLC(argv[1]) + lenBS(argv[2]) + 1, + ) + writeByte(pw, cmd) + writeNT(pw, argv[0]) // User name + writeLC(pw, argv[1]) // Scrambled password + writeNT(pw, argv[2]) // Database name + //writeU16(pw, argv[3]) // Character set number (since 5.1.23?) + + case _COM_BINLOG_DUMP: + pay_len := 1 + 4 + 2 + 4 + if len(argv) > 3 { + pay_len += lenBS(argv[3]) + } + + pw := my.newPktWriter(pay_len) + writeByte(pw, cmd) + writeU32(pw, argv[0].(uint32)) // Start position + writeU16(pw, argv[1].(uint16)) // Flags + writeU32(pw, argv[2].(uint32)) // Slave server id + if len(argv) > 3 { + writeBS(pw, argv[3]) + } + + // TODO: case COM_REGISTER_SLAVE: + + default: + panic("Unknown code for MySQL command") + } + + if my.Debug { + log.Printf("[%2d <-] Command packet: Cmd=0x%x", my.seq-1, cmd) + } +}*/ diff --git a/vendor/github.com/ziutek/mymysql/native/common.go b/vendor/github.com/ziutek/mymysql/native/common.go new file mode 100644 index 000000000..2064cbe18 --- /dev/null +++ b/vendor/github.com/ziutek/mymysql/native/common.go @@ -0,0 +1,25 @@ +package native + +import ( + "io" + "runtime" +) + +var tab8s = " " + +func catchError(err *error) { + if pv := recover(); pv != nil { + switch e := pv.(type) { + case runtime.Error: + panic(pv) + case error: + if e == io.EOF { + *err = io.ErrUnexpectedEOF + } else { + *err = e + } + default: + panic(pv) + } + } +} diff --git a/vendor/github.com/ziutek/mymysql/native/consts.go b/vendor/github.com/ziutek/mymysql/native/consts.go new file mode 100644 index 000000000..b4c031fa3 --- /dev/null +++ b/vendor/github.com/ziutek/mymysql/native/consts.go @@ -0,0 +1,176 @@ +package native + +import "strconv" + +// Client caps - borrowed from GoMySQL +const ( + _CLIENT_LONG_PASSWORD = 1 << iota // new more secure passwords + _CLIENT_FOUND_ROWS // Found instead of affected rows + _CLIENT_LONG_FLAG // Get all column flags + _CLIENT_CONNECT_WITH_DB // One can specify db on connect + _CLIENT_NO_SCHEMA // Don't allow database.table.column + _CLIENT_COMPRESS // Can use compression protocol + _CLIENT_ODBC // Odbc client + _CLIENT_LOCAL_FILES // Can use LOAD DATA LOCAL + _CLIENT_IGNORE_SPACE // Ignore spaces before '(' + _CLIENT_PROTOCOL_41 // New 4.1 protocol + _CLIENT_INTERACTIVE // This is an interactive client + _CLIENT_SSL // Switch to SSL after handshake + _CLIENT_IGNORE_SIGPIPE // IGNORE sigpipes + _CLIENT_TRANSACTIONS // Client knows about transactions + _CLIENT_RESERVED // Old flag for 4.1 protocol + _CLIENT_SECURE_CONN // New 4.1 authentication + _CLIENT_MULTI_STATEMENTS // Enable/disable multi-stmt support + _CLIENT_MULTI_RESULTS // Enable/disable multi-results +) + +// Commands - borrowed from GoMySQL +const ( + _COM_QUIT = 0x01 + _COM_INIT_DB = 0x02 + _COM_QUERY = 0x03 + _COM_FIELD_LIST = 0x04 + _COM_CREATE_DB = 0x05 + _COM_DROP_DB = 0x06 + _COM_REFRESH = 0x07 + _COM_SHUTDOWN = 0x08 + _COM_STATISTICS = 0x09 + _COM_PROCESS_INFO = 0x0a + _COM_CONNECT = 0x0b + _COM_PROCESS_KILL = 0x0c + _COM_DEBUG = 0x0d + _COM_PING = 0x0e + _COM_TIME = 0x0f + _COM_DELAYED_INSERT = 0x10 + _COM_CHANGE_USER = 0x11 + _COM_BINLOG_DUMP = 0x12 + _COM_TABLE_DUMP = 0x13 + _COM_CONNECT_OUT = 0x14 + _COM_REGISTER_SLAVE = 0x15 + _COM_STMT_PREPARE = 0x16 + _COM_STMT_EXECUTE = 0x17 + _COM_STMT_SEND_LONG_DATA = 0x18 + _COM_STMT_CLOSE = 0x19 + _COM_STMT_RESET = 0x1a + _COM_SET_OPTION = 0x1b 
+	_COM_STMT_FETCH          = 0x1c
+)
+
+// MySQL protocol types.
+//
+// mymysql uses only some of them to send data to the MySQL server. The
+// MySQL types that are used are marked with a comment containing the
+// mymysql type that uses them.
+const (
+	MYSQL_TYPE_DECIMAL     = 0x00
+	MYSQL_TYPE_TINY        = 0x01 // int8, uint8, bool
+	MYSQL_TYPE_SHORT       = 0x02 // int16, uint16
+	MYSQL_TYPE_LONG        = 0x03 // int32, uint32
+	MYSQL_TYPE_FLOAT       = 0x04 // float32
+	MYSQL_TYPE_DOUBLE      = 0x05 // float64
+	MYSQL_TYPE_NULL        = 0x06 // nil
+	MYSQL_TYPE_TIMESTAMP   = 0x07 // Timestamp
+	MYSQL_TYPE_LONGLONG    = 0x08 // int64, uint64
+	MYSQL_TYPE_INT24       = 0x09
+	MYSQL_TYPE_DATE        = 0x0a // Date
+	MYSQL_TYPE_TIME        = 0x0b // Time
+	MYSQL_TYPE_DATETIME    = 0x0c // time.Time
+	MYSQL_TYPE_YEAR        = 0x0d
+	MYSQL_TYPE_NEWDATE     = 0x0e
+	MYSQL_TYPE_VARCHAR     = 0x0f
+	MYSQL_TYPE_BIT         = 0x10
+	MYSQL_TYPE_NEWDECIMAL  = 0xf6
+	MYSQL_TYPE_ENUM        = 0xf7
+	MYSQL_TYPE_SET         = 0xf8
+	MYSQL_TYPE_TINY_BLOB   = 0xf9
+	MYSQL_TYPE_MEDIUM_BLOB = 0xfa
+	MYSQL_TYPE_LONG_BLOB   = 0xfb
+	MYSQL_TYPE_BLOB        = 0xfc // Blob
+	MYSQL_TYPE_VAR_STRING  = 0xfd // []byte
+	MYSQL_TYPE_STRING      = 0xfe // string
+	MYSQL_TYPE_GEOMETRY    = 0xff
+
+	MYSQL_UNSIGNED_MASK = uint16(1 << 15)
+)
+
+// Mapping of MySQL types to (preferred) protocol types. Use it if you create
+// your own Raw value.
+//
+// Comments contain the corresponding types used by mymysql. The string type
+// may be replaced by the []byte type and vice versa. []byte is the native
+// type for sending over the network, so any string is converted to it before
+// sending; therefore, for better performance, use []byte.
+const (
+	// Client send and receive, mymysql representation for send / receive
+	TINYINT   = MYSQL_TYPE_TINY      // int8 / int8
+	SMALLINT  = MYSQL_TYPE_SHORT     // int16 / int16
+	INT       = MYSQL_TYPE_LONG      // int32 / int32
+	BIGINT    = MYSQL_TYPE_LONGLONG  // int64 / int64
+	FLOAT     = MYSQL_TYPE_FLOAT     // float32 / float32
+	DOUBLE    = MYSQL_TYPE_DOUBLE    // float64 / float32
+	TIME      = MYSQL_TYPE_TIME      // Time / Time
+	DATE      = MYSQL_TYPE_DATE      // Date / Date
+	DATETIME  = MYSQL_TYPE_DATETIME  // time.Time / time.Time
+	TIMESTAMP = MYSQL_TYPE_TIMESTAMP // Timestamp / time.Time
+	CHAR      = MYSQL_TYPE_STRING    // string / []byte
+	BLOB      = MYSQL_TYPE_BLOB      // Blob / []byte
+	NULL      = MYSQL_TYPE_NULL      // nil
+
+	// Client send only, mymysql representation for send
+	OUT_TEXT      = MYSQL_TYPE_STRING // string
+	OUT_VARCHAR   = MYSQL_TYPE_STRING // string
+	OUT_BINARY    = MYSQL_TYPE_BLOB   // Blob
+	OUT_VARBINARY = MYSQL_TYPE_BLOB   // Blob
+
+	// Client receive only, mymysql representation for receive
+	IN_MEDIUMINT  = MYSQL_TYPE_LONG        // int32
+	IN_YEAR       = MYSQL_TYPE_SHORT       // int16
+	IN_BINARY     = MYSQL_TYPE_STRING      // []byte
+	IN_VARCHAR    = MYSQL_TYPE_VAR_STRING  // []byte
+	IN_VARBINARY  = MYSQL_TYPE_VAR_STRING  // []byte
+	IN_TINYBLOB   = MYSQL_TYPE_TINY_BLOB   // []byte
+	IN_TINYTEXT   = MYSQL_TYPE_TINY_BLOB   // []byte
+	IN_TEXT       = MYSQL_TYPE_BLOB        // []byte
+	IN_MEDIUMBLOB = MYSQL_TYPE_MEDIUM_BLOB // []byte
+	IN_MEDIUMTEXT = MYSQL_TYPE_MEDIUM_BLOB // []byte
+	IN_LONGBLOB   = MYSQL_TYPE_LONG_BLOB   // []byte
+	IN_LONGTEXT   = MYSQL_TYPE_LONG_BLOB   // []byte
+
+	// MySQL 5.x specific
+	IN_DECIMAL = MYSQL_TYPE_NEWDECIMAL // TODO
+	IN_BIT     = MYSQL_TYPE_BIT        // []byte
+)
+
+// Flags - borrowed from GoMySQL
+const (
+	_FLAG_NOT_NULL = 1 << iota
+	_FLAG_PRI_KEY
+	_FLAG_UNIQUE_KEY
+	_FLAG_MULTIPLE_KEY
+	_FLAG_BLOB
+	_FLAG_UNSIGNED
+	_FLAG_ZEROFILL
+	_FLAG_BINARY
+	_FLAG_ENUM
+	_FLAG_AUTO_INCREMENT
+	_FLAG_TIMESTAMP
+	_FLAG_SET
+	_FLAG_NO_DEFAULT_VALUE
+)
+
+var (
+	_SIZE_OF_INT int
+	_INT_TYPE    uint16
+)
+
+func init() {
+	switch strconv.IntSize {
+	case
32:
+		_INT_TYPE = MYSQL_TYPE_LONG
+		_SIZE_OF_INT = 4
+	case 64:
+		_INT_TYPE = MYSQL_TYPE_LONGLONG
+		_SIZE_OF_INT = 8
+	default:
+		panic("bad int size")
+	}
+}
diff --git a/vendor/github.com/ziutek/mymysql/native/init.go b/vendor/github.com/ziutek/mymysql/native/init.go
new file mode 100644
index 000000000..bf5756368
--- /dev/null
+++ b/vendor/github.com/ziutek/mymysql/native/init.go
@@ -0,0 +1,80 @@
+package native
+
+import (
+	"github.com/ziutek/mymysql/mysql"
+	"log"
+)
+
+func (my *Conn) init() {
+	my.seq = 0 // Reset sequence number, mainly for reconnect
+	if my.Debug {
+		log.Printf("[%2d ->] Init packet:", my.seq)
+	}
+	pr := my.newPktReader()
+
+	my.info.prot_ver = pr.readByte()
+	my.info.serv_ver = pr.readNTB()
+	my.info.thr_id = pr.readU32()
+	pr.readFull(my.info.scramble[0:8])
+	pr.skipN(1)
+	my.info.caps = pr.readU16()
+	my.info.lang = pr.readByte()
+	my.status = mysql.ConnStatus(pr.readU16())
+	pr.skipN(13)
+	if my.info.caps&_CLIENT_PROTOCOL_41 != 0 {
+		pr.readFull(my.info.scramble[8:])
+	}
+	pr.skipAll() // Skip other information
+	if my.Debug {
+		log.Printf(tab8s+"ProtVer=%d, ServVer=\"%s\" Status=0x%x",
+			my.info.prot_ver, my.info.serv_ver, my.status,
+		)
+	}
+	if my.info.caps&_CLIENT_PROTOCOL_41 == 0 {
+		panic(mysql.ErrOldProtocol)
+	}
+}
+
+func (my *Conn) auth() {
+	if my.Debug {
+		log.Printf("[%2d <-] Authentication packet", my.seq)
+	}
+	flags := uint32(
+		_CLIENT_PROTOCOL_41 |
+			_CLIENT_LONG_PASSWORD |
+			_CLIENT_LONG_FLAG |
+			_CLIENT_TRANSACTIONS |
+			_CLIENT_SECURE_CONN |
+			_CLIENT_LOCAL_FILES |
+			_CLIENT_MULTI_STATEMENTS |
+			_CLIENT_MULTI_RESULTS)
+	// Reset flags not supported by server
+	flags &= uint32(my.info.caps) | 0xffff0000
+	scrPasswd := encryptedPasswd(my.passwd, my.info.scramble[:])
+	pay_len := 4 + 4 + 1 + 23 + len(my.user) + 1 + 1 + len(scrPasswd)
+	if len(my.dbname) > 0 {
+		pay_len += len(my.dbname) + 1
+		flags |= _CLIENT_CONNECT_WITH_DB
+	}
+	pw := my.newPktWriter(pay_len)
+	pw.writeU32(flags)
+	pw.writeU32(uint32(my.max_pkt_size))
+	pw.writeByte(my.info.lang)   // Charset number
+	pw.writeZeros(23)            // Filler
+	pw.writeNTB([]byte(my.user)) // Username
+	pw.writeBin(scrPasswd)       // Encrypted password
+	if len(my.dbname) > 0 {
+		pw.writeNTB([]byte(my.dbname))
+	}
+	return
+}
+
+func (my *Conn) oldPasswd() {
+	if my.Debug {
+		log.Printf("[%2d <-] Password packet", my.seq)
+	}
+	scrPasswd := encryptedOldPassword(my.passwd, my.info.scramble[:])
+	pw := my.newPktWriter(len(scrPasswd) + 1)
+	pw.write(scrPasswd)
+	pw.writeByte(0)
+}
diff --git a/vendor/github.com/ziutek/mymysql/native/mysql.go b/vendor/github.com/ziutek/mymysql/native/mysql.go
new file mode 100644
index 000000000..802b319af
--- /dev/null
+++ b/vendor/github.com/ziutek/mymysql/native/mysql.go
@@ -0,0 +1,834 @@
+// Thread-unsafe engine for MyMySQL
+package native
+
+import (
+	"bufio"
+	"fmt"
+	"github.com/ziutek/mymysql/mysql"
+	"io"
+	"net"
+	"reflect"
+	"strings"
+	"time"
+)
+
+type serverInfo struct {
+	prot_ver byte
+	serv_ver []byte
+	thr_id   uint32
+	scramble [20]byte
+	caps     uint16
+	lang     byte
+}
+
+// MySQL connection handler
+type Conn struct {
+	proto string // Network protocol
+	laddr string // Local address
+	raddr string // Remote (server) address
+
+	user   string // MySQL username
+	passwd string // MySQL password
+	dbname string // Database name
+
+	net_conn net.Conn // MySQL connection
+	rd       *bufio.Reader
+	wr       *bufio.Writer
+
+	info serverInfo // MySQL server information
+	seq  byte       // MySQL
sequence number
+
+	unreaded_reply bool
+
+	init_cmds []string         // MySQL commands/queries executed after connect
+	stmt_map  map[uint32]*Stmt // For reprepare during reconnect
+
+	// Current status of MySQL server connection
+	status mysql.ConnStatus
+
+	// Maximum packet size that client can accept from server.
+	// Default 16*1024*1024-1. You may change it before connect.
+	max_pkt_size int
+
+	// Timeout for connect
+	timeout time.Duration
+
+	dialer mysql.Dialer
+
+	// Return only types accepted by godrv
+	narrowTypeSet bool
+	// Store full information about fields in result
+	fullFieldInfo bool
+
+	// Debug logging. You may change it at any time.
+	Debug bool
+}
+
+// Creates a new MySQL handler. The first three arguments are used by the
+// dialer to establish the connection. user and passwd are for
+// authentication. Optional db is the database name (you may omit it and
+// select a database later with the Use method).
+func New(proto, laddr, raddr, user, passwd string, db ...string) mysql.Conn {
+	my := Conn{
+		proto:         proto,
+		laddr:         laddr,
+		raddr:         raddr,
+		user:          user,
+		passwd:        passwd,
+		stmt_map:      make(map[uint32]*Stmt),
+		max_pkt_size:  16*1024*1024 - 1,
+		timeout:       2 * time.Minute,
+		fullFieldInfo: true,
+	}
+	if len(db) == 1 {
+		my.dbname = db[0]
+	} else if len(db) > 1 {
+		panic("mymy.New: too many arguments")
+	}
+	return &my
+}
+
+func (my *Conn) NarrowTypeSet(narrow bool) {
+	my.narrowTypeSet = narrow
+}
+
+func (my *Conn) FullFieldInfo(full bool) {
+	my.fullFieldInfo = full
+}
+
+// Creates a new (not connected) connection using the configuration of the
+// current connection.
+func (my *Conn) Clone() mysql.Conn {
+	var c *Conn
+	if my.dbname == "" {
+		c = New(my.proto, my.laddr, my.raddr, my.user, my.passwd).(*Conn)
+	} else {
+		c = New(my.proto, my.laddr, my.raddr, my.user, my.passwd, my.dbname).(*Conn)
+	}
+	c.max_pkt_size = my.max_pkt_size
+	c.timeout = my.timeout
+	c.Debug = my.Debug
+	return c
+}
+
+// If new_size > 0, sets the maximum packet size. Returns the old size.
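+//
+// A minimal usage sketch (the new limit shown is only an example):
+//
+//	old := my.SetMaxPktSize(32 * 1024 * 1024) // raise the limit to 32 MiB
+//	_ = old                                   // previous limit, 16*1024*1024-1 by default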
+func (my *Conn) SetMaxPktSize(new_size int) int {
+	old_size := my.max_pkt_size
+	if new_size > 0 {
+		my.max_pkt_size = new_size
+	}
+	return old_size
+}
+
+// SetTimeout sets timeout for Connect and Reconnect
+func (my *Conn) SetTimeout(timeout time.Duration) {
+	my.timeout = timeout
+}
+
+// NetConn returns the internal net.Conn
+func (my *Conn) NetConn() net.Conn {
+	return my.net_conn
+}
+
+type timeoutError struct{}
+
+func (e *timeoutError) Error() string   { return "i/o timeout" }
+func (e *timeoutError) Timeout() bool   { return true }
+func (e *timeoutError) Temporary() bool { return true }
+
+type stringAddr struct {
+	net, addr string
+}
+
+func (a stringAddr) Network() string { return a.net }
+func (a stringAddr) String() string  { return a.addr }
+
+var DefaultDialer mysql.Dialer = func(proto, laddr, raddr string,
+	timeout time.Duration) (net.Conn, error) {
+
+	if proto == "" {
+		proto = "unix"
+		if strings.IndexRune(raddr, ':') != -1 {
+			proto = "tcp"
+		}
+	}
+
+	// Make a connection
+	d := &net.Dialer{Timeout: timeout}
+	if laddr != "" {
+		var err error
+		switch proto {
+		case "tcp", "tcp4", "tcp6":
+			d.LocalAddr, err = net.ResolveTCPAddr(proto, laddr)
+		case "unix":
+			d.LocalAddr, err = net.ResolveUnixAddr(proto, laddr)
+		default:
+			err = net.UnknownNetworkError(proto)
+		}
+		if err != nil {
+			return nil, err
+		}
+	}
+	return d.Dial(proto, raddr)
+}
+
+func (my *Conn) SetDialer(d mysql.Dialer) {
+	my.dialer = d
+}
+
+func (my *Conn) connect() (err error) {
+	defer catchError(&err)
+
+	my.net_conn = nil
+	if my.dialer != nil {
+		my.net_conn, err = my.dialer(my.proto, my.laddr, my.raddr, my.timeout)
+		if err != nil {
+			my.net_conn = nil
+			return
+		}
+	}
+	if my.net_conn == nil {
+		my.net_conn, err = DefaultDialer(my.proto, my.laddr, my.raddr, my.timeout)
+		if err != nil {
+			my.net_conn = nil
+			return
+		}
+	}
+	my.rd = bufio.NewReader(my.net_conn)
+	my.wr = bufio.NewWriter(my.net_conn)
+
+	// Initialisation
+	my.init()
+	my.auth()
+	res := my.getResult(nil, nil)
+	if res == nil {
+		// Try old password
+		my.oldPasswd()
+		res = my.getResult(nil, nil)
+		if res == nil {
+			return mysql.ErrAuthentication
+		}
+	}
+
+	// Execute all registered commands
+	for _, cmd := range my.init_cmds {
+		// Send command
+		my.sendCmdStr(_COM_QUERY, cmd)
+		// Get command response
+		res := my.getResponse()
+
+		// Read and discard all result rows
+		row := res.MakeRow()
+		for res != nil {
+			// Only read rows if they exist
+			if !res.StatusOnly() {
+				// Read each row in this set
+				for {
+					err = res.getRow(row)
+					if err == io.EOF {
+						break
+					} else if err != nil {
+						return
+					}
+				}
+			}
+
+			// Move to the next result
+			if res, err = res.nextResult(); err != nil {
+				return
+			}
+		}
+	}
+
+	return
+}
+
+// Establishes a connection with MySQL server version 4.1 or later.
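+//
+// A minimal usage sketch (address and credentials are examples only):
+//
+//	my := New("tcp", "", "127.0.0.1:3306", "testuser", "TestPasswd9", "test")
+//	if err := my.Connect(); err != nil {
+//		// handle the error
+//	}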
+func (my *Conn) Connect() (err error) {
+	if my.net_conn != nil {
+		return mysql.ErrAlredyConn
+	}
+
+	return my.connect()
+}
+
+// Check if connection is established
+func (my *Conn) IsConnected() bool {
+	return my.net_conn != nil
+}
+
+func (my *Conn) closeConn() (err error) {
+	defer catchError(&err)
+
+	// Always close and invalidate connection, even if
+	// COM_QUIT returns an error
+	defer func() {
+		err = my.net_conn.Close()
+		my.net_conn = nil // Mark that we disconnect
+	}()
+
+	// Close the connection
+	my.sendCmd(_COM_QUIT)
+	return
+}
+
+// Close connection to the server
+func (my *Conn) Close() (err error) {
+	if my.net_conn == nil {
+		return mysql.ErrNotConn
+	}
+	if my.unreaded_reply {
+		return mysql.ErrUnreadedReply
+	}
+
+	return my.closeConn()
+}
+
+// Close and reopen connection.
+// Ignore unread rows, reprepare all prepared statements.
+func (my *Conn) Reconnect() (err error) {
+	if my.net_conn != nil {
+		// Close connection, ignore all errors
+		my.closeConn()
+	}
+	// Reopen the connection.
+	if err = my.connect(); err != nil {
+		return
+	}
+
+	// Reprepare all prepared statements
+	var (
+		new_stmt *Stmt
+		new_map  = make(map[uint32]*Stmt)
+	)
+	for _, stmt := range my.stmt_map {
+		new_stmt, err = my.prepare(stmt.sql)
+		if err != nil {
+			return
+		}
+		// Assume that fields set in new_stmt by prepare() are identical to
+		// corresponding fields in stmt. Why can they be different?
+		stmt.id = new_stmt.id
+		stmt.rebind = true
+		new_map[stmt.id] = stmt
+	}
+	// Replace the stmt_map
+	my.stmt_map = new_map
+
+	return
+}
+
+// Change database
+func (my *Conn) Use(dbname string) (err error) {
+	defer catchError(&err)
+
+	if my.net_conn == nil {
+		return mysql.ErrNotConn
+	}
+	if my.unreaded_reply {
+		return mysql.ErrUnreadedReply
+	}
+
+	// Send command
+	my.sendCmdStr(_COM_INIT_DB, dbname)
+	// Get server response
+	my.getResult(nil, nil)
+	// Save new database name if no errors
+	my.dbname = dbname
+
+	return
+}
+
+func (my *Conn) getResponse() (res *Result) {
+	res = my.getResult(nil, nil)
+	if res == nil {
+		panic(mysql.ErrBadResult)
+	}
+	my.unreaded_reply = !res.StatusOnly()
+	return
+}
+
+// Start new query.
+//
+// If you specify the parameters, the SQL string will be a result of
+// fmt.Sprintf(sql, params...).
+// You must get all result rows (if they exist) before the next query.
+func (my *Conn) Start(sql string, params ...interface{}) (res mysql.Result, err error) {
+	defer catchError(&err)
+
+	if my.net_conn == nil {
+		return nil, mysql.ErrNotConn
+	}
+	if my.unreaded_reply {
+		return nil, mysql.ErrUnreadedReply
+	}
+
+	if len(params) != 0 {
+		sql = fmt.Sprintf(sql, params...)
+	}
+	// Send query
+	my.sendCmdStr(_COM_QUERY, sql)
+
+	// Get command response
+	res = my.getResponse()
+	return
+}
+
+func (res *Result) getRow(row mysql.Row) (err error) {
+	defer catchError(&err)
+
+	if res.my.getResult(res, row) != nil {
+		return io.EOF
+	}
+	return nil
+}
+
+// Returns true if more results exist. You don't have to call it before the
+// NextResult method (NextResult returns nil if there are no more results).
+func (res *Result) MoreResults() bool {
+	return res.status&mysql.SERVER_MORE_RESULTS_EXISTS != 0
+}
+
+// Get the data row from server. This method reads one row of result set
+// directly from network connection (without rows buffering on client side).
+// Returns io.EOF if there are no more rows in the current result set.
+func (res *Result) ScanRow(row mysql.Row) error {
+	if row == nil {
+		return mysql.ErrRowLength
+	}
+	if res.eor_returned {
+		return mysql.ErrReadAfterEOR
+	}
+	if res.StatusOnly() {
+		// There are no fields in result (OK result)
+		res.eor_returned = true
+		return io.EOF
+	}
+	err := res.getRow(row)
+	if err == io.EOF {
+		res.eor_returned = true
+		if !res.MoreResults() {
+			res.my.unreaded_reply = false
+		}
+	}
+	return err
+}
+
+// Like ScanRow but allocates memory for every row.
+// Returns a nil row instead of an io.EOF error.
+func (res *Result) GetRow() (mysql.Row, error) {
+	return mysql.GetRow(res)
+}
+
+func (res *Result) nextResult() (next *Result, err error) {
+	defer catchError(&err)
+	if res.MoreResults() {
+		next = res.my.getResponse()
+	}
+	return
+}
+
+// This function is used when the last query was a multi result query or a
+// procedure call. Returns the next result or nil if no more results exist.
+//
+// Statements within the procedure may produce an unknown number of result
+// sets. The final result from the procedure is a status result that includes
+// no result set (Result.StatusOnly() == true).
+func (res *Result) NextResult() (mysql.Result, error) {
+	if !res.MoreResults() {
+		return nil, nil
+	}
+	res, err := res.nextResult()
+	return res, err
+}
+
+// Send MySQL PING to the server.
+func (my *Conn) Ping() (err error) {
+	defer catchError(&err)
+
+	if my.net_conn == nil {
+		return mysql.ErrNotConn
+	}
+	if my.unreaded_reply {
+		return mysql.ErrUnreadedReply
+	}
+
+	// Send command
+	my.sendCmd(_COM_PING)
+	// Get server response
+	my.getResult(nil, nil)
+
+	return
+}
+
+func (my *Conn) prepare(sql string) (stmt *Stmt, err error) {
+	defer catchError(&err)
+
+	// Send command
+	my.sendCmdStr(_COM_STMT_PREPARE, sql)
+	// Get server response
+	stmt, ok := my.getPrepareResult(nil).(*Stmt)
+	if !ok {
+		return nil, mysql.ErrBadResult
+	}
+	if len(stmt.params) > 0 {
+		// Get param fields
+		my.getPrepareResult(stmt)
+	}
+	if len(stmt.fields) > 0 {
+		// Get column fields
+		my.getPrepareResult(stmt)
+	}
+	return
+}
+
+// Prepare server side statement. Return statement handler.
+func (my *Conn) Prepare(sql string) (mysql.Stmt, error) {
+	if my.net_conn == nil {
+		return nil, mysql.ErrNotConn
+	}
+	if my.unreaded_reply {
+		return nil, mysql.ErrUnreadedReply
+	}
+
+	stmt, err := my.prepare(sql)
+	if err != nil {
+		return nil, err
+	}
+	// Connect statement with database handler
+	my.stmt_map[stmt.id] = stmt
+	// Save SQL for reconnect
+	stmt.sql = sql
+
+	return stmt, nil
+}
+
+// Bind input data for the parameter markers in the SQL statement that was
+// passed to Prepare.
+//
+// params may be a parameter list (slice), a struct or a pointer to the struct.
+// A struct field can be a value or a pointer to a value. A parameter (slice
+// element) can be a value, a pointer to a value or a pointer to a pointer to
+// a value. Values may be of the following types: intXX, uintXX, floatXX,
+// bool, []byte, Blob, string, Time, Date, Time, Timestamp, Raw.
+func (stmt *Stmt) Bind(params ...interface{}) { + stmt.rebind = true + + if len(params) == 1 { + // Check for struct binding + pval := reflect.ValueOf(params[0]) + kind := pval.Kind() + if kind == reflect.Ptr { + // Dereference pointer + pval = pval.Elem() + kind = pval.Kind() + } + typ := pval.Type() + if kind == reflect.Struct && + typ != timeType && + typ != dateType && + typ != timestampType && + typ != rawType { + // We have a struct to bind + if pval.NumField() != stmt.param_count { + panic(mysql.ErrBindCount) + } + if !pval.CanAddr() { + // Make an addressable structure + v := reflect.New(pval.Type()).Elem() + v.Set(pval) + pval = v + } + for ii := 0; ii < stmt.param_count; ii++ { + stmt.params[ii] = bindValue(pval.Field(ii)) + } + stmt.binded = true + return + } + } + + // There isn't struct to bind + + if len(params) != stmt.param_count { + panic(mysql.ErrBindCount) + } + for ii, par := range params { + pval := reflect.ValueOf(par) + if pval.IsValid() { + if pval.Kind() == reflect.Ptr { + // Dereference pointer - this value i addressable + pval = pval.Elem() + } else { + // Make an addressable value + v := reflect.New(pval.Type()).Elem() + v.Set(pval) + pval = v + } + } + stmt.params[ii] = bindValue(pval) + } + stmt.binded = true +} + +// Execute prepared statement. If statement requires parameters you may bind +// them first or specify directly. After this command you may use GetRow to +// retrieve data. +func (stmt *Stmt) Run(params ...interface{}) (res mysql.Result, err error) { + defer catchError(&err) + + if stmt.my.net_conn == nil { + return nil, mysql.ErrNotConn + } + if stmt.my.unreaded_reply { + return nil, mysql.ErrUnreadedReply + } + + // Bind parameters if any + if len(params) != 0 { + stmt.Bind(params...) + } else if stmt.param_count != 0 && !stmt.binded { + panic(mysql.ErrBindCount) + } + + // Send EXEC command with binded parameters + stmt.sendCmdExec() + // Get response + r := stmt.my.getResponse() + r.binary = true + res = r + return +} + +// Destroy statement on server side. Client side handler is invalid after this +// command. +func (stmt *Stmt) Delete() (err error) { + defer catchError(&err) + + if stmt.my.net_conn == nil { + return mysql.ErrNotConn + } + if stmt.my.unreaded_reply { + return mysql.ErrUnreadedReply + } + + // Allways delete statement on client side, even if + // the command return an error. + defer func() { + // Delete statement from stmt_map + delete(stmt.my.stmt_map, stmt.id) + // Invalidate handler + *stmt = Stmt{} + }() + + // Send command + stmt.my.sendCmdU32(_COM_STMT_CLOSE, stmt.id) + return +} + +// Resets a prepared statement on server: data sent to the server, unbuffered +// result sets and current errors. +func (stmt *Stmt) Reset() (err error) { + defer catchError(&err) + + if stmt.my.net_conn == nil { + return mysql.ErrNotConn + } + if stmt.my.unreaded_reply { + return mysql.ErrUnreadedReply + } + + // Next exec must send type information. We set rebind flag regardless of + // whether the command succeeds or not. + stmt.rebind = true + // Send command + stmt.my.sendCmdU32(_COM_STMT_RESET, stmt.id) + // Get result + stmt.my.getResult(nil, nil) + return +} + +// Send long data to MySQL server in chunks. +// You can call this method after Bind and before Exec. It can be called +// multiple times for one parameter to send TEXT or BLOB data in chunks. +// +// pnum - Parameter number to associate the data with. +// +// data - Data source string, []byte or io.Reader. 
+//
+// pkt_size - It must be greater than 6 and less than or equal to the MySQL
+// max_allowed_packet variable. You can obtain the value of this variable
+// with a query like: SHOW variables WHERE Variable_name = 'max_allowed_packet'
+// If the data source is an io.Reader then (pkt_size - 6) is the size of the
+// buffer that will be allocated for reading.
+//
+// If you have a data source of type string or []byte in one piece you may
+// properly set pkt_size and call this method once. If you have data in
+// multiple pieces you can call this method multiple times. If the data source
+// is an io.Reader you should properly set pkt_size; data will be read from
+// the io.Reader and sent to the server in pieces until EOF.
+func (stmt *Stmt) SendLongData(pnum int, data interface{}, pkt_size int) (err error) {
+	defer catchError(&err)
+
+	if stmt.my.net_conn == nil {
+		return mysql.ErrNotConn
+	}
+	if stmt.my.unreaded_reply {
+		return mysql.ErrUnreadedReply
+	}
+	if pnum < 0 || pnum >= stmt.param_count {
+		return mysql.ErrWrongParamNum
+	}
+	if pkt_size -= 6; pkt_size < 0 {
+		return mysql.ErrSmallPktSize
+	}
+
+	switch dd := data.(type) {
+	case io.Reader:
+		buf := make([]byte, pkt_size)
+		for {
+			nn, ee := dd.Read(buf)
+			if nn != 0 {
+				stmt.my.sendLongData(stmt.id, uint16(pnum), buf[0:nn])
+			}
+			if ee == io.EOF {
+				return
+			}
+			if ee != nil {
+				return ee
+			}
+		}
+
+	case []byte:
+		for len(dd) > pkt_size {
+			stmt.my.sendLongData(stmt.id, uint16(pnum), dd[0:pkt_size])
+			dd = dd[pkt_size:]
+		}
+		stmt.my.sendLongData(stmt.id, uint16(pnum), dd)
+		return
+
+	case string:
+		for len(dd) > pkt_size {
+			stmt.my.sendLongData(
+				stmt.id,
+				uint16(pnum),
+				[]byte(dd[0:pkt_size]),
+			)
+			dd = dd[pkt_size:]
+		}
+		stmt.my.sendLongData(stmt.id, uint16(pnum), []byte(dd))
+		return
+	}
+	return mysql.ErrUnkDataType
+}
+
+// Returns the thread ID of the current connection.
+func (my *Conn) ThreadId() uint32 {
+	return my.info.thr_id
+}
+
+// Register a MySQL command/query to be executed immediately after connecting
+// to the server. You may register multiple commands. They will be executed in
+// the order of registration. This method is mainly useful for reconnect.
+func (my *Conn) Register(sql string) {
+	my.init_cmds = append(my.init_cmds, sql)
+}
+
+// See mysql.Query
+func (my *Conn) Query(sql string, params ...interface{}) ([]mysql.Row, mysql.Result, error) {
+	return mysql.Query(my, sql, params...)
+}
+
+// See mysql.QueryFirst
+func (my *Conn) QueryFirst(sql string, params ...interface{}) (mysql.Row, mysql.Result, error) {
+	return mysql.QueryFirst(my, sql, params...)
+}
+
+// See mysql.QueryLast
+func (my *Conn) QueryLast(sql string, params ...interface{}) (mysql.Row, mysql.Result, error) {
+	return mysql.QueryLast(my, sql, params...)
+}
+
+// See mysql.Exec
+func (stmt *Stmt) Exec(params ...interface{}) ([]mysql.Row, mysql.Result, error) {
+	return mysql.Exec(stmt, params...)
+}
+
+// See mysql.ExecFirst
+func (stmt *Stmt) ExecFirst(params ...interface{}) (mysql.Row, mysql.Result, error) {
+	return mysql.ExecFirst(stmt, params...)
+}
+
+// See mysql.ExecLast
+func (stmt *Stmt) ExecLast(params ...interface{}) (mysql.Row, mysql.Result, error) {
+	return mysql.ExecLast(stmt, params...)
+} + +// See mysql.End +func (res *Result) End() error { + return mysql.End(res) +} + +// See mysql.GetFirstRow +func (res *Result) GetFirstRow() (mysql.Row, error) { + return mysql.GetFirstRow(res) +} + +// See mysql.GetLastRow +func (res *Result) GetLastRow() (mysql.Row, error) { + return mysql.GetLastRow(res) +} + +// See mysql.GetRows +func (res *Result) GetRows() ([]mysql.Row, error) { + return mysql.GetRows(res) +} + +// Escapes special characters in the txt, so it is safe to place returned string +// to Query method. +func (my *Conn) Escape(txt string) string { + return mysql.Escape(my, txt) +} + +func (my *Conn) Status() mysql.ConnStatus { + return my.status +} + +type Transaction struct { + *Conn +} + +// Starts a new transaction +func (my *Conn) Begin() (mysql.Transaction, error) { + _, err := my.Start("START TRANSACTION") + return &Transaction{my}, err +} + +// Commit a transaction +func (tr Transaction) Commit() error { + _, err := tr.Start("COMMIT") + tr.Conn = nil // Invalidate this transaction + return err +} + +// Rollback a transaction +func (tr Transaction) Rollback() error { + _, err := tr.Start("ROLLBACK") + tr.Conn = nil // Invalidate this transaction + return err +} + +func (tr Transaction) IsValid() bool { + return tr.Conn != nil +} + +// Binds statement to the context of transaction. For native engine this is +// identity function. +func (tr Transaction) Do(st mysql.Stmt) mysql.Stmt { + if s, ok := st.(*Stmt); !ok || s.my != tr.Conn { + panic("Transaction and statement doesn't belong to the same connection") + } + return st +} + +func init() { + mysql.New = New +} diff --git a/vendor/github.com/ziutek/mymysql/native/native_test.go b/vendor/github.com/ziutek/mymysql/native/native_test.go new file mode 100644 index 000000000..fb6597108 --- /dev/null +++ b/vendor/github.com/ziutek/mymysql/native/native_test.go @@ -0,0 +1,1218 @@ +package native + +import ( + "bytes" + "fmt" + "github.com/ziutek/mymysql/mysql" + "io/ioutil" + "os" + "reflect" + "testing" + "time" +) + +var ( + my mysql.Conn + user = "testuser" + passwd = "TestPasswd9" + dbname = "test" + //conn = []string{"unix", "", "/var/run/mysqld/mysqld.sock"} + conn = []string{"", "", "127.0.0.1:3306"} + debug = false +) + +type RowsResErr struct { + rows []mysql.Row + res mysql.Result + err error +} + +func query(sql string, params ...interface{}) *RowsResErr { + rows, res, err := my.Query(sql, params...) + return &RowsResErr{rows, res, err} +} + +func exec(stmt *Stmt, params ...interface{}) *RowsResErr { + rows, res, err := stmt.Exec(params...) 
+ return &RowsResErr{rows, res, err} +} + +func checkErr(t *testing.T, err error, exp_err error) { + if err != exp_err { + if exp_err == nil { + t.Fatalf("Error: %v", err) + } else { + t.Fatalf("Error: %v\nExpected error: %v", err, exp_err) + } + } +} + +func checkWarnCount(t *testing.T, res_cnt, exp_cnt int) { + if res_cnt != exp_cnt { + t.Errorf("Warning count: res=%d exp=%d", res_cnt, exp_cnt) + rows, res, err := my.Query("show warnings") + if err != nil { + t.Fatal("Can't get warrnings from MySQL", err) + } + for _, row := range rows { + t.Errorf("%s: \"%s\"", row.Str(res.Map("Level")), + row.Str(res.Map("Message"))) + } + t.FailNow() + } +} + +func checkErrWarn(t *testing.T, res, exp *RowsResErr) { + checkErr(t, res.err, exp.err) + checkWarnCount(t, res.res.WarnCount(), exp.res.WarnCount()) +} + +func types(row mysql.Row) (tt []reflect.Type) { + tt = make([]reflect.Type, len(row)) + for ii, val := range row { + tt[ii] = reflect.TypeOf(val) + } + return +} + +func checkErrWarnRows(t *testing.T, res, exp *RowsResErr) { + checkErrWarn(t, res, exp) + if !reflect.DeepEqual(res.rows, exp.rows) { + rlen := len(res.rows) + elen := len(exp.rows) + t.Error("Rows are different!") + t.Errorf("len/cap: res=%d/%d exp=%d/%d", + rlen, cap(res.rows), elen, cap(exp.rows)) + max := rlen + if elen > max { + max = elen + } + for ii := 0; ii < max; ii++ { + if ii < len(res.rows) { + t.Errorf("%d: res type: %s", ii, types(res.rows[ii])) + } else { + t.Errorf("%d: res: ------", ii) + } + if ii < len(exp.rows) { + t.Errorf("%d: exp type: %s", ii, types(exp.rows[ii])) + } else { + t.Errorf("%d: exp: ------", ii) + } + if ii < len(res.rows) { + t.Error(" res: ", res.rows[ii]) + } + if ii < len(exp.rows) { + t.Error(" exp: ", exp.rows[ii]) + } + if ii < len(res.rows) { + t.Errorf(" res: %#v", res.rows[ii][2]) + } + if ii < len(exp.rows) { + t.Errorf(" exp: %#v", exp.rows[ii][2]) + } + } + t.FailNow() + } +} + +func checkResult(t *testing.T, res, exp *RowsResErr) { + checkErrWarnRows(t, res, exp) + r, e := res.res.(*Result), exp.res.(*Result) + if r.my != e.my || r.binary != e.binary || r.status_only != e.status_only || + r.status&0xdf != e.status || !bytes.Equal(r.message, e.message) || + r.affected_rows != e.affected_rows || + r.eor_returned != e.eor_returned || + !reflect.DeepEqual(res.rows, exp.rows) || res.err != exp.err { + t.Fatalf("Bad result:\nres=%+v\nexp=%+v", res.res, exp.res) + } +} + +func cmdOK(affected uint64, binary, eor bool) *RowsResErr { + return &RowsResErr{ + res: &Result{ + my: my.(*Conn), + binary: binary, + status_only: true, + status: 0x2, + message: []byte{}, + affected_rows: affected, + eor_returned: eor, + }, + } +} + +func selectOK(rows []mysql.Row, binary bool) (exp *RowsResErr) { + exp = cmdOK(0, binary, true) + exp.rows = rows + return +} + +func myConnect(t *testing.T, with_dbname bool, max_pkt_size int) { + if with_dbname { + my = New(conn[0], conn[1], conn[2], user, passwd, dbname) + } else { + my = New(conn[0], conn[1], conn[2], user, passwd) + } + + if max_pkt_size != 0 { + my.SetMaxPktSize(max_pkt_size) + } + my.(*Conn).Debug = debug + + checkErr(t, my.Connect(), nil) + checkResult(t, query("set names utf8"), cmdOK(0, false, true)) +} + +func myClose(t *testing.T) { + checkErr(t, my.Close(), nil) +} + +// Text queries tests + +func TestUse(t *testing.T) { + myConnect(t, false, 0) + checkErr(t, my.Use(dbname), nil) + myClose(t) +} + +func TestPing(t *testing.T) { + myConnect(t, false, 0) + checkErr(t, my.Ping(), nil) + myClose(t) +} + +func TestQuery(t *testing.T) { + 
myConnect(t, true, 0) + query("drop table t") // Drop test table if exists + checkResult(t, query("create table t (s varchar(40))"), + cmdOK(0, false, true)) + + exp := &RowsResErr{ + res: &Result{ + my: my.(*Conn), + field_count: 1, + fields: []*mysql.Field{ + &mysql.Field{ + Catalog: "def", + Db: "test", + Table: "Test", + OrgTable: "T", + Name: "Str", + OrgName: "s", + DispLen: 3 * 40, //varchar(40) + Flags: 0, + Type: MYSQL_TYPE_VAR_STRING, + Scale: 0, + }, + }, + status: mysql.SERVER_STATUS_AUTOCOMMIT, + eor_returned: true, + }, + } + + for ii := 0; ii > 10000; ii += 3 { + var val interface{} + if ii%10 == 0 { + checkResult(t, query("insert t values (null)"), + cmdOK(1, false, true)) + val = nil + } else { + txt := []byte(fmt.Sprintf("%d %d %d %d %d", ii, ii, ii, ii, ii)) + checkResult(t, + query("insert t values ('%s')", txt), cmdOK(1, false, true)) + val = txt + } + exp.rows = append(exp.rows, mysql.Row{val}) + } + + checkResult(t, query("select s as Str from t as Test"), exp) + checkResult(t, query("drop table t"), cmdOK(0, false, true)) + myClose(t) +} + +// Prepared statements tests + +type StmtErr struct { + stmt *Stmt + err error +} + +func prepare(sql string) *StmtErr { + stmt, err := my.Prepare(sql) + return &StmtErr{stmt.(*Stmt), err} +} + +func checkStmt(t *testing.T, res, exp *StmtErr) { + ok := res.err == exp.err && + // Skipping id + reflect.DeepEqual(res.stmt.fields, exp.stmt.fields) && + res.stmt.field_count == exp.stmt.field_count && + res.stmt.param_count == exp.stmt.param_count && + res.stmt.warning_count == exp.stmt.warning_count && + res.stmt.status == exp.stmt.status + + if !ok { + if exp.err == nil { + checkErr(t, res.err, nil) + checkWarnCount(t, res.stmt.warning_count, exp.stmt.warning_count) + for _, v := range res.stmt.fields { + fmt.Printf("%+v\n", v) + } + t.Fatalf("Bad result statement: res=%v exp=%v", res.stmt, exp.stmt) + } + } +} + +func TestPrepared(t *testing.T) { + myConnect(t, true, 0) + query("drop table p") // Drop test table if exists + checkResult(t, + query( + "create table p ("+ + " ii int not null, ss varchar(20), dd datetime"+ + ") default charset=utf8", + ), + cmdOK(0, false, true), + ) + + exp := Stmt{ + fields: []*mysql.Field{ + &mysql.Field{ + Catalog: "def", Db: "test", Table: "p", OrgTable: "p", + Name: "i", + OrgName: "ii", + DispLen: 11, + Flags: _FLAG_NO_DEFAULT_VALUE | _FLAG_NOT_NULL, + Type: MYSQL_TYPE_LONG, + Scale: 0, + }, + &mysql.Field{ + Catalog: "def", Db: "test", Table: "p", OrgTable: "p", + Name: "s", + OrgName: "ss", + DispLen: 3 * 20, // varchar(20) + Flags: 0, + Type: MYSQL_TYPE_VAR_STRING, + Scale: 0, + }, + &mysql.Field{ + Catalog: "def", Db: "test", Table: "p", OrgTable: "p", + Name: "d", + OrgName: "dd", + DispLen: 19, + Flags: _FLAG_BINARY, + Type: MYSQL_TYPE_DATETIME, + Scale: 0, + }, + }, + field_count: 3, + param_count: 2, + warning_count: 0, + status: 0x2, + } + + sel := prepare("select ii i, ss s, dd d from p where ii = ? 
and ss = ?") + checkStmt(t, sel, &StmtErr{&exp, nil}) + + all := prepare("select * from p") + checkErr(t, all.err, nil) + + ins := prepare("insert p values (?, ?, ?)") + checkErr(t, ins.err, nil) + + parsed, err := mysql.ParseTime("2012-01-17 01:10:10", time.Local) + checkErr(t, err, nil) + parsedZero, err := mysql.ParseTime("0000-00-00 00:00:00", time.Local) + checkErr(t, err, nil) + if !parsedZero.IsZero() { + t.Fatalf("time '%s' isn't zero", parsedZero) + } + exp_rows := []mysql.Row{ + mysql.Row{ + 2, "Taki tekst", time.Unix(123456789, 0), + }, + mysql.Row{ + 5, "PÄ…k róży", parsed, + }, + mysql.Row{ + -3, "基础体温", parsed, + }, + mysql.Row{ + 11, "Zero UTC datetime", time.Unix(0, 0), + }, + mysql.Row{ + 17, mysql.Blob([]byte("Zero datetime")), parsedZero, + }, + mysql.Row{ + 23, []byte("NULL datetime"), (*time.Time)(nil), + }, + mysql.Row{ + 23, "NULL", nil, + }, + } + + for _, row := range exp_rows { + checkErrWarn(t, + exec(ins.stmt, row[0], row[1], row[2]), + cmdOK(1, true, true), + ) + } + + // Convert values to expected result types + for _, row := range exp_rows { + for ii, col := range row { + val := reflect.ValueOf(col) + // Dereference pointers + if val.Kind() == reflect.Ptr { + val = val.Elem() + } + switch val.Kind() { + case reflect.Invalid: + row[ii] = nil + + case reflect.String: + row[ii] = []byte(val.String()) + + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, + reflect.Int64: + row[ii] = int32(val.Int()) + + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, + reflect.Uint64: + row[ii] = int32(val.Uint()) + + case reflect.Slice: + if val.Type().Elem().Kind() == reflect.Uint8 { + bytes := make([]byte, val.Len()) + for ii := range bytes { + bytes[ii] = val.Index(ii).Interface().(uint8) + } + row[ii] = bytes + } + } + } + } + + checkErrWarn(t, exec(sel.stmt, 2, "Taki tekst"), selectOK(exp_rows, true)) + checkErrWarnRows(t, exec(all.stmt), selectOK(exp_rows, true)) + + checkResult(t, query("drop table p"), cmdOK(0, false, true)) + + checkErr(t, sel.stmt.Delete(), nil) + checkErr(t, all.stmt.Delete(), nil) + checkErr(t, ins.stmt.Delete(), nil) + + myClose(t) +} + +// Bind testing + +func TestVarBinding(t *testing.T) { + myConnect(t, true, 0) + query("drop table t") // Drop test table if exists + checkResult(t, + query("create table t (id int primary key, str varchar(20))"), + cmdOK(0, false, true), + ) + + ins, err := my.Prepare("insert t values (?, ?)") + checkErr(t, err, nil) + + var ( + rre RowsResErr + id *int + str *string + ii int + ss string + ) + ins.Bind(&id, &str) + + i1 := 1 + s1 := "Ala" + id = &i1 + str = &s1 + rre.res, rre.err = ins.Run() + checkResult(t, &rre, cmdOK(1, true, false)) + + i2 := 2 + s2 := "Ma kota!" + id = &i2 + str = &s2 + + rre.res, rre.err = ins.Run() + checkResult(t, &rre, cmdOK(1, true, false)) + + ins.Bind(&ii, &ss) + ii = 3 + ss = "A kot ma Ale!" 
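+	// Rebinding replaces the previously bound addresses: from here on Run
+	// reads its parameter values through &ii and &ss rather than &id and &str.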
+ + rre.res, rre.err = ins.Run() + checkResult(t, &rre, cmdOK(1, true, false)) + + sel, err := my.Prepare("select str from t where id = ?") + checkErr(t, err, nil) + + rows, _, err := sel.Exec(1) + checkErr(t, err, nil) + if len(rows) != 1 || bytes.Compare([]byte(s1), rows[0].Bin(0)) != 0 { + t.Fatal("First string don't match") + } + + rows, _, err = sel.Exec(2) + checkErr(t, err, nil) + if len(rows) != 1 || bytes.Compare([]byte(s2), rows[0].Bin(0)) != 0 { + t.Fatal("Second string don't match") + } + + rows, _, err = sel.Exec(3) + checkErr(t, err, nil) + if len(rows) != 1 || bytes.Compare([]byte(ss), rows[0].Bin(0)) != 0 { + t.Fatal("Thrid string don't match") + } + + checkResult(t, query("drop table t"), cmdOK(0, false, true)) + myClose(t) +} + +func TestBindStruct(t *testing.T) { + myConnect(t, true, 0) + query("drop table t") // Drop test table if exists + checkResult(t, + query("create table t (id int primary key, txt varchar(20), b bool)"), + cmdOK(0, false, true), + ) + + ins, err := my.Prepare("insert t values (?, ?, ?)") + checkErr(t, err, nil) + sel, err := my.Prepare("select txt, b from t where id = ?") + checkErr(t, err, nil) + + var ( + s struct { + Id int + Txt string + B bool + } + rre RowsResErr + ) + + ins.Bind(&s) + + s.Id = 2 + s.Txt = "Ala ma kota." + s.B = true + + rre.res, rre.err = ins.Run() + checkResult(t, &rre, cmdOK(1, true, false)) + + rows, _, err := sel.Exec(s.Id) + checkErr(t, err, nil) + if len(rows) != 1 || rows[0].Str(0) != s.Txt || rows[0].Bool(1) != s.B { + t.Fatal("selected data don't match inserted data") + } + + checkResult(t, query("drop table t"), cmdOK(0, false, true)) + myClose(t) +} + +func TestDate(t *testing.T) { + myConnect(t, true, 0) + query("drop table d") // Drop test table if exists + checkResult(t, + query("create table d (id int, dd date, dt datetime, tt time)"), + cmdOK(0, false, true), + ) + + test := []struct { + dd, dt string + tt time.Duration + }{ + { + "2011-11-13", + "2010-12-12 11:24:00", + -time.Duration((128*3600 + 3*60 + 2) * 1e9), + }, { + "0000-00-00", + "0000-00-00 00:00:00", + time.Duration(0), + }, + } + + ins, err := my.Prepare("insert d values (?, ?, ?, ?)") + checkErr(t, err, nil) + + sel, err := my.Prepare("select id, tt from d where dd = ? 
&& dt = ?") + checkErr(t, err, nil) + + for i, r := range test { + _, err = ins.Run(i, r.dd, r.dt, r.tt) + checkErr(t, err, nil) + + sdt, err := mysql.ParseTime(r.dt, time.Local) + checkErr(t, err, nil) + sdd, err := mysql.ParseDate(r.dd) + checkErr(t, err, nil) + + rows, _, err := sel.Exec(sdd, sdt) + checkErr(t, err, nil) + if rows == nil { + t.Fatal("nil result") + } + if rows[0].Int(0) != i { + t.Fatal("Bad id", rows[0].Int(1)) + } + if rows[0][1].(time.Duration) != r.tt { + t.Fatal("Bad tt", rows[0].Duration(1)) + } + } + + checkResult(t, query("drop table d"), cmdOK(0, false, true)) + myClose(t) +} + +func TestDateTimeZone(t *testing.T) { + myConnect(t, true, 0) + query("drop table d") // Drop test table if exists + checkResult(t, + query("create table d (dt datetime)"), + cmdOK(0, false, true), + ) + + ins, err := my.Prepare("insert d values (?)") + checkErr(t, err, nil) + + sel, err := my.Prepare("select dt from d") + checkErr(t, err, nil) + + tstr := "2013-05-10 15:26:00.000000000" + + _, err = ins.Run(tstr) + checkErr(t, err, nil) + + tt := make([]time.Time, 4) + + row, _, err := sel.ExecFirst() + checkErr(t, err, nil) + tt[0] = row.Time(0, time.UTC) + tt[1] = row.Time(0, time.Local) + row, _, err = my.QueryFirst("select dt from d") + checkErr(t, err, nil) + tt[2] = row.Time(0, time.UTC) + tt[3] = row.Time(0, time.Local) + for _, v := range tt { + if v.Format(mysql.TimeFormat) != tstr { + t.Fatal("Timezone problem:", tstr, "!=", v) + } + } + + checkResult(t, query("drop table d"), cmdOK(0, false, true)) + myClose(t) +} + +// Big blob +func TestBigBlob(t *testing.T) { + myConnect(t, true, 34*1024*1024) + query("drop table p") // Drop test table if exists + checkResult(t, + query("create table p (id int primary key, bb longblob)"), + cmdOK(0, false, true), + ) + + ins, err := my.Prepare("insert p values (?, ?)") + checkErr(t, err, nil) + + sel, err := my.Prepare("select bb from p where id = ?") + checkErr(t, err, nil) + + big_blob := make(mysql.Blob, 33*1024*1024) + for ii := range big_blob { + big_blob[ii] = byte(ii) + } + + var ( + rre RowsResErr + bb mysql.Blob + id int + ) + data := struct { + Id int + Bb mysql.Blob + }{} + + // Individual parameters binding + ins.Bind(&id, &bb) + id = 1 + bb = big_blob + + // Insert full blob. Three packets are sended. First two has maximum length + rre.res, rre.err = ins.Run() + checkResult(t, &rre, cmdOK(1, true, false)) + + // Struct binding + ins.Bind(&data) + data.Id = 2 + data.Bb = big_blob[0 : 32*1024*1024-31] + + // Insert part of blob - Two packets are sended. All has maximum length. + rre.res, rre.err = ins.Run() + checkResult(t, &rre, cmdOK(1, true, false)) + + sel.Bind(&id) + + // Check first insert. + tmr := "Too many rows" + + id = 1 + res, err := sel.Run() + checkErr(t, err, nil) + + row, err := res.GetRow() + checkErr(t, err, nil) + end, err := res.GetRow() + checkErr(t, err, nil) + if end != nil { + t.Fatal(tmr) + } + + if bytes.Compare(row[0].([]byte), big_blob) != 0 { + t.Fatal("Full blob data don't match") + } + + // Check second insert. 
+ id = 2 + res, err = sel.Run() + checkErr(t, err, nil) + + row, err = res.GetRow() + checkErr(t, err, nil) + end, err = res.GetRow() + checkErr(t, err, nil) + if end != nil { + t.Fatal(tmr) + } + + if bytes.Compare(row.Bin(res.Map("bb")), data.Bb) != 0 { + t.Fatal("Partial blob data don't match") + } + + checkResult(t, query("drop table p"), cmdOK(0, false, true)) + myClose(t) +} + +// Test for empty result +func TestEmpty(t *testing.T) { + checkNil := func(r mysql.Row) { + if r != nil { + t.Error("Not empty result") + } + } + myConnect(t, true, 0) + query("drop table e") // Drop test table if exists + // Create table + checkResult(t, + query("create table e (id int)"), + cmdOK(0, false, true), + ) + // Text query + res, err := my.Start("select * from e") + checkErr(t, err, nil) + row, err := res.GetRow() + checkErr(t, err, nil) + checkNil(row) + row, err = res.GetRow() + checkErr(t, err, mysql.ErrReadAfterEOR) + checkNil(row) + // Prepared statement + sel, err := my.Prepare("select * from e") + checkErr(t, err, nil) + res, err = sel.Run() + checkErr(t, err, nil) + row, err = res.GetRow() + checkErr(t, err, nil) + checkNil(row) + row, err = res.GetRow() + checkErr(t, err, mysql.ErrReadAfterEOR) + checkNil(row) + // Drop test table + checkResult(t, query("drop table e"), cmdOK(0, false, true)) +} + +// Reconnect test +func TestReconnect(t *testing.T) { + myConnect(t, true, 0) + query("drop table r") // Drop test table if exists + checkResult(t, + query("create table r (id int primary key, str varchar(20))"), + cmdOK(0, false, true), + ) + + ins, err := my.Prepare("insert r values (?, ?)") + checkErr(t, err, nil) + sel, err := my.Prepare("select str from r where id = ?") + checkErr(t, err, nil) + + params := struct { + Id int + Str string + }{} + var sel_id int + + ins.Bind(¶ms) + sel.Bind(&sel_id) + + checkErr(t, my.Reconnect(), nil) + + params.Id = 1 + params.Str = "Bla bla bla" + _, err = ins.Run() + checkErr(t, err, nil) + + checkErr(t, my.Reconnect(), nil) + + sel_id = 1 + res, err := sel.Run() + checkErr(t, err, nil) + + row, err := res.GetRow() + checkErr(t, err, nil) + + checkErr(t, res.End(), nil) + + if row == nil || row[0] == nil || + params.Str != row.Str(0) { + t.Fatal("Bad result") + } + + checkErr(t, my.Reconnect(), nil) + + checkResult(t, query("drop table r"), cmdOK(0, false, true)) + myClose(t) +} + +// StmtSendLongData test + +func TestSendLongData(t *testing.T) { + myConnect(t, true, 64*1024*1024) + query("drop table l") // Drop test table if exists + checkResult(t, + query("create table l (id int primary key, bb longblob)"), + cmdOK(0, false, true), + ) + ins, err := my.Prepare("insert l values (?, ?)") + checkErr(t, err, nil) + + sel, err := my.Prepare("select bb from l where id = ?") + checkErr(t, err, nil) + + var ( + rre RowsResErr + id int64 + ) + + ins.Bind(&id, []byte(nil)) + sel.Bind(&id) + + // Prepare data + data := make([]byte, 4*1024*1024) + for ii := range data { + data[ii] = byte(ii) + } + // Send long data twice + checkErr(t, ins.SendLongData(1, data, 256*1024), nil) + checkErr(t, ins.SendLongData(1, data, 512*1024), nil) + + id = 1 + rre.res, rre.err = ins.Run() + checkResult(t, &rre, cmdOK(1, true, false)) + + res, err := sel.Run() + checkErr(t, err, nil) + + row, err := res.GetRow() + checkErr(t, err, nil) + + checkErr(t, res.End(), nil) + + if row == nil || row[0] == nil || + bytes.Compare(append(data, data...), row.Bin(0)) != 0 { + t.Fatal("Bad result") + } + + file, err := ioutil.TempFile("", "mymysql_test-") + checkErr(t, err, nil) + filename := 
file.Name() + defer os.Remove(filename) + + buf := make([]byte, 1024) + for i := 0; i < 2048; i++ { + _, err := file.Write(buf) + checkErr(t, err, nil) + } + checkErr(t, file.Close(), nil) + + // Send long data from io.Reader twice + file, err = os.Open(filename) + checkErr(t, err, nil) + checkErr(t, ins.SendLongData(1, file, 128*1024), nil) + checkErr(t, file.Close(), nil) + file, err = os.Open(filename) + checkErr(t, err, nil) + checkErr(t, ins.SendLongData(1, file, 1024*1024), nil) + checkErr(t, file.Close(), nil) + + id = 2 + rre.res, rre.err = ins.Run() + checkResult(t, &rre, cmdOK(1, true, false)) + + res, err = sel.Run() + checkErr(t, err, nil) + + row, err = res.GetRow() + checkErr(t, err, nil) + + checkErr(t, res.End(), nil) + + // Read file for check result + data, err = ioutil.ReadFile(filename) + checkErr(t, err, nil) + + if row == nil || row[0] == nil || + bytes.Compare(append(data, data...), row.Bin(0)) != 0 { + t.Fatal("Bad result") + } + + checkResult(t, query("drop table l"), cmdOK(0, false, true)) + myClose(t) +} + +func TestNull(t *testing.T) { + myConnect(t, true, 0) + query("drop table if exists n") + checkResult(t, + query("create table n (i int not null, n int)"), + cmdOK(0, false, true), + ) + ins, err := my.Prepare("insert n values (?, ?)") + checkErr(t, err, nil) + + var ( + p struct{ I, N *int } + rre RowsResErr + ) + ins.Bind(&p) + + p.I = new(int) + p.N = new(int) + + *p.I = 0 + *p.N = 1 + rre.res, rre.err = ins.Run() + checkResult(t, &rre, cmdOK(1, true, false)) + *p.I = 1 + p.N = nil + rre.res, rre.err = ins.Run() + checkResult(t, &rre, cmdOK(1, true, false)) + + checkResult(t, query("insert n values (2, 1)"), cmdOK(1, false, true)) + checkResult(t, query("insert n values (3, NULL)"), cmdOK(1, false, true)) + + rows, res, err := my.Query("select * from n") + checkErr(t, err, nil) + if len(rows) != 4 { + t.Fatal("str: len(rows) != 4") + } + i := res.Map("i") + n := res.Map("n") + for k, row := range rows { + switch { + case row[i] == nil || row.Int(i) != k: + case k%2 == 1 && row[n] != nil: + case k%2 == 0 && (row[n] == nil || row.Int(n) != 1): + default: + continue + } + t.Fatalf("str row: %d = (%s, %s)", k, row[i], row[n]) + } + + sel, err := my.Prepare("select * from n") + checkErr(t, err, nil) + rows, res, err = sel.Exec() + checkErr(t, err, nil) + if len(rows) != 4 { + t.Fatal("bin: len(rows) != 4") + } + i = res.Map("i") + n = res.Map("n") + for k, row := range rows { + switch { + case row[i] == nil || row.Int(i) != k: + case k%2 == 1 && row[n] != nil: + case k%2 == 0 && (row[n] == nil || row.Int(n) != 1): + default: + continue + } + t.Fatalf("bin row: %d = (%v, %v)", k, row[i], row[n]) + } + + checkResult(t, query("drop table n"), cmdOK(0, false, true)) +} + +func TestMultipleResults(t *testing.T) { + myConnect(t, true, 0) + query("drop table m") // Drop test table if exists + checkResult(t, + query("create table m (id int primary key, str varchar(20))"), + cmdOK(0, false, true), + ) + + str := []string{"zero", "jeden", "dwa"} + + checkResult(t, query("insert m values (0, '%s')", str[0]), + cmdOK(1, false, true)) + checkResult(t, query("insert m values (1, '%s')", str[1]), + cmdOK(1, false, true)) + checkResult(t, query("insert m values (2, '%s')", str[2]), + cmdOK(1, false, true)) + + res, err := my.Start("select id from m; select str from m") + checkErr(t, err, nil) + + for ii := 0; ; ii++ { + row, err := res.GetRow() + checkErr(t, err, nil) + if row == nil { + break + } + if row.Int(0) != ii { + t.Fatal("Bad result") + } + } + res, err = 
res.NextResult() + checkErr(t, err, nil) + for ii := 0; ; ii++ { + row, err := res.GetRow() + checkErr(t, err, nil) + if row == nil { + break + } + if row.Str(0) != str[ii] { + t.Fatal("Bad result") + } + } + + checkResult(t, query("drop table m"), cmdOK(0, false, true)) + myClose(t) +} + +func TestDecimal(t *testing.T) { + myConnect(t, true, 0) + + query("drop table if exists d") + checkResult(t, + query("create table d (d decimal(4,2))"), + cmdOK(0, false, true), + ) + + checkResult(t, query("insert d values (10.01)"), cmdOK(1, false, true)) + sql := "select * from d" + sel, err := my.Prepare(sql) + checkErr(t, err, nil) + rows, res, err := sel.Exec() + checkErr(t, err, nil) + if len(rows) != 1 || rows[0][res.Map("d")].(float64) != 10.01 { + t.Fatal(sql) + } + + checkResult(t, query("drop table d"), cmdOK(0, false, true)) + myClose(t) +} + +func TestMediumInt(t *testing.T) { + myConnect(t, true, 0) + query("DROP TABLE mi") + checkResult(t, + query( + `CREATE TABLE mi ( + id INT PRIMARY KEY AUTO_INCREMENT, + m MEDIUMINT + )`, + ), + cmdOK(0, false, true), + ) + + const n = 9 + + for i := 0; i < n; i++ { + res, err := my.Start("INSERT mi VALUES (0, %d)", i) + checkErr(t, err, nil) + if res.InsertId() != uint64(i+1) { + t.Fatalf("Wrong insert id: %d, expected: %d", res.InsertId(), i+1) + } + } + + sel, err := my.Prepare("SELECT * FROM mi") + checkErr(t, err, nil) + + res, err := sel.Run() + checkErr(t, err, nil) + + i := 0 + for { + row, err := res.GetRow() + checkErr(t, err, nil) + if row == nil { + break + } + id, m := row.Int(0), row.Int(1) + if id != i+1 || m != i { + t.Fatalf("i=%d id=%d m=%d", i, id, m) + } + i++ + } + if i != n { + t.Fatalf("%d rows read, %d expected", i, n) + } + checkResult(t, query("drop table mi"), cmdOK(0, false, true)) +} + +func TestStoredProcedures(t *testing.T) { + myConnect(t, true, 0) + query("DROP PROCEDURE pr") + query("DROP TABLE p") + checkResult(t, + query( + `CREATE TABLE p ( + id INT PRIMARY KEY AUTO_INCREMENT, + txt VARCHAR(8) + )`, + ), + cmdOK(0, false, true), + ) + _, err := my.Start( + `CREATE PROCEDURE pr (IN i INT) + BEGIN + INSERT p VALUES (0, "aaa"); + SELECT * FROM p; + SELECT i * id FROM p; + END`, + ) + checkErr(t, err, nil) + + res, err := my.Start("CALL pr(3)") + checkErr(t, err, nil) + + rows, err := res.GetRows() + checkErr(t, err, nil) + if len(rows) != 1 || len(rows[0]) != 2 || rows[0].Int(0) != 1 || rows[0].Str(1) != "aaa" { + t.Fatalf("Bad result set: %+v", rows) + } + + res, err = res.NextResult() + checkErr(t, err, nil) + + rows, err = res.GetRows() + checkErr(t, err, nil) + if len(rows) != 1 || len(rows[0]) != 1 || rows[0].Int(0) != 3 { + t.Fatalf("Bad result set: %+v", rows) + } + + res, err = res.NextResult() + checkErr(t, err, nil) + if !res.StatusOnly() { + t.Fatalf("Result includes resultset at end of procedure: %+v", res) + } + + _, err = my.Start("DROP PROCEDURE pr") + checkErr(t, err, nil) + + checkResult(t, query("DROP TABLE p"), cmdOK(0, false, true)) +} + +// Benchamrks + +func check(err error) { + if err != nil { + fmt.Println(err) + os.Exit(1) + } +} + +func BenchmarkInsertSelect(b *testing.B) { + b.StopTimer() + + my := New(conn[0], conn[1], conn[2], user, passwd, dbname) + check(my.Connect()) + + my.Start("drop table b") // Drop test table if exists + + _, err := my.Start("create table b (s varchar(40), i int)") + check(err) + + for ii := 0; ii < 10000; ii++ { + _, err := my.Start("insert b values ('%d-%d-%d', %d)", ii, ii, ii, ii) + check(err) + } + + b.StartTimer() + + for ii := 0; ii < b.N; ii++ { + res, err 
:= my.Start("select * from b")
+		check(err)
+		for {
+			row, err := res.GetRow()
+			check(err)
+			if row == nil {
+				break
+			}
+		}
+	}
+
+	b.StopTimer()
+
+	_, err = my.Start("drop table b")
+	check(err)
+	check(my.Close())
+}
+
+func BenchmarkPreparedInsertSelect(b *testing.B) {
+	b.StopTimer()
+
+	my := New(conn[0], conn[1], conn[2], user, passwd, dbname)
+	check(my.Connect())
+
+	my.Start("drop table b") // Drop test table if exists
+
+	_, err := my.Start("create table b (s varchar(40), i int)")
+	check(err)
+
+	ins, err := my.Prepare("insert b values (?, ?)")
+	check(err)
+
+	sel, err := my.Prepare("select * from b")
+	check(err)
+
+	for ii := 0; ii < 10000; ii++ {
+		_, err := ins.Run(fmt.Sprintf("%d-%d-%d", ii, ii, ii), ii)
+		check(err)
+	}
+
+	b.StartTimer()
+
+	for ii := 0; ii < b.N; ii++ {
+		res, err := sel.Run()
+		check(err)
+		for {
+			row, err := res.GetRow()
+			check(err)
+			if row == nil {
+				break
+			}
+		}
+	}
+
+	b.StopTimer()
+
+	_, err = my.Start("drop table b")
+	check(err)
+	check(my.Close())
+}
diff --git a/vendor/github.com/ziutek/mymysql/native/packet.go b/vendor/github.com/ziutek/mymysql/native/packet.go
new file mode 100644
index 000000000..a99700cf5
--- /dev/null
+++ b/vendor/github.com/ziutek/mymysql/native/packet.go
@@ -0,0 +1,250 @@
+package native
+
+import (
+	"bufio"
+	"github.com/ziutek/mymysql/mysql"
+	"io"
+	"io/ioutil"
+)
+
+type pktReader struct {
+	rd     *bufio.Reader
+	seq    *byte
+	remain int
+	last   bool
+	buf    [12]byte
+	ibuf   [3]byte
+}
+
+func (my *Conn) newPktReader() *pktReader {
+	return &pktReader{rd: my.rd, seq: &my.seq}
+}
+
+func (pr *pktReader) readHeader() {
+	// Read next packet header
+	buf := pr.ibuf[:]
+	for {
+		n, err := pr.rd.Read(buf)
+		if err != nil {
+			panic(err)
+		}
+		buf = buf[n:]
+		if len(buf) == 0 {
+			break
+		}
+	}
+	pr.remain = int(DecodeU24(pr.ibuf[:]))
+	seq, err := pr.rd.ReadByte()
+	if err != nil {
+		panic(err)
+	}
+	// Check sequence number
+	if *pr.seq != seq {
+		panic(mysql.ErrSeq)
+	}
+	*pr.seq++
+	// Last packet?
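+	// A payload length of 0xffffff means the logical packet continues in
+	// the next wire packet; any shorter payload marks the final fragment.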
+ pr.last = (pr.remain != 0xffffff) +} + +func (pr *pktReader) readFull(buf []byte) { + for len(buf) > 0 { + if pr.remain == 0 { + if pr.last { + // No more packets + panic(io.EOF) + } + pr.readHeader() + } + n := len(buf) + if n > pr.remain { + n = pr.remain + } + n, err := pr.rd.Read(buf[:n]) + pr.remain -= n + if err != nil { + panic(err) + } + buf = buf[n:] + } + return +} + +func (pr *pktReader) readByte() byte { + if pr.remain == 0 { + if pr.last { + // No more packets + panic(io.EOF) + } + pr.readHeader() + } + b, err := pr.rd.ReadByte() + if err != nil { + panic(err) + } + pr.remain-- + return b +} + +func (pr *pktReader) readAll() (buf []byte) { + m := 0 + for { + if pr.remain == 0 { + if pr.last { + break + } + pr.readHeader() + } + new_buf := make([]byte, m+pr.remain) + copy(new_buf, buf) + buf = new_buf + n, err := pr.rd.Read(buf[m:]) + pr.remain -= n + m += n + if err != nil { + panic(err) + } + } + return +} + +func (pr *pktReader) skipAll() { + for { + if pr.remain == 0 { + if pr.last { + break + } + pr.readHeader() + } + n, err := io.CopyN(ioutil.Discard, pr.rd, int64(pr.remain)) + pr.remain -= int(n) + if err != nil { + panic(err) + } + } + return +} + +func (pr *pktReader) skipN(n int) { + for n > 0 { + if pr.remain == 0 { + if pr.last { + panic(io.EOF) + } + pr.readHeader() + } + m := int64(n) + if n > pr.remain { + m = int64(pr.remain) + } + m, err := io.CopyN(ioutil.Discard, pr.rd, m) + pr.remain -= int(m) + n -= int(m) + if err != nil { + panic(err) + } + } + return +} + +func (pr *pktReader) unreadByte() { + if err := pr.rd.UnreadByte(); err != nil { + panic(err) + } + pr.remain++ +} + +func (pr *pktReader) eof() bool { + return pr.remain == 0 && pr.last +} + +func (pr *pktReader) checkEof() { + if !pr.eof() { + panic(mysql.ErrPktLong) + } +} + +type pktWriter struct { + wr *bufio.Writer + seq *byte + remain int + to_write int + last bool + buf [23]byte + ibuf [3]byte +} + +func (my *Conn) newPktWriter(to_write int) *pktWriter { + return &pktWriter{wr: my.wr, seq: &my.seq, to_write: to_write} +} + +func (pw *pktWriter) writeHeader(l int) { + buf := pw.ibuf[:] + EncodeU24(buf, uint32(l)) + if _, err := pw.wr.Write(buf); err != nil { + panic(err) + } + if err := pw.wr.WriteByte(*pw.seq); err != nil { + panic(err) + } + // Update sequence number + *pw.seq++ +} + +func (pw *pktWriter) write(buf []byte) { + if len(buf) == 0 { + return + } + var nn int + for len(buf) != 0 { + if pw.remain == 0 { + if pw.to_write == 0 { + panic("too many data for write as packet") + } + if pw.to_write >= 0xffffff { + pw.remain = 0xffffff + } else { + pw.remain = pw.to_write + pw.last = true + } + pw.to_write -= pw.remain + pw.writeHeader(pw.remain) + } + nn = len(buf) + if nn > pw.remain { + nn = pw.remain + } + var err error + nn, err = pw.wr.Write(buf[0:nn]) + pw.remain -= nn + if err != nil { + panic(err) + } + buf = buf[nn:] + } + if pw.remain+pw.to_write == 0 { + if !pw.last { + // Write header for empty packet + pw.writeHeader(0) + } + // Flush bufio buffers + if err := pw.wr.Flush(); err != nil { + panic(err) + } + } + return +} + +func (pw *pktWriter) writeByte(b byte) { + pw.buf[0] = b + pw.write(pw.buf[:1]) +} + +// n should be <= 23 +func (pw *pktWriter) writeZeros(n int) { + buf := pw.buf[:n] + for i := range buf { + buf[i] = 0 + } + pw.write(buf) +} diff --git a/vendor/github.com/ziutek/mymysql/native/paramvalue.go b/vendor/github.com/ziutek/mymysql/native/paramvalue.go new file mode 100644 index 000000000..a83fc6648 --- /dev/null +++ 
b/vendor/github.com/ziutek/mymysql/native/paramvalue.go @@ -0,0 +1,161 @@ +package native + +import ( + "github.com/ziutek/mymysql/mysql" + "math" + "reflect" + "time" +) + +type paramValue struct { + typ uint16 + addr reflect.Value + raw bool + length int // >=0 - length of value, <0 - unknown length +} + +func (val *paramValue) Len() int { + if !val.addr.IsValid() { + // Invalid Value was binded + return 0 + } + // val.addr always points to the pointer - lets dereference it + v := val.addr.Elem() + if v.IsNil() { + // Binded Ptr Value is nil + return 0 + } + v = v.Elem() + + if val.length >= 0 { + return val.length + } + + switch val.typ { + case MYSQL_TYPE_STRING: + return lenStr(v.String()) + + case MYSQL_TYPE_DATE: + return lenDate(v.Interface().(mysql.Date)) + + case MYSQL_TYPE_TIMESTAMP: + return lenTime(v.Interface().(mysql.Timestamp).Time) + case MYSQL_TYPE_DATETIME: + return lenTime(v.Interface().(time.Time)) + + case MYSQL_TYPE_TIME: + return lenDuration(v.Interface().(time.Duration)) + + case MYSQL_TYPE_TINY: // val.length < 0 so this is bool + return 1 + } + // MYSQL_TYPE_VAR_STRING, MYSQL_TYPE_BLOB and type of Raw value + return lenBin(v.Bytes()) +} + +func (pw *pktWriter) writeValue(val *paramValue) { + if !val.addr.IsValid() { + // Invalid Value was binded + return + } + // val.addr always points to the pointer - lets dereference it + v := val.addr.Elem() + if v.IsNil() { + // Binded Ptr Value is nil + return + } + v = v.Elem() + + if val.raw || val.typ == MYSQL_TYPE_VAR_STRING || + val.typ == MYSQL_TYPE_BLOB { + pw.writeBin(v.Bytes()) + return + } + // We don't need unsigned bit to check type + unsign := (val.typ & MYSQL_UNSIGNED_MASK) != 0 + switch val.typ & ^MYSQL_UNSIGNED_MASK { + case MYSQL_TYPE_NULL: + // Don't write null values + + case MYSQL_TYPE_STRING: + pw.writeBin([]byte(v.String())) + + case MYSQL_TYPE_LONG: + i := v.Interface() + if unsign { + l, ok := i.(uint32) + if !ok { + l = uint32(i.(uint)) + } + pw.writeU32(l) + } else { + l, ok := i.(int32) + if !ok { + l = int32(i.(int)) + } + pw.writeU32(uint32(l)) + } + + case MYSQL_TYPE_FLOAT: + pw.writeU32(math.Float32bits(v.Interface().(float32))) + + case MYSQL_TYPE_SHORT: + if unsign { + pw.writeU16(v.Interface().(uint16)) + } else { + pw.writeU16(uint16(v.Interface().(int16))) + + } + + case MYSQL_TYPE_TINY: + if val.length == -1 { + // Translate bool value to MySQL tiny + if v.Bool() { + pw.writeByte(1) + } else { + pw.writeByte(0) + } + } else { + if unsign { + pw.writeByte(v.Interface().(uint8)) + } else { + pw.writeByte(uint8(v.Interface().(int8))) + } + } + + case MYSQL_TYPE_LONGLONG: + i := v.Interface() + if unsign { + l, ok := i.(uint64) + if !ok { + l = uint64(i.(uint)) + } + pw.writeU64(l) + } else { + l, ok := i.(int64) + if !ok { + l = int64(i.(int)) + } + pw.writeU64(uint64(l)) + } + + case MYSQL_TYPE_DOUBLE: + pw.writeU64(math.Float64bits(v.Interface().(float64))) + + case MYSQL_TYPE_DATE: + pw.writeDate(v.Interface().(mysql.Date)) + + case MYSQL_TYPE_TIMESTAMP: + pw.writeTime(v.Interface().(mysql.Timestamp).Time) + + case MYSQL_TYPE_DATETIME: + pw.writeTime(v.Interface().(time.Time)) + + case MYSQL_TYPE_TIME: + pw.writeDuration(v.Interface().(time.Duration)) + + default: + panic(mysql.ErrBindUnkType) + } + return +} diff --git a/vendor/github.com/ziutek/mymysql/native/passwd.go b/vendor/github.com/ziutek/mymysql/native/passwd.go new file mode 100644 index 000000000..3691ffc21 --- /dev/null +++ b/vendor/github.com/ziutek/mymysql/native/passwd.go @@ -0,0 +1,108 @@ +package native + +import ( + 
"crypto/sha1" + "math" +) + +// Borrowed from GoMySQL +// SHA1(SHA1(SHA1(password)), scramble) XOR SHA1(password) +func encryptedPasswd(password string, scramble []byte) (out []byte) { + if len(password) == 0 { + return + } + // stage1_hash = SHA1(password) + // SHA1 encode + crypt := sha1.New() + crypt.Write([]byte(password)) + stg1Hash := crypt.Sum(nil) + // token = SHA1(SHA1(stage1_hash), scramble) XOR stage1_hash + // SHA1 encode again + crypt.Reset() + crypt.Write(stg1Hash) + stg2Hash := crypt.Sum(nil) + // SHA1 2nd hash and scramble + crypt.Reset() + crypt.Write(scramble) + crypt.Write(stg2Hash) + stg3Hash := crypt.Sum(nil) + // XOR with first hash + out = make([]byte, len(scramble)) + for ii := range scramble { + out[ii] = stg3Hash[ii] ^ stg1Hash[ii] + } + return +} + +// Old password handling based on translating to Go some functions from +// libmysql + +// The main idea is that no password are sent between client & server on +// connection and that no password are saved in mysql in a decodable form. +// +// On connection a random string is generated and sent to the client. +// The client generates a new string with a random generator inited with +// the hash values from the password and the sent string. +// This 'check' string is sent to the server where it is compared with +// a string generated from the stored hash_value of the password and the +// random string. + +// libmysql/my_rnd.c +type myRnd struct { + seed1, seed2 uint32 +} + +const myRndMaxVal = 0x3FFFFFFF + +func newMyRnd(seed1, seed2 uint32) *myRnd { + r := new(myRnd) + r.seed1 = seed1 % myRndMaxVal + r.seed2 = seed2 % myRndMaxVal + return r +} + +func (r *myRnd) Float64() float64 { + r.seed1 = (r.seed1*3 + r.seed2) % myRndMaxVal + r.seed2 = (r.seed1 + r.seed2 + 33) % myRndMaxVal + return float64(r.seed1) / myRndMaxVal +} + +// libmysql/password.c +func pwHash(password []byte) (result [2]uint32) { + var nr, add, nr2, tmp uint32 + nr, add, nr2 = 1345345333, 7, 0x12345671 + + for _, c := range password { + if c == ' ' || c == '\t' { + continue // skip space in password + } + + tmp = uint32(c) + nr ^= (((nr & 63) + add) * tmp) + (nr << 8) + nr2 += (nr2 << 8) ^ nr + add += tmp + } + + result[0] = nr & ((1 << 31) - 1) // Don't use sign bit (str2int) + result[1] = nr2 & ((1 << 31) - 1) + return +} + +func encryptedOldPassword(password string, scramble []byte) []byte { + if len(password) == 0 { + return nil + } + scramble = scramble[:8] + hashPw := pwHash([]byte(password)) + hashSc := pwHash(scramble) + r := newMyRnd(hashPw[0]^hashSc[0], hashPw[1]^hashSc[1]) + var out [8]byte + for i := range out { + out[i] = byte(math.Floor(r.Float64()*31) + 64) + } + extra := byte(math.Floor(r.Float64() * 31)) + for i := range out { + out[i] ^= extra + } + return out[:] +} diff --git a/vendor/github.com/ziutek/mymysql/native/prepared.go b/vendor/github.com/ziutek/mymysql/native/prepared.go new file mode 100644 index 000000000..90eb43509 --- /dev/null +++ b/vendor/github.com/ziutek/mymysql/native/prepared.go @@ -0,0 +1,163 @@ +package native + +import ( + "github.com/ziutek/mymysql/mysql" + "log" +) + +type Stmt struct { + my *Conn + + id uint32 + sql string // For reprepare during reconnect + + params []paramValue // Parameters binding + rebind bool + binded bool + + fields []*mysql.Field + + field_count int + param_count int + warning_count int + status mysql.ConnStatus + + null_bitmap []byte +} + +func (stmt *Stmt) Fields() []*mysql.Field { + return stmt.fields +} + +func (stmt *Stmt) NumParam() int { + return stmt.param_count +} + +func 
(stmt *Stmt) WarnCount() int { + return stmt.warning_count +} + +func (stmt *Stmt) sendCmdExec() { + // Calculate packet length and NULL bitmap + pkt_len := 1 + 4 + 1 + 4 + 1 + len(stmt.null_bitmap) + for ii := range stmt.null_bitmap { + stmt.null_bitmap[ii] = 0 + } + for ii, param := range stmt.params { + par_len := param.Len() + pkt_len += par_len + if par_len == 0 { + null_byte := ii >> 3 + null_mask := byte(1) << uint(ii-(null_byte<<3)) + stmt.null_bitmap[null_byte] |= null_mask + } + } + if stmt.rebind { + pkt_len += stmt.param_count * 2 + } + // Reset sequence number + stmt.my.seq = 0 + // Packet sending + pw := stmt.my.newPktWriter(pkt_len) + pw.writeByte(_COM_STMT_EXECUTE) + pw.writeU32(stmt.id) + pw.writeByte(0) // flags = CURSOR_TYPE_NO_CURSOR + pw.writeU32(1) // iteration_count + pw.write(stmt.null_bitmap) + if stmt.rebind { + pw.writeByte(1) + // Types + for _, param := range stmt.params { + pw.writeU16(param.typ) + } + } else { + pw.writeByte(0) + } + // Values + for i := range stmt.params { + pw.writeValue(&stmt.params[i]) + } + + if stmt.my.Debug { + log.Printf("[%2d <-] Exec command packet: len=%d, null_bitmap=%v, rebind=%t", + stmt.my.seq-1, pkt_len, stmt.null_bitmap, stmt.rebind) + } + + // Mark that we sended information about binded types + stmt.rebind = false +} + +func (my *Conn) getPrepareResult(stmt *Stmt) interface{} { +loop: + pr := my.newPktReader() // New reader for next packet + pkt0 := pr.readByte() + + //log.Println("pkt0:", pkt0, "stmt:", stmt) + + if pkt0 == 255 { + // Error packet + my.getErrorPacket(pr) + } + + if stmt == nil { + if pkt0 == 0 { + // OK packet + return my.getPrepareOkPacket(pr) + } + } else { + unreaded_params := (stmt.param_count < len(stmt.params)) + switch { + case pkt0 == 254: + // EOF packet + stmt.warning_count, stmt.status = my.getEofPacket(pr) + stmt.my.status = stmt.status + return stmt + + case pkt0 > 0 && pkt0 < 251 && (stmt.field_count < len(stmt.fields) || + unreaded_params): + // Field packet + if unreaded_params { + // Read and ignore parameter field. 
Sentence from MySQL source:
+				/* skip parameters data: we don't support it yet */
+				pr.skipAll()
+				// Increment parameter count
+				stmt.param_count++
+			} else {
+				field := my.getFieldPacket(pr)
+				stmt.fields[stmt.field_count] = field
+				// Increment field count
+				stmt.field_count++
+			}
+			// Read next packet
+			goto loop
+		}
+	}
+	panic(mysql.ErrUnkResultPkt)
+}
+
+func (my *Conn) getPrepareOkPacket(pr *pktReader) (stmt *Stmt) {
+	if my.Debug {
+		log.Printf("[%2d ->] Prepared OK packet:", my.seq-1)
+	}
+
+	stmt = new(Stmt)
+	stmt.my = my
+	// First byte was read by getPrepareResult
+	stmt.id = pr.readU32()
+	stmt.fields = make([]*mysql.Field, int(pr.readU16())) // FieldCount
+	pl := int(pr.readU16())                               // ParamCount
+	if pl > 0 {
+		stmt.params = make([]paramValue, pl)
+		stmt.null_bitmap = make([]byte, (pl+7)>>3)
+	}
+	pr.skipN(1)
+	stmt.warning_count = int(pr.readU16())
+	pr.checkEof()
+
+	if my.Debug {
+		log.Printf(tab8s+"ID=0x%x ParamCount=%d FieldsCount=%d WarnCount=%d",
+			stmt.id, len(stmt.params), len(stmt.fields), stmt.warning_count,
+		)
+	}
+	return
+}
diff --git a/vendor/github.com/ziutek/mymysql/native/result.go b/vendor/github.com/ziutek/mymysql/native/result.go
new file mode 100644
index 000000000..a36ed175a
--- /dev/null
+++ b/vendor/github.com/ziutek/mymysql/native/result.go
@@ -0,0 +1,406 @@
+package native
+
+import (
+	"errors"
+	"github.com/ziutek/mymysql/mysql"
+	"log"
+	"math"
+	"strconv"
+)
+
+type Result struct {
+	my          *Conn
+	status_only bool // true if result doesn't contain result set
+	binary      bool // Binary result expected
+
+	field_count int
+	fields      []*mysql.Field // Fields table
+	fc_map      map[string]int // Maps field name to column number
+
+	message       []byte
+	affected_rows uint64
+
+	// Primary key value (useful for AUTO_INCREMENT primary keys)
+	insert_id uint64
+
+	// Number of warnings during command execution
+	// You can use the SHOW WARNINGS query for details.
+	warning_count int
+
+	// MySQL server status immediately after the query execution
+	status mysql.ConnStatus
+
+	// Set by GetRow when it returns a nil row
+	eor_returned bool
+}
+
+// Returns true if this is a status result that includes no result set
+func (res *Result) StatusOnly() bool {
+	return res.status_only
+}
+
+// Returns a table containing descriptions of the columns
+func (res *Result) Fields() []*mysql.Field {
+	return res.fields
+}
+
+// Returns the index for the given name, or -1 if a field with that name
+// doesn't exist
+func (res *Result) Map(field_name string) int {
+	if fi, ok := res.fc_map[field_name]; ok {
+		return fi
+	}
+	return -1
+}
+
+func (res *Result) Message() string {
+	return string(res.message)
+}
+
+func (res *Result) AffectedRows() uint64 {
+	return res.affected_rows
+}
+
+func (res *Result) InsertId() uint64 {
+	return res.insert_id
+}
+
+func (res *Result) WarnCount() int {
+	return res.warning_count
+}
+
+func (res *Result) MakeRow() mysql.Row {
+	return make(mysql.Row, res.field_count)
+}
+
+func (my *Conn) getResult(res *Result, row mysql.Row) *Result {
+loop:
+	pr := my.newPktReader() // New reader for next packet
+	pkt0 := pr.readByte()
+
+	if pkt0 == 255 {
+		// Error packet
+		my.getErrorPacket(pr)
+	}
+
+	if res == nil {
+		switch {
+		case pkt0 == 0:
+			// OK packet
+			return my.getOkPacket(pr)
+
+		case pkt0 > 0 && pkt0 < 251:
+			// Result set header packet
+			res = my.getResSetHeadPacket(pr)
+			// Read next packet
+			goto loop
+		case pkt0 == 251:
+			// Load infile response
+			// Handle response
+			goto loop
+		case pkt0 == 254:
+			// EOF packet (without body)
+			return nil
+		}
+	} else {
+		switch {
+		case pkt0 == 254:
+			// EOF packet
+			res.warning_count, res.status = my.getEofPacket(pr)
+			my.status = res.status
+			return res
+
+		case pkt0 > 0 && pkt0 < 251 && res.field_count < len(res.fields):
+			// Field packet
+			field := my.getFieldPacket(pr)
+			res.fields[res.field_count] = field
+			res.fc_map[field.Name] = res.field_count
+			// Increment field count
+			res.field_count++
+			// Read next packet
+			goto loop
+
+		case pkt0 < 254 && res.field_count == len(res.fields):
+			// Row Data Packet
+			if len(row) != res.field_count {
+				panic(mysql.ErrRowLength)
+			}
+			if res.binary {
+				my.getBinRowPacket(pr, res, row)
+			} else {
+				my.getTextRowPacket(pr, res, row)
+			}
+			return nil
+		}
+	}
+	panic(mysql.ErrUnkResultPkt)
+}
+
+func (my *Conn) getOkPacket(pr *pktReader) (res *Result) {
+	if my.Debug {
+		log.Printf("[%2d ->] OK packet:", my.seq-1)
+	}
+	res = new(Result)
+	res.status_only = true
+	res.my = my
+	// First byte was read by getResult
+	res.affected_rows = pr.readLCB()
+	res.insert_id = pr.readLCB()
+	res.status = mysql.ConnStatus(pr.readU16())
+	my.status = res.status
+	res.warning_count = int(pr.readU16())
+	res.message = pr.readAll()
+	pr.checkEof()
+
+	if my.Debug {
+		log.Printf(tab8s+"AffectedRows=%d InsertId=0x%x Status=0x%x "+
+			"WarningCount=%d Message=\"%s\"", res.affected_rows, res.insert_id,
+			res.status, res.warning_count, res.message,
+		)
+	}
+	return
+}
+
+func (my *Conn) getErrorPacket(pr *pktReader) {
+	if my.Debug {
+		log.Printf("[%2d ->] Error packet:", my.seq-1)
+	}
+	var err mysql.Error
+	err.Code = pr.readU16()
+	if pr.readByte() != '#' {
+		panic(mysql.ErrPkt)
+	}
+	pr.skipN(5)
+	err.Msg = pr.readAll()
+	pr.checkEof()
+
+	if my.Debug {
+		log.Printf(tab8s+"code=0x%x msg=\"%s\"", err.Code, err.Msg)
+	}
+	panic(&err)
+}
+
+func (my *Conn) getEofPacket(pr *pktReader) (warn_count int, status mysql.ConnStatus) {
+	if my.Debug {
+		if pr.eof() {
+			log.Printf("[%2d ->] EOF 
packet without body", my.seq-1) + } else { + log.Printf("[%2d ->] EOF packet:", my.seq-1) + } + } + if pr.eof() { + return + } + warn_count = int(pr.readU16()) + if pr.eof() { + return + } + status = mysql.ConnStatus(pr.readU16()) + pr.checkEof() + + if my.Debug { + log.Printf(tab8s+"WarningCount=%d Status=0x%x", warn_count, status) + } + return +} + +func (my *Conn) getResSetHeadPacket(pr *pktReader) (res *Result) { + if my.Debug { + log.Printf("[%2d ->] Result set header packet:", my.seq-1) + } + pr.unreadByte() + + field_count := int(pr.readLCB()) + pr.checkEof() + + res = &Result{ + my: my, + fields: make([]*mysql.Field, field_count), + fc_map: make(map[string]int), + } + + if my.Debug { + log.Printf(tab8s+"FieldCount=%d", field_count) + } + return +} + +func (my *Conn) getFieldPacket(pr *pktReader) (field *mysql.Field) { + if my.Debug { + log.Printf("[%2d ->] Field packet:", my.seq-1) + } + pr.unreadByte() + + field = new(mysql.Field) + if my.fullFieldInfo { + field.Catalog = string(pr.readBin()) + field.Db = string(pr.readBin()) + field.Table = string(pr.readBin()) + field.OrgTable = string(pr.readBin()) + } else { + pr.skipBin() + pr.skipBin() + pr.skipBin() + pr.skipBin() + } + field.Name = string(pr.readBin()) + if my.fullFieldInfo { + field.OrgName = string(pr.readBin()) + } else { + pr.skipBin() + } + pr.skipN(1 + 2) + //field.Charset= pr.readU16() + field.DispLen = pr.readU32() + field.Type = pr.readByte() + field.Flags = pr.readU16() + field.Scale = pr.readByte() + pr.skipN(2) + pr.checkEof() + + if my.Debug { + log.Printf(tab8s+"Name=\"%s\" Type=0x%x", field.Name, field.Type) + } + return +} + +func (my *Conn) getTextRowPacket(pr *pktReader, res *Result, row mysql.Row) { + if my.Debug { + log.Printf("[%2d ->] Text row data packet", my.seq-1) + } + pr.unreadByte() + + for ii := 0; ii < res.field_count; ii++ { + bin, null := pr.readNullBin() + if null { + row[ii] = nil + } else { + row[ii] = bin + } + } + pr.checkEof() +} + +func (my *Conn) getBinRowPacket(pr *pktReader, res *Result, row mysql.Row) { + if my.Debug { + log.Printf("[%2d ->] Binary row data packet", my.seq-1) + } + // First byte was readed by getResult + + null_bitmap := make([]byte, (res.field_count+7+2)>>3) + pr.readFull(null_bitmap) + + for ii, field := range res.fields { + null_byte := (ii + 2) >> 3 + null_mask := byte(1) << uint(2+ii-(null_byte<<3)) + if null_bitmap[null_byte]&null_mask != 0 { + // Null field + row[ii] = nil + continue + } + unsigned := (field.Flags & _FLAG_UNSIGNED) != 0 + if my.narrowTypeSet { + row[ii] = readValueNarrow(pr, field.Type, unsigned) + } else { + row[ii] = readValue(pr, field.Type, unsigned) + } + } +} + +func readValue(pr *pktReader, typ byte, unsigned bool) interface{} { + switch typ { + case MYSQL_TYPE_STRING, MYSQL_TYPE_VAR_STRING, MYSQL_TYPE_VARCHAR, + MYSQL_TYPE_BIT, MYSQL_TYPE_BLOB, MYSQL_TYPE_TINY_BLOB, + MYSQL_TYPE_MEDIUM_BLOB, MYSQL_TYPE_LONG_BLOB, MYSQL_TYPE_SET, + MYSQL_TYPE_ENUM, MYSQL_TYPE_GEOMETRY: + return pr.readBin() + case MYSQL_TYPE_TINY: + if unsigned { + return pr.readByte() + } else { + return int8(pr.readByte()) + } + case MYSQL_TYPE_SHORT, MYSQL_TYPE_YEAR: + if unsigned { + return pr.readU16() + } else { + return int16(pr.readU16()) + } + case MYSQL_TYPE_LONG, MYSQL_TYPE_INT24: + if unsigned { + return pr.readU32() + } else { + return int32(pr.readU32()) + } + case MYSQL_TYPE_LONGLONG: + if unsigned { + return pr.readU64() + } else { + return int64(pr.readU64()) + } + case MYSQL_TYPE_FLOAT: + return math.Float32frombits(pr.readU32()) + case 
MYSQL_TYPE_DOUBLE: + return math.Float64frombits(pr.readU64()) + case MYSQL_TYPE_DECIMAL, MYSQL_TYPE_NEWDECIMAL: + dec := string(pr.readBin()) + r, err := strconv.ParseFloat(dec, 64) + if err != nil { + panic(errors.New("MySQL server returned wrong decimal value: " + dec)) + } + return r + case MYSQL_TYPE_DATE, MYSQL_TYPE_NEWDATE: + return pr.readDate() + case MYSQL_TYPE_DATETIME, MYSQL_TYPE_TIMESTAMP: + return pr.readTime() + case MYSQL_TYPE_TIME: + return pr.readDuration() + } + panic(mysql.ErrUnkMySQLType) +} + +func readValueNarrow(pr *pktReader, typ byte, unsigned bool) interface{} { + switch typ { + case MYSQL_TYPE_STRING, MYSQL_TYPE_VAR_STRING, MYSQL_TYPE_VARCHAR, + MYSQL_TYPE_BIT, MYSQL_TYPE_BLOB, MYSQL_TYPE_TINY_BLOB, + MYSQL_TYPE_MEDIUM_BLOB, MYSQL_TYPE_LONG_BLOB, MYSQL_TYPE_SET, + MYSQL_TYPE_ENUM, MYSQL_TYPE_GEOMETRY: + return pr.readBin() + case MYSQL_TYPE_TINY: + if unsigned { + return int64(pr.readByte()) + } + return int64(int8(pr.readByte())) + case MYSQL_TYPE_SHORT, MYSQL_TYPE_YEAR: + if unsigned { + return int64(pr.readU16()) + } + return int64(int16(pr.readU16())) + case MYSQL_TYPE_LONG, MYSQL_TYPE_INT24: + if unsigned { + return int64(pr.readU32()) + } + return int64(int32(pr.readU32())) + case MYSQL_TYPE_LONGLONG: + v := pr.readU64() + if unsigned && v > math.MaxInt64 { + panic(errors.New("Value to large for int64 type")) + } + return int64(v) + case MYSQL_TYPE_FLOAT: + return float64(math.Float32frombits(pr.readU32())) + case MYSQL_TYPE_DOUBLE: + return math.Float64frombits(pr.readU64()) + case MYSQL_TYPE_DECIMAL, MYSQL_TYPE_NEWDECIMAL: + dec := string(pr.readBin()) + r, err := strconv.ParseFloat(dec, 64) + if err != nil { + panic("MySQL server returned wrong decimal value: " + dec) + } + return r + case MYSQL_TYPE_DATETIME, MYSQL_TYPE_TIMESTAMP, MYSQL_TYPE_DATE, MYSQL_TYPE_NEWDATE: + return pr.readTime() + case MYSQL_TYPE_TIME: + return int64(pr.readDuration()) + } + panic(mysql.ErrUnkMySQLType) +} diff --git a/vendor/github.com/ziutek/mymysql/native/unsafe.go-disabled b/vendor/github.com/ziutek/mymysql/native/unsafe.go-disabled new file mode 100644 index 000000000..bef32cc3a --- /dev/null +++ b/vendor/github.com/ziutek/mymysql/native/unsafe.go-disabled @@ -0,0 +1,116 @@ +package native + +import ( + "github.com/ziutek/mymysql/mysql" + "time" + "unsafe" +) + +type paramValue struct { + typ uint16 + addr unsafe.Pointer + raw bool + length int // >=0 - length of value, <0 - unknown length +} + +func (pv *paramValue) SetAddr(addr uintptr) { + pv.addr = unsafe.Pointer(addr) +} + +func (val *paramValue) Len() int { + if val.addr == nil { + // Invalid Value was binded + return 0 + } + // val.addr always points to the pointer - lets dereference it + ptr := *(*unsafe.Pointer)(val.addr) + if ptr == nil { + // Binded Ptr Value is nil + return 0 + } + + if val.length >= 0 { + return val.length + } + + switch val.typ { + case MYSQL_TYPE_STRING: + return lenStr(*(*string)(ptr)) + + case MYSQL_TYPE_DATE: + return lenDate(*(*mysql.Date)(ptr)) + + case MYSQL_TYPE_TIMESTAMP, MYSQL_TYPE_DATETIME: + return lenTime(*(*time.Time)(ptr)) + + case MYSQL_TYPE_TIME: + return lenDuration(*(*time.Duration)(ptr)) + + case MYSQL_TYPE_TINY: // val.length < 0 so this is bool + return 1 + } + // MYSQL_TYPE_VAR_STRING, MYSQL_TYPE_BLOB and type of Raw value + return lenBin(*(*[]byte)(ptr)) +} + +func (pw *pktWriter) writeValue(val *paramValue) { + if val.addr == nil { + // Invalid Value was binded + return + } + // val.addr always points to the pointer - lets dereference it + ptr := 
*(*unsafe.Pointer)(val.addr) + if ptr == nil { + // The bound pointer value is nil + return + } + + if val.raw || val.typ == MYSQL_TYPE_VAR_STRING || + val.typ == MYSQL_TYPE_BLOB { + pw.writeBin(*(*[]byte)(ptr)) + return + } + // We don't need the unsigned bit to check the type + switch val.typ & ^MYSQL_UNSIGNED_MASK { + case MYSQL_TYPE_NULL: + // Don't write null values + + case MYSQL_TYPE_STRING: + s := *(*string)(ptr) + pw.writeBin([]byte(s)) + + case MYSQL_TYPE_LONG, MYSQL_TYPE_FLOAT: + pw.writeU32(*(*uint32)(ptr)) + + case MYSQL_TYPE_SHORT: + pw.writeU16(*(*uint16)(ptr)) + + case MYSQL_TYPE_TINY: + if val.length == -1 { + // Translate bool value to MySQL tiny + if *(*bool)(ptr) { + pw.writeByte(1) + } else { + pw.writeByte(0) + } + } else { + pw.writeByte(*(*byte)(ptr)) + } + + case MYSQL_TYPE_LONGLONG, MYSQL_TYPE_DOUBLE: + pw.writeU64(*(*uint64)(ptr)) + + case MYSQL_TYPE_DATE: + pw.writeDate(*(*mysql.Date)(ptr)) + + case MYSQL_TYPE_TIMESTAMP, MYSQL_TYPE_DATETIME: + pw.writeTime(*(*time.Time)(ptr)) + + case MYSQL_TYPE_TIME: + pw.writeDuration(*(*time.Duration)(ptr)) + + default: + panic(mysql.ErrBindUnkType) + } + return +} diff --git a/vendor/github.com/ziutek/mymysql/thrsafe/LICENSE b/vendor/github.com/ziutek/mymysql/thrsafe/LICENSE new file mode 100644 index 000000000..8eab9a6ec --- /dev/null +++ b/vendor/github.com/ziutek/mymysql/thrsafe/LICENSE @@ -0,0 +1,24 @@ +Copyright (c) 2010, Michal Derkacz +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions +are met: +1. Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. +2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. +3. The name of the author may not be used to endorse or promote products + derived from this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR +IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES +OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. +IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, +INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT +NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF +THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/ziutek/mymysql/thrsafe/thrsafe.go b/vendor/github.com/ziutek/mymysql/thrsafe/thrsafe.go new file mode 100644 index 000000000..4a23860c0 --- /dev/null +++ b/vendor/github.com/ziutek/mymysql/thrsafe/thrsafe.go @@ -0,0 +1,349 @@ +// Thread safe engine for MyMySQL +// +// In contrast to the native engine: +// - one connection can be used by multiple goroutines, +// - if the connection is idle, pings are sent to the server (once per minute) to +// avoid timeouts. 
+// +// See the documentation of mymysql/native for details +package thrsafe + +import ( + "github.com/ziutek/mymysql/mysql" + _ "github.com/ziutek/mymysql/native" + "io" + "sync" + "time" +) + +type Conn struct { + mysql.Conn + mutex *sync.Mutex + + stopPinger chan struct{} + lastUsed time.Time +} + +func (c *Conn) lock() { + //log.Println(c, ":: lock @", c.mutex) + c.mutex.Lock() +} + +func (c *Conn) unlock() { + //log.Println(c, ":: unlock @", c.mutex) + c.lastUsed = time.Now() + c.mutex.Unlock() +} + +type Result struct { + mysql.Result + conn *Conn +} + +type Stmt struct { + mysql.Stmt + conn *Conn +} + +type Transaction struct { + *Conn + conn *Conn +} + +func New(proto, laddr, raddr, user, passwd string, db ...string) mysql.Conn { + return &Conn{ + Conn: orgNew(proto, laddr, raddr, user, passwd, db...), + mutex: new(sync.Mutex), + } +} + +func (c *Conn) Clone() mysql.Conn { + return &Conn{ + Conn: c.Conn.Clone(), + mutex: new(sync.Mutex), + } +} + +func (c *Conn) pinger() { + const to = 60 * time.Second + sleep := to + for { + timer := time.After(sleep) + select { + case <-c.stopPinger: + return + case t := <-timer: + c.mutex.Lock() + lastUsed := c.lastUsed + c.mutex.Unlock() + sleep = to - t.Sub(lastUsed) // time remaining until the next ping is due + if sleep <= 0 { + if c.Ping() != nil { + return + } + sleep = to + } + } + } +} + +func (c *Conn) Connect() error { + //log.Println("Connect") + c.lock() + defer c.unlock() + c.stopPinger = make(chan struct{}) + go c.pinger() + return c.Conn.Connect() +} + +func (c *Conn) Close() error { + //log.Println("Close") + close(c.stopPinger) // Stop the pinger before locking the connection + c.lock() + defer c.unlock() + return c.Conn.Close() +} + +func (c *Conn) Reconnect() error { + //log.Println("Reconnect") + c.lock() + defer c.unlock() + if c.stopPinger == nil { + go c.pinger() + } + return c.Conn.Reconnect() +} + +func (c *Conn) Use(dbname string) error { + //log.Println("Use") + c.lock() + defer c.unlock() + return c.Conn.Use(dbname) +} + +func (c *Conn) Start(sql string, params ...interface{}) (mysql.Result, error) { + //log.Println("Start") + c.lock() + res, err := c.Conn.Start(sql, params...) + // Unlock if error or OK result (which doesn't provide any fields) + if err != nil { + c.unlock() + return nil, err + } + if res.StatusOnly() && !res.MoreResults() { + c.unlock() + } + return &Result{Result: res, conn: c}, err +} + +func (c *Conn) Status() mysql.ConnStatus { + c.lock() + defer c.unlock() + return c.Conn.Status() +} + +func (c *Conn) Escape(txt string) string { + return mysql.Escape(c, txt) +} + +func (res *Result) ScanRow(row mysql.Row) error { + //log.Println("ScanRow") + err := res.Result.ScanRow(row) + if err == nil { + // There are more rows to read + return nil + } + if err == mysql.ErrReadAfterEOR { + // Trying to read after EOR - the connection was unlocked before + return err + } + if err != io.EOF || !res.StatusOnly() && !res.MoreResults() { + // Error, or no more rows in a non-empty result set and no more results. + // For an empty result set with no more results, Start has already + // unlocked it. 
+ res.conn.unlock() + } + return err +} + +func (res *Result) GetRow() (mysql.Row, error) { + return mysql.GetRow(res) +} + +func (res *Result) NextResult() (mysql.Result, error) { + //log.Println("NextResult") + next, err := res.Result.NextResult() + if err != nil { + return nil, err + } + if next == nil { + return nil, nil + } + if next.StatusOnly() && !next.MoreResults() { + res.conn.unlock() + } + return &Result{next, res.conn}, nil +} + +func (c *Conn) Ping() error { + c.lock() + defer c.unlock() + return c.Conn.Ping() +} + +func (c *Conn) Prepare(sql string) (mysql.Stmt, error) { + //log.Println("Prepare") + c.lock() + defer c.unlock() + stmt, err := c.Conn.Prepare(sql) + if err != nil { + return nil, err + } + return &Stmt{Stmt: stmt, conn: c}, nil +} + +func (stmt *Stmt) Run(params ...interface{}) (mysql.Result, error) { + //log.Println("Run") + stmt.conn.lock() + res, err := stmt.Stmt.Run(params...) + // Unlock if error or OK result (which doesn't provide any fields) + if err != nil { + stmt.conn.unlock() + return nil, err + } + if res.StatusOnly() && !res.MoreResults() { + stmt.conn.unlock() + } + return &Result{Result: res, conn: stmt.conn}, nil +} + +func (stmt *Stmt) Delete() error { + //log.Println("Delete") + stmt.conn.lock() + defer stmt.conn.unlock() + return stmt.Stmt.Delete() +} + +func (stmt *Stmt) Reset() error { + //log.Println("Reset") + stmt.conn.lock() + defer stmt.conn.unlock() + return stmt.Stmt.Reset() +} + +func (stmt *Stmt) SendLongData(pnum int, data interface{}, pkt_size int) error { + //log.Println("SendLongData") + stmt.conn.lock() + defer stmt.conn.unlock() + return stmt.Stmt.SendLongData(pnum, data, pkt_size) +} + +// See mysql.Query +func (c *Conn) Query(sql string, params ...interface{}) ([]mysql.Row, mysql.Result, error) { + return mysql.Query(c, sql, params...) +} + +// See mysql.QueryFirst +func (my *Conn) QueryFirst(sql string, params ...interface{}) (mysql.Row, mysql.Result, error) { + return mysql.QueryFirst(my, sql, params...) +} + +// See mysql.QueryLast +func (my *Conn) QueryLast(sql string, params ...interface{}) (mysql.Row, mysql.Result, error) { + return mysql.QueryLast(my, sql, params...) +} + +// See mysql.Exec +func (stmt *Stmt) Exec(params ...interface{}) ([]mysql.Row, mysql.Result, error) { + return mysql.Exec(stmt, params...) +} + +// See mysql.ExecFirst +func (stmt *Stmt) ExecFirst(params ...interface{}) (mysql.Row, mysql.Result, error) { + return mysql.ExecFirst(stmt, params...) +} + +// See mysql.ExecLast +func (stmt *Stmt) ExecLast(params ...interface{}) (mysql.Row, mysql.Result, error) { + return mysql.ExecLast(stmt, params...) +} + +// See mysql.End +func (res *Result) End() error { + return mysql.End(res) +} + +// See mysql.GetFirstRow +func (res *Result) GetFirstRow() (mysql.Row, error) { + return mysql.GetFirstRow(res) +} + +// See mysql.GetLastRow +func (res *Result) GetLastRow() (mysql.Row, error) { + return mysql.GetLastRow(res) +} + +// See mysql.GetRows +func (res *Result) GetRows() ([]mysql.Row, error) { + return mysql.GetRows(res) +}
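As a quick orientation for reviewers, a minimal sketch (not part of the vendored file) of how the transaction API defined below is consumed. It reuses the address and credentials from the package's own test file; the table name t is hypothetical. Importing thrsafe swaps mysql.New for the thread-safe constructor via init(), so the resulting connection may be shared across goroutines, and Begin serializes access until Commit or Rollback.

package main

import (
	"log"

	"github.com/ziutek/mymysql/mysql"
	_ "github.com/ziutek/mymysql/thrsafe" // installs the thread-safe engine as mysql.New
)

func main() {
	// Address and credentials as in thrsafe_test.go; table t is hypothetical.
	db := mysql.New("tcp", "", "127.0.0.1:3306", "testuser", "TestPasswd9", "test")
	if err := db.Connect(); err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// Begin locks the connection; other goroutines block until Commit/Rollback.
	tr, err := db.Begin()
	if err != nil {
		log.Fatal(err)
	}
	if _, err := tr.Start("UPDATE t SET n = n + 1"); err != nil {
		tr.Rollback()
		log.Fatal(err)
	}
	if err := tr.Commit(); err != nil {
		log.Fatal(err)
	}
}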
+ +// Begin starts a new transaction. No other goroutine can send commands on +// this connection until Commit or Rollback is called. Periodic pinging of +// the server is disabled while the transaction is open. +func (c *Conn) Begin() (mysql.Transaction, error) { + //log.Println("Begin") + c.lock() + tr := Transaction{ + &Conn{Conn: c.Conn, mutex: new(sync.Mutex)}, + c, + } + _, err := c.Conn.Start("START TRANSACTION") + if err != nil { + c.unlock() + return nil, err + } + return &tr, nil +} + +func (tr *Transaction) end(cr string) error { + tr.lock() + _, err := tr.conn.Conn.Start(cr) + tr.conn.unlock() + // Invalidate this transaction + m := tr.Conn.mutex + tr.Conn = nil + tr.conn = nil + m.Unlock() // Any goroutine still using this transaction will panic + return err +} + +func (tr *Transaction) Commit() error { + //log.Println("Commit") + return tr.end("COMMIT") +} + +func (tr *Transaction) Rollback() error { + //log.Println("Rollback") + return tr.end("ROLLBACK") +} + +func (tr *Transaction) IsValid() bool { + return tr.Conn != nil +} + +func (tr *Transaction) Do(st mysql.Stmt) mysql.Stmt { + if s, ok := st.(*Stmt); ok && s.conn == tr.conn { + // Return a new statement that uses the transaction's connection + return &Stmt{s.Stmt, tr.Conn} + } + panic("Transaction and statement don't belong to the same connection") +} + +var orgNew func(proto, laddr, raddr, user, passwd string, db ...string) mysql.Conn + +func init() { + orgNew = mysql.New + mysql.New = New +} diff --git a/vendor/github.com/ziutek/mymysql/thrsafe/thrsafe_test.go b/vendor/github.com/ziutek/mymysql/thrsafe/thrsafe_test.go new file mode 100644 index 000000000..13bb04845 --- /dev/null +++ b/vendor/github.com/ziutek/mymysql/thrsafe/thrsafe_test.go @@ -0,0 +1,136 @@ +package thrsafe + +import ( + "github.com/ziutek/mymysql/mysql" + "github.com/ziutek/mymysql/native" + "testing" +) + +const ( + user = "testuser" + passwd = "TestPasswd9" + dbname = "test" + proto = "tcp" + daddr = "127.0.0.1:3306" + //proto = "unix" + //daddr = "/var/run/mysqld/mysqld.sock" + debug = false +) + +var db mysql.Conn + +func checkErr(t *testing.T, err error) { + if err != nil { + t.Fatalf("Error: %v", err) + } +} + +func connect(t *testing.T) mysql.Conn { + db := New(proto, "", daddr, user, passwd, dbname) + db.(*Conn).Conn.(*native.Conn).Debug = debug + checkErr(t, db.Connect()) + return db +} + +func TestS(t *testing.T) { + db := connect(t) + res, err := db.Start("SET @a=1") + checkErr(t, err) + if !res.StatusOnly() { + t.Fatalf("'SET @a' statement returns result with rows") + } + err = db.Close() + checkErr(t, err) +} + +func TestSS(t *testing.T) { + db := connect(t) + + res, err := db.Start("SET @a=1; SET @b=2") + checkErr(t, err) + if !res.StatusOnly() { + t.Fatalf("'SET @a' statement returns result with rows") + } + + res, err = res.NextResult() + checkErr(t, err) + if !res.StatusOnly() { + t.Fatalf("'SET @b' statement returns result with rows") + } + + err = db.Close() + checkErr(t, err) +} + +func TestSDS(t *testing.T) { + db := connect(t) + + res, err := db.Start("SET @a=1; SELECT @a; SET @b=2") + checkErr(t, err) + if !res.StatusOnly() { + t.Fatalf("'SET @a' statement returns result with rows") + } + + res, err = res.NextResult() + checkErr(t, err) + rows, err := res.GetRows() + checkErr(t, err) + if rows[0].Int(0) != 1 { + t.Fatalf("First query doesn't return '1'") + } + + res, err = res.NextResult() + checkErr(t, err) + if !res.StatusOnly() { + t.Fatalf("'SET @b' statement returns result with rows") + } + + err = db.Close() + checkErr(t, err) +} + +func TestSSDDD(t *testing.T) { + db := connect(t) + + res, err := db.Start("SET @a=1; SET @b=2; SELECT @a; SELECT @b; SELECT 3") + checkErr(t, err) + if !res.StatusOnly() { + t.Fatalf("'SET @a' 
statement returns result with rows") + } + + res, err = res.NextResult() + checkErr(t, err) + if !res.StatusOnly() { + t.Fatalf("'SET @b' statement returns result with rows") + } + + res, err = res.NextResult() + checkErr(t, err) + rows, err := res.GetRows() + checkErr(t, err) + if rows[0].Int(0) != 1 { + t.Fatalf("First query doesn't return '1'") + } + + res, err = res.NextResult() + checkErr(t, err) + rows, err = res.GetRows() + checkErr(t, err) + if rows[0].Int(0) != 2 { + t.Fatalf("Second query doesn't return '2'") + } + + res, err = res.NextResult() + checkErr(t, err) + rows, err = res.GetRows() + checkErr(t, err) + if rows[0].Int(0) != 3 { + t.Fatalf("Third query doesn't return '3'") + } + if res.MoreResults() { + t.Fatalf("There is an unexpected extra result") + } + + err = db.Close() + checkErr(t, err) +} diff --git a/vendor/golang.org/x/crypto/curve25519/const_amd64.s b/vendor/golang.org/x/crypto/curve25519/const_amd64.s new file mode 100644 index 000000000..797f9b051 --- /dev/null +++ b/vendor/golang.org/x/crypto/curve25519/const_amd64.s @@ -0,0 +1,20 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// This code was translated into a form compatible with 6a from the public +// domain sources in SUPERCOP: http://bench.cr.yp.to/supercop.html + +// +build amd64,!gccgo,!appengine + +DATA ·REDMASK51(SB)/8, $0x0007FFFFFFFFFFFF +GLOBL ·REDMASK51(SB), 8, $8 + +DATA ·_121666_213(SB)/8, $996687872 +GLOBL ·_121666_213(SB), 8, $8 + +DATA ·_2P0(SB)/8, $0xFFFFFFFFFFFDA +GLOBL ·_2P0(SB), 8, $8 + +DATA ·_2P1234(SB)/8, $0xFFFFFFFFFFFFE +GLOBL ·_2P1234(SB), 8, $8 diff --git a/vendor/golang.org/x/crypto/curve25519/cswap_amd64.s b/vendor/golang.org/x/crypto/curve25519/cswap_amd64.s new file mode 100644 index 000000000..45484d1b5 --- /dev/null +++ b/vendor/golang.org/x/crypto/curve25519/cswap_amd64.s @@ -0,0 +1,88 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// This code was translated into a form compatible with 6a from the public +// domain sources in SUPERCOP: http://bench.cr.yp.to/supercop.html + +// +build amd64,!gccgo,!appengine + +// func cswap(inout *[5]uint64, v uint64) +TEXT ·cswap(SB),7,$0 + MOVQ inout+0(FP),DI + MOVQ v+8(FP),SI + + CMPQ SI,$1 + MOVQ 0(DI),SI + MOVQ 80(DI),DX + MOVQ 8(DI),CX + MOVQ 88(DI),R8 + MOVQ SI,R9 + CMOVQEQ DX,SI + CMOVQEQ R9,DX + MOVQ CX,R9 + CMOVQEQ R8,CX + CMOVQEQ R9,R8 + MOVQ SI,0(DI) + MOVQ DX,80(DI) + MOVQ CX,8(DI) + MOVQ R8,88(DI) + MOVQ 16(DI),SI + MOVQ 96(DI),DX + MOVQ 24(DI),CX + MOVQ 104(DI),R8 + MOVQ SI,R9 + CMOVQEQ DX,SI + CMOVQEQ R9,DX + MOVQ CX,R9 + CMOVQEQ R8,CX + CMOVQEQ R9,R8 + MOVQ SI,16(DI) + MOVQ DX,96(DI) + MOVQ CX,24(DI) + MOVQ R8,104(DI) + MOVQ 32(DI),SI + MOVQ 112(DI),DX + MOVQ 40(DI),CX + MOVQ 120(DI),R8 + MOVQ SI,R9 + CMOVQEQ DX,SI + CMOVQEQ R9,DX + MOVQ CX,R9 + CMOVQEQ R8,CX + CMOVQEQ R9,R8 + MOVQ SI,32(DI) + MOVQ DX,112(DI) + MOVQ CX,40(DI) + MOVQ R8,120(DI) + MOVQ 48(DI),SI + MOVQ 128(DI),DX + MOVQ 56(DI),CX + MOVQ 136(DI),R8 + MOVQ SI,R9 + CMOVQEQ DX,SI + CMOVQEQ R9,DX + MOVQ CX,R9 + CMOVQEQ R8,CX + CMOVQEQ R9,R8 + MOVQ SI,48(DI) + MOVQ DX,128(DI) + MOVQ CX,56(DI) + MOVQ R8,136(DI) + MOVQ 64(DI),SI + MOVQ 144(DI),DX + MOVQ 72(DI),CX + MOVQ 152(DI),R8 + MOVQ SI,R9 + CMOVQEQ DX,SI + CMOVQEQ R9,DX + MOVQ CX,R9 + CMOVQEQ R8,CX + CMOVQEQ R9,R8 + MOVQ SI,64(DI) + MOVQ DX,144(DI) + MOVQ CX,72(DI) + MOVQ R8,152(DI) + MOVQ DI,AX + MOVQ SI,DX + RET diff --git a/vendor/golang.org/x/crypto/curve25519/curve25519.go b/vendor/golang.org/x/crypto/curve25519/curve25519.go new file mode 100644 index 000000000..6918c47fc --- /dev/null +++ b/vendor/golang.org/x/crypto/curve25519/curve25519.go @@ -0,0 +1,841 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// We have a implementation in amd64 assembly so this code is only run on +// non-amd64 platforms. The amd64 assembly does not support gccgo. +// +build !amd64 gccgo appengine + +package curve25519 + +// This code is a port of the public domain, "ref10" implementation of +// curve25519 from SUPERCOP 20130419 by D. J. Bernstein. + +// fieldElement represents an element of the field GF(2^255 - 19). An element +// t, entries t[0]...t[9], represents the integer t[0]+2^26 t[1]+2^51 t[2]+2^77 +// t[3]+2^102 t[4]+...+2^230 t[9]. Bounds on each t[i] vary depending on +// context. +type fieldElement [10]int32 + +func feZero(fe *fieldElement) { + for i := range fe { + fe[i] = 0 + } +} + +func feOne(fe *fieldElement) { + feZero(fe) + fe[0] = 1 +} + +func feAdd(dst, a, b *fieldElement) { + for i := range dst { + dst[i] = a[i] + b[i] + } +} + +func feSub(dst, a, b *fieldElement) { + for i := range dst { + dst[i] = a[i] - b[i] + } +} + +func feCopy(dst, src *fieldElement) { + for i := range dst { + dst[i] = src[i] + } +} + +// feCSwap replaces (f,g) with (g,f) if b == 1; replaces (f,g) with (f,g) if b == 0. +// +// Preconditions: b in {0,1}. +func feCSwap(f, g *fieldElement, b int32) { + var x fieldElement + b = -b + for i := range x { + x[i] = b & (f[i] ^ g[i]) + } + + for i := range f { + f[i] ^= x[i] + } + for i := range g { + g[i] ^= x[i] + } +} + +// load3 reads a 24-bit, little-endian value from in. +func load3(in []byte) int64 { + var r int64 + r = int64(in[0]) + r |= int64(in[1]) << 8 + r |= int64(in[2]) << 16 + return r +} + +// load4 reads a 32-bit, little-endian value from in. 
+func load4(in []byte) int64 { + var r int64 + r = int64(in[0]) + r |= int64(in[1]) << 8 + r |= int64(in[2]) << 16 + r |= int64(in[3]) << 24 + return r +} + +func feFromBytes(dst *fieldElement, src *[32]byte) { + h0 := load4(src[:]) + h1 := load3(src[4:]) << 6 + h2 := load3(src[7:]) << 5 + h3 := load3(src[10:]) << 3 + h4 := load3(src[13:]) << 2 + h5 := load4(src[16:]) + h6 := load3(src[20:]) << 7 + h7 := load3(src[23:]) << 5 + h8 := load3(src[26:]) << 4 + h9 := load3(src[29:]) << 2 + + var carry [10]int64 + carry[9] = (h9 + 1<<24) >> 25 + h0 += carry[9] * 19 + h9 -= carry[9] << 25 + carry[1] = (h1 + 1<<24) >> 25 + h2 += carry[1] + h1 -= carry[1] << 25 + carry[3] = (h3 + 1<<24) >> 25 + h4 += carry[3] + h3 -= carry[3] << 25 + carry[5] = (h5 + 1<<24) >> 25 + h6 += carry[5] + h5 -= carry[5] << 25 + carry[7] = (h7 + 1<<24) >> 25 + h8 += carry[7] + h7 -= carry[7] << 25 + + carry[0] = (h0 + 1<<25) >> 26 + h1 += carry[0] + h0 -= carry[0] << 26 + carry[2] = (h2 + 1<<25) >> 26 + h3 += carry[2] + h2 -= carry[2] << 26 + carry[4] = (h4 + 1<<25) >> 26 + h5 += carry[4] + h4 -= carry[4] << 26 + carry[6] = (h6 + 1<<25) >> 26 + h7 += carry[6] + h6 -= carry[6] << 26 + carry[8] = (h8 + 1<<25) >> 26 + h9 += carry[8] + h8 -= carry[8] << 26 + + dst[0] = int32(h0) + dst[1] = int32(h1) + dst[2] = int32(h2) + dst[3] = int32(h3) + dst[4] = int32(h4) + dst[5] = int32(h5) + dst[6] = int32(h6) + dst[7] = int32(h7) + dst[8] = int32(h8) + dst[9] = int32(h9) +} + +// feToBytes marshals h to s. +// Preconditions: +// |h| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc. +// +// Write p=2^255-19; q=floor(h/p). +// Basic claim: q = floor(2^(-255)(h + 19 2^(-25)h9 + 2^(-1))). +// +// Proof: +// Have |h|<=p so |q|<=1 so |19^2 2^(-255) q|<1/4. +// Also have |h-2^230 h9|<2^230 so |19 2^(-255)(h-2^230 h9)|<1/4. +// +// Write y=2^(-1)-19^2 2^(-255)q-19 2^(-255)(h-2^230 h9). +// Then 0<y<1. +// +// Write r=h-pq. +// Have 0<=r<=p-1=2^255-20. +// Thus 0<=r+19(2^-255)r<r+19(2^-255)2^255<=2^255-1. +// +// Write x=r+19(2^-255)r+y. +// Then 0<x<2^255 so floor(2^(-255)x) = 0 so floor(q+2^(-255)x) = q. +// +// Have q+2^(-255)x = 2^(-255)(h + 19 2^(-25) h9 + 2^(-1)) +// so floor(2^(-255)(h + 19 2^(-25) h9 + 2^(-1))) = q. +func feToBytes(s *[32]byte, h *fieldElement) { + var carry [10]int32 + + q := (19*h[9] + (1 << 24)) >> 25 + q = (h[0] + q) >> 26 + q = (h[1] + q) >> 25 + q = (h[2] + q) >> 26 + q = (h[3] + q) >> 25 + q = (h[4] + q) >> 26 + q = (h[5] + q) >> 25 + q = (h[6] + q) >> 26 + q = (h[7] + q) >> 25 + q = (h[8] + q) >> 26 + q = (h[9] + q) >> 25 + + // Goal: Output h-(2^255-19)q, which is between 0 and 2^255-20. + h[0] += 19 * q + // Goal: Output h-2^255 q, which is between 0 and 2^255-20. + + carry[0] = h[0] >> 26 + h[1] += carry[0] + h[0] -= carry[0] << 26 + carry[1] = h[1] >> 25 + h[2] += carry[1] + h[1] -= carry[1] << 25 + carry[2] = h[2] >> 26 + h[3] += carry[2] + h[2] -= carry[2] << 26 + carry[3] = h[3] >> 25 + h[4] += carry[3] + h[3] -= carry[3] << 25 + carry[4] = h[4] >> 26 + h[5] += carry[4] + h[4] -= carry[4] << 26 + carry[5] = h[5] >> 25 + h[6] += carry[5] + h[5] -= carry[5] << 25 + carry[6] = h[6] >> 26 + h[7] += carry[6] + h[6] -= carry[6] << 26 + carry[7] = h[7] >> 25 + h[8] += carry[7] + h[7] -= carry[7] << 25 + carry[8] = h[8] >> 26 + h[9] += carry[8] + h[8] -= carry[8] << 26 + carry[9] = h[9] >> 25 + h[9] -= carry[9] << 25 + // h10 = carry9 + + // Goal: Output h[0]+...+2^255 h10-2^255 q, which is between 0 and 2^255-20. + // Have h[0]+...+2^230 h[9] between 0 and 2^255-1; + // evidently 2^255 h10-2^255 q = 0. + // Goal: Output h[0]+...+2^230 h[9]. 
+ + s[0] = byte(h[0] >> 0) + s[1] = byte(h[0] >> 8) + s[2] = byte(h[0] >> 16) + s[3] = byte((h[0] >> 24) | (h[1] << 2)) + s[4] = byte(h[1] >> 6) + s[5] = byte(h[1] >> 14) + s[6] = byte((h[1] >> 22) | (h[2] << 3)) + s[7] = byte(h[2] >> 5) + s[8] = byte(h[2] >> 13) + s[9] = byte((h[2] >> 21) | (h[3] << 5)) + s[10] = byte(h[3] >> 3) + s[11] = byte(h[3] >> 11) + s[12] = byte((h[3] >> 19) | (h[4] << 6)) + s[13] = byte(h[4] >> 2) + s[14] = byte(h[4] >> 10) + s[15] = byte(h[4] >> 18) + s[16] = byte(h[5] >> 0) + s[17] = byte(h[5] >> 8) + s[18] = byte(h[5] >> 16) + s[19] = byte((h[5] >> 24) | (h[6] << 1)) + s[20] = byte(h[6] >> 7) + s[21] = byte(h[6] >> 15) + s[22] = byte((h[6] >> 23) | (h[7] << 3)) + s[23] = byte(h[7] >> 5) + s[24] = byte(h[7] >> 13) + s[25] = byte((h[7] >> 21) | (h[8] << 4)) + s[26] = byte(h[8] >> 4) + s[27] = byte(h[8] >> 12) + s[28] = byte((h[8] >> 20) | (h[9] << 6)) + s[29] = byte(h[9] >> 2) + s[30] = byte(h[9] >> 10) + s[31] = byte(h[9] >> 18) +} + +// feMul calculates h = f * g +// Can overlap h with f or g. +// +// Preconditions: +// |f| bounded by 1.1*2^26,1.1*2^25,1.1*2^26,1.1*2^25,etc. +// |g| bounded by 1.1*2^26,1.1*2^25,1.1*2^26,1.1*2^25,etc. +// +// Postconditions: +// |h| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc. +// +// Notes on implementation strategy: +// +// Using schoolbook multiplication. +// Karatsuba would save a little in some cost models. +// +// Most multiplications by 2 and 19 are 32-bit precomputations; +// cheaper than 64-bit postcomputations. +// +// There is one remaining multiplication by 19 in the carry chain; +// one *19 precomputation can be merged into this, +// but the resulting data flow is considerably less clean. +// +// There are 12 carries below. +// 10 of them are 2-way parallelizable and vectorizable. +// Can get away with 11 carries, but then data flow is much deeper. +// +// With tighter constraints on inputs can squeeze carries into int32. 
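Editor's note (illustrative, not part of the vendored file): the _19/_38 suffixes in feMul below record the modular folding of high limb products. Limb i of a fieldElement carries weight 2^ceil(25.5*i), so a cross term f_i*g_j with i+j >= 10 lands at weight 2^255 or 2^256 and is reduced via

\[
2^{255} \equiv 19, \qquad 2^{256} \equiv 38 \pmod{2^{255}-19}.
\]

For example, f2*g8 has weight $2^{51+204} = 2^{255}$ and folds to $19\,f_2 g_8$ (the precomputed g8_19), while f1*g9 has weight $2^{26+230} = 2^{256}$ and folds to $38\,f_1 g_9$ (f1g9_38, built from f1_2 and g9_19).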
+func feMul(h, f, g *fieldElement) { + f0 := f[0] + f1 := f[1] + f2 := f[2] + f3 := f[3] + f4 := f[4] + f5 := f[5] + f6 := f[6] + f7 := f[7] + f8 := f[8] + f9 := f[9] + g0 := g[0] + g1 := g[1] + g2 := g[2] + g3 := g[3] + g4 := g[4] + g5 := g[5] + g6 := g[6] + g7 := g[7] + g8 := g[8] + g9 := g[9] + g1_19 := 19 * g1 // 1.4*2^29 + g2_19 := 19 * g2 // 1.4*2^30; still ok + g3_19 := 19 * g3 + g4_19 := 19 * g4 + g5_19 := 19 * g5 + g6_19 := 19 * g6 + g7_19 := 19 * g7 + g8_19 := 19 * g8 + g9_19 := 19 * g9 + f1_2 := 2 * f1 + f3_2 := 2 * f3 + f5_2 := 2 * f5 + f7_2 := 2 * f7 + f9_2 := 2 * f9 + f0g0 := int64(f0) * int64(g0) + f0g1 := int64(f0) * int64(g1) + f0g2 := int64(f0) * int64(g2) + f0g3 := int64(f0) * int64(g3) + f0g4 := int64(f0) * int64(g4) + f0g5 := int64(f0) * int64(g5) + f0g6 := int64(f0) * int64(g6) + f0g7 := int64(f0) * int64(g7) + f0g8 := int64(f0) * int64(g8) + f0g9 := int64(f0) * int64(g9) + f1g0 := int64(f1) * int64(g0) + f1g1_2 := int64(f1_2) * int64(g1) + f1g2 := int64(f1) * int64(g2) + f1g3_2 := int64(f1_2) * int64(g3) + f1g4 := int64(f1) * int64(g4) + f1g5_2 := int64(f1_2) * int64(g5) + f1g6 := int64(f1) * int64(g6) + f1g7_2 := int64(f1_2) * int64(g7) + f1g8 := int64(f1) * int64(g8) + f1g9_38 := int64(f1_2) * int64(g9_19) + f2g0 := int64(f2) * int64(g0) + f2g1 := int64(f2) * int64(g1) + f2g2 := int64(f2) * int64(g2) + f2g3 := int64(f2) * int64(g3) + f2g4 := int64(f2) * int64(g4) + f2g5 := int64(f2) * int64(g5) + f2g6 := int64(f2) * int64(g6) + f2g7 := int64(f2) * int64(g7) + f2g8_19 := int64(f2) * int64(g8_19) + f2g9_19 := int64(f2) * int64(g9_19) + f3g0 := int64(f3) * int64(g0) + f3g1_2 := int64(f3_2) * int64(g1) + f3g2 := int64(f3) * int64(g2) + f3g3_2 := int64(f3_2) * int64(g3) + f3g4 := int64(f3) * int64(g4) + f3g5_2 := int64(f3_2) * int64(g5) + f3g6 := int64(f3) * int64(g6) + f3g7_38 := int64(f3_2) * int64(g7_19) + f3g8_19 := int64(f3) * int64(g8_19) + f3g9_38 := int64(f3_2) * int64(g9_19) + f4g0 := int64(f4) * int64(g0) + f4g1 := int64(f4) * int64(g1) + f4g2 := int64(f4) * int64(g2) + f4g3 := int64(f4) * int64(g3) + f4g4 := int64(f4) * int64(g4) + f4g5 := int64(f4) * int64(g5) + f4g6_19 := int64(f4) * int64(g6_19) + f4g7_19 := int64(f4) * int64(g7_19) + f4g8_19 := int64(f4) * int64(g8_19) + f4g9_19 := int64(f4) * int64(g9_19) + f5g0 := int64(f5) * int64(g0) + f5g1_2 := int64(f5_2) * int64(g1) + f5g2 := int64(f5) * int64(g2) + f5g3_2 := int64(f5_2) * int64(g3) + f5g4 := int64(f5) * int64(g4) + f5g5_38 := int64(f5_2) * int64(g5_19) + f5g6_19 := int64(f5) * int64(g6_19) + f5g7_38 := int64(f5_2) * int64(g7_19) + f5g8_19 := int64(f5) * int64(g8_19) + f5g9_38 := int64(f5_2) * int64(g9_19) + f6g0 := int64(f6) * int64(g0) + f6g1 := int64(f6) * int64(g1) + f6g2 := int64(f6) * int64(g2) + f6g3 := int64(f6) * int64(g3) + f6g4_19 := int64(f6) * int64(g4_19) + f6g5_19 := int64(f6) * int64(g5_19) + f6g6_19 := int64(f6) * int64(g6_19) + f6g7_19 := int64(f6) * int64(g7_19) + f6g8_19 := int64(f6) * int64(g8_19) + f6g9_19 := int64(f6) * int64(g9_19) + f7g0 := int64(f7) * int64(g0) + f7g1_2 := int64(f7_2) * int64(g1) + f7g2 := int64(f7) * int64(g2) + f7g3_38 := int64(f7_2) * int64(g3_19) + f7g4_19 := int64(f7) * int64(g4_19) + f7g5_38 := int64(f7_2) * int64(g5_19) + f7g6_19 := int64(f7) * int64(g6_19) + f7g7_38 := int64(f7_2) * int64(g7_19) + f7g8_19 := int64(f7) * int64(g8_19) + f7g9_38 := int64(f7_2) * int64(g9_19) + f8g0 := int64(f8) * int64(g0) + f8g1 := int64(f8) * int64(g1) + f8g2_19 := int64(f8) * int64(g2_19) + f8g3_19 := int64(f8) * int64(g3_19) + f8g4_19 := int64(f8) * int64(g4_19) + 
f8g5_19 := int64(f8) * int64(g5_19) + f8g6_19 := int64(f8) * int64(g6_19) + f8g7_19 := int64(f8) * int64(g7_19) + f8g8_19 := int64(f8) * int64(g8_19) + f8g9_19 := int64(f8) * int64(g9_19) + f9g0 := int64(f9) * int64(g0) + f9g1_38 := int64(f9_2) * int64(g1_19) + f9g2_19 := int64(f9) * int64(g2_19) + f9g3_38 := int64(f9_2) * int64(g3_19) + f9g4_19 := int64(f9) * int64(g4_19) + f9g5_38 := int64(f9_2) * int64(g5_19) + f9g6_19 := int64(f9) * int64(g6_19) + f9g7_38 := int64(f9_2) * int64(g7_19) + f9g8_19 := int64(f9) * int64(g8_19) + f9g9_38 := int64(f9_2) * int64(g9_19) + h0 := f0g0 + f1g9_38 + f2g8_19 + f3g7_38 + f4g6_19 + f5g5_38 + f6g4_19 + f7g3_38 + f8g2_19 + f9g1_38 + h1 := f0g1 + f1g0 + f2g9_19 + f3g8_19 + f4g7_19 + f5g6_19 + f6g5_19 + f7g4_19 + f8g3_19 + f9g2_19 + h2 := f0g2 + f1g1_2 + f2g0 + f3g9_38 + f4g8_19 + f5g7_38 + f6g6_19 + f7g5_38 + f8g4_19 + f9g3_38 + h3 := f0g3 + f1g2 + f2g1 + f3g0 + f4g9_19 + f5g8_19 + f6g7_19 + f7g6_19 + f8g5_19 + f9g4_19 + h4 := f0g4 + f1g3_2 + f2g2 + f3g1_2 + f4g0 + f5g9_38 + f6g8_19 + f7g7_38 + f8g6_19 + f9g5_38 + h5 := f0g5 + f1g4 + f2g3 + f3g2 + f4g1 + f5g0 + f6g9_19 + f7g8_19 + f8g7_19 + f9g6_19 + h6 := f0g6 + f1g5_2 + f2g4 + f3g3_2 + f4g2 + f5g1_2 + f6g0 + f7g9_38 + f8g8_19 + f9g7_38 + h7 := f0g7 + f1g6 + f2g5 + f3g4 + f4g3 + f5g2 + f6g1 + f7g0 + f8g9_19 + f9g8_19 + h8 := f0g8 + f1g7_2 + f2g6 + f3g5_2 + f4g4 + f5g3_2 + f6g2 + f7g1_2 + f8g0 + f9g9_38 + h9 := f0g9 + f1g8 + f2g7 + f3g6 + f4g5 + f5g4 + f6g3 + f7g2 + f8g1 + f9g0 + var carry [10]int64 + + // |h0| <= (1.1*1.1*2^52*(1+19+19+19+19)+1.1*1.1*2^50*(38+38+38+38+38)) + // i.e. |h0| <= 1.2*2^59; narrower ranges for h2, h4, h6, h8 + // |h1| <= (1.1*1.1*2^51*(1+1+19+19+19+19+19+19+19+19)) + // i.e. |h1| <= 1.5*2^58; narrower ranges for h3, h5, h7, h9 + + carry[0] = (h0 + (1 << 25)) >> 26 + h1 += carry[0] + h0 -= carry[0] << 26 + carry[4] = (h4 + (1 << 25)) >> 26 + h5 += carry[4] + h4 -= carry[4] << 26 + // |h0| <= 2^25 + // |h4| <= 2^25 + // |h1| <= 1.51*2^58 + // |h5| <= 1.51*2^58 + + carry[1] = (h1 + (1 << 24)) >> 25 + h2 += carry[1] + h1 -= carry[1] << 25 + carry[5] = (h5 + (1 << 24)) >> 25 + h6 += carry[5] + h5 -= carry[5] << 25 + // |h1| <= 2^24; from now on fits into int32 + // |h5| <= 2^24; from now on fits into int32 + // |h2| <= 1.21*2^59 + // |h6| <= 1.21*2^59 + + carry[2] = (h2 + (1 << 25)) >> 26 + h3 += carry[2] + h2 -= carry[2] << 26 + carry[6] = (h6 + (1 << 25)) >> 26 + h7 += carry[6] + h6 -= carry[6] << 26 + // |h2| <= 2^25; from now on fits into int32 unchanged + // |h6| <= 2^25; from now on fits into int32 unchanged + // |h3| <= 1.51*2^58 + // |h7| <= 1.51*2^58 + + carry[3] = (h3 + (1 << 24)) >> 25 + h4 += carry[3] + h3 -= carry[3] << 25 + carry[7] = (h7 + (1 << 24)) >> 25 + h8 += carry[7] + h7 -= carry[7] << 25 + // |h3| <= 2^24; from now on fits into int32 unchanged + // |h7| <= 2^24; from now on fits into int32 unchanged + // |h4| <= 1.52*2^33 + // |h8| <= 1.52*2^33 + + carry[4] = (h4 + (1 << 25)) >> 26 + h5 += carry[4] + h4 -= carry[4] << 26 + carry[8] = (h8 + (1 << 25)) >> 26 + h9 += carry[8] + h8 -= carry[8] << 26 + // |h4| <= 2^25; from now on fits into int32 unchanged + // |h8| <= 2^25; from now on fits into int32 unchanged + // |h5| <= 1.01*2^24 + // |h9| <= 1.51*2^58 + + carry[9] = (h9 + (1 << 24)) >> 25 + h0 += carry[9] * 19 + h9 -= carry[9] << 25 + // |h9| <= 2^24; from now on fits into int32 unchanged + // |h0| <= 1.8*2^37 + + carry[0] = (h0 + (1 << 25)) >> 26 + h1 += carry[0] + h0 -= carry[0] << 26 + // |h0| <= 2^25; from now on fits into int32 unchanged + // |h1| <= 
1.01*2^24 + + h[0] = int32(h0) + h[1] = int32(h1) + h[2] = int32(h2) + h[3] = int32(h3) + h[4] = int32(h4) + h[5] = int32(h5) + h[6] = int32(h6) + h[7] = int32(h7) + h[8] = int32(h8) + h[9] = int32(h9) +} + +// feSquare calculates h = f*f. Can overlap h with f. +// +// Preconditions: +// |f| bounded by 1.1*2^26,1.1*2^25,1.1*2^26,1.1*2^25,etc. +// +// Postconditions: +// |h| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc. +func feSquare(h, f *fieldElement) { + f0 := f[0] + f1 := f[1] + f2 := f[2] + f3 := f[3] + f4 := f[4] + f5 := f[5] + f6 := f[6] + f7 := f[7] + f8 := f[8] + f9 := f[9] + f0_2 := 2 * f0 + f1_2 := 2 * f1 + f2_2 := 2 * f2 + f3_2 := 2 * f3 + f4_2 := 2 * f4 + f5_2 := 2 * f5 + f6_2 := 2 * f6 + f7_2 := 2 * f7 + f5_38 := 38 * f5 // 1.31*2^30 + f6_19 := 19 * f6 // 1.31*2^30 + f7_38 := 38 * f7 // 1.31*2^30 + f8_19 := 19 * f8 // 1.31*2^30 + f9_38 := 38 * f9 // 1.31*2^30 + f0f0 := int64(f0) * int64(f0) + f0f1_2 := int64(f0_2) * int64(f1) + f0f2_2 := int64(f0_2) * int64(f2) + f0f3_2 := int64(f0_2) * int64(f3) + f0f4_2 := int64(f0_2) * int64(f4) + f0f5_2 := int64(f0_2) * int64(f5) + f0f6_2 := int64(f0_2) * int64(f6) + f0f7_2 := int64(f0_2) * int64(f7) + f0f8_2 := int64(f0_2) * int64(f8) + f0f9_2 := int64(f0_2) * int64(f9) + f1f1_2 := int64(f1_2) * int64(f1) + f1f2_2 := int64(f1_2) * int64(f2) + f1f3_4 := int64(f1_2) * int64(f3_2) + f1f4_2 := int64(f1_2) * int64(f4) + f1f5_4 := int64(f1_2) * int64(f5_2) + f1f6_2 := int64(f1_2) * int64(f6) + f1f7_4 := int64(f1_2) * int64(f7_2) + f1f8_2 := int64(f1_2) * int64(f8) + f1f9_76 := int64(f1_2) * int64(f9_38) + f2f2 := int64(f2) * int64(f2) + f2f3_2 := int64(f2_2) * int64(f3) + f2f4_2 := int64(f2_2) * int64(f4) + f2f5_2 := int64(f2_2) * int64(f5) + f2f6_2 := int64(f2_2) * int64(f6) + f2f7_2 := int64(f2_2) * int64(f7) + f2f8_38 := int64(f2_2) * int64(f8_19) + f2f9_38 := int64(f2) * int64(f9_38) + f3f3_2 := int64(f3_2) * int64(f3) + f3f4_2 := int64(f3_2) * int64(f4) + f3f5_4 := int64(f3_2) * int64(f5_2) + f3f6_2 := int64(f3_2) * int64(f6) + f3f7_76 := int64(f3_2) * int64(f7_38) + f3f8_38 := int64(f3_2) * int64(f8_19) + f3f9_76 := int64(f3_2) * int64(f9_38) + f4f4 := int64(f4) * int64(f4) + f4f5_2 := int64(f4_2) * int64(f5) + f4f6_38 := int64(f4_2) * int64(f6_19) + f4f7_38 := int64(f4) * int64(f7_38) + f4f8_38 := int64(f4_2) * int64(f8_19) + f4f9_38 := int64(f4) * int64(f9_38) + f5f5_38 := int64(f5) * int64(f5_38) + f5f6_38 := int64(f5_2) * int64(f6_19) + f5f7_76 := int64(f5_2) * int64(f7_38) + f5f8_38 := int64(f5_2) * int64(f8_19) + f5f9_76 := int64(f5_2) * int64(f9_38) + f6f6_19 := int64(f6) * int64(f6_19) + f6f7_38 := int64(f6) * int64(f7_38) + f6f8_38 := int64(f6_2) * int64(f8_19) + f6f9_38 := int64(f6) * int64(f9_38) + f7f7_38 := int64(f7) * int64(f7_38) + f7f8_38 := int64(f7_2) * int64(f8_19) + f7f9_76 := int64(f7_2) * int64(f9_38) + f8f8_19 := int64(f8) * int64(f8_19) + f8f9_38 := int64(f8) * int64(f9_38) + f9f9_38 := int64(f9) * int64(f9_38) + h0 := f0f0 + f1f9_76 + f2f8_38 + f3f7_76 + f4f6_38 + f5f5_38 + h1 := f0f1_2 + f2f9_38 + f3f8_38 + f4f7_38 + f5f6_38 + h2 := f0f2_2 + f1f1_2 + f3f9_76 + f4f8_38 + f5f7_76 + f6f6_19 + h3 := f0f3_2 + f1f2_2 + f4f9_38 + f5f8_38 + f6f7_38 + h4 := f0f4_2 + f1f3_4 + f2f2 + f5f9_76 + f6f8_38 + f7f7_38 + h5 := f0f5_2 + f1f4_2 + f2f3_2 + f6f9_38 + f7f8_38 + h6 := f0f6_2 + f1f5_4 + f2f4_2 + f3f3_2 + f7f9_76 + f8f8_19 + h7 := f0f7_2 + f1f6_2 + f2f5_2 + f3f4_2 + f8f9_38 + h8 := f0f8_2 + f1f7_4 + f2f6_2 + f3f5_4 + f4f4 + f9f9_38 + h9 := f0f9_2 + f1f8_2 + f2f7_2 + f3f6_2 + f4f5_2 + var carry [10]int64 + + carry[0] 
= (h0 + (1 << 25)) >> 26 + h1 += carry[0] + h0 -= carry[0] << 26 + carry[4] = (h4 + (1 << 25)) >> 26 + h5 += carry[4] + h4 -= carry[4] << 26 + + carry[1] = (h1 + (1 << 24)) >> 25 + h2 += carry[1] + h1 -= carry[1] << 25 + carry[5] = (h5 + (1 << 24)) >> 25 + h6 += carry[5] + h5 -= carry[5] << 25 + + carry[2] = (h2 + (1 << 25)) >> 26 + h3 += carry[2] + h2 -= carry[2] << 26 + carry[6] = (h6 + (1 << 25)) >> 26 + h7 += carry[6] + h6 -= carry[6] << 26 + + carry[3] = (h3 + (1 << 24)) >> 25 + h4 += carry[3] + h3 -= carry[3] << 25 + carry[7] = (h7 + (1 << 24)) >> 25 + h8 += carry[7] + h7 -= carry[7] << 25 + + carry[4] = (h4 + (1 << 25)) >> 26 + h5 += carry[4] + h4 -= carry[4] << 26 + carry[8] = (h8 + (1 << 25)) >> 26 + h9 += carry[8] + h8 -= carry[8] << 26 + + carry[9] = (h9 + (1 << 24)) >> 25 + h0 += carry[9] * 19 + h9 -= carry[9] << 25 + + carry[0] = (h0 + (1 << 25)) >> 26 + h1 += carry[0] + h0 -= carry[0] << 26 + + h[0] = int32(h0) + h[1] = int32(h1) + h[2] = int32(h2) + h[3] = int32(h3) + h[4] = int32(h4) + h[5] = int32(h5) + h[6] = int32(h6) + h[7] = int32(h7) + h[8] = int32(h8) + h[9] = int32(h9) +} + +// feMul121666 calculates h = f * 121666. Can overlap h with f. +// +// Preconditions: +// |f| bounded by 1.1*2^26,1.1*2^25,1.1*2^26,1.1*2^25,etc. +// +// Postconditions: +// |h| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc. +func feMul121666(h, f *fieldElement) { + h0 := int64(f[0]) * 121666 + h1 := int64(f[1]) * 121666 + h2 := int64(f[2]) * 121666 + h3 := int64(f[3]) * 121666 + h4 := int64(f[4]) * 121666 + h5 := int64(f[5]) * 121666 + h6 := int64(f[6]) * 121666 + h7 := int64(f[7]) * 121666 + h8 := int64(f[8]) * 121666 + h9 := int64(f[9]) * 121666 + var carry [10]int64 + + carry[9] = (h9 + (1 << 24)) >> 25 + h0 += carry[9] * 19 + h9 -= carry[9] << 25 + carry[1] = (h1 + (1 << 24)) >> 25 + h2 += carry[1] + h1 -= carry[1] << 25 + carry[3] = (h3 + (1 << 24)) >> 25 + h4 += carry[3] + h3 -= carry[3] << 25 + carry[5] = (h5 + (1 << 24)) >> 25 + h6 += carry[5] + h5 -= carry[5] << 25 + carry[7] = (h7 + (1 << 24)) >> 25 + h8 += carry[7] + h7 -= carry[7] << 25 + + carry[0] = (h0 + (1 << 25)) >> 26 + h1 += carry[0] + h0 -= carry[0] << 26 + carry[2] = (h2 + (1 << 25)) >> 26 + h3 += carry[2] + h2 -= carry[2] << 26 + carry[4] = (h4 + (1 << 25)) >> 26 + h5 += carry[4] + h4 -= carry[4] << 26 + carry[6] = (h6 + (1 << 25)) >> 26 + h7 += carry[6] + h6 -= carry[6] << 26 + carry[8] = (h8 + (1 << 25)) >> 26 + h9 += carry[8] + h8 -= carry[8] << 26 + + h[0] = int32(h0) + h[1] = int32(h1) + h[2] = int32(h2) + h[3] = int32(h3) + h[4] = int32(h4) + h[5] = int32(h5) + h[6] = int32(h6) + h[7] = int32(h7) + h[8] = int32(h8) + h[9] = int32(h9) +} + +// feInvert sets out = z^-1. 
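Editor's note (illustrative, not part of the vendored file): feInvert is a Fermat inversion. Since $p = 2^{255}-19$ is prime,

\[
z^{-1} \equiv z^{p-2} = z^{2^{255}-21} \pmod{2^{255}-19},
\]

and the fixed ladder of feSquare/feMul calls below is the ref10 addition chain for that exponent, which is why the loop bounds run through 2, 5, 10, 20, 50, and 100 squarings.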
+func feInvert(out, z *fieldElement) { + var t0, t1, t2, t3 fieldElement + var i int + + feSquare(&t0, z) + for i = 1; i < 1; i++ { + feSquare(&t0, &t0) + } + feSquare(&t1, &t0) + for i = 1; i < 2; i++ { + feSquare(&t1, &t1) + } + feMul(&t1, z, &t1) + feMul(&t0, &t0, &t1) + feSquare(&t2, &t0) + for i = 1; i < 1; i++ { + feSquare(&t2, &t2) + } + feMul(&t1, &t1, &t2) + feSquare(&t2, &t1) + for i = 1; i < 5; i++ { + feSquare(&t2, &t2) + } + feMul(&t1, &t2, &t1) + feSquare(&t2, &t1) + for i = 1; i < 10; i++ { + feSquare(&t2, &t2) + } + feMul(&t2, &t2, &t1) + feSquare(&t3, &t2) + for i = 1; i < 20; i++ { + feSquare(&t3, &t3) + } + feMul(&t2, &t3, &t2) + feSquare(&t2, &t2) + for i = 1; i < 10; i++ { + feSquare(&t2, &t2) + } + feMul(&t1, &t2, &t1) + feSquare(&t2, &t1) + for i = 1; i < 50; i++ { + feSquare(&t2, &t2) + } + feMul(&t2, &t2, &t1) + feSquare(&t3, &t2) + for i = 1; i < 100; i++ { + feSquare(&t3, &t3) + } + feMul(&t2, &t3, &t2) + feSquare(&t2, &t2) + for i = 1; i < 50; i++ { + feSquare(&t2, &t2) + } + feMul(&t1, &t2, &t1) + feSquare(&t1, &t1) + for i = 1; i < 5; i++ { + feSquare(&t1, &t1) + } + feMul(out, &t1, &t0) +} + +func scalarMult(out, in, base *[32]byte) { + var e [32]byte + + copy(e[:], in[:]) + e[0] &= 248 + e[31] &= 127 + e[31] |= 64 + + var x1, x2, z2, x3, z3, tmp0, tmp1 fieldElement + feFromBytes(&x1, base) + feOne(&x2) + feCopy(&x3, &x1) + feOne(&z3) + + swap := int32(0) + for pos := 254; pos >= 0; pos-- { + b := e[pos/8] >> uint(pos&7) + b &= 1 + swap ^= int32(b) + feCSwap(&x2, &x3, swap) + feCSwap(&z2, &z3, swap) + swap = int32(b) + + feSub(&tmp0, &x3, &z3) + feSub(&tmp1, &x2, &z2) + feAdd(&x2, &x2, &z2) + feAdd(&z2, &x3, &z3) + feMul(&z3, &tmp0, &x2) + feMul(&z2, &z2, &tmp1) + feSquare(&tmp0, &tmp1) + feSquare(&tmp1, &x2) + feAdd(&x3, &z3, &z2) + feSub(&z2, &z3, &z2) + feMul(&x2, &tmp1, &tmp0) + feSub(&tmp1, &tmp1, &tmp0) + feSquare(&z2, &z2) + feMul121666(&z3, &tmp1) + feSquare(&x3, &x3) + feAdd(&tmp0, &tmp0, &z3) + feMul(&z3, &x1, &z2) + feMul(&z2, &tmp1, &tmp0) + } + + feCSwap(&x2, &x3, swap) + feCSwap(&z2, &z3, swap) + + feInvert(&z2, &z2) + feMul(&x2, &x2, &z2) + feToBytes(out, &x2) +} diff --git a/vendor/golang.org/x/crypto/curve25519/curve25519_test.go b/vendor/golang.org/x/crypto/curve25519/curve25519_test.go new file mode 100644 index 000000000..14b0ee87c --- /dev/null +++ b/vendor/golang.org/x/crypto/curve25519/curve25519_test.go @@ -0,0 +1,29 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package curve25519 + +import ( + "fmt" + "testing" +) + +const expectedHex = "89161fde887b2b53de549af483940106ecc114d6982daa98256de23bdf77661a" + +func TestBaseScalarMult(t *testing.T) { + var a, b [32]byte + in := &a + out := &b + a[0] = 1 + + for i := 0; i < 200; i++ { + ScalarBaseMult(out, in) + in, out = out, in + } + + result := fmt.Sprintf("%x", in[:]) + if result != expectedHex { + t.Errorf("incorrect result: got %s, want %s", result, expectedHex) + } +} diff --git a/vendor/golang.org/x/crypto/curve25519/doc.go b/vendor/golang.org/x/crypto/curve25519/doc.go new file mode 100644 index 000000000..f7db9c1ce --- /dev/null +++ b/vendor/golang.org/x/crypto/curve25519/doc.go @@ -0,0 +1,23 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// Package curve25519 provides an implementation of scalar multiplication on +// the elliptic curve known as curve25519. See http://cr.yp.to/ecdh.html +package curve25519 + +// basePoint is the x coordinate of the generator of the curve. +var basePoint = [32]byte{9, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} + +// ScalarMult sets dst to the product in*base where dst and base are the x +// coordinates of group points and all values are in little-endian form. +func ScalarMult(dst, in, base *[32]byte) { + scalarMult(dst, in, base) +} + +// ScalarBaseMult sets dst to the product in*base where dst and base are the x +// coordinates of group points, base is the standard generator and all values +// are in little-endian form. +func ScalarBaseMult(dst, in *[32]byte) { + ScalarMult(dst, in, &basePoint) +} diff --git a/vendor/golang.org/x/crypto/curve25519/freeze_amd64.s b/vendor/golang.org/x/crypto/curve25519/freeze_amd64.s new file mode 100644 index 000000000..37599fac0 --- /dev/null +++ b/vendor/golang.org/x/crypto/curve25519/freeze_amd64.s @@ -0,0 +1,94 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// This code was translated into a form compatible with 6a from the public +// domain sources in SUPERCOP: http://bench.cr.yp.to/supercop.html + +// +build amd64,!gccgo,!appengine + +// func freeze(inout *[5]uint64) +TEXT ·freeze(SB),7,$96-8 + MOVQ inout+0(FP), DI + + MOVQ SP,R11 + MOVQ $31,CX + NOTQ CX + ANDQ CX,SP + ADDQ $32,SP + + MOVQ R11,0(SP) + MOVQ R12,8(SP) + MOVQ R13,16(SP) + MOVQ R14,24(SP) + MOVQ R15,32(SP) + MOVQ BX,40(SP) + MOVQ BP,48(SP) + MOVQ 0(DI),SI + MOVQ 8(DI),DX + MOVQ 16(DI),CX + MOVQ 24(DI),R8 + MOVQ 32(DI),R9 + MOVQ ·REDMASK51(SB),AX + MOVQ AX,R10 + SUBQ $18,R10 + MOVQ $3,R11 +REDUCELOOP: + MOVQ SI,R12 + SHRQ $51,R12 + ANDQ AX,SI + ADDQ R12,DX + MOVQ DX,R12 + SHRQ $51,R12 + ANDQ AX,DX + ADDQ R12,CX + MOVQ CX,R12 + SHRQ $51,R12 + ANDQ AX,CX + ADDQ R12,R8 + MOVQ R8,R12 + SHRQ $51,R12 + ANDQ AX,R8 + ADDQ R12,R9 + MOVQ R9,R12 + SHRQ $51,R12 + ANDQ AX,R9 + IMUL3Q $19,R12,R12 + ADDQ R12,SI + SUBQ $1,R11 + JA REDUCELOOP + MOVQ $1,R12 + CMPQ R10,SI + CMOVQLT R11,R12 + CMPQ AX,DX + CMOVQNE R11,R12 + CMPQ AX,CX + CMOVQNE R11,R12 + CMPQ AX,R8 + CMOVQNE R11,R12 + CMPQ AX,R9 + CMOVQNE R11,R12 + NEGQ R12 + ANDQ R12,AX + ANDQ R12,R10 + SUBQ R10,SI + SUBQ AX,DX + SUBQ AX,CX + SUBQ AX,R8 + SUBQ AX,R9 + MOVQ SI,0(DI) + MOVQ DX,8(DI) + MOVQ CX,16(DI) + MOVQ R8,24(DI) + MOVQ R9,32(DI) + MOVQ 0(SP),R11 + MOVQ 8(SP),R12 + MOVQ 16(SP),R13 + MOVQ 24(SP),R14 + MOVQ 32(SP),R15 + MOVQ 40(SP),BX + MOVQ 48(SP),BP + MOVQ R11,SP + MOVQ DI,AX + MOVQ SI,DX + RET diff --git a/vendor/golang.org/x/crypto/curve25519/ladderstep_amd64.s b/vendor/golang.org/x/crypto/curve25519/ladderstep_amd64.s new file mode 100644 index 000000000..3949f9cfa --- /dev/null +++ b/vendor/golang.org/x/crypto/curve25519/ladderstep_amd64.s @@ -0,0 +1,1398 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
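Editor's note: a self-contained sketch (not part of the vendored files) of the exported API defined in doc.go above, wiring ScalarBaseMult and ScalarMult into a Diffie-Hellman style exchange. Clamping of the private scalars happens inside scalarMult, so freshly generated random bytes can be passed directly; the key-agreement framing itself is illustrative.

package main

import (
	"crypto/rand"
	"fmt"

	"golang.org/x/crypto/curve25519"
)

func main() {
	var aPriv, bPriv, aPub, bPub, aShared, bShared [32]byte
	if _, err := rand.Read(aPriv[:]); err != nil {
		panic(err)
	}
	if _, err := rand.Read(bPriv[:]); err != nil {
		panic(err)
	}

	// Public values: scalar times the standard generator (x = 9).
	curve25519.ScalarBaseMult(&aPub, &aPriv)
	curve25519.ScalarBaseMult(&bPub, &bPriv)

	// Each side combines its private scalar with the peer's public value.
	curve25519.ScalarMult(&aShared, &aPriv, &bPub)
	curve25519.ScalarMult(&bShared, &bPriv, &aPub)

	fmt.Println(aShared == bShared) // prints true: both sides derive the same secret
}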
+ +// This code was translated into a form compatible with 6a from the public +// domain sources in SUPERCOP: http://bench.cr.yp.to/supercop.html + +// +build amd64,!gccgo,!appengine + +// func ladderstep(inout *[5][5]uint64) +TEXT ·ladderstep(SB),0,$384-8 + MOVQ inout+0(FP),DI + + MOVQ SP,R11 + MOVQ $31,CX + NOTQ CX + ANDQ CX,SP + ADDQ $32,SP + + MOVQ R11,0(SP) + MOVQ R12,8(SP) + MOVQ R13,16(SP) + MOVQ R14,24(SP) + MOVQ R15,32(SP) + MOVQ BX,40(SP) + MOVQ BP,48(SP) + MOVQ 40(DI),SI + MOVQ 48(DI),DX + MOVQ 56(DI),CX + MOVQ 64(DI),R8 + MOVQ 72(DI),R9 + MOVQ SI,AX + MOVQ DX,R10 + MOVQ CX,R11 + MOVQ R8,R12 + MOVQ R9,R13 + ADDQ ·_2P0(SB),AX + ADDQ ·_2P1234(SB),R10 + ADDQ ·_2P1234(SB),R11 + ADDQ ·_2P1234(SB),R12 + ADDQ ·_2P1234(SB),R13 + ADDQ 80(DI),SI + ADDQ 88(DI),DX + ADDQ 96(DI),CX + ADDQ 104(DI),R8 + ADDQ 112(DI),R9 + SUBQ 80(DI),AX + SUBQ 88(DI),R10 + SUBQ 96(DI),R11 + SUBQ 104(DI),R12 + SUBQ 112(DI),R13 + MOVQ SI,56(SP) + MOVQ DX,64(SP) + MOVQ CX,72(SP) + MOVQ R8,80(SP) + MOVQ R9,88(SP) + MOVQ AX,96(SP) + MOVQ R10,104(SP) + MOVQ R11,112(SP) + MOVQ R12,120(SP) + MOVQ R13,128(SP) + MOVQ 96(SP),AX + MULQ 96(SP) + MOVQ AX,SI + MOVQ DX,CX + MOVQ 96(SP),AX + SHLQ $1,AX + MULQ 104(SP) + MOVQ AX,R8 + MOVQ DX,R9 + MOVQ 96(SP),AX + SHLQ $1,AX + MULQ 112(SP) + MOVQ AX,R10 + MOVQ DX,R11 + MOVQ 96(SP),AX + SHLQ $1,AX + MULQ 120(SP) + MOVQ AX,R12 + MOVQ DX,R13 + MOVQ 96(SP),AX + SHLQ $1,AX + MULQ 128(SP) + MOVQ AX,R14 + MOVQ DX,R15 + MOVQ 104(SP),AX + MULQ 104(SP) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 104(SP),AX + SHLQ $1,AX + MULQ 112(SP) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ 104(SP),AX + SHLQ $1,AX + MULQ 120(SP) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 104(SP),DX + IMUL3Q $38,DX,AX + MULQ 128(SP) + ADDQ AX,SI + ADCQ DX,CX + MOVQ 112(SP),AX + MULQ 112(SP) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 112(SP),DX + IMUL3Q $38,DX,AX + MULQ 120(SP) + ADDQ AX,SI + ADCQ DX,CX + MOVQ 112(SP),DX + IMUL3Q $38,DX,AX + MULQ 128(SP) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 120(SP),DX + IMUL3Q $19,DX,AX + MULQ 120(SP) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 120(SP),DX + IMUL3Q $38,DX,AX + MULQ 128(SP) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 128(SP),DX + IMUL3Q $19,DX,AX + MULQ 128(SP) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ ·REDMASK51(SB),DX + SHLQ $13,CX:SI + ANDQ DX,SI + SHLQ $13,R9:R8 + ANDQ DX,R8 + ADDQ CX,R8 + SHLQ $13,R11:R10 + ANDQ DX,R10 + ADDQ R9,R10 + SHLQ $13,R13:R12 + ANDQ DX,R12 + ADDQ R11,R12 + SHLQ $13,R15:R14 + ANDQ DX,R14 + ADDQ R13,R14 + IMUL3Q $19,R15,CX + ADDQ CX,SI + MOVQ SI,CX + SHRQ $51,CX + ADDQ R8,CX + ANDQ DX,SI + MOVQ CX,R8 + SHRQ $51,CX + ADDQ R10,CX + ANDQ DX,R8 + MOVQ CX,R9 + SHRQ $51,CX + ADDQ R12,CX + ANDQ DX,R9 + MOVQ CX,AX + SHRQ $51,CX + ADDQ R14,CX + ANDQ DX,AX + MOVQ CX,R10 + SHRQ $51,CX + IMUL3Q $19,CX,CX + ADDQ CX,SI + ANDQ DX,R10 + MOVQ SI,136(SP) + MOVQ R8,144(SP) + MOVQ R9,152(SP) + MOVQ AX,160(SP) + MOVQ R10,168(SP) + MOVQ 56(SP),AX + MULQ 56(SP) + MOVQ AX,SI + MOVQ DX,CX + MOVQ 56(SP),AX + SHLQ $1,AX + MULQ 64(SP) + MOVQ AX,R8 + MOVQ DX,R9 + MOVQ 56(SP),AX + SHLQ $1,AX + MULQ 72(SP) + MOVQ AX,R10 + MOVQ DX,R11 + MOVQ 56(SP),AX + SHLQ $1,AX + MULQ 80(SP) + MOVQ AX,R12 + MOVQ DX,R13 + MOVQ 56(SP),AX + SHLQ $1,AX + MULQ 88(SP) + MOVQ AX,R14 + MOVQ DX,R15 + MOVQ 64(SP),AX + MULQ 64(SP) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 64(SP),AX + SHLQ $1,AX + MULQ 72(SP) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ 64(SP),AX + SHLQ $1,AX + MULQ 80(SP) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 64(SP),DX + IMUL3Q $38,DX,AX + MULQ 88(SP) + ADDQ AX,SI + ADCQ DX,CX + MOVQ 72(SP),AX + MULQ 72(SP) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 72(SP),DX + IMUL3Q 
$38,DX,AX + MULQ 80(SP) + ADDQ AX,SI + ADCQ DX,CX + MOVQ 72(SP),DX + IMUL3Q $38,DX,AX + MULQ 88(SP) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 80(SP),DX + IMUL3Q $19,DX,AX + MULQ 80(SP) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 80(SP),DX + IMUL3Q $38,DX,AX + MULQ 88(SP) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 88(SP),DX + IMUL3Q $19,DX,AX + MULQ 88(SP) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ ·REDMASK51(SB),DX + SHLQ $13,CX:SI + ANDQ DX,SI + SHLQ $13,R9:R8 + ANDQ DX,R8 + ADDQ CX,R8 + SHLQ $13,R11:R10 + ANDQ DX,R10 + ADDQ R9,R10 + SHLQ $13,R13:R12 + ANDQ DX,R12 + ADDQ R11,R12 + SHLQ $13,R15:R14 + ANDQ DX,R14 + ADDQ R13,R14 + IMUL3Q $19,R15,CX + ADDQ CX,SI + MOVQ SI,CX + SHRQ $51,CX + ADDQ R8,CX + ANDQ DX,SI + MOVQ CX,R8 + SHRQ $51,CX + ADDQ R10,CX + ANDQ DX,R8 + MOVQ CX,R9 + SHRQ $51,CX + ADDQ R12,CX + ANDQ DX,R9 + MOVQ CX,AX + SHRQ $51,CX + ADDQ R14,CX + ANDQ DX,AX + MOVQ CX,R10 + SHRQ $51,CX + IMUL3Q $19,CX,CX + ADDQ CX,SI + ANDQ DX,R10 + MOVQ SI,176(SP) + MOVQ R8,184(SP) + MOVQ R9,192(SP) + MOVQ AX,200(SP) + MOVQ R10,208(SP) + MOVQ SI,SI + MOVQ R8,DX + MOVQ R9,CX + MOVQ AX,R8 + MOVQ R10,R9 + ADDQ ·_2P0(SB),SI + ADDQ ·_2P1234(SB),DX + ADDQ ·_2P1234(SB),CX + ADDQ ·_2P1234(SB),R8 + ADDQ ·_2P1234(SB),R9 + SUBQ 136(SP),SI + SUBQ 144(SP),DX + SUBQ 152(SP),CX + SUBQ 160(SP),R8 + SUBQ 168(SP),R9 + MOVQ SI,216(SP) + MOVQ DX,224(SP) + MOVQ CX,232(SP) + MOVQ R8,240(SP) + MOVQ R9,248(SP) + MOVQ 120(DI),SI + MOVQ 128(DI),DX + MOVQ 136(DI),CX + MOVQ 144(DI),R8 + MOVQ 152(DI),R9 + MOVQ SI,AX + MOVQ DX,R10 + MOVQ CX,R11 + MOVQ R8,R12 + MOVQ R9,R13 + ADDQ ·_2P0(SB),AX + ADDQ ·_2P1234(SB),R10 + ADDQ ·_2P1234(SB),R11 + ADDQ ·_2P1234(SB),R12 + ADDQ ·_2P1234(SB),R13 + ADDQ 160(DI),SI + ADDQ 168(DI),DX + ADDQ 176(DI),CX + ADDQ 184(DI),R8 + ADDQ 192(DI),R9 + SUBQ 160(DI),AX + SUBQ 168(DI),R10 + SUBQ 176(DI),R11 + SUBQ 184(DI),R12 + SUBQ 192(DI),R13 + MOVQ SI,256(SP) + MOVQ DX,264(SP) + MOVQ CX,272(SP) + MOVQ R8,280(SP) + MOVQ R9,288(SP) + MOVQ AX,296(SP) + MOVQ R10,304(SP) + MOVQ R11,312(SP) + MOVQ R12,320(SP) + MOVQ R13,328(SP) + MOVQ 280(SP),SI + IMUL3Q $19,SI,AX + MOVQ AX,336(SP) + MULQ 112(SP) + MOVQ AX,SI + MOVQ DX,CX + MOVQ 288(SP),DX + IMUL3Q $19,DX,AX + MOVQ AX,344(SP) + MULQ 104(SP) + ADDQ AX,SI + ADCQ DX,CX + MOVQ 256(SP),AX + MULQ 96(SP) + ADDQ AX,SI + ADCQ DX,CX + MOVQ 256(SP),AX + MULQ 104(SP) + MOVQ AX,R8 + MOVQ DX,R9 + MOVQ 256(SP),AX + MULQ 112(SP) + MOVQ AX,R10 + MOVQ DX,R11 + MOVQ 256(SP),AX + MULQ 120(SP) + MOVQ AX,R12 + MOVQ DX,R13 + MOVQ 256(SP),AX + MULQ 128(SP) + MOVQ AX,R14 + MOVQ DX,R15 + MOVQ 264(SP),AX + MULQ 96(SP) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 264(SP),AX + MULQ 104(SP) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 264(SP),AX + MULQ 112(SP) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ 264(SP),AX + MULQ 120(SP) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 264(SP),DX + IMUL3Q $19,DX,AX + MULQ 128(SP) + ADDQ AX,SI + ADCQ DX,CX + MOVQ 272(SP),AX + MULQ 96(SP) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 272(SP),AX + MULQ 104(SP) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ 272(SP),AX + MULQ 112(SP) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 272(SP),DX + IMUL3Q $19,DX,AX + MULQ 120(SP) + ADDQ AX,SI + ADCQ DX,CX + MOVQ 272(SP),DX + IMUL3Q $19,DX,AX + MULQ 128(SP) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 280(SP),AX + MULQ 96(SP) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ 280(SP),AX + MULQ 104(SP) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 336(SP),AX + MULQ 120(SP) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 336(SP),AX + MULQ 128(SP) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 288(SP),AX + MULQ 96(SP) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 344(SP),AX + MULQ 112(SP) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 344(SP),AX + MULQ 120(SP) 
+ ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 344(SP),AX + MULQ 128(SP) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ ·REDMASK51(SB),DX + SHLQ $13,CX:SI + ANDQ DX,SI + SHLQ $13,R9:R8 + ANDQ DX,R8 + ADDQ CX,R8 + SHLQ $13,R11:R10 + ANDQ DX,R10 + ADDQ R9,R10 + SHLQ $13,R13:R12 + ANDQ DX,R12 + ADDQ R11,R12 + SHLQ $13,R15:R14 + ANDQ DX,R14 + ADDQ R13,R14 + IMUL3Q $19,R15,CX + ADDQ CX,SI + MOVQ SI,CX + SHRQ $51,CX + ADDQ R8,CX + MOVQ CX,R8 + SHRQ $51,CX + ANDQ DX,SI + ADDQ R10,CX + MOVQ CX,R9 + SHRQ $51,CX + ANDQ DX,R8 + ADDQ R12,CX + MOVQ CX,AX + SHRQ $51,CX + ANDQ DX,R9 + ADDQ R14,CX + MOVQ CX,R10 + SHRQ $51,CX + ANDQ DX,AX + IMUL3Q $19,CX,CX + ADDQ CX,SI + ANDQ DX,R10 + MOVQ SI,96(SP) + MOVQ R8,104(SP) + MOVQ R9,112(SP) + MOVQ AX,120(SP) + MOVQ R10,128(SP) + MOVQ 320(SP),SI + IMUL3Q $19,SI,AX + MOVQ AX,256(SP) + MULQ 72(SP) + MOVQ AX,SI + MOVQ DX,CX + MOVQ 328(SP),DX + IMUL3Q $19,DX,AX + MOVQ AX,264(SP) + MULQ 64(SP) + ADDQ AX,SI + ADCQ DX,CX + MOVQ 296(SP),AX + MULQ 56(SP) + ADDQ AX,SI + ADCQ DX,CX + MOVQ 296(SP),AX + MULQ 64(SP) + MOVQ AX,R8 + MOVQ DX,R9 + MOVQ 296(SP),AX + MULQ 72(SP) + MOVQ AX,R10 + MOVQ DX,R11 + MOVQ 296(SP),AX + MULQ 80(SP) + MOVQ AX,R12 + MOVQ DX,R13 + MOVQ 296(SP),AX + MULQ 88(SP) + MOVQ AX,R14 + MOVQ DX,R15 + MOVQ 304(SP),AX + MULQ 56(SP) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 304(SP),AX + MULQ 64(SP) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 304(SP),AX + MULQ 72(SP) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ 304(SP),AX + MULQ 80(SP) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 304(SP),DX + IMUL3Q $19,DX,AX + MULQ 88(SP) + ADDQ AX,SI + ADCQ DX,CX + MOVQ 312(SP),AX + MULQ 56(SP) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 312(SP),AX + MULQ 64(SP) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ 312(SP),AX + MULQ 72(SP) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 312(SP),DX + IMUL3Q $19,DX,AX + MULQ 80(SP) + ADDQ AX,SI + ADCQ DX,CX + MOVQ 312(SP),DX + IMUL3Q $19,DX,AX + MULQ 88(SP) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 320(SP),AX + MULQ 56(SP) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ 320(SP),AX + MULQ 64(SP) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 256(SP),AX + MULQ 80(SP) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 256(SP),AX + MULQ 88(SP) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 328(SP),AX + MULQ 56(SP) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 264(SP),AX + MULQ 72(SP) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 264(SP),AX + MULQ 80(SP) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 264(SP),AX + MULQ 88(SP) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ ·REDMASK51(SB),DX + SHLQ $13,CX:SI + ANDQ DX,SI + SHLQ $13,R9:R8 + ANDQ DX,R8 + ADDQ CX,R8 + SHLQ $13,R11:R10 + ANDQ DX,R10 + ADDQ R9,R10 + SHLQ $13,R13:R12 + ANDQ DX,R12 + ADDQ R11,R12 + SHLQ $13,R15:R14 + ANDQ DX,R14 + ADDQ R13,R14 + IMUL3Q $19,R15,CX + ADDQ CX,SI + MOVQ SI,CX + SHRQ $51,CX + ADDQ R8,CX + MOVQ CX,R8 + SHRQ $51,CX + ANDQ DX,SI + ADDQ R10,CX + MOVQ CX,R9 + SHRQ $51,CX + ANDQ DX,R8 + ADDQ R12,CX + MOVQ CX,AX + SHRQ $51,CX + ANDQ DX,R9 + ADDQ R14,CX + MOVQ CX,R10 + SHRQ $51,CX + ANDQ DX,AX + IMUL3Q $19,CX,CX + ADDQ CX,SI + ANDQ DX,R10 + MOVQ SI,DX + MOVQ R8,CX + MOVQ R9,R11 + MOVQ AX,R12 + MOVQ R10,R13 + ADDQ ·_2P0(SB),DX + ADDQ ·_2P1234(SB),CX + ADDQ ·_2P1234(SB),R11 + ADDQ ·_2P1234(SB),R12 + ADDQ ·_2P1234(SB),R13 + ADDQ 96(SP),SI + ADDQ 104(SP),R8 + ADDQ 112(SP),R9 + ADDQ 120(SP),AX + ADDQ 128(SP),R10 + SUBQ 96(SP),DX + SUBQ 104(SP),CX + SUBQ 112(SP),R11 + SUBQ 120(SP),R12 + SUBQ 128(SP),R13 + MOVQ SI,120(DI) + MOVQ R8,128(DI) + MOVQ R9,136(DI) + MOVQ AX,144(DI) + MOVQ R10,152(DI) + MOVQ DX,160(DI) + MOVQ CX,168(DI) + MOVQ R11,176(DI) + MOVQ R12,184(DI) + MOVQ R13,192(DI) + MOVQ 120(DI),AX + MULQ 120(DI) + MOVQ AX,SI + MOVQ DX,CX + MOVQ 120(DI),AX + SHLQ 
$1,AX + MULQ 128(DI) + MOVQ AX,R8 + MOVQ DX,R9 + MOVQ 120(DI),AX + SHLQ $1,AX + MULQ 136(DI) + MOVQ AX,R10 + MOVQ DX,R11 + MOVQ 120(DI),AX + SHLQ $1,AX + MULQ 144(DI) + MOVQ AX,R12 + MOVQ DX,R13 + MOVQ 120(DI),AX + SHLQ $1,AX + MULQ 152(DI) + MOVQ AX,R14 + MOVQ DX,R15 + MOVQ 128(DI),AX + MULQ 128(DI) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 128(DI),AX + SHLQ $1,AX + MULQ 136(DI) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ 128(DI),AX + SHLQ $1,AX + MULQ 144(DI) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 128(DI),DX + IMUL3Q $38,DX,AX + MULQ 152(DI) + ADDQ AX,SI + ADCQ DX,CX + MOVQ 136(DI),AX + MULQ 136(DI) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 136(DI),DX + IMUL3Q $38,DX,AX + MULQ 144(DI) + ADDQ AX,SI + ADCQ DX,CX + MOVQ 136(DI),DX + IMUL3Q $38,DX,AX + MULQ 152(DI) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 144(DI),DX + IMUL3Q $19,DX,AX + MULQ 144(DI) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 144(DI),DX + IMUL3Q $38,DX,AX + MULQ 152(DI) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 152(DI),DX + IMUL3Q $19,DX,AX + MULQ 152(DI) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ ·REDMASK51(SB),DX + SHLQ $13,CX:SI + ANDQ DX,SI + SHLQ $13,R9:R8 + ANDQ DX,R8 + ADDQ CX,R8 + SHLQ $13,R11:R10 + ANDQ DX,R10 + ADDQ R9,R10 + SHLQ $13,R13:R12 + ANDQ DX,R12 + ADDQ R11,R12 + SHLQ $13,R15:R14 + ANDQ DX,R14 + ADDQ R13,R14 + IMUL3Q $19,R15,CX + ADDQ CX,SI + MOVQ SI,CX + SHRQ $51,CX + ADDQ R8,CX + ANDQ DX,SI + MOVQ CX,R8 + SHRQ $51,CX + ADDQ R10,CX + ANDQ DX,R8 + MOVQ CX,R9 + SHRQ $51,CX + ADDQ R12,CX + ANDQ DX,R9 + MOVQ CX,AX + SHRQ $51,CX + ADDQ R14,CX + ANDQ DX,AX + MOVQ CX,R10 + SHRQ $51,CX + IMUL3Q $19,CX,CX + ADDQ CX,SI + ANDQ DX,R10 + MOVQ SI,120(DI) + MOVQ R8,128(DI) + MOVQ R9,136(DI) + MOVQ AX,144(DI) + MOVQ R10,152(DI) + MOVQ 160(DI),AX + MULQ 160(DI) + MOVQ AX,SI + MOVQ DX,CX + MOVQ 160(DI),AX + SHLQ $1,AX + MULQ 168(DI) + MOVQ AX,R8 + MOVQ DX,R9 + MOVQ 160(DI),AX + SHLQ $1,AX + MULQ 176(DI) + MOVQ AX,R10 + MOVQ DX,R11 + MOVQ 160(DI),AX + SHLQ $1,AX + MULQ 184(DI) + MOVQ AX,R12 + MOVQ DX,R13 + MOVQ 160(DI),AX + SHLQ $1,AX + MULQ 192(DI) + MOVQ AX,R14 + MOVQ DX,R15 + MOVQ 168(DI),AX + MULQ 168(DI) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 168(DI),AX + SHLQ $1,AX + MULQ 176(DI) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ 168(DI),AX + SHLQ $1,AX + MULQ 184(DI) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 168(DI),DX + IMUL3Q $38,DX,AX + MULQ 192(DI) + ADDQ AX,SI + ADCQ DX,CX + MOVQ 176(DI),AX + MULQ 176(DI) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 176(DI),DX + IMUL3Q $38,DX,AX + MULQ 184(DI) + ADDQ AX,SI + ADCQ DX,CX + MOVQ 176(DI),DX + IMUL3Q $38,DX,AX + MULQ 192(DI) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 184(DI),DX + IMUL3Q $19,DX,AX + MULQ 184(DI) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 184(DI),DX + IMUL3Q $38,DX,AX + MULQ 192(DI) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 192(DI),DX + IMUL3Q $19,DX,AX + MULQ 192(DI) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ ·REDMASK51(SB),DX + SHLQ $13,CX:SI + ANDQ DX,SI + SHLQ $13,R9:R8 + ANDQ DX,R8 + ADDQ CX,R8 + SHLQ $13,R11:R10 + ANDQ DX,R10 + ADDQ R9,R10 + SHLQ $13,R13:R12 + ANDQ DX,R12 + ADDQ R11,R12 + SHLQ $13,R15:R14 + ANDQ DX,R14 + ADDQ R13,R14 + IMUL3Q $19,R15,CX + ADDQ CX,SI + MOVQ SI,CX + SHRQ $51,CX + ADDQ R8,CX + ANDQ DX,SI + MOVQ CX,R8 + SHRQ $51,CX + ADDQ R10,CX + ANDQ DX,R8 + MOVQ CX,R9 + SHRQ $51,CX + ADDQ R12,CX + ANDQ DX,R9 + MOVQ CX,AX + SHRQ $51,CX + ADDQ R14,CX + ANDQ DX,AX + MOVQ CX,R10 + SHRQ $51,CX + IMUL3Q $19,CX,CX + ADDQ CX,SI + ANDQ DX,R10 + MOVQ SI,160(DI) + MOVQ R8,168(DI) + MOVQ R9,176(DI) + MOVQ AX,184(DI) + MOVQ R10,192(DI) + MOVQ 184(DI),SI + IMUL3Q $19,SI,AX + MOVQ AX,56(SP) + MULQ 16(DI) + MOVQ AX,SI + MOVQ DX,CX + MOVQ 192(DI),DX + IMUL3Q $19,DX,AX + MOVQ 
AX,64(SP) + MULQ 8(DI) + ADDQ AX,SI + ADCQ DX,CX + MOVQ 160(DI),AX + MULQ 0(DI) + ADDQ AX,SI + ADCQ DX,CX + MOVQ 160(DI),AX + MULQ 8(DI) + MOVQ AX,R8 + MOVQ DX,R9 + MOVQ 160(DI),AX + MULQ 16(DI) + MOVQ AX,R10 + MOVQ DX,R11 + MOVQ 160(DI),AX + MULQ 24(DI) + MOVQ AX,R12 + MOVQ DX,R13 + MOVQ 160(DI),AX + MULQ 32(DI) + MOVQ AX,R14 + MOVQ DX,R15 + MOVQ 168(DI),AX + MULQ 0(DI) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 168(DI),AX + MULQ 8(DI) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 168(DI),AX + MULQ 16(DI) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ 168(DI),AX + MULQ 24(DI) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 168(DI),DX + IMUL3Q $19,DX,AX + MULQ 32(DI) + ADDQ AX,SI + ADCQ DX,CX + MOVQ 176(DI),AX + MULQ 0(DI) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 176(DI),AX + MULQ 8(DI) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ 176(DI),AX + MULQ 16(DI) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 176(DI),DX + IMUL3Q $19,DX,AX + MULQ 24(DI) + ADDQ AX,SI + ADCQ DX,CX + MOVQ 176(DI),DX + IMUL3Q $19,DX,AX + MULQ 32(DI) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 184(DI),AX + MULQ 0(DI) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ 184(DI),AX + MULQ 8(DI) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 56(SP),AX + MULQ 24(DI) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 56(SP),AX + MULQ 32(DI) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 192(DI),AX + MULQ 0(DI) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 64(SP),AX + MULQ 16(DI) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 64(SP),AX + MULQ 24(DI) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 64(SP),AX + MULQ 32(DI) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ ·REDMASK51(SB),DX + SHLQ $13,CX:SI + ANDQ DX,SI + SHLQ $13,R9:R8 + ANDQ DX,R8 + ADDQ CX,R8 + SHLQ $13,R11:R10 + ANDQ DX,R10 + ADDQ R9,R10 + SHLQ $13,R13:R12 + ANDQ DX,R12 + ADDQ R11,R12 + SHLQ $13,R15:R14 + ANDQ DX,R14 + ADDQ R13,R14 + IMUL3Q $19,R15,CX + ADDQ CX,SI + MOVQ SI,CX + SHRQ $51,CX + ADDQ R8,CX + MOVQ CX,R8 + SHRQ $51,CX + ANDQ DX,SI + ADDQ R10,CX + MOVQ CX,R9 + SHRQ $51,CX + ANDQ DX,R8 + ADDQ R12,CX + MOVQ CX,AX + SHRQ $51,CX + ANDQ DX,R9 + ADDQ R14,CX + MOVQ CX,R10 + SHRQ $51,CX + ANDQ DX,AX + IMUL3Q $19,CX,CX + ADDQ CX,SI + ANDQ DX,R10 + MOVQ SI,160(DI) + MOVQ R8,168(DI) + MOVQ R9,176(DI) + MOVQ AX,184(DI) + MOVQ R10,192(DI) + MOVQ 200(SP),SI + IMUL3Q $19,SI,AX + MOVQ AX,56(SP) + MULQ 152(SP) + MOVQ AX,SI + MOVQ DX,CX + MOVQ 208(SP),DX + IMUL3Q $19,DX,AX + MOVQ AX,64(SP) + MULQ 144(SP) + ADDQ AX,SI + ADCQ DX,CX + MOVQ 176(SP),AX + MULQ 136(SP) + ADDQ AX,SI + ADCQ DX,CX + MOVQ 176(SP),AX + MULQ 144(SP) + MOVQ AX,R8 + MOVQ DX,R9 + MOVQ 176(SP),AX + MULQ 152(SP) + MOVQ AX,R10 + MOVQ DX,R11 + MOVQ 176(SP),AX + MULQ 160(SP) + MOVQ AX,R12 + MOVQ DX,R13 + MOVQ 176(SP),AX + MULQ 168(SP) + MOVQ AX,R14 + MOVQ DX,R15 + MOVQ 184(SP),AX + MULQ 136(SP) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 184(SP),AX + MULQ 144(SP) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 184(SP),AX + MULQ 152(SP) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ 184(SP),AX + MULQ 160(SP) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 184(SP),DX + IMUL3Q $19,DX,AX + MULQ 168(SP) + ADDQ AX,SI + ADCQ DX,CX + MOVQ 192(SP),AX + MULQ 136(SP) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 192(SP),AX + MULQ 144(SP) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ 192(SP),AX + MULQ 152(SP) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 192(SP),DX + IMUL3Q $19,DX,AX + MULQ 160(SP) + ADDQ AX,SI + ADCQ DX,CX + MOVQ 192(SP),DX + IMUL3Q $19,DX,AX + MULQ 168(SP) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 200(SP),AX + MULQ 136(SP) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ 200(SP),AX + MULQ 144(SP) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 56(SP),AX + MULQ 160(SP) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 56(SP),AX + MULQ 168(SP) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 208(SP),AX + MULQ 136(SP) + ADDQ AX,R14 + ADCQ 
DX,R15 + MOVQ 64(SP),AX + MULQ 152(SP) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 64(SP),AX + MULQ 160(SP) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 64(SP),AX + MULQ 168(SP) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ ·REDMASK51(SB),DX + SHLQ $13,CX:SI + ANDQ DX,SI + SHLQ $13,R9:R8 + ANDQ DX,R8 + ADDQ CX,R8 + SHLQ $13,R11:R10 + ANDQ DX,R10 + ADDQ R9,R10 + SHLQ $13,R13:R12 + ANDQ DX,R12 + ADDQ R11,R12 + SHLQ $13,R15:R14 + ANDQ DX,R14 + ADDQ R13,R14 + IMUL3Q $19,R15,CX + ADDQ CX,SI + MOVQ SI,CX + SHRQ $51,CX + ADDQ R8,CX + MOVQ CX,R8 + SHRQ $51,CX + ANDQ DX,SI + ADDQ R10,CX + MOVQ CX,R9 + SHRQ $51,CX + ANDQ DX,R8 + ADDQ R12,CX + MOVQ CX,AX + SHRQ $51,CX + ANDQ DX,R9 + ADDQ R14,CX + MOVQ CX,R10 + SHRQ $51,CX + ANDQ DX,AX + IMUL3Q $19,CX,CX + ADDQ CX,SI + ANDQ DX,R10 + MOVQ SI,40(DI) + MOVQ R8,48(DI) + MOVQ R9,56(DI) + MOVQ AX,64(DI) + MOVQ R10,72(DI) + MOVQ 216(SP),AX + MULQ ·_121666_213(SB) + SHRQ $13,AX + MOVQ AX,SI + MOVQ DX,CX + MOVQ 224(SP),AX + MULQ ·_121666_213(SB) + SHRQ $13,AX + ADDQ AX,CX + MOVQ DX,R8 + MOVQ 232(SP),AX + MULQ ·_121666_213(SB) + SHRQ $13,AX + ADDQ AX,R8 + MOVQ DX,R9 + MOVQ 240(SP),AX + MULQ ·_121666_213(SB) + SHRQ $13,AX + ADDQ AX,R9 + MOVQ DX,R10 + MOVQ 248(SP),AX + MULQ ·_121666_213(SB) + SHRQ $13,AX + ADDQ AX,R10 + IMUL3Q $19,DX,DX + ADDQ DX,SI + ADDQ 136(SP),SI + ADDQ 144(SP),CX + ADDQ 152(SP),R8 + ADDQ 160(SP),R9 + ADDQ 168(SP),R10 + MOVQ SI,80(DI) + MOVQ CX,88(DI) + MOVQ R8,96(DI) + MOVQ R9,104(DI) + MOVQ R10,112(DI) + MOVQ 104(DI),SI + IMUL3Q $19,SI,AX + MOVQ AX,56(SP) + MULQ 232(SP) + MOVQ AX,SI + MOVQ DX,CX + MOVQ 112(DI),DX + IMUL3Q $19,DX,AX + MOVQ AX,64(SP) + MULQ 224(SP) + ADDQ AX,SI + ADCQ DX,CX + MOVQ 80(DI),AX + MULQ 216(SP) + ADDQ AX,SI + ADCQ DX,CX + MOVQ 80(DI),AX + MULQ 224(SP) + MOVQ AX,R8 + MOVQ DX,R9 + MOVQ 80(DI),AX + MULQ 232(SP) + MOVQ AX,R10 + MOVQ DX,R11 + MOVQ 80(DI),AX + MULQ 240(SP) + MOVQ AX,R12 + MOVQ DX,R13 + MOVQ 80(DI),AX + MULQ 248(SP) + MOVQ AX,R14 + MOVQ DX,R15 + MOVQ 88(DI),AX + MULQ 216(SP) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 88(DI),AX + MULQ 224(SP) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 88(DI),AX + MULQ 232(SP) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ 88(DI),AX + MULQ 240(SP) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 88(DI),DX + IMUL3Q $19,DX,AX + MULQ 248(SP) + ADDQ AX,SI + ADCQ DX,CX + MOVQ 96(DI),AX + MULQ 216(SP) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 96(DI),AX + MULQ 224(SP) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ 96(DI),AX + MULQ 232(SP) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 96(DI),DX + IMUL3Q $19,DX,AX + MULQ 240(SP) + ADDQ AX,SI + ADCQ DX,CX + MOVQ 96(DI),DX + IMUL3Q $19,DX,AX + MULQ 248(SP) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 104(DI),AX + MULQ 216(SP) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ 104(DI),AX + MULQ 224(SP) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 56(SP),AX + MULQ 240(SP) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 56(SP),AX + MULQ 248(SP) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 112(DI),AX + MULQ 216(SP) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 64(SP),AX + MULQ 232(SP) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 64(SP),AX + MULQ 240(SP) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 64(SP),AX + MULQ 248(SP) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ ·REDMASK51(SB),DX + SHLQ $13,CX:SI + ANDQ DX,SI + SHLQ $13,R9:R8 + ANDQ DX,R8 + ADDQ CX,R8 + SHLQ $13,R11:R10 + ANDQ DX,R10 + ADDQ R9,R10 + SHLQ $13,R13:R12 + ANDQ DX,R12 + ADDQ R11,R12 + SHLQ $13,R15:R14 + ANDQ DX,R14 + ADDQ R13,R14 + IMUL3Q $19,R15,CX + ADDQ CX,SI + MOVQ SI,CX + SHRQ $51,CX + ADDQ R8,CX + MOVQ CX,R8 + SHRQ $51,CX + ANDQ DX,SI + ADDQ R10,CX + MOVQ CX,R9 + SHRQ $51,CX + ANDQ DX,R8 + ADDQ R12,CX + MOVQ CX,AX + SHRQ $51,CX + ANDQ DX,R9 + ADDQ R14,CX + MOVQ CX,R10 + SHRQ 
$51,CX + ANDQ DX,AX + IMUL3Q $19,CX,CX + ADDQ CX,SI + ANDQ DX,R10 + MOVQ SI,80(DI) + MOVQ R8,88(DI) + MOVQ R9,96(DI) + MOVQ AX,104(DI) + MOVQ R10,112(DI) + MOVQ 0(SP),R11 + MOVQ 8(SP),R12 + MOVQ 16(SP),R13 + MOVQ 24(SP),R14 + MOVQ 32(SP),R15 + MOVQ 40(SP),BX + MOVQ 48(SP),BP + MOVQ R11,SP + MOVQ DI,AX + MOVQ SI,DX + RET diff --git a/vendor/golang.org/x/crypto/curve25519/mont25519_amd64.go b/vendor/golang.org/x/crypto/curve25519/mont25519_amd64.go new file mode 100644 index 000000000..5822bd533 --- /dev/null +++ b/vendor/golang.org/x/crypto/curve25519/mont25519_amd64.go @@ -0,0 +1,240 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build amd64,!gccgo,!appengine + +package curve25519 + +// These functions are implemented in the .s files. The names of the functions +// in the rest of the file are also taken from the SUPERCOP sources to help +// people following along. + +//go:noescape + +func cswap(inout *[5]uint64, v uint64) + +//go:noescape + +func ladderstep(inout *[5][5]uint64) + +//go:noescape + +func freeze(inout *[5]uint64) + +//go:noescape + +func mul(dest, a, b *[5]uint64) + +//go:noescape + +func square(out, in *[5]uint64) + +// mladder uses a Montgomery ladder to calculate (xr/zr) *= s. +func mladder(xr, zr *[5]uint64, s *[32]byte) { + var work [5][5]uint64 + + work[0] = *xr + setint(&work[1], 1) + setint(&work[2], 0) + work[3] = *xr + setint(&work[4], 1) + + j := uint(6) + var prevbit byte + + for i := 31; i >= 0; i-- { + for j < 8 { + bit := ((*s)[i] >> j) & 1 + swap := bit ^ prevbit + prevbit = bit + cswap(&work[1], uint64(swap)) + ladderstep(&work) + j-- + } + j = 7 + } + + *xr = work[1] + *zr = work[2] +} + +func scalarMult(out, in, base *[32]byte) { + var e [32]byte + copy(e[:], (*in)[:]) + e[0] &= 248 + e[31] &= 127 + e[31] |= 64 + + var t, z [5]uint64 + unpack(&t, base) + mladder(&t, &z, &e) + invert(&z, &z) + mul(&t, &t, &z) + pack(out, &t) +} + +func setint(r *[5]uint64, v uint64) { + r[0] = v + r[1] = 0 + r[2] = 0 + r[3] = 0 + r[4] = 0 +} + +// unpack sets r = x where r consists of 5, 51-bit limbs in little-endian +// order. +func unpack(r *[5]uint64, x *[32]byte) { + r[0] = uint64(x[0]) | + uint64(x[1])<<8 | + uint64(x[2])<<16 | + uint64(x[3])<<24 | + uint64(x[4])<<32 | + uint64(x[5])<<40 | + uint64(x[6]&7)<<48 + + r[1] = uint64(x[6])>>3 | + uint64(x[7])<<5 | + uint64(x[8])<<13 | + uint64(x[9])<<21 | + uint64(x[10])<<29 | + uint64(x[11])<<37 | + uint64(x[12]&63)<<45 + + r[2] = uint64(x[12])>>6 | + uint64(x[13])<<2 | + uint64(x[14])<<10 | + uint64(x[15])<<18 | + uint64(x[16])<<26 | + uint64(x[17])<<34 | + uint64(x[18])<<42 | + uint64(x[19]&1)<<50 + + r[3] = uint64(x[19])>>1 | + uint64(x[20])<<7 | + uint64(x[21])<<15 | + uint64(x[22])<<23 | + uint64(x[23])<<31 | + uint64(x[24])<<39 | + uint64(x[25]&15)<<47 + + r[4] = uint64(x[25])>>4 | + uint64(x[26])<<4 | + uint64(x[27])<<12 | + uint64(x[28])<<20 | + uint64(x[29])<<28 | + uint64(x[30])<<36 | + uint64(x[31]&127)<<44 +} + +// pack sets out = x where out is the usual, little-endian form of the 5, +// 51-bit limbs in x. 
+func pack(out *[32]byte, x *[5]uint64) { + t := *x + freeze(&t) + + out[0] = byte(t[0]) + out[1] = byte(t[0] >> 8) + out[2] = byte(t[0] >> 16) + out[3] = byte(t[0] >> 24) + out[4] = byte(t[0] >> 32) + out[5] = byte(t[0] >> 40) + out[6] = byte(t[0] >> 48) + + out[6] ^= byte(t[1]<<3) & 0xf8 + out[7] = byte(t[1] >> 5) + out[8] = byte(t[1] >> 13) + out[9] = byte(t[1] >> 21) + out[10] = byte(t[1] >> 29) + out[11] = byte(t[1] >> 37) + out[12] = byte(t[1] >> 45) + + out[12] ^= byte(t[2]<<6) & 0xc0 + out[13] = byte(t[2] >> 2) + out[14] = byte(t[2] >> 10) + out[15] = byte(t[2] >> 18) + out[16] = byte(t[2] >> 26) + out[17] = byte(t[2] >> 34) + out[18] = byte(t[2] >> 42) + out[19] = byte(t[2] >> 50) + + out[19] ^= byte(t[3]<<1) & 0xfe + out[20] = byte(t[3] >> 7) + out[21] = byte(t[3] >> 15) + out[22] = byte(t[3] >> 23) + out[23] = byte(t[3] >> 31) + out[24] = byte(t[3] >> 39) + out[25] = byte(t[3] >> 47) + + out[25] ^= byte(t[4]<<4) & 0xf0 + out[26] = byte(t[4] >> 4) + out[27] = byte(t[4] >> 12) + out[28] = byte(t[4] >> 20) + out[29] = byte(t[4] >> 28) + out[30] = byte(t[4] >> 36) + out[31] = byte(t[4] >> 44) +} + +// invert calculates r = x^-1 mod p using Fermat's little theorem. +func invert(r *[5]uint64, x *[5]uint64) { + var z2, z9, z11, z2_5_0, z2_10_0, z2_20_0, z2_50_0, z2_100_0, t [5]uint64 + + square(&z2, x) /* 2 */ + square(&t, &z2) /* 4 */ + square(&t, &t) /* 8 */ + mul(&z9, &t, x) /* 9 */ + mul(&z11, &z9, &z2) /* 11 */ + square(&t, &z11) /* 22 */ + mul(&z2_5_0, &t, &z9) /* 2^5 - 2^0 = 31 */ + + square(&t, &z2_5_0) /* 2^6 - 2^1 */ + for i := 1; i < 5; i++ { /* 2^20 - 2^10 */ + square(&t, &t) + } + mul(&z2_10_0, &t, &z2_5_0) /* 2^10 - 2^0 */ + + square(&t, &z2_10_0) /* 2^11 - 2^1 */ + for i := 1; i < 10; i++ { /* 2^20 - 2^10 */ + square(&t, &t) + } + mul(&z2_20_0, &t, &z2_10_0) /* 2^20 - 2^0 */ + + square(&t, &z2_20_0) /* 2^21 - 2^1 */ + for i := 1; i < 20; i++ { /* 2^40 - 2^20 */ + square(&t, &t) + } + mul(&t, &t, &z2_20_0) /* 2^40 - 2^0 */ + + square(&t, &t) /* 2^41 - 2^1 */ + for i := 1; i < 10; i++ { /* 2^50 - 2^10 */ + square(&t, &t) + } + mul(&z2_50_0, &t, &z2_10_0) /* 2^50 - 2^0 */ + + square(&t, &z2_50_0) /* 2^51 - 2^1 */ + for i := 1; i < 50; i++ { /* 2^100 - 2^50 */ + square(&t, &t) + } + mul(&z2_100_0, &t, &z2_50_0) /* 2^100 - 2^0 */ + + square(&t, &z2_100_0) /* 2^101 - 2^1 */ + for i := 1; i < 100; i++ { /* 2^200 - 2^100 */ + square(&t, &t) + } + mul(&t, &t, &z2_100_0) /* 2^200 - 2^0 */ + + square(&t, &t) /* 2^201 - 2^1 */ + for i := 1; i < 50; i++ { /* 2^250 - 2^50 */ + square(&t, &t) + } + mul(&t, &t, &z2_50_0) /* 2^250 - 2^0 */ + + square(&t, &t) /* 2^251 - 2^1 */ + square(&t, &t) /* 2^252 - 2^2 */ + square(&t, &t) /* 2^253 - 2^3 */ + + square(&t, &t) /* 2^254 - 2^4 */ + + square(&t, &t) /* 2^255 - 2^5 */ + mul(r, &t, &z11) /* 2^255 - 21 */ +} diff --git a/vendor/golang.org/x/crypto/curve25519/mul_amd64.s b/vendor/golang.org/x/crypto/curve25519/mul_amd64.s new file mode 100644 index 000000000..e48d183ee --- /dev/null +++ b/vendor/golang.org/x/crypto/curve25519/mul_amd64.s @@ -0,0 +1,191 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
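The invert function above is the standard Curve25519 inversion: p = 2^255 - 19 is prime, so Fermat's little theorem gives x^-1 = x^(p-2) mod p, and the square/mul chain evaluates exactly the exponent 2^255 - 21 = p - 2 named in its final comment. A self-contained math/big cross-check of that identity (an illustrative sketch, independent of the vendored package):

package main

import (
	"fmt"
	"math/big"
)

func main() {
	one := big.NewInt(1)
	// p = 2^255 - 19, the curve25519 field prime.
	p := new(big.Int).Sub(new(big.Int).Lsh(one, 255), big.NewInt(19))
	x := big.NewInt(123456789)
	// Fermat's little theorem: x^(p-2) is the inverse of x mod p. The
	// exponent p-2 = 2^255 - 21 is what invert's addition chain computes.
	inv := new(big.Int).Exp(x, new(big.Int).Sub(p, big.NewInt(2)), p)
	got := new(big.Int).Mod(new(big.Int).Mul(x, inv), p)
	fmt.Println(got.Cmp(one) == 0) // true: x * x^-1 == 1 (mod p)
}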
+ +// This code was translated into a form compatible with 6a from the public +// domain sources in SUPERCOP: http://bench.cr.yp.to/supercop.html + +// +build amd64,!gccgo,!appengine + +// func mul(dest, a, b *[5]uint64) +TEXT ·mul(SB),0,$128-24 + MOVQ dest+0(FP), DI + MOVQ a+8(FP), SI + MOVQ b+16(FP), DX + + MOVQ SP,R11 + MOVQ $31,CX + NOTQ CX + ANDQ CX,SP + ADDQ $32,SP + + MOVQ R11,0(SP) + MOVQ R12,8(SP) + MOVQ R13,16(SP) + MOVQ R14,24(SP) + MOVQ R15,32(SP) + MOVQ BX,40(SP) + MOVQ BP,48(SP) + MOVQ DI,56(SP) + MOVQ DX,CX + MOVQ 24(SI),DX + IMUL3Q $19,DX,AX + MOVQ AX,64(SP) + MULQ 16(CX) + MOVQ AX,R8 + MOVQ DX,R9 + MOVQ 32(SI),DX + IMUL3Q $19,DX,AX + MOVQ AX,72(SP) + MULQ 8(CX) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 0(SI),AX + MULQ 0(CX) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 0(SI),AX + MULQ 8(CX) + MOVQ AX,R10 + MOVQ DX,R11 + MOVQ 0(SI),AX + MULQ 16(CX) + MOVQ AX,R12 + MOVQ DX,R13 + MOVQ 0(SI),AX + MULQ 24(CX) + MOVQ AX,R14 + MOVQ DX,R15 + MOVQ 0(SI),AX + MULQ 32(CX) + MOVQ AX,BX + MOVQ DX,BP + MOVQ 8(SI),AX + MULQ 0(CX) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 8(SI),AX + MULQ 8(CX) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ 8(SI),AX + MULQ 16(CX) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 8(SI),AX + MULQ 24(CX) + ADDQ AX,BX + ADCQ DX,BP + MOVQ 8(SI),DX + IMUL3Q $19,DX,AX + MULQ 32(CX) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 16(SI),AX + MULQ 0(CX) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ 16(SI),AX + MULQ 8(CX) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 16(SI),AX + MULQ 16(CX) + ADDQ AX,BX + ADCQ DX,BP + MOVQ 16(SI),DX + IMUL3Q $19,DX,AX + MULQ 24(CX) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 16(SI),DX + IMUL3Q $19,DX,AX + MULQ 32(CX) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 24(SI),AX + MULQ 0(CX) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 24(SI),AX + MULQ 8(CX) + ADDQ AX,BX + ADCQ DX,BP + MOVQ 64(SP),AX + MULQ 24(CX) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 64(SP),AX + MULQ 32(CX) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ 32(SI),AX + MULQ 0(CX) + ADDQ AX,BX + ADCQ DX,BP + MOVQ 72(SP),AX + MULQ 16(CX) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 72(SP),AX + MULQ 24(CX) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ 72(SP),AX + MULQ 32(CX) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ ·REDMASK51(SB),SI + SHLQ $13,R9:R8 + ANDQ SI,R8 + SHLQ $13,R11:R10 + ANDQ SI,R10 + ADDQ R9,R10 + SHLQ $13,R13:R12 + ANDQ SI,R12 + ADDQ R11,R12 + SHLQ $13,R15:R14 + ANDQ SI,R14 + ADDQ R13,R14 + SHLQ $13,BP:BX + ANDQ SI,BX + ADDQ R15,BX + IMUL3Q $19,BP,DX + ADDQ DX,R8 + MOVQ R8,DX + SHRQ $51,DX + ADDQ R10,DX + MOVQ DX,CX + SHRQ $51,DX + ANDQ SI,R8 + ADDQ R12,DX + MOVQ DX,R9 + SHRQ $51,DX + ANDQ SI,CX + ADDQ R14,DX + MOVQ DX,AX + SHRQ $51,DX + ANDQ SI,R9 + ADDQ BX,DX + MOVQ DX,R10 + SHRQ $51,DX + ANDQ SI,AX + IMUL3Q $19,DX,DX + ADDQ DX,R8 + ANDQ SI,R10 + MOVQ R8,0(DI) + MOVQ CX,8(DI) + MOVQ R9,16(DI) + MOVQ AX,24(DI) + MOVQ R10,32(DI) + MOVQ 0(SP),R11 + MOVQ 8(SP),R12 + MOVQ 16(SP),R13 + MOVQ 24(SP),R14 + MOVQ 32(SP),R15 + MOVQ 40(SP),BX + MOVQ 48(SP),BP + MOVQ R11,SP + MOVQ DI,AX + MOVQ SI,DX + RET diff --git a/vendor/golang.org/x/crypto/curve25519/square_amd64.s b/vendor/golang.org/x/crypto/curve25519/square_amd64.s new file mode 100644 index 000000000..78d1a50dd --- /dev/null +++ b/vendor/golang.org/x/crypto/curve25519/square_amd64.s @@ -0,0 +1,153 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
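The reduction sequence that closes mul above (SHLQ/ANDQ against ·REDMASK51, then IMUL3Q $19) is a radix-2^51 carry chain: REDMASK51 is 2^51 - 1, and because 2^255 ≡ 19 (mod 2^255 - 19), the carry leaving the top limb folds back into the bottom limb multiplied by 19. A pure-Go sketch of the same idea (illustrative only; as in the assembly, a limb may sit slightly above 2^51 until freeze performs the final reduction):

package main

import "fmt"

const redMask51 = (1 << 51) - 1 // the ·REDMASK51 constant

// carry propagates carries across five 51-bit limbs, folding the top
// carry back into limb 0 times 19, since 2^255 ≡ 19 (mod 2^255 - 19).
func carry(r *[5]uint64) {
	var c uint64
	for i := 0; i < 5; i++ {
		r[i] += c
		c = r[i] >> 51
		r[i] &= redMask51
	}
	r[0] += 19 * c
}

func main() {
	x := [5]uint64{redMask51 + 5, 0, 0, 0, redMask51 + 2}
	carry(&x)
	fmt.Println(x) // [23 1 0 0 1]: the top carry re-entered limb 0 as 19*carry
}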
+ +// This code was translated into a form compatible with 6a from the public +// domain sources in SUPERCOP: http://bench.cr.yp.to/supercop.html + +// +build amd64,!gccgo,!appengine + +// func square(out, in *[5]uint64) +TEXT ·square(SB),7,$96-16 + MOVQ out+0(FP), DI + MOVQ in+8(FP), SI + + MOVQ SP,R11 + MOVQ $31,CX + NOTQ CX + ANDQ CX,SP + ADDQ $32, SP + + MOVQ R11,0(SP) + MOVQ R12,8(SP) + MOVQ R13,16(SP) + MOVQ R14,24(SP) + MOVQ R15,32(SP) + MOVQ BX,40(SP) + MOVQ BP,48(SP) + MOVQ 0(SI),AX + MULQ 0(SI) + MOVQ AX,CX + MOVQ DX,R8 + MOVQ 0(SI),AX + SHLQ $1,AX + MULQ 8(SI) + MOVQ AX,R9 + MOVQ DX,R10 + MOVQ 0(SI),AX + SHLQ $1,AX + MULQ 16(SI) + MOVQ AX,R11 + MOVQ DX,R12 + MOVQ 0(SI),AX + SHLQ $1,AX + MULQ 24(SI) + MOVQ AX,R13 + MOVQ DX,R14 + MOVQ 0(SI),AX + SHLQ $1,AX + MULQ 32(SI) + MOVQ AX,R15 + MOVQ DX,BX + MOVQ 8(SI),AX + MULQ 8(SI) + ADDQ AX,R11 + ADCQ DX,R12 + MOVQ 8(SI),AX + SHLQ $1,AX + MULQ 16(SI) + ADDQ AX,R13 + ADCQ DX,R14 + MOVQ 8(SI),AX + SHLQ $1,AX + MULQ 24(SI) + ADDQ AX,R15 + ADCQ DX,BX + MOVQ 8(SI),DX + IMUL3Q $38,DX,AX + MULQ 32(SI) + ADDQ AX,CX + ADCQ DX,R8 + MOVQ 16(SI),AX + MULQ 16(SI) + ADDQ AX,R15 + ADCQ DX,BX + MOVQ 16(SI),DX + IMUL3Q $38,DX,AX + MULQ 24(SI) + ADDQ AX,CX + ADCQ DX,R8 + MOVQ 16(SI),DX + IMUL3Q $38,DX,AX + MULQ 32(SI) + ADDQ AX,R9 + ADCQ DX,R10 + MOVQ 24(SI),DX + IMUL3Q $19,DX,AX + MULQ 24(SI) + ADDQ AX,R9 + ADCQ DX,R10 + MOVQ 24(SI),DX + IMUL3Q $38,DX,AX + MULQ 32(SI) + ADDQ AX,R11 + ADCQ DX,R12 + MOVQ 32(SI),DX + IMUL3Q $19,DX,AX + MULQ 32(SI) + ADDQ AX,R13 + ADCQ DX,R14 + MOVQ ·REDMASK51(SB),SI + SHLQ $13,R8:CX + ANDQ SI,CX + SHLQ $13,R10:R9 + ANDQ SI,R9 + ADDQ R8,R9 + SHLQ $13,R12:R11 + ANDQ SI,R11 + ADDQ R10,R11 + SHLQ $13,R14:R13 + ANDQ SI,R13 + ADDQ R12,R13 + SHLQ $13,BX:R15 + ANDQ SI,R15 + ADDQ R14,R15 + IMUL3Q $19,BX,DX + ADDQ DX,CX + MOVQ CX,DX + SHRQ $51,DX + ADDQ R9,DX + ANDQ SI,CX + MOVQ DX,R8 + SHRQ $51,DX + ADDQ R11,DX + ANDQ SI,R8 + MOVQ DX,R9 + SHRQ $51,DX + ADDQ R13,DX + ANDQ SI,R9 + MOVQ DX,AX + SHRQ $51,DX + ADDQ R15,DX + ANDQ SI,AX + MOVQ DX,R10 + SHRQ $51,DX + IMUL3Q $19,DX,DX + ADDQ DX,CX + ANDQ SI,R10 + MOVQ CX,0(DI) + MOVQ R8,8(DI) + MOVQ R9,16(DI) + MOVQ AX,24(DI) + MOVQ R10,32(DI) + MOVQ 0(SP),R11 + MOVQ 8(SP),R12 + MOVQ 16(SP),R13 + MOVQ 24(SP),R14 + MOVQ 32(SP),R15 + MOVQ 40(SP),BX + MOVQ 48(SP),BP + MOVQ R11,SP + MOVQ DI,AX + MOVQ SI,DX + RET diff --git a/vendor/golang.org/x/crypto/ssh/agent/client.go b/vendor/golang.org/x/crypto/ssh/agent/client.go new file mode 100644 index 000000000..d043c5bec --- /dev/null +++ b/vendor/golang.org/x/crypto/ssh/agent/client.go @@ -0,0 +1,615 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +/* + Package agent implements a client to an ssh-agent daemon. + +References: + [PROTOCOL.agent]: http://cvsweb.openbsd.org/cgi-bin/cvsweb/src/usr.bin/ssh/PROTOCOL.agent?rev=HEAD +*/ +package agent + +import ( + "bytes" + "crypto/dsa" + "crypto/ecdsa" + "crypto/elliptic" + "crypto/rsa" + "encoding/base64" + "encoding/binary" + "errors" + "fmt" + "io" + "math/big" + "sync" + + "golang.org/x/crypto/ssh" +) + +// Agent represents the capabilities of an ssh-agent. +type Agent interface { + // List returns the identities known to the agent. + List() ([]*Key, error) + + // Sign has the agent sign the data using a protocol 2 key as defined + // in [PROTOCOL.agent] section 2.6.2. + Sign(key ssh.PublicKey, data []byte) (*ssh.Signature, error) + + // Add adds a private key to the agent. 
+ Add(key AddedKey) error + + // Remove removes all identities with the given public key. + Remove(key ssh.PublicKey) error + + // RemoveAll removes all identities. + RemoveAll() error + + // Lock locks the agent. Sign and Remove will fail, and List will return an empty list. + Lock(passphrase []byte) error + + // Unlock undoes the effect of Lock. + Unlock(passphrase []byte) error + + // Signers returns signers for all the known keys. + Signers() ([]ssh.Signer, error) +} + +// AddedKey describes an SSH key to be added to an Agent. +type AddedKey struct { + // PrivateKey must be a *rsa.PrivateKey, *dsa.PrivateKey or + // *ecdsa.PrivateKey, which will be inserted into the agent. + PrivateKey interface{} + // Certificate, if not nil, is communicated to the agent and will be + // stored with the key. + Certificate *ssh.Certificate + // Comment is an optional, free-form string. + Comment string + // LifetimeSecs, if not zero, is the number of seconds that the + // agent will store the key for. + LifetimeSecs uint32 + // ConfirmBeforeUse, if true, requests that the agent confirm with the + // user before each use of this key. + ConfirmBeforeUse bool +} + +// See [PROTOCOL.agent], section 3. +const ( + agentRequestV1Identities = 1 + + // 3.2 Requests from client to agent for protocol 2 key operations + agentAddIdentity = 17 + agentRemoveIdentity = 18 + agentRemoveAllIdentities = 19 + agentAddIdConstrained = 25 + + // 3.3 Key-type independent requests from client to agent + agentAddSmartcardKey = 20 + agentRemoveSmartcardKey = 21 + agentLock = 22 + agentUnlock = 23 + agentAddSmartcardKeyConstrained = 26 + + // 3.7 Key constraint identifiers + agentConstrainLifetime = 1 + agentConstrainConfirm = 2 +) + +// maxAgentResponseBytes is the maximum agent reply size that is accepted. This +// is a sanity check, not a limit in the spec. +const maxAgentResponseBytes = 16 << 20 + +// Agent messages: +// These structures mirror the wire format of the corresponding ssh agent +// messages found in [PROTOCOL.agent]. + +// 3.4 Generic replies from agent to client +const agentFailure = 5 + +type failureAgentMsg struct{} + +const agentSuccess = 6 + +type successAgentMsg struct{} + +// See [PROTOCOL.agent], section 2.5.2. +const agentRequestIdentities = 11 + +type requestIdentitiesAgentMsg struct{} + +// See [PROTOCOL.agent], section 2.5.2. +const agentIdentitiesAnswer = 12 + +type identitiesAnswerAgentMsg struct { + NumKeys uint32 `sshtype:"12"` + Keys []byte `ssh:"rest"` +} + +// See [PROTOCOL.agent], section 2.6.2. +const agentSignRequest = 13 + +type signRequestAgentMsg struct { + KeyBlob []byte `sshtype:"13"` + Data []byte + Flags uint32 +} + +// See [PROTOCOL.agent], section 2.6.2. + +// 3.6 Replies from agent to client for protocol 2 key operations +const agentSignResponse = 14 + +type signResponseAgentMsg struct { + SigBlob []byte `sshtype:"14"` +} + +type publicKey struct { + Format string + Rest []byte `ssh:"rest"` +} + +// Key represents a protocol 2 public key as defined in +// [PROTOCOL.agent], section 2.5.2. +type Key struct { + Format string + Blob []byte + Comment string +} + +func clientErr(err error) error { + return fmt.Errorf("agent: client error: %v", err) +} + +// String returns the storage form of an agent key with the format, base64 +// encoded serialized key, and the comment if it is not empty.
+func (k *Key) String() string { + s := string(k.Format) + " " + base64.StdEncoding.EncodeToString(k.Blob) + + if k.Comment != "" { + s += " " + k.Comment + } + + return s +} + +// Type returns the public key type. +func (k *Key) Type() string { + return k.Format +} + +// Marshal returns the key blob to satisfy the ssh.PublicKey interface. +func (k *Key) Marshal() []byte { + return k.Blob +} + +// Verify satisfies the ssh.PublicKey interface, but is not +// implemented for agent keys. +func (k *Key) Verify(data []byte, sig *ssh.Signature) error { + return errors.New("agent: agent key does not know how to verify") +} + +type wireKey struct { + Format string + Rest []byte `ssh:"rest"` +} + +func parseKey(in []byte) (out *Key, rest []byte, err error) { + var record struct { + Blob []byte + Comment string + Rest []byte `ssh:"rest"` + } + + if err := ssh.Unmarshal(in, &record); err != nil { + return nil, nil, err + } + + var wk wireKey + if err := ssh.Unmarshal(record.Blob, &wk); err != nil { + return nil, nil, err + } + + return &Key{ + Format: wk.Format, + Blob: record.Blob, + Comment: record.Comment, + }, record.Rest, nil +} + +// client is a client for an ssh-agent process. +type client struct { + // conn is typically a *net.UnixConn + conn io.ReadWriter + // mu is used to prevent concurrent access to the agent + mu sync.Mutex +} + +// NewClient returns an Agent that talks to an ssh-agent process over +// the given connection. +func NewClient(rw io.ReadWriter) Agent { + return &client{conn: rw} +} + +// call sends an RPC to the agent. On success, the reply is unmarshaled +// into a message struct whose concrete type is chosen by the type byte +// at the start of the reply. +func (c *client) call(req []byte) (reply interface{}, err error) { + c.mu.Lock() + defer c.mu.Unlock() + + msg := make([]byte, 4+len(req)) + binary.BigEndian.PutUint32(msg, uint32(len(req))) + copy(msg[4:], req) + if _, err = c.conn.Write(msg); err != nil { + return nil, clientErr(err) + } + + var respSizeBuf [4]byte + if _, err = io.ReadFull(c.conn, respSizeBuf[:]); err != nil { + return nil, clientErr(err) + } + respSize := binary.BigEndian.Uint32(respSizeBuf[:]) + if respSize > maxAgentResponseBytes { + // err is nil here; report the oversized reply explicitly. + return nil, clientErr(errors.New("response too large")) + } + + buf := make([]byte, respSize) + if _, err = io.ReadFull(c.conn, buf); err != nil { + return nil, clientErr(err) + } + reply, err = unmarshal(buf) + if err != nil { + return nil, clientErr(err) + } + return reply, err +} + +func (c *client) simpleCall(req []byte) error { + resp, err := c.call(req) + if err != nil { + return err + } + if _, ok := resp.(*successAgentMsg); ok { + return nil + } + return errors.New("agent: failure") +} + +func (c *client) RemoveAll() error { + return c.simpleCall([]byte{agentRemoveAllIdentities}) +} + +func (c *client) Remove(key ssh.PublicKey) error { + req := ssh.Marshal(&agentRemoveIdentityMsg{ + KeyBlob: key.Marshal(), + }) + return c.simpleCall(req) +} + +func (c *client) Lock(passphrase []byte) error { + req := ssh.Marshal(&agentLockMsg{ + Passphrase: passphrase, + }) + return c.simpleCall(req) +} + +func (c *client) Unlock(passphrase []byte) error { + req := ssh.Marshal(&agentUnlockMsg{ + Passphrase: passphrase, + }) + return c.simpleCall(req) +} + +// List returns the identities known to the agent. +func (c *client) List() ([]*Key, error) { + // see [PROTOCOL.agent] section 2.5.2.
+ req := []byte{agentRequestIdentities} + + msg, err := c.call(req) + if err != nil { + return nil, err + } + + switch msg := msg.(type) { + case *identitiesAnswerAgentMsg: + if msg.NumKeys > maxAgentResponseBytes/8 { + return nil, errors.New("agent: too many keys in agent reply") + } + keys := make([]*Key, msg.NumKeys) + data := msg.Keys + for i := uint32(0); i < msg.NumKeys; i++ { + var key *Key + var err error + if key, data, err = parseKey(data); err != nil { + return nil, err + } + keys[i] = key + } + return keys, nil + case *failureAgentMsg: + return nil, errors.New("agent: failed to list keys") + } + panic("unreachable") +} + +// Sign has the agent sign the data using a protocol 2 key as defined +// in [PROTOCOL.agent] section 2.6.2. +func (c *client) Sign(key ssh.PublicKey, data []byte) (*ssh.Signature, error) { + req := ssh.Marshal(signRequestAgentMsg{ + KeyBlob: key.Marshal(), + Data: data, + }) + + msg, err := c.call(req) + if err != nil { + return nil, err + } + + switch msg := msg.(type) { + case *signResponseAgentMsg: + var sig ssh.Signature + if err := ssh.Unmarshal(msg.SigBlob, &sig); err != nil { + return nil, err + } + + return &sig, nil + case *failureAgentMsg: + return nil, errors.New("agent: failed to sign challenge") + } + panic("unreachable") +} + +// unmarshal parses an agent message in packet, returning the parsed +// form and the message type of packet. +func unmarshal(packet []byte) (interface{}, error) { + if len(packet) < 1 { + return nil, errors.New("agent: empty packet") + } + var msg interface{} + switch packet[0] { + case agentFailure: + return new(failureAgentMsg), nil + case agentSuccess: + return new(successAgentMsg), nil + case agentIdentitiesAnswer: + msg = new(identitiesAnswerAgentMsg) + case agentSignResponse: + msg = new(signResponseAgentMsg) + default: + return nil, fmt.Errorf("agent: unknown type tag %d", packet[0]) + } + if err := ssh.Unmarshal(packet, msg); err != nil { + return nil, err + } + return msg, nil +} + +type rsaKeyMsg struct { + Type string `sshtype:"17"` + N *big.Int + E *big.Int + D *big.Int + Iqmp *big.Int // IQMP = Inverse Q Mod P + P *big.Int + Q *big.Int + Comments string + Constraints []byte `ssh:"rest"` +} + +type dsaKeyMsg struct { + Type string `sshtype:"17"` + P *big.Int + Q *big.Int + G *big.Int + Y *big.Int + X *big.Int + Comments string + Constraints []byte `ssh:"rest"` +} + +type ecdsaKeyMsg struct { + Type string `sshtype:"17"` + Curve string + KeyBytes []byte + D *big.Int + Comments string + Constraints []byte `ssh:"rest"` +} + +// Insert adds a private key to the agent. 
+func (c *client) insertKey(s interface{}, comment string, constraints []byte) error { + var req []byte + switch k := s.(type) { + case *rsa.PrivateKey: + if len(k.Primes) != 2 { + return fmt.Errorf("agent: unsupported RSA key with %d primes", len(k.Primes)) + } + k.Precompute() + req = ssh.Marshal(rsaKeyMsg{ + Type: ssh.KeyAlgoRSA, + N: k.N, + E: big.NewInt(int64(k.E)), + D: k.D, + Iqmp: k.Precomputed.Qinv, + P: k.Primes[0], + Q: k.Primes[1], + Comments: comment, + Constraints: constraints, + }) + case *dsa.PrivateKey: + req = ssh.Marshal(dsaKeyMsg{ + Type: ssh.KeyAlgoDSA, + P: k.P, + Q: k.Q, + G: k.G, + Y: k.Y, + X: k.X, + Comments: comment, + Constraints: constraints, + }) + case *ecdsa.PrivateKey: + nistID := fmt.Sprintf("nistp%d", k.Params().BitSize) + req = ssh.Marshal(ecdsaKeyMsg{ + Type: "ecdsa-sha2-" + nistID, + Curve: nistID, + KeyBytes: elliptic.Marshal(k.Curve, k.X, k.Y), + D: k.D, + Comments: comment, + Constraints: constraints, + }) + default: + return fmt.Errorf("agent: unsupported key type %T", s) + } + + // if constraints are present then the message type needs to be changed. + if len(constraints) != 0 { + req[0] = agentAddIdConstrained + } + + resp, err := c.call(req) + if err != nil { + return err + } + if _, ok := resp.(*successAgentMsg); ok { + return nil + } + return errors.New("agent: failure") +} + +type rsaCertMsg struct { + Type string `sshtype:"17"` + CertBytes []byte + D *big.Int + Iqmp *big.Int // IQMP = Inverse Q Mod P + P *big.Int + Q *big.Int + Comments string + Constraints []byte `ssh:"rest"` +} + +type dsaCertMsg struct { + Type string `sshtype:"17"` + CertBytes []byte + X *big.Int + Comments string + Constraints []byte `ssh:"rest"` +} + +type ecdsaCertMsg struct { + Type string `sshtype:"17"` + CertBytes []byte + D *big.Int + Comments string + Constraints []byte `ssh:"rest"` +} + +// Insert adds a private key to the agent. If a certificate is given, +// that certificate is added instead as public key. +func (c *client) Add(key AddedKey) error { + var constraints []byte + + if secs := key.LifetimeSecs; secs != 0 { + constraints = append(constraints, agentConstrainLifetime) + + var secsBytes [4]byte + binary.BigEndian.PutUint32(secsBytes[:], secs) + constraints = append(constraints, secsBytes[:]...) + } + + if key.ConfirmBeforeUse { + constraints = append(constraints, agentConstrainConfirm) + } + + if cert := key.Certificate; cert == nil { + return c.insertKey(key.PrivateKey, key.Comment, constraints) + } else { + return c.insertCert(key.PrivateKey, cert, key.Comment, constraints) + } +} + +func (c *client) insertCert(s interface{}, cert *ssh.Certificate, comment string, constraints []byte) error { + var req []byte + switch k := s.(type) { + case *rsa.PrivateKey: + if len(k.Primes) != 2 { + return fmt.Errorf("agent: unsupported RSA key with %d primes", len(k.Primes)) + } + k.Precompute() + req = ssh.Marshal(rsaCertMsg{ + Type: cert.Type(), + CertBytes: cert.Marshal(), + D: k.D, + Iqmp: k.Precomputed.Qinv, + P: k.Primes[0], + Q: k.Primes[1], + Comments: comment, + Constraints: constraints, + }) + case *dsa.PrivateKey: + req = ssh.Marshal(dsaCertMsg{ + Type: cert.Type(), + CertBytes: cert.Marshal(), + X: k.X, + Comments: comment, + }) + case *ecdsa.PrivateKey: + req = ssh.Marshal(ecdsaCertMsg{ + Type: cert.Type(), + CertBytes: cert.Marshal(), + D: k.D, + Comments: comment, + }) + default: + return fmt.Errorf("agent: unsupported key type %T", s) + } + + // if constraints are present then the message type needs to be changed. 
+ if len(constraints) != 0 { + req[0] = agentAddIdConstrained + } + + signer, err := ssh.NewSignerFromKey(s) + if err != nil { + return err + } + if !bytes.Equal(cert.Key.Marshal(), signer.PublicKey().Marshal()) { + return errors.New("agent: signer and cert have different public key") + } + + resp, err := c.call(req) + if err != nil { + return err + } + if _, ok := resp.(*successAgentMsg); ok { + return nil + } + return errors.New("agent: failure") +} + +// Signers provides a callback for client authentication. +func (c *client) Signers() ([]ssh.Signer, error) { + keys, err := c.List() + if err != nil { + return nil, err + } + + var result []ssh.Signer + for _, k := range keys { + result = append(result, &agentKeyringSigner{c, k}) + } + return result, nil +} + +type agentKeyringSigner struct { + agent *client + pub ssh.PublicKey +} + +func (s *agentKeyringSigner) PublicKey() ssh.PublicKey { + return s.pub +} + +func (s *agentKeyringSigner) Sign(rand io.Reader, data []byte) (*ssh.Signature, error) { + // The agent has its own entropy source, so the rand argument is ignored. + return s.agent.Sign(s.pub, data) +} diff --git a/vendor/golang.org/x/crypto/ssh/agent/client_test.go b/vendor/golang.org/x/crypto/ssh/agent/client_test.go new file mode 100644 index 000000000..ec7198d54 --- /dev/null +++ b/vendor/golang.org/x/crypto/ssh/agent/client_test.go @@ -0,0 +1,287 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package agent + +import ( + "bytes" + "crypto/rand" + "errors" + "net" + "os" + "os/exec" + "path/filepath" + "strconv" + "testing" + + "golang.org/x/crypto/ssh" +) + +// startAgent executes ssh-agent, and returns an Agent interface to it. +func startAgent(t *testing.T) (client Agent, socket string, cleanup func()) { + if testing.Short() { + // ssh-agent is not always available, and the key + // types supported vary by platform.
+ t.Skip("skipping test due to -short") + } + + bin, err := exec.LookPath("ssh-agent") + if err != nil { + t.Skip("could not find ssh-agent") + } + + cmd := exec.Command(bin, "-s") + out, err := cmd.Output() + if err != nil { + t.Fatalf("cmd.Output: %v", err) + } + + /* Output looks like: + + SSH_AUTH_SOCK=/tmp/ssh-P65gpcqArqvH/agent.15541; export SSH_AUTH_SOCK; + SSH_AGENT_PID=15542; export SSH_AGENT_PID; + echo Agent pid 15542; + */ + fields := bytes.Split(out, []byte(";")) + line := bytes.SplitN(fields[0], []byte("="), 2) + line[0] = bytes.TrimLeft(line[0], "\n") + if string(line[0]) != "SSH_AUTH_SOCK" { + t.Fatalf("could not find key SSH_AUTH_SOCK in %q", fields[0]) + } + socket = string(line[1]) + + line = bytes.SplitN(fields[2], []byte("="), 2) + line[0] = bytes.TrimLeft(line[0], "\n") + if string(line[0]) != "SSH_AGENT_PID" { + t.Fatalf("could not find key SSH_AGENT_PID in %q", fields[2]) + } + pidStr := line[1] + pid, err := strconv.Atoi(string(pidStr)) + if err != nil { + t.Fatalf("Atoi(%q): %v", pidStr, err) + } + + conn, err := net.Dial("unix", string(socket)) + if err != nil { + t.Fatalf("net.Dial: %v", err) + } + + ac := NewClient(conn) + return ac, socket, func() { + proc, _ := os.FindProcess(pid) + if proc != nil { + proc.Kill() + } + conn.Close() + os.RemoveAll(filepath.Dir(socket)) + } +} + +func testAgent(t *testing.T, key interface{}, cert *ssh.Certificate, lifetimeSecs uint32) { + agent, _, cleanup := startAgent(t) + defer cleanup() + + testAgentInterface(t, agent, key, cert, lifetimeSecs) +} + +func testAgentInterface(t *testing.T, agent Agent, key interface{}, cert *ssh.Certificate, lifetimeSecs uint32) { + signer, err := ssh.NewSignerFromKey(key) + if err != nil { + t.Fatalf("NewSignerFromKey(%T): %v", key, err) + } + // The agent should start up empty. + if keys, err := agent.List(); err != nil { + t.Fatalf("RequestIdentities: %v", err) + } else if len(keys) > 0 { + t.Fatalf("got %d keys, want 0: %v", len(keys), keys) + } + + // Attempt to insert the key, with certificate if specified. + var pubKey ssh.PublicKey + if cert != nil { + err = agent.Add(AddedKey{ + PrivateKey: key, + Certificate: cert, + Comment: "comment", + LifetimeSecs: lifetimeSecs, + }) + pubKey = cert + } else { + err = agent.Add(AddedKey{PrivateKey: key, Comment: "comment", LifetimeSecs: lifetimeSecs}) + pubKey = signer.PublicKey() + } + if err != nil { + t.Fatalf("insert(%T): %v", key, err) + } + + // Did the key get inserted successfully? + if keys, err := agent.List(); err != nil { + t.Fatalf("List: %v", err) + } else if len(keys) != 1 { + t.Fatalf("got %v, want 1 key", keys) + } else if keys[0].Comment != "comment" { + t.Fatalf("key comment: got %v, want %v", keys[0].Comment, "comment") + } else if !bytes.Equal(keys[0].Blob, pubKey.Marshal()) { + t.Fatalf("key mismatch") + } + + // Can the agent make a valid signature? 
+ data := []byte("hello") + sig, err := agent.Sign(pubKey, data) + if err != nil { + t.Fatalf("Sign(%s): %v", pubKey.Type(), err) + } + + if err := pubKey.Verify(data, sig); err != nil { + t.Fatalf("Verify(%s): %v", pubKey.Type(), err) + } +} + +func TestAgent(t *testing.T) { + for _, keyType := range []string{"rsa", "dsa", "ecdsa"} { + testAgent(t, testPrivateKeys[keyType], nil, 0) + } +} + +func TestCert(t *testing.T) { + cert := &ssh.Certificate{ + Key: testPublicKeys["rsa"], + ValidBefore: ssh.CertTimeInfinity, + CertType: ssh.UserCert, + } + cert.SignCert(rand.Reader, testSigners["ecdsa"]) + + testAgent(t, testPrivateKeys["rsa"], cert, 0) +} + +func TestConstraints(t *testing.T) { + testAgent(t, testPrivateKeys["rsa"], nil, 3600 /* lifetime in seconds */) +} + +// netPipe is analogous to net.Pipe, but it uses a real net.Conn, and +// therefore is buffered (net.Pipe deadlocks if both sides start with +// a write.) +func netPipe() (net.Conn, net.Conn, error) { + listener, err := net.Listen("tcp", "127.0.0.1:0") + if err != nil { + return nil, nil, err + } + defer listener.Close() + c1, err := net.Dial("tcp", listener.Addr().String()) + if err != nil { + return nil, nil, err + } + + c2, err := listener.Accept() + if err != nil { + c1.Close() + return nil, nil, err + } + + return c1, c2, nil +} + +func TestAuth(t *testing.T) { + a, b, err := netPipe() + if err != nil { + t.Fatalf("netPipe: %v", err) + } + + defer a.Close() + defer b.Close() + + agent, _, cleanup := startAgent(t) + defer cleanup() + + if err := agent.Add(AddedKey{PrivateKey: testPrivateKeys["rsa"], Comment: "comment"}); err != nil { + t.Errorf("Add: %v", err) + } + + serverConf := ssh.ServerConfig{} + serverConf.AddHostKey(testSigners["rsa"]) + serverConf.PublicKeyCallback = func(c ssh.ConnMetadata, key ssh.PublicKey) (*ssh.Permissions, error) { + if bytes.Equal(key.Marshal(), testPublicKeys["rsa"].Marshal()) { + return nil, nil + } + + return nil, errors.New("pubkey rejected") + } + + go func() { + conn, _, _, err := ssh.NewServerConn(a, &serverConf) + if err != nil { + t.Fatalf("Server: %v", err) + } + conn.Close() + }() + + conf := ssh.ClientConfig{} + conf.Auth = append(conf.Auth, ssh.PublicKeysCallback(agent.Signers)) + conn, _, _, err := ssh.NewClientConn(b, "", &conf) + if err != nil { + t.Fatalf("NewClientConn: %v", err) + } + conn.Close() +} + +func TestLockClient(t *testing.T) { + agent, _, cleanup := startAgent(t) + defer cleanup() + testLockAgent(agent, t) +} + +func testLockAgent(agent Agent, t *testing.T) { + if err := agent.Add(AddedKey{PrivateKey: testPrivateKeys["rsa"], Comment: "comment 1"}); err != nil { + t.Errorf("Add: %v", err) + } + if err := agent.Add(AddedKey{PrivateKey: testPrivateKeys["dsa"], Comment: "comment dsa"}); err != nil { + t.Errorf("Add: %v", err) + } + if keys, err := agent.List(); err != nil { + t.Errorf("List: %v", err) + } else if len(keys) != 2 { + t.Errorf("Want 2 keys, got %v", keys) + } + + passphrase := []byte("secret") + if err := agent.Lock(passphrase); err != nil { + t.Errorf("Lock: %v", err) + } + + if keys, err := agent.List(); err != nil { + t.Errorf("List: %v", err) + } else if len(keys) != 0 { + t.Errorf("Want 0 keys, got %v", keys) + } + + signer, _ := ssh.NewSignerFromKey(testPrivateKeys["rsa"]) + if _, err := agent.Sign(signer.PublicKey(), []byte("hello")); err == nil { + t.Fatalf("Sign did not fail") + } + + if err := agent.Remove(signer.PublicKey()); err == nil { + t.Fatalf("Remove did not fail") + } + + if err := agent.RemoveAll(); err == nil { + 
t.Fatalf("RemoveAll did not fail") + } + + if err := agent.Unlock(nil); err == nil { + t.Errorf("Unlock with wrong passphrase succeeded") + } + if err := agent.Unlock(passphrase); err != nil { + t.Errorf("Unlock: %v", err) + } + + if err := agent.Remove(signer.PublicKey()); err != nil { + t.Fatalf("Remove: %v", err) + } + + if keys, err := agent.List(); err != nil { + t.Errorf("List: %v", err) + } else if len(keys) != 1 { + t.Errorf("Want 1 keys, got %v", keys) + } +} diff --git a/vendor/golang.org/x/crypto/ssh/agent/forward.go b/vendor/golang.org/x/crypto/ssh/agent/forward.go new file mode 100644 index 000000000..fd24ba900 --- /dev/null +++ b/vendor/golang.org/x/crypto/ssh/agent/forward.go @@ -0,0 +1,103 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package agent + +import ( + "errors" + "io" + "net" + "sync" + + "golang.org/x/crypto/ssh" +) + +// RequestAgentForwarding sets up agent forwarding for the session. +// ForwardToAgent or ForwardToRemote should be called to route +// the authentication requests. +func RequestAgentForwarding(session *ssh.Session) error { + ok, err := session.SendRequest("auth-agent-req@openssh.com", true, nil) + if err != nil { + return err + } + if !ok { + return errors.New("forwarding request denied") + } + return nil +} + +// ForwardToAgent routes authentication requests to the given keyring. +func ForwardToAgent(client *ssh.Client, keyring Agent) error { + channels := client.HandleChannelOpen(channelType) + if channels == nil { + return errors.New("agent: already have handler for " + channelType) + } + + go func() { + for ch := range channels { + channel, reqs, err := ch.Accept() + if err != nil { + continue + } + go ssh.DiscardRequests(reqs) + go func() { + ServeAgent(keyring, channel) + channel.Close() + }() + } + }() + return nil +} + +const channelType = "auth-agent@openssh.com" + +// ForwardToRemote routes authentication requests to the ssh-agent +// process serving on the given unix socket. +func ForwardToRemote(client *ssh.Client, addr string) error { + channels := client.HandleChannelOpen(channelType) + if channels == nil { + return errors.New("agent: already have handler for " + channelType) + } + conn, err := net.Dial("unix", addr) + if err != nil { + return err + } + conn.Close() + + go func() { + for ch := range channels { + channel, reqs, err := ch.Accept() + if err != nil { + continue + } + go ssh.DiscardRequests(reqs) + go forwardUnixSocket(channel, addr) + } + }() + return nil +} + +func forwardUnixSocket(channel ssh.Channel, addr string) { + conn, err := net.Dial("unix", addr) + if err != nil { + return + } + + var wg sync.WaitGroup + wg.Add(2) + go func() { + io.Copy(conn, channel) + conn.(*net.UnixConn).CloseWrite() + wg.Done() + }() + go func() { + io.Copy(channel, conn) + channel.CloseWrite() + wg.Done() + }() + + wg.Wait() + conn.Close() + channel.Close() +} diff --git a/vendor/golang.org/x/crypto/ssh/agent/keyring.go b/vendor/golang.org/x/crypto/ssh/agent/keyring.go new file mode 100644 index 000000000..12ffa82b1 --- /dev/null +++ b/vendor/golang.org/x/crypto/ssh/agent/keyring.go @@ -0,0 +1,184 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package agent + +import ( + "bytes" + "crypto/rand" + "crypto/subtle" + "errors" + "fmt" + "sync" + + "golang.org/x/crypto/ssh" +) + +type privKey struct { + signer ssh.Signer + comment string +} + +type keyring struct { + mu sync.Mutex + keys []privKey + + locked bool + passphrase []byte +} + +var errLocked = errors.New("agent: locked") + +// NewKeyring returns an Agent that holds keys in memory. It is safe +// for concurrent use by multiple goroutines. +func NewKeyring() Agent { + return &keyring{} +} + +// RemoveAll removes all identities. +func (r *keyring) RemoveAll() error { + r.mu.Lock() + defer r.mu.Unlock() + if r.locked { + return errLocked + } + + r.keys = nil + return nil +} + +// Remove removes all identities with the given public key. +func (r *keyring) Remove(key ssh.PublicKey) error { + r.mu.Lock() + defer r.mu.Unlock() + if r.locked { + return errLocked + } + + want := key.Marshal() + found := false + for i := 0; i < len(r.keys); { + if bytes.Equal(r.keys[i].signer.PublicKey().Marshal(), want) { + found = true + r.keys[i] = r.keys[len(r.keys)-1] + r.keys = r.keys[:len(r.keys)-1] + } else { + i++ + } + } + + if !found { + return errors.New("agent: key not found") + } + return nil +} + +// Lock locks the agent. Sign and Remove will fail, and List will return an empty list. +func (r *keyring) Lock(passphrase []byte) error { + r.mu.Lock() + defer r.mu.Unlock() + if r.locked { + return errLocked + } + + r.locked = true + r.passphrase = passphrase + return nil +} + +// Unlock undoes the effect of Lock. +func (r *keyring) Unlock(passphrase []byte) error { + r.mu.Lock() + defer r.mu.Unlock() + if !r.locked { + return errors.New("agent: not locked") + } + if len(passphrase) != len(r.passphrase) || 1 != subtle.ConstantTimeCompare(passphrase, r.passphrase) { + return fmt.Errorf("agent: incorrect passphrase") + } + + r.locked = false + r.passphrase = nil + return nil +} + +// List returns the identities known to the agent. +func (r *keyring) List() ([]*Key, error) { + r.mu.Lock() + defer r.mu.Unlock() + if r.locked { + // section 2.7: locked agents return empty. + return nil, nil + } + + var ids []*Key + for _, k := range r.keys { + pub := k.signer.PublicKey() + ids = append(ids, &Key{ + Format: pub.Type(), + Blob: pub.Marshal(), + Comment: k.comment}) + } + return ids, nil +} + +// Add adds a private key to the keyring. If a certificate +// is given, that certificate is added as public key. Note that +// any constraints given are ignored. +func (r *keyring) Add(key AddedKey) error { + r.mu.Lock() + defer r.mu.Unlock() + if r.locked { + return errLocked + } + signer, err := ssh.NewSignerFromKey(key.PrivateKey) + if err != nil { + return err + } + + if cert := key.Certificate; cert != nil { + signer, err = ssh.NewCertSigner(cert, signer) + if err != nil { + return err + } + } + + r.keys = append(r.keys, privKey{signer, key.Comment}) + + return nil +} + +// Sign returns a signature for the data. +func (r *keyring) Sign(key ssh.PublicKey, data []byte) (*ssh.Signature, error) { + r.mu.Lock() + defer r.mu.Unlock() + if r.locked { + return nil, errLocked + } + + wanted := key.Marshal() + for _, k := range r.keys { + if bytes.Equal(k.signer.PublicKey().Marshal(), wanted) { + return k.signer.Sign(rand.Reader, data) + } + } + return nil, errors.New("not found") +} + +// Signers returns signers for all the known keys.
+func (r *keyring) Signers() ([]ssh.Signer, error) { + r.mu.Lock() + defer r.mu.Unlock() + if r.locked { + return nil, errLocked + } + + s := make([]ssh.Signer, 0, len(r.keys)) + for _, k := range r.keys { + s = append(s, k.signer) + } + return s, nil +} diff --git a/vendor/golang.org/x/crypto/ssh/agent/keyring_test.go b/vendor/golang.org/x/crypto/ssh/agent/keyring_test.go new file mode 100644 index 000000000..7f0590571 --- /dev/null +++ b/vendor/golang.org/x/crypto/ssh/agent/keyring_test.go @@ -0,0 +1,78 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package agent + +import ( + "testing" +) + +func addTestKey(t *testing.T, a Agent, keyName string) { + err := a.Add(AddedKey{ + PrivateKey: testPrivateKeys[keyName], + Comment: keyName, + }) + if err != nil { + t.Fatalf("failed to add key %q: %v", keyName, err) + } +} + +func removeTestKey(t *testing.T, a Agent, keyName string) { + err := a.Remove(testPublicKeys[keyName]) + if err != nil { + t.Fatalf("failed to remove key %q: %v", keyName, err) + } +} + +func validateListedKeys(t *testing.T, a Agent, expectedKeys []string) { + listedKeys, err := a.List() + if err != nil { + t.Fatalf("failed to list keys: %v", err) + return + } + actualKeys := make(map[string]bool) + for _, key := range listedKeys { + actualKeys[key.Comment] = true + } + + matchedKeys := make(map[string]bool) + for _, expectedKey := range expectedKeys { + if !actualKeys[expectedKey] { + t.Fatalf("expected key %q, but was not found", expectedKey) + } else { + matchedKeys[expectedKey] = true + } + } + + for actualKey := range actualKeys { + if !matchedKeys[actualKey] { + t.Fatalf("key %q was found, but was not expected", actualKey) + } + } +} + +func TestKeyringAddingAndRemoving(t *testing.T) { + keyNames := []string{"dsa", "ecdsa", "rsa", "user"} + + // add all test private keys + k := NewKeyring() + for _, keyName := range keyNames { + addTestKey(t, k, keyName) + } + validateListedKeys(t, k, keyNames) + + // remove a key in the middle + keyToRemove := keyNames[1] + keyNames = append(keyNames[:1], keyNames[2:]...) + + removeTestKey(t, k, keyToRemove) + validateListedKeys(t, k, keyNames) + + // remove all keys + err := k.RemoveAll() + if err != nil { + t.Fatalf("failed to remove all keys: %v", err) + } + validateListedKeys(t, k, []string{}) +} diff --git a/vendor/golang.org/x/crypto/ssh/agent/server.go b/vendor/golang.org/x/crypto/ssh/agent/server.go new file mode 100644 index 000000000..b21a20180 --- /dev/null +++ b/vendor/golang.org/x/crypto/ssh/agent/server.go @@ -0,0 +1,209 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package agent + +import ( + "crypto/rsa" + "encoding/binary" + "fmt" + "io" + "log" + "math/big" + + "golang.org/x/crypto/ssh" +) + +// Server wraps an Agent and uses it to implement the agent side of +// the SSH-agent, wire protocol. +type server struct { + agent Agent +} + +func (s *server) processRequestBytes(reqData []byte) []byte { + rep, err := s.processRequest(reqData) + if err != nil { + if err != errLocked { + // TODO(hanwen): provide better logging interface? 
+ log.Printf("agent %d: %v", reqData[0], err) + } + return []byte{agentFailure} + } + + if err == nil && rep == nil { + return []byte{agentSuccess} + } + + return ssh.Marshal(rep) +} + +func marshalKey(k *Key) []byte { + var record struct { + Blob []byte + Comment string + } + record.Blob = k.Marshal() + record.Comment = k.Comment + + return ssh.Marshal(&record) +} + +type agentV1IdentityMsg struct { + Numkeys uint32 `sshtype:"2"` +} + +type agentRemoveIdentityMsg struct { + KeyBlob []byte `sshtype:"18"` +} + +type agentLockMsg struct { + Passphrase []byte `sshtype:"22"` +} + +type agentUnlockMsg struct { + Passphrase []byte `sshtype:"23"` +} + +func (s *server) processRequest(data []byte) (interface{}, error) { + switch data[0] { + case agentRequestV1Identities: + return &agentV1IdentityMsg{0}, nil + case agentRemoveIdentity: + var req agentRemoveIdentityMsg + if err := ssh.Unmarshal(data, &req); err != nil { + return nil, err + } + + var wk wireKey + if err := ssh.Unmarshal(req.KeyBlob, &wk); err != nil { + return nil, err + } + + return nil, s.agent.Remove(&Key{Format: wk.Format, Blob: req.KeyBlob}) + + case agentRemoveAllIdentities: + return nil, s.agent.RemoveAll() + + case agentLock: + var req agentLockMsg + if err := ssh.Unmarshal(data, &req); err != nil { + return nil, err + } + + return nil, s.agent.Lock(req.Passphrase) + + case agentUnlock: + var req agentLockMsg + if err := ssh.Unmarshal(data, &req); err != nil { + return nil, err + } + return nil, s.agent.Unlock(req.Passphrase) + + case agentSignRequest: + var req signRequestAgentMsg + if err := ssh.Unmarshal(data, &req); err != nil { + return nil, err + } + + var wk wireKey + if err := ssh.Unmarshal(req.KeyBlob, &wk); err != nil { + return nil, err + } + + k := &Key{ + Format: wk.Format, + Blob: req.KeyBlob, + } + + sig, err := s.agent.Sign(k, req.Data) // TODO(hanwen): flags. + if err != nil { + return nil, err + } + return &signResponseAgentMsg{SigBlob: ssh.Marshal(sig)}, nil + case agentRequestIdentities: + keys, err := s.agent.List() + if err != nil { + return nil, err + } + + rep := identitiesAnswerAgentMsg{ + NumKeys: uint32(len(keys)), + } + for _, k := range keys { + rep.Keys = append(rep.Keys, marshalKey(k)...) + } + return rep, nil + case agentAddIdentity: + return nil, s.insertIdentity(data) + } + + return nil, fmt.Errorf("unknown opcode %d", data[0]) +} + +func (s *server) insertIdentity(req []byte) error { + var record struct { + Type string `sshtype:"17"` + Rest []byte `ssh:"rest"` + } + if err := ssh.Unmarshal(req, &record); err != nil { + return err + } + + switch record.Type { + case ssh.KeyAlgoRSA: + var k rsaKeyMsg + if err := ssh.Unmarshal(req, &k); err != nil { + return err + } + + priv := rsa.PrivateKey{ + PublicKey: rsa.PublicKey{ + E: int(k.E.Int64()), + N: k.N, + }, + D: k.D, + Primes: []*big.Int{k.P, k.Q}, + } + priv.Precompute() + + return s.agent.Add(AddedKey{PrivateKey: &priv, Comment: k.Comments}) + } + return fmt.Errorf("not implemented: %s", record.Type) +} + +// ServeAgent serves the agent protocol on the given connection. It +// returns when an I/O error occurs. +func ServeAgent(agent Agent, c io.ReadWriter) error { + s := &server{agent} + + var length [4]byte + for { + if _, err := io.ReadFull(c, length[:]); err != nil { + return err + } + l := binary.BigEndian.Uint32(length[:]) + if l > maxAgentResponseBytes { + // We also cap requests. 
+ return fmt.Errorf("agent: request too large: %d", l) + } + + req := make([]byte, l) + if _, err := io.ReadFull(c, req); err != nil { + return err + } + + repData := s.processRequestBytes(req) + if len(repData) > maxAgentResponseBytes { + return fmt.Errorf("agent: reply too large: %d bytes", len(repData)) + } + + binary.BigEndian.PutUint32(length[:], uint32(len(repData))) + if _, err := c.Write(length[:]); err != nil { + return err + } + if _, err := c.Write(repData); err != nil { + return err + } + } +} diff --git a/vendor/golang.org/x/crypto/ssh/agent/server_test.go b/vendor/golang.org/x/crypto/ssh/agent/server_test.go new file mode 100644 index 000000000..ef0ab2934 --- /dev/null +++ b/vendor/golang.org/x/crypto/ssh/agent/server_test.go @@ -0,0 +1,77 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package agent + +import ( + "testing" + + "golang.org/x/crypto/ssh" +) + +func TestServer(t *testing.T) { + c1, c2, err := netPipe() + if err != nil { + t.Fatalf("netPipe: %v", err) + } + defer c1.Close() + defer c2.Close() + client := NewClient(c1) + + go ServeAgent(NewKeyring(), c2) + + testAgentInterface(t, client, testPrivateKeys["rsa"], nil, 0) +} + +func TestLockServer(t *testing.T) { + testLockAgent(NewKeyring(), t) +} + +func TestSetupForwardAgent(t *testing.T) { + a, b, err := netPipe() + if err != nil { + t.Fatalf("netPipe: %v", err) + } + + defer a.Close() + defer b.Close() + + _, socket, cleanup := startAgent(t) + defer cleanup() + + serverConf := ssh.ServerConfig{ + NoClientAuth: true, + } + serverConf.AddHostKey(testSigners["rsa"]) + incoming := make(chan *ssh.ServerConn, 1) + go func() { + conn, _, _, err := ssh.NewServerConn(a, &serverConf) + if err != nil { + t.Fatalf("Server: %v", err) + } + incoming <- conn + }() + + conf := ssh.ClientConfig{} + conn, chans, reqs, err := ssh.NewClientConn(b, "", &conf) + if err != nil { + t.Fatalf("NewClientConn: %v", err) + } + client := ssh.NewClient(conn, chans, reqs) + + if err := ForwardToRemote(client, socket); err != nil { + t.Fatalf("SetupForwardAgent: %v", err) + } + + server := <-incoming + ch, reqs, err := server.OpenChannel(channelType, nil) + if err != nil { + t.Fatalf("OpenChannel(%q): %v", channelType, err) + } + go ssh.DiscardRequests(reqs) + + agentClient := NewClient(ch) + testAgentInterface(t, agentClient, testPrivateKeys["rsa"], nil, 0) + conn.Close() +} diff --git a/vendor/golang.org/x/crypto/ssh/agent/testdata_test.go b/vendor/golang.org/x/crypto/ssh/agent/testdata_test.go new file mode 100644 index 000000000..b7a8781e1 --- /dev/null +++ b/vendor/golang.org/x/crypto/ssh/agent/testdata_test.go @@ -0,0 +1,64 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// IMPLEMENTOR NOTE: To avoid a package loop, this file is in three places: +// ssh/, ssh/agent, and ssh/test/. It should be kept in sync across all three +// instances. 
+
+package agent
+
+import (
+	"crypto/rand"
+	"fmt"
+
+	"golang.org/x/crypto/ssh"
+	"golang.org/x/crypto/ssh/testdata"
+)
+
+var (
+	testPrivateKeys map[string]interface{}
+	testSigners     map[string]ssh.Signer
+	testPublicKeys  map[string]ssh.PublicKey
+)
+
+func init() {
+	var err error
+
+	n := len(testdata.PEMBytes)
+	testPrivateKeys = make(map[string]interface{}, n)
+	testSigners = make(map[string]ssh.Signer, n)
+	testPublicKeys = make(map[string]ssh.PublicKey, n)
+	for t, k := range testdata.PEMBytes {
+		testPrivateKeys[t], err = ssh.ParseRawPrivateKey(k)
+		if err != nil {
+			panic(fmt.Sprintf("Unable to parse test key %s: %v", t, err))
+		}
+		testSigners[t], err = ssh.NewSignerFromKey(testPrivateKeys[t])
+		if err != nil {
+			panic(fmt.Sprintf("Unable to create signer for test key %s: %v", t, err))
+		}
+		testPublicKeys[t] = testSigners[t].PublicKey()
+	}
+
+	// Create a cert and sign it for use in tests.
+	testCert := &ssh.Certificate{
+		Nonce:           []byte{},                       // To pass reflect.DeepEqual after marshal & parse, this must be non-nil
+		ValidPrincipals: []string{"gopher1", "gopher2"}, // increases test coverage
+		ValidAfter:      0,                              // unix epoch
+		ValidBefore:     ssh.CertTimeInfinity,           // The end of currently representable time.
+		Reserved:        []byte{},                       // To pass reflect.DeepEqual after marshal & parse, this must be non-nil
+		Key:             testPublicKeys["ecdsa"],
+		SignatureKey:    testPublicKeys["rsa"],
+		Permissions: ssh.Permissions{
+			CriticalOptions: map[string]string{},
+			Extensions:      map[string]string{},
+		},
+	}
+	testCert.SignCert(rand.Reader, testSigners["rsa"])
+	testPrivateKeys["cert"] = testPrivateKeys["ecdsa"]
+	testSigners["cert"], err = ssh.NewCertSigner(testCert, testSigners["ecdsa"])
+	if err != nil {
+		panic(fmt.Sprintf("Unable to create certificate signer: %v", err))
+	}
+}
diff --git a/vendor/golang.org/x/crypto/ssh/benchmark_test.go b/vendor/golang.org/x/crypto/ssh/benchmark_test.go
new file mode 100644
index 000000000..d9f7eb9b6
--- /dev/null
+++ b/vendor/golang.org/x/crypto/ssh/benchmark_test.go
@@ -0,0 +1,122 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssh
+
+import (
+	"errors"
+	"io"
+	"net"
+	"testing"
+)
+
+type server struct {
+	*ServerConn
+	chans <-chan NewChannel
+}
+
+func newServer(c net.Conn, conf *ServerConfig) (*server, error) {
+	sconn, chans, reqs, err := NewServerConn(c, conf)
+	if err != nil {
+		return nil, err
+	}
+	go DiscardRequests(reqs)
+	return &server{sconn, chans}, nil
+}
+
+func (s *server) Accept() (NewChannel, error) {
+	n, ok := <-s.chans
+	if !ok {
+		return nil, io.EOF
+	}
+	return n, nil
+}
+
+func sshPipe() (Conn, *server, error) {
+	c1, c2, err := netPipe()
+	if err != nil {
+		return nil, nil, err
+	}
+
+	clientConf := ClientConfig{
+		User: "user",
+	}
+	serverConf := ServerConfig{
+		NoClientAuth: true,
+	}
+	serverConf.AddHostKey(testSigners["ecdsa"])
+	done := make(chan *server, 1)
+	go func() {
+		server, err := newServer(c2, &serverConf)
+		if err != nil {
+			// Signal failure and bail out; without the return, the
+			// goroutine would block forever on a second send.
+			done <- nil
+			return
+		}
+		done <- server
+	}()
+
+	client, _, reqs, err := NewClientConn(c1, "", &clientConf)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	server := <-done
+	if server == nil {
+		return nil, nil, errors.New("server handshake failed")
+	}
+	go DiscardRequests(reqs)
+
+	return client, server, nil
+}
+
+func BenchmarkEndToEnd(b *testing.B) {
+	b.StopTimer()
+
+	client, server, err := sshPipe()
+	if err != nil {
+		b.Fatalf("sshPipe: %v", err)
+	}
+
+	defer client.Close()
+	defer server.Close()
+
+	size := (1 << 20)
+	input := make([]byte, size)
+	output := make([]byte, size)
+	b.SetBytes(int64(size))
+	done := make(chan int, 1)
+
+	go func() {
+		newCh, err := server.Accept()
+		if err != nil {
+			b.Fatalf("Client: %v", err)
+		}
+		ch, incoming, err := newCh.Accept()
+		go DiscardRequests(incoming)
+		for i := 0; i < b.N; i++ {
+			if _, err := io.ReadFull(ch, output); err != nil {
+				b.Fatalf("ReadFull: %v", err)
+			}
+		}
+		ch.Close()
+		done <- 1
+	}()
+
+	ch, in, err := client.OpenChannel("speed", nil)
+	if err != nil {
+		b.Fatalf("OpenChannel: %v", err)
+	}
+	go DiscardRequests(in)
+
+	b.ResetTimer()
+	b.StartTimer()
+	for i := 0; i < b.N; i++ {
+		if _, err := ch.Write(input); err != nil {
+			b.Fatalf("WriteFull: %v", err)
+		}
+	}
+	ch.Close()
+	b.StopTimer()
+
+	<-done
}
diff --git a/vendor/golang.org/x/crypto/ssh/buffer.go b/vendor/golang.org/x/crypto/ssh/buffer.go
new file mode 100644
index 000000000..6931b5114
--- /dev/null
+++ b/vendor/golang.org/x/crypto/ssh/buffer.go
@@ -0,0 +1,98 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssh
+
+import (
+	"io"
+	"sync"
+)
+
+// buffer provides a linked list buffer for data exchange
+// between producer and consumer. Theoretically the buffer is
+// of unlimited capacity as it does no allocation of its own.
+type buffer struct {
+	// protects concurrent access to head, tail and closed
+	*sync.Cond
+
+	head *element // the buffer that will be read first
+	tail *element // the buffer that will be read last
+
+	closed bool
+}
+
+// An element represents a single link in a linked list.
+type element struct {
+	buf  []byte
+	next *element
+}
+
+// newBuffer returns an empty buffer that is not closed.
+func newBuffer() *buffer {
+	e := new(element)
+	b := &buffer{
+		Cond: newCond(),
+		head: e,
+		tail: e,
+	}
+	return b
+}
+
+// write makes buf available for Read to receive.
+// buf must not be modified after the call to write.
+func (b *buffer) write(buf []byte) {
+	b.Cond.L.Lock()
+	e := &element{buf: buf}
+	b.tail.next = e
+	b.tail = e
+	b.Cond.Signal()
+	b.Cond.L.Unlock()
+}
+
+// eof closes the buffer. Reads from the buffer once all
+// the data has been consumed will receive io.EOF.
+func (b *buffer) eof() error {
+	b.Cond.L.Lock()
+	b.closed = true
+	b.Cond.Signal()
+	b.Cond.L.Unlock()
+	return nil
+}
+
+// Read reads data from the internal buffer in buf. Reads will block
+// if no data is available, or until the buffer is closed.
+func (b *buffer) Read(buf []byte) (n int, err error) {
+	b.Cond.L.Lock()
+	defer b.Cond.L.Unlock()
+
+	for len(buf) > 0 {
+		// if there is data in b.head, copy it
+		if len(b.head.buf) > 0 {
+			r := copy(buf, b.head.buf)
+			buf, b.head.buf = buf[r:], b.head.buf[r:]
+			n += r
+			continue
+		}
+		// if there is a next buffer, make it the head
+		if len(b.head.buf) == 0 && b.head != b.tail {
+			b.head = b.head.next
+			continue
+		}
+
+		// if at least one byte has been copied, return
+		if n > 0 {
+			break
+		}
+
+		// if nothing was read, and there is nothing outstanding
+		// check to see if the buffer is closed.
+		if b.closed {
+			err = io.EOF
+			break
+		}
+		// out of buffers, wait for producer
+		b.Cond.Wait()
+	}
+	return
+}
diff --git a/vendor/golang.org/x/crypto/ssh/buffer_test.go b/vendor/golang.org/x/crypto/ssh/buffer_test.go
new file mode 100644
index 000000000..d5781cb3d
--- /dev/null
+++ b/vendor/golang.org/x/crypto/ssh/buffer_test.go
@@ -0,0 +1,87 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssh
+
+import (
+	"io"
+	"testing"
+)
+
+var alphabet = []byte("abcdefghijklmnopqrstuvwxyz")
+
+func TestBufferReadwrite(t *testing.T) {
+	b := newBuffer()
+	b.write(alphabet[:10])
+	r, _ := b.Read(make([]byte, 10))
+	if r != 10 {
+		t.Fatalf("Expected written == read == 10, written: 10, read %d", r)
+	}
+
+	b = newBuffer()
+	b.write(alphabet[:5])
+	r, _ = b.Read(make([]byte, 10))
+	if r != 5 {
+		t.Fatalf("Expected written == read == 5, written: 5, read %d", r)
+	}
+
+	b = newBuffer()
+	b.write(alphabet[:10])
+	r, _ = b.Read(make([]byte, 5))
+	if r != 5 {
+		t.Fatalf("Expected written == 10, read == 5, written: 10, read %d", r)
+	}
+
+	b = newBuffer()
+	b.write(alphabet[:5])
+	b.write(alphabet[5:15])
+	r, _ = b.Read(make([]byte, 10))
+	r2, _ := b.Read(make([]byte, 10))
+	if r != 10 || r2 != 5 || 15 != r+r2 {
+		t.Fatal("Expected written == read == 15")
+	}
+}
+
+func TestBufferClose(t *testing.T) {
+	b := newBuffer()
+	b.write(alphabet[:10])
+	b.eof()
+	_, err := b.Read(make([]byte, 5))
+	if err != nil {
+		t.Fatal("expected read of 5 to not return EOF")
+	}
+	b = newBuffer()
+	b.write(alphabet[:10])
+	b.eof()
+	r, err := b.Read(make([]byte, 5))
+	r2, err2 := b.Read(make([]byte, 10))
+	if r != 5 || r2 != 5 || err != nil || err2 != nil {
+		t.Fatal("expected reads of 5 and 5")
+	}
+
+	b = newBuffer()
+	b.write(alphabet[:10])
+	b.eof()
+	r, err = b.Read(make([]byte, 5))
+	r2, err2 = b.Read(make([]byte, 10))
+	r3, err3 := b.Read(make([]byte, 10))
+	if r != 5 || r2 != 5 || r3 != 0 || err != nil || err2 != nil || err3 != io.EOF {
+		t.Fatal("expected reads of 5 and 5 and 0, with EOF")
+	}
+
+	b = newBuffer()
+	b.write(make([]byte, 5))
+	b.write(make([]byte, 10))
+	b.eof()
+	r, err = b.Read(make([]byte, 9))
+	r2, err2 = b.Read(make([]byte, 3))
+	r3, err3 = b.Read(make([]byte, 3))
+	r4, err4 := b.Read(make([]byte, 10))
+	if err != nil || err2 != nil || err3 != nil || err4 != io.EOF {
t.Fatalf("Expected EOF on forth read only, err=%v, err2=%v, err3=%v, err4=%v", err, err2, err3, err4) + } + if r != 9 || r2 != 3 || r3 != 3 || r4 != 0 { + t.Fatal("Expected written == read == 15", r, r2, r3, r4) + } +} diff --git a/vendor/golang.org/x/crypto/ssh/certs.go b/vendor/golang.org/x/crypto/ssh/certs.go new file mode 100644 index 000000000..385770036 --- /dev/null +++ b/vendor/golang.org/x/crypto/ssh/certs.go @@ -0,0 +1,501 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ssh + +import ( + "bytes" + "errors" + "fmt" + "io" + "net" + "sort" + "time" +) + +// These constants from [PROTOCOL.certkeys] represent the algorithm names +// for certificate types supported by this package. +const ( + CertAlgoRSAv01 = "ssh-rsa-cert-v01@openssh.com" + CertAlgoDSAv01 = "ssh-dss-cert-v01@openssh.com" + CertAlgoECDSA256v01 = "ecdsa-sha2-nistp256-cert-v01@openssh.com" + CertAlgoECDSA384v01 = "ecdsa-sha2-nistp384-cert-v01@openssh.com" + CertAlgoECDSA521v01 = "ecdsa-sha2-nistp521-cert-v01@openssh.com" +) + +// Certificate types distinguish between host and user +// certificates. The values can be set in the CertType field of +// Certificate. +const ( + UserCert = 1 + HostCert = 2 +) + +// Signature represents a cryptographic signature. +type Signature struct { + Format string + Blob []byte +} + +// CertTimeInfinity can be used for OpenSSHCertV01.ValidBefore to indicate that +// a certificate does not expire. +const CertTimeInfinity = 1<<64 - 1 + +// An Certificate represents an OpenSSH certificate as defined in +// [PROTOCOL.certkeys]?rev=1.8. +type Certificate struct { + Nonce []byte + Key PublicKey + Serial uint64 + CertType uint32 + KeyId string + ValidPrincipals []string + ValidAfter uint64 + ValidBefore uint64 + Permissions + Reserved []byte + SignatureKey PublicKey + Signature *Signature +} + +// genericCertData holds the key-independent part of the certificate data. +// Overall, certificates contain an nonce, public key fields and +// key-independent fields. +type genericCertData struct { + Serial uint64 + CertType uint32 + KeyId string + ValidPrincipals []byte + ValidAfter uint64 + ValidBefore uint64 + CriticalOptions []byte + Extensions []byte + Reserved []byte + SignatureKey []byte + Signature []byte +} + +func marshalStringList(namelist []string) []byte { + var to []byte + for _, name := range namelist { + s := struct{ N string }{name} + to = append(to, Marshal(&s)...) + } + return to +} + +type optionsTuple struct { + Key string + Value []byte +} + +type optionsTupleValue struct { + Value string +} + +// serialize a map of critical options or extensions +// issue #10569 - per [PROTOCOL.certkeys] and SSH implementation, +// we need two length prefixes for a non-empty string value +func marshalTuples(tups map[string]string) []byte { + keys := make([]string, 0, len(tups)) + for key := range tups { + keys = append(keys, key) + } + sort.Strings(keys) + + var ret []byte + for _, key := range keys { + s := optionsTuple{Key: key} + if value := tups[key]; len(value) > 0 { + s.Value = Marshal(&optionsTupleValue{value}) + } + ret = append(ret, Marshal(&s)...) 
+	}
+	return ret
+}
+
+// issue #10569 - per [PROTOCOL.certkeys] and SSH implementation,
+// we need two length prefixes for a non-empty option value
+func parseTuples(in []byte) (map[string]string, error) {
+	tups := map[string]string{}
+	var lastKey string
+	var haveLastKey bool
+
+	for len(in) > 0 {
+		var key, val, extra []byte
+		var ok bool
+
+		if key, in, ok = parseString(in); !ok {
+			return nil, errShortRead
+		}
+		keyStr := string(key)
+		// according to [PROTOCOL.certkeys], the names must be in
+		// lexical order.
+		if haveLastKey && keyStr <= lastKey {
+			return nil, fmt.Errorf("ssh: certificate options are not in lexical order")
+		}
+		lastKey, haveLastKey = keyStr, true
+		// the next field is a data field, which if non-empty has a string embedded
+		if val, in, ok = parseString(in); !ok {
+			return nil, errShortRead
+		}
+		if len(val) > 0 {
+			val, extra, ok = parseString(val)
+			if !ok {
+				return nil, errShortRead
+			}
+			if len(extra) > 0 {
+				return nil, fmt.Errorf("ssh: unexpected trailing data after certificate option value")
+			}
+			tups[keyStr] = string(val)
+		} else {
+			tups[keyStr] = ""
+		}
+	}
+	return tups, nil
+}
+
+func parseCert(in []byte, privAlgo string) (*Certificate, error) {
+	nonce, rest, ok := parseString(in)
+	if !ok {
+		return nil, errShortRead
+	}
+
+	key, rest, err := parsePubKey(rest, privAlgo)
+	if err != nil {
+		return nil, err
+	}
+
+	var g genericCertData
+	if err := Unmarshal(rest, &g); err != nil {
+		return nil, err
+	}
+
+	c := &Certificate{
+		Nonce:       nonce,
+		Key:         key,
+		Serial:      g.Serial,
+		CertType:    g.CertType,
+		KeyId:       g.KeyId,
+		ValidAfter:  g.ValidAfter,
+		ValidBefore: g.ValidBefore,
+	}
+
+	for principals := g.ValidPrincipals; len(principals) > 0; {
+		principal, rest, ok := parseString(principals)
+		if !ok {
+			return nil, errShortRead
+		}
+		c.ValidPrincipals = append(c.ValidPrincipals, string(principal))
+		principals = rest
+	}
+
+	c.CriticalOptions, err = parseTuples(g.CriticalOptions)
+	if err != nil {
+		return nil, err
+	}
+	c.Extensions, err = parseTuples(g.Extensions)
+	if err != nil {
+		return nil, err
+	}
+	c.Reserved = g.Reserved
+	k, err := ParsePublicKey(g.SignatureKey)
+	if err != nil {
+		return nil, err
+	}
+
+	c.SignatureKey = k
+	c.Signature, rest, ok = parseSignatureBody(g.Signature)
+	if !ok || len(rest) > 0 {
+		return nil, errors.New("ssh: signature parse error")
+	}
+
+	return c, nil
+}
+
+type openSSHCertSigner struct {
+	pub    *Certificate
+	signer Signer
+}
+
+// NewCertSigner returns a Signer that signs with the given Certificate, whose
+// private key is held by signer. It returns an error if the public key in cert
+// doesn't match the key used by signer.
+func NewCertSigner(cert *Certificate, signer Signer) (Signer, error) {
+	if !bytes.Equal(cert.Key.Marshal(), signer.PublicKey().Marshal()) {
+		return nil, errors.New("ssh: signer and cert have different public key")
+	}
+
+	return &openSSHCertSigner{cert, signer}, nil
+}
+
+func (s *openSSHCertSigner) Sign(rand io.Reader, data []byte) (*Signature, error) {
+	return s.signer.Sign(rand, data)
+}
+
+func (s *openSSHCertSigner) PublicKey() PublicKey {
+	return s.pub
+}
+
+const sourceAddressCriticalOption = "source-address"
+
+// CertChecker does the work of verifying a certificate. Its methods
+// can be plugged into ClientConfig.HostKeyCallback and
+// ServerConfig.PublicKeyCallback. For the CertChecker to work,
+// minimally, the IsAuthority callback should be set.
+type CertChecker struct {
+	// SupportedCriticalOptions lists the CriticalOptions that the
+	// server application layer understands. These are only used
+	// for user certificates.
+	SupportedCriticalOptions []string
+
+	// IsAuthority should return true if the key is recognized as
+	// an authority. This allows for certificates to be signed by other
+	// certificates.
+	IsAuthority func(auth PublicKey) bool
+
+	// Clock is used for verifying time stamps. If nil, time.Now
+	// is used.
+	Clock func() time.Time
+
+	// UserKeyFallback is called when CertChecker.Authenticate encounters a
+	// public key that is not a certificate. It must implement validation
+	// of user keys or else, if nil, all such keys are rejected.
+	UserKeyFallback func(conn ConnMetadata, key PublicKey) (*Permissions, error)
+
+	// HostKeyFallback is called when CertChecker.CheckHostKey encounters a
+	// public key that is not a certificate. It must implement host key
+	// validation or else, if nil, all such keys are rejected.
+	HostKeyFallback func(addr string, remote net.Addr, key PublicKey) error
+
+	// IsRevoked is called for each certificate so that revocation checking
+	// can be implemented. It should return true if the given certificate
+	// is revoked and false otherwise. If nil, no certificates are
+	// considered to have been revoked.
+	IsRevoked func(cert *Certificate) bool
+}
+
+// CheckHostKey checks a host key certificate. This method can be
+// plugged into ClientConfig.HostKeyCallback.
+func (c *CertChecker) CheckHostKey(addr string, remote net.Addr, key PublicKey) error {
+	cert, ok := key.(*Certificate)
+	if !ok {
+		if c.HostKeyFallback != nil {
+			return c.HostKeyFallback(addr, remote, key)
+		}
+		return errors.New("ssh: non-certificate host key")
+	}
+	if cert.CertType != HostCert {
+		return fmt.Errorf("ssh: certificate presented as a host key has type %d", cert.CertType)
+	}
+
+	return c.CheckCert(addr, cert)
+}
+
+// Authenticate checks a user certificate. Authenticate can be used as
+// a value for ServerConfig.PublicKeyCallback.
+func (c *CertChecker) Authenticate(conn ConnMetadata, pubKey PublicKey) (*Permissions, error) {
+	cert, ok := pubKey.(*Certificate)
+	if !ok {
+		if c.UserKeyFallback != nil {
+			return c.UserKeyFallback(conn, pubKey)
+		}
+		return nil, errors.New("ssh: normal key pairs not accepted")
+	}
+
+	if cert.CertType != UserCert {
+		return nil, fmt.Errorf("ssh: cert has type %d", cert.CertType)
+	}
+
+	if err := c.CheckCert(conn.User(), cert); err != nil {
+		return nil, err
+	}
+
+	return &cert.Permissions, nil
+}
+
+// CheckCert checks CriticalOptions, ValidPrincipals, revocation, timestamp and
+// the signature of the certificate.
+func (c *CertChecker) CheckCert(principal string, cert *Certificate) error {
+	if c.IsRevoked != nil && c.IsRevoked(cert) {
+		return fmt.Errorf("ssh: certificate serial %d revoked", cert.Serial)
+	}
+
+	for opt := range cert.CriticalOptions {
+		// sourceAddressCriticalOption will be enforced by
+		// serverAuthenticate
+		if opt == sourceAddressCriticalOption {
+			continue
+		}
+
+		found := false
+		for _, supp := range c.SupportedCriticalOptions {
+			if supp == opt {
+				found = true
+				break
+			}
+		}
+		if !found {
+			return fmt.Errorf("ssh: unsupported critical option %q in certificate", opt)
+		}
+	}
+
+	if len(cert.ValidPrincipals) > 0 {
+		// By default, certs are valid for all users/hosts.
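+		// (Note: an empty ValidPrincipals list matches any principal;
+		// a non-empty list restricts the cert to the names it contains.)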
+		found := false
+		for _, p := range cert.ValidPrincipals {
+			if p == principal {
+				found = true
+				break
+			}
+		}
+		if !found {
+			return fmt.Errorf("ssh: principal %q not in the set of valid principals for given certificate: %q", principal, cert.ValidPrincipals)
+		}
+	}
+
+	if !c.IsAuthority(cert.SignatureKey) {
+		return fmt.Errorf("ssh: certificate signed by unrecognized authority")
+	}
+
+	clock := c.Clock
+	if clock == nil {
+		clock = time.Now
+	}
+
+	unixNow := clock().Unix()
+	if after := int64(cert.ValidAfter); after < 0 || unixNow < int64(cert.ValidAfter) {
+		return fmt.Errorf("ssh: cert is not yet valid")
+	}
+	if before := int64(cert.ValidBefore); cert.ValidBefore != uint64(CertTimeInfinity) && (unixNow >= before || before < 0) {
+		return fmt.Errorf("ssh: cert has expired")
+	}
+	if err := cert.SignatureKey.Verify(cert.bytesForSigning(), cert.Signature); err != nil {
+		return fmt.Errorf("ssh: certificate signature does not verify")
+	}
+
+	return nil
+}
+
+// SignCert sets c.SignatureKey to the authority's public key and stores a
+// Signature, by authority, in the certificate.
+func (c *Certificate) SignCert(rand io.Reader, authority Signer) error {
+	c.Nonce = make([]byte, 32)
+	if _, err := io.ReadFull(rand, c.Nonce); err != nil {
+		return err
+	}
+	c.SignatureKey = authority.PublicKey()
+
+	sig, err := authority.Sign(rand, c.bytesForSigning())
+	if err != nil {
+		return err
+	}
+	c.Signature = sig
+	return nil
+}
+
+var certAlgoNames = map[string]string{
+	KeyAlgoRSA:      CertAlgoRSAv01,
+	KeyAlgoDSA:      CertAlgoDSAv01,
+	KeyAlgoECDSA256: CertAlgoECDSA256v01,
+	KeyAlgoECDSA384: CertAlgoECDSA384v01,
+	KeyAlgoECDSA521: CertAlgoECDSA521v01,
+}
+
+// certToPrivAlgo returns the underlying algorithm for a certificate algorithm.
+// Panics if a non-certificate algorithm is passed.
+func certToPrivAlgo(algo string) string {
+	for privAlgo, pubAlgo := range certAlgoNames {
+		if pubAlgo == algo {
+			return privAlgo
+		}
+	}
+	panic("unknown cert algorithm")
+}
+
+func (cert *Certificate) bytesForSigning() []byte {
+	c2 := *cert
+	c2.Signature = nil
+	out := c2.Marshal()
+	// Drop trailing signature length.
+	return out[:len(out)-4]
+}
+
+// Marshal serializes c into OpenSSH's wire format. It is part of the
+// PublicKey interface.
+func (c *Certificate) Marshal() []byte {
+	generic := genericCertData{
+		Serial:          c.Serial,
+		CertType:        c.CertType,
+		KeyId:           c.KeyId,
+		ValidPrincipals: marshalStringList(c.ValidPrincipals),
+		ValidAfter:      uint64(c.ValidAfter),
+		ValidBefore:     uint64(c.ValidBefore),
+		CriticalOptions: marshalTuples(c.CriticalOptions),
+		Extensions:      marshalTuples(c.Extensions),
+		Reserved:        c.Reserved,
+		SignatureKey:    c.SignatureKey.Marshal(),
+	}
+	if c.Signature != nil {
+		generic.Signature = Marshal(c.Signature)
+	}
+	genericBytes := Marshal(&generic)
+	keyBytes := c.Key.Marshal()
+	_, keyBytes, _ = parseString(keyBytes)
+	prefix := Marshal(&struct {
+		Name  string
+		Nonce []byte
+		Key   []byte `ssh:"rest"`
+	}{c.Type(), c.Nonce, keyBytes})
+
+	result := make([]byte, 0, len(prefix)+len(genericBytes))
+	result = append(result, prefix...)
+	result = append(result, genericBytes...)
+	return result
+}
+
+// Type returns the key name. It is part of the PublicKey interface.
+func (c *Certificate) Type() string {
+	algo, ok := certAlgoNames[c.Key.Type()]
+	if !ok {
+		panic("unknown cert key type")
+	}
+	return algo
+}
+
+// Verify verifies a signature against the certificate's public
+// key. It is part of the PublicKey interface.
+func (c *Certificate) Verify(data []byte, sig *Signature) error {
+	return c.Key.Verify(data, sig)
+}
+
+func parseSignatureBody(in []byte) (out *Signature, rest []byte, ok bool) {
+	format, in, ok := parseString(in)
+	if !ok {
+		return
+	}
+
+	out = &Signature{
+		Format: string(format),
+	}
+
+	if out.Blob, in, ok = parseString(in); !ok {
+		return
+	}
+
+	return out, in, ok
+}
+
+func parseSignature(in []byte) (out *Signature, rest []byte, ok bool) {
+	sigBytes, rest, ok := parseString(in)
+	if !ok {
+		return
+	}
+
+	out, trailing, ok := parseSignatureBody(sigBytes)
+	if !ok || len(trailing) > 0 {
+		return nil, nil, false
+	}
+	return
+}
diff --git a/vendor/golang.org/x/crypto/ssh/certs_test.go b/vendor/golang.org/x/crypto/ssh/certs_test.go
new file mode 100644
index 000000000..c5f2e5330
--- /dev/null
+++ b/vendor/golang.org/x/crypto/ssh/certs_test.go
@@ -0,0 +1,216 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssh
+
+import (
+	"bytes"
+	"crypto/rand"
+	"reflect"
+	"testing"
+	"time"
+)
+
+// Cert generated by ssh-keygen 6.0p1 Debian-4.
+// % ssh-keygen -s ca-key -I test user-key
+const exampleSSHCert = `ssh-rsa-cert-v01@openssh.com AAAAHHNzaC1yc2EtY2VydC12MDFAb3BlbnNzaC5jb20AAAAgb1srW/W3ZDjYAO45xLYAwzHBDLsJ4Ux6ICFIkTjb1LEAAAADAQABAAAAYQCkoR51poH0wE8w72cqSB8Sszx+vAhzcMdCO0wqHTj7UNENHWEXGrU0E0UQekD7U+yhkhtoyjbPOVIP7hNa6aRk/ezdh/iUnCIt4Jt1v3Z1h1P+hA4QuYFMHNB+rmjPwAcAAAAAAAAAAAAAAAEAAAAEdGVzdAAAAAAAAAAAAAAAAP//////////AAAAAAAAAIIAAAAVcGVybWl0LVgxMS1mb3J3YXJkaW5nAAAAAAAAABdwZXJtaXQtYWdlbnQtZm9yd2FyZGluZwAAAAAAAAAWcGVybWl0LXBvcnQtZm9yd2FyZGluZwAAAAAAAAAKcGVybWl0LXB0eQAAAAAAAAAOcGVybWl0LXVzZXItcmMAAAAAAAAAAAAAAHcAAAAHc3NoLXJzYQAAAAMBAAEAAABhANFS2kaktpSGc+CcmEKPyw9mJC4nZKxHKTgLVZeaGbFZOvJTNzBspQHdy7Q1uKSfktxpgjZnksiu/tFF9ngyY2KFoc+U88ya95IZUycBGCUbBQ8+bhDtw/icdDGQD5WnUwAAAG8AAAAHc3NoLXJzYQAAAGC8Y9Z2LQKhIhxf52773XaWrXdxP0t3GBVo4A10vUWiYoAGepr6rQIoGGXFxT4B9Gp+nEBJjOwKDXPrAevow0T9ca8gZN+0ykbhSrXLE5Ao48rqr3zP4O1/9P7e6gp0gw8=`
+
+func TestParseCert(t *testing.T) {
+	authKeyBytes := []byte(exampleSSHCert)
+
+	key, _, _, rest, err := ParseAuthorizedKey(authKeyBytes)
+	if err != nil {
+		t.Fatalf("ParseAuthorizedKey: %v", err)
+	}
+	if len(rest) > 0 {
+		t.Errorf("rest: got %q, want empty", rest)
+	}
+
+	if _, ok := key.(*Certificate); !ok {
+		t.Fatalf("got %v (%T), want *Certificate", key, key)
+	}
+
+	marshaled := MarshalAuthorizedKey(key)
+	// Before comparison, remove the trailing newline that
+	// MarshalAuthorizedKey adds.
+	marshaled = marshaled[:len(marshaled)-1]
+	if !bytes.Equal(authKeyBytes, marshaled) {
+		t.Errorf("marshaled certificate does not match original: got %q, want %q", marshaled, authKeyBytes)
+	}
+}
+
+// Cert generated by ssh-keygen OpenSSH_6.8p1 OS X 10.10.3
+// % ssh-keygen -s ca -I testcert -O source-address=192.168.1.0/24 -O force-command=/bin/sleep user.pub
+// user.pub key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDACh1rt2DXfV3hk6fszSQcQ/rueMId0kVD9U7nl8cfEnFxqOCrNT92g4laQIGl2mn8lsGZfTLg8ksHq3gkvgO3oo/0wHy4v32JeBOHTsN5AL4gfHNEhWeWb50ev47hnTsRIt9P4dxogeUo/hTu7j9+s9lLpEQXCvq6xocXQt0j8MV9qZBBXFLXVT3cWIkSqOdwt/5ZBg+1GSrc7WfCXVWgTk4a20uPMuJPxU4RQwZW6X3+O8Pqo8C3cW0OzZRFP6gUYUKUsTI5WntlS+LAxgw1mZNsozFGdbiOPRnEryE3SRldh9vjDR3tin1fGpA5P7+CEB/bqaXtG3V+F2OkqaMN
+// Critical Options:
+//         force-command /bin/sleep
+//         source-address 192.168.1.0/24
+// Extensions:
+//         permit-X11-forwarding
+//         permit-agent-forwarding
+//         permit-port-forwarding
+//         permit-pty
+//         permit-user-rc
+const exampleSSHCertWithOptions = `ssh-rsa-cert-v01@openssh.com AAAAHHNzaC1yc2EtY2VydC12MDFAb3BlbnNzaC5jb20AAAAgDyysCJY0XrO1n03EeRRoITnTPdjENFmWDs9X58PP3VUAAAADAQABAAABAQDACh1rt2DXfV3hk6fszSQcQ/rueMId0kVD9U7nl8cfEnFxqOCrNT92g4laQIGl2mn8lsGZfTLg8ksHq3gkvgO3oo/0wHy4v32JeBOHTsN5AL4gfHNEhWeWb50ev47hnTsRIt9P4dxogeUo/hTu7j9+s9lLpEQXCvq6xocXQt0j8MV9qZBBXFLXVT3cWIkSqOdwt/5ZBg+1GSrc7WfCXVWgTk4a20uPMuJPxU4RQwZW6X3+O8Pqo8C3cW0OzZRFP6gUYUKUsTI5WntlS+LAxgw1mZNsozFGdbiOPRnEryE3SRldh9vjDR3tin1fGpA5P7+CEB/bqaXtG3V+F2OkqaMNAAAAAAAAAAAAAAABAAAACHRlc3RjZXJ0AAAAAAAAAAAAAAAA//////////8AAABLAAAADWZvcmNlLWNvbW1hbmQAAAAOAAAACi9iaW4vc2xlZXAAAAAOc291cmNlLWFkZHJlc3MAAAASAAAADjE5Mi4xNjguMS4wLzI0AAAAggAAABVwZXJtaXQtWDExLWZvcndhcmRpbmcAAAAAAAAAF3Blcm1pdC1hZ2VudC1mb3J3YXJkaW5nAAAAAAAAABZwZXJtaXQtcG9ydC1mb3J3YXJkaW5nAAAAAAAAAApwZXJtaXQtcHR5AAAAAAAAAA5wZXJtaXQtdXNlci1yYwAAAAAAAAAAAAABFwAAAAdzc2gtcnNhAAAAAwEAAQAAAQEAwU+c5ui5A8+J/CFpjW8wCa52bEODA808WWQDCSuTG/eMXNf59v9Y8Pk0F1E9dGCosSNyVcB/hacUrc6He+i97+HJCyKavBsE6GDxrjRyxYqAlfcOXi/IVmaUGiO8OQ39d4GHrjToInKvExSUeleQyH4Y4/e27T/pILAqPFL3fyrvMLT5qU9QyIt6zIpa7GBP5+urouNavMprV3zsfIqNBbWypinOQAw823a5wN+zwXnhZrgQiHZ/USG09Y6k98y1dTVz8YHlQVR4D3lpTAsKDKJ5hCH9WU4fdf+lU8OyNGaJ/vz0XNqxcToe1l4numLTnaoSuH89pHryjqurB7lJKwAAAQ8AAAAHc3NoLXJzYQAAAQCaHvUIoPL1zWUHIXLvu96/HU1s/i4CAW2IIEuGgxCUCiFj6vyTyYtgxQxcmbfZf6eaITlS6XJZa7Qq4iaFZh75C1DXTX8labXhRSD4E2t//AIP9MC1rtQC5xo6FmbQ+BoKcDskr+mNACcbRSxs3IL3bwCfWDnIw2WbVox9ZdcthJKk4UoCW4ix4QwdHw7zlddlz++fGEEVhmTbll1SUkycGApPFBsAYRTMupUJcYPIeReBI/m8XfkoMk99bV8ZJQTAd7OekHY2/48Ff53jLmyDjP7kNw1F8OaPtkFs6dGJXta4krmaekPy87j+35In5hFj7yoOqvSbmYUkeX70/GGQ`
+
+func TestParseCertWithOptions(t *testing.T) {
+	opts := map[string]string{
+		"source-address": "192.168.1.0/24",
+		"force-command":  "/bin/sleep",
+	}
+	exts := map[string]string{
+		"permit-X11-forwarding":   "",
+		"permit-agent-forwarding": "",
+		"permit-port-forwarding":  "",
+		"permit-pty":              "",
+		"permit-user-rc":          "",
+	}
+	authKeyBytes := []byte(exampleSSHCertWithOptions)
+
+	key, _, _, rest, err := ParseAuthorizedKey(authKeyBytes)
+	if err != nil {
+		t.Fatalf("ParseAuthorizedKey: %v", err)
+	}
+	if len(rest) > 0 {
+		t.Errorf("rest: got %q, want empty", rest)
+	}
+	cert, ok := key.(*Certificate)
+	if !ok {
+		t.Fatalf("got %v (%T), want *Certificate", key, key)
+	}
+	if !reflect.DeepEqual(cert.CriticalOptions, opts) {
+		t.Errorf("unexpected critical options - got %v, want %v", cert.CriticalOptions, opts)
+	}
+	if !reflect.DeepEqual(cert.Extensions, exts) {
+		t.Errorf("unexpected Extensions - got %v, want %v", cert.Extensions, exts)
+	}
+	marshaled := MarshalAuthorizedKey(key)
+	// Before comparison, remove the trailing newline that
+	// MarshalAuthorizedKey adds.
+	marshaled = marshaled[:len(marshaled)-1]
+	if !bytes.Equal(authKeyBytes, marshaled) {
+		t.Errorf("marshaled certificate does not match original: got %q, want %q", marshaled, authKeyBytes)
+	}
+}
+
+func TestValidateCert(t *testing.T) {
+	key, _, _, _, err := ParseAuthorizedKey([]byte(exampleSSHCert))
+	if err != nil {
+		t.Fatalf("ParseAuthorizedKey: %v", err)
+	}
+	validCert, ok := key.(*Certificate)
+	if !ok {
+		t.Fatalf("got %v (%T), want *Certificate", key, key)
+	}
+	checker := CertChecker{}
+	checker.IsAuthority = func(k PublicKey) bool {
+		return bytes.Equal(k.Marshal(), validCert.SignatureKey.Marshal())
+	}
+
+	if err := checker.CheckCert("user", validCert); err != nil {
+		t.Errorf("Unable to validate certificate: %v", err)
+	}
+	invalidCert := &Certificate{
+		Key:          testPublicKeys["rsa"],
+		SignatureKey: testPublicKeys["ecdsa"],
+		ValidBefore:  CertTimeInfinity,
+		Signature:    &Signature{},
+	}
+	if err := checker.CheckCert("user", invalidCert); err == nil {
+		t.Error("Invalid cert signature passed validation")
+	}
+}
+
+func TestValidateCertTime(t *testing.T) {
+	cert := Certificate{
+		ValidPrincipals: []string{"user"},
+		Key:             testPublicKeys["rsa"],
+		ValidAfter:      50,
+		ValidBefore:     100,
+	}
+
+	cert.SignCert(rand.Reader, testSigners["ecdsa"])
+
+	for ts, ok := range map[int64]bool{
+		25:  false,
+		50:  true,
+		99:  true,
+		100: false,
+		125: false,
+	} {
+		checker := CertChecker{
+			Clock: func() time.Time { return time.Unix(ts, 0) },
+		}
+		checker.IsAuthority = func(k PublicKey) bool {
+			return bytes.Equal(k.Marshal(),
+				testPublicKeys["ecdsa"].Marshal())
+		}
+
+		if v := checker.CheckCert("user", &cert); (v == nil) != ok {
+			t.Errorf("Authenticate(%d): %v", ts, v)
+		}
+	}
+}
+
+// TODO(hanwen): tests for
+//
+// host keys:
+// * fallbacks
+
+func TestHostKeyCert(t *testing.T) {
+	cert := &Certificate{
+		ValidPrincipals: []string{"hostname", "hostname.domain"},
+		Key:             testPublicKeys["rsa"],
+		ValidBefore:     CertTimeInfinity,
+		CertType:        HostCert,
+	}
+	cert.SignCert(rand.Reader, testSigners["ecdsa"])
+
+	checker := &CertChecker{
+		IsAuthority: func(p PublicKey) bool {
+			return bytes.Equal(testPublicKeys["ecdsa"].Marshal(), p.Marshal())
+		},
+	}
+
+	certSigner, err := NewCertSigner(cert, testSigners["rsa"])
+	if err != nil {
+		t.Errorf("NewCertSigner: %v", err)
+	}
+
+	for _, name := range []string{"hostname", "otherhost"} {
+		c1, c2, err := netPipe()
+		if err != nil {
+			t.Fatalf("netPipe: %v", err)
+		}
+		defer c1.Close()
+		defer c2.Close()
+
+		errc := make(chan error)
+
+		go func() {
+			conf := ServerConfig{
+				NoClientAuth: true,
+			}
+			conf.AddHostKey(certSigner)
+			_, _, _, err := NewServerConn(c1, &conf)
+			errc <- err
+		}()
+
+		config := &ClientConfig{
+			User:            "user",
+			HostKeyCallback: checker.CheckHostKey,
+		}
+		_, _, _, err = NewClientConn(c2, name, config)
+
+		succeed := name == "hostname"
+		if (err == nil) != succeed {
+			t.Fatalf("NewClientConn(%q): %v", name, err)
+		}
+
+		err = <-errc
+		if (err == nil) != succeed {
+			t.Fatalf("NewServerConn(%q): %v", name, err)
+		}
+	}
+}
diff --git a/vendor/golang.org/x/crypto/ssh/channel.go b/vendor/golang.org/x/crypto/ssh/channel.go
new file mode 100644
index 000000000..5403c7e45
--- /dev/null
+++ b/vendor/golang.org/x/crypto/ssh/channel.go
@@ -0,0 +1,631 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssh
+
+import (
+	"encoding/binary"
+	"errors"
+	"fmt"
+	"io"
+	"log"
+	"sync"
+)
+
+const (
+	minPacketLength = 9
+	// channelMaxPacket contains the maximum number of bytes that will be
+	// sent in a single packet. As per RFC 4253, section 6.1, 32k is also
+	// the minimum.
+	channelMaxPacket = 1 << 15
+	// We follow OpenSSH here.
+	channelWindowSize = 64 * channelMaxPacket
+)
+
+// NewChannel represents an incoming request to a channel. It must either be
+// accepted for use by calling Accept, or rejected by calling Reject.
+type NewChannel interface {
+	// Accept accepts the channel creation request. It returns the Channel
+	// and a Go channel containing SSH requests. The Go channel must be
+	// serviced otherwise the Channel will hang.
+	Accept() (Channel, <-chan *Request, error)
+
+	// Reject rejects the channel creation request. After calling
+	// this, no other methods on the Channel may be called.
+	Reject(reason RejectionReason, message string) error
+
+	// ChannelType returns the type of the channel, as supplied by the
+	// client.
+	ChannelType() string
+
+	// ExtraData returns the arbitrary payload for this channel, as supplied
+	// by the client. This data is specific to the channel type.
+	ExtraData() []byte
+}
+
+// A Channel is an ordered, reliable, flow-controlled, duplex stream
+// that is multiplexed over an SSH connection.
+type Channel interface {
+	// Read reads up to len(data) bytes from the channel.
+	Read(data []byte) (int, error)
+
+	// Write writes len(data) bytes to the channel.
+	Write(data []byte) (int, error)
+
+	// Close signals end of channel use. No data may be sent after this
+	// call.
+	Close() error
+
+	// CloseWrite signals the end of sending in-band
+	// data. Requests may still be sent, and the other side may
+	// still send data.
+	CloseWrite() error
+
+	// SendRequest sends a channel request. If wantReply is true,
+	// it will wait for a reply and return the result as a
+	// boolean, otherwise the return value will be false. Channel
+	// requests are out-of-band messages so they may be sent even
+	// if the data stream is closed or blocked by flow control.
+	SendRequest(name string, wantReply bool, payload []byte) (bool, error)
+
+	// Stderr returns an io.ReadWriter that writes to this channel
+	// with the extended data type set to stderr. Stderr may
+	// safely be read and written from a different goroutine than
+	// Read and Write respectively.
+	Stderr() io.ReadWriter
+}
+
+// Request is a request sent outside of the normal stream of
+// data. Requests can either be specific to an SSH channel, or they
+// can be global.
+type Request struct {
+	Type      string
+	WantReply bool
+	Payload   []byte
+
+	ch  *channel
+	mux *mux
+}
+
+// Reply sends a response to a request. It must be called for all requests
+// where WantReply is true and is a no-op otherwise. The payload argument is
+// ignored for replies to channel-specific requests.
+func (r *Request) Reply(ok bool, payload []byte) error {
+	if !r.WantReply {
+		return nil
+	}
+
+	if r.ch == nil {
+		return r.mux.ackRequest(ok, payload)
+	}
+
+	return r.ch.ackRequest(ok)
+}
+
+// RejectionReason is an enumeration used when rejecting channel creation
+// requests. See RFC 4254, section 5.1.
+type RejectionReason uint32
+
+const (
+	Prohibited RejectionReason = iota + 1
+	ConnectionFailed
+	UnknownChannelType
+	ResourceShortage
+)
+
+// String converts the rejection reason to human readable form.
+func (r RejectionReason) String() string {
+	switch r {
+	case Prohibited:
+		return "administratively prohibited"
+	case ConnectionFailed:
+		return "connect failed"
+	case UnknownChannelType:
+		return "unknown channel type"
+	case ResourceShortage:
+		return "resource shortage"
+	}
+	return fmt.Sprintf("unknown reason %d", int(r))
+}
+
+func min(a uint32, b int) uint32 {
+	if a < uint32(b) {
+		return a
+	}
+	return uint32(b)
+}
+
+type channelDirection uint8
+
+const (
+	channelInbound channelDirection = iota
+	channelOutbound
+)
+
+// channel is an implementation of the Channel interface that works
+// with the mux class.
+type channel struct {
+	// R/O after creation
+	chanType          string
+	extraData         []byte
+	localId, remoteId uint32
+
+	// maxIncomingPayload and maxRemotePayload are the maximum
+	// payload sizes of normal and extended data packets for
+	// receiving and sending, respectively. The wire packet will
+	// be 9 or 13 bytes larger (excluding encryption overhead).
+	maxIncomingPayload uint32
+	maxRemotePayload   uint32
+
+	mux *mux
+
+	// decided is set to true if an accept or reject message has been sent
+	// (for outbound channels) or received (for inbound channels).
+	decided bool
+
+	// direction contains either channelOutbound, for channels created
+	// locally, or channelInbound, for channels created by the peer.
+	direction channelDirection
+
+	// Pending internal channel messages.
+	msg chan interface{}
+
+	// Since requests have no ID, there can be only one request
+	// with WantReply=true outstanding. This lock is held by a
+	// goroutine that has such an outgoing request pending.
+	sentRequestMu sync.Mutex
+
+	incomingRequests chan *Request
+
+	sentEOF bool
+
+	// thread-safe data
+	remoteWin  window
+	pending    *buffer
+	extPending *buffer
+
+	// windowMu protects myWindow, the flow-control window.
+	windowMu sync.Mutex
+	myWindow uint32
+
+	// writeMu serializes calls to mux.conn.writePacket() and
+	// protects sentClose and packetPool. This mutex must be
+	// different from windowMu, as writePacket can block if there
+	// is a key exchange pending.
+	writeMu   sync.Mutex
+	sentClose bool
+
+	// packetPool has a buffer for each extended channel ID to
+	// save allocations during writes.
+	packetPool map[uint32][]byte
+}
+
+// writePacket sends a packet. If the packet is a channel close, it updates
+// sentClose. This method takes the lock c.writeMu.
+func (c *channel) writePacket(packet []byte) error {
+	c.writeMu.Lock()
+	if c.sentClose {
+		c.writeMu.Unlock()
+		return io.EOF
+	}
+	c.sentClose = (packet[0] == msgChannelClose)
+	err := c.mux.conn.writePacket(packet)
+	c.writeMu.Unlock()
+	return err
+}
+
+func (c *channel) sendMessage(msg interface{}) error {
+	if debugMux {
+		log.Printf("send %d: %#v", c.mux.chanList.offset, msg)
+	}
+
+	p := Marshal(msg)
+	binary.BigEndian.PutUint32(p[1:], c.remoteId)
+	return c.writePacket(p)
+}
+
+// WriteExtended writes data to a specific extended stream. These streams are
+// used, for example, for stderr.
+func (c *channel) WriteExtended(data []byte, extendedCode uint32) (n int, err error) {
+	if c.sentEOF {
+		return 0, io.EOF
+	}
+	// 1 byte message type, 4 bytes remoteId, 4 bytes data length
+	opCode := byte(msgChannelData)
+	headerLength := uint32(9)
+	if extendedCode > 0 {
+		headerLength += 4
+		opCode = msgChannelExtendedData
+	}
+
+	c.writeMu.Lock()
+	packet := c.packetPool[extendedCode]
+	// We don't remove the buffer from packetPool, so
+	// WriteExtended calls from different goroutines will be
+	// flagged as errors by the race detector.
+	c.writeMu.Unlock()
+
+	for len(data) > 0 {
+		space := min(c.maxRemotePayload, len(data))
+		if space, err = c.remoteWin.reserve(space); err != nil {
+			return n, err
+		}
+		if want := headerLength + space; uint32(cap(packet)) < want {
+			packet = make([]byte, want)
+		} else {
+			packet = packet[:want]
+		}
+
+		todo := data[:space]
+
+		packet[0] = opCode
+		binary.BigEndian.PutUint32(packet[1:], c.remoteId)
+		if extendedCode > 0 {
+			binary.BigEndian.PutUint32(packet[5:], uint32(extendedCode))
+		}
+		binary.BigEndian.PutUint32(packet[headerLength-4:], uint32(len(todo)))
+		copy(packet[headerLength:], todo)
+		if err = c.writePacket(packet); err != nil {
+			return n, err
+		}
+
+		n += len(todo)
+		data = data[len(todo):]
+	}
+
+	c.writeMu.Lock()
+	c.packetPool[extendedCode] = packet
+	c.writeMu.Unlock()
+
+	return n, err
+}
+
+func (c *channel) handleData(packet []byte) error {
+	headerLen := 9
+	isExtendedData := packet[0] == msgChannelExtendedData
+	if isExtendedData {
+		headerLen = 13
+	}
+	if len(packet) < headerLen {
+		// malformed data packet
+		return parseError(packet[0])
+	}
+
+	var extended uint32
+	if isExtendedData {
+		extended = binary.BigEndian.Uint32(packet[5:])
+	}
+
+	length := binary.BigEndian.Uint32(packet[headerLen-4 : headerLen])
+	if length == 0 {
+		return nil
+	}
+	if length > c.maxIncomingPayload {
+		// TODO(hanwen): should send Disconnect?
+		return errors.New("ssh: incoming packet exceeds maximum payload size")
+	}
+
+	data := packet[headerLen:]
+	if length != uint32(len(data)) {
+		return errors.New("ssh: wrong packet length")
+	}
+
+	c.windowMu.Lock()
+	if c.myWindow < length {
+		c.windowMu.Unlock()
+		// TODO(hanwen): should send Disconnect with reason?
+		return errors.New("ssh: remote side wrote too much")
+	}
+	c.myWindow -= length
+	c.windowMu.Unlock()
+
+	if extended == 1 {
+		c.extPending.write(data)
+	} else if extended > 0 {
+		// discard other extended data.
+	} else {
+		c.pending.write(data)
+	}
+	return nil
+}
+
+func (c *channel) adjustWindow(n uint32) error {
+	c.windowMu.Lock()
+	// Since myWindow is managed on our side, and can never exceed
+	// the initial window setting, we don't worry about overflow.
+	c.myWindow += uint32(n)
+	c.windowMu.Unlock()
+	return c.sendMessage(windowAdjustMsg{
+		AdditionalBytes: uint32(n),
+	})
+}
+
+func (c *channel) ReadExtended(data []byte, extended uint32) (n int, err error) {
+	switch extended {
+	case 1:
+		n, err = c.extPending.Read(data)
+	case 0:
+		n, err = c.pending.Read(data)
+	default:
+		return 0, fmt.Errorf("ssh: extended code %d unimplemented", extended)
+	}
+
+	if n > 0 {
+		err = c.adjustWindow(uint32(n))
+		// sendWindowAdjust can return io.EOF if the remote
+		// peer has closed the connection, however we want to
+		// defer forwarding io.EOF to the caller of Read until
+		// the buffer has been drained.
+		if n > 0 && err == io.EOF {
+			err = nil
+		}
+	}
+
+	return n, err
+}
+
+func (c *channel) close() {
+	c.pending.eof()
+	c.extPending.eof()
+	close(c.msg)
+	close(c.incomingRequests)
+	c.writeMu.Lock()
+	// This is not necessary for a normal channel teardown, but if
+	// there was another error, it is.
+	c.sentClose = true
+	c.writeMu.Unlock()
+	// Unblock writers.
+	c.remoteWin.close()
+}
+
+// responseMessageReceived is called when a success or failure message is
+// received on a channel to check that such a message is reasonable for the
+// given channel.
+func (c *channel) responseMessageReceived() error {
+	if c.direction == channelInbound {
+		return errors.New("ssh: channel response message received on inbound channel")
+	}
+	if c.decided {
+		return errors.New("ssh: duplicate response received for channel")
+	}
+	c.decided = true
+	return nil
+}
+
+func (c *channel) handlePacket(packet []byte) error {
+	switch packet[0] {
+	case msgChannelData, msgChannelExtendedData:
+		return c.handleData(packet)
+	case msgChannelClose:
+		c.sendMessage(channelCloseMsg{PeersId: c.remoteId})
+		c.mux.chanList.remove(c.localId)
+		c.close()
+		return nil
+	case msgChannelEOF:
+		// RFC 4254 is mute on how EOF affects dataExt messages but
+		// it is logical to signal EOF at the same time.
+		c.extPending.eof()
+		c.pending.eof()
+		return nil
+	}
+
+	decoded, err := decode(packet)
+	if err != nil {
+		return err
+	}
+
+	switch msg := decoded.(type) {
+	case *channelOpenFailureMsg:
+		if err := c.responseMessageReceived(); err != nil {
+			return err
+		}
+		c.mux.chanList.remove(msg.PeersId)
+		c.msg <- msg
+	case *channelOpenConfirmMsg:
+		if err := c.responseMessageReceived(); err != nil {
+			return err
+		}
+		if msg.MaxPacketSize < minPacketLength || msg.MaxPacketSize > 1<<31 {
+			return fmt.Errorf("ssh: invalid MaxPacketSize %d from peer", msg.MaxPacketSize)
+		}
+		c.remoteId = msg.MyId
+		c.maxRemotePayload = msg.MaxPacketSize
+		c.remoteWin.add(msg.MyWindow)
+		c.msg <- msg
+	case *windowAdjustMsg:
+		if !c.remoteWin.add(msg.AdditionalBytes) {
+			return fmt.Errorf("ssh: invalid window update for %d bytes", msg.AdditionalBytes)
+		}
+	case *channelRequestMsg:
+		req := Request{
+			Type:      msg.Request,
+			WantReply: msg.WantReply,
+			Payload:   msg.RequestSpecificData,
+			ch:        c,
+		}
+
+		c.incomingRequests <- &req
+	default:
+		c.msg <- msg
+	}
+	return nil
+}
+
+func (m *mux) newChannel(chanType string, direction channelDirection, extraData []byte) *channel {
+	ch := &channel{
+		remoteWin:        window{Cond: newCond()},
+		myWindow:         channelWindowSize,
+		pending:          newBuffer(),
+		extPending:       newBuffer(),
+		direction:        direction,
+		incomingRequests: make(chan *Request, 16),
+		msg:              make(chan interface{}, 16),
+		chanType:         chanType,
+		extraData:        extraData,
+		mux:              m,
+		packetPool:       make(map[uint32][]byte),
+	}
+	ch.localId = m.chanList.add(ch)
+	return ch
+}
+
+var errUndecided = errors.New("ssh: must Accept or Reject channel")
+var errDecidedAlready = errors.New("ssh: can call Accept or Reject only once")
+
+type extChannel struct {
+	code uint32
+	ch   *channel
+}
+
+func (e *extChannel) Write(data []byte) (n int, err error) {
+	return e.ch.WriteExtended(data, e.code)
+}
+
+func (e *extChannel) Read(data []byte) (n int, err error) {
+	return e.ch.ReadExtended(data, e.code)
+}
+
+func (c *channel) Accept() (Channel, <-chan *Request, error) {
+	if c.decided {
+		return nil, nil, errDecidedAlready
+	}
+	c.maxIncomingPayload = channelMaxPacket
+	confirm := channelOpenConfirmMsg{
+		PeersId:  c.remoteId,
+		MyId:     c.localId,
+		MyWindow: c.myWindow,
+		MaxPacketSize: c.maxIncomingPayload,
+	}
+	c.decided = true
+	if err := c.sendMessage(confirm); err != nil {
+		return nil, nil, err
+	}
+
+	return c, c.incomingRequests, nil
+}
+
+func (ch *channel) Reject(reason RejectionReason, message string) error {
+	if ch.decided {
+		return errDecidedAlready
+	}
+	reject := channelOpenFailureMsg{
+		PeersId:  ch.remoteId,
+		Reason:   reason,
+		Message:  message,
+		Language: "en",
+	}
+	ch.decided = true
+	return ch.sendMessage(reject)
+}
+
+func (ch *channel) Read(data []byte) (int, error) {
+	if !ch.decided {
+		return 0, errUndecided
+	}
+	return ch.ReadExtended(data, 0)
+}
+
+func (ch *channel) Write(data []byte) (int, error) {
+	if !ch.decided {
+		return 0, errUndecided
+	}
+	return ch.WriteExtended(data, 0)
+}
+
+func (ch *channel) CloseWrite() error {
+	if !ch.decided {
+		return errUndecided
+	}
+	ch.sentEOF = true
+	return ch.sendMessage(channelEOFMsg{
+		PeersId: ch.remoteId})
+}
+
+func (ch *channel) Close() error {
+	if !ch.decided {
+		return errUndecided
+	}
+
+	return ch.sendMessage(channelCloseMsg{
+		PeersId: ch.remoteId})
+}
+
+// Extended returns an io.ReadWriter that sends and receives data on the given
+// SSH extended stream. Such streams are used, for example, for stderr.
+func (ch *channel) Extended(code uint32) io.ReadWriter {
+	if !ch.decided {
+		return nil
+	}
+	return &extChannel{code, ch}
+}
+
+func (ch *channel) Stderr() io.ReadWriter {
+	return ch.Extended(1)
+}
+
+func (ch *channel) SendRequest(name string, wantReply bool, payload []byte) (bool, error) {
+	if !ch.decided {
+		return false, errUndecided
+	}
+
+	if wantReply {
+		ch.sentRequestMu.Lock()
+		defer ch.sentRequestMu.Unlock()
+	}
+
+	msg := channelRequestMsg{
+		PeersId:             ch.remoteId,
+		Request:             name,
+		WantReply:           wantReply,
+		RequestSpecificData: payload,
+	}
+
+	if err := ch.sendMessage(msg); err != nil {
+		return false, err
+	}
+
+	if wantReply {
+		m, ok := <-ch.msg
+		if !ok {
+			return false, io.EOF
+		}
+		switch m.(type) {
+		case *channelRequestFailureMsg:
+			return false, nil
+		case *channelRequestSuccessMsg:
+			return true, nil
+		default:
+			return false, fmt.Errorf("ssh: unexpected response to channel request: %#v", m)
+		}
+	}
+
+	return false, nil
+}
+
+// ackRequest either sends an ack or nack to the channel request.
+func (ch *channel) ackRequest(ok bool) error {
+	if !ch.decided {
+		return errUndecided
+	}
+
+	var msg interface{}
+	if !ok {
+		msg = channelRequestFailureMsg{
+			PeersId: ch.remoteId,
+		}
+	} else {
+		msg = channelRequestSuccessMsg{
+			PeersId: ch.remoteId,
+		}
+	}
+	return ch.sendMessage(msg)
+}
+
+func (ch *channel) ChannelType() string {
+	return ch.chanType
+}
+
+func (ch *channel) ExtraData() []byte {
+	return ch.extraData
+}
diff --git a/vendor/golang.org/x/crypto/ssh/cipher.go b/vendor/golang.org/x/crypto/ssh/cipher.go
new file mode 100644
index 000000000..2732963f3
--- /dev/null
+++ b/vendor/golang.org/x/crypto/ssh/cipher.go
@@ -0,0 +1,552 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssh
+
+import (
+	"crypto/aes"
+	"crypto/cipher"
+	"crypto/rc4"
+	"crypto/subtle"
+	"encoding/binary"
+	"errors"
+	"fmt"
+	"hash"
+	"io"
+	"io/ioutil"
+)
+
+const (
+	packetSizeMultiple = 16 // TODO(huin) this should be determined by the cipher.
+
+	// RFC 4253 section 6.1 defines a minimum packet size of 32768 that implementations
+	// MUST be able to process (plus a few more kilobytes for padding and mac).
+	// The RFC indicates implementations SHOULD be able to handle larger packet
+	// sizes, but then waffles on about reasonable limits.
+	//
+	// OpenSSH caps their maxPacket at 256kB so we choose to do
+	// the same. maxPacket is also used to ensure that uint32
+	// length fields do not overflow, so it should remain well
+	// below 4G.
+	maxPacket = 256 * 1024
+)
+
+// noneCipher implements cipher.Stream and provides no encryption. It is used
+// by the transport before the first key-exchange.
+type noneCipher struct{}
+
+func (c noneCipher) XORKeyStream(dst, src []byte) {
+	copy(dst, src)
+}
+
+func newAESCTR(key, iv []byte) (cipher.Stream, error) {
+	c, err := aes.NewCipher(key)
+	if err != nil {
+		return nil, err
+	}
+	return cipher.NewCTR(c, iv), nil
+}
+
+func newRC4(key, iv []byte) (cipher.Stream, error) {
+	return rc4.NewCipher(key)
+}
+
+type streamCipherMode struct {
+	keySize    int
+	ivSize     int
+	skip       int
+	createFunc func(key, iv []byte) (cipher.Stream, error)
+}
+
+func (c *streamCipherMode) createStream(key, iv []byte) (cipher.Stream, error) {
+	if len(key) < c.keySize {
+		panic("ssh: key length too small for cipher")
+	}
+	if len(iv) < c.ivSize {
+		panic("ssh: iv too small for cipher")
+	}
+
+	stream, err := c.createFunc(key[:c.keySize], iv[:c.ivSize])
+	if err != nil {
+		return nil, err
+	}
+
+	var streamDump []byte
+	if c.skip > 0 {
+		streamDump = make([]byte, 512)
+	}
+
+	for remainingToDump := c.skip; remainingToDump > 0; {
+		dumpThisTime := remainingToDump
+		if dumpThisTime > len(streamDump) {
+			dumpThisTime = len(streamDump)
+		}
+		stream.XORKeyStream(streamDump[:dumpThisTime], streamDump[:dumpThisTime])
+		remainingToDump -= dumpThisTime
+	}
+
+	return stream, nil
+}
+
+// cipherModes documents properties of supported ciphers. Ciphers not included
+// are not supported and will not be negotiated, even if explicitly requested in
+// ClientConfig.Crypto.Ciphers.
+var cipherModes = map[string]*streamCipherMode{
+	// Ciphers from RFC4344, which introduced many CTR-based ciphers. Algorithms
+	// are defined in the order specified in the RFC.
+	"aes128-ctr": {16, aes.BlockSize, 0, newAESCTR},
+	"aes192-ctr": {24, aes.BlockSize, 0, newAESCTR},
+	"aes256-ctr": {32, aes.BlockSize, 0, newAESCTR},
+
+	// Ciphers from RFC4345, which introduces security-improved arcfour ciphers.
+	// They are defined in the order specified in the RFC.
+	"arcfour128": {16, 0, 1536, newRC4},
+	"arcfour256": {32, 0, 1536, newRC4},
+
+	// Cipher defined in RFC 4253, which describes SSH Transport Layer Protocol.
+	// Note that this cipher is not safe, as stated in RFC 4253: "Arcfour (and
+	// RC4) has problems with weak keys, and should be used with caution."
+	// RFC4345 introduces improved versions of Arcfour.
+	"arcfour": {16, 0, 0, newRC4},
+
+	// AES-GCM is not a stream cipher, so it is constructed with a
+	// special case. If we add any more non-stream ciphers, we
+	// should invent a cleaner way to do this.
+	gcmCipherID: {16, 12, 0, nil},
+
+	// CBC mode is insecure and so is not included in the default config.
+	// (See http://www.isg.rhul.ac.uk/~kp/SandPfinal.pdf). If absolutely
+	// needed, it's possible to specify a custom Config to enable it.
+	// You should expect that an active attacker can recover plaintext if
+	// you do.
+	aes128cbcID: {16, aes.BlockSize, 0, nil},
+}
+
+// prefixLen is the length of the packet prefix that contains the packet length
+// and number of padding bytes.
+const prefixLen = 5
+
+// streamPacketCipher is a packetCipher using a stream cipher.
+type streamPacketCipher struct {
+	mac    hash.Hash
+	cipher cipher.Stream
+
+	// The following members are to avoid per-packet allocations.
+	prefix      [prefixLen]byte
+	seqNumBytes [4]byte
+	padding     [2 * packetSizeMultiple]byte
+	packetData  []byte
+	macResult   []byte
+}
+
+// readPacket reads and decrypts a single packet from the reader argument.
+func (s *streamPacketCipher) readPacket(seqNum uint32, r io.Reader) ([]byte, error) {
+	if _, err := io.ReadFull(r, s.prefix[:]); err != nil {
+		return nil, err
+	}
+
+	s.cipher.XORKeyStream(s.prefix[:], s.prefix[:])
+	length := binary.BigEndian.Uint32(s.prefix[0:4])
+	paddingLength := uint32(s.prefix[4])
+
+	var macSize uint32
+	if s.mac != nil {
+		s.mac.Reset()
+		binary.BigEndian.PutUint32(s.seqNumBytes[:], seqNum)
+		s.mac.Write(s.seqNumBytes[:])
+		s.mac.Write(s.prefix[:])
+		macSize = uint32(s.mac.Size())
+	}
+
+	if length <= paddingLength+1 {
+		return nil, errors.New("ssh: invalid packet length, packet too small")
+	}
+
+	if length > maxPacket {
+		return nil, errors.New("ssh: invalid packet length, packet too large")
+	}
+
+	// the maxPacket check above ensures that length-1+macSize
+	// does not overflow.
+	if uint32(cap(s.packetData)) < length-1+macSize {
+		s.packetData = make([]byte, length-1+macSize)
+	} else {
+		s.packetData = s.packetData[:length-1+macSize]
+	}
+
+	if _, err := io.ReadFull(r, s.packetData); err != nil {
+		return nil, err
+	}
+	mac := s.packetData[length-1:]
+	data := s.packetData[:length-1]
+	s.cipher.XORKeyStream(data, data)
+
+	if s.mac != nil {
+		s.mac.Write(data)
+		s.macResult = s.mac.Sum(s.macResult[:0])
+		if subtle.ConstantTimeCompare(s.macResult, mac) != 1 {
+			return nil, errors.New("ssh: MAC failure")
+		}
+	}
+
+	return s.packetData[:length-paddingLength-1], nil
+}
+
+// writePacket encrypts and sends a packet of data to the writer argument.
+func (s *streamPacketCipher) writePacket(seqNum uint32, w io.Writer, rand io.Reader, packet []byte) error {
+	if len(packet) > maxPacket {
+		return errors.New("ssh: packet too large")
+	}
+
+	paddingLength := packetSizeMultiple - (prefixLen+len(packet))%packetSizeMultiple
+	if paddingLength < 4 {
+		paddingLength += packetSizeMultiple
+	}
+
+	length := len(packet) + 1 + paddingLength
+	binary.BigEndian.PutUint32(s.prefix[:], uint32(length))
+	s.prefix[4] = byte(paddingLength)
+	padding := s.padding[:paddingLength]
+	if _, err := io.ReadFull(rand, padding); err != nil {
+		return err
+	}
+
+	if s.mac != nil {
+		s.mac.Reset()
+		binary.BigEndian.PutUint32(s.seqNumBytes[:], seqNum)
+		s.mac.Write(s.seqNumBytes[:])
+		s.mac.Write(s.prefix[:])
+		s.mac.Write(packet)
+		s.mac.Write(padding)
+	}
+
+	s.cipher.XORKeyStream(s.prefix[:], s.prefix[:])
+	s.cipher.XORKeyStream(packet, packet)
+	s.cipher.XORKeyStream(padding, padding)
+
+	if _, err := w.Write(s.prefix[:]); err != nil {
+		return err
+	}
+	if _, err := w.Write(packet); err != nil {
+		return err
+	}
+	if _, err := w.Write(padding); err != nil {
+		return err
+	}
+
+	if s.mac != nil {
+		s.macResult = s.mac.Sum(s.macResult[:0])
+		if _, err := w.Write(s.macResult); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+type gcmCipher struct {
+	aead   cipher.AEAD
+	prefix [4]byte
+	iv     []byte
+	buf    []byte
+}
+
+func newGCMCipher(iv, key, macKey []byte) (packetCipher, error) {
+	c, err := aes.NewCipher(key)
+	if err != nil {
+		return nil, err
+	}
+
+	aead, err := cipher.NewGCM(c)
+	if err != nil {
+		return nil, err
+	}
+
+	return &gcmCipher{
+		aead: aead,
+		iv:   iv,
+	}, nil
+}
+
+const gcmTagSize = 16
+
+func (c *gcmCipher) writePacket(seqNum uint32, w io.Writer, rand io.Reader, packet []byte) error {
+	// Pad out to multiple of 16 bytes. This is different from the
+	// stream cipher because that encrypts the length too.
+	padding := byte(packetSizeMultiple - (1+len(packet))%packetSizeMultiple)
+	if padding < 4 {
+		padding += packetSizeMultiple
+	}
+
+	length := uint32(len(packet) + int(padding) + 1)
+	binary.BigEndian.PutUint32(c.prefix[:], length)
+	if _, err := w.Write(c.prefix[:]); err != nil {
+		return err
+	}
+
+	if cap(c.buf) < int(length) {
+		c.buf = make([]byte, length)
+	} else {
+		c.buf = c.buf[:length]
+	}
+
+	c.buf[0] = padding
+	copy(c.buf[1:], packet)
+	if _, err := io.ReadFull(rand, c.buf[1+len(packet):]); err != nil {
+		return err
+	}
+	c.buf = c.aead.Seal(c.buf[:0], c.iv, c.buf, c.prefix[:])
+	if _, err := w.Write(c.buf); err != nil {
+		return err
+	}
+	c.incIV()
+
+	return nil
+}
+
+func (c *gcmCipher) incIV() {
+	for i := 4 + 7; i >= 4; i-- {
+		c.iv[i]++
+		if c.iv[i] != 0 {
+			break
+		}
+	}
+}
+
+func (c *gcmCipher) readPacket(seqNum uint32, r io.Reader) ([]byte, error) {
+	if _, err := io.ReadFull(r, c.prefix[:]); err != nil {
+		return nil, err
+	}
+	length := binary.BigEndian.Uint32(c.prefix[:])
+	if length > maxPacket {
+		return nil, errors.New("ssh: max packet length exceeded")
+	}
+
+	if cap(c.buf) < int(length+gcmTagSize) {
+		c.buf = make([]byte, length+gcmTagSize)
+	} else {
+		c.buf = c.buf[:length+gcmTagSize]
+	}
+
+	if _, err := io.ReadFull(r, c.buf); err != nil {
+		return nil, err
+	}
+
+	plain, err := c.aead.Open(c.buf[:0], c.iv, c.buf, c.prefix[:])
+	if err != nil {
+		return nil, err
+	}
+	c.incIV()
+
+	padding := plain[0]
+	if padding < 4 || padding >= 20 {
+		return nil, fmt.Errorf("ssh: illegal padding %d", padding)
+	}
+
+	if int(padding+1) >= len(plain) {
+		return nil, fmt.Errorf("ssh: padding %d too large", padding)
+	}
+	plain = plain[1 : length-uint32(padding)]
+	return plain, nil
+}
+
+// cbcCipher implements aes128-cbc cipher defined in RFC 4253 section 6.1
+type cbcCipher struct {
+	mac       hash.Hash
+	macSize   uint32
+	decrypter cipher.BlockMode
+	encrypter cipher.BlockMode
+
+	// The following members are to avoid per-packet allocations.
+	seqNumBytes [4]byte
+	packetData  []byte
+	macResult   []byte
+
+	// Amount of data we should still read to hide which
+	// verification error triggered.
+	oracleCamouflage uint32
+}
+
+func newAESCBCCipher(iv, key, macKey []byte, algs directionAlgorithms) (packetCipher, error) {
+	c, err := aes.NewCipher(key)
+	if err != nil {
+		return nil, err
+	}
+
+	cbc := &cbcCipher{
+		mac:        macModes[algs.MAC].new(macKey),
+		decrypter:  cipher.NewCBCDecrypter(c, iv),
+		encrypter:  cipher.NewCBCEncrypter(c, iv),
+		packetData: make([]byte, 1024),
+	}
+	if cbc.mac != nil {
+		cbc.macSize = uint32(cbc.mac.Size())
+	}
+
+	return cbc, nil
+}
+
+func maxUInt32(a, b int) uint32 {
+	if a > b {
+		return uint32(a)
+	}
+	return uint32(b)
+}
+
+const (
+	cbcMinPacketSizeMultiple = 8
+	cbcMinPacketSize         = 16
+	cbcMinPaddingSize        = 4
+)
+
+// cbcError represents a verification error that may leak information.
+type cbcError string
+
+func (e cbcError) Error() string { return string(e) }
+
+func (c *cbcCipher) readPacket(seqNum uint32, r io.Reader) ([]byte, error) {
+	p, err := c.readPacketLeaky(seqNum, r)
+	if err != nil {
+		if _, ok := err.(cbcError); ok {
+			// Verification error: read a fixed amount of
+			// data, to make distinguishing between
+			// failing MAC and failing length check more
+			// difficult.
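+			// (Note: c.oracleCamouflage was sized in readPacketLeaky, so the
+			// total number of bytes consumed from r matches a successful parse.)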
+ io.CopyN(ioutil.Discard, r, int64(c.oracleCamouflage)) + } + } + return p, err +} + +func (c *cbcCipher) readPacketLeaky(seqNum uint32, r io.Reader) ([]byte, error) { + blockSize := c.decrypter.BlockSize() + + // Read the header, which will include some of the subsequent data in the + // case of block ciphers - this is copied back to the payload later. + // How many bytes of payload/padding will be read with this first read. + firstBlockLength := uint32((prefixLen + blockSize - 1) / blockSize * blockSize) + firstBlock := c.packetData[:firstBlockLength] + if _, err := io.ReadFull(r, firstBlock); err != nil { + return nil, err + } + + c.oracleCamouflage = maxPacket + 4 + c.macSize - firstBlockLength + + c.decrypter.CryptBlocks(firstBlock, firstBlock) + length := binary.BigEndian.Uint32(firstBlock[:4]) + if length > maxPacket { + return nil, cbcError("ssh: packet too large") + } + if length+4 < maxUInt32(cbcMinPacketSize, blockSize) { + // The minimum size of a packet is 16 (or the cipher block size, whichever + // is larger) bytes. + return nil, cbcError("ssh: packet too small") + } + // The length of the packet (including the length field but not the MAC) must + // be a multiple of the block size or 8, whichever is larger. + if (length+4)%maxUInt32(cbcMinPacketSizeMultiple, blockSize) != 0 { + return nil, cbcError("ssh: invalid packet length multiple") + } + + paddingLength := uint32(firstBlock[4]) + if paddingLength < cbcMinPaddingSize || length <= paddingLength+1 { + return nil, cbcError("ssh: invalid packet length") + } + + // Positions within the c.packetData buffer: + macStart := 4 + length + paddingStart := macStart - paddingLength + + // Entire packet size, starting before length, ending at end of mac. + entirePacketSize := macStart + c.macSize + + // Ensure c.packetData is large enough for the entire packet data. + if uint32(cap(c.packetData)) < entirePacketSize { + // Still need to upsize and copy, but this should be rare at runtime, only + // on upsizing the packetData buffer. + c.packetData = make([]byte, entirePacketSize) + copy(c.packetData, firstBlock) + } else { + c.packetData = c.packetData[:entirePacketSize] + } + + if n, err := io.ReadFull(r, c.packetData[firstBlockLength:]); err != nil { + return nil, err + } else { + c.oracleCamouflage -= uint32(n) + } + + remainingCrypted := c.packetData[firstBlockLength:macStart] + c.decrypter.CryptBlocks(remainingCrypted, remainingCrypted) + + mac := c.packetData[macStart:] + if c.mac != nil { + c.mac.Reset() + binary.BigEndian.PutUint32(c.seqNumBytes[:], seqNum) + c.mac.Write(c.seqNumBytes[:]) + c.mac.Write(c.packetData[:macStart]) + c.macResult = c.mac.Sum(c.macResult[:0]) + if subtle.ConstantTimeCompare(c.macResult, mac) != 1 { + return nil, cbcError("ssh: MAC failure") + } + } + + return c.packetData[prefixLen:paddingStart], nil +} + +func (c *cbcCipher) writePacket(seqNum uint32, w io.Writer, rand io.Reader, packet []byte) error { + effectiveBlockSize := maxUInt32(cbcMinPacketSizeMultiple, c.encrypter.BlockSize()) + + // Length of encrypted portion of the packet (header, payload, padding). + // Enforce minimum padding and packet size. + encLength := maxUInt32(prefixLen+len(packet)+cbcMinPaddingSize, cbcMinPaddingSize) + // Enforce block size. + encLength = (encLength + effectiveBlockSize - 1) / effectiveBlockSize * effectiveBlockSize + + length := encLength - 4 + paddingLength := int(length) - (1 + len(packet)) + + // Overall buffer contains: header, payload, padding, mac. 
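+	// For example, a 5-byte payload with a 16-byte block size gives
+	// encLength 16: a 4-byte length field, 1 padding-length byte, 5
+	// payload bytes and 6 padding bytes, with length encoded as 12.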
+	// Space for the MAC is reserved in the capacity but not the slice length.
+	bufferSize := encLength + c.macSize
+	if uint32(cap(c.packetData)) < bufferSize {
+		c.packetData = make([]byte, encLength, bufferSize)
+	} else {
+		c.packetData = c.packetData[:encLength]
+	}
+
+	p := c.packetData
+
+	// Packet header.
+	binary.BigEndian.PutUint32(p, length)
+	p = p[4:]
+	p[0] = byte(paddingLength)
+
+	// Payload.
+	p = p[1:]
+	copy(p, packet)
+
+	// Padding.
+	p = p[len(packet):]
+	if _, err := io.ReadFull(rand, p); err != nil {
+		return err
+	}
+
+	if c.mac != nil {
+		c.mac.Reset()
+		binary.BigEndian.PutUint32(c.seqNumBytes[:], seqNum)
+		c.mac.Write(c.seqNumBytes[:])
+		c.mac.Write(c.packetData)
+		// The MAC is now appended into the capacity reserved for it earlier.
+		c.packetData = c.mac.Sum(c.packetData)
+	}
+
+	c.encrypter.CryptBlocks(c.packetData[:encLength], c.packetData[:encLength])
+
+	if _, err := w.Write(c.packetData); err != nil {
+		return err
+	}
+
+	return nil
+}
diff --git a/vendor/golang.org/x/crypto/ssh/cipher_test.go b/vendor/golang.org/x/crypto/ssh/cipher_test.go
new file mode 100644
index 000000000..54b92b6ed
--- /dev/null
+++ b/vendor/golang.org/x/crypto/ssh/cipher_test.go
@@ -0,0 +1,127 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssh
+
+import (
+	"bytes"
+	"crypto"
+	"crypto/aes"
+	"crypto/rand"
+	"testing"
+)
+
+func TestDefaultCiphersExist(t *testing.T) {
+	for _, cipherAlgo := range supportedCiphers {
+		if _, ok := cipherModes[cipherAlgo]; !ok {
+			t.Errorf("default cipher %q is unknown", cipherAlgo)
+		}
+	}
+}
+
+func TestPacketCiphers(t *testing.T) {
+	// Still test the aes128cbc cipher although it's commented out.
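+	// Registering the algorithm here lets newPacketCipher construct
+	// it; the deferred delete below restores the default cipher table
+	// for the other tests.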
+ cipherModes[aes128cbcID] = &streamCipherMode{16, aes.BlockSize, 0, nil} + defer delete(cipherModes, aes128cbcID) + + for cipher := range cipherModes { + kr := &kexResult{Hash: crypto.SHA1} + algs := directionAlgorithms{ + Cipher: cipher, + MAC: "hmac-sha1", + Compression: "none", + } + client, err := newPacketCipher(clientKeys, algs, kr) + if err != nil { + t.Errorf("newPacketCipher(client, %q): %v", cipher, err) + continue + } + server, err := newPacketCipher(clientKeys, algs, kr) + if err != nil { + t.Errorf("newPacketCipher(client, %q): %v", cipher, err) + continue + } + + want := "bla bla" + input := []byte(want) + buf := &bytes.Buffer{} + if err := client.writePacket(0, buf, rand.Reader, input); err != nil { + t.Errorf("writePacket(%q): %v", cipher, err) + continue + } + + packet, err := server.readPacket(0, buf) + if err != nil { + t.Errorf("readPacket(%q): %v", cipher, err) + continue + } + + if string(packet) != want { + t.Errorf("roundtrip(%q): got %q, want %q", cipher, packet, want) + } + } +} + +func TestCBCOracleCounterMeasure(t *testing.T) { + cipherModes[aes128cbcID] = &streamCipherMode{16, aes.BlockSize, 0, nil} + defer delete(cipherModes, aes128cbcID) + + kr := &kexResult{Hash: crypto.SHA1} + algs := directionAlgorithms{ + Cipher: aes128cbcID, + MAC: "hmac-sha1", + Compression: "none", + } + client, err := newPacketCipher(clientKeys, algs, kr) + if err != nil { + t.Fatalf("newPacketCipher(client): %v", err) + } + + want := "bla bla" + input := []byte(want) + buf := &bytes.Buffer{} + if err := client.writePacket(0, buf, rand.Reader, input); err != nil { + t.Errorf("writePacket: %v", err) + } + + packetSize := buf.Len() + buf.Write(make([]byte, 2*maxPacket)) + + // We corrupt each byte, but this usually will only test the + // 'packet too large' or 'MAC failure' cases. + lastRead := -1 + for i := 0; i < packetSize; i++ { + server, err := newPacketCipher(clientKeys, algs, kr) + if err != nil { + t.Fatalf("newPacketCipher(client): %v", err) + } + + fresh := &bytes.Buffer{} + fresh.Write(buf.Bytes()) + fresh.Bytes()[i] ^= 0x01 + + before := fresh.Len() + _, err = server.readPacket(0, fresh) + if err == nil { + t.Errorf("corrupt byte %d: readPacket succeeded ", i) + continue + } + if _, ok := err.(cbcError); !ok { + t.Errorf("corrupt byte %d: got %v (%T), want cbcError", i, err, err) + continue + } + + after := fresh.Len() + bytesRead := before - after + if bytesRead < maxPacket { + t.Errorf("corrupt byte %d: read %d bytes, want more than %d", i, bytesRead, maxPacket) + continue + } + + if i > 0 && bytesRead != lastRead { + t.Errorf("corrupt byte %d: read %d bytes, want %d bytes read", i, bytesRead, lastRead) + } + lastRead = bytesRead + } +} diff --git a/vendor/golang.org/x/crypto/ssh/client.go b/vendor/golang.org/x/crypto/ssh/client.go new file mode 100644 index 000000000..0b9fbe500 --- /dev/null +++ b/vendor/golang.org/x/crypto/ssh/client.go @@ -0,0 +1,213 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ssh + +import ( + "errors" + "fmt" + "net" + "sync" +) + +// Client implements a traditional SSH client that supports shells, +// subprocesses, port forwarding and tunneled dialing. 
+type Client struct { + Conn + + forwards forwardList // forwarded tcpip connections from the remote side + mu sync.Mutex + channelHandlers map[string]chan NewChannel +} + +// HandleChannelOpen returns a channel on which NewChannel requests +// for the given type are sent. If the type already is being handled, +// nil is returned. The channel is closed when the connection is closed. +func (c *Client) HandleChannelOpen(channelType string) <-chan NewChannel { + c.mu.Lock() + defer c.mu.Unlock() + if c.channelHandlers == nil { + // The SSH channel has been closed. + c := make(chan NewChannel) + close(c) + return c + } + + ch := c.channelHandlers[channelType] + if ch != nil { + return nil + } + + ch = make(chan NewChannel, 16) + c.channelHandlers[channelType] = ch + return ch +} + +// NewClient creates a Client on top of the given connection. +func NewClient(c Conn, chans <-chan NewChannel, reqs <-chan *Request) *Client { + conn := &Client{ + Conn: c, + channelHandlers: make(map[string]chan NewChannel, 1), + } + + go conn.handleGlobalRequests(reqs) + go conn.handleChannelOpens(chans) + go func() { + conn.Wait() + conn.forwards.closeAll() + }() + go conn.forwards.handleChannels(conn.HandleChannelOpen("forwarded-tcpip")) + return conn +} + +// NewClientConn establishes an authenticated SSH connection using c +// as the underlying transport. The Request and NewChannel channels +// must be serviced or the connection will hang. +func NewClientConn(c net.Conn, addr string, config *ClientConfig) (Conn, <-chan NewChannel, <-chan *Request, error) { + fullConf := *config + fullConf.SetDefaults() + conn := &connection{ + sshConn: sshConn{conn: c}, + } + + if err := conn.clientHandshake(addr, &fullConf); err != nil { + c.Close() + return nil, nil, nil, fmt.Errorf("ssh: handshake failed: %v", err) + } + conn.mux = newMux(conn.transport) + return conn, conn.mux.incomingChannels, conn.mux.incomingRequests, nil +} + +// clientHandshake performs the client side key exchange. See RFC 4253 Section +// 7. +func (c *connection) clientHandshake(dialAddress string, config *ClientConfig) error { + if config.ClientVersion != "" { + c.clientVersion = []byte(config.ClientVersion) + } else { + c.clientVersion = []byte(packageVersion) + } + var err error + c.serverVersion, err = exchangeVersions(c.sshConn.conn, c.clientVersion) + if err != nil { + return err + } + + c.transport = newClientTransport( + newTransport(c.sshConn.conn, config.Rand, true /* is client */), + c.clientVersion, c.serverVersion, config, dialAddress, c.sshConn.RemoteAddr()) + if err := c.transport.requestKeyChange(); err != nil { + return err + } + + if packet, err := c.transport.readPacket(); err != nil { + return err + } else if packet[0] != msgNewKeys { + return unexpectedMessageError(msgNewKeys, packet[0]) + } + + // We just did the key change, so the session ID is established. + c.sessionID = c.transport.getSessionID() + + return c.clientAuthenticate(config) +} + +// verifyHostKeySignature verifies the host key obtained in the key +// exchange. +func verifyHostKeySignature(hostKey PublicKey, result *kexResult) error { + sig, rest, ok := parseSignatureBody(result.Signature) + if len(rest) > 0 || !ok { + return errors.New("ssh: signature parse error") + } + + return hostKey.Verify(result.H, sig) +} + +// NewSession opens a new Session for this client. (A session is a remote +// execution of a program.) 
+func (c *Client) NewSession() (*Session, error) {
+	ch, in, err := c.OpenChannel("session", nil)
+	if err != nil {
+		return nil, err
+	}
+	return newSession(ch, in)
+}
+
+func (c *Client) handleGlobalRequests(incoming <-chan *Request) {
+	for r := range incoming {
+		// This handles keepalive messages and matches
+		// the behaviour of OpenSSH.
+		r.Reply(false, nil)
+	}
+}
+
+// handleChannelOpens handles channel open messages from the remote side.
+func (c *Client) handleChannelOpens(in <-chan NewChannel) {
+	for ch := range in {
+		c.mu.Lock()
+		handler := c.channelHandlers[ch.ChannelType()]
+		c.mu.Unlock()
+
+		if handler != nil {
+			handler <- ch
+		} else {
+			ch.Reject(UnknownChannelType, fmt.Sprintf("unknown channel type: %v", ch.ChannelType()))
+		}
+	}
+
+	c.mu.Lock()
+	for _, ch := range c.channelHandlers {
+		close(ch)
+	}
+	c.channelHandlers = nil
+	c.mu.Unlock()
+}
+
+// Dial starts a client connection to the given SSH server. It is a
+// convenience function that connects to the given network address,
+// initiates the SSH handshake, and then sets up a Client. For access
+// to incoming channels and requests, use net.Dial with NewClientConn
+// instead.
+func Dial(network, addr string, config *ClientConfig) (*Client, error) {
+	conn, err := net.Dial(network, addr)
+	if err != nil {
+		return nil, err
+	}
+	c, chans, reqs, err := NewClientConn(conn, addr, config)
+	if err != nil {
+		return nil, err
+	}
+	return NewClient(c, chans, reqs), nil
+}
+
+// A ClientConfig structure is used to configure a Client. It must not be
+// modified after having been passed to an SSH function.
+type ClientConfig struct {
+	// Config contains configuration that is shared between clients and
+	// servers.
+	Config
+
+	// User contains the username to authenticate as.
+	User string
+
+	// Auth contains possible authentication methods to use with the
+	// server. Only the first instance of a particular RFC 4252 method will
+	// be used during authentication.
+	Auth []AuthMethod
+
+	// HostKeyCallback, if not nil, is called during the cryptographic
+	// handshake to validate the server's host key. A nil HostKeyCallback
+	// implies that all host keys are accepted.
+	HostKeyCallback func(hostname string, remote net.Addr, key PublicKey) error
+
+	// ClientVersion contains the version identification string that will
+	// be used for the connection. If empty, a reasonable default is used.
+	ClientVersion string
+
+	// HostKeyAlgorithms lists the key types that the client will
+	// accept from the server as host key, in order of
+	// preference. If empty, a reasonable default is used. Any
+	// string returned from the PublicKey.Type method may be used, or
+	// any of the CertAlgoXxxx and KeyAlgoXxxx constants.
+	HostKeyAlgorithms []string
+}
diff --git a/vendor/golang.org/x/crypto/ssh/client_auth.go b/vendor/golang.org/x/crypto/ssh/client_auth.go
new file mode 100644
index 000000000..e15be3ef2
--- /dev/null
+++ b/vendor/golang.org/x/crypto/ssh/client_auth.go
@@ -0,0 +1,441 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssh
+
+import (
+	"bytes"
+	"errors"
+	"fmt"
+	"io"
+)
+
+// clientAuthenticate authenticates with the remote server. See RFC 4252.
+func (c *connection) clientAuthenticate(config *ClientConfig) error { + // initiate user auth session + if err := c.transport.writePacket(Marshal(&serviceRequestMsg{serviceUserAuth})); err != nil { + return err + } + packet, err := c.transport.readPacket() + if err != nil { + return err + } + var serviceAccept serviceAcceptMsg + if err := Unmarshal(packet, &serviceAccept); err != nil { + return err + } + + // during the authentication phase the client first attempts the "none" method + // then any untried methods suggested by the server. + tried := make(map[string]bool) + var lastMethods []string + for auth := AuthMethod(new(noneAuth)); auth != nil; { + ok, methods, err := auth.auth(c.transport.getSessionID(), config.User, c.transport, config.Rand) + if err != nil { + return err + } + if ok { + // success + return nil + } + tried[auth.method()] = true + if methods == nil { + methods = lastMethods + } + lastMethods = methods + + auth = nil + + findNext: + for _, a := range config.Auth { + candidateMethod := a.method() + if tried[candidateMethod] { + continue + } + for _, meth := range methods { + if meth == candidateMethod { + auth = a + break findNext + } + } + } + } + return fmt.Errorf("ssh: unable to authenticate, attempted methods %v, no supported methods remain", keys(tried)) +} + +func keys(m map[string]bool) []string { + s := make([]string, 0, len(m)) + + for key := range m { + s = append(s, key) + } + return s +} + +// An AuthMethod represents an instance of an RFC 4252 authentication method. +type AuthMethod interface { + // auth authenticates user over transport t. + // Returns true if authentication is successful. + // If authentication is not successful, a []string of alternative + // method names is returned. If the slice is nil, it will be ignored + // and the previous set of possible methods will be reused. + auth(session []byte, user string, p packetConn, rand io.Reader) (bool, []string, error) + + // method returns the RFC 4252 method name. + method() string +} + +// "none" authentication, RFC 4252 section 5.2. +type noneAuth int + +func (n *noneAuth) auth(session []byte, user string, c packetConn, rand io.Reader) (bool, []string, error) { + if err := c.writePacket(Marshal(&userAuthRequestMsg{ + User: user, + Service: serviceSSH, + Method: "none", + })); err != nil { + return false, nil, err + } + + return handleAuthResponse(c) +} + +func (n *noneAuth) method() string { + return "none" +} + +// passwordCallback is an AuthMethod that fetches the password through +// a function call, e.g. by prompting the user. +type passwordCallback func() (password string, err error) + +func (cb passwordCallback) auth(session []byte, user string, c packetConn, rand io.Reader) (bool, []string, error) { + type passwordAuthMsg struct { + User string `sshtype:"50"` + Service string + Method string + Reply bool + Password string + } + + pw, err := cb() + // REVIEW NOTE: is there a need to support skipping a password attempt? + // The program may only find out that the user doesn't have a password + // when prompting. + if err != nil { + return false, nil, err + } + + if err := c.writePacket(Marshal(&passwordAuthMsg{ + User: user, + Service: serviceSSH, + Method: cb.method(), + Reply: false, + Password: pw, + })); err != nil { + return false, nil, err + } + + return handleAuthResponse(c) +} + +func (cb passwordCallback) method() string { + return "password" +} + +// Password returns an AuthMethod using the given password. 
+func Password(secret string) AuthMethod { + return passwordCallback(func() (string, error) { return secret, nil }) +} + +// PasswordCallback returns an AuthMethod that uses a callback for +// fetching a password. +func PasswordCallback(prompt func() (secret string, err error)) AuthMethod { + return passwordCallback(prompt) +} + +type publickeyAuthMsg struct { + User string `sshtype:"50"` + Service string + Method string + // HasSig indicates to the receiver packet that the auth request is signed and + // should be used for authentication of the request. + HasSig bool + Algoname string + PubKey []byte + // Sig is tagged with "rest" so Marshal will exclude it during + // validateKey + Sig []byte `ssh:"rest"` +} + +// publicKeyCallback is an AuthMethod that uses a set of key +// pairs for authentication. +type publicKeyCallback func() ([]Signer, error) + +func (cb publicKeyCallback) method() string { + return "publickey" +} + +func (cb publicKeyCallback) auth(session []byte, user string, c packetConn, rand io.Reader) (bool, []string, error) { + // Authentication is performed in two stages. The first stage sends an + // enquiry to test if each key is acceptable to the remote. The second + // stage attempts to authenticate with the valid keys obtained in the + // first stage. + + signers, err := cb() + if err != nil { + return false, nil, err + } + var validKeys []Signer + for _, signer := range signers { + if ok, err := validateKey(signer.PublicKey(), user, c); ok { + validKeys = append(validKeys, signer) + } else { + if err != nil { + return false, nil, err + } + } + } + + // methods that may continue if this auth is not successful. + var methods []string + for _, signer := range validKeys { + pub := signer.PublicKey() + + pubKey := pub.Marshal() + sign, err := signer.Sign(rand, buildDataSignedForAuth(session, userAuthRequestMsg{ + User: user, + Service: serviceSSH, + Method: cb.method(), + }, []byte(pub.Type()), pubKey)) + if err != nil { + return false, nil, err + } + + // manually wrap the serialized signature in a string + s := Marshal(sign) + sig := make([]byte, stringLength(len(s))) + marshalString(sig, s) + msg := publickeyAuthMsg{ + User: user, + Service: serviceSSH, + Method: cb.method(), + HasSig: true, + Algoname: pub.Type(), + PubKey: pubKey, + Sig: sig, + } + p := Marshal(&msg) + if err := c.writePacket(p); err != nil { + return false, nil, err + } + var success bool + success, methods, err = handleAuthResponse(c) + if err != nil { + return false, nil, err + } + if success { + return success, methods, err + } + } + return false, methods, nil +} + +// validateKey validates the key provided is acceptable to the server. 
+func validateKey(key PublicKey, user string, c packetConn) (bool, error) {
+	pubKey := key.Marshal()
+	msg := publickeyAuthMsg{
+		User:     user,
+		Service:  serviceSSH,
+		Method:   "publickey",
+		HasSig:   false,
+		Algoname: key.Type(),
+		PubKey:   pubKey,
+	}
+	if err := c.writePacket(Marshal(&msg)); err != nil {
+		return false, err
+	}
+
+	return confirmKeyAck(key, c)
+}
+
+func confirmKeyAck(key PublicKey, c packetConn) (bool, error) {
+	pubKey := key.Marshal()
+	algoname := key.Type()
+
+	for {
+		packet, err := c.readPacket()
+		if err != nil {
+			return false, err
+		}
+		switch packet[0] {
+		case msgUserAuthBanner:
+			// TODO(gpaul): add callback to present the banner to the user
+		case msgUserAuthPubKeyOk:
+			var msg userAuthPubKeyOkMsg
+			if err := Unmarshal(packet, &msg); err != nil {
+				return false, err
+			}
+			if msg.Algo != algoname || !bytes.Equal(msg.PubKey, pubKey) {
+				return false, nil
+			}
+			return true, nil
+		case msgUserAuthFailure:
+			return false, nil
+		default:
+			return false, unexpectedMessageError(msgUserAuthSuccess, packet[0])
+		}
+	}
+}
+
+// PublicKeys returns an AuthMethod that uses the given key
+// pairs.
+func PublicKeys(signers ...Signer) AuthMethod {
+	return publicKeyCallback(func() ([]Signer, error) { return signers, nil })
+}
+
+// PublicKeysCallback returns an AuthMethod that runs the given
+// function to obtain a list of key pairs.
+func PublicKeysCallback(getSigners func() (signers []Signer, err error)) AuthMethod {
+	return publicKeyCallback(getSigners)
+}
+
+// handleAuthResponse returns whether the preceding authentication request
+// succeeded, along with a list of remaining authentication methods to try
+// next, and an error if an unexpected response was received.
+func handleAuthResponse(c packetConn) (bool, []string, error) {
+	for {
+		packet, err := c.readPacket()
+		if err != nil {
+			return false, nil, err
+		}
+
+		switch packet[0] {
+		case msgUserAuthBanner:
+			// TODO: add callback to present the banner to the user
+		case msgUserAuthFailure:
+			var msg userAuthFailureMsg
+			if err := Unmarshal(packet, &msg); err != nil {
+				return false, nil, err
+			}
+			return false, msg.Methods, nil
+		case msgUserAuthSuccess:
+			return true, nil, nil
+		case msgDisconnect:
+			return false, nil, io.EOF
+		default:
+			return false, nil, unexpectedMessageError(msgUserAuthSuccess, packet[0])
+		}
+	}
+}
+
+// KeyboardInteractiveChallenge should print questions, optionally
+// disabling echoing (e.g. for passwords), and return all the answers.
+// Challenge may be called multiple times in a single session. After
+// successful authentication, the server may send a challenge with no
+// questions, for which the user and instruction messages should be
+// printed. RFC 4256 section 3.3 details how the UI should behave for
+// both CLI and GUI environments.
+type KeyboardInteractiveChallenge func(user, instruction string, questions []string, echos []bool) (answers []string, err error)
+
+// KeyboardInteractive returns an AuthMethod using a prompt/response
+// sequence controlled by the server.
+func KeyboardInteractive(challenge KeyboardInteractiveChallenge) AuthMethod {
+	return challenge
+}
+
+func (cb KeyboardInteractiveChallenge) method() string {
+	return "keyboard-interactive"
+}
+
+func (cb KeyboardInteractiveChallenge) auth(session []byte, user string, c packetConn, rand io.Reader) (bool, []string, error) {
+	type initiateMsg struct {
+		User       string `sshtype:"50"`
+		Service    string
+		Method     string
+		Language   string
+		Submethods string
+	}
+
+	if err := c.writePacket(Marshal(&initiateMsg{
+		User:    user,
+		Service: serviceSSH,
+		Method:  "keyboard-interactive",
+	})); err != nil {
+		return false, nil, err
+	}
+
+	for {
+		packet, err := c.readPacket()
+		if err != nil {
+			return false, nil, err
+		}
+
+		// Like handleAuthResponse, but with fewer options.
+		switch packet[0] {
+		case msgUserAuthBanner:
+			// TODO: Print banners during userauth.
+			continue
+		case msgUserAuthInfoRequest:
+			// OK
+		case msgUserAuthFailure:
+			var msg userAuthFailureMsg
+			if err := Unmarshal(packet, &msg); err != nil {
+				return false, nil, err
+			}
+			return false, msg.Methods, nil
+		case msgUserAuthSuccess:
+			return true, nil, nil
+		default:
+			return false, nil, unexpectedMessageError(msgUserAuthInfoRequest, packet[0])
+		}
+
+		var msg userAuthInfoRequestMsg
+		if err := Unmarshal(packet, &msg); err != nil {
+			return false, nil, err
+		}
+
+		// Manually unpack the prompt/echo pairs.
+		rest := msg.Prompts
+		var prompts []string
+		var echos []bool
+		for i := 0; i < int(msg.NumPrompts); i++ {
+			prompt, r, ok := parseString(rest)
+			if !ok || len(r) == 0 {
+				return false, nil, errors.New("ssh: prompt format error")
+			}
+			prompts = append(prompts, string(prompt))
+			echos = append(echos, r[0] != 0)
+			rest = r[1:]
+		}
+
+		if len(rest) != 0 {
+			return false, nil, errors.New("ssh: extra data following keyboard-interactive pairs")
+		}
+
+		answers, err := cb(msg.User, msg.Instruction, prompts, echos)
+		if err != nil {
+			return false, nil, err
+		}
+
+		if len(answers) != len(prompts) {
+			return false, nil, errors.New("ssh: not enough answers from keyboard-interactive callback")
+		}
+		responseLength := 1 + 4
+		for _, a := range answers {
+			responseLength += stringLength(len(a))
+		}
+		serialized := make([]byte, responseLength)
+		p := serialized
+		p[0] = msgUserAuthInfoResponse
+		p = p[1:]
+		p = marshalUint32(p, uint32(len(answers)))
+		for _, a := range answers {
+			p = marshalString(p, []byte(a))
+		}
+
+		if err := c.writePacket(serialized); err != nil {
+			return false, nil, err
+		}
+	}
+}
diff --git a/vendor/golang.org/x/crypto/ssh/client_auth_test.go b/vendor/golang.org/x/crypto/ssh/client_auth_test.go
new file mode 100644
index 000000000..2ea44624f
--- /dev/null
+++ b/vendor/golang.org/x/crypto/ssh/client_auth_test.go
@@ -0,0 +1,393 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+ +package ssh + +import ( + "bytes" + "crypto/rand" + "errors" + "fmt" + "strings" + "testing" +) + +type keyboardInteractive map[string]string + +func (cr keyboardInteractive) Challenge(user string, instruction string, questions []string, echos []bool) ([]string, error) { + var answers []string + for _, q := range questions { + answers = append(answers, cr[q]) + } + return answers, nil +} + +// reused internally by tests +var clientPassword = "tiger" + +// tryAuth runs a handshake with a given config against an SSH server +// with config serverConfig +func tryAuth(t *testing.T, config *ClientConfig) error { + c1, c2, err := netPipe() + if err != nil { + t.Fatalf("netPipe: %v", err) + } + defer c1.Close() + defer c2.Close() + + certChecker := CertChecker{ + IsAuthority: func(k PublicKey) bool { + return bytes.Equal(k.Marshal(), testPublicKeys["ecdsa"].Marshal()) + }, + UserKeyFallback: func(conn ConnMetadata, key PublicKey) (*Permissions, error) { + if conn.User() == "testuser" && bytes.Equal(key.Marshal(), testPublicKeys["rsa"].Marshal()) { + return nil, nil + } + + return nil, fmt.Errorf("pubkey for %q not acceptable", conn.User()) + }, + IsRevoked: func(c *Certificate) bool { + return c.Serial == 666 + }, + } + + serverConfig := &ServerConfig{ + PasswordCallback: func(conn ConnMetadata, pass []byte) (*Permissions, error) { + if conn.User() == "testuser" && string(pass) == clientPassword { + return nil, nil + } + return nil, errors.New("password auth failed") + }, + PublicKeyCallback: certChecker.Authenticate, + KeyboardInteractiveCallback: func(conn ConnMetadata, challenge KeyboardInteractiveChallenge) (*Permissions, error) { + ans, err := challenge("user", + "instruction", + []string{"question1", "question2"}, + []bool{true, true}) + if err != nil { + return nil, err + } + ok := conn.User() == "testuser" && ans[0] == "answer1" && ans[1] == "answer2" + if ok { + challenge("user", "motd", nil, nil) + return nil, nil + } + return nil, errors.New("keyboard-interactive failed") + }, + AuthLogCallback: func(conn ConnMetadata, method string, err error) { + t.Logf("user %q, method %q: %v", conn.User(), method, err) + }, + } + serverConfig.AddHostKey(testSigners["rsa"]) + + go newServer(c1, serverConfig) + _, _, _, err = NewClientConn(c2, "", config) + return err +} + +func TestClientAuthPublicKey(t *testing.T) { + config := &ClientConfig{ + User: "testuser", + Auth: []AuthMethod{ + PublicKeys(testSigners["rsa"]), + }, + } + if err := tryAuth(t, config); err != nil { + t.Fatalf("unable to dial remote side: %s", err) + } +} + +func TestAuthMethodPassword(t *testing.T) { + config := &ClientConfig{ + User: "testuser", + Auth: []AuthMethod{ + Password(clientPassword), + }, + } + + if err := tryAuth(t, config); err != nil { + t.Fatalf("unable to dial remote side: %s", err) + } +} + +func TestAuthMethodFallback(t *testing.T) { + var passwordCalled bool + config := &ClientConfig{ + User: "testuser", + Auth: []AuthMethod{ + PublicKeys(testSigners["rsa"]), + PasswordCallback( + func() (string, error) { + passwordCalled = true + return "WRONG", nil + }), + }, + } + + if err := tryAuth(t, config); err != nil { + t.Fatalf("unable to dial remote side: %s", err) + } + + if passwordCalled { + t.Errorf("password auth tried before public-key auth.") + } +} + +func TestAuthMethodWrongPassword(t *testing.T) { + config := &ClientConfig{ + User: "testuser", + Auth: []AuthMethod{ + Password("wrong"), + PublicKeys(testSigners["rsa"]), + }, + } + + if err := tryAuth(t, config); err != nil { + t.Fatalf("unable to dial 
remote side: %s", err) + } +} + +func TestAuthMethodKeyboardInteractive(t *testing.T) { + answers := keyboardInteractive(map[string]string{ + "question1": "answer1", + "question2": "answer2", + }) + config := &ClientConfig{ + User: "testuser", + Auth: []AuthMethod{ + KeyboardInteractive(answers.Challenge), + }, + } + + if err := tryAuth(t, config); err != nil { + t.Fatalf("unable to dial remote side: %s", err) + } +} + +func TestAuthMethodWrongKeyboardInteractive(t *testing.T) { + answers := keyboardInteractive(map[string]string{ + "question1": "answer1", + "question2": "WRONG", + }) + config := &ClientConfig{ + User: "testuser", + Auth: []AuthMethod{ + KeyboardInteractive(answers.Challenge), + }, + } + + if err := tryAuth(t, config); err == nil { + t.Fatalf("wrong answers should not have authenticated with KeyboardInteractive") + } +} + +// the mock server will only authenticate ssh-rsa keys +func TestAuthMethodInvalidPublicKey(t *testing.T) { + config := &ClientConfig{ + User: "testuser", + Auth: []AuthMethod{ + PublicKeys(testSigners["dsa"]), + }, + } + + if err := tryAuth(t, config); err == nil { + t.Fatalf("dsa private key should not have authenticated with rsa public key") + } +} + +// the client should authenticate with the second key +func TestAuthMethodRSAandDSA(t *testing.T) { + config := &ClientConfig{ + User: "testuser", + Auth: []AuthMethod{ + PublicKeys(testSigners["dsa"], testSigners["rsa"]), + }, + } + if err := tryAuth(t, config); err != nil { + t.Fatalf("client could not authenticate with rsa key: %v", err) + } +} + +func TestClientHMAC(t *testing.T) { + for _, mac := range supportedMACs { + config := &ClientConfig{ + User: "testuser", + Auth: []AuthMethod{ + PublicKeys(testSigners["rsa"]), + }, + Config: Config{ + MACs: []string{mac}, + }, + } + if err := tryAuth(t, config); err != nil { + t.Fatalf("client could not authenticate with mac algo %s: %v", mac, err) + } + } +} + +// issue 4285. 
+func TestClientUnsupportedCipher(t *testing.T) {
+	config := &ClientConfig{
+		User: "testuser",
+		Auth: []AuthMethod{
+			PublicKeys(),
+		},
+		Config: Config{
+			Ciphers: []string{"aes128-cbc"}, // not currently supported
+		},
+	}
+	if err := tryAuth(t, config); err == nil {
+		t.Errorf("expected no ciphers in common")
+	}
+}
+
+func TestClientUnsupportedKex(t *testing.T) {
+	config := &ClientConfig{
+		User: "testuser",
+		Auth: []AuthMethod{
+			PublicKeys(),
+		},
+		Config: Config{
+			KeyExchanges: []string{"diffie-hellman-group-exchange-sha256"}, // not currently supported
+		},
+	}
+	if err := tryAuth(t, config); err == nil || !strings.Contains(err.Error(), "common algorithm") {
+		t.Errorf("got %v, expected 'common algorithm'", err)
+	}
+}
+
+func TestClientLoginCert(t *testing.T) {
+	cert := &Certificate{
+		Key:         testPublicKeys["rsa"],
+		ValidBefore: CertTimeInfinity,
+		CertType:    UserCert,
+	}
+	cert.SignCert(rand.Reader, testSigners["ecdsa"])
+	certSigner, err := NewCertSigner(cert, testSigners["rsa"])
+	if err != nil {
+		t.Fatalf("NewCertSigner: %v", err)
+	}
+
+	clientConfig := &ClientConfig{
+		User: "user",
+	}
+	clientConfig.Auth = append(clientConfig.Auth, PublicKeys(certSigner))
+
+	t.Log("should succeed")
+	if err := tryAuth(t, clientConfig); err != nil {
+		t.Errorf("cert login failed: %v", err)
+	}
+
+	t.Log("corrupted signature")
+	cert.Signature.Blob[0]++
+	if err := tryAuth(t, clientConfig); err == nil {
+		t.Errorf("cert login passed with corrupted sig")
+	}
+
+	t.Log("revoked")
+	cert.Serial = 666
+	cert.SignCert(rand.Reader, testSigners["ecdsa"])
+	if err := tryAuth(t, clientConfig); err == nil {
+		t.Errorf("revoked cert login succeeded")
+	}
+	cert.Serial = 1
+
+	t.Log("sign with wrong key")
+	cert.SignCert(rand.Reader, testSigners["dsa"])
+	if err := tryAuth(t, clientConfig); err == nil {
+		t.Errorf("cert login passed with non-authoritative key")
+	}
+
+	t.Log("host cert")
+	cert.CertType = HostCert
+	cert.SignCert(rand.Reader, testSigners["ecdsa"])
+	if err := tryAuth(t, clientConfig); err == nil {
+		t.Errorf("cert login passed with wrong type")
+	}
+	cert.CertType = UserCert
+
+	t.Log("principal specified")
+	cert.ValidPrincipals = []string{"user"}
+	cert.SignCert(rand.Reader, testSigners["ecdsa"])
+	if err := tryAuth(t, clientConfig); err != nil {
+		t.Errorf("cert login failed: %v", err)
+	}
+
+	t.Log("wrong principal specified")
+	cert.ValidPrincipals = []string{"fred"}
+	cert.SignCert(rand.Reader, testSigners["ecdsa"])
+	if err := tryAuth(t, clientConfig); err == nil {
+		t.Errorf("cert login passed with wrong principal")
+	}
+	cert.ValidPrincipals = nil
+
+	t.Log("added critical option")
+	cert.CriticalOptions = map[string]string{"root-access": "yes"}
+	cert.SignCert(rand.Reader, testSigners["ecdsa"])
+	if err := tryAuth(t, clientConfig); err == nil {
+		t.Errorf("cert login passed with unrecognized critical option")
+	}
+
+	t.Log("allowed source address")
+	cert.CriticalOptions = map[string]string{"source-address": "127.0.0.42/24"}
+	cert.SignCert(rand.Reader, testSigners["ecdsa"])
+	if err := tryAuth(t, clientConfig); err != nil {
+		t.Errorf("cert login with source-address failed: %v", err)
+	}
+
+	t.Log("disallowed source address")
+	cert.CriticalOptions = map[string]string{"source-address": "127.0.0.42"}
+	cert.SignCert(rand.Reader, testSigners["ecdsa"])
+	if err := tryAuth(t, clientConfig); err == nil {
+		t.Errorf("cert login with source-address succeeded")
+	}
+}
+
+func testPermissionsPassing(withPermissions bool, t *testing.T) {
+	serverConfig := &ServerConfig{
+
PublicKeyCallback: func(conn ConnMetadata, key PublicKey) (*Permissions, error) { + if conn.User() == "nopermissions" { + return nil, nil + } else { + return &Permissions{}, nil + } + }, + } + serverConfig.AddHostKey(testSigners["rsa"]) + + clientConfig := &ClientConfig{ + Auth: []AuthMethod{ + PublicKeys(testSigners["rsa"]), + }, + } + if withPermissions { + clientConfig.User = "permissions" + } else { + clientConfig.User = "nopermissions" + } + + c1, c2, err := netPipe() + if err != nil { + t.Fatalf("netPipe: %v", err) + } + defer c1.Close() + defer c2.Close() + + go NewClientConn(c2, "", clientConfig) + serverConn, err := newServer(c1, serverConfig) + if err != nil { + t.Fatal(err) + } + if p := serverConn.Permissions; (p != nil) != withPermissions { + t.Fatalf("withPermissions is %t, but Permissions object is %#v", withPermissions, p) + } +} + +func TestPermissionsPassing(t *testing.T) { + testPermissionsPassing(true, t) +} + +func TestNoPermissionsPassing(t *testing.T) { + testPermissionsPassing(false, t) +} diff --git a/vendor/golang.org/x/crypto/ssh/client_test.go b/vendor/golang.org/x/crypto/ssh/client_test.go new file mode 100644 index 000000000..1fe790cb4 --- /dev/null +++ b/vendor/golang.org/x/crypto/ssh/client_test.go @@ -0,0 +1,39 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ssh + +import ( + "net" + "testing" +) + +func testClientVersion(t *testing.T, config *ClientConfig, expected string) { + clientConn, serverConn := net.Pipe() + defer clientConn.Close() + receivedVersion := make(chan string, 1) + go func() { + version, err := readVersion(serverConn) + if err != nil { + receivedVersion <- "" + } else { + receivedVersion <- string(version) + } + serverConn.Close() + }() + NewClientConn(clientConn, "", config) + actual := <-receivedVersion + if actual != expected { + t.Fatalf("got %s; want %s", actual, expected) + } +} + +func TestCustomClientVersion(t *testing.T) { + version := "Test-Client-Version-0.0" + testClientVersion(t, &ClientConfig{ClientVersion: version}, version) +} + +func TestDefaultClientVersion(t *testing.T) { + testClientVersion(t, &ClientConfig{}, packageVersion) +} diff --git a/vendor/golang.org/x/crypto/ssh/common.go b/vendor/golang.org/x/crypto/ssh/common.go new file mode 100644 index 000000000..9fc739e1d --- /dev/null +++ b/vendor/golang.org/x/crypto/ssh/common.go @@ -0,0 +1,354 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ssh + +import ( + "crypto" + "crypto/rand" + "fmt" + "io" + "sync" + + _ "crypto/sha1" + _ "crypto/sha256" + _ "crypto/sha512" +) + +// These are string constants in the SSH protocol. +const ( + compressionNone = "none" + serviceUserAuth = "ssh-userauth" + serviceSSH = "ssh-connection" +) + +// supportedCiphers specifies the supported ciphers in preference order. +var supportedCiphers = []string{ + "aes128-ctr", "aes192-ctr", "aes256-ctr", + "aes128-gcm@openssh.com", + "arcfour256", "arcfour128", +} + +// supportedKexAlgos specifies the supported key-exchange algorithms in +// preference order. +var supportedKexAlgos = []string{ + kexAlgoCurve25519SHA256, + // P384 and P521 are not constant-time yet, but since we don't + // reuse ephemeral keys, using them for ECDH should be OK. 
+	kexAlgoECDH256, kexAlgoECDH384, kexAlgoECDH521,
+	kexAlgoDH14SHA1, kexAlgoDH1SHA1,
+}
+
+// supportedHostKeyAlgos specifies the supported host-key algorithms (i.e.
+// methods of authenticating servers) in preference order.
+var supportedHostKeyAlgos = []string{
+	CertAlgoRSAv01, CertAlgoDSAv01, CertAlgoECDSA256v01,
+	CertAlgoECDSA384v01, CertAlgoECDSA521v01,
+
+	KeyAlgoECDSA256, KeyAlgoECDSA384, KeyAlgoECDSA521,
+	KeyAlgoRSA, KeyAlgoDSA,
+}
+
+// supportedMACs specifies a default set of MAC algorithms in preference order.
+// This is based on RFC 4253, section 6.4, but with hmac-md5 variants removed
+// because they have reached the end of their useful life.
+var supportedMACs = []string{
+	"hmac-sha2-256", "hmac-sha1", "hmac-sha1-96",
+}
+
+var supportedCompressions = []string{compressionNone}
+
+// hashFuncs keeps the mapping of supported algorithms to their respective
+// hashes needed for signature verification.
+var hashFuncs = map[string]crypto.Hash{
+	KeyAlgoRSA:          crypto.SHA1,
+	KeyAlgoDSA:          crypto.SHA1,
+	KeyAlgoECDSA256:     crypto.SHA256,
+	KeyAlgoECDSA384:     crypto.SHA384,
+	KeyAlgoECDSA521:     crypto.SHA512,
+	CertAlgoRSAv01:      crypto.SHA1,
+	CertAlgoDSAv01:      crypto.SHA1,
+	CertAlgoECDSA256v01: crypto.SHA256,
+	CertAlgoECDSA384v01: crypto.SHA384,
+	CertAlgoECDSA521v01: crypto.SHA512,
+}
+
+// unexpectedMessageError results when the SSH message that we received didn't
+// match what we wanted.
+func unexpectedMessageError(expected, got uint8) error {
+	return fmt.Errorf("ssh: unexpected message type %d (expected %d)", got, expected)
+}
+
+// parseError results from a malformed SSH message.
+func parseError(tag uint8) error {
+	return fmt.Errorf("ssh: parse error in message type %d", tag)
+}
+
+func findCommon(what string, client []string, server []string) (common string, err error) {
+	for _, c := range client {
+		for _, s := range server {
+			if c == s {
+				return c, nil
+			}
+		}
+	}
+	return "", fmt.Errorf("ssh: no common algorithm for %s; client offered: %v, server offered: %v", what, client, server)
+}
+
+type directionAlgorithms struct {
+	Cipher      string
+	MAC         string
+	Compression string
+}
+
+type algorithms struct {
+	kex     string
+	hostKey string
+	w       directionAlgorithms
+	r       directionAlgorithms
+}
+
+func findAgreedAlgorithms(clientKexInit, serverKexInit *kexInitMsg) (algs *algorithms, err error) {
+	result := &algorithms{}
+
+	result.kex, err = findCommon("key exchange", clientKexInit.KexAlgos, serverKexInit.KexAlgos)
+	if err != nil {
+		return
+	}
+
+	result.hostKey, err = findCommon("host key", clientKexInit.ServerHostKeyAlgos, serverKexInit.ServerHostKeyAlgos)
+	if err != nil {
+		return
+	}
+
+	result.w.Cipher, err = findCommon("client to server cipher", clientKexInit.CiphersClientServer, serverKexInit.CiphersClientServer)
+	if err != nil {
+		return
+	}
+
+	result.r.Cipher, err = findCommon("server to client cipher", clientKexInit.CiphersServerClient, serverKexInit.CiphersServerClient)
+	if err != nil {
+		return
+	}
+
+	result.w.MAC, err = findCommon("client to server MAC", clientKexInit.MACsClientServer, serverKexInit.MACsClientServer)
+	if err != nil {
+		return
+	}
+
+	result.r.MAC, err = findCommon("server to client MAC", clientKexInit.MACsServerClient, serverKexInit.MACsServerClient)
+	if err != nil {
+		return
+	}
+
+	result.w.Compression, err = findCommon("client to server compression", clientKexInit.CompressionClientServer, serverKexInit.CompressionClientServer)
+	if err != nil {
+		return
+	}
+
+	result.r.Compression, err = findCommon("server to client compression", clientKexInit.CompressionServerClient, serverKexInit.CompressionServerClient)
+	if err != nil {
+		return
+	}
+
+	return result, nil
+}
+
+// If RekeyThreshold is too small, we can't make any progress sending
+// data.
+const minRekeyThreshold uint64 = 256
+
+// Config contains configuration data common to both ServerConfig and
+// ClientConfig.
+type Config struct {
+	// Rand provides the source of entropy for cryptographic
+	// primitives. If Rand is nil, the cryptographic random reader
+	// in package crypto/rand will be used.
+	Rand io.Reader
+
+	// The maximum number of bytes sent or received after which a
+	// new key is negotiated. It must be at least 256. If
+	// unspecified, 1 gigabyte is used.
+	RekeyThreshold uint64
+
+	// The allowed key exchange algorithms. If unspecified then a
+	// default set of algorithms is used.
+	KeyExchanges []string
+
+	// The allowed cipher algorithms. If unspecified then a sensible
+	// default is used.
+	Ciphers []string
+
+	// The allowed MAC algorithms. If unspecified then a sensible default
+	// is used.
+	MACs []string
+}
+
+// SetDefaults sets sensible values for unset fields in config. This is
+// exported for testing: Configs passed to SSH functions are copied and have
+// default values set automatically.
+func (c *Config) SetDefaults() {
+	if c.Rand == nil {
+		c.Rand = rand.Reader
+	}
+	if c.Ciphers == nil {
+		c.Ciphers = supportedCiphers
+	}
+	var ciphers []string
+	for _, c := range c.Ciphers {
+		if cipherModes[c] != nil {
+			// Keep the cipher only if we have a cipherModes
+			// definition for it; unknown names are dropped.
+			ciphers = append(ciphers, c)
+		}
+	}
+	c.Ciphers = ciphers
+
+	if c.KeyExchanges == nil {
+		c.KeyExchanges = supportedKexAlgos
+	}
+
+	if c.MACs == nil {
+		c.MACs = supportedMACs
+	}
+
+	if c.RekeyThreshold == 0 {
+		// RFC 4253, section 9 suggests rekeying after 1G.
+		c.RekeyThreshold = 1 << 30
+	}
+	if c.RekeyThreshold < minRekeyThreshold {
+		c.RekeyThreshold = minRekeyThreshold
+	}
+}
+
+// buildDataSignedForAuth returns the data that is signed in order to prove
+// possession of a private key. See RFC 4252, section 7.
+func buildDataSignedForAuth(sessionId []byte, req userAuthRequestMsg, algo, pubKey []byte) []byte {
+	data := struct {
+		Session []byte
+		Type    byte
+		User    string
+		Service string
+		Method  string
+		Sign    bool
+		Algo    []byte
+		PubKey  []byte
+	}{
+		sessionId,
+		msgUserAuthRequest,
+		req.User,
+		req.Service,
+		req.Method,
+		true,
+		algo,
+		pubKey,
+	}
+	return Marshal(data)
+}
+
+func appendU16(buf []byte, n uint16) []byte {
+	return append(buf, byte(n>>8), byte(n))
+}
+
+func appendU32(buf []byte, n uint32) []byte {
+	return append(buf, byte(n>>24), byte(n>>16), byte(n>>8), byte(n))
+}
+
+func appendU64(buf []byte, n uint64) []byte {
+	return append(buf,
+		byte(n>>56), byte(n>>48), byte(n>>40), byte(n>>32),
+		byte(n>>24), byte(n>>16), byte(n>>8), byte(n))
+}
+
+func appendInt(buf []byte, n int) []byte {
+	return appendU32(buf, uint32(n))
+}
+
+func appendString(buf []byte, s string) []byte {
+	buf = appendU32(buf, uint32(len(s)))
+	buf = append(buf, s...)
+	return buf
+}
+
+func appendBool(buf []byte, b bool) []byte {
+	if b {
+		return append(buf, 1)
+	}
+	return append(buf, 0)
+}
+
+// newCond is a helper to hide the fact that there is no usable zero
+// value for sync.Cond.
+func newCond() *sync.Cond { return sync.NewCond(new(sync.Mutex)) }
+
+// window represents the buffer available to clients
+// wishing to write to a channel.
+type window struct {
+	*sync.Cond
+	win          uint32 // RFC 4254 5.2 says the window size can grow to 2^32-1
+	writeWaiters int
+	closed       bool
+}
+
+// add adds win to the amount of window available
+// for consumers.
+func (w *window) add(win uint32) bool {
+	// A zero-sized window adjustment is a no-op.
+	if win == 0 {
+		return true
+	}
+	w.L.Lock()
+	if w.win+win < win {
+		w.L.Unlock()
+		return false
+	}
+	w.win += win
+	// It is unusual that multiple goroutines would be attempting to reserve
+	// window space, but not guaranteed. Use broadcast to notify all waiters
+	// that additional window is available.
+	w.Broadcast()
+	w.L.Unlock()
+	return true
+}
+
+// close sets the window to closed, so all reservations fail
+// immediately.
+func (w *window) close() {
+	w.L.Lock()
+	w.closed = true
+	w.Broadcast()
+	w.L.Unlock()
+}
+
+// reserve reserves win from the available window capacity.
+// If no capacity remains, reserve will block. reserve may
+// return less than requested.
+func (w *window) reserve(win uint32) (uint32, error) {
+	var err error
+	w.L.Lock()
+	w.writeWaiters++
+	w.Broadcast()
+	for w.win == 0 && !w.closed {
+		w.Wait()
+	}
+	w.writeWaiters--
+	if w.win < win {
+		win = w.win
+	}
+	w.win -= win
+	if w.closed {
+		err = io.EOF
+	}
+	w.L.Unlock()
+	return win, err
+}
+
+// waitWriterBlocked waits until some goroutine is blocked for further
+// writes. It is used in tests only.
+func (w *window) waitWriterBlocked() {
+	w.Cond.L.Lock()
+	for w.writeWaiters == 0 {
+		w.Cond.Wait()
+	}
+	w.Cond.L.Unlock()
+}
diff --git a/vendor/golang.org/x/crypto/ssh/connection.go b/vendor/golang.org/x/crypto/ssh/connection.go
new file mode 100644
index 000000000..979d919e8
--- /dev/null
+++ b/vendor/golang.org/x/crypto/ssh/connection.go
@@ -0,0 +1,144 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssh
+
+import (
+	"fmt"
+	"net"
+)
+
+// OpenChannelError is returned if the other side rejects an
+// OpenChannel request.
+type OpenChannelError struct {
+	Reason  RejectionReason
+	Message string
+}
+
+func (e *OpenChannelError) Error() string {
+	return fmt.Sprintf("ssh: rejected: %s (%s)", e.Reason, e.Message)
+}
+
+// ConnMetadata holds metadata for the connection.
+type ConnMetadata interface {
+	// User returns the user ID for this connection.
+	// It is empty if no authentication is used.
+	User() string
+
+	// SessionID returns the session hash, also denoted by H.
+	SessionID() []byte
+
+	// ClientVersion returns the client's version string as hashed
+	// into the session ID.
+	ClientVersion() []byte
+
+	// ServerVersion returns the server's version string as hashed
+	// into the session ID.
+	ServerVersion() []byte
+
+	// RemoteAddr returns the remote address for this connection.
+	RemoteAddr() net.Addr
+
+	// LocalAddr returns the local address for this connection.
+	LocalAddr() net.Addr
+}
+
+// Conn represents an SSH connection for both server and client roles.
+// Conn is the basis for implementing an application layer, such
+// as ClientConn, which implements the traditional shell access for
+// clients.
+type Conn interface {
+	ConnMetadata
+
+	// SendRequest sends a global request, and returns the
+	// reply. If wantReply is true, it returns the response status
+	// and payload. See also RFC 4254, section 4.
+	SendRequest(name string, wantReply bool, payload []byte) (bool, []byte, error)
+
+	// OpenChannel tries to open a channel.
If the request is + // rejected, it returns *OpenChannelError. On success it returns + // the SSH Channel and a Go channel for incoming, out-of-band + // requests. The Go channel must be serviced, or the + // connection will hang. + OpenChannel(name string, data []byte) (Channel, <-chan *Request, error) + + // Close closes the underlying network connection + Close() error + + // Wait blocks until the connection has shut down, and returns the + // error causing the shutdown. + Wait() error + + // TODO(hanwen): consider exposing: + // RequestKeyChange + // Disconnect +} + +// DiscardRequests consumes and rejects all requests from the +// passed-in channel. +func DiscardRequests(in <-chan *Request) { + for req := range in { + if req.WantReply { + req.Reply(false, nil) + } + } +} + +// A connection represents an incoming connection. +type connection struct { + transport *handshakeTransport + sshConn + + // The connection protocol. + *mux +} + +func (c *connection) Close() error { + return c.sshConn.conn.Close() +} + +// sshconn provides net.Conn metadata, but disallows direct reads and +// writes. +type sshConn struct { + conn net.Conn + + user string + sessionID []byte + clientVersion []byte + serverVersion []byte +} + +func dup(src []byte) []byte { + dst := make([]byte, len(src)) + copy(dst, src) + return dst +} + +func (c *sshConn) User() string { + return c.user +} + +func (c *sshConn) RemoteAddr() net.Addr { + return c.conn.RemoteAddr() +} + +func (c *sshConn) Close() error { + return c.conn.Close() +} + +func (c *sshConn) LocalAddr() net.Addr { + return c.conn.LocalAddr() +} + +func (c *sshConn) SessionID() []byte { + return dup(c.sessionID) +} + +func (c *sshConn) ClientVersion() []byte { + return dup(c.clientVersion) +} + +func (c *sshConn) ServerVersion() []byte { + return dup(c.serverVersion) +} diff --git a/vendor/golang.org/x/crypto/ssh/doc.go b/vendor/golang.org/x/crypto/ssh/doc.go new file mode 100644 index 000000000..a5ff8af6b --- /dev/null +++ b/vendor/golang.org/x/crypto/ssh/doc.go @@ -0,0 +1,18 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +/* +Package ssh implements an SSH client and server. + +SSH is a transport security protocol, an authentication protocol and a +family of application protocols. The most typical application level +protocol is a remote shell and this is specifically implemented. However, +the multiplexed nature of SSH is exposed to users that wish to support +others. + +References: + [PROTOCOL.certkeys]: http://cvsweb.openbsd.org/cgi-bin/cvsweb/src/usr.bin/ssh/PROTOCOL.certkeys?rev=HEAD + [SSH-PARAMETERS]: http://www.iana.org/assignments/ssh-parameters/ssh-parameters.xml#ssh-parameters-1 +*/ +package ssh diff --git a/vendor/golang.org/x/crypto/ssh/example_test.go b/vendor/golang.org/x/crypto/ssh/example_test.go new file mode 100644 index 000000000..dfd9dcab6 --- /dev/null +++ b/vendor/golang.org/x/crypto/ssh/example_test.go @@ -0,0 +1,211 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ssh_test + +import ( + "bytes" + "fmt" + "io/ioutil" + "log" + "net" + "net/http" + + "golang.org/x/crypto/ssh" + "golang.org/x/crypto/ssh/terminal" +) + +func ExampleNewServerConn() { + // An SSH server is represented by a ServerConfig, which holds + // certificate details and handles authentication of ServerConns. 
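+	// This example accepts a single hard-coded user/password pair and
+	// rejects every other login attempt.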
+ config := &ssh.ServerConfig{ + PasswordCallback: func(c ssh.ConnMetadata, pass []byte) (*ssh.Permissions, error) { + // Should use constant-time compare (or better, salt+hash) in + // a production setting. + if c.User() == "testuser" && string(pass) == "tiger" { + return nil, nil + } + return nil, fmt.Errorf("password rejected for %q", c.User()) + }, + } + + privateBytes, err := ioutil.ReadFile("id_rsa") + if err != nil { + panic("Failed to load private key") + } + + private, err := ssh.ParsePrivateKey(privateBytes) + if err != nil { + panic("Failed to parse private key") + } + + config.AddHostKey(private) + + // Once a ServerConfig has been configured, connections can be + // accepted. + listener, err := net.Listen("tcp", "0.0.0.0:2022") + if err != nil { + panic("failed to listen for connection") + } + nConn, err := listener.Accept() + if err != nil { + panic("failed to accept incoming connection") + } + + // Before use, a handshake must be performed on the incoming + // net.Conn. + _, chans, reqs, err := ssh.NewServerConn(nConn, config) + if err != nil { + panic("failed to handshake") + } + // The incoming Request channel must be serviced. + go ssh.DiscardRequests(reqs) + + // Service the incoming Channel channel. + for newChannel := range chans { + // Channels have a type, depending on the application level + // protocol intended. In the case of a shell, the type is + // "session" and ServerShell may be used to present a simple + // terminal interface. + if newChannel.ChannelType() != "session" { + newChannel.Reject(ssh.UnknownChannelType, "unknown channel type") + continue + } + channel, requests, err := newChannel.Accept() + if err != nil { + panic("could not accept channel.") + } + + // Sessions have out-of-band requests such as "shell", + // "pty-req" and "env". Here we handle only the + // "shell" request. + go func(in <-chan *ssh.Request) { + for req := range in { + ok := false + switch req.Type { + case "shell": + ok = true + if len(req.Payload) > 0 { + // We don't accept any + // commands, only the + // default shell. + ok = false + } + } + req.Reply(ok, nil) + } + }(requests) + + term := terminal.NewTerminal(channel, "> ") + + go func() { + defer channel.Close() + for { + line, err := term.ReadLine() + if err != nil { + break + } + fmt.Println(line) + } + }() + } +} + +func ExampleDial() { + // An SSH client is represented with a ClientConn. Currently only + // the "password" authentication method is supported. + // + // To authenticate with the remote server you must pass at least one + // implementation of AuthMethod via the Auth field in ClientConfig. + config := &ssh.ClientConfig{ + User: "username", + Auth: []ssh.AuthMethod{ + ssh.Password("yourpassword"), + }, + } + client, err := ssh.Dial("tcp", "yourserver.com:22", config) + if err != nil { + panic("Failed to dial: " + err.Error()) + } + + // Each ClientConn can support multiple interactive sessions, + // represented by a Session. + session, err := client.NewSession() + if err != nil { + panic("Failed to create session: " + err.Error()) + } + defer session.Close() + + // Once a Session is created, you can execute a single command on + // the remote side using the Run method. + var b bytes.Buffer + session.Stdout = &b + if err := session.Run("/usr/bin/whoami"); err != nil { + panic("Failed to run: " + err.Error()) + } + fmt.Println(b.String()) +} + +func ExampleClient_Listen() { + config := &ssh.ClientConfig{ + User: "username", + Auth: []ssh.AuthMethod{ + ssh.Password("password"), + }, + } + // Dial your ssh server. 
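+	// (Address and credentials are placeholders; the Listen call below
+	// asks the server for a remote "tcpip-forward", which the server
+	// may refuse.)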
+ conn, err := ssh.Dial("tcp", "localhost:22", config) + if err != nil { + log.Fatalf("unable to connect: %s", err) + } + defer conn.Close() + + // Request the remote side to open port 8080 on all interfaces. + l, err := conn.Listen("tcp", "0.0.0.0:8080") + if err != nil { + log.Fatalf("unable to register tcp forward: %v", err) + } + defer l.Close() + + // Serve HTTP with your SSH server acting as a reverse proxy. + http.Serve(l, http.HandlerFunc(func(resp http.ResponseWriter, req *http.Request) { + fmt.Fprintf(resp, "Hello world!\n") + })) +} + +func ExampleSession_RequestPty() { + // Create client config + config := &ssh.ClientConfig{ + User: "username", + Auth: []ssh.AuthMethod{ + ssh.Password("password"), + }, + } + // Connect to ssh server + conn, err := ssh.Dial("tcp", "localhost:22", config) + if err != nil { + log.Fatalf("unable to connect: %s", err) + } + defer conn.Close() + // Create a session + session, err := conn.NewSession() + if err != nil { + log.Fatalf("unable to create session: %s", err) + } + defer session.Close() + // Set up terminal modes + modes := ssh.TerminalModes{ + ssh.ECHO: 0, // disable echoing + ssh.TTY_OP_ISPEED: 14400, // input speed = 14.4kbaud + ssh.TTY_OP_OSPEED: 14400, // output speed = 14.4kbaud + } + // Request pseudo terminal + if err := session.RequestPty("xterm", 80, 40, modes); err != nil { + log.Fatalf("request for pseudo terminal failed: %s", err) + } + // Start remote shell + if err := session.Shell(); err != nil { + log.Fatalf("failed to start shell: %s", err) + } +} diff --git a/vendor/golang.org/x/crypto/ssh/handshake.go b/vendor/golang.org/x/crypto/ssh/handshake.go new file mode 100644 index 000000000..1c54f7587 --- /dev/null +++ b/vendor/golang.org/x/crypto/ssh/handshake.go @@ -0,0 +1,412 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ssh + +import ( + "crypto/rand" + "errors" + "fmt" + "io" + "log" + "net" + "sync" +) + +// debugHandshake, if set, prints messages sent and received. Key +// exchange messages are printed as if DH were used, so the debug +// messages are wrong when using ECDH. +const debugHandshake = false + +// keyingTransport is a packet based transport that supports key +// changes. It need not be thread-safe. It should pass through +// msgNewKeys in both directions. +type keyingTransport interface { + packetConn + + // prepareKeyChange sets up a key change. The key change for a + // direction will be effected if a msgNewKeys message is sent + // or received. + prepareKeyChange(*algorithms, *kexResult) error + + // getSessionID returns the session ID. prepareKeyChange must + // have been called once. + getSessionID() []byte +} + +// rekeyingTransport is the interface of handshakeTransport that we +// (internally) expose to ClientConn and ServerConn. +type rekeyingTransport interface { + packetConn + + // requestKeyChange asks the remote side to change keys. All + // writes are blocked until the key change succeeds, which is + // signaled by reading a msgNewKeys. + requestKeyChange() error + + // getSessionID returns the session ID. This is only valid + // after the first key change has completed. + getSessionID() []byte +} + +// handshakeTransport implements rekeying on top of a keyingTransport +// and offers a thread-safe writePacket() interface. 
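+// While a key exchange is in flight, writePacket blocks on the
+// condition variable below until the exchange completes or fails.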
+type handshakeTransport struct { + conn keyingTransport + config *Config + + serverVersion []byte + clientVersion []byte + + // hostKeys is non-empty if we are the server. In that case, + // it contains all host keys that can be used to sign the + // connection. + hostKeys []Signer + + // hostKeyAlgorithms is non-empty if we are the client. In that case, + // we accept these key types from the server as host key. + hostKeyAlgorithms []string + + // On read error, incoming is closed, and readError is set. + incoming chan []byte + readError error + + // data for host key checking + hostKeyCallback func(hostname string, remote net.Addr, key PublicKey) error + dialAddress string + remoteAddr net.Addr + + readSinceKex uint64 + + // Protects the writing side of the connection + mu sync.Mutex + cond *sync.Cond + sentInitPacket []byte + sentInitMsg *kexInitMsg + writtenSinceKex uint64 + writeError error +} + +func newHandshakeTransport(conn keyingTransport, config *Config, clientVersion, serverVersion []byte) *handshakeTransport { + t := &handshakeTransport{ + conn: conn, + serverVersion: serverVersion, + clientVersion: clientVersion, + incoming: make(chan []byte, 16), + config: config, + } + t.cond = sync.NewCond(&t.mu) + return t +} + +func newClientTransport(conn keyingTransport, clientVersion, serverVersion []byte, config *ClientConfig, dialAddr string, addr net.Addr) *handshakeTransport { + t := newHandshakeTransport(conn, &config.Config, clientVersion, serverVersion) + t.dialAddress = dialAddr + t.remoteAddr = addr + t.hostKeyCallback = config.HostKeyCallback + if config.HostKeyAlgorithms != nil { + t.hostKeyAlgorithms = config.HostKeyAlgorithms + } else { + t.hostKeyAlgorithms = supportedHostKeyAlgos + } + go t.readLoop() + return t +} + +func newServerTransport(conn keyingTransport, clientVersion, serverVersion []byte, config *ServerConfig) *handshakeTransport { + t := newHandshakeTransport(conn, &config.Config, clientVersion, serverVersion) + t.hostKeys = config.hostKeys + go t.readLoop() + return t +} + +func (t *handshakeTransport) getSessionID() []byte { + return t.conn.getSessionID() +} + +func (t *handshakeTransport) id() string { + if len(t.hostKeys) > 0 { + return "server" + } + return "client" +} + +func (t *handshakeTransport) readPacket() ([]byte, error) { + p, ok := <-t.incoming + if !ok { + return nil, t.readError + } + return p, nil +} + +func (t *handshakeTransport) readLoop() { + for { + p, err := t.readOnePacket() + if err != nil { + t.readError = err + close(t.incoming) + break + } + if p[0] == msgIgnore || p[0] == msgDebug { + continue + } + t.incoming <- p + } + + // If we can't read, declare the writing part dead too. + t.mu.Lock() + defer t.mu.Unlock() + if t.writeError == nil { + t.writeError = t.readError + } + t.cond.Broadcast() +} + +func (t *handshakeTransport) readOnePacket() ([]byte, error) { + if t.readSinceKex > t.config.RekeyThreshold { + if err := t.requestKeyChange(); err != nil { + return nil, err + } + } + + p, err := t.conn.readPacket() + if err != nil { + return nil, err + } + + t.readSinceKex += uint64(len(p)) + if debugHandshake { + msg, err := decode(p) + log.Printf("%s got %T %v (%v)", t.id(), msg, msg, err) + } + if p[0] != msgKexInit { + return p, nil + } + err = t.enterKeyExchange(p) + + t.mu.Lock() + if err != nil { + // drop connection + t.conn.Close() + t.writeError = err + } + + if debugHandshake { + log.Printf("%s exited key exchange, err %v", t.id(), err) + } + + // Unblock writers. 
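+	// Clearing sentInitMsg (together with the broadcast below) releases
+	// any writePacket calls parked on t.cond.Wait.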
+ t.sentInitMsg = nil + t.sentInitPacket = nil + t.cond.Broadcast() + t.writtenSinceKex = 0 + t.mu.Unlock() + + if err != nil { + return nil, err + } + + t.readSinceKex = 0 + return []byte{msgNewKeys}, nil +} + +// sendKexInit sends a key change message, and returns the message +// that was sent. After initiating the key change, all writes will be +// blocked until the change is done, and a failed key change will +// close the underlying transport. This function is safe for +// concurrent use by multiple goroutines. +func (t *handshakeTransport) sendKexInit() (*kexInitMsg, []byte, error) { + t.mu.Lock() + defer t.mu.Unlock() + return t.sendKexInitLocked() +} + +func (t *handshakeTransport) requestKeyChange() error { + _, _, err := t.sendKexInit() + return err +} + +// sendKexInitLocked sends a key change message. t.mu must be locked +// while this happens. +func (t *handshakeTransport) sendKexInitLocked() (*kexInitMsg, []byte, error) { + // kexInits may be sent either in response to the other side, + // or because our side wants to initiate a key change, so we + // may have already sent a kexInit. In that case, don't send a + // second kexInit. + if t.sentInitMsg != nil { + return t.sentInitMsg, t.sentInitPacket, nil + } + msg := &kexInitMsg{ + KexAlgos: t.config.KeyExchanges, + CiphersClientServer: t.config.Ciphers, + CiphersServerClient: t.config.Ciphers, + MACsClientServer: t.config.MACs, + MACsServerClient: t.config.MACs, + CompressionClientServer: supportedCompressions, + CompressionServerClient: supportedCompressions, + } + io.ReadFull(rand.Reader, msg.Cookie[:]) + + if len(t.hostKeys) > 0 { + for _, k := range t.hostKeys { + msg.ServerHostKeyAlgos = append( + msg.ServerHostKeyAlgos, k.PublicKey().Type()) + } + } else { + msg.ServerHostKeyAlgos = t.hostKeyAlgorithms + } + packet := Marshal(msg) + + // writePacket destroys the contents, so save a copy. + packetCopy := make([]byte, len(packet)) + copy(packetCopy, packet) + + if err := t.conn.writePacket(packetCopy); err != nil { + return nil, nil, err + } + + t.sentInitMsg = msg + t.sentInitPacket = packet + return msg, packet, nil +} + +func (t *handshakeTransport) writePacket(p []byte) error { + t.mu.Lock() + defer t.mu.Unlock() + + if t.writtenSinceKex > t.config.RekeyThreshold { + t.sendKexInitLocked() + } + for t.sentInitMsg != nil && t.writeError == nil { + t.cond.Wait() + } + if t.writeError != nil { + return t.writeError + } + t.writtenSinceKex += uint64(len(p)) + + switch p[0] { + case msgKexInit: + return errors.New("ssh: only handshakeTransport can send kexInit") + case msgNewKeys: + return errors.New("ssh: only handshakeTransport can send newKeys") + default: + return t.conn.writePacket(p) + } +} + +func (t *handshakeTransport) Close() error { + return t.conn.Close() +} + +// enterKeyExchange runs the key exchange. 
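+// It unmarshals the peer's kexInit, sends ours if one is not already
+// outstanding, negotiates algorithms, runs the agreed exchange, and
+// finishes with msgNewKeys in both directions.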
+func (t *handshakeTransport) enterKeyExchange(otherInitPacket []byte) error { + if debugHandshake { + log.Printf("%s entered key exchange", t.id()) + } + myInit, myInitPacket, err := t.sendKexInit() + if err != nil { + return err + } + + otherInit := &kexInitMsg{} + if err := Unmarshal(otherInitPacket, otherInit); err != nil { + return err + } + + magics := handshakeMagics{ + clientVersion: t.clientVersion, + serverVersion: t.serverVersion, + clientKexInit: otherInitPacket, + serverKexInit: myInitPacket, + } + + clientInit := otherInit + serverInit := myInit + if len(t.hostKeys) == 0 { + clientInit = myInit + serverInit = otherInit + + magics.clientKexInit = myInitPacket + magics.serverKexInit = otherInitPacket + } + + algs, err := findAgreedAlgorithms(clientInit, serverInit) + if err != nil { + return err + } + + // We don't send FirstKexFollows, but we handle receiving it. + if otherInit.FirstKexFollows && algs.kex != otherInit.KexAlgos[0] { + // other side sent a kex message for the wrong algorithm, + // which we have to ignore. + if _, err := t.conn.readPacket(); err != nil { + return err + } + } + + kex, ok := kexAlgoMap[algs.kex] + if !ok { + return fmt.Errorf("ssh: unexpected key exchange algorithm %v", algs.kex) + } + + var result *kexResult + if len(t.hostKeys) > 0 { + result, err = t.server(kex, algs, &magics) + } else { + result, err = t.client(kex, algs, &magics) + } + + if err != nil { + return err + } + + t.conn.prepareKeyChange(algs, result) + if err = t.conn.writePacket([]byte{msgNewKeys}); err != nil { + return err + } + if packet, err := t.conn.readPacket(); err != nil { + return err + } else if packet[0] != msgNewKeys { + return unexpectedMessageError(msgNewKeys, packet[0]) + } + return nil +} + +func (t *handshakeTransport) server(kex kexAlgorithm, algs *algorithms, magics *handshakeMagics) (*kexResult, error) { + var hostKey Signer + for _, k := range t.hostKeys { + if algs.hostKey == k.PublicKey().Type() { + hostKey = k + } + } + + r, err := kex.Server(t.conn, t.config.Rand, magics, hostKey) + return r, err +} + +func (t *handshakeTransport) client(kex kexAlgorithm, algs *algorithms, magics *handshakeMagics) (*kexResult, error) { + result, err := kex.Client(t.conn, t.config.Rand, magics) + if err != nil { + return nil, err + } + + hostKey, err := ParsePublicKey(result.HostKey) + if err != nil { + return nil, err + } + + if err := verifyHostKeySignature(hostKey, result); err != nil { + return nil, err + } + + if t.hostKeyCallback != nil { + err = t.hostKeyCallback(t.dialAddress, t.remoteAddr, hostKey) + if err != nil { + return nil, err + } + } + + return result, nil +} diff --git a/vendor/golang.org/x/crypto/ssh/handshake_test.go b/vendor/golang.org/x/crypto/ssh/handshake_test.go new file mode 100644 index 000000000..b86d369cc --- /dev/null +++ b/vendor/golang.org/x/crypto/ssh/handshake_test.go @@ -0,0 +1,415 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package ssh + +import ( + "bytes" + "crypto/rand" + "errors" + "fmt" + "net" + "runtime" + "strings" + "sync" + "testing" +) + +type testChecker struct { + calls []string +} + +func (t *testChecker) Check(dialAddr string, addr net.Addr, key PublicKey) error { + if dialAddr == "bad" { + return fmt.Errorf("dialAddr is bad") + } + + if tcpAddr, ok := addr.(*net.TCPAddr); !ok || tcpAddr == nil { + return fmt.Errorf("testChecker: got %T want *net.TCPAddr", addr) + } + + t.calls = append(t.calls, fmt.Sprintf("%s %v %s %x", dialAddr, addr, key.Type(), key.Marshal())) + + return nil +} + +// netPipe is analogous to net.Pipe, but it uses a real net.Conn, and +// therefore is buffered (net.Pipe deadlocks if both sides start with +// a write.) +func netPipe() (net.Conn, net.Conn, error) { + listener, err := net.Listen("tcp", "127.0.0.1:0") + if err != nil { + return nil, nil, err + } + defer listener.Close() + c1, err := net.Dial("tcp", listener.Addr().String()) + if err != nil { + return nil, nil, err + } + + c2, err := listener.Accept() + if err != nil { + c1.Close() + return nil, nil, err + } + + return c1, c2, nil +} + +func handshakePair(clientConf *ClientConfig, addr string) (client *handshakeTransport, server *handshakeTransport, err error) { + a, b, err := netPipe() + if err != nil { + return nil, nil, err + } + + trC := newTransport(a, rand.Reader, true) + trS := newTransport(b, rand.Reader, false) + clientConf.SetDefaults() + + v := []byte("version") + client = newClientTransport(trC, v, v, clientConf, addr, a.RemoteAddr()) + + serverConf := &ServerConfig{} + serverConf.AddHostKey(testSigners["ecdsa"]) + serverConf.AddHostKey(testSigners["rsa"]) + serverConf.SetDefaults() + server = newServerTransport(trS, v, v, serverConf) + + return client, server, nil +} + +func TestHandshakeBasic(t *testing.T) { + if runtime.GOOS == "plan9" { + t.Skip("see golang.org/issue/7237") + } + checker := &testChecker{} + trC, trS, err := handshakePair(&ClientConfig{HostKeyCallback: checker.Check}, "addr") + if err != nil { + t.Fatalf("handshakePair: %v", err) + } + + defer trC.Close() + defer trS.Close() + + go func() { + // Client writes a bunch of stuff, and does a key + // change in the middle. This should not confuse the + // handshake in progress + for i := 0; i < 10; i++ { + p := []byte{msgRequestSuccess, byte(i)} + if err := trC.writePacket(p); err != nil { + t.Fatalf("sendPacket: %v", err) + } + if i == 5 { + // halfway through, we request a key change. + _, _, err := trC.sendKexInit() + if err != nil { + t.Fatalf("sendKexInit: %v", err) + } + } + } + trC.Close() + }() + + // Server checks that client messages come in cleanly + i := 0 + for { + p, err := trS.readPacket() + if err != nil { + break + } + if p[0] == msgNewKeys { + continue + } + want := []byte{msgRequestSuccess, byte(i)} + if bytes.Compare(p, want) != 0 { + t.Errorf("message %d: got %q, want %q", i, p, want) + } + i++ + } + if i != 10 { + t.Errorf("received %d messages, want 10.", i) + } + + // If all went well, we registered exactly 1 key change. 
+ if len(checker.calls) != 1 { + t.Fatalf("got %d host key checks, want 1", len(checker.calls)) + } + + pub := testSigners["ecdsa"].PublicKey() + want := fmt.Sprintf("%s %v %s %x", "addr", trC.remoteAddr, pub.Type(), pub.Marshal()) + if want != checker.calls[0] { + t.Errorf("got %q want %q for host key check", checker.calls[0], want) + } +} + +func TestHandshakeError(t *testing.T) { + checker := &testChecker{} + trC, trS, err := handshakePair(&ClientConfig{HostKeyCallback: checker.Check}, "bad") + if err != nil { + t.Fatalf("handshakePair: %v", err) + } + defer trC.Close() + defer trS.Close() + + // send a packet + packet := []byte{msgRequestSuccess, 42} + if err := trC.writePacket(packet); err != nil { + t.Errorf("writePacket: %v", err) + } + + // Now request a key change. + _, _, err = trC.sendKexInit() + if err != nil { + t.Errorf("sendKexInit: %v", err) + } + + // the key change will fail, and afterwards we can't write. + if err := trC.writePacket([]byte{msgRequestSuccess, 43}); err == nil { + t.Errorf("writePacket after botched rekey succeeded.") + } + + readback, err := trS.readPacket() + if err != nil { + t.Fatalf("server closed too soon: %v", err) + } + if bytes.Compare(readback, packet) != 0 { + t.Errorf("got %q want %q", readback, packet) + } + readback, err = trS.readPacket() + if err == nil { + t.Errorf("got a message %q after failed key change", readback) + } +} + +func TestHandshakeTwice(t *testing.T) { + checker := &testChecker{} + trC, trS, err := handshakePair(&ClientConfig{HostKeyCallback: checker.Check}, "addr") + if err != nil { + t.Fatalf("handshakePair: %v", err) + } + + defer trC.Close() + defer trS.Close() + + // send a packet + packet := make([]byte, 5) + packet[0] = msgRequestSuccess + if err := trC.writePacket(packet); err != nil { + t.Errorf("writePacket: %v", err) + } + + // Now request a key change. + _, _, err = trC.sendKexInit() + if err != nil { + t.Errorf("sendKexInit: %v", err) + } + + // Send another packet. Use a fresh one, since writePacket destroys. + packet = make([]byte, 5) + packet[0] = msgRequestSuccess + if err := trC.writePacket(packet); err != nil { + t.Errorf("writePacket: %v", err) + } + + // 2nd key change. 
+ _, _, err = trC.sendKexInit() + if err != nil { + t.Errorf("sendKexInit: %v", err) + } + + packet = make([]byte, 5) + packet[0] = msgRequestSuccess + if err := trC.writePacket(packet); err != nil { + t.Errorf("writePacket: %v", err) + } + + packet = make([]byte, 5) + packet[0] = msgRequestSuccess + for i := 0; i < 5; i++ { + msg, err := trS.readPacket() + if err != nil { + t.Fatalf("server closed too soon: %v", err) + } + if msg[0] == msgNewKeys { + continue + } + + if bytes.Compare(msg, packet) != 0 { + t.Errorf("packet %d: got %q want %q", i, msg, packet) + } + } + if len(checker.calls) != 2 { + t.Errorf("got %d key changes, want 2", len(checker.calls)) + } +} + +func TestHandshakeAutoRekeyWrite(t *testing.T) { + checker := &testChecker{} + clientConf := &ClientConfig{HostKeyCallback: checker.Check} + clientConf.RekeyThreshold = 500 + trC, trS, err := handshakePair(clientConf, "addr") + if err != nil { + t.Fatalf("handshakePair: %v", err) + } + defer trC.Close() + defer trS.Close() + + for i := 0; i < 5; i++ { + packet := make([]byte, 251) + packet[0] = msgRequestSuccess + if err := trC.writePacket(packet); err != nil { + t.Errorf("writePacket: %v", err) + } + } + + j := 0 + for ; j < 5; j++ { + _, err := trS.readPacket() + if err != nil { + break + } + } + + if j != 5 { + t.Errorf("got %d, want 5 messages", j) + } + + if len(checker.calls) != 2 { + t.Errorf("got %d key changes, wanted 2", len(checker.calls)) + } +} + +type syncChecker struct { + called chan int +} + +func (t *syncChecker) Check(dialAddr string, addr net.Addr, key PublicKey) error { + t.called <- 1 + return nil +} + +func TestHandshakeAutoRekeyRead(t *testing.T) { + sync := &syncChecker{make(chan int, 2)} + clientConf := &ClientConfig{ + HostKeyCallback: sync.Check, + } + clientConf.RekeyThreshold = 500 + + trC, trS, err := handshakePair(clientConf, "addr") + if err != nil { + t.Fatalf("handshakePair: %v", err) + } + defer trC.Close() + defer trS.Close() + + packet := make([]byte, 501) + packet[0] = msgRequestSuccess + if err := trS.writePacket(packet); err != nil { + t.Fatalf("writePacket: %v", err) + } + // While we read out the packet, a key change will be + // initiated. + if _, err := trC.readPacket(); err != nil { + t.Fatalf("readPacket(client): %v", err) + } + + <-sync.called +} + +// errorKeyingTransport generates errors after a given number of +// read/write operations. +type errorKeyingTransport struct { + packetConn + readLeft, writeLeft int +} + +func (n *errorKeyingTransport) prepareKeyChange(*algorithms, *kexResult) error { + return nil +} +func (n *errorKeyingTransport) getSessionID() []byte { + return nil +} + +func (n *errorKeyingTransport) writePacket(packet []byte) error { + if n.writeLeft == 0 { + n.Close() + return errors.New("barf") + } + + n.writeLeft-- + return n.packetConn.writePacket(packet) +} + +func (n *errorKeyingTransport) readPacket() ([]byte, error) { + if n.readLeft == 0 { + n.Close() + return nil, errors.New("barf") + } + + n.readLeft-- + return n.packetConn.readPacket() +} + +func TestHandshakeErrorHandlingRead(t *testing.T) { + for i := 0; i < 20; i++ { + testHandshakeErrorHandlingN(t, i, -1) + } +} + +func TestHandshakeErrorHandlingWrite(t *testing.T) { + for i := 0; i < 20; i++ { + testHandshakeErrorHandlingN(t, -1, i) + } +} + +// testHandshakeErrorHandlingN runs handshakes, injecting errors. If +// handshakeTransport deadlocks, the go runtime will detect it and +// panic. 
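+// (A full deadlock aborts the test binary with "all goroutines are
+// asleep", so simply running to completion is the assertion here.)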
+func testHandshakeErrorHandlingN(t *testing.T, readLimit, writeLimit int) { + msg := Marshal(&serviceRequestMsg{strings.Repeat("x", int(minRekeyThreshold)/4)}) + + a, b := memPipe() + defer a.Close() + defer b.Close() + + key := testSigners["ecdsa"] + serverConf := Config{RekeyThreshold: minRekeyThreshold} + serverConf.SetDefaults() + serverConn := newHandshakeTransport(&errorKeyingTransport{a, readLimit, writeLimit}, &serverConf, []byte{'a'}, []byte{'b'}) + serverConn.hostKeys = []Signer{key} + go serverConn.readLoop() + + clientConf := Config{RekeyThreshold: 10 * minRekeyThreshold} + clientConf.SetDefaults() + clientConn := newHandshakeTransport(&errorKeyingTransport{b, -1, -1}, &clientConf, []byte{'a'}, []byte{'b'}) + clientConn.hostKeyAlgorithms = []string{key.PublicKey().Type()} + go clientConn.readLoop() + + var wg sync.WaitGroup + wg.Add(4) + + for _, hs := range []packetConn{serverConn, clientConn} { + go func(c packetConn) { + for { + err := c.writePacket(msg) + if err != nil { + break + } + } + wg.Done() + }(hs) + go func(c packetConn) { + for { + _, err := c.readPacket() + if err != nil { + break + } + } + wg.Done() + }(hs) + } + + wg.Wait() +} diff --git a/vendor/golang.org/x/crypto/ssh/kex.go b/vendor/golang.org/x/crypto/ssh/kex.go new file mode 100644 index 000000000..3ec603c0a --- /dev/null +++ b/vendor/golang.org/x/crypto/ssh/kex.go @@ -0,0 +1,526 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ssh + +import ( + "crypto" + "crypto/ecdsa" + "crypto/elliptic" + "crypto/rand" + "crypto/subtle" + "errors" + "io" + "math/big" + + "golang.org/x/crypto/curve25519" +) + +const ( + kexAlgoDH1SHA1 = "diffie-hellman-group1-sha1" + kexAlgoDH14SHA1 = "diffie-hellman-group14-sha1" + kexAlgoECDH256 = "ecdh-sha2-nistp256" + kexAlgoECDH384 = "ecdh-sha2-nistp384" + kexAlgoECDH521 = "ecdh-sha2-nistp521" + kexAlgoCurve25519SHA256 = "curve25519-sha256@libssh.org" +) + +// kexResult captures the outcome of a key exchange. +type kexResult struct { + // Session hash. See also RFC 4253, section 8. + H []byte + + // Shared secret. See also RFC 4253, section 8. + K []byte + + // Host key as hashed into H. + HostKey []byte + + // Signature of H. + Signature []byte + + // A cryptographic hash function that matches the security + // level of the key exchange algorithm. It is used for + // calculating H, and for deriving keys from H and K. + Hash crypto.Hash + + // The session ID, which is the first H computed. This is used + // to signal data inside transport. + SessionID []byte +} + +// handshakeMagics contains data that is always included in the +// session hash. +type handshakeMagics struct { + clientVersion, serverVersion []byte + clientKexInit, serverKexInit []byte +} + +func (m *handshakeMagics) write(w io.Writer) { + writeString(w, m.clientVersion) + writeString(w, m.serverVersion) + writeString(w, m.clientKexInit) + writeString(w, m.serverKexInit) +} + +// kexAlgorithm abstracts different key exchange algorithms. +type kexAlgorithm interface { + // Server runs server-side key agreement, signing the result + // with a hostkey. + Server(p packetConn, rand io.Reader, magics *handshakeMagics, s Signer) (*kexResult, error) + + // Client runs the client-side key agreement. Caller is + // responsible for verifying the host key signature. 
+	Client(p packetConn, rand io.Reader, magics *handshakeMagics) (*kexResult, error)
+}
+
+// dhGroup is a multiplicative group suitable for implementing Diffie-Hellman key agreement.
+type dhGroup struct {
+	g, p *big.Int
+}
+
+func (group *dhGroup) diffieHellman(theirPublic, myPrivate *big.Int) (*big.Int, error) {
+	if theirPublic.Sign() <= 0 || theirPublic.Cmp(group.p) >= 0 {
+		return nil, errors.New("ssh: DH parameter out of bounds")
+	}
+	return new(big.Int).Exp(theirPublic, myPrivate, group.p), nil
+}
+
+func (group *dhGroup) Client(c packetConn, randSource io.Reader, magics *handshakeMagics) (*kexResult, error) {
+	hashFunc := crypto.SHA1
+
+	x, err := rand.Int(randSource, group.p)
+	if err != nil {
+		return nil, err
+	}
+	X := new(big.Int).Exp(group.g, x, group.p)
+	kexDHInit := kexDHInitMsg{
+		X: X,
+	}
+	if err := c.writePacket(Marshal(&kexDHInit)); err != nil {
+		return nil, err
+	}
+
+	packet, err := c.readPacket()
+	if err != nil {
+		return nil, err
+	}
+
+	var kexDHReply kexDHReplyMsg
+	if err = Unmarshal(packet, &kexDHReply); err != nil {
+		return nil, err
+	}
+
+	kInt, err := group.diffieHellman(kexDHReply.Y, x)
+	if err != nil {
+		return nil, err
+	}
+
+	h := hashFunc.New()
+	magics.write(h)
+	writeString(h, kexDHReply.HostKey)
+	writeInt(h, X)
+	writeInt(h, kexDHReply.Y)
+	K := make([]byte, intLength(kInt))
+	marshalInt(K, kInt)
+	h.Write(K)
+
+	return &kexResult{
+		H:         h.Sum(nil),
+		K:         K,
+		HostKey:   kexDHReply.HostKey,
+		Signature: kexDHReply.Signature,
+		Hash:      crypto.SHA1,
+	}, nil
+}
+
+func (group *dhGroup) Server(c packetConn, randSource io.Reader, magics *handshakeMagics, priv Signer) (result *kexResult, err error) {
+	hashFunc := crypto.SHA1
+	packet, err := c.readPacket()
+	if err != nil {
+		return
+	}
+	var kexDHInit kexDHInitMsg
+	if err = Unmarshal(packet, &kexDHInit); err != nil {
+		return
+	}
+
+	y, err := rand.Int(randSource, group.p)
+	if err != nil {
+		return
+	}
+
+	Y := new(big.Int).Exp(group.g, y, group.p)
+	kInt, err := group.diffieHellman(kexDHInit.X, y)
+	if err != nil {
+		return nil, err
+	}
+
+	hostKeyBytes := priv.PublicKey().Marshal()
+
+	h := hashFunc.New()
+	magics.write(h)
+	writeString(h, hostKeyBytes)
+	writeInt(h, kexDHInit.X)
+	writeInt(h, Y)
+
+	K := make([]byte, intLength(kInt))
+	marshalInt(K, kInt)
+	h.Write(K)
+
+	H := h.Sum(nil)
+
+	// H is already a hash, but the hostkey signing will apply its
+	// own key-specific hash algorithm.
+	sig, err := signAndMarshal(priv, randSource, H)
+	if err != nil {
+		return nil, err
+	}
+
+	kexDHReply := kexDHReplyMsg{
+		HostKey:   hostKeyBytes,
+		Y:         Y,
+		Signature: sig,
+	}
+	packet = Marshal(&kexDHReply)
+
+	// Propagate any write error rather than discarding it.
+	err = c.writePacket(packet)
+	return &kexResult{
+		H:         H,
+		K:         K,
+		HostKey:   hostKeyBytes,
+		Signature: sig,
+		Hash:      crypto.SHA1,
+	}, err
+}
+
+// ecdh performs Elliptic Curve Diffie-Hellman key exchange as
+// described in RFC 5656, section 4.
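+// The shared secret is the x-coordinate of the ECDH scalar
+// multiplication, hashed into the exchange transcript with the
+// curve-matched function from ecHash.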
+type ecdh struct { + curve elliptic.Curve +} + +func (kex *ecdh) Client(c packetConn, rand io.Reader, magics *handshakeMagics) (*kexResult, error) { + ephKey, err := ecdsa.GenerateKey(kex.curve, rand) + if err != nil { + return nil, err + } + + kexInit := kexECDHInitMsg{ + ClientPubKey: elliptic.Marshal(kex.curve, ephKey.PublicKey.X, ephKey.PublicKey.Y), + } + + serialized := Marshal(&kexInit) + if err := c.writePacket(serialized); err != nil { + return nil, err + } + + packet, err := c.readPacket() + if err != nil { + return nil, err + } + + var reply kexECDHReplyMsg + if err = Unmarshal(packet, &reply); err != nil { + return nil, err + } + + x, y, err := unmarshalECKey(kex.curve, reply.EphemeralPubKey) + if err != nil { + return nil, err + } + + // generate shared secret + secret, _ := kex.curve.ScalarMult(x, y, ephKey.D.Bytes()) + + h := ecHash(kex.curve).New() + magics.write(h) + writeString(h, reply.HostKey) + writeString(h, kexInit.ClientPubKey) + writeString(h, reply.EphemeralPubKey) + K := make([]byte, intLength(secret)) + marshalInt(K, secret) + h.Write(K) + + return &kexResult{ + H: h.Sum(nil), + K: K, + HostKey: reply.HostKey, + Signature: reply.Signature, + Hash: ecHash(kex.curve), + }, nil +} + +// unmarshalECKey parses and checks an EC key. +func unmarshalECKey(curve elliptic.Curve, pubkey []byte) (x, y *big.Int, err error) { + x, y = elliptic.Unmarshal(curve, pubkey) + if x == nil { + return nil, nil, errors.New("ssh: elliptic.Unmarshal failure") + } + if !validateECPublicKey(curve, x, y) { + return nil, nil, errors.New("ssh: public key not on curve") + } + return x, y, nil +} + +// validateECPublicKey checks that the point is a valid public key for +// the given curve. See [SEC1], 3.2.2 +func validateECPublicKey(curve elliptic.Curve, x, y *big.Int) bool { + if x.Sign() == 0 && y.Sign() == 0 { + return false + } + + if x.Cmp(curve.Params().P) >= 0 { + return false + } + + if y.Cmp(curve.Params().P) >= 0 { + return false + } + + if !curve.IsOnCurve(x, y) { + return false + } + + // We don't check if N * PubKey == 0, since + // + // - the NIST curves have cofactor = 1, so this is implicit. + // (We don't foresee an implementation that supports non NIST + // curves) + // + // - for ephemeral keys, we don't need to worry about small + // subgroup attacks. + return true +} + +func (kex *ecdh) Server(c packetConn, rand io.Reader, magics *handshakeMagics, priv Signer) (result *kexResult, err error) { + packet, err := c.readPacket() + if err != nil { + return nil, err + } + + var kexECDHInit kexECDHInitMsg + if err = Unmarshal(packet, &kexECDHInit); err != nil { + return nil, err + } + + clientX, clientY, err := unmarshalECKey(kex.curve, kexECDHInit.ClientPubKey) + if err != nil { + return nil, err + } + + // We could cache this key across multiple users/multiple + // connection attempts, but the benefit is small. OpenSSH + // generates a new key for each incoming connection. 
+ ephKey, err := ecdsa.GenerateKey(kex.curve, rand) + if err != nil { + return nil, err + } + + hostKeyBytes := priv.PublicKey().Marshal() + + serializedEphKey := elliptic.Marshal(kex.curve, ephKey.PublicKey.X, ephKey.PublicKey.Y) + + // generate shared secret + secret, _ := kex.curve.ScalarMult(clientX, clientY, ephKey.D.Bytes()) + + h := ecHash(kex.curve).New() + magics.write(h) + writeString(h, hostKeyBytes) + writeString(h, kexECDHInit.ClientPubKey) + writeString(h, serializedEphKey) + + K := make([]byte, intLength(secret)) + marshalInt(K, secret) + h.Write(K) + + H := h.Sum(nil) + + // H is already a hash, but the hostkey signing will apply its + // own key-specific hash algorithm. + sig, err := signAndMarshal(priv, rand, H) + if err != nil { + return nil, err + } + + reply := kexECDHReplyMsg{ + EphemeralPubKey: serializedEphKey, + HostKey: hostKeyBytes, + Signature: sig, + } + + serialized := Marshal(&reply) + if err := c.writePacket(serialized); err != nil { + return nil, err + } + + return &kexResult{ + H: H, + K: K, + HostKey: reply.HostKey, + Signature: sig, + Hash: ecHash(kex.curve), + }, nil +} + +var kexAlgoMap = map[string]kexAlgorithm{} + +func init() { + // This is the group called diffie-hellman-group1-sha1 in RFC + // 4253 and Oakley Group 2 in RFC 2409. + p, _ := new(big.Int).SetString("FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE65381FFFFFFFFFFFFFFFF", 16) + kexAlgoMap[kexAlgoDH1SHA1] = &dhGroup{ + g: new(big.Int).SetInt64(2), + p: p, + } + + // This is the group called diffie-hellman-group14-sha1 in RFC + // 4253 and Oakley Group 14 in RFC 3526. + p, _ = new(big.Int).SetString("FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF6955817183995497CEA956AE515D2261898FA051015728E5A8AACAA68FFFFFFFFFFFFFFFF", 16) + + kexAlgoMap[kexAlgoDH14SHA1] = &dhGroup{ + g: new(big.Int).SetInt64(2), + p: p, + } + + kexAlgoMap[kexAlgoECDH521] = &ecdh{elliptic.P521()} + kexAlgoMap[kexAlgoECDH384] = &ecdh{elliptic.P384()} + kexAlgoMap[kexAlgoECDH256] = &ecdh{elliptic.P256()} + kexAlgoMap[kexAlgoCurve25519SHA256] = &curve25519sha256{} +} + +// curve25519sha256 implements the curve25519-sha256@libssh.org key +// agreement protocol, as described in +// https://git.libssh.org/projects/libssh.git/tree/doc/curve25519-sha256@libssh.org.txt +type curve25519sha256 struct{} + +type curve25519KeyPair struct { + priv [32]byte + pub [32]byte +} + +func (kp *curve25519KeyPair) generate(rand io.Reader) error { + if _, err := io.ReadFull(rand, kp.priv[:]); err != nil { + return err + } + curve25519.ScalarBaseMult(&kp.pub, &kp.priv) + return nil +} + +// curve25519Zeros is just an array of 32 zero bytes so that we have something +// convenient to compare against in order to reject curve25519 points with the +// wrong order. 
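+// Low-order peer points yield an all-zero shared secret, so the
+// constant-time comparisons against this array reject exactly those.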
+var curve25519Zeros [32]byte + +func (kex *curve25519sha256) Client(c packetConn, rand io.Reader, magics *handshakeMagics) (*kexResult, error) { + var kp curve25519KeyPair + if err := kp.generate(rand); err != nil { + return nil, err + } + if err := c.writePacket(Marshal(&kexECDHInitMsg{kp.pub[:]})); err != nil { + return nil, err + } + + packet, err := c.readPacket() + if err != nil { + return nil, err + } + + var reply kexECDHReplyMsg + if err = Unmarshal(packet, &reply); err != nil { + return nil, err + } + if len(reply.EphemeralPubKey) != 32 { + return nil, errors.New("ssh: peer's curve25519 public value has wrong length") + } + + var servPub, secret [32]byte + copy(servPub[:], reply.EphemeralPubKey) + curve25519.ScalarMult(&secret, &kp.priv, &servPub) + if subtle.ConstantTimeCompare(secret[:], curve25519Zeros[:]) == 1 { + return nil, errors.New("ssh: peer's curve25519 public value has wrong order") + } + + h := crypto.SHA256.New() + magics.write(h) + writeString(h, reply.HostKey) + writeString(h, kp.pub[:]) + writeString(h, reply.EphemeralPubKey) + + kInt := new(big.Int).SetBytes(secret[:]) + K := make([]byte, intLength(kInt)) + marshalInt(K, kInt) + h.Write(K) + + return &kexResult{ + H: h.Sum(nil), + K: K, + HostKey: reply.HostKey, + Signature: reply.Signature, + Hash: crypto.SHA256, + }, nil +} + +func (kex *curve25519sha256) Server(c packetConn, rand io.Reader, magics *handshakeMagics, priv Signer) (result *kexResult, err error) { + packet, err := c.readPacket() + if err != nil { + return + } + var kexInit kexECDHInitMsg + if err = Unmarshal(packet, &kexInit); err != nil { + return + } + + if len(kexInit.ClientPubKey) != 32 { + return nil, errors.New("ssh: peer's curve25519 public value has wrong length") + } + + var kp curve25519KeyPair + if err := kp.generate(rand); err != nil { + return nil, err + } + + var clientPub, secret [32]byte + copy(clientPub[:], kexInit.ClientPubKey) + curve25519.ScalarMult(&secret, &kp.priv, &clientPub) + if subtle.ConstantTimeCompare(secret[:], curve25519Zeros[:]) == 1 { + return nil, errors.New("ssh: peer's curve25519 public value has wrong order") + } + + hostKeyBytes := priv.PublicKey().Marshal() + + h := crypto.SHA256.New() + magics.write(h) + writeString(h, hostKeyBytes) + writeString(h, kexInit.ClientPubKey) + writeString(h, kp.pub[:]) + + kInt := new(big.Int).SetBytes(secret[:]) + K := make([]byte, intLength(kInt)) + marshalInt(K, kInt) + h.Write(K) + + H := h.Sum(nil) + + sig, err := signAndMarshal(priv, rand, H) + if err != nil { + return nil, err + } + + reply := kexECDHReplyMsg{ + EphemeralPubKey: kp.pub[:], + HostKey: hostKeyBytes, + Signature: sig, + } + if err := c.writePacket(Marshal(&reply)); err != nil { + return nil, err + } + return &kexResult{ + H: H, + K: K, + HostKey: hostKeyBytes, + Signature: sig, + Hash: crypto.SHA256, + }, nil +} diff --git a/vendor/golang.org/x/crypto/ssh/kex_test.go b/vendor/golang.org/x/crypto/ssh/kex_test.go new file mode 100644 index 000000000..12ca0acd3 --- /dev/null +++ b/vendor/golang.org/x/crypto/ssh/kex_test.go @@ -0,0 +1,50 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ssh + +// Key exchange tests. 
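+//
+// Each algorithm registered in kexAlgoMap is run over an in-memory
+// pipe; the client and server halves must produce identical results.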
+
+import (
+	"crypto/rand"
+	"reflect"
+	"testing"
+)
+
+func TestKexes(t *testing.T) {
+	type kexResultErr struct {
+		result *kexResult
+		err    error
+	}
+
+	for name, kex := range kexAlgoMap {
+		a, b := memPipe()
+
+		s := make(chan kexResultErr, 1)
+		c := make(chan kexResultErr, 1)
+		var magics handshakeMagics
+		go func() {
+			r, e := kex.Client(a, rand.Reader, &magics)
+			a.Close()
+			c <- kexResultErr{r, e}
+		}()
+		go func() {
+			r, e := kex.Server(b, rand.Reader, &magics, testSigners["ecdsa"])
+			b.Close()
+			s <- kexResultErr{r, e}
+		}()
+
+		clientRes := <-c
+		serverRes := <-s
+		if clientRes.err != nil {
+			t.Errorf("client: %v", clientRes.err)
+		}
+		if serverRes.err != nil {
+			t.Errorf("server: %v", serverRes.err)
+		}
+		if !reflect.DeepEqual(clientRes.result, serverRes.result) {
+			t.Errorf("kex %q: mismatch %#v, %#v", name, clientRes.result, serverRes.result)
+		}
+	}
+}
diff --git a/vendor/golang.org/x/crypto/ssh/keys.go b/vendor/golang.org/x/crypto/ssh/keys.go
new file mode 100644
index 000000000..cfc970b2c
--- /dev/null
+++ b/vendor/golang.org/x/crypto/ssh/keys.go
@@ -0,0 +1,720 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssh
+
+import (
+	"bytes"
+	"crypto"
+	"crypto/dsa"
+	"crypto/ecdsa"
+	"crypto/elliptic"
+	"crypto/rsa"
+	"crypto/x509"
+	"encoding/asn1"
+	"encoding/base64"
+	"encoding/pem"
+	"errors"
+	"fmt"
+	"io"
+	"math/big"
+	"strings"
+)
+
+// These constants represent the algorithm names for key types supported by this
+// package.
+const (
+	KeyAlgoRSA      = "ssh-rsa"
+	KeyAlgoDSA      = "ssh-dss"
+	KeyAlgoECDSA256 = "ecdsa-sha2-nistp256"
+	KeyAlgoECDSA384 = "ecdsa-sha2-nistp384"
+	KeyAlgoECDSA521 = "ecdsa-sha2-nistp521"
+)
+
+// parsePubKey parses a public key of the given algorithm.
+// Use ParsePublicKey for keys with prepended algorithm.
+func parsePubKey(in []byte, algo string) (pubKey PublicKey, rest []byte, err error) {
+	switch algo {
+	case KeyAlgoRSA:
+		return parseRSA(in)
+	case KeyAlgoDSA:
+		return parseDSA(in)
+	case KeyAlgoECDSA256, KeyAlgoECDSA384, KeyAlgoECDSA521:
+		return parseECDSA(in)
+	case CertAlgoRSAv01, CertAlgoDSAv01, CertAlgoECDSA256v01, CertAlgoECDSA384v01, CertAlgoECDSA521v01:
+		cert, err := parseCert(in, certToPrivAlgo(algo))
+		if err != nil {
+			return nil, nil, err
+		}
+		return cert, nil, nil
+	}
+	// Report the unrecognized algorithm name itself; err is always nil here.
+	return nil, nil, fmt.Errorf("ssh: unknown key algorithm: %v", algo)
+}
+
+// parseAuthorizedKey parses a public key in OpenSSH authorized_keys format
+// (see sshd(8) manual page) once the options and key type fields have been
+// removed.
+func parseAuthorizedKey(in []byte) (out PublicKey, comment string, err error) {
+	in = bytes.TrimSpace(in)
+
+	i := bytes.IndexAny(in, " \t")
+	if i == -1 {
+		i = len(in)
+	}
+	base64Key := in[:i]
+
+	key := make([]byte, base64.StdEncoding.DecodedLen(len(base64Key)))
+	n, err := base64.StdEncoding.Decode(key, base64Key)
+	if err != nil {
+		return nil, "", err
+	}
+	key = key[:n]
+	out, err = ParsePublicKey(key)
+	if err != nil {
+		return nil, "", err
+	}
+	comment = string(bytes.TrimSpace(in[i:]))
+	return out, comment, nil
+}
+
+// ParseKnownHosts parses an entry in the format of the known_hosts file.
+//
+// The known_hosts format is documented in the sshd(8) manual page. This
+// function will parse a single entry from in. On successful return, marker
+// will contain the optional marker value (i.e. "cert-authority" or "revoked")
+// or else be empty, hosts will contain the hosts that this entry matches,
+// pubKey will contain the public key and comment will contain any trailing
+// comment at the end of the line. See the sshd(8) manual page for the various
+// forms that a host string can take.
+//
+// The unparsed remainder of the input will be returned in rest. This function
+// can be called repeatedly to parse multiple entries.
+//
+// If no entries were found in the input then err will be io.EOF. Otherwise a
+// non-nil err value indicates a parse error.
+func ParseKnownHosts(in []byte) (marker string, hosts []string, pubKey PublicKey, comment string, rest []byte, err error) {
+	for len(in) > 0 {
+		end := bytes.IndexByte(in, '\n')
+		if end != -1 {
+			rest = in[end+1:]
+			in = in[:end]
+		} else {
+			rest = nil
+		}
+
+		end = bytes.IndexByte(in, '\r')
+		if end != -1 {
+			in = in[:end]
+		}
+
+		in = bytes.TrimSpace(in)
+		if len(in) == 0 || in[0] == '#' {
+			in = rest
+			continue
+		}
+
+		i := bytes.IndexAny(in, " \t")
+		if i == -1 {
+			in = rest
+			continue
+		}
+
+		// Strip out the beginning of the known_hosts key.
+		// This is either an optional marker or a (set of) hostname(s).
+		keyFields := bytes.Fields(in)
+		if len(keyFields) < 3 || len(keyFields) > 5 {
+			return "", nil, nil, "", nil, errors.New("ssh: invalid entry in known_hosts data")
+		}
+
+		// keyFields[0] is either "@cert-authority", "@revoked" or a comma separated
+		// list of hosts
+		marker := ""
+		if keyFields[0][0] == '@' {
+			marker = string(keyFields[0][1:])
+			keyFields = keyFields[1:]
+		}
+
+		hosts := string(keyFields[0])
+		// keyFields[1] contains the key type (e.g. "ssh-rsa").
+		// However, that information is duplicated inside the
+		// base64-encoded key and so is ignored here.
+
+		key := bytes.Join(keyFields[2:], []byte(" "))
+		if pubKey, comment, err = parseAuthorizedKey(key); err != nil {
+			return "", nil, nil, "", nil, err
+		}
+
+		return marker, strings.Split(hosts, ","), pubKey, comment, rest, nil
+	}
+
+	return "", nil, nil, "", nil, io.EOF
+}
+
+// ParseAuthorizedKey parses a public key from an authorized_keys
+// file used in OpenSSH according to the sshd(8) manual page.
+func ParseAuthorizedKey(in []byte) (out PublicKey, comment string, options []string, rest []byte, err error) {
+	for len(in) > 0 {
+		end := bytes.IndexByte(in, '\n')
+		if end != -1 {
+			rest = in[end+1:]
+			in = in[:end]
+		} else {
+			rest = nil
+		}
+
+		end = bytes.IndexByte(in, '\r')
+		if end != -1 {
+			in = in[:end]
+		}
+
+		in = bytes.TrimSpace(in)
+		if len(in) == 0 || in[0] == '#' {
+			in = rest
+			continue
+		}
+
+		i := bytes.IndexAny(in, " \t")
+		if i == -1 {
+			in = rest
+			continue
+		}
+
+		if out, comment, err = parseAuthorizedKey(in[i:]); err == nil {
+			return out, comment, options, rest, nil
+		}
+
+		// No key type recognised. Maybe there's an options field at
+		// the beginning.
+ var b byte + inQuote := false + var candidateOptions []string + optionStart := 0 + for i, b = range in { + isEnd := !inQuote && (b == ' ' || b == '\t') + if (b == ',' && !inQuote) || isEnd { + if i-optionStart > 0 { + candidateOptions = append(candidateOptions, string(in[optionStart:i])) + } + optionStart = i + 1 + } + if isEnd { + break + } + if b == '"' && (i == 0 || (i > 0 && in[i-1] != '\\')) { + inQuote = !inQuote + } + } + for i < len(in) && (in[i] == ' ' || in[i] == '\t') { + i++ + } + if i == len(in) { + // Invalid line: unmatched quote + in = rest + continue + } + + in = in[i:] + i = bytes.IndexAny(in, " \t") + if i == -1 { + in = rest + continue + } + + if out, comment, err = parseAuthorizedKey(in[i:]); err == nil { + options = candidateOptions + return out, comment, options, rest, nil + } + + in = rest + continue + } + + return nil, "", nil, nil, errors.New("ssh: no key found") +} + +// ParsePublicKey parses an SSH public key formatted for use in +// the SSH wire protocol according to RFC 4253, section 6.6. +func ParsePublicKey(in []byte) (out PublicKey, err error) { + algo, in, ok := parseString(in) + if !ok { + return nil, errShortRead + } + var rest []byte + out, rest, err = parsePubKey(in, string(algo)) + if len(rest) > 0 { + return nil, errors.New("ssh: trailing junk in public key") + } + + return out, err +} + +// MarshalAuthorizedKey serializes key for inclusion in an OpenSSH +// authorized_keys file. The return value ends with newline. +func MarshalAuthorizedKey(key PublicKey) []byte { + b := &bytes.Buffer{} + b.WriteString(key.Type()) + b.WriteByte(' ') + e := base64.NewEncoder(base64.StdEncoding, b) + e.Write(key.Marshal()) + e.Close() + b.WriteByte('\n') + return b.Bytes() +} + +// PublicKey is an abstraction of different types of public keys. +type PublicKey interface { + // Type returns the key's type, e.g. "ssh-rsa". + Type() string + + // Marshal returns the serialized key data in SSH wire format, + // with the name prefix. + Marshal() []byte + + // Verify that sig is a signature on the given data using this + // key. This function will hash the data appropriately first. + Verify(data []byte, sig *Signature) error +} + +// A Signer can create signatures that verify against a public key. +type Signer interface { + // PublicKey returns an associated PublicKey instance. + PublicKey() PublicKey + + // Sign returns raw signature for the given data. This method + // will apply the hash specified for the keytype to the data. + Sign(rand io.Reader, data []byte) (*Signature, error) +} + +type rsaPublicKey rsa.PublicKey + +func (r *rsaPublicKey) Type() string { + return "ssh-rsa" +} + +// parseRSA parses an RSA key according to RFC 4253, section 6.6. 
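+// The leading "ssh-rsa" name has already been consumed by the caller,
+// so only the exponent and modulus remain in the input.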
+func parseRSA(in []byte) (out PublicKey, rest []byte, err error) {
+	var w struct {
+		E    *big.Int
+		N    *big.Int
+		Rest []byte `ssh:"rest"`
+	}
+	if err := Unmarshal(in, &w); err != nil {
+		return nil, nil, err
+	}
+
+	if w.E.BitLen() > 24 {
+		return nil, nil, errors.New("ssh: exponent too large")
+	}
+	e := w.E.Int64()
+	if e < 3 || e&1 == 0 {
+		return nil, nil, errors.New("ssh: incorrect exponent")
+	}
+
+	var key rsa.PublicKey
+	key.E = int(e)
+	key.N = w.N
+	return (*rsaPublicKey)(&key), w.Rest, nil
+}
+
+func (r *rsaPublicKey) Marshal() []byte {
+	e := new(big.Int).SetInt64(int64(r.E))
+	wirekey := struct {
+		Name string
+		E    *big.Int
+		N    *big.Int
+	}{
+		KeyAlgoRSA,
+		e,
+		r.N,
+	}
+	return Marshal(&wirekey)
+}
+
+func (r *rsaPublicKey) Verify(data []byte, sig *Signature) error {
+	if sig.Format != r.Type() {
+		return fmt.Errorf("ssh: signature type %s for key type %s", sig.Format, r.Type())
+	}
+	h := crypto.SHA1.New()
+	h.Write(data)
+	digest := h.Sum(nil)
+	return rsa.VerifyPKCS1v15((*rsa.PublicKey)(r), crypto.SHA1, digest, sig.Blob)
+}
+
+type dsaPublicKey dsa.PublicKey
+
+func (r *dsaPublicKey) Type() string {
+	return "ssh-dss"
+}
+
+// parseDSA parses a DSA key according to RFC 4253, section 6.6.
+func parseDSA(in []byte) (out PublicKey, rest []byte, err error) {
+	var w struct {
+		P, Q, G, Y *big.Int
+		Rest       []byte `ssh:"rest"`
+	}
+	if err := Unmarshal(in, &w); err != nil {
+		return nil, nil, err
+	}
+
+	key := &dsaPublicKey{
+		Parameters: dsa.Parameters{
+			P: w.P,
+			Q: w.Q,
+			G: w.G,
+		},
+		Y: w.Y,
+	}
+	return key, w.Rest, nil
+}
+
+func (k *dsaPublicKey) Marshal() []byte {
+	w := struct {
+		Name       string
+		P, Q, G, Y *big.Int
+	}{
+		k.Type(),
+		k.P,
+		k.Q,
+		k.G,
+		k.Y,
+	}
+
+	return Marshal(&w)
+}
+
+func (k *dsaPublicKey) Verify(data []byte, sig *Signature) error {
+	if sig.Format != k.Type() {
+		return fmt.Errorf("ssh: signature type %s for key type %s", sig.Format, k.Type())
+	}
+	h := crypto.SHA1.New()
+	h.Write(data)
+	digest := h.Sum(nil)
+
+	// Per RFC 4253, section 6.6,
+	// The value for 'dss_signature_blob' is encoded as a string containing
+	// r, followed by s (which are 160-bit integers, without lengths or
+	// padding, unsigned, and in network byte order).
+	// For DSS purposes, sig.Blob should be exactly 40 bytes in length.
+ if len(sig.Blob) != 40 { + return errors.New("ssh: DSA signature parse error") + } + r := new(big.Int).SetBytes(sig.Blob[:20]) + s := new(big.Int).SetBytes(sig.Blob[20:]) + if dsa.Verify((*dsa.PublicKey)(k), digest, r, s) { + return nil + } + return errors.New("ssh: signature did not verify") +} + +type dsaPrivateKey struct { + *dsa.PrivateKey +} + +func (k *dsaPrivateKey) PublicKey() PublicKey { + return (*dsaPublicKey)(&k.PrivateKey.PublicKey) +} + +func (k *dsaPrivateKey) Sign(rand io.Reader, data []byte) (*Signature, error) { + h := crypto.SHA1.New() + h.Write(data) + digest := h.Sum(nil) + r, s, err := dsa.Sign(rand, k.PrivateKey, digest) + if err != nil { + return nil, err + } + + sig := make([]byte, 40) + rb := r.Bytes() + sb := s.Bytes() + + copy(sig[20-len(rb):20], rb) + copy(sig[40-len(sb):], sb) + + return &Signature{ + Format: k.PublicKey().Type(), + Blob: sig, + }, nil +} + +type ecdsaPublicKey ecdsa.PublicKey + +func (key *ecdsaPublicKey) Type() string { + return "ecdsa-sha2-" + key.nistID() +} + +func (key *ecdsaPublicKey) nistID() string { + switch key.Params().BitSize { + case 256: + return "nistp256" + case 384: + return "nistp384" + case 521: + return "nistp521" + } + panic("ssh: unsupported ecdsa key size") +} + +func supportedEllipticCurve(curve elliptic.Curve) bool { + return curve == elliptic.P256() || curve == elliptic.P384() || curve == elliptic.P521() +} + +// ecHash returns the hash to match the given elliptic curve, see RFC +// 5656, section 6.2.1 +func ecHash(curve elliptic.Curve) crypto.Hash { + bitSize := curve.Params().BitSize + switch { + case bitSize <= 256: + return crypto.SHA256 + case bitSize <= 384: + return crypto.SHA384 + } + return crypto.SHA512 +} + +// parseECDSA parses an ECDSA key according to RFC 5656, section 3.1. +func parseECDSA(in []byte) (out PublicKey, rest []byte, err error) { + var w struct { + Curve string + KeyBytes []byte + Rest []byte `ssh:"rest"` + } + + if err := Unmarshal(in, &w); err != nil { + return nil, nil, err + } + + key := new(ecdsa.PublicKey) + + switch w.Curve { + case "nistp256": + key.Curve = elliptic.P256() + case "nistp384": + key.Curve = elliptic.P384() + case "nistp521": + key.Curve = elliptic.P521() + default: + return nil, nil, errors.New("ssh: unsupported curve") + } + + key.X, key.Y = elliptic.Unmarshal(key.Curve, w.KeyBytes) + if key.X == nil || key.Y == nil { + return nil, nil, errors.New("ssh: invalid curve point") + } + return (*ecdsaPublicKey)(key), w.Rest, nil +} + +func (key *ecdsaPublicKey) Marshal() []byte { + // See RFC 5656, section 3.1. 
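+	// The wire form is: string "ecdsa-sha2-[identifier]", string
+	// [identifier], string Q, where Q is the uncompressed curve point.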
+	keyBytes := elliptic.Marshal(key.Curve, key.X, key.Y)
+	w := struct {
+		Name string
+		ID   string
+		Key  []byte
+	}{
+		key.Type(),
+		key.nistID(),
+		keyBytes,
+	}
+
+	return Marshal(&w)
+}
+
+func (key *ecdsaPublicKey) Verify(data []byte, sig *Signature) error {
+	if sig.Format != key.Type() {
+		return fmt.Errorf("ssh: signature type %s for key type %s", sig.Format, key.Type())
+	}
+
+	h := ecHash(key.Curve).New()
+	h.Write(data)
+	digest := h.Sum(nil)
+
+	// Per RFC 5656, section 3.1.2,
+	// The ecdsa_signature_blob value has the following specific encoding:
+	//	mpint	r
+	//	mpint	s
+	var ecSig struct {
+		R *big.Int
+		S *big.Int
+	}
+
+	if err := Unmarshal(sig.Blob, &ecSig); err != nil {
+		return err
+	}
+
+	if ecdsa.Verify((*ecdsa.PublicKey)(key), digest, ecSig.R, ecSig.S) {
+		return nil
+	}
+	return errors.New("ssh: signature did not verify")
+}
+
+// NewSignerFromKey takes an *rsa.PrivateKey, *dsa.PrivateKey,
+// *ecdsa.PrivateKey or any other crypto.Signer and returns a corresponding
+// Signer instance. ECDSA keys must use P-256, P-384 or P-521.
+func NewSignerFromKey(key interface{}) (Signer, error) {
+	switch key := key.(type) {
+	case crypto.Signer:
+		return NewSignerFromSigner(key)
+	case *dsa.PrivateKey:
+		return &dsaPrivateKey{key}, nil
+	default:
+		return nil, fmt.Errorf("ssh: unsupported key type %T", key)
+	}
+}
+
+type wrappedSigner struct {
+	signer crypto.Signer
+	pubKey PublicKey
+}
+
+// NewSignerFromSigner takes any crypto.Signer implementation and
+// returns a corresponding Signer interface. This can be used, for
+// example, with keys kept in hardware modules.
+func NewSignerFromSigner(signer crypto.Signer) (Signer, error) {
+	pubKey, err := NewPublicKey(signer.Public())
+	if err != nil {
+		return nil, err
+	}
+
+	return &wrappedSigner{signer, pubKey}, nil
+}
+
+func (s *wrappedSigner) PublicKey() PublicKey {
+	return s.pubKey
+}
+
+func (s *wrappedSigner) Sign(rand io.Reader, data []byte) (*Signature, error) {
+	var hashFunc crypto.Hash
+
+	switch key := s.pubKey.(type) {
+	case *rsaPublicKey, *dsaPublicKey:
+		hashFunc = crypto.SHA1
+	case *ecdsaPublicKey:
+		hashFunc = ecHash(key.Curve)
+	default:
+		return nil, fmt.Errorf("ssh: unsupported key type %T", key)
+	}
+
+	h := hashFunc.New()
+	h.Write(data)
+	digest := h.Sum(nil)
+
+	signature, err := s.signer.Sign(rand, digest, hashFunc)
+	if err != nil {
+		return nil, err
+	}
+
+	// crypto.Signer.Sign is expected to return an ASN.1-encoded signature
+	// for ECDSA and DSA, but that's not the encoding expected by SSH, so
+	// re-encode.
+	switch s.pubKey.(type) {
+	case *ecdsaPublicKey, *dsaPublicKey:
+		type asn1Signature struct {
+			R, S *big.Int
+		}
+		asn1Sig := new(asn1Signature)
+		_, err := asn1.Unmarshal(signature, asn1Sig)
+		if err != nil {
+			return nil, err
+		}
+
+		switch s.pubKey.(type) {
+		case *ecdsaPublicKey:
+			signature = Marshal(asn1Sig)
+
+		case *dsaPublicKey:
+			signature = make([]byte, 40)
+			r := asn1Sig.R.Bytes()
+			s := asn1Sig.S.Bytes()
+			copy(signature[20-len(r):20], r)
+			copy(signature[40-len(s):40], s)
+		}
+	}
+
+	return &Signature{
+		Format: s.pubKey.Type(),
+		Blob:   signature,
+	}, nil
+}
+
+// NewPublicKey takes an *rsa.PublicKey, *dsa.PublicKey or *ecdsa.PublicKey
+// and returns a corresponding PublicKey instance. ECDSA keys must use
+// P-256, P-384 or P-521.
+func NewPublicKey(key interface{}) (PublicKey, error) {
+	switch key := key.(type) {
+	case *rsa.PublicKey:
+		return (*rsaPublicKey)(key), nil
+	case *ecdsa.PublicKey:
+		if !supportedEllipticCurve(key.Curve) {
+			return nil, errors.New("ssh: only P-256, P-384 and P-521 EC keys are supported.")
+		}
+		return (*ecdsaPublicKey)(key), nil
+	case *dsa.PublicKey:
+		return (*dsaPublicKey)(key), nil
+	default:
+		return nil, fmt.Errorf("ssh: unsupported key type %T", key)
+	}
+}
+
+// ParsePrivateKey returns a Signer from a PEM encoded private key. It supports
+// the same keys as ParseRawPrivateKey.
+func ParsePrivateKey(pemBytes []byte) (Signer, error) {
+	key, err := ParseRawPrivateKey(pemBytes)
+	if err != nil {
+		return nil, err
+	}
+
+	return NewSignerFromKey(key)
+}
+
+// ParseRawPrivateKey returns a private key from a PEM encoded private key. It
+// supports RSA (PKCS#1), DSA (OpenSSL), and ECDSA private keys.
+func ParseRawPrivateKey(pemBytes []byte) (interface{}, error) {
+	block, _ := pem.Decode(pemBytes)
+	if block == nil {
+		return nil, errors.New("ssh: no key found")
+	}
+
+	switch block.Type {
+	case "RSA PRIVATE KEY":
+		return x509.ParsePKCS1PrivateKey(block.Bytes)
+	case "EC PRIVATE KEY":
+		return x509.ParseECPrivateKey(block.Bytes)
+	case "DSA PRIVATE KEY":
+		return ParseDSAPrivateKey(block.Bytes)
+	default:
+		return nil, fmt.Errorf("ssh: unsupported key type %q", block.Type)
+	}
+}
+
+// ParseDSAPrivateKey returns a DSA private key from its ASN.1 DER encoding, as
+// specified by the OpenSSL DSA man page.
+func ParseDSAPrivateKey(der []byte) (*dsa.PrivateKey, error) {
+	var k struct {
+		Version int
+		P       *big.Int
+		Q       *big.Int
+		G       *big.Int
+		Priv    *big.Int
+		Pub     *big.Int
+	}
+	rest, err := asn1.Unmarshal(der, &k)
+	if err != nil {
+		return nil, errors.New("ssh: failed to parse DSA key: " + err.Error())
+	}
+	if len(rest) > 0 {
+		return nil, errors.New("ssh: garbage after DSA key")
+	}
+
+	// In the DER encoding the public value precedes the private one, so
+	// the struct field named Priv actually holds the public value and Pub
+	// the private value; the assignments below account for that.
+	return &dsa.PrivateKey{
+		PublicKey: dsa.PublicKey{
+			Parameters: dsa.Parameters{
+				P: k.P,
+				Q: k.Q,
+				G: k.G,
+			},
+			Y: k.Priv,
+		},
+		X: k.Pub,
+	}, nil
+}
diff --git a/vendor/golang.org/x/crypto/ssh/keys_test.go b/vendor/golang.org/x/crypto/ssh/keys_test.go
new file mode 100644
index 000000000..27569473f
--- /dev/null
+++ b/vendor/golang.org/x/crypto/ssh/keys_test.go
@@ -0,0 +1,437 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+ +package ssh + +import ( + "bytes" + "crypto/dsa" + "crypto/ecdsa" + "crypto/elliptic" + "crypto/rand" + "crypto/rsa" + "encoding/base64" + "fmt" + "reflect" + "strings" + "testing" + + "golang.org/x/crypto/ssh/testdata" +) + +func rawKey(pub PublicKey) interface{} { + switch k := pub.(type) { + case *rsaPublicKey: + return (*rsa.PublicKey)(k) + case *dsaPublicKey: + return (*dsa.PublicKey)(k) + case *ecdsaPublicKey: + return (*ecdsa.PublicKey)(k) + case *Certificate: + return k + } + panic("unknown key type") +} + +func TestKeyMarshalParse(t *testing.T) { + for _, priv := range testSigners { + pub := priv.PublicKey() + roundtrip, err := ParsePublicKey(pub.Marshal()) + if err != nil { + t.Errorf("ParsePublicKey(%T): %v", pub, err) + } + + k1 := rawKey(pub) + k2 := rawKey(roundtrip) + + if !reflect.DeepEqual(k1, k2) { + t.Errorf("got %#v in roundtrip, want %#v", k2, k1) + } + } +} + +func TestUnsupportedCurves(t *testing.T) { + raw, err := ecdsa.GenerateKey(elliptic.P224(), rand.Reader) + if err != nil { + t.Fatalf("GenerateKey: %v", err) + } + + if _, err = NewSignerFromKey(raw); err == nil || !strings.Contains(err.Error(), "only P-256") { + t.Fatalf("NewPrivateKey should not succeed with P-224, got: %v", err) + } + + if _, err = NewPublicKey(&raw.PublicKey); err == nil || !strings.Contains(err.Error(), "only P-256") { + t.Fatalf("NewPublicKey should not succeed with P-224, got: %v", err) + } +} + +func TestNewPublicKey(t *testing.T) { + for _, k := range testSigners { + raw := rawKey(k.PublicKey()) + // Skip certificates, as NewPublicKey does not support them. + if _, ok := raw.(*Certificate); ok { + continue + } + pub, err := NewPublicKey(raw) + if err != nil { + t.Errorf("NewPublicKey(%#v): %v", raw, err) + } + if !reflect.DeepEqual(k.PublicKey(), pub) { + t.Errorf("NewPublicKey(%#v) = %#v, want %#v", raw, pub, k.PublicKey()) + } + } +} + +func TestKeySignVerify(t *testing.T) { + for _, priv := range testSigners { + pub := priv.PublicKey() + + data := []byte("sign me") + sig, err := priv.Sign(rand.Reader, data) + if err != nil { + t.Fatalf("Sign(%T): %v", priv, err) + } + + if err := pub.Verify(data, sig); err != nil { + t.Errorf("publicKey.Verify(%T): %v", priv, err) + } + sig.Blob[5]++ + if err := pub.Verify(data, sig); err == nil { + t.Errorf("publicKey.Verify on broken sig did not fail") + } + } +} + +func TestParseRSAPrivateKey(t *testing.T) { + key := testPrivateKeys["rsa"] + + rsa, ok := key.(*rsa.PrivateKey) + if !ok { + t.Fatalf("got %T, want *rsa.PrivateKey", rsa) + } + + if err := rsa.Validate(); err != nil { + t.Errorf("Validate: %v", err) + } +} + +func TestParseECPrivateKey(t *testing.T) { + key := testPrivateKeys["ecdsa"] + + ecKey, ok := key.(*ecdsa.PrivateKey) + if !ok { + t.Fatalf("got %T, want *ecdsa.PrivateKey", ecKey) + } + + if !validateECPublicKey(ecKey.Curve, ecKey.X, ecKey.Y) { + t.Fatalf("public key does not validate.") + } +} + +func TestParseDSA(t *testing.T) { + // We actually exercise the ParsePrivateKey codepath here, as opposed to + // using the ParseRawPrivateKey+NewSignerFromKey path that testdata_test.go + // uses. + s, err := ParsePrivateKey(testdata.PEMBytes["dsa"]) + if err != nil { + t.Fatalf("ParsePrivateKey returned error: %s", err) + } + + data := []byte("sign me") + sig, err := s.Sign(rand.Reader, data) + if err != nil { + t.Fatalf("dsa.Sign: %v", err) + } + + if err := s.PublicKey().Verify(data, sig); err != nil { + t.Errorf("Verify failed: %v", err) + } +} + +// Tests for authorized_keys parsing. 
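+//
+// For reference (editor's addition, not part of the upstream file), the
+// entries exercised below follow the sshd authorized_keys line format; the
+// base64 key material shown is hypothetical:
+//
+//	[options] key-type base64-encoded-key [comment]
+//	env="HOME=/home/root",no-port-forwarding ssh-rsa AAAAB3Nza... user@host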
+ +// getTestKey returns a public key, and its base64 encoding. +func getTestKey() (PublicKey, string) { + k := testPublicKeys["rsa"] + + b := &bytes.Buffer{} + e := base64.NewEncoder(base64.StdEncoding, b) + e.Write(k.Marshal()) + e.Close() + + return k, b.String() +} + +func TestMarshalParsePublicKey(t *testing.T) { + pub, pubSerialized := getTestKey() + line := fmt.Sprintf("%s %s user@host", pub.Type(), pubSerialized) + + authKeys := MarshalAuthorizedKey(pub) + actualFields := strings.Fields(string(authKeys)) + if len(actualFields) == 0 { + t.Fatalf("failed authKeys: %v", authKeys) + } + + // drop the comment + expectedFields := strings.Fields(line)[0:2] + + if !reflect.DeepEqual(actualFields, expectedFields) { + t.Errorf("got %v, expected %v", actualFields, expectedFields) + } + + actPub, _, _, _, err := ParseAuthorizedKey([]byte(line)) + if err != nil { + t.Fatalf("cannot parse %v: %v", line, err) + } + if !reflect.DeepEqual(actPub, pub) { + t.Errorf("got %v, expected %v", actPub, pub) + } +} + +type authResult struct { + pubKey PublicKey + options []string + comments string + rest string + ok bool +} + +func testAuthorizedKeys(t *testing.T, authKeys []byte, expected []authResult) { + rest := authKeys + var values []authResult + for len(rest) > 0 { + var r authResult + var err error + r.pubKey, r.comments, r.options, rest, err = ParseAuthorizedKey(rest) + r.ok = (err == nil) + t.Log(err) + r.rest = string(rest) + values = append(values, r) + } + + if !reflect.DeepEqual(values, expected) { + t.Errorf("got %#v, expected %#v", values, expected) + } +} + +func TestAuthorizedKeyBasic(t *testing.T) { + pub, pubSerialized := getTestKey() + line := "ssh-rsa " + pubSerialized + " user@host" + testAuthorizedKeys(t, []byte(line), + []authResult{ + {pub, nil, "user@host", "", true}, + }) +} + +func TestAuth(t *testing.T) { + pub, pubSerialized := getTestKey() + authWithOptions := []string{ + `# comments to ignore before any keys...`, + ``, + `env="HOME=/home/root",no-port-forwarding ssh-rsa ` + pubSerialized + ` user@host`, + `# comments to ignore, along with a blank line`, + ``, + `env="HOME=/home/root2" ssh-rsa ` + pubSerialized + ` user2@host2`, + ``, + `# more comments, plus a invalid entry`, + `ssh-rsa data-that-will-not-parse user@host3`, + } + for _, eol := range []string{"\n", "\r\n"} { + authOptions := strings.Join(authWithOptions, eol) + rest2 := strings.Join(authWithOptions[3:], eol) + rest3 := strings.Join(authWithOptions[6:], eol) + testAuthorizedKeys(t, []byte(authOptions), []authResult{ + {pub, []string{`env="HOME=/home/root"`, "no-port-forwarding"}, "user@host", rest2, true}, + {pub, []string{`env="HOME=/home/root2"`}, "user2@host2", rest3, true}, + {nil, nil, "", "", false}, + }) + } +} + +func TestAuthWithQuotedSpaceInEnv(t *testing.T) { + pub, pubSerialized := getTestKey() + authWithQuotedSpaceInEnv := []byte(`env="HOME=/home/root dir",no-port-forwarding ssh-rsa ` + pubSerialized + ` user@host`) + testAuthorizedKeys(t, []byte(authWithQuotedSpaceInEnv), []authResult{ + {pub, []string{`env="HOME=/home/root dir"`, "no-port-forwarding"}, "user@host", "", true}, + }) +} + +func TestAuthWithQuotedCommaInEnv(t *testing.T) { + pub, pubSerialized := getTestKey() + authWithQuotedCommaInEnv := []byte(`env="HOME=/home/root,dir",no-port-forwarding ssh-rsa ` + pubSerialized + ` user@host`) + testAuthorizedKeys(t, []byte(authWithQuotedCommaInEnv), []authResult{ + {pub, []string{`env="HOME=/home/root,dir"`, "no-port-forwarding"}, "user@host", "", true}, + }) +} + +func 
TestAuthWithQuotedQuoteInEnv(t *testing.T) { + pub, pubSerialized := getTestKey() + authWithQuotedQuoteInEnv := []byte(`env="HOME=/home/\"root dir",no-port-forwarding` + "\t" + `ssh-rsa` + "\t" + pubSerialized + ` user@host`) + authWithDoubleQuotedQuote := []byte(`no-port-forwarding,env="HOME=/home/ \"root dir\"" ssh-rsa ` + pubSerialized + "\t" + `user@host`) + testAuthorizedKeys(t, []byte(authWithQuotedQuoteInEnv), []authResult{ + {pub, []string{`env="HOME=/home/\"root dir"`, "no-port-forwarding"}, "user@host", "", true}, + }) + + testAuthorizedKeys(t, []byte(authWithDoubleQuotedQuote), []authResult{ + {pub, []string{"no-port-forwarding", `env="HOME=/home/ \"root dir\""`}, "user@host", "", true}, + }) +} + +func TestAuthWithInvalidSpace(t *testing.T) { + _, pubSerialized := getTestKey() + authWithInvalidSpace := []byte(`env="HOME=/home/root dir", no-port-forwarding ssh-rsa ` + pubSerialized + ` user@host +#more to follow but still no valid keys`) + testAuthorizedKeys(t, []byte(authWithInvalidSpace), []authResult{ + {nil, nil, "", "", false}, + }) +} + +func TestAuthWithMissingQuote(t *testing.T) { + pub, pubSerialized := getTestKey() + authWithMissingQuote := []byte(`env="HOME=/home/root,no-port-forwarding ssh-rsa ` + pubSerialized + ` user@host +env="HOME=/home/root",shared-control ssh-rsa ` + pubSerialized + ` user@host`) + + testAuthorizedKeys(t, []byte(authWithMissingQuote), []authResult{ + {pub, []string{`env="HOME=/home/root"`, `shared-control`}, "user@host", "", true}, + }) +} + +func TestInvalidEntry(t *testing.T) { + authInvalid := []byte(`ssh-rsa`) + _, _, _, _, err := ParseAuthorizedKey(authInvalid) + if err == nil { + t.Errorf("got valid entry for %q", authInvalid) + } +} + +var knownHostsParseTests = []struct { + input string + err string + + marker string + comment string + hosts []string + rest string +} { + { + "", + "EOF", + + "", "", nil, "", + }, + { + "# Just a comment", + "EOF", + + "", "", nil, "", + }, + { + " \t ", + "EOF", + + "", "", nil, "", + }, + { + "localhost ssh-rsa {RSAPUB}", + "", + + "", "", []string{"localhost"}, "", + }, + { + "localhost\tssh-rsa {RSAPUB}", + "", + + "", "", []string{"localhost"}, "", + }, + { + "localhost\tssh-rsa {RSAPUB}\tcomment comment", + "", + + "", "comment comment", []string{"localhost"}, "", + }, + { + "localhost\tssh-rsa {RSAPUB}\tcomment comment\n", + "", + + "", "comment comment", []string{"localhost"}, "", + }, + { + "localhost\tssh-rsa {RSAPUB}\tcomment comment\r\n", + "", + + "", "comment comment", []string{"localhost"}, "", + }, + { + "localhost\tssh-rsa {RSAPUB}\tcomment comment\r\nnext line", + "", + + "", "comment comment", []string{"localhost"}, "next line", + }, + { + "localhost,[host2:123]\tssh-rsa {RSAPUB}\tcomment comment", + "", + + "", "comment comment", []string{"localhost","[host2:123]"}, "", + }, + { + "@marker \tlocalhost,[host2:123]\tssh-rsa {RSAPUB}", + "", + + "marker", "", []string{"localhost","[host2:123]"}, "", + }, + { + "@marker \tlocalhost,[host2:123]\tssh-rsa aabbccdd", + "short read", + + "", "", nil, "", + }, +} + +func TestKnownHostsParsing(t *testing.T) { + rsaPub, rsaPubSerialized := getTestKey() + + for i, test := range knownHostsParseTests { + var expectedKey PublicKey + const rsaKeyToken = "{RSAPUB}" + + input := test.input + if strings.Contains(input, rsaKeyToken) { + expectedKey = rsaPub + input = strings.Replace(test.input, rsaKeyToken, rsaPubSerialized, -1) + } + + marker, hosts, pubKey, comment, rest, err := ParseKnownHosts([]byte(input)) + if err != nil { + if len(test.err) == 0 { 
+ t.Errorf("#%d: unexpectedly failed with %q", i, err) + } else if !strings.Contains(err.Error(), test.err) { + t.Errorf("#%d: expected error containing %q, but got %q", i, test.err, err) + } + continue + } else if len(test.err) != 0 { + t.Errorf("#%d: succeeded but expected error including %q", i, test.err) + continue + } + + if !reflect.DeepEqual(expectedKey, pubKey) { + t.Errorf("#%d: expected key %#v, but got %#v", i, expectedKey, pubKey) + } + + if marker != test.marker { + t.Errorf("#%d: expected marker %q, but got %q", i, test.marker, marker) + } + + if comment != test.comment { + t.Errorf("#%d: expected comment %q, but got %q", i, test.comment, comment) + } + + if !reflect.DeepEqual(test.hosts, hosts) { + t.Errorf("#%d: expected hosts %#v, but got %#v", i, test.hosts, hosts) + } + + if rest := string(rest); rest != test.rest { + t.Errorf("#%d: expected remaining input to be %q, but got %q", i, test.rest, rest) + } + } +} diff --git a/vendor/golang.org/x/crypto/ssh/mac.go b/vendor/golang.org/x/crypto/ssh/mac.go new file mode 100644 index 000000000..07744ad67 --- /dev/null +++ b/vendor/golang.org/x/crypto/ssh/mac.go @@ -0,0 +1,57 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ssh + +// Message authentication support + +import ( + "crypto/hmac" + "crypto/sha1" + "crypto/sha256" + "hash" +) + +type macMode struct { + keySize int + new func(key []byte) hash.Hash +} + +// truncatingMAC wraps around a hash.Hash and truncates the output digest to +// a given size. +type truncatingMAC struct { + length int + hmac hash.Hash +} + +func (t truncatingMAC) Write(data []byte) (int, error) { + return t.hmac.Write(data) +} + +func (t truncatingMAC) Sum(in []byte) []byte { + out := t.hmac.Sum(in) + return out[:len(in)+t.length] +} + +func (t truncatingMAC) Reset() { + t.hmac.Reset() +} + +func (t truncatingMAC) Size() int { + return t.length +} + +func (t truncatingMAC) BlockSize() int { return t.hmac.BlockSize() } + +var macModes = map[string]*macMode{ + "hmac-sha2-256": {32, func(key []byte) hash.Hash { + return hmac.New(sha256.New, key) + }}, + "hmac-sha1": {20, func(key []byte) hash.Hash { + return hmac.New(sha1.New, key) + }}, + "hmac-sha1-96": {20, func(key []byte) hash.Hash { + return truncatingMAC{12, hmac.New(sha1.New, key)} + }}, +} diff --git a/vendor/golang.org/x/crypto/ssh/mempipe_test.go b/vendor/golang.org/x/crypto/ssh/mempipe_test.go new file mode 100644 index 000000000..8697cd614 --- /dev/null +++ b/vendor/golang.org/x/crypto/ssh/mempipe_test.go @@ -0,0 +1,110 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ssh + +import ( + "io" + "sync" + "testing" +) + +// An in-memory packetConn. It is safe to call Close and writePacket +// from different goroutines. 
+type memTransport struct { + eof bool + pending [][]byte + write *memTransport + sync.Mutex + *sync.Cond +} + +func (t *memTransport) readPacket() ([]byte, error) { + t.Lock() + defer t.Unlock() + for { + if len(t.pending) > 0 { + r := t.pending[0] + t.pending = t.pending[1:] + return r, nil + } + if t.eof { + return nil, io.EOF + } + t.Cond.Wait() + } +} + +func (t *memTransport) closeSelf() error { + t.Lock() + defer t.Unlock() + if t.eof { + return io.EOF + } + t.eof = true + t.Cond.Broadcast() + return nil +} + +func (t *memTransport) Close() error { + err := t.write.closeSelf() + t.closeSelf() + return err +} + +func (t *memTransport) writePacket(p []byte) error { + t.write.Lock() + defer t.write.Unlock() + if t.write.eof { + return io.EOF + } + c := make([]byte, len(p)) + copy(c, p) + t.write.pending = append(t.write.pending, c) + t.write.Cond.Signal() + return nil +} + +func memPipe() (a, b packetConn) { + t1 := memTransport{} + t2 := memTransport{} + t1.write = &t2 + t2.write = &t1 + t1.Cond = sync.NewCond(&t1.Mutex) + t2.Cond = sync.NewCond(&t2.Mutex) + return &t1, &t2 +} + +func TestMemPipe(t *testing.T) { + a, b := memPipe() + if err := a.writePacket([]byte{42}); err != nil { + t.Fatalf("writePacket: %v", err) + } + if err := a.Close(); err != nil { + t.Fatal("Close: ", err) + } + p, err := b.readPacket() + if err != nil { + t.Fatal("readPacket: ", err) + } + if len(p) != 1 || p[0] != 42 { + t.Fatalf("got %v, want {42}", p) + } + p, err = b.readPacket() + if err != io.EOF { + t.Fatalf("got %v, %v, want EOF", p, err) + } +} + +func TestDoubleClose(t *testing.T) { + a, _ := memPipe() + err := a.Close() + if err != nil { + t.Errorf("Close: %v", err) + } + err = a.Close() + if err != io.EOF { + t.Errorf("expect EOF on double close.") + } +} diff --git a/vendor/golang.org/x/crypto/ssh/messages.go b/vendor/golang.org/x/crypto/ssh/messages.go new file mode 100644 index 000000000..eaf610669 --- /dev/null +++ b/vendor/golang.org/x/crypto/ssh/messages.go @@ -0,0 +1,725 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ssh + +import ( + "bytes" + "encoding/binary" + "errors" + "fmt" + "io" + "math/big" + "reflect" + "strconv" +) + +// These are SSH message type numbers. They are scattered around several +// documents but many were taken from [SSH-PARAMETERS]. +const ( + msgIgnore = 2 + msgUnimplemented = 3 + msgDebug = 4 + msgNewKeys = 21 + + // Standard authentication messages + msgUserAuthSuccess = 52 + msgUserAuthBanner = 53 +) + +// SSH messages: +// +// These structures mirror the wire format of the corresponding SSH messages. +// They are marshaled using reflection with the marshal and unmarshal functions +// in this file. The only wrinkle is that a final member of type []byte with a +// ssh tag of "rest" receives the remainder of a packet when unmarshaling. + +// See RFC 4253, section 11.1. +const msgDisconnect = 1 + +// disconnectMsg is the message that signals a disconnect. It is also +// the error type returned from mux.Wait() +type disconnectMsg struct { + Reason uint32 `sshtype:"1"` + Message string + Language string +} + +func (d *disconnectMsg) Error() string { + return fmt.Sprintf("ssh: disconnect reason %d: %s", d.Reason, d.Message) +} + +// See RFC 4253, section 7.1. 
+const msgKexInit = 20
+
+type kexInitMsg struct {
+	Cookie                  [16]byte `sshtype:"20"`
+	KexAlgos                []string
+	ServerHostKeyAlgos      []string
+	CiphersClientServer     []string
+	CiphersServerClient     []string
+	MACsClientServer        []string
+	MACsServerClient        []string
+	CompressionClientServer []string
+	CompressionServerClient []string
+	LanguagesClientServer   []string
+	LanguagesServerClient   []string
+	FirstKexFollows         bool
+	Reserved                uint32
+}
+
+// See RFC 4253, section 8.
+
+// Diffie-Hellman
+const msgKexDHInit = 30
+
+type kexDHInitMsg struct {
+	X *big.Int `sshtype:"30"`
+}
+
+const msgKexECDHInit = 30
+
+type kexECDHInitMsg struct {
+	ClientPubKey []byte `sshtype:"30"`
+}
+
+const msgKexECDHReply = 31
+
+type kexECDHReplyMsg struct {
+	HostKey         []byte `sshtype:"31"`
+	EphemeralPubKey []byte
+	Signature       []byte
+}
+
+const msgKexDHReply = 31
+
+type kexDHReplyMsg struct {
+	HostKey   []byte `sshtype:"31"`
+	Y         *big.Int
+	Signature []byte
+}
+
+// See RFC 4253, section 10.
+const msgServiceRequest = 5
+
+type serviceRequestMsg struct {
+	Service string `sshtype:"5"`
+}
+
+// See RFC 4253, section 10.
+const msgServiceAccept = 6
+
+type serviceAcceptMsg struct {
+	Service string `sshtype:"6"`
+}
+
+// See RFC 4252, section 5.
+const msgUserAuthRequest = 50
+
+type userAuthRequestMsg struct {
+	User    string `sshtype:"50"`
+	Service string
+	Method  string
+	Payload []byte `ssh:"rest"`
+}
+
+// See RFC 4252, section 5.1
+const msgUserAuthFailure = 51
+
+type userAuthFailureMsg struct {
+	Methods        []string `sshtype:"51"`
+	PartialSuccess bool
+}
+
+// See RFC 4256, section 3.2
+const msgUserAuthInfoRequest = 60
+const msgUserAuthInfoResponse = 61
+
+type userAuthInfoRequestMsg struct {
+	User               string `sshtype:"60"`
+	Instruction        string
+	DeprecatedLanguage string
+	NumPrompts         uint32
+	Prompts            []byte `ssh:"rest"`
+}
+
+// See RFC 4254, section 5.1.
+const msgChannelOpen = 90
+
+type channelOpenMsg struct {
+	ChanType         string `sshtype:"90"`
+	PeersId          uint32
+	PeersWindow      uint32
+	MaxPacketSize    uint32
+	TypeSpecificData []byte `ssh:"rest"`
+}
+
+const msgChannelExtendedData = 95
+const msgChannelData = 94
+
+// See RFC 4254, section 5.1.
+const msgChannelOpenConfirm = 91
+
+type channelOpenConfirmMsg struct {
+	PeersId          uint32 `sshtype:"91"`
+	MyId             uint32
+	MyWindow         uint32
+	MaxPacketSize    uint32
+	TypeSpecificData []byte `ssh:"rest"`
+}
+
+// See RFC 4254, section 5.1.
+const msgChannelOpenFailure = 92
+
+type channelOpenFailureMsg struct {
+	PeersId  uint32 `sshtype:"92"`
+	Reason   RejectionReason
+	Message  string
+	Language string
+}
+
+const msgChannelRequest = 98
+
+type channelRequestMsg struct {
+	PeersId             uint32 `sshtype:"98"`
+	Request             string
+	WantReply           bool
+	RequestSpecificData []byte `ssh:"rest"`
+}
+
+// See RFC 4254, section 5.4.
+const msgChannelSuccess = 99
+
+type channelRequestSuccessMsg struct {
+	PeersId uint32 `sshtype:"99"`
+}
+
+// See RFC 4254, section 5.4.
+const msgChannelFailure = 100
+
+type channelRequestFailureMsg struct {
+	PeersId uint32 `sshtype:"100"`
+}
+
+// See RFC 4254, section 5.3
+const msgChannelClose = 97
+
+type channelCloseMsg struct {
+	PeersId uint32 `sshtype:"97"`
+}
+
+// See RFC 4254, section 5.3
+const msgChannelEOF = 96
+
+type channelEOFMsg struct {
+	PeersId uint32 `sshtype:"96"`
+}
+
+// See RFC 4254, section 4
+const msgGlobalRequest = 80
+
+type globalRequestMsg struct {
+	Type      string `sshtype:"80"`
+	WantReply bool
+	Data      []byte `ssh:"rest"`
+}
+
+// See RFC 4254, section 4
+const msgRequestSuccess = 81
+
+type globalRequestSuccessMsg struct {
+	Data []byte `ssh:"rest" sshtype:"81"`
+}
+
+// See RFC 4254, section 4
+const msgRequestFailure = 82
+
+type globalRequestFailureMsg struct {
+	Data []byte `ssh:"rest" sshtype:"82"`
+}
+
+// See RFC 4254, section 5.2
+const msgChannelWindowAdjust = 93
+
+type windowAdjustMsg struct {
+	PeersId         uint32 `sshtype:"93"`
+	AdditionalBytes uint32
+}
+
+// See RFC 4252, section 7
+const msgUserAuthPubKeyOk = 60
+
+type userAuthPubKeyOkMsg struct {
+	Algo   string `sshtype:"60"`
+	PubKey []byte
+}
+
+// typeTag returns the type byte for the given type. The type should be
+// a struct.
+func typeTag(structType reflect.Type) byte {
+	var tag byte
+	var tagStr string
+	tagStr = structType.Field(0).Tag.Get("sshtype")
+	i, err := strconv.Atoi(tagStr)
+	if err == nil {
+		tag = byte(i)
+	}
+	return tag
+}
+
+func fieldError(t reflect.Type, field int, problem string) error {
+	if problem != "" {
+		problem = ": " + problem
+	}
+	return fmt.Errorf("ssh: unmarshal error for field %s of type %s%s", t.Field(field).Name, t.Name(), problem)
+}
+
+var errShortRead = errors.New("ssh: short read")
+
+// Unmarshal parses data in SSH wire format into a structure. The out
+// argument should be a pointer to struct. If the first member of the
+// struct has the "sshtype" tag set to a number in decimal, the packet
+// must start with that number. In case of error, Unmarshal returns a
+// ParseError or UnexpectedMessageError.
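+//
+// A minimal sketch (editor's addition, not part of the upstream file):
+//
+//	var req serviceRequestMsg
+//	if err := Unmarshal(packet, &req); err != nil {
+//		// packet did not carry a well-formed msgServiceRequest (type 5)
+//	}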
+func Unmarshal(data []byte, out interface{}) error {
+	v := reflect.ValueOf(out).Elem()
+	structType := v.Type()
+	expectedType := typeTag(structType)
+	if len(data) == 0 {
+		return parseError(expectedType)
+	}
+	if expectedType > 0 {
+		if data[0] != expectedType {
+			return unexpectedMessageError(expectedType, data[0])
+		}
+		data = data[1:]
+	}
+
+	var ok bool
+	for i := 0; i < v.NumField(); i++ {
+		field := v.Field(i)
+		t := field.Type()
+		switch t.Kind() {
+		case reflect.Bool:
+			if len(data) < 1 {
+				return errShortRead
+			}
+			field.SetBool(data[0] != 0)
+			data = data[1:]
+		case reflect.Array:
+			if t.Elem().Kind() != reflect.Uint8 {
+				return fieldError(structType, i, "array of unsupported type")
+			}
+			if len(data) < t.Len() {
+				return errShortRead
+			}
+			for j, n := 0, t.Len(); j < n; j++ {
+				field.Index(j).Set(reflect.ValueOf(data[j]))
+			}
+			data = data[t.Len():]
+		case reflect.Uint64:
+			var u64 uint64
+			if u64, data, ok = parseUint64(data); !ok {
+				return errShortRead
+			}
+			field.SetUint(u64)
+		case reflect.Uint32:
+			var u32 uint32
+			if u32, data, ok = parseUint32(data); !ok {
+				return errShortRead
+			}
+			field.SetUint(uint64(u32))
+		case reflect.Uint8:
+			if len(data) < 1 {
+				return errShortRead
+			}
+			field.SetUint(uint64(data[0]))
+			data = data[1:]
+		case reflect.String:
+			var s []byte
+			if s, data, ok = parseString(data); !ok {
+				return fieldError(structType, i, "")
+			}
+			field.SetString(string(s))
+		case reflect.Slice:
+			switch t.Elem().Kind() {
+			case reflect.Uint8:
+				if structType.Field(i).Tag.Get("ssh") == "rest" {
+					field.Set(reflect.ValueOf(data))
+					data = nil
+				} else {
+					var s []byte
+					if s, data, ok = parseString(data); !ok {
+						return errShortRead
+					}
+					field.Set(reflect.ValueOf(s))
+				}
+			case reflect.String:
+				var nl []string
+				if nl, data, ok = parseNameList(data); !ok {
+					return errShortRead
+				}
+				field.Set(reflect.ValueOf(nl))
+			default:
+				return fieldError(structType, i, "slice of unsupported type")
+			}
+		case reflect.Ptr:
+			if t == bigIntType {
+				var n *big.Int
+				if n, data, ok = parseInt(data); !ok {
+					return errShortRead
+				}
+				field.Set(reflect.ValueOf(n))
+			} else {
+				return fieldError(structType, i, "pointer to unsupported type")
+			}
+		default:
+			return fieldError(structType, i, "unsupported type")
+		}
+	}
+
+	if len(data) != 0 {
+		return parseError(expectedType)
+	}
+
+	return nil
+}
+
+// Marshal serializes the message in msg to SSH wire format. The msg
+// argument should be a struct or pointer to struct. If the first
+// member has the "sshtype" tag set to a number in decimal, that
+// number is prepended to the result. If the last member has the
+// "ssh" tag set to "rest", its contents are appended to the output.
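+//
+// A minimal sketch (editor's addition, not part of the upstream file):
+//
+//	packet := Marshal(&serviceRequestMsg{Service: "ssh-userauth"})
+//	// packet[0] is msgServiceRequest (5), followed by the
+//	// length-prefixed string "ssh-userauth"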
+func Marshal(msg interface{}) []byte { + out := make([]byte, 0, 64) + return marshalStruct(out, msg) +} + +func marshalStruct(out []byte, msg interface{}) []byte { + v := reflect.Indirect(reflect.ValueOf(msg)) + msgType := typeTag(v.Type()) + if msgType > 0 { + out = append(out, msgType) + } + + for i, n := 0, v.NumField(); i < n; i++ { + field := v.Field(i) + switch t := field.Type(); t.Kind() { + case reflect.Bool: + var v uint8 + if field.Bool() { + v = 1 + } + out = append(out, v) + case reflect.Array: + if t.Elem().Kind() != reflect.Uint8 { + panic(fmt.Sprintf("array of non-uint8 in field %d: %T", i, field.Interface())) + } + for j, l := 0, t.Len(); j < l; j++ { + out = append(out, uint8(field.Index(j).Uint())) + } + case reflect.Uint32: + out = appendU32(out, uint32(field.Uint())) + case reflect.Uint64: + out = appendU64(out, uint64(field.Uint())) + case reflect.Uint8: + out = append(out, uint8(field.Uint())) + case reflect.String: + s := field.String() + out = appendInt(out, len(s)) + out = append(out, s...) + case reflect.Slice: + switch t.Elem().Kind() { + case reflect.Uint8: + if v.Type().Field(i).Tag.Get("ssh") != "rest" { + out = appendInt(out, field.Len()) + } + out = append(out, field.Bytes()...) + case reflect.String: + offset := len(out) + out = appendU32(out, 0) + if n := field.Len(); n > 0 { + for j := 0; j < n; j++ { + f := field.Index(j) + if j != 0 { + out = append(out, ',') + } + out = append(out, f.String()...) + } + // overwrite length value + binary.BigEndian.PutUint32(out[offset:], uint32(len(out)-offset-4)) + } + default: + panic(fmt.Sprintf("slice of unknown type in field %d: %T", i, field.Interface())) + } + case reflect.Ptr: + if t == bigIntType { + var n *big.Int + nValue := reflect.ValueOf(&n) + nValue.Elem().Set(field) + needed := intLength(n) + oldLength := len(out) + + if cap(out)-len(out) < needed { + newOut := make([]byte, len(out), 2*(len(out)+needed)) + copy(newOut, out) + out = newOut + } + out = out[:oldLength+needed] + marshalInt(out[oldLength:], n) + } else { + panic(fmt.Sprintf("pointer to unknown type in field %d: %T", i, field.Interface())) + } + } + } + + return out +} + +var bigOne = big.NewInt(1) + +func parseString(in []byte) (out, rest []byte, ok bool) { + if len(in) < 4 { + return + } + length := binary.BigEndian.Uint32(in) + in = in[4:] + if uint32(len(in)) < length { + return + } + out = in[:length] + rest = in[length:] + ok = true + return +} + +var ( + comma = []byte{','} + emptyNameList = []string{} +) + +func parseNameList(in []byte) (out []string, rest []byte, ok bool) { + contents, rest, ok := parseString(in) + if !ok { + return + } + if len(contents) == 0 { + out = emptyNameList + return + } + parts := bytes.Split(contents, comma) + out = make([]string, len(parts)) + for i, part := range parts { + out[i] = string(part) + } + return +} + +func parseInt(in []byte) (out *big.Int, rest []byte, ok bool) { + contents, rest, ok := parseString(in) + if !ok { + return + } + out = new(big.Int) + + if len(contents) > 0 && contents[0]&0x80 == 0x80 { + // This is a negative number + notBytes := make([]byte, len(contents)) + for i := range notBytes { + notBytes[i] = ^contents[i] + } + out.SetBytes(notBytes) + out.Add(out, bigOne) + out.Neg(out) + } else { + // Positive number + out.SetBytes(contents) + } + ok = true + return +} + +func parseUint32(in []byte) (uint32, []byte, bool) { + if len(in) < 4 { + return 0, nil, false + } + return binary.BigEndian.Uint32(in), in[4:], true +} + +func parseUint64(in []byte) (uint64, []byte, bool) { + if 
len(in) < 8 { + return 0, nil, false + } + return binary.BigEndian.Uint64(in), in[8:], true +} + +func intLength(n *big.Int) int { + length := 4 /* length bytes */ + if n.Sign() < 0 { + nMinus1 := new(big.Int).Neg(n) + nMinus1.Sub(nMinus1, bigOne) + bitLen := nMinus1.BitLen() + if bitLen%8 == 0 { + // The number will need 0xff padding + length++ + } + length += (bitLen + 7) / 8 + } else if n.Sign() == 0 { + // A zero is the zero length string + } else { + bitLen := n.BitLen() + if bitLen%8 == 0 { + // The number will need 0x00 padding + length++ + } + length += (bitLen + 7) / 8 + } + + return length +} + +func marshalUint32(to []byte, n uint32) []byte { + binary.BigEndian.PutUint32(to, n) + return to[4:] +} + +func marshalUint64(to []byte, n uint64) []byte { + binary.BigEndian.PutUint64(to, n) + return to[8:] +} + +func marshalInt(to []byte, n *big.Int) []byte { + lengthBytes := to + to = to[4:] + length := 0 + + if n.Sign() < 0 { + // A negative number has to be converted to two's-complement + // form. So we'll subtract 1 and invert. If the + // most-significant-bit isn't set then we'll need to pad the + // beginning with 0xff in order to keep the number negative. + nMinus1 := new(big.Int).Neg(n) + nMinus1.Sub(nMinus1, bigOne) + bytes := nMinus1.Bytes() + for i := range bytes { + bytes[i] ^= 0xff + } + if len(bytes) == 0 || bytes[0]&0x80 == 0 { + to[0] = 0xff + to = to[1:] + length++ + } + nBytes := copy(to, bytes) + to = to[nBytes:] + length += nBytes + } else if n.Sign() == 0 { + // A zero is the zero length string + } else { + bytes := n.Bytes() + if len(bytes) > 0 && bytes[0]&0x80 != 0 { + // We'll have to pad this with a 0x00 in order to + // stop it looking like a negative number. + to[0] = 0 + to = to[1:] + length++ + } + nBytes := copy(to, bytes) + to = to[nBytes:] + length += nBytes + } + + lengthBytes[0] = byte(length >> 24) + lengthBytes[1] = byte(length >> 16) + lengthBytes[2] = byte(length >> 8) + lengthBytes[3] = byte(length) + return to +} + +func writeInt(w io.Writer, n *big.Int) { + length := intLength(n) + buf := make([]byte, length) + marshalInt(buf, n) + w.Write(buf) +} + +func writeString(w io.Writer, s []byte) { + var lengthBytes [4]byte + lengthBytes[0] = byte(len(s) >> 24) + lengthBytes[1] = byte(len(s) >> 16) + lengthBytes[2] = byte(len(s) >> 8) + lengthBytes[3] = byte(len(s)) + w.Write(lengthBytes[:]) + w.Write(s) +} + +func stringLength(n int) int { + return 4 + n +} + +func marshalString(to []byte, s []byte) []byte { + to[0] = byte(len(s) >> 24) + to[1] = byte(len(s) >> 16) + to[2] = byte(len(s) >> 8) + to[3] = byte(len(s)) + to = to[4:] + copy(to, s) + return to[len(s):] +} + +var bigIntType = reflect.TypeOf((*big.Int)(nil)) + +// Decode a packet into its corresponding message. 
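+// Callers typically type-switch on the result; a minimal sketch (editor's
+// addition, not part of the upstream file):
+//
+//	msg, err := decode(packet)
+//	if err != nil {
+//		return err
+//	}
+//	switch msg := msg.(type) {
+//	case *disconnectMsg:
+//		return msg // the peer is going away; disconnectMsg doubles as an error
+//	}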
+func decode(packet []byte) (interface{}, error) { + var msg interface{} + switch packet[0] { + case msgDisconnect: + msg = new(disconnectMsg) + case msgServiceRequest: + msg = new(serviceRequestMsg) + case msgServiceAccept: + msg = new(serviceAcceptMsg) + case msgKexInit: + msg = new(kexInitMsg) + case msgKexDHInit: + msg = new(kexDHInitMsg) + case msgKexDHReply: + msg = new(kexDHReplyMsg) + case msgUserAuthRequest: + msg = new(userAuthRequestMsg) + case msgUserAuthFailure: + msg = new(userAuthFailureMsg) + case msgUserAuthPubKeyOk: + msg = new(userAuthPubKeyOkMsg) + case msgGlobalRequest: + msg = new(globalRequestMsg) + case msgRequestSuccess: + msg = new(globalRequestSuccessMsg) + case msgRequestFailure: + msg = new(globalRequestFailureMsg) + case msgChannelOpen: + msg = new(channelOpenMsg) + case msgChannelOpenConfirm: + msg = new(channelOpenConfirmMsg) + case msgChannelOpenFailure: + msg = new(channelOpenFailureMsg) + case msgChannelWindowAdjust: + msg = new(windowAdjustMsg) + case msgChannelEOF: + msg = new(channelEOFMsg) + case msgChannelClose: + msg = new(channelCloseMsg) + case msgChannelRequest: + msg = new(channelRequestMsg) + case msgChannelSuccess: + msg = new(channelRequestSuccessMsg) + case msgChannelFailure: + msg = new(channelRequestFailureMsg) + default: + return nil, unexpectedMessageError(0, packet[0]) + } + if err := Unmarshal(packet, msg); err != nil { + return nil, err + } + return msg, nil +} diff --git a/vendor/golang.org/x/crypto/ssh/messages_test.go b/vendor/golang.org/x/crypto/ssh/messages_test.go new file mode 100644 index 000000000..955b5127f --- /dev/null +++ b/vendor/golang.org/x/crypto/ssh/messages_test.go @@ -0,0 +1,254 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package ssh + +import ( + "bytes" + "math/big" + "math/rand" + "reflect" + "testing" + "testing/quick" +) + +var intLengthTests = []struct { + val, length int +}{ + {0, 4 + 0}, + {1, 4 + 1}, + {127, 4 + 1}, + {128, 4 + 2}, + {-1, 4 + 1}, +} + +func TestIntLength(t *testing.T) { + for _, test := range intLengthTests { + v := new(big.Int).SetInt64(int64(test.val)) + length := intLength(v) + if length != test.length { + t.Errorf("For %d, got length %d but expected %d", test.val, length, test.length) + } + } +} + +type msgAllTypes struct { + Bool bool `sshtype:"21"` + Array [16]byte + Uint64 uint64 + Uint32 uint32 + Uint8 uint8 + String string + Strings []string + Bytes []byte + Int *big.Int + Rest []byte `ssh:"rest"` +} + +func (t *msgAllTypes) Generate(rand *rand.Rand, size int) reflect.Value { + m := &msgAllTypes{} + m.Bool = rand.Intn(2) == 1 + randomBytes(m.Array[:], rand) + m.Uint64 = uint64(rand.Int63n(1<<63 - 1)) + m.Uint32 = uint32(rand.Intn((1 << 31) - 1)) + m.Uint8 = uint8(rand.Intn(1 << 8)) + m.String = string(m.Array[:]) + m.Strings = randomNameList(rand) + m.Bytes = m.Array[:] + m.Int = randomInt(rand) + m.Rest = m.Array[:] + return reflect.ValueOf(m) +} + +func TestMarshalUnmarshal(t *testing.T) { + rand := rand.New(rand.NewSource(0)) + iface := &msgAllTypes{} + ty := reflect.ValueOf(iface).Type() + + n := 100 + if testing.Short() { + n = 5 + } + for j := 0; j < n; j++ { + v, ok := quick.Value(ty, rand) + if !ok { + t.Errorf("failed to create value") + break + } + + m1 := v.Elem().Interface() + m2 := iface + + marshaled := Marshal(m1) + if err := Unmarshal(marshaled, m2); err != nil { + t.Errorf("Unmarshal %#v: %s", m1, err) + break + } + + if !reflect.DeepEqual(v.Interface(), m2) { + t.Errorf("got: %#v\nwant:%#v\n%x", m2, m1, marshaled) + break + } + } +} + +func TestUnmarshalEmptyPacket(t *testing.T) { + var b []byte + var m channelRequestSuccessMsg + if err := Unmarshal(b, &m); err == nil { + t.Fatalf("unmarshal of empty slice succeeded") + } +} + +func TestUnmarshalUnexpectedPacket(t *testing.T) { + type S struct { + I uint32 `sshtype:"43"` + S string + B bool + } + + s := S{11, "hello", true} + packet := Marshal(s) + packet[0] = 42 + roundtrip := S{} + err := Unmarshal(packet, &roundtrip) + if err == nil { + t.Fatal("expected error, not nil") + } +} + +func TestMarshalPtr(t *testing.T) { + s := struct { + S string + }{"hello"} + + m1 := Marshal(s) + m2 := Marshal(&s) + if !bytes.Equal(m1, m2) { + t.Errorf("got %q, want %q for marshaled pointer", m2, m1) + } +} + +func TestBareMarshalUnmarshal(t *testing.T) { + type S struct { + I uint32 + S string + B bool + } + + s := S{42, "hello", true} + packet := Marshal(s) + roundtrip := S{} + Unmarshal(packet, &roundtrip) + + if !reflect.DeepEqual(s, roundtrip) { + t.Errorf("got %#v, want %#v", roundtrip, s) + } +} + +func TestBareMarshal(t *testing.T) { + type S2 struct { + I uint32 + } + s := S2{42} + packet := Marshal(s) + i, rest, ok := parseUint32(packet) + if len(rest) > 0 || !ok { + t.Errorf("parseInt(%q): parse error", packet) + } + if i != s.I { + t.Errorf("got %d, want %d", i, s.I) + } +} + +func TestUnmarshalShortKexInitPacket(t *testing.T) { + // This used to panic. 
+ // Issue 11348 + packet := []byte{0x14, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0xff, 0xff, 0xff, 0xff} + kim := &kexInitMsg{} + if err := Unmarshal(packet, kim); err == nil { + t.Error("truncated packet unmarshaled without error") + } +} + +func randomBytes(out []byte, rand *rand.Rand) { + for i := 0; i < len(out); i++ { + out[i] = byte(rand.Int31()) + } +} + +func randomNameList(rand *rand.Rand) []string { + ret := make([]string, rand.Int31()&15) + for i := range ret { + s := make([]byte, 1+(rand.Int31()&15)) + for j := range s { + s[j] = 'a' + uint8(rand.Int31()&15) + } + ret[i] = string(s) + } + return ret +} + +func randomInt(rand *rand.Rand) *big.Int { + return new(big.Int).SetInt64(int64(int32(rand.Uint32()))) +} + +func (*kexInitMsg) Generate(rand *rand.Rand, size int) reflect.Value { + ki := &kexInitMsg{} + randomBytes(ki.Cookie[:], rand) + ki.KexAlgos = randomNameList(rand) + ki.ServerHostKeyAlgos = randomNameList(rand) + ki.CiphersClientServer = randomNameList(rand) + ki.CiphersServerClient = randomNameList(rand) + ki.MACsClientServer = randomNameList(rand) + ki.MACsServerClient = randomNameList(rand) + ki.CompressionClientServer = randomNameList(rand) + ki.CompressionServerClient = randomNameList(rand) + ki.LanguagesClientServer = randomNameList(rand) + ki.LanguagesServerClient = randomNameList(rand) + if rand.Int31()&1 == 1 { + ki.FirstKexFollows = true + } + return reflect.ValueOf(ki) +} + +func (*kexDHInitMsg) Generate(rand *rand.Rand, size int) reflect.Value { + dhi := &kexDHInitMsg{} + dhi.X = randomInt(rand) + return reflect.ValueOf(dhi) +} + +var ( + _kexInitMsg = new(kexInitMsg).Generate(rand.New(rand.NewSource(0)), 10).Elem().Interface() + _kexDHInitMsg = new(kexDHInitMsg).Generate(rand.New(rand.NewSource(0)), 10).Elem().Interface() + + _kexInit = Marshal(_kexInitMsg) + _kexDHInit = Marshal(_kexDHInitMsg) +) + +func BenchmarkMarshalKexInitMsg(b *testing.B) { + for i := 0; i < b.N; i++ { + Marshal(_kexInitMsg) + } +} + +func BenchmarkUnmarshalKexInitMsg(b *testing.B) { + m := new(kexInitMsg) + for i := 0; i < b.N; i++ { + Unmarshal(_kexInit, m) + } +} + +func BenchmarkMarshalKexDHInitMsg(b *testing.B) { + for i := 0; i < b.N; i++ { + Marshal(_kexDHInitMsg) + } +} + +func BenchmarkUnmarshalKexDHInitMsg(b *testing.B) { + m := new(kexDHInitMsg) + for i := 0; i < b.N; i++ { + Unmarshal(_kexDHInit, m) + } +} diff --git a/vendor/golang.org/x/crypto/ssh/mux.go b/vendor/golang.org/x/crypto/ssh/mux.go new file mode 100644 index 000000000..321880ad9 --- /dev/null +++ b/vendor/golang.org/x/crypto/ssh/mux.go @@ -0,0 +1,356 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ssh + +import ( + "encoding/binary" + "fmt" + "io" + "log" + "sync" + "sync/atomic" +) + +// debugMux, if set, causes messages in the connection protocol to be +// logged. +const debugMux = false + +// chanList is a thread safe channel list. +type chanList struct { + // protects concurrent access to chans + sync.Mutex + + // chans are indexed by the local id of the channel, which the + // other side should send in the PeersId field. + chans []*channel + + // This is a debugging aid: it offsets all IDs by this + // amount. This helps distinguish otherwise identical + // server/client muxes + offset uint32 +} + +// Assigns a channel ID to the given channel. 
+func (c *chanList) add(ch *channel) uint32 { + c.Lock() + defer c.Unlock() + for i := range c.chans { + if c.chans[i] == nil { + c.chans[i] = ch + return uint32(i) + c.offset + } + } + c.chans = append(c.chans, ch) + return uint32(len(c.chans)-1) + c.offset +} + +// getChan returns the channel for the given ID. +func (c *chanList) getChan(id uint32) *channel { + id -= c.offset + + c.Lock() + defer c.Unlock() + if id < uint32(len(c.chans)) { + return c.chans[id] + } + return nil +} + +func (c *chanList) remove(id uint32) { + id -= c.offset + c.Lock() + if id < uint32(len(c.chans)) { + c.chans[id] = nil + } + c.Unlock() +} + +// dropAll forgets all channels it knows, returning them in a slice. +func (c *chanList) dropAll() []*channel { + c.Lock() + defer c.Unlock() + var r []*channel + + for _, ch := range c.chans { + if ch == nil { + continue + } + r = append(r, ch) + } + c.chans = nil + return r +} + +// mux represents the state for the SSH connection protocol, which +// multiplexes many channels onto a single packet transport. +type mux struct { + conn packetConn + chanList chanList + + incomingChannels chan NewChannel + + globalSentMu sync.Mutex + globalResponses chan interface{} + incomingRequests chan *Request + + errCond *sync.Cond + err error +} + +// When debugging, each new chanList instantiation has a different +// offset. +var globalOff uint32 + +func (m *mux) Wait() error { + m.errCond.L.Lock() + defer m.errCond.L.Unlock() + for m.err == nil { + m.errCond.Wait() + } + return m.err +} + +// newMux returns a mux that runs over the given connection. +func newMux(p packetConn) *mux { + m := &mux{ + conn: p, + incomingChannels: make(chan NewChannel, 16), + globalResponses: make(chan interface{}, 1), + incomingRequests: make(chan *Request, 16), + errCond: newCond(), + } + if debugMux { + m.chanList.offset = atomic.AddUint32(&globalOff, 1) + } + + go m.loop() + return m +} + +func (m *mux) sendMessage(msg interface{}) error { + p := Marshal(msg) + return m.conn.writePacket(p) +} + +func (m *mux) SendRequest(name string, wantReply bool, payload []byte) (bool, []byte, error) { + if wantReply { + m.globalSentMu.Lock() + defer m.globalSentMu.Unlock() + } + + if err := m.sendMessage(globalRequestMsg{ + Type: name, + WantReply: wantReply, + Data: payload, + }); err != nil { + return false, nil, err + } + + if !wantReply { + return false, nil, nil + } + + msg, ok := <-m.globalResponses + if !ok { + return false, nil, io.EOF + } + switch msg := msg.(type) { + case *globalRequestFailureMsg: + return false, msg.Data, nil + case *globalRequestSuccessMsg: + return true, msg.Data, nil + default: + return false, nil, fmt.Errorf("ssh: unexpected response to request: %#v", msg) + } +} + +// ackRequest must be called after processing a global request that +// has WantReply set. +func (m *mux) ackRequest(ok bool, data []byte) error { + if ok { + return m.sendMessage(globalRequestSuccessMsg{Data: data}) + } + return m.sendMessage(globalRequestFailureMsg{Data: data}) +} + +// TODO(hanwen): Disconnect is a transport layer message. We should +// probably send and receive Disconnect somewhere in the transport +// code. + +// Disconnect sends a disconnect message. +func (m *mux) Disconnect(reason uint32, message string) error { + return m.sendMessage(disconnectMsg{ + Reason: reason, + Message: message, + }) +} + +func (m *mux) Close() error { + return m.conn.Close() +} + +// loop runs the connection machine. It will process packets until an +// error is encountered. To synchronize on loop exit, use mux.Wait. 
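+//
+// A minimal sketch of the synchronization pattern (editor's addition, not
+// part of the upstream file; conn stands for any packetConn):
+//
+//	m := newMux(conn) // newMux starts loop in a goroutine
+//	err := m.Wait()   // blocks until loop has exited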
+func (m *mux) loop() { + var err error + for err == nil { + err = m.onePacket() + } + + for _, ch := range m.chanList.dropAll() { + ch.close() + } + + close(m.incomingChannels) + close(m.incomingRequests) + close(m.globalResponses) + + m.conn.Close() + + m.errCond.L.Lock() + m.err = err + m.errCond.Broadcast() + m.errCond.L.Unlock() + + if debugMux { + log.Println("loop exit", err) + } +} + +// onePacket reads and processes one packet. +func (m *mux) onePacket() error { + packet, err := m.conn.readPacket() + if err != nil { + return err + } + + if debugMux { + if packet[0] == msgChannelData || packet[0] == msgChannelExtendedData { + log.Printf("decoding(%d): data packet - %d bytes", m.chanList.offset, len(packet)) + } else { + p, _ := decode(packet) + log.Printf("decoding(%d): %d %#v - %d bytes", m.chanList.offset, packet[0], p, len(packet)) + } + } + + switch packet[0] { + case msgNewKeys: + // Ignore notification of key change. + return nil + case msgDisconnect: + return m.handleDisconnect(packet) + case msgChannelOpen: + return m.handleChannelOpen(packet) + case msgGlobalRequest, msgRequestSuccess, msgRequestFailure: + return m.handleGlobalPacket(packet) + } + + // assume a channel packet. + if len(packet) < 5 { + return parseError(packet[0]) + } + id := binary.BigEndian.Uint32(packet[1:]) + ch := m.chanList.getChan(id) + if ch == nil { + return fmt.Errorf("ssh: invalid channel %d", id) + } + + return ch.handlePacket(packet) +} + +func (m *mux) handleDisconnect(packet []byte) error { + var d disconnectMsg + if err := Unmarshal(packet, &d); err != nil { + return err + } + + if debugMux { + log.Printf("caught disconnect: %v", d) + } + return &d +} + +func (m *mux) handleGlobalPacket(packet []byte) error { + msg, err := decode(packet) + if err != nil { + return err + } + + switch msg := msg.(type) { + case *globalRequestMsg: + m.incomingRequests <- &Request{ + Type: msg.Type, + WantReply: msg.WantReply, + Payload: msg.Data, + mux: m, + } + case *globalRequestSuccessMsg, *globalRequestFailureMsg: + m.globalResponses <- msg + default: + panic(fmt.Sprintf("not a global message %#v", msg)) + } + + return nil +} + +// handleChannelOpen schedules a channel to be Accept()ed. 
+func (m *mux) handleChannelOpen(packet []byte) error { + var msg channelOpenMsg + if err := Unmarshal(packet, &msg); err != nil { + return err + } + + if msg.MaxPacketSize < minPacketLength || msg.MaxPacketSize > 1<<31 { + failMsg := channelOpenFailureMsg{ + PeersId: msg.PeersId, + Reason: ConnectionFailed, + Message: "invalid request", + Language: "en_US.UTF-8", + } + return m.sendMessage(failMsg) + } + + c := m.newChannel(msg.ChanType, channelInbound, msg.TypeSpecificData) + c.remoteId = msg.PeersId + c.maxRemotePayload = msg.MaxPacketSize + c.remoteWin.add(msg.PeersWindow) + m.incomingChannels <- c + return nil +} + +func (m *mux) OpenChannel(chanType string, extra []byte) (Channel, <-chan *Request, error) { + ch, err := m.openChannel(chanType, extra) + if err != nil { + return nil, nil, err + } + + return ch, ch.incomingRequests, nil +} + +func (m *mux) openChannel(chanType string, extra []byte) (*channel, error) { + ch := m.newChannel(chanType, channelOutbound, extra) + + ch.maxIncomingPayload = channelMaxPacket + + open := channelOpenMsg{ + ChanType: chanType, + PeersWindow: ch.myWindow, + MaxPacketSize: ch.maxIncomingPayload, + TypeSpecificData: extra, + PeersId: ch.localId, + } + if err := m.sendMessage(open); err != nil { + return nil, err + } + + switch msg := (<-ch.msg).(type) { + case *channelOpenConfirmMsg: + return ch, nil + case *channelOpenFailureMsg: + return nil, &OpenChannelError{msg.Reason, msg.Message} + default: + return nil, fmt.Errorf("ssh: unexpected packet in response to channel open: %T", msg) + } +} diff --git a/vendor/golang.org/x/crypto/ssh/mux_test.go b/vendor/golang.org/x/crypto/ssh/mux_test.go new file mode 100644 index 000000000..523038960 --- /dev/null +++ b/vendor/golang.org/x/crypto/ssh/mux_test.go @@ -0,0 +1,525 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ssh + +import ( + "io" + "io/ioutil" + "sync" + "testing" +) + +func muxPair() (*mux, *mux) { + a, b := memPipe() + + s := newMux(a) + c := newMux(b) + + return s, c +} + +// Returns both ends of a channel, and the mux for the the 2nd +// channel. +func channelPair(t *testing.T) (*channel, *channel, *mux) { + c, s := muxPair() + + res := make(chan *channel, 1) + go func() { + newCh, ok := <-s.incomingChannels + if !ok { + t.Fatalf("No incoming channel") + } + if newCh.ChannelType() != "chan" { + t.Fatalf("got type %q want chan", newCh.ChannelType()) + } + ch, _, err := newCh.Accept() + if err != nil { + t.Fatalf("Accept %v", err) + } + res <- ch.(*channel) + }() + + ch, err := c.openChannel("chan", nil) + if err != nil { + t.Fatalf("OpenChannel: %v", err) + } + + return <-res, ch, c +} + +// Test that stderr and stdout can be addressed from different +// goroutines. This is intended for use with the race detector. 
+func TestMuxChannelExtendedThreadSafety(t *testing.T) { + writer, reader, mux := channelPair(t) + defer writer.Close() + defer reader.Close() + defer mux.Close() + + var wr, rd sync.WaitGroup + magic := "hello world" + + wr.Add(2) + go func() { + io.WriteString(writer, magic) + wr.Done() + }() + go func() { + io.WriteString(writer.Stderr(), magic) + wr.Done() + }() + + rd.Add(2) + go func() { + c, err := ioutil.ReadAll(reader) + if string(c) != magic { + t.Fatalf("stdout read got %q, want %q (error %s)", c, magic, err) + } + rd.Done() + }() + go func() { + c, err := ioutil.ReadAll(reader.Stderr()) + if string(c) != magic { + t.Fatalf("stderr read got %q, want %q (error %s)", c, magic, err) + } + rd.Done() + }() + + wr.Wait() + writer.CloseWrite() + rd.Wait() +} + +func TestMuxReadWrite(t *testing.T) { + s, c, mux := channelPair(t) + defer s.Close() + defer c.Close() + defer mux.Close() + + magic := "hello world" + magicExt := "hello stderr" + go func() { + _, err := s.Write([]byte(magic)) + if err != nil { + t.Fatalf("Write: %v", err) + } + _, err = s.Extended(1).Write([]byte(magicExt)) + if err != nil { + t.Fatalf("Write: %v", err) + } + err = s.Close() + if err != nil { + t.Fatalf("Close: %v", err) + } + }() + + var buf [1024]byte + n, err := c.Read(buf[:]) + if err != nil { + t.Fatalf("server Read: %v", err) + } + got := string(buf[:n]) + if got != magic { + t.Fatalf("server: got %q want %q", got, magic) + } + + n, err = c.Extended(1).Read(buf[:]) + if err != nil { + t.Fatalf("server Read: %v", err) + } + + got = string(buf[:n]) + if got != magicExt { + t.Fatalf("server: got %q want %q", got, magic) + } +} + +func TestMuxChannelOverflow(t *testing.T) { + reader, writer, mux := channelPair(t) + defer reader.Close() + defer writer.Close() + defer mux.Close() + + wDone := make(chan int, 1) + go func() { + if _, err := writer.Write(make([]byte, channelWindowSize)); err != nil { + t.Errorf("could not fill window: %v", err) + } + writer.Write(make([]byte, 1)) + wDone <- 1 + }() + writer.remoteWin.waitWriterBlocked() + + // Send 1 byte. 
+ packet := make([]byte, 1+4+4+1) + packet[0] = msgChannelData + marshalUint32(packet[1:], writer.remoteId) + marshalUint32(packet[5:], uint32(1)) + packet[9] = 42 + + if err := writer.mux.conn.writePacket(packet); err != nil { + t.Errorf("could not send packet") + } + if _, err := reader.SendRequest("hello", true, nil); err == nil { + t.Errorf("SendRequest succeeded.") + } + <-wDone +} + +func TestMuxChannelCloseWriteUnblock(t *testing.T) { + reader, writer, mux := channelPair(t) + defer reader.Close() + defer writer.Close() + defer mux.Close() + + wDone := make(chan int, 1) + go func() { + if _, err := writer.Write(make([]byte, channelWindowSize)); err != nil { + t.Errorf("could not fill window: %v", err) + } + if _, err := writer.Write(make([]byte, 1)); err != io.EOF { + t.Errorf("got %v, want EOF for unblock write", err) + } + wDone <- 1 + }() + + writer.remoteWin.waitWriterBlocked() + reader.Close() + <-wDone +} + +func TestMuxConnectionCloseWriteUnblock(t *testing.T) { + reader, writer, mux := channelPair(t) + defer reader.Close() + defer writer.Close() + defer mux.Close() + + wDone := make(chan int, 1) + go func() { + if _, err := writer.Write(make([]byte, channelWindowSize)); err != nil { + t.Errorf("could not fill window: %v", err) + } + if _, err := writer.Write(make([]byte, 1)); err != io.EOF { + t.Errorf("got %v, want EOF for unblock write", err) + } + wDone <- 1 + }() + + writer.remoteWin.waitWriterBlocked() + mux.Close() + <-wDone +} + +func TestMuxReject(t *testing.T) { + client, server := muxPair() + defer server.Close() + defer client.Close() + + go func() { + ch, ok := <-server.incomingChannels + if !ok { + t.Fatalf("Accept") + } + if ch.ChannelType() != "ch" || string(ch.ExtraData()) != "extra" { + t.Fatalf("unexpected channel: %q, %q", ch.ChannelType(), ch.ExtraData()) + } + ch.Reject(RejectionReason(42), "message") + }() + + ch, err := client.openChannel("ch", []byte("extra")) + if ch != nil { + t.Fatal("openChannel not rejected") + } + + ocf, ok := err.(*OpenChannelError) + if !ok { + t.Errorf("got %#v want *OpenChannelError", err) + } else if ocf.Reason != 42 || ocf.Message != "message" { + t.Errorf("got %#v, want {Reason: 42, Message: %q}", ocf, "message") + } + + want := "ssh: rejected: unknown reason 42 (message)" + if err.Error() != want { + t.Errorf("got %q, want %q", err.Error(), want) + } +} + +func TestMuxChannelRequest(t *testing.T) { + client, server, mux := channelPair(t) + defer server.Close() + defer client.Close() + defer mux.Close() + + var received int + var wg sync.WaitGroup + wg.Add(1) + go func() { + for r := range server.incomingRequests { + received++ + r.Reply(r.Type == "yes", nil) + } + wg.Done() + }() + _, err := client.SendRequest("yes", false, nil) + if err != nil { + t.Fatalf("SendRequest: %v", err) + } + ok, err := client.SendRequest("yes", true, nil) + if err != nil { + t.Fatalf("SendRequest: %v", err) + } + + if !ok { + t.Errorf("SendRequest(yes): %v", ok) + + } + + ok, err = client.SendRequest("no", true, nil) + if err != nil { + t.Fatalf("SendRequest: %v", err) + } + if ok { + t.Errorf("SendRequest(no): %v", ok) + + } + + client.Close() + wg.Wait() + + if received != 3 { + t.Errorf("got %d requests, want %d", received, 3) + } +} + +func TestMuxGlobalRequest(t *testing.T) { + clientMux, serverMux := muxPair() + defer serverMux.Close() + defer clientMux.Close() + + var seen bool + go func() { + for r := range serverMux.incomingRequests { + seen = seen || r.Type == "peek" + if r.WantReply { + err := r.Reply(r.Type == "yes", + 
append([]byte(r.Type), r.Payload...)) + if err != nil { + t.Errorf("AckRequest: %v", err) + } + } + } + }() + + _, _, err := clientMux.SendRequest("peek", false, nil) + if err != nil { + t.Errorf("SendRequest: %v", err) + } + + ok, data, err := clientMux.SendRequest("yes", true, []byte("a")) + if !ok || string(data) != "yesa" || err != nil { + t.Errorf("SendRequest(\"yes\", true, \"a\"): %v %v %v", + ok, data, err) + } + if ok, data, err := clientMux.SendRequest("yes", true, []byte("a")); !ok || string(data) != "yesa" || err != nil { + t.Errorf("SendRequest(\"yes\", true, \"a\"): %v %v %v", + ok, data, err) + } + + if ok, data, err := clientMux.SendRequest("no", true, []byte("a")); ok || string(data) != "noa" || err != nil { + t.Errorf("SendRequest(\"no\", true, \"a\"): %v %v %v", + ok, data, err) + } + + clientMux.Disconnect(0, "") + if !seen { + t.Errorf("never saw 'peek' request") + } +} + +func TestMuxGlobalRequestUnblock(t *testing.T) { + clientMux, serverMux := muxPair() + defer serverMux.Close() + defer clientMux.Close() + + result := make(chan error, 1) + go func() { + _, _, err := clientMux.SendRequest("hello", true, nil) + result <- err + }() + + <-serverMux.incomingRequests + serverMux.conn.Close() + err := <-result + + if err != io.EOF { + t.Errorf("want EOF, got %v", io.EOF) + } +} + +func TestMuxChannelRequestUnblock(t *testing.T) { + a, b, connB := channelPair(t) + defer a.Close() + defer b.Close() + defer connB.Close() + + result := make(chan error, 1) + go func() { + _, err := a.SendRequest("hello", true, nil) + result <- err + }() + + <-b.incomingRequests + connB.conn.Close() + err := <-result + + if err != io.EOF { + t.Errorf("want EOF, got %v", err) + } +} + +func TestMuxDisconnect(t *testing.T) { + a, b := muxPair() + defer a.Close() + defer b.Close() + + go func() { + for r := range b.incomingRequests { + r.Reply(true, nil) + } + }() + + a.Disconnect(42, "whatever") + ok, _, err := a.SendRequest("hello", true, nil) + if ok || err == nil { + t.Errorf("got reply after disconnecting") + } + err = b.Wait() + if d, ok := err.(*disconnectMsg); !ok || d.Reason != 42 { + t.Errorf("got %#v, want disconnectMsg{Reason:42}", err) + } +} + +func TestMuxCloseChannel(t *testing.T) { + r, w, mux := channelPair(t) + defer mux.Close() + defer r.Close() + defer w.Close() + + result := make(chan error, 1) + go func() { + var b [1024]byte + _, err := r.Read(b[:]) + result <- err + }() + if err := w.Close(); err != nil { + t.Errorf("w.Close: %v", err) + } + + if _, err := w.Write([]byte("hello")); err != io.EOF { + t.Errorf("got err %v, want io.EOF after Close", err) + } + + if err := <-result; err != io.EOF { + t.Errorf("got %v (%T), want io.EOF", err, err) + } +} + +func TestMuxCloseWriteChannel(t *testing.T) { + r, w, mux := channelPair(t) + defer mux.Close() + + result := make(chan error, 1) + go func() { + var b [1024]byte + _, err := r.Read(b[:]) + result <- err + }() + if err := w.CloseWrite(); err != nil { + t.Errorf("w.CloseWrite: %v", err) + } + + if _, err := w.Write([]byte("hello")); err != io.EOF { + t.Errorf("got err %v, want io.EOF after CloseWrite", err) + } + + if err := <-result; err != io.EOF { + t.Errorf("got %v (%T), want io.EOF", err, err) + } +} + +func TestMuxInvalidRecord(t *testing.T) { + a, b := muxPair() + defer a.Close() + defer b.Close() + + packet := make([]byte, 1+4+4+1) + packet[0] = msgChannelData + marshalUint32(packet[1:], 29348723 /* invalid channel id */) + marshalUint32(packet[5:], 1) + packet[9] = 42 + + a.conn.writePacket(packet) + go 
a.SendRequest("hello", false, nil) + // 'a' wrote an invalid packet, so 'b' has exited. + req, ok := <-b.incomingRequests + if ok { + t.Errorf("got request %#v after receiving invalid packet", req) + } +} + +func TestZeroWindowAdjust(t *testing.T) { + a, b, mux := channelPair(t) + defer a.Close() + defer b.Close() + defer mux.Close() + + go func() { + io.WriteString(a, "hello") + // bogus adjust. + a.sendMessage(windowAdjustMsg{}) + io.WriteString(a, "world") + a.Close() + }() + + want := "helloworld" + c, _ := ioutil.ReadAll(b) + if string(c) != want { + t.Errorf("got %q want %q", c, want) + } +} + +func TestMuxMaxPacketSize(t *testing.T) { + a, b, mux := channelPair(t) + defer a.Close() + defer b.Close() + defer mux.Close() + + large := make([]byte, a.maxRemotePayload+1) + packet := make([]byte, 1+4+4+1+len(large)) + packet[0] = msgChannelData + marshalUint32(packet[1:], a.remoteId) + marshalUint32(packet[5:], uint32(len(large))) + packet[9] = 42 + + if err := a.mux.conn.writePacket(packet); err != nil { + t.Errorf("could not send packet") + } + + go a.SendRequest("hello", false, nil) + + _, ok := <-b.incomingRequests + if ok { + t.Errorf("connection still alive after receiving large packet.") + } +} + +// Don't ship code with debug=true. +func TestDebug(t *testing.T) { + if debugMux { + t.Error("mux debug switched on") + } + if debugHandshake { + t.Error("handshake debug switched on") + } +} diff --git a/vendor/golang.org/x/crypto/ssh/server.go b/vendor/golang.org/x/crypto/ssh/server.go new file mode 100644 index 000000000..4781eb780 --- /dev/null +++ b/vendor/golang.org/x/crypto/ssh/server.go @@ -0,0 +1,495 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ssh + +import ( + "bytes" + "errors" + "fmt" + "io" + "net" +) + +// The Permissions type holds fine-grained permissions that are +// specific to a user or a specific authentication method for a +// user. Permissions, except for "source-address", must be enforced in +// the server application layer, after successful authentication. The +// Permissions are passed on in ServerConn so a server implementation +// can honor them. +type Permissions struct { + // Critical options restrict default permissions. Common + // restrictions are "source-address" and "force-command". If + // the server cannot enforce the restriction, or does not + // recognize it, the user should not authenticate. + CriticalOptions map[string]string + + // Extensions are extra functionality that the server may + // offer on authenticated connections. Common extensions are + // "permit-agent-forwarding", "permit-X11-forwarding". Lack of + // support for an extension does not preclude authenticating a + // user. + Extensions map[string]string +} + +// ServerConfig holds server specific configuration data. +type ServerConfig struct { + // Config contains configuration shared between client and server. + Config + + hostKeys []Signer + + // NoClientAuth is true if clients are allowed to connect without + // authenticating. + NoClientAuth bool + + // PasswordCallback, if non-nil, is called when a user + // attempts to authenticate using a password. + PasswordCallback func(conn ConnMetadata, password []byte) (*Permissions, error) + + // PublicKeyCallback, if non-nil, is called when a client attempts public + // key authentication. It must return true if the given public key is + // valid for the given user. For example, see CertChecker.Authenticate. 
+ PublicKeyCallback func(conn ConnMetadata, key PublicKey) (*Permissions, error) + + // KeyboardInteractiveCallback, if non-nil, is called when + // keyboard-interactive authentication is selected (RFC + // 4256). The client object's Challenge function should be + // used to query the user. The callback may offer multiple + // Challenge rounds. To avoid information leaks, the client + // should be presented a challenge even if the user is + // unknown. + KeyboardInteractiveCallback func(conn ConnMetadata, client KeyboardInteractiveChallenge) (*Permissions, error) + + // AuthLogCallback, if non-nil, is called to log all authentication + // attempts. + AuthLogCallback func(conn ConnMetadata, method string, err error) + + // ServerVersion is the version identification string to announce in + // the public handshake. + // If empty, a reasonable default is used. + // Note that RFC 4253 section 4.2 requires that this string start with + // "SSH-2.0-". + ServerVersion string +} + +// AddHostKey adds a private key as a host key. If an existing host +// key exists with the same algorithm, it is overwritten. Each server +// config must have at least one host key. +func (s *ServerConfig) AddHostKey(key Signer) { + for i, k := range s.hostKeys { + if k.PublicKey().Type() == key.PublicKey().Type() { + s.hostKeys[i] = key + return + } + } + + s.hostKeys = append(s.hostKeys, key) +} + +// cachedPubKey contains the results of querying whether a public key is +// acceptable for a user. +type cachedPubKey struct { + user string + pubKeyData []byte + result error + perms *Permissions +} + +const maxCachedPubKeys = 16 + +// pubKeyCache caches tests for public keys. Since SSH clients +// will query whether a public key is acceptable before attempting to +// authenticate with it, we end up with duplicate queries for public +// key validity. The cache only applies to a single ServerConn. +type pubKeyCache struct { + keys []cachedPubKey +} + +// get returns the result for a given user/algo/key tuple. +func (c *pubKeyCache) get(user string, pubKeyData []byte) (cachedPubKey, bool) { + for _, k := range c.keys { + if k.user == user && bytes.Equal(k.pubKeyData, pubKeyData) { + return k, true + } + } + return cachedPubKey{}, false +} + +// add adds the given tuple to the cache. +func (c *pubKeyCache) add(candidate cachedPubKey) { + if len(c.keys) < maxCachedPubKeys { + c.keys = append(c.keys, candidate) + } +} + +// ServerConn is an authenticated SSH connection, as seen from the +// server +type ServerConn struct { + Conn + + // If the succeeding authentication callback returned a + // non-nil Permissions pointer, it is stored here. + Permissions *Permissions +} + +// NewServerConn starts a new SSH server with c as the underlying +// transport. It starts with a handshake and, if the handshake is +// unsuccessful, it closes the connection and returns an error. The +// Request and NewChannel channels must be serviced, or the connection +// will hang. +func NewServerConn(c net.Conn, config *ServerConfig) (*ServerConn, <-chan NewChannel, <-chan *Request, error) { + fullConf := *config + fullConf.SetDefaults() + s := &connection{ + sshConn: sshConn{conn: c}, + } + perms, err := s.serverHandshake(&fullConf) + if err != nil { + c.Close() + return nil, nil, nil, err + } + return &ServerConn{s, perms}, s.mux.incomingChannels, s.mux.incomingRequests, nil +} + +// signAndMarshal signs the data with the appropriate algorithm, +// and serializes the result in SSH wire format. 
+func signAndMarshal(k Signer, rand io.Reader, data []byte) ([]byte, error) { + sig, err := k.Sign(rand, data) + if err != nil { + return nil, err + } + + return Marshal(sig), nil +} + +// serverHandshake performs key exchange and user authentication. +func (s *connection) serverHandshake(config *ServerConfig) (*Permissions, error) { + if len(config.hostKeys) == 0 { + return nil, errors.New("ssh: server has no host keys") + } + + if !config.NoClientAuth && config.PasswordCallback == nil && config.PublicKeyCallback == nil && config.KeyboardInteractiveCallback == nil { + return nil, errors.New("ssh: no authentication methods configured but NoClientAuth is also false") + } + + if config.ServerVersion != "" { + s.serverVersion = []byte(config.ServerVersion) + } else { + s.serverVersion = []byte(packageVersion) + } + var err error + s.clientVersion, err = exchangeVersions(s.sshConn.conn, s.serverVersion) + if err != nil { + return nil, err + } + + tr := newTransport(s.sshConn.conn, config.Rand, false /* not client */) + s.transport = newServerTransport(tr, s.clientVersion, s.serverVersion, config) + + if err := s.transport.requestKeyChange(); err != nil { + return nil, err + } + + if packet, err := s.transport.readPacket(); err != nil { + return nil, err + } else if packet[0] != msgNewKeys { + return nil, unexpectedMessageError(msgNewKeys, packet[0]) + } + + // We just did the key change, so the session ID is established. + s.sessionID = s.transport.getSessionID() + + var packet []byte + if packet, err = s.transport.readPacket(); err != nil { + return nil, err + } + + var serviceRequest serviceRequestMsg + if err = Unmarshal(packet, &serviceRequest); err != nil { + return nil, err + } + if serviceRequest.Service != serviceUserAuth { + return nil, errors.New("ssh: requested service '" + serviceRequest.Service + "' before authenticating") + } + serviceAccept := serviceAcceptMsg{ + Service: serviceUserAuth, + } + if err := s.transport.writePacket(Marshal(&serviceAccept)); err != nil { + return nil, err + } + + perms, err := s.serverAuthenticate(config) + if err != nil { + return nil, err + } + s.mux = newMux(s.transport) + return perms, err +} + +func isAcceptableAlgo(algo string) bool { + switch algo { + case KeyAlgoRSA, KeyAlgoDSA, KeyAlgoECDSA256, KeyAlgoECDSA384, KeyAlgoECDSA521, + CertAlgoRSAv01, CertAlgoDSAv01, CertAlgoECDSA256v01, CertAlgoECDSA384v01, CertAlgoECDSA521v01: + return true + } + return false +} + +func checkSourceAddress(addr net.Addr, sourceAddr string) error { + if addr == nil { + return errors.New("ssh: no address known for client, but source-address match required") + } + + tcpAddr, ok := addr.(*net.TCPAddr) + if !ok { + return fmt.Errorf("ssh: remote address %v is not a TCP address when checking source-address match", addr) + } + + if allowedIP := net.ParseIP(sourceAddr); allowedIP != nil { + if bytes.Equal(allowedIP, tcpAddr.IP) { + return nil + } + } else { + _, ipNet, err := net.ParseCIDR(sourceAddr) + if err != nil { + return fmt.Errorf("ssh: error parsing source-address restriction %q: %v", sourceAddr, err) + } + + if ipNet.Contains(tcpAddr.IP) { + return nil + } + } + + return fmt.Errorf("ssh: remote address %v is not allowed because of source-address restriction", addr) +} + +func (s *connection) serverAuthenticate(config *ServerConfig) (*Permissions, error) { + var err error + var cache pubKeyCache + var perms *Permissions + +userAuthLoop: + for { + var userAuthReq userAuthRequestMsg + if packet, err := s.transport.readPacket(); err != nil { + return nil, err + } else 
if err = Unmarshal(packet, &userAuthReq); err != nil { + return nil, err + } + + if userAuthReq.Service != serviceSSH { + return nil, errors.New("ssh: client attempted to negotiate for unknown service: " + userAuthReq.Service) + } + + s.user = userAuthReq.User + perms = nil + authErr := errors.New("no auth passed yet") + + switch userAuthReq.Method { + case "none": + if config.NoClientAuth { + s.user = "" + authErr = nil + } + case "password": + if config.PasswordCallback == nil { + authErr = errors.New("ssh: password auth not configured") + break + } + payload := userAuthReq.Payload + if len(payload) < 1 || payload[0] != 0 { + return nil, parseError(msgUserAuthRequest) + } + payload = payload[1:] + password, payload, ok := parseString(payload) + if !ok || len(payload) > 0 { + return nil, parseError(msgUserAuthRequest) + } + + perms, authErr = config.PasswordCallback(s, password) + case "keyboard-interactive": + if config.KeyboardInteractiveCallback == nil { + authErr = errors.New("ssh: keyboard-interactive auth not configured") + break + } + + prompter := &sshClientKeyboardInteractive{s} + perms, authErr = config.KeyboardInteractiveCallback(s, prompter.Challenge) + case "publickey": + if config.PublicKeyCallback == nil { + authErr = errors.New("ssh: publickey auth not configured") + break + } + payload := userAuthReq.Payload + if len(payload) < 1 { + return nil, parseError(msgUserAuthRequest) + } + isQuery := payload[0] == 0 + payload = payload[1:] + algoBytes, payload, ok := parseString(payload) + if !ok { + return nil, parseError(msgUserAuthRequest) + } + algo := string(algoBytes) + if !isAcceptableAlgo(algo) { + authErr = fmt.Errorf("ssh: algorithm %q not accepted", algo) + break + } + + pubKeyData, payload, ok := parseString(payload) + if !ok { + return nil, parseError(msgUserAuthRequest) + } + + pubKey, err := ParsePublicKey(pubKeyData) + if err != nil { + return nil, err + } + + candidate, ok := cache.get(s.user, pubKeyData) + if !ok { + candidate.user = s.user + candidate.pubKeyData = pubKeyData + candidate.perms, candidate.result = config.PublicKeyCallback(s, pubKey) + if candidate.result == nil && candidate.perms != nil && candidate.perms.CriticalOptions != nil && candidate.perms.CriticalOptions[sourceAddressCriticalOption] != "" { + candidate.result = checkSourceAddress( + s.RemoteAddr(), + candidate.perms.CriticalOptions[sourceAddressCriticalOption]) + } + cache.add(candidate) + } + + if isQuery { + // The client can query if the given public key + // would be okay. + if len(payload) > 0 { + return nil, parseError(msgUserAuthRequest) + } + + if candidate.result == nil { + okMsg := userAuthPubKeyOkMsg{ + Algo: algo, + PubKey: pubKeyData, + } + if err = s.transport.writePacket(Marshal(&okMsg)); err != nil { + return nil, err + } + continue userAuthLoop + } + authErr = candidate.result + } else { + sig, payload, ok := parseSignature(payload) + if !ok || len(payload) > 0 { + return nil, parseError(msgUserAuthRequest) + } + // Ensure the public key algo and signature algo + // are supported. Compare the private key + // algorithm name that corresponds to algo with + // sig.Format. This is usually the same, but + // for certs, the names differ. 
+ if !isAcceptableAlgo(sig.Format) { + break + } + signedData := buildDataSignedForAuth(s.transport.getSessionID(), userAuthReq, algoBytes, pubKeyData) + + if err := pubKey.Verify(signedData, sig); err != nil { + return nil, err + } + + authErr = candidate.result + perms = candidate.perms + } + default: + authErr = fmt.Errorf("ssh: unknown method %q", userAuthReq.Method) + } + + if config.AuthLogCallback != nil { + config.AuthLogCallback(s, userAuthReq.Method, authErr) + } + + if authErr == nil { + break userAuthLoop + } + + var failureMsg userAuthFailureMsg + if config.PasswordCallback != nil { + failureMsg.Methods = append(failureMsg.Methods, "password") + } + if config.PublicKeyCallback != nil { + failureMsg.Methods = append(failureMsg.Methods, "publickey") + } + if config.KeyboardInteractiveCallback != nil { + failureMsg.Methods = append(failureMsg.Methods, "keyboard-interactive") + } + + if len(failureMsg.Methods) == 0 { + return nil, errors.New("ssh: no authentication methods configured but NoClientAuth is also false") + } + + if err = s.transport.writePacket(Marshal(&failureMsg)); err != nil { + return nil, err + } + } + + if err = s.transport.writePacket([]byte{msgUserAuthSuccess}); err != nil { + return nil, err + } + return perms, nil +} + +// sshClientKeyboardInteractive implements a ClientKeyboardInteractive by +// asking the client on the other side of a ServerConn. +type sshClientKeyboardInteractive struct { + *connection +} + +func (c *sshClientKeyboardInteractive) Challenge(user, instruction string, questions []string, echos []bool) (answers []string, err error) { + if len(questions) != len(echos) { + return nil, errors.New("ssh: echos and questions must have equal length") + } + + var prompts []byte + for i := range questions { + prompts = appendString(prompts, questions[i]) + prompts = appendBool(prompts, echos[i]) + } + + if err := c.transport.writePacket(Marshal(&userAuthInfoRequestMsg{ + Instruction: instruction, + NumPrompts: uint32(len(questions)), + Prompts: prompts, + })); err != nil { + return nil, err + } + + packet, err := c.transport.readPacket() + if err != nil { + return nil, err + } + if packet[0] != msgUserAuthInfoResponse { + return nil, unexpectedMessageError(msgUserAuthInfoResponse, packet[0]) + } + packet = packet[1:] + + n, packet, ok := parseUint32(packet) + if !ok || int(n) != len(questions) { + return nil, parseError(msgUserAuthInfoResponse) + } + + for i := uint32(0); i < n; i++ { + ans, rest, ok := parseString(packet) + if !ok { + return nil, parseError(msgUserAuthInfoResponse) + } + + answers = append(answers, string(ans)) + packet = rest + } + if len(packet) != 0 { + return nil, errors.New("ssh: junk at end of message") + } + + return answers, nil +} diff --git a/vendor/golang.org/x/crypto/ssh/session.go b/vendor/golang.org/x/crypto/ssh/session.go new file mode 100644 index 000000000..fd10cd1aa --- /dev/null +++ b/vendor/golang.org/x/crypto/ssh/session.go @@ -0,0 +1,605 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ssh + +// Session implements an interactive session described in +// "RFC 4254, section 6". + +import ( + "bytes" + "errors" + "fmt" + "io" + "io/ioutil" + "sync" +) + +type Signal string + +// POSIX signals as listed in RFC 4254 Section 6.10. 
+const ( + SIGABRT Signal = "ABRT" + SIGALRM Signal = "ALRM" + SIGFPE Signal = "FPE" + SIGHUP Signal = "HUP" + SIGILL Signal = "ILL" + SIGINT Signal = "INT" + SIGKILL Signal = "KILL" + SIGPIPE Signal = "PIPE" + SIGQUIT Signal = "QUIT" + SIGSEGV Signal = "SEGV" + SIGTERM Signal = "TERM" + SIGUSR1 Signal = "USR1" + SIGUSR2 Signal = "USR2" +) + +var signals = map[Signal]int{ + SIGABRT: 6, + SIGALRM: 14, + SIGFPE: 8, + SIGHUP: 1, + SIGILL: 4, + SIGINT: 2, + SIGKILL: 9, + SIGPIPE: 13, + SIGQUIT: 3, + SIGSEGV: 11, + SIGTERM: 15, +} + +type TerminalModes map[uint8]uint32 + +// POSIX terminal mode flags as listed in RFC 4254 Section 8. +const ( + tty_OP_END = 0 + VINTR = 1 + VQUIT = 2 + VERASE = 3 + VKILL = 4 + VEOF = 5 + VEOL = 6 + VEOL2 = 7 + VSTART = 8 + VSTOP = 9 + VSUSP = 10 + VDSUSP = 11 + VREPRINT = 12 + VWERASE = 13 + VLNEXT = 14 + VFLUSH = 15 + VSWTCH = 16 + VSTATUS = 17 + VDISCARD = 18 + IGNPAR = 30 + PARMRK = 31 + INPCK = 32 + ISTRIP = 33 + INLCR = 34 + IGNCR = 35 + ICRNL = 36 + IUCLC = 37 + IXON = 38 + IXANY = 39 + IXOFF = 40 + IMAXBEL = 41 + ISIG = 50 + ICANON = 51 + XCASE = 52 + ECHO = 53 + ECHOE = 54 + ECHOK = 55 + ECHONL = 56 + NOFLSH = 57 + TOSTOP = 58 + IEXTEN = 59 + ECHOCTL = 60 + ECHOKE = 61 + PENDIN = 62 + OPOST = 70 + OLCUC = 71 + ONLCR = 72 + OCRNL = 73 + ONOCR = 74 + ONLRET = 75 + CS7 = 90 + CS8 = 91 + PARENB = 92 + PARODD = 93 + TTY_OP_ISPEED = 128 + TTY_OP_OSPEED = 129 +) + +// A Session represents a connection to a remote command or shell. +type Session struct { + // Stdin specifies the remote process's standard input. + // If Stdin is nil, the remote process reads from an empty + // bytes.Buffer. + Stdin io.Reader + + // Stdout and Stderr specify the remote process's standard + // output and error. + // + // If either is nil, Run connects the corresponding file + // descriptor to an instance of ioutil.Discard. There is a + // fixed amount of buffering that is shared for the two streams. + // If either blocks it may eventually cause the remote + // command to block. + Stdout io.Writer + Stderr io.Writer + + ch Channel // the channel backing this session + started bool // true once Start, Run or Shell is invoked. + copyFuncs []func() error + errors chan error // one send per copyFunc + + // true if pipe method is active + stdinpipe, stdoutpipe, stderrpipe bool + + // stdinPipeWriter is non-nil if StdinPipe has not been called + // and Stdin was specified by the user; it is the write end of + // a pipe connecting Session.Stdin to the stdin channel. + stdinPipeWriter io.WriteCloser + + exitStatus chan error +} + +// SendRequest sends an out-of-band channel request on the SSH channel +// underlying the session. +func (s *Session) SendRequest(name string, wantReply bool, payload []byte) (bool, error) { + return s.ch.SendRequest(name, wantReply, payload) +} + +func (s *Session) Close() error { + return s.ch.Close() +} + +// RFC 4254 Section 6.4. +type setenvRequest struct { + Name string + Value string +} + +// Setenv sets an environment variable that will be applied to any +// command executed by Shell or Run. +func (s *Session) Setenv(name, value string) error { + msg := setenvRequest{ + Name: name, + Value: value, + } + ok, err := s.ch.SendRequest("env", true, Marshal(&msg)) + if err == nil && !ok { + err = errors.New("ssh: setenv failed") + } + return err +} + +// RFC 4254 Section 6.2. 
+type ptyRequestMsg struct { + Term string + Columns uint32 + Rows uint32 + Width uint32 + Height uint32 + Modelist string +} + +// RequestPty requests the association of a pty with the session on the remote host. +func (s *Session) RequestPty(term string, h, w int, termmodes TerminalModes) error { + var tm []byte + for k, v := range termmodes { + kv := struct { + Key byte + Val uint32 + }{k, v} + + tm = append(tm, Marshal(&kv)...) + } + tm = append(tm, tty_OP_END) + req := ptyRequestMsg{ + Term: term, + Columns: uint32(w), + Rows: uint32(h), + Width: uint32(w * 8), + Height: uint32(h * 8), + Modelist: string(tm), + } + ok, err := s.ch.SendRequest("pty-req", true, Marshal(&req)) + if err == nil && !ok { + err = errors.New("ssh: pty-req failed") + } + return err +} + +// RFC 4254 Section 6.5. +type subsystemRequestMsg struct { + Subsystem string +} + +// RequestSubsystem requests the association of a subsystem with the session on the remote host. +// A subsystem is a predefined command that runs in the background when the ssh session is initiated +func (s *Session) RequestSubsystem(subsystem string) error { + msg := subsystemRequestMsg{ + Subsystem: subsystem, + } + ok, err := s.ch.SendRequest("subsystem", true, Marshal(&msg)) + if err == nil && !ok { + err = errors.New("ssh: subsystem request failed") + } + return err +} + +// RFC 4254 Section 6.9. +type signalMsg struct { + Signal string +} + +// Signal sends the given signal to the remote process. +// sig is one of the SIG* constants. +func (s *Session) Signal(sig Signal) error { + msg := signalMsg{ + Signal: string(sig), + } + + _, err := s.ch.SendRequest("signal", false, Marshal(&msg)) + return err +} + +// RFC 4254 Section 6.5. +type execMsg struct { + Command string +} + +// Start runs cmd on the remote host. Typically, the remote +// server passes cmd to the shell for interpretation. +// A Session only accepts one call to Run, Start or Shell. +func (s *Session) Start(cmd string) error { + if s.started { + return errors.New("ssh: session already started") + } + req := execMsg{ + Command: cmd, + } + + ok, err := s.ch.SendRequest("exec", true, Marshal(&req)) + if err == nil && !ok { + err = fmt.Errorf("ssh: command %v failed", cmd) + } + if err != nil { + return err + } + return s.start() +} + +// Run runs cmd on the remote host. Typically, the remote +// server passes cmd to the shell for interpretation. +// A Session only accepts one call to Run, Start, Shell, Output, +// or CombinedOutput. +// +// The returned error is nil if the command runs, has no problems +// copying stdin, stdout, and stderr, and exits with a zero exit +// status. +// +// If the command fails to run or doesn't complete successfully, the +// error is of type *ExitError. Other error types may be +// returned for I/O problems. +func (s *Session) Run(cmd string) error { + err := s.Start(cmd) + if err != nil { + return err + } + return s.Wait() +} + +// Output runs cmd on the remote host and returns its standard output. +func (s *Session) Output(cmd string) ([]byte, error) { + if s.Stdout != nil { + return nil, errors.New("ssh: Stdout already set") + } + var b bytes.Buffer + s.Stdout = &b + err := s.Run(cmd) + return b.Bytes(), err +} + +type singleWriter struct { + b bytes.Buffer + mu sync.Mutex +} + +func (w *singleWriter) Write(p []byte) (int, error) { + w.mu.Lock() + defer w.mu.Unlock() + return w.b.Write(p) +} + +// CombinedOutput runs cmd on the remote host and returns its combined +// standard output and standard error. 
+func (s *Session) CombinedOutput(cmd string) ([]byte, error) { + if s.Stdout != nil { + return nil, errors.New("ssh: Stdout already set") + } + if s.Stderr != nil { + return nil, errors.New("ssh: Stderr already set") + } + var b singleWriter + s.Stdout = &b + s.Stderr = &b + err := s.Run(cmd) + return b.b.Bytes(), err +} + +// Shell starts a login shell on the remote host. A Session only +// accepts one call to Run, Start, Shell, Output, or CombinedOutput. +func (s *Session) Shell() error { + if s.started { + return errors.New("ssh: session already started") + } + + ok, err := s.ch.SendRequest("shell", true, nil) + if err == nil && !ok { + return errors.New("ssh: could not start shell") + } + if err != nil { + return err + } + return s.start() +} + +func (s *Session) start() error { + s.started = true + + type F func(*Session) + for _, setupFd := range []F{(*Session).stdin, (*Session).stdout, (*Session).stderr} { + setupFd(s) + } + + s.errors = make(chan error, len(s.copyFuncs)) + for _, fn := range s.copyFuncs { + go func(fn func() error) { + s.errors <- fn() + }(fn) + } + return nil +} + +// Wait waits for the remote command to exit. +// +// The returned error is nil if the command runs, has no problems +// copying stdin, stdout, and stderr, and exits with a zero exit +// status. +// +// If the command fails to run or doesn't complete successfully, the +// error is of type *ExitError. Other error types may be +// returned for I/O problems. +func (s *Session) Wait() error { + if !s.started { + return errors.New("ssh: session not started") + } + waitErr := <-s.exitStatus + + if s.stdinPipeWriter != nil { + s.stdinPipeWriter.Close() + } + var copyError error + for _ = range s.copyFuncs { + if err := <-s.errors; err != nil && copyError == nil { + copyError = err + } + } + if waitErr != nil { + return waitErr + } + return copyError +} + +func (s *Session) wait(reqs <-chan *Request) error { + wm := Waitmsg{status: -1} + // Wait for msg channel to be closed before returning. + for msg := range reqs { + switch msg.Type { + case "exit-status": + d := msg.Payload + wm.status = int(d[0])<<24 | int(d[1])<<16 | int(d[2])<<8 | int(d[3]) + case "exit-signal": + var sigval struct { + Signal string + CoreDumped bool + Error string + Lang string + } + if err := Unmarshal(msg.Payload, &sigval); err != nil { + return err + } + + // Must sanitize strings? + wm.signal = sigval.Signal + wm.msg = sigval.Error + wm.lang = sigval.Lang + default: + // This handles keepalives and matches + // OpenSSH's behaviour. 
+ if msg.WantReply { + msg.Reply(false, nil) + } + } + } + if wm.status == 0 { + return nil + } + if wm.status == -1 { + // exit-status was never sent from server + if wm.signal == "" { + return errors.New("wait: remote command exited without exit status or exit signal") + } + wm.status = 128 + if _, ok := signals[Signal(wm.signal)]; ok { + wm.status += signals[Signal(wm.signal)] + } + } + return &ExitError{wm} +} + +func (s *Session) stdin() { + if s.stdinpipe { + return + } + var stdin io.Reader + if s.Stdin == nil { + stdin = new(bytes.Buffer) + } else { + r, w := io.Pipe() + go func() { + _, err := io.Copy(w, s.Stdin) + w.CloseWithError(err) + }() + stdin, s.stdinPipeWriter = r, w + } + s.copyFuncs = append(s.copyFuncs, func() error { + _, err := io.Copy(s.ch, stdin) + if err1 := s.ch.CloseWrite(); err == nil && err1 != io.EOF { + err = err1 + } + return err + }) +} + +func (s *Session) stdout() { + if s.stdoutpipe { + return + } + if s.Stdout == nil { + s.Stdout = ioutil.Discard + } + s.copyFuncs = append(s.copyFuncs, func() error { + _, err := io.Copy(s.Stdout, s.ch) + return err + }) +} + +func (s *Session) stderr() { + if s.stderrpipe { + return + } + if s.Stderr == nil { + s.Stderr = ioutil.Discard + } + s.copyFuncs = append(s.copyFuncs, func() error { + _, err := io.Copy(s.Stderr, s.ch.Stderr()) + return err + }) +} + +// sessionStdin reroutes Close to CloseWrite. +type sessionStdin struct { + io.Writer + ch Channel +} + +func (s *sessionStdin) Close() error { + return s.ch.CloseWrite() +} + +// StdinPipe returns a pipe that will be connected to the +// remote command's standard input when the command starts. +func (s *Session) StdinPipe() (io.WriteCloser, error) { + if s.Stdin != nil { + return nil, errors.New("ssh: Stdin already set") + } + if s.started { + return nil, errors.New("ssh: StdinPipe after process started") + } + s.stdinpipe = true + return &sessionStdin{s.ch, s.ch}, nil +} + +// StdoutPipe returns a pipe that will be connected to the +// remote command's standard output when the command starts. +// There is a fixed amount of buffering that is shared between +// stdout and stderr streams. If the StdoutPipe reader is +// not serviced fast enough it may eventually cause the +// remote command to block. +func (s *Session) StdoutPipe() (io.Reader, error) { + if s.Stdout != nil { + return nil, errors.New("ssh: Stdout already set") + } + if s.started { + return nil, errors.New("ssh: StdoutPipe after process started") + } + s.stdoutpipe = true + return s.ch, nil +} + +// StderrPipe returns a pipe that will be connected to the +// remote command's standard error when the command starts. +// There is a fixed amount of buffering that is shared between +// stdout and stderr streams. If the StderrPipe reader is +// not serviced fast enough it may eventually cause the +// remote command to block. +func (s *Session) StderrPipe() (io.Reader, error) { + if s.Stderr != nil { + return nil, errors.New("ssh: Stderr already set") + } + if s.started { + return nil, errors.New("ssh: StderrPipe after process started") + } + s.stderrpipe = true + return s.ch.Stderr(), nil +} + +// newSession returns a new interactive session on the remote host. +func newSession(ch Channel, reqs <-chan *Request) (*Session, error) { + s := &Session{ + ch: ch, + } + s.exitStatus = make(chan error, 1) + go func() { + s.exitStatus <- s.wait(reqs) + }() + + return s, nil +} + +// An ExitError reports unsuccessful completion of a remote command. 
+type ExitError struct { + Waitmsg +} + +func (e *ExitError) Error() string { + return e.Waitmsg.String() +} + +// Waitmsg stores the information about an exited remote command +// as reported by Wait. +type Waitmsg struct { + status int + signal string + msg string + lang string +} + +// ExitStatus returns the exit status of the remote command. +func (w Waitmsg) ExitStatus() int { + return w.status +} + +// Signal returns the exit signal of the remote command if +// it was terminated violently. +func (w Waitmsg) Signal() string { + return w.signal +} + +// Msg returns the exit message given by the remote command +func (w Waitmsg) Msg() string { + return w.msg +} + +// Lang returns the language tag. See RFC 3066 +func (w Waitmsg) Lang() string { + return w.lang +} + +func (w Waitmsg) String() string { + return fmt.Sprintf("Process exited with: %v. Reason was: %v (%v)", w.status, w.msg, w.signal) +} diff --git a/vendor/golang.org/x/crypto/ssh/session_test.go b/vendor/golang.org/x/crypto/ssh/session_test.go new file mode 100644 index 000000000..f7f0f7642 --- /dev/null +++ b/vendor/golang.org/x/crypto/ssh/session_test.go @@ -0,0 +1,774 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ssh + +// Session tests. + +import ( + "bytes" + crypto_rand "crypto/rand" + "errors" + "io" + "io/ioutil" + "math/rand" + "net" + "testing" + + "golang.org/x/crypto/ssh/terminal" +) + +type serverType func(Channel, <-chan *Request, *testing.T) + +// dial constructs a new test server and returns a *ClientConn. +func dial(handler serverType, t *testing.T) *Client { + c1, c2, err := netPipe() + if err != nil { + t.Fatalf("netPipe: %v", err) + } + + go func() { + defer c1.Close() + conf := ServerConfig{ + NoClientAuth: true, + } + conf.AddHostKey(testSigners["rsa"]) + + _, chans, reqs, err := NewServerConn(c1, &conf) + if err != nil { + t.Fatalf("Unable to handshake: %v", err) + } + go DiscardRequests(reqs) + + for newCh := range chans { + if newCh.ChannelType() != "session" { + newCh.Reject(UnknownChannelType, "unknown channel type") + continue + } + + ch, inReqs, err := newCh.Accept() + if err != nil { + t.Errorf("Accept: %v", err) + continue + } + go func() { + handler(ch, inReqs, t) + }() + } + }() + + config := &ClientConfig{ + User: "testuser", + } + + conn, chans, reqs, err := NewClientConn(c2, "", config) + if err != nil { + t.Fatalf("unable to dial remote side: %v", err) + } + + return NewClient(conn, chans, reqs) +} + +// Test a simple string is returned to session.Stdout. +func TestSessionShell(t *testing.T) { + conn := dial(shellHandler, t) + defer conn.Close() + session, err := conn.NewSession() + if err != nil { + t.Fatalf("Unable to request new session: %v", err) + } + defer session.Close() + stdout := new(bytes.Buffer) + session.Stdout = stdout + if err := session.Shell(); err != nil { + t.Fatalf("Unable to execute command: %s", err) + } + if err := session.Wait(); err != nil { + t.Fatalf("Remote command did not exit cleanly: %v", err) + } + actual := stdout.String() + if actual != "golang" { + t.Fatalf("Remote shell did not return expected string: expected=golang, actual=%s", actual) + } +} + +// TODO(dfc) add support for Std{in,err}Pipe when the Server supports it. + +// Test a simple string is returned via StdoutPipe. 
+func TestSessionStdoutPipe(t *testing.T) { + conn := dial(shellHandler, t) + defer conn.Close() + session, err := conn.NewSession() + if err != nil { + t.Fatalf("Unable to request new session: %v", err) + } + defer session.Close() + stdout, err := session.StdoutPipe() + if err != nil { + t.Fatalf("Unable to request StdoutPipe(): %v", err) + } + var buf bytes.Buffer + if err := session.Shell(); err != nil { + t.Fatalf("Unable to execute command: %v", err) + } + done := make(chan bool, 1) + go func() { + if _, err := io.Copy(&buf, stdout); err != nil { + t.Errorf("Copy of stdout failed: %v", err) + } + done <- true + }() + if err := session.Wait(); err != nil { + t.Fatalf("Remote command did not exit cleanly: %v", err) + } + <-done + actual := buf.String() + if actual != "golang" { + t.Fatalf("Remote shell did not return expected string: expected=golang, actual=%s", actual) + } +} + +// Test that a simple string is returned via the Output helper, +// and that stderr is discarded. +func TestSessionOutput(t *testing.T) { + conn := dial(fixedOutputHandler, t) + defer conn.Close() + session, err := conn.NewSession() + if err != nil { + t.Fatalf("Unable to request new session: %v", err) + } + defer session.Close() + + buf, err := session.Output("") // cmd is ignored by fixedOutputHandler + if err != nil { + t.Error("Remote command did not exit cleanly:", err) + } + w := "this-is-stdout." + g := string(buf) + if g != w { + t.Error("Remote command did not return expected string:") + t.Logf("want %q", w) + t.Logf("got %q", g) + } +} + +// Test that both stdout and stderr are returned +// via the CombinedOutput helper. +func TestSessionCombinedOutput(t *testing.T) { + conn := dial(fixedOutputHandler, t) + defer conn.Close() + session, err := conn.NewSession() + if err != nil { + t.Fatalf("Unable to request new session: %v", err) + } + defer session.Close() + + buf, err := session.CombinedOutput("") // cmd is ignored by fixedOutputHandler + if err != nil { + t.Error("Remote command did not exit cleanly:", err) + } + const stdout = "this-is-stdout." + const stderr = "this-is-stderr." + g := string(buf) + if g != stdout+stderr && g != stderr+stdout { + t.Error("Remote command did not return expected string:") + t.Logf("want %q, or %q", stdout+stderr, stderr+stdout) + t.Logf("got %q", g) + } +} + +// Test non-0 exit status is returned correctly. +func TestExitStatusNonZero(t *testing.T) { + conn := dial(exitStatusNonZeroHandler, t) + defer conn.Close() + session, err := conn.NewSession() + if err != nil { + t.Fatalf("Unable to request new session: %v", err) + } + defer session.Close() + if err := session.Shell(); err != nil { + t.Fatalf("Unable to execute command: %v", err) + } + err = session.Wait() + if err == nil { + t.Fatalf("expected command to fail but it didn't") + } + e, ok := err.(*ExitError) + if !ok { + t.Fatalf("expected *ExitError but got %T", err) + } + if e.ExitStatus() != 15 { + t.Fatalf("expected command to exit with 15 but got %v", e.ExitStatus()) + } +} + +// Test 0 exit status is returned correctly. +func TestExitStatusZero(t *testing.T) { + conn := dial(exitStatusZeroHandler, t) + defer conn.Close() + session, err := conn.NewSession() + if err != nil { + t.Fatalf("Unable to request new session: %v", err) + } + defer session.Close() + + if err := session.Shell(); err != nil { + t.Fatalf("Unable to execute command: %v", err) + } + err = session.Wait() + if err != nil { + t.Fatalf("expected nil but got %v", err) + } +} + +// Test exit signal and status are both returned correctly. 
+func TestExitSignalAndStatus(t *testing.T) { + conn := dial(exitSignalAndStatusHandler, t) + defer conn.Close() + session, err := conn.NewSession() + if err != nil { + t.Fatalf("Unable to request new session: %v", err) + } + defer session.Close() + if err := session.Shell(); err != nil { + t.Fatalf("Unable to execute command: %v", err) + } + err = session.Wait() + if err == nil { + t.Fatalf("expected command to fail but it didn't") + } + e, ok := err.(*ExitError) + if !ok { + t.Fatalf("expected *ExitError but got %T", err) + } + if e.Signal() != "TERM" || e.ExitStatus() != 15 { + t.Fatalf("expected command to exit with signal TERM and status 15 but got signal %s and status %v", e.Signal(), e.ExitStatus()) + } +} + +// Test exit signal and status are both returned correctly. +func TestKnownExitSignalOnly(t *testing.T) { + conn := dial(exitSignalHandler, t) + defer conn.Close() + session, err := conn.NewSession() + if err != nil { + t.Fatalf("Unable to request new session: %v", err) + } + defer session.Close() + if err := session.Shell(); err != nil { + t.Fatalf("Unable to execute command: %v", err) + } + err = session.Wait() + if err == nil { + t.Fatalf("expected command to fail but it didn't") + } + e, ok := err.(*ExitError) + if !ok { + t.Fatalf("expected *ExitError but got %T", err) + } + if e.Signal() != "TERM" || e.ExitStatus() != 143 { + t.Fatalf("expected command to exit with signal TERM and status 143 but got signal %s and status %v", e.Signal(), e.ExitStatus()) + } +} + +// Test exit signal and status are both returned correctly. +func TestUnknownExitSignal(t *testing.T) { + conn := dial(exitSignalUnknownHandler, t) + defer conn.Close() + session, err := conn.NewSession() + if err != nil { + t.Fatalf("Unable to request new session: %v", err) + } + defer session.Close() + if err := session.Shell(); err != nil { + t.Fatalf("Unable to execute command: %v", err) + } + err = session.Wait() + if err == nil { + t.Fatalf("expected command to fail but it didn't") + } + e, ok := err.(*ExitError) + if !ok { + t.Fatalf("expected *ExitError but got %T", err) + } + if e.Signal() != "SYS" || e.ExitStatus() != 128 { + t.Fatalf("expected command to exit with signal SYS and status 128 but got signal %s and status %v", e.Signal(), e.ExitStatus()) + } +} + +// Test WaitMsg is not returned if the channel closes abruptly. +func TestExitWithoutStatusOrSignal(t *testing.T) { + conn := dial(exitWithoutSignalOrStatus, t) + defer conn.Close() + session, err := conn.NewSession() + if err != nil { + t.Fatalf("Unable to request new session: %v", err) + } + defer session.Close() + if err := session.Shell(); err != nil { + t.Fatalf("Unable to execute command: %v", err) + } + err = session.Wait() + if err == nil { + t.Fatalf("expected command to fail but it didn't") + } + _, ok := err.(*ExitError) + if ok { + // you can't actually test for errors.errorString + // because it's not exported. + t.Fatalf("expected *errorString but got %T", err) + } +} + +// windowTestBytes is the number of bytes that we'll send to the SSH server. +const windowTestBytes = 16000 * 200 + +// TestServerWindow writes random data to the server. The server is expected to echo +// the same data back, which is compared against the original. 
+func TestServerWindow(t *testing.T) { + origBuf := bytes.NewBuffer(make([]byte, 0, windowTestBytes)) + io.CopyN(origBuf, crypto_rand.Reader, windowTestBytes) + origBytes := origBuf.Bytes() + + conn := dial(echoHandler, t) + defer conn.Close() + session, err := conn.NewSession() + if err != nil { + t.Fatal(err) + } + defer session.Close() + result := make(chan []byte) + + go func() { + defer close(result) + echoedBuf := bytes.NewBuffer(make([]byte, 0, windowTestBytes)) + serverStdout, err := session.StdoutPipe() + if err != nil { + t.Errorf("StdoutPipe failed: %v", err) + return + } + n, err := copyNRandomly("stdout", echoedBuf, serverStdout, windowTestBytes) + if err != nil && err != io.EOF { + t.Errorf("Read only %d bytes from server, expected %d: %v", n, windowTestBytes, err) + } + result <- echoedBuf.Bytes() + }() + + serverStdin, err := session.StdinPipe() + if err != nil { + t.Fatalf("StdinPipe failed: %v", err) + } + written, err := copyNRandomly("stdin", serverStdin, origBuf, windowTestBytes) + if err != nil { + t.Fatalf("failed to copy origBuf to serverStdin: %v", err) + } + if written != windowTestBytes { + t.Fatalf("Wrote only %d of %d bytes to server", written, windowTestBytes) + } + + echoedBytes := <-result + + if !bytes.Equal(origBytes, echoedBytes) { + t.Fatalf("Echoed buffer differed from original, orig %d, echoed %d", len(origBytes), len(echoedBytes)) + } +} + +// Verify the client can handle a keepalive packet from the server. +func TestClientHandlesKeepalives(t *testing.T) { + conn := dial(channelKeepaliveSender, t) + defer conn.Close() + session, err := conn.NewSession() + if err != nil { + t.Fatal(err) + } + defer session.Close() + if err := session.Shell(); err != nil { + t.Fatalf("Unable to execute command: %v", err) + } + err = session.Wait() + if err != nil { + t.Fatalf("expected nil but got: %v", err) + } +} + +type exitStatusMsg struct { + Status uint32 +} + +type exitSignalMsg struct { + Signal string + CoreDumped bool + Errmsg string + Lang string +} + +func handleTerminalRequests(in <-chan *Request) { + for req := range in { + ok := false + switch req.Type { + case "shell": + ok = true + if len(req.Payload) > 0 { + // We don't accept any commands, only the default shell. 
+ ok = false + } + case "env": + ok = true + } + req.Reply(ok, nil) + } +} + +func newServerShell(ch Channel, in <-chan *Request, prompt string) *terminal.Terminal { + term := terminal.NewTerminal(ch, prompt) + go handleTerminalRequests(in) + return term +} + +func exitStatusZeroHandler(ch Channel, in <-chan *Request, t *testing.T) { + defer ch.Close() + // this string is returned to stdout + shell := newServerShell(ch, in, "> ") + readLine(shell, t) + sendStatus(0, ch, t) +} + +func exitStatusNonZeroHandler(ch Channel, in <-chan *Request, t *testing.T) { + defer ch.Close() + shell := newServerShell(ch, in, "> ") + readLine(shell, t) + sendStatus(15, ch, t) +} + +func exitSignalAndStatusHandler(ch Channel, in <-chan *Request, t *testing.T) { + defer ch.Close() + shell := newServerShell(ch, in, "> ") + readLine(shell, t) + sendStatus(15, ch, t) + sendSignal("TERM", ch, t) +} + +func exitSignalHandler(ch Channel, in <-chan *Request, t *testing.T) { + defer ch.Close() + shell := newServerShell(ch, in, "> ") + readLine(shell, t) + sendSignal("TERM", ch, t) +} + +func exitSignalUnknownHandler(ch Channel, in <-chan *Request, t *testing.T) { + defer ch.Close() + shell := newServerShell(ch, in, "> ") + readLine(shell, t) + sendSignal("SYS", ch, t) +} + +func exitWithoutSignalOrStatus(ch Channel, in <-chan *Request, t *testing.T) { + defer ch.Close() + shell := newServerShell(ch, in, "> ") + readLine(shell, t) +} + +func shellHandler(ch Channel, in <-chan *Request, t *testing.T) { + defer ch.Close() + // this string is returned to stdout + shell := newServerShell(ch, in, "golang") + readLine(shell, t) + sendStatus(0, ch, t) +} + +// Ignores the command, writes fixed strings to stderr and stdout. +// Strings are "this-is-stdout." and "this-is-stderr.". +func fixedOutputHandler(ch Channel, in <-chan *Request, t *testing.T) { + defer ch.Close() + _, err := ch.Read(nil) + + req, ok := <-in + if !ok { + t.Fatalf("error: expected channel request, got: %#v", err) + return + } + + // ignore request, always send some text + req.Reply(true, nil) + + _, err = io.WriteString(ch, "this-is-stdout.") + if err != nil { + t.Fatalf("error writing on server: %v", err) + } + _, err = io.WriteString(ch.Stderr(), "this-is-stderr.") + if err != nil { + t.Fatalf("error writing on server: %v", err) + } + sendStatus(0, ch, t) +} + +func readLine(shell *terminal.Terminal, t *testing.T) { + if _, err := shell.ReadLine(); err != nil && err != io.EOF { + t.Errorf("unable to read line: %v", err) + } +} + +func sendStatus(status uint32, ch Channel, t *testing.T) { + msg := exitStatusMsg{ + Status: status, + } + if _, err := ch.SendRequest("exit-status", false, Marshal(&msg)); err != nil { + t.Errorf("unable to send status: %v", err) + } +} + +func sendSignal(signal string, ch Channel, t *testing.T) { + sig := exitSignalMsg{ + Signal: signal, + CoreDumped: false, + Errmsg: "Process terminated", + Lang: "en-GB-oed", + } + if _, err := ch.SendRequest("exit-signal", false, Marshal(&sig)); err != nil { + t.Errorf("unable to send signal: %v", err) + } +} + +func discardHandler(ch Channel, t *testing.T) { + defer ch.Close() + io.Copy(ioutil.Discard, ch) +} + +func echoHandler(ch Channel, in <-chan *Request, t *testing.T) { + defer ch.Close() + if n, err := copyNRandomly("echohandler", ch, ch, windowTestBytes); err != nil { + t.Errorf("short write, wrote %d, expected %d: %v ", n, windowTestBytes, err) + } +} + +// copyNRandomly copies n bytes from src to dst. It uses a variable, and random, +// buffer size to exercise more code paths. 
+func copyNRandomly(title string, dst io.Writer, src io.Reader, n int) (int, error) { + var ( + buf = make([]byte, 32*1024) + written int + remaining = n + ) + for remaining > 0 { + l := rand.Intn(1 << 15) + if remaining < l { + l = remaining + } + nr, er := src.Read(buf[:l]) + nw, ew := dst.Write(buf[:nr]) + remaining -= nw + written += nw + if ew != nil { + return written, ew + } + if nr != nw { + return written, io.ErrShortWrite + } + if er != nil && er != io.EOF { + return written, er + } + } + return written, nil +} + +func channelKeepaliveSender(ch Channel, in <-chan *Request, t *testing.T) { + defer ch.Close() + shell := newServerShell(ch, in, "> ") + readLine(shell, t) + if _, err := ch.SendRequest("keepalive@openssh.com", true, nil); err != nil { + t.Errorf("unable to send channel keepalive request: %v", err) + } + sendStatus(0, ch, t) +} + +func TestClientWriteEOF(t *testing.T) { + conn := dial(simpleEchoHandler, t) + defer conn.Close() + + session, err := conn.NewSession() + if err != nil { + t.Fatal(err) + } + defer session.Close() + stdin, err := session.StdinPipe() + if err != nil { + t.Fatalf("StdinPipe failed: %v", err) + } + stdout, err := session.StdoutPipe() + if err != nil { + t.Fatalf("StdoutPipe failed: %v", err) + } + + data := []byte(`0000`) + _, err = stdin.Write(data) + if err != nil { + t.Fatalf("Write failed: %v", err) + } + stdin.Close() + + res, err := ioutil.ReadAll(stdout) + if err != nil { + t.Fatalf("Read failed: %v", err) + } + + if !bytes.Equal(data, res) { + t.Fatalf("Read differed from write, wrote: %v, read: %v", data, res) + } +} + +func simpleEchoHandler(ch Channel, in <-chan *Request, t *testing.T) { + defer ch.Close() + data, err := ioutil.ReadAll(ch) + if err != nil { + t.Errorf("handler read error: %v", err) + } + _, err = ch.Write(data) + if err != nil { + t.Errorf("handler write error: %v", err) + } +} + +func TestSessionID(t *testing.T) { + c1, c2, err := netPipe() + if err != nil { + t.Fatalf("netPipe: %v", err) + } + defer c1.Close() + defer c2.Close() + + serverID := make(chan []byte, 1) + clientID := make(chan []byte, 1) + + serverConf := &ServerConfig{ + NoClientAuth: true, + } + serverConf.AddHostKey(testSigners["ecdsa"]) + clientConf := &ClientConfig{ + User: "user", + } + + go func() { + conn, chans, reqs, err := NewServerConn(c1, serverConf) + if err != nil { + t.Fatalf("server handshake: %v", err) + } + serverID <- conn.SessionID() + go DiscardRequests(reqs) + for ch := range chans { + ch.Reject(Prohibited, "") + } + }() + + go func() { + conn, chans, reqs, err := NewClientConn(c2, "", clientConf) + if err != nil { + t.Fatalf("client handshake: %v", err) + } + clientID <- conn.SessionID() + go DiscardRequests(reqs) + for ch := range chans { + ch.Reject(Prohibited, "") + } + }() + + s := <-serverID + c := <-clientID + if bytes.Compare(s, c) != 0 { + t.Errorf("server session ID (%x) != client session ID (%x)", s, c) + } else if len(s) == 0 { + t.Errorf("client and server SessionID were empty.") + } +} + +type noReadConn struct { + readSeen bool + net.Conn +} + +func (c *noReadConn) Close() error { + return nil +} + +func (c *noReadConn) Read(b []byte) (int, error) { + c.readSeen = true + return 0, errors.New("noReadConn error") +} + +func TestInvalidServerConfiguration(t *testing.T) { + c1, c2, err := netPipe() + if err != nil { + t.Fatalf("netPipe: %v", err) + } + defer c1.Close() + defer c2.Close() + + serveConn := noReadConn{Conn: c1} + serverConf := &ServerConfig{} + + NewServerConn(&serveConn, serverConf) + if serveConn.readSeen 
{ + t.Fatalf("NewServerConn attempted to Read() from Conn while configuration is missing host key") + } + + serverConf.AddHostKey(testSigners["ecdsa"]) + + NewServerConn(&serveConn, serverConf) + if serveConn.readSeen { + t.Fatalf("NewServerConn attempted to Read() from Conn while configuration is missing authentication method") + } +} + +func TestHostKeyAlgorithms(t *testing.T) { + serverConf := &ServerConfig{ + NoClientAuth: true, + } + serverConf.AddHostKey(testSigners["rsa"]) + serverConf.AddHostKey(testSigners["ecdsa"]) + + connect := func(clientConf *ClientConfig, want string) { + var alg string + clientConf.HostKeyCallback = func(h string, a net.Addr, key PublicKey) error { + alg = key.Type() + return nil + } + c1, c2, err := netPipe() + if err != nil { + t.Fatalf("netPipe: %v", err) + } + defer c1.Close() + defer c2.Close() + + go NewServerConn(c1, serverConf) + _, _, _, err = NewClientConn(c2, "", clientConf) + if err != nil { + t.Fatalf("NewClientConn: %v", err) + } + if alg != want { + t.Errorf("selected key algorithm %s, want %s", alg, want) + } + } + + // By default, we get the preferred algorithm, which is ECDSA 256. + + clientConf := &ClientConfig{} + connect(clientConf, KeyAlgoECDSA256) + + // Client asks for RSA explicitly. + clientConf.HostKeyAlgorithms = []string{KeyAlgoRSA} + connect(clientConf, KeyAlgoRSA) + + c1, c2, err := netPipe() + if err != nil { + t.Fatalf("netPipe: %v", err) + } + defer c1.Close() + defer c2.Close() + + go NewServerConn(c1, serverConf) + clientConf.HostKeyAlgorithms = []string{"nonexistent-hostkey-algo"} + _, _, _, err = NewClientConn(c2, "", clientConf) + if err == nil { + t.Fatal("succeeded connecting with unknown hostkey algorithm") + } +} diff --git a/vendor/golang.org/x/crypto/ssh/tcpip.go b/vendor/golang.org/x/crypto/ssh/tcpip.go new file mode 100644 index 000000000..6151241ff --- /dev/null +++ b/vendor/golang.org/x/crypto/ssh/tcpip.go @@ -0,0 +1,407 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ssh + +import ( + "errors" + "fmt" + "io" + "math/rand" + "net" + "strconv" + "strings" + "sync" + "time" +) + +// Listen requests the remote peer open a listening socket on +// addr. Incoming connections will be available by calling Accept on +// the returned net.Listener. The listener must be serviced, or the +// SSH connection may hang. +func (c *Client) Listen(n, addr string) (net.Listener, error) { + laddr, err := net.ResolveTCPAddr(n, addr) + if err != nil { + return nil, err + } + return c.ListenTCP(laddr) +} + +// Automatic port allocation is broken with OpenSSH before 6.0. See +// also https://bugzilla.mindrot.org/show_bug.cgi?id=2017. In +// particular, OpenSSH 5.9 sends a channelOpenMsg with port number 0, +// rather than the actual port number. This means you can never open +// two different listeners with auto allocated ports. We work around +// this by trying explicit ports until we succeed. + +const openSSHPrefix = "OpenSSH_" + +var portRandomizer = rand.New(rand.NewSource(time.Now().UnixNano())) + +// isBrokenOpenSSHVersion returns true if the given version string +// specifies a version of OpenSSH that is known to have a bug in port +// forwarding. 
+func isBrokenOpenSSHVersion(versionStr string) bool { + i := strings.Index(versionStr, openSSHPrefix) + if i < 0 { + return false + } + i += len(openSSHPrefix) + j := i + for ; j < len(versionStr); j++ { + if versionStr[j] < '0' || versionStr[j] > '9' { + break + } + } + version, _ := strconv.Atoi(versionStr[i:j]) + return version < 6 +} + +// autoPortListenWorkaround simulates automatic port allocation by +// trying random ports repeatedly. +func (c *Client) autoPortListenWorkaround(laddr *net.TCPAddr) (net.Listener, error) { + var sshListener net.Listener + var err error + const tries = 10 + for i := 0; i < tries; i++ { + addr := *laddr + addr.Port = 1024 + portRandomizer.Intn(60000) + sshListener, err = c.ListenTCP(&addr) + if err == nil { + laddr.Port = addr.Port + return sshListener, err + } + } + return nil, fmt.Errorf("ssh: listen on random port failed after %d tries: %v", tries, err) +} + +// RFC 4254 7.1 +type channelForwardMsg struct { + addr string + rport uint32 +} + +// ListenTCP requests the remote peer open a listening socket +// on laddr. Incoming connections will be available by calling +// Accept on the returned net.Listener. +func (c *Client) ListenTCP(laddr *net.TCPAddr) (net.Listener, error) { + if laddr.Port == 0 && isBrokenOpenSSHVersion(string(c.ServerVersion())) { + return c.autoPortListenWorkaround(laddr) + } + + m := channelForwardMsg{ + laddr.IP.String(), + uint32(laddr.Port), + } + // send message + ok, resp, err := c.SendRequest("tcpip-forward", true, Marshal(&m)) + if err != nil { + return nil, err + } + if !ok { + return nil, errors.New("ssh: tcpip-forward request denied by peer") + } + + // If the original port was 0, then the remote side will + // supply a real port number in the response. + if laddr.Port == 0 { + var p struct { + Port uint32 + } + if err := Unmarshal(resp, &p); err != nil { + return nil, err + } + laddr.Port = int(p.Port) + } + + // Register this forward, using the port number we obtained. + ch := c.forwards.add(*laddr) + + return &tcpListener{laddr, c, ch}, nil +} + +// forwardList stores a mapping between remote +// forward requests and the tcpListeners. +type forwardList struct { + sync.Mutex + entries []forwardEntry +} + +// forwardEntry represents an established mapping of a laddr on a +// remote ssh server to a channel connected to a tcpListener. +type forwardEntry struct { + laddr net.TCPAddr + c chan forward +} + +// forward represents an incoming forwarded tcpip connection. The +// arguments to add/remove/lookup should be address as specified in +// the original forward-request. +type forward struct { + newCh NewChannel // the ssh client channel underlying this forward + raddr *net.TCPAddr // the raddr of the incoming connection +} + +func (l *forwardList) add(addr net.TCPAddr) chan forward { + l.Lock() + defer l.Unlock() + f := forwardEntry{ + addr, + make(chan forward, 1), + } + l.entries = append(l.entries, f) + return f.c +} + +// See RFC 4254, section 7.2 +type forwardedTCPPayload struct { + Addr string + Port uint32 + OriginAddr string + OriginPort uint32 +} + +// parseTCPAddr parses the originating address from the remote into a *net.TCPAddr. 
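+// For example, "SSH-2.0-OpenSSH_5.9" is treated as broken, while
+// "SSH-2.0-OpenSSH_6.1" is not (see TestAutoPortListenBroken).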
+func parseTCPAddr(addr string, port uint32) (*net.TCPAddr, error) { + if port == 0 || port > 65535 { + return nil, fmt.Errorf("ssh: port number out of range: %d", port) + } + ip := net.ParseIP(string(addr)) + if ip == nil { + return nil, fmt.Errorf("ssh: cannot parse IP address %q", addr) + } + return &net.TCPAddr{IP: ip, Port: int(port)}, nil +} + +func (l *forwardList) handleChannels(in <-chan NewChannel) { + for ch := range in { + var payload forwardedTCPPayload + if err := Unmarshal(ch.ExtraData(), &payload); err != nil { + ch.Reject(ConnectionFailed, "could not parse forwarded-tcpip payload: "+err.Error()) + continue + } + + // RFC 4254 section 7.2 specifies that incoming + // addresses should list the address, in string + // format. It is implied that this should be an IP + // address, as it would be impossible to connect to it + // otherwise. + laddr, err := parseTCPAddr(payload.Addr, payload.Port) + if err != nil { + ch.Reject(ConnectionFailed, err.Error()) + continue + } + raddr, err := parseTCPAddr(payload.OriginAddr, payload.OriginPort) + if err != nil { + ch.Reject(ConnectionFailed, err.Error()) + continue + } + + if ok := l.forward(*laddr, *raddr, ch); !ok { + // Section 7.2, implementations MUST reject spurious incoming + // connections. + ch.Reject(Prohibited, "no forward for address") + continue + } + } +} + +// remove removes the forward entry, and the channel feeding its +// listener. +func (l *forwardList) remove(addr net.TCPAddr) { + l.Lock() + defer l.Unlock() + for i, f := range l.entries { + if addr.IP.Equal(f.laddr.IP) && addr.Port == f.laddr.Port { + l.entries = append(l.entries[:i], l.entries[i+1:]...) + close(f.c) + return + } + } +} + +// closeAll closes and clears all forwards. +func (l *forwardList) closeAll() { + l.Lock() + defer l.Unlock() + for _, f := range l.entries { + close(f.c) + } + l.entries = nil +} + +func (l *forwardList) forward(laddr, raddr net.TCPAddr, ch NewChannel) bool { + l.Lock() + defer l.Unlock() + for _, f := range l.entries { + if laddr.IP.Equal(f.laddr.IP) && laddr.Port == f.laddr.Port { + f.c <- forward{ch, &raddr} + return true + } + } + return false +} + +type tcpListener struct { + laddr *net.TCPAddr + + conn *Client + in <-chan forward +} + +// Accept waits for and returns the next connection to the listener. +func (l *tcpListener) Accept() (net.Conn, error) { + s, ok := <-l.in + if !ok { + return nil, io.EOF + } + ch, incoming, err := s.newCh.Accept() + if err != nil { + return nil, err + } + go DiscardRequests(incoming) + + return &tcpChanConn{ + Channel: ch, + laddr: l.laddr, + raddr: s.raddr, + }, nil +} + +// Close closes the listener. +func (l *tcpListener) Close() error { + m := channelForwardMsg{ + l.laddr.IP.String(), + uint32(l.laddr.Port), + } + + // this also closes the listener. + l.conn.forwards.remove(*l.laddr) + ok, _, err := l.conn.SendRequest("cancel-tcpip-forward", true, Marshal(&m)) + if err == nil && !ok { + err = errors.New("ssh: cancel-tcpip-forward failed") + } + return err +} + +// Addr returns the listener's network address. +func (l *tcpListener) Addr() net.Addr { + return l.laddr +} + +// Dial initiates a connection to the addr from the remote host. +// The resulting connection has a zero LocalAddr() and RemoteAddr(). +func (c *Client) Dial(n, addr string) (net.Conn, error) { + // Parse the address into host and numeric port. 
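+// For example, parseTCPAddr("127.0.0.1", 8022) yields the TCP address
+// 127.0.0.1:8022; a port of 0 or above 65535 is rejected as out of range.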
+	host, portString, err := net.SplitHostPort(addr)
+	if err != nil {
+		return nil, err
+	}
+	port, err := strconv.ParseUint(portString, 10, 16)
+	if err != nil {
+		return nil, err
+	}
+	// Use a zero address for local and remote address.
+	zeroAddr := &net.TCPAddr{
+		IP:   net.IPv4zero,
+		Port: 0,
+	}
+	ch, err := c.dial(net.IPv4zero.String(), 0, host, int(port))
+	if err != nil {
+		return nil, err
+	}
+	return &tcpChanConn{
+		Channel: ch,
+		laddr:   zeroAddr,
+		raddr:   zeroAddr,
+	}, nil
+}
+
+// DialTCP connects to the remote address raddr on the network net,
+// which must be "tcp", "tcp4", or "tcp6". If laddr is not nil, it is used
+// as the local address for the connection.
+func (c *Client) DialTCP(n string, laddr, raddr *net.TCPAddr) (net.Conn, error) {
+	if laddr == nil {
+		laddr = &net.TCPAddr{
+			IP:   net.IPv4zero,
+			Port: 0,
+		}
+	}
+	ch, err := c.dial(laddr.IP.String(), laddr.Port, raddr.IP.String(), raddr.Port)
+	if err != nil {
+		return nil, err
+	}
+	return &tcpChanConn{
+		Channel: ch,
+		laddr:   laddr,
+		raddr:   raddr,
+	}, nil
+}
+
+// RFC 4254 7.2
+type channelOpenDirectMsg struct {
+	raddr string
+	rport uint32
+	laddr string
+	lport uint32
+}
+
+func (c *Client) dial(laddr string, lport int, raddr string, rport int) (Channel, error) {
+	msg := channelOpenDirectMsg{
+		raddr: raddr,
+		rport: uint32(rport),
+		laddr: laddr,
+		lport: uint32(lport),
+	}
+	ch, in, err := c.OpenChannel("direct-tcpip", Marshal(&msg))
+	if err != nil {
+		return nil, err
+	}
+	go DiscardRequests(in)
+	return ch, err
+}
+
+type tcpChan struct {
+	Channel // the backing channel
+}
+
+// tcpChanConn fulfills the net.Conn interface without
+// the tcpChan having to hold laddr or raddr directly.
+type tcpChanConn struct {
+	Channel
+	laddr, raddr net.Addr
+}
+
+// LocalAddr returns the local network address.
+func (t *tcpChanConn) LocalAddr() net.Addr {
+	return t.laddr
+}
+
+// RemoteAddr returns the remote network address.
+func (t *tcpChanConn) RemoteAddr() net.Addr {
+	return t.raddr
+}
+
+// SetDeadline sets the read and write deadlines associated
+// with the connection.
+func (t *tcpChanConn) SetDeadline(deadline time.Time) error {
+	if err := t.SetReadDeadline(deadline); err != nil {
+		return err
+	}
+	return t.SetWriteDeadline(deadline)
+}
+
+// SetReadDeadline exists to satisfy the net.Conn interface but is not
+// implemented by this type. It always returns an error.
+func (t *tcpChanConn) SetReadDeadline(deadline time.Time) error {
+	return errors.New("ssh: tcpChan: deadline not supported")
+}
+
+// SetWriteDeadline exists to satisfy the net.Conn interface
+// but is not implemented by this type. It always returns an error.
+func (t *tcpChanConn) SetWriteDeadline(deadline time.Time) error {
+	return errors.New("ssh: tcpChan: deadline not supported")
+}
diff --git a/vendor/golang.org/x/crypto/ssh/tcpip_test.go b/vendor/golang.org/x/crypto/ssh/tcpip_test.go
new file mode 100644
index 000000000..f1265cb49
--- /dev/null
+++ b/vendor/golang.org/x/crypto/ssh/tcpip_test.go
@@ -0,0 +1,20 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+ +package ssh + +import ( + "testing" +) + +func TestAutoPortListenBroken(t *testing.T) { + broken := "SSH-2.0-OpenSSH_5.9hh11" + works := "SSH-2.0-OpenSSH_6.1" + if !isBrokenOpenSSHVersion(broken) { + t.Errorf("version %q not marked as broken", broken) + } + if isBrokenOpenSSHVersion(works) { + t.Errorf("version %q marked as broken", works) + } +} diff --git a/vendor/golang.org/x/crypto/ssh/terminal/terminal.go b/vendor/golang.org/x/crypto/ssh/terminal/terminal.go new file mode 100644 index 000000000..741eeb13f --- /dev/null +++ b/vendor/golang.org/x/crypto/ssh/terminal/terminal.go @@ -0,0 +1,892 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package terminal + +import ( + "bytes" + "io" + "sync" + "unicode/utf8" +) + +// EscapeCodes contains escape sequences that can be written to the terminal in +// order to achieve different styles of text. +type EscapeCodes struct { + // Foreground colors + Black, Red, Green, Yellow, Blue, Magenta, Cyan, White []byte + + // Reset all attributes + Reset []byte +} + +var vt100EscapeCodes = EscapeCodes{ + Black: []byte{keyEscape, '[', '3', '0', 'm'}, + Red: []byte{keyEscape, '[', '3', '1', 'm'}, + Green: []byte{keyEscape, '[', '3', '2', 'm'}, + Yellow: []byte{keyEscape, '[', '3', '3', 'm'}, + Blue: []byte{keyEscape, '[', '3', '4', 'm'}, + Magenta: []byte{keyEscape, '[', '3', '5', 'm'}, + Cyan: []byte{keyEscape, '[', '3', '6', 'm'}, + White: []byte{keyEscape, '[', '3', '7', 'm'}, + + Reset: []byte{keyEscape, '[', '0', 'm'}, +} + +// Terminal contains the state for running a VT100 terminal that is capable of +// reading lines of input. +type Terminal struct { + // AutoCompleteCallback, if non-null, is called for each keypress with + // the full input line and the current position of the cursor (in + // bytes, as an index into |line|). If it returns ok=false, the key + // press is processed normally. Otherwise it returns a replacement line + // and the new cursor position. + AutoCompleteCallback func(line string, pos int, key rune) (newLine string, newPos int, ok bool) + + // Escape contains a pointer to the escape codes for this terminal. + // It's always a valid pointer, although the escape codes themselves + // may be empty if the terminal doesn't support them. + Escape *EscapeCodes + + // lock protects the terminal and the state in this object from + // concurrent processing of a key press and a Write() call. + lock sync.Mutex + + c io.ReadWriter + prompt []rune + + // line is the current line being entered. + line []rune + // pos is the logical position of the cursor in line + pos int + // echo is true if local echo is enabled + echo bool + // pasteActive is true iff there is a bracketed paste operation in + // progress. + pasteActive bool + + // cursorX contains the current X value of the cursor where the left + // edge is 0. cursorY contains the row number where the first row of + // the current line is 0. + cursorX, cursorY int + // maxLine is the greatest value of cursorY so far. + maxLine int + + termWidth, termHeight int + + // outBuf contains the terminal data to be sent. + outBuf []byte + // remainder contains the remainder of any partial key sequences after + // a read. It aliases into inBuf. + remainder []byte + inBuf [256]byte + + // history contains previously entered commands so that they can be + // accessed with the up and down keys. 
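+	// Only the most recent entries are retained; the backing ring
+	// buffer keeps 100 entries by default (see stRingBuffer.Add).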
+ history stRingBuffer + // historyIndex stores the currently accessed history entry, where zero + // means the immediately previous entry. + historyIndex int + // When navigating up and down the history it's possible to return to + // the incomplete, initial line. That value is stored in + // historyPending. + historyPending string +} + +// NewTerminal runs a VT100 terminal on the given ReadWriter. If the ReadWriter is +// a local terminal, that terminal must first have been put into raw mode. +// prompt is a string that is written at the start of each input line (i.e. +// "> "). +func NewTerminal(c io.ReadWriter, prompt string) *Terminal { + return &Terminal{ + Escape: &vt100EscapeCodes, + c: c, + prompt: []rune(prompt), + termWidth: 80, + termHeight: 24, + echo: true, + historyIndex: -1, + } +} + +const ( + keyCtrlD = 4 + keyCtrlU = 21 + keyEnter = '\r' + keyEscape = 27 + keyBackspace = 127 + keyUnknown = 0xd800 /* UTF-16 surrogate area */ + iota + keyUp + keyDown + keyLeft + keyRight + keyAltLeft + keyAltRight + keyHome + keyEnd + keyDeleteWord + keyDeleteLine + keyClearScreen + keyPasteStart + keyPasteEnd +) + +var pasteStart = []byte{keyEscape, '[', '2', '0', '0', '~'} +var pasteEnd = []byte{keyEscape, '[', '2', '0', '1', '~'} + +// bytesToKey tries to parse a key sequence from b. If successful, it returns +// the key and the remainder of the input. Otherwise it returns utf8.RuneError. +func bytesToKey(b []byte, pasteActive bool) (rune, []byte) { + if len(b) == 0 { + return utf8.RuneError, nil + } + + if !pasteActive { + switch b[0] { + case 1: // ^A + return keyHome, b[1:] + case 5: // ^E + return keyEnd, b[1:] + case 8: // ^H + return keyBackspace, b[1:] + case 11: // ^K + return keyDeleteLine, b[1:] + case 12: // ^L + return keyClearScreen, b[1:] + case 23: // ^W + return keyDeleteWord, b[1:] + } + } + + if b[0] != keyEscape { + if !utf8.FullRune(b) { + return utf8.RuneError, b + } + r, l := utf8.DecodeRune(b) + return r, b[l:] + } + + if !pasteActive && len(b) >= 3 && b[0] == keyEscape && b[1] == '[' { + switch b[2] { + case 'A': + return keyUp, b[3:] + case 'B': + return keyDown, b[3:] + case 'C': + return keyRight, b[3:] + case 'D': + return keyLeft, b[3:] + case 'H': + return keyHome, b[3:] + case 'F': + return keyEnd, b[3:] + } + } + + if !pasteActive && len(b) >= 6 && b[0] == keyEscape && b[1] == '[' && b[2] == '1' && b[3] == ';' && b[4] == '3' { + switch b[5] { + case 'C': + return keyAltRight, b[6:] + case 'D': + return keyAltLeft, b[6:] + } + } + + if !pasteActive && len(b) >= 6 && bytes.Equal(b[:6], pasteStart) { + return keyPasteStart, b[6:] + } + + if pasteActive && len(b) >= 6 && bytes.Equal(b[:6], pasteEnd) { + return keyPasteEnd, b[6:] + } + + // If we get here then we have a key that we don't recognise, or a + // partial sequence. It's not clear how one should find the end of a + // sequence without knowing them all, but it seems that [a-zA-Z~] only + // appears at the end of a sequence. + for i, c := range b[0:] { + if c >= 'a' && c <= 'z' || c >= 'A' && c <= 'Z' || c == '~' { + return keyUnknown, b[i+1:] + } + } + + return utf8.RuneError, b +} + +// queue appends data to the end of t.outBuf +func (t *Terminal) queue(data []rune) { + t.outBuf = append(t.outBuf, []byte(string(data))...) 
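+	// Nothing is written to t.c here; callers flush outBuf
+	// (readLine, Write and SetSize write it out and truncate it).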
+}
+
+var eraseUnderCursor = []rune{' ', keyEscape, '[', 'D'}
+var space = []rune{' '}
+
+func isPrintable(key rune) bool {
+	isInSurrogateArea := key >= 0xd800 && key <= 0xdbff
+	return key >= 32 && !isInSurrogateArea
+}
+
+// moveCursorToPos appends data to t.outBuf which will move the cursor to the
+// given logical position in the text.
+func (t *Terminal) moveCursorToPos(pos int) {
+	if !t.echo {
+		return
+	}
+
+	x := visualLength(t.prompt) + pos
+	y := x / t.termWidth
+	x = x % t.termWidth
+
+	up := 0
+	if y < t.cursorY {
+		up = t.cursorY - y
+	}
+
+	down := 0
+	if y > t.cursorY {
+		down = y - t.cursorY
+	}
+
+	left := 0
+	if x < t.cursorX {
+		left = t.cursorX - x
+	}
+
+	right := 0
+	if x > t.cursorX {
+		right = x - t.cursorX
+	}
+
+	t.cursorX = x
+	t.cursorY = y
+	t.move(up, down, left, right)
+}
+
+func (t *Terminal) move(up, down, left, right int) {
+	movement := make([]rune, 3*(up+down+left+right))
+	m := movement
+	for i := 0; i < up; i++ {
+		m[0] = keyEscape
+		m[1] = '['
+		m[2] = 'A'
+		m = m[3:]
+	}
+	for i := 0; i < down; i++ {
+		m[0] = keyEscape
+		m[1] = '['
+		m[2] = 'B'
+		m = m[3:]
+	}
+	for i := 0; i < left; i++ {
+		m[0] = keyEscape
+		m[1] = '['
+		m[2] = 'D'
+		m = m[3:]
+	}
+	for i := 0; i < right; i++ {
+		m[0] = keyEscape
+		m[1] = '['
+		m[2] = 'C'
+		m = m[3:]
+	}
+
+	t.queue(movement)
+}
+
+func (t *Terminal) clearLineToRight() {
+	op := []rune{keyEscape, '[', 'K'}
+	t.queue(op)
+}
+
+const maxLineLength = 4096
+
+func (t *Terminal) setLine(newLine []rune, newPos int) {
+	if t.echo {
+		t.moveCursorToPos(0)
+		t.writeLine(newLine)
+		for i := len(newLine); i < len(t.line); i++ {
+			t.writeLine(space)
+		}
+		t.moveCursorToPos(newPos)
+	}
+	t.line = newLine
+	t.pos = newPos
+}
+
+func (t *Terminal) advanceCursor(places int) {
+	t.cursorX += places
+	t.cursorY += t.cursorX / t.termWidth
+	if t.cursorY > t.maxLine {
+		t.maxLine = t.cursorY
+	}
+	t.cursorX = t.cursorX % t.termWidth
+
+	if places > 0 && t.cursorX == 0 {
+		// Normally terminals will advance the current position
+		// when writing a character. But that doesn't happen
+		// for the last character in a line. However, when
+		// writing a character (except a new line) that causes
+		// a line wrap, the position will be advanced two
+		// places.
+		//
+		// So, if we are stopping at the end of a line, we
+		// need to write a newline so that our cursor can be
+		// advanced to the next line.
+		t.outBuf = append(t.outBuf, '\n')
+	}
+}
+
+func (t *Terminal) eraseNPreviousChars(n int) {
+	if n == 0 {
+		return
+	}
+
+	if t.pos < n {
+		n = t.pos
+	}
+	t.pos -= n
+	t.moveCursorToPos(t.pos)
+
+	copy(t.line[t.pos:], t.line[n+t.pos:])
+	t.line = t.line[:len(t.line)-n]
+	if t.echo {
+		t.writeLine(t.line[t.pos:])
+		for i := 0; i < n; i++ {
+			t.queue(space)
+		}
+		t.advanceCursor(n)
+		t.moveCursorToPos(t.pos)
+	}
+}
+
+// countToLeftWord returns the number of characters from the cursor to the
+// start of the previous word.
+func (t *Terminal) countToLeftWord() int {
+	if t.pos == 0 {
+		return 0
+	}
+
+	pos := t.pos - 1
+	for pos > 0 {
+		if t.line[pos] != ' ' {
+			break
+		}
+		pos--
+	}
+	for pos > 0 {
+		if t.line[pos] == ' ' {
+			pos++
+			break
+		}
+		pos--
+	}
+
+	return t.pos - pos
+}
+
+// countToRightWord returns the number of characters from the cursor to the
+// start of the next word.
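+// For example, with line "foo bar" and the cursor on the 'f', it returns 4
+// (the three runes of "foo" plus the single trailing space).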
+func (t *Terminal) countToRightWord() int { + pos := t.pos + for pos < len(t.line) { + if t.line[pos] == ' ' { + break + } + pos++ + } + for pos < len(t.line) { + if t.line[pos] != ' ' { + break + } + pos++ + } + return pos - t.pos +} + +// visualLength returns the number of visible glyphs in s. +func visualLength(runes []rune) int { + inEscapeSeq := false + length := 0 + + for _, r := range runes { + switch { + case inEscapeSeq: + if (r >= 'a' && r <= 'z') || (r >= 'A' && r <= 'Z') { + inEscapeSeq = false + } + case r == '\x1b': + inEscapeSeq = true + default: + length++ + } + } + + return length +} + +// handleKey processes the given key and, optionally, returns a line of text +// that the user has entered. +func (t *Terminal) handleKey(key rune) (line string, ok bool) { + if t.pasteActive && key != keyEnter { + t.addKeyToLine(key) + return + } + + switch key { + case keyBackspace: + if t.pos == 0 { + return + } + t.eraseNPreviousChars(1) + case keyAltLeft: + // move left by a word. + t.pos -= t.countToLeftWord() + t.moveCursorToPos(t.pos) + case keyAltRight: + // move right by a word. + t.pos += t.countToRightWord() + t.moveCursorToPos(t.pos) + case keyLeft: + if t.pos == 0 { + return + } + t.pos-- + t.moveCursorToPos(t.pos) + case keyRight: + if t.pos == len(t.line) { + return + } + t.pos++ + t.moveCursorToPos(t.pos) + case keyHome: + if t.pos == 0 { + return + } + t.pos = 0 + t.moveCursorToPos(t.pos) + case keyEnd: + if t.pos == len(t.line) { + return + } + t.pos = len(t.line) + t.moveCursorToPos(t.pos) + case keyUp: + entry, ok := t.history.NthPreviousEntry(t.historyIndex + 1) + if !ok { + return "", false + } + if t.historyIndex == -1 { + t.historyPending = string(t.line) + } + t.historyIndex++ + runes := []rune(entry) + t.setLine(runes, len(runes)) + case keyDown: + switch t.historyIndex { + case -1: + return + case 0: + runes := []rune(t.historyPending) + t.setLine(runes, len(runes)) + t.historyIndex-- + default: + entry, ok := t.history.NthPreviousEntry(t.historyIndex - 1) + if ok { + t.historyIndex-- + runes := []rune(entry) + t.setLine(runes, len(runes)) + } + } + case keyEnter: + t.moveCursorToPos(len(t.line)) + t.queue([]rune("\r\n")) + line = string(t.line) + ok = true + t.line = t.line[:0] + t.pos = 0 + t.cursorX = 0 + t.cursorY = 0 + t.maxLine = 0 + case keyDeleteWord: + // Delete zero or more spaces and then one or more characters. + t.eraseNPreviousChars(t.countToLeftWord()) + case keyDeleteLine: + // Delete everything from the current cursor position to the + // end of line. + for i := t.pos; i < len(t.line); i++ { + t.queue(space) + t.advanceCursor(1) + } + t.line = t.line[:t.pos] + t.moveCursorToPos(t.pos) + case keyCtrlD: + // Erase the character under the current position. + // The EOF case when the line is empty is handled in + // readLine(). + if t.pos < len(t.line) { + t.pos++ + t.eraseNPreviousChars(1) + } + case keyCtrlU: + t.eraseNPreviousChars(t.pos) + case keyClearScreen: + // Erases the screen and moves the cursor to the home position. 
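+		// ("\x1b[2J" erases the display and "\x1b[H" homes the cursor;
+		// the prompt and current line are then repainted below.)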
+ t.queue([]rune("\x1b[2J\x1b[H")) + t.queue(t.prompt) + t.cursorX, t.cursorY = 0, 0 + t.advanceCursor(visualLength(t.prompt)) + t.setLine(t.line, t.pos) + default: + if t.AutoCompleteCallback != nil { + prefix := string(t.line[:t.pos]) + suffix := string(t.line[t.pos:]) + + t.lock.Unlock() + newLine, newPos, completeOk := t.AutoCompleteCallback(prefix+suffix, len(prefix), key) + t.lock.Lock() + + if completeOk { + t.setLine([]rune(newLine), utf8.RuneCount([]byte(newLine)[:newPos])) + return + } + } + if !isPrintable(key) { + return + } + if len(t.line) == maxLineLength { + return + } + t.addKeyToLine(key) + } + return +} + +// addKeyToLine inserts the given key at the current position in the current +// line. +func (t *Terminal) addKeyToLine(key rune) { + if len(t.line) == cap(t.line) { + newLine := make([]rune, len(t.line), 2*(1+len(t.line))) + copy(newLine, t.line) + t.line = newLine + } + t.line = t.line[:len(t.line)+1] + copy(t.line[t.pos+1:], t.line[t.pos:]) + t.line[t.pos] = key + if t.echo { + t.writeLine(t.line[t.pos:]) + } + t.pos++ + t.moveCursorToPos(t.pos) +} + +func (t *Terminal) writeLine(line []rune) { + for len(line) != 0 { + remainingOnLine := t.termWidth - t.cursorX + todo := len(line) + if todo > remainingOnLine { + todo = remainingOnLine + } + t.queue(line[:todo]) + t.advanceCursor(visualLength(line[:todo])) + line = line[todo:] + } +} + +func (t *Terminal) Write(buf []byte) (n int, err error) { + t.lock.Lock() + defer t.lock.Unlock() + + if t.cursorX == 0 && t.cursorY == 0 { + // This is the easy case: there's nothing on the screen that we + // have to move out of the way. + return t.c.Write(buf) + } + + // We have a prompt and possibly user input on the screen. We + // have to clear it first. + t.move(0 /* up */, 0 /* down */, t.cursorX /* left */, 0 /* right */) + t.cursorX = 0 + t.clearLineToRight() + + for t.cursorY > 0 { + t.move(1 /* up */, 0, 0, 0) + t.cursorY-- + t.clearLineToRight() + } + + if _, err = t.c.Write(t.outBuf); err != nil { + return + } + t.outBuf = t.outBuf[:0] + + if n, err = t.c.Write(buf); err != nil { + return + } + + t.writeLine(t.prompt) + if t.echo { + t.writeLine(t.line) + } + + t.moveCursorToPos(t.pos) + + if _, err = t.c.Write(t.outBuf); err != nil { + return + } + t.outBuf = t.outBuf[:0] + return +} + +// ReadPassword temporarily changes the prompt and reads a password, without +// echo, from the terminal. +func (t *Terminal) ReadPassword(prompt string) (line string, err error) { + t.lock.Lock() + defer t.lock.Unlock() + + oldPrompt := t.prompt + t.prompt = []rune(prompt) + t.echo = false + + line, err = t.readLine() + + t.prompt = oldPrompt + t.echo = true + + return +} + +// ReadLine returns a line of input from the terminal. 
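+// A minimal usage sketch, assuming rw is an io.ReadWriter connected to a
+// terminal that has already been put into raw mode:
+//
+//	term := NewTerminal(rw, "> ")
+//	for {
+//		line, err := term.ReadLine()
+//		if err != nil {
+//			break
+//		}
+//		fmt.Fprintln(term, "echo:", line)
+//	}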
+func (t *Terminal) ReadLine() (line string, err error) {
+	t.lock.Lock()
+	defer t.lock.Unlock()
+
+	return t.readLine()
+}
+
+func (t *Terminal) readLine() (line string, err error) {
+	// t.lock must be held at this point
+
+	if t.cursorX == 0 && t.cursorY == 0 {
+		t.writeLine(t.prompt)
+		t.c.Write(t.outBuf)
+		t.outBuf = t.outBuf[:0]
+	}
+
+	lineIsPasted := t.pasteActive
+
+	for {
+		rest := t.remainder
+		lineOk := false
+		for !lineOk {
+			var key rune
+			key, rest = bytesToKey(rest, t.pasteActive)
+			if key == utf8.RuneError {
+				break
+			}
+			if !t.pasteActive {
+				if key == keyCtrlD {
+					if len(t.line) == 0 {
+						return "", io.EOF
+					}
+				}
+				if key == keyPasteStart {
+					t.pasteActive = true
+					if len(t.line) == 0 {
+						lineIsPasted = true
+					}
+					continue
+				}
+			} else if key == keyPasteEnd {
+				t.pasteActive = false
+				continue
+			}
+			if !t.pasteActive {
+				lineIsPasted = false
+			}
+			line, lineOk = t.handleKey(key)
+		}
+		if len(rest) > 0 {
+			n := copy(t.inBuf[:], rest)
+			t.remainder = t.inBuf[:n]
+		} else {
+			t.remainder = nil
+		}
+		t.c.Write(t.outBuf)
+		t.outBuf = t.outBuf[:0]
+		if lineOk {
+			if t.echo {
+				t.historyIndex = -1
+				t.history.Add(line)
+			}
+			if lineIsPasted {
+				err = ErrPasteIndicator
+			}
+			return
+		}
+
+		// t.remainder is a slice at the beginning of t.inBuf
+		// containing a partial key sequence
+		readBuf := t.inBuf[len(t.remainder):]
+		var n int
+
+		t.lock.Unlock()
+		n, err = t.c.Read(readBuf)
+		t.lock.Lock()
+
+		if err != nil {
+			return
+		}
+
+		t.remainder = t.inBuf[:n+len(t.remainder)]
+	}
+
+	panic("unreachable") // for Go 1.0.
+}
+
+// SetPrompt sets the prompt to be used when reading subsequent lines.
+func (t *Terminal) SetPrompt(prompt string) {
+	t.lock.Lock()
+	defer t.lock.Unlock()
+
+	t.prompt = []rune(prompt)
+}
+
+func (t *Terminal) clearAndRepaintLinePlusNPrevious(numPrevLines int) {
+	// Move cursor to column zero at the start of the line.
+	t.move(t.cursorY, 0, t.cursorX, 0)
+	t.cursorX, t.cursorY = 0, 0
+	t.clearLineToRight()
+	for t.cursorY < numPrevLines {
+		// Move down a line
+		t.move(0, 1, 0, 0)
+		t.cursorY++
+		t.clearLineToRight()
+	}
+	// Move back to beginning.
+	t.move(t.cursorY, 0, 0, 0)
+	t.cursorX, t.cursorY = 0, 0
+
+	t.queue(t.prompt)
+	t.advanceCursor(visualLength(t.prompt))
+	t.writeLine(t.line)
+	t.moveCursorToPos(t.pos)
+}
+
+func (t *Terminal) SetSize(width, height int) error {
+	t.lock.Lock()
+	defer t.lock.Unlock()
+
+	if width == 0 {
+		width = 1
+	}
+
+	oldWidth := t.termWidth
+	t.termWidth, t.termHeight = width, height
+
+	switch {
+	case width == oldWidth:
+		// If the width didn't change then nothing else needs to be
+		// done.
+		return nil
+	case len(t.line) == 0 && t.cursorX == 0 && t.cursorY == 0:
+		// If there is nothing on the current line and no prompt
+		// printed, just do nothing.
+		return nil
+	case width < oldWidth:
+		// Some terminals (e.g. xterm) will truncate lines that were
+		// too long when shrinking. Others (e.g. gnome-terminal) will
+		// attempt to wrap them. For the former, repainting t.maxLine
+		// works great, but that behaviour goes badly wrong in the case
+		// of the latter because they have doubled every full line.
+
+		// We assume that we are working on a terminal that wraps lines
+		// and adjust the cursor position based on every previous line
+		// wrapping and turning into two. This causes the prompt on
+		// xterms to move upwards, which isn't great, but it avoids a
+		// huge mess with gnome-terminal.
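+		// Concretely: a row that held termWidth characters at the old
+		// width now wraps onto two rows, so cursorY and maxLine are
+		// doubled below before repainting.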
+ if t.cursorX >= t.termWidth { + t.cursorX = t.termWidth - 1 + } + t.cursorY *= 2 + t.clearAndRepaintLinePlusNPrevious(t.maxLine * 2) + case width > oldWidth: + // If the terminal expands then our position calculations will + // be wrong in the future because we think the cursor is + // |t.pos| chars into the string, but there will be a gap at + // the end of any wrapped line. + // + // But the position will actually be correct until we move, so + // we can move back to the beginning and repaint everything. + t.clearAndRepaintLinePlusNPrevious(t.maxLine) + } + + _, err := t.c.Write(t.outBuf) + t.outBuf = t.outBuf[:0] + return err +} + +type pasteIndicatorError struct{} + +func (pasteIndicatorError) Error() string { + return "terminal: ErrPasteIndicator not correctly handled" +} + +// ErrPasteIndicator may be returned from ReadLine as the error, in addition +// to valid line data. It indicates that bracketed paste mode is enabled and +// that the returned line consists only of pasted data. Programs may wish to +// interpret pasted data more literally than typed data. +var ErrPasteIndicator = pasteIndicatorError{} + +// SetBracketedPasteMode requests that the terminal bracket paste operations +// with markers. Not all terminals support this but, if it is supported, then +// enabling this mode will stop any autocomplete callback from running due to +// pastes. Additionally, any lines that are completely pasted will be returned +// from ReadLine with the error set to ErrPasteIndicator. +func (t *Terminal) SetBracketedPasteMode(on bool) { + if on { + io.WriteString(t.c, "\x1b[?2004h") + } else { + io.WriteString(t.c, "\x1b[?2004l") + } +} + +// stRingBuffer is a ring buffer of strings. +type stRingBuffer struct { + // entries contains max elements. + entries []string + max int + // head contains the index of the element most recently added to the ring. + head int + // size contains the number of elements in the ring. + size int +} + +func (s *stRingBuffer) Add(a string) { + if s.entries == nil { + const defaultNumEntries = 100 + s.entries = make([]string, defaultNumEntries) + s.max = defaultNumEntries + } + + s.head = (s.head + 1) % s.max + s.entries[s.head] = a + if s.size < s.max { + s.size++ + } +} + +// NthPreviousEntry returns the value passed to the nth previous call to Add. +// If n is zero then the immediately prior value is returned, if one, then the +// next most recent, and so on. If such an element doesn't exist then ok is +// false. +func (s *stRingBuffer) NthPreviousEntry(n int) (value string, ok bool) { + if n >= s.size { + return "", false + } + index := s.head - n + if index < 0 { + index += s.max + } + return s.entries[index], true +} diff --git a/vendor/golang.org/x/crypto/ssh/terminal/terminal_test.go b/vendor/golang.org/x/crypto/ssh/terminal/terminal_test.go new file mode 100644 index 000000000..a663fe41b --- /dev/null +++ b/vendor/golang.org/x/crypto/ssh/terminal/terminal_test.go @@ -0,0 +1,269 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package terminal + +import ( + "io" + "testing" +) + +type MockTerminal struct { + toSend []byte + bytesPerRead int + received []byte +} + +func (c *MockTerminal) Read(data []byte) (n int, err error) { + n = len(data) + if n == 0 { + return + } + if n > len(c.toSend) { + n = len(c.toSend) + } + if n == 0 { + return 0, io.EOF + } + if c.bytesPerRead > 0 && n > c.bytesPerRead { + n = c.bytesPerRead + } + copy(data, c.toSend[:n]) + c.toSend = c.toSend[n:] + return +} + +func (c *MockTerminal) Write(data []byte) (n int, err error) { + c.received = append(c.received, data...) + return len(data), nil +} + +func TestClose(t *testing.T) { + c := &MockTerminal{} + ss := NewTerminal(c, "> ") + line, err := ss.ReadLine() + if line != "" { + t.Errorf("Expected empty line but got: %s", line) + } + if err != io.EOF { + t.Errorf("Error should have been EOF but got: %s", err) + } +} + +var keyPressTests = []struct { + in string + line string + err error + throwAwayLines int +}{ + { + err: io.EOF, + }, + { + in: "\r", + line: "", + }, + { + in: "foo\r", + line: "foo", + }, + { + in: "a\x1b[Cb\r", // right + line: "ab", + }, + { + in: "a\x1b[Db\r", // left + line: "ba", + }, + { + in: "a\177b\r", // backspace + line: "b", + }, + { + in: "\x1b[A\r", // up + }, + { + in: "\x1b[B\r", // down + }, + { + in: "line\x1b[A\x1b[B\r", // up then down + line: "line", + }, + { + in: "line1\rline2\x1b[A\r", // recall previous line. + line: "line1", + throwAwayLines: 1, + }, + { + // recall two previous lines and append. + in: "line1\rline2\rline3\x1b[A\x1b[Axxx\r", + line: "line1xxx", + throwAwayLines: 2, + }, + { + // Ctrl-A to move to beginning of line followed by ^K to kill + // line. + in: "a b \001\013\r", + line: "", + }, + { + // Ctrl-A to move to beginning of line, Ctrl-E to move to end, + // finally ^K to kill nothing. + in: "a b \001\005\013\r", + line: "a b ", + }, + { + in: "\027\r", + line: "", + }, + { + in: "a\027\r", + line: "", + }, + { + in: "a \027\r", + line: "", + }, + { + in: "a b\027\r", + line: "a ", + }, + { + in: "a b \027\r", + line: "a ", + }, + { + in: "one two thr\x1b[D\027\r", + line: "one two r", + }, + { + in: "\013\r", + line: "", + }, + { + in: "a\013\r", + line: "a", + }, + { + in: "ab\x1b[D\013\r", + line: "a", + }, + { + in: "Ξεσκεπάζω\r", + line: "Ξεσκεπάζω", + }, + { + in: "£\r\x1b[A\177\r", // non-ASCII char, enter, up, backspace. + line: "", + throwAwayLines: 1, + }, + { + in: "£\r££\x1b[A\x1b[B\177\r", // non-ASCII char, enter, 2x non-ASCII, up, down, backspace, enter. + line: "£", + throwAwayLines: 1, + }, + { + // Ctrl-D at the end of the line should be ignored. + in: "a\004\r", + line: "a", + }, + { + // a, b, left, Ctrl-D should erase the b. + in: "ab\x1b[D\004\r", + line: "a", + }, + { + // a, b, c, d, left, left, ^U should erase to the beginning of + // the line. + in: "abcd\x1b[D\x1b[D\025\r", + line: "cd", + }, + { + // Bracketed paste mode: control sequences should be returned + // verbatim in paste mode. + in: "abc\x1b[200~de\177f\x1b[201~\177\r", + line: "abcde\177", + }, + { + // Enter in bracketed paste mode should still work. + in: "abc\x1b[200~d\refg\x1b[201~h\r", + line: "efgh", + throwAwayLines: 1, + }, + { + // Lines consisting entirely of pasted data should be indicated as such. 
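+		// (No paste-end marker is required: Enter terminates the line
+		// while bracketed paste is still active, and ErrPasteIndicator
+		// is returned alongside it.)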
+ in: "\x1b[200~a\r", + line: "a", + err: ErrPasteIndicator, + }, +} + +func TestKeyPresses(t *testing.T) { + for i, test := range keyPressTests { + for j := 1; j < len(test.in); j++ { + c := &MockTerminal{ + toSend: []byte(test.in), + bytesPerRead: j, + } + ss := NewTerminal(c, "> ") + for k := 0; k < test.throwAwayLines; k++ { + _, err := ss.ReadLine() + if err != nil { + t.Errorf("Throwaway line %d from test %d resulted in error: %s", k, i, err) + } + } + line, err := ss.ReadLine() + if line != test.line { + t.Errorf("Line resulting from test %d (%d bytes per read) was '%s', expected '%s'", i, j, line, test.line) + break + } + if err != test.err { + t.Errorf("Error resulting from test %d (%d bytes per read) was '%v', expected '%v'", i, j, err, test.err) + break + } + } + } +} + +func TestPasswordNotSaved(t *testing.T) { + c := &MockTerminal{ + toSend: []byte("password\r\x1b[A\r"), + bytesPerRead: 1, + } + ss := NewTerminal(c, "> ") + pw, _ := ss.ReadPassword("> ") + if pw != "password" { + t.Fatalf("failed to read password, got %s", pw) + } + line, _ := ss.ReadLine() + if len(line) > 0 { + t.Fatalf("password was saved in history") + } +} + +var setSizeTests = []struct { + width, height int +}{ + {40, 13}, + {80, 24}, + {132, 43}, +} + +func TestTerminalSetSize(t *testing.T) { + for _, setSize := range setSizeTests { + c := &MockTerminal{ + toSend: []byte("password\r\x1b[A\r"), + bytesPerRead: 1, + } + ss := NewTerminal(c, "> ") + ss.SetSize(setSize.width, setSize.height) + pw, _ := ss.ReadPassword("Password: ") + if pw != "password" { + t.Fatalf("failed to read password, got %s", pw) + } + if string(c.received) != "Password: \r\n" { + t.Errorf("failed to set the temporary prompt expected %q, got %q", "Password: ", c.received) + } + } +} diff --git a/vendor/golang.org/x/crypto/ssh/terminal/util.go b/vendor/golang.org/x/crypto/ssh/terminal/util.go new file mode 100644 index 000000000..0763c9a97 --- /dev/null +++ b/vendor/golang.org/x/crypto/ssh/terminal/util.go @@ -0,0 +1,128 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build darwin dragonfly freebsd linux,!appengine netbsd openbsd + +// Package terminal provides support functions for dealing with terminals, as +// commonly found on UNIX systems. +// +// Putting a terminal into raw mode is the most common requirement: +// +// oldState, err := terminal.MakeRaw(0) +// if err != nil { +// panic(err) +// } +// defer terminal.Restore(0, oldState) +package terminal + +import ( + "io" + "syscall" + "unsafe" +) + +// State contains the state of a terminal. +type State struct { + termios syscall.Termios +} + +// IsTerminal returns true if the given file descriptor is a terminal. +func IsTerminal(fd int) bool { + var termios syscall.Termios + _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), ioctlReadTermios, uintptr(unsafe.Pointer(&termios)), 0, 0, 0) + return err == 0 +} + +// MakeRaw put the terminal connected to the given file descriptor into raw +// mode and returns the previous state of the terminal so that it can be +// restored. 
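+//
+// Typical use, assuming fd 0 (stdin) refers to a terminal:
+//
+//	oldState, err := MakeRaw(0)
+//	if err != nil {
+//		panic(err)
+//	}
+//	defer Restore(0, oldState)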
+func MakeRaw(fd int) (*State, error) { + var oldState State + if _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), ioctlReadTermios, uintptr(unsafe.Pointer(&oldState.termios)), 0, 0, 0); err != 0 { + return nil, err + } + + newState := oldState.termios + newState.Iflag &^= syscall.ISTRIP | syscall.INLCR | syscall.ICRNL | syscall.IGNCR | syscall.IXON | syscall.IXOFF + newState.Lflag &^= syscall.ECHO | syscall.ICANON | syscall.ISIG + if _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), ioctlWriteTermios, uintptr(unsafe.Pointer(&newState)), 0, 0, 0); err != 0 { + return nil, err + } + + return &oldState, nil +} + +// GetState returns the current state of a terminal which may be useful to +// restore the terminal after a signal. +func GetState(fd int) (*State, error) { + var oldState State + if _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), ioctlReadTermios, uintptr(unsafe.Pointer(&oldState.termios)), 0, 0, 0); err != 0 { + return nil, err + } + + return &oldState, nil +} + +// Restore restores the terminal connected to the given file descriptor to a +// previous state. +func Restore(fd int, state *State) error { + _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), ioctlWriteTermios, uintptr(unsafe.Pointer(&state.termios)), 0, 0, 0) + return err +} + +// GetSize returns the dimensions of the given terminal. +func GetSize(fd int) (width, height int, err error) { + var dimensions [4]uint16 + + if _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), uintptr(syscall.TIOCGWINSZ), uintptr(unsafe.Pointer(&dimensions)), 0, 0, 0); err != 0 { + return -1, -1, err + } + return int(dimensions[1]), int(dimensions[0]), nil +} + +// ReadPassword reads a line of input from a terminal without local echo. This +// is commonly used for inputting passwords and other sensitive data. The slice +// returned does not include the \n. +func ReadPassword(fd int) ([]byte, error) { + var oldState syscall.Termios + if _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), ioctlReadTermios, uintptr(unsafe.Pointer(&oldState)), 0, 0, 0); err != 0 { + return nil, err + } + + newState := oldState + newState.Lflag &^= syscall.ECHO + newState.Lflag |= syscall.ICANON | syscall.ISIG + newState.Iflag |= syscall.ICRNL + if _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), ioctlWriteTermios, uintptr(unsafe.Pointer(&newState)), 0, 0, 0); err != 0 { + return nil, err + } + + defer func() { + syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), ioctlWriteTermios, uintptr(unsafe.Pointer(&oldState)), 0, 0, 0) + }() + + var buf [16]byte + var ret []byte + for { + n, err := syscall.Read(fd, buf[:]) + if err != nil { + return nil, err + } + if n == 0 { + if len(ret) == 0 { + return nil, io.EOF + } + break + } + if buf[n-1] == '\n' { + n-- + } + ret = append(ret, buf[:n]...) + if n < len(buf) { + break + } + } + + return ret, nil +} diff --git a/vendor/golang.org/x/crypto/ssh/terminal/util_bsd.go b/vendor/golang.org/x/crypto/ssh/terminal/util_bsd.go new file mode 100644 index 000000000..9c1ffd145 --- /dev/null +++ b/vendor/golang.org/x/crypto/ssh/terminal/util_bsd.go @@ -0,0 +1,12 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build darwin dragonfly freebsd netbsd openbsd + +package terminal + +import "syscall" + +const ioctlReadTermios = syscall.TIOCGETA +const ioctlWriteTermios = syscall.TIOCSETA diff --git a/vendor/golang.org/x/crypto/ssh/terminal/util_linux.go b/vendor/golang.org/x/crypto/ssh/terminal/util_linux.go new file mode 100644 index 000000000..5883b22d7 --- /dev/null +++ b/vendor/golang.org/x/crypto/ssh/terminal/util_linux.go @@ -0,0 +1,11 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package terminal + +// These constants are declared here, rather than importing +// them from the syscall package as some syscall packages, even +// on linux, for example gccgo, do not declare them. +const ioctlReadTermios = 0x5401 // syscall.TCGETS +const ioctlWriteTermios = 0x5402 // syscall.TCSETS diff --git a/vendor/golang.org/x/crypto/ssh/terminal/util_windows.go b/vendor/golang.org/x/crypto/ssh/terminal/util_windows.go new file mode 100644 index 000000000..2dd6c3d97 --- /dev/null +++ b/vendor/golang.org/x/crypto/ssh/terminal/util_windows.go @@ -0,0 +1,174 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build windows + +// Package terminal provides support functions for dealing with terminals, as +// commonly found on UNIX systems. +// +// Putting a terminal into raw mode is the most common requirement: +// +// oldState, err := terminal.MakeRaw(0) +// if err != nil { +// panic(err) +// } +// defer terminal.Restore(0, oldState) +package terminal + +import ( + "io" + "syscall" + "unsafe" +) + +const ( + enableLineInput = 2 + enableEchoInput = 4 + enableProcessedInput = 1 + enableWindowInput = 8 + enableMouseInput = 16 + enableInsertMode = 32 + enableQuickEditMode = 64 + enableExtendedFlags = 128 + enableAutoPosition = 256 + enableProcessedOutput = 1 + enableWrapAtEolOutput = 2 +) + +var kernel32 = syscall.NewLazyDLL("kernel32.dll") + +var ( + procGetConsoleMode = kernel32.NewProc("GetConsoleMode") + procSetConsoleMode = kernel32.NewProc("SetConsoleMode") + procGetConsoleScreenBufferInfo = kernel32.NewProc("GetConsoleScreenBufferInfo") +) + +type ( + short int16 + word uint16 + + coord struct { + x short + y short + } + smallRect struct { + left short + top short + right short + bottom short + } + consoleScreenBufferInfo struct { + size coord + cursorPosition coord + attributes word + window smallRect + maximumWindowSize coord + } +) + +type State struct { + mode uint32 +} + +// IsTerminal returns true if the given file descriptor is a terminal. +func IsTerminal(fd int) bool { + var st uint32 + r, _, e := syscall.Syscall(procGetConsoleMode.Addr(), 2, uintptr(fd), uintptr(unsafe.Pointer(&st)), 0) + return r != 0 && e == 0 +} + +// MakeRaw put the terminal connected to the given file descriptor into raw +// mode and returns the previous state of the terminal so that it can be +// restored. 
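+//
+// On Windows this is done by clearing the console's line-input, echo and
+// input/output-processing mode flags rather than via termios.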
+func MakeRaw(fd int) (*State, error) { + var st uint32 + _, _, e := syscall.Syscall(procGetConsoleMode.Addr(), 2, uintptr(fd), uintptr(unsafe.Pointer(&st)), 0) + if e != 0 { + return nil, error(e) + } + st &^= (enableEchoInput | enableProcessedInput | enableLineInput | enableProcessedOutput) + _, _, e = syscall.Syscall(procSetConsoleMode.Addr(), 2, uintptr(fd), uintptr(st), 0) + if e != 0 { + return nil, error(e) + } + return &State{st}, nil +} + +// GetState returns the current state of a terminal which may be useful to +// restore the terminal after a signal. +func GetState(fd int) (*State, error) { + var st uint32 + _, _, e := syscall.Syscall(procGetConsoleMode.Addr(), 2, uintptr(fd), uintptr(unsafe.Pointer(&st)), 0) + if e != 0 { + return nil, error(e) + } + return &State{st}, nil +} + +// Restore restores the terminal connected to the given file descriptor to a +// previous state. +func Restore(fd int, state *State) error { + _, _, err := syscall.Syscall(procSetConsoleMode.Addr(), 2, uintptr(fd), uintptr(state.mode), 0) + return err +} + +// GetSize returns the dimensions of the given terminal. +func GetSize(fd int) (width, height int, err error) { + var info consoleScreenBufferInfo + _, _, e := syscall.Syscall(procGetConsoleScreenBufferInfo.Addr(), 2, uintptr(fd), uintptr(unsafe.Pointer(&info)), 0) + if e != 0 { + return 0, 0, error(e) + } + return int(info.size.x), int(info.size.y), nil +} + +// ReadPassword reads a line of input from a terminal without local echo. This +// is commonly used for inputting passwords and other sensitive data. The slice +// returned does not include the \n. +func ReadPassword(fd int) ([]byte, error) { + var st uint32 + _, _, e := syscall.Syscall(procGetConsoleMode.Addr(), 2, uintptr(fd), uintptr(unsafe.Pointer(&st)), 0) + if e != 0 { + return nil, error(e) + } + old := st + + st &^= (enableEchoInput) + st |= (enableProcessedInput | enableLineInput | enableProcessedOutput) + _, _, e = syscall.Syscall(procSetConsoleMode.Addr(), 2, uintptr(fd), uintptr(st), 0) + if e != 0 { + return nil, error(e) + } + + defer func() { + syscall.Syscall(procSetConsoleMode.Addr(), 2, uintptr(fd), uintptr(old), 0) + }() + + var buf [16]byte + var ret []byte + for { + n, err := syscall.Read(syscall.Handle(fd), buf[:]) + if err != nil { + return nil, err + } + if n == 0 { + if len(ret) == 0 { + return nil, io.EOF + } + break + } + if buf[n-1] == '\n' { + n-- + } + if n > 0 && buf[n-1] == '\r' { + n-- + } + ret = append(ret, buf[:n]...) + if n < len(buf) { + break + } + } + + return ret, nil +} diff --git a/vendor/golang.org/x/crypto/ssh/test/agent_unix_test.go b/vendor/golang.org/x/crypto/ssh/test/agent_unix_test.go new file mode 100644 index 000000000..f481253c9 --- /dev/null +++ b/vendor/golang.org/x/crypto/ssh/test/agent_unix_test.go @@ -0,0 +1,59 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build darwin dragonfly freebsd linux netbsd openbsd + +package test + +import ( + "bytes" + "testing" + + "golang.org/x/crypto/ssh" + "golang.org/x/crypto/ssh/agent" +) + +func TestAgentForward(t *testing.T) { + server := newServer(t) + defer server.Shutdown() + conn := server.Dial(clientConfig()) + defer conn.Close() + + keyring := agent.NewKeyring() + if err := keyring.Add(agent.AddedKey{PrivateKey: testPrivateKeys["dsa"]}); err != nil { + t.Fatalf("Error adding key: %s", err) + } + if err := keyring.Add(agent.AddedKey{ + PrivateKey: testPrivateKeys["dsa"], + ConfirmBeforeUse: true, + LifetimeSecs: 3600, + }); err != nil { + t.Fatalf("Error adding key with constraints: %s", err) + } + pub := testPublicKeys["dsa"] + + sess, err := conn.NewSession() + if err != nil { + t.Fatalf("NewSession: %v", err) + } + if err := agent.RequestAgentForwarding(sess); err != nil { + t.Fatalf("RequestAgentForwarding: %v", err) + } + + if err := agent.ForwardToAgent(conn, keyring); err != nil { + t.Fatalf("SetupForwardKeyring: %v", err) + } + out, err := sess.CombinedOutput("ssh-add -L") + if err != nil { + t.Fatalf("running ssh-add: %v, out %s", err, out) + } + key, _, _, _, err := ssh.ParseAuthorizedKey(out) + if err != nil { + t.Fatalf("ParseAuthorizedKey(%q): %v", out, err) + } + + if !bytes.Equal(key.Marshal(), pub.Marshal()) { + t.Fatalf("got key %s, want %s", ssh.MarshalAuthorizedKey(key), ssh.MarshalAuthorizedKey(pub)) + } +} diff --git a/vendor/golang.org/x/crypto/ssh/test/cert_test.go b/vendor/golang.org/x/crypto/ssh/test/cert_test.go new file mode 100644 index 000000000..364790f17 --- /dev/null +++ b/vendor/golang.org/x/crypto/ssh/test/cert_test.go @@ -0,0 +1,47 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build darwin dragonfly freebsd linux netbsd openbsd + +package test + +import ( + "crypto/rand" + "testing" + + "golang.org/x/crypto/ssh" +) + +func TestCertLogin(t *testing.T) { + s := newServer(t) + defer s.Shutdown() + + // Use a key different from the default. + clientKey := testSigners["dsa"] + caAuthKey := testSigners["ecdsa"] + cert := &ssh.Certificate{ + Key: clientKey.PublicKey(), + ValidPrincipals: []string{username()}, + CertType: ssh.UserCert, + ValidBefore: ssh.CertTimeInfinity, + } + if err := cert.SignCert(rand.Reader, caAuthKey); err != nil { + t.Fatalf("SetSignature: %v", err) + } + + certSigner, err := ssh.NewCertSigner(cert, clientKey) + if err != nil { + t.Fatalf("NewCertSigner: %v", err) + } + + conf := &ssh.ClientConfig{ + User: username(), + } + conf.Auth = append(conf.Auth, ssh.PublicKeys(certSigner)) + client, err := s.TryDial(conf) + if err != nil { + t.Fatalf("TryDial: %v", err) + } + client.Close() +} diff --git a/vendor/golang.org/x/crypto/ssh/test/doc.go b/vendor/golang.org/x/crypto/ssh/test/doc.go new file mode 100644 index 000000000..29ad65e96 --- /dev/null +++ b/vendor/golang.org/x/crypto/ssh/test/doc.go @@ -0,0 +1,7 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// This package contains integration tests for the +// golang.org/x/crypto/ssh package. 
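+// They require a locally installed sshd: tests are skipped when the binary
+// cannot be found (see TryDial in test_unix_test.go) or when -short is set.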
+package test diff --git a/vendor/golang.org/x/crypto/ssh/test/forward_unix_test.go b/vendor/golang.org/x/crypto/ssh/test/forward_unix_test.go new file mode 100644 index 000000000..877a88cde --- /dev/null +++ b/vendor/golang.org/x/crypto/ssh/test/forward_unix_test.go @@ -0,0 +1,160 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build darwin dragonfly freebsd linux netbsd openbsd + +package test + +import ( + "bytes" + "io" + "io/ioutil" + "math/rand" + "net" + "testing" + "time" +) + +func TestPortForward(t *testing.T) { + server := newServer(t) + defer server.Shutdown() + conn := server.Dial(clientConfig()) + defer conn.Close() + + sshListener, err := conn.Listen("tcp", "localhost:0") + if err != nil { + t.Fatal(err) + } + + go func() { + sshConn, err := sshListener.Accept() + if err != nil { + t.Fatalf("listen.Accept failed: %v", err) + } + + _, err = io.Copy(sshConn, sshConn) + if err != nil && err != io.EOF { + t.Fatalf("ssh client copy: %v", err) + } + sshConn.Close() + }() + + forwardedAddr := sshListener.Addr().String() + tcpConn, err := net.Dial("tcp", forwardedAddr) + if err != nil { + t.Fatalf("TCP dial failed: %v", err) + } + + readChan := make(chan []byte) + go func() { + data, _ := ioutil.ReadAll(tcpConn) + readChan <- data + }() + + // Invent some data. + data := make([]byte, 100*1000) + for i := range data { + data[i] = byte(i % 255) + } + + var sent []byte + for len(sent) < 1000*1000 { + // Send random sized chunks + m := rand.Intn(len(data)) + n, err := tcpConn.Write(data[:m]) + if err != nil { + break + } + sent = append(sent, data[:n]...) + } + if err := tcpConn.(*net.TCPConn).CloseWrite(); err != nil { + t.Errorf("tcpConn.CloseWrite: %v", err) + } + + read := <-readChan + + if len(sent) != len(read) { + t.Fatalf("got %d bytes, want %d", len(read), len(sent)) + } + if bytes.Compare(sent, read) != 0 { + t.Fatalf("read back data does not match") + } + + if err := sshListener.Close(); err != nil { + t.Fatalf("sshListener.Close: %v", err) + } + + // Check that the forward disappeared. + tcpConn, err = net.Dial("tcp", forwardedAddr) + if err == nil { + tcpConn.Close() + t.Errorf("still listening to %s after closing", forwardedAddr) + } +} + +func TestAcceptClose(t *testing.T) { + server := newServer(t) + defer server.Shutdown() + conn := server.Dial(clientConfig()) + + sshListener, err := conn.Listen("tcp", "localhost:0") + if err != nil { + t.Fatal(err) + } + + quit := make(chan error, 1) + go func() { + for { + c, err := sshListener.Accept() + if err != nil { + quit <- err + break + } + c.Close() + } + }() + sshListener.Close() + + select { + case <-time.After(1 * time.Second): + t.Errorf("timeout: listener did not close.") + case err := <-quit: + t.Logf("quit as expected (error %v)", err) + } +} + +// Check that listeners exit if the underlying client transport dies. +func TestPortForwardConnectionClose(t *testing.T) { + server := newServer(t) + defer server.Shutdown() + conn := server.Dial(clientConfig()) + + sshListener, err := conn.Listen("tcp", "localhost:0") + if err != nil { + t.Fatal(err) + } + + quit := make(chan error, 1) + go func() { + for { + c, err := sshListener.Accept() + if err != nil { + quit <- err + break + } + c.Close() + } + }() + + // It would be even nicer if we closed the server side, but it + // is more involved as the fd for that side is dup()ed. 
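+	// Closing the client half of the transport is enough: the listener's
+	// Accept then fails and the goroutine above reports it on quit.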
+ server.clientConn.Close() + + select { + case <-time.After(1 * time.Second): + t.Errorf("timeout: listener did not close.") + case err := <-quit: + t.Logf("quit as expected (error %v)", err) + } +} diff --git a/vendor/golang.org/x/crypto/ssh/test/session_test.go b/vendor/golang.org/x/crypto/ssh/test/session_test.go new file mode 100644 index 000000000..c0e714ba9 --- /dev/null +++ b/vendor/golang.org/x/crypto/ssh/test/session_test.go @@ -0,0 +1,340 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !windows + +package test + +// Session functional tests. + +import ( + "bytes" + "errors" + "io" + "strings" + "testing" + + "golang.org/x/crypto/ssh" +) + +func TestRunCommandSuccess(t *testing.T) { + server := newServer(t) + defer server.Shutdown() + conn := server.Dial(clientConfig()) + defer conn.Close() + + session, err := conn.NewSession() + if err != nil { + t.Fatalf("session failed: %v", err) + } + defer session.Close() + err = session.Run("true") + if err != nil { + t.Fatalf("session failed: %v", err) + } +} + +func TestHostKeyCheck(t *testing.T) { + server := newServer(t) + defer server.Shutdown() + + conf := clientConfig() + hostDB := hostKeyDB() + conf.HostKeyCallback = hostDB.Check + + // change the keys. + hostDB.keys[ssh.KeyAlgoRSA][25]++ + hostDB.keys[ssh.KeyAlgoDSA][25]++ + hostDB.keys[ssh.KeyAlgoECDSA256][25]++ + + conn, err := server.TryDial(conf) + if err == nil { + conn.Close() + t.Fatalf("dial should have failed.") + } else if !strings.Contains(err.Error(), "host key mismatch") { + t.Fatalf("'host key mismatch' not found in %v", err) + } +} + +func TestRunCommandStdin(t *testing.T) { + server := newServer(t) + defer server.Shutdown() + conn := server.Dial(clientConfig()) + defer conn.Close() + + session, err := conn.NewSession() + if err != nil { + t.Fatalf("session failed: %v", err) + } + defer session.Close() + + r, w := io.Pipe() + defer r.Close() + defer w.Close() + session.Stdin = r + + err = session.Run("true") + if err != nil { + t.Fatalf("session failed: %v", err) + } +} + +func TestRunCommandStdinError(t *testing.T) { + server := newServer(t) + defer server.Shutdown() + conn := server.Dial(clientConfig()) + defer conn.Close() + + session, err := conn.NewSession() + if err != nil { + t.Fatalf("session failed: %v", err) + } + defer session.Close() + + r, w := io.Pipe() + defer r.Close() + session.Stdin = r + pipeErr := errors.New("closing write end of pipe") + w.CloseWithError(pipeErr) + + err = session.Run("true") + if err != pipeErr { + t.Fatalf("expected %v, found %v", pipeErr, err) + } +} + +func TestRunCommandFailed(t *testing.T) { + server := newServer(t) + defer server.Shutdown() + conn := server.Dial(clientConfig()) + defer conn.Close() + + session, err := conn.NewSession() + if err != nil { + t.Fatalf("session failed: %v", err) + } + defer session.Close() + err = session.Run(`bash -c "kill -9 $$"`) + if err == nil { + t.Fatalf("session succeeded: %v", err) + } +} + +func TestRunCommandWeClosed(t *testing.T) { + server := newServer(t) + defer server.Shutdown() + conn := server.Dial(clientConfig()) + defer conn.Close() + + session, err := conn.NewSession() + if err != nil { + t.Fatalf("session failed: %v", err) + } + err = session.Shell() + if err != nil { + t.Fatalf("shell failed: %v", err) + } + err = session.Close() + if err != nil { + t.Fatalf("shell failed: %v", err) + } +} + +func TestFuncLargeRead(t *testing.T) { + server := 
newServer(t)
+	defer server.Shutdown()
+	conn := server.Dial(clientConfig())
+	defer conn.Close()
+
+	session, err := conn.NewSession()
+	if err != nil {
+		t.Fatalf("unable to create new session: %s", err)
+	}
+
+	stdout, err := session.StdoutPipe()
+	if err != nil {
+		t.Fatalf("unable to acquire stdout pipe: %s", err)
+	}
+
+	err = session.Start("dd if=/dev/urandom bs=2048 count=1024")
+	if err != nil {
+		t.Fatalf("unable to execute remote command: %s", err)
+	}
+
+	buf := new(bytes.Buffer)
+	n, err := io.Copy(buf, stdout)
+	if err != nil {
+		t.Fatalf("error reading from remote stdout: %s", err)
+	}
+
+	if n != 2048*1024 {
+		t.Fatalf("Expected %d bytes but read only %d from remote command", 2048*1024, n)
+	}
+}
+
+func TestKeyChange(t *testing.T) {
+	server := newServer(t)
+	defer server.Shutdown()
+	conf := clientConfig()
+	hostDB := hostKeyDB()
+	conf.HostKeyCallback = hostDB.Check
+	conf.RekeyThreshold = 1024
+	conn := server.Dial(conf)
+	defer conn.Close()
+
+	for i := 0; i < 4; i++ {
+		session, err := conn.NewSession()
+		if err != nil {
+			t.Fatalf("unable to create new session: %s", err)
+		}
+
+		stdout, err := session.StdoutPipe()
+		if err != nil {
+			t.Fatalf("unable to acquire stdout pipe: %s", err)
+		}
+
+		err = session.Start("dd if=/dev/urandom bs=1024 count=1")
+		if err != nil {
+			t.Fatalf("unable to execute remote command: %s", err)
+		}
+		buf := new(bytes.Buffer)
+		n, err := io.Copy(buf, stdout)
+		if err != nil {
+			t.Fatalf("error reading from remote stdout: %s", err)
+		}
+
+		want := int64(1024)
+		if n != want {
+			t.Fatalf("Expected %d bytes but read only %d from remote command", want, n)
+		}
+	}
+
+	if changes := hostDB.checkCount; changes < 4 {
+		t.Errorf("got %d key changes, want 4", changes)
+	}
+}
+
+func TestInvalidTerminalMode(t *testing.T) {
+	server := newServer(t)
+	defer server.Shutdown()
+	conn := server.Dial(clientConfig())
+	defer conn.Close()
+
+	session, err := conn.NewSession()
+	if err != nil {
+		t.Fatalf("session failed: %v", err)
+	}
+	defer session.Close()
+
+	if err = session.RequestPty("vt100", 80, 40, ssh.TerminalModes{255: 1984}); err == nil {
+		t.Fatalf("req-pty failed: successful request with invalid mode")
+	}
+}
+
+func TestValidTerminalMode(t *testing.T) {
+	server := newServer(t)
+	defer server.Shutdown()
+	conn := server.Dial(clientConfig())
+	defer conn.Close()
+
+	session, err := conn.NewSession()
+	if err != nil {
+		t.Fatalf("session failed: %v", err)
+	}
+	defer session.Close()
+
+	stdout, err := session.StdoutPipe()
+	if err != nil {
+		t.Fatalf("unable to acquire stdout pipe: %s", err)
+	}
+
+	stdin, err := session.StdinPipe()
+	if err != nil {
+		t.Fatalf("unable to acquire stdin pipe: %s", err)
+	}
+
+	tm := ssh.TerminalModes{ssh.ECHO: 0}
+	if err = session.RequestPty("xterm", 80, 40, tm); err != nil {
+		t.Fatalf("req-pty failed: %s", err)
+	}
+
+	err = session.Shell()
+	if err != nil {
+		t.Fatalf("session failed: %s", err)
+	}
+
+	stdin.Write([]byte("stty -a && exit\n"))
+
+	var buf bytes.Buffer
+	if _, err := io.Copy(&buf, stdout); err != nil {
+		t.Fatalf("reading failed: %s", err)
+	}
+
+	if sttyOutput := buf.String(); !strings.Contains(sttyOutput, "-echo ") {
+		t.Fatalf("terminal mode failure: expected -echo in stty output, got %s", sttyOutput)
+	}
+}
+
+func TestCiphers(t *testing.T) {
+	var config ssh.Config
+	config.SetDefaults()
+	cipherOrder := config.Ciphers
+	// This cipher is not tested when commented out in cipher.go; it will
+	// fall back to the next available cipher, as per line 292.
+	cipherOrder = append(cipherOrder, "aes128-cbc")
+
+	for _, ciph := range cipherOrder {
+		server := newServer(t)
+		defer server.Shutdown()
+		conf := clientConfig()
+		conf.Ciphers = []string{ciph}
+		// Don't fail if sshd doesn't have the cipher.
+		conf.Ciphers = append(conf.Ciphers, cipherOrder...)
+		conn, err := server.TryDial(conf)
+		if err == nil {
+			conn.Close()
+		} else {
+			t.Fatalf("failed for cipher %q", ciph)
+		}
+	}
+}
+
+func TestMACs(t *testing.T) {
+	var config ssh.Config
+	config.SetDefaults()
+	macOrder := config.MACs
+
+	for _, mac := range macOrder {
+		server := newServer(t)
+		defer server.Shutdown()
+		conf := clientConfig()
+		conf.MACs = []string{mac}
+		// Don't fail if sshd doesn't have the MAC.
+		conf.MACs = append(conf.MACs, macOrder...)
+		if conn, err := server.TryDial(conf); err == nil {
+			conn.Close()
+		} else {
+			t.Fatalf("failed for MAC %q", mac)
+		}
+	}
+}
+
+func TestKeyExchanges(t *testing.T) {
+	var config ssh.Config
+	config.SetDefaults()
+	kexOrder := config.KeyExchanges
+	for _, kex := range kexOrder {
+		server := newServer(t)
+		defer server.Shutdown()
+		conf := clientConfig()
+		// Don't fail if sshd doesn't have the kex.
+		conf.KeyExchanges = append([]string{kex}, kexOrder...)
+		conn, err := server.TryDial(conf)
+		if err == nil {
+			conn.Close()
+		} else {
+			t.Errorf("failed for kex %q", kex)
+		}
+	}
+}
diff --git a/vendor/golang.org/x/crypto/ssh/test/tcpip_test.go b/vendor/golang.org/x/crypto/ssh/test/tcpip_test.go
new file mode 100644
index 000000000..a2eb9358d
--- /dev/null
+++ b/vendor/golang.org/x/crypto/ssh/test/tcpip_test.go
@@ -0,0 +1,46 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !windows
+
+package test
+
+// direct-tcpip functional tests
+
+import (
+	"io"
+	"net"
+	"testing"
+)
+
+func TestDial(t *testing.T) {
+	server := newServer(t)
+	defer server.Shutdown()
+	sshConn := server.Dial(clientConfig())
+	defer sshConn.Close()
+
+	l, err := net.Listen("tcp", "127.0.0.1:0")
+	if err != nil {
+		t.Fatalf("Listen: %v", err)
+	}
+	defer l.Close()
+
+	go func() {
+		for {
+			c, err := l.Accept()
+			if err != nil {
+				break
+			}
+
+			io.WriteString(c, c.RemoteAddr().String())
+			c.Close()
+		}
+	}()
+
+	conn, err := sshConn.Dial("tcp", l.Addr().String())
+	if err != nil {
+		t.Fatalf("Dial: %v", err)
+	}
+	defer conn.Close()
+}
diff --git a/vendor/golang.org/x/crypto/ssh/test/test_unix_test.go b/vendor/golang.org/x/crypto/ssh/test/test_unix_test.go
new file mode 100644
index 000000000..f1fc50b2e
--- /dev/null
+++ b/vendor/golang.org/x/crypto/ssh/test/test_unix_test.go
@@ -0,0 +1,261 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build darwin dragonfly freebsd linux netbsd openbsd plan9
+
+package test
+
+// functional test harness for unix.
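+//
+// The harness launches a locally installed sshd in inetd (-i) mode over an
+// in-process unix-socket connection; see unixConnection and TryDial below.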
+ +import ( + "bytes" + "fmt" + "io/ioutil" + "log" + "net" + "os" + "os/exec" + "os/user" + "path/filepath" + "testing" + "text/template" + + "golang.org/x/crypto/ssh" + "golang.org/x/crypto/ssh/testdata" +) + +const sshd_config = ` +Protocol 2 +HostKey {{.Dir}}/id_rsa +HostKey {{.Dir}}/id_dsa +HostKey {{.Dir}}/id_ecdsa +Pidfile {{.Dir}}/sshd.pid +#UsePrivilegeSeparation no +KeyRegenerationInterval 3600 +ServerKeyBits 768 +SyslogFacility AUTH +LogLevel DEBUG2 +LoginGraceTime 120 +PermitRootLogin no +StrictModes no +RSAAuthentication yes +PubkeyAuthentication yes +AuthorizedKeysFile {{.Dir}}/id_user.pub +TrustedUserCAKeys {{.Dir}}/id_ecdsa.pub +IgnoreRhosts yes +RhostsRSAAuthentication no +HostbasedAuthentication no +` + +var configTmpl = template.Must(template.New("").Parse(sshd_config)) + +type server struct { + t *testing.T + cleanup func() // executed during Shutdown + configfile string + cmd *exec.Cmd + output bytes.Buffer // holds stderr from sshd process + + // Client half of the network connection. + clientConn net.Conn +} + +func username() string { + var username string + if user, err := user.Current(); err == nil { + username = user.Username + } else { + // user.Current() currently requires cgo. If an error is + // returned attempt to get the username from the environment. + log.Printf("user.Current: %v; falling back on $USER", err) + username = os.Getenv("USER") + } + if username == "" { + panic("Unable to get username") + } + return username +} + +type storedHostKey struct { + // keys map from an algorithm string to binary key data. + keys map[string][]byte + + // checkCount counts the Check calls. Used for testing + // rekeying. + checkCount int +} + +func (k *storedHostKey) Add(key ssh.PublicKey) { + if k.keys == nil { + k.keys = map[string][]byte{} + } + k.keys[key.Type()] = key.Marshal() +} + +func (k *storedHostKey) Check(addr string, remote net.Addr, key ssh.PublicKey) error { + k.checkCount++ + algo := key.Type() + + if k.keys == nil || bytes.Compare(key.Marshal(), k.keys[algo]) != 0 { + return fmt.Errorf("host key mismatch. Got %q, want %q", key, k.keys[algo]) + } + return nil +} + +func hostKeyDB() *storedHostKey { + keyChecker := &storedHostKey{} + keyChecker.Add(testPublicKeys["ecdsa"]) + keyChecker.Add(testPublicKeys["rsa"]) + keyChecker.Add(testPublicKeys["dsa"]) + return keyChecker +} + +func clientConfig() *ssh.ClientConfig { + config := &ssh.ClientConfig{ + User: username(), + Auth: []ssh.AuthMethod{ + ssh.PublicKeys(testSigners["user"]), + }, + HostKeyCallback: hostKeyDB().Check, + } + return config +} + +// unixConnection creates two halves of a connected net.UnixConn. It +// is used for connecting the Go SSH client with sshd without opening +// ports. 
+func unixConnection() (*net.UnixConn, *net.UnixConn, error) { + dir, err := ioutil.TempDir("", "unixConnection") + if err != nil { + return nil, nil, err + } + defer os.Remove(dir) + + addr := filepath.Join(dir, "ssh") + listener, err := net.Listen("unix", addr) + if err != nil { + return nil, nil, err + } + defer listener.Close() + c1, err := net.Dial("unix", addr) + if err != nil { + return nil, nil, err + } + + c2, err := listener.Accept() + if err != nil { + c1.Close() + return nil, nil, err + } + + return c1.(*net.UnixConn), c2.(*net.UnixConn), nil +} + +func (s *server) TryDial(config *ssh.ClientConfig) (*ssh.Client, error) { + sshd, err := exec.LookPath("sshd") + if err != nil { + s.t.Skipf("skipping test: %v", err) + } + + c1, c2, err := unixConnection() + if err != nil { + s.t.Fatalf("unixConnection: %v", err) + } + + s.cmd = exec.Command(sshd, "-f", s.configfile, "-i", "-e") + f, err := c2.File() + if err != nil { + s.t.Fatalf("UnixConn.File: %v", err) + } + defer f.Close() + s.cmd.Stdin = f + s.cmd.Stdout = f + s.cmd.Stderr = &s.output + if err := s.cmd.Start(); err != nil { + s.t.Fail() + s.Shutdown() + s.t.Fatalf("s.cmd.Start: %v", err) + } + s.clientConn = c1 + conn, chans, reqs, err := ssh.NewClientConn(c1, "", config) + if err != nil { + return nil, err + } + return ssh.NewClient(conn, chans, reqs), nil +} + +func (s *server) Dial(config *ssh.ClientConfig) *ssh.Client { + conn, err := s.TryDial(config) + if err != nil { + s.t.Fail() + s.Shutdown() + s.t.Fatalf("ssh.Client: %v", err) + } + return conn +} + +func (s *server) Shutdown() { + if s.cmd != nil && s.cmd.Process != nil { + // Don't check for errors; if it fails it's most + // likely "os: process already finished", and we don't + // care about that. Use os.Interrupt, so child + // processes are killed too. + s.cmd.Process.Signal(os.Interrupt) + s.cmd.Wait() + } + if s.t.Failed() { + // log any output from sshd process + s.t.Logf("sshd: %s", s.output.String()) + } + s.cleanup() +} + +func writeFile(path string, contents []byte) { + f, err := os.OpenFile(path, os.O_WRONLY|os.O_TRUNC|os.O_CREATE, 0600) + if err != nil { + panic(err) + } + defer f.Close() + if _, err := f.Write(contents); err != nil { + panic(err) + } +} + +// newServer returns a new mock ssh server. +func newServer(t *testing.T) *server { + if testing.Short() { + t.Skip("skipping test due to -short") + } + dir, err := ioutil.TempDir("", "sshtest") + if err != nil { + t.Fatal(err) + } + f, err := os.Create(filepath.Join(dir, "sshd_config")) + if err != nil { + t.Fatal(err) + } + err = configTmpl.Execute(f, map[string]string{ + "Dir": dir, + }) + if err != nil { + t.Fatal(err) + } + f.Close() + + for k, v := range testdata.PEMBytes { + filename := "id_" + k + writeFile(filepath.Join(dir, filename), v) + writeFile(filepath.Join(dir, filename+".pub"), ssh.MarshalAuthorizedKey(testPublicKeys[k])) + } + + return &server{ + t: t, + configfile: f.Name(), + cleanup: func() { + if err := os.RemoveAll(dir); err != nil { + t.Error(err) + } + }, + } +} diff --git a/vendor/golang.org/x/crypto/ssh/test/testdata_test.go b/vendor/golang.org/x/crypto/ssh/test/testdata_test.go new file mode 100644 index 000000000..ae48c7516 --- /dev/null +++ b/vendor/golang.org/x/crypto/ssh/test/testdata_test.go @@ -0,0 +1,64 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// IMPLEMENTOR NOTE: To avoid a package loop, this file is in three places: +// ssh/, ssh/agent, and ssh/test/. It should be kept in sync across all three +// instances. + +package test + +import ( + "crypto/rand" + "fmt" + + "golang.org/x/crypto/ssh" + "golang.org/x/crypto/ssh/testdata" +) + +var ( + testPrivateKeys map[string]interface{} + testSigners map[string]ssh.Signer + testPublicKeys map[string]ssh.PublicKey +) + +func init() { + var err error + + n := len(testdata.PEMBytes) + testPrivateKeys = make(map[string]interface{}, n) + testSigners = make(map[string]ssh.Signer, n) + testPublicKeys = make(map[string]ssh.PublicKey, n) + for t, k := range testdata.PEMBytes { + testPrivateKeys[t], err = ssh.ParseRawPrivateKey(k) + if err != nil { + panic(fmt.Sprintf("Unable to parse test key %s: %v", t, err)) + } + testSigners[t], err = ssh.NewSignerFromKey(testPrivateKeys[t]) + if err != nil { + panic(fmt.Sprintf("Unable to create signer for test key %s: %v", t, err)) + } + testPublicKeys[t] = testSigners[t].PublicKey() + } + + // Create a cert and sign it for use in tests. + testCert := &ssh.Certificate{ + Nonce: []byte{}, // To pass reflect.DeepEqual after marshal & parse, this must be non-nil + ValidPrincipals: []string{"gopher1", "gopher2"}, // increases test coverage + ValidAfter: 0, // unix epoch + ValidBefore: ssh.CertTimeInfinity, // The end of currently representable time. + Reserved: []byte{}, // To pass reflect.DeepEqual after marshal & parse, this must be non-nil + Key: testPublicKeys["ecdsa"], + SignatureKey: testPublicKeys["rsa"], + Permissions: ssh.Permissions{ + CriticalOptions: map[string]string{}, + Extensions: map[string]string{}, + }, + } + testCert.SignCert(rand.Reader, testSigners["rsa"]) + testPrivateKeys["cert"] = testPrivateKeys["ecdsa"] + testSigners["cert"], err = ssh.NewCertSigner(testCert, testSigners["ecdsa"]) + if err != nil { + panic(fmt.Sprintf("Unable to create certificate signer: %v", err)) + } +} diff --git a/vendor/golang.org/x/crypto/ssh/testdata/doc.go b/vendor/golang.org/x/crypto/ssh/testdata/doc.go new file mode 100644 index 000000000..ae7bd8b89 --- /dev/null +++ b/vendor/golang.org/x/crypto/ssh/testdata/doc.go @@ -0,0 +1,8 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// This package contains test data shared between the various subpackages of +// the golang.org/x/crypto/ssh package. Under no circumstance should +// this data be used for production code. +package testdata diff --git a/vendor/golang.org/x/crypto/ssh/testdata/keys.go b/vendor/golang.org/x/crypto/ssh/testdata/keys.go new file mode 100644 index 000000000..5ff1c0e03 --- /dev/null +++ b/vendor/golang.org/x/crypto/ssh/testdata/keys.go @@ -0,0 +1,43 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package testdata + +var PEMBytes = map[string][]byte{ + "dsa": []byte(`-----BEGIN DSA PRIVATE KEY----- +MIIBuwIBAAKBgQD6PDSEyXiI9jfNs97WuM46MSDCYlOqWw80ajN16AohtBncs1YB +lHk//dQOvCYOsYaE+gNix2jtoRjwXhDsc25/IqQbU1ahb7mB8/rsaILRGIbA5WH3 +EgFtJmXFovDz3if6F6TzvhFpHgJRmLYVR8cqsezL3hEZOvvs2iH7MorkxwIVAJHD +nD82+lxh2fb4PMsIiaXudAsBAoGAQRf7Q/iaPRn43ZquUhd6WwvirqUj+tkIu6eV +2nZWYmXLlqFQKEy4Tejl7Wkyzr2OSYvbXLzo7TNxLKoWor6ips0phYPPMyXld14r +juhT24CrhOzuLMhDduMDi032wDIZG4Y+K7ElU8Oufn8Sj5Wge8r6ANmmVgmFfynr +FhdYCngCgYEA3ucGJ93/Mx4q4eKRDxcWD3QzWyqpbRVRRV1Vmih9Ha/qC994nJFz +DQIdjxDIT2Rk2AGzMqFEB68Zc3O+Wcsmz5eWWzEwFxaTwOGWTyDqsDRLm3fD+QYj +nOwuxb0Kce+gWI8voWcqC9cyRm09jGzu2Ab3Bhtpg8JJ8L7gS3MRZK4CFEx4UAfY +Fmsr0W6fHB9nhS4/UXM8 +-----END DSA PRIVATE KEY----- +`), + "ecdsa": []byte(`-----BEGIN EC PRIVATE KEY----- +MHcCAQEEINGWx0zo6fhJ/0EAfrPzVFyFC9s18lBt3cRoEDhS3ARooAoGCCqGSM49 +AwEHoUQDQgAEi9Hdw6KvZcWxfg2IDhA7UkpDtzzt6ZqJXSsFdLd+Kx4S3Sx4cVO+ +6/ZOXRnPmNAlLUqjShUsUBBngG0u2fqEqA== +-----END EC PRIVATE KEY----- +`), + "rsa": []byte(`-----BEGIN RSA PRIVATE KEY----- +MIIBOwIBAAJBALdGZxkXDAjsYk10ihwU6Id2KeILz1TAJuoq4tOgDWxEEGeTrcld +r/ZwVaFzjWzxaf6zQIJbfaSEAhqD5yo72+sCAwEAAQJBAK8PEVU23Wj8mV0QjwcJ +tZ4GcTUYQL7cF4+ezTCE9a1NrGnCP2RuQkHEKxuTVrxXt+6OF15/1/fuXnxKjmJC +nxkCIQDaXvPPBi0c7vAxGwNY9726x01/dNbHCE0CBtcotobxpwIhANbbQbh3JHVW +2haQh4fAG5mhesZKAGcxTyv4mQ7uMSQdAiAj+4dzMpJWdSzQ+qGHlHMIBvVHLkqB +y2VdEyF7DPCZewIhAI7GOI/6LDIFOvtPo6Bj2nNmyQ1HU6k/LRtNIXi4c9NJAiAr +rrxx26itVhJmcvoUhOjwuzSlP2bE5VHAvkGB352YBg== +-----END RSA PRIVATE KEY----- +`), + "user": []byte(`-----BEGIN EC PRIVATE KEY----- +MHcCAQEEILYCAeq8f7V4vSSypRw7pxy8yz3V5W4qg8kSC3zJhqpQoAoGCCqGSM49 +AwEHoUQDQgAEYcO2xNKiRUYOLEHM7VYAp57HNyKbOdYtHD83Z4hzNPVC4tM5mdGD +PLL8IEwvYu2wq+lpXfGQnNMbzYf9gspG0w== +-----END EC PRIVATE KEY----- +`), +} diff --git a/vendor/golang.org/x/crypto/ssh/testdata_test.go b/vendor/golang.org/x/crypto/ssh/testdata_test.go new file mode 100644 index 000000000..f2828c1b5 --- /dev/null +++ b/vendor/golang.org/x/crypto/ssh/testdata_test.go @@ -0,0 +1,63 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// IMPLEMENTOR NOTE: To avoid a package loop, this file is in three places: +// ssh/, ssh/agent, and ssh/test/. It should be kept in sync across all three +// instances. + +package ssh + +import ( + "crypto/rand" + "fmt" + + "golang.org/x/crypto/ssh/testdata" +) + +var ( + testPrivateKeys map[string]interface{} + testSigners map[string]Signer + testPublicKeys map[string]PublicKey +) + +func init() { + var err error + + n := len(testdata.PEMBytes) + testPrivateKeys = make(map[string]interface{}, n) + testSigners = make(map[string]Signer, n) + testPublicKeys = make(map[string]PublicKey, n) + for t, k := range testdata.PEMBytes { + testPrivateKeys[t], err = ParseRawPrivateKey(k) + if err != nil { + panic(fmt.Sprintf("Unable to parse test key %s: %v", t, err)) + } + testSigners[t], err = NewSignerFromKey(testPrivateKeys[t]) + if err != nil { + panic(fmt.Sprintf("Unable to create signer for test key %s: %v", t, err)) + } + testPublicKeys[t] = testSigners[t].PublicKey() + } + + // Create a cert and sign it for use in tests. + testCert := &Certificate{ + Nonce: []byte{}, // To pass reflect.DeepEqual after marshal & parse, this must be non-nil + ValidPrincipals: []string{"gopher1", "gopher2"}, // increases test coverage + ValidAfter: 0, // unix epoch + ValidBefore: CertTimeInfinity, // The end of currently representable time. 
+ Reserved: []byte{}, // To pass reflect.DeepEqual after marshal & parse, this must be non-nil + Key: testPublicKeys["ecdsa"], + SignatureKey: testPublicKeys["rsa"], + Permissions: Permissions{ + CriticalOptions: map[string]string{}, + Extensions: map[string]string{}, + }, + } + testCert.SignCert(rand.Reader, testSigners["rsa"]) + testPrivateKeys["cert"] = testPrivateKeys["ecdsa"] + testSigners["cert"], err = NewCertSigner(testCert, testSigners["ecdsa"]) + if err != nil { + panic(fmt.Sprintf("Unable to create certificate signer: %v", err)) + } +} diff --git a/vendor/golang.org/x/crypto/ssh/transport.go b/vendor/golang.org/x/crypto/ssh/transport.go new file mode 100644 index 000000000..8351d378e --- /dev/null +++ b/vendor/golang.org/x/crypto/ssh/transport.go @@ -0,0 +1,332 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ssh + +import ( + "bufio" + "errors" + "io" +) + +const ( + gcmCipherID = "aes128-gcm@openssh.com" + aes128cbcID = "aes128-cbc" +) + +// packetConn represents a transport that implements packet based +// operations. +type packetConn interface { + // Encrypt and send a packet of data to the remote peer. + writePacket(packet []byte) error + + // Read a packet from the connection + readPacket() ([]byte, error) + + // Close closes the write-side of the connection. + Close() error +} + +// transport is the keyingTransport that implements the SSH packet +// protocol. +type transport struct { + reader connectionState + writer connectionState + + bufReader *bufio.Reader + bufWriter *bufio.Writer + rand io.Reader + + io.Closer + + // Initial H used for the session ID. Once assigned this does + // not change, even during subsequent key exchanges. + sessionID []byte +} + +// getSessionID returns the ID of the SSH connection. The return value +// should not be modified. +func (t *transport) getSessionID() []byte { + if t.sessionID == nil { + panic("session ID not set yet") + } + return t.sessionID +} + +// packetCipher represents a combination of SSH encryption/MAC +// protocol. A single instance should be used for one direction only. +type packetCipher interface { + // writePacket encrypts the packet and writes it to w. The + // contents of the packet are generally scrambled. + writePacket(seqnum uint32, w io.Writer, rand io.Reader, packet []byte) error + + // readPacket reads and decrypts a packet of data. The + // returned packet may be overwritten by future calls of + // readPacket. + readPacket(seqnum uint32, r io.Reader) ([]byte, error) +} + +// connectionState represents one side (read or write) of the +// connection. This is necessary because each direction has its own +// keys, and can even have its own algorithms +type connectionState struct { + packetCipher + seqNum uint32 + dir direction + pendingKeyChange chan packetCipher +} + +// prepareKeyChange sets up key material for a keychange. The key changes in +// both directions are triggered by reading and writing a msgNewKey packet +// respectively. 
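+//
+// In terms of the code below: prepareKeyChange builds one packetCipher per
+// direction with newPacketCipher and parks it on that direction's
+// pendingKeyChange channel; readPacket and writePacket then swap the parked
+// cipher in when they observe msgNewKeys, so each side switches keys at
+// exactly the packet boundary the protocol requires.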
+func (t *transport) prepareKeyChange(algs *algorithms, kexResult *kexResult) error {
+	if t.sessionID == nil {
+		t.sessionID = kexResult.H
+	}
+
+	kexResult.SessionID = t.sessionID
+
+	if ciph, err := newPacketCipher(t.reader.dir, algs.r, kexResult); err != nil {
+		return err
+	} else {
+		t.reader.pendingKeyChange <- ciph
+	}
+
+	if ciph, err := newPacketCipher(t.writer.dir, algs.w, kexResult); err != nil {
+		return err
+	} else {
+		t.writer.pendingKeyChange <- ciph
+	}
+
+	return nil
+}
+
+// Read and decrypt next packet.
+func (t *transport) readPacket() ([]byte, error) {
+	return t.reader.readPacket(t.bufReader)
+}
+
+func (s *connectionState) readPacket(r *bufio.Reader) ([]byte, error) {
+	packet, err := s.packetCipher.readPacket(s.seqNum, r)
+	s.seqNum++
+	if err == nil && len(packet) == 0 {
+		err = errors.New("ssh: zero length packet")
+	}
+
+	if len(packet) > 0 && packet[0] == msgNewKeys {
+		select {
+		case cipher := <-s.pendingKeyChange:
+			s.packetCipher = cipher
+		default:
+			return nil, errors.New("ssh: got bogus newkeys message.")
+		}
+	}
+
+	// The packet may point to an internal buffer, so copy the
+	// packet out here.
+	fresh := make([]byte, len(packet))
+	copy(fresh, packet)
+
+	return fresh, err
+}
+
+func (t *transport) writePacket(packet []byte) error {
+	return t.writer.writePacket(t.bufWriter, t.rand, packet)
+}
+
+func (s *connectionState) writePacket(w *bufio.Writer, rand io.Reader, packet []byte) error {
+	changeKeys := len(packet) > 0 && packet[0] == msgNewKeys
+
+	err := s.packetCipher.writePacket(s.seqNum, w, rand, packet)
+	if err != nil {
+		return err
+	}
+	if err = w.Flush(); err != nil {
+		return err
+	}
+	s.seqNum++
+	if changeKeys {
+		select {
+		case cipher := <-s.pendingKeyChange:
+			s.packetCipher = cipher
+		default:
+			panic("ssh: no key material for msgNewKeys")
+		}
+	}
+	return err
+}
+
+func newTransport(rwc io.ReadWriteCloser, rand io.Reader, isClient bool) *transport {
+	t := &transport{
+		bufReader: bufio.NewReader(rwc),
+		bufWriter: bufio.NewWriter(rwc),
+		rand:      rand,
+		reader: connectionState{
+			packetCipher:     &streamPacketCipher{cipher: noneCipher{}},
+			pendingKeyChange: make(chan packetCipher, 1),
+		},
+		writer: connectionState{
+			packetCipher:     &streamPacketCipher{cipher: noneCipher{}},
+			pendingKeyChange: make(chan packetCipher, 1),
+		},
+		Closer: rwc,
+	}
+	if isClient {
+		t.reader.dir = serverKeys
+		t.writer.dir = clientKeys
+	} else {
+		t.reader.dir = clientKeys
+		t.writer.dir = serverKeys
+	}
+
+	return t
+}
+
+type direction struct {
+	ivTag     []byte
+	keyTag    []byte
+	macKeyTag []byte
+}
+
+var (
+	serverKeys = direction{[]byte{'B'}, []byte{'D'}, []byte{'F'}}
+	clientKeys = direction{[]byte{'A'}, []byte{'C'}, []byte{'E'}}
+)
+
+// generateKeys generates key material for IV, MAC and encryption.
+func generateKeys(d direction, algs directionAlgorithms, kex *kexResult) (iv, key, macKey []byte) {
+	cipherMode := cipherModes[algs.Cipher]
+	macMode := macModes[algs.MAC]
+
+	iv = make([]byte, cipherMode.ivSize)
+	key = make([]byte, cipherMode.keySize)
+	macKey = make([]byte, macMode.keySize)
+
+	generateKeyMaterial(iv, d.ivTag, kex)
+	generateKeyMaterial(key, d.keyTag, kex)
+	generateKeyMaterial(macKey, d.macKeyTag, kex)
+	return
+}
+
+// newPacketCipher creates a packetCipher with the cipher and MAC keys set
+// from kex.K, kex.H and sessionId, as described in RFC 4253, section 6.4.
+// direction should either be serverKeys (to setup server->client keys) or
+// clientKeys (for client->server keys).
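+//
+// The derivation itself (see generateKeyMaterial below) follows RFC 4253,
+// section 7.2, where X is one of the single-letter tags 'A'..'F' held in
+// direction:
+//
+//	K1 = HASH(K || H || X || session_id)
+//	Kn = HASH(K || H || K1 || K2 || ... || K(n-1))
+//
+// and the output is K1 || K2 || ... truncated to the required key length.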
+func newPacketCipher(d direction, algs directionAlgorithms, kex *kexResult) (packetCipher, error) { + iv, key, macKey := generateKeys(d, algs, kex) + + if algs.Cipher == gcmCipherID { + return newGCMCipher(iv, key, macKey) + } + + if algs.Cipher == aes128cbcID { + return newAESCBCCipher(iv, key, macKey, algs) + } + + c := &streamPacketCipher{ + mac: macModes[algs.MAC].new(macKey), + } + c.macResult = make([]byte, c.mac.Size()) + + var err error + c.cipher, err = cipherModes[algs.Cipher].createStream(key, iv) + if err != nil { + return nil, err + } + + return c, nil +} + +// generateKeyMaterial fills out with key material generated from tag, K, H +// and sessionId, as specified in RFC 4253, section 7.2. +func generateKeyMaterial(out, tag []byte, r *kexResult) { + var digestsSoFar []byte + + h := r.Hash.New() + for len(out) > 0 { + h.Reset() + h.Write(r.K) + h.Write(r.H) + + if len(digestsSoFar) == 0 { + h.Write(tag) + h.Write(r.SessionID) + } else { + h.Write(digestsSoFar) + } + + digest := h.Sum(nil) + n := copy(out, digest) + out = out[n:] + if len(out) > 0 { + digestsSoFar = append(digestsSoFar, digest...) + } + } +} + +const packageVersion = "SSH-2.0-Go" + +// Sends and receives a version line. The versionLine string should +// be US ASCII, start with "SSH-2.0-", and should not include a +// newline. exchangeVersions returns the other side's version line. +func exchangeVersions(rw io.ReadWriter, versionLine []byte) (them []byte, err error) { + // Contrary to the RFC, we do not ignore lines that don't + // start with "SSH-2.0-" to make the library usable with + // nonconforming servers. + for _, c := range versionLine { + // The spec disallows non US-ASCII chars, and + // specifically forbids null chars. + if c < 32 { + return nil, errors.New("ssh: junk character in version line") + } + } + if _, err = rw.Write(append(versionLine, '\r', '\n')); err != nil { + return + } + + them, err = readVersion(rw) + return them, err +} + +// maxVersionStringBytes is the maximum number of bytes that we'll +// accept as a version string. RFC 4253 section 4.2 limits this at 255 +// chars +const maxVersionStringBytes = 255 + +// Read version string as specified by RFC 4253, section 4.2. +func readVersion(r io.Reader) ([]byte, error) { + versionString := make([]byte, 0, 64) + var ok bool + var buf [1]byte + + for len(versionString) < maxVersionStringBytes { + _, err := io.ReadFull(r, buf[:]) + if err != nil { + return nil, err + } + // The RFC says that the version should be terminated with \r\n + // but several SSH servers actually only send a \n. + if buf[0] == '\n' { + ok = true + break + } + + // non ASCII chars are disallowed, but we are lenient, + // since Go doesn't use null-terminated strings. + + // The RFC allows a comment after a space, however, + // all of it (version and comments) goes into the + // session hash. + versionString = append(versionString, buf[0]) + } + + if !ok { + return nil, errors.New("ssh: overflow reading version string") + } + + // There might be a '\r' on the end which we should remove. + if len(versionString) > 0 && versionString[len(versionString)-1] == '\r' { + versionString = versionString[:len(versionString)-1] + } + return versionString, nil +} diff --git a/vendor/golang.org/x/crypto/ssh/transport_test.go b/vendor/golang.org/x/crypto/ssh/transport_test.go new file mode 100644 index 000000000..92d83abf9 --- /dev/null +++ b/vendor/golang.org/x/crypto/ssh/transport_test.go @@ -0,0 +1,109 @@ +// Copyright 2011 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ssh + +import ( + "bytes" + "crypto/rand" + "encoding/binary" + "strings" + "testing" +) + +func TestReadVersion(t *testing.T) { + longversion := strings.Repeat("SSH-2.0-bla", 50)[:253] + cases := map[string]string{ + "SSH-2.0-bla\r\n": "SSH-2.0-bla", + "SSH-2.0-bla\n": "SSH-2.0-bla", + longversion + "\r\n": longversion, + } + + for in, want := range cases { + result, err := readVersion(bytes.NewBufferString(in)) + if err != nil { + t.Errorf("readVersion(%q): %s", in, err) + } + got := string(result) + if got != want { + t.Errorf("got %q, want %q", got, want) + } + } +} + +func TestReadVersionError(t *testing.T) { + longversion := strings.Repeat("SSH-2.0-bla", 50)[:253] + cases := []string{ + longversion + "too-long\r\n", + } + for _, in := range cases { + if _, err := readVersion(bytes.NewBufferString(in)); err == nil { + t.Errorf("readVersion(%q) should have failed", in) + } + } +} + +func TestExchangeVersionsBasic(t *testing.T) { + v := "SSH-2.0-bla" + buf := bytes.NewBufferString(v + "\r\n") + them, err := exchangeVersions(buf, []byte("xyz")) + if err != nil { + t.Errorf("exchangeVersions: %v", err) + } + + if want := "SSH-2.0-bla"; string(them) != want { + t.Errorf("got %q want %q for our version", them, want) + } +} + +func TestExchangeVersions(t *testing.T) { + cases := []string{ + "not\x000allowed", + "not allowed\n", + } + for _, c := range cases { + buf := bytes.NewBufferString("SSH-2.0-bla\r\n") + if _, err := exchangeVersions(buf, []byte(c)); err == nil { + t.Errorf("exchangeVersions(%q): should have failed", c) + } + } +} + +type closerBuffer struct { + bytes.Buffer +} + +func (b *closerBuffer) Close() error { + return nil +} + +func TestTransportMaxPacketWrite(t *testing.T) { + buf := &closerBuffer{} + tr := newTransport(buf, rand.Reader, true) + huge := make([]byte, maxPacket+1) + err := tr.writePacket(huge) + if err == nil { + t.Errorf("transport accepted write for a huge packet.") + } +} + +func TestTransportMaxPacketReader(t *testing.T) { + var header [5]byte + huge := make([]byte, maxPacket+128) + binary.BigEndian.PutUint32(header[0:], uint32(len(huge))) + // padding. + header[4] = 0 + + buf := &closerBuffer{} + buf.Write(header[:]) + buf.Write(huge) + + tr := newTransport(buf, rand.Reader, true) + _, err := tr.readPacket() + if err == nil { + t.Errorf("transport succeeded reading huge packet.") + } else if !strings.Contains(err.Error(), "large") { + t.Errorf("got %q, should mention %q", err.Error(), "large") + } +} diff --git a/vendor/golang.org/x/net/context/context.go b/vendor/golang.org/x/net/context/context.go new file mode 100644 index 000000000..11bd8d34e --- /dev/null +++ b/vendor/golang.org/x/net/context/context.go @@ -0,0 +1,447 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package context defines the Context type, which carries deadlines, +// cancelation signals, and other request-scoped values across API boundaries +// and between processes. +// +// Incoming requests to a server should create a Context, and outgoing calls to +// servers should accept a Context. The chain of function calls between must +// propagate the Context, optionally replacing it with a modified copy created +// using WithDeadline, WithTimeout, WithCancel, or WithValue. 
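+//
+// For example (a sketch; handleSearch and fetchResults are hypothetical):
+//
+//	func handleSearch(w http.ResponseWriter, req *http.Request) {
+//		ctx, cancel := context.WithTimeout(context.Background(), 100*time.Millisecond)
+//		defer cancel() // release the context's resources once the handler returns
+//		results, err := fetchResults(ctx, req.FormValue("q"))
+//		// ...
+//	}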
+// +// Programs that use Contexts should follow these rules to keep interfaces +// consistent across packages and enable static analysis tools to check context +// propagation: +// +// Do not store Contexts inside a struct type; instead, pass a Context +// explicitly to each function that needs it. The Context should be the first +// parameter, typically named ctx: +// +// func DoSomething(ctx context.Context, arg Arg) error { +// // ... use ctx ... +// } +// +// Do not pass a nil Context, even if a function permits it. Pass context.TODO +// if you are unsure about which Context to use. +// +// Use context Values only for request-scoped data that transits processes and +// APIs, not for passing optional parameters to functions. +// +// The same Context may be passed to functions running in different goroutines; +// Contexts are safe for simultaneous use by multiple goroutines. +// +// See http://blog.golang.org/context for example code for a server that uses +// Contexts. +package context + +import ( + "errors" + "fmt" + "sync" + "time" +) + +// A Context carries a deadline, a cancelation signal, and other values across +// API boundaries. +// +// Context's methods may be called by multiple goroutines simultaneously. +type Context interface { + // Deadline returns the time when work done on behalf of this context + // should be canceled. Deadline returns ok==false when no deadline is + // set. Successive calls to Deadline return the same results. + Deadline() (deadline time.Time, ok bool) + + // Done returns a channel that's closed when work done on behalf of this + // context should be canceled. Done may return nil if this context can + // never be canceled. Successive calls to Done return the same value. + // + // WithCancel arranges for Done to be closed when cancel is called; + // WithDeadline arranges for Done to be closed when the deadline + // expires; WithTimeout arranges for Done to be closed when the timeout + // elapses. + // + // Done is provided for use in select statements: + // + // // Stream generates values with DoSomething and sends them to out + // // until DoSomething returns an error or ctx.Done is closed. + // func Stream(ctx context.Context, out <-chan Value) error { + // for { + // v, err := DoSomething(ctx) + // if err != nil { + // return err + // } + // select { + // case <-ctx.Done(): + // return ctx.Err() + // case out <- v: + // } + // } + // } + // + // See http://blog.golang.org/pipelines for more examples of how to use + // a Done channel for cancelation. + Done() <-chan struct{} + + // Err returns a non-nil error value after Done is closed. Err returns + // Canceled if the context was canceled or DeadlineExceeded if the + // context's deadline passed. No other values for Err are defined. + // After Done is closed, successive calls to Err return the same value. + Err() error + + // Value returns the value associated with this context for key, or nil + // if no value is associated with key. Successive calls to Value with + // the same key returns the same result. + // + // Use context values only for request-scoped data that transits + // processes and API boundaries, not for passing optional parameters to + // functions. + // + // A key identifies a specific value in a Context. Functions that wish + // to store values in Context typically allocate a key in a global + // variable then use that key as the argument to context.WithValue and + // Context.Value. 
A key can be any type that supports equality;
+	// packages should define keys as an unexported type to avoid
+	// collisions.
+	//
+	// Packages that define a Context key should provide type-safe accessors
+	// for the values stored using that key:
+	//
+	// 	// Package user defines a User type that's stored in Contexts.
+	// 	package user
+	//
+	// 	import "golang.org/x/net/context"
+	//
+	// 	// User is the type of value stored in the Contexts.
+	// 	type User struct {...}
+	//
+	// 	// key is an unexported type for keys defined in this package.
+	// 	// This prevents collisions with keys defined in other packages.
+	// 	type key int
+	//
+	// 	// userKey is the key for user.User values in Contexts. It is
+	// 	// unexported; clients use user.NewContext and user.FromContext
+	// 	// instead of using this key directly.
+	// 	var userKey key = 0
+	//
+	// 	// NewContext returns a new Context that carries value u.
+	// 	func NewContext(ctx context.Context, u *User) context.Context {
+	// 		return context.WithValue(ctx, userKey, u)
+	// 	}
+	//
+	// 	// FromContext returns the User value stored in ctx, if any.
+	// 	func FromContext(ctx context.Context) (*User, bool) {
+	// 		u, ok := ctx.Value(userKey).(*User)
+	// 		return u, ok
+	// 	}
+	Value(key interface{}) interface{}
+}
+
+// Canceled is the error returned by Context.Err when the context is canceled.
+var Canceled = errors.New("context canceled")
+
+// DeadlineExceeded is the error returned by Context.Err when the context's
+// deadline passes.
+var DeadlineExceeded = errors.New("context deadline exceeded")
+
+// An emptyCtx is never canceled, has no values, and has no deadline. It is not
+// struct{}, since vars of this type must have distinct addresses.
+type emptyCtx int
+
+func (*emptyCtx) Deadline() (deadline time.Time, ok bool) {
+	return
+}
+
+func (*emptyCtx) Done() <-chan struct{} {
+	return nil
+}
+
+func (*emptyCtx) Err() error {
+	return nil
+}
+
+func (*emptyCtx) Value(key interface{}) interface{} {
+	return nil
+}
+
+func (e *emptyCtx) String() string {
+	switch e {
+	case background:
+		return "context.Background"
+	case todo:
+		return "context.TODO"
+	}
+	return "unknown empty Context"
+}
+
+var (
+	background = new(emptyCtx)
+	todo       = new(emptyCtx)
+)
+
+// Background returns a non-nil, empty Context. It is never canceled, has no
+// values, and has no deadline. It is typically used by the main function,
+// initialization, and tests, and as the top-level Context for incoming
+// requests.
+func Background() Context {
+	return background
+}
+
+// TODO returns a non-nil, empty Context. Code should use context.TODO when
+// it's unclear which Context to use or it is not yet available (because the
+// surrounding function has not yet been extended to accept a Context
+// parameter). TODO is recognized by static analysis tools that determine
+// whether Contexts are propagated correctly in a program.
+func TODO() Context {
+	return todo
+}
+
+// A CancelFunc tells an operation to abandon its work.
+// A CancelFunc does not wait for the work to stop.
+// After the first call, subsequent calls to a CancelFunc do nothing.
+type CancelFunc func()
+
+// WithCancel returns a copy of parent with a new Done channel. The returned
+// context's Done channel is closed when the returned cancel function is called
+// or when the parent context's Done channel is closed, whichever happens first.
+//
+// Canceling this context releases resources associated with it, so code should
+// call cancel as soon as the operations running in this Context complete.
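+//
+// For example (a sketch; worker is hypothetical):
+//
+//	ctx, cancel := context.WithCancel(context.Background())
+//	defer cancel() // ensure resources are released even on early return
+//	if err := worker(ctx); err != nil {
+//		// ...
+//	}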
+func WithCancel(parent Context) (ctx Context, cancel CancelFunc) { + c := newCancelCtx(parent) + propagateCancel(parent, &c) + return &c, func() { c.cancel(true, Canceled) } +} + +// newCancelCtx returns an initialized cancelCtx. +func newCancelCtx(parent Context) cancelCtx { + return cancelCtx{ + Context: parent, + done: make(chan struct{}), + } +} + +// propagateCancel arranges for child to be canceled when parent is. +func propagateCancel(parent Context, child canceler) { + if parent.Done() == nil { + return // parent is never canceled + } + if p, ok := parentCancelCtx(parent); ok { + p.mu.Lock() + if p.err != nil { + // parent has already been canceled + child.cancel(false, p.err) + } else { + if p.children == nil { + p.children = make(map[canceler]bool) + } + p.children[child] = true + } + p.mu.Unlock() + } else { + go func() { + select { + case <-parent.Done(): + child.cancel(false, parent.Err()) + case <-child.Done(): + } + }() + } +} + +// parentCancelCtx follows a chain of parent references until it finds a +// *cancelCtx. This function understands how each of the concrete types in this +// package represents its parent. +func parentCancelCtx(parent Context) (*cancelCtx, bool) { + for { + switch c := parent.(type) { + case *cancelCtx: + return c, true + case *timerCtx: + return &c.cancelCtx, true + case *valueCtx: + parent = c.Context + default: + return nil, false + } + } +} + +// removeChild removes a context from its parent. +func removeChild(parent Context, child canceler) { + p, ok := parentCancelCtx(parent) + if !ok { + return + } + p.mu.Lock() + if p.children != nil { + delete(p.children, child) + } + p.mu.Unlock() +} + +// A canceler is a context type that can be canceled directly. The +// implementations are *cancelCtx and *timerCtx. +type canceler interface { + cancel(removeFromParent bool, err error) + Done() <-chan struct{} +} + +// A cancelCtx can be canceled. When canceled, it also cancels any children +// that implement canceler. +type cancelCtx struct { + Context + + done chan struct{} // closed by the first cancel call. + + mu sync.Mutex + children map[canceler]bool // set to nil by the first cancel call + err error // set to non-nil by the first cancel call +} + +func (c *cancelCtx) Done() <-chan struct{} { + return c.done +} + +func (c *cancelCtx) Err() error { + c.mu.Lock() + defer c.mu.Unlock() + return c.err +} + +func (c *cancelCtx) String() string { + return fmt.Sprintf("%v.WithCancel", c.Context) +} + +// cancel closes c.done, cancels each of c's children, and, if +// removeFromParent is true, removes c from its parent's children. +func (c *cancelCtx) cancel(removeFromParent bool, err error) { + if err == nil { + panic("context: internal error: missing cancel error") + } + c.mu.Lock() + if c.err != nil { + c.mu.Unlock() + return // already canceled + } + c.err = err + close(c.done) + for child := range c.children { + // NOTE: acquiring the child's lock while holding parent's lock. + child.cancel(false, err) + } + c.children = nil + c.mu.Unlock() + + if removeFromParent { + removeChild(c.Context, c) + } +} + +// WithDeadline returns a copy of the parent context with the deadline adjusted +// to be no later than d. If the parent's deadline is already earlier than d, +// WithDeadline(parent, d) is semantically equivalent to parent. The returned +// context's Done channel is closed when the deadline expires, when the returned +// cancel function is called, or when the parent context's Done channel is +// closed, whichever happens first. 
+// +// Canceling this context releases resources associated with it, so code should +// call cancel as soon as the operations running in this Context complete. +func WithDeadline(parent Context, deadline time.Time) (Context, CancelFunc) { + if cur, ok := parent.Deadline(); ok && cur.Before(deadline) { + // The current deadline is already sooner than the new one. + return WithCancel(parent) + } + c := &timerCtx{ + cancelCtx: newCancelCtx(parent), + deadline: deadline, + } + propagateCancel(parent, c) + d := deadline.Sub(time.Now()) + if d <= 0 { + c.cancel(true, DeadlineExceeded) // deadline has already passed + return c, func() { c.cancel(true, Canceled) } + } + c.mu.Lock() + defer c.mu.Unlock() + if c.err == nil { + c.timer = time.AfterFunc(d, func() { + c.cancel(true, DeadlineExceeded) + }) + } + return c, func() { c.cancel(true, Canceled) } +} + +// A timerCtx carries a timer and a deadline. It embeds a cancelCtx to +// implement Done and Err. It implements cancel by stopping its timer then +// delegating to cancelCtx.cancel. +type timerCtx struct { + cancelCtx + timer *time.Timer // Under cancelCtx.mu. + + deadline time.Time +} + +func (c *timerCtx) Deadline() (deadline time.Time, ok bool) { + return c.deadline, true +} + +func (c *timerCtx) String() string { + return fmt.Sprintf("%v.WithDeadline(%s [%s])", c.cancelCtx.Context, c.deadline, c.deadline.Sub(time.Now())) +} + +func (c *timerCtx) cancel(removeFromParent bool, err error) { + c.cancelCtx.cancel(false, err) + if removeFromParent { + // Remove this timerCtx from its parent cancelCtx's children. + removeChild(c.cancelCtx.Context, c) + } + c.mu.Lock() + if c.timer != nil { + c.timer.Stop() + c.timer = nil + } + c.mu.Unlock() +} + +// WithTimeout returns WithDeadline(parent, time.Now().Add(timeout)). +// +// Canceling this context releases resources associated with it, so code should +// call cancel as soon as the operations running in this Context complete: +// +// func slowOperationWithTimeout(ctx context.Context) (Result, error) { +// ctx, cancel := context.WithTimeout(ctx, 100*time.Millisecond) +// defer cancel() // releases resources if slowOperation completes before timeout elapses +// return slowOperation(ctx) +// } +func WithTimeout(parent Context, timeout time.Duration) (Context, CancelFunc) { + return WithDeadline(parent, time.Now().Add(timeout)) +} + +// WithValue returns a copy of parent in which the value associated with key is +// val. +// +// Use context Values only for request-scoped data that transits processes and +// APIs, not for passing optional parameters to functions. +func WithValue(parent Context, key interface{}, val interface{}) Context { + return &valueCtx{parent, key, val} +} + +// A valueCtx carries a key-value pair. It implements Value for that key and +// delegates all other calls to the embedded Context. +type valueCtx struct { + Context + key, val interface{} +} + +func (c *valueCtx) String() string { + return fmt.Sprintf("%v.WithValue(%#v, %#v)", c.Context, c.key, c.val) +} + +func (c *valueCtx) Value(key interface{}) interface{} { + if c.key == key { + return c.val + } + return c.Context.Value(key) +} diff --git a/vendor/golang.org/x/net/context/context_test.go b/vendor/golang.org/x/net/context/context_test.go new file mode 100644 index 000000000..05345fc5e --- /dev/null +++ b/vendor/golang.org/x/net/context/context_test.go @@ -0,0 +1,575 @@ +// Copyright 2014 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package context + +import ( + "fmt" + "math/rand" + "runtime" + "strings" + "sync" + "testing" + "time" +) + +// otherContext is a Context that's not one of the types defined in context.go. +// This lets us test code paths that differ based on the underlying type of the +// Context. +type otherContext struct { + Context +} + +func TestBackground(t *testing.T) { + c := Background() + if c == nil { + t.Fatalf("Background returned nil") + } + select { + case x := <-c.Done(): + t.Errorf("<-c.Done() == %v want nothing (it should block)", x) + default: + } + if got, want := fmt.Sprint(c), "context.Background"; got != want { + t.Errorf("Background().String() = %q want %q", got, want) + } +} + +func TestTODO(t *testing.T) { + c := TODO() + if c == nil { + t.Fatalf("TODO returned nil") + } + select { + case x := <-c.Done(): + t.Errorf("<-c.Done() == %v want nothing (it should block)", x) + default: + } + if got, want := fmt.Sprint(c), "context.TODO"; got != want { + t.Errorf("TODO().String() = %q want %q", got, want) + } +} + +func TestWithCancel(t *testing.T) { + c1, cancel := WithCancel(Background()) + + if got, want := fmt.Sprint(c1), "context.Background.WithCancel"; got != want { + t.Errorf("c1.String() = %q want %q", got, want) + } + + o := otherContext{c1} + c2, _ := WithCancel(o) + contexts := []Context{c1, o, c2} + + for i, c := range contexts { + if d := c.Done(); d == nil { + t.Errorf("c[%d].Done() == %v want non-nil", i, d) + } + if e := c.Err(); e != nil { + t.Errorf("c[%d].Err() == %v want nil", i, e) + } + + select { + case x := <-c.Done(): + t.Errorf("<-c.Done() == %v want nothing (it should block)", x) + default: + } + } + + cancel() + time.Sleep(100 * time.Millisecond) // let cancelation propagate + + for i, c := range contexts { + select { + case <-c.Done(): + default: + t.Errorf("<-c[%d].Done() blocked, but shouldn't have", i) + } + if e := c.Err(); e != Canceled { + t.Errorf("c[%d].Err() == %v want %v", i, e, Canceled) + } + } +} + +func TestParentFinishesChild(t *testing.T) { + // Context tree: + // parent -> cancelChild + // parent -> valueChild -> timerChild + parent, cancel := WithCancel(Background()) + cancelChild, stop := WithCancel(parent) + defer stop() + valueChild := WithValue(parent, "key", "value") + timerChild, stop := WithTimeout(valueChild, 10000*time.Hour) + defer stop() + + select { + case x := <-parent.Done(): + t.Errorf("<-parent.Done() == %v want nothing (it should block)", x) + case x := <-cancelChild.Done(): + t.Errorf("<-cancelChild.Done() == %v want nothing (it should block)", x) + case x := <-timerChild.Done(): + t.Errorf("<-timerChild.Done() == %v want nothing (it should block)", x) + case x := <-valueChild.Done(): + t.Errorf("<-valueChild.Done() == %v want nothing (it should block)", x) + default: + } + + // The parent's children should contain the two cancelable children. 
+ pc := parent.(*cancelCtx) + cc := cancelChild.(*cancelCtx) + tc := timerChild.(*timerCtx) + pc.mu.Lock() + if len(pc.children) != 2 || !pc.children[cc] || !pc.children[tc] { + t.Errorf("bad linkage: pc.children = %v, want %v and %v", + pc.children, cc, tc) + } + pc.mu.Unlock() + + if p, ok := parentCancelCtx(cc.Context); !ok || p != pc { + t.Errorf("bad linkage: parentCancelCtx(cancelChild.Context) = %v, %v want %v, true", p, ok, pc) + } + if p, ok := parentCancelCtx(tc.Context); !ok || p != pc { + t.Errorf("bad linkage: parentCancelCtx(timerChild.Context) = %v, %v want %v, true", p, ok, pc) + } + + cancel() + + pc.mu.Lock() + if len(pc.children) != 0 { + t.Errorf("pc.cancel didn't clear pc.children = %v", pc.children) + } + pc.mu.Unlock() + + // parent and children should all be finished. + check := func(ctx Context, name string) { + select { + case <-ctx.Done(): + default: + t.Errorf("<-%s.Done() blocked, but shouldn't have", name) + } + if e := ctx.Err(); e != Canceled { + t.Errorf("%s.Err() == %v want %v", name, e, Canceled) + } + } + check(parent, "parent") + check(cancelChild, "cancelChild") + check(valueChild, "valueChild") + check(timerChild, "timerChild") + + // WithCancel should return a canceled context on a canceled parent. + precanceledChild := WithValue(parent, "key", "value") + select { + case <-precanceledChild.Done(): + default: + t.Errorf("<-precanceledChild.Done() blocked, but shouldn't have") + } + if e := precanceledChild.Err(); e != Canceled { + t.Errorf("precanceledChild.Err() == %v want %v", e, Canceled) + } +} + +func TestChildFinishesFirst(t *testing.T) { + cancelable, stop := WithCancel(Background()) + defer stop() + for _, parent := range []Context{Background(), cancelable} { + child, cancel := WithCancel(parent) + + select { + case x := <-parent.Done(): + t.Errorf("<-parent.Done() == %v want nothing (it should block)", x) + case x := <-child.Done(): + t.Errorf("<-child.Done() == %v want nothing (it should block)", x) + default: + } + + cc := child.(*cancelCtx) + pc, pcok := parent.(*cancelCtx) // pcok == false when parent == Background() + if p, ok := parentCancelCtx(cc.Context); ok != pcok || (ok && pc != p) { + t.Errorf("bad linkage: parentCancelCtx(cc.Context) = %v, %v want %v, %v", p, ok, pc, pcok) + } + + if pcok { + pc.mu.Lock() + if len(pc.children) != 1 || !pc.children[cc] { + t.Errorf("bad linkage: pc.children = %v, cc = %v", pc.children, cc) + } + pc.mu.Unlock() + } + + cancel() + + if pcok { + pc.mu.Lock() + if len(pc.children) != 0 { + t.Errorf("child's cancel didn't remove self from pc.children = %v", pc.children) + } + pc.mu.Unlock() + } + + // child should be finished. + select { + case <-child.Done(): + default: + t.Errorf("<-child.Done() blocked, but shouldn't have") + } + if e := child.Err(); e != Canceled { + t.Errorf("child.Err() == %v want %v", e, Canceled) + } + + // parent should not be finished. 
+ select { + case x := <-parent.Done(): + t.Errorf("<-parent.Done() == %v want nothing (it should block)", x) + default: + } + if e := parent.Err(); e != nil { + t.Errorf("parent.Err() == %v want nil", e) + } + } +} + +func testDeadline(c Context, wait time.Duration, t *testing.T) { + select { + case <-time.After(wait): + t.Fatalf("context should have timed out") + case <-c.Done(): + } + if e := c.Err(); e != DeadlineExceeded { + t.Errorf("c.Err() == %v want %v", e, DeadlineExceeded) + } +} + +func TestDeadline(t *testing.T) { + c, _ := WithDeadline(Background(), time.Now().Add(100*time.Millisecond)) + if got, prefix := fmt.Sprint(c), "context.Background.WithDeadline("; !strings.HasPrefix(got, prefix) { + t.Errorf("c.String() = %q want prefix %q", got, prefix) + } + testDeadline(c, 200*time.Millisecond, t) + + c, _ = WithDeadline(Background(), time.Now().Add(100*time.Millisecond)) + o := otherContext{c} + testDeadline(o, 200*time.Millisecond, t) + + c, _ = WithDeadline(Background(), time.Now().Add(100*time.Millisecond)) + o = otherContext{c} + c, _ = WithDeadline(o, time.Now().Add(300*time.Millisecond)) + testDeadline(c, 200*time.Millisecond, t) +} + +func TestTimeout(t *testing.T) { + c, _ := WithTimeout(Background(), 100*time.Millisecond) + if got, prefix := fmt.Sprint(c), "context.Background.WithDeadline("; !strings.HasPrefix(got, prefix) { + t.Errorf("c.String() = %q want prefix %q", got, prefix) + } + testDeadline(c, 200*time.Millisecond, t) + + c, _ = WithTimeout(Background(), 100*time.Millisecond) + o := otherContext{c} + testDeadline(o, 200*time.Millisecond, t) + + c, _ = WithTimeout(Background(), 100*time.Millisecond) + o = otherContext{c} + c, _ = WithTimeout(o, 300*time.Millisecond) + testDeadline(c, 200*time.Millisecond, t) +} + +func TestCanceledTimeout(t *testing.T) { + c, _ := WithTimeout(Background(), 200*time.Millisecond) + o := otherContext{c} + c, cancel := WithTimeout(o, 400*time.Millisecond) + cancel() + time.Sleep(100 * time.Millisecond) // let cancelation propagate + select { + case <-c.Done(): + default: + t.Errorf("<-c.Done() blocked, but shouldn't have") + } + if e := c.Err(); e != Canceled { + t.Errorf("c.Err() == %v want %v", e, Canceled) + } +} + +type key1 int +type key2 int + +var k1 = key1(1) +var k2 = key2(1) // same int as k1, different type +var k3 = key2(3) // same type as k2, different int + +func TestValues(t *testing.T) { + check := func(c Context, nm, v1, v2, v3 string) { + if v, ok := c.Value(k1).(string); ok == (len(v1) == 0) || v != v1 { + t.Errorf(`%s.Value(k1).(string) = %q, %t want %q, %t`, nm, v, ok, v1, len(v1) != 0) + } + if v, ok := c.Value(k2).(string); ok == (len(v2) == 0) || v != v2 { + t.Errorf(`%s.Value(k2).(string) = %q, %t want %q, %t`, nm, v, ok, v2, len(v2) != 0) + } + if v, ok := c.Value(k3).(string); ok == (len(v3) == 0) || v != v3 { + t.Errorf(`%s.Value(k3).(string) = %q, %t want %q, %t`, nm, v, ok, v3, len(v3) != 0) + } + } + + c0 := Background() + check(c0, "c0", "", "", "") + + c1 := WithValue(Background(), k1, "c1k1") + check(c1, "c1", "c1k1", "", "") + + if got, want := fmt.Sprint(c1), `context.Background.WithValue(1, "c1k1")`; got != want { + t.Errorf("c.String() = %q want %q", got, want) + } + + c2 := WithValue(c1, k2, "c2k2") + check(c2, "c2", "c1k1", "c2k2", "") + + c3 := WithValue(c2, k3, "c3k3") + check(c3, "c2", "c1k1", "c2k2", "c3k3") + + c4 := WithValue(c3, k1, nil) + check(c4, "c4", "", "c2k2", "c3k3") + + o0 := otherContext{Background()} + check(o0, "o0", "", "", "") + + o1 := otherContext{WithValue(Background(), 
k1, "c1k1")}
+	check(o1, "o1", "c1k1", "", "")
+
+	o2 := WithValue(o1, k2, "o2k2")
+	check(o2, "o2", "c1k1", "o2k2", "")
+
+	o3 := otherContext{c4}
+	check(o3, "o3", "", "c2k2", "c3k3")
+
+	o4 := WithValue(o3, k3, nil)
+	check(o4, "o4", "", "c2k2", "")
+}
+
+func TestAllocs(t *testing.T) {
+	bg := Background()
+	for _, test := range []struct {
+		desc       string
+		f          func()
+		limit      float64
+		gccgoLimit float64
+	}{
+		{
+			desc:       "Background()",
+			f:          func() { Background() },
+			limit:      0,
+			gccgoLimit: 0,
+		},
+		{
+			desc: fmt.Sprintf("WithValue(bg, %v, nil)", k1),
+			f: func() {
+				c := WithValue(bg, k1, nil)
+				c.Value(k1)
+			},
+			limit:      3,
+			gccgoLimit: 3,
+		},
+		{
+			desc: "WithTimeout(bg, 15*time.Millisecond)",
+			f: func() {
+				c, _ := WithTimeout(bg, 15*time.Millisecond)
+				<-c.Done()
+			},
+			limit:      8,
+			gccgoLimit: 15,
+		},
+		{
+			desc: "WithCancel(bg)",
+			f: func() {
+				c, cancel := WithCancel(bg)
+				cancel()
+				<-c.Done()
+			},
+			limit:      5,
+			gccgoLimit: 8,
+		},
+		{
+			desc: "WithTimeout(bg, 100*time.Millisecond)",
+			f: func() {
+				c, cancel := WithTimeout(bg, 100*time.Millisecond)
+				cancel()
+				<-c.Done()
+			},
+			limit:      8,
+			gccgoLimit: 25,
+		},
+	} {
+		limit := test.limit
+		if runtime.Compiler == "gccgo" {
+			// gccgo does not yet do escape analysis.
+			// TODO(iant): Remove this when gccgo does escape analysis.
+			limit = test.gccgoLimit
+		}
+		if n := testing.AllocsPerRun(100, test.f); n > limit {
+			t.Errorf("%s allocs = %f want %d", test.desc, n, int(limit))
+		}
+	}
+}
+
+func TestSimultaneousCancels(t *testing.T) {
+	root, cancel := WithCancel(Background())
+	m := map[Context]CancelFunc{root: cancel}
+	q := []Context{root}
+	// Create a tree of contexts.
+	for len(q) != 0 && len(m) < 100 {
+		parent := q[0]
+		q = q[1:]
+		for i := 0; i < 4; i++ {
+			ctx, cancel := WithCancel(parent)
+			m[ctx] = cancel
+			q = append(q, ctx)
+		}
+	}
+	// Start all the cancels in a random order.
+	var wg sync.WaitGroup
+	wg.Add(len(m))
+	for _, cancel := range m {
+		go func(cancel CancelFunc) {
+			cancel()
+			wg.Done()
+		}(cancel)
+	}
+	// Wait on all the contexts in a random order.
+	for ctx := range m {
+		select {
+		case <-ctx.Done():
+		case <-time.After(1 * time.Second):
+			buf := make([]byte, 10<<10)
+			n := runtime.Stack(buf, true)
+			t.Fatalf("timed out waiting for <-ctx.Done(); stacks:\n%s", buf[:n])
+		}
+	}
+	// Wait for all the cancel functions to return.
+	done := make(chan struct{})
+	go func() {
+		wg.Wait()
+		close(done)
+	}()
+	select {
+	case <-done:
+	case <-time.After(1 * time.Second):
+		buf := make([]byte, 10<<10)
+		n := runtime.Stack(buf, true)
+		t.Fatalf("timed out waiting for cancel functions; stacks:\n%s", buf[:n])
+	}
+}
+
+func TestInterlockedCancels(t *testing.T) {
+	parent, cancelParent := WithCancel(Background())
+	child, cancelChild := WithCancel(parent)
+	go func() {
+		parent.Done()
+		cancelChild()
+	}()
+	cancelParent()
+	select {
+	case <-child.Done():
+	case <-time.After(1 * time.Second):
+		buf := make([]byte, 10<<10)
+		n := runtime.Stack(buf, true)
+		t.Fatalf("timed out waiting for child.Done(); stacks:\n%s", buf[:n])
+	}
+}
+
+func TestLayersCancel(t *testing.T) {
+	testLayers(t, time.Now().UnixNano(), false)
+}
+
+func TestLayersTimeout(t *testing.T) {
+	testLayers(t, time.Now().UnixNano(), true)
+}
+
+func testLayers(t *testing.T, seed int64, testTimeout bool) {
+	rand.Seed(seed)
+	errorf := func(format string, a ...interface{}) {
+		t.Errorf(fmt.Sprintf("seed=%d: %s", seed, format), a...)
+ } + const ( + timeout = 200 * time.Millisecond + minLayers = 30 + ) + type value int + var ( + vals []*value + cancels []CancelFunc + numTimers int + ctx = Background() + ) + for i := 0; i < minLayers || numTimers == 0 || len(cancels) == 0 || len(vals) == 0; i++ { + switch rand.Intn(3) { + case 0: + v := new(value) + ctx = WithValue(ctx, v, v) + vals = append(vals, v) + case 1: + var cancel CancelFunc + ctx, cancel = WithCancel(ctx) + cancels = append(cancels, cancel) + case 2: + var cancel CancelFunc + ctx, cancel = WithTimeout(ctx, timeout) + cancels = append(cancels, cancel) + numTimers++ + } + } + checkValues := func(when string) { + for _, key := range vals { + if val := ctx.Value(key).(*value); key != val { + errorf("%s: ctx.Value(%p) = %p want %p", when, key, val, key) + } + } + } + select { + case <-ctx.Done(): + errorf("ctx should not be canceled yet") + default: + } + if s, prefix := fmt.Sprint(ctx), "context.Background."; !strings.HasPrefix(s, prefix) { + t.Errorf("ctx.String() = %q want prefix %q", s, prefix) + } + t.Log(ctx) + checkValues("before cancel") + if testTimeout { + select { + case <-ctx.Done(): + case <-time.After(timeout + 100*time.Millisecond): + errorf("ctx should have timed out") + } + checkValues("after timeout") + } else { + cancel := cancels[rand.Intn(len(cancels))] + cancel() + select { + case <-ctx.Done(): + default: + errorf("ctx should be canceled") + } + checkValues("after cancel") + } +} + +func TestCancelRemoves(t *testing.T) { + checkChildren := func(when string, ctx Context, want int) { + if got := len(ctx.(*cancelCtx).children); got != want { + t.Errorf("%s: context has %d children, want %d", when, got, want) + } + } + + ctx, _ := WithCancel(Background()) + checkChildren("after creation", ctx, 0) + _, cancel := WithCancel(ctx) + checkChildren("with WithCancel child ", ctx, 1) + cancel() + checkChildren("after cancelling WithCancel child", ctx, 0) + + ctx, _ = WithCancel(Background()) + checkChildren("after creation", ctx, 0) + _, cancel = WithTimeout(ctx, 60*time.Minute) + checkChildren("with WithTimeout child ", ctx, 1) + cancel() + checkChildren("after cancelling WithTimeout child", ctx, 0) +} diff --git a/vendor/golang.org/x/net/context/ctxhttp/cancelreq.go b/vendor/golang.org/x/net/context/ctxhttp/cancelreq.go new file mode 100644 index 000000000..e3170e333 --- /dev/null +++ b/vendor/golang.org/x/net/context/ctxhttp/cancelreq.go @@ -0,0 +1,19 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build go1.5 + +package ctxhttp + +import "net/http" + +func canceler(client *http.Client, req *http.Request) func() { + // TODO(djd): Respect any existing value of req.Cancel. + ch := make(chan struct{}) + req.Cancel = ch + + return func() { + close(ch) + } +} diff --git a/vendor/golang.org/x/net/context/ctxhttp/cancelreq_go14.go b/vendor/golang.org/x/net/context/ctxhttp/cancelreq_go14.go new file mode 100644 index 000000000..56bcbadb8 --- /dev/null +++ b/vendor/golang.org/x/net/context/ctxhttp/cancelreq_go14.go @@ -0,0 +1,23 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
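+
+// Before Go 1.5, http.Request has no Cancel field, so cancelation here goes
+// through the Transport's CancelRequest method instead (see the
+// requestCanceler interface below); if the client's Transport does not
+// implement it, canceler degrades to a no-op and an in-flight request
+// simply cannot be canceled.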
+ +// +build !go1.5 + +package ctxhttp + +import "net/http" + +type requestCanceler interface { + CancelRequest(*http.Request) +} + +func canceler(client *http.Client, req *http.Request) func() { + rc, ok := client.Transport.(requestCanceler) + if !ok { + return func() {} + } + return func() { + rc.CancelRequest(req) + } +} diff --git a/vendor/golang.org/x/net/context/ctxhttp/ctxhttp.go b/vendor/golang.org/x/net/context/ctxhttp/ctxhttp.go new file mode 100644 index 000000000..26a5e19ac --- /dev/null +++ b/vendor/golang.org/x/net/context/ctxhttp/ctxhttp.go @@ -0,0 +1,140 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package ctxhttp provides helper functions for performing context-aware HTTP requests. +package ctxhttp + +import ( + "io" + "net/http" + "net/url" + "strings" + + "golang.org/x/net/context" +) + +func nop() {} + +var ( + testHookContextDoneBeforeHeaders = nop + testHookDoReturned = nop + testHookDidBodyClose = nop +) + +// Do sends an HTTP request with the provided http.Client and returns an HTTP response. +// If the client is nil, http.DefaultClient is used. +// If the context is canceled or times out, ctx.Err() will be returned. +func Do(ctx context.Context, client *http.Client, req *http.Request) (*http.Response, error) { + if client == nil { + client = http.DefaultClient + } + + // Request cancelation changed in Go 1.5, see cancelreq.go and cancelreq_go14.go. + cancel := canceler(client, req) + + type responseAndError struct { + resp *http.Response + err error + } + result := make(chan responseAndError, 1) + + go func() { + resp, err := client.Do(req) + testHookDoReturned() + result <- responseAndError{resp, err} + }() + + var resp *http.Response + + select { + case <-ctx.Done(): + testHookContextDoneBeforeHeaders() + cancel() + // Clean up after the goroutine calling client.Do: + go func() { + if r := <-result; r.resp != nil { + testHookDidBodyClose() + r.resp.Body.Close() + } + }() + return nil, ctx.Err() + case r := <-result: + var err error + resp, err = r.resp, r.err + if err != nil { + return resp, err + } + } + + c := make(chan struct{}) + go func() { + select { + case <-ctx.Done(): + cancel() + case <-c: + // The response's Body is closed. + } + }() + resp.Body = ¬ifyingReader{resp.Body, c} + + return resp, nil +} + +// Get issues a GET request via the Do function. +func Get(ctx context.Context, client *http.Client, url string) (*http.Response, error) { + req, err := http.NewRequest("GET", url, nil) + if err != nil { + return nil, err + } + return Do(ctx, client, req) +} + +// Head issues a HEAD request via the Do function. +func Head(ctx context.Context, client *http.Client, url string) (*http.Response, error) { + req, err := http.NewRequest("HEAD", url, nil) + if err != nil { + return nil, err + } + return Do(ctx, client, req) +} + +// Post issues a POST request via the Do function. +func Post(ctx context.Context, client *http.Client, url string, bodyType string, body io.Reader) (*http.Response, error) { + req, err := http.NewRequest("POST", url, body) + if err != nil { + return nil, err + } + req.Header.Set("Content-Type", bodyType) + return Do(ctx, client, req) +} + +// PostForm issues a POST request via the Do function. 
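+//
+// For example (a sketch; the URL is hypothetical):
+//
+//	resp, err := ctxhttp.PostForm(ctx, nil, "http://example.com/form",
+//		url.Values{"q": {"gopher"}})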
+func PostForm(ctx context.Context, client *http.Client, url string, data url.Values) (*http.Response, error) {
+	return Post(ctx, client, url, "application/x-www-form-urlencoded", strings.NewReader(data.Encode()))
+}
+
+// notifyingReader is an io.ReadCloser that closes the notify channel after
+// Close is called or a Read fails on the underlying ReadCloser.
+type notifyingReader struct {
+	io.ReadCloser
+	notify chan<- struct{}
+}
+
+func (r *notifyingReader) Read(p []byte) (int, error) {
+	n, err := r.ReadCloser.Read(p)
+	if err != nil && r.notify != nil {
+		close(r.notify)
+		r.notify = nil
+	}
+	return n, err
+}
+
+func (r *notifyingReader) Close() error {
+	err := r.ReadCloser.Close()
+	if r.notify != nil {
+		close(r.notify)
+		r.notify = nil
+	}
+	return err
+}
diff --git a/vendor/golang.org/x/net/context/ctxhttp/ctxhttp_test.go b/vendor/golang.org/x/net/context/ctxhttp/ctxhttp_test.go
new file mode 100644
index 000000000..77c25ba7e
--- /dev/null
+++ b/vendor/golang.org/x/net/context/ctxhttp/ctxhttp_test.go
@@ -0,0 +1,176 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !plan9
+
+package ctxhttp
+
+import (
+	"io/ioutil"
+	"net"
+	"net/http"
+	"net/http/httptest"
+	"sync"
+	"testing"
+	"time"
+
+	"golang.org/x/net/context"
+)
+
+const (
+	requestDuration = 100 * time.Millisecond
+	requestBody     = "ok"
+)
+
+func TestNoTimeout(t *testing.T) {
+	ctx := context.Background()
+	resp, err := doRequest(ctx)
+
+	if resp == nil || err != nil {
+		t.Fatalf("error received from client: %v %v", err, resp)
+	}
+}
+
+func TestCancel(t *testing.T) {
+	ctx, cancel := context.WithCancel(context.Background())
+	go func() {
+		time.Sleep(requestDuration / 2)
+		cancel()
+	}()
+
+	resp, err := doRequest(ctx)
+
+	if resp != nil || err == nil {
+		t.Fatalf("expected error, didn't get one. resp: %v", resp)
+	}
+	if err != ctx.Err() {
+		t.Fatalf("expected error from context but got: %v", err)
+	}
+}
+
+func TestCancelAfterRequest(t *testing.T) {
+	ctx, cancel := context.WithCancel(context.Background())
+
+	resp, err := doRequest(ctx)
+
+	// Cancel before reading the body.
+	// Request.Body should still be readable after the context is canceled.
+	cancel()
+
+	b, err := ioutil.ReadAll(resp.Body)
+	if err != nil || string(b) != requestBody {
+		t.Fatalf("could not read body: %q %v", b, err)
+	}
+}
+
+func TestCancelAfterHangingRequest(t *testing.T) {
+	handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+		w.WriteHeader(http.StatusOK)
+		w.(http.Flusher).Flush()
+		<-w.(http.CloseNotifier).CloseNotify()
+	})
+
+	serv := httptest.NewServer(handler)
+	defer serv.Close()
+
+	ctx, cancel := context.WithCancel(context.Background())
+	resp, err := Get(ctx, nil, serv.URL)
+	if err != nil {
+		t.Fatalf("unexpected error in Get: %v", err)
+	}
+
+	// Cancel before reading the body.
+	// Reading Request.Body should fail, since the request was
+	// canceled before anything was written.
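Taken together, the Do, Get, Head, Post, and PostForm helpers and the notifyingReader above give callers context-aware HTTP with the response body cleaned up on cancellation. A minimal usage sketch, assuming only the vendored ctxhttp and context packages (the URL is a placeholder):

    ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
    defer cancel()
    resp, err := ctxhttp.Get(ctx, nil, "http://example.com/") // nil means http.DefaultClient
    if err != nil {
        return err // ctx.Err() if the context was canceled or timed out
    }
    defer resp.Body.Close()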
+ cancel() + + done := make(chan struct{}) + + go func() { + b, err := ioutil.ReadAll(resp.Body) + if len(b) != 0 || err == nil { + t.Errorf(`Read got (%q, %v); want ("", error)`, b, err) + } + close(done) + }() + + select { + case <-time.After(1 * time.Second): + t.Errorf("Test timed out") + case <-done: + } +} + +func doRequest(ctx context.Context) (*http.Response, error) { + var okHandler = http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + time.Sleep(requestDuration) + w.Write([]byte(requestBody)) + }) + + serv := httptest.NewServer(okHandler) + defer serv.Close() + + return Get(ctx, nil, serv.URL) +} + +// golang.org/issue/14065 +func TestClosesResponseBodyOnCancel(t *testing.T) { + defer func() { testHookContextDoneBeforeHeaders = nop }() + defer func() { testHookDoReturned = nop }() + defer func() { testHookDidBodyClose = nop }() + + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {})) + defer ts.Close() + + ctx, cancel := context.WithCancel(context.Background()) + + // closed when Do enters select case <-ctx.Done() + enteredDonePath := make(chan struct{}) + + testHookContextDoneBeforeHeaders = func() { + close(enteredDonePath) + } + + testHookDoReturned = func() { + // We now have the result (the Flush'd headers) at least, + // so we can cancel the request. + cancel() + + // But block the client.Do goroutine from sending + // until Do enters into the <-ctx.Done() path, since + // otherwise if both channels are readable, select + // picks a random one. + <-enteredDonePath + } + + sawBodyClose := make(chan struct{}) + testHookDidBodyClose = func() { close(sawBodyClose) } + + tr := &http.Transport{} + defer tr.CloseIdleConnections() + c := &http.Client{Transport: tr} + req, _ := http.NewRequest("GET", ts.URL, nil) + _, doErr := Do(ctx, c, req) + + select { + case <-sawBodyClose: + case <-time.After(5 * time.Second): + t.Fatal("timeout waiting for body to close") + } + + if doErr != ctx.Err() { + t.Errorf("Do error = %v; want %v", doErr, ctx.Err()) + } +} + +type noteCloseConn struct { + net.Conn + onceClose sync.Once + closefn func() +} + +func (c *noteCloseConn) Close() error { + c.onceClose.Do(c.closefn) + return c.Conn.Close() +} diff --git a/vendor/golang.org/x/net/context/withtimeout_test.go b/vendor/golang.org/x/net/context/withtimeout_test.go new file mode 100644 index 000000000..a6754dc36 --- /dev/null +++ b/vendor/golang.org/x/net/context/withtimeout_test.go @@ -0,0 +1,26 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package context_test + +import ( + "fmt" + "time" + + "golang.org/x/net/context" +) + +func ExampleWithTimeout() { + // Pass a context with a timeout to tell a blocking function that it + // should abandon its work after the timeout elapses. 
+ ctx, _ := context.WithTimeout(context.Background(), 100*time.Millisecond) + select { + case <-time.After(200 * time.Millisecond): + fmt.Println("overslept") + case <-ctx.Done(): + fmt.Println(ctx.Err()) // prints "context deadline exceeded" + } + // Output: + // context deadline exceeded +} diff --git a/vendor/golang.org/x/oauth2/.travis.yml b/vendor/golang.org/x/oauth2/.travis.yml new file mode 100644 index 000000000..a035125c3 --- /dev/null +++ b/vendor/golang.org/x/oauth2/.travis.yml @@ -0,0 +1,14 @@ +language: go + +go: + - 1.3 + - 1.4 + +install: + - export GOPATH="$HOME/gopath" + - mkdir -p "$GOPATH/src/golang.org/x" + - mv "$TRAVIS_BUILD_DIR" "$GOPATH/src/golang.org/x/oauth2" + - go get -v -t -d golang.org/x/oauth2/... + +script: + - go test -v golang.org/x/oauth2/... diff --git a/vendor/golang.org/x/oauth2/AUTHORS b/vendor/golang.org/x/oauth2/AUTHORS new file mode 100644 index 000000000..15167cd74 --- /dev/null +++ b/vendor/golang.org/x/oauth2/AUTHORS @@ -0,0 +1,3 @@ +# This source code refers to The Go Authors for copyright purposes. +# The master list of authors is in the main Go distribution, +# visible at http://tip.golang.org/AUTHORS. diff --git a/vendor/golang.org/x/oauth2/CONTRIBUTING.md b/vendor/golang.org/x/oauth2/CONTRIBUTING.md new file mode 100644 index 000000000..46aa2b12d --- /dev/null +++ b/vendor/golang.org/x/oauth2/CONTRIBUTING.md @@ -0,0 +1,31 @@ +# Contributing to Go + +Go is an open source project. + +It is the work of hundreds of contributors. We appreciate your help! + + +## Filing issues + +When [filing an issue](https://github.com/golang/oauth2/issues), make sure to answer these five questions: + +1. What version of Go are you using (`go version`)? +2. What operating system and processor architecture are you using? +3. What did you do? +4. What did you expect to see? +5. What did you see instead? + +General questions should go to the [golang-nuts mailing list](https://groups.google.com/group/golang-nuts) instead of the issue tracker. +The gophers there will answer or ask you to file an issue if you've tripped over a bug. + +## Contributing code + +Please read the [Contribution Guidelines](https://golang.org/doc/contribute.html) +before sending patches. + +**We do not accept GitHub pull requests** +(we use [Gerrit](https://code.google.com/p/gerrit/) instead for code review). + +Unless otherwise noted, the Go source files are distributed under +the BSD-style license found in the LICENSE file. + diff --git a/vendor/golang.org/x/oauth2/CONTRIBUTORS b/vendor/golang.org/x/oauth2/CONTRIBUTORS new file mode 100644 index 000000000..1c4577e96 --- /dev/null +++ b/vendor/golang.org/x/oauth2/CONTRIBUTORS @@ -0,0 +1,3 @@ +# This source code was written by the Go contributors. +# The master list of contributors is in the main Go distribution, +# visible at http://tip.golang.org/CONTRIBUTORS. diff --git a/vendor/golang.org/x/oauth2/LICENSE b/vendor/golang.org/x/oauth2/LICENSE new file mode 100644 index 000000000..d02f24fd5 --- /dev/null +++ b/vendor/golang.org/x/oauth2/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2009 The oauth2 Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. 
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/golang.org/x/oauth2/README.md b/vendor/golang.org/x/oauth2/README.md
new file mode 100644
index 000000000..0d5141733
--- /dev/null
+++ b/vendor/golang.org/x/oauth2/README.md
@@ -0,0 +1,64 @@
+# OAuth2 for Go
+
+[![Build Status](https://travis-ci.org/golang/oauth2.svg?branch=master)](https://travis-ci.org/golang/oauth2)
+
+The oauth2 package contains a client implementation of the OAuth 2.0 spec.
+
+## Installation
+
+~~~~
+go get golang.org/x/oauth2
+~~~~
+
+See godoc for further documentation and examples.
+
+* [godoc.org/golang.org/x/oauth2](http://godoc.org/golang.org/x/oauth2)
+* [godoc.org/golang.org/x/oauth2/google](http://godoc.org/golang.org/x/oauth2/google)
+
+
+## App Engine
+
+In change 96e89be (March 2015) we removed the `oauth2.Context2` type in favor
+of the [`context.Context`](https://golang.org/x/net/context#Context) type from
+the `golang.org/x/net/context` package.
+
+This means it's no longer possible to use the "Classic App Engine"
+`appengine.Context` type with the `oauth2` package. (You're using
+Classic App Engine if you import the package `"appengine"`.)
+
+To work around this, you may use the new `"google.golang.org/appengine"`
+package. This package has almost the same API as the `"appengine"` package,
+but it can be fetched with `go get` and used on "Managed VMs" as well as
+Classic App Engine.
+
+See the [new `appengine` package's readme](https://github.com/golang/appengine#updating-a-go-app-engine-app)
+for information on updating your app.
+
+If you don't want to update your entire app to use the new App Engine packages,
+you may use both sets of packages in parallel, using only the new packages
+with the `oauth2` package.
+ + import ( + "golang.org/x/net/context" + "golang.org/x/oauth2" + "golang.org/x/oauth2/google" + newappengine "google.golang.org/appengine" + newurlfetch "google.golang.org/appengine/urlfetch" + + "appengine" + ) + + func handler(w http.ResponseWriter, r *http.Request) { + var c appengine.Context = appengine.NewContext(r) + c.Infof("Logging a message with the old package") + + var ctx context.Context = newappengine.NewContext(r) + client := &http.Client{ + Transport: &oauth2.Transport{ + Source: google.AppEngineTokenSource(ctx, "scope"), + Base: &newurlfetch.Transport{Context: ctx}, + }, + } + client.Get("...") + } + diff --git a/vendor/golang.org/x/oauth2/bitbucket/bitbucket.go b/vendor/golang.org/x/oauth2/bitbucket/bitbucket.go new file mode 100644 index 000000000..44af1f1a9 --- /dev/null +++ b/vendor/golang.org/x/oauth2/bitbucket/bitbucket.go @@ -0,0 +1,16 @@ +// Copyright 2015 The oauth2 Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package bitbucket provides constants for using OAuth2 to access Bitbucket. +package bitbucket + +import ( + "golang.org/x/oauth2" +) + +// Endpoint is Bitbucket's OAuth 2.0 endpoint. +var Endpoint = oauth2.Endpoint{ + AuthURL: "https://bitbucket.org/site/oauth2/authorize", + TokenURL: "https://bitbucket.org/site/oauth2/access_token", +} diff --git a/vendor/golang.org/x/oauth2/client_appengine.go b/vendor/golang.org/x/oauth2/client_appengine.go new file mode 100644 index 000000000..8962c49d1 --- /dev/null +++ b/vendor/golang.org/x/oauth2/client_appengine.go @@ -0,0 +1,25 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build appengine + +// App Engine hooks. + +package oauth2 + +import ( + "net/http" + + "golang.org/x/net/context" + "golang.org/x/oauth2/internal" + "google.golang.org/appengine/urlfetch" +) + +func init() { + internal.RegisterContextClientFunc(contextClientAppEngine) +} + +func contextClientAppEngine(ctx context.Context) (*http.Client, error) { + return urlfetch.Client(ctx), nil +} diff --git a/vendor/golang.org/x/oauth2/clientcredentials/clientcredentials.go b/vendor/golang.org/x/oauth2/clientcredentials/clientcredentials.go new file mode 100644 index 000000000..a47e46559 --- /dev/null +++ b/vendor/golang.org/x/oauth2/clientcredentials/clientcredentials.go @@ -0,0 +1,112 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package clientcredentials implements the OAuth2.0 "client credentials" token flow, +// also known as the "two-legged OAuth 2.0". +// +// This should be used when the client is acting on its own behalf or when the client +// is the resource owner. It may also be used when requesting access to protected +// resources based on an authorization previously arranged with the authorization +// server. +// +// See http://tools.ietf.org/html/draft-ietf-oauth-v2-31#section-4.4 +package clientcredentials + +import ( + "net/http" + "net/url" + "strings" + + "golang.org/x/net/context" + "golang.org/x/oauth2" + "golang.org/x/oauth2/internal" +) + +// tokenFromInternal maps an *internal.Token struct into +// an *oauth2.Token struct. 
+func tokenFromInternal(t *internal.Token) *oauth2.Token {
+	if t == nil {
+		return nil
+	}
+	tk := &oauth2.Token{
+		AccessToken:  t.AccessToken,
+		TokenType:    t.TokenType,
+		RefreshToken: t.RefreshToken,
+		Expiry:       t.Expiry,
+	}
+	return tk.WithExtra(t.Raw)
+}
+
+// retrieveToken takes a *Config and uses that to retrieve an *internal.Token.
+// This token is then mapped from *internal.Token into an *oauth2.Token which is
+// returned along with an error.
+func retrieveToken(ctx context.Context, c *Config, v url.Values) (*oauth2.Token, error) {
+	tk, err := internal.RetrieveToken(ctx, c.ClientID, c.ClientSecret, c.TokenURL, v)
+	if err != nil {
+		return nil, err
+	}
+	return tokenFromInternal(tk), nil
+}
+
+// Config describes a 2-legged OAuth2 flow, with both the
+// client application information and the server's endpoint URLs.
+type Config struct {
+	// ClientID is the application's ID.
+	ClientID string
+
+	// ClientSecret is the application's secret.
+	ClientSecret string
+
+	// TokenURL is the resource server's token endpoint
+	// URL. This is a constant specific to each server.
+	TokenURL string
+
+	// Scopes specifies optional requested permissions.
+	Scopes []string
+}
+
+// Token uses client credentials to retrieve a token.
+// The HTTP client to use is derived from the context.
+// If nil, http.DefaultClient is used.
+func (c *Config) Token(ctx context.Context) (*oauth2.Token, error) {
+	return retrieveToken(ctx, c, url.Values{
+		"grant_type": {"client_credentials"},
+		"scope":      internal.CondVal(strings.Join(c.Scopes, " ")),
+	})
+}
+
+// Client returns an HTTP client using the provided token.
+// The token will auto-refresh as necessary. The underlying
+// HTTP transport will be obtained using the provided context.
+// The returned client and its Transport should not be modified.
+func (c *Config) Client(ctx context.Context) *http.Client {
+	return oauth2.NewClient(ctx, c.TokenSource(ctx))
+}
+
+// TokenSource returns a TokenSource that returns t until t expires,
+// automatically refreshing it as necessary using the provided context and the
+// client ID and client secret.
+//
+// Most users will use Config.Client instead.
+func (c *Config) TokenSource(ctx context.Context) oauth2.TokenSource {
+	source := &tokenSource{
+		ctx:  ctx,
+		conf: c,
+	}
+	return oauth2.ReuseTokenSource(nil, source)
+}
+
+type tokenSource struct {
+	ctx  context.Context
+	conf *Config
+}
+
+// Token refreshes the token by using a new client credentials request.
+// Tokens received this way do not include a refresh token.
+func (c *tokenSource) Token() (*oauth2.Token, error) {
+	return retrieveToken(c.ctx, c.conf, url.Values{
+		"grant_type": {"client_credentials"},
+		"scope":      internal.CondVal(strings.Join(c.conf.Scopes, " ")),
+	})
+}
diff --git a/vendor/golang.org/x/oauth2/clientcredentials/clientcredentials_test.go b/vendor/golang.org/x/oauth2/clientcredentials/clientcredentials_test.go
new file mode 100644
index 000000000..5a0170a95
--- /dev/null
+++ b/vendor/golang.org/x/oauth2/clientcredentials/clientcredentials_test.go
@@ -0,0 +1,96 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
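A minimal two-legged usage sketch for the package above (the endpoint URL and credentials are placeholders):

    conf := &clientcredentials.Config{
        ClientID:     "YOUR_CLIENT_ID",
        ClientSecret: "YOUR_CLIENT_SECRET",
        TokenURL:     "https://provider.com/o/oauth2/token",
        Scopes:       []string{"scope1", "scope2"},
    }
    // Client returns an *http.Client that obtains and refreshes tokens
    // via the client_credentials grant as needed.
    client := conf.Client(oauth2.NoContext)
    client.Get("...")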
+ +package clientcredentials + +import ( + "io/ioutil" + "net/http" + "net/http/httptest" + "testing" + + "golang.org/x/oauth2" +) + +func newConf(url string) *Config { + return &Config{ + ClientID: "CLIENT_ID", + ClientSecret: "CLIENT_SECRET", + Scopes: []string{"scope1", "scope2"}, + TokenURL: url + "/token", + } +} + +type mockTransport struct { + rt func(req *http.Request) (resp *http.Response, err error) +} + +func (t *mockTransport) RoundTrip(req *http.Request) (resp *http.Response, err error) { + return t.rt(req) +} + +func TestTokenRequest(t *testing.T) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if r.URL.String() != "/token" { + t.Errorf("authenticate client request URL = %q; want %q", r.URL, "/token") + } + headerAuth := r.Header.Get("Authorization") + if headerAuth != "Basic Q0xJRU5UX0lEOkNMSUVOVF9TRUNSRVQ=" { + t.Errorf("Unexpected authorization header, %v is found.", headerAuth) + } + if got, want := r.Header.Get("Content-Type"), "application/x-www-form-urlencoded"; got != want { + t.Errorf("Content-Type header = %q; want %q", got, want) + } + body, err := ioutil.ReadAll(r.Body) + if err != nil { + r.Body.Close() + } + if err != nil { + t.Errorf("failed reading request body: %s.", err) + } + if string(body) != "client_id=CLIENT_ID&grant_type=client_credentials&scope=scope1+scope2" { + t.Errorf("payload = %q; want %q", string(body), "client_id=CLIENT_ID&grant_type=client_credentials&scope=scope1+scope2") + } + w.Header().Set("Content-Type", "application/x-www-form-urlencoded") + w.Write([]byte("access_token=90d64460d14870c08c81352a05dedd3465940a7c&token_type=bearer")) + })) + defer ts.Close() + conf := newConf(ts.URL) + tok, err := conf.Token(oauth2.NoContext) + if err != nil { + t.Error(err) + } + if !tok.Valid() { + t.Fatalf("token invalid. got: %#v", tok) + } + if tok.AccessToken != "90d64460d14870c08c81352a05dedd3465940a7c" { + t.Errorf("Access token = %q; want %q", tok.AccessToken, "90d64460d14870c08c81352a05dedd3465940a7c") + } + if tok.TokenType != "bearer" { + t.Errorf("token type = %q; want %q", tok.TokenType, "bearer") + } +} + +func TestTokenRefreshRequest(t *testing.T) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if r.URL.String() == "/somethingelse" { + return + } + if r.URL.String() != "/token" { + t.Errorf("Unexpected token refresh request URL, %v is found.", r.URL) + } + headerContentType := r.Header.Get("Content-Type") + if headerContentType != "application/x-www-form-urlencoded" { + t.Errorf("Unexpected Content-Type header, %v is found.", headerContentType) + } + body, _ := ioutil.ReadAll(r.Body) + if string(body) != "client_id=CLIENT_ID&grant_type=client_credentials&scope=scope1+scope2" { + t.Errorf("Unexpected refresh token payload, %v is found.", string(body)) + } + })) + defer ts.Close() + conf := newConf(ts.URL) + c := conf.Client(oauth2.NoContext) + c.Get(ts.URL + "/somethingelse") +} diff --git a/vendor/golang.org/x/oauth2/example_test.go b/vendor/golang.org/x/oauth2/example_test.go new file mode 100644 index 000000000..33b305c62 --- /dev/null +++ b/vendor/golang.org/x/oauth2/example_test.go @@ -0,0 +1,45 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
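The hard-coded Authorization value asserted in TestTokenRequest above is simply the HTTP Basic encoding of the test credentials; a quick sketch of where that constant comes from:

    // base64("CLIENT_ID:CLIENT_SECRET") == "Q0xJRU5UX0lEOkNMSUVOVF9TRUNSRVQ="
    auth := base64.StdEncoding.EncodeToString([]byte("CLIENT_ID:CLIENT_SECRET"))
    header := "Basic " + auth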
+ +package oauth2_test + +import ( + "fmt" + "log" + + "golang.org/x/oauth2" +) + +func ExampleConfig() { + conf := &oauth2.Config{ + ClientID: "YOUR_CLIENT_ID", + ClientSecret: "YOUR_CLIENT_SECRET", + Scopes: []string{"SCOPE1", "SCOPE2"}, + Endpoint: oauth2.Endpoint{ + AuthURL: "https://provider.com/o/oauth2/auth", + TokenURL: "https://provider.com/o/oauth2/token", + }, + } + + // Redirect user to consent page to ask for permission + // for the scopes specified above. + url := conf.AuthCodeURL("state", oauth2.AccessTypeOffline) + fmt.Printf("Visit the URL for the auth dialog: %v", url) + + // Use the authorization code that is pushed to the redirect URL. + // NewTransportWithCode will do the handshake to retrieve + // an access token and initiate a Transport that is + // authorized and authenticated by the retrieved token. + var code string + if _, err := fmt.Scan(&code); err != nil { + log.Fatal(err) + } + tok, err := conf.Exchange(oauth2.NoContext, code) + if err != nil { + log.Fatal(err) + } + + client := conf.Client(oauth2.NoContext, tok) + client.Get("...") +} diff --git a/vendor/golang.org/x/oauth2/facebook/facebook.go b/vendor/golang.org/x/oauth2/facebook/facebook.go new file mode 100644 index 000000000..51c1480bd --- /dev/null +++ b/vendor/golang.org/x/oauth2/facebook/facebook.go @@ -0,0 +1,16 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package facebook provides constants for using OAuth2 to access Facebook. +package facebook + +import ( + "golang.org/x/oauth2" +) + +// Endpoint is Facebook's OAuth 2.0 endpoint. +var Endpoint = oauth2.Endpoint{ + AuthURL: "https://www.facebook.com/dialog/oauth", + TokenURL: "https://graph.facebook.com/oauth/access_token", +} diff --git a/vendor/golang.org/x/oauth2/github/github.go b/vendor/golang.org/x/oauth2/github/github.go new file mode 100644 index 000000000..26502a368 --- /dev/null +++ b/vendor/golang.org/x/oauth2/github/github.go @@ -0,0 +1,16 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package github provides constants for using OAuth2 to access Github. +package github + +import ( + "golang.org/x/oauth2" +) + +// Endpoint is Github's OAuth 2.0 endpoint. +var Endpoint = oauth2.Endpoint{ + AuthURL: "https://github.com/login/oauth/authorize", + TokenURL: "https://github.com/login/oauth/access_token", +} diff --git a/vendor/golang.org/x/oauth2/google/appengine.go b/vendor/golang.org/x/oauth2/google/appengine.go new file mode 100644 index 000000000..dc993efb5 --- /dev/null +++ b/vendor/golang.org/x/oauth2/google/appengine.go @@ -0,0 +1,86 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package google + +import ( + "sort" + "strings" + "sync" + "time" + + "golang.org/x/net/context" + "golang.org/x/oauth2" +) + +// Set at init time by appenginevm_hook.go. If true, we are on App Engine Managed VMs. +var appengineVM bool + +// Set at init time by appengine_hook.go. If nil, we're not on App Engine. +var appengineTokenFunc func(c context.Context, scopes ...string) (token string, expiry time.Time, err error) + +// AppEngineTokenSource returns a token source that fetches tokens +// issued to the current App Engine application's service account. 
+// If you are implementing a 3-legged OAuth 2.0 flow on App Engine +// that involves user accounts, see oauth2.Config instead. +// +// The provided context must have come from appengine.NewContext. +func AppEngineTokenSource(ctx context.Context, scope ...string) oauth2.TokenSource { + if appengineTokenFunc == nil { + panic("google: AppEngineTokenSource can only be used on App Engine.") + } + scopes := append([]string{}, scope...) + sort.Strings(scopes) + return &appEngineTokenSource{ + ctx: ctx, + scopes: scopes, + key: strings.Join(scopes, " "), + } +} + +// aeTokens helps the fetched tokens to be reused until their expiration. +var ( + aeTokensMu sync.Mutex + aeTokens = make(map[string]*tokenLock) // key is space-separated scopes +) + +type tokenLock struct { + mu sync.Mutex // guards t; held while fetching or updating t + t *oauth2.Token +} + +type appEngineTokenSource struct { + ctx context.Context + scopes []string + key string // to aeTokens map; space-separated scopes +} + +func (ts *appEngineTokenSource) Token() (*oauth2.Token, error) { + if appengineTokenFunc == nil { + panic("google: AppEngineTokenSource can only be used on App Engine.") + } + + aeTokensMu.Lock() + tok, ok := aeTokens[ts.key] + if !ok { + tok = &tokenLock{} + aeTokens[ts.key] = tok + } + aeTokensMu.Unlock() + + tok.mu.Lock() + defer tok.mu.Unlock() + if tok.t.Valid() { + return tok.t, nil + } + access, exp, err := appengineTokenFunc(ts.ctx, ts.scopes...) + if err != nil { + return nil, err + } + tok.t = &oauth2.Token{ + AccessToken: access, + Expiry: exp, + } + return tok.t, nil +} diff --git a/vendor/golang.org/x/oauth2/google/appengine_hook.go b/vendor/golang.org/x/oauth2/google/appengine_hook.go new file mode 100644 index 000000000..4f42c8b34 --- /dev/null +++ b/vendor/golang.org/x/oauth2/google/appengine_hook.go @@ -0,0 +1,13 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build appengine + +package google + +import "google.golang.org/appengine" + +func init() { + appengineTokenFunc = appengine.AccessToken +} diff --git a/vendor/golang.org/x/oauth2/google/appenginevm_hook.go b/vendor/golang.org/x/oauth2/google/appenginevm_hook.go new file mode 100644 index 000000000..633611cc3 --- /dev/null +++ b/vendor/golang.org/x/oauth2/google/appenginevm_hook.go @@ -0,0 +1,14 @@ +// Copyright 2015 The oauth2 Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build appenginevm + +package google + +import "google.golang.org/appengine" + +func init() { + appengineVM = true + appengineTokenFunc = appengine.AccessToken +} diff --git a/vendor/golang.org/x/oauth2/google/default.go b/vendor/golang.org/x/oauth2/google/default.go new file mode 100644 index 000000000..b95236297 --- /dev/null +++ b/vendor/golang.org/x/oauth2/google/default.go @@ -0,0 +1,155 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package google + +import ( + "encoding/json" + "errors" + "fmt" + "io/ioutil" + "net/http" + "os" + "path/filepath" + "runtime" + + "golang.org/x/net/context" + "golang.org/x/oauth2" + "golang.org/x/oauth2/jwt" + "google.golang.org/cloud/compute/metadata" +) + +// DefaultClient returns an HTTP Client that uses the +// DefaultTokenSource to obtain authentication credentials. 
+// +// This client should be used when developing services +// that run on Google App Engine or Google Compute Engine +// and use "Application Default Credentials." +// +// For more details, see: +// https://developers.google.com/accounts/docs/application-default-credentials +// +func DefaultClient(ctx context.Context, scope ...string) (*http.Client, error) { + ts, err := DefaultTokenSource(ctx, scope...) + if err != nil { + return nil, err + } + return oauth2.NewClient(ctx, ts), nil +} + +// DefaultTokenSource is a token source that uses +// "Application Default Credentials". +// +// It looks for credentials in the following places, +// preferring the first location found: +// +// 1. A JSON file whose path is specified by the +// GOOGLE_APPLICATION_CREDENTIALS environment variable. +// 2. A JSON file in a location known to the gcloud command-line tool. +// On Windows, this is %APPDATA%/gcloud/application_default_credentials.json. +// On other systems, $HOME/.config/gcloud/application_default_credentials.json. +// 3. On Google App Engine it uses the appengine.AccessToken function. +// 4. On Google Compute Engine and Google App Engine Managed VMs, it fetches +// credentials from the metadata server. +// (In this final case any provided scopes are ignored.) +// +// For more details, see: +// https://developers.google.com/accounts/docs/application-default-credentials +// +func DefaultTokenSource(ctx context.Context, scope ...string) (oauth2.TokenSource, error) { + // First, try the environment variable. + const envVar = "GOOGLE_APPLICATION_CREDENTIALS" + if filename := os.Getenv(envVar); filename != "" { + ts, err := tokenSourceFromFile(ctx, filename, scope) + if err != nil { + return nil, fmt.Errorf("google: error getting credentials using %v environment variable: %v", envVar, err) + } + return ts, nil + } + + // Second, try a well-known file. + filename := wellKnownFile() + _, err := os.Stat(filename) + if err == nil { + ts, err2 := tokenSourceFromFile(ctx, filename, scope) + if err2 == nil { + return ts, nil + } + err = err2 + } else if os.IsNotExist(err) { + err = nil // ignore this error + } + if err != nil { + return nil, fmt.Errorf("google: error getting credentials using well-known file (%v): %v", filename, err) + } + + // Third, if we're on Google App Engine use those credentials. + if appengineTokenFunc != nil && !appengineVM { + return AppEngineTokenSource(ctx, scope...), nil + } + + // Fourth, if we're on Google Compute Engine use the metadata server. + if metadata.OnGCE() { + return ComputeTokenSource(""), nil + } + + // None are found; return helpful error. + const url = "https://developers.google.com/accounts/docs/application-default-credentials" + return nil, fmt.Errorf("google: could not find default credentials. 
See %v for more information.", url) +} + +func wellKnownFile() string { + const f = "application_default_credentials.json" + if runtime.GOOS == "windows" { + return filepath.Join(os.Getenv("APPDATA"), "gcloud", f) + } + return filepath.Join(guessUnixHomeDir(), ".config", "gcloud", f) +} + +func tokenSourceFromFile(ctx context.Context, filename string, scopes []string) (oauth2.TokenSource, error) { + b, err := ioutil.ReadFile(filename) + if err != nil { + return nil, err + } + var d struct { + // Common fields + Type string + ClientID string `json:"client_id"` + + // User Credential fields + ClientSecret string `json:"client_secret"` + RefreshToken string `json:"refresh_token"` + + // Service Account fields + ClientEmail string `json:"client_email"` + PrivateKeyID string `json:"private_key_id"` + PrivateKey string `json:"private_key"` + } + if err := json.Unmarshal(b, &d); err != nil { + return nil, err + } + switch d.Type { + case "authorized_user": + cfg := &oauth2.Config{ + ClientID: d.ClientID, + ClientSecret: d.ClientSecret, + Scopes: append([]string{}, scopes...), // copy + Endpoint: Endpoint, + } + tok := &oauth2.Token{RefreshToken: d.RefreshToken} + return cfg.TokenSource(ctx, tok), nil + case "service_account": + cfg := &jwt.Config{ + Email: d.ClientEmail, + PrivateKey: []byte(d.PrivateKey), + Scopes: append([]string{}, scopes...), // copy + TokenURL: JWTTokenURL, + } + return cfg.TokenSource(ctx), nil + case "": + return nil, errors.New("missing 'type' field in credentials") + default: + return nil, fmt.Errorf("unknown credential type: %q", d.Type) + } +} diff --git a/vendor/golang.org/x/oauth2/google/example_test.go b/vendor/golang.org/x/oauth2/google/example_test.go new file mode 100644 index 000000000..9745be192 --- /dev/null +++ b/vendor/golang.org/x/oauth2/google/example_test.go @@ -0,0 +1,150 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build appenginevm !appengine + +package google_test + +import ( + "fmt" + "io/ioutil" + "log" + "net/http" + + "golang.org/x/oauth2" + "golang.org/x/oauth2/google" + "golang.org/x/oauth2/jwt" + "google.golang.org/appengine" + "google.golang.org/appengine/urlfetch" +) + +func ExampleDefaultClient() { + client, err := google.DefaultClient(oauth2.NoContext, + "https://www.googleapis.com/auth/devstorage.full_control") + if err != nil { + log.Fatal(err) + } + client.Get("...") +} + +func Example_webServer() { + // Your credentials should be obtained from the Google + // Developer Console (https://console.developers.google.com). + conf := &oauth2.Config{ + ClientID: "YOUR_CLIENT_ID", + ClientSecret: "YOUR_CLIENT_SECRET", + RedirectURL: "YOUR_REDIRECT_URL", + Scopes: []string{ + "https://www.googleapis.com/auth/bigquery", + "https://www.googleapis.com/auth/blogger", + }, + Endpoint: google.Endpoint, + } + // Redirect user to Google's consent page to ask for permission + // for the scopes specified above. + url := conf.AuthCodeURL("state") + fmt.Printf("Visit the URL for the auth dialog: %v", url) + + // Handle the exchange code to initiate a transport. + tok, err := conf.Exchange(oauth2.NoContext, "authorization-code") + if err != nil { + log.Fatal(err) + } + client := conf.Client(oauth2.NoContext, tok) + client.Get("...") +} + +func ExampleJWTConfigFromJSON() { + // Your credentials should be obtained from the Google + // Developer Console (https://console.developers.google.com). 
+ // Navigate to your project, then see the "Credentials" page + // under "APIs & Auth". + // To create a service account client, click "Create new Client ID", + // select "Service Account", and click "Create Client ID". A JSON + // key file will then be downloaded to your computer. + data, err := ioutil.ReadFile("/path/to/your-project-key.json") + if err != nil { + log.Fatal(err) + } + conf, err := google.JWTConfigFromJSON(data, "https://www.googleapis.com/auth/bigquery") + if err != nil { + log.Fatal(err) + } + // Initiate an http.Client. The following GET request will be + // authorized and authenticated on the behalf of + // your service account. + client := conf.Client(oauth2.NoContext) + client.Get("...") +} + +func ExampleSDKConfig() { + // The credentials will be obtained from the first account that + // has been authorized with `gcloud auth login`. + conf, err := google.NewSDKConfig("") + if err != nil { + log.Fatal(err) + } + // Initiate an http.Client. The following GET request will be + // authorized and authenticated on the behalf of the SDK user. + client := conf.Client(oauth2.NoContext) + client.Get("...") +} + +func Example_serviceAccount() { + // Your credentials should be obtained from the Google + // Developer Console (https://console.developers.google.com). + conf := &jwt.Config{ + Email: "xxx@developer.gserviceaccount.com", + // The contents of your RSA private key or your PEM file + // that contains a private key. + // If you have a p12 file instead, you + // can use `openssl` to export the private key into a pem file. + // + // $ openssl pkcs12 -in key.p12 -passin pass:notasecret -out key.pem -nodes + // + // The field only supports PEM containers with no passphrase. + // The openssl command will convert p12 keys to passphrase-less PEM containers. + PrivateKey: []byte("-----BEGIN RSA PRIVATE KEY-----..."), + Scopes: []string{ + "https://www.googleapis.com/auth/bigquery", + "https://www.googleapis.com/auth/blogger", + }, + TokenURL: google.JWTTokenURL, + // If you would like to impersonate a user, you can + // create a transport with a subject. The following GET + // request will be made on the behalf of user@example.com. + // Optional. + Subject: "user@example.com", + } + // Initiate an http.Client, the following GET request will be + // authorized and authenticated on the behalf of user@example.com. + client := conf.Client(oauth2.NoContext) + client.Get("...") +} + +func ExampleAppEngineTokenSource() { + var req *http.Request // from the ServeHTTP handler + ctx := appengine.NewContext(req) + client := &http.Client{ + Transport: &oauth2.Transport{ + Source: google.AppEngineTokenSource(ctx, "https://www.googleapis.com/auth/bigquery"), + Base: &urlfetch.Transport{ + Context: ctx, + }, + }, + } + client.Get("...") +} + +func ExampleComputeTokenSource() { + client := &http.Client{ + Transport: &oauth2.Transport{ + // Fetch from Google Compute Engine's metadata server to retrieve + // an access token for the provided account. + // If no account is specified, "default" is used. + Source: google.ComputeTokenSource(""), + }, + } + client.Get("...") +} diff --git a/vendor/golang.org/x/oauth2/google/google.go b/vendor/golang.org/x/oauth2/google/google.go new file mode 100644 index 000000000..0bed73866 --- /dev/null +++ b/vendor/golang.org/x/oauth2/google/google.go @@ -0,0 +1,145 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+
+// Package google provides support for making OAuth2 authorized and
+// authenticated HTTP requests to Google APIs.
+// It supports the Web server flow, client-side credentials, service accounts,
+// Google Compute Engine service accounts, and Google App Engine service
+// accounts.
+//
+// For more information, please read
+// https://developers.google.com/accounts/docs/OAuth2
+// and
+// https://developers.google.com/accounts/docs/application-default-credentials.
package google
+
+import (
+	"encoding/json"
+	"errors"
+	"fmt"
+	"strings"
+	"time"
+
+	"golang.org/x/oauth2"
+	"golang.org/x/oauth2/jwt"
+	"google.golang.org/cloud/compute/metadata"
+)
+
+// Endpoint is Google's OAuth 2.0 endpoint.
+var Endpoint = oauth2.Endpoint{
+	AuthURL:  "https://accounts.google.com/o/oauth2/auth",
+	TokenURL: "https://accounts.google.com/o/oauth2/token",
+}
+
+// JWTTokenURL is Google's OAuth 2.0 token URL to use with the JWT flow.
+const JWTTokenURL = "https://accounts.google.com/o/oauth2/token"
+
+// ConfigFromJSON uses a Google Developers Console client_credentials.json
+// file to construct a config.
+// client_credentials.json can be downloaded from https://console.developers.google.com,
+// under "APIs & Auth" > "Credentials". Download the Web application credentials in the
+// JSON format and provide the contents of the file as jsonKey.
+func ConfigFromJSON(jsonKey []byte, scope ...string) (*oauth2.Config, error) {
+	type cred struct {
+		ClientID     string   `json:"client_id"`
+		ClientSecret string   `json:"client_secret"`
+		RedirectURIs []string `json:"redirect_uris"`
+		AuthURI      string   `json:"auth_uri"`
+		TokenURI     string   `json:"token_uri"`
+	}
+	var j struct {
+		Web       *cred `json:"web"`
+		Installed *cred `json:"installed"`
+	}
+	if err := json.Unmarshal(jsonKey, &j); err != nil {
+		return nil, err
+	}
+	var c *cred
+	switch {
+	case j.Web != nil:
+		c = j.Web
+	case j.Installed != nil:
+		c = j.Installed
+	default:
+		return nil, fmt.Errorf("oauth2/google: no credentials found")
+	}
+	if len(c.RedirectURIs) < 1 {
+		return nil, errors.New("oauth2/google: missing redirect URL in the client_credentials.json")
+	}
+	return &oauth2.Config{
+		ClientID:     c.ClientID,
+		ClientSecret: c.ClientSecret,
+		RedirectURL:  c.RedirectURIs[0],
+		Scopes:       scope,
+		Endpoint: oauth2.Endpoint{
+			AuthURL:  c.AuthURI,
+			TokenURL: c.TokenURI,
+		},
+	}, nil
+}
+
+// JWTConfigFromJSON uses a Google Developers service account JSON key file to read
+// the credentials that authorize and authenticate the requests.
+// Create a service account on "Credentials" page under "APIs & Auth" for your
+// project at https://console.developers.google.com to download a JSON key file.
+func JWTConfigFromJSON(jsonKey []byte, scope ...string) (*jwt.Config, error) {
+	var key struct {
+		Email      string `json:"client_email"`
+		PrivateKey string `json:"private_key"`
+	}
+	if err := json.Unmarshal(jsonKey, &key); err != nil {
+		return nil, err
+	}
+	return &jwt.Config{
+		Email:      key.Email,
+		PrivateKey: []byte(key.PrivateKey),
+		Scopes:     scope,
+		TokenURL:   JWTTokenURL,
+	}, nil
+}
+
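For reference, ConfigFromJSON above accepts the "web" or "installed" shapes produced by the Developers Console; a minimal sketch of an accepted file, with placeholder values:

    {
      "web": {
        "client_id": "YOUR_CLIENT_ID",
        "client_secret": "YOUR_CLIENT_SECRET",
        "redirect_uris": ["https://example.com/oauth2callback"],
        "auth_uri": "https://accounts.google.com/o/oauth2/auth",
        "token_uri": "https://accounts.google.com/o/oauth2/token"
      }
    }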
+// ComputeTokenSource returns a token source that fetches access tokens
+// from Google Compute Engine (GCE)'s metadata server. It's only valid to use
+// this token source if your program is running on a GCE instance.
+// If no account is specified, "default" is used.
+// Further information about retrieving access tokens from the GCE metadata
+// server can be found at https://cloud.google.com/compute/docs/authentication.
+func ComputeTokenSource(account string) oauth2.TokenSource {
+	return oauth2.ReuseTokenSource(nil, computeSource{account: account})
+}
+
+type computeSource struct {
+	account string
+}
+
+func (cs computeSource) Token() (*oauth2.Token, error) {
+	if !metadata.OnGCE() {
+		return nil, errors.New("oauth2/google: can't get a token from the metadata service; not running on GCE")
+	}
+	acct := cs.account
+	if acct == "" {
+		acct = "default"
+	}
+	tokenJSON, err := metadata.Get("instance/service-accounts/" + acct + "/token")
+	if err != nil {
+		return nil, err
+	}
+	var res struct {
+		AccessToken  string `json:"access_token"`
+		ExpiresInSec int    `json:"expires_in"`
+		TokenType    string `json:"token_type"`
+	}
+	err = json.NewDecoder(strings.NewReader(tokenJSON)).Decode(&res)
+	if err != nil {
+		return nil, fmt.Errorf("oauth2/google: invalid token JSON from metadata: %v", err)
+	}
+	if res.ExpiresInSec == 0 || res.AccessToken == "" {
+		return nil, fmt.Errorf("oauth2/google: incomplete token received from metadata")
+	}
+	return &oauth2.Token{
+		AccessToken: res.AccessToken,
+		TokenType:   res.TokenType,
+		Expiry:      time.Now().Add(time.Duration(res.ExpiresInSec) * time.Second),
+	}, nil
+}
diff --git a/vendor/golang.org/x/oauth2/google/google_test.go b/vendor/golang.org/x/oauth2/google/google_test.go
new file mode 100644
index 000000000..74080edea
--- /dev/null
+++ b/vendor/golang.org/x/oauth2/google/google_test.go
@@ -0,0 +1,67 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package google
+
+import (
+	"strings"
+	"testing"
+)
+
+var webJSONKey = []byte(`
+{
+	"web": {
+		"auth_uri": "https://google.com/o/oauth2/auth",
+		"client_secret": "3Oknc4jS_wA2r9i",
+		"token_uri": "https://google.com/o/oauth2/token",
+		"client_email": "222-nprqovg5k43uum874cs9osjt2koe97g8@developer.gserviceaccount.com",
+		"redirect_uris": ["https://www.example.com/oauth2callback"],
+		"client_x509_cert_url": "https://www.googleapis.com/robot/v1/metadata/x509/222-nprqovg5k43uum874cs9osjt2koe97g8@developer.gserviceaccount.com",
+		"client_id": "222-nprqovg5k43uum874cs9osjt2koe97g8.apps.googleusercontent.com",
+		"auth_provider_x509_cert_url": "https://www.googleapis.com/oauth2/v1/certs",
+		"javascript_origins": ["https://www.example.com"]
+	}
+}`)
+
+var installedJSONKey = []byte(`{
+	"installed": {
+		"client_id": "222-installed.apps.googleusercontent.com",
+		"redirect_uris": ["https://www.example.com/oauth2callback"]
+	}
+}`)
+
+func TestConfigFromJSON(t *testing.T) {
+	conf, err := ConfigFromJSON(webJSONKey, "scope1", "scope2")
+	if err != nil {
+		t.Error(err)
+	}
+	if got, want := conf.ClientID, "222-nprqovg5k43uum874cs9osjt2koe97g8.apps.googleusercontent.com"; got != want {
+		t.Errorf("ClientID = %q; want %q", got, want)
+	}
+	if got, want := conf.ClientSecret, "3Oknc4jS_wA2r9i"; got != want {
+		t.Errorf("ClientSecret = %q; want %q", got, want)
+	}
+	if got, want := conf.RedirectURL, "https://www.example.com/oauth2callback"; got != want {
+		t.Errorf("RedirectURL = %q; want %q", got, want)
+	}
+	if got, want := strings.Join(conf.Scopes, ","), "scope1,scope2"; got != want {
+		t.Errorf("Scopes = %q; want %q", got, want)
+	}
+	if got, want := conf.Endpoint.AuthURL, "https://google.com/o/oauth2/auth"; got != want {
+		t.Errorf("AuthURL = %q; want %q", got, want)
+	}
+	if got, want := conf.Endpoint.TokenURL, "https://google.com/o/oauth2/token"; got != want {
+		t.Errorf("TokenURL = %q; want %q", got, want)
+	}
+}
+
+func 
TestConfigFromJSON_Installed(t *testing.T) { + conf, err := ConfigFromJSON(installedJSONKey) + if err != nil { + t.Error(err) + } + if got, want := conf.ClientID, "222-installed.apps.googleusercontent.com"; got != want { + t.Errorf("ClientID = %q; want %q", got, want) + } +} diff --git a/vendor/golang.org/x/oauth2/google/jwt.go b/vendor/golang.org/x/oauth2/google/jwt.go new file mode 100644 index 000000000..b91991786 --- /dev/null +++ b/vendor/golang.org/x/oauth2/google/jwt.go @@ -0,0 +1,71 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package google + +import ( + "crypto/rsa" + "fmt" + "time" + + "golang.org/x/oauth2" + "golang.org/x/oauth2/internal" + "golang.org/x/oauth2/jws" +) + +// JWTAccessTokenSourceFromJSON uses a Google Developers service account JSON +// key file to read the credentials that authorize and authenticate the +// requests, and returns a TokenSource that does not use any OAuth2 flow but +// instead creates a JWT and sends that as the access token. +// The audience is typically a URL that specifies the scope of the credentials. +// +// Note that this is not a standard OAuth flow, but rather an +// optimization supported by a few Google services. +// Unless you know otherwise, you should use JWTConfigFromJSON instead. +func JWTAccessTokenSourceFromJSON(jsonKey []byte, audience string) (oauth2.TokenSource, error) { + cfg, err := JWTConfigFromJSON(jsonKey) + if err != nil { + return nil, fmt.Errorf("google: could not parse JSON key: %v", err) + } + pk, err := internal.ParseKey(cfg.PrivateKey) + if err != nil { + return nil, fmt.Errorf("google: could not parse key: %v", err) + } + ts := &jwtAccessTokenSource{ + email: cfg.Email, + audience: audience, + pk: pk, + } + tok, err := ts.Token() + if err != nil { + return nil, err + } + return oauth2.ReuseTokenSource(tok, ts), nil +} + +type jwtAccessTokenSource struct { + email, audience string + pk *rsa.PrivateKey +} + +func (ts *jwtAccessTokenSource) Token() (*oauth2.Token, error) { + iat := time.Now() + exp := iat.Add(time.Hour) + cs := &jws.ClaimSet{ + Iss: ts.email, + Sub: ts.email, + Aud: ts.audience, + Iat: iat.Unix(), + Exp: exp.Unix(), + } + hdr := &jws.Header{ + Algorithm: "RS256", + Typ: "JWT", + } + msg, err := jws.Encode(hdr, cs, ts.pk) + if err != nil { + return nil, fmt.Errorf("google: could not encode JWT: %v", err) + } + return &oauth2.Token{AccessToken: msg, TokenType: "Bearer", Expiry: exp}, nil +} diff --git a/vendor/golang.org/x/oauth2/google/sdk.go b/vendor/golang.org/x/oauth2/google/sdk.go new file mode 100644 index 000000000..d29a3bb9b --- /dev/null +++ b/vendor/golang.org/x/oauth2/google/sdk.go @@ -0,0 +1,168 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
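A short usage sketch for JWTAccessTokenSourceFromJSON above (the key path and audience are placeholders; the audience is typically the base URL of the API being called):

    data, err := ioutil.ReadFile("/path/to/key.json")
    if err != nil {
        log.Fatal(err)
    }
    ts, err := google.JWTAccessTokenSourceFromJSON(data, "https://www.example.com/api/")
    if err != nil {
        log.Fatal(err)
    }
    // Tokens are JWTs signed locally with the service account key;
    // no round trip to the OAuth2 token endpoint is made.
    client := oauth2.NewClient(oauth2.NoContext, ts)
    client.Get("...")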
+ +package google + +import ( + "encoding/json" + "errors" + "fmt" + "net/http" + "os" + "os/user" + "path/filepath" + "runtime" + "strings" + "time" + + "golang.org/x/net/context" + "golang.org/x/oauth2" + "golang.org/x/oauth2/internal" +) + +type sdkCredentials struct { + Data []struct { + Credential struct { + ClientID string `json:"client_id"` + ClientSecret string `json:"client_secret"` + AccessToken string `json:"access_token"` + RefreshToken string `json:"refresh_token"` + TokenExpiry *time.Time `json:"token_expiry"` + } `json:"credential"` + Key struct { + Account string `json:"account"` + Scope string `json:"scope"` + } `json:"key"` + } +} + +// An SDKConfig provides access to tokens from an account already +// authorized via the Google Cloud SDK. +type SDKConfig struct { + conf oauth2.Config + initialToken *oauth2.Token +} + +// NewSDKConfig creates an SDKConfig for the given Google Cloud SDK +// account. If account is empty, the account currently active in +// Google Cloud SDK properties is used. +// Google Cloud SDK credentials must be created by running `gcloud auth` +// before using this function. +// The Google Cloud SDK is available at https://cloud.google.com/sdk/. +func NewSDKConfig(account string) (*SDKConfig, error) { + configPath, err := sdkConfigPath() + if err != nil { + return nil, fmt.Errorf("oauth2/google: error getting SDK config path: %v", err) + } + credentialsPath := filepath.Join(configPath, "credentials") + f, err := os.Open(credentialsPath) + if err != nil { + return nil, fmt.Errorf("oauth2/google: failed to load SDK credentials: %v", err) + } + defer f.Close() + + var c sdkCredentials + if err := json.NewDecoder(f).Decode(&c); err != nil { + return nil, fmt.Errorf("oauth2/google: failed to decode SDK credentials from %q: %v", credentialsPath, err) + } + if len(c.Data) == 0 { + return nil, fmt.Errorf("oauth2/google: no credentials found in %q, run `gcloud auth login` to create one", credentialsPath) + } + if account == "" { + propertiesPath := filepath.Join(configPath, "properties") + f, err := os.Open(propertiesPath) + if err != nil { + return nil, fmt.Errorf("oauth2/google: failed to load SDK properties: %v", err) + } + defer f.Close() + ini, err := internal.ParseINI(f) + if err != nil { + return nil, fmt.Errorf("oauth2/google: failed to parse SDK properties %q: %v", propertiesPath, err) + } + core, ok := ini["core"] + if !ok { + return nil, fmt.Errorf("oauth2/google: failed to find [core] section in %v", ini) + } + active, ok := core["account"] + if !ok { + return nil, fmt.Errorf("oauth2/google: failed to find %q attribute in %v", "account", core) + } + account = active + } + + for _, d := range c.Data { + if account == "" || d.Key.Account == account { + if d.Credential.AccessToken == "" && d.Credential.RefreshToken == "" { + return nil, fmt.Errorf("oauth2/google: no token available for account %q", account) + } + var expiry time.Time + if d.Credential.TokenExpiry != nil { + expiry = *d.Credential.TokenExpiry + } + return &SDKConfig{ + conf: oauth2.Config{ + ClientID: d.Credential.ClientID, + ClientSecret: d.Credential.ClientSecret, + Scopes: strings.Split(d.Key.Scope, " "), + Endpoint: Endpoint, + RedirectURL: "oob", + }, + initialToken: &oauth2.Token{ + AccessToken: d.Credential.AccessToken, + RefreshToken: d.Credential.RefreshToken, + Expiry: expiry, + }, + }, nil + } + } + return nil, fmt.Errorf("oauth2/google: no such credentials for account %q", account) +} + +// Client returns an HTTP client using Google Cloud SDK credentials to +// authorize 
requests. The token will auto-refresh as necessary. The
+// underlying http.RoundTripper will be obtained using the provided
+// context. The returned client and its Transport should not be
+// modified.
+func (c *SDKConfig) Client(ctx context.Context) *http.Client {
+	return &http.Client{
+		Transport: &oauth2.Transport{
+			Source: c.TokenSource(ctx),
+		},
+	}
+}
+
+// TokenSource returns an oauth2.TokenSource that retrieves tokens from
+// Google Cloud SDK credentials using the provided context.
+// It will return the current access token stored in the credentials,
+// and refresh it when it expires, but it won't update the credentials
+// with the new access token.
+func (c *SDKConfig) TokenSource(ctx context.Context) oauth2.TokenSource {
+	return c.conf.TokenSource(ctx, c.initialToken)
+}
+
+// Scopes are the OAuth 2.0 scopes the current account is authorized for.
+func (c *SDKConfig) Scopes() []string {
+	return c.conf.Scopes
+}
+
+// sdkConfigPath tries to guess where the gcloud config is located.
+// It can be overridden during tests.
+var sdkConfigPath = func() (string, error) {
+	if runtime.GOOS == "windows" {
+		return filepath.Join(os.Getenv("APPDATA"), "gcloud"), nil
+	}
+	homeDir := guessUnixHomeDir()
+	if homeDir == "" {
+		return "", errors.New("unable to get current user home directory: os/user lookup failed; $HOME is empty")
+	}
+	return filepath.Join(homeDir, ".config", "gcloud"), nil
+}
+
+func guessUnixHomeDir() string {
+	usr, err := user.Current()
+	if err == nil {
+		return usr.HomeDir
+	}
+	return os.Getenv("HOME")
+}
diff --git a/vendor/golang.org/x/oauth2/google/sdk_test.go b/vendor/golang.org/x/oauth2/google/sdk_test.go
new file mode 100644
index 000000000..a5aa2a646
--- /dev/null
+++ b/vendor/golang.org/x/oauth2/google/sdk_test.go
@@ -0,0 +1,46 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package google
+
+import "testing"
+
+func TestSDKConfig(t *testing.T) {
+	sdkConfigPath = func() (string, error) {
+		return "testdata/gcloud", nil
+	}
+
+	tests := []struct {
+		account     string
+		accessToken string
+		err         bool
+	}{
+		{"", "bar_access_token", false},
+		{"foo@example.com", "foo_access_token", false},
+		{"bar@example.com", "bar_access_token", false},
+		{"baz@serviceaccount.example.com", "", true},
+	}
+	for _, tt := range tests {
+		c, err := NewSDKConfig(tt.account)
+		if got, want := err != nil, tt.err; got != want {
+			if !tt.err {
+				t.Errorf("expected no error, got error: %v", err)
+			} else {
+				t.Errorf("expected error, got none")
+			}
+			continue
+		}
+		if err != nil {
+			continue
+		}
+		tok := c.initialToken
+		if tok == nil {
+			t.Errorf("expected token %q, got: nil", tt.accessToken)
+			continue
+		}
+		if tok.AccessToken != tt.accessToken {
+			t.Errorf("expected token %q, got: %q", tt.accessToken, tok.AccessToken)
+		}
+	}
+}
diff --git a/vendor/golang.org/x/oauth2/google/testdata/gcloud/credentials b/vendor/golang.org/x/oauth2/google/testdata/gcloud/credentials
new file mode 100644
index 000000000..ff5eefbd0
--- /dev/null
+++ b/vendor/golang.org/x/oauth2/google/testdata/gcloud/credentials
@@ -0,0 +1,122 @@
+{
+  "data": [
+    {
+      "credential": {
+        "_class": "OAuth2Credentials",
+        "_module": "oauth2client.client",
+        "access_token": "foo_access_token",
+        "client_id": "foo_client_id",
+        "client_secret": "foo_client_secret",
+        "id_token": {
+          "at_hash": "foo_at_hash",
+          "aud": "foo_aud",
+          "azp": "foo_azp",
+          "cid": "foo_cid",
+          "email": "foo@example.com",
+          "email_verified": true,
+          "exp": 1420573614,
+          "iat": 1420569714,
+          "id": "1337",
+          "iss": "accounts.google.com",
+          "sub": "1337",
+          "token_hash": "foo_token_hash",
+          "verified_email": true
+        },
+        "invalid": false,
+        "refresh_token": "foo_refresh_token",
+        "revoke_uri": "https://accounts.google.com/o/oauth2/revoke",
+        "token_expiry": "2015-01-09T00:51:51Z",
+        "token_response": {
+          "access_token": "foo_access_token",
+          "expires_in": 3600,
+          "id_token": "foo_id_token",
+          "token_type": "Bearer"
+        },
+        "token_uri": "https://accounts.google.com/o/oauth2/token",
+        "user_agent": "Cloud SDK Command Line Tool"
+      },
+      "key": {
+        "account": "foo@example.com",
+        "clientId": "foo_client_id",
+        "scope": "https://www.googleapis.com/auth/appengine.admin https://www.googleapis.com/auth/bigquery https://www.googleapis.com/auth/compute https://www.googleapis.com/auth/devstorage.full_control https://www.googleapis.com/auth/userinfo.email https://www.googleapis.com/auth/ndev.cloudman https://www.googleapis.com/auth/cloud-platform https://www.googleapis.com/auth/sqlservice.admin https://www.googleapis.com/auth/prediction https://www.googleapis.com/auth/projecthosting",
+        "type": "google-cloud-sdk"
+      }
+    },
+    {
+      "credential": {
+        "_class": "OAuth2Credentials",
+        "_module": "oauth2client.client",
+        "access_token": "bar_access_token",
+        "client_id": "bar_client_id",
+        "client_secret": "bar_client_secret",
+        "id_token": {
+          "at_hash": "bar_at_hash",
+          "aud": "bar_aud",
+          "azp": "bar_azp",
+          "cid": "bar_cid",
+          "email": "bar@example.com",
+          "email_verified": true,
+          "exp": 1420573614,
+          "iat": 1420569714,
+          "id": "1337",
+          "iss": "accounts.google.com",
+          "sub": "1337",
+          "token_hash": "bar_token_hash",
+          "verified_email": true
+        },
+        "invalid": false,
+        "refresh_token": "bar_refresh_token",
+        "revoke_uri": "https://accounts.google.com/o/oauth2/revoke",
+        "token_expiry": "2015-01-09T00:51:51Z",
+        "token_response": {
+          "access_token": 
"bar_access_token", + "expires_in": 3600, + "id_token": "bar_id_token", + "token_type": "Bearer" + }, + "token_uri": "https://accounts.google.com/o/oauth2/token", + "user_agent": "Cloud SDK Command Line Tool" + }, + "key": { + "account": "bar@example.com", + "clientId": "bar_client_id", + "scope": "https://www.googleapis.com/auth/appengine.admin https://www.googleapis.com/auth/bigquery https://www.googleapis.com/auth/compute https://www.googleapis.com/auth/devstorage.full_control https://www.googleapis.com/auth/userinfo.email https://www.googleapis.com/auth/ndev.cloudman https://www.googleapis.com/auth/cloud-platform https://www.googleapis.com/auth/sqlservice.admin https://www.googleapis.com/auth/prediction https://www.googleapis.com/auth/projecthosting", + "type": "google-cloud-sdk" + } + }, + { + "credential": { + "_class": "ServiceAccountCredentials", + "_kwargs": {}, + "_module": "oauth2client.client", + "_private_key_id": "00000000000000000000000000000000", + "_private_key_pkcs8_text": "-----BEGIN RSA PRIVATE KEY-----\nMIICWwIBAAKBgQCt3fpiynPSaUhWSIKMGV331zudwJ6GkGmvQtwsoK2S2LbvnSwU\nNxgj4fp08kIDR5p26wF4+t/HrKydMwzftXBfZ9UmLVJgRdSswmS5SmChCrfDS5OE\nvFFcN5+6w1w8/Nu657PF/dse8T0bV95YrqyoR0Osy8WHrUOMSIIbC3hRuwIDAQAB\nAoGAJrGE/KFjn0sQ7yrZ6sXmdLawrM3mObo/2uI9T60+k7SpGbBX0/Pi6nFrJMWZ\nTVONG7P3Mu5aCPzzuVRYJB0j8aldSfzABTY3HKoWCczqw1OztJiEseXGiYz4QOyr\nYU3qDyEpdhS6q6wcoLKGH+hqRmz6pcSEsc8XzOOu7s4xW8kCQQDkc75HjhbarCnd\nJJGMe3U76+6UGmdK67ltZj6k6xoB5WbTNChY9TAyI2JC+ppYV89zv3ssj4L+02u3\nHIHFGxsHAkEAwtU1qYb1tScpchPobnYUFiVKJ7KA8EZaHVaJJODW/cghTCV7BxcJ\nbgVvlmk4lFKn3lPKAgWw7PdQsBTVBUcCrQJATPwoIirizrv3u5soJUQxZIkENAqV\nxmybZx9uetrzP7JTrVbFRf0SScMcyN90hdLJiQL8+i4+gaszgFht7sNMnwJAAbfj\nq0UXcauQwALQ7/h2oONfTg5S+MuGC/AxcXPSMZbMRGGoPh3D5YaCv27aIuS/ukQ+\n6dmm/9AGlCb64fsIWQJAPaokbjIifo+LwC5gyK73Mc4t8nAOSZDenzd/2f6TCq76\nS1dcnKiPxaED7W/y6LJiuBT2rbZiQ2L93NJpFZD/UA==\n-----END RSA PRIVATE KEY-----\n", + "_revoke_uri": "https://accounts.google.com/o/oauth2/revoke", + "_scopes": "https://www.googleapis.com/auth/appengine.admin https://www.googleapis.com/auth/bigquery https://www.googleapis.com/auth/compute https://www.googleapis.com/auth/devstorage.full_control https://www.googleapis.com/auth/userinfo.email https://www.googleapis.com/auth/ndev.cloudman https://www.googleapis.com/auth/cloud-platform https://www.googleapis.com/auth/sqlservice.admin https://www.googleapis.com/auth/prediction https://www.googleapis.com/auth/projecthosting", + "_service_account_email": "baz@serviceaccount.example.com", + "_service_account_id": "baz.serviceaccount.example.com", + "_token_uri": "https://accounts.google.com/o/oauth2/token", + "_user_agent": "Cloud SDK Command Line Tool", + "access_token": null, + "assertion_type": null, + "client_id": null, + "client_secret": null, + "id_token": null, + "invalid": false, + "refresh_token": null, + "revoke_uri": "https://accounts.google.com/o/oauth2/revoke", + "service_account_name": "baz@serviceaccount.example.com", + "token_expiry": null, + "token_response": null, + "user_agent": "Cloud SDK Command Line Tool" + }, + "key": { + "account": "baz@serviceaccount.example.com", + "clientId": "baz_client_id", + "scope": "https://www.googleapis.com/auth/appengine.admin https://www.googleapis.com/auth/bigquery https://www.googleapis.com/auth/compute https://www.googleapis.com/auth/devstorage.full_control https://www.googleapis.com/auth/userinfo.email https://www.googleapis.com/auth/ndev.cloudman https://www.googleapis.com/auth/cloud-platform https://www.googleapis.com/auth/sqlservice.admin 
https://www.googleapis.com/auth/prediction https://www.googleapis.com/auth/projecthosting", + "type": "google-cloud-sdk" + } + } + ], + "file_version": 1 +} diff --git a/vendor/golang.org/x/oauth2/google/testdata/gcloud/properties b/vendor/golang.org/x/oauth2/google/testdata/gcloud/properties new file mode 100644 index 000000000..025de886c --- /dev/null +++ b/vendor/golang.org/x/oauth2/google/testdata/gcloud/properties @@ -0,0 +1,2 @@ +[core] +account = bar@example.com \ No newline at end of file diff --git a/vendor/golang.org/x/oauth2/internal/oauth2.go b/vendor/golang.org/x/oauth2/internal/oauth2.go new file mode 100644 index 000000000..fbe1028d6 --- /dev/null +++ b/vendor/golang.org/x/oauth2/internal/oauth2.go @@ -0,0 +1,76 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package internal contains support packages for oauth2 package. +package internal + +import ( + "bufio" + "crypto/rsa" + "crypto/x509" + "encoding/pem" + "errors" + "fmt" + "io" + "strings" +) + +// ParseKey converts the binary contents of a private key file +// to an *rsa.PrivateKey. It detects whether the private key is in a +// PEM container or not. If so, it extracts the private key +// from the PEM container before conversion. It only supports PEM +// containers with no passphrase. +func ParseKey(key []byte) (*rsa.PrivateKey, error) { + block, _ := pem.Decode(key) + if block != nil { + key = block.Bytes + } + parsedKey, err := x509.ParsePKCS8PrivateKey(key) + if err != nil { + parsedKey, err = x509.ParsePKCS1PrivateKey(key) + if err != nil { + return nil, fmt.Errorf("private key should be a PEM or plain PKCS1 or PKCS8; parse error: %v", err) + } + } + parsed, ok := parsedKey.(*rsa.PrivateKey) + if !ok { + return nil, errors.New("private key is invalid") + } + return parsed, nil +} + +func ParseINI(ini io.Reader) (map[string]map[string]string, error) { + result := map[string]map[string]string{ + "": map[string]string{}, // root section + } + scanner := bufio.NewScanner(ini) + currentSection := "" + for scanner.Scan() { + line := strings.TrimSpace(scanner.Text()) + if strings.HasPrefix(line, ";") { + // comment. + continue + } + if strings.HasPrefix(line, "[") && strings.HasSuffix(line, "]") { + currentSection = strings.TrimSpace(line[1 : len(line)-1]) + result[currentSection] = map[string]string{} + continue + } + parts := strings.SplitN(line, "=", 2) + if len(parts) == 2 && parts[0] != "" { + result[currentSection][strings.TrimSpace(parts[0])] = strings.TrimSpace(parts[1]) + } + } + if err := scanner.Err(); err != nil { + return nil, fmt.Errorf("error scanning ini: %v", err) + } + return result, nil +} + +func CondVal(v string) []string { + if v == "" { + return nil + } + return []string{v} +} diff --git a/vendor/golang.org/x/oauth2/internal/oauth2_test.go b/vendor/golang.org/x/oauth2/internal/oauth2_test.go new file mode 100644 index 000000000..c61585542 --- /dev/null +++ b/vendor/golang.org/x/oauth2/internal/oauth2_test.go @@ -0,0 +1,62 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package internal contains support packages for oauth2 package.
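ParseINI above is the piece that lets the google package resolve the active account from a properties file like the testdata just added. A minimal sketch of that lookup, written as it might appear inside the internal package itself (the package cannot be imported from outside golang.org/x/oauth2, and the testdata path shown is borrowed from the google package purely for illustration):

    // Requires "fmt", "log", and "os" among the imports.
    func ExampleParseINI() {
        f, err := os.Open("testdata/gcloud/properties")
        if err != nil {
            log.Fatal(err)
        }
        defer f.Close()
        ini, err := ParseINI(f)
        if err != nil {
            log.Fatal(err)
        }
        // Section headers become map keys, so the [core] account is:
        fmt.Println(ini["core"]["account"])
        // Output: bar@example.com
    }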
+package internal + +import ( + "reflect" + "strings" + "testing" +) + +func TestParseINI(t *testing.T) { + tests := []struct { + ini string + want map[string]map[string]string + }{ + { + `root = toor +[foo] +bar = hop +ini = nin +`, + map[string]map[string]string{ + "": map[string]string{"root": "toor"}, + "foo": map[string]string{"bar": "hop", "ini": "nin"}, + }, + }, + { + `[empty] +[section] +empty= +`, + map[string]map[string]string{ + "": map[string]string{}, + "empty": map[string]string{}, + "section": map[string]string{"empty": ""}, + }, + }, + { + `ignore +[invalid +=stuff +;comment=true +`, + map[string]map[string]string{ + "": map[string]string{}, + }, + }, + } + for _, tt := range tests { + result, err := ParseINI(strings.NewReader(tt.ini)) + if err != nil { + t.Errorf("ParseINI(%q) error %v, want: no error", tt.ini, err) + continue + } + if !reflect.DeepEqual(result, tt.want) { + t.Errorf("ParseINI(%q) = %#v, want: %#v", tt.ini, result, tt.want) + } + } +} diff --git a/vendor/golang.org/x/oauth2/internal/token.go b/vendor/golang.org/x/oauth2/internal/token.go new file mode 100644 index 000000000..39caf6c61 --- /dev/null +++ b/vendor/golang.org/x/oauth2/internal/token.go @@ -0,0 +1,221 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package internal contains support packages for oauth2 package. +package internal + +import ( + "encoding/json" + "fmt" + "io" + "io/ioutil" + "mime" + "net/http" + "net/url" + "strconv" + "strings" + "time" + + "golang.org/x/net/context" +) + +// Token represents the credentials used to authorize +// the requests to access protected resources on the OAuth 2.0 +// provider's backend. +// +// This type is a mirror of oauth2.Token and exists to break +// an otherwise-circular dependency. Other internal packages +// should convert this Token into an oauth2.Token before use. +type Token struct { + // AccessToken is the token that authorizes and authenticates + // the requests. + AccessToken string + + // TokenType is the type of token. + // The Type method returns either this or "Bearer", the default. + TokenType string + + // RefreshToken is a token that's used by the application + // (as opposed to the user) to refresh the access token + // if it expires. + RefreshToken string + + // Expiry is the optional expiration time of the access token. + // + // If zero, TokenSource implementations will reuse the same + // token forever and RefreshToken or equivalent + // mechanisms for that TokenSource will not be used. + Expiry time.Time + + // Raw optionally contains extra metadata from the server + // when updating a token. + Raw interface{} +} + +// tokenJSON is the struct representing the HTTP response from OAuth2 +// providers returning a token in JSON form.
+type tokenJSON struct { + AccessToken string `json:"access_token"` + TokenType string `json:"token_type"` + RefreshToken string `json:"refresh_token"` + ExpiresIn expirationTime `json:"expires_in"` // at least PayPal returns string, while most return number + Expires expirationTime `json:"expires"` // broken Facebook spelling of expires_in +} + +func (e *tokenJSON) expiry() (t time.Time) { + if v := e.ExpiresIn; v != 0 { + return time.Now().Add(time.Duration(v) * time.Second) + } + if v := e.Expires; v != 0 { + return time.Now().Add(time.Duration(v) * time.Second) + } + return +} + +type expirationTime int32 + +func (e *expirationTime) UnmarshalJSON(b []byte) error { + var n json.Number + err := json.Unmarshal(b, &n) + if err != nil { + return err + } + i, err := n.Int64() + if err != nil { + return err + } + *e = expirationTime(i) + return nil +} + +var brokenAuthHeaderProviders = []string{ + "https://accounts.google.com/", + "https://api.dropbox.com/", + "https://api.instagram.com/", + "https://api.netatmo.net/", + "https://api.odnoklassniki.ru/", + "https://api.pushbullet.com/", + "https://api.soundcloud.com/", + "https://api.twitch.tv/", + "https://app.box.com/", + "https://connect.stripe.com/", + "https://login.microsoftonline.com/", + "https://login.salesforce.com/", + "https://oauth.sandbox.trainingpeaks.com/", + "https://oauth.trainingpeaks.com/", + "https://oauth.vk.com/", + "https://slack.com/", + "https://test-sandbox.auth.corp.google.com", + "https://test.salesforce.com/", + "https://user.gini.net/", + "https://www.douban.com/", + "https://www.googleapis.com/", + "https://www.linkedin.com/", + "https://www.strava.com/oauth/", +} + +func RegisterBrokenAuthHeaderProvider(tokenURL string) { + brokenAuthHeaderProviders = append(brokenAuthHeaderProviders, tokenURL) +} + +// providerAuthHeaderWorks reports whether the OAuth2 server identified by the tokenURL +// implements the OAuth2 spec correctly. +// See https://code.google.com/p/goauth2/issues/detail?id=31 for background. +// In summary: +// - Reddit only accepts client secret in the Authorization header. +// - Dropbox accepts it either in the URL param or the Auth header, but not both. +// - Google only accepts the URL param (not spec compliant?), not the Auth header. +// - Stripe only accepts client secret in the Auth header with the Bearer method, not Basic. +func providerAuthHeaderWorks(tokenURL string) bool { + for _, s := range brokenAuthHeaderProviders { + if strings.HasPrefix(tokenURL, s) { + // Some sites fail to implement the OAuth2 spec fully. + return false + } + } + + // Assume the provider implements the spec properly + // otherwise. We can add more exceptions as they're + // discovered. We will _not_ be adding configurable hooks + // to this package to let users select server bugs.
+ return true +} + +func RetrieveToken(ctx context.Context, ClientID, ClientSecret, TokenURL string, v url.Values) (*Token, error) { + hc, err := ContextClient(ctx) + if err != nil { + return nil, err + } + v.Set("client_id", ClientID) + bustedAuth := !providerAuthHeaderWorks(TokenURL) + if bustedAuth && ClientSecret != "" { + v.Set("client_secret", ClientSecret) + } + req, err := http.NewRequest("POST", TokenURL, strings.NewReader(v.Encode())) + if err != nil { + return nil, err + } + req.Header.Set("Content-Type", "application/x-www-form-urlencoded") + if !bustedAuth { + req.SetBasicAuth(ClientID, ClientSecret) + } + r, err := hc.Do(req) + if err != nil { + return nil, err + } + defer r.Body.Close() + body, err := ioutil.ReadAll(io.LimitReader(r.Body, 1<<20)) + if err != nil { + return nil, fmt.Errorf("oauth2: cannot fetch token: %v", err) + } + if code := r.StatusCode; code < 200 || code > 299 { + return nil, fmt.Errorf("oauth2: cannot fetch token: %v\nResponse: %s", r.Status, body) + } + + var token *Token + content, _, _ := mime.ParseMediaType(r.Header.Get("Content-Type")) + switch content { + case "application/x-www-form-urlencoded", "text/plain": + vals, err := url.ParseQuery(string(body)) + if err != nil { + return nil, err + } + token = &Token{ + AccessToken: vals.Get("access_token"), + TokenType: vals.Get("token_type"), + RefreshToken: vals.Get("refresh_token"), + Raw: vals, + } + e := vals.Get("expires_in") + if e == "" { + // TODO(jbd): Facebook's OAuth2 implementation is broken and + // returns expires_in field in expires. Remove the fallback to expires, + // when Facebook fixes their implementation. + e = vals.Get("expires") + } + expires, _ := strconv.Atoi(e) + if expires != 0 { + token.Expiry = time.Now().Add(time.Duration(expires) * time.Second) + } + default: + var tj tokenJSON + if err = json.Unmarshal(body, &tj); err != nil { + return nil, err + } + token = &Token{ + AccessToken: tj.AccessToken, + TokenType: tj.TokenType, + RefreshToken: tj.RefreshToken, + Expiry: tj.expiry(), + Raw: make(map[string]interface{}), + } + json.Unmarshal(body, &token.Raw) // no error checks for optional fields + } + // Don't overwrite `RefreshToken` with an empty value + // if this was a token refreshing request. + if token.RefreshToken == "" { + token.RefreshToken = v.Get("refresh_token") + } + return token, nil +} diff --git a/vendor/golang.org/x/oauth2/internal/token_test.go b/vendor/golang.org/x/oauth2/internal/token_test.go new file mode 100644 index 000000000..d8d1e989f --- /dev/null +++ b/vendor/golang.org/x/oauth2/internal/token_test.go @@ -0,0 +1,36 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package internal contains support packages for oauth2 package. 
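RetrieveToken above is the single funnel for every grant type, and brokenAuthHeaderProviders decides whether it authenticates with HTTP Basic auth or falls back to form values. Providers not on the list can be registered by callers through the public wrapper (defined later in this patch in oauth2.go); a minimal sketch with a placeholder URL:

    package main

    import "golang.org/x/oauth2"

    func init() {
        // Hypothetical provider whose token endpoint rejects HTTP Basic
        // auth. After registration, RetrieveToken skips the Authorization
        // header and sends client_secret in the form body instead.
        oauth2.RegisterBrokenAuthHeaderProvider("https://sso.example.internal/")
    }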
+package internal + +import ( + "fmt" + "testing" +) + +func TestRegisterBrokenAuthHeaderProvider(t *testing.T) { + RegisterBrokenAuthHeaderProvider("https://aaa.com/") + tokenURL := "https://aaa.com/token" + if providerAuthHeaderWorks(tokenURL) { + t.Errorf("URL: %s is a broken provider", tokenURL) + } +} + +func Test_providerAuthHeaderWorks(t *testing.T) { + for _, p := range brokenAuthHeaderProviders { + if providerAuthHeaderWorks(p) { + t.Errorf("URL: %s not found in list", p) + } + p := fmt.Sprintf("%ssomesuffix", p) + if providerAuthHeaderWorks(p) { + t.Errorf("URL: %s not found in list", p) + } + } + p := "https://api.not-in-the-list-example.com/" + if !providerAuthHeaderWorks(p) { + t.Errorf("URL: %s found in list", p) + } + +} diff --git a/vendor/golang.org/x/oauth2/internal/transport.go b/vendor/golang.org/x/oauth2/internal/transport.go new file mode 100644 index 000000000..f1f173e34 --- /dev/null +++ b/vendor/golang.org/x/oauth2/internal/transport.go @@ -0,0 +1,69 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package internal contains support packages for oauth2 package. +package internal + +import ( + "net/http" + + "golang.org/x/net/context" +) + +// HTTPClient is the context key to use with golang.org/x/net/context's +// WithValue function to associate an *http.Client value with a context. +var HTTPClient ContextKey + +// ContextKey is just an empty struct. It exists so HTTPClient can be +// an immutable public variable with a unique type. It's immutable +// because nobody else can create a ContextKey, being unexported. +type ContextKey struct{} + +// ContextClientFunc is a func which tries to return an *http.Client +// given a Context value. If it returns an error, the search stops +// with that error. If it returns (nil, nil), the search continues +// down the list of registered funcs. +type ContextClientFunc func(context.Context) (*http.Client, error) + +var contextClientFuncs []ContextClientFunc + +func RegisterContextClientFunc(fn ContextClientFunc) { + contextClientFuncs = append(contextClientFuncs, fn) +} + +func ContextClient(ctx context.Context) (*http.Client, error) { + if ctx != nil { + if hc, ok := ctx.Value(HTTPClient).(*http.Client); ok { + return hc, nil + } + } + for _, fn := range contextClientFuncs { + c, err := fn(ctx) + if err != nil { + return nil, err + } + if c != nil { + return c, nil + } + } + return http.DefaultClient, nil +} + +func ContextTransport(ctx context.Context) http.RoundTripper { + hc, err := ContextClient(ctx) + // This is a rare error case (somebody using nil on App Engine). + if err != nil { + return ErrorTransport{err} + } + return hc.Transport +} + +// ErrorTransport returns the specified error on RoundTrip. +// This RoundTripper should be used in rare error cases where +// error handling can be postponed to response handling time. +type ErrorTransport struct{ Err error } + +func (t ErrorTransport) RoundTrip(*http.Request) (*http.Response, error) { + return nil, t.Err +} diff --git a/vendor/golang.org/x/oauth2/internal/transport_test.go b/vendor/golang.org/x/oauth2/internal/transport_test.go new file mode 100644 index 000000000..313f637cd --- /dev/null +++ b/vendor/golang.org/x/oauth2/internal/transport_test.go @@ -0,0 +1,38 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
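transport.go above defines the client-selection chain used by every token fetch: an *http.Client stored on the context wins, then any registered ContextClientFunc, then http.DefaultClient. A minimal sketch of supplying a custom client for a code exchange; the TLS setting is merely one illustrative reason to do so:

    package main

    import (
        "crypto/tls"
        "net/http"

        "golang.org/x/net/context"
        "golang.org/x/oauth2"
    )

    func exchangeWithCustomClient(conf *oauth2.Config, code string) (*oauth2.Token, error) {
        hc := &http.Client{Transport: &http.Transport{
            TLSClientConfig: &tls.Config{MinVersion: tls.VersionTLS12},
        }}
        // oauth2.HTTPClient (defined later in oauth2.go) shares the empty
        // ContextKey type above, so internal.ContextClient finds hc.
        ctx := context.WithValue(context.Background(), oauth2.HTTPClient, hc)
        return conf.Exchange(ctx, code)
    }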
+ +package internal + +import ( + "net/http" + "testing" + + "golang.org/x/net/context" +) + +func TestContextClient(t *testing.T) { + rc := &http.Client{} + RegisterContextClientFunc(func(context.Context) (*http.Client, error) { + return rc, nil + }) + + c := &http.Client{} + ctx := context.WithValue(nil, HTTPClient, c) + + hc, err := ContextClient(ctx) + if err != nil { + t.Fatalf("want valid client; got err = %v", err) + } + if hc != c { + t.Fatalf("want context client = %p; got = %p", c, hc) + } + + hc, err = ContextClient(context.TODO()) + if err != nil { + t.Fatalf("want valid client; got err = %v", err) + } + if hc != rc { + t.Fatalf("want registered client = %p; got = %p", c, hc) + } +} diff --git a/vendor/golang.org/x/oauth2/jws/jws.go b/vendor/golang.org/x/oauth2/jws/jws.go new file mode 100644 index 000000000..b46edb27c --- /dev/null +++ b/vendor/golang.org/x/oauth2/jws/jws.go @@ -0,0 +1,172 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package jws provides encoding and decoding utilities for +// signed JWS messages. +package jws + +import ( + "bytes" + "crypto" + "crypto/rand" + "crypto/rsa" + "crypto/sha256" + "encoding/base64" + "encoding/json" + "errors" + "fmt" + "strings" + "time" +) + +// ClaimSet contains information about the JWT signature including the +// permissions being requested (scopes), the target of the token, the issuer, +// the time the token was issued, and the lifetime of the token. +type ClaimSet struct { + Iss string `json:"iss"` // email address of the client_id of the application making the access token request + Scope string `json:"scope,omitempty"` // space-delimited list of the permissions the application requests + Aud string `json:"aud"` // descriptor of the intended target of the assertion (Optional). + Exp int64 `json:"exp"` // the expiration time of the assertion (seconds since Unix epoch) + Iat int64 `json:"iat"` // the time the assertion was issued (seconds since Unix epoch) + Typ string `json:"typ,omitempty"` // token type (Optional). + + // Email for which the application is requesting delegated access (Optional). + Sub string `json:"sub,omitempty"` + + // The old name of Sub. Client keeps setting Prn to be + // compliant with legacy OAuth 2.0 providers. (Optional) + Prn string `json:"prn,omitempty"` + + // See http://tools.ietf.org/html/draft-jones-json-web-token-10#section-4.3 + // This map is marshalled using custom code (see (c *ClaimSet) encode()). + PrivateClaims map[string]interface{} `json:"-"` +} + +func (c *ClaimSet) encode() (string, error) { + // Backdate the issuance time slightly for machines whose clocks are not perfectly in sync. + // If the client machine's time is in the future according + // to Google servers, an access token will not be issued. + now := time.Now().Add(-10 * time.Second) + if c.Iat == 0 { + c.Iat = now.Unix() + } + if c.Exp == 0 { + c.Exp = now.Add(time.Hour).Unix() + } + if c.Exp < c.Iat { + return "", fmt.Errorf("jws: invalid Exp = %v; must be later than Iat = %v", c.Exp, c.Iat) + } + + b, err := json.Marshal(c) + if err != nil { + return "", err + } + + if len(c.PrivateClaims) == 0 { + return base64Encode(b), nil + } + + // Marshal private claim set and then append it to b. + prv, err := json.Marshal(c.PrivateClaims) + if err != nil { + return "", fmt.Errorf("jws: invalid map of private claims %v", c.PrivateClaims) + } + + // Concatenate public and private claim JSON objects.
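+ // For example, {"iss":"joe","exp":1300819380} and {"dept":"eng"} are + // spliced into {"iss":"joe","exp":1300819380,"dept":"eng"}: the closing + // brace of the public object becomes a comma and the opening brace of + // the private object is dropped. (Example values are illustrative only.)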
+ if !bytes.HasSuffix(b, []byte{'}'}) { + return "", fmt.Errorf("jws: invalid JSON %s", b) + } + if !bytes.HasPrefix(prv, []byte{'{'}) { + return "", fmt.Errorf("jws: invalid JSON %s", prv) + } + b[len(b)-1] = ',' // Replace closing curly brace with a comma. + b = append(b, prv[1:]...) // Append private claims. + return base64Encode(b), nil +} + +// Header represents the header for the signed JWS payloads. +type Header struct { + // The algorithm used for signature. + Algorithm string `json:"alg"` + + // Represents the token type. + Typ string `json:"typ"` +} + +func (h *Header) encode() (string, error) { + b, err := json.Marshal(h) + if err != nil { + return "", err + } + return base64Encode(b), nil +} + +// Decode decodes a claim set from a JWS payload. +func Decode(payload string) (*ClaimSet, error) { + // decode returned id token to get expiry + s := strings.Split(payload, ".") + if len(s) < 2 { + // TODO(jbd): Provide more context about the error. + return nil, errors.New("jws: invalid token received") + } + decoded, err := base64Decode(s[1]) + if err != nil { + return nil, err + } + c := &ClaimSet{} + err = json.NewDecoder(bytes.NewBuffer(decoded)).Decode(c) + return c, err +} + +// A Signer returns a signature for the given data. +type Signer func(data []byte) (sig []byte, err error) + +// EncodeWithSigner encodes a header and claim set with the provided signer. +func EncodeWithSigner(header *Header, c *ClaimSet, sg Signer) (string, error) { + head, err := header.encode() + if err != nil { + return "", err + } + cs, err := c.encode() + if err != nil { + return "", err + } + ss := fmt.Sprintf("%s.%s", head, cs) + sig, err := sg([]byte(ss)) + if err != nil { + return "", err + } + return fmt.Sprintf("%s.%s", ss, base64Encode(sig)), nil +} + +// Encode encodes a signed JWS with the provided header and claim set. +// This invokes EncodeWithSigner using crypto/rsa.SignPKCS1v15 with the given RSA private key. +func Encode(header *Header, c *ClaimSet, key *rsa.PrivateKey) (string, error) { + sg := func(data []byte) (sig []byte, err error) { + h := sha256.New() + h.Write([]byte(data)) + return rsa.SignPKCS1v15(rand.Reader, key, crypto.SHA256, h.Sum(nil)) + } + return EncodeWithSigner(header, c, sg) +} + +// base64Encode returns a Base64url encoded version of the input with any +// trailing "=" stripped. +func base64Encode(b []byte) string { + return strings.TrimRight(base64.URLEncoding.EncodeToString(b), "=") +} + +// base64Decode decodes the Base64url encoded string. +func base64Decode(s string) ([]byte, error) { + // add back missing padding + switch len(s) % 4 { + case 1: + s += "===" + case 2: + s += "==" + case 3: + s += "=" + } + return base64.URLEncoding.DecodeString(s) +} diff --git a/vendor/golang.org/x/oauth2/jwt/example_test.go b/vendor/golang.org/x/oauth2/jwt/example_test.go new file mode 100644 index 000000000..a9533e85f --- /dev/null +++ b/vendor/golang.org/x/oauth2/jwt/example_test.go @@ -0,0 +1,31 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package jwt_test + +import ( + "golang.org/x/oauth2" + "golang.org/x/oauth2/jwt" +) + +func ExampleJWTConfig() { + conf := &jwt.Config{ + Email: "xxx@developer.com", + // The contents of your RSA private key or your PEM file + // that contains a private key. + // If you have a p12 file instead, you + // can use `openssl` to export the private key into a pem file.
+ // + // $ openssl pkcs12 -in key.p12 -out key.pem -nodes + // + // It only supports PEM containers with no passphrase. + PrivateKey: []byte("-----BEGIN RSA PRIVATE KEY-----..."), + Subject: "user@example.com", + TokenURL: "https://provider.com/o/oauth2/token", + } + // Initiate an http.Client; the following GET request will be + // authorized and authenticated on behalf of user@example.com. + client := conf.Client(oauth2.NoContext) + client.Get("...") +} diff --git a/vendor/golang.org/x/oauth2/jwt/jwt.go b/vendor/golang.org/x/oauth2/jwt/jwt.go new file mode 100644 index 000000000..2ffad21a6 --- /dev/null +++ b/vendor/golang.org/x/oauth2/jwt/jwt.go @@ -0,0 +1,153 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package jwt implements the OAuth 2.0 JSON Web Token flow, commonly +// known as "two-legged OAuth 2.0". +// +// See: https://tools.ietf.org/html/draft-ietf-oauth-jwt-bearer-12 +package jwt + +import ( + "encoding/json" + "fmt" + "io" + "io/ioutil" + "net/http" + "net/url" + "strings" + "time" + + "golang.org/x/net/context" + "golang.org/x/oauth2" + "golang.org/x/oauth2/internal" + "golang.org/x/oauth2/jws" +) + +var ( + defaultGrantType = "urn:ietf:params:oauth:grant-type:jwt-bearer" + defaultHeader = &jws.Header{Algorithm: "RS256", Typ: "JWT"} +) + +// Config is the configuration for using JWT to fetch tokens, +// commonly known as "two-legged OAuth 2.0". +type Config struct { + // Email is the OAuth client identifier used when communicating with + // the configured OAuth provider. + Email string + + // PrivateKey contains the contents of an RSA private key or the + // contents of a PEM file that contains a private key. The provided + // private key is used to sign JWT payloads. + // PEM containers with a passphrase are not supported. + // Use the following command to convert a PKCS 12 file into a PEM. + // + // $ openssl pkcs12 -in key.p12 -out key.pem -nodes + // + PrivateKey []byte + + // Subject is the optional user to impersonate. + Subject string + + // Scopes optionally specifies a list of requested permission scopes. + Scopes []string + + // TokenURL is the endpoint required to complete the 2-legged JWT flow. + TokenURL string + + // Expires optionally specifies how long the token is valid for. + Expires time.Duration +} + +// TokenSource returns a JWT TokenSource using the configuration +// in c and the HTTP client from the provided context. +func (c *Config) TokenSource(ctx context.Context) oauth2.TokenSource { + return oauth2.ReuseTokenSource(nil, jwtSource{ctx, c}) +} + +// Client returns an HTTP client wrapping the context's +// HTTP transport and adding Authorization headers with tokens +// obtained from c. +// +// The returned client and its Transport should not be modified. +func (c *Config) Client(ctx context.Context) *http.Client { + return oauth2.NewClient(ctx, c.TokenSource(ctx)) +} + +// jwtSource is a source that always does a signed JWT request for a token. +// It should typically be wrapped with a reuseTokenSource.
+type jwtSource struct { + ctx context.Context + conf *Config +} + +func (js jwtSource) Token() (*oauth2.Token, error) { + pk, err := internal.ParseKey(js.conf.PrivateKey) + if err != nil { + return nil, err + } + hc := oauth2.NewClient(js.ctx, nil) + claimSet := &jws.ClaimSet{ + Iss: js.conf.Email, + Scope: strings.Join(js.conf.Scopes, " "), + Aud: js.conf.TokenURL, + } + if subject := js.conf.Subject; subject != "" { + claimSet.Sub = subject + // prn is the old name of sub. Keep setting it + // to be compatible with legacy OAuth 2.0 providers. + claimSet.Prn = subject + } + if t := js.conf.Expires; t > 0 { + claimSet.Exp = time.Now().Add(t).Unix() + } + payload, err := jws.Encode(defaultHeader, claimSet, pk) + if err != nil { + return nil, err + } + v := url.Values{} + v.Set("grant_type", defaultGrantType) + v.Set("assertion", payload) + resp, err := hc.PostForm(js.conf.TokenURL, v) + if err != nil { + return nil, fmt.Errorf("oauth2: cannot fetch token: %v", err) + } + defer resp.Body.Close() + body, err := ioutil.ReadAll(io.LimitReader(resp.Body, 1<<20)) + if err != nil { + return nil, fmt.Errorf("oauth2: cannot fetch token: %v", err) + } + if c := resp.StatusCode; c < 200 || c > 299 { + return nil, fmt.Errorf("oauth2: cannot fetch token: %v\nResponse: %s", resp.Status, body) + } + // tokenRes is the JSON response body. + var tokenRes struct { + AccessToken string `json:"access_token"` + TokenType string `json:"token_type"` + IDToken string `json:"id_token"` + ExpiresIn int64 `json:"expires_in"` // relative seconds from now + } + if err := json.Unmarshal(body, &tokenRes); err != nil { + return nil, fmt.Errorf("oauth2: cannot fetch token: %v", err) + } + token := &oauth2.Token{ + AccessToken: tokenRes.AccessToken, + TokenType: tokenRes.TokenType, + } + raw := make(map[string]interface{}) + json.Unmarshal(body, &raw) // no error checks for optional fields + token = token.WithExtra(raw) + + if secs := tokenRes.ExpiresIn; secs > 0 { + token.Expiry = time.Now().Add(time.Duration(secs) * time.Second) + } + if v := tokenRes.IDToken; v != "" { + // decode returned id token to get expiry + claimSet, err := jws.Decode(v) + if err != nil { + return nil, fmt.Errorf("oauth2: error decoding JWT token: %v", err) + } + token.Expiry = time.Unix(claimSet.Exp, 0) + } + return token, nil +} diff --git a/vendor/golang.org/x/oauth2/jwt/jwt_test.go b/vendor/golang.org/x/oauth2/jwt/jwt_test.go new file mode 100644 index 000000000..a9c126b47 --- /dev/null +++ b/vendor/golang.org/x/oauth2/jwt/jwt_test.go @@ -0,0 +1,134 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
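What jwtSource.Token signs and posts is easier to see in isolation. A minimal sketch using the jws package directly, generating a throwaway key where a real caller would load a service-account PEM (all names and URLs are placeholders):

    package main

    import (
        "crypto/rand"
        "crypto/rsa"
        "fmt"
        "log"

        "golang.org/x/oauth2/jws"
    )

    func main() {
        key, err := rsa.GenerateKey(rand.Reader, 2048)
        if err != nil {
            log.Fatal(err)
        }
        claims := &jws.ClaimSet{
            Iss:   "svc@example.com",
            Scope: "scope1 scope2",
            Aud:   "https://provider.com/o/oauth2/token",
        }
        header := &jws.Header{Algorithm: "RS256", Typ: "JWT"}
        // assertion is three dot-separated base64url segments:
        // header.claims.signature. jwtSource posts it as the "assertion"
        // form field with the jwt-bearer grant type.
        assertion, err := jws.Encode(header, claims, key)
        if err != nil {
            log.Fatal(err)
        }
        fmt.Println(assertion)
    }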
+ +package jwt + +import ( + "net/http" + "net/http/httptest" + "testing" + + "golang.org/x/oauth2" +) + +var dummyPrivateKey = []byte(`-----BEGIN RSA PRIVATE KEY----- +MIIEpAIBAAKCAQEAx4fm7dngEmOULNmAs1IGZ9Apfzh+BkaQ1dzkmbUgpcoghucE +DZRnAGd2aPyB6skGMXUytWQvNYav0WTR00wFtX1ohWTfv68HGXJ8QXCpyoSKSSFY +fuP9X36wBSkSX9J5DVgiuzD5VBdzUISSmapjKm+DcbRALjz6OUIPEWi1Tjl6p5RK +1w41qdbmt7E5/kGhKLDuT7+M83g4VWhgIvaAXtnhklDAggilPPa8ZJ1IFe31lNlr +k4DRk38nc6sEutdf3RL7QoH7FBusI7uXV03DC6dwN1kP4GE7bjJhcRb/7jYt7CQ9 +/E9Exz3c0yAp0yrTg0Fwh+qxfH9dKwN52S7SBwIDAQABAoIBAQCaCs26K07WY5Jt +3a2Cw3y2gPrIgTCqX6hJs7O5ByEhXZ8nBwsWANBUe4vrGaajQHdLj5OKfsIDrOvn +2NI1MqflqeAbu/kR32q3tq8/Rl+PPiwUsW3E6Pcf1orGMSNCXxeducF2iySySzh3 +nSIhCG5uwJDWI7a4+9KiieFgK1pt/Iv30q1SQS8IEntTfXYwANQrfKUVMmVF9aIK +6/WZE2yd5+q3wVVIJ6jsmTzoDCX6QQkkJICIYwCkglmVy5AeTckOVwcXL0jqw5Kf +5/soZJQwLEyBoQq7Kbpa26QHq+CJONetPP8Ssy8MJJXBT+u/bSseMb3Zsr5cr43e +DJOhwsThAoGBAPY6rPKl2NT/K7XfRCGm1sbWjUQyDShscwuWJ5+kD0yudnT/ZEJ1 +M3+KS/iOOAoHDdEDi9crRvMl0UfNa8MAcDKHflzxg2jg/QI+fTBjPP5GOX0lkZ9g +z6VePoVoQw2gpPFVNPPTxKfk27tEzbaffvOLGBEih0Kb7HTINkW8rIlzAoGBAM9y +1yr+jvfS1cGFtNU+Gotoihw2eMKtIqR03Yn3n0PK1nVCDKqwdUqCypz4+ml6cxRK +J8+Pfdh7D+ZJd4LEG6Y4QRDLuv5OA700tUoSHxMSNn3q9As4+T3MUyYxWKvTeu3U +f2NWP9ePU0lV8ttk7YlpVRaPQmc1qwooBA/z/8AdAoGAW9x0HWqmRICWTBnpjyxx +QGlW9rQ9mHEtUotIaRSJ6K/F3cxSGUEkX1a3FRnp6kPLcckC6NlqdNgNBd6rb2rA +cPl/uSkZP42Als+9YMoFPU/xrrDPbUhu72EDrj3Bllnyb168jKLa4VBOccUvggxr +Dm08I1hgYgdN5huzs7y6GeUCgYEAj+AZJSOJ6o1aXS6rfV3mMRve9bQ9yt8jcKXw +5HhOCEmMtaSKfnOF1Ziih34Sxsb7O2428DiX0mV/YHtBnPsAJidL0SdLWIapBzeg +KHArByIRkwE6IvJvwpGMdaex1PIGhx5i/3VZL9qiq/ElT05PhIb+UXgoWMabCp84 +OgxDK20CgYAeaFo8BdQ7FmVX2+EEejF+8xSge6WVLtkaon8bqcn6P0O8lLypoOhd +mJAYH8WU+UAy9pecUnDZj14LAGNVmYcse8HFX71MoshnvCTFEPVo4rZxIAGwMpeJ +5jgQ3slYLpqrGlcbLgUXBUgzEO684Wk/UV9DFPlHALVqCfXQ9dpJPg== +-----END RSA PRIVATE KEY-----`) + +func TestJWTFetch_JSONResponse(t *testing.T) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/json") + w.Write([]byte(`{ + "access_token": "90d64460d14870c08c81352a05dedd3465940a7c", + "scope": "user", + "token_type": "bearer", + "expires_in": 3600 + }`)) + })) + defer ts.Close() + + conf := &Config{ + Email: "aaa@xxx.com", + PrivateKey: dummyPrivateKey, + TokenURL: ts.URL, + } + tok, err := conf.TokenSource(oauth2.NoContext).Token() + if err != nil { + t.Fatal(err) + } + if !tok.Valid() { + t.Errorf("Token invalid") + } + if tok.AccessToken != "90d64460d14870c08c81352a05dedd3465940a7c" { + t.Errorf("Unexpected access token, %#v", tok.AccessToken) + } + if tok.TokenType != "bearer" { + t.Errorf("Unexpected token type, %#v", tok.TokenType) + } + if tok.Expiry.IsZero() { + t.Errorf("Unexpected token expiry, %#v", tok.Expiry) + } + scope := tok.Extra("scope") + if scope != "user" { + t.Errorf("Unexpected value for scope: %v", scope) + } +} + +func TestJWTFetch_BadResponse(t *testing.T) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/json") + w.Write([]byte(`{"scope": "user", "token_type": "bearer"}`)) + })) + defer ts.Close() + + conf := &Config{ + Email: "aaa@xxx.com", + PrivateKey: dummyPrivateKey, + TokenURL: ts.URL, + } + tok, err := conf.TokenSource(oauth2.NoContext).Token() + if err != nil { + t.Fatal(err) + } + if tok == nil { + t.Fatalf("token is nil") + } + if tok.Valid() { + t.Errorf("token is valid. 
want invalid.") + } + if tok.AccessToken != "" { + t.Errorf("Unexpected non-empty access token %q.", tok.AccessToken) + } + if want := "bearer"; tok.TokenType != want { + t.Errorf("TokenType = %q; want %q", tok.TokenType, want) + } + scope := tok.Extra("scope") + if want := "user"; scope != want { + t.Errorf("token scope = %q; want %q", scope, want) + } +} + +func TestJWTFetch_BadResponseType(t *testing.T) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/json") + w.Write([]byte(`{"access_token":123, "scope": "user", "token_type": "bearer"}`)) + })) + defer ts.Close() + conf := &Config{ + Email: "aaa@xxx.com", + PrivateKey: dummyPrivateKey, + TokenURL: ts.URL, + } + tok, err := conf.TokenSource(oauth2.NoContext).Token() + if err == nil { + t.Error("got a token; expected error") + if tok.AccessToken != "" { + t.Errorf("Unexpected access token, %#v.", tok.AccessToken) + } + } +} diff --git a/vendor/golang.org/x/oauth2/linkedin/linkedin.go b/vendor/golang.org/x/oauth2/linkedin/linkedin.go new file mode 100644 index 000000000..30c212b10 --- /dev/null +++ b/vendor/golang.org/x/oauth2/linkedin/linkedin.go @@ -0,0 +1,16 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package linkedin provides constants for using OAuth2 to access LinkedIn. +package linkedin + +import ( + "golang.org/x/oauth2" +) + +// Endpoint is LinkedIn's OAuth 2.0 endpoint. +var Endpoint = oauth2.Endpoint{ + AuthURL: "https://www.linkedin.com/uas/oauth2/authorization", + TokenURL: "https://www.linkedin.com/uas/oauth2/accessToken", +} diff --git a/vendor/golang.org/x/oauth2/microsoft/microsoft.go b/vendor/golang.org/x/oauth2/microsoft/microsoft.go new file mode 100644 index 000000000..a097f813e --- /dev/null +++ b/vendor/golang.org/x/oauth2/microsoft/microsoft.go @@ -0,0 +1,16 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package microsoft provides constants for using OAuth2 to access Windows Live ID. +package microsoft + +import ( + "golang.org/x/oauth2" +) + +// LiveConnectEndpoint is Windows's Live ID OAuth 2.0 endpoint. +var LiveConnectEndpoint = oauth2.Endpoint{ + AuthURL: "https://login.live.com/oauth20_authorize.srf", + TokenURL: "https://login.live.com/oauth20_token.srf", +} diff --git a/vendor/golang.org/x/oauth2/oauth2.go b/vendor/golang.org/x/oauth2/oauth2.go new file mode 100644 index 000000000..9b7b977da --- /dev/null +++ b/vendor/golang.org/x/oauth2/oauth2.go @@ -0,0 +1,337 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package oauth2 provides support for making +// OAuth2 authorized and authenticated HTTP requests. +// It can additionally grant authorization with Bearer JWT. +package oauth2 + +import ( + "bytes" + "errors" + "net/http" + "net/url" + "strings" + "sync" + + "golang.org/x/net/context" + "golang.org/x/oauth2/internal" +) + +// NoContext is the default context you should supply if not using +// your own context.Context (see https://golang.org/x/net/context). 
+var NoContext = context.TODO() + +// RegisterBrokenAuthHeaderProvider registers an OAuth2 server +// identified by the tokenURL prefix as an OAuth2 implementation +// which doesn't support the HTTP Basic authentication +// scheme to authenticate with the authorization server. +// Once a server is registered, credentials (client_id and client_secret) +// will be passed as query parameters rather than being present +// in the Authorization header. +// See https://code.google.com/p/goauth2/issues/detail?id=31 for background. +func RegisterBrokenAuthHeaderProvider(tokenURL string) { + internal.RegisterBrokenAuthHeaderProvider(tokenURL) +} + +// Config describes a typical 3-legged OAuth2 flow, with both the +// client application information and the server's endpoint URLs. +type Config struct { + // ClientID is the application's ID. + ClientID string + + // ClientSecret is the application's secret. + ClientSecret string + + // Endpoint contains the resource server's token endpoint + // URLs. These are constants specific to each server and are + // often available via site-specific packages, such as + // google.Endpoint or github.Endpoint. + Endpoint Endpoint + + // RedirectURL is the URL to redirect users back to after they + // complete the authorization flow at the provider. + RedirectURL string + + // Scopes specifies optional requested permissions. + Scopes []string +} + +// A TokenSource is anything that can return a token. +type TokenSource interface { + // Token returns a token or an error. + // Token must be safe for concurrent use by multiple goroutines. + // The returned Token must not be modified. + Token() (*Token, error) +} + +// Endpoint contains the OAuth 2.0 provider's authorization and token +// endpoint URLs. +type Endpoint struct { + AuthURL string + TokenURL string +} + +var ( + // AccessTypeOnline and AccessTypeOffline are options passed + // to the Config.AuthCodeURL method. They modify the + // "access_type" field that gets sent in the URL returned by + // AuthCodeURL. + // + // Online is the default if neither is specified. If your + // application needs to refresh access tokens when the user + // is not present at the browser, then use offline. This will + // result in your application obtaining a refresh token the + // first time your application exchanges an authorization + // code for a user. + AccessTypeOnline AuthCodeOption = SetAuthURLParam("access_type", "online") + AccessTypeOffline AuthCodeOption = SetAuthURLParam("access_type", "offline") + + // ApprovalForce forces the users to view the consent dialog + // and confirm the permissions request at the URL returned + // from AuthCodeURL, even if they've already done so. + ApprovalForce AuthCodeOption = SetAuthURLParam("approval_prompt", "force") +) + +// An AuthCodeOption is passed to Config.AuthCodeURL. +type AuthCodeOption interface { + setValue(url.Values) +} + +type setParam struct{ k, v string } + +func (p setParam) setValue(m url.Values) { m.Set(p.k, p.v) } + +// SetAuthURLParam builds an AuthCodeOption which passes key/value parameters +// to a provider's authorization endpoint. +func SetAuthURLParam(key, value string) AuthCodeOption { + return setParam{key, value} +} + +// AuthCodeURL returns a URL to the OAuth 2.0 provider's consent page +// that asks for permissions for the required scopes explicitly. +// +// State is a token to protect the user from CSRF attacks. You must +// always provide a non-zero string and validate that it matches +// the state query parameter on your redirect callback.
+// See http://tools.ietf.org/html/rfc6749#section-10.12 for more info. +// +// Opts may include AccessTypeOnline or AccessTypeOffline, as well +// as ApprovalForce. +func (c *Config) AuthCodeURL(state string, opts ...AuthCodeOption) string { + var buf bytes.Buffer + buf.WriteString(c.Endpoint.AuthURL) + v := url.Values{ + "response_type": {"code"}, + "client_id": {c.ClientID}, + "redirect_uri": internal.CondVal(c.RedirectURL), + "scope": internal.CondVal(strings.Join(c.Scopes, " ")), + "state": internal.CondVal(state), + } + for _, opt := range opts { + opt.setValue(v) + } + if strings.Contains(c.Endpoint.AuthURL, "?") { + buf.WriteByte('&') + } else { + buf.WriteByte('?') + } + buf.WriteString(v.Encode()) + return buf.String() +} + +// PasswordCredentialsToken converts a resource owner username and password +// pair into a token. +// +// Per the RFC, this grant type should only be used "when there is a high +// degree of trust between the resource owner and the client (e.g., the client +// is part of the device operating system or a highly privileged application), +// and when other authorization grant types are not available." +// See https://tools.ietf.org/html/rfc6749#section-4.3 for more info. +// +// The HTTP client to use is derived from the context. +// If nil, http.DefaultClient is used. +func (c *Config) PasswordCredentialsToken(ctx context.Context, username, password string) (*Token, error) { + return retrieveToken(ctx, c, url.Values{ + "grant_type": {"password"}, + "username": {username}, + "password": {password}, + "scope": internal.CondVal(strings.Join(c.Scopes, " ")), + }) +} + +// Exchange converts an authorization code into a token. +// +// It is used after a resource provider redirects the user back +// to the Redirect URI (the URL obtained from AuthCodeURL). +// +// The HTTP client to use is derived from the context. +// If a client is not provided via the context, http.DefaultClient is used. +// +// The code will be in the *http.Request.FormValue("code"). Before +// calling Exchange, be sure to validate FormValue("state"). +func (c *Config) Exchange(ctx context.Context, code string) (*Token, error) { + return retrieveToken(ctx, c, url.Values{ + "grant_type": {"authorization_code"}, + "code": {code}, + "redirect_uri": internal.CondVal(c.RedirectURL), + "scope": internal.CondVal(strings.Join(c.Scopes, " ")), + }) +} + +// Client returns an HTTP client using the provided token. +// The token will auto-refresh as necessary. The underlying +// HTTP transport will be obtained using the provided context. +// The returned client and its Transport should not be modified. +func (c *Config) Client(ctx context.Context, t *Token) *http.Client { + return NewClient(ctx, c.TokenSource(ctx, t)) +} + +// TokenSource returns a TokenSource that returns t until t expires, +// automatically refreshing it as necessary using the provided context. +// +// Most users will use Config.Client instead. +func (c *Config) TokenSource(ctx context.Context, t *Token) TokenSource { + tkr := &tokenRefresher{ + ctx: ctx, + conf: c, + } + if t != nil { + tkr.refreshToken = t.RefreshToken + } + return &reuseTokenSource{ + t: t, + new: tkr, + } +} + +// tokenRefresher is a TokenSource that makes "grant_type"=="refresh_token" +// HTTP requests to renew a token using a RefreshToken. 
+type tokenRefresher struct { + ctx context.Context // used to get HTTP requests + conf *Config + refreshToken string +} + +// WARNING: Token is not safe for concurrent access, as it +// updates the tokenRefresher's refreshToken field. +// Within this package, it is used by reuseTokenSource which +// synchronizes calls to this method with its own mutex. +func (tf *tokenRefresher) Token() (*Token, error) { + if tf.refreshToken == "" { + return nil, errors.New("oauth2: token expired and refresh token is not set") + } + + tk, err := retrieveToken(tf.ctx, tf.conf, url.Values{ + "grant_type": {"refresh_token"}, + "refresh_token": {tf.refreshToken}, + }) + + if err != nil { + return nil, err + } + if tf.refreshToken != tk.RefreshToken { + tf.refreshToken = tk.RefreshToken + } + return tk, err +} + +// reuseTokenSource is a TokenSource that holds a single token in memory +// and validates its expiry before each call to retrieve it with +// Token. If it's expired, it will be auto-refreshed using the +// new TokenSource. +type reuseTokenSource struct { + new TokenSource // called when t is expired. + + mu sync.Mutex // guards t + t *Token +} + +// Token returns the current token if it's still valid, else will +// refresh the current token (using the HTTP client information +// captured by the wrapped TokenSource) and return the new one. +func (s *reuseTokenSource) Token() (*Token, error) { + s.mu.Lock() + defer s.mu.Unlock() + if s.t.Valid() { + return s.t, nil + } + t, err := s.new.Token() + if err != nil { + return nil, err + } + s.t = t + return t, nil +} + +// StaticTokenSource returns a TokenSource that always returns the same token. +// Because the provided token t is never refreshed, StaticTokenSource is only +// useful for tokens that never expire. +func StaticTokenSource(t *Token) TokenSource { + return staticTokenSource{t} +} + +// staticTokenSource is a TokenSource that always returns the same Token. +type staticTokenSource struct { + t *Token +} + +func (s staticTokenSource) Token() (*Token, error) { + return s.t, nil +} + +// HTTPClient is the context key to use with golang.org/x/net/context's +// WithValue function to associate an *http.Client value with a context. +var HTTPClient internal.ContextKey + +// NewClient creates an *http.Client from a Context and TokenSource. +// The returned client is not valid beyond the lifetime of the context. +// +// As a special case, if src is nil, a non-OAuth2 client is returned +// using the provided context. This exists to support related OAuth2 +// packages. +func NewClient(ctx context.Context, src TokenSource) *http.Client { + if src == nil { + c, err := internal.ContextClient(ctx) + if err != nil { + return &http.Client{Transport: internal.ErrorTransport{err}} + } + return c + } + return &http.Client{ + Transport: &Transport{ + Base: internal.ContextTransport(ctx), + Source: ReuseTokenSource(nil, src), + }, + } +} + +// ReuseTokenSource returns a TokenSource which repeatedly returns the +// same token as long as it's valid, starting with t. +// When its cached token is invalid, a new token is obtained from src. +// +// ReuseTokenSource is typically used to reuse tokens from a cache +// (such as a file on disk) between runs of a program, rather than +// obtaining new tokens unnecessarily. +// +// The initial token t may be nil, in which case the TokenSource is +// wrapped in a caching version if it isn't one already. This also +// means it's always safe to wrap ReuseTokenSource around any other +// TokenSource without adverse effects.
+func ReuseTokenSource(t *Token, src TokenSource) TokenSource { + // Don't wrap a reuseTokenSource in itself. That would work, + // but cause an unnecessary number of mutex operations. + // Just build the equivalent one. + if rt, ok := src.(*reuseTokenSource); ok { + if t == nil { + // Just use it directly. + return rt + } + src = rt.new + } + return &reuseTokenSource{ + t: t, + new: src, + } +} diff --git a/vendor/golang.org/x/oauth2/oauth2_test.go b/vendor/golang.org/x/oauth2/oauth2_test.go new file mode 100644 index 000000000..448673b51 --- /dev/null +++ b/vendor/golang.org/x/oauth2/oauth2_test.go @@ -0,0 +1,471 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package oauth2 + +import ( + "encoding/json" + "errors" + "fmt" + "io/ioutil" + "net/http" + "net/http/httptest" + "net/url" + "reflect" + "strconv" + "testing" + "time" + + "golang.org/x/net/context" +) + +type mockTransport struct { + rt func(req *http.Request) (resp *http.Response, err error) +} + +func (t *mockTransport) RoundTrip(req *http.Request) (resp *http.Response, err error) { + return t.rt(req) +} + +type mockCache struct { + token *Token + readErr error +} + +func (c *mockCache) ReadToken() (*Token, error) { + return c.token, c.readErr +} + +func (c *mockCache) WriteToken(*Token) { + // do nothing +} + +func newConf(url string) *Config { + return &Config{ + ClientID: "CLIENT_ID", + ClientSecret: "CLIENT_SECRET", + RedirectURL: "REDIRECT_URL", + Scopes: []string{"scope1", "scope2"}, + Endpoint: Endpoint{ + AuthURL: url + "/auth", + TokenURL: url + "/token", + }, + } +} + +func TestAuthCodeURL(t *testing.T) { + conf := newConf("server") + url := conf.AuthCodeURL("foo", AccessTypeOffline, ApprovalForce) + if url != "server/auth?access_type=offline&approval_prompt=force&client_id=CLIENT_ID&redirect_uri=REDIRECT_URL&response_type=code&scope=scope1+scope2&state=foo" { + t.Errorf("Auth code URL doesn't match the expected, found: %v", url) + } +} + +func TestAuthCodeURL_CustomParam(t *testing.T) { + conf := newConf("server") + param := SetAuthURLParam("foo", "bar") + url := conf.AuthCodeURL("baz", param) + if url != "server/auth?client_id=CLIENT_ID&foo=bar&redirect_uri=REDIRECT_URL&response_type=code&scope=scope1+scope2&state=baz" { + t.Errorf("Auth code URL doesn't match the expected, found: %v", url) + } +} + +func TestAuthCodeURL_Optional(t *testing.T) { + conf := &Config{ + ClientID: "CLIENT_ID", + Endpoint: Endpoint{ + AuthURL: "/auth-url", + TokenURL: "/token-url", + }, + } + url := conf.AuthCodeURL("") + if url != "/auth-url?client_id=CLIENT_ID&response_type=code" { + t.Fatalf("Auth code URL doesn't match the expected, found: %v", url) + } +} + +func TestExchangeRequest(t *testing.T) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if r.URL.String() != "/token" { + t.Errorf("Unexpected exchange request URL, %v is found.", r.URL) + } + headerAuth := r.Header.Get("Authorization") + if headerAuth != "Basic Q0xJRU5UX0lEOkNMSUVOVF9TRUNSRVQ=" { + t.Errorf("Unexpected authorization header, %v is found.", headerAuth) + } + headerContentType := r.Header.Get("Content-Type") + if headerContentType != "application/x-www-form-urlencoded" { + t.Errorf("Unexpected Content-Type header, %v is found.", headerContentType) + } + body, err := ioutil.ReadAll(r.Body) + if err != nil { + t.Errorf("Failed reading request body: %s.", err) + } + if string(body) != 
"client_id=CLIENT_ID&code=exchange-code&grant_type=authorization_code&redirect_uri=REDIRECT_URL&scope=scope1+scope2" { + t.Errorf("Unexpected exchange payload, %v is found.", string(body)) + } + w.Header().Set("Content-Type", "application/x-www-form-urlencoded") + w.Write([]byte("access_token=90d64460d14870c08c81352a05dedd3465940a7c&scope=user&token_type=bearer")) + })) + defer ts.Close() + conf := newConf(ts.URL) + tok, err := conf.Exchange(NoContext, "exchange-code") + if err != nil { + t.Error(err) + } + if !tok.Valid() { + t.Fatalf("Token invalid. Got: %#v", tok) + } + if tok.AccessToken != "90d64460d14870c08c81352a05dedd3465940a7c" { + t.Errorf("Unexpected access token, %#v.", tok.AccessToken) + } + if tok.TokenType != "bearer" { + t.Errorf("Unexpected token type, %#v.", tok.TokenType) + } + scope := tok.Extra("scope") + if scope != "user" { + t.Errorf("Unexpected value for scope: %v", scope) + } +} + +func TestExchangeRequest_JSONResponse(t *testing.T) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if r.URL.String() != "/token" { + t.Errorf("Unexpected exchange request URL, %v is found.", r.URL) + } + headerAuth := r.Header.Get("Authorization") + if headerAuth != "Basic Q0xJRU5UX0lEOkNMSUVOVF9TRUNSRVQ=" { + t.Errorf("Unexpected authorization header, %v is found.", headerAuth) + } + headerContentType := r.Header.Get("Content-Type") + if headerContentType != "application/x-www-form-urlencoded" { + t.Errorf("Unexpected Content-Type header, %v is found.", headerContentType) + } + body, err := ioutil.ReadAll(r.Body) + if err != nil { + t.Errorf("Failed reading request body: %s.", err) + } + if string(body) != "client_id=CLIENT_ID&code=exchange-code&grant_type=authorization_code&redirect_uri=REDIRECT_URL&scope=scope1+scope2" { + t.Errorf("Unexpected exchange payload, %v is found.", string(body)) + } + w.Header().Set("Content-Type", "application/json") + w.Write([]byte(`{"access_token": "90d64460d14870c08c81352a05dedd3465940a7c", "scope": "user", "token_type": "bearer", "expires_in": 86400}`)) + })) + defer ts.Close() + conf := newConf(ts.URL) + tok, err := conf.Exchange(NoContext, "exchange-code") + if err != nil { + t.Error(err) + } + if !tok.Valid() { + t.Fatalf("Token invalid. 
Got: %#v", tok) + } + if tok.AccessToken != "90d64460d14870c08c81352a05dedd3465940a7c" { + t.Errorf("Unexpected access token, %#v.", tok.AccessToken) + } + if tok.TokenType != "bearer" { + t.Errorf("Unexpected token type, %#v.", tok.TokenType) + } + scope := tok.Extra("scope") + if scope != "user" { + t.Errorf("Unexpected value for scope: %v", scope) + } + expiresIn := tok.Extra("expires_in") + if expiresIn != float64(86400) { + t.Errorf("Unexpected non-numeric value for expires_in: %v", expiresIn) + } +} + +func TestExtraValueRetrieval(t *testing.T) { + values := url.Values{} + + kvmap := map[string]string{ + "scope": "user", "token_type": "bearer", "expires_in": "86400.92", + "server_time": "1443571905.5606415", "referer_ip": "10.0.0.1", + "etag": "\"afZYj912P4alikMz_P11982\"", "request_id": "86400", + "untrimmed": " untrimmed ", + } + + for key, value := range kvmap { + values.Set(key, value) + } + + tok := Token{ + raw: values, + } + + scope := tok.Extra("scope") + if scope != "user" { + t.Errorf("Unexpected scope %v wanted \"user\"", scope) + } + serverTime := tok.Extra("server_time") + if serverTime != 1443571905.5606415 { + t.Errorf("Unexpected non-float64 value for server_time: %v", serverTime) + } + refererIp := tok.Extra("referer_ip") + if refererIp != "10.0.0.1" { + t.Errorf("Unexpected non-string value for referer_ip: %v", refererIp) + } + expires_in := tok.Extra("expires_in") + if expires_in != 86400.92 { + t.Errorf("Unexpected value for expires_in, wanted 86400 got %v", expires_in) + } + requestId := tok.Extra("request_id") + if requestId != int64(86400) { + t.Errorf("Unexpected non-int64 value for request_id: %v", requestId) + } + untrimmed := tok.Extra("untrimmed") + if untrimmed != " untrimmed " { + t.Errorf("Unexpected value for untrimmed, got %q expected \" untrimmed \"", untrimmed) + } +} + +const day = 24 * time.Hour + +func TestExchangeRequest_JSONResponse_Expiry(t *testing.T) { + seconds := int32(day.Seconds()) + jsonNumberType := reflect.TypeOf(json.Number("0")) + for _, c := range []struct { + expires string + expect error + }{ + {fmt.Sprintf(`"expires_in": %d`, seconds), nil}, + {fmt.Sprintf(`"expires_in": "%d"`, seconds), nil}, // PayPal case + {fmt.Sprintf(`"expires": %d`, seconds), nil}, // Facebook case + {`"expires": false`, &json.UnmarshalTypeError{Value: "bool", Type: jsonNumberType}}, // wrong type + {`"expires": {}`, &json.UnmarshalTypeError{Value: "object", Type: jsonNumberType}}, // wrong type + {`"expires": "zzz"`, &strconv.NumError{Func: "ParseInt", Num: "zzz", Err: strconv.ErrSyntax}}, // wrong value + } { + testExchangeRequest_JSONResponse_expiry(t, c.expires, c.expect) + } +} + +func testExchangeRequest_JSONResponse_expiry(t *testing.T, exp string, expect error) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/json") + w.Write([]byte(fmt.Sprintf(`{"access_token": "90d", "scope": "user", "token_type": "bearer", %s}`, exp))) + })) + defer ts.Close() + conf := newConf(ts.URL) + t1 := time.Now().Add(day) + tok, err := conf.Exchange(NoContext, "exchange-code") + t2 := time.Now().Add(day) + // Do a fmt.Sprint comparison so either side can be + // nil. fmt.Sprint just stringifies them to "", and no + // non-nil expected error ever stringifies as "", so this + // isn't terribly disgusting. We do this because Go 1.4 and + // Go 1.5 return a different deep value for + // json.UnmarshalTypeError. 
In Go 1.5, the + // json.UnmarshalTypeError contains a new field with a new + // non-zero value. Rather than ignore it here with reflect or + // add new files and +build tags, just look at the strings. + if fmt.Sprint(err) != fmt.Sprint(expect) { + t.Errorf("Error = %v; want %v", err, expect) + } + if err != nil { + return + } + if !tok.Valid() { + t.Fatalf("Token invalid. Got: %#v", tok) + } + expiry := tok.Expiry + if expiry.Before(t1) || expiry.After(t2) { + t.Errorf("Unexpected value for Expiry: %v (shold be between %v and %v)", expiry, t1, t2) + } +} + +func TestExchangeRequest_BadResponse(t *testing.T) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/json") + w.Write([]byte(`{"scope": "user", "token_type": "bearer"}`)) + })) + defer ts.Close() + conf := newConf(ts.URL) + tok, err := conf.Exchange(NoContext, "code") + if err != nil { + t.Fatal(err) + } + if tok.AccessToken != "" { + t.Errorf("Unexpected access token, %#v.", tok.AccessToken) + } +} + +func TestExchangeRequest_BadResponseType(t *testing.T) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/json") + w.Write([]byte(`{"access_token":123, "scope": "user", "token_type": "bearer"}`)) + })) + defer ts.Close() + conf := newConf(ts.URL) + _, err := conf.Exchange(NoContext, "exchange-code") + if err == nil { + t.Error("expected error from invalid access_token type") + } +} + +func TestExchangeRequest_NonBasicAuth(t *testing.T) { + tr := &mockTransport{ + rt: func(r *http.Request) (w *http.Response, err error) { + headerAuth := r.Header.Get("Authorization") + if headerAuth != "" { + t.Errorf("Unexpected authorization header, %v is found.", headerAuth) + } + return nil, errors.New("no response") + }, + } + c := &http.Client{Transport: tr} + conf := &Config{ + ClientID: "CLIENT_ID", + Endpoint: Endpoint{ + AuthURL: "https://accounts.google.com/auth", + TokenURL: "https://accounts.google.com/token", + }, + } + + ctx := context.WithValue(context.Background(), HTTPClient, c) + conf.Exchange(ctx, "code") +} + +func TestPasswordCredentialsTokenRequest(t *testing.T) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + defer r.Body.Close() + expected := "/token" + if r.URL.String() != expected { + t.Errorf("URL = %q; want %q", r.URL, expected) + } + headerAuth := r.Header.Get("Authorization") + expected = "Basic Q0xJRU5UX0lEOkNMSUVOVF9TRUNSRVQ=" + if headerAuth != expected { + t.Errorf("Authorization header = %q; want %q", headerAuth, expected) + } + headerContentType := r.Header.Get("Content-Type") + expected = "application/x-www-form-urlencoded" + if headerContentType != expected { + t.Errorf("Content-Type header = %q; want %q", headerContentType, expected) + } + body, err := ioutil.ReadAll(r.Body) + if err != nil { + t.Errorf("Failed reading request body: %s.", err) + } + expected = "client_id=CLIENT_ID&grant_type=password&password=password1&scope=scope1+scope2&username=user1" + if string(body) != expected { + t.Errorf("res.Body = %q; want %q", string(body), expected) + } + w.Header().Set("Content-Type", "application/x-www-form-urlencoded") + w.Write([]byte("access_token=90d64460d14870c08c81352a05dedd3465940a7c&scope=user&token_type=bearer")) + })) + defer ts.Close() + conf := newConf(ts.URL) + tok, err := conf.PasswordCredentialsToken(NoContext, "user1", "password1") + if err != nil { + t.Error(err) + } + if 
!tok.Valid() { + t.Fatalf("Token invalid. Got: %#v", tok) + } + expected := "90d64460d14870c08c81352a05dedd3465940a7c" + if tok.AccessToken != expected { + t.Errorf("AccessToken = %q; want %q", tok.AccessToken, expected) + } + expected = "bearer" + if tok.TokenType != expected { + t.Errorf("TokenType = %q; want %q", tok.TokenType, expected) + } +} + +func TestTokenRefreshRequest(t *testing.T) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if r.URL.String() == "/somethingelse" { + return + } + if r.URL.String() != "/token" { + t.Errorf("Unexpected token refresh request URL, %v is found.", r.URL) + } + headerContentType := r.Header.Get("Content-Type") + if headerContentType != "application/x-www-form-urlencoded" { + t.Errorf("Unexpected Content-Type header, %v is found.", headerContentType) + } + body, _ := ioutil.ReadAll(r.Body) + if string(body) != "client_id=CLIENT_ID&grant_type=refresh_token&refresh_token=REFRESH_TOKEN" { + t.Errorf("Unexpected refresh token payload, %v is found.", string(body)) + } + })) + defer ts.Close() + conf := newConf(ts.URL) + c := conf.Client(NoContext, &Token{RefreshToken: "REFRESH_TOKEN"}) + c.Get(ts.URL + "/somethingelse") +} + +func TestFetchWithNoRefreshToken(t *testing.T) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if r.URL.String() == "/somethingelse" { + return + } + if r.URL.String() != "/token" { + t.Errorf("Unexpected token refresh request URL, %v is found.", r.URL) + } + headerContentType := r.Header.Get("Content-Type") + if headerContentType != "application/x-www-form-urlencoded" { + t.Errorf("Unexpected Content-Type header, %v is found.", headerContentType) + } + body, _ := ioutil.ReadAll(r.Body) + if string(body) != "client_id=CLIENT_ID&grant_type=refresh_token&refresh_token=REFRESH_TOKEN" { + t.Errorf("Unexpected refresh token payload, %v is found.", string(body)) + } + })) + defer ts.Close() + conf := newConf(ts.URL) + c := conf.Client(NoContext, nil) + _, err := c.Get(ts.URL + "/somethingelse") + if err == nil { + t.Errorf("Fetch should return an error if no refresh token is set") + } +} + +func TestRefreshToken_RefreshTokenReplacement(t *testing.T) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/json") + w.Write([]byte(`{"access_token":"ACCESS TOKEN", "scope": "user", "token_type": "bearer", "refresh_token": "NEW REFRESH TOKEN"}`)) + return + })) + defer ts.Close() + conf := newConf(ts.URL) + tkr := tokenRefresher{ + conf: conf, + ctx: NoContext, + refreshToken: "OLD REFRESH TOKEN", + } + tk, err := tkr.Token() + if err != nil { + t.Errorf("Unexpected refreshToken error returned: %v", err) + return + } + if tk.RefreshToken != tkr.refreshToken { + t.Errorf("tokenRefresher.refresh_token = %s; want %s", tkr.refreshToken, tk.RefreshToken) + } +} + +func TestConfigClientWithToken(t *testing.T) { + tok := &Token{ + AccessToken: "abc123", + } + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if got, want := r.Header.Get("Authorization"), fmt.Sprintf("Bearer %s", tok.AccessToken); got != want { + t.Errorf("Authorization header = %q; want %q", got, want) + } + return + })) + defer ts.Close() + conf := newConf(ts.URL) + + c := conf.Client(NoContext, tok) + req, err := http.NewRequest("GET", ts.URL, nil) + if err != nil { + t.Error(err) + } + _, err = c.Do(req) + if err != nil { + t.Error(err) + } +} diff --git 
a/vendor/golang.org/x/oauth2/odnoklassniki/odnoklassniki.go b/vendor/golang.org/x/oauth2/odnoklassniki/odnoklassniki.go new file mode 100644 index 000000000..60741ce9d --- /dev/null +++ b/vendor/golang.org/x/oauth2/odnoklassniki/odnoklassniki.go @@ -0,0 +1,16 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package odnoklassniki provides constants for using OAuth2 to access Odnoklassniki. +package odnoklassniki + +import ( + "golang.org/x/oauth2" +) + +// Endpoint is Odnoklassniki's OAuth 2.0 endpoint. +var Endpoint = oauth2.Endpoint{ + AuthURL: "https://www.odnoklassniki.ru/oauth/authorize", + TokenURL: "https://api.odnoklassniki.ru/oauth/token.do", +} diff --git a/vendor/golang.org/x/oauth2/paypal/paypal.go b/vendor/golang.org/x/oauth2/paypal/paypal.go new file mode 100644 index 000000000..32308322f --- /dev/null +++ b/vendor/golang.org/x/oauth2/paypal/paypal.go @@ -0,0 +1,22 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package paypal provides constants for using OAuth2 to access PayPal. +package paypal + +import ( + "golang.org/x/oauth2" +) + +// Endpoint is PayPal's OAuth 2.0 endpoint in live (production) environment. +var Endpoint = oauth2.Endpoint{ + AuthURL: "https://www.paypal.com/webapps/auth/protocol/openidconnect/v1/authorize", + TokenURL: "https://api.paypal.com/v1/identity/openidconnect/tokenservice", +} + +// SandboxEndpoint is PayPal's OAuth 2.0 endpoint in sandbox (testing) environment. +var SandboxEndpoint = oauth2.Endpoint{ + AuthURL: "https://www.sandbox.paypal.com/webapps/auth/protocol/openidconnect/v1/authorize", + TokenURL: "https://api.sandbox.paypal.com/v1/identity/openidconnect/tokenservice", +} diff --git a/vendor/golang.org/x/oauth2/token.go b/vendor/golang.org/x/oauth2/token.go new file mode 100644 index 000000000..7a3167f15 --- /dev/null +++ b/vendor/golang.org/x/oauth2/token.go @@ -0,0 +1,158 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package oauth2 + +import ( + "net/http" + "net/url" + "strconv" + "strings" + "time" + + "golang.org/x/net/context" + "golang.org/x/oauth2/internal" +) + +// expiryDelta determines how much earlier a token should be considered +// expired than its actual expiration time. It is used to avoid late
// expirations due to client-server time mismatches. +const expiryDelta = 10 * time.Second + +// Token represents the credentials used to authorize +// the requests to access protected resources on the OAuth 2.0 +// provider's backend. +// +// Most users of this package should not access fields of Token +// directly. They're exported mostly for use by related packages +// implementing derivative OAuth2 flows. +type Token struct { + // AccessToken is the token that authorizes and authenticates + // the requests. + AccessToken string `json:"access_token"` + + // TokenType is the type of token. + // The Type method returns either this or "Bearer", the default. + TokenType string `json:"token_type,omitempty"` + + // RefreshToken is a token that's used by the application + // (as opposed to the user) to refresh the access token + // if it expires. + RefreshToken string `json:"refresh_token,omitempty"` + + // Expiry is the optional expiration time of the access token.
+ // + // If zero, TokenSource implementations will reuse the same + // token forever and RefreshToken or equivalent + // mechanisms for that TokenSource will not be used. + Expiry time.Time `json:"expiry,omitempty"` + + // raw optionally contains extra metadata from the server + // when updating a token. + raw interface{} +} + +// Type returns t.TokenType if non-empty, else "Bearer". +func (t *Token) Type() string { + if strings.EqualFold(t.TokenType, "bearer") { + return "Bearer" + } + if strings.EqualFold(t.TokenType, "mac") { + return "MAC" + } + if strings.EqualFold(t.TokenType, "basic") { + return "Basic" + } + if t.TokenType != "" { + return t.TokenType + } + return "Bearer" +} + +// SetAuthHeader sets the Authorization header to r using the access +// token in t. +// +// This method is unnecessary when using Transport or an HTTP Client +// returned by this package. +func (t *Token) SetAuthHeader(r *http.Request) { + r.Header.Set("Authorization", t.Type()+" "+t.AccessToken) +} + +// WithExtra returns a new Token that's a clone of t, but using the +// provided raw extra map. This is only intended for use by packages +// implementing derivative OAuth2 flows. +func (t *Token) WithExtra(extra interface{}) *Token { + t2 := new(Token) + *t2 = *t + t2.raw = extra + return t2 +} + +// Extra returns an extra field. +// Extra fields are key-value pairs returned by the server as a +// part of the token retrieval response. +func (t *Token) Extra(key string) interface{} { + if raw, ok := t.raw.(map[string]interface{}); ok { + return raw[key] + } + + vals, ok := t.raw.(url.Values) + if !ok { + return nil + } + + v := vals.Get(key) + switch s := strings.TrimSpace(v); strings.Count(s, ".") { + case 0: // Contains no "."; try to parse as int + if i, err := strconv.ParseInt(s, 10, 64); err == nil { + return i + } + case 1: // Contains a single "."; try to parse as float + if f, err := strconv.ParseFloat(s, 64); err == nil { + return f + } + } + + return v +} + +// expired reports whether the token is expired. +// t must be non-nil. +func (t *Token) expired() bool { + if t.Expiry.IsZero() { + return false + } + return t.Expiry.Add(-expiryDelta).Before(time.Now()) +} + +// Valid reports whether t is non-nil, has an AccessToken, and is not expired. +func (t *Token) Valid() bool { + return t != nil && t.AccessToken != "" && !t.expired() +} + +// tokenFromInternal maps an *internal.Token struct into +// a *Token struct. +func tokenFromInternal(t *internal.Token) *Token { + if t == nil { + return nil + } + return &Token{ + AccessToken: t.AccessToken, + TokenType: t.TokenType, + RefreshToken: t.RefreshToken, + Expiry: t.Expiry, + raw: t.Raw, + } +} + +// retrieveToken takes a *Config and uses that to retrieve an *internal.Token. +// This token is then mapped from *internal.Token into an *oauth2.Token which is returned along +// with an error. +func retrieveToken(ctx context.Context, c *Config, v url.Values) (*Token, error) { + tk, err := internal.RetrieveToken(ctx, c.ClientID, c.ClientSecret, c.Endpoint.TokenURL, v) + if err != nil { + return nil, err + } + return tokenFromInternal(tk), nil +} diff --git a/vendor/golang.org/x/oauth2/token_test.go b/vendor/golang.org/x/oauth2/token_test.go new file mode 100644 index 000000000..80db83c29 --- /dev/null +++ b/vendor/golang.org/x/oauth2/token_test.go @@ -0,0 +1,72 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file.
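Editorial aside before the test file: the url.Values branch of Token.Extra above infers a Go type from a value's string form by counting dots, which is exactly what TestExtraValueRetrieval earlier in this patch exercises. A minimal standalone sketch of that heuristic follows; parseExtra is our illustrative name, not part of the package:

package main

import (
	"fmt"
	"strconv"
	"strings"
)

// parseExtra mirrors the switch in Token.Extra: no dot means try int64,
// exactly one dot means try float64, and anything that fails to parse
// (or has two or more dots, like an IP address) stays a string.
func parseExtra(v string) interface{} {
	switch s := strings.TrimSpace(v); strings.Count(s, ".") {
	case 0:
		if i, err := strconv.ParseInt(s, 10, 64); err == nil {
			return i
		}
	case 1:
		if f, err := strconv.ParseFloat(s, 64); err == nil {
			return f
		}
	}
	return v // the untrimmed original, matching the "untrimmed" test case
}

func main() {
	for _, v := range []string{"86400", "86400.92", "10.0.0.1", " untrimmed "} {
		fmt.Printf("%-14q -> %T\n", v, parseExtra(v)) // int64, float64, string, string
	}
}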
+ +package oauth2 + +import ( + "testing" + "time" +) + +func TestTokenExtra(t *testing.T) { + type testCase struct { + key string + val interface{} + want interface{} + } + const key = "extra-key" + cases := []testCase{ + {key: key, val: "abc", want: "abc"}, + {key: key, val: 123, want: 123}, + {key: key, val: "", want: ""}, + {key: "other-key", val: "def", want: nil}, + } + for _, tc := range cases { + extra := make(map[string]interface{}) + extra[tc.key] = tc.val + tok := &Token{raw: extra} + if got, want := tok.Extra(key), tc.want; got != want { + t.Errorf("Extra(%q) = %q; want %q", key, got, want) + } + } +} + +func TestTokenExpiry(t *testing.T) { + now := time.Now() + cases := []struct { + name string + tok *Token + want bool + }{ + {name: "12 seconds", tok: &Token{Expiry: now.Add(12 * time.Second)}, want: false}, + {name: "10 seconds", tok: &Token{Expiry: now.Add(expiryDelta)}, want: true}, + {name: "-1 hour", tok: &Token{Expiry: now.Add(-1 * time.Hour)}, want: true}, + } + for _, tc := range cases { + if got, want := tc.tok.expired(), tc.want; got != want { + t.Errorf("expired (%q) = %v; want %v", tc.name, got, want) + } + } +} + +func TestTokenTypeMethod(t *testing.T) { + cases := []struct { + name string + tok *Token + want string + }{ + {name: "bearer-mixed_case", tok: &Token{TokenType: "beAREr"}, want: "Bearer"}, + {name: "default-bearer", tok: &Token{}, want: "Bearer"}, + {name: "basic", tok: &Token{TokenType: "basic"}, want: "Basic"}, + {name: "basic-capitalized", tok: &Token{TokenType: "Basic"}, want: "Basic"}, + {name: "mac", tok: &Token{TokenType: "mac"}, want: "MAC"}, + {name: "mac-caps", tok: &Token{TokenType: "MAC"}, want: "MAC"}, + {name: "mac-mixed_case", tok: &Token{TokenType: "mAc"}, want: "MAC"}, + } + for _, tc := range cases { + if got, want := tc.tok.Type(), tc.want; got != want { + t.Errorf("TokenType(%q) = %v; want %v", tc.name, got, want) + } + } +} diff --git a/vendor/golang.org/x/oauth2/transport.go b/vendor/golang.org/x/oauth2/transport.go new file mode 100644 index 000000000..92ac7e253 --- /dev/null +++ b/vendor/golang.org/x/oauth2/transport.go @@ -0,0 +1,132 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package oauth2 + +import ( + "errors" + "io" + "net/http" + "sync" +) + +// Transport is an http.RoundTripper that makes OAuth 2.0 HTTP requests, +// wrapping a base RoundTripper and adding an Authorization header +// with a token from the supplied Sources. +// +// Transport is a low-level mechanism. Most code will use the +// higher-level Config.Client method instead. +type Transport struct { + // Source supplies the token to add to outgoing requests' + // Authorization headers. + Source TokenSource + + // Base is the base RoundTripper used to make HTTP requests. + // If nil, http.DefaultTransport is used. + Base http.RoundTripper + + mu sync.Mutex // guards modReq + modReq map[*http.Request]*http.Request // original -> modified +} + +// RoundTrip authorizes and authenticates the request with an +// access token. If no token exists or token is expired, +// tries to refresh/fetch a new token. 
+func (t *Transport) RoundTrip(req *http.Request) (*http.Response, error) { + if t.Source == nil { + return nil, errors.New("oauth2: Transport's Source is nil") + } + token, err := t.Source.Token() + if err != nil { + return nil, err + } + + req2 := cloneRequest(req) // per RoundTripper contract + token.SetAuthHeader(req2) + t.setModReq(req, req2) + res, err := t.base().RoundTrip(req2) + if err != nil { + t.setModReq(req, nil) + return nil, err + } + res.Body = &onEOFReader{ + rc: res.Body, + fn: func() { t.setModReq(req, nil) }, + } + return res, nil +} + +// CancelRequest cancels an in-flight request by closing its connection. +func (t *Transport) CancelRequest(req *http.Request) { + type canceler interface { + CancelRequest(*http.Request) + } + if cr, ok := t.base().(canceler); ok { + t.mu.Lock() + modReq := t.modReq[req] + delete(t.modReq, req) + t.mu.Unlock() + cr.CancelRequest(modReq) + } +} + +func (t *Transport) base() http.RoundTripper { + if t.Base != nil { + return t.Base + } + return http.DefaultTransport +} + +func (t *Transport) setModReq(orig, mod *http.Request) { + t.mu.Lock() + defer t.mu.Unlock() + if t.modReq == nil { + t.modReq = make(map[*http.Request]*http.Request) + } + if mod == nil { + delete(t.modReq, orig) + } else { + t.modReq[orig] = mod + } +} + +// cloneRequest returns a clone of the provided *http.Request. +// The clone is a shallow copy of the struct and its Header map. +func cloneRequest(r *http.Request) *http.Request { + // shallow copy of the struct + r2 := new(http.Request) + *r2 = *r + // deep copy of the Header + r2.Header = make(http.Header, len(r.Header)) + for k, s := range r.Header { + r2.Header[k] = append([]string(nil), s...) + } + return r2 +} + +type onEOFReader struct { + rc io.ReadCloser + fn func() +} + +func (r *onEOFReader) Read(p []byte) (n int, err error) { + n, err = r.rc.Read(p) + if err == io.EOF { + r.runFunc() + } + return +} + +func (r *onEOFReader) Close() error { + err := r.rc.Close() + r.runFunc() + return err +} + +func (r *onEOFReader) runFunc() { + if fn := r.fn; fn != nil { + fn() + r.fn = nil + } +} diff --git a/vendor/golang.org/x/oauth2/transport_test.go b/vendor/golang.org/x/oauth2/transport_test.go new file mode 100644 index 000000000..9bb1383f0 --- /dev/null +++ b/vendor/golang.org/x/oauth2/transport_test.go @@ -0,0 +1,108 @@ +package oauth2 + +import ( + "net/http" + "net/http/httptest" + "testing" + "time" +) + +type tokenSource struct{ token *Token } + +func (t *tokenSource) Token() (*Token, error) { + return t.token, nil +} + +func TestTransportNilTokenSource(t *testing.T) { + tr := &Transport{} + server := newMockServer(func(w http.ResponseWriter, r *http.Request) {}) + defer server.Close() + client := &http.Client{Transport: tr} + res, err := client.Get(server.URL) + if err == nil { + t.Errorf("a nil Source was passed into the transport; expected an error") + } + if res != nil { + t.Errorf("expected a nil response, got %v", res) + } +} + +func TestTransportTokenSource(t *testing.T) { + ts := &tokenSource{ + token: &Token{ + AccessToken: "abc", + }, + } + tr := &Transport{ + Source: ts, + } + server := newMockServer(func(w http.ResponseWriter, r *http.Request) { + if r.Header.Get("Authorization") != "Bearer abc" { + t.Errorf("Transport doesn't set the Authorization header from the fetched token") + } + }) + defer server.Close() + client := &http.Client{Transport: tr} + res, err := client.Get(server.URL) + if err != nil { + t.Fatal(err) + } + res.Body.Close() +} + +// Test for case-sensitive token types, per
https://github.com/golang/oauth2/issues/113 +func TestTransportTokenSourceTypes(t *testing.T) { + const val = "abc" + tests := []struct { + key string + val string + want string + }{ + {key: "bearer", val: val, want: "Bearer abc"}, + {key: "mac", val: val, want: "MAC abc"}, + {key: "basic", val: val, want: "Basic abc"}, + } + for _, tc := range tests { + ts := &tokenSource{ + token: &Token{ + AccessToken: tc.val, + TokenType: tc.key, + }, + } + tr := &Transport{ + Source: ts, + } + server := newMockServer(func(w http.ResponseWriter, r *http.Request) { + if got, want := r.Header.Get("Authorization"), tc.want; got != want { + t.Errorf("Authorization header (%q) = %q; want %q", val, got, want) + } + }) + defer server.Close() + client := &http.Client{Transport: tr} + res, err := client.Get(server.URL) + if err != nil { + t.Fatal(err) + } + res.Body.Close() + } +} + +func TestTokenValidNoAccessToken(t *testing.T) { + token := &Token{} + if token.Valid() { + t.Errorf("Token should not be valid with no access token") + } +} + +func TestExpiredWithExpiry(t *testing.T) { + token := &Token{ + Expiry: time.Now().Add(-5 * time.Hour), + } + if token.Valid() { + t.Errorf("Token should not be valid if it expired in the past") + } +} + +func newMockServer(handler func(w http.ResponseWriter, r *http.Request)) *httptest.Server { + return httptest.NewServer(http.HandlerFunc(handler)) +} diff --git a/vendor/golang.org/x/oauth2/vk/vk.go b/vendor/golang.org/x/oauth2/vk/vk.go new file mode 100644 index 000000000..6463482cf --- /dev/null +++ b/vendor/golang.org/x/oauth2/vk/vk.go @@ -0,0 +1,16 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package vk provides constants for using OAuth2 to access VK.com. +package vk + +import ( + "golang.org/x/oauth2" +) + +// Endpoint is VK's OAuth 2.0 endpoint. 
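Editorial aside: as with the paypal and odnoklassniki packages above, the constant declared just below is meant to be plugged into an oauth2.Config. A hedged sketch of the usual three-legged flow entry point; the credentials, redirect URL, and scope are placeholders:

package main

import (
	"fmt"

	"golang.org/x/oauth2"
	"golang.org/x/oauth2/vk"
)

func main() {
	conf := &oauth2.Config{
		ClientID:     "CLIENT_ID",                    // placeholder
		ClientSecret: "CLIENT_SECRET",                // placeholder
		RedirectURL:  "https://example.com/callback", // placeholder
		Scopes:       []string{"email"},              // placeholder scope
		Endpoint:     vk.Endpoint,
	}
	// Send the user to this URL to authorize, then exchange the
	// returned code for a token with conf.Exchange.
	fmt.Println(conf.AuthCodeURL("state-token"))
}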
+var Endpoint = oauth2.Endpoint{ + AuthURL: "https://oauth.vk.com/authorize", + TokenURL: "https://oauth.vk.com/access_token", +} diff --git a/vendor/google.golang.org/api/compute/v1/compute-api.json b/vendor/google.golang.org/api/compute/v1/compute-api.json new file mode 100644 index 000000000..bc27bb7c6 --- /dev/null +++ b/vendor/google.golang.org/api/compute/v1/compute-api.json @@ -0,0 +1,13967 @@ +{ + "kind": "discovery#restDescription", + "etag": "\"bRFOOrZKfO9LweMbPqu0kcu6De8/aU2RyGSEMbRxHCIqylkOho4GK2M\"", + "discoveryVersion": "v1", + "id": "compute:v1", + "name": "compute", + "version": "v1", + "revision": "20160107", + "title": "Compute Engine API", + "description": "API for the Google Compute Engine service.", + "ownerDomain": "google.com", + "ownerName": "Google", + "icons": { + "x16": "https://www.google.com/images/icons/product/compute_engine-16.png", + "x32": "https://www.google.com/images/icons/product/compute_engine-32.png" + }, + "documentationLink": "https://developers.google.com/compute/docs/reference/latest/", + "protocol": "rest", + "baseUrl": "https://www.googleapis.com/compute/v1/projects/", + "basePath": "/compute/v1/projects/", + "rootUrl": "https://www.googleapis.com/", + "servicePath": "compute/v1/projects/", + "batchPath": "batch", + "parameters": { + "alt": { + "type": "string", + "description": "Data format for the response.", + "default": "json", + "enum": [ + "json" + ], + "enumDescriptions": [ + "Responses with Content-Type of application/json" + ], + "location": "query" + }, + "fields": { + "type": "string", + "description": "Selector specifying which fields to include in a partial response.", + "location": "query" + }, + "key": { + "type": "string", + "description": "API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token.", + "location": "query" + }, + "oauth_token": { + "type": "string", + "description": "OAuth 2.0 token for the current user.", + "location": "query" + }, + "prettyPrint": { + "type": "boolean", + "description": "Returns response with indentations and line breaks.", + "default": "true", + "location": "query" + }, + "quotaUser": { + "type": "string", + "description": "Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters. Overrides userIp if both are provided.", + "location": "query" + }, + "userIp": { + "type": "string", + "description": "IP address of the site where the request originates. 
Use this if you want to enforce per-user limits.", + "location": "query" + } + }, + "auth": { + "oauth2": { + "scopes": { + "https://www.googleapis.com/auth/cloud-platform": { + "description": "View and manage your data across Google Cloud Platform services" + }, + "https://www.googleapis.com/auth/compute": { + "description": "View and manage your Google Compute Engine resources" + }, + "https://www.googleapis.com/auth/compute.readonly": { + "description": "View your Google Compute Engine resources" + }, + "https://www.googleapis.com/auth/devstorage.full_control": { + "description": "Manage your data and permissions in Google Cloud Storage" + }, + "https://www.googleapis.com/auth/devstorage.read_only": { + "description": "View your data in Google Cloud Storage" + }, + "https://www.googleapis.com/auth/devstorage.read_write": { + "description": "Manage your data in Google Cloud Storage" + } + } + } + }, + "schemas": { + "AccessConfig": { + "id": "AccessConfig", + "type": "object", + "description": "An access configuration attached to an instance's network interface.", + "properties": { + "kind": { + "type": "string", + "description": "[Output Only] Type of the resource. Always compute#accessConfig for access configs.", + "default": "compute#accessConfig" + }, + "name": { + "type": "string", + "description": "Name of this access configuration." + }, + "natIP": { + "type": "string", + "description": "An external IP address associated with this instance. Specify an unused static external IP address available to the project or leave this field undefined to use an IP from a shared ephemeral IP address pool. If you specify a static external IP address, it must live in the same region as the zone of the instance." + }, + "type": { + "type": "string", + "description": "The type of configuration. The default and only option is ONE_TO_ONE_NAT.", + "default": "ONE_TO_ONE_NAT", + "enum": [ + "ONE_TO_ONE_NAT" + ], + "enumDescriptions": [ + "" + ] + } + } + }, + "Address": { + "id": "Address", + "type": "object", + "description": "A reserved address resource.", + "properties": { + "address": { + "type": "string", + "description": "The static external IP address represented by this resource." + }, + "creationTimestamp": { + "type": "string", + "description": "[Output Only] Creation timestamp in RFC3339 text format." + }, + "description": { + "type": "string", + "description": "An optional description of this resource. Provide this property when you create the resource." + }, + "id": { + "type": "string", + "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server.", + "format": "uint64" + }, + "kind": { + "type": "string", + "description": "[Output Only] Type of the resource. Always compute#address for addresses.", + "default": "compute#address" + }, + "name": { + "type": "string", + "description": "Name of the resource; provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression [a-z]([-a-z0-9]*[a-z0-9])? 
which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "annotations": { + "required": [ + "compute.addresses.insert" + ] + } + }, + "region": { + "type": "string", + "description": "[Output Only] URL of the region where the regional address resides. This field is not applicable to global addresses." + }, + "selfLink": { + "type": "string", + "description": "[Output Only] Server-defined URL for the resource." + }, + "status": { + "type": "string", + "description": "[Output Only] The status of the address, which can be either IN_USE or RESERVED. An address that is RESERVED is currently reserved and available to use. An IN_USE address is currently being used by another resource and is not available.", + "enum": [ + "IN_USE", + "RESERVED" + ], + "enumDescriptions": [ + "", + "" + ] + }, + "users": { + "type": "array", + "description": "[Output Only] The URLs of the resources that are using this address.", + "items": { + "type": "string" + } + } + } + }, + "AddressAggregatedList": { + "id": "AddressAggregatedList", + "type": "object", + "properties": { + "id": { + "type": "string", + "description": "[Output Only] Unique identifier for the resource; defined by the server." + }, + "items": { + "type": "object", + "description": "[Output Only] A map of scoped address lists.", + "additionalProperties": { + "$ref": "AddressesScopedList", + "description": "[Output Only] Name of the scope containing this set of addresses." + } + }, + "kind": { + "type": "string", + "description": "[Output Only] Type of resource. Always compute#addressAggregatedList for aggregated lists of addresses.", + "default": "compute#addressAggregatedList" + }, + "nextPageToken": { + "type": "string", + "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results." + }, + "selfLink": { + "type": "string", + "description": "[Output Only] Server-defined URL for this resource." + } + } + }, + "AddressList": { + "id": "AddressList", + "type": "object", + "description": "Contains a list of address resources.", + "properties": { + "id": { + "type": "string", + "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server." + }, + "items": { + "type": "array", + "description": "[Output Only] A list of Address resources.", + "items": { + "$ref": "Address" + } + }, + "kind": { + "type": "string", + "description": "[Output Only] Type of resource. Always compute#addressList for lists of addresses.", + "default": "compute#addressList" + }, + "nextPageToken": { + "type": "string", + "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results." + }, + "selfLink": { + "type": "string", + "description": "[Output Only] Server-defined URL for the resource." 
+ } + } + }, + "AddressesScopedList": { + "id": "AddressesScopedList", + "type": "object", + "properties": { + "addresses": { + "type": "array", + "description": "[Output Only] List of addresses contained in this scope.", + "items": { + "$ref": "Address" + } + }, + "warning": { + "type": "object", + "description": "[Output Only] Informational warning which replaces the list of addresses when the list is empty.", + "properties": { + "code": { + "type": "string", + "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "DEPRECATED_RESOURCE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "INJECTED_KERNELS_DEPRECATED", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_NOT_DELETED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ] + }, + "data": { + "type": "array", + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource, and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding)." + }, + "value": { + "type": "string", + "description": "[Output Only] A warning data value corresponding to the key." + } + } + } + }, + "message": { + "type": "string", + "description": "[Output Only] A human-readable description of the warning code." + } + } + } + } + }, + "AttachedDisk": { + "id": "AttachedDisk", + "type": "object", + "description": "An instance-attached disk resource.", + "properties": { + "autoDelete": { + "type": "boolean", + "description": "Specifies whether the disk will be auto-deleted when the instance is deleted (but not when the disk is detached from the instance)." + }, + "boot": { + "type": "boolean", + "description": "Indicates that this is a boot disk. The virtual machine will use the first partition of the disk for its root filesystem." + }, + "deviceName": { + "type": "string", + "description": "Specifies a unique device name of your choice that is reflected into the /dev/disk/by-id/google-* tree of a Linux operating system running within the instance. This name can be used to reference the device for mounting, resizing, and so on, from within the instance.\n\nIf not specified, the server chooses a default device name to apply to this disk, in the form persistent-disks-x, where x is a number assigned by Google Compute Engine. This field is only applicable for persistent disks." + }, + "index": { + "type": "integer", + "description": "Assigns a zero-based index to this disk, where 0 is reserved for the boot disk. For example, if you have many disks attached to an instance, each disk would have a unique index number. 
If not specified, the server will choose an appropriate value.", + "format": "int32" + }, + "initializeParams": { + "$ref": "AttachedDiskInitializeParams", + "description": "[Input Only] Specifies the parameters for a new disk that will be created alongside the new instance. Use initialization parameters to create boot disks or local SSDs attached to the new instance.\n\nThis property is mutually exclusive with the source property; you can only define one or the other, but not both." + }, + "interface": { + "type": "string", + "description": "Specifies the disk interface to use for attaching this disk, either SCSI or NVME. The default is SCSI. For performance characteristics of SCSI over NVMe, see Local SSD performance.", + "enum": [ + "NVME", + "SCSI" + ], + "enumDescriptions": [ + "", + "" + ] + }, + "kind": { + "type": "string", + "description": "[Output Only] Type of the resource. Always compute#attachedDisk for attached disks.", + "default": "compute#attachedDisk" + }, + "licenses": { + "type": "array", + "description": "[Output Only] Any valid publicly visible licenses.", + "items": { + "type": "string" + } + }, + "mode": { + "type": "string", + "description": "The mode in which to attach this disk, either READ_WRITE or READ_ONLY. If not specified, the default is to attach the disk in READ_WRITE mode.", + "enum": [ + "READ_ONLY", + "READ_WRITE" + ], + "enumDescriptions": [ + "", + "" + ] + }, + "source": { + "type": "string", + "description": "Specifies a valid partial or full URL to an existing Persistent Disk resource. This field is only applicable for persistent disks." + }, + "type": { + "type": "string", + "description": "Specifies the type of the disk, either SCRATCH or PERSISTENT. If not specified, the default is PERSISTENT.", + "enum": [ + "PERSISTENT", + "SCRATCH" + ], + "enumDescriptions": [ + "", + "" + ], + "annotations": { + "required": [ + "compute.instances.insert" + ] + } + } + } + }, + "AttachedDiskInitializeParams": { + "id": "AttachedDiskInitializeParams", + "type": "object", + "description": "[Input Only] Specifies the parameters for a new disk that will be created alongside the new instance. Use initialization parameters to create boot disks or local SSDs attached to the new instance.\n\nThis property is mutually exclusive with the source property; you can only define one or the other, but not both.", + "properties": { + "diskName": { + "type": "string", + "description": "Specifies the disk name. If not specified, the default is to use the name of the instance." + }, + "diskSizeGb": { + "type": "string", + "description": "Specifies the size of the disk in base-2 GB.", + "format": "int64" + }, + "diskType": { + "type": "string", + "description": "Specifies the disk type to use to create the instance. If not specified, the default is pd-standard, specified using the full URL. For example:\n\nhttps://www.googleapis.com/compute/v1/projects/project/zones/zone/diskTypes/pd-standard \n\nOther values include pd-ssd and local-ssd. If you define this field, you can provide either the full or partial URL. For example, the following are valid values: \n- https://www.googleapis.com/compute/v1/projects/project/zones/zone/diskTypes/diskType \n- projects/project/zones/zone/diskTypes/diskType \n- zones/zone/diskTypes/diskType" + }, + "sourceImage": { + "type": "string", + "description": "A source image used to create the disk. You can provide a private (custom) image, and Compute Engine will use the corresponding image from your project. 
For example:\n\nglobal/images/my-private-image \n\nOr you can provide an image from a publicly-available project. For example, to use a Debian image from the debian-cloud project, make sure to include the project in the URL:\n\nprojects/debian-cloud/global/images/debian-7-wheezy-vYYYYMMDD \n\nwhere vYYYYMMDD is the image version. The fully-qualified URL will also work in both cases." + } + } + }, + "Autoscaler": { + "id": "Autoscaler", + "type": "object", + "properties": { + "autoscalingPolicy": { + "$ref": "AutoscalingPolicy", + "description": "Autoscaling configuration." + }, + "creationTimestamp": { + "type": "string", + "description": "[Output Only] Creation timestamp in RFC3339 text format." + }, + "description": { + "type": "string", + "description": "An optional description of this resource. Provide this property when you create the resource." + }, + "id": { + "type": "string", + "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server.", + "format": "uint64" + }, + "kind": { + "type": "string", + "description": "Type of the resource.", + "default": "compute#autoscaler" + }, + "name": { + "type": "string", + "description": "Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression [a-z]([-a-z0-9]*[a-z0-9])? which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "annotations": { + "required": [ + "compute.instanceGroups.insert" + ] + } + }, + "selfLink": { + "type": "string", + "description": "[Output Only] Server-defined URL for the resource." + }, + "target": { + "type": "string", + "description": "URL of Instance Group Manager or Replica Pool which will be controlled by Autoscaler." + }, + "zone": { + "type": "string", + "description": "[Output Only] URL of the zone where the instance group resides." + } + } + }, + "AutoscalerAggregatedList": { + "id": "AutoscalerAggregatedList", + "type": "object", + "properties": { + "id": { + "type": "string", + "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server." + }, + "items": { + "type": "object", + "description": "A map of scoped autoscaler lists.", + "additionalProperties": { + "$ref": "AutoscalersScopedList", + "description": "Name of the scope containing this set of autoscalers." + } + }, + "kind": { + "type": "string", + "description": "Type of resource.", + "default": "compute#autoscalerAggregatedList" + }, + "nextPageToken": { + "type": "string", + "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results." + }, + "selfLink": { + "type": "string", + "description": "[Output Only] Server-defined URL for this resource." + } + } + }, + "AutoscalerList": { + "id": "AutoscalerList", + "type": "object", + "description": "Contains a list of persistent autoscaler resources.", + "properties": { + "id": { + "type": "string", + "description": "[Output Only] The unique identifier for the resource. 
This identifier is defined by the server." + }, + "items": { + "type": "array", + "description": "A list of Autoscaler resources.", + "items": { + "$ref": "Autoscaler" + } + }, + "kind": { + "type": "string", + "description": "Type of resource.", + "default": "compute#autoscalerList" + }, + "nextPageToken": { + "type": "string", + "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results." + }, + "selfLink": { + "type": "string", + "description": "[Output Only] Server-defined URL for this resource." + } + } + }, + "AutoscalersScopedList": { + "id": "AutoscalersScopedList", + "type": "object", + "properties": { + "autoscalers": { + "type": "array", + "description": "List of autoscalers contained in this scope.", + "items": { + "$ref": "Autoscaler" + } + }, + "warning": { + "type": "object", + "description": "Informational warning which replaces the list of autoscalers when the list is empty.", + "properties": { + "code": { + "type": "string", + "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "DEPRECATED_RESOURCE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "INJECTED_KERNELS_DEPRECATED", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_NOT_DELETED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ] + }, + "data": { + "type": "array", + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource, and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding)." + }, + "value": { + "type": "string", + "description": "[Output Only] A warning data value corresponding to the key." + } + } + } + }, + "message": { + "type": "string", + "description": "[Output Only] A human-readable description of the warning code." + } + } + } + } + }, + "AutoscalingPolicy": { + "id": "AutoscalingPolicy", + "type": "object", + "description": "Cloud Autoscaler policy.", + "properties": { + "coolDownPeriodSec": { + "type": "integer", + "description": "The number of seconds that the Autoscaler should wait between two succeeding changes to the number of virtual machines. You should define an interval that is at least as long as the initialization time of a virtual machine and the time it may take for replica pool to create the virtual machine. 
The default is 60 seconds.", + "format": "int32" + }, + "cpuUtilization": { + "$ref": "AutoscalingPolicyCpuUtilization", + "description": "TODO(jbartosik): Add support for scaling based on multiple utilization metrics (take max recommendation). Exactly one utilization policy should be provided. Configuration parameters of CPU based autoscaling policy." + }, + "customMetricUtilizations": { + "type": "array", + "description": "Configuration parameters of autoscaling based on custom metric.", + "items": { + "$ref": "AutoscalingPolicyCustomMetricUtilization" + } + }, + "loadBalancingUtilization": { + "$ref": "AutoscalingPolicyLoadBalancingUtilization", + "description": "Configuration parameters of autoscaling based on load balancer." + }, + "maxNumReplicas": { + "type": "integer", + "description": "The maximum number of replicas that the Autoscaler can scale up to. This field is required for config to be effective. Maximum number of replicas should be not lower than minimal number of replicas. Absolute limit for this value is defined in Autoscaler backend.", + "format": "int32" + }, + "minNumReplicas": { + "type": "integer", + "description": "The minimum number of replicas that the Autoscaler can scale down to. Can't be less than 0. If not provided, Autoscaler will choose default value depending on maximal number of replicas.", + "format": "int32" + } + } + }, + "AutoscalingPolicyCpuUtilization": { + "id": "AutoscalingPolicyCpuUtilization", + "type": "object", + "description": "CPU utilization policy.", + "properties": { + "utilizationTarget": { + "type": "number", + "description": "The target utilization that the Autoscaler should maintain. It is represented as a fraction of used cores. For example: 6 cores used in 8-core VM are represented here as 0.75. Must be a float value between (0, 1]. If not defined, the default is 0.8.", + "format": "double" + } + } + }, + "AutoscalingPolicyCustomMetricUtilization": { + "id": "AutoscalingPolicyCustomMetricUtilization", + "type": "object", + "description": "Custom utilization metric policy.", + "properties": { + "metric": { + "type": "string", + "description": "Identifier of the metric. It should be a Cloud Monitoring metric. The metric can not have negative values. The metric should be a utilization metric (increasing number of VMs handling requests x times should reduce average value of the metric roughly x times). For example you could use: compute.googleapis.com/instance/network/received_bytes_count." + }, + "utilizationTarget": { + "type": "number", + "description": "Target value of the metric which Autoscaler should maintain. Must be a positive value.", + "format": "double" + }, + "utilizationTargetType": { + "type": "string", + "description": "Defines type in which utilization_target is expressed.", + "enum": [ + "DELTA_PER_MINUTE", + "DELTA_PER_SECOND", + "GAUGE" + ], + "enumDescriptions": [ + "", + "", + "" + ] + } + } + }, + "AutoscalingPolicyLoadBalancingUtilization": { + "id": "AutoscalingPolicyLoadBalancingUtilization", + "type": "object", + "description": "Load balancing utilization policy.", + "properties": { + "utilizationTarget": { + "type": "number", + "description": "Fraction of backend capacity utilization (set in HTTP load balancing configuration) that Autoscaler should maintain. Must be a positive float value. If not defined, the default is 0.8.
For example if your maxRatePerInstance capacity (in HTTP Load Balancing configuration) is set at 10 and you would like to keep number of instances such that each instance receives 7 QPS on average, set this to 0.7.", + "format": "double" + } + } + }, + "Backend": { + "id": "Backend", + "type": "object", + "description": "Message containing information of one individual backend.", + "properties": { + "balancingMode": { + "type": "string", + "description": "Specifies the balancing mode for this backend. The default is UTILIZATION but available values are UTILIZATION and RATE.", + "enum": [ + "RATE", + "UTILIZATION" + ], + "enumDescriptions": [ + "", + "" + ] + }, + "capacityScaler": { + "type": "number", + "description": "A multiplier applied to the group's maximum servicing capacity (either UTILIZATION or RATE). Default value is 1, which means the group will serve up to 100% of its configured CPU or RPS (depending on balancingMode). A setting of 0 means the group is completely drained, offering 0% of its available CPU or RPS. Valid range is [0.0,1.0].", + "format": "float" + }, + "description": { + "type": "string", + "description": "An optional description of this resource. Provide this property when you create the resource." + }, + "group": { + "type": "string", + "description": "The fully-qualified URL of a zonal Instance Group resource. This instance group defines the list of instances that serve traffic. Member virtual machine instances from each instance group must live in the same zone as the instance group itself. No two backends in a backend service are allowed to use the same Instance Group resource.\n\nNote that you must specify an Instance Group resource using the fully-qualified URL, rather than a partial URL." + }, + "maxRate": { + "type": "integer", + "description": "The max RPS of the group. Can be used with either balancing mode, but required if RATE mode. For RATE mode, either maxRate or maxRatePerInstance must be set.", + "format": "int32" + }, + "maxRatePerInstance": { + "type": "number", + "description": "The max RPS that a single backend instance can handle. This is used to calculate the capacity of the group. Can be used in either balancing mode. For RATE mode, either maxRate or maxRatePerInstance must be set.", + "format": "float" + }, + "maxUtilization": { + "type": "number", + "description": "Used when balancingMode is UTILIZATION. This ratio defines the CPU utilization target for the group. The default is 0.8. Valid range is [0.0, 1.0].", + "format": "float" + } + } + }, + "BackendService": { + "id": "BackendService", + "type": "object", + "description": "A BackendService resource. This resource defines a group of backend virtual machines together with their serving capacity.", + "properties": { + "backends": { + "type": "array", + "description": "The list of backends that serve this BackendService.", + "items": { + "$ref": "Backend" + } + }, + "creationTimestamp": { + "type": "string", + "description": "[Output Only] Creation timestamp in RFC3339 text format." + }, + "description": { + "type": "string", + "description": "An optional description of this resource. Provide this property when you create the resource." + }, + "fingerprint": { + "type": "string", + "description": "Fingerprint of this resource. A hash of the contents stored in this object. This field is used in optimistic locking. This field will be ignored when inserting a BackendService.
An up-to-date fingerprint must be provided in order to update the BackendService.", + "format": "byte" + }, + "healthChecks": { + "type": "array", + "description": "The list of URLs to the HttpHealthCheck or HttpsHealthCheck resource for health checking this BackendService. Currently at most one health check can be specified, and a health check is required.", + "items": { + "type": "string" + } + }, + "id": { + "type": "string", + "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server.", + "format": "uint64" + }, + "kind": { + "type": "string", + "description": "[Output Only] Type of resource. Always compute#backendService for backend services.", + "default": "compute#backendService" + }, + "name": { + "type": "string", + "description": "Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression [a-z]([-a-z0-9]*[a-z0-9])? which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?" + }, + "port": { + "type": "integer", + "description": "Deprecated in favor of port name. The TCP port to connect on the backend. The default value is 80.", + "format": "int32" + }, + "portName": { + "type": "string", + "description": "Name of backend port. The same name should appear in the resource views referenced by this service. Required." + }, + "protocol": { + "type": "string", + "enum": [ + "HTTP", + "HTTPS" + ], + "enumDescriptions": [ + "", + "" + ] + }, + "selfLink": { + "type": "string", + "description": "[Output Only] Server-defined URL for the resource." + }, + "timeoutSec": { + "type": "integer", + "description": "How many seconds to wait for the backend before considering it a failed request. Default is 30 seconds.", + "format": "int32" + } + } + }, + "BackendServiceGroupHealth": { + "id": "BackendServiceGroupHealth", + "type": "object", + "properties": { + "healthStatus": { + "type": "array", + "items": { + "$ref": "HealthStatus" + } + }, + "kind": { + "type": "string", + "description": "[Output Only] Type of resource. Always compute#backendServiceGroupHealth for the health of backend services.", + "default": "compute#backendServiceGroupHealth" + } + } + }, + "BackendServiceList": { + "id": "BackendServiceList", + "type": "object", + "description": "Contains a list of BackendService resources.", + "properties": { + "id": { + "type": "string", + "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server." + }, + "items": { + "type": "array", + "description": "A list of BackendService resources.", + "items": { + "$ref": "BackendService" + } + }, + "kind": { + "type": "string", + "description": "[Output Only] Type of resource. Always compute#backendServiceList for lists of backend services.", + "default": "compute#backendServiceList" + }, + "nextPageToken": { + "type": "string", + "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results." 
+ }, + "selfLink": { + "type": "string", + "description": "[Output Only] Server-defined URL for this resource." + } + } + }, + "DeprecationStatus": { + "id": "DeprecationStatus", + "type": "object", + "description": "Deprecation status for a public resource.", + "properties": { + "deleted": { + "type": "string", + "description": "An optional RFC3339 timestamp on or after which the deprecation state of this resource will be changed to DELETED." + }, + "deprecated": { + "type": "string", + "description": "An optional RFC3339 timestamp on or after which the deprecation state of this resource will be changed to DEPRECATED." + }, + "obsolete": { + "type": "string", + "description": "An optional RFC3339 timestamp on or after which the deprecation state of this resource will be changed to OBSOLETE." + }, + "replacement": { + "type": "string", + "description": "The URL of the suggested replacement for a deprecated resource. The suggested replacement resource must be the same kind of resource as the deprecated resource." + }, + "state": { + "type": "string", + "description": "The deprecation state of this resource. This can be DEPRECATED, OBSOLETE, or DELETED. Operations which create a new resource using a DEPRECATED resource will return successfully, but with a warning indicating the deprecated resource and recommending its replacement. Operations which use OBSOLETE or DELETED resources will be rejected and result in an error.", + "enum": [ + "DELETED", + "DEPRECATED", + "OBSOLETE" + ], + "enumDescriptions": [ + "", + "", + "" + ] + } + } + }, + "Disk": { + "id": "Disk", + "type": "object", + "description": "A Disk resource.", + "properties": { + "creationTimestamp": { + "type": "string", + "description": "[Output Only] Creation timestamp in RFC3339 text format." + }, + "description": { + "type": "string", + "description": "An optional description of this resource. Provide this property when you create the resource." + }, + "id": { + "type": "string", + "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server.", + "format": "uint64" + }, + "kind": { + "type": "string", + "description": "[Output Only] Type of the resource. Always compute#disk for disks.", + "default": "compute#disk" + }, + "lastAttachTimestamp": { + "type": "string", + "description": "[Output Only] Last attach timestamp in RFC3339 text format." + }, + "lastDetachTimestamp": { + "type": "string", + "description": "[Output Only] Last detach timestamp in RFC3339 text format." + }, + "licenses": { + "type": "array", + "description": "Any applicable publicly visible licenses.", + "items": { + "type": "string" + } + }, + "name": { + "type": "string", + "description": "Name of the resource; provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression [a-z]([-a-z0-9]*[a-z0-9])? which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "annotations": { + "required": [ + "compute.disks.insert" + ] + } + }, + "options": { + "type": "string", + "description": "Internal use only." + }, + "selfLink": { + "type": "string", + "description": "[Output Only] Server-defined fully-qualified URL for this resource." 
+ }, + "sizeGb": { + "type": "string", + "description": "Size of the persistent disk, specified in GB. You can specify this field when creating a persistent disk using the sourceImage or sourceSnapshot parameter, or specify it alone to create an empty persistent disk.\n\nIf you specify this field along with sourceImage or sourceSnapshot, the value of sizeGb must not be less than the size of the sourceImage or the size of the snapshot.", + "format": "int64" + }, + "sourceImage": { + "type": "string", + "description": "The source image used to create this disk. If the source image is deleted from the system, this field will not be set, even if an image with the same name has been re-created.\n\nWhen creating a disk, you can provide a private (custom) image using the following input, and Compute Engine will use the corresponding image from your project. For example:\n\nglobal/images/my-private-image \n\nOr you can provide an image from a publicly-available project. For example, to use a Debian image from the debian-cloud project, make sure to include the project in the URL:\n\nprojects/debian-cloud/global/images/debian-7-wheezy-vYYYYMMDD \n\nwhere vYYYYMMDD is the image version. The fully-qualified URL will also work in both cases." + }, + "sourceImageId": { + "type": "string", + "description": "The ID value of the image used to create this disk. This value identifies the exact image that was used to create this persistent disk. For example, if you created the persistent disk from an image that was later deleted and recreated under the same name, the source image ID would identify the exact version of the image that was used." + }, + "sourceSnapshot": { + "type": "string", + "description": "The source snapshot used to create this disk. You can provide this as a partial or full URL to the resource. For example, the following are valid values: \n- https://www.googleapis.com/compute/v1/projects/project/global/snapshots/snapshot \n- projects/project/global/snapshots/snapshot \n- global/snapshots/snapshot" + }, + "sourceSnapshotId": { + "type": "string", + "description": "[Output Only] The unique ID of the snapshot used to create this disk. This value identifies the exact snapshot that was used to create this persistent disk. For example, if you created the persistent disk from a snapshot that was later deleted and recreated under the same name, the source snapshot ID would identify the exact version of the snapshot that was used." + }, + "status": { + "type": "string", + "description": "[Output Only] The status of disk creation. Applicable statuses include: CREATING, FAILED, READY, RESTORING.", + "enum": [ + "CREATING", + "FAILED", + "READY", + "RESTORING" + ], + "enumDescriptions": [ + "", + "", + "", + "" + ] + }, + "type": { + "type": "string", + "description": "URL of the disk type resource describing which disk type to use to create the disk; provided by the client when the disk is created." + }, + "users": { + "type": "array", + "description": "Links to the users of the disk (attached instances) in form: project/zones/zone/instances/instance", + "items": { + "type": "string" + } + }, + "zone": { + "type": "string", + "description": "[Output Only] URL of the zone where the disk resides." + } + } + }, + "DiskAggregatedList": { + "id": "DiskAggregatedList", + "type": "object", + "properties": { + "id": { + "type": "string", + "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server."
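[The sizeGb / sourceImage / sourceSnapshot fields above define three creation paths for a Disk. A hedged sketch of the empty-disk path via the generated Go client; the disk name is a placeholder:

    // Create an empty 100 GB persistent disk; sizeGb alone is sufficient
    // because neither sourceImage nor sourceSnapshot is supplied.
    // import compute "google.golang.org/api/compute/v1"
    func createEmptyDisk(svc *compute.Service, project, zone string) (*compute.Operation, error) {
        disk := &compute.Disk{
            Name:   "scratch-disk", // placeholder name
            SizeGb: 100,
        }
        return svc.Disks.Insert(project, zone, disk).Do()
    }
]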
+ }, + "items": { + "type": "object", + "description": "[Output Only] A map of scoped disk lists.", + "additionalProperties": { + "$ref": "DisksScopedList", + "description": "[Output Only] Name of the scope containing this set of disks." + } + }, + "kind": { + "type": "string", + "description": "[Output Only] Type of resource. Always compute#diskAggregatedList for aggregated lists of persistent disks.", + "default": "compute#diskAggregatedList" + }, + "nextPageToken": { + "type": "string", + "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results." + }, + "selfLink": { + "type": "string", + "description": "[Output Only] Server-defined URL for this resource." + } + } + }, + "DiskList": { + "id": "DiskList", + "type": "object", + "description": "A list of Disk resources.", + "properties": { + "id": { + "type": "string", + "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server." + }, + "items": { + "type": "array", + "description": "[Output Only] A list of persistent disks.", + "items": { + "$ref": "Disk" + } + }, + "kind": { + "type": "string", + "description": "[Output Only] Type of resource. Always compute#diskList for lists of disks.", + "default": "compute#diskList" + }, + "nextPageToken": { + "type": "string", + "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results." + }, + "selfLink": { + "type": "string", + "description": "[Output Only] Server-defined URL for this resource." + } + } + }, + "DiskMoveRequest": { + "id": "DiskMoveRequest", + "type": "object", + "properties": { + "destinationZone": { + "type": "string", + "description": "The URL of the destination zone to move the disk to. This can be a full or partial URL. For example, the following are all valid URLs to a zone: \n- https://www.googleapis.com/compute/v1/projects/project/zones/zone \n- projects/project/zones/zone \n- zones/zone" + }, + "targetDisk": { + "type": "string", + "description": "The URL of the target disk to move. This can be a full or partial URL. For example, the following are all valid URLs to a disk: \n- https://www.googleapis.com/compute/v1/projects/project/zones/zone/disks/disk \n- projects/project/zones/zone/disks/disk \n- zones/zone/disks/disk" + } + } + }, + "DiskType": { + "id": "DiskType", + "type": "object", + "description": "A disk type resource.", + "properties": { + "creationTimestamp": { + "type": "string", + "description": "[Output Only] Creation timestamp in RFC3339 text format." + }, + "defaultDiskSizeGb": { + "type": "string", + "description": "[Output Only] Server-defined default disk size in GB.", + "format": "int64" + }, + "deprecated": { + "$ref": "DeprecationStatus", + "description": "[Output Only] The deprecation status associated with this disk type." + }, + "description": { + "type": "string", + "description": "[Output Only] An optional description of this resource." 
+ }, + "id": { + "type": "string", + "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server.", + "format": "uint64" + }, + "kind": { + "type": "string", + "description": "[Output Only] Type of the resource. Always compute#diskType for disk types.", + "default": "compute#diskType" + }, + "name": { + "type": "string", + "description": "[Output Only] Name of the resource.", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?" + }, + "selfLink": { + "type": "string", + "description": "[Output Only] Server-defined URL for the resource." + }, + "validDiskSize": { + "type": "string", + "description": "[Output Only] An optional textual description of the valid disk size, such as \"10GB-10TB\"." + }, + "zone": { + "type": "string", + "description": "[Output Only] URL of the zone where the disk type resides." + } + } + }, + "DiskTypeAggregatedList": { + "id": "DiskTypeAggregatedList", + "type": "object", + "properties": { + "id": { + "type": "string", + "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server." + }, + "items": { + "type": "object", + "description": "[Output Only] A map of scoped disk type lists.", + "additionalProperties": { + "$ref": "DiskTypesScopedList", + "description": "[Output Only] Name of the scope containing this set of disk types." + } + }, + "kind": { + "type": "string", + "description": "[Output Only] Type of resource. Always compute#diskTypeAggregatedList.", + "default": "compute#diskTypeAggregatedList" + }, + "nextPageToken": { + "type": "string", + "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results." + }, + "selfLink": { + "type": "string", + "description": "[Output Only] Server-defined URL for this resource." + } + } + }, + "DiskTypeList": { + "id": "DiskTypeList", + "type": "object", + "description": "Contains a list of disk type resources.", + "properties": { + "id": { + "type": "string", + "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server." + }, + "items": { + "type": "array", + "description": "[Output Only] A list of Disk Type resources.", + "items": { + "$ref": "DiskType" + } + }, + "kind": { + "type": "string", + "description": "[Output Only] Type of resource. Always compute#diskTypeList for disk types.", + "default": "compute#diskTypeList" + }, + "nextPageToken": { + "type": "string", + "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results." + }, + "selfLink": { + "type": "string", + "description": "[Output Only] Server-defined URL for this resource." 
+ } + } + }, + "DiskTypesScopedList": { + "id": "DiskTypesScopedList", + "type": "object", + "properties": { + "diskTypes": { + "type": "array", + "description": "[Output Only] List of disk types contained in this scope.", + "items": { + "$ref": "DiskType" + } + }, + "warning": { + "type": "object", + "description": "[Output Only] Informational warning which replaces the list of disk types when the list is empty.", + "properties": { + "code": { + "type": "string", + "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "DEPRECATED_RESOURCE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "INJECTED_KERNELS_DEPRECATED", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_NOT_DELETED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ] + }, + "data": { + "type": "array", + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource, and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding)." + }, + "value": { + "type": "string", + "description": "[Output Only] A warning data value corresponding to the key." + } + } + } + }, + "message": { + "type": "string", + "description": "[Output Only] A human-readable description of the warning code." + } + } + } + } + }, + "DisksScopedList": { + "id": "DisksScopedList", + "type": "object", + "properties": { + "disks": { + "type": "array", + "description": "[Output Only] List of disks contained in this scope.", + "items": { + "$ref": "Disk" + } + }, + "warning": { + "type": "object", + "description": "[Output Only] Informational warning which replaces the list of disks when the list is empty.", + "properties": { + "code": { + "type": "string", + "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "DEPRECATED_RESOURCE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "INJECTED_KERNELS_DEPRECATED", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_NOT_DELETED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ] + }, + "data": { + "type": "array", + "description": "[Output Only] Metadata about this warning in key: value format. 
For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource, and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding)." + }, + "value": { + "type": "string", + "description": "[Output Only] A warning data value corresponding to the key." + } + } + } + }, + "message": { + "type": "string", + "description": "[Output Only] A human-readable description of the warning code." + } + } + } + } + }, + "Firewall": { + "id": "Firewall", + "type": "object", + "description": "A Firewall resource.", + "properties": { + "allowed": { + "type": "array", + "description": "The list of rules specified by this firewall. Each rule specifies a protocol and port-range tuple that describes a permitted connection.", + "items": { + "type": "object", + "properties": { + "IPProtocol": { + "type": "string", + "description": "The IP protocol that is allowed for this rule. The protocol type is required when creating a firewall. This value can either be one of the following well known protocol strings (tcp, udp, icmp, esp, ah, sctp), or the IP protocol number." + }, + "ports": { + "type": "array", + "description": "An optional list of ports which are allowed. This field is only applicable for UDP or TCP protocol. Each entry must be either an integer or a range. If not specified, connections through any port are allowed.\n\nExample inputs include: [\"22\"], [\"80\",\"443\"], and [\"12345-12349\"].", + "items": { + "type": "string" + } + } + } + } + }, + "creationTimestamp": { + "type": "string", + "description": "[Output Only] Creation timestamp in RFC3339 text format." + }, + "description": { + "type": "string", + "description": "An optional description of this resource. Provide this property when you create the resource." + }, + "id": { + "type": "string", + "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server.", + "format": "uint64" + }, + "kind": { + "type": "string", + "description": "[Output Only] Type of the resource. Always compute#firewall for firewall rules.", + "default": "compute#firewall" + }, + "name": { + "type": "string", + "description": "Name of the resource; provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression [a-z]([-a-z0-9]*[a-z0-9])? which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "annotations": { + "required": [ + "compute.firewalls.insert", + "compute.firewalls.patch" + ] + } + }, + "network": { + "type": "string", + "description": "URL of the network resource for this firewall rule. 
If not specified when creating a firewall rule, the default network is used:\nglobal/networks/default\nIf you choose to specify this property, you can specify the network as a full or partial URL. For example, the following are all valid URLs: \n- https://www.googleapis.com/compute/v1/projects/myproject/global/networks/my-network \n- projects/myproject/global/networks/my-network \n- global/networks/default" + }, + "selfLink": { + "type": "string", + "description": "[Output Only] Server-defined URL for the resource." + }, + "sourceRanges": { + "type": "array", + "description": "The IP address blocks that this rule applies to, expressed in CIDR format. One or both of sourceRanges and sourceTags may be set.\n\nIf both properties are set, an inbound connection is allowed if the range matches the sourceRanges OR the tag of the source matches the sourceTags property. The connection does not need to match both properties.", + "items": { + "type": "string" + } + }, + "sourceTags": { + "type": "array", + "description": "A list of instance tags which this rule applies to. One or both of sourceRanges and sourceTags may be set.\n\nIf both properties are set, an inbound connection is allowed if the range matches the sourceRanges OR the tag of the source matches the sourceTags property. The connection does not need to match both properties.", + "items": { + "type": "string" + } + }, + "targetTags": { + "type": "array", + "description": "A list of instance tags indicating sets of instances located in the network that may make network connections as specified in allowed[]. If no targetTags are specified, the firewall rule applies to all instances on the specified network.", + "items": { + "type": "string" + } + } + } + }, + "FirewallList": { + "id": "FirewallList", + "type": "object", + "description": "Contains a list of Firewall resources.", + "properties": { + "id": { + "type": "string", + "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server." + }, + "items": { + "type": "array", + "description": "[Output Only] A list of Firewall resources.", + "items": { + "$ref": "Firewall" + } + }, + "kind": { + "type": "string", + "description": "[Output Only] Type of resource. Always compute#firewallList for lists of firewalls.", + "default": "compute#firewallList" + }, + "nextPageToken": { + "type": "string", + "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results." + }, + "selfLink": { + "type": "string", + "description": "[Output Only] Server-defined URL for this resource." + } + } + }, + "ForwardingRule": { + "id": "ForwardingRule", + "type": "object", + "description": "A ForwardingRule resource. A ForwardingRule resource specifies which pool of target virtual machines to forward a packet to if it matches the given [IPAddress, IPProtocol, portRange] tuple.", + "properties": { + "IPAddress": { + "type": "string", + "description": "Value of the reserved IP address that this forwarding rule is serving on behalf of. For global forwarding rules, the address must be a global IP; for regional forwarding rules, the address must live in the same region as the forwarding rule. 
If left empty (default value), an ephemeral IP from the same scope (global or regional) will be assigned." + }, + "IPProtocol": { + "type": "string", + "description": "The IP protocol to which this rule applies. Valid options are TCP, UDP, ESP, AH or SCTP.", + "enum": [ + "AH", + "ESP", + "SCTP", + "TCP", + "UDP" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "" + ] + }, + "creationTimestamp": { + "type": "string", + "description": "[Output Only] Creation timestamp in RFC3339 text format." + }, + "description": { + "type": "string", + "description": "An optional description of this resource. Provide this property when you create the resource." + }, + "id": { + "type": "string", + "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server.", + "format": "uint64" + }, + "kind": { + "type": "string", + "description": "[Output Only] Type of the resource. Always compute#forwardingRule for Forwarding Rule resources.", + "default": "compute#forwardingRule" + }, + "name": { + "type": "string", + "description": "Name of the resource; provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression [a-z]([-a-z0-9]*[a-z0-9])? which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?" + }, + "portRange": { + "type": "string", + "description": "Applicable only when IPProtocol is TCP, UDP, or SCTP. Only packets addressed to ports in the specified range will be forwarded to the target. Forwarding rules with the same [IPAddress, IPProtocol] pair must have disjoint port ranges." + }, + "region": { + "type": "string", + "description": "[Output Only] URL of the region where the regional forwarding rule resides. This field is not applicable to global forwarding rules." + }, + "selfLink": { + "type": "string", + "description": "[Output Only] Server-defined URL for the resource." + }, + "target": { + "type": "string", + "description": "The URL of the target resource to receive the matched traffic. For regional forwarding rules, this target must live in the same region as the forwarding rule. For global forwarding rules, this target must be a global TargetHttpProxy or TargetHttpsProxy resource." + } + } + }, + "ForwardingRuleAggregatedList": { + "id": "ForwardingRuleAggregatedList", + "type": "object", + "properties": { + "id": { + "type": "string", + "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server." + }, + "items": { + "type": "object", + "description": "A map of scoped forwarding rule lists.", + "additionalProperties": { + "$ref": "ForwardingRulesScopedList", + "description": "Name of the scope containing this set of addresses." + } + }, + "kind": { + "type": "string", + "description": "Type of resource.", + "default": "compute#forwardingRuleAggregatedList" + }, + "nextPageToken": { + "type": "string", + "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results."
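[The [IPAddress, IPProtocol, portRange] tuple described above is what a forwarding rule matches on. A hedged sketch of creating a regional TCP rule with the generated Go client; leaving IPAddress unset requests an ephemeral IP, and all names are placeholders:

    // Forward TCP :80 traffic hitting the rule's IP to an existing target
    // pool in the same region.
    // import compute "google.golang.org/api/compute/v1"
    func createForwardingRule(svc *compute.Service, project, region, targetPoolURL string) (*compute.Operation, error) {
        rule := &compute.ForwardingRule{
            Name:       "www-rule", // placeholder
            IPProtocol: "TCP",
            PortRange:  "80-80",
            Target:     targetPoolURL,
        }
        return svc.ForwardingRules.Insert(project, region, rule).Do()
    }
]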
+ }, + "selfLink": { + "type": "string", + "description": "[Output Only] Server-defined URL for this resource." + } + } + }, + "ForwardingRuleList": { + "id": "ForwardingRuleList", + "type": "object", + "description": "Contains a list of ForwardingRule resources.", + "properties": { + "id": { + "type": "string", + "description": "[Output Only] Unique identifier for the resource. Set by the server." + }, + "items": { + "type": "array", + "description": "A list of ForwardingRule resources.", + "items": { + "$ref": "ForwardingRule" + } + }, + "kind": { + "type": "string", + "description": "Type of resource.", + "default": "compute#forwardingRuleList" + }, + "nextPageToken": { + "type": "string", + "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results." + }, + "selfLink": { + "type": "string", + "description": "[Output Only] Server-defined URL for this resource." + } + } + }, + "ForwardingRulesScopedList": { + "id": "ForwardingRulesScopedList", + "type": "object", + "properties": { + "forwardingRules": { + "type": "array", + "description": "List of forwarding rules contained in this scope.", + "items": { + "$ref": "ForwardingRule" + } + }, + "warning": { + "type": "object", + "description": "Informational warning which replaces the list of forwarding rules when the list is empty.", + "properties": { + "code": { + "type": "string", + "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "DEPRECATED_RESOURCE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "INJECTED_KERNELS_DEPRECATED", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_NOT_DELETED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ] + }, + "data": { + "type": "array", + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource, and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding)." + }, + "value": { + "type": "string", + "description": "[Output Only] A warning data value corresponding to the key." + } + } + } + }, + "message": { + "type": "string", + "description": "[Output Only] A human-readable description of the warning code." 
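[The warning object above replaces an empty per-scope list in aggregated responses. A sketch of surfacing it, assuming the generated Go client's nested warning types (the exact nested type names come from the code generator):

    // Print the informational warning for every scope whose forwarding-rule
    // list came back empty in an aggregated list call.
    // import ( "fmt" ; compute "google.golang.org/api/compute/v1" )
    func reportScopeWarnings(svc *compute.Service, project string) error {
        agg, err := svc.ForwardingRules.AggregatedList(project).Do()
        if err != nil {
            return err
        }
        for scope, scoped := range agg.Items {
            if scoped.Warning != nil {
                fmt.Printf("%s: %s (%s)\n", scope, scoped.Warning.Message, scoped.Warning.Code)
            }
        }
        return nil
    }
]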
+ } + } + } + } + }, + "HealthCheckReference": { + "id": "HealthCheckReference", + "type": "object", + "properties": { + "healthCheck": { + "type": "string" + } + } + }, + "HealthStatus": { + "id": "HealthStatus", + "type": "object", + "properties": { + "healthState": { + "type": "string", + "description": "Health state of the instance.", + "enum": [ + "HEALTHY", + "UNHEALTHY" + ], + "enumDescriptions": [ + "", + "" + ] + }, + "instance": { + "type": "string", + "description": "URL of the instance resource." + }, + "ipAddress": { + "type": "string", + "description": "The IP address represented by this resource." + }, + "port": { + "type": "integer", + "description": "The port on the instance.", + "format": "int32" + } + } + }, + "HostRule": { + "id": "HostRule", + "type": "object", + "description": "A host-matching rule for a URL. If matched, the request will use the named PathMatcher to select the BackendService.", + "properties": { + "description": { + "type": "string", + "description": "An optional description of this resource. Provide this property when you create the resource." + }, + "hosts": { + "type": "array", + "description": "The list of host patterns to match. They must be valid hostnames, except * will match any string of ([a-z0-9-.]*). In that case, * must be the first character and must be followed in the pattern by either - or ..", + "items": { + "type": "string" + } + }, + "pathMatcher": { + "type": "string", + "description": "The name of the PathMatcher to use to match the path portion of the URL if the hostRule matches the URL's host portion." + } + } + }, + "HttpHealthCheck": { + "id": "HttpHealthCheck", + "type": "object", + "description": "An HttpHealthCheck resource. This resource defines a template for how individual instances should be checked for health, via HTTP.", + "properties": { + "checkIntervalSec": { + "type": "integer", + "description": "How often (in seconds) to send a health check. The default value is 5 seconds.", + "format": "int32" + }, + "creationTimestamp": { + "type": "string", + "description": "[Output Only] Creation timestamp in RFC3339 text format." + }, + "description": { + "type": "string", + "description": "An optional description of this resource. Provide this property when you create the resource." + }, + "healthyThreshold": { + "type": "integer", + "description": "A so-far unhealthy instance will be marked healthy after this many consecutive successes. The default value is 2.", + "format": "int32" + }, + "host": { + "type": "string", + "description": "The value of the host header in the HTTP health check request. If left empty (default value), the public IP on behalf of which this health check is performed will be used." + }, + "id": { + "type": "string", + "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server.", + "format": "uint64" + }, + "kind": { + "type": "string", + "description": "Type of the resource.", + "default": "compute#httpHealthCheck" + }, + "name": { + "type": "string", + "description": "Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression [a-z]([-a-z0-9]*[a-z0-9])? which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?"
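[An HttpHealthCheck is created like any other collection resource; a hedged sketch with the generated Go client, relying on the schema defaults noted in these properties (5-second interval, thresholds of 2) and a placeholder name:

    // Create an HTTP health check with schema defaults for everything but
    // the name; the server fills in interval, thresholds, port and path.
    // import compute "google.golang.org/api/compute/v1"
    func createHTTPHealthCheck(svc *compute.Service, project string) (*compute.Operation, error) {
        hc := &compute.HttpHealthCheck{Name: "basic-check"} // placeholder name
        return svc.HttpHealthChecks.Insert(project, hc).Do()
    }
]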
+ }, + "port": { + "type": "integer", + "description": "The TCP port number for the HTTP health check request. The default value is 80.", + "format": "int32" + }, + "requestPath": { + "type": "string", + "description": "The request path of the HTTP health check request. The default value is \"/\"." + }, + "selfLink": { + "type": "string", + "description": "[Output Only] Server-defined URL for the resource." + }, + "timeoutSec": { + "type": "integer", + "description": "How long (in seconds) to wait before claiming failure. The default value is 5 seconds. It is invalid for timeoutSec to have a greater value than checkIntervalSec.", + "format": "int32" + }, + "unhealthyThreshold": { + "type": "integer", + "description": "A so-far healthy instance will be marked unhealthy after this many consecutive failures. The default value is 2.", + "format": "int32" + } + } + }, + "HttpHealthCheckList": { + "id": "HttpHealthCheckList", + "type": "object", + "description": "Contains a list of HttpHealthCheck resources.", + "properties": { + "id": { + "type": "string", + "description": "[Output Only] Unique identifier for the resource. Defined by the server." + }, + "items": { + "type": "array", + "description": "A list of HttpHealthCheck resources.", + "items": { + "$ref": "HttpHealthCheck" + } + }, + "kind": { + "type": "string", + "description": "Type of resource.", + "default": "compute#httpHealthCheckList" + }, + "nextPageToken": { + "type": "string", + "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results." + }, + "selfLink": { + "type": "string", + "description": "[Output Only] Server-defined URL for this resource." + } + } + }, + "HttpsHealthCheck": { + "id": "HttpsHealthCheck", + "type": "object", + "description": "An HttpsHealthCheck resource. This resource defines a template for how individual instances should be checked for health, via HTTPS.", + "properties": { + "checkIntervalSec": { + "type": "integer", + "description": "How often (in seconds) to send a health check. The default value is 5 seconds.", + "format": "int32" + }, + "creationTimestamp": { + "type": "string", + "description": "[Output Only] Creation timestamp in RFC3339 text format." + }, + "description": { + "type": "string", + "description": "An optional description of this resource. Provide this property when you create the resource." + }, + "healthyThreshold": { + "type": "integer", + "description": "A so-far unhealthy instance will be marked healthy after this many consecutive successes. The default value is 2.", + "format": "int32" + }, + "host": { + "type": "string", + "description": "The value of the host header in the HTTPS health check request. If left empty (default value), the public IP on behalf of which this health check is performed will be used." + }, + "id": { + "type": "string", + "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server.", + "format": "uint64" + }, + "kind": { + "type": "string", + "description": "Type of the resource.", + "default": "compute#httpsHealthCheck" + }, + "name": { + "type": "string", + "description": "Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. 
Specifically, the name must be 1-63 characters long and match the regular expression [a-z]([-a-z0-9]*[a-z0-9])? which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?" + }, + "port": { + "type": "integer", + "description": "The TCP port number for the HTTPS health check request. The default value is 443.", + "format": "int32" + }, + "requestPath": { + "type": "string", + "description": "The request path of the HTTPS health check request. The default value is \"/\"." + }, + "selfLink": { + "type": "string", + "description": "[Output Only] Server-defined URL for the resource." + }, + "timeoutSec": { + "type": "integer", + "description": "How long (in seconds) to wait before claiming failure. The default value is 5 seconds. It is invalid for timeoutSec to have a greater value than checkIntervalSec.", + "format": "int32" + }, + "unhealthyThreshold": { + "type": "integer", + "description": "A so-far healthy instance will be marked unhealthy after this many consecutive failures. The default value is 2.", + "format": "int32" + } + } + }, + "HttpsHealthCheckList": { + "id": "HttpsHealthCheckList", + "type": "object", + "description": "Contains a list of HttpsHealthCheck resources.", + "properties": { + "id": { + "type": "string", + "description": "[Output Only] Unique identifier for the resource; defined by the server." + }, + "items": { + "type": "array", + "description": "A list of HttpsHealthCheck resources.", + "items": { + "$ref": "HttpsHealthCheck" + } + }, + "kind": { + "type": "string", + "description": "Type of resource.", + "default": "compute#httpsHealthCheckList" + }, + "nextPageToken": { + "type": "string", + "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results." + }, + "selfLink": { + "type": "string", + "description": "[Output Only] Server-defined URL for this resource." + } + } + }, + "Image": { + "id": "Image", + "type": "object", + "description": "An Image resource.", + "properties": { + "archiveSizeBytes": { + "type": "string", + "description": "Size of the image tar.gz archive stored in Google Cloud Storage (in bytes).", + "format": "int64" + }, + "creationTimestamp": { + "type": "string", + "description": "[Output Only] Creation timestamp in RFC3339 text format." + }, + "deprecated": { + "$ref": "DeprecationStatus", + "description": "The deprecation status associated with this image." + }, + "description": { + "type": "string", + "description": "An optional description of this resource. Provide this property when you create the resource." + }, + "diskSizeGb": { + "type": "string", + "description": "Size of the image when restored onto a persistent disk (in GB).", + "format": "int64" + }, + "id": { + "type": "string", + "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server.", + "format": "uint64" + }, + "kind": { + "type": "string", + "description": "[Output Only] Type of the resource. 
Always compute#image for images.", + "default": "compute#image" + }, + "licenses": { + "type": "array", + "description": "Any applicable publicly visible licenses.", + "items": { + "type": "string" + } + }, + "name": { + "type": "string", + "description": "Name of the resource; provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression [a-z]([-a-z0-9]*[a-z0-9])? which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "annotations": { + "required": [ + "compute.images.insert" + ] + } + }, + "rawDisk": { + "type": "object", + "description": "The parameters of the raw disk image.", + "properties": { + "containerType": { + "type": "string", + "description": "The format used to encode and transmit the block device, which should be TAR. This is just a container and transmission format and not a runtime format. Provided by the client when the disk image is created.", + "enum": [ + "TAR" + ], + "enumDescriptions": [ + "" + ] + }, + "sha1Checksum": { + "type": "string", + "description": "An optional SHA1 checksum of the disk image before unpackaging; provided by the client when the disk image is created.", + "pattern": "[a-f0-9]{40}" + }, + "source": { + "type": "string", + "description": "The full Google Cloud Storage URL where the disk image is stored. You must provide either this property or the sourceDisk property but not both.", + "annotations": { + "required": [ + "compute.images.insert" + ] + } + } + } + }, + "selfLink": { + "type": "string", + "description": "[Output Only] Server-defined URL for the resource." + }, + "sourceDisk": { + "type": "string", + "description": "URL of the source disk used to create this image. This can be a full or valid partial URL. You must provide either this property or the rawDisk.source property but not both to create an image. For example, the following are valid values: \n- https://www.googleapis.com/compute/v1/projects/project/zones/zone/disk/disk \n- projects/project/zones/zone/disk/disk \n- zones/zone/disks/disk" + }, + "sourceDiskId": { + "type": "string", + "description": "The ID value of the disk used to create this image. This value may be used to determine whether the image was taken from the current or a previous instance of a given disk name." + }, + "sourceType": { + "type": "string", + "description": "The type of the image used to create this disk. The default and only value is RAW.", + "default": "RAW", + "enum": [ + "RAW" + ], + "enumDescriptions": [ + "" + ] + }, + "status": { + "type": "string", + "description": "[Output Only] The status of the image. An image can be used to create other resources, such as instances, only after the image has been successfully created and the status is set to READY. Possible values are FAILED, PENDING, or READY.", + "enum": [ + "FAILED", + "PENDING", + "READY" + ], + "enumDescriptions": [ + "", + "", + "" + ] + } + } + }, + "ImageList": { + "id": "ImageList", + "type": "object", + "description": "Contains a list of Image resources.", + "properties": { + "id": { + "type": "string", + "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server."
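[sourceDisk and rawDisk.source above are mutually exclusive ways to produce an Image. A hedged sketch of the sourceDisk path with the generated Go client; the schema allows a partial disk URL, and all names are placeholders:

    // Build an image from an existing persistent disk. Exactly one of
    // SourceDisk or RawDisk.Source may be set, per the schema.
    // import compute "google.golang.org/api/compute/v1"
    func createImageFromDisk(svc *compute.Service, project, diskURL string) (*compute.Operation, error) {
        img := &compute.Image{
            Name:       "golden-image", // placeholder
            SourceDisk: diskURL,        // e.g. "zones/us-central1-f/disks/my-disk"
        }
        return svc.Images.Insert(project, img).Do()
    }
]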
+ }, + "items": { + "type": "array", + "description": "[Output Only] A list of Image resources.", + "items": { + "$ref": "Image" + } + }, + "kind": { + "type": "string", + "description": "Type of resource.", + "default": "compute#imageList" + }, + "nextPageToken": { + "type": "string", + "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results." + }, + "selfLink": { + "type": "string", + "description": "[Output Only] Server-defined URL for this resource." + } + } + }, + "Instance": { + "id": "Instance", + "type": "object", + "description": "An Instance resource.", + "properties": { + "canIpForward": { + "type": "boolean", + "description": "Allows this instance to send and receive packets with non-matching destination or source IPs. This is required if you plan to use this instance to forward routes. For more information, see Enabling IP Forwarding." + }, + "cpuPlatform": { + "type": "string", + "description": "[Output Only] The CPU platform used by this instance." + }, + "creationTimestamp": { + "type": "string", + "description": "[Output Only] Creation timestamp in RFC3339 text format." + }, + "description": { + "type": "string", + "description": "An optional textual description of the resource; provided by the client when the resource is created." + }, + "disks": { + "type": "array", + "description": "Array of disks associated with this instance. Persistent disks must be created before you can assign them.", + "items": { + "$ref": "AttachedDisk" + } + }, + "id": { + "type": "string", + "description": "[Output Only] Unique identifier for the resource. This identifier is defined by the server.", + "format": "uint64" + }, + "kind": { + "type": "string", + "description": "[Output Only] Type of the resource. Always compute#instance for instances.", + "default": "compute#instance" + }, + "machineType": { + "type": "string", + "description": "Full or partial URL of the machine type resource to use for this instance, in the format: zones/zone/machineTypes/ machine-type. This is provided by the client when the instance is created. For example, the following is a valid partial URL to a predefined machine type:\n\nzones/us-central1-f/machineTypes/n1-standard-1 \n\nTo create a custom machine type, provide a URL to a machine type in the following format, where CPUS is 1 or an even number up to 32 (2, 4, 6, ... 24, etc), and MEMORY is the total memory for this instance. Memory must be a multiple of 256 MB and must be supplied in MB (e.g. 5 GB of memory is 5120 MB):\n\nzones/zone/machineTypes/custom-CPUS-MEMORY \n\nFor example: zones/us-central1-f/machineTypes/custom-4-5120 \n\nFor a full list of restrictions, read the Specifications for custom machine types.", + "annotations": { + "required": [ + "compute.instances.insert" + ] + } + }, + "metadata": { + "$ref": "Metadata", + "description": "The metadata key/value pairs assigned to this instance. This includes custom metadata and predefined keys." + }, + "name": { + "type": "string", + "description": "Name of the resource; provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression [a-z]([-a-z0-9]*[a-z0-9])? 
which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash." + }, + "networkInterfaces": { + "type": "array", + "description": "An array of configurations for this interface. This specifies how this interface is configured to interact with other network services, such as connecting to the internet.", + "items": { + "$ref": "NetworkInterface" + } + }, + "scheduling": { + "$ref": "Scheduling", + "description": "Scheduling options for this instance." + }, + "selfLink": { + "type": "string", + "description": "[Output Only] Server-defined URL for this resource." + }, + "serviceAccounts": { + "type": "array", + "description": "A list of service accounts, with their specified scopes, authorized for this instance. Service accounts generate access tokens that can be accessed through the metadata server and used to authenticate applications on the instance. See Authenticating from Google Compute Engine for more information.", + "items": { + "$ref": "ServiceAccount" + } + }, + "status": { + "type": "string", + "description": "[Output Only] The status of the instance. One of the following values: PROVISIONING, STAGING, RUNNING, STOPPING, STOPPED, SUSPENDING, SUSPENDED, and TERMINATED.", + "enum": [ + "PROVISIONING", + "RUNNING", + "STAGING", + "STOPPED", + "STOPPING", + "SUSPENDED", + "SUSPENDING", + "TERMINATED" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "" + ] + }, + "statusMessage": { + "type": "string", + "description": "[Output Only] An optional, human-readable explanation of the status." + }, + "tags": { + "$ref": "Tags", + "description": "A list of tags to apply to this instance. Tags are used to identify valid sources or targets for network firewalls and are specified by the client during instance creation. The tags can be later modified by the setTags method. Each tag within the list must comply with RFC1035." + }, + "zone": { + "type": "string", + "description": "[Output Only] URL of the zone where the instance resides." + } + } + }, + "InstanceAggregatedList": { + "id": "InstanceAggregatedList", + "type": "object", + "properties": { + "id": { + "type": "string", + "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server." + }, + "items": { + "type": "object", + "description": "[Output Only] A map of scoped instance lists.", + "additionalProperties": { + "$ref": "InstancesScopedList", + "description": "Name of the scope containing this set of instances." + } + }, + "kind": { + "type": "string", + "description": "[Output Only] Type of resource. Always compute#instanceAggregatedList for aggregated lists of Instance resources.", + "default": "compute#instanceAggregatedList" + }, + "nextPageToken": { + "type": "string", + "description": "[Output Only] A token used to continue a truncated list request." + }, + "selfLink": { + "type": "string", + "description": "[Output Only] Server-defined URL for this resource." + } + } + }, + "InstanceGroup": { + "id": "InstanceGroup", + "type": "object", + "properties": { + "creationTimestamp": { + "type": "string", + "description": "[Output Only] The creation timestamp for this instance group in RFC3339 text format." + }, + "description": { + "type": "string", + "description": "An optional description of this resource. Provide this property when you create the resource." + }, + "fingerprint": { + "type": "string", + "description": "[Output Only] The fingerprint of the named ports. 
The system uses this fingerprint to detect conflicts when multiple users change the named ports concurrently.", + "format": "byte" + }, + "id": { + "type": "string", + "description": "[Output Only] A unique identifier for this resource type. The server generates this identifier.", + "format": "uint64" + }, + "kind": { + "type": "string", + "description": "[Output Only] The resource type, which is always compute#instanceGroup for instance groups.", + "default": "compute#instanceGroup" + }, + "name": { + "type": "string", + "description": "The name of the instance group. The name must be 1-63 characters long, and comply with RFC1035.", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "annotations": { + "required": [ + "compute.instanceGroupManagers.insert" + ] + } + }, + "namedPorts": { + "type": "array", + "description": "Assigns a name to a port number. For example: {name: \"http\", port: 80}\n\nThis allows the system to reference ports by the assigned name instead of a port number. Named ports can also contain multiple ports. For example: [{name: \"http\", port: 80},{name: \"http\", port: 8080}] \n\nNamed ports apply to all instances in this instance group.", + "items": { + "$ref": "NamedPort" + } + }, + "network": { + "type": "string", + "description": "[Output Only] The URL of the network to which all instances in the instance group belong." + }, + "selfLink": { + "type": "string", + "description": "[Output Only] The URL for this instance group. The server generates this URL." + }, + "size": { + "type": "integer", + "description": "[Output Only] The total number of instances in the instance group.", + "format": "int32" + }, + "zone": { + "type": "string", + "description": "[Output Only] The URL of the zone where the instance group is located." + } + } + }, + "InstanceGroupAggregatedList": { + "id": "InstanceGroupAggregatedList", + "type": "object", + "properties": { + "id": { + "type": "string", + "description": "[Output Only] A unique identifier for this aggregated list of instance groups. The server generates this identifier." + }, + "items": { + "type": "object", + "description": "A map of scoped instance group lists.", + "additionalProperties": { + "$ref": "InstanceGroupsScopedList", + "description": "The name of the scope that contains this set of instance groups." + } + }, + "kind": { + "type": "string", + "description": "[Output Only] The resource type, which is always compute#instanceGroupAggregatedList for aggregated lists of instance groups.", + "default": "compute#instanceGroupAggregatedList" + }, + "nextPageToken": { + "type": "string", + "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results." + }, + "selfLink": { + "type": "string", + "description": "[Output Only] The URL for this resource type. The server generates this URL." + } + } + }, + "InstanceGroupList": { + "id": "InstanceGroupList", + "type": "object", + "description": "A list of InstanceGroup resources.", + "properties": { + "id": { + "type": "string", + "description": "[Output Only] A unique identifier for this list of instance groups. The server generates this identifier." 
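[The namedPorts mapping above lets services address group ports by name instead of number. A hedged sketch of creating an unmanaged instance group carrying the {name: "http", port: 80} example from the schema, via the generated Go client:

    // Create an instance group whose members expose port 80 under the
    // name "http"; the group name is a placeholder.
    // import compute "google.golang.org/api/compute/v1"
    func createInstanceGroup(svc *compute.Service, project, zone string) (*compute.Operation, error) {
        ig := &compute.InstanceGroup{
            Name: "web-group",
            NamedPorts: []*compute.NamedPort{
                {Name: "http", Port: 80},
            },
        }
        return svc.InstanceGroups.Insert(project, zone, ig).Do()
    }
]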
+ }, + "items": { + "type": "array", + "description": "A list of instance groups.", + "items": { + "$ref": "InstanceGroup" + } + }, + "kind": { + "type": "string", + "description": "[Output Only] The resource type, which is always compute#instanceGroupList for instance group lists.", + "default": "compute#instanceGroupList" + }, + "nextPageToken": { + "type": "string", + "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results." + }, + "selfLink": { + "type": "string", + "description": "[Output Only] The URL for this resource type. The server generates this URL." + } + } + }, + "InstanceGroupManager": { + "id": "InstanceGroupManager", + "type": "object", + "description": "An Instance Group Manager resource.", + "properties": { + "baseInstanceName": { + "type": "string", + "description": "The base instance name to use for instances in this group. The value must be 1-58 characters long. Instances are named by appending a hyphen and a random four-character string to the base instance name. The base instance name must comply with RFC1035.", + "pattern": "[a-z][-a-z0-9]{0,57}", + "annotations": { + "required": [ + "compute.instanceGroupManagers.insert" + ] + } + }, + "creationTimestamp": { + "type": "string", + "description": "[Output Only] The creation timestamp for this managed instance group in RFC3339 text format." + }, + "currentActions": { + "$ref": "InstanceGroupManagerActionsSummary", + "description": "[Output Only] The list of instance actions and the number of instances in this managed instance group that are scheduled for each of those actions." + }, + "description": { + "type": "string", + "description": "An optional description of this resource. Provide this property when you create the resource." + }, + "fingerprint": { + "type": "string", + "description": "[Output Only] The fingerprint of the target pools information. You can use this optional field for optimistic locking when you update the target pool entries.", + "format": "byte" + }, + "id": { + "type": "string", + "description": "[Output Only] A unique identifier for this resource type. The server generates this identifier.", + "format": "uint64" + }, + "instanceGroup": { + "type": "string", + "description": "[Output Only] The URL of the Instance Group resource." + }, + "instanceTemplate": { + "type": "string", + "description": "The URL of the instance template that is specified for this managed instance group. The group uses this template to create all new instances in the managed instance group.", + "annotations": { + "required": [ + "compute.instanceGroupManagers.insert" + ] + } + }, + "kind": { + "type": "string", + "description": "[Output Only] The resource type, which is always compute#instanceGroupManager for managed instance groups.", + "default": "compute#instanceGroupManager" + }, + "name": { + "type": "string", + "description": "The name of the managed instance group. 
The name must be 1-63 characters long, and comply with RFC1035.", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "annotations": { + "required": [ + "compute.instanceGroupManagers.insert" + ] + } + }, + "namedPorts": { + "type": "array", + "description": "Named ports configured for the Instance Groups complementary to this Instance Group Manager.", + "items": { + "$ref": "NamedPort" + } + }, + "selfLink": { + "type": "string", + "description": "[Output Only] The URL for this managed instance group. The server defines this URL." + }, + "targetPools": { + "type": "array", + "description": "The URLs for all TargetPool resources to which instances in the instanceGroup field are added. The target pools automatically apply to all of the instances in the managed instance group.", + "items": { + "type": "string" + } + }, + "targetSize": { + "type": "integer", + "description": "The target number of running instances for this managed instance group. Deleting or abandoning instances reduces this number. Resizing the group changes this number.", + "format": "int32", + "annotations": { + "required": [ + "compute.instanceGroupManagers.insert" + ] + } + }, + "zone": { + "type": "string", + "description": "The name of the zone where the managed instance group is located." + } + } + }, + "InstanceGroupManagerActionsSummary": { + "id": "InstanceGroupManagerActionsSummary", + "type": "object", + "properties": { + "abandoning": { + "type": "integer", + "description": "[Output Only] The total number of instances in the managed instance group that are scheduled to be abandoned. Abandoning an instance removes it from the managed instance group without deleting it.", + "format": "int32" + }, + "creating": { + "type": "integer", + "description": "[Output Only] The number of instances in the managed instance group that are scheduled to be created or are currently being created.", + "format": "int32" + }, + "deleting": { + "type": "integer", + "description": "[Output Only] The number of instances in the managed instance group that are scheduled to be deleted or are currently being deleted.", + "format": "int32" + }, + "none": { + "type": "integer", + "description": "[Output Only] The number of instances in the managed instance group that are running and have no scheduled actions.", + "format": "int32" + }, + "recreating": { + "type": "integer", + "description": "[Output Only] The number of instances in the managed instance group that are scheduled to be recreated or are currently being recreated. Recreating an instance deletes the existing root persistent disk and creates a new disk from the image that is defined in the instance template.", + "format": "int32" + }, + "refreshing": { + "type": "integer", + "description": "[Output Only] The number of instances in the managed instance group that are being reconfigured with properties that do not require a restart or a recreate action. For example, setting or removing target pools for the instance.", + "format": "int32" + }, + "restarting": { + "type": "integer", + "description": "[Output Only] The number of instances in the managed instance group that are scheduled to be restarted or are currently being restarted.", + "format": "int32" + } + } + }, + "InstanceGroupManagerAggregatedList": { + "id": "InstanceGroupManagerAggregatedList", + "type": "object", + "properties": { + "id": { + "type": "string", + "description": "[Output Only] A unique identifier for this aggregated list of managed instance groups. The server generates this identifier."
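[The annotations above mark name, baseInstanceName, instanceTemplate and targetSize as required when inserting a managed instance group. A sketch under those assumptions; the template URL and names are placeholders:

    // Create a managed instance group of three instances stamped from an
    // instance template; per the schema, each instance name is the base
    // name plus a hyphen and a random four-character suffix.
    // import compute "google.golang.org/api/compute/v1"
    func createManagedGroup(svc *compute.Service, project, zone, templateURL string) (*compute.Operation, error) {
        igm := &compute.InstanceGroupManager{
            Name:             "web-igm", // placeholder
            BaseInstanceName: "web",     // instances become web-xxxx
            InstanceTemplate: templateURL,
            TargetSize:       3,
        }
        return svc.InstanceGroupManagers.Insert(project, zone, igm).Do()
    }
]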
+ }, + "items": { + "type": "object", + "description": "[Output Only] A map of filtered managed instance group lists.", + "additionalProperties": { + "$ref": "InstanceGroupManagersScopedList", + "description": "[Output Only] The name of the scope that contains this set of managed instance groups." + } + }, + "kind": { + "type": "string", + "description": "[Output Only] The resource type, which is always compute#instanceGroupManagerAggregatedList for an aggregated list of managed instance groups.", + "default": "compute#instanceGroupManagerAggregatedList" + }, + "nextPageToken": { + "type": "string", + "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results." + }, + "selfLink": { + "type": "string", + "description": "[Output Only] The URL for this resource type. The server generates this URL." + } + } + }, + "InstanceGroupManagerList": { + "id": "InstanceGroupManagerList", + "type": "object", + "description": "[Output Only] A list of managed instance groups.", + "properties": { + "id": { + "type": "string", + "description": "[Output Only] A unique identifier for this resource type. The server generates this identifier." + }, + "items": { + "type": "array", + "description": "[Output Only] A list of managed instance groups.", + "items": { + "$ref": "InstanceGroupManager" + } + }, + "kind": { + "type": "string", + "description": "[Output Only] The resource type, which is always compute#instanceGroupManagerList for a list of managed instance groups.", + "default": "compute#instanceGroupManagerList" + }, + "nextPageToken": { + "type": "string", + "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results." + }, + "selfLink": { + "type": "string", + "description": "[Output Only] The URL for this resource type. The server generates this URL." + } + } + }, + "InstanceGroupManagersAbandonInstancesRequest": { + "id": "InstanceGroupManagersAbandonInstancesRequest", + "type": "object", + "properties": { + "instances": { + "type": "array", + "description": "The URL for one or more instances to abandon from the managed instance group.", + "items": { + "type": "string" + } + } + } + }, + "InstanceGroupManagersDeleteInstancesRequest": { + "id": "InstanceGroupManagersDeleteInstancesRequest", + "type": "object", + "properties": { + "instances": { + "type": "array", + "description": "The list of instances to delete from this managed instance group. 
Specify one or more instance URLs.", + "items": { + "type": "string" + } + } + } + }, + "InstanceGroupManagersListManagedInstancesResponse": { + "id": "InstanceGroupManagersListManagedInstancesResponse", + "type": "object", + "properties": { + "managedInstances": { + "type": "array", + "description": "[Output Only] The list of instances in the managed instance group.", + "items": { + "$ref": "ManagedInstance" + } + } + } + }, + "InstanceGroupManagersRecreateInstancesRequest": { + "id": "InstanceGroupManagersRecreateInstancesRequest", + "type": "object", + "properties": { + "instances": { + "type": "array", + "description": "The URL for one or more instances to recreate.", + "items": { + "type": "string" + } + } + } + }, + "InstanceGroupManagersScopedList": { + "id": "InstanceGroupManagersScopedList", + "type": "object", + "properties": { + "instanceGroupManagers": { + "type": "array", + "description": "[Output Only] The list of managed instance groups that are contained in the specified project and zone.", + "items": { + "$ref": "InstanceGroupManager" + } + }, + "warning": { + "type": "object", + "description": "[Output Only] The warning that replaces the list of managed instance groups when the list is empty.", + "properties": { + "code": { + "type": "string", + "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "DEPRECATED_RESOURCE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "INJECTED_KERNELS_DEPRECATED", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_NOT_DELETED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ] + }, + "data": { + "type": "array", + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource, and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding)." + }, + "value": { + "type": "string", + "description": "[Output Only] A warning data value corresponding to the key." + } + } + } + }, + "message": { + "type": "string", + "description": "[Output Only] A human-readable description of the warning code." + } + } + } + } + }, + "InstanceGroupManagersSetInstanceTemplateRequest": { + "id": "InstanceGroupManagersSetInstanceTemplateRequest", + "type": "object", + "properties": { + "instanceTemplate": { + "type": "string", + "description": "The URL of the instance template that is specified for this managed instance group. The group uses this template to create all new instances in the managed instance group." 
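Every list schema here (InstanceGroupList, InstanceGroupManagerList, the aggregated lists) repeats the same paging contract: pass the returned nextPageToken back as the pageToken query parameter until it comes back empty. A minimal Go sketch of that loop against the raw REST endpoint; the listAllManagers name, base URL parameter, and bearer-token auth are illustrative assumptions, not part of this patch:

```go
package example

import (
	"encoding/json"
	"fmt"
	"net/http"
	"net/url"
)

// instanceGroupManagerList mirrors only the InstanceGroupManagerList fields
// that the paging loop needs.
type instanceGroupManagerList struct {
	Items []struct {
		Name       string `json:"name"`
		TargetSize int32  `json:"targetSize"`
	} `json:"items"`
	NextPageToken string `json:"nextPageToken"`
}

// listAllManagers walks every page of a list endpoint, echoing each returned
// nextPageToken back as the pageToken query parameter.
func listAllManagers(base, authToken string) error {
	pageToken := ""
	for {
		u, err := url.Parse(base)
		if err != nil {
			return err
		}
		q := u.Query()
		if pageToken != "" {
			q.Set("pageToken", pageToken)
		}
		u.RawQuery = q.Encode()

		req, err := http.NewRequest("GET", u.String(), nil)
		if err != nil {
			return err
		}
		req.Header.Set("Authorization", "Bearer "+authToken)

		resp, err := http.DefaultClient.Do(req)
		if err != nil {
			return err
		}
		var page instanceGroupManagerList
		err = json.NewDecoder(resp.Body).Decode(&page)
		resp.Body.Close()
		if err != nil {
			return err
		}
		for _, m := range page.Items {
			fmt.Println(m.Name, m.TargetSize)
		}
		// An empty nextPageToken is the only stop condition the schema defines.
		if page.NextPageToken == "" {
			return nil
		}
		pageToken = page.NextPageToken
	}
}
```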
+ } + } + }, + "InstanceGroupManagersSetTargetPoolsRequest": { + "id": "InstanceGroupManagersSetTargetPoolsRequest", + "type": "object", + "properties": { + "fingerprint": { + "type": "string", + "description": "The fingerprint of the target pools information. Use this optional property to prevent conflicts when multiple users change the target pools settings concurrently. Obtain the fingerprint with the instanceGroupManagers.get method. Then, include the fingerprint in your request to ensure that you do not overwrite changes that were applied from another concurrent request.", + "format": "byte" + }, + "targetPools": { + "type": "array", + "description": "The list of target pool URLs that instances in this managed instance group belong to. The managed instance group applies these target pools to all of the instances in the group. Existing instances and new instances in the group all receive these target pool settings.", + "items": { + "type": "string" + } + } + } + }, + "InstanceGroupsAddInstancesRequest": { + "id": "InstanceGroupsAddInstancesRequest", + "type": "object", + "properties": { + "instances": { + "type": "array", + "description": "The list of instances to add to the instance group.", + "items": { + "$ref": "InstanceReference" + } + } + } + }, + "InstanceGroupsListInstances": { + "id": "InstanceGroupsListInstances", + "type": "object", + "properties": { + "id": { + "type": "string", + "description": "[Output Only] A unique identifier for this list of instance groups. The server generates this identifier." + }, + "items": { + "type": "array", + "description": "[Output Only] A list of instances and any named ports that are assigned to those instances.", + "items": { + "$ref": "InstanceWithNamedPorts" + } + }, + "kind": { + "type": "string", + "description": "[Output Only] The resource type, which is always compute#instanceGroupsListInstances for lists of instance groups.", + "default": "compute#instanceGroupsListInstances" + }, + "nextPageToken": { + "type": "string", + "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results." + }, + "selfLink": { + "type": "string", + "description": "[Output Only] The URL for this list of instance groups. The server generates this URL." + } + } + }, + "InstanceGroupsListInstancesRequest": { + "id": "InstanceGroupsListInstancesRequest", + "type": "object", + "properties": { + "instanceState": { + "type": "string", + "description": "A filter for the state of the instances in the instance group. Valid options are ALL or RUNNING. 
If you do not specify this parameter the list includes all instances regardless of their state.", + "enum": [ + "ALL", + "RUNNING" + ], + "enumDescriptions": [ + "", + "" + ] + } + } + }, + "InstanceGroupsRemoveInstancesRequest": { + "id": "InstanceGroupsRemoveInstancesRequest", + "type": "object", + "properties": { + "instances": { + "type": "array", + "description": "The list of instances to remove from the instance group.", + "items": { + "$ref": "InstanceReference" + } + } + } + }, + "InstanceGroupsScopedList": { + "id": "InstanceGroupsScopedList", + "type": "object", + "properties": { + "instanceGroups": { + "type": "array", + "description": "[Output Only] The list of instance groups that are contained in this scope.", + "items": { + "$ref": "InstanceGroup" + } + }, + "warning": { + "type": "object", + "description": "[Output Only] An informational warning that replaces the list of instance groups when the list is empty.", + "properties": { + "code": { + "type": "string", + "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "DEPRECATED_RESOURCE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "INJECTED_KERNELS_DEPRECATED", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_NOT_DELETED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ] + }, + "data": { + "type": "array", + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource, and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding)." + }, + "value": { + "type": "string", + "description": "[Output Only] A warning data value corresponding to the key." + } + } + } + }, + "message": { + "type": "string", + "description": "[Output Only] A human-readable description of the warning code." + } + } + } + } + }, + "InstanceGroupsSetNamedPortsRequest": { + "id": "InstanceGroupsSetNamedPortsRequest", + "type": "object", + "properties": { + "fingerprint": { + "type": "string", + "description": "The fingerprint of the named ports information for this instance group. Use this optional property to prevent conflicts when multiple users change the named ports settings concurrently. Obtain the fingerprint with the instanceGroups.get method. 
Then, include the fingerprint in your request to ensure that you do not overwrite changes that were applied from another concurrent request.", + "format": "byte" + }, + "namedPorts": { + "type": "array", + "description": "The list of named ports to set for this instance group.", + "items": { + "$ref": "NamedPort" + } + } + } + }, + "InstanceList": { + "id": "InstanceList", + "type": "object", + "description": "Contains a list of instance resources.", + "properties": { + "id": { + "type": "string", + "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server." + }, + "items": { + "type": "array", + "description": "[Output Only] A list of Instance resources.", + "items": { + "$ref": "Instance" + } + }, + "kind": { + "type": "string", + "description": "[Output Only] Type of resource. Always compute#instanceList for lists of Instance resources.", + "default": "compute#instanceList" + }, + "nextPageToken": { + "type": "string", + "description": "[Output Only] A token used to continue a truncated list request." + }, + "selfLink": { + "type": "string", + "description": "[Output Only] Server defined URL for this resource." + } + } + }, + "InstanceMoveRequest": { + "id": "InstanceMoveRequest", + "type": "object", + "properties": { + "destinationZone": { + "type": "string", + "description": "The URL of the destination zone to move the instance to. This can be a full or partial URL. For example, the following are all valid URLs to a zone: \n- https://www.googleapis.com/compute/v1/projects/project/zones/zone \n- projects/project/zones/zone \n- zones/zone" + }, + "targetInstance": { + "type": "string", + "description": "The URL of the target instance to move. This can be a full or partial URL. For example, the following are all valid URLs to an instance: \n- https://www.googleapis.com/compute/v1/projects/project/zones/zone/instances/instance \n- projects/project/zones/zone/instances/instance \n- zones/zone/instances/instance" + } + } + }, + "InstanceProperties": { + "id": "InstanceProperties", + "type": "object", + "description": "", + "properties": { + "canIpForward": { + "type": "boolean", + "description": "Enables instances created based on this template to send packets with source IP addresses other than their own and receive packets with destination IP addresses other than their own. If these instances will be used as an IP gateway or it will be set as the next-hop in a Route resource, specify true. If unsure, leave this set to false. See the canIpForward documentation for more information." + }, + "description": { + "type": "string", + "description": "An optional text description for the instances that are created from this instance template." + }, + "disks": { + "type": "array", + "description": "An array of disks that are associated with the instances that are created from this template.", + "items": { + "$ref": "AttachedDisk" + } + }, + "machineType": { + "type": "string", + "description": "The machine type to use for instances that are created from this template.", + "annotations": { + "required": [ + "compute.instanceTemplates.insert" + ] + } + }, + "metadata": { + "$ref": "Metadata", + "description": "The metadata key/value pairs to assign to instances that are created from this template. These pairs can consist of custom metadata or predefined keys. See Project and instance metadata for more information." 
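The fingerprint fields in the two requests above (and in Metadata below) describe one optimistic-locking handshake: read the resource, echo its fingerprint back with the mutation, and retry from a fresh read if the server rejects it as stale. A sketch of that read-modify-write cycle in Go, assuming the InstanceGroup resource exposes the fingerprint that instanceGroups.get is said to return, and assuming a stale fingerprint surfaces as HTTP 412 (the schema does not name the status code):

```go
package example

import (
	"bytes"
	"encoding/json"
	"fmt"
	"net/http"
)

type namedPort struct {
	Name string `json:"name"`
	Port int32  `json:"port"`
}

// setNamedPorts reads the group's current fingerprint, then echoes it back
// with the new named ports so that a concurrent writer makes this request
// fail instead of being silently overwritten.
func setNamedPorts(client *http.Client, groupURL string, ports []namedPort) error {
	// Step 1: instanceGroups.get, to obtain the current fingerprint.
	resp, err := client.Get(groupURL)
	if err != nil {
		return err
	}
	var group struct {
		Fingerprint string `json:"fingerprint"`
	}
	err = json.NewDecoder(resp.Body).Decode(&group)
	resp.Body.Close()
	if err != nil {
		return err
	}

	// Step 2: instanceGroups.setNamedPorts, carrying the fingerprint.
	body, err := json.Marshal(map[string]interface{}{
		"fingerprint": group.Fingerprint,
		"namedPorts":  ports,
	})
	if err != nil {
		return err
	}
	resp2, err := client.Post(groupURL+"/setNamedPorts", "application/json", bytes.NewReader(body))
	if err != nil {
		return err
	}
	defer resp2.Body.Close()
	// Assumption: a stale fingerprint surfaces as 412 Precondition Failed;
	// the right response is to re-read the group and retry, not resubmit.
	if resp2.StatusCode == http.StatusPreconditionFailed {
		return fmt.Errorf("fingerprint is stale: re-read %s and retry", groupURL)
	}
	if resp2.StatusCode != http.StatusOK {
		return fmt.Errorf("setNamedPorts: unexpected status %s", resp2.Status)
	}
	return nil
}
```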
+ }, + "networkInterfaces": { + "type": "array", + "description": "An array of network access configurations for this interface.", + "items": { + "$ref": "NetworkInterface" + } + }, + "scheduling": { + "$ref": "Scheduling", + "description": "Specifies the scheduling options for the instances that are created from this template." + }, + "serviceAccounts": { + "type": "array", + "description": "A list of service accounts with specified scopes. Access tokens for these service accounts are available to the instances that are created from this template. Use metadata queries to obtain the access tokens for these instances.", + "items": { + "$ref": "ServiceAccount" + } + }, + "tags": { + "$ref": "Tags", + "description": "A list of tags to apply to the instances that are created from this template. The tags identify valid sources or targets for network firewalls. The setTags method can modify this list of tags. Each tag within the list must comply with RFC1035." + } + } + }, + "InstanceReference": { + "id": "InstanceReference", + "type": "object", + "properties": { + "instance": { + "type": "string", + "description": "The URL for a specific instance." + } + } + }, + "InstanceTemplate": { + "id": "InstanceTemplate", + "type": "object", + "description": "An Instance Template resource.", + "properties": { + "creationTimestamp": { + "type": "string", + "description": "[Output Only] The creation timestamp for this instance template in RFC3339 text format." + }, + "description": { + "type": "string", + "description": "An optional description of this resource. Provide this property when you create the resource." + }, + "id": { + "type": "string", + "description": "[Output Only] A unique identifier for this instance template. The server defines this identifier.", + "format": "uint64" + }, + "kind": { + "type": "string", + "description": "[Output Only] The resource type, which is always compute#instanceTemplate for instance templates.", + "default": "compute#instanceTemplate" + }, + "name": { + "type": "string", + "description": "Name of the resource; provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression [a-z]([-a-z0-9]*[a-z0-9])? which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "annotations": { + "required": [ + "compute.instanceTemplates.insert" + ] + } + }, + "properties": { + "$ref": "InstanceProperties", + "description": "The instance properties for this instance template." + }, + "selfLink": { + "type": "string", + "description": "[Output Only] The URL for this instance template. The server defines this URL." + } + } + }, + "InstanceTemplateList": { + "id": "InstanceTemplateList", + "type": "object", + "description": "A list of instance templates.", + "properties": { + "id": { + "type": "string", + "description": "[Output Only] A unique identifier for this instance template. The server defines this identifier." 
+ }, + "items": { + "type": "array", + "description": "[Output Only] list of InstanceTemplate resources.", + "items": { + "$ref": "InstanceTemplate" + } + }, + "kind": { + "type": "string", + "description": "[Output Only] The resource type, which is always compute#instanceTemplatesListResponse for instance template lists.", + "default": "compute#instanceTemplateList" + }, + "nextPageToken": { + "type": "string", + "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results." + }, + "selfLink": { + "type": "string", + "description": "[Output Only] The URL for this instance template list. The server defines this URL." + } + } + }, + "InstanceWithNamedPorts": { + "id": "InstanceWithNamedPorts", + "type": "object", + "properties": { + "instance": { + "type": "string", + "description": "[Output Only] The URL of the instance." + }, + "namedPorts": { + "type": "array", + "description": "[Output Only] The named ports that belong to this instance group.", + "items": { + "$ref": "NamedPort" + } + }, + "status": { + "type": "string", + "description": "[Output Only] The status of the instance.", + "enum": [ + "PROVISIONING", + "RUNNING", + "STAGING", + "STOPPED", + "STOPPING", + "SUSPENDED", + "SUSPENDING", + "TERMINATED" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "" + ] + } + } + }, + "InstancesScopedList": { + "id": "InstancesScopedList", + "type": "object", + "properties": { + "instances": { + "type": "array", + "description": "[Output Only] List of instances contained in this scope.", + "items": { + "$ref": "Instance" + } + }, + "warning": { + "type": "object", + "description": "[Output Only] Informational warning which replaces the list of instances when the list is empty.", + "properties": { + "code": { + "type": "string", + "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "DEPRECATED_RESOURCE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "INJECTED_KERNELS_DEPRECATED", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_NOT_DELETED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ] + }, + "data": { + "type": "array", + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource, and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding)." 
+ }, + "value": { + "type": "string", + "description": "[Output Only] A warning data value corresponding to the key." + } + } + } + }, + "message": { + "type": "string", + "description": "[Output Only] A human-readable description of the warning code." + } + } + } + } + }, + "License": { + "id": "License", + "type": "object", + "description": "A license resource.", + "properties": { + "chargesUseFee": { + "type": "boolean", + "description": "[Output Only] If true, the customer will be charged license fee for running software that contains this license on an instance." + }, + "kind": { + "type": "string", + "description": "[Output Only] Type of resource. Always compute#license for licenses.", + "default": "compute#license" + }, + "name": { + "type": "string", + "description": "[Output Only] Name of the resource. The name is 1-63 characters long and complies with RFC1035.", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "annotations": { + "required": [ + "compute.images.insert" + ] + } + }, + "selfLink": { + "type": "string", + "description": "[Output Only] Server-defined URL for the resource." + } + } + }, + "MachineType": { + "id": "MachineType", + "type": "object", + "description": "A Machine Type resource.", + "properties": { + "creationTimestamp": { + "type": "string", + "description": "[Output Only] Creation timestamp in RFC3339 text format." + }, + "deprecated": { + "$ref": "DeprecationStatus", + "description": "[Output Only] The deprecation status associated with this machine type." + }, + "description": { + "type": "string", + "description": "[Output Only] An optional textual description of the resource." + }, + "guestCpus": { + "type": "integer", + "description": "[Output Only] The number of virtual CPUs that are available to the instance.", + "format": "int32" + }, + "id": { + "type": "string", + "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server.", + "format": "uint64" + }, + "imageSpaceGb": { + "type": "integer", + "description": "[Deprecated] This property is deprecated and will never be populated with any relevant values.", + "format": "int32" + }, + "kind": { + "type": "string", + "description": "[Output Only] The type of the resource. Always compute#machineType for machine types.", + "default": "compute#machineType" + }, + "maximumPersistentDisks": { + "type": "integer", + "description": "[Output Only] Maximum persistent disks allowed.", + "format": "int32" + }, + "maximumPersistentDisksSizeGb": { + "type": "string", + "description": "[Output Only] Maximum total persistent disks size (GB) allowed.", + "format": "int64" + }, + "memoryMb": { + "type": "integer", + "description": "[Output Only] The amount of physical memory available to the instance, defined in MB.", + "format": "int32" + }, + "name": { + "type": "string", + "description": "[Output Only] Name of the resource.", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?" + }, + "scratchDisks": { + "type": "array", + "description": "[Output Only] List of extended scratch disks assigned to the instance.", + "items": { + "type": "object", + "properties": { + "diskGb": { + "type": "integer", + "description": "Size of the scratch disk, defined in GB.", + "format": "int32" + } + } + } + }, + "selfLink": { + "type": "string", + "description": "[Output Only] Server-defined URL for the resource." + }, + "zone": { + "type": "string", + "description": "[Output Only] The name of the zone where the machine type resides, such as us-central1-a." 
+ } + } + }, + "MachineTypeAggregatedList": { + "id": "MachineTypeAggregatedList", + "type": "object", + "properties": { + "id": { + "type": "string", + "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server." + }, + "items": { + "type": "object", + "description": "[Output Only] A map of scoped machine type lists.", + "additionalProperties": { + "$ref": "MachineTypesScopedList", + "description": "[Output Only] Name of the scope containing this set of machine types." + } + }, + "kind": { + "type": "string", + "description": "[Output Only] Type of resource. Always compute#machineTypeAggregatedList for aggregated lists of machine types.", + "default": "compute#machineTypeAggregatedList" + }, + "nextPageToken": { + "type": "string", + "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results." + }, + "selfLink": { + "type": "string", + "description": "[Output Only] Server-defined URL for this resource." + } + } + }, + "MachineTypeList": { + "id": "MachineTypeList", + "type": "object", + "description": "Contains a list of Machine Type resources.", + "properties": { + "id": { + "type": "string", + "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server." + }, + "items": { + "type": "array", + "description": "[Output Only] A list of Machine Type resources.", + "items": { + "$ref": "MachineType" + } + }, + "kind": { + "type": "string", + "description": "[Output Only] Type of resource. Always compute#machineTypeList for lists of machine types.", + "default": "compute#machineTypeList" + }, + "nextPageToken": { + "type": "string", + "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results." + }, + "selfLink": { + "type": "string", + "description": "[Output Only] Server-defined URL for this resource." + } + } + }, + "MachineTypesScopedList": { + "id": "MachineTypesScopedList", + "type": "object", + "properties": { + "machineTypes": { + "type": "array", + "description": "[Output Only] List of machine types contained in this scope.", + "items": { + "$ref": "MachineType" + } + }, + "warning": { + "type": "object", + "description": "[Output Only] An informational warning that appears when the machine types list is empty.", + "properties": { + "code": { + "type": "string", + "description": "[Output Only] A warning code, if applicable. 
For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "DEPRECATED_RESOURCE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "INJECTED_KERNELS_DEPRECATED", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_NOT_DELETED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ] + }, + "data": { + "type": "array", + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource, and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding)." + }, + "value": { + "type": "string", + "description": "[Output Only] A warning data value corresponding to the key." + } + } + } + }, + "message": { + "type": "string", + "description": "[Output Only] A human-readable description of the warning code." + } + } + } + } + }, + "ManagedInstance": { + "id": "ManagedInstance", + "type": "object", + "properties": { + "currentAction": { + "type": "string", + "description": "[Output Only] The current action that the managed instance group has scheduled for the instance. Possible values: \n- NONE The instance is running, and the managed instance group does not have any scheduled actions for this instance. \n- CREATING The managed instance group is creating this instance. \n- RECREATING The managed instance group is recreating this instance. \n- DELETING The managed instance group is permanently deleting this instance. \n- ABANDONING The managed instance group is abandoning this instance. The instance will be removed from the instance group and from any target pools that are associated with this group. \n- RESTARTING The managed instance group is restarting the instance. \n- REFRESHING The managed instance group is applying configuration changes to the instance without stopping it. For example, the group can update the target pool list for an instance without stopping that instance.", + "enum": [ + "ABANDONING", + "CREATING", + "DELETING", + "NONE", + "RECREATING", + "REFRESHING", + "RESTARTING" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "" + ] + }, + "id": { + "type": "string", + "description": "[Output Only] The unique identifier for this resource. This field is empty when the instance does not exist.", + "format": "uint64" + }, + "instance": { + "type": "string", + "description": "[Output Only] The URL of the instance. The URL can exist even if the instance has not yet been created." + }, + "instanceStatus": { + "type": "string", + "description": "[Output Only] The status of the instance.
This field is empty when the instance does not exist.", + "enum": [ + "PROVISIONING", + "RUNNING", + "STAGING", + "STOPPED", + "STOPPING", + "SUSPENDED", + "SUSPENDING", + "TERMINATED" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "" + ] + }, + "lastAttempt": { + "$ref": "ManagedInstanceLastAttempt", + "description": "[Output Only] Information about the last attempt to create or delete the instance." + } + } + }, + "ManagedInstanceLastAttempt": { + "id": "ManagedInstanceLastAttempt", + "type": "object", + "properties": { + "errors": { + "type": "object", + "description": "[Output Only] Encountered errors during the last attempt to create or delete the instance.", + "properties": { + "errors": { + "type": "array", + "description": "[Output Only] The array of errors encountered while processing this operation.", + "items": { + "type": "object", + "properties": { + "code": { + "type": "string", + "description": "[Output Only] The error type identifier for this error." + }, + "location": { + "type": "string", + "description": "[Output Only] Indicates the field in the request which caused the error. This property is optional." + }, + "message": { + "type": "string", + "description": "[Output Only] An optional, human-readable error message." + } + } + } + } + } + } + } + }, + "Metadata": { + "id": "Metadata", + "type": "object", + "description": "A metadata key/value entry.", + "properties": { + "fingerprint": { + "type": "string", + "description": "Specifies a fingerprint for this request, which is essentially a hash of the metadata's contents and used for optimistic locking. The fingerprint is initially generated by Compute Engine and changes after every request to modify or update metadata. You must always provide an up-to-date fingerprint hash in order to update or change metadata.", + "format": "byte" + }, + "items": { + "type": "array", + "description": "Array of key/value pairs. The total size of all keys and values must be less than 512 KB.", + "items": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "Key for the metadata entry. Keys must conform to the following regexp: [a-zA-Z0-9-_]+, and be less than 128 bytes in length. This is reflected as part of a URL in the metadata server. Additionally, to avoid ambiguity, keys must not conflict with any other metadata keys for the project.", + "pattern": "[a-zA-Z0-9-_]{1,128}", + "annotations": { + "required": [ + "compute.instances.insert", + "compute.projects.setCommonInstanceMetadata" + ] + } + }, + "value": { + "type": "string", + "description": "Value for the metadata entry. These are free-form strings, and only have meaning as interpreted by the image running in the instance. The only restriction placed on values is that their size must be less than or equal to 32768 bytes.", + "annotations": { + "required": [ + "compute.instances.insert", + "compute.projects.setCommonInstanceMetadata" + ] + } + } + } + } + }, + "kind": { + "type": "string", + "description": "[Output Only] Type of the resource. Always compute#metadata for metadata.", + "default": "compute#metadata" + } + } + }, + "NamedPort": { + "id": "NamedPort", + "type": "object", + "description": "The named port.", + "properties": { + "name": { + "type": "string", + "description": "The name for this named port. The name must be 1-63 characters long, and comply with RFC1035."
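The Metadata schema just above fixes three client-checkable limits: keys must match [a-zA-Z0-9-_]{1,128} (the prose says "less than 128 bytes"; the declared pattern is used here), values may be at most 32768 bytes, and keys plus values together must stay under 512 KB. A small validator sketch in Go; validateMetadata and the map representation are illustrative:

```go
package example

import (
	"fmt"
	"regexp"
)

// Limits taken from the Metadata schema: the key pattern, a 32768-byte cap
// per value, and a combined size that must stay under 512 KB.
var metadataKeyRE = regexp.MustCompile(`^[a-zA-Z0-9-_]{1,128}$`)

const (
	maxValueBytes = 32768
	maxTotalBytes = 512 * 1024
)

func validateMetadata(items map[string]string) error {
	total := 0
	for k, v := range items {
		if !metadataKeyRE.MatchString(k) {
			return fmt.Errorf("invalid metadata key %q", k)
		}
		if len(v) > maxValueBytes {
			return fmt.Errorf("value for %q is %d bytes; limit is %d", k, len(v), maxValueBytes)
		}
		total += len(k) + len(v)
	}
	if total >= maxTotalBytes {
		return fmt.Errorf("metadata totals %d bytes; must be less than %d", total, maxTotalBytes)
	}
	return nil
}
```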
+ }, + "port": { + "type": "integer", + "description": "The port number, which can be a value between 1 and 65535.", + "format": "int32" + } + } + }, + "Network": { + "id": "Network", + "type": "object", + "description": "A network resource.", + "properties": { + "IPv4Range": { + "type": "string", + "description": "The range of internal addresses that are legal on this network. This range is a CIDR specification, for example: 192.168.0.0/16. Provided by the client when the network is created.", + "pattern": "[0-9]{1,3}(?:\\.[0-9]{1,3}){3}/[0-9]{1,2}" + }, + "creationTimestamp": { + "type": "string", + "description": "[Output Only] Creation timestamp in RFC3339 text format." + }, + "description": { + "type": "string", + "description": "An optional description of this resource. Provide this property when you create the resource." + }, + "gatewayIPv4": { + "type": "string", + "description": "A gateway address for default routing to other networks. This value is read only and is selected by the Google Compute Engine, typically as the first usable address in the IPv4Range.", + "pattern": "[0-9]{1,3}(?:\\.[0-9]{1,3}){3}" + }, + "id": { + "type": "string", + "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server.", + "format": "uint64" + }, + "kind": { + "type": "string", + "description": "[Output Only] Type of the resource. Always compute#network for networks.", + "default": "compute#network" + }, + "name": { + "type": "string", + "description": "Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression [a-z]([-a-z0-9]*[a-z0-9])? which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "annotations": { + "required": [ + "compute.networks.insert" + ] + } + }, + "selfLink": { + "type": "string", + "description": "[Output Only] Server-defined URL for the resource." + } + } + }, + "NetworkInterface": { + "id": "NetworkInterface", + "type": "object", + "description": "A network interface resource attached to an instance.", + "properties": { + "accessConfigs": { + "type": "array", + "description": "An array of configurations for this interface. Currently, ONE_TO_ONE_NAT is the only access config supported. If there are no accessConfigs specified, then this instance will have no external internet access.", + "items": { + "$ref": "AccessConfig" + } + }, + "name": { + "type": "string", + "description": "[Output Only] The name of the network interface, generated by the server. For network devices, these are eth0, eth1, etc." + }, + "network": { + "type": "string", + "description": "URL of the network resource for this instance. This is required for creating an instance but optional when creating a firewall rule. If not specified when creating a firewall rule, the default network is used:\n\nglobal/networks/default \n\nIf you specify this property, you can specify the network as a full or partial URL. 
For example, the following are all valid URLs: \n- https://www.googleapis.com/compute/v1/projects/project/global/networks/network \n- projects/project/global/networks/network \n- global/networks/default", + "annotations": { + "required": [ + "compute.instances.insert" + ] + } + }, + "networkIP": { + "type": "string", + "description": "[Output Only] An optional IPV4 internal network address assigned to the instance for this network interface." + } + } + }, + "NetworkList": { + "id": "NetworkList", + "type": "object", + "description": "Contains a list of Network resources.", + "properties": { + "id": { + "type": "string", + "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server." + }, + "items": { + "type": "array", + "description": "[Output Only] A list of Network resources.", + "items": { + "$ref": "Network" + } + }, + "kind": { + "type": "string", + "description": "[Output Only] Type of resource. Always compute#networkList for lists of networks.", + "default": "compute#networkList" + }, + "nextPageToken": { + "type": "string", + "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results." + }, + "selfLink": { + "type": "string", + "description": "[Output Only] Server-defined URL for this resource." + } + } + }, + "Operation": { + "id": "Operation", + "type": "object", + "description": "An Operation resource, used to manage asynchronous API requests.", + "properties": { + "clientOperationId": { + "type": "string", + "description": "[Output Only] A unique client ID generated by the server." + }, + "creationTimestamp": { + "type": "string", + "description": "[Output Only] Creation timestamp in RFC3339 text format." + }, + "description": { + "type": "string", + "description": "[Output Only] A textual description of the operation, which is set when the operation is created." + }, + "endTime": { + "type": "string", + "description": "[Output Only] The time that this operation was completed. This value is in RFC3339 text format." + }, + "error": { + "type": "object", + "description": "[Output Only] If errors are generated during processing of the operation, this field will be populated.", + "properties": { + "errors": { + "type": "array", + "description": "[Output Only] The array of errors encountered while processing this operation.", + "items": { + "type": "object", + "properties": { + "code": { + "type": "string", + "description": "[Output Only] The error type identifier for this error." + }, + "location": { + "type": "string", + "description": "[Output Only] Indicates the field in the request which caused the error. This property is optional." + }, + "message": { + "type": "string", + "description": "[Output Only] An optional, human-readable error message." + } + } + } + } + } + }, + "httpErrorMessage": { + "type": "string", + "description": "[Output Only] If the operation fails, this field contains the HTTP error message that was returned, such as NOT FOUND." + }, + "httpErrorStatusCode": { + "type": "integer", + "description": "[Output Only] If the operation fails, this field contains the HTTP error status code that was returned.
For example, a 404 means the resource was not found.", + "format": "int32" + }, + "id": { + "type": "string", + "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server.", + "format": "uint64" + }, + "insertTime": { + "type": "string", + "description": "[Output Only] The time that this operation was requested. This value is in RFC3339 text format." + }, + "kind": { + "type": "string", + "description": "[Output Only] Type of the resource. Always compute#operation for Operation resources.", + "default": "compute#operation" + }, + "name": { + "type": "string", + "description": "[Output Only] Name of the resource." + }, + "operationType": { + "type": "string", + "description": "[Output Only] The type of operation, which can be insert, update, or delete." + }, + "progress": { + "type": "integer", + "description": "[Output Only] An optional progress indicator that ranges from 0 to 100. There is no requirement that this be linear or support any granularity of operations. This should not be used to guess at when the operation will be complete. This number should monotonically increase as the operation progresses.", + "format": "int32" + }, + "region": { + "type": "string", + "description": "[Output Only] URL of the region where the operation resides. Only applicable for regional resources." + }, + "selfLink": { + "type": "string", + "description": "[Output Only] Server-defined URL for the resource." + }, + "startTime": { + "type": "string", + "description": "[Output Only] The time that this operation was started by the server. This value is in RFC3339 text format." + }, + "status": { + "type": "string", + "description": "[Output Only] The status of the operation, which can be one of the following: PENDING, RUNNING, or DONE.", + "enum": [ + "DONE", + "PENDING", + "RUNNING" + ], + "enumDescriptions": [ + "", + "", + "" + ] + }, + "statusMessage": { + "type": "string", + "description": "[Output Only] An optional textual description of the current status of the operation." + }, + "targetId": { + "type": "string", + "description": "[Output Only] The unique target ID, which identifies a specific incarnation of the target resource.", + "format": "uint64" + }, + "targetLink": { + "type": "string", + "description": "[Output Only] The URL of the resource that the operation is modifying." + }, + "user": { + "type": "string", + "description": "[Output Only] User who requested the operation, for example: user@example.com." + }, + "warnings": { + "type": "array", + "description": "[Output Only] If warning messages are generated during processing of the operation, this field will be populated.", + "items": { + "type": "object", + "properties": { + "code": { + "type": "string", + "description": "[Output Only] A warning code, if applicable. 
For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "DEPRECATED_RESOURCE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "INJECTED_KERNELS_DEPRECATED", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_NOT_DELETED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ] + }, + "data": { + "type": "array", + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource, and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding)." + }, + "value": { + "type": "string", + "description": "[Output Only] A warning data value corresponding to the key." + } + } + } + }, + "message": { + "type": "string", + "description": "[Output Only] A human-readable description of the warning code." + } + } + } + }, + "zone": { + "type": "string", + "description": "[Output Only] URL of the zone where the operation resides." + } + } + }, + "OperationAggregatedList": { + "id": "OperationAggregatedList", + "type": "object", + "properties": { + "id": { + "type": "string", + "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server." + }, + "items": { + "type": "object", + "description": "[Output Only] A map of scoped operation lists.", + "additionalProperties": { + "$ref": "OperationsScopedList", + "description": "[Output Only] Name of the scope containing this set of operations." + } + }, + "kind": { + "type": "string", + "description": "[Output Only] Type of resource. Always compute#operationAggregatedList for aggregated lists of operations.", + "default": "compute#operationAggregatedList" + }, + "nextPageToken": { + "type": "string", + "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results." + }, + "selfLink": { + "type": "string", + "description": "[Output Only] Server-defined URL for this resource." + } + } + }, + "OperationList": { + "id": "OperationList", + "type": "object", + "description": "Contains a list of Operation resources.", + "properties": { + "id": { + "type": "string", + "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server." 
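The Operation schema above is the whole asynchronous-request protocol: poll the operation until status reaches DONE, then check the nested error array before trusting the result. A hedged polling sketch in Go; the waitForOperation name and the fixed two-second interval are choices made for the example, not API guidance:

```go
package example

import (
	"encoding/json"
	"fmt"
	"net/http"
	"time"
)

// operation mirrors only the Operation fields a waiter needs: the
// PENDING/RUNNING/DONE status enum and the nested error array.
type operation struct {
	Status string `json:"status"`
	Error  *struct {
		Errors []struct {
			Code    string `json:"code"`
			Message string `json:"message"`
		} `json:"errors"`
	} `json:"error"`
}

// waitForOperation polls the operation's selfLink until status is DONE,
// then surfaces the first recorded error, if any.
func waitForOperation(client *http.Client, selfLink string) error {
	for {
		resp, err := client.Get(selfLink)
		if err != nil {
			return err
		}
		var op operation
		err = json.NewDecoder(resp.Body).Decode(&op)
		resp.Body.Close()
		if err != nil {
			return err
		}
		if op.Status == "DONE" {
			if op.Error != nil && len(op.Error.Errors) > 0 {
				e := op.Error.Errors[0]
				return fmt.Errorf("operation failed: %s: %s", e.Code, e.Message)
			}
			return nil
		}
		time.Sleep(2 * time.Second) // illustrative interval, not API guidance
	}
}
```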
+ }, + "items": { + "type": "array", + "description": "[Output Only] The Operation resources.", + "items": { + "$ref": "Operation" + } + }, + "kind": { + "type": "string", + "description": "[Output Only] Type of resource. Always compute#operations for Operations resource.", + "default": "compute#operationList" + }, + "nextPageToken": { + "type": "string", + "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results." + }, + "selfLink": { + "type": "string", + "description": "[Output Only] Server-defined URL for this resource." + } + } + }, + "OperationsScopedList": { + "id": "OperationsScopedList", + "type": "object", + "properties": { + "operations": { + "type": "array", + "description": "[Output Only] List of operations contained in this scope.", + "items": { + "$ref": "Operation" + } + }, + "warning": { + "type": "object", + "description": "[Output Only] Informational warning which replaces the list of operations when the list is empty.", + "properties": { + "code": { + "type": "string", + "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "DEPRECATED_RESOURCE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "INJECTED_KERNELS_DEPRECATED", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_NOT_DELETED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ] + }, + "data": { + "type": "array", + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource, and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding)." + }, + "value": { + "type": "string", + "description": "[Output Only] A warning data value corresponding to the key." + } + } + } + }, + "message": { + "type": "string", + "description": "[Output Only] A human-readable description of the warning code." + } + } + } + } + }, + "PathMatcher": { + "id": "PathMatcher", + "type": "object", + "description": "A matcher for the path portion of the URL. The BackendService from the longest-matched rule will serve the URL. If no rule was matched, the default service will be used.", + "properties": { + "defaultService": { + "type": "string", + "description": "The URL to the BackendService resource. This will be used if none of the pathRules defined by this PathMatcher is met by the URL's path portion." 
+ }, + "description": { + "type": "string", + "description": "An optional description of this resource. Provide this property when you create the resource." + }, + "name": { + "type": "string", + "description": "The name to which this PathMatcher is referred by the HostRule." + }, + "pathRules": { + "type": "array", + "description": "The list of path rules.", + "items": { + "$ref": "PathRule" + } + } + } + }, + "PathRule": { + "id": "PathRule", + "type": "object", + "description": "A path-matching rule for a URL. If matched, will use the specified BackendService to handle the traffic arriving at this URL.", + "properties": { + "paths": { + "type": "array", + "description": "The list of path patterns to match. Each must start with / and the only place a * is allowed is at the end following a /. The string fed to the path matcher does not include any text after the first ? or #, and those chars are not allowed here.", + "items": { + "type": "string" + } + }, + "service": { + "type": "string", + "description": "The URL of the BackendService resource if this rule is matched." + } + } + }, + "Project": { + "id": "Project", + "type": "object", + "description": "A Project resource. Projects can only be created in the Google Developers Console. Unless marked otherwise, values can only be modified in the console.", + "properties": { + "commonInstanceMetadata": { + "$ref": "Metadata", + "description": "Metadata key/value pairs available to all instances contained in this project. See Custom metadata for more information." + }, + "creationTimestamp": { + "type": "string", + "description": "[Output Only] Creation timestamp in RFC3339 text format." + }, + "description": { + "type": "string", + "description": "An optional textual description of the resource." + }, + "enabledFeatures": { + "type": "array", + "description": "Restricted features enabled for use on this project.", + "items": { + "type": "string" + } + }, + "id": { + "type": "string", + "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server. This is not the project ID, and is just a unique ID used by Compute Engine to identify resources.", + "format": "uint64" + }, + "kind": { + "type": "string", + "description": "[Output Only] Type of the resource. Always compute#project for projects.", + "default": "compute#project" + }, + "name": { + "type": "string", + "description": "The project ID. For example: my-example-project. Use the project ID to make requests to Compute Engine." + }, + "quotas": { + "type": "array", + "description": "[Output Only] Quotas assigned to this project.", + "items": { + "$ref": "Quota" + } + }, + "selfLink": { + "type": "string", + "description": "[Output Only] Server-defined URL for the resource." + }, + "usageExportLocation": { + "$ref": "UsageExportLocation", + "description": "The location in Cloud Storage and naming method of the daily usage report." 
+ } + } + }, + "Quota": { + "id": "Quota", + "type": "object", + "description": "A quotas entry.", + "properties": { + "limit": { + "type": "number", + "description": "[Output Only] Quota limit for this metric.", + "format": "double" + }, + "metric": { + "type": "string", + "description": "[Output Only] Name of the quota metric.", + "enum": [ + "AUTOSCALERS", + "BACKEND_SERVICES", + "CPUS", + "DISKS_TOTAL_GB", + "FIREWALLS", + "FORWARDING_RULES", + "HEALTH_CHECKS", + "IMAGES", + "INSTANCES", + "INSTANCE_GROUPS", + "INSTANCE_GROUP_MANAGERS", + "INSTANCE_TEMPLATES", + "IN_USE_ADDRESSES", + "LOCAL_SSD_TOTAL_GB", + "NETWORKS", + "ROUTES", + "SNAPSHOTS", + "SSD_TOTAL_GB", + "SSL_CERTIFICATES", + "STATIC_ADDRESSES", + "TARGET_HTTPS_PROXIES", + "TARGET_HTTP_PROXIES", + "TARGET_INSTANCES", + "TARGET_POOLS", + "TARGET_VPN_GATEWAYS", + "URL_MAPS", + "VPN_TUNNELS" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ] + }, + "usage": { + "type": "number", + "description": "[Output Only] Current usage of this metric.", + "format": "double" + } + } + }, + "Region": { + "id": "Region", + "type": "object", + "description": "Region resource.", + "properties": { + "creationTimestamp": { + "type": "string", + "description": "[Output Only] Creation timestamp in RFC3339 text format." + }, + "deprecated": { + "$ref": "DeprecationStatus", + "description": "[Output Only] The deprecation status associated with this region." + }, + "description": { + "type": "string", + "description": "[Output Only] Textual description of the resource." + }, + "id": { + "type": "string", + "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server.", + "format": "uint64" + }, + "kind": { + "type": "string", + "description": "[Output Only] Type of the resource. Always compute#region for regions.", + "default": "compute#region" + }, + "name": { + "type": "string", + "description": "[Output Only] Name of the resource." + }, + "quotas": { + "type": "array", + "description": "[Output Only] Quotas assigned to this region.", + "items": { + "$ref": "Quota" + } + }, + "selfLink": { + "type": "string", + "description": "[Output Only] Server-defined URL for the resource." + }, + "status": { + "type": "string", + "description": "[Output Only] Status of the region, either UP or DOWN.", + "enum": [ + "DOWN", + "UP" + ], + "enumDescriptions": [ + "", + "" + ] + }, + "zones": { + "type": "array", + "description": "[Output Only] A list of zones available in this region, in the form of resource URLs.", + "items": { + "type": "string" + } + } + } + }, + "RegionList": { + "id": "RegionList", + "type": "object", + "description": "Contains a list of region resources.", + "properties": { + "id": { + "type": "string", + "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server." + }, + "items": { + "type": "array", + "description": "[Output Only] A list of Region resources.", + "items": { + "$ref": "Region" + } + }, + "kind": { + "type": "string", + "description": "[Output Only] Type of resource. Always compute#regionList for lists of regions.", + "default": "compute#regionList" + }, + "nextPageToken": { + "type": "string", + "description": "[Output Only] This token allows you to get the next page of results for list requests. 
If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results." + }, + "selfLink": { + "type": "string", + "description": "[Output Only] Server-defined URL for this resource." + } + } + }, + "ResourceGroupReference": { + "id": "ResourceGroupReference", + "type": "object", + "properties": { + "group": { + "type": "string", + "description": "A URI referencing one of the resource views listed in the backend service." + } + } + }, + "Route": { + "id": "Route", + "type": "object", + "description": "The route resource. A Route is a rule that specifies how certain packets should be handled by the virtual network. Routes are associated with instances by tag and the set of Routes for a particular instance is called its routing table. For each packet leaving an instance, the system searches that instance's routing table for a single best matching Route. Routes match packets by destination IP address, preferring smaller or more specific ranges over larger ones. If there is a tie, the system selects the Route with the smallest priority value. If there is still a tie, it uses the layer three and four packet headers to select just one of the remaining matching Routes. The packet is then forwarded as specified by the nextHop field of the winning Route -- either to another instance destination, an instance gateway or a Google Compute Engine-operated gateway. Packets that do not match any Route in the sending instance's routing table are dropped.", + "properties": { + "creationTimestamp": { + "type": "string", + "description": "[Output Only] Creation timestamp in RFC3339 text format." + }, + "description": { + "type": "string", + "description": "An optional description of this resource. Provide this property when you create the resource." + }, + "destRange": { + "type": "string", + "description": "The destination range of outgoing packets that this route applies to.", + "annotations": { + "required": [ + "compute.routes.insert" + ] + } + }, + "id": { + "type": "string", + "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server.", + "format": "uint64" + }, + "kind": { + "type": "string", + "description": "[Output Only] Type of this resource. Always compute#route for Route resources.", + "default": "compute#route" + }, + "name": { + "type": "string", + "description": "Name of the resource; provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression [a-z]([-a-z0-9]*[a-z0-9])? which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "annotations": { + "required": [ + "compute.routes.insert" + ] + } + }, + "network": { + "type": "string", + "description": "Fully-qualified URL of the network that this route applies to.", + "annotations": { + "required": [ + "compute.routes.insert" + ] + } + }, + "nextHopGateway": { + "type": "string", + "description": "The URL to a gateway that should handle matching packets. 
Currently, this is only the internet gateway: projects/\u003cproject-id\u003e/global/gateways/default-internet-gateway" + }, + "nextHopInstance": { + "type": "string", + "description": "The fully-qualified URL to an instance that should handle matching packets. For example:\nhttps://www.googleapis.com/compute/v1/projects/project/zones/zone/instances/" + }, + "nextHopIp": { + "type": "string", + "description": "The network IP address of an instance that should handle matching packets." + }, + "nextHopNetwork": { + "type": "string", + "description": "The URL of the local network if it should handle matching packets." + }, + "nextHopVpnTunnel": { + "type": "string", + "description": "The URL to a VpnTunnel that should handle matching packets." + }, + "priority": { + "type": "integer", + "description": "Breaks ties between Routes of equal specificity. Routes with smaller values win when tied with routes with larger values. Default value is 1000. A valid range is between 0 and 65535.", + "format": "uint32", + "annotations": { + "required": [ + "compute.routes.insert" + ] + } + }, + "selfLink": { + "type": "string", + "description": "[Output Only] Server-defined fully-qualified URL for this resource." + }, + "tags": { + "type": "array", + "description": "A list of instance tags to which this route applies.", + "items": { + "type": "string" + }, + "annotations": { + "required": [ + "compute.routes.insert" + ] + } + }, + "warnings": { + "type": "array", + "description": "[Output Only] If potential misconfigurations are detected for this route, this field will be populated with warning messages.", + "items": { + "type": "object", + "properties": { + "code": { + "type": "string", + "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "DEPRECATED_RESOURCE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "INJECTED_KERNELS_DEPRECATED", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_NOT_DELETED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ] + }, + "data": { + "type": "array", + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource, and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding)." + }, + "value": { + "type": "string", + "description": "[Output Only] A warning data value corresponding to the key." + } + } + } + }, + "message": { + "type": "string", + "description": "[Output Only] A human-readable description of the warning code." 
+ } + } + } + } + } + }, + "RouteList": { + "id": "RouteList", + "type": "object", + "description": "Contains a list of route resources.", + "properties": { + "id": { + "type": "string", + "description": "[Output Only] Unique identifier for the resource. Defined by the server." + }, + "items": { + "type": "array", + "description": "A list of Route resources.", + "items": { + "$ref": "Route" + } + }, + "kind": { + "type": "string", + "description": "Type of resource.", + "default": "compute#routeList" + }, + "nextPageToken": { + "type": "string", + "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results." + }, + "selfLink": { + "type": "string", + "description": "[Output Only] Server-defined URL for this resource." + } + } + }, + "Scheduling": { + "id": "Scheduling", + "type": "object", + "description": "Sets the scheduling options for an Instance.", + "properties": { + "automaticRestart": { + "type": "boolean", + "description": "Specifies whether the instance should be automatically restarted if it is terminated by Compute Engine (not terminated by a user). You can only set the automatic restart option for standard instances. Preemptible instances cannot be automatically restarted." + }, + "onHostMaintenance": { + "type": "string", + "description": "Defines the maintenance behavior for this instance. For standard instances, the default behavior is MIGRATE. For preemptible instances, the default and only possible behavior is TERMINATE. For more information, see Setting maintenance behavior.", + "enum": [ + "MIGRATE", + "TERMINATE" + ], + "enumDescriptions": [ + "", + "" + ] + }, + "preemptible": { + "type": "boolean", + "description": "Whether the instance is preemptible." + } + } + }, + "SerialPortOutput": { + "id": "SerialPortOutput", + "type": "object", + "description": "An instance's serial console output.", + "properties": { + "contents": { + "type": "string", + "description": "[Output Only] The contents of the console output." + }, + "kind": { + "type": "string", + "description": "[Output Only] Type of the resource. Always compute#serialPortOutput for serial port output.", + "default": "compute#serialPortOutput" + }, + "selfLink": { + "type": "string", + "description": "[Output Only] Server defined URL for the resource." + } + } + }, + "ServiceAccount": { + "id": "ServiceAccount", + "type": "object", + "description": "A service account.", + "properties": { + "email": { + "type": "string", + "description": "Email address of the service account." + }, + "scopes": { + "type": "array", + "description": "The list of scopes to be made available for this service account.", + "items": { + "type": "string" + } + } + } + }, + "Snapshot": { + "id": "Snapshot", + "type": "object", + "description": "A persistent disk snapshot resource.", + "properties": { + "creationTimestamp": { + "type": "string", + "description": "[Output Only] Creation timestamp in RFC3339 text format." + }, + "description": { + "type": "string", + "description": "An optional description of this resource. Provide this property when you create the resource." 
+ }, + "diskSizeGb": { + "type": "string", + "description": "[Output Only] Size of the snapshot, specified in GB.", + "format": "int64" + }, + "id": { + "type": "string", + "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server.", + "format": "uint64" + }, + "kind": { + "type": "string", + "description": "[Output Only] Type of the resource. Always compute#snapshot for Snapshot resources.", + "default": "compute#snapshot" + }, + "licenses": { + "type": "array", + "description": "Public visible licenses.", + "items": { + "type": "string" + } + }, + "name": { + "type": "string", + "description": "Name of the resource; provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression [a-z]([-a-z0-9]*[a-z0-9])? which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?" + }, + "selfLink": { + "type": "string", + "description": "[Output Only] Server-defined URL for the resource." + }, + "sourceDisk": { + "type": "string", + "description": "The source disk used to create this snapshot." + }, + "sourceDiskId": { + "type": "string", + "description": "[Output Only] The ID value of the disk used to create this snapshot. This value may be used to determine whether the snapshot was taken from the current or a previous instance of a given disk name." + }, + "status": { + "type": "string", + "description": "[Output Only] The status of the snapshot.", + "enum": [ + "CREATING", + "DELETING", + "FAILED", + "READY", + "UPLOADING" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "" + ] + }, + "storageBytes": { + "type": "string", + "description": "[Output Only] A size of the the storage used by the snapshot. As snapshots share storage, this number is expected to change with snapshot creation/deletion.", + "format": "int64" + }, + "storageBytesStatus": { + "type": "string", + "description": "[Output Only] An indicator whether storageBytes is in a stable state or it is being adjusted as a result of shared storage reallocation.", + "enum": [ + "UPDATING", + "UP_TO_DATE" + ], + "enumDescriptions": [ + "", + "" + ] + } + } + }, + "SnapshotList": { + "id": "SnapshotList", + "type": "object", + "description": "Contains a list of Snapshot resources.", + "properties": { + "id": { + "type": "string", + "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server." + }, + "items": { + "type": "array", + "description": "A list of Snapshot resources.", + "items": { + "$ref": "Snapshot" + } + }, + "kind": { + "type": "string", + "description": "Type of resource.", + "default": "compute#snapshotList" + }, + "nextPageToken": { + "type": "string", + "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results." + }, + "selfLink": { + "type": "string", + "description": "[Output Only] Server-defined URL for this resource." 
+ } + } + }, + "SslCertificate": { + "id": "SslCertificate", + "type": "object", + "description": "An SslCertificate resource. This resource provides a mechanism to upload an SSL key and certificate to a global HTTPS load balancer to serve secure connections.", + "properties": { + "certificate": { + "type": "string", + "description": "A local certificate file. The certificate must be in PEM format. The certificate chain must be no greater than 5 certs long. The chain must include at least one intermediate cert." + }, + "creationTimestamp": { + "type": "string", + "description": "[Output Only] Creation timestamp in RFC3339 text format." + }, + "description": { + "type": "string", + "description": "An optional description of this resource. Provide this property when you create the resource." + }, + "id": { + "type": "string", + "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server.", + "format": "uint64" + }, + "kind": { + "type": "string", + "description": "[Output Only] Type of the resource. Always compute#sslCertificate for SSL certificates.", + "default": "compute#sslCertificate" + }, + "name": { + "type": "string", + "description": "Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression [a-z]([-a-z0-9]*[a-z0-9])? which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?" + }, + "privateKey": { + "type": "string", + "description": "A write-only private key in PEM format. Only insert RPCs will include this field." + }, + "selfLink": { + "type": "string", + "description": "[Output Only] Server-defined URL for the resource." + } + } + }, + "SslCertificateList": { + "id": "SslCertificateList", + "type": "object", + "description": "Contains a list of SslCertificate resources.", + "properties": { + "id": { + "type": "string", + "description": "[Output Only] Unique identifier for the resource. Defined by the server." + }, + "items": { + "type": "array", + "description": "A list of SslCertificate resources.", + "items": { + "$ref": "SslCertificate" + } + }, + "kind": { + "type": "string", + "description": "Type of resource.", + "default": "compute#sslCertificateList" + }, + "nextPageToken": { + "type": "string", + "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results." + }, + "selfLink": { + "type": "string", + "description": "[Output Only] Server-defined URL for this resource." + } + } + }, + "Tags": { + "id": "Tags", + "type": "object", + "description": "A set of instance tags.", + "properties": { + "fingerprint": { + "type": "string", + "description": "Specifies a fingerprint for this request, which is essentially a hash of the metadata's contents and used for optimistic locking. The fingerprint is initially generated by Compute Engine and changes after every request to modify or update metadata. 
You must always provide an up-to-date fingerprint hash in order to update or change metadata.\n\nTo see the latest fingerprint, make a get() request to the instance.", + "format": "byte" + }, + "items": { + "type": "array", + "description": "An array of tags. Each tag must be 1-63 characters long, and comply with RFC1035.", + "items": { + "type": "string" + } + } + } + }, + "TargetHttpProxy": { + "id": "TargetHttpProxy", + "type": "object", + "description": "A TargetHttpProxy resource. This resource defines an HTTP proxy.", + "properties": { + "creationTimestamp": { + "type": "string", + "description": "[Output Only] Creation timestamp in RFC3339 text format." + }, + "description": { + "type": "string", + "description": "An optional description of this resource. Provide this property when you create the resource." + }, + "id": { + "type": "string", + "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server.", + "format": "uint64" + }, + "kind": { + "type": "string", + "description": "[Output Only] Type of resource. Always compute#targetHttpProxy for target HTTP proxies.", + "default": "compute#targetHttpProxy" + }, + "name": { + "type": "string", + "description": "Name of the resource; provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression [a-z]([-a-z0-9]*[a-z0-9])? which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?" + }, + "selfLink": { + "type": "string", + "description": "[Output Only] Server-defined URL for the resource." + }, + "urlMap": { + "type": "string", + "description": "URL to the UrlMap resource that defines the mapping from URL to the BackendService." + } + } + }, + "TargetHttpProxyList": { + "id": "TargetHttpProxyList", + "type": "object", + "description": "A list of TargetHttpProxy resources.", + "properties": { + "id": { + "type": "string", + "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server." + }, + "items": { + "type": "array", + "description": "A list of TargetHttpProxy resources.", + "items": { + "$ref": "TargetHttpProxy" + } + }, + "kind": { + "type": "string", + "description": "Type of resource. Always compute#targetHttpProxyList for lists of Target HTTP proxies.", + "default": "compute#targetHttpProxyList" + }, + "nextPageToken": { + "type": "string", + "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results." + }, + "selfLink": { + "type": "string", + "description": "[Output Only] Server-defined URL for this resource." + } + } + }, + "TargetHttpsProxiesSetSslCertificatesRequest": { + "id": "TargetHttpsProxiesSetSslCertificatesRequest", + "type": "object", + "properties": { + "sslCertificates": { + "type": "array", + "description": "New set of URLs to SslCertificate resources to associate with this TargetHttpsProxy. 
Currently exactly one ssl certificate must be specified.", + "items": { + "type": "string" + } + } + } + }, + "TargetHttpsProxy": { + "id": "TargetHttpsProxy", + "type": "object", + "description": "A TargetHttpsProxy resource. This resource defines an HTTPS proxy.", + "properties": { + "creationTimestamp": { + "type": "string", + "description": "[Output Only] Creation timestamp in RFC3339 text format." + }, + "description": { + "type": "string", + "description": "An optional description of this resource. Provide this property when you create the resource." + }, + "id": { + "type": "string", + "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server.", + "format": "uint64" + }, + "kind": { + "type": "string", + "description": "[Output Only] Type of the resource. Always compute#targetHttpsProxy for target HTTPS proxies.", + "default": "compute#targetHttpsProxy" + }, + "name": { + "type": "string", + "description": "Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression [a-z]([-a-z0-9]*[a-z0-9])? which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?" + }, + "selfLink": { + "type": "string", + "description": "[Output Only] Server-defined URL for the resource." + }, + "sslCertificates": { + "type": "array", + "description": "URLs to SslCertificate resources that are used to authenticate connections to Backends. Currently exactly one SSL certificate must be specified.", + "items": { + "type": "string" + } + }, + "urlMap": { + "type": "string", + "description": "URL to the UrlMap resource that defines the mapping from URL to the BackendService." + } + } + }, + "TargetHttpsProxyList": { + "id": "TargetHttpsProxyList", + "type": "object", + "description": "Contains a list of TargetHttpsProxy resources.", + "properties": { + "id": { + "type": "string", + "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server." + }, + "items": { + "type": "array", + "description": "A list of TargetHttpsProxy resources.", + "items": { + "$ref": "TargetHttpsProxy" + } + }, + "kind": { + "type": "string", + "description": "Type of resource.", + "default": "compute#targetHttpsProxyList" + }, + "nextPageToken": { + "type": "string", + "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results." + }, + "selfLink": { + "type": "string", + "description": "[Output Only] Server-defined URL for this resource." + } + } + }, + "TargetInstance": { + "id": "TargetInstance", + "type": "object", + "description": "A TargetInstance resource. This resource defines an endpoint instance that terminates traffic of certain protocols.", + "properties": { + "creationTimestamp": { + "type": "string", + "description": "[Output Only] Creation timestamp in RFC3339 text format." + }, + "description": { + "type": "string", + "description": "An optional description of this resource. 
Provide this property when you create the resource." + }, + "id": { + "type": "string", + "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server.", + "format": "uint64" + }, + "instance": { + "type": "string", + "description": "The URL to the instance that terminates the relevant traffic." + }, + "kind": { + "type": "string", + "description": "[Output Only] The type of the resource. Always compute#targetInstance for target instances.", + "default": "compute#targetInstance" + }, + "name": { + "type": "string", + "description": "Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression [a-z]([-a-z0-9]*[a-z0-9])? which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?" + }, + "natPolicy": { + "type": "string", + "description": "NAT option controlling how IPs are NAT'ed to the instance. Currently only NO_NAT (default value) is supported.", + "enum": [ + "NO_NAT" + ], + "enumDescriptions": [ + "" + ] + }, + "selfLink": { + "type": "string", + "description": "[Output Only] Server-defined URL for the resource." + }, + "zone": { + "type": "string", + "description": "[Output Only] URL of the zone where the target instance resides." + } + } + }, + "TargetInstanceAggregatedList": { + "id": "TargetInstanceAggregatedList", + "type": "object", + "properties": { + "id": { + "type": "string", + "description": "[Output Only] Unique identifier for the resource; defined by the server." + }, + "items": { + "type": "object", + "description": "A map of scoped target instance lists.", + "additionalProperties": { + "$ref": "TargetInstancesScopedList", + "description": "Name of the scope containing this set of target instances." + } + }, + "kind": { + "type": "string", + "description": "Type of resource.", + "default": "compute#targetInstanceAggregatedList" + }, + "nextPageToken": { + "type": "string", + "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results." + }, + "selfLink": { + "type": "string", + "description": "[Output Only] Server-defined URL for this resource." + } + } + }, + "TargetInstanceList": { + "id": "TargetInstanceList", + "type": "object", + "description": "Contains a list of TargetInstance resources.", + "properties": { + "id": { + "type": "string", + "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server." + }, + "items": { + "type": "array", + "description": "A list of TargetInstance resources.", + "items": { + "$ref": "TargetInstance" + } + }, + "kind": { + "type": "string", + "description": "Type of resource.", + "default": "compute#targetInstanceList" + }, + "nextPageToken": { + "type": "string", + "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. 
Subsequent list requests will have their own nextPageToken to continue paging through the results." + }, + "selfLink": { + "type": "string", + "description": "[Output Only] Server-defined URL for this resource." + } + } + }, + "TargetInstancesScopedList": { + "id": "TargetInstancesScopedList", + "type": "object", + "properties": { + "targetInstances": { + "type": "array", + "description": "List of target instances contained in this scope.", + "items": { + "$ref": "TargetInstance" + } + }, + "warning": { + "type": "object", + "description": "Informational warning which replaces the list of target instances when the list is empty.", + "properties": { + "code": { + "type": "string", + "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "DEPRECATED_RESOURCE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "INJECTED_KERNELS_DEPRECATED", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_NOT_DELETED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ] + }, + "data": { + "type": "array", + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource, and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding)." + }, + "value": { + "type": "string", + "description": "[Output Only] A warning data value corresponding to the key." + } + } + } + }, + "message": { + "type": "string", + "description": "[Output Only] A human-readable description of the warning code." + } + } + } + } + }, + "TargetPool": { + "id": "TargetPool", + "type": "object", + "description": "A TargetPool resource. This resource defines a pool of instances, associated HttpHealthCheck resources, and the fallback TargetPool.", + "properties": { + "backupPool": { + "type": "string", + "description": "This field is applicable only when the containing target pool is serving a forwarding rule as the primary pool, and its failoverRatio field is properly set to a value between [0, 1].\n\nbackupPool and failoverRatio together define the fallback behavior of the primary target pool: if the ratio of the healthy instances in the primary pool is at or below failoverRatio, traffic arriving at the load-balanced IP will be directed to the backup pool.\n\nIn the case where failoverRatio and backupPool are not set, or all the instances in the backup pool are unhealthy, the traffic will be directed back to the primary pool in the \"force\" mode, where traffic will be spread to the healthy instances with the best effort, or to all instances when no instance is healthy." 
+ }, + "creationTimestamp": { + "type": "string", + "description": "[Output Only] Creation timestamp in RFC3339 text format." + }, + "description": { + "type": "string", + "description": "An optional description of this resource. Provide this property when you create the resource." + }, + "failoverRatio": { + "type": "number", + "description": "This field is applicable only when the containing target pool is serving a forwarding rule as the primary pool (i.e., not as a backup pool to some other target pool). The value of the field must be in [0, 1].\n\nIf set, backupPool must also be set. They together define the fallback behavior of the primary target pool: if the ratio of the healthy instances in the primary pool is at or below this number, traffic arriving at the load-balanced IP will be directed to the backup pool.\n\nIn case where failoverRatio is not set or all the instances in the backup pool are unhealthy, the traffic will be directed back to the primary pool in the \"force\" mode, where traffic will be spread to the healthy instances with the best effort, or to all instances when no instance is healthy.", + "format": "float" + }, + "healthChecks": { + "type": "array", + "description": "A list of URLs to the HttpHealthCheck resource. A member instance in this pool is considered healthy if and only if all specified health checks pass. An empty list means all member instances will be considered healthy at all times.", + "items": { + "type": "string" + } + }, + "id": { + "type": "string", + "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server.", + "format": "uint64" + }, + "instances": { + "type": "array", + "description": "A list of resource URLs to the member virtual machines serving this pool. They must live in zones contained in the same region as this pool.", + "items": { + "type": "string" + } + }, + "kind": { + "type": "string", + "description": "[Output Only] Type of the resource. Always compute#targetPool for target pools.", + "default": "compute#targetPool" + }, + "name": { + "type": "string", + "description": "Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression [a-z]([-a-z0-9]*[a-z0-9])? which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?" + }, + "region": { + "type": "string", + "description": "[Output Only] URL of the region where the target pool resides." + }, + "selfLink": { + "type": "string", + "description": "[Output Only] Server-defined URL for the resource." 
+ }, + "sessionAffinity": { + "type": "string", + "description": "Sesssion affinity option, must be one of the following values:\nNONE: Connections from the same client IP may go to any instance in the pool.\nCLIENT_IP: Connections from the same client IP will go to the same instance in the pool while that instance remains healthy.\nCLIENT_IP_PROTO: Connections from the same client IP with the same IP protocol will go to the same instance in the pool while that instance remains healthy.", + "enum": [ + "CLIENT_IP", + "CLIENT_IP_PROTO", + "NONE" + ], + "enumDescriptions": [ + "", + "", + "" + ] + } + } + }, + "TargetPoolAggregatedList": { + "id": "TargetPoolAggregatedList", + "type": "object", + "properties": { + "id": { + "type": "string", + "description": "[Output Only] Unique identifier for the resource. Defined by the server." + }, + "items": { + "type": "object", + "description": "A map of scoped target pool lists.", + "additionalProperties": { + "$ref": "TargetPoolsScopedList", + "description": "Name of the scope containing this set of target pools." + } + }, + "kind": { + "type": "string", + "description": "Type of resource.", + "default": "compute#targetPoolAggregatedList" + }, + "nextPageToken": { + "type": "string", + "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results." + }, + "selfLink": { + "type": "string", + "description": "[Output Only] Server-defined URL for this resource." + } + } + }, + "TargetPoolInstanceHealth": { + "id": "TargetPoolInstanceHealth", + "type": "object", + "properties": { + "healthStatus": { + "type": "array", + "items": { + "$ref": "HealthStatus" + } + }, + "kind": { + "type": "string", + "description": "Type of resource.", + "default": "compute#targetPoolInstanceHealth" + } + } + }, + "TargetPoolList": { + "id": "TargetPoolList", + "type": "object", + "description": "Contains a list of TargetPool resources.", + "properties": { + "id": { + "type": "string", + "description": "[Output Only] Unique identifier for the resource. Defined by the server." + }, + "items": { + "type": "array", + "description": "A list of TargetPool resources.", + "items": { + "$ref": "TargetPool" + } + }, + "kind": { + "type": "string", + "description": "Type of resource.", + "default": "compute#targetPoolList" + }, + "nextPageToken": { + "type": "string", + "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results." + }, + "selfLink": { + "type": "string", + "description": "[Output Only] Server-defined URL for this resource." 
+ } + } + }, + "TargetPoolsAddHealthCheckRequest": { + "id": "TargetPoolsAddHealthCheckRequest", + "type": "object", + "properties": { + "healthChecks": { + "type": "array", + "description": "Health check URLs to be added to targetPool.", + "items": { + "$ref": "HealthCheckReference" + } + } + } + }, + "TargetPoolsAddInstanceRequest": { + "id": "TargetPoolsAddInstanceRequest", + "type": "object", + "properties": { + "instances": { + "type": "array", + "description": "URLs of the instances to be added to targetPool.", + "items": { + "$ref": "InstanceReference" + } + } + } + }, + "TargetPoolsRemoveHealthCheckRequest": { + "id": "TargetPoolsRemoveHealthCheckRequest", + "type": "object", + "properties": { + "healthChecks": { + "type": "array", + "description": "Health check URLs to be removed from targetPool.", + "items": { + "$ref": "HealthCheckReference" + } + } + } + }, + "TargetPoolsRemoveInstanceRequest": { + "id": "TargetPoolsRemoveInstanceRequest", + "type": "object", + "properties": { + "instances": { + "type": "array", + "description": "URLs of the instances to be removed from targetPool.", + "items": { + "$ref": "InstanceReference" + } + } + } + }, + "TargetPoolsScopedList": { + "id": "TargetPoolsScopedList", + "type": "object", + "properties": { + "targetPools": { + "type": "array", + "description": "List of target pools contained in this scope.", + "items": { + "$ref": "TargetPool" + } + }, + "warning": { + "type": "object", + "description": "Informational warning which replaces the list of target pools when the list is empty.", + "properties": { + "code": { + "type": "string", + "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "DEPRECATED_RESOURCE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "INJECTED_KERNELS_DEPRECATED", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_NOT_DELETED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ] + }, + "data": { + "type": "array", + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource, and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding)." + }, + "value": { + "type": "string", + "description": "[Output Only] A warning data value corresponding to the key." + } + } + } + }, + "message": { + "type": "string", + "description": "[Output Only] A human-readable description of the warning code." 
+ } + } + } + } + }, + "TargetReference": { + "id": "TargetReference", + "type": "object", + "properties": { + "target": { + "type": "string" + } + } + }, + "TargetVpnGateway": { + "id": "TargetVpnGateway", + "type": "object", + "properties": { + "creationTimestamp": { + "type": "string", + "description": "[Output Only] Creation timestamp in RFC3339 text format." + }, + "description": { + "type": "string", + "description": "An optional description of this resource. Provide this property when you create the resource." + }, + "forwardingRules": { + "type": "array", + "description": "[Output Only] A list of URLs to the ForwardingRule resources. ForwardingRules are created using compute.forwardingRules.insert and associated with a VPN gateway.", + "items": { + "type": "string" + } + }, + "id": { + "type": "string", + "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server.", + "format": "uint64" + }, + "kind": { + "type": "string", + "description": "[Output Only] Type of resource. Always compute#targetVpnGateway for target VPN gateways.", + "default": "compute#targetVpnGateway" + }, + "name": { + "type": "string", + "description": "Name of the resource; provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression [a-z]([-a-z0-9]*[a-z0-9])? which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "annotations": { + "required": [ + "compute.targetVpnGateways.insert" + ] + } + }, + "network": { + "type": "string", + "description": "URL of the network to which this VPN gateway is attached. Provided by the client when the VPN gateway is created.", + "annotations": { + "required": [ + "compute.targetVpnGateways.insert" + ] + } + }, + "region": { + "type": "string", + "description": "[Output Only] URL of the region where the target VPN gateway resides." + }, + "selfLink": { + "type": "string", + "description": "[Output Only] Server-defined URL for the resource." + }, + "status": { + "type": "string", + "description": "[Output Only] The status of the VPN gateway.", + "enum": [ + "CREATING", + "DELETING", + "FAILED", + "READY" + ], + "enumDescriptions": [ + "", + "", + "", + "" + ] + }, + "tunnels": { + "type": "array", + "description": "[Output Only] A list of URLs to VpnTunnel resources. VpnTunnels are created using compute.vpntunnels.insert and associated with a VPN gateway.", + "items": { + "type": "string" + } + } + } + }, + "TargetVpnGatewayAggregatedList": { + "id": "TargetVpnGatewayAggregatedList", + "type": "object", + "properties": { + "id": { + "type": "string", + "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server." + }, + "items": { + "type": "object", + "description": "A map of scoped target vpn gateway lists.", + "additionalProperties": { + "$ref": "TargetVpnGatewaysScopedList", + "description": "[Output Only] Name of the scope containing this set of target vpn gateways." + } + }, + "kind": { + "type": "string", + "description": "[Output Only] Type of resource. 
Always compute#targetVpnGatewayAggregatedList for aggregated lists of target VPN gateways.", + "default": "compute#targetVpnGatewayAggregatedList" + }, + "nextPageToken": { + "type": "string", + "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results." + }, + "selfLink": { + "type": "string", + "description": "[Output Only] Server-defined URL for the resource." + } + } + }, + "TargetVpnGatewayList": { + "id": "TargetVpnGatewayList", + "type": "object", + "description": "Contains a list of TargetVpnGateway resources.", + "properties": { + "id": { + "type": "string", + "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server." + }, + "items": { + "type": "array", + "description": "[Output Only] A list of TargetVpnGateway resources.", + "items": { + "$ref": "TargetVpnGateway" + } + }, + "kind": { + "type": "string", + "description": "[Output Only] Type of resource. Always compute#targetVpnGatewayList for lists of target VPN gateways.", + "default": "compute#targetVpnGatewayList" + }, + "nextPageToken": { + "type": "string", + "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results." + }, + "selfLink": { + "type": "string", + "description": "[Output Only] Server-defined URL for the resource." + } + } + }, + "TargetVpnGatewaysScopedList": { + "id": "TargetVpnGatewaysScopedList", + "type": "object", + "properties": { + "targetVpnGateways": { + "type": "array", + "description": "[Output Only] List of target vpn gateways contained in this scope.", + "items": { + "$ref": "TargetVpnGateway" + } + }, + "warning": { + "type": "object", + "description": "[Output Only] Informational warning which replaces the list of target VPN gateways when the list is empty.", + "properties": { + "code": { + "type": "string", + "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "DEPRECATED_RESOURCE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "INJECTED_KERNELS_DEPRECATED", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_NOT_DELETED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ] + }, + "data": { + "type": "array", + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. 
Other examples might be a key indicating a deprecated resource, and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding)." + }, + "value": { + "type": "string", + "description": "[Output Only] A warning data value corresponding to the key." + } + } + } + }, + "message": { + "type": "string", + "description": "[Output Only] A human-readable description of the warning code." + } + } + } + } + }, + "TestFailure": { + "id": "TestFailure", + "type": "object", + "properties": { + "actualService": { + "type": "string" + }, + "expectedService": { + "type": "string" + }, + "host": { + "type": "string" + }, + "path": { + "type": "string" + } + } + }, + "UrlMap": { + "id": "UrlMap", + "type": "object", + "description": "A UrlMap resource. This resource defines the mapping from URL to the BackendService resource, based on the \"longest-match\" of the URL's host and path.", + "properties": { + "creationTimestamp": { + "type": "string", + "description": "[Output Only] Creation timestamp in RFC3339 text format." + }, + "defaultService": { + "type": "string", + "description": "The URL of the BackendService resource if none of the hostRules match." + }, + "description": { + "type": "string", + "description": "An optional description of this resource. Provide this property when you create the resource." + }, + "fingerprint": { + "type": "string", + "description": "Fingerprint of this resource. A hash of the contents stored in this object. This field is used in optimistic locking. This field will be ignored when inserting a UrlMap. An up-to-date fingerprint must be provided in order to update the UrlMap.", + "format": "byte" + }, + "hostRules": { + "type": "array", + "description": "The list of HostRules to use against the URL.", + "items": { + "$ref": "HostRule" + } + }, + "id": { + "type": "string", + "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server.", + "format": "uint64" + }, + "kind": { + "type": "string", + "description": "[Output Only] Type of the resource. Always compute#urlMap for url maps.", + "default": "compute#urlMap" + }, + "name": { + "type": "string", + "description": "Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression [a-z]([-a-z0-9]*[a-z0-9])? which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?" + }, + "pathMatchers": { + "type": "array", + "description": "The list of named PathMatchers to use against the URL.", + "items": { + "$ref": "PathMatcher" + } + }, + "selfLink": { + "type": "string", + "description": "[Output Only] Server-defined URL for the resource." + }, + "tests": { + "type": "array", + "description": "The list of expected URL mappings. A request to update this UrlMap will succeed only if all of the test cases pass.", + "items": { + "$ref": "UrlMapTest" + } + } + } + }, + "UrlMapList": { + "id": "UrlMapList", + "type": "object", + "description": "Contains a list of UrlMap resources.", + "properties": { + "id": { + "type": "string", + "description": "[Output Only] Unique identifier for the resource. Set by the server." 
+ }, + "items": { + "type": "array", + "description": "A list of UrlMap resources.", + "items": { + "$ref": "UrlMap" + } + }, + "kind": { + "type": "string", + "description": "Type of resource.", + "default": "compute#urlMapList" + }, + "nextPageToken": { + "type": "string", + "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results." + }, + "selfLink": { + "type": "string", + "description": "[Output Only] Server-defined URL for this resource." + } + } + }, + "UrlMapReference": { + "id": "UrlMapReference", + "type": "object", + "properties": { + "urlMap": { + "type": "string" + } + } + }, + "UrlMapTest": { + "id": "UrlMapTest", + "type": "object", + "description": "Message for the expected URL mappings.", + "properties": { + "description": { + "type": "string", + "description": "Description of this test case." + }, + "host": { + "type": "string", + "description": "Host portion of the URL." + }, + "path": { + "type": "string", + "description": "Path portion of the URL." + }, + "service": { + "type": "string", + "description": "Expected BackendService resource the given URL should be mapped to." + } + } + }, + "UrlMapValidationResult": { + "id": "UrlMapValidationResult", + "type": "object", + "description": "Message representing the validation result for a UrlMap.", + "properties": { + "loadErrors": { + "type": "array", + "items": { + "type": "string" + } + }, + "loadSucceeded": { + "type": "boolean", + "description": "Whether the given UrlMap can be successfully loaded. If false, 'loadErrors' indicates the reasons." + }, + "testFailures": { + "type": "array", + "items": { + "$ref": "TestFailure" + } + }, + "testPassed": { + "type": "boolean", + "description": "If successfully loaded, this field indicates whether the test passed. If false, 'testFailures's indicate the reason of failure." + } + } + }, + "UrlMapsValidateRequest": { + "id": "UrlMapsValidateRequest", + "type": "object", + "properties": { + "resource": { + "$ref": "UrlMap", + "description": "Content of the UrlMap to be validated." + } + } + }, + "UrlMapsValidateResponse": { + "id": "UrlMapsValidateResponse", + "type": "object", + "properties": { + "result": { + "$ref": "UrlMapValidationResult" + } + } + }, + "UsageExportLocation": { + "id": "UsageExportLocation", + "type": "object", + "description": "The location in Cloud Storage and naming method of the daily usage report. Contains bucket_name and report_name prefix.", + "properties": { + "bucketName": { + "type": "string", + "description": "The name of an existing bucket in Cloud Storage where the usage report object is stored. The Google Service Account is granted write access to this bucket. This is just the bucket name, with no gs:// or https://storage.googleapis.com/ in front of it." + }, + "reportNamePrefix": { + "type": "string", + "description": "An optional prefix for the name of the usage report object stored in bucketName. If not supplied, defaults to usage. The report is stored as a CSV file named report_name_prefix_gce_YYYYMMDD.csv where YYYYMMDD is the day of the usage according to Pacific Time. If you supply a prefix, it should conform to Cloud Storage object naming conventions." 
+ } + } + }, + "VpnTunnel": { + "id": "VpnTunnel", + "type": "object", + "properties": { + "creationTimestamp": { + "type": "string", + "description": "[Output Only] Creation timestamp in RFC3339 text format." + }, + "description": { + "type": "string", + "description": "An optional description of this resource. Provide this property when you create the resource." + }, + "detailedStatus": { + "type": "string", + "description": "[Output Only] Detailed status message for the VPN tunnel." + }, + "id": { + "type": "string", + "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server.", + "format": "uint64" + }, + "ikeVersion": { + "type": "integer", + "description": "IKE protocol version to use when establishing the VPN tunnel with peer VPN gateway. Acceptable IKE versions are 1 or 2. Default version is 2.", + "format": "int32" + }, + "kind": { + "type": "string", + "description": "[Output Only] Type of resource. Always compute#vpnTunnel for VPN tunnels.", + "default": "compute#vpnTunnel" + }, + "name": { + "type": "string", + "description": "Name of the resource; provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression [a-z]([-a-z0-9]*[a-z0-9])? which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "annotations": { + "required": [ + "compute.vpnTunnels.insert" + ] + } + }, + "peerIp": { + "type": "string", + "description": "IP address of the peer VPN gateway." + }, + "region": { + "type": "string", + "description": "[Output Only] URL of the region where the VPN tunnel resides." + }, + "selfLink": { + "type": "string", + "description": "[Output Only] Server-defined URL for the resource." + }, + "sharedSecret": { + "type": "string", + "description": "Shared secret used to set the secure session between the GCE VPN gateway and the peer VPN gateway." + }, + "sharedSecretHash": { + "type": "string", + "description": "Hash of the shared secret." + }, + "status": { + "type": "string", + "description": "[Output Only] The status of the VPN tunnel.", + "enum": [ + "AUTHORIZATION_ERROR", + "DEPROVISIONING", + "ESTABLISHED", + "FAILED", + "FIRST_HANDSHAKE", + "NEGOTIATION_FAILURE", + "NETWORK_ERROR", + "NO_INCOMING_PACKETS", + "PROVISIONING", + "REJECTED", + "WAITING_FOR_FULL_CONFIG" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ] + }, + "targetVpnGateway": { + "type": "string", + "description": "URL of the VPN gateway to which this VPN tunnel is associated. Provided by the client when the VPN tunnel is created.", + "annotations": { + "required": [ + "compute.vpnTunnels.insert" + ] + } + } + } + }, + "VpnTunnelAggregatedList": { + "id": "VpnTunnelAggregatedList", + "type": "object", + "properties": { + "id": { + "type": "string", + "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server." + }, + "items": { + "type": "object", + "description": "[Output Only] A map of scoped vpn tunnel lists.", + "additionalProperties": { + "$ref": "VpnTunnelsScopedList", + "description": "Name of the scope containing this set of vpn tunnels." + } + }, + "kind": { + "type": "string", + "description": "[Output Only] Type of resource. 
Always compute#vpnTunnel for VPN tunnels.", + "default": "compute#vpnTunnelAggregatedList" + }, + "nextPageToken": { + "type": "string", + "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results." + }, + "selfLink": { + "type": "string", + "description": "[Output Only] Server-defined URL for this resource." + } + } + }, + "VpnTunnelList": { + "id": "VpnTunnelList", + "type": "object", + "description": "Contains a list of VpnTunnel resources.", + "properties": { + "id": { + "type": "string", + "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server." + }, + "items": { + "type": "array", + "description": "[Output Only] A list of VpnTunnel resources.", + "items": { + "$ref": "VpnTunnel" + } + }, + "kind": { + "type": "string", + "description": "[Output Only] Type of resource. Always compute#vpnTunnel for VPN tunnels.", + "default": "compute#vpnTunnelList" + }, + "nextPageToken": { + "type": "string", + "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results." + }, + "selfLink": { + "type": "string", + "description": "[Output Only] Server-defined URL for the resource." + } + } + }, + "VpnTunnelsScopedList": { + "id": "VpnTunnelsScopedList", + "type": "object", + "properties": { + "vpnTunnels": { + "type": "array", + "description": "List of vpn tunnels contained in this scope.", + "items": { + "$ref": "VpnTunnel" + } + }, + "warning": { + "type": "object", + "description": "Informational warning which replaces the list of addresses when the list is empty.", + "properties": { + "code": { + "type": "string", + "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "DEPRECATED_RESOURCE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "INJECTED_KERNELS_DEPRECATED", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_NOT_DELETED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ] + }, + "data": { + "type": "array", + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. 
Other examples might be a key indicating a deprecated resource, and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding)." + }, + "value": { + "type": "string", + "description": "[Output Only] A warning data value corresponding to the key." + } + } + } + }, + "message": { + "type": "string", + "description": "[Output Only] A human-readable description of the warning code." + } + } + } + } + }, + "Zone": { + "id": "Zone", + "type": "object", + "description": "A Zone resource.", + "properties": { + "creationTimestamp": { + "type": "string", + "description": "[Output Only] Creation timestamp in RFC3339 text format." + }, + "deprecated": { + "$ref": "DeprecationStatus", + "description": "[Output Only] The deprecation status associated with this zone." + }, + "description": { + "type": "string", + "description": "[Output Only] Textual description of the resource." + }, + "id": { + "type": "string", + "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server.", + "format": "uint64" + }, + "kind": { + "type": "string", + "description": "[Output Only] Type of the resource. Always compute#zone for zones.", + "default": "compute#zone" + }, + "maintenanceWindows": { + "type": "array", + "description": "[Output Only] Any scheduled maintenance windows for this zone. When the zone is in a maintenance window, all resources which reside in the zone will be unavailable. For more information, see Maintenance Windows", + "items": { + "type": "object", + "properties": { + "beginTime": { + "type": "string", + "description": "[Output Only] Starting time of the maintenance window, in RFC3339 format." + }, + "description": { + "type": "string", + "description": "[Output Only] Textual description of the maintenance window." + }, + "endTime": { + "type": "string", + "description": "[Output Only] Ending time of the maintenance window, in RFC3339 format." + }, + "name": { + "type": "string", + "description": "[Output Only] Name of the maintenance window." + } + } + } + }, + "name": { + "type": "string", + "description": "[Output Only] Name of the resource." + }, + "region": { + "type": "string", + "description": "[Output Only] Full URL reference to the region which hosts the zone." + }, + "selfLink": { + "type": "string", + "description": "[Output Only] Server-defined URL for the resource." + }, + "status": { + "type": "string", + "description": "[Output Only] Status of the zone, either UP or DOWN.", + "enum": [ + "DOWN", + "UP" + ], + "enumDescriptions": [ + "", + "" + ] + } + } + }, + "ZoneList": { + "id": "ZoneList", + "type": "object", + "description": "Contains a list of zone resources.", + "properties": { + "id": { + "type": "string", + "description": "[Output Only] Unique identifier for the resource; defined by the server." + }, + "items": { + "type": "array", + "description": "[Output Only] A list of Zone resources.", + "items": { + "$ref": "Zone" + } + }, + "kind": { + "type": "string", + "description": "Type of resource.", + "default": "compute#zoneList" + }, + "nextPageToken": { + "type": "string", + "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. 
Subsequent list requests will have their own nextPageToken to continue paging through the results." + }, + "selfLink": { + "type": "string", + "description": "[Output Only] Server-defined URL for this resource." + } + } + } + }, + "resources": { + "addresses": { + "methods": { + "aggregatedList": { + "id": "compute.addresses.aggregatedList", + "path": "{project}/aggregated/addresses", + "httpMethod": "GET", + "description": "Retrieves the list of addresses grouped by scope.", + "parameters": { + "filter": { + "type": "string", + "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, filter=name ne example-instance.\n\nCompute Engine Beta API Only: If you use filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. In particular, use filtering on nested fields to take advantage of instance labels to organize and filter results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions meaning that resources must match all expressions to pass the filters.", + "location": "query" + }, + "maxResults": { + "type": "integer", + "description": "The maximum number of results per page that Compute Engine should return. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests.", + "default": "500", + "format": "uint32", + "minimum": "0", + "maximum": "500", + "location": "query" + }, + "pageToken": { + "type": "string", + "description": "Specifies a page token to use. 
Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + "location": "query" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + } + }, + "parameterOrder": [ + "project" + ], + "response": { + "$ref": "AddressAggregatedList" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "delete": { + "id": "compute.addresses.delete", + "path": "{project}/regions/{region}/addresses/{address}", + "httpMethod": "DELETE", + "description": "Deletes the specified address resource.", + "parameters": { + "address": { + "type": "string", + "description": "Name of the address resource to delete.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "region": { + "type": "string", + "description": "The name of the region for this request.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "region", + "address" + ], + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "get": { + "id": "compute.addresses.get", + "path": "{project}/regions/{region}/addresses/{address}", + "httpMethod": "GET", + "description": "Returns the specified address resource.", + "parameters": { + "address": { + "type": "string", + "description": "Name of the address resource to return.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "region": { + "type": "string", + "description": "The name of the region for this request.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "region", + "address" + ], + "response": { + "$ref": "Address" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "insert": { + "id": "compute.addresses.insert", + "path": "{project}/regions/{region}/addresses", + "httpMethod": "POST", + "description": "Creates an address resource in the specified project using the data included in the request.", + "parameters": { + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "region": { + "type": "string", + "description": "The name of the region for this request.", + "required": true, + "pattern": 
"[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "region" + ], + "request": { + "$ref": "Address" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "list": { + "id": "compute.addresses.list", + "path": "{project}/regions/{region}/addresses", + "httpMethod": "GET", + "description": "Retrieves the list of address resources contained within the specified region.", + "parameters": { + "filter": { + "type": "string", + "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, filter=name ne example-instance.\n\nCompute Engine Beta API Only: If you use filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. In particular, use filtering on nested fields to take advantage of instance labels to organize and filter results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions meaning that resources must match all expressions to pass the filters.", + "location": "query" + }, + "maxResults": { + "type": "integer", + "description": "The maximum number of results per page that Compute Engine should return. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests.", + "default": "500", + "format": "uint32", + "minimum": "0", + "maximum": "500", + "location": "query" + }, + "pageToken": { + "type": "string", + "description": "Specifies a page token to use. 
Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + "location": "query" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "region": { + "type": "string", + "description": "The name of the region for this request.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "region" + ], + "response": { + "$ref": "AddressList" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + } + } + }, + "autoscalers": { + "methods": { + "aggregatedList": { + "id": "compute.autoscalers.aggregatedList", + "path": "{project}/aggregated/autoscalers", + "httpMethod": "GET", + "description": "Retrieves the list of autoscalers grouped by scope.", + "parameters": { + "filter": { + "type": "string", + "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, filter=name ne example-instance.\n\nCompute Engine Beta API Only: If you use filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. In particular, use filtering on nested fields to take advantage of instance labels to organize and filter results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions meaning that resources must match all expressions to pass the filters.", + "location": "query" + }, + "maxResults": { + "type": "integer", + "description": "The maximum number of results per page that Compute Engine should return. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests.", + "default": "500", + "format": "uint32", + "minimum": "0", + "maximum": "500", + "location": "query" + }, + "pageToken": { + "type": "string", + "description": "Specifies a page token to use. 
Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + "location": "query" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + } + }, + "parameterOrder": [ + "project" + ], + "response": { + "$ref": "AutoscalerAggregatedList" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "delete": { + "id": "compute.autoscalers.delete", + "path": "{project}/zones/{zone}/autoscalers/{autoscaler}", + "httpMethod": "DELETE", + "description": "Deletes the specified autoscaler resource.", + "parameters": { + "autoscaler": { + "type": "string", + "description": "Name of the persistent autoscaler resource to delete.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "zone": { + "type": "string", + "description": "Name of the zone scoping this request.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "zone", + "autoscaler" + ], + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "get": { + "id": "compute.autoscalers.get", + "path": "{project}/zones/{zone}/autoscalers/{autoscaler}", + "httpMethod": "GET", + "description": "Returns the specified autoscaler resource.", + "parameters": { + "autoscaler": { + "type": "string", + "description": "Name of the persistent autoscaler resource to return.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "zone": { + "type": "string", + "description": "Name of the zone scoping this request.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "zone", + "autoscaler" + ], + "response": { + "$ref": "Autoscaler" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "insert": { + "id": "compute.autoscalers.insert", + "path": "{project}/zones/{zone}/autoscalers", + "httpMethod": "POST", + "description": "Creates an autoscaler resource in the specified project using the data included in the request.", + "parameters": { + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "zone": { + "type": "string", + "description": "Name of the zone scoping this 
request.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "zone" + ], + "request": { + "$ref": "Autoscaler" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "list": { + "id": "compute.autoscalers.list", + "path": "{project}/zones/{zone}/autoscalers", + "httpMethod": "GET", + "description": "Retrieves the list of autoscaler resources contained within the specified zone.", + "parameters": { + "filter": { + "type": "string", + "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, filter=name ne example-instance.\n\nCompute Engine Beta API Only: If you use filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. In particular, use filtering on nested fields to take advantage of instance labels to organize and filter results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions meaning that resources must match all expressions to pass the filters.", + "location": "query" + }, + "maxResults": { + "type": "integer", + "description": "The maximum number of results per page that Compute Engine should return. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests.", + "default": "500", + "format": "uint32", + "minimum": "0", + "maximum": "500", + "location": "query" + }, + "pageToken": { + "type": "string", + "description": "Specifies a page token to use. 
Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + "location": "query" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "zone": { + "type": "string", + "description": "Name of the zone scoping this request.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "zone" + ], + "response": { + "$ref": "AutoscalerList" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "patch": { + "id": "compute.autoscalers.patch", + "path": "{project}/zones/{zone}/autoscalers", + "httpMethod": "PATCH", + "description": "Updates an autoscaler resource in the specified project using the data included in the request. This method supports patch semantics.", + "parameters": { + "autoscaler": { + "type": "string", + "description": "Name of the autoscaler resource to update.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "query" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "zone": { + "type": "string", + "description": "Name of the zone scoping this request.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "zone", + "autoscaler" + ], + "request": { + "$ref": "Autoscaler" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "update": { + "id": "compute.autoscalers.update", + "path": "{project}/zones/{zone}/autoscalers", + "httpMethod": "PUT", + "description": "Updates an autoscaler resource in the specified project using the data included in the request.", + "parameters": { + "autoscaler": { + "type": "string", + "description": "Name of the autoscaler resource to update.", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "query" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "zone": { + "type": "string", + "description": "Name of the zone scoping this request.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "zone" + ], + "request": { + "$ref": "Autoscaler" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + } + } + }, + "backendServices": { + "methods": { + "delete": { + "id": "compute.backendServices.delete", + "path": "{project}/global/backendServices/{backendService}", + "httpMethod": "DELETE", + "description": "Deletes the specified BackendService resource.", + "parameters": { + "backendService": { + "type": "string", + 
"description": "Name of the BackendService resource to delete.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "backendService" + ], + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "get": { + "id": "compute.backendServices.get", + "path": "{project}/global/backendServices/{backendService}", + "httpMethod": "GET", + "description": "Returns the specified BackendService resource.", + "parameters": { + "backendService": { + "type": "string", + "description": "Name of the BackendService resource to return.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "backendService" + ], + "response": { + "$ref": "BackendService" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "getHealth": { + "id": "compute.backendServices.getHealth", + "path": "{project}/global/backendServices/{backendService}/getHealth", + "httpMethod": "POST", + "description": "Gets the most recent health check results for this BackendService.", + "parameters": { + "backendService": { + "type": "string", + "description": "Name of the BackendService resource to which the queried instance belongs.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + }, + "project": { + "type": "string", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "backendService" + ], + "request": { + "$ref": "ResourceGroupReference" + }, + "response": { + "$ref": "BackendServiceGroupHealth" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "insert": { + "id": "compute.backendServices.insert", + "path": "{project}/global/backendServices", + "httpMethod": "POST", + "description": "Creates a BackendService resource in the specified project using the data included in the request.", + "parameters": { + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + } + }, + "parameterOrder": [ + "project" + ], + "request": { + "$ref": "BackendService" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "list": { + "id": "compute.backendServices.list", + "path": 
"{project}/global/backendServices", + "httpMethod": "GET", + "description": "Retrieves the list of BackendService resources available to the specified project.", + "parameters": { + "filter": { + "type": "string", + "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, filter=name ne example-instance.\n\nCompute Engine Beta API Only: If you use filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. In particular, use filtering on nested fields to take advantage of instance labels to organize and filter results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions meaning that resources must match all expressions to pass the filters.", + "location": "query" + }, + "maxResults": { + "type": "integer", + "description": "The maximum number of results per page that Compute Engine should return. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests.", + "default": "500", + "format": "uint32", + "minimum": "0", + "maximum": "500", + "location": "query" + }, + "pageToken": { + "type": "string", + "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + "location": "query" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + } + }, + "parameterOrder": [ + "project" + ], + "response": { + "$ref": "BackendServiceList" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "patch": { + "id": "compute.backendServices.patch", + "path": "{project}/global/backendServices/{backendService}", + "httpMethod": "PATCH", + "description": "Update the entire content of the BackendService resource. 
This method supports patch semantics.", + "parameters": { + "backendService": { + "type": "string", + "description": "Name of the BackendService resource to update.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "backendService" + ], + "request": { + "$ref": "BackendService" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "update": { + "id": "compute.backendServices.update", + "path": "{project}/global/backendServices/{backendService}", + "httpMethod": "PUT", + "description": "Update the entire content of the BackendService resource.", + "parameters": { + "backendService": { + "type": "string", + "description": "Name of the BackendService resource to update.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "backendService" + ], + "request": { + "$ref": "BackendService" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + } + } + }, + "diskTypes": { + "methods": { + "aggregatedList": { + "id": "compute.diskTypes.aggregatedList", + "path": "{project}/aggregated/diskTypes", + "httpMethod": "GET", + "description": "Retrieves the list of disk type resources grouped by scope.", + "parameters": { + "filter": { + "type": "string", + "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, filter=name ne example-instance.\n\nCompute Engine Beta API Only: If you use filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. In particular, use filtering on nested fields to take advantage of instance labels to organize and filter results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). 
Multiple expressions are treated as AND expressions meaning that resources must match all expressions to pass the filters.", + "location": "query" + }, + "maxResults": { + "type": "integer", + "description": "The maximum number of results per page that Compute Engine should return. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests.", + "default": "500", + "format": "uint32", + "minimum": "0", + "maximum": "500", + "location": "query" + }, + "pageToken": { + "type": "string", + "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + "location": "query" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + } + }, + "parameterOrder": [ + "project" + ], + "response": { + "$ref": "DiskTypeAggregatedList" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "get": { + "id": "compute.diskTypes.get", + "path": "{project}/zones/{zone}/diskTypes/{diskType}", + "httpMethod": "GET", + "description": "Returns the specified disk type resource.", + "parameters": { + "diskType": { + "type": "string", + "description": "Name of the disk type resource to return.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "zone": { + "type": "string", + "description": "The name of the zone for this request.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "zone", + "diskType" + ], + "response": { + "$ref": "DiskType" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "list": { + "id": "compute.diskTypes.list", + "path": "{project}/zones/{zone}/diskTypes", + "httpMethod": "GET", + "description": "Retrieves the list of disk type resources available to the specified project.", + "parameters": { + "filter": { + "type": "string", + "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. 
The literal value must match the entire field.\n\nFor example, filter=name ne example-instance.\n\nCompute Engine Beta API Only: If you use filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. In particular, use filtering on nested fields to take advantage of instance labels to organize and filter results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions meaning that resources must match all expressions to pass the filters.", + "location": "query" + }, + "maxResults": { + "type": "integer", + "description": "The maximum number of results per page that Compute Engine should return. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests.", + "default": "500", + "format": "uint32", + "minimum": "0", + "maximum": "500", + "location": "query" + }, + "pageToken": { + "type": "string", + "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + "location": "query" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "zone": { + "type": "string", + "description": "The name of the zone for this request.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "zone" + ], + "response": { + "$ref": "DiskTypeList" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + } + } + }, + "disks": { + "methods": { + "aggregatedList": { + "id": "compute.disks.aggregatedList", + "path": "{project}/aggregated/disks", + "httpMethod": "GET", + "description": "Retrieves the list of persistent disks grouped by scope.", + "parameters": { + "filter": { + "type": "string", + "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, filter=name ne example-instance.\n\nCompute Engine Beta API Only: If you use filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. 
In particular, use filtering on nested fields to take advantage of instance labels to organize and filter results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions meaning that resources must match all expressions to pass the filters.", + "location": "query" + }, + "maxResults": { + "type": "integer", + "description": "The maximum number of results per page that Compute Engine should return. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests.", + "default": "500", + "format": "uint32", + "minimum": "0", + "maximum": "500", + "location": "query" + }, + "pageToken": { + "type": "string", + "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + "location": "query" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + } + }, + "parameterOrder": [ + "project" + ], + "response": { + "$ref": "DiskAggregatedList" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "createSnapshot": { + "id": "compute.disks.createSnapshot", + "path": "{project}/zones/{zone}/disks/{disk}/createSnapshot", + "httpMethod": "POST", + "description": "Creates a snapshot of a specified persistent disk.", + "parameters": { + "disk": { + "type": "string", + "description": "Name of the persistent disk to snapshot.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "zone": { + "type": "string", + "description": "The name of the zone for this request.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "zone", + "disk" + ], + "request": { + "$ref": "Snapshot" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "delete": { + "id": "compute.disks.delete", + "path": "{project}/zones/{zone}/disks/{disk}", + "httpMethod": "DELETE", + "description": "Deletes the specified persistent disk. Deleting a disk removes its data permanently and is irreversible. However, deleting a disk does not delete any snapshots previously made from the disk. 
You must separately delete snapshots.", + "parameters": { + "disk": { + "type": "string", + "description": "Name of the persistent disk to delete.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "zone": { + "type": "string", + "description": "The name of the zone for this request.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "zone", + "disk" + ], + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "get": { + "id": "compute.disks.get", + "path": "{project}/zones/{zone}/disks/{disk}", + "httpMethod": "GET", + "description": "Returns a specified persistent disk.", + "parameters": { + "disk": { + "type": "string", + "description": "Name of the persistent disk to return.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "zone": { + "type": "string", + "description": "The name of the zone for this request.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "zone", + "disk" + ], + "response": { + "$ref": "Disk" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "insert": { + "id": "compute.disks.insert", + "path": "{project}/zones/{zone}/disks", + "httpMethod": "POST", + "description": "Creates a persistent disk in the specified project using the data included in the request.", + "parameters": { + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "sourceImage": { + "type": "string", + "description": "Optional. Source image to restore onto a disk.", + "location": "query" + }, + "zone": { + "type": "string", + "description": "The name of the zone for this request.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "zone" + ], + "request": { + "$ref": "Disk" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "list": { + "id": "compute.disks.list", + "path": "{project}/zones/{zone}/disks", + "httpMethod": "GET", + "description": "Retrieves the list of persistent disks contained within the specified zone.", + "parameters": { + "filter": { + "type": "string", + "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. 
Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, filter=name ne example-instance.\n\nCompute Engine Beta API Only: If you use filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. In particular, use filtering on nested fields to take advantage of instance labels to organize and filter results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions meaning that resources must match all expressions to pass the filters.", + "location": "query" + }, + "maxResults": { + "type": "integer", + "description": "The maximum number of results per page that Compute Engine should return. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests.", + "default": "500", + "format": "uint32", + "minimum": "0", + "maximum": "500", + "location": "query" + }, + "pageToken": { + "type": "string", + "description": "Specifies a page token to use. 
Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + "location": "query" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "zone": { + "type": "string", + "description": "The name of the zone for this request.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "zone" + ], + "response": { + "$ref": "DiskList" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + } + } + }, + "firewalls": { + "methods": { + "delete": { + "id": "compute.firewalls.delete", + "path": "{project}/global/firewalls/{firewall}", + "httpMethod": "DELETE", + "description": "Deletes the specified firewall resource.", + "parameters": { + "firewall": { + "type": "string", + "description": "Name of the firewall resource to delete.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "firewall" + ], + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "get": { + "id": "compute.firewalls.get", + "path": "{project}/global/firewalls/{firewall}", + "httpMethod": "GET", + "description": "Returns the specified firewall resource.", + "parameters": { + "firewall": { + "type": "string", + "description": "Name of the firewall resource to return.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "firewall" + ], + "response": { + "$ref": "Firewall" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "insert": { + "id": "compute.firewalls.insert", + "path": "{project}/global/firewalls", + "httpMethod": "POST", + "description": "Creates a firewall resource in the specified project using the data included in the request.", + "parameters": { + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + } + }, + "parameterOrder": [ + "project" + ], + "request": { + "$ref": "Firewall" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "list": { + "id": "compute.firewalls.list", + "path": "{project}/global/firewalls", + 
"httpMethod": "GET", + "description": "Retrieves the list of firewall resources available to the specified project.", + "parameters": { + "filter": { + "type": "string", + "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, filter=name ne example-instance.\n\nCompute Engine Beta API Only: If you use filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. In particular, use filtering on nested fields to take advantage of instance labels to organize and filter results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions meaning that resources must match all expressions to pass the filters.", + "location": "query" + }, + "maxResults": { + "type": "integer", + "description": "The maximum number of results per page that Compute Engine should return. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests.", + "default": "500", + "format": "uint32", + "minimum": "0", + "maximum": "500", + "location": "query" + }, + "pageToken": { + "type": "string", + "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + "location": "query" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + } + }, + "parameterOrder": [ + "project" + ], + "response": { + "$ref": "FirewallList" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "patch": { + "id": "compute.firewalls.patch", + "path": "{project}/global/firewalls/{firewall}", + "httpMethod": "PATCH", + "description": "Updates the specified firewall resource with the data included in the request. 
This method supports patch semantics.", + "parameters": { + "firewall": { + "type": "string", + "description": "Name of the firewall resource to update.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "firewall" + ], + "request": { + "$ref": "Firewall" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "update": { + "id": "compute.firewalls.update", + "path": "{project}/global/firewalls/{firewall}", + "httpMethod": "PUT", + "description": "Updates the specified firewall resource with the data included in the request.", + "parameters": { + "firewall": { + "type": "string", + "description": "Name of the firewall resource to update.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "firewall" + ], + "request": { + "$ref": "Firewall" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + } + } + }, + "forwardingRules": { + "methods": { + "aggregatedList": { + "id": "compute.forwardingRules.aggregatedList", + "path": "{project}/aggregated/forwardingRules", + "httpMethod": "GET", + "description": "Retrieves the list of forwarding rules grouped by scope.", + "parameters": { + "filter": { + "type": "string", + "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, filter=name ne example-instance.\n\nCompute Engine Beta API Only: If you use filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. In particular, use filtering on nested fields to take advantage of instance labels to organize and filter results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). 
Multiple expressions are treated as AND expressions meaning that resources must match all expressions to pass the filters.", + "location": "query" + }, + "maxResults": { + "type": "integer", + "description": "The maximum number of results per page that Compute Engine should return. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests.", + "default": "500", + "format": "uint32", + "minimum": "0", + "maximum": "500", + "location": "query" + }, + "pageToken": { + "type": "string", + "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + "location": "query" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + } + }, + "parameterOrder": [ + "project" + ], + "response": { + "$ref": "ForwardingRuleAggregatedList" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "delete": { + "id": "compute.forwardingRules.delete", + "path": "{project}/regions/{region}/forwardingRules/{forwardingRule}", + "httpMethod": "DELETE", + "description": "Deletes the specified ForwardingRule resource.", + "parameters": { + "forwardingRule": { + "type": "string", + "description": "Name of the ForwardingRule resource to delete.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "region": { + "type": "string", + "description": "Name of the region scoping this request.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "region", + "forwardingRule" + ], + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "get": { + "id": "compute.forwardingRules.get", + "path": "{project}/regions/{region}/forwardingRules/{forwardingRule}", + "httpMethod": "GET", + "description": "Returns the specified ForwardingRule resource.", + "parameters": { + "forwardingRule": { + "type": "string", + "description": "Name of the ForwardingRule resource to return.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "region": { + "type": "string", + "description": "Name of the region scoping this request.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "region", + "forwardingRule" + ], + "response": { + "$ref": "ForwardingRule" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", 
+ "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "insert": { + "id": "compute.forwardingRules.insert", + "path": "{project}/regions/{region}/forwardingRules", + "httpMethod": "POST", + "description": "Creates a ForwardingRule resource in the specified project and region using the data included in the request.", + "parameters": { + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "region": { + "type": "string", + "description": "Name of the region scoping this request.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "region" + ], + "request": { + "$ref": "ForwardingRule" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "list": { + "id": "compute.forwardingRules.list", + "path": "{project}/regions/{region}/forwardingRules", + "httpMethod": "GET", + "description": "Retrieves the list of ForwardingRule resources available to the specified project and region.", + "parameters": { + "filter": { + "type": "string", + "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, filter=name ne example-instance.\n\nCompute Engine Beta API Only: If you use filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. In particular, use filtering on nested fields to take advantage of instance labels to organize and filter results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions meaning that resources must match all expressions to pass the filters.", + "location": "query" + }, + "maxResults": { + "type": "integer", + "description": "The maximum number of results per page that Compute Engine should return. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests.", + "default": "500", + "format": "uint32", + "minimum": "0", + "maximum": "500", + "location": "query" + }, + "pageToken": { + "type": "string", + "description": "Specifies a page token to use. 
Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + "location": "query" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "region": { + "type": "string", + "description": "Name of the region scoping this request.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "region" + ], + "response": { + "$ref": "ForwardingRuleList" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "setTarget": { + "id": "compute.forwardingRules.setTarget", + "path": "{project}/regions/{region}/forwardingRules/{forwardingRule}/setTarget", + "httpMethod": "POST", + "description": "Changes target URL for forwarding rule. The new target should be of the same type as the old target.", + "parameters": { + "forwardingRule": { + "type": "string", + "description": "Name of the ForwardingRule resource in which target is to be set.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "region": { + "type": "string", + "description": "Name of the region scoping this request.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "region", + "forwardingRule" + ], + "request": { + "$ref": "TargetReference" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + } + } + }, + "globalAddresses": { + "methods": { + "delete": { + "id": "compute.globalAddresses.delete", + "path": "{project}/global/addresses/{address}", + "httpMethod": "DELETE", + "description": "Deletes the specified address resource.", + "parameters": { + "address": { + "type": "string", + "description": "Name of the address resource to delete.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "address" + ], + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "get": { + "id": "compute.globalAddresses.get", + "path": "{project}/global/addresses/{address}", + "httpMethod": "GET", + "description": "Returns the specified address resource.", + "parameters": { + "address": { + "type": "string", + "description": "Name of the address resource to return.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + 
"required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "address" + ], + "response": { + "$ref": "Address" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "insert": { + "id": "compute.globalAddresses.insert", + "path": "{project}/global/addresses", + "httpMethod": "POST", + "description": "Creates an address resource in the specified project using the data included in the request.", + "parameters": { + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + } + }, + "parameterOrder": [ + "project" + ], + "request": { + "$ref": "Address" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "list": { + "id": "compute.globalAddresses.list", + "path": "{project}/global/addresses", + "httpMethod": "GET", + "description": "Retrieves the list of global address resources.", + "parameters": { + "filter": { + "type": "string", + "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, filter=name ne example-instance.\n\nCompute Engine Beta API Only: If you use filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. In particular, use filtering on nested fields to take advantage of instance labels to organize and filter results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions meaning that resources must match all expressions to pass the filters.", + "location": "query" + }, + "maxResults": { + "type": "integer", + "description": "The maximum number of results per page that Compute Engine should return. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests.", + "default": "500", + "format": "uint32", + "minimum": "0", + "maximum": "500", + "location": "query" + }, + "pageToken": { + "type": "string", + "description": "Specifies a page token to use. 
Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + "location": "query" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + } + }, + "parameterOrder": [ + "project" + ], + "response": { + "$ref": "AddressList" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + } + } + }, + "globalForwardingRules": { + "methods": { + "delete": { + "id": "compute.globalForwardingRules.delete", + "path": "{project}/global/forwardingRules/{forwardingRule}", + "httpMethod": "DELETE", + "description": "Deletes the specified ForwardingRule resource.", + "parameters": { + "forwardingRule": { + "type": "string", + "description": "Name of the ForwardingRule resource to delete.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "forwardingRule" + ], + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "get": { + "id": "compute.globalForwardingRules.get", + "path": "{project}/global/forwardingRules/{forwardingRule}", + "httpMethod": "GET", + "description": "Returns the specified ForwardingRule resource.", + "parameters": { + "forwardingRule": { + "type": "string", + "description": "Name of the ForwardingRule resource to return.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "forwardingRule" + ], + "response": { + "$ref": "ForwardingRule" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "insert": { + "id": "compute.globalForwardingRules.insert", + "path": "{project}/global/forwardingRules", + "httpMethod": "POST", + "description": "Creates a ForwardingRule resource in the specified project and region using the data included in the request.", + "parameters": { + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + } + }, + "parameterOrder": [ + "project" + ], + "request": { + "$ref": "ForwardingRule" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "list": { + "id": "compute.globalForwardingRules.list", + "path": "{project}/global/forwardingRules", + "httpMethod": "GET", + 
"description": "Retrieves the list of ForwardingRule resources available to the specified project.", + "parameters": { + "filter": { + "type": "string", + "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, filter=name ne example-instance.\n\nCompute Engine Beta API Only: If you use filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. In particular, use filtering on nested fields to take advantage of instance labels to organize and filter results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions meaning that resources must match all expressions to pass the filters.", + "location": "query" + }, + "maxResults": { + "type": "integer", + "description": "The maximum number of results per page that Compute Engine should return. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests.", + "default": "500", + "format": "uint32", + "minimum": "0", + "maximum": "500", + "location": "query" + }, + "pageToken": { + "type": "string", + "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + "location": "query" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + } + }, + "parameterOrder": [ + "project" + ], + "response": { + "$ref": "ForwardingRuleList" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "setTarget": { + "id": "compute.globalForwardingRules.setTarget", + "path": "{project}/global/forwardingRules/{forwardingRule}/setTarget", + "httpMethod": "POST", + "description": "Changes target URL for forwarding rule. 
The new target should be of the same type as the old target.", + "parameters": { + "forwardingRule": { + "type": "string", + "description": "Name of the ForwardingRule resource in which target is to be set.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "forwardingRule" + ], + "request": { + "$ref": "TargetReference" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + } + } + }, + "globalOperations": { + "methods": { + "aggregatedList": { + "id": "compute.globalOperations.aggregatedList", + "path": "{project}/aggregated/operations", + "httpMethod": "GET", + "description": "Retrieves the list of all operations grouped by scope.", + "parameters": { + "filter": { + "type": "string", + "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, filter=name ne example-instance.\n\nCompute Engine Beta API Only: If you use filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. In particular, use filtering on nested fields to take advantage of instance labels to organize and filter results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions meaning that resources must match all expressions to pass the filters.", + "location": "query" + }, + "maxResults": { + "type": "integer", + "description": "The maximum number of results per page that Compute Engine should return. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests.", + "default": "500", + "format": "uint32", + "minimum": "0", + "maximum": "500", + "location": "query" + }, + "pageToken": { + "type": "string", + "description": "Specifies a page token to use. 
Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + "location": "query" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + } + }, + "parameterOrder": [ + "project" + ], + "response": { + "$ref": "OperationAggregatedList" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "delete": { + "id": "compute.globalOperations.delete", + "path": "{project}/global/operations/{operation}", + "httpMethod": "DELETE", + "description": "Deletes the specified Operations resource.", + "parameters": { + "operation": { + "type": "string", + "description": "Name of the Operations resource to delete.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "operation" + ], + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "get": { + "id": "compute.globalOperations.get", + "path": "{project}/global/operations/{operation}", + "httpMethod": "GET", + "description": "Retrieves the specified Operations resource.", + "parameters": { + "operation": { + "type": "string", + "description": "Name of the Operations resource to return.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "operation" + ], + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "list": { + "id": "compute.globalOperations.list", + "path": "{project}/global/operations", + "httpMethod": "GET", + "description": "Retrieves the list of Operation resources contained within the specified project.", + "parameters": { + "filter": { + "type": "string", + "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. 
The literal value must match the entire field.\n\nFor example, filter=name ne example-instance.\n\nCompute Engine Beta API Only: If you use filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. In particular, use filtering on nested fields to take advantage of instance labels to organize and filter results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions meaning that resources must match all expressions to pass the filters.", + "location": "query" + }, + "maxResults": { + "type": "integer", + "description": "The maximum number of results per page that Compute Engine should return. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests.", + "default": "500", + "format": "uint32", + "minimum": "0", + "maximum": "500", + "location": "query" + }, + "pageToken": { + "type": "string", + "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + "location": "query" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + } + }, + "parameterOrder": [ + "project" + ], + "response": { + "$ref": "OperationList" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + } + } + }, + "httpHealthChecks": { + "methods": { + "delete": { + "id": "compute.httpHealthChecks.delete", + "path": "{project}/global/httpHealthChecks/{httpHealthCheck}", + "httpMethod": "DELETE", + "description": "Deletes the specified HttpHealthCheck resource.", + "parameters": { + "httpHealthCheck": { + "type": "string", + "description": "Name of the HttpHealthCheck resource to delete.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "httpHealthCheck" + ], + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "get": { + "id": "compute.httpHealthChecks.get", + "path": "{project}/global/httpHealthChecks/{httpHealthCheck}", + "httpMethod": "GET", + "description": "Returns the specified HttpHealthCheck resource.", + "parameters": { + "httpHealthCheck": { + "type": "string", + "description": "Name of the HttpHealthCheck resource to return.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": 
"(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "httpHealthCheck" + ], + "response": { + "$ref": "HttpHealthCheck" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "insert": { + "id": "compute.httpHealthChecks.insert", + "path": "{project}/global/httpHealthChecks", + "httpMethod": "POST", + "description": "Creates a HttpHealthCheck resource in the specified project using the data included in the request.", + "parameters": { + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + } + }, + "parameterOrder": [ + "project" + ], + "request": { + "$ref": "HttpHealthCheck" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "list": { + "id": "compute.httpHealthChecks.list", + "path": "{project}/global/httpHealthChecks", + "httpMethod": "GET", + "description": "Retrieves the list of HttpHealthCheck resources available to the specified project.", + "parameters": { + "filter": { + "type": "string", + "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, filter=name ne example-instance.\n\nCompute Engine Beta API Only: If you use filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. In particular, use filtering on nested fields to take advantage of instance labels to organize and filter results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions meaning that resources must match all expressions to pass the filters.", + "location": "query" + }, + "maxResults": { + "type": "integer", + "description": "The maximum number of results per page that Compute Engine should return. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests.", + "default": "500", + "format": "uint32", + "minimum": "0", + "maximum": "500", + "location": "query" + }, + "pageToken": { + "type": "string", + "description": "Specifies a page token to use. 
Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + "location": "query" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + } + }, + "parameterOrder": [ + "project" + ], + "response": { + "$ref": "HttpHealthCheckList" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "patch": { + "id": "compute.httpHealthChecks.patch", + "path": "{project}/global/httpHealthChecks/{httpHealthCheck}", + "httpMethod": "PATCH", + "description": "Updates a HttpHealthCheck resource in the specified project using the data included in the request. This method supports patch semantics.", + "parameters": { + "httpHealthCheck": { + "type": "string", + "description": "Name of the HttpHealthCheck resource to update.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "httpHealthCheck" + ], + "request": { + "$ref": "HttpHealthCheck" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "update": { + "id": "compute.httpHealthChecks.update", + "path": "{project}/global/httpHealthChecks/{httpHealthCheck}", + "httpMethod": "PUT", + "description": "Updates a HttpHealthCheck resource in the specified project using the data included in the request.", + "parameters": { + "httpHealthCheck": { + "type": "string", + "description": "Name of the HttpHealthCheck resource to update.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "httpHealthCheck" + ], + "request": { + "$ref": "HttpHealthCheck" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + } + } + }, + "httpsHealthChecks": { + "methods": { + "delete": { + "id": "compute.httpsHealthChecks.delete", + "path": "{project}/global/httpsHealthChecks/{httpsHealthCheck}", + "httpMethod": "DELETE", + "description": "Deletes the specified HttpsHealthCheck resource.", + "parameters": { + "httpsHealthCheck": { + "type": "string", + "description": "Name of the HttpsHealthCheck resource to delete.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + } + }, + "parameterOrder": [ + 
"project", + "httpsHealthCheck" + ], + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "get": { + "id": "compute.httpsHealthChecks.get", + "path": "{project}/global/httpsHealthChecks/{httpsHealthCheck}", + "httpMethod": "GET", + "description": "Returns the specified HttpsHealthCheck resource.", + "parameters": { + "httpsHealthCheck": { + "type": "string", + "description": "Name of the HttpsHealthCheck resource to return.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "httpsHealthCheck" + ], + "response": { + "$ref": "HttpsHealthCheck" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "insert": { + "id": "compute.httpsHealthChecks.insert", + "path": "{project}/global/httpsHealthChecks", + "httpMethod": "POST", + "description": "Creates a HttpsHealthCheck resource in the specified project using the data included in the request.", + "parameters": { + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + } + }, + "parameterOrder": [ + "project" + ], + "request": { + "$ref": "HttpsHealthCheck" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "list": { + "id": "compute.httpsHealthChecks.list", + "path": "{project}/global/httpsHealthChecks", + "httpMethod": "GET", + "description": "Retrieves the list of HttpsHealthCheck resources available to the specified project.", + "parameters": { + "filter": { + "type": "string", + "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, filter=name ne example-instance.\n\nCompute Engine Beta API Only: If you use filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. In particular, use filtering on nested fields to take advantage of instance labels to organize and filter results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). 
Multiple expressions are treated as AND expressions meaning that resources must match all expressions to pass the filters.", + "location": "query" + }, + "maxResults": { + "type": "integer", + "description": "The maximum number of results per page that Compute Engine should return. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests.", + "default": "500", + "format": "uint32", + "minimum": "0", + "maximum": "500", + "location": "query" + }, + "pageToken": { + "type": "string", + "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + "location": "query" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + } + }, + "parameterOrder": [ + "project" + ], + "response": { + "$ref": "HttpsHealthCheckList" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "patch": { + "id": "compute.httpsHealthChecks.patch", + "path": "{project}/global/httpsHealthChecks/{httpsHealthCheck}", + "httpMethod": "PATCH", + "description": "Updates a HttpsHealthCheck resource in the specified project using the data included in the request. This method supports patch semantics.", + "parameters": { + "httpsHealthCheck": { + "type": "string", + "description": "Name of the HttpsHealthCheck resource to update.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "httpsHealthCheck" + ], + "request": { + "$ref": "HttpsHealthCheck" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "update": { + "id": "compute.httpsHealthChecks.update", + "path": "{project}/global/httpsHealthChecks/{httpsHealthCheck}", + "httpMethod": "PUT", + "description": "Updates a HttpsHealthCheck resource in the specified project using the data included in the request.", + "parameters": { + "httpsHealthCheck": { + "type": "string", + "description": "Name of the HttpsHealthCheck resource to update.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "httpsHealthCheck" + ], + "request": { + "$ref": "HttpsHealthCheck" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + } + } + }, + "images": { + "methods": { + "delete": { + "id": "compute.images.delete", + "path": 
"{project}/global/images/{image}", + "httpMethod": "DELETE", + "description": "Deletes the specified image resource.", + "parameters": { + "image": { + "type": "string", + "description": "Name of the image resource to delete.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "image" + ], + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "deprecate": { + "id": "compute.images.deprecate", + "path": "{project}/global/images/{image}/deprecate", + "httpMethod": "POST", + "description": "Sets the deprecation status of an image.\n\nIf an empty request body is given, clears the deprecation status instead.", + "parameters": { + "image": { + "type": "string", + "description": "Image name.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "image" + ], + "request": { + "$ref": "DeprecationStatus" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "get": { + "id": "compute.images.get", + "path": "{project}/global/images/{image}", + "httpMethod": "GET", + "description": "Returns the specified image resource.", + "parameters": { + "image": { + "type": "string", + "description": "Name of the image resource to return.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "image" + ], + "response": { + "$ref": "Image" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "insert": { + "id": "compute.images.insert", + "path": "{project}/global/images", + "httpMethod": "POST", + "description": "Creates an image resource in the specified project using the data included in the request.", + "parameters": { + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + } + }, + "parameterOrder": [ + "project" + ], + "request": { + "$ref": "Image" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/devstorage.full_control", + "https://www.googleapis.com/auth/devstorage.read_only", + 
"https://www.googleapis.com/auth/devstorage.read_write" + ] + }, + "list": { + "id": "compute.images.list", + "path": "{project}/global/images", + "httpMethod": "GET", + "description": "Retrieves the list of private images available to the specified project. Private images are images you create that belong to your project. This method does not get any images that belong to other projects, including publicly-available images, like Debian 7. If you want to get a list of publicly-available images, use this method to make a request to the respective image project, such as debian-cloud or windows-cloud.\n\nSee Accessing images for more information.", + "parameters": { + "filter": { + "type": "string", + "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, filter=name ne example-instance.\n\nCompute Engine Beta API Only: If you use filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. In particular, use filtering on nested fields to take advantage of instance labels to organize and filter results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions meaning that resources must match all expressions to pass the filters.", + "location": "query" + }, + "maxResults": { + "type": "integer", + "description": "The maximum number of results per page that Compute Engine should return. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests.", + "default": "500", + "format": "uint32", + "minimum": "0", + "maximum": "500", + "location": "query" + }, + "pageToken": { + "type": "string", + "description": "Specifies a page token to use. 
Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + "location": "query" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + } + }, + "parameterOrder": [ + "project" + ], + "response": { + "$ref": "ImageList" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + } + } + }, + "instanceGroupManagers": { + "methods": { + "abandonInstances": { + "id": "compute.instanceGroupManagers.abandonInstances", + "path": "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/abandonInstances", + "httpMethod": "POST", + "description": "Schedules a group action to remove the specified instances from the managed instance group. Abandoning an instance does not delete the instance, but it does remove the instance from any target pools that are applied by the managed instance group. This method reduces the targetSize of the managed instance group by the number of instances that you abandon. This operation is marked as DONE when the action is scheduled even if the instances have not yet been removed from the group. You must separately verify the status of the abandoning action with the listmanagedinstances method.", + "parameters": { + "instanceGroupManager": { + "type": "string", + "description": "The name of the managed instance group.", + "required": true, + "location": "path" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "zone": { + "type": "string", + "description": "The name of the zone where the managed instance group is located.", + "required": true, + "location": "path" + } + }, + "parameterOrder": [ + "project", + "zone", + "instanceGroupManager" + ], + "request": { + "$ref": "InstanceGroupManagersAbandonInstancesRequest" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "aggregatedList": { + "id": "compute.instanceGroupManagers.aggregatedList", + "path": "{project}/aggregated/instanceGroupManagers", + "httpMethod": "GET", + "description": "Retrieves the list of managed instance groups and groups them by zone.", + "parameters": { + "filter": { + "type": "string", + "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. 
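
For orientation, the image methods above (compute.images.get, insert, delete, deprecate, list) are what the Go client generated from this discovery document wraps. A minimal sketch of calling compute.images.get, assuming the google.golang.org/api/compute/v1 package and Application Default Credentials; "my-project" and "my-image" are placeholder names, not values from this patch:

    // Illustrative sketch only; not part of the vendored file.
    package main

    import (
        "fmt"
        "log"

        "golang.org/x/net/context"
        "golang.org/x/oauth2/google"
        compute "google.golang.org/api/compute/v1"
    )

    func main() {
        ctx := context.Background()
        // Default credentials with one of the OAuth scopes listed above.
        hc, err := google.DefaultClient(ctx, compute.ComputeScope)
        if err != nil {
            log.Fatal(err)
        }
        svc, err := compute.New(hc)
        if err != nil {
            log.Fatal(err)
        }
        // GET {project}/global/images/{image} -> Image
        img, err := svc.Images.Get("my-project", "my-image").Do()
        if err != nil {
            log.Fatal(err)
        }
        fmt.Println(img.Name, img.Status)
    }

The fragments that follow reuse svc (and the imports) from this sketch rather than repeating the setup.
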
The literal value must match the entire field.\n\nFor example, filter=name ne example-instance.\n\nCompute Engine Beta API Only: If you use filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. In particular, use filtering on nested fields to take advantage of instance labels to organize and filter results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions meaning that resources must match all expressions to pass the filters.", + "location": "query" + }, + "maxResults": { + "type": "integer", + "description": "The maximum number of results per page that Compute Engine should return. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests.", + "default": "500", + "format": "uint32", + "minimum": "0", + "maximum": "500", + "location": "query" + }, + "pageToken": { + "type": "string", + "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + "location": "query" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + } + }, + "parameterOrder": [ + "project" + ], + "response": { + "$ref": "InstanceGroupManagerAggregatedList" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "delete": { + "id": "compute.instanceGroupManagers.delete", + "path": "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}", + "httpMethod": "DELETE", + "description": "Deletes the specified managed instance group and all of the instances in that group.", + "parameters": { + "instanceGroupManager": { + "type": "string", + "description": "The name of the managed instance group to delete.", + "required": true, + "location": "path" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "zone": { + "type": "string", + "description": "The name of the zone where the managed instance group is located.", + "required": true, + "location": "path" + } + }, + "parameterOrder": [ + "project", + "zone", + "instanceGroupManager" + ], + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "deleteInstances": { + "id": "compute.instanceGroupManagers.deleteInstances", + "path": "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/deleteInstances", + "httpMethod": "POST", + "description": "Schedules a group action to delete the specified instances in the managed instance group. The instances are also removed from any target pools of which they were a member. 
This method reduces the targetSize of the managed instance group by the number of instances that you delete. This operation is marked as DONE when the action is scheduled even if the instances are still being deleted. You must separately verify the status of the deleting action with the listmanagedinstances method.", + "parameters": { + "instanceGroupManager": { + "type": "string", + "description": "The name of the managed instance group.", + "required": true, + "location": "path" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "zone": { + "type": "string", + "description": "The name of the zone where the managed instance group is located.", + "required": true, + "location": "path" + } + }, + "parameterOrder": [ + "project", + "zone", + "instanceGroupManager" + ], + "request": { + "$ref": "InstanceGroupManagersDeleteInstancesRequest" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "get": { + "id": "compute.instanceGroupManagers.get", + "path": "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}", + "httpMethod": "GET", + "description": "Returns all of the details about the specified managed instance group.", + "parameters": { + "instanceGroupManager": { + "type": "string", + "description": "The name of the managed instance group.", + "required": true, + "location": "path" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "zone": { + "type": "string", + "description": "The name of the zone where the managed instance group is located.", + "required": true, + "location": "path" + } + }, + "parameterOrder": [ + "project", + "zone", + "instanceGroupManager" + ], + "response": { + "$ref": "InstanceGroupManager" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "insert": { + "id": "compute.instanceGroupManagers.insert", + "path": "{project}/zones/{zone}/instanceGroupManagers", + "httpMethod": "POST", + "description": "Creates a managed instance group using the information that you specify in the request. After the group is created, it schedules an action to create instances in the group using the specified instance template. This operation is marked as DONE when the group is created even if the instances in the group have not yet been created. 
You must separately verify the status of the individual instances with the listmanagedinstances method.", + "parameters": { + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "zone": { + "type": "string", + "description": "The name of the zone where you want to create the managed instance group.", + "required": true, + "location": "path" + } + }, + "parameterOrder": [ + "project", + "zone" + ], + "request": { + "$ref": "InstanceGroupManager" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "list": { + "id": "compute.instanceGroupManagers.list", + "path": "{project}/zones/{zone}/instanceGroupManagers", + "httpMethod": "GET", + "description": "Retrieves a list of managed instance groups that are contained within the specified project and zone.", + "parameters": { + "filter": { + "type": "string", + "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, filter=name ne example-instance.\n\nCompute Engine Beta API Only: If you use filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. In particular, use filtering on nested fields to take advantage of instance labels to organize and filter results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions meaning that resources must match all expressions to pass the filters.", + "location": "query" + }, + "maxResults": { + "type": "integer", + "description": "The maximum number of results per page that Compute Engine should return. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests.", + "default": "500", + "format": "uint32", + "minimum": "0", + "maximum": "500", + "location": "query" + }, + "pageToken": { + "type": "string", + "description": "Specifies a page token to use. 
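
The deleteInstances description above is explicit that the Operation reports DONE once the action is merely scheduled. A hedged sketch of the verify-separately pattern it suggests, reusing svc from the first example; every name is a placeholder:

    // deleteInstances is DONE when scheduled, so the deleting status is
    // checked separately via listmanagedinstances, per the description above.
    func deleteAndVerify(svc *compute.Service, project, zone, igm, instanceURL string) error {
        req := &compute.InstanceGroupManagersDeleteInstancesRequest{
            Instances: []string{instanceURL},
        }
        if _, err := svc.InstanceGroupManagers.DeleteInstances(project, zone, igm, req).Do(); err != nil {
            return err
        }
        resp, err := svc.InstanceGroupManagers.ListManagedInstances(project, zone, igm).Do()
        if err != nil {
            return err
        }
        for _, mi := range resp.ManagedInstances {
            // currentAction reports what the group is doing to each instance,
            // e.g. DELETING while the scheduled action is in flight.
            fmt.Printf("%s currentAction=%s\n", mi.Instance, mi.CurrentAction)
        }
        return nil
    }
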
Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + "location": "query" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "zone": { + "type": "string", + "description": "The name of the zone where the managed instance group is located.", + "required": true, + "location": "path" + } + }, + "parameterOrder": [ + "project", + "zone" + ], + "response": { + "$ref": "InstanceGroupManagerList" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "listManagedInstances": { + "id": "compute.instanceGroupManagers.listManagedInstances", + "path": "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/listManagedInstances", + "httpMethod": "POST", + "description": "Lists all of the instances in the managed instance group. Each instance in the list has a currentAction, which indicates the action that the managed instance group is performing on the instance. For example, if the group is still creating an instance, the currentAction is CREATING. If a previous action failed, the list displays the errors for that failed action.", + "parameters": { + "instanceGroupManager": { + "type": "string", + "description": "The name of the managed instance group.", + "required": true, + "location": "path" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "zone": { + "type": "string", + "description": "The name of the zone where the managed instance group is located.", + "required": true, + "location": "path" + } + }, + "parameterOrder": [ + "project", + "zone", + "instanceGroupManager" + ], + "response": { + "$ref": "InstanceGroupManagersListManagedInstancesResponse" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "recreateInstances": { + "id": "compute.instanceGroupManagers.recreateInstances", + "path": "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/recreateInstances", + "httpMethod": "POST", + "description": "Schedules a group action to recreate the specified instances in the managed instance group. The instances are deleted and recreated using the current instance template for the managed instance group. This operation is marked as DONE when the action is scheduled even if the instances have not yet been recreated. 
You must separately verify the status of the recreating action with the listmanagedinstances method.", + "parameters": { + "instanceGroupManager": { + "type": "string", + "description": "The name of the managed instance group.", + "required": true, + "location": "path" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "zone": { + "type": "string", + "description": "The name of the zone where the managed instance group is located.", + "required": true, + "location": "path" + } + }, + "parameterOrder": [ + "project", + "zone", + "instanceGroupManager" + ], + "request": { + "$ref": "InstanceGroupManagersRecreateInstancesRequest" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "resize": { + "id": "compute.instanceGroupManagers.resize", + "path": "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/resize", + "httpMethod": "POST", + "description": "Resizes the managed instance group. If you increase the size, the group creates new instances using the current instance template. If you decrease the size, the group deletes instances. The resize operation is marked DONE when the resize actions are scheduled even if the group has not yet added or deleted any instances. You must separately verify the status of the creating or deleting actions with the listmanagedinstances method.", + "parameters": { + "instanceGroupManager": { + "type": "string", + "description": "The name of the managed instance group.", + "required": true, + "location": "path" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "size": { + "type": "integer", + "description": "The number of running instances that the managed instance group should maintain at any given time. The group automatically adds or removes instances to maintain the number of instances specified by this parameter.", + "required": true, + "format": "int32", + "location": "query" + }, + "zone": { + "type": "string", + "description": "The name of the zone where the managed instance group is located.", + "required": true, + "location": "path" + } + }, + "parameterOrder": [ + "project", + "zone", + "instanceGroupManager", + "size" + ], + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "setInstanceTemplate": { + "id": "compute.instanceGroupManagers.setInstanceTemplate", + "path": "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/setInstanceTemplate", + "httpMethod": "POST", + "description": "Specifies the instance template to use when creating new instances in this group. 
The templates for existing instances in the group do not change unless you recreate them.", + "parameters": { + "instanceGroupManager": { + "type": "string", + "description": "The name of the managed instance group.", + "required": true, + "location": "path" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "zone": { + "type": "string", + "description": "The name of the zone where the managed instance group is located.", + "required": true, + "location": "path" + } + }, + "parameterOrder": [ + "project", + "zone", + "instanceGroupManager" + ], + "request": { + "$ref": "InstanceGroupManagersSetInstanceTemplateRequest" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "setTargetPools": { + "id": "compute.instanceGroupManagers.setTargetPools", + "path": "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/setTargetPools", + "httpMethod": "POST", + "description": "Modifies the target pools to which all instances in this managed instance group are assigned. The target pools automatically apply to all of the instances in the managed instance group. This operation is marked DONE when you make the request even if the instances have not yet been added to their target pools. The change might take some time to apply to all of the instances in the group depending on the size of the group.", + "parameters": { + "instanceGroupManager": { + "type": "string", + "description": "The name of the managed instance group.", + "required": true, + "location": "path" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "zone": { + "type": "string", + "description": "The name of the zone where the managed instance group is located.", + "required": true, + "location": "path" + } + }, + "parameterOrder": [ + "project", + "zone", + "instanceGroupManager" + ], + "request": { + "$ref": "InstanceGroupManagersSetTargetPoolsRequest" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + } + } + }, + "instanceGroups": { + "methods": { + "addInstances": { + "id": "compute.instanceGroups.addInstances", + "path": "{project}/zones/{zone}/instanceGroups/{instanceGroup}/addInstances", + "httpMethod": "POST", + "description": "Adds a list of instances to the specified instance group. 
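
Since resize (documented just above) is likewise marked DONE as soon as the actions are scheduled, a common pattern is to poll the zonal Operation until it settles. A sketch under the same assumptions as the earlier examples (placeholder names; polling interval arbitrary):

    // Resize, then poll the zonal Operation until its status is DONE.
    func resizeAndWait(svc *compute.Service, project, zone, igm string, size int64) error {
        op, err := svc.InstanceGroupManagers.Resize(project, zone, igm, size).Do()
        if err != nil {
            return err
        }
        for op.Status != "DONE" {
            time.Sleep(2 * time.Second)
            if op, err = svc.ZoneOperations.Get(project, zone, op.Name).Do(); err != nil {
                return err
            }
        }
        if op.Error != nil {
            return fmt.Errorf("resize failed: %+v", op.Error.Errors)
        }
        return nil
    }
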
All of the instances in the instance group must be in the same network/subnetwork.", + "parameters": { + "instanceGroup": { + "type": "string", + "description": "The name of the instance group where you are adding instances.", + "required": true, + "location": "path" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "zone": { + "type": "string", + "description": "The name of the zone where the instance group is located.", + "required": true, + "location": "path" + } + }, + "parameterOrder": [ + "project", + "zone", + "instanceGroup" + ], + "request": { + "$ref": "InstanceGroupsAddInstancesRequest" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "aggregatedList": { + "id": "compute.instanceGroups.aggregatedList", + "path": "{project}/aggregated/instanceGroups", + "httpMethod": "GET", + "description": "Retrieves the list of instance groups and sorts them by zone.", + "parameters": { + "filter": { + "type": "string", + "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, filter=name ne example-instance.\n\nCompute Engine Beta API Only: If you use filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. In particular, use filtering on nested fields to take advantage of instance labels to organize and filter results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions meaning that resources must match all expressions to pass the filters.", + "location": "query" + }, + "maxResults": { + "type": "integer", + "description": "The maximum number of results per page that Compute Engine should return. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests.", + "default": "500", + "format": "uint32", + "minimum": "0", + "maximum": "500", + "location": "query" + }, + "pageToken": { + "type": "string", + "description": "Specifies a page token to use. 
Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + "location": "query" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + } + }, + "parameterOrder": [ + "project" + ], + "response": { + "$ref": "InstanceGroupAggregatedList" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "delete": { + "id": "compute.instanceGroups.delete", + "path": "{project}/zones/{zone}/instanceGroups/{instanceGroup}", + "httpMethod": "DELETE", + "description": "Deletes the specified instance group. The instances in the group are not deleted.", + "parameters": { + "instanceGroup": { + "type": "string", + "description": "The name of the instance group to delete.", + "required": true, + "location": "path" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "zone": { + "type": "string", + "description": "The name of the zone where the instance group is located.", + "required": true, + "location": "path" + } + }, + "parameterOrder": [ + "project", + "zone", + "instanceGroup" + ], + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "get": { + "id": "compute.instanceGroups.get", + "path": "{project}/zones/{zone}/instanceGroups/{instanceGroup}", + "httpMethod": "GET", + "description": "Returns the specified instance group resource.", + "parameters": { + "instanceGroup": { + "type": "string", + "description": "The name of the instance group.", + "required": true, + "location": "path" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "zone": { + "type": "string", + "description": "The name of the zone where the instance group is located.", + "required": true, + "location": "path" + } + }, + "parameterOrder": [ + "project", + "zone", + "instanceGroup" + ], + "response": { + "$ref": "InstanceGroup" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "insert": { + "id": "compute.instanceGroups.insert", + "path": "{project}/zones/{zone}/instanceGroups", + "httpMethod": "POST", + "description": "Creates an instance group in the specified project using the parameters that are included in the request.", + "parameters": { + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "zone": { + "type": "string", + "description": "The name of the zone where you want to create the instance group.", + "required": true, + "location": "path" + } + }, + "parameterOrder": 
[ + "project", + "zone" + ], + "request": { + "$ref": "InstanceGroup" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "list": { + "id": "compute.instanceGroups.list", + "path": "{project}/zones/{zone}/instanceGroups", + "httpMethod": "GET", + "description": "Retrieves the list of instance groups that are located in the specified project and zone.", + "parameters": { + "filter": { + "type": "string", + "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, filter=name ne example-instance.\n\nCompute Engine Beta API Only: If you use filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. In particular, use filtering on nested fields to take advantage of instance labels to organize and filter results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions meaning that resources must match all expressions to pass the filters.", + "location": "query" + }, + "maxResults": { + "type": "integer", + "description": "The maximum number of results per page that Compute Engine should return. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests.", + "default": "500", + "format": "uint32", + "minimum": "0", + "maximum": "500", + "location": "query" + }, + "pageToken": { + "type": "string", + "description": "Specifies a page token to use. 
Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + "location": "query" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "zone": { + "type": "string", + "description": "The name of the zone where the instance group is located.", + "required": true, + "location": "path" + } + }, + "parameterOrder": [ + "project", + "zone" + ], + "response": { + "$ref": "InstanceGroupList" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "listInstances": { + "id": "compute.instanceGroups.listInstances", + "path": "{project}/zones/{zone}/instanceGroups/{instanceGroup}/listInstances", + "httpMethod": "POST", + "description": "Lists the instances in the specified instance group.", + "parameters": { + "filter": { + "type": "string", + "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, filter=name ne example-instance.\n\nCompute Engine Beta API Only: If you use filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. In particular, use filtering on nested fields to take advantage of instance labels to organize and filter results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions meaning that resources must match all expressions to pass the filters.", + "location": "query" + }, + "instanceGroup": { + "type": "string", + "description": "The name of the instance group from which you want to generate a list of included instances.", + "required": true, + "location": "path" + }, + "maxResults": { + "type": "integer", + "description": "The maximum number of results per page that Compute Engine should return. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests.", + "default": "500", + "format": "uint32", + "minimum": "0", + "maximum": "500", + "location": "query" + }, + "pageToken": { + "type": "string", + "description": "Specifies a page token to use. 
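
Note that listInstances, unlike the other list methods here, is a POST whose request body selects which members to return. A sketch of that call shape, reusing svc from the first example; "ALL" and "RUNNING" are the instanceState values the request schema accepts:

    // listInstances takes an InstanceGroupsListInstancesRequest body.
    func printGroupMembers(svc *compute.Service, project, zone, group string) error {
        req := &compute.InstanceGroupsListInstancesRequest{InstanceState: "ALL"}
        res, err := svc.InstanceGroups.ListInstances(project, zone, group, req).Do()
        if err != nil {
            return err
        }
        for _, it := range res.Items {
            fmt.Println(it.Instance, it.Status)
        }
        return nil
    }
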
Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + "location": "query" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "zone": { + "type": "string", + "description": "The name of the zone where the instance group is located.", + "required": true, + "location": "path" + } + }, + "parameterOrder": [ + "project", + "zone", + "instanceGroup" + ], + "request": { + "$ref": "InstanceGroupsListInstancesRequest" + }, + "response": { + "$ref": "InstanceGroupsListInstances" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "removeInstances": { + "id": "compute.instanceGroups.removeInstances", + "path": "{project}/zones/{zone}/instanceGroups/{instanceGroup}/removeInstances", + "httpMethod": "POST", + "description": "Removes one or more instances from the specified instance group, but does not delete those instances.", + "parameters": { + "instanceGroup": { + "type": "string", + "description": "The name of the instance group where the specified instances will be removed.", + "required": true, + "location": "path" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "zone": { + "type": "string", + "description": "The name of the zone where the instance group is located.", + "required": true, + "location": "path" + } + }, + "parameterOrder": [ + "project", + "zone", + "instanceGroup" + ], + "request": { + "$ref": "InstanceGroupsRemoveInstancesRequest" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "setNamedPorts": { + "id": "compute.instanceGroups.setNamedPorts", + "path": "{project}/zones/{zone}/instanceGroups/{instanceGroup}/setNamedPorts", + "httpMethod": "POST", + "description": "Sets the named ports for the specified instance group.", + "parameters": { + "instanceGroup": { + "type": "string", + "description": "The name of the instance group where the named ports are updated.", + "required": true, + "location": "path" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "zone": { + "type": "string", + "description": "The name of the zone where the instance group is located.", + "required": true, + "location": "path" + } + }, + "parameterOrder": [ + "project", + "zone", + "instanceGroup" + ], + "request": { + "$ref": "InstanceGroupsSetNamedPortsRequest" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + } + } + }, + "instanceTemplates": { + "methods": { + "delete": { + "id": "compute.instanceTemplates.delete", + "path": "{project}/global/instanceTemplates/{instanceTemplate}", + "httpMethod": "DELETE", + "description": "Deletes the 
specified instance template.", + "parameters": { + "instanceTemplate": { + "type": "string", + "description": "The name of the instance template to delete.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "instanceTemplate" + ], + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "get": { + "id": "compute.instanceTemplates.get", + "path": "{project}/global/instanceTemplates/{instanceTemplate}", + "httpMethod": "GET", + "description": "Returns the specified instance template resource.", + "parameters": { + "instanceTemplate": { + "type": "string", + "description": "The name of the instance template.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "instanceTemplate" + ], + "response": { + "$ref": "InstanceTemplate" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "insert": { + "id": "compute.instanceTemplates.insert", + "path": "{project}/global/instanceTemplates", + "httpMethod": "POST", + "description": "Creates an instance template in the specified project using the data that is included in the request.", + "parameters": { + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + } + }, + "parameterOrder": [ + "project" + ], + "request": { + "$ref": "InstanceTemplate" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "list": { + "id": "compute.instanceTemplates.list", + "path": "{project}/global/instanceTemplates", + "httpMethod": "GET", + "description": "Retrieves a list of instance templates that are contained within the specified project and zone.", + "parameters": { + "filter": { + "type": "string", + "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. 
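
A sketch of compute.instanceTemplates.insert under the same assumptions; every value below, including the source image path, is a placeholder chosen for illustration:

    func createTemplate(svc *compute.Service, project string) error {
        tmpl := &compute.InstanceTemplate{
            Name: "example-template",
            Properties: &compute.InstanceProperties{
                // Templates take the bare machine type name, not a zonal URL.
                MachineType: "n1-standard-1",
                Disks: []*compute.AttachedDisk{{
                    Boot:       true,
                    AutoDelete: true,
                    InitializeParams: &compute.AttachedDiskInitializeParams{
                        // Example path only; substitute a real image.
                        SourceImage: "projects/debian-cloud/global/images/debian-7-wheezy-v20160119",
                    },
                }},
                NetworkInterfaces: []*compute.NetworkInterface{
                    {Network: "global/networks/default"},
                },
            },
        }
        _, err := svc.InstanceTemplates.Insert(project, tmpl).Do()
        return err
    }
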
The literal value must match the entire field.\n\nFor example, filter=name ne example-instance.\n\nCompute Engine Beta API Only: If you use filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. In particular, use filtering on nested fields to take advantage of instance labels to organize and filter results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions meaning that resources must match all expressions to pass the filters.", + "location": "query" + }, + "maxResults": { + "type": "integer", + "description": "The maximum number of results per page that Compute Engine should return. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests.", + "default": "500", + "format": "uint32", + "minimum": "0", + "maximum": "500", + "location": "query" + }, + "pageToken": { + "type": "string", + "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + "location": "query" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + } + }, + "parameterOrder": [ + "project" + ], + "response": { + "$ref": "InstanceTemplateList" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + } + } + }, + "instances": { + "methods": { + "addAccessConfig": { + "id": "compute.instances.addAccessConfig", + "path": "{project}/zones/{zone}/instances/{instance}/addAccessConfig", + "httpMethod": "POST", + "description": "Adds an access config to an instance's network interface.", + "parameters": { + "instance": { + "type": "string", + "description": "The instance name for this request.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + }, + "networkInterface": { + "type": "string", + "description": "The name of the network interface to add to this instance.", + "required": true, + "location": "query" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "zone": { + "type": "string", + "description": "The name of the zone for this request.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "zone", + "instance", + "networkInterface" + ], + "request": { + "$ref": "AccessConfig" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "aggregatedList": { + "id": "compute.instances.aggregatedList", + "path": "{project}/aggregated/instances", + "httpMethod": "GET", + "description": "Retrieves aggregated list 
of instance resources.", + "parameters": { + "filter": { + "type": "string", + "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, filter=name ne example-instance.\n\nCompute Engine Beta API Only: If you use filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. In particular, use filtering on nested fields to take advantage of instance labels to organize and filter results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions meaning that resources must match all expressions to pass the filters.", + "location": "query" + }, + "maxResults": { + "type": "integer", + "description": "The maximum number of results per page that Compute Engine should return. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests.", + "default": "500", + "format": "uint32", + "minimum": "0", + "maximum": "500", + "location": "query" + }, + "pageToken": { + "type": "string", + "description": "Specifies a page token to use. 
Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + "location": "query" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + } + }, + "parameterOrder": [ + "project" + ], + "response": { + "$ref": "InstanceAggregatedList" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "attachDisk": { + "id": "compute.instances.attachDisk", + "path": "{project}/zones/{zone}/instances/{instance}/attachDisk", + "httpMethod": "POST", + "description": "Attaches a Disk resource to an instance.", + "parameters": { + "instance": { + "type": "string", + "description": "Instance name.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "zone": { + "type": "string", + "description": "The name of the zone for this request.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "zone", + "instance" + ], + "request": { + "$ref": "AttachedDisk" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "delete": { + "id": "compute.instances.delete", + "path": "{project}/zones/{zone}/instances/{instance}", + "httpMethod": "DELETE", + "description": "Deletes the specified Instance resource. 
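
The attachDisk method just described takes an AttachedDisk body, while its detachDisk counterpart (documented just below) identifies the disk by the deviceName query parameter instead. A sketch showing both call shapes side by side, with placeholder names, reusing svc from the first example:

    func swapDisk(svc *compute.Service, project, zone, instance, diskURL string) error {
        disk := &compute.AttachedDisk{
            Source:     diskURL,
            DeviceName: "data-disk",
        }
        if _, err := svc.Instances.AttachDisk(project, zone, instance, disk).Do(); err != nil {
            return err
        }
        // detachDisk is keyed by deviceName, not by the disk URL.
        _, err := svc.Instances.DetachDisk(project, zone, instance, "data-disk").Do()
        return err
    }
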
For more information, see Stopping or Deleting an Instance.", + "parameters": { + "instance": { + "type": "string", + "description": "Name of the instance resource to delete.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "zone": { + "type": "string", + "description": "The name of the zone for this request.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "zone", + "instance" + ], + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "deleteAccessConfig": { + "id": "compute.instances.deleteAccessConfig", + "path": "{project}/zones/{zone}/instances/{instance}/deleteAccessConfig", + "httpMethod": "POST", + "description": "Deletes an access config from an instance's network interface.", + "parameters": { + "accessConfig": { + "type": "string", + "description": "The name of the access config to delete.", + "required": true, + "location": "query" + }, + "instance": { + "type": "string", + "description": "The instance name for this request.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + }, + "networkInterface": { + "type": "string", + "description": "The name of the network interface.", + "required": true, + "location": "query" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "zone": { + "type": "string", + "description": "The name of the zone for this request.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "zone", + "instance", + "accessConfig", + "networkInterface" + ], + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "detachDisk": { + "id": "compute.instances.detachDisk", + "path": "{project}/zones/{zone}/instances/{instance}/detachDisk", + "httpMethod": "POST", + "description": "Detaches a disk from an instance.", + "parameters": { + "deviceName": { + "type": "string", + "description": "Disk device name to detach.", + "required": true, + "pattern": "\\w[\\w.-]{0,254}", + "location": "query" + }, + "instance": { + "type": "string", + "description": "Instance name.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "zone": { + "type": "string", + "description": "The name of the zone for this request.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "zone", + "instance", + "deviceName" + ], + "response": { + "$ref": 
"Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "get": { + "id": "compute.instances.get", + "path": "{project}/zones/{zone}/instances/{instance}", + "httpMethod": "GET", + "description": "Returns the specified instance resource.", + "parameters": { + "instance": { + "type": "string", + "description": "Name of the instance resource to return.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "zone": { + "type": "string", + "description": "The name of the zone for this request.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "zone", + "instance" + ], + "response": { + "$ref": "Instance" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "getSerialPortOutput": { + "id": "compute.instances.getSerialPortOutput", + "path": "{project}/zones/{zone}/instances/{instance}/serialPort", + "httpMethod": "GET", + "description": "Returns the specified instance's serial port output.", + "parameters": { + "instance": { + "type": "string", + "description": "Name of the instance scoping this request.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + }, + "port": { + "type": "integer", + "description": "Specifies which COM or serial port to retrieve data from.", + "default": "1", + "format": "int32", + "minimum": "1", + "maximum": "4", + "location": "query" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "zone": { + "type": "string", + "description": "The name of the zone for this request.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "zone", + "instance" + ], + "response": { + "$ref": "SerialPortOutput" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "insert": { + "id": "compute.instances.insert", + "path": "{project}/zones/{zone}/instances", + "httpMethod": "POST", + "description": "Creates an instance resource in the specified project using the data included in the request.", + "parameters": { + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "zone": { + "type": "string", + "description": "The name of the zone for this request.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "zone" + ], + "request": { + "$ref": "Instance" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + 
"https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "list": { + "id": "compute.instances.list", + "path": "{project}/zones/{zone}/instances", + "httpMethod": "GET", + "description": "Retrieves the list of instance resources contained within the specified zone.", + "parameters": { + "filter": { + "type": "string", + "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, filter=name ne example-instance.\n\nCompute Engine Beta API Only: If you use filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. In particular, use filtering on nested fields to take advantage of instance labels to organize and filter results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions meaning that resources must match all expressions to pass the filters.", + "location": "query" + }, + "maxResults": { + "type": "integer", + "description": "The maximum number of results per page that Compute Engine should return. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests.", + "default": "500", + "format": "uint32", + "minimum": "0", + "maximum": "500", + "location": "query" + }, + "pageToken": { + "type": "string", + "description": "Specifies a page token to use. 
Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + "location": "query" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "zone": { + "type": "string", + "description": "The name of the zone for this request.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "zone" + ], + "response": { + "$ref": "InstanceList" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "reset": { + "id": "compute.instances.reset", + "path": "{project}/zones/{zone}/instances/{instance}/reset", + "httpMethod": "POST", + "description": "Performs a hard reset on the instance.", + "parameters": { + "instance": { + "type": "string", + "description": "Name of the instance scoping this request.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "zone": { + "type": "string", + "description": "The name of the zone for this request.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "zone", + "instance" + ], + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "setDiskAutoDelete": { + "id": "compute.instances.setDiskAutoDelete", + "path": "{project}/zones/{zone}/instances/{instance}/setDiskAutoDelete", + "httpMethod": "POST", + "description": "Sets the auto-delete flag for a disk attached to an instance.", + "parameters": { + "autoDelete": { + "type": "boolean", + "description": "Whether to auto-delete the disk when the instance is deleted.", + "required": true, + "location": "query" + }, + "deviceName": { + "type": "string", + "description": "The device name of the disk to modify.", + "required": true, + "pattern": "\\w[\\w.-]{0,254}", + "location": "query" + }, + "instance": { + "type": "string", + "description": "The instance name.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "zone": { + "type": "string", + "description": "The name of the zone for this request.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "zone", + "instance", + "autoDelete", + "deviceName" + ], + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "setMetadata": { + "id": "compute.instances.setMetadata", + "path": 
"{project}/zones/{zone}/instances/{instance}/setMetadata", + "httpMethod": "POST", + "description": "Sets metadata for the specified instance to the data included in the request.", + "parameters": { + "instance": { + "type": "string", + "description": "Name of the instance scoping this request.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "zone": { + "type": "string", + "description": "The name of the zone for this request.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "zone", + "instance" + ], + "request": { + "$ref": "Metadata" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "setScheduling": { + "id": "compute.instances.setScheduling", + "path": "{project}/zones/{zone}/instances/{instance}/setScheduling", + "httpMethod": "POST", + "description": "Sets an instance's scheduling options.", + "parameters": { + "instance": { + "type": "string", + "description": "Instance name.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "zone": { + "type": "string", + "description": "The name of the zone for this request.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "zone", + "instance" + ], + "request": { + "$ref": "Scheduling" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "setTags": { + "id": "compute.instances.setTags", + "path": "{project}/zones/{zone}/instances/{instance}/setTags", + "httpMethod": "POST", + "description": "Sets tags for the specified instance to the data included in the request.", + "parameters": { + "instance": { + "type": "string", + "description": "Name of the instance scoping this request.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "zone": { + "type": "string", + "description": "The name of the zone for this request.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "zone", + "instance" + ], + "request": { + "$ref": "Tags" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "start": { + "id": "compute.instances.start", + "path": "{project}/zones/{zone}/instances/{instance}/start", + "httpMethod": "POST", + 
"description": "This method starts an instance that was stopped using the using the instances().stop method. For more information, see Restart an instance.", + "parameters": { + "instance": { + "type": "string", + "description": "Name of the instance resource to start.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "zone": { + "type": "string", + "description": "The name of the zone for this request.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "zone", + "instance" + ], + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "stop": { + "id": "compute.instances.stop", + "path": "{project}/zones/{zone}/instances/{instance}/stop", + "httpMethod": "POST", + "description": "This method stops a running instance, shutting it down cleanly, and allows you to restart the instance at a later time. Stopped instances do not incur per-minute, virtual machine usage charges while they are stopped, but any resources that the virtual machine is using, such as persistent disks and static IP addresses,will continue to be charged until they are deleted. For more information, see Stopping an instance.", + "parameters": { + "instance": { + "type": "string", + "description": "Name of the instance resource to stop.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "zone": { + "type": "string", + "description": "The name of the zone for this request.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "zone", + "instance" + ], + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + } + } + }, + "licenses": { + "methods": { + "get": { + "id": "compute.licenses.get", + "path": "{project}/global/licenses/{license}", + "httpMethod": "GET", + "description": "Returns the specified license resource.", + "parameters": { + "license": { + "type": "string", + "description": "Name of the license resource to return.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "license" + ], + "response": { + "$ref": "License" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + } + } + }, + "machineTypes": { + "methods": { + "aggregatedList": { + "id": 
"compute.machineTypes.aggregatedList", + "path": "{project}/aggregated/machineTypes", + "httpMethod": "GET", + "description": "Retrieves the list of machine type resources grouped by scope.", + "parameters": { + "filter": { + "type": "string", + "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, filter=name ne example-instance.\n\nCompute Engine Beta API Only: If you use filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. In particular, use filtering on nested fields to take advantage of instance labels to organize and filter results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions meaning that resources must match all expressions to pass the filters.", + "location": "query" + }, + "maxResults": { + "type": "integer", + "description": "The maximum number of results per page that Compute Engine should return. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests.", + "default": "500", + "format": "uint32", + "minimum": "0", + "maximum": "500", + "location": "query" + }, + "pageToken": { + "type": "string", + "description": "Specifies a page token to use. 
Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + "location": "query" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + } + }, + "parameterOrder": [ + "project" + ], + "response": { + "$ref": "MachineTypeAggregatedList" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "get": { + "id": "compute.machineTypes.get", + "path": "{project}/zones/{zone}/machineTypes/{machineType}", + "httpMethod": "GET", + "description": "Returns the specified machine type resource.", + "parameters": { + "machineType": { + "type": "string", + "description": "Name of the machine type resource to return.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "zone": { + "type": "string", + "description": "The name of the zone for this request.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "zone", + "machineType" + ], + "response": { + "$ref": "MachineType" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "list": { + "id": "compute.machineTypes.list", + "path": "{project}/zones/{zone}/machineTypes", + "httpMethod": "GET", + "description": "Retrieves the list of machine type resources available to the specified project.", + "parameters": { + "filter": { + "type": "string", + "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, filter=name ne example-instance.\n\nCompute Engine Beta API Only: If you use filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. In particular, use filtering on nested fields to take advantage of instance labels to organize and filter results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). 
Multiple expressions are treated as AND expressions meaning that resources must match all expressions to pass the filters.", + "location": "query" + }, + "maxResults": { + "type": "integer", + "description": "The maximum number of results per page that Compute Engine should return. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests.", + "default": "500", + "format": "uint32", + "minimum": "0", + "maximum": "500", + "location": "query" + }, + "pageToken": { + "type": "string", + "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + "location": "query" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "zone": { + "type": "string", + "description": "The name of the zone for this request.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "zone" + ], + "response": { + "$ref": "MachineTypeList" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + } + } + }, + "networks": { + "methods": { + "delete": { + "id": "compute.networks.delete", + "path": "{project}/global/networks/{network}", + "httpMethod": "DELETE", + "description": "Deletes the specified network resource.", + "parameters": { + "network": { + "type": "string", + "description": "Name of the network resource to delete.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "network" + ], + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "get": { + "id": "compute.networks.get", + "path": "{project}/global/networks/{network}", + "httpMethod": "GET", + "description": "Returns the specified network resource.", + "parameters": { + "network": { + "type": "string", + "description": "Name of the network resource to return.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "network" + ], + "response": { + "$ref": "Network" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "insert": { + "id": "compute.networks.insert", + "path": "{project}/global/networks", + "httpMethod": "POST", + "description": "Creates a network resource in the specified project using the data 
included in the request.", + "parameters": { + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + } + }, + "parameterOrder": [ + "project" + ], + "request": { + "$ref": "Network" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "list": { + "id": "compute.networks.list", + "path": "{project}/global/networks", + "httpMethod": "GET", + "description": "Retrieves the list of network resources available to the specified project.", + "parameters": { + "filter": { + "type": "string", + "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, filter=name ne example-instance.\n\nCompute Engine Beta API Only: If you use filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. In particular, use filtering on nested fields to take advantage of instance labels to organize and filter results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions meaning that resources must match all expressions to pass the filters.", + "location": "query" + }, + "maxResults": { + "type": "integer", + "description": "The maximum number of results per page that Compute Engine should return. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests.", + "default": "500", + "format": "uint32", + "minimum": "0", + "maximum": "500", + "location": "query" + }, + "pageToken": { + "type": "string", + "description": "Specifies a page token to use. 
Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + "location": "query" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + } + }, + "parameterOrder": [ + "project" + ], + "response": { + "$ref": "NetworkList" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + } + } + }, + "projects": { + "methods": { + "get": { + "id": "compute.projects.get", + "path": "{project}", + "httpMethod": "GET", + "description": "Returns the specified project resource.", + "parameters": { + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + } + }, + "parameterOrder": [ + "project" + ], + "response": { + "$ref": "Project" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "moveDisk": { + "id": "compute.projects.moveDisk", + "path": "{project}/moveDisk", + "httpMethod": "POST", + "description": "Moves a persistent disk from one zone to another.", + "parameters": { + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + } + }, + "parameterOrder": [ + "project" + ], + "request": { + "$ref": "DiskMoveRequest" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "moveInstance": { + "id": "compute.projects.moveInstance", + "path": "{project}/moveInstance", + "httpMethod": "POST", + "description": "Moves an instance and its attached persistent disks from one zone to another.", + "parameters": { + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + } + }, + "parameterOrder": [ + "project" + ], + "request": { + "$ref": "InstanceMoveRequest" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "setCommonInstanceMetadata": { + "id": "compute.projects.setCommonInstanceMetadata", + "path": "{project}/setCommonInstanceMetadata", + "httpMethod": "POST", + "description": "Sets metadata common to all instances within the specified project using the data included in the request.", + "parameters": { + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + } + }, + "parameterOrder": [ + "project" + ], + "request": { + "$ref": "Metadata" + }, + "response": { + "$ref": 
"Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "setUsageExportBucket": { + "id": "compute.projects.setUsageExportBucket", + "path": "{project}/setUsageExportBucket", + "httpMethod": "POST", + "description": "Enables the usage export feature and sets the usage export bucket where reports are stored. If you provide an empty request body using this method, the usage export feature will be disabled.", + "parameters": { + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + } + }, + "parameterOrder": [ + "project" + ], + "request": { + "$ref": "UsageExportLocation" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/devstorage.full_control", + "https://www.googleapis.com/auth/devstorage.read_only", + "https://www.googleapis.com/auth/devstorage.read_write" + ] + } + } + }, + "regionOperations": { + "methods": { + "delete": { + "id": "compute.regionOperations.delete", + "path": "{project}/regions/{region}/operations/{operation}", + "httpMethod": "DELETE", + "description": "Deletes the specified region-specific Operations resource.", + "parameters": { + "operation": { + "type": "string", + "description": "Name of the Operations resource to delete.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "region": { + "type": "string", + "description": "Name of the region scoping this request.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "region", + "operation" + ], + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "get": { + "id": "compute.regionOperations.get", + "path": "{project}/regions/{region}/operations/{operation}", + "httpMethod": "GET", + "description": "Retrieves the specified region-specific Operations resource.", + "parameters": { + "operation": { + "type": "string", + "description": "Name of the Operations resource to return.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "region": { + "type": "string", + "description": "Name of the zone scoping this request.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "region", + "operation" + ], + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "list": { + "id": 
"compute.regionOperations.list", + "path": "{project}/regions/{region}/operations", + "httpMethod": "GET", + "description": "Retrieves the list of Operation resources contained within the specified region.", + "parameters": { + "filter": { + "type": "string", + "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, filter=name ne example-instance.\n\nCompute Engine Beta API Only: If you use filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. In particular, use filtering on nested fields to take advantage of instance labels to organize and filter results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions meaning that resources must match all expressions to pass the filters.", + "location": "query" + }, + "maxResults": { + "type": "integer", + "description": "The maximum number of results per page that Compute Engine should return. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests.", + "default": "500", + "format": "uint32", + "minimum": "0", + "maximum": "500", + "location": "query" + }, + "pageToken": { + "type": "string", + "description": "Specifies a page token to use. 
Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + "location": "query" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "region": { + "type": "string", + "description": "Name of the region scoping this request.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "region" + ], + "response": { + "$ref": "OperationList" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + } + } + }, + "regions": { + "methods": { + "get": { + "id": "compute.regions.get", + "path": "{project}/regions/{region}", + "httpMethod": "GET", + "description": "Returns the specified region resource.", + "parameters": { + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "region": { + "type": "string", + "description": "Name of the region resource to return.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "region" + ], + "response": { + "$ref": "Region" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "list": { + "id": "compute.regions.list", + "path": "{project}/regions", + "httpMethod": "GET", + "description": "Retrieves the list of region resources available to the specified project.", + "parameters": { + "filter": { + "type": "string", + "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, filter=name ne example-instance.\n\nCompute Engine Beta API Only: If you use filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. In particular, use filtering on nested fields to take advantage of instance labels to organize and filter results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). 
Multiple expressions are treated as AND expressions meaning that resources must match all expressions to pass the filters.", + "location": "query" + }, + "maxResults": { + "type": "integer", + "description": "The maximum number of results per page that Compute Engine should return. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests.", + "default": "500", + "format": "uint32", + "minimum": "0", + "maximum": "500", + "location": "query" + }, + "pageToken": { + "type": "string", + "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + "location": "query" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + } + }, + "parameterOrder": [ + "project" + ], + "response": { + "$ref": "RegionList" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + } + } + }, + "routes": { + "methods": { + "delete": { + "id": "compute.routes.delete", + "path": "{project}/global/routes/{route}", + "httpMethod": "DELETE", + "description": "Deletes the specified route resource.", + "parameters": { + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "route": { + "type": "string", + "description": "Name of the route resource to delete.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "route" + ], + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "get": { + "id": "compute.routes.get", + "path": "{project}/global/routes/{route}", + "httpMethod": "GET", + "description": "Returns the specified route resource.", + "parameters": { + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "route": { + "type": "string", + "description": "Name of the route resource to return.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "route" + ], + "response": { + "$ref": "Route" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "insert": { + "id": "compute.routes.insert", + "path": "{project}/global/routes", + "httpMethod": "POST", + "description": "Creates a route resource in the specified project using the data included in the request.", + "parameters": { + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": 
"(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + } + }, + "parameterOrder": [ + "project" + ], + "request": { + "$ref": "Route" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "list": { + "id": "compute.routes.list", + "path": "{project}/global/routes", + "httpMethod": "GET", + "description": "Retrieves the list of route resources available to the specified project.", + "parameters": { + "filter": { + "type": "string", + "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, filter=name ne example-instance.\n\nCompute Engine Beta API Only: If you use filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. In particular, use filtering on nested fields to take advantage of instance labels to organize and filter results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions meaning that resources must match all expressions to pass the filters.", + "location": "query" + }, + "maxResults": { + "type": "integer", + "description": "The maximum number of results per page that Compute Engine should return. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests.", + "default": "500", + "format": "uint32", + "minimum": "0", + "maximum": "500", + "location": "query" + }, + "pageToken": { + "type": "string", + "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + "location": "query" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + } + }, + "parameterOrder": [ + "project" + ], + "response": { + "$ref": "RouteList" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + } + } + }, + "snapshots": { + "methods": { + "delete": { + "id": "compute.snapshots.delete", + "path": "{project}/global/snapshots/{snapshot}", + "httpMethod": "DELETE", + "description": "Deletes the specified Snapshot resource. 
Keep in mind that deleting a single snapshot might not necessarily delete all the data on that snapshot. If any data on the snapshot that is marked for deletion is needed for subsequent snapshots, the data will be moved to the next corresponding snapshot.\n\nFor more information, see Deleting snapshots.", + "parameters": { + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "snapshot": { + "type": "string", + "description": "Name of the Snapshot resource to delete.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "snapshot" + ], + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "get": { + "id": "compute.snapshots.get", + "path": "{project}/global/snapshots/{snapshot}", + "httpMethod": "GET", + "description": "Returns the specified Snapshot resource.", + "parameters": { + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "snapshot": { + "type": "string", + "description": "Name of the Snapshot resource to return.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "snapshot" + ], + "response": { + "$ref": "Snapshot" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "list": { + "id": "compute.snapshots.list", + "path": "{project}/global/snapshots", + "httpMethod": "GET", + "description": "Retrieves the list of Snapshot resources contained within the specified project.", + "parameters": { + "filter": { + "type": "string", + "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, filter=name ne example-instance.\n\nCompute Engine Beta API Only: If you use filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. In particular, use filtering on nested fields to take advantage of instance labels to organize and filter results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). 
Multiple expressions are treated as AND expressions meaning that resources must match all expressions to pass the filters.", + "location": "query" + }, + "maxResults": { + "type": "integer", + "description": "The maximum number of results per page that Compute Engine should return. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests.", + "default": "500", + "format": "uint32", + "minimum": "0", + "maximum": "500", + "location": "query" + }, + "pageToken": { + "type": "string", + "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + "location": "query" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + } + }, + "parameterOrder": [ + "project" + ], + "response": { + "$ref": "SnapshotList" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + } + } + }, + "sslCertificates": { + "methods": { + "delete": { + "id": "compute.sslCertificates.delete", + "path": "{project}/global/sslCertificates/{sslCertificate}", + "httpMethod": "DELETE", + "description": "Deletes the specified SslCertificate resource.", + "parameters": { + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "sslCertificate": { + "type": "string", + "description": "Name of the SslCertificate resource to delete.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "sslCertificate" + ], + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "get": { + "id": "compute.sslCertificates.get", + "path": "{project}/global/sslCertificates/{sslCertificate}", + "httpMethod": "GET", + "description": "Returns the specified SslCertificate resource.", + "parameters": { + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "sslCertificate": { + "type": "string", + "description": "Name of the SslCertificate resource to return.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "sslCertificate" + ], + "response": { + "$ref": "SslCertificate" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "insert": { + "id": "compute.sslCertificates.insert", + "path": "{project}/global/sslCertificates", + "httpMethod": "POST", + "description": "Creates a SslCertificate resource in the specified project using the data included in the request.", + "parameters": { + "project": { + "type": 
"string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + } + }, + "parameterOrder": [ + "project" + ], + "request": { + "$ref": "SslCertificate" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "list": { + "id": "compute.sslCertificates.list", + "path": "{project}/global/sslCertificates", + "httpMethod": "GET", + "description": "Retrieves the list of SslCertificate resources available to the specified project.", + "parameters": { + "filter": { + "type": "string", + "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, filter=name ne example-instance.\n\nCompute Engine Beta API Only: If you use filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. In particular, use filtering on nested fields to take advantage of instance labels to organize and filter results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions meaning that resources must match all expressions to pass the filters.", + "location": "query" + }, + "maxResults": { + "type": "integer", + "description": "The maximum number of results per page that Compute Engine should return. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests.", + "default": "500", + "format": "uint32", + "minimum": "0", + "maximum": "500", + "location": "query" + }, + "pageToken": { + "type": "string", + "description": "Specifies a page token to use. 
Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + "location": "query" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + } + }, + "parameterOrder": [ + "project" + ], + "response": { + "$ref": "SslCertificateList" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + } + } + }, + "targetHttpProxies": { + "methods": { + "delete": { + "id": "compute.targetHttpProxies.delete", + "path": "{project}/global/targetHttpProxies/{targetHttpProxy}", + "httpMethod": "DELETE", + "description": "Deletes the specified TargetHttpProxy resource.", + "parameters": { + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "targetHttpProxy": { + "type": "string", + "description": "Name of the TargetHttpProxy resource to delete.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "targetHttpProxy" + ], + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "get": { + "id": "compute.targetHttpProxies.get", + "path": "{project}/global/targetHttpProxies/{targetHttpProxy}", + "httpMethod": "GET", + "description": "Returns the specified TargetHttpProxy resource.", + "parameters": { + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "targetHttpProxy": { + "type": "string", + "description": "Name of the TargetHttpProxy resource to return.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "targetHttpProxy" + ], + "response": { + "$ref": "TargetHttpProxy" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "insert": { + "id": "compute.targetHttpProxies.insert", + "path": "{project}/global/targetHttpProxies", + "httpMethod": "POST", + "description": "Creates a TargetHttpProxy resource in the specified project using the data included in the request.", + "parameters": { + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + } + }, + "parameterOrder": [ + "project" + ], + "request": { + "$ref": "TargetHttpProxy" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "list": { + "id": "compute.targetHttpProxies.list", + "path": "{project}/global/targetHttpProxies", + "httpMethod": "GET", + 
"description": "Retrieves the list of TargetHttpProxy resources available to the specified project.", + "parameters": { + "filter": { + "type": "string", + "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, filter=name ne example-instance.\n\nCompute Engine Beta API Only: If you use filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. In particular, use filtering on nested fields to take advantage of instance labels to organize and filter results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions meaning that resources must match all expressions to pass the filters.", + "location": "query" + }, + "maxResults": { + "type": "integer", + "description": "The maximum number of results per page that Compute Engine should return. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests.", + "default": "500", + "format": "uint32", + "minimum": "0", + "maximum": "500", + "location": "query" + }, + "pageToken": { + "type": "string", + "description": "Specifies a page token to use. 
Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + "location": "query" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + } + }, + "parameterOrder": [ + "project" + ], + "response": { + "$ref": "TargetHttpProxyList" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "setUrlMap": { + "id": "compute.targetHttpProxies.setUrlMap", + "path": "{project}/targetHttpProxies/{targetHttpProxy}/setUrlMap", + "httpMethod": "POST", + "description": "Changes the URL map for TargetHttpProxy.", + "parameters": { + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "targetHttpProxy": { + "type": "string", + "description": "Name of the TargetHttpProxy resource whose URL map is to be set.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "targetHttpProxy" + ], + "request": { + "$ref": "UrlMapReference" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + } + } + }, + "targetHttpsProxies": { + "methods": { + "delete": { + "id": "compute.targetHttpsProxies.delete", + "path": "{project}/global/targetHttpsProxies/{targetHttpsProxy}", + "httpMethod": "DELETE", + "description": "Deletes the specified TargetHttpsProxy resource.", + "parameters": { + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "targetHttpsProxy": { + "type": "string", + "description": "Name of the TargetHttpsProxy resource to delete.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "targetHttpsProxy" + ], + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "get": { + "id": "compute.targetHttpsProxies.get", + "path": "{project}/global/targetHttpsProxies/{targetHttpsProxy}", + "httpMethod": "GET", + "description": "Returns the specified TargetHttpsProxy resource.", + "parameters": { + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "targetHttpsProxy": { + "type": "string", + "description": "Name of the TargetHttpsProxy resource to return.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "targetHttpsProxy" + ], + "response": { + "$ref": "TargetHttpsProxy" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + 
"https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "insert": { + "id": "compute.targetHttpsProxies.insert", + "path": "{project}/global/targetHttpsProxies", + "httpMethod": "POST", + "description": "Creates a TargetHttpsProxy resource in the specified project using the data included in the request.", + "parameters": { + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + } + }, + "parameterOrder": [ + "project" + ], + "request": { + "$ref": "TargetHttpsProxy" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "list": { + "id": "compute.targetHttpsProxies.list", + "path": "{project}/global/targetHttpsProxies", + "httpMethod": "GET", + "description": "Retrieves the list of TargetHttpsProxy resources available to the specified project.", + "parameters": { + "filter": { + "type": "string", + "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, filter=name ne example-instance.\n\nCompute Engine Beta API Only: If you use filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. In particular, use filtering on nested fields to take advantage of instance labels to organize and filter results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions meaning that resources must match all expressions to pass the filters.", + "location": "query" + }, + "maxResults": { + "type": "integer", + "description": "The maximum number of results per page that Compute Engine should return. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests.", + "default": "500", + "format": "uint32", + "minimum": "0", + "maximum": "500", + "location": "query" + }, + "pageToken": { + "type": "string", + "description": "Specifies a page token to use. 
Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + "location": "query" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + } + }, + "parameterOrder": [ + "project" + ], + "response": { + "$ref": "TargetHttpsProxyList" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "setSslCertificates": { + "id": "compute.targetHttpsProxies.setSslCertificates", + "path": "{project}/targetHttpsProxies/{targetHttpsProxy}/setSslCertificates", + "httpMethod": "POST", + "description": "Replaces SslCertificates for TargetHttpsProxy.", + "parameters": { + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "targetHttpsProxy": { + "type": "string", + "description": "Name of the TargetHttpsProxy resource whose SSLCertificate is to be set.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "targetHttpsProxy" + ], + "request": { + "$ref": "TargetHttpsProxiesSetSslCertificatesRequest" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "setUrlMap": { + "id": "compute.targetHttpsProxies.setUrlMap", + "path": "{project}/targetHttpsProxies/{targetHttpsProxy}/setUrlMap", + "httpMethod": "POST", + "description": "Changes the URL map for TargetHttpsProxy.", + "parameters": { + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "targetHttpsProxy": { + "type": "string", + "description": "Name of the TargetHttpsProxy resource whose URL map is to be set.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "targetHttpsProxy" + ], + "request": { + "$ref": "UrlMapReference" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + } + } + }, + "targetInstances": { + "methods": { + "aggregatedList": { + "id": "compute.targetInstances.aggregatedList", + "path": "{project}/aggregated/targetInstances", + "httpMethod": "GET", + "description": "Retrieves the list of target instances grouped by scope.", + "parameters": { + "filter": { + "type": "string", + "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. 
The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, filter=name ne example-instance.\n\nCompute Engine Beta API Only: If you use filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. In particular, use filtering on nested fields to take advantage of instance labels to organize and filter results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions meaning that resources must match all expressions to pass the filters.", + "location": "query" + }, + "maxResults": { + "type": "integer", + "description": "The maximum number of results per page that Compute Engine should return. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests.", + "default": "500", + "format": "uint32", + "minimum": "0", + "maximum": "500", + "location": "query" + }, + "pageToken": { + "type": "string", + "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + "location": "query" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + } + }, + "parameterOrder": [ + "project" + ], + "response": { + "$ref": "TargetInstanceAggregatedList" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "delete": { + "id": "compute.targetInstances.delete", + "path": "{project}/zones/{zone}/targetInstances/{targetInstance}", + "httpMethod": "DELETE", + "description": "Deletes the specified TargetInstance resource.", + "parameters": { + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "targetInstance": { + "type": "string", + "description": "Name of the TargetInstance resource to delete.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + }, + "zone": { + "type": "string", + "description": "Name of the zone scoping this request.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "zone", + "targetInstance" + ], + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "get": { + "id": "compute.targetInstances.get", + "path": "{project}/zones/{zone}/targetInstances/{targetInstance}", + "httpMethod": "GET", + "description": "Returns the specified TargetInstance resource.", + "parameters": { + "project": { + "type": 
"string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "targetInstance": { + "type": "string", + "description": "Name of the TargetInstance resource to return.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + }, + "zone": { + "type": "string", + "description": "Name of the zone scoping this request.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "zone", + "targetInstance" + ], + "response": { + "$ref": "TargetInstance" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "insert": { + "id": "compute.targetInstances.insert", + "path": "{project}/zones/{zone}/targetInstances", + "httpMethod": "POST", + "description": "Creates a TargetInstance resource in the specified project and zone using the data included in the request.", + "parameters": { + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "zone": { + "type": "string", + "description": "Name of the zone scoping this request.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "zone" + ], + "request": { + "$ref": "TargetInstance" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "list": { + "id": "compute.targetInstances.list", + "path": "{project}/zones/{zone}/targetInstances", + "httpMethod": "GET", + "description": "Retrieves the list of TargetInstance resources available to the specified project and zone.", + "parameters": { + "filter": { + "type": "string", + "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, filter=name ne example-instance.\n\nCompute Engine Beta API Only: If you use filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. In particular, use filtering on nested fields to take advantage of instance labels to organize and filter results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). 
Multiple expressions are treated as AND expressions meaning that resources must match all expressions to pass the filters.", + "location": "query" + }, + "maxResults": { + "type": "integer", + "description": "The maximum number of results per page that Compute Engine should return. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests.", + "default": "500", + "format": "uint32", + "minimum": "0", + "maximum": "500", + "location": "query" + }, + "pageToken": { + "type": "string", + "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + "location": "query" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "zone": { + "type": "string", + "description": "Name of the zone scoping this request.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "zone" + ], + "response": { + "$ref": "TargetInstanceList" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + } + } + }, + "targetPools": { + "methods": { + "addHealthCheck": { + "id": "compute.targetPools.addHealthCheck", + "path": "{project}/regions/{region}/targetPools/{targetPool}/addHealthCheck", + "httpMethod": "POST", + "description": "Adds health check URL to targetPool.", + "parameters": { + "project": { + "type": "string", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "region": { + "type": "string", + "description": "Name of the region scoping this request.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + }, + "targetPool": { + "type": "string", + "description": "Name of the TargetPool resource to which health_check_url is to be added.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "region", + "targetPool" + ], + "request": { + "$ref": "TargetPoolsAddHealthCheckRequest" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "addInstance": { + "id": "compute.targetPools.addInstance", + "path": "{project}/regions/{region}/targetPools/{targetPool}/addInstance", + "httpMethod": "POST", + "description": "Adds instance URL to targetPool.", + "parameters": { + "project": { + "type": "string", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "region": { + "type": "string", + "description": "Name of the region scoping this request.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + }, + "targetPool": { + "type": "string", + "description": "Name of the TargetPool resource to which instance_url is to be added.", + "required": true, + "pattern": 
"[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "region", + "targetPool" + ], + "request": { + "$ref": "TargetPoolsAddInstanceRequest" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "aggregatedList": { + "id": "compute.targetPools.aggregatedList", + "path": "{project}/aggregated/targetPools", + "httpMethod": "GET", + "description": "Retrieves the list of target pools grouped by scope.", + "parameters": { + "filter": { + "type": "string", + "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, filter=name ne example-instance.\n\nCompute Engine Beta API Only: If you use filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. In particular, use filtering on nested fields to take advantage of instance labels to organize and filter results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions meaning that resources must match all expressions to pass the filters.", + "location": "query" + }, + "maxResults": { + "type": "integer", + "description": "The maximum number of results per page that Compute Engine should return. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests.", + "default": "500", + "format": "uint32", + "minimum": "0", + "maximum": "500", + "location": "query" + }, + "pageToken": { + "type": "string", + "description": "Specifies a page token to use. 
Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + "location": "query" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + } + }, + "parameterOrder": [ + "project" + ], + "response": { + "$ref": "TargetPoolAggregatedList" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "delete": { + "id": "compute.targetPools.delete", + "path": "{project}/regions/{region}/targetPools/{targetPool}", + "httpMethod": "DELETE", + "description": "Deletes the specified TargetPool resource.", + "parameters": { + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "region": { + "type": "string", + "description": "Name of the region scoping this request.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + }, + "targetPool": { + "type": "string", + "description": "Name of the TargetPool resource to delete.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "region", + "targetPool" + ], + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "get": { + "id": "compute.targetPools.get", + "path": "{project}/regions/{region}/targetPools/{targetPool}", + "httpMethod": "GET", + "description": "Returns the specified TargetPool resource.", + "parameters": { + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "region": { + "type": "string", + "description": "Name of the region scoping this request.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + }, + "targetPool": { + "type": "string", + "description": "Name of the TargetPool resource to return.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "region", + "targetPool" + ], + "response": { + "$ref": "TargetPool" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "getHealth": { + "id": "compute.targetPools.getHealth", + "path": "{project}/regions/{region}/targetPools/{targetPool}/getHealth", + "httpMethod": "POST", + "description": "Gets the most recent health check results for each IP for the given instance that is referenced by the given TargetPool.", + "parameters": { + "project": { + "type": "string", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "region": { + "type": "string", + "description": "Name of the region scoping this
request.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + }, + "targetPool": { + "type": "string", + "description": "Name of the TargetPool resource to which the queried instance belongs.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "region", + "targetPool" + ], + "request": { + "$ref": "InstanceReference" + }, + "response": { + "$ref": "TargetPoolInstanceHealth" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "insert": { + "id": "compute.targetPools.insert", + "path": "{project}/regions/{region}/targetPools", + "httpMethod": "POST", + "description": "Creates a TargetPool resource in the specified project and region using the data included in the request.", + "parameters": { + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "region": { + "type": "string", + "description": "Name of the region scoping this request.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "region" + ], + "request": { + "$ref": "TargetPool" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "list": { + "id": "compute.targetPools.list", + "path": "{project}/regions/{region}/targetPools", + "httpMethod": "GET", + "description": "Retrieves the list of TargetPool resources available to the specified project and region.", + "parameters": { + "filter": { + "type": "string", + "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, filter=name ne example-instance.\n\nCompute Engine Beta API Only: If you use filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. In particular, use filtering on nested fields to take advantage of instance labels to organize and filter results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions meaning that resources must match all expressions to pass the filters.", + "location": "query" + }, + "maxResults": { + "type": "integer", + "description": "The maximum number of results per page that Compute Engine should return. 
If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests.", + "default": "500", + "format": "uint32", + "minimum": "0", + "maximum": "500", + "location": "query" + }, + "pageToken": { + "type": "string", + "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + "location": "query" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "region": { + "type": "string", + "description": "Name of the region scoping this request.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "region" + ], + "response": { + "$ref": "TargetPoolList" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "removeHealthCheck": { + "id": "compute.targetPools.removeHealthCheck", + "path": "{project}/regions/{region}/targetPools/{targetPool}/removeHealthCheck", + "httpMethod": "POST", + "description": "Removes health check URL from targetPool.", + "parameters": { + "project": { + "type": "string", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "region": { + "type": "string", + "description": "Name of the region scoping this request.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + }, + "targetPool": { + "type": "string", + "description": "Name of the TargetPool resource from which health_check_url is to be removed.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "region", + "targetPool" + ], + "request": { + "$ref": "TargetPoolsRemoveHealthCheckRequest" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "removeInstance": { + "id": "compute.targetPools.removeInstance", + "path": "{project}/regions/{region}/targetPools/{targetPool}/removeInstance", + "httpMethod": "POST", + "description": "Removes instance URL from targetPool.", + "parameters": { + "project": { + "type": "string", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "region": { + "type": "string", + "description": "Name of the region scoping this request.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + }, + "targetPool": { + "type": "string", + "description": "Name of the TargetPool resource from which instance_url is to be removed.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "region", + "targetPool" + ], + "request": { + "$ref": "TargetPoolsRemoveInstanceRequest" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ +
"https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "setBackup": { + "id": "compute.targetPools.setBackup", + "path": "{project}/regions/{region}/targetPools/{targetPool}/setBackup", + "httpMethod": "POST", + "description": "Changes backup pool configurations.", + "parameters": { + "failoverRatio": { + "type": "number", + "description": "New failoverRatio value for the containing target pool.", + "format": "float", + "location": "query" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "region": { + "type": "string", + "description": "Name of the region scoping this request.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + }, + "targetPool": { + "type": "string", + "description": "Name of the TargetPool resource for which the backup is to be set.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "region", + "targetPool" + ], + "request": { + "$ref": "TargetReference" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + } + } + }, + "targetVpnGateways": { + "methods": { + "aggregatedList": { + "id": "compute.targetVpnGateways.aggregatedList", + "path": "{project}/aggregated/targetVpnGateways", + "httpMethod": "GET", + "description": "Retrieves the list of target VPN gateways grouped by scope.", + "parameters": { + "filter": { + "type": "string", + "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, filter=name ne example-instance.\n\nCompute Engine Beta API Only: If you use filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. In particular, use filtering on nested fields to take advantage of instance labels to organize and filter results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions meaning that resources must match all expressions to pass the filters.", + "location": "query" + }, + "maxResults": { + "type": "integer", + "description": "The maximum number of results per page that Compute Engine should return. 
If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests.", + "default": "500", + "format": "uint32", + "minimum": "0", + "maximum": "500", + "location": "query" + }, + "pageToken": { + "type": "string", + "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + "location": "query" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + } + }, + "parameterOrder": [ + "project" + ], + "response": { + "$ref": "TargetVpnGatewayAggregatedList" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "delete": { + "id": "compute.targetVpnGateways.delete", + "path": "{project}/regions/{region}/targetVpnGateways/{targetVpnGateway}", + "httpMethod": "DELETE", + "description": "Deletes the specified TargetVpnGateway resource.", + "parameters": { + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "region": { + "type": "string", + "description": "The name of the region for this request.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + }, + "targetVpnGateway": { + "type": "string", + "description": "Name of the TargetVpnGateway resource to delete.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "region", + "targetVpnGateway" + ], + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "get": { + "id": "compute.targetVpnGateways.get", + "path": "{project}/regions/{region}/targetVpnGateways/{targetVpnGateway}", + "httpMethod": "GET", + "description": "Returns the specified TargetVpnGateway resource.", + "parameters": { + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "region": { + "type": "string", + "description": "The name of the region for this request.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + }, + "targetVpnGateway": { + "type": "string", + "description": "Name of the TargetVpnGateway resource to return.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "region", + "targetVpnGateway" + ], + "response": { + "$ref": "TargetVpnGateway" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "insert": { + "id": "compute.targetVpnGateways.insert", + "path": "{project}/regions/{region}/targetVpnGateways", + "httpMethod": "POST", + 
"description": "Creates a TargetVpnGateway resource in the specified project and region using the data included in the request.", + "parameters": { + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "region": { + "type": "string", + "description": "The name of the region for this request.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "region" + ], + "request": { + "$ref": "TargetVpnGateway" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "list": { + "id": "compute.targetVpnGateways.list", + "path": "{project}/regions/{region}/targetVpnGateways", + "httpMethod": "GET", + "description": "Retrieves the list of TargetVpnGateway resources available to the specified project and region.", + "parameters": { + "filter": { + "type": "string", + "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, filter=name ne example-instance.\n\nCompute Engine Beta API Only: If you use filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. In particular, use filtering on nested fields to take advantage of instance labels to organize and filter results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions meaning that resources must match all expressions to pass the filters.", + "location": "query" + }, + "maxResults": { + "type": "integer", + "description": "The maximum number of results per page that Compute Engine should return. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests.", + "default": "500", + "format": "uint32", + "minimum": "0", + "maximum": "500", + "location": "query" + }, + "pageToken": { + "type": "string", + "description": "Specifies a page token to use. 
Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + "location": "query" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "region": { + "type": "string", + "description": "The name of the region for this request.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "region" + ], + "response": { + "$ref": "TargetVpnGatewayList" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + } + } + }, + "urlMaps": { + "methods": { + "delete": { + "id": "compute.urlMaps.delete", + "path": "{project}/global/urlMaps/{urlMap}", + "httpMethod": "DELETE", + "description": "Deletes the specified UrlMap resource.", + "parameters": { + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "urlMap": { + "type": "string", + "description": "Name of the UrlMap resource to delete.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "urlMap" + ], + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "get": { + "id": "compute.urlMaps.get", + "path": "{project}/global/urlMaps/{urlMap}", + "httpMethod": "GET", + "description": "Returns the specified UrlMap resource.", + "parameters": { + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "urlMap": { + "type": "string", + "description": "Name of the UrlMap resource to return.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "urlMap" + ], + "response": { + "$ref": "UrlMap" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "insert": { + "id": "compute.urlMaps.insert", + "path": "{project}/global/urlMaps", + "httpMethod": "POST", + "description": "Creates a UrlMap resource in the specified project using the data included in the request.", + "parameters": { + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + } + }, + "parameterOrder": [ + "project" + ], + "request": { + "$ref": "UrlMap" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "list": { + "id": "compute.urlMaps.list", + "path": "{project}/global/urlMaps", + "httpMethod": "GET", + 
"description": "Retrieves the list of UrlMap resources available to the specified project.", + "parameters": { + "filter": { + "type": "string", + "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, filter=name ne example-instance.\n\nCompute Engine Beta API Only: If you use filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. In particular, use filtering on nested fields to take advantage of instance labels to organize and filter results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions meaning that resources must match all expressions to pass the filters.", + "location": "query" + }, + "maxResults": { + "type": "integer", + "description": "The maximum number of results per page that Compute Engine should return. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests.", + "default": "500", + "format": "uint32", + "minimum": "0", + "maximum": "500", + "location": "query" + }, + "pageToken": { + "type": "string", + "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + "location": "query" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + } + }, + "parameterOrder": [ + "project" + ], + "response": { + "$ref": "UrlMapList" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "patch": { + "id": "compute.urlMaps.patch", + "path": "{project}/global/urlMaps/{urlMap}", + "httpMethod": "PATCH", + "description": "Update the entire content of the UrlMap resource. 
This method supports patch semantics.", + "parameters": { + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "urlMap": { + "type": "string", + "description": "Name of the UrlMap resource to update.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "urlMap" + ], + "request": { + "$ref": "UrlMap" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "update": { + "id": "compute.urlMaps.update", + "path": "{project}/global/urlMaps/{urlMap}", + "httpMethod": "PUT", + "description": "Update the entire content of the UrlMap resource.", + "parameters": { + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "urlMap": { + "type": "string", + "description": "Name of the UrlMap resource to update.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "urlMap" + ], + "request": { + "$ref": "UrlMap" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "validate": { + "id": "compute.urlMaps.validate", + "path": "{project}/global/urlMaps/{urlMap}/validate", + "httpMethod": "POST", + "description": "Run static validation for the UrlMap. In particular, the tests of the provided UrlMap will be run. Calling this method does NOT create the UrlMap.", + "parameters": { + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "urlMap": { + "type": "string", + "description": "Name of the UrlMap resource to be validated.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "urlMap" + ], + "request": { + "$ref": "UrlMapsValidateRequest" + }, + "response": { + "$ref": "UrlMapsValidateResponse" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + } + } + }, + "vpnTunnels": { + "methods": { + "aggregatedList": { + "id": "compute.vpnTunnels.aggregatedList", + "path": "{project}/aggregated/vpnTunnels", + "httpMethod": "GET", + "description": "Retrieves the list of VPN tunnels grouped by scope.", + "parameters": { + "filter": { + "type": "string", + "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to.
The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, filter=name ne example-instance.\n\nCompute Engine Beta API Only: If you use filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. In particular, use filtering on nested fields to take advantage of instance labels to organize and filter results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions meaning that resources must match all expressions to pass the filters.", + "location": "query" + }, + "maxResults": { + "type": "integer", + "description": "The maximum number of results per page that Compute Engine should return. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests.", + "default": "500", + "format": "uint32", + "minimum": "0", + "maximum": "500", + "location": "query" + }, + "pageToken": { + "type": "string", + "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + "location": "query" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + } + }, + "parameterOrder": [ + "project" + ], + "response": { + "$ref": "VpnTunnelAggregatedList" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "delete": { + "id": "compute.vpnTunnels.delete", + "path": "{project}/regions/{region}/vpnTunnels/{vpnTunnel}", + "httpMethod": "DELETE", + "description": "Deletes the specified VpnTunnel resource.", + "parameters": { + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "region": { + "type": "string", + "description": "The name of the region for this request.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + }, + "vpnTunnel": { + "type": "string", + "description": "Name of the VpnTunnel resource to delete.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "region", + "vpnTunnel" + ], + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "get": { + "id": "compute.vpnTunnels.get", + "path": "{project}/regions/{region}/vpnTunnels/{vpnTunnel}", + "httpMethod": "GET", + "description": "Returns the specified VpnTunnel resource.", + "parameters": { + "project": { + "type": "string", + "description": "Project ID for 
this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "region": { + "type": "string", + "description": "The name of the region for this request.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + }, + "vpnTunnel": { + "type": "string", + "description": "Name of the VpnTunnel resource to return.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "region", + "vpnTunnel" + ], + "response": { + "$ref": "VpnTunnel" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "insert": { + "id": "compute.vpnTunnels.insert", + "path": "{project}/regions/{region}/vpnTunnels", + "httpMethod": "POST", + "description": "Creates a VpnTunnel resource in the specified project and region using the data included in the request.", + "parameters": { + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "region": { + "type": "string", + "description": "The name of the region for this request.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "region" + ], + "request": { + "$ref": "VpnTunnel" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "list": { + "id": "compute.vpnTunnels.list", + "path": "{project}/regions/{region}/vpnTunnels", + "httpMethod": "GET", + "description": "Retrieves the list of VpnTunnel resources contained in the specified project and region.", + "parameters": { + "filter": { + "type": "string", + "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, filter=name ne example-instance.\n\nCompute Engine Beta API Only: If you use filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. In particular, use filtering on nested fields to take advantage of instance labels to organize and filter results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). 
Multiple expressions are treated as AND expressions meaning that resources must match all expressions to pass the filters.", + "location": "query" + }, + "maxResults": { + "type": "integer", + "description": "The maximum number of results per page that Compute Engine should return. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests.", + "default": "500", + "format": "uint32", + "minimum": "0", + "maximum": "500", + "location": "query" + }, + "pageToken": { + "type": "string", + "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + "location": "query" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "region": { + "type": "string", + "description": "The name of the region for this request.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "region" + ], + "response": { + "$ref": "VpnTunnelList" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + } + } + }, + "zoneOperations": { + "methods": { + "delete": { + "id": "compute.zoneOperations.delete", + "path": "{project}/zones/{zone}/operations/{operation}", + "httpMethod": "DELETE", + "description": "Deletes the specified zone-specific Operations resource.", + "parameters": { + "operation": { + "type": "string", + "description": "Name of the Operations resource to delete.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "zone": { + "type": "string", + "description": "Name of the zone scoping this request.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "zone", + "operation" + ], + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "get": { + "id": "compute.zoneOperations.get", + "path": "{project}/zones/{zone}/operations/{operation}", + "httpMethod": "GET", + "description": "Retrieves the specified zone-specific Operations resource.", + "parameters": { + "operation": { + "type": "string", + "description": "Name of the Operations resource to return.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "zone": { + "type": "string", + "description": "Name of the zone scoping this request.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + } + }, + "parameterOrder": [ + "project", 
+ "zone", + "operation" + ], + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "list": { + "id": "compute.zoneOperations.list", + "path": "{project}/zones/{zone}/operations", + "httpMethod": "GET", + "description": "Retrieves the list of Operation resources contained within the specified zone.", + "parameters": { + "filter": { + "type": "string", + "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, filter=name ne example-instance.\n\nCompute Engine Beta API Only: If you use filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. In particular, use filtering on nested fields to take advantage of instance labels to organize and filter results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions meaning that resources must match all expressions to pass the filters.", + "location": "query" + }, + "maxResults": { + "type": "integer", + "description": "The maximum number of results per page that Compute Engine should return. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests.", + "default": "500", + "format": "uint32", + "minimum": "0", + "maximum": "500", + "location": "query" + }, + "pageToken": { + "type": "string", + "description": "Specifies a page token to use. 
Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + "location": "query" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "zone": { + "type": "string", + "description": "Name of the zone scoping this request.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "zone" + ], + "response": { + "$ref": "OperationList" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + } + } + }, + "zones": { + "methods": { + "get": { + "id": "compute.zones.get", + "path": "{project}/zones/{zone}", + "httpMethod": "GET", + "description": "Returns the specified zone resource.", + "parameters": { + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "zone": { + "type": "string", + "description": "Name of the zone resource to return.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "zone" + ], + "response": { + "$ref": "Zone" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "list": { + "id": "compute.zones.list", + "path": "{project}/zones", + "httpMethod": "GET", + "description": "Retrieves the list of zone resources available to the specified project.", + "parameters": { + "filter": { + "type": "string", + "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, filter=name ne example-instance.\n\nCompute Engine Beta API Only: If you use filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. In particular, use filtering on nested fields to take advantage of instance labels to organize and filter results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). 
Multiple expressions are treated as AND expressions meaning that resources must match all expressions to pass the filters.", + "location": "query" + }, + "maxResults": { + "type": "integer", + "description": "The maximum number of results per page that Compute Engine should return. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests.", + "default": "500", + "format": "uint32", + "minimum": "0", + "maximum": "500", + "location": "query" + }, + "pageToken": { + "type": "string", + "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + "location": "query" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + } + }, + "parameterOrder": [ + "project" + ], + "response": { + "$ref": "ZoneList" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + } + } + } + } +} diff --git a/vendor/google.golang.org/api/compute/v1/compute-gen.go b/vendor/google.golang.org/api/compute/v1/compute-gen.go new file mode 100644 index 000000000..c5882702e --- /dev/null +++ b/vendor/google.golang.org/api/compute/v1/compute-gen.go @@ -0,0 +1,37579 @@ +// Package compute provides access to the Compute Engine API. +// +// See https://developers.google.com/compute/docs/reference/latest/ +// +// Usage example: +// +// import "google.golang.org/api/compute/v1" +// ... +// computeService, err := compute.New(oauthHttpClient) +package compute + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + context "golang.org/x/net/context" + ctxhttp "golang.org/x/net/context/ctxhttp" + gensupport "google.golang.org/api/gensupport" + googleapi "google.golang.org/api/googleapi" + "io" + "net/http" + "net/url" + "strconv" + "strings" +) + +// Always reference these packages, just in case the auto-generated code +// below doesn't. +var _ = bytes.NewBuffer +var _ = strconv.Itoa +var _ = fmt.Sprintf +var _ = json.NewDecoder +var _ = io.Copy +var _ = url.Parse +var _ = gensupport.MarshalJSON +var _ = googleapi.Version +var _ = errors.New +var _ = strings.Replace +var _ = context.Canceled +var _ = ctxhttp.Do + +const apiId = "compute:v1" +const apiName = "compute" +const apiVersion = "v1" +const basePath = "https://www.googleapis.com/compute/v1/projects/" + +// OAuth2 scopes used by this API. 
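// A minimal sketch of wiring these scopes to the client constructor below,
// assuming credentials come from golang.org/x/oauth2/google (the variable
// names here are illustrative, not part of this package):
//
//	ctx := context.Background()
//	hc, err := google.DefaultClient(ctx, compute.ComputeScope)
//	if err != nil {
//		log.Fatal(err)
//	}
//	computeService, err := compute.New(hc)
//	if err != nil {
//		log.Fatal(err)
//	}
//	_ = computeService // ready to call, e.g. computeService.Zones.List("my-project")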
+const ( + // View and manage your data across Google Cloud Platform services + CloudPlatformScope = "https://www.googleapis.com/auth/cloud-platform" + + // View and manage your Google Compute Engine resources + ComputeScope = "https://www.googleapis.com/auth/compute" + + // View your Google Compute Engine resources + ComputeReadonlyScope = "https://www.googleapis.com/auth/compute.readonly" + + // Manage your data and permissions in Google Cloud Storage + DevstorageFullControlScope = "https://www.googleapis.com/auth/devstorage.full_control" + + // View your data in Google Cloud Storage + DevstorageReadOnlyScope = "https://www.googleapis.com/auth/devstorage.read_only" + + // Manage your data in Google Cloud Storage + DevstorageReadWriteScope = "https://www.googleapis.com/auth/devstorage.read_write" +) + +func New(client *http.Client) (*Service, error) { + if client == nil { + return nil, errors.New("client is nil") + } + s := &Service{client: client, BasePath: basePath} + s.Addresses = NewAddressesService(s) + s.Autoscalers = NewAutoscalersService(s) + s.BackendServices = NewBackendServicesService(s) + s.DiskTypes = NewDiskTypesService(s) + s.Disks = NewDisksService(s) + s.Firewalls = NewFirewallsService(s) + s.ForwardingRules = NewForwardingRulesService(s) + s.GlobalAddresses = NewGlobalAddressesService(s) + s.GlobalForwardingRules = NewGlobalForwardingRulesService(s) + s.GlobalOperations = NewGlobalOperationsService(s) + s.HttpHealthChecks = NewHttpHealthChecksService(s) + s.HttpsHealthChecks = NewHttpsHealthChecksService(s) + s.Images = NewImagesService(s) + s.InstanceGroupManagers = NewInstanceGroupManagersService(s) + s.InstanceGroups = NewInstanceGroupsService(s) + s.InstanceTemplates = NewInstanceTemplatesService(s) + s.Instances = NewInstancesService(s) + s.Licenses = NewLicensesService(s) + s.MachineTypes = NewMachineTypesService(s) + s.Networks = NewNetworksService(s) + s.Projects = NewProjectsService(s) + s.RegionOperations = NewRegionOperationsService(s) + s.Regions = NewRegionsService(s) + s.Routes = NewRoutesService(s) + s.Snapshots = NewSnapshotsService(s) + s.SslCertificates = NewSslCertificatesService(s) + s.TargetHttpProxies = NewTargetHttpProxiesService(s) + s.TargetHttpsProxies = NewTargetHttpsProxiesService(s) + s.TargetInstances = NewTargetInstancesService(s) + s.TargetPools = NewTargetPoolsService(s) + s.TargetVpnGateways = NewTargetVpnGatewaysService(s) + s.UrlMaps = NewUrlMapsService(s) + s.VpnTunnels = NewVpnTunnelsService(s) + s.ZoneOperations = NewZoneOperationsService(s) + s.Zones = NewZonesService(s) + return s, nil +} + +type Service struct { + client *http.Client + BasePath string // API endpoint base URL + UserAgent string // optional additional User-Agent fragment + + Addresses *AddressesService + + Autoscalers *AutoscalersService + + BackendServices *BackendServicesService + + DiskTypes *DiskTypesService + + Disks *DisksService + + Firewalls *FirewallsService + + ForwardingRules *ForwardingRulesService + + GlobalAddresses *GlobalAddressesService + + GlobalForwardingRules *GlobalForwardingRulesService + + GlobalOperations *GlobalOperationsService + + HttpHealthChecks *HttpHealthChecksService + + HttpsHealthChecks *HttpsHealthChecksService + + Images *ImagesService + + InstanceGroupManagers *InstanceGroupManagersService + + InstanceGroups *InstanceGroupsService + + InstanceTemplates *InstanceTemplatesService + + Instances *InstancesService + + Licenses *LicensesService + + MachineTypes *MachineTypesService + + Networks *NetworksService + + Projects 
*ProjectsService + + RegionOperations *RegionOperationsService + + Regions *RegionsService + + Routes *RoutesService + + Snapshots *SnapshotsService + + SslCertificates *SslCertificatesService + + TargetHttpProxies *TargetHttpProxiesService + + TargetHttpsProxies *TargetHttpsProxiesService + + TargetInstances *TargetInstancesService + + TargetPools *TargetPoolsService + + TargetVpnGateways *TargetVpnGatewaysService + + UrlMaps *UrlMapsService + + VpnTunnels *VpnTunnelsService + + ZoneOperations *ZoneOperationsService + + Zones *ZonesService +} + +func (s *Service) userAgent() string { + if s.UserAgent == "" { + return googleapi.UserAgent + } + return googleapi.UserAgent + " " + s.UserAgent +} + +func NewAddressesService(s *Service) *AddressesService { + rs := &AddressesService{s: s} + return rs +} + +type AddressesService struct { + s *Service +} + +func NewAutoscalersService(s *Service) *AutoscalersService { + rs := &AutoscalersService{s: s} + return rs +} + +type AutoscalersService struct { + s *Service +} + +func NewBackendServicesService(s *Service) *BackendServicesService { + rs := &BackendServicesService{s: s} + return rs +} + +type BackendServicesService struct { + s *Service +} + +func NewDiskTypesService(s *Service) *DiskTypesService { + rs := &DiskTypesService{s: s} + return rs +} + +type DiskTypesService struct { + s *Service +} + +func NewDisksService(s *Service) *DisksService { + rs := &DisksService{s: s} + return rs +} + +type DisksService struct { + s *Service +} + +func NewFirewallsService(s *Service) *FirewallsService { + rs := &FirewallsService{s: s} + return rs +} + +type FirewallsService struct { + s *Service +} + +func NewForwardingRulesService(s *Service) *ForwardingRulesService { + rs := &ForwardingRulesService{s: s} + return rs +} + +type ForwardingRulesService struct { + s *Service +} + +func NewGlobalAddressesService(s *Service) *GlobalAddressesService { + rs := &GlobalAddressesService{s: s} + return rs +} + +type GlobalAddressesService struct { + s *Service +} + +func NewGlobalForwardingRulesService(s *Service) *GlobalForwardingRulesService { + rs := &GlobalForwardingRulesService{s: s} + return rs +} + +type GlobalForwardingRulesService struct { + s *Service +} + +func NewGlobalOperationsService(s *Service) *GlobalOperationsService { + rs := &GlobalOperationsService{s: s} + return rs +} + +type GlobalOperationsService struct { + s *Service +} + +func NewHttpHealthChecksService(s *Service) *HttpHealthChecksService { + rs := &HttpHealthChecksService{s: s} + return rs +} + +type HttpHealthChecksService struct { + s *Service +} + +func NewHttpsHealthChecksService(s *Service) *HttpsHealthChecksService { + rs := &HttpsHealthChecksService{s: s} + return rs +} + +type HttpsHealthChecksService struct { + s *Service +} + +func NewImagesService(s *Service) *ImagesService { + rs := &ImagesService{s: s} + return rs +} + +type ImagesService struct { + s *Service +} + +func NewInstanceGroupManagersService(s *Service) *InstanceGroupManagersService { + rs := &InstanceGroupManagersService{s: s} + return rs +} + +type InstanceGroupManagersService struct { + s *Service +} + +func NewInstanceGroupsService(s *Service) *InstanceGroupsService { + rs := &InstanceGroupsService{s: s} + return rs +} + +type InstanceGroupsService struct { + s *Service +} + +func NewInstanceTemplatesService(s *Service) *InstanceTemplatesService { + rs := &InstanceTemplatesService{s: s} + return rs +} + +type InstanceTemplatesService struct { + s *Service +} + +func NewInstancesService(s *Service) 
*InstancesService { + rs := &InstancesService{s: s} + return rs +} + +type InstancesService struct { + s *Service +} + +func NewLicensesService(s *Service) *LicensesService { + rs := &LicensesService{s: s} + return rs +} + +type LicensesService struct { + s *Service +} + +func NewMachineTypesService(s *Service) *MachineTypesService { + rs := &MachineTypesService{s: s} + return rs +} + +type MachineTypesService struct { + s *Service +} + +func NewNetworksService(s *Service) *NetworksService { + rs := &NetworksService{s: s} + return rs +} + +type NetworksService struct { + s *Service +} + +func NewProjectsService(s *Service) *ProjectsService { + rs := &ProjectsService{s: s} + return rs +} + +type ProjectsService struct { + s *Service +} + +func NewRegionOperationsService(s *Service) *RegionOperationsService { + rs := &RegionOperationsService{s: s} + return rs +} + +type RegionOperationsService struct { + s *Service +} + +func NewRegionsService(s *Service) *RegionsService { + rs := &RegionsService{s: s} + return rs +} + +type RegionsService struct { + s *Service +} + +func NewRoutesService(s *Service) *RoutesService { + rs := &RoutesService{s: s} + return rs +} + +type RoutesService struct { + s *Service +} + +func NewSnapshotsService(s *Service) *SnapshotsService { + rs := &SnapshotsService{s: s} + return rs +} + +type SnapshotsService struct { + s *Service +} + +func NewSslCertificatesService(s *Service) *SslCertificatesService { + rs := &SslCertificatesService{s: s} + return rs +} + +type SslCertificatesService struct { + s *Service +} + +func NewTargetHttpProxiesService(s *Service) *TargetHttpProxiesService { + rs := &TargetHttpProxiesService{s: s} + return rs +} + +type TargetHttpProxiesService struct { + s *Service +} + +func NewTargetHttpsProxiesService(s *Service) *TargetHttpsProxiesService { + rs := &TargetHttpsProxiesService{s: s} + return rs +} + +type TargetHttpsProxiesService struct { + s *Service +} + +func NewTargetInstancesService(s *Service) *TargetInstancesService { + rs := &TargetInstancesService{s: s} + return rs +} + +type TargetInstancesService struct { + s *Service +} + +func NewTargetPoolsService(s *Service) *TargetPoolsService { + rs := &TargetPoolsService{s: s} + return rs +} + +type TargetPoolsService struct { + s *Service +} + +func NewTargetVpnGatewaysService(s *Service) *TargetVpnGatewaysService { + rs := &TargetVpnGatewaysService{s: s} + return rs +} + +type TargetVpnGatewaysService struct { + s *Service +} + +func NewUrlMapsService(s *Service) *UrlMapsService { + rs := &UrlMapsService{s: s} + return rs +} + +type UrlMapsService struct { + s *Service +} + +func NewVpnTunnelsService(s *Service) *VpnTunnelsService { + rs := &VpnTunnelsService{s: s} + return rs +} + +type VpnTunnelsService struct { + s *Service +} + +func NewZoneOperationsService(s *Service) *ZoneOperationsService { + rs := &ZoneOperationsService{s: s} + return rs +} + +type ZoneOperationsService struct { + s *Service +} + +func NewZonesService(s *Service) *ZonesService { + rs := &ZonesService{s: s} + return rs +} + +type ZonesService struct { + s *Service +} + +// AccessConfig: An access configuration attached to an instance's +// network interface. +type AccessConfig struct { + // Kind: [Output Only] Type of the resource. Always compute#accessConfig + // for access configs. + Kind string `json:"kind,omitempty"` + + // Name: Name of this access configuration. + Name string `json:"name,omitempty"` + + // NatIP: An external IP address associated with this instance. 
Specify + // an unused static external IP address available to the project or + // leave this field undefined to use an IP from a shared ephemeral IP + // address pool. If you specify a static external IP address, it must + // live in the same region as the zone of the instance. + NatIP string `json:"natIP,omitempty"` + + // Type: The type of configuration. The default and only option is + // ONE_TO_ONE_NAT. + // + // Possible values: + // "ONE_TO_ONE_NAT" (default) + Type string `json:"type,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Kind") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *AccessConfig) MarshalJSON() ([]byte, error) { + type noMethod AccessConfig + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +// Address: A reserved address resource. +type Address struct { + // Address: The static external IP address represented by this resource. + Address string `json:"address,omitempty"` + + // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text + // format. + CreationTimestamp string `json:"creationTimestamp,omitempty"` + + // Description: An optional description of this resource. Provide this + // property when you create the resource. + Description string `json:"description,omitempty"` + + // Id: [Output Only] The unique identifier for the resource. This + // identifier is defined by the server. + Id uint64 `json:"id,omitempty,string"` + + // Kind: [Output Only] Type of the resource. Always compute#address for + // addresses. + Kind string `json:"kind,omitempty"` + + // Name: Name of the resource; provided by the client when the resource + // is created. The name must be 1-63 characters long, and comply with + // RFC1035. Specifically, the name must be 1-63 characters long and + // match the regular expression [a-z]([-a-z0-9]*[a-z0-9])? which means + // the first character must be a lowercase letter, and all following + // characters must be a dash, lowercase letter, or digit, except the + // last character, which cannot be a dash. + Name string `json:"name,omitempty"` + + // Region: [Output Only] URL of the region where the regional address + // resides. This field is not applicable to global addresses. + Region string `json:"region,omitempty"` + + // SelfLink: [Output Only] Server-defined URL for the resource. + SelfLink string `json:"selfLink,omitempty"` + + // Status: [Output Only] The status of the address, which can be either + // IN_USE or RESERVED. An address that is RESERVED is currently reserved + // and available to use. An IN_USE address is currently being used by + // another resource and is not available. + // + // Possible values: + // "IN_USE" + // "RESERVED" + Status string `json:"status,omitempty"` + + // Users: [Output Only] The URLs of the resources that are using this + // address. + Users []string `json:"users,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Address") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. 
However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *Address) MarshalJSON() ([]byte, error) { + type noMethod Address + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +type AddressAggregatedList struct { + // Id: [Output Only] Unique identifier for the resource; defined by the + // server. + Id string `json:"id,omitempty"` + + // Items: [Output Only] A map of scoped address lists. + Items map[string]AddressesScopedList `json:"items,omitempty"` + + // Kind: [Output Only] Type of resource. Always + // compute#addressAggregatedList for aggregated lists of addresses. + Kind string `json:"kind,omitempty"` + + // NextPageToken: [Output Only] This token allows you to get the next + // page of results for list requests. If the number of results is larger + // than maxResults, use the nextPageToken as a value for the query + // parameter pageToken in the next list request. Subsequent list + // requests will have their own nextPageToken to continue paging through + // the results. + NextPageToken string `json:"nextPageToken,omitempty"` + + // SelfLink: [Output Only] Server-defined URL for this resource. + SelfLink string `json:"selfLink,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Id") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *AddressAggregatedList) MarshalJSON() ([]byte, error) { + type noMethod AddressAggregatedList + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +// AddressList: Contains a list of address resources. +type AddressList struct { + // Id: [Output Only] The unique identifier for the resource. This + // identifier is defined by the server. + Id string `json:"id,omitempty"` + + // Items: [Output Only] A list of Address resources. + Items []*Address `json:"items,omitempty"` + + // Kind: [Output Only] Type of resource. Always compute#addressList for + // lists of addresses. + Kind string `json:"kind,omitempty"` + + // NextPageToken: [Output Only] This token allows you to get the next + // page of results for list requests. If the number of results is larger + // than maxResults, use the nextPageToken as a value for the query + // parameter pageToken in the next list request. Subsequent list + // requests will have their own nextPageToken to continue paging through + // the results. + NextPageToken string `json:"nextPageToken,omitempty"` + + // SelfLink: [Output Only] Server-defined URL for the resource. + SelfLink string `json:"selfLink,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Id") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. 
However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *AddressList) MarshalJSON() ([]byte, error) { + type noMethod AddressList + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +type AddressesScopedList struct { + // Addresses: [Output Only] List of addresses contained in this scope. + Addresses []*Address `json:"addresses,omitempty"` + + // Warning: [Output Only] Informational warning which replaces the list + // of addresses when the list is empty. + Warning *AddressesScopedListWarning `json:"warning,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Addresses") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *AddressesScopedList) MarshalJSON() ([]byte, error) { + type noMethod AddressesScopedList + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +// AddressesScopedListWarning: [Output Only] Informational warning which +// replaces the list of addresses when the list is empty. +type AddressesScopedListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. + // + // Possible values: + // "DEPRECATED_RESOURCE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "INJECTED_KERNELS_DEPRECATED" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_NOT_DELETED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNREACHABLE" + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*AddressesScopedListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *AddressesScopedListWarning) MarshalJSON() ([]byte, error) { + type noMethod AddressesScopedListWarning + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +type AddressesScopedListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. 
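// A sketch of driving the nextPageToken/pageToken handshake described
// above with the generated list calls, assuming the AddressesListCall
// defined later in this file exposes Filter, PageToken, and Do (the
// project, region, and filter values are illustrative):
//
//	call := computeService.Addresses.List("my-project", "us-central1").
//		Filter("name ne example-instance")
//	for {
//		page, err := call.Do()
//		if err != nil {
//			log.Fatal(err)
//		}
//		for _, addr := range page.Items {
//			fmt.Println(addr.Name, addr.Status)
//		}
//		if page.NextPageToken == "" {
//			break
//		}
//		call.PageToken(page.NextPageToken)
//	}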
For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource, and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *AddressesScopedListWarningData) MarshalJSON() ([]byte, error) { + type noMethod AddressesScopedListWarningData + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +// AttachedDisk: An instance-attached disk resource. +type AttachedDisk struct { + // AutoDelete: Specifies whether the disk will be auto-deleted when the + // instance is deleted (but not when the disk is detached from the + // instance). + AutoDelete bool `json:"autoDelete,omitempty"` + + // Boot: Indicates that this is a boot disk. The virtual machine will + // use the first partition of the disk for its root filesystem. + Boot bool `json:"boot,omitempty"` + + // DeviceName: Specifies a unique device name of your choice that is + // reflected into the /dev/disk/by-id/google-* tree of a Linux operating + // system running within the instance. This name can be used to + // reference the device for mounting, resizing, and so on, from within + // the instance. + // + // If not specified, the server chooses a default device name to apply + // to this disk, in the form persistent-disks-x, where x is a number + // assigned by Google Compute Engine. This field is only applicable for + // persistent disks. + DeviceName string `json:"deviceName,omitempty"` + + // Index: Assigns a zero-based index to this disk, where 0 is reserved + // for the boot disk. For example, if you have many disks attached to an + // instance, each disk would have a unique index number. If not + // specified, the server will choose an appropriate value. + Index int64 `json:"index,omitempty"` + + // InitializeParams: [Input Only] Specifies the parameters for a new + // disk that will be created alongside the new instance. Use + // initialization parameters to create boot disks or local SSDs attached + // to the new instance. + // + // This property is mutually exclusive with the source property; you can + // only define one or the other, but not both. + InitializeParams *AttachedDiskInitializeParams `json:"initializeParams,omitempty"` + + // Interface: Specifies the disk interface to use for attaching this + // disk, either SCSI or NVME. The default is SCSI. For performance + // characteristics of SCSI over NVMe, see Local SSD performance. + // + // Possible values: + // "NVME" + // "SCSI" + Interface string `json:"interface,omitempty"` + + // Kind: [Output Only] Type of the resource. Always compute#attachedDisk + // for attached disks. 
+ Kind string `json:"kind,omitempty"` + + // Licenses: [Output Only] Any valid publicly visible licenses. + Licenses []string `json:"licenses,omitempty"` + + // Mode: The mode in which to attach this disk, either READ_WRITE or + // READ_ONLY. If not specified, the default is to attach the disk in + // READ_WRITE mode. + // + // Possible values: + // "READ_ONLY" + // "READ_WRITE" + Mode string `json:"mode,omitempty"` + + // Source: Specifies a valid partial or full URL to an existing + // Persistent Disk resource. This field is only applicable for + // persistent disks. + Source string `json:"source,omitempty"` + + // Type: Specifies the type of the disk, either SCRATCH or PERSISTENT. + // If not specified, the default is PERSISTENT. + // + // Possible values: + // "PERSISTENT" + // "SCRATCH" + Type string `json:"type,omitempty"` + + // ForceSendFields is a list of field names (e.g. "AutoDelete") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *AttachedDisk) MarshalJSON() ([]byte, error) { + type noMethod AttachedDisk + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +// AttachedDiskInitializeParams: [Input Only] Specifies the parameters +// for a new disk that will be created alongside the new instance. Use +// initialization parameters to create boot disks or local SSDs attached +// to the new instance. +// +// This property is mutually exclusive with the source property; you can +// only define one or the other, but not both. +type AttachedDiskInitializeParams struct { + // DiskName: Specifies the disk name. If not specified, the default is + // to use the name of the instance. + DiskName string `json:"diskName,omitempty"` + + // DiskSizeGb: Specifies the size of the disk in base-2 GB. + DiskSizeGb int64 `json:"diskSizeGb,omitempty,string"` + + // DiskType: Specifies the disk type to use to create the instance. If + // not specified, the default is pd-standard, specified using the full + // URL. For + // example: + // + // https://www.googleapis.com/compute/v1/projects/project/zones + // /zone/diskTypes/pd-standard + // + // Other values include pd-ssd and local-ssd. If you define this field, + // you can provide either the full or partial URL. For example, the + // following are valid values: + // - + // https://www.googleapis.com/compute/v1/projects/project/zones/zone/diskTypes/diskType + // - projects/project/zones/zone/diskTypes/diskType + // - zones/zone/diskTypes/diskType + DiskType string `json:"diskType,omitempty"` + + // SourceImage: A source image used to create the disk. You can provide + // a private (custom) image, and Compute Engine will use the + // corresponding image from your project. For + // example: + // + // global/images/my-private-image + // + // Or you can provide an image from a publicly-available project. For + // example, to use a Debian image from the debian-cloud project, make + // sure to include the project in the + // URL: + // + // projects/debian-cloud/global/images/debian-7-wheezy-vYYYYMMDD + // + // where vYYYYMMDD is the image version. The fully-qualified URL will + // also work in both cases. 
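// A sketch of an AttachedDisk whose boot disk is created from a public
// image, using the disk-type and source-image URL forms described above
// (all values are illustrative):
//
//	disk := &compute.AttachedDisk{
//		Boot:       true,
//		AutoDelete: true,
//		Type:       "PERSISTENT",
//		InitializeParams: &compute.AttachedDiskInitializeParams{
//			SourceImage: "projects/debian-cloud/global/images/debian-7-wheezy-vYYYYMMDD",
//			DiskType:    "zones/us-central1-f/diskTypes/pd-standard",
//			DiskSizeGb:  10,
//		},
//	}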
+ SourceImage string `json:"sourceImage,omitempty"` + + // ForceSendFields is a list of field names (e.g. "DiskName") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *AttachedDiskInitializeParams) MarshalJSON() ([]byte, error) { + type noMethod AttachedDiskInitializeParams + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +type Autoscaler struct { + // AutoscalingPolicy: Autoscaling configuration. + AutoscalingPolicy *AutoscalingPolicy `json:"autoscalingPolicy,omitempty"` + + // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text + // format. + CreationTimestamp string `json:"creationTimestamp,omitempty"` + + // Description: An optional description of this resource. Provide this + // property when you create the resource. + Description string `json:"description,omitempty"` + + // Id: [Output Only] The unique identifier for the resource. This + // identifier is defined by the server. + Id uint64 `json:"id,omitempty,string"` + + // Kind: Type of the resource. + Kind string `json:"kind,omitempty"` + + // Name: Name of the resource. Provided by the client when the resource + // is created. The name must be 1-63 characters long, and comply with + // RFC1035. Specifically, the name must be 1-63 characters long and + // match the regular expression [a-z]([-a-z0-9]*[a-z0-9])? which means + // the first character must be a lowercase letter, and all following + // characters must be a dash, lowercase letter, or digit, except the + // last character, which cannot be a dash. + Name string `json:"name,omitempty"` + + // SelfLink: [Output Only] Server-defined URL for the resource. + SelfLink string `json:"selfLink,omitempty"` + + // Target: URL of Instance Group Manager or Replica Pool which will be + // controlled by Autoscaler. + Target string `json:"target,omitempty"` + + // Zone: [Output Only] URL of the zone where the instance group resides. + Zone string `json:"zone,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "AutoscalingPolicy") + // to unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *Autoscaler) MarshalJSON() ([]byte, error) { + type noMethod Autoscaler + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +type AutoscalerAggregatedList struct { + // Id: [Output Only] The unique identifier for the resource. This + // identifier is defined by the server. + Id string `json:"id,omitempty"` + + // Items: A map of scoped autoscaler lists. + Items map[string]AutoscalersScopedList `json:"items,omitempty"` + + // Kind: Type of resource. + Kind string `json:"kind,omitempty"` + + // NextPageToken: [Output Only] This token allows you to get the next + // page of results for list requests. 
If the number of results is larger + // than maxResults, use the nextPageToken as a value for the query + // parameter pageToken in the next list request. Subsequent list + // requests will have their own nextPageToken to continue paging through + // the results. + NextPageToken string `json:"nextPageToken,omitempty"` + + // SelfLink: [Output Only] Server-defined URL for this resource. + SelfLink string `json:"selfLink,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Id") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *AutoscalerAggregatedList) MarshalJSON() ([]byte, error) { + type noMethod AutoscalerAggregatedList + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +// AutoscalerList: Contains a list of persistent autoscaler resources. +type AutoscalerList struct { + // Id: [Output Only] The unique identifier for the resource. This + // identifier is defined by the server. + Id string `json:"id,omitempty"` + + // Items: A list of Autoscaler resources. + Items []*Autoscaler `json:"items,omitempty"` + + // Kind: Type of resource. + Kind string `json:"kind,omitempty"` + + // NextPageToken: [Output Only] This token allows you to get the next + // page of results for list requests. If the number of results is larger + // than maxResults, use the nextPageToken as a value for the query + // parameter pageToken in the next list request. Subsequent list + // requests will have their own nextPageToken to continue paging through + // the results. + NextPageToken string `json:"nextPageToken,omitempty"` + + // SelfLink: [Output Only] Server-defined URL for this resource. + SelfLink string `json:"selfLink,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Id") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *AutoscalerList) MarshalJSON() ([]byte, error) { + type noMethod AutoscalerList + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +type AutoscalersScopedList struct { + // Autoscalers: List of autoscalers contained in this scope. + Autoscalers []*Autoscaler `json:"autoscalers,omitempty"` + + // Warning: Informational warning which replaces the list of autoscalers + // when the list is empty. + Warning *AutoscalersScopedListWarning `json:"warning,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Autoscalers") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. 
However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *AutoscalersScopedList) MarshalJSON() ([]byte, error) { + type noMethod AutoscalersScopedList + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +// AutoscalersScopedListWarning: Informational warning which replaces +// the list of autoscalers when the list is empty. +type AutoscalersScopedListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. + // + // Possible values: + // "DEPRECATED_RESOURCE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "INJECTED_KERNELS_DEPRECATED" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_NOT_DELETED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNREACHABLE" + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*AutoscalersScopedListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *AutoscalersScopedListWarning) MarshalJSON() ([]byte, error) { + type noMethod AutoscalersScopedListWarning + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +type AutoscalersScopedListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource, and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. 
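// A sketch of the ForceSendFields mechanism described above: forcing a
// zero-valued field into the request body that omitempty would otherwise
// drop (the AttachedDisk example is illustrative):
//
//	d := &compute.AttachedDisk{
//		AutoDelete:      false,
//		ForceSendFields: []string{"AutoDelete"},
//	}
//	body, _ := d.MarshalJSON() // includes "autoDelete":false despite the zero value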
+ ForceSendFields []string `json:"-"` +} + +func (s *AutoscalersScopedListWarningData) MarshalJSON() ([]byte, error) { + type noMethod AutoscalersScopedListWarningData + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +// AutoscalingPolicy: Cloud Autoscaler policy. +type AutoscalingPolicy struct { + // CoolDownPeriodSec: The number of seconds that the Autoscaler should + // wait between two succeeding changes to the number of virtual + // machines. You should define an interval that is at least as long as + // the initialization time of a virtual machine and the time it may take + // for the replica pool to create the virtual machine. The default is 60 + // seconds. + CoolDownPeriodSec int64 `json:"coolDownPeriodSec,omitempty"` + + // CpuUtilization: TODO(jbartosik): Add support for scaling based on + // multiple utilization metrics (take max recommendation). Exactly one + // utilization policy should be provided. Configuration parameters of + // CPU based autoscaling policy. + CpuUtilization *AutoscalingPolicyCpuUtilization `json:"cpuUtilization,omitempty"` + + // CustomMetricUtilizations: Configuration parameters of autoscaling + // based on a custom metric. + CustomMetricUtilizations []*AutoscalingPolicyCustomMetricUtilization `json:"customMetricUtilizations,omitempty"` + + // LoadBalancingUtilization: Configuration parameters of autoscaling + // based on the load balancer. + LoadBalancingUtilization *AutoscalingPolicyLoadBalancingUtilization `json:"loadBalancingUtilization,omitempty"` + + // MaxNumReplicas: The maximum number of replicas that the Autoscaler + // can scale up to. This field is required for the config to be effective. + // The maximum number of replicas must not be lower than the minimum number + // of replicas. The absolute limit for this value is defined in the + // Autoscaler backend. + MaxNumReplicas int64 `json:"maxNumReplicas,omitempty"` + + // MinNumReplicas: The minimum number of replicas that the Autoscaler + // can scale down to. Cannot be less than 0. If not provided, the Autoscaler + // will choose a default value depending on the maximum number of replicas. + MinNumReplicas int64 `json:"minNumReplicas,omitempty"` + + // ForceSendFields is a list of field names (e.g. "CoolDownPeriodSec") + // to unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *AutoscalingPolicy) MarshalJSON() ([]byte, error) { + type noMethod AutoscalingPolicy + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +// AutoscalingPolicyCpuUtilization: CPU utilization policy. +type AutoscalingPolicyCpuUtilization struct { + // UtilizationTarget: The target utilization that the Autoscaler should + // maintain. It is represented as a fraction of used cores. For example: + // 6 cores used in an 8-core VM are represented here as 0.75. Must be a + // float value between (0, 1]. If not defined, the default is 0.8. + UtilizationTarget float64 `json:"utilizationTarget,omitempty"` + + // ForceSendFields is a list of field names (e.g. "UtilizationTarget") + // to unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. 
+ ForceSendFields []string `json:"-"`
+}
+
+func (s *AutoscalingPolicyCpuUtilization) MarshalJSON() ([]byte, error) {
+ type noMethod AutoscalingPolicyCpuUtilization
+ raw := noMethod(*s)
+ return gensupport.MarshalJSON(raw, s.ForceSendFields)
+}
+
+// AutoscalingPolicyCustomMetricUtilization: Custom utilization metric
+// policy.
+type AutoscalingPolicyCustomMetricUtilization struct {
+ // Metric: Identifier of the metric. It should be a Cloud Monitoring
+ // metric. The metric cannot have negative values. The metric should be
+ // a utilization metric (increasing the number of VMs handling requests
+ // x times should reduce the average value of the metric roughly x
+ // times). For example, you could use:
+ // compute.googleapis.com/instance/network/received_bytes_count.
+ Metric string `json:"metric,omitempty"`
+
+ // UtilizationTarget: Target value of the metric that the Autoscaler
+ // should maintain. Must be a positive value.
+ UtilizationTarget float64 `json:"utilizationTarget,omitempty"`
+
+ // UtilizationTargetType: Defines the type in which utilization_target
+ // is expressed.
+ //
+ // Possible values:
+ // "DELTA_PER_MINUTE"
+ // "DELTA_PER_SECOND"
+ // "GAUGE"
+ UtilizationTargetType string `json:"utilizationTargetType,omitempty"`
+
+ // ForceSendFields is a list of field names (e.g. "Metric") to
+ // unconditionally include in API requests. By default, fields with
+ // empty values are omitted from API requests. However, any non-pointer,
+ // non-interface field appearing in ForceSendFields will be sent to the
+ // server regardless of whether the field is empty or not. This may be
+ // used to include empty fields in Patch requests.
+ ForceSendFields []string `json:"-"`
+}
+
+func (s *AutoscalingPolicyCustomMetricUtilization) MarshalJSON() ([]byte, error) {
+ type noMethod AutoscalingPolicyCustomMetricUtilization
+ raw := noMethod(*s)
+ return gensupport.MarshalJSON(raw, s.ForceSendFields)
+}
+
+// AutoscalingPolicyLoadBalancingUtilization: Load balancing utilization
+// policy.
+type AutoscalingPolicyLoadBalancingUtilization struct {
+ // UtilizationTarget: Fraction of backend capacity utilization (set in
+ // HTTP load balancing configuration) that the Autoscaler should
+ // maintain. Must be a positive float value. If not defined, the default
+ // is 0.8. For example, if your maxRatePerInstance capacity (in the HTTP
+ // Load Balancing configuration) is set at 10 and you would like to keep
+ // the number of instances such that each instance receives 7 QPS on
+ // average, set this to 0.7.
+ UtilizationTarget float64 `json:"utilizationTarget,omitempty"`
+
+ // ForceSendFields is a list of field names (e.g. "UtilizationTarget")
+ // to unconditionally include in API requests. By default, fields with
+ // empty values are omitted from API requests. However, any non-pointer,
+ // non-interface field appearing in ForceSendFields will be sent to the
+ // server regardless of whether the field is empty or not. This may be
+ // used to include empty fields in Patch requests.
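+ //
+ // Putting the policy types together, a configuration targeting 75% CPU
+ // utilization across 2-10 replicas might be sketched as (illustrative
+ // values only):
+ //
+ //   policy := &AutoscalingPolicy{
+ //       CoolDownPeriodSec: 60,
+ //       CpuUtilization:    &AutoscalingPolicyCpuUtilization{UtilizationTarget: 0.75},
+ //       MinNumReplicas:    2,
+ //       MaxNumReplicas:    10,
+ //   }
+ //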
+ ForceSendFields []string `json:"-"`
+}
+
+func (s *AutoscalingPolicyLoadBalancingUtilization) MarshalJSON() ([]byte, error) {
+ type noMethod AutoscalingPolicyLoadBalancingUtilization
+ raw := noMethod(*s)
+ return gensupport.MarshalJSON(raw, s.ForceSendFields)
+}
+
+// Backend: Message containing information of one individual backend.
+type Backend struct {
+ // BalancingMode: Specifies the balancing mode for this backend. The
+ // default is UTILIZATION, but the available values are UTILIZATION and
+ // RATE.
+ //
+ // Possible values:
+ // "RATE"
+ // "UTILIZATION"
+ BalancingMode string `json:"balancingMode,omitempty"`
+
+ // CapacityScaler: A multiplier applied to the group's maximum servicing
+ // capacity (either UTILIZATION or RATE). Default value is 1, which
+ // means the group will serve up to 100% of its configured CPU or RPS
+ // (depending on balancingMode). A setting of 0 means the group is
+ // completely drained, offering 0% of its available CPU or RPS. Valid
+ // range is [0.0,1.0].
+ CapacityScaler float64 `json:"capacityScaler,omitempty"`
+
+ // Description: An optional description of this resource. Provide this
+ // property when you create the resource.
+ Description string `json:"description,omitempty"`
+
+ // Group: The fully-qualified URL of a zonal Instance Group resource.
+ // This instance group defines the list of instances that serve traffic.
+ // Member virtual machine instances from each instance group must live
+ // in the same zone as the instance group itself. No two backends in a
+ // backend service are allowed to use the same Instance Group
+ // resource.
+ //
+ // Note that you must specify an Instance Group resource using the
+ // fully-qualified URL, rather than a partial URL.
+ Group string `json:"group,omitempty"`
+
+ // MaxRate: The max RPS of the group. Can be used with either balancing
+ // mode, but required in RATE mode. For RATE mode, either maxRate or
+ // maxRatePerInstance must be set.
+ MaxRate int64 `json:"maxRate,omitempty"`
+
+ // MaxRatePerInstance: The max RPS that a single backend instance can
+ // handle. This is used to calculate the capacity of the group. Can be
+ // used in either balancing mode. For RATE mode, either maxRate or
+ // maxRatePerInstance must be set.
+ MaxRatePerInstance float64 `json:"maxRatePerInstance,omitempty"`
+
+ // MaxUtilization: Used when balancingMode is UTILIZATION. This ratio
+ // defines the CPU utilization target for the group. The default is 0.8.
+ // Valid range is [0.0, 1.0].
+ MaxUtilization float64 `json:"maxUtilization,omitempty"`
+
+ // ForceSendFields is a list of field names (e.g. "BalancingMode") to
+ // unconditionally include in API requests. By default, fields with
+ // empty values are omitted from API requests. However, any non-pointer,
+ // non-interface field appearing in ForceSendFields will be sent to the
+ // server regardless of whether the field is empty or not. This may be
+ // used to include empty fields in Patch requests.
+ ForceSendFields []string `json:"-"`
+}
+
+func (s *Backend) MarshalJSON() ([]byte, error) {
+ type noMethod Backend
+ raw := noMethod(*s)
+ return gensupport.MarshalJSON(raw, s.ForceSendFields)
+}
+
+// BackendService: A BackendService resource. This resource defines a
+// group of backend virtual machines together with their serving
+// capacity.
+type BackendService struct {
+ // Backends: The list of backends that serve this BackendService.
+ Backends []*Backend `json:"backends,omitempty"` + + // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text + // format. + CreationTimestamp string `json:"creationTimestamp,omitempty"` + + // Description: An optional description of this resource. Provide this + // property when you create the resource. + Description string `json:"description,omitempty"` + + // Fingerprint: Fingerprint of this resource. A hash of the contents + // stored in this object. This field is used in optimistic locking. This + // field will be ignored when inserting a BackendService. An up-to-date + // fingerprint must be provided in order to update the BackendService. + Fingerprint string `json:"fingerprint,omitempty"` + + // HealthChecks: The list of URLs to the HttpHealthCheck or + // HttpsHealthCheck resource for health checking this BackendService. + // Currently at most one health check can be specified, and a health + // check is required. + HealthChecks []string `json:"healthChecks,omitempty"` + + // Id: [Output Only] The unique identifier for the resource. This + // identifier is defined by the server. + Id uint64 `json:"id,omitempty,string"` + + // Kind: [Output Only] Type of resource. Always compute#backendService + // for backend services. + Kind string `json:"kind,omitempty"` + + // Name: Name of the resource. Provided by the client when the resource + // is created. The name must be 1-63 characters long, and comply with + // RFC1035. Specifically, the name must be 1-63 characters long and + // match the regular expression [a-z]([-a-z0-9]*[a-z0-9])? which means + // the first character must be a lowercase letter, and all following + // characters must be a dash, lowercase letter, or digit, except the + // last character, which cannot be a dash. + Name string `json:"name,omitempty"` + + // Port: Deprecated in favor of port name. The TCP port to connect on + // the backend. The default value is 80. + Port int64 `json:"port,omitempty"` + + // PortName: Name of backend port. The same name should appear in the + // resource views referenced by this service. Required. + PortName string `json:"portName,omitempty"` + + // Possible values: + // "HTTP" + // "HTTPS" + Protocol string `json:"protocol,omitempty"` + + // SelfLink: [Output Only] Server-defined URL for the resource. + SelfLink string `json:"selfLink,omitempty"` + + // TimeoutSec: How many seconds to wait for the backend before + // considering it a failed request. Default is 30 seconds. + TimeoutSec int64 `json:"timeoutSec,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Backends") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *BackendService) MarshalJSON() ([]byte, error) { + type noMethod BackendService + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +type BackendServiceGroupHealth struct { + HealthStatus []*HealthStatus `json:"healthStatus,omitempty"` + + // Kind: [Output Only] Type of resource. Always + // compute#backendServiceGroupHealth for the health of backend services. 
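+ //
+ // As a sketch (names and URLs illustrative, not prescriptive), a
+ // minimal RATE-mode service built from the types above could look
+ // like:
+ //
+ //   bs := &BackendService{
+ //       Name:     "web-backend",
+ //       PortName: "http",
+ //       Protocol: "HTTP",
+ //       Backends: []*Backend{{
+ //           BalancingMode:      "RATE",
+ //           MaxRatePerInstance: 100,
+ //           Group:              "https://www.googleapis.com/compute/v1/projects/my-project/zones/us-central1-a/instanceGroups/web-group",
+ //       }},
+ //       HealthChecks: []string{"https://www.googleapis.com/compute/v1/projects/my-project/global/httpHealthChecks/basic-check"},
+ //   }
+ //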
+ Kind string `json:"kind,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "HealthStatus") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *BackendServiceGroupHealth) MarshalJSON() ([]byte, error) { + type noMethod BackendServiceGroupHealth + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +// BackendServiceList: Contains a list of BackendService resources. +type BackendServiceList struct { + // Id: [Output Only] The unique identifier for the resource. This + // identifier is defined by the server. + Id string `json:"id,omitempty"` + + // Items: A list of BackendService resources. + Items []*BackendService `json:"items,omitempty"` + + // Kind: [Output Only] Type of resource. Always + // compute#backendServiceList for lists of backend services. + Kind string `json:"kind,omitempty"` + + // NextPageToken: [Output Only] This token allows you to get the next + // page of results for list requests. If the number of results is larger + // than maxResults, use the nextPageToken as a value for the query + // parameter pageToken in the next list request. Subsequent list + // requests will have their own nextPageToken to continue paging through + // the results. + NextPageToken string `json:"nextPageToken,omitempty"` + + // SelfLink: [Output Only] Server-defined URL for this resource. + SelfLink string `json:"selfLink,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Id") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *BackendServiceList) MarshalJSON() ([]byte, error) { + type noMethod BackendServiceList + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +// DeprecationStatus: Deprecation status for a public resource. +type DeprecationStatus struct { + // Deleted: An optional RFC3339 timestamp on or after which the + // deprecation state of this resource will be changed to DELETED. + Deleted string `json:"deleted,omitempty"` + + // Deprecated: An optional RFC3339 timestamp on or after which the + // deprecation state of this resource will be changed to DEPRECATED. + Deprecated string `json:"deprecated,omitempty"` + + // Obsolete: An optional RFC3339 timestamp on or after which the + // deprecation state of this resource will be changed to OBSOLETE. + Obsolete string `json:"obsolete,omitempty"` + + // Replacement: The URL of the suggested replacement for a deprecated + // resource. The suggested replacement resource must be the same kind of + // resource as the deprecated resource. 
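+ //
+ // For example (illustrative values), a resource deprecated in favor of
+ // a newer image might carry:
+ //
+ //   status := &DeprecationStatus{
+ //       State:       "DEPRECATED",
+ //       Replacement: "projects/my-project/global/images/my-image-v2",
+ //   }
+ //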
+ Replacement string `json:"replacement,omitempty"` + + // State: The deprecation state of this resource. This can be + // DEPRECATED, OBSOLETE, or DELETED. Operations which create a new + // resource using a DEPRECATED resource will return successfully, but + // with a warning indicating the deprecated resource and recommending + // its replacement. Operations which use OBSOLETE or DELETED resources + // will be rejected and result in an error. + // + // Possible values: + // "DELETED" + // "DEPRECATED" + // "OBSOLETE" + State string `json:"state,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Deleted") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *DeprecationStatus) MarshalJSON() ([]byte, error) { + type noMethod DeprecationStatus + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +// Disk: A Disk resource. +type Disk struct { + // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text + // format. + CreationTimestamp string `json:"creationTimestamp,omitempty"` + + // Description: An optional description of this resource. Provide this + // property when you create the resource. + Description string `json:"description,omitempty"` + + // Id: [Output Only] The unique identifier for the resource. This + // identifier is defined by the server. + Id uint64 `json:"id,omitempty,string"` + + // Kind: [Output Only] Type of the resource. Always compute#disk for + // disks. + Kind string `json:"kind,omitempty"` + + // LastAttachTimestamp: [Output Only] Last attach timestamp in RFC3339 + // text format. + LastAttachTimestamp string `json:"lastAttachTimestamp,omitempty"` + + // LastDetachTimestamp: [Output Only] Last detach timestamp in RFC3339 + // text format. + LastDetachTimestamp string `json:"lastDetachTimestamp,omitempty"` + + // Licenses: Any applicable publicly visible licenses. + Licenses []string `json:"licenses,omitempty"` + + // Name: Name of the resource; provided by the client when the resource + // is created. The name must be 1-63 characters long, and comply with + // RFC1035. Specifically, the name must be 1-63 characters long and + // match the regular expression [a-z]([-a-z0-9]*[a-z0-9])? which means + // the first character must be a lowercase letter, and all following + // characters must be a dash, lowercase letter, or digit, except the + // last character, which cannot be a dash. + Name string `json:"name,omitempty"` + + // Options: Internal use only. + Options string `json:"options,omitempty"` + + // SelfLink: [Output Only] Server-defined fully-qualified URL for this + // resource. + SelfLink string `json:"selfLink,omitempty"` + + // SizeGb: Size of the persistent disk, specified in GB. You can specify + // this field when creating a persistent disk using the sourceImage or + // sourceSnapshot parameter, or specify it alone to create an empty + // persistent disk. + // + // If you specify this field along with sourceImage or sourceSnapshot, + // the value of sizeGb must not be less than the size of the sourceImage + // or the size of the snapshot. + SizeGb int64 `json:"sizeGb,omitempty,string"` + + // SourceImage: The source image used to create this disk. 
+ // If the source image is deleted from the system, this field will not
+ // be set, even if an image with the same name has been re-created.
+ //
+ // When creating a disk, you can provide a private (custom) image using
+ // the following input, and Compute Engine will use the corresponding
+ // image from your project. For example:
+ //
+ // global/images/my-private-image
+ //
+ // Or you can provide an image from a publicly-available project. For
+ // example, to use a Debian image from the debian-cloud project, make
+ // sure to include the project in the URL:
+ //
+ // projects/debian-cloud/global/images/debian-7-wheezy-vYYYYMMDD
+ //
+ // where vYYYYMMDD is the image version. The fully-qualified URL will
+ // also work in both cases.
+ SourceImage string `json:"sourceImage,omitempty"`
+
+ // SourceImageId: The ID value of the image used to create this disk.
+ // This value identifies the exact image that was used to create this
+ // persistent disk. For example, if you created the persistent disk from
+ // an image that was later deleted and recreated under the same name,
+ // the source image ID would identify the exact version of the image
+ // that was used.
+ SourceImageId string `json:"sourceImageId,omitempty"`
+
+ // SourceSnapshot: The source snapshot used to create this disk. You can
+ // provide this as a partial or full URL to the resource. For example,
+ // the following are valid values:
+ // -
+ // https://www.googleapis.com/compute/v1/projects/project/global/snapshots/snapshot
+ // - projects/project/global/snapshots/snapshot
+ // - global/snapshots/snapshot
+ SourceSnapshot string `json:"sourceSnapshot,omitempty"`
+
+ // SourceSnapshotId: [Output Only] The unique ID of the snapshot used to
+ // create this disk. This value identifies the exact snapshot that was
+ // used to create this persistent disk. For example, if you created the
+ // persistent disk from a snapshot that was later deleted and recreated
+ // under the same name, the source snapshot ID would identify the exact
+ // version of the snapshot that was used.
+ SourceSnapshotId string `json:"sourceSnapshotId,omitempty"`
+
+ // Status: [Output Only] The status of disk creation. Applicable
+ // statuses include: CREATING, FAILED, READY, RESTORING.
+ //
+ // Possible values:
+ // "CREATING"
+ // "FAILED"
+ // "READY"
+ // "RESTORING"
+ Status string `json:"status,omitempty"`
+
+ // Type: URL of the disk type resource describing which disk type to use
+ // to create the disk; provided by the client when the disk is created.
+ Type string `json:"type,omitempty"`
+
+ // Users: Links to the users of the disk (attached instances) in the
+ // form: project/zones/zone/instances/instance
+ Users []string `json:"users,omitempty"`
+
+ // Zone: [Output Only] URL of the zone where the disk resides.
+ Zone string `json:"zone,omitempty"`
+
+ // ServerResponse contains the HTTP response code and headers from the
+ // server.
+ googleapi.ServerResponse `json:"-"`
+
+ // ForceSendFields is a list of field names (e.g. "CreationTimestamp")
+ // to unconditionally include in API requests. By default, fields with
+ // empty values are omitted from API requests. However, any non-pointer,
+ // non-interface field appearing in ForceSendFields will be sent to the
+ // server regardless of whether the field is empty or not. This may be
+ // used to include empty fields in Patch requests.
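+ //
+ // A sketch of a disk built from the public image mentioned above
+ // (names illustrative):
+ //
+ //   disk := &Disk{
+ //       Name:        "boot-disk",
+ //       SizeGb:      20,
+ //       SourceImage: "projects/debian-cloud/global/images/debian-7-wheezy-vYYYYMMDD",
+ //   }
+ //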
+ ForceSendFields []string `json:"-"` +} + +func (s *Disk) MarshalJSON() ([]byte, error) { + type noMethod Disk + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +type DiskAggregatedList struct { + // Id: [Output Only] The unique identifier for the resource. This + // identifier is defined by the server. + Id string `json:"id,omitempty"` + + // Items: [Output Only] A map of scoped disk lists. + Items map[string]DisksScopedList `json:"items,omitempty"` + + // Kind: [Output Only] Type of resource. Always + // compute#diskAggregatedList for aggregated lists of persistent disks. + Kind string `json:"kind,omitempty"` + + // NextPageToken: [Output Only] This token allows you to get the next + // page of results for list requests. If the number of results is larger + // than maxResults, use the nextPageToken as a value for the query + // parameter pageToken in the next list request. Subsequent list + // requests will have their own nextPageToken to continue paging through + // the results. + NextPageToken string `json:"nextPageToken,omitempty"` + + // SelfLink: [Output Only] Server-defined URL for this resource. + SelfLink string `json:"selfLink,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Id") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *DiskAggregatedList) MarshalJSON() ([]byte, error) { + type noMethod DiskAggregatedList + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +// DiskList: A list of Disk resources. +type DiskList struct { + // Id: [Output Only] The unique identifier for the resource. This + // identifier is defined by the server. + Id string `json:"id,omitempty"` + + // Items: [Output Only] A list of persistent disks. + Items []*Disk `json:"items,omitempty"` + + // Kind: [Output Only] Type of resource. Always compute#diskList for + // lists of disks. + Kind string `json:"kind,omitempty"` + + // NextPageToken: [Output Only] This token allows you to get the next + // page of results for list requests. If the number of results is larger + // than maxResults, use the nextPageToken as a value for the query + // parameter pageToken in the next list request. Subsequent list + // requests will have their own nextPageToken to continue paging through + // the results. + NextPageToken string `json:"nextPageToken,omitempty"` + + // SelfLink: [Output Only] Server-defined URL for this resource. + SelfLink string `json:"selfLink,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Id") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. 
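+ //
+ // The nextPageToken contract above suggests a paging loop of roughly
+ // this shape (a sketch; it assumes a *Service value named svc whose
+ // Disks.List method is defined elsewhere in this package):
+ //
+ //   pageToken := ""
+ //   for {
+ //       list, err := svc.Disks.List("my-project", "us-central1-a").PageToken(pageToken).Do()
+ //       if err != nil {
+ //           break // handle the error in real code
+ //       }
+ //       // ... consume list.Items ...
+ //       if pageToken = list.NextPageToken; pageToken == "" {
+ //           break
+ //       }
+ //   }
+ //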
+ ForceSendFields []string `json:"-"` +} + +func (s *DiskList) MarshalJSON() ([]byte, error) { + type noMethod DiskList + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +type DiskMoveRequest struct { + // DestinationZone: The URL of the destination zone to move the disk to. + // This can be a full or partial URL. For example, the following are all + // valid URLs to a zone: + // - https://www.googleapis.com/compute/v1/projects/project/zones/zone + // + // - projects/project/zones/zone + // - zones/zone + DestinationZone string `json:"destinationZone,omitempty"` + + // TargetDisk: The URL of the target disk to move. This can be a full or + // partial URL. For example, the following are all valid URLs to a disk: + // + // - + // https://www.googleapis.com/compute/v1/projects/project/zones/zone/disks/disk + // - projects/project/zones/zone/disks/disk + // - zones/zone/disks/disk + TargetDisk string `json:"targetDisk,omitempty"` + + // ForceSendFields is a list of field names (e.g. "DestinationZone") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *DiskMoveRequest) MarshalJSON() ([]byte, error) { + type noMethod DiskMoveRequest + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +// DiskType: A disk type resource. +type DiskType struct { + // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text + // format. + CreationTimestamp string `json:"creationTimestamp,omitempty"` + + // DefaultDiskSizeGb: [Output Only] Server-defined default disk size in + // GB. + DefaultDiskSizeGb int64 `json:"defaultDiskSizeGb,omitempty,string"` + + // Deprecated: [Output Only] The deprecation status associated with this + // disk type. + Deprecated *DeprecationStatus `json:"deprecated,omitempty"` + + // Description: [Output Only] An optional description of this resource. + Description string `json:"description,omitempty"` + + // Id: [Output Only] The unique identifier for the resource. This + // identifier is defined by the server. + Id uint64 `json:"id,omitempty,string"` + + // Kind: [Output Only] Type of the resource. Always compute#diskType for + // disk types. + Kind string `json:"kind,omitempty"` + + // Name: [Output Only] Name of the resource. + Name string `json:"name,omitempty"` + + // SelfLink: [Output Only] Server-defined URL for the resource. + SelfLink string `json:"selfLink,omitempty"` + + // ValidDiskSize: [Output Only] An optional textual description of the + // valid disk size, such as "10GB-10TB". + ValidDiskSize string `json:"validDiskSize,omitempty"` + + // Zone: [Output Only] URL of the zone where the disk type resides. + Zone string `json:"zone,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "CreationTimestamp") + // to unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. 
This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *DiskType) MarshalJSON() ([]byte, error) { + type noMethod DiskType + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +type DiskTypeAggregatedList struct { + // Id: [Output Only] The unique identifier for the resource. This + // identifier is defined by the server. + Id string `json:"id,omitempty"` + + // Items: [Output Only] A map of scoped disk type lists. + Items map[string]DiskTypesScopedList `json:"items,omitempty"` + + // Kind: [Output Only] Type of resource. Always + // compute#diskTypeAggregatedList. + Kind string `json:"kind,omitempty"` + + // NextPageToken: [Output Only] This token allows you to get the next + // page of results for list requests. If the number of results is larger + // than maxResults, use the nextPageToken as a value for the query + // parameter pageToken in the next list request. Subsequent list + // requests will have their own nextPageToken to continue paging through + // the results. + NextPageToken string `json:"nextPageToken,omitempty"` + + // SelfLink: [Output Only] Server-defined URL for this resource. + SelfLink string `json:"selfLink,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Id") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *DiskTypeAggregatedList) MarshalJSON() ([]byte, error) { + type noMethod DiskTypeAggregatedList + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +// DiskTypeList: Contains a list of disk type resources. +type DiskTypeList struct { + // Id: [Output Only] The unique identifier for the resource. This + // identifier is defined by the server. + Id string `json:"id,omitempty"` + + // Items: [Output Only] A list of Disk Type resources. + Items []*DiskType `json:"items,omitempty"` + + // Kind: [Output Only] Type of resource. Always compute#diskTypeList for + // disk types. + Kind string `json:"kind,omitempty"` + + // NextPageToken: [Output Only] This token allows you to get the next + // page of results for list requests. If the number of results is larger + // than maxResults, use the nextPageToken as a value for the query + // parameter pageToken in the next list request. Subsequent list + // requests will have their own nextPageToken to continue paging through + // the results. + NextPageToken string `json:"nextPageToken,omitempty"` + + // SelfLink: [Output Only] Server-defined URL for this resource. + SelfLink string `json:"selfLink,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Id") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. 
This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *DiskTypeList) MarshalJSON() ([]byte, error) { + type noMethod DiskTypeList + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +type DiskTypesScopedList struct { + // DiskTypes: [Output Only] List of disk types contained in this scope. + DiskTypes []*DiskType `json:"diskTypes,omitempty"` + + // Warning: [Output Only] Informational warning which replaces the list + // of disk types when the list is empty. + Warning *DiskTypesScopedListWarning `json:"warning,omitempty"` + + // ForceSendFields is a list of field names (e.g. "DiskTypes") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *DiskTypesScopedList) MarshalJSON() ([]byte, error) { + type noMethod DiskTypesScopedList + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +// DiskTypesScopedListWarning: [Output Only] Informational warning which +// replaces the list of disk types when the list is empty. +type DiskTypesScopedListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. + // + // Possible values: + // "DEPRECATED_RESOURCE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "INJECTED_KERNELS_DEPRECATED" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_NOT_DELETED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNREACHABLE" + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*DiskTypesScopedListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *DiskTypesScopedListWarning) MarshalJSON() ([]byte, error) { + type noMethod DiskTypesScopedListWarning + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +type DiskTypesScopedListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. 
Other examples might be a key + // indicating a deprecated resource, and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *DiskTypesScopedListWarningData) MarshalJSON() ([]byte, error) { + type noMethod DiskTypesScopedListWarningData + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +type DisksScopedList struct { + // Disks: [Output Only] List of disks contained in this scope. + Disks []*Disk `json:"disks,omitempty"` + + // Warning: [Output Only] Informational warning which replaces the list + // of disks when the list is empty. + Warning *DisksScopedListWarning `json:"warning,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Disks") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *DisksScopedList) MarshalJSON() ([]byte, error) { + type noMethod DisksScopedList + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +// DisksScopedListWarning: [Output Only] Informational warning which +// replaces the list of disks when the list is empty. +type DisksScopedListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. + // + // Possible values: + // "DEPRECATED_RESOURCE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "INJECTED_KERNELS_DEPRECATED" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_NOT_DELETED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNREACHABLE" + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*DisksScopedListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. 
+ // This may be used to include empty fields in Patch requests.
+ ForceSendFields []string `json:"-"`
+}
+
+func (s *DisksScopedListWarning) MarshalJSON() ([]byte, error) {
+ type noMethod DisksScopedListWarning
+ raw := noMethod(*s)
+ return gensupport.MarshalJSON(raw, s.ForceSendFields)
+}
+
+type DisksScopedListWarningData struct {
+ // Key: [Output Only] A key that provides more detail on the warning
+ // being returned. For example, for warnings where there are no results
+ // in a list request for a particular zone, this key might be scope and
+ // the key value might be the zone name. Other examples might be a key
+ // indicating a deprecated resource, and a suggested replacement, or a
+ // warning about invalid network settings (for example, if an instance
+ // attempts to perform IP forwarding but is not enabled for IP
+ // forwarding).
+ Key string `json:"key,omitempty"`
+
+ // Value: [Output Only] A warning data value corresponding to the key.
+ Value string `json:"value,omitempty"`
+
+ // ForceSendFields is a list of field names (e.g. "Key") to
+ // unconditionally include in API requests. By default, fields with
+ // empty values are omitted from API requests. However, any non-pointer,
+ // non-interface field appearing in ForceSendFields will be sent to the
+ // server regardless of whether the field is empty or not. This may be
+ // used to include empty fields in Patch requests.
+ ForceSendFields []string `json:"-"`
+}
+
+func (s *DisksScopedListWarningData) MarshalJSON() ([]byte, error) {
+ type noMethod DisksScopedListWarningData
+ raw := noMethod(*s)
+ return gensupport.MarshalJSON(raw, s.ForceSendFields)
+}
+
+// Firewall: A Firewall resource.
+type Firewall struct {
+ // Allowed: The list of rules specified by this firewall. Each rule
+ // specifies a protocol and port-range tuple that describes a permitted
+ // connection.
+ Allowed []*FirewallAllowed `json:"allowed,omitempty"`
+
+ // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text
+ // format.
+ CreationTimestamp string `json:"creationTimestamp,omitempty"`
+
+ // Description: An optional description of this resource. Provide this
+ // property when you create the resource.
+ Description string `json:"description,omitempty"`
+
+ // Id: [Output Only] The unique identifier for the resource. This
+ // identifier is defined by the server.
+ Id uint64 `json:"id,omitempty,string"`
+
+ // Kind: [Output Only] Type of the resource. Always compute#firewall for
+ // firewall rules.
+ Kind string `json:"kind,omitempty"`
+
+ // Name: Name of the resource; provided by the client when the resource
+ // is created. The name must be 1-63 characters long, and comply with
+ // RFC1035. Specifically, the name must be 1-63 characters long and
+ // match the regular expression [a-z]([-a-z0-9]*[a-z0-9])? which means
+ // the first character must be a lowercase letter, and all following
+ // characters must be a dash, lowercase letter, or digit, except the
+ // last character, which cannot be a dash.
+ Name string `json:"name,omitempty"`
+
+ // Network: URL of the network resource for this firewall rule. This
+ // field is optional when creating a firewall rule; if not specified,
+ // the default network is used:
+ // global/networks/default
+ // If you choose to specify this property, you can specify the network
+ // as a full or partial URL.
+ // For example, the following are all valid URLs:
+ // -
+ // https://www.googleapis.com/compute/v1/projects/myproject/global/networks/my-network
+ // - projects/myproject/global/networks/my-network
+ // - global/networks/default
+ Network string `json:"network,omitempty"`
+
+ // SelfLink: [Output Only] Server-defined URL for the resource.
+ SelfLink string `json:"selfLink,omitempty"`
+
+ // SourceRanges: The IP address blocks that this rule applies to,
+ // expressed in CIDR format. One or both of sourceRanges and sourceTags
+ // may be set.
+ //
+ // If both properties are set, an inbound connection is allowed if the
+ // range matches the sourceRanges OR the tag of the source matches the
+ // sourceTags property. The connection does not need to match both
+ // properties.
+ SourceRanges []string `json:"sourceRanges,omitempty"`
+
+ // SourceTags: A list of instance tags to which this rule applies. One
+ // or both of sourceRanges and sourceTags may be set.
+ //
+ // If both properties are set, an inbound connection is allowed if the
+ // range matches the sourceRanges OR the tag of the source matches the
+ // sourceTags property. The connection does not need to match both
+ // properties.
+ SourceTags []string `json:"sourceTags,omitempty"`
+
+ // TargetTags: A list of instance tags indicating sets of instances
+ // located in the network that may make network connections as specified
+ // in allowed[]. If no targetTags are specified, the firewall rule
+ // applies to all instances on the specified network.
+ TargetTags []string `json:"targetTags,omitempty"`
+
+ // ServerResponse contains the HTTP response code and headers from the
+ // server.
+ googleapi.ServerResponse `json:"-"`
+
+ // ForceSendFields is a list of field names (e.g. "Allowed") to
+ // unconditionally include in API requests. By default, fields with
+ // empty values are omitted from API requests. However, any non-pointer,
+ // non-interface field appearing in ForceSendFields will be sent to the
+ // server regardless of whether the field is empty or not. This may be
+ // used to include empty fields in Patch requests.
+ ForceSendFields []string `json:"-"`
+}
+
+func (s *Firewall) MarshalJSON() ([]byte, error) {
+ type noMethod Firewall
+ raw := noMethod(*s)
+ return gensupport.MarshalJSON(raw, s.ForceSendFields)
+}
+
+type FirewallAllowed struct {
+ // IPProtocol: The IP protocol that is allowed for this rule. The
+ // protocol type is required when creating a firewall. This value can
+ // either be one of the following well-known protocol strings (tcp, udp,
+ // icmp, esp, ah, sctp), or the IP protocol number.
+ IPProtocol string `json:"IPProtocol,omitempty"`
+
+ // Ports: An optional list of ports which are allowed. This field is
+ // only applicable for the UDP or TCP protocol. Each entry must be
+ // either an integer or a range. If not specified, connections through
+ // any port are allowed.
+ //
+ // Example inputs include: ["22"], ["80","443"], and ["12345-12349"].
+ Ports []string `json:"ports,omitempty"`
+
+ // ForceSendFields is a list of field names (e.g. "IPProtocol") to
+ // unconditionally include in API requests. By default, fields with
+ // empty values are omitted from API requests. However, any non-pointer,
+ // non-interface field appearing in ForceSendFields will be sent to the
+ // server regardless of whether the field is empty or not. This may be
+ // used to include empty fields in Patch requests.
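+ //
+ // Combining the two types, a rule admitting HTTP and HTTPS from any
+ // source might be sketched as (illustrative values):
+ //
+ //   fw := &Firewall{
+ //       Name:    "allow-web",
+ //       Network: "global/networks/default",
+ //       Allowed: []*FirewallAllowed{{
+ //           IPProtocol: "tcp",
+ //           Ports:      []string{"80", "443"},
+ //       }},
+ //       SourceRanges: []string{"0.0.0.0/0"},
+ //   }
+ //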
+ ForceSendFields []string `json:"-"`
+}
+
+func (s *FirewallAllowed) MarshalJSON() ([]byte, error) {
+ type noMethod FirewallAllowed
+ raw := noMethod(*s)
+ return gensupport.MarshalJSON(raw, s.ForceSendFields)
+}
+
+// FirewallList: Contains a list of Firewall resources.
+type FirewallList struct {
+ // Id: [Output Only] The unique identifier for the resource. This
+ // identifier is defined by the server.
+ Id string `json:"id,omitempty"`
+
+ // Items: [Output Only] A list of Firewall resources.
+ Items []*Firewall `json:"items,omitempty"`
+
+ // Kind: [Output Only] Type of resource. Always compute#firewallList for
+ // lists of firewalls.
+ Kind string `json:"kind,omitempty"`
+
+ // NextPageToken: [Output Only] This token allows you to get the next
+ // page of results for list requests. If the number of results is larger
+ // than maxResults, use the nextPageToken as a value for the query
+ // parameter pageToken in the next list request. Subsequent list
+ // requests will have their own nextPageToken to continue paging through
+ // the results.
+ NextPageToken string `json:"nextPageToken,omitempty"`
+
+ // SelfLink: [Output Only] Server-defined URL for this resource.
+ SelfLink string `json:"selfLink,omitempty"`
+
+ // ServerResponse contains the HTTP response code and headers from the
+ // server.
+ googleapi.ServerResponse `json:"-"`
+
+ // ForceSendFields is a list of field names (e.g. "Id") to
+ // unconditionally include in API requests. By default, fields with
+ // empty values are omitted from API requests. However, any non-pointer,
+ // non-interface field appearing in ForceSendFields will be sent to the
+ // server regardless of whether the field is empty or not. This may be
+ // used to include empty fields in Patch requests.
+ ForceSendFields []string `json:"-"`
+}
+
+func (s *FirewallList) MarshalJSON() ([]byte, error) {
+ type noMethod FirewallList
+ raw := noMethod(*s)
+ return gensupport.MarshalJSON(raw, s.ForceSendFields)
+}
+
+// ForwardingRule: A ForwardingRule resource specifies which pool of
+// target virtual machines to forward a packet to if it matches the
+// given [IPAddress, IPProtocol, portRange] tuple.
+type ForwardingRule struct {
+ // IPAddress: Value of the reserved IP address that this forwarding rule
+ // is serving on behalf of. For global forwarding rules, the address
+ // must be a global IP; for regional forwarding rules, the address must
+ // live in the same region as the forwarding rule. If left empty
+ // (default value), an ephemeral IP from the same scope (global or
+ // regional) will be assigned.
+ IPAddress string `json:"IPAddress,omitempty"`
+
+ // IPProtocol: The IP protocol to which this rule applies. Valid options
+ // are TCP, UDP, ESP, AH, or SCTP.
+ //
+ // Possible values:
+ // "AH"
+ // "ESP"
+ // "SCTP"
+ // "TCP"
+ // "UDP"
+ IPProtocol string `json:"IPProtocol,omitempty"`
+
+ // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text
+ // format.
+ CreationTimestamp string `json:"creationTimestamp,omitempty"`
+
+ // Description: An optional description of this resource. Provide this
+ // property when you create the resource.
+ Description string `json:"description,omitempty"`
+
+ // Id: [Output Only] The unique identifier for the resource. This
+ // identifier is defined by the server.
+ Id uint64 `json:"id,omitempty,string"`
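+
+ // A regional rule steering TCP port 80 traffic at a target pool might
+ // be sketched as (illustrative values; PortRange and Target are
+ // defined below):
+ //
+ //   rule := &ForwardingRule{
+ //       Name:       "web-rule",
+ //       IPProtocol: "TCP",
+ //       PortRange:  "80-80",
+ //       Target:     "regions/us-central1/targetPools/web-pool",
+ //   }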
+
+ // Kind: [Output Only] Type of the resource. Always
+ // compute#forwardingRule for Forwarding Rule resources.
+ Kind string `json:"kind,omitempty"`
+
+ // Name: Name of the resource; provided by the client when the resource
+ // is created. The name must be 1-63 characters long, and comply with
+ // RFC1035. Specifically, the name must be 1-63 characters long and
+ // match the regular expression [a-z]([-a-z0-9]*[a-z0-9])? which means
+ // the first character must be a lowercase letter, and all following
+ // characters must be a dash, lowercase letter, or digit, except the
+ // last character, which cannot be a dash.
+ Name string `json:"name,omitempty"`
+
+ // PortRange: Applicable only when IPProtocol is TCP, UDP, or SCTP; only
+ // packets addressed to ports in the specified range will be forwarded
+ // to the target. Forwarding rules with the same [IPAddress, IPProtocol]
+ // pair must have disjoint port ranges.
+ PortRange string `json:"portRange,omitempty"`
+
+ // Region: [Output Only] URL of the region where the regional forwarding
+ // rule resides. This field is not applicable to global forwarding
+ // rules.
+ Region string `json:"region,omitempty"`
+
+ // SelfLink: [Output Only] Server-defined URL for the resource.
+ SelfLink string `json:"selfLink,omitempty"`
+
+ // Target: The URL of the target resource to receive the matched
+ // traffic. For regional forwarding rules, this target must live in the
+ // same region as the forwarding rule. For global forwarding rules, this
+ // target must be a global TargetHttpProxy or TargetHttpsProxy resource.
+ Target string `json:"target,omitempty"`
+
+ // ServerResponse contains the HTTP response code and headers from the
+ // server.
+ googleapi.ServerResponse `json:"-"`
+
+ // ForceSendFields is a list of field names (e.g. "IPAddress") to
+ // unconditionally include in API requests. By default, fields with
+ // empty values are omitted from API requests. However, any non-pointer,
+ // non-interface field appearing in ForceSendFields will be sent to the
+ // server regardless of whether the field is empty or not. This may be
+ // used to include empty fields in Patch requests.
+ ForceSendFields []string `json:"-"`
+}
+
+func (s *ForwardingRule) MarshalJSON() ([]byte, error) {
+ type noMethod ForwardingRule
+ raw := noMethod(*s)
+ return gensupport.MarshalJSON(raw, s.ForceSendFields)
+}
+
+type ForwardingRuleAggregatedList struct {
+ // Id: [Output Only] The unique identifier for the resource. This
+ // identifier is defined by the server.
+ Id string `json:"id,omitempty"`
+
+ // Items: A map of scoped forwarding rule lists.
+ Items map[string]ForwardingRulesScopedList `json:"items,omitempty"`
+
+ // Kind: Type of resource.
+ Kind string `json:"kind,omitempty"`
+
+ // NextPageToken: [Output Only] This token allows you to get the next
+ // page of results for list requests. If the number of results is larger
+ // than maxResults, use the nextPageToken as a value for the query
+ // parameter pageToken in the next list request. Subsequent list
+ // requests will have their own nextPageToken to continue paging through
+ // the results.
+ NextPageToken string `json:"nextPageToken,omitempty"`
+
+ // SelfLink: [Output Only] Server-defined URL for this resource.
+ SelfLink string `json:"selfLink,omitempty"`
+
+ // ServerResponse contains the HTTP response code and headers from the
+ // server.
+ googleapi.ServerResponse `json:"-"`
+
+ // ForceSendFields is a list of field names (e.g. "Id") to
+ // unconditionally include in API requests. By default, fields with
+ // empty values are omitted from API requests.
However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *ForwardingRuleAggregatedList) MarshalJSON() ([]byte, error) { + type noMethod ForwardingRuleAggregatedList + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +// ForwardingRuleList: Contains a list of ForwardingRule resources. +type ForwardingRuleList struct { + // Id: [Output Only] Unique identifier for the resource. Set by the + // server. + Id string `json:"id,omitempty"` + + // Items: A list of ForwardingRule resources. + Items []*ForwardingRule `json:"items,omitempty"` + + // Kind: Type of resource. + Kind string `json:"kind,omitempty"` + + // NextPageToken: [Output Only] This token allows you to get the next + // page of results for list requests. If the number of results is larger + // than maxResults, use the nextPageToken as a value for the query + // parameter pageToken in the next list request. Subsequent list + // requests will have their own nextPageToken to continue paging through + // the results. + NextPageToken string `json:"nextPageToken,omitempty"` + + // SelfLink: [Output Only] Server-defined URL for this resource. + SelfLink string `json:"selfLink,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Id") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *ForwardingRuleList) MarshalJSON() ([]byte, error) { + type noMethod ForwardingRuleList + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +type ForwardingRulesScopedList struct { + // ForwardingRules: List of forwarding rules contained in this scope. + ForwardingRules []*ForwardingRule `json:"forwardingRules,omitempty"` + + // Warning: Informational warning which replaces the list of forwarding + // rules when the list is empty. + Warning *ForwardingRulesScopedListWarning `json:"warning,omitempty"` + + // ForceSendFields is a list of field names (e.g. "ForwardingRules") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *ForwardingRulesScopedList) MarshalJSON() ([]byte, error) { + type noMethod ForwardingRulesScopedList + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +// ForwardingRulesScopedListWarning: Informational warning which +// replaces the list of forwarding rules when the list is empty. +type ForwardingRulesScopedListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. 
+ // + // Possible values: + // "DEPRECATED_RESOURCE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "INJECTED_KERNELS_DEPRECATED" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_NOT_DELETED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNREACHABLE" + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*ForwardingRulesScopedListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *ForwardingRulesScopedListWarning) MarshalJSON() ([]byte, error) { + type noMethod ForwardingRulesScopedListWarning + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +type ForwardingRulesScopedListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource, and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *ForwardingRulesScopedListWarningData) MarshalJSON() ([]byte, error) { + type noMethod ForwardingRulesScopedListWarningData + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +type HealthCheckReference struct { + HealthCheck string `json:"healthCheck,omitempty"` + + // ForceSendFields is a list of field names (e.g. "HealthCheck") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. 
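+ //
+ // Illustration only: a reference simply wraps a health check URL, e.g.
+ //
+ //   ref := &HealthCheckReference{HealthCheck: "global/httpHealthChecks/basic-check"}
+ //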
+ ForceSendFields []string `json:"-"`
+}
+
+func (s *HealthCheckReference) MarshalJSON() ([]byte, error) {
+ type noMethod HealthCheckReference
+ raw := noMethod(*s)
+ return gensupport.MarshalJSON(raw, s.ForceSendFields)
+}
+
+type HealthStatus struct {
+ // HealthState: Health state of the instance.
+ //
+ // Possible values:
+ // "HEALTHY"
+ // "UNHEALTHY"
+ HealthState string `json:"healthState,omitempty"`
+
+ // Instance: URL of the instance resource.
+ Instance string `json:"instance,omitempty"`
+
+ // IpAddress: The IP address represented by this resource.
+ IpAddress string `json:"ipAddress,omitempty"`
+
+ // Port: The port on the instance.
+ Port int64 `json:"port,omitempty"`
+
+ // ForceSendFields is a list of field names (e.g. "HealthState") to
+ // unconditionally include in API requests. By default, fields with
+ // empty values are omitted from API requests. However, any non-pointer,
+ // non-interface field appearing in ForceSendFields will be sent to the
+ // server regardless of whether the field is empty or not. This may be
+ // used to include empty fields in Patch requests.
+ ForceSendFields []string `json:"-"`
+}
+
+func (s *HealthStatus) MarshalJSON() ([]byte, error) {
+ type noMethod HealthStatus
+ raw := noMethod(*s)
+ return gensupport.MarshalJSON(raw, s.ForceSendFields)
+}
+
+// HostRule: A host-matching rule for a URL. If matched, the named
+// PathMatcher is used to select the BackendService.
+type HostRule struct {
+ // Description: An optional description of this resource. Provide this
+ // property when you create the resource.
+ Description string `json:"description,omitempty"`
+
+ // Hosts: The list of host patterns to match. They must be valid
+ // hostnames, except * will match any string of ([a-z0-9-.]*). In that
+ // case, * must be the first character and must be followed in the
+ // pattern by either - or ..
+ Hosts []string `json:"hosts,omitempty"`
+
+ // PathMatcher: The name of the PathMatcher to use to match the path
+ // portion of the URL if the hostRule matches the URL's host portion.
+ PathMatcher string `json:"pathMatcher,omitempty"`
+
+ // ForceSendFields is a list of field names (e.g. "Description") to
+ // unconditionally include in API requests. By default, fields with
+ // empty values are omitted from API requests. However, any non-pointer,
+ // non-interface field appearing in ForceSendFields will be sent to the
+ // server regardless of whether the field is empty or not. This may be
+ // used to include empty fields in Patch requests.
+ ForceSendFields []string `json:"-"`
+}
+
+func (s *HostRule) MarshalJSON() ([]byte, error) {
+ type noMethod HostRule
+ raw := noMethod(*s)
+ return gensupport.MarshalJSON(raw, s.ForceSendFields)
+}
+
+// HttpHealthCheck: An HttpHealthCheck resource. This resource defines a
+// template for how individual instances should be checked for health,
+// via HTTP.
+type HttpHealthCheck struct {
+ // CheckIntervalSec: How often (in seconds) to send a health check. The
+ // default value is 5 seconds.
+ CheckIntervalSec int64 `json:"checkIntervalSec,omitempty"`
+
+ // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text
+ // format.
+ CreationTimestamp string `json:"creationTimestamp,omitempty"`
+
+ // Description: An optional description of this resource. Provide this
+ // property when you create the resource.
+ Description string `json:"description,omitempty"`
+
+ // HealthyThreshold: A so-far unhealthy instance will be marked healthy
+ // after this many consecutive successes. The default value is 2.
+ HealthyThreshold int64 `json:"healthyThreshold,omitempty"`
+
+ // Host: The value of the host header in the HTTP health check request.
+ // If left empty (default value), the public IP on behalf of which this
+ // health check is performed will be used.
+ Host string `json:"host,omitempty"`
+
+ // Id: [Output Only] The unique identifier for the resource. This
+ // identifier is defined by the server.
+ Id uint64 `json:"id,omitempty,string"`
+
+ // Kind: Type of the resource.
+ Kind string `json:"kind,omitempty"`
+
+ // Name: Name of the resource. Provided by the client when the resource
+ // is created. The name must be 1-63 characters long, and comply with
+ // RFC1035. Specifically, the name must be 1-63 characters long and
+ // match the regular expression [a-z]([-a-z0-9]*[a-z0-9])? which means
+ // the first character must be a lowercase letter, and all following
+ // characters must be a dash, lowercase letter, or digit, except the
+ // last character, which cannot be a dash.
+ Name string `json:"name,omitempty"`
+
+ // Port: The TCP port number for the HTTP health check request. The
+ // default value is 80.
+ Port int64 `json:"port,omitempty"`
+
+ // RequestPath: The request path of the HTTP health check request. The
+ // default value is "/".
+ RequestPath string `json:"requestPath,omitempty"`
+
+ // SelfLink: [Output Only] Server-defined URL for the resource.
+ SelfLink string `json:"selfLink,omitempty"`
+
+ // TimeoutSec: How long (in seconds) to wait before claiming failure.
+ // The default value is 5 seconds. It is invalid for timeoutSec to have
+ // a greater value than checkIntervalSec.
+ TimeoutSec int64 `json:"timeoutSec,omitempty"`
+
+ // UnhealthyThreshold: A so-far healthy instance will be marked
+ // unhealthy after this many consecutive failures. The default value is
+ // 2.
+ UnhealthyThreshold int64 `json:"unhealthyThreshold,omitempty"`
+
+ // ServerResponse contains the HTTP response code and headers from the
+ // server.
+ googleapi.ServerResponse `json:"-"`
+
+ // ForceSendFields is a list of field names (e.g. "CheckIntervalSec") to
+ // unconditionally include in API requests. By default, fields with
+ // empty values are omitted from API requests. However, any non-pointer,
+ // non-interface field appearing in ForceSendFields will be sent to the
+ // server regardless of whether the field is empty or not. This may be
+ // used to include empty fields in Patch requests.
+ ForceSendFields []string `json:"-"`
+}
+
+func (s *HttpHealthCheck) MarshalJSON() ([]byte, error) {
+ type noMethod HttpHealthCheck
+ raw := noMethod(*s)
+ return gensupport.MarshalJSON(raw, s.ForceSendFields)
+}
+
+// HttpHealthCheckList: Contains a list of HttpHealthCheck resources.
+type HttpHealthCheckList struct {
+ // Id: [Output Only] Unique identifier for the resource. Defined by the
+ // server.
+ Id string `json:"id,omitempty"`
+
+ // Items: A list of HttpHealthCheck resources.
+ Items []*HttpHealthCheck `json:"items,omitempty"`
+
+ // Kind: Type of resource.
+ Kind string `json:"kind,omitempty"`
+
+ // NextPageToken: [Output Only] This token allows you to get the next
+ // page of results for list requests. If the number of results is larger
+ // than maxResults, use the nextPageToken as a value for the query
+ // parameter pageToken in the next list request. Subsequent list
+ // requests will have their own nextPageToken to continue paging through
+ // the results.
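+ //
+ // A hedged pagination sketch (illustrative only; svc and project are
+ // placeholders, and HttpHealthChecks.List is the generated call
+ // defined elsewhere in this file):
+ //
+ //	call := svc.HttpHealthChecks.List(project)
+ //	for {
+ //		page, err := call.Do()
+ //		if err != nil {
+ //			break // handle the error in real code
+ //		}
+ //		// ... consume page.Items ...
+ //		if page.NextPageToken == "" {
+ //			break
+ //		}
+ //		call.PageToken(page.NextPageToken)
+ //	}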
+ NextPageToken string `json:"nextPageToken,omitempty"` + + // SelfLink: [Output Only] Server-defined URL for this resource. + SelfLink string `json:"selfLink,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Id") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *HttpHealthCheckList) MarshalJSON() ([]byte, error) { + type noMethod HttpHealthCheckList + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +// HttpsHealthCheck: An HttpsHealthCheck resource. This resource defines +// a template for how individual instances should be checked for health, +// via HTTPS. +type HttpsHealthCheck struct { + // CheckIntervalSec: How often (in seconds) to send a health check. The + // default value is 5 seconds. + CheckIntervalSec int64 `json:"checkIntervalSec,omitempty"` + + // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text + // format. + CreationTimestamp string `json:"creationTimestamp,omitempty"` + + // Description: An optional description of this resource. Provide this + // property when you create the resource. + Description string `json:"description,omitempty"` + + // HealthyThreshold: A so-far unhealthy instance will be marked healthy + // after this many consecutive successes. The default value is 2. + HealthyThreshold int64 `json:"healthyThreshold,omitempty"` + + // Host: The value of the host header in the HTTPS health check request. + // If left empty (default value), the public IP on behalf of which this + // health check is performed will be used. + Host string `json:"host,omitempty"` + + // Id: [Output Only] The unique identifier for the resource. This + // identifier is defined by the server. + Id uint64 `json:"id,omitempty,string"` + + // Kind: Type of the resource. + Kind string `json:"kind,omitempty"` + + // Name: Name of the resource. Provided by the client when the resource + // is created. The name must be 1-63 characters long, and comply with + // RFC1035. Specifically, the name must be 1-63 characters long and + // match the regular expression [a-z]([-a-z0-9]*[a-z0-9])? which means + // the first character must be a lowercase letter, and all following + // characters must be a dash, lowercase letter, or digit, except the + // last character, which cannot be a dash. + Name string `json:"name,omitempty"` + + // Port: The TCP port number for the HTTPS health check request. The + // default value is 443. + Port int64 `json:"port,omitempty"` + + // RequestPath: The request path of the HTTPS health check request. The + // default value is "/". + RequestPath string `json:"requestPath,omitempty"` + + // SelfLink: [Output Only] Server-defined URL for the resource. + SelfLink string `json:"selfLink,omitempty"` + + // TimeoutSec: How long (in seconds) to wait before claiming failure. + // The default value is 5 seconds. It is invalid for timeoutSec to have + // a greater value than checkIntervalSec. 
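+ //
+ // Illustrative values satisfying that constraint (a sketch, not
+ // generated code; the name is hypothetical):
+ //
+ //	check := &HttpsHealthCheck{
+ //		Name:             "basic-https-check",
+ //		Port:             443,
+ //		CheckIntervalSec: 5,
+ //		TimeoutSec:       5, // must not exceed CheckIntervalSec
+ //	}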
+ TimeoutSec int64 `json:"timeoutSec,omitempty"` + + // UnhealthyThreshold: A so-far healthy instance will be marked + // unhealthy after this many consecutive failures. The default value is + // 2. + UnhealthyThreshold int64 `json:"unhealthyThreshold,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "CheckIntervalSec") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *HttpsHealthCheck) MarshalJSON() ([]byte, error) { + type noMethod HttpsHealthCheck + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +// HttpsHealthCheckList: Contains a list of HttpsHealthCheck resources. +type HttpsHealthCheckList struct { + // Id: [Output Only] Unique identifier for the resource; defined by the + // server. + Id string `json:"id,omitempty"` + + // Items: A list of HttpsHealthCheck resources. + Items []*HttpsHealthCheck `json:"items,omitempty"` + + // Kind: Type of resource. + Kind string `json:"kind,omitempty"` + + // NextPageToken: [Output Only] This token allows you to get the next + // page of results for list requests. If the number of results is larger + // than maxResults, use the nextPageToken as a value for the query + // parameter pageToken in the next list request. Subsequent list + // requests will have their own nextPageToken to continue paging through + // the results. + NextPageToken string `json:"nextPageToken,omitempty"` + + // SelfLink: [Output Only] Server-defined URL for this resource. + SelfLink string `json:"selfLink,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Id") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *HttpsHealthCheckList) MarshalJSON() ([]byte, error) { + type noMethod HttpsHealthCheckList + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +// Image: An Image resource. +type Image struct { + // ArchiveSizeBytes: Size of the image tar.gz archive stored in Google + // Cloud Storage (in bytes). + ArchiveSizeBytes int64 `json:"archiveSizeBytes,omitempty,string"` + + // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text + // format. + CreationTimestamp string `json:"creationTimestamp,omitempty"` + + // Deprecated: The deprecation status associated with this image. + Deprecated *DeprecationStatus `json:"deprecated,omitempty"` + + // Description: An optional description of this resource. Provide this + // property when you create the resource. + Description string `json:"description,omitempty"` + + // DiskSizeGb: Size of the image when restored onto a persistent disk + // (in GB). 
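+ //
+ // Note: 64-bit numeric fields carrying the ",string" JSON option,
+ // such as ArchiveSizeBytes above and DiskSizeGb below, travel as
+ // quoted decimal strings to avoid precision loss. An illustrative
+ // round-trip with encoding/json:
+ //
+ //	var img Image
+ //	_ = json.Unmarshal([]byte(`{"diskSizeGb":"10"}`), &img)
+ //	// img.DiskSizeGb == 10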
+ DiskSizeGb int64 `json:"diskSizeGb,omitempty,string"`
+
+ // Id: [Output Only] The unique identifier for the resource. This
+ // identifier is defined by the server.
+ Id uint64 `json:"id,omitempty,string"`
+
+ // Kind: [Output Only] Type of the resource. Always compute#image for
+ // images.
+ Kind string `json:"kind,omitempty"`
+
+ // Licenses: Any applicable publicly visible licenses.
+ Licenses []string `json:"licenses,omitempty"`
+
+ // Name: Name of the resource; provided by the client when the resource
+ // is created. The name must be 1-63 characters long, and comply with
+ // RFC1035. Specifically, the name must be 1-63 characters long and
+ // match the regular expression [a-z]([-a-z0-9]*[a-z0-9])? which means
+ // the first character must be a lowercase letter, and all following
+ // characters must be a dash, lowercase letter, or digit, except the
+ // last character, which cannot be a dash.
+ Name string `json:"name,omitempty"`
+
+ // RawDisk: The parameters of the raw disk image.
+ RawDisk *ImageRawDisk `json:"rawDisk,omitempty"`
+
+ // SelfLink: [Output Only] Server-defined URL for the resource.
+ SelfLink string `json:"selfLink,omitempty"`
+
+ // SourceDisk: URL of the source disk used to create this image. This
+ // can be a full or valid partial URL. You must provide either this
+ // property or the rawDisk.source property but not both to create an
+ // image. For example, the following are valid values:
+ // -
+ // https://www.googleapis.com/compute/v1/projects/project/zones/zone/disks/disk
+ // - projects/project/zones/zone/disks/disk
+ // - zones/zone/disks/disk
+ SourceDisk string `json:"sourceDisk,omitempty"`
+
+ // SourceDiskId: The ID value of the disk used to create this image.
+ // This value may be used to determine whether the image was taken from
+ // the current or a previous instance of a given disk name.
+ SourceDiskId string `json:"sourceDiskId,omitempty"`
+
+ // SourceType: The type of the image used to create this disk. The
+ // default and only value is RAW.
+ //
+ // Possible values:
+ // "RAW" (default)
+ SourceType string `json:"sourceType,omitempty"`
+
+ // Status: [Output Only] The status of the image. An image can be used
+ // to create other resources, such as instances, only after the image
+ // has been successfully created and the status is set to READY.
+ // Possible values are FAILED, PENDING, or READY.
+ //
+ // Possible values:
+ // "FAILED"
+ // "PENDING"
+ // "READY"
+ Status string `json:"status,omitempty"`
+
+ // ServerResponse contains the HTTP response code and headers from the
+ // server.
+ googleapi.ServerResponse `json:"-"`
+
+ // ForceSendFields is a list of field names (e.g. "ArchiveSizeBytes") to
+ // unconditionally include in API requests. By default, fields with
+ // empty values are omitted from API requests. However, any non-pointer,
+ // non-interface field appearing in ForceSendFields will be sent to the
+ // server regardless of whether the field is empty or not. This may be
+ // used to include empty fields in Patch requests.
+ ForceSendFields []string `json:"-"`
+}
+
+func (s *Image) MarshalJSON() ([]byte, error) {
+ type noMethod Image
+ raw := noMethod(*s)
+ return gensupport.MarshalJSON(raw, s.ForceSendFields)
+}
+
+// ImageRawDisk: The parameters of the raw disk image.
+type ImageRawDisk struct {
+ // ContainerType: The format used to encode and transmit the block
+ // device, which should be TAR. This is just a container and
+ // transmission format and not a runtime format.
Provided by the client + // when the disk image is created. + // + // Possible values: + // "TAR" + ContainerType string `json:"containerType,omitempty"` + + // Sha1Checksum: An optional SHA1 checksum of the disk image before + // unpackaging; provided by the client when the disk image is created. + Sha1Checksum string `json:"sha1Checksum,omitempty"` + + // Source: The full Google Cloud Storage URL where the disk image is + // stored. You must provide either this property or the sourceDisk + // property but not both. + Source string `json:"source,omitempty"` + + // ForceSendFields is a list of field names (e.g. "ContainerType") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *ImageRawDisk) MarshalJSON() ([]byte, error) { + type noMethod ImageRawDisk + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +// ImageList: Contains a list of Image resources. +type ImageList struct { + // Id: [Output Only] The unique identifier for the resource. This + // identifier is defined by the server. + Id string `json:"id,omitempty"` + + // Items: [Output Only] A list of Image resources. + Items []*Image `json:"items,omitempty"` + + // Kind: Type of resource. + Kind string `json:"kind,omitempty"` + + // NextPageToken: [Output Only] This token allows you to get the next + // page of results for list requests. If the number of results is larger + // than maxResults, use the nextPageToken as a value for the query + // parameter pageToken in the next list request. Subsequent list + // requests will have their own nextPageToken to continue paging through + // the results. + NextPageToken string `json:"nextPageToken,omitempty"` + + // SelfLink: [Output Only] Server-defined URL for this resource. + SelfLink string `json:"selfLink,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Id") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *ImageList) MarshalJSON() ([]byte, error) { + type noMethod ImageList + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +// Instance: An Instance resource. +type Instance struct { + // CanIpForward: Allows this instance to send and receive packets with + // non-matching destination or source IPs. This is required if you plan + // to use this instance to forward routes. For more information, see + // Enabling IP Forwarding. + CanIpForward bool `json:"canIpForward,omitempty"` + + // CpuPlatform: [Output Only] The CPU platform used by this instance. + CpuPlatform string `json:"cpuPlatform,omitempty"` + + // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text + // format. 
+ CreationTimestamp string `json:"creationTimestamp,omitempty"`
+
+ // Description: An optional textual description of the resource;
+ // provided by the client when the resource is created.
+ Description string `json:"description,omitempty"`
+
+ // Disks: Array of disks associated with this instance. Persistent disks
+ // must be created before you can assign them.
+ Disks []*AttachedDisk `json:"disks,omitempty"`
+
+ // Id: [Output Only] Unique identifier for the resource. This identifier
+ // is defined by the server.
+ Id uint64 `json:"id,omitempty,string"`
+
+ // Kind: [Output Only] Type of the resource. Always compute#instance for
+ // instances.
+ Kind string `json:"kind,omitempty"`
+
+ // MachineType: Full or partial URL of the machine type resource to use
+ // for this instance, in the format: zones/zone/machineTypes/
+ // machine-type. This is provided by the client when the instance is
+ // created. For example, the following is a valid partial URL to a
+ // predefined machine
+ // type:
+ //
+ // zones/us-central1-f/machineTypes/n1-standard-1
+ //
+ // To create a custom machine type, provide a URL to a machine type in
+ // the following format, where CPUS is 1 or an even number up to 32 (2,
+ // 4, 6, ... 24, etc), and MEMORY is the total memory for this instance.
+ // Memory must be a multiple of 256 MB and must be supplied in MB (e.g.
+ // 5 GB of memory is 5120
+ // MB):
+ //
+ // zones/zone/machineTypes/custom-CPUS-MEMORY
+ //
+ // For example: zones/us-central1-f/machineTypes/custom-4-5120
+ //
+ // For a full list of restrictions, read the Specifications for custom
+ // machine types.
+ MachineType string `json:"machineType,omitempty"`
+
+ // Metadata: The metadata key/value pairs assigned to this instance.
+ // This includes custom metadata and predefined keys.
+ Metadata *Metadata `json:"metadata,omitempty"`
+
+ // Name: Name of the resource; provided by the client when the resource
+ // is created. The name must be 1-63 characters long, and comply with
+ // RFC1035. Specifically, the name must be 1-63 characters long and
+ // match the regular expression [a-z]([-a-z0-9]*[a-z0-9])? which means
+ // the first character must be a lowercase letter, and all following
+ // characters must be a dash, lowercase letter, or digit, except the
+ // last character, which cannot be a dash.
+ Name string `json:"name,omitempty"`
+
+ // NetworkInterfaces: An array of configurations for this interface.
+ // This specifies how this interface is configured to interact with
+ // other network services, such as connecting to the internet.
+ NetworkInterfaces []*NetworkInterface `json:"networkInterfaces,omitempty"`
+
+ // Scheduling: Scheduling options for this instance.
+ Scheduling *Scheduling `json:"scheduling,omitempty"`
+
+ // SelfLink: [Output Only] Server-defined URL for this resource.
+ SelfLink string `json:"selfLink,omitempty"`
+
+ // ServiceAccounts: A list of service accounts, with their specified
+ // scopes, authorized for this instance. Service accounts generate
+ // access tokens that can be accessed through the metadata server and
+ // used to authenticate applications on the instance. See Authenticating
+ // from Google Compute Engine for more information.
+ ServiceAccounts []*ServiceAccount `json:"serviceAccounts,omitempty"`
+
+ // Status: [Output Only] The status of the instance. One of the
+ // following values: PROVISIONING, STAGING, RUNNING, STOPPING,
+ // SUSPENDING, SUSPENDED, STOPPED, and TERMINATED.
+ //
+ // Possible values:
+ // "PROVISIONING"
+ // "RUNNING"
+ // "STAGING"
+ // "STOPPED"
+ // "STOPPING"
+ // "SUSPENDED"
+ // "SUSPENDING"
+ // "TERMINATED"
+ Status string `json:"status,omitempty"`
+
+ // StatusMessage: [Output Only] An optional, human-readable explanation
+ // of the status.
+ StatusMessage string `json:"statusMessage,omitempty"`
+
+ // Tags: A list of tags to apply to this instance. Tags are used to
+ // identify valid sources or targets for network firewalls and are
+ // specified by the client during instance creation. The tags can be
+ // later modified by the setTags method. Each tag within the list must
+ // comply with RFC1035.
+ Tags *Tags `json:"tags,omitempty"`
+
+ // Zone: [Output Only] URL of the zone where the instance resides.
+ Zone string `json:"zone,omitempty"`
+
+ // ServerResponse contains the HTTP response code and headers from the
+ // server.
+ googleapi.ServerResponse `json:"-"`
+
+ // ForceSendFields is a list of field names (e.g. "CanIpForward") to
+ // unconditionally include in API requests. By default, fields with
+ // empty values are omitted from API requests. However, any non-pointer,
+ // non-interface field appearing in ForceSendFields will be sent to the
+ // server regardless of whether the field is empty or not. This may be
+ // used to include empty fields in Patch requests.
+ ForceSendFields []string `json:"-"`
+}
+
+func (s *Instance) MarshalJSON() ([]byte, error) {
+ type noMethod Instance
+ raw := noMethod(*s)
+ return gensupport.MarshalJSON(raw, s.ForceSendFields)
+}
+
+type InstanceAggregatedList struct {
+ // Id: [Output Only] The unique identifier for the resource. This
+ // identifier is defined by the server.
+ Id string `json:"id,omitempty"`
+
+ // Items: [Output Only] A map of scoped instance lists.
+ Items map[string]InstancesScopedList `json:"items,omitempty"`
+
+ // Kind: [Output Only] Type of resource. Always
+ // compute#instanceAggregatedList for aggregated lists of Instance
+ // resources.
+ Kind string `json:"kind,omitempty"`
+
+ // NextPageToken: [Output Only] A token used to continue a truncated
+ // list request.
+ NextPageToken string `json:"nextPageToken,omitempty"`
+
+ // SelfLink: [Output Only] Server-defined URL for this resource.
+ SelfLink string `json:"selfLink,omitempty"`
+
+ // ServerResponse contains the HTTP response code and headers from the
+ // server.
+ googleapi.ServerResponse `json:"-"`
+
+ // ForceSendFields is a list of field names (e.g. "Id") to
+ // unconditionally include in API requests. By default, fields with
+ // empty values are omitted from API requests. However, any non-pointer,
+ // non-interface field appearing in ForceSendFields will be sent to the
+ // server regardless of whether the field is empty or not. This may be
+ // used to include empty fields in Patch requests.
+ ForceSendFields []string `json:"-"`
+}
+
+func (s *InstanceAggregatedList) MarshalJSON() ([]byte, error) {
+ type noMethod InstanceAggregatedList
+ raw := noMethod(*s)
+ return gensupport.MarshalJSON(raw, s.ForceSendFields)
+}
+
+type InstanceGroup struct {
+ // CreationTimestamp: [Output Only] The creation timestamp for this
+ // instance group in RFC3339 text format.
+ CreationTimestamp string `json:"creationTimestamp,omitempty"`
+
+ // Description: An optional description of this resource. Provide this
+ // property when you create the resource.
+ Description string `json:"description,omitempty"`
+
+ // Fingerprint: [Output Only] The fingerprint of the named ports.
The + // system uses this fingerprint to detect conflicts when multiple users + // change the named ports concurrently. + Fingerprint string `json:"fingerprint,omitempty"` + + // Id: [Output Only] A unique identifier for this resource type. The + // server generates this identifier. + Id uint64 `json:"id,omitempty,string"` + + // Kind: [Output Only] The resource type, which is always + // compute#instanceGroup for instance groups. + Kind string `json:"kind,omitempty"` + + // Name: The name of the instance group. The name must be 1-63 + // characters long, and comply with RFC1035. + Name string `json:"name,omitempty"` + + // NamedPorts: Assigns a name to a port number. For example: {name: + // "http", port: 80} + // + // This allows the system to reference ports by the assigned name + // instead of a port number. Named ports can also contain multiple + // ports. For example: [{name: "http", port: 80},{name: "http", port: + // 8080}] + // + // Named ports apply to all instances in this instance group. + NamedPorts []*NamedPort `json:"namedPorts,omitempty"` + + // Network: [Output Only] The URL of the network to which all instances + // in the instance group belong. + Network string `json:"network,omitempty"` + + // SelfLink: [Output Only] The URL for this instance group. The server + // generates this URL. + SelfLink string `json:"selfLink,omitempty"` + + // Size: [Output Only] The total number of instances in the instance + // group. + Size int64 `json:"size,omitempty"` + + // Zone: [Output Only] The URL of the zone where the instance group is + // located. + Zone string `json:"zone,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "CreationTimestamp") + // to unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *InstanceGroup) MarshalJSON() ([]byte, error) { + type noMethod InstanceGroup + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +type InstanceGroupAggregatedList struct { + // Id: [Output Only] A unique identifier for this aggregated list of + // instance groups. The server generates this identifier. + Id string `json:"id,omitempty"` + + // Items: A map of scoped instance group lists. + Items map[string]InstanceGroupsScopedList `json:"items,omitempty"` + + // Kind: [Output Only] The resource type, which is always + // compute#instanceGroupAggregatedList for aggregated lists of instance + // groups. + Kind string `json:"kind,omitempty"` + + // NextPageToken: [Output Only] This token allows you to get the next + // page of results for list requests. If the number of results is larger + // than maxResults, use the nextPageToken as a value for the query + // parameter pageToken in the next list request. Subsequent list + // requests will have their own nextPageToken to continue paging through + // the results. + NextPageToken string `json:"nextPageToken,omitempty"` + + // SelfLink: [Output Only] The URL for this resource type. The server + // generates this URL. 
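+ //
+ // An illustrative traversal (a sketch; agg is a placeholder for a
+ // fetched InstanceGroupAggregatedList, whose Items map is keyed by
+ // scope, e.g. "zones/us-central1-f"):
+ //
+ //	for scope, scoped := range agg.Items {
+ //		fmt.Println(scope, len(scoped.InstanceGroups))
+ //	}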
+ SelfLink string `json:"selfLink,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Id") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *InstanceGroupAggregatedList) MarshalJSON() ([]byte, error) { + type noMethod InstanceGroupAggregatedList + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +// InstanceGroupList: A list of InstanceGroup resources. +type InstanceGroupList struct { + // Id: [Output Only] A unique identifier for this list of instance + // groups. The server generates this identifier. + Id string `json:"id,omitempty"` + + // Items: A list of instance groups. + Items []*InstanceGroup `json:"items,omitempty"` + + // Kind: [Output Only] The resource type, which is always + // compute#instanceGroupList for instance group lists. + Kind string `json:"kind,omitempty"` + + // NextPageToken: [Output Only] This token allows you to get the next + // page of results for list requests. If the number of results is larger + // than maxResults, use the nextPageToken as a value for the query + // parameter pageToken in the next list request. Subsequent list + // requests will have their own nextPageToken to continue paging through + // the results. + NextPageToken string `json:"nextPageToken,omitempty"` + + // SelfLink: [Output Only] The URL for this resource type. The server + // generates this URL. + SelfLink string `json:"selfLink,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Id") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *InstanceGroupList) MarshalJSON() ([]byte, error) { + type noMethod InstanceGroupList + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +// InstanceGroupManager: InstanceGroupManagers +// +// Next available tag: 20 +type InstanceGroupManager struct { + // BaseInstanceName: The base instance name to use for instances in this + // group. The value must be 1-58 characters long. Instances are named by + // appending a hyphen and a random four-character string to the base + // instance name. The base instance name must comply with RFC1035. + BaseInstanceName string `json:"baseInstanceName,omitempty"` + + // CreationTimestamp: [Output Only] The creation timestamp for this + // managed instance group in RFC3339 text format. + CreationTimestamp string `json:"creationTimestamp,omitempty"` + + // CurrentActions: [Output Only] The list of instance actions and the + // number of instances in this managed instance group that are scheduled + // for each of those actions. 
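+ //
+ // For illustration (a sketch; mgr is a placeholder for a fetched
+ // InstanceGroupManager): the per-action counts in this summary can
+ // be combined to gauge in-flight work, e.g.
+ //
+ //	a := mgr.CurrentActions
+ //	pending := a.Creating + a.Deleting + a.Recreating + a.Restarting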
+ CurrentActions *InstanceGroupManagerActionsSummary `json:"currentActions,omitempty"` + + // Description: An optional description of this resource. Provide this + // property when you create the resource. + Description string `json:"description,omitempty"` + + // Fingerprint: [Output Only] The fingerprint of the target pools + // information. You can use this optional field for optimistic locking + // when you update the target pool entries. + Fingerprint string `json:"fingerprint,omitempty"` + + // Id: [Output Only] A unique identifier for this resource type. The + // server generates this identifier. + Id uint64 `json:"id,omitempty,string"` + + // InstanceGroup: [Output Only] The URL of the Instance Group resource. + InstanceGroup string `json:"instanceGroup,omitempty"` + + // InstanceTemplate: The URL of the instance template that is specified + // for this managed instance group. The group uses this template to + // create all new instances in the managed instance group. + InstanceTemplate string `json:"instanceTemplate,omitempty"` + + // Kind: [Output Only] The resource type, which is always + // compute#instanceGroupManager for managed instance groups. + Kind string `json:"kind,omitempty"` + + // Name: The name of the managed instance group. The name must be 1-63 + // characters long, and comply with RFC1035. + Name string `json:"name,omitempty"` + + // NamedPorts: Named ports configured for the Instance Groups + // complementary to this Instance Group Manager. + NamedPorts []*NamedPort `json:"namedPorts,omitempty"` + + // SelfLink: [Output Only] The URL for this managed instance group. The + // server defines this URL. + SelfLink string `json:"selfLink,omitempty"` + + // TargetPools: The URLs for all TargetPool resources to which instances + // in the instanceGroup field are added. The target pools automatically + // apply to all of the instances in the managed instance group. + TargetPools []string `json:"targetPools,omitempty"` + + // TargetSize: The target number of running instances for this managed + // instance group. Deleting or abandoning instances reduces this number. + // Resizing the group changes this number. + TargetSize int64 `json:"targetSize,omitempty"` + + // Zone: The name of the zone where the managed instance group is + // located. + Zone string `json:"zone,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "BaseInstanceName") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *InstanceGroupManager) MarshalJSON() ([]byte, error) { + type noMethod InstanceGroupManager + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +type InstanceGroupManagerActionsSummary struct { + // Abandoning: [Output Only] The total number of instances in the + // managed instance group that are scheduled to be abandoned. Abandoning + // an instance removes it from the managed instance group without + // deleting it. 
+ Abandoning int64 `json:"abandoning,omitempty"`
+
+ // Creating: [Output Only] The number of instances in the managed
+ // instance group that are scheduled to be created or are currently
+ // being created.
+ Creating int64 `json:"creating,omitempty"`
+
+ // Deleting: [Output Only] The number of instances in the managed
+ // instance group that are scheduled to be deleted or are currently
+ // being deleted.
+ Deleting int64 `json:"deleting,omitempty"`
+
+ // None: [Output Only] The number of instances in the managed instance
+ // group that are running and have no scheduled actions.
+ None int64 `json:"none,omitempty"`
+
+ // Recreating: [Output Only] The number of instances in the managed
+ // instance group that are scheduled to be recreated or are currently
+ // being recreated. Recreating an instance deletes the existing root
+ // persistent disk and creates a new disk from the image that is
+ // defined in the instance template.
+ Recreating int64 `json:"recreating,omitempty"`
+
+ // Refreshing: [Output Only] The number of instances in the managed
+ // instance group that are being reconfigured with properties that do
+ // not require a restart or a recreate action. For example, setting or
+ // removing target pools for the instance.
+ Refreshing int64 `json:"refreshing,omitempty"`
+
+ // Restarting: [Output Only] The number of instances in the managed
+ // instance group that are scheduled to be restarted or are currently
+ // being restarted.
+ Restarting int64 `json:"restarting,omitempty"`
+
+ // ForceSendFields is a list of field names (e.g. "Abandoning") to
+ // unconditionally include in API requests. By default, fields with
+ // empty values are omitted from API requests. However, any non-pointer,
+ // non-interface field appearing in ForceSendFields will be sent to the
+ // server regardless of whether the field is empty or not. This may be
+ // used to include empty fields in Patch requests.
+ ForceSendFields []string `json:"-"`
+}
+
+func (s *InstanceGroupManagerActionsSummary) MarshalJSON() ([]byte, error) {
+ type noMethod InstanceGroupManagerActionsSummary
+ raw := noMethod(*s)
+ return gensupport.MarshalJSON(raw, s.ForceSendFields)
+}
+
+type InstanceGroupManagerAggregatedList struct {
+ // Id: [Output Only] A unique identifier for this aggregated list of
+ // managed instance groups. The server generates this identifier.
+ Id string `json:"id,omitempty"`
+
+ // Items: [Output Only] A map of filtered managed instance group lists.
+ Items map[string]InstanceGroupManagersScopedList `json:"items,omitempty"`
+
+ // Kind: [Output Only] The resource type, which is always
+ // compute#instanceGroupManagerAggregatedList for an aggregated list of
+ // managed instance groups.
+ Kind string `json:"kind,omitempty"`
+
+ // NextPageToken: [Output Only] This token allows you to get the next
+ // page of results for list requests. If the number of results is larger
+ // than maxResults, use the nextPageToken as a value for the query
+ // parameter pageToken in the next list request. Subsequent list
+ // requests will have their own nextPageToken to continue paging through
+ // the results.
+ NextPageToken string `json:"nextPageToken,omitempty"`
+
+ // SelfLink: [Output Only] The URL for this resource type. The server
+ // generates this URL.
+ SelfLink string `json:"selfLink,omitempty"`
+
+ // ServerResponse contains the HTTP response code and headers from the
+ // server.
+ googleapi.ServerResponse `json:"-"`
+
+ // ForceSendFields is a list of field names (e.g.
"Id") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *InstanceGroupManagerAggregatedList) MarshalJSON() ([]byte, error) { + type noMethod InstanceGroupManagerAggregatedList + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +// InstanceGroupManagerList: [Output Only] A list of managed instance +// groups. +type InstanceGroupManagerList struct { + // Id: [Output Only] A unique identifier for this resource type. The + // server generates this identifier. + Id string `json:"id,omitempty"` + + // Items: [Output Only] A list of managed instance groups. + Items []*InstanceGroupManager `json:"items,omitempty"` + + // Kind: [Output Only] The resource type, which is always + // compute#instanceGroupManagerList for a list of managed instance + // groups. + Kind string `json:"kind,omitempty"` + + // NextPageToken: [Output Only] This token allows you to get the next + // page of results for list requests. If the number of results is larger + // than maxResults, use the nextPageToken as a value for the query + // parameter pageToken in the next list request. Subsequent list + // requests will have their own nextPageToken to continue paging through + // the results. + NextPageToken string `json:"nextPageToken,omitempty"` + + // SelfLink: [Output Only] The URL for this resource type. The server + // generates this URL. + SelfLink string `json:"selfLink,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Id") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *InstanceGroupManagerList) MarshalJSON() ([]byte, error) { + type noMethod InstanceGroupManagerList + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +type InstanceGroupManagersAbandonInstancesRequest struct { + // Instances: The URL for one or more instances to abandon from the + // managed instance group. + Instances []string `json:"instances,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Instances") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. 
+ ForceSendFields []string `json:"-"` +} + +func (s *InstanceGroupManagersAbandonInstancesRequest) MarshalJSON() ([]byte, error) { + type noMethod InstanceGroupManagersAbandonInstancesRequest + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +type InstanceGroupManagersDeleteInstancesRequest struct { + // Instances: The list of instances to delete from this managed instance + // group. Specify one or more instance URLs. + Instances []string `json:"instances,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Instances") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *InstanceGroupManagersDeleteInstancesRequest) MarshalJSON() ([]byte, error) { + type noMethod InstanceGroupManagersDeleteInstancesRequest + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +type InstanceGroupManagersListManagedInstancesResponse struct { + // ManagedInstances: [Output Only] The list of instances in the managed + // instance group. + ManagedInstances []*ManagedInstance `json:"managedInstances,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "ManagedInstances") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *InstanceGroupManagersListManagedInstancesResponse) MarshalJSON() ([]byte, error) { + type noMethod InstanceGroupManagersListManagedInstancesResponse + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +type InstanceGroupManagersRecreateInstancesRequest struct { + // Instances: The URL for one or more instances to recreate. + Instances []string `json:"instances,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Instances") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *InstanceGroupManagersRecreateInstancesRequest) MarshalJSON() ([]byte, error) { + type noMethod InstanceGroupManagersRecreateInstancesRequest + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +type InstanceGroupManagersScopedList struct { + // InstanceGroupManagers: [Output Only] The list of managed instance + // groups that are contained in the specified project and zone. + InstanceGroupManagers []*InstanceGroupManager `json:"instanceGroupManagers,omitempty"` + + // Warning: [Output Only] The warning that replaces the list of managed + // instance groups when the list is empty. 
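+ //
+ // A hedged consumption sketch (scoped is a placeholder for a value
+ // of this type): in an aggregated listing, scopes with no managed
+ // instance groups carry the Warning below instead of items, so a
+ // caller might skip them:
+ //
+ //	if scoped.Warning != nil && scoped.Warning.Code == "NO_RESULTS_ON_PAGE" {
+ //		// nothing listed in this scope
+ //	}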
+ Warning *InstanceGroupManagersScopedListWarning `json:"warning,omitempty"` + + // ForceSendFields is a list of field names (e.g. + // "InstanceGroupManagers") to unconditionally include in API requests. + // By default, fields with empty values are omitted from API requests. + // However, any non-pointer, non-interface field appearing in + // ForceSendFields will be sent to the server regardless of whether the + // field is empty or not. This may be used to include empty fields in + // Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *InstanceGroupManagersScopedList) MarshalJSON() ([]byte, error) { + type noMethod InstanceGroupManagersScopedList + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +// InstanceGroupManagersScopedListWarning: [Output Only] The warning +// that replaces the list of managed instance groups when the list is +// empty. +type InstanceGroupManagersScopedListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. + // + // Possible values: + // "DEPRECATED_RESOURCE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "INJECTED_KERNELS_DEPRECATED" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_NOT_DELETED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNREACHABLE" + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*InstanceGroupManagersScopedListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *InstanceGroupManagersScopedListWarning) MarshalJSON() ([]byte, error) { + type noMethod InstanceGroupManagersScopedListWarning + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +type InstanceGroupManagersScopedListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource, and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. 
By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *InstanceGroupManagersScopedListWarningData) MarshalJSON() ([]byte, error) { + type noMethod InstanceGroupManagersScopedListWarningData + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +type InstanceGroupManagersSetInstanceTemplateRequest struct { + // InstanceTemplate: The URL of the instance template that is specified + // for this managed instance group. The group uses this template to + // create all new instances in the managed instance group. + InstanceTemplate string `json:"instanceTemplate,omitempty"` + + // ForceSendFields is a list of field names (e.g. "InstanceTemplate") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *InstanceGroupManagersSetInstanceTemplateRequest) MarshalJSON() ([]byte, error) { + type noMethod InstanceGroupManagersSetInstanceTemplateRequest + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +type InstanceGroupManagersSetTargetPoolsRequest struct { + // Fingerprint: The fingerprint of the target pools information. Use + // this optional property to prevent conflicts when multiple users + // change the target pools settings concurrently. Obtain the fingerprint + // with the instanceGroupManagers.get method. Then, include the + // fingerprint in your request to ensure that you do not overwrite + // changes that were applied from another concurrent request. + Fingerprint string `json:"fingerprint,omitempty"` + + // TargetPools: The list of target pool URLs that instances in this + // managed instance group belong to. The managed instance group applies + // these target pools to all of the instances in the group. Existing + // instances and new instances in the group all receive these target + // pool settings. + TargetPools []string `json:"targetPools,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Fingerprint") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *InstanceGroupManagersSetTargetPoolsRequest) MarshalJSON() ([]byte, error) { + type noMethod InstanceGroupManagersSetTargetPoolsRequest + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +type InstanceGroupsAddInstancesRequest struct { + // Instances: The list of instances to add to the instance group. + Instances []*InstanceReference `json:"instances,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Instances") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. 
However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *InstanceGroupsAddInstancesRequest) MarshalJSON() ([]byte, error) { + type noMethod InstanceGroupsAddInstancesRequest + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +type InstanceGroupsListInstances struct { + // Id: [Output Only] A unique identifier for this list of instance + // groups. The server generates this identifier. + Id string `json:"id,omitempty"` + + // Items: [Output Only] A list of instances and any named ports that are + // assigned to those instances. + Items []*InstanceWithNamedPorts `json:"items,omitempty"` + + // Kind: [Output Only] The resource type, which is always + // compute#instanceGroupsListInstances for lists of instance groups. + Kind string `json:"kind,omitempty"` + + // NextPageToken: [Output Only] This token allows you to get the next + // page of results for list requests. If the number of results is larger + // than maxResults, use the nextPageToken as a value for the query + // parameter pageToken in the next list request. Subsequent list + // requests will have their own nextPageToken to continue paging through + // the results. + NextPageToken string `json:"nextPageToken,omitempty"` + + // SelfLink: [Output Only] The URL for this list of instance groups. The + // server generates this URL. + SelfLink string `json:"selfLink,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Id") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *InstanceGroupsListInstances) MarshalJSON() ([]byte, error) { + type noMethod InstanceGroupsListInstances + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +type InstanceGroupsListInstancesRequest struct { + // InstanceState: A filter for the state of the instances in the + // instance group. Valid options are ALL or RUNNING. If you do not + // specify this parameter the list includes all instances regardless of + // their state. + // + // Possible values: + // "ALL" + // "RUNNING" + InstanceState string `json:"instanceState,omitempty"` + + // ForceSendFields is a list of field names (e.g. "InstanceState") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. 
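+ //
+ // Illustrative: to list only running members of an instance group,
+ //
+ //	req := &InstanceGroupsListInstancesRequest{InstanceState: "RUNNING"}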
+ ForceSendFields []string `json:"-"` +} + +func (s *InstanceGroupsListInstancesRequest) MarshalJSON() ([]byte, error) { + type noMethod InstanceGroupsListInstancesRequest + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +type InstanceGroupsRemoveInstancesRequest struct { + // Instances: The list of instances to remove from the instance group. + Instances []*InstanceReference `json:"instances,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Instances") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *InstanceGroupsRemoveInstancesRequest) MarshalJSON() ([]byte, error) { + type noMethod InstanceGroupsRemoveInstancesRequest + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +type InstanceGroupsScopedList struct { + // InstanceGroups: [Output Only] The list of instance groups that are + // contained in this scope. + InstanceGroups []*InstanceGroup `json:"instanceGroups,omitempty"` + + // Warning: [Output Only] An informational warning that replaces the + // list of instance groups when the list is empty. + Warning *InstanceGroupsScopedListWarning `json:"warning,omitempty"` + + // ForceSendFields is a list of field names (e.g. "InstanceGroups") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *InstanceGroupsScopedList) MarshalJSON() ([]byte, error) { + type noMethod InstanceGroupsScopedList + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +// InstanceGroupsScopedListWarning: [Output Only] An informational +// warning that replaces the list of instance groups when the list is +// empty. +type InstanceGroupsScopedListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. + // + // Possible values: + // "DEPRECATED_RESOURCE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "INJECTED_KERNELS_DEPRECATED" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_NOT_DELETED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNREACHABLE" + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*InstanceGroupsScopedListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. 
By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *InstanceGroupsScopedListWarning) MarshalJSON() ([]byte, error) { + type noMethod InstanceGroupsScopedListWarning + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +type InstanceGroupsScopedListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource, and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *InstanceGroupsScopedListWarningData) MarshalJSON() ([]byte, error) { + type noMethod InstanceGroupsScopedListWarningData + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +type InstanceGroupsSetNamedPortsRequest struct { + // Fingerprint: The fingerprint of the named ports information for this + // instance group. Use this optional property to prevent conflicts when + // multiple users change the named ports settings concurrently. Obtain + // the fingerprint with the instanceGroups.get method. Then, include the + // fingerprint in your request to ensure that you do not overwrite + // changes that were applied from another concurrent request. + Fingerprint string `json:"fingerprint,omitempty"` + + // NamedPorts: The list of named ports to set for this instance group. + NamedPorts []*NamedPort `json:"namedPorts,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Fingerprint") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *InstanceGroupsSetNamedPortsRequest) MarshalJSON() ([]byte, error) { + type noMethod InstanceGroupsSetNamedPortsRequest + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +// InstanceList: Contains a list of instance resources. +type InstanceList struct { + // Id: [Output Only] The unique identifier for the resource. This + // identifier is defined by the server. 
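+ //
+ // Illustrative sketch (assumes the Instances.List call generated
+ // elsewhere in this package; svc, project, and zone are placeholders):
+ // draining a paginated InstanceList by feeding NextPageToken back in as
+ // the pageToken query parameter, as described below.
+ //
+ //	var all []*Instance
+ //	token := ""
+ //	for {
+ //		page, err := svc.Instances.List(project, zone).PageToken(token).Do()
+ //		if err != nil {
+ //			return err
+ //		}
+ //		all = append(all, page.Items...)
+ //		if page.NextPageToken == "" {
+ //			break
+ //		}
+ //		token = page.NextPageToken
+ //	}
+ //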
+ Id string `json:"id,omitempty"` + + // Items: [Output Only] A list of Instance resources. + Items []*Instance `json:"items,omitempty"` + + // Kind: [Output Only] Type of resource. Always compute#instanceList for + // lists of Instance resources. + Kind string `json:"kind,omitempty"` + + // NextPageToken: [Output Only] A token used to continue a truncated + // list request. + NextPageToken string `json:"nextPageToken,omitempty"` + + // SelfLink: [Output Only] Server defined URL for this resource. + SelfLink string `json:"selfLink,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Id") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *InstanceList) MarshalJSON() ([]byte, error) { + type noMethod InstanceList + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +type InstanceMoveRequest struct { + // DestinationZone: The URL of the destination zone to move the instance + // to. This can be a full or partial URL. For example, the following are + // all valid URLs to a zone: + // - https://www.googleapis.com/compute/v1/projects/project/zones/zone + // + // - projects/project/zones/zone + // - zones/zone + DestinationZone string `json:"destinationZone,omitempty"` + + // TargetInstance: The URL of the target instance to move. This can be a + // full or partial URL. For example, the following are all valid URLs to + // an instance: + // - + // https://www.googleapis.com/compute/v1/projects/project/zones/zone/instances/instance + // - projects/project/zones/zone/instances/instance + // - zones/zone/instances/instance + TargetInstance string `json:"targetInstance,omitempty"` + + // ForceSendFields is a list of field names (e.g. "DestinationZone") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *InstanceMoveRequest) MarshalJSON() ([]byte, error) { + type noMethod InstanceMoveRequest + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +type InstanceProperties struct { + // CanIpForward: Enables instances created based on this template to + // send packets with source IP addresses other than their own and + // receive packets with destination IP addresses other than their own. + // If these instances will be used as an IP gateway or it will be set as + // the next-hop in a Route resource, specify true. If unsure, leave this + // set to false. See the canIpForward documentation for more + // information. + CanIpForward bool `json:"canIpForward,omitempty"` + + // Description: An optional text description for the instances that are + // created from this instance template. 
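+ //
+ // Illustrative sketch (hypothetical project, zone, and instance names):
+ // the InstanceMoveRequest above accepts full or partial resource URLs,
+ // so this request addresses the same zone and instance as the
+ // fully-qualified forms listed in its field comments.
+ //
+ //	req := &InstanceMoveRequest{
+ //		DestinationZone: "zones/us-central1-f",
+ //		TargetInstance:  "projects/my-project/zones/us-central1-a/instances/my-instance",
+ //	}
+ //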
+ Description string `json:"description,omitempty"` + + // Disks: An array of disks that are associated with the instances that + // are created from this template. + Disks []*AttachedDisk `json:"disks,omitempty"` + + // MachineType: The machine type to use for instances that are created + // from this template. + MachineType string `json:"machineType,omitempty"` + + // Metadata: The metadata key/value pairs to assign to instances that + // are created from this template. These pairs can consist of custom + // metadata or predefined keys. See Project and instance metadata for + // more information. + Metadata *Metadata `json:"metadata,omitempty"` + + // NetworkInterfaces: An array of network access configurations for this + // interface. + NetworkInterfaces []*NetworkInterface `json:"networkInterfaces,omitempty"` + + // Scheduling: Specifies the scheduling options for the instances that + // are created from this template. + Scheduling *Scheduling `json:"scheduling,omitempty"` + + // ServiceAccounts: A list of service accounts with specified scopes. + // Access tokens for these service accounts are available to the + // instances that are created from this template. Use metadata queries + // to obtain the access tokens for these instances. + ServiceAccounts []*ServiceAccount `json:"serviceAccounts,omitempty"` + + // Tags: A list of tags to apply to the instances that are created from + // this template. The tags identify valid sources or targets for network + // firewalls. The setTags method can modify this list of tags. Each tag + // within the list must comply with RFC1035. + Tags *Tags `json:"tags,omitempty"` + + // ForceSendFields is a list of field names (e.g. "CanIpForward") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *InstanceProperties) MarshalJSON() ([]byte, error) { + type noMethod InstanceProperties + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +type InstanceReference struct { + // Instance: The URL for a specific instance. + Instance string `json:"instance,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Instance") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *InstanceReference) MarshalJSON() ([]byte, error) { + type noMethod InstanceReference + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +// InstanceTemplate: An Instance Template resource. +type InstanceTemplate struct { + // CreationTimestamp: [Output Only] The creation timestamp for this + // instance template in RFC3339 text format. + CreationTimestamp string `json:"creationTimestamp,omitempty"` + + // Description: An optional description of this resource. Provide this + // property when you create the resource. + Description string `json:"description,omitempty"` + + // Id: [Output Only] A unique identifier for this instance template. 
The + // server defines this identifier. + Id uint64 `json:"id,omitempty,string"` + + // Kind: [Output Only] The resource type, which is always + // compute#instanceTemplate for instance templates. + Kind string `json:"kind,omitempty"` + + // Name: Name of the resource; provided by the client when the resource + // is created. The name must be 1-63 characters long, and comply with + // RFC1035. Specifically, the name must be 1-63 characters long and + // match the regular expression [a-z]([-a-z0-9]*[a-z0-9])? which means + // the first character must be a lowercase letter, and all following + // characters must be a dash, lowercase letter, or digit, except the + // last character, which cannot be a dash. + Name string `json:"name,omitempty"` + + // Properties: The instance properties for this instance template. + Properties *InstanceProperties `json:"properties,omitempty"` + + // SelfLink: [Output Only] The URL for this instance template. The + // server defines this URL. + SelfLink string `json:"selfLink,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "CreationTimestamp") + // to unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *InstanceTemplate) MarshalJSON() ([]byte, error) { + type noMethod InstanceTemplate + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +// InstanceTemplateList: A list of instance templates. +type InstanceTemplateList struct { + // Id: [Output Only] A unique identifier for this instance template. The + // server defines this identifier. + Id string `json:"id,omitempty"` + + // Items: [Output Only] list of InstanceTemplate resources. + Items []*InstanceTemplate `json:"items,omitempty"` + + // Kind: [Output Only] The resource type, which is always + // compute#instanceTemplatesListResponse for instance template lists. + Kind string `json:"kind,omitempty"` + + // NextPageToken: [Output Only] This token allows you to get the next + // page of results for list requests. If the number of results is larger + // than maxResults, use the nextPageToken as a value for the query + // parameter pageToken in the next list request. Subsequent list + // requests will have their own nextPageToken to continue paging through + // the results. + NextPageToken string `json:"nextPageToken,omitempty"` + + // SelfLink: [Output Only] The URL for this instance template list. The + // server defines this URL. + SelfLink string `json:"selfLink,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Id") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. 
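+ //
+ // Illustrative sketch (not part of the generated bindings): a
+ // client-side check of the RFC1035 name rule quoted for
+ // InstanceTemplate.Name above, using the regular expression given
+ // there plus the 1-63 character limit.
+ //
+ //	var rfc1035 = regexp.MustCompile(`^[a-z]([-a-z0-9]*[a-z0-9])?$`)
+ //
+ //	func validName(name string) bool {
+ //		return len(name) <= 63 && rfc1035.MatchString(name)
+ //	}
+ //
+ //	// validName("my-template-1") == true
+ //	// validName("My_Template")   == false (uppercase and underscore)
+ //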
+ ForceSendFields []string `json:"-"` +} + +func (s *InstanceTemplateList) MarshalJSON() ([]byte, error) { + type noMethod InstanceTemplateList + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +type InstanceWithNamedPorts struct { + // Instance: [Output Only] The URL of the instance. + Instance string `json:"instance,omitempty"` + + // NamedPorts: [Output Only] The named ports that belong to this + // instance group. + NamedPorts []*NamedPort `json:"namedPorts,omitempty"` + + // Status: [Output Only] The status of the instance. + // + // Possible values: + // "PROVISIONING" + // "RUNNING" + // "STAGING" + // "STOPPED" + // "STOPPING" + // "SUSPENDED" + // "SUSPENDING" + // "TERMINATED" + Status string `json:"status,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Instance") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *InstanceWithNamedPorts) MarshalJSON() ([]byte, error) { + type noMethod InstanceWithNamedPorts + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +type InstancesScopedList struct { + // Instances: [Output Only] List of instances contained in this scope. + Instances []*Instance `json:"instances,omitempty"` + + // Warning: [Output Only] Informational warning which replaces the list + // of instances when the list is empty. + Warning *InstancesScopedListWarning `json:"warning,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Instances") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *InstancesScopedList) MarshalJSON() ([]byte, error) { + type noMethod InstancesScopedList + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +// InstancesScopedListWarning: [Output Only] Informational warning which +// replaces the list of instances when the list is empty. +type InstancesScopedListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. + // + // Possible values: + // "DEPRECATED_RESOURCE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "INJECTED_KERNELS_DEPRECATED" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_NOT_DELETED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNREACHABLE" + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. 
+ // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*InstancesScopedListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *InstancesScopedListWarning) MarshalJSON() ([]byte, error) { + type noMethod InstancesScopedListWarning + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +type InstancesScopedListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource, and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *InstancesScopedListWarningData) MarshalJSON() ([]byte, error) { + type noMethod InstancesScopedListWarningData + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +// License: A license resource. +type License struct { + // ChargesUseFee: [Output Only] If true, the customer will be charged + // license fee for running software that contains this license on an + // instance. + ChargesUseFee bool `json:"chargesUseFee,omitempty"` + + // Kind: [Output Only] Type of resource. Always compute#license for + // licenses. + Kind string `json:"kind,omitempty"` + + // Name: [Output Only] Name of the resource. The name is 1-63 characters + // long and complies with RFC1035. + Name string `json:"name,omitempty"` + + // SelfLink: [Output Only] Server-defined URL for the resource. + SelfLink string `json:"selfLink,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "ChargesUseFee") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. 
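+ //
+ // Illustrative sketch (agg is a hypothetical aggregated-list result):
+ // reading the scoped-list warning shape defined above, including its
+ // key/value Data entries.
+ //
+ //	for scope, scoped := range agg.Items {
+ //		if scoped.Warning == nil {
+ //			continue
+ //		}
+ //		log.Printf("%s: %s: %s", scope, scoped.Warning.Code, scoped.Warning.Message)
+ //		for _, d := range scoped.Warning.Data {
+ //			log.Printf("  %s=%s", d.Key, d.Value)
+ //		}
+ //	}
+ //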
+ ForceSendFields []string `json:"-"` +} + +func (s *License) MarshalJSON() ([]byte, error) { + type noMethod License + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +// MachineType: A Machine Type resource. +type MachineType struct { + // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text + // format. + CreationTimestamp string `json:"creationTimestamp,omitempty"` + + // Deprecated: [Output Only] The deprecation status associated with this + // machine type. + Deprecated *DeprecationStatus `json:"deprecated,omitempty"` + + // Description: [Output Only] An optional textual description of the + // resource. + Description string `json:"description,omitempty"` + + // GuestCpus: [Output Only] The number of virtual CPUs that are + // available to the instance. + GuestCpus int64 `json:"guestCpus,omitempty"` + + // Id: [Output Only] The unique identifier for the resource. This + // identifier is defined by the server. + Id uint64 `json:"id,omitempty,string"` + + // ImageSpaceGb: [Deprecated] This property is deprecated and will never + // be populated with any relevant values. + ImageSpaceGb int64 `json:"imageSpaceGb,omitempty"` + + // Kind: [Output Only] The type of the resource. Always + // compute#machineType for machine types. + Kind string `json:"kind,omitempty"` + + // MaximumPersistentDisks: [Output Only] Maximum persistent disks + // allowed. + MaximumPersistentDisks int64 `json:"maximumPersistentDisks,omitempty"` + + // MaximumPersistentDisksSizeGb: [Output Only] Maximum total persistent + // disks size (GB) allowed. + MaximumPersistentDisksSizeGb int64 `json:"maximumPersistentDisksSizeGb,omitempty,string"` + + // MemoryMb: [Output Only] The amount of physical memory available to + // the instance, defined in MB. + MemoryMb int64 `json:"memoryMb,omitempty"` + + // Name: [Output Only] Name of the resource. + Name string `json:"name,omitempty"` + + // ScratchDisks: [Output Only] List of extended scratch disks assigned + // to the instance. + ScratchDisks []*MachineTypeScratchDisks `json:"scratchDisks,omitempty"` + + // SelfLink: [Output Only] Server-defined URL for the resource. + SelfLink string `json:"selfLink,omitempty"` + + // Zone: [Output Only] The name of the zone where the machine type + // resides, such as us-central1-a. + Zone string `json:"zone,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "CreationTimestamp") + // to unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *MachineType) MarshalJSON() ([]byte, error) { + type noMethod MachineType + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +type MachineTypeScratchDisks struct { + // DiskGb: Size of the scratch disk, defined in GB. + DiskGb int64 `json:"diskGb,omitempty"` + + // ForceSendFields is a list of field names (e.g. "DiskGb") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. 
However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *MachineTypeScratchDisks) MarshalJSON() ([]byte, error) { + type noMethod MachineTypeScratchDisks + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +type MachineTypeAggregatedList struct { + // Id: [Output Only] The unique identifier for the resource. This + // identifier is defined by the server. + Id string `json:"id,omitempty"` + + // Items: [Output Only] A map of scoped machine type lists. + Items map[string]MachineTypesScopedList `json:"items,omitempty"` + + // Kind: [Output Only] Type of resource. Always + // compute#machineTypeAggregatedList for aggregated lists of machine + // types. + Kind string `json:"kind,omitempty"` + + // NextPageToken: [Output Only] This token allows you to get the next + // page of results for list requests. If the number of results is larger + // than maxResults, use the nextPageToken as a value for the query + // parameter pageToken in the next list request. Subsequent list + // requests will have their own nextPageToken to continue paging through + // the results. + NextPageToken string `json:"nextPageToken,omitempty"` + + // SelfLink: [Output Only] Server-defined URL for this resource. + SelfLink string `json:"selfLink,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Id") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *MachineTypeAggregatedList) MarshalJSON() ([]byte, error) { + type noMethod MachineTypeAggregatedList + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +// MachineTypeList: Contains a list of Machine Type resources. +type MachineTypeList struct { + // Id: [Output Only] The unique identifier for the resource. This + // identifier is defined by the server. + Id string `json:"id,omitempty"` + + // Items: [Output Only] A list of Machine Type resources. + Items []*MachineType `json:"items,omitempty"` + + // Kind: [Output Only] Type of resource. Always compute#machineTypeList + // for lists of machine types. + Kind string `json:"kind,omitempty"` + + // NextPageToken: [Output Only] This token allows you to get the next + // page of results for list requests. If the number of results is larger + // than maxResults, use the nextPageToken as a value for the query + // parameter pageToken in the next list request. Subsequent list + // requests will have their own nextPageToken to continue paging through + // the results. + NextPageToken string `json:"nextPageToken,omitempty"` + + // SelfLink: [Output Only] Server-defined URL for this resource. + SelfLink string `json:"selfLink,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Id") to + // unconditionally include in API requests. 
By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *MachineTypeList) MarshalJSON() ([]byte, error) { + type noMethod MachineTypeList + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +type MachineTypesScopedList struct { + // MachineTypes: [Output Only] List of machine types contained in this + // scope. + MachineTypes []*MachineType `json:"machineTypes,omitempty"` + + // Warning: [Output Only] An informational warning that appears when the + // machine types list is empty. + Warning *MachineTypesScopedListWarning `json:"warning,omitempty"` + + // ForceSendFields is a list of field names (e.g. "MachineTypes") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *MachineTypesScopedList) MarshalJSON() ([]byte, error) { + type noMethod MachineTypesScopedList + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +// MachineTypesScopedListWarning: [Output Only] An informational warning +// that appears when the machine types list is empty. +type MachineTypesScopedListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. + // + // Possible values: + // "DEPRECATED_RESOURCE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "INJECTED_KERNELS_DEPRECATED" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_NOT_DELETED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNREACHABLE" + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*MachineTypesScopedListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *MachineTypesScopedListWarning) MarshalJSON() ([]byte, error) { + type noMethod MachineTypesScopedListWarning + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +type MachineTypesScopedListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. 
For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource, and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *MachineTypesScopedListWarningData) MarshalJSON() ([]byte, error) { + type noMethod MachineTypesScopedListWarningData + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +type ManagedInstance struct { + // CurrentAction: [Output Only] The current action that the managed + // instance group has scheduled for the instance. Possible values: + // - NONE The instance is running, and the managed instance group does + // not have any scheduled actions for this instance. + // - CREATING The managed instance group is creating this instance. + // - RECREATING The managed instance group is recreating this instance. + // + // - DELETING The managed instance group is permanently deleting this + // instance. + // - ABANDONING The managed instance group is abandoning this instance. + // The instance will be removed from the instance group and from any + // target pools that are associated with this group. + // - RESTARTING The managed instance group is restarting the instance. + // + // - REFRESHING The managed instance group is applying configuration + // changes to the instance without stopping it. For example, the group + // can update the target pool list for an instance without stopping that + // instance. + // + // Possible values: + // "ABANDONING" + // "CREATING" + // "DELETING" + // "NONE" + // "RECREATING" + // "REFRESHING" + // "RESTARTING" + CurrentAction string `json:"currentAction,omitempty"` + + // Id: [Output only] The unique identifier for this resource. This field + // is empty when instance does not exist. + Id uint64 `json:"id,omitempty,string"` + + // Instance: [Output Only] The URL of the instance. The URL can exist + // even if the instance has not yet been created. + Instance string `json:"instance,omitempty"` + + // InstanceStatus: [Output Only] The status of the instance. This field + // is empty when the instance does not exist. + // + // Possible values: + // "PROVISIONING" + // "RUNNING" + // "STAGING" + // "STOPPED" + // "STOPPING" + // "SUSPENDED" + // "SUSPENDING" + // "TERMINATED" + InstanceStatus string `json:"instanceStatus,omitempty"` + + // LastAttempt: [Output Only] Information about the last attempt to + // create or delete the instance. + LastAttempt *ManagedInstanceLastAttempt `json:"lastAttempt,omitempty"` + + // ForceSendFields is a list of field names (e.g. "CurrentAction") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. 
However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *ManagedInstance) MarshalJSON() ([]byte, error) { + type noMethod ManagedInstance + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +type ManagedInstanceLastAttempt struct { + // Errors: [Output Only] Encountered errors during the last attempt to + // create or delete the instance. + Errors *ManagedInstanceLastAttemptErrors `json:"errors,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Errors") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *ManagedInstanceLastAttempt) MarshalJSON() ([]byte, error) { + type noMethod ManagedInstanceLastAttempt + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +// ManagedInstanceLastAttemptErrors: [Output Only] Encountered errors +// during the last attempt to create or delete the instance. +type ManagedInstanceLastAttemptErrors struct { + // Errors: [Output Only] The array of errors encountered while + // processing this operation. + Errors []*ManagedInstanceLastAttemptErrorsErrors `json:"errors,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Errors") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *ManagedInstanceLastAttemptErrors) MarshalJSON() ([]byte, error) { + type noMethod ManagedInstanceLastAttemptErrors + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +type ManagedInstanceLastAttemptErrorsErrors struct { + // Code: [Output Only] The error type identifier for this error. + Code string `json:"code,omitempty"` + + // Location: [Output Only] Indicates the field in the request which + // caused the error. This property is optional. + Location string `json:"location,omitempty"` + + // Message: [Output Only] An optional, human-readable error message. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *ManagedInstanceLastAttemptErrorsErrors) MarshalJSON() ([]byte, error) { + type noMethod ManagedInstanceLastAttemptErrorsErrors + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +// Metadata: A metadata key/value entry. 
+type Metadata struct { + // Fingerprint: Specifies a fingerprint for this request, which is + // essentially a hash of the metadata's contents and used for optimistic + // locking. The fingerprint is initially generated by Compute Engine and + // changes after every request to modify or update metadata. You must + // always provide an up-to-date fingerprint hash in order to update or + // change metadata. + Fingerprint string `json:"fingerprint,omitempty"` + + // Items: Array of key/value pairs. The total size of all keys and + // values must be less than 512 KB. + Items []*MetadataItems `json:"items,omitempty"` + + // Kind: [Output Only] Type of the resource. Always compute#metadata for + // metadata. + Kind string `json:"kind,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Fingerprint") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *Metadata) MarshalJSON() ([]byte, error) { + type noMethod Metadata + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +type MetadataItems struct { + // Key: Key for the metadata entry. Keys must conform to the following + // regexp: [a-zA-Z0-9-_]+, and be less than 128 bytes in length. This is + // reflected as part of a URL in the metadata server. Additionally, to + // avoid ambiguity, keys must not conflict with any other metadata keys + // for the project. + Key string `json:"key,omitempty"` + + // Value: Value for the metadata entry. These are free-form strings, and + // only have meaning as interpreted by the image running in the + // instance. The only restriction placed on values is that their size + // must be less than or equal to 32768 bytes. + Value *string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *MetadataItems) MarshalJSON() ([]byte, error) { + type noMethod MetadataItems + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +// NamedPort: The named port. For example: . +type NamedPort struct { + // Name: The name for this named port. The name must be 1-63 characters + // long, and comply with RFC1035. + Name string `json:"name,omitempty"` + + // Port: The port number, which can be a value between 1 and 65535. + Port int64 `json:"port,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Name") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. 
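+ //
+ // Illustrative sketch (assumes the Instances.Get and
+ // Instances.SetMetadata calls generated elsewhere in this package; svc,
+ // project, zone, and name are placeholders): the read-modify-write
+ // cycle implied by Metadata.Fingerprint above.
+ //
+ //	inst, err := svc.Instances.Get(project, zone, name).Do()
+ //	if err != nil {
+ //		return err
+ //	}
+ //	md := inst.Metadata // carries the current fingerprint
+ //	val := "bar"
+ //	md.Items = append(md.Items, &MetadataItems{Key: "foo", Value: &val})
+ //	_, err = svc.Instances.SetMetadata(project, zone, name, md).Do()
+ //	// A stale fingerprint makes the server reject the update; retry by
+ //	// re-reading the metadata and reapplying the change.
+ //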
+ ForceSendFields []string `json:"-"` +} + +func (s *NamedPort) MarshalJSON() ([]byte, error) { + type noMethod NamedPort + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +// Network: A network resource. +type Network struct { + // IPv4Range: The range of internal addresses that are legal on this + // network. This range is a CIDR specification, for example: + // 192.168.0.0/16. Provided by the client when the network is created. + IPv4Range string `json:"IPv4Range,omitempty"` + + // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text + // format. + CreationTimestamp string `json:"creationTimestamp,omitempty"` + + // Description: An optional description of this resource. Provide this + // property when you create the resource. + Description string `json:"description,omitempty"` + + // GatewayIPv4: A gateway address for default routing to other networks. + // This value is read only and is selected by the Google Compute Engine, + // typically as the first usable address in the IPv4Range. + GatewayIPv4 string `json:"gatewayIPv4,omitempty"` + + // Id: [Output Only] The unique identifier for the resource. This + // identifier is defined by the server. + Id uint64 `json:"id,omitempty,string"` + + // Kind: [Output Only] Type of the resource. Always compute#network for + // networks. + Kind string `json:"kind,omitempty"` + + // Name: Name of the resource. Provided by the client when the resource + // is created. The name must be 1-63 characters long, and comply with + // RFC1035. Specifically, the name must be 1-63 characters long and + // match the regular expression [a-z]([-a-z0-9]*[a-z0-9])? which means + // the first character must be a lowercase letter, and all following + // characters must be a dash, lowercase letter, or digit, except the + // last character, which cannot be a dash. + Name string `json:"name,omitempty"` + + // SelfLink: [Output Only] Server-defined URL for the resource. + SelfLink string `json:"selfLink,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "IPv4Range") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *Network) MarshalJSON() ([]byte, error) { + type noMethod Network + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +// NetworkInterface: A network interface resource attached to an +// instance. +type NetworkInterface struct { + // AccessConfigs: An array of configurations for this interface. + // Currently, ONE_TO_ONE_NAT is the only access config supported. If + // there are no accessConfigs specified, then this instance will have no + // external internet access. + AccessConfigs []*AccessConfig `json:"accessConfigs,omitempty"` + + // Name: [Output Only] The name of the network interface, generated by + // the server. For network devices, these are eth0, eth1, etc. + Name string `json:"name,omitempty"` + + // Network: URL of the network resource for this instance. This is + // required for creating an instance but optional when creating a + // firewall rule. 
If not specified when creating a firewall rule, the + // default network is used: + // + // global/networks/default + // + // If you specify this property, you can specify the network as a full + // or partial URL. For example, the following are all valid URLs: + // - + // https://www.googleapis.com/compute/v1/projects/project/global/networks/network + // - projects/project/global/networks/network + // - global/networks/default + Network string `json:"network,omitempty"` + + // NetworkIP: [Output Only] An optional IPV4 internal network address + // assigned to the instance for this network interface. + NetworkIP string `json:"networkIP,omitempty"` + + // ForceSendFields is a list of field names (e.g. "AccessConfigs") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *NetworkInterface) MarshalJSON() ([]byte, error) { + type noMethod NetworkInterface + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +// NetworkList: Contains a list of Network resources. +type NetworkList struct { + // Id: [Output Only] The unique identifier for the resource. This + // identifier is defined by the server. + Id string `json:"id,omitempty"` + + // Items: [Output Only] A list of Network resources. + Items []*Network `json:"items,omitempty"` + + // Kind: [Output Only] Type of resource. Always compute#networkList for + // lists of networks. + Kind string `json:"kind,omitempty"` + + // NextPageToken: [Output Only] This token allows you to get the next + // page of results for list requests. If the number of results is larger + // than maxResults, use the nextPageToken as a value for the query + // parameter pageToken in the next list request. Subsequent list + // requests will have their own nextPageToken to continue paging through + // the results. + NextPageToken string `json:"nextPageToken,omitempty"` + + // SelfLink: [Output Only] Server-defined URL for this resource . + SelfLink string `json:"selfLink,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Id") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *NetworkList) MarshalJSON() ([]byte, error) { + type noMethod NetworkList + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +// Operation: An Operation resource, used to manage asynchronous API +// requests. +type Operation struct { + // ClientOperationId: [Output Only] A unique client ID generated by the + // server. + ClientOperationId string `json:"clientOperationId,omitempty"` + + // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text + // format. 
+ CreationTimestamp string `json:"creationTimestamp,omitempty"` + + // Description: [Output Only] A textual description of the operation, + // which is set when the operation is created. + Description string `json:"description,omitempty"` + + // EndTime: [Output Only] The time that this operation was completed. + // This value is in RFC3339 text format. + EndTime string `json:"endTime,omitempty"` + + // Error: [Output Only] If errors are generated during processing of the + // operation, this field will be populated. + Error *OperationError `json:"error,omitempty"` + + // HttpErrorMessage: [Output Only] If the operation fails, this field + // contains the HTTP error message that was returned, such as NOT FOUND. + HttpErrorMessage string `json:"httpErrorMessage,omitempty"` + + // HttpErrorStatusCode: [Output Only] If the operation fails, this field + // contains the HTTP error status code that was returned. For example, a + // 404 means the resource was not found. + HttpErrorStatusCode int64 `json:"httpErrorStatusCode,omitempty"` + + // Id: [Output Only] The unique identifier for the resource. This + // identifier is defined by the server. + Id uint64 `json:"id,omitempty,string"` + + // InsertTime: [Output Only] The time that this operation was requested. + // This value is in RFC3339 text format. + InsertTime string `json:"insertTime,omitempty"` + + // Kind: [Output Only] Type of the resource. Always compute#operation + // for Operation resources. + Kind string `json:"kind,omitempty"` + + // Name: [Output Only] Name of the resource. + Name string `json:"name,omitempty"` + + // OperationType: [Output Only] The type of operation, which can be + // insert, update, or delete. + OperationType string `json:"operationType,omitempty"` + + // Progress: [Output Only] An optional progress indicator that ranges + // from 0 to 100. There is no requirement that this be linear or support + // any granularity of operations. This should not be used to guess at + // when the operation will be complete. This number should monotonically + // increase as the operation progresses. + Progress int64 `json:"progress,omitempty"` + + // Region: [Output Only] URL of the region where the operation resides. + // Only applicable for regional resources. + Region string `json:"region,omitempty"` + + // SelfLink: [Output Only] Server-defined URL for the resource. + SelfLink string `json:"selfLink,omitempty"` + + // StartTime: [Output Only] The time that this operation was started by + // the server. This value is in RFC3339 text format. + StartTime string `json:"startTime,omitempty"` + + // Status: [Output Only] The status of the operation, which can be one + // of the following: PENDING, RUNNING, or DONE. + // + // Possible values: + // "DONE" + // "PENDING" + // "RUNNING" + Status string `json:"status,omitempty"` + + // StatusMessage: [Output Only] An optional textual description of the + // current status of the operation. + StatusMessage string `json:"statusMessage,omitempty"` + + // TargetId: [Output Only] The unique target ID, which identifies a + // specific incarnation of the target resource. + TargetId uint64 `json:"targetId,omitempty,string"` + + // TargetLink: [Output Only] The URL of the resource that the operation + // is modifying. + TargetLink string `json:"targetLink,omitempty"` + + // User: [Output Only] User who requested the operation, for example: + // user@example.com.
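+ //
+ // Illustrative sketch (assumes the ZoneOperations.Get call generated
+ // elsewhere in this package; svc, project, zone, and op are
+ // placeholders): polling a zonal Operation until it reaches DONE, then
+ // checking Error as described below.
+ //
+ //	for op.Status != "DONE" {
+ //		time.Sleep(2 * time.Second)
+ //		op, err = svc.ZoneOperations.Get(project, zone, op.Name).Do()
+ //		if err != nil {
+ //			return err
+ //		}
+ //	}
+ //	if op.Error != nil {
+ //		for _, e := range op.Error.Errors {
+ //			log.Printf("operation failed: %s: %s", e.Code, e.Message)
+ //		}
+ //	}
+ //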
+ User string `json:"user,omitempty"` + + // Warnings: [Output Only] If warning messages are generated during + // processing of the operation, this field will be populated. + Warnings []*OperationWarnings `json:"warnings,omitempty"` + + // Zone: [Output Only] URL of the zone where the operation resides. + Zone string `json:"zone,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "ClientOperationId") + // to unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *Operation) MarshalJSON() ([]byte, error) { + type noMethod Operation + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +// OperationError: [Output Only] If errors are generated during +// processing of the operation, this field will be populated. +type OperationError struct { + // Errors: [Output Only] The array of errors encountered while + // processing this operation. + Errors []*OperationErrorErrors `json:"errors,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Errors") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *OperationError) MarshalJSON() ([]byte, error) { + type noMethod OperationError + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +type OperationErrorErrors struct { + // Code: [Output Only] The error type identifier for this error. + Code string `json:"code,omitempty"` + + // Location: [Output Only] Indicates the field in the request which + // caused the error. This property is optional. + Location string `json:"location,omitempty"` + + // Message: [Output Only] An optional, human-readable error message. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *OperationErrorErrors) MarshalJSON() ([]byte, error) { + type noMethod OperationErrorErrors + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +type OperationWarnings struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. 
+ // + // Possible values: + // "DEPRECATED_RESOURCE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "INJECTED_KERNELS_DEPRECATED" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_NOT_DELETED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNREACHABLE" + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*OperationWarningsData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *OperationWarnings) MarshalJSON() ([]byte, error) { + type noMethod OperationWarnings + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +type OperationWarningsData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource, and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *OperationWarningsData) MarshalJSON() ([]byte, error) { + type noMethod OperationWarningsData + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +type OperationAggregatedList struct { + // Id: [Output Only] The unique identifier for the resource. This + // identifier is defined by the server. + Id string `json:"id,omitempty"` + + // Items: [Output Only] A map of scoped operation lists. + Items map[string]OperationsScopedList `json:"items,omitempty"` + + // Kind: [Output Only] Type of resource. Always + // compute#operationAggregatedList for aggregated lists of operations. + Kind string `json:"kind,omitempty"` + + // NextPageToken: [Output Only] This token allows you to get the next + // page of results for list requests. 
If the number of results is larger + // than maxResults, use the nextPageToken as a value for the query + // parameter pageToken in the next list request. Subsequent list + // requests will have their own nextPageToken to continue paging through + // the results. + NextPageToken string `json:"nextPageToken,omitempty"` + + // SelfLink: [Output Only] Server-defined URL for this resource. + SelfLink string `json:"selfLink,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Id") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *OperationAggregatedList) MarshalJSON() ([]byte, error) { + type noMethod OperationAggregatedList + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +// OperationList: Contains a list of Operation resources. +type OperationList struct { + // Id: [Output Only] The unique identifier for the resource. This + // identifier is defined by the server. + Id string `json:"id,omitempty"` + + // Items: [Output Only] The Operation resources. + Items []*Operation `json:"items,omitempty"` + + // Kind: [Output Only] Type of resource. Always compute#operations for + // Operations resource. + Kind string `json:"kind,omitempty"` + + // NextPageToken: [Output Only] This token allows you to get the next + // page of results for list requests. If the number of results is larger + // than maxResults, use the nextPageToken as a value for the query + // parameter pageToken in the next list request. Subsequent list + // requests will have their own nextPageToken to continue paging through + // the results. + NextPageToken string `json:"nextPageToken,omitempty"` + + // SelfLink: [Output Only] Server-defined URL for this resource. + SelfLink string `json:"selfLink,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Id") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *OperationList) MarshalJSON() ([]byte, error) { + type noMethod OperationList + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +type OperationsScopedList struct { + // Operations: [Output Only] List of operations contained in this scope. + Operations []*Operation `json:"operations,omitempty"` + + // Warning: [Output Only] Informational warning which replaces the list + // of operations when the list is empty. + Warning *OperationsScopedListWarning `json:"warning,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Operations") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. 
However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *OperationsScopedList) MarshalJSON() ([]byte, error) { + type noMethod OperationsScopedList + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +// OperationsScopedListWarning: [Output Only] Informational warning +// which replaces the list of operations when the list is empty. +type OperationsScopedListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. + // + // Possible values: + // "DEPRECATED_RESOURCE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "INJECTED_KERNELS_DEPRECATED" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_NOT_DELETED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNREACHABLE" + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*OperationsScopedListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *OperationsScopedListWarning) MarshalJSON() ([]byte, error) { + type noMethod OperationsScopedListWarning + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +type OperationsScopedListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource, and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. 
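+ //
+ // A minimal marshaling sketch (illustrative only): with
+ // ForceSendFields set, an empty field is emitted even though its JSON
+ // tag carries omitempty:
+ //
+ //   d := &OperationsScopedListWarningData{
+ //       Key:             "scope",
+ //       ForceSendFields: []string{"Value"},
+ //   }
+ //   b, err := d.MarshalJSON() // yields {"key":"scope","value":""}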
+ ForceSendFields []string `json:"-"` +} + +func (s *OperationsScopedListWarningData) MarshalJSON() ([]byte, error) { + type noMethod OperationsScopedListWarningData + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +// PathMatcher: A matcher for the path portion of the URL. The +// BackendService from the longest-matched rule will serve the URL. If +// no rule was matched, the default service will be used. +type PathMatcher struct { + // DefaultService: The URL to the BackendService resource. This will be + // used if none of the pathRules defined by this PathMatcher is met by + // the URL's path portion. + DefaultService string `json:"defaultService,omitempty"` + + // Description: An optional description of this resource. Provide this + // property when you create the resource. + Description string `json:"description,omitempty"` + + // Name: The name to which this PathMatcher is referred by the HostRule. + Name string `json:"name,omitempty"` + + // PathRules: The list of path rules. + PathRules []*PathRule `json:"pathRules,omitempty"` + + // ForceSendFields is a list of field names (e.g. "DefaultService") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *PathMatcher) MarshalJSON() ([]byte, error) { + type noMethod PathMatcher + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +// PathRule: A path-matching rule for a URL. If matched, will use the +// specified BackendService to handle the traffic arriving at this URL. +type PathRule struct { + // Paths: The list of path patterns to match. Each must start with / and + // the only place a * is allowed is at the end following a /. The string + // fed to the path matcher does not include any text after the first ? + // or #, and those chars are not allowed here. + Paths []string `json:"paths,omitempty"` + + // Service: The URL of the BackendService resource if this rule is + // matched. + Service string `json:"service,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Paths") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *PathRule) MarshalJSON() ([]byte, error) { + type noMethod PathRule + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +// Project: A Project resource. Projects can only be created in the +// Google Developers Console. Unless marked otherwise, values can only +// be modified in the console. +type Project struct { + // CommonInstanceMetadata: Metadata key/value pairs available to all + // instances contained in this project. See Custom metadata for more + // information. + CommonInstanceMetadata *Metadata `json:"commonInstanceMetadata,omitempty"` + + // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text + // format. 
+ CreationTimestamp string `json:"creationTimestamp,omitempty"` + + // Description: An optional textual description of the resource. + Description string `json:"description,omitempty"` + + // EnabledFeatures: Restricted features enabled for use on this project. + EnabledFeatures []string `json:"enabledFeatures,omitempty"` + + // Id: [Output Only] The unique identifier for the resource. This + // identifier is defined by the server. This is not the project ID, and + // is just a unique ID used by Compute Engine to identify resources. + Id uint64 `json:"id,omitempty,string"` + + // Kind: [Output Only] Type of the resource. Always compute#project for + // projects. + Kind string `json:"kind,omitempty"` + + // Name: The project ID. For example: my-example-project. Use the + // project ID to make requests to Compute Engine. + Name string `json:"name,omitempty"` + + // Quotas: [Output Only] Quotas assigned to this project. + Quotas []*Quota `json:"quotas,omitempty"` + + // SelfLink: [Output Only] Server-defined URL for the resource. + SelfLink string `json:"selfLink,omitempty"` + + // UsageExportLocation: The location in Cloud Storage and naming method + // of the daily usage report. + UsageExportLocation *UsageExportLocation `json:"usageExportLocation,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. + // "CommonInstanceMetadata") to unconditionally include in API requests. + // By default, fields with empty values are omitted from API requests. + // However, any non-pointer, non-interface field appearing in + // ForceSendFields will be sent to the server regardless of whether the + // field is empty or not. This may be used to include empty fields in + // Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *Project) MarshalJSON() ([]byte, error) { + type noMethod Project + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +// Quota: A quotas entry. +type Quota struct { + // Limit: [Output Only] Quota limit for this metric. + Limit float64 `json:"limit,omitempty"` + + // Metric: [Output Only] Name of the quota metric. + // + // Possible values: + // "AUTOSCALERS" + // "BACKEND_SERVICES" + // "CPUS" + // "DISKS_TOTAL_GB" + // "FIREWALLS" + // "FORWARDING_RULES" + // "HEALTH_CHECKS" + // "IMAGES" + // "INSTANCES" + // "INSTANCE_GROUPS" + // "INSTANCE_GROUP_MANAGERS" + // "INSTANCE_TEMPLATES" + // "IN_USE_ADDRESSES" + // "LOCAL_SSD_TOTAL_GB" + // "NETWORKS" + // "ROUTES" + // "SNAPSHOTS" + // "SSD_TOTAL_GB" + // "SSL_CERTIFICATES" + // "STATIC_ADDRESSES" + // "TARGET_HTTPS_PROXIES" + // "TARGET_HTTP_PROXIES" + // "TARGET_INSTANCES" + // "TARGET_POOLS" + // "TARGET_VPN_GATEWAYS" + // "URL_MAPS" + // "VPN_TUNNELS" + Metric string `json:"metric,omitempty"` + + // Usage: [Output Only] Current usage of this metric. + Usage float64 `json:"usage,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Limit") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. 
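+ //
+ // A usage sketch (illustrative only; p is a hypothetical *Project
+ // value as defined above): remaining headroom for a metric can be
+ // computed from Limit and Usage:
+ //
+ //   for _, q := range p.Quotas {
+ //       if q.Metric == "CPUS" {
+ //           remaining := q.Limit - q.Usage
+ //           _ = remaining
+ //       }
+ //   }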
+ ForceSendFields []string `json:"-"` +} + +func (s *Quota) MarshalJSON() ([]byte, error) { + type noMethod Quota + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +// Region: Region resource. +type Region struct { + // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text + // format. + CreationTimestamp string `json:"creationTimestamp,omitempty"` + + // Deprecated: [Output Only] The deprecation status associated with this + // region. + Deprecated *DeprecationStatus `json:"deprecated,omitempty"` + + // Description: [Output Only] Textual description of the resource. + Description string `json:"description,omitempty"` + + // Id: [Output Only] The unique identifier for the resource. This + // identifier is defined by the server. + Id uint64 `json:"id,omitempty,string"` + + // Kind: [Output Only] Type of the resource. Always compute#region for + // regions. + Kind string `json:"kind,omitempty"` + + // Name: [Output Only] Name of the resource. + Name string `json:"name,omitempty"` + + // Quotas: [Output Only] Quotas assigned to this region. + Quotas []*Quota `json:"quotas,omitempty"` + + // SelfLink: [Output Only] Server-defined URL for the resource. + SelfLink string `json:"selfLink,omitempty"` + + // Status: [Output Only] Status of the region, either UP or DOWN. + // + // Possible values: + // "DOWN" + // "UP" + Status string `json:"status,omitempty"` + + // Zones: [Output Only] A list of zones available in this region, in the + // form of resource URLs. + Zones []string `json:"zones,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "CreationTimestamp") + // to unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *Region) MarshalJSON() ([]byte, error) { + type noMethod Region + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +// RegionList: Contains a list of region resources. +type RegionList struct { + // Id: [Output Only] The unique identifier for the resource. This + // identifier is defined by the server. + Id string `json:"id,omitempty"` + + // Items: [Output Only] A list of Region resources. + Items []*Region `json:"items,omitempty"` + + // Kind: [Output Only] Type of resource. Always compute#regionList for + // lists of regions. + Kind string `json:"kind,omitempty"` + + // NextPageToken: [Output Only] This token allows you to get the next + // page of results for list requests. If the number of results is larger + // than maxResults, use the nextPageToken as a value for the query + // parameter pageToken in the next list request. Subsequent list + // requests will have their own nextPageToken to continue paging through + // the results. + NextPageToken string `json:"nextPageToken,omitempty"` + + // SelfLink: [Output Only] Server-defined URL for this resource. + SelfLink string `json:"selfLink,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. 
"Id") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *RegionList) MarshalJSON() ([]byte, error) { + type noMethod RegionList + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +type ResourceGroupReference struct { + // Group: A URI referencing one of the resource views listed in the + // backend service. + Group string `json:"group,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Group") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *ResourceGroupReference) MarshalJSON() ([]byte, error) { + type noMethod ResourceGroupReference + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +// Route: The route resource. A Route is a rule that specifies how +// certain packets should be handled by the virtual network. Routes are +// associated with instances by tag and the set of Routes for a +// particular instance is called its routing table. For each packet +// leaving a instance, the system searches that instance's routing table +// for a single best matching Route. Routes match packets by destination +// IP address, preferring smaller or more specific ranges over larger +// ones. If there is a tie, the system selects the Route with the +// smallest priority value. If there is still a tie, it uses the layer +// three and four packet headers to select just one of the remaining +// matching Routes. The packet is then forwarded as specified by the +// nextHop field of the winning Route -- either to another instance +// destination, a instance gateway or a Google Compute Engien-operated +// gateway. Packets that do not match any Route in the sending +// instance's routing table are dropped. +type Route struct { + // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text + // format. + CreationTimestamp string `json:"creationTimestamp,omitempty"` + + // Description: An optional description of this resource. Provide this + // property when you create the resource. + Description string `json:"description,omitempty"` + + // DestRange: The destination range of outgoing packets that this route + // applies to. + DestRange string `json:"destRange,omitempty"` + + // Id: [Output Only] The unique identifier for the resource. This + // identifier is defined by the server. + Id uint64 `json:"id,omitempty,string"` + + // Kind: [Output Only] Type of this resource. Always compute#routes for + // Route resources. + Kind string `json:"kind,omitempty"` + + // Name: Name of the resource; provided by the client when the resource + // is created. The name must be 1-63 characters long, and comply with + // RFC1035. Specifically, the name must be 1-63 characters long and + // match the regular expression [a-z]([-a-z0-9]*[a-z0-9])? 
which means + // the first character must be a lowercase letter, and all following + // characters must be a dash, lowercase letter, or digit, except the + // last character, which cannot be a dash. + Name string `json:"name,omitempty"` + + // Network: Fully-qualified URL of the network that this route applies + // to. + Network string `json:"network,omitempty"` + + // NextHopGateway: The URL to a gateway that should handle matching + // packets. Currently, this is only the internet gateway: + // projects//global/gateways/default-internet-gateway + NextHopGateway string `json:"nextHopGateway,omitempty"` + + // NextHopInstance: The fully-qualified URL to an instance that should + // handle matching packets. For + // example: + // https://www.googleapis.com/compute/v1/projects/project/zones/ + // zone/instances/ + NextHopInstance string `json:"nextHopInstance,omitempty"` + + // NextHopIp: The network IP address of an instance that should handle + // matching packets. + NextHopIp string `json:"nextHopIp,omitempty"` + + // NextHopNetwork: The URL of the local network if it should handle + // matching packets. + NextHopNetwork string `json:"nextHopNetwork,omitempty"` + + // NextHopVpnTunnel: The URL to a VpnTunnel that should handle matching + // packets. + NextHopVpnTunnel string `json:"nextHopVpnTunnel,omitempty"` + + // Priority: Breaks ties between Routes of equal specificity. Routes + // with smaller values win when tied with routes with larger values. + // Default value is 1000. A valid range is between 0 and 65535. + Priority int64 `json:"priority,omitempty"` + + // SelfLink: [Output Only] Server-defined fully-qualified URL for this + // resource. + SelfLink string `json:"selfLink,omitempty"` + + // Tags: A list of instance tags to which this route applies. + Tags []string `json:"tags,omitempty"` + + // Warnings: [Output Only] If potential misconfigurations are detected + // for this route, this field will be populated with warning messages. + Warnings []*RouteWarnings `json:"warnings,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "CreationTimestamp") + // to unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *Route) MarshalJSON() ([]byte, error) { + type noMethod Route + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +type RouteWarnings struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. 
+ // + // Possible values: + // "DEPRECATED_RESOURCE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "INJECTED_KERNELS_DEPRECATED" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_NOT_DELETED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNREACHABLE" + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*RouteWarningsData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *RouteWarnings) MarshalJSON() ([]byte, error) { + type noMethod RouteWarnings + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +type RouteWarningsData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource, and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *RouteWarningsData) MarshalJSON() ([]byte, error) { + type noMethod RouteWarningsData + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +// RouteList: Contains a list of route resources. +type RouteList struct { + // Id: [Output Only] Unique identifier for the resource. Defined by the + // server. + Id string `json:"id,omitempty"` + + // Items: A list of Route resources. + Items []*Route `json:"items,omitempty"` + + // Kind: Type of resource. + Kind string `json:"kind,omitempty"` + + // NextPageToken: [Output Only] This token allows you to get the next + // page of results for list requests. If the number of results is larger + // than maxResults, use the nextPageToken as a value for the query + // parameter pageToken in the next list request. Subsequent list + // requests will have their own nextPageToken to continue paging through + // the results. 
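+ //
+ // A paging sketch (illustrative only; svc and project are hypothetical
+ // names for a configured *Service and a project ID):
+ //
+ //   for tok := ""; ; {
+ //       list, err := svc.Routes.List(project).PageToken(tok).Do()
+ //       if err != nil {
+ //           return err
+ //       }
+ //       // consume list.Items here
+ //       if tok = list.NextPageToken; tok == "" {
+ //           break
+ //       }
+ //   }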
+ NextPageToken string `json:"nextPageToken,omitempty"` + + // SelfLink: [Output Only] Server-defined URL for this resource. + SelfLink string `json:"selfLink,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Id") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *RouteList) MarshalJSON() ([]byte, error) { + type noMethod RouteList + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +// Scheduling: Sets the scheduling options for an Instance. +type Scheduling struct { + // AutomaticRestart: Specifies whether the instance should be + // automatically restarted if it is terminated by Compute Engine (not + // terminated by a user). You can only set the automatic restart option + // for standard instances. Preemptible instances cannot be automatically + // restarted. + AutomaticRestart bool `json:"automaticRestart,omitempty"` + + // OnHostMaintenance: Defines the maintenance behavior for this + // instance. For standard instances, the default behavior is MIGRATE. + // For preemptible instances, the default and only possible behavior is + // TERMINATE. For more information, see Setting maintenance behavior. + // + // Possible values: + // "MIGRATE" + // "TERMINATE" + OnHostMaintenance string `json:"onHostMaintenance,omitempty"` + + // Preemptible: Whether the instance is preemptible. + Preemptible bool `json:"preemptible,omitempty"` + + // ForceSendFields is a list of field names (e.g. "AutomaticRestart") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *Scheduling) MarshalJSON() ([]byte, error) { + type noMethod Scheduling + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +// SerialPortOutput: An instance's serial console output. +type SerialPortOutput struct { + // Contents: [Output Only] The contents of the console output. + Contents string `json:"contents,omitempty"` + + // Kind: [Output Only] Type of the resource. Always + // compute#serialPortOutput for serial port output. + Kind string `json:"kind,omitempty"` + + // SelfLink: [Output Only] Server defined URL for the resource. + SelfLink string `json:"selfLink,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Contents") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. 
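+ //
+ // A retrieval sketch (illustrative only; svc, project, zone and
+ // instance are hypothetical names):
+ //
+ //   out, err := svc.Instances.GetSerialPortOutput(project, zone, instance).Do()
+ //   if err == nil {
+ //       fmt.Print(out.Contents)
+ //   }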
+ ForceSendFields []string `json:"-"`
+}
+
+func (s *SerialPortOutput) MarshalJSON() ([]byte, error) {
+ type noMethod SerialPortOutput
+ raw := noMethod(*s)
+ return gensupport.MarshalJSON(raw, s.ForceSendFields)
+}
+
+// ServiceAccount: A service account.
+type ServiceAccount struct {
+ // Email: Email address of the service account.
+ Email string `json:"email,omitempty"`
+
+ // Scopes: The list of scopes to be made available for this service
+ // account.
+ Scopes []string `json:"scopes,omitempty"`
+
+ // ForceSendFields is a list of field names (e.g. "Email") to
+ // unconditionally include in API requests. By default, fields with
+ // empty values are omitted from API requests. However, any non-pointer,
+ // non-interface field appearing in ForceSendFields will be sent to the
+ // server regardless of whether the field is empty or not. This may be
+ // used to include empty fields in Patch requests.
+ ForceSendFields []string `json:"-"`
+}
+
+func (s *ServiceAccount) MarshalJSON() ([]byte, error) {
+ type noMethod ServiceAccount
+ raw := noMethod(*s)
+ return gensupport.MarshalJSON(raw, s.ForceSendFields)
+}
+
+// Snapshot: A persistent disk snapshot resource.
+type Snapshot struct {
+ // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text
+ // format.
+ CreationTimestamp string `json:"creationTimestamp,omitempty"`
+
+ // Description: An optional description of this resource. Provide this
+ // property when you create the resource.
+ Description string `json:"description,omitempty"`
+
+ // DiskSizeGb: [Output Only] Size of the snapshot, specified in GB.
+ DiskSizeGb int64 `json:"diskSizeGb,omitempty,string"`
+
+ // Id: [Output Only] The unique identifier for the resource. This
+ // identifier is defined by the server.
+ Id uint64 `json:"id,omitempty,string"`
+
+ // Kind: [Output Only] Type of the resource. Always compute#snapshot for
+ // Snapshot resources.
+ Kind string `json:"kind,omitempty"`
+
+ // Licenses: Publicly visible licenses.
+ Licenses []string `json:"licenses,omitempty"`
+
+ // Name: Name of the resource; provided by the client when the resource
+ // is created. The name must be 1-63 characters long, and comply with
+ // RFC1035. Specifically, the name must be 1-63 characters long and
+ // match the regular expression [a-z]([-a-z0-9]*[a-z0-9])? which means
+ // the first character must be a lowercase letter, and all following
+ // characters must be a dash, lowercase letter, or digit, except the
+ // last character, which cannot be a dash.
+ Name string `json:"name,omitempty"`
+
+ // SelfLink: [Output Only] Server-defined URL for the resource.
+ SelfLink string `json:"selfLink,omitempty"`
+
+ // SourceDisk: The source disk used to create this snapshot.
+ SourceDisk string `json:"sourceDisk,omitempty"`
+
+ // SourceDiskId: [Output Only] The ID value of the disk used to create
+ // this snapshot. This value may be used to determine whether the
+ // snapshot was taken from the current or a previous instance of a given
+ // disk name.
+ SourceDiskId string `json:"sourceDiskId,omitempty"`
+
+ // Status: [Output Only] The status of the snapshot.
+ //
+ // Possible values:
+ //   "CREATING"
+ //   "DELETING"
+ //   "FAILED"
+ //   "READY"
+ //   "UPLOADING"
+ Status string `json:"status,omitempty"`
+
+ // StorageBytes: [Output Only] A size of the storage used by the
+ // snapshot. As snapshots share storage, this number is expected to
+ // change with snapshot creation/deletion.
+ StorageBytes int64 `json:"storageBytes,omitempty,string"`
+
+ // StorageBytesStatus: [Output Only] An indicator of whether
+ // storageBytes is in a stable state or is being adjusted as a result of
+ // shared storage reallocation.
+ //
+ // Possible values:
+ //   "UPDATING"
+ //   "UP_TO_DATE"
+ StorageBytesStatus string `json:"storageBytesStatus,omitempty"`
+
+ // ServerResponse contains the HTTP response code and headers from the
+ // server.
+ googleapi.ServerResponse `json:"-"`
+
+ // ForceSendFields is a list of field names (e.g. "CreationTimestamp")
+ // to unconditionally include in API requests. By default, fields with
+ // empty values are omitted from API requests. However, any non-pointer,
+ // non-interface field appearing in ForceSendFields will be sent to the
+ // server regardless of whether the field is empty or not. This may be
+ // used to include empty fields in Patch requests.
+ ForceSendFields []string `json:"-"`
+}
+
+func (s *Snapshot) MarshalJSON() ([]byte, error) {
+ type noMethod Snapshot
+ raw := noMethod(*s)
+ return gensupport.MarshalJSON(raw, s.ForceSendFields)
+}
+
+// SnapshotList: Contains a list of Snapshot resources.
+type SnapshotList struct {
+ // Id: [Output Only] The unique identifier for the resource. This
+ // identifier is defined by the server.
+ Id string `json:"id,omitempty"`
+
+ // Items: A list of Snapshot resources.
+ Items []*Snapshot `json:"items,omitempty"`
+
+ // Kind: Type of resource.
+ Kind string `json:"kind,omitempty"`
+
+ // NextPageToken: [Output Only] This token allows you to get the next
+ // page of results for list requests. If the number of results is larger
+ // than maxResults, use the nextPageToken as a value for the query
+ // parameter pageToken in the next list request. Subsequent list
+ // requests will have their own nextPageToken to continue paging through
+ // the results.
+ NextPageToken string `json:"nextPageToken,omitempty"`
+
+ // SelfLink: [Output Only] Server-defined URL for this resource.
+ SelfLink string `json:"selfLink,omitempty"`
+
+ // ServerResponse contains the HTTP response code and headers from the
+ // server.
+ googleapi.ServerResponse `json:"-"`
+
+ // ForceSendFields is a list of field names (e.g. "Id") to
+ // unconditionally include in API requests. By default, fields with
+ // empty values are omitted from API requests. However, any non-pointer,
+ // non-interface field appearing in ForceSendFields will be sent to the
+ // server regardless of whether the field is empty or not. This may be
+ // used to include empty fields in Patch requests.
+ ForceSendFields []string `json:"-"`
+}
+
+func (s *SnapshotList) MarshalJSON() ([]byte, error) {
+ type noMethod SnapshotList
+ raw := noMethod(*s)
+ return gensupport.MarshalJSON(raw, s.ForceSendFields)
+}
+
+// SslCertificate: An SslCertificate resource. This resource provides a
+// mechanism to upload an SSL key and certificate to the global HTTPS
+// load balancer to serve secure connections.
+type SslCertificate struct {
+ // Certificate: A local certificate file. The certificate must be in PEM
+ // format. The certificate chain must be no greater than 5 certs long.
+ // The chain must include at least one intermediate cert.
+ Certificate string `json:"certificate,omitempty"`
+
+ // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text
+ // format.
+ CreationTimestamp string `json:"creationTimestamp,omitempty"`
+
+ // Description: An optional description of this resource. Provide this
+ // property when you create the resource.
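+ //
+ // An upload sketch (illustrative only; svc, project, certPEM and
+ // keyPEM are hypothetical names for a configured *Service, a project
+ // ID and PEM-encoded strings):
+ //
+ //   op, err := svc.SslCertificates.Insert(project, &SslCertificate{
+ //       Name:        "example-cert",
+ //       Certificate: certPEM,
+ //       PrivateKey:  keyPEM,
+ //   }).Do()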
+ Description string `json:"description,omitempty"` + + // Id: [Output Only] The unique identifier for the resource. This + // identifier is defined by the server. + Id uint64 `json:"id,omitempty,string"` + + // Kind: [Output Only] Type of the resource. Always + // compute#sslCertificate for SSL certificates. + Kind string `json:"kind,omitempty"` + + // Name: Name of the resource. Provided by the client when the resource + // is created. The name must be 1-63 characters long, and comply with + // RFC1035. Specifically, the name must be 1-63 characters long and + // match the regular expression [a-z]([-a-z0-9]*[a-z0-9])? which means + // the first character must be a lowercase letter, and all following + // characters must be a dash, lowercase letter, or digit, except the + // last character, which cannot be a dash. + Name string `json:"name,omitempty"` + + // PrivateKey: A write-only private key in PEM format. Only insert RPCs + // will include this field. + PrivateKey string `json:"privateKey,omitempty"` + + // SelfLink: [Output only] Server-defined URL for the resource. + SelfLink string `json:"selfLink,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Certificate") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *SslCertificate) MarshalJSON() ([]byte, error) { + type noMethod SslCertificate + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +// SslCertificateList: Contains a list of SslCertificate resources. +type SslCertificateList struct { + // Id: [Output Only] Unique identifier for the resource. Defined by the + // server. + Id string `json:"id,omitempty"` + + // Items: A list of SslCertificate resources. + Items []*SslCertificate `json:"items,omitempty"` + + // Kind: Type of resource. + Kind string `json:"kind,omitempty"` + + // NextPageToken: [Output Only] This token allows you to get the next + // page of results for list requests. If the number of results is larger + // than maxResults, use the nextPageToken as a value for the query + // parameter pageToken in the next list request. Subsequent list + // requests will have their own nextPageToken to continue paging through + // the results. + NextPageToken string `json:"nextPageToken,omitempty"` + + // SelfLink: [Output Only] Server-defined URL for this resource. + SelfLink string `json:"selfLink,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Id") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. 
+ ForceSendFields []string `json:"-"`
+}
+
+func (s *SslCertificateList) MarshalJSON() ([]byte, error) {
+ type noMethod SslCertificateList
+ raw := noMethod(*s)
+ return gensupport.MarshalJSON(raw, s.ForceSendFields)
+}
+
+// Tags: A set of instance tags.
+type Tags struct {
+ // Fingerprint: Specifies a fingerprint for this request, which is
+ // essentially a hash of the metadata's contents and used for optimistic
+ // locking. The fingerprint is initially generated by Compute Engine and
+ // changes after every request to modify or update metadata. You must
+ // always provide an up-to-date fingerprint hash in order to update or
+ // change metadata.
+ //
+ // To see the latest fingerprint, make a get() request to the instance.
+ Fingerprint string `json:"fingerprint,omitempty"`
+
+ // Items: An array of tags. Each tag must be 1-63 characters long, and
+ // comply with RFC1035.
+ Items []string `json:"items,omitempty"`
+
+ // ForceSendFields is a list of field names (e.g. "Fingerprint") to
+ // unconditionally include in API requests. By default, fields with
+ // empty values are omitted from API requests. However, any non-pointer,
+ // non-interface field appearing in ForceSendFields will be sent to the
+ // server regardless of whether the field is empty or not. This may be
+ // used to include empty fields in Patch requests.
+ ForceSendFields []string `json:"-"`
+}
+
+func (s *Tags) MarshalJSON() ([]byte, error) {
+ type noMethod Tags
+ raw := noMethod(*s)
+ return gensupport.MarshalJSON(raw, s.ForceSendFields)
+}
+
+// TargetHttpProxy: A TargetHttpProxy resource. This resource defines an
+// HTTP proxy.
+type TargetHttpProxy struct {
+ // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text
+ // format.
+ CreationTimestamp string `json:"creationTimestamp,omitempty"`
+
+ // Description: An optional description of this resource. Provide this
+ // property when you create the resource.
+ Description string `json:"description,omitempty"`
+
+ // Id: [Output Only] The unique identifier for the resource. This
+ // identifier is defined by the server.
+ Id uint64 `json:"id,omitempty,string"`
+
+ // Kind: [Output Only] Type of resource. Always compute#targetHttpProxy
+ // for target HTTP proxies.
+ Kind string `json:"kind,omitempty"`
+
+ // Name: Name of the resource; provided by the client when the resource
+ // is created. The name must be 1-63 characters long, and comply with
+ // RFC1035. Specifically, the name must be 1-63 characters long and
+ // match the regular expression [a-z]([-a-z0-9]*[a-z0-9])? which means
+ // the first character must be a lowercase letter, and all following
+ // characters must be a dash, lowercase letter, or digit, except the
+ // last character, which cannot be a dash.
+ Name string `json:"name,omitempty"`
+
+ // SelfLink: [Output Only] Server-defined URL for the resource.
+ SelfLink string `json:"selfLink,omitempty"`
+
+ // UrlMap: URL to the UrlMap resource that defines the mapping from URL
+ // to the BackendService.
+ UrlMap string `json:"urlMap,omitempty"`
+
+ // ServerResponse contains the HTTP response code and headers from the
+ // server.
+ googleapi.ServerResponse `json:"-"`
+
+ // ForceSendFields is a list of field names (e.g. "CreationTimestamp")
+ // to unconditionally include in API requests. By default, fields with
+ // empty values are omitted from API requests. However, any non-pointer,
+ // non-interface field appearing in ForceSendFields will be sent to the
+ // server regardless of whether the field is empty or not.
This may be
+ // used to include empty fields in Patch requests.
+ ForceSendFields []string `json:"-"`
+}
+
+func (s *TargetHttpProxy) MarshalJSON() ([]byte, error) {
+ type noMethod TargetHttpProxy
+ raw := noMethod(*s)
+ return gensupport.MarshalJSON(raw, s.ForceSendFields)
+}
+
+// TargetHttpProxyList: A list of TargetHttpProxy resources.
+type TargetHttpProxyList struct {
+ // Id: [Output Only] The unique identifier for the resource. This
+ // identifier is defined by the server.
+ Id string `json:"id,omitempty"`
+
+ // Items: A list of TargetHttpProxy resources.
+ Items []*TargetHttpProxy `json:"items,omitempty"`
+
+ // Kind: Type of resource. Always compute#targetHttpProxyList for lists
+ // of Target HTTP proxies.
+ Kind string `json:"kind,omitempty"`
+
+ // NextPageToken: [Output Only] This token allows you to get the next
+ // page of results for list requests. If the number of results is larger
+ // than maxResults, use the nextPageToken as a value for the query
+ // parameter pageToken in the next list request. Subsequent list
+ // requests will have their own nextPageToken to continue paging through
+ // the results.
+ NextPageToken string `json:"nextPageToken,omitempty"`
+
+ // SelfLink: [Output Only] Server-defined URL for this resource.
+ SelfLink string `json:"selfLink,omitempty"`
+
+ // ServerResponse contains the HTTP response code and headers from the
+ // server.
+ googleapi.ServerResponse `json:"-"`
+
+ // ForceSendFields is a list of field names (e.g. "Id") to
+ // unconditionally include in API requests. By default, fields with
+ // empty values are omitted from API requests. However, any non-pointer,
+ // non-interface field appearing in ForceSendFields will be sent to the
+ // server regardless of whether the field is empty or not. This may be
+ // used to include empty fields in Patch requests.
+ ForceSendFields []string `json:"-"`
+}
+
+func (s *TargetHttpProxyList) MarshalJSON() ([]byte, error) {
+ type noMethod TargetHttpProxyList
+ raw := noMethod(*s)
+ return gensupport.MarshalJSON(raw, s.ForceSendFields)
+}
+
+type TargetHttpsProxiesSetSslCertificatesRequest struct {
+ // SslCertificates: New set of URLs to SslCertificate resources to
+ // associate with this TargetHttpsProxy. Currently, exactly one SSL
+ // certificate must be specified.
+ SslCertificates []string `json:"sslCertificates,omitempty"`
+
+ // ForceSendFields is a list of field names (e.g. "SslCertificates") to
+ // unconditionally include in API requests. By default, fields with
+ // empty values are omitted from API requests. However, any non-pointer,
+ // non-interface field appearing in ForceSendFields will be sent to the
+ // server regardless of whether the field is empty or not. This may be
+ // used to include empty fields in Patch requests.
+ ForceSendFields []string `json:"-"`
+}
+
+func (s *TargetHttpsProxiesSetSslCertificatesRequest) MarshalJSON() ([]byte, error) {
+ type noMethod TargetHttpsProxiesSetSslCertificatesRequest
+ raw := noMethod(*s)
+ return gensupport.MarshalJSON(raw, s.ForceSendFields)
+}
+
+// TargetHttpsProxy: A TargetHttpsProxy resource. This resource defines
+// an HTTPS proxy.
+type TargetHttpsProxy struct {
+ // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text
+ // format.
+ CreationTimestamp string `json:"creationTimestamp,omitempty"`
+
+ // Description: An optional description of this resource. Provide this
+ // property when you create the resource.
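+ //
+ // A certificate-swap sketch for the request type above (illustrative
+ // only; svc, project, proxyName and certSelfLink are hypothetical):
+ //
+ //   req := &TargetHttpsProxiesSetSslCertificatesRequest{
+ //       SslCertificates: []string{certSelfLink},
+ //   }
+ //   op, err := svc.TargetHttpsProxies.SetSslCertificates(project, proxyName, req).Do()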
+ Description string `json:"description,omitempty"` + + // Id: [Output Only] The unique identifier for the resource. This + // identifier is defined by the server. + Id uint64 `json:"id,omitempty,string"` + + // Kind: [Output Only] Type of the resource. Always + // compute#targetHttpsProxy for target HTTPS proxies. + Kind string `json:"kind,omitempty"` + + // Name: Name of the resource. Provided by the client when the resource + // is created. The name must be 1-63 characters long, and comply with + // RFC1035. Specifically, the name must be 1-63 characters long and + // match the regular expression [a-z]([-a-z0-9]*[a-z0-9])? which means + // the first character must be a lowercase letter, and all following + // characters must be a dash, lowercase letter, or digit, except the + // last character, which cannot be a dash. + Name string `json:"name,omitempty"` + + // SelfLink: [Output Only] Server-defined URL for the resource. + SelfLink string `json:"selfLink,omitempty"` + + // SslCertificates: URLs to SslCertificate resources that are used to + // authenticate connections to Backends. Currently exactly one SSL + // certificate must be specified. + SslCertificates []string `json:"sslCertificates,omitempty"` + + // UrlMap: URL to the UrlMap resource that defines the mapping from URL + // to the BackendService. + UrlMap string `json:"urlMap,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "CreationTimestamp") + // to unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *TargetHttpsProxy) MarshalJSON() ([]byte, error) { + type noMethod TargetHttpsProxy + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +// TargetHttpsProxyList: Contains a list of TargetHttpsProxy resources. +type TargetHttpsProxyList struct { + // Id: [Output Only] The unique identifier for the resource. This + // identifier is defined by the server. + Id string `json:"id,omitempty"` + + // Items: A list of TargetHttpsProxy resources. + Items []*TargetHttpsProxy `json:"items,omitempty"` + + // Kind: Type of resource. + Kind string `json:"kind,omitempty"` + + // NextPageToken: [Output Only] This token allows you to get the next + // page of results for list requests. If the number of results is larger + // than maxResults, use the nextPageToken as a value for the query + // parameter pageToken in the next list request. Subsequent list + // requests will have their own nextPageToken to continue paging through + // the results. + NextPageToken string `json:"nextPageToken,omitempty"` + + // SelfLink: [Output Only] Server-defined URL for this resource. + SelfLink string `json:"selfLink,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Id") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. 
However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *TargetHttpsProxyList) MarshalJSON() ([]byte, error) { + type noMethod TargetHttpsProxyList + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +// TargetInstance: A TargetInstance resource. This resource defines an +// endpoint instance that terminates traffic of certain protocols. +type TargetInstance struct { + // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text + // format. + CreationTimestamp string `json:"creationTimestamp,omitempty"` + + // Description: An optional description of this resource. Provide this + // property when you create the resource. + Description string `json:"description,omitempty"` + + // Id: [Output Only] The unique identifier for the resource. This + // identifier is defined by the server. + Id uint64 `json:"id,omitempty,string"` + + // Instance: The URL to the instance that terminates the relevant + // traffic. + Instance string `json:"instance,omitempty"` + + // Kind: [Output Only] The type of the resource. Always + // compute#targetInstance for target instances. + Kind string `json:"kind,omitempty"` + + // Name: Name of the resource. Provided by the client when the resource + // is created. The name must be 1-63 characters long, and comply with + // RFC1035. Specifically, the name must be 1-63 characters long and + // match the regular expression [a-z]([-a-z0-9]*[a-z0-9])? which means + // the first character must be a lowercase letter, and all following + // characters must be a dash, lowercase letter, or digit, except the + // last character, which cannot be a dash. + Name string `json:"name,omitempty"` + + // NatPolicy: NAT option controlling how IPs are NAT'ed to the instance. + // Currently only NO_NAT (default value) is supported. + // + // Possible values: + // "NO_NAT" + NatPolicy string `json:"natPolicy,omitempty"` + + // SelfLink: [Output Only] Server-defined URL for the resource. + SelfLink string `json:"selfLink,omitempty"` + + // Zone: [Output Only] URL of the zone where the target instance + // resides. + Zone string `json:"zone,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "CreationTimestamp") + // to unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *TargetInstance) MarshalJSON() ([]byte, error) { + type noMethod TargetInstance + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +type TargetInstanceAggregatedList struct { + // Id: [Output Only] Unique identifier for the resource; defined by the + // server. + Id string `json:"id,omitempty"` + + // Items: A map of scoped target instance lists. + Items map[string]TargetInstancesScopedList `json:"items,omitempty"` + + // Kind: Type of resource. 
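+ //
+ // An iteration sketch for the Items map above (illustrative only; svc
+ // and project are hypothetical): Items maps a scope such as
+ // "zones/us-central1-a" to the target instances in that scope:
+ //
+ //   agg, err := svc.TargetInstances.AggregatedList(project).Do()
+ //   if err == nil {
+ //       for scope, scoped := range agg.Items {
+ //           _ = scope
+ //           for _, ti := range scoped.TargetInstances {
+ //               _ = ti.Name
+ //           }
+ //       }
+ //   }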
+ Kind string `json:"kind,omitempty"` + + // NextPageToken: [Output Only] This token allows you to get the next + // page of results for list requests. If the number of results is larger + // than maxResults, use the nextPageToken as a value for the query + // parameter pageToken in the next list request. Subsequent list + // requests will have their own nextPageToken to continue paging through + // the results. + NextPageToken string `json:"nextPageToken,omitempty"` + + // SelfLink: [Output Only] Server-defined URL for this resource. + SelfLink string `json:"selfLink,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Id") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *TargetInstanceAggregatedList) MarshalJSON() ([]byte, error) { + type noMethod TargetInstanceAggregatedList + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +// TargetInstanceList: Contains a list of TargetInstance resources. +type TargetInstanceList struct { + // Id: [Output Only] The unique identifier for the resource. This + // identifier is defined by the server. + Id string `json:"id,omitempty"` + + // Items: A list of TargetInstance resources. + Items []*TargetInstance `json:"items,omitempty"` + + // Kind: Type of resource. + Kind string `json:"kind,omitempty"` + + // NextPageToken: [Output Only] This token allows you to get the next + // page of results for list requests. If the number of results is larger + // than maxResults, use the nextPageToken as a value for the query + // parameter pageToken in the next list request. Subsequent list + // requests will have their own nextPageToken to continue paging through + // the results. + NextPageToken string `json:"nextPageToken,omitempty"` + + // SelfLink: [Output Only] Server-defined URL for this resource. + SelfLink string `json:"selfLink,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Id") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *TargetInstanceList) MarshalJSON() ([]byte, error) { + type noMethod TargetInstanceList + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +type TargetInstancesScopedList struct { + // TargetInstances: List of target instances contained in this scope. + TargetInstances []*TargetInstance `json:"targetInstances,omitempty"` + + // Warning: Informational warning which replaces the list of addresses + // when the list is empty. + Warning *TargetInstancesScopedListWarning `json:"warning,omitempty"` + + // ForceSendFields is a list of field names (e.g. 
"TargetInstances") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *TargetInstancesScopedList) MarshalJSON() ([]byte, error) { + type noMethod TargetInstancesScopedList + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +// TargetInstancesScopedListWarning: Informational warning which +// replaces the list of addresses when the list is empty. +type TargetInstancesScopedListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. + // + // Possible values: + // "DEPRECATED_RESOURCE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "INJECTED_KERNELS_DEPRECATED" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_NOT_DELETED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNREACHABLE" + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*TargetInstancesScopedListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *TargetInstancesScopedListWarning) MarshalJSON() ([]byte, error) { + type noMethod TargetInstancesScopedListWarning + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +type TargetInstancesScopedListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource, and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. 
This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *TargetInstancesScopedListWarningData) MarshalJSON() ([]byte, error) { + type noMethod TargetInstancesScopedListWarningData + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +// TargetPool: A TargetPool resource. This resource defines a pool of +// instances, associated HttpHealthCheck resources, and the fallback +// TargetPool. +type TargetPool struct { + // BackupPool: This field is applicable only when the containing target + // pool is serving a forwarding rule as the primary pool, and its + // failoverRatio field is properly set to a value between [0, + // 1]. + // + // backupPool and failoverRatio together define the fallback behavior of + // the primary target pool: if the ratio of the healthy instances in the + // primary pool is at or below failoverRatio, traffic arriving at the + // load-balanced IP will be directed to the backup pool. + // + // In the case where failoverRatio and backupPool are not set, or all the + // instances in the backup pool are unhealthy, the traffic will be + // directed back to the primary pool in the "force" mode, where traffic + // will be spread to the healthy instances with the best effort, or to + // all instances when no instance is healthy. + BackupPool string `json:"backupPool,omitempty"` + + // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text + // format. + CreationTimestamp string `json:"creationTimestamp,omitempty"` + + // Description: An optional description of this resource. Provide this + // property when you create the resource. + Description string `json:"description,omitempty"` + + // FailoverRatio: This field is applicable only when the containing + // target pool is serving a forwarding rule as the primary pool (i.e., + // not as a backup pool to some other target pool). The value of the + // field must be in [0, 1]. + // + // If set, backupPool must also be set. They together define the + // fallback behavior of the primary target pool: if the ratio of the + // healthy instances in the primary pool is at or below this number, + // traffic arriving at the load-balanced IP will be directed to the + // backup pool. + // + // In the case where failoverRatio is not set or all the instances in the + // backup pool are unhealthy, the traffic will be directed back to the + // primary pool in the "force" mode, where traffic will be spread to the + // healthy instances with the best effort, or to all instances when no + // instance is healthy. + FailoverRatio float64 `json:"failoverRatio,omitempty"` + + // HealthChecks: A list of URLs to the HttpHealthCheck resource. A + // member instance in this pool is considered healthy if and only if all + // specified health checks pass. An empty list means all member + // instances will be considered healthy at all times. + HealthChecks []string `json:"healthChecks,omitempty"` + + // Id: [Output Only] The unique identifier for the resource. This + // identifier is defined by the server. + Id uint64 `json:"id,omitempty,string"` + + // Instances: A list of resource URLs to the member virtual machines + // serving this pool. They must live in zones contained in the same + // region as this pool. + Instances []string `json:"instances,omitempty"` + + // Kind: [Output Only] Type of the resource. Always compute#targetPool + // for target pools. + Kind string `json:"kind,omitempty"` + + // Name: Name of the resource. 
Provided by the client when the resource + // is created. The name must be 1-63 characters long, and comply with + // RFC1035. Specifically, the name must be 1-63 characters long and + // match the regular expression [a-z]([-a-z0-9]*[a-z0-9])? which means + // the first character must be a lowercase letter, and all following + // characters must be a dash, lowercase letter, or digit, except the + // last character, which cannot be a dash. + Name string `json:"name,omitempty"` + + // Region: [Output Only] URL of the region where the target pool + // resides. + Region string `json:"region,omitempty"` + + // SelfLink: [Output Only] Server-defined URL for the resource. + SelfLink string `json:"selfLink,omitempty"` + + // SessionAffinity: Session affinity option, must be one of the + // following values: + // NONE: Connections from the same client IP may go to any instance in + // the pool. + // CLIENT_IP: Connections from the same client IP will go to the same + // instance in the pool while that instance remains + // healthy. + // CLIENT_IP_PROTO: Connections from the same client IP with the same IP + // protocol will go to the same instance in the pool while that instance + // remains healthy. + // + // Possible values: + // "CLIENT_IP" + // "CLIENT_IP_PROTO" + // "NONE" + SessionAffinity string `json:"sessionAffinity,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "BackupPool") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *TargetPool) MarshalJSON() ([]byte, error) { + type noMethod TargetPool + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +type TargetPoolAggregatedList struct { + // Id: [Output Only] Unique identifier for the resource. Defined by the + // server. + Id string `json:"id,omitempty"` + + // Items: A map of scoped target pool lists. + Items map[string]TargetPoolsScopedList `json:"items,omitempty"` + + // Kind: Type of resource. + Kind string `json:"kind,omitempty"` + + // NextPageToken: [Output Only] This token allows you to get the next + // page of results for list requests. If the number of results is larger + // than maxResults, use the nextPageToken as a value for the query + // parameter pageToken in the next list request. Subsequent list + // requests will have their own nextPageToken to continue paging through + // the results. + NextPageToken string `json:"nextPageToken,omitempty"` + + // SelfLink: [Output Only] Server-defined URL for this resource. + SelfLink string `json:"selfLink,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Id") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. 
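+
+ // A minimal paging sketch, assuming the TargetPools service generated
+ // in this package; the project name is a placeholder:
+ //
+ //    call := svc.TargetPools.AggregatedList("my-project")
+ //    for {
+ //        agg, err := call.Do()
+ //        if err != nil {
+ //            break // handle the error in real code
+ //        }
+ //        // ... consume agg.Items here ...
+ //        if agg.NextPageToken == "" {
+ //            break
+ //        }
+ //        call.PageToken(agg.NextPageToken)
+ //    }
+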
+ ForceSendFields []string `json:"-"` +} + +func (s *TargetPoolAggregatedList) MarshalJSON() ([]byte, error) { + type noMethod TargetPoolAggregatedList + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +type TargetPoolInstanceHealth struct { + HealthStatus []*HealthStatus `json:"healthStatus,omitempty"` + + // Kind: Type of resource. + Kind string `json:"kind,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "HealthStatus") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *TargetPoolInstanceHealth) MarshalJSON() ([]byte, error) { + type noMethod TargetPoolInstanceHealth + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +// TargetPoolList: Contains a list of TargetPool resources. +type TargetPoolList struct { + // Id: [Output Only] Unique identifier for the resource. Defined by the + // server. + Id string `json:"id,omitempty"` + + // Items: A list of TargetPool resources. + Items []*TargetPool `json:"items,omitempty"` + + // Kind: Type of resource. + Kind string `json:"kind,omitempty"` + + // NextPageToken: [Output Only] This token allows you to get the next + // page of results for list requests. If the number of results is larger + // than maxResults, use the nextPageToken as a value for the query + // parameter pageToken in the next list request. Subsequent list + // requests will have their own nextPageToken to continue paging through + // the results. + NextPageToken string `json:"nextPageToken,omitempty"` + + // SelfLink: [Output Only] Server-defined URL for this resource. + SelfLink string `json:"selfLink,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Id") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *TargetPoolList) MarshalJSON() ([]byte, error) { + type noMethod TargetPoolList + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +type TargetPoolsAddHealthCheckRequest struct { + // HealthChecks: Health check URLs to be added to targetPool. + HealthChecks []*HealthCheckReference `json:"healthChecks,omitempty"` + + // ForceSendFields is a list of field names (e.g. "HealthChecks") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. 
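+
+ // A minimal usage sketch, assuming the TargetPools.AddHealthCheck call
+ // generated in this package; project, region, pool, and hcURL are
+ // placeholders:
+ //
+ //    req := &TargetPoolsAddHealthCheckRequest{
+ //        HealthChecks: []*HealthCheckReference{{HealthCheck: hcURL}},
+ //    }
+ //    op, err := svc.TargetPools.AddHealthCheck("my-project", "us-central1", "my-pool", req).Do()
+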
+ ForceSendFields []string `json:"-"` +} + +func (s *TargetPoolsAddHealthCheckRequest) MarshalJSON() ([]byte, error) { + type noMethod TargetPoolsAddHealthCheckRequest + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +type TargetPoolsAddInstanceRequest struct { + // Instances: URLs of the instances to be added to targetPool. + Instances []*InstanceReference `json:"instances,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Instances") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *TargetPoolsAddInstanceRequest) MarshalJSON() ([]byte, error) { + type noMethod TargetPoolsAddInstanceRequest + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +type TargetPoolsRemoveHealthCheckRequest struct { + // HealthChecks: Health check URLs to be removed from targetPool. + HealthChecks []*HealthCheckReference `json:"healthChecks,omitempty"` + + // ForceSendFields is a list of field names (e.g. "HealthChecks") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *TargetPoolsRemoveHealthCheckRequest) MarshalJSON() ([]byte, error) { + type noMethod TargetPoolsRemoveHealthCheckRequest + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +type TargetPoolsRemoveInstanceRequest struct { + // Instances: URLs of the instances to be removed from targetPool. + Instances []*InstanceReference `json:"instances,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Instances") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *TargetPoolsRemoveInstanceRequest) MarshalJSON() ([]byte, error) { + type noMethod TargetPoolsRemoveInstanceRequest + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +type TargetPoolsScopedList struct { + // TargetPools: List of target pools contained in this scope. + TargetPools []*TargetPool `json:"targetPools,omitempty"` + + // Warning: Informational warning which replaces the list of addresses + // when the list is empty. + Warning *TargetPoolsScopedListWarning `json:"warning,omitempty"` + + // ForceSendFields is a list of field names (e.g. "TargetPools") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. 
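+
+ // A minimal handling sketch: in aggregated results an empty scope
+ // carries Warning instead of TargetPools, so callers can skip it; agg
+ // is a placeholder for a fetched TargetPoolAggregatedList:
+ //
+ //    for scope, scoped := range agg.Items {
+ //        if len(scoped.TargetPools) == 0 {
+ //            continue // scoped.Warning explains why, e.g. NO_RESULTS_ON_PAGE
+ //        }
+ //        _ = scope // ... use scoped.TargetPools ...
+ //    }
+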
+ ForceSendFields []string `json:"-"` +} + +func (s *TargetPoolsScopedList) MarshalJSON() ([]byte, error) { + type noMethod TargetPoolsScopedList + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +// TargetPoolsScopedListWarning: Informational warning which replaces +// the list of addresses when the list is empty. +type TargetPoolsScopedListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. + // + // Possible values: + // "DEPRECATED_RESOURCE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "INJECTED_KERNELS_DEPRECATED" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_NOT_DELETED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNREACHABLE" + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*TargetPoolsScopedListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *TargetPoolsScopedListWarning) MarshalJSON() ([]byte, error) { + type noMethod TargetPoolsScopedListWarning + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +type TargetPoolsScopedListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource, and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. 
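+
+ // A minimal reading sketch; warning is a placeholder for a populated
+ // TargetPoolsScopedListWarning:
+ //
+ //    for _, d := range warning.Data {
+ //        fmt.Printf("%s=%s\n", d.Key, d.Value)
+ //    }
+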
+ ForceSendFields []string `json:"-"` +} + +func (s *TargetPoolsScopedListWarningData) MarshalJSON() ([]byte, error) { + type noMethod TargetPoolsScopedListWarningData + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +type TargetReference struct { + Target string `json:"target,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Target") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *TargetReference) MarshalJSON() ([]byte, error) { + type noMethod TargetReference + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +type TargetVpnGateway struct { + // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text + // format. + CreationTimestamp string `json:"creationTimestamp,omitempty"` + + // Description: An optional description of this resource. Provide this + // property when you create the resource. + Description string `json:"description,omitempty"` + + // ForwardingRules: [Output Only] A list of URLs to the ForwardingRule + // resources. ForwardingRules are created using + // compute.forwardingRules.insert and associated to a VPN gateway. + ForwardingRules []string `json:"forwardingRules,omitempty"` + + // Id: [Output Only] The unique identifier for the resource. This + // identifier is defined by the server. + Id uint64 `json:"id,omitempty,string"` + + // Kind: [Output Only] Type of resource. Always compute#targetVpnGateway + // for target VPN gateways. + Kind string `json:"kind,omitempty"` + + // Name: Name of the resource; provided by the client when the resource + // is created. The name must be 1-63 characters long, and comply with + // RFC1035. Specifically, the name must be 1-63 characters long and + // match the regular expression [a-z]([-a-z0-9]*[a-z0-9])? which means + // the first character must be a lowercase letter, and all following + // characters must be a dash, lowercase letter, or digit, except the + // last character, which cannot be a dash. + Name string `json:"name,omitempty"` + + // Network: URL of the network to which this VPN gateway is attached. + // Provided by the client when the VPN gateway is created. + Network string `json:"network,omitempty"` + + // Region: [Output Only] URL of the region where the target VPN gateway + // resides. + Region string `json:"region,omitempty"` + + // SelfLink: [Output Only] Server-defined URL for the resource. + SelfLink string `json:"selfLink,omitempty"` + + // Status: [Output Only] The status of the VPN gateway. + // + // Possible values: + // "CREATING" + // "DELETING" + // "FAILED" + // "READY" + Status string `json:"status,omitempty"` + + // Tunnels: [Output Only] A list of URLs to VpnTunnel resources. + // VpnTunnels are created using compute.vpntunnels.insert and associated + // to a VPN gateway. + Tunnels []string `json:"tunnels,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "CreationTimestamp") + // to unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. 
However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *TargetVpnGateway) MarshalJSON() ([]byte, error) { + type noMethod TargetVpnGateway + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +type TargetVpnGatewayAggregatedList struct { + // Id: [Output Only] The unique identifier for the resource. This + // identifier is defined by the server. + Id string `json:"id,omitempty"` + + // Items: A map of scoped target vpn gateway lists. + Items map[string]TargetVpnGatewaysScopedList `json:"items,omitempty"` + + // Kind: [Output Only] Type of resource. Always compute#targetVpnGateway + // for target VPN gateways. + Kind string `json:"kind,omitempty"` + + // NextPageToken: [Output Only] This token allows you to get the next + // page of results for list requests. If the number of results is larger + // than maxResults, use the nextPageToken as a value for the query + // parameter pageToken in the next list request. Subsequent list + // requests will have their own nextPageToken to continue paging through + // the results. + NextPageToken string `json:"nextPageToken,omitempty"` + + // SelfLink: [Output Only] Server-defined URL for the resource. + SelfLink string `json:"selfLink,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Id") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *TargetVpnGatewayAggregatedList) MarshalJSON() ([]byte, error) { + type noMethod TargetVpnGatewayAggregatedList + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +// TargetVpnGatewayList: Contains a list of TargetVpnGateway resources. +type TargetVpnGatewayList struct { + // Id: [Output Only] The unique identifier for the resource. This + // identifier is defined by the server. + Id string `json:"id,omitempty"` + + // Items: [Output Only] A list of TargetVpnGateway resources. + Items []*TargetVpnGateway `json:"items,omitempty"` + + // Kind: [Output Only] Type of resource. Always compute#targetVpnGateway + // for target VPN gateways. + Kind string `json:"kind,omitempty"` + + // NextPageToken: [Output Only] This token allows you to get the next + // page of results for list requests. If the number of results is larger + // than maxResults, use the nextPageToken as a value for the query + // parameter pageToken in the next list request. Subsequent list + // requests will have their own nextPageToken to continue paging through + // the results. + NextPageToken string `json:"nextPageToken,omitempty"` + + // SelfLink: [Output Only] Server-defined URL for the resource. + SelfLink string `json:"selfLink,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Id") to + // unconditionally include in API requests. 
By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *TargetVpnGatewayList) MarshalJSON() ([]byte, error) { + type noMethod TargetVpnGatewayList + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +type TargetVpnGatewaysScopedList struct { + // TargetVpnGateways: [Output Only] List of target vpn gateways + // contained in this scope. + TargetVpnGateways []*TargetVpnGateway `json:"targetVpnGateways,omitempty"` + + // Warning: [Output Only] Informational warning which replaces the list + // of addresses when the list is empty. + Warning *TargetVpnGatewaysScopedListWarning `json:"warning,omitempty"` + + // ForceSendFields is a list of field names (e.g. "TargetVpnGateways") + // to unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *TargetVpnGatewaysScopedList) MarshalJSON() ([]byte, error) { + type noMethod TargetVpnGatewaysScopedList + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +// TargetVpnGatewaysScopedListWarning: [Output Only] Informational +// warning which replaces the list of addresses when the list is empty. +type TargetVpnGatewaysScopedListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. + // + // Possible values: + // "DEPRECATED_RESOURCE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "INJECTED_KERNELS_DEPRECATED" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_NOT_DELETED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNREACHABLE" + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*TargetVpnGatewaysScopedListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. 
+ ForceSendFields []string `json:"-"` +} + +func (s *TargetVpnGatewaysScopedListWarning) MarshalJSON() ([]byte, error) { + type noMethod TargetVpnGatewaysScopedListWarning + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +type TargetVpnGatewaysScopedListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource, and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *TargetVpnGatewaysScopedListWarningData) MarshalJSON() ([]byte, error) { + type noMethod TargetVpnGatewaysScopedListWarningData + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +type TestFailure struct { + ActualService string `json:"actualService,omitempty"` + + ExpectedService string `json:"expectedService,omitempty"` + + Host string `json:"host,omitempty"` + + Path string `json:"path,omitempty"` + + // ForceSendFields is a list of field names (e.g. "ActualService") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *TestFailure) MarshalJSON() ([]byte, error) { + type noMethod TestFailure + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +// UrlMap: A UrlMap resource. This resource defines the mapping from URL +// to the BackendService resource, based on the "longest-match" of the +// URL's host and path. +type UrlMap struct { + // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text + // format. + CreationTimestamp string `json:"creationTimestamp,omitempty"` + + // DefaultService: The URL of the BackendService resource if none of the + // hostRules match. + DefaultService string `json:"defaultService,omitempty"` + + // Description: An optional description of this resource. Provide this + // property when you create the resource. + Description string `json:"description,omitempty"` + + // Fingerprint: Fingerprint of this resource. A hash of the contents + // stored in this object. This field is used in optimistic locking. This + // field will be ignored when inserting a UrlMap. An up-to-date + // fingerprint must be provided in order to update the UrlMap. 
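+ //
+ // A minimal read-modify-write sketch, assuming the UrlMaps.Get and
+ // UrlMaps.Update calls generated in this package; names are
+ // placeholders:
+ //
+ //    um, _ := svc.UrlMaps.Get("my-project", "my-map").Do()
+ //    um.DefaultService = newServiceURL
+ //    // um.Fingerprint carries over from Get, satisfying the
+ //    // up-to-date fingerprint requirement.
+ //    op, err := svc.UrlMaps.Update("my-project", "my-map", um).Do()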
+ Fingerprint string `json:"fingerprint,omitempty"` + + // HostRules: The list of HostRules to use against the URL. + HostRules []*HostRule `json:"hostRules,omitempty"` + + // Id: [Output Only] The unique identifier for the resource. This + // identifier is defined by the server. + Id uint64 `json:"id,omitempty,string"` + + // Kind: [Output Only] Type of the resource. Always compute#urlMaps for + // url maps. + Kind string `json:"kind,omitempty"` + + // Name: Name of the resource. Provided by the client when the resource + // is created. The name must be 1-63 characters long, and comply with + // RFC1035. Specifically, the name must be 1-63 characters long and + // match the regular expression [a-z]([-a-z0-9]*[a-z0-9])? which means + // the first character must be a lowercase letter, and all following + // characters must be a dash, lowercase letter, or digit, except the + // last character, which cannot be a dash. + Name string `json:"name,omitempty"` + + // PathMatchers: The list of named PathMatchers to use against the URL. + PathMatchers []*PathMatcher `json:"pathMatchers,omitempty"` + + // SelfLink: [Output Only] Server-defined URL for the resource. + SelfLink string `json:"selfLink,omitempty"` + + // Tests: The list of expected URL mappings. Request to update this + // UrlMap will succeed only if all of the test cases pass. + Tests []*UrlMapTest `json:"tests,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "CreationTimestamp") + // to unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *UrlMap) MarshalJSON() ([]byte, error) { + type noMethod UrlMap + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +// UrlMapList: Contains a list of UrlMap resources. +type UrlMapList struct { + // Id: [Output Only] Unique identifier for the resource. Set by the + // server. + Id string `json:"id,omitempty"` + + // Items: A list of UrlMap resources. + Items []*UrlMap `json:"items,omitempty"` + + // Kind: Type of resource. + Kind string `json:"kind,omitempty"` + + // NextPageToken: [Output Only] This token allows you to get the next + // page of results for list requests. If the number of results is larger + // than maxResults, use the nextPageToken as a value for the query + // parameter pageToken in the next list request. Subsequent list + // requests will have their own nextPageToken to continue paging through + // the results. + NextPageToken string `json:"nextPageToken,omitempty"` + + // SelfLink: [Output Only] Server-defined URL for this resource. + SelfLink string `json:"selfLink,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Id") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. 
This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *UrlMapList) MarshalJSON() ([]byte, error) { + type noMethod UrlMapList + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +type UrlMapReference struct { + UrlMap string `json:"urlMap,omitempty"` + + // ForceSendFields is a list of field names (e.g. "UrlMap") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *UrlMapReference) MarshalJSON() ([]byte, error) { + type noMethod UrlMapReference + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +// UrlMapTest: Message for the expected URL mappings. +type UrlMapTest struct { + // Description: Description of this test case. + Description string `json:"description,omitempty"` + + // Host: Host portion of the URL. + Host string `json:"host,omitempty"` + + // Path: Path portion of the URL. + Path string `json:"path,omitempty"` + + // Service: Expected BackendService resource the given URL should be + // mapped to. + Service string `json:"service,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Description") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *UrlMapTest) MarshalJSON() ([]byte, error) { + type noMethod UrlMapTest + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +// UrlMapValidationResult: Message representing the validation result +// for a UrlMap. +type UrlMapValidationResult struct { + LoadErrors []string `json:"loadErrors,omitempty"` + + // LoadSucceeded: Whether the given UrlMap can be successfully loaded. + // If false, 'loadErrors' indicates the reasons. + LoadSucceeded bool `json:"loadSucceeded,omitempty"` + + TestFailures []*TestFailure `json:"testFailures,omitempty"` + + // TestPassed: If successfully loaded, this field indicates whether the + // test passed. If false, 'testFailures' indicates the reasons for + // failure. + TestPassed bool `json:"testPassed,omitempty"` + + // ForceSendFields is a list of field names (e.g. "LoadErrors") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *UrlMapValidationResult) MarshalJSON() ([]byte, error) { + type noMethod UrlMapValidationResult + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +type UrlMapsValidateRequest struct { + // Resource: Content of the UrlMap to be validated. + Resource *UrlMap `json:"resource,omitempty"` + + // ForceSendFields is a list of field names (e.g. 
"Resource") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *UrlMapsValidateRequest) MarshalJSON() ([]byte, error) { + type noMethod UrlMapsValidateRequest + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +type UrlMapsValidateResponse struct { + Result *UrlMapValidationResult `json:"result,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Result") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *UrlMapsValidateResponse) MarshalJSON() ([]byte, error) { + type noMethod UrlMapsValidateResponse + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +// UsageExportLocation: The location in Cloud Storage and naming method +// of the daily usage report. Contains bucket_name and report_name +// prefix. +type UsageExportLocation struct { + // BucketName: The name of an existing bucket in Cloud Storage where the + // usage report object is stored. The Google Service Account is granted + // write access to this bucket. This is just the bucket name, with no + // gs:// or https://storage.googleapis.com/ in front of it. + BucketName string `json:"bucketName,omitempty"` + + // ReportNamePrefix: An optional prefix for the name of the usage report + // object stored in bucketName. If not supplied, defaults to usage. The + // report is stored as a CSV file named + // report_name_prefix_gce_YYYYMMDD.csv where YYYYMMDD is the day of the + // usage according to Pacific Time. If you supply a prefix, it should + // conform to Cloud Storage object naming conventions. + ReportNamePrefix string `json:"reportNamePrefix,omitempty"` + + // ForceSendFields is a list of field names (e.g. "BucketName") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *UsageExportLocation) MarshalJSON() ([]byte, error) { + type noMethod UsageExportLocation + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +type VpnTunnel struct { + // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text + // format. + CreationTimestamp string `json:"creationTimestamp,omitempty"` + + // Description: An optional description of this resource. Provide this + // property when you create the resource. + Description string `json:"description,omitempty"` + + // DetailedStatus: [Output Only] Detailed status message for the VPN + // tunnel. 
+ DetailedStatus string `json:"detailedStatus,omitempty"` + + // Id: [Output Only] The unique identifier for the resource. This + // identifier is defined by the server. + Id uint64 `json:"id,omitempty,string"` + + // IkeVersion: IKE protocol version to use when establishing the VPN + // tunnel with peer VPN gateway. Acceptable IKE versions are 1 or 2. + // Default version is 2. + IkeVersion int64 `json:"ikeVersion,omitempty"` + + // Kind: [Output Only] Type of resource. Always compute#vpnTunnel for + // VPN tunnels. + Kind string `json:"kind,omitempty"` + + // Name: Name of the resource; provided by the client when the resource + // is created. The name must be 1-63 characters long, and comply with + // RFC1035. Specifically, the name must be 1-63 characters long and + // match the regular expression [a-z]([-a-z0-9]*[a-z0-9])? which means + // the first character must be a lowercase letter, and all following + // characters must be a dash, lowercase letter, or digit, except the + // last character, which cannot be a dash. + Name string `json:"name,omitempty"` + + // PeerIp: IP address of the peer VPN gateway. + PeerIp string `json:"peerIp,omitempty"` + + // Region: [Output Only] URL of the region where the VPN tunnel resides. + Region string `json:"region,omitempty"` + + // SelfLink: [Output Only] Server-defined URL for the resource. + SelfLink string `json:"selfLink,omitempty"` + + // SharedSecret: Shared secret used to set the secure session between + // the GCE VPN gateway and the peer VPN gateway. + SharedSecret string `json:"sharedSecret,omitempty"` + + // SharedSecretHash: Hash of the shared secret. + SharedSecretHash string `json:"sharedSecretHash,omitempty"` + + // Status: [Output Only] The status of the VPN tunnel. + // + // Possible values: + // "AUTHORIZATION_ERROR" + // "DEPROVISIONING" + // "ESTABLISHED" + // "FAILED" + // "FIRST_HANDSHAKE" + // "NEGOTIATION_FAILURE" + // "NETWORK_ERROR" + // "NO_INCOMING_PACKETS" + // "PROVISIONING" + // "REJECTED" + // "WAITING_FOR_FULL_CONFIG" + Status string `json:"status,omitempty"` + + // TargetVpnGateway: URL of the VPN gateway to which this VPN tunnel is + // associated. Provided by the client when the VPN tunnel is created. + TargetVpnGateway string `json:"targetVpnGateway,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "CreationTimestamp") + // to unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *VpnTunnel) MarshalJSON() ([]byte, error) { + type noMethod VpnTunnel + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +type VpnTunnelAggregatedList struct { + // Id: [Output Only] The unique identifier for the resource. This + // identifier is defined by the server. + Id string `json:"id,omitempty"` + + // Items: [Output Only] A map of scoped vpn tunnel lists. + Items map[string]VpnTunnelsScopedList `json:"items,omitempty"` + + // Kind: [Output Only] Type of resource. Always compute#vpnTunnel for + // VPN tunnels. 
+ Kind string `json:"kind,omitempty"` + + // NextPageToken: [Output Only] This token allows you to get the next + // page of results for list requests. If the number of results is larger + // than maxResults, use the nextPageToken as a value for the query + // parameter pageToken in the next list request. Subsequent list + // requests will have their own nextPageToken to continue paging through + // the results. + NextPageToken string `json:"nextPageToken,omitempty"` + + // SelfLink: [Output Only] Server-defined URL for this resource. + SelfLink string `json:"selfLink,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Id") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *VpnTunnelAggregatedList) MarshalJSON() ([]byte, error) { + type noMethod VpnTunnelAggregatedList + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +// VpnTunnelList: Contains a list of VpnTunnel resources. +type VpnTunnelList struct { + // Id: [Output Only] The unique identifier for the resource. This + // identifier is defined by the server. + Id string `json:"id,omitempty"` + + // Items: [Output Only] A list of VpnTunnel resources. + Items []*VpnTunnel `json:"items,omitempty"` + + // Kind: [Output Only] Type of resource. Always compute#vpnTunnel for + // VPN tunnels. + Kind string `json:"kind,omitempty"` + + // NextPageToken: [Output Only] This token allows you to get the next + // page of results for list requests. If the number of results is larger + // than maxResults, use the nextPageToken as a value for the query + // parameter pageToken in the next list request. Subsequent list + // requests will have their own nextPageToken to continue paging through + // the results. + NextPageToken string `json:"nextPageToken,omitempty"` + + // SelfLink: [Output Only] Server-defined URL for the resource. + SelfLink string `json:"selfLink,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Id") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *VpnTunnelList) MarshalJSON() ([]byte, error) { + type noMethod VpnTunnelList + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +type VpnTunnelsScopedList struct { + // VpnTunnels: List of vpn tunnels contained in this scope. + VpnTunnels []*VpnTunnel `json:"vpnTunnels,omitempty"` + + // Warning: Informational warning which replaces the list of addresses + // when the list is empty. + Warning *VpnTunnelsScopedListWarning `json:"warning,omitempty"` + + // ForceSendFields is a list of field names (e.g. 
"VpnTunnels") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *VpnTunnelsScopedList) MarshalJSON() ([]byte, error) { + type noMethod VpnTunnelsScopedList + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +// VpnTunnelsScopedListWarning: Informational warning which replaces the +// list of addresses when the list is empty. +type VpnTunnelsScopedListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. + // + // Possible values: + // "DEPRECATED_RESOURCE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "INJECTED_KERNELS_DEPRECATED" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_NOT_DELETED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNREACHABLE" + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*VpnTunnelsScopedListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *VpnTunnelsScopedListWarning) MarshalJSON() ([]byte, error) { + type noMethod VpnTunnelsScopedListWarning + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +type VpnTunnelsScopedListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource, and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. 
This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *VpnTunnelsScopedListWarningData) MarshalJSON() ([]byte, error) { + type noMethod VpnTunnelsScopedListWarningData + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +// Zone: A Zone resource. +type Zone struct { + // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text + // format. + CreationTimestamp string `json:"creationTimestamp,omitempty"` + + // Deprecated: [Output Only] The deprecation status associated with this + // zone. + Deprecated *DeprecationStatus `json:"deprecated,omitempty"` + + // Description: [Output Only] Textual description of the resource. + Description string `json:"description,omitempty"` + + // Id: [Output Only] The unique identifier for the resource. This + // identifier is defined by the server. + Id uint64 `json:"id,omitempty,string"` + + // Kind: [Output Only] Type of the resource. Always compute#zone for + // zones. + Kind string `json:"kind,omitempty"` + + // MaintenanceWindows: [Output Only] Any scheduled maintenance windows + // for this zone. When the zone is in a maintenance window, all + // resources which reside in the zone will be unavailable. For more + // information, see Maintenance Windows + MaintenanceWindows []*ZoneMaintenanceWindows `json:"maintenanceWindows,omitempty"` + + // Name: [Output Only] Name of the resource. + Name string `json:"name,omitempty"` + + // Region: [Output Only] Full URL reference to the region which hosts + // the zone. + Region string `json:"region,omitempty"` + + // SelfLink: [Output Only] Server-defined URL for the resource. + SelfLink string `json:"selfLink,omitempty"` + + // Status: [Output Only] Status of the zone, either UP or DOWN. + // + // Possible values: + // "DOWN" + // "UP" + Status string `json:"status,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "CreationTimestamp") + // to unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *Zone) MarshalJSON() ([]byte, error) { + type noMethod Zone + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +type ZoneMaintenanceWindows struct { + // BeginTime: [Output Only] Starting time of the maintenance window, in + // RFC3339 format. + BeginTime string `json:"beginTime,omitempty"` + + // Description: [Output Only] Textual description of the maintenance + // window. + Description string `json:"description,omitempty"` + + // EndTime: [Output Only] Ending time of the maintenance window, in + // RFC3339 format. + EndTime string `json:"endTime,omitempty"` + + // Name: [Output Only] Name of the maintenance window. + Name string `json:"name,omitempty"` + + // ForceSendFields is a list of field names (e.g. "BeginTime") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. 
This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *ZoneMaintenanceWindows) MarshalJSON() ([]byte, error) { + type noMethod ZoneMaintenanceWindows + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +// ZoneList: Contains a list of zone resources. +type ZoneList struct { + // Id: [Output Only] Unique identifier for the resource; defined by the + // server. + Id string `json:"id,omitempty"` + + // Items: [Output Only] A list of Zone resources. + Items []*Zone `json:"items,omitempty"` + + // Kind: Type of resource. + Kind string `json:"kind,omitempty"` + + // NextPageToken: [Output Only] This token allows you to get the next + // page of results for list requests. If the number of results is larger + // than maxResults, use the nextPageToken as a value for the query + // parameter pageToken in the next list request. Subsequent list + // requests will have their own nextPageToken to continue paging through + // the results. + NextPageToken string `json:"nextPageToken,omitempty"` + + // SelfLink: [Output Only] Server-defined URL for this resource. + SelfLink string `json:"selfLink,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Id") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *ZoneList) MarshalJSON() ([]byte, error) { + type noMethod ZoneList + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +// method id "compute.addresses.aggregatedList": + +type AddressesAggregatedListCall struct { + s *Service + project string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context +} + +// AggregatedList: Retrieves the list of addresses grouped by scope. +// For details, see https://cloud.google.com/compute/docs/reference/latest/addresses/aggregatedList +func (r *AddressesService) AggregatedList(project string) *AddressesAggregatedListCall { + c := &AddressesAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + return c +} + +// Filter sets the optional parameter "filter": Sets a filter expression +// for filtering listed resources, in the form filter={expression}. Your +// {expression} must be in the format: field_name comparison_string +// literal_string. +// +// The field_name is the name of the field you want to compare. Only +// atomic field types are supported (string, number, boolean). The +// comparison_string must be either eq (equals) or ne (not equals). The +// literal_string is the string value to filter to. The literal value +// must be valid for the type of field you are filtering by (string, +// number, boolean). For string fields, the literal value is interpreted +// as a regular expression using RE2 syntax. The literal value must +// match the entire field. +// +// For example, filter=name ne example-instance. +// +// Compute Engine Beta API Only: If you use filtering in the Beta API, +// you can also filter on nested fields. 
For example, you could filter
+// on instances that have set the scheduling.automaticRestart field to
+// true. In particular, use filtering on nested fields to take advantage
+// of instance labels to organize and filter results based on label
+// values.
+//
+// The Beta API also supports filtering on multiple expressions by
+// providing each separate expression within parentheses. For example,
+// (scheduling.automaticRestart eq true) (zone eq us-central1-f).
+// Multiple expressions are treated as AND expressions meaning that
+// resources must match all expressions to pass the filters.
+func (c *AddressesAggregatedListCall) Filter(filter string) *AddressesAggregatedListCall {
+ c.urlParams_.Set("filter", filter)
+ return c
+}
+
+// MaxResults sets the optional parameter "maxResults": The maximum
+// number of results per page that Compute Engine should return. If the
+// number of available results is larger than maxResults, Compute Engine
+// returns a nextPageToken that can be used to get the next page of
+// results in subsequent list requests.
+func (c *AddressesAggregatedListCall) MaxResults(maxResults int64) *AddressesAggregatedListCall {
+ c.urlParams_.Set("maxResults", fmt.Sprint(maxResults))
+ return c
+}
+
+// PageToken sets the optional parameter "pageToken": Specifies a page
+// token to use. Set pageToken to the nextPageToken returned by a
+// previous list request to get the next page of results.
+func (c *AddressesAggregatedListCall) PageToken(pageToken string) *AddressesAggregatedListCall {
+ c.urlParams_.Set("pageToken", pageToken)
+ return c
+}
+
+// Fields allows partial responses to be retrieved. See
+// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *AddressesAggregatedListCall) Fields(s ...googleapi.Field) *AddressesAggregatedListCall {
+ c.urlParams_.Set("fields", googleapi.CombineFields(s))
+ return c
+}
+
+// IfNoneMatch sets the optional parameter which makes the operation
+// fail if the object's ETag matches the given value. This is useful for
+// getting updates only after the object has changed since the last
+// request. Use googleapi.IsNotModified to check whether the response
+// error from Do is the result of If-None-Match.
+func (c *AddressesAggregatedListCall) IfNoneMatch(entityTag string) *AddressesAggregatedListCall {
+ c.ifNoneMatch_ = entityTag
+ return c
+}
+
+// Context sets the context to be used in this call's Do method. Any
+// pending HTTP request will be aborted if the provided context is
+// canceled.
+func (c *AddressesAggregatedListCall) Context(ctx context.Context) *AddressesAggregatedListCall {
+ c.ctx_ = ctx
+ return c
+}
+
+func (c *AddressesAggregatedListCall) doRequest(alt string) (*http.Response, error) {
+ var body io.Reader = nil
+ c.urlParams_.Set("alt", alt)
+ urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/addresses")
+ urls += "?" + c.urlParams_.Encode()
+ req, _ := http.NewRequest("GET", urls, body)
+ googleapi.Expand(req.URL, map[string]string{
+ "project": c.project,
+ })
+ req.Header.Set("User-Agent", c.s.userAgent())
+ if c.ifNoneMatch_ != "" {
+ req.Header.Set("If-None-Match", c.ifNoneMatch_)
+ }
+ if c.ctx_ != nil {
+ return ctxhttp.Do(c.ctx_, c.s.client, req)
+ }
+ return c.s.client.Do(req)
+}
+
+// Do executes the "compute.addresses.aggregatedList" call.
+// Exactly one of *AddressAggregatedList or error will be non-nil. Any
+// non-2xx status code is an error. 
Response headers are in either +// *AddressAggregatedList.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *AddressesAggregatedListCall) Do(opts ...googleapi.CallOption) (*AddressAggregatedList, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &AddressAggregatedList{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Retrieves the list of addresses grouped by scope.", + // "httpMethod": "GET", + // "id": "compute.addresses.aggregatedList", + // "parameterOrder": [ + // "project" + // ], + // "parameters": { + // "filter": { + // "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, filter=name ne example-instance.\n\nCompute Engine Beta API Only: If you use filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. In particular, use filtering on nested fields to take advantage of instance labels to organize and filter results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions meaning that resources must match all expressions to pass the filters.", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that Compute Engine should return. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests.", + // "format": "uint32", + // "location": "query", + // "maximum": "500", + // "minimum": "0", + // "type": "integer" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. 
Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "location": "query", + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/aggregated/addresses", + // "response": { + // "$ref": "AddressAggregatedList" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// method id "compute.addresses.delete": + +type AddressesDeleteCall struct { + s *Service + project string + region string + address string + urlParams_ gensupport.URLParams + ctx_ context.Context +} + +// Delete: Deletes the specified address resource. +// For details, see https://cloud.google.com/compute/docs/reference/latest/addresses/delete +func (r *AddressesService) Delete(project string, region string, address string) *AddressesDeleteCall { + c := &AddressesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + c.address = address + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *AddressesDeleteCall) Fields(s ...googleapi.Field) *AddressesDeleteCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *AddressesDeleteCall) Context(ctx context.Context) *AddressesDeleteCall { + c.ctx_ = ctx + return c +} + +func (c *AddressesDeleteCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/addresses/{address}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("DELETE", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "region": c.region, + "address": c.address, + }) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "compute.addresses.delete" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *AddressesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
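+ // A minimal usage sketch (editor's illustration, not generated code;
+ // "my-project", "us-central1", and "my-ip" are placeholder names, and
+ // svc is assumed to be a *Service for this package):
+ //
+ //	op, err := svc.Addresses.Delete("my-project", "us-central1", "my-ip").Do()
+ //	if err != nil {
+ //		log.Fatal(err)
+ //	}
+ //	// op is an Operation; callers typically poll it until its Status
+ //	// reports "DONE" before treating the address as deleted.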
+ res, err := c.doRequest("json")
+ if res != nil && res.StatusCode == http.StatusNotModified {
+ if res.Body != nil {
+ res.Body.Close()
+ }
+ return nil, &googleapi.Error{
+ Code: res.StatusCode,
+ Header: res.Header,
+ }
+ }
+ if err != nil {
+ return nil, err
+ }
+ defer googleapi.CloseBody(res)
+ if err := googleapi.CheckResponse(res); err != nil {
+ return nil, err
+ }
+ ret := &Operation{
+ ServerResponse: googleapi.ServerResponse{
+ Header: res.Header,
+ HTTPStatusCode: res.StatusCode,
+ },
+ }
+ if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
+ return nil, err
+ }
+ return ret, nil
+ // {
+ // "description": "Deletes the specified address resource.",
+ // "httpMethod": "DELETE",
+ // "id": "compute.addresses.delete",
+ // "parameterOrder": [
+ // "project",
+ // "region",
+ // "address"
+ // ],
+ // "parameters": {
+ // "address": {
+ // "description": "Name of the address resource to delete.",
+ // "location": "path",
+ // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
+ // "required": true,
+ // "type": "string"
+ // },
+ // "project": {
+ // "description": "Project ID for this request.",
+ // "location": "path",
+ // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
+ // "required": true,
+ // "type": "string"
+ // },
+ // "region": {
+ // "description": "The name of the region for this request.",
+ // "location": "path",
+ // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
+ // "required": true,
+ // "type": "string"
+ // }
+ // },
+ // "path": "{project}/regions/{region}/addresses/{address}",
+ // "response": {
+ // "$ref": "Operation"
+ // },
+ // "scopes": [
+ // "https://www.googleapis.com/auth/cloud-platform",
+ // "https://www.googleapis.com/auth/compute"
+ // ]
+ // }
+
+}
+
+// method id "compute.addresses.get":
+
+type AddressesGetCall struct {
+ s *Service
+ project string
+ region string
+ address string
+ urlParams_ gensupport.URLParams
+ ifNoneMatch_ string
+ ctx_ context.Context
+}
+
+// Get: Returns the specified address resource.
+// For details, see https://cloud.google.com/compute/docs/reference/latest/addresses/get
+func (r *AddressesService) Get(project string, region string, address string) *AddressesGetCall {
+ c := &AddressesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)}
+ c.project = project
+ c.region = region
+ c.address = address
+ return c
+}
+
+// Fields allows partial responses to be retrieved. See
+// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *AddressesGetCall) Fields(s ...googleapi.Field) *AddressesGetCall {
+ c.urlParams_.Set("fields", googleapi.CombineFields(s))
+ return c
+}
+
+// IfNoneMatch sets the optional parameter which makes the operation
+// fail if the object's ETag matches the given value. This is useful for
+// getting updates only after the object has changed since the last
+// request. Use googleapi.IsNotModified to check whether the response
+// error from Do is the result of If-None-Match.
+func (c *AddressesGetCall) IfNoneMatch(entityTag string) *AddressesGetCall {
+ c.ifNoneMatch_ = entityTag
+ return c
+}
+
+// Context sets the context to be used in this call's Do method. Any
+// pending HTTP request will be aborted if the provided context is
+// canceled.
+func (c *AddressesGetCall) Context(ctx context.Context) *AddressesGetCall { + c.ctx_ = ctx + return c +} + +func (c *AddressesGetCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/addresses/{address}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "region": c.region, + "address": c.address, + }) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + req.Header.Set("If-None-Match", c.ifNoneMatch_) + } + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "compute.addresses.get" call. +// Exactly one of *Address or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Address.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *AddressesGetCall) Do(opts ...googleapi.CallOption) (*Address, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Address{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Returns the specified address resource.", + // "httpMethod": "GET", + // "id": "compute.addresses.get", + // "parameterOrder": [ + // "project", + // "region", + // "address" + // ], + // "parameters": { + // "address": { + // "description": "Name of the address resource to return.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "The name of the region for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/regions/{region}/addresses/{address}", + // "response": { + // "$ref": "Address" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// method id "compute.addresses.insert": + +type AddressesInsertCall struct { + s *Service + project string + region string + address *Address + urlParams_ gensupport.URLParams + ctx_ context.Context +} + +// Insert: Creates an address resource in the specified project using +// the data included in the request. 
+// For details, see https://cloud.google.com/compute/docs/reference/latest/addresses/insert +func (r *AddressesService) Insert(project string, region string, address *Address) *AddressesInsertCall { + c := &AddressesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + c.address = address + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *AddressesInsertCall) Fields(s ...googleapi.Field) *AddressesInsertCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *AddressesInsertCall) Context(ctx context.Context) *AddressesInsertCall { + c.ctx_ = ctx + return c +} + +func (c *AddressesInsertCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.address) + if err != nil { + return nil, err + } + ctype := "application/json" + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/addresses") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "region": c.region, + }) + req.Header.Set("Content-Type", ctype) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "compute.addresses.insert" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *AddressesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
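+ // A minimal usage sketch (editor's illustration, not generated code;
+ // names are placeholders, and an Address literal with a Name field is
+ // assumed to match the Address type defined earlier in this package):
+ //
+ //	addr := &Address{Name: "my-static-ip"}
+ //	op, err := svc.Addresses.Insert("my-project", "us-central1", addr).Do()
+ //	if err != nil {
+ //		log.Fatal(err)
+ //	}
+ //	_ = op // poll the returned Operation to observe completion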
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Creates an address resource in the specified project using the data included in the request.", + // "httpMethod": "POST", + // "id": "compute.addresses.insert", + // "parameterOrder": [ + // "project", + // "region" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "The name of the region for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/regions/{region}/addresses", + // "request": { + // "$ref": "Address" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.addresses.list": + +type AddressesListCall struct { + s *Service + project string + region string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context +} + +// List: Retrieves the list of address resources contained within the +// specified region. +// For details, see https://cloud.google.com/compute/docs/reference/latest/addresses/list +func (r *AddressesService) List(project string, region string) *AddressesListCall { + c := &AddressesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + return c +} + +// Filter sets the optional parameter "filter": Sets a filter expression +// for filtering listed resources, in the form filter={expression}. Your +// {expression} must be in the format: field_name comparison_string +// literal_string. +// +// The field_name is the name of the field you want to compare. Only +// atomic field types are supported (string, number, boolean). The +// comparison_string must be either eq (equals) or ne (not equals). The +// literal_string is the string value to filter to. The literal value +// must be valid for the type of field you are filtering by (string, +// number, boolean). For string fields, the literal value is interpreted +// as a regular expression using RE2 syntax. The literal value must +// match the entire field. +// +// For example, filter=name ne example-instance. +// +// Compute Engine Beta API Only: If you use filtering in the Beta API, +// you can also filter on nested fields. For example, you could filter +// on instances that have set the scheduling.automaticRestart field to +// true. In particular, use filtering on nested fields to take advantage +// of instance labels to organize and filter results based on label +// values. 
+//
+// The Beta API also supports filtering on multiple expressions by
+// providing each separate expression within parentheses. For example,
+// (scheduling.automaticRestart eq true) (zone eq us-central1-f).
+// Multiple expressions are treated as AND expressions meaning that
+// resources must match all expressions to pass the filters.
+func (c *AddressesListCall) Filter(filter string) *AddressesListCall {
+ c.urlParams_.Set("filter", filter)
+ return c
+}
+
+// MaxResults sets the optional parameter "maxResults": The maximum
+// number of results per page that Compute Engine should return. If the
+// number of available results is larger than maxResults, Compute Engine
+// returns a nextPageToken that can be used to get the next page of
+// results in subsequent list requests.
+func (c *AddressesListCall) MaxResults(maxResults int64) *AddressesListCall {
+ c.urlParams_.Set("maxResults", fmt.Sprint(maxResults))
+ return c
+}
+
+// PageToken sets the optional parameter "pageToken": Specifies a page
+// token to use. Set pageToken to the nextPageToken returned by a
+// previous list request to get the next page of results.
+func (c *AddressesListCall) PageToken(pageToken string) *AddressesListCall {
+ c.urlParams_.Set("pageToken", pageToken)
+ return c
+}
+
+// Fields allows partial responses to be retrieved. See
+// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *AddressesListCall) Fields(s ...googleapi.Field) *AddressesListCall {
+ c.urlParams_.Set("fields", googleapi.CombineFields(s))
+ return c
+}
+
+// IfNoneMatch sets the optional parameter which makes the operation
+// fail if the object's ETag matches the given value. This is useful for
+// getting updates only after the object has changed since the last
+// request. Use googleapi.IsNotModified to check whether the response
+// error from Do is the result of If-None-Match.
+func (c *AddressesListCall) IfNoneMatch(entityTag string) *AddressesListCall {
+ c.ifNoneMatch_ = entityTag
+ return c
+}
+
+// Context sets the context to be used in this call's Do method. Any
+// pending HTTP request will be aborted if the provided context is
+// canceled.
+func (c *AddressesListCall) Context(ctx context.Context) *AddressesListCall {
+ c.ctx_ = ctx
+ return c
+}
+
+func (c *AddressesListCall) doRequest(alt string) (*http.Response, error) {
+ var body io.Reader = nil
+ c.urlParams_.Set("alt", alt)
+ urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/addresses")
+ urls += "?" + c.urlParams_.Encode()
+ req, _ := http.NewRequest("GET", urls, body)
+ googleapi.Expand(req.URL, map[string]string{
+ "project": c.project,
+ "region": c.region,
+ })
+ req.Header.Set("User-Agent", c.s.userAgent())
+ if c.ifNoneMatch_ != "" {
+ req.Header.Set("If-None-Match", c.ifNoneMatch_)
+ }
+ if c.ctx_ != nil {
+ return ctxhttp.Do(c.ctx_, c.s.client, req)
+ }
+ return c.s.client.Do(req)
+}
+
+// Do executes the "compute.addresses.list" call.
+// Exactly one of *AddressList or error will be non-nil. Any non-2xx
+// status code is an error. Response headers are in either
+// *AddressList.ServerResponse.Header or (if a response was returned at
+// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified
+// to check whether the returned error was because
+// http.StatusNotModified was returned.
+func (c *AddressesListCall) Do(opts ...googleapi.CallOption) (*AddressList, error) {
+ gensupport.SetOptions(c.urlParams_, opts...)
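+ // A paging sketch (editor's illustration, not generated code; names
+ // are placeholders, and AddressList is assumed to expose Items as a
+ // slice of *Address as elsewhere in this package): feed each
+ // response's NextPageToken back via PageToken until it comes back
+ // empty.
+ //
+ //	call := svc.Addresses.List("my-project", "us-central1")
+ //	for {
+ //		page, err := call.Do()
+ //		if err != nil {
+ //			log.Fatal(err)
+ //		}
+ //		for _, addr := range page.Items {
+ //			fmt.Println(addr.Name)
+ //		}
+ //		if page.NextPageToken == "" {
+ //			break
+ //		}
+ //		call.PageToken(page.NextPageToken)
+ //	}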
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &AddressList{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Retrieves the list of address resources contained within the specified region.", + // "httpMethod": "GET", + // "id": "compute.addresses.list", + // "parameterOrder": [ + // "project", + // "region" + // ], + // "parameters": { + // "filter": { + // "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, filter=name ne example-instance.\n\nCompute Engine Beta API Only: If you use filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. In particular, use filtering on nested fields to take advantage of instance labels to organize and filter results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions meaning that resources must match all expressions to pass the filters.", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that Compute Engine should return. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests.", + // "format": "uint32", + // "location": "query", + // "maximum": "500", + // "minimum": "0", + // "type": "integer" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. 
Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "location": "query", + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "The name of the region for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/regions/{region}/addresses", + // "response": { + // "$ref": "AddressList" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// method id "compute.autoscalers.aggregatedList": + +type AutoscalersAggregatedListCall struct { + s *Service + project string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context +} + +// AggregatedList: Retrieves the list of autoscalers grouped by scope. +func (r *AutoscalersService) AggregatedList(project string) *AutoscalersAggregatedListCall { + c := &AutoscalersAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + return c +} + +// Filter sets the optional parameter "filter": Sets a filter expression +// for filtering listed resources, in the form filter={expression}. Your +// {expression} must be in the format: field_name comparison_string +// literal_string. +// +// The field_name is the name of the field you want to compare. Only +// atomic field types are supported (string, number, boolean). The +// comparison_string must be either eq (equals) or ne (not equals). The +// literal_string is the string value to filter to. The literal value +// must be valid for the type of field you are filtering by (string, +// number, boolean). For string fields, the literal value is interpreted +// as a regular expression using RE2 syntax. The literal value must +// match the entire field. +// +// For example, filter=name ne example-instance. +// +// Compute Engine Beta API Only: If you use filtering in the Beta API, +// you can also filter on nested fields. For example, you could filter +// on instances that have set the scheduling.automaticRestart field to +// true. In particular, use filtering on nested fields to take advantage +// of instance labels to organize and filter results based on label +// values. +// +// The Beta API also supports filtering on multiple expressions by +// providing each separate expression within parentheses. For example, +// (scheduling.automaticRestart eq true) (zone eq us-central1-f). +// Multiple expressions are treated as AND expressions meaning that +// resources must match all expressions to pass the filters. +func (c *AutoscalersAggregatedListCall) Filter(filter string) *AutoscalersAggregatedListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that Compute Engine should return. If the +// number of available results is larger than maxResults, Compute Engine +// returns a nextPageToken that can be used to get the next page of +// results in subsequent list requests. 
+func (c *AutoscalersAggregatedListCall) MaxResults(maxResults int64) *AutoscalersAggregatedListCall {
+ c.urlParams_.Set("maxResults", fmt.Sprint(maxResults))
+ return c
+}
+
+// PageToken sets the optional parameter "pageToken": Specifies a page
+// token to use. Set pageToken to the nextPageToken returned by a
+// previous list request to get the next page of results.
+func (c *AutoscalersAggregatedListCall) PageToken(pageToken string) *AutoscalersAggregatedListCall {
+ c.urlParams_.Set("pageToken", pageToken)
+ return c
+}
+
+// Fields allows partial responses to be retrieved. See
+// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *AutoscalersAggregatedListCall) Fields(s ...googleapi.Field) *AutoscalersAggregatedListCall {
+ c.urlParams_.Set("fields", googleapi.CombineFields(s))
+ return c
+}
+
+// IfNoneMatch sets the optional parameter which makes the operation
+// fail if the object's ETag matches the given value. This is useful for
+// getting updates only after the object has changed since the last
+// request. Use googleapi.IsNotModified to check whether the response
+// error from Do is the result of If-None-Match.
+func (c *AutoscalersAggregatedListCall) IfNoneMatch(entityTag string) *AutoscalersAggregatedListCall {
+ c.ifNoneMatch_ = entityTag
+ return c
+}
+
+// Context sets the context to be used in this call's Do method. Any
+// pending HTTP request will be aborted if the provided context is
+// canceled.
+func (c *AutoscalersAggregatedListCall) Context(ctx context.Context) *AutoscalersAggregatedListCall {
+ c.ctx_ = ctx
+ return c
+}
+
+func (c *AutoscalersAggregatedListCall) doRequest(alt string) (*http.Response, error) {
+ var body io.Reader = nil
+ c.urlParams_.Set("alt", alt)
+ urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/autoscalers")
+ urls += "?" + c.urlParams_.Encode()
+ req, _ := http.NewRequest("GET", urls, body)
+ googleapi.Expand(req.URL, map[string]string{
+ "project": c.project,
+ })
+ req.Header.Set("User-Agent", c.s.userAgent())
+ if c.ifNoneMatch_ != "" {
+ req.Header.Set("If-None-Match", c.ifNoneMatch_)
+ }
+ if c.ctx_ != nil {
+ return ctxhttp.Do(c.ctx_, c.s.client, req)
+ }
+ return c.s.client.Do(req)
+}
+
+// Do executes the "compute.autoscalers.aggregatedList" call.
+// Exactly one of *AutoscalerAggregatedList or error will be non-nil.
+// Any non-2xx status code is an error. Response headers are in either
+// *AutoscalerAggregatedList.ServerResponse.Header or (if a response was
+// returned at all) in error.(*googleapi.Error).Header. Use
+// googleapi.IsNotModified to check whether the returned error was
+// because http.StatusNotModified was returned.
+func (c *AutoscalersAggregatedListCall) Do(opts ...googleapi.CallOption) (*AutoscalerAggregatedList, error) {
+ gensupport.SetOptions(c.urlParams_, opts...)
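+ // A filtered aggregated-list sketch (editor's illustration, not
+ // generated code; names are placeholders). The eq/ne syntax described
+ // on Filter above applies:
+ //
+ //	agg, err := svc.Autoscalers.AggregatedList("my-project").
+ //		Filter("name eq my-autoscaler").
+ //		MaxResults(100).
+ //		Do()
+ //	if err != nil {
+ //		log.Fatal(err)
+ //	}
+ //	_ = agg // results are grouped by scope, e.g. per zone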
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &AutoscalerAggregatedList{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Retrieves the list of autoscalers grouped by scope.", + // "httpMethod": "GET", + // "id": "compute.autoscalers.aggregatedList", + // "parameterOrder": [ + // "project" + // ], + // "parameters": { + // "filter": { + // "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, filter=name ne example-instance.\n\nCompute Engine Beta API Only: If you use filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. In particular, use filtering on nested fields to take advantage of instance labels to organize and filter results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions meaning that resources must match all expressions to pass the filters.", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that Compute Engine should return. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests.", + // "format": "uint32", + // "location": "query", + // "maximum": "500", + // "minimum": "0", + // "type": "integer" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. 
Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "location": "query", + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/aggregated/autoscalers", + // "response": { + // "$ref": "AutoscalerAggregatedList" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// method id "compute.autoscalers.delete": + +type AutoscalersDeleteCall struct { + s *Service + project string + zone string + autoscaler string + urlParams_ gensupport.URLParams + ctx_ context.Context +} + +// Delete: Deletes the specified autoscaler resource. +func (r *AutoscalersService) Delete(project string, zone string, autoscaler string) *AutoscalersDeleteCall { + c := &AutoscalersDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.zone = zone + c.autoscaler = autoscaler + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *AutoscalersDeleteCall) Fields(s ...googleapi.Field) *AutoscalersDeleteCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *AutoscalersDeleteCall) Context(ctx context.Context) *AutoscalersDeleteCall { + c.ctx_ = ctx + return c +} + +func (c *AutoscalersDeleteCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/autoscalers/{autoscaler}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("DELETE", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "zone": c.zone, + "autoscaler": c.autoscaler, + }) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "compute.autoscalers.delete" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *AutoscalersDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
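+ // The checks below surface an HTTP 304 (Not Modified) response as a
+ // *googleapi.Error carrying the status code and headers, so callers
+ // can detect it with googleapi.IsNotModified; any other non-2xx
+ // status is converted to an error by googleapi.CheckResponse.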
+ res, err := c.doRequest("json")
+ if res != nil && res.StatusCode == http.StatusNotModified {
+ if res.Body != nil {
+ res.Body.Close()
+ }
+ return nil, &googleapi.Error{
+ Code: res.StatusCode,
+ Header: res.Header,
+ }
+ }
+ if err != nil {
+ return nil, err
+ }
+ defer googleapi.CloseBody(res)
+ if err := googleapi.CheckResponse(res); err != nil {
+ return nil, err
+ }
+ ret := &Operation{
+ ServerResponse: googleapi.ServerResponse{
+ Header: res.Header,
+ HTTPStatusCode: res.StatusCode,
+ },
+ }
+ if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
+ return nil, err
+ }
+ return ret, nil
+ // {
+ // "description": "Deletes the specified autoscaler resource.",
+ // "httpMethod": "DELETE",
+ // "id": "compute.autoscalers.delete",
+ // "parameterOrder": [
+ // "project",
+ // "zone",
+ // "autoscaler"
+ // ],
+ // "parameters": {
+ // "autoscaler": {
+ // "description": "Name of the persistent autoscaler resource to delete.",
+ // "location": "path",
+ // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
+ // "required": true,
+ // "type": "string"
+ // },
+ // "project": {
+ // "description": "Project ID for this request.",
+ // "location": "path",
+ // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
+ // "required": true,
+ // "type": "string"
+ // },
+ // "zone": {
+ // "description": "Name of the zone scoping this request.",
+ // "location": "path",
+ // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
+ // "required": true,
+ // "type": "string"
+ // }
+ // },
+ // "path": "{project}/zones/{zone}/autoscalers/{autoscaler}",
+ // "response": {
+ // "$ref": "Operation"
+ // },
+ // "scopes": [
+ // "https://www.googleapis.com/auth/cloud-platform",
+ // "https://www.googleapis.com/auth/compute"
+ // ]
+ // }
+
+}
+
+// method id "compute.autoscalers.get":
+
+type AutoscalersGetCall struct {
+ s *Service
+ project string
+ zone string
+ autoscaler string
+ urlParams_ gensupport.URLParams
+ ifNoneMatch_ string
+ ctx_ context.Context
+}
+
+// Get: Returns the specified autoscaler resource.
+func (r *AutoscalersService) Get(project string, zone string, autoscaler string) *AutoscalersGetCall {
+ c := &AutoscalersGetCall{s: r.s, urlParams_: make(gensupport.URLParams)}
+ c.project = project
+ c.zone = zone
+ c.autoscaler = autoscaler
+ return c
+}
+
+// Fields allows partial responses to be retrieved. See
+// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *AutoscalersGetCall) Fields(s ...googleapi.Field) *AutoscalersGetCall {
+ c.urlParams_.Set("fields", googleapi.CombineFields(s))
+ return c
+}
+
+// IfNoneMatch sets the optional parameter which makes the operation
+// fail if the object's ETag matches the given value. This is useful for
+// getting updates only after the object has changed since the last
+// request. Use googleapi.IsNotModified to check whether the response
+// error from Do is the result of If-None-Match.
+func (c *AutoscalersGetCall) IfNoneMatch(entityTag string) *AutoscalersGetCall {
+ c.ifNoneMatch_ = entityTag
+ return c
+}
+
+// Context sets the context to be used in this call's Do method. Any
+// pending HTTP request will be aborted if the provided context is
+// canceled.
+func (c *AutoscalersGetCall) Context(ctx context.Context) *AutoscalersGetCall { + c.ctx_ = ctx + return c +} + +func (c *AutoscalersGetCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/autoscalers/{autoscaler}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "zone": c.zone, + "autoscaler": c.autoscaler, + }) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + req.Header.Set("If-None-Match", c.ifNoneMatch_) + } + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "compute.autoscalers.get" call. +// Exactly one of *Autoscaler or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Autoscaler.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *AutoscalersGetCall) Do(opts ...googleapi.CallOption) (*Autoscaler, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Autoscaler{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Returns the specified autoscaler resource.", + // "httpMethod": "GET", + // "id": "compute.autoscalers.get", + // "parameterOrder": [ + // "project", + // "zone", + // "autoscaler" + // ], + // "parameters": { + // "autoscaler": { + // "description": "Name of the persistent autoscaler resource to return.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "zone": { + // "description": "Name of the zone scoping this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/zones/{zone}/autoscalers/{autoscaler}", + // "response": { + // "$ref": "Autoscaler" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// method id "compute.autoscalers.insert": + +type AutoscalersInsertCall struct { + s *Service + project string + zone string + autoscaler *Autoscaler + urlParams_ gensupport.URLParams + ctx_ context.Context +} + +// Insert: Creates an autoscaler resource in the specified project using +// the data included in 
the request. +func (r *AutoscalersService) Insert(project string, zone string, autoscaler *Autoscaler) *AutoscalersInsertCall { + c := &AutoscalersInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.zone = zone + c.autoscaler = autoscaler + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *AutoscalersInsertCall) Fields(s ...googleapi.Field) *AutoscalersInsertCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *AutoscalersInsertCall) Context(ctx context.Context) *AutoscalersInsertCall { + c.ctx_ = ctx + return c +} + +func (c *AutoscalersInsertCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.autoscaler) + if err != nil { + return nil, err + } + ctype := "application/json" + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/autoscalers") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "zone": c.zone, + }) + req.Header.Set("Content-Type", ctype) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "compute.autoscalers.insert" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *AutoscalersInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
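+ // A cancellation sketch (editor's illustration, not generated code;
+ // "a" stands for a previously constructed *Autoscaler and the other
+ // names are placeholders). The context attached via Context aborts
+ // the pending HTTP request when it expires or is canceled:
+ //
+ //	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
+ //	defer cancel()
+ //	op, err := svc.Autoscalers.Insert("my-project", "us-central1-f", a).
+ //		Context(ctx).
+ //		Do()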
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Creates an autoscaler resource in the specified project using the data included in the request.", + // "httpMethod": "POST", + // "id": "compute.autoscalers.insert", + // "parameterOrder": [ + // "project", + // "zone" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "zone": { + // "description": "Name of the zone scoping this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/zones/{zone}/autoscalers", + // "request": { + // "$ref": "Autoscaler" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.autoscalers.list": + +type AutoscalersListCall struct { + s *Service + project string + zone string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context +} + +// List: Retrieves the list of autoscaler resources contained within the +// specified zone. +func (r *AutoscalersService) List(project string, zone string) *AutoscalersListCall { + c := &AutoscalersListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.zone = zone + return c +} + +// Filter sets the optional parameter "filter": Sets a filter expression +// for filtering listed resources, in the form filter={expression}. Your +// {expression} must be in the format: field_name comparison_string +// literal_string. +// +// The field_name is the name of the field you want to compare. Only +// atomic field types are supported (string, number, boolean). The +// comparison_string must be either eq (equals) or ne (not equals). The +// literal_string is the string value to filter to. The literal value +// must be valid for the type of field you are filtering by (string, +// number, boolean). For string fields, the literal value is interpreted +// as a regular expression using RE2 syntax. The literal value must +// match the entire field. +// +// For example, filter=name ne example-instance. +// +// Compute Engine Beta API Only: If you use filtering in the Beta API, +// you can also filter on nested fields. For example, you could filter +// on instances that have set the scheduling.automaticRestart field to +// true. In particular, use filtering on nested fields to take advantage +// of instance labels to organize and filter results based on label +// values. +// +// The Beta API also supports filtering on multiple expressions by +// providing each separate expression within parentheses. 
For example, +// (scheduling.automaticRestart eq true) (zone eq us-central1-f). +// Multiple expressions are treated as AND expressions meaning that +// resources must match all expressions to pass the filters. +func (c *AutoscalersListCall) Filter(filter string) *AutoscalersListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that Compute Engine should return. If the +// number of available results is larger than maxResults, Compute Engine +// returns a nextPageToken that can be used to get the next page of +// results in subsequent list requests. +func (c *AutoscalersListCall) MaxResults(maxResults int64) *AutoscalersListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set pageToken to the nextPageToken returned by a +// previous list request to get the next page of results. +func (c *AutoscalersListCall) PageToken(pageToken string) *AutoscalersListCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *AutoscalersListCall) Fields(s ...googleapi.Field) *AutoscalersListCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of If-None-Match. +func (c *AutoscalersListCall) IfNoneMatch(entityTag string) *AutoscalersListCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *AutoscalersListCall) Context(ctx context.Context) *AutoscalersListCall { + c.ctx_ = ctx + return c +} + +func (c *AutoscalersListCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/autoscalers") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "zone": c.zone, + }) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + req.Header.Set("If-None-Match", c.ifNoneMatch_) + } + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "compute.autoscalers.list" call. +// Exactly one of *AutoscalerList or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *AutoscalerList.ServerResponse.Header or (if a response was returned +// at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *AutoscalersListCall) Do(opts ...googleapi.CallOption) (*AutoscalerList, error) { + gensupport.SetOptions(c.urlParams_, opts...)
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &AutoscalerList{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Retrieves the list of autoscaler resources contained within the specified zone.", + // "httpMethod": "GET", + // "id": "compute.autoscalers.list", + // "parameterOrder": [ + // "project", + // "zone" + // ], + // "parameters": { + // "filter": { + // "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, filter=name ne example-instance.\n\nCompute Engine Beta API Only: If you use filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. In particular, use filtering on nested fields to take advantage of instance labels to organize and filter results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions meaning that resources must match all expressions to pass the filters.", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that Compute Engine should return. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests.", + // "format": "uint32", + // "location": "query", + // "maximum": "500", + // "minimum": "0", + // "type": "integer" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. 
Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "location": "query", + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "zone": { + // "description": "Name of the zone scoping this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/zones/{zone}/autoscalers", + // "response": { + // "$ref": "AutoscalerList" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// method id "compute.autoscalers.patch": + +type AutoscalersPatchCall struct { + s *Service + project string + zone string + autoscaler2 *Autoscaler + urlParams_ gensupport.URLParams + ctx_ context.Context +} + +// Patch: Updates an autoscaler resource in the specified project using +// the data included in the request. This method supports patch +// semantics. +func (r *AutoscalersService) Patch(project string, zone string, autoscaler string, autoscaler2 *Autoscaler) *AutoscalersPatchCall { + c := &AutoscalersPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.zone = zone + c.urlParams_.Set("autoscaler", autoscaler) + c.autoscaler2 = autoscaler2 + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *AutoscalersPatchCall) Fields(s ...googleapi.Field) *AutoscalersPatchCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *AutoscalersPatchCall) Context(ctx context.Context) *AutoscalersPatchCall { + c.ctx_ = ctx + return c +} + +func (c *AutoscalersPatchCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.autoscaler2) + if err != nil { + return nil, err + } + ctype := "application/json" + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/autoscalers") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("PATCH", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "zone": c.zone, + }) + req.Header.Set("Content-Type", ctype) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "compute.autoscalers.patch" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *AutoscalersPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Updates an autoscaler resource in the specified project using the data included in the request. This method supports patch semantics.", + // "httpMethod": "PATCH", + // "id": "compute.autoscalers.patch", + // "parameterOrder": [ + // "project", + // "zone", + // "autoscaler" + // ], + // "parameters": { + // "autoscaler": { + // "description": "Name of the autoscaler resource to update.", + // "location": "query", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "zone": { + // "description": "Name of the zone scoping this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/zones/{zone}/autoscalers", + // "request": { + // "$ref": "Autoscaler" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.autoscalers.update": + +type AutoscalersUpdateCall struct { + s *Service + project string + zone string + autoscaler *Autoscaler + urlParams_ gensupport.URLParams + ctx_ context.Context +} + +// Update: Updates an autoscaler resource in the specified project using +// the data included in the request. +func (r *AutoscalersService) Update(project string, zone string, autoscaler *Autoscaler) *AutoscalersUpdateCall { + c := &AutoscalersUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.zone = zone + c.autoscaler = autoscaler + return c +} + +// Autoscaler sets the optional parameter "autoscaler": Name of the +// autoscaler resource to update. +func (c *AutoscalersUpdateCall) Autoscaler(autoscaler string) *AutoscalersUpdateCall { + c.urlParams_.Set("autoscaler", autoscaler) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *AutoscalersUpdateCall) Fields(s ...googleapi.Field) *AutoscalersUpdateCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. 
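+//
+// A minimal usage sketch (illustrative only, not part of the generated
+// surface; it assumes a previously constructed *Service reachable as
+// svc, its Autoscalers field, and an *Autoscaler value as carrying the
+// desired state):
+//
+//    ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
+//    defer cancel()
+//    op, err := svc.Autoscalers.Update("my-project", "us-central1-f", as).
+//        Autoscaler("my-autoscaler").
+//        Context(ctx).
+//        Do()
+//    if err != nil {
+//        // err is set if the call failed or ctx was canceled first.
+//    }
+//    _ = op // *Operation describing the asynchronous update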
+func (c *AutoscalersUpdateCall) Context(ctx context.Context) *AutoscalersUpdateCall { + c.ctx_ = ctx + return c +} + +func (c *AutoscalersUpdateCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.autoscaler) + if err != nil { + return nil, err + } + ctype := "application/json" + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/autoscalers") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("PUT", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "zone": c.zone, + }) + req.Header.Set("Content-Type", ctype) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "compute.autoscalers.update" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *AutoscalersUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Updates an autoscaler resource in the specified project using the data included in the request.", + // "httpMethod": "PUT", + // "id": "compute.autoscalers.update", + // "parameterOrder": [ + // "project", + // "zone" + // ], + // "parameters": { + // "autoscaler": { + // "description": "Name of the autoscaler resource to update.", + // "location": "query", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "zone": { + // "description": "Name of the zone scoping this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/zones/{zone}/autoscalers", + // "request": { + // "$ref": "Autoscaler" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.backendServices.delete": + +type BackendServicesDeleteCall struct { + s *Service + project string + backendService string + urlParams_ gensupport.URLParams + ctx_ context.Context +} + +// Delete: Deletes the specified BackendService 
resource. +// For details, see https://cloud.google.com/compute/docs/reference/latest/backendServices/delete +func (r *BackendServicesService) Delete(project string, backendService string) *BackendServicesDeleteCall { + c := &BackendServicesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.backendService = backendService + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *BackendServicesDeleteCall) Fields(s ...googleapi.Field) *BackendServicesDeleteCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *BackendServicesDeleteCall) Context(ctx context.Context) *BackendServicesDeleteCall { + c.ctx_ = ctx + return c +} + +func (c *BackendServicesDeleteCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/backendServices/{backendService}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("DELETE", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "backendService": c.backendService, + }) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "compute.backendServices.delete" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *BackendServicesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Deletes the specified BackendService resource.", + // "httpMethod": "DELETE", + // "id": "compute.backendServices.delete", + // "parameterOrder": [ + // "project", + // "backendService" + // ], + // "parameters": { + // "backendService": { + // "description": "Name of the BackendService resource to delete.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/global/backendServices/{backendService}", + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.backendServices.get": + +type BackendServicesGetCall struct { + s *Service + project string + backendService string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context +} + +// Get: Returns the specified BackendService resource. +// For details, see https://cloud.google.com/compute/docs/reference/latest/backendServices/get +func (r *BackendServicesService) Get(project string, backendService string) *BackendServicesGetCall { + c := &BackendServicesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.backendService = backendService + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *BackendServicesGetCall) Fields(s ...googleapi.Field) *BackendServicesGetCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of If-None-Match. +func (c *BackendServicesGetCall) IfNoneMatch(entityTag string) *BackendServicesGetCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled.
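+//
+// A partial-response sketch for the Fields option documented above
+// (illustrative only; svc is an assumed *Service value and the field
+// names are examples, not a statement of the BackendService schema):
+//
+//    bs, err := svc.BackendServices.Get("my-project", "my-backend").
+//        Fields("name", "backends").
+//        Do()
+//    if err != nil {
+//        // handle the error
+//    }
+//    _ = bs // only the requested fields are populated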
+func (c *BackendServicesGetCall) Context(ctx context.Context) *BackendServicesGetCall { + c.ctx_ = ctx + return c +} + +func (c *BackendServicesGetCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/backendServices/{backendService}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "backendService": c.backendService, + }) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + req.Header.Set("If-None-Match", c.ifNoneMatch_) + } + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "compute.backendServices.get" call. +// Exactly one of *BackendService or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *BackendService.ServerResponse.Header or (if a response was returned +// at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *BackendServicesGetCall) Do(opts ...googleapi.CallOption) (*BackendService, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &BackendService{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Returns the specified BackendService resource.", + // "httpMethod": "GET", + // "id": "compute.backendServices.get", + // "parameterOrder": [ + // "project", + // "backendService" + // ], + // "parameters": { + // "backendService": { + // "description": "Name of the BackendService resource to return.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/global/backendServices/{backendService}", + // "response": { + // "$ref": "BackendService" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// method id "compute.backendServices.getHealth": + +type BackendServicesGetHealthCall struct { + s *Service + project string + backendService string + resourcegroupreference *ResourceGroupReference + urlParams_ gensupport.URLParams + ctx_ context.Context +} + +// GetHealth: Gets the most recent health check results for this +// BackendService. 
+// For details, see https://cloud.google.com/compute/docs/reference/latest/backendServices/getHealth +func (r *BackendServicesService) GetHealth(project string, backendService string, resourcegroupreference *ResourceGroupReference) *BackendServicesGetHealthCall { + c := &BackendServicesGetHealthCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.backendService = backendService + c.resourcegroupreference = resourcegroupreference + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *BackendServicesGetHealthCall) Fields(s ...googleapi.Field) *BackendServicesGetHealthCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *BackendServicesGetHealthCall) Context(ctx context.Context) *BackendServicesGetHealthCall { + c.ctx_ = ctx + return c +} + +func (c *BackendServicesGetHealthCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.resourcegroupreference) + if err != nil { + return nil, err + } + ctype := "application/json" + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/backendServices/{backendService}/getHealth") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "backendService": c.backendService, + }) + req.Header.Set("Content-Type", ctype) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "compute.backendServices.getHealth" call. +// Exactly one of *BackendServiceGroupHealth or error will be non-nil. +// Any non-2xx status code is an error. Response headers are in either +// *BackendServiceGroupHealth.ServerResponse.Header or (if a response +// was returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *BackendServicesGetHealthCall) Do(opts ...googleapi.CallOption) (*BackendServiceGroupHealth, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &BackendServiceGroupHealth{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Gets the most recent health check results for this BackendService.", + // "httpMethod": "POST", + // "id": "compute.backendServices.getHealth", + // "parameterOrder": [ + // "project", + // "backendService" + // ], + // "parameters": { + // "backendService": { + // "description": "Name of the BackendService resource to which the queried instance belongs.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "project": { + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/global/backendServices/{backendService}/getHealth", + // "request": { + // "$ref": "ResourceGroupReference" + // }, + // "response": { + // "$ref": "BackendServiceGroupHealth" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// method id "compute.backendServices.insert": + +type BackendServicesInsertCall struct { + s *Service + project string + backendservice *BackendService + urlParams_ gensupport.URLParams + ctx_ context.Context +} + +// Insert: Creates a BackendService resource in the specified project +// using the data included in the request. +// For details, see https://cloud.google.com/compute/docs/reference/latest/backendServices/insert +func (r *BackendServicesService) Insert(project string, backendservice *BackendService) *BackendServicesInsertCall { + c := &BackendServicesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.backendservice = backendservice + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *BackendServicesInsertCall) Fields(s ...googleapi.Field) *BackendServicesInsertCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *BackendServicesInsertCall) Context(ctx context.Context) *BackendServicesInsertCall { + c.ctx_ = ctx + return c +} + +func (c *BackendServicesInsertCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.backendservice) + if err != nil { + return nil, err + } + ctype := "application/json" + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/backendServices") + urls += "?" 
+ c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + }) + req.Header.Set("Content-Type", ctype) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "compute.backendServices.insert" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *BackendServicesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Creates a BackendService resource in the specified project using the data included in the request.", + // "httpMethod": "POST", + // "id": "compute.backendServices.insert", + // "parameterOrder": [ + // "project" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/global/backendServices", + // "request": { + // "$ref": "BackendService" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.backendServices.list": + +type BackendServicesListCall struct { + s *Service + project string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context +} + +// List: Retrieves the list of BackendService resources available to the +// specified project. +// For details, see https://cloud.google.com/compute/docs/reference/latest/backendServices/list +func (r *BackendServicesService) List(project string) *BackendServicesListCall { + c := &BackendServicesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + return c +} + +// Filter sets the optional parameter "filter": Sets a filter expression +// for filtering listed resources, in the form filter={expression}. Your +// {expression} must be in the format: field_name comparison_string +// literal_string. +// +// The field_name is the name of the field you want to compare. Only +// atomic field types are supported (string, number, boolean). The +// comparison_string must be either eq (equals) or ne (not equals). The +// literal_string is the string value to filter to. 
The literal value +// must be valid for the type of field you are filtering by (string, +// number, boolean). For string fields, the literal value is interpreted +// as a regular expression using RE2 syntax. The literal value must +// match the entire field. +// +// For example, filter=name ne example-instance. +// +// Compute Engine Beta API Only: If you use filtering in the Beta API, +// you can also filter on nested fields. For example, you could filter +// on instances that have set the scheduling.automaticRestart field to +// true. In particular, use filtering on nested fields to take advantage +// of instance labels to organize and filter results based on label +// values. +// +// The Beta API also supports filtering on multiple expressions by +// providing each separate expression within parentheses. For example, +// (scheduling.automaticRestart eq true) (zone eq us-central1-f). +// Multiple expressions are treated as AND expressions meaning that +// resources must match all expressions to pass the filters. +func (c *BackendServicesListCall) Filter(filter string) *BackendServicesListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that Compute Engine should return. If the +// number of available results is larger than maxResults, Compute Engine +// returns a nextPageToken that can be used to get the next page of +// results in subsequent list requests. +func (c *BackendServicesListCall) MaxResults(maxResults int64) *BackendServicesListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set pageToken to the nextPageToken returned by a +// previous list request to get the next page of results. +func (c *BackendServicesListCall) PageToken(pageToken string) *BackendServicesListCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *BackendServicesListCall) Fields(s ...googleapi.Field) *BackendServicesListCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of If-None-Match. +func (c *BackendServicesListCall) IfNoneMatch(entityTag string) *BackendServicesListCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *BackendServicesListCall) Context(ctx context.Context) *BackendServicesListCall { + c.ctx_ = ctx + return c +} + +func (c *BackendServicesListCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/backendServices") + urls += "?"
+ c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + }) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + req.Header.Set("If-None-Match", c.ifNoneMatch_) + } + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "compute.backendServices.list" call. +// Exactly one of *BackendServiceList or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *BackendServiceList.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *BackendServicesListCall) Do(opts ...googleapi.CallOption) (*BackendServiceList, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &BackendServiceList{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Retrieves the list of BackendService resources available to the specified project.", + // "httpMethod": "GET", + // "id": "compute.backendServices.list", + // "parameterOrder": [ + // "project" + // ], + // "parameters": { + // "filter": { + // "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, filter=name ne example-instance.\n\nCompute Engine Beta API Only: If you use filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. In particular, use filtering on nested fields to take advantage of instance labels to organize and filter results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions meaning that resources must match all expressions to pass the filters.", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that Compute Engine should return. 
If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests.", + // "format": "uint32", + // "location": "query", + // "maximum": "500", + // "minimum": "0", + // "type": "integer" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "location": "query", + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/global/backendServices", + // "response": { + // "$ref": "BackendServiceList" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// method id "compute.backendServices.patch": + +type BackendServicesPatchCall struct { + s *Service + project string + backendService string + backendservice *BackendService + urlParams_ gensupport.URLParams + ctx_ context.Context +} + +// Patch: Update the entire content of the BackendService resource. This +// method supports patch semantics. +// For details, see https://cloud.google.com/compute/docs/reference/latest/backendServices/patch +func (r *BackendServicesService) Patch(project string, backendService string, backendservice *BackendService) *BackendServicesPatchCall { + c := &BackendServicesPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.backendService = backendService + c.backendservice = backendservice + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *BackendServicesPatchCall) Fields(s ...googleapi.Field) *BackendServicesPatchCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *BackendServicesPatchCall) Context(ctx context.Context) *BackendServicesPatchCall { + c.ctx_ = ctx + return c +} + +func (c *BackendServicesPatchCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.backendservice) + if err != nil { + return nil, err + } + ctype := "application/json" + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/backendServices/{backendService}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("PATCH", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "backendService": c.backendService, + }) + req.Header.Set("Content-Type", ctype) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "compute.backendServices.patch" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. 
Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *BackendServicesPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Update the entire content of the BackendService resource. This method supports patch semantics.", + // "httpMethod": "PATCH", + // "id": "compute.backendServices.patch", + // "parameterOrder": [ + // "project", + // "backendService" + // ], + // "parameters": { + // "backendService": { + // "description": "Name of the BackendService resource to update.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/global/backendServices/{backendService}", + // "request": { + // "$ref": "BackendService" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.backendServices.update": + +type BackendServicesUpdateCall struct { + s *Service + project string + backendService string + backendservice *BackendService + urlParams_ gensupport.URLParams + ctx_ context.Context +} + +// Update: Update the entire content of the BackendService resource. +// For details, see https://cloud.google.com/compute/docs/reference/latest/backendServices/update +func (r *BackendServicesService) Update(project string, backendService string, backendservice *BackendService) *BackendServicesUpdateCall { + c := &BackendServicesUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.backendService = backendService + c.backendservice = backendservice + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *BackendServicesUpdateCall) Fields(s ...googleapi.Field) *BackendServicesUpdateCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. 
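+//
+// Usage sketch (illustrative only; svc is an assumed *Service). Since
+// Update replaces the entire content of the resource, bs should hold
+// the complete desired state, typically a previously fetched
+// *BackendService with the relevant fields changed:
+//
+//    op, err := svc.BackendServices.Update("my-project", "my-backend", bs).Do()
+//    if err != nil {
+//        // handle the error
+//    }
+//    _ = op // *Operation tracking the replacement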
+func (c *BackendServicesUpdateCall) Context(ctx context.Context) *BackendServicesUpdateCall { + c.ctx_ = ctx + return c +} + +func (c *BackendServicesUpdateCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.backendservice) + if err != nil { + return nil, err + } + ctype := "application/json" + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/backendServices/{backendService}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("PUT", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "backendService": c.backendService, + }) + req.Header.Set("Content-Type", ctype) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "compute.backendServices.update" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *BackendServicesUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Update the entire content of the BackendService resource.", + // "httpMethod": "PUT", + // "id": "compute.backendServices.update", + // "parameterOrder": [ + // "project", + // "backendService" + // ], + // "parameters": { + // "backendService": { + // "description": "Name of the BackendService resource to update.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/global/backendServices/{backendService}", + // "request": { + // "$ref": "BackendService" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.diskTypes.aggregatedList": + +type DiskTypesAggregatedListCall struct { + s *Service + project string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context +} + +// AggregatedList: Retrieves the list of disk type resources grouped by +// scope. 
+// For details, see https://cloud.google.com/compute/docs/reference/latest/diskTypes/aggregatedList +func (r *DiskTypesService) AggregatedList(project string) *DiskTypesAggregatedListCall { + c := &DiskTypesAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + return c +} + +// Filter sets the optional parameter "filter": Sets a filter expression +// for filtering listed resources, in the form filter={expression}. Your +// {expression} must be in the format: field_name comparison_string +// literal_string. +// +// The field_name is the name of the field you want to compare. Only +// atomic field types are supported (string, number, boolean). The +// comparison_string must be either eq (equals) or ne (not equals). The +// literal_string is the string value to filter to. The literal value +// must be valid for the type of field you are filtering by (string, +// number, boolean). For string fields, the literal value is interpreted +// as a regular expression using RE2 syntax. The literal value must +// match the entire field. +// +// For example, filter=name ne example-instance. +// +// Compute Engine Beta API Only: If you use filtering in the Beta API, +// you can also filter on nested fields. For example, you could filter +// on instances that have set the scheduling.automaticRestart field to +// true. In particular, use filtering on nested fields to take advantage +// of instance labels to organize and filter results based on label +// values. +// +// The Beta API also supports filtering on multiple expressions by +// providing each separate expression within parentheses. For example, +// (scheduling.automaticRestart eq true) (zone eq us-central1-f). +// Multiple expressions are treated as AND expressions meaning that +// resources must match all expressions to pass the filters. +func (c *DiskTypesAggregatedListCall) Filter(filter string) *DiskTypesAggregatedListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that Compute Engine should return. If the +// number of available results is larger than maxResults, Compute Engine +// returns a nextPageToken that can be used to get the next page of +// results in subsequent list requests. +func (c *DiskTypesAggregatedListCall) MaxResults(maxResults int64) *DiskTypesAggregatedListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set pageToken to the nextPageToken returned by a +// previous list request to get the next page of results. +func (c *DiskTypesAggregatedListCall) PageToken(pageToken string) *DiskTypesAggregatedListCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *DiskTypesAggregatedListCall) Fields(s ...googleapi.Field) *DiskTypesAggregatedListCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of If-None-Match.
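+//
+// A conditional-read sketch tying IfNoneMatch to googleapi.IsNotModified
+// (illustrative only; svc is an assumed *Service and etag an ETag saved
+// from an earlier response; the filter string follows the syntax
+// described under Filter above):
+//
+//    _, err := svc.DiskTypes.AggregatedList("my-project").
+//        Filter("name eq pd-ssd").
+//        IfNoneMatch(etag).
+//        Do()
+//    if googleapi.IsNotModified(err) {
+//        // Nothing changed since etag was issued; reuse the cached copy.
+//    }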
+func (c *DiskTypesAggregatedListCall) IfNoneMatch(entityTag string) *DiskTypesAggregatedListCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *DiskTypesAggregatedListCall) Context(ctx context.Context) *DiskTypesAggregatedListCall { + c.ctx_ = ctx + return c +} + +func (c *DiskTypesAggregatedListCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/diskTypes") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + }) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + req.Header.Set("If-None-Match", c.ifNoneMatch_) + } + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "compute.diskTypes.aggregatedList" call. +// Exactly one of *DiskTypeAggregatedList or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *DiskTypeAggregatedList.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *DiskTypesAggregatedListCall) Do(opts ...googleapi.CallOption) (*DiskTypeAggregatedList, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &DiskTypeAggregatedList{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Retrieves the list of disk type resources grouped by scope.", + // "httpMethod": "GET", + // "id": "compute.diskTypes.aggregatedList", + // "parameterOrder": [ + // "project" + // ], + // "parameters": { + // "filter": { + // "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, filter=name ne example-instance.\n\nCompute Engine Beta API Only: If you use filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. 
In particular, use filtering on nested fields to take advantage of instance labels to organize and filter results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions meaning that resources must match all expressions to pass the filters.", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that Compute Engine should return. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests.", + // "format": "uint32", + // "location": "query", + // "maximum": "500", + // "minimum": "0", + // "type": "integer" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "location": "query", + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/aggregated/diskTypes", + // "response": { + // "$ref": "DiskTypeAggregatedList" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// method id "compute.diskTypes.get": + +type DiskTypesGetCall struct { + s *Service + project string + zone string + diskType string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context +} + +// Get: Returns the specified disk type resource. +// For details, see https://cloud.google.com/compute/docs/reference/latest/diskTypes/get +func (r *DiskTypesService) Get(project string, zone string, diskType string) *DiskTypesGetCall { + c := &DiskTypesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.zone = zone + c.diskType = diskType + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *DiskTypesGetCall) Fields(s ...googleapi.Field) *DiskTypesGetCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of If-None-Match. +func (c *DiskTypesGetCall) IfNoneMatch(entityTag string) *DiskTypesGetCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled.
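+//
+// A minimal, illustrative sketch of bounding a call with a deadline
+// (assuming an authenticated *Service in svc, plus imports of the
+// context and time packages; this example is not part of the generated
+// code):
+//
+//	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
+//	defer cancel()
+//	dt, err := svc.DiskTypes.Get("my-project", "us-central1-a", "pd-ssd").
+//		Context(ctx).
+//		Do()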
+func (c *DiskTypesGetCall) Context(ctx context.Context) *DiskTypesGetCall { + c.ctx_ = ctx + return c +} + +func (c *DiskTypesGetCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/diskTypes/{diskType}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "zone": c.zone, + "diskType": c.diskType, + }) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + req.Header.Set("If-None-Match", c.ifNoneMatch_) + } + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "compute.diskTypes.get" call. +// Exactly one of *DiskType or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *DiskType.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *DiskTypesGetCall) Do(opts ...googleapi.CallOption) (*DiskType, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &DiskType{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Returns the specified disk type resource.", + // "httpMethod": "GET", + // "id": "compute.diskTypes.get", + // "parameterOrder": [ + // "project", + // "zone", + // "diskType" + // ], + // "parameters": { + // "diskType": { + // "description": "Name of the disk type resource to return.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/zones/{zone}/diskTypes/{diskType}", + // "response": { + // "$ref": "DiskType" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// method id "compute.diskTypes.list": + +type DiskTypesListCall struct { + s *Service + project string + zone string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context +} + +// List: Retrieves the list of disk type resources available to the +// specified project. 
+// For details, see https://cloud.google.com/compute/docs/reference/latest/diskTypes/list +func (r *DiskTypesService) List(project string, zone string) *DiskTypesListCall { + c := &DiskTypesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.zone = zone + return c +} + +// Filter sets the optional parameter "filter": Sets a filter expression +// for filtering listed resources, in the form filter={expression}. Your +// {expression} must be in the format: field_name comparison_string +// literal_string. +// +// The field_name is the name of the field you want to compare. Only +// atomic field types are supported (string, number, boolean). The +// comparison_string must be either eq (equals) or ne (not equals). The +// literal_string is the string value to filter to. The literal value +// must be valid for the type of field you are filtering by (string, +// number, boolean). For string fields, the literal value is interpreted +// as a regular expression using RE2 syntax. The literal value must +// match the entire field. +// +// For example, filter=name ne example-instance. +// +// Compute Engine Beta API Only: If you use filtering in the Beta API, +// you can also filter on nested fields. For example, you could filter +// on instances that have set the scheduling.automaticRestart field to +// true. In particular, use filtering on nested fields to take advantage +// of instance labels to organize and filter results based on label +// values. +// +// The Beta API also supports filtering on multiple expressions by +// providing each separate expression within parentheses. For example, +// (scheduling.automaticRestart eq true) (zone eq us-central1-f). +// Multiple expressions are treated as AND expressions meaning that +// resources must match all expressions to pass the filters. +func (c *DiskTypesListCall) Filter(filter string) *DiskTypesListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that Compute Engine should return. If the +// number of available results is larger than maxResults, Compute Engine +// returns a nextPageToken that can be used to get the next page of +// results in subsequent list requests. +func (c *DiskTypesListCall) MaxResults(maxResults int64) *DiskTypesListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set pageToken to the nextPageToken returned by a +// previous list request to get the next page of results. +func (c *DiskTypesListCall) PageToken(pageToken string) *DiskTypesListCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *DiskTypesListCall) Fields(s ...googleapi.Field) *DiskTypesListCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of If-None-Match.
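+//
+// The pagination parameters above (MaxResults, PageToken) compose into
+// the usual token-passing loop. A sketch under assumed names (svc is an
+// authenticated *Service; this is illustrative, not generated code):
+//
+//	token := ""
+//	for {
+//		call := svc.DiskTypes.List("my-project", "us-central1-a").MaxResults(100)
+//		if token != "" {
+//			call = call.PageToken(token)
+//		}
+//		list, err := call.Do()
+//		if err != nil {
+//			break // handle the error
+//		}
+//		// ... consume list.Items ...
+//		if token = list.NextPageToken; token == "" {
+//			break
+//		}
+//	}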
+func (c *DiskTypesListCall) IfNoneMatch(entityTag string) *DiskTypesListCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *DiskTypesListCall) Context(ctx context.Context) *DiskTypesListCall { + c.ctx_ = ctx + return c +} + +func (c *DiskTypesListCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/diskTypes") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "zone": c.zone, + }) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + req.Header.Set("If-None-Match", c.ifNoneMatch_) + } + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "compute.diskTypes.list" call. +// Exactly one of *DiskTypeList or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *DiskTypeList.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *DiskTypesListCall) Do(opts ...googleapi.CallOption) (*DiskTypeList, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &DiskTypeList{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Retrieves the list of disk type resources available to the specified project.", + // "httpMethod": "GET", + // "id": "compute.diskTypes.list", + // "parameterOrder": [ + // "project", + // "zone" + // ], + // "parameters": { + // "filter": { + // "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, filter=name ne example-instance.\n\nCompute Engine Beta API Only: If you use filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. 
In particular, use filtering on nested fields to take advantage of instance labels to organize and filter results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions meaning that resources must match all expressions to pass the filters.", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that Compute Engine should return. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests.", + // "format": "uint32", + // "location": "query", + // "maximum": "500", + // "minimum": "0", + // "type": "integer" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "location": "query", + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/zones/{zone}/diskTypes", + // "response": { + // "$ref": "DiskTypeList" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// method id "compute.disks.aggregatedList": + +type DisksAggregatedListCall struct { + s *Service + project string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context +} + +// AggregatedList: Retrieves the list of persistent disks grouped by +// scope. +// For details, see https://cloud.google.com/compute/docs/reference/latest/disks/aggregatedList +func (r *DisksService) AggregatedList(project string) *DisksAggregatedListCall { + c := &DisksAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + return c +} + +// Filter sets the optional parameter "filter": Sets a filter expression +// for filtering listed resources, in the form filter={expression}. Your +// {expression} must be in the format: field_name comparison_string +// literal_string. +// +// The field_name is the name of the field you want to compare. Only +// atomic field types are supported (string, number, boolean). The +// comparison_string must be either eq (equals) or ne (not equals). The +// literal_string is the string value to filter to. The literal value +// must be valid for the type of field you are filtering by (string, +// number, boolean). For string fields, the literal value is interpreted +// as a regular expression using RE2 syntax. The literal value must +// match the entire field. +// +// For example, filter=name ne example-instance. +// +// Compute Engine Beta API Only: If you use filtering in the Beta API, +// you can also filter on nested fields. 
For example, you could filter +// on instances that have set the scheduling.automaticRestart field to +// true. In particular, use filtering on nested fields to take advantage +// of instance labels to organize and filter results based on label +// values. +// +// The Beta API also supports filtering on multiple expressions by +// providing each separate expression within parentheses. For example, +// (scheduling.automaticRestart eq true) (zone eq us-central1-f). +// Multiple expressions are treated as AND expressions meaning that +// resources must match all expressions to pass the filters. +func (c *DisksAggregatedListCall) Filter(filter string) *DisksAggregatedListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that Compute Engine should return. If the +// number of available results is larger than maxResults, Compute Engine +// returns a nextPageToken that can be used to get the next page of +// results in subsequent list requests. +func (c *DisksAggregatedListCall) MaxResults(maxResults int64) *DisksAggregatedListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set pageToken to the nextPageToken returned by a +// previous list request to get the next page of results. +func (c *DisksAggregatedListCall) PageToken(pageToken string) *DisksAggregatedListCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *DisksAggregatedListCall) Fields(s ...googleapi.Field) *DisksAggregatedListCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of If-None-Match. +func (c *DisksAggregatedListCall) IfNoneMatch(entityTag string) *DisksAggregatedListCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *DisksAggregatedListCall) Context(ctx context.Context) *DisksAggregatedListCall { + c.ctx_ = ctx + return c +} + +func (c *DisksAggregatedListCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/disks") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + }) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + req.Header.Set("If-None-Match", c.ifNoneMatch_) + } + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "compute.disks.aggregatedList" call. +// Exactly one of *DiskAggregatedList or error will be non-nil. Any +// non-2xx status code is an error.
Response headers are in either +// *DiskAggregatedList.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *DisksAggregatedListCall) Do(opts ...googleapi.CallOption) (*DiskAggregatedList, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &DiskAggregatedList{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Retrieves the list of persistent disks grouped by scope.", + // "httpMethod": "GET", + // "id": "compute.disks.aggregatedList", + // "parameterOrder": [ + // "project" + // ], + // "parameters": { + // "filter": { + // "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, filter=name ne example-instance.\n\nCompute Engine Beta API Only: If you use filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. In particular, use filtering on nested fields to take advantage of instance labels to organize and filter results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions meaning that resources must match all expressions to pass the filters.", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that Compute Engine should return. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests.", + // "format": "uint32", + // "location": "query", + // "maximum": "500", + // "minimum": "0", + // "type": "integer" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. 
Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "location": "query", + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/aggregated/disks", + // "response": { + // "$ref": "DiskAggregatedList" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// method id "compute.disks.createSnapshot": + +type DisksCreateSnapshotCall struct { + s *Service + project string + zone string + disk string + snapshot *Snapshot + urlParams_ gensupport.URLParams + ctx_ context.Context +} + +// CreateSnapshot: Creates a snapshot of a specified persistent disk. +// For details, see https://cloud.google.com/compute/docs/reference/latest/disks/createSnapshot +func (r *DisksService) CreateSnapshot(project string, zone string, disk string, snapshot *Snapshot) *DisksCreateSnapshotCall { + c := &DisksCreateSnapshotCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.zone = zone + c.disk = disk + c.snapshot = snapshot + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *DisksCreateSnapshotCall) Fields(s ...googleapi.Field) *DisksCreateSnapshotCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *DisksCreateSnapshotCall) Context(ctx context.Context) *DisksCreateSnapshotCall { + c.ctx_ = ctx + return c +} + +func (c *DisksCreateSnapshotCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.snapshot) + if err != nil { + return nil, err + } + ctype := "application/json" + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/disks/{disk}/createSnapshot") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "zone": c.zone, + "disk": c.disk, + }) + req.Header.Set("Content-Type", ctype) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "compute.disks.createSnapshot" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *DisksCreateSnapshotCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Creates a snapshot of a specified persistent disk.", + // "httpMethod": "POST", + // "id": "compute.disks.createSnapshot", + // "parameterOrder": [ + // "project", + // "zone", + // "disk" + // ], + // "parameters": { + // "disk": { + // "description": "Name of the persistent disk to snapshot.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/zones/{zone}/disks/{disk}/createSnapshot", + // "request": { + // "$ref": "Snapshot" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.disks.delete": + +type DisksDeleteCall struct { + s *Service + project string + zone string + disk string + urlParams_ gensupport.URLParams + ctx_ context.Context +} + +// Delete: Deletes the specified persistent disk. Deleting a disk +// removes its data permanently and is irreversible. However, deleting a +// disk does not delete any snapshots previously made from the disk. You +// must separately delete snapshots. +// For details, see https://cloud.google.com/compute/docs/reference/latest/disks/delete +func (r *DisksService) Delete(project string, zone string, disk string) *DisksDeleteCall { + c := &DisksDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.zone = zone + c.disk = disk + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *DisksDeleteCall) Fields(s ...googleapi.Field) *DisksDeleteCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *DisksDeleteCall) Context(ctx context.Context) *DisksDeleteCall { + c.ctx_ = ctx + return c +} + +func (c *DisksDeleteCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/disks/{disk}") + urls += "?" 
+ c.urlParams_.Encode() + req, _ := http.NewRequest("DELETE", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "zone": c.zone, + "disk": c.disk, + }) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "compute.disks.delete" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *DisksDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Deletes the specified persistent disk. Deleting a disk removes its data permanently and is irreversible. However, deleting a disk does not delete any snapshots previously made from the disk. You must separately delete snapshots.", + // "httpMethod": "DELETE", + // "id": "compute.disks.delete", + // "parameterOrder": [ + // "project", + // "zone", + // "disk" + // ], + // "parameters": { + // "disk": { + // "description": "Name of the persistent disk to delete.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/zones/{zone}/disks/{disk}", + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.disks.get": + +type DisksGetCall struct { + s *Service + project string + zone string + disk string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context +} + +// Get: Returns a specified persistent disk. +// For details, see https://cloud.google.com/compute/docs/reference/latest/disks/get +func (r *DisksService) Get(project string, zone string, disk string) *DisksGetCall { + c := &DisksGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.zone = zone + c.disk = disk + return c +} + +// Fields allows partial responses to be retrieved. 
See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *DisksGetCall) Fields(s ...googleapi.Field) *DisksGetCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of If-None-Match. +func (c *DisksGetCall) IfNoneMatch(entityTag string) *DisksGetCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *DisksGetCall) Context(ctx context.Context) *DisksGetCall { + c.ctx_ = ctx + return c +} + +func (c *DisksGetCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/disks/{disk}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "zone": c.zone, + "disk": c.disk, + }) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + req.Header.Set("If-None-Match", c.ifNoneMatch_) + } + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "compute.disks.get" call. +// Exactly one of *Disk or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Disk.ServerResponse.Header or (if a response was returned at all) in +// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check +// whether the returned error was because http.StatusNotModified was +// returned. +func (c *DisksGetCall) Do(opts ...googleapi.CallOption) (*Disk, error) { + gensupport.SetOptions(c.urlParams_, opts...)
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Disk{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Returns a specified persistent disk.", + // "httpMethod": "GET", + // "id": "compute.disks.get", + // "parameterOrder": [ + // "project", + // "zone", + // "disk" + // ], + // "parameters": { + // "disk": { + // "description": "Name of the persistent disk to return.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/zones/{zone}/disks/{disk}", + // "response": { + // "$ref": "Disk" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// method id "compute.disks.insert": + +type DisksInsertCall struct { + s *Service + project string + zone string + disk *Disk + urlParams_ gensupport.URLParams + ctx_ context.Context +} + +// Insert: Creates a persistent disk in the specified project using the +// data included in the request. +// For details, see https://cloud.google.com/compute/docs/reference/latest/disks/insert +func (r *DisksService) Insert(project string, zone string, disk *Disk) *DisksInsertCall { + c := &DisksInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.zone = zone + c.disk = disk + return c +} + +// SourceImage sets the optional parameter "sourceImage": Source image +// to restore onto a disk. +func (c *DisksInsertCall) SourceImage(sourceImage string) *DisksInsertCall { + c.urlParams_.Set("sourceImage", sourceImage) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *DisksInsertCall) Fields(s ...googleapi.Field) *DisksInsertCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. 
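+//
+// A hedged sketch of the insert flow (the project, zone, disk name, and
+// size below are placeholders, not part of this file; svc is assumed to
+// be an authenticated *Service and compute the import alias for this
+// package):
+//
+//	op, err := svc.Disks.Insert("my-project", "us-central1-a", &compute.Disk{
+//		Name:   "example-disk",
+//		SizeGb: 10,
+//	}).Do()
+//	// op is a zonal Operation; callers typically poll
+//	// svc.ZoneOperations.Get(project, zone, op.Name) until its Status
+//	// is "DONE" before using the disk.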
+func (c *DisksInsertCall) Context(ctx context.Context) *DisksInsertCall { + c.ctx_ = ctx + return c +} + +func (c *DisksInsertCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.disk) + if err != nil { + return nil, err + } + ctype := "application/json" + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/disks") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "zone": c.zone, + }) + req.Header.Set("Content-Type", ctype) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "compute.disks.insert" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *DisksInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Creates a persistent disk in the specified project using the data included in the request.", + // "httpMethod": "POST", + // "id": "compute.disks.insert", + // "parameterOrder": [ + // "project", + // "zone" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "sourceImage": { + // "description": "Optional. Source image to restore onto a disk.", + // "location": "query", + // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/zones/{zone}/disks", + // "request": { + // "$ref": "Disk" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.disks.list": + +type DisksListCall struct { + s *Service + project string + zone string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context +} + +// List: Retrieves the list of persistent disks contained within the +// specified zone. 
+// For details, see https://cloud.google.com/compute/docs/reference/latest/disks/list +func (r *DisksService) List(project string, zone string) *DisksListCall { + c := &DisksListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.zone = zone + return c +} + +// Filter sets the optional parameter "filter": Sets a filter expression +// for filtering listed resources, in the form filter={expression}. Your +// {expression} must be in the format: field_name comparison_string +// literal_string. +// +// The field_name is the name of the field you want to compare. Only +// atomic field types are supported (string, number, boolean). The +// comparison_string must be either eq (equals) or ne (not equals). The +// literal_string is the string value to filter to. The literal value +// must be valid for the type of field you are filtering by (string, +// number, boolean). For string fields, the literal value is interpreted +// as a regular expression using RE2 syntax. The literal value must +// match the entire field. +// +// For example, filter=name ne example-instance. +// +// Compute Engine Beta API Only: If you use filtering in the Beta API, +// you can also filter on nested fields. For example, you could filter +// on instances that have set the scheduling.automaticRestart field to +// true. In particular, use filtering on nested fields to take advantage +// of instance labels to organize and filter results based on label +// values. +// +// The Beta API also supports filtering on multiple expressions by +// providing each separate expression within parentheses. For example, +// (scheduling.automaticRestart eq true) (zone eq us-central1-f). +// Multiple expressions are treated as AND expressions meaning that +// resources must match all expressions to pass the filters. +func (c *DisksListCall) Filter(filter string) *DisksListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that Compute Engine should return. If the +// number of available results is larger than maxResults, Compute Engine +// returns a nextPageToken that can be used to get the next page of +// results in subsequent list requests. +func (c *DisksListCall) MaxResults(maxResults int64) *DisksListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set pageToken to the nextPageToken returned by a +// previous list request to get the next page of results. +func (c *DisksListCall) PageToken(pageToken string) *DisksListCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *DisksListCall) Fields(s ...googleapi.Field) *DisksListCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of If-None-Match. +func (c *DisksListCall) IfNoneMatch(entityTag string) *DisksListCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method.
Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *DisksListCall) Context(ctx context.Context) *DisksListCall { + c.ctx_ = ctx + return c +} + +func (c *DisksListCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/disks") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "zone": c.zone, + }) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + req.Header.Set("If-None-Match", c.ifNoneMatch_) + } + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "compute.disks.list" call. +// Exactly one of *DiskList or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *DiskList.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *DisksListCall) Do(opts ...googleapi.CallOption) (*DiskList, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &DiskList{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Retrieves the list of persistent disks contained within the specified zone.", + // "httpMethod": "GET", + // "id": "compute.disks.list", + // "parameterOrder": [ + // "project", + // "zone" + // ], + // "parameters": { + // "filter": { + // "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, filter=name ne example-instance.\n\nCompute Engine Beta API Only: If you use filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. In particular, use filtering on nested fields to take advantage of instance labels to organize and filter results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). 
Multiple expressions are treated as AND expressions meaning that resources must match all expressions to pass the filters.", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that Compute Engine should return. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests.", + // "format": "uint32", + // "location": "query", + // "maximum": "500", + // "minimum": "0", + // "type": "integer" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "location": "query", + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/zones/{zone}/disks", + // "response": { + // "$ref": "DiskList" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// method id "compute.firewalls.delete": + +type FirewallsDeleteCall struct { + s *Service + project string + firewall string + urlParams_ gensupport.URLParams + ctx_ context.Context +} + +// Delete: Deletes the specified firewall resource. +// For details, see https://cloud.google.com/compute/docs/reference/latest/firewalls/delete +func (r *FirewallsService) Delete(project string, firewall string) *FirewallsDeleteCall { + c := &FirewallsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.firewall = firewall + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *FirewallsDeleteCall) Fields(s ...googleapi.Field) *FirewallsDeleteCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *FirewallsDeleteCall) Context(ctx context.Context) *FirewallsDeleteCall { + c.ctx_ = ctx + return c +} + +func (c *FirewallsDeleteCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/firewalls/{firewall}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("DELETE", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "firewall": c.firewall, + }) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "compute.firewalls.delete" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. 
Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *FirewallsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Deletes the specified firewall resource.", + // "httpMethod": "DELETE", + // "id": "compute.firewalls.delete", + // "parameterOrder": [ + // "project", + // "firewall" + // ], + // "parameters": { + // "firewall": { + // "description": "Name of the firewall resource to delete.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/global/firewalls/{firewall}", + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.firewalls.get": + +type FirewallsGetCall struct { + s *Service + project string + firewall string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context +} + +// Get: Returns the specified firewall resource. +// For details, see https://cloud.google.com/compute/docs/reference/latest/firewalls/get +func (r *FirewallsService) Get(project string, firewall string) *FirewallsGetCall { + c := &FirewallsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.firewall = firewall + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *FirewallsGetCall) Fields(s ...googleapi.Field) *FirewallsGetCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *FirewallsGetCall) IfNoneMatch(entityTag string) *FirewallsGetCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. 
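// For example, a minimal sketch (illustrative only, not part of the
// vendored file): svc is assumed to be a *Service built with this
// package's New constructor, and the "time" import is assumed.
//
//	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
//	defer cancel()
//	fw, err := svc.Firewalls.Get("my-project", "allow-ssh").Context(ctx).Do()
//	if err != nil {
//		return err // reports the cancellation if the deadline expired first
//	}
//	fmt.Println(fw.Name)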
+func (c *FirewallsGetCall) Context(ctx context.Context) *FirewallsGetCall { + c.ctx_ = ctx + return c +} + +func (c *FirewallsGetCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/firewalls/{firewall}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "firewall": c.firewall, + }) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + req.Header.Set("If-None-Match", c.ifNoneMatch_) + } + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "compute.firewalls.get" call. +// Exactly one of *Firewall or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Firewall.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *FirewallsGetCall) Do(opts ...googleapi.CallOption) (*Firewall, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Firewall{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Returns the specified firewall resource.", + // "httpMethod": "GET", + // "id": "compute.firewalls.get", + // "parameterOrder": [ + // "project", + // "firewall" + // ], + // "parameters": { + // "firewall": { + // "description": "Name of the firewall resource to return.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/global/firewalls/{firewall}", + // "response": { + // "$ref": "Firewall" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// method id "compute.firewalls.insert": + +type FirewallsInsertCall struct { + s *Service + project string + firewall *Firewall + urlParams_ gensupport.URLParams + ctx_ context.Context +} + +// Insert: Creates a firewall resource in the specified project using +// the data included in the request. 
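// For example, a minimal sketch (illustrative only, not part of the
// vendored file): svc is an assumed *Service, and the field names come
// from the Firewall and FirewallAllowed types generated earlier in this
// file.
//
//	fw := &Firewall{
//		Name:         "allow-ssh",
//		SourceRanges: []string{"203.0.113.0/24"},
//		Allowed: []*FirewallAllowed{
//			{IPProtocol: "tcp", Ports: []string{"22"}},
//		},
//	}
//	op, err := svc.Firewalls.Insert("my-project", fw).Do()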
+// For details, see https://cloud.google.com/compute/docs/reference/latest/firewalls/insert +func (r *FirewallsService) Insert(project string, firewall *Firewall) *FirewallsInsertCall { + c := &FirewallsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.firewall = firewall + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *FirewallsInsertCall) Fields(s ...googleapi.Field) *FirewallsInsertCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *FirewallsInsertCall) Context(ctx context.Context) *FirewallsInsertCall { + c.ctx_ = ctx + return c +} + +func (c *FirewallsInsertCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.firewall) + if err != nil { + return nil, err + } + ctype := "application/json" + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/firewalls") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + }) + req.Header.Set("Content-Type", ctype) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "compute.firewalls.insert" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *FirewallsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
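	// The remainder of this method is the response-handling template shared
	// by every generated Do in this file: issue the request with alt=json;
	// surface a 304 (Not Modified) as a *googleapi.Error so that
	// googleapi.IsNotModified can recognize it; let googleapi.CheckResponse
	// turn any other non-2xx status into an error; then decode the JSON body
	// into the typed result along with the response headers.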
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Creates a firewall resource in the specified project using the data included in the request.", + // "httpMethod": "POST", + // "id": "compute.firewalls.insert", + // "parameterOrder": [ + // "project" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/global/firewalls", + // "request": { + // "$ref": "Firewall" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.firewalls.list": + +type FirewallsListCall struct { + s *Service + project string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context +} + +// List: Retrieves the list of firewall resources available to the +// specified project. +// For details, see https://cloud.google.com/compute/docs/reference/latest/firewalls/list +func (r *FirewallsService) List(project string) *FirewallsListCall { + c := &FirewallsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + return c +} + +// Filter sets the optional parameter "filter": Sets a filter expression +// for filtering listed resources, in the form filter={expression}. Your +// {expression} must be in the format: field_name comparison_string +// literal_string. +// +// The field_name is the name of the field you want to compare. Only +// atomic field types are supported (string, number, boolean). The +// comparison_string must be either eq (equals) or ne (not equals). The +// literal_string is the string value to filter to. The literal value +// must be valid for the type of field you are filtering by (string, +// number, boolean). For string fields, the literal value is interpreted +// as a regular expression using RE2 syntax. The literal value must +// match the entire field. +// +// For example, filter=name ne example-instance. +// +// Compute Engine Beta API Only: If you use filtering in the Beta API, +// you can also filter on nested fields. For example, you could filter +// on instances that have set the scheduling.automaticRestart field to +// true. In particular, use filtering on nested fields to take advantage +// of instance labels to organize and filter results based on label +// values. +// +// The Beta API also supports filtering on multiple expressions by +// providing each separate expression within parentheses. For example, +// (scheduling.automaticRestart eq true) (zone eq us-central1-f). +// Multiple expressions are treated as AND expressions meaning that +// resources must match all expressions to pass the filters. 
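// For example, a minimal sketch (illustrative only, not part of the
// vendored file) combining Filter with MaxResults/PageToken pagination;
// svc is an assumed *Service, and Items/NextPageToken are fields of the
// generated FirewallList type.
//
//	call := svc.Firewalls.List("my-project").
//		Filter("name ne default-allow-ssh").
//		MaxResults(100)
//	for {
//		page, err := call.Do()
//		if err != nil {
//			return err
//		}
//		for _, fw := range page.Items {
//			fmt.Println(fw.Name)
//		}
//		if page.NextPageToken == "" {
//			break
//		}
//		call.PageToken(page.NextPageToken)
//	}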
+func (c *FirewallsListCall) Filter(filter string) *FirewallsListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that Compute Engine should return. If the +// number of available results is larger than maxResults, Compute Engine +// returns a nextPageToken that can be used to get the next page of +// results in subsequent list requests. +func (c *FirewallsListCall) MaxResults(maxResults int64) *FirewallsListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set pageToken to the nextPageToken returned by a +// previous list request to get the next page of results. +func (c *FirewallsListCall) PageToken(pageToken string) *FirewallsListCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *FirewallsListCall) Fields(s ...googleapi.Field) *FirewallsListCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *FirewallsListCall) IfNoneMatch(entityTag string) *FirewallsListCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *FirewallsListCall) Context(ctx context.Context) *FirewallsListCall { + c.ctx_ = ctx + return c +} + +func (c *FirewallsListCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/firewalls") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + }) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + req.Header.Set("If-None-Match", c.ifNoneMatch_) + } + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "compute.firewalls.list" call. +// Exactly one of *FirewallList or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *FirewallList.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *FirewallsListCall) Do(opts ...googleapi.CallOption) (*FirewallList, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &FirewallList{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Retrieves the list of firewall resources available to the specified project.", + // "httpMethod": "GET", + // "id": "compute.firewalls.list", + // "parameterOrder": [ + // "project" + // ], + // "parameters": { + // "filter": { + // "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, filter=name ne example-instance.\n\nCompute Engine Beta API Only: If you use filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. In particular, use filtering on nested fields to take advantage of instance labels to organize and filter results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions meaning that resources must match all expressions to pass the filters.", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that Compute Engine should return. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests.", + // "format": "uint32", + // "location": "query", + // "maximum": "500", + // "minimum": "0", + // "type": "integer" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. 
Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "location": "query", + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/global/firewalls", + // "response": { + // "$ref": "FirewallList" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// method id "compute.firewalls.patch": + +type FirewallsPatchCall struct { + s *Service + project string + firewall string + firewall2 *Firewall + urlParams_ gensupport.URLParams + ctx_ context.Context +} + +// Patch: Updates the specified firewall resource with the data included +// in the request. This method supports patch semantics. +// For details, see https://cloud.google.com/compute/docs/reference/latest/firewalls/patch +func (r *FirewallsService) Patch(project string, firewall string, firewall2 *Firewall) *FirewallsPatchCall { + c := &FirewallsPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.firewall = firewall + c.firewall2 = firewall2 + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *FirewallsPatchCall) Fields(s ...googleapi.Field) *FirewallsPatchCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *FirewallsPatchCall) Context(ctx context.Context) *FirewallsPatchCall { + c.ctx_ = ctx + return c +} + +func (c *FirewallsPatchCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.firewall2) + if err != nil { + return nil, err + } + ctype := "application/json" + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/firewalls/{firewall}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("PATCH", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "firewall": c.firewall, + }) + req.Header.Set("Content-Type", ctype) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "compute.firewalls.patch" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *FirewallsPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Updates the specified firewall resource with the data included in the request. This method supports patch semantics.", + // "httpMethod": "PATCH", + // "id": "compute.firewalls.patch", + // "parameterOrder": [ + // "project", + // "firewall" + // ], + // "parameters": { + // "firewall": { + // "description": "Name of the firewall resource to update.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/global/firewalls/{firewall}", + // "request": { + // "$ref": "Firewall" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.firewalls.update": + +type FirewallsUpdateCall struct { + s *Service + project string + firewall string + firewall2 *Firewall + urlParams_ gensupport.URLParams + ctx_ context.Context +} + +// Update: Updates the specified firewall resource with the data +// included in the request. +// For details, see https://cloud.google.com/compute/docs/reference/latest/firewalls/update +func (r *FirewallsService) Update(project string, firewall string, firewall2 *Firewall) *FirewallsUpdateCall { + c := &FirewallsUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.firewall = firewall + c.firewall2 = firewall2 + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *FirewallsUpdateCall) Fields(s ...googleapi.Field) *FirewallsUpdateCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *FirewallsUpdateCall) Context(ctx context.Context) *FirewallsUpdateCall { + c.ctx_ = ctx + return c +} + +func (c *FirewallsUpdateCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.firewall2) + if err != nil { + return nil, err + } + ctype := "application/json" + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/firewalls/{firewall}") + urls += "?" 
+ c.urlParams_.Encode() + req, _ := http.NewRequest("PUT", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "firewall": c.firewall, + }) + req.Header.Set("Content-Type", ctype) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "compute.firewalls.update" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *FirewallsUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Updates the specified firewall resource with the data included in the request.", + // "httpMethod": "PUT", + // "id": "compute.firewalls.update", + // "parameterOrder": [ + // "project", + // "firewall" + // ], + // "parameters": { + // "firewall": { + // "description": "Name of the firewall resource to update.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/global/firewalls/{firewall}", + // "request": { + // "$ref": "Firewall" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.forwardingRules.aggregatedList": + +type ForwardingRulesAggregatedListCall struct { + s *Service + project string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context +} + +// AggregatedList: Retrieves the list of forwarding rules grouped by +// scope. +// For details, see https://cloud.google.com/compute/docs/reference/latest/forwardingRules/aggregatedList +func (r *ForwardingRulesService) AggregatedList(project string) *ForwardingRulesAggregatedListCall { + c := &ForwardingRulesAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + return c +} + +// Filter sets the optional parameter "filter": Sets a filter expression +// for filtering listed resources, in the form filter={expression}. Your +// {expression} must be in the format: field_name comparison_string +// literal_string. +// +// The field_name is the name of the field you want to compare. 
Only +// atomic field types are supported (string, number, boolean). The +// comparison_string must be either eq (equals) or ne (not equals). The +// literal_string is the string value to filter to. The literal value +// must be valid for the type of field you are filtering by (string, +// number, boolean). For string fields, the literal value is interpreted +// as a regular expression using RE2 syntax. The literal value must +// match the entire field. +// +// For example, filter=name ne example-instance. +// +// Compute Engine Beta API Only: If you use filtering in the Beta API, +// you can also filter on nested fields. For example, you could filter +// on instances that have set the scheduling.automaticRestart field to +// true. In particular, use filtering on nested fields to take advantage +// of instance labels to organize and filter results based on label +// values. +// +// The Beta API also supports filtering on multiple expressions by +// providing each separate expression within parentheses. For example, +// (scheduling.automaticRestart eq true) (zone eq us-central1-f). +// Multiple expressions are treated as AND expressions meaning that +// resources must match all expressions to pass the filters. +func (c *ForwardingRulesAggregatedListCall) Filter(filter string) *ForwardingRulesAggregatedListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that Compute Engine should return. If the +// number of available results is larger than maxResults, Compute Engine +// returns a nextPageToken that can be used to get the next page of +// results in subsequent list requests. +func (c *ForwardingRulesAggregatedListCall) MaxResults(maxResults int64) *ForwardingRulesAggregatedListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set pageToken to the nextPageToken returned by a +// previous list request to get the next page of results. +func (c *ForwardingRulesAggregatedListCall) PageToken(pageToken string) *ForwardingRulesAggregatedListCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ForwardingRulesAggregatedListCall) Fields(s ...googleapi.Field) *ForwardingRulesAggregatedListCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *ForwardingRulesAggregatedListCall) IfNoneMatch(entityTag string) *ForwardingRulesAggregatedListCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. 
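// For example, a minimal sketch (illustrative only, not part of the
// vendored file) of walking the per-scope results; svc is an assumed
// *Service, and Items/ForwardingRules are fields of the generated
// ForwardingRuleAggregatedList and ForwardingRulesScopedList types.
//
//	agg, err := svc.ForwardingRules.AggregatedList("my-project").Do()
//	if err != nil {
//		return err
//	}
//	for scope, scoped := range agg.Items {
//		for _, fr := range scoped.ForwardingRules {
//			fmt.Println(scope, fr.Name)
//		}
//	}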
+func (c *ForwardingRulesAggregatedListCall) Context(ctx context.Context) *ForwardingRulesAggregatedListCall { + c.ctx_ = ctx + return c +} + +func (c *ForwardingRulesAggregatedListCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/forwardingRules") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + }) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + req.Header.Set("If-None-Match", c.ifNoneMatch_) + } + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "compute.forwardingRules.aggregatedList" call. +// Exactly one of *ForwardingRuleAggregatedList or error will be +// non-nil. Any non-2xx status code is an error. Response headers are in +// either *ForwardingRuleAggregatedList.ServerResponse.Header or (if a +// response was returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *ForwardingRulesAggregatedListCall) Do(opts ...googleapi.CallOption) (*ForwardingRuleAggregatedList, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &ForwardingRuleAggregatedList{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Retrieves the list of forwarding rules grouped by scope.", + // "httpMethod": "GET", + // "id": "compute.forwardingRules.aggregatedList", + // "parameterOrder": [ + // "project" + // ], + // "parameters": { + // "filter": { + // "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, filter=name ne example-instance.\n\nCompute Engine Beta API Only: If you use filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. In particular, use filtering on nested fields to take advantage of instance labels to organize and filter results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. 
For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions meaning that resources must match all expressions to pass the filters.", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that Compute Engine should return. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests.", + // "format": "uint32", + // "location": "query", + // "maximum": "500", + // "minimum": "0", + // "type": "integer" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "location": "query", + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/aggregated/forwardingRules", + // "response": { + // "$ref": "ForwardingRuleAggregatedList" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// method id "compute.forwardingRules.delete": + +type ForwardingRulesDeleteCall struct { + s *Service + project string + region string + forwardingRule string + urlParams_ gensupport.URLParams + ctx_ context.Context +} + +// Delete: Deletes the specified ForwardingRule resource. +// For details, see https://cloud.google.com/compute/docs/reference/latest/forwardingRules/delete +func (r *ForwardingRulesService) Delete(project string, region string, forwardingRule string) *ForwardingRulesDeleteCall { + c := &ForwardingRulesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + c.forwardingRule = forwardingRule + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ForwardingRulesDeleteCall) Fields(s ...googleapi.Field) *ForwardingRulesDeleteCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ForwardingRulesDeleteCall) Context(ctx context.Context) *ForwardingRulesDeleteCall { + c.ctx_ = ctx + return c +} + +func (c *ForwardingRulesDeleteCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/forwardingRules/{forwardingRule}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("DELETE", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "region": c.region, + "forwardingRule": c.forwardingRule, + }) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "compute.forwardingRules.delete" call. 
+// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *ForwardingRulesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Deletes the specified ForwardingRule resource.", + // "httpMethod": "DELETE", + // "id": "compute.forwardingRules.delete", + // "parameterOrder": [ + // "project", + // "region", + // "forwardingRule" + // ], + // "parameters": { + // "forwardingRule": { + // "description": "Name of the ForwardingRule resource to delete.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "Name of the region scoping this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/regions/{region}/forwardingRules/{forwardingRule}", + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.forwardingRules.get": + +type ForwardingRulesGetCall struct { + s *Service + project string + region string + forwardingRule string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context +} + +// Get: Returns the specified ForwardingRule resource. +// For details, see https://cloud.google.com/compute/docs/reference/latest/forwardingRules/get +func (r *ForwardingRulesService) Get(project string, region string, forwardingRule string) *ForwardingRulesGetCall { + c := &ForwardingRulesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + c.forwardingRule = forwardingRule + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ForwardingRulesGetCall) Fields(s ...googleapi.Field) *ForwardingRulesGetCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. 
+// This is useful for
+// getting updates only after the object has changed since the last
+// request. Use googleapi.IsNotModified to check whether the response
+// error from Do is the result of If-None-Match.
+func (c *ForwardingRulesGetCall) IfNoneMatch(entityTag string) *ForwardingRulesGetCall {
+	c.ifNoneMatch_ = entityTag
+	return c
+}
+
+// Context sets the context to be used in this call's Do method. Any
+// pending HTTP request will be aborted if the provided context is
+// canceled.
+func (c *ForwardingRulesGetCall) Context(ctx context.Context) *ForwardingRulesGetCall {
+	c.ctx_ = ctx
+	return c
+}
+
+func (c *ForwardingRulesGetCall) doRequest(alt string) (*http.Response, error) {
+	var body io.Reader = nil
+	c.urlParams_.Set("alt", alt)
+	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/forwardingRules/{forwardingRule}")
+	urls += "?" + c.urlParams_.Encode()
+	req, _ := http.NewRequest("GET", urls, body)
+	googleapi.Expand(req.URL, map[string]string{
+		"project":        c.project,
+		"region":         c.region,
+		"forwardingRule": c.forwardingRule,
+	})
+	req.Header.Set("User-Agent", c.s.userAgent())
+	if c.ifNoneMatch_ != "" {
+		req.Header.Set("If-None-Match", c.ifNoneMatch_)
+	}
+	if c.ctx_ != nil {
+		return ctxhttp.Do(c.ctx_, c.s.client, req)
+	}
+	return c.s.client.Do(req)
+}
+
+// Do executes the "compute.forwardingRules.get" call.
+// Exactly one of *ForwardingRule or error will be non-nil. Any non-2xx
+// status code is an error. Response headers are in either
+// *ForwardingRule.ServerResponse.Header or (if a response was returned
+// at all) in error.(*googleapi.Error).Header. Use
+// googleapi.IsNotModified to check whether the returned error was
+// because http.StatusNotModified was returned.
+func (c *ForwardingRulesGetCall) Do(opts ...googleapi.CallOption) (*ForwardingRule, error) {
+	gensupport.SetOptions(c.urlParams_, opts...)
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &ForwardingRule{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Returns the specified ForwardingRule resource.", + // "httpMethod": "GET", + // "id": "compute.forwardingRules.get", + // "parameterOrder": [ + // "project", + // "region", + // "forwardingRule" + // ], + // "parameters": { + // "forwardingRule": { + // "description": "Name of the ForwardingRule resource to return.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "Name of the region scoping this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/regions/{region}/forwardingRules/{forwardingRule}", + // "response": { + // "$ref": "ForwardingRule" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// method id "compute.forwardingRules.insert": + +type ForwardingRulesInsertCall struct { + s *Service + project string + region string + forwardingrule *ForwardingRule + urlParams_ gensupport.URLParams + ctx_ context.Context +} + +// Insert: Creates a ForwardingRule resource in the specified project +// and region using the data included in the request. +// For details, see https://cloud.google.com/compute/docs/reference/latest/forwardingRules/insert +func (r *ForwardingRulesService) Insert(project string, region string, forwardingrule *ForwardingRule) *ForwardingRulesInsertCall { + c := &ForwardingRulesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + c.forwardingrule = forwardingrule + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ForwardingRulesInsertCall) Fields(s ...googleapi.Field) *ForwardingRulesInsertCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. 
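// Note that Insert, like the other mutating calls in this file, returns
// an Operation rather than the created resource. A minimal polling sketch
// (illustrative only, not part of the vendored file; svc and rule are
// assumed, as is the "time" import):
//
//	op, err := svc.ForwardingRules.Insert("my-project", "us-central1", rule).Do()
//	for err == nil && op.Status != "DONE" {
//		time.Sleep(2 * time.Second)
//		op, err = svc.RegionOperations.Get("my-project", "us-central1", op.Name).Do()
//	}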
+func (c *ForwardingRulesInsertCall) Context(ctx context.Context) *ForwardingRulesInsertCall { + c.ctx_ = ctx + return c +} + +func (c *ForwardingRulesInsertCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.forwardingrule) + if err != nil { + return nil, err + } + ctype := "application/json" + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/forwardingRules") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "region": c.region, + }) + req.Header.Set("Content-Type", ctype) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "compute.forwardingRules.insert" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *ForwardingRulesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Creates a ForwardingRule resource in the specified project and region using the data included in the request.", + // "httpMethod": "POST", + // "id": "compute.forwardingRules.insert", + // "parameterOrder": [ + // "project", + // "region" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "Name of the region scoping this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/regions/{region}/forwardingRules", + // "request": { + // "$ref": "ForwardingRule" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.forwardingRules.list": + +type ForwardingRulesListCall struct { + s *Service + project string + region string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context +} + +// List: Retrieves the list of ForwardingRule resources available to the +// specified project and region. 
+// For details, see https://cloud.google.com/compute/docs/reference/latest/forwardingRules/list
+func (r *ForwardingRulesService) List(project string, region string) *ForwardingRulesListCall {
+	c := &ForwardingRulesListCall{s: r.s, urlParams_: make(gensupport.URLParams)}
+	c.project = project
+	c.region = region
+	return c
+}
+
+// Filter sets the optional parameter "filter": Sets a filter expression
+// for filtering listed resources, in the form filter={expression}. Your
+// {expression} must be in the format: field_name comparison_string
+// literal_string.
+//
+// The field_name is the name of the field you want to compare. Only
+// atomic field types are supported (string, number, boolean). The
+// comparison_string must be either eq (equals) or ne (not equals). The
+// literal_string is the string value to filter to. The literal value
+// must be valid for the type of field you are filtering by (string,
+// number, boolean). For string fields, the literal value is interpreted
+// as a regular expression using RE2 syntax. The literal value must
+// match the entire field.
+//
+// For example, filter=name ne example-instance.
+//
+// Compute Engine Beta API Only: If you use filtering in the Beta API,
+// you can also filter on nested fields. For example, you could filter
+// on instances that have set the scheduling.automaticRestart field to
+// true. In particular, use filtering on nested fields to take advantage
+// of instance labels to organize and filter results based on label
+// values.
+//
+// The Beta API also supports filtering on multiple expressions by
+// providing each separate expression within parentheses. For example,
+// (scheduling.automaticRestart eq true) (zone eq us-central1-f).
+// Multiple expressions are treated as AND expressions meaning that
+// resources must match all expressions to pass the filters.
+func (c *ForwardingRulesListCall) Filter(filter string) *ForwardingRulesListCall {
+	c.urlParams_.Set("filter", filter)
+	return c
+}
+
+// MaxResults sets the optional parameter "maxResults": The maximum
+// number of results per page that Compute Engine should return. If the
+// number of available results is larger than maxResults, Compute Engine
+// returns a nextPageToken that can be used to get the next page of
+// results in subsequent list requests.
+func (c *ForwardingRulesListCall) MaxResults(maxResults int64) *ForwardingRulesListCall {
+	c.urlParams_.Set("maxResults", fmt.Sprint(maxResults))
+	return c
+}
+
+// PageToken sets the optional parameter "pageToken": Specifies a page
+// token to use. Set pageToken to the nextPageToken returned by a
+// previous list request to get the next page of results.
+func (c *ForwardingRulesListCall) PageToken(pageToken string) *ForwardingRulesListCall {
+	c.urlParams_.Set("pageToken", pageToken)
+	return c
+}
+
+// Fields allows partial responses to be retrieved. See
+// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *ForwardingRulesListCall) Fields(s ...googleapi.Field) *ForwardingRulesListCall {
+	c.urlParams_.Set("fields", googleapi.CombineFields(s))
+	return c
+}
+
+// IfNoneMatch sets the optional parameter which makes the operation
+// fail if the object's ETag matches the given value. This is useful for
+// getting updates only after the object has changed since the last
+// request. Use googleapi.IsNotModified to check whether the response
+// error from Do is the result of If-None-Match.
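// For example, a minimal sketch (illustrative only, not part of the
// vendored file): etag is assumed to have been saved from a previous
// response via list.ServerResponse.Header.Get("Etag"), and svc is an
// assumed *Service.
//
//	list, err := svc.ForwardingRules.List("my-project", "us-central1").
//		IfNoneMatch(etag).
//		Do()
//	if googleapi.IsNotModified(err) {
//		// the cached copy is still current; reuse it
//	}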
+func (c *ForwardingRulesListCall) IfNoneMatch(entityTag string) *ForwardingRulesListCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ForwardingRulesListCall) Context(ctx context.Context) *ForwardingRulesListCall { + c.ctx_ = ctx + return c +} + +func (c *ForwardingRulesListCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/forwardingRules") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "region": c.region, + }) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + req.Header.Set("If-None-Match", c.ifNoneMatch_) + } + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "compute.forwardingRules.list" call. +// Exactly one of *ForwardingRuleList or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *ForwardingRuleList.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *ForwardingRulesListCall) Do(opts ...googleapi.CallOption) (*ForwardingRuleList, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &ForwardingRuleList{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Retrieves the list of ForwardingRule resources available to the specified project and region.", + // "httpMethod": "GET", + // "id": "compute.forwardingRules.list", + // "parameterOrder": [ + // "project", + // "region" + // ], + // "parameters": { + // "filter": { + // "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, filter=name ne example-instance.\n\nCompute Engine Beta API Only: If you use filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. 
In particular, use filtering on nested fields to take advantage of instance labels to organize and filter results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions meaning that resources must match all expressions to pass the filters.", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that Compute Engine should return. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests.", + // "format": "uint32", + // "location": "query", + // "maximum": "500", + // "minimum": "0", + // "type": "integer" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "location": "query", + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "Name of the region scoping this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/regions/{region}/forwardingRules", + // "response": { + // "$ref": "ForwardingRuleList" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// method id "compute.forwardingRules.setTarget": + +type ForwardingRulesSetTargetCall struct { + s *Service + project string + region string + forwardingRule string + targetreference *TargetReference + urlParams_ gensupport.URLParams + ctx_ context.Context +} + +// SetTarget: Changes target URL for forwarding rule. The new target +// should be of the same type as the old target. +// For details, see https://cloud.google.com/compute/docs/reference/latest/forwardingRules/setTarget +func (r *ForwardingRulesService) SetTarget(project string, region string, forwardingRule string, targetreference *TargetReference) *ForwardingRulesSetTargetCall { + c := &ForwardingRulesSetTargetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + c.forwardingRule = forwardingRule + c.targetreference = targetreference + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ForwardingRulesSetTargetCall) Fields(s ...googleapi.Field) *ForwardingRulesSetTargetCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. 
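The list builders above chain into a single Do call and do not paginate automatically; callers loop on NextPageToken themselves. A minimal usage sketch, not part of the vendored file, with placeholder project/region/filter values and Application Default Credentials assumed:

// Illustrative only; not part of this patch.
package main

import (
	"fmt"
	"log"

	"golang.org/x/net/context"
	"golang.org/x/oauth2/google"
	compute "google.golang.org/api/compute/v1"
)

func main() {
	client, err := google.DefaultClient(context.Background(), compute.ComputeReadonlyScope)
	if err != nil {
		log.Fatal(err)
	}
	svc, err := compute.New(client)
	if err != nil {
		log.Fatal(err)
	}
	pageToken := ""
	for {
		call := svc.ForwardingRules.List("my-project", "us-central1"). // placeholders
			Filter("name ne example-instance"). // eq/ne only; string matches are full-match RE2
			MaxResults(500)
		if pageToken != "" {
			call = call.PageToken(pageToken)
		}
		page, err := call.Do()
		if err != nil {
			log.Fatal(err)
		}
		for _, rule := range page.Items {
			fmt.Println(rule.Name, rule.IPAddress)
		}
		if page.NextPageToken == "" {
			break
		}
		pageToken = page.NextPageToken
	}
}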
+func (c *ForwardingRulesSetTargetCall) Context(ctx context.Context) *ForwardingRulesSetTargetCall { + c.ctx_ = ctx + return c +} + +func (c *ForwardingRulesSetTargetCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.targetreference) + if err != nil { + return nil, err + } + ctype := "application/json" + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/forwardingRules/{forwardingRule}/setTarget") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "region": c.region, + "forwardingRule": c.forwardingRule, + }) + req.Header.Set("Content-Type", ctype) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "compute.forwardingRules.setTarget" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *ForwardingRulesSetTargetCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Changes target URL for forwarding rule. 
The new target should be of the same type as the old target.", + // "httpMethod": "POST", + // "id": "compute.forwardingRules.setTarget", + // "parameterOrder": [ + // "project", + // "region", + // "forwardingRule" + // ], + // "parameters": { + // "forwardingRule": { + // "description": "Name of the ForwardingRule resource in which target is to be set.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "Name of the region scoping this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/regions/{region}/forwardingRules/{forwardingRule}/setTarget", + // "request": { + // "$ref": "TargetReference" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.globalAddresses.delete": + +type GlobalAddressesDeleteCall struct { + s *Service + project string + address string + urlParams_ gensupport.URLParams + ctx_ context.Context +} + +// Delete: Deletes the specified address resource. +// For details, see https://cloud.google.com/compute/docs/reference/latest/globalAddresses/delete +func (r *GlobalAddressesService) Delete(project string, address string) *GlobalAddressesDeleteCall { + c := &GlobalAddressesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.address = address + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *GlobalAddressesDeleteCall) Fields(s ...googleapi.Field) *GlobalAddressesDeleteCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *GlobalAddressesDeleteCall) Context(ctx context.Context) *GlobalAddressesDeleteCall { + c.ctx_ = ctx + return c +} + +func (c *GlobalAddressesDeleteCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/addresses/{address}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("DELETE", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "address": c.address, + }) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "compute.globalAddresses.delete" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. 
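A hedged sketch of driving the SetTarget call defined above; svc, the helper name, and targetPoolURL are placeholders, and the returned Operation is asynchronous (a polling sketch appears later in this section):

// Illustrative only; all names are placeholders.
func retargetRule(svc *compute.Service, project, region, rule, targetPoolURL string) (*compute.Operation, error) {
	// The new target must be the same type as the old one (see the method doc above).
	ref := &compute.TargetReference{Target: targetPoolURL}
	return svc.ForwardingRules.SetTarget(project, region, rule, ref).Do()
}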
+func (c *GlobalAddressesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) {
+ gensupport.SetOptions(c.urlParams_, opts...)
+ res, err := c.doRequest("json")
+ if res != nil && res.StatusCode == http.StatusNotModified {
+ if res.Body != nil {
+ res.Body.Close()
+ }
+ return nil, &googleapi.Error{
+ Code: res.StatusCode,
+ Header: res.Header,
+ }
+ }
+ if err != nil {
+ return nil, err
+ }
+ defer googleapi.CloseBody(res)
+ if err := googleapi.CheckResponse(res); err != nil {
+ return nil, err
+ }
+ ret := &Operation{
+ ServerResponse: googleapi.ServerResponse{
+ Header: res.Header,
+ HTTPStatusCode: res.StatusCode,
+ },
+ }
+ if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
+ return nil, err
+ }
+ return ret, nil
+ // {
+ // "description": "Deletes the specified address resource.",
+ // "httpMethod": "DELETE",
+ // "id": "compute.globalAddresses.delete",
+ // "parameterOrder": [
+ // "project",
+ // "address"
+ // ],
+ // "parameters": {
+ // "address": {
+ // "description": "Name of the address resource to delete.",
+ // "location": "path",
+ // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
+ // "required": true,
+ // "type": "string"
+ // },
+ // "project": {
+ // "description": "Project ID for this request.",
+ // "location": "path",
+ // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
+ // "required": true,
+ // "type": "string"
+ // }
+ // },
+ // "path": "{project}/global/addresses/{address}",
+ // "response": {
+ // "$ref": "Operation"
+ // },
+ // "scopes": [
+ // "https://www.googleapis.com/auth/cloud-platform",
+ // "https://www.googleapis.com/auth/compute"
+ // ]
+ // }
+
+}
+
+// method id "compute.globalAddresses.get":
+
+type GlobalAddressesGetCall struct {
+ s *Service
+ project string
+ address string
+ urlParams_ gensupport.URLParams
+ ifNoneMatch_ string
+ ctx_ context.Context
+}
+
+// Get: Returns the specified address resource.
+// For details, see https://cloud.google.com/compute/docs/reference/latest/globalAddresses/get
+func (r *GlobalAddressesService) Get(project string, address string) *GlobalAddressesGetCall {
+ c := &GlobalAddressesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)}
+ c.project = project
+ c.address = address
+ return c
+}
+
+// Fields allows partial responses to be retrieved. See
+// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *GlobalAddressesGetCall) Fields(s ...googleapi.Field) *GlobalAddressesGetCall {
+ c.urlParams_.Set("fields", googleapi.CombineFields(s))
+ return c
+}
+
+// IfNoneMatch sets the optional parameter which makes the operation
+// fail if the object's ETag matches the given value. This is useful for
+// getting updates only after the object has changed since the last
+// request. Use googleapi.IsNotModified to check whether the response
+// error from Do is the result of If-None-Match.
+func (c *GlobalAddressesGetCall) IfNoneMatch(entityTag string) *GlobalAddressesGetCall {
+ c.ifNoneMatch_ = entityTag
+ return c
+}
+
+// Context sets the context to be used in this call's Do method. Any
+// pending HTTP request will be aborted if the provided context is
+// canceled.
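The If-None-Match plumbing above enables cheap change detection: pass back an ETag captured from a prior response, and a 304 surfaces as an error that googleapi.IsNotModified recognizes. A sketch, assuming the server returned an ETag header on the first response (svc and the helper name are placeholders):

// Illustrative only; assumes import "google.golang.org/api/googleapi".
func refreshAddress(svc *compute.Service, project, name string, cached *compute.Address) (*compute.Address, error) {
	if cached == nil {
		return svc.GlobalAddresses.Get(project, name).Do()
	}
	etag := cached.ServerResponse.Header.Get("Etag") // assumes the server sent one
	fresh, err := svc.GlobalAddresses.Get(project, name).IfNoneMatch(etag).Do()
	if googleapi.IsNotModified(err) {
		return cached, nil // 304: unchanged, body not resent
	}
	return fresh, err
}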
+func (c *GlobalAddressesGetCall) Context(ctx context.Context) *GlobalAddressesGetCall { + c.ctx_ = ctx + return c +} + +func (c *GlobalAddressesGetCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/addresses/{address}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "address": c.address, + }) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + req.Header.Set("If-None-Match", c.ifNoneMatch_) + } + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "compute.globalAddresses.get" call. +// Exactly one of *Address or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Address.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *GlobalAddressesGetCall) Do(opts ...googleapi.CallOption) (*Address, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Address{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Returns the specified address resource.", + // "httpMethod": "GET", + // "id": "compute.globalAddresses.get", + // "parameterOrder": [ + // "project", + // "address" + // ], + // "parameters": { + // "address": { + // "description": "Name of the address resource to return.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/global/addresses/{address}", + // "response": { + // "$ref": "Address" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// method id "compute.globalAddresses.insert": + +type GlobalAddressesInsertCall struct { + s *Service + project string + address *Address + urlParams_ gensupport.URLParams + ctx_ context.Context +} + +// Insert: Creates an address resource in the specified project using +// the data included in the request. 
+// For details, see https://cloud.google.com/compute/docs/reference/latest/globalAddresses/insert +func (r *GlobalAddressesService) Insert(project string, address *Address) *GlobalAddressesInsertCall { + c := &GlobalAddressesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.address = address + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *GlobalAddressesInsertCall) Fields(s ...googleapi.Field) *GlobalAddressesInsertCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *GlobalAddressesInsertCall) Context(ctx context.Context) *GlobalAddressesInsertCall { + c.ctx_ = ctx + return c +} + +func (c *GlobalAddressesInsertCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.address) + if err != nil { + return nil, err + } + ctype := "application/json" + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/addresses") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + }) + req.Header.Set("Content-Type", ctype) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "compute.globalAddresses.insert" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *GlobalAddressesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Creates an address resource in the specified project using the data included in the request.", + // "httpMethod": "POST", + // "id": "compute.globalAddresses.insert", + // "parameterOrder": [ + // "project" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/global/addresses", + // "request": { + // "$ref": "Address" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.globalAddresses.list": + +type GlobalAddressesListCall struct { + s *Service + project string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context +} + +// List: Retrieves the list of global address resources. +// For details, see https://cloud.google.com/compute/docs/reference/latest/globalAddresses/list +func (r *GlobalAddressesService) List(project string) *GlobalAddressesListCall { + c := &GlobalAddressesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + return c +} + +// Filter sets the optional parameter "filter": Sets a filter expression +// for filtering listed resources, in the form filter={expression}. Your +// {expression} must be in the format: field_name comparison_string +// literal_string. +// +// The field_name is the name of the field you want to compare. Only +// atomic field types are supported (string, number, boolean). The +// comparison_string must be either eq (equals) or ne (not equals). The +// literal_string is the string value to filter to. The literal value +// must be valid for the type of field you are filtering by (string, +// number, boolean). For string fields, the literal value is interpreted +// as a regular expression using RE2 syntax. The literal value must +// match the entire field. +// +// For example, filter=name ne example-instance. +// +// Compute Engine Beta API Only: If you use filtering in the Beta API, +// you can also filter on nested fields. For example, you could filter +// on instances that have set the scheduling.automaticRestart field to +// true. In particular, use filtering on nested fields to take advantage +// of instance labels to organize and filter results based on label +// values. +// +// The Beta API also supports filtering on multiple expressions by +// providing each separate expression within parentheses. For example, +// (scheduling.automaticRestart eq true) (zone eq us-central1-f). +// Multiple expressions are treated as AND expressions meaning that +// resources must match all expressions to pass the filters. 
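Insert, like the other mutating calls in this file, returns an Operation immediately rather than waiting for the work to finish. A polling sketch, assuming the GlobalOperations.Get method generated elsewhere in this file (helper name, sleep interval, and error formatting are illustrative):

// Illustrative only; assumes imports "fmt" and "time".
func reserveGlobalAddress(svc *compute.Service, project, name string) error {
	op, err := svc.GlobalAddresses.Insert(project, &compute.Address{Name: name}).Do()
	if err != nil {
		return err
	}
	for op.Status != "DONE" {
		time.Sleep(2 * time.Second) // naive poll; real code should back off and time out
		if op, err = svc.GlobalOperations.Get(project, op.Name).Do(); err != nil {
			return err
		}
	}
	if op.Error != nil {
		return fmt.Errorf("insert failed: %+v", op.Error.Errors)
	}
	return nil
}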
+func (c *GlobalAddressesListCall) Filter(filter string) *GlobalAddressesListCall {
+ c.urlParams_.Set("filter", filter)
+ return c
+}
+
+// MaxResults sets the optional parameter "maxResults": The maximum
+// number of results per page that Compute Engine should return. If the
+// number of available results is larger than maxResults, Compute Engine
+// returns a nextPageToken that can be used to get the next page of
+// results in subsequent list requests.
+func (c *GlobalAddressesListCall) MaxResults(maxResults int64) *GlobalAddressesListCall {
+ c.urlParams_.Set("maxResults", fmt.Sprint(maxResults))
+ return c
+}
+
+// PageToken sets the optional parameter "pageToken": Specifies a page
+// token to use. Set pageToken to the nextPageToken returned by a
+// previous list request to get the next page of results.
+func (c *GlobalAddressesListCall) PageToken(pageToken string) *GlobalAddressesListCall {
+ c.urlParams_.Set("pageToken", pageToken)
+ return c
+}
+
+// Fields allows partial responses to be retrieved. See
+// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *GlobalAddressesListCall) Fields(s ...googleapi.Field) *GlobalAddressesListCall {
+ c.urlParams_.Set("fields", googleapi.CombineFields(s))
+ return c
+}
+
+// IfNoneMatch sets the optional parameter which makes the operation
+// fail if the object's ETag matches the given value. This is useful for
+// getting updates only after the object has changed since the last
+// request. Use googleapi.IsNotModified to check whether the response
+// error from Do is the result of If-None-Match.
+func (c *GlobalAddressesListCall) IfNoneMatch(entityTag string) *GlobalAddressesListCall {
+ c.ifNoneMatch_ = entityTag
+ return c
+}
+
+// Context sets the context to be used in this call's Do method. Any
+// pending HTTP request will be aborted if the provided context is
+// canceled.
+func (c *GlobalAddressesListCall) Context(ctx context.Context) *GlobalAddressesListCall {
+ c.ctx_ = ctx
+ return c
+}
+
+func (c *GlobalAddressesListCall) doRequest(alt string) (*http.Response, error) {
+ var body io.Reader = nil
+ c.urlParams_.Set("alt", alt)
+ urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/addresses")
+ urls += "?" + c.urlParams_.Encode()
+ req, _ := http.NewRequest("GET", urls, body)
+ googleapi.Expand(req.URL, map[string]string{
+ "project": c.project,
+ })
+ req.Header.Set("User-Agent", c.s.userAgent())
+ if c.ifNoneMatch_ != "" {
+ req.Header.Set("If-None-Match", c.ifNoneMatch_)
+ }
+ if c.ctx_ != nil {
+ return ctxhttp.Do(c.ctx_, c.s.client, req)
+ }
+ return c.s.client.Do(req)
+}
+
+// Do executes the "compute.globalAddresses.list" call.
+// Exactly one of *AddressList or error will be non-nil. Any non-2xx
+// status code is an error. Response headers are in either
+// *AddressList.ServerResponse.Header or (if a response was returned at
+// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified
+// to check whether the returned error was because
+// http.StatusNotModified was returned.
+func (c *GlobalAddressesListCall) Do(opts ...googleapi.CallOption) (*AddressList, error) {
+ gensupport.SetOptions(c.urlParams_, opts...)
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &AddressList{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Retrieves the list of global address resources.", + // "httpMethod": "GET", + // "id": "compute.globalAddresses.list", + // "parameterOrder": [ + // "project" + // ], + // "parameters": { + // "filter": { + // "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, filter=name ne example-instance.\n\nCompute Engine Beta API Only: If you use filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. In particular, use filtering on nested fields to take advantage of instance labels to organize and filter results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions meaning that resources must match all expressions to pass the filters.", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that Compute Engine should return. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests.", + // "format": "uint32", + // "location": "query", + // "maximum": "500", + // "minimum": "0", + // "type": "integer" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. 
Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "location": "query", + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/global/addresses", + // "response": { + // "$ref": "AddressList" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// method id "compute.globalForwardingRules.delete": + +type GlobalForwardingRulesDeleteCall struct { + s *Service + project string + forwardingRule string + urlParams_ gensupport.URLParams + ctx_ context.Context +} + +// Delete: Deletes the specified ForwardingRule resource. +// For details, see https://cloud.google.com/compute/docs/reference/latest/globalForwardingRules/delete +func (r *GlobalForwardingRulesService) Delete(project string, forwardingRule string) *GlobalForwardingRulesDeleteCall { + c := &GlobalForwardingRulesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.forwardingRule = forwardingRule + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *GlobalForwardingRulesDeleteCall) Fields(s ...googleapi.Field) *GlobalForwardingRulesDeleteCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *GlobalForwardingRulesDeleteCall) Context(ctx context.Context) *GlobalForwardingRulesDeleteCall { + c.ctx_ = ctx + return c +} + +func (c *GlobalForwardingRulesDeleteCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/forwardingRules/{forwardingRule}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("DELETE", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "forwardingRule": c.forwardingRule, + }) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "compute.globalForwardingRules.delete" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *GlobalForwardingRulesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json")
+ if res != nil && res.StatusCode == http.StatusNotModified {
+ if res.Body != nil {
+ res.Body.Close()
+ }
+ return nil, &googleapi.Error{
+ Code: res.StatusCode,
+ Header: res.Header,
+ }
+ }
+ if err != nil {
+ return nil, err
+ }
+ defer googleapi.CloseBody(res)
+ if err := googleapi.CheckResponse(res); err != nil {
+ return nil, err
+ }
+ ret := &Operation{
+ ServerResponse: googleapi.ServerResponse{
+ Header: res.Header,
+ HTTPStatusCode: res.StatusCode,
+ },
+ }
+ if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
+ return nil, err
+ }
+ return ret, nil
+ // {
+ // "description": "Deletes the specified ForwardingRule resource.",
+ // "httpMethod": "DELETE",
+ // "id": "compute.globalForwardingRules.delete",
+ // "parameterOrder": [
+ // "project",
+ // "forwardingRule"
+ // ],
+ // "parameters": {
+ // "forwardingRule": {
+ // "description": "Name of the ForwardingRule resource to delete.",
+ // "location": "path",
+ // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
+ // "required": true,
+ // "type": "string"
+ // },
+ // "project": {
+ // "description": "Project ID for this request.",
+ // "location": "path",
+ // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
+ // "required": true,
+ // "type": "string"
+ // }
+ // },
+ // "path": "{project}/global/forwardingRules/{forwardingRule}",
+ // "response": {
+ // "$ref": "Operation"
+ // },
+ // "scopes": [
+ // "https://www.googleapis.com/auth/cloud-platform",
+ // "https://www.googleapis.com/auth/compute"
+ // ]
+ // }
+
+}
+
+// method id "compute.globalForwardingRules.get":
+
+type GlobalForwardingRulesGetCall struct {
+ s *Service
+ project string
+ forwardingRule string
+ urlParams_ gensupport.URLParams
+ ifNoneMatch_ string
+ ctx_ context.Context
+}
+
+// Get: Returns the specified ForwardingRule resource.
+// For details, see https://cloud.google.com/compute/docs/reference/latest/globalForwardingRules/get
+func (r *GlobalForwardingRulesService) Get(project string, forwardingRule string) *GlobalForwardingRulesGetCall {
+ c := &GlobalForwardingRulesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)}
+ c.project = project
+ c.forwardingRule = forwardingRule
+ return c
+}
+
+// Fields allows partial responses to be retrieved. See
+// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *GlobalForwardingRulesGetCall) Fields(s ...googleapi.Field) *GlobalForwardingRulesGetCall {
+ c.urlParams_.Set("fields", googleapi.CombineFields(s))
+ return c
+}
+
+// IfNoneMatch sets the optional parameter which makes the operation
+// fail if the object's ETag matches the given value. This is useful for
+// getting updates only after the object has changed since the last
+// request. Use googleapi.IsNotModified to check whether the response
+// error from Do is the result of If-None-Match.
+func (c *GlobalForwardingRulesGetCall) IfNoneMatch(entityTag string) *GlobalForwardingRulesGetCall {
+ c.ifNoneMatch_ = entityTag
+ return c
+}
+
+// Context sets the context to be used in this call's Do method. Any
+// pending HTTP request will be aborted if the provided context is
+// canceled.
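As the Do doc comments repeat, any non-2xx status becomes an error, and when the server actually responded it is a *googleapi.Error carrying the status code and headers. That supports idioms like treating 404 as success when deleting; a sketch (the helper is hypothetical):

// Illustrative only; assumes imports "net/http" and "google.golang.org/api/googleapi".
func deleteRuleIfPresent(svc *compute.Service, project, rule string) error {
	_, err := svc.GlobalForwardingRules.Delete(project, rule).Do()
	if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == http.StatusNotFound {
		return nil // already gone
	}
	return err
}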
+func (c *GlobalForwardingRulesGetCall) Context(ctx context.Context) *GlobalForwardingRulesGetCall { + c.ctx_ = ctx + return c +} + +func (c *GlobalForwardingRulesGetCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/forwardingRules/{forwardingRule}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "forwardingRule": c.forwardingRule, + }) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + req.Header.Set("If-None-Match", c.ifNoneMatch_) + } + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "compute.globalForwardingRules.get" call. +// Exactly one of *ForwardingRule or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *ForwardingRule.ServerResponse.Header or (if a response was returned +// at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *GlobalForwardingRulesGetCall) Do(opts ...googleapi.CallOption) (*ForwardingRule, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &ForwardingRule{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Returns the specified ForwardingRule resource.", + // "httpMethod": "GET", + // "id": "compute.globalForwardingRules.get", + // "parameterOrder": [ + // "project", + // "forwardingRule" + // ], + // "parameters": { + // "forwardingRule": { + // "description": "Name of the ForwardingRule resource to return.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/global/forwardingRules/{forwardingRule}", + // "response": { + // "$ref": "ForwardingRule" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// method id "compute.globalForwardingRules.insert": + +type GlobalForwardingRulesInsertCall struct { + s *Service + project string + forwardingrule *ForwardingRule + urlParams_ gensupport.URLParams + ctx_ context.Context +} + +// Insert: Creates a ForwardingRule resource in the specified project +// and region using the data included in the request. 
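The Context setters shown throughout this file route requests through ctxhttp.Do, so cancellation and deadlines from the vendored golang.org/x/net/context package apply to the underlying HTTP request. A sketch (helper name and timeout are illustrative):

// Illustrative only; assumes imports "time" and "golang.org/x/net/context".
func getRuleWithTimeout(svc *compute.Service, project, rule string) (*compute.ForwardingRule, error) {
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel() // aborts the pending request if it has not finished
	return svc.GlobalForwardingRules.Get(project, rule).Context(ctx).Do()
}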
+// For details, see https://cloud.google.com/compute/docs/reference/latest/globalForwardingRules/insert +func (r *GlobalForwardingRulesService) Insert(project string, forwardingrule *ForwardingRule) *GlobalForwardingRulesInsertCall { + c := &GlobalForwardingRulesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.forwardingrule = forwardingrule + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *GlobalForwardingRulesInsertCall) Fields(s ...googleapi.Field) *GlobalForwardingRulesInsertCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *GlobalForwardingRulesInsertCall) Context(ctx context.Context) *GlobalForwardingRulesInsertCall { + c.ctx_ = ctx + return c +} + +func (c *GlobalForwardingRulesInsertCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.forwardingrule) + if err != nil { + return nil, err + } + ctype := "application/json" + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/forwardingRules") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + }) + req.Header.Set("Content-Type", ctype) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "compute.globalForwardingRules.insert" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *GlobalForwardingRulesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Creates a ForwardingRule resource in the specified project and region using the data included in the request.", + // "httpMethod": "POST", + // "id": "compute.globalForwardingRules.insert", + // "parameterOrder": [ + // "project" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/global/forwardingRules", + // "request": { + // "$ref": "ForwardingRule" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.globalForwardingRules.list": + +type GlobalForwardingRulesListCall struct { + s *Service + project string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context +} + +// List: Retrieves the list of ForwardingRule resources available to the +// specified project. +// For details, see https://cloud.google.com/compute/docs/reference/latest/globalForwardingRules/list +func (r *GlobalForwardingRulesService) List(project string) *GlobalForwardingRulesListCall { + c := &GlobalForwardingRulesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + return c +} + +// Filter sets the optional parameter "filter": Sets a filter expression +// for filtering listed resources, in the form filter={expression}. Your +// {expression} must be in the format: field_name comparison_string +// literal_string. +// +// The field_name is the name of the field you want to compare. Only +// atomic field types are supported (string, number, boolean). The +// comparison_string must be either eq (equals) or ne (not equals). The +// literal_string is the string value to filter to. The literal value +// must be valid for the type of field you are filtering by (string, +// number, boolean). For string fields, the literal value is interpreted +// as a regular expression using RE2 syntax. The literal value must +// match the entire field. +// +// For example, filter=name ne example-instance. +// +// Compute Engine Beta API Only: If you use filtering in the Beta API, +// you can also filter on nested fields. For example, you could filter +// on instances that have set the scheduling.automaticRestart field to +// true. In particular, use filtering on nested fields to take advantage +// of instance labels to organize and filter results based on label +// values. +// +// The Beta API also supports filtering on multiple expressions by +// providing each separate expression within parentheses. For example, +// (scheduling.automaticRestart eq true) (zone eq us-central1-f). 
+// Multiple expressions are treated as AND expressions meaning that
+// resources must match all expressions to pass the filters.
+func (c *GlobalForwardingRulesListCall) Filter(filter string) *GlobalForwardingRulesListCall {
+ c.urlParams_.Set("filter", filter)
+ return c
+}
+
+// MaxResults sets the optional parameter "maxResults": The maximum
+// number of results per page that Compute Engine should return. If the
+// number of available results is larger than maxResults, Compute Engine
+// returns a nextPageToken that can be used to get the next page of
+// results in subsequent list requests.
+func (c *GlobalForwardingRulesListCall) MaxResults(maxResults int64) *GlobalForwardingRulesListCall {
+ c.urlParams_.Set("maxResults", fmt.Sprint(maxResults))
+ return c
+}
+
+// PageToken sets the optional parameter "pageToken": Specifies a page
+// token to use. Set pageToken to the nextPageToken returned by a
+// previous list request to get the next page of results.
+func (c *GlobalForwardingRulesListCall) PageToken(pageToken string) *GlobalForwardingRulesListCall {
+ c.urlParams_.Set("pageToken", pageToken)
+ return c
+}
+
+// Fields allows partial responses to be retrieved. See
+// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *GlobalForwardingRulesListCall) Fields(s ...googleapi.Field) *GlobalForwardingRulesListCall {
+ c.urlParams_.Set("fields", googleapi.CombineFields(s))
+ return c
+}
+
+// IfNoneMatch sets the optional parameter which makes the operation
+// fail if the object's ETag matches the given value. This is useful for
+// getting updates only after the object has changed since the last
+// request. Use googleapi.IsNotModified to check whether the response
+// error from Do is the result of If-None-Match.
+func (c *GlobalForwardingRulesListCall) IfNoneMatch(entityTag string) *GlobalForwardingRulesListCall {
+ c.ifNoneMatch_ = entityTag
+ return c
+}
+
+// Context sets the context to be used in this call's Do method. Any
+// pending HTTP request will be aborted if the provided context is
+// canceled.
+func (c *GlobalForwardingRulesListCall) Context(ctx context.Context) *GlobalForwardingRulesListCall {
+ c.ctx_ = ctx
+ return c
+}
+
+func (c *GlobalForwardingRulesListCall) doRequest(alt string) (*http.Response, error) {
+ var body io.Reader = nil
+ c.urlParams_.Set("alt", alt)
+ urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/forwardingRules")
+ urls += "?" + c.urlParams_.Encode()
+ req, _ := http.NewRequest("GET", urls, body)
+ googleapi.Expand(req.URL, map[string]string{
+ "project": c.project,
+ })
+ req.Header.Set("User-Agent", c.s.userAgent())
+ if c.ifNoneMatch_ != "" {
+ req.Header.Set("If-None-Match", c.ifNoneMatch_)
+ }
+ if c.ctx_ != nil {
+ return ctxhttp.Do(c.ctx_, c.s.client, req)
+ }
+ return c.s.client.Do(req)
+}
+
+// Do executes the "compute.globalForwardingRules.list" call.
+// Exactly one of *ForwardingRuleList or error will be non-nil. Any
+// non-2xx status code is an error. Response headers are in either
+// *ForwardingRuleList.ServerResponse.Header or (if a response was
+// returned at all) in error.(*googleapi.Error).Header. Use
+// googleapi.IsNotModified to check whether the returned error was
+// because http.StatusNotModified was returned.
+func (c *GlobalForwardingRulesListCall) Do(opts ...googleapi.CallOption) (*ForwardingRuleList, error) {
+ gensupport.SetOptions(c.urlParams_, opts...)
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &ForwardingRuleList{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Retrieves the list of ForwardingRule resources available to the specified project.", + // "httpMethod": "GET", + // "id": "compute.globalForwardingRules.list", + // "parameterOrder": [ + // "project" + // ], + // "parameters": { + // "filter": { + // "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, filter=name ne example-instance.\n\nCompute Engine Beta API Only: If you use filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. In particular, use filtering on nested fields to take advantage of instance labels to organize and filter results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions meaning that resources must match all expressions to pass the filters.", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that Compute Engine should return. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests.", + // "format": "uint32", + // "location": "query", + // "maximum": "500", + // "minimum": "0", + // "type": "integer" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. 
Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "location": "query", + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/global/forwardingRules", + // "response": { + // "$ref": "ForwardingRuleList" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// method id "compute.globalForwardingRules.setTarget": + +type GlobalForwardingRulesSetTargetCall struct { + s *Service + project string + forwardingRule string + targetreference *TargetReference + urlParams_ gensupport.URLParams + ctx_ context.Context +} + +// SetTarget: Changes target URL for forwarding rule. The new target +// should be of the same type as the old target. +// For details, see https://cloud.google.com/compute/docs/reference/latest/globalForwardingRules/setTarget +func (r *GlobalForwardingRulesService) SetTarget(project string, forwardingRule string, targetreference *TargetReference) *GlobalForwardingRulesSetTargetCall { + c := &GlobalForwardingRulesSetTargetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.forwardingRule = forwardingRule + c.targetreference = targetreference + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *GlobalForwardingRulesSetTargetCall) Fields(s ...googleapi.Field) *GlobalForwardingRulesSetTargetCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *GlobalForwardingRulesSetTargetCall) Context(ctx context.Context) *GlobalForwardingRulesSetTargetCall { + c.ctx_ = ctx + return c +} + +func (c *GlobalForwardingRulesSetTargetCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.targetreference) + if err != nil { + return nil, err + } + ctype := "application/json" + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/forwardingRules/{forwardingRule}/setTarget") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "forwardingRule": c.forwardingRule, + }) + req.Header.Set("Content-Type", ctype) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "compute.globalForwardingRules.setTarget" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. 
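The global Insert defined earlier in this section takes a ForwardingRule literal built by the caller. A sketch with illustrative field values (the field names come from this package; the proxy URL is a placeholder for the self-link of an existing target, e.g. a TargetHttpProxy):

// Illustrative only; field values are placeholders.
func createGlobalRule(svc *compute.Service, project, proxyURL string) (*compute.Operation, error) {
	rule := &compute.ForwardingRule{
		Name:       "example-rule",
		IPProtocol: "TCP",
		PortRange:  "80",
		Target:     proxyURL, // self-link of an existing target resource
	}
	return svc.GlobalForwardingRules.Insert(project, rule).Do()
}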
+func (c *GlobalForwardingRulesSetTargetCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Changes target URL for forwarding rule. The new target should be of the same type as the old target.", + // "httpMethod": "POST", + // "id": "compute.globalForwardingRules.setTarget", + // "parameterOrder": [ + // "project", + // "forwardingRule" + // ], + // "parameters": { + // "forwardingRule": { + // "description": "Name of the ForwardingRule resource in which target is to be set.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/global/forwardingRules/{forwardingRule}/setTarget", + // "request": { + // "$ref": "TargetReference" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.globalOperations.aggregatedList": + +type GlobalOperationsAggregatedListCall struct { + s *Service + project string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context +} + +// AggregatedList: Retrieves the list of all operations grouped by +// scope. +// For details, see https://cloud.google.com/compute/docs/reference/latest/globalOperations/aggregatedList +func (r *GlobalOperationsService) AggregatedList(project string) *GlobalOperationsAggregatedListCall { + c := &GlobalOperationsAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + return c +} + +// Filter sets the optional parameter "filter": Sets a filter expression +// for filtering listed resources, in the form filter={expression}. Your +// {expression} must be in the format: field_name comparison_string +// literal_string. +// +// The field_name is the name of the field you want to compare. Only +// atomic field types are supported (string, number, boolean). The +// comparison_string must be either eq (equals) or ne (not equals). The +// literal_string is the string value to filter to. The literal value +// must be valid for the type of field you are filtering by (string, +// number, boolean). For string fields, the literal value is interpreted +// as a regular expression using RE2 syntax. The literal value must +// match the entire field. +// +// For example, filter=name ne example-instance. +// +// Compute Engine Beta API Only: If you use filtering in the Beta API, +// you can also filter on nested fields. 
For example, you could filter
+// on instances that have set the scheduling.automaticRestart field to
+// true. In particular, use filtering on nested fields to take advantage
+// of instance labels to organize and filter results based on label
+// values.
+//
+// The Beta API also supports filtering on multiple expressions by
+// providing each separate expression within parentheses. For example,
+// (scheduling.automaticRestart eq true) (zone eq us-central1-f).
+// Multiple expressions are treated as AND expressions meaning that
+// resources must match all expressions to pass the filters.
+func (c *GlobalOperationsAggregatedListCall) Filter(filter string) *GlobalOperationsAggregatedListCall {
+ c.urlParams_.Set("filter", filter)
+ return c
+}
+
+// MaxResults sets the optional parameter "maxResults": The maximum
+// number of results per page that Compute Engine should return. If the
+// number of available results is larger than maxResults, Compute Engine
+// returns a nextPageToken that can be used to get the next page of
+// results in subsequent list requests.
+func (c *GlobalOperationsAggregatedListCall) MaxResults(maxResults int64) *GlobalOperationsAggregatedListCall {
+ c.urlParams_.Set("maxResults", fmt.Sprint(maxResults))
+ return c
+}
+
+// PageToken sets the optional parameter "pageToken": Specifies a page
+// token to use. Set pageToken to the nextPageToken returned by a
+// previous list request to get the next page of results.
+func (c *GlobalOperationsAggregatedListCall) PageToken(pageToken string) *GlobalOperationsAggregatedListCall {
+ c.urlParams_.Set("pageToken", pageToken)
+ return c
+}
+
+// Fields allows partial responses to be retrieved. See
+// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *GlobalOperationsAggregatedListCall) Fields(s ...googleapi.Field) *GlobalOperationsAggregatedListCall {
+ c.urlParams_.Set("fields", googleapi.CombineFields(s))
+ return c
+}
+
+// IfNoneMatch sets the optional parameter which makes the operation
+// fail if the object's ETag matches the given value. This is useful for
+// getting updates only after the object has changed since the last
+// request. Use googleapi.IsNotModified to check whether the response
+// error from Do is the result of If-None-Match.
+func (c *GlobalOperationsAggregatedListCall) IfNoneMatch(entityTag string) *GlobalOperationsAggregatedListCall {
+ c.ifNoneMatch_ = entityTag
+ return c
+}
+
+// Context sets the context to be used in this call's Do method. Any
+// pending HTTP request will be aborted if the provided context is
+// canceled.
+func (c *GlobalOperationsAggregatedListCall) Context(ctx context.Context) *GlobalOperationsAggregatedListCall {
+ c.ctx_ = ctx
+ return c
+}
+
+func (c *GlobalOperationsAggregatedListCall) doRequest(alt string) (*http.Response, error) {
+ var body io.Reader = nil
+ c.urlParams_.Set("alt", alt)
+ urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/operations")
+ urls += "?" + c.urlParams_.Encode()
+ req, _ := http.NewRequest("GET", urls, body)
+ googleapi.Expand(req.URL, map[string]string{
+ "project": c.project,
+ })
+ req.Header.Set("User-Agent", c.s.userAgent())
+ if c.ifNoneMatch_ != "" {
+ req.Header.Set("If-None-Match", c.ifNoneMatch_)
+ }
+ if c.ctx_ != nil {
+ return ctxhttp.Do(c.ctx_, c.s.client, req)
+ }
+ return c.s.client.Do(req)
+}
+
+// Do executes the "compute.globalOperations.aggregatedList" call.
+// Exactly one of *OperationAggregatedList or error will be non-nil.
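AggregatedList responses differ from the plain lists above: results come back grouped per scope, in a map keyed by strings such as "zones/us-central1-f". One page can be walked as follows (a sketch; pagination works as in the earlier list example):

// Illustrative only; assumes import "fmt".
func printOperations(svc *compute.Service, project string) error {
	agg, err := svc.GlobalOperations.AggregatedList(project).Do()
	if err != nil {
		return err
	}
	for scope, scoped := range agg.Items {
		for _, op := range scoped.Operations {
			fmt.Printf("%s: %s %s (%s)\n", scope, op.OperationType, op.TargetLink, op.Status)
		}
	}
	return nil
}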
Any +// non-2xx status code is an error. Response headers are in either +// *OperationAggregatedList.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *GlobalOperationsAggregatedListCall) Do(opts ...googleapi.CallOption) (*OperationAggregatedList, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &OperationAggregatedList{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Retrieves the list of all operations grouped by scope.", + // "httpMethod": "GET", + // "id": "compute.globalOperations.aggregatedList", + // "parameterOrder": [ + // "project" + // ], + // "parameters": { + // "filter": { + // "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, filter=name ne example-instance.\n\nCompute Engine Beta API Only: If you use filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. In particular, use filtering on nested fields to take advantage of instance labels to organize and filter results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions meaning that resources must match all expressions to pass the filters.", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that Compute Engine should return. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests.", + // "format": "uint32", + // "location": "query", + // "maximum": "500", + // "minimum": "0", + // "type": "integer" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. 
Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "location": "query", + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/aggregated/operations", + // "response": { + // "$ref": "OperationAggregatedList" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// method id "compute.globalOperations.delete": + +type GlobalOperationsDeleteCall struct { + s *Service + project string + operation string + urlParams_ gensupport.URLParams + ctx_ context.Context +} + +// Delete: Deletes the specified Operations resource. +// For details, see https://cloud.google.com/compute/docs/reference/latest/globalOperations/delete +func (r *GlobalOperationsService) Delete(project string, operation string) *GlobalOperationsDeleteCall { + c := &GlobalOperationsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.operation = operation + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *GlobalOperationsDeleteCall) Fields(s ...googleapi.Field) *GlobalOperationsDeleteCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *GlobalOperationsDeleteCall) Context(ctx context.Context) *GlobalOperationsDeleteCall { + c.ctx_ = ctx + return c +} + +func (c *GlobalOperationsDeleteCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/operations/{operation}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("DELETE", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "operation": c.operation, + }) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "compute.globalOperations.delete" call. +func (c *GlobalOperationsDeleteCall) Do(opts ...googleapi.CallOption) error { + gensupport.SetOptions(c.urlParams_, opts...) 
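+ // Unlike the typed Do methods in this file, delete has no response body
+ // to decode: doRequest issues the call and only the HTTP status is
+ // checked below before returning.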
+ res, err := c.doRequest("json")
+ if err != nil {
+ return err
+ }
+ defer googleapi.CloseBody(res)
+ if err := googleapi.CheckResponse(res); err != nil {
+ return err
+ }
+ return nil
+ // {
+ // "description": "Deletes the specified Operations resource.",
+ // "httpMethod": "DELETE",
+ // "id": "compute.globalOperations.delete",
+ // "parameterOrder": [
+ // "project",
+ // "operation"
+ // ],
+ // "parameters": {
+ // "operation": {
+ // "description": "Name of the Operations resource to delete.",
+ // "location": "path",
+ // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
+ // "required": true,
+ // "type": "string"
+ // },
+ // "project": {
+ // "description": "Project ID for this request.",
+ // "location": "path",
+ // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
+ // "required": true,
+ // "type": "string"
+ // }
+ // },
+ // "path": "{project}/global/operations/{operation}",
+ // "scopes": [
+ // "https://www.googleapis.com/auth/cloud-platform",
+ // "https://www.googleapis.com/auth/compute"
+ // ]
+ // }
+
+}
+
+// method id "compute.globalOperations.get":
+
+type GlobalOperationsGetCall struct {
+ s *Service
+ project string
+ operation string
+ urlParams_ gensupport.URLParams
+ ifNoneMatch_ string
+ ctx_ context.Context
+}
+
+// Get: Retrieves the specified Operations resource.
+// For details, see https://cloud.google.com/compute/docs/reference/latest/globalOperations/get
+func (r *GlobalOperationsService) Get(project string, operation string) *GlobalOperationsGetCall {
+ c := &GlobalOperationsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)}
+ c.project = project
+ c.operation = operation
+ return c
+}
+
+// Fields allows partial responses to be retrieved. See
+// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *GlobalOperationsGetCall) Fields(s ...googleapi.Field) *GlobalOperationsGetCall {
+ c.urlParams_.Set("fields", googleapi.CombineFields(s))
+ return c
+}
+
+// IfNoneMatch sets the optional parameter which makes the operation
+// fail if the object's ETag matches the given value. This is useful for
+// getting updates only after the object has changed since the last
+// request. Use googleapi.IsNotModified to check whether the response
+// error from Do is the result of If-None-Match.
+func (c *GlobalOperationsGetCall) IfNoneMatch(entityTag string) *GlobalOperationsGetCall {
+ c.ifNoneMatch_ = entityTag
+ return c
+}
+
+// Context sets the context to be used in this call's Do method. Any
+// pending HTTP request will be aborted if the provided context is
+// canceled.
+func (c *GlobalOperationsGetCall) Context(ctx context.Context) *GlobalOperationsGetCall {
+ c.ctx_ = ctx
+ return c
+}
+
+func (c *GlobalOperationsGetCall) doRequest(alt string) (*http.Response, error) {
+ var body io.Reader = nil
+ c.urlParams_.Set("alt", alt)
+ urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/operations/{operation}")
+ urls += "?" + c.urlParams_.Encode()
+ req, _ := http.NewRequest("GET", urls, body)
+ googleapi.Expand(req.URL, map[string]string{
+ "project": c.project,
+ "operation": c.operation,
+ })
+ req.Header.Set("User-Agent", c.s.userAgent())
+ if c.ifNoneMatch_ != "" {
+ req.Header.Set("If-None-Match", c.ifNoneMatch_)
+ }
+ if c.ctx_ != nil {
+ return ctxhttp.Do(c.ctx_, c.s.client, req)
+ }
+ return c.s.client.Do(req)
+}
+
+// Do executes the "compute.globalOperations.get" call. 
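+// A minimal use, assuming an authenticated *Service in svc; the project
+// and operation names are placeholders:
+//
+//   op, err := svc.GlobalOperations.Get("my-project", "operation-1").Do()
+//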
+// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *GlobalOperationsGetCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Retrieves the specified Operations resource.", + // "httpMethod": "GET", + // "id": "compute.globalOperations.get", + // "parameterOrder": [ + // "project", + // "operation" + // ], + // "parameters": { + // "operation": { + // "description": "Name of the Operations resource to return.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/global/operations/{operation}", + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// method id "compute.globalOperations.list": + +type GlobalOperationsListCall struct { + s *Service + project string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context +} + +// List: Retrieves the list of Operation resources contained within the +// specified project. +// For details, see https://cloud.google.com/compute/docs/reference/latest/globalOperations/list +func (r *GlobalOperationsService) List(project string) *GlobalOperationsListCall { + c := &GlobalOperationsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + return c +} + +// Filter sets the optional parameter "filter": Sets a filter expression +// for filtering listed resources, in the form filter={expression}. Your +// {expression} must be in the format: field_name comparison_string +// literal_string. +// +// The field_name is the name of the field you want to compare. Only +// atomic field types are supported (string, number, boolean). The +// comparison_string must be either eq (equals) or ne (not equals). The +// literal_string is the string value to filter to. The literal value +// must be valid for the type of field you are filtering by (string, +// number, boolean). For string fields, the literal value is interpreted +// as a regular expression using RE2 syntax. The literal value must +// match the entire field. 
+//
+// For example, filter=name ne example-instance.
+//
+// Compute Engine Beta API Only: If you use filtering in the Beta API,
+// you can also filter on nested fields. For example, you could filter
+// on instances that have set the scheduling.automaticRestart field to
+// true. In particular, use filtering on nested fields to take advantage
+// of instance labels to organize and filter results based on label
+// values.
+//
+// The Beta API also supports filtering on multiple expressions by
+// providing each separate expression within parentheses. For example,
+// (scheduling.automaticRestart eq true) (zone eq us-central1-f).
+// Multiple expressions are treated as AND expressions meaning that
+// resources must match all expressions to pass the filters.
+func (c *GlobalOperationsListCall) Filter(filter string) *GlobalOperationsListCall {
+ c.urlParams_.Set("filter", filter)
+ return c
+}
+
+// MaxResults sets the optional parameter "maxResults": The maximum
+// number of results per page that Compute Engine should return. If the
+// number of available results is larger than maxResults, Compute Engine
+// returns a nextPageToken that can be used to get the next page of
+// results in subsequent list requests.
+func (c *GlobalOperationsListCall) MaxResults(maxResults int64) *GlobalOperationsListCall {
+ c.urlParams_.Set("maxResults", fmt.Sprint(maxResults))
+ return c
+}
+
+// PageToken sets the optional parameter "pageToken": Specifies a page
+// token to use. Set pageToken to the nextPageToken returned by a
+// previous list request to get the next page of results.
+func (c *GlobalOperationsListCall) PageToken(pageToken string) *GlobalOperationsListCall {
+ c.urlParams_.Set("pageToken", pageToken)
+ return c
+}
+
+// Fields allows partial responses to be retrieved. See
+// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *GlobalOperationsListCall) Fields(s ...googleapi.Field) *GlobalOperationsListCall {
+ c.urlParams_.Set("fields", googleapi.CombineFields(s))
+ return c
+}
+
+// IfNoneMatch sets the optional parameter which makes the operation
+// fail if the object's ETag matches the given value. This is useful for
+// getting updates only after the object has changed since the last
+// request. Use googleapi.IsNotModified to check whether the response
+// error from Do is the result of If-None-Match.
+func (c *GlobalOperationsListCall) IfNoneMatch(entityTag string) *GlobalOperationsListCall {
+ c.ifNoneMatch_ = entityTag
+ return c
+}
+
+// Context sets the context to be used in this call's Do method. Any
+// pending HTTP request will be aborted if the provided context is
+// canceled.
+func (c *GlobalOperationsListCall) Context(ctx context.Context) *GlobalOperationsListCall {
+ c.ctx_ = ctx
+ return c
+}
+
+func (c *GlobalOperationsListCall) doRequest(alt string) (*http.Response, error) {
+ var body io.Reader = nil
+ c.urlParams_.Set("alt", alt)
+ urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/operations")
+ urls += "?" + c.urlParams_.Encode()
+ req, _ := http.NewRequest("GET", urls, body)
+ googleapi.Expand(req.URL, map[string]string{
+ "project": c.project,
+ })
+ req.Header.Set("User-Agent", c.s.userAgent())
+ if c.ifNoneMatch_ != "" {
+ req.Header.Set("If-None-Match", c.ifNoneMatch_)
+ }
+ if c.ctx_ != nil {
+ return ctxhttp.Do(c.ctx_, c.s.client, req)
+ }
+ return c.s.client.Do(req)
+}
+
+// Do executes the "compute.globalOperations.list" call. 
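+// A minimal paging sketch, assuming an authenticated *Service in svc and
+// a placeholder project name; each truncated page carries a
+// NextPageToken that is fed back through PageToken:
+//
+//   call := svc.GlobalOperations.List("my-project")
+//   for {
+//     page, err := call.Do()
+//     if err != nil {
+//       return err
+//     }
+//     if page.NextPageToken == "" {
+//       break
+//     }
+//     call.PageToken(page.NextPageToken)
+//   }
+//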
+// Exactly one of *OperationList or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *OperationList.ServerResponse.Header or (if a response was returned +// at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *GlobalOperationsListCall) Do(opts ...googleapi.CallOption) (*OperationList, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &OperationList{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Retrieves the list of Operation resources contained within the specified project.", + // "httpMethod": "GET", + // "id": "compute.globalOperations.list", + // "parameterOrder": [ + // "project" + // ], + // "parameters": { + // "filter": { + // "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, filter=name ne example-instance.\n\nCompute Engine Beta API Only: If you use filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. In particular, use filtering on nested fields to take advantage of instance labels to organize and filter results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions meaning that resources must match all expressions to pass the filters.", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that Compute Engine should return. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests.", + // "format": "uint32", + // "location": "query", + // "maximum": "500", + // "minimum": "0", + // "type": "integer" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. 
Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "location": "query", + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/global/operations", + // "response": { + // "$ref": "OperationList" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// method id "compute.httpHealthChecks.delete": + +type HttpHealthChecksDeleteCall struct { + s *Service + project string + httpHealthCheck string + urlParams_ gensupport.URLParams + ctx_ context.Context +} + +// Delete: Deletes the specified HttpHealthCheck resource. +// For details, see https://cloud.google.com/compute/docs/reference/latest/httpHealthChecks/delete +func (r *HttpHealthChecksService) Delete(project string, httpHealthCheck string) *HttpHealthChecksDeleteCall { + c := &HttpHealthChecksDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.httpHealthCheck = httpHealthCheck + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *HttpHealthChecksDeleteCall) Fields(s ...googleapi.Field) *HttpHealthChecksDeleteCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *HttpHealthChecksDeleteCall) Context(ctx context.Context) *HttpHealthChecksDeleteCall { + c.ctx_ = ctx + return c +} + +func (c *HttpHealthChecksDeleteCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/httpHealthChecks/{httpHealthCheck}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("DELETE", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "httpHealthCheck": c.httpHealthCheck, + }) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "compute.httpHealthChecks.delete" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *HttpHealthChecksDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
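+ // The StatusNotModified branch below runs before the err check: a 304
+ // reply carries no decodable body, so it is surfaced as a
+ // *googleapi.Error that googleapi.IsNotModified recognizes.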
+ res, err := c.doRequest("json")
+ if res != nil && res.StatusCode == http.StatusNotModified {
+ if res.Body != nil {
+ res.Body.Close()
+ }
+ return nil, &googleapi.Error{
+ Code: res.StatusCode,
+ Header: res.Header,
+ }
+ }
+ if err != nil {
+ return nil, err
+ }
+ defer googleapi.CloseBody(res)
+ if err := googleapi.CheckResponse(res); err != nil {
+ return nil, err
+ }
+ ret := &Operation{
+ ServerResponse: googleapi.ServerResponse{
+ Header: res.Header,
+ HTTPStatusCode: res.StatusCode,
+ },
+ }
+ if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
+ return nil, err
+ }
+ return ret, nil
+ // {
+ // "description": "Deletes the specified HttpHealthCheck resource.",
+ // "httpMethod": "DELETE",
+ // "id": "compute.httpHealthChecks.delete",
+ // "parameterOrder": [
+ // "project",
+ // "httpHealthCheck"
+ // ],
+ // "parameters": {
+ // "httpHealthCheck": {
+ // "description": "Name of the HttpHealthCheck resource to delete.",
+ // "location": "path",
+ // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
+ // "required": true,
+ // "type": "string"
+ // },
+ // "project": {
+ // "description": "Project ID for this request.",
+ // "location": "path",
+ // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
+ // "required": true,
+ // "type": "string"
+ // }
+ // },
+ // "path": "{project}/global/httpHealthChecks/{httpHealthCheck}",
+ // "response": {
+ // "$ref": "Operation"
+ // },
+ // "scopes": [
+ // "https://www.googleapis.com/auth/cloud-platform",
+ // "https://www.googleapis.com/auth/compute"
+ // ]
+ // }
+
+}
+
+// method id "compute.httpHealthChecks.get":
+
+type HttpHealthChecksGetCall struct {
+ s *Service
+ project string
+ httpHealthCheck string
+ urlParams_ gensupport.URLParams
+ ifNoneMatch_ string
+ ctx_ context.Context
+}
+
+// Get: Returns the specified HttpHealthCheck resource.
+// For details, see https://cloud.google.com/compute/docs/reference/latest/httpHealthChecks/get
+func (r *HttpHealthChecksService) Get(project string, httpHealthCheck string) *HttpHealthChecksGetCall {
+ c := &HttpHealthChecksGetCall{s: r.s, urlParams_: make(gensupport.URLParams)}
+ c.project = project
+ c.httpHealthCheck = httpHealthCheck
+ return c
+}
+
+// Fields allows partial responses to be retrieved. See
+// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *HttpHealthChecksGetCall) Fields(s ...googleapi.Field) *HttpHealthChecksGetCall {
+ c.urlParams_.Set("fields", googleapi.CombineFields(s))
+ return c
+}
+
+// IfNoneMatch sets the optional parameter which makes the operation
+// fail if the object's ETag matches the given value. This is useful for
+// getting updates only after the object has changed since the last
+// request. Use googleapi.IsNotModified to check whether the response
+// error from Do is the result of If-None-Match.
+func (c *HttpHealthChecksGetCall) IfNoneMatch(entityTag string) *HttpHealthChecksGetCall {
+ c.ifNoneMatch_ = entityTag
+ return c
+}
+
+// Context sets the context to be used in this call's Do method. Any
+// pending HTTP request will be aborted if the provided context is
+// canceled. 
+func (c *HttpHealthChecksGetCall) Context(ctx context.Context) *HttpHealthChecksGetCall { + c.ctx_ = ctx + return c +} + +func (c *HttpHealthChecksGetCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/httpHealthChecks/{httpHealthCheck}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "httpHealthCheck": c.httpHealthCheck, + }) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + req.Header.Set("If-None-Match", c.ifNoneMatch_) + } + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "compute.httpHealthChecks.get" call. +// Exactly one of *HttpHealthCheck or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *HttpHealthCheck.ServerResponse.Header or (if a response was returned +// at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *HttpHealthChecksGetCall) Do(opts ...googleapi.CallOption) (*HttpHealthCheck, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &HttpHealthCheck{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Returns the specified HttpHealthCheck resource.", + // "httpMethod": "GET", + // "id": "compute.httpHealthChecks.get", + // "parameterOrder": [ + // "project", + // "httpHealthCheck" + // ], + // "parameters": { + // "httpHealthCheck": { + // "description": "Name of the HttpHealthCheck resource to return.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/global/httpHealthChecks/{httpHealthCheck}", + // "response": { + // "$ref": "HttpHealthCheck" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// method id "compute.httpHealthChecks.insert": + +type HttpHealthChecksInsertCall struct { + s *Service + project string + httphealthcheck *HttpHealthCheck + urlParams_ gensupport.URLParams + ctx_ context.Context +} + +// Insert: Creates a HttpHealthCheck resource in the specified project +// using the data included in the request. 
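+// The call is asynchronous: Do returns an Operation that can be polled
+// with the globalOperations methods above. A minimal sketch, assuming an
+// authenticated *Service in svc and this package imported as compute
+// (the check shown is a placeholder):
+//
+//   op, err := svc.HttpHealthChecks.Insert("my-project", &compute.HttpHealthCheck{
+//     Name: "basic-check",
+//     Port: 80,
+//   }).Do()
+//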
+// For details, see https://cloud.google.com/compute/docs/reference/latest/httpHealthChecks/insert +func (r *HttpHealthChecksService) Insert(project string, httphealthcheck *HttpHealthCheck) *HttpHealthChecksInsertCall { + c := &HttpHealthChecksInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.httphealthcheck = httphealthcheck + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *HttpHealthChecksInsertCall) Fields(s ...googleapi.Field) *HttpHealthChecksInsertCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *HttpHealthChecksInsertCall) Context(ctx context.Context) *HttpHealthChecksInsertCall { + c.ctx_ = ctx + return c +} + +func (c *HttpHealthChecksInsertCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.httphealthcheck) + if err != nil { + return nil, err + } + ctype := "application/json" + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/httpHealthChecks") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + }) + req.Header.Set("Content-Type", ctype) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "compute.httpHealthChecks.insert" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *HttpHealthChecksInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
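+ // On success, the JSON body below is decoded into an Operation whose
+ // embedded ServerResponse records this reply's status code and headers.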
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Creates a HttpHealthCheck resource in the specified project using the data included in the request.", + // "httpMethod": "POST", + // "id": "compute.httpHealthChecks.insert", + // "parameterOrder": [ + // "project" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/global/httpHealthChecks", + // "request": { + // "$ref": "HttpHealthCheck" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.httpHealthChecks.list": + +type HttpHealthChecksListCall struct { + s *Service + project string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context +} + +// List: Retrieves the list of HttpHealthCheck resources available to +// the specified project. +// For details, see https://cloud.google.com/compute/docs/reference/latest/httpHealthChecks/list +func (r *HttpHealthChecksService) List(project string) *HttpHealthChecksListCall { + c := &HttpHealthChecksListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + return c +} + +// Filter sets the optional parameter "filter": Sets a filter expression +// for filtering listed resources, in the form filter={expression}. Your +// {expression} must be in the format: field_name comparison_string +// literal_string. +// +// The field_name is the name of the field you want to compare. Only +// atomic field types are supported (string, number, boolean). The +// comparison_string must be either eq (equals) or ne (not equals). The +// literal_string is the string value to filter to. The literal value +// must be valid for the type of field you are filtering by (string, +// number, boolean). For string fields, the literal value is interpreted +// as a regular expression using RE2 syntax. The literal value must +// match the entire field. +// +// For example, filter=name ne example-instance. +// +// Compute Engine Beta API Only: If you use filtering in the Beta API, +// you can also filter on nested fields. For example, you could filter +// on instances that have set the scheduling.automaticRestart field to +// true. In particular, use filtering on nested fields to take advantage +// of instance labels to organize and filter results based on label +// values. +// +// The Beta API also supports filtering on multiple expressions by +// providing each separate expression within parentheses. For example, +// (scheduling.automaticRestart eq true) (zone eq us-central1-f). 
+// Multiple expressions are treated as AND expressions meaning that
+// resources must match all expressions to pass the filters.
+func (c *HttpHealthChecksListCall) Filter(filter string) *HttpHealthChecksListCall {
+ c.urlParams_.Set("filter", filter)
+ return c
+}
+
+// MaxResults sets the optional parameter "maxResults": The maximum
+// number of results per page that Compute Engine should return. If the
+// number of available results is larger than maxResults, Compute Engine
+// returns a nextPageToken that can be used to get the next page of
+// results in subsequent list requests.
+func (c *HttpHealthChecksListCall) MaxResults(maxResults int64) *HttpHealthChecksListCall {
+ c.urlParams_.Set("maxResults", fmt.Sprint(maxResults))
+ return c
+}
+
+// PageToken sets the optional parameter "pageToken": Specifies a page
+// token to use. Set pageToken to the nextPageToken returned by a
+// previous list request to get the next page of results.
+func (c *HttpHealthChecksListCall) PageToken(pageToken string) *HttpHealthChecksListCall {
+ c.urlParams_.Set("pageToken", pageToken)
+ return c
+}
+
+// Fields allows partial responses to be retrieved. See
+// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *HttpHealthChecksListCall) Fields(s ...googleapi.Field) *HttpHealthChecksListCall {
+ c.urlParams_.Set("fields", googleapi.CombineFields(s))
+ return c
+}
+
+// IfNoneMatch sets the optional parameter which makes the operation
+// fail if the object's ETag matches the given value. This is useful for
+// getting updates only after the object has changed since the last
+// request. Use googleapi.IsNotModified to check whether the response
+// error from Do is the result of If-None-Match.
+func (c *HttpHealthChecksListCall) IfNoneMatch(entityTag string) *HttpHealthChecksListCall {
+ c.ifNoneMatch_ = entityTag
+ return c
+}
+
+// Context sets the context to be used in this call's Do method. Any
+// pending HTTP request will be aborted if the provided context is
+// canceled.
+func (c *HttpHealthChecksListCall) Context(ctx context.Context) *HttpHealthChecksListCall {
+ c.ctx_ = ctx
+ return c
+}
+
+func (c *HttpHealthChecksListCall) doRequest(alt string) (*http.Response, error) {
+ var body io.Reader = nil
+ c.urlParams_.Set("alt", alt)
+ urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/httpHealthChecks")
+ urls += "?" + c.urlParams_.Encode()
+ req, _ := http.NewRequest("GET", urls, body)
+ googleapi.Expand(req.URL, map[string]string{
+ "project": c.project,
+ })
+ req.Header.Set("User-Agent", c.s.userAgent())
+ if c.ifNoneMatch_ != "" {
+ req.Header.Set("If-None-Match", c.ifNoneMatch_)
+ }
+ if c.ctx_ != nil {
+ return ctxhttp.Do(c.ctx_, c.s.client, req)
+ }
+ return c.s.client.Do(req)
+}
+
+// Do executes the "compute.httpHealthChecks.list" call.
+// Exactly one of *HttpHealthCheckList or error will be non-nil. Any
+// non-2xx status code is an error. Response headers are in either
+// *HttpHealthCheckList.ServerResponse.Header or (if a response was
+// returned at all) in error.(*googleapi.Error).Header. Use
+// googleapi.IsNotModified to check whether the returned error was
+// because http.StatusNotModified was returned.
+func (c *HttpHealthChecksListCall) Do(opts ...googleapi.CallOption) (*HttpHealthCheckList, error) {
+ gensupport.SetOptions(c.urlParams_, opts...)
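+ // A truncated reply carries a NextPageToken in the decoded
+ // HttpHealthCheckList; pass it back via PageToken above to fetch the
+ // remaining results.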
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &HttpHealthCheckList{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Retrieves the list of HttpHealthCheck resources available to the specified project.", + // "httpMethod": "GET", + // "id": "compute.httpHealthChecks.list", + // "parameterOrder": [ + // "project" + // ], + // "parameters": { + // "filter": { + // "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, filter=name ne example-instance.\n\nCompute Engine Beta API Only: If you use filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. In particular, use filtering on nested fields to take advantage of instance labels to organize and filter results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions meaning that resources must match all expressions to pass the filters.", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that Compute Engine should return. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests.", + // "format": "uint32", + // "location": "query", + // "maximum": "500", + // "minimum": "0", + // "type": "integer" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. 
Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "location": "query", + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/global/httpHealthChecks", + // "response": { + // "$ref": "HttpHealthCheckList" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// method id "compute.httpHealthChecks.patch": + +type HttpHealthChecksPatchCall struct { + s *Service + project string + httpHealthCheck string + httphealthcheck *HttpHealthCheck + urlParams_ gensupport.URLParams + ctx_ context.Context +} + +// Patch: Updates a HttpHealthCheck resource in the specified project +// using the data included in the request. This method supports patch +// semantics. +// For details, see https://cloud.google.com/compute/docs/reference/latest/httpHealthChecks/patch +func (r *HttpHealthChecksService) Patch(project string, httpHealthCheck string, httphealthcheck *HttpHealthCheck) *HttpHealthChecksPatchCall { + c := &HttpHealthChecksPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.httpHealthCheck = httpHealthCheck + c.httphealthcheck = httphealthcheck + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *HttpHealthChecksPatchCall) Fields(s ...googleapi.Field) *HttpHealthChecksPatchCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *HttpHealthChecksPatchCall) Context(ctx context.Context) *HttpHealthChecksPatchCall { + c.ctx_ = ctx + return c +} + +func (c *HttpHealthChecksPatchCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.httphealthcheck) + if err != nil { + return nil, err + } + ctype := "application/json" + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/httpHealthChecks/{httpHealthCheck}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("PATCH", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "httpHealthCheck": c.httpHealthCheck, + }) + req.Header.Set("Content-Type", ctype) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "compute.httpHealthChecks.patch" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *HttpHealthChecksPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
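+ // Because doRequest issues a PATCH, only the fields populated in the
+ // supplied HttpHealthCheck are changed; Update below performs a full
+ // PUT replacement instead.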
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Updates a HttpHealthCheck resource in the specified project using the data included in the request. This method supports patch semantics.", + // "httpMethod": "PATCH", + // "id": "compute.httpHealthChecks.patch", + // "parameterOrder": [ + // "project", + // "httpHealthCheck" + // ], + // "parameters": { + // "httpHealthCheck": { + // "description": "Name of the HttpHealthCheck resource to update.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/global/httpHealthChecks/{httpHealthCheck}", + // "request": { + // "$ref": "HttpHealthCheck" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.httpHealthChecks.update": + +type HttpHealthChecksUpdateCall struct { + s *Service + project string + httpHealthCheck string + httphealthcheck *HttpHealthCheck + urlParams_ gensupport.URLParams + ctx_ context.Context +} + +// Update: Updates a HttpHealthCheck resource in the specified project +// using the data included in the request. +// For details, see https://cloud.google.com/compute/docs/reference/latest/httpHealthChecks/update +func (r *HttpHealthChecksService) Update(project string, httpHealthCheck string, httphealthcheck *HttpHealthCheck) *HttpHealthChecksUpdateCall { + c := &HttpHealthChecksUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.httpHealthCheck = httpHealthCheck + c.httphealthcheck = httphealthcheck + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *HttpHealthChecksUpdateCall) Fields(s ...googleapi.Field) *HttpHealthChecksUpdateCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. 
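+// Cancellation takes effect because doRequest routes the request through
+// ctxhttp.Do whenever a context has been set.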
+func (c *HttpHealthChecksUpdateCall) Context(ctx context.Context) *HttpHealthChecksUpdateCall { + c.ctx_ = ctx + return c +} + +func (c *HttpHealthChecksUpdateCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.httphealthcheck) + if err != nil { + return nil, err + } + ctype := "application/json" + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/httpHealthChecks/{httpHealthCheck}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("PUT", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "httpHealthCheck": c.httpHealthCheck, + }) + req.Header.Set("Content-Type", ctype) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "compute.httpHealthChecks.update" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *HttpHealthChecksUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Updates a HttpHealthCheck resource in the specified project using the data included in the request.", + // "httpMethod": "PUT", + // "id": "compute.httpHealthChecks.update", + // "parameterOrder": [ + // "project", + // "httpHealthCheck" + // ], + // "parameters": { + // "httpHealthCheck": { + // "description": "Name of the HttpHealthCheck resource to update.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/global/httpHealthChecks/{httpHealthCheck}", + // "request": { + // "$ref": "HttpHealthCheck" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.httpsHealthChecks.delete": + +type HttpsHealthChecksDeleteCall struct { + s *Service + project string + httpsHealthCheck string + urlParams_ gensupport.URLParams + ctx_ context.Context +} + +// Delete: Deletes the specified HttpsHealthCheck resource. 
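+// Like its HTTP counterpart above, the call's Do method yields an
+// Operation that tracks the deletion.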
+func (r *HttpsHealthChecksService) Delete(project string, httpsHealthCheck string) *HttpsHealthChecksDeleteCall { + c := &HttpsHealthChecksDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.httpsHealthCheck = httpsHealthCheck + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *HttpsHealthChecksDeleteCall) Fields(s ...googleapi.Field) *HttpsHealthChecksDeleteCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *HttpsHealthChecksDeleteCall) Context(ctx context.Context) *HttpsHealthChecksDeleteCall { + c.ctx_ = ctx + return c +} + +func (c *HttpsHealthChecksDeleteCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/httpsHealthChecks/{httpsHealthCheck}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("DELETE", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "httpsHealthCheck": c.httpsHealthCheck, + }) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "compute.httpsHealthChecks.delete" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *HttpsHealthChecksDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
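+ // The CallOptions have been folded into urlParams_ above, so doRequest
+ // encodes them onto the request URL along with alt=json.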
+ res, err := c.doRequest("json")
+ if res != nil && res.StatusCode == http.StatusNotModified {
+ if res.Body != nil {
+ res.Body.Close()
+ }
+ return nil, &googleapi.Error{
+ Code: res.StatusCode,
+ Header: res.Header,
+ }
+ }
+ if err != nil {
+ return nil, err
+ }
+ defer googleapi.CloseBody(res)
+ if err := googleapi.CheckResponse(res); err != nil {
+ return nil, err
+ }
+ ret := &Operation{
+ ServerResponse: googleapi.ServerResponse{
+ Header: res.Header,
+ HTTPStatusCode: res.StatusCode,
+ },
+ }
+ if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
+ return nil, err
+ }
+ return ret, nil
+ // {
+ // "description": "Deletes the specified HttpsHealthCheck resource.",
+ // "httpMethod": "DELETE",
+ // "id": "compute.httpsHealthChecks.delete",
+ // "parameterOrder": [
+ // "project",
+ // "httpsHealthCheck"
+ // ],
+ // "parameters": {
+ // "httpsHealthCheck": {
+ // "description": "Name of the HttpsHealthCheck resource to delete.",
+ // "location": "path",
+ // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
+ // "required": true,
+ // "type": "string"
+ // },
+ // "project": {
+ // "description": "Project ID for this request.",
+ // "location": "path",
+ // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
+ // "required": true,
+ // "type": "string"
+ // }
+ // },
+ // "path": "{project}/global/httpsHealthChecks/{httpsHealthCheck}",
+ // "response": {
+ // "$ref": "Operation"
+ // },
+ // "scopes": [
+ // "https://www.googleapis.com/auth/cloud-platform",
+ // "https://www.googleapis.com/auth/compute"
+ // ]
+ // }
+
+}
+
+// method id "compute.httpsHealthChecks.get":
+
+type HttpsHealthChecksGetCall struct {
+ s *Service
+ project string
+ httpsHealthCheck string
+ urlParams_ gensupport.URLParams
+ ifNoneMatch_ string
+ ctx_ context.Context
+}
+
+// Get: Returns the specified HttpsHealthCheck resource.
+func (r *HttpsHealthChecksService) Get(project string, httpsHealthCheck string) *HttpsHealthChecksGetCall {
+ c := &HttpsHealthChecksGetCall{s: r.s, urlParams_: make(gensupport.URLParams)}
+ c.project = project
+ c.httpsHealthCheck = httpsHealthCheck
+ return c
+}
+
+// Fields allows partial responses to be retrieved. See
+// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *HttpsHealthChecksGetCall) Fields(s ...googleapi.Field) *HttpsHealthChecksGetCall {
+ c.urlParams_.Set("fields", googleapi.CombineFields(s))
+ return c
+}
+
+// IfNoneMatch sets the optional parameter which makes the operation
+// fail if the object's ETag matches the given value. This is useful for
+// getting updates only after the object has changed since the last
+// request. Use googleapi.IsNotModified to check whether the response
+// error from Do is the result of If-None-Match.
+func (c *HttpsHealthChecksGetCall) IfNoneMatch(entityTag string) *HttpsHealthChecksGetCall {
+ c.ifNoneMatch_ = entityTag
+ return c
+}
+
+// Context sets the context to be used in this call's Do method. Any
+// pending HTTP request will be aborted if the provided context is
+// canceled.
+func (c *HttpsHealthChecksGetCall) Context(ctx context.Context) *HttpsHealthChecksGetCall {
+ c.ctx_ = ctx
+ return c
+}
+
+func (c *HttpsHealthChecksGetCall) doRequest(alt string) (*http.Response, error) {
+ var body io.Reader = nil
+ c.urlParams_.Set("alt", alt)
+ urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/httpsHealthChecks/{httpsHealthCheck}")
+ urls += "?" 
+ c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "httpsHealthCheck": c.httpsHealthCheck, + }) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + req.Header.Set("If-None-Match", c.ifNoneMatch_) + } + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "compute.httpsHealthChecks.get" call. +// Exactly one of *HttpsHealthCheck or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *HttpsHealthCheck.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *HttpsHealthChecksGetCall) Do(opts ...googleapi.CallOption) (*HttpsHealthCheck, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &HttpsHealthCheck{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Returns the specified HttpsHealthCheck resource.", + // "httpMethod": "GET", + // "id": "compute.httpsHealthChecks.get", + // "parameterOrder": [ + // "project", + // "httpsHealthCheck" + // ], + // "parameters": { + // "httpsHealthCheck": { + // "description": "Name of the HttpsHealthCheck resource to return.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/global/httpsHealthChecks/{httpsHealthCheck}", + // "response": { + // "$ref": "HttpsHealthCheck" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// method id "compute.httpsHealthChecks.insert": + +type HttpsHealthChecksInsertCall struct { + s *Service + project string + httpshealthcheck *HttpsHealthCheck + urlParams_ gensupport.URLParams + ctx_ context.Context +} + +// Insert: Creates a HttpsHealthCheck resource in the specified project +// using the data included in the request. +func (r *HttpsHealthChecksService) Insert(project string, httpshealthcheck *HttpsHealthCheck) *HttpsHealthChecksInsertCall { + c := &HttpsHealthChecksInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.httpshealthcheck = httpshealthcheck + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. 
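+//
+// Illustrative, hand-written sketch (not generated code): trimming the
+// Insert response to a couple of Operation fields. "svc" is assumed to
+// be an already-constructed *Service, and the HttpsHealthCheck field
+// names are assumed from the Compute API surface rather than taken
+// from this patch:
+//
+//   hc := &HttpsHealthCheck{Name: "hc-1", Port: 443, RequestPath: "/healthz"}
+//   op, err := svc.HttpsHealthChecks.Insert("my-project", hc).
+//           Fields("name", "status").
+//           Do()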
+func (c *HttpsHealthChecksInsertCall) Fields(s ...googleapi.Field) *HttpsHealthChecksInsertCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *HttpsHealthChecksInsertCall) Context(ctx context.Context) *HttpsHealthChecksInsertCall { + c.ctx_ = ctx + return c +} + +func (c *HttpsHealthChecksInsertCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.httpshealthcheck) + if err != nil { + return nil, err + } + ctype := "application/json" + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/httpsHealthChecks") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + }) + req.Header.Set("Content-Type", ctype) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "compute.httpsHealthChecks.insert" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *HttpsHealthChecksInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Creates a HttpsHealthCheck resource in the specified project using the data included in the request.", + // "httpMethod": "POST", + // "id": "compute.httpsHealthChecks.insert", + // "parameterOrder": [ + // "project" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/global/httpsHealthChecks", + // "request": { + // "$ref": "HttpsHealthCheck" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.httpsHealthChecks.list": + +type HttpsHealthChecksListCall struct { + s *Service + project string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context +} + +// List: Retrieves the list of HttpsHealthCheck resources available to +// the specified project. 
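+//
+// A minimal paging sketch (hand-written, not generated; "svc" is an
+// assumed, already-configured *Service, and the Items/NextPageToken
+// field names are assumed from the generated HttpsHealthCheckList
+// type):
+//
+//   var pageToken string
+//   for {
+//           call := svc.HttpsHealthChecks.List("my-project").MaxResults(100)
+//           if pageToken != "" {
+//                   call = call.PageToken(pageToken)
+//           }
+//           page, err := call.Do()
+//           if err != nil {
+//                   break // handle the error in real code
+//           }
+//           // ... consume page.Items ...
+//           if page.NextPageToken == "" {
+//                   break
+//           }
+//           pageToken = page.NextPageToken
+//   }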
+func (r *HttpsHealthChecksService) List(project string) *HttpsHealthChecksListCall {
+ c := &HttpsHealthChecksListCall{s: r.s, urlParams_: make(gensupport.URLParams)}
+ c.project = project
+ return c
+}
+
+// Filter sets the optional parameter "filter": Sets a filter expression
+// for filtering listed resources, in the form filter={expression}. Your
+// {expression} must be in the format: field_name comparison_string
+// literal_string.
+//
+// The field_name is the name of the field you want to compare. Only
+// atomic field types are supported (string, number, boolean). The
+// comparison_string must be either eq (equals) or ne (not equals). The
+// literal_string is the string value to filter to. The literal value
+// must be valid for the type of field you are filtering by (string,
+// number, boolean). For string fields, the literal value is interpreted
+// as a regular expression using RE2 syntax. The literal value must
+// match the entire field.
+//
+// For example, filter=name ne example-instance.
+//
+// Compute Engine Beta API Only: If you use filtering in the Beta API,
+// you can also filter on nested fields. For example, you could filter
+// on instances that have set the scheduling.automaticRestart field to
+// true. In particular, use filtering on nested fields to take advantage
+// of instance labels to organize and filter results based on label
+// values.
+//
+// The Beta API also supports filtering on multiple expressions by
+// providing each separate expression within parentheses. For example,
+// (scheduling.automaticRestart eq true) (zone eq us-central1-f).
+// Multiple expressions are treated as AND expressions meaning that
+// resources must match all expressions to pass the filters.
+func (c *HttpsHealthChecksListCall) Filter(filter string) *HttpsHealthChecksListCall {
+ c.urlParams_.Set("filter", filter)
+ return c
+}
+
+// MaxResults sets the optional parameter "maxResults": The maximum
+// number of results per page that Compute Engine should return. If the
+// number of available results is larger than maxResults, Compute Engine
+// returns a nextPageToken that can be used to get the next page of
+// results in subsequent list requests.
+func (c *HttpsHealthChecksListCall) MaxResults(maxResults int64) *HttpsHealthChecksListCall {
+ c.urlParams_.Set("maxResults", fmt.Sprint(maxResults))
+ return c
+}
+
+// PageToken sets the optional parameter "pageToken": Specifies a page
+// token to use. Set pageToken to the nextPageToken returned by a
+// previous list request to get the next page of results.
+func (c *HttpsHealthChecksListCall) PageToken(pageToken string) *HttpsHealthChecksListCall {
+ c.urlParams_.Set("pageToken", pageToken)
+ return c
+}
+
+// Fields allows partial responses to be retrieved. See
+// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *HttpsHealthChecksListCall) Fields(s ...googleapi.Field) *HttpsHealthChecksListCall {
+ c.urlParams_.Set("fields", googleapi.CombineFields(s))
+ return c
+}
+
+// IfNoneMatch sets the optional parameter which makes the operation
+// fail if the object's ETag matches the given value. This is useful for
+// getting updates only after the object has changed since the last
+// request. Use googleapi.IsNotModified to check whether the response
+// error from Do is the result of If-None-Match.
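+//
+// Sketch of the ETag round trip described above (hand-written example;
+// "etag" is a value saved from a previous response header and "svc" is
+// an assumed, already-constructed *Service):
+//
+//   list, err := svc.HttpsHealthChecks.List("my-project").IfNoneMatch(etag).Do()
+//   if googleapi.IsNotModified(err) {
+//           // The cached copy is still current; reuse it.
+//   }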
+func (c *HttpsHealthChecksListCall) IfNoneMatch(entityTag string) *HttpsHealthChecksListCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *HttpsHealthChecksListCall) Context(ctx context.Context) *HttpsHealthChecksListCall { + c.ctx_ = ctx + return c +} + +func (c *HttpsHealthChecksListCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/httpsHealthChecks") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + }) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + req.Header.Set("If-None-Match", c.ifNoneMatch_) + } + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "compute.httpsHealthChecks.list" call. +// Exactly one of *HttpsHealthCheckList or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *HttpsHealthCheckList.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *HttpsHealthChecksListCall) Do(opts ...googleapi.CallOption) (*HttpsHealthCheckList, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &HttpsHealthCheckList{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Retrieves the list of HttpsHealthCheck resources available to the specified project.", + // "httpMethod": "GET", + // "id": "compute.httpsHealthChecks.list", + // "parameterOrder": [ + // "project" + // ], + // "parameters": { + // "filter": { + // "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, filter=name ne example-instance.\n\nCompute Engine Beta API Only: If you use filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. 
In particular, use filtering on nested fields to take advantage of instance labels to organize and filter results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions meaning that resources must match all expressions to pass the filters.", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that Compute Engine should return. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests.", + // "format": "uint32", + // "location": "query", + // "maximum": "500", + // "minimum": "0", + // "type": "integer" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "location": "query", + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/global/httpsHealthChecks", + // "response": { + // "$ref": "HttpsHealthCheckList" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// method id "compute.httpsHealthChecks.patch": + +type HttpsHealthChecksPatchCall struct { + s *Service + project string + httpsHealthCheck string + httpshealthcheck *HttpsHealthCheck + urlParams_ gensupport.URLParams + ctx_ context.Context +} + +// Patch: Updates a HttpsHealthCheck resource in the specified project +// using the data included in the request. This method supports patch +// semantics. +func (r *HttpsHealthChecksService) Patch(project string, httpsHealthCheck string, httpshealthcheck *HttpsHealthCheck) *HttpsHealthChecksPatchCall { + c := &HttpsHealthChecksPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.httpsHealthCheck = httpsHealthCheck + c.httpshealthcheck = httpshealthcheck + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *HttpsHealthChecksPatchCall) Fields(s ...googleapi.Field) *HttpsHealthChecksPatchCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. 
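+//
+// Hand-written sketch: bounding the Patch call with a deadline. The
+// context helpers come from the golang.org/x/net/context package this
+// file imports; "time", "svc", and the health check field names are
+// assumptions of the example:
+//
+//   ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
+//   defer cancel()
+//   op, err := svc.HttpsHealthChecks.
+//           Patch("my-project", "hc-1", &HttpsHealthCheck{RequestPath: "/healthz"}).
+//           Context(ctx).
+//           Do()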
+func (c *HttpsHealthChecksPatchCall) Context(ctx context.Context) *HttpsHealthChecksPatchCall { + c.ctx_ = ctx + return c +} + +func (c *HttpsHealthChecksPatchCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.httpshealthcheck) + if err != nil { + return nil, err + } + ctype := "application/json" + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/httpsHealthChecks/{httpsHealthCheck}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("PATCH", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "httpsHealthCheck": c.httpsHealthCheck, + }) + req.Header.Set("Content-Type", ctype) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "compute.httpsHealthChecks.patch" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *HttpsHealthChecksPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Updates a HttpsHealthCheck resource in the specified project using the data included in the request. 
This method supports patch semantics.", + // "httpMethod": "PATCH", + // "id": "compute.httpsHealthChecks.patch", + // "parameterOrder": [ + // "project", + // "httpsHealthCheck" + // ], + // "parameters": { + // "httpsHealthCheck": { + // "description": "Name of the HttpsHealthCheck resource to update.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/global/httpsHealthChecks/{httpsHealthCheck}", + // "request": { + // "$ref": "HttpsHealthCheck" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.httpsHealthChecks.update": + +type HttpsHealthChecksUpdateCall struct { + s *Service + project string + httpsHealthCheck string + httpshealthcheck *HttpsHealthCheck + urlParams_ gensupport.URLParams + ctx_ context.Context +} + +// Update: Updates a HttpsHealthCheck resource in the specified project +// using the data included in the request. +func (r *HttpsHealthChecksService) Update(project string, httpsHealthCheck string, httpshealthcheck *HttpsHealthCheck) *HttpsHealthChecksUpdateCall { + c := &HttpsHealthChecksUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.httpsHealthCheck = httpsHealthCheck + c.httpshealthcheck = httpshealthcheck + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *HttpsHealthChecksUpdateCall) Fields(s ...googleapi.Field) *HttpsHealthChecksUpdateCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *HttpsHealthChecksUpdateCall) Context(ctx context.Context) *HttpsHealthChecksUpdateCall { + c.ctx_ = ctx + return c +} + +func (c *HttpsHealthChecksUpdateCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.httpshealthcheck) + if err != nil { + return nil, err + } + ctype := "application/json" + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/httpsHealthChecks/{httpsHealthCheck}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("PUT", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "httpsHealthCheck": c.httpsHealthCheck, + }) + req.Header.Set("Content-Type", ctype) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "compute.httpsHealthChecks.update" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. 
Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *HttpsHealthChecksUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Updates a HttpsHealthCheck resource in the specified project using the data included in the request.", + // "httpMethod": "PUT", + // "id": "compute.httpsHealthChecks.update", + // "parameterOrder": [ + // "project", + // "httpsHealthCheck" + // ], + // "parameters": { + // "httpsHealthCheck": { + // "description": "Name of the HttpsHealthCheck resource to update.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/global/httpsHealthChecks/{httpsHealthCheck}", + // "request": { + // "$ref": "HttpsHealthCheck" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.images.delete": + +type ImagesDeleteCall struct { + s *Service + project string + image string + urlParams_ gensupport.URLParams + ctx_ context.Context +} + +// Delete: Deletes the specified image resource. +// For details, see https://cloud.google.com/compute/docs/reference/latest/images/delete +func (r *ImagesService) Delete(project string, image string) *ImagesDeleteCall { + c := &ImagesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.image = image + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ImagesDeleteCall) Fields(s ...googleapi.Field) *ImagesDeleteCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ImagesDeleteCall) Context(ctx context.Context) *ImagesDeleteCall { + c.ctx_ = ctx + return c +} + +func (c *ImagesDeleteCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/images/{image}") + urls += "?" 
+ c.urlParams_.Encode() + req, _ := http.NewRequest("DELETE", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "image": c.image, + }) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "compute.images.delete" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *ImagesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Deletes the specified image resource.", + // "httpMethod": "DELETE", + // "id": "compute.images.delete", + // "parameterOrder": [ + // "project", + // "image" + // ], + // "parameters": { + // "image": { + // "description": "Name of the image resource to delete.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/global/images/{image}", + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.images.deprecate": + +type ImagesDeprecateCall struct { + s *Service + project string + image string + deprecationstatus *DeprecationStatus + urlParams_ gensupport.URLParams + ctx_ context.Context +} + +// Deprecate: Sets the deprecation status of an image. +// +// If an empty request body is given, clears the deprecation status +// instead. +// For details, see https://cloud.google.com/compute/docs/reference/latest/images/deprecate +func (r *ImagesService) Deprecate(project string, image string, deprecationstatus *DeprecationStatus) *ImagesDeprecateCall { + c := &ImagesDeprecateCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.image = image + c.deprecationstatus = deprecationstatus + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. 
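+//
+// Hand-written sketch combining Deprecate (defined above) with a partial
+// response; the DeprecationStatus field names are assumed from the
+// Compute API and "svc" is an assumed, pre-built *Service:
+//
+//   status := &DeprecationStatus{
+//           State:       "DEPRECATED",
+//           Replacement: "https://www.googleapis.com/compute/v1/projects/my-project/global/images/image-v2",
+//   }
+//   op, err := svc.Images.Deprecate("my-project", "image-v1", status).
+//           Fields("name", "status").
+//           Do()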
+func (c *ImagesDeprecateCall) Fields(s ...googleapi.Field) *ImagesDeprecateCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ImagesDeprecateCall) Context(ctx context.Context) *ImagesDeprecateCall { + c.ctx_ = ctx + return c +} + +func (c *ImagesDeprecateCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.deprecationstatus) + if err != nil { + return nil, err + } + ctype := "application/json" + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/images/{image}/deprecate") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "image": c.image, + }) + req.Header.Set("Content-Type", ctype) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "compute.images.deprecate" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *ImagesDeprecateCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Sets the deprecation status of an image.\n\nIf an empty request body is given, clears the deprecation status instead.", + // "httpMethod": "POST", + // "id": "compute.images.deprecate", + // "parameterOrder": [ + // "project", + // "image" + // ], + // "parameters": { + // "image": { + // "description": "Image name.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/global/images/{image}/deprecate", + // "request": { + // "$ref": "DeprecationStatus" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.images.get": + +type ImagesGetCall struct { + s *Service + project string + image string + urlParams_ 
gensupport.URLParams
+ ifNoneMatch_ string
+ ctx_ context.Context
+}
+
+// Get: Returns the specified image resource.
+// For details, see https://cloud.google.com/compute/docs/reference/latest/images/get
+func (r *ImagesService) Get(project string, image string) *ImagesGetCall {
+ c := &ImagesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)}
+ c.project = project
+ c.image = image
+ return c
+}
+
+// Fields allows partial responses to be retrieved. See
+// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *ImagesGetCall) Fields(s ...googleapi.Field) *ImagesGetCall {
+ c.urlParams_.Set("fields", googleapi.CombineFields(s))
+ return c
+}
+
+// IfNoneMatch sets the optional parameter which makes the operation
+// fail if the object's ETag matches the given value. This is useful for
+// getting updates only after the object has changed since the last
+// request. Use googleapi.IsNotModified to check whether the response
+// error from Do is the result of If-None-Match.
+func (c *ImagesGetCall) IfNoneMatch(entityTag string) *ImagesGetCall {
+ c.ifNoneMatch_ = entityTag
+ return c
+}
+
+// Context sets the context to be used in this call's Do method. Any
+// pending HTTP request will be aborted if the provided context is
+// canceled.
+func (c *ImagesGetCall) Context(ctx context.Context) *ImagesGetCall {
+ c.ctx_ = ctx
+ return c
+}
+
+func (c *ImagesGetCall) doRequest(alt string) (*http.Response, error) {
+ var body io.Reader = nil
+ c.urlParams_.Set("alt", alt)
+ urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/images/{image}")
+ urls += "?" + c.urlParams_.Encode()
+ req, _ := http.NewRequest("GET", urls, body)
+ googleapi.Expand(req.URL, map[string]string{
+ "project": c.project,
+ "image": c.image,
+ })
+ req.Header.Set("User-Agent", c.s.userAgent())
+ if c.ifNoneMatch_ != "" {
+ req.Header.Set("If-None-Match", c.ifNoneMatch_)
+ }
+ if c.ctx_ != nil {
+ return ctxhttp.Do(c.ctx_, c.s.client, req)
+ }
+ return c.s.client.Do(req)
+}
+
+// Do executes the "compute.images.get" call.
+// Exactly one of *Image or error will be non-nil. Any non-2xx status
+// code is an error. Response headers are in either
+// *Image.ServerResponse.Header or (if a response was returned at all)
+// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to
+// check whether the returned error was because http.StatusNotModified
+// was returned.
+func (c *ImagesGetCall) Do(opts ...googleapi.CallOption) (*Image, error) {
+ gensupport.SetOptions(c.urlParams_, opts...)
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Image{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Returns the specified image resource.", + // "httpMethod": "GET", + // "id": "compute.images.get", + // "parameterOrder": [ + // "project", + // "image" + // ], + // "parameters": { + // "image": { + // "description": "Name of the image resource to return.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/global/images/{image}", + // "response": { + // "$ref": "Image" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// method id "compute.images.insert": + +type ImagesInsertCall struct { + s *Service + project string + image *Image + urlParams_ gensupport.URLParams + ctx_ context.Context +} + +// Insert: Creates an image resource in the specified project using the +// data included in the request. +// For details, see https://cloud.google.com/compute/docs/reference/latest/images/insert +func (r *ImagesService) Insert(project string, image *Image) *ImagesInsertCall { + c := &ImagesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.image = image + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ImagesInsertCall) Fields(s ...googleapi.Field) *ImagesInsertCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ImagesInsertCall) Context(ctx context.Context) *ImagesInsertCall { + c.ctx_ = ctx + return c +} + +func (c *ImagesInsertCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.image) + if err != nil { + return nil, err + } + ctype := "application/json" + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/images") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + }) + req.Header.Set("Content-Type", ctype) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "compute.images.insert" call. 
+// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *ImagesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Creates an image resource in the specified project using the data included in the request.", + // "httpMethod": "POST", + // "id": "compute.images.insert", + // "parameterOrder": [ + // "project" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/global/images", + // "request": { + // "$ref": "Image" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/devstorage.full_control", + // "https://www.googleapis.com/auth/devstorage.read_only", + // "https://www.googleapis.com/auth/devstorage.read_write" + // ] + // } + +} + +// method id "compute.images.list": + +type ImagesListCall struct { + s *Service + project string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context +} + +// List: Retrieves the list of private images available to the specified +// project. Private images are images you create that belong to your +// project. This method does not get any images that belong to other +// projects, including publicly-available images, like Debian 7. If you +// want to get a list of publicly-available images, use this method to +// make a request to the respective image project, such as debian-cloud +// or windows-cloud. +// +// See Accessing images for more information. +// For details, see https://cloud.google.com/compute/docs/reference/latest/images/list +func (r *ImagesService) List(project string) *ImagesListCall { + c := &ImagesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + return c +} + +// Filter sets the optional parameter "filter": Sets a filter expression +// for filtering listed resources, in the form filter={expression}. Your +// {expression} must be in the format: field_name comparison_string +// literal_string. +// +// The field_name is the name of the field you want to compare. Only +// atomic field types are supported (string, number, boolean). The +// comparison_string must be either eq (equals) or ne (not equals). 
The
+// literal_string is the string value to filter to. The literal value
+// must be valid for the type of field you are filtering by (string,
+// number, boolean). For string fields, the literal value is interpreted
+// as a regular expression using RE2 syntax. The literal value must
+// match the entire field.
+//
+// For example, filter=name ne example-instance.
+//
+// Compute Engine Beta API Only: If you use filtering in the Beta API,
+// you can also filter on nested fields. For example, you could filter
+// on instances that have set the scheduling.automaticRestart field to
+// true. In particular, use filtering on nested fields to take advantage
+// of instance labels to organize and filter results based on label
+// values.
+//
+// The Beta API also supports filtering on multiple expressions by
+// providing each separate expression within parentheses. For example,
+// (scheduling.automaticRestart eq true) (zone eq us-central1-f).
+// Multiple expressions are treated as AND expressions meaning that
+// resources must match all expressions to pass the filters.
+func (c *ImagesListCall) Filter(filter string) *ImagesListCall {
+ c.urlParams_.Set("filter", filter)
+ return c
+}
+
+// MaxResults sets the optional parameter "maxResults": The maximum
+// number of results per page that Compute Engine should return. If the
+// number of available results is larger than maxResults, Compute Engine
+// returns a nextPageToken that can be used to get the next page of
+// results in subsequent list requests.
+func (c *ImagesListCall) MaxResults(maxResults int64) *ImagesListCall {
+ c.urlParams_.Set("maxResults", fmt.Sprint(maxResults))
+ return c
+}
+
+// PageToken sets the optional parameter "pageToken": Specifies a page
+// token to use. Set pageToken to the nextPageToken returned by a
+// previous list request to get the next page of results.
+func (c *ImagesListCall) PageToken(pageToken string) *ImagesListCall {
+ c.urlParams_.Set("pageToken", pageToken)
+ return c
+}
+
+// Fields allows partial responses to be retrieved. See
+// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *ImagesListCall) Fields(s ...googleapi.Field) *ImagesListCall {
+ c.urlParams_.Set("fields", googleapi.CombineFields(s))
+ return c
+}
+
+// IfNoneMatch sets the optional parameter which makes the operation
+// fail if the object's ETag matches the given value. This is useful for
+// getting updates only after the object has changed since the last
+// request. Use googleapi.IsNotModified to check whether the response
+// error from Do is the result of If-None-Match.
+func (c *ImagesListCall) IfNoneMatch(entityTag string) *ImagesListCall {
+ c.ifNoneMatch_ = entityTag
+ return c
+}
+
+// Context sets the context to be used in this call's Do method. Any
+// pending HTTP request will be aborted if the provided context is
+// canceled.
+func (c *ImagesListCall) Context(ctx context.Context) *ImagesListCall {
+ c.ctx_ = ctx
+ return c
+}
+
+func (c *ImagesListCall) doRequest(alt string) (*http.Response, error) {
+ var body io.Reader = nil
+ c.urlParams_.Set("alt", alt)
+ urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/images")
+ urls += "?"
+ c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + }) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + req.Header.Set("If-None-Match", c.ifNoneMatch_) + } + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "compute.images.list" call. +// Exactly one of *ImageList or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *ImageList.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *ImagesListCall) Do(opts ...googleapi.CallOption) (*ImageList, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &ImageList{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Retrieves the list of private images available to the specified project. Private images are images you create that belong to your project. This method does not get any images that belong to other projects, including publicly-available images, like Debian 7. If you want to get a list of publicly-available images, use this method to make a request to the respective image project, such as debian-cloud or windows-cloud.\n\nSee Accessing images for more information.", + // "httpMethod": "GET", + // "id": "compute.images.list", + // "parameterOrder": [ + // "project" + // ], + // "parameters": { + // "filter": { + // "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, filter=name ne example-instance.\n\nCompute Engine Beta API Only: If you use filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. In particular, use filtering on nested fields to take advantage of instance labels to organize and filter results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). 
Multiple expressions are treated as AND expressions meaning that resources must match all expressions to pass the filters.", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that Compute Engine should return. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests.", + // "format": "uint32", + // "location": "query", + // "maximum": "500", + // "minimum": "0", + // "type": "integer" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "location": "query", + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/global/images", + // "response": { + // "$ref": "ImageList" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// method id "compute.instanceGroupManagers.abandonInstances": + +type InstanceGroupManagersAbandonInstancesCall struct { + s *Service + project string + zone string + instanceGroupManager string + instancegroupmanagersabandoninstancesrequest *InstanceGroupManagersAbandonInstancesRequest + urlParams_ gensupport.URLParams + ctx_ context.Context +} + +// AbandonInstances: Schedules a group action to remove the specified +// instances from the managed instance group. Abandoning an instance +// does not delete the instance, but it does remove the instance from +// any target pools that are applied by the managed instance group. This +// method reduces the targetSize of the managed instance group by the +// number of instances that you abandon. This operation is marked as +// DONE when the action is scheduled even if the instances have not yet +// been removed from the group. You must separately verify the status of +// the abandoning action with the listmanagedinstances method. +func (r *InstanceGroupManagersService) AbandonInstances(project string, zone string, instanceGroupManager string, instancegroupmanagersabandoninstancesrequest *InstanceGroupManagersAbandonInstancesRequest) *InstanceGroupManagersAbandonInstancesCall { + c := &InstanceGroupManagersAbandonInstancesCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.zone = zone + c.instanceGroupManager = instanceGroupManager + c.instancegroupmanagersabandoninstancesrequest = instancegroupmanagersabandoninstancesrequest + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *InstanceGroupManagersAbandonInstancesCall) Fields(s ...googleapi.Field) *InstanceGroupManagersAbandonInstancesCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. 
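+//
+// Hand-written sketch (the instance URL, names, "svc", and "ctx" are
+// placeholders; the request type's Instances field is assumed from the
+// Compute API): abandon one instance, then verify the result separately
+// with listmanagedinstances as the method comment above notes.
+//
+//   req := &InstanceGroupManagersAbandonInstancesRequest{
+//           Instances: []string{"https://www.googleapis.com/compute/v1/projects/my-project/zones/us-central1-f/instances/inst-1"},
+//   }
+//   op, err := svc.InstanceGroupManagers.
+//           AbandonInstances("my-project", "us-central1-f", "my-group", req).
+//           Context(ctx).
+//           Do()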
+func (c *InstanceGroupManagersAbandonInstancesCall) Context(ctx context.Context) *InstanceGroupManagersAbandonInstancesCall { + c.ctx_ = ctx + return c +} + +func (c *InstanceGroupManagersAbandonInstancesCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancegroupmanagersabandoninstancesrequest) + if err != nil { + return nil, err + } + ctype := "application/json" + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/abandonInstances") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "zone": c.zone, + "instanceGroupManager": c.instanceGroupManager, + }) + req.Header.Set("Content-Type", ctype) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "compute.instanceGroupManagers.abandonInstances" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *InstanceGroupManagersAbandonInstancesCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Schedules a group action to remove the specified instances from the managed instance group. Abandoning an instance does not delete the instance, but it does remove the instance from any target pools that are applied by the managed instance group. This method reduces the targetSize of the managed instance group by the number of instances that you abandon. This operation is marked as DONE when the action is scheduled even if the instances have not yet been removed from the group. 
You must separately verify the status of the abandoning action with the listmanagedinstances method.", + // "httpMethod": "POST", + // "id": "compute.instanceGroupManagers.abandonInstances", + // "parameterOrder": [ + // "project", + // "zone", + // "instanceGroupManager" + // ], + // "parameters": { + // "instanceGroupManager": { + // "description": "The name of the managed instance group.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone where the managed instance group is located.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/abandonInstances", + // "request": { + // "$ref": "InstanceGroupManagersAbandonInstancesRequest" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.instanceGroupManagers.aggregatedList": + +type InstanceGroupManagersAggregatedListCall struct { + s *Service + project string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context +} + +// AggregatedList: Retrieves the list of managed instance groups and +// groups them by zone. +func (r *InstanceGroupManagersService) AggregatedList(project string) *InstanceGroupManagersAggregatedListCall { + c := &InstanceGroupManagersAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + return c +} + +// Filter sets the optional parameter "filter": Sets a filter expression +// for filtering listed resources, in the form filter={expression}. Your +// {expression} must be in the format: field_name comparison_string +// literal_string. +// +// The field_name is the name of the field you want to compare. Only +// atomic field types are supported (string, number, boolean). The +// comparison_string must be either eq (equals) or ne (not equals). The +// literal_string is the string value to filter to. The literal value +// must be valid for the type of field you are filtering by (string, +// number, boolean). For string fields, the literal value is interpreted +// as a regular expression using RE2 syntax. The literal value must +// match the entire field. +// +// For example, filter=name ne example-instance. +// +// Compute Engine Beta API Only: If you use filtering in the Beta API, +// you can also filter on nested fields. For example, you could filter +// on instances that have set the scheduling.automaticRestart field to +// true. In particular, use filtering on nested fields to take advantage +// of instance labels to organize and filter results based on label +// values. +// +// The Beta API also supports filtering on multiple expressions by +// providing each separate expression within parentheses. For example, +// (scheduling.automaticRestart eq true) (zone eq us-central1-f). +// Multiple expressions are treated as AND expressions meaning that +// resources must match all expressions to pass the filters. 
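+//
+// Hand-written sketch of a single-expression filter in the syntax
+// described above ("svc" is an assumed *Service; the aggregated result
+// is assumed to key its Items map by scope, e.g. "zones/us-central1-f"):
+//
+//   agg, err := svc.InstanceGroupManagers.
+//           AggregatedList("my-project").
+//           Filter("name ne example-group").
+//           Do()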
+func (c *InstanceGroupManagersAggregatedListCall) Filter(filter string) *InstanceGroupManagersAggregatedListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that Compute Engine should return. If the +// number of available results is larger than maxResults, Compute Engine +// returns a nextPageToken that can be used to get the next page of +// results in subsequent list requests. +func (c *InstanceGroupManagersAggregatedListCall) MaxResults(maxResults int64) *InstanceGroupManagersAggregatedListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set pageToken to the nextPageToken returned by a +// previous list request to get the next page of results. +func (c *InstanceGroupManagersAggregatedListCall) PageToken(pageToken string) *InstanceGroupManagersAggregatedListCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *InstanceGroupManagersAggregatedListCall) Fields(s ...googleapi.Field) *InstanceGroupManagersAggregatedListCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of If-None-Match. +func (c *InstanceGroupManagersAggregatedListCall) IfNoneMatch(entityTag string) *InstanceGroupManagersAggregatedListCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *InstanceGroupManagersAggregatedListCall) Context(ctx context.Context) *InstanceGroupManagersAggregatedListCall { + c.ctx_ = ctx + return c +} + +func (c *InstanceGroupManagersAggregatedListCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/instanceGroupManagers") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + }) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + req.Header.Set("If-None-Match", c.ifNoneMatch_) + } + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "compute.instanceGroupManagers.aggregatedList" call. +// Exactly one of *InstanceGroupManagerAggregatedList or error will be +// non-nil. Any non-2xx status code is an error. Response headers are in +// either *InstanceGroupManagerAggregatedList.ServerResponse.Header or +// (if a response was returned at all) in +// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check +// whether the returned error was because http.StatusNotModified was +// returned. +func (c *InstanceGroupManagersAggregatedListCall) Do(opts ...googleapi.CallOption) (*InstanceGroupManagerAggregatedList, error) { + gensupport.SetOptions(c.urlParams_, opts...)
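+ // Per-call options supplied via opts (for example googleapi.QuotaUser)
+ // have been merged into the URL parameters before the request is built.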
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &InstanceGroupManagerAggregatedList{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Retrieves the list of managed instance groups and groups them by zone.", + // "httpMethod": "GET", + // "id": "compute.instanceGroupManagers.aggregatedList", + // "parameterOrder": [ + // "project" + // ], + // "parameters": { + // "filter": { + // "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, filter=name ne example-instance.\n\nCompute Engine Beta API Only: If you use filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. In particular, use filtering on nested fields to take advantage of instance labels to organize and filter results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions meaning that resources must match all expressions to pass the filters.", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that Compute Engine should return. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests.", + // "format": "uint32", + // "location": "query", + // "maximum": "500", + // "minimum": "0", + // "type": "integer" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. 
Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "location": "query", + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/aggregated/instanceGroupManagers", + // "response": { + // "$ref": "InstanceGroupManagerAggregatedList" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// method id "compute.instanceGroupManagers.delete": + +type InstanceGroupManagersDeleteCall struct { + s *Service + project string + zone string + instanceGroupManager string + urlParams_ gensupport.URLParams + ctx_ context.Context +} + +// Delete: Deletes the specified managed instance group and all of the +// instances in that group. +func (r *InstanceGroupManagersService) Delete(project string, zone string, instanceGroupManager string) *InstanceGroupManagersDeleteCall { + c := &InstanceGroupManagersDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.zone = zone + c.instanceGroupManager = instanceGroupManager + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *InstanceGroupManagersDeleteCall) Fields(s ...googleapi.Field) *InstanceGroupManagersDeleteCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *InstanceGroupManagersDeleteCall) Context(ctx context.Context) *InstanceGroupManagersDeleteCall { + c.ctx_ = ctx + return c +} + +func (c *InstanceGroupManagersDeleteCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("DELETE", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "zone": c.zone, + "instanceGroupManager": c.instanceGroupManager, + }) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "compute.instanceGroupManagers.delete" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *InstanceGroupManagersDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Deletes the specified managed instance group and all of the instances in that group.", + // "httpMethod": "DELETE", + // "id": "compute.instanceGroupManagers.delete", + // "parameterOrder": [ + // "project", + // "zone", + // "instanceGroupManager" + // ], + // "parameters": { + // "instanceGroupManager": { + // "description": "The name of the managed instance group to delete.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone where the managed instance group is located.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}", + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.instanceGroupManagers.deleteInstances": + +type InstanceGroupManagersDeleteInstancesCall struct { + s *Service + project string + zone string + instanceGroupManager string + instancegroupmanagersdeleteinstancesrequest *InstanceGroupManagersDeleteInstancesRequest + urlParams_ gensupport.URLParams + ctx_ context.Context +} + +// DeleteInstances: Schedules a group action to delete the specified +// instances in the managed instance group. The instances are also +// removed from any target pools of which they were a member. This +// method reduces the targetSize of the managed instance group by the +// number of instances that you delete. This operation is marked as DONE +// when the action is scheduled even if the instances are still being +// deleted. You must separately verify the status of the deleting action +// with the listmanagedinstances method. +func (r *InstanceGroupManagersService) DeleteInstances(project string, zone string, instanceGroupManager string, instancegroupmanagersdeleteinstancesrequest *InstanceGroupManagersDeleteInstancesRequest) *InstanceGroupManagersDeleteInstancesCall { + c := &InstanceGroupManagersDeleteInstancesCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.zone = zone + c.instanceGroupManager = instanceGroupManager + c.instancegroupmanagersdeleteinstancesrequest = instancegroupmanagersdeleteinstancesrequest + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. 
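+//
+// For example, to trim the returned Operation down to a couple of
+// fields (the field names here are illustrative):
+//
+//   call.Fields("name", "status")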
+func (c *InstanceGroupManagersDeleteInstancesCall) Fields(s ...googleapi.Field) *InstanceGroupManagersDeleteInstancesCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *InstanceGroupManagersDeleteInstancesCall) Context(ctx context.Context) *InstanceGroupManagersDeleteInstancesCall { + c.ctx_ = ctx + return c +} + +func (c *InstanceGroupManagersDeleteInstancesCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancegroupmanagersdeleteinstancesrequest) + if err != nil { + return nil, err + } + ctype := "application/json" + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/deleteInstances") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "zone": c.zone, + "instanceGroupManager": c.instanceGroupManager, + }) + req.Header.Set("Content-Type", ctype) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "compute.instanceGroupManagers.deleteInstances" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *InstanceGroupManagersDeleteInstancesCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Schedules a group action to delete the specified instances in the managed instance group. The instances are also removed from any target pools of which they were a member. This method reduces the targetSize of the managed instance group by the number of instances that you delete. This operation is marked as DONE when the action is scheduled even if the instances are still being deleted. 
You must separately verify the status of the deleting action with the listmanagedinstances method.", + // "httpMethod": "POST", + // "id": "compute.instanceGroupManagers.deleteInstances", + // "parameterOrder": [ + // "project", + // "zone", + // "instanceGroupManager" + // ], + // "parameters": { + // "instanceGroupManager": { + // "description": "The name of the managed instance group.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone where the managed instance group is located.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/deleteInstances", + // "request": { + // "$ref": "InstanceGroupManagersDeleteInstancesRequest" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.instanceGroupManagers.get": + +type InstanceGroupManagersGetCall struct { + s *Service + project string + zone string + instanceGroupManager string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context +} + +// Get: Returns all of the details about the specified managed instance +// group. +func (r *InstanceGroupManagersService) Get(project string, zone string, instanceGroupManager string) *InstanceGroupManagersGetCall { + c := &InstanceGroupManagersGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.zone = zone + c.instanceGroupManager = instanceGroupManager + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *InstanceGroupManagersGetCall) Fields(s ...googleapi.Field) *InstanceGroupManagersGetCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of If-None-Match. +func (c *InstanceGroupManagersGetCall) IfNoneMatch(entityTag string) *InstanceGroupManagersGetCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *InstanceGroupManagersGetCall) Context(ctx context.Context) *InstanceGroupManagersGetCall { + c.ctx_ = ctx + return c +} + +func (c *InstanceGroupManagersGetCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}") + urls += "?"
+ c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "zone": c.zone, + "instanceGroupManager": c.instanceGroupManager, + }) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + req.Header.Set("If-None-Match", c.ifNoneMatch_) + } + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "compute.instanceGroupManagers.get" call. +// Exactly one of *InstanceGroupManager or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *InstanceGroupManager.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *InstanceGroupManagersGetCall) Do(opts ...googleapi.CallOption) (*InstanceGroupManager, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &InstanceGroupManager{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Returns all of the details about the specified managed instance group.", + // "httpMethod": "GET", + // "id": "compute.instanceGroupManagers.get", + // "parameterOrder": [ + // "project", + // "zone", + // "instanceGroupManager" + // ], + // "parameters": { + // "instanceGroupManager": { + // "description": "The name of the managed instance group.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone where the managed instance group is located.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}", + // "response": { + // "$ref": "InstanceGroupManager" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// method id "compute.instanceGroupManagers.insert": + +type InstanceGroupManagersInsertCall struct { + s *Service + project string + zone string + instancegroupmanager *InstanceGroupManager + urlParams_ gensupport.URLParams + ctx_ context.Context +} + +// Insert: Creates a managed instance group using the information that +// you specify in the request. After the group is created, it schedules +// an action to create instances in the group using the specified +// instance template. 
This operation is marked as DONE when the group is +// created even if the instances in the group have not yet been created. +// You must separately verify the status of the individual instances +// with the listmanagedinstances method. +func (r *InstanceGroupManagersService) Insert(project string, zone string, instancegroupmanager *InstanceGroupManager) *InstanceGroupManagersInsertCall { + c := &InstanceGroupManagersInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.zone = zone + c.instancegroupmanager = instancegroupmanager + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *InstanceGroupManagersInsertCall) Fields(s ...googleapi.Field) *InstanceGroupManagersInsertCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *InstanceGroupManagersInsertCall) Context(ctx context.Context) *InstanceGroupManagersInsertCall { + c.ctx_ = ctx + return c +} + +func (c *InstanceGroupManagersInsertCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancegroupmanager) + if err != nil { + return nil, err + } + ctype := "application/json" + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroupManagers") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "zone": c.zone, + }) + req.Header.Set("Content-Type", ctype) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "compute.instanceGroupManagers.insert" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *InstanceGroupManagersInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Creates a managed instance group using the information that you specify in the request. After the group is created, it schedules an action to create instances in the group using the specified instance template. This operation is marked as DONE when the group is created even if the instances in the group have not yet been created. 
You must separately verify the status of the individual instances with the listmanagedinstances method.", + // "httpMethod": "POST", + // "id": "compute.instanceGroupManagers.insert", + // "parameterOrder": [ + // "project", + // "zone" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone where you want to create the managed instance group.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/zones/{zone}/instanceGroupManagers", + // "request": { + // "$ref": "InstanceGroupManager" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.instanceGroupManagers.list": + +type InstanceGroupManagersListCall struct { + s *Service + project string + zone string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context +} + +// List: Retrieves a list of managed instance groups that are contained +// within the specified project and zone. +func (r *InstanceGroupManagersService) List(project string, zone string) *InstanceGroupManagersListCall { + c := &InstanceGroupManagersListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.zone = zone + return c +} + +// Filter sets the optional parameter "filter": Sets a filter expression +// for filtering listed resources, in the form filter={expression}. Your +// {expression} must be in the format: field_name comparison_string +// literal_string. +// +// The field_name is the name of the field you want to compare. Only +// atomic field types are supported (string, number, boolean). The +// comparison_string must be either eq (equals) or ne (not equals). The +// literal_string is the string value to filter to. The literal value +// must be valid for the type of field you are filtering by (string, +// number, boolean). For string fields, the literal value is interpreted +// as a regular expression using RE2 syntax. The literal value must +// match the entire field. +// +// For example, filter=name ne example-instance. +// +// Compute Engine Beta API Only: If you use filtering in the Beta API, +// you can also filter on nested fields. For example, you could filter +// on instances that have set the scheduling.automaticRestart field to +// true. In particular, use filtering on nested fields to take advantage +// of instance labels to organize and filter results based on label +// values. +// +// The Beta API also supports filtering on multiple expressions by +// providing each separate expression within parentheses. For example, +// (scheduling.automaticRestart eq true) (zone eq us-central1-f). +// Multiple expressions are treated as AND expressions meaning that +// resources must match all expressions to pass the filters. +func (c *InstanceGroupManagersListCall) Filter(filter string) *InstanceGroupManagersListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that Compute Engine should return. 
If the +// number of available results is larger than maxResults, Compute Engine +// returns a nextPageToken that can be used to get the next page of +// results in subsequent list requests. +func (c *InstanceGroupManagersListCall) MaxResults(maxResults int64) *InstanceGroupManagersListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set pageToken to the nextPageToken returned by a +// previous list request to get the next page of results. +func (c *InstanceGroupManagersListCall) PageToken(pageToken string) *InstanceGroupManagersListCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *InstanceGroupManagersListCall) Fields(s ...googleapi.Field) *InstanceGroupManagersListCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of If-None-Match. +func (c *InstanceGroupManagersListCall) IfNoneMatch(entityTag string) *InstanceGroupManagersListCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *InstanceGroupManagersListCall) Context(ctx context.Context) *InstanceGroupManagersListCall { + c.ctx_ = ctx + return c +} + +func (c *InstanceGroupManagersListCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroupManagers") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "zone": c.zone, + }) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + req.Header.Set("If-None-Match", c.ifNoneMatch_) + } + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "compute.instanceGroupManagers.list" call. +// Exactly one of *InstanceGroupManagerList or error will be non-nil. +// Any non-2xx status code is an error. Response headers are in either +// *InstanceGroupManagerList.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *InstanceGroupManagersListCall) Do(opts ...googleapi.CallOption) (*InstanceGroupManagerList, error) { + gensupport.SetOptions(c.urlParams_, opts...)
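+ // A returned InstanceGroupManagerList may carry a NextPageToken; pass
+ // it to PageToken on a fresh List call to fetch the following page.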
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &InstanceGroupManagerList{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Retrieves a list of managed instance groups that are contained within the specified project and zone.", + // "httpMethod": "GET", + // "id": "compute.instanceGroupManagers.list", + // "parameterOrder": [ + // "project", + // "zone" + // ], + // "parameters": { + // "filter": { + // "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, filter=name ne example-instance.\n\nCompute Engine Beta API Only: If you use filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. In particular, use filtering on nested fields to take advantage of instance labels to organize and filter results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions meaning that resources must match all expressions to pass the filters.", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that Compute Engine should return. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests.", + // "format": "uint32", + // "location": "query", + // "maximum": "500", + // "minimum": "0", + // "type": "integer" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. 
Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "location": "query", + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone where the managed instance group is located.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/zones/{zone}/instanceGroupManagers", + // "response": { + // "$ref": "InstanceGroupManagerList" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// method id "compute.instanceGroupManagers.listManagedInstances": + +type InstanceGroupManagersListManagedInstancesCall struct { + s *Service + project string + zone string + instanceGroupManager string + urlParams_ gensupport.URLParams + ctx_ context.Context +} + +// ListManagedInstances: Lists all of the instances in the managed +// instance group. Each instance in the list has a currentAction, which +// indicates the action that the managed instance group is performing on +// the instance. For example, if the group is still creating an +// instance, the currentAction is CREATING. If a previous action failed, +// the list displays the errors for that failed action. +func (r *InstanceGroupManagersService) ListManagedInstances(project string, zone string, instanceGroupManager string) *InstanceGroupManagersListManagedInstancesCall { + c := &InstanceGroupManagersListManagedInstancesCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.zone = zone + c.instanceGroupManager = instanceGroupManager + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *InstanceGroupManagersListManagedInstancesCall) Fields(s ...googleapi.Field) *InstanceGroupManagersListManagedInstancesCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *InstanceGroupManagersListManagedInstancesCall) Context(ctx context.Context) *InstanceGroupManagersListManagedInstancesCall { + c.ctx_ = ctx + return c +} + +func (c *InstanceGroupManagersListManagedInstancesCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/listManagedInstances") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "zone": c.zone, + "instanceGroupManager": c.instanceGroupManager, + }) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "compute.instanceGroupManagers.listManagedInstances" call. 
+// Exactly one of *InstanceGroupManagersListManagedInstancesResponse or +// error will be non-nil. Any non-2xx status code is an error. Response +// headers are in either +// *InstanceGroupManagersListManagedInstancesResponse.ServerResponse.Header +// or (if a response was returned at all) in +// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check +// whether the returned error was because http.StatusNotModified was +// returned. +func (c *InstanceGroupManagersListManagedInstancesCall) Do(opts ...googleapi.CallOption) (*InstanceGroupManagersListManagedInstancesResponse, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &InstanceGroupManagersListManagedInstancesResponse{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Lists all of the instances in the managed instance group. Each instance in the list has a currentAction, which indicates the action that the managed instance group is performing on the instance. For example, if the group is still creating an instance, the currentAction is CREATING. If a previous action failed, the list displays the errors for that failed action.", + // "httpMethod": "POST", + // "id": "compute.instanceGroupManagers.listManagedInstances", + // "parameterOrder": [ + // "project", + // "zone", + // "instanceGroupManager" + // ], + // "parameters": { + // "instanceGroupManager": { + // "description": "The name of the managed instance group.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone where the managed instance group is located.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/listManagedInstances", + // "response": { + // "$ref": "InstanceGroupManagersListManagedInstancesResponse" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// method id "compute.instanceGroupManagers.recreateInstances": + +type InstanceGroupManagersRecreateInstancesCall struct { + s *Service + project string + zone string + instanceGroupManager string + instancegroupmanagersrecreateinstancesrequest *InstanceGroupManagersRecreateInstancesRequest + urlParams_ gensupport.URLParams + ctx_ context.Context +} + +// RecreateInstances: Schedules a group action to recreate the specified +// instances in the managed instance group. The instances are deleted +// and recreated using the current instance template for the managed +// instance group.
This operation is marked as DONE when the action is +// scheduled even if the instances have not yet been recreated. You must +// separately verify the status of the recreating action with the +// listmanagedinstances method. +func (r *InstanceGroupManagersService) RecreateInstances(project string, zone string, instanceGroupManager string, instancegroupmanagersrecreateinstancesrequest *InstanceGroupManagersRecreateInstancesRequest) *InstanceGroupManagersRecreateInstancesCall { + c := &InstanceGroupManagersRecreateInstancesCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.zone = zone + c.instanceGroupManager = instanceGroupManager + c.instancegroupmanagersrecreateinstancesrequest = instancegroupmanagersrecreateinstancesrequest + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *InstanceGroupManagersRecreateInstancesCall) Fields(s ...googleapi.Field) *InstanceGroupManagersRecreateInstancesCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *InstanceGroupManagersRecreateInstancesCall) Context(ctx context.Context) *InstanceGroupManagersRecreateInstancesCall { + c.ctx_ = ctx + return c +} + +func (c *InstanceGroupManagersRecreateInstancesCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancegroupmanagersrecreateinstancesrequest) + if err != nil { + return nil, err + } + ctype := "application/json" + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/recreateInstances") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "zone": c.zone, + "instanceGroupManager": c.instanceGroupManager, + }) + req.Header.Set("Content-Type", ctype) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "compute.instanceGroupManagers.recreateInstances" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *InstanceGroupManagersRecreateInstancesCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Schedules a group action to recreate the specified instances in the managed instance group. The instances are deleted and recreated using the current instance template for the managed instance group. This operation is marked as DONE when the action is scheduled even if the instances have not yet been recreated. You must separately verify the status of the recreating action with the listmanagedinstances method.", + // "httpMethod": "POST", + // "id": "compute.instanceGroupManagers.recreateInstances", + // "parameterOrder": [ + // "project", + // "zone", + // "instanceGroupManager" + // ], + // "parameters": { + // "instanceGroupManager": { + // "description": "The name of the managed instance group.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone where the managed instance group is located.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/recreateInstances", + // "request": { + // "$ref": "InstanceGroupManagersRecreateInstancesRequest" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.instanceGroupManagers.resize": + +type InstanceGroupManagersResizeCall struct { + s *Service + project string + zone string + instanceGroupManager string + urlParams_ gensupport.URLParams + ctx_ context.Context +} + +// Resize: Resizes the managed instance group. If you increase the size, +// the group creates new instances using the current instance template. +// If you decrease the size, the group deletes instances. The resize +// operation is marked DONE when the resize actions are scheduled even +// if the group has not yet added or deleted any instances. You must +// separately verify the status of the creating or deleting actions with +// the listmanagedinstances method. +func (r *InstanceGroupManagersService) Resize(project string, zone string, instanceGroupManager string, size int64) *InstanceGroupManagersResizeCall { + c := &InstanceGroupManagersResizeCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.zone = zone + c.instanceGroupManager = instanceGroupManager + c.urlParams_.Set("size", fmt.Sprint(size)) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. 
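+//
+// An end-to-end sketch (identifiers are illustrative; error handling
+// elided):
+//
+//   op, err := svc.InstanceGroupManagers.
+//           Resize("my-project", "us-central1-f", "my-igm", 3).
+//           Fields("name").
+//           Do()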
+func (c *InstanceGroupManagersResizeCall) Fields(s ...googleapi.Field) *InstanceGroupManagersResizeCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *InstanceGroupManagersResizeCall) Context(ctx context.Context) *InstanceGroupManagersResizeCall { + c.ctx_ = ctx + return c +} + +func (c *InstanceGroupManagersResizeCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/resize") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "zone": c.zone, + "instanceGroupManager": c.instanceGroupManager, + }) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "compute.instanceGroupManagers.resize" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *InstanceGroupManagersResizeCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Resizes the managed instance group. If you increase the size, the group creates new instances using the current instance template. If you decrease the size, the group deletes instances. The resize operation is marked DONE when the resize actions are scheduled even if the group has not yet added or deleted any instances. 
You must separately verify the status of the creating or deleting actions with the listmanagedinstances method.", + // "httpMethod": "POST", + // "id": "compute.instanceGroupManagers.resize", + // "parameterOrder": [ + // "project", + // "zone", + // "instanceGroupManager", + // "size" + // ], + // "parameters": { + // "instanceGroupManager": { + // "description": "The name of the managed instance group.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "size": { + // "description": "The number of running instances that the managed instance group should maintain at any given time. The group automatically adds or removes instances to maintain the number of instances specified by this parameter.", + // "format": "int32", + // "location": "query", + // "required": true, + // "type": "integer" + // }, + // "zone": { + // "description": "The name of the zone where the managed instance group is located.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/resize", + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.instanceGroupManagers.setInstanceTemplate": + +type InstanceGroupManagersSetInstanceTemplateCall struct { + s *Service + project string + zone string + instanceGroupManager string + instancegroupmanagerssetinstancetemplaterequest *InstanceGroupManagersSetInstanceTemplateRequest + urlParams_ gensupport.URLParams + ctx_ context.Context +} + +// SetInstanceTemplate: Specifies the instance template to use when +// creating new instances in this group. The templates for existing +// instances in the group do not change unless you recreate them. +func (r *InstanceGroupManagersService) SetInstanceTemplate(project string, zone string, instanceGroupManager string, instancegroupmanagerssetinstancetemplaterequest *InstanceGroupManagersSetInstanceTemplateRequest) *InstanceGroupManagersSetInstanceTemplateCall { + c := &InstanceGroupManagersSetInstanceTemplateCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.zone = zone + c.instanceGroupManager = instanceGroupManager + c.instancegroupmanagerssetinstancetemplaterequest = instancegroupmanagerssetinstancetemplaterequest + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *InstanceGroupManagersSetInstanceTemplateCall) Fields(s ...googleapi.Field) *InstanceGroupManagersSetInstanceTemplateCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. 
+func (c *InstanceGroupManagersSetInstanceTemplateCall) Context(ctx context.Context) *InstanceGroupManagersSetInstanceTemplateCall { + c.ctx_ = ctx + return c +} + +func (c *InstanceGroupManagersSetInstanceTemplateCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancegroupmanagerssetinstancetemplaterequest) + if err != nil { + return nil, err + } + ctype := "application/json" + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/setInstanceTemplate") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "zone": c.zone, + "instanceGroupManager": c.instanceGroupManager, + }) + req.Header.Set("Content-Type", ctype) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "compute.instanceGroupManagers.setInstanceTemplate" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *InstanceGroupManagersSetInstanceTemplateCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Specifies the instance template to use when creating new instances in this group. 
The templates for existing instances in the group do not change unless you recreate them.", + // "httpMethod": "POST", + // "id": "compute.instanceGroupManagers.setInstanceTemplate", + // "parameterOrder": [ + // "project", + // "zone", + // "instanceGroupManager" + // ], + // "parameters": { + // "instanceGroupManager": { + // "description": "The name of the managed instance group.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone where the managed instance group is located.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/setInstanceTemplate", + // "request": { + // "$ref": "InstanceGroupManagersSetInstanceTemplateRequest" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.instanceGroupManagers.setTargetPools": + +type InstanceGroupManagersSetTargetPoolsCall struct { + s *Service + project string + zone string + instanceGroupManager string + instancegroupmanagerssettargetpoolsrequest *InstanceGroupManagersSetTargetPoolsRequest + urlParams_ gensupport.URLParams + ctx_ context.Context +} + +// SetTargetPools: Modifies the target pools to which all instances in +// this managed instance group are assigned. The target pools +// automatically apply to all of the instances in the managed instance +// group. This operation is marked DONE when you make the request even +// if the instances have not yet been added to their target pools. The +// change might take some time to apply to all of the instances in the +// group depending on the size of the group. +func (r *InstanceGroupManagersService) SetTargetPools(project string, zone string, instanceGroupManager string, instancegroupmanagerssettargetpoolsrequest *InstanceGroupManagersSetTargetPoolsRequest) *InstanceGroupManagersSetTargetPoolsCall { + c := &InstanceGroupManagersSetTargetPoolsCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.zone = zone + c.instanceGroupManager = instanceGroupManager + c.instancegroupmanagerssettargetpoolsrequest = instancegroupmanagerssettargetpoolsrequest + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *InstanceGroupManagersSetTargetPoolsCall) Fields(s ...googleapi.Field) *InstanceGroupManagersSetTargetPoolsCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. 
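+//
+// Separately, a minimal request sketch for this call (assumes a
+// configured *Service named svc; the project, zone, group name, and
+// target pool URL are placeholder values):
+//
+//   req := &InstanceGroupManagersSetTargetPoolsRequest{
+//       TargetPools: []string{"regions/us-central1/targetPools/my-pool"},
+//   }
+//   op, err := svc.InstanceGroupManagers.SetTargetPools(
+//       "my-project", "us-central1-f", "my-mig", req).Do()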
+func (c *InstanceGroupManagersSetTargetPoolsCall) Context(ctx context.Context) *InstanceGroupManagersSetTargetPoolsCall { + c.ctx_ = ctx + return c +} + +func (c *InstanceGroupManagersSetTargetPoolsCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancegroupmanagerssettargetpoolsrequest) + if err != nil { + return nil, err + } + ctype := "application/json" + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/setTargetPools") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "zone": c.zone, + "instanceGroupManager": c.instanceGroupManager, + }) + req.Header.Set("Content-Type", ctype) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "compute.instanceGroupManagers.setTargetPools" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *InstanceGroupManagersSetTargetPoolsCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Modifies the target pools to which all instances in this managed instance group are assigned. The target pools automatically apply to all of the instances in the managed instance group. This operation is marked DONE when you make the request even if the instances have not yet been added to their target pools. 
The change might take some time to apply to all of the instances in the group depending on the size of the group.", + // "httpMethod": "POST", + // "id": "compute.instanceGroupManagers.setTargetPools", + // "parameterOrder": [ + // "project", + // "zone", + // "instanceGroupManager" + // ], + // "parameters": { + // "instanceGroupManager": { + // "description": "The name of the managed instance group.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone where the managed instance group is located.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/setTargetPools", + // "request": { + // "$ref": "InstanceGroupManagersSetTargetPoolsRequest" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.instanceGroups.addInstances": + +type InstanceGroupsAddInstancesCall struct { + s *Service + project string + zone string + instanceGroup string + instancegroupsaddinstancesrequest *InstanceGroupsAddInstancesRequest + urlParams_ gensupport.URLParams + ctx_ context.Context +} + +// AddInstances: Adds a list of instances to the specified instance +// group. All of the instances in the instance group must be in the same +// network/subnetwork. +func (r *InstanceGroupsService) AddInstances(project string, zone string, instanceGroup string, instancegroupsaddinstancesrequest *InstanceGroupsAddInstancesRequest) *InstanceGroupsAddInstancesCall { + c := &InstanceGroupsAddInstancesCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.zone = zone + c.instanceGroup = instanceGroup + c.instancegroupsaddinstancesrequest = instancegroupsaddinstancesrequest + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *InstanceGroupsAddInstancesCall) Fields(s ...googleapi.Field) *InstanceGroupsAddInstancesCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *InstanceGroupsAddInstancesCall) Context(ctx context.Context) *InstanceGroupsAddInstancesCall { + c.ctx_ = ctx + return c +} + +func (c *InstanceGroupsAddInstancesCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancegroupsaddinstancesrequest) + if err != nil { + return nil, err + } + ctype := "application/json" + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroups/{instanceGroup}/addInstances") + urls += "?" 
+ c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "zone": c.zone, + "instanceGroup": c.instanceGroup, + }) + req.Header.Set("Content-Type", ctype) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "compute.instanceGroups.addInstances" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *InstanceGroupsAddInstancesCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Adds a list of instances to the specified instance group. All of the instances in the instance group must be in the same network/subnetwork.", + // "httpMethod": "POST", + // "id": "compute.instanceGroups.addInstances", + // "parameterOrder": [ + // "project", + // "zone", + // "instanceGroup" + // ], + // "parameters": { + // "instanceGroup": { + // "description": "The name of the instance group where you are adding instances.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone where the instance group is located.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/zones/{zone}/instanceGroups/{instanceGroup}/addInstances", + // "request": { + // "$ref": "InstanceGroupsAddInstancesRequest" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.instanceGroups.aggregatedList": + +type InstanceGroupsAggregatedListCall struct { + s *Service + project string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context +} + +// AggregatedList: Retrieves the list of instance groups and sorts them +// by zone. 
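+//
+// A minimal usage sketch (assumes a configured *Service named svc and a
+// placeholder project ID; field names follow this package's
+// InstanceGroupAggregatedList model, and explicit pagination via
+// PageToken is elided):
+//
+//   agg, err := svc.InstanceGroups.AggregatedList("my-project").Do()
+//   if err != nil {
+//       // handle the error
+//   }
+//   for zone, scoped := range agg.Items {
+//       fmt.Println(zone, len(scoped.InstanceGroups))
+//   }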
+func (r *InstanceGroupsService) AggregatedList(project string) *InstanceGroupsAggregatedListCall {
+ c := &InstanceGroupsAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)}
+ c.project = project
+ return c
+}
+
+// Filter sets the optional parameter "filter": Sets a filter expression
+// for filtering listed resources, in the form filter={expression}. Your
+// {expression} must be in the format: field_name comparison_string
+// literal_string.
+//
+// The field_name is the name of the field you want to compare. Only
+// atomic field types are supported (string, number, boolean). The
+// comparison_string must be either eq (equals) or ne (not equals). The
+// literal_string is the string value to filter to. The literal value
+// must be valid for the type of field you are filtering by (string,
+// number, boolean). For string fields, the literal value is interpreted
+// as a regular expression using RE2 syntax. The literal value must
+// match the entire field.
+//
+// For example, filter=name ne example-instance.
+//
+// Compute Engine Beta API Only: If you use filtering in the Beta API,
+// you can also filter on nested fields. For example, you could filter
+// on instances that have set the scheduling.automaticRestart field to
+// true. In particular, use filtering on nested fields to take advantage
+// of instance labels to organize and filter results based on label
+// values.
+//
+// The Beta API also supports filtering on multiple expressions by
+// providing each separate expression within parentheses. For example,
+// (scheduling.automaticRestart eq true) (zone eq us-central1-f).
+// Multiple expressions are treated as AND expressions meaning that
+// resources must match all expressions to pass the filters.
+func (c *InstanceGroupsAggregatedListCall) Filter(filter string) *InstanceGroupsAggregatedListCall {
+ c.urlParams_.Set("filter", filter)
+ return c
+}
+
+// MaxResults sets the optional parameter "maxResults": The maximum
+// number of results per page that Compute Engine should return. If the
+// number of available results is larger than maxResults, Compute Engine
+// returns a nextPageToken that can be used to get the next page of
+// results in subsequent list requests.
+func (c *InstanceGroupsAggregatedListCall) MaxResults(maxResults int64) *InstanceGroupsAggregatedListCall {
+ c.urlParams_.Set("maxResults", fmt.Sprint(maxResults))
+ return c
+}
+
+// PageToken sets the optional parameter "pageToken": Specifies a page
+// token to use. Set pageToken to the nextPageToken returned by a
+// previous list request to get the next page of results.
+func (c *InstanceGroupsAggregatedListCall) PageToken(pageToken string) *InstanceGroupsAggregatedListCall {
+ c.urlParams_.Set("pageToken", pageToken)
+ return c
+}
+
+// Fields allows partial responses to be retrieved. See
+// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *InstanceGroupsAggregatedListCall) Fields(s ...googleapi.Field) *InstanceGroupsAggregatedListCall {
+ c.urlParams_.Set("fields", googleapi.CombineFields(s))
+ return c
+}
+
+// IfNoneMatch sets the optional parameter which makes the operation
+// fail if the object's ETag matches the given value. This is useful for
+// getting updates only after the object has changed since the last
+// request. Use googleapi.IsNotModified to check whether the response
+// error from Do is the result of If-None-Match.
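+//
+// For example (a sketch; the entity tag value is a placeholder):
+//
+//   _, err := c.IfNoneMatch(`"abc123"`).Do()
+//   if googleapi.IsNotModified(err) {
+//       // the cached copy is still current
+//   }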
+func (c *InstanceGroupsAggregatedListCall) IfNoneMatch(entityTag string) *InstanceGroupsAggregatedListCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *InstanceGroupsAggregatedListCall) Context(ctx context.Context) *InstanceGroupsAggregatedListCall { + c.ctx_ = ctx + return c +} + +func (c *InstanceGroupsAggregatedListCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/instanceGroups") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + }) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + req.Header.Set("If-None-Match", c.ifNoneMatch_) + } + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "compute.instanceGroups.aggregatedList" call. +// Exactly one of *InstanceGroupAggregatedList or error will be non-nil. +// Any non-2xx status code is an error. Response headers are in either +// *InstanceGroupAggregatedList.ServerResponse.Header or (if a response +// was returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *InstanceGroupsAggregatedListCall) Do(opts ...googleapi.CallOption) (*InstanceGroupAggregatedList, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &InstanceGroupAggregatedList{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Retrieves the list of instance groups and sorts them by zone.", + // "httpMethod": "GET", + // "id": "compute.instanceGroups.aggregatedList", + // "parameterOrder": [ + // "project" + // ], + // "parameters": { + // "filter": { + // "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, filter=name ne example-instance.\n\nCompute Engine Beta API Only: If you use filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. 
In particular, use filtering on nested fields to take advantage of instance labels to organize and filter results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions meaning that resources must match all expressions to pass the filters.", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that Compute Engine should return. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests.", + // "format": "uint32", + // "location": "query", + // "maximum": "500", + // "minimum": "0", + // "type": "integer" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "location": "query", + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/aggregated/instanceGroups", + // "response": { + // "$ref": "InstanceGroupAggregatedList" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// method id "compute.instanceGroups.delete": + +type InstanceGroupsDeleteCall struct { + s *Service + project string + zone string + instanceGroup string + urlParams_ gensupport.URLParams + ctx_ context.Context +} + +// Delete: Deletes the specified instance group. The instances in the +// group are not deleted. +func (r *InstanceGroupsService) Delete(project string, zone string, instanceGroup string) *InstanceGroupsDeleteCall { + c := &InstanceGroupsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.zone = zone + c.instanceGroup = instanceGroup + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *InstanceGroupsDeleteCall) Fields(s ...googleapi.Field) *InstanceGroupsDeleteCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *InstanceGroupsDeleteCall) Context(ctx context.Context) *InstanceGroupsDeleteCall { + c.ctx_ = ctx + return c +} + +func (c *InstanceGroupsDeleteCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroups/{instanceGroup}") + urls += "?" 
+ c.urlParams_.Encode() + req, _ := http.NewRequest("DELETE", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "zone": c.zone, + "instanceGroup": c.instanceGroup, + }) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "compute.instanceGroups.delete" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *InstanceGroupsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Deletes the specified instance group. The instances in the group are not deleted.", + // "httpMethod": "DELETE", + // "id": "compute.instanceGroups.delete", + // "parameterOrder": [ + // "project", + // "zone", + // "instanceGroup" + // ], + // "parameters": { + // "instanceGroup": { + // "description": "The name of the instance group to delete.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone where the instance group is located.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/zones/{zone}/instanceGroups/{instanceGroup}", + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.instanceGroups.get": + +type InstanceGroupsGetCall struct { + s *Service + project string + zone string + instanceGroup string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context +} + +// Get: Returns the specified instance group resource. +func (r *InstanceGroupsService) Get(project string, zone string, instanceGroup string) *InstanceGroupsGetCall { + c := &InstanceGroupsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.zone = zone + c.instanceGroup = instanceGroup + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. 
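+//
+// For example, to retrieve only the group's name and size (a sketch;
+// the field names follow the InstanceGroup resource):
+//
+//   group, err := c.Fields("name", "size").Do()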
+func (c *InstanceGroupsGetCall) Fields(s ...googleapi.Field) *InstanceGroupsGetCall {
+ c.urlParams_.Set("fields", googleapi.CombineFields(s))
+ return c
+}
+
+// IfNoneMatch sets the optional parameter which makes the operation
+// fail if the object's ETag matches the given value. This is useful for
+// getting updates only after the object has changed since the last
+// request. Use googleapi.IsNotModified to check whether the response
+// error from Do is the result of If-None-Match.
+func (c *InstanceGroupsGetCall) IfNoneMatch(entityTag string) *InstanceGroupsGetCall {
+ c.ifNoneMatch_ = entityTag
+ return c
+}
+
+// Context sets the context to be used in this call's Do method. Any
+// pending HTTP request will be aborted if the provided context is
+// canceled.
+func (c *InstanceGroupsGetCall) Context(ctx context.Context) *InstanceGroupsGetCall {
+ c.ctx_ = ctx
+ return c
+}
+
+func (c *InstanceGroupsGetCall) doRequest(alt string) (*http.Response, error) {
+ var body io.Reader = nil
+ c.urlParams_.Set("alt", alt)
+ urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroups/{instanceGroup}")
+ urls += "?" + c.urlParams_.Encode()
+ req, _ := http.NewRequest("GET", urls, body)
+ googleapi.Expand(req.URL, map[string]string{
+ "project": c.project,
+ "zone": c.zone,
+ "instanceGroup": c.instanceGroup,
+ })
+ req.Header.Set("User-Agent", c.s.userAgent())
+ if c.ifNoneMatch_ != "" {
+ req.Header.Set("If-None-Match", c.ifNoneMatch_)
+ }
+ if c.ctx_ != nil {
+ return ctxhttp.Do(c.ctx_, c.s.client, req)
+ }
+ return c.s.client.Do(req)
+}
+
+// Do executes the "compute.instanceGroups.get" call.
+// Exactly one of *InstanceGroup or error will be non-nil. Any non-2xx
+// status code is an error. Response headers are in either
+// *InstanceGroup.ServerResponse.Header or (if a response was returned
+// at all) in error.(*googleapi.Error).Header. Use
+// googleapi.IsNotModified to check whether the returned error was
+// because http.StatusNotModified was returned.
+func (c *InstanceGroupsGetCall) Do(opts ...googleapi.CallOption) (*InstanceGroup, error) {
+ gensupport.SetOptions(c.urlParams_, opts...)
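+ // doRequest may return a non-nil *http.Response together with an
+ // error; the StatusNotModified branch below runs first so that an
+ // If-None-Match hit surfaces as a *googleapi.Error rather than as a
+ // decode failure.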
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &InstanceGroup{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Returns the specified instance group resource.", + // "httpMethod": "GET", + // "id": "compute.instanceGroups.get", + // "parameterOrder": [ + // "project", + // "zone", + // "instanceGroup" + // ], + // "parameters": { + // "instanceGroup": { + // "description": "The name of the instance group.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone where the instance group is located.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/zones/{zone}/instanceGroups/{instanceGroup}", + // "response": { + // "$ref": "InstanceGroup" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// method id "compute.instanceGroups.insert": + +type InstanceGroupsInsertCall struct { + s *Service + project string + zone string + instancegroup *InstanceGroup + urlParams_ gensupport.URLParams + ctx_ context.Context +} + +// Insert: Creates an instance group in the specified project using the +// parameters that are included in the request. +func (r *InstanceGroupsService) Insert(project string, zone string, instancegroup *InstanceGroup) *InstanceGroupsInsertCall { + c := &InstanceGroupsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.zone = zone + c.instancegroup = instancegroup + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *InstanceGroupsInsertCall) Fields(s ...googleapi.Field) *InstanceGroupsInsertCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *InstanceGroupsInsertCall) Context(ctx context.Context) *InstanceGroupsInsertCall { + c.ctx_ = ctx + return c +} + +func (c *InstanceGroupsInsertCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancegroup) + if err != nil { + return nil, err + } + ctype := "application/json" + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroups") + urls += "?" 
+ c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "zone": c.zone, + }) + req.Header.Set("Content-Type", ctype) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "compute.instanceGroups.insert" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *InstanceGroupsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Creates an instance group in the specified project using the parameters that are included in the request.", + // "httpMethod": "POST", + // "id": "compute.instanceGroups.insert", + // "parameterOrder": [ + // "project", + // "zone" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone where you want to create the instance group.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/zones/{zone}/instanceGroups", + // "request": { + // "$ref": "InstanceGroup" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.instanceGroups.list": + +type InstanceGroupsListCall struct { + s *Service + project string + zone string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context +} + +// List: Retrieves the list of instance groups that are located in the +// specified project and zone. +func (r *InstanceGroupsService) List(project string, zone string) *InstanceGroupsListCall { + c := &InstanceGroupsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.zone = zone + return c +} + +// Filter sets the optional parameter "filter": Sets a filter expression +// for filtering listed resources, in the form filter={expression}. Your +// {expression} must be in the format: field_name comparison_string +// literal_string. +// +// The field_name is the name of the field you want to compare. Only +// atomic field types are supported (string, number, boolean). 
The
+// comparison_string must be either eq (equals) or ne (not equals). The
+// literal_string is the string value to filter to. The literal value
+// must be valid for the type of field you are filtering by (string,
+// number, boolean). For string fields, the literal value is interpreted
+// as a regular expression using RE2 syntax. The literal value must
+// match the entire field.
+//
+// For example, filter=name ne example-instance.
+//
+// Compute Engine Beta API Only: If you use filtering in the Beta API,
+// you can also filter on nested fields. For example, you could filter
+// on instances that have set the scheduling.automaticRestart field to
+// true. In particular, use filtering on nested fields to take advantage
+// of instance labels to organize and filter results based on label
+// values.
+//
+// The Beta API also supports filtering on multiple expressions by
+// providing each separate expression within parentheses. For example,
+// (scheduling.automaticRestart eq true) (zone eq us-central1-f).
+// Multiple expressions are treated as AND expressions meaning that
+// resources must match all expressions to pass the filters.
+func (c *InstanceGroupsListCall) Filter(filter string) *InstanceGroupsListCall {
+ c.urlParams_.Set("filter", filter)
+ return c
+}
+
+// MaxResults sets the optional parameter "maxResults": The maximum
+// number of results per page that Compute Engine should return. If the
+// number of available results is larger than maxResults, Compute Engine
+// returns a nextPageToken that can be used to get the next page of
+// results in subsequent list requests.
+func (c *InstanceGroupsListCall) MaxResults(maxResults int64) *InstanceGroupsListCall {
+ c.urlParams_.Set("maxResults", fmt.Sprint(maxResults))
+ return c
+}
+
+// PageToken sets the optional parameter "pageToken": Specifies a page
+// token to use. Set pageToken to the nextPageToken returned by a
+// previous list request to get the next page of results.
+func (c *InstanceGroupsListCall) PageToken(pageToken string) *InstanceGroupsListCall {
+ c.urlParams_.Set("pageToken", pageToken)
+ return c
+}
+
+// Fields allows partial responses to be retrieved. See
+// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *InstanceGroupsListCall) Fields(s ...googleapi.Field) *InstanceGroupsListCall {
+ c.urlParams_.Set("fields", googleapi.CombineFields(s))
+ return c
+}
+
+// IfNoneMatch sets the optional parameter which makes the operation
+// fail if the object's ETag matches the given value. This is useful for
+// getting updates only after the object has changed since the last
+// request. Use googleapi.IsNotModified to check whether the response
+// error from Do is the result of If-None-Match.
+func (c *InstanceGroupsListCall) IfNoneMatch(entityTag string) *InstanceGroupsListCall {
+ c.ifNoneMatch_ = entityTag
+ return c
+}
+
+// Context sets the context to be used in this call's Do method. Any
+// pending HTTP request will be aborted if the provided context is
+// canceled.
+func (c *InstanceGroupsListCall) Context(ctx context.Context) *InstanceGroupsListCall {
+ c.ctx_ = ctx
+ return c
+}
+
+func (c *InstanceGroupsListCall) doRequest(alt string) (*http.Response, error) {
+ var body io.Reader = nil
+ c.urlParams_.Set("alt", alt)
+ urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroups")
+ urls += "?"
+ c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "zone": c.zone, + }) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + req.Header.Set("If-None-Match", c.ifNoneMatch_) + } + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "compute.instanceGroups.list" call. +// Exactly one of *InstanceGroupList or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *InstanceGroupList.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *InstanceGroupsListCall) Do(opts ...googleapi.CallOption) (*InstanceGroupList, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &InstanceGroupList{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Retrieves the list of instance groups that are located in the specified project and zone.", + // "httpMethod": "GET", + // "id": "compute.instanceGroups.list", + // "parameterOrder": [ + // "project", + // "zone" + // ], + // "parameters": { + // "filter": { + // "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, filter=name ne example-instance.\n\nCompute Engine Beta API Only: If you use filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. In particular, use filtering on nested fields to take advantage of instance labels to organize and filter results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions meaning that resources must match all expressions to pass the filters.", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that Compute Engine should return. 
If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests.", + // "format": "uint32", + // "location": "query", + // "maximum": "500", + // "minimum": "0", + // "type": "integer" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "location": "query", + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone where the instance group is located.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/zones/{zone}/instanceGroups", + // "response": { + // "$ref": "InstanceGroupList" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// method id "compute.instanceGroups.listInstances": + +type InstanceGroupsListInstancesCall struct { + s *Service + project string + zone string + instanceGroup string + instancegroupslistinstancesrequest *InstanceGroupsListInstancesRequest + urlParams_ gensupport.URLParams + ctx_ context.Context +} + +// ListInstances: Lists the instances in the specified instance group. +func (r *InstanceGroupsService) ListInstances(project string, zone string, instanceGroup string, instancegroupslistinstancesrequest *InstanceGroupsListInstancesRequest) *InstanceGroupsListInstancesCall { + c := &InstanceGroupsListInstancesCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.zone = zone + c.instanceGroup = instanceGroup + c.instancegroupslistinstancesrequest = instancegroupslistinstancesrequest + return c +} + +// Filter sets the optional parameter "filter": Sets a filter expression +// for filtering listed resources, in the form filter={expression}. Your +// {expression} must be in the format: field_name comparison_string +// literal_string. +// +// The field_name is the name of the field you want to compare. Only +// atomic field types are supported (string, number, boolean). The +// comparison_string must be either eq (equals) or ne (not equals). The +// literal_string is the string value to filter to. The literal value +// must be valid for the type of field you are filtering by (string, +// number, boolean). For string fields, the literal value is interpreted +// as a regular expression using RE2 syntax. The literal value must +// match the entire field. +// +// For example, filter=name ne example-instance. +// +// Compute Engine Beta API Only: If you use filtering in the Beta API, +// you can also filter on nested fields. For example, you could filter +// on instances that have set the scheduling.automaticRestart field to +// true. In particular, use filtering on nested fields to take advantage +// of instance labels to organize and filter results based on label +// values. +// +// The Beta API also supports filtering on multiple expressions by +// providing each separate expression within parentheses. 
For example, +// (scheduling.automaticRestart eq true) (zone eq us-central1-f). +// Multiple expressions are treated as AND expressions meaning that +// resources must match all expressions to pass the filters. +func (c *InstanceGroupsListInstancesCall) Filter(filter string) *InstanceGroupsListInstancesCall { + c.urlParams_.Set("filter", filter) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that Compute Engine should return. If the +// number of available results is larger than maxResults, Compute Engine +// returns a nextPageToken that can be used to get the next page of +// results in subsequent list requests. +func (c *InstanceGroupsListInstancesCall) MaxResults(maxResults int64) *InstanceGroupsListInstancesCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set pageToken to the nextPageToken returned by a +// previous list request to get the next page of results. +func (c *InstanceGroupsListInstancesCall) PageToken(pageToken string) *InstanceGroupsListInstancesCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *InstanceGroupsListInstancesCall) Fields(s ...googleapi.Field) *InstanceGroupsListInstancesCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *InstanceGroupsListInstancesCall) Context(ctx context.Context) *InstanceGroupsListInstancesCall { + c.ctx_ = ctx + return c +} + +func (c *InstanceGroupsListInstancesCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancegroupslistinstancesrequest) + if err != nil { + return nil, err + } + ctype := "application/json" + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroups/{instanceGroup}/listInstances") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "zone": c.zone, + "instanceGroup": c.instanceGroup, + }) + req.Header.Set("Content-Type", ctype) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "compute.instanceGroups.listInstances" call. +// Exactly one of *InstanceGroupsListInstances or error will be non-nil. +// Any non-2xx status code is an error. Response headers are in either +// *InstanceGroupsListInstances.ServerResponse.Header or (if a response +// was returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *InstanceGroupsListInstancesCall) Do(opts ...googleapi.CallOption) (*InstanceGroupsListInstances, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
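+ // Options passed to Do have been folded into the query parameters by
+ // gensupport.SetOptions above; doRequest now serializes the request
+ // body and issues the HTTP call.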
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &InstanceGroupsListInstances{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Lists the instances in the specified instance group.", + // "httpMethod": "POST", + // "id": "compute.instanceGroups.listInstances", + // "parameterOrder": [ + // "project", + // "zone", + // "instanceGroup" + // ], + // "parameters": { + // "filter": { + // "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, filter=name ne example-instance.\n\nCompute Engine Beta API Only: If you use filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. In particular, use filtering on nested fields to take advantage of instance labels to organize and filter results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions meaning that resources must match all expressions to pass the filters.", + // "location": "query", + // "type": "string" + // }, + // "instanceGroup": { + // "description": "The name of the instance group from which you want to generate a list of included instances.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that Compute Engine should return. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests.", + // "format": "uint32", + // "location": "query", + // "maximum": "500", + // "minimum": "0", + // "type": "integer" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. 
Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "location": "query", + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone where the instance group is located.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/zones/{zone}/instanceGroups/{instanceGroup}/listInstances", + // "request": { + // "$ref": "InstanceGroupsListInstancesRequest" + // }, + // "response": { + // "$ref": "InstanceGroupsListInstances" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// method id "compute.instanceGroups.removeInstances": + +type InstanceGroupsRemoveInstancesCall struct { + s *Service + project string + zone string + instanceGroup string + instancegroupsremoveinstancesrequest *InstanceGroupsRemoveInstancesRequest + urlParams_ gensupport.URLParams + ctx_ context.Context +} + +// RemoveInstances: Removes one or more instances from the specified +// instance group, but does not delete those instances. +func (r *InstanceGroupsService) RemoveInstances(project string, zone string, instanceGroup string, instancegroupsremoveinstancesrequest *InstanceGroupsRemoveInstancesRequest) *InstanceGroupsRemoveInstancesCall { + c := &InstanceGroupsRemoveInstancesCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.zone = zone + c.instanceGroup = instanceGroup + c.instancegroupsremoveinstancesrequest = instancegroupsremoveinstancesrequest + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *InstanceGroupsRemoveInstancesCall) Fields(s ...googleapi.Field) *InstanceGroupsRemoveInstancesCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *InstanceGroupsRemoveInstancesCall) Context(ctx context.Context) *InstanceGroupsRemoveInstancesCall { + c.ctx_ = ctx + return c +} + +func (c *InstanceGroupsRemoveInstancesCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancegroupsremoveinstancesrequest) + if err != nil { + return nil, err + } + ctype := "application/json" + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroups/{instanceGroup}/removeInstances") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "zone": c.zone, + "instanceGroup": c.instanceGroup, + }) + req.Header.Set("Content-Type", ctype) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "compute.instanceGroups.removeInstances" call. 
+// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *InstanceGroupsRemoveInstancesCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Removes one or more instances from the specified instance group, but does not delete those instances.", + // "httpMethod": "POST", + // "id": "compute.instanceGroups.removeInstances", + // "parameterOrder": [ + // "project", + // "zone", + // "instanceGroup" + // ], + // "parameters": { + // "instanceGroup": { + // "description": "The name of the instance group where the specified instances will be removed.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone where the instance group is located.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/zones/{zone}/instanceGroups/{instanceGroup}/removeInstances", + // "request": { + // "$ref": "InstanceGroupsRemoveInstancesRequest" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.instanceGroups.setNamedPorts": + +type InstanceGroupsSetNamedPortsCall struct { + s *Service + project string + zone string + instanceGroup string + instancegroupssetnamedportsrequest *InstanceGroupsSetNamedPortsRequest + urlParams_ gensupport.URLParams + ctx_ context.Context +} + +// SetNamedPorts: Sets the named ports for the specified instance group. +func (r *InstanceGroupsService) SetNamedPorts(project string, zone string, instanceGroup string, instancegroupssetnamedportsrequest *InstanceGroupsSetNamedPortsRequest) *InstanceGroupsSetNamedPortsCall { + c := &InstanceGroupsSetNamedPortsCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.zone = zone + c.instanceGroup = instanceGroup + c.instancegroupssetnamedportsrequest = instancegroupssetnamedportsrequest + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. 
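+//
+// Separately, a minimal sketch of the request body this call expects
+// (hypothetical values; NamedPort pairs a name with a port number per
+// this package's model types):
+//
+//   req := &InstanceGroupsSetNamedPortsRequest{
+//       NamedPorts: []*NamedPort{{Name: "http", Port: 8080}},
+//   }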
+func (c *InstanceGroupsSetNamedPortsCall) Fields(s ...googleapi.Field) *InstanceGroupsSetNamedPortsCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *InstanceGroupsSetNamedPortsCall) Context(ctx context.Context) *InstanceGroupsSetNamedPortsCall { + c.ctx_ = ctx + return c +} + +func (c *InstanceGroupsSetNamedPortsCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancegroupssetnamedportsrequest) + if err != nil { + return nil, err + } + ctype := "application/json" + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroups/{instanceGroup}/setNamedPorts") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "zone": c.zone, + "instanceGroup": c.instanceGroup, + }) + req.Header.Set("Content-Type", ctype) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "compute.instanceGroups.setNamedPorts" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *InstanceGroupsSetNamedPortsCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Sets the named ports for the specified instance group.", + // "httpMethod": "POST", + // "id": "compute.instanceGroups.setNamedPorts", + // "parameterOrder": [ + // "project", + // "zone", + // "instanceGroup" + // ], + // "parameters": { + // "instanceGroup": { + // "description": "The name of the instance group where the named ports are updated.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone where the instance group is located.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/zones/{zone}/instanceGroups/{instanceGroup}/setNamedPorts", + // "request": { + // "$ref": "InstanceGroupsSetNamedPortsRequest" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.instanceTemplates.delete": + +type InstanceTemplatesDeleteCall struct { + s *Service + project string + instanceTemplate string + urlParams_ gensupport.URLParams + ctx_ context.Context +} + +// Delete: Deletes the specified instance template. +// For details, see https://cloud.google.com/compute/docs/reference/latest/instanceTemplates/delete +func (r *InstanceTemplatesService) Delete(project string, instanceTemplate string) *InstanceTemplatesDeleteCall { + c := &InstanceTemplatesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.instanceTemplate = instanceTemplate + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *InstanceTemplatesDeleteCall) Fields(s ...googleapi.Field) *InstanceTemplatesDeleteCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *InstanceTemplatesDeleteCall) Context(ctx context.Context) *InstanceTemplatesDeleteCall { + c.ctx_ = ctx + return c +} + +func (c *InstanceTemplatesDeleteCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/instanceTemplates/{instanceTemplate}") + urls += "?" 
+ c.urlParams_.Encode() + req, _ := http.NewRequest("DELETE", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "instanceTemplate": c.instanceTemplate, + }) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "compute.instanceTemplates.delete" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *InstanceTemplatesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Deletes the specified instance template.", + // "httpMethod": "DELETE", + // "id": "compute.instanceTemplates.delete", + // "parameterOrder": [ + // "project", + // "instanceTemplate" + // ], + // "parameters": { + // "instanceTemplate": { + // "description": "The name of the instance template to delete.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/global/instanceTemplates/{instanceTemplate}", + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.instanceTemplates.get": + +type InstanceTemplatesGetCall struct { + s *Service + project string + instanceTemplate string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context +} + +// Get: Returns the specified instance template resource. +// For details, see https://cloud.google.com/compute/docs/reference/latest/instanceTemplates/get +func (r *InstanceTemplatesService) Get(project string, instanceTemplate string) *InstanceTemplatesGetCall { + c := &InstanceTemplatesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.instanceTemplate = instanceTemplate + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. 
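+//
+// A sketch of a conditional fetch, combined with IfNoneMatch below
+// (svc and lastETag are assumed placeholders, not part of this
+// package):
+//
+//    tpl, err := svc.InstanceTemplates.Get("my-project", "my-template").
+//        IfNoneMatch(lastETag).
+//        Do()
+//    if googleapi.IsNotModified(err) {
+//        // the cached copy is still current
+//    } else if err == nil {
+//        lastETag = tpl.ServerResponse.Header.Get("Etag")
+//    }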
+func (c *InstanceTemplatesGetCall) Fields(s ...googleapi.Field) *InstanceTemplatesGetCall {
+ c.urlParams_.Set("fields", googleapi.CombineFields(s))
+ return c
+}
+
+// IfNoneMatch sets the optional parameter which makes the operation
+// fail if the object's ETag matches the given value. This is useful for
+// getting updates only after the object has changed since the last
+// request. Use googleapi.IsNotModified to check whether the response
+// error from Do is the result of If-None-Match.
+func (c *InstanceTemplatesGetCall) IfNoneMatch(entityTag string) *InstanceTemplatesGetCall {
+ c.ifNoneMatch_ = entityTag
+ return c
+}
+
+// Context sets the context to be used in this call's Do method. Any
+// pending HTTP request will be aborted if the provided context is
+// canceled.
+func (c *InstanceTemplatesGetCall) Context(ctx context.Context) *InstanceTemplatesGetCall {
+ c.ctx_ = ctx
+ return c
+}
+
+func (c *InstanceTemplatesGetCall) doRequest(alt string) (*http.Response, error) {
+ var body io.Reader = nil
+ c.urlParams_.Set("alt", alt)
+ urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/instanceTemplates/{instanceTemplate}")
+ urls += "?" + c.urlParams_.Encode()
+ req, _ := http.NewRequest("GET", urls, body)
+ googleapi.Expand(req.URL, map[string]string{
+ "project": c.project,
+ "instanceTemplate": c.instanceTemplate,
+ })
+ req.Header.Set("User-Agent", c.s.userAgent())
+ if c.ifNoneMatch_ != "" {
+ req.Header.Set("If-None-Match", c.ifNoneMatch_)
+ }
+ if c.ctx_ != nil {
+ return ctxhttp.Do(c.ctx_, c.s.client, req)
+ }
+ return c.s.client.Do(req)
+}
+
+// Do executes the "compute.instanceTemplates.get" call.
+// Exactly one of *InstanceTemplate or error will be non-nil. Any
+// non-2xx status code is an error. Response headers are in either
+// *InstanceTemplate.ServerResponse.Header or (if a response was
+// returned at all) in error.(*googleapi.Error).Header. Use
+// googleapi.IsNotModified to check whether the returned error was
+// because http.StatusNotModified was returned.
+func (c *InstanceTemplatesGetCall) Do(opts ...googleapi.CallOption) (*InstanceTemplate, error) {
+ gensupport.SetOptions(c.urlParams_, opts...)
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &InstanceTemplate{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Returns the specified instance template resource.", + // "httpMethod": "GET", + // "id": "compute.instanceTemplates.get", + // "parameterOrder": [ + // "project", + // "instanceTemplate" + // ], + // "parameters": { + // "instanceTemplate": { + // "description": "The name of the instance template.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/global/instanceTemplates/{instanceTemplate}", + // "response": { + // "$ref": "InstanceTemplate" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// method id "compute.instanceTemplates.insert": + +type InstanceTemplatesInsertCall struct { + s *Service + project string + instancetemplate *InstanceTemplate + urlParams_ gensupport.URLParams + ctx_ context.Context +} + +// Insert: Creates an instance template in the specified project using +// the data that is included in the request. +// For details, see https://cloud.google.com/compute/docs/reference/latest/instanceTemplates/insert +func (r *InstanceTemplatesService) Insert(project string, instancetemplate *InstanceTemplate) *InstanceTemplatesInsertCall { + c := &InstanceTemplatesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.instancetemplate = instancetemplate + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *InstanceTemplatesInsertCall) Fields(s ...googleapi.Field) *InstanceTemplatesInsertCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *InstanceTemplatesInsertCall) Context(ctx context.Context) *InstanceTemplatesInsertCall { + c.ctx_ = ctx + return c +} + +func (c *InstanceTemplatesInsertCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancetemplate) + if err != nil { + return nil, err + } + ctype := "application/json" + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/instanceTemplates") + urls += "?" 
+ c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + }) + req.Header.Set("Content-Type", ctype) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "compute.instanceTemplates.insert" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *InstanceTemplatesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Creates an instance template in the specified project using the data that is included in the request.", + // "httpMethod": "POST", + // "id": "compute.instanceTemplates.insert", + // "parameterOrder": [ + // "project" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/global/instanceTemplates", + // "request": { + // "$ref": "InstanceTemplate" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.instanceTemplates.list": + +type InstanceTemplatesListCall struct { + s *Service + project string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context +} + +// List: Retrieves a list of instance templates that are contained +// within the specified project and zone. +// For details, see https://cloud.google.com/compute/docs/reference/latest/instanceTemplates/list +func (r *InstanceTemplatesService) List(project string) *InstanceTemplatesListCall { + c := &InstanceTemplatesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + return c +} + +// Filter sets the optional parameter "filter": Sets a filter expression +// for filtering listed resources, in the form filter={expression}. Your +// {expression} must be in the format: field_name comparison_string +// literal_string. +// +// The field_name is the name of the field you want to compare. Only +// atomic field types are supported (string, number, boolean). The +// comparison_string must be either eq (equals) or ne (not equals). The +// literal_string is the string value to filter to. 
The literal value
+// must be valid for the type of field you are filtering by (string,
+// number, boolean). For string fields, the literal value is interpreted
+// as a regular expression using RE2 syntax. The literal value must
+// match the entire field.
+//
+// For example, filter=name ne example-instance.
+//
+// Compute Engine Beta API Only: If you use filtering in the Beta API,
+// you can also filter on nested fields. For example, you could filter
+// on instances that have set the scheduling.automaticRestart field to
+// true. In particular, use filtering on nested fields to take advantage
+// of instance labels to organize and filter results based on label
+// values.
+//
+// The Beta API also supports filtering on multiple expressions by
+// providing each separate expression within parentheses. For example,
+// (scheduling.automaticRestart eq true) (zone eq us-central1-f).
+// Multiple expressions are treated as AND expressions meaning that
+// resources must match all expressions to pass the filters.
+func (c *InstanceTemplatesListCall) Filter(filter string) *InstanceTemplatesListCall {
+ c.urlParams_.Set("filter", filter)
+ return c
+}
+
+// MaxResults sets the optional parameter "maxResults": The maximum
+// number of results per page that Compute Engine should return. If the
+// number of available results is larger than maxResults, Compute Engine
+// returns a nextPageToken that can be used to get the next page of
+// results in subsequent list requests.
+func (c *InstanceTemplatesListCall) MaxResults(maxResults int64) *InstanceTemplatesListCall {
+ c.urlParams_.Set("maxResults", fmt.Sprint(maxResults))
+ return c
+}
+
+// PageToken sets the optional parameter "pageToken": Specifies a page
+// token to use. Set pageToken to the nextPageToken returned by a
+// previous list request to get the next page of results.
+func (c *InstanceTemplatesListCall) PageToken(pageToken string) *InstanceTemplatesListCall {
+ c.urlParams_.Set("pageToken", pageToken)
+ return c
+}
+
+// Fields allows partial responses to be retrieved. See
+// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *InstanceTemplatesListCall) Fields(s ...googleapi.Field) *InstanceTemplatesListCall {
+ c.urlParams_.Set("fields", googleapi.CombineFields(s))
+ return c
+}
+
+// IfNoneMatch sets the optional parameter which makes the operation
+// fail if the object's ETag matches the given value. This is useful for
+// getting updates only after the object has changed since the last
+// request. Use googleapi.IsNotModified to check whether the response
+// error from Do is the result of If-None-Match.
+func (c *InstanceTemplatesListCall) IfNoneMatch(entityTag string) *InstanceTemplatesListCall {
+ c.ifNoneMatch_ = entityTag
+ return c
+}
+
+// Context sets the context to be used in this call's Do method. Any
+// pending HTTP request will be aborted if the provided context is
+// canceled.
+func (c *InstanceTemplatesListCall) Context(ctx context.Context) *InstanceTemplatesListCall {
+ c.ctx_ = ctx
+ return c
+}
+
+func (c *InstanceTemplatesListCall) doRequest(alt string) (*http.Response, error) {
+ var body io.Reader = nil
+ c.urlParams_.Set("alt", alt)
+ urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/instanceTemplates")
+ urls += "?"
+ c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + }) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + req.Header.Set("If-None-Match", c.ifNoneMatch_) + } + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "compute.instanceTemplates.list" call. +// Exactly one of *InstanceTemplateList or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *InstanceTemplateList.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *InstanceTemplatesListCall) Do(opts ...googleapi.CallOption) (*InstanceTemplateList, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &InstanceTemplateList{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Retrieves a list of instance templates that are contained within the specified project and zone.", + // "httpMethod": "GET", + // "id": "compute.instanceTemplates.list", + // "parameterOrder": [ + // "project" + // ], + // "parameters": { + // "filter": { + // "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, filter=name ne example-instance.\n\nCompute Engine Beta API Only: If you use filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. In particular, use filtering on nested fields to take advantage of instance labels to organize and filter results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions meaning that resources must match all expressions to pass the filters.", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that Compute Engine should return. 
If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests.", + // "format": "uint32", + // "location": "query", + // "maximum": "500", + // "minimum": "0", + // "type": "integer" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "location": "query", + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/global/instanceTemplates", + // "response": { + // "$ref": "InstanceTemplateList" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// method id "compute.instances.addAccessConfig": + +type InstancesAddAccessConfigCall struct { + s *Service + project string + zone string + instance string + accessconfig *AccessConfig + urlParams_ gensupport.URLParams + ctx_ context.Context +} + +// AddAccessConfig: Adds an access config to an instance's network +// interface. +// For details, see https://cloud.google.com/compute/docs/reference/latest/instances/addAccessConfig +func (r *InstancesService) AddAccessConfig(project string, zone string, instance string, networkInterface string, accessconfig *AccessConfig) *InstancesAddAccessConfigCall { + c := &InstancesAddAccessConfigCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.zone = zone + c.instance = instance + c.urlParams_.Set("networkInterface", networkInterface) + c.accessconfig = accessconfig + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *InstancesAddAccessConfigCall) Fields(s ...googleapi.Field) *InstancesAddAccessConfigCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *InstancesAddAccessConfigCall) Context(ctx context.Context) *InstancesAddAccessConfigCall { + c.ctx_ = ctx + return c +} + +func (c *InstancesAddAccessConfigCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.accessconfig) + if err != nil { + return nil, err + } + ctype := "application/json" + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/addAccessConfig") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "zone": c.zone, + "instance": c.instance, + }) + req.Header.Set("Content-Type", ctype) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "compute.instances.addAccessConfig" call. +// Exactly one of *Operation or error will be non-nil. 
Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *InstancesAddAccessConfigCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Adds an access config to an instance's network interface.", + // "httpMethod": "POST", + // "id": "compute.instances.addAccessConfig", + // "parameterOrder": [ + // "project", + // "zone", + // "instance", + // "networkInterface" + // ], + // "parameters": { + // "instance": { + // "description": "The instance name for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "networkInterface": { + // "description": "The name of the network interface to add to this instance.", + // "location": "query", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/zones/{zone}/instances/{instance}/addAccessConfig", + // "request": { + // "$ref": "AccessConfig" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.instances.aggregatedList": + +type InstancesAggregatedListCall struct { + s *Service + project string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context +} + +// AggregatedList: Retrieves aggregated list of instance resources. +// For details, see https://cloud.google.com/compute/docs/reference/latest/instances/aggregatedList +func (r *InstancesService) AggregatedList(project string) *InstancesAggregatedListCall { + c := &InstancesAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + return c +} + +// Filter sets the optional parameter "filter": Sets a filter expression +// for filtering listed resources, in the form filter={expression}. Your +// {expression} must be in the format: field_name comparison_string +// literal_string. +// +// The field_name is the name of the field you want to compare. Only +// atomic field types are supported (string, number, boolean). 
The
+// comparison_string must be either eq (equals) or ne (not equals). The
+// literal_string is the string value to filter to. The literal value
+// must be valid for the type of field you are filtering by (string,
+// number, boolean). For string fields, the literal value is interpreted
+// as a regular expression using RE2 syntax. The literal value must
+// match the entire field.
+//
+// For example, filter=name ne example-instance.
+//
+// Compute Engine Beta API Only: If you use filtering in the Beta API,
+// you can also filter on nested fields. For example, you could filter
+// on instances that have set the scheduling.automaticRestart field to
+// true. In particular, use filtering on nested fields to take advantage
+// of instance labels to organize and filter results based on label
+// values.
+//
+// The Beta API also supports filtering on multiple expressions by
+// providing each separate expression within parentheses. For example,
+// (scheduling.automaticRestart eq true) (zone eq us-central1-f).
+// Multiple expressions are treated as AND expressions meaning that
+// resources must match all expressions to pass the filters.
+func (c *InstancesAggregatedListCall) Filter(filter string) *InstancesAggregatedListCall {
+ c.urlParams_.Set("filter", filter)
+ return c
+}
+
+// MaxResults sets the optional parameter "maxResults": The maximum
+// number of results per page that Compute Engine should return. If the
+// number of available results is larger than maxResults, Compute Engine
+// returns a nextPageToken that can be used to get the next page of
+// results in subsequent list requests.
+func (c *InstancesAggregatedListCall) MaxResults(maxResults int64) *InstancesAggregatedListCall {
+ c.urlParams_.Set("maxResults", fmt.Sprint(maxResults))
+ return c
+}
+
+// PageToken sets the optional parameter "pageToken": Specifies a page
+// token to use. Set pageToken to the nextPageToken returned by a
+// previous list request to get the next page of results.
+func (c *InstancesAggregatedListCall) PageToken(pageToken string) *InstancesAggregatedListCall {
+ c.urlParams_.Set("pageToken", pageToken)
+ return c
+}
+
+// Fields allows partial responses to be retrieved. See
+// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *InstancesAggregatedListCall) Fields(s ...googleapi.Field) *InstancesAggregatedListCall {
+ c.urlParams_.Set("fields", googleapi.CombineFields(s))
+ return c
+}
+
+// IfNoneMatch sets the optional parameter which makes the operation
+// fail if the object's ETag matches the given value. This is useful for
+// getting updates only after the object has changed since the last
+// request. Use googleapi.IsNotModified to check whether the response
+// error from Do is the result of If-None-Match.
+func (c *InstancesAggregatedListCall) IfNoneMatch(entityTag string) *InstancesAggregatedListCall {
+ c.ifNoneMatch_ = entityTag
+ return c
+}
+
+// Context sets the context to be used in this call's Do method. Any
+// pending HTTP request will be aborted if the provided context is
+// canceled.
+func (c *InstancesAggregatedListCall) Context(ctx context.Context) *InstancesAggregatedListCall {
+ c.ctx_ = ctx
+ return c
+}
+
+func (c *InstancesAggregatedListCall) doRequest(alt string) (*http.Response, error) {
+ var body io.Reader = nil
+ c.urlParams_.Set("alt", alt)
+ urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/instances")
+ urls += "?"
+ c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + }) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + req.Header.Set("If-None-Match", c.ifNoneMatch_) + } + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "compute.instances.aggregatedList" call. +// Exactly one of *InstanceAggregatedList or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *InstanceAggregatedList.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *InstancesAggregatedListCall) Do(opts ...googleapi.CallOption) (*InstanceAggregatedList, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &InstanceAggregatedList{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Retrieves aggregated list of instance resources.", + // "httpMethod": "GET", + // "id": "compute.instances.aggregatedList", + // "parameterOrder": [ + // "project" + // ], + // "parameters": { + // "filter": { + // "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, filter=name ne example-instance.\n\nCompute Engine Beta API Only: If you use filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. In particular, use filtering on nested fields to take advantage of instance labels to organize and filter results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions meaning that resources must match all expressions to pass the filters.", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that Compute Engine should return. 
If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests.", + // "format": "uint32", + // "location": "query", + // "maximum": "500", + // "minimum": "0", + // "type": "integer" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "location": "query", + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/aggregated/instances", + // "response": { + // "$ref": "InstanceAggregatedList" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// method id "compute.instances.attachDisk": + +type InstancesAttachDiskCall struct { + s *Service + project string + zone string + instance string + attacheddisk *AttachedDisk + urlParams_ gensupport.URLParams + ctx_ context.Context +} + +// AttachDisk: Attaches a Disk resource to an instance. +// For details, see https://cloud.google.com/compute/docs/reference/latest/instances/attachDisk +func (r *InstancesService) AttachDisk(project string, zone string, instance string, attacheddisk *AttachedDisk) *InstancesAttachDiskCall { + c := &InstancesAttachDiskCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.zone = zone + c.instance = instance + c.attacheddisk = attacheddisk + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *InstancesAttachDiskCall) Fields(s ...googleapi.Field) *InstancesAttachDiskCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *InstancesAttachDiskCall) Context(ctx context.Context) *InstancesAttachDiskCall { + c.ctx_ = ctx + return c +} + +func (c *InstancesAttachDiskCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.attacheddisk) + if err != nil { + return nil, err + } + ctype := "application/json" + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/attachDisk") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "zone": c.zone, + "instance": c.instance, + }) + req.Header.Set("Content-Type", ctype) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "compute.instances.attachDisk" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. 
Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *InstancesAttachDiskCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Attaches a Disk resource to an instance.", + // "httpMethod": "POST", + // "id": "compute.instances.attachDisk", + // "parameterOrder": [ + // "project", + // "zone", + // "instance" + // ], + // "parameters": { + // "instance": { + // "description": "Instance name.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/zones/{zone}/instances/{instance}/attachDisk", + // "request": { + // "$ref": "AttachedDisk" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.instances.delete": + +type InstancesDeleteCall struct { + s *Service + project string + zone string + instance string + urlParams_ gensupport.URLParams + ctx_ context.Context +} + +// Delete: Deletes the specified Instance resource. For more +// information, see Stopping or Deleting an Instance. +// For details, see https://cloud.google.com/compute/docs/reference/latest/instances/delete +func (r *InstancesService) Delete(project string, zone string, instance string) *InstancesDeleteCall { + c := &InstancesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.zone = zone + c.instance = instance + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *InstancesDeleteCall) Fields(s ...googleapi.Field) *InstancesDeleteCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. 
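+//
+// A minimal cancellation sketch (svc is an assumed *Service, the
+// resource names are placeholders, and the time import is assumed;
+// context here is golang.org/x/net/context, as imported by this file):
+//
+//    ctx, cancel := context.WithTimeout(context.Background(), 2*time.Minute)
+//    defer cancel()
+//    op, err := svc.Instances.Delete("my-project", "us-central1-a", "my-instance").
+//        Context(ctx).
+//        Do()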
+func (c *InstancesDeleteCall) Context(ctx context.Context) *InstancesDeleteCall { + c.ctx_ = ctx + return c +} + +func (c *InstancesDeleteCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("DELETE", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "zone": c.zone, + "instance": c.instance, + }) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "compute.instances.delete" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *InstancesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Deletes the specified Instance resource. For more information, see Stopping or Deleting an Instance.", + // "httpMethod": "DELETE", + // "id": "compute.instances.delete", + // "parameterOrder": [ + // "project", + // "zone", + // "instance" + // ], + // "parameters": { + // "instance": { + // "description": "Name of the instance resource to delete.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/zones/{zone}/instances/{instance}", + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.instances.deleteAccessConfig": + +type InstancesDeleteAccessConfigCall struct { + s *Service + project string + zone string + instance string + urlParams_ gensupport.URLParams + ctx_ context.Context +} + +// DeleteAccessConfig: Deletes an access config from an instance's +// network interface. 
+// For details, see https://cloud.google.com/compute/docs/reference/latest/instances/deleteAccessConfig +func (r *InstancesService) DeleteAccessConfig(project string, zone string, instance string, accessConfig string, networkInterface string) *InstancesDeleteAccessConfigCall { + c := &InstancesDeleteAccessConfigCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.zone = zone + c.instance = instance + c.urlParams_.Set("accessConfig", accessConfig) + c.urlParams_.Set("networkInterface", networkInterface) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *InstancesDeleteAccessConfigCall) Fields(s ...googleapi.Field) *InstancesDeleteAccessConfigCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *InstancesDeleteAccessConfigCall) Context(ctx context.Context) *InstancesDeleteAccessConfigCall { + c.ctx_ = ctx + return c +} + +func (c *InstancesDeleteAccessConfigCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/deleteAccessConfig") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "zone": c.zone, + "instance": c.instance, + }) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "compute.instances.deleteAccessConfig" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *InstancesDeleteAccessConfigCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Deletes an access config from an instance's network interface.", + // "httpMethod": "POST", + // "id": "compute.instances.deleteAccessConfig", + // "parameterOrder": [ + // "project", + // "zone", + // "instance", + // "accessConfig", + // "networkInterface" + // ], + // "parameters": { + // "accessConfig": { + // "description": "The name of the access config to delete.", + // "location": "query", + // "required": true, + // "type": "string" + // }, + // "instance": { + // "description": "The instance name for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "networkInterface": { + // "description": "The name of the network interface.", + // "location": "query", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/zones/{zone}/instances/{instance}/deleteAccessConfig", + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.instances.detachDisk": + +type InstancesDetachDiskCall struct { + s *Service + project string + zone string + instance string + urlParams_ gensupport.URLParams + ctx_ context.Context +} + +// DetachDisk: Detaches a disk from an instance. +// For details, see https://cloud.google.com/compute/docs/reference/latest/instances/detachDisk +func (r *InstancesService) DetachDisk(project string, zone string, instance string, deviceName string) *InstancesDetachDiskCall { + c := &InstancesDetachDiskCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.zone = zone + c.instance = instance + c.urlParams_.Set("deviceName", deviceName) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *InstancesDetachDiskCall) Fields(s ...googleapi.Field) *InstancesDetachDiskCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. 
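+//
+// A usage sketch with placeholder names (svc is an assumed *Service
+// and ctx an assumed context.Context); the deviceName argument is the
+// disk's device name on the instance, not the disk resource name:
+//
+//    op, err := svc.Instances.DetachDisk("my-project", "us-central1-a", "my-instance", "persistent-disk-1").
+//        Context(ctx).
+//        Do()
+//    for err == nil && op.Status != "DONE" {
+//        // a real caller would sleep between polls
+//        op, err = svc.ZoneOperations.Get("my-project", "us-central1-a", op.Name).Context(ctx).Do()
+//    }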
+func (c *InstancesDetachDiskCall) Context(ctx context.Context) *InstancesDetachDiskCall { + c.ctx_ = ctx + return c +} + +func (c *InstancesDetachDiskCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/detachDisk") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "zone": c.zone, + "instance": c.instance, + }) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "compute.instances.detachDisk" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *InstancesDetachDiskCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Detaches a disk from an instance.", + // "httpMethod": "POST", + // "id": "compute.instances.detachDisk", + // "parameterOrder": [ + // "project", + // "zone", + // "instance", + // "deviceName" + // ], + // "parameters": { + // "deviceName": { + // "description": "Disk device name to detach.", + // "location": "query", + // "pattern": "\\w[\\w.-]{0,254}", + // "required": true, + // "type": "string" + // }, + // "instance": { + // "description": "Instance name.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/zones/{zone}/instances/{instance}/detachDisk", + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.instances.get": + +type InstancesGetCall struct { + s *Service + project string + zone string + instance string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context +} + +// Get: Returns the specified instance 
resource. +// For details, see https://cloud.google.com/compute/docs/reference/latest/instances/get +func (r *InstancesService) Get(project string, zone string, instance string) *InstancesGetCall { + c := &InstancesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.zone = zone + c.instance = instance + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *InstancesGetCall) Fields(s ...googleapi.Field) *InstancesGetCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *InstancesGetCall) IfNoneMatch(entityTag string) *InstancesGetCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *InstancesGetCall) Context(ctx context.Context) *InstancesGetCall { + c.ctx_ = ctx + return c +} + +func (c *InstancesGetCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "zone": c.zone, + "instance": c.instance, + }) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + req.Header.Set("If-None-Match", c.ifNoneMatch_) + } + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "compute.instances.get" call. +// Exactly one of *Instance or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Instance.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *InstancesGetCall) Do(opts ...googleapi.CallOption) (*Instance, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
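+ // Editorial note, a hypothetical pairing (svc, proj, zone, name and etag
+ // are assumed caller-side names): Get combines with IfNoneMatch for cheap
+ // change detection:
+ //
+ //   inst, err := svc.Instances.Get(proj, zone, name).IfNoneMatch(etag).Do()
+ //   if googleapi.IsNotModified(err) {
+ //       // the previously fetched *Instance is still current
+ //   }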
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Instance{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Returns the specified instance resource.", + // "httpMethod": "GET", + // "id": "compute.instances.get", + // "parameterOrder": [ + // "project", + // "zone", + // "instance" + // ], + // "parameters": { + // "instance": { + // "description": "Name of the instance resource to return.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/zones/{zone}/instances/{instance}", + // "response": { + // "$ref": "Instance" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// method id "compute.instances.getSerialPortOutput": + +type InstancesGetSerialPortOutputCall struct { + s *Service + project string + zone string + instance string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context +} + +// GetSerialPortOutput: Returns the specified instance's serial port +// output. +// For details, see https://cloud.google.com/compute/docs/reference/latest/instances/getSerialPortOutput +func (r *InstancesService) GetSerialPortOutput(project string, zone string, instance string) *InstancesGetSerialPortOutputCall { + c := &InstancesGetSerialPortOutputCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.zone = zone + c.instance = instance + return c +} + +// Port sets the optional parameter "port": Specifies which COM or +// serial port to retrieve data from. +func (c *InstancesGetSerialPortOutputCall) Port(port int64) *InstancesGetSerialPortOutputCall { + c.urlParams_.Set("port", fmt.Sprint(port)) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *InstancesGetSerialPortOutputCall) Fields(s ...googleapi.Field) *InstancesGetSerialPortOutputCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. 
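+//
+// (Editorial example, assumed flow: reuse the ETag of an earlier response,
+// e.g. out.ServerResponse.Header.Get("Etag"), so unchanged serial output is
+// not refetched.)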
+func (c *InstancesGetSerialPortOutputCall) IfNoneMatch(entityTag string) *InstancesGetSerialPortOutputCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *InstancesGetSerialPortOutputCall) Context(ctx context.Context) *InstancesGetSerialPortOutputCall { + c.ctx_ = ctx + return c +} + +func (c *InstancesGetSerialPortOutputCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/serialPort") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "zone": c.zone, + "instance": c.instance, + }) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + req.Header.Set("If-None-Match", c.ifNoneMatch_) + } + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "compute.instances.getSerialPortOutput" call. +// Exactly one of *SerialPortOutput or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *SerialPortOutput.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *InstancesGetSerialPortOutputCall) Do(opts ...googleapi.CallOption) (*SerialPortOutput, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &SerialPortOutput{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Returns the specified instance's serial port output.", + // "httpMethod": "GET", + // "id": "compute.instances.getSerialPortOutput", + // "parameterOrder": [ + // "project", + // "zone", + // "instance" + // ], + // "parameters": { + // "instance": { + // "description": "Name of the instance scoping this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "port": { + // "default": "1", + // "description": "Specifies which COM or serial port to retrieve data from.", + // "format": "int32", + // "location": "query", + // "maximum": "4", + // "minimum": "1", + // "type": "integer" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": 
"string" + // } + // }, + // "path": "{project}/zones/{zone}/instances/{instance}/serialPort", + // "response": { + // "$ref": "SerialPortOutput" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// method id "compute.instances.insert": + +type InstancesInsertCall struct { + s *Service + project string + zone string + instance *Instance + urlParams_ gensupport.URLParams + ctx_ context.Context +} + +// Insert: Creates an instance resource in the specified project using +// the data included in the request. +// For details, see https://cloud.google.com/compute/docs/reference/latest/instances/insert +func (r *InstancesService) Insert(project string, zone string, instance *Instance) *InstancesInsertCall { + c := &InstancesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.zone = zone + c.instance = instance + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *InstancesInsertCall) Fields(s ...googleapi.Field) *InstancesInsertCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *InstancesInsertCall) Context(ctx context.Context) *InstancesInsertCall { + c.ctx_ = ctx + return c +} + +func (c *InstancesInsertCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.instance) + if err != nil { + return nil, err + } + ctype := "application/json" + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "zone": c.zone, + }) + req.Header.Set("Content-Type", ctype) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "compute.instances.insert" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *InstancesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Creates an instance resource in the specified project using the data included in the request.", + // "httpMethod": "POST", + // "id": "compute.instances.insert", + // "parameterOrder": [ + // "project", + // "zone" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/zones/{zone}/instances", + // "request": { + // "$ref": "Instance" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.instances.list": + +type InstancesListCall struct { + s *Service + project string + zone string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context +} + +// List: Retrieves the list of instance resources contained within the +// specified zone. +// For details, see https://cloud.google.com/compute/docs/reference/latest/instances/list +func (r *InstancesService) List(project string, zone string) *InstancesListCall { + c := &InstancesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.zone = zone + return c +} + +// Filter sets the optional parameter "filter": Sets a filter expression +// for filtering listed resources, in the form filter={expression}. Your +// {expression} must be in the format: field_name comparison_string +// literal_string. +// +// The field_name is the name of the field you want to compare. Only +// atomic field types are supported (string, number, boolean). The +// comparison_string must be either eq (equals) or ne (not equals). The +// literal_string is the string value to filter to. The literal value +// must be valid for the type of field you are filtering by (string, +// number, boolean). For string fields, the literal value is interpreted +// as a regular expression using RE2 syntax. The literal value must +// match the entire field. +// +// For example, filter=name ne example-instance. +// +// Compute Engine Beta API Only: If you use filtering in the Beta API, +// you can also filter on nested fields. For example, you could filter +// on instances that have set the scheduling.automaticRestart field to +// true. In particular, use filtering on nested fields to take advantage +// of instance labels to organize and filter results based on label +// values. 
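+//
+// (Editorial example, field name assumed: filter=status eq RUNNING keeps
+// only instances whose status field is exactly RUNNING, following the
+// field_name comparison_string literal_string form above.)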
+// +// The Beta API also supports filtering on multiple expressions by +// providing each separate expression within parentheses. For example, +// (scheduling.automaticRestart eq true) (zone eq us-central1-f). +// Multiple expressions are treated as AND expressions meaning that +// resources must match all expressions to pass the filters. +func (c *InstancesListCall) Filter(filter string) *InstancesListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that Compute Engine should return. If the +// number of available results is larger than maxResults, Compute Engine +// returns a nextPageToken that can be used to get the next page of +// results in subsequent list requests. +func (c *InstancesListCall) MaxResults(maxResults int64) *InstancesListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set pageToken to the nextPageToken returned by a +// previous list request to get the next page of results. +func (c *InstancesListCall) PageToken(pageToken string) *InstancesListCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *InstancesListCall) Fields(s ...googleapi.Field) *InstancesListCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *InstancesListCall) IfNoneMatch(entityTag string) *InstancesListCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *InstancesListCall) Context(ctx context.Context) *InstancesListCall { + c.ctx_ = ctx + return c +} + +func (c *InstancesListCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "zone": c.zone, + }) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + req.Header.Set("If-None-Match", c.ifNoneMatch_) + } + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "compute.instances.list" call. +// Exactly one of *InstanceList or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *InstanceList.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *InstancesListCall) Do(opts ...googleapi.CallOption) (*InstanceList, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
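+ // Editorial sketch (svc, proj and zone are assumed caller-side names):
+ // callers page through results by threading NextPageToken back into
+ // PageToken until it comes back empty:
+ //
+ //   for tok := ""; ; {
+ //       page, err := svc.Instances.List(proj, zone).PageToken(tok).Do()
+ //       if err != nil {
+ //           break
+ //       }
+ //       // ... consume page.Items ...
+ //       if tok = page.NextPageToken; tok == "" {
+ //           break
+ //       }
+ //   }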
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &InstanceList{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Retrieves the list of instance resources contained within the specified zone.", + // "httpMethod": "GET", + // "id": "compute.instances.list", + // "parameterOrder": [ + // "project", + // "zone" + // ], + // "parameters": { + // "filter": { + // "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, filter=name ne example-instance.\n\nCompute Engine Beta API Only: If you use filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. In particular, use filtering on nested fields to take advantage of instance labels to organize and filter results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions meaning that resources must match all expressions to pass the filters.", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that Compute Engine should return. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests.", + // "format": "uint32", + // "location": "query", + // "maximum": "500", + // "minimum": "0", + // "type": "integer" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. 
Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "location": "query", + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/zones/{zone}/instances", + // "response": { + // "$ref": "InstanceList" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// method id "compute.instances.reset": + +type InstancesResetCall struct { + s *Service + project string + zone string + instance string + urlParams_ gensupport.URLParams + ctx_ context.Context +} + +// Reset: Performs a hard reset on the instance. +// For details, see https://cloud.google.com/compute/docs/reference/latest/instances/reset +func (r *InstancesService) Reset(project string, zone string, instance string) *InstancesResetCall { + c := &InstancesResetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.zone = zone + c.instance = instance + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *InstancesResetCall) Fields(s ...googleapi.Field) *InstancesResetCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *InstancesResetCall) Context(ctx context.Context) *InstancesResetCall { + c.ctx_ = ctx + return c +} + +func (c *InstancesResetCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/reset") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "zone": c.zone, + "instance": c.instance, + }) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "compute.instances.reset" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *InstancesResetCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
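+ // Editorial note: like the other mutating calls in this file, Do returns a
+ // zone-scoped *Operation rather than a final result; a hypothetical caller
+ // polls svc.ZoneOperations.Get(proj, zone, op.Name) until op.Status is
+ // "DONE" (names assumed).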
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Performs a hard reset on the instance.", + // "httpMethod": "POST", + // "id": "compute.instances.reset", + // "parameterOrder": [ + // "project", + // "zone", + // "instance" + // ], + // "parameters": { + // "instance": { + // "description": "Name of the instance scoping this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/zones/{zone}/instances/{instance}/reset", + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.instances.setDiskAutoDelete": + +type InstancesSetDiskAutoDeleteCall struct { + s *Service + project string + zone string + instance string + urlParams_ gensupport.URLParams + ctx_ context.Context +} + +// SetDiskAutoDelete: Sets the auto-delete flag for a disk attached to +// an instance. +// For details, see https://cloud.google.com/compute/docs/reference/latest/instances/setDiskAutoDelete +func (r *InstancesService) SetDiskAutoDelete(project string, zone string, instance string, autoDelete bool, deviceName string) *InstancesSetDiskAutoDeleteCall { + c := &InstancesSetDiskAutoDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.zone = zone + c.instance = instance + c.urlParams_.Set("autoDelete", fmt.Sprint(autoDelete)) + c.urlParams_.Set("deviceName", deviceName) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *InstancesSetDiskAutoDeleteCall) Fields(s ...googleapi.Field) *InstancesSetDiskAutoDeleteCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. 
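+//
+// Editorial example (argument values assumed), marking a disk for automatic
+// cleanup when its instance is deleted:
+//
+//   op, err := svc.Instances.SetDiskAutoDelete(proj, zone, name, true, "persistent-disk-0").Context(ctx).Do()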
+func (c *InstancesSetDiskAutoDeleteCall) Context(ctx context.Context) *InstancesSetDiskAutoDeleteCall { + c.ctx_ = ctx + return c +} + +func (c *InstancesSetDiskAutoDeleteCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/setDiskAutoDelete") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "zone": c.zone, + "instance": c.instance, + }) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "compute.instances.setDiskAutoDelete" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *InstancesSetDiskAutoDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Sets the auto-delete flag for a disk attached to an instance.", + // "httpMethod": "POST", + // "id": "compute.instances.setDiskAutoDelete", + // "parameterOrder": [ + // "project", + // "zone", + // "instance", + // "autoDelete", + // "deviceName" + // ], + // "parameters": { + // "autoDelete": { + // "description": "Whether to auto-delete the disk when the instance is deleted.", + // "location": "query", + // "required": true, + // "type": "boolean" + // }, + // "deviceName": { + // "description": "The device name of the disk to modify.", + // "location": "query", + // "pattern": "\\w[\\w.-]{0,254}", + // "required": true, + // "type": "string" + // }, + // "instance": { + // "description": "The instance name.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/zones/{zone}/instances/{instance}/setDiskAutoDelete", + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // 
"https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.instances.setMetadata": + +type InstancesSetMetadataCall struct { + s *Service + project string + zone string + instance string + metadata *Metadata + urlParams_ gensupport.URLParams + ctx_ context.Context +} + +// SetMetadata: Sets metadata for the specified instance to the data +// included in the request. +// For details, see https://cloud.google.com/compute/docs/reference/latest/instances/setMetadata +func (r *InstancesService) SetMetadata(project string, zone string, instance string, metadata *Metadata) *InstancesSetMetadataCall { + c := &InstancesSetMetadataCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.zone = zone + c.instance = instance + c.metadata = metadata + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *InstancesSetMetadataCall) Fields(s ...googleapi.Field) *InstancesSetMetadataCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *InstancesSetMetadataCall) Context(ctx context.Context) *InstancesSetMetadataCall { + c.ctx_ = ctx + return c +} + +func (c *InstancesSetMetadataCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.metadata) + if err != nil { + return nil, err + } + ctype := "application/json" + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/setMetadata") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "zone": c.zone, + "instance": c.instance, + }) + req.Header.Set("Content-Type", ctype) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "compute.instances.setMetadata" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *InstancesSetMetadataCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Sets metadata for the specified instance to the data included in the request.", + // "httpMethod": "POST", + // "id": "compute.instances.setMetadata", + // "parameterOrder": [ + // "project", + // "zone", + // "instance" + // ], + // "parameters": { + // "instance": { + // "description": "Name of the instance scoping this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/zones/{zone}/instances/{instance}/setMetadata", + // "request": { + // "$ref": "Metadata" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.instances.setScheduling": + +type InstancesSetSchedulingCall struct { + s *Service + project string + zone string + instance string + scheduling *Scheduling + urlParams_ gensupport.URLParams + ctx_ context.Context +} + +// SetScheduling: Sets an instance's scheduling options. +// For details, see https://cloud.google.com/compute/docs/reference/latest/instances/setScheduling +func (r *InstancesService) SetScheduling(project string, zone string, instance string, scheduling *Scheduling) *InstancesSetSchedulingCall { + c := &InstancesSetSchedulingCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.zone = zone + c.instance = instance + c.scheduling = scheduling + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *InstancesSetSchedulingCall) Fields(s ...googleapi.Field) *InstancesSetSchedulingCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. 
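+//
+// (Editorial note, names assumed: callers pass a *Scheduling whose fields
+// such as AutomaticRestart and OnHostMaintenance describe the desired
+// policy, then poll the returned Operation as with the other mutators.)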
+func (c *InstancesSetSchedulingCall) Context(ctx context.Context) *InstancesSetSchedulingCall { + c.ctx_ = ctx + return c +} + +func (c *InstancesSetSchedulingCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.scheduling) + if err != nil { + return nil, err + } + ctype := "application/json" + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/setScheduling") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "zone": c.zone, + "instance": c.instance, + }) + req.Header.Set("Content-Type", ctype) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "compute.instances.setScheduling" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *InstancesSetSchedulingCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Sets an instance's scheduling options.", + // "httpMethod": "POST", + // "id": "compute.instances.setScheduling", + // "parameterOrder": [ + // "project", + // "zone", + // "instance" + // ], + // "parameters": { + // "instance": { + // "description": "Instance name.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/zones/{zone}/instances/{instance}/setScheduling", + // "request": { + // "$ref": "Scheduling" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.instances.setTags": + +type InstancesSetTagsCall struct { + s *Service + project string + zone string + instance string + tags *Tags + urlParams_ gensupport.URLParams + ctx_ 
context.Context +} + +// SetTags: Sets tags for the specified instance to the data included in +// the request. +// For details, see https://cloud.google.com/compute/docs/reference/latest/instances/setTags +func (r *InstancesService) SetTags(project string, zone string, instance string, tags *Tags) *InstancesSetTagsCall { + c := &InstancesSetTagsCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.zone = zone + c.instance = instance + c.tags = tags + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *InstancesSetTagsCall) Fields(s ...googleapi.Field) *InstancesSetTagsCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *InstancesSetTagsCall) Context(ctx context.Context) *InstancesSetTagsCall { + c.ctx_ = ctx + return c +} + +func (c *InstancesSetTagsCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.tags) + if err != nil { + return nil, err + } + ctype := "application/json" + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/setTags") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "zone": c.zone, + "instance": c.instance, + }) + req.Header.Set("Content-Type", ctype) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "compute.instances.setTags" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *InstancesSetTagsCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
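+ // Editorial note (an assumption about API semantics, not about this
+ // client): Tags carries a Fingerprint field; reusing the value from the
+ // instance's current Tags guards this read-modify-write against
+ // concurrent updates.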
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Sets tags for the specified instance to the data included in the request.", + // "httpMethod": "POST", + // "id": "compute.instances.setTags", + // "parameterOrder": [ + // "project", + // "zone", + // "instance" + // ], + // "parameters": { + // "instance": { + // "description": "Name of the instance scoping this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/zones/{zone}/instances/{instance}/setTags", + // "request": { + // "$ref": "Tags" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.instances.start": + +type InstancesStartCall struct { + s *Service + project string + zone string + instance string + urlParams_ gensupport.URLParams + ctx_ context.Context +} + +// Start: This method starts an instance that was stopped using the +// using the instances().stop method. For more information, see Restart +// an instance. +// For details, see https://cloud.google.com/compute/docs/reference/latest/instances/start +func (r *InstancesService) Start(project string, zone string, instance string) *InstancesStartCall { + c := &InstancesStartCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.zone = zone + c.instance = instance + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *InstancesStartCall) Fields(s ...googleapi.Field) *InstancesStartCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *InstancesStartCall) Context(ctx context.Context) *InstancesStartCall { + c.ctx_ = ctx + return c +} + +func (c *InstancesStartCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/start") + urls += "?" 
+ c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "zone": c.zone, + "instance": c.instance, + }) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "compute.instances.start" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *InstancesStartCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "This method starts an instance that was stopped using the using the instances().stop method. For more information, see Restart an instance.", + // "httpMethod": "POST", + // "id": "compute.instances.start", + // "parameterOrder": [ + // "project", + // "zone", + // "instance" + // ], + // "parameters": { + // "instance": { + // "description": "Name of the instance resource to start.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/zones/{zone}/instances/{instance}/start", + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.instances.stop": + +type InstancesStopCall struct { + s *Service + project string + zone string + instance string + urlParams_ gensupport.URLParams + ctx_ context.Context +} + +// Stop: This method stops a running instance, shutting it down cleanly, +// and allows you to restart the instance at a later time. Stopped +// instances do not incur per-minute, virtual machine usage charges +// while they are stopped, but any resources that the virtual machine is +// using, such as persistent disks and static IP addresses,will continue +// to be charged until they are deleted. For more information, see +// Stopping an instance. 
+// For details, see https://cloud.google.com/compute/docs/reference/latest/instances/stop +func (r *InstancesService) Stop(project string, zone string, instance string) *InstancesStopCall { + c := &InstancesStopCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.zone = zone + c.instance = instance + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *InstancesStopCall) Fields(s ...googleapi.Field) *InstancesStopCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *InstancesStopCall) Context(ctx context.Context) *InstancesStopCall { + c.ctx_ = ctx + return c +} + +func (c *InstancesStopCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/stop") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "zone": c.zone, + "instance": c.instance, + }) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "compute.instances.stop" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *InstancesStopCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "This method stops a running instance, shutting it down cleanly, and allows you to restart the instance at a later time. Stopped instances do not incur per-minute, virtual machine usage charges while they are stopped, but any resources that the virtual machine is using, such as persistent disks and static IP addresses,will continue to be charged until they are deleted. 
For more information, see Stopping an instance.", + // "httpMethod": "POST", + // "id": "compute.instances.stop", + // "parameterOrder": [ + // "project", + // "zone", + // "instance" + // ], + // "parameters": { + // "instance": { + // "description": "Name of the instance resource to stop.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/zones/{zone}/instances/{instance}/stop", + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.licenses.get": + +type LicensesGetCall struct { + s *Service + project string + license string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context +} + +// Get: Returns the specified license resource. +// For details, see https://cloud.google.com/compute/docs/reference/latest/licenses/get +func (r *LicensesService) Get(project string, license string) *LicensesGetCall { + c := &LicensesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.license = license + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *LicensesGetCall) Fields(s ...googleapi.Field) *LicensesGetCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *LicensesGetCall) IfNoneMatch(entityTag string) *LicensesGetCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *LicensesGetCall) Context(ctx context.Context) *LicensesGetCall { + c.ctx_ = ctx + return c +} + +func (c *LicensesGetCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/licenses/{license}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "license": c.license, + }) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + req.Header.Set("If-None-Match", c.ifNoneMatch_) + } + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "compute.licenses.get" call. +// Exactly one of *License or error will be non-nil. Any non-2xx status +// code is an error. 
Response headers are in either +// *License.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *LicensesGetCall) Do(opts ...googleapi.CallOption) (*License, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &License{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Returns the specified license resource.", + // "httpMethod": "GET", + // "id": "compute.licenses.get", + // "parameterOrder": [ + // "project", + // "license" + // ], + // "parameters": { + // "license": { + // "description": "Name of the license resource to return.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/global/licenses/{license}", + // "response": { + // "$ref": "License" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// method id "compute.machineTypes.aggregatedList": + +type MachineTypesAggregatedListCall struct { + s *Service + project string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context +} + +// AggregatedList: Retrieves the list of machine type resources grouped +// by scope. +// For details, see https://cloud.google.com/compute/docs/reference/latest/machineTypes/aggregatedList +func (r *MachineTypesService) AggregatedList(project string) *MachineTypesAggregatedListCall { + c := &MachineTypesAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + return c +} + +// Filter sets the optional parameter "filter": Sets a filter expression +// for filtering listed resources, in the form filter={expression}. Your +// {expression} must be in the format: field_name comparison_string +// literal_string. +// +// The field_name is the name of the field you want to compare. Only +// atomic field types are supported (string, number, boolean). The +// comparison_string must be either eq (equals) or ne (not equals). The +// literal_string is the string value to filter to. The literal value +// must be valid for the type of field you are filtering by (string, +// number, boolean). For string fields, the literal value is interpreted +// as a regular expression using RE2 syntax. The literal value must +// match the entire field. +// +// For example, filter=name ne example-instance. 
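+//
+// As an illustrative sketch only (assuming a configured *Service named
+// svc; "my-project" is a placeholder), a filter combines with the
+// builder chain like so:
+//
+//    agg, err := svc.MachineTypes.AggregatedList("my-project").
+//        Filter("name eq n1-standard-.*").
+//        Do()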
+//
+// Compute Engine Beta API Only: If you use filtering in the Beta API,
+// you can also filter on nested fields. For example, you could filter
+// on instances that have set the scheduling.automaticRestart field to
+// true. In particular, use filtering on nested fields to take advantage
+// of instance labels to organize and filter results based on label
+// values.
+//
+// The Beta API also supports filtering on multiple expressions by
+// providing each separate expression within parentheses. For example,
+// (scheduling.automaticRestart eq true) (zone eq us-central1-f).
+// Multiple expressions are treated as AND expressions meaning that
+// resources must match all expressions to pass the filters.
+func (c *MachineTypesAggregatedListCall) Filter(filter string) *MachineTypesAggregatedListCall {
+ c.urlParams_.Set("filter", filter)
+ return c
+}
+
+// MaxResults sets the optional parameter "maxResults": The maximum
+// number of results per page that Compute Engine should return. If the
+// number of available results is larger than maxResults, Compute Engine
+// returns a nextPageToken that can be used to get the next page of
+// results in subsequent list requests.
+func (c *MachineTypesAggregatedListCall) MaxResults(maxResults int64) *MachineTypesAggregatedListCall {
+ c.urlParams_.Set("maxResults", fmt.Sprint(maxResults))
+ return c
+}
+
+// PageToken sets the optional parameter "pageToken": Specifies a page
+// token to use. Set pageToken to the nextPageToken returned by a
+// previous list request to get the next page of results.
+func (c *MachineTypesAggregatedListCall) PageToken(pageToken string) *MachineTypesAggregatedListCall {
+ c.urlParams_.Set("pageToken", pageToken)
+ return c
+}
+
+// Fields allows partial responses to be retrieved. See
+// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *MachineTypesAggregatedListCall) Fields(s ...googleapi.Field) *MachineTypesAggregatedListCall {
+ c.urlParams_.Set("fields", googleapi.CombineFields(s))
+ return c
+}
+
+// IfNoneMatch sets the optional parameter which makes the operation
+// fail if the object's ETag matches the given value. This is useful for
+// getting updates only after the object has changed since the last
+// request. Use googleapi.IsNotModified to check whether the response
+// error from Do is the result of If-None-Match.
+func (c *MachineTypesAggregatedListCall) IfNoneMatch(entityTag string) *MachineTypesAggregatedListCall {
+ c.ifNoneMatch_ = entityTag
+ return c
+}
+
+// Context sets the context to be used in this call's Do method. Any
+// pending HTTP request will be aborted if the provided context is
+// canceled.
+func (c *MachineTypesAggregatedListCall) Context(ctx context.Context) *MachineTypesAggregatedListCall {
+ c.ctx_ = ctx
+ return c
+}
+
+func (c *MachineTypesAggregatedListCall) doRequest(alt string) (*http.Response, error) {
+ var body io.Reader = nil
+ c.urlParams_.Set("alt", alt)
+ urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/machineTypes")
+ urls += "?" + c.urlParams_.Encode()
+ req, _ := http.NewRequest("GET", urls, body)
+ googleapi.Expand(req.URL, map[string]string{
+ "project": c.project,
+ })
+ req.Header.Set("User-Agent", c.s.userAgent())
+ if c.ifNoneMatch_ != "" {
+ req.Header.Set("If-None-Match", c.ifNoneMatch_)
+ }
+ if c.ctx_ != nil {
+ return ctxhttp.Do(c.ctx_, c.s.client, req)
+ }
+ return c.s.client.Do(req)
+}
+
+// Do executes the "compute.machineTypes.aggregatedList" call.
+// Exactly one of *MachineTypeAggregatedList or error will be non-nil. +// Any non-2xx status code is an error. Response headers are in either +// *MachineTypeAggregatedList.ServerResponse.Header or (if a response +// was returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *MachineTypesAggregatedListCall) Do(opts ...googleapi.CallOption) (*MachineTypeAggregatedList, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &MachineTypeAggregatedList{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Retrieves the list of machine type resources grouped by scope.", + // "httpMethod": "GET", + // "id": "compute.machineTypes.aggregatedList", + // "parameterOrder": [ + // "project" + // ], + // "parameters": { + // "filter": { + // "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, filter=name ne example-instance.\n\nCompute Engine Beta API Only: If you use filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. In particular, use filtering on nested fields to take advantage of instance labels to organize and filter results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions meaning that resources must match all expressions to pass the filters.", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that Compute Engine should return. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests.", + // "format": "uint32", + // "location": "query", + // "maximum": "500", + // "minimum": "0", + // "type": "integer" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. 
Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.",
+ // "location": "query",
+ // "type": "string"
+ // },
+ // "project": {
+ // "description": "Project ID for this request.",
+ // "location": "path",
+ // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
+ // "required": true,
+ // "type": "string"
+ // }
+ // },
+ // "path": "{project}/aggregated/machineTypes",
+ // "response": {
+ // "$ref": "MachineTypeAggregatedList"
+ // },
+ // "scopes": [
+ // "https://www.googleapis.com/auth/cloud-platform",
+ // "https://www.googleapis.com/auth/compute",
+ // "https://www.googleapis.com/auth/compute.readonly"
+ // ]
+ // }
+
+}
+
+// method id "compute.machineTypes.get":
+
+type MachineTypesGetCall struct {
+ s *Service
+ project string
+ zone string
+ machineType string
+ urlParams_ gensupport.URLParams
+ ifNoneMatch_ string
+ ctx_ context.Context
+}
+
+// Get: Returns the specified machine type resource.
+// For details, see https://cloud.google.com/compute/docs/reference/latest/machineTypes/get
+func (r *MachineTypesService) Get(project string, zone string, machineType string) *MachineTypesGetCall {
+ c := &MachineTypesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)}
+ c.project = project
+ c.zone = zone
+ c.machineType = machineType
+ return c
+}
+
+// Fields allows partial responses to be retrieved. See
+// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *MachineTypesGetCall) Fields(s ...googleapi.Field) *MachineTypesGetCall {
+ c.urlParams_.Set("fields", googleapi.CombineFields(s))
+ return c
+}
+
+// IfNoneMatch sets the optional parameter which makes the operation
+// fail if the object's ETag matches the given value. This is useful for
+// getting updates only after the object has changed since the last
+// request. Use googleapi.IsNotModified to check whether the response
+// error from Do is the result of If-None-Match.
+func (c *MachineTypesGetCall) IfNoneMatch(entityTag string) *MachineTypesGetCall {
+ c.ifNoneMatch_ = entityTag
+ return c
+}
+
+// Context sets the context to be used in this call's Do method. Any
+// pending HTTP request will be aborted if the provided context is
+// canceled.
+func (c *MachineTypesGetCall) Context(ctx context.Context) *MachineTypesGetCall {
+ c.ctx_ = ctx
+ return c
+}
+
+func (c *MachineTypesGetCall) doRequest(alt string) (*http.Response, error) {
+ var body io.Reader = nil
+ c.urlParams_.Set("alt", alt)
+ urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/machineTypes/{machineType}")
+ urls += "?" + c.urlParams_.Encode()
+ req, _ := http.NewRequest("GET", urls, body)
+ googleapi.Expand(req.URL, map[string]string{
+ "project": c.project,
+ "zone": c.zone,
+ "machineType": c.machineType,
+ })
+ req.Header.Set("User-Agent", c.s.userAgent())
+ if c.ifNoneMatch_ != "" {
+ req.Header.Set("If-None-Match", c.ifNoneMatch_)
+ }
+ if c.ctx_ != nil {
+ return ctxhttp.Do(c.ctx_, c.s.client, req)
+ }
+ return c.s.client.Do(req)
+}
+
+// Do executes the "compute.machineTypes.get" call.
+// Exactly one of *MachineType or error will be non-nil. Any non-2xx
+// status code is an error. Response headers are in either
+// *MachineType.ServerResponse.Header or (if a response was returned at
+// all) in error.(*googleapi.Error).Header.
Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *MachineTypesGetCall) Do(opts ...googleapi.CallOption) (*MachineType, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &MachineType{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Returns the specified machine type resource.", + // "httpMethod": "GET", + // "id": "compute.machineTypes.get", + // "parameterOrder": [ + // "project", + // "zone", + // "machineType" + // ], + // "parameters": { + // "machineType": { + // "description": "Name of the machine type resource to return.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/zones/{zone}/machineTypes/{machineType}", + // "response": { + // "$ref": "MachineType" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// method id "compute.machineTypes.list": + +type MachineTypesListCall struct { + s *Service + project string + zone string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context +} + +// List: Retrieves the list of machine type resources available to the +// specified project. +// For details, see https://cloud.google.com/compute/docs/reference/latest/machineTypes/list +func (r *MachineTypesService) List(project string, zone string) *MachineTypesListCall { + c := &MachineTypesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.zone = zone + return c +} + +// Filter sets the optional parameter "filter": Sets a filter expression +// for filtering listed resources, in the form filter={expression}. Your +// {expression} must be in the format: field_name comparison_string +// literal_string. +// +// The field_name is the name of the field you want to compare. Only +// atomic field types are supported (string, number, boolean). The +// comparison_string must be either eq (equals) or ne (not equals). The +// literal_string is the string value to filter to. The literal value +// must be valid for the type of field you are filtering by (string, +// number, boolean). For string fields, the literal value is interpreted +// as a regular expression using RE2 syntax. The literal value must +// match the entire field. 
+//
+// For example, filter=name ne example-instance.
+//
+// Compute Engine Beta API Only: If you use filtering in the Beta API,
+// you can also filter on nested fields. For example, you could filter
+// on instances that have set the scheduling.automaticRestart field to
+// true. In particular, use filtering on nested fields to take advantage
+// of instance labels to organize and filter results based on label
+// values.
+//
+// The Beta API also supports filtering on multiple expressions by
+// providing each separate expression within parentheses. For example,
+// (scheduling.automaticRestart eq true) (zone eq us-central1-f).
+// Multiple expressions are treated as AND expressions meaning that
+// resources must match all expressions to pass the filters.
+func (c *MachineTypesListCall) Filter(filter string) *MachineTypesListCall {
+ c.urlParams_.Set("filter", filter)
+ return c
+}
+
+// MaxResults sets the optional parameter "maxResults": The maximum
+// number of results per page that Compute Engine should return. If the
+// number of available results is larger than maxResults, Compute Engine
+// returns a nextPageToken that can be used to get the next page of
+// results in subsequent list requests.
+func (c *MachineTypesListCall) MaxResults(maxResults int64) *MachineTypesListCall {
+ c.urlParams_.Set("maxResults", fmt.Sprint(maxResults))
+ return c
+}
+
+// PageToken sets the optional parameter "pageToken": Specifies a page
+// token to use. Set pageToken to the nextPageToken returned by a
+// previous list request to get the next page of results.
+func (c *MachineTypesListCall) PageToken(pageToken string) *MachineTypesListCall {
+ c.urlParams_.Set("pageToken", pageToken)
+ return c
+}
+
+// Fields allows partial responses to be retrieved. See
+// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *MachineTypesListCall) Fields(s ...googleapi.Field) *MachineTypesListCall {
+ c.urlParams_.Set("fields", googleapi.CombineFields(s))
+ return c
+}
+
+// IfNoneMatch sets the optional parameter which makes the operation
+// fail if the object's ETag matches the given value. This is useful for
+// getting updates only after the object has changed since the last
+// request. Use googleapi.IsNotModified to check whether the response
+// error from Do is the result of If-None-Match.
+func (c *MachineTypesListCall) IfNoneMatch(entityTag string) *MachineTypesListCall {
+ c.ifNoneMatch_ = entityTag
+ return c
+}
+
+// Context sets the context to be used in this call's Do method. Any
+// pending HTTP request will be aborted if the provided context is
+// canceled.
+func (c *MachineTypesListCall) Context(ctx context.Context) *MachineTypesListCall {
+ c.ctx_ = ctx
+ return c
+}
+
+func (c *MachineTypesListCall) doRequest(alt string) (*http.Response, error) {
+ var body io.Reader = nil
+ c.urlParams_.Set("alt", alt)
+ urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/machineTypes")
+ urls += "?" + c.urlParams_.Encode()
+ req, _ := http.NewRequest("GET", urls, body)
+ googleapi.Expand(req.URL, map[string]string{
+ "project": c.project,
+ "zone": c.zone,
+ })
+ req.Header.Set("User-Agent", c.s.userAgent())
+ if c.ifNoneMatch_ != "" {
+ req.Header.Set("If-None-Match", c.ifNoneMatch_)
+ }
+ if c.ctx_ != nil {
+ return ctxhttp.Do(c.ctx_, c.s.client, req)
+ }
+ return c.s.client.Do(req)
+}
+
+// Do executes the "compute.machineTypes.list" call.
+// Exactly one of *MachineTypeList or error will be non-nil.
Any non-2xx +// status code is an error. Response headers are in either +// *MachineTypeList.ServerResponse.Header or (if a response was returned +// at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *MachineTypesListCall) Do(opts ...googleapi.CallOption) (*MachineTypeList, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &MachineTypeList{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Retrieves the list of machine type resources available to the specified project.", + // "httpMethod": "GET", + // "id": "compute.machineTypes.list", + // "parameterOrder": [ + // "project", + // "zone" + // ], + // "parameters": { + // "filter": { + // "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, filter=name ne example-instance.\n\nCompute Engine Beta API Only: If you use filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. In particular, use filtering on nested fields to take advantage of instance labels to organize and filter results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions meaning that resources must match all expressions to pass the filters.", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that Compute Engine should return. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests.", + // "format": "uint32", + // "location": "query", + // "maximum": "500", + // "minimum": "0", + // "type": "integer" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. 
Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "location": "query", + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/zones/{zone}/machineTypes", + // "response": { + // "$ref": "MachineTypeList" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// method id "compute.networks.delete": + +type NetworksDeleteCall struct { + s *Service + project string + network string + urlParams_ gensupport.URLParams + ctx_ context.Context +} + +// Delete: Deletes the specified network resource. +// For details, see https://cloud.google.com/compute/docs/reference/latest/networks/delete +func (r *NetworksService) Delete(project string, network string) *NetworksDeleteCall { + c := &NetworksDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.network = network + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *NetworksDeleteCall) Fields(s ...googleapi.Field) *NetworksDeleteCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *NetworksDeleteCall) Context(ctx context.Context) *NetworksDeleteCall { + c.ctx_ = ctx + return c +} + +func (c *NetworksDeleteCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/networks/{network}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("DELETE", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "network": c.network, + }) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "compute.networks.delete" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *NetworksDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
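+ // A 304 Not Modified response is surfaced below as a *googleapi.Error
+ // (rather than a decoded body) so that googleapi.IsNotModified can
+ // detect it.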
+ res, err := c.doRequest("json")
+ if res != nil && res.StatusCode == http.StatusNotModified {
+ if res.Body != nil {
+ res.Body.Close()
+ }
+ return nil, &googleapi.Error{
+ Code: res.StatusCode,
+ Header: res.Header,
+ }
+ }
+ if err != nil {
+ return nil, err
+ }
+ defer googleapi.CloseBody(res)
+ if err := googleapi.CheckResponse(res); err != nil {
+ return nil, err
+ }
+ ret := &Operation{
+ ServerResponse: googleapi.ServerResponse{
+ Header: res.Header,
+ HTTPStatusCode: res.StatusCode,
+ },
+ }
+ if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
+ return nil, err
+ }
+ return ret, nil
+ // {
+ // "description": "Deletes the specified network resource.",
+ // "httpMethod": "DELETE",
+ // "id": "compute.networks.delete",
+ // "parameterOrder": [
+ // "project",
+ // "network"
+ // ],
+ // "parameters": {
+ // "network": {
+ // "description": "Name of the network resource to delete.",
+ // "location": "path",
+ // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
+ // "required": true,
+ // "type": "string"
+ // },
+ // "project": {
+ // "description": "Project ID for this request.",
+ // "location": "path",
+ // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
+ // "required": true,
+ // "type": "string"
+ // }
+ // },
+ // "path": "{project}/global/networks/{network}",
+ // "response": {
+ // "$ref": "Operation"
+ // },
+ // "scopes": [
+ // "https://www.googleapis.com/auth/cloud-platform",
+ // "https://www.googleapis.com/auth/compute"
+ // ]
+ // }
+
+}
+
+// method id "compute.networks.get":
+
+type NetworksGetCall struct {
+ s *Service
+ project string
+ network string
+ urlParams_ gensupport.URLParams
+ ifNoneMatch_ string
+ ctx_ context.Context
+}
+
+// Get: Returns the specified network resource.
+// For details, see https://cloud.google.com/compute/docs/reference/latest/networks/get
+func (r *NetworksService) Get(project string, network string) *NetworksGetCall {
+ c := &NetworksGetCall{s: r.s, urlParams_: make(gensupport.URLParams)}
+ c.project = project
+ c.network = network
+ return c
+}
+
+// Fields allows partial responses to be retrieved. See
+// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *NetworksGetCall) Fields(s ...googleapi.Field) *NetworksGetCall {
+ c.urlParams_.Set("fields", googleapi.CombineFields(s))
+ return c
+}
+
+// IfNoneMatch sets the optional parameter which makes the operation
+// fail if the object's ETag matches the given value. This is useful for
+// getting updates only after the object has changed since the last
+// request. Use googleapi.IsNotModified to check whether the response
+// error from Do is the result of If-None-Match.
+func (c *NetworksGetCall) IfNoneMatch(entityTag string) *NetworksGetCall {
+ c.ifNoneMatch_ = entityTag
+ return c
+}
+
+// Context sets the context to be used in this call's Do method. Any
+// pending HTTP request will be aborted if the provided context is
+// canceled.
+func (c *NetworksGetCall) Context(ctx context.Context) *NetworksGetCall {
+ c.ctx_ = ctx
+ return c
+}
+
+func (c *NetworksGetCall) doRequest(alt string) (*http.Response, error) {
+ var body io.Reader = nil
+ c.urlParams_.Set("alt", alt)
+ urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/networks/{network}")
+ urls += "?"
+ c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "network": c.network, + }) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + req.Header.Set("If-None-Match", c.ifNoneMatch_) + } + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "compute.networks.get" call. +// Exactly one of *Network or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Network.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *NetworksGetCall) Do(opts ...googleapi.CallOption) (*Network, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Network{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Returns the specified network resource.", + // "httpMethod": "GET", + // "id": "compute.networks.get", + // "parameterOrder": [ + // "project", + // "network" + // ], + // "parameters": { + // "network": { + // "description": "Name of the network resource to return.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/global/networks/{network}", + // "response": { + // "$ref": "Network" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// method id "compute.networks.insert": + +type NetworksInsertCall struct { + s *Service + project string + network *Network + urlParams_ gensupport.URLParams + ctx_ context.Context +} + +// Insert: Creates a network resource in the specified project using the +// data included in the request. +// For details, see https://cloud.google.com/compute/docs/reference/latest/networks/insert +func (r *NetworksService) Insert(project string, network *Network) *NetworksInsertCall { + c := &NetworksInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.network = network + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. 
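+//
+// An illustrative sketch (from a caller that imports this package as
+// compute; all names are placeholders, and IPv4Range assumes a
+// legacy-mode network), combining Insert with a partial response:
+//
+//    op, err := svc.Networks.Insert("my-project", &compute.Network{
+//        Name:      "my-network",
+//        IPv4Range: "10.240.0.0/16",
+//    }).Fields("name", "status").Do()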
+func (c *NetworksInsertCall) Fields(s ...googleapi.Field) *NetworksInsertCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *NetworksInsertCall) Context(ctx context.Context) *NetworksInsertCall { + c.ctx_ = ctx + return c +} + +func (c *NetworksInsertCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.network) + if err != nil { + return nil, err + } + ctype := "application/json" + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/networks") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + }) + req.Header.Set("Content-Type", ctype) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "compute.networks.insert" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *NetworksInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Creates a network resource in the specified project using the data included in the request.", + // "httpMethod": "POST", + // "id": "compute.networks.insert", + // "parameterOrder": [ + // "project" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/global/networks", + // "request": { + // "$ref": "Network" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.networks.list": + +type NetworksListCall struct { + s *Service + project string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context +} + +// List: Retrieves the list of network resources available to the +// specified project. 
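+//
+// A minimal paging sketch (assumes a configured *Service named svc and
+// the log and fmt packages on the caller's side):
+//
+//    call := svc.Networks.List("my-project")
+//    for {
+//        page, err := call.Do()
+//        if err != nil {
+//            log.Fatal(err)
+//        }
+//        for _, network := range page.Items {
+//            fmt.Println(network.Name)
+//        }
+//        if page.NextPageToken == "" {
+//            break
+//        }
+//        call.PageToken(page.NextPageToken)
+//    }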
+// For details, see https://cloud.google.com/compute/docs/reference/latest/networks/list
+func (r *NetworksService) List(project string) *NetworksListCall {
+ c := &NetworksListCall{s: r.s, urlParams_: make(gensupport.URLParams)}
+ c.project = project
+ return c
+}
+
+// Filter sets the optional parameter "filter": Sets a filter expression
+// for filtering listed resources, in the form filter={expression}. Your
+// {expression} must be in the format: field_name comparison_string
+// literal_string.
+//
+// The field_name is the name of the field you want to compare. Only
+// atomic field types are supported (string, number, boolean). The
+// comparison_string must be either eq (equals) or ne (not equals). The
+// literal_string is the string value to filter to. The literal value
+// must be valid for the type of field you are filtering by (string,
+// number, boolean). For string fields, the literal value is interpreted
+// as a regular expression using RE2 syntax. The literal value must
+// match the entire field.
+//
+// For example, filter=name ne example-instance.
+//
+// Compute Engine Beta API Only: If you use filtering in the Beta API,
+// you can also filter on nested fields. For example, you could filter
+// on instances that have set the scheduling.automaticRestart field to
+// true. In particular, use filtering on nested fields to take advantage
+// of instance labels to organize and filter results based on label
+// values.
+//
+// The Beta API also supports filtering on multiple expressions by
+// providing each separate expression within parentheses. For example,
+// (scheduling.automaticRestart eq true) (zone eq us-central1-f).
+// Multiple expressions are treated as AND expressions meaning that
+// resources must match all expressions to pass the filters.
+func (c *NetworksListCall) Filter(filter string) *NetworksListCall {
+ c.urlParams_.Set("filter", filter)
+ return c
+}
+
+// MaxResults sets the optional parameter "maxResults": The maximum
+// number of results per page that Compute Engine should return. If the
+// number of available results is larger than maxResults, Compute Engine
+// returns a nextPageToken that can be used to get the next page of
+// results in subsequent list requests.
+func (c *NetworksListCall) MaxResults(maxResults int64) *NetworksListCall {
+ c.urlParams_.Set("maxResults", fmt.Sprint(maxResults))
+ return c
+}
+
+// PageToken sets the optional parameter "pageToken": Specifies a page
+// token to use. Set pageToken to the nextPageToken returned by a
+// previous list request to get the next page of results.
+func (c *NetworksListCall) PageToken(pageToken string) *NetworksListCall {
+ c.urlParams_.Set("pageToken", pageToken)
+ return c
+}
+
+// Fields allows partial responses to be retrieved. See
+// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *NetworksListCall) Fields(s ...googleapi.Field) *NetworksListCall {
+ c.urlParams_.Set("fields", googleapi.CombineFields(s))
+ return c
+}
+
+// IfNoneMatch sets the optional parameter which makes the operation
+// fail if the object's ETag matches the given value. This is useful for
+// getting updates only after the object has changed since the last
+// request. Use googleapi.IsNotModified to check whether the response
+// error from Do is the result of If-None-Match.
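+//
+// For instance (sketch only; prevETag is a placeholder holding an ETag
+// captured from an earlier response):
+//
+//    list, err := svc.Networks.List("my-project").IfNoneMatch(prevETag).Do()
+//    if googleapi.IsNotModified(err) {
+//        // The cached copy is still current; reuse it.
+//    }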
+func (c *NetworksListCall) IfNoneMatch(entityTag string) *NetworksListCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *NetworksListCall) Context(ctx context.Context) *NetworksListCall { + c.ctx_ = ctx + return c +} + +func (c *NetworksListCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/networks") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + }) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + req.Header.Set("If-None-Match", c.ifNoneMatch_) + } + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "compute.networks.list" call. +// Exactly one of *NetworkList or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *NetworkList.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *NetworksListCall) Do(opts ...googleapi.CallOption) (*NetworkList, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &NetworkList{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Retrieves the list of network resources available to the specified project.", + // "httpMethod": "GET", + // "id": "compute.networks.list", + // "parameterOrder": [ + // "project" + // ], + // "parameters": { + // "filter": { + // "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, filter=name ne example-instance.\n\nCompute Engine Beta API Only: If you use filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. 
In particular, use filtering on nested fields to take advantage of instance labels to organize and filter results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions meaning that resources must match all expressions to pass the filters.",
+ // "location": "query",
+ // "type": "string"
+ // },
+ // "maxResults": {
+ // "default": "500",
+ // "description": "The maximum number of results per page that Compute Engine should return. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests.",
+ // "format": "uint32",
+ // "location": "query",
+ // "maximum": "500",
+ // "minimum": "0",
+ // "type": "integer"
+ // },
+ // "pageToken": {
+ // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.",
+ // "location": "query",
+ // "type": "string"
+ // },
+ // "project": {
+ // "description": "Project ID for this request.",
+ // "location": "path",
+ // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
+ // "required": true,
+ // "type": "string"
+ // }
+ // },
+ // "path": "{project}/global/networks",
+ // "response": {
+ // "$ref": "NetworkList"
+ // },
+ // "scopes": [
+ // "https://www.googleapis.com/auth/cloud-platform",
+ // "https://www.googleapis.com/auth/compute",
+ // "https://www.googleapis.com/auth/compute.readonly"
+ // ]
+ // }
+
+}
+
+// method id "compute.projects.get":
+
+type ProjectsGetCall struct {
+ s *Service
+ project string
+ urlParams_ gensupport.URLParams
+ ifNoneMatch_ string
+ ctx_ context.Context
+}
+
+// Get: Returns the specified project resource.
+// For details, see https://cloud.google.com/compute/docs/reference/latest/projects/get
+func (r *ProjectsService) Get(project string) *ProjectsGetCall {
+ c := &ProjectsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)}
+ c.project = project
+ return c
+}
+
+// Fields allows partial responses to be retrieved. See
+// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *ProjectsGetCall) Fields(s ...googleapi.Field) *ProjectsGetCall {
+ c.urlParams_.Set("fields", googleapi.CombineFields(s))
+ return c
+}
+
+// IfNoneMatch sets the optional parameter which makes the operation
+// fail if the object's ETag matches the given value. This is useful for
+// getting updates only after the object has changed since the last
+// request. Use googleapi.IsNotModified to check whether the response
+// error from Do is the result of If-None-Match.
+func (c *ProjectsGetCall) IfNoneMatch(entityTag string) *ProjectsGetCall {
+ c.ifNoneMatch_ = entityTag
+ return c
+}
+
+// Context sets the context to be used in this call's Do method. Any
+// pending HTTP request will be aborted if the provided context is
+// canceled.
+func (c *ProjectsGetCall) Context(ctx context.Context) *ProjectsGetCall {
+ c.ctx_ = ctx
+ return c
+}
+
+func (c *ProjectsGetCall) doRequest(alt string) (*http.Response, error) {
+ var body io.Reader = nil
+ c.urlParams_.Set("alt", alt)
+ urls := googleapi.ResolveRelative(c.s.BasePath, "{project}")
+ urls += "?"
+ c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + }) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + req.Header.Set("If-None-Match", c.ifNoneMatch_) + } + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "compute.projects.get" call. +// Exactly one of *Project or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Project.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *ProjectsGetCall) Do(opts ...googleapi.CallOption) (*Project, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Project{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Returns the specified project resource.", + // "httpMethod": "GET", + // "id": "compute.projects.get", + // "parameterOrder": [ + // "project" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}", + // "response": { + // "$ref": "Project" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// method id "compute.projects.moveDisk": + +type ProjectsMoveDiskCall struct { + s *Service + project string + diskmoverequest *DiskMoveRequest + urlParams_ gensupport.URLParams + ctx_ context.Context +} + +// MoveDisk: Moves a persistent disk from one zone to another. +func (r *ProjectsService) MoveDisk(project string, diskmoverequest *DiskMoveRequest) *ProjectsMoveDiskCall { + c := &ProjectsMoveDiskCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.diskmoverequest = diskmoverequest + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ProjectsMoveDiskCall) Fields(s ...googleapi.Field) *ProjectsMoveDiskCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. 
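+//
+// A cancelation sketch (assumes the golang.org/x/net/context and time
+// packages; req stands in for a *DiskMoveRequest):
+//
+//    ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
+//    defer cancel()
+//    op, err := svc.Projects.MoveDisk("my-project", req).Context(ctx).Do()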
+func (c *ProjectsMoveDiskCall) Context(ctx context.Context) *ProjectsMoveDiskCall { + c.ctx_ = ctx + return c +} + +func (c *ProjectsMoveDiskCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.diskmoverequest) + if err != nil { + return nil, err + } + ctype := "application/json" + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/moveDisk") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + }) + req.Header.Set("Content-Type", ctype) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "compute.projects.moveDisk" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *ProjectsMoveDiskCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Moves a persistent disk from one zone to another.", + // "httpMethod": "POST", + // "id": "compute.projects.moveDisk", + // "parameterOrder": [ + // "project" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/moveDisk", + // "request": { + // "$ref": "DiskMoveRequest" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.projects.moveInstance": + +type ProjectsMoveInstanceCall struct { + s *Service + project string + instancemoverequest *InstanceMoveRequest + urlParams_ gensupport.URLParams + ctx_ context.Context +} + +// MoveInstance: Moves an instance and its attached persistent disks +// from one zone to another. +func (r *ProjectsService) MoveInstance(project string, instancemoverequest *InstanceMoveRequest) *ProjectsMoveInstanceCall { + c := &ProjectsMoveInstanceCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.instancemoverequest = instancemoverequest + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. 
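+//
+// For example (sketch only; req stands in for an *InstanceMoveRequest),
+// asking only for the fields of the returned Operation that the caller
+// needs:
+//
+//    op, err := svc.Projects.MoveInstance("my-project", req).
+//        Fields("name", "targetLink").
+//        Do()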
+func (c *ProjectsMoveInstanceCall) Fields(s ...googleapi.Field) *ProjectsMoveInstanceCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ProjectsMoveInstanceCall) Context(ctx context.Context) *ProjectsMoveInstanceCall { + c.ctx_ = ctx + return c +} + +func (c *ProjectsMoveInstanceCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancemoverequest) + if err != nil { + return nil, err + } + ctype := "application/json" + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/moveInstance") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + }) + req.Header.Set("Content-Type", ctype) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "compute.projects.moveInstance" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *ProjectsMoveInstanceCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Moves an instance and its attached persistent disks from one zone to another.", + // "httpMethod": "POST", + // "id": "compute.projects.moveInstance", + // "parameterOrder": [ + // "project" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/moveInstance", + // "request": { + // "$ref": "InstanceMoveRequest" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.projects.setCommonInstanceMetadata": + +type ProjectsSetCommonInstanceMetadataCall struct { + s *Service + project string + metadata *Metadata + urlParams_ gensupport.URLParams + ctx_ context.Context +} + +// SetCommonInstanceMetadata: Sets metadata common to all instances +// within the specified project using the data included in the request. 
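+//
+// A read-modify-write sketch (placeholder names; the Fingerprint on the
+// fetched Metadata is carried back unchanged so the server can detect
+// concurrent edits):
+//
+//    proj, err := svc.Projects.Get("my-project").Do()
+//    if err != nil {
+//        log.Fatal(err)
+//    }
+//    md := proj.CommonInstanceMetadata
+//    // ... add or update entries in md.Items here ...
+//    op, err := svc.Projects.SetCommonInstanceMetadata("my-project", md).Do()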
+// For details, see https://cloud.google.com/compute/docs/reference/latest/projects/setCommonInstanceMetadata +func (r *ProjectsService) SetCommonInstanceMetadata(project string, metadata *Metadata) *ProjectsSetCommonInstanceMetadataCall { + c := &ProjectsSetCommonInstanceMetadataCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.metadata = metadata + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ProjectsSetCommonInstanceMetadataCall) Fields(s ...googleapi.Field) *ProjectsSetCommonInstanceMetadataCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ProjectsSetCommonInstanceMetadataCall) Context(ctx context.Context) *ProjectsSetCommonInstanceMetadataCall { + c.ctx_ = ctx + return c +} + +func (c *ProjectsSetCommonInstanceMetadataCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.metadata) + if err != nil { + return nil, err + } + ctype := "application/json" + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/setCommonInstanceMetadata") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + }) + req.Header.Set("Content-Type", ctype) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "compute.projects.setCommonInstanceMetadata" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *ProjectsSetCommonInstanceMetadataCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
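+ // The generated Do methods share one shape: issue the request, surface
+ // a 304 Not Modified response as a *googleapi.Error, check the HTTP
+ // status, and JSON-decode the body into the typed result.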
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Sets metadata common to all instances within the specified project using the data included in the request.", + // "httpMethod": "POST", + // "id": "compute.projects.setCommonInstanceMetadata", + // "parameterOrder": [ + // "project" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/setCommonInstanceMetadata", + // "request": { + // "$ref": "Metadata" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.projects.setUsageExportBucket": + +type ProjectsSetUsageExportBucketCall struct { + s *Service + project string + usageexportlocation *UsageExportLocation + urlParams_ gensupport.URLParams + ctx_ context.Context +} + +// SetUsageExportBucket: Enables the usage export feature and sets the +// usage export bucket where reports are stored. If you provide an empty +// request body using this method, the usage export feature will be +// disabled. +// For details, see https://cloud.google.com/compute/docs/reference/latest/projects/setUsageExportBucket +func (r *ProjectsService) SetUsageExportBucket(project string, usageexportlocation *UsageExportLocation) *ProjectsSetUsageExportBucketCall { + c := &ProjectsSetUsageExportBucketCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.usageexportlocation = usageexportlocation + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ProjectsSetUsageExportBucketCall) Fields(s ...googleapi.Field) *ProjectsSetUsageExportBucketCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ProjectsSetUsageExportBucketCall) Context(ctx context.Context) *ProjectsSetUsageExportBucketCall { + c.ctx_ = ctx + return c +} + +func (c *ProjectsSetUsageExportBucketCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.usageexportlocation) + if err != nil { + return nil, err + } + ctype := "application/json" + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/setUsageExportBucket") + urls += "?" 
+ c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + }) + req.Header.Set("Content-Type", ctype) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "compute.projects.setUsageExportBucket" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *ProjectsSetUsageExportBucketCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Enables the usage export feature and sets the usage export bucket where reports are stored. If you provide an empty request body using this method, the usage export feature will be disabled.", + // "httpMethod": "POST", + // "id": "compute.projects.setUsageExportBucket", + // "parameterOrder": [ + // "project" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/setUsageExportBucket", + // "request": { + // "$ref": "UsageExportLocation" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/devstorage.full_control", + // "https://www.googleapis.com/auth/devstorage.read_only", + // "https://www.googleapis.com/auth/devstorage.read_write" + // ] + // } + +} + +// method id "compute.regionOperations.delete": + +type RegionOperationsDeleteCall struct { + s *Service + project string + region string + operation string + urlParams_ gensupport.URLParams + ctx_ context.Context +} + +// Delete: Deletes the specified region-specific Operations resource. +// For details, see https://cloud.google.com/compute/docs/reference/latest/regionOperations/delete +func (r *RegionOperationsService) Delete(project string, region string, operation string) *RegionOperationsDeleteCall { + c := &RegionOperationsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + c.operation = operation + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. 
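+//
+// A usage sketch for this call with context-based cancelation; the
+// timeout and variable names are assumptions for illustration:
+//
+//	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
+//	defer cancel()
+//	err := computeService.RegionOperations.Delete(project, region, opName).
+//		Context(ctx).
+//		Do()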
+func (c *RegionOperationsDeleteCall) Fields(s ...googleapi.Field) *RegionOperationsDeleteCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *RegionOperationsDeleteCall) Context(ctx context.Context) *RegionOperationsDeleteCall { + c.ctx_ = ctx + return c +} + +func (c *RegionOperationsDeleteCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/operations/{operation}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("DELETE", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "region": c.region, + "operation": c.operation, + }) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "compute.regionOperations.delete" call. +func (c *RegionOperationsDeleteCall) Do(opts ...googleapi.CallOption) error { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if err != nil { + return err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return err + } + return nil + // { + // "description": "Deletes the specified region-specific Operations resource.", + // "httpMethod": "DELETE", + // "id": "compute.regionOperations.delete", + // "parameterOrder": [ + // "project", + // "region", + // "operation" + // ], + // "parameters": { + // "operation": { + // "description": "Name of the Operations resource to delete.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "Name of the region scoping this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/regions/{region}/operations/{operation}", + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.regionOperations.get": + +type RegionOperationsGetCall struct { + s *Service + project string + region string + operation string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context +} + +// Get: Retrieves the specified region-specific Operations resource. +// For details, see https://cloud.google.com/compute/docs/reference/latest/regionOperations/get +func (r *RegionOperationsService) Get(project string, region string, operation string) *RegionOperationsGetCall { + c := &RegionOperationsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + c.operation = operation + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. 
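+//
+// Operations returned by mutating calls are commonly polled with Get
+// until Status reaches "DONE"; a sketch (the loop and sleep interval
+// are assumptions, not generated code):
+//
+//	for {
+//		op, err := computeService.RegionOperations.Get(project, region, opName).Do()
+//		if err != nil || op.Status == "DONE" {
+//			break
+//		}
+//		time.Sleep(2 * time.Second)
+//	}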
+func (c *RegionOperationsGetCall) Fields(s ...googleapi.Field) *RegionOperationsGetCall {
+ c.urlParams_.Set("fields", googleapi.CombineFields(s))
+ return c
+}
+
+// IfNoneMatch sets the optional parameter which makes the operation
+// fail if the object's ETag matches the given value. This is useful for
+// getting updates only after the object has changed since the last
+// request. Use googleapi.IsNotModified to check whether the response
+// error from Do is the result of If-None-Match.
+func (c *RegionOperationsGetCall) IfNoneMatch(entityTag string) *RegionOperationsGetCall {
+ c.ifNoneMatch_ = entityTag
+ return c
+}
+
+// Context sets the context to be used in this call's Do method. Any
+// pending HTTP request will be aborted if the provided context is
+// canceled.
+func (c *RegionOperationsGetCall) Context(ctx context.Context) *RegionOperationsGetCall {
+ c.ctx_ = ctx
+ return c
+}
+
+func (c *RegionOperationsGetCall) doRequest(alt string) (*http.Response, error) {
+ var body io.Reader = nil
+ c.urlParams_.Set("alt", alt)
+ urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/operations/{operation}")
+ urls += "?" + c.urlParams_.Encode()
+ req, _ := http.NewRequest("GET", urls, body)
+ googleapi.Expand(req.URL, map[string]string{
+ "project": c.project,
+ "region": c.region,
+ "operation": c.operation,
+ })
+ req.Header.Set("User-Agent", c.s.userAgent())
+ if c.ifNoneMatch_ != "" {
+ req.Header.Set("If-None-Match", c.ifNoneMatch_)
+ }
+ if c.ctx_ != nil {
+ return ctxhttp.Do(c.ctx_, c.s.client, req)
+ }
+ return c.s.client.Do(req)
+}
+
+// Do executes the "compute.regionOperations.get" call.
+// Exactly one of *Operation or error will be non-nil. Any non-2xx
+// status code is an error. Response headers are in either
+// *Operation.ServerResponse.Header or (if a response was returned at
+// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified
+// to check whether the returned error was because
+// http.StatusNotModified was returned.
+func (c *RegionOperationsGetCall) Do(opts ...googleapi.CallOption) (*Operation, error) {
+ gensupport.SetOptions(c.urlParams_, opts...)
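+ // When IfNoneMatch was set and the resource is unchanged, the server
+ // replies 304 Not Modified; the block below surfaces that as a
+ // *googleapi.Error so callers can detect it with googleapi.IsNotModified.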
+ res, err := c.doRequest("json")
+ if res != nil && res.StatusCode == http.StatusNotModified {
+ if res.Body != nil {
+ res.Body.Close()
+ }
+ return nil, &googleapi.Error{
+ Code: res.StatusCode,
+ Header: res.Header,
+ }
+ }
+ if err != nil {
+ return nil, err
+ }
+ defer googleapi.CloseBody(res)
+ if err := googleapi.CheckResponse(res); err != nil {
+ return nil, err
+ }
+ ret := &Operation{
+ ServerResponse: googleapi.ServerResponse{
+ Header: res.Header,
+ HTTPStatusCode: res.StatusCode,
+ },
+ }
+ if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
+ return nil, err
+ }
+ return ret, nil
+ // {
+ // "description": "Retrieves the specified region-specific Operations resource.",
+ // "httpMethod": "GET",
+ // "id": "compute.regionOperations.get",
+ // "parameterOrder": [
+ // "project",
+ // "region",
+ // "operation"
+ // ],
+ // "parameters": {
+ // "operation": {
+ // "description": "Name of the Operations resource to return.",
+ // "location": "path",
+ // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
+ // "required": true,
+ // "type": "string"
+ // },
+ // "project": {
+ // "description": "Project ID for this request.",
+ // "location": "path",
+ // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
+ // "required": true,
+ // "type": "string"
+ // },
+ // "region": {
+ // "description": "Name of the region scoping this request.",
+ // "location": "path",
+ // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
+ // "required": true,
+ // "type": "string"
+ // }
+ // },
+ // "path": "{project}/regions/{region}/operations/{operation}",
+ // "response": {
+ // "$ref": "Operation"
+ // },
+ // "scopes": [
+ // "https://www.googleapis.com/auth/cloud-platform",
+ // "https://www.googleapis.com/auth/compute",
+ // "https://www.googleapis.com/auth/compute.readonly"
+ // ]
+ // }
+
+}
+
+// method id "compute.regionOperations.list":
+
+type RegionOperationsListCall struct {
+ s *Service
+ project string
+ region string
+ urlParams_ gensupport.URLParams
+ ifNoneMatch_ string
+ ctx_ context.Context
+}
+
+// List: Retrieves the list of Operation resources contained within the
+// specified region.
+// For details, see https://cloud.google.com/compute/docs/reference/latest/regionOperations/list
+func (r *RegionOperationsService) List(project string, region string) *RegionOperationsListCall {
+ c := &RegionOperationsListCall{s: r.s, urlParams_: make(gensupport.URLParams)}
+ c.project = project
+ c.region = region
+ return c
+}
+
+// Filter sets the optional parameter "filter": Sets a filter expression
+// for filtering listed resources, in the form filter={expression}. Your
+// {expression} must be in the format: field_name comparison_string
+// literal_string.
+//
+// The field_name is the name of the field you want to compare. Only
+// atomic field types are supported (string, number, boolean). The
+// comparison_string must be either eq (equals) or ne (not equals). The
+// literal_string is the string value to filter to. The literal value
+// must be valid for the type of field you are filtering by (string,
+// number, boolean). For string fields, the literal value is interpreted
+// as a regular expression using RE2 syntax. The literal value must
+// match the entire field.
+//
+// For example, filter=name ne example-instance.
+//
+// Compute Engine Beta API Only: If you use filtering in the Beta API,
+// you can also filter on nested fields.
For example, you could filter
+// on instances that have set the scheduling.automaticRestart field to
+// true. In particular, use filtering on nested fields to take advantage
+// of instance labels to organize and filter results based on label
+// values.
+//
+// The Beta API also supports filtering on multiple expressions by
+// providing each separate expression within parentheses. For example,
+// (scheduling.automaticRestart eq true) (zone eq us-central1-f).
+// Multiple expressions are treated as AND expressions meaning that
+// resources must match all expressions to pass the filters.
+func (c *RegionOperationsListCall) Filter(filter string) *RegionOperationsListCall {
+ c.urlParams_.Set("filter", filter)
+ return c
+}
+
+// MaxResults sets the optional parameter "maxResults": The maximum
+// number of results per page that Compute Engine should return. If the
+// number of available results is larger than maxResults, Compute Engine
+// returns a nextPageToken that can be used to get the next page of
+// results in subsequent list requests.
+func (c *RegionOperationsListCall) MaxResults(maxResults int64) *RegionOperationsListCall {
+ c.urlParams_.Set("maxResults", fmt.Sprint(maxResults))
+ return c
+}
+
+// PageToken sets the optional parameter "pageToken": Specifies a page
+// token to use. Set pageToken to the nextPageToken returned by a
+// previous list request to get the next page of results.
+func (c *RegionOperationsListCall) PageToken(pageToken string) *RegionOperationsListCall {
+ c.urlParams_.Set("pageToken", pageToken)
+ return c
+}
+
+// Fields allows partial responses to be retrieved. See
+// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *RegionOperationsListCall) Fields(s ...googleapi.Field) *RegionOperationsListCall {
+ c.urlParams_.Set("fields", googleapi.CombineFields(s))
+ return c
+}
+
+// IfNoneMatch sets the optional parameter which makes the operation
+// fail if the object's ETag matches the given value. This is useful for
+// getting updates only after the object has changed since the last
+// request. Use googleapi.IsNotModified to check whether the response
+// error from Do is the result of If-None-Match.
+func (c *RegionOperationsListCall) IfNoneMatch(entityTag string) *RegionOperationsListCall {
+ c.ifNoneMatch_ = entityTag
+ return c
+}
+
+// Context sets the context to be used in this call's Do method. Any
+// pending HTTP request will be aborted if the provided context is
+// canceled.
+func (c *RegionOperationsListCall) Context(ctx context.Context) *RegionOperationsListCall {
+ c.ctx_ = ctx
+ return c
+}
+
+func (c *RegionOperationsListCall) doRequest(alt string) (*http.Response, error) {
+ var body io.Reader = nil
+ c.urlParams_.Set("alt", alt)
+ urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/operations")
+ urls += "?" + c.urlParams_.Encode()
+ req, _ := http.NewRequest("GET", urls, body)
+ googleapi.Expand(req.URL, map[string]string{
+ "project": c.project,
+ "region": c.region,
+ })
+ req.Header.Set("User-Agent", c.s.userAgent())
+ if c.ifNoneMatch_ != "" {
+ req.Header.Set("If-None-Match", c.ifNoneMatch_)
+ }
+ if c.ctx_ != nil {
+ return ctxhttp.Do(c.ctx_, c.s.client, req)
+ }
+ return c.s.client.Do(req)
+}
+
+// Do executes the "compute.regionOperations.list" call.
+// Exactly one of *OperationList or error will be non-nil. Any non-2xx
+// status code is an error.
Response headers are in either +// *OperationList.ServerResponse.Header or (if a response was returned +// at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *RegionOperationsListCall) Do(opts ...googleapi.CallOption) (*OperationList, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &OperationList{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Retrieves the list of Operation resources contained within the specified region.", + // "httpMethod": "GET", + // "id": "compute.regionOperations.list", + // "parameterOrder": [ + // "project", + // "region" + // ], + // "parameters": { + // "filter": { + // "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, filter=name ne example-instance.\n\nCompute Engine Beta API Only: If you use filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. In particular, use filtering on nested fields to take advantage of instance labels to organize and filter results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions meaning that resources must match all expressions to pass the filters.", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that Compute Engine should return. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests.", + // "format": "uint32", + // "location": "query", + // "maximum": "500", + // "minimum": "0", + // "type": "integer" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. 
Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "location": "query", + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "Name of the region scoping this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/regions/{region}/operations", + // "response": { + // "$ref": "OperationList" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// method id "compute.regions.get": + +type RegionsGetCall struct { + s *Service + project string + region string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context +} + +// Get: Returns the specified region resource. +// For details, see https://cloud.google.com/compute/docs/reference/latest/regions/get +func (r *RegionsService) Get(project string, region string) *RegionsGetCall { + c := &RegionsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *RegionsGetCall) Fields(s ...googleapi.Field) *RegionsGetCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *RegionsGetCall) IfNoneMatch(entityTag string) *RegionsGetCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *RegionsGetCall) Context(ctx context.Context) *RegionsGetCall { + c.ctx_ = ctx + return c +} + +func (c *RegionsGetCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "region": c.region, + }) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + req.Header.Set("If-None-Match", c.ifNoneMatch_) + } + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "compute.regions.get" call. +// Exactly one of *Region or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Region.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. 
Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *RegionsGetCall) Do(opts ...googleapi.CallOption) (*Region, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Region{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Returns the specified region resource.", + // "httpMethod": "GET", + // "id": "compute.regions.get", + // "parameterOrder": [ + // "project", + // "region" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "Name of the region resource to return.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/regions/{region}", + // "response": { + // "$ref": "Region" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// method id "compute.regions.list": + +type RegionsListCall struct { + s *Service + project string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context +} + +// List: Retrieves the list of region resources available to the +// specified project. +// For details, see https://cloud.google.com/compute/docs/reference/latest/regions/list +func (r *RegionsService) List(project string) *RegionsListCall { + c := &RegionsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + return c +} + +// Filter sets the optional parameter "filter": Sets a filter expression +// for filtering listed resources, in the form filter={expression}. Your +// {expression} must be in the format: field_name comparison_string +// literal_string. +// +// The field_name is the name of the field you want to compare. Only +// atomic field types are supported (string, number, boolean). The +// comparison_string must be either eq (equals) or ne (not equals). The +// literal_string is the string value to filter to. The literal value +// must be valid for the type of field you are filtering by (string, +// number, boolean). For string fields, the literal value is interpreted +// as a regular expression using RE2 syntax. The literal value must +// match the entire field. +// +// For example, filter=name ne example-instance. +// +// Compute Engine Beta API Only: If you use filtering in the Beta API, +// you can also filter on nested fields. For example, you could filter +// on instances that have set the scheduling.automaticRestart field to +// true. 
In particular, use filtering on nested fields to take advantage
+// of instance labels to organize and filter results based on label
+// values.
+//
+// The Beta API also supports filtering on multiple expressions by
+// providing each separate expression within parentheses. For example,
+// (scheduling.automaticRestart eq true) (zone eq us-central1-f).
+// Multiple expressions are treated as AND expressions meaning that
+// resources must match all expressions to pass the filters.
+func (c *RegionsListCall) Filter(filter string) *RegionsListCall {
+ c.urlParams_.Set("filter", filter)
+ return c
+}
+
+// MaxResults sets the optional parameter "maxResults": The maximum
+// number of results per page that Compute Engine should return. If the
+// number of available results is larger than maxResults, Compute Engine
+// returns a nextPageToken that can be used to get the next page of
+// results in subsequent list requests.
+func (c *RegionsListCall) MaxResults(maxResults int64) *RegionsListCall {
+ c.urlParams_.Set("maxResults", fmt.Sprint(maxResults))
+ return c
+}
+
+// PageToken sets the optional parameter "pageToken": Specifies a page
+// token to use. Set pageToken to the nextPageToken returned by a
+// previous list request to get the next page of results.
+func (c *RegionsListCall) PageToken(pageToken string) *RegionsListCall {
+ c.urlParams_.Set("pageToken", pageToken)
+ return c
+}
+
+// Fields allows partial responses to be retrieved. See
+// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *RegionsListCall) Fields(s ...googleapi.Field) *RegionsListCall {
+ c.urlParams_.Set("fields", googleapi.CombineFields(s))
+ return c
+}
+
+// IfNoneMatch sets the optional parameter which makes the operation
+// fail if the object's ETag matches the given value. This is useful for
+// getting updates only after the object has changed since the last
+// request. Use googleapi.IsNotModified to check whether the response
+// error from Do is the result of If-None-Match.
+func (c *RegionsListCall) IfNoneMatch(entityTag string) *RegionsListCall {
+ c.ifNoneMatch_ = entityTag
+ return c
+}
+
+// Context sets the context to be used in this call's Do method. Any
+// pending HTTP request will be aborted if the provided context is
+// canceled.
+func (c *RegionsListCall) Context(ctx context.Context) *RegionsListCall {
+ c.ctx_ = ctx
+ return c
+}
+
+func (c *RegionsListCall) doRequest(alt string) (*http.Response, error) {
+ var body io.Reader = nil
+ c.urlParams_.Set("alt", alt)
+ urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions")
+ urls += "?" + c.urlParams_.Encode()
+ req, _ := http.NewRequest("GET", urls, body)
+ googleapi.Expand(req.URL, map[string]string{
+ "project": c.project,
+ })
+ req.Header.Set("User-Agent", c.s.userAgent())
+ if c.ifNoneMatch_ != "" {
+ req.Header.Set("If-None-Match", c.ifNoneMatch_)
+ }
+ if c.ctx_ != nil {
+ return ctxhttp.Do(c.ctx_, c.s.client, req)
+ }
+ return c.s.client.Do(req)
+}
+
+// Do executes the "compute.regions.list" call.
+// Exactly one of *RegionList or error will be non-nil. Any non-2xx
+// status code is an error. Response headers are in either
+// *RegionList.ServerResponse.Header or (if a response was returned at
+// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified
+// to check whether the returned error was because
+// http.StatusNotModified was returned.
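+//
+// A pagination sketch combining Filter, MaxResults, and PageToken; the
+// filter expression, page size, and variable names are assumptions:
+//
+//	call := computeService.Regions.List(project).
+//		Filter("name ne example-region").
+//		MaxResults(100)
+//	for {
+//		page, err := call.Do()
+//		if err != nil {
+//			break
+//		}
+//		regions = append(regions, page.Items...)
+//		if page.NextPageToken == "" {
+//			break
+//		}
+//		call.PageToken(page.NextPageToken)
+//	}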
+func (c *RegionsListCall) Do(opts ...googleapi.CallOption) (*RegionList, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &RegionList{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Retrieves the list of region resources available to the specified project.", + // "httpMethod": "GET", + // "id": "compute.regions.list", + // "parameterOrder": [ + // "project" + // ], + // "parameters": { + // "filter": { + // "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, filter=name ne example-instance.\n\nCompute Engine Beta API Only: If you use filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. In particular, use filtering on nested fields to take advantage of instance labels to organize and filter results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions meaning that resources must match all expressions to pass the filters.", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that Compute Engine should return. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests.", + // "format": "uint32", + // "location": "query", + // "maximum": "500", + // "minimum": "0", + // "type": "integer" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. 
Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "location": "query", + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/regions", + // "response": { + // "$ref": "RegionList" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// method id "compute.routes.delete": + +type RoutesDeleteCall struct { + s *Service + project string + route string + urlParams_ gensupport.URLParams + ctx_ context.Context +} + +// Delete: Deletes the specified route resource. +// For details, see https://cloud.google.com/compute/docs/reference/latest/routes/delete +func (r *RoutesService) Delete(project string, route string) *RoutesDeleteCall { + c := &RoutesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.route = route + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *RoutesDeleteCall) Fields(s ...googleapi.Field) *RoutesDeleteCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *RoutesDeleteCall) Context(ctx context.Context) *RoutesDeleteCall { + c.ctx_ = ctx + return c +} + +func (c *RoutesDeleteCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/routes/{route}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("DELETE", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "route": c.route, + }) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "compute.routes.delete" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *RoutesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
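+ // Note that Delete resolves to an Operation describing the asynchronous
+ // work, not to the deleted resource itself.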
+ res, err := c.doRequest("json")
+ if res != nil && res.StatusCode == http.StatusNotModified {
+ if res.Body != nil {
+ res.Body.Close()
+ }
+ return nil, &googleapi.Error{
+ Code: res.StatusCode,
+ Header: res.Header,
+ }
+ }
+ if err != nil {
+ return nil, err
+ }
+ defer googleapi.CloseBody(res)
+ if err := googleapi.CheckResponse(res); err != nil {
+ return nil, err
+ }
+ ret := &Operation{
+ ServerResponse: googleapi.ServerResponse{
+ Header: res.Header,
+ HTTPStatusCode: res.StatusCode,
+ },
+ }
+ if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
+ return nil, err
+ }
+ return ret, nil
+ // {
+ // "description": "Deletes the specified route resource.",
+ // "httpMethod": "DELETE",
+ // "id": "compute.routes.delete",
+ // "parameterOrder": [
+ // "project",
+ // "route"
+ // ],
+ // "parameters": {
+ // "project": {
+ // "description": "Project ID for this request.",
+ // "location": "path",
+ // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
+ // "required": true,
+ // "type": "string"
+ // },
+ // "route": {
+ // "description": "Name of the route resource to delete.",
+ // "location": "path",
+ // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
+ // "required": true,
+ // "type": "string"
+ // }
+ // },
+ // "path": "{project}/global/routes/{route}",
+ // "response": {
+ // "$ref": "Operation"
+ // },
+ // "scopes": [
+ // "https://www.googleapis.com/auth/cloud-platform",
+ // "https://www.googleapis.com/auth/compute"
+ // ]
+ // }
+
+}
+
+// method id "compute.routes.get":
+
+type RoutesGetCall struct {
+ s *Service
+ project string
+ route string
+ urlParams_ gensupport.URLParams
+ ifNoneMatch_ string
+ ctx_ context.Context
+}
+
+// Get: Returns the specified route resource.
+// For details, see https://cloud.google.com/compute/docs/reference/latest/routes/get
+func (r *RoutesService) Get(project string, route string) *RoutesGetCall {
+ c := &RoutesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)}
+ c.project = project
+ c.route = route
+ return c
+}
+
+// Fields allows partial responses to be retrieved. See
+// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *RoutesGetCall) Fields(s ...googleapi.Field) *RoutesGetCall {
+ c.urlParams_.Set("fields", googleapi.CombineFields(s))
+ return c
+}
+
+// IfNoneMatch sets the optional parameter which makes the operation
+// fail if the object's ETag matches the given value. This is useful for
+// getting updates only after the object has changed since the last
+// request. Use googleapi.IsNotModified to check whether the response
+// error from Do is the result of If-None-Match.
+func (c *RoutesGetCall) IfNoneMatch(entityTag string) *RoutesGetCall {
+ c.ifNoneMatch_ = entityTag
+ return c
+}
+
+// Context sets the context to be used in this call's Do method. Any
+// pending HTTP request will be aborted if the provided context is
+// canceled.
+func (c *RoutesGetCall) Context(ctx context.Context) *RoutesGetCall {
+ c.ctx_ = ctx
+ return c
+}
+
+func (c *RoutesGetCall) doRequest(alt string) (*http.Response, error) {
+ var body io.Reader = nil
+ c.urlParams_.Set("alt", alt)
+ urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/routes/{route}")
+ urls += "?"
+ c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "route": c.route, + }) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + req.Header.Set("If-None-Match", c.ifNoneMatch_) + } + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "compute.routes.get" call. +// Exactly one of *Route or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Route.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *RoutesGetCall) Do(opts ...googleapi.CallOption) (*Route, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Route{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Returns the specified route resource.", + // "httpMethod": "GET", + // "id": "compute.routes.get", + // "parameterOrder": [ + // "project", + // "route" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "route": { + // "description": "Name of the route resource to return.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/global/routes/{route}", + // "response": { + // "$ref": "Route" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// method id "compute.routes.insert": + +type RoutesInsertCall struct { + s *Service + project string + route *Route + urlParams_ gensupport.URLParams + ctx_ context.Context +} + +// Insert: Creates a route resource in the specified project using the +// data included in the request. +// For details, see https://cloud.google.com/compute/docs/reference/latest/routes/insert +func (r *RoutesService) Insert(project string, route *Route) *RoutesInsertCall { + c := &RoutesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.route = route + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *RoutesInsertCall) Fields(s ...googleapi.Field) *RoutesInsertCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. 
Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *RoutesInsertCall) Context(ctx context.Context) *RoutesInsertCall { + c.ctx_ = ctx + return c +} + +func (c *RoutesInsertCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.route) + if err != nil { + return nil, err + } + ctype := "application/json" + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/routes") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + }) + req.Header.Set("Content-Type", ctype) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "compute.routes.insert" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *RoutesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Creates a route resource in the specified project using the data included in the request.", + // "httpMethod": "POST", + // "id": "compute.routes.insert", + // "parameterOrder": [ + // "project" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/global/routes", + // "request": { + // "$ref": "Route" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.routes.list": + +type RoutesListCall struct { + s *Service + project string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context +} + +// List: Retrieves the list of route resources available to the +// specified project. +// For details, see https://cloud.google.com/compute/docs/reference/latest/routes/list +func (r *RoutesService) List(project string) *RoutesListCall { + c := &RoutesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + return c +} + +// Filter sets the optional parameter "filter": Sets a filter expression +// for filtering listed resources, in the form filter={expression}. 
Your
+// {expression} must be in the format: field_name comparison_string
+// literal_string.
+//
+// The field_name is the name of the field you want to compare. Only
+// atomic field types are supported (string, number, boolean). The
+// comparison_string must be either eq (equals) or ne (not equals). The
+// literal_string is the string value to filter to. The literal value
+// must be valid for the type of field you are filtering by (string,
+// number, boolean). For string fields, the literal value is interpreted
+// as a regular expression using RE2 syntax. The literal value must
+// match the entire field.
+//
+// For example, filter=name ne example-instance.
+//
+// Compute Engine Beta API Only: If you use filtering in the Beta API,
+// you can also filter on nested fields. For example, you could filter
+// on instances that have set the scheduling.automaticRestart field to
+// true. In particular, use filtering on nested fields to take advantage
+// of instance labels to organize and filter results based on label
+// values.
+//
+// The Beta API also supports filtering on multiple expressions by
+// providing each separate expression within parentheses. For example,
+// (scheduling.automaticRestart eq true) (zone eq us-central1-f).
+// Multiple expressions are treated as AND expressions meaning that
+// resources must match all expressions to pass the filters.
+func (c *RoutesListCall) Filter(filter string) *RoutesListCall {
+ c.urlParams_.Set("filter", filter)
+ return c
+}
+
+// MaxResults sets the optional parameter "maxResults": The maximum
+// number of results per page that Compute Engine should return. If the
+// number of available results is larger than maxResults, Compute Engine
+// returns a nextPageToken that can be used to get the next page of
+// results in subsequent list requests.
+func (c *RoutesListCall) MaxResults(maxResults int64) *RoutesListCall {
+ c.urlParams_.Set("maxResults", fmt.Sprint(maxResults))
+ return c
+}
+
+// PageToken sets the optional parameter "pageToken": Specifies a page
+// token to use. Set pageToken to the nextPageToken returned by a
+// previous list request to get the next page of results.
+func (c *RoutesListCall) PageToken(pageToken string) *RoutesListCall {
+ c.urlParams_.Set("pageToken", pageToken)
+ return c
+}
+
+// Fields allows partial responses to be retrieved. See
+// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *RoutesListCall) Fields(s ...googleapi.Field) *RoutesListCall {
+ c.urlParams_.Set("fields", googleapi.CombineFields(s))
+ return c
+}
+
+// IfNoneMatch sets the optional parameter which makes the operation
+// fail if the object's ETag matches the given value. This is useful for
+// getting updates only after the object has changed since the last
+// request. Use googleapi.IsNotModified to check whether the response
+// error from Do is the result of If-None-Match.
+func (c *RoutesListCall) IfNoneMatch(entityTag string) *RoutesListCall {
+ c.ifNoneMatch_ = entityTag
+ return c
+}
+
+// Context sets the context to be used in this call's Do method. Any
+// pending HTTP request will be aborted if the provided context is
+// canceled.
+func (c *RoutesListCall) Context(ctx context.Context) *RoutesListCall {
+ c.ctx_ = ctx
+ return c
+}
+
+func (c *RoutesListCall) doRequest(alt string) (*http.Response, error) {
+ var body io.Reader = nil
+ c.urlParams_.Set("alt", alt)
+ urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/routes")
+ urls += "?"
+ c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + }) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + req.Header.Set("If-None-Match", c.ifNoneMatch_) + } + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "compute.routes.list" call. +// Exactly one of *RouteList or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *RouteList.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *RoutesListCall) Do(opts ...googleapi.CallOption) (*RouteList, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &RouteList{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Retrieves the list of route resources available to the specified project.", + // "httpMethod": "GET", + // "id": "compute.routes.list", + // "parameterOrder": [ + // "project" + // ], + // "parameters": { + // "filter": { + // "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, filter=name ne example-instance.\n\nCompute Engine Beta API Only: If you use filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. In particular, use filtering on nested fields to take advantage of instance labels to organize and filter results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions meaning that resources must match all expressions to pass the filters.", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that Compute Engine should return. 
If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests.", + // "format": "uint32", + // "location": "query", + // "maximum": "500", + // "minimum": "0", + // "type": "integer" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "location": "query", + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/global/routes", + // "response": { + // "$ref": "RouteList" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// method id "compute.snapshots.delete": + +type SnapshotsDeleteCall struct { + s *Service + project string + snapshot string + urlParams_ gensupport.URLParams + ctx_ context.Context +} + +// Delete: Deletes the specified Snapshot resource. Keep in mind that +// deleting a single snapshot might not necessarily delete all the data +// on that snapshot. If any data on the snapshot that is marked for +// deletion is needed for subsequent snapshots, the data will be moved +// to the next corresponding snapshot. +// +// For more information, see Deleting snapshots. +// For details, see https://cloud.google.com/compute/docs/reference/latest/snapshots/delete +func (r *SnapshotsService) Delete(project string, snapshot string) *SnapshotsDeleteCall { + c := &SnapshotsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.snapshot = snapshot + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *SnapshotsDeleteCall) Fields(s ...googleapi.Field) *SnapshotsDeleteCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *SnapshotsDeleteCall) Context(ctx context.Context) *SnapshotsDeleteCall { + c.ctx_ = ctx + return c +} + +func (c *SnapshotsDeleteCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/snapshots/{snapshot}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("DELETE", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "snapshot": c.snapshot, + }) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "compute.snapshots.delete" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header.
Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *SnapshotsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Deletes the specified Snapshot resource. Keep in mind that deleting a single snapshot might not necessarily delete all the data on that snapshot. If any data on the snapshot that is marked for deletion is needed for subsequent snapshots, the data will be moved to the next corresponding snapshot.\n\nFor more information, see Deleting snapshots.", + // "httpMethod": "DELETE", + // "id": "compute.snapshots.delete", + // "parameterOrder": [ + // "project", + // "snapshot" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "snapshot": { + // "description": "Name of the Snapshot resource to delete.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/global/snapshots/{snapshot}", + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.snapshots.get": + +type SnapshotsGetCall struct { + s *Service + project string + snapshot string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context +} + +// Get: Returns the specified Snapshot resource. +// For details, see https://cloud.google.com/compute/docs/reference/latest/snapshots/get +func (r *SnapshotsService) Get(project string, snapshot string) *SnapshotsGetCall { + c := &SnapshotsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.snapshot = snapshot + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *SnapshotsGetCall) Fields(s ...googleapi.Field) *SnapshotsGetCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of If-None-Match. +func (c *SnapshotsGetCall) IfNoneMatch(entityTag string) *SnapshotsGetCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method.
Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *SnapshotsGetCall) Context(ctx context.Context) *SnapshotsGetCall { + c.ctx_ = ctx + return c +} + +func (c *SnapshotsGetCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/snapshots/{snapshot}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "snapshot": c.snapshot, + }) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + req.Header.Set("If-None-Match", c.ifNoneMatch_) + } + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "compute.snapshots.get" call. +// Exactly one of *Snapshot or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Snapshot.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *SnapshotsGetCall) Do(opts ...googleapi.CallOption) (*Snapshot, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Snapshot{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Returns the specified Snapshot resource.", + // "httpMethod": "GET", + // "id": "compute.snapshots.get", + // "parameterOrder": [ + // "project", + // "snapshot" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "snapshot": { + // "description": "Name of the Snapshot resource to return.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/global/snapshots/{snapshot}", + // "response": { + // "$ref": "Snapshot" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// method id "compute.snapshots.list": + +type SnapshotsListCall struct { + s *Service + project string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context +} + +// List: Retrieves the list of Snapshot resources contained within the +// specified project. 
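+// +// A short usage sketch (an editor's example, not generated code; svc +// is an assumed *Service from New(httpClient) and "my-project" is a +// placeholder): +// +// call := svc.Snapshots.List("my-project").MaxResults(100) +// for { +// page, err := call.Do() +// if err != nil { +// log.Fatal(err) +// } +// for _, s := range page.Items { +// fmt.Println(s.Name) +// } +// if page.NextPageToken == "" { +// break +// } +// call.PageToken(page.NextPageToken) +// }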
+// For details, see https://cloud.google.com/compute/docs/reference/latest/snapshots/list +func (r *SnapshotsService) List(project string) *SnapshotsListCall { + c := &SnapshotsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + return c +} + +// Filter sets the optional parameter "filter": Sets a filter expression +// for filtering listed resources, in the form filter={expression}. Your +// {expression} must be in the format: field_name comparison_string +// literal_string. +// +// The field_name is the name of the field you want to compare. Only +// atomic field types are supported (string, number, boolean). The +// comparison_string must be either eq (equals) or ne (not equals). The +// literal_string is the string value to filter to. The literal value +// must be valid for the type of field you are filtering by (string, +// number, boolean). For string fields, the literal value is interpreted +// as a regular expression using RE2 syntax. The literal value must +// match the entire field. +// +// For example, filter=name ne example-instance. +// +// Compute Engine Beta API Only: If you use filtering in the Beta API, +// you can also filter on nested fields. For example, you could filter +// on instances that have set the scheduling.automaticRestart field to +// true. In particular, use filtering on nested fields to take advantage +// of instance labels to organize and filter results based on label +// values. +// +// The Beta API also supports filtering on multiple expressions by +// providing each separate expression within parentheses. For example, +// (scheduling.automaticRestart eq true) (zone eq us-central1-f). +// Multiple expressions are treated as AND expressions meaning that +// resources must match all expressions to pass the filters. +func (c *SnapshotsListCall) Filter(filter string) *SnapshotsListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that Compute Engine should return. If the +// number of available results is larger than maxResults, Compute Engine +// returns a nextPageToken that can be used to get the next page of +// results in subsequent list requests. +func (c *SnapshotsListCall) MaxResults(maxResults int64) *SnapshotsListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set pageToken to the nextPageToken returned by a +// previous list request to get the next page of results. +func (c *SnapshotsListCall) PageToken(pageToken string) *SnapshotsListCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *SnapshotsListCall) Fields(s ...googleapi.Field) *SnapshotsListCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of If-None-Match.
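+// +// For example (a sketch; prev is assumed to be a *SnapshotList kept +// from an earlier call, and "Etag" its response header): +// +// list, err := svc.Snapshots.List("my-project"). +// IfNoneMatch(prev.ServerResponse.Header.Get("Etag")). +// Do() +// if googleapi.IsNotModified(err) { +// // prev is still current and can be reused. +// }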
+func (c *SnapshotsListCall) IfNoneMatch(entityTag string) *SnapshotsListCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *SnapshotsListCall) Context(ctx context.Context) *SnapshotsListCall { + c.ctx_ = ctx + return c +} + +func (c *SnapshotsListCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/snapshots") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + }) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + req.Header.Set("If-None-Match", c.ifNoneMatch_) + } + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "compute.snapshots.list" call. +// Exactly one of *SnapshotList or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *SnapshotList.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *SnapshotsListCall) Do(opts ...googleapi.CallOption) (*SnapshotList, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &SnapshotList{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Retrieves the list of Snapshot resources contained within the specified project.", + // "httpMethod": "GET", + // "id": "compute.snapshots.list", + // "parameterOrder": [ + // "project" + // ], + // "parameters": { + // "filter": { + // "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, filter=name ne example-instance.\n\nCompute Engine Beta API Only: If you use filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. 
In particular, use filtering on nested fields to take advantage of instance labels to organize and filter results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions meaning that resources must match all expressions to pass the filters.", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that Compute Engine should return. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests.", + // "format": "uint32", + // "location": "query", + // "maximum": "500", + // "minimum": "0", + // "type": "integer" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "location": "query", + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/global/snapshots", + // "response": { + // "$ref": "SnapshotList" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// method id "compute.sslCertificates.delete": + +type SslCertificatesDeleteCall struct { + s *Service + project string + sslCertificate string + urlParams_ gensupport.URLParams + ctx_ context.Context +} + +// Delete: Deletes the specified SslCertificate resource. +func (r *SslCertificatesService) Delete(project string, sslCertificate string) *SslCertificatesDeleteCall { + c := &SslCertificatesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.sslCertificate = sslCertificate + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *SslCertificatesDeleteCall) Fields(s ...googleapi.Field) *SslCertificatesDeleteCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *SslCertificatesDeleteCall) Context(ctx context.Context) *SslCertificatesDeleteCall { + c.ctx_ = ctx + return c +} + +func (c *SslCertificatesDeleteCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/sslCertificates/{sslCertificate}") + urls += "?" 
+ c.urlParams_.Encode() + req, _ := http.NewRequest("DELETE", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "sslCertificate": c.sslCertificate, + }) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "compute.sslCertificates.delete" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *SslCertificatesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Deletes the specified SslCertificate resource.", + // "httpMethod": "DELETE", + // "id": "compute.sslCertificates.delete", + // "parameterOrder": [ + // "project", + // "sslCertificate" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "sslCertificate": { + // "description": "Name of the SslCertificate resource to delete.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/global/sslCertificates/{sslCertificate}", + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.sslCertificates.get": + +type SslCertificatesGetCall struct { + s *Service + project string + sslCertificate string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context +} + +// Get: Returns the specified SslCertificate resource. +func (r *SslCertificatesService) Get(project string, sslCertificate string) *SslCertificatesGetCall { + c := &SslCertificatesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.sslCertificate = sslCertificate + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *SslCertificatesGetCall) Fields(s ...googleapi.Field) *SslCertificatesGetCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. 
This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of If-None-Match. +func (c *SslCertificatesGetCall) IfNoneMatch(entityTag string) *SslCertificatesGetCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *SslCertificatesGetCall) Context(ctx context.Context) *SslCertificatesGetCall { + c.ctx_ = ctx + return c +} + +func (c *SslCertificatesGetCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/sslCertificates/{sslCertificate}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "sslCertificate": c.sslCertificate, + }) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + req.Header.Set("If-None-Match", c.ifNoneMatch_) + } + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "compute.sslCertificates.get" call. +// Exactly one of *SslCertificate or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *SslCertificate.ServerResponse.Header or (if a response was returned +// at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *SslCertificatesGetCall) Do(opts ...googleapi.CallOption) (*SslCertificate, error) { + gensupport.SetOptions(c.urlParams_, opts...)
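+ // doRequest issues the HTTP call below; a 304 Not Modified response + // (possible when IfNoneMatch was set) is converted into a + // *googleapi.Error so callers can detect it with googleapi.IsNotModified.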
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &SslCertificate{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Returns the specified SslCertificate resource.", + // "httpMethod": "GET", + // "id": "compute.sslCertificates.get", + // "parameterOrder": [ + // "project", + // "sslCertificate" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "sslCertificate": { + // "description": "Name of the SslCertificate resource to return.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/global/sslCertificates/{sslCertificate}", + // "response": { + // "$ref": "SslCertificate" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// method id "compute.sslCertificates.insert": + +type SslCertificatesInsertCall struct { + s *Service + project string + sslcertificate *SslCertificate + urlParams_ gensupport.URLParams + ctx_ context.Context +} + +// Insert: Creates a SslCertificate resource in the specified project +// using the data included in the request. +func (r *SslCertificatesService) Insert(project string, sslcertificate *SslCertificate) *SslCertificatesInsertCall { + c := &SslCertificatesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.sslcertificate = sslcertificate + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *SslCertificatesInsertCall) Fields(s ...googleapi.Field) *SslCertificatesInsertCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *SslCertificatesInsertCall) Context(ctx context.Context) *SslCertificatesInsertCall { + c.ctx_ = ctx + return c +} + +func (c *SslCertificatesInsertCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.sslcertificate) + if err != nil { + return nil, err + } + ctype := "application/json" + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/sslCertificates") + urls += "?" 
+ c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + }) + req.Header.Set("Content-Type", ctype) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "compute.sslCertificates.insert" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *SslCertificatesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Creates a SslCertificate resource in the specified project using the data included in the request.", + // "httpMethod": "POST", + // "id": "compute.sslCertificates.insert", + // "parameterOrder": [ + // "project" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/global/sslCertificates", + // "request": { + // "$ref": "SslCertificate" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.sslCertificates.list": + +type SslCertificatesListCall struct { + s *Service + project string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context +} + +// List: Retrieves the list of SslCertificate resources available to the +// specified project. +func (r *SslCertificatesService) List(project string) *SslCertificatesListCall { + c := &SslCertificatesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + return c +} + +// Filter sets the optional parameter "filter": Sets a filter expression +// for filtering listed resources, in the form filter={expression}. Your +// {expression} must be in the format: field_name comparison_string +// literal_string. +// +// The field_name is the name of the field you want to compare. Only +// atomic field types are supported (string, number, boolean). The +// comparison_string must be either eq (equals) or ne (not equals). The +// literal_string is the string value to filter to. The literal value +// must be valid for the type of field you are filtering by (string, +// number, boolean). 
For string fields, the literal value is interpreted +// as a regular expression using RE2 syntax. The literal value must +// match the entire field. +// +// For example, filter=name ne example-instance. +// +// Compute Engine Beta API Only: If you use filtering in the Beta API, +// you can also filter on nested fields. For example, you could filter +// on instances that have set the scheduling.automaticRestart field to +// true. In particular, use filtering on nested fields to take advantage +// of instance labels to organize and filter results based on label +// values. +// +// The Beta API also supports filtering on multiple expressions by +// providing each separate expression within parentheses. For example, +// (scheduling.automaticRestart eq true) (zone eq us-central1-f). +// Multiple expressions are treated as AND expressions meaning that +// resources must match all expressions to pass the filters. +func (c *SslCertificatesListCall) Filter(filter string) *SslCertificatesListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that Compute Engine should return. If the +// number of available results is larger than maxResults, Compute Engine +// returns a nextPageToken that can be used to get the next page of +// results in subsequent list requests. +func (c *SslCertificatesListCall) MaxResults(maxResults int64) *SslCertificatesListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set pageToken to the nextPageToken returned by a +// previous list request to get the next page of results. +func (c *SslCertificatesListCall) PageToken(pageToken string) *SslCertificatesListCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *SslCertificatesListCall) Fields(s ...googleapi.Field) *SslCertificatesListCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of If-None-Match. +func (c *SslCertificatesListCall) IfNoneMatch(entityTag string) *SslCertificatesListCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *SslCertificatesListCall) Context(ctx context.Context) *SslCertificatesListCall { + c.ctx_ = ctx + return c +} + +func (c *SslCertificatesListCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/sslCertificates") + urls += "?"
+ c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + }) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + req.Header.Set("If-None-Match", c.ifNoneMatch_) + } + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "compute.sslCertificates.list" call. +// Exactly one of *SslCertificateList or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *SslCertificateList.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *SslCertificatesListCall) Do(opts ...googleapi.CallOption) (*SslCertificateList, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &SslCertificateList{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Retrieves the list of SslCertificate resources available to the specified project.", + // "httpMethod": "GET", + // "id": "compute.sslCertificates.list", + // "parameterOrder": [ + // "project" + // ], + // "parameters": { + // "filter": { + // "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, filter=name ne example-instance.\n\nCompute Engine Beta API Only: If you use filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. In particular, use filtering on nested fields to take advantage of instance labels to organize and filter results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions meaning that resources must match all expressions to pass the filters.", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that Compute Engine should return. 
If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests.", + // "format": "uint32", + // "location": "query", + // "maximum": "500", + // "minimum": "0", + // "type": "integer" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "location": "query", + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/global/sslCertificates", + // "response": { + // "$ref": "SslCertificateList" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// method id "compute.targetHttpProxies.delete": + +type TargetHttpProxiesDeleteCall struct { + s *Service + project string + targetHttpProxy string + urlParams_ gensupport.URLParams + ctx_ context.Context +} + +// Delete: Deletes the specified TargetHttpProxy resource. +// For details, see https://cloud.google.com/compute/docs/reference/latest/targetHttpProxies/delete +func (r *TargetHttpProxiesService) Delete(project string, targetHttpProxy string) *TargetHttpProxiesDeleteCall { + c := &TargetHttpProxiesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.targetHttpProxy = targetHttpProxy + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *TargetHttpProxiesDeleteCall) Fields(s ...googleapi.Field) *TargetHttpProxiesDeleteCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *TargetHttpProxiesDeleteCall) Context(ctx context.Context) *TargetHttpProxiesDeleteCall { + c.ctx_ = ctx + return c +} + +func (c *TargetHttpProxiesDeleteCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/targetHttpProxies/{targetHttpProxy}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("DELETE", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "targetHttpProxy": c.targetHttpProxy, + }) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "compute.targetHttpProxies.delete" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. 
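+// +// A usage sketch (an editor's example; svc, "my-project" and +// "my-proxy" are placeholders): the returned Operation is +// asynchronous, so a caller might poll it via GlobalOperations, e.g. +// +// op, err := svc.TargetHttpProxies.Delete("my-project", "my-proxy").Do() +// if err != nil { +// log.Fatal(err) +// } +// for op.Status != "DONE" { +// time.Sleep(time.Second) +// op, err = svc.GlobalOperations.Get("my-project", op.Name).Do() +// if err != nil { +// log.Fatal(err) +// } +// }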
+func (c *TargetHttpProxiesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Deletes the specified TargetHttpProxy resource.", + // "httpMethod": "DELETE", + // "id": "compute.targetHttpProxies.delete", + // "parameterOrder": [ + // "project", + // "targetHttpProxy" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "targetHttpProxy": { + // "description": "Name of the TargetHttpProxy resource to delete.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/global/targetHttpProxies/{targetHttpProxy}", + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.targetHttpProxies.get": + +type TargetHttpProxiesGetCall struct { + s *Service + project string + targetHttpProxy string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context +} + +// Get: Returns the specified TargetHttpProxy resource. +// For details, see https://cloud.google.com/compute/docs/reference/latest/targetHttpProxies/get +func (r *TargetHttpProxiesService) Get(project string, targetHttpProxy string) *TargetHttpProxiesGetCall { + c := &TargetHttpProxiesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.targetHttpProxy = targetHttpProxy + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *TargetHttpProxiesGetCall) Fields(s ...googleapi.Field) *TargetHttpProxiesGetCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of If-None-Match. +func (c *TargetHttpProxiesGetCall) IfNoneMatch(entityTag string) *TargetHttpProxiesGetCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled.
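+// +// For example (a sketch; this package's context is +// golang.org/x/net/context): +// +// ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) +// defer cancel() +// proxy, err := svc.TargetHttpProxies.Get("my-project", "my-proxy").Context(ctx).Do()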
+func (c *TargetHttpProxiesGetCall) Context(ctx context.Context) *TargetHttpProxiesGetCall { + c.ctx_ = ctx + return c +} + +func (c *TargetHttpProxiesGetCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/targetHttpProxies/{targetHttpProxy}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "targetHttpProxy": c.targetHttpProxy, + }) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + req.Header.Set("If-None-Match", c.ifNoneMatch_) + } + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "compute.targetHttpProxies.get" call. +// Exactly one of *TargetHttpProxy or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *TargetHttpProxy.ServerResponse.Header or (if a response was returned +// at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *TargetHttpProxiesGetCall) Do(opts ...googleapi.CallOption) (*TargetHttpProxy, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &TargetHttpProxy{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Returns the specified TargetHttpProxy resource.", + // "httpMethod": "GET", + // "id": "compute.targetHttpProxies.get", + // "parameterOrder": [ + // "project", + // "targetHttpProxy" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "targetHttpProxy": { + // "description": "Name of the TargetHttpProxy resource to return.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/global/targetHttpProxies/{targetHttpProxy}", + // "response": { + // "$ref": "TargetHttpProxy" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// method id "compute.targetHttpProxies.insert": + +type TargetHttpProxiesInsertCall struct { + s *Service + project string + targethttpproxy *TargetHttpProxy + urlParams_ gensupport.URLParams + ctx_ context.Context +} + +// Insert: Creates a TargetHttpProxy resource in the specified project +// using the data included in the request. 
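+// +// A usage sketch (an editor's example; urlMapLink is an assumed self +// link of an existing UrlMap): +// +// proxy := &TargetHttpProxy{ +// Name: "my-proxy", +// UrlMap: urlMapLink, +// } +// op, err := svc.TargetHttpProxies.Insert("my-project", proxy).Do()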
+// For details, see https://cloud.google.com/compute/docs/reference/latest/targetHttpProxies/insert +func (r *TargetHttpProxiesService) Insert(project string, targethttpproxy *TargetHttpProxy) *TargetHttpProxiesInsertCall { + c := &TargetHttpProxiesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.targethttpproxy = targethttpproxy + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *TargetHttpProxiesInsertCall) Fields(s ...googleapi.Field) *TargetHttpProxiesInsertCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *TargetHttpProxiesInsertCall) Context(ctx context.Context) *TargetHttpProxiesInsertCall { + c.ctx_ = ctx + return c +} + +func (c *TargetHttpProxiesInsertCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.targethttpproxy) + if err != nil { + return nil, err + } + ctype := "application/json" + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/targetHttpProxies") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + }) + req.Header.Set("Content-Type", ctype) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "compute.targetHttpProxies.insert" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *TargetHttpProxiesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
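+ // doRequest serializes the TargetHttpProxy body via + // googleapi.WithoutDataWrapper.JSONReader before issuing the POST; + // Do then decodes the resulting Operation.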
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Creates a TargetHttpProxy resource in the specified project using the data included in the request.", + // "httpMethod": "POST", + // "id": "compute.targetHttpProxies.insert", + // "parameterOrder": [ + // "project" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/global/targetHttpProxies", + // "request": { + // "$ref": "TargetHttpProxy" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.targetHttpProxies.list": + +type TargetHttpProxiesListCall struct { + s *Service + project string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context +} + +// List: Retrieves the list of TargetHttpProxy resources available to +// the specified project. +// For details, see https://cloud.google.com/compute/docs/reference/latest/targetHttpProxies/list +func (r *TargetHttpProxiesService) List(project string) *TargetHttpProxiesListCall { + c := &TargetHttpProxiesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + return c +} + +// Filter sets the optional parameter "filter": Sets a filter expression +// for filtering listed resources, in the form filter={expression}. Your +// {expression} must be in the format: field_name comparison_string +// literal_string. +// +// The field_name is the name of the field you want to compare. Only +// atomic field types are supported (string, number, boolean). The +// comparison_string must be either eq (equals) or ne (not equals). The +// literal_string is the string value to filter to. The literal value +// must be valid for the type of field you are filtering by (string, +// number, boolean). For string fields, the literal value is interpreted +// as a regular expression using RE2 syntax. The literal value must +// match the entire field. +// +// For example, filter=name ne example-instance. +// +// Compute Engine Beta API Only: If you use filtering in the Beta API, +// you can also filter on nested fields. For example, you could filter +// on instances that have set the scheduling.automaticRestart field to +// true. In particular, use filtering on nested fields to take advantage +// of instance labels to organize and filter results based on label +// values. +// +// The Beta API also supports filtering on multiple expressions by +// providing each separate expression within parentheses. For example, +// (scheduling.automaticRestart eq true) (zone eq us-central1-f). 
+// Multiple expressions are treated as AND expressions meaning that +// resources must match all expressions to pass the filters. +func (c *TargetHttpProxiesListCall) Filter(filter string) *TargetHttpProxiesListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that Compute Engine should return. If the +// number of available results is larger than maxResults, Compute Engine +// returns a nextPageToken that can be used to get the next page of +// results in subsequent list requests. +func (c *TargetHttpProxiesListCall) MaxResults(maxResults int64) *TargetHttpProxiesListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set pageToken to the nextPageToken returned by a +// previous list request to get the next page of results. +func (c *TargetHttpProxiesListCall) PageToken(pageToken string) *TargetHttpProxiesListCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *TargetHttpProxiesListCall) Fields(s ...googleapi.Field) *TargetHttpProxiesListCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of If-None-Match. +func (c *TargetHttpProxiesListCall) IfNoneMatch(entityTag string) *TargetHttpProxiesListCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *TargetHttpProxiesListCall) Context(ctx context.Context) *TargetHttpProxiesListCall { + c.ctx_ = ctx + return c +} + +func (c *TargetHttpProxiesListCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/targetHttpProxies") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + }) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + req.Header.Set("If-None-Match", c.ifNoneMatch_) + } + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "compute.targetHttpProxies.list" call. +// Exactly one of *TargetHttpProxyList or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *TargetHttpProxyList.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *TargetHttpProxiesListCall) Do(opts ...googleapi.CallOption) (*TargetHttpProxyList, error) { + gensupport.SetOptions(c.urlParams_, opts...)
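+ // Each Do call fetches a single page; callers pass the returned + // TargetHttpProxyList.NextPageToken to PageToken to request the + // following page.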
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &TargetHttpProxyList{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Retrieves the list of TargetHttpProxy resources available to the specified project.", + // "httpMethod": "GET", + // "id": "compute.targetHttpProxies.list", + // "parameterOrder": [ + // "project" + // ], + // "parameters": { + // "filter": { + // "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, filter=name ne example-instance.\n\nCompute Engine Beta API Only: If you use filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. In particular, use filtering on nested fields to take advantage of instance labels to organize and filter results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions meaning that resources must match all expressions to pass the filters.", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that Compute Engine should return. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests.", + // "format": "uint32", + // "location": "query", + // "maximum": "500", + // "minimum": "0", + // "type": "integer" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. 
Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "location": "query", + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/global/targetHttpProxies", + // "response": { + // "$ref": "TargetHttpProxyList" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// method id "compute.targetHttpProxies.setUrlMap": + +type TargetHttpProxiesSetUrlMapCall struct { + s *Service + project string + targetHttpProxy string + urlmapreference *UrlMapReference + urlParams_ gensupport.URLParams + ctx_ context.Context +} + +// SetUrlMap: Changes the URL map for TargetHttpProxy. +// For details, see https://cloud.google.com/compute/docs/reference/latest/targetHttpProxies/setUrlMap +func (r *TargetHttpProxiesService) SetUrlMap(project string, targetHttpProxy string, urlmapreference *UrlMapReference) *TargetHttpProxiesSetUrlMapCall { + c := &TargetHttpProxiesSetUrlMapCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.targetHttpProxy = targetHttpProxy + c.urlmapreference = urlmapreference + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *TargetHttpProxiesSetUrlMapCall) Fields(s ...googleapi.Field) *TargetHttpProxiesSetUrlMapCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *TargetHttpProxiesSetUrlMapCall) Context(ctx context.Context) *TargetHttpProxiesSetUrlMapCall { + c.ctx_ = ctx + return c +} + +func (c *TargetHttpProxiesSetUrlMapCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.urlmapreference) + if err != nil { + return nil, err + } + ctype := "application/json" + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/targetHttpProxies/{targetHttpProxy}/setUrlMap") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "targetHttpProxy": c.targetHttpProxy, + }) + req.Header.Set("Content-Type", ctype) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "compute.targetHttpProxies.setUrlMap" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *TargetHttpProxiesSetUrlMapCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
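+	// SetUrlMap, like every mutating call in this file, resolves to an
+	// *Operation rather than the updated proxy; the new URL map is only
+	// guaranteed to be in effect once that operation completes. A usage
+	// sketch, assuming an authenticated *Service named svc and a
+	// hypothetical urlMapSelfLink for an existing UrlMap:
+	//
+	//	ref := &UrlMapReference{UrlMap: urlMapSelfLink}
+	//	op, err := svc.TargetHttpProxies.SetUrlMap("my-project", "my-proxy", ref).Do()
+	//	if err != nil {
+	//		log.Fatal(err)
+	//	}
+	//	fmt.Println("operation started:", op.Name)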
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Changes the URL map for TargetHttpProxy.", + // "httpMethod": "POST", + // "id": "compute.targetHttpProxies.setUrlMap", + // "parameterOrder": [ + // "project", + // "targetHttpProxy" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "targetHttpProxy": { + // "description": "Name of the TargetHttpProxy resource whose URL map is to be set.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/targetHttpProxies/{targetHttpProxy}/setUrlMap", + // "request": { + // "$ref": "UrlMapReference" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.targetHttpsProxies.delete": + +type TargetHttpsProxiesDeleteCall struct { + s *Service + project string + targetHttpsProxy string + urlParams_ gensupport.URLParams + ctx_ context.Context +} + +// Delete: Deletes the specified TargetHttpsProxy resource. +func (r *TargetHttpsProxiesService) Delete(project string, targetHttpsProxy string) *TargetHttpsProxiesDeleteCall { + c := &TargetHttpsProxiesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.targetHttpsProxy = targetHttpsProxy + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *TargetHttpsProxiesDeleteCall) Fields(s ...googleapi.Field) *TargetHttpsProxiesDeleteCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *TargetHttpsProxiesDeleteCall) Context(ctx context.Context) *TargetHttpsProxiesDeleteCall { + c.ctx_ = ctx + return c +} + +func (c *TargetHttpsProxiesDeleteCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/targetHttpsProxies/{targetHttpsProxy}") + urls += "?" 
+ c.urlParams_.Encode() + req, _ := http.NewRequest("DELETE", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "targetHttpsProxy": c.targetHttpsProxy, + }) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "compute.targetHttpsProxies.delete" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *TargetHttpsProxiesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Deletes the specified TargetHttpsProxy resource.", + // "httpMethod": "DELETE", + // "id": "compute.targetHttpsProxies.delete", + // "parameterOrder": [ + // "project", + // "targetHttpsProxy" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "targetHttpsProxy": { + // "description": "Name of the TargetHttpsProxy resource to delete.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/global/targetHttpsProxies/{targetHttpsProxy}", + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.targetHttpsProxies.get": + +type TargetHttpsProxiesGetCall struct { + s *Service + project string + targetHttpsProxy string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context +} + +// Get: Returns the specified TargetHttpsProxy resource. +func (r *TargetHttpsProxiesService) Get(project string, targetHttpsProxy string) *TargetHttpsProxiesGetCall { + c := &TargetHttpsProxiesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.targetHttpsProxy = targetHttpsProxy + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. 
+func (c *TargetHttpsProxiesGetCall) Fields(s ...googleapi.Field) *TargetHttpsProxiesGetCall {
+	c.urlParams_.Set("fields", googleapi.CombineFields(s))
+	return c
+}
+
+// IfNoneMatch sets the optional parameter which makes the operation
+// fail if the object's ETag matches the given value. This is useful for
+// getting updates only after the object has changed since the last
+// request. Use googleapi.IsNotModified to check whether the response
+// error from Do is the result of If-None-Match.
+func (c *TargetHttpsProxiesGetCall) IfNoneMatch(entityTag string) *TargetHttpsProxiesGetCall {
+	c.ifNoneMatch_ = entityTag
+	return c
+}
+
+// Context sets the context to be used in this call's Do method. Any
+// pending HTTP request will be aborted if the provided context is
+// canceled.
+func (c *TargetHttpsProxiesGetCall) Context(ctx context.Context) *TargetHttpsProxiesGetCall {
+	c.ctx_ = ctx
+	return c
+}
+
+func (c *TargetHttpsProxiesGetCall) doRequest(alt string) (*http.Response, error) {
+	var body io.Reader = nil
+	c.urlParams_.Set("alt", alt)
+	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/targetHttpsProxies/{targetHttpsProxy}")
+	urls += "?" + c.urlParams_.Encode()
+	req, _ := http.NewRequest("GET", urls, body)
+	googleapi.Expand(req.URL, map[string]string{
+		"project":          c.project,
+		"targetHttpsProxy": c.targetHttpsProxy,
+	})
+	req.Header.Set("User-Agent", c.s.userAgent())
+	if c.ifNoneMatch_ != "" {
+		req.Header.Set("If-None-Match", c.ifNoneMatch_)
+	}
+	if c.ctx_ != nil {
+		return ctxhttp.Do(c.ctx_, c.s.client, req)
+	}
+	return c.s.client.Do(req)
+}
+
+// Do executes the "compute.targetHttpsProxies.get" call.
+// Exactly one of *TargetHttpsProxy or error will be non-nil. Any
+// non-2xx status code is an error. Response headers are in either
+// *TargetHttpsProxy.ServerResponse.Header or (if a response was
+// returned at all) in error.(*googleapi.Error).Header. Use
+// googleapi.IsNotModified to check whether the returned error was
+// because http.StatusNotModified was returned.
+func (c *TargetHttpsProxiesGetCall) Do(opts ...googleapi.CallOption) (*TargetHttpsProxy, error) {
+	gensupport.SetOptions(c.urlParams_, opts...)
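+	// Because a 304 is reported as an error here, a caller that set
+	// IfNoneMatch uses googleapi.IsNotModified to distinguish "unchanged"
+	// from a real failure. A conditional-fetch sketch, assuming an
+	// authenticated *Service named svc and a previously fetched proxy in
+	// cached:
+	//
+	//	call := svc.TargetHttpsProxies.Get("my-project", "my-https-proxy")
+	//	call.IfNoneMatch(cached.ServerResponse.Header.Get("Etag"))
+	//	proxy, err := call.Do()
+	//	if googleapi.IsNotModified(err) {
+	//		proxy = cached // still current; reuse the cached copy
+	//	} else if err != nil {
+	//		log.Fatal(err)
+	//	}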
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &TargetHttpsProxy{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Returns the specified TargetHttpsProxy resource.", + // "httpMethod": "GET", + // "id": "compute.targetHttpsProxies.get", + // "parameterOrder": [ + // "project", + // "targetHttpsProxy" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "targetHttpsProxy": { + // "description": "Name of the TargetHttpsProxy resource to return.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/global/targetHttpsProxies/{targetHttpsProxy}", + // "response": { + // "$ref": "TargetHttpsProxy" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// method id "compute.targetHttpsProxies.insert": + +type TargetHttpsProxiesInsertCall struct { + s *Service + project string + targethttpsproxy *TargetHttpsProxy + urlParams_ gensupport.URLParams + ctx_ context.Context +} + +// Insert: Creates a TargetHttpsProxy resource in the specified project +// using the data included in the request. +func (r *TargetHttpsProxiesService) Insert(project string, targethttpsproxy *TargetHttpsProxy) *TargetHttpsProxiesInsertCall { + c := &TargetHttpsProxiesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.targethttpsproxy = targethttpsproxy + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *TargetHttpsProxiesInsertCall) Fields(s ...googleapi.Field) *TargetHttpsProxiesInsertCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *TargetHttpsProxiesInsertCall) Context(ctx context.Context) *TargetHttpsProxiesInsertCall { + c.ctx_ = ctx + return c +} + +func (c *TargetHttpsProxiesInsertCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.targethttpsproxy) + if err != nil { + return nil, err + } + ctype := "application/json" + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/targetHttpsProxies") + urls += "?" 
+ c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + }) + req.Header.Set("Content-Type", ctype) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "compute.targetHttpsProxies.insert" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *TargetHttpsProxiesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Creates a TargetHttpsProxy resource in the specified project using the data included in the request.", + // "httpMethod": "POST", + // "id": "compute.targetHttpsProxies.insert", + // "parameterOrder": [ + // "project" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/global/targetHttpsProxies", + // "request": { + // "$ref": "TargetHttpsProxy" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.targetHttpsProxies.list": + +type TargetHttpsProxiesListCall struct { + s *Service + project string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context +} + +// List: Retrieves the list of TargetHttpsProxy resources available to +// the specified project. +func (r *TargetHttpsProxiesService) List(project string) *TargetHttpsProxiesListCall { + c := &TargetHttpsProxiesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + return c +} + +// Filter sets the optional parameter "filter": Sets a filter expression +// for filtering listed resources, in the form filter={expression}. Your +// {expression} must be in the format: field_name comparison_string +// literal_string. +// +// The field_name is the name of the field you want to compare. Only +// atomic field types are supported (string, number, boolean). The +// comparison_string must be either eq (equals) or ne (not equals). The +// literal_string is the string value to filter to. The literal value +// must be valid for the type of field you are filtering by (string, +// number, boolean). 
For string fields, the literal value is interpreted
+// as a regular expression using RE2 syntax. The literal value must
+// match the entire field.
+//
+// For example, filter=name ne example-instance.
+//
+// Compute Engine Beta API Only: If you use filtering in the Beta API,
+// you can also filter on nested fields. For example, you could filter
+// on instances that have set the scheduling.automaticRestart field to
+// true. In particular, use filtering on nested fields to take advantage
+// of instance labels to organize and filter results based on label
+// values.
+//
+// The Beta API also supports filtering on multiple expressions by
+// providing each separate expression within parentheses. For example,
+// (scheduling.automaticRestart eq true) (zone eq us-central1-f).
+// Multiple expressions are treated as AND expressions meaning that
+// resources must match all expressions to pass the filters.
+func (c *TargetHttpsProxiesListCall) Filter(filter string) *TargetHttpsProxiesListCall {
+	c.urlParams_.Set("filter", filter)
+	return c
+}
+
+// MaxResults sets the optional parameter "maxResults": The maximum
+// number of results per page that Compute Engine should return. If the
+// number of available results is larger than maxResults, Compute Engine
+// returns a nextPageToken that can be used to get the next page of
+// results in subsequent list requests.
+func (c *TargetHttpsProxiesListCall) MaxResults(maxResults int64) *TargetHttpsProxiesListCall {
+	c.urlParams_.Set("maxResults", fmt.Sprint(maxResults))
+	return c
+}
+
+// PageToken sets the optional parameter "pageToken": Specifies a page
+// token to use. Set pageToken to the nextPageToken returned by a
+// previous list request to get the next page of results.
+func (c *TargetHttpsProxiesListCall) PageToken(pageToken string) *TargetHttpsProxiesListCall {
+	c.urlParams_.Set("pageToken", pageToken)
+	return c
+}
+
+// Fields allows partial responses to be retrieved. See
+// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *TargetHttpsProxiesListCall) Fields(s ...googleapi.Field) *TargetHttpsProxiesListCall {
+	c.urlParams_.Set("fields", googleapi.CombineFields(s))
+	return c
+}
+
+// IfNoneMatch sets the optional parameter which makes the operation
+// fail if the object's ETag matches the given value. This is useful for
+// getting updates only after the object has changed since the last
+// request. Use googleapi.IsNotModified to check whether the response
+// error from Do is the result of If-None-Match.
+func (c *TargetHttpsProxiesListCall) IfNoneMatch(entityTag string) *TargetHttpsProxiesListCall {
+	c.ifNoneMatch_ = entityTag
+	return c
+}
+
+// Context sets the context to be used in this call's Do method. Any
+// pending HTTP request will be aborted if the provided context is
+// canceled.
+func (c *TargetHttpsProxiesListCall) Context(ctx context.Context) *TargetHttpsProxiesListCall {
+	c.ctx_ = ctx
+	return c
+}
+
+func (c *TargetHttpsProxiesListCall) doRequest(alt string) (*http.Response, error) {
+	var body io.Reader = nil
+	c.urlParams_.Set("alt", alt)
+	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/targetHttpsProxies")
+	urls += "?"
+ c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + }) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + req.Header.Set("If-None-Match", c.ifNoneMatch_) + } + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "compute.targetHttpsProxies.list" call. +// Exactly one of *TargetHttpsProxyList or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *TargetHttpsProxyList.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *TargetHttpsProxiesListCall) Do(opts ...googleapi.CallOption) (*TargetHttpsProxyList, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &TargetHttpsProxyList{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Retrieves the list of TargetHttpsProxy resources available to the specified project.", + // "httpMethod": "GET", + // "id": "compute.targetHttpsProxies.list", + // "parameterOrder": [ + // "project" + // ], + // "parameters": { + // "filter": { + // "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, filter=name ne example-instance.\n\nCompute Engine Beta API Only: If you use filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. In particular, use filtering on nested fields to take advantage of instance labels to organize and filter results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions meaning that resources must match all expressions to pass the filters.", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that Compute Engine should return. 
If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests.", + // "format": "uint32", + // "location": "query", + // "maximum": "500", + // "minimum": "0", + // "type": "integer" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "location": "query", + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/global/targetHttpsProxies", + // "response": { + // "$ref": "TargetHttpsProxyList" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// method id "compute.targetHttpsProxies.setSslCertificates": + +type TargetHttpsProxiesSetSslCertificatesCall struct { + s *Service + project string + targetHttpsProxy string + targethttpsproxiessetsslcertificatesrequest *TargetHttpsProxiesSetSslCertificatesRequest + urlParams_ gensupport.URLParams + ctx_ context.Context +} + +// SetSslCertificates: Replaces SslCertificates for TargetHttpsProxy. +func (r *TargetHttpsProxiesService) SetSslCertificates(project string, targetHttpsProxy string, targethttpsproxiessetsslcertificatesrequest *TargetHttpsProxiesSetSslCertificatesRequest) *TargetHttpsProxiesSetSslCertificatesCall { + c := &TargetHttpsProxiesSetSslCertificatesCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.targetHttpsProxy = targetHttpsProxy + c.targethttpsproxiessetsslcertificatesrequest = targethttpsproxiessetsslcertificatesrequest + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *TargetHttpsProxiesSetSslCertificatesCall) Fields(s ...googleapi.Field) *TargetHttpsProxiesSetSslCertificatesCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *TargetHttpsProxiesSetSslCertificatesCall) Context(ctx context.Context) *TargetHttpsProxiesSetSslCertificatesCall { + c.ctx_ = ctx + return c +} + +func (c *TargetHttpsProxiesSetSslCertificatesCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.targethttpsproxiessetsslcertificatesrequest) + if err != nil { + return nil, err + } + ctype := "application/json" + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/targetHttpsProxies/{targetHttpsProxy}/setSslCertificates") + urls += "?" 
+ c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "targetHttpsProxy": c.targetHttpsProxy, + }) + req.Header.Set("Content-Type", ctype) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "compute.targetHttpsProxies.setSslCertificates" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *TargetHttpsProxiesSetSslCertificatesCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Replaces SslCertificates for TargetHttpsProxy.", + // "httpMethod": "POST", + // "id": "compute.targetHttpsProxies.setSslCertificates", + // "parameterOrder": [ + // "project", + // "targetHttpsProxy" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "targetHttpsProxy": { + // "description": "Name of the TargetHttpsProxy resource whose SSLCertificate is to be set.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/targetHttpsProxies/{targetHttpsProxy}/setSslCertificates", + // "request": { + // "$ref": "TargetHttpsProxiesSetSslCertificatesRequest" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.targetHttpsProxies.setUrlMap": + +type TargetHttpsProxiesSetUrlMapCall struct { + s *Service + project string + targetHttpsProxy string + urlmapreference *UrlMapReference + urlParams_ gensupport.URLParams + ctx_ context.Context +} + +// SetUrlMap: Changes the URL map for TargetHttpsProxy. +func (r *TargetHttpsProxiesService) SetUrlMap(project string, targetHttpsProxy string, urlmapreference *UrlMapReference) *TargetHttpsProxiesSetUrlMapCall { + c := &TargetHttpsProxiesSetUrlMapCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.targetHttpsProxy = targetHttpsProxy + c.urlmapreference = urlmapreference + return c +} + +// Fields allows partial responses to be retrieved. 
See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *TargetHttpsProxiesSetUrlMapCall) Fields(s ...googleapi.Field) *TargetHttpsProxiesSetUrlMapCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *TargetHttpsProxiesSetUrlMapCall) Context(ctx context.Context) *TargetHttpsProxiesSetUrlMapCall { + c.ctx_ = ctx + return c +} + +func (c *TargetHttpsProxiesSetUrlMapCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.urlmapreference) + if err != nil { + return nil, err + } + ctype := "application/json" + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/targetHttpsProxies/{targetHttpsProxy}/setUrlMap") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "targetHttpsProxy": c.targetHttpsProxy, + }) + req.Header.Set("Content-Type", ctype) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "compute.targetHttpsProxies.setUrlMap" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *TargetHttpsProxiesSetUrlMapCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
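+	// The request below honors any context installed via Context: when one
+	// is set, ctxhttp.Do cancels the in-flight HTTP request as soon as the
+	// context is canceled or times out. A sketch, assuming an
+	// authenticated *Service named svc and a hypothetical urlMapSelfLink:
+	//
+	//	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
+	//	defer cancel()
+	//	op, err := svc.TargetHttpsProxies.
+	//		SetUrlMap("my-project", "my-https-proxy", &UrlMapReference{UrlMap: urlMapSelfLink}).
+	//		Context(ctx).
+	//		Do()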
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Changes the URL map for TargetHttpsProxy.", + // "httpMethod": "POST", + // "id": "compute.targetHttpsProxies.setUrlMap", + // "parameterOrder": [ + // "project", + // "targetHttpsProxy" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "targetHttpsProxy": { + // "description": "Name of the TargetHttpsProxy resource whose URL map is to be set.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/targetHttpsProxies/{targetHttpsProxy}/setUrlMap", + // "request": { + // "$ref": "UrlMapReference" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.targetInstances.aggregatedList": + +type TargetInstancesAggregatedListCall struct { + s *Service + project string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context +} + +// AggregatedList: Retrieves the list of target instances grouped by +// scope. +// For details, see https://cloud.google.com/compute/docs/reference/latest/targetInstances/aggregatedList +func (r *TargetInstancesService) AggregatedList(project string) *TargetInstancesAggregatedListCall { + c := &TargetInstancesAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + return c +} + +// Filter sets the optional parameter "filter": Sets a filter expression +// for filtering listed resources, in the form filter={expression}. Your +// {expression} must be in the format: field_name comparison_string +// literal_string. +// +// The field_name is the name of the field you want to compare. Only +// atomic field types are supported (string, number, boolean). The +// comparison_string must be either eq (equals) or ne (not equals). The +// literal_string is the string value to filter to. The literal value +// must be valid for the type of field you are filtering by (string, +// number, boolean). For string fields, the literal value is interpreted +// as a regular expression using RE2 syntax. The literal value must +// match the entire field. +// +// For example, filter=name ne example-instance. +// +// Compute Engine Beta API Only: If you use filtering in the Beta API, +// you can also filter on nested fields. For example, you could filter +// on instances that have set the scheduling.automaticRestart field to +// true. In particular, use filtering on nested fields to take advantage +// of instance labels to organize and filter results based on label +// values. 
+//
+// The Beta API also supports filtering on multiple expressions by
+// providing each separate expression within parentheses. For example,
+// (scheduling.automaticRestart eq true) (zone eq us-central1-f).
+// Multiple expressions are treated as AND expressions meaning that
+// resources must match all expressions to pass the filters.
+func (c *TargetInstancesAggregatedListCall) Filter(filter string) *TargetInstancesAggregatedListCall {
+	c.urlParams_.Set("filter", filter)
+	return c
+}
+
+// MaxResults sets the optional parameter "maxResults": The maximum
+// number of results per page that Compute Engine should return. If the
+// number of available results is larger than maxResults, Compute Engine
+// returns a nextPageToken that can be used to get the next page of
+// results in subsequent list requests.
+func (c *TargetInstancesAggregatedListCall) MaxResults(maxResults int64) *TargetInstancesAggregatedListCall {
+	c.urlParams_.Set("maxResults", fmt.Sprint(maxResults))
+	return c
+}
+
+// PageToken sets the optional parameter "pageToken": Specifies a page
+// token to use. Set pageToken to the nextPageToken returned by a
+// previous list request to get the next page of results.
+func (c *TargetInstancesAggregatedListCall) PageToken(pageToken string) *TargetInstancesAggregatedListCall {
+	c.urlParams_.Set("pageToken", pageToken)
+	return c
+}
+
+// Fields allows partial responses to be retrieved. See
+// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *TargetInstancesAggregatedListCall) Fields(s ...googleapi.Field) *TargetInstancesAggregatedListCall {
+	c.urlParams_.Set("fields", googleapi.CombineFields(s))
+	return c
+}
+
+// IfNoneMatch sets the optional parameter which makes the operation
+// fail if the object's ETag matches the given value. This is useful for
+// getting updates only after the object has changed since the last
+// request. Use googleapi.IsNotModified to check whether the response
+// error from Do is the result of If-None-Match.
+func (c *TargetInstancesAggregatedListCall) IfNoneMatch(entityTag string) *TargetInstancesAggregatedListCall {
+	c.ifNoneMatch_ = entityTag
+	return c
+}
+
+// Context sets the context to be used in this call's Do method. Any
+// pending HTTP request will be aborted if the provided context is
+// canceled.
+func (c *TargetInstancesAggregatedListCall) Context(ctx context.Context) *TargetInstancesAggregatedListCall {
+	c.ctx_ = ctx
+	return c
+}
+
+func (c *TargetInstancesAggregatedListCall) doRequest(alt string) (*http.Response, error) {
+	var body io.Reader = nil
+	c.urlParams_.Set("alt", alt)
+	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/targetInstances")
+	urls += "?" + c.urlParams_.Encode()
+	req, _ := http.NewRequest("GET", urls, body)
+	googleapi.Expand(req.URL, map[string]string{
+		"project": c.project,
+	})
+	req.Header.Set("User-Agent", c.s.userAgent())
+	if c.ifNoneMatch_ != "" {
+		req.Header.Set("If-None-Match", c.ifNoneMatch_)
+	}
+	if c.ctx_ != nil {
+		return ctxhttp.Do(c.ctx_, c.s.client, req)
+	}
+	return c.s.client.Do(req)
+}
+
+// Do executes the "compute.targetInstances.aggregatedList" call.
+// Exactly one of *TargetInstanceAggregatedList or error will be
+// non-nil. Any non-2xx status code is an error. Response headers are in
+// either *TargetInstanceAggregatedList.ServerResponse.Header or (if a
+// response was returned at all) in error.(*googleapi.Error).Header.
Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *TargetInstancesAggregatedListCall) Do(opts ...googleapi.CallOption) (*TargetInstanceAggregatedList, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &TargetInstanceAggregatedList{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Retrieves the list of target instances grouped by scope.", + // "httpMethod": "GET", + // "id": "compute.targetInstances.aggregatedList", + // "parameterOrder": [ + // "project" + // ], + // "parameters": { + // "filter": { + // "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, filter=name ne example-instance.\n\nCompute Engine Beta API Only: If you use filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. In particular, use filtering on nested fields to take advantage of instance labels to organize and filter results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions meaning that resources must match all expressions to pass the filters.", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that Compute Engine should return. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests.", + // "format": "uint32", + // "location": "query", + // "maximum": "500", + // "minimum": "0", + // "type": "integer" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. 
Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "location": "query", + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/aggregated/targetInstances", + // "response": { + // "$ref": "TargetInstanceAggregatedList" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// method id "compute.targetInstances.delete": + +type TargetInstancesDeleteCall struct { + s *Service + project string + zone string + targetInstance string + urlParams_ gensupport.URLParams + ctx_ context.Context +} + +// Delete: Deletes the specified TargetInstance resource. +// For details, see https://cloud.google.com/compute/docs/reference/latest/targetInstances/delete +func (r *TargetInstancesService) Delete(project string, zone string, targetInstance string) *TargetInstancesDeleteCall { + c := &TargetInstancesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.zone = zone + c.targetInstance = targetInstance + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *TargetInstancesDeleteCall) Fields(s ...googleapi.Field) *TargetInstancesDeleteCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *TargetInstancesDeleteCall) Context(ctx context.Context) *TargetInstancesDeleteCall { + c.ctx_ = ctx + return c +} + +func (c *TargetInstancesDeleteCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/targetInstances/{targetInstance}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("DELETE", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "zone": c.zone, + "targetInstance": c.targetInstance, + }) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "compute.targetInstances.delete" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *TargetInstancesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
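+	// Delete resolves to a zonal *Operation; the target instance is only
+	// gone once that operation reports DONE. A polling sketch (plain loop,
+	// no backoff), assuming an authenticated *Service named svc:
+	//
+	//	op, err := svc.TargetInstances.Delete("my-project", "us-central1-f", "my-target").Do()
+	//	if err != nil {
+	//		log.Fatal(err)
+	//	}
+	//	for op.Status != "DONE" {
+	//		time.Sleep(2 * time.Second)
+	//		op, err = svc.ZoneOperations.Get("my-project", "us-central1-f", op.Name).Do()
+	//		if err != nil {
+	//			log.Fatal(err)
+	//		}
+	//	}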
+	res, err := c.doRequest("json")
+	if res != nil && res.StatusCode == http.StatusNotModified {
+		if res.Body != nil {
+			res.Body.Close()
+		}
+		return nil, &googleapi.Error{
+			Code:   res.StatusCode,
+			Header: res.Header,
+		}
+	}
+	if err != nil {
+		return nil, err
+	}
+	defer googleapi.CloseBody(res)
+	if err := googleapi.CheckResponse(res); err != nil {
+		return nil, err
+	}
+	ret := &Operation{
+		ServerResponse: googleapi.ServerResponse{
+			Header:         res.Header,
+			HTTPStatusCode: res.StatusCode,
+		},
+	}
+	if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
+		return nil, err
+	}
+	return ret, nil
+	// {
+	//   "description": "Deletes the specified TargetInstance resource.",
+	//   "httpMethod": "DELETE",
+	//   "id": "compute.targetInstances.delete",
+	//   "parameterOrder": [
+	//     "project",
+	//     "zone",
+	//     "targetInstance"
+	//   ],
+	//   "parameters": {
+	//     "project": {
+	//       "description": "Project ID for this request.",
+	//       "location": "path",
+	//       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
+	//       "required": true,
+	//       "type": "string"
+	//     },
+	//     "targetInstance": {
+	//       "description": "Name of the TargetInstance resource to delete.",
+	//       "location": "path",
+	//       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
+	//       "required": true,
+	//       "type": "string"
+	//     },
+	//     "zone": {
+	//       "description": "Name of the zone scoping this request.",
+	//       "location": "path",
+	//       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
+	//       "required": true,
+	//       "type": "string"
+	//     }
+	//   },
+	//   "path": "{project}/zones/{zone}/targetInstances/{targetInstance}",
+	//   "response": {
+	//     "$ref": "Operation"
+	//   },
+	//   "scopes": [
+	//     "https://www.googleapis.com/auth/cloud-platform",
+	//     "https://www.googleapis.com/auth/compute"
+	//   ]
+	// }
+
+}
+
+// method id "compute.targetInstances.get":
+
+type TargetInstancesGetCall struct {
+	s              *Service
+	project        string
+	zone           string
+	targetInstance string
+	urlParams_     gensupport.URLParams
+	ifNoneMatch_   string
+	ctx_           context.Context
+}
+
+// Get: Returns the specified TargetInstance resource.
+// For details, see https://cloud.google.com/compute/docs/reference/latest/targetInstances/get
+func (r *TargetInstancesService) Get(project string, zone string, targetInstance string) *TargetInstancesGetCall {
+	c := &TargetInstancesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)}
+	c.project = project
+	c.zone = zone
+	c.targetInstance = targetInstance
+	return c
+}
+
+// Fields allows partial responses to be retrieved. See
+// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *TargetInstancesGetCall) Fields(s ...googleapi.Field) *TargetInstancesGetCall {
+	c.urlParams_.Set("fields", googleapi.CombineFields(s))
+	return c
+}
+
+// IfNoneMatch sets the optional parameter which makes the operation
+// fail if the object's ETag matches the given value. This is useful for
+// getting updates only after the object has changed since the last
+// request. Use googleapi.IsNotModified to check whether the response
+// error from Do is the result of If-None-Match.
+func (c *TargetInstancesGetCall) IfNoneMatch(entityTag string) *TargetInstancesGetCall {
+	c.ifNoneMatch_ = entityTag
+	return c
+}
+
+// Context sets the context to be used in this call's Do method. Any
+// pending HTTP request will be aborted if the provided context is
+// canceled.
+func (c *TargetInstancesGetCall) Context(ctx context.Context) *TargetInstancesGetCall { + c.ctx_ = ctx + return c +} + +func (c *TargetInstancesGetCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/targetInstances/{targetInstance}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "zone": c.zone, + "targetInstance": c.targetInstance, + }) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + req.Header.Set("If-None-Match", c.ifNoneMatch_) + } + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "compute.targetInstances.get" call. +// Exactly one of *TargetInstance or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *TargetInstance.ServerResponse.Header or (if a response was returned +// at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *TargetInstancesGetCall) Do(opts ...googleapi.CallOption) (*TargetInstance, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &TargetInstance{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Returns the specified TargetInstance resource.", + // "httpMethod": "GET", + // "id": "compute.targetInstances.get", + // "parameterOrder": [ + // "project", + // "zone", + // "targetInstance" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "targetInstance": { + // "description": "Name of the TargetInstance resource to return.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "zone": { + // "description": "Name of the zone scoping this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/zones/{zone}/targetInstances/{targetInstance}", + // "response": { + // "$ref": "TargetInstance" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// method id "compute.targetInstances.insert": + +type TargetInstancesInsertCall struct { + s *Service + project string + zone string + targetinstance *TargetInstance + urlParams_ gensupport.URLParams + ctx_ context.Context +} + +// Insert: 
Creates a TargetInstance resource in the specified project +// and zone using the data included in the request. +// For details, see https://cloud.google.com/compute/docs/reference/latest/targetInstances/insert +func (r *TargetInstancesService) Insert(project string, zone string, targetinstance *TargetInstance) *TargetInstancesInsertCall { + c := &TargetInstancesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.zone = zone + c.targetinstance = targetinstance + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *TargetInstancesInsertCall) Fields(s ...googleapi.Field) *TargetInstancesInsertCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *TargetInstancesInsertCall) Context(ctx context.Context) *TargetInstancesInsertCall { + c.ctx_ = ctx + return c +} + +func (c *TargetInstancesInsertCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.targetinstance) + if err != nil { + return nil, err + } + ctype := "application/json" + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/targetInstances") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "zone": c.zone, + }) + req.Header.Set("Content-Type", ctype) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "compute.targetInstances.insert" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *TargetInstancesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
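+ // SetOptions above merged any per-call options into c.urlParams_, so
+ // doRequest builds the HTTP request from the final set of parameters
+ // (honoring c.ctx_ if a context was attached via Context).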
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Creates a TargetInstance resource in the specified project and zone using the data included in the request.", + // "httpMethod": "POST", + // "id": "compute.targetInstances.insert", + // "parameterOrder": [ + // "project", + // "zone" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "zone": { + // "description": "Name of the zone scoping this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/zones/{zone}/targetInstances", + // "request": { + // "$ref": "TargetInstance" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.targetInstances.list": + +type TargetInstancesListCall struct { + s *Service + project string + zone string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context +} + +// List: Retrieves the list of TargetInstance resources available to the +// specified project and zone. +// For details, see https://cloud.google.com/compute/docs/reference/latest/targetInstances/list +func (r *TargetInstancesService) List(project string, zone string) *TargetInstancesListCall { + c := &TargetInstancesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.zone = zone + return c +} + +// Filter sets the optional parameter "filter": Sets a filter expression +// for filtering listed resources, in the form filter={expression}. Your +// {expression} must be in the format: field_name comparison_string +// literal_string. +// +// The field_name is the name of the field you want to compare. Only +// atomic field types are supported (string, number, boolean). The +// comparison_string must be either eq (equals) or ne (not equals). The +// literal_string is the string value to filter to. The literal value +// must be valid for the type of field you are filtering by (string, +// number, boolean). For string fields, the literal value is interpreted +// as a regular expression using RE2 syntax. The literal value must +// match the entire field. +// +// For example, filter=name ne example-instance. +// +// Compute Engine Beta API Only: If you use filtering in the Beta API, +// you can also filter on nested fields. For example, you could filter +// on instances that have set the scheduling.automaticRestart field to +// true. In particular, use filtering on nested fields to take advantage +// of instance labels to organize and filter results based on label +// values. 
+//
+// The Beta API also supports filtering on multiple expressions by
+// providing each separate expression within parentheses. For example,
+// (scheduling.automaticRestart eq true) (zone eq us-central1-f).
+// Multiple expressions are treated as AND expressions meaning that
+// resources must match all expressions to pass the filters.
+func (c *TargetInstancesListCall) Filter(filter string) *TargetInstancesListCall {
+ c.urlParams_.Set("filter", filter)
+ return c
+}
+
+// MaxResults sets the optional parameter "maxResults": The maximum
+// number of results per page that Compute Engine should return. If the
+// number of available results is larger than maxResults, Compute Engine
+// returns a nextPageToken that can be used to get the next page of
+// results in subsequent list requests.
+func (c *TargetInstancesListCall) MaxResults(maxResults int64) *TargetInstancesListCall {
+ c.urlParams_.Set("maxResults", fmt.Sprint(maxResults))
+ return c
+}
+
+// PageToken sets the optional parameter "pageToken": Specifies a page
+// token to use. Set pageToken to the nextPageToken returned by a
+// previous list request to get the next page of results.
+func (c *TargetInstancesListCall) PageToken(pageToken string) *TargetInstancesListCall {
+ c.urlParams_.Set("pageToken", pageToken)
+ return c
+}
+
+// Fields allows partial responses to be retrieved. See
+// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *TargetInstancesListCall) Fields(s ...googleapi.Field) *TargetInstancesListCall {
+ c.urlParams_.Set("fields", googleapi.CombineFields(s))
+ return c
+}
+
+// IfNoneMatch sets the optional parameter which makes the operation
+// fail if the object's ETag matches the given value. This is useful for
+// getting updates only after the object has changed since the last
+// request. Use googleapi.IsNotModified to check whether the response
+// error from Do is the result of If-None-Match.
+func (c *TargetInstancesListCall) IfNoneMatch(entityTag string) *TargetInstancesListCall {
+ c.ifNoneMatch_ = entityTag
+ return c
+}
+
+// Context sets the context to be used in this call's Do method. Any
+// pending HTTP request will be aborted if the provided context is
+// canceled.
+func (c *TargetInstancesListCall) Context(ctx context.Context) *TargetInstancesListCall {
+ c.ctx_ = ctx
+ return c
+}
+
+func (c *TargetInstancesListCall) doRequest(alt string) (*http.Response, error) {
+ var body io.Reader = nil
+ c.urlParams_.Set("alt", alt)
+ urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/targetInstances")
+ urls += "?" + c.urlParams_.Encode()
+ req, _ := http.NewRequest("GET", urls, body)
+ googleapi.Expand(req.URL, map[string]string{
+ "project": c.project,
+ "zone": c.zone,
+ })
+ req.Header.Set("User-Agent", c.s.userAgent())
+ if c.ifNoneMatch_ != "" {
+ req.Header.Set("If-None-Match", c.ifNoneMatch_)
+ }
+ if c.ctx_ != nil {
+ return ctxhttp.Do(c.ctx_, c.s.client, req)
+ }
+ return c.s.client.Do(req)
+}
+
+// Do executes the "compute.targetInstances.list" call.
+// Exactly one of *TargetInstanceList or error will be non-nil. Any
+// non-2xx status code is an error. Response headers are in either
+// *TargetInstanceList.ServerResponse.Header or (if a response was
+// returned at all) in error.(*googleapi.Error).Header. Use
+// googleapi.IsNotModified to check whether the returned error was
+// because http.StatusNotModified was returned.
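+//
+// A minimal usage sketch, not part of the generated surface: it assumes an
+// authenticated *Service named svc, a context.Context named ctx, and
+// placeholder project/zone names, and pages through all results:
+//
+//   call := svc.TargetInstances.List("my-project", "us-central1-f").
+//       Filter("name ne example-instance").
+//       MaxResults(100).
+//       Context(ctx)
+//   for {
+//       page, err := call.Do()
+//       if err != nil {
+//           break // handle the error in real code
+//       }
+//       for _, ti := range page.Items {
+//           _ = ti // use each *TargetInstance
+//       }
+//       if page.NextPageToken == "" {
+//           break
+//       }
+//       call.PageToken(page.NextPageToken)
+//   }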
+func (c *TargetInstancesListCall) Do(opts ...googleapi.CallOption) (*TargetInstanceList, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &TargetInstanceList{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Retrieves the list of TargetInstance resources available to the specified project and zone.", + // "httpMethod": "GET", + // "id": "compute.targetInstances.list", + // "parameterOrder": [ + // "project", + // "zone" + // ], + // "parameters": { + // "filter": { + // "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, filter=name ne example-instance.\n\nCompute Engine Beta API Only: If you use filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. In particular, use filtering on nested fields to take advantage of instance labels to organize and filter results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions meaning that resources must match all expressions to pass the filters.", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that Compute Engine should return. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests.", + // "format": "uint32", + // "location": "query", + // "maximum": "500", + // "minimum": "0", + // "type": "integer" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. 
Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "location": "query", + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "zone": { + // "description": "Name of the zone scoping this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/zones/{zone}/targetInstances", + // "response": { + // "$ref": "TargetInstanceList" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// method id "compute.targetPools.addHealthCheck": + +type TargetPoolsAddHealthCheckCall struct { + s *Service + project string + region string + targetPool string + targetpoolsaddhealthcheckrequest *TargetPoolsAddHealthCheckRequest + urlParams_ gensupport.URLParams + ctx_ context.Context +} + +// AddHealthCheck: Adds health check URL to targetPool. +// For details, see https://cloud.google.com/compute/docs/reference/latest/targetPools/addHealthCheck +func (r *TargetPoolsService) AddHealthCheck(project string, region string, targetPool string, targetpoolsaddhealthcheckrequest *TargetPoolsAddHealthCheckRequest) *TargetPoolsAddHealthCheckCall { + c := &TargetPoolsAddHealthCheckCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + c.targetPool = targetPool + c.targetpoolsaddhealthcheckrequest = targetpoolsaddhealthcheckrequest + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *TargetPoolsAddHealthCheckCall) Fields(s ...googleapi.Field) *TargetPoolsAddHealthCheckCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *TargetPoolsAddHealthCheckCall) Context(ctx context.Context) *TargetPoolsAddHealthCheckCall { + c.ctx_ = ctx + return c +} + +func (c *TargetPoolsAddHealthCheckCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.targetpoolsaddhealthcheckrequest) + if err != nil { + return nil, err + } + ctype := "application/json" + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/targetPools/{targetPool}/addHealthCheck") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "region": c.region, + "targetPool": c.targetPool, + }) + req.Header.Set("Content-Type", ctype) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "compute.targetPools.addHealthCheck" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. 
Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *TargetPoolsAddHealthCheckCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Adds health check URL to targetPool.", + // "httpMethod": "POST", + // "id": "compute.targetPools.addHealthCheck", + // "parameterOrder": [ + // "project", + // "region", + // "targetPool" + // ], + // "parameters": { + // "project": { + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "Name of the region scoping this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "targetPool": { + // "description": "Name of the TargetPool resource to which health_check_url is to be added.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/regions/{region}/targetPools/{targetPool}/addHealthCheck", + // "request": { + // "$ref": "TargetPoolsAddHealthCheckRequest" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.targetPools.addInstance": + +type TargetPoolsAddInstanceCall struct { + s *Service + project string + region string + targetPool string + targetpoolsaddinstancerequest *TargetPoolsAddInstanceRequest + urlParams_ gensupport.URLParams + ctx_ context.Context +} + +// AddInstance: Adds instance URL to targetPool. +// For details, see https://cloud.google.com/compute/docs/reference/latest/targetPools/addInstance +func (r *TargetPoolsService) AddInstance(project string, region string, targetPool string, targetpoolsaddinstancerequest *TargetPoolsAddInstanceRequest) *TargetPoolsAddInstanceCall { + c := &TargetPoolsAddInstanceCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + c.targetPool = targetPool + c.targetpoolsaddinstancerequest = targetpoolsaddinstancerequest + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. 
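+//
+// For instance, a sketch that asks only for the resulting operation's name
+// and status (illustrative only; assumes an authenticated *Service named
+// svc and a request body req of type *TargetPoolsAddInstanceRequest):
+//
+//   op, err := svc.TargetPools.AddInstance("my-project", "us-central1",
+//       "my-pool", req).Fields("name", "status").Do()
+//   _, _ = op, err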
+func (c *TargetPoolsAddInstanceCall) Fields(s ...googleapi.Field) *TargetPoolsAddInstanceCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *TargetPoolsAddInstanceCall) Context(ctx context.Context) *TargetPoolsAddInstanceCall { + c.ctx_ = ctx + return c +} + +func (c *TargetPoolsAddInstanceCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.targetpoolsaddinstancerequest) + if err != nil { + return nil, err + } + ctype := "application/json" + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/targetPools/{targetPool}/addInstance") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "region": c.region, + "targetPool": c.targetPool, + }) + req.Header.Set("Content-Type", ctype) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "compute.targetPools.addInstance" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *TargetPoolsAddInstanceCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Adds instance URL to targetPool.", + // "httpMethod": "POST", + // "id": "compute.targetPools.addInstance", + // "parameterOrder": [ + // "project", + // "region", + // "targetPool" + // ], + // "parameters": { + // "project": { + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "Name of the region scoping this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "targetPool": { + // "description": "Name of the TargetPool resource to which instance_url is to be added.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/regions/{region}/targetPools/{targetPool}/addInstance", + // "request": { + // "$ref": "TargetPoolsAddInstanceRequest" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.targetPools.aggregatedList": + +type TargetPoolsAggregatedListCall struct { + s *Service + project string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context +} + +// AggregatedList: Retrieves the list of target pools grouped by scope. +// For details, see https://cloud.google.com/compute/docs/reference/latest/targetPools/aggregatedList +func (r *TargetPoolsService) AggregatedList(project string) *TargetPoolsAggregatedListCall { + c := &TargetPoolsAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + return c +} + +// Filter sets the optional parameter "filter": Sets a filter expression +// for filtering listed resources, in the form filter={expression}. Your +// {expression} must be in the format: field_name comparison_string +// literal_string. +// +// The field_name is the name of the field you want to compare. Only +// atomic field types are supported (string, number, boolean). The +// comparison_string must be either eq (equals) or ne (not equals). The +// literal_string is the string value to filter to. The literal value +// must be valid for the type of field you are filtering by (string, +// number, boolean). For string fields, the literal value is interpreted +// as a regular expression using RE2 syntax. The literal value must +// match the entire field. +// +// For example, filter=name ne example-instance. +// +// Compute Engine Beta API Only: If you use filtering in the Beta API, +// you can also filter on nested fields. For example, you could filter +// on instances that have set the scheduling.automaticRestart field to +// true. 
In particular, use filtering on nested fields to take advantage
+// of instance labels to organize and filter results based on label
+// values.
+//
+// The Beta API also supports filtering on multiple expressions by
+// providing each separate expression within parentheses. For example,
+// (scheduling.automaticRestart eq true) (zone eq us-central1-f).
+// Multiple expressions are treated as AND expressions meaning that
+// resources must match all expressions to pass the filters.
+func (c *TargetPoolsAggregatedListCall) Filter(filter string) *TargetPoolsAggregatedListCall {
+ c.urlParams_.Set("filter", filter)
+ return c
+}
+
+// MaxResults sets the optional parameter "maxResults": The maximum
+// number of results per page that Compute Engine should return. If the
+// number of available results is larger than maxResults, Compute Engine
+// returns a nextPageToken that can be used to get the next page of
+// results in subsequent list requests.
+func (c *TargetPoolsAggregatedListCall) MaxResults(maxResults int64) *TargetPoolsAggregatedListCall {
+ c.urlParams_.Set("maxResults", fmt.Sprint(maxResults))
+ return c
+}
+
+// PageToken sets the optional parameter "pageToken": Specifies a page
+// token to use. Set pageToken to the nextPageToken returned by a
+// previous list request to get the next page of results.
+func (c *TargetPoolsAggregatedListCall) PageToken(pageToken string) *TargetPoolsAggregatedListCall {
+ c.urlParams_.Set("pageToken", pageToken)
+ return c
+}
+
+// Fields allows partial responses to be retrieved. See
+// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *TargetPoolsAggregatedListCall) Fields(s ...googleapi.Field) *TargetPoolsAggregatedListCall {
+ c.urlParams_.Set("fields", googleapi.CombineFields(s))
+ return c
+}
+
+// IfNoneMatch sets the optional parameter which makes the operation
+// fail if the object's ETag matches the given value. This is useful for
+// getting updates only after the object has changed since the last
+// request. Use googleapi.IsNotModified to check whether the response
+// error from Do is the result of If-None-Match.
+func (c *TargetPoolsAggregatedListCall) IfNoneMatch(entityTag string) *TargetPoolsAggregatedListCall {
+ c.ifNoneMatch_ = entityTag
+ return c
+}
+
+// Context sets the context to be used in this call's Do method. Any
+// pending HTTP request will be aborted if the provided context is
+// canceled.
+func (c *TargetPoolsAggregatedListCall) Context(ctx context.Context) *TargetPoolsAggregatedListCall {
+ c.ctx_ = ctx
+ return c
+}
+
+func (c *TargetPoolsAggregatedListCall) doRequest(alt string) (*http.Response, error) {
+ var body io.Reader = nil
+ c.urlParams_.Set("alt", alt)
+ urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/targetPools")
+ urls += "?" + c.urlParams_.Encode()
+ req, _ := http.NewRequest("GET", urls, body)
+ googleapi.Expand(req.URL, map[string]string{
+ "project": c.project,
+ })
+ req.Header.Set("User-Agent", c.s.userAgent())
+ if c.ifNoneMatch_ != "" {
+ req.Header.Set("If-None-Match", c.ifNoneMatch_)
+ }
+ if c.ctx_ != nil {
+ return ctxhttp.Do(c.ctx_, c.s.client, req)
+ }
+ return c.s.client.Do(req)
+}
+
+// Do executes the "compute.targetPools.aggregatedList" call.
+// Exactly one of *TargetPoolAggregatedList or error will be non-nil.
+// Any non-2xx status code is an error.
Response headers are in either +// *TargetPoolAggregatedList.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *TargetPoolsAggregatedListCall) Do(opts ...googleapi.CallOption) (*TargetPoolAggregatedList, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &TargetPoolAggregatedList{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Retrieves the list of target pools grouped by scope.", + // "httpMethod": "GET", + // "id": "compute.targetPools.aggregatedList", + // "parameterOrder": [ + // "project" + // ], + // "parameters": { + // "filter": { + // "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, filter=name ne example-instance.\n\nCompute Engine Beta API Only: If you use filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. In particular, use filtering on nested fields to take advantage of instance labels to organize and filter results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions meaning that resources must match all expressions to pass the filters.", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that Compute Engine should return. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests.", + // "format": "uint32", + // "location": "query", + // "maximum": "500", + // "minimum": "0", + // "type": "integer" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. 
Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "location": "query", + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/aggregated/targetPools", + // "response": { + // "$ref": "TargetPoolAggregatedList" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// method id "compute.targetPools.delete": + +type TargetPoolsDeleteCall struct { + s *Service + project string + region string + targetPool string + urlParams_ gensupport.URLParams + ctx_ context.Context +} + +// Delete: Deletes the specified TargetPool resource. +// For details, see https://cloud.google.com/compute/docs/reference/latest/targetPools/delete +func (r *TargetPoolsService) Delete(project string, region string, targetPool string) *TargetPoolsDeleteCall { + c := &TargetPoolsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + c.targetPool = targetPool + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *TargetPoolsDeleteCall) Fields(s ...googleapi.Field) *TargetPoolsDeleteCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *TargetPoolsDeleteCall) Context(ctx context.Context) *TargetPoolsDeleteCall { + c.ctx_ = ctx + return c +} + +func (c *TargetPoolsDeleteCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/targetPools/{targetPool}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("DELETE", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "region": c.region, + "targetPool": c.targetPool, + }) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "compute.targetPools.delete" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *TargetPoolsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json")
+ if res != nil && res.StatusCode == http.StatusNotModified {
+ if res.Body != nil {
+ res.Body.Close()
+ }
+ return nil, &googleapi.Error{
+ Code: res.StatusCode,
+ Header: res.Header,
+ }
+ }
+ if err != nil {
+ return nil, err
+ }
+ defer googleapi.CloseBody(res)
+ if err := googleapi.CheckResponse(res); err != nil {
+ return nil, err
+ }
+ ret := &Operation{
+ ServerResponse: googleapi.ServerResponse{
+ Header: res.Header,
+ HTTPStatusCode: res.StatusCode,
+ },
+ }
+ if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
+ return nil, err
+ }
+ return ret, nil
+ // {
+ //   "description": "Deletes the specified TargetPool resource.",
+ //   "httpMethod": "DELETE",
+ //   "id": "compute.targetPools.delete",
+ //   "parameterOrder": [
+ //     "project",
+ //     "region",
+ //     "targetPool"
+ //   ],
+ //   "parameters": {
+ //     "project": {
+ //       "description": "Project ID for this request.",
+ //       "location": "path",
+ //       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
+ //       "required": true,
+ //       "type": "string"
+ //     },
+ //     "region": {
+ //       "description": "Name of the region scoping this request.",
+ //       "location": "path",
+ //       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
+ //       "required": true,
+ //       "type": "string"
+ //     },
+ //     "targetPool": {
+ //       "description": "Name of the TargetPool resource to delete.",
+ //       "location": "path",
+ //       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
+ //       "required": true,
+ //       "type": "string"
+ //     }
+ //   },
+ //   "path": "{project}/regions/{region}/targetPools/{targetPool}",
+ //   "response": {
+ //     "$ref": "Operation"
+ //   },
+ //   "scopes": [
+ //     "https://www.googleapis.com/auth/cloud-platform",
+ //     "https://www.googleapis.com/auth/compute"
+ //   ]
+ // }
+
+}
+
+// method id "compute.targetPools.get":
+
+type TargetPoolsGetCall struct {
+ s *Service
+ project string
+ region string
+ targetPool string
+ urlParams_ gensupport.URLParams
+ ifNoneMatch_ string
+ ctx_ context.Context
+}
+
+// Get: Returns the specified TargetPool resource.
+// For details, see https://cloud.google.com/compute/docs/reference/latest/targetPools/get
+func (r *TargetPoolsService) Get(project string, region string, targetPool string) *TargetPoolsGetCall {
+ c := &TargetPoolsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)}
+ c.project = project
+ c.region = region
+ c.targetPool = targetPool
+ return c
+}
+
+// Fields allows partial responses to be retrieved. See
+// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *TargetPoolsGetCall) Fields(s ...googleapi.Field) *TargetPoolsGetCall {
+ c.urlParams_.Set("fields", googleapi.CombineFields(s))
+ return c
+}
+
+// IfNoneMatch sets the optional parameter which makes the operation
+// fail if the object's ETag matches the given value. This is useful for
+// getting updates only after the object has changed since the last
+// request. Use googleapi.IsNotModified to check whether the response
+// error from Do is the result of If-None-Match.
+func (c *TargetPoolsGetCall) IfNoneMatch(entityTag string) *TargetPoolsGetCall {
+ c.ifNoneMatch_ = entityTag
+ return c
+}
+
+// Context sets the context to be used in this call's Do method. Any
+// pending HTTP request will be aborted if the provided context is
+// canceled.
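+//
+// A short sketch of bounding this call with a timeout, assuming an
+// authenticated *Service named svc, placeholder resource names, and the
+// time package imported:
+//
+//   ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+//   defer cancel()
+//   pool, err := svc.TargetPools.Get("my-project", "us-central1", "my-pool").
+//       Context(ctx).
+//       Do()
+//   _, _ = pool, err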
+func (c *TargetPoolsGetCall) Context(ctx context.Context) *TargetPoolsGetCall { + c.ctx_ = ctx + return c +} + +func (c *TargetPoolsGetCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/targetPools/{targetPool}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "region": c.region, + "targetPool": c.targetPool, + }) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + req.Header.Set("If-None-Match", c.ifNoneMatch_) + } + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "compute.targetPools.get" call. +// Exactly one of *TargetPool or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *TargetPool.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *TargetPoolsGetCall) Do(opts ...googleapi.CallOption) (*TargetPool, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &TargetPool{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Returns the specified TargetPool resource.", + // "httpMethod": "GET", + // "id": "compute.targetPools.get", + // "parameterOrder": [ + // "project", + // "region", + // "targetPool" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "Name of the region scoping this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "targetPool": { + // "description": "Name of the TargetPool resource to return.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/regions/{region}/targetPools/{targetPool}", + // "response": { + // "$ref": "TargetPool" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// method id "compute.targetPools.getHealth": + +type TargetPoolsGetHealthCall struct { + s *Service + project string + region string + targetPool string + instancereference *InstanceReference + urlParams_ gensupport.URLParams + ctx_ context.Context +} + +// GetHealth: Gets the most recent health check 
results for each IP for
+// the given instance that is referenced by the given TargetPool.
+// For details, see https://cloud.google.com/compute/docs/reference/latest/targetPools/getHealth
+func (r *TargetPoolsService) GetHealth(project string, region string, targetPool string, instancereference *InstanceReference) *TargetPoolsGetHealthCall {
+ c := &TargetPoolsGetHealthCall{s: r.s, urlParams_: make(gensupport.URLParams)}
+ c.project = project
+ c.region = region
+ c.targetPool = targetPool
+ c.instancereference = instancereference
+ return c
+}
+
+// Fields allows partial responses to be retrieved. See
+// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *TargetPoolsGetHealthCall) Fields(s ...googleapi.Field) *TargetPoolsGetHealthCall {
+ c.urlParams_.Set("fields", googleapi.CombineFields(s))
+ return c
+}
+
+// Context sets the context to be used in this call's Do method. Any
+// pending HTTP request will be aborted if the provided context is
+// canceled.
+func (c *TargetPoolsGetHealthCall) Context(ctx context.Context) *TargetPoolsGetHealthCall {
+ c.ctx_ = ctx
+ return c
+}
+
+func (c *TargetPoolsGetHealthCall) doRequest(alt string) (*http.Response, error) {
+ var body io.Reader = nil
+ body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancereference)
+ if err != nil {
+ return nil, err
+ }
+ ctype := "application/json"
+ c.urlParams_.Set("alt", alt)
+ urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/targetPools/{targetPool}/getHealth")
+ urls += "?" + c.urlParams_.Encode()
+ req, _ := http.NewRequest("POST", urls, body)
+ googleapi.Expand(req.URL, map[string]string{
+ "project": c.project,
+ "region": c.region,
+ "targetPool": c.targetPool,
+ })
+ req.Header.Set("Content-Type", ctype)
+ req.Header.Set("User-Agent", c.s.userAgent())
+ if c.ctx_ != nil {
+ return ctxhttp.Do(c.ctx_, c.s.client, req)
+ }
+ return c.s.client.Do(req)
+}
+
+// Do executes the "compute.targetPools.getHealth" call.
+// Exactly one of *TargetPoolInstanceHealth or error will be non-nil.
+// Any non-2xx status code is an error. Response headers are in either
+// *TargetPoolInstanceHealth.ServerResponse.Header or (if a response was
+// returned at all) in error.(*googleapi.Error).Header. Use
+// googleapi.IsNotModified to check whether the returned error was
+// because http.StatusNotModified was returned.
+func (c *TargetPoolsGetHealthCall) Do(opts ...googleapi.CallOption) (*TargetPoolInstanceHealth, error) {
+ gensupport.SetOptions(c.urlParams_, opts...)
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &TargetPoolInstanceHealth{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Gets the most recent health check results for each IP for the given instance that is referenced by given TargetPool.", + // "httpMethod": "POST", + // "id": "compute.targetPools.getHealth", + // "parameterOrder": [ + // "project", + // "region", + // "targetPool" + // ], + // "parameters": { + // "project": { + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "Name of the region scoping this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "targetPool": { + // "description": "Name of the TargetPool resource to which the queried instance belongs.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/regions/{region}/targetPools/{targetPool}/getHealth", + // "request": { + // "$ref": "InstanceReference" + // }, + // "response": { + // "$ref": "TargetPoolInstanceHealth" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// method id "compute.targetPools.insert": + +type TargetPoolsInsertCall struct { + s *Service + project string + region string + targetpool *TargetPool + urlParams_ gensupport.URLParams + ctx_ context.Context +} + +// Insert: Creates a TargetPool resource in the specified project and +// region using the data included in the request. +// For details, see https://cloud.google.com/compute/docs/reference/latest/targetPools/insert +func (r *TargetPoolsService) Insert(project string, region string, targetpool *TargetPool) *TargetPoolsInsertCall { + c := &TargetPoolsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + c.targetpool = targetpool + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *TargetPoolsInsertCall) Fields(s ...googleapi.Field) *TargetPoolsInsertCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. 
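+//
+// Note that Insert is asynchronous: Do returns an *Operation that can be
+// polled until it reports DONE. A rough sketch, assuming an authenticated
+// *Service named svc and placeholder names (real code should back off
+// between polls and inspect op.Error):
+//
+//   op, err := svc.TargetPools.Insert("my-project", "us-central1",
+//       &TargetPool{Name: "my-pool"}).Do()
+//   for err == nil && op.Status != "DONE" {
+//       op, err = svc.RegionOperations.Get("my-project", "us-central1", op.Name).Do()
+//   }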
+func (c *TargetPoolsInsertCall) Context(ctx context.Context) *TargetPoolsInsertCall { + c.ctx_ = ctx + return c +} + +func (c *TargetPoolsInsertCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.targetpool) + if err != nil { + return nil, err + } + ctype := "application/json" + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/targetPools") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "region": c.region, + }) + req.Header.Set("Content-Type", ctype) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "compute.targetPools.insert" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *TargetPoolsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Creates a TargetPool resource in the specified project and region using the data included in the request.", + // "httpMethod": "POST", + // "id": "compute.targetPools.insert", + // "parameterOrder": [ + // "project", + // "region" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "Name of the region scoping this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/regions/{region}/targetPools", + // "request": { + // "$ref": "TargetPool" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.targetPools.list": + +type TargetPoolsListCall struct { + s *Service + project string + region string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context +} + +// List: Retrieves the list of TargetPool resources available to the +// specified project and region. 
+// For details, see https://cloud.google.com/compute/docs/reference/latest/targetPools/list
+func (r *TargetPoolsService) List(project string, region string) *TargetPoolsListCall {
+ c := &TargetPoolsListCall{s: r.s, urlParams_: make(gensupport.URLParams)}
+ c.project = project
+ c.region = region
+ return c
+}
+
+// Filter sets the optional parameter "filter": Sets a filter expression
+// for filtering listed resources, in the form filter={expression}. Your
+// {expression} must be in the format: field_name comparison_string
+// literal_string.
+//
+// The field_name is the name of the field you want to compare. Only
+// atomic field types are supported (string, number, boolean). The
+// comparison_string must be either eq (equals) or ne (not equals). The
+// literal_string is the string value to filter to. The literal value
+// must be valid for the type of field you are filtering by (string,
+// number, boolean). For string fields, the literal value is interpreted
+// as a regular expression using RE2 syntax. The literal value must
+// match the entire field.
+//
+// For example, filter=name ne example-instance.
+//
+// Compute Engine Beta API Only: If you use filtering in the Beta API,
+// you can also filter on nested fields. For example, you could filter
+// on instances that have set the scheduling.automaticRestart field to
+// true. In particular, use filtering on nested fields to take advantage
+// of instance labels to organize and filter results based on label
+// values.
+//
+// The Beta API also supports filtering on multiple expressions by
+// providing each separate expression within parentheses. For example,
+// (scheduling.automaticRestart eq true) (zone eq us-central1-f).
+// Multiple expressions are treated as AND expressions meaning that
+// resources must match all expressions to pass the filters.
+func (c *TargetPoolsListCall) Filter(filter string) *TargetPoolsListCall {
+ c.urlParams_.Set("filter", filter)
+ return c
+}
+
+// MaxResults sets the optional parameter "maxResults": The maximum
+// number of results per page that Compute Engine should return. If the
+// number of available results is larger than maxResults, Compute Engine
+// returns a nextPageToken that can be used to get the next page of
+// results in subsequent list requests.
+func (c *TargetPoolsListCall) MaxResults(maxResults int64) *TargetPoolsListCall {
+ c.urlParams_.Set("maxResults", fmt.Sprint(maxResults))
+ return c
+}
+
+// PageToken sets the optional parameter "pageToken": Specifies a page
+// token to use. Set pageToken to the nextPageToken returned by a
+// previous list request to get the next page of results.
+func (c *TargetPoolsListCall) PageToken(pageToken string) *TargetPoolsListCall {
+ c.urlParams_.Set("pageToken", pageToken)
+ return c
+}
+
+// Fields allows partial responses to be retrieved. See
+// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *TargetPoolsListCall) Fields(s ...googleapi.Field) *TargetPoolsListCall {
+ c.urlParams_.Set("fields", googleapi.CombineFields(s))
+ return c
+}
+
+// IfNoneMatch sets the optional parameter which makes the operation
+// fail if the object's ETag matches the given value. This is useful for
+// getting updates only after the object has changed since the last
+// request. Use googleapi.IsNotModified to check whether the response
+// error from Do is the result of If-None-Match.
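+//
+// A conditional-fetch sketch (illustrative only; assumes an authenticated
+// *Service named svc and an etag string saved from a prior response):
+//
+//   list, err := svc.TargetPools.List("my-project", "us-central1").
+//       IfNoneMatch(etag).
+//       Do()
+//   if googleapi.IsNotModified(err) {
+//       // The cached copy is still current; list is nil here.
+//   }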
+func (c *TargetPoolsListCall) IfNoneMatch(entityTag string) *TargetPoolsListCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *TargetPoolsListCall) Context(ctx context.Context) *TargetPoolsListCall { + c.ctx_ = ctx + return c +} + +func (c *TargetPoolsListCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/targetPools") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "region": c.region, + }) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + req.Header.Set("If-None-Match", c.ifNoneMatch_) + } + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "compute.targetPools.list" call. +// Exactly one of *TargetPoolList or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *TargetPoolList.ServerResponse.Header or (if a response was returned +// at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *TargetPoolsListCall) Do(opts ...googleapi.CallOption) (*TargetPoolList, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &TargetPoolList{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Retrieves the list of TargetPool resources available to the specified project and region.", + // "httpMethod": "GET", + // "id": "compute.targetPools.list", + // "parameterOrder": [ + // "project", + // "region" + // ], + // "parameters": { + // "filter": { + // "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, filter=name ne example-instance.\n\nCompute Engine Beta API Only: If you use filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. 
In particular, use filtering on nested fields to take advantage of instance labels to organize and filter results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions meaning that resources must match all expressions to pass the filters.", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that Compute Engine should return. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests.", + // "format": "uint32", + // "location": "query", + // "maximum": "500", + // "minimum": "0", + // "type": "integer" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "location": "query", + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "Name of the region scoping this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/regions/{region}/targetPools", + // "response": { + // "$ref": "TargetPoolList" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// method id "compute.targetPools.removeHealthCheck": + +type TargetPoolsRemoveHealthCheckCall struct { + s *Service + project string + region string + targetPool string + targetpoolsremovehealthcheckrequest *TargetPoolsRemoveHealthCheckRequest + urlParams_ gensupport.URLParams + ctx_ context.Context +} + +// RemoveHealthCheck: Removes health check URL from targetPool. +// For details, see https://cloud.google.com/compute/docs/reference/latest/targetPools/removeHealthCheck +func (r *TargetPoolsService) RemoveHealthCheck(project string, region string, targetPool string, targetpoolsremovehealthcheckrequest *TargetPoolsRemoveHealthCheckRequest) *TargetPoolsRemoveHealthCheckCall { + c := &TargetPoolsRemoveHealthCheckCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + c.targetPool = targetPool + c.targetpoolsremovehealthcheckrequest = targetpoolsremovehealthcheckrequest + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *TargetPoolsRemoveHealthCheckCall) Fields(s ...googleapi.Field) *TargetPoolsRemoveHealthCheckCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. 
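+//
+// A minimal sketch (assuming svc is a configured *Service and req is a
+// *TargetPoolsRemoveHealthCheckRequest; project, region and pool are
+// placeholders) of bounding the call with a deadline via Context:
+//
+//    ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
+//    defer cancel()
+//    op, err := svc.TargetPools.RemoveHealthCheck(project, region, pool, req).Context(ctx).Do()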
+func (c *TargetPoolsRemoveHealthCheckCall) Context(ctx context.Context) *TargetPoolsRemoveHealthCheckCall { + c.ctx_ = ctx + return c +} + +func (c *TargetPoolsRemoveHealthCheckCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.targetpoolsremovehealthcheckrequest) + if err != nil { + return nil, err + } + ctype := "application/json" + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/targetPools/{targetPool}/removeHealthCheck") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "region": c.region, + "targetPool": c.targetPool, + }) + req.Header.Set("Content-Type", ctype) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "compute.targetPools.removeHealthCheck" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *TargetPoolsRemoveHealthCheckCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Removes health check URL from targetPool.", + // "httpMethod": "POST", + // "id": "compute.targetPools.removeHealthCheck", + // "parameterOrder": [ + // "project", + // "region", + // "targetPool" + // ], + // "parameters": { + // "project": { + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "Name of the region scoping this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "targetPool": { + // "description": "Name of the TargetPool resource to which health_check_url is to be removed.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/regions/{region}/targetPools/{targetPool}/removeHealthCheck", + // "request": { + // "$ref": "TargetPoolsRemoveHealthCheckRequest" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.targetPools.removeInstance": + +type 
TargetPoolsRemoveInstanceCall struct { + s *Service + project string + region string + targetPool string + targetpoolsremoveinstancerequest *TargetPoolsRemoveInstanceRequest + urlParams_ gensupport.URLParams + ctx_ context.Context +} + +// RemoveInstance: Removes instance URL from targetPool. +// For details, see https://cloud.google.com/compute/docs/reference/latest/targetPools/removeInstance +func (r *TargetPoolsService) RemoveInstance(project string, region string, targetPool string, targetpoolsremoveinstancerequest *TargetPoolsRemoveInstanceRequest) *TargetPoolsRemoveInstanceCall { + c := &TargetPoolsRemoveInstanceCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + c.targetPool = targetPool + c.targetpoolsremoveinstancerequest = targetpoolsremoveinstancerequest + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *TargetPoolsRemoveInstanceCall) Fields(s ...googleapi.Field) *TargetPoolsRemoveInstanceCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *TargetPoolsRemoveInstanceCall) Context(ctx context.Context) *TargetPoolsRemoveInstanceCall { + c.ctx_ = ctx + return c +} + +func (c *TargetPoolsRemoveInstanceCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.targetpoolsremoveinstancerequest) + if err != nil { + return nil, err + } + ctype := "application/json" + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/targetPools/{targetPool}/removeInstance") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "region": c.region, + "targetPool": c.targetPool, + }) + req.Header.Set("Content-Type", ctype) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "compute.targetPools.removeInstance" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *TargetPoolsRemoveInstanceCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Removes instance URL from targetPool.", + // "httpMethod": "POST", + // "id": "compute.targetPools.removeInstance", + // "parameterOrder": [ + // "project", + // "region", + // "targetPool" + // ], + // "parameters": { + // "project": { + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "Name of the region scoping this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "targetPool": { + // "description": "Name of the TargetPool resource to which instance_url is to be removed.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/regions/{region}/targetPools/{targetPool}/removeInstance", + // "request": { + // "$ref": "TargetPoolsRemoveInstanceRequest" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.targetPools.setBackup": + +type TargetPoolsSetBackupCall struct { + s *Service + project string + region string + targetPool string + targetreference *TargetReference + urlParams_ gensupport.URLParams + ctx_ context.Context +} + +// SetBackup: Changes backup pool configurations. +// For details, see https://cloud.google.com/compute/docs/reference/latest/targetPools/setBackup +func (r *TargetPoolsService) SetBackup(project string, region string, targetPool string, targetreference *TargetReference) *TargetPoolsSetBackupCall { + c := &TargetPoolsSetBackupCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + c.targetPool = targetPool + c.targetreference = targetreference + return c +} + +// FailoverRatio sets the optional parameter "failoverRatio": New +// failoverRatio value for the containing target pool. +func (c *TargetPoolsSetBackupCall) FailoverRatio(failoverRatio float64) *TargetPoolsSetBackupCall { + c.urlParams_.Set("failoverRatio", fmt.Sprint(failoverRatio)) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *TargetPoolsSetBackupCall) Fields(s ...googleapi.Field) *TargetPoolsSetBackupCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. 
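+//
+// A usage sketch (svc, project, region, pool and backupRef, a
+// *TargetReference naming the backup pool, are placeholders):
+// designate a backup pool that takes over once fewer than 10% of the
+// primary pool's instances are healthy:
+//
+//    op, err := svc.TargetPools.SetBackup(project, region, pool, backupRef).
+//        FailoverRatio(0.1).
+//        Do()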
+func (c *TargetPoolsSetBackupCall) Context(ctx context.Context) *TargetPoolsSetBackupCall { + c.ctx_ = ctx + return c +} + +func (c *TargetPoolsSetBackupCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.targetreference) + if err != nil { + return nil, err + } + ctype := "application/json" + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/targetPools/{targetPool}/setBackup") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "region": c.region, + "targetPool": c.targetPool, + }) + req.Header.Set("Content-Type", ctype) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "compute.targetPools.setBackup" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *TargetPoolsSetBackupCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Changes backup pool configurations.", + // "httpMethod": "POST", + // "id": "compute.targetPools.setBackup", + // "parameterOrder": [ + // "project", + // "region", + // "targetPool" + // ], + // "parameters": { + // "failoverRatio": { + // "description": "New failoverRatio value for the containing target pool.", + // "format": "float", + // "location": "query", + // "type": "number" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "Name of the region scoping this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "targetPool": { + // "description": "Name of the TargetPool resource for which the backup is to be set.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/regions/{region}/targetPools/{targetPool}/setBackup", + // "request": { + // "$ref": "TargetReference" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // 
"https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.targetVpnGateways.aggregatedList": + +type TargetVpnGatewaysAggregatedListCall struct { + s *Service + project string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context +} + +// AggregatedList: Retrieves the list of target VPN gateways grouped by +// scope. +func (r *TargetVpnGatewaysService) AggregatedList(project string) *TargetVpnGatewaysAggregatedListCall { + c := &TargetVpnGatewaysAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + return c +} + +// Filter sets the optional parameter "filter": Sets a filter expression +// for filtering listed resources, in the form filter={expression}. Your +// {expression} must be in the format: field_name comparison_string +// literal_string. +// +// The field_name is the name of the field you want to compare. Only +// atomic field types are supported (string, number, boolean). The +// comparison_string must be either eq (equals) or ne (not equals). The +// literal_string is the string value to filter to. The literal value +// must be valid for the type of field you are filtering by (string, +// number, boolean). For string fields, the literal value is interpreted +// as a regular expression using RE2 syntax. The literal value must +// match the entire field. +// +// For example, filter=name ne example-instance. +// +// Compute Engine Beta API Only: If you use filtering in the Beta API, +// you can also filter on nested fields. For example, you could filter +// on instances that have set the scheduling.automaticRestart field to +// true. In particular, use filtering on nested fields to take advantage +// of instance labels to organize and filter results based on label +// values. +// +// The Beta API also supports filtering on multiple expressions by +// providing each separate expression within parentheses. For example, +// (scheduling.automaticRestart eq true) (zone eq us-central1-f). +// Multiple expressions are treated as AND expressions meaning that +// resources must match all expressions to pass the filters. +func (c *TargetVpnGatewaysAggregatedListCall) Filter(filter string) *TargetVpnGatewaysAggregatedListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that Compute Engine should return. If the +// number of available results is larger than maxResults, Compute Engine +// returns a nextPageToken that can be used to get the next page of +// results in subsequent list requests. +func (c *TargetVpnGatewaysAggregatedListCall) MaxResults(maxResults int64) *TargetVpnGatewaysAggregatedListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set pageToken to the nextPageToken returned by a +// previous list request to get the next page of results. +func (c *TargetVpnGatewaysAggregatedListCall) PageToken(pageToken string) *TargetVpnGatewaysAggregatedListCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. 
+func (c *TargetVpnGatewaysAggregatedListCall) Fields(s ...googleapi.Field) *TargetVpnGatewaysAggregatedListCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of If-None-Match. +func (c *TargetVpnGatewaysAggregatedListCall) IfNoneMatch(entityTag string) *TargetVpnGatewaysAggregatedListCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *TargetVpnGatewaysAggregatedListCall) Context(ctx context.Context) *TargetVpnGatewaysAggregatedListCall { + c.ctx_ = ctx + return c +} + +func (c *TargetVpnGatewaysAggregatedListCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/targetVpnGateways") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + }) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + req.Header.Set("If-None-Match", c.ifNoneMatch_) + } + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "compute.targetVpnGateways.aggregatedList" call. +// Exactly one of *TargetVpnGatewayAggregatedList or error will be +// non-nil. Any non-2xx status code is an error. Response headers are in +// either *TargetVpnGatewayAggregatedList.ServerResponse.Header or (if a +// response was returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *TargetVpnGatewaysAggregatedListCall) Do(opts ...googleapi.CallOption) (*TargetVpnGatewayAggregatedList, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &TargetVpnGatewayAggregatedList{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Retrieves the list of target VPN gateways grouped by scope.", + // "httpMethod": "GET", + // "id": "compute.targetVpnGateways.aggregatedList", + // "parameterOrder": [ + // "project" + // ], + // "parameters": { + // "filter": { + // "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals).
The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, filter=name ne example-instance.\n\nCompute Engine Beta API Only: If you use filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. In particular, use filtering on nested fields to take advantage of instance labels to organize and filter results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions meaning that resources must match all expressions to pass the filters.", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that Compute Engine should return. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests.", + // "format": "uint32", + // "location": "query", + // "maximum": "500", + // "minimum": "0", + // "type": "integer" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "location": "query", + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/aggregated/targetVpnGateways", + // "response": { + // "$ref": "TargetVpnGatewayAggregatedList" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// method id "compute.targetVpnGateways.delete": + +type TargetVpnGatewaysDeleteCall struct { + s *Service + project string + region string + targetVpnGateway string + urlParams_ gensupport.URLParams + ctx_ context.Context +} + +// Delete: Deletes the specified TargetVpnGateway resource. +func (r *TargetVpnGatewaysService) Delete(project string, region string, targetVpnGateway string) *TargetVpnGatewaysDeleteCall { + c := &TargetVpnGatewaysDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + c.targetVpnGateway = targetVpnGateway + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *TargetVpnGatewaysDeleteCall) Fields(s ...googleapi.Field) *TargetVpnGatewaysDeleteCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. 
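+//
+// Deletion is asynchronous: Do returns an Operation rather than waiting
+// for the gateway to disappear. A rough sketch (svc, project, region
+// and gw are placeholders) of polling the returned operation via the
+// region operations service:
+//
+//    op, err := svc.TargetVpnGateways.Delete(project, region, gw).Do()
+//    for err == nil && op.Status != "DONE" {
+//        time.Sleep(time.Second)
+//        op, err = svc.RegionOperations.Get(project, region, op.Name).Do()
+//    }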
+func (c *TargetVpnGatewaysDeleteCall) Context(ctx context.Context) *TargetVpnGatewaysDeleteCall { + c.ctx_ = ctx + return c +} + +func (c *TargetVpnGatewaysDeleteCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/targetVpnGateways/{targetVpnGateway}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("DELETE", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "region": c.region, + "targetVpnGateway": c.targetVpnGateway, + }) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "compute.targetVpnGateways.delete" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *TargetVpnGatewaysDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Deletes the specified TargetVpnGateway resource.", + // "httpMethod": "DELETE", + // "id": "compute.targetVpnGateways.delete", + // "parameterOrder": [ + // "project", + // "region", + // "targetVpnGateway" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "The name of the region for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "targetVpnGateway": { + // "description": "Name of the TargetVpnGateway resource to delete.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/regions/{region}/targetVpnGateways/{targetVpnGateway}", + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.targetVpnGateways.get": + +type TargetVpnGatewaysGetCall struct { + s *Service + project string + region string + targetVpnGateway string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context +} + +// Get: Returns the specified TargetVpnGateway resource. 
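+//
+// A minimal usage sketch (svc, project, region and gw are
+// placeholders):
+//
+//    gateway, err := svc.TargetVpnGateways.Get(project, region, gw).Do()
+//    if err != nil {
+//        // handle the error
+//    }
+//    fmt.Println(gateway.Name, gateway.Status)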
+func (r *TargetVpnGatewaysService) Get(project string, region string, targetVpnGateway string) *TargetVpnGatewaysGetCall { + c := &TargetVpnGatewaysGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + c.targetVpnGateway = targetVpnGateway + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *TargetVpnGatewaysGetCall) Fields(s ...googleapi.Field) *TargetVpnGatewaysGetCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of If-None-Match. +func (c *TargetVpnGatewaysGetCall) IfNoneMatch(entityTag string) *TargetVpnGatewaysGetCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *TargetVpnGatewaysGetCall) Context(ctx context.Context) *TargetVpnGatewaysGetCall { + c.ctx_ = ctx + return c +} + +func (c *TargetVpnGatewaysGetCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/targetVpnGateways/{targetVpnGateway}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "region": c.region, + "targetVpnGateway": c.targetVpnGateway, + }) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + req.Header.Set("If-None-Match", c.ifNoneMatch_) + } + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "compute.targetVpnGateways.get" call. +// Exactly one of *TargetVpnGateway or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *TargetVpnGateway.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *TargetVpnGatewaysGetCall) Do(opts ...googleapi.CallOption) (*TargetVpnGateway, error) { + gensupport.SetOptions(c.urlParams_, opts...)
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &TargetVpnGateway{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Returns the specified TargetVpnGateway resource.", + // "httpMethod": "GET", + // "id": "compute.targetVpnGateways.get", + // "parameterOrder": [ + // "project", + // "region", + // "targetVpnGateway" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "The name of the region for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "targetVpnGateway": { + // "description": "Name of the TargetVpnGateway resource to return.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/regions/{region}/targetVpnGateways/{targetVpnGateway}", + // "response": { + // "$ref": "TargetVpnGateway" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// method id "compute.targetVpnGateways.insert": + +type TargetVpnGatewaysInsertCall struct { + s *Service + project string + region string + targetvpngateway *TargetVpnGateway + urlParams_ gensupport.URLParams + ctx_ context.Context +} + +// Insert: Creates a TargetVpnGateway resource in the specified project +// and region using the data included in the request. +func (r *TargetVpnGatewaysService) Insert(project string, region string, targetvpngateway *TargetVpnGateway) *TargetVpnGatewaysInsertCall { + c := &TargetVpnGatewaysInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + c.targetvpngateway = targetvpngateway + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *TargetVpnGatewaysInsertCall) Fields(s ...googleapi.Field) *TargetVpnGatewaysInsertCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. 
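+//
+// A sketch of a minimal insert (svc, project, region and networkURL are
+// placeholders; Name and Network are assumed fields of the
+// TargetVpnGateway model):
+//
+//    gw := &TargetVpnGateway{Name: "vpn-gw-1", Network: networkURL}
+//    op, err := svc.TargetVpnGateways.Insert(project, region, gw).Do()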
+func (c *TargetVpnGatewaysInsertCall) Context(ctx context.Context) *TargetVpnGatewaysInsertCall { + c.ctx_ = ctx + return c +} + +func (c *TargetVpnGatewaysInsertCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.targetvpngateway) + if err != nil { + return nil, err + } + ctype := "application/json" + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/targetVpnGateways") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "region": c.region, + }) + req.Header.Set("Content-Type", ctype) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "compute.targetVpnGateways.insert" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *TargetVpnGatewaysInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Creates a TargetVpnGateway resource in the specified project and region using the data included in the request.", + // "httpMethod": "POST", + // "id": "compute.targetVpnGateways.insert", + // "parameterOrder": [ + // "project", + // "region" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "The name of the region for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/regions/{region}/targetVpnGateways", + // "request": { + // "$ref": "TargetVpnGateway" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.targetVpnGateways.list": + +type TargetVpnGatewaysListCall struct { + s *Service + project string + region string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context +} + +// List: Retrieves the list of TargetVpnGateway resources available to +// the specified project and region. 
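+//
+// A pagination sketch (svc, project and region are placeholders):
+// request pages until no nextPageToken is returned.
+//
+//    call := svc.TargetVpnGateways.List(project, region).MaxResults(50)
+//    for {
+//        page, err := call.Do()
+//        if err != nil {
+//            break // or surface the error
+//        }
+//        for _, gw := range page.Items {
+//            _ = gw // process each gateway
+//        }
+//        if page.NextPageToken == "" {
+//            break
+//        }
+//        call.PageToken(page.NextPageToken)
+//    }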
+func (r *TargetVpnGatewaysService) List(project string, region string) *TargetVpnGatewaysListCall { + c := &TargetVpnGatewaysListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + return c +} + +// Filter sets the optional parameter "filter": Sets a filter expression +// for filtering listed resources, in the form filter={expression}. Your +// {expression} must be in the format: field_name comparison_string +// literal_string. +// +// The field_name is the name of the field you want to compare. Only +// atomic field types are supported (string, number, boolean). The +// comparison_string must be either eq (equals) or ne (not equals). The +// literal_string is the string value to filter to. The literal value +// must be valid for the type of field you are filtering by (string, +// number, boolean). For string fields, the literal value is interpreted +// as a regular expression using RE2 syntax. The literal value must +// match the entire field. +// +// For example, filter=name ne example-instance. +// +// Compute Engine Beta API Only: If you use filtering in the Beta API, +// you can also filter on nested fields. For example, you could filter +// on instances that have set the scheduling.automaticRestart field to +// true. In particular, use filtering on nested fields to take advantage +// of instance labels to organize and filter results based on label +// values. +// +// The Beta API also supports filtering on multiple expressions by +// providing each separate expression within parentheses. For example, +// (scheduling.automaticRestart eq true) (zone eq us-central1-f). +// Multiple expressions are treated as AND expressions meaning that +// resources must match all expressions to pass the filters. +func (c *TargetVpnGatewaysListCall) Filter(filter string) *TargetVpnGatewaysListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that Compute Engine should return. If the +// number of available results is larger than maxResults, Compute Engine +// returns a nextPageToken that can be used to get the next page of +// results in subsequent list requests. +func (c *TargetVpnGatewaysListCall) MaxResults(maxResults int64) *TargetVpnGatewaysListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set pageToken to the nextPageToken returned by a +// previous list request to get the next page of results. +func (c *TargetVpnGatewaysListCall) PageToken(pageToken string) *TargetVpnGatewaysListCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *TargetVpnGatewaysListCall) Fields(s ...googleapi.Field) *TargetVpnGatewaysListCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of If-None-Match.
+func (c *TargetVpnGatewaysListCall) IfNoneMatch(entityTag string) *TargetVpnGatewaysListCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *TargetVpnGatewaysListCall) Context(ctx context.Context) *TargetVpnGatewaysListCall { + c.ctx_ = ctx + return c +} + +func (c *TargetVpnGatewaysListCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/targetVpnGateways") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "region": c.region, + }) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + req.Header.Set("If-None-Match", c.ifNoneMatch_) + } + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "compute.targetVpnGateways.list" call. +// Exactly one of *TargetVpnGatewayList or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *TargetVpnGatewayList.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *TargetVpnGatewaysListCall) Do(opts ...googleapi.CallOption) (*TargetVpnGatewayList, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &TargetVpnGatewayList{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Retrieves the list of TargetVpnGateway resources available to the specified project and region.", + // "httpMethod": "GET", + // "id": "compute.targetVpnGateways.list", + // "parameterOrder": [ + // "project", + // "region" + // ], + // "parameters": { + // "filter": { + // "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, filter=name ne example-instance.\n\nCompute Engine Beta API Only: If you use filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. 
In particular, use filtering on nested fields to take advantage of instance labels to organize and filter results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions meaning that resources must match all expressions to pass the filters.", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that Compute Engine should return. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests.", + // "format": "uint32", + // "location": "query", + // "maximum": "500", + // "minimum": "0", + // "type": "integer" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "location": "query", + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "The name of the region for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/regions/{region}/targetVpnGateways", + // "response": { + // "$ref": "TargetVpnGatewayList" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// method id "compute.urlMaps.delete": + +type UrlMapsDeleteCall struct { + s *Service + project string + urlMap string + urlParams_ gensupport.URLParams + ctx_ context.Context +} + +// Delete: Deletes the specified UrlMap resource. +// For details, see https://cloud.google.com/compute/docs/reference/latest/urlMaps/delete +func (r *UrlMapsService) Delete(project string, urlMap string) *UrlMapsDeleteCall { + c := &UrlMapsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.urlMap = urlMap + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *UrlMapsDeleteCall) Fields(s ...googleapi.Field) *UrlMapsDeleteCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *UrlMapsDeleteCall) Context(ctx context.Context) *UrlMapsDeleteCall { + c.ctx_ = ctx + return c +} + +func (c *UrlMapsDeleteCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/urlMaps/{urlMap}") + urls += "?" 
+ c.urlParams_.Encode() + req, _ := http.NewRequest("DELETE", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "urlMap": c.urlMap, + }) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "compute.urlMaps.delete" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *UrlMapsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Deletes the specified UrlMap resource.", + // "httpMethod": "DELETE", + // "id": "compute.urlMaps.delete", + // "parameterOrder": [ + // "project", + // "urlMap" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "urlMap": { + // "description": "Name of the UrlMap resource to delete.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/global/urlMaps/{urlMap}", + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.urlMaps.get": + +type UrlMapsGetCall struct { + s *Service + project string + urlMap string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context +} + +// Get: Returns the specified UrlMap resource. +// For details, see https://cloud.google.com/compute/docs/reference/latest/urlMaps/get +func (r *UrlMapsService) Get(project string, urlMap string) *UrlMapsGetCall { + c := &UrlMapsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.urlMap = urlMap + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *UrlMapsGetCall) Fields(s ...googleapi.Field) *UrlMapsGetCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. 
Use googleapi.IsNotModified to check whether the response +// error from Do is the result of If-None-Match. +func (c *UrlMapsGetCall) IfNoneMatch(entityTag string) *UrlMapsGetCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *UrlMapsGetCall) Context(ctx context.Context) *UrlMapsGetCall { + c.ctx_ = ctx + return c +} + +func (c *UrlMapsGetCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/urlMaps/{urlMap}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "urlMap": c.urlMap, + }) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + req.Header.Set("If-None-Match", c.ifNoneMatch_) + } + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "compute.urlMaps.get" call. +// Exactly one of *UrlMap or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *UrlMap.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *UrlMapsGetCall) Do(opts ...googleapi.CallOption) (*UrlMap, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &UrlMap{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Returns the specified UrlMap resource.", + // "httpMethod": "GET", + // "id": "compute.urlMaps.get", + // "parameterOrder": [ + // "project", + // "urlMap" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "urlMap": { + // "description": "Name of the UrlMap resource to return.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/global/urlMaps/{urlMap}", + // "response": { + // "$ref": "UrlMap" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// method id "compute.urlMaps.insert": + +type UrlMapsInsertCall struct { + s *Service + project string + urlmap *UrlMap + urlParams_ gensupport.URLParams + ctx_ context.Context +} + +// Insert: Creates a UrlMap resource in the specified project using the +// data included in the
request. +// For details, see https://cloud.google.com/compute/docs/reference/latest/urlMaps/insert +func (r *UrlMapsService) Insert(project string, urlmap *UrlMap) *UrlMapsInsertCall { + c := &UrlMapsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.urlmap = urlmap + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *UrlMapsInsertCall) Fields(s ...googleapi.Field) *UrlMapsInsertCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *UrlMapsInsertCall) Context(ctx context.Context) *UrlMapsInsertCall { + c.ctx_ = ctx + return c +} + +func (c *UrlMapsInsertCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.urlmap) + if err != nil { + return nil, err + } + ctype := "application/json" + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/urlMaps") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + }) + req.Header.Set("Content-Type", ctype) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "compute.urlMaps.insert" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *UrlMapsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Creates a UrlMap resource in the specified project using the data included in the request.", + // "httpMethod": "POST", + // "id": "compute.urlMaps.insert", + // "parameterOrder": [ + // "project" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/global/urlMaps", + // "request": { + // "$ref": "UrlMap" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.urlMaps.list": + +type UrlMapsListCall struct { + s *Service + project string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context +} + +// List: Retrieves the list of UrlMap resources available to the +// specified project. +// For details, see https://cloud.google.com/compute/docs/reference/latest/urlMaps/list +func (r *UrlMapsService) List(project string) *UrlMapsListCall { + c := &UrlMapsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + return c +} + +// Filter sets the optional parameter "filter": Sets a filter expression +// for filtering listed resources, in the form filter={expression}. Your +// {expression} must be in the format: field_name comparison_string +// literal_string. +// +// The field_name is the name of the field you want to compare. Only +// atomic field types are supported (string, number, boolean). The +// comparison_string must be either eq (equals) or ne (not equals). The +// literal_string is the string value to filter to. The literal value +// must be valid for the type of field you are filtering by (string, +// number, boolean). For string fields, the literal value is interpreted +// as a regular expression using RE2 syntax. The literal value must +// match the entire field. +// +// For example, filter=name ne example-instance. +// +// Compute Engine Beta API Only: If you use filtering in the Beta API, +// you can also filter on nested fields. For example, you could filter +// on instances that have set the scheduling.automaticRestart field to +// true. In particular, use filtering on nested fields to take advantage +// of instance labels to organize and filter results based on label +// values. +// +// The Beta API also supports filtering on multiple expressions by +// providing each separate expression within parentheses. For example, +// (scheduling.automaticRestart eq true) (zone eq us-central1-f). +// Multiple expressions are treated as AND expressions meaning that +// resources must match all expressions to pass the filters. 
+func (c *UrlMapsListCall) Filter(filter string) *UrlMapsListCall {
+	c.urlParams_.Set("filter", filter)
+	return c
+}
+
+// MaxResults sets the optional parameter "maxResults": The maximum
+// number of results per page that Compute Engine should return. If the
+// number of available results is larger than maxResults, Compute Engine
+// returns a nextPageToken that can be used to get the next page of
+// results in subsequent list requests.
+func (c *UrlMapsListCall) MaxResults(maxResults int64) *UrlMapsListCall {
+	c.urlParams_.Set("maxResults", fmt.Sprint(maxResults))
+	return c
+}
+
+// PageToken sets the optional parameter "pageToken": Specifies a page
+// token to use. Set pageToken to the nextPageToken returned by a
+// previous list request to get the next page of results.
+func (c *UrlMapsListCall) PageToken(pageToken string) *UrlMapsListCall {
+	c.urlParams_.Set("pageToken", pageToken)
+	return c
+}
+
+// Fields allows partial responses to be retrieved. See
+// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *UrlMapsListCall) Fields(s ...googleapi.Field) *UrlMapsListCall {
+	c.urlParams_.Set("fields", googleapi.CombineFields(s))
+	return c
+}
+
+// IfNoneMatch sets the optional parameter which makes the operation
+// fail if the object's ETag matches the given value. This is useful for
+// getting updates only after the object has changed since the last
+// request. Use googleapi.IsNotModified to check whether the response
+// error from Do is the result of If-None-Match.
+func (c *UrlMapsListCall) IfNoneMatch(entityTag string) *UrlMapsListCall {
+	c.ifNoneMatch_ = entityTag
+	return c
+}
+
+// Context sets the context to be used in this call's Do method. Any
+// pending HTTP request will be aborted if the provided context is
+// canceled.
+func (c *UrlMapsListCall) Context(ctx context.Context) *UrlMapsListCall {
+	c.ctx_ = ctx
+	return c
+}
+
+func (c *UrlMapsListCall) doRequest(alt string) (*http.Response, error) {
+	var body io.Reader = nil
+	c.urlParams_.Set("alt", alt)
+	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/urlMaps")
+	urls += "?" + c.urlParams_.Encode()
+	req, _ := http.NewRequest("GET", urls, body)
+	googleapi.Expand(req.URL, map[string]string{
+		"project": c.project,
+	})
+	req.Header.Set("User-Agent", c.s.userAgent())
+	if c.ifNoneMatch_ != "" {
+		req.Header.Set("If-None-Match", c.ifNoneMatch_)
+	}
+	if c.ctx_ != nil {
+		return ctxhttp.Do(c.ctx_, c.s.client, req)
+	}
+	return c.s.client.Do(req)
+}
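+
+// Editor's note: a minimal paging sketch, not part of the generated
+// client, assuming an authenticated *Service named svc and a
+// hypothetical project ID; it follows NextPageToken until the listing
+// is exhausted.
+//
+//	var maps []*UrlMap
+//	call := svc.UrlMaps.List("my-project").MaxResults(500)
+//	for {
+//		page, err := call.Do()
+//		if err != nil {
+//			log.Fatal(err)
+//		}
+//		maps = append(maps, page.Items...)
+//		if page.NextPageToken == "" {
+//			break
+//		}
+//		call.PageToken(page.NextPageToken)
+//	}
+
+// Do executes the "compute.urlMaps.list" call.
+// Exactly one of *UrlMapList or error will be non-nil. Any non-2xx
+// status code is an error. Response headers are in either
+// *UrlMapList.ServerResponse.Header or (if a response was returned at
+// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified
+// to check whether the returned error was because
+// http.StatusNotModified was returned.
+func (c *UrlMapsListCall) Do(opts ...googleapi.CallOption) (*UrlMapList, error) {
+	gensupport.SetOptions(c.urlParams_, opts...)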
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &UrlMapList{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Retrieves the list of UrlMap resources available to the specified project.", + // "httpMethod": "GET", + // "id": "compute.urlMaps.list", + // "parameterOrder": [ + // "project" + // ], + // "parameters": { + // "filter": { + // "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, filter=name ne example-instance.\n\nCompute Engine Beta API Only: If you use filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. In particular, use filtering on nested fields to take advantage of instance labels to organize and filter results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions meaning that resources must match all expressions to pass the filters.", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that Compute Engine should return. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests.", + // "format": "uint32", + // "location": "query", + // "maximum": "500", + // "minimum": "0", + // "type": "integer" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. 
Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "location": "query", + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/global/urlMaps", + // "response": { + // "$ref": "UrlMapList" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// method id "compute.urlMaps.patch": + +type UrlMapsPatchCall struct { + s *Service + project string + urlMap string + urlmap *UrlMap + urlParams_ gensupport.URLParams + ctx_ context.Context +} + +// Patch: Update the entire content of the UrlMap resource. This method +// supports patch semantics. +// For details, see https://cloud.google.com/compute/docs/reference/latest/urlMaps/patch +func (r *UrlMapsService) Patch(project string, urlMap string, urlmap *UrlMap) *UrlMapsPatchCall { + c := &UrlMapsPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.urlMap = urlMap + c.urlmap = urlmap + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *UrlMapsPatchCall) Fields(s ...googleapi.Field) *UrlMapsPatchCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *UrlMapsPatchCall) Context(ctx context.Context) *UrlMapsPatchCall { + c.ctx_ = ctx + return c +} + +func (c *UrlMapsPatchCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.urlmap) + if err != nil { + return nil, err + } + ctype := "application/json" + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/urlMaps/{urlMap}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("PATCH", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "urlMap": c.urlMap, + }) + req.Header.Set("Content-Type", ctype) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "compute.urlMaps.patch" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *UrlMapsPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Update the entire content of the UrlMap resource. This method supports patch semantics.", + // "httpMethod": "PATCH", + // "id": "compute.urlMaps.patch", + // "parameterOrder": [ + // "project", + // "urlMap" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "urlMap": { + // "description": "Name of the UrlMap resource to update.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/global/urlMaps/{urlMap}", + // "request": { + // "$ref": "UrlMap" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.urlMaps.update": + +type UrlMapsUpdateCall struct { + s *Service + project string + urlMap string + urlmap *UrlMap + urlParams_ gensupport.URLParams + ctx_ context.Context +} + +// Update: Update the entire content of the UrlMap resource. +// For details, see https://cloud.google.com/compute/docs/reference/latest/urlMaps/update +func (r *UrlMapsService) Update(project string, urlMap string, urlmap *UrlMap) *UrlMapsUpdateCall { + c := &UrlMapsUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.urlMap = urlMap + c.urlmap = urlmap + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *UrlMapsUpdateCall) Fields(s ...googleapi.Field) *UrlMapsUpdateCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *UrlMapsUpdateCall) Context(ctx context.Context) *UrlMapsUpdateCall { + c.ctx_ = ctx + return c +} + +func (c *UrlMapsUpdateCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.urlmap) + if err != nil { + return nil, err + } + ctype := "application/json" + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/urlMaps/{urlMap}") + urls += "?" 
+ c.urlParams_.Encode() + req, _ := http.NewRequest("PUT", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "urlMap": c.urlMap, + }) + req.Header.Set("Content-Type", ctype) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "compute.urlMaps.update" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *UrlMapsUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Update the entire content of the UrlMap resource.", + // "httpMethod": "PUT", + // "id": "compute.urlMaps.update", + // "parameterOrder": [ + // "project", + // "urlMap" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "urlMap": { + // "description": "Name of the UrlMap resource to update.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/global/urlMaps/{urlMap}", + // "request": { + // "$ref": "UrlMap" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.urlMaps.validate": + +type UrlMapsValidateCall struct { + s *Service + project string + urlMap string + urlmapsvalidaterequest *UrlMapsValidateRequest + urlParams_ gensupport.URLParams + ctx_ context.Context +} + +// Validate: Run static validation for the UrlMap. In particular, the +// tests of the provided UrlMap will be run. Calling this method does +// NOT create the UrlMap. +// For details, see https://cloud.google.com/compute/docs/reference/latest/urlMaps/validate +func (r *UrlMapsService) Validate(project string, urlMap string, urlmapsvalidaterequest *UrlMapsValidateRequest) *UrlMapsValidateCall { + c := &UrlMapsValidateCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.urlMap = urlMap + c.urlmapsvalidaterequest = urlmapsvalidaterequest + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. 
+func (c *UrlMapsValidateCall) Fields(s ...googleapi.Field) *UrlMapsValidateCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *UrlMapsValidateCall) Context(ctx context.Context) *UrlMapsValidateCall { + c.ctx_ = ctx + return c +} + +func (c *UrlMapsValidateCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.urlmapsvalidaterequest) + if err != nil { + return nil, err + } + ctype := "application/json" + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/urlMaps/{urlMap}/validate") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "urlMap": c.urlMap, + }) + req.Header.Set("Content-Type", ctype) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "compute.urlMaps.validate" call. +// Exactly one of *UrlMapsValidateResponse or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *UrlMapsValidateResponse.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *UrlMapsValidateCall) Do(opts ...googleapi.CallOption) (*UrlMapsValidateResponse, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &UrlMapsValidateResponse{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Run static validation for the UrlMap. In particular, the tests of the provided UrlMap will be run. 
Calling this method does NOT create the UrlMap.", + // "httpMethod": "POST", + // "id": "compute.urlMaps.validate", + // "parameterOrder": [ + // "project", + // "urlMap" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "urlMap": { + // "description": "Name of the UrlMap resource to be validated as.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/global/urlMaps/{urlMap}/validate", + // "request": { + // "$ref": "UrlMapsValidateRequest" + // }, + // "response": { + // "$ref": "UrlMapsValidateResponse" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.vpnTunnels.aggregatedList": + +type VpnTunnelsAggregatedListCall struct { + s *Service + project string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context +} + +// AggregatedList: Retrieves the list of VPN tunnels grouped by scope. +func (r *VpnTunnelsService) AggregatedList(project string) *VpnTunnelsAggregatedListCall { + c := &VpnTunnelsAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + return c +} + +// Filter sets the optional parameter "filter": Sets a filter expression +// for filtering listed resources, in the form filter={expression}. Your +// {expression} must be in the format: field_name comparison_string +// literal_string. +// +// The field_name is the name of the field you want to compare. Only +// atomic field types are supported (string, number, boolean). The +// comparison_string must be either eq (equals) or ne (not equals). The +// literal_string is the string value to filter to. The literal value +// must be valid for the type of field you are filtering by (string, +// number, boolean). For string fields, the literal value is interpreted +// as a regular expression using RE2 syntax. The literal value must +// match the entire field. +// +// For example, filter=name ne example-instance. +// +// Compute Engine Beta API Only: If you use filtering in the Beta API, +// you can also filter on nested fields. For example, you could filter +// on instances that have set the scheduling.automaticRestart field to +// true. In particular, use filtering on nested fields to take advantage +// of instance labels to organize and filter results based on label +// values. +// +// The Beta API also supports filtering on multiple expressions by +// providing each separate expression within parentheses. For example, +// (scheduling.automaticRestart eq true) (zone eq us-central1-f). +// Multiple expressions are treated as AND expressions meaning that +// resources must match all expressions to pass the filters. +func (c *VpnTunnelsAggregatedListCall) Filter(filter string) *VpnTunnelsAggregatedListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that Compute Engine should return. If the +// number of available results is larger than maxResults, Compute Engine +// returns a nextPageToken that can be used to get the next page of +// results in subsequent list requests. 
+func (c *VpnTunnelsAggregatedListCall) MaxResults(maxResults int64) *VpnTunnelsAggregatedListCall {
+	c.urlParams_.Set("maxResults", fmt.Sprint(maxResults))
+	return c
+}
+
+// PageToken sets the optional parameter "pageToken": Specifies a page
+// token to use. Set pageToken to the nextPageToken returned by a
+// previous list request to get the next page of results.
+func (c *VpnTunnelsAggregatedListCall) PageToken(pageToken string) *VpnTunnelsAggregatedListCall {
+	c.urlParams_.Set("pageToken", pageToken)
+	return c
+}
+
+// Fields allows partial responses to be retrieved. See
+// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *VpnTunnelsAggregatedListCall) Fields(s ...googleapi.Field) *VpnTunnelsAggregatedListCall {
+	c.urlParams_.Set("fields", googleapi.CombineFields(s))
+	return c
+}
+
+// IfNoneMatch sets the optional parameter which makes the operation
+// fail if the object's ETag matches the given value. This is useful for
+// getting updates only after the object has changed since the last
+// request. Use googleapi.IsNotModified to check whether the response
+// error from Do is the result of If-None-Match.
+func (c *VpnTunnelsAggregatedListCall) IfNoneMatch(entityTag string) *VpnTunnelsAggregatedListCall {
+	c.ifNoneMatch_ = entityTag
+	return c
+}
+
+// Context sets the context to be used in this call's Do method. Any
+// pending HTTP request will be aborted if the provided context is
+// canceled.
+func (c *VpnTunnelsAggregatedListCall) Context(ctx context.Context) *VpnTunnelsAggregatedListCall {
+	c.ctx_ = ctx
+	return c
+}
+
+func (c *VpnTunnelsAggregatedListCall) doRequest(alt string) (*http.Response, error) {
+	var body io.Reader = nil
+	c.urlParams_.Set("alt", alt)
+	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/vpnTunnels")
+	urls += "?" + c.urlParams_.Encode()
+	req, _ := http.NewRequest("GET", urls, body)
+	googleapi.Expand(req.URL, map[string]string{
+		"project": c.project,
+	})
+	req.Header.Set("User-Agent", c.s.userAgent())
+	if c.ifNoneMatch_ != "" {
+		req.Header.Set("If-None-Match", c.ifNoneMatch_)
+	}
+	if c.ctx_ != nil {
+		return ctxhttp.Do(c.ctx_, c.s.client, req)
+	}
+	return c.s.client.Do(req)
+}
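+
+// Editor's note: a short sketch, not part of the generated client,
+// showing how an aggregated list groups results per scope; it assumes
+// an authenticated *Service named svc, a hypothetical project ID, and
+// the response's Items map of per-scope VpnTunnelsScopedList values.
+//
+//	agg, err := svc.VpnTunnels.AggregatedList("my-project").Do()
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	for scope, scoped := range agg.Items {
+//		for _, t := range scoped.VpnTunnels {
+//			fmt.Println(scope, t.Name)
+//		}
+//	}
+
+// Do executes the "compute.vpnTunnels.aggregatedList" call.
+// Exactly one of *VpnTunnelAggregatedList or error will be non-nil. Any
+// non-2xx status code is an error. Response headers are in either
+// *VpnTunnelAggregatedList.ServerResponse.Header or (if a response was
+// returned at all) in error.(*googleapi.Error).Header. Use
+// googleapi.IsNotModified to check whether the returned error was
+// because http.StatusNotModified was returned.
+func (c *VpnTunnelsAggregatedListCall) Do(opts ...googleapi.CallOption) (*VpnTunnelAggregatedList, error) {
+	gensupport.SetOptions(c.urlParams_, opts...)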
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &VpnTunnelAggregatedList{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Retrieves the list of VPN tunnels grouped by scope.", + // "httpMethod": "GET", + // "id": "compute.vpnTunnels.aggregatedList", + // "parameterOrder": [ + // "project" + // ], + // "parameters": { + // "filter": { + // "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, filter=name ne example-instance.\n\nCompute Engine Beta API Only: If you use filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. In particular, use filtering on nested fields to take advantage of instance labels to organize and filter results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions meaning that resources must match all expressions to pass the filters.", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that Compute Engine should return. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests.", + // "format": "uint32", + // "location": "query", + // "maximum": "500", + // "minimum": "0", + // "type": "integer" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. 
Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "location": "query", + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/aggregated/vpnTunnels", + // "response": { + // "$ref": "VpnTunnelAggregatedList" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// method id "compute.vpnTunnels.delete": + +type VpnTunnelsDeleteCall struct { + s *Service + project string + region string + vpnTunnel string + urlParams_ gensupport.URLParams + ctx_ context.Context +} + +// Delete: Deletes the specified VpnTunnel resource. +func (r *VpnTunnelsService) Delete(project string, region string, vpnTunnel string) *VpnTunnelsDeleteCall { + c := &VpnTunnelsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + c.vpnTunnel = vpnTunnel + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *VpnTunnelsDeleteCall) Fields(s ...googleapi.Field) *VpnTunnelsDeleteCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *VpnTunnelsDeleteCall) Context(ctx context.Context) *VpnTunnelsDeleteCall { + c.ctx_ = ctx + return c +} + +func (c *VpnTunnelsDeleteCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/vpnTunnels/{vpnTunnel}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("DELETE", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "region": c.region, + "vpnTunnel": c.vpnTunnel, + }) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "compute.vpnTunnels.delete" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *VpnTunnelsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+	res, err := c.doRequest("json")
+	if res != nil && res.StatusCode == http.StatusNotModified {
+		if res.Body != nil {
+			res.Body.Close()
+		}
+		return nil, &googleapi.Error{
+			Code:   res.StatusCode,
+			Header: res.Header,
+		}
+	}
+	if err != nil {
+		return nil, err
+	}
+	defer googleapi.CloseBody(res)
+	if err := googleapi.CheckResponse(res); err != nil {
+		return nil, err
+	}
+	ret := &Operation{
+		ServerResponse: googleapi.ServerResponse{
+			Header:         res.Header,
+			HTTPStatusCode: res.StatusCode,
+		},
+	}
+	if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
+		return nil, err
+	}
+	return ret, nil
+	// {
+	//   "description": "Deletes the specified VpnTunnel resource.",
+	//   "httpMethod": "DELETE",
+	//   "id": "compute.vpnTunnels.delete",
+	//   "parameterOrder": [
+	//     "project",
+	//     "region",
+	//     "vpnTunnel"
+	//   ],
+	//   "parameters": {
+	//     "project": {
+	//       "description": "Project ID for this request.",
+	//       "location": "path",
+	//       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
+	//       "required": true,
+	//       "type": "string"
+	//     },
+	//     "region": {
+	//       "description": "The name of the region for this request.",
+	//       "location": "path",
+	//       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
+	//       "required": true,
+	//       "type": "string"
+	//     },
+	//     "vpnTunnel": {
+	//       "description": "Name of the VpnTunnel resource to delete.",
+	//       "location": "path",
+	//       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
+	//       "required": true,
+	//       "type": "string"
+	//     }
+	//   },
+	//   "path": "{project}/regions/{region}/vpnTunnels/{vpnTunnel}",
+	//   "response": {
+	//     "$ref": "Operation"
+	//   },
+	//   "scopes": [
+	//     "https://www.googleapis.com/auth/cloud-platform",
+	//     "https://www.googleapis.com/auth/compute"
+	//   ]
+	// }
+
+}
+
+// method id "compute.vpnTunnels.get":
+
+type VpnTunnelsGetCall struct {
+	s            *Service
+	project      string
+	region       string
+	vpnTunnel    string
+	urlParams_   gensupport.URLParams
+	ifNoneMatch_ string
+	ctx_         context.Context
+}
+
+// Get: Returns the specified VpnTunnel resource.
+func (r *VpnTunnelsService) Get(project string, region string, vpnTunnel string) *VpnTunnelsGetCall {
+	c := &VpnTunnelsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)}
+	c.project = project
+	c.region = region
+	c.vpnTunnel = vpnTunnel
+	return c
+}
+
+// Fields allows partial responses to be retrieved. See
+// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *VpnTunnelsGetCall) Fields(s ...googleapi.Field) *VpnTunnelsGetCall {
+	c.urlParams_.Set("fields", googleapi.CombineFields(s))
+	return c
+}
+
+// IfNoneMatch sets the optional parameter which makes the operation
+// fail if the object's ETag matches the given value. This is useful for
+// getting updates only after the object has changed since the last
+// request. Use googleapi.IsNotModified to check whether the response
+// error from Do is the result of If-None-Match.
+func (c *VpnTunnelsGetCall) IfNoneMatch(entityTag string) *VpnTunnelsGetCall {
+	c.ifNoneMatch_ = entityTag
+	return c
+}
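+
+// Editor's note: a small sketch, not part of the generated client,
+// showing the Context hook below used with a timeout; it assumes an
+// authenticated *Service named svc and hypothetical resource names.
+//
+//	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
+//	defer cancel()
+//	tunnel, err := svc.VpnTunnels.Get("my-project", "us-central1", "my-tunnel").
+//		Context(ctx).Do()
+//	if err != nil {
+//		log.Fatal(err) // e.g. the context deadline was exceeded
+//	}
+//	fmt.Println(tunnel.Status)
+
+// Context sets the context to be used in this call's Do method. Any
+// pending HTTP request will be aborted if the provided context is
+// canceled.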
+func (c *VpnTunnelsGetCall) Context(ctx context.Context) *VpnTunnelsGetCall { + c.ctx_ = ctx + return c +} + +func (c *VpnTunnelsGetCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/vpnTunnels/{vpnTunnel}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "region": c.region, + "vpnTunnel": c.vpnTunnel, + }) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + req.Header.Set("If-None-Match", c.ifNoneMatch_) + } + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "compute.vpnTunnels.get" call. +// Exactly one of *VpnTunnel or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *VpnTunnel.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *VpnTunnelsGetCall) Do(opts ...googleapi.CallOption) (*VpnTunnel, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &VpnTunnel{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Returns the specified VpnTunnel resource.", + // "httpMethod": "GET", + // "id": "compute.vpnTunnels.get", + // "parameterOrder": [ + // "project", + // "region", + // "vpnTunnel" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "The name of the region for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "vpnTunnel": { + // "description": "Name of the VpnTunnel resource to return.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/regions/{region}/vpnTunnels/{vpnTunnel}", + // "response": { + // "$ref": "VpnTunnel" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// method id "compute.vpnTunnels.insert": + +type VpnTunnelsInsertCall struct { + s *Service + project string + region string + vpntunnel *VpnTunnel + urlParams_ gensupport.URLParams + ctx_ context.Context +} + +// Insert: Creates a VpnTunnel resource in the specified project and +// region using the data included in the 
request. +func (r *VpnTunnelsService) Insert(project string, region string, vpntunnel *VpnTunnel) *VpnTunnelsInsertCall { + c := &VpnTunnelsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + c.vpntunnel = vpntunnel + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *VpnTunnelsInsertCall) Fields(s ...googleapi.Field) *VpnTunnelsInsertCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *VpnTunnelsInsertCall) Context(ctx context.Context) *VpnTunnelsInsertCall { + c.ctx_ = ctx + return c +} + +func (c *VpnTunnelsInsertCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.vpntunnel) + if err != nil { + return nil, err + } + ctype := "application/json" + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/vpnTunnels") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "region": c.region, + }) + req.Header.Set("Content-Type", ctype) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "compute.vpnTunnels.insert" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *VpnTunnelsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Creates a VpnTunnel resource in the specified project and region using the data included in the request.", + // "httpMethod": "POST", + // "id": "compute.vpnTunnels.insert", + // "parameterOrder": [ + // "project", + // "region" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "The name of the region for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/regions/{region}/vpnTunnels", + // "request": { + // "$ref": "VpnTunnel" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.vpnTunnels.list": + +type VpnTunnelsListCall struct { + s *Service + project string + region string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context +} + +// List: Retrieves the list of VpnTunnel resources contained in the +// specified project and region. +func (r *VpnTunnelsService) List(project string, region string) *VpnTunnelsListCall { + c := &VpnTunnelsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + return c +} + +// Filter sets the optional parameter "filter": Sets a filter expression +// for filtering listed resources, in the form filter={expression}. Your +// {expression} must be in the format: field_name comparison_string +// literal_string. +// +// The field_name is the name of the field you want to compare. Only +// atomic field types are supported (string, number, boolean). The +// comparison_string must be either eq (equals) or ne (not equals). The +// literal_string is the string value to filter to. The literal value +// must be valid for the type of field you are filtering by (string, +// number, boolean). For string fields, the literal value is interpreted +// as a regular expression using RE2 syntax. The literal value must +// match the entire field. +// +// For example, filter=name ne example-instance. +// +// Compute Engine Beta API Only: If you use filtering in the Beta API, +// you can also filter on nested fields. For example, you could filter +// on instances that have set the scheduling.automaticRestart field to +// true. In particular, use filtering on nested fields to take advantage +// of instance labels to organize and filter results based on label +// values. 
+//
+// The Beta API also supports filtering on multiple expressions by
+// providing each separate expression within parentheses. For example,
+// (scheduling.automaticRestart eq true) (zone eq us-central1-f).
+// Multiple expressions are treated as AND expressions meaning that
+// resources must match all expressions to pass the filters.
+func (c *VpnTunnelsListCall) Filter(filter string) *VpnTunnelsListCall {
+	c.urlParams_.Set("filter", filter)
+	return c
+}
+
+// MaxResults sets the optional parameter "maxResults": The maximum
+// number of results per page that Compute Engine should return. If the
+// number of available results is larger than maxResults, Compute Engine
+// returns a nextPageToken that can be used to get the next page of
+// results in subsequent list requests.
+func (c *VpnTunnelsListCall) MaxResults(maxResults int64) *VpnTunnelsListCall {
+	c.urlParams_.Set("maxResults", fmt.Sprint(maxResults))
+	return c
+}
+
+// PageToken sets the optional parameter "pageToken": Specifies a page
+// token to use. Set pageToken to the nextPageToken returned by a
+// previous list request to get the next page of results.
+func (c *VpnTunnelsListCall) PageToken(pageToken string) *VpnTunnelsListCall {
+	c.urlParams_.Set("pageToken", pageToken)
+	return c
+}
+
+// Fields allows partial responses to be retrieved. See
+// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *VpnTunnelsListCall) Fields(s ...googleapi.Field) *VpnTunnelsListCall {
+	c.urlParams_.Set("fields", googleapi.CombineFields(s))
+	return c
+}
+
+// IfNoneMatch sets the optional parameter which makes the operation
+// fail if the object's ETag matches the given value. This is useful for
+// getting updates only after the object has changed since the last
+// request. Use googleapi.IsNotModified to check whether the response
+// error from Do is the result of If-None-Match.
+func (c *VpnTunnelsListCall) IfNoneMatch(entityTag string) *VpnTunnelsListCall {
+	c.ifNoneMatch_ = entityTag
+	return c
+}
+
+// Context sets the context to be used in this call's Do method. Any
+// pending HTTP request will be aborted if the provided context is
+// canceled.
+func (c *VpnTunnelsListCall) Context(ctx context.Context) *VpnTunnelsListCall {
+	c.ctx_ = ctx
+	return c
+}
+
+func (c *VpnTunnelsListCall) doRequest(alt string) (*http.Response, error) {
+	var body io.Reader = nil
+	c.urlParams_.Set("alt", alt)
+	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/vpnTunnels")
+	urls += "?" + c.urlParams_.Encode()
+	req, _ := http.NewRequest("GET", urls, body)
+	googleapi.Expand(req.URL, map[string]string{
+		"project": c.project,
+		"region":  c.region,
+	})
+	req.Header.Set("User-Agent", c.s.userAgent())
+	if c.ifNoneMatch_ != "" {
+		req.Header.Set("If-None-Match", c.ifNoneMatch_)
+	}
+	if c.ctx_ != nil {
+		return ctxhttp.Do(c.ctx_, c.s.client, req)
+	}
+	return c.s.client.Do(req)
+}
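+
+// Editor's note: a brief sketch, not part of the generated client, of
+// the filter syntax documented above ("field_name comparison_string
+// literal_string"); it assumes an authenticated *Service named svc and
+// hypothetical names.
+//
+//	list, err := svc.VpnTunnels.List("my-project", "us-central1").
+//		Filter("name ne test-tunnel").Do()
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	for _, t := range list.Items {
+//		fmt.Println(t.Name, t.Status)
+//	}
+
+// Do executes the "compute.vpnTunnels.list" call.
+// Exactly one of *VpnTunnelList or error will be non-nil. Any non-2xx
+// status code is an error. Response headers are in either
+// *VpnTunnelList.ServerResponse.Header or (if a response was returned
+// at all) in error.(*googleapi.Error).Header. Use
+// googleapi.IsNotModified to check whether the returned error was
+// because http.StatusNotModified was returned.
+func (c *VpnTunnelsListCall) Do(opts ...googleapi.CallOption) (*VpnTunnelList, error) {
+	gensupport.SetOptions(c.urlParams_, opts...)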
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &VpnTunnelList{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Retrieves the list of VpnTunnel resources contained in the specified project and region.", + // "httpMethod": "GET", + // "id": "compute.vpnTunnels.list", + // "parameterOrder": [ + // "project", + // "region" + // ], + // "parameters": { + // "filter": { + // "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, filter=name ne example-instance.\n\nCompute Engine Beta API Only: If you use filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. In particular, use filtering on nested fields to take advantage of instance labels to organize and filter results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions meaning that resources must match all expressions to pass the filters.", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that Compute Engine should return. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests.", + // "format": "uint32", + // "location": "query", + // "maximum": "500", + // "minimum": "0", + // "type": "integer" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. 
Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "location": "query", + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "The name of the region for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/regions/{region}/vpnTunnels", + // "response": { + // "$ref": "VpnTunnelList" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// method id "compute.zoneOperations.delete": + +type ZoneOperationsDeleteCall struct { + s *Service + project string + zone string + operation string + urlParams_ gensupport.URLParams + ctx_ context.Context +} + +// Delete: Deletes the specified zone-specific Operations resource. +// For details, see https://cloud.google.com/compute/docs/reference/latest/zoneOperations/delete +func (r *ZoneOperationsService) Delete(project string, zone string, operation string) *ZoneOperationsDeleteCall { + c := &ZoneOperationsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.zone = zone + c.operation = operation + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ZoneOperationsDeleteCall) Fields(s ...googleapi.Field) *ZoneOperationsDeleteCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ZoneOperationsDeleteCall) Context(ctx context.Context) *ZoneOperationsDeleteCall { + c.ctx_ = ctx + return c +} + +func (c *ZoneOperationsDeleteCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/operations/{operation}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("DELETE", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "zone": c.zone, + "operation": c.operation, + }) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "compute.zoneOperations.delete" call. +func (c *ZoneOperationsDeleteCall) Do(opts ...googleapi.CallOption) error { + gensupport.SetOptions(c.urlParams_, opts...) 
+	res, err := c.doRequest("json")
+	if err != nil {
+		return err
+	}
+	defer googleapi.CloseBody(res)
+	if err := googleapi.CheckResponse(res); err != nil {
+		return err
+	}
+	return nil
+	// {
+	//   "description": "Deletes the specified zone-specific Operations resource.",
+	//   "httpMethod": "DELETE",
+	//   "id": "compute.zoneOperations.delete",
+	//   "parameterOrder": [
+	//     "project",
+	//     "zone",
+	//     "operation"
+	//   ],
+	//   "parameters": {
+	//     "operation": {
+	//       "description": "Name of the Operations resource to delete.",
+	//       "location": "path",
+	//       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
+	//       "required": true,
+	//       "type": "string"
+	//     },
+	//     "project": {
+	//       "description": "Project ID for this request.",
+	//       "location": "path",
+	//       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
+	//       "required": true,
+	//       "type": "string"
+	//     },
+	//     "zone": {
+	//       "description": "Name of the zone scoping this request.",
+	//       "location": "path",
+	//       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
+	//       "required": true,
+	//       "type": "string"
+	//     }
+	//   },
+	//   "path": "{project}/zones/{zone}/operations/{operation}",
+	//   "scopes": [
+	//     "https://www.googleapis.com/auth/cloud-platform",
+	//     "https://www.googleapis.com/auth/compute"
+	//   ]
+	// }
+
+}
+
+// method id "compute.zoneOperations.get":
+
+type ZoneOperationsGetCall struct {
+	s            *Service
+	project      string
+	zone         string
+	operation    string
+	urlParams_   gensupport.URLParams
+	ifNoneMatch_ string
+	ctx_         context.Context
+}
+
+// Get: Retrieves the specified zone-specific Operations resource.
+// For details, see https://cloud.google.com/compute/docs/reference/latest/zoneOperations/get
+func (r *ZoneOperationsService) Get(project string, zone string, operation string) *ZoneOperationsGetCall {
+	c := &ZoneOperationsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)}
+	c.project = project
+	c.zone = zone
+	c.operation = operation
+	return c
+}
+
+// Fields allows partial responses to be retrieved. See
+// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *ZoneOperationsGetCall) Fields(s ...googleapi.Field) *ZoneOperationsGetCall {
+	c.urlParams_.Set("fields", googleapi.CombineFields(s))
+	return c
+}
+
+// IfNoneMatch sets the optional parameter which makes the operation
+// fail if the object's ETag matches the given value. This is useful for
+// getting updates only after the object has changed since the last
+// request. Use googleapi.IsNotModified to check whether the response
+// error from Do is the result of the If-None-Match header.
+func (c *ZoneOperationsGetCall) IfNoneMatch(entityTag string) *ZoneOperationsGetCall {
+	c.ifNoneMatch_ = entityTag
+	return c
+}
+
+// Context sets the context to be used in this call's Do method. Any
+// pending HTTP request will be aborted if the provided context is
+// canceled.
+func (c *ZoneOperationsGetCall) Context(ctx context.Context) *ZoneOperationsGetCall {
+	c.ctx_ = ctx
+	return c
+}
+
+func (c *ZoneOperationsGetCall) doRequest(alt string) (*http.Response, error) {
+	var body io.Reader = nil
+	c.urlParams_.Set("alt", alt)
+	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/operations/{operation}")
+	urls += "?" + c.urlParams_.Encode()
+	req, _ := http.NewRequest("GET", urls, body)
+	googleapi.Expand(req.URL, map[string]string{
+		"project":   c.project,
+		"zone":      c.zone,
+		"operation": c.operation,
+	})
+	req.Header.Set("User-Agent", c.s.userAgent())
+	if c.ifNoneMatch_ != "" {
+		req.Header.Set("If-None-Match", c.ifNoneMatch_)
+	}
+	if c.ctx_ != nil {
+		return ctxhttp.Do(c.ctx_, c.s.client, req)
+	}
+	return c.s.client.Do(req)
+}
+
+// Do executes the "compute.zoneOperations.get" call.
+// Exactly one of *Operation or error will be non-nil. Any non-2xx
+// status code is an error. Response headers are in either
+// *Operation.ServerResponse.Header or (if a response was returned at
+// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified
+// to check whether the returned error was because
+// http.StatusNotModified was returned.
+func (c *ZoneOperationsGetCall) Do(opts ...googleapi.CallOption) (*Operation, error) {
+	gensupport.SetOptions(c.urlParams_, opts...)
+	res, err := c.doRequest("json")
+	if res != nil && res.StatusCode == http.StatusNotModified {
+		if res.Body != nil {
+			res.Body.Close()
+		}
+		return nil, &googleapi.Error{
+			Code:   res.StatusCode,
+			Header: res.Header,
+		}
+	}
+	if err != nil {
+		return nil, err
+	}
+	defer googleapi.CloseBody(res)
+	if err := googleapi.CheckResponse(res); err != nil {
+		return nil, err
+	}
+	ret := &Operation{
+		ServerResponse: googleapi.ServerResponse{
+			Header:         res.Header,
+			HTTPStatusCode: res.StatusCode,
+		},
+	}
+	if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
+		return nil, err
+	}
+	return ret, nil
+	// {
+	//   "description": "Retrieves the specified zone-specific Operations resource.",
+	//   "httpMethod": "GET",
+	//   "id": "compute.zoneOperations.get",
+	//   "parameterOrder": [
+	//     "project",
+	//     "zone",
+	//     "operation"
+	//   ],
+	//   "parameters": {
+	//     "operation": {
+	//       "description": "Name of the Operations resource to return.",
+	//       "location": "path",
+	//       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
+	//       "required": true,
+	//       "type": "string"
+	//     },
+	//     "project": {
+	//       "description": "Project ID for this request.",
+	//       "location": "path",
+	//       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
+	//       "required": true,
+	//       "type": "string"
+	//     },
+	//     "zone": {
+	//       "description": "Name of the zone scoping this request.",
+	//       "location": "path",
+	//       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
+	//       "required": true,
+	//       "type": "string"
+	//     }
+	//   },
+	//   "path": "{project}/zones/{zone}/operations/{operation}",
+	//   "response": {
+	//     "$ref": "Operation"
+	//   },
+	//   "scopes": [
+	//     "https://www.googleapis.com/auth/cloud-platform",
+	//     "https://www.googleapis.com/auth/compute",
+	//     "https://www.googleapis.com/auth/compute.readonly"
+	//   ]
+	// }
+
+}
+
+// method id "compute.zoneOperations.list":
+
+type ZoneOperationsListCall struct {
+	s            *Service
+	project      string
+	zone         string
+	urlParams_   gensupport.URLParams
+	ifNoneMatch_ string
+	ctx_         context.Context
+}
+
+// List: Retrieves the list of Operation resources contained within the
+// specified zone.
+// For details, see https://cloud.google.com/compute/docs/reference/latest/zoneOperations/list
+func (r *ZoneOperationsService) List(project string, zone string) *ZoneOperationsListCall {
+	c := &ZoneOperationsListCall{s: r.s, urlParams_: make(gensupport.URLParams)}
+	c.project = project
+	c.zone = zone
+	return c
+}
+
+// Filter sets the optional parameter "filter": Sets a filter expression
+// for filtering listed resources, in the form filter={expression}. Your
+// {expression} must be in the format: field_name comparison_string
+// literal_string.
+//
+// The field_name is the name of the field you want to compare. Only
+// atomic field types are supported (string, number, boolean). The
+// comparison_string must be either eq (equals) or ne (not equals). The
+// literal_string is the string value to filter to. The literal value
+// must be valid for the type of field you are filtering by (string,
+// number, boolean). For string fields, the literal value is interpreted
+// as a regular expression using RE2 syntax. The literal value must
+// match the entire field.
+//
+// For example, filter=name ne example-instance.
+//
+// Compute Engine Beta API Only: If you use filtering in the Beta API,
+// you can also filter on nested fields. For example, you could filter
+// on instances that have set the scheduling.automaticRestart field to
+// true. In particular, use filtering on nested fields to take advantage
+// of instance labels to organize and filter results based on label
+// values.
+//
+// The Beta API also supports filtering on multiple expressions by
+// providing each separate expression within parentheses. For example,
+// (scheduling.automaticRestart eq true) (zone eq us-central1-f).
+// Multiple expressions are treated as AND expressions meaning that
+// resources must match all expressions to pass the filters.
+func (c *ZoneOperationsListCall) Filter(filter string) *ZoneOperationsListCall {
+	c.urlParams_.Set("filter", filter)
+	return c
+}
+
+// MaxResults sets the optional parameter "maxResults": The maximum
+// number of results per page that Compute Engine should return. If the
+// number of available results is larger than maxResults, Compute Engine
+// returns a nextPageToken that can be used to get the next page of
+// results in subsequent list requests.
+func (c *ZoneOperationsListCall) MaxResults(maxResults int64) *ZoneOperationsListCall {
+	c.urlParams_.Set("maxResults", fmt.Sprint(maxResults))
+	return c
+}
+
+// PageToken sets the optional parameter "pageToken": Specifies a page
+// token to use. Set pageToken to the nextPageToken returned by a
+// previous list request to get the next page of results.
+func (c *ZoneOperationsListCall) PageToken(pageToken string) *ZoneOperationsListCall {
+	c.urlParams_.Set("pageToken", pageToken)
+	return c
+}
+
+// Fields allows partial responses to be retrieved. See
+// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *ZoneOperationsListCall) Fields(s ...googleapi.Field) *ZoneOperationsListCall {
+	c.urlParams_.Set("fields", googleapi.CombineFields(s))
+	return c
+}
+
+// IfNoneMatch sets the optional parameter which makes the operation
+// fail if the object's ETag matches the given value. This is useful for
+// getting updates only after the object has changed since the last
+// request. Use googleapi.IsNotModified to check whether the response
+// error from Do is the result of the If-None-Match header.
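+//
+// A sketch of the conditional fetch this enables; the service variable
+// and the etag value are hypothetical:
+//
+//	list, err := svc.ZoneOperations.List("my-project", "us-central1-a").
+//		IfNoneMatch(previousEtag).
+//		Do()
+//	if googleapi.IsNotModified(err) {
+//		// The previously cached OperationList is still current.
+//	}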
+func (c *ZoneOperationsListCall) IfNoneMatch(entityTag string) *ZoneOperationsListCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ZoneOperationsListCall) Context(ctx context.Context) *ZoneOperationsListCall { + c.ctx_ = ctx + return c +} + +func (c *ZoneOperationsListCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/operations") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "zone": c.zone, + }) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + req.Header.Set("If-None-Match", c.ifNoneMatch_) + } + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "compute.zoneOperations.list" call. +// Exactly one of *OperationList or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *OperationList.ServerResponse.Header or (if a response was returned +// at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *ZoneOperationsListCall) Do(opts ...googleapi.CallOption) (*OperationList, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &OperationList{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Retrieves the list of Operation resources contained within the specified zone.", + // "httpMethod": "GET", + // "id": "compute.zoneOperations.list", + // "parameterOrder": [ + // "project", + // "zone" + // ], + // "parameters": { + // "filter": { + // "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, filter=name ne example-instance.\n\nCompute Engine Beta API Only: If you use filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. 
In particular, use filtering on nested fields to take advantage of instance labels to organize and filter results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions meaning that resources must match all expressions to pass the filters.",
+	//       "location": "query",
+	//       "type": "string"
+	//     },
+	//     "maxResults": {
+	//       "default": "500",
+	//       "description": "The maximum number of results per page that Compute Engine should return. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests.",
+	//       "format": "uint32",
+	//       "location": "query",
+	//       "maximum": "500",
+	//       "minimum": "0",
+	//       "type": "integer"
+	//     },
+	//     "pageToken": {
+	//       "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.",
+	//       "location": "query",
+	//       "type": "string"
+	//     },
+	//     "project": {
+	//       "description": "Project ID for this request.",
+	//       "location": "path",
+	//       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
+	//       "required": true,
+	//       "type": "string"
+	//     },
+	//     "zone": {
+	//       "description": "Name of the zone scoping this request.",
+	//       "location": "path",
+	//       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
+	//       "required": true,
+	//       "type": "string"
+	//     }
+	//   },
+	//   "path": "{project}/zones/{zone}/operations",
+	//   "response": {
+	//     "$ref": "OperationList"
+	//   },
+	//   "scopes": [
+	//     "https://www.googleapis.com/auth/cloud-platform",
+	//     "https://www.googleapis.com/auth/compute",
+	//     "https://www.googleapis.com/auth/compute.readonly"
+	//   ]
+	// }
+
+}
+
+// method id "compute.zones.get":
+
+type ZonesGetCall struct {
+	s            *Service
+	project      string
+	zone         string
+	urlParams_   gensupport.URLParams
+	ifNoneMatch_ string
+	ctx_         context.Context
+}
+
+// Get: Returns the specified zone resource.
+// For details, see https://cloud.google.com/compute/docs/reference/latest/zones/get
+func (r *ZonesService) Get(project string, zone string) *ZonesGetCall {
+	c := &ZonesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)}
+	c.project = project
+	c.zone = zone
+	return c
+}
+
+// Fields allows partial responses to be retrieved. See
+// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *ZonesGetCall) Fields(s ...googleapi.Field) *ZonesGetCall {
+	c.urlParams_.Set("fields", googleapi.CombineFields(s))
+	return c
+}
+
+// IfNoneMatch sets the optional parameter which makes the operation
+// fail if the object's ETag matches the given value. This is useful for
+// getting updates only after the object has changed since the last
+// request. Use googleapi.IsNotModified to check whether the response
+// error from Do is the result of the If-None-Match header.
+func (c *ZonesGetCall) IfNoneMatch(entityTag string) *ZonesGetCall {
+	c.ifNoneMatch_ = entityTag
+	return c
+}
+
+// Context sets the context to be used in this call's Do method. Any
+// pending HTTP request will be aborted if the provided context is
+// canceled.
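+//
+// A minimal cancellation sketch; the service variable and the timeout
+// duration are hypothetical:
+//
+//	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
+//	defer cancel()
+//	zone, err := svc.Zones.Get("my-project", "us-central1-a").Context(ctx).Do()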
+func (c *ZonesGetCall) Context(ctx context.Context) *ZonesGetCall { + c.ctx_ = ctx + return c +} + +func (c *ZonesGetCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "zone": c.zone, + }) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + req.Header.Set("If-None-Match", c.ifNoneMatch_) + } + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "compute.zones.get" call. +// Exactly one of *Zone or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Zone.ServerResponse.Header or (if a response was returned at all) in +// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check +// whether the returned error was because http.StatusNotModified was +// returned. +func (c *ZonesGetCall) Do(opts ...googleapi.CallOption) (*Zone, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Zone{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Returns the specified zone resource.", + // "httpMethod": "GET", + // "id": "compute.zones.get", + // "parameterOrder": [ + // "project", + // "zone" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "zone": { + // "description": "Name of the zone resource to return.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/zones/{zone}", + // "response": { + // "$ref": "Zone" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// method id "compute.zones.list": + +type ZonesListCall struct { + s *Service + project string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context +} + +// List: Retrieves the list of zone resources available to the specified +// project. +// For details, see https://cloud.google.com/compute/docs/reference/latest/zones/list +func (r *ZonesService) List(project string) *ZonesListCall { + c := &ZonesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + return c +} + +// Filter sets the optional parameter "filter": Sets a filter expression +// for filtering listed resources, in the form filter={expression}. 
Your
+// {expression} must be in the format: field_name comparison_string
+// literal_string.
+//
+// The field_name is the name of the field you want to compare. Only
+// atomic field types are supported (string, number, boolean). The
+// comparison_string must be either eq (equals) or ne (not equals). The
+// literal_string is the string value to filter to. The literal value
+// must be valid for the type of field you are filtering by (string,
+// number, boolean). For string fields, the literal value is interpreted
+// as a regular expression using RE2 syntax. The literal value must
+// match the entire field.
+//
+// For example, filter=name ne example-instance.
+//
+// Compute Engine Beta API Only: If you use filtering in the Beta API,
+// you can also filter on nested fields. For example, you could filter
+// on instances that have set the scheduling.automaticRestart field to
+// true. In particular, use filtering on nested fields to take advantage
+// of instance labels to organize and filter results based on label
+// values.
+//
+// The Beta API also supports filtering on multiple expressions by
+// providing each separate expression within parentheses. For example,
+// (scheduling.automaticRestart eq true) (zone eq us-central1-f).
+// Multiple expressions are treated as AND expressions meaning that
+// resources must match all expressions to pass the filters.
+func (c *ZonesListCall) Filter(filter string) *ZonesListCall {
+	c.urlParams_.Set("filter", filter)
+	return c
+}
+
+// MaxResults sets the optional parameter "maxResults": The maximum
+// number of results per page that Compute Engine should return. If the
+// number of available results is larger than maxResults, Compute Engine
+// returns a nextPageToken that can be used to get the next page of
+// results in subsequent list requests.
+func (c *ZonesListCall) MaxResults(maxResults int64) *ZonesListCall {
+	c.urlParams_.Set("maxResults", fmt.Sprint(maxResults))
+	return c
+}
+
+// PageToken sets the optional parameter "pageToken": Specifies a page
+// token to use. Set pageToken to the nextPageToken returned by a
+// previous list request to get the next page of results.
+func (c *ZonesListCall) PageToken(pageToken string) *ZonesListCall {
+	c.urlParams_.Set("pageToken", pageToken)
+	return c
+}
+
+// Fields allows partial responses to be retrieved. See
+// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *ZonesListCall) Fields(s ...googleapi.Field) *ZonesListCall {
+	c.urlParams_.Set("fields", googleapi.CombineFields(s))
+	return c
+}
+
+// IfNoneMatch sets the optional parameter which makes the operation
+// fail if the object's ETag matches the given value. This is useful for
+// getting updates only after the object has changed since the last
+// request. Use googleapi.IsNotModified to check whether the response
+// error from Do is the result of the If-None-Match header.
+func (c *ZonesListCall) IfNoneMatch(entityTag string) *ZonesListCall {
+	c.ifNoneMatch_ = entityTag
+	return c
+}
+
+// Context sets the context to be used in this call's Do method. Any
+// pending HTTP request will be aborted if the provided context is
+// canceled.
+func (c *ZonesListCall) Context(ctx context.Context) *ZonesListCall {
+	c.ctx_ = ctx
+	return c
+}
+
+func (c *ZonesListCall) doRequest(alt string) (*http.Response, error) {
+	var body io.Reader = nil
+	c.urlParams_.Set("alt", alt)
+	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones")
+	urls += "?"
+ c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + }) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + req.Header.Set("If-None-Match", c.ifNoneMatch_) + } + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "compute.zones.list" call. +// Exactly one of *ZoneList or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *ZoneList.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *ZonesListCall) Do(opts ...googleapi.CallOption) (*ZoneList, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &ZoneList{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Retrieves the list of zone resources available to the specified project.", + // "httpMethod": "GET", + // "id": "compute.zones.list", + // "parameterOrder": [ + // "project" + // ], + // "parameters": { + // "filter": { + // "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, filter=name ne example-instance.\n\nCompute Engine Beta API Only: If you use filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. In particular, use filtering on nested fields to take advantage of instance labels to organize and filter results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions meaning that resources must match all expressions to pass the filters.", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that Compute Engine should return. 
If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests.", + // "format": "uint32", + // "location": "query", + // "maximum": "500", + // "minimum": "0", + // "type": "integer" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "location": "query", + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/zones", + // "response": { + // "$ref": "ZoneList" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} diff --git a/vendor/google.golang.org/api/container/v1/container-api.json b/vendor/google.golang.org/api/container/v1/container-api.json new file mode 100644 index 000000000..5cbdf13f8 --- /dev/null +++ b/vendor/google.golang.org/api/container/v1/container-api.json @@ -0,0 +1,690 @@ +{ + "kind": "discovery#restDescription", + "etag": "\"bRFOOrZKfO9LweMbPqu0kcu6De8/-5Ir9-bAl4HnPM8XDQ5ycW_gSZQ\"", + "discoveryVersion": "v1", + "id": "container:v1", + "name": "container", + "version": "v1", + "revision": "20150603", + "title": "Google Container Engine API", + "description": "The Google Container Engine API is used for building and managing container based applications, powered by the open source Kubernetes technology.", + "ownerDomain": "google.com", + "ownerName": "Google", + "icons": { + "x16": "http://www.google.com/images/icons/product/search-16.gif", + "x32": "http://www.google.com/images/icons/product/search-32.gif" + }, + "documentationLink": "https://cloud.google.com/container-engine/", + "protocol": "rest", + "baseUrl": "https://container.googleapis.com/", + "basePath": "", + "rootUrl": "https://container.googleapis.com/", + "servicePath": "", + "batchPath": "batch", + "parameters": { + "access_token": { + "type": "string", + "description": "OAuth access token.", + "location": "query" + }, + "alt": { + "type": "string", + "description": "Data format for response.", + "default": "json", + "enumDescriptions": [ + "Responses with Content-Type of application/json", + "Media download with context-dependent Content-Type", + "Responses with Content-Type of application/x-protobuf" + ], + "location": "query" + }, + "bearer_token": { + "type": "string", + "description": "OAuth bearer token.", + "location": "query" + }, + "callback": { + "type": "string", + "description": "JSONP", + "location": "query" + }, + "fields": { + "type": "string", + "description": "Selector specifying which fields to include in a partial response.", + "location": "query" + }, + "key": { + "type": "string", + "description": "API key. Your API key identifies your project and provides you with API access, quota, and reports. 
Required unless you provide an OAuth 2.0 token.", + "location": "query" + }, + "oauth_token": { + "type": "string", + "description": "OAuth 2.0 token for the current user.", + "location": "query" + }, + "pp": { + "type": "boolean", + "description": "Pretty-print response.", + "default": "true", + "location": "query" + }, + "prettyPrint": { + "type": "boolean", + "description": "Returns response with indentations and line breaks.", + "default": "true", + "location": "query" + }, + "quotaUser": { + "type": "string", + "description": "Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters.", + "location": "query" + }, + "upload_protocol": { + "type": "string", + "description": "Upload protocol for media (e.g. \"raw\", \"multipart\").", + "location": "query" + }, + "uploadType": { + "type": "string", + "description": "Legacy upload protocol for media (e.g. \"media\", \"multipart\").", + "location": "query" + }, + "$.xgafv": { + "type": "string", + "description": "V1 error format.", + "enumDescriptions": [ + "v1 error format", + "v2 error format" + ], + "location": "query" + } + }, + "auth": { + "oauth2": { + "scopes": { + "https://www.googleapis.com/auth/cloud-platform": { + "description": "View and manage your data across Google Cloud Platform services" + } + } + } + }, + "schemas": { + "ListClustersResponse": { + "id": "ListClustersResponse", + "type": "object", + "description": "ListClustersResponse is the result of ListClustersRequest.", + "properties": { + "clusters": { + "type": "array", + "description": "A list of clusters in the project in the specified zone, or across all ones.", + "items": { + "$ref": "Cluster" + } + } + } + }, + "Cluster": { + "id": "Cluster", + "type": "object", + "description": "A Google Container Engine cluster.", + "properties": { + "name": { + "type": "string", + "description": "The name of this cluster. The name must be unique within this project and zone, and can be up to 40 characters with the following restrictions: * Lowercase letters, numbers, and hyphens only. * Must start with a letter. * Must end with a number or a letter." + }, + "description": { + "type": "string", + "description": "An optional description of this cluster." + }, + "initialNodeCount": { + "type": "integer", + "description": "The number of nodes to create in this cluster. You must ensure that your Compute Engine [resource quota](/compute/docs/resource-quotas) is sufficient for this number of instances. You must also have available firewall and routes quota.", + "format": "int32" + }, + "nodeConfig": { + "$ref": "NodeConfig", + "description": "Parameters used in creating the cluster's nodes. See the descriptions of the child properties of `nodeConfig`. If unspecified, the defaults for all child properties are used." + }, + "masterAuth": { + "$ref": "MasterAuth", + "description": "The authentication information for accessing the master." + }, + "loggingService": { + "type": "string", + "description": "The logging service that the cluster should write logs to. Currently available options: * \"logging.googleapis.com\" - the Google Cloud Logging service * \"none\" - no logs will be exported from the cluster * \"\" - default value; the default is \"logging.googleapis.com\"" + }, + "monitoringService": { + "type": "string", + "description": "The monitoring service that the cluster should write metrics to. 
Currently available options: * \"monitoring.googleapis.com\" - the Google Cloud Monitoring service * \"none\" - no metrics will be exported from the cluster * \"\" - default value; the default is \"monitoring.googleapis.com\"" + }, + "network": { + "type": "string", + "description": "The name of the Google Compute Engine [network](/compute/docs/networking#networks_1) to which the cluster is connected. If left unspecified, the \"default\" network will be used." + }, + "clusterIpv4Cidr": { + "type": "string", + "description": "The IP address range of the container pods in this cluster, in [CIDR](http://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing) notation (e.g. `10.96.0.0/14`). Leave blank to have one automatically chosen or specify a `/14` block in `10.0.0.0/8`." + }, + "selfLink": { + "type": "string", + "description": "[Output only] Server-defined URL for the resource." + }, + "zone": { + "type": "string", + "description": "[Output only] The name of the Google Compute Engine [zone](/compute/docs/zones#available) in which the cluster resides." + }, + "endpoint": { + "type": "string", + "description": "[Output only] The IP address of this cluster's Kubernetes master endpoint. The endpoint can be accessed from the internet at `https://username:password@endpoint/`. See the `masterAuth` property of this resource for username and password information." + }, + "initialClusterVersion": { + "type": "string", + "description": "[Output only] The software version of Kubernetes master and kubelets used in the cluster when it was first created. The version can be upgraded over time." + }, + "currentMasterVersion": { + "type": "string", + "description": "[Output only] The current software version of the master endpoint." + }, + "currentNodeVersion": { + "type": "string", + "description": "[Output only] The current version of the node software components. If they are currently at different versions because they're in the process of being upgraded, this reflects the minimum version of any of them." + }, + "createTime": { + "type": "string", + "description": "[Output only] The time the cluster was created, in [RFC3339](https://www.ietf.org/rfc/rfc3339.txt) text format." + }, + "status": { + "type": "string", + "description": "[Output only] The current status of this cluster.", + "enum": [ + "STATUS_UNSPECIFIED", + "PROVISIONING", + "RUNNING", + "RECONCILING", + "STOPPING", + "ERROR" + ] + }, + "statusMessage": { + "type": "string", + "description": "[Output only] Additional information about the current status of this cluster, if available." + }, + "nodeIpv4CidrSize": { + "type": "integer", + "description": "[Output only] The size of the address space on each node for hosting containers. This is provisioned from within the container_ipv4_cidr range.", + "format": "int32" + }, + "servicesIpv4Cidr": { + "type": "string", + "description": "[Output only] The IP address range of the Kubernetes services in this cluster, in [CIDR](http://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing) notation (e.g. `1.2.3.4/29`). Service addresses are typically put in the last /16 from the container CIDR." 
+ }, + "instanceGroupUrls": { + "type": "array", + "description": "[Output only] The resource URLs of [instance groups](/compute/docs/instance-groups/) associated with this cluster.", + "items": { + "type": "string" + } + } + } + }, + "NodeConfig": { + "id": "NodeConfig", + "type": "object", + "description": "Per-node parameters.", + "properties": { + "machineType": { + "type": "string", + "description": "The name of a Google Compute Engine [machine type](/compute/docs/machine-types) (e.g. `n1-standard-1`). If unspecified, the default machine type is `n1-standard-1`." + }, + "diskSizeGb": { + "type": "integer", + "description": "Size of the disk attached to each node, specified in GB. The smallest allowed disk size is 10GB. If unspecified, the default disk size is 100GB.", + "format": "int32" + }, + "oauthScopes": { + "type": "array", + "description": "The set of Google API scopes to be made available on all of the node VMs under the \"default\" service account. The following scopes are recommended, but not required, and by default are not included: * `https://www.googleapis.com/auth/compute` is required for mounting persistent storage on your nodes. * `https://www.googleapis.com/auth/devstorage.read_only` is required for communicating with *gcr.io*. If unspecified, no scopes are added.", + "items": { + "type": "string" + } + } + } + }, + "MasterAuth": { + "id": "MasterAuth", + "type": "object", + "description": "The authentication information for accessing the master endpoint. Authentication can be done using HTTP basic auth or using client certificates.", + "properties": { + "username": { + "type": "string", + "description": "The username to use for HTTP basic authentication when accessing the Kubernetes master endpoint." + }, + "password": { + "type": "string", + "description": "The password to use for HTTP basic authentication when accessing the Kubernetes master endpoint. Because the master endpoint is open to the internet, you should create a strong password." + }, + "clusterCaCertificate": { + "type": "string", + "description": "[Output only] Base64 encoded public certificate that is the root of trust for the cluster." + }, + "clientCertificate": { + "type": "string", + "description": "[Output only] Base64 encoded public certificate used by clients to authenticate to the cluster endpoint." + }, + "clientKey": { + "type": "string", + "description": "[Output only] Base64 encoded private key used by clients to authenticate to the cluster endpoint." + } + } + }, + "CreateClusterRequest": { + "id": "CreateClusterRequest", + "type": "object", + "description": "CreateClusterRequest creates a cluster.", + "properties": { + "cluster": { + "$ref": "Cluster", + "description": "A [cluster resource](/container-engine/reference/rest/v1/projects.zones.clusters)" + } + } + }, + "Operation": { + "id": "Operation", + "type": "object", + "description": "Defines the operation resource. All fields are output only.", + "properties": { + "name": { + "type": "string", + "description": "The server-assigned ID for the operation." + }, + "zone": { + "type": "string", + "description": "The name of the Google Compute Engine [zone](/compute/docs/zones#available) in which the operation is taking place." 
+ }, + "operationType": { + "type": "string", + "description": "The operation type.", + "enum": [ + "TYPE_UNSPECIFIED", + "CREATE_CLUSTER", + "DELETE_CLUSTER", + "UPGRADE_MASTER", + "UPGRADE_NODES", + "REPAIR_CLUSTER" + ] + }, + "status": { + "type": "string", + "description": "The current status of the operation.", + "enum": [ + "STATUS_UNSPECIFIED", + "PENDING", + "RUNNING", + "DONE" + ] + }, + "statusMessage": { + "type": "string", + "description": "If an error has occurred, a textual description of the error." + }, + "selfLink": { + "type": "string", + "description": "Server-defined URL for the resource." + }, + "targetLink": { + "type": "string", + "description": "Server-defined URL for the target of the operation." + } + } + }, + "UpdateClusterRequest": { + "id": "UpdateClusterRequest", + "type": "object", + "description": "UpdateClusterRequest updates a cluster.", + "properties": { + "update": { + "$ref": "ClusterUpdate", + "description": "A description of the update." + } + } + }, + "ClusterUpdate": { + "id": "ClusterUpdate", + "type": "object", + "description": "ClusterUpdate describes an update to the cluster.", + "properties": { + "desiredNodeVersion": { + "type": "string", + "description": "The Kubernetes version to change the nodes to (typically an upgrade). Use \"-\" to upgrade to the latest version supported by the server." + } + } + }, + "ListOperationsResponse": { + "id": "ListOperationsResponse", + "type": "object", + "description": "ListOperationsResponse is the result of ListOperationsRequest.", + "properties": { + "operations": { + "type": "array", + "description": "A list of operations in the project in the specified zone.", + "items": { + "$ref": "Operation" + } + } + } + }, + "ServerConfig": { + "id": "ServerConfig", + "type": "object", + "description": "Container Engine Server configuration.", + "properties": { + "defaultClusterVersion": { + "type": "string", + "description": "What version this server deploys by default." 
+ }, + "validNodeVersions": { + "type": "array", + "description": "List of valid node upgrade target versions.", + "items": { + "type": "string" + } + } + } + } + }, + "resources": { + "projects": { + "resources": { + "zones": { + "methods": { + "getServerconfig": { + "id": "container.projects.zones.getServerconfig", + "path": "v1/projects/{projectId}/zones/{zone}/serverconfig", + "httpMethod": "GET", + "description": "Returns configuration info about the Container Engine service.", + "parameters": { + "projectId": { + "type": "string", + "description": "The Google Developers Console [project ID or project number](https://developers.google.com/console/help/new/#projectnumber).", + "required": true, + "location": "path" + }, + "zone": { + "type": "string", + "description": "The name of the Google Compute Engine [zone](/compute/docs/zones#available) to return operations for, or \"-\" for all zones.", + "required": true, + "location": "path" + } + }, + "parameterOrder": [ + "projectId", + "zone" + ], + "response": { + "$ref": "ServerConfig" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] + } + }, + "resources": { + "clusters": { + "methods": { + "list": { + "id": "container.projects.zones.clusters.list", + "path": "v1/projects/{projectId}/zones/{zone}/clusters", + "httpMethod": "GET", + "description": "Lists all clusters owned by a project in either the specified zone or all zones.", + "parameters": { + "projectId": { + "type": "string", + "description": "The Google Developers Console [project ID or project number](https://developers.google.com/console/help/new/#projectnumber).", + "required": true, + "location": "path" + }, + "zone": { + "type": "string", + "description": "The name of the Google Compute Engine [zone](/compute/docs/zones#available) in which the cluster resides, or \"-\" for all zones.", + "required": true, + "location": "path" + } + }, + "parameterOrder": [ + "projectId", + "zone" + ], + "response": { + "$ref": "ListClustersResponse" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] + }, + "get": { + "id": "container.projects.zones.clusters.get", + "path": "v1/projects/{projectId}/zones/{zone}/clusters/{clusterId}", + "httpMethod": "GET", + "description": "Gets a specific cluster.", + "parameters": { + "projectId": { + "type": "string", + "description": "The Google Developers Console [project ID or project number](https://developers.google.com/console/help/new/#projectnumber).", + "required": true, + "location": "path" + }, + "zone": { + "type": "string", + "description": "The name of the Google Compute Engine [zone](/compute/docs/zones#available) in which the cluster resides.", + "required": true, + "location": "path" + }, + "clusterId": { + "type": "string", + "description": "The name of the cluster to retrieve.", + "required": true, + "location": "path" + } + }, + "parameterOrder": [ + "projectId", + "zone", + "clusterId" + ], + "response": { + "$ref": "Cluster" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] + }, + "create": { + "id": "container.projects.zones.clusters.create", + "path": "v1/projects/{projectId}/zones/{zone}/clusters", + "httpMethod": "POST", + "description": "Creates a cluster, consisting of the specified number and type of Google Compute Engine instances, plus a Kubernetes master endpoint. By default, the cluster is created in the project's [default network](/compute/docs/networking#networks_1). One firewall is added for the cluster. 
After cluster creation, the cluster creates routes for each node to allow the containers on that node to communicate with all other instances in the cluster. Finally, an entry is added to the project's global metadata indicating which CIDR range is being used by the cluster.", + "parameters": { + "projectId": { + "type": "string", + "description": "The Google Developers Console [project ID or project number](https://developers.google.com/console/help/new/#projectnumber).", + "required": true, + "location": "path" + }, + "zone": { + "type": "string", + "description": "The name of the Google Compute Engine [zone](/compute/docs/zones#available) in which the cluster resides.", + "required": true, + "location": "path" + } + }, + "parameterOrder": [ + "projectId", + "zone" + ], + "request": { + "$ref": "CreateClusterRequest" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] + }, + "update": { + "id": "container.projects.zones.clusters.update", + "path": "v1/projects/{projectId}/zones/{zone}/clusters/{clusterId}", + "httpMethod": "PUT", + "description": "Update settings of a specific cluster.", + "parameters": { + "projectId": { + "type": "string", + "description": "The Google Developers Console [project ID or project number](https://developers.google.com/console/help/new/#projectnumber).", + "required": true, + "location": "path" + }, + "zone": { + "type": "string", + "description": "The name of the Google Compute Engine [zone](/compute/docs/zones#available) in which the cluster resides.", + "required": true, + "location": "path" + }, + "clusterId": { + "type": "string", + "description": "The name of the cluster to upgrade.", + "required": true, + "location": "path" + } + }, + "parameterOrder": [ + "projectId", + "zone", + "clusterId" + ], + "request": { + "$ref": "UpdateClusterRequest" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] + }, + "delete": { + "id": "container.projects.zones.clusters.delete", + "path": "v1/projects/{projectId}/zones/{zone}/clusters/{clusterId}", + "httpMethod": "DELETE", + "description": "Deletes the cluster, including the Kubernetes endpoint and all worker nodes. 
Firewalls and routes that were configured during cluster creation are also deleted.", + "parameters": { + "projectId": { + "type": "string", + "description": "The Google Developers Console [project ID or project number](https://developers.google.com/console/help/new/#projectnumber).", + "required": true, + "location": "path" + }, + "zone": { + "type": "string", + "description": "The name of the Google Compute Engine [zone](/compute/docs/zones#available) in which the cluster resides.", + "required": true, + "location": "path" + }, + "clusterId": { + "type": "string", + "description": "The name of the cluster to delete.", + "required": true, + "location": "path" + } + }, + "parameterOrder": [ + "projectId", + "zone", + "clusterId" + ], + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] + } + } + }, + "operations": { + "methods": { + "list": { + "id": "container.projects.zones.operations.list", + "path": "v1/projects/{projectId}/zones/{zone}/operations", + "httpMethod": "GET", + "description": "Lists all operations in a project in a specific zone or all zones.", + "parameters": { + "projectId": { + "type": "string", + "description": "The Google Developers Console [project ID or project number](https://developers.google.com/console/help/new/#projectnumber).", + "required": true, + "location": "path" + }, + "zone": { + "type": "string", + "description": "The name of the Google Compute Engine [zone](/compute/docs/zones#available) to return operations for, or \"-\" for all zones.", + "required": true, + "location": "path" + } + }, + "parameterOrder": [ + "projectId", + "zone" + ], + "response": { + "$ref": "ListOperationsResponse" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] + }, + "get": { + "id": "container.projects.zones.operations.get", + "path": "v1/projects/{projectId}/zones/{zone}/operations/{operationId}", + "httpMethod": "GET", + "description": "Gets the specified operation.", + "parameters": { + "projectId": { + "type": "string", + "description": "The Google Developers Console [project ID or project number](https://developers.google.com/console/help/new/#projectnumber).", + "required": true, + "location": "path" + }, + "zone": { + "type": "string", + "description": "The name of the Google Compute Engine [zone](/compute/docs/zones#available) in which the cluster resides.", + "required": true, + "location": "path" + }, + "operationId": { + "type": "string", + "description": "The server-assigned `name` of the operation.", + "required": true, + "location": "path" + } + }, + "parameterOrder": [ + "projectId", + "zone", + "operationId" + ], + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] + } + } + } + } + } + } + } + } +} diff --git a/vendor/google.golang.org/api/container/v1/container-gen.go b/vendor/google.golang.org/api/container/v1/container-gen.go new file mode 100644 index 000000000..27e0b70dd --- /dev/null +++ b/vendor/google.golang.org/api/container/v1/container-gen.go @@ -0,0 +1,1639 @@ +// Package container provides access to the Google Container Engine API. +// +// See https://cloud.google.com/container-engine/ +// +// Usage example: +// +// import "google.golang.org/api/container/v1" +// ... 
+// containerService, err := container.New(oauthHttpClient) +package container + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + context "golang.org/x/net/context" + ctxhttp "golang.org/x/net/context/ctxhttp" + gensupport "google.golang.org/api/gensupport" + googleapi "google.golang.org/api/googleapi" + "io" + "net/http" + "net/url" + "strconv" + "strings" +) + +// Always reference these packages, just in case the auto-generated code +// below doesn't. +var _ = bytes.NewBuffer +var _ = strconv.Itoa +var _ = fmt.Sprintf +var _ = json.NewDecoder +var _ = io.Copy +var _ = url.Parse +var _ = gensupport.MarshalJSON +var _ = googleapi.Version +var _ = errors.New +var _ = strings.Replace +var _ = context.Canceled +var _ = ctxhttp.Do + +const apiId = "container:v1" +const apiName = "container" +const apiVersion = "v1" +const basePath = "https://container.googleapis.com/" + +// OAuth2 scopes used by this API. +const ( + // View and manage your data across Google Cloud Platform services + CloudPlatformScope = "https://www.googleapis.com/auth/cloud-platform" +) + +func New(client *http.Client) (*Service, error) { + if client == nil { + return nil, errors.New("client is nil") + } + s := &Service{client: client, BasePath: basePath} + s.Projects = NewProjectsService(s) + return s, nil +} + +type Service struct { + client *http.Client + BasePath string // API endpoint base URL + UserAgent string // optional additional User-Agent fragment + + Projects *ProjectsService +} + +func (s *Service) userAgent() string { + if s.UserAgent == "" { + return googleapi.UserAgent + } + return googleapi.UserAgent + " " + s.UserAgent +} + +func NewProjectsService(s *Service) *ProjectsService { + rs := &ProjectsService{s: s} + rs.Zones = NewProjectsZonesService(s) + return rs +} + +type ProjectsService struct { + s *Service + + Zones *ProjectsZonesService +} + +func NewProjectsZonesService(s *Service) *ProjectsZonesService { + rs := &ProjectsZonesService{s: s} + rs.Clusters = NewProjectsZonesClustersService(s) + rs.Operations = NewProjectsZonesOperationsService(s) + return rs +} + +type ProjectsZonesService struct { + s *Service + + Clusters *ProjectsZonesClustersService + + Operations *ProjectsZonesOperationsService +} + +func NewProjectsZonesClustersService(s *Service) *ProjectsZonesClustersService { + rs := &ProjectsZonesClustersService{s: s} + return rs +} + +type ProjectsZonesClustersService struct { + s *Service +} + +func NewProjectsZonesOperationsService(s *Service) *ProjectsZonesOperationsService { + rs := &ProjectsZonesOperationsService{s: s} + return rs +} + +type ProjectsZonesOperationsService struct { + s *Service +} + +// Cluster: A Google Container Engine cluster. +type Cluster struct { + // ClusterIpv4Cidr: The IP address range of the container pods in this + // cluster, in + // [CIDR](http://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing) + // notation (e.g. `10.96.0.0/14`). Leave blank to have one automatically + // chosen or specify a `/14` block in `10.0.0.0/8`. + ClusterIpv4Cidr string `json:"clusterIpv4Cidr,omitempty"` + + // CreateTime: [Output only] The time the cluster was created, in + // [RFC3339](https://www.ietf.org/rfc/rfc3339.txt) text format. + CreateTime string `json:"createTime,omitempty"` + + // CurrentMasterVersion: [Output only] The current software version of + // the master endpoint. + CurrentMasterVersion string `json:"currentMasterVersion,omitempty"` + + // CurrentNodeVersion: [Output only] The current version of the node + // software components. 
If they are currently at different versions + // because they're in the process of being upgraded, this reflects the + // minimum version of any of them. + CurrentNodeVersion string `json:"currentNodeVersion,omitempty"` + + // Description: An optional description of this cluster. + Description string `json:"description,omitempty"` + + // Endpoint: [Output only] The IP address of this cluster's Kubernetes + // master endpoint. The endpoint can be accessed from the internet at + // `https://username:password@endpoint/`. See the `masterAuth` property + // of this resource for username and password information. + Endpoint string `json:"endpoint,omitempty"` + + // InitialClusterVersion: [Output only] The software version of + // Kubernetes master and kubelets used in the cluster when it was first + // created. The version can be upgraded over time. + InitialClusterVersion string `json:"initialClusterVersion,omitempty"` + + // InitialNodeCount: The number of nodes to create in this cluster. You + // must ensure that your Compute Engine [resource + // quota](/compute/docs/resource-quotas) is sufficient for this number + // of instances. You must also have available firewall and routes quota. + InitialNodeCount int64 `json:"initialNodeCount,omitempty"` + + // InstanceGroupUrls: [Output only] The resource URLs of [instance + // groups](/compute/docs/instance-groups/) associated with this cluster. + InstanceGroupUrls []string `json:"instanceGroupUrls,omitempty"` + + // LoggingService: The logging service that the cluster should write + // logs to. Currently available options: * "logging.googleapis.com" - + // the Google Cloud Logging service * "none" - no logs will be exported + // from the cluster * "" - default value; the default is + // "logging.googleapis.com" + LoggingService string `json:"loggingService,omitempty"` + + // MasterAuth: The authentication information for accessing the master. + MasterAuth *MasterAuth `json:"masterAuth,omitempty"` + + // MonitoringService: The monitoring service that the cluster should + // write metrics to. Currently available options: * + // "monitoring.googleapis.com" - the Google Cloud Monitoring service * + // "none" - no metrics will be exported from the cluster * "" - default + // value; the default is "monitoring.googleapis.com" + MonitoringService string `json:"monitoringService,omitempty"` + + // Name: The name of this cluster. The name must be unique within this + // project and zone, and can be up to 40 characters with the following + // restrictions: * Lowercase letters, numbers, and hyphens only. * Must + // start with a letter. * Must end with a number or a letter. + Name string `json:"name,omitempty"` + + // Network: The name of the Google Compute Engine + // [network](/compute/docs/networking#networks_1) to which the cluster + // is connected. If left unspecified, the "default" network will be + // used. + Network string `json:"network,omitempty"` + + // NodeConfig: Parameters used in creating the cluster's nodes. See the + // descriptions of the child properties of `nodeConfig`. If unspecified, + // the defaults for all child properties are used. + NodeConfig *NodeConfig `json:"nodeConfig,omitempty"` + + // NodeIpv4CidrSize: [Output only] The size of the address space on each + // node for hosting containers. This is provisioned from within the + // container_ipv4_cidr range. + NodeIpv4CidrSize int64 `json:"nodeIpv4CidrSize,omitempty"` + + // SelfLink: [Output only] Server-defined URL for the resource. 
+ SelfLink string `json:"selfLink,omitempty"` + + // ServicesIpv4Cidr: [Output only] The IP address range of the + // Kubernetes services in this cluster, in + // [CIDR](http://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing) + // notation (e.g. `1.2.3.4/29`). Service addresses are typically put in + // the last /16 from the container CIDR. + ServicesIpv4Cidr string `json:"servicesIpv4Cidr,omitempty"` + + // Status: [Output only] The current status of this cluster. + // + // Possible values: + // "STATUS_UNSPECIFIED" + // "PROVISIONING" + // "RUNNING" + // "RECONCILING" + // "STOPPING" + // "ERROR" + Status string `json:"status,omitempty"` + + // StatusMessage: [Output only] Additional information about the current + // status of this cluster, if available. + StatusMessage string `json:"statusMessage,omitempty"` + + // Zone: [Output only] The name of the Google Compute Engine + // [zone](/compute/docs/zones#available) in which the cluster resides. + Zone string `json:"zone,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "ClusterIpv4Cidr") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *Cluster) MarshalJSON() ([]byte, error) { + type noMethod Cluster + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +// ClusterUpdate: ClusterUpdate describes an update to the cluster. +type ClusterUpdate struct { + // DesiredNodeVersion: The Kubernetes version to change the nodes to + // (typically an upgrade). Use "-" to upgrade to the latest version + // supported by the server. + DesiredNodeVersion string `json:"desiredNodeVersion,omitempty"` + + // ForceSendFields is a list of field names (e.g. "DesiredNodeVersion") + // to unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *ClusterUpdate) MarshalJSON() ([]byte, error) { + type noMethod ClusterUpdate + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +// CreateClusterRequest: CreateClusterRequest creates a cluster. +type CreateClusterRequest struct { + // Cluster: A [cluster + // resource](/container-engine/reference/rest/v1/projects.zones.clusters) + Cluster *Cluster `json:"cluster,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Cluster") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. 
+	ForceSendFields []string `json:"-"`
+}
+
+func (s *CreateClusterRequest) MarshalJSON() ([]byte, error) {
+	type noMethod CreateClusterRequest
+	raw := noMethod(*s)
+	return gensupport.MarshalJSON(raw, s.ForceSendFields)
+}
+
+// ListClustersResponse: ListClustersResponse is the result of
+// ListClustersRequest.
+type ListClustersResponse struct {
+	// Clusters: A list of clusters in the project in the specified zone,
+	// or across all zones.
+	Clusters []*Cluster `json:"clusters,omitempty"`
+
+	// ServerResponse contains the HTTP response code and headers from the
+	// server.
+	googleapi.ServerResponse `json:"-"`
+
+	// ForceSendFields is a list of field names (e.g. "Clusters") to
+	// unconditionally include in API requests. By default, fields with
+	// empty values are omitted from API requests. However, any non-pointer,
+	// non-interface field appearing in ForceSendFields will be sent to the
+	// server regardless of whether the field is empty or not. This may be
+	// used to include empty fields in Patch requests.
+	ForceSendFields []string `json:"-"`
+}
+
+func (s *ListClustersResponse) MarshalJSON() ([]byte, error) {
+	type noMethod ListClustersResponse
+	raw := noMethod(*s)
+	return gensupport.MarshalJSON(raw, s.ForceSendFields)
+}
+
+// ListOperationsResponse: ListOperationsResponse is the result of
+// ListOperationsRequest.
+type ListOperationsResponse struct {
+	// Operations: A list of operations in the project in the specified
+	// zone.
+	Operations []*Operation `json:"operations,omitempty"`
+
+	// ServerResponse contains the HTTP response code and headers from the
+	// server.
+	googleapi.ServerResponse `json:"-"`
+
+	// ForceSendFields is a list of field names (e.g. "Operations") to
+	// unconditionally include in API requests. By default, fields with
+	// empty values are omitted from API requests. However, any non-pointer,
+	// non-interface field appearing in ForceSendFields will be sent to the
+	// server regardless of whether the field is empty or not. This may be
+	// used to include empty fields in Patch requests.
+	ForceSendFields []string `json:"-"`
+}
+
+func (s *ListOperationsResponse) MarshalJSON() ([]byte, error) {
+	type noMethod ListOperationsResponse
+	raw := noMethod(*s)
+	return gensupport.MarshalJSON(raw, s.ForceSendFields)
+}
+
+// MasterAuth: The authentication information for accessing the master
+// endpoint. Authentication can be done using HTTP basic auth or using
+// client certificates.
+type MasterAuth struct {
+	// ClientCertificate: [Output only] Base64 encoded public certificate
+	// used by clients to authenticate to the cluster endpoint.
+	ClientCertificate string `json:"clientCertificate,omitempty"`
+
+	// ClientKey: [Output only] Base64 encoded private key used by clients
+	// to authenticate to the cluster endpoint.
+	ClientKey string `json:"clientKey,omitempty"`
+
+	// ClusterCaCertificate: [Output only] Base64 encoded public certificate
+	// that is the root of trust for the cluster.
+	ClusterCaCertificate string `json:"clusterCaCertificate,omitempty"`
+
+	// Password: The password to use for HTTP basic authentication when
+	// accessing the Kubernetes master endpoint. Because the master endpoint
+	// is open to the internet, you should create a strong password.
+	Password string `json:"password,omitempty"`
+
+	// Username: The username to use for HTTP basic authentication when
+	// accessing the Kubernetes master endpoint.
+	Username string `json:"username,omitempty"`
+
+	// ForceSendFields is a list of field names (e.g.
"ClientCertificate") + // to unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *MasterAuth) MarshalJSON() ([]byte, error) { + type noMethod MasterAuth + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +// NodeConfig: Per-node parameters. +type NodeConfig struct { + // DiskSizeGb: Size of the disk attached to each node, specified in GB. + // The smallest allowed disk size is 10GB. If unspecified, the default + // disk size is 100GB. + DiskSizeGb int64 `json:"diskSizeGb,omitempty"` + + // MachineType: The name of a Google Compute Engine [machine + // type](/compute/docs/machine-types) (e.g. `n1-standard-1`). If + // unspecified, the default machine type is `n1-standard-1`. + MachineType string `json:"machineType,omitempty"` + + // OauthScopes: The set of Google API scopes to be made available on all + // of the node VMs under the "default" service account. The following + // scopes are recommended, but not required, and by default are not + // included: * `https://www.googleapis.com/auth/compute` is required for + // mounting persistent storage on your nodes. * + // `https://www.googleapis.com/auth/devstorage.read_only` is required + // for communicating with *gcr.io*. If unspecified, no scopes are added. + OauthScopes []string `json:"oauthScopes,omitempty"` + + // ForceSendFields is a list of field names (e.g. "DiskSizeGb") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *NodeConfig) MarshalJSON() ([]byte, error) { + type noMethod NodeConfig + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +// Operation: Defines the operation resource. All fields are output +// only. +type Operation struct { + // Name: The server-assigned ID for the operation. + Name string `json:"name,omitempty"` + + // OperationType: The operation type. + // + // Possible values: + // "TYPE_UNSPECIFIED" + // "CREATE_CLUSTER" + // "DELETE_CLUSTER" + // "UPGRADE_MASTER" + // "UPGRADE_NODES" + // "REPAIR_CLUSTER" + OperationType string `json:"operationType,omitempty"` + + // SelfLink: Server-defined URL for the resource. + SelfLink string `json:"selfLink,omitempty"` + + // Status: The current status of the operation. + // + // Possible values: + // "STATUS_UNSPECIFIED" + // "PENDING" + // "RUNNING" + // "DONE" + Status string `json:"status,omitempty"` + + // StatusMessage: If an error has occurred, a textual description of the + // error. + StatusMessage string `json:"statusMessage,omitempty"` + + // TargetLink: Server-defined URL for the target of the operation. + TargetLink string `json:"targetLink,omitempty"` + + // Zone: The name of the Google Compute Engine + // [zone](/compute/docs/zones#available) in which the operation is + // taking place. + Zone string `json:"zone,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. 
+ googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Name") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *Operation) MarshalJSON() ([]byte, error) { + type noMethod Operation + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +// ServerConfig: Container Engine Server configuration. +type ServerConfig struct { + // DefaultClusterVersion: What version this server deploys by default. + DefaultClusterVersion string `json:"defaultClusterVersion,omitempty"` + + // ValidNodeVersions: List of valid node upgrade target versions. + ValidNodeVersions []string `json:"validNodeVersions,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. + // "DefaultClusterVersion") to unconditionally include in API requests. + // By default, fields with empty values are omitted from API requests. + // However, any non-pointer, non-interface field appearing in + // ForceSendFields will be sent to the server regardless of whether the + // field is empty or not. This may be used to include empty fields in + // Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *ServerConfig) MarshalJSON() ([]byte, error) { + type noMethod ServerConfig + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +// UpdateClusterRequest: UpdateClusterRequest updates a cluster. +type UpdateClusterRequest struct { + // Update: A description of the update. + Update *ClusterUpdate `json:"update,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Update") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *UpdateClusterRequest) MarshalJSON() ([]byte, error) { + type noMethod UpdateClusterRequest + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +// method id "container.projects.zones.getServerconfig": + +type ProjectsZonesGetServerconfigCall struct { + s *Service + projectId string + zone string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context +} + +// GetServerconfig: Returns configuration info about the Container +// Engine service. +func (r *ProjectsZonesService) GetServerconfig(projectId string, zone string) *ProjectsZonesGetServerconfigCall { + c := &ProjectsZonesGetServerconfigCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.projectId = projectId + c.zone = zone + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. 
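+//
+// A minimal sketch of a partial-response request (the svc variable and
+// the project/zone values are illustrative, not part of this package;
+// svc is assumed to come from container.New as shown in the package
+// comment):
+//
+//	cfg, err := svc.Projects.Zones.GetServerconfig("my-project", "us-central1-a").
+//		Fields("defaultClusterVersion").Do()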
+func (c *ProjectsZonesGetServerconfigCall) Fields(s ...googleapi.Field) *ProjectsZonesGetServerconfigCall {
+	c.urlParams_.Set("fields", googleapi.CombineFields(s))
+	return c
+}
+
+// IfNoneMatch sets the optional parameter which makes the operation
+// fail if the object's ETag matches the given value. This is useful for
+// getting updates only after the object has changed since the last
+// request. Use googleapi.IsNotModified to check whether the response
+// error from Do is the result of If-None-Match.
+func (c *ProjectsZonesGetServerconfigCall) IfNoneMatch(entityTag string) *ProjectsZonesGetServerconfigCall {
+	c.ifNoneMatch_ = entityTag
+	return c
+}
+
+// Context sets the context to be used in this call's Do method. Any
+// pending HTTP request will be aborted if the provided context is
+// canceled.
+func (c *ProjectsZonesGetServerconfigCall) Context(ctx context.Context) *ProjectsZonesGetServerconfigCall {
+	c.ctx_ = ctx
+	return c
+}
+
+func (c *ProjectsZonesGetServerconfigCall) doRequest(alt string) (*http.Response, error) {
+	var body io.Reader = nil
+	c.urlParams_.Set("alt", alt)
+	urls := googleapi.ResolveRelative(c.s.BasePath, "v1/projects/{projectId}/zones/{zone}/serverconfig")
+	urls += "?" + c.urlParams_.Encode()
+	req, _ := http.NewRequest("GET", urls, body)
+	googleapi.Expand(req.URL, map[string]string{
+		"projectId": c.projectId,
+		"zone":      c.zone,
+	})
+	req.Header.Set("User-Agent", c.s.userAgent())
+	if c.ifNoneMatch_ != "" {
+		req.Header.Set("If-None-Match", c.ifNoneMatch_)
+	}
+	if c.ctx_ != nil {
+		return ctxhttp.Do(c.ctx_, c.s.client, req)
+	}
+	return c.s.client.Do(req)
+}
+
+// Do executes the "container.projects.zones.getServerconfig" call.
+// Exactly one of *ServerConfig or error will be non-nil. Any non-2xx
+// status code is an error. Response headers are in either
+// *ServerConfig.ServerResponse.Header or (if a response was returned at
+// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified
+// to check whether the returned error was because
+// http.StatusNotModified was returned.
+func (c *ProjectsZonesGetServerconfigCall) Do(opts ...googleapi.CallOption) (*ServerConfig, error) {
+	gensupport.SetOptions(c.urlParams_, opts...)
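+	// What follows is the generated request/decode flow: issue the
+	// request, translate a 304 Not Modified (possible when IfNoneMatch
+	// was used) into a *googleapi.Error that googleapi.IsNotModified
+	// recognizes, reject other non-2xx statuses via
+	// googleapi.CheckResponse, then decode the JSON body into the typed
+	// result.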
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &ServerConfig{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Returns configuration info about the Container Engine service.", + // "httpMethod": "GET", + // "id": "container.projects.zones.getServerconfig", + // "parameterOrder": [ + // "projectId", + // "zone" + // ], + // "parameters": { + // "projectId": { + // "description": "The Google Developers Console [project ID or project number](https://developers.google.com/console/help/new/#projectnumber).", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "zone": { + // "description": "The name of the Google Compute Engine [zone](/compute/docs/zones#available) to return operations for, or \"-\" for all zones.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "v1/projects/{projectId}/zones/{zone}/serverconfig", + // "response": { + // "$ref": "ServerConfig" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform" + // ] + // } + +} + +// method id "container.projects.zones.clusters.create": + +type ProjectsZonesClustersCreateCall struct { + s *Service + projectId string + zone string + createclusterrequest *CreateClusterRequest + urlParams_ gensupport.URLParams + ctx_ context.Context +} + +// Create: Creates a cluster, consisting of the specified number and +// type of Google Compute Engine instances, plus a Kubernetes master +// endpoint. By default, the cluster is created in the project's +// [default network](/compute/docs/networking#networks_1). One firewall +// is added for the cluster. After cluster creation, the cluster creates +// routes for each node to allow the containers on that node to +// communicate with all other instances in the cluster. Finally, an +// entry is added to the project's global metadata indicating which CIDR +// range is being used by the cluster. +func (r *ProjectsZonesClustersService) Create(projectId string, zone string, createclusterrequest *CreateClusterRequest) *ProjectsZonesClustersCreateCall { + c := &ProjectsZonesClustersCreateCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.projectId = projectId + c.zone = zone + c.createclusterrequest = createclusterrequest + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ProjectsZonesClustersCreateCall) Fields(s ...googleapi.Field) *ProjectsZonesClustersCreateCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. 
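+// canceled.
+//
+// For example (a sketch; the call variable and the timeout value are
+// illustrative):
+//
+//	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Minute)
+//	defer cancel()
+//	op, err := call.Context(ctx).Do()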
+func (c *ProjectsZonesClustersCreateCall) Context(ctx context.Context) *ProjectsZonesClustersCreateCall { + c.ctx_ = ctx + return c +} + +func (c *ProjectsZonesClustersCreateCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.createclusterrequest) + if err != nil { + return nil, err + } + ctype := "application/json" + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "v1/projects/{projectId}/zones/{zone}/clusters") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "projectId": c.projectId, + "zone": c.zone, + }) + req.Header.Set("Content-Type", ctype) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "container.projects.zones.clusters.create" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *ProjectsZonesClustersCreateCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Creates a cluster, consisting of the specified number and type of Google Compute Engine instances, plus a Kubernetes master endpoint. By default, the cluster is created in the project's [default network](/compute/docs/networking#networks_1). One firewall is added for the cluster. After cluster creation, the cluster creates routes for each node to allow the containers on that node to communicate with all other instances in the cluster. 
Finally, an entry is added to the project's global metadata indicating which CIDR range is being used by the cluster.", + // "httpMethod": "POST", + // "id": "container.projects.zones.clusters.create", + // "parameterOrder": [ + // "projectId", + // "zone" + // ], + // "parameters": { + // "projectId": { + // "description": "The Google Developers Console [project ID or project number](https://developers.google.com/console/help/new/#projectnumber).", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "zone": { + // "description": "The name of the Google Compute Engine [zone](/compute/docs/zones#available) in which the cluster resides.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "v1/projects/{projectId}/zones/{zone}/clusters", + // "request": { + // "$ref": "CreateClusterRequest" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform" + // ] + // } + +} + +// method id "container.projects.zones.clusters.delete": + +type ProjectsZonesClustersDeleteCall struct { + s *Service + projectId string + zone string + clusterId string + urlParams_ gensupport.URLParams + ctx_ context.Context +} + +// Delete: Deletes the cluster, including the Kubernetes endpoint and +// all worker nodes. Firewalls and routes that were configured during +// cluster creation are also deleted. +func (r *ProjectsZonesClustersService) Delete(projectId string, zone string, clusterId string) *ProjectsZonesClustersDeleteCall { + c := &ProjectsZonesClustersDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.projectId = projectId + c.zone = zone + c.clusterId = clusterId + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ProjectsZonesClustersDeleteCall) Fields(s ...googleapi.Field) *ProjectsZonesClustersDeleteCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ProjectsZonesClustersDeleteCall) Context(ctx context.Context) *ProjectsZonesClustersDeleteCall { + c.ctx_ = ctx + return c +} + +func (c *ProjectsZonesClustersDeleteCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "v1/projects/{projectId}/zones/{zone}/clusters/{clusterId}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("DELETE", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "projectId": c.projectId, + "zone": c.zone, + "clusterId": c.clusterId, + }) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "container.projects.zones.clusters.delete" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. 
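+//
+// The returned Operation completes asynchronously; a caller might poll
+// it via the operations service (a sketch; svc and the identifier
+// variables are illustrative):
+//
+//	op, err := svc.Projects.Zones.Clusters.Delete(projectId, zone, clusterId).Do()
+//	for err == nil && op.Status != "DONE" {
+//		time.Sleep(2 * time.Second)
+//		op, err = svc.Projects.Zones.Operations.Get(projectId, zone, op.Name).Do()
+//	}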
+func (c *ProjectsZonesClustersDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) {
+	gensupport.SetOptions(c.urlParams_, opts...)
+	res, err := c.doRequest("json")
+	if res != nil && res.StatusCode == http.StatusNotModified {
+		if res.Body != nil {
+			res.Body.Close()
+		}
+		return nil, &googleapi.Error{
+			Code:   res.StatusCode,
+			Header: res.Header,
+		}
+	}
+	if err != nil {
+		return nil, err
+	}
+	defer googleapi.CloseBody(res)
+	if err := googleapi.CheckResponse(res); err != nil {
+		return nil, err
+	}
+	ret := &Operation{
+		ServerResponse: googleapi.ServerResponse{
+			Header:         res.Header,
+			HTTPStatusCode: res.StatusCode,
+		},
+	}
+	if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
+		return nil, err
+	}
+	return ret, nil
+	// {
+	//   "description": "Deletes the cluster, including the Kubernetes endpoint and all worker nodes. Firewalls and routes that were configured during cluster creation are also deleted.",
+	//   "httpMethod": "DELETE",
+	//   "id": "container.projects.zones.clusters.delete",
+	//   "parameterOrder": [
+	//     "projectId",
+	//     "zone",
+	//     "clusterId"
+	//   ],
+	//   "parameters": {
+	//     "clusterId": {
+	//       "description": "The name of the cluster to delete.",
+	//       "location": "path",
+	//       "required": true,
+	//       "type": "string"
+	//     },
+	//     "projectId": {
+	//       "description": "The Google Developers Console [project ID or project number](https://developers.google.com/console/help/new/#projectnumber).",
+	//       "location": "path",
+	//       "required": true,
+	//       "type": "string"
+	//     },
+	//     "zone": {
+	//       "description": "The name of the Google Compute Engine [zone](/compute/docs/zones#available) in which the cluster resides.",
+	//       "location": "path",
+	//       "required": true,
+	//       "type": "string"
+	//     }
+	//   },
+	//   "path": "v1/projects/{projectId}/zones/{zone}/clusters/{clusterId}",
+	//   "response": {
+	//     "$ref": "Operation"
+	//   },
+	//   "scopes": [
+	//     "https://www.googleapis.com/auth/cloud-platform"
+	//   ]
+	// }
+
+}
+
+// method id "container.projects.zones.clusters.get":
+
+type ProjectsZonesClustersGetCall struct {
+	s            *Service
+	projectId    string
+	zone         string
+	clusterId    string
+	urlParams_   gensupport.URLParams
+	ifNoneMatch_ string
+	ctx_         context.Context
+}
+
+// Get: Gets a specific cluster.
+func (r *ProjectsZonesClustersService) Get(projectId string, zone string, clusterId string) *ProjectsZonesClustersGetCall {
+	c := &ProjectsZonesClustersGetCall{s: r.s, urlParams_: make(gensupport.URLParams)}
+	c.projectId = projectId
+	c.zone = zone
+	c.clusterId = clusterId
+	return c
+}
+
+// Fields allows partial responses to be retrieved. See
+// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *ProjectsZonesClustersGetCall) Fields(s ...googleapi.Field) *ProjectsZonesClustersGetCall {
+	c.urlParams_.Set("fields", googleapi.CombineFields(s))
+	return c
+}
+
+// IfNoneMatch sets the optional parameter which makes the operation
+// fail if the object's ETag matches the given value. This is useful for
+// getting updates only after the object has changed since the last
+// request. Use googleapi.IsNotModified to check whether the response
+// error from Do is the result of If-None-Match.
+func (c *ProjectsZonesClustersGetCall) IfNoneMatch(entityTag string) *ProjectsZonesClustersGetCall {
+	c.ifNoneMatch_ = entityTag
+	return c
+}
+
+// Context sets the context to be used in this call's Do method. Any
+// pending HTTP request will be aborted if the provided context is
+// canceled.
+func (c *ProjectsZonesClustersGetCall) Context(ctx context.Context) *ProjectsZonesClustersGetCall { + c.ctx_ = ctx + return c +} + +func (c *ProjectsZonesClustersGetCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "v1/projects/{projectId}/zones/{zone}/clusters/{clusterId}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "projectId": c.projectId, + "zone": c.zone, + "clusterId": c.clusterId, + }) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + req.Header.Set("If-None-Match", c.ifNoneMatch_) + } + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "container.projects.zones.clusters.get" call. +// Exactly one of *Cluster or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Cluster.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *ProjectsZonesClustersGetCall) Do(opts ...googleapi.CallOption) (*Cluster, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Cluster{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Gets a specific cluster.", + // "httpMethod": "GET", + // "id": "container.projects.zones.clusters.get", + // "parameterOrder": [ + // "projectId", + // "zone", + // "clusterId" + // ], + // "parameters": { + // "clusterId": { + // "description": "The name of the cluster to retrieve.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "projectId": { + // "description": "The Google Developers Console [project ID or project number](https://developers.google.com/console/help/new/#projectnumber).", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "zone": { + // "description": "The name of the Google Compute Engine [zone](/compute/docs/zones#available) in which the cluster resides.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "v1/projects/{projectId}/zones/{zone}/clusters/{clusterId}", + // "response": { + // "$ref": "Cluster" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform" + // ] + // } + +} + +// method id "container.projects.zones.clusters.list": + +type ProjectsZonesClustersListCall struct { + s *Service + projectId string + zone string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context +} + +// List: Lists all clusters owned by a project in either the specified +// zone or all zones. 
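+//
+// Per the method metadata, passing "-" as the zone lists clusters
+// across all zones, e.g. (a sketch; the project ID is illustrative):
+//
+//	resp, err := svc.Projects.Zones.Clusters.List("my-project", "-").Do()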
+func (r *ProjectsZonesClustersService) List(projectId string, zone string) *ProjectsZonesClustersListCall {
+	c := &ProjectsZonesClustersListCall{s: r.s, urlParams_: make(gensupport.URLParams)}
+	c.projectId = projectId
+	c.zone = zone
+	return c
+}
+
+// Fields allows partial responses to be retrieved. See
+// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *ProjectsZonesClustersListCall) Fields(s ...googleapi.Field) *ProjectsZonesClustersListCall {
+	c.urlParams_.Set("fields", googleapi.CombineFields(s))
+	return c
+}
+
+// IfNoneMatch sets the optional parameter which makes the operation
+// fail if the object's ETag matches the given value. This is useful for
+// getting updates only after the object has changed since the last
+// request. Use googleapi.IsNotModified to check whether the response
+// error from Do is the result of If-None-Match.
+func (c *ProjectsZonesClustersListCall) IfNoneMatch(entityTag string) *ProjectsZonesClustersListCall {
+	c.ifNoneMatch_ = entityTag
+	return c
+}
+
+// Context sets the context to be used in this call's Do method. Any
+// pending HTTP request will be aborted if the provided context is
+// canceled.
+func (c *ProjectsZonesClustersListCall) Context(ctx context.Context) *ProjectsZonesClustersListCall {
+	c.ctx_ = ctx
+	return c
+}
+
+func (c *ProjectsZonesClustersListCall) doRequest(alt string) (*http.Response, error) {
+	var body io.Reader = nil
+	c.urlParams_.Set("alt", alt)
+	urls := googleapi.ResolveRelative(c.s.BasePath, "v1/projects/{projectId}/zones/{zone}/clusters")
+	urls += "?" + c.urlParams_.Encode()
+	req, _ := http.NewRequest("GET", urls, body)
+	googleapi.Expand(req.URL, map[string]string{
+		"projectId": c.projectId,
+		"zone":      c.zone,
+	})
+	req.Header.Set("User-Agent", c.s.userAgent())
+	if c.ifNoneMatch_ != "" {
+		req.Header.Set("If-None-Match", c.ifNoneMatch_)
+	}
+	if c.ctx_ != nil {
+		return ctxhttp.Do(c.ctx_, c.s.client, req)
+	}
+	return c.s.client.Do(req)
+}
+
+// Do executes the "container.projects.zones.clusters.list" call.
+// Exactly one of *ListClustersResponse or error will be non-nil. Any
+// non-2xx status code is an error. Response headers are in either
+// *ListClustersResponse.ServerResponse.Header or (if a response was
+// returned at all) in error.(*googleapi.Error).Header. Use
+// googleapi.IsNotModified to check whether the returned error was
+// because http.StatusNotModified was returned.
+func (c *ProjectsZonesClustersListCall) Do(opts ...googleapi.CallOption) (*ListClustersResponse, error) {
+	gensupport.SetOptions(c.urlParams_, opts...)
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &ListClustersResponse{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Lists all clusters owned by a project in either the specified zone or all zones.", + // "httpMethod": "GET", + // "id": "container.projects.zones.clusters.list", + // "parameterOrder": [ + // "projectId", + // "zone" + // ], + // "parameters": { + // "projectId": { + // "description": "The Google Developers Console [project ID or project number](https://developers.google.com/console/help/new/#projectnumber).", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "zone": { + // "description": "The name of the Google Compute Engine [zone](/compute/docs/zones#available) in which the cluster resides, or \"-\" for all zones.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "v1/projects/{projectId}/zones/{zone}/clusters", + // "response": { + // "$ref": "ListClustersResponse" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform" + // ] + // } + +} + +// method id "container.projects.zones.clusters.update": + +type ProjectsZonesClustersUpdateCall struct { + s *Service + projectId string + zone string + clusterId string + updateclusterrequest *UpdateClusterRequest + urlParams_ gensupport.URLParams + ctx_ context.Context +} + +// Update: Update settings of a specific cluster. +func (r *ProjectsZonesClustersService) Update(projectId string, zone string, clusterId string, updateclusterrequest *UpdateClusterRequest) *ProjectsZonesClustersUpdateCall { + c := &ProjectsZonesClustersUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.projectId = projectId + c.zone = zone + c.clusterId = clusterId + c.updateclusterrequest = updateclusterrequest + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ProjectsZonesClustersUpdateCall) Fields(s ...googleapi.Field) *ProjectsZonesClustersUpdateCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ProjectsZonesClustersUpdateCall) Context(ctx context.Context) *ProjectsZonesClustersUpdateCall { + c.ctx_ = ctx + return c +} + +func (c *ProjectsZonesClustersUpdateCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.updateclusterrequest) + if err != nil { + return nil, err + } + ctype := "application/json" + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "v1/projects/{projectId}/zones/{zone}/clusters/{clusterId}") + urls += "?" 
+ c.urlParams_.Encode() + req, _ := http.NewRequest("PUT", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "projectId": c.projectId, + "zone": c.zone, + "clusterId": c.clusterId, + }) + req.Header.Set("Content-Type", ctype) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "container.projects.zones.clusters.update" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *ProjectsZonesClustersUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Update settings of a specific cluster.", + // "httpMethod": "PUT", + // "id": "container.projects.zones.clusters.update", + // "parameterOrder": [ + // "projectId", + // "zone", + // "clusterId" + // ], + // "parameters": { + // "clusterId": { + // "description": "The name of the cluster to upgrade.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "projectId": { + // "description": "The Google Developers Console [project ID or project number](https://developers.google.com/console/help/new/#projectnumber).", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "zone": { + // "description": "The name of the Google Compute Engine [zone](/compute/docs/zones#available) in which the cluster resides.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "v1/projects/{projectId}/zones/{zone}/clusters/{clusterId}", + // "request": { + // "$ref": "UpdateClusterRequest" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform" + // ] + // } + +} + +// method id "container.projects.zones.operations.get": + +type ProjectsZonesOperationsGetCall struct { + s *Service + projectId string + zone string + operationId string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context +} + +// Get: Gets the specified operation. +func (r *ProjectsZonesOperationsService) Get(projectId string, zone string, operationId string) *ProjectsZonesOperationsGetCall { + c := &ProjectsZonesOperationsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.projectId = projectId + c.zone = zone + c.operationId = operationId + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. 
+func (c *ProjectsZonesOperationsGetCall) Fields(s ...googleapi.Field) *ProjectsZonesOperationsGetCall {
+	c.urlParams_.Set("fields", googleapi.CombineFields(s))
+	return c
+}
+
+// IfNoneMatch sets the optional parameter which makes the operation
+// fail if the object's ETag matches the given value. This is useful for
+// getting updates only after the object has changed since the last
+// request. Use googleapi.IsNotModified to check whether the response
+// error from Do is the result of If-None-Match.
+func (c *ProjectsZonesOperationsGetCall) IfNoneMatch(entityTag string) *ProjectsZonesOperationsGetCall {
+	c.ifNoneMatch_ = entityTag
+	return c
+}
+
+// Context sets the context to be used in this call's Do method. Any
+// pending HTTP request will be aborted if the provided context is
+// canceled.
+func (c *ProjectsZonesOperationsGetCall) Context(ctx context.Context) *ProjectsZonesOperationsGetCall {
+	c.ctx_ = ctx
+	return c
+}
+
+func (c *ProjectsZonesOperationsGetCall) doRequest(alt string) (*http.Response, error) {
+	var body io.Reader = nil
+	c.urlParams_.Set("alt", alt)
+	urls := googleapi.ResolveRelative(c.s.BasePath, "v1/projects/{projectId}/zones/{zone}/operations/{operationId}")
+	urls += "?" + c.urlParams_.Encode()
+	req, _ := http.NewRequest("GET", urls, body)
+	googleapi.Expand(req.URL, map[string]string{
+		"projectId":   c.projectId,
+		"zone":        c.zone,
+		"operationId": c.operationId,
+	})
+	req.Header.Set("User-Agent", c.s.userAgent())
+	if c.ifNoneMatch_ != "" {
+		req.Header.Set("If-None-Match", c.ifNoneMatch_)
+	}
+	if c.ctx_ != nil {
+		return ctxhttp.Do(c.ctx_, c.s.client, req)
+	}
+	return c.s.client.Do(req)
+}
+
+// Do executes the "container.projects.zones.operations.get" call.
+// Exactly one of *Operation or error will be non-nil. Any non-2xx
+// status code is an error. Response headers are in either
+// *Operation.ServerResponse.Header or (if a response was returned at
+// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified
+// to check whether the returned error was because
+// http.StatusNotModified was returned.
+func (c *ProjectsZonesOperationsGetCall) Do(opts ...googleapi.CallOption) (*Operation, error) {
+	gensupport.SetOptions(c.urlParams_, opts...)
+	res, err := c.doRequest("json")
+	if res != nil && res.StatusCode == http.StatusNotModified {
+		if res.Body != nil {
+			res.Body.Close()
+		}
+		return nil, &googleapi.Error{
+			Code:   res.StatusCode,
+			Header: res.Header,
+		}
+	}
+	if err != nil {
+		return nil, err
+	}
+	defer googleapi.CloseBody(res)
+	if err := googleapi.CheckResponse(res); err != nil {
+		return nil, err
+	}
+	ret := &Operation{
+		ServerResponse: googleapi.ServerResponse{
+			Header:         res.Header,
+			HTTPStatusCode: res.StatusCode,
+		},
+	}
+	if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
+		return nil, err
+	}
+	return ret, nil
+	// {
+	//   "description": "Gets the specified operation.",
+	//   "httpMethod": "GET",
+	//   "id": "container.projects.zones.operations.get",
+	//   "parameterOrder": [
+	//     "projectId",
+	//     "zone",
+	//     "operationId"
+	//   ],
+	//   "parameters": {
+	//     "operationId": {
+	//       "description": "The server-assigned `name` of the operation.",
+	//       "location": "path",
+	//       "required": true,
+	//       "type": "string"
+	//     },
+	//     "projectId": {
+	//       "description": "The Google Developers Console [project ID or project number](https://developers.google.com/console/help/new/#projectnumber).",
+	//       "location": "path",
+	//       "required": true,
+	//       "type": "string"
+	//     },
+	//     "zone": {
+	//       "description": "The name of the Google Compute Engine [zone](/compute/docs/zones#available) in which the cluster resides.",
+	//       "location": "path",
+	//       "required": true,
+	//       "type": "string"
+	//     }
+	//   },
+	//   "path": "v1/projects/{projectId}/zones/{zone}/operations/{operationId}",
+	//   "response": {
+	//     "$ref": "Operation"
+	//   },
+	//   "scopes": [
+	//     "https://www.googleapis.com/auth/cloud-platform"
+	//   ]
+	// }
+
+}
+
+// method id "container.projects.zones.operations.list":
+
+type ProjectsZonesOperationsListCall struct {
+	s            *Service
+	projectId    string
+	zone         string
+	urlParams_   gensupport.URLParams
+	ifNoneMatch_ string
+	ctx_         context.Context
+}
+
+// List: Lists all operations in a project in a specific zone or all
+// zones.
+func (r *ProjectsZonesOperationsService) List(projectId string, zone string) *ProjectsZonesOperationsListCall {
+	c := &ProjectsZonesOperationsListCall{s: r.s, urlParams_: make(gensupport.URLParams)}
+	c.projectId = projectId
+	c.zone = zone
+	return c
+}
+
+// Fields allows partial responses to be retrieved. See
+// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *ProjectsZonesOperationsListCall) Fields(s ...googleapi.Field) *ProjectsZonesOperationsListCall {
+	c.urlParams_.Set("fields", googleapi.CombineFields(s))
+	return c
+}
+
+// IfNoneMatch sets the optional parameter which makes the operation
+// fail if the object's ETag matches the given value. This is useful for
+// getting updates only after the object has changed since the last
+// request. Use googleapi.IsNotModified to check whether the response
+// error from Do is the result of If-None-Match.
+func (c *ProjectsZonesOperationsListCall) IfNoneMatch(entityTag string) *ProjectsZonesOperationsListCall {
+	c.ifNoneMatch_ = entityTag
+	return c
+}
+
+// Context sets the context to be used in this call's Do method. Any
+// pending HTTP request will be aborted if the provided context is
+// canceled.
+func (c *ProjectsZonesOperationsListCall) Context(ctx context.Context) *ProjectsZonesOperationsListCall { + c.ctx_ = ctx + return c +} + +func (c *ProjectsZonesOperationsListCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "v1/projects/{projectId}/zones/{zone}/operations") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "projectId": c.projectId, + "zone": c.zone, + }) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + req.Header.Set("If-None-Match", c.ifNoneMatch_) + } + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "container.projects.zones.operations.list" call. +// Exactly one of *ListOperationsResponse or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *ListOperationsResponse.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *ProjectsZonesOperationsListCall) Do(opts ...googleapi.CallOption) (*ListOperationsResponse, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &ListOperationsResponse{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Lists all operations in a project in a specific zone or all zones.", + // "httpMethod": "GET", + // "id": "container.projects.zones.operations.list", + // "parameterOrder": [ + // "projectId", + // "zone" + // ], + // "parameters": { + // "projectId": { + // "description": "The Google Developers Console [project ID or project number](https://developers.google.com/console/help/new/#projectnumber).", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "zone": { + // "description": "The name of the Google Compute Engine [zone](/compute/docs/zones#available) to return operations for, or \"-\" for all zones.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "v1/projects/{projectId}/zones/{zone}/operations", + // "response": { + // "$ref": "ListOperationsResponse" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform" + // ] + // } + +} diff --git a/vendor/google.golang.org/api/dns/v1/dns-api.json b/vendor/google.golang.org/api/dns/v1/dns-api.json new file mode 100644 index 000000000..a36533455 --- /dev/null +++ b/vendor/google.golang.org/api/dns/v1/dns-api.json @@ -0,0 +1,708 @@ +{ + "kind": "discovery#restDescription", + "etag": "\"ye6orv2F-1npMW3u9suM3a7C5Bo/zoueaaoAZQGFJohVmI5skQDTvqg\"", + "discoveryVersion": "v1", + "id": "dns:v1", + "name": "dns", + "version": "v1", + "revision": "20150729", + "title": "Google Cloud DNS API", + "description": "The 
Google Cloud DNS API provides services for configuring and serving authoritative DNS records.", + "ownerDomain": "google.com", + "ownerName": "Google", + "icons": { + "x16": "http://www.google.com/images/icons/product/search-16.gif", + "x32": "http://www.google.com/images/icons/product/search-32.gif" + }, + "documentationLink": "https://developers.google.com/cloud-dns", + "protocol": "rest", + "baseUrl": "https://www.googleapis.com/dns/v1/projects/", + "basePath": "/dns/v1/projects/", + "rootUrl": "https://www.googleapis.com/", + "servicePath": "dns/v1/projects/", + "batchPath": "batch", + "parameters": { + "alt": { + "type": "string", + "description": "Data format for the response.", + "default": "json", + "enum": [ + "json" + ], + "enumDescriptions": [ + "Responses with Content-Type of application/json" + ], + "location": "query" + }, + "fields": { + "type": "string", + "description": "Selector specifying which fields to include in a partial response.", + "location": "query" + }, + "key": { + "type": "string", + "description": "API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token.", + "location": "query" + }, + "oauth_token": { + "type": "string", + "description": "OAuth 2.0 token for the current user.", + "location": "query" + }, + "prettyPrint": { + "type": "boolean", + "description": "Returns response with indentations and line breaks.", + "default": "true", + "location": "query" + }, + "quotaUser": { + "type": "string", + "description": "Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters. Overrides userIp if both are provided.", + "location": "query" + }, + "userIp": { + "type": "string", + "description": "IP address of the site where the request originates. Use this if you want to enforce per-user limits.", + "location": "query" + } + }, + "auth": { + "oauth2": { + "scopes": { + "https://www.googleapis.com/auth/cloud-platform": { + "description": "View and manage your data across Google Cloud Platform services" + }, + "https://www.googleapis.com/auth/cloud-platform.read-only": { + "description": "View your data across Google Cloud Platform services" + }, + "https://www.googleapis.com/auth/ndev.clouddns.readonly": { + "description": "View your DNS records hosted by Google Cloud DNS" + }, + "https://www.googleapis.com/auth/ndev.clouddns.readwrite": { + "description": "View and manage your DNS records hosted by Google Cloud DNS" + } + } + } + }, + "schemas": { + "Change": { + "id": "Change", + "type": "object", + "description": "An atomic update to a collection of ResourceRecordSets.", + "properties": { + "additions": { + "type": "array", + "description": "Which ResourceRecordSets to add?", + "items": { + "$ref": "ResourceRecordSet" + } + }, + "deletions": { + "type": "array", + "description": "Which ResourceRecordSets to remove? Must match existing data exactly.", + "items": { + "$ref": "ResourceRecordSet" + } + }, + "id": { + "type": "string", + "description": "Unique identifier for the resource; defined by the server (output only)." + }, + "kind": { + "type": "string", + "description": "Identifies what kind of resource this is. Value: the fixed string \"dns#change\".", + "default": "dns#change" + }, + "startTime": { + "type": "string", + "description": "The time that this operation was started by the server. This is in RFC3339 text format." 
+ }, + "status": { + "type": "string", + "description": "Status of the operation (output only).", + "enum": [ + "done", + "pending" + ], + "enumDescriptions": [ + "", + "" + ] + } + } + }, + "ChangesListResponse": { + "id": "ChangesListResponse", + "type": "object", + "description": "The response to a request to enumerate Changes to a ResourceRecordSets collection.", + "properties": { + "changes": { + "type": "array", + "description": "The requested changes.", + "items": { + "$ref": "Change" + } + }, + "kind": { + "type": "string", + "description": "Type of resource.", + "default": "dns#changesListResponse" + }, + "nextPageToken": { + "type": "string", + "description": "The presence of this field indicates that there exist more results following your last page of results in pagination order. To fetch them, make another list request using this value as your pagination token.\n\nIn this way you can retrieve the complete contents of even very large collections one page at a time. However, if the contents of the collection change between the first and last paginated list request, the set of all elements returned will be an inconsistent view of the collection. There is no way to retrieve a \"snapshot\" of collections larger than the maximum page size." + } + } + }, + "ManagedZone": { + "id": "ManagedZone", + "type": "object", + "description": "A zone is a subtree of the DNS namespace under one administrative responsibility. A ManagedZone is a resource that represents a DNS zone hosted by the Cloud DNS service.", + "properties": { + "creationTime": { + "type": "string", + "description": "The time that this resource was created on the server. This is in RFC3339 text format. Output only." + }, + "description": { + "type": "string", + "description": "A mutable string of at most 1024 characters associated with this resource for the user's convenience. Has no effect on the managed zone's function." + }, + "dnsName": { + "type": "string", + "description": "The DNS name of this managed zone, for instance \"example.com.\"." + }, + "id": { + "type": "string", + "description": "Unique identifier for the resource; defined by the server (output only)", + "format": "uint64" + }, + "kind": { + "type": "string", + "description": "Identifies what kind of resource this is. Value: the fixed string \"dns#managedZone\".", + "default": "dns#managedZone" + }, + "name": { + "type": "string", + "description": "User assigned name for this resource. Must be unique within the project. The name must be 1-32 characters long, must begin with a letter, end with a letter or digit, and only contain lowercase letters, digits or dashes." + }, + "nameServerSet": { + "type": "string", + "description": "Optionally specifies the NameServerSet for this ManagedZone. A NameServerSet is a set of DNS name servers that all host the same ManagedZones. Most users will leave this field unset." 
+ }, + "nameServers": { + "type": "array", + "description": "Delegate your managed_zone to these virtual name servers; defined by the server (output only)", + "items": { + "type": "string" + } + } + } + }, + "ManagedZonesListResponse": { + "id": "ManagedZonesListResponse", + "type": "object", + "properties": { + "kind": { + "type": "string", + "description": "Type of resource.", + "default": "dns#managedZonesListResponse" + }, + "managedZones": { + "type": "array", + "description": "The managed zone resources.", + "items": { + "$ref": "ManagedZone" + } + }, + "nextPageToken": { + "type": "string", + "description": "The presence of this field indicates that there exist more results following your last page of results in pagination order. To fetch them, make another list request using this value as your page token.\n\nIn this way you can retrieve the complete contents of even very large collections one page at a time. However, if the contents of the collection change between the first and last paginated list request, the set of all elements returned will be an inconsistent view of the collection. There is no way to retrieve a consistent snapshot of a collection larger than the maximum page size." + } + } + }, + "Project": { + "id": "Project", + "type": "object", + "description": "A project resource. The project is a top level container for resources including Cloud DNS ManagedZones. Projects can be created only in the APIs console.", + "properties": { + "id": { + "type": "string", + "description": "User assigned unique identifier for the resource (output only)." + }, + "kind": { + "type": "string", + "description": "Identifies what kind of resource this is. Value: the fixed string \"dns#project\".", + "default": "dns#project" + }, + "number": { + "type": "string", + "description": "Unique numeric identifier for the resource; defined by the server (output only).", + "format": "uint64" + }, + "quota": { + "$ref": "Quota", + "description": "Quotas assigned to this project (output only)." + } + } + }, + "Quota": { + "id": "Quota", + "type": "object", + "description": "Limits associated with a Project.", + "properties": { + "kind": { + "type": "string", + "description": "Identifies what kind of resource this is. 
Value: the fixed string \"dns#quota\".", + "default": "dns#quota" + }, + "managedZones": { + "type": "integer", + "description": "Maximum allowed number of managed zones in the project.", + "format": "int32" + }, + "resourceRecordsPerRrset": { + "type": "integer", + "description": "Maximum allowed number of ResourceRecords per ResourceRecordSet.", + "format": "int32" + }, + "rrsetAdditionsPerChange": { + "type": "integer", + "description": "Maximum allowed number of ResourceRecordSets to add per ChangesCreateRequest.", + "format": "int32" + }, + "rrsetDeletionsPerChange": { + "type": "integer", + "description": "Maximum allowed number of ResourceRecordSets to delete per ChangesCreateRequest.", + "format": "int32" + }, + "rrsetsPerManagedZone": { + "type": "integer", + "description": "Maximum allowed number of ResourceRecordSets per zone in the project.", + "format": "int32" + }, + "totalRrdataSizePerChange": { + "type": "integer", + "description": "Maximum allowed size for total rrdata in one ChangesCreateRequest in bytes.", + "format": "int32" + } + } + }, + "ResourceRecordSet": { + "id": "ResourceRecordSet", + "type": "object", + "description": "A unit of data that will be returned by the DNS servers.", + "properties": { + "kind": { + "type": "string", + "description": "Identifies what kind of resource this is. Value: the fixed string \"dns#resourceRecordSet\".", + "default": "dns#resourceRecordSet" + }, + "name": { + "type": "string", + "description": "For example, www.example.com." + }, + "rrdatas": { + "type": "array", + "description": "As defined in RFC 1035 (section 5) and RFC 1034 (section 3.6.1).", + "items": { + "type": "string" + } + }, + "ttl": { + "type": "integer", + "description": "Number of seconds that this ResourceRecordSet can be cached by resolvers.", + "format": "int32" + }, + "type": { + "type": "string", + "description": "The identifier of a supported record type, for example, A, AAAA, MX, TXT, and so on." + } + } + }, + "ResourceRecordSetsListResponse": { + "id": "ResourceRecordSetsListResponse", + "type": "object", + "properties": { + "kind": { + "type": "string", + "description": "Type of resource.", + "default": "dns#resourceRecordSetsListResponse" + }, + "nextPageToken": { + "type": "string", + "description": "The presence of this field indicates that there exist more results following your last page of results in pagination order. To fetch them, make another list request using this value as your pagination token.\n\nIn this way you can retrieve the complete contents of even very large collections one page at a time. However, if the contents of the collection change between the first and last paginated list request, the set of all elements returned will be an inconsistent view of the collection. There is no way to retrieve a consistent snapshot of a collection larger than the maximum page size." + }, + "rrsets": { + "type": "array", + "description": "The resource record set resources.", + "items": { + "$ref": "ResourceRecordSet" + } + } + } + } + }, + "resources": { + "changes": { + "methods": { + "create": { + "id": "dns.changes.create", + "path": "{project}/managedZones/{managedZone}/changes", + "httpMethod": "POST", + "description": "Atomically update the ResourceRecordSet collection.", + "parameters": { + "managedZone": { + "type": "string", + "description": "Identifies the managed zone addressed by this request. 
Can be the managed zone name or id.", + "required": true, + "location": "path" + }, + "project": { + "type": "string", + "description": "Identifies the project addressed by this request.", + "required": true, + "location": "path" + } + }, + "parameterOrder": [ + "project", + "managedZone" + ], + "request": { + "$ref": "Change" + }, + "response": { + "$ref": "Change" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/ndev.clouddns.readwrite" + ] + }, + "get": { + "id": "dns.changes.get", + "path": "{project}/managedZones/{managedZone}/changes/{changeId}", + "httpMethod": "GET", + "description": "Fetch the representation of an existing Change.", + "parameters": { + "changeId": { + "type": "string", + "description": "The identifier of the requested change, from a previous ResourceRecordSetsChangeResponse.", + "required": true, + "location": "path" + }, + "managedZone": { + "type": "string", + "description": "Identifies the managed zone addressed by this request. Can be the managed zone name or id.", + "required": true, + "location": "path" + }, + "project": { + "type": "string", + "description": "Identifies the project addressed by this request.", + "required": true, + "location": "path" + } + }, + "parameterOrder": [ + "project", + "managedZone", + "changeId" + ], + "response": { + "$ref": "Change" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only", + "https://www.googleapis.com/auth/ndev.clouddns.readonly", + "https://www.googleapis.com/auth/ndev.clouddns.readwrite" + ] + }, + "list": { + "id": "dns.changes.list", + "path": "{project}/managedZones/{managedZone}/changes", + "httpMethod": "GET", + "description": "Enumerate Changes to a ResourceRecordSet collection.", + "parameters": { + "managedZone": { + "type": "string", + "description": "Identifies the managed zone addressed by this request. Can be the managed zone name or id.", + "required": true, + "location": "path" + }, + "maxResults": { + "type": "integer", + "description": "Optional. Maximum number of results to be returned. If unspecified, the server will decide how many results to return.", + "format": "int32", + "location": "query" + }, + "pageToken": { + "type": "string", + "description": "Optional. A tag returned by a previous list request that was truncated. Use this parameter to continue a previous list request.", + "location": "query" + }, + "project": { + "type": "string", + "description": "Identifies the project addressed by this request.", + "required": true, + "location": "path" + }, + "sortBy": { + "type": "string", + "description": "Sorting criterion. 
The only supported value is change sequence.", + "default": "changeSequence", + "enum": [ + "changeSequence" + ], + "enumDescriptions": [ + "" + ], + "location": "query" + }, + "sortOrder": { + "type": "string", + "description": "Sorting order direction: 'ascending' or 'descending'.", + "location": "query" + } + }, + "parameterOrder": [ + "project", + "managedZone" + ], + "response": { + "$ref": "ChangesListResponse" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only", + "https://www.googleapis.com/auth/ndev.clouddns.readonly", + "https://www.googleapis.com/auth/ndev.clouddns.readwrite" + ] + } + } + }, + "managedZones": { + "methods": { + "create": { + "id": "dns.managedZones.create", + "path": "{project}/managedZones", + "httpMethod": "POST", + "description": "Create a new ManagedZone.", + "parameters": { + "project": { + "type": "string", + "description": "Identifies the project addressed by this request.", + "required": true, + "location": "path" + } + }, + "parameterOrder": [ + "project" + ], + "request": { + "$ref": "ManagedZone" + }, + "response": { + "$ref": "ManagedZone" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/ndev.clouddns.readwrite" + ] + }, + "delete": { + "id": "dns.managedZones.delete", + "path": "{project}/managedZones/{managedZone}", + "httpMethod": "DELETE", + "description": "Delete a previously created ManagedZone.", + "parameters": { + "managedZone": { + "type": "string", + "description": "Identifies the managed zone addressed by this request. Can be the managed zone name or id.", + "required": true, + "location": "path" + }, + "project": { + "type": "string", + "description": "Identifies the project addressed by this request.", + "required": true, + "location": "path" + } + }, + "parameterOrder": [ + "project", + "managedZone" + ], + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/ndev.clouddns.readwrite" + ] + }, + "get": { + "id": "dns.managedZones.get", + "path": "{project}/managedZones/{managedZone}", + "httpMethod": "GET", + "description": "Fetch the representation of an existing ManagedZone.", + "parameters": { + "managedZone": { + "type": "string", + "description": "Identifies the managed zone addressed by this request. Can be the managed zone name or id.", + "required": true, + "location": "path" + }, + "project": { + "type": "string", + "description": "Identifies the project addressed by this request.", + "required": true, + "location": "path" + } + }, + "parameterOrder": [ + "project", + "managedZone" + ], + "response": { + "$ref": "ManagedZone" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only", + "https://www.googleapis.com/auth/ndev.clouddns.readonly", + "https://www.googleapis.com/auth/ndev.clouddns.readwrite" + ] + }, + "list": { + "id": "dns.managedZones.list", + "path": "{project}/managedZones", + "httpMethod": "GET", + "description": "Enumerate ManagedZones that have been created but not yet deleted.", + "parameters": { + "dnsName": { + "type": "string", + "description": "Restricts the list to return only zones with this domain name.", + "location": "query" + }, + "maxResults": { + "type": "integer", + "description": "Optional. Maximum number of results to be returned. 
If unspecified, the server will decide how many results to return.", + "format": "int32", + "location": "query" + }, + "pageToken": { + "type": "string", + "description": "Optional. A tag returned by a previous list request that was truncated. Use this parameter to continue a previous list request.", + "location": "query" + }, + "project": { + "type": "string", + "description": "Identifies the project addressed by this request.", + "required": true, + "location": "path" + } + }, + "parameterOrder": [ + "project" + ], + "response": { + "$ref": "ManagedZonesListResponse" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only", + "https://www.googleapis.com/auth/ndev.clouddns.readonly", + "https://www.googleapis.com/auth/ndev.clouddns.readwrite" + ] + } + } + }, + "projects": { + "methods": { + "get": { + "id": "dns.projects.get", + "path": "{project}", + "httpMethod": "GET", + "description": "Fetch the representation of an existing Project.", + "parameters": { + "project": { + "type": "string", + "description": "Identifies the project addressed by this request.", + "required": true, + "location": "path" + } + }, + "parameterOrder": [ + "project" + ], + "response": { + "$ref": "Project" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only", + "https://www.googleapis.com/auth/ndev.clouddns.readonly", + "https://www.googleapis.com/auth/ndev.clouddns.readwrite" + ] + } + } + }, + "resourceRecordSets": { + "methods": { + "list": { + "id": "dns.resourceRecordSets.list", + "path": "{project}/managedZones/{managedZone}/rrsets", + "httpMethod": "GET", + "description": "Enumerate ResourceRecordSets that have been created but not yet deleted.", + "parameters": { + "managedZone": { + "type": "string", + "description": "Identifies the managed zone addressed by this request. Can be the managed zone name or id.", + "required": true, + "location": "path" + }, + "maxResults": { + "type": "integer", + "description": "Optional. Maximum number of results to be returned. If unspecified, the server will decide how many results to return.", + "format": "int32", + "location": "query" + }, + "name": { + "type": "string", + "description": "Restricts the list to return only records with this fully qualified domain name.", + "location": "query" + }, + "pageToken": { + "type": "string", + "description": "Optional. A tag returned by a previous list request that was truncated. Use this parameter to continue a previous list request.", + "location": "query" + }, + "project": { + "type": "string", + "description": "Identifies the project addressed by this request.", + "required": true, + "location": "path" + }, + "type": { + "type": "string", + "description": "Restricts the list to return only records of this type. 
If present, the \"name\" parameter must also be present.", + "location": "query" + } + }, + "parameterOrder": [ + "project", + "managedZone" + ], + "response": { + "$ref": "ResourceRecordSetsListResponse" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only", + "https://www.googleapis.com/auth/ndev.clouddns.readonly", + "https://www.googleapis.com/auth/ndev.clouddns.readwrite" + ] + } + } + } + } +} diff --git a/vendor/google.golang.org/api/dns/v1/dns-gen.go b/vendor/google.golang.org/api/dns/v1/dns-gen.go new file mode 100644 index 000000000..a0a2e72fd --- /dev/null +++ b/vendor/google.golang.org/api/dns/v1/dns-gen.go @@ -0,0 +1,1787 @@ +// Package dns provides access to the Google Cloud DNS API. +// +// See https://developers.google.com/cloud-dns +// +// Usage example: +// +// import "google.golang.org/api/dns/v1" +// ... +// dnsService, err := dns.New(oauthHttpClient) +package dns + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + context "golang.org/x/net/context" + ctxhttp "golang.org/x/net/context/ctxhttp" + gensupport "google.golang.org/api/gensupport" + googleapi "google.golang.org/api/googleapi" + "io" + "net/http" + "net/url" + "strconv" + "strings" +) + +// Always reference these packages, just in case the auto-generated code +// below doesn't. +var _ = bytes.NewBuffer +var _ = strconv.Itoa +var _ = fmt.Sprintf +var _ = json.NewDecoder +var _ = io.Copy +var _ = url.Parse +var _ = gensupport.MarshalJSON +var _ = googleapi.Version +var _ = errors.New +var _ = strings.Replace +var _ = context.Canceled +var _ = ctxhttp.Do + +const apiId = "dns:v1" +const apiName = "dns" +const apiVersion = "v1" +const basePath = "https://www.googleapis.com/dns/v1/projects/" + +// OAuth2 scopes used by this API. 
+const ( + // View and manage your data across Google Cloud Platform services + CloudPlatformScope = "https://www.googleapis.com/auth/cloud-platform" + + // View your data across Google Cloud Platform services + CloudPlatformReadOnlyScope = "https://www.googleapis.com/auth/cloud-platform.read-only" + + // View your DNS records hosted by Google Cloud DNS + NdevClouddnsReadonlyScope = "https://www.googleapis.com/auth/ndev.clouddns.readonly" + + // View and manage your DNS records hosted by Google Cloud DNS + NdevClouddnsReadwriteScope = "https://www.googleapis.com/auth/ndev.clouddns.readwrite" +) + +func New(client *http.Client) (*Service, error) { + if client == nil { + return nil, errors.New("client is nil") + } + s := &Service{client: client, BasePath: basePath} + s.Changes = NewChangesService(s) + s.ManagedZones = NewManagedZonesService(s) + s.Projects = NewProjectsService(s) + s.ResourceRecordSets = NewResourceRecordSetsService(s) + return s, nil +} + +type Service struct { + client *http.Client + BasePath string // API endpoint base URL + UserAgent string // optional additional User-Agent fragment + + Changes *ChangesService + + ManagedZones *ManagedZonesService + + Projects *ProjectsService + + ResourceRecordSets *ResourceRecordSetsService +} + +func (s *Service) userAgent() string { + if s.UserAgent == "" { + return googleapi.UserAgent + } + return googleapi.UserAgent + " " + s.UserAgent +} + +func NewChangesService(s *Service) *ChangesService { + rs := &ChangesService{s: s} + return rs +} + +type ChangesService struct { + s *Service +} + +func NewManagedZonesService(s *Service) *ManagedZonesService { + rs := &ManagedZonesService{s: s} + return rs +} + +type ManagedZonesService struct { + s *Service +} + +func NewProjectsService(s *Service) *ProjectsService { + rs := &ProjectsService{s: s} + return rs +} + +type ProjectsService struct { + s *Service +} + +func NewResourceRecordSetsService(s *Service) *ResourceRecordSetsService { + rs := &ResourceRecordSetsService{s: s} + return rs +} + +type ResourceRecordSetsService struct { + s *Service +} + +// Change: An atomic update to a collection of ResourceRecordSets. +type Change struct { + // Additions: Which ResourceRecordSets to add? + Additions []*ResourceRecordSet `json:"additions,omitempty"` + + // Deletions: Which ResourceRecordSets to remove? Must match existing + // data exactly. + Deletions []*ResourceRecordSet `json:"deletions,omitempty"` + + // Id: Unique identifier for the resource; defined by the server (output + // only). + Id string `json:"id,omitempty"` + + // Kind: Identifies what kind of resource this is. Value: the fixed + // string "dns#change". + Kind string `json:"kind,omitempty"` + + // StartTime: The time that this operation was started by the server. + // This is in RFC3339 text format. + StartTime string `json:"startTime,omitempty"` + + // Status: Status of the operation (output only). + // + // Possible values: + // "done" + // "pending" + Status string `json:"status,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Additions") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. 
+ ForceSendFields []string `json:"-"` +} + +func (s *Change) MarshalJSON() ([]byte, error) { + type noMethod Change + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +// ChangesListResponse: The response to a request to enumerate Changes +// to a ResourceRecordSets collection. +type ChangesListResponse struct { + // Changes: The requested changes. + Changes []*Change `json:"changes,omitempty"` + + // Kind: Type of resource. + Kind string `json:"kind,omitempty"` + + // NextPageToken: The presence of this field indicates that there exist + // more results following your last page of results in pagination order. + // To fetch them, make another list request using this value as your + // pagination token. + // + // In this way you can retrieve the complete contents of even very large + // collections one page at a time. However, if the contents of the + // collection change between the first and last paginated list request, + // the set of all elements returned will be an inconsistent view of the + // collection. There is no way to retrieve a "snapshot" of collections + // larger than the maximum page size. + NextPageToken string `json:"nextPageToken,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Changes") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *ChangesListResponse) MarshalJSON() ([]byte, error) { + type noMethod ChangesListResponse + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +// ManagedZone: A zone is a subtree of the DNS namespace under one +// administrative responsibility. A ManagedZone is a resource that +// represents a DNS zone hosted by the Cloud DNS service. +type ManagedZone struct { + // CreationTime: The time that this resource was created on the server. + // This is in RFC3339 text format. Output only. + CreationTime string `json:"creationTime,omitempty"` + + // Description: A mutable string of at most 1024 characters associated + // with this resource for the user's convenience. Has no effect on the + // managed zone's function. + Description string `json:"description,omitempty"` + + // DnsName: The DNS name of this managed zone, for instance + // "example.com.". + DnsName string `json:"dnsName,omitempty"` + + // Id: Unique identifier for the resource; defined by the server (output + // only) + Id uint64 `json:"id,omitempty,string"` + + // Kind: Identifies what kind of resource this is. Value: the fixed + // string "dns#managedZone". + Kind string `json:"kind,omitempty"` + + // Name: User assigned name for this resource. Must be unique within the + // project. The name must be 1-32 characters long, must begin with a + // letter, end with a letter or digit, and only contain lowercase + // letters, digits or dashes. + Name string `json:"name,omitempty"` + + // NameServerSet: Optionally specifies the NameServerSet for this + // ManagedZone. A NameServerSet is a set of DNS name servers that all + // host the same ManagedZones. Most users will leave this field unset. 
+ NameServerSet string `json:"nameServerSet,omitempty"` + + // NameServers: Delegate your managed_zone to these virtual name + // servers; defined by the server (output only) + NameServers []string `json:"nameServers,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "CreationTime") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *ManagedZone) MarshalJSON() ([]byte, error) { + type noMethod ManagedZone + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +type ManagedZonesListResponse struct { + // Kind: Type of resource. + Kind string `json:"kind,omitempty"` + + // ManagedZones: The managed zone resources. + ManagedZones []*ManagedZone `json:"managedZones,omitempty"` + + // NextPageToken: The presence of this field indicates that there exist + // more results following your last page of results in pagination order. + // To fetch them, make another list request using this value as your + // page token. + // + // In this way you can retrieve the complete contents of even very large + // collections one page at a time. However, if the contents of the + // collection change between the first and last paginated list request, + // the set of all elements returned will be an inconsistent view of the + // collection. There is no way to retrieve a consistent snapshot of a + // collection larger than the maximum page size. + NextPageToken string `json:"nextPageToken,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Kind") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *ManagedZonesListResponse) MarshalJSON() ([]byte, error) { + type noMethod ManagedZonesListResponse + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +// Project: A project resource. The project is a top level container for +// resources including Cloud DNS ManagedZones. Projects can be created +// only in the APIs console. +type Project struct { + // Id: User assigned unique identifier for the resource (output only). + Id string `json:"id,omitempty"` + + // Kind: Identifies what kind of resource this is. Value: the fixed + // string "dns#project". + Kind string `json:"kind,omitempty"` + + // Number: Unique numeric identifier for the resource; defined by the + // server (output only). + Number uint64 `json:"number,omitempty,string"` + + // Quota: Quotas assigned to this project (output only). + Quota *Quota `json:"quota,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. 
+ googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Id") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *Project) MarshalJSON() ([]byte, error) { + type noMethod Project + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +// Quota: Limits associated with a Project. +type Quota struct { + // Kind: Identifies what kind of resource this is. Value: the fixed + // string "dns#quota". + Kind string `json:"kind,omitempty"` + + // ManagedZones: Maximum allowed number of managed zones in the project. + ManagedZones int64 `json:"managedZones,omitempty"` + + // ResourceRecordsPerRrset: Maximum allowed number of ResourceRecords + // per ResourceRecordSet. + ResourceRecordsPerRrset int64 `json:"resourceRecordsPerRrset,omitempty"` + + // RrsetAdditionsPerChange: Maximum allowed number of ResourceRecordSets + // to add per ChangesCreateRequest. + RrsetAdditionsPerChange int64 `json:"rrsetAdditionsPerChange,omitempty"` + + // RrsetDeletionsPerChange: Maximum allowed number of ResourceRecordSets + // to delete per ChangesCreateRequest. + RrsetDeletionsPerChange int64 `json:"rrsetDeletionsPerChange,omitempty"` + + // RrsetsPerManagedZone: Maximum allowed number of ResourceRecordSets + // per zone in the project. + RrsetsPerManagedZone int64 `json:"rrsetsPerManagedZone,omitempty"` + + // TotalRrdataSizePerChange: Maximum allowed size for total rrdata in + // one ChangesCreateRequest in bytes. + TotalRrdataSizePerChange int64 `json:"totalRrdataSizePerChange,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Kind") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *Quota) MarshalJSON() ([]byte, error) { + type noMethod Quota + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +// ResourceRecordSet: A unit of data that will be returned by the DNS +// servers. +type ResourceRecordSet struct { + // Kind: Identifies what kind of resource this is. Value: the fixed + // string "dns#resourceRecordSet". + Kind string `json:"kind,omitempty"` + + // Name: For example, www.example.com. + Name string `json:"name,omitempty"` + + // Rrdatas: As defined in RFC 1035 (section 5) and RFC 1034 (section + // 3.6.1). + Rrdatas []string `json:"rrdatas,omitempty"` + + // Ttl: Number of seconds that this ResourceRecordSet can be cached by + // resolvers. + Ttl int64 `json:"ttl,omitempty"` + + // Type: The identifier of a supported record type, for example, A, + // AAAA, MX, TXT, and so on. + Type string `json:"type,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Kind") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. 
However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *ResourceRecordSet) MarshalJSON() ([]byte, error) { + type noMethod ResourceRecordSet + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +type ResourceRecordSetsListResponse struct { + // Kind: Type of resource. + Kind string `json:"kind,omitempty"` + + // NextPageToken: The presence of this field indicates that there exist + // more results following your last page of results in pagination order. + // To fetch them, make another list request using this value as your + // pagination token. + // + // In this way you can retrieve the complete contents of even very large + // collections one page at a time. However, if the contents of the + // collection change between the first and last paginated list request, + // the set of all elements returned will be an inconsistent view of the + // collection. There is no way to retrieve a consistent snapshot of a + // collection larger than the maximum page size. + NextPageToken string `json:"nextPageToken,omitempty"` + + // Rrsets: The resource record set resources. + Rrsets []*ResourceRecordSet `json:"rrsets,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Kind") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *ResourceRecordSetsListResponse) MarshalJSON() ([]byte, error) { + type noMethod ResourceRecordSetsListResponse + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +// method id "dns.changes.create": + +type ChangesCreateCall struct { + s *Service + project string + managedZone string + change *Change + urlParams_ gensupport.URLParams + ctx_ context.Context +} + +// Create: Atomically update the ResourceRecordSet collection. +func (r *ChangesService) Create(project string, managedZone string, change *Change) *ChangesCreateCall { + c := &ChangesCreateCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.managedZone = managedZone + c.change = change + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ChangesCreateCall) Fields(s ...googleapi.Field) *ChangesCreateCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. 
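+//
+// For example, a caller might bound a change submission with a
+// deadline (a sketch, assuming the x/net/context and standard time
+// packages, a *dns.Service from New, and illustrative record data;
+// "my-project" and "my-zone" are placeholders):
+//
+//	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
+//	defer cancel()
+//	chg := &dns.Change{
+//		Additions: []*dns.ResourceRecordSet{{
+//			Name:    "www.example.com.",
+//			Type:    "A",
+//			Ttl:     300,
+//			Rrdatas: []string{"203.0.113.10"},
+//		}},
+//	}
+//	change, err := dnsService.Changes.Create("my-project", "my-zone", chg).
+//		Context(ctx).Do()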
+func (c *ChangesCreateCall) Context(ctx context.Context) *ChangesCreateCall { + c.ctx_ = ctx + return c +} + +func (c *ChangesCreateCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.change) + if err != nil { + return nil, err + } + ctype := "application/json" + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/managedZones/{managedZone}/changes") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "managedZone": c.managedZone, + }) + req.Header.Set("Content-Type", ctype) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "dns.changes.create" call. +// Exactly one of *Change or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Change.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *ChangesCreateCall) Do(opts ...googleapi.CallOption) (*Change, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Change{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Atomically update the ResourceRecordSet collection.", + // "httpMethod": "POST", + // "id": "dns.changes.create", + // "parameterOrder": [ + // "project", + // "managedZone" + // ], + // "parameters": { + // "managedZone": { + // "description": "Identifies the managed zone addressed by this request. Can be the managed zone name or id.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Identifies the project addressed by this request.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/managedZones/{managedZone}/changes", + // "request": { + // "$ref": "Change" + // }, + // "response": { + // "$ref": "Change" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/ndev.clouddns.readwrite" + // ] + // } + +} + +// method id "dns.changes.get": + +type ChangesGetCall struct { + s *Service + project string + managedZone string + changeId string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context +} + +// Get: Fetch the representation of an existing Change. +func (r *ChangesService) Get(project string, managedZone string, changeId string) *ChangesGetCall { + c := &ChangesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.managedZone = managedZone + c.changeId = changeId + return c +} + +// Fields allows partial responses to be retrieved. 
See
+// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *ChangesGetCall) Fields(s ...googleapi.Field) *ChangesGetCall {
+ c.urlParams_.Set("fields", googleapi.CombineFields(s))
+ return c
+}
+
+// IfNoneMatch sets the optional parameter which makes the operation
+// fail if the object's ETag matches the given value. This is useful for
+// getting updates only after the object has changed since the last
+// request. Use googleapi.IsNotModified to check whether the response
+// error from Do is the result of If-None-Match.
+func (c *ChangesGetCall) IfNoneMatch(entityTag string) *ChangesGetCall {
+ c.ifNoneMatch_ = entityTag
+ return c
+}
+
+// Context sets the context to be used in this call's Do method. Any
+// pending HTTP request will be aborted if the provided context is
+// canceled.
+func (c *ChangesGetCall) Context(ctx context.Context) *ChangesGetCall {
+ c.ctx_ = ctx
+ return c
+}
+
+func (c *ChangesGetCall) doRequest(alt string) (*http.Response, error) {
+ var body io.Reader = nil
+ c.urlParams_.Set("alt", alt)
+ urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/managedZones/{managedZone}/changes/{changeId}")
+ urls += "?" + c.urlParams_.Encode()
+ req, _ := http.NewRequest("GET", urls, body)
+ googleapi.Expand(req.URL, map[string]string{
+ "project": c.project,
+ "managedZone": c.managedZone,
+ "changeId": c.changeId,
+ })
+ req.Header.Set("User-Agent", c.s.userAgent())
+ if c.ifNoneMatch_ != "" {
+ req.Header.Set("If-None-Match", c.ifNoneMatch_)
+ }
+ if c.ctx_ != nil {
+ return ctxhttp.Do(c.ctx_, c.s.client, req)
+ }
+ return c.s.client.Do(req)
+}
+
+// Do executes the "dns.changes.get" call.
+// Exactly one of *Change or error will be non-nil. Any non-2xx status
+// code is an error. Response headers are in either
+// *Change.ServerResponse.Header or (if a response was returned at all)
+// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to
+// check whether the returned error was because http.StatusNotModified
+// was returned.
+func (c *ChangesGetCall) Do(opts ...googleapi.CallOption) (*Change, error) {
+ gensupport.SetOptions(c.urlParams_, opts...)
+ res, err := c.doRequest("json")
+ if res != nil && res.StatusCode == http.StatusNotModified {
+ if res.Body != nil {
+ res.Body.Close()
+ }
+ return nil, &googleapi.Error{
+ Code: res.StatusCode,
+ Header: res.Header,
+ }
+ }
+ if err != nil {
+ return nil, err
+ }
+ defer googleapi.CloseBody(res)
+ if err := googleapi.CheckResponse(res); err != nil {
+ return nil, err
+ }
+ ret := &Change{
+ ServerResponse: googleapi.ServerResponse{
+ Header: res.Header,
+ HTTPStatusCode: res.StatusCode,
+ },
+ }
+ if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
+ return nil, err
+ }
+ return ret, nil
+ // {
+ // "description": "Fetch the representation of an existing Change.",
+ // "httpMethod": "GET",
+ // "id": "dns.changes.get",
+ // "parameterOrder": [
+ // "project",
+ // "managedZone",
+ // "changeId"
+ // ],
+ // "parameters": {
+ // "changeId": {
+ // "description": "The identifier of the requested change, from a previous ResourceRecordSetsChangeResponse.",
+ // "location": "path",
+ // "required": true,
+ // "type": "string"
+ // },
+ // "managedZone": {
+ // "description": "Identifies the managed zone addressed by this request. Can be the managed zone name or id.",
+ // "location": "path",
+ // "required": true,
+ // "type": "string"
+ // },
+ // "project": {
+ // "description": "Identifies the project addressed by this request.",
+ // "location": "path",
+ // "required": true,
+ // "type": "string"
+ // }
+ // },
+ // "path": "{project}/managedZones/{managedZone}/changes/{changeId}",
+ // "response": {
+ // "$ref": "Change"
+ // },
+ // "scopes": [
+ // "https://www.googleapis.com/auth/cloud-platform",
+ // "https://www.googleapis.com/auth/cloud-platform.read-only",
+ // "https://www.googleapis.com/auth/ndev.clouddns.readonly",
+ // "https://www.googleapis.com/auth/ndev.clouddns.readwrite"
+ // ]
+ // }
+
+}
+
+// method id "dns.changes.list":
+
+type ChangesListCall struct {
+ s *Service
+ project string
+ managedZone string
+ urlParams_ gensupport.URLParams
+ ifNoneMatch_ string
+ ctx_ context.Context
+}
+
+// List: Enumerate Changes to a ResourceRecordSet collection.
+func (r *ChangesService) List(project string, managedZone string) *ChangesListCall {
+ c := &ChangesListCall{s: r.s, urlParams_: make(gensupport.URLParams)}
+ c.project = project
+ c.managedZone = managedZone
+ return c
+}
+
+// MaxResults sets the optional parameter "maxResults": Maximum number
+// of results to be returned. If unspecified, the server will decide how
+// many results to return.
+func (c *ChangesListCall) MaxResults(maxResults int64) *ChangesListCall {
+ c.urlParams_.Set("maxResults", fmt.Sprint(maxResults))
+ return c
+}
+
+// PageToken sets the optional parameter "pageToken": A tag returned by
+// a previous list request that was truncated. Use this parameter to
+// continue a previous list request.
+func (c *ChangesListCall) PageToken(pageToken string) *ChangesListCall {
+ c.urlParams_.Set("pageToken", pageToken)
+ return c
+}
+
+// SortBy sets the optional parameter "sortBy": Sorting criterion. The
+// only supported value is change sequence.
+//
+// Possible values:
+// "changeSequence" (default)
+func (c *ChangesListCall) SortBy(sortBy string) *ChangesListCall {
+ c.urlParams_.Set("sortBy", sortBy)
+ return c
+}
+
+// SortOrder sets the optional parameter "sortOrder": Sorting order
+// direction: 'ascending' or 'descending'.
+func (c *ChangesListCall) SortOrder(sortOrder string) *ChangesListCall {
+ c.urlParams_.Set("sortOrder", sortOrder)
+ return c
+}
+
+// Fields allows partial responses to be retrieved. See
+// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *ChangesListCall) Fields(s ...googleapi.Field) *ChangesListCall {
+ c.urlParams_.Set("fields", googleapi.CombineFields(s))
+ return c
+}
+
+// IfNoneMatch sets the optional parameter which makes the operation
+// fail if the object's ETag matches the given value. This is useful for
+// getting updates only after the object has changed since the last
+// request. Use googleapi.IsNotModified to check whether the response
+// error from Do is the result of If-None-Match.
+func (c *ChangesListCall) IfNoneMatch(entityTag string) *ChangesListCall {
+ c.ifNoneMatch_ = entityTag
+ return c
+}
+
+// Context sets the context to be used in this call's Do method. Any
+// pending HTTP request will be aborted if the provided context is
+// canceled.
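+//
+// List results are paginated; a sketch of draining every page using
+// PageToken and the response's NextPageToken (identifiers such as
+// "my-project" and "my-zone" are illustrative):
+//
+//	var changes []*dns.Change
+//	pageToken := ""
+//	for {
+//		call := dnsService.Changes.List("my-project", "my-zone")
+//		if pageToken != "" {
+//			call.PageToken(pageToken)
+//		}
+//		resp, err := call.Do()
+//		if err != nil {
+//			log.Fatal(err)
+//		}
+//		changes = append(changes, resp.Changes...)
+//		if resp.NextPageToken == "" {
+//			break
+//		}
+//		pageToken = resp.NextPageToken
+//	}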
+func (c *ChangesListCall) Context(ctx context.Context) *ChangesListCall { + c.ctx_ = ctx + return c +} + +func (c *ChangesListCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/managedZones/{managedZone}/changes") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "managedZone": c.managedZone, + }) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + req.Header.Set("If-None-Match", c.ifNoneMatch_) + } + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "dns.changes.list" call. +// Exactly one of *ChangesListResponse or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *ChangesListResponse.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *ChangesListCall) Do(opts ...googleapi.CallOption) (*ChangesListResponse, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &ChangesListResponse{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Enumerate Changes to a ResourceRecordSet collection.", + // "httpMethod": "GET", + // "id": "dns.changes.list", + // "parameterOrder": [ + // "project", + // "managedZone" + // ], + // "parameters": { + // "managedZone": { + // "description": "Identifies the managed zone addressed by this request. Can be the managed zone name or id.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "maxResults": { + // "description": "Optional. Maximum number of results to be returned. If unspecified, the server will decide how many results to return.", + // "format": "int32", + // "location": "query", + // "type": "integer" + // }, + // "pageToken": { + // "description": "Optional. A tag returned by a previous list request that was truncated. Use this parameter to continue a previous list request.", + // "location": "query", + // "type": "string" + // }, + // "project": { + // "description": "Identifies the project addressed by this request.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "sortBy": { + // "default": "changeSequence", + // "description": "Sorting criterion. 
The only supported value is change sequence.", + // "enum": [ + // "changeSequence" + // ], + // "enumDescriptions": [ + // "" + // ], + // "location": "query", + // "type": "string" + // }, + // "sortOrder": { + // "description": "Sorting order direction: 'ascending' or 'descending'.", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "{project}/managedZones/{managedZone}/changes", + // "response": { + // "$ref": "ChangesListResponse" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/cloud-platform.read-only", + // "https://www.googleapis.com/auth/ndev.clouddns.readonly", + // "https://www.googleapis.com/auth/ndev.clouddns.readwrite" + // ] + // } + +} + +// method id "dns.managedZones.create": + +type ManagedZonesCreateCall struct { + s *Service + project string + managedzone *ManagedZone + urlParams_ gensupport.URLParams + ctx_ context.Context +} + +// Create: Create a new ManagedZone. +func (r *ManagedZonesService) Create(project string, managedzone *ManagedZone) *ManagedZonesCreateCall { + c := &ManagedZonesCreateCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.managedzone = managedzone + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ManagedZonesCreateCall) Fields(s ...googleapi.Field) *ManagedZonesCreateCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ManagedZonesCreateCall) Context(ctx context.Context) *ManagedZonesCreateCall { + c.ctx_ = ctx + return c +} + +func (c *ManagedZonesCreateCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.managedzone) + if err != nil { + return nil, err + } + ctype := "application/json" + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/managedZones") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + }) + req.Header.Set("Content-Type", ctype) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "dns.managedZones.create" call. +// Exactly one of *ManagedZone or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *ManagedZone.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *ManagedZonesCreateCall) Do(opts ...googleapi.CallOption) (*ManagedZone, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &ManagedZone{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Create a new ManagedZone.", + // "httpMethod": "POST", + // "id": "dns.managedZones.create", + // "parameterOrder": [ + // "project" + // ], + // "parameters": { + // "project": { + // "description": "Identifies the project addressed by this request.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/managedZones", + // "request": { + // "$ref": "ManagedZone" + // }, + // "response": { + // "$ref": "ManagedZone" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/ndev.clouddns.readwrite" + // ] + // } + +} + +// method id "dns.managedZones.delete": + +type ManagedZonesDeleteCall struct { + s *Service + project string + managedZone string + urlParams_ gensupport.URLParams + ctx_ context.Context +} + +// Delete: Delete a previously created ManagedZone. +func (r *ManagedZonesService) Delete(project string, managedZone string) *ManagedZonesDeleteCall { + c := &ManagedZonesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.managedZone = managedZone + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ManagedZonesDeleteCall) Fields(s ...googleapi.Field) *ManagedZonesDeleteCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ManagedZonesDeleteCall) Context(ctx context.Context) *ManagedZonesDeleteCall { + c.ctx_ = ctx + return c +} + +func (c *ManagedZonesDeleteCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/managedZones/{managedZone}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("DELETE", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "managedZone": c.managedZone, + }) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "dns.managedZones.delete" call. +func (c *ManagedZonesDeleteCall) Do(opts ...googleapi.CallOption) error { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json")
+ if err != nil {
+ return err
+ }
+ defer googleapi.CloseBody(res)
+ if err := googleapi.CheckResponse(res); err != nil {
+ return err
+ }
+ return nil
+ // {
+ // "description": "Delete a previously created ManagedZone.",
+ // "httpMethod": "DELETE",
+ // "id": "dns.managedZones.delete",
+ // "parameterOrder": [
+ // "project",
+ // "managedZone"
+ // ],
+ // "parameters": {
+ // "managedZone": {
+ // "description": "Identifies the managed zone addressed by this request. Can be the managed zone name or id.",
+ // "location": "path",
+ // "required": true,
+ // "type": "string"
+ // },
+ // "project": {
+ // "description": "Identifies the project addressed by this request.",
+ // "location": "path",
+ // "required": true,
+ // "type": "string"
+ // }
+ // },
+ // "path": "{project}/managedZones/{managedZone}",
+ // "scopes": [
+ // "https://www.googleapis.com/auth/cloud-platform",
+ // "https://www.googleapis.com/auth/ndev.clouddns.readwrite"
+ // ]
+ // }
+
+}
+
+// method id "dns.managedZones.get":
+
+type ManagedZonesGetCall struct {
+ s *Service
+ project string
+ managedZone string
+ urlParams_ gensupport.URLParams
+ ifNoneMatch_ string
+ ctx_ context.Context
+}
+
+// Get: Fetch the representation of an existing ManagedZone.
+func (r *ManagedZonesService) Get(project string, managedZone string) *ManagedZonesGetCall {
+ c := &ManagedZonesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)}
+ c.project = project
+ c.managedZone = managedZone
+ return c
+}
+
+// Fields allows partial responses to be retrieved. See
+// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *ManagedZonesGetCall) Fields(s ...googleapi.Field) *ManagedZonesGetCall {
+ c.urlParams_.Set("fields", googleapi.CombineFields(s))
+ return c
+}
+
+// IfNoneMatch sets the optional parameter which makes the operation
+// fail if the object's ETag matches the given value. This is useful for
+// getting updates only after the object has changed since the last
+// request. Use googleapi.IsNotModified to check whether the response
+// error from Do is the result of If-None-Match.
+func (c *ManagedZonesGetCall) IfNoneMatch(entityTag string) *ManagedZonesGetCall {
+ c.ifNoneMatch_ = entityTag
+ return c
+}
+
+// Context sets the context to be used in this call's Do method. Any
+// pending HTTP request will be aborted if the provided context is
+// canceled.
+func (c *ManagedZonesGetCall) Context(ctx context.Context) *ManagedZonesGetCall {
+ c.ctx_ = ctx
+ return c
+}
+
+func (c *ManagedZonesGetCall) doRequest(alt string) (*http.Response, error) {
+ var body io.Reader = nil
+ c.urlParams_.Set("alt", alt)
+ urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/managedZones/{managedZone}")
+ urls += "?" + c.urlParams_.Encode()
+ req, _ := http.NewRequest("GET", urls, body)
+ googleapi.Expand(req.URL, map[string]string{
+ "project": c.project,
+ "managedZone": c.managedZone,
+ })
+ req.Header.Set("User-Agent", c.s.userAgent())
+ if c.ifNoneMatch_ != "" {
+ req.Header.Set("If-None-Match", c.ifNoneMatch_)
+ }
+ if c.ctx_ != nil {
+ return ctxhttp.Do(c.ctx_, c.s.client, req)
+ }
+ return c.s.client.Do(req)
+}
+
+// Do executes the "dns.managedZones.get" call.
+// Exactly one of *ManagedZone or error will be non-nil. Any non-2xx
+// status code is an error. Response headers are in either
+// *ManagedZone.ServerResponse.Header or (if a response was returned at
+// all) in error.(*googleapi.Error).Header.
Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *ManagedZonesGetCall) Do(opts ...googleapi.CallOption) (*ManagedZone, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &ManagedZone{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Fetch the representation of an existing ManagedZone.", + // "httpMethod": "GET", + // "id": "dns.managedZones.get", + // "parameterOrder": [ + // "project", + // "managedZone" + // ], + // "parameters": { + // "managedZone": { + // "description": "Identifies the managed zone addressed by this request. Can be the managed zone name or id.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Identifies the project addressed by this request.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/managedZones/{managedZone}", + // "response": { + // "$ref": "ManagedZone" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/cloud-platform.read-only", + // "https://www.googleapis.com/auth/ndev.clouddns.readonly", + // "https://www.googleapis.com/auth/ndev.clouddns.readwrite" + // ] + // } + +} + +// method id "dns.managedZones.list": + +type ManagedZonesListCall struct { + s *Service + project string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context +} + +// List: Enumerate ManagedZones that have been created but not yet +// deleted. +func (r *ManagedZonesService) List(project string) *ManagedZonesListCall { + c := &ManagedZonesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + return c +} + +// DnsName sets the optional parameter "dnsName": Restricts the list to +// return only zones with this domain name. +func (c *ManagedZonesListCall) DnsName(dnsName string) *ManagedZonesListCall { + c.urlParams_.Set("dnsName", dnsName) + return c +} + +// MaxResults sets the optional parameter "maxResults": Maximum number +// of results to be returned. If unspecified, the server will decide how +// many results to return. +func (c *ManagedZonesListCall) MaxResults(maxResults int64) *ManagedZonesListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// PageToken sets the optional parameter "pageToken": A tag returned by +// a previous list request that was truncated. Use this parameter to +// continue a previous list request. +func (c *ManagedZonesListCall) PageToken(pageToken string) *ManagedZonesListCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. 
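+//
+// For example, a caller interested only in zone names and DNS names
+// might request a partial response (a sketch; "my-project" is an
+// illustrative project identifier):
+//
+//	resp, err := dnsService.ManagedZones.List("my-project").
+//		Fields("managedZones(name,dnsName)").Do()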
+func (c *ManagedZonesListCall) Fields(s ...googleapi.Field) *ManagedZonesListCall {
+	c.urlParams_.Set("fields", googleapi.CombineFields(s))
+	return c
+}
+
+// IfNoneMatch sets the optional parameter which makes the operation
+// fail if the object's ETag matches the given value. This is useful for
+// getting updates only after the object has changed since the last
+// request. Use googleapi.IsNotModified to check whether the response
+// error from Do is the result of If-None-Match.
+func (c *ManagedZonesListCall) IfNoneMatch(entityTag string) *ManagedZonesListCall {
+	c.ifNoneMatch_ = entityTag
+	return c
+}
+
+// Context sets the context to be used in this call's Do method. Any
+// pending HTTP request will be aborted if the provided context is
+// canceled.
+func (c *ManagedZonesListCall) Context(ctx context.Context) *ManagedZonesListCall {
+	c.ctx_ = ctx
+	return c
+}
+
+func (c *ManagedZonesListCall) doRequest(alt string) (*http.Response, error) {
+	var body io.Reader = nil
+	c.urlParams_.Set("alt", alt)
+	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/managedZones")
+	urls += "?" + c.urlParams_.Encode()
+	req, _ := http.NewRequest("GET", urls, body)
+	googleapi.Expand(req.URL, map[string]string{
+		"project": c.project,
+	})
+	req.Header.Set("User-Agent", c.s.userAgent())
+	if c.ifNoneMatch_ != "" {
+		req.Header.Set("If-None-Match", c.ifNoneMatch_)
+	}
+	if c.ctx_ != nil {
+		return ctxhttp.Do(c.ctx_, c.s.client, req)
+	}
+	return c.s.client.Do(req)
+}
+
+// Do executes the "dns.managedZones.list" call.
+// Exactly one of *ManagedZonesListResponse or error will be non-nil.
+// Any non-2xx status code is an error. Response headers are in either
+// *ManagedZonesListResponse.ServerResponse.Header or (if a response was
+// returned at all) in error.(*googleapi.Error).Header. Use
+// googleapi.IsNotModified to check whether the returned error was
+// because http.StatusNotModified was returned.
+func (c *ManagedZonesListCall) Do(opts ...googleapi.CallOption) (*ManagedZonesListResponse, error) {
+	gensupport.SetOptions(c.urlParams_, opts...)
+	res, err := c.doRequest("json")
+	if res != nil && res.StatusCode == http.StatusNotModified {
+		if res.Body != nil {
+			res.Body.Close()
+		}
+		return nil, &googleapi.Error{
+			Code:   res.StatusCode,
+			Header: res.Header,
+		}
+	}
+	if err != nil {
+		return nil, err
+	}
+	defer googleapi.CloseBody(res)
+	if err := googleapi.CheckResponse(res); err != nil {
+		return nil, err
+	}
+	ret := &ManagedZonesListResponse{
+		ServerResponse: googleapi.ServerResponse{
+			Header:         res.Header,
+			HTTPStatusCode: res.StatusCode,
+		},
+	}
+	if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
+		return nil, err
+	}
+	return ret, nil
+	// {
+	//   "description": "Enumerate ManagedZones that have been created but not yet deleted.",
+	//   "httpMethod": "GET",
+	//   "id": "dns.managedZones.list",
+	//   "parameterOrder": [
+	//     "project"
+	//   ],
+	//   "parameters": {
+	//     "dnsName": {
+	//       "description": "Restricts the list to return only zones with this domain name.",
+	//       "location": "query",
+	//       "type": "string"
+	//     },
+	//     "maxResults": {
+	//       "description": "Optional. Maximum number of results to be returned. If unspecified, the server will decide how many results to return.",
+	//       "format": "int32",
+	//       "location": "query",
+	//       "type": "integer"
+	//     },
+	//     "pageToken": {
+	//       "description": "Optional. A tag returned by a previous list request that was truncated. Use this parameter to continue a previous list request.",
+	//       "location": "query",
+	//       "type": "string"
+	//     },
+	//     "project": {
+	//       "description": "Identifies the project addressed by this request.",
+	//       "location": "path",
+	//       "required": true,
+	//       "type": "string"
+	//     }
+	//   },
+	//   "path": "{project}/managedZones",
+	//   "response": {
+	//     "$ref": "ManagedZonesListResponse"
+	//   },
+	//   "scopes": [
+	//     "https://www.googleapis.com/auth/cloud-platform",
+	//     "https://www.googleapis.com/auth/cloud-platform.read-only",
+	//     "https://www.googleapis.com/auth/ndev.clouddns.readonly",
+	//     "https://www.googleapis.com/auth/ndev.clouddns.readwrite"
+	//   ]
+	// }
+
+}
+
+// method id "dns.projects.get":
+
+type ProjectsGetCall struct {
+	s            *Service
+	project      string
+	urlParams_   gensupport.URLParams
+	ifNoneMatch_ string
+	ctx_         context.Context
+}
+
+// Get: Fetch the representation of an existing Project.
+func (r *ProjectsService) Get(project string) *ProjectsGetCall {
+	c := &ProjectsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)}
+	c.project = project
+	return c
+}
+
+// Fields allows partial responses to be retrieved. See
+// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *ProjectsGetCall) Fields(s ...googleapi.Field) *ProjectsGetCall {
+	c.urlParams_.Set("fields", googleapi.CombineFields(s))
+	return c
+}
+
+// IfNoneMatch sets the optional parameter which makes the operation
+// fail if the object's ETag matches the given value. This is useful for
+// getting updates only after the object has changed since the last
+// request. Use googleapi.IsNotModified to check whether the response
+// error from Do is the result of If-None-Match.
+func (c *ProjectsGetCall) IfNoneMatch(entityTag string) *ProjectsGetCall {
+	c.ifNoneMatch_ = entityTag
+	return c
+}
+
+// Context sets the context to be used in this call's Do method. Any
+// pending HTTP request will be aborted if the provided context is
+// canceled.
+func (c *ProjectsGetCall) Context(ctx context.Context) *ProjectsGetCall {
+	c.ctx_ = ctx
+	return c
+}
+
+func (c *ProjectsGetCall) doRequest(alt string) (*http.Response, error) {
+	var body io.Reader = nil
+	c.urlParams_.Set("alt", alt)
+	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}")
+	urls += "?" + c.urlParams_.Encode()
+	req, _ := http.NewRequest("GET", urls, body)
+	googleapi.Expand(req.URL, map[string]string{
+		"project": c.project,
+	})
+	req.Header.Set("User-Agent", c.s.userAgent())
+	if c.ifNoneMatch_ != "" {
+		req.Header.Set("If-None-Match", c.ifNoneMatch_)
+	}
+	if c.ctx_ != nil {
+		return ctxhttp.Do(c.ctx_, c.s.client, req)
+	}
+	return c.s.client.Do(req)
+}
+
+// Do executes the "dns.projects.get" call.
+// Exactly one of *Project or error will be non-nil. Any non-2xx status
+// code is an error. Response headers are in either
+// *Project.ServerResponse.Header or (if a response was returned at all)
+// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to
+// check whether the returned error was because http.StatusNotModified
+// was returned.
+func (c *ProjectsGetCall) Do(opts ...googleapi.CallOption) (*Project, error) {
+	gensupport.SetOptions(c.urlParams_, opts...)
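+	// A caller typically reaches this method through the fluent chain on a
+	// configured *Service value; for example (sketch only, svc hypothetical):
+	//
+	//	project, err := svc.Projects.Get("my-project").Do()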
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Project{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Fetch the representation of an existing Project.", + // "httpMethod": "GET", + // "id": "dns.projects.get", + // "parameterOrder": [ + // "project" + // ], + // "parameters": { + // "project": { + // "description": "Identifies the project addressed by this request.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}", + // "response": { + // "$ref": "Project" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/cloud-platform.read-only", + // "https://www.googleapis.com/auth/ndev.clouddns.readonly", + // "https://www.googleapis.com/auth/ndev.clouddns.readwrite" + // ] + // } + +} + +// method id "dns.resourceRecordSets.list": + +type ResourceRecordSetsListCall struct { + s *Service + project string + managedZone string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context +} + +// List: Enumerate ResourceRecordSets that have been created but not yet +// deleted. +func (r *ResourceRecordSetsService) List(project string, managedZone string) *ResourceRecordSetsListCall { + c := &ResourceRecordSetsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.managedZone = managedZone + return c +} + +// MaxResults sets the optional parameter "maxResults": Maximum number +// of results to be returned. If unspecified, the server will decide how +// many results to return. +func (c *ResourceRecordSetsListCall) MaxResults(maxResults int64) *ResourceRecordSetsListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// Name sets the optional parameter "name": Restricts the list to return +// only records with this fully qualified domain name. +func (c *ResourceRecordSetsListCall) Name(name string) *ResourceRecordSetsListCall { + c.urlParams_.Set("name", name) + return c +} + +// PageToken sets the optional parameter "pageToken": A tag returned by +// a previous list request that was truncated. Use this parameter to +// continue a previous list request. +func (c *ResourceRecordSetsListCall) PageToken(pageToken string) *ResourceRecordSetsListCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// Type sets the optional parameter "type": Restricts the list to return +// only records of this type. If present, the "name" parameter must also +// be present. +func (c *ResourceRecordSetsListCall) Type(type_ string) *ResourceRecordSetsListCall { + c.urlParams_.Set("type", type_) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. 
+func (c *ResourceRecordSetsListCall) Fields(s ...googleapi.Field) *ResourceRecordSetsListCall {
+	c.urlParams_.Set("fields", googleapi.CombineFields(s))
+	return c
+}
+
+// IfNoneMatch sets the optional parameter which makes the operation
+// fail if the object's ETag matches the given value. This is useful for
+// getting updates only after the object has changed since the last
+// request. Use googleapi.IsNotModified to check whether the response
+// error from Do is the result of If-None-Match.
+func (c *ResourceRecordSetsListCall) IfNoneMatch(entityTag string) *ResourceRecordSetsListCall {
+	c.ifNoneMatch_ = entityTag
+	return c
+}
+
+// Context sets the context to be used in this call's Do method. Any
+// pending HTTP request will be aborted if the provided context is
+// canceled.
+func (c *ResourceRecordSetsListCall) Context(ctx context.Context) *ResourceRecordSetsListCall {
+	c.ctx_ = ctx
+	return c
+}
+
+func (c *ResourceRecordSetsListCall) doRequest(alt string) (*http.Response, error) {
+	var body io.Reader = nil
+	c.urlParams_.Set("alt", alt)
+	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/managedZones/{managedZone}/rrsets")
+	urls += "?" + c.urlParams_.Encode()
+	req, _ := http.NewRequest("GET", urls, body)
+	googleapi.Expand(req.URL, map[string]string{
+		"project":     c.project,
+		"managedZone": c.managedZone,
+	})
+	req.Header.Set("User-Agent", c.s.userAgent())
+	if c.ifNoneMatch_ != "" {
+		req.Header.Set("If-None-Match", c.ifNoneMatch_)
+	}
+	if c.ctx_ != nil {
+		return ctxhttp.Do(c.ctx_, c.s.client, req)
+	}
+	return c.s.client.Do(req)
+}
+
+// Do executes the "dns.resourceRecordSets.list" call.
+// Exactly one of *ResourceRecordSetsListResponse or error will be
+// non-nil. Any non-2xx status code is an error. Response headers are in
+// either *ResourceRecordSetsListResponse.ServerResponse.Header or (if a
+// response was returned at all) in error.(*googleapi.Error).Header. Use
+// googleapi.IsNotModified to check whether the returned error was
+// because http.StatusNotModified was returned.
+func (c *ResourceRecordSetsListCall) Do(opts ...googleapi.CallOption) (*ResourceRecordSetsListResponse, error) {
+	gensupport.SetOptions(c.urlParams_, opts...)
+	res, err := c.doRequest("json")
+	if res != nil && res.StatusCode == http.StatusNotModified {
+		if res.Body != nil {
+			res.Body.Close()
+		}
+		return nil, &googleapi.Error{
+			Code:   res.StatusCode,
+			Header: res.Header,
+		}
+	}
+	if err != nil {
+		return nil, err
+	}
+	defer googleapi.CloseBody(res)
+	if err := googleapi.CheckResponse(res); err != nil {
+		return nil, err
+	}
+	ret := &ResourceRecordSetsListResponse{
+		ServerResponse: googleapi.ServerResponse{
+			Header:         res.Header,
+			HTTPStatusCode: res.StatusCode,
+		},
+	}
+	if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
+		return nil, err
+	}
+	return ret, nil
+	// {
+	//   "description": "Enumerate ResourceRecordSets that have been created but not yet deleted.",
+	//   "httpMethod": "GET",
+	//   "id": "dns.resourceRecordSets.list",
+	//   "parameterOrder": [
+	//     "project",
+	//     "managedZone"
+	//   ],
+	//   "parameters": {
+	//     "managedZone": {
+	//       "description": "Identifies the managed zone addressed by this request. Can be the managed zone name or id.",
+	//       "location": "path",
+	//       "required": true,
+	//       "type": "string"
+	//     },
+	//     "maxResults": {
+	//       "description": "Optional. Maximum number of results to be returned.
If unspecified, the server will decide how many results to return.", + // "format": "int32", + // "location": "query", + // "type": "integer" + // }, + // "name": { + // "description": "Restricts the list to return only records with this fully qualified domain name.", + // "location": "query", + // "type": "string" + // }, + // "pageToken": { + // "description": "Optional. A tag returned by a previous list request that was truncated. Use this parameter to continue a previous list request.", + // "location": "query", + // "type": "string" + // }, + // "project": { + // "description": "Identifies the project addressed by this request.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "type": { + // "description": "Restricts the list to return only records of this type. If present, the \"name\" parameter must also be present.", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "{project}/managedZones/{managedZone}/rrsets", + // "response": { + // "$ref": "ResourceRecordSetsListResponse" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/cloud-platform.read-only", + // "https://www.googleapis.com/auth/ndev.clouddns.readonly", + // "https://www.googleapis.com/auth/ndev.clouddns.readwrite" + // ] + // } + +} diff --git a/vendor/google.golang.org/api/gensupport/buffer.go b/vendor/google.golang.org/api/gensupport/buffer.go new file mode 100644 index 000000000..4b8ec1424 --- /dev/null +++ b/vendor/google.golang.org/api/gensupport/buffer.go @@ -0,0 +1,77 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gensupport + +import ( + "bytes" + "io" + + "google.golang.org/api/googleapi" +) + +// ResumableBuffer buffers data from an io.Reader to support uploading media in retryable chunks. +type ResumableBuffer struct { + media io.Reader + + chunk []byte // The current chunk which is pending upload. The capacity is the chunk size. + err error // Any error generated when populating chunk by reading media. + + // The absolute position of chunk in the underlying media. + off int64 +} + +func NewResumableBuffer(media io.Reader, chunkSize int) *ResumableBuffer { + return &ResumableBuffer{media: media, chunk: make([]byte, 0, chunkSize)} +} + +// Chunk returns the current buffered chunk, the offset in the underlying media +// from which the chunk is drawn, and the size of the chunk. +// Successive calls to Chunk return the same chunk between calls to Next. +func (rb *ResumableBuffer) Chunk() (chunk io.Reader, off int64, size int, err error) { + // There may already be data in chunk if Next has not been called since the previous call to Chunk. + if rb.err == nil && len(rb.chunk) == 0 { + rb.err = rb.loadChunk() + } + return bytes.NewReader(rb.chunk), rb.off, len(rb.chunk), rb.err +} + +// loadChunk will read from media into chunk, up to the capacity of chunk. +func (rb *ResumableBuffer) loadChunk() error { + bufSize := cap(rb.chunk) + rb.chunk = rb.chunk[:bufSize] + + read := 0 + var err error + for err == nil && read < bufSize { + var n int + n, err = rb.media.Read(rb.chunk[read:]) + read += n + } + rb.chunk = rb.chunk[:read] + return err +} + +// Next advances to the next chunk, which will be returned by the next call to Chunk. +// Calls to Next without a corresponding prior call to Chunk will have no effect. 
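+//
+// A typical caller drives the buffer with a Chunk/Next loop along these
+// lines (sketch only; r is any io.Reader and chunkSize is caller-chosen):
+//
+//	rb := NewResumableBuffer(r, chunkSize)
+//	for {
+//		chunk, off, size, err := rb.Chunk()
+//		// ... upload size bytes from chunk, starting at media offset off ...
+//		if err != nil {
+//			break // err == io.EOF means the final chunk was delivered
+//		}
+//		rb.Next()
+//	}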
+func (rb *ResumableBuffer) Next() { + rb.off += int64(len(rb.chunk)) + rb.chunk = rb.chunk[0:0] +} + +type readerTyper struct { + io.Reader + googleapi.ContentTyper +} + +// ReaderAtToReader adapts a ReaderAt to be used as a Reader. +// If ra implements googleapi.ContentTyper, then the returned reader +// will also implement googleapi.ContentTyper, delegating to ra. +func ReaderAtToReader(ra io.ReaderAt, size int64) io.Reader { + r := io.NewSectionReader(ra, 0, size) + if typer, ok := ra.(googleapi.ContentTyper); ok { + return readerTyper{r, typer} + } + return r +} diff --git a/vendor/google.golang.org/api/gensupport/buffer_test.go b/vendor/google.golang.org/api/gensupport/buffer_test.go new file mode 100644 index 000000000..253fa791b --- /dev/null +++ b/vendor/google.golang.org/api/gensupport/buffer_test.go @@ -0,0 +1,296 @@ +// Copyright 2015 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gensupport + +import ( + "bytes" + "io" + "io/ioutil" + "reflect" + "testing" + "testing/iotest" + + "google.golang.org/api/googleapi" +) + +// getChunkAsString reads a chunk from rb, but does not call Next. +func getChunkAsString(t *testing.T, rb *ResumableBuffer) (string, error) { + chunk, _, size, err := rb.Chunk() + + buf, e := ioutil.ReadAll(chunk) + if e != nil { + t.Fatalf("Failed reading chunk: %v", e) + } + if size != len(buf) { + t.Fatalf("reported chunk size doesn't match actual chunk size: got: %v; want: %v", size, len(buf)) + } + return string(buf), err +} + +func TestChunking(t *testing.T) { + type testCase struct { + data string // the data to read from the Reader + finalErr error // error to return after data has been read + chunkSize int + wantChunks []string + } + + for _, singleByteReads := range []bool{true, false} { + for _, tc := range []testCase{ + { + data: "abcdefg", + finalErr: nil, + chunkSize: 3, + wantChunks: []string{"abc", "def", "g"}, + }, + { + data: "abcdefg", + finalErr: nil, + chunkSize: 1, + wantChunks: []string{"a", "b", "c", "d", "e", "f", "g"}, + }, + { + data: "abcdefg", + finalErr: nil, + chunkSize: 7, + wantChunks: []string{"abcdefg"}, + }, + { + data: "abcdefg", + finalErr: nil, + chunkSize: 8, + wantChunks: []string{"abcdefg"}, + }, + { + data: "abcdefg", + finalErr: io.ErrUnexpectedEOF, + chunkSize: 3, + wantChunks: []string{"abc", "def", "g"}, + }, + { + data: "abcdefg", + finalErr: io.ErrUnexpectedEOF, + chunkSize: 8, + wantChunks: []string{"abcdefg"}, + }, + } { + var r io.Reader = &errReader{buf: []byte(tc.data), err: tc.finalErr} + + if singleByteReads { + r = iotest.OneByteReader(r) + } + + rb := NewResumableBuffer(r, tc.chunkSize) + var gotErr error + got := []string{} + for { + chunk, err := getChunkAsString(t, rb) + if len(chunk) != 0 { + got = append(got, string(chunk)) + } + if err != nil { + gotErr = err + break + } + rb.Next() + } + + if !reflect.DeepEqual(got, tc.wantChunks) { + t.Errorf("Failed reading buffer: got: %v; want:%v", got, tc.wantChunks) + } + + expectedErr := tc.finalErr + if expectedErr == nil { + expectedErr = io.EOF + } + if gotErr != expectedErr { + t.Errorf("Reading buffer error: got: %v; want: %v", gotErr, expectedErr) + } + } + } +} + +func TestChunkCanBeReused(t *testing.T) { + er := &errReader{buf: []byte("abcdefg")} + rb := NewResumableBuffer(er, 3) + + // expectChunk reads a chunk and checks that it got what was wanted. 
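+	// It does not call Next itself, so consecutive calls are expected to
+	// observe the same chunk.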
+ expectChunk := func(want string, wantErr error) { + got, err := getChunkAsString(t, rb) + if err != wantErr { + t.Errorf("error reading buffer: got: %v; want: %v", err, wantErr) + } + if !reflect.DeepEqual(got, want) { + t.Errorf("Failed reading buffer: got: %q; want:%q", got, want) + } + } + expectChunk("abc", nil) + // On second call, should get same chunk again. + expectChunk("abc", nil) + rb.Next() + expectChunk("def", nil) + expectChunk("def", nil) + rb.Next() + expectChunk("g", io.EOF) + expectChunk("g", io.EOF) + rb.Next() + expectChunk("", io.EOF) +} + +func TestPos(t *testing.T) { + er := &errReader{buf: []byte("abcdefg")} + rb := NewResumableBuffer(er, 3) + + expectChunkAtOffset := func(want int64, wantErr error) { + _, off, _, err := rb.Chunk() + if err != wantErr { + t.Errorf("error reading buffer: got: %v; want: %v", err, wantErr) + } + if got := off; got != want { + t.Errorf("resumable buffer Pos: got: %v; want: %v", got, want) + } + } + + // We expect the first chunk to be at offset 0. + expectChunkAtOffset(0, nil) + // Fetching the same chunk should return the same offset. + expectChunkAtOffset(0, nil) + + // Calling Next multiple times should only cause off to advance by 3, since off is not advanced until + // the chunk is actually read. + rb.Next() + rb.Next() + expectChunkAtOffset(3, nil) + + rb.Next() + + // Load the final 1-byte chunk. + expectChunkAtOffset(6, io.EOF) + + // Next will advance 1 byte. But there are no more chunks, so off will not increase beyond 7. + rb.Next() + expectChunkAtOffset(7, io.EOF) + rb.Next() + expectChunkAtOffset(7, io.EOF) +} + +// bytes.Reader implements both Reader and ReaderAt. The following types +// implement various combinations of Reader, ReaderAt and ContentTyper, by +// wrapping bytes.Reader. All implement at least ReaderAt, so they can be +// passed to ReaderAtToReader. The following table summarizes which types +// implement which interfaces: +// +// ReaderAt Reader ContentTyper +// reader x x +// typerReader x x x +// readerAt x +// typerReaderAt x x + +// reader implements Reader, in addition to ReaderAt. +type reader struct { + r *bytes.Reader +} + +func (r *reader) ReadAt(b []byte, off int64) (n int, err error) { + return r.r.ReadAt(b, off) +} + +func (r *reader) Read(b []byte) (n int, err error) { + return r.r.Read(b) +} + +// typerReader implements Reader and ContentTyper, in addition to ReaderAt. +type typerReader struct { + r *bytes.Reader +} + +func (tr *typerReader) ReadAt(b []byte, off int64) (n int, err error) { + return tr.r.ReadAt(b, off) +} + +func (tr *typerReader) Read(b []byte) (n int, err error) { + return tr.r.Read(b) +} + +func (tr *typerReader) ContentType() string { + return "ctype" +} + +// readerAt implements only ReaderAt. +type readerAt struct { + r *bytes.Reader +} + +func (ra *readerAt) ReadAt(b []byte, off int64) (n int, err error) { + return ra.r.ReadAt(b, off) +} + +// typerReaderAt implements ContentTyper, in addition to ReaderAt. +type typerReaderAt struct { + r *bytes.Reader +} + +func (tra *typerReaderAt) ReadAt(b []byte, off int64) (n int, err error) { + return tra.r.ReadAt(b, off) +} + +func (tra *typerReaderAt) ContentType() string { + return "ctype" +} + +func TestAdapter(t *testing.T) { + data := "abc" + + checkConversion := func(to io.Reader, wantTyper bool) { + if _, ok := to.(googleapi.ContentTyper); ok != wantTyper { + t.Errorf("reader implements typer? 
got: %v; want: %v", ok, wantTyper) + } + if typer, ok := to.(googleapi.ContentTyper); ok && typer.ContentType() != "ctype" { + t.Errorf("content type: got: %s; want: ctype", typer.ContentType()) + } + buf, err := ioutil.ReadAll(to) + if err != nil { + t.Errorf("error reading data: %v", err) + return + } + if !bytes.Equal(buf, []byte(data)) { + t.Errorf("failed reading data: got: %s; want: %s", buf, data) + } + } + + type testCase struct { + from io.ReaderAt + wantTyper bool + } + for _, tc := range []testCase{ + { + from: &reader{bytes.NewReader([]byte(data))}, + wantTyper: false, + }, + { + // Reader and ContentTyper + from: &typerReader{bytes.NewReader([]byte(data))}, + wantTyper: true, + }, + { + // ReaderAt + from: &readerAt{bytes.NewReader([]byte(data))}, + wantTyper: false, + }, + { + // ReaderAt and ContentTyper + from: &typerReaderAt{bytes.NewReader([]byte(data))}, + wantTyper: true, + }, + } { + to := ReaderAtToReader(tc.from, int64(len(data))) + checkConversion(to, tc.wantTyper) + // tc.from is a ReaderAt, and should be treated like one, even + // if it also implements Reader. Specifically, it can be + // reused and read from the beginning. + to = ReaderAtToReader(tc.from, int64(len(data))) + checkConversion(to, tc.wantTyper) + } +} diff --git a/vendor/google.golang.org/api/gensupport/doc.go b/vendor/google.golang.org/api/gensupport/doc.go new file mode 100644 index 000000000..752c4b411 --- /dev/null +++ b/vendor/google.golang.org/api/gensupport/doc.go @@ -0,0 +1,10 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package gensupport is an internal implementation detail used by code +// generated by the google-api-go-generator tool. +// +// This package may be modified at any time without regard for backwards +// compatibility. It should not be used directly by API users. +package gensupport diff --git a/vendor/google.golang.org/api/gensupport/json.go b/vendor/google.golang.org/api/gensupport/json.go new file mode 100644 index 000000000..dd7bcd2eb --- /dev/null +++ b/vendor/google.golang.org/api/gensupport/json.go @@ -0,0 +1,172 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gensupport + +import ( + "encoding/json" + "fmt" + "reflect" + "strings" +) + +// MarshalJSON returns a JSON encoding of schema containing only selected fields. +// A field is selected if: +// * it has a non-empty value, or +// * its field name is present in forceSendFields, and +// * it is not a nil pointer or nil interface. +// The JSON key for each selected field is taken from the field's json: struct tag. 
+func MarshalJSON(schema interface{}, forceSendFields []string) ([]byte, error) { + if len(forceSendFields) == 0 { + return json.Marshal(schema) + } + + mustInclude := make(map[string]struct{}) + for _, f := range forceSendFields { + mustInclude[f] = struct{}{} + } + + dataMap, err := schemaToMap(schema, mustInclude) + if err != nil { + return nil, err + } + return json.Marshal(dataMap) +} + +func schemaToMap(schema interface{}, mustInclude map[string]struct{}) (map[string]interface{}, error) { + m := make(map[string]interface{}) + s := reflect.ValueOf(schema) + st := s.Type() + + for i := 0; i < s.NumField(); i++ { + jsonTag := st.Field(i).Tag.Get("json") + if jsonTag == "" { + continue + } + tag, err := parseJSONTag(jsonTag) + if err != nil { + return nil, err + } + if tag.ignore { + continue + } + + v := s.Field(i) + f := st.Field(i) + if !includeField(v, f, mustInclude) { + continue + } + + // nil maps are treated as empty maps. + if f.Type.Kind() == reflect.Map && v.IsNil() { + m[tag.apiName] = map[string]string{} + continue + } + + // nil slices are treated as empty slices. + if f.Type.Kind() == reflect.Slice && v.IsNil() { + m[tag.apiName] = []bool{} + continue + } + + if tag.stringFormat { + m[tag.apiName] = formatAsString(v, f.Type.Kind()) + } else { + m[tag.apiName] = v.Interface() + } + } + return m, nil +} + +// formatAsString returns a string representation of v, dereferencing it first if possible. +func formatAsString(v reflect.Value, kind reflect.Kind) string { + if kind == reflect.Ptr && !v.IsNil() { + v = v.Elem() + } + + return fmt.Sprintf("%v", v.Interface()) +} + +// jsonTag represents a restricted version of the struct tag format used by encoding/json. +// It is used to describe the JSON encoding of fields in a Schema struct. +type jsonTag struct { + apiName string + stringFormat bool + ignore bool +} + +// parseJSONTag parses a restricted version of the struct tag format used by encoding/json. +// The format of the tag must match that generated by the Schema.writeSchemaStruct method +// in the api generator. +func parseJSONTag(val string) (jsonTag, error) { + if val == "-" { + return jsonTag{ignore: true}, nil + } + + var tag jsonTag + + i := strings.Index(val, ",") + if i == -1 || val[:i] == "" { + return tag, fmt.Errorf("malformed json tag: %s", val) + } + + tag = jsonTag{ + apiName: val[:i], + } + + switch val[i+1:] { + case "omitempty": + case "omitempty,string": + tag.stringFormat = true + default: + return tag, fmt.Errorf("malformed json tag: %s", val) + } + + return tag, nil +} + +// Reports whether the struct field "f" with value "v" should be included in JSON output. +func includeField(v reflect.Value, f reflect.StructField, mustInclude map[string]struct{}) bool { + // The regular JSON encoding of a nil pointer is "null", which means "delete this field". + // Therefore, we could enable field deletion by honoring pointer fields' presence in the mustInclude set. + // However, many fields are not pointers, so there would be no way to delete these fields. + // Rather than partially supporting field deletion, we ignore mustInclude for nil pointer fields. + // Deletion will be handled by a separate mechanism. + if f.Type.Kind() == reflect.Ptr && v.IsNil() { + return false + } + + // The "any" type is represented as an interface{}. If this interface + // is nil, there is no reasonable representation to send. We ignore + // these fields, for the same reasons as given above for pointers. 
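+	// Note that an interface holding a typed nil pointer is not itself nil,
+	// so such a field is still sent (and encodes as JSON null).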
+ if f.Type.Kind() == reflect.Interface && v.IsNil() { + return false + } + + _, ok := mustInclude[f.Name] + return ok || !isEmptyValue(v) +} + +// isEmptyValue reports whether v is the empty value for its type. This +// implementation is based on that of the encoding/json package, but its +// correctness does not depend on it being identical. What's important is that +// this function return false in situations where v should not be sent as part +// of a PATCH operation. +func isEmptyValue(v reflect.Value) bool { + switch v.Kind() { + case reflect.Array, reflect.Map, reflect.Slice, reflect.String: + return v.Len() == 0 + case reflect.Bool: + return !v.Bool() + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return v.Int() == 0 + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return v.Uint() == 0 + case reflect.Float32, reflect.Float64: + return v.Float() == 0 + case reflect.Interface, reflect.Ptr: + return v.IsNil() + } + return false +} diff --git a/vendor/google.golang.org/api/gensupport/json_test.go b/vendor/google.golang.org/api/gensupport/json_test.go new file mode 100644 index 000000000..b2f921913 --- /dev/null +++ b/vendor/google.golang.org/api/gensupport/json_test.go @@ -0,0 +1,367 @@ +// Copyright 2015 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gensupport + +import ( + "encoding/json" + "reflect" + "testing" + + "google.golang.org/api/googleapi" +) + +type schema struct { + // Basic types + B bool `json:"b,omitempty"` + F float64 `json:"f,omitempty"` + I int64 `json:"i,omitempty"` + Istr int64 `json:"istr,omitempty,string"` + Str string `json:"str,omitempty"` + + // Pointers to basic types + PB *bool `json:"pb,omitempty"` + PF *float64 `json:"pf,omitempty"` + PI *int64 `json:"pi,omitempty"` + PIStr *int64 `json:"pistr,omitempty,string"` + PStr *string `json:"pstr,omitempty"` + + // Other types + Int64s googleapi.Int64s `json:"i64s,omitempty"` + S []int `json:"s,omitempty"` + M map[string]string `json:"m,omitempty"` + Any interface{} `json:"any,omitempty"` + Child *child `json:"child,omitempty"` + + ForceSendFields []string `json:"-"` +} + +type child struct { + B bool `json:"childbool,omitempty"` +} + +type testCase struct { + s schema + want string +} + +func TestBasics(t *testing.T) { + for _, tc := range []testCase{ + { + s: schema{}, + want: `{}`, + }, + { + s: schema{ + ForceSendFields: []string{"B", "F", "I", "Istr", "Str", "PB", "PF", "PI", "PIStr", "PStr"}, + }, + want: `{"b":false,"f":0.0,"i":0,"istr":"0","str":""}`, + }, + { + s: schema{ + B: true, + F: 1.2, + I: 1, + Istr: 2, + Str: "a", + PB: googleapi.Bool(true), + PF: googleapi.Float64(1.2), + PI: googleapi.Int64(int64(1)), + PIStr: googleapi.Int64(int64(2)), + PStr: googleapi.String("a"), + }, + want: `{"b":true,"f":1.2,"i":1,"istr":"2","str":"a","pb":true,"pf":1.2,"pi":1,"pistr":"2","pstr":"a"}`, + }, + { + s: schema{ + B: false, + F: 0.0, + I: 0, + Istr: 0, + Str: "", + PB: googleapi.Bool(false), + PF: googleapi.Float64(0.0), + PI: googleapi.Int64(int64(0)), + PIStr: googleapi.Int64(int64(0)), + PStr: googleapi.String(""), + }, + want: `{"pb":false,"pf":0.0,"pi":0,"pistr":"0","pstr":""}`, + }, + { + s: schema{ + B: false, + F: 0.0, + I: 0, + Istr: 0, + Str: "", + PB: googleapi.Bool(false), + PF: googleapi.Float64(0.0), + PI: googleapi.Int64(int64(0)), + PIStr: googleapi.Int64(int64(0)), + PStr: googleapi.String(""), + 
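+				// ForceSendFields names Go struct fields, not their JSON keys.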
ForceSendFields: []string{"B", "F", "I", "Istr", "Str", "PB", "PF", "PI", "PIStr", "PStr"}, + }, + want: `{"b":false,"f":0.0,"i":0,"istr":"0","str":"","pb":false,"pf":0.0,"pi":0,"pistr":"0","pstr":""}`, + }, + } { + checkMarshalJSON(t, tc) + } +} + +func TestSliceFields(t *testing.T) { + for _, tc := range []testCase{ + { + s: schema{}, + want: `{}`, + }, + { + s: schema{S: []int{}, Int64s: googleapi.Int64s{}}, + want: `{}`, + }, + { + s: schema{S: []int{1}, Int64s: googleapi.Int64s{1}}, + want: `{"s":[1],"i64s":["1"]}`, + }, + { + s: schema{ + ForceSendFields: []string{"S", "Int64s"}, + }, + want: `{"s":[],"i64s":[]}`, + }, + { + s: schema{ + S: []int{}, + Int64s: googleapi.Int64s{}, + ForceSendFields: []string{"S", "Int64s"}, + }, + want: `{"s":[],"i64s":[]}`, + }, + { + s: schema{ + S: []int{1}, + Int64s: googleapi.Int64s{1}, + ForceSendFields: []string{"S", "Int64s"}, + }, + want: `{"s":[1],"i64s":["1"]}`, + }, + } { + checkMarshalJSON(t, tc) + } +} + +func TestMapField(t *testing.T) { + for _, tc := range []testCase{ + { + s: schema{}, + want: `{}`, + }, + { + s: schema{M: make(map[string]string)}, + want: `{}`, + }, + { + s: schema{M: map[string]string{"a": "b"}}, + want: `{"m":{"a":"b"}}`, + }, + { + s: schema{ + ForceSendFields: []string{"M"}, + }, + want: `{"m":{}}`, + }, + { + s: schema{ + M: make(map[string]string), + ForceSendFields: []string{"M"}, + }, + want: `{"m":{}}`, + }, + { + s: schema{ + M: map[string]string{"a": "b"}, + ForceSendFields: []string{"M"}, + }, + want: `{"m":{"a":"b"}}`, + }, + } { + checkMarshalJSON(t, tc) + } +} + +type anyType struct { + Field int +} + +func (a anyType) MarshalJSON() ([]byte, error) { + return []byte(`"anyType value"`), nil +} + +func TestAnyField(t *testing.T) { + // ForceSendFields has no effect on nil interfaces and interfaces that contain nil pointers. + var nilAny *anyType + for _, tc := range []testCase{ + { + s: schema{}, + want: `{}`, + }, + { + s: schema{Any: nilAny}, + want: `{"any": null}`, + }, + { + s: schema{Any: &anyType{}}, + want: `{"any":"anyType value"}`, + }, + { + s: schema{Any: anyType{}}, + want: `{"any":"anyType value"}`, + }, + { + s: schema{ + ForceSendFields: []string{"Any"}, + }, + want: `{}`, + }, + { + s: schema{ + Any: nilAny, + ForceSendFields: []string{"Any"}, + }, + want: `{"any": null}`, + }, + { + s: schema{ + Any: &anyType{}, + ForceSendFields: []string{"Any"}, + }, + want: `{"any":"anyType value"}`, + }, + { + s: schema{ + Any: anyType{}, + ForceSendFields: []string{"Any"}, + }, + want: `{"any":"anyType value"}`, + }, + } { + checkMarshalJSON(t, tc) + } +} + +func TestSubschema(t *testing.T) { + // Subschemas are always stored as pointers, so ForceSendFields has no effect on them. + for _, tc := range []testCase{ + { + s: schema{}, + want: `{}`, + }, + { + s: schema{ + ForceSendFields: []string{"Child"}, + }, + want: `{}`, + }, + { + s: schema{Child: &child{}}, + want: `{"child":{}}`, + }, + { + s: schema{ + Child: &child{}, + ForceSendFields: []string{"Child"}, + }, + want: `{"child":{}}`, + }, + { + s: schema{Child: &child{B: true}}, + want: `{"child":{"childbool":true}}`, + }, + + { + s: schema{ + Child: &child{B: true}, + ForceSendFields: []string{"Child"}, + }, + want: `{"child":{"childbool":true}}`, + }, + } { + checkMarshalJSON(t, tc) + } +} + +// checkMarshalJSON verifies that calling schemaToMap on tc.s yields a result which is equivalent to tc.want. 
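+// Equivalence is checked on decoded values, so differences in JSON key
+// order are ignored.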
+func checkMarshalJSON(t *testing.T, tc testCase) {
+	doCheckMarshalJSON(t, tc.s, tc.s.ForceSendFields, tc.want)
+	if len(tc.s.ForceSendFields) == 0 {
+		// verify that the code path used when ForceSendFields
+		// is non-empty produces the same output as the fast
+		// path that is used when it is empty.
+		doCheckMarshalJSON(t, tc.s, []string{"dummy"}, tc.want)
+	}
+}
+
+func doCheckMarshalJSON(t *testing.T, s schema, forceSendFields []string, wantJSON string) {
+	encoded, err := MarshalJSON(s, forceSendFields)
+	if err != nil {
+		t.Fatalf("encoding json:\n got err: %v", err)
+	}
+
+	// The expected and obtained JSON can differ in field ordering, so unmarshal before comparing.
+	var got interface{}
+	var want interface{}
+	err = json.Unmarshal(encoded, &got)
+	if err != nil {
+		t.Fatalf("decoding json:\n got err: %v", err)
+	}
+	err = json.Unmarshal([]byte(wantJSON), &want)
+	if err != nil {
+		t.Fatalf("decoding json:\n got err: %v", err)
+	}
+	if !reflect.DeepEqual(got, want) {
+		t.Errorf("schemaToMap:\ngot :%s\nwant:%s", got, want)
+	}
+}
+
+func TestParseJSONTag(t *testing.T) {
+	for _, tc := range []struct {
+		tag  string
+		want jsonTag
+	}{
+		{
+			tag:  "-",
+			want: jsonTag{ignore: true},
+		}, {
+			tag:  "name,omitempty",
+			want: jsonTag{apiName: "name"},
+		}, {
+			tag:  "name,omitempty,string",
+			want: jsonTag{apiName: "name", stringFormat: true},
+		},
+	} {
+		got, err := parseJSONTag(tc.tag)
+		if err != nil {
+			t.Fatalf("parsing json:\n got err: %v\ntag: %q", err, tc.tag)
+		}
+		if !reflect.DeepEqual(got, tc.want) {
+			t.Errorf("parseJSONTag:\ngot :%s\nwant:%s", got, tc.want)
+		}
+	}
+}
+func TestParseMalformedJSONTag(t *testing.T) {
+	for _, tag := range []string{
+		"",
+		"name",
+		"name,",
+		"name,blah",
+		"name,blah,string",
+		",omitempty",
+		",omitempty,string",
+		"name,omitempty,string,blah",
+	} {
+		_, err := parseJSONTag(tag)
+		if err == nil {
+			t.Fatalf("parsing json: expected err, got nil for tag: %v", tag)
+		}
+	}
+}
diff --git a/vendor/google.golang.org/api/gensupport/media.go b/vendor/google.golang.org/api/gensupport/media.go
new file mode 100644
index 000000000..685e08d5a
--- /dev/null
+++ b/vendor/google.golang.org/api/gensupport/media.go
@@ -0,0 +1,198 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package gensupport
+
+import (
+	"fmt"
+	"io"
+	"io/ioutil"
+	"mime/multipart"
+	"net/http"
+	"net/textproto"
+
+	"google.golang.org/api/googleapi"
+)
+
+const sniffBuffSize = 512
+
+func newContentSniffer(r io.Reader) *contentSniffer {
+	return &contentSniffer{r: r}
+}
+
+// contentSniffer wraps a Reader, and reports the content type determined by sniffing up to 512 bytes from the Reader.
+type contentSniffer struct {
+	r     io.Reader
+	start []byte // buffer for the sniffed bytes.
+	err   error  // set to any error encountered while reading bytes to be sniffed.
+
+	ctype   string // set on first sniff.
+	sniffed bool   // set to true on first sniff.
+}
+
+func (cs *contentSniffer) Read(p []byte) (n int, err error) {
+	// Ensure that the content type is sniffed before any data is consumed from Reader.
+	_, _ = cs.ContentType()
+
+	if len(cs.start) > 0 {
+		n := copy(p, cs.start)
+		cs.start = cs.start[n:]
+		return n, nil
+	}
+
+	// We may have read some bytes into start while sniffing, even if the read ended in an error.
+	// We should first return those bytes, then the error.
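+	// (cs.start is necessarily empty whenever this point is reached, so no
+	// sniffed bytes can be lost.)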
+	if cs.err != nil {
+		return 0, cs.err
+	}
+
+	// All bytes that were buffered while sniffing have now been handled, so just delegate to the underlying reader.
+	return cs.r.Read(p)
+}
+
+// ContentType returns the sniffed content type, and whether the content type was successfully sniffed.
+func (cs *contentSniffer) ContentType() (string, bool) {
+	if cs.sniffed {
+		return cs.ctype, cs.ctype != ""
+	}
+	cs.sniffed = true
+	// If ReadAll hits EOF, it returns err==nil.
+	cs.start, cs.err = ioutil.ReadAll(io.LimitReader(cs.r, sniffBuffSize))
+
+	// Don't try to detect the content type based on possibly incomplete data.
+	if cs.err != nil {
+		return "", false
+	}
+
+	cs.ctype = http.DetectContentType(cs.start)
+	return cs.ctype, true
+}
+
+// DetermineContentType determines the content type of the supplied reader.
+// If the content type is already known, it can be specified via ctype.
+// Otherwise, the content of media will be sniffed to determine the content type.
+// If media implements googleapi.ContentTyper (deprecated), this will be used
+// instead of sniffing the content.
+// After calling DetermineContentType the caller must not perform further reads on
+// media, but rather read from the Reader that is returned.
+func DetermineContentType(media io.Reader, ctype string) (io.Reader, string) {
+	// Note: callers could avoid calling DetermineContentType if ctype != "",
+	// but doing the check inside this function reduces the amount of
+	// generated code.
+	if ctype != "" {
+		return media, ctype
+	}
+
+	// For backwards compatibility, allow clients to set content
+	// type by providing a ContentTyper for media.
+	if typer, ok := media.(googleapi.ContentTyper); ok {
+		return media, typer.ContentType()
+	}
+
+	sniffer := newContentSniffer(media)
+	if ctype, ok := sniffer.ContentType(); ok {
+		return sniffer, ctype
+	}
+	// If content type could not be sniffed, reads from sniffer will eventually fail with an error.
+	return sniffer, ""
}
+
+type typeReader struct {
+	io.Reader
+	typ string
+}
+
+// multipartReader combines the contents of multiple readers to create a multipart/related HTTP body.
+// Close must be called if reads from the multipartReader are abandoned before reaching EOF.
+type multipartReader struct {
+	pr       *io.PipeReader
+	pipeOpen bool
+	ctype    string
+}
+
+func newMultipartReader(parts []typeReader) *multipartReader {
+	mp := &multipartReader{pipeOpen: true}
+	var pw *io.PipeWriter
+	mp.pr, pw = io.Pipe()
+	mpw := multipart.NewWriter(pw)
+	mp.ctype = "multipart/related; boundary=" + mpw.Boundary()
+	go func() {
+		for _, part := range parts {
+			w, err := mpw.CreatePart(typeHeader(part.typ))
+			if err != nil {
+				mpw.Close()
+				pw.CloseWithError(fmt.Errorf("googleapi: CreatePart failed: %v", err))
+				return
+			}
+			_, err = io.Copy(w, part.Reader)
+			if err != nil {
+				mpw.Close()
+				pw.CloseWithError(fmt.Errorf("googleapi: Copy failed: %v", err))
+				return
+			}
+		}
+
+		mpw.Close()
+		pw.Close()
+	}()
+	return mp
+}
+
+func (mp *multipartReader) Read(data []byte) (n int, err error) {
+	return mp.pr.Read(data)
+}
+
+func (mp *multipartReader) Close() error {
+	if !mp.pipeOpen {
+		return nil
+	}
+	mp.pipeOpen = false
+	return mp.pr.Close()
+}
+
+// CombineBodyMedia combines a JSON body with media content to create a multipart/related HTTP body.
+// It returns a ReadCloser containing the combined body, and the overall "multipart/related" content type, with random boundary.
+//
+// The caller must call Close on the returned ReadCloser if reads are abandoned before reaching EOF.
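+//
+// Sketch of intended use (body, media, and url are caller-supplied):
+//
+//	r, ctype := CombineBodyMedia(body, "application/json", media, "image/jpeg")
+//	defer r.Close()
+//	req, _ := http.NewRequest("POST", url, r)
+//	req.Header.Set("Content-Type", ctype)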
+func CombineBodyMedia(body io.Reader, bodyContentType string, media io.Reader, mediaContentType string) (io.ReadCloser, string) { + mp := newMultipartReader([]typeReader{ + {body, bodyContentType}, + {media, mediaContentType}, + }) + return mp, mp.ctype +} + +func typeHeader(contentType string) textproto.MIMEHeader { + h := make(textproto.MIMEHeader) + h.Set("Content-Type", contentType) + return h +} + +// PrepareUpload determines whether the data in the supplied reader should be +// uploaded in a single request, or in sequential chunks. +// chunkSize is the size of the chunk that media should be split into. +// If chunkSize is non-zero and the contents of media do not fit in a single +// chunk (or there is an error reading media), then media will be returned as a +// ResumableBuffer. Otherwise, media will be returned as a Reader. +// +// After PrepareUpload has been called, media should no longer be used: the +// media content should be accessed via one of the return values. +func PrepareUpload(media io.Reader, chunkSize int) (io.Reader, + *ResumableBuffer) { + if chunkSize == 0 { // do not chunk + return media, nil + } + + rb := NewResumableBuffer(media, chunkSize) + rdr, _, _, err := rb.Chunk() + + if err == io.EOF { // we can upload this in a single request + return rdr, nil + } + // err might be a non-EOF error. If it is, the next call to rb.Chunk will + // return the same error. Returning a ResumableBuffer ensures that this error + // will be handled at some point. + + return nil, rb +} diff --git a/vendor/google.golang.org/api/gensupport/media_test.go b/vendor/google.golang.org/api/gensupport/media_test.go new file mode 100644 index 000000000..f0dd36499 --- /dev/null +++ b/vendor/google.golang.org/api/gensupport/media_test.go @@ -0,0 +1,142 @@ +// Copyright 2015 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gensupport + +import ( + "bytes" + "io" + "io/ioutil" + "reflect" + "testing" +) + +func TestContentSniffing(t *testing.T) { + type testCase struct { + data []byte // the data to read from the Reader + finalErr error // error to return after data has been read + + wantContentType string + wantContentTypeResult bool + } + + for _, tc := range []testCase{ + { + data: []byte{0, 0, 0, 0}, + finalErr: nil, + wantContentType: "application/octet-stream", + wantContentTypeResult: true, + }, + { + data: []byte(""), + finalErr: nil, + wantContentType: "text/plain; charset=utf-8", + wantContentTypeResult: true, + }, + { + data: []byte(""), + finalErr: io.ErrUnexpectedEOF, + wantContentType: "text/plain; charset=utf-8", + wantContentTypeResult: false, + }, + { + data: []byte("abc"), + finalErr: nil, + wantContentType: "text/plain; charset=utf-8", + wantContentTypeResult: true, + }, + { + data: []byte("abc"), + finalErr: io.ErrUnexpectedEOF, + wantContentType: "text/plain; charset=utf-8", + wantContentTypeResult: false, + }, + // The following examples contain more bytes than are buffered for sniffing. + { + data: bytes.Repeat([]byte("a"), 513), + finalErr: nil, + wantContentType: "text/plain; charset=utf-8", + wantContentTypeResult: true, + }, + { + data: bytes.Repeat([]byte("a"), 513), + finalErr: io.ErrUnexpectedEOF, + wantContentType: "text/plain; charset=utf-8", + wantContentTypeResult: true, // true because error is after first 512 bytes. 
+		},
+	} {
+		er := &errReader{buf: tc.data, err: tc.finalErr}
+
+		sct := newContentSniffer(er)
+
+		// Even if there was an error during the first 512 bytes, we should still be able to read those bytes.
+		buf, err := ioutil.ReadAll(sct)
+
+		if !reflect.DeepEqual(buf, tc.data) {
+			t.Fatalf("Failed reading buffer: got: %q; want:%q", buf, tc.data)
+		}
+
+		if err != tc.finalErr {
+			t.Fatalf("Reading buffer error: got: %v; want: %v", err, tc.finalErr)
+		}
+
+		ct, ok := sct.ContentType()
+		if ok != tc.wantContentTypeResult {
+			t.Fatalf("Content type result got: %v; want: %v", ok, tc.wantContentTypeResult)
+		}
+		if ok && ct != tc.wantContentType {
+			t.Fatalf("Content type got: %q; want: %q", ct, tc.wantContentType)
+		}
+	}
+}
+
+type staticContentTyper struct {
+	io.Reader
+}
+
+func (sct staticContentTyper) ContentType() string {
+	return "static content type"
+}
+
+func TestDetermineContentType(t *testing.T) {
+	data := []byte("abc")
+	rdr := func() io.Reader {
+		return bytes.NewBuffer(data)
+	}
+
+	type testCase struct {
+		r                   io.Reader
+		explicitContentType string
+		wantContentType     string
+	}
+
+	for _, tc := range []testCase{
+		{
+			r:               rdr(),
+			wantContentType: "text/plain; charset=utf-8",
+		},
+		{
+			r:               staticContentTyper{rdr()},
+			wantContentType: "static content type",
+		},
+		{
+			r:                   staticContentTyper{rdr()},
+			explicitContentType: "explicit",
+			wantContentType:     "explicit",
+		},
+	} {
+		r, ctype := DetermineContentType(tc.r, tc.explicitContentType)
+		got, err := ioutil.ReadAll(r)
+		if err != nil {
+			t.Fatalf("Failed reading buffer: %v", err)
+		}
+		if !reflect.DeepEqual(got, data) {
+			t.Fatalf("Failed reading buffer: got: %q; want:%q", got, data)
+		}
+
+		if ctype != tc.wantContentType {
+			t.Fatalf("Content type got: %q; want: %q", ctype, tc.wantContentType)
+		}
+	}
+}
diff --git a/vendor/google.golang.org/api/gensupport/params.go b/vendor/google.golang.org/api/gensupport/params.go
new file mode 100644
index 000000000..1dd747921
--- /dev/null
+++ b/vendor/google.golang.org/api/gensupport/params.go
@@ -0,0 +1,41 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package gensupport
+
+import (
+	"net/url"
+
+	"google.golang.org/api/googleapi"
+)
+
+// URLParams is a simplified replacement for url.Values
+// that safely builds up URL parameters for encoding.
+type URLParams map[string][]string
+
+// Set sets the key to value.
+// It replaces any existing values.
+func (u URLParams) Set(key, value string) {
+	u[key] = []string{value}
+}
+
+// SetMulti sets the key to an array of values.
+// It replaces any existing values.
+// Note that values must not be modified after calling SetMulti
+// so the caller is responsible for making a copy if necessary.
+func (u URLParams) SetMulti(key string, values []string) {
+	u[key] = values
+}
+
+// Encode encodes the values into ``URL encoded'' form
+// ("bar=baz&foo=quux") sorted by key.
+func (u URLParams) Encode() string {
+	return url.Values(u).Encode()
+}
+
+// SetOptions applies the given call options to the URL parameters.
+func SetOptions(u URLParams, opts ...googleapi.CallOption) {
+	for _, o := range opts {
+		u.Set(o.Get())
+	}
+}
diff --git a/vendor/google.golang.org/api/gensupport/resumable.go b/vendor/google.golang.org/api/gensupport/resumable.go
new file mode 100644
index 000000000..ff11a1bed
--- /dev/null
+++ b/vendor/google.golang.org/api/gensupport/resumable.go
@@ -0,0 +1,134 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package gensupport
+
+import (
+	"fmt"
+	"io"
+	"net/http"
+	"sync"
+	"time"
+
+	"golang.org/x/net/context"
+	"golang.org/x/net/context/ctxhttp"
+)
+
+const (
+	// statusResumeIncomplete is the code returned by the Google uploader when the transfer is not yet complete.
+	statusResumeIncomplete = 308
+)
+
+// uploadPause determines the delay between failed upload attempts.
+// TODO(mcgreevy): improve this retry mechanism.
+var uploadPause = 1 * time.Second
+
+// ResumableUpload is used by the generated APIs to provide resumable uploads.
+// It is not used by developers directly.
+type ResumableUpload struct {
+	Client *http.Client
+	// URI is the resumable resource destination provided by the server after specifying "&uploadType=resumable".
+	URI       string
+	UserAgent string // User-Agent for header of the request
+	// Media is the object being uploaded.
+	Media *ResumableBuffer
+	// MediaType defines the media type, e.g. "image/jpeg".
+	MediaType string
+
+	mu       sync.Mutex // guards progress
+	progress int64      // number of bytes uploaded so far
+
+	// Callback is an optional function that will be periodically called with the cumulative number of bytes uploaded.
+	Callback func(int64)
+}
+
+// Progress returns the number of bytes uploaded at this point.
+func (rx *ResumableUpload) Progress() int64 {
+	rx.mu.Lock()
+	defer rx.mu.Unlock()
+	return rx.progress
+}
+
+func (rx *ResumableUpload) transferChunks(ctx context.Context) (*http.Response, error) {
+	var res *http.Response
+	var err error
+
+	for {
+		select { // Check for cancellation
+		case <-ctx.Done():
+			return nil, ctx.Err()
+		default:
+		}
+
+		chunk, off, size, e := rx.Media.Chunk()
+		reqSize := int64(size)
+		done := e == io.EOF
+
+		if !done && e != nil {
+			return nil, e
+		}
+
+		req, _ := http.NewRequest("POST", rx.URI, chunk)
+		req.ContentLength = reqSize
+		var contentRange string
+		if done {
+			if reqSize == 0 {
+				contentRange = fmt.Sprintf("bytes */%v", off)
+			} else {
+				contentRange = fmt.Sprintf("bytes %v-%v/%v", off, off+reqSize-1, off+reqSize)
+			}
+		} else {
+			contentRange = fmt.Sprintf("bytes %v-%v/*", off, off+reqSize-1)
+		}
+		req.Header.Set("Content-Range", contentRange)
+		req.Header.Set("Content-Type", rx.MediaType)
+		req.Header.Set("User-Agent", rx.UserAgent)
+		res, err = ctxhttp.Do(ctx, rx.Client, req)
+
+		// Only inspect res when the request itself succeeded: res may be
+		// nil when err is non-nil, so the status checks must be grouped.
+		success := err == nil && (res.StatusCode == statusResumeIncomplete || res.StatusCode == http.StatusOK)
+		if success && reqSize > 0 {
+			rx.mu.Lock()
+			rx.progress = off + reqSize // number of bytes sent so far
+			rx.mu.Unlock()
+			if rx.Callback != nil {
+				rx.Callback(off + reqSize)
+			}
+		}
+		if err != nil || res.StatusCode != statusResumeIncomplete {
+			break
+		}
+		rx.Media.Next()
+		res.Body.Close()
+	}
+	return res, err
+}
+
+// Upload starts the process of a resumable upload with a cancellable context.
+// It retries indefinitely (with a pause of uploadPause between attempts) until cancelled.
+// It is called from the auto-generated API code and is not visible to the user.
+// rx is private to the auto-generated API code.
+// Exactly one of resp or err will be nil. If resp is non-nil, the caller must call resp.Body.Close.
+func (rx *ResumableUpload) Upload(ctx context.Context) (resp *http.Response, err error) {
+	for {
+		resp, err = rx.transferChunks(ctx)
+		// It's possible for err and resp to both be non-nil here, but we expose a simpler
+		// contract to our callers: exactly one of resp and err will be non-nil.
This means + // that any response body must be closed here before returning a non-nil error. + if err != nil { + if resp != nil && resp.Body != nil { + resp.Body.Close() + } + return nil, err + } + if resp.StatusCode == http.StatusCreated || resp.StatusCode == http.StatusOK { + return resp, nil + } + resp.Body.Close() + select { // Check for cancellation + case <-ctx.Done(): + return nil, ctx.Err() + case <-time.After(uploadPause): + } + } +} diff --git a/vendor/google.golang.org/api/gensupport/resumable_test.go b/vendor/google.golang.org/api/gensupport/resumable_test.go new file mode 100644 index 000000000..c45bb31f0 --- /dev/null +++ b/vendor/google.golang.org/api/gensupport/resumable_test.go @@ -0,0 +1,219 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gensupport + +import ( + "fmt" + "io" + "io/ioutil" + "net/http" + "reflect" + "strings" + "testing" + "time" + + "golang.org/x/net/context" +) + +type unexpectedReader struct{} + +func (unexpectedReader) Read([]byte) (int, error) { + return 0, fmt.Errorf("unexpected read in test") +} + +// event is an expected request/response pair +type event struct { + // the byte range header that should be present in a request. + byteRange string + // the http status code to send in response. + responseStatus int +} + +// interruptibleTransport is configured with a canned set of requests/responses. +// It records the incoming data, unless the corresponding event is configured to return +// http.StatusServiceUnavailable. +type interruptibleTransport struct { + events []event + buf []byte + bodies bodyTracker +} + +// bodyTracker keeps track of response bodies that have not been closed. +type bodyTracker map[io.ReadCloser]struct{} + +func (bt bodyTracker) Add(body io.ReadCloser) { + bt[body] = struct{}{} +} + +func (bt bodyTracker) Close(body io.ReadCloser) { + delete(bt, body) +} + +type trackingCloser struct { + io.Reader + tracker bodyTracker +} + +func (tc *trackingCloser) Close() error { + tc.tracker.Close(tc) + return nil +} + +func (tc *trackingCloser) Open() { + tc.tracker.Add(tc) +} + +func (t *interruptibleTransport) RoundTrip(req *http.Request) (*http.Response, error) { + ev := t.events[0] + t.events = t.events[1:] + if got, want := req.Header.Get("Content-Range"), ev.byteRange; got != want { + return nil, fmt.Errorf("byte range: got %s; want %s", got, want) + } + + if ev.responseStatus != http.StatusServiceUnavailable { + buf, err := ioutil.ReadAll(req.Body) + if err != nil { + return nil, fmt.Errorf("error reading from request data: %v", err) + } + t.buf = append(t.buf, buf...) 
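+		// Record the bytes that were uploaded so the test can later verify
+		// that the complete media content arrived intact.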
+	}
+
+	tc := &trackingCloser{unexpectedReader{}, t.bodies}
+	tc.Open()
+
+	res := &http.Response{
+		StatusCode: ev.responseStatus,
+		Header:     http.Header{},
+		Body:       tc,
+	}
+	return res, nil
+}
+
+type progressRecorder struct {
+	updates []int64
+}
+
+func (pr *progressRecorder) ProgressUpdate(current int64) {
+	pr.updates = append(pr.updates, current)
+}
+
+func TestInterruptedTransferChunks(t *testing.T) {
+	type testCase struct {
+		data         string
+		chunkSize    int
+		events       []event
+		wantProgress []int64
+	}
+
+	for _, tc := range []testCase{
+		{
+			data:      strings.Repeat("a", 300),
+			chunkSize: 90,
+			events: []event{
+				{"bytes 0-89/*", http.StatusServiceUnavailable},
+				{"bytes 0-89/*", 308},
+				{"bytes 90-179/*", 308},
+				{"bytes 180-269/*", http.StatusServiceUnavailable},
+				{"bytes 180-269/*", 308},
+				{"bytes 270-299/300", 200},
+			},
+
+			wantProgress: []int64{90, 180, 270, 300},
+		},
+		{
+			data:      strings.Repeat("a", 20),
+			chunkSize: 10,
+			events: []event{
+				{"bytes 0-9/*", http.StatusServiceUnavailable},
+				{"bytes 0-9/*", 308},
+				{"bytes 10-19/*", http.StatusServiceUnavailable},
+				{"bytes 10-19/*", 308},
+				// 0 byte final request demands a byte range with leading asterisk.
+				{"bytes */20", http.StatusServiceUnavailable},
+				{"bytes */20", 200},
+			},
+
+			wantProgress: []int64{10, 20},
+		},
+	} {
+		media := strings.NewReader(tc.data)
+
+		tr := &interruptibleTransport{
+			buf:    make([]byte, 0, len(tc.data)),
+			events: tc.events,
+			bodies: bodyTracker{},
+		}
+
+		// TODO(mcgreevy): replace this sleep with something cleaner.
+		uploadPause = time.Duration(0) // skip sleep in tests.
+		pr := progressRecorder{}
+		rx := &ResumableUpload{
+			Client:    &http.Client{Transport: tr},
+			Media:     NewResumableBuffer(media, tc.chunkSize),
+			MediaType: "text/plain",
+			Callback:  pr.ProgressUpdate,
+		}
+		res, err := rx.Upload(context.Background())
+		if err == nil {
+			res.Body.Close()
+		}
+		if err != nil || res == nil || res.StatusCode != http.StatusOK {
+			if res == nil {
+				t.Errorf("Upload not successful, res=nil: %v", err)
+			} else {
+				t.Errorf("Upload not successful, statusCode=%v: %v", res.StatusCode, err)
+			}
+		}
+		if !reflect.DeepEqual(tr.buf, []byte(tc.data)) {
+			t.Errorf("transferred contents:\ngot %s\nwant %s", tr.buf, tc.data)
+		}
+
+		if !reflect.DeepEqual(pr.updates, tc.wantProgress) {
+			t.Errorf("progress updates: got %v, want %v", pr.updates, tc.wantProgress)
+		}
+
+		if len(tr.events) > 0 {
+			t.Errorf("did not observe all expected events. leftover events: %v", tr.events)
+		}
+		if len(tr.bodies) > 0 {
+			t.Errorf("unclosed request bodies: %v", tr.bodies)
+		}
+	}
+}
+
+func TestCancelUpload(t *testing.T) {
+	const (
+		chunkSize = 90
+		mediaSize = 300
+	)
+	media := strings.NewReader(strings.Repeat("a", mediaSize))
+
+	tr := &interruptibleTransport{
+		buf: make([]byte, 0, mediaSize),
+	}
+
+	// TODO(mcgreevy): replace this sleep with something cleaner.
+	// At that time, test cancelling upload at some point other than before it starts.
+	uploadPause = time.Duration(0) // skip sleep in tests.
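+	// The context below is cancelled before Upload is ever called, so
+	// transferChunks observes ctx.Done() on its first iteration and Upload
+	// returns ctx.Err() without issuing any HTTP requests.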
+	pr := progressRecorder{}
+	rx := &ResumableUpload{
+		Client:    &http.Client{Transport: tr},
+		Media:     NewResumableBuffer(media, chunkSize),
+		MediaType: "text/plain",
+		Callback:  pr.ProgressUpdate,
+	}
+	ctx, cancelFunc := context.WithCancel(context.Background())
+	cancelFunc() // stop the upload that hasn't started yet
+	res, err := rx.Upload(ctx)
+	if err != context.Canceled {
+		t.Errorf("Upload err: got: %v; want: context cancelled", err)
+	}
+	if res != nil {
+		t.Errorf("Upload result: got: %v; want: nil", res)
+	}
+	if pr.updates != nil {
+		t.Errorf("progress updates: got %v; want: nil", pr.updates)
+	}
+}
diff --git a/vendor/google.golang.org/api/gensupport/util_test.go b/vendor/google.golang.org/api/gensupport/util_test.go
new file mode 100644
index 000000000..6da208737
--- /dev/null
+++ b/vendor/google.golang.org/api/gensupport/util_test.go
@@ -0,0 +1,25 @@
+// Copyright 2016 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package gensupport
+
+import "io"
+
+// errReader reads out of a buffer until it is empty, then returns the specified error.
+type errReader struct {
+	buf []byte
+	err error
+}
+
+func (er *errReader) Read(p []byte) (int, error) {
+	if len(er.buf) == 0 {
+		if er.err == nil {
+			return 0, io.EOF
+		}
+		return 0, er.err
+	}
+	n := copy(p, er.buf)
+	er.buf = er.buf[n:]
+	return n, nil
+}
diff --git a/vendor/google.golang.org/api/googleapi/googleapi.go b/vendor/google.golang.org/api/googleapi/googleapi.go
new file mode 100644
index 000000000..8796e3e09
--- /dev/null
+++ b/vendor/google.golang.org/api/googleapi/googleapi.go
@@ -0,0 +1,418 @@
+// Copyright 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package googleapi contains the common code shared by all Google API
+// libraries.
+package googleapi
+
+import (
+	"bytes"
+	"encoding/json"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"net/http"
+	"net/url"
+	"strings"
+
+	"google.golang.org/api/googleapi/internal/uritemplates"
+)
+
+// ContentTyper is an interface for Readers which know (or would like
+// to override) their Content-Type. If a media body doesn't implement
+// ContentTyper, the type is sniffed from the content using
+// http.DetectContentType.
+type ContentTyper interface {
+	ContentType() string
+}
+
+// A SizeReaderAt is a ReaderAt with a Size method.
+// An io.SectionReader implements SizeReaderAt.
+type SizeReaderAt interface {
+	io.ReaderAt
+	Size() int64
+}
+
+// ServerResponse is embedded in each Do response and
+// provides the HTTP status code and header sent by the server.
+type ServerResponse struct {
+	// HTTPStatusCode is the server's response status code.
+	// When using a resource method's Do call, this will always be in the 2xx range.
+	HTTPStatusCode int
+	// Header contains the response header fields from the server.
+	Header http.Header
+}
+
+const (
+	Version = "0.5"
+
+	// UserAgent is the header string used to identify this package.
+	UserAgent = "google-api-go-client/" + Version
+
+	// The default chunk size to use for resumable uploads if not specified by the user.
+	DefaultUploadChunkSize = 8 * 1024 * 1024
+
+	// The minimum chunk size that can be used for resumable uploads. All
+	// user-specified chunk sizes must be a multiple of this value.
+	MinUploadChunkSize = 256 * 1024
+)
+
+// Error contains an error response from the server.
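+// A typical JSON error body that decodes into this struct (the same shape
+// exercised by the CheckResponse tests later in this patch) looks like:
+//
+//	{"error": {"code": 400, "message": "Bad Request",
+//	           "errors": [{"reason": "keyInvalid", "message": "Bad Request"}]}}
+//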
+type Error struct {
+	// Code is the HTTP response status code and will always be populated.
+	Code int `json:"code"`
+	// Message is the server response message and is only populated when
+	// explicitly referenced by the JSON server response.
+	Message string `json:"message"`
+	// Body is the raw response returned by the server.
+	// It is often but not always JSON, depending on how the request fails.
+	Body string
+	// Header contains the response header fields from the server.
+	Header http.Header
+
+	Errors []ErrorItem
+}
+
+// ErrorItem is a detailed error code & message from the Google API frontend.
+type ErrorItem struct {
+	// Reason is the typed error code. For example: "some_example".
+	Reason string `json:"reason"`
+	// Message is the human-readable description of the error.
+	Message string `json:"message"`
+}
+
+func (e *Error) Error() string {
+	if len(e.Errors) == 0 && e.Message == "" {
+		return fmt.Sprintf("googleapi: got HTTP response code %d with body: %v", e.Code, e.Body)
+	}
+	var buf bytes.Buffer
+	fmt.Fprintf(&buf, "googleapi: Error %d: ", e.Code)
+	if e.Message != "" {
+		fmt.Fprintf(&buf, "%s", e.Message)
+	}
+	if len(e.Errors) == 0 {
+		return strings.TrimSpace(buf.String())
+	}
+	if len(e.Errors) == 1 && e.Errors[0].Message == e.Message {
+		fmt.Fprintf(&buf, ", %s", e.Errors[0].Reason)
+		return buf.String()
+	}
+	fmt.Fprintln(&buf, "\nMore details:")
+	for _, v := range e.Errors {
+		fmt.Fprintf(&buf, "Reason: %s, Message: %s\n", v.Reason, v.Message)
+	}
+	return buf.String()
+}
+
+type errorReply struct {
+	Error *Error `json:"error"`
+}
+
+// CheckResponse returns an error (of type *Error) if the response
+// status code is not 2xx.
+func CheckResponse(res *http.Response) error {
+	if res.StatusCode >= 200 && res.StatusCode <= 299 {
+		return nil
+	}
+	slurp, err := ioutil.ReadAll(res.Body)
+	if err == nil {
+		jerr := new(errorReply)
+		err = json.Unmarshal(slurp, jerr)
+		if err == nil && jerr.Error != nil {
+			if jerr.Error.Code == 0 {
+				jerr.Error.Code = res.StatusCode
+			}
+			jerr.Error.Body = string(slurp)
+			return jerr.Error
+		}
+	}
+	return &Error{
+		Code:   res.StatusCode,
+		Body:   string(slurp),
+		Header: res.Header,
+	}
+}
+
+// IsNotModified reports whether err is the result of the
+// server replying with http.StatusNotModified.
+// Such error values are sometimes returned by "Do" methods
+// on calls when If-None-Match is used.
+func IsNotModified(err error) bool {
+	if err == nil {
+		return false
+	}
+	ae, ok := err.(*Error)
+	return ok && ae.Code == http.StatusNotModified
+}
+
+// CheckMediaResponse returns an error (of type *Error) if the response
+// status code is not 2xx. Unlike CheckResponse it does not assume the
+// body is a JSON error document.
+func CheckMediaResponse(res *http.Response) error {
+	if res.StatusCode >= 200 && res.StatusCode <= 299 {
+		return nil
+	}
+	slurp, _ := ioutil.ReadAll(io.LimitReader(res.Body, 1<<20))
+	res.Body.Close()
+	return &Error{
+		Code: res.StatusCode,
+		Body: string(slurp),
+	}
+}
+
+type MarshalStyle bool
+
+var WithDataWrapper = MarshalStyle(true)
+var WithoutDataWrapper = MarshalStyle(false)
+
+func (wrap MarshalStyle) JSONReader(v interface{}) (io.Reader, error) {
+	buf := new(bytes.Buffer)
+	if wrap {
+		buf.Write([]byte(`{"data": `))
+	}
+	err := json.NewEncoder(buf).Encode(v)
+	if err != nil {
+		return nil, err
+	}
+	if wrap {
+		buf.Write([]byte(`}`))
+	}
+	return buf, nil
+}
+
+// endingWithErrorReader reads from r until it returns an error.
If the
+// final error from r is io.EOF and e is non-nil, e is used instead.
+type endingWithErrorReader struct {
+	r io.Reader
+	e error
+}
+
+func (er endingWithErrorReader) Read(p []byte) (n int, err error) {
+	n, err = er.r.Read(p)
+	if err == io.EOF && er.e != nil {
+		err = er.e
+	}
+	return
+}
+
+// countingWriter counts the number of bytes it receives to write, but
+// discards them.
+type countingWriter struct {
+	n *int64
+}
+
+func (w countingWriter) Write(p []byte) (int, error) {
+	*w.n += int64(len(p))
+	return len(p), nil
+}
+
+// ProgressUpdater is a function that is called upon every progress update of a resumable upload.
+// This is the only part of a resumable upload (from googleapi) that is usable by the developer.
+// The remaining usable pieces of resumable uploads are exposed in each auto-generated API.
+type ProgressUpdater func(current, total int64)
+
+type MediaOption interface {
+	setOptions(o *MediaOptions)
+}
+
+type contentTypeOption string
+
+func (ct contentTypeOption) setOptions(o *MediaOptions) {
+	o.ContentType = string(ct)
+}
+
+// ContentType returns a MediaOption which sets the content type of data to be uploaded.
+func ContentType(ctype string) MediaOption {
+	return contentTypeOption(ctype)
+}
+
+type chunkSizeOption int
+
+func (cs chunkSizeOption) setOptions(o *MediaOptions) {
+	size := int(cs)
+	if size%MinUploadChunkSize != 0 {
+		size += MinUploadChunkSize - (size % MinUploadChunkSize)
+	}
+	o.ChunkSize = size
+}
+
+// ChunkSize returns a MediaOption which sets the chunk size for media uploads.
+// size will be rounded up to the nearest multiple of 256K.
+// Media which contains fewer than size bytes will be uploaded in a single request.
+// Media which contains size bytes or more will be uploaded in separate chunks.
+// If size is zero, media will be uploaded in a single request.
+func ChunkSize(size int) MediaOption {
+	return chunkSizeOption(size)
+}
+
+// MediaOptions stores options for customizing media upload. It is not used by developers directly.
+type MediaOptions struct {
+	ContentType string
+	ChunkSize   int
+}
+
+// ProcessMediaOptions stores options from opts in a MediaOptions.
+// It is not used by developers directly.
+func ProcessMediaOptions(opts []MediaOption) *MediaOptions {
+	mo := &MediaOptions{ChunkSize: DefaultUploadChunkSize}
+	for _, o := range opts {
+		o.setOptions(mo)
+	}
+	return mo
+}
+
+func ResolveRelative(basestr, relstr string) string {
+	u, _ := url.Parse(basestr)
+	rel, _ := url.Parse(relstr)
+	u = u.ResolveReference(rel)
+	us := u.String()
+	us = strings.Replace(us, "%7B", "{", -1)
+	us = strings.Replace(us, "%7D", "}", -1)
+	return us
+}
+
+// has4860Fix is whether this Go environment contains the fix for
+// http://golang.org/issue/4860
+var has4860Fix bool
+
+// init initializes has4860Fix by checking the behavior of the net/http package.
+func init() {
+	r := http.Request{
+		URL: &url.URL{
+			Scheme: "http",
+			Opaque: "//opaque",
+		},
+	}
+	b := &bytes.Buffer{}
+	r.Write(b)
+	has4860Fix = bytes.HasPrefix(b.Bytes(), []byte("GET http"))
+}
+
+// SetOpaque sets u.Opaque from u.Path such that HTTP requests to it
+// don't alter any hex-escaped characters in u.Path.
+func SetOpaque(u *url.URL) {
+	u.Opaque = "//" + u.Host + u.Path
+	if !has4860Fix {
+		u.Opaque = u.Scheme + ":" + u.Opaque
+	}
+}
+
+// Expand substitutes any {encoded} strings in the URL passed in using
+// the map supplied.
+//
+// This calls SetOpaque to avoid encoding of the parameters in the URL path.
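+// For example (an illustrative sketch, not upstream documentation):
+//
+//	u, _ := url.Parse("https://example.com/b/{bucket}/o")
+//	Expand(u, map[string]string{"bucket": "my-bucket"})
+//	// u.Path is now "/b/my-bucket/o"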
+func Expand(u *url.URL, expansions map[string]string) { + expanded, err := uritemplates.Expand(u.Path, expansions) + if err == nil { + u.Path = expanded + SetOpaque(u) + } +} + +// CloseBody is used to close res.Body. +// Prior to calling Close, it also tries to Read a small amount to see an EOF. +// Not seeing an EOF can prevent HTTP Transports from reusing connections. +func CloseBody(res *http.Response) { + if res == nil || res.Body == nil { + return + } + // Justification for 3 byte reads: two for up to "\r\n" after + // a JSON/XML document, and then 1 to see EOF if we haven't yet. + // TODO(bradfitz): detect Go 1.3+ and skip these reads. + // See https://codereview.appspot.com/58240043 + // and https://codereview.appspot.com/49570044 + buf := make([]byte, 1) + for i := 0; i < 3; i++ { + _, err := res.Body.Read(buf) + if err != nil { + break + } + } + res.Body.Close() + +} + +// VariantType returns the type name of the given variant. +// If the map doesn't contain the named key or the value is not a []interface{}, "" is returned. +// This is used to support "variant" APIs that can return one of a number of different types. +func VariantType(t map[string]interface{}) string { + s, _ := t["type"].(string) + return s +} + +// ConvertVariant uses the JSON encoder/decoder to fill in the struct 'dst' with the fields found in variant 'v'. +// This is used to support "variant" APIs that can return one of a number of different types. +// It reports whether the conversion was successful. +func ConvertVariant(v map[string]interface{}, dst interface{}) bool { + var buf bytes.Buffer + err := json.NewEncoder(&buf).Encode(v) + if err != nil { + return false + } + return json.Unmarshal(buf.Bytes(), dst) == nil +} + +// A Field names a field to be retrieved with a partial response. +// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// +// Partial responses can dramatically reduce the amount of data that must be sent to your application. +// In order to request partial responses, you can specify the full list of fields +// that your application needs by adding the Fields option to your request. +// +// Field strings use camelCase with leading lower-case characters to identify fields within the response. +// +// For example, if your response has a "NextPageToken" and a slice of "Items" with "Id" fields, +// you could request just those fields like this: +// +// svc.Events.List().Fields("nextPageToken", "items/id").Do() +// +// or if you were also interested in each Item's "Updated" field, you can combine them like this: +// +// svc.Events.List().Fields("nextPageToken", "items(id,updated)").Do() +// +// More information about field formatting can be found here: +// https://developers.google.com/+/api/#fields-syntax +// +// Another way to find field names is through the Google API explorer: +// https://developers.google.com/apis-explorer/#p/ +type Field string + +// CombineFields combines fields into a single string. +func CombineFields(s []Field) string { + r := make([]string, len(s)) + for i, v := range s { + r[i] = string(v) + } + return strings.Join(r, ",") +} + +// A CallOption is an optional argument to an API call. +// It should be treated as an opaque value by users of Google APIs. +// +// A CallOption is something that configures an API call in a way that is +// not specific to that API; for instance, controlling the quota user for +// an API call is common across many APIs, and is thus a CallOption. 
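+// For example (illustrative only), QuotaUser("my-service") below yields a
+// CallOption whose Get method returns the pair ("quotaUser", "my-service");
+// generated Do methods attach such pairs to the request as URL query
+// parameters.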
+type CallOption interface { + Get() (key, value string) +} + +// QuotaUser returns a CallOption that will set the quota user for a call. +// The quota user can be used by server-side applications to control accounting. +// It can be an arbitrary string up to 40 characters, and will override UserIP +// if both are provided. +func QuotaUser(u string) CallOption { return quotaUser(u) } + +type quotaUser string + +func (q quotaUser) Get() (string, string) { return "quotaUser", string(q) } + +// UserIP returns a CallOption that will set the "userIp" parameter of a call. +// This should be the IP address of the originating request. +func UserIP(ip string) CallOption { return userIP(ip) } + +type userIP string + +func (i userIP) Get() (string, string) { return "userIp", string(i) } + +// TODO: Fields too diff --git a/vendor/google.golang.org/api/googleapi/googleapi_test.go b/vendor/google.golang.org/api/googleapi/googleapi_test.go new file mode 100644 index 000000000..4fa051a5d --- /dev/null +++ b/vendor/google.golang.org/api/googleapi/googleapi_test.go @@ -0,0 +1,380 @@ +// Copyright 2011 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package googleapi + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "net/url" + "reflect" + "strings" + "testing" +) + +type SetOpaqueTest struct { + in *url.URL + wantRequestURI string +} + +var setOpaqueTests = []SetOpaqueTest{ + // no path + { + &url.URL{ + Scheme: "http", + Host: "www.golang.org", + }, + "http://www.golang.org", + }, + // path + { + &url.URL{ + Scheme: "http", + Host: "www.golang.org", + Path: "/", + }, + "http://www.golang.org/", + }, + // file with hex escaping + { + &url.URL{ + Scheme: "https", + Host: "www.golang.org", + Path: "/file%20one&two", + }, + "https://www.golang.org/file%20one&two", + }, + // query + { + &url.URL{ + Scheme: "http", + Host: "www.golang.org", + Path: "/", + RawQuery: "q=go+language", + }, + "http://www.golang.org/?q=go+language", + }, + // file with hex escaping in path plus query + { + &url.URL{ + Scheme: "https", + Host: "www.golang.org", + Path: "/file%20one&two", + RawQuery: "q=go+language", + }, + "https://www.golang.org/file%20one&two?q=go+language", + }, + // query with hex escaping + { + &url.URL{ + Scheme: "http", + Host: "www.golang.org", + Path: "/", + RawQuery: "q=go%20language", + }, + "http://www.golang.org/?q=go%20language", + }, +} + +// prefixTmpl is a template for the expected prefix of the output of writing +// an HTTP request. 
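+// For example, per the test table above, writing a request for
+// "http://www.golang.org/" is expected to begin with
+// "GET http://www.golang.org/ HTTP/1.1\r\nHost: www.golang.org\r\n".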
+const prefixTmpl = "GET %v HTTP/1.1\r\nHost: %v\r\n" + +func TestSetOpaque(t *testing.T) { + for _, test := range setOpaqueTests { + u := *test.in + SetOpaque(&u) + + w := &bytes.Buffer{} + r := &http.Request{URL: &u} + if err := r.Write(w); err != nil { + t.Errorf("write request: %v", err) + continue + } + + prefix := fmt.Sprintf(prefixTmpl, test.wantRequestURI, test.in.Host) + if got := string(w.Bytes()); !strings.HasPrefix(got, prefix) { + t.Errorf("got %q expected prefix %q", got, prefix) + } + } +} + +type ExpandTest struct { + in string + expansions map[string]string + want string +} + +var expandTests = []ExpandTest{ + // no expansions + { + "http://www.golang.org/", + map[string]string{}, + "http://www.golang.org/", + }, + // one expansion, no escaping + { + "http://www.golang.org/{bucket}/delete", + map[string]string{ + "bucket": "red", + }, + "http://www.golang.org/red/delete", + }, + // one expansion, with hex escapes + { + "http://www.golang.org/{bucket}/delete", + map[string]string{ + "bucket": "red/blue", + }, + "http://www.golang.org/red%2Fblue/delete", + }, + // one expansion, with space + { + "http://www.golang.org/{bucket}/delete", + map[string]string{ + "bucket": "red or blue", + }, + "http://www.golang.org/red%20or%20blue/delete", + }, + // expansion not found + { + "http://www.golang.org/{object}/delete", + map[string]string{ + "bucket": "red or blue", + }, + "http://www.golang.org//delete", + }, + // multiple expansions + { + "http://www.golang.org/{one}/{two}/{three}/get", + map[string]string{ + "one": "ONE", + "two": "TWO", + "three": "THREE", + }, + "http://www.golang.org/ONE/TWO/THREE/get", + }, + // utf-8 characters + { + "http://www.golang.org/{bucket}/get", + map[string]string{ + "bucket": "£100", + }, + "http://www.golang.org/%C2%A3100/get", + }, + // punctuations + { + "http://www.golang.org/{bucket}/get", + map[string]string{ + "bucket": `/\@:,.`, + }, + "http://www.golang.org/%2F%5C%40%3A%2C./get", + }, + // mis-matched brackets + { + "http://www.golang.org/{bucket/get", + map[string]string{ + "bucket": "red", + }, + "http://www.golang.org/{bucket/get", + }, + // "+" prefix for suppressing escape + // See also: http://tools.ietf.org/html/rfc6570#section-3.2.3 + { + "http://www.golang.org/{+topic}", + map[string]string{ + "topic": "/topics/myproject/mytopic", + }, + // The double slashes here look weird, but it's intentional + "http://www.golang.org//topics/myproject/mytopic", + }, +} + +func TestExpand(t *testing.T) { + for i, test := range expandTests { + u := url.URL{ + Path: test.in, + } + Expand(&u, test.expansions) + got := u.Path + if got != test.want { + t.Errorf("got %q expected %q in test %d", got, test.want, i+1) + } + } +} + +type CheckResponseTest struct { + in *http.Response + bodyText string + want error + errText string +} + +var checkResponseTests = []CheckResponseTest{ + { + &http.Response{ + StatusCode: http.StatusOK, + }, + "", + nil, + "", + }, + { + &http.Response{ + StatusCode: http.StatusInternalServerError, + }, + `{"error":{}}`, + &Error{ + Code: http.StatusInternalServerError, + Body: `{"error":{}}`, + }, + `googleapi: got HTTP response code 500 with body: {"error":{}}`, + }, + { + &http.Response{ + StatusCode: http.StatusNotFound, + }, + `{"error":{"message":"Error message for StatusNotFound."}}`, + &Error{ + Code: http.StatusNotFound, + Message: "Error message for StatusNotFound.", + Body: `{"error":{"message":"Error message for StatusNotFound."}}`, + }, + "googleapi: Error 404: Error message for StatusNotFound.", + }, + { + 
&http.Response{ + StatusCode: http.StatusBadRequest, + }, + `{"error":"invalid_token","error_description":"Invalid Value"}`, + &Error{ + Code: http.StatusBadRequest, + Body: `{"error":"invalid_token","error_description":"Invalid Value"}`, + }, + `googleapi: got HTTP response code 400 with body: {"error":"invalid_token","error_description":"Invalid Value"}`, + }, + { + &http.Response{ + StatusCode: http.StatusBadRequest, + }, + `{"error":{"errors":[{"domain":"usageLimits","reason":"keyInvalid","message":"Bad Request"}],"code":400,"message":"Bad Request"}}`, + &Error{ + Code: http.StatusBadRequest, + Errors: []ErrorItem{ + { + Reason: "keyInvalid", + Message: "Bad Request", + }, + }, + Body: `{"error":{"errors":[{"domain":"usageLimits","reason":"keyInvalid","message":"Bad Request"}],"code":400,"message":"Bad Request"}}`, + Message: "Bad Request", + }, + "googleapi: Error 400: Bad Request, keyInvalid", + }, +} + +func TestCheckResponse(t *testing.T) { + for _, test := range checkResponseTests { + res := test.in + if test.bodyText != "" { + res.Body = ioutil.NopCloser(strings.NewReader(test.bodyText)) + } + g := CheckResponse(res) + if !reflect.DeepEqual(g, test.want) { + t.Errorf("CheckResponse: got %v, want %v", g, test.want) + gotJson, err := json.Marshal(g) + if err != nil { + t.Error(err) + } + wantJson, err := json.Marshal(test.want) + if err != nil { + t.Error(err) + } + t.Errorf("json(got): %q\njson(want): %q", string(gotJson), string(wantJson)) + } + if g != nil && g.Error() != test.errText { + t.Errorf("CheckResponse: unexpected error message.\nGot: %q\nwant: %q", g, test.errText) + } + } +} + +type VariantPoint struct { + Type string + Coordinates []float64 +} + +type VariantTest struct { + in map[string]interface{} + result bool + want VariantPoint +} + +var coords = []interface{}{1.0, 2.0} + +var variantTests = []VariantTest{ + { + in: map[string]interface{}{ + "type": "Point", + "coordinates": coords, + }, + result: true, + want: VariantPoint{ + Type: "Point", + Coordinates: []float64{1.0, 2.0}, + }, + }, + { + in: map[string]interface{}{ + "type": "Point", + "bogus": coords, + }, + result: true, + want: VariantPoint{ + Type: "Point", + }, + }, +} + +func TestVariantType(t *testing.T) { + for _, test := range variantTests { + if g := VariantType(test.in); g != test.want.Type { + t.Errorf("VariantType(%v): got %v, want %v", test.in, g, test.want.Type) + } + } +} + +func TestConvertVariant(t *testing.T) { + for _, test := range variantTests { + g := VariantPoint{} + r := ConvertVariant(test.in, &g) + if r != test.result { + t.Errorf("ConvertVariant(%v): got %v, want %v", test.in, r, test.result) + } + if !reflect.DeepEqual(g, test.want) { + t.Errorf("ConvertVariant(%v): got %v, want %v", test.in, g, test.want) + } + } +} + +func TestRoundChunkSize(t *testing.T) { + type testCase struct { + in int + want int + } + for _, tc := range []testCase{ + {0, 0}, + {256*1024 - 1, 256 * 1024}, + {256 * 1024, 256 * 1024}, + {256*1024 + 1, 2 * 256 * 1024}, + } { + mo := &MediaOptions{} + ChunkSize(tc.in).setOptions(mo) + if got := mo.ChunkSize; got != tc.want { + t.Errorf("rounding chunk size: got: %v; want %v", got, tc.want) + } + } +} diff --git a/vendor/google.golang.org/api/googleapi/internal/uritemplates/LICENSE b/vendor/google.golang.org/api/googleapi/internal/uritemplates/LICENSE new file mode 100644 index 000000000..de9c88cb6 --- /dev/null +++ b/vendor/google.golang.org/api/googleapi/internal/uritemplates/LICENSE @@ -0,0 +1,18 @@ +Copyright (c) 2013 Joshua Tacoma + +Permission is hereby 
granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software is furnished to do so, +subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS +FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/google.golang.org/api/googleapi/internal/uritemplates/uritemplates.go b/vendor/google.golang.org/api/googleapi/internal/uritemplates/uritemplates.go new file mode 100644 index 000000000..8a84813fe --- /dev/null +++ b/vendor/google.golang.org/api/googleapi/internal/uritemplates/uritemplates.go @@ -0,0 +1,359 @@ +// Copyright 2013 Joshua Tacoma. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package uritemplates is a level 4 implementation of RFC 6570 (URI +// Template, http://tools.ietf.org/html/rfc6570). +// +// To use uritemplates, parse a template string and expand it with a value +// map: +// +// template, _ := uritemplates.Parse("https://api.github.com/repos{/user,repo}") +// values := make(map[string]interface{}) +// values["user"] = "jtacoma" +// values["repo"] = "uritemplates" +// expanded, _ := template.ExpandString(values) +// fmt.Printf(expanded) +// +package uritemplates + +import ( + "bytes" + "errors" + "fmt" + "reflect" + "regexp" + "strconv" + "strings" +) + +var ( + unreserved = regexp.MustCompile("[^A-Za-z0-9\\-._~]") + reserved = regexp.MustCompile("[^A-Za-z0-9\\-._~:/?#[\\]@!$&'()*+,;=]") + validname = regexp.MustCompile("^([A-Za-z0-9_\\.]|%[0-9A-Fa-f][0-9A-Fa-f])+$") + hex = []byte("0123456789ABCDEF") +) + +func pctEncode(src []byte) []byte { + dst := make([]byte, len(src)*3) + for i, b := range src { + buf := dst[i*3 : i*3+3] + buf[0] = 0x25 + buf[1] = hex[b/16] + buf[2] = hex[b%16] + } + return dst +} + +func escape(s string, allowReserved bool) (escaped string) { + if allowReserved { + escaped = string(reserved.ReplaceAllFunc([]byte(s), pctEncode)) + } else { + escaped = string(unreserved.ReplaceAllFunc([]byte(s), pctEncode)) + } + return escaped +} + +// A UriTemplate is a parsed representation of a URI template. +type UriTemplate struct { + raw string + parts []templatePart +} + +// Parse parses a URI template string into a UriTemplate object. 
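+// For example (a sketch of the resulting structure, not upstream docs),
+// Parse("https://example.com/repos{/user,repo}") yields parts that alternate
+// between raw text ("https://example.com/repos") and parsed expressions
+// (here "/user,repo", with first "/" and separator "/").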
+func Parse(rawtemplate string) (template *UriTemplate, err error) {
+	template = new(UriTemplate)
+	template.raw = rawtemplate
+	split := strings.Split(rawtemplate, "{")
+	template.parts = make([]templatePart, len(split)*2-1)
+	for i, s := range split {
+		if i == 0 {
+			if strings.Contains(s, "}") {
+				err = errors.New("unexpected }")
+				break
+			}
+			template.parts[i].raw = s
+		} else {
+			subsplit := strings.Split(s, "}")
+			if len(subsplit) != 2 {
+				err = errors.New("malformed template")
+				break
+			}
+			expression := subsplit[0]
+			template.parts[i*2-1], err = parseExpression(expression)
+			if err != nil {
+				break
+			}
+			template.parts[i*2].raw = subsplit[1]
+		}
+	}
+	if err != nil {
+		template = nil
+	}
+	return template, err
+}
+
+type templatePart struct {
+	raw           string
+	terms         []templateTerm
+	first         string
+	sep           string
+	named         bool
+	ifemp         string
+	allowReserved bool
+}
+
+type templateTerm struct {
+	name     string
+	explode  bool
+	truncate int
+}
+
+func parseExpression(expression string) (result templatePart, err error) {
+	switch expression[0] {
+	case '+':
+		result.sep = ","
+		result.allowReserved = true
+		expression = expression[1:]
+	case '.':
+		result.first = "."
+		result.sep = "."
+		expression = expression[1:]
+	case '/':
+		result.first = "/"
+		result.sep = "/"
+		expression = expression[1:]
+	case ';':
+		result.first = ";"
+		result.sep = ";"
+		result.named = true
+		expression = expression[1:]
+	case '?':
+		result.first = "?"
+		result.sep = "&"
+		result.named = true
+		result.ifemp = "="
+		expression = expression[1:]
+	case '&':
+		result.first = "&"
+		result.sep = "&"
+		result.named = true
+		result.ifemp = "="
+		expression = expression[1:]
+	case '#':
+		result.first = "#"
+		result.sep = ","
+		result.allowReserved = true
+		expression = expression[1:]
+	default:
+		result.sep = ","
+	}
+	rawterms := strings.Split(expression, ",")
+	result.terms = make([]templateTerm, len(rawterms))
+	for i, raw := range rawterms {
+		result.terms[i], err = parseTerm(raw)
+		if err != nil {
+			break
+		}
+	}
+	return result, err
+}
+
+func parseTerm(term string) (result templateTerm, err error) {
+	if strings.HasSuffix(term, "*") {
+		result.explode = true
+		term = term[:len(term)-1]
+	}
+	split := strings.Split(term, ":")
+	if len(split) == 1 {
+		result.name = term
+	} else if len(split) == 2 {
+		result.name = split[0]
+		var parsed int64
+		parsed, err = strconv.ParseInt(split[1], 10, 0)
+		result.truncate = int(parsed)
+	} else {
+		err = errors.New("multiple colons in same term")
+	}
+	if !validname.MatchString(result.name) {
+		err = errors.New("not a valid name: " + result.name)
+	}
+	if result.explode && result.truncate > 0 {
+		err = errors.New("both explode and prefix modifiers on same term")
+	}
+	return result, err
+}
+
+// Expand expands a URI template with a set of values to produce a string.
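+// For example (illustrative of the RFC 6570 level 4 semantics implemented here):
+//
+//	t, _ := Parse("https://example.com/search{?q,lang}")
+//	s, _ := t.Expand(map[string]interface{}{"q": "go language", "lang": "en"})
+//	// s == "https://example.com/search?q=go%20language&lang=en"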
+func (self *UriTemplate) Expand(value interface{}) (string, error) { + values, ismap := value.(map[string]interface{}) + if !ismap { + if m, ismap := struct2map(value); !ismap { + return "", errors.New("expected map[string]interface{}, struct, or pointer to struct.") + } else { + return self.Expand(m) + } + } + var buf bytes.Buffer + for _, p := range self.parts { + err := p.expand(&buf, values) + if err != nil { + return "", err + } + } + return buf.String(), nil +} + +func (self *templatePart) expand(buf *bytes.Buffer, values map[string]interface{}) error { + if len(self.raw) > 0 { + buf.WriteString(self.raw) + return nil + } + var zeroLen = buf.Len() + buf.WriteString(self.first) + var firstLen = buf.Len() + for _, term := range self.terms { + value, exists := values[term.name] + if !exists { + continue + } + if buf.Len() != firstLen { + buf.WriteString(self.sep) + } + switch v := value.(type) { + case string: + self.expandString(buf, term, v) + case []interface{}: + self.expandArray(buf, term, v) + case map[string]interface{}: + if term.truncate > 0 { + return errors.New("cannot truncate a map expansion") + } + self.expandMap(buf, term, v) + default: + if m, ismap := struct2map(value); ismap { + if term.truncate > 0 { + return errors.New("cannot truncate a map expansion") + } + self.expandMap(buf, term, m) + } else { + str := fmt.Sprintf("%v", value) + self.expandString(buf, term, str) + } + } + } + if buf.Len() == firstLen { + original := buf.Bytes()[:zeroLen] + buf.Reset() + buf.Write(original) + } + return nil +} + +func (self *templatePart) expandName(buf *bytes.Buffer, name string, empty bool) { + if self.named { + buf.WriteString(name) + if empty { + buf.WriteString(self.ifemp) + } else { + buf.WriteString("=") + } + } +} + +func (self *templatePart) expandString(buf *bytes.Buffer, t templateTerm, s string) { + if len(s) > t.truncate && t.truncate > 0 { + s = s[:t.truncate] + } + self.expandName(buf, t.name, len(s) == 0) + buf.WriteString(escape(s, self.allowReserved)) +} + +func (self *templatePart) expandArray(buf *bytes.Buffer, t templateTerm, a []interface{}) { + if len(a) == 0 { + return + } else if !t.explode { + self.expandName(buf, t.name, false) + } + for i, value := range a { + if t.explode && i > 0 { + buf.WriteString(self.sep) + } else if i > 0 { + buf.WriteString(",") + } + var s string + switch v := value.(type) { + case string: + s = v + default: + s = fmt.Sprintf("%v", v) + } + if len(s) > t.truncate && t.truncate > 0 { + s = s[:t.truncate] + } + if self.named && t.explode { + self.expandName(buf, t.name, len(s) == 0) + } + buf.WriteString(escape(s, self.allowReserved)) + } +} + +func (self *templatePart) expandMap(buf *bytes.Buffer, t templateTerm, m map[string]interface{}) { + if len(m) == 0 { + return + } + if !t.explode { + self.expandName(buf, t.name, len(m) == 0) + } + var firstLen = buf.Len() + for k, value := range m { + if firstLen != buf.Len() { + if t.explode { + buf.WriteString(self.sep) + } else { + buf.WriteString(",") + } + } + var s string + switch v := value.(type) { + case string: + s = v + default: + s = fmt.Sprintf("%v", v) + } + if t.explode { + buf.WriteString(escape(k, self.allowReserved)) + buf.WriteRune('=') + buf.WriteString(escape(s, self.allowReserved)) + } else { + buf.WriteString(escape(k, self.allowReserved)) + buf.WriteRune(',') + buf.WriteString(escape(s, self.allowReserved)) + } + } +} + +func struct2map(v interface{}) (map[string]interface{}, bool) { + value := reflect.ValueOf(v) + switch value.Type().Kind() { + case 
reflect.Ptr: + return struct2map(value.Elem().Interface()) + case reflect.Struct: + m := make(map[string]interface{}) + for i := 0; i < value.NumField(); i++ { + tag := value.Type().Field(i).Tag + var name string + if strings.Contains(string(tag), ":") { + name = tag.Get("uri") + } else { + name = strings.TrimSpace(string(tag)) + } + if len(name) == 0 { + name = value.Type().Field(i).Name + } + m[name] = value.Field(i).Interface() + } + return m, true + } + return nil, false +} diff --git a/vendor/google.golang.org/api/googleapi/internal/uritemplates/utils.go b/vendor/google.golang.org/api/googleapi/internal/uritemplates/utils.go new file mode 100644 index 000000000..399ef4623 --- /dev/null +++ b/vendor/google.golang.org/api/googleapi/internal/uritemplates/utils.go @@ -0,0 +1,13 @@ +package uritemplates + +func Expand(path string, expansions map[string]string) (string, error) { + template, err := Parse(path) + if err != nil { + return "", err + } + values := make(map[string]interface{}) + for k, v := range expansions { + values[k] = v + } + return template.Expand(values) +} diff --git a/vendor/google.golang.org/api/googleapi/transport/apikey.go b/vendor/google.golang.org/api/googleapi/transport/apikey.go new file mode 100644 index 000000000..eca1ea250 --- /dev/null +++ b/vendor/google.golang.org/api/googleapi/transport/apikey.go @@ -0,0 +1,38 @@ +// Copyright 2012 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package transport contains HTTP transports used to make +// authenticated API requests. +package transport + +import ( + "errors" + "net/http" +) + +// APIKey is an HTTP Transport which wraps an underlying transport and +// appends an API Key "key" parameter to the URL of outgoing requests. +type APIKey struct { + // Key is the API Key to set on requests. + Key string + + // Transport is the underlying HTTP transport. + // If nil, http.DefaultTransport is used. + Transport http.RoundTripper +} + +func (t *APIKey) RoundTrip(req *http.Request) (*http.Response, error) { + rt := t.Transport + if rt == nil { + rt = http.DefaultTransport + if rt == nil { + return nil, errors.New("googleapi/transport: no Transport specified or available") + } + } + newReq := *req + args := newReq.URL.Query() + args.Set("key", t.Key) + newReq.URL.RawQuery = args.Encode() + return rt.RoundTrip(&newReq) +} diff --git a/vendor/google.golang.org/api/googleapi/types.go b/vendor/google.golang.org/api/googleapi/types.go new file mode 100644 index 000000000..a02b4b071 --- /dev/null +++ b/vendor/google.golang.org/api/googleapi/types.go @@ -0,0 +1,182 @@ +// Copyright 2013 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package googleapi + +import ( + "encoding/json" + "strconv" +) + +// Int64s is a slice of int64s that marshal as quoted strings in JSON. +type Int64s []int64 + +func (q *Int64s) UnmarshalJSON(raw []byte) error { + *q = (*q)[:0] + var ss []string + if err := json.Unmarshal(raw, &ss); err != nil { + return err + } + for _, s := range ss { + v, err := strconv.ParseInt(s, 10, 64) + if err != nil { + return err + } + *q = append(*q, int64(v)) + } + return nil +} + +// Int32s is a slice of int32s that marshal as quoted strings in JSON. 
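+// For example, Int32s{-1, 2} marshals to ["-1","2"] and unmarshals back from
+// the same quoted form (see the types tests later in this patch).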
+type Int32s []int32 + +func (q *Int32s) UnmarshalJSON(raw []byte) error { + *q = (*q)[:0] + var ss []string + if err := json.Unmarshal(raw, &ss); err != nil { + return err + } + for _, s := range ss { + v, err := strconv.ParseInt(s, 10, 32) + if err != nil { + return err + } + *q = append(*q, int32(v)) + } + return nil +} + +// Uint64s is a slice of uint64s that marshal as quoted strings in JSON. +type Uint64s []uint64 + +func (q *Uint64s) UnmarshalJSON(raw []byte) error { + *q = (*q)[:0] + var ss []string + if err := json.Unmarshal(raw, &ss); err != nil { + return err + } + for _, s := range ss { + v, err := strconv.ParseUint(s, 10, 64) + if err != nil { + return err + } + *q = append(*q, uint64(v)) + } + return nil +} + +// Uint32s is a slice of uint32s that marshal as quoted strings in JSON. +type Uint32s []uint32 + +func (q *Uint32s) UnmarshalJSON(raw []byte) error { + *q = (*q)[:0] + var ss []string + if err := json.Unmarshal(raw, &ss); err != nil { + return err + } + for _, s := range ss { + v, err := strconv.ParseUint(s, 10, 32) + if err != nil { + return err + } + *q = append(*q, uint32(v)) + } + return nil +} + +// Float64s is a slice of float64s that marshal as quoted strings in JSON. +type Float64s []float64 + +func (q *Float64s) UnmarshalJSON(raw []byte) error { + *q = (*q)[:0] + var ss []string + if err := json.Unmarshal(raw, &ss); err != nil { + return err + } + for _, s := range ss { + v, err := strconv.ParseFloat(s, 64) + if err != nil { + return err + } + *q = append(*q, float64(v)) + } + return nil +} + +func quotedList(n int, fn func(dst []byte, i int) []byte) ([]byte, error) { + dst := make([]byte, 0, 2+n*10) // somewhat arbitrary + dst = append(dst, '[') + for i := 0; i < n; i++ { + if i > 0 { + dst = append(dst, ',') + } + dst = append(dst, '"') + dst = fn(dst, i) + dst = append(dst, '"') + } + dst = append(dst, ']') + return dst, nil +} + +func (s Int64s) MarshalJSON() ([]byte, error) { + return quotedList(len(s), func(dst []byte, i int) []byte { + return strconv.AppendInt(dst, s[i], 10) + }) +} + +func (s Int32s) MarshalJSON() ([]byte, error) { + return quotedList(len(s), func(dst []byte, i int) []byte { + return strconv.AppendInt(dst, int64(s[i]), 10) + }) +} + +func (s Uint64s) MarshalJSON() ([]byte, error) { + return quotedList(len(s), func(dst []byte, i int) []byte { + return strconv.AppendUint(dst, s[i], 10) + }) +} + +func (s Uint32s) MarshalJSON() ([]byte, error) { + return quotedList(len(s), func(dst []byte, i int) []byte { + return strconv.AppendUint(dst, uint64(s[i]), 10) + }) +} + +func (s Float64s) MarshalJSON() ([]byte, error) { + return quotedList(len(s), func(dst []byte, i int) []byte { + return strconv.AppendFloat(dst, s[i], 'g', -1, 64) + }) +} + +/* + * Helper routines for simplifying the creation of optional fields of basic type. + */ + +// Bool is a helper routine that allocates a new bool value +// to store v and returns a pointer to it. +func Bool(v bool) *bool { return &v } + +// Int32 is a helper routine that allocates a new int32 value +// to store v and returns a pointer to it. +func Int32(v int32) *int32 { return &v } + +// Int64 is a helper routine that allocates a new int64 value +// to store v and returns a pointer to it. +func Int64(v int64) *int64 { return &v } + +// Float64 is a helper routine that allocates a new float64 value +// to store v and returns a pointer to it. +func Float64(v float64) *float64 { return &v } + +// Uint32 is a helper routine that allocates a new uint32 value +// to store v and returns a pointer to it. 
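+// For example (hypothetical usage), an optional field of type *uint32 in a
+// generated struct can be set inline: obj.MaxAge = googleapi.Uint32(3600).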
+func Uint32(v uint32) *uint32 { return &v } + +// Uint64 is a helper routine that allocates a new uint64 value +// to store v and returns a pointer to it. +func Uint64(v uint64) *uint64 { return &v } + +// String is a helper routine that allocates a new string value +// to store v and returns a pointer to it. +func String(v string) *string { return &v } diff --git a/vendor/google.golang.org/api/googleapi/types_test.go b/vendor/google.golang.org/api/googleapi/types_test.go new file mode 100644 index 000000000..a6b204515 --- /dev/null +++ b/vendor/google.golang.org/api/googleapi/types_test.go @@ -0,0 +1,44 @@ +// Copyright 2013 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package googleapi + +import ( + "encoding/json" + "reflect" + "testing" +) + +func TestTypes(t *testing.T) { + type T struct { + I32 Int32s + I64 Int64s + U32 Uint32s + U64 Uint64s + F64 Float64s + } + v := &T{ + I32: Int32s{-1, 2, 3}, + I64: Int64s{-1, 2, 1 << 33}, + U32: Uint32s{1, 2}, + U64: Uint64s{1, 2, 1 << 33}, + F64: Float64s{1.5, 3.33}, + } + got, err := json.Marshal(v) + if err != nil { + t.Fatal(err) + } + want := `{"I32":["-1","2","3"],"I64":["-1","2","8589934592"],"U32":["1","2"],"U64":["1","2","8589934592"],"F64":["1.5","3.33"]}` + if string(got) != want { + t.Fatalf("Marshal mismatch.\n got: %s\nwant: %s\n", got, want) + } + + v2 := new(T) + if err := json.Unmarshal(got, v2); err != nil { + t.Fatalf("Unmarshal: %v", err) + } + if !reflect.DeepEqual(v, v2) { + t.Fatalf("Unmarshal didn't produce same results.\n got: %#v\nwant: %#v\n", v, v2) + } +} diff --git a/vendor/google.golang.org/api/pubsub/v1/pubsub-api.json b/vendor/google.golang.org/api/pubsub/v1/pubsub-api.json new file mode 100644 index 000000000..ee1b7a403 --- /dev/null +++ b/vendor/google.golang.org/api/pubsub/v1/pubsub-api.json @@ -0,0 +1,1040 @@ +{ + "kind": "discovery#restDescription", + "etag": "\"bRFOOrZKfO9LweMbPqu0kcu6De8/hbQtG3uMFD1QEGQS0K7qrH-ar90\"", + "discoveryVersion": "v1", + "id": "pubsub:v1", + "name": "pubsub", + "version": "v1", + "revision": "20151103", + "title": "Google Cloud Pub/Sub API", + "description": "Provides reliable, many-to-many, asynchronous messaging between applications.", + "ownerDomain": "google.com", + "ownerName": "Google", + "icons": { + "x16": "http://www.google.com/images/icons/product/search-16.gif", + "x32": "http://www.google.com/images/icons/product/search-32.gif" + }, + "documentationLink": "https://cloud.google.com/pubsub/docs", + "protocol": "rest", + "baseUrl": "https://pubsub.googleapis.com/", + "basePath": "", + "rootUrl": "https://pubsub.googleapis.com/", + "servicePath": "", + "batchPath": "batch", + "parameters": { + "access_token": { + "type": "string", + "description": "OAuth access token.", + "location": "query" + }, + "alt": { + "type": "string", + "description": "Data format for response.", + "default": "json", + "enumDescriptions": [ + "Responses with Content-Type of application/json", + "Media download with context-dependent Content-Type", + "Responses with Content-Type of application/x-protobuf" + ], + "location": "query" + }, + "bearer_token": { + "type": "string", + "description": "OAuth bearer token.", + "location": "query" + }, + "callback": { + "type": "string", + "description": "JSONP", + "location": "query" + }, + "fields": { + "type": "string", + "description": "Selector specifying which fields to include in a partial response.", + "location": "query" + }, + "key": { + "type": 
"string", + "description": "API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token.", + "location": "query" + }, + "oauth_token": { + "type": "string", + "description": "OAuth 2.0 token for the current user.", + "location": "query" + }, + "pp": { + "type": "boolean", + "description": "Pretty-print response.", + "default": "true", + "location": "query" + }, + "prettyPrint": { + "type": "boolean", + "description": "Returns response with indentations and line breaks.", + "default": "true", + "location": "query" + }, + "quotaUser": { + "type": "string", + "description": "Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters.", + "location": "query" + }, + "upload_protocol": { + "type": "string", + "description": "Upload protocol for media (e.g. \"raw\", \"multipart\").", + "location": "query" + }, + "uploadType": { + "type": "string", + "description": "Legacy upload protocol for media (e.g. \"media\", \"multipart\").", + "location": "query" + }, + "$.xgafv": { + "type": "string", + "description": "V1 error format.", + "enumDescriptions": [ + "v1 error format", + "v2 error format" + ], + "location": "query" + } + }, + "auth": { + "oauth2": { + "scopes": { + "https://www.googleapis.com/auth/cloud-platform": { + "description": "View and manage your data across Google Cloud Platform services" + }, + "https://www.googleapis.com/auth/pubsub": { + "description": "View and manage Pub/Sub topics and subscriptions" + } + } + } + }, + "schemas": { + "SetIamPolicyRequest": { + "id": "SetIamPolicyRequest", + "type": "object", + "description": "Request message for `SetIamPolicy` method.", + "properties": { + "policy": { + "$ref": "Policy", + "description": "REQUIRED: The complete policy to be applied to the `resource`. The size of the policy is limited to a few 10s of KB. An empty policy is a valid policy but certain Cloud Platform services (such as Projects) might reject them." + } + } + }, + "Policy": { + "id": "Policy", + "type": "object", + "description": "Defines an Identity and Access Management (IAM) policy. It is used to specify access control policies for Cloud Platform resources. A `Policy` consists of a list of `bindings`. A `Binding` binds a list of `members` to a `role`, where the members can be user accounts, Google groups, Google domains, and service accounts. A `role` is a named list of permissions defined by IAM. **Example** { \"bindings\": [ { \"role\": \"roles/owner\", \"members\": [ \"user:mike@example.com\", \"group:admins@example.com\", \"domain:google.com\", \"serviceAccount:my-other-app@appspot.gserviceaccount.com\"] }, { \"role\": \"roles/viewer\", \"members\": [\"user:sean@example.com\"] } ] } For a description of IAM and its features, see the [IAM developer's guide](https://cloud.google.com/iam).", + "properties": { + "version": { + "type": "integer", + "description": "Version of the `Policy`. The default version is 0.", + "format": "int32" + }, + "bindings": { + "type": "array", + "description": "Associates a list of `members` to a `role`. Multiple `bindings` must not be specified for the same `role`. 
`bindings` with no members will result in an error.", + "items": { + "$ref": "Binding" + } + }, + "etag": { + "type": "string", + "description": "Can be used to perform a read-modify-write.", + "format": "byte" + } + } + }, + "Binding": { + "id": "Binding", + "type": "object", + "description": "Associates `members` with a `role`.", + "properties": { + "role": { + "type": "string", + "description": "Role that is assigned to `members`. For example, `roles/viewer`, `roles/editor`, or `roles/owner`. Required" + }, + "members": { + "type": "array", + "description": "Specifies the identities requesting access for a Cloud Platform resource. `members` can have the following formats: * `allUsers`: A special identifier that represents anyone who is on the internet; with or without a Google account. * `allAuthenticatedUsers`: A special identifier that represents anyone who is authenticated with a Google account or a service account. * `user:{emailid}`: An email address that represents a specific Google account. For example, `alice@gmail.com` or `joe@example.com`. * `serviceAccount:{emailid}`: An email address that represents a service account. For example, `my-other-app@appspot.gserviceaccount.com`. * `group:{emailid}`: An email address that represents a Google group. For example, `admins@example.com`. * `domain:{domain}`: A Google Apps domain name that represents all the users of that domain. For example, `google.com` or `example.com`.", + "items": { + "type": "string" + } + } + } + }, + "TestIamPermissionsRequest": { + "id": "TestIamPermissionsRequest", + "type": "object", + "description": "Request message for `TestIamPermissions` method.", + "properties": { + "permissions": { + "type": "array", + "description": "The set of permissions to check for the `resource`. Permissions with wildcards (such as '*' or 'storage.*') are not allowed.", + "items": { + "type": "string" + } + } + } + }, + "TestIamPermissionsResponse": { + "id": "TestIamPermissionsResponse", + "type": "object", + "description": "Response message for `TestIamPermissions` method.", + "properties": { + "permissions": { + "type": "array", + "description": "A subset of `TestPermissionsRequest.permissions` that the caller is allowed.", + "items": { + "type": "string" + } + } + } + }, + "Topic": { + "id": "Topic", + "type": "object", + "description": "A topic resource.", + "properties": { + "name": { + "type": "string", + "description": "The name of the topic. It must have the format `\"projects/{project}/topics/{topic}\"`. `{topic}` must start with a letter, and contain only letters (`[A-Za-z]`), numbers (`[0-9]`), dashes (`-`), underscores (`_`), periods (`.`), tildes (`~`), plus (`+`) or percent signs (`%`). It must be between 3 and 255 characters in length, and it must not start with `\"goog\"`." + } + } + }, + "PublishRequest": { + "id": "PublishRequest", + "type": "object", + "description": "Request for the Publish method.", + "properties": { + "messages": { + "type": "array", + "description": "The messages to publish.", + "items": { + "$ref": "PubsubMessage" + } + } + } + }, + "PubsubMessage": { + "id": "PubsubMessage", + "type": "object", + "description": "A message data and its attributes. The message payload must not be empty; it must contain either a non-empty data field, or at least one attribute.", + "properties": { + "data": { + "type": "string", + "description": "The message payload. 
For JSON requests, the value of this field must be base64-encoded.", + "format": "byte" + }, + "attributes": { + "type": "object", + "description": "Optional attributes for this message.", + "additionalProperties": { + "type": "string" + } + }, + "messageId": { + "type": "string", + "description": "ID of this message, assigned by the server when the message is published. Guaranteed to be unique within the topic. This value may be read by a subscriber that receives a `PubsubMessage` via a `Pull` call or a push delivery. It must not be populated by the publisher in a `Publish` call." + }, + "publishTime": { + "type": "string", + "description": "The time at which the message was published, populated by the server when it receives the `Publish` call. It must not be populated by the publisher in a `Publish` call." + } + } + }, + "PublishResponse": { + "id": "PublishResponse", + "type": "object", + "description": "Response for the `Publish` method.", + "properties": { + "messageIds": { + "type": "array", + "description": "The server-assigned ID of each published message, in the same order as the messages in the request. IDs are guaranteed to be unique within the topic.", + "items": { + "type": "string" + } + } + } + }, + "ListTopicsResponse": { + "id": "ListTopicsResponse", + "type": "object", + "description": "Response for the `ListTopics` method.", + "properties": { + "topics": { + "type": "array", + "description": "The resulting topics.", + "items": { + "$ref": "Topic" + } + }, + "nextPageToken": { + "type": "string", + "description": "If not empty, indicates that there may be more topics that match the request; this value should be passed in a new `ListTopicsRequest`." + } + } + }, + "ListTopicSubscriptionsResponse": { + "id": "ListTopicSubscriptionsResponse", + "type": "object", + "description": "Response for the `ListTopicSubscriptions` method.", + "properties": { + "subscriptions": { + "type": "array", + "description": "The names of the subscriptions that match the request.", + "items": { + "type": "string" + } + }, + "nextPageToken": { + "type": "string", + "description": "If not empty, indicates that there may be more subscriptions that match the request; this value should be passed in a new `ListTopicSubscriptionsRequest` to get more subscriptions." + } + } + }, + "Empty": { + "id": "Empty", + "type": "object", + "description": "A generic empty message that you can re-use to avoid defining duplicated empty messages in your APIs. A typical example is to use it as the request or the response type of an API method. For instance: service Foo { rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); } The JSON representation for `Empty` is empty JSON object `{}`." + }, + "Subscription": { + "id": "Subscription", + "type": "object", + "description": "A subscription resource.", + "properties": { + "name": { + "type": "string", + "description": "The name of the subscription. It must have the format `\"projects/{project}/subscriptions/{subscription}\"`. `{subscription}` must start with a letter, and contain only letters (`[A-Za-z]`), numbers (`[0-9]`), dashes (`-`), underscores (`_`), periods (`.`), tildes (`~`), plus (`+`) or percent signs (`%`). It must be between 3 and 255 characters in length, and it must not start with `\"goog\"`." + }, + "topic": { + "type": "string", + "description": "The name of the topic from which this subscription is receiving messages. The value of this field will be `_deleted-topic_` if the topic has been deleted." 
+ }, + "pushConfig": { + "$ref": "PushConfig", + "description": "If push delivery is used with this subscription, this field is used to configure it. An empty `pushConfig` signifies that the subscriber will pull and ack messages using API methods." + }, + "ackDeadlineSeconds": { + "type": "integer", + "description": "This value is the maximum time after a subscriber receives a message before the subscriber should acknowledge the message. After message delivery but before the ack deadline expires and before the message is acknowledged, it is an outstanding message and will not be delivered again during that time (on a best-effort basis). For pull delivery this value is used as the initial value for the ack deadline. To override this value for a given message, call `ModifyAckDeadline` with the corresponding `ack_id`. For push delivery, this value is also used to set the request timeout for the call to the push endpoint. If the subscriber never acknowledges the message, the Pub/Sub system will eventually redeliver the message. If this parameter is not set, the default value of 10 seconds is used.", + "format": "int32" + } + } + }, + "PushConfig": { + "id": "PushConfig", + "type": "object", + "description": "Configuration for a push delivery endpoint.", + "properties": { + "pushEndpoint": { + "type": "string", + "description": "A URL locating the endpoint to which messages should be pushed. For example, a Webhook endpoint might use \"https://example.com/push\"." + }, + "attributes": { + "type": "object", + "description": "Endpoint configuration attributes. Every endpoint has a set of API supported attributes that can be used to control different aspects of the message delivery. The currently supported attribute is `x-goog-version`, which you can use to change the format of the push message. This attribute indicates the version of the data expected by the endpoint. This controls the shape of the envelope (i.e. its fields and metadata). The endpoint version is based on the version of the Pub/Sub API. If not present during the `CreateSubscription` call, it will default to the version of the API used to make such call. If not present during a `ModifyPushConfig` call, its value will not be changed. `GetSubscription` calls will always return a valid version, even if the subscription was created without this attribute. The possible values for this attribute are: * `v1beta1`: uses the push format defined in the v1beta1 Pub/Sub API. * `v1` or `v1beta2`: uses the push format defined in the v1 Pub/Sub API.", + "additionalProperties": { + "type": "string" + } + } + } + }, + "ListSubscriptionsResponse": { + "id": "ListSubscriptionsResponse", + "type": "object", + "description": "Response for the `ListSubscriptions` method.", + "properties": { + "subscriptions": { + "type": "array", + "description": "The subscriptions that match the request.", + "items": { + "$ref": "Subscription" + } + }, + "nextPageToken": { + "type": "string", + "description": "If not empty, indicates that there may be more subscriptions that match the request; this value should be passed in a new `ListSubscriptionsRequest` to get more subscriptions." 
+ } + } + }, + "ModifyAckDeadlineRequest": { + "id": "ModifyAckDeadlineRequest", + "type": "object", + "description": "Request for the ModifyAckDeadline method.", + "properties": { + "ackIds": { + "type": "array", + "description": "List of acknowledgment IDs.", + "items": { + "type": "string" + } + }, + "ackDeadlineSeconds": { + "type": "integer", + "description": "The new ack deadline with respect to the time this request was sent to the Pub/Sub system. Must be \u003e= 0. For example, if the value is 10, the new ack deadline will expire 10 seconds after the `ModifyAckDeadline` call was made. Specifying zero may immediately make the message available for another pull request.", + "format": "int32" + } + } + }, + "AcknowledgeRequest": { + "id": "AcknowledgeRequest", + "type": "object", + "description": "Request for the Acknowledge method.", + "properties": { + "ackIds": { + "type": "array", + "description": "The acknowledgment ID for the messages being acknowledged that was returned by the Pub/Sub system in the `Pull` response. Must not be empty.", + "items": { + "type": "string" + } + } + } + }, + "PullRequest": { + "id": "PullRequest", + "type": "object", + "description": "Request for the `Pull` method.", + "properties": { + "returnImmediately": { + "type": "boolean", + "description": "If this is specified as true the system will respond immediately even if it is not able to return a message in the `Pull` response. Otherwise the system is allowed to wait until at least one message is available rather than returning no messages. The client may cancel the request if it does not wish to wait any longer for the response." + }, + "maxMessages": { + "type": "integer", + "description": "The maximum number of messages returned for this request. The Pub/Sub system may return fewer than the number specified.", + "format": "int32" + } + } + }, + "PullResponse": { + "id": "PullResponse", + "type": "object", + "description": "Response for the `Pull` method.", + "properties": { + "receivedMessages": { + "type": "array", + "description": "Received Pub/Sub messages. The Pub/Sub system will return zero messages if there are no more available in the backlog. The Pub/Sub system may return fewer than the `maxMessages` requested even if there are more messages available in the backlog.", + "items": { + "$ref": "ReceivedMessage" + } + } + } + }, + "ReceivedMessage": { + "id": "ReceivedMessage", + "type": "object", + "description": "A message and its corresponding acknowledgment ID.", + "properties": { + "ackId": { + "type": "string", + "description": "This ID can be used to acknowledge the received message." + }, + "message": { + "$ref": "PubsubMessage", + "description": "The message." + } + } + }, + "ModifyPushConfigRequest": { + "id": "ModifyPushConfigRequest", + "type": "object", + "description": "Request for the ModifyPushConfig method.", + "properties": { + "pushConfig": { + "$ref": "PushConfig", + "description": "The push configuration for future deliveries. An empty `pushConfig` indicates that the Pub/Sub system should stop pushing messages from the given subscription and allow messages to be pulled and acknowledged - effectively pausing the subscription if `Pull` is not called." + } + } + } + }, + "resources": { + "projects": { + "resources": { + "topics": { + "methods": { + "setIamPolicy": { + "id": "pubsub.projects.topics.setIamPolicy", + "path": "v1/{+resource}:setIamPolicy", + "httpMethod": "POST", + "description": "Sets the access control policy on the specified resource. 
Replaces any existing policy.", + "parameters": { + "resource": { + "type": "string", + "description": "REQUIRED: The resource for which policy is being specified. `resource` is usually specified as a path, such as, `projects/{project}/zones/{zone}/disks/{disk}`. The format for the path specified in this value is resource specific and is specified in the documentation for the respective SetIamPolicy rpc.", + "required": true, + "pattern": "^projects/[^/]*/topics/[^/]*$", + "location": "path" + } + }, + "parameterOrder": [ + "resource" + ], + "request": { + "$ref": "SetIamPolicyRequest" + }, + "response": { + "$ref": "Policy" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/pubsub" + ] + }, + "getIamPolicy": { + "id": "pubsub.projects.topics.getIamPolicy", + "path": "v1/{+resource}:getIamPolicy", + "httpMethod": "GET", + "description": "Gets the access control policy for a `resource`. Is empty if the policy or the resource does not exist.", + "parameters": { + "resource": { + "type": "string", + "description": "REQUIRED: The resource for which policy is being requested. `resource` is usually specified as a path, such as, `projects/{project}/zones/{zone}/disks/{disk}`. The format for the path specified in this value is resource specific and is specified in the documentation for the respective GetIamPolicy rpc.", + "required": true, + "pattern": "^projects/[^/]*/topics/[^/]*$", + "location": "path" + } + }, + "parameterOrder": [ + "resource" + ], + "response": { + "$ref": "Policy" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/pubsub" + ] + }, + "testIamPermissions": { + "id": "pubsub.projects.topics.testIamPermissions", + "path": "v1/{+resource}:testIamPermissions", + "httpMethod": "POST", + "description": "Returns permissions that a caller has on the specified resource.", + "parameters": { + "resource": { + "type": "string", + "description": "REQUIRED: The resource for which policy detail is being requested. `resource` is usually specified as a path, such as, `projects/{project}/zones/{zone}/disks/{disk}`. The format for the path specified in this value is resource specific and is specified in the documentation for the respective TestIamPermissions rpc.", + "required": true, + "pattern": "^projects/[^/]*/topics/[^/]*$", + "location": "path" + } + }, + "parameterOrder": [ + "resource" + ], + "request": { + "$ref": "TestIamPermissionsRequest" + }, + "response": { + "$ref": "TestIamPermissionsResponse" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/pubsub" + ] + }, + "create": { + "id": "pubsub.projects.topics.create", + "path": "v1/{+name}", + "httpMethod": "PUT", + "description": "Creates the given topic with the given name.", + "parameters": { + "name": { + "type": "string", + "description": "The name of the topic. It must have the format `\"projects/{project}/topics/{topic}\"`. `{topic}` must start with a letter, and contain only letters (`[A-Za-z]`), numbers (`[0-9]`), dashes (`-`), underscores (`_`), periods (`.`), tildes (`~`), plus (`+`) or percent signs (`%`). 
It must be between 3 and 255 characters in length, and it must not start with `\"goog\"`.", + "required": true, + "pattern": "^projects/[^/]*/topics/[^/]*$", + "location": "path" + } + }, + "parameterOrder": [ + "name" + ], + "request": { + "$ref": "Topic" + }, + "response": { + "$ref": "Topic" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/pubsub" + ] + }, + "publish": { + "id": "pubsub.projects.topics.publish", + "path": "v1/{+topic}:publish", + "httpMethod": "POST", + "description": "Adds one or more messages to the topic. Returns `NOT_FOUND` if the topic does not exist. The message payload must not be empty; it must contain either a non-empty data field, or at least one attribute.", + "parameters": { + "topic": { + "type": "string", + "description": "The messages in the request will be published on this topic.", + "required": true, + "pattern": "^projects/[^/]*/topics/[^/]*$", + "location": "path" + } + }, + "parameterOrder": [ + "topic" + ], + "request": { + "$ref": "PublishRequest" + }, + "response": { + "$ref": "PublishResponse" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/pubsub" + ] + }, + "get": { + "id": "pubsub.projects.topics.get", + "path": "v1/{+topic}", + "httpMethod": "GET", + "description": "Gets the configuration of a topic.", + "parameters": { + "topic": { + "type": "string", + "description": "The name of the topic to get.", + "required": true, + "pattern": "^projects/[^/]*/topics/[^/]*$", + "location": "path" + } + }, + "parameterOrder": [ + "topic" + ], + "response": { + "$ref": "Topic" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/pubsub" + ] + }, + "list": { + "id": "pubsub.projects.topics.list", + "path": "v1/{+project}/topics", + "httpMethod": "GET", + "description": "Lists matching topics.", + "parameters": { + "project": { + "type": "string", + "description": "The name of the cloud project that topics belong to.", + "required": true, + "pattern": "^projects/[^/]*$", + "location": "path" + }, + "pageSize": { + "type": "integer", + "description": "Maximum number of topics to return.", + "format": "int32", + "location": "query" + }, + "pageToken": { + "type": "string", + "description": "The value returned by the last `ListTopicsResponse`; indicates that this is a continuation of a prior `ListTopics` call, and that the system should return the next page of data.", + "location": "query" + } + }, + "parameterOrder": [ + "project" + ], + "response": { + "$ref": "ListTopicsResponse" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/pubsub" + ] + }, + "delete": { + "id": "pubsub.projects.topics.delete", + "path": "v1/{+topic}", + "httpMethod": "DELETE", + "description": "Deletes the topic with the given name. Returns `NOT_FOUND` if the topic does not exist. After a topic is deleted, a new topic may be created with the same name; this is an entirely new topic with none of the old configuration or subscriptions. 
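Note that `create` is a `PUT` on the topic's own name rather than a `POST` to a collection, so the caller chooses the name, subject to the pattern and length rules above. A sketch (illustrative helper; same assumptions as the earlier examples):

import (
	"fmt"

	"google.golang.org/api/pubsub/v1"
)

// createTopic builds the fully qualified name the API requires and
// creates the topic under it.
func createTopic(svc *pubsub.Service, project, id string) (*pubsub.Topic, error) {
	name := fmt.Sprintf("projects/%s/topics/%s", project, id)
	return svc.Projects.Topics.Create(name, &pubsub.Topic{}).Do()
}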
Existing subscriptions to this topic are not deleted, but their `topic` field is set to `_deleted-topic_`.", + "parameters": { + "topic": { + "type": "string", + "description": "Name of the topic to delete.", + "required": true, + "pattern": "^projects/[^/]*/topics/[^/]*$", + "location": "path" + } + }, + "parameterOrder": [ + "topic" + ], + "response": { + "$ref": "Empty" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/pubsub" + ] + } + }, + "resources": { + "subscriptions": { + "methods": { + "list": { + "id": "pubsub.projects.topics.subscriptions.list", + "path": "v1/{+topic}/subscriptions", + "httpMethod": "GET", + "description": "Lists the name of the subscriptions for this topic.", + "parameters": { + "topic": { + "type": "string", + "description": "The name of the topic that subscriptions are attached to.", + "required": true, + "pattern": "^projects/[^/]*/topics/[^/]*$", + "location": "path" + }, + "pageSize": { + "type": "integer", + "description": "Maximum number of subscription names to return.", + "format": "int32", + "location": "query" + }, + "pageToken": { + "type": "string", + "description": "The value returned by the last `ListTopicSubscriptionsResponse`; indicates that this is a continuation of a prior `ListTopicSubscriptions` call, and that the system should return the next page of data.", + "location": "query" + } + }, + "parameterOrder": [ + "topic" + ], + "response": { + "$ref": "ListTopicSubscriptionsResponse" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/pubsub" + ] + } + } + } + } + }, + "subscriptions": { + "methods": { + "setIamPolicy": { + "id": "pubsub.projects.subscriptions.setIamPolicy", + "path": "v1/{+resource}:setIamPolicy", + "httpMethod": "POST", + "description": "Sets the access control policy on the specified resource. Replaces any existing policy.", + "parameters": { + "resource": { + "type": "string", + "description": "REQUIRED: The resource for which policy is being specified. `resource` is usually specified as a path, such as, `projects/{project}/zones/{zone}/disks/{disk}`. The format for the path specified in this value is resource specific and is specified in the documentation for the respective SetIamPolicy rpc.", + "required": true, + "pattern": "^projects/[^/]*/subscriptions/[^/]*$", + "location": "path" + } + }, + "parameterOrder": [ + "resource" + ], + "request": { + "$ref": "SetIamPolicyRequest" + }, + "response": { + "$ref": "Policy" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/pubsub" + ] + }, + "getIamPolicy": { + "id": "pubsub.projects.subscriptions.getIamPolicy", + "path": "v1/{+resource}:getIamPolicy", + "httpMethod": "GET", + "description": "Gets the access control policy for a `resource`. Is empty if the policy or the resource does not exist.", + "parameters": { + "resource": { + "type": "string", + "description": "REQUIRED: The resource for which policy is being requested. `resource` is usually specified as a path, such as, `projects/{project}/zones/{zone}/disks/{disk}`. 
The format for the path specified in this value is resource specific and is specified in the documentation for the respective GetIamPolicy rpc.", + "required": true, + "pattern": "^projects/[^/]*/subscriptions/[^/]*$", + "location": "path" + } + }, + "parameterOrder": [ + "resource" + ], + "response": { + "$ref": "Policy" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/pubsub" + ] + }, + "testIamPermissions": { + "id": "pubsub.projects.subscriptions.testIamPermissions", + "path": "v1/{+resource}:testIamPermissions", + "httpMethod": "POST", + "description": "Returns permissions that a caller has on the specified resource.", + "parameters": { + "resource": { + "type": "string", + "description": "REQUIRED: The resource for which policy detail is being requested. `resource` is usually specified as a path, such as, `projects/{project}/zones/{zone}/disks/{disk}`. The format for the path specified in this value is resource specific and is specified in the documentation for the respective TestIamPermissions rpc.", + "required": true, + "pattern": "^projects/[^/]*/subscriptions/[^/]*$", + "location": "path" + } + }, + "parameterOrder": [ + "resource" + ], + "request": { + "$ref": "TestIamPermissionsRequest" + }, + "response": { + "$ref": "TestIamPermissionsResponse" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/pubsub" + ] + }, + "create": { + "id": "pubsub.projects.subscriptions.create", + "path": "v1/{+name}", + "httpMethod": "PUT", + "description": "Creates a subscription to a given topic for a given subscriber. If the subscription already exists, returns `ALREADY_EXISTS`. If the corresponding topic doesn't exist, returns `NOT_FOUND`. If the name is not provided in the request, the server will assign a random name for this subscription on the same project as the topic.", + "parameters": { + "name": { + "type": "string", + "description": "The name of the subscription. It must have the format `\"projects/{project}/subscriptions/{subscription}\"`. `{subscription}` must start with a letter, and contain only letters (`[A-Za-z]`), numbers (`[0-9]`), dashes (`-`), underscores (`_`), periods (`.`), tildes (`~`), plus (`+`) or percent signs (`%`). 
It must be between 3 and 255 characters in length, and it must not start with `\"goog\"`.", + "required": true, + "pattern": "^projects/[^/]*/subscriptions/[^/]*$", + "location": "path" + } + }, + "parameterOrder": [ + "name" + ], + "request": { + "$ref": "Subscription" + }, + "response": { + "$ref": "Subscription" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/pubsub" + ] + }, + "get": { + "id": "pubsub.projects.subscriptions.get", + "path": "v1/{+subscription}", + "httpMethod": "GET", + "description": "Gets the configuration details of a subscription.", + "parameters": { + "subscription": { + "type": "string", + "description": "The name of the subscription to get.", + "required": true, + "pattern": "^projects/[^/]*/subscriptions/[^/]*$", + "location": "path" + } + }, + "parameterOrder": [ + "subscription" + ], + "response": { + "$ref": "Subscription" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/pubsub" + ] + }, + "list": { + "id": "pubsub.projects.subscriptions.list", + "path": "v1/{+project}/subscriptions", + "httpMethod": "GET", + "description": "Lists matching subscriptions.", + "parameters": { + "project": { + "type": "string", + "description": "The name of the cloud project that subscriptions belong to.", + "required": true, + "pattern": "^projects/[^/]*$", + "location": "path" + }, + "pageSize": { + "type": "integer", + "description": "Maximum number of subscriptions to return.", + "format": "int32", + "location": "query" + }, + "pageToken": { + "type": "string", + "description": "The value returned by the last `ListSubscriptionsResponse`; indicates that this is a continuation of a prior `ListSubscriptions` call, and that the system should return the next page of data.", + "location": "query" + } + }, + "parameterOrder": [ + "project" + ], + "response": { + "$ref": "ListSubscriptionsResponse" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/pubsub" + ] + }, + "delete": { + "id": "pubsub.projects.subscriptions.delete", + "path": "v1/{+subscription}", + "httpMethod": "DELETE", + "description": "Deletes an existing subscription. All pending messages in the subscription are immediately dropped. Calls to `Pull` after deletion will return `NOT_FOUND`. After a subscription is deleted, a new one may be created with the same name, but the new one has no association with the old subscription, or its topic unless the same topic is specified.", + "parameters": { + "subscription": { + "type": "string", + "description": "The subscription to delete.", + "required": true, + "pattern": "^projects/[^/]*/subscriptions/[^/]*$", + "location": "path" + } + }, + "parameterOrder": [ + "subscription" + ], + "response": { + "$ref": "Empty" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/pubsub" + ] + }, + "modifyAckDeadline": { + "id": "pubsub.projects.subscriptions.modifyAckDeadline", + "path": "v1/{+subscription}:modifyAckDeadline", + "httpMethod": "POST", + "description": "Modifies the ack deadline for a specific message. 
This method is useful to indicate that more time is needed to process a message by the subscriber, or to make the message available for redelivery if the processing was interrupted.", + "parameters": { + "subscription": { + "type": "string", + "description": "The name of the subscription.", + "required": true, + "pattern": "^projects/[^/]*/subscriptions/[^/]*$", + "location": "path" + } + }, + "parameterOrder": [ + "subscription" + ], + "request": { + "$ref": "ModifyAckDeadlineRequest" + }, + "response": { + "$ref": "Empty" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/pubsub" + ] + }, + "acknowledge": { + "id": "pubsub.projects.subscriptions.acknowledge", + "path": "v1/{+subscription}:acknowledge", + "httpMethod": "POST", + "description": "Acknowledges the messages associated with the `ack_ids` in the `AcknowledgeRequest`. The Pub/Sub system can remove the relevant messages from the subscription. Acknowledging a message whose ack deadline has expired may succeed, but such a message may be redelivered later. Acknowledging a message more than once will not result in an error.", + "parameters": { + "subscription": { + "type": "string", + "description": "The subscription whose message is being acknowledged.", + "required": true, + "pattern": "^projects/[^/]*/subscriptions/[^/]*$", + "location": "path" + } + }, + "parameterOrder": [ + "subscription" + ], + "request": { + "$ref": "AcknowledgeRequest" + }, + "response": { + "$ref": "Empty" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/pubsub" + ] + }, + "pull": { + "id": "pubsub.projects.subscriptions.pull", + "path": "v1/{+subscription}:pull", + "httpMethod": "POST", + "description": "Pulls messages from the server. Returns an empty list if there are no messages available in the backlog. The server may return `UNAVAILABLE` if there are too many concurrent pull requests pending for the given subscription.", + "parameters": { + "subscription": { + "type": "string", + "description": "The subscription from which messages should be pulled.", + "required": true, + "pattern": "^projects/[^/]*/subscriptions/[^/]*$", + "location": "path" + } + }, + "parameterOrder": [ + "subscription" + ], + "request": { + "$ref": "PullRequest" + }, + "response": { + "$ref": "PullResponse" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/pubsub" + ] + }, + "modifyPushConfig": { + "id": "pubsub.projects.subscriptions.modifyPushConfig", + "path": "v1/{+subscription}:modifyPushConfig", + "httpMethod": "POST", + "description": "Modifies the `PushConfig` for a specified subscription. This may be used to change a push subscription to a pull one (signified by an empty `PushConfig`) or vice versa, or change the endpoint URL and other attributes of a push subscription. 
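Taken together, `pull` and `acknowledge` form the consume loop for a pull subscription: fetch a batch, process it, then ack so the backlog can drop the messages. A sketch under the same assumptions (remember the payload arrives base64-encoded):

import (
	"encoding/base64"

	"google.golang.org/api/pubsub/v1"
)

// pullOnce pulls up to 10 messages, decodes them, and acknowledges them.
func pullOnce(svc *pubsub.Service, sub string) error {
	resp, err := svc.Projects.Subscriptions.Pull(sub, &pubsub.PullRequest{
		ReturnImmediately: true,
		MaxMessages:       10,
	}).Do()
	if err != nil {
		return err
	}
	var ackIDs []string
	for _, rm := range resp.ReceivedMessages {
		payload, err := base64.StdEncoding.DecodeString(rm.Message.Data)
		if err != nil {
			return err
		}
		_ = payload // process the message here
		ackIDs = append(ackIDs, rm.AckId)
	}
	if len(ackIDs) == 0 {
		return nil
	}
	_, err = svc.Projects.Subscriptions.Acknowledge(sub, &pubsub.AcknowledgeRequest{
		AckIds: ackIDs,
	}).Do()
	return err
}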
Messages will accumulate for delivery continuously through the call regardless of changes to the `PushConfig`.", + "parameters": { + "subscription": { + "type": "string", + "description": "The name of the subscription.", + "required": true, + "pattern": "^projects/[^/]*/subscriptions/[^/]*$", + "location": "path" + } + }, + "parameterOrder": [ + "subscription" + ], + "request": { + "$ref": "ModifyPushConfigRequest" + }, + "response": { + "$ref": "Empty" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/pubsub" + ] + } + } + } + } + } + } +} diff --git a/vendor/google.golang.org/api/pubsub/v1/pubsub-gen.go b/vendor/google.golang.org/api/pubsub/v1/pubsub-gen.go new file mode 100644 index 000000000..b2aa89daa --- /dev/null +++ b/vendor/google.golang.org/api/pubsub/v1/pubsub-gen.go @@ -0,0 +1,3316 @@ +// Package pubsub provides access to the Google Cloud Pub/Sub API. +// +// See https://cloud.google.com/pubsub/docs +// +// Usage example: +// +// import "google.golang.org/api/pubsub/v1" +// ... +// pubsubService, err := pubsub.New(oauthHttpClient) +package pubsub + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + context "golang.org/x/net/context" + ctxhttp "golang.org/x/net/context/ctxhttp" + gensupport "google.golang.org/api/gensupport" + googleapi "google.golang.org/api/googleapi" + "io" + "net/http" + "net/url" + "strconv" + "strings" +) + +// Always reference these packages, just in case the auto-generated code +// below doesn't. +var _ = bytes.NewBuffer +var _ = strconv.Itoa +var _ = fmt.Sprintf +var _ = json.NewDecoder +var _ = io.Copy +var _ = url.Parse +var _ = gensupport.MarshalJSON +var _ = googleapi.Version +var _ = errors.New +var _ = strings.Replace +var _ = context.Canceled +var _ = ctxhttp.Do + +const apiId = "pubsub:v1" +const apiName = "pubsub" +const apiVersion = "v1" +const basePath = "https://pubsub.googleapis.com/" + +// OAuth2 scopes used by this API. 
+const ( + // View and manage your data across Google Cloud Platform services + CloudPlatformScope = "https://www.googleapis.com/auth/cloud-platform" + + // View and manage Pub/Sub topics and subscriptions + PubsubScope = "https://www.googleapis.com/auth/pubsub" +) + +func New(client *http.Client) (*Service, error) { + if client == nil { + return nil, errors.New("client is nil") + } + s := &Service{client: client, BasePath: basePath} + s.Projects = NewProjectsService(s) + return s, nil +} + +type Service struct { + client *http.Client + BasePath string // API endpoint base URL + UserAgent string // optional additional User-Agent fragment + + Projects *ProjectsService +} + +func (s *Service) userAgent() string { + if s.UserAgent == "" { + return googleapi.UserAgent + } + return googleapi.UserAgent + " " + s.UserAgent +} + +func NewProjectsService(s *Service) *ProjectsService { + rs := &ProjectsService{s: s} + rs.Subscriptions = NewProjectsSubscriptionsService(s) + rs.Topics = NewProjectsTopicsService(s) + return rs +} + +type ProjectsService struct { + s *Service + + Subscriptions *ProjectsSubscriptionsService + + Topics *ProjectsTopicsService +} + +func NewProjectsSubscriptionsService(s *Service) *ProjectsSubscriptionsService { + rs := &ProjectsSubscriptionsService{s: s} + return rs +} + +type ProjectsSubscriptionsService struct { + s *Service +} + +func NewProjectsTopicsService(s *Service) *ProjectsTopicsService { + rs := &ProjectsTopicsService{s: s} + rs.Subscriptions = NewProjectsTopicsSubscriptionsService(s) + return rs +} + +type ProjectsTopicsService struct { + s *Service + + Subscriptions *ProjectsTopicsSubscriptionsService +} + +func NewProjectsTopicsSubscriptionsService(s *Service) *ProjectsTopicsSubscriptionsService { + rs := &ProjectsTopicsSubscriptionsService{s: s} + return rs +} + +type ProjectsTopicsSubscriptionsService struct { + s *Service +} + +// AcknowledgeRequest: Request for the Acknowledge method. +type AcknowledgeRequest struct { + // AckIds: The acknowledgment ID for the messages being acknowledged + // that was returned by the Pub/Sub system in the `Pull` response. Must + // not be empty. + AckIds []string `json:"ackIds,omitempty"` + + // ForceSendFields is a list of field names (e.g. "AckIds") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *AcknowledgeRequest) MarshalJSON() ([]byte, error) { + type noMethod AcknowledgeRequest + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +// Binding: Associates `members` with a `role`. +type Binding struct { + // Members: Specifies the identities requesting access for a Cloud + // Platform resource. `members` can have the following formats: * + // `allUsers`: A special identifier that represents anyone who is on the + // internet; with or without a Google account. * + // `allAuthenticatedUsers`: A special identifier that represents anyone + // who is authenticated with a Google account or a service account. * + // `user:{emailid}`: An email address that represents a specific Google + // account. For example, `alice@gmail.com` or `joe@example.com`. * + // `serviceAccount:{emailid}`: An email address that represents a + // service account. 
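One way to obtain the `oauthHttpClient` that the package comment asks for is Application Default Credentials via golang.org/x/oauth2/google; this is an assumption about the caller's setup, not something this package mandates:

import (
	"golang.org/x/net/context"
	"golang.org/x/oauth2/google"
	"google.golang.org/api/pubsub/v1"
)

// newPubsubService wires an OAuth2-authenticated client into New above.
func newPubsubService(ctx context.Context) (*pubsub.Service, error) {
	// DefaultClient returns an *http.Client that attaches tokens with
	// the requested scope to each request.
	client, err := google.DefaultClient(ctx, pubsub.PubsubScope)
	if err != nil {
		return nil, err
	}
	return pubsub.New(client)
}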
For example, + // `my-other-app@appspot.gserviceaccount.com`. * `group:{emailid}`: An + // email address that represents a Google group. For example, + // `admins@example.com`. * `domain:{domain}`: A Google Apps domain name + // that represents all the users of that domain. For example, + // `google.com` or `example.com`. + Members []string `json:"members,omitempty"` + + // Role: Role that is assigned to `members`. For example, + // `roles/viewer`, `roles/editor`, or `roles/owner`. Required + Role string `json:"role,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Members") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *Binding) MarshalJSON() ([]byte, error) { + type noMethod Binding + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +// Empty: A generic empty message that you can re-use to avoid defining +// duplicated empty messages in your APIs. A typical example is to use +// it as the request or the response type of an API method. For +// instance: service Foo { rpc Bar(google.protobuf.Empty) returns +// (google.protobuf.Empty); } The JSON representation for `Empty` is +// empty JSON object `{}`. +type Empty struct { + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` +} + +// ListSubscriptionsResponse: Response for the `ListSubscriptions` +// method. +type ListSubscriptionsResponse struct { + // NextPageToken: If not empty, indicates that there may be more + // subscriptions that match the request; this value should be passed in + // a new `ListSubscriptionsRequest` to get more subscriptions. + NextPageToken string `json:"nextPageToken,omitempty"` + + // Subscriptions: The subscriptions that match the request. + Subscriptions []*Subscription `json:"subscriptions,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "NextPageToken") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *ListSubscriptionsResponse) MarshalJSON() ([]byte, error) { + type noMethod ListSubscriptionsResponse + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +// ListTopicSubscriptionsResponse: Response for the +// `ListTopicSubscriptions` method. +type ListTopicSubscriptionsResponse struct { + // NextPageToken: If not empty, indicates that there may be more + // subscriptions that match the request; this value should be passed in + // a new `ListTopicSubscriptionsRequest` to get more subscriptions. + NextPageToken string `json:"nextPageToken,omitempty"` + + // Subscriptions: The names of the subscriptions that match the request. 
+ Subscriptions []string `json:"subscriptions,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "NextPageToken") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *ListTopicSubscriptionsResponse) MarshalJSON() ([]byte, error) { + type noMethod ListTopicSubscriptionsResponse + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +// ListTopicsResponse: Response for the `ListTopics` method. +type ListTopicsResponse struct { + // NextPageToken: If not empty, indicates that there may be more topics + // that match the request; this value should be passed in a new + // `ListTopicsRequest`. + NextPageToken string `json:"nextPageToken,omitempty"` + + // Topics: The resulting topics. + Topics []*Topic `json:"topics,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "NextPageToken") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *ListTopicsResponse) MarshalJSON() ([]byte, error) { + type noMethod ListTopicsResponse + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +// ModifyAckDeadlineRequest: Request for the ModifyAckDeadline method. +type ModifyAckDeadlineRequest struct { + // AckDeadlineSeconds: The new ack deadline with respect to the time + // this request was sent to the Pub/Sub system. Must be >= 0. For + // example, if the value is 10, the new ack deadline will expire 10 + // seconds after the `ModifyAckDeadline` call was made. Specifying zero + // may immediately make the message available for another pull request. + AckDeadlineSeconds int64 `json:"ackDeadlineSeconds,omitempty"` + + // AckIds: List of acknowledgment IDs. + AckIds []string `json:"ackIds,omitempty"` + + // ForceSendFields is a list of field names (e.g. "AckDeadlineSeconds") + // to unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *ModifyAckDeadlineRequest) MarshalJSON() ([]byte, error) { + type noMethod ModifyAckDeadlineRequest + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +// ModifyPushConfigRequest: Request for the ModifyPushConfig method. +type ModifyPushConfigRequest struct { + // PushConfig: The push configuration for future deliveries. 
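`ForceSendFields` matters exactly when a meaningful value collides with Go's zero value, and `ModifyAckDeadlineRequest` above is the canonical case: a deadline of 0 is significant (it can make a message immediately available for another pull), yet `omitempty` would silently drop it. A sketch (illustrative helper name):

import "google.golang.org/api/pubsub/v1"

// nackMessage returns a message to the backlog by zeroing its ack deadline.
func nackMessage(svc *pubsub.Service, sub, ackID string) error {
	req := &pubsub.ModifyAckDeadlineRequest{
		AckIds:             []string{ackID},
		AckDeadlineSeconds: 0,
		// 0 is the field's empty value and would normally be omitted
		// from the JSON body; ForceSendFields sends it anyway.
		ForceSendFields: []string{"AckDeadlineSeconds"},
	}
	_, err := svc.Projects.Subscriptions.ModifyAckDeadline(sub, req).Do()
	return err
}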
An empty + // `pushConfig` indicates that the Pub/Sub system should stop pushing + // messages from the given subscription and allow messages to be pulled + // and acknowledged - effectively pausing the subscription if `Pull` is + // not called. + PushConfig *PushConfig `json:"pushConfig,omitempty"` + + // ForceSendFields is a list of field names (e.g. "PushConfig") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *ModifyPushConfigRequest) MarshalJSON() ([]byte, error) { + type noMethod ModifyPushConfigRequest + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +// Policy: Defines an Identity and Access Management (IAM) policy. It is +// used to specify access control policies for Cloud Platform resources. +// A `Policy` consists of a list of `bindings`. A `Binding` binds a list +// of `members` to a `role`, where the members can be user accounts, +// Google groups, Google domains, and service accounts. A `role` is a +// named list of permissions defined by IAM. **Example** { "bindings": [ +// { "role": "roles/owner", "members": [ "user:mike@example.com", +// "group:admins@example.com", "domain:google.com", +// "serviceAccount:my-other-app@appspot.gserviceaccount.com"] }, { +// "role": "roles/viewer", "members": ["user:sean@example.com"] } ] } +// For a description of IAM and its features, see the [IAM developer's +// guide](https://cloud.google.com/iam). +type Policy struct { + // Bindings: Associates a list of `members` to a `role`. Multiple + // `bindings` must not be specified for the same `role`. `bindings` with + // no members will result in an error. + Bindings []*Binding `json:"bindings,omitempty"` + + // Etag: Can be used to perform a read-modify-write. + Etag string `json:"etag,omitempty"` + + // Version: Version of the `Policy`. The default version is 0. + Version int64 `json:"version,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Bindings") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *Policy) MarshalJSON() ([]byte, error) { + type noMethod Policy + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +// PublishRequest: Request for the Publish method. +type PublishRequest struct { + // Messages: The messages to publish. + Messages []*PubsubMessage `json:"messages,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Messages") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. 
+ ForceSendFields []string `json:"-"` +} + +func (s *PublishRequest) MarshalJSON() ([]byte, error) { + type noMethod PublishRequest + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +// PublishResponse: Response for the `Publish` method. +type PublishResponse struct { + // MessageIds: The server-assigned ID of each published message, in the + // same order as the messages in the request. IDs are guaranteed to be + // unique within the topic. + MessageIds []string `json:"messageIds,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "MessageIds") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *PublishResponse) MarshalJSON() ([]byte, error) { + type noMethod PublishResponse + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +// PubsubMessage: A message data and its attributes. The message payload +// must not be empty; it must contain either a non-empty data field, or +// at least one attribute. +type PubsubMessage struct { + // Attributes: Optional attributes for this message. + Attributes map[string]string `json:"attributes,omitempty"` + + // Data: The message payload. For JSON requests, the value of this field + // must be base64-encoded. + Data string `json:"data,omitempty"` + + // MessageId: ID of this message, assigned by the server when the + // message is published. Guaranteed to be unique within the topic. This + // value may be read by a subscriber that receives a `PubsubMessage` via + // a `Pull` call or a push delivery. It must not be populated by the + // publisher in a `Publish` call. + MessageId string `json:"messageId,omitempty"` + + // PublishTime: The time at which the message was published, populated + // by the server when it receives the `Publish` call. It must not be + // populated by the publisher in a `Publish` call. + PublishTime string `json:"publishTime,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Attributes") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *PubsubMessage) MarshalJSON() ([]byte, error) { + type noMethod PubsubMessage + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +// PullRequest: Request for the `Pull` method. +type PullRequest struct { + // MaxMessages: The maximum number of messages returned for this + // request. The Pub/Sub system may return fewer than the number + // specified. + MaxMessages int64 `json:"maxMessages,omitempty"` + + // ReturnImmediately: If this is specified as true the system will + // respond immediately even if it is not able to return a message in the + // `Pull` response. 
Otherwise the system is allowed to wait until at + // least one message is available rather than returning no messages. The + // client may cancel the request if it does not wish to wait any longer + // for the response. + ReturnImmediately bool `json:"returnImmediately,omitempty"` + + // ForceSendFields is a list of field names (e.g. "MaxMessages") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *PullRequest) MarshalJSON() ([]byte, error) { + type noMethod PullRequest + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +// PullResponse: Response for the `Pull` method. +type PullResponse struct { + // ReceivedMessages: Received Pub/Sub messages. The Pub/Sub system will + // return zero messages if there are no more available in the backlog. + // The Pub/Sub system may return fewer than the `maxMessages` requested + // even if there are more messages available in the backlog. + ReceivedMessages []*ReceivedMessage `json:"receivedMessages,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "ReceivedMessages") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *PullResponse) MarshalJSON() ([]byte, error) { + type noMethod PullResponse + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +// PushConfig: Configuration for a push delivery endpoint. +type PushConfig struct { + // Attributes: Endpoint configuration attributes. Every endpoint has a + // set of API supported attributes that can be used to control different + // aspects of the message delivery. The currently supported attribute is + // `x-goog-version`, which you can use to change the format of the push + // message. This attribute indicates the version of the data expected by + // the endpoint. This controls the shape of the envelope (i.e. its + // fields and metadata). The endpoint version is based on the version of + // the Pub/Sub API. If not present during the `CreateSubscription` call, + // it will default to the version of the API used to make such call. If + // not present during a `ModifyPushConfig` call, its value will not be + // changed. `GetSubscription` calls will always return a valid version, + // even if the subscription was created without this attribute. The + // possible values for this attribute are: * `v1beta1`: uses the push + // format defined in the v1beta1 Pub/Sub API. * `v1` or `v1beta2`: uses + // the push format defined in the v1 Pub/Sub API. + Attributes map[string]string `json:"attributes,omitempty"` + + // PushEndpoint: A URL locating the endpoint to which messages should be + // pushed. For example, a Webhook endpoint might use + // "https://example.com/push". 
+ PushEndpoint string `json:"pushEndpoint,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Attributes") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *PushConfig) MarshalJSON() ([]byte, error) { + type noMethod PushConfig + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +// ReceivedMessage: A message and its corresponding acknowledgment ID. +type ReceivedMessage struct { + // AckId: This ID can be used to acknowledge the received message. + AckId string `json:"ackId,omitempty"` + + // Message: The message. + Message *PubsubMessage `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "AckId") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *ReceivedMessage) MarshalJSON() ([]byte, error) { + type noMethod ReceivedMessage + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +// SetIamPolicyRequest: Request message for `SetIamPolicy` method. +type SetIamPolicyRequest struct { + // Policy: REQUIRED: The complete policy to be applied to the + // `resource`. The size of the policy is limited to a few 10s of KB. An + // empty policy is a valid policy but certain Cloud Platform services + // (such as Projects) might reject them. + Policy *Policy `json:"policy,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Policy") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *SetIamPolicyRequest) MarshalJSON() ([]byte, error) { + type noMethod SetIamPolicyRequest + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +// Subscription: A subscription resource. +type Subscription struct { + // AckDeadlineSeconds: This value is the maximum time after a subscriber + // receives a message before the subscriber should acknowledge the + // message. After message delivery but before the ack deadline expires + // and before the message is acknowledged, it is an outstanding message + // and will not be delivered again during that time (on a best-effort + // basis). For pull delivery this value is used as the initial value for + // the ack deadline. To override this value for a given message, call + // `ModifyAckDeadline` with the corresponding `ack_id`. For push + // delivery, this value is also used to set the request timeout for the + // call to the push endpoint. If the subscriber never acknowledges the + // message, the Pub/Sub system will eventually redeliver the message. 
If + // this parameter is not set, the default value of 10 seconds is used. + AckDeadlineSeconds int64 `json:"ackDeadlineSeconds,omitempty"` + + // Name: The name of the subscription. It must have the format + // "projects/{project}/subscriptions/{subscription}". `{subscription}` + // must start with a letter, and contain only letters (`[A-Za-z]`), + // numbers (`[0-9]`), dashes (`-`), underscores (`_`), periods (`.`), + // tildes (`~`), plus (`+`) or percent signs (`%`). It must be between 3 + // and 255 characters in length, and it must not start with "goog". + Name string `json:"name,omitempty"` + + // PushConfig: If push delivery is used with this subscription, this + // field is used to configure it. An empty `pushConfig` signifies that + // the subscriber will pull and ack messages using API methods. + PushConfig *PushConfig `json:"pushConfig,omitempty"` + + // Topic: The name of the topic from which this subscription is + // receiving messages. The value of this field will be `_deleted-topic_` + // if the topic has been deleted. + Topic string `json:"topic,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "AckDeadlineSeconds") + // to unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *Subscription) MarshalJSON() ([]byte, error) { + type noMethod Subscription + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +// TestIamPermissionsRequest: Request message for `TestIamPermissions` +// method. +type TestIamPermissionsRequest struct { + // Permissions: The set of permissions to check for the `resource`. + // Permissions with wildcards (such as '*' or 'storage.*') are not + // allowed. + Permissions []string `json:"permissions,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Permissions") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *TestIamPermissionsRequest) MarshalJSON() ([]byte, error) { + type noMethod TestIamPermissionsRequest + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +// TestIamPermissionsResponse: Response message for `TestIamPermissions` +// method. +type TestIamPermissionsResponse struct { + // Permissions: A subset of `TestPermissionsRequest.permissions` that + // the caller is allowed. + Permissions []string `json:"permissions,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Permissions") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. 
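`TestIamPermissions` echoes back the subset of the requested permissions that the caller actually holds, so a capability check reduces to comparing lengths. A sketch (the permission string is illustrative):

import "google.golang.org/api/pubsub/v1"

// canConsume reports whether the caller may pull from the subscription.
func canConsume(svc *pubsub.Service, sub string) (bool, error) {
	resp, err := svc.Projects.Subscriptions.TestIamPermissions(sub,
		&pubsub.TestIamPermissionsRequest{
			Permissions: []string{"pubsub.subscriptions.consume"},
		}).Do()
	if err != nil {
		return false, err
	}
	return len(resp.Permissions) == 1, nil
}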
However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *TestIamPermissionsResponse) MarshalJSON() ([]byte, error) { + type noMethod TestIamPermissionsResponse + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +// Topic: A topic resource. +type Topic struct { + // Name: The name of the topic. It must have the format + // "projects/{project}/topics/{topic}". `{topic}` must start with a + // letter, and contain only letters (`[A-Za-z]`), numbers (`[0-9]`), + // dashes (`-`), underscores (`_`), periods (`.`), tildes (`~`), plus + // (`+`) or percent signs (`%`). It must be between 3 and 255 characters + // in length, and it must not start with "goog". + Name string `json:"name,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Name") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *Topic) MarshalJSON() ([]byte, error) { + type noMethod Topic + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +// method id "pubsub.projects.subscriptions.acknowledge": + +type ProjectsSubscriptionsAcknowledgeCall struct { + s *Service + subscription string + acknowledgerequest *AcknowledgeRequest + urlParams_ gensupport.URLParams + ctx_ context.Context +} + +// Acknowledge: Acknowledges the messages associated with the `ack_ids` +// in the `AcknowledgeRequest`. The Pub/Sub system can remove the +// relevant messages from the subscription. Acknowledging a message +// whose ack deadline has expired may succeed, but such a message may be +// redelivered later. Acknowledging a message more than once will not +// result in an error. +func (r *ProjectsSubscriptionsService) Acknowledge(subscription string, acknowledgerequest *AcknowledgeRequest) *ProjectsSubscriptionsAcknowledgeCall { + c := &ProjectsSubscriptionsAcknowledgeCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.subscription = subscription + c.acknowledgerequest = acknowledgerequest + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ProjectsSubscriptionsAcknowledgeCall) Fields(s ...googleapi.Field) *ProjectsSubscriptionsAcknowledgeCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. 
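Because a `Pull` with `returnImmediately` unset may block until a message is available, the `Context` setter above is the natural way to bound the wait; cancellation aborts the pending HTTP request. A sketch (using x/net/context, matching this file's imports):

import (
	"time"

	"golang.org/x/net/context"
	"google.golang.org/api/pubsub/v1"
)

// pullWithTimeout gives the server at most 30s to return a message.
func pullWithTimeout(svc *pubsub.Service, sub string) (*pubsub.PullResponse, error) {
	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()
	return svc.Projects.Subscriptions.Pull(sub, &pubsub.PullRequest{
		MaxMessages: 1,
	}).Context(ctx).Do()
}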
+func (c *ProjectsSubscriptionsAcknowledgeCall) Context(ctx context.Context) *ProjectsSubscriptionsAcknowledgeCall { + c.ctx_ = ctx + return c +} + +func (c *ProjectsSubscriptionsAcknowledgeCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.acknowledgerequest) + if err != nil { + return nil, err + } + ctype := "application/json" + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+subscription}:acknowledge") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "subscription": c.subscription, + }) + req.Header.Set("Content-Type", ctype) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "pubsub.projects.subscriptions.acknowledge" call. +// Exactly one of *Empty or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Empty.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *ProjectsSubscriptionsAcknowledgeCall) Do(opts ...googleapi.CallOption) (*Empty, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Empty{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Acknowledges the messages associated with the `ack_ids` in the `AcknowledgeRequest`. The Pub/Sub system can remove the relevant messages from the subscription. Acknowledging a message whose ack deadline has expired may succeed, but such a message may be redelivered later. Acknowledging a message more than once will not result in an error.", + // "httpMethod": "POST", + // "id": "pubsub.projects.subscriptions.acknowledge", + // "parameterOrder": [ + // "subscription" + // ], + // "parameters": { + // "subscription": { + // "description": "The subscription whose message is being acknowledged.", + // "location": "path", + // "pattern": "^projects/[^/]*/subscriptions/[^/]*$", + // "required": true, + // "type": "string" + // } + // }, + // "path": "v1/{+subscription}:acknowledge", + // "request": { + // "$ref": "AcknowledgeRequest" + // }, + // "response": { + // "$ref": "Empty" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/pubsub" + // ] + // } + +} + +// method id "pubsub.projects.subscriptions.create": + +type ProjectsSubscriptionsCreateCall struct { + s *Service + name string + subscription *Subscription + urlParams_ gensupport.URLParams + ctx_ context.Context +} + +// Create: Creates a subscription to a given topic for a given +// subscriber. If the subscription already exists, returns +// `ALREADY_EXISTS`. 
If the corresponding topic doesn't exist, returns +// `NOT_FOUND`. If the name is not provided in the request, the server +// will assign a random name for this subscription on the same project +// as the topic. +func (r *ProjectsSubscriptionsService) Create(name string, subscription *Subscription) *ProjectsSubscriptionsCreateCall { + c := &ProjectsSubscriptionsCreateCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.name = name + c.subscription = subscription + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ProjectsSubscriptionsCreateCall) Fields(s ...googleapi.Field) *ProjectsSubscriptionsCreateCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ProjectsSubscriptionsCreateCall) Context(ctx context.Context) *ProjectsSubscriptionsCreateCall { + c.ctx_ = ctx + return c +} + +func (c *ProjectsSubscriptionsCreateCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.subscription) + if err != nil { + return nil, err + } + ctype := "application/json" + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+name}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("PUT", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "name": c.name, + }) + req.Header.Set("Content-Type", ctype) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "pubsub.projects.subscriptions.create" call. +// Exactly one of *Subscription or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Subscription.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *ProjectsSubscriptionsCreateCall) Do(opts ...googleapi.CallOption) (*Subscription, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Subscription{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Creates a subscription to a given topic for a given subscriber. If the subscription already exists, returns `ALREADY_EXISTS`. If the corresponding topic doesn't exist, returns `NOT_FOUND`. 
If the name is not provided in the request, the server will assign a random name for this subscription on the same project as the topic.", + // "httpMethod": "PUT", + // "id": "pubsub.projects.subscriptions.create", + // "parameterOrder": [ + // "name" + // ], + // "parameters": { + // "name": { + // "description": "The name of the subscription. It must have the format `\"projects/{project}/subscriptions/{subscription}\"`. `{subscription}` must start with a letter, and contain only letters (`[A-Za-z]`), numbers (`[0-9]`), dashes (`-`), underscores (`_`), periods (`.`), tildes (`~`), plus (`+`) or percent signs (`%`). It must be between 3 and 255 characters in length, and it must not start with `\"goog\"`.", + // "location": "path", + // "pattern": "^projects/[^/]*/subscriptions/[^/]*$", + // "required": true, + // "type": "string" + // } + // }, + // "path": "v1/{+name}", + // "request": { + // "$ref": "Subscription" + // }, + // "response": { + // "$ref": "Subscription" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/pubsub" + // ] + // } + +} + +// method id "pubsub.projects.subscriptions.delete": + +type ProjectsSubscriptionsDeleteCall struct { + s *Service + subscription string + urlParams_ gensupport.URLParams + ctx_ context.Context +} + +// Delete: Deletes an existing subscription. All pending messages in the +// subscription are immediately dropped. Calls to `Pull` after deletion +// will return `NOT_FOUND`. After a subscription is deleted, a new one +// may be created with the same name, but the new one has no association +// with the old subscription, or its topic unless the same topic is +// specified. +func (r *ProjectsSubscriptionsService) Delete(subscription string) *ProjectsSubscriptionsDeleteCall { + c := &ProjectsSubscriptionsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.subscription = subscription + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ProjectsSubscriptionsDeleteCall) Fields(s ...googleapi.Field) *ProjectsSubscriptionsDeleteCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ProjectsSubscriptionsDeleteCall) Context(ctx context.Context) *ProjectsSubscriptionsDeleteCall { + c.ctx_ = ctx + return c +} + +func (c *ProjectsSubscriptionsDeleteCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+subscription}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("DELETE", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "subscription": c.subscription, + }) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "pubsub.projects.subscriptions.delete" call. +// Exactly one of *Empty or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Empty.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. 
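+//
+// Editor's sketch (assumed wiring; pubsubService from pubsub.New): the
+// NOT_FOUND behaviour described above surfaces as a *googleapi.Error:
+//
+//    _, err := pubsubService.Projects.Subscriptions.
+//        Delete("projects/p/subscriptions/s").Do()
+//    if e, ok := err.(*googleapi.Error); ok && e.Code == 404 {
+//        // subscription was already gone
+//    }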
+func (c *ProjectsSubscriptionsDeleteCall) Do(opts ...googleapi.CallOption) (*Empty, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Empty{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Deletes an existing subscription. All pending messages in the subscription are immediately dropped. Calls to `Pull` after deletion will return `NOT_FOUND`. After a subscription is deleted, a new one may be created with the same name, but the new one has no association with the old subscription, or its topic unless the same topic is specified.", + // "httpMethod": "DELETE", + // "id": "pubsub.projects.subscriptions.delete", + // "parameterOrder": [ + // "subscription" + // ], + // "parameters": { + // "subscription": { + // "description": "The subscription to delete.", + // "location": "path", + // "pattern": "^projects/[^/]*/subscriptions/[^/]*$", + // "required": true, + // "type": "string" + // } + // }, + // "path": "v1/{+subscription}", + // "response": { + // "$ref": "Empty" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/pubsub" + // ] + // } + +} + +// method id "pubsub.projects.subscriptions.get": + +type ProjectsSubscriptionsGetCall struct { + s *Service + subscription string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context +} + +// Get: Gets the configuration details of a subscription. +func (r *ProjectsSubscriptionsService) Get(subscription string) *ProjectsSubscriptionsGetCall { + c := &ProjectsSubscriptionsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.subscription = subscription + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ProjectsSubscriptionsGetCall) Fields(s ...googleapi.Field) *ProjectsSubscriptionsGetCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of If-None-Match. +func (c *ProjectsSubscriptionsGetCall) IfNoneMatch(entityTag string) *ProjectsSubscriptionsGetCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ProjectsSubscriptionsGetCall) Context(ctx context.Context) *ProjectsSubscriptionsGetCall { + c.ctx_ = ctx + return c +} + +func (c *ProjectsSubscriptionsGetCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+subscription}") + urls += "?"
+ c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "subscription": c.subscription, + }) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + req.Header.Set("If-None-Match", c.ifNoneMatch_) + } + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "pubsub.projects.subscriptions.get" call. +// Exactly one of *Subscription or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Subscription.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *ProjectsSubscriptionsGetCall) Do(opts ...googleapi.CallOption) (*Subscription, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Subscription{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Gets the configuration details of a subscription.", + // "httpMethod": "GET", + // "id": "pubsub.projects.subscriptions.get", + // "parameterOrder": [ + // "subscription" + // ], + // "parameters": { + // "subscription": { + // "description": "The name of the subscription to get.", + // "location": "path", + // "pattern": "^projects/[^/]*/subscriptions/[^/]*$", + // "required": true, + // "type": "string" + // } + // }, + // "path": "v1/{+subscription}", + // "response": { + // "$ref": "Subscription" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/pubsub" + // ] + // } + +} + +// method id "pubsub.projects.subscriptions.getIamPolicy": + +type ProjectsSubscriptionsGetIamPolicyCall struct { + s *Service + resource string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context +} + +// GetIamPolicy: Gets the access control policy for a `resource`. Is +// empty if the policy or the resource does not exist. +func (r *ProjectsSubscriptionsService) GetIamPolicy(resource string) *ProjectsSubscriptionsGetIamPolicyCall { + c := &ProjectsSubscriptionsGetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.resource = resource + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ProjectsSubscriptionsGetIamPolicyCall) Fields(s ...googleapi.Field) *ProjectsSubscriptionsGetIamPolicyCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of If-None-Match.
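+//
+// Editor's sketch (assumed wiring): a conditional re-read using the ETag of
+// a previously fetched policy; prevETag and cachedPolicy are assumptions:
+//
+//    p, err := pubsubService.Projects.Subscriptions.
+//        GetIamPolicy(resource).IfNoneMatch(prevETag).Do()
+//    if googleapi.IsNotModified(err) {
+//        p = cachedPolicy // unchanged since prevETag
+//    }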
+func (c *ProjectsSubscriptionsGetIamPolicyCall) IfNoneMatch(entityTag string) *ProjectsSubscriptionsGetIamPolicyCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ProjectsSubscriptionsGetIamPolicyCall) Context(ctx context.Context) *ProjectsSubscriptionsGetIamPolicyCall { + c.ctx_ = ctx + return c +} + +func (c *ProjectsSubscriptionsGetIamPolicyCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+resource}:getIamPolicy") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "resource": c.resource, + }) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + req.Header.Set("If-None-Match", c.ifNoneMatch_) + } + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "pubsub.projects.subscriptions.getIamPolicy" call. +// Exactly one of *Policy or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Policy.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *ProjectsSubscriptionsGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Policy{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Gets the access control policy for a `resource`. Is empty if the policy or the resource does not exist.", + // "httpMethod": "GET", + // "id": "pubsub.projects.subscriptions.getIamPolicy", + // "parameterOrder": [ + // "resource" + // ], + // "parameters": { + // "resource": { + // "description": "REQUIRED: The resource for which policy is being requested. `resource` is usually specified as a path, such as, `projects/{project}/zones/{zone}/disks/{disk}`. The format for the path specified in this value is resource specific and is specified in the documentation for the respective GetIamPolicy rpc.", + // "location": "path", + // "pattern": "^projects/[^/]*/subscriptions/[^/]*$", + // "required": true, + // "type": "string" + // } + // }, + // "path": "v1/{+resource}:getIamPolicy", + // "response": { + // "$ref": "Policy" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/pubsub" + // ] + // } + +} + +// method id "pubsub.projects.subscriptions.list": + +type ProjectsSubscriptionsListCall struct { + s *Service + project string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context +} + +// List: Lists matching subscriptions. 
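+//
+// Editor's sketch (assumed wiring; response field names follow the
+// ListSubscriptionsResponse schema defined earlier in this file): draining
+// every page via PageToken, with a hypothetical helper name:
+//
+//    func listAll(svc *pubsub.Service, project string) ([]*pubsub.Subscription, error) {
+//        var subs []*pubsub.Subscription
+//        token := ""
+//        for {
+//            resp, err := svc.Projects.Subscriptions.List(project).
+//                PageSize(100).PageToken(token).Do()
+//            if err != nil {
+//                return nil, err
+//            }
+//            subs = append(subs, resp.Subscriptions...)
+//            if resp.NextPageToken == "" {
+//                return subs, nil
+//            }
+//            token = resp.NextPageToken
+//        }
+//    }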
+func (r *ProjectsSubscriptionsService) List(project string) *ProjectsSubscriptionsListCall { + c := &ProjectsSubscriptionsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + return c +} + +// PageSize sets the optional parameter "pageSize": Maximum number of +// subscriptions to return. +func (c *ProjectsSubscriptionsListCall) PageSize(pageSize int64) *ProjectsSubscriptionsListCall { + c.urlParams_.Set("pageSize", fmt.Sprint(pageSize)) + return c +} + +// PageToken sets the optional parameter "pageToken": The value returned +// by the last `ListSubscriptionsResponse`; indicates that this is a +// continuation of a prior `ListSubscriptions` call, and that the system +// should return the next page of data. +func (c *ProjectsSubscriptionsListCall) PageToken(pageToken string) *ProjectsSubscriptionsListCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ProjectsSubscriptionsListCall) Fields(s ...googleapi.Field) *ProjectsSubscriptionsListCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of If-None-Match. +func (c *ProjectsSubscriptionsListCall) IfNoneMatch(entityTag string) *ProjectsSubscriptionsListCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ProjectsSubscriptionsListCall) Context(ctx context.Context) *ProjectsSubscriptionsListCall { + c.ctx_ = ctx + return c +} + +func (c *ProjectsSubscriptionsListCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+project}/subscriptions") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + }) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + req.Header.Set("If-None-Match", c.ifNoneMatch_) + } + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "pubsub.projects.subscriptions.list" call. +// Exactly one of *ListSubscriptionsResponse or error will be non-nil. +// Any non-2xx status code is an error. Response headers are in either +// *ListSubscriptionsResponse.ServerResponse.Header or (if a response +// was returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *ProjectsSubscriptionsListCall) Do(opts ...googleapi.CallOption) (*ListSubscriptionsResponse, error) { + gensupport.SetOptions(c.urlParams_, opts...)
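+ // Note (editor): a 304 from a conditional (If-None-Match) request is
+ // surfaced below as a *googleapi.Error rather than decoded as a body.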
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &ListSubscriptionsResponse{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Lists matching subscriptions.", + // "httpMethod": "GET", + // "id": "pubsub.projects.subscriptions.list", + // "parameterOrder": [ + // "project" + // ], + // "parameters": { + // "pageSize": { + // "description": "Maximum number of subscriptions to return.", + // "format": "int32", + // "location": "query", + // "type": "integer" + // }, + // "pageToken": { + // "description": "The value returned by the last `ListSubscriptionsResponse`; indicates that this is a continuation of a prior `ListSubscriptions` call, and that the system should return the next page of data.", + // "location": "query", + // "type": "string" + // }, + // "project": { + // "description": "The name of the cloud project that subscriptions belong to.", + // "location": "path", + // "pattern": "^projects/[^/]*$", + // "required": true, + // "type": "string" + // } + // }, + // "path": "v1/{+project}/subscriptions", + // "response": { + // "$ref": "ListSubscriptionsResponse" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/pubsub" + // ] + // } + +} + +// method id "pubsub.projects.subscriptions.modifyAckDeadline": + +type ProjectsSubscriptionsModifyAckDeadlineCall struct { + s *Service + subscription string + modifyackdeadlinerequest *ModifyAckDeadlineRequest + urlParams_ gensupport.URLParams + ctx_ context.Context +} + +// ModifyAckDeadline: Modifies the ack deadline for a specific message. +// This method is useful to indicate that more time is needed to process +// a message by the subscriber, or to make the message available for +// redelivery if the processing was interrupted. +func (r *ProjectsSubscriptionsService) ModifyAckDeadline(subscription string, modifyackdeadlinerequest *ModifyAckDeadlineRequest) *ProjectsSubscriptionsModifyAckDeadlineCall { + c := &ProjectsSubscriptionsModifyAckDeadlineCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.subscription = subscription + c.modifyackdeadlinerequest = modifyackdeadlinerequest + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ProjectsSubscriptionsModifyAckDeadlineCall) Fields(s ...googleapi.Field) *ProjectsSubscriptionsModifyAckDeadlineCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. 
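+//
+// Editor's sketch (assumed wiring; field names per the
+// ModifyAckDeadlineRequest schema): give a batch of messages another
+// 60 seconds before redelivery:
+//
+//    req := &pubsub.ModifyAckDeadlineRequest{
+//        AckIds:             ackIDs,
+//        AckDeadlineSeconds: 60,
+//    }
+//    _, err := pubsubService.Projects.Subscriptions.
+//        ModifyAckDeadline("projects/p/subscriptions/s", req).Do()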
+func (c *ProjectsSubscriptionsModifyAckDeadlineCall) Context(ctx context.Context) *ProjectsSubscriptionsModifyAckDeadlineCall { + c.ctx_ = ctx + return c +} + +func (c *ProjectsSubscriptionsModifyAckDeadlineCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.modifyackdeadlinerequest) + if err != nil { + return nil, err + } + ctype := "application/json" + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+subscription}:modifyAckDeadline") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "subscription": c.subscription, + }) + req.Header.Set("Content-Type", ctype) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "pubsub.projects.subscriptions.modifyAckDeadline" call. +// Exactly one of *Empty or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Empty.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *ProjectsSubscriptionsModifyAckDeadlineCall) Do(opts ...googleapi.CallOption) (*Empty, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Empty{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Modifies the ack deadline for a specific message. This method is useful to indicate that more time is needed to process a message by the subscriber, or to make the message available for redelivery if the processing was interrupted.", + // "httpMethod": "POST", + // "id": "pubsub.projects.subscriptions.modifyAckDeadline", + // "parameterOrder": [ + // "subscription" + // ], + // "parameters": { + // "subscription": { + // "description": "The name of the subscription.", + // "location": "path", + // "pattern": "^projects/[^/]*/subscriptions/[^/]*$", + // "required": true, + // "type": "string" + // } + // }, + // "path": "v1/{+subscription}:modifyAckDeadline", + // "request": { + // "$ref": "ModifyAckDeadlineRequest" + // }, + // "response": { + // "$ref": "Empty" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/pubsub" + // ] + // } + +} + +// method id "pubsub.projects.subscriptions.modifyPushConfig": + +type ProjectsSubscriptionsModifyPushConfigCall struct { + s *Service + subscription string + modifypushconfigrequest *ModifyPushConfigRequest + urlParams_ gensupport.URLParams + ctx_ context.Context +} + +// ModifyPushConfig: Modifies the `PushConfig` for a specified +// subscription. 
This may be used to change a push subscription to a +// pull one (signified by an empty `PushConfig`) or vice versa, or +// change the endpoint URL and other attributes of a push subscription. +// Messages will accumulate for delivery continuously through the call +// regardless of changes to the `PushConfig`. +func (r *ProjectsSubscriptionsService) ModifyPushConfig(subscription string, modifypushconfigrequest *ModifyPushConfigRequest) *ProjectsSubscriptionsModifyPushConfigCall { + c := &ProjectsSubscriptionsModifyPushConfigCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.subscription = subscription + c.modifypushconfigrequest = modifypushconfigrequest + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ProjectsSubscriptionsModifyPushConfigCall) Fields(s ...googleapi.Field) *ProjectsSubscriptionsModifyPushConfigCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ProjectsSubscriptionsModifyPushConfigCall) Context(ctx context.Context) *ProjectsSubscriptionsModifyPushConfigCall { + c.ctx_ = ctx + return c +} + +func (c *ProjectsSubscriptionsModifyPushConfigCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.modifypushconfigrequest) + if err != nil { + return nil, err + } + ctype := "application/json" + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+subscription}:modifyPushConfig") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "subscription": c.subscription, + }) + req.Header.Set("Content-Type", ctype) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "pubsub.projects.subscriptions.modifyPushConfig" call. +// Exactly one of *Empty or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Empty.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *ProjectsSubscriptionsModifyPushConfigCall) Do(opts ...googleapi.CallOption) (*Empty, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Empty{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Modifies the `PushConfig` for a specified subscription. 
This may be used to change a push subscription to a pull one (signified by an empty `PushConfig`) or vice versa, or change the endpoint URL and other attributes of a push subscription. Messages will accumulate for delivery continuously through the call regardless of changes to the `PushConfig`.", + // "httpMethod": "POST", + // "id": "pubsub.projects.subscriptions.modifyPushConfig", + // "parameterOrder": [ + // "subscription" + // ], + // "parameters": { + // "subscription": { + // "description": "The name of the subscription.", + // "location": "path", + // "pattern": "^projects/[^/]*/subscriptions/[^/]*$", + // "required": true, + // "type": "string" + // } + // }, + // "path": "v1/{+subscription}:modifyPushConfig", + // "request": { + // "$ref": "ModifyPushConfigRequest" + // }, + // "response": { + // "$ref": "Empty" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/pubsub" + // ] + // } + +} + +// method id "pubsub.projects.subscriptions.pull": + +type ProjectsSubscriptionsPullCall struct { + s *Service + subscription string + pullrequest *PullRequest + urlParams_ gensupport.URLParams + ctx_ context.Context +} + +// Pull: Pulls messages from the server. Returns an empty list if there +// are no messages available in the backlog. The server may return +// `UNAVAILABLE` if there are too many concurrent pull requests pending +// for the given subscription. +func (r *ProjectsSubscriptionsService) Pull(subscription string, pullrequest *PullRequest) *ProjectsSubscriptionsPullCall { + c := &ProjectsSubscriptionsPullCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.subscription = subscription + c.pullrequest = pullrequest + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ProjectsSubscriptionsPullCall) Fields(s ...googleapi.Field) *ProjectsSubscriptionsPullCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ProjectsSubscriptionsPullCall) Context(ctx context.Context) *ProjectsSubscriptionsPullCall { + c.ctx_ = ctx + return c +} + +func (c *ProjectsSubscriptionsPullCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.pullrequest) + if err != nil { + return nil, err + } + ctype := "application/json" + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+subscription}:pull") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "subscription": c.subscription, + }) + req.Header.Set("Content-Type", ctype) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "pubsub.projects.subscriptions.pull" call. +// Exactly one of *PullResponse or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *PullResponse.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. 
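+//
+// Editor's sketch (assumed wiring; field names follow the PullRequest and
+// PullResponse schemas): a minimal pull-then-ack cycle:
+//
+//    resp, err := pubsubService.Projects.Subscriptions.Pull(sub, &pubsub.PullRequest{
+//        MaxMessages:       10,
+//        ReturnImmediately: true,
+//    }).Do()
+//    if err == nil && len(resp.ReceivedMessages) > 0 {
+//        var ids []string
+//        for _, m := range resp.ReceivedMessages {
+//            ids = append(ids, m.AckId)
+//        }
+//        _, err = pubsubService.Projects.Subscriptions.Acknowledge(sub,
+//            &pubsub.AcknowledgeRequest{AckIds: ids}).Do()
+//    }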
+func (c *ProjectsSubscriptionsPullCall) Do(opts ...googleapi.CallOption) (*PullResponse, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &PullResponse{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Pulls messages from the server. Returns an empty list if there are no messages available in the backlog. The server may return `UNAVAILABLE` if there are too many concurrent pull requests pending for the given subscription.", + // "httpMethod": "POST", + // "id": "pubsub.projects.subscriptions.pull", + // "parameterOrder": [ + // "subscription" + // ], + // "parameters": { + // "subscription": { + // "description": "The subscription from which messages should be pulled.", + // "location": "path", + // "pattern": "^projects/[^/]*/subscriptions/[^/]*$", + // "required": true, + // "type": "string" + // } + // }, + // "path": "v1/{+subscription}:pull", + // "request": { + // "$ref": "PullRequest" + // }, + // "response": { + // "$ref": "PullResponse" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/pubsub" + // ] + // } + +} + +// method id "pubsub.projects.subscriptions.setIamPolicy": + +type ProjectsSubscriptionsSetIamPolicyCall struct { + s *Service + resource string + setiampolicyrequest *SetIamPolicyRequest + urlParams_ gensupport.URLParams + ctx_ context.Context +} + +// SetIamPolicy: Sets the access control policy on the specified +// resource. Replaces any existing policy. +func (r *ProjectsSubscriptionsService) SetIamPolicy(resource string, setiampolicyrequest *SetIamPolicyRequest) *ProjectsSubscriptionsSetIamPolicyCall { + c := &ProjectsSubscriptionsSetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.resource = resource + c.setiampolicyrequest = setiampolicyrequest + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ProjectsSubscriptionsSetIamPolicyCall) Fields(s ...googleapi.Field) *ProjectsSubscriptionsSetIamPolicyCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ProjectsSubscriptionsSetIamPolicyCall) Context(ctx context.Context) *ProjectsSubscriptionsSetIamPolicyCall { + c.ctx_ = ctx + return c +} + +func (c *ProjectsSubscriptionsSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.setiampolicyrequest) + if err != nil { + return nil, err + } + ctype := "application/json" + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+resource}:setIamPolicy") + urls += "?" 
+ c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "resource": c.resource, + }) + req.Header.Set("Content-Type", ctype) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "pubsub.projects.subscriptions.setIamPolicy" call. +// Exactly one of *Policy or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Policy.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *ProjectsSubscriptionsSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Policy{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Sets the access control policy on the specified resource. Replaces any existing policy.", + // "httpMethod": "POST", + // "id": "pubsub.projects.subscriptions.setIamPolicy", + // "parameterOrder": [ + // "resource" + // ], + // "parameters": { + // "resource": { + // "description": "REQUIRED: The resource for which policy is being specified. `resource` is usually specified as a path, such as, `projects/{project}/zones/{zone}/disks/{disk}`. The format for the path specified in this value is resource specific and is specified in the documentation for the respective SetIamPolicy rpc.", + // "location": "path", + // "pattern": "^projects/[^/]*/subscriptions/[^/]*$", + // "required": true, + // "type": "string" + // } + // }, + // "path": "v1/{+resource}:setIamPolicy", + // "request": { + // "$ref": "SetIamPolicyRequest" + // }, + // "response": { + // "$ref": "Policy" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/pubsub" + // ] + // } + +} + +// method id "pubsub.projects.subscriptions.testIamPermissions": + +type ProjectsSubscriptionsTestIamPermissionsCall struct { + s *Service + resource string + testiampermissionsrequest *TestIamPermissionsRequest + urlParams_ gensupport.URLParams + ctx_ context.Context +} + +// TestIamPermissions: Returns permissions that a caller has on the +// specified resource. +func (r *ProjectsSubscriptionsService) TestIamPermissions(resource string, testiampermissionsrequest *TestIamPermissionsRequest) *ProjectsSubscriptionsTestIamPermissionsCall { + c := &ProjectsSubscriptionsTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.resource = resource + c.testiampermissionsrequest = testiampermissionsrequest + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. 
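+//
+// Editor's sketch (assumed wiring; the permission string is an example
+// value): ask which of the listed permissions the caller actually holds:
+//
+//    resp, err := pubsubService.Projects.Subscriptions.TestIamPermissions(resource,
+//        &pubsub.TestIamPermissionsRequest{
+//            Permissions: []string{"pubsub.subscriptions.consume"},
+//        }).Do()
+//    // On success, resp.Permissions is the granted subset.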
+func (c *ProjectsSubscriptionsTestIamPermissionsCall) Fields(s ...googleapi.Field) *ProjectsSubscriptionsTestIamPermissionsCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ProjectsSubscriptionsTestIamPermissionsCall) Context(ctx context.Context) *ProjectsSubscriptionsTestIamPermissionsCall { + c.ctx_ = ctx + return c +} + +func (c *ProjectsSubscriptionsTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.testiampermissionsrequest) + if err != nil { + return nil, err + } + ctype := "application/json" + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+resource}:testIamPermissions") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "resource": c.resource, + }) + req.Header.Set("Content-Type", ctype) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "pubsub.projects.subscriptions.testIamPermissions" call. +// Exactly one of *TestIamPermissionsResponse or error will be non-nil. +// Any non-2xx status code is an error. Response headers are in either +// *TestIamPermissionsResponse.ServerResponse.Header or (if a response +// was returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *ProjectsSubscriptionsTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestIamPermissionsResponse, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &TestIamPermissionsResponse{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Returns permissions that a caller has on the specified resource.", + // "httpMethod": "POST", + // "id": "pubsub.projects.subscriptions.testIamPermissions", + // "parameterOrder": [ + // "resource" + // ], + // "parameters": { + // "resource": { + // "description": "REQUIRED: The resource for which policy detail is being requested. `resource` is usually specified as a path, such as, `projects/{project}/zones/{zone}/disks/{disk}`. 
The format for the path specified in this value is resource specific and is specified in the documentation for the respective TestIamPermissions rpc.", + // "location": "path", + // "pattern": "^projects/[^/]*/subscriptions/[^/]*$", + // "required": true, + // "type": "string" + // } + // }, + // "path": "v1/{+resource}:testIamPermissions", + // "request": { + // "$ref": "TestIamPermissionsRequest" + // }, + // "response": { + // "$ref": "TestIamPermissionsResponse" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/pubsub" + // ] + // } + +} + +// method id "pubsub.projects.topics.create": + +type ProjectsTopicsCreateCall struct { + s *Service + name string + topic *Topic + urlParams_ gensupport.URLParams + ctx_ context.Context +} + +// Create: Creates the given topic with the given name. +func (r *ProjectsTopicsService) Create(name string, topic *Topic) *ProjectsTopicsCreateCall { + c := &ProjectsTopicsCreateCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.name = name + c.topic = topic + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ProjectsTopicsCreateCall) Fields(s ...googleapi.Field) *ProjectsTopicsCreateCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ProjectsTopicsCreateCall) Context(ctx context.Context) *ProjectsTopicsCreateCall { + c.ctx_ = ctx + return c +} + +func (c *ProjectsTopicsCreateCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.topic) + if err != nil { + return nil, err + } + ctype := "application/json" + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+name}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("PUT", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "name": c.name, + }) + req.Header.Set("Content-Type", ctype) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "pubsub.projects.topics.create" call. +// Exactly one of *Topic or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Topic.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *ProjectsTopicsCreateCall) Do(opts ...googleapi.CallOption) (*Topic, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Topic{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Creates the given topic with the given name.", + // "httpMethod": "PUT", + // "id": "pubsub.projects.topics.create", + // "parameterOrder": [ + // "name" + // ], + // "parameters": { + // "name": { + // "description": "The name of the topic. It must have the format `\"projects/{project}/topics/{topic}\"`. `{topic}` must start with a letter, and contain only letters (`[A-Za-z]`), numbers (`[0-9]`), dashes (`-`), underscores (`_`), periods (`.`), tildes (`~`), plus (`+`) or percent signs (`%`). It must be between 3 and 255 characters in length, and it must not start with `\"goog\"`.", + // "location": "path", + // "pattern": "^projects/[^/]*/topics/[^/]*$", + // "required": true, + // "type": "string" + // } + // }, + // "path": "v1/{+name}", + // "request": { + // "$ref": "Topic" + // }, + // "response": { + // "$ref": "Topic" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/pubsub" + // ] + // } + +} + +// method id "pubsub.projects.topics.delete": + +type ProjectsTopicsDeleteCall struct { + s *Service + topic string + urlParams_ gensupport.URLParams + ctx_ context.Context +} + +// Delete: Deletes the topic with the given name. Returns `NOT_FOUND` if +// the topic does not exist. After a topic is deleted, a new topic may +// be created with the same name; this is an entirely new topic with +// none of the old configuration or subscriptions. Existing +// subscriptions to this topic are not deleted, but their `topic` field +// is set to `_deleted-topic_`. +func (r *ProjectsTopicsService) Delete(topic string) *ProjectsTopicsDeleteCall { + c := &ProjectsTopicsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.topic = topic + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ProjectsTopicsDeleteCall) Fields(s ...googleapi.Field) *ProjectsTopicsDeleteCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ProjectsTopicsDeleteCall) Context(ctx context.Context) *ProjectsTopicsDeleteCall { + c.ctx_ = ctx + return c +} + +func (c *ProjectsTopicsDeleteCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+topic}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("DELETE", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "topic": c.topic, + }) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "pubsub.projects.topics.delete" call. 
+// Exactly one of *Empty or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Empty.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *ProjectsTopicsDeleteCall) Do(opts ...googleapi.CallOption) (*Empty, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Empty{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Deletes the topic with the given name. Returns `NOT_FOUND` if the topic does not exist. After a topic is deleted, a new topic may be created with the same name; this is an entirely new topic with none of the old configuration or subscriptions. Existing subscriptions to this topic are not deleted, but their `topic` field is set to `_deleted-topic_`.", + // "httpMethod": "DELETE", + // "id": "pubsub.projects.topics.delete", + // "parameterOrder": [ + // "topic" + // ], + // "parameters": { + // "topic": { + // "description": "Name of the topic to delete.", + // "location": "path", + // "pattern": "^projects/[^/]*/topics/[^/]*$", + // "required": true, + // "type": "string" + // } + // }, + // "path": "v1/{+topic}", + // "response": { + // "$ref": "Empty" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/pubsub" + // ] + // } + +} + +// method id "pubsub.projects.topics.get": + +type ProjectsTopicsGetCall struct { + s *Service + topic string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context +} + +// Get: Gets the configuration of a topic. +func (r *ProjectsTopicsService) Get(topic string) *ProjectsTopicsGetCall { + c := &ProjectsTopicsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.topic = topic + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ProjectsTopicsGetCall) Fields(s ...googleapi.Field) *ProjectsTopicsGetCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of If-None-Match. +func (c *ProjectsTopicsGetCall) IfNoneMatch(entityTag string) *ProjectsTopicsGetCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled.
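+//
+// Editor's sketch (assumed wiring): fetching a topic's configuration:
+//
+//    topic, err := pubsubService.Projects.Topics.Get("projects/p/topics/t").Do()
+//    if err == nil {
+//        log.Println(topic.Name)
+//    }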
+func (c *ProjectsTopicsGetCall) Context(ctx context.Context) *ProjectsTopicsGetCall { + c.ctx_ = ctx + return c +} + +func (c *ProjectsTopicsGetCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+topic}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "topic": c.topic, + }) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + req.Header.Set("If-None-Match", c.ifNoneMatch_) + } + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "pubsub.projects.topics.get" call. +// Exactly one of *Topic or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Topic.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *ProjectsTopicsGetCall) Do(opts ...googleapi.CallOption) (*Topic, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Topic{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Gets the configuration of a topic.", + // "httpMethod": "GET", + // "id": "pubsub.projects.topics.get", + // "parameterOrder": [ + // "topic" + // ], + // "parameters": { + // "topic": { + // "description": "The name of the topic to get.", + // "location": "path", + // "pattern": "^projects/[^/]*/topics/[^/]*$", + // "required": true, + // "type": "string" + // } + // }, + // "path": "v1/{+topic}", + // "response": { + // "$ref": "Topic" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/pubsub" + // ] + // } + +} + +// method id "pubsub.projects.topics.getIamPolicy": + +type ProjectsTopicsGetIamPolicyCall struct { + s *Service + resource string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context +} + +// GetIamPolicy: Gets the access control policy for a `resource`. Is +// empty if the policy or the resource does not exist. +func (r *ProjectsTopicsService) GetIamPolicy(resource string) *ProjectsTopicsGetIamPolicyCall { + c := &ProjectsTopicsGetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.resource = resource + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ProjectsTopicsGetIamPolicyCall) Fields(s ...googleapi.Field) *ProjectsTopicsGetIamPolicyCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. 
This is useful for
+// getting updates only after the object has changed since the last
+// request. Use googleapi.IsNotModified to check whether the response
+// error from Do is the result of If-None-Match.
+func (c *ProjectsTopicsGetIamPolicyCall) IfNoneMatch(entityTag string) *ProjectsTopicsGetIamPolicyCall {
+	c.ifNoneMatch_ = entityTag
+	return c
+}
+
+// Context sets the context to be used in this call's Do method. Any
+// pending HTTP request will be aborted if the provided context is
+// canceled.
+func (c *ProjectsTopicsGetIamPolicyCall) Context(ctx context.Context) *ProjectsTopicsGetIamPolicyCall {
+	c.ctx_ = ctx
+	return c
+}
+
+func (c *ProjectsTopicsGetIamPolicyCall) doRequest(alt string) (*http.Response, error) {
+	var body io.Reader = nil
+	c.urlParams_.Set("alt", alt)
+	urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+resource}:getIamPolicy")
+	urls += "?" + c.urlParams_.Encode()
+	req, _ := http.NewRequest("GET", urls, body)
+	googleapi.Expand(req.URL, map[string]string{
+		"resource": c.resource,
+	})
+	req.Header.Set("User-Agent", c.s.userAgent())
+	if c.ifNoneMatch_ != "" {
+		req.Header.Set("If-None-Match", c.ifNoneMatch_)
+	}
+	if c.ctx_ != nil {
+		return ctxhttp.Do(c.ctx_, c.s.client, req)
+	}
+	return c.s.client.Do(req)
+}
+
+// Do executes the "pubsub.projects.topics.getIamPolicy" call.
+// Exactly one of *Policy or error will be non-nil. Any non-2xx status
+// code is an error. Response headers are in either
+// *Policy.ServerResponse.Header or (if a response was returned at all)
+// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to
+// check whether the returned error was because http.StatusNotModified
+// was returned.
+func (c *ProjectsTopicsGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) {
+	gensupport.SetOptions(c.urlParams_, opts...)
+	res, err := c.doRequest("json")
+	if res != nil && res.StatusCode == http.StatusNotModified {
+		if res.Body != nil {
+			res.Body.Close()
+		}
+		return nil, &googleapi.Error{
+			Code:   res.StatusCode,
+			Header: res.Header,
+		}
+	}
+	if err != nil {
+		return nil, err
+	}
+	defer googleapi.CloseBody(res)
+	if err := googleapi.CheckResponse(res); err != nil {
+		return nil, err
+	}
+	ret := &Policy{
+		ServerResponse: googleapi.ServerResponse{
+			Header:         res.Header,
+			HTTPStatusCode: res.StatusCode,
+		},
+	}
+	if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
+		return nil, err
+	}
+	return ret, nil
+	// {
+	//   "description": "Gets the access control policy for a `resource`. Is empty if the policy or the resource does not exist.",
+	//   "httpMethod": "GET",
+	//   "id": "pubsub.projects.topics.getIamPolicy",
+	//   "parameterOrder": [
+	//     "resource"
+	//   ],
+	//   "parameters": {
+	//     "resource": {
+	//       "description": "REQUIRED: The resource for which policy is being requested. `resource` is usually specified as a path, such as, `projects/{project}/zones/{zone}/disks/{disk}`. The format for the path specified in this value is resource specific and is specified in the documentation for the respective GetIamPolicy rpc.",
+	//       "location": "path",
+	//       "pattern": "^projects/[^/]*/topics/[^/]*$",
+	//       "required": true,
+	//       "type": "string"
+	//     }
+	//   },
+	//   "path": "v1/{+resource}:getIamPolicy",
+	//   "response": {
+	//     "$ref": "Policy"
+	//   },
+	//   "scopes": [
+	//     "https://www.googleapis.com/auth/cloud-platform",
+	//     "https://www.googleapis.com/auth/pubsub"
+	//   ]
+	// }
+
+}
+
+// method id "pubsub.projects.topics.list":
+
+type ProjectsTopicsListCall struct {
+	s            *Service
+	project      string
+	urlParams_   gensupport.URLParams
+	ifNoneMatch_ string
+	ctx_         context.Context
+}
+
+// List: Lists matching topics.
+func (r *ProjectsTopicsService) List(project string) *ProjectsTopicsListCall {
+	c := &ProjectsTopicsListCall{s: r.s, urlParams_: make(gensupport.URLParams)}
+	c.project = project
+	return c
+}
+
+// PageSize sets the optional parameter "pageSize": Maximum number of
+// topics to return.
+func (c *ProjectsTopicsListCall) PageSize(pageSize int64) *ProjectsTopicsListCall {
+	c.urlParams_.Set("pageSize", fmt.Sprint(pageSize))
+	return c
+}
+
+// PageToken sets the optional parameter "pageToken": The value returned
+// by the last `ListTopicsResponse`; indicates that this is a
+// continuation of a prior `ListTopics` call, and that the system should
+// return the next page of data.
+func (c *ProjectsTopicsListCall) PageToken(pageToken string) *ProjectsTopicsListCall {
+	c.urlParams_.Set("pageToken", pageToken)
+	return c
+}
+
+// Fields allows partial responses to be retrieved. See
+// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *ProjectsTopicsListCall) Fields(s ...googleapi.Field) *ProjectsTopicsListCall {
+	c.urlParams_.Set("fields", googleapi.CombineFields(s))
+	return c
+}
+
+// IfNoneMatch sets the optional parameter which makes the operation
+// fail if the object's ETag matches the given value. This is useful for
+// getting updates only after the object has changed since the last
+// request. Use googleapi.IsNotModified to check whether the response
+// error from Do is the result of If-None-Match.
+func (c *ProjectsTopicsListCall) IfNoneMatch(entityTag string) *ProjectsTopicsListCall {
+	c.ifNoneMatch_ = entityTag
+	return c
+}
+
+// Context sets the context to be used in this call's Do method. Any
+// pending HTTP request will be aborted if the provided context is
+// canceled.
+func (c *ProjectsTopicsListCall) Context(ctx context.Context) *ProjectsTopicsListCall {
+	c.ctx_ = ctx
+	return c
+}
+
+func (c *ProjectsTopicsListCall) doRequest(alt string) (*http.Response, error) {
+	var body io.Reader = nil
+	c.urlParams_.Set("alt", alt)
+	urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+project}/topics")
+	urls += "?" + c.urlParams_.Encode()
+	req, _ := http.NewRequest("GET", urls, body)
+	googleapi.Expand(req.URL, map[string]string{
+		"project": c.project,
+	})
+	req.Header.Set("User-Agent", c.s.userAgent())
+	if c.ifNoneMatch_ != "" {
+		req.Header.Set("If-None-Match", c.ifNoneMatch_)
+	}
+	if c.ctx_ != nil {
+		return ctxhttp.Do(c.ctx_, c.s.client, req)
+	}
+	return c.s.client.Do(req)
+}
+
+// Do executes the "pubsub.projects.topics.list" call.
+// Exactly one of *ListTopicsResponse or error will be non-nil. Any
+// non-2xx status code is an error.
Response headers are in either +// *ListTopicsResponse.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *ProjectsTopicsListCall) Do(opts ...googleapi.CallOption) (*ListTopicsResponse, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &ListTopicsResponse{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Lists matching topics.", + // "httpMethod": "GET", + // "id": "pubsub.projects.topics.list", + // "parameterOrder": [ + // "project" + // ], + // "parameters": { + // "pageSize": { + // "description": "Maximum number of topics to return.", + // "format": "int32", + // "location": "query", + // "type": "integer" + // }, + // "pageToken": { + // "description": "The value returned by the last `ListTopicsResponse`; indicates that this is a continuation of a prior `ListTopics` call, and that the system should return the next page of data.", + // "location": "query", + // "type": "string" + // }, + // "project": { + // "description": "The name of the cloud project that topics belong to.", + // "location": "path", + // "pattern": "^projects/[^/]*$", + // "required": true, + // "type": "string" + // } + // }, + // "path": "v1/{+project}/topics", + // "response": { + // "$ref": "ListTopicsResponse" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/pubsub" + // ] + // } + +} + +// method id "pubsub.projects.topics.publish": + +type ProjectsTopicsPublishCall struct { + s *Service + topic string + publishrequest *PublishRequest + urlParams_ gensupport.URLParams + ctx_ context.Context +} + +// Publish: Adds one or more messages to the topic. Returns `NOT_FOUND` +// if the topic does not exist. The message payload must not be empty; +// it must contain either a non-empty data field, or at least one +// attribute. +func (r *ProjectsTopicsService) Publish(topic string, publishrequest *PublishRequest) *ProjectsTopicsPublishCall { + c := &ProjectsTopicsPublishCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.topic = topic + c.publishrequest = publishrequest + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ProjectsTopicsPublishCall) Fields(s ...googleapi.Field) *ProjectsTopicsPublishCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. 
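+//
+// A minimal publish sketch for illustration (assumes a *Service value named
+// "svc", a context "ctx", and placeholder names; the generated
+// PubsubMessage.Data field carries base64-encoded bytes, so the caller
+// would import encoding/base64):
+//
+//	req := &PublishRequest{
+//		Messages: []*PubsubMessage{
+//			{Data: base64.StdEncoding.EncodeToString([]byte("hello"))},
+//		},
+//	}
+//	resp, err := svc.Projects.Topics.Publish("projects/my-project/topics/my-topic", req).Context(ctx).Do()
+//	// On success, resp.MessageIds holds the server-assigned message IDs.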
+func (c *ProjectsTopicsPublishCall) Context(ctx context.Context) *ProjectsTopicsPublishCall { + c.ctx_ = ctx + return c +} + +func (c *ProjectsTopicsPublishCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.publishrequest) + if err != nil { + return nil, err + } + ctype := "application/json" + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+topic}:publish") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "topic": c.topic, + }) + req.Header.Set("Content-Type", ctype) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "pubsub.projects.topics.publish" call. +// Exactly one of *PublishResponse or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *PublishResponse.ServerResponse.Header or (if a response was returned +// at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *ProjectsTopicsPublishCall) Do(opts ...googleapi.CallOption) (*PublishResponse, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &PublishResponse{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Adds one or more messages to the topic. Returns `NOT_FOUND` if the topic does not exist. The message payload must not be empty; it must contain either a non-empty data field, or at least one attribute.", + // "httpMethod": "POST", + // "id": "pubsub.projects.topics.publish", + // "parameterOrder": [ + // "topic" + // ], + // "parameters": { + // "topic": { + // "description": "The messages in the request will be published on this topic.", + // "location": "path", + // "pattern": "^projects/[^/]*/topics/[^/]*$", + // "required": true, + // "type": "string" + // } + // }, + // "path": "v1/{+topic}:publish", + // "request": { + // "$ref": "PublishRequest" + // }, + // "response": { + // "$ref": "PublishResponse" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/pubsub" + // ] + // } + +} + +// method id "pubsub.projects.topics.setIamPolicy": + +type ProjectsTopicsSetIamPolicyCall struct { + s *Service + resource string + setiampolicyrequest *SetIamPolicyRequest + urlParams_ gensupport.URLParams + ctx_ context.Context +} + +// SetIamPolicy: Sets the access control policy on the specified +// resource. Replaces any existing policy. 
+func (r *ProjectsTopicsService) SetIamPolicy(resource string, setiampolicyrequest *SetIamPolicyRequest) *ProjectsTopicsSetIamPolicyCall { + c := &ProjectsTopicsSetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.resource = resource + c.setiampolicyrequest = setiampolicyrequest + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ProjectsTopicsSetIamPolicyCall) Fields(s ...googleapi.Field) *ProjectsTopicsSetIamPolicyCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ProjectsTopicsSetIamPolicyCall) Context(ctx context.Context) *ProjectsTopicsSetIamPolicyCall { + c.ctx_ = ctx + return c +} + +func (c *ProjectsTopicsSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.setiampolicyrequest) + if err != nil { + return nil, err + } + ctype := "application/json" + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+resource}:setIamPolicy") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "resource": c.resource, + }) + req.Header.Set("Content-Type", ctype) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "pubsub.projects.topics.setIamPolicy" call. +// Exactly one of *Policy or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Policy.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *ProjectsTopicsSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Policy{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Sets the access control policy on the specified resource. Replaces any existing policy.", + // "httpMethod": "POST", + // "id": "pubsub.projects.topics.setIamPolicy", + // "parameterOrder": [ + // "resource" + // ], + // "parameters": { + // "resource": { + // "description": "REQUIRED: The resource for which policy is being specified. `resource` is usually specified as a path, such as, `projects/{project}/zones/{zone}/disks/{disk}`. 
The format for the path specified in this value is resource specific and is specified in the documentation for the respective SetIamPolicy rpc.", + // "location": "path", + // "pattern": "^projects/[^/]*/topics/[^/]*$", + // "required": true, + // "type": "string" + // } + // }, + // "path": "v1/{+resource}:setIamPolicy", + // "request": { + // "$ref": "SetIamPolicyRequest" + // }, + // "response": { + // "$ref": "Policy" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/pubsub" + // ] + // } + +} + +// method id "pubsub.projects.topics.testIamPermissions": + +type ProjectsTopicsTestIamPermissionsCall struct { + s *Service + resource string + testiampermissionsrequest *TestIamPermissionsRequest + urlParams_ gensupport.URLParams + ctx_ context.Context +} + +// TestIamPermissions: Returns permissions that a caller has on the +// specified resource. +func (r *ProjectsTopicsService) TestIamPermissions(resource string, testiampermissionsrequest *TestIamPermissionsRequest) *ProjectsTopicsTestIamPermissionsCall { + c := &ProjectsTopicsTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.resource = resource + c.testiampermissionsrequest = testiampermissionsrequest + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ProjectsTopicsTestIamPermissionsCall) Fields(s ...googleapi.Field) *ProjectsTopicsTestIamPermissionsCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ProjectsTopicsTestIamPermissionsCall) Context(ctx context.Context) *ProjectsTopicsTestIamPermissionsCall { + c.ctx_ = ctx + return c +} + +func (c *ProjectsTopicsTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.testiampermissionsrequest) + if err != nil { + return nil, err + } + ctype := "application/json" + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+resource}:testIamPermissions") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "resource": c.resource, + }) + req.Header.Set("Content-Type", ctype) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "pubsub.projects.topics.testIamPermissions" call. +// Exactly one of *TestIamPermissionsResponse or error will be non-nil. +// Any non-2xx status code is an error. Response headers are in either +// *TestIamPermissionsResponse.ServerResponse.Header or (if a response +// was returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *ProjectsTopicsTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestIamPermissionsResponse, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
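+	// Any CallOption overrides have now been folded into c.urlParams_.
+	// doRequest below POSTs the TestIamPermissionsRequest body to
+	// v1/{+resource}:testIamPermissions; the 304 branch that follows turns
+	// http.StatusNotModified into a *googleapi.Error before the usual error
+	// check and JSON decode.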
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &TestIamPermissionsResponse{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Returns permissions that a caller has on the specified resource.", + // "httpMethod": "POST", + // "id": "pubsub.projects.topics.testIamPermissions", + // "parameterOrder": [ + // "resource" + // ], + // "parameters": { + // "resource": { + // "description": "REQUIRED: The resource for which policy detail is being requested. `resource` is usually specified as a path, such as, `projects/{project}/zones/{zone}/disks/{disk}`. The format for the path specified in this value is resource specific and is specified in the documentation for the respective TestIamPermissions rpc.", + // "location": "path", + // "pattern": "^projects/[^/]*/topics/[^/]*$", + // "required": true, + // "type": "string" + // } + // }, + // "path": "v1/{+resource}:testIamPermissions", + // "request": { + // "$ref": "TestIamPermissionsRequest" + // }, + // "response": { + // "$ref": "TestIamPermissionsResponse" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/pubsub" + // ] + // } + +} + +// method id "pubsub.projects.topics.subscriptions.list": + +type ProjectsTopicsSubscriptionsListCall struct { + s *Service + topic string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context +} + +// List: Lists the name of the subscriptions for this topic. +func (r *ProjectsTopicsSubscriptionsService) List(topic string) *ProjectsTopicsSubscriptionsListCall { + c := &ProjectsTopicsSubscriptionsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.topic = topic + return c +} + +// PageSize sets the optional parameter "pageSize": Maximum number of +// subscription names to return. +func (c *ProjectsTopicsSubscriptionsListCall) PageSize(pageSize int64) *ProjectsTopicsSubscriptionsListCall { + c.urlParams_.Set("pageSize", fmt.Sprint(pageSize)) + return c +} + +// PageToken sets the optional parameter "pageToken": The value returned +// by the last `ListTopicSubscriptionsResponse`; indicates that this is +// a continuation of a prior `ListTopicSubscriptions` call, and that the +// system should return the next page of data. +func (c *ProjectsTopicsSubscriptionsListCall) PageToken(pageToken string) *ProjectsTopicsSubscriptionsListCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ProjectsTopicsSubscriptionsListCall) Fields(s ...googleapi.Field) *ProjectsTopicsSubscriptionsListCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. 
Use googleapi.IsNotModified to check whether the response
+// error from Do is the result of If-None-Match.
+func (c *ProjectsTopicsSubscriptionsListCall) IfNoneMatch(entityTag string) *ProjectsTopicsSubscriptionsListCall {
+	c.ifNoneMatch_ = entityTag
+	return c
+}
+
+// Context sets the context to be used in this call's Do method. Any
+// pending HTTP request will be aborted if the provided context is
+// canceled.
+func (c *ProjectsTopicsSubscriptionsListCall) Context(ctx context.Context) *ProjectsTopicsSubscriptionsListCall {
+	c.ctx_ = ctx
+	return c
+}
+
+func (c *ProjectsTopicsSubscriptionsListCall) doRequest(alt string) (*http.Response, error) {
+	var body io.Reader = nil
+	c.urlParams_.Set("alt", alt)
+	urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+topic}/subscriptions")
+	urls += "?" + c.urlParams_.Encode()
+	req, _ := http.NewRequest("GET", urls, body)
+	googleapi.Expand(req.URL, map[string]string{
+		"topic": c.topic,
+	})
+	req.Header.Set("User-Agent", c.s.userAgent())
+	if c.ifNoneMatch_ != "" {
+		req.Header.Set("If-None-Match", c.ifNoneMatch_)
+	}
+	if c.ctx_ != nil {
+		return ctxhttp.Do(c.ctx_, c.s.client, req)
+	}
+	return c.s.client.Do(req)
+}
+
+// Do executes the "pubsub.projects.topics.subscriptions.list" call.
+// Exactly one of *ListTopicSubscriptionsResponse or error will be
+// non-nil. Any non-2xx status code is an error. Response headers are in
+// either *ListTopicSubscriptionsResponse.ServerResponse.Header or (if a
+// response was returned at all) in error.(*googleapi.Error).Header. Use
+// googleapi.IsNotModified to check whether the returned error was
+// because http.StatusNotModified was returned.
+func (c *ProjectsTopicsSubscriptionsListCall) Do(opts ...googleapi.CallOption) (*ListTopicSubscriptionsResponse, error) {
+	gensupport.SetOptions(c.urlParams_, opts...)
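+	// CallOptions were applied to c.urlParams_ above; doRequest issues the
+	// GET to v1/{+topic}/subscriptions. When IfNoneMatch was set and the
+	// server answers 304, the branch below returns a *googleapi.Error with
+	// the status and headers instead of decoding a response body.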
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &ListTopicSubscriptionsResponse{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Lists the name of the subscriptions for this topic.", + // "httpMethod": "GET", + // "id": "pubsub.projects.topics.subscriptions.list", + // "parameterOrder": [ + // "topic" + // ], + // "parameters": { + // "pageSize": { + // "description": "Maximum number of subscription names to return.", + // "format": "int32", + // "location": "query", + // "type": "integer" + // }, + // "pageToken": { + // "description": "The value returned by the last `ListTopicSubscriptionsResponse`; indicates that this is a continuation of a prior `ListTopicSubscriptions` call, and that the system should return the next page of data.", + // "location": "query", + // "type": "string" + // }, + // "topic": { + // "description": "The name of the topic that subscriptions are attached to.", + // "location": "path", + // "pattern": "^projects/[^/]*/topics/[^/]*$", + // "required": true, + // "type": "string" + // } + // }, + // "path": "v1/{+topic}/subscriptions", + // "response": { + // "$ref": "ListTopicSubscriptionsResponse" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/pubsub" + // ] + // } + +} diff --git a/vendor/google.golang.org/api/sqladmin/v1beta4/sqladmin-api.json b/vendor/google.golang.org/api/sqladmin/v1beta4/sqladmin-api.json new file mode 100644 index 000000000..6c371985f --- /dev/null +++ b/vendor/google.golang.org/api/sqladmin/v1beta4/sqladmin-api.json @@ -0,0 +1,2707 @@ +{ + "kind": "discovery#restDescription", + "etag": "\"ye6orv2F-1npMW3u9suM3a7C5Bo/CCmkz9wJbxqIZMDlSyPUsI6BFWQ\"", + "discoveryVersion": "v1", + "id": "sqladmin:v1beta4", + "name": "sqladmin", + "canonicalName": "SQL Admin", + "version": "v1beta4", + "revision": "20151117", + "title": "Cloud SQL Administration API", + "description": "API for Cloud SQL database instance management.", + "ownerDomain": "google.com", + "ownerName": "Google", + "icons": { + "x16": "http://www.google.com/images/icons/product/search-16.gif", + "x32": "http://www.google.com/images/icons/product/search-32.gif" + }, + "documentationLink": "https://cloud.google.com/sql/docs/reference/latest", + "protocol": "rest", + "baseUrl": "https://www.googleapis.com/sql/v1beta4/", + "basePath": "/sql/v1beta4/", + "rootUrl": "https://www.googleapis.com/", + "servicePath": "sql/v1beta4/", + "batchPath": "batch", + "parameters": { + "alt": { + "type": "string", + "description": "Data format for the response.", + "default": "json", + "enum": [ + "json" + ], + "enumDescriptions": [ + "Responses with Content-Type of application/json" + ], + "location": "query" + }, + "fields": { + "type": "string", + "description": "Selector specifying which fields to include in a partial response.", + "location": "query" + }, + "key": { + "type": "string", + "description": "API key. Your API key identifies your project and provides you with API access, quota, and reports. 
Required unless you provide an OAuth 2.0 token.", + "location": "query" + }, + "oauth_token": { + "type": "string", + "description": "OAuth 2.0 token for the current user.", + "location": "query" + }, + "prettyPrint": { + "type": "boolean", + "description": "Returns response with indentations and line breaks.", + "default": "true", + "location": "query" + }, + "quotaUser": { + "type": "string", + "description": "Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters. Overrides userIp if both are provided.", + "location": "query" + }, + "userIp": { + "type": "string", + "description": "IP address of the site where the request originates. Use this if you want to enforce per-user limits.", + "location": "query" + } + }, + "auth": { + "oauth2": { + "scopes": { + "https://www.googleapis.com/auth/cloud-platform": { + "description": "View and manage your data across Google Cloud Platform services" + }, + "https://www.googleapis.com/auth/sqlservice.admin": { + "description": "Manage your Google SQL Service instances" + } + } + } + }, + "schemas": { + "AclEntry": { + "id": "AclEntry", + "type": "object", + "description": "An entry for an Access Control list.", + "properties": { + "expirationTime": { + "type": "string", + "description": "The time when this access control entry expires in RFC 3339 format, for example 2012-11-15T16:19:00.094Z.", + "format": "date-time" + }, + "kind": { + "type": "string", + "description": "This is always sql#aclEntry.", + "default": "sql#aclEntry" + }, + "name": { + "type": "string", + "description": "An optional label to identify this entry." + }, + "value": { + "type": "string", + "description": "The whitelisted value for the access control list." + } + } + }, + "BackupConfiguration": { + "id": "BackupConfiguration", + "type": "object", + "description": "Database instance backup configuration.", + "properties": { + "binaryLogEnabled": { + "type": "boolean", + "description": "Whether binary log is enabled. If backup configuration is disabled, binary log must be disabled as well." + }, + "enabled": { + "type": "boolean", + "description": "Whether this configuration is enabled." + }, + "kind": { + "type": "string", + "description": "This is always sql#backupConfiguration.", + "default": "sql#backupConfiguration" + }, + "startTime": { + "type": "string", + "description": "Start time for the daily backup configuration in UTC timezone in the 24 hour format - HH:MM." + } + } + }, + "BackupRun": { + "id": "BackupRun", + "type": "object", + "description": "A database instance backup run resource.", + "properties": { + "endTime": { + "type": "string", + "description": "The time the backup operation completed in UTC timezone in RFC 3339 format, for example 2012-11-15T16:19:00.094Z.", + "format": "date-time" + }, + "enqueuedTime": { + "type": "string", + "description": "The time the run was enqueued in UTC timezone in RFC 3339 format, for example 2012-11-15T16:19:00.094Z.", + "format": "date-time" + }, + "error": { + "$ref": "OperationError", + "description": "Information about why the backup operation failed. This is only present if the run has the FAILED status." + }, + "id": { + "type": "string", + "description": "A unique identifier for this backup run. Note that this is unique only within the scope of a particular Cloud SQL instance.", + "format": "int64" + }, + "instance": { + "type": "string", + "description": "Name of the database instance." 
+    },
+    "kind": {
+     "type": "string",
+     "description": "This is always sql#backupRun.",
+     "default": "sql#backupRun"
+    },
+    "selfLink": {
+     "type": "string",
+     "description": "The URI of this resource."
+    },
+    "startTime": {
+     "type": "string",
+     "description": "The time the backup operation actually started in UTC timezone in RFC 3339 format, for example 2012-11-15T16:19:00.094Z.",
+     "format": "date-time"
+    },
+    "status": {
+     "type": "string",
+     "description": "The status of this run."
+    },
+    "windowStartTime": {
+     "type": "string",
+     "description": "The start time of the backup window during which the backup was attempted in RFC 3339 format, for example 2012-11-15T16:19:00.094Z.",
+     "format": "date-time"
+    }
+   }
+  },
+  "BackupRunsListResponse": {
+   "id": "BackupRunsListResponse",
+   "type": "object",
+   "description": "Backup run list results.",
+   "properties": {
+    "items": {
+     "type": "array",
+     "description": "A list of backup runs in reverse chronological order of the enqueued time.",
+     "items": {
+      "$ref": "BackupRun"
+     }
+    },
+    "kind": {
+     "type": "string",
+     "description": "This is always sql#backupRunsList.",
+     "default": "sql#backupRunsList"
+    },
+    "nextPageToken": {
+     "type": "string",
+     "description": "The continuation token, used to page through large result sets. Provide this value in a subsequent request to return the next page of results."
+    }
+   }
+  },
+  "BinLogCoordinates": {
+   "id": "BinLogCoordinates",
+   "type": "object",
+   "description": "Binary log coordinates.",
+   "properties": {
+    "binLogFileName": {
+     "type": "string",
+     "description": "Name of the binary log file for a Cloud SQL instance."
+    },
+    "binLogPosition": {
+     "type": "string",
+     "description": "Position (offset) within the binary log file.",
+     "format": "int64"
+    },
+    "kind": {
+     "type": "string",
+     "description": "This is always sql#binLogCoordinates.",
+     "default": "sql#binLogCoordinates"
+    }
+   }
+  },
+  "CloneContext": {
+   "id": "CloneContext",
+   "type": "object",
+   "description": "Database instance clone context.",
+   "properties": {
+    "binLogCoordinates": {
+     "$ref": "BinLogCoordinates",
+     "description": "Binary log coordinates, if specified, identify the position up to which the source instance should be cloned. If not specified, the source instance is cloned up to the most recent binary log coordinates."
+    },
+    "destinationInstanceName": {
+     "type": "string",
+     "description": "Name of the Cloud SQL instance to be created as a clone."
+    },
+    "kind": {
+     "type": "string",
+     "description": "This is always sql#cloneContext.",
+     "default": "sql#cloneContext"
+    }
+   }
+  },
+  "Database": {
+   "id": "Database",
+   "type": "object",
+   "description": "A database resource inside a Cloud SQL instance.",
+   "properties": {
+    "charset": {
+     "type": "string",
+     "description": "The MySQL charset value."
+    },
+    "collation": {
+     "type": "string",
+     "description": "The MySQL collation value."
+    },
+    "etag": {
+     "type": "string",
+     "description": "HTTP 1.1 Entity tag for the resource."
+    },
+    "instance": {
+     "type": "string",
+     "description": "The name of the Cloud SQL instance. This does not include the project ID.",
+     "annotations": {
+      "required": [
+       "sql.databases.insert"
+      ]
+     }
+    },
+    "kind": {
+     "type": "string",
+     "description": "This is always sql#database.",
+     "default": "sql#database"
+    },
+    "name": {
+     "type": "string",
+     "description": "The name of the database in the Cloud SQL instance.
This does not include the project ID or instance name.", + "annotations": { + "required": [ + "sql.databases.insert" + ] + } + }, + "project": { + "type": "string", + "description": "The project ID of the project containing the Cloud SQL database. The Google apps domain is prefixed if applicable.", + "annotations": { + "required": [ + "sql.databases.insert" + ] + } + }, + "selfLink": { + "type": "string", + "description": "The URI of this resource." + } + } + }, + "DatabaseFlags": { + "id": "DatabaseFlags", + "type": "object", + "description": "MySQL flags for Cloud SQL instances.", + "properties": { + "name": { + "type": "string", + "description": "The name of the flag. These flags are passed at instance startup, so include both MySQL server options and MySQL system variables. Flags should be specified with underscores, not hyphens. For more information, see Configuring MySQL Flags in the Google Cloud SQL documentation, as well as the official MySQL documentation for server options and system variables." + }, + "value": { + "type": "string", + "description": "The value of the flag. Booleans should be set to on for true and off for false. This field must be omitted if the flag doesn't take a value." + } + } + }, + "DatabaseInstance": { + "id": "DatabaseInstance", + "type": "object", + "description": "A Cloud SQL instance resource.", + "properties": { + "currentDiskSize": { + "type": "string", + "description": "The current disk usage of the instance in bytes.", + "format": "int64" + }, + "databaseVersion": { + "type": "string", + "description": "The database engine type and version. Can be MYSQL_5_5 or MYSQL_5_6. Defaults to MYSQL_5_5. The databaseVersion can not be changed after instance creation." + }, + "etag": { + "type": "string", + "description": "HTTP 1.1 Entity tag for the resource." + }, + "failoverReplica": { + "type": "object", + "description": "The name and status of the failover replica. Only applies to Second Generation instances.", + "properties": { + "available": { + "type": "boolean" + }, + "name": { + "type": "string" + } + } + }, + "instanceType": { + "type": "string", + "description": "The instance type. This can be one of the following.\nCLOUD_SQL_INSTANCE: A Cloud SQL instance that is not replicating from a master.\nON_PREMISES_INSTANCE: An instance running on the customer's premises.\nREAD_REPLICA_INSTANCE: A Cloud SQL instance configured as a read-replica." + }, + "ipAddresses": { + "type": "array", + "description": "The assigned IP addresses for the instance.", + "items": { + "$ref": "IpMapping" + } + }, + "ipv6Address": { + "type": "string", + "description": "The IPv6 address assigned to the instance." + }, + "kind": { + "type": "string", + "description": "This is always sql#instance.", + "default": "sql#instance" + }, + "masterInstanceName": { + "type": "string", + "description": "The name of the instance which will act as master in the replication setup." + }, + "maxDiskSize": { + "type": "string", + "description": "The maximum disk size of the instance in bytes.", + "format": "int64" + }, + "name": { + "type": "string", + "description": "Name of the Cloud SQL instance. This does not include the project ID.", + "annotations": { + "required": [ + "sql.instances.insert" + ] + } + }, + "onPremisesConfiguration": { + "$ref": "OnPremisesConfiguration", + "description": "Configuration specific to on-premises instances." + }, + "project": { + "type": "string", + "description": "The project ID of the project containing the Cloud SQL instance. 
The Google apps domain is prefixed if applicable." + }, + "region": { + "type": "string", + "description": "The geographical region. Can be us-central, asia-east1 or europe-west1. Defaults to us-central. The region can not be changed after instance creation." + }, + "replicaConfiguration": { + "$ref": "ReplicaConfiguration", + "description": "Configuration specific to read-replicas replicating from on-premises masters." + }, + "replicaNames": { + "type": "array", + "description": "The replicas of the instance.", + "items": { + "type": "string" + } + }, + "selfLink": { + "type": "string", + "description": "The URI of this resource." + }, + "serverCaCert": { + "$ref": "SslCert", + "description": "SSL configuration." + }, + "serviceAccountEmailAddress": { + "type": "string", + "description": "The service account email address assigned to the instance." + }, + "settings": { + "$ref": "Settings", + "description": "The user settings.", + "annotations": { + "required": [ + "sql.instances.insert", + "sql.instances.update" + ] + } + }, + "state": { + "type": "string", + "description": "The current serving state of the Cloud SQL instance. This can be one of the following.\nRUNNABLE: The instance is running, or is ready to run when accessed.\nSUSPENDED: The instance is not available, for example due to problems with billing.\nPENDING_CREATE: The instance is being created.\nMAINTENANCE: The instance is down for maintenance.\nFAILED: The instance creation failed.\nUNKNOWN_STATE: The state of the instance is unknown." + } + } + }, + "DatabasesListResponse": { + "id": "DatabasesListResponse", + "type": "object", + "description": "Database list response.", + "properties": { + "items": { + "type": "array", + "description": "List of database resources in the instance.", + "items": { + "$ref": "Database" + } + }, + "kind": { + "type": "string", + "description": "This is always sql#databasesList.", + "default": "sql#databasesList" + } + } + }, + "ExportContext": { + "id": "ExportContext", + "type": "object", + "description": "Database instance export context.", + "properties": { + "csvExportOptions": { + "type": "object", + "description": "Options for exporting data as CSV.", + "properties": { + "selectQuery": { + "type": "string", + "description": "The select query used to extract the data." + } + } + }, + "databases": { + "type": "array", + "description": "Databases (for example, guestbook) from which the export is made. If fileType is SQL and no database is specified, all databases are exported. If fileType is CSV, you can optionally specify at most one database to export. If csvExportOptions.selectQuery also specifies the database, this field will be ignored.", + "items": { + "type": "string" + } + }, + "fileType": { + "type": "string", + "description": "The file type for the specified uri.\nSQL: The file contains SQL statements.\nCSV: The file contains CSV data." + }, + "kind": { + "type": "string", + "description": "This is always sql#exportContext.", + "default": "sql#exportContext" + }, + "sqlExportOptions": { + "type": "object", + "description": "Options for exporting data as SQL statements.", + "properties": { + "schemaOnly": { + "type": "boolean", + "description": "Export only schemas." + }, + "tables": { + "type": "array", + "description": "Tables to export, or that were exported, from the specified database. 
If you specify tables, specify one and only one database.", + "items": { + "type": "string" + } + } + } + }, + "uri": { + "type": "string", + "description": "The path to the file in Google Cloud Storage where the export will be stored. The URI is in the form gs://bucketName/fileName. If the file already exists, the operation fails. If fileType is SQL and the filename ends with .gz, the contents are compressed." + } + } + }, + "FailoverContext": { + "id": "FailoverContext", + "type": "object", + "description": "Database instance failover context.", + "properties": { + "kind": { + "type": "string", + "description": "This is always sql#failoverContext.", + "default": "sql#failoverContext" + }, + "settingsVersion": { + "type": "string", + "description": "The current settings version of this instance. Request will be rejected if this version doesn't match the current settings version.", + "format": "int64" + } + } + }, + "Flag": { + "id": "Flag", + "type": "object", + "description": "A Google Cloud SQL service flag resource.", + "properties": { + "allowedStringValues": { + "type": "array", + "description": "For STRING flags, a list of strings that the value can be set to.", + "items": { + "type": "string" + } + }, + "appliesTo": { + "type": "array", + "description": "The database version this flag applies to. Can be MYSQL_5_5, MYSQL_5_6, or both.", + "items": { + "type": "string" + } + }, + "kind": { + "type": "string", + "description": "This is always sql#flag.", + "default": "sql#flag" + }, + "maxValue": { + "type": "string", + "description": "For INTEGER flags, the maximum allowed value.", + "format": "int64" + }, + "minValue": { + "type": "string", + "description": "For INTEGER flags, the minimum allowed value.", + "format": "int64" + }, + "name": { + "type": "string", + "description": "This is the name of the flag. Flag names always use underscores, not hyphens, e.g. max_allowed_packet" + }, + "requiresRestart": { + "type": "boolean", + "description": "Indicates whether changing this flag will trigger a database restart. Only applicable to Second Generation instances." + }, + "type": { + "type": "string", + "description": "The type of the flag. Flags are typed to being BOOLEAN, STRING, INTEGER or NONE. NONE is used for flags which do not take a value, such as skip_grant_tables." + } + } + }, + "FlagsListResponse": { + "id": "FlagsListResponse", + "type": "object", + "description": "Flags list response.", + "properties": { + "items": { + "type": "array", + "description": "List of flags.", + "items": { + "$ref": "Flag" + } + }, + "kind": { + "type": "string", + "description": "This is always sql#flagsList.", + "default": "sql#flagsList" + } + } + }, + "ImportContext": { + "id": "ImportContext", + "type": "object", + "description": "Database instance import context.", + "properties": { + "csvImportOptions": { + "type": "object", + "description": "Options for importing data as CSV.", + "properties": { + "columns": { + "type": "array", + "description": "The columns to which CSV data is imported. If not specified, all columns of the database table are loaded with CSV data.", + "items": { + "type": "string" + } + }, + "table": { + "type": "string", + "description": "The table to which CSV data is imported." + } + } + }, + "database": { + "type": "string", + "description": "The database (for example, guestbook) to which the import is made. If fileType is SQL and no database is specified, it is assumed that the database is specified in the file to be imported. 
If fileType is CSV, it must be specified." + }, + "fileType": { + "type": "string", + "description": "The file type for the specified uri.\nSQL: The file contains SQL statements.\nCSV: The file contains CSV data." + }, + "kind": { + "type": "string", + "description": "This is always sql#importContext.", + "default": "sql#importContext" + }, + "uri": { + "type": "string", + "description": "A path to the file in Google Cloud Storage from which the import is made. The URI is in the form gs://bucketName/fileName. Compressed gzip files (.gz) are supported when fileType is SQL." + } + } + }, + "InstancesCloneRequest": { + "id": "InstancesCloneRequest", + "type": "object", + "description": "Database instance clone request.", + "properties": { + "cloneContext": { + "$ref": "CloneContext", + "description": "Contains details about the clone operation." + } + } + }, + "InstancesExportRequest": { + "id": "InstancesExportRequest", + "type": "object", + "description": "Database instance export request.", + "properties": { + "exportContext": { + "$ref": "ExportContext", + "description": "Contains details about the export operation." + } + } + }, + "InstancesFailoverRequest": { + "id": "InstancesFailoverRequest", + "type": "object", + "description": "Instance failover request.", + "properties": { + "failoverContext": { + "$ref": "FailoverContext", + "description": "Failover Context." + } + } + }, + "InstancesImportRequest": { + "id": "InstancesImportRequest", + "type": "object", + "description": "Database instance import request.", + "properties": { + "importContext": { + "$ref": "ImportContext", + "description": "Contains details about the import operation." + } + } + }, + "InstancesListResponse": { + "id": "InstancesListResponse", + "type": "object", + "description": "Database instances list response.", + "properties": { + "items": { + "type": "array", + "description": "List of database instance resources.", + "items": { + "$ref": "DatabaseInstance" + } + }, + "kind": { + "type": "string", + "description": "This is always sql#instancesList.", + "default": "sql#instancesList" + }, + "nextPageToken": { + "type": "string", + "description": "The continuation token, used to page through large result sets. Provide this value in a subsequent request to return the next page of results." + } + } + }, + "InstancesRestoreBackupRequest": { + "id": "InstancesRestoreBackupRequest", + "type": "object", + "description": "Database instance restore backup request.", + "properties": { + "restoreBackupContext": { + "$ref": "RestoreBackupContext", + "description": "Parameters required to perform the restore backup operation." + } + } + }, + "IpConfiguration": { + "id": "IpConfiguration", + "type": "object", + "description": "IP Management configuration.", + "properties": { + "authorizedNetworks": { + "type": "array", + "description": "The list of external networks that are allowed to connect to the instance using the IP. In CIDR notation, also known as 'slash' notation (e.g. 192.168.100.0/24).", + "items": { + "$ref": "AclEntry" + } + }, + "ipv4Enabled": { + "type": "boolean", + "description": "Whether the instance should be assigned an IP address or not." + }, + "requireSsl": { + "type": "boolean", + "description": "Whether the mysqld should default to 'REQUIRE X509' for users connecting over IP." + } + } + }, + "IpMapping": { + "id": "IpMapping", + "type": "object", + "description": "Database instance IP Mapping.", + "properties": { + "ipAddress": { + "type": "string", + "description": "The IP address assigned." 
+    },
+    "timeToRetire": {
+     "type": "string",
+     "description": "The due time for this IP to be retired in RFC 3339 format, for example 2012-11-15T16:19:00.094Z. This field is only available when the IP is scheduled to be retired.",
+     "format": "date-time"
+    }
+   }
+  },
+  "LocationPreference": {
+   "id": "LocationPreference",
+   "type": "object",
+   "description": "Preferred location. This specifies where a Cloud SQL instance should preferably be located, either in a specific Compute Engine zone, or co-located with an App Engine application. Note that if the preferred location is not available, the instance will be located as close as possible within the region. Only one location may be specified.",
+   "properties": {
+    "followGaeApplication": {
+     "type": "string",
+     "description": "The AppEngine application to follow, it must be in the same region as the Cloud SQL instance."
+    },
+    "kind": {
+     "type": "string",
+     "description": "This is always sql#locationPreference.",
+     "default": "sql#locationPreference"
+    },
+    "zone": {
+     "type": "string",
+     "description": "The preferred Compute Engine zone (e.g. us-central1-a, us-central1-b, etc.)."
+    }
+   }
+  },
+  "MaintenanceWindow": {
+   "id": "MaintenanceWindow",
+   "type": "object",
+   "description": "Maintenance window. This specifies when a v2 Cloud SQL instance should preferably be restarted for system maintenance purposes.",
+   "properties": {
+    "day": {
+     "type": "integer",
+     "description": "day of week (1-7), starting on Monday.",
+     "format": "int32"
+    },
+    "hour": {
+     "type": "integer",
+     "description": "hour of day - 0 to 23.",
+     "format": "int32"
+    },
+    "kind": {
+     "type": "string",
+     "description": "This is always sql#maintenanceWindow.",
+     "default": "sql#maintenanceWindow"
+    },
+    "updateTrack": {
+     "type": "string"
+    }
+   }
+  },
+  "MySqlReplicaConfiguration": {
+   "id": "MySqlReplicaConfiguration",
+   "type": "object",
+   "description": "Read-replica configuration specific to MySQL databases.",
+   "properties": {
+    "caCertificate": {
+     "type": "string",
+     "description": "PEM representation of the trusted CA's x509 certificate."
+    },
+    "clientCertificate": {
+     "type": "string",
+     "description": "PEM representation of the slave's x509 certificate."
+    },
+    "clientKey": {
+     "type": "string",
+     "description": "PEM representation of the slave's private key. The corresponding public key is encoded in the client's certificate."
+    },
+    "connectRetryInterval": {
+     "type": "integer",
+     "description": "Seconds to wait between connect retries. MySQL's default is 60 seconds.",
+     "format": "int32"
+    },
+    "dumpFilePath": {
+     "type": "string",
+     "description": "Path to a SQL dump file in Google Cloud Storage from which the slave instance is to be created. The URI is in the form gs://bucketName/fileName. Compressed gzip files (.gz) are also supported. Dumps should have the binlog co-ordinates from which replication should begin. This can be accomplished by setting --master-data to 1 when using mysqldump."
+    },
+    "kind": {
+     "type": "string",
+     "description": "This is always sql#mysqlReplicaConfiguration.",
+     "default": "sql#mysqlReplicaConfiguration"
+    },
+    "masterHeartbeatPeriod": {
+     "type": "string",
+     "description": "Interval in milliseconds between replication heartbeats.",
+     "format": "int64"
+    },
+    "password": {
+     "type": "string",
+     "description": "The password for the replication connection."
+    },
+    "sslCipher": {
+     "type": "string",
+     "description": "A list of permissible ciphers to use for SSL encryption."
+ }, + "username": { + "type": "string", + "description": "The username for the replication connection." + }, + "verifyServerCertificate": { + "type": "boolean", + "description": "Whether or not to check the master's Common Name value in the certificate that it sends during the SSL handshake." + } + } + }, + "OnPremisesConfiguration": { + "id": "OnPremisesConfiguration", + "type": "object", + "description": "On-premises instance configuration.", + "properties": { + "hostPort": { + "type": "string", + "description": "The host and port of the on-premises instance in host:port format" + }, + "kind": { + "type": "string", + "description": "This is always sql#onPremisesConfiguration.", + "default": "sql#onPremisesConfiguration" + } + } + }, + "Operation": { + "id": "Operation", + "type": "object", + "description": "An Operations resource contains information about database instance operations such as create, delete, and restart. Operations resources are created in response to operations that were initiated; you never create them directly.", + "properties": { + "endTime": { + "type": "string", + "description": "The time this operation finished in UTC timezone in RFC 3339 format, for example 2012-11-15T16:19:00.094Z.", + "format": "date-time" + }, + "error": { + "$ref": "OperationErrors", + "description": "If errors occurred during processing of this operation, this field will be populated." + }, + "exportContext": { + "$ref": "ExportContext", + "description": "The context for export operation, if applicable." + }, + "importContext": { + "$ref": "ImportContext", + "description": "The context for import operation, if applicable." + }, + "insertTime": { + "type": "string", + "description": "The time this operation was enqueued in UTC timezone in RFC 3339 format, for example 2012-11-15T16:19:00.094Z.", + "format": "date-time" + }, + "kind": { + "type": "string", + "description": "This is always sql#operation.", + "default": "sql#operation" + }, + "name": { + "type": "string", + "description": "An identifier that uniquely identifies the operation. You can use this identifier to retrieve the Operations resource that has information about the operation." + }, + "operationType": { + "type": "string", + "description": "The type of the operation. Valid values are CREATE, DELETE, UPDATE, RESTART, IMPORT, EXPORT, BACKUP_VOLUME, RESTORE_VOLUME, CREATE_USER, DELETE_USER, CREATE_DATABASE, DELETE_DATABASE ." + }, + "selfLink": { + "type": "string", + "description": "The URI of this resource." + }, + "startTime": { + "type": "string", + "description": "The time this operation actually started in UTC timezone in RFC 3339 format, for example 2012-11-15T16:19:00.094Z.", + "format": "date-time" + }, + "status": { + "type": "string", + "description": "The status of an operation. Valid values are PENDING, RUNNING, DONE, UNKNOWN." + }, + "targetId": { + "type": "string", + "description": "Name of the database instance related to this operation." + }, + "targetLink": { + "type": "string", + "description": "The URI of the instance related to the operation." + }, + "targetProject": { + "type": "string", + "description": "The project ID of the target instance related to this operation." + }, + "user": { + "type": "string", + "description": "The email address of the user who initiated this operation." 
+    }
+   }
+  },
+  "OperationError": {
+   "id": "OperationError",
+   "type": "object",
+   "description": "Database instance operation error.",
+   "properties": {
+    "code": {
+     "type": "string",
+     "description": "Identifies the specific error that occurred."
+    },
+    "kind": {
+     "type": "string",
+     "description": "This is always sql#operationError.",
+     "default": "sql#operationError"
+    },
+    "message": {
+     "type": "string",
+     "description": "Additional information about the error encountered."
+    }
+   }
+  },
+  "OperationErrors": {
+   "id": "OperationErrors",
+   "type": "object",
+   "description": "Database instance operation errors list wrapper.",
+   "properties": {
+    "errors": {
+     "type": "array",
+     "description": "The list of errors encountered while processing this operation.",
+     "items": {
+      "$ref": "OperationError"
+     }
+    },
+    "kind": {
+     "type": "string",
+     "description": "This is always sql#operationErrors.",
+     "default": "sql#operationErrors"
+    }
+   }
+  },
+  "OperationsListResponse": {
+   "id": "OperationsListResponse",
+   "type": "object",
+   "description": "Database instance list operations response.",
+   "properties": {
+    "items": {
+     "type": "array",
+     "description": "List of operation resources.",
+     "items": {
+      "$ref": "Operation"
+     }
+    },
+    "kind": {
+     "type": "string",
+     "description": "This is always sql#operationsList.",
+     "default": "sql#operationsList"
+    },
+    "nextPageToken": {
+     "type": "string",
+     "description": "The continuation token, used to page through large result sets. Provide this value in a subsequent request to return the next page of results."
+    }
+   }
+  },
+  "ReplicaConfiguration": {
+   "id": "ReplicaConfiguration",
+   "type": "object",
+   "description": "Read-replica configuration for connecting to the master.",
+   "properties": {
+    "failoverTarget": {
+     "type": "boolean",
+     "description": "Specifies if the replica is the failover target. If the field is set to true, the replica will be designated as a failover replica. In case the master instance fails, the replica instance will be promoted as the new master instance.\nOnly one replica can be specified as the failover target, and the replica has to be in a different zone from the master instance."
+    },
+    "kind": {
+     "type": "string",
+     "description": "This is always sql#replicaConfiguration.",
+     "default": "sql#replicaConfiguration"
+    },
+    "mysqlReplicaConfiguration": {
+     "$ref": "MySqlReplicaConfiguration",
+     "description": "MySQL specific configuration when replicating from a MySQL on-premises master. Replication configuration information such as the username, password, certificates, and keys are not stored in the instance metadata. The configuration information is used only to set up the replication connection and is stored by MySQL in a file named master.info in the data directory."
+    }
+   }
+  },
+  "RestoreBackupContext": {
+   "id": "RestoreBackupContext",
+   "type": "object",
+   "description": "Database instance restore from backup context.",
+   "properties": {
+    "backupRunId": {
+     "type": "string",
+     "description": "The ID of the backup run to restore from.",
+     "format": "int64",
+     "annotations": {
+      "required": [
+       "sql.instances.restoreBackup"
+      ]
+     }
+    },
+    "instanceId": {
+     "type": "string",
+     "description": "The ID of the instance that the backup was taken from."
+ },
+ "kind": {
+ "type": "string",
+ "description": "This is always sql#restoreBackupContext.",
+ "default": "sql#restoreBackupContext"
+ }
+ }
+ },
+ "Settings": {
+ "id": "Settings",
+ "type": "object",
+ "description": "Database instance settings.",
+ "properties": {
+ "activationPolicy": {
+ "type": "string",
+ "description": "The activation policy for this instance. This specifies when the instance should be activated and is applicable only when the instance state is RUNNABLE. This can be one of the following.\nALWAYS: The instance should always be active.\nNEVER: The instance should never be activated.\nON_DEMAND: The instance is activated upon receiving requests."
+ },
+ "authorizedGaeApplications": {
+ "type": "array",
+ "description": "The App Engine app IDs that can access this instance.",
+ "items": {
+ "type": "string"
+ }
+ },
+ "backupConfiguration": {
+ "$ref": "BackupConfiguration",
+ "description": "The daily backup configuration for the instance."
+ },
+ "crashSafeReplicationEnabled": {
+ "type": "boolean",
+ "description": "Configuration specific to read replica instances. Indicates whether database flags for crash-safe replication are enabled."
+ },
+ "dataDiskSizeGb": {
+ "type": "string",
+ "description": "The size of the data disk, in GB. Only supported for 2nd Generation instances. The data disk size minimum is 10GB.",
+ "format": "int64"
+ },
+ "dataDiskType": {
+ "type": "string",
+ "description": "The type of data disk. Only supported for 2nd Generation instances. The default type is SSD."
+ },
+ "databaseFlags": {
+ "type": "array",
+ "description": "The database flags passed to the instance at startup.",
+ "items": {
+ "$ref": "DatabaseFlags"
+ }
+ },
+ "databaseReplicationEnabled": {
+ "type": "boolean",
+ "description": "Configuration specific to read replica instances. Indicates whether replication is enabled or not."
+ },
+ "ipConfiguration": {
+ "$ref": "IpConfiguration",
+ "description": "The settings for IP Management. This allows you to enable or disable the instance IP and manage which external networks can connect to the instance."
+ },
+ "kind": {
+ "type": "string",
+ "description": "This is always sql#settings.",
+ "default": "sql#settings"
+ },
+ "locationPreference": {
+ "$ref": "LocationPreference",
+ "description": "The location preference settings. This allows the instance to be located as near as possible to either an App Engine app or GCE zone for better performance."
+ },
+ "maintenanceWindow": {
+ "$ref": "MaintenanceWindow",
+ "description": "The maintenance window for this instance. This specifies when the instance may be restarted for maintenance purposes."
+ },
+ "pricingPlan": {
+ "type": "string",
+ "description": "The pricing plan for this instance. This can be either PER_USE or PACKAGE."
+ },
+ "replicationType": {
+ "type": "string",
+ "description": "The type of replication this instance uses. This can be either ASYNCHRONOUS or SYNCHRONOUS."
+ },
+ "settingsVersion": {
+ "type": "string",
+ "description": "The version of instance settings. This is a required field for the update method to make sure concurrent updates are handled properly. During update, use the most recent settingsVersion value for this instance and do not try to update this value.",
+ "format": "int64",
+ "annotations": {
+ "required": [
+ "sql.instances.update"
+ ]
+ }
+ },
+ "tier": {
+ "type": "string",
+ "description": "The tier of service for this instance, for example D1, D2. 
For more information, see pricing.",
+ "annotations": {
+ "required": [
+ "sql.instances.insert",
+ "sql.instances.update"
+ ]
+ }
+ }
+ }
+ },
+ "SslCert": {
+ "id": "SslCert",
+ "type": "object",
+ "description": "SslCerts Resource",
+ "properties": {
+ "cert": {
+ "type": "string",
+ "description": "PEM representation."
+ },
+ "certSerialNumber": {
+ "type": "string",
+ "description": "Serial number, as extracted from the certificate."
+ },
+ "commonName": {
+ "type": "string",
+ "description": "User supplied name. Constrained to [a-zA-Z.-_ ]+."
+ },
+ "createTime": {
+ "type": "string",
+ "description": "The time when the certificate was created in RFC 3339 format, for example 2012-11-15T16:19:00.094Z.",
+ "format": "date-time"
+ },
+ "expirationTime": {
+ "type": "string",
+ "description": "The time when the certificate expires in RFC 3339 format, for example 2012-11-15T16:19:00.094Z.",
+ "format": "date-time"
+ },
+ "instance": {
+ "type": "string",
+ "description": "Name of the database instance."
+ },
+ "kind": {
+ "type": "string",
+ "description": "This is always sql#sslCert.",
+ "default": "sql#sslCert"
+ },
+ "selfLink": {
+ "type": "string",
+ "description": "The URI of this resource."
+ },
+ "sha1Fingerprint": {
+ "type": "string",
+ "description": "Sha1 Fingerprint."
+ }
+ }
+ },
+ "SslCertDetail": {
+ "id": "SslCertDetail",
+ "type": "object",
+ "description": "SslCertDetail.",
+ "properties": {
+ "certInfo": {
+ "$ref": "SslCert",
+ "description": "The public information about the cert."
+ },
+ "certPrivateKey": {
+ "type": "string",
+ "description": "The private key for the client cert, in PEM format. Keep private in order to protect your security."
+ }
+ }
+ },
+ "SslCertsCreateEphemeralRequest": {
+ "id": "SslCertsCreateEphemeralRequest",
+ "type": "object",
+ "description": "SslCerts create ephemeral certificate request.",
+ "properties": {
+ "public_key": {
+ "type": "string",
+ "description": "PEM encoded public key to include in the signed certificate."
+ }
+ }
+ },
+ "SslCertsInsertRequest": {
+ "id": "SslCertsInsertRequest",
+ "type": "object",
+ "description": "SslCerts insert request.",
+ "properties": {
+ "commonName": {
+ "type": "string",
+ "description": "User supplied name. Must be a distinct name from the other certificates for this instance. New certificates will not be usable until the instance is restarted."
+ }
+ }
+ },
+ "SslCertsInsertResponse": {
+ "id": "SslCertsInsertResponse",
+ "type": "object",
+ "description": "SslCert insert response.",
+ "properties": {
+ "clientCert": {
+ "$ref": "SslCertDetail",
+ "description": "The new client certificate and private key. The new certificate will not work until the instance is restarted."
+ },
+ "kind": {
+ "type": "string",
+ "description": "This is always sql#sslCertsInsert.",
+ "default": "sql#sslCertsInsert"
+ },
+ "serverCaCert": {
+ "$ref": "SslCert",
+ "description": "The server Certificate Authority's certificate. If this is missing, you can force a new one to be generated by calling the resetSslConfig method on the instances resource."
+ }
+ }
+ },
+ "SslCertsListResponse": {
+ "id": "SslCertsListResponse",
+ "type": "object",
+ "description": "SslCerts list response.",
+ "properties": {
+ "items": {
+ "type": "array",
+ "description": "List of client certificates for the instance.",
+ "items": {
+ "$ref": "SslCert"
+ }
+ },
+ "kind": {
+ "type": "string",
+ "description": "This is always sql#sslCertsList.",
+ "default": "sql#sslCertsList"
+ }
+ }
+ },
+ "Tier": {
+ "id": "Tier",
+ "type": "object",
+ "description": "A Google Cloud SQL service tier resource.",
+ "properties": {
+ "DiskQuota": {
+ "type": "string",
+ "description": "The maximum disk size of this tier in bytes.",
+ "format": "int64"
+ },
+ "RAM": {
+ "type": "string",
+ "description": "The maximum RAM usage of this tier in bytes.",
+ "format": "int64"
+ },
+ "kind": {
+ "type": "string",
+ "description": "This is always sql#tier.",
+ "default": "sql#tier"
+ },
+ "region": {
+ "type": "array",
+ "description": "The applicable regions for this tier. Can be us-east1, europe-west1 or asia-east1.",
+ "items": {
+ "type": "string"
+ }
+ },
+ "tier": {
+ "type": "string",
+ "description": "An identifier for the service tier, for example D1, D2 etc. For related information, see Pricing."
+ }
+ }
+ },
+ "TiersListResponse": {
+ "id": "TiersListResponse",
+ "type": "object",
+ "description": "Tiers list response.",
+ "properties": {
+ "items": {
+ "type": "array",
+ "description": "List of tiers.",
+ "items": {
+ "$ref": "Tier"
+ }
+ },
+ "kind": {
+ "type": "string",
+ "description": "This is always sql#tiersList.",
+ "default": "sql#tiersList"
+ }
+ }
+ },
+ "User": {
+ "id": "User",
+ "type": "object",
+ "description": "A Cloud SQL user resource.",
+ "properties": {
+ "etag": {
+ "type": "string",
+ "description": "HTTP 1.1 Entity tag for the resource."
+ },
+ "host": {
+ "type": "string",
+ "description": "The host name from which the user can connect. For insert operations, host defaults to an empty string. For update operations, host is specified as part of the request URL. The host name is not mutable with this API."
+ },
+ "instance": {
+ "type": "string",
+ "description": "The name of the Cloud SQL instance. This does not include the project ID. Can be omitted for update since it is already specified on the URL."
+ },
+ "kind": {
+ "type": "string",
+ "description": "This is always sql#user.",
+ "default": "sql#user"
+ },
+ "name": {
+ "type": "string",
+ "description": "The name of the user in the Cloud SQL instance. Can be omitted for update since it is already specified on the URL."
+ },
+ "password": {
+ "type": "string",
+ "description": "The password for the user."
+ },
+ "project": {
+ "type": "string",
+ "description": "The project ID of the project containing the Cloud SQL database. The Google apps domain is prefixed if applicable. Can be omitted for update since it is already specified on the URL."
+ }
+ }
+ },
+ "UsersListResponse": {
+ "id": "UsersListResponse",
+ "type": "object",
+ "description": "User list response.",
+ "properties": {
+ "items": {
+ "type": "array",
+ "description": "List of user resources in the instance.",
+ "items": {
+ "$ref": "User"
+ }
+ },
+ "kind": {
+ "type": "string",
+ "description": "This is always sql#usersList.",
+ "default": "sql#usersList"
+ },
+ "nextPageToken": {
+ "type": "string",
+ "description": "The continuation token, used to page through large result sets. Provide this value in a subsequent request to return the next page of results."
+ } + } + } + }, + "resources": { + "backupRuns": { + "methods": { + "delete": { + "id": "sql.backupRuns.delete", + "path": "projects/{project}/instances/{instance}/backupRuns/{id}", + "httpMethod": "DELETE", + "description": "Deletes the backup taken by a backup run.", + "parameters": { + "id": { + "type": "string", + "description": "The ID of the Backup Run to delete. To find a Backup Run ID, use the list method.", + "required": true, + "format": "int64", + "location": "path" + }, + "instance": { + "type": "string", + "description": "Cloud SQL instance ID. This does not include the project ID.", + "required": true, + "location": "path" + }, + "project": { + "type": "string", + "description": "Project ID of the project that contains the instance.", + "required": true, + "location": "path" + } + }, + "parameterOrder": [ + "project", + "instance", + "id" + ], + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/sqlservice.admin" + ] + }, + "get": { + "id": "sql.backupRuns.get", + "path": "projects/{project}/instances/{instance}/backupRuns/{id}", + "httpMethod": "GET", + "description": "Retrieves a resource containing information about a backup run.", + "parameters": { + "id": { + "type": "string", + "description": "The ID of this Backup Run.", + "required": true, + "format": "int64", + "location": "path" + }, + "instance": { + "type": "string", + "description": "Cloud SQL instance ID. This does not include the project ID.", + "required": true, + "location": "path" + }, + "project": { + "type": "string", + "description": "Project ID of the project that contains the instance.", + "required": true, + "location": "path" + } + }, + "parameterOrder": [ + "project", + "instance", + "id" + ], + "response": { + "$ref": "BackupRun" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/sqlservice.admin" + ] + }, + "list": { + "id": "sql.backupRuns.list", + "path": "projects/{project}/instances/{instance}/backupRuns", + "httpMethod": "GET", + "description": "Lists all backup runs associated with a given instance and configuration in the reverse chronological order of the enqueued time.", + "parameters": { + "instance": { + "type": "string", + "description": "Cloud SQL instance ID. 
This does not include the project ID.", + "required": true, + "location": "path" + }, + "maxResults": { + "type": "integer", + "description": "Maximum number of backup runs per response.", + "format": "int32", + "location": "query" + }, + "pageToken": { + "type": "string", + "description": "A previously-returned page token representing part of the larger set of results to view.", + "location": "query" + }, + "project": { + "type": "string", + "description": "Project ID of the project that contains the instance.", + "required": true, + "location": "path" + } + }, + "parameterOrder": [ + "project", + "instance" + ], + "response": { + "$ref": "BackupRunsListResponse" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/sqlservice.admin" + ] + } + } + }, + "databases": { + "methods": { + "delete": { + "id": "sql.databases.delete", + "path": "projects/{project}/instances/{instance}/databases/{database}", + "httpMethod": "DELETE", + "description": "Deletes a resource containing information about a database inside a Cloud SQL instance.", + "parameters": { + "database": { + "type": "string", + "description": "Name of the database to be deleted in the instance.", + "required": true, + "location": "path" + }, + "instance": { + "type": "string", + "description": "Database instance ID. This does not include the project ID.", + "required": true, + "location": "path" + }, + "project": { + "type": "string", + "description": "Project ID of the project that contains the instance.", + "required": true, + "location": "path" + } + }, + "parameterOrder": [ + "project", + "instance", + "database" + ], + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/sqlservice.admin" + ] + }, + "get": { + "id": "sql.databases.get", + "path": "projects/{project}/instances/{instance}/databases/{database}", + "httpMethod": "GET", + "description": "Retrieves a resource containing information about a database inside a Cloud SQL instance.", + "parameters": { + "database": { + "type": "string", + "description": "Name of the database in the instance.", + "required": true, + "location": "path" + }, + "instance": { + "type": "string", + "description": "Database instance ID. This does not include the project ID.", + "required": true, + "location": "path" + }, + "project": { + "type": "string", + "description": "Project ID of the project that contains the instance.", + "required": true, + "location": "path" + } + }, + "parameterOrder": [ + "project", + "instance", + "database" + ], + "response": { + "$ref": "Database" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/sqlservice.admin" + ] + }, + "insert": { + "id": "sql.databases.insert", + "path": "projects/{project}/instances/{instance}/databases", + "httpMethod": "POST", + "description": "Inserts a resource containing information about a database inside a Cloud SQL instance.", + "parameters": { + "instance": { + "type": "string", + "description": "Database instance ID. 
This does not include the project ID.", + "required": true, + "location": "path" + }, + "project": { + "type": "string", + "description": "Project ID of the project that contains the instance.", + "required": true, + "location": "path" + } + }, + "parameterOrder": [ + "project", + "instance" + ], + "request": { + "$ref": "Database" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/sqlservice.admin" + ] + }, + "list": { + "id": "sql.databases.list", + "path": "projects/{project}/instances/{instance}/databases", + "httpMethod": "GET", + "description": "Lists databases in the specified Cloud SQL instance.", + "parameters": { + "instance": { + "type": "string", + "description": "Cloud SQL instance ID. This does not include the project ID.", + "required": true, + "location": "path" + }, + "project": { + "type": "string", + "description": "Project ID of the project for which to list Cloud SQL instances.", + "required": true, + "location": "path" + } + }, + "parameterOrder": [ + "project", + "instance" + ], + "response": { + "$ref": "DatabasesListResponse" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/sqlservice.admin" + ] + }, + "patch": { + "id": "sql.databases.patch", + "path": "projects/{project}/instances/{instance}/databases/{database}", + "httpMethod": "PATCH", + "description": "Updates a resource containing information about a database inside a Cloud SQL instance. This method supports patch semantics.", + "parameters": { + "database": { + "type": "string", + "description": "Name of the database to be updated in the instance.", + "required": true, + "location": "path" + }, + "instance": { + "type": "string", + "description": "Database instance ID. This does not include the project ID.", + "required": true, + "location": "path" + }, + "project": { + "type": "string", + "description": "Project ID of the project that contains the instance.", + "required": true, + "location": "path" + } + }, + "parameterOrder": [ + "project", + "instance", + "database" + ], + "request": { + "$ref": "Database" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/sqlservice.admin" + ] + }, + "update": { + "id": "sql.databases.update", + "path": "projects/{project}/instances/{instance}/databases/{database}", + "httpMethod": "PUT", + "description": "Updates a resource containing information about a database inside a Cloud SQL instance.", + "parameters": { + "database": { + "type": "string", + "description": "Name of the database to be updated in the instance.", + "required": true, + "location": "path" + }, + "instance": { + "type": "string", + "description": "Database instance ID. 
This does not include the project ID.", + "required": true, + "location": "path" + }, + "project": { + "type": "string", + "description": "Project ID of the project that contains the instance.", + "required": true, + "location": "path" + } + }, + "parameterOrder": [ + "project", + "instance", + "database" + ], + "request": { + "$ref": "Database" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/sqlservice.admin" + ] + } + } + }, + "flags": { + "methods": { + "list": { + "id": "sql.flags.list", + "path": "flags", + "httpMethod": "GET", + "description": "List all available database flags for Google Cloud SQL instances.", + "response": { + "$ref": "FlagsListResponse" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/sqlservice.admin" + ] + } + } + }, + "instances": { + "methods": { + "clone": { + "id": "sql.instances.clone", + "path": "projects/{project}/instances/{instance}/clone", + "httpMethod": "POST", + "description": "Creates a Cloud SQL instance as a clone of the source instance.", + "parameters": { + "instance": { + "type": "string", + "description": "The ID of the Cloud SQL instance to be cloned (source). This does not include the project ID.", + "required": true, + "location": "path" + }, + "project": { + "type": "string", + "description": "Project ID of the source as well as the clone Cloud SQL instance.", + "required": true, + "location": "path" + } + }, + "parameterOrder": [ + "project", + "instance" + ], + "request": { + "$ref": "InstancesCloneRequest" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/sqlservice.admin" + ] + }, + "delete": { + "id": "sql.instances.delete", + "path": "projects/{project}/instances/{instance}", + "httpMethod": "DELETE", + "description": "Deletes a Cloud SQL instance.", + "parameters": { + "instance": { + "type": "string", + "description": "Cloud SQL instance ID. This does not include the project ID.", + "required": true, + "location": "path" + }, + "project": { + "type": "string", + "description": "Project ID of the project that contains the instance to be deleted.", + "required": true, + "location": "path" + } + }, + "parameterOrder": [ + "project", + "instance" + ], + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/sqlservice.admin" + ] + }, + "export": { + "id": "sql.instances.export", + "path": "projects/{project}/instances/{instance}/export", + "httpMethod": "POST", + "description": "Exports data from a Cloud SQL instance to a Google Cloud Storage bucket as a MySQL dump file.", + "parameters": { + "instance": { + "type": "string", + "description": "Cloud SQL instance ID. 
This does not include the project ID.", + "required": true, + "location": "path" + }, + "project": { + "type": "string", + "description": "Project ID of the project that contains the instance to be exported.", + "required": true, + "location": "path" + } + }, + "parameterOrder": [ + "project", + "instance" + ], + "request": { + "$ref": "InstancesExportRequest" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] + }, + "failover": { + "id": "sql.instances.failover", + "path": "projects/{project}/instances/{instance}/failover", + "httpMethod": "POST", + "description": "Failover the instance to its failover replica instance.", + "parameters": { + "instance": { + "type": "string", + "description": "Cloud SQL instance ID. This does not include the project ID.", + "required": true, + "location": "path" + }, + "project": { + "type": "string", + "description": "ID of the project that contains the read replica.", + "required": true, + "location": "path" + } + }, + "parameterOrder": [ + "project", + "instance" + ], + "request": { + "$ref": "InstancesFailoverRequest" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/sqlservice.admin" + ] + }, + "get": { + "id": "sql.instances.get", + "path": "projects/{project}/instances/{instance}", + "httpMethod": "GET", + "description": "Retrieves a resource containing information about a Cloud SQL instance.", + "parameters": { + "instance": { + "type": "string", + "description": "Database instance ID. This does not include the project ID.", + "required": true, + "location": "path" + }, + "project": { + "type": "string", + "description": "Project ID of the project that contains the instance.", + "required": true, + "location": "path" + } + }, + "parameterOrder": [ + "project", + "instance" + ], + "response": { + "$ref": "DatabaseInstance" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/sqlservice.admin" + ] + }, + "import": { + "id": "sql.instances.import", + "path": "projects/{project}/instances/{instance}/import", + "httpMethod": "POST", + "description": "Imports data into a Cloud SQL instance from a MySQL dump file in Google Cloud Storage.", + "parameters": { + "instance": { + "type": "string", + "description": "Cloud SQL instance ID. 
This does not include the project ID.",
+ "required": true,
+ "location": "path"
+ },
+ "project": {
+ "type": "string",
+ "description": "Project ID of the project that contains the instance.",
+ "required": true,
+ "location": "path"
+ }
+ },
+ "parameterOrder": [
+ "project",
+ "instance"
+ ],
+ "request": {
+ "$ref": "InstancesImportRequest"
+ },
+ "response": {
+ "$ref": "Operation"
+ },
+ "scopes": [
+ "https://www.googleapis.com/auth/cloud-platform"
+ ]
+ },
+ "insert": {
+ "id": "sql.instances.insert",
+ "path": "projects/{project}/instances",
+ "httpMethod": "POST",
+ "description": "Creates a new Cloud SQL instance.",
+ "parameters": {
+ "project": {
+ "type": "string",
+ "description": "Project ID of the project to which the newly created Cloud SQL instances should belong.",
+ "required": true,
+ "location": "path"
+ }
+ },
+ "parameterOrder": [
+ "project"
+ ],
+ "request": {
+ "$ref": "DatabaseInstance"
+ },
+ "response": {
+ "$ref": "Operation"
+ },
+ "scopes": [
+ "https://www.googleapis.com/auth/cloud-platform",
+ "https://www.googleapis.com/auth/sqlservice.admin"
+ ]
+ },
+ "list": {
+ "id": "sql.instances.list",
+ "path": "projects/{project}/instances",
+ "httpMethod": "GET",
+ "description": "Lists instances under a given project in the alphabetical order of the instance name.",
+ "parameters": {
+ "maxResults": {
+ "type": "integer",
+ "description": "The maximum number of results to return per response.",
+ "format": "uint32",
+ "location": "query"
+ },
+ "pageToken": {
+ "type": "string",
+ "description": "A previously-returned page token representing part of the larger set of results to view.",
+ "location": "query"
+ },
+ "project": {
+ "type": "string",
+ "description": "Project ID of the project for which to list Cloud SQL instances.",
+ "required": true,
+ "location": "path"
+ }
+ },
+ "parameterOrder": [
+ "project"
+ ],
+ "response": {
+ "$ref": "InstancesListResponse"
+ },
+ "scopes": [
+ "https://www.googleapis.com/auth/cloud-platform",
+ "https://www.googleapis.com/auth/sqlservice.admin"
+ ]
+ },
+ "patch": {
+ "id": "sql.instances.patch",
+ "path": "projects/{project}/instances/{instance}",
+ "httpMethod": "PATCH",
+ "description": "Updates settings of a Cloud SQL instance. This method supports patch semantics.",
+ "parameters": {
+ "instance": {
+ "type": "string",
+ "description": "Cloud SQL instance ID. 
This does not include the project ID.", + "required": true, + "location": "path" + }, + "project": { + "type": "string", + "description": "Project ID of the project that contains the instance.", + "required": true, + "location": "path" + } + }, + "parameterOrder": [ + "project", + "instance" + ], + "request": { + "$ref": "DatabaseInstance" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/sqlservice.admin" + ] + }, + "promoteReplica": { + "id": "sql.instances.promoteReplica", + "path": "projects/{project}/instances/{instance}/promoteReplica", + "httpMethod": "POST", + "description": "Promotes the read replica instance to be a stand-alone Cloud SQL instance.", + "parameters": { + "instance": { + "type": "string", + "description": "Cloud SQL read replica instance name.", + "required": true, + "location": "path" + }, + "project": { + "type": "string", + "description": "ID of the project that contains the read replica.", + "required": true, + "location": "path" + } + }, + "parameterOrder": [ + "project", + "instance" + ], + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/sqlservice.admin" + ] + }, + "resetSslConfig": { + "id": "sql.instances.resetSslConfig", + "path": "projects/{project}/instances/{instance}/resetSslConfig", + "httpMethod": "POST", + "description": "Deletes all client certificates and generates a new server SSL certificate for the instance. The changes will not take effect until the instance is restarted. Existing instances without a server certificate will need to call this once to set a server certificate.", + "parameters": { + "instance": { + "type": "string", + "description": "Cloud SQL instance ID. This does not include the project ID.", + "required": true, + "location": "path" + }, + "project": { + "type": "string", + "description": "Project ID of the project that contains the instance.", + "required": true, + "location": "path" + } + }, + "parameterOrder": [ + "project", + "instance" + ], + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/sqlservice.admin" + ] + }, + "restart": { + "id": "sql.instances.restart", + "path": "projects/{project}/instances/{instance}/restart", + "httpMethod": "POST", + "description": "Restarts a Cloud SQL instance.", + "parameters": { + "instance": { + "type": "string", + "description": "Cloud SQL instance ID. This does not include the project ID.", + "required": true, + "location": "path" + }, + "project": { + "type": "string", + "description": "Project ID of the project that contains the instance to be restarted.", + "required": true, + "location": "path" + } + }, + "parameterOrder": [ + "project", + "instance" + ], + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/sqlservice.admin" + ] + }, + "restoreBackup": { + "id": "sql.instances.restoreBackup", + "path": "projects/{project}/instances/{instance}/restoreBackup", + "httpMethod": "POST", + "description": "Restores a backup of a Cloud SQL instance.", + "parameters": { + "instance": { + "type": "string", + "description": "Cloud SQL instance ID. 
This does not include the project ID.", + "required": true, + "location": "path" + }, + "project": { + "type": "string", + "description": "Project ID of the project that contains the instance.", + "required": true, + "location": "path" + } + }, + "parameterOrder": [ + "project", + "instance" + ], + "request": { + "$ref": "InstancesRestoreBackupRequest" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/sqlservice.admin" + ] + }, + "startReplica": { + "id": "sql.instances.startReplica", + "path": "projects/{project}/instances/{instance}/startReplica", + "httpMethod": "POST", + "description": "Starts the replication in the read replica instance.", + "parameters": { + "instance": { + "type": "string", + "description": "Cloud SQL read replica instance name.", + "required": true, + "location": "path" + }, + "project": { + "type": "string", + "description": "ID of the project that contains the read replica.", + "required": true, + "location": "path" + } + }, + "parameterOrder": [ + "project", + "instance" + ], + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/sqlservice.admin" + ] + }, + "stopReplica": { + "id": "sql.instances.stopReplica", + "path": "projects/{project}/instances/{instance}/stopReplica", + "httpMethod": "POST", + "description": "Stops the replication in the read replica instance.", + "parameters": { + "instance": { + "type": "string", + "description": "Cloud SQL read replica instance name.", + "required": true, + "location": "path" + }, + "project": { + "type": "string", + "description": "ID of the project that contains the read replica.", + "required": true, + "location": "path" + } + }, + "parameterOrder": [ + "project", + "instance" + ], + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/sqlservice.admin" + ] + }, + "update": { + "id": "sql.instances.update", + "path": "projects/{project}/instances/{instance}", + "httpMethod": "PUT", + "description": "Updates settings of a Cloud SQL instance. Caution: This is not a partial update, so you must include values for all the settings that you want to retain. For partial updates, use patch.", + "parameters": { + "instance": { + "type": "string", + "description": "Cloud SQL instance ID. 
This does not include the project ID.", + "required": true, + "location": "path" + }, + "project": { + "type": "string", + "description": "Project ID of the project that contains the instance.", + "required": true, + "location": "path" + } + }, + "parameterOrder": [ + "project", + "instance" + ], + "etagRequired": true, + "request": { + "$ref": "DatabaseInstance" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/sqlservice.admin" + ] + } + } + }, + "operations": { + "methods": { + "get": { + "id": "sql.operations.get", + "path": "projects/{project}/operations/{operation}", + "httpMethod": "GET", + "description": "Retrieves an instance operation that has been performed on an instance.", + "parameters": { + "operation": { + "type": "string", + "description": "Instance operation ID.", + "required": true, + "location": "path" + }, + "project": { + "type": "string", + "description": "Project ID of the project that contains the instance.", + "required": true, + "location": "path" + } + }, + "parameterOrder": [ + "project", + "operation" + ], + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/sqlservice.admin" + ] + }, + "list": { + "id": "sql.operations.list", + "path": "projects/{project}/operations", + "httpMethod": "GET", + "description": "Lists all instance operations that have been performed on the given Cloud SQL instance in the reverse chronological order of the start time.", + "parameters": { + "instance": { + "type": "string", + "description": "Cloud SQL instance ID. This does not include the project ID.", + "required": true, + "location": "query" + }, + "maxResults": { + "type": "integer", + "description": "Maximum number of operations per response.", + "format": "uint32", + "location": "query" + }, + "pageToken": { + "type": "string", + "description": "A previously-returned page token representing part of the larger set of results to view.", + "location": "query" + }, + "project": { + "type": "string", + "description": "Project ID of the project that contains the instance.", + "required": true, + "location": "path" + } + }, + "parameterOrder": [ + "project", + "instance" + ], + "response": { + "$ref": "OperationsListResponse" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/sqlservice.admin" + ] + } + } + }, + "sslCerts": { + "methods": { + "createEphemeral": { + "id": "sql.sslCerts.createEphemeral", + "path": "projects/{project}/instances/{instance}/createEphemeral", + "httpMethod": "POST", + "description": "Generates a short-lived X509 certificate containing the provided public key and signed by a private key specific to the target instance. Users may use the certificate to authenticate as themselves when connecting to the database.", + "parameters": { + "instance": { + "type": "string", + "description": "Cloud SQL instance ID. 
This does not include the project ID.", + "required": true, + "location": "path" + }, + "project": { + "type": "string", + "description": "Project ID of the Cloud SQL project.", + "required": true, + "location": "path" + } + }, + "parameterOrder": [ + "project", + "instance" + ], + "request": { + "$ref": "SslCertsCreateEphemeralRequest" + }, + "response": { + "$ref": "SslCert" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/sqlservice.admin" + ] + }, + "delete": { + "id": "sql.sslCerts.delete", + "path": "projects/{project}/instances/{instance}/sslCerts/{sha1Fingerprint}", + "httpMethod": "DELETE", + "description": "Deletes the SSL certificate. The change will not take effect until the instance is restarted.", + "parameters": { + "instance": { + "type": "string", + "description": "Cloud SQL instance ID. This does not include the project ID.", + "required": true, + "location": "path" + }, + "project": { + "type": "string", + "description": "Project ID of the project that contains the instance to be deleted.", + "required": true, + "location": "path" + }, + "sha1Fingerprint": { + "type": "string", + "description": "Sha1 FingerPrint.", + "required": true, + "location": "path" + } + }, + "parameterOrder": [ + "project", + "instance", + "sha1Fingerprint" + ], + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/sqlservice.admin" + ] + }, + "get": { + "id": "sql.sslCerts.get", + "path": "projects/{project}/instances/{instance}/sslCerts/{sha1Fingerprint}", + "httpMethod": "GET", + "description": "Retrieves a particular SSL certificate. Does not include the private key (required for usage). The private key must be saved from the response to initial creation.", + "parameters": { + "instance": { + "type": "string", + "description": "Cloud SQL instance ID. This does not include the project ID.", + "required": true, + "location": "path" + }, + "project": { + "type": "string", + "description": "Project ID of the project that contains the instance.", + "required": true, + "location": "path" + }, + "sha1Fingerprint": { + "type": "string", + "description": "Sha1 FingerPrint.", + "required": true, + "location": "path" + } + }, + "parameterOrder": [ + "project", + "instance", + "sha1Fingerprint" + ], + "response": { + "$ref": "SslCert" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/sqlservice.admin" + ] + }, + "insert": { + "id": "sql.sslCerts.insert", + "path": "projects/{project}/instances/{instance}/sslCerts", + "httpMethod": "POST", + "description": "Creates an SSL certificate and returns it along with the private key and server certificate authority. The new certificate will not be usable until the instance is restarted.", + "parameters": { + "instance": { + "type": "string", + "description": "Cloud SQL instance ID. 
This does not include the project ID.", + "required": true, + "location": "path" + }, + "project": { + "type": "string", + "description": "Project ID of the project to which the newly created Cloud SQL instances should belong.", + "required": true, + "location": "path" + } + }, + "parameterOrder": [ + "project", + "instance" + ], + "request": { + "$ref": "SslCertsInsertRequest" + }, + "response": { + "$ref": "SslCertsInsertResponse" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/sqlservice.admin" + ] + }, + "list": { + "id": "sql.sslCerts.list", + "path": "projects/{project}/instances/{instance}/sslCerts", + "httpMethod": "GET", + "description": "Lists all of the current SSL certificates for the instance.", + "parameters": { + "instance": { + "type": "string", + "description": "Cloud SQL instance ID. This does not include the project ID.", + "required": true, + "location": "path" + }, + "project": { + "type": "string", + "description": "Project ID of the project for which to list Cloud SQL instances.", + "required": true, + "location": "path" + } + }, + "parameterOrder": [ + "project", + "instance" + ], + "response": { + "$ref": "SslCertsListResponse" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/sqlservice.admin" + ] + } + } + }, + "tiers": { + "methods": { + "list": { + "id": "sql.tiers.list", + "path": "projects/{project}/tiers", + "httpMethod": "GET", + "description": "Lists all available service tiers for Google Cloud SQL, for example D1, D2. For related information, see Pricing.", + "parameters": { + "project": { + "type": "string", + "description": "Project ID of the project for which to list tiers.", + "required": true, + "location": "path" + } + }, + "parameterOrder": [ + "project" + ], + "response": { + "$ref": "TiersListResponse" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/sqlservice.admin" + ] + } + } + }, + "users": { + "methods": { + "delete": { + "id": "sql.users.delete", + "path": "projects/{project}/instances/{instance}/users", + "httpMethod": "DELETE", + "description": "Deletes a user from a Cloud SQL instance.", + "parameters": { + "host": { + "type": "string", + "description": "Host of the user in the instance.", + "required": true, + "location": "query" + }, + "instance": { + "type": "string", + "description": "Database instance ID. This does not include the project ID.", + "required": true, + "location": "path" + }, + "name": { + "type": "string", + "description": "Name of the user in the instance.", + "required": true, + "location": "query" + }, + "project": { + "type": "string", + "description": "Project ID of the project that contains the instance.", + "required": true, + "location": "path" + } + }, + "parameterOrder": [ + "project", + "instance", + "host", + "name" + ], + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/sqlservice.admin" + ] + }, + "insert": { + "id": "sql.users.insert", + "path": "projects/{project}/instances/{instance}/users", + "httpMethod": "POST", + "description": "Creates a new user in a Cloud SQL instance.", + "parameters": { + "instance": { + "type": "string", + "description": "Database instance ID. 
This does not include the project ID.", + "required": true, + "location": "path" + }, + "project": { + "type": "string", + "description": "Project ID of the project that contains the instance.", + "required": true, + "location": "path" + } + }, + "parameterOrder": [ + "project", + "instance" + ], + "request": { + "$ref": "User" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/sqlservice.admin" + ] + }, + "list": { + "id": "sql.users.list", + "path": "projects/{project}/instances/{instance}/users", + "httpMethod": "GET", + "description": "Lists users in the specified Cloud SQL instance.", + "parameters": { + "instance": { + "type": "string", + "description": "Database instance ID. This does not include the project ID.", + "required": true, + "location": "path" + }, + "project": { + "type": "string", + "description": "Project ID of the project that contains the instance.", + "required": true, + "location": "path" + } + }, + "parameterOrder": [ + "project", + "instance" + ], + "response": { + "$ref": "UsersListResponse" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/sqlservice.admin" + ] + }, + "update": { + "id": "sql.users.update", + "path": "projects/{project}/instances/{instance}/users", + "httpMethod": "PUT", + "description": "Updates an existing user in a Cloud SQL instance.", + "parameters": { + "host": { + "type": "string", + "description": "Host of the user in the instance.", + "required": true, + "location": "query" + }, + "instance": { + "type": "string", + "description": "Database instance ID. This does not include the project ID.", + "required": true, + "location": "path" + }, + "name": { + "type": "string", + "description": "Name of the user in the instance.", + "required": true, + "location": "query" + }, + "project": { + "type": "string", + "description": "Project ID of the project that contains the instance.", + "required": true, + "location": "path" + } + }, + "parameterOrder": [ + "project", + "instance", + "host", + "name" + ], + "request": { + "$ref": "User" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/sqlservice.admin" + ] + } + } + } + } +} diff --git a/vendor/google.golang.org/api/sqladmin/v1beta4/sqladmin-gen.go b/vendor/google.golang.org/api/sqladmin/v1beta4/sqladmin-gen.go new file mode 100644 index 000000000..9f18b942b --- /dev/null +++ b/vendor/google.golang.org/api/sqladmin/v1beta4/sqladmin-gen.go @@ -0,0 +1,6956 @@ +// Package sqladmin provides access to the Cloud SQL Administration API. +// +// See https://cloud.google.com/sql/docs/reference/latest +// +// Usage example: +// +// import "google.golang.org/api/sqladmin/v1beta4" +// ... +// sqladminService, err := sqladmin.New(oauthHttpClient) +package sqladmin + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + context "golang.org/x/net/context" + ctxhttp "golang.org/x/net/context/ctxhttp" + gensupport "google.golang.org/api/gensupport" + googleapi "google.golang.org/api/googleapi" + "io" + "net/http" + "net/url" + "strconv" + "strings" +) + +// Always reference these packages, just in case the auto-generated code +// below doesn't. 
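+
+// A minimal usage sketch, assuming oauthHttpClient is an
+// OAuth2-authorized *http.Client and "my-project" is a valid project ID
+// (New and Instances.List are defined later in this file):
+//
+//	svc, err := sqladmin.New(oauthHttpClient)
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	// List the Cloud SQL instances that belong to the project.
+//	resp, err := svc.Instances.List("my-project").Do()
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	for _, inst := range resp.Items {
+//		fmt.Printf("%s: %s\n", inst.Name, inst.State)
+//	}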
+var _ = bytes.NewBuffer +var _ = strconv.Itoa +var _ = fmt.Sprintf +var _ = json.NewDecoder +var _ = io.Copy +var _ = url.Parse +var _ = gensupport.MarshalJSON +var _ = googleapi.Version +var _ = errors.New +var _ = strings.Replace +var _ = context.Canceled +var _ = ctxhttp.Do + +const apiId = "sqladmin:v1beta4" +const apiName = "sqladmin" +const apiVersion = "v1beta4" +const basePath = "https://www.googleapis.com/sql/v1beta4/" + +// OAuth2 scopes used by this API. +const ( + // View and manage your data across Google Cloud Platform services + CloudPlatformScope = "https://www.googleapis.com/auth/cloud-platform" + + // Manage your Google SQL Service instances + SqlserviceAdminScope = "https://www.googleapis.com/auth/sqlservice.admin" +) + +func New(client *http.Client) (*Service, error) { + if client == nil { + return nil, errors.New("client is nil") + } + s := &Service{client: client, BasePath: basePath} + s.BackupRuns = NewBackupRunsService(s) + s.Databases = NewDatabasesService(s) + s.Flags = NewFlagsService(s) + s.Instances = NewInstancesService(s) + s.Operations = NewOperationsService(s) + s.SslCerts = NewSslCertsService(s) + s.Tiers = NewTiersService(s) + s.Users = NewUsersService(s) + return s, nil +} + +type Service struct { + client *http.Client + BasePath string // API endpoint base URL + UserAgent string // optional additional User-Agent fragment + + BackupRuns *BackupRunsService + + Databases *DatabasesService + + Flags *FlagsService + + Instances *InstancesService + + Operations *OperationsService + + SslCerts *SslCertsService + + Tiers *TiersService + + Users *UsersService +} + +func (s *Service) userAgent() string { + if s.UserAgent == "" { + return googleapi.UserAgent + } + return googleapi.UserAgent + " " + s.UserAgent +} + +func NewBackupRunsService(s *Service) *BackupRunsService { + rs := &BackupRunsService{s: s} + return rs +} + +type BackupRunsService struct { + s *Service +} + +func NewDatabasesService(s *Service) *DatabasesService { + rs := &DatabasesService{s: s} + return rs +} + +type DatabasesService struct { + s *Service +} + +func NewFlagsService(s *Service) *FlagsService { + rs := &FlagsService{s: s} + return rs +} + +type FlagsService struct { + s *Service +} + +func NewInstancesService(s *Service) *InstancesService { + rs := &InstancesService{s: s} + return rs +} + +type InstancesService struct { + s *Service +} + +func NewOperationsService(s *Service) *OperationsService { + rs := &OperationsService{s: s} + return rs +} + +type OperationsService struct { + s *Service +} + +func NewSslCertsService(s *Service) *SslCertsService { + rs := &SslCertsService{s: s} + return rs +} + +type SslCertsService struct { + s *Service +} + +func NewTiersService(s *Service) *TiersService { + rs := &TiersService{s: s} + return rs +} + +type TiersService struct { + s *Service +} + +func NewUsersService(s *Service) *UsersService { + rs := &UsersService{s: s} + return rs +} + +type UsersService struct { + s *Service +} + +// AclEntry: An entry for an Access Control list. +type AclEntry struct { + // ExpirationTime: The time when this access control entry expires in + // RFC 3339 format, for example 2012-11-15T16:19:00.094Z. + ExpirationTime string `json:"expirationTime,omitempty"` + + // Kind: This is always sql#aclEntry. + Kind string `json:"kind,omitempty"` + + // Name: An optional label to identify this entry. + Name string `json:"name,omitempty"` + + // Value: The whitelisted value for the access control list. 
+ Value string `json:"value,omitempty"`
+
+ // ForceSendFields is a list of field names (e.g. "ExpirationTime") to
+ // unconditionally include in API requests. By default, fields with
+ // empty values are omitted from API requests. However, any non-pointer,
+ // non-interface field appearing in ForceSendFields will be sent to the
+ // server regardless of whether the field is empty or not. This may be
+ // used to include empty fields in Patch requests.
+ ForceSendFields []string `json:"-"`
+}
+
+func (s *AclEntry) MarshalJSON() ([]byte, error) {
+ type noMethod AclEntry
+ raw := noMethod(*s)
+ return gensupport.MarshalJSON(raw, s.ForceSendFields)
+}
+
+// BackupConfiguration: Database instance backup configuration.
+type BackupConfiguration struct {
+ // BinaryLogEnabled: Whether binary log is enabled. If backup
+ // configuration is disabled, binary log must be disabled as well.
+ BinaryLogEnabled bool `json:"binaryLogEnabled,omitempty"`
+
+ // Enabled: Whether this configuration is enabled.
+ Enabled bool `json:"enabled,omitempty"`
+
+ // Kind: This is always sql#backupConfiguration.
+ Kind string `json:"kind,omitempty"`
+
+ // StartTime: Start time for the daily backup configuration in UTC
+ // timezone in the 24 hour format - HH:MM.
+ StartTime string `json:"startTime,omitempty"`
+
+ // ForceSendFields is a list of field names (e.g. "BinaryLogEnabled") to
+ // unconditionally include in API requests. By default, fields with
+ // empty values are omitted from API requests. However, any non-pointer,
+ // non-interface field appearing in ForceSendFields will be sent to the
+ // server regardless of whether the field is empty or not. This may be
+ // used to include empty fields in Patch requests.
+ ForceSendFields []string `json:"-"`
+}
+
+func (s *BackupConfiguration) MarshalJSON() ([]byte, error) {
+ type noMethod BackupConfiguration
+ raw := noMethod(*s)
+ return gensupport.MarshalJSON(raw, s.ForceSendFields)
+}
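+
+// A short sketch of the ForceSendFields mechanism described above,
+// assuming a caller wants to explicitly disable backups in a patch
+// request (the literal values here are hypothetical). With the
+// omitempty tags, false booleans are normally dropped from the request
+// body, so clearing a setting requires naming the field explicitly:
+//
+//	cfg := &BackupConfiguration{
+//		Enabled:          false,
+//		BinaryLogEnabled: false,
+//		// Without ForceSendFields, both false values would be omitted
+//		// from the marshaled JSON entirely.
+//		ForceSendFields: []string{"Enabled", "BinaryLogEnabled"},
+//	}
+//	body, _ := cfg.MarshalJSON()
+//	// body == `{"binaryLogEnabled":false,"enabled":false}`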
+
+// BackupRun: A database instance backup run resource.
+type BackupRun struct {
+ // EndTime: The time the backup operation completed in UTC timezone in
+ // RFC 3339 format, for example 2012-11-15T16:19:00.094Z.
+ EndTime string `json:"endTime,omitempty"`
+
+ // EnqueuedTime: The time the run was enqueued in UTC timezone in RFC
+ // 3339 format, for example 2012-11-15T16:19:00.094Z.
+ EnqueuedTime string `json:"enqueuedTime,omitempty"`
+
+ // Error: Information about why the backup operation failed. This is
+ // only present if the run has the FAILED status.
+ Error *OperationError `json:"error,omitempty"`
+
+ // Id: A unique identifier for this backup run. Note that this is unique
+ // only within the scope of a particular Cloud SQL instance.
+ Id int64 `json:"id,omitempty,string"`
+
+ // Instance: Name of the database instance.
+ Instance string `json:"instance,omitempty"`
+
+ // Kind: This is always sql#backupRun.
+ Kind string `json:"kind,omitempty"`
+
+ // SelfLink: The URI of this resource.
+ SelfLink string `json:"selfLink,omitempty"`
+
+ // StartTime: The time the backup operation actually started in UTC
+ // timezone in RFC 3339 format, for example 2012-11-15T16:19:00.094Z.
+ StartTime string `json:"startTime,omitempty"`
+
+ // Status: The status of this run.
+ Status string `json:"status,omitempty"`
+
+ // WindowStartTime: The start time of the backup window during which
+ // this backup was attempted in RFC 3339 format, for example
+ // 2012-11-15T16:19:00.094Z.
+ WindowStartTime string `json:"windowStartTime,omitempty"`
+
+ // ServerResponse contains the HTTP response code and headers from the
+ // server.
+ googleapi.ServerResponse `json:"-"`
+
+ // ForceSendFields is a list of field names (e.g. "EndTime") to
+ // unconditionally include in API requests. By default, fields with
+ // empty values are omitted from API requests. However, any non-pointer,
+ // non-interface field appearing in ForceSendFields will be sent to the
+ // server regardless of whether the field is empty or not. This may be
+ // used to include empty fields in Patch requests.
+ ForceSendFields []string `json:"-"`
+}
+
+func (s *BackupRun) MarshalJSON() ([]byte, error) {
+ type noMethod BackupRun
+ raw := noMethod(*s)
+ return gensupport.MarshalJSON(raw, s.ForceSendFields)
+}
+
+// BackupRunsListResponse: Backup run list results.
+type BackupRunsListResponse struct {
+ // Items: A list of backup runs in reverse chronological order of the
+ // enqueued time.
+ Items []*BackupRun `json:"items,omitempty"`
+
+ // Kind: This is always sql#backupRunsList.
+ Kind string `json:"kind,omitempty"`
+
+ // NextPageToken: The continuation token, used to page through large
+ // result sets. Provide this value in a subsequent request to return the
+ // next page of results.
+ NextPageToken string `json:"nextPageToken,omitempty"`
+
+ // ServerResponse contains the HTTP response code and headers from the
+ // server.
+ googleapi.ServerResponse `json:"-"`
+
+ // ForceSendFields is a list of field names (e.g. "Items") to
+ // unconditionally include in API requests. By default, fields with
+ // empty values are omitted from API requests. However, any non-pointer,
+ // non-interface field appearing in ForceSendFields will be sent to the
+ // server regardless of whether the field is empty or not. This may be
+ // used to include empty fields in Patch requests.
+ ForceSendFields []string `json:"-"`
+}
+
+func (s *BackupRunsListResponse) MarshalJSON() ([]byte, error) {
+ type noMethod BackupRunsListResponse
+ raw := noMethod(*s)
+ return gensupport.MarshalJSON(raw, s.ForceSendFields)
+}
+
+// BinLogCoordinates: Binary log coordinates.
+type BinLogCoordinates struct {
+ // BinLogFileName: Name of the binary log file for a Cloud SQL instance.
+ BinLogFileName string `json:"binLogFileName,omitempty"`
+
+ // BinLogPosition: Position (offset) within the binary log file.
+ BinLogPosition int64 `json:"binLogPosition,omitempty,string"`
+
+ // Kind: This is always sql#binLogCoordinates.
+ Kind string `json:"kind,omitempty"`
+
+ // ForceSendFields is a list of field names (e.g. "BinLogFileName") to
+ // unconditionally include in API requests. By default, fields with
+ // empty values are omitted from API requests. However, any non-pointer,
+ // non-interface field appearing in ForceSendFields will be sent to the
+ // server regardless of whether the field is empty or not. This may be
+ // used to include empty fields in Patch requests.
+ ForceSendFields []string `json:"-"`
+}
+
+func (s *BinLogCoordinates) MarshalJSON() ([]byte, error) {
+ type noMethod BinLogCoordinates
+ raw := noMethod(*s)
+ return gensupport.MarshalJSON(raw, s.ForceSendFields)
+}
+
+// CloneContext: Database instance clone context.
+type CloneContext struct {
+ // BinLogCoordinates: Binary log coordinates, if specified, identify
+ // the position up to which the source instance should be cloned. If
+ // not specified, the source instance is cloned up to the most recent
+ // binary log coordinates.
+ BinLogCoordinates *BinLogCoordinates `json:"binLogCoordinates,omitempty"` + + // DestinationInstanceName: Name of the Cloud SQL instance to be created + // as a clone. + DestinationInstanceName string `json:"destinationInstanceName,omitempty"` + + // Kind: This is always sql#cloneContext. + Kind string `json:"kind,omitempty"` + + // ForceSendFields is a list of field names (e.g. "BinLogCoordinates") + // to unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *CloneContext) MarshalJSON() ([]byte, error) { + type noMethod CloneContext + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +// Database: A database resource inside a Cloud SQL instance. +type Database struct { + // Charset: The MySQL charset value. + Charset string `json:"charset,omitempty"` + + // Collation: The MySQL collation value. + Collation string `json:"collation,omitempty"` + + // Etag: HTTP 1.1 Entity tag for the resource. + Etag string `json:"etag,omitempty"` + + // Instance: The name of the Cloud SQL instance. This does not include + // the project ID. + Instance string `json:"instance,omitempty"` + + // Kind: This is always sql#database. + Kind string `json:"kind,omitempty"` + + // Name: The name of the database in the Cloud SQL instance. This does + // not include the project ID or instance name. + Name string `json:"name,omitempty"` + + // Project: The project ID of the project containing the Cloud SQL + // database. The Google apps domain is prefixed if applicable. + Project string `json:"project,omitempty"` + + // SelfLink: The URI of this resource. + SelfLink string `json:"selfLink,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Charset") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *Database) MarshalJSON() ([]byte, error) { + type noMethod Database + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +// DatabaseFlags: MySQL flags for Cloud SQL instances. +type DatabaseFlags struct { + // Name: The name of the flag. These flags are passed at instance + // startup, so include both MySQL server options and MySQL system + // variables. Flags should be specified with underscores, not hyphens. + // For more information, see Configuring MySQL Flags in the Google Cloud + // SQL documentation, as well as the official MySQL documentation for + // server options and system variables. + Name string `json:"name,omitempty"` + + // Value: The value of the flag. Booleans should be set to on for true + // and off for false. This field must be omitted if the flag doesn't + // take a value. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Name") to + // unconditionally include in API requests. 
By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *DatabaseFlags) MarshalJSON() ([]byte, error) { + type noMethod DatabaseFlags + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +// DatabaseInstance: A Cloud SQL instance resource. +type DatabaseInstance struct { + // CurrentDiskSize: The current disk usage of the instance in bytes. + CurrentDiskSize int64 `json:"currentDiskSize,omitempty,string"` + + // DatabaseVersion: The database engine type and version. Can be + // MYSQL_5_5 or MYSQL_5_6. Defaults to MYSQL_5_5. The databaseVersion + // can not be changed after instance creation. + DatabaseVersion string `json:"databaseVersion,omitempty"` + + // Etag: HTTP 1.1 Entity tag for the resource. + Etag string `json:"etag,omitempty"` + + // FailoverReplica: The name and status of the failover replica. Only + // applies to Second Generation instances. + FailoverReplica *DatabaseInstanceFailoverReplica `json:"failoverReplica,omitempty"` + + // InstanceType: The instance type. This can be one of the + // following. + // CLOUD_SQL_INSTANCE: A Cloud SQL instance that is not replicating from + // a master. + // ON_PREMISES_INSTANCE: An instance running on the customer's + // premises. + // READ_REPLICA_INSTANCE: A Cloud SQL instance configured as a + // read-replica. + InstanceType string `json:"instanceType,omitempty"` + + // IpAddresses: The assigned IP addresses for the instance. + IpAddresses []*IpMapping `json:"ipAddresses,omitempty"` + + // Ipv6Address: The IPv6 address assigned to the instance. + Ipv6Address string `json:"ipv6Address,omitempty"` + + // Kind: This is always sql#instance. + Kind string `json:"kind,omitempty"` + + // MasterInstanceName: The name of the instance which will act as master + // in the replication setup. + MasterInstanceName string `json:"masterInstanceName,omitempty"` + + // MaxDiskSize: The maximum disk size of the instance in bytes. + MaxDiskSize int64 `json:"maxDiskSize,omitempty,string"` + + // Name: Name of the Cloud SQL instance. This does not include the + // project ID. + Name string `json:"name,omitempty"` + + // OnPremisesConfiguration: Configuration specific to on-premises + // instances. + OnPremisesConfiguration *OnPremisesConfiguration `json:"onPremisesConfiguration,omitempty"` + + // Project: The project ID of the project containing the Cloud SQL + // instance. The Google apps domain is prefixed if applicable. + Project string `json:"project,omitempty"` + + // Region: The geographical region. Can be us-central, asia-east1 or + // europe-west1. Defaults to us-central. The region can not be changed + // after instance creation. + Region string `json:"region,omitempty"` + + // ReplicaConfiguration: Configuration specific to read-replicas + // replicating from on-premises masters. + ReplicaConfiguration *ReplicaConfiguration `json:"replicaConfiguration,omitempty"` + + // ReplicaNames: The replicas of the instance. + ReplicaNames []string `json:"replicaNames,omitempty"` + + // SelfLink: The URI of this resource. + SelfLink string `json:"selfLink,omitempty"` + + // ServerCaCert: SSL configuration. 
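+ //
+ // As an illustrative sketch of how these fields fit together (the
+ // instance name is hypothetical; the other values are taken from the
+ // field docs in this struct), a minimal instance definition might be:
+ //
+ //   inst := &DatabaseInstance{
+ //     Name:            "my-instance",
+ //     DatabaseVersion: "MYSQL_5_6",
+ //     Region:          "us-central",
+ //     Settings:        &Settings{Tier: "D1"},
+ //   }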
+ ServerCaCert *SslCert `json:"serverCaCert,omitempty"` + + // ServiceAccountEmailAddress: The service account email address + // assigned to the instance. + ServiceAccountEmailAddress string `json:"serviceAccountEmailAddress,omitempty"` + + // Settings: The user settings. + Settings *Settings `json:"settings,omitempty"` + + // State: The current serving state of the Cloud SQL instance. This can + // be one of the following. + // RUNNABLE: The instance is running, or is ready to run when + // accessed. + // SUSPENDED: The instance is not available, for example due to problems + // with billing. + // PENDING_CREATE: The instance is being created. + // MAINTENANCE: The instance is down for maintenance. + // FAILED: The instance creation failed. + // UNKNOWN_STATE: The state of the instance is unknown. + State string `json:"state,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "CurrentDiskSize") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *DatabaseInstance) MarshalJSON() ([]byte, error) { + type noMethod DatabaseInstance + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +// DatabaseInstanceFailoverReplica: The name and status of the failover +// replica. Only applies to Second Generation instances. +type DatabaseInstanceFailoverReplica struct { + Available bool `json:"available,omitempty"` + + Name string `json:"name,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Available") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *DatabaseInstanceFailoverReplica) MarshalJSON() ([]byte, error) { + type noMethod DatabaseInstanceFailoverReplica + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +// DatabasesListResponse: Database list response. +type DatabasesListResponse struct { + // Items: List of database resources in the instance. + Items []*Database `json:"items,omitempty"` + + // Kind: This is always sql#databasesList. + Kind string `json:"kind,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Items") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. 
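+ //
+ // For example (an illustrative sketch of the ForceSendFields mechanism
+ // described above, not specific to this type):
+ //
+ //   s := &Settings{SettingsVersion: 5}
+ //   s.ForceSendFields = []string{"DatabaseReplicationEnabled"}
+ //   // MarshalJSON now emits "databaseReplicationEnabled":false even
+ //   // though the field holds its zero value, which is what a Patch
+ //   // request needs in order to explicitly disable replication.
+ //   b, _ := s.MarshalJSON()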
+ ForceSendFields []string `json:"-"` +} + +func (s *DatabasesListResponse) MarshalJSON() ([]byte, error) { + type noMethod DatabasesListResponse + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +// ExportContext: Database instance export context. +type ExportContext struct { + // CsvExportOptions: Options for exporting data as CSV. + CsvExportOptions *ExportContextCsvExportOptions `json:"csvExportOptions,omitempty"` + + // Databases: Databases (for example, guestbook) from which the export + // is made. If fileType is SQL and no database is specified, all + // databases are exported. If fileType is CSV, you can optionally + // specify at most one database to export. If + // csvExportOptions.selectQuery also specifies the database, this field + // will be ignored. + Databases []string `json:"databases,omitempty"` + + // FileType: The file type for the specified uri. + // SQL: The file contains SQL statements. + // CSV: The file contains CSV data. + FileType string `json:"fileType,omitempty"` + + // Kind: This is always sql#exportContext. + Kind string `json:"kind,omitempty"` + + // SqlExportOptions: Options for exporting data as SQL statements. + SqlExportOptions *ExportContextSqlExportOptions `json:"sqlExportOptions,omitempty"` + + // Uri: The path to the file in Google Cloud Storage where the export + // will be stored. The URI is in the form gs://bucketName/fileName. If + // the file already exists, the operation fails. If fileType is SQL and + // the filename ends with .gz, the contents are compressed. + Uri string `json:"uri,omitempty"` + + // ForceSendFields is a list of field names (e.g. "CsvExportOptions") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *ExportContext) MarshalJSON() ([]byte, error) { + type noMethod ExportContext + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +// ExportContextCsvExportOptions: Options for exporting data as CSV. +type ExportContextCsvExportOptions struct { + // SelectQuery: The select query used to extract the data. + SelectQuery string `json:"selectQuery,omitempty"` + + // ForceSendFields is a list of field names (e.g. "SelectQuery") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *ExportContextCsvExportOptions) MarshalJSON() ([]byte, error) { + type noMethod ExportContextCsvExportOptions + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +// ExportContextSqlExportOptions: Options for exporting data as SQL +// statements. +type ExportContextSqlExportOptions struct { + // SchemaOnly: Export only schemas. + SchemaOnly bool `json:"schemaOnly,omitempty"` + + // Tables: Tables to export, or that were exported, from the specified + // database. If you specify tables, specify one and only one database. 
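+ //
+ // For example (an illustrative sketch; the bucket, database, and table
+ // names are hypothetical), a schema-only SQL export might be
+ // configured as:
+ //
+ //   ec := &ExportContext{
+ //     FileType:  "SQL",
+ //     Uri:       "gs://my-bucket/guestbook-schema.sql.gz",
+ //     Databases: []string{"guestbook"},
+ //     SqlExportOptions: &ExportContextSqlExportOptions{
+ //       SchemaOnly: true,
+ //       Tables:     []string{"entries"},
+ //     },
+ //   }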
+ Tables []string `json:"tables,omitempty"`
+
+ // ForceSendFields is a list of field names (e.g. "SchemaOnly") to
+ // unconditionally include in API requests. By default, fields with
+ // empty values are omitted from API requests. However, any non-pointer,
+ // non-interface field appearing in ForceSendFields will be sent to the
+ // server regardless of whether the field is empty or not. This may be
+ // used to include empty fields in Patch requests.
+ ForceSendFields []string `json:"-"`
+}
+
+func (s *ExportContextSqlExportOptions) MarshalJSON() ([]byte, error) {
+ type noMethod ExportContextSqlExportOptions
+ raw := noMethod(*s)
+ return gensupport.MarshalJSON(raw, s.ForceSendFields)
+}
+
+// FailoverContext: Database instance failover context.
+type FailoverContext struct {
+ // Kind: This is always sql#failoverContext.
+ Kind string `json:"kind,omitempty"`
+
+ // SettingsVersion: The current settings version of this instance. The
+ // request will be rejected if this version doesn't match the current
+ // settings version.
+ SettingsVersion int64 `json:"settingsVersion,omitempty,string"`
+
+ // ForceSendFields is a list of field names (e.g. "Kind") to
+ // unconditionally include in API requests. By default, fields with
+ // empty values are omitted from API requests. However, any non-pointer,
+ // non-interface field appearing in ForceSendFields will be sent to the
+ // server regardless of whether the field is empty or not. This may be
+ // used to include empty fields in Patch requests.
+ ForceSendFields []string `json:"-"`
+}
+
+func (s *FailoverContext) MarshalJSON() ([]byte, error) {
+ type noMethod FailoverContext
+ raw := noMethod(*s)
+ return gensupport.MarshalJSON(raw, s.ForceSendFields)
+}
+
+// Flag: A Google Cloud SQL service flag resource.
+type Flag struct {
+ // AllowedStringValues: For STRING flags, a list of strings that the
+ // value can be set to.
+ AllowedStringValues []string `json:"allowedStringValues,omitempty"`
+
+ // AppliesTo: The database version this flag applies to. Can be
+ // MYSQL_5_5, MYSQL_5_6, or both.
+ AppliesTo []string `json:"appliesTo,omitempty"`
+
+ // Kind: This is always sql#flag.
+ Kind string `json:"kind,omitempty"`
+
+ // MaxValue: For INTEGER flags, the maximum allowed value.
+ MaxValue int64 `json:"maxValue,omitempty,string"`
+
+ // MinValue: For INTEGER flags, the minimum allowed value.
+ MinValue int64 `json:"minValue,omitempty,string"`
+
+ // Name: This is the name of the flag. Flag names always use
+ // underscores, not hyphens, e.g. max_allowed_packet.
+ Name string `json:"name,omitempty"`
+
+ // RequiresRestart: Indicates whether changing this flag will trigger a
+ // database restart. Only applicable to Second Generation instances.
+ RequiresRestart bool `json:"requiresRestart,omitempty"`
+
+ // Type: The type of the flag. Flags are typed as BOOLEAN, STRING,
+ // INTEGER or NONE. NONE is used for flags which do not take a value,
+ // such as skip_grant_tables.
+ Type string `json:"type,omitempty"`
+
+ // ForceSendFields is a list of field names (e.g. "AllowedStringValues")
+ // to unconditionally include in API requests. By default, fields with
+ // empty values are omitted from API requests. However, any non-pointer,
+ // non-interface field appearing in ForceSendFields will be sent to the
+ // server regardless of whether the field is empty or not. This may be
+ // used to include empty fields in Patch requests.
+ ForceSendFields []string `json:"-"` +} + +func (s *Flag) MarshalJSON() ([]byte, error) { + type noMethod Flag + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +// FlagsListResponse: Flags list response. +type FlagsListResponse struct { + // Items: List of flags. + Items []*Flag `json:"items,omitempty"` + + // Kind: This is always sql#flagsList. + Kind string `json:"kind,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Items") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *FlagsListResponse) MarshalJSON() ([]byte, error) { + type noMethod FlagsListResponse + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +// ImportContext: Database instance import context. +type ImportContext struct { + // CsvImportOptions: Options for importing data as CSV. + CsvImportOptions *ImportContextCsvImportOptions `json:"csvImportOptions,omitempty"` + + // Database: The database (for example, guestbook) to which the import + // is made. If fileType is SQL and no database is specified, it is + // assumed that the database is specified in the file to be imported. If + // fileType is CSV, it must be specified. + Database string `json:"database,omitempty"` + + // FileType: The file type for the specified uri. + // SQL: The file contains SQL statements. + // CSV: The file contains CSV data. + FileType string `json:"fileType,omitempty"` + + // Kind: This is always sql#importContext. + Kind string `json:"kind,omitempty"` + + // Uri: A path to the file in Google Cloud Storage from which the import + // is made. The URI is in the form gs://bucketName/fileName. Compressed + // gzip files (.gz) are supported when fileType is SQL. + Uri string `json:"uri,omitempty"` + + // ForceSendFields is a list of field names (e.g. "CsvImportOptions") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *ImportContext) MarshalJSON() ([]byte, error) { + type noMethod ImportContext + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +// ImportContextCsvImportOptions: Options for importing data as CSV. +type ImportContextCsvImportOptions struct { + // Columns: The columns to which CSV data is imported. If not specified, + // all columns of the database table are loaded with CSV data. + Columns []string `json:"columns,omitempty"` + + // Table: The table to which CSV data is imported. + Table string `json:"table,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Columns") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. 
However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *ImportContextCsvImportOptions) MarshalJSON() ([]byte, error) { + type noMethod ImportContextCsvImportOptions + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +// InstancesCloneRequest: Database instance clone request. +type InstancesCloneRequest struct { + // CloneContext: Contains details about the clone operation. + CloneContext *CloneContext `json:"cloneContext,omitempty"` + + // ForceSendFields is a list of field names (e.g. "CloneContext") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *InstancesCloneRequest) MarshalJSON() ([]byte, error) { + type noMethod InstancesCloneRequest + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +// InstancesExportRequest: Database instance export request. +type InstancesExportRequest struct { + // ExportContext: Contains details about the export operation. + ExportContext *ExportContext `json:"exportContext,omitempty"` + + // ForceSendFields is a list of field names (e.g. "ExportContext") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *InstancesExportRequest) MarshalJSON() ([]byte, error) { + type noMethod InstancesExportRequest + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +// InstancesFailoverRequest: Instance failover request. +type InstancesFailoverRequest struct { + // FailoverContext: Failover Context. + FailoverContext *FailoverContext `json:"failoverContext,omitempty"` + + // ForceSendFields is a list of field names (e.g. "FailoverContext") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *InstancesFailoverRequest) MarshalJSON() ([]byte, error) { + type noMethod InstancesFailoverRequest + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +// InstancesImportRequest: Database instance import request. +type InstancesImportRequest struct { + // ImportContext: Contains details about the import operation. + ImportContext *ImportContext `json:"importContext,omitempty"` + + // ForceSendFields is a list of field names (e.g. "ImportContext") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. 
However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *InstancesImportRequest) MarshalJSON() ([]byte, error) { + type noMethod InstancesImportRequest + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +// InstancesListResponse: Database instances list response. +type InstancesListResponse struct { + // Items: List of database instance resources. + Items []*DatabaseInstance `json:"items,omitempty"` + + // Kind: This is always sql#instancesList. + Kind string `json:"kind,omitempty"` + + // NextPageToken: The continuation token, used to page through large + // result sets. Provide this value in a subsequent request to return the + // next page of results. + NextPageToken string `json:"nextPageToken,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Items") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *InstancesListResponse) MarshalJSON() ([]byte, error) { + type noMethod InstancesListResponse + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +// InstancesRestoreBackupRequest: Database instance restore backup +// request. +type InstancesRestoreBackupRequest struct { + // RestoreBackupContext: Parameters required to perform the restore + // backup operation. + RestoreBackupContext *RestoreBackupContext `json:"restoreBackupContext,omitempty"` + + // ForceSendFields is a list of field names (e.g. + // "RestoreBackupContext") to unconditionally include in API requests. + // By default, fields with empty values are omitted from API requests. + // However, any non-pointer, non-interface field appearing in + // ForceSendFields will be sent to the server regardless of whether the + // field is empty or not. This may be used to include empty fields in + // Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *InstancesRestoreBackupRequest) MarshalJSON() ([]byte, error) { + type noMethod InstancesRestoreBackupRequest + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +// IpConfiguration: IP Management configuration. +type IpConfiguration struct { + // AuthorizedNetworks: The list of external networks that are allowed to + // connect to the instance using the IP. In CIDR notation, also known as + // 'slash' notation (e.g. 192.168.100.0/24). + AuthorizedNetworks []*AclEntry `json:"authorizedNetworks,omitempty"` + + // Ipv4Enabled: Whether the instance should be assigned an IP address or + // not. + Ipv4Enabled bool `json:"ipv4Enabled,omitempty"` + + // RequireSsl: Whether the mysqld should default to 'REQUIRE X509' for + // users connecting over IP. + RequireSsl bool `json:"requireSsl,omitempty"` + + // ForceSendFields is a list of field names (e.g. "AuthorizedNetworks") + // to unconditionally include in API requests. 
By default, fields with
+ // empty values are omitted from API requests. However, any non-pointer,
+ // non-interface field appearing in ForceSendFields will be sent to the
+ // server regardless of whether the field is empty or not. This may be
+ // used to include empty fields in Patch requests.
+ ForceSendFields []string `json:"-"`
+}
+
+func (s *IpConfiguration) MarshalJSON() ([]byte, error) {
+ type noMethod IpConfiguration
+ raw := noMethod(*s)
+ return gensupport.MarshalJSON(raw, s.ForceSendFields)
+}
+
+// IpMapping: Database instance IP Mapping.
+type IpMapping struct {
+ // IpAddress: The IP address assigned.
+ IpAddress string `json:"ipAddress,omitempty"`
+
+ // TimeToRetire: The due time for this IP to be retired in RFC 3339
+ // format, for example 2012-11-15T16:19:00.094Z. This field is only
+ // available when the IP is scheduled to be retired.
+ TimeToRetire string `json:"timeToRetire,omitempty"`
+
+ // ForceSendFields is a list of field names (e.g. "IpAddress") to
+ // unconditionally include in API requests. By default, fields with
+ // empty values are omitted from API requests. However, any non-pointer,
+ // non-interface field appearing in ForceSendFields will be sent to the
+ // server regardless of whether the field is empty or not. This may be
+ // used to include empty fields in Patch requests.
+ ForceSendFields []string `json:"-"`
+}
+
+func (s *IpMapping) MarshalJSON() ([]byte, error) {
+ type noMethod IpMapping
+ raw := noMethod(*s)
+ return gensupport.MarshalJSON(raw, s.ForceSendFields)
+}
+
+// LocationPreference: Preferred location. This specifies where a Cloud
+// SQL instance should preferably be located, either in a specific
+// Compute Engine zone, or co-located with an App Engine application.
+// Note that if the preferred location is not available, the instance
+// will be located as close as possible within the region. Only one
+// location may be specified.
+type LocationPreference struct {
+ // FollowGaeApplication: The App Engine application to follow; it must
+ // be in the same region as the Cloud SQL instance.
+ FollowGaeApplication string `json:"followGaeApplication,omitempty"`
+
+ // Kind: This is always sql#locationPreference.
+ Kind string `json:"kind,omitempty"`
+
+ // Zone: The preferred Compute Engine zone (e.g. us-central1-a,
+ // us-central1-b, etc.).
+ Zone string `json:"zone,omitempty"`
+
+ // ForceSendFields is a list of field names (e.g.
+ // "FollowGaeApplication") to unconditionally include in API requests.
+ // By default, fields with empty values are omitted from API requests.
+ // However, any non-pointer, non-interface field appearing in
+ // ForceSendFields will be sent to the server regardless of whether the
+ // field is empty or not. This may be used to include empty fields in
+ // Patch requests.
+ ForceSendFields []string `json:"-"`
+}
+
+func (s *LocationPreference) MarshalJSON() ([]byte, error) {
+ type noMethod LocationPreference
+ raw := noMethod(*s)
+ return gensupport.MarshalJSON(raw, s.ForceSendFields)
+}
+
+// MaintenanceWindow: Maintenance window. This specifies when a v2 Cloud
+// SQL instance should preferably be restarted for system maintenance
+// purposes.
+type MaintenanceWindow struct {
+ // Day: Day of the week (1-7), starting on Monday.
+ Day int64 `json:"day,omitempty"`
+
+ // Hour: Hour of the day (0 to 23).
+ Hour int64 `json:"hour,omitempty"`
+
+ // Kind: This is always sql#maintenanceWindow.
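+ //
+ // For example (an illustrative note): a window on Monday at 02:00
+ // would be expressed as &MaintenanceWindow{Day: 1, Hour: 2}.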
+ Kind string `json:"kind,omitempty"`
+
+ UpdateTrack string `json:"updateTrack,omitempty"`
+
+ // ForceSendFields is a list of field names (e.g. "Day") to
+ // unconditionally include in API requests. By default, fields with
+ // empty values are omitted from API requests. However, any non-pointer,
+ // non-interface field appearing in ForceSendFields will be sent to the
+ // server regardless of whether the field is empty or not. This may be
+ // used to include empty fields in Patch requests.
+ ForceSendFields []string `json:"-"`
+}
+
+func (s *MaintenanceWindow) MarshalJSON() ([]byte, error) {
+ type noMethod MaintenanceWindow
+ raw := noMethod(*s)
+ return gensupport.MarshalJSON(raw, s.ForceSendFields)
+}
+
+// MySqlReplicaConfiguration: Read-replica configuration specific to
+// MySQL databases.
+type MySqlReplicaConfiguration struct {
+ // CaCertificate: PEM representation of the trusted CA's x509
+ // certificate.
+ CaCertificate string `json:"caCertificate,omitempty"`
+
+ // ClientCertificate: PEM representation of the slave's x509
+ // certificate.
+ ClientCertificate string `json:"clientCertificate,omitempty"`
+
+ // ClientKey: PEM representation of the slave's private key. The
+ // corresponding public key is encoded in the client's certificate.
+ ClientKey string `json:"clientKey,omitempty"`
+
+ // ConnectRetryInterval: Seconds to wait between connect retries.
+ // MySQL's default is 60 seconds.
+ ConnectRetryInterval int64 `json:"connectRetryInterval,omitempty"`
+
+ // DumpFilePath: Path to a SQL dump file in Google Cloud Storage from
+ // which the slave instance is to be created. The URI is in the form
+ // gs://bucketName/fileName. Compressed gzip files (.gz) are also
+ // supported. Dumps should have the binlog coordinates from which
+ // replication should begin. This can be accomplished by setting
+ // --master-data to 1 when using mysqldump.
+ DumpFilePath string `json:"dumpFilePath,omitempty"`
+
+ // Kind: This is always sql#mysqlReplicaConfiguration.
+ Kind string `json:"kind,omitempty"`
+
+ // MasterHeartbeatPeriod: Interval in milliseconds between replication
+ // heartbeats.
+ MasterHeartbeatPeriod int64 `json:"masterHeartbeatPeriod,omitempty,string"`
+
+ // Password: The password for the replication connection.
+ Password string `json:"password,omitempty"`
+
+ // SslCipher: A list of permissible ciphers to use for SSL encryption.
+ SslCipher string `json:"sslCipher,omitempty"`
+
+ // Username: The username for the replication connection.
+ Username string `json:"username,omitempty"`
+
+ // VerifyServerCertificate: Whether or not to check the master's Common
+ // Name value in the certificate that it sends during the SSL handshake.
+ VerifyServerCertificate bool `json:"verifyServerCertificate,omitempty"`
+
+ // ForceSendFields is a list of field names (e.g. "CaCertificate") to
+ // unconditionally include in API requests. By default, fields with
+ // empty values are omitted from API requests. However, any non-pointer,
+ // non-interface field appearing in ForceSendFields will be sent to the
+ // server regardless of whether the field is empty or not. This may be
+ // used to include empty fields in Patch requests.
+ ForceSendFields []string `json:"-"`
+}
+
+func (s *MySqlReplicaConfiguration) MarshalJSON() ([]byte, error) {
+ type noMethod MySqlReplicaConfiguration
+ raw := noMethod(*s)
+ return gensupport.MarshalJSON(raw, s.ForceSendFields)
+}
+
+// OnPremisesConfiguration: On-premises instance configuration.
+type OnPremisesConfiguration struct { + // HostPort: The host and port of the on-premises instance in host:port + // format + HostPort string `json:"hostPort,omitempty"` + + // Kind: This is always sql#onPremisesConfiguration. + Kind string `json:"kind,omitempty"` + + // ForceSendFields is a list of field names (e.g. "HostPort") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *OnPremisesConfiguration) MarshalJSON() ([]byte, error) { + type noMethod OnPremisesConfiguration + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +// Operation: An Operations resource contains information about database +// instance operations such as create, delete, and restart. Operations +// resources are created in response to operations that were initiated; +// you never create them directly. +type Operation struct { + // EndTime: The time this operation finished in UTC timezone in RFC 3339 + // format, for example 2012-11-15T16:19:00.094Z. + EndTime string `json:"endTime,omitempty"` + + // Error: If errors occurred during processing of this operation, this + // field will be populated. + Error *OperationErrors `json:"error,omitempty"` + + // ExportContext: The context for export operation, if applicable. + ExportContext *ExportContext `json:"exportContext,omitempty"` + + // ImportContext: The context for import operation, if applicable. + ImportContext *ImportContext `json:"importContext,omitempty"` + + // InsertTime: The time this operation was enqueued in UTC timezone in + // RFC 3339 format, for example 2012-11-15T16:19:00.094Z. + InsertTime string `json:"insertTime,omitempty"` + + // Kind: This is always sql#operation. + Kind string `json:"kind,omitempty"` + + // Name: An identifier that uniquely identifies the operation. You can + // use this identifier to retrieve the Operations resource that has + // information about the operation. + Name string `json:"name,omitempty"` + + // OperationType: The type of the operation. Valid values are CREATE, + // DELETE, UPDATE, RESTART, IMPORT, EXPORT, BACKUP_VOLUME, + // RESTORE_VOLUME, CREATE_USER, DELETE_USER, CREATE_DATABASE, + // DELETE_DATABASE . + OperationType string `json:"operationType,omitempty"` + + // SelfLink: The URI of this resource. + SelfLink string `json:"selfLink,omitempty"` + + // StartTime: The time this operation actually started in UTC timezone + // in RFC 3339 format, for example 2012-11-15T16:19:00.094Z. + StartTime string `json:"startTime,omitempty"` + + // Status: The status of an operation. Valid values are PENDING, + // RUNNING, DONE, UNKNOWN. + Status string `json:"status,omitempty"` + + // TargetId: Name of the database instance related to this operation. + TargetId string `json:"targetId,omitempty"` + + // TargetLink: The URI of the instance related to the operation. + TargetLink string `json:"targetLink,omitempty"` + + // TargetProject: The project ID of the target instance related to this + // operation. + TargetProject string `json:"targetProject,omitempty"` + + // User: The email address of the user who initiated this operation. + User string `json:"user,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. 
+ googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "EndTime") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *Operation) MarshalJSON() ([]byte, error) { + type noMethod Operation + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +// OperationError: Database instance operation error. +type OperationError struct { + // Code: Identifies the specific error that occurred. + Code string `json:"code,omitempty"` + + // Kind: This is always sql#operationError. + Kind string `json:"kind,omitempty"` + + // Message: Additional information about the error encountered. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *OperationError) MarshalJSON() ([]byte, error) { + type noMethod OperationError + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +// OperationErrors: Database instance operation errors list wrapper. +type OperationErrors struct { + // Errors: The list of errors encountered while processing this + // operation. + Errors []*OperationError `json:"errors,omitempty"` + + // Kind: This is always sql#operationErrors. + Kind string `json:"kind,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Errors") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *OperationErrors) MarshalJSON() ([]byte, error) { + type noMethod OperationErrors + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +// OperationsListResponse: Database instance list operations response. +type OperationsListResponse struct { + // Items: List of operation resources. + Items []*Operation `json:"items,omitempty"` + + // Kind: This is always sql#operationsList. + Kind string `json:"kind,omitempty"` + + // NextPageToken: The continuation token, used to page through large + // result sets. Provide this value in a subsequent request to return the + // next page of results. + NextPageToken string `json:"nextPageToken,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Items") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. 
However, any non-pointer,
+ // non-interface field appearing in ForceSendFields will be sent to the
+ // server regardless of whether the field is empty or not. This may be
+ // used to include empty fields in Patch requests.
+ ForceSendFields []string `json:"-"`
+}
+
+func (s *OperationsListResponse) MarshalJSON() ([]byte, error) {
+ type noMethod OperationsListResponse
+ raw := noMethod(*s)
+ return gensupport.MarshalJSON(raw, s.ForceSendFields)
+}
+
+// ReplicaConfiguration: Read-replica configuration for connecting to
+// the master.
+type ReplicaConfiguration struct {
+ // FailoverTarget: Specifies if the replica is the failover target. If
+ // the field is set to true, the replica will be designated as a
+ // failover replica. If the master instance fails, the replica instance
+ // will be promoted to the new master instance.
+ // Only one replica can be specified as the failover target, and the
+ // replica has to be in a different zone than the master instance.
+ FailoverTarget bool `json:"failoverTarget,omitempty"`
+
+ // Kind: This is always sql#replicaConfiguration.
+ Kind string `json:"kind,omitempty"`
+
+ // MysqlReplicaConfiguration: MySQL-specific configuration when
+ // replicating from a MySQL on-premises master. Replication
+ // configuration information such as the username, password,
+ // certificates, and keys is not stored in the instance metadata. The
+ // configuration information is used only to set up the replication
+ // connection and is stored by MySQL in a file named master.info in the
+ // data directory.
+ MysqlReplicaConfiguration *MySqlReplicaConfiguration `json:"mysqlReplicaConfiguration,omitempty"`
+
+ // ForceSendFields is a list of field names (e.g. "FailoverTarget") to
+ // unconditionally include in API requests. By default, fields with
+ // empty values are omitted from API requests. However, any non-pointer,
+ // non-interface field appearing in ForceSendFields will be sent to the
+ // server regardless of whether the field is empty or not. This may be
+ // used to include empty fields in Patch requests.
+ ForceSendFields []string `json:"-"`
+}
+
+func (s *ReplicaConfiguration) MarshalJSON() ([]byte, error) {
+ type noMethod ReplicaConfiguration
+ raw := noMethod(*s)
+ return gensupport.MarshalJSON(raw, s.ForceSendFields)
+}
+
+// RestoreBackupContext: Database instance restore from backup context.
+type RestoreBackupContext struct {
+ // BackupRunId: The ID of the backup run to restore from.
+ BackupRunId int64 `json:"backupRunId,omitempty,string"`
+
+ // InstanceId: The ID of the instance that the backup was taken from.
+ InstanceId string `json:"instanceId,omitempty"`
+
+ // Kind: This is always sql#restoreBackupContext.
+ Kind string `json:"kind,omitempty"`
+
+ // ForceSendFields is a list of field names (e.g. "BackupRunId") to
+ // unconditionally include in API requests. By default, fields with
+ // empty values are omitted from API requests. However, any non-pointer,
+ // non-interface field appearing in ForceSendFields will be sent to the
+ // server regardless of whether the field is empty or not. This may be
+ // used to include empty fields in Patch requests.
+ ForceSendFields []string `json:"-"`
+}
+
+func (s *RestoreBackupContext) MarshalJSON() ([]byte, error) {
+ type noMethod RestoreBackupContext
+ raw := noMethod(*s)
+ return gensupport.MarshalJSON(raw, s.ForceSendFields)
+}
+
+// Settings: Database instance settings.
+type Settings struct {
+ // ActivationPolicy: The activation policy for this instance. This
+ // specifies when the instance should be activated and is applicable
+ // only when the instance state is RUNNABLE. This can be one of the
+ // following.
+ // ALWAYS: The instance should always be active.
+ // NEVER: The instance should never be activated.
+ // ON_DEMAND: The instance is activated upon receiving requests.
+ ActivationPolicy string `json:"activationPolicy,omitempty"`
+
+ // AuthorizedGaeApplications: The App Engine app IDs that can access
+ // this instance.
+ AuthorizedGaeApplications []string `json:"authorizedGaeApplications,omitempty"`
+
+ // BackupConfiguration: The daily backup configuration for the instance.
+ BackupConfiguration *BackupConfiguration `json:"backupConfiguration,omitempty"`
+
+ // CrashSafeReplicationEnabled: Configuration specific to read replica
+ // instances. Indicates whether database flags for crash-safe
+ // replication are enabled.
+ CrashSafeReplicationEnabled bool `json:"crashSafeReplicationEnabled,omitempty"`
+
+ // DataDiskSizeGb: The size of the data disk, in GB. Only supported for
+ // 2nd Generation instances. The data disk size minimum is 10GB.
+ DataDiskSizeGb int64 `json:"dataDiskSizeGb,omitempty,string"`
+
+ // DataDiskType: The type of data disk. Only supported for 2nd
+ // Generation instances. The default type is SSD.
+ DataDiskType string `json:"dataDiskType,omitempty"`
+
+ // DatabaseFlags: The database flags passed to the instance at startup.
+ DatabaseFlags []*DatabaseFlags `json:"databaseFlags,omitempty"`
+
+ // DatabaseReplicationEnabled: Configuration specific to read replica
+ // instances. Indicates whether replication is enabled or not.
+ DatabaseReplicationEnabled bool `json:"databaseReplicationEnabled,omitempty"`
+
+ // IpConfiguration: The settings for IP Management. This allows you to
+ // enable or disable the instance IP and manage which external networks
+ // can connect to the instance.
+ IpConfiguration *IpConfiguration `json:"ipConfiguration,omitempty"`
+
+ // Kind: This is always sql#settings.
+ Kind string `json:"kind,omitempty"`
+
+ // LocationPreference: The location preference settings. This allows the
+ // instance to be located as near as possible to either an App Engine
+ // app or GCE zone for better performance.
+ LocationPreference *LocationPreference `json:"locationPreference,omitempty"`
+
+ // MaintenanceWindow: The maintenance window for this instance. This
+ // specifies when the instance may be restarted for maintenance
+ // purposes.
+ MaintenanceWindow *MaintenanceWindow `json:"maintenanceWindow,omitempty"`
+
+ // PricingPlan: The pricing plan for this instance. This can be either
+ // PER_USE or PACKAGE.
+ PricingPlan string `json:"pricingPlan,omitempty"`
+
+ // ReplicationType: The type of replication this instance uses. This can
+ // be either ASYNCHRONOUS or SYNCHRONOUS.
+ ReplicationType string `json:"replicationType,omitempty"`
+
+ // SettingsVersion: The version of instance settings. This is a required
+ // field for the update method to make sure concurrent updates are
+ // handled properly. During update, use the most recent settingsVersion
+ // value for this instance and do not try to update this value.
+ SettingsVersion int64 `json:"settingsVersion,omitempty,string"`
+
+ // Tier: The tier of service for this instance, for example D1, D2. For
+ // more information, see pricing.
+ Tier string `json:"tier,omitempty"`
+
+ // ForceSendFields is a list of field names (e.g. "ActivationPolicy") to
+ // unconditionally include in API requests.
By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *Settings) MarshalJSON() ([]byte, error) { + type noMethod Settings + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +// SslCert: SslCerts Resource +type SslCert struct { + // Cert: PEM representation. + Cert string `json:"cert,omitempty"` + + // CertSerialNumber: Serial number, as extracted from the certificate. + CertSerialNumber string `json:"certSerialNumber,omitempty"` + + // CommonName: User supplied name. Constrained to [a-zA-Z.-_ ]+. + CommonName string `json:"commonName,omitempty"` + + // CreateTime: The time when the certificate was created in RFC 3339 + // format, for example 2012-11-15T16:19:00.094Z + CreateTime string `json:"createTime,omitempty"` + + // ExpirationTime: The time when the certificate expires in RFC 3339 + // format, for example 2012-11-15T16:19:00.094Z. + ExpirationTime string `json:"expirationTime,omitempty"` + + // Instance: Name of the database instance. + Instance string `json:"instance,omitempty"` + + // Kind: This is always sql#sslCert. + Kind string `json:"kind,omitempty"` + + // SelfLink: The URI of this resource. + SelfLink string `json:"selfLink,omitempty"` + + // Sha1Fingerprint: Sha1 Fingerprint. + Sha1Fingerprint string `json:"sha1Fingerprint,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Cert") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *SslCert) MarshalJSON() ([]byte, error) { + type noMethod SslCert + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +// SslCertDetail: SslCertDetail. +type SslCertDetail struct { + // CertInfo: The public information about the cert. + CertInfo *SslCert `json:"certInfo,omitempty"` + + // CertPrivateKey: The private key for the client cert, in pem format. + // Keep private in order to protect your security. + CertPrivateKey string `json:"certPrivateKey,omitempty"` + + // ForceSendFields is a list of field names (e.g. "CertInfo") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *SslCertDetail) MarshalJSON() ([]byte, error) { + type noMethod SslCertDetail + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +// SslCertsCreateEphemeralRequest: SslCerts create ephemeral certificate +// request. +type SslCertsCreateEphemeralRequest struct { + // PublicKey: PEM encoded public key to include in the signed + // certificate. 
+ PublicKey string `json:"public_key,omitempty"`
+
+ // ForceSendFields is a list of field names (e.g. "PublicKey") to
+ // unconditionally include in API requests. By default, fields with
+ // empty values are omitted from API requests. However, any non-pointer,
+ // non-interface field appearing in ForceSendFields will be sent to the
+ // server regardless of whether the field is empty or not. This may be
+ // used to include empty fields in Patch requests.
+ ForceSendFields []string `json:"-"`
+}
+
+func (s *SslCertsCreateEphemeralRequest) MarshalJSON() ([]byte, error) {
+ type noMethod SslCertsCreateEphemeralRequest
+ raw := noMethod(*s)
+ return gensupport.MarshalJSON(raw, s.ForceSendFields)
+}
+
+// SslCertsInsertRequest: SslCerts insert request.
+type SslCertsInsertRequest struct {
+ // CommonName: User supplied name. Must be a distinct name from the
+ // other certificates for this instance. New certificates will not be
+ // usable until the instance is restarted.
+ CommonName string `json:"commonName,omitempty"`
+
+ // ForceSendFields is a list of field names (e.g. "CommonName") to
+ // unconditionally include in API requests. By default, fields with
+ // empty values are omitted from API requests. However, any non-pointer,
+ // non-interface field appearing in ForceSendFields will be sent to the
+ // server regardless of whether the field is empty or not. This may be
+ // used to include empty fields in Patch requests.
+ ForceSendFields []string `json:"-"`
+}
+
+func (s *SslCertsInsertRequest) MarshalJSON() ([]byte, error) {
+ type noMethod SslCertsInsertRequest
+ raw := noMethod(*s)
+ return gensupport.MarshalJSON(raw, s.ForceSendFields)
+}
+
+// SslCertsInsertResponse: SslCert insert response.
+type SslCertsInsertResponse struct {
+ // ClientCert: The new client certificate and private key. The new
+ // certificate will not work until the instance is restarted.
+ ClientCert *SslCertDetail `json:"clientCert,omitempty"`
+
+ // Kind: This is always sql#sslCertsInsert.
+ Kind string `json:"kind,omitempty"`
+
+ // ServerCaCert: The server Certificate Authority's certificate. If this
+ // is missing, you can force a new one to be generated by calling the
+ // resetSslConfig method on the instances resource.
+ ServerCaCert *SslCert `json:"serverCaCert,omitempty"`
+
+ // ServerResponse contains the HTTP response code and headers from the
+ // server.
+ googleapi.ServerResponse `json:"-"`
+
+ // ForceSendFields is a list of field names (e.g. "ClientCert") to
+ // unconditionally include in API requests. By default, fields with
+ // empty values are omitted from API requests. However, any non-pointer,
+ // non-interface field appearing in ForceSendFields will be sent to the
+ // server regardless of whether the field is empty or not. This may be
+ // used to include empty fields in Patch requests.
+ ForceSendFields []string `json:"-"`
+}
+
+func (s *SslCertsInsertResponse) MarshalJSON() ([]byte, error) {
+ type noMethod SslCertsInsertResponse
+ raw := noMethod(*s)
+ return gensupport.MarshalJSON(raw, s.ForceSendFields)
+}
+
+// SslCertsListResponse: SslCerts list response.
+type SslCertsListResponse struct {
+ // Items: List of client certificates for the instance.
+ Items []*SslCert `json:"items,omitempty"`
+
+ // Kind: This is always sql#sslCertsList.
+ Kind string `json:"kind,omitempty"`
+
+ // ServerResponse contains the HTTP response code and headers from the
+ // server.
+ googleapi.ServerResponse `json:"-"`
+
+ // ForceSendFields is a list of field names (e.g.
"Items") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *SslCertsListResponse) MarshalJSON() ([]byte, error) { + type noMethod SslCertsListResponse + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +// Tier: A Google Cloud SQL service tier resource. +type Tier struct { + // DiskQuota: The maximum disk size of this tier in bytes. + DiskQuota int64 `json:"DiskQuota,omitempty,string"` + + // RAM: The maximum RAM usage of this tier in bytes. + RAM int64 `json:"RAM,omitempty,string"` + + // Kind: This is always sql#tier. + Kind string `json:"kind,omitempty"` + + // Region: The applicable regions for this tier. Can be us-east1, + // europe-west1 or asia-east1. + Region []string `json:"region,omitempty"` + + // Tier: An identifier for the service tier, for example D1, D2 etc. For + // related information, see Pricing. + Tier string `json:"tier,omitempty"` + + // ForceSendFields is a list of field names (e.g. "DiskQuota") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *Tier) MarshalJSON() ([]byte, error) { + type noMethod Tier + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +// TiersListResponse: Tiers list response. +type TiersListResponse struct { + // Items: List of tiers. + Items []*Tier `json:"items,omitempty"` + + // Kind: This is always sql#tiersList. + Kind string `json:"kind,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Items") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *TiersListResponse) MarshalJSON() ([]byte, error) { + type noMethod TiersListResponse + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +// User: A Cloud SQL user resource. +type User struct { + // Etag: HTTP 1.1 Entity tag for the resource. + Etag string `json:"etag,omitempty"` + + // Host: The host name from which the user can connect. For insert + // operations, host defaults to an empty string. For update operations, + // host is specified as part of the request URL. The host name is not + // mutable with this API. + Host string `json:"host,omitempty"` + + // Instance: The name of the Cloud SQL instance. This does not include + // the project ID. Can be omitted for update since it is already + // specified on the URL. + Instance string `json:"instance,omitempty"` + + // Kind: This is always sql#user. 
+ Kind string `json:"kind,omitempty"`
+
+ // Name: The name of the user in the Cloud SQL instance. Can be omitted
+ // for update since it is already specified on the URL.
+ Name string `json:"name,omitempty"`
+
+ // Password: The password for the user.
+ Password string `json:"password,omitempty"`
+
+ // Project: The project ID of the project containing the Cloud SQL
+ // database. The Google apps domain is prefixed if applicable. Can be
+ // omitted for update since it is already specified on the URL.
+ Project string `json:"project,omitempty"`
+
+ // ForceSendFields is a list of field names (e.g. "Etag") to
+ // unconditionally include in API requests. By default, fields with
+ // empty values are omitted from API requests. However, any non-pointer,
+ // non-interface field appearing in ForceSendFields will be sent to the
+ // server regardless of whether the field is empty or not. This may be
+ // used to include empty fields in Patch requests.
+ ForceSendFields []string `json:"-"`
+}
+
+func (s *User) MarshalJSON() ([]byte, error) {
+ type noMethod User
+ raw := noMethod(*s)
+ return gensupport.MarshalJSON(raw, s.ForceSendFields)
+}
+
+// UsersListResponse: User list response.
+type UsersListResponse struct {
+ // Items: List of user resources in the instance.
+ Items []*User `json:"items,omitempty"`
+
+ // Kind: This is always sql#usersList.
+ Kind string `json:"kind,omitempty"`
+
+ // NextPageToken: The continuation token, used to page through large
+ // result sets. Provide this value in a subsequent request to return the
+ // next page of results.
+ NextPageToken string `json:"nextPageToken,omitempty"`
+
+ // ServerResponse contains the HTTP response code and headers from the
+ // server.
+ googleapi.ServerResponse `json:"-"`
+
+ // ForceSendFields is a list of field names (e.g. "Items") to
+ // unconditionally include in API requests. By default, fields with
+ // empty values are omitted from API requests. However, any non-pointer,
+ // non-interface field appearing in ForceSendFields will be sent to the
+ // server regardless of whether the field is empty or not. This may be
+ // used to include empty fields in Patch requests.
+ ForceSendFields []string `json:"-"`
+}
+
+func (s *UsersListResponse) MarshalJSON() ([]byte, error) {
+ type noMethod UsersListResponse
+ raw := noMethod(*s)
+ return gensupport.MarshalJSON(raw, s.ForceSendFields)
+}
+
+// method id "sql.backupRuns.delete":
+
+type BackupRunsDeleteCall struct {
+ s *Service
+ project string
+ instance string
+ id int64
+ urlParams_ gensupport.URLParams
+ ctx_ context.Context
+}
+
+// Delete: Deletes the backup taken by a backup run.
+func (r *BackupRunsService) Delete(project string, instance string, id int64) *BackupRunsDeleteCall {
+ c := &BackupRunsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)}
+ c.project = project
+ c.instance = instance
+ c.id = id
+ return c
+}
+
+// Fields allows partial responses to be retrieved. See
+// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *BackupRunsDeleteCall) Fields(s ...googleapi.Field) *BackupRunsDeleteCall {
+ c.urlParams_.Set("fields", googleapi.CombineFields(s))
+ return c
+}
+
+// Context sets the context to be used in this call's Do method. Any
+// pending HTTP request will be aborted if the provided context is
+// canceled.
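+//
+// A usage sketch (assuming an initialized *Service named svc with a
+// BackupRuns field, as constructed earlier in this file; the project
+// and instance names are hypothetical):
+//
+//   ctx, cancel := context.WithTimeout(context.Background(), time.Minute)
+//   defer cancel()
+//   op, err := svc.BackupRuns.Delete("my-project", "my-instance", 42).
+//     Context(ctx).
+//     Do()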
+func (c *BackupRunsDeleteCall) Context(ctx context.Context) *BackupRunsDeleteCall { + c.ctx_ = ctx + return c +} + +func (c *BackupRunsDeleteCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/instances/{instance}/backupRuns/{id}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("DELETE", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "instance": c.instance, + "id": strconv.FormatInt(c.id, 10), + }) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "sql.backupRuns.delete" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *BackupRunsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Deletes the backup taken by a backup run.", + // "httpMethod": "DELETE", + // "id": "sql.backupRuns.delete", + // "parameterOrder": [ + // "project", + // "instance", + // "id" + // ], + // "parameters": { + // "id": { + // "description": "The ID of the Backup Run to delete. To find a Backup Run ID, use the list method.", + // "format": "int64", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "instance": { + // "description": "Cloud SQL instance ID. This does not include the project ID.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID of the project that contains the instance.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "projects/{project}/instances/{instance}/backupRuns/{id}", + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/sqlservice.admin" + // ] + // } + +} + +// method id "sql.backupRuns.get": + +type BackupRunsGetCall struct { + s *Service + project string + instance string + id int64 + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context +} + +// Get: Retrieves a resource containing information about a backup run. 
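+//
+// Editor's note, not generated documentation: a usage sketch with
+// hypothetical identifiers; BackupRun and its fields are defined earlier
+// in this file:
+//
+//	run, err := svc.BackupRuns.Get("my-project", "my-instance", 1234).Do()
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	fmt.Println(run.Status)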
+func (r *BackupRunsService) Get(project string, instance string, id int64) *BackupRunsGetCall {
+ c := &BackupRunsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)}
+ c.project = project
+ c.instance = instance
+ c.id = id
+ return c
+}
+
+// Fields allows partial responses to be retrieved. See
+// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *BackupRunsGetCall) Fields(s ...googleapi.Field) *BackupRunsGetCall {
+ c.urlParams_.Set("fields", googleapi.CombineFields(s))
+ return c
+}
+
+// IfNoneMatch sets the optional parameter which makes the operation
+// fail if the object's ETag matches the given value. This is useful for
+// getting updates only after the object has changed since the last
+// request. Use googleapi.IsNotModified to check whether the response
+// error from Do is the result of If-None-Match.
+func (c *BackupRunsGetCall) IfNoneMatch(entityTag string) *BackupRunsGetCall {
+ c.ifNoneMatch_ = entityTag
+ return c
+}
+
+// Context sets the context to be used in this call's Do method. Any
+// pending HTTP request will be aborted if the provided context is
+// canceled.
+func (c *BackupRunsGetCall) Context(ctx context.Context) *BackupRunsGetCall {
+ c.ctx_ = ctx
+ return c
+}
+
+func (c *BackupRunsGetCall) doRequest(alt string) (*http.Response, error) {
+ var body io.Reader = nil
+ c.urlParams_.Set("alt", alt)
+ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/instances/{instance}/backupRuns/{id}")
+ urls += "?" + c.urlParams_.Encode()
+ req, _ := http.NewRequest("GET", urls, body)
+ googleapi.Expand(req.URL, map[string]string{
+ "project": c.project,
+ "instance": c.instance,
+ "id": strconv.FormatInt(c.id, 10),
+ })
+ req.Header.Set("User-Agent", c.s.userAgent())
+ if c.ifNoneMatch_ != "" {
+ req.Header.Set("If-None-Match", c.ifNoneMatch_)
+ }
+ if c.ctx_ != nil {
+ return ctxhttp.Do(c.ctx_, c.s.client, req)
+ }
+ return c.s.client.Do(req)
+}
+
+// Do executes the "sql.backupRuns.get" call.
+// Exactly one of *BackupRun or error will be non-nil. Any non-2xx
+// status code is an error. Response headers are in either
+// *BackupRun.ServerResponse.Header or (if a response was returned at
+// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified
+// to check whether the returned error was because
+// http.StatusNotModified was returned.
+func (c *BackupRunsGetCall) Do(opts ...googleapi.CallOption) (*BackupRun, error) {
+ gensupport.SetOptions(c.urlParams_, opts...)
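+ // Editor's note: the StatusNotModified check below runs before the
+ // usual error handling so that a 304 response to an If-None-Match
+ // request is surfaced as a *googleapi.Error carrying the response
+ // headers, which callers can detect with googleapi.IsNotModified.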
+ res, err := c.doRequest("json")
+ if res != nil && res.StatusCode == http.StatusNotModified {
+ if res.Body != nil {
+ res.Body.Close()
+ }
+ return nil, &googleapi.Error{
+ Code: res.StatusCode,
+ Header: res.Header,
+ }
+ }
+ if err != nil {
+ return nil, err
+ }
+ defer googleapi.CloseBody(res)
+ if err := googleapi.CheckResponse(res); err != nil {
+ return nil, err
+ }
+ ret := &BackupRun{
+ ServerResponse: googleapi.ServerResponse{
+ Header: res.Header,
+ HTTPStatusCode: res.StatusCode,
+ },
+ }
+ if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
+ return nil, err
+ }
+ return ret, nil
+ // {
+ // "description": "Retrieves a resource containing information about a backup run.",
+ // "httpMethod": "GET",
+ // "id": "sql.backupRuns.get",
+ // "parameterOrder": [
+ // "project",
+ // "instance",
+ // "id"
+ // ],
+ // "parameters": {
+ // "id": {
+ // "description": "The ID of this Backup Run.",
+ // "format": "int64",
+ // "location": "path",
+ // "required": true,
+ // "type": "string"
+ // },
+ // "instance": {
+ // "description": "Cloud SQL instance ID. This does not include the project ID.",
+ // "location": "path",
+ // "required": true,
+ // "type": "string"
+ // },
+ // "project": {
+ // "description": "Project ID of the project that contains the instance.",
+ // "location": "path",
+ // "required": true,
+ // "type": "string"
+ // }
+ // },
+ // "path": "projects/{project}/instances/{instance}/backupRuns/{id}",
+ // "response": {
+ // "$ref": "BackupRun"
+ // },
+ // "scopes": [
+ // "https://www.googleapis.com/auth/cloud-platform",
+ // "https://www.googleapis.com/auth/sqlservice.admin"
+ // ]
+ // }
+
+}
+
+// method id "sql.backupRuns.list":
+
+type BackupRunsListCall struct {
+ s *Service
+ project string
+ instance string
+ urlParams_ gensupport.URLParams
+ ifNoneMatch_ string
+ ctx_ context.Context
+}
+
+// List: Lists all backup runs associated with a given instance and
+// configuration in the reverse chronological order of the enqueued
+// time.
+func (r *BackupRunsService) List(project string, instance string) *BackupRunsListCall {
+ c := &BackupRunsListCall{s: r.s, urlParams_: make(gensupport.URLParams)}
+ c.project = project
+ c.instance = instance
+ return c
+}
+
+// MaxResults sets the optional parameter "maxResults": Maximum number
+// of backup runs per response.
+func (c *BackupRunsListCall) MaxResults(maxResults int64) *BackupRunsListCall {
+ c.urlParams_.Set("maxResults", fmt.Sprint(maxResults))
+ return c
+}
+
+// PageToken sets the optional parameter "pageToken": A
+// previously-returned page token representing part of the larger set of
+// results to view.
+func (c *BackupRunsListCall) PageToken(pageToken string) *BackupRunsListCall {
+ c.urlParams_.Set("pageToken", pageToken)
+ return c
+}
+
+// Fields allows partial responses to be retrieved. See
+// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *BackupRunsListCall) Fields(s ...googleapi.Field) *BackupRunsListCall {
+ c.urlParams_.Set("fields", googleapi.CombineFields(s))
+ return c
+}
+
+// IfNoneMatch sets the optional parameter which makes the operation
+// fail if the object's ETag matches the given value. This is useful for
+// getting updates only after the object has changed since the last
+// request. Use googleapi.IsNotModified to check whether the response
+// error from Do is the result of If-None-Match.
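+//
+// Editor's note, not generated documentation: a conditional-fetch sketch,
+// where cachedEtag is a hypothetical ETag header value captured from an
+// earlier response:
+//
+//	runs, err := svc.BackupRuns.List("my-project", "my-instance").IfNoneMatch(cachedEtag).Do()
+//	if googleapi.IsNotModified(err) {
+//		// The cached copy is still current; reuse it.
+//	}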
+func (c *BackupRunsListCall) IfNoneMatch(entityTag string) *BackupRunsListCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *BackupRunsListCall) Context(ctx context.Context) *BackupRunsListCall { + c.ctx_ = ctx + return c +} + +func (c *BackupRunsListCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/instances/{instance}/backupRuns") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "instance": c.instance, + }) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + req.Header.Set("If-None-Match", c.ifNoneMatch_) + } + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "sql.backupRuns.list" call. +// Exactly one of *BackupRunsListResponse or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *BackupRunsListResponse.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *BackupRunsListCall) Do(opts ...googleapi.CallOption) (*BackupRunsListResponse, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &BackupRunsListResponse{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Lists all backup runs associated with a given instance and configuration in the reverse chronological order of the enqueued time.", + // "httpMethod": "GET", + // "id": "sql.backupRuns.list", + // "parameterOrder": [ + // "project", + // "instance" + // ], + // "parameters": { + // "instance": { + // "description": "Cloud SQL instance ID. 
This does not include the project ID.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "maxResults": { + // "description": "Maximum number of backup runs per response.", + // "format": "int32", + // "location": "query", + // "type": "integer" + // }, + // "pageToken": { + // "description": "A previously-returned page token representing part of the larger set of results to view.", + // "location": "query", + // "type": "string" + // }, + // "project": { + // "description": "Project ID of the project that contains the instance.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "projects/{project}/instances/{instance}/backupRuns", + // "response": { + // "$ref": "BackupRunsListResponse" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/sqlservice.admin" + // ] + // } + +} + +// method id "sql.databases.delete": + +type DatabasesDeleteCall struct { + s *Service + project string + instance string + database string + urlParams_ gensupport.URLParams + ctx_ context.Context +} + +// Delete: Deletes a resource containing information about a database +// inside a Cloud SQL instance. +func (r *DatabasesService) Delete(project string, instance string, database string) *DatabasesDeleteCall { + c := &DatabasesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.instance = instance + c.database = database + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *DatabasesDeleteCall) Fields(s ...googleapi.Field) *DatabasesDeleteCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *DatabasesDeleteCall) Context(ctx context.Context) *DatabasesDeleteCall { + c.ctx_ = ctx + return c +} + +func (c *DatabasesDeleteCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/instances/{instance}/databases/{database}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("DELETE", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "instance": c.instance, + "database": c.database, + }) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "sql.databases.delete" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *DatabasesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json")
+ if res != nil && res.StatusCode == http.StatusNotModified {
+ if res.Body != nil {
+ res.Body.Close()
+ }
+ return nil, &googleapi.Error{
+ Code: res.StatusCode,
+ Header: res.Header,
+ }
+ }
+ if err != nil {
+ return nil, err
+ }
+ defer googleapi.CloseBody(res)
+ if err := googleapi.CheckResponse(res); err != nil {
+ return nil, err
+ }
+ ret := &Operation{
+ ServerResponse: googleapi.ServerResponse{
+ Header: res.Header,
+ HTTPStatusCode: res.StatusCode,
+ },
+ }
+ if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
+ return nil, err
+ }
+ return ret, nil
+ // {
+ // "description": "Deletes a resource containing information about a database inside a Cloud SQL instance.",
+ // "httpMethod": "DELETE",
+ // "id": "sql.databases.delete",
+ // "parameterOrder": [
+ // "project",
+ // "instance",
+ // "database"
+ // ],
+ // "parameters": {
+ // "database": {
+ // "description": "Name of the database to be deleted in the instance.",
+ // "location": "path",
+ // "required": true,
+ // "type": "string"
+ // },
+ // "instance": {
+ // "description": "Database instance ID. This does not include the project ID.",
+ // "location": "path",
+ // "required": true,
+ // "type": "string"
+ // },
+ // "project": {
+ // "description": "Project ID of the project that contains the instance.",
+ // "location": "path",
+ // "required": true,
+ // "type": "string"
+ // }
+ // },
+ // "path": "projects/{project}/instances/{instance}/databases/{database}",
+ // "response": {
+ // "$ref": "Operation"
+ // },
+ // "scopes": [
+ // "https://www.googleapis.com/auth/cloud-platform",
+ // "https://www.googleapis.com/auth/sqlservice.admin"
+ // ]
+ // }
+
+}
+
+// method id "sql.databases.get":
+
+type DatabasesGetCall struct {
+ s *Service
+ project string
+ instance string
+ database string
+ urlParams_ gensupport.URLParams
+ ifNoneMatch_ string
+ ctx_ context.Context
+}
+
+// Get: Retrieves a resource containing information about a database
+// inside a Cloud SQL instance.
+func (r *DatabasesService) Get(project string, instance string, database string) *DatabasesGetCall {
+ c := &DatabasesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)}
+ c.project = project
+ c.instance = instance
+ c.database = database
+ return c
+}
+
+// Fields allows partial responses to be retrieved. See
+// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *DatabasesGetCall) Fields(s ...googleapi.Field) *DatabasesGetCall {
+ c.urlParams_.Set("fields", googleapi.CombineFields(s))
+ return c
+}
+
+// IfNoneMatch sets the optional parameter which makes the operation
+// fail if the object's ETag matches the given value. This is useful for
+// getting updates only after the object has changed since the last
+// request. Use googleapi.IsNotModified to check whether the response
+// error from Do is the result of If-None-Match.
+func (c *DatabasesGetCall) IfNoneMatch(entityTag string) *DatabasesGetCall {
+ c.ifNoneMatch_ = entityTag
+ return c
+}
+
+// Context sets the context to be used in this call's Do method. Any
+// pending HTTP request will be aborted if the provided context is
+// canceled.
+func (c *DatabasesGetCall) Context(ctx context.Context) *DatabasesGetCall { + c.ctx_ = ctx + return c +} + +func (c *DatabasesGetCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/instances/{instance}/databases/{database}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "instance": c.instance, + "database": c.database, + }) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + req.Header.Set("If-None-Match", c.ifNoneMatch_) + } + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "sql.databases.get" call. +// Exactly one of *Database or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Database.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *DatabasesGetCall) Do(opts ...googleapi.CallOption) (*Database, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Database{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Retrieves a resource containing information about a database inside a Cloud SQL instance.", + // "httpMethod": "GET", + // "id": "sql.databases.get", + // "parameterOrder": [ + // "project", + // "instance", + // "database" + // ], + // "parameters": { + // "database": { + // "description": "Name of the database in the instance.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "instance": { + // "description": "Database instance ID. This does not include the project ID.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID of the project that contains the instance.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "projects/{project}/instances/{instance}/databases/{database}", + // "response": { + // "$ref": "Database" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/sqlservice.admin" + // ] + // } + +} + +// method id "sql.databases.insert": + +type DatabasesInsertCall struct { + s *Service + project string + instance string + database *Database + urlParams_ gensupport.URLParams + ctx_ context.Context +} + +// Insert: Inserts a resource containing information about a database +// inside a Cloud SQL instance. 
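+//
+// Editor's note, not generated documentation: a creation sketch with
+// hypothetical names; Database is the struct defined earlier in this
+// file, and the returned Operation describes the pending work:
+//
+//	db := &Database{Name: "appdb", Charset: "utf8"}
+//	op, err := svc.Databases.Insert("my-project", "my-instance", db).Do()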
+func (r *DatabasesService) Insert(project string, instance string, database *Database) *DatabasesInsertCall { + c := &DatabasesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.instance = instance + c.database = database + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *DatabasesInsertCall) Fields(s ...googleapi.Field) *DatabasesInsertCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *DatabasesInsertCall) Context(ctx context.Context) *DatabasesInsertCall { + c.ctx_ = ctx + return c +} + +func (c *DatabasesInsertCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.database) + if err != nil { + return nil, err + } + ctype := "application/json" + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/instances/{instance}/databases") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "instance": c.instance, + }) + req.Header.Set("Content-Type", ctype) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "sql.databases.insert" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *DatabasesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Inserts a resource containing information about a database inside a Cloud SQL instance.", + // "httpMethod": "POST", + // "id": "sql.databases.insert", + // "parameterOrder": [ + // "project", + // "instance" + // ], + // "parameters": { + // "instance": { + // "description": "Database instance ID. 
This does not include the project ID.",
+ // "location": "path",
+ // "required": true,
+ // "type": "string"
+ // },
+ // "project": {
+ // "description": "Project ID of the project that contains the instance.",
+ // "location": "path",
+ // "required": true,
+ // "type": "string"
+ // }
+ // },
+ // "path": "projects/{project}/instances/{instance}/databases",
+ // "request": {
+ // "$ref": "Database"
+ // },
+ // "response": {
+ // "$ref": "Operation"
+ // },
+ // "scopes": [
+ // "https://www.googleapis.com/auth/cloud-platform",
+ // "https://www.googleapis.com/auth/sqlservice.admin"
+ // ]
+ // }
+
+}
+
+// method id "sql.databases.list":
+
+type DatabasesListCall struct {
+ s *Service
+ project string
+ instance string
+ urlParams_ gensupport.URLParams
+ ifNoneMatch_ string
+ ctx_ context.Context
+}
+
+// List: Lists databases in the specified Cloud SQL instance.
+func (r *DatabasesService) List(project string, instance string) *DatabasesListCall {
+ c := &DatabasesListCall{s: r.s, urlParams_: make(gensupport.URLParams)}
+ c.project = project
+ c.instance = instance
+ return c
+}
+
+// Fields allows partial responses to be retrieved. See
+// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *DatabasesListCall) Fields(s ...googleapi.Field) *DatabasesListCall {
+ c.urlParams_.Set("fields", googleapi.CombineFields(s))
+ return c
+}
+
+// IfNoneMatch sets the optional parameter which makes the operation
+// fail if the object's ETag matches the given value. This is useful for
+// getting updates only after the object has changed since the last
+// request. Use googleapi.IsNotModified to check whether the response
+// error from Do is the result of If-None-Match.
+func (c *DatabasesListCall) IfNoneMatch(entityTag string) *DatabasesListCall {
+ c.ifNoneMatch_ = entityTag
+ return c
+}
+
+// Context sets the context to be used in this call's Do method. Any
+// pending HTTP request will be aborted if the provided context is
+// canceled.
+func (c *DatabasesListCall) Context(ctx context.Context) *DatabasesListCall {
+ c.ctx_ = ctx
+ return c
+}
+
+func (c *DatabasesListCall) doRequest(alt string) (*http.Response, error) {
+ var body io.Reader = nil
+ c.urlParams_.Set("alt", alt)
+ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/instances/{instance}/databases")
+ urls += "?" + c.urlParams_.Encode()
+ req, _ := http.NewRequest("GET", urls, body)
+ googleapi.Expand(req.URL, map[string]string{
+ "project": c.project,
+ "instance": c.instance,
+ })
+ req.Header.Set("User-Agent", c.s.userAgent())
+ if c.ifNoneMatch_ != "" {
+ req.Header.Set("If-None-Match", c.ifNoneMatch_)
+ }
+ if c.ctx_ != nil {
+ return ctxhttp.Do(c.ctx_, c.s.client, req)
+ }
+ return c.s.client.Do(req)
+}
+
+// Do executes the "sql.databases.list" call.
+// Exactly one of *DatabasesListResponse or error will be non-nil. Any
+// non-2xx status code is an error. Response headers are in either
+// *DatabasesListResponse.ServerResponse.Header or (if a response was
+// returned at all) in error.(*googleapi.Error).Header. Use
+// googleapi.IsNotModified to check whether the returned error was
+// because http.StatusNotModified was returned.
+func (c *DatabasesListCall) Do(opts ...googleapi.CallOption) (*DatabasesListResponse, error) {
+ gensupport.SetOptions(c.urlParams_, opts...)
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &DatabasesListResponse{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Lists databases in the specified Cloud SQL instance.", + // "httpMethod": "GET", + // "id": "sql.databases.list", + // "parameterOrder": [ + // "project", + // "instance" + // ], + // "parameters": { + // "instance": { + // "description": "Cloud SQL instance ID. This does not include the project ID.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID of the project for which to list Cloud SQL instances.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "projects/{project}/instances/{instance}/databases", + // "response": { + // "$ref": "DatabasesListResponse" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/sqlservice.admin" + // ] + // } + +} + +// method id "sql.databases.patch": + +type DatabasesPatchCall struct { + s *Service + project string + instance string + database string + database2 *Database + urlParams_ gensupport.URLParams + ctx_ context.Context +} + +// Patch: Updates a resource containing information about a database +// inside a Cloud SQL instance. This method supports patch semantics. +func (r *DatabasesService) Patch(project string, instance string, database string, database2 *Database) *DatabasesPatchCall { + c := &DatabasesPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.instance = instance + c.database = database + c.database2 = database2 + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *DatabasesPatchCall) Fields(s ...googleapi.Field) *DatabasesPatchCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *DatabasesPatchCall) Context(ctx context.Context) *DatabasesPatchCall { + c.ctx_ = ctx + return c +} + +func (c *DatabasesPatchCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.database2) + if err != nil { + return nil, err + } + ctype := "application/json" + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/instances/{instance}/databases/{database}") + urls += "?" 
+ c.urlParams_.Encode() + req, _ := http.NewRequest("PATCH", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "instance": c.instance, + "database": c.database, + }) + req.Header.Set("Content-Type", ctype) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "sql.databases.patch" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *DatabasesPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Updates a resource containing information about a database inside a Cloud SQL instance. This method supports patch semantics.", + // "httpMethod": "PATCH", + // "id": "sql.databases.patch", + // "parameterOrder": [ + // "project", + // "instance", + // "database" + // ], + // "parameters": { + // "database": { + // "description": "Name of the database to be updated in the instance.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "instance": { + // "description": "Database instance ID. This does not include the project ID.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID of the project that contains the instance.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "projects/{project}/instances/{instance}/databases/{database}", + // "request": { + // "$ref": "Database" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/sqlservice.admin" + // ] + // } + +} + +// method id "sql.databases.update": + +type DatabasesUpdateCall struct { + s *Service + project string + instance string + database string + database2 *Database + urlParams_ gensupport.URLParams + ctx_ context.Context +} + +// Update: Updates a resource containing information about a database +// inside a Cloud SQL instance. +func (r *DatabasesService) Update(project string, instance string, database string, database2 *Database) *DatabasesUpdateCall { + c := &DatabasesUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.instance = instance + c.database = database + c.database2 = database2 + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. 
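+//
+// Editor's note, not generated documentation: a partial-response sketch;
+// db is a hypothetical *Database and the field paths are illustrative,
+// following the Operation JSON schema:
+//
+//	op, err := svc.Databases.Update("my-project", "my-instance", "appdb", db).Fields("name", "status").Do()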
+func (c *DatabasesUpdateCall) Fields(s ...googleapi.Field) *DatabasesUpdateCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *DatabasesUpdateCall) Context(ctx context.Context) *DatabasesUpdateCall { + c.ctx_ = ctx + return c +} + +func (c *DatabasesUpdateCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.database2) + if err != nil { + return nil, err + } + ctype := "application/json" + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/instances/{instance}/databases/{database}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("PUT", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "instance": c.instance, + "database": c.database, + }) + req.Header.Set("Content-Type", ctype) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "sql.databases.update" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *DatabasesUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Updates a resource containing information about a database inside a Cloud SQL instance.", + // "httpMethod": "PUT", + // "id": "sql.databases.update", + // "parameterOrder": [ + // "project", + // "instance", + // "database" + // ], + // "parameters": { + // "database": { + // "description": "Name of the database to be updated in the instance.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "instance": { + // "description": "Database instance ID. 
This does not include the project ID.",
+ // "location": "path",
+ // "required": true,
+ // "type": "string"
+ // },
+ // "project": {
+ // "description": "Project ID of the project that contains the instance.",
+ // "location": "path",
+ // "required": true,
+ // "type": "string"
+ // }
+ // },
+ // "path": "projects/{project}/instances/{instance}/databases/{database}",
+ // "request": {
+ // "$ref": "Database"
+ // },
+ // "response": {
+ // "$ref": "Operation"
+ // },
+ // "scopes": [
+ // "https://www.googleapis.com/auth/cloud-platform",
+ // "https://www.googleapis.com/auth/sqlservice.admin"
+ // ]
+ // }
+
+}
+
+// method id "sql.flags.list":
+
+type FlagsListCall struct {
+ s *Service
+ urlParams_ gensupport.URLParams
+ ifNoneMatch_ string
+ ctx_ context.Context
+}
+
+// List: List all available database flags for Google Cloud SQL
+// instances.
+func (r *FlagsService) List() *FlagsListCall {
+ c := &FlagsListCall{s: r.s, urlParams_: make(gensupport.URLParams)}
+ return c
+}
+
+// Fields allows partial responses to be retrieved. See
+// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *FlagsListCall) Fields(s ...googleapi.Field) *FlagsListCall {
+ c.urlParams_.Set("fields", googleapi.CombineFields(s))
+ return c
+}
+
+// IfNoneMatch sets the optional parameter which makes the operation
+// fail if the object's ETag matches the given value. This is useful for
+// getting updates only after the object has changed since the last
+// request. Use googleapi.IsNotModified to check whether the response
+// error from Do is the result of If-None-Match.
+func (c *FlagsListCall) IfNoneMatch(entityTag string) *FlagsListCall {
+ c.ifNoneMatch_ = entityTag
+ return c
+}
+
+// Context sets the context to be used in this call's Do method. Any
+// pending HTTP request will be aborted if the provided context is
+// canceled.
+func (c *FlagsListCall) Context(ctx context.Context) *FlagsListCall {
+ c.ctx_ = ctx
+ return c
+}
+
+func (c *FlagsListCall) doRequest(alt string) (*http.Response, error) {
+ var body io.Reader = nil
+ c.urlParams_.Set("alt", alt)
+ urls := googleapi.ResolveRelative(c.s.BasePath, "flags")
+ urls += "?" + c.urlParams_.Encode()
+ req, _ := http.NewRequest("GET", urls, body)
+ googleapi.SetOpaque(req.URL)
+ req.Header.Set("User-Agent", c.s.userAgent())
+ if c.ifNoneMatch_ != "" {
+ req.Header.Set("If-None-Match", c.ifNoneMatch_)
+ }
+ if c.ctx_ != nil {
+ return ctxhttp.Do(c.ctx_, c.s.client, req)
+ }
+ return c.s.client.Do(req)
+}
+
+// Do executes the "sql.flags.list" call.
+// Exactly one of *FlagsListResponse or error will be non-nil. Any
+// non-2xx status code is an error. Response headers are in either
+// *FlagsListResponse.ServerResponse.Header or (if a response was
+// returned at all) in error.(*googleapi.Error).Header. Use
+// googleapi.IsNotModified to check whether the returned error was
+// because http.StatusNotModified was returned.
+func (c *FlagsListCall) Do(opts ...googleapi.CallOption) (*FlagsListResponse, error) {
+ gensupport.SetOptions(c.urlParams_, opts...)
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &FlagsListResponse{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "List all available database flags for Google Cloud SQL instances.", + // "httpMethod": "GET", + // "id": "sql.flags.list", + // "path": "flags", + // "response": { + // "$ref": "FlagsListResponse" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/sqlservice.admin" + // ] + // } + +} + +// method id "sql.instances.clone": + +type InstancesCloneCall struct { + s *Service + project string + instance string + instancesclonerequest *InstancesCloneRequest + urlParams_ gensupport.URLParams + ctx_ context.Context +} + +// Clone: Creates a Cloud SQL instance as a clone of the source +// instance. +func (r *InstancesService) Clone(project string, instance string, instancesclonerequest *InstancesCloneRequest) *InstancesCloneCall { + c := &InstancesCloneCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.instance = instance + c.instancesclonerequest = instancesclonerequest + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *InstancesCloneCall) Fields(s ...googleapi.Field) *InstancesCloneCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *InstancesCloneCall) Context(ctx context.Context) *InstancesCloneCall { + c.ctx_ = ctx + return c +} + +func (c *InstancesCloneCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancesclonerequest) + if err != nil { + return nil, err + } + ctype := "application/json" + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/instances/{instance}/clone") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "instance": c.instance, + }) + req.Header.Set("Content-Type", ctype) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "sql.instances.clone" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *InstancesCloneCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Creates a Cloud SQL instance as a clone of the source instance.", + // "httpMethod": "POST", + // "id": "sql.instances.clone", + // "parameterOrder": [ + // "project", + // "instance" + // ], + // "parameters": { + // "instance": { + // "description": "The ID of the Cloud SQL instance to be cloned (source). This does not include the project ID.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID of the source as well as the clone Cloud SQL instance.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "projects/{project}/instances/{instance}/clone", + // "request": { + // "$ref": "InstancesCloneRequest" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/sqlservice.admin" + // ] + // } + +} + +// method id "sql.instances.delete": + +type InstancesDeleteCall struct { + s *Service + project string + instance string + urlParams_ gensupport.URLParams + ctx_ context.Context +} + +// Delete: Deletes a Cloud SQL instance. +func (r *InstancesService) Delete(project string, instance string) *InstancesDeleteCall { + c := &InstancesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.instance = instance + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *InstancesDeleteCall) Fields(s ...googleapi.Field) *InstancesDeleteCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *InstancesDeleteCall) Context(ctx context.Context) *InstancesDeleteCall { + c.ctx_ = ctx + return c +} + +func (c *InstancesDeleteCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/instances/{instance}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("DELETE", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "instance": c.instance, + }) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "sql.instances.delete" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. 
Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *InstancesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Deletes a Cloud SQL instance.", + // "httpMethod": "DELETE", + // "id": "sql.instances.delete", + // "parameterOrder": [ + // "project", + // "instance" + // ], + // "parameters": { + // "instance": { + // "description": "Cloud SQL instance ID. This does not include the project ID.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID of the project that contains the instance to be deleted.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "projects/{project}/instances/{instance}", + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/sqlservice.admin" + // ] + // } + +} + +// method id "sql.instances.export": + +type InstancesExportCall struct { + s *Service + project string + instance string + instancesexportrequest *InstancesExportRequest + urlParams_ gensupport.URLParams + ctx_ context.Context +} + +// Export: Exports data from a Cloud SQL instance to a Google Cloud +// Storage bucket as a MySQL dump file. +func (r *InstancesService) Export(project string, instance string, instancesexportrequest *InstancesExportRequest) *InstancesExportCall { + c := &InstancesExportCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.instance = instance + c.instancesexportrequest = instancesexportrequest + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *InstancesExportCall) Fields(s ...googleapi.Field) *InstancesExportCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *InstancesExportCall) Context(ctx context.Context) *InstancesExportCall { + c.ctx_ = ctx + return c +} + +func (c *InstancesExportCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancesexportrequest) + if err != nil { + return nil, err + } + ctype := "application/json" + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/instances/{instance}/export") + urls += "?" 
+ c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "instance": c.instance, + }) + req.Header.Set("Content-Type", ctype) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "sql.instances.export" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *InstancesExportCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Exports data from a Cloud SQL instance to a Google Cloud Storage bucket as a MySQL dump file.", + // "httpMethod": "POST", + // "id": "sql.instances.export", + // "parameterOrder": [ + // "project", + // "instance" + // ], + // "parameters": { + // "instance": { + // "description": "Cloud SQL instance ID. This does not include the project ID.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID of the project that contains the instance to be exported.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "projects/{project}/instances/{instance}/export", + // "request": { + // "$ref": "InstancesExportRequest" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform" + // ] + // } + +} + +// method id "sql.instances.failover": + +type InstancesFailoverCall struct { + s *Service + project string + instance string + instancesfailoverrequest *InstancesFailoverRequest + urlParams_ gensupport.URLParams + ctx_ context.Context +} + +// Failover: Failover the instance to its failover replica instance. +func (r *InstancesService) Failover(project string, instance string, instancesfailoverrequest *InstancesFailoverRequest) *InstancesFailoverCall { + c := &InstancesFailoverCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.instance = instance + c.instancesfailoverrequest = instancesfailoverrequest + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *InstancesFailoverCall) Fields(s ...googleapi.Field) *InstancesFailoverCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. 
Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *InstancesFailoverCall) Context(ctx context.Context) *InstancesFailoverCall { + c.ctx_ = ctx + return c +} + +func (c *InstancesFailoverCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancesfailoverrequest) + if err != nil { + return nil, err + } + ctype := "application/json" + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/instances/{instance}/failover") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "instance": c.instance, + }) + req.Header.Set("Content-Type", ctype) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "sql.instances.failover" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *InstancesFailoverCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Failover the instance to its failover replica instance.", + // "httpMethod": "POST", + // "id": "sql.instances.failover", + // "parameterOrder": [ + // "project", + // "instance" + // ], + // "parameters": { + // "instance": { + // "description": "Cloud SQL instance ID. This does not include the project ID.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "ID of the project that contains the read replica.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "projects/{project}/instances/{instance}/failover", + // "request": { + // "$ref": "InstancesFailoverRequest" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/sqlservice.admin" + // ] + // } + +} + +// method id "sql.instances.get": + +type InstancesGetCall struct { + s *Service + project string + instance string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context +} + +// Get: Retrieves a resource containing information about a Cloud SQL +// instance. 
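+//
+// Editor's note, not generated documentation: a lookup sketch with
+// hypothetical identifiers; DatabaseInstance and its fields are defined
+// earlier in this file:
+//
+//	inst, err := svc.Instances.Get("my-project", "my-instance").Do()
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	fmt.Println(inst.State)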
+func (r *InstancesService) Get(project string, instance string) *InstancesGetCall { + c := &InstancesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.instance = instance + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *InstancesGetCall) Fields(s ...googleapi.Field) *InstancesGetCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *InstancesGetCall) IfNoneMatch(entityTag string) *InstancesGetCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *InstancesGetCall) Context(ctx context.Context) *InstancesGetCall { + c.ctx_ = ctx + return c +} + +func (c *InstancesGetCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/instances/{instance}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "instance": c.instance, + }) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + req.Header.Set("If-None-Match", c.ifNoneMatch_) + } + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "sql.instances.get" call. +// Exactly one of *DatabaseInstance or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *DatabaseInstance.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *InstancesGetCall) Do(opts ...googleapi.CallOption) (*DatabaseInstance, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &DatabaseInstance{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Retrieves a resource containing information about a Cloud SQL instance.", + // "httpMethod": "GET", + // "id": "sql.instances.get", + // "parameterOrder": [ + // "project", + // "instance" + // ], + // "parameters": { + // "instance": { + // "description": "Database instance ID. 
This does not include the project ID.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID of the project that contains the instance.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "projects/{project}/instances/{instance}", + // "response": { + // "$ref": "DatabaseInstance" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/sqlservice.admin" + // ] + // } + +} + +// method id "sql.instances.import": + +type InstancesImportCall struct { + s *Service + project string + instance string + instancesimportrequest *InstancesImportRequest + urlParams_ gensupport.URLParams + ctx_ context.Context +} + +// Import: Imports data into a Cloud SQL instance from a MySQL dump file +// in Google Cloud Storage. +func (r *InstancesService) Import(project string, instance string, instancesimportrequest *InstancesImportRequest) *InstancesImportCall { + c := &InstancesImportCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.instance = instance + c.instancesimportrequest = instancesimportrequest + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *InstancesImportCall) Fields(s ...googleapi.Field) *InstancesImportCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *InstancesImportCall) Context(ctx context.Context) *InstancesImportCall { + c.ctx_ = ctx + return c +} + +func (c *InstancesImportCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancesimportrequest) + if err != nil { + return nil, err + } + ctype := "application/json" + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/instances/{instance}/import") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "instance": c.instance, + }) + req.Header.Set("Content-Type", ctype) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "sql.instances.import" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *InstancesImportCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
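+ // A 304 Not Modified response is turned into a *googleapi.Error before
+ // the transport error is inspected, so callers can detect it with
+ // googleapi.IsNotModified.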
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Imports data into a Cloud SQL instance from a MySQL dump file in Google Cloud Storage.", + // "httpMethod": "POST", + // "id": "sql.instances.import", + // "parameterOrder": [ + // "project", + // "instance" + // ], + // "parameters": { + // "instance": { + // "description": "Cloud SQL instance ID. This does not include the project ID.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID of the project that contains the instance.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "projects/{project}/instances/{instance}/import", + // "request": { + // "$ref": "InstancesImportRequest" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform" + // ] + // } + +} + +// method id "sql.instances.insert": + +type InstancesInsertCall struct { + s *Service + project string + databaseinstance *DatabaseInstance + urlParams_ gensupport.URLParams + ctx_ context.Context +} + +// Insert: Creates a new Cloud SQL instance. +func (r *InstancesService) Insert(project string, databaseinstance *DatabaseInstance) *InstancesInsertCall { + c := &InstancesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.databaseinstance = databaseinstance + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *InstancesInsertCall) Fields(s ...googleapi.Field) *InstancesInsertCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *InstancesInsertCall) Context(ctx context.Context) *InstancesInsertCall { + c.ctx_ = ctx + return c +} + +func (c *InstancesInsertCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.databaseinstance) + if err != nil { + return nil, err + } + ctype := "application/json" + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/instances") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + }) + req.Header.Set("Content-Type", ctype) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "sql.instances.insert" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. 
Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *InstancesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Creates a new Cloud SQL instance.", + // "httpMethod": "POST", + // "id": "sql.instances.insert", + // "parameterOrder": [ + // "project" + // ], + // "parameters": { + // "project": { + // "description": "Project ID of the project to which the newly created Cloud SQL instances should belong.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "projects/{project}/instances", + // "request": { + // "$ref": "DatabaseInstance" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/sqlservice.admin" + // ] + // } + +} + +// method id "sql.instances.list": + +type InstancesListCall struct { + s *Service + project string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context +} + +// List: Lists instances under a given project in the alphabetical order +// of the instance name. +func (r *InstancesService) List(project string) *InstancesListCall { + c := &InstancesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results to return per response. +func (c *InstancesListCall) MaxResults(maxResults int64) *InstancesListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// PageToken sets the optional parameter "pageToken": A +// previously-returned page token representing part of the larger set of +// results to view. +func (c *InstancesListCall) PageToken(pageToken string) *InstancesListCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *InstancesListCall) Fields(s ...googleapi.Field) *InstancesListCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. 
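`IfNoneMatch` sets the If-None-Match header for a conditional fetch, and `googleapi.IsNotModified` distinguishes the resulting 304 from a real failure. A sketch of a cached read, again assuming Application Default Credentials and placeholder names, and relying on the `Etag` field the v1beta4 `DatabaseInstance` schema defines:

    package main

    import (
        "log"

        "golang.org/x/net/context"
        "golang.org/x/oauth2/google"
        "google.golang.org/api/googleapi"
        sqladmin "google.golang.org/api/sqladmin/v1beta4"
    )

    func main() {
        ctx := context.Background()
        client, err := google.DefaultClient(ctx, sqladmin.SqlserviceAdminScope)
        if err != nil {
            log.Fatal(err)
        }
        svc, err := sqladmin.New(client)
        if err != nil {
            log.Fatal(err)
        }
        inst, err := svc.Instances.Get("my-project", "my-instance").Do()
        if err != nil {
            log.Fatal(err)
        }
        // Re-fetch only if the resource changed since the first Get.
        _, err = svc.Instances.Get("my-project", "my-instance").
            IfNoneMatch(inst.Etag).Do()
        if googleapi.IsNotModified(err) {
            log.Println("instance unchanged; cached copy is current")
        } else if err != nil {
            log.Fatal(err)
        }
    }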
+func (c *InstancesListCall) IfNoneMatch(entityTag string) *InstancesListCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *InstancesListCall) Context(ctx context.Context) *InstancesListCall { + c.ctx_ = ctx + return c +} + +func (c *InstancesListCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/instances") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + }) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + req.Header.Set("If-None-Match", c.ifNoneMatch_) + } + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "sql.instances.list" call. +// Exactly one of *InstancesListResponse or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *InstancesListResponse.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *InstancesListCall) Do(opts ...googleapi.CallOption) (*InstancesListResponse, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &InstancesListResponse{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Lists instances under a given project in the alphabetical order of the instance name.", + // "httpMethod": "GET", + // "id": "sql.instances.list", + // "parameterOrder": [ + // "project" + // ], + // "parameters": { + // "maxResults": { + // "description": "The maximum number of results to return per response.", + // "format": "uint32", + // "location": "query", + // "type": "integer" + // }, + // "pageToken": { + // "description": "A previously-returned page token representing part of the larger set of results to view.", + // "location": "query", + // "type": "string" + // }, + // "project": { + // "description": "Project ID of the project for which to list Cloud SQL instances.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "projects/{project}/instances", + // "response": { + // "$ref": "InstancesListResponse" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/sqlservice.admin" + // ] + // } + +} + +// method id "sql.instances.patch": + +type InstancesPatchCall struct { + s *Service + project string + instance string + databaseinstance *DatabaseInstance + urlParams_ gensupport.URLParams + ctx_ context.Context +} + +// Patch: Updates settings of a Cloud SQL instance. 
Caution: This is not +// a partial update, so you must include values for all the settings +// that you want to retain. For partial updates, use patch.. This method +// supports patch semantics. +func (r *InstancesService) Patch(project string, instance string, databaseinstance *DatabaseInstance) *InstancesPatchCall { + c := &InstancesPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.instance = instance + c.databaseinstance = databaseinstance + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *InstancesPatchCall) Fields(s ...googleapi.Field) *InstancesPatchCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *InstancesPatchCall) Context(ctx context.Context) *InstancesPatchCall { + c.ctx_ = ctx + return c +} + +func (c *InstancesPatchCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.databaseinstance) + if err != nil { + return nil, err + } + ctype := "application/json" + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/instances/{instance}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("PATCH", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "instance": c.instance, + }) + req.Header.Set("Content-Type", ctype) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "sql.instances.patch" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *InstancesPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Updates settings of a Cloud SQL instance. Caution: This is not a partial update, so you must include values for all the settings that you want to retain. For partial updates, use patch.. This method supports patch semantics.", + // "httpMethod": "PATCH", + // "id": "sql.instances.patch", + // "parameterOrder": [ + // "project", + // "instance" + // ], + // "parameters": { + // "instance": { + // "description": "Cloud SQL instance ID. 
This does not include the project ID.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID of the project that contains the instance.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "projects/{project}/instances/{instance}", + // "request": { + // "$ref": "DatabaseInstance" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/sqlservice.admin" + // ] + // } + +} + +// method id "sql.instances.promoteReplica": + +type InstancesPromoteReplicaCall struct { + s *Service + project string + instance string + urlParams_ gensupport.URLParams + ctx_ context.Context +} + +// PromoteReplica: Promotes the read replica instance to be a +// stand-alone Cloud SQL instance. +func (r *InstancesService) PromoteReplica(project string, instance string) *InstancesPromoteReplicaCall { + c := &InstancesPromoteReplicaCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.instance = instance + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *InstancesPromoteReplicaCall) Fields(s ...googleapi.Field) *InstancesPromoteReplicaCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *InstancesPromoteReplicaCall) Context(ctx context.Context) *InstancesPromoteReplicaCall { + c.ctx_ = ctx + return c +} + +func (c *InstancesPromoteReplicaCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/instances/{instance}/promoteReplica") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "instance": c.instance, + }) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "sql.instances.promoteReplica" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *InstancesPromoteReplicaCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Promotes the read replica instance to be a stand-alone Cloud SQL instance.", + // "httpMethod": "POST", + // "id": "sql.instances.promoteReplica", + // "parameterOrder": [ + // "project", + // "instance" + // ], + // "parameters": { + // "instance": { + // "description": "Cloud SQL read replica instance name.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "ID of the project that contains the read replica.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "projects/{project}/instances/{instance}/promoteReplica", + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/sqlservice.admin" + // ] + // } + +} + +// method id "sql.instances.resetSslConfig": + +type InstancesResetSslConfigCall struct { + s *Service + project string + instance string + urlParams_ gensupport.URLParams + ctx_ context.Context +} + +// ResetSslConfig: Deletes all client certificates and generates a new +// server SSL certificate for the instance. The changes will not take +// effect until the instance is restarted. Existing instances without a +// server certificate will need to call this once to set a server +// certificate. +func (r *InstancesService) ResetSslConfig(project string, instance string) *InstancesResetSslConfigCall { + c := &InstancesResetSslConfigCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.instance = instance + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *InstancesResetSslConfigCall) Fields(s ...googleapi.Field) *InstancesResetSslConfigCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *InstancesResetSslConfigCall) Context(ctx context.Context) *InstancesResetSslConfigCall { + c.ctx_ = ctx + return c +} + +func (c *InstancesResetSslConfigCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/instances/{instance}/resetSslConfig") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "instance": c.instance, + }) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "sql.instances.resetSslConfig" call. +// Exactly one of *Operation or error will be non-nil. 
Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *InstancesResetSslConfigCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Deletes all client certificates and generates a new server SSL certificate for the instance. The changes will not take effect until the instance is restarted. Existing instances without a server certificate will need to call this once to set a server certificate.", + // "httpMethod": "POST", + // "id": "sql.instances.resetSslConfig", + // "parameterOrder": [ + // "project", + // "instance" + // ], + // "parameters": { + // "instance": { + // "description": "Cloud SQL instance ID. This does not include the project ID.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID of the project that contains the instance.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "projects/{project}/instances/{instance}/resetSslConfig", + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/sqlservice.admin" + // ] + // } + +} + +// method id "sql.instances.restart": + +type InstancesRestartCall struct { + s *Service + project string + instance string + urlParams_ gensupport.URLParams + ctx_ context.Context +} + +// Restart: Restarts a Cloud SQL instance. +func (r *InstancesService) Restart(project string, instance string) *InstancesRestartCall { + c := &InstancesRestartCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.instance = instance + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *InstancesRestartCall) Fields(s ...googleapi.Field) *InstancesRestartCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *InstancesRestartCall) Context(ctx context.Context) *InstancesRestartCall { + c.ctx_ = ctx + return c +} + +func (c *InstancesRestartCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/instances/{instance}/restart") + urls += "?" 
+ c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "instance": c.instance, + }) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "sql.instances.restart" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *InstancesRestartCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Restarts a Cloud SQL instance.", + // "httpMethod": "POST", + // "id": "sql.instances.restart", + // "parameterOrder": [ + // "project", + // "instance" + // ], + // "parameters": { + // "instance": { + // "description": "Cloud SQL instance ID. This does not include the project ID.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID of the project that contains the instance to be restarted.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "projects/{project}/instances/{instance}/restart", + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/sqlservice.admin" + // ] + // } + +} + +// method id "sql.instances.restoreBackup": + +type InstancesRestoreBackupCall struct { + s *Service + project string + instance string + instancesrestorebackuprequest *InstancesRestoreBackupRequest + urlParams_ gensupport.URLParams + ctx_ context.Context +} + +// RestoreBackup: Restores a backup of a Cloud SQL instance. +func (r *InstancesService) RestoreBackup(project string, instance string, instancesrestorebackuprequest *InstancesRestoreBackupRequest) *InstancesRestoreBackupCall { + c := &InstancesRestoreBackupCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.instance = instance + c.instancesrestorebackuprequest = instancesrestorebackuprequest + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *InstancesRestoreBackupCall) Fields(s ...googleapi.Field) *InstancesRestoreBackupCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. 
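Every mutating call in this file (restart, restoreBackup, import, and so on) returns an `Operation` rather than blocking until the work finishes. A polling sketch, assuming the v1beta4 convention that a finished operation reports `Status == "DONE"` and that `Operation.Name` is the ID `Operations.Get` expects:

    import (
        "time"

        sqladmin "google.golang.org/api/sqladmin/v1beta4"
    )

    // waitForDone polls sql.operations.get until the operation finishes.
    func waitForDone(svc *sqladmin.Service, project, opName string) (*sqladmin.Operation, error) {
        for {
            op, err := svc.Operations.Get(project, opName).Do()
            if err != nil {
                return nil, err
            }
            if op.Status == "DONE" {
                return op, nil
            }
            time.Sleep(5 * time.Second)
        }
    }

For example, the operation returned by `svc.Instances.Restart("my-project", "my-instance").Do()` could be passed on as `waitForDone(svc, "my-project", op.Name)`.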
+func (c *InstancesRestoreBackupCall) Context(ctx context.Context) *InstancesRestoreBackupCall { + c.ctx_ = ctx + return c +} + +func (c *InstancesRestoreBackupCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancesrestorebackuprequest) + if err != nil { + return nil, err + } + ctype := "application/json" + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/instances/{instance}/restoreBackup") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "instance": c.instance, + }) + req.Header.Set("Content-Type", ctype) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "sql.instances.restoreBackup" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *InstancesRestoreBackupCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Restores a backup of a Cloud SQL instance.", + // "httpMethod": "POST", + // "id": "sql.instances.restoreBackup", + // "parameterOrder": [ + // "project", + // "instance" + // ], + // "parameters": { + // "instance": { + // "description": "Cloud SQL instance ID. This does not include the project ID.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID of the project that contains the instance.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "projects/{project}/instances/{instance}/restoreBackup", + // "request": { + // "$ref": "InstancesRestoreBackupRequest" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/sqlservice.admin" + // ] + // } + +} + +// method id "sql.instances.startReplica": + +type InstancesStartReplicaCall struct { + s *Service + project string + instance string + urlParams_ gensupport.URLParams + ctx_ context.Context +} + +// StartReplica: Starts the replication in the read replica instance. 
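Because a call with a context goes through `ctxhttp.Do`, canceling the context aborts the in-flight HTTP request. A sketch bounding a replica start with a timeout, reusing the `svc` client from the first sketch (the `context` package here is `golang.org/x/net/context`, matching the import these files use, and `"time"` is also required):

    ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
    defer cancel()
    // The request is aborted if 30 seconds pass before a response arrives.
    op, err := svc.Instances.StartReplica("my-project", "my-replica").
        Context(ctx).Do()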
+func (r *InstancesService) StartReplica(project string, instance string) *InstancesStartReplicaCall { + c := &InstancesStartReplicaCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.instance = instance + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *InstancesStartReplicaCall) Fields(s ...googleapi.Field) *InstancesStartReplicaCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *InstancesStartReplicaCall) Context(ctx context.Context) *InstancesStartReplicaCall { + c.ctx_ = ctx + return c +} + +func (c *InstancesStartReplicaCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/instances/{instance}/startReplica") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "instance": c.instance, + }) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "sql.instances.startReplica" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *InstancesStartReplicaCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Starts the replication in the read replica instance.", + // "httpMethod": "POST", + // "id": "sql.instances.startReplica", + // "parameterOrder": [ + // "project", + // "instance" + // ], + // "parameters": { + // "instance": { + // "description": "Cloud SQL read replica instance name.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "ID of the project that contains the read replica.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "projects/{project}/instances/{instance}/startReplica", + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/sqlservice.admin" + // ] + // } + +} + +// method id "sql.instances.stopReplica": + +type InstancesStopReplicaCall struct { + s *Service + project string + instance string + urlParams_ gensupport.URLParams + ctx_ context.Context +} + +// StopReplica: Stops the replication in the read replica instance. +func (r *InstancesService) StopReplica(project string, instance string) *InstancesStopReplicaCall { + c := &InstancesStopReplicaCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.instance = instance + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *InstancesStopReplicaCall) Fields(s ...googleapi.Field) *InstancesStopReplicaCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *InstancesStopReplicaCall) Context(ctx context.Context) *InstancesStopReplicaCall { + c.ctx_ = ctx + return c +} + +func (c *InstancesStopReplicaCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/instances/{instance}/stopReplica") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "instance": c.instance, + }) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "sql.instances.stopReplica" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. 
Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *InstancesStopReplicaCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Stops the replication in the read replica instance.", + // "httpMethod": "POST", + // "id": "sql.instances.stopReplica", + // "parameterOrder": [ + // "project", + // "instance" + // ], + // "parameters": { + // "instance": { + // "description": "Cloud SQL read replica instance name.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "ID of the project that contains the read replica.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "projects/{project}/instances/{instance}/stopReplica", + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/sqlservice.admin" + // ] + // } + +} + +// method id "sql.instances.update": + +type InstancesUpdateCall struct { + s *Service + project string + instance string + databaseinstance *DatabaseInstance + urlParams_ gensupport.URLParams + ctx_ context.Context +} + +// Update: Updates settings of a Cloud SQL instance. Caution: This is +// not a partial update, so you must include values for all the settings +// that you want to retain. For partial updates, use patch. +func (r *InstancesService) Update(project string, instance string, databaseinstance *DatabaseInstance) *InstancesUpdateCall { + c := &InstancesUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.instance = instance + c.databaseinstance = databaseinstance + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *InstancesUpdateCall) Fields(s ...googleapi.Field) *InstancesUpdateCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *InstancesUpdateCall) Context(ctx context.Context) *InstancesUpdateCall { + c.ctx_ = ctx + return c +} + +func (c *InstancesUpdateCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.databaseinstance) + if err != nil { + return nil, err + } + ctype := "application/json" + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/instances/{instance}") + urls += "?" 
+ c.urlParams_.Encode() + req, _ := http.NewRequest("PUT", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "instance": c.instance, + }) + req.Header.Set("Content-Type", ctype) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "sql.instances.update" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *InstancesUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Updates settings of a Cloud SQL instance. Caution: This is not a partial update, so you must include values for all the settings that you want to retain. For partial updates, use patch.", + // "etagRequired": true, + // "httpMethod": "PUT", + // "id": "sql.instances.update", + // "parameterOrder": [ + // "project", + // "instance" + // ], + // "parameters": { + // "instance": { + // "description": "Cloud SQL instance ID. This does not include the project ID.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID of the project that contains the instance.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "projects/{project}/instances/{instance}", + // "request": { + // "$ref": "DatabaseInstance" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/sqlservice.admin" + // ] + // } + +} + +// method id "sql.operations.get": + +type OperationsGetCall struct { + s *Service + project string + operation string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context +} + +// Get: Retrieves an instance operation that has been performed on an +// instance. +func (r *OperationsService) Get(project string, operation string) *OperationsGetCall { + c := &OperationsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.operation = operation + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *OperationsGetCall) Fields(s ...googleapi.Field) *OperationsGetCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. 
This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *OperationsGetCall) IfNoneMatch(entityTag string) *OperationsGetCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *OperationsGetCall) Context(ctx context.Context) *OperationsGetCall { + c.ctx_ = ctx + return c +} + +func (c *OperationsGetCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/operations/{operation}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "operation": c.operation, + }) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + req.Header.Set("If-None-Match", c.ifNoneMatch_) + } + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "sql.operations.get" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *OperationsGetCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Retrieves an instance operation that has been performed on an instance.", + // "httpMethod": "GET", + // "id": "sql.operations.get", + // "parameterOrder": [ + // "project", + // "operation" + // ], + // "parameters": { + // "operation": { + // "description": "Instance operation ID.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID of the project that contains the instance.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "projects/{project}/operations/{operation}", + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/sqlservice.admin" + // ] + // } + +} + +// method id "sql.operations.list": + +type OperationsListCall struct { + s *Service + project string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context +} + +// List: Lists all instance operations that have been performed on the +// given Cloud SQL instance in the reverse chronological order of the +// start time. +func (r *OperationsService) List(project string, instance string) *OperationsListCall { + c := &OperationsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.urlParams_.Set("instance", instance) + return c +} + +// MaxResults sets the optional parameter "maxResults": Maximum number +// of operations per response. +func (c *OperationsListCall) MaxResults(maxResults int64) *OperationsListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// PageToken sets the optional parameter "pageToken": A +// previously-returned page token representing part of the larger set of +// results to view. +func (c *OperationsListCall) PageToken(pageToken string) *OperationsListCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *OperationsListCall) Fields(s ...googleapi.Field) *OperationsListCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *OperationsListCall) IfNoneMatch(entityTag string) *OperationsListCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. 
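The list calls page through results with `MaxResults` and `PageToken`. A pagination sketch, reusing `svc` from the first sketch and assuming the `Items` and `NextPageToken` fields of the v1beta4 `OperationsListResponse`:

    token := ""
    for {
        call := svc.Operations.List("my-project", "my-instance").MaxResults(100)
        if token != "" {
            call = call.PageToken(token)
        }
        resp, err := call.Do()
        if err != nil {
            log.Fatal(err)
        }
        for _, op := range resp.Items {
            log.Printf("%s: %s", op.Name, op.Status)
        }
        if resp.NextPageToken == "" {
            break
        }
        token = resp.NextPageToken
    }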
+func (c *OperationsListCall) Context(ctx context.Context) *OperationsListCall { + c.ctx_ = ctx + return c +} + +func (c *OperationsListCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/operations") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + }) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + req.Header.Set("If-None-Match", c.ifNoneMatch_) + } + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "sql.operations.list" call. +// Exactly one of *OperationsListResponse or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *OperationsListResponse.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *OperationsListCall) Do(opts ...googleapi.CallOption) (*OperationsListResponse, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &OperationsListResponse{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Lists all instance operations that have been performed on the given Cloud SQL instance in the reverse chronological order of the start time.", + // "httpMethod": "GET", + // "id": "sql.operations.list", + // "parameterOrder": [ + // "project", + // "instance" + // ], + // "parameters": { + // "instance": { + // "description": "Cloud SQL instance ID. 
This does not include the project ID.", + // "location": "query", + // "required": true, + // "type": "string" + // }, + // "maxResults": { + // "description": "Maximum number of operations per response.", + // "format": "uint32", + // "location": "query", + // "type": "integer" + // }, + // "pageToken": { + // "description": "A previously-returned page token representing part of the larger set of results to view.", + // "location": "query", + // "type": "string" + // }, + // "project": { + // "description": "Project ID of the project that contains the instance.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "projects/{project}/operations", + // "response": { + // "$ref": "OperationsListResponse" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/sqlservice.admin" + // ] + // } + +} + +// method id "sql.sslCerts.createEphemeral": + +type SslCertsCreateEphemeralCall struct { + s *Service + project string + instance string + sslcertscreateephemeralrequest *SslCertsCreateEphemeralRequest + urlParams_ gensupport.URLParams + ctx_ context.Context +} + +// CreateEphemeral: Generates a short-lived X509 certificate containing +// the provided public key and signed by a private key specific to the +// target instance. Users may use the certificate to authenticate as +// themselves when connecting to the database. +func (r *SslCertsService) CreateEphemeral(project string, instance string, sslcertscreateephemeralrequest *SslCertsCreateEphemeralRequest) *SslCertsCreateEphemeralCall { + c := &SslCertsCreateEphemeralCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.instance = instance + c.sslcertscreateephemeralrequest = sslcertscreateephemeralrequest + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *SslCertsCreateEphemeralCall) Fields(s ...googleapi.Field) *SslCertsCreateEphemeralCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *SslCertsCreateEphemeralCall) Context(ctx context.Context) *SslCertsCreateEphemeralCall { + c.ctx_ = ctx + return c +} + +func (c *SslCertsCreateEphemeralCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.sslcertscreateephemeralrequest) + if err != nil { + return nil, err + } + ctype := "application/json" + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/instances/{instance}/createEphemeral") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "instance": c.instance, + }) + req.Header.Set("Content-Type", ctype) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "sql.sslCerts.createEphemeral" call. +// Exactly one of *SslCert or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *SslCert.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. 
Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *SslCertsCreateEphemeralCall) Do(opts ...googleapi.CallOption) (*SslCert, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &SslCert{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Generates a short-lived X509 certificate containing the provided public key and signed by a private key specific to the target instance. Users may use the certificate to authenticate as themselves when connecting to the database.", + // "httpMethod": "POST", + // "id": "sql.sslCerts.createEphemeral", + // "parameterOrder": [ + // "project", + // "instance" + // ], + // "parameters": { + // "instance": { + // "description": "Cloud SQL instance ID. This does not include the project ID.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID of the Cloud SQL project.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "projects/{project}/instances/{instance}/createEphemeral", + // "request": { + // "$ref": "SslCertsCreateEphemeralRequest" + // }, + // "response": { + // "$ref": "SslCert" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/sqlservice.admin" + // ] + // } + +} + +// method id "sql.sslCerts.delete": + +type SslCertsDeleteCall struct { + s *Service + project string + instance string + sha1Fingerprint string + urlParams_ gensupport.URLParams + ctx_ context.Context +} + +// Delete: Deletes the SSL certificate. The change will not take effect +// until the instance is restarted. +func (r *SslCertsService) Delete(project string, instance string, sha1Fingerprint string) *SslCertsDeleteCall { + c := &SslCertsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.instance = instance + c.sha1Fingerprint = sha1Fingerprint + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *SslCertsDeleteCall) Fields(s ...googleapi.Field) *SslCertsDeleteCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *SslCertsDeleteCall) Context(ctx context.Context) *SslCertsDeleteCall { + c.ctx_ = ctx + return c +} + +func (c *SslCertsDeleteCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/instances/{instance}/sslCerts/{sha1Fingerprint}") + urls += "?" 
+ c.urlParams_.Encode() + req, _ := http.NewRequest("DELETE", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "instance": c.instance, + "sha1Fingerprint": c.sha1Fingerprint, + }) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "sql.sslCerts.delete" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *SslCertsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Deletes the SSL certificate. The change will not take effect until the instance is restarted.", + // "httpMethod": "DELETE", + // "id": "sql.sslCerts.delete", + // "parameterOrder": [ + // "project", + // "instance", + // "sha1Fingerprint" + // ], + // "parameters": { + // "instance": { + // "description": "Cloud SQL instance ID. This does not include the project ID.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID of the project that contains the instance to be deleted.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "sha1Fingerprint": { + // "description": "Sha1 FingerPrint.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "projects/{project}/instances/{instance}/sslCerts/{sha1Fingerprint}", + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/sqlservice.admin" + // ] + // } + +} + +// method id "sql.sslCerts.get": + +type SslCertsGetCall struct { + s *Service + project string + instance string + sha1Fingerprint string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context +} + +// Get: Retrieves a particular SSL certificate. Does not include the +// private key (required for usage). The private key must be saved from +// the response to initial creation. +func (r *SslCertsService) Get(project string, instance string, sha1Fingerprint string) *SslCertsGetCall { + c := &SslCertsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.instance = instance + c.sha1Fingerprint = sha1Fingerprint + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. 
+func (c *SslCertsGetCall) Fields(s ...googleapi.Field) *SslCertsGetCall {
+ c.urlParams_.Set("fields", googleapi.CombineFields(s))
+ return c
+}
+
+// IfNoneMatch sets the optional parameter which makes the operation
+// fail if the object's ETag matches the given value. This is useful for
+// getting updates only after the object has changed since the last
+// request. Use googleapi.IsNotModified to check whether the response
+// error from Do is the result of If-None-Match.
+func (c *SslCertsGetCall) IfNoneMatch(entityTag string) *SslCertsGetCall {
+ c.ifNoneMatch_ = entityTag
+ return c
+}
+
+// Context sets the context to be used in this call's Do method. Any
+// pending HTTP request will be aborted if the provided context is
+// canceled.
+func (c *SslCertsGetCall) Context(ctx context.Context) *SslCertsGetCall {
+ c.ctx_ = ctx
+ return c
+}
+
+func (c *SslCertsGetCall) doRequest(alt string) (*http.Response, error) {
+ var body io.Reader = nil
+ c.urlParams_.Set("alt", alt)
+ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/instances/{instance}/sslCerts/{sha1Fingerprint}")
+ urls += "?" + c.urlParams_.Encode()
+ req, _ := http.NewRequest("GET", urls, body)
+ googleapi.Expand(req.URL, map[string]string{
+ "project": c.project,
+ "instance": c.instance,
+ "sha1Fingerprint": c.sha1Fingerprint,
+ })
+ req.Header.Set("User-Agent", c.s.userAgent())
+ if c.ifNoneMatch_ != "" {
+ req.Header.Set("If-None-Match", c.ifNoneMatch_)
+ }
+ if c.ctx_ != nil {
+ return ctxhttp.Do(c.ctx_, c.s.client, req)
+ }
+ return c.s.client.Do(req)
+}
+
+// Do executes the "sql.sslCerts.get" call.
+// Exactly one of *SslCert or error will be non-nil. Any non-2xx status
+// code is an error. Response headers are in either
+// *SslCert.ServerResponse.Header or (if a response was returned at all)
+// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to
+// check whether the returned error was because http.StatusNotModified
+// was returned.
+func (c *SslCertsGetCall) Do(opts ...googleapi.CallOption) (*SslCert, error) {
+ gensupport.SetOptions(c.urlParams_, opts...)
+ res, err := c.doRequest("json")
+ if res != nil && res.StatusCode == http.StatusNotModified {
+ if res.Body != nil {
+ res.Body.Close()
+ }
+ return nil, &googleapi.Error{
+ Code: res.StatusCode,
+ Header: res.Header,
+ }
+ }
+ if err != nil {
+ return nil, err
+ }
+ defer googleapi.CloseBody(res)
+ if err := googleapi.CheckResponse(res); err != nil {
+ return nil, err
+ }
+ ret := &SslCert{
+ ServerResponse: googleapi.ServerResponse{
+ Header: res.Header,
+ HTTPStatusCode: res.StatusCode,
+ },
+ }
+ if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
+ return nil, err
+ }
+ return ret, nil
+ // {
+ //   "description": "Retrieves a particular SSL certificate. Does not include the private key (required for usage). The private key must be saved from the response to initial creation.",
+ //   "httpMethod": "GET",
+ //   "id": "sql.sslCerts.get",
+ //   "parameterOrder": [
+ //     "project",
+ //     "instance",
+ //     "sha1Fingerprint"
+ //   ],
+ //   "parameters": {
+ //     "instance": {
+ //       "description": "Cloud SQL instance ID. 
This does not include the project ID.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID of the project that contains the instance.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "sha1Fingerprint": { + // "description": "Sha1 FingerPrint.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "projects/{project}/instances/{instance}/sslCerts/{sha1Fingerprint}", + // "response": { + // "$ref": "SslCert" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/sqlservice.admin" + // ] + // } + +} + +// method id "sql.sslCerts.insert": + +type SslCertsInsertCall struct { + s *Service + project string + instance string + sslcertsinsertrequest *SslCertsInsertRequest + urlParams_ gensupport.URLParams + ctx_ context.Context +} + +// Insert: Creates an SSL certificate and returns it along with the +// private key and server certificate authority. The new certificate +// will not be usable until the instance is restarted. +func (r *SslCertsService) Insert(project string, instance string, sslcertsinsertrequest *SslCertsInsertRequest) *SslCertsInsertCall { + c := &SslCertsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.instance = instance + c.sslcertsinsertrequest = sslcertsinsertrequest + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *SslCertsInsertCall) Fields(s ...googleapi.Field) *SslCertsInsertCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *SslCertsInsertCall) Context(ctx context.Context) *SslCertsInsertCall { + c.ctx_ = ctx + return c +} + +func (c *SslCertsInsertCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.sslcertsinsertrequest) + if err != nil { + return nil, err + } + ctype := "application/json" + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/instances/{instance}/sslCerts") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "instance": c.instance, + }) + req.Header.Set("Content-Type", ctype) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "sql.sslCerts.insert" call. +// Exactly one of *SslCertsInsertResponse or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *SslCertsInsertResponse.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *SslCertsInsertCall) Do(opts ...googleapi.CallOption) (*SslCertsInsertResponse, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json")
+ if res != nil && res.StatusCode == http.StatusNotModified {
+ if res.Body != nil {
+ res.Body.Close()
+ }
+ return nil, &googleapi.Error{
+ Code: res.StatusCode,
+ Header: res.Header,
+ }
+ }
+ if err != nil {
+ return nil, err
+ }
+ defer googleapi.CloseBody(res)
+ if err := googleapi.CheckResponse(res); err != nil {
+ return nil, err
+ }
+ ret := &SslCertsInsertResponse{
+ ServerResponse: googleapi.ServerResponse{
+ Header: res.Header,
+ HTTPStatusCode: res.StatusCode,
+ },
+ }
+ if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
+ return nil, err
+ }
+ return ret, nil
+ // {
+ //   "description": "Creates an SSL certificate and returns it along with the private key and server certificate authority. The new certificate will not be usable until the instance is restarted.",
+ //   "httpMethod": "POST",
+ //   "id": "sql.sslCerts.insert",
+ //   "parameterOrder": [
+ //     "project",
+ //     "instance"
+ //   ],
+ //   "parameters": {
+ //     "instance": {
+ //       "description": "Cloud SQL instance ID. This does not include the project ID.",
+ //       "location": "path",
+ //       "required": true,
+ //       "type": "string"
+ //     },
+ //     "project": {
+ //       "description": "Project ID of the project to which the newly created Cloud SQL instances should belong.",
+ //       "location": "path",
+ //       "required": true,
+ //       "type": "string"
+ //     }
+ //   },
+ //   "path": "projects/{project}/instances/{instance}/sslCerts",
+ //   "request": {
+ //     "$ref": "SslCertsInsertRequest"
+ //   },
+ //   "response": {
+ //     "$ref": "SslCertsInsertResponse"
+ //   },
+ //   "scopes": [
+ //     "https://www.googleapis.com/auth/cloud-platform",
+ //     "https://www.googleapis.com/auth/sqlservice.admin"
+ //   ]
+ // }
+
+}
+
+// method id "sql.sslCerts.list":
+
+type SslCertsListCall struct {
+ s *Service
+ project string
+ instance string
+ urlParams_ gensupport.URLParams
+ ifNoneMatch_ string
+ ctx_ context.Context
+}
+
+// List: Lists all of the current SSL certificates for the instance.
+func (r *SslCertsService) List(project string, instance string) *SslCertsListCall {
+ c := &SslCertsListCall{s: r.s, urlParams_: make(gensupport.URLParams)}
+ c.project = project
+ c.instance = instance
+ return c
+}
+
+// Fields allows partial responses to be retrieved. See
+// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *SslCertsListCall) Fields(s ...googleapi.Field) *SslCertsListCall {
+ c.urlParams_.Set("fields", googleapi.CombineFields(s))
+ return c
+}
+
+// IfNoneMatch sets the optional parameter which makes the operation
+// fail if the object's ETag matches the given value. This is useful for
+// getting updates only after the object has changed since the last
+// request. Use googleapi.IsNotModified to check whether the response
+// error from Do is the result of If-None-Match.
+func (c *SslCertsListCall) IfNoneMatch(entityTag string) *SslCertsListCall {
+ c.ifNoneMatch_ = entityTag
+ return c
+}
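A usage sketch (editor's note, not part of the vendored file): the ephemeral-certificate flow served by sql.sslCerts.createEphemeral above signs a caller-supplied public key. The import path, key size, and function name here are illustrative assumptions.

```go
// Sketch: obtaining a short-lived client certificate from createEphemeral.
package example

import (
	"crypto/rand"
	"crypto/rsa"
	"crypto/x509"
	"encoding/pem"

	sqladmin "google.golang.org/api/sqladmin/v1beta4" // assumed import path
)

func ephemeralCert(svc *sqladmin.Service, project, instance string) (string, error) {
	// Generate a keypair locally; only the public half is sent to the API.
	key, err := rsa.GenerateKey(rand.Reader, 2048)
	if err != nil {
		return "", err
	}
	pubDER, err := x509.MarshalPKIXPublicKey(&key.PublicKey)
	if err != nil {
		return "", err
	}
	pubPEM := pem.EncodeToMemory(&pem.Block{Type: "PUBLIC KEY", Bytes: pubDER})
	req := &sqladmin.SslCertsCreateEphemeralRequest{PublicKey: string(pubPEM)}
	cert, err := svc.SslCerts.CreateEphemeral(project, instance, req).Do()
	if err != nil {
		return "", err
	}
	// cert.Cert is the PEM-encoded, short-lived X509 certificate; pair it
	// with the private key generated above when opening a TLS connection.
	return cert.Cert, nil
}
```

+
+// Context sets the context to be used in this call's Do method. Any
+// pending HTTP request will be aborted if the provided context is
+// canceled.
+func (c *SslCertsListCall) Context(ctx context.Context) *SslCertsListCall {
+ c.ctx_ = ctx
+ return c
+}
+
+func (c *SslCertsListCall) doRequest(alt string) (*http.Response, error) {
+ var body io.Reader = nil
+ c.urlParams_.Set("alt", alt)
+ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/instances/{instance}/sslCerts")
+ urls += "?"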
+ c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "instance": c.instance, + }) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + req.Header.Set("If-None-Match", c.ifNoneMatch_) + } + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "sql.sslCerts.list" call. +// Exactly one of *SslCertsListResponse or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *SslCertsListResponse.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *SslCertsListCall) Do(opts ...googleapi.CallOption) (*SslCertsListResponse, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &SslCertsListResponse{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Lists all of the current SSL certificates for the instance.", + // "httpMethod": "GET", + // "id": "sql.sslCerts.list", + // "parameterOrder": [ + // "project", + // "instance" + // ], + // "parameters": { + // "instance": { + // "description": "Cloud SQL instance ID. This does not include the project ID.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID of the project for which to list Cloud SQL instances.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "projects/{project}/instances/{instance}/sslCerts", + // "response": { + // "$ref": "SslCertsListResponse" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/sqlservice.admin" + // ] + // } + +} + +// method id "sql.tiers.list": + +type TiersListCall struct { + s *Service + project string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context +} + +// List: Lists all available service tiers for Google Cloud SQL, for +// example D1, D2. For related information, see Pricing. +func (r *TiersService) List(project string) *TiersListCall { + c := &TiersListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *TiersListCall) Fields(s ...googleapi.Field) *TiersListCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. 
Use googleapi.IsNotModified to check whether the response
+// error from Do is the result of If-None-Match.
+func (c *TiersListCall) IfNoneMatch(entityTag string) *TiersListCall {
+ c.ifNoneMatch_ = entityTag
+ return c
+}
+
+// Context sets the context to be used in this call's Do method. Any
+// pending HTTP request will be aborted if the provided context is
+// canceled.
+func (c *TiersListCall) Context(ctx context.Context) *TiersListCall {
+ c.ctx_ = ctx
+ return c
+}
+
+func (c *TiersListCall) doRequest(alt string) (*http.Response, error) {
+ var body io.Reader = nil
+ c.urlParams_.Set("alt", alt)
+ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/tiers")
+ urls += "?" + c.urlParams_.Encode()
+ req, _ := http.NewRequest("GET", urls, body)
+ googleapi.Expand(req.URL, map[string]string{
+ "project": c.project,
+ })
+ req.Header.Set("User-Agent", c.s.userAgent())
+ if c.ifNoneMatch_ != "" {
+ req.Header.Set("If-None-Match", c.ifNoneMatch_)
+ }
+ if c.ctx_ != nil {
+ return ctxhttp.Do(c.ctx_, c.s.client, req)
+ }
+ return c.s.client.Do(req)
+}
+
+// Do executes the "sql.tiers.list" call.
+// Exactly one of *TiersListResponse or error will be non-nil. Any
+// non-2xx status code is an error. Response headers are in either
+// *TiersListResponse.ServerResponse.Header or (if a response was
+// returned at all) in error.(*googleapi.Error).Header. Use
+// googleapi.IsNotModified to check whether the returned error was
+// because http.StatusNotModified was returned.
+func (c *TiersListCall) Do(opts ...googleapi.CallOption) (*TiersListResponse, error) {
+ gensupport.SetOptions(c.urlParams_, opts...)
+ res, err := c.doRequest("json")
+ if res != nil && res.StatusCode == http.StatusNotModified {
+ if res.Body != nil {
+ res.Body.Close()
+ }
+ return nil, &googleapi.Error{
+ Code: res.StatusCode,
+ Header: res.Header,
+ }
+ }
+ if err != nil {
+ return nil, err
+ }
+ defer googleapi.CloseBody(res)
+ if err := googleapi.CheckResponse(res); err != nil {
+ return nil, err
+ }
+ ret := &TiersListResponse{
+ ServerResponse: googleapi.ServerResponse{
+ Header: res.Header,
+ HTTPStatusCode: res.StatusCode,
+ },
+ }
+ if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
+ return nil, err
+ }
+ return ret, nil
+ // {
+ //   "description": "Lists all available service tiers for Google Cloud SQL, for example D1, D2. For related information, see Pricing.",
+ //   "httpMethod": "GET",
+ //   "id": "sql.tiers.list",
+ //   "parameterOrder": [
+ //     "project"
+ //   ],
+ //   "parameters": {
+ //     "project": {
+ //       "description": "Project ID of the project for which to list tiers.",
+ //       "location": "path",
+ //       "required": true,
+ //       "type": "string"
+ //     }
+ //   },
+ //   "path": "projects/{project}/tiers",
+ //   "response": {
+ //     "$ref": "TiersListResponse"
+ //   },
+ //   "scopes": [
+ //     "https://www.googleapis.com/auth/cloud-platform",
+ //     "https://www.googleapis.com/auth/sqlservice.admin"
+ //   ]
+ // }
+
+}
+
+// method id "sql.users.delete":
+
+type UsersDeleteCall struct {
+ s *Service
+ project string
+ instance string
+ urlParams_ gensupport.URLParams
+ ctx_ context.Context
+}
+
+// Delete: Deletes a user from a Cloud SQL instance.
+func (r *UsersService) Delete(project string, instance string, host string, name string) *UsersDeleteCall {
+ c := &UsersDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)}
+ c.project = project
+ c.instance = instance
+ c.urlParams_.Set("host", host)
+ c.urlParams_.Set("name", name)
+ return c
+}
+
+// Fields allows partial responses to be retrieved. 
See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *UsersDeleteCall) Fields(s ...googleapi.Field) *UsersDeleteCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *UsersDeleteCall) Context(ctx context.Context) *UsersDeleteCall { + c.ctx_ = ctx + return c +} + +func (c *UsersDeleteCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/instances/{instance}/users") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("DELETE", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "instance": c.instance, + }) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "sql.users.delete" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *UsersDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Deletes a user from a Cloud SQL instance.", + // "httpMethod": "DELETE", + // "id": "sql.users.delete", + // "parameterOrder": [ + // "project", + // "instance", + // "host", + // "name" + // ], + // "parameters": { + // "host": { + // "description": "Host of the user in the instance.", + // "location": "query", + // "required": true, + // "type": "string" + // }, + // "instance": { + // "description": "Database instance ID. 
This does not include the project ID.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "name": { + // "description": "Name of the user in the instance.", + // "location": "query", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID of the project that contains the instance.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "projects/{project}/instances/{instance}/users", + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/sqlservice.admin" + // ] + // } + +} + +// method id "sql.users.insert": + +type UsersInsertCall struct { + s *Service + project string + instance string + user *User + urlParams_ gensupport.URLParams + ctx_ context.Context +} + +// Insert: Creates a new user in a Cloud SQL instance. +func (r *UsersService) Insert(project string, instance string, user *User) *UsersInsertCall { + c := &UsersInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.instance = instance + c.user = user + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *UsersInsertCall) Fields(s ...googleapi.Field) *UsersInsertCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *UsersInsertCall) Context(ctx context.Context) *UsersInsertCall { + c.ctx_ = ctx + return c +} + +func (c *UsersInsertCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.user) + if err != nil { + return nil, err + } + ctype := "application/json" + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/instances/{instance}/users") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "instance": c.instance, + }) + req.Header.Set("Content-Type", ctype) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "sql.users.insert" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *UsersInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json")
+ if res != nil && res.StatusCode == http.StatusNotModified {
+ if res.Body != nil {
+ res.Body.Close()
+ }
+ return nil, &googleapi.Error{
+ Code: res.StatusCode,
+ Header: res.Header,
+ }
+ }
+ if err != nil {
+ return nil, err
+ }
+ defer googleapi.CloseBody(res)
+ if err := googleapi.CheckResponse(res); err != nil {
+ return nil, err
+ }
+ ret := &Operation{
+ ServerResponse: googleapi.ServerResponse{
+ Header: res.Header,
+ HTTPStatusCode: res.StatusCode,
+ },
+ }
+ if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
+ return nil, err
+ }
+ return ret, nil
+ // {
+ //   "description": "Creates a new user in a Cloud SQL instance.",
+ //   "httpMethod": "POST",
+ //   "id": "sql.users.insert",
+ //   "parameterOrder": [
+ //     "project",
+ //     "instance"
+ //   ],
+ //   "parameters": {
+ //     "instance": {
+ //       "description": "Database instance ID. This does not include the project ID.",
+ //       "location": "path",
+ //       "required": true,
+ //       "type": "string"
+ //     },
+ //     "project": {
+ //       "description": "Project ID of the project that contains the instance.",
+ //       "location": "path",
+ //       "required": true,
+ //       "type": "string"
+ //     }
+ //   },
+ //   "path": "projects/{project}/instances/{instance}/users",
+ //   "request": {
+ //     "$ref": "User"
+ //   },
+ //   "response": {
+ //     "$ref": "Operation"
+ //   },
+ //   "scopes": [
+ //     "https://www.googleapis.com/auth/cloud-platform",
+ //     "https://www.googleapis.com/auth/sqlservice.admin"
+ //   ]
+ // }
+
+}
+
+// method id "sql.users.list":
+
+type UsersListCall struct {
+ s *Service
+ project string
+ instance string
+ urlParams_ gensupport.URLParams
+ ifNoneMatch_ string
+ ctx_ context.Context
+}
+
+// List: Lists users in the specified Cloud SQL instance.
+func (r *UsersService) List(project string, instance string) *UsersListCall {
+ c := &UsersListCall{s: r.s, urlParams_: make(gensupport.URLParams)}
+ c.project = project
+ c.instance = instance
+ return c
+}
+
+// Fields allows partial responses to be retrieved. See
+// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *UsersListCall) Fields(s ...googleapi.Field) *UsersListCall {
+ c.urlParams_.Set("fields", googleapi.CombineFields(s))
+ return c
+}
+
+// IfNoneMatch sets the optional parameter which makes the operation
+// fail if the object's ETag matches the given value. This is useful for
+// getting updates only after the object has changed since the last
+// request. Use googleapi.IsNotModified to check whether the response
+// error from Do is the result of If-None-Match.
+func (c *UsersListCall) IfNoneMatch(entityTag string) *UsersListCall {
+ c.ifNoneMatch_ = entityTag
+ return c
+}
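A usage sketch (editor's note, not part of the vendored file): user mutations in this API return a long-running Operation, so callers typically chain sql.users.update with sql.operations.get polling. The import path, names, and polling interval are illustrative assumptions.

```go
// Sketch: update a database user, then poll the resulting Operation.
package example

import (
	"time"

	sqladmin "google.golang.org/api/sqladmin/v1beta4" // assumed import path
)

func setUserPassword(svc *sqladmin.Service, project, instance string) error {
	user := &sqladmin.User{Name: "app", Host: "%", Password: "new-secret"}
	// Update is keyed by the (host, name) query parameters, matching the
	// UsersService.Update builder defined below.
	op, err := svc.Users.Update(project, instance, user.Host, user.Name, user).Do()
	if err != nil {
		return err
	}
	// Poll sql.operations.get until Cloud SQL reports the operation DONE.
	for op.Status != "DONE" {
		time.Sleep(2 * time.Second)
		if op, err = svc.Operations.Get(project, op.Name).Do(); err != nil {
			return err
		}
	}
	return nil
}
```

+
+// Context sets the context to be used in this call's Do method. Any
+// pending HTTP request will be aborted if the provided context is
+// canceled.
+func (c *UsersListCall) Context(ctx context.Context) *UsersListCall {
+ c.ctx_ = ctx
+ return c
+}
+
+func (c *UsersListCall) doRequest(alt string) (*http.Response, error) {
+ var body io.Reader = nil
+ c.urlParams_.Set("alt", alt)
+ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/instances/{instance}/users")
+ urls += "?"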
+ c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "instance": c.instance, + }) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + req.Header.Set("If-None-Match", c.ifNoneMatch_) + } + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "sql.users.list" call. +// Exactly one of *UsersListResponse or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *UsersListResponse.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *UsersListCall) Do(opts ...googleapi.CallOption) (*UsersListResponse, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &UsersListResponse{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Lists users in the specified Cloud SQL instance.", + // "httpMethod": "GET", + // "id": "sql.users.list", + // "parameterOrder": [ + // "project", + // "instance" + // ], + // "parameters": { + // "instance": { + // "description": "Database instance ID. This does not include the project ID.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID of the project that contains the instance.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "projects/{project}/instances/{instance}/users", + // "response": { + // "$ref": "UsersListResponse" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/sqlservice.admin" + // ] + // } + +} + +// method id "sql.users.update": + +type UsersUpdateCall struct { + s *Service + project string + instance string + user *User + urlParams_ gensupport.URLParams + ctx_ context.Context +} + +// Update: Updates an existing user in a Cloud SQL instance. +func (r *UsersService) Update(project string, instance string, host string, name string, user *User) *UsersUpdateCall { + c := &UsersUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.instance = instance + c.urlParams_.Set("host", host) + c.urlParams_.Set("name", name) + c.user = user + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *UsersUpdateCall) Fields(s ...googleapi.Field) *UsersUpdateCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. 
+func (c *UsersUpdateCall) Context(ctx context.Context) *UsersUpdateCall { + c.ctx_ = ctx + return c +} + +func (c *UsersUpdateCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.user) + if err != nil { + return nil, err + } + ctype := "application/json" + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/instances/{instance}/users") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("PUT", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "instance": c.instance, + }) + req.Header.Set("Content-Type", ctype) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "sql.users.update" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *UsersUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Updates an existing user in a Cloud SQL instance.", + // "httpMethod": "PUT", + // "id": "sql.users.update", + // "parameterOrder": [ + // "project", + // "instance", + // "host", + // "name" + // ], + // "parameters": { + // "host": { + // "description": "Host of the user in the instance.", + // "location": "query", + // "required": true, + // "type": "string" + // }, + // "instance": { + // "description": "Database instance ID. 
This does not include the project ID.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "name": { + // "description": "Name of the user in the instance.", + // "location": "query", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID of the project that contains the instance.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "projects/{project}/instances/{instance}/users", + // "request": { + // "$ref": "User" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/sqlservice.admin" + // ] + // } + +} diff --git a/vendor/google.golang.org/api/storage/v1/storage-api.json b/vendor/google.golang.org/api/storage/v1/storage-api.json new file mode 100644 index 000000000..439d42afd --- /dev/null +++ b/vendor/google.golang.org/api/storage/v1/storage-api.json @@ -0,0 +1,2871 @@ +{ + "kind": "discovery#restDescription", + "etag": "\"bRFOOrZKfO9LweMbPqu0kcu6De8/iW6lW-fzl_pGlxeRN7h4J2G0OTQ\"", + "discoveryVersion": "v1", + "id": "storage:v1", + "name": "storage", + "version": "v1", + "revision": "20151229", + "title": "Cloud Storage JSON API", + "description": "Lets you store and retrieve potentially-large, immutable data objects.", + "ownerDomain": "google.com", + "ownerName": "Google", + "icons": { + "x16": "https://www.google.com/images/icons/product/cloud_storage-16.png", + "x32": "https://www.google.com/images/icons/product/cloud_storage-32.png" + }, + "documentationLink": "https://developers.google.com/storage/docs/json_api/", + "labels": [ + "labs" + ], + "protocol": "rest", + "baseUrl": "https://www.googleapis.com/storage/v1/", + "basePath": "/storage/v1/", + "rootUrl": "https://www.googleapis.com/", + "servicePath": "storage/v1/", + "batchPath": "batch", + "parameters": { + "alt": { + "type": "string", + "description": "Data format for the response.", + "default": "json", + "enum": [ + "json" + ], + "enumDescriptions": [ + "Responses with Content-Type of application/json" + ], + "location": "query" + }, + "fields": { + "type": "string", + "description": "Selector specifying which fields to include in a partial response.", + "location": "query" + }, + "key": { + "type": "string", + "description": "API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token.", + "location": "query" + }, + "oauth_token": { + "type": "string", + "description": "OAuth 2.0 token for the current user.", + "location": "query" + }, + "prettyPrint": { + "type": "boolean", + "description": "Returns response with indentations and line breaks.", + "default": "true", + "location": "query" + }, + "quotaUser": { + "type": "string", + "description": "Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters. Overrides userIp if both are provided.", + "location": "query" + }, + "userIp": { + "type": "string", + "description": "IP address of the site where the request originates. 
Use this if you want to enforce per-user limits.", + "location": "query" + } + }, + "auth": { + "oauth2": { + "scopes": { + "https://www.googleapis.com/auth/cloud-platform": { + "description": "View and manage your data across Google Cloud Platform services" + }, + "https://www.googleapis.com/auth/cloud-platform.read-only": { + "description": "View your data across Google Cloud Platform services" + }, + "https://www.googleapis.com/auth/devstorage.full_control": { + "description": "Manage your data and permissions in Google Cloud Storage" + }, + "https://www.googleapis.com/auth/devstorage.read_only": { + "description": "View your data in Google Cloud Storage" + }, + "https://www.googleapis.com/auth/devstorage.read_write": { + "description": "Manage your data in Google Cloud Storage" + } + } + } + }, + "schemas": { + "Bucket": { + "id": "Bucket", + "type": "object", + "description": "A bucket.", + "properties": { + "acl": { + "type": "array", + "description": "Access controls on the bucket.", + "items": { + "$ref": "BucketAccessControl" + }, + "annotations": { + "required": [ + "storage.buckets.update" + ] + } + }, + "cors": { + "type": "array", + "description": "The bucket's Cross-Origin Resource Sharing (CORS) configuration.", + "items": { + "type": "object", + "properties": { + "maxAgeSeconds": { + "type": "integer", + "description": "The value, in seconds, to return in the Access-Control-Max-Age header used in preflight responses.", + "format": "int32" + }, + "method": { + "type": "array", + "description": "The list of HTTP methods on which to include CORS response headers, (GET, OPTIONS, POST, etc) Note: \"*\" is permitted in the list of methods, and means \"any method\".", + "items": { + "type": "string" + } + }, + "origin": { + "type": "array", + "description": "The list of Origins eligible to receive CORS response headers. Note: \"*\" is permitted in the list of origins, and means \"any Origin\".", + "items": { + "type": "string" + } + }, + "responseHeader": { + "type": "array", + "description": "The list of HTTP headers other than the simple response headers to give permission for the user-agent to share across domains.", + "items": { + "type": "string" + } + } + } + } + }, + "defaultObjectAcl": { + "type": "array", + "description": "Default access controls to apply to new objects when no ACL is provided.", + "items": { + "$ref": "ObjectAccessControl" + } + }, + "etag": { + "type": "string", + "description": "HTTP 1.1 Entity tag for the bucket." + }, + "id": { + "type": "string", + "description": "The ID of the bucket." + }, + "kind": { + "type": "string", + "description": "The kind of item this is. For buckets, this is always storage#bucket.", + "default": "storage#bucket" + }, + "lifecycle": { + "type": "object", + "description": "The bucket's lifecycle configuration. See lifecycle management for more information.", + "properties": { + "rule": { + "type": "array", + "description": "A lifecycle management rule, which is made of an action to take and the condition(s) under which the action will be taken.", + "items": { + "type": "object", + "properties": { + "action": { + "type": "object", + "description": "The action to take.", + "properties": { + "type": { + "type": "string", + "description": "Type of the action. Currently, only Delete is supported." + } + } + }, + "condition": { + "type": "object", + "description": "The condition(s) under which the action will be taken.", + "properties": { + "age": { + "type": "integer", + "description": "Age of an object (in days). 
This condition is satisfied when an object reaches the specified age.", + "format": "int32" + }, + "createdBefore": { + "type": "string", + "description": "A date in RFC 3339 format with only the date part (for instance, \"2013-01-15\"). This condition is satisfied when an object is created before midnight of the specified date in UTC.", + "format": "date" + }, + "isLive": { + "type": "boolean", + "description": "Relevant only for versioned objects. If the value is true, this condition matches live objects; if the value is false, it matches archived objects." + }, + "numNewerVersions": { + "type": "integer", + "description": "Relevant only for versioned objects. If the value is N, this condition is satisfied when there are at least N versions (including the live version) newer than this version of the object.", + "format": "int32" + } + } + } + } + } + } + } + }, + "location": { + "type": "string", + "description": "The location of the bucket. Object data for objects in the bucket resides in physical storage within this region. Defaults to US. See the developer's guide for the authoritative list." + }, + "logging": { + "type": "object", + "description": "The bucket's logging configuration, which defines the destination bucket and optional name prefix for the current bucket's logs.", + "properties": { + "logBucket": { + "type": "string", + "description": "The destination bucket where the current bucket's logs should be placed." + }, + "logObjectPrefix": { + "type": "string", + "description": "A prefix for log object names." + } + } + }, + "metageneration": { + "type": "string", + "description": "The metadata generation of this bucket.", + "format": "int64" + }, + "name": { + "type": "string", + "description": "The name of the bucket.", + "annotations": { + "required": [ + "storage.buckets.insert" + ] + } + }, + "owner": { + "type": "object", + "description": "The owner of the bucket. This is always the project team's owner group.", + "properties": { + "entity": { + "type": "string", + "description": "The entity, in the form project-owner-projectId." + }, + "entityId": { + "type": "string", + "description": "The ID for the entity." + } + } + }, + "projectNumber": { + "type": "string", + "description": "The project number of the project the bucket belongs to.", + "format": "uint64" + }, + "selfLink": { + "type": "string", + "description": "The URI of this bucket." + }, + "storageClass": { + "type": "string", + "description": "The bucket's storage class. This defines how objects in the bucket are stored and determines the SLA and the cost of storage. Values include STANDARD, NEARLINE and DURABLE_REDUCED_AVAILABILITY. Defaults to STANDARD. For more information, see storage classes." + }, + "timeCreated": { + "type": "string", + "description": "The creation time of the bucket in RFC 3339 format.", + "format": "date-time" + }, + "updated": { + "type": "string", + "description": "The modification time of the bucket in RFC 3339 format.", + "format": "date-time" + }, + "versioning": { + "type": "object", + "description": "The bucket's versioning configuration.", + "properties": { + "enabled": { + "type": "boolean", + "description": "While set to true, versioning is fully enabled for this bucket." + } + } + }, + "website": { + "type": "object", + "description": "The bucket's website configuration.", + "properties": { + "mainPageSuffix": { + "type": "string", + "description": "Behaves as the bucket's directory index where missing objects are treated as potential directories." 
+ }, + "notFoundPage": { + "type": "string", + "description": "The custom object to return when a requested resource is not found." + } + } + } + } + }, + "BucketAccessControl": { + "id": "BucketAccessControl", + "type": "object", + "description": "An access-control entry.", + "properties": { + "bucket": { + "type": "string", + "description": "The name of the bucket." + }, + "domain": { + "type": "string", + "description": "The domain associated with the entity, if any." + }, + "email": { + "type": "string", + "description": "The email address associated with the entity, if any." + }, + "entity": { + "type": "string", + "description": "The entity holding the permission, in one of the following forms: \n- user-userId \n- user-email \n- group-groupId \n- group-email \n- domain-domain \n- project-team-projectId \n- allUsers \n- allAuthenticatedUsers Examples: \n- The user liz@example.com would be user-liz@example.com. \n- The group example@googlegroups.com would be group-example@googlegroups.com. \n- To refer to all members of the Google Apps for Business domain example.com, the entity would be domain-example.com.", + "annotations": { + "required": [ + "storage.bucketAccessControls.insert" + ] + } + }, + "entityId": { + "type": "string", + "description": "The ID for the entity, if any." + }, + "etag": { + "type": "string", + "description": "HTTP 1.1 Entity tag for the access-control entry." + }, + "id": { + "type": "string", + "description": "The ID of the access-control entry." + }, + "kind": { + "type": "string", + "description": "The kind of item this is. For bucket access control entries, this is always storage#bucketAccessControl.", + "default": "storage#bucketAccessControl" + }, + "projectTeam": { + "type": "object", + "description": "The project team associated with the entity, if any.", + "properties": { + "projectNumber": { + "type": "string", + "description": "The project number." + }, + "team": { + "type": "string", + "description": "The team. Can be owners, editors, or viewers." + } + } + }, + "role": { + "type": "string", + "description": "The access permission for the entity. Can be READER, WRITER, or OWNER.", + "annotations": { + "required": [ + "storage.bucketAccessControls.insert" + ] + } + }, + "selfLink": { + "type": "string", + "description": "The link to this access-control entry." + } + } + }, + "BucketAccessControls": { + "id": "BucketAccessControls", + "type": "object", + "description": "An access-control list.", + "properties": { + "items": { + "type": "array", + "description": "The list of items.", + "items": { + "$ref": "BucketAccessControl" + } + }, + "kind": { + "type": "string", + "description": "The kind of item this is. For lists of bucket access control entries, this is always storage#bucketAccessControls.", + "default": "storage#bucketAccessControls" + } + } + }, + "Buckets": { + "id": "Buckets", + "type": "object", + "description": "A list of buckets.", + "properties": { + "items": { + "type": "array", + "description": "The list of items.", + "items": { + "$ref": "Bucket" + } + }, + "kind": { + "type": "string", + "description": "The kind of item this is. For lists of buckets, this is always storage#buckets.", + "default": "storage#buckets" + }, + "nextPageToken": { + "type": "string", + "description": "The continuation token, used to page through large result sets. Provide this value in a subsequent request to return the next page of results." 
+ }
+ }
+ },
+ "Channel": {
+ "id": "Channel",
+ "type": "object",
+ "description": "A notification channel used to watch for resource changes.",
+ "properties": {
+ "address": {
+ "type": "string",
+ "description": "The address where notifications are delivered for this channel."
+ },
+ "expiration": {
+ "type": "string",
+ "description": "Date and time of notification channel expiration, expressed as a Unix timestamp, in milliseconds. Optional.",
+ "format": "int64"
+ },
+ "id": {
+ "type": "string",
+ "description": "A UUID or similar unique string that identifies this channel."
+ },
+ "kind": {
+ "type": "string",
+ "description": "Identifies this as a notification channel used to watch for changes to a resource. Value: the fixed string \"api#channel\".",
+ "default": "api#channel"
+ },
+ "params": {
+ "type": "object",
+ "description": "Additional parameters controlling delivery channel behavior. Optional.",
+ "additionalProperties": {
+ "type": "string",
+ "description": "Declares a new parameter by name."
+ }
+ },
+ "payload": {
+ "type": "boolean",
+ "description": "A Boolean value to indicate whether payload is wanted. Optional."
+ },
+ "resourceId": {
+ "type": "string",
+ "description": "An opaque ID that identifies the resource being watched on this channel. Stable across different API versions."
+ },
+ "resourceUri": {
+ "type": "string",
+ "description": "A version-specific identifier for the watched resource."
+ },
+ "token": {
+ "type": "string",
+ "description": "An arbitrary string delivered to the target address with each notification delivered over this channel. Optional."
+ },
+ "type": {
+ "type": "string",
+ "description": "The type of delivery mechanism used for this channel."
+ }
+ }
+ },
+ "ComposeRequest": {
+ "id": "ComposeRequest",
+ "type": "object",
+ "description": "A Compose request.",
+ "properties": {
+ "destination": {
+ "$ref": "Object",
+ "description": "Properties of the resulting object."
+ },
+ "kind": {
+ "type": "string",
+ "description": "The kind of item this is.",
+ "default": "storage#composeRequest"
+ },
+ "sourceObjects": {
+ "type": "array",
+ "description": "The list of source objects that will be concatenated into a single object.",
+ "items": {
+ "type": "object",
+ "properties": {
+ "generation": {
+ "type": "string",
+ "description": "The generation of this object to use as the source.",
+ "format": "int64"
+ },
+ "name": {
+ "type": "string",
+ "description": "The source object's name. The source object's bucket is implicitly the destination bucket.",
+ "annotations": {
+ "required": [
+ "storage.objects.compose"
+ ]
+ }
+ },
+ "objectPreconditions": {
+ "type": "object",
+ "description": "Conditions that must be met for this operation to execute.",
+ "properties": {
+ "ifGenerationMatch": {
+ "type": "string",
+ "description": "Only perform the composition if the generation of the source object that would be used matches this value. 
If this value and a generation are both specified, they must be the same value or the call will fail.", + "format": "int64" + } + } + } + } + }, + "annotations": { + "required": [ + "storage.objects.compose" + ] + } + } + } + }, + "Object": { + "id": "Object", + "type": "object", + "description": "An object.", + "properties": { + "acl": { + "type": "array", + "description": "Access controls on the object.", + "items": { + "$ref": "ObjectAccessControl" + }, + "annotations": { + "required": [ + "storage.objects.update" + ] + } + }, + "bucket": { + "type": "string", + "description": "The name of the bucket containing this object." + }, + "cacheControl": { + "type": "string", + "description": "Cache-Control directive for the object data." + }, + "componentCount": { + "type": "integer", + "description": "Number of underlying components that make up this object. Components are accumulated by compose operations.", + "format": "int32" + }, + "contentDisposition": { + "type": "string", + "description": "Content-Disposition of the object data." + }, + "contentEncoding": { + "type": "string", + "description": "Content-Encoding of the object data." + }, + "contentLanguage": { + "type": "string", + "description": "Content-Language of the object data." + }, + "contentType": { + "type": "string", + "description": "Content-Type of the object data.", + "annotations": { + "required": [ + "storage.objects.insert", + "storage.objects.update" + ] + } + }, + "crc32c": { + "type": "string", + "description": "CRC32c checksum, as described in RFC 4960, Appendix B; encoded using base64 in big-endian byte order. For more information about using the CRC32c checksum, see Hashes and ETags: Best Practices." + }, + "customerEncryption": { + "type": "object", + "description": "Metadata of customer-supplied encryption key, if the object is encrypted by such a key.", + "properties": { + "encryptionAlgorithm": { + "type": "string", + "description": "The encryption algorithm." + }, + "keySha256": { + "type": "string", + "description": "SHA256 hash value of the encryption key." + } + } + }, + "etag": { + "type": "string", + "description": "HTTP 1.1 Entity tag for the object." + }, + "generation": { + "type": "string", + "description": "The content generation of this object. Used for object versioning.", + "format": "int64" + }, + "id": { + "type": "string", + "description": "The ID of the object." + }, + "kind": { + "type": "string", + "description": "The kind of item this is. For objects, this is always storage#object.", + "default": "storage#object" + }, + "md5Hash": { + "type": "string", + "description": "MD5 hash of the data; encoded using base64. For more information about using the MD5 hash, see Hashes and ETags: Best Practices." + }, + "mediaLink": { + "type": "string", + "description": "Media download link." + }, + "metadata": { + "type": "object", + "description": "User-provided metadata, in key/value pairs.", + "additionalProperties": { + "type": "string", + "description": "An individual metadata entry." + } + }, + "metageneration": { + "type": "string", + "description": "The version of the metadata for this object at this generation. Used for preconditions and for detecting changes in metadata. A metageneration number is only meaningful in the context of a particular generation of a particular object.", + "format": "int64" + }, + "name": { + "type": "string", + "description": "The name of this object. Required if not specified by URL parameter." 
+ }, + "owner": { + "type": "object", + "description": "The owner of the object. This will always be the uploader of the object.", + "properties": { + "entity": { + "type": "string", + "description": "The entity, in the form user-userId." + }, + "entityId": { + "type": "string", + "description": "The ID for the entity." + } + } + }, + "selfLink": { + "type": "string", + "description": "The link to this object." + }, + "size": { + "type": "string", + "description": "Content-Length of the data in bytes.", + "format": "uint64" + }, + "storageClass": { + "type": "string", + "description": "Storage class of the object." + }, + "timeCreated": { + "type": "string", + "description": "The creation time of the object in RFC 3339 format.", + "format": "date-time" + }, + "timeDeleted": { + "type": "string", + "description": "The deletion time of the object in RFC 3339 format. Will be returned if and only if this version of the object has been deleted.", + "format": "date-time" + }, + "updated": { + "type": "string", + "description": "The modification time of the object metadata in RFC 3339 format.", + "format": "date-time" + } + } + }, + "ObjectAccessControl": { + "id": "ObjectAccessControl", + "type": "object", + "description": "An access-control entry.", + "properties": { + "bucket": { + "type": "string", + "description": "The name of the bucket." + }, + "domain": { + "type": "string", + "description": "The domain associated with the entity, if any." + }, + "email": { + "type": "string", + "description": "The email address associated with the entity, if any." + }, + "entity": { + "type": "string", + "description": "The entity holding the permission, in one of the following forms: \n- user-userId \n- user-email \n- group-groupId \n- group-email \n- domain-domain \n- project-team-projectId \n- allUsers \n- allAuthenticatedUsers Examples: \n- The user liz@example.com would be user-liz@example.com. \n- The group example@googlegroups.com would be group-example@googlegroups.com. \n- To refer to all members of the Google Apps for Business domain example.com, the entity would be domain-example.com." + }, + "entityId": { + "type": "string", + "description": "The ID for the entity, if any." + }, + "etag": { + "type": "string", + "description": "HTTP 1.1 Entity tag for the access-control entry." + }, + "generation": { + "type": "string", + "description": "The content generation of the object.", + "format": "int64" + }, + "id": { + "type": "string", + "description": "The ID of the access-control entry." + }, + "kind": { + "type": "string", + "description": "The kind of item this is. For object access control entries, this is always storage#objectAccessControl.", + "default": "storage#objectAccessControl" + }, + "object": { + "type": "string", + "description": "The name of the object." + }, + "projectTeam": { + "type": "object", + "description": "The project team associated with the entity, if any.", + "properties": { + "projectNumber": { + "type": "string", + "description": "The project number." + }, + "team": { + "type": "string", + "description": "The team. Can be owners, editors, or viewers." + } + } + }, + "role": { + "type": "string", + "description": "The access permission for the entity. Can be READER or OWNER." + }, + "selfLink": { + "type": "string", + "description": "The link to this access-control entry." 
+ } + } + }, + "ObjectAccessControls": { + "id": "ObjectAccessControls", + "type": "object", + "description": "An access-control list.", + "properties": { + "items": { + "type": "array", + "description": "The list of items.", + "items": { + "type": "any" + } + }, + "kind": { + "type": "string", + "description": "The kind of item this is. For lists of object access control entries, this is always storage#objectAccessControls.", + "default": "storage#objectAccessControls" + } + } + }, + "Objects": { + "id": "Objects", + "type": "object", + "description": "A list of objects.", + "properties": { + "items": { + "type": "array", + "description": "The list of items.", + "items": { + "$ref": "Object" + } + }, + "kind": { + "type": "string", + "description": "The kind of item this is. For lists of objects, this is always storage#objects.", + "default": "storage#objects" + }, + "nextPageToken": { + "type": "string", + "description": "The continuation token, used to page through large result sets. Provide this value in a subsequent request to return the next page of results." + }, + "prefixes": { + "type": "array", + "description": "The list of prefixes of objects matching-but-not-listed up to and including the requested delimiter.", + "items": { + "type": "string" + } + } + } + }, + "RewriteResponse": { + "id": "RewriteResponse", + "type": "object", + "description": "A rewrite response.", + "properties": { + "done": { + "type": "boolean", + "description": "true if the copy is finished; otherwise, false if the copy is in progress. This property is always present in the response." + }, + "kind": { + "type": "string", + "description": "The kind of item this is.", + "default": "storage#rewriteResponse" + }, + "objectSize": { + "type": "string", + "description": "The total size of the object being copied in bytes. This property is always present in the response.", + "format": "uint64" + }, + "resource": { + "$ref": "Object", + "description": "A resource containing the metadata for the copied-to object. This property is present in the response only when copying completes." + }, + "rewriteToken": { + "type": "string", + "description": "A token to use in subsequent requests to continue copying data. This token is present in the response only when there is more data to copy." + }, + "totalBytesRewritten": { + "type": "string", + "description": "The total bytes written so far, which can be used to provide a waiting user with a progress indicator. This property is always present in the response.", + "format": "uint64" + } + } + } + }, + "resources": { + "bucketAccessControls": { + "methods": { + "delete": { + "id": "storage.bucketAccessControls.delete", + "path": "b/{bucket}/acl/{entity}", + "httpMethod": "DELETE", + "description": "Permanently deletes the ACL entry for the specified entity on the specified bucket.", + "parameters": { + "bucket": { + "type": "string", + "description": "Name of a bucket.", + "required": true, + "location": "path" + }, + "entity": { + "type": "string", + "description": "The entity holding the permission. 
Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.", + "required": true, + "location": "path" + } + }, + "parameterOrder": [ + "bucket", + "entity" + ], + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/devstorage.full_control" + ] + }, + "get": { + "id": "storage.bucketAccessControls.get", + "path": "b/{bucket}/acl/{entity}", + "httpMethod": "GET", + "description": "Returns the ACL entry for the specified entity on the specified bucket.", + "parameters": { + "bucket": { + "type": "string", + "description": "Name of a bucket.", + "required": true, + "location": "path" + }, + "entity": { + "type": "string", + "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.", + "required": true, + "location": "path" + } + }, + "parameterOrder": [ + "bucket", + "entity" + ], + "response": { + "$ref": "BucketAccessControl" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/devstorage.full_control" + ] + }, + "insert": { + "id": "storage.bucketAccessControls.insert", + "path": "b/{bucket}/acl", + "httpMethod": "POST", + "description": "Creates a new ACL entry on the specified bucket.", + "parameters": { + "bucket": { + "type": "string", + "description": "Name of a bucket.", + "required": true, + "location": "path" + } + }, + "parameterOrder": [ + "bucket" + ], + "request": { + "$ref": "BucketAccessControl" + }, + "response": { + "$ref": "BucketAccessControl" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/devstorage.full_control" + ] + }, + "list": { + "id": "storage.bucketAccessControls.list", + "path": "b/{bucket}/acl", + "httpMethod": "GET", + "description": "Retrieves ACL entries on the specified bucket.", + "parameters": { + "bucket": { + "type": "string", + "description": "Name of a bucket.", + "required": true, + "location": "path" + } + }, + "parameterOrder": [ + "bucket" + ], + "response": { + "$ref": "BucketAccessControls" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/devstorage.full_control" + ] + }, + "patch": { + "id": "storage.bucketAccessControls.patch", + "path": "b/{bucket}/acl/{entity}", + "httpMethod": "PATCH", + "description": "Updates an ACL entry on the specified bucket. This method supports patch semantics.", + "parameters": { + "bucket": { + "type": "string", + "description": "Name of a bucket.", + "required": true, + "location": "path" + }, + "entity": { + "type": "string", + "description": "The entity holding the permission. 
Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.", + "required": true, + "location": "path" + } + }, + "parameterOrder": [ + "bucket", + "entity" + ], + "request": { + "$ref": "BucketAccessControl" + }, + "response": { + "$ref": "BucketAccessControl" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/devstorage.full_control" + ] + }, + "update": { + "id": "storage.bucketAccessControls.update", + "path": "b/{bucket}/acl/{entity}", + "httpMethod": "PUT", + "description": "Updates an ACL entry on the specified bucket.", + "parameters": { + "bucket": { + "type": "string", + "description": "Name of a bucket.", + "required": true, + "location": "path" + }, + "entity": { + "type": "string", + "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.", + "required": true, + "location": "path" + } + }, + "parameterOrder": [ + "bucket", + "entity" + ], + "request": { + "$ref": "BucketAccessControl" + }, + "response": { + "$ref": "BucketAccessControl" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/devstorage.full_control" + ] + } + } + }, + "buckets": { + "methods": { + "delete": { + "id": "storage.buckets.delete", + "path": "b/{bucket}", + "httpMethod": "DELETE", + "description": "Permanently deletes an empty bucket.", + "parameters": { + "bucket": { + "type": "string", + "description": "Name of a bucket.", + "required": true, + "location": "path" + }, + "ifMetagenerationMatch": { + "type": "string", + "description": "If set, only deletes the bucket if its metageneration matches this value.", + "format": "int64", + "location": "query" + }, + "ifMetagenerationNotMatch": { + "type": "string", + "description": "If set, only deletes the bucket if its metageneration does not match this value.", + "format": "int64", + "location": "query" + } + }, + "parameterOrder": [ + "bucket" + ], + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/devstorage.full_control", + "https://www.googleapis.com/auth/devstorage.read_write" + ] + }, + "get": { + "id": "storage.buckets.get", + "path": "b/{bucket}", + "httpMethod": "GET", + "description": "Returns metadata for the specified bucket.", + "parameters": { + "bucket": { + "type": "string", + "description": "Name of a bucket.", + "required": true, + "location": "path" + }, + "ifMetagenerationMatch": { + "type": "string", + "description": "Makes the return of the bucket metadata conditional on whether the bucket's current metageneration matches the given value.", + "format": "int64", + "location": "query" + }, + "ifMetagenerationNotMatch": { + "type": "string", + "description": "Makes the return of the bucket metadata conditional on whether the bucket's current metageneration does not match the given value.", + "format": "int64", + "location": "query" + }, + "projection": { + "type": "string", + "description": "Set of properties to return. Defaults to noAcl.", + "enum": [ + "full", + "noAcl" + ], + "enumDescriptions": [ + "Include all properties.", + "Omit acl and defaultObjectAcl properties." 
+ ], + "location": "query" + } + }, + "parameterOrder": [ + "bucket" + ], + "response": { + "$ref": "Bucket" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only", + "https://www.googleapis.com/auth/devstorage.full_control", + "https://www.googleapis.com/auth/devstorage.read_only", + "https://www.googleapis.com/auth/devstorage.read_write" + ] + }, + "insert": { + "id": "storage.buckets.insert", + "path": "b", + "httpMethod": "POST", + "description": "Creates a new bucket.", + "parameters": { + "predefinedAcl": { + "type": "string", + "description": "Apply a predefined set of access controls to this bucket.", + "enum": [ + "authenticatedRead", + "private", + "projectPrivate", + "publicRead", + "publicReadWrite" + ], + "enumDescriptions": [ + "Project team owners get OWNER access, and allAuthenticatedUsers get READER access.", + "Project team owners get OWNER access.", + "Project team members get access according to their roles.", + "Project team owners get OWNER access, and allUsers get READER access.", + "Project team owners get OWNER access, and allUsers get WRITER access." + ], + "location": "query" + }, + "predefinedDefaultObjectAcl": { + "type": "string", + "description": "Apply a predefined set of default object access controls to this bucket.", + "enum": [ + "authenticatedRead", + "bucketOwnerFullControl", + "bucketOwnerRead", + "private", + "projectPrivate", + "publicRead" + ], + "enumDescriptions": [ + "Object owner gets OWNER access, and allAuthenticatedUsers get READER access.", + "Object owner gets OWNER access, and project team owners get OWNER access.", + "Object owner gets OWNER access, and project team owners get READER access.", + "Object owner gets OWNER access.", + "Object owner gets OWNER access, and project team members get access according to their roles.", + "Object owner gets OWNER access, and allUsers get READER access." + ], + "location": "query" + }, + "project": { + "type": "string", + "description": "A valid API project identifier.", + "required": true, + "location": "query" + }, + "projection": { + "type": "string", + "description": "Set of properties to return. Defaults to noAcl, unless the bucket resource specifies acl or defaultObjectAcl properties, when it defaults to full.", + "enum": [ + "full", + "noAcl" + ], + "enumDescriptions": [ + "Include all properties.", + "Omit acl and defaultObjectAcl properties." 
+ ], + "location": "query" + } + }, + "parameterOrder": [ + "project" + ], + "request": { + "$ref": "Bucket" + }, + "response": { + "$ref": "Bucket" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/devstorage.full_control", + "https://www.googleapis.com/auth/devstorage.read_write" + ] + }, + "list": { + "id": "storage.buckets.list", + "path": "b", + "httpMethod": "GET", + "description": "Retrieves a list of buckets for a given project.", + "parameters": { + "maxResults": { + "type": "integer", + "description": "Maximum number of buckets to return.", + "format": "uint32", + "minimum": "0", + "location": "query" + }, + "pageToken": { + "type": "string", + "description": "A previously-returned page token representing part of the larger set of results to view.", + "location": "query" + }, + "prefix": { + "type": "string", + "description": "Filter results to buckets whose names begin with this prefix.", + "location": "query" + }, + "project": { + "type": "string", + "description": "A valid API project identifier.", + "required": true, + "location": "query" + }, + "projection": { + "type": "string", + "description": "Set of properties to return. Defaults to noAcl.", + "enum": [ + "full", + "noAcl" + ], + "enumDescriptions": [ + "Include all properties.", + "Omit acl and defaultObjectAcl properties." + ], + "location": "query" + } + }, + "parameterOrder": [ + "project" + ], + "response": { + "$ref": "Buckets" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only", + "https://www.googleapis.com/auth/devstorage.full_control", + "https://www.googleapis.com/auth/devstorage.read_only", + "https://www.googleapis.com/auth/devstorage.read_write" + ] + }, + "patch": { + "id": "storage.buckets.patch", + "path": "b/{bucket}", + "httpMethod": "PATCH", + "description": "Updates a bucket. This method supports patch semantics.", + "parameters": { + "bucket": { + "type": "string", + "description": "Name of a bucket.", + "required": true, + "location": "path" + }, + "ifMetagenerationMatch": { + "type": "string", + "description": "Makes the return of the bucket metadata conditional on whether the bucket's current metageneration matches the given value.", + "format": "int64", + "location": "query" + }, + "ifMetagenerationNotMatch": { + "type": "string", + "description": "Makes the return of the bucket metadata conditional on whether the bucket's current metageneration does not match the given value.", + "format": "int64", + "location": "query" + }, + "predefinedAcl": { + "type": "string", + "description": "Apply a predefined set of access controls to this bucket.", + "enum": [ + "authenticatedRead", + "private", + "projectPrivate", + "publicRead", + "publicReadWrite" + ], + "enumDescriptions": [ + "Project team owners get OWNER access, and allAuthenticatedUsers get READER access.", + "Project team owners get OWNER access.", + "Project team members get access according to their roles.", + "Project team owners get OWNER access, and allUsers get READER access.", + "Project team owners get OWNER access, and allUsers get WRITER access." 
+ ], + "location": "query" + }, + "predefinedDefaultObjectAcl": { + "type": "string", + "description": "Apply a predefined set of default object access controls to this bucket.", + "enum": [ + "authenticatedRead", + "bucketOwnerFullControl", + "bucketOwnerRead", + "private", + "projectPrivate", + "publicRead" + ], + "enumDescriptions": [ + "Object owner gets OWNER access, and allAuthenticatedUsers get READER access.", + "Object owner gets OWNER access, and project team owners get OWNER access.", + "Object owner gets OWNER access, and project team owners get READER access.", + "Object owner gets OWNER access.", + "Object owner gets OWNER access, and project team members get access according to their roles.", + "Object owner gets OWNER access, and allUsers get READER access." + ], + "location": "query" + }, + "projection": { + "type": "string", + "description": "Set of properties to return. Defaults to full.", + "enum": [ + "full", + "noAcl" + ], + "enumDescriptions": [ + "Include all properties.", + "Omit acl and defaultObjectAcl properties." + ], + "location": "query" + } + }, + "parameterOrder": [ + "bucket" + ], + "request": { + "$ref": "Bucket" + }, + "response": { + "$ref": "Bucket" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/devstorage.full_control", + "https://www.googleapis.com/auth/devstorage.read_write" + ] + }, + "update": { + "id": "storage.buckets.update", + "path": "b/{bucket}", + "httpMethod": "PUT", + "description": "Updates a bucket.", + "parameters": { + "bucket": { + "type": "string", + "description": "Name of a bucket.", + "required": true, + "location": "path" + }, + "ifMetagenerationMatch": { + "type": "string", + "description": "Makes the return of the bucket metadata conditional on whether the bucket's current metageneration matches the given value.", + "format": "int64", + "location": "query" + }, + "ifMetagenerationNotMatch": { + "type": "string", + "description": "Makes the return of the bucket metadata conditional on whether the bucket's current metageneration does not match the given value.", + "format": "int64", + "location": "query" + }, + "predefinedAcl": { + "type": "string", + "description": "Apply a predefined set of access controls to this bucket.", + "enum": [ + "authenticatedRead", + "private", + "projectPrivate", + "publicRead", + "publicReadWrite" + ], + "enumDescriptions": [ + "Project team owners get OWNER access, and allAuthenticatedUsers get READER access.", + "Project team owners get OWNER access.", + "Project team members get access according to their roles.", + "Project team owners get OWNER access, and allUsers get READER access.", + "Project team owners get OWNER access, and allUsers get WRITER access." + ], + "location": "query" + }, + "predefinedDefaultObjectAcl": { + "type": "string", + "description": "Apply a predefined set of default object access controls to this bucket.", + "enum": [ + "authenticatedRead", + "bucketOwnerFullControl", + "bucketOwnerRead", + "private", + "projectPrivate", + "publicRead" + ], + "enumDescriptions": [ + "Object owner gets OWNER access, and allAuthenticatedUsers get READER access.", + "Object owner gets OWNER access, and project team owners get OWNER access.", + "Object owner gets OWNER access, and project team owners get READER access.", + "Object owner gets OWNER access.", + "Object owner gets OWNER access, and project team members get access according to their roles.", + "Object owner gets OWNER access, and allUsers get READER access." 
+ ], + "location": "query" + }, + "projection": { + "type": "string", + "description": "Set of properties to return. Defaults to full.", + "enum": [ + "full", + "noAcl" + ], + "enumDescriptions": [ + "Include all properties.", + "Omit acl and defaultObjectAcl properties." + ], + "location": "query" + } + }, + "parameterOrder": [ + "bucket" + ], + "request": { + "$ref": "Bucket" + }, + "response": { + "$ref": "Bucket" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/devstorage.full_control", + "https://www.googleapis.com/auth/devstorage.read_write" + ] + } + } + }, + "channels": { + "methods": { + "stop": { + "id": "storage.channels.stop", + "path": "channels/stop", + "httpMethod": "POST", + "description": "Stop watching resources through this channel.", + "request": { + "$ref": "Channel", + "parameterName": "resource" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only", + "https://www.googleapis.com/auth/devstorage.full_control", + "https://www.googleapis.com/auth/devstorage.read_only", + "https://www.googleapis.com/auth/devstorage.read_write" + ] + } + } + }, + "defaultObjectAccessControls": { + "methods": { + "delete": { + "id": "storage.defaultObjectAccessControls.delete", + "path": "b/{bucket}/defaultObjectAcl/{entity}", + "httpMethod": "DELETE", + "description": "Permanently deletes the default object ACL entry for the specified entity on the specified bucket.", + "parameters": { + "bucket": { + "type": "string", + "description": "Name of a bucket.", + "required": true, + "location": "path" + }, + "entity": { + "type": "string", + "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.", + "required": true, + "location": "path" + } + }, + "parameterOrder": [ + "bucket", + "entity" + ], + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/devstorage.full_control" + ] + }, + "get": { + "id": "storage.defaultObjectAccessControls.get", + "path": "b/{bucket}/defaultObjectAcl/{entity}", + "httpMethod": "GET", + "description": "Returns the default object ACL entry for the specified entity on the specified bucket.", + "parameters": { + "bucket": { + "type": "string", + "description": "Name of a bucket.", + "required": true, + "location": "path" + }, + "entity": { + "type": "string", + "description": "The entity holding the permission. 
Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.", + "required": true, + "location": "path" + } + }, + "parameterOrder": [ + "bucket", + "entity" + ], + "response": { + "$ref": "ObjectAccessControl" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/devstorage.full_control" + ] + }, + "insert": { + "id": "storage.defaultObjectAccessControls.insert", + "path": "b/{bucket}/defaultObjectAcl", + "httpMethod": "POST", + "description": "Creates a new default object ACL entry on the specified bucket.", + "parameters": { + "bucket": { + "type": "string", + "description": "Name of a bucket.", + "required": true, + "location": "path" + } + }, + "parameterOrder": [ + "bucket" + ], + "request": { + "$ref": "ObjectAccessControl" + }, + "response": { + "$ref": "ObjectAccessControl" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/devstorage.full_control" + ] + }, + "list": { + "id": "storage.defaultObjectAccessControls.list", + "path": "b/{bucket}/defaultObjectAcl", + "httpMethod": "GET", + "description": "Retrieves default object ACL entries on the specified bucket.", + "parameters": { + "bucket": { + "type": "string", + "description": "Name of a bucket.", + "required": true, + "location": "path" + }, + "ifMetagenerationMatch": { + "type": "string", + "description": "If present, only return default ACL listing if the bucket's current metageneration matches this value.", + "format": "int64", + "location": "query" + }, + "ifMetagenerationNotMatch": { + "type": "string", + "description": "If present, only return default ACL listing if the bucket's current metageneration does not match the given value.", + "format": "int64", + "location": "query" + } + }, + "parameterOrder": [ + "bucket" + ], + "response": { + "$ref": "ObjectAccessControls" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/devstorage.full_control" + ] + }, + "patch": { + "id": "storage.defaultObjectAccessControls.patch", + "path": "b/{bucket}/defaultObjectAcl/{entity}", + "httpMethod": "PATCH", + "description": "Updates a default object ACL entry on the specified bucket. This method supports patch semantics.", + "parameters": { + "bucket": { + "type": "string", + "description": "Name of a bucket.", + "required": true, + "location": "path" + }, + "entity": { + "type": "string", + "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.", + "required": true, + "location": "path" + } + }, + "parameterOrder": [ + "bucket", + "entity" + ], + "request": { + "$ref": "ObjectAccessControl" + }, + "response": { + "$ref": "ObjectAccessControl" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/devstorage.full_control" + ] + }, + "update": { + "id": "storage.defaultObjectAccessControls.update", + "path": "b/{bucket}/defaultObjectAcl/{entity}", + "httpMethod": "PUT", + "description": "Updates a default object ACL entry on the specified bucket.", + "parameters": { + "bucket": { + "type": "string", + "description": "Name of a bucket.", + "required": true, + "location": "path" + }, + "entity": { + "type": "string", + "description": "The entity holding the permission. 
Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.", + "required": true, + "location": "path" + } + }, + "parameterOrder": [ + "bucket", + "entity" + ], + "request": { + "$ref": "ObjectAccessControl" + }, + "response": { + "$ref": "ObjectAccessControl" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/devstorage.full_control" + ] + } + } + }, + "objectAccessControls": { + "methods": { + "delete": { + "id": "storage.objectAccessControls.delete", + "path": "b/{bucket}/o/{object}/acl/{entity}", + "httpMethod": "DELETE", + "description": "Permanently deletes the ACL entry for the specified entity on the specified object.", + "parameters": { + "bucket": { + "type": "string", + "description": "Name of a bucket.", + "required": true, + "location": "path" + }, + "entity": { + "type": "string", + "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.", + "required": true, + "location": "path" + }, + "generation": { + "type": "string", + "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).", + "format": "int64", + "location": "query" + }, + "object": { + "type": "string", + "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + "required": true, + "location": "path" + } + }, + "parameterOrder": [ + "bucket", + "object", + "entity" + ], + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/devstorage.full_control" + ] + }, + "get": { + "id": "storage.objectAccessControls.get", + "path": "b/{bucket}/o/{object}/acl/{entity}", + "httpMethod": "GET", + "description": "Returns the ACL entry for the specified entity on the specified object.", + "parameters": { + "bucket": { + "type": "string", + "description": "Name of a bucket.", + "required": true, + "location": "path" + }, + "entity": { + "type": "string", + "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.", + "required": true, + "location": "path" + }, + "generation": { + "type": "string", + "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).", + "format": "int64", + "location": "query" + }, + "object": { + "type": "string", + "description": "Name of the object. 
For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + "required": true, + "location": "path" + } + }, + "parameterOrder": [ + "bucket", + "object", + "entity" + ], + "response": { + "$ref": "ObjectAccessControl" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/devstorage.full_control" + ] + }, + "insert": { + "id": "storage.objectAccessControls.insert", + "path": "b/{bucket}/o/{object}/acl", + "httpMethod": "POST", + "description": "Creates a new ACL entry on the specified object.", + "parameters": { + "bucket": { + "type": "string", + "description": "Name of a bucket.", + "required": true, + "location": "path" + }, + "generation": { + "type": "string", + "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).", + "format": "int64", + "location": "query" + }, + "object": { + "type": "string", + "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + "required": true, + "location": "path" + } + }, + "parameterOrder": [ + "bucket", + "object" + ], + "request": { + "$ref": "ObjectAccessControl" + }, + "response": { + "$ref": "ObjectAccessControl" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/devstorage.full_control" + ] + }, + "list": { + "id": "storage.objectAccessControls.list", + "path": "b/{bucket}/o/{object}/acl", + "httpMethod": "GET", + "description": "Retrieves ACL entries on the specified object.", + "parameters": { + "bucket": { + "type": "string", + "description": "Name of a bucket.", + "required": true, + "location": "path" + }, + "generation": { + "type": "string", + "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).", + "format": "int64", + "location": "query" + }, + "object": { + "type": "string", + "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + "required": true, + "location": "path" + } + }, + "parameterOrder": [ + "bucket", + "object" + ], + "response": { + "$ref": "ObjectAccessControls" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/devstorage.full_control" + ] + }, + "patch": { + "id": "storage.objectAccessControls.patch", + "path": "b/{bucket}/o/{object}/acl/{entity}", + "httpMethod": "PATCH", + "description": "Updates an ACL entry on the specified object. This method supports patch semantics.", + "parameters": { + "bucket": { + "type": "string", + "description": "Name of a bucket.", + "required": true, + "location": "path" + }, + "entity": { + "type": "string", + "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.", + "required": true, + "location": "path" + }, + "generation": { + "type": "string", + "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).", + "format": "int64", + "location": "query" + }, + "object": { + "type": "string", + "description": "Name of the object. 
For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + "required": true, + "location": "path" + } + }, + "parameterOrder": [ + "bucket", + "object", + "entity" + ], + "request": { + "$ref": "ObjectAccessControl" + }, + "response": { + "$ref": "ObjectAccessControl" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/devstorage.full_control" + ] + }, + "update": { + "id": "storage.objectAccessControls.update", + "path": "b/{bucket}/o/{object}/acl/{entity}", + "httpMethod": "PUT", + "description": "Updates an ACL entry on the specified object.", + "parameters": { + "bucket": { + "type": "string", + "description": "Name of a bucket.", + "required": true, + "location": "path" + }, + "entity": { + "type": "string", + "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.", + "required": true, + "location": "path" + }, + "generation": { + "type": "string", + "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).", + "format": "int64", + "location": "query" + }, + "object": { + "type": "string", + "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + "required": true, + "location": "path" + } + }, + "parameterOrder": [ + "bucket", + "object", + "entity" + ], + "request": { + "$ref": "ObjectAccessControl" + }, + "response": { + "$ref": "ObjectAccessControl" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/devstorage.full_control" + ] + } + } + }, + "objects": { + "methods": { + "compose": { + "id": "storage.objects.compose", + "path": "b/{destinationBucket}/o/{destinationObject}/compose", + "httpMethod": "POST", + "description": "Concatenates a list of existing objects into a new object in the same bucket.", + "parameters": { + "destinationBucket": { + "type": "string", + "description": "Name of the bucket in which to store the new object.", + "required": true, + "location": "path" + }, + "destinationObject": { + "type": "string", + "description": "Name of the new object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + "required": true, + "location": "path" + }, + "destinationPredefinedAcl": { + "type": "string", + "description": "Apply a predefined set of access controls to the destination object.", + "enum": [ + "authenticatedRead", + "bucketOwnerFullControl", + "bucketOwnerRead", + "private", + "projectPrivate", + "publicRead" + ], + "enumDescriptions": [ + "Object owner gets OWNER access, and allAuthenticatedUsers get READER access.", + "Object owner gets OWNER access, and project team owners get OWNER access.", + "Object owner gets OWNER access, and project team owners get READER access.", + "Object owner gets OWNER access.", + "Object owner gets OWNER access, and project team members get access according to their roles.", + "Object owner gets OWNER access, and allUsers get READER access." 
+ ], + "location": "query" + }, + "ifGenerationMatch": { + "type": "string", + "description": "Makes the operation conditional on whether the object's current generation matches the given value.", + "format": "int64", + "location": "query" + }, + "ifMetagenerationMatch": { + "type": "string", + "description": "Makes the operation conditional on whether the object's current metageneration matches the given value.", + "format": "int64", + "location": "query" + } + }, + "parameterOrder": [ + "destinationBucket", + "destinationObject" + ], + "request": { + "$ref": "ComposeRequest" + }, + "response": { + "$ref": "Object" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/devstorage.full_control", + "https://www.googleapis.com/auth/devstorage.read_write" + ], + "supportsMediaDownload": true, + "useMediaDownloadService": true + }, + "copy": { + "id": "storage.objects.copy", + "path": "b/{sourceBucket}/o/{sourceObject}/copyTo/b/{destinationBucket}/o/{destinationObject}", + "httpMethod": "POST", + "description": "Copies a source object to a destination object. Optionally overrides metadata.", + "parameters": { + "destinationBucket": { + "type": "string", + "description": "Name of the bucket in which to store the new object. Overrides the provided object metadata's bucket value, if any. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + "required": true, + "location": "path" + }, + "destinationObject": { + "type": "string", + "description": "Name of the new object. Required when the object metadata is not otherwise provided. Overrides the object metadata's name value, if any.", + "required": true, + "location": "path" + }, + "destinationPredefinedAcl": { + "type": "string", + "description": "Apply a predefined set of access controls to the destination object.", + "enum": [ + "authenticatedRead", + "bucketOwnerFullControl", + "bucketOwnerRead", + "private", + "projectPrivate", + "publicRead" + ], + "enumDescriptions": [ + "Object owner gets OWNER access, and allAuthenticatedUsers get READER access.", + "Object owner gets OWNER access, and project team owners get OWNER access.", + "Object owner gets OWNER access, and project team owners get READER access.", + "Object owner gets OWNER access.", + "Object owner gets OWNER access, and project team members get access according to their roles.", + "Object owner gets OWNER access, and allUsers get READER access." 
+ ], + "location": "query" + }, + "ifGenerationMatch": { + "type": "string", + "description": "Makes the operation conditional on whether the destination object's current generation matches the given value.", + "format": "int64", + "location": "query" + }, + "ifGenerationNotMatch": { + "type": "string", + "description": "Makes the operation conditional on whether the destination object's current generation does not match the given value.", + "format": "int64", + "location": "query" + }, + "ifMetagenerationMatch": { + "type": "string", + "description": "Makes the operation conditional on whether the destination object's current metageneration matches the given value.", + "format": "int64", + "location": "query" + }, + "ifMetagenerationNotMatch": { + "type": "string", + "description": "Makes the operation conditional on whether the destination object's current metageneration does not match the given value.", + "format": "int64", + "location": "query" + }, + "ifSourceGenerationMatch": { + "type": "string", + "description": "Makes the operation conditional on whether the source object's generation matches the given value.", + "format": "int64", + "location": "query" + }, + "ifSourceGenerationNotMatch": { + "type": "string", + "description": "Makes the operation conditional on whether the source object's generation does not match the given value.", + "format": "int64", + "location": "query" + }, + "ifSourceMetagenerationMatch": { + "type": "string", + "description": "Makes the operation conditional on whether the source object's current metageneration matches the given value.", + "format": "int64", + "location": "query" + }, + "ifSourceMetagenerationNotMatch": { + "type": "string", + "description": "Makes the operation conditional on whether the source object's current metageneration does not match the given value.", + "format": "int64", + "location": "query" + }, + "projection": { + "type": "string", + "description": "Set of properties to return. Defaults to noAcl, unless the object resource specifies the acl property, when it defaults to full.", + "enum": [ + "full", + "noAcl" + ], + "enumDescriptions": [ + "Include all properties.", + "Omit the acl property." + ], + "location": "query" + }, + "sourceBucket": { + "type": "string", + "description": "Name of the bucket in which to find the source object.", + "required": true, + "location": "path" + }, + "sourceGeneration": { + "type": "string", + "description": "If present, selects a specific revision of the source object (as opposed to the latest version, the default).", + "format": "int64", + "location": "query" + }, + "sourceObject": { + "type": "string", + "description": "Name of the source object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + "required": true, + "location": "path" + } + }, + "parameterOrder": [ + "sourceBucket", + "sourceObject", + "destinationBucket", + "destinationObject" + ], + "request": { + "$ref": "Object" + }, + "response": { + "$ref": "Object" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/devstorage.full_control", + "https://www.googleapis.com/auth/devstorage.read_write" + ], + "supportsMediaDownload": true, + "useMediaDownloadService": true + }, + "delete": { + "id": "storage.objects.delete", + "path": "b/{bucket}/o/{object}", + "httpMethod": "DELETE", + "description": "Deletes an object and its metadata. 
Deletions are permanent if versioning is not enabled for the bucket, or if the generation parameter is used.", + "parameters": { + "bucket": { + "type": "string", + "description": "Name of the bucket in which the object resides.", + "required": true, + "location": "path" + }, + "generation": { + "type": "string", + "description": "If present, permanently deletes a specific revision of this object (as opposed to the latest version, the default).", + "format": "int64", + "location": "query" + }, + "ifGenerationMatch": { + "type": "string", + "description": "Makes the operation conditional on whether the object's current generation matches the given value.", + "format": "int64", + "location": "query" + }, + "ifGenerationNotMatch": { + "type": "string", + "description": "Makes the operation conditional on whether the object's current generation does not match the given value.", + "format": "int64", + "location": "query" + }, + "ifMetagenerationMatch": { + "type": "string", + "description": "Makes the operation conditional on whether the object's current metageneration matches the given value.", + "format": "int64", + "location": "query" + }, + "ifMetagenerationNotMatch": { + "type": "string", + "description": "Makes the operation conditional on whether the object's current metageneration does not match the given value.", + "format": "int64", + "location": "query" + }, + "object": { + "type": "string", + "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + "required": true, + "location": "path" + } + }, + "parameterOrder": [ + "bucket", + "object" + ], + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/devstorage.full_control", + "https://www.googleapis.com/auth/devstorage.read_write" + ] + }, + "get": { + "id": "storage.objects.get", + "path": "b/{bucket}/o/{object}", + "httpMethod": "GET", + "description": "Retrieves an object or its metadata.", + "parameters": { + "bucket": { + "type": "string", + "description": "Name of the bucket in which the object resides.", + "required": true, + "location": "path" + }, + "generation": { + "type": "string", + "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).", + "format": "int64", + "location": "query" + }, + "ifGenerationMatch": { + "type": "string", + "description": "Makes the operation conditional on whether the object's generation matches the given value.", + "format": "int64", + "location": "query" + }, + "ifGenerationNotMatch": { + "type": "string", + "description": "Makes the operation conditional on whether the object's generation does not match the given value.", + "format": "int64", + "location": "query" + }, + "ifMetagenerationMatch": { + "type": "string", + "description": "Makes the operation conditional on whether the object's current metageneration matches the given value.", + "format": "int64", + "location": "query" + }, + "ifMetagenerationNotMatch": { + "type": "string", + "description": "Makes the operation conditional on whether the object's current metageneration does not match the given value.", + "format": "int64", + "location": "query" + }, + "object": { + "type": "string", + "description": "Name of the object. 
For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + "required": true, + "location": "path" + }, + "projection": { + "type": "string", + "description": "Set of properties to return. Defaults to noAcl.", + "enum": [ + "full", + "noAcl" + ], + "enumDescriptions": [ + "Include all properties.", + "Omit the acl property." + ], + "location": "query" + } + }, + "parameterOrder": [ + "bucket", + "object" + ], + "response": { + "$ref": "Object" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only", + "https://www.googleapis.com/auth/devstorage.full_control", + "https://www.googleapis.com/auth/devstorage.read_only", + "https://www.googleapis.com/auth/devstorage.read_write" + ], + "supportsMediaDownload": true, + "useMediaDownloadService": true + }, + "insert": { + "id": "storage.objects.insert", + "path": "b/{bucket}/o", + "httpMethod": "POST", + "description": "Stores a new object and metadata.", + "parameters": { + "bucket": { + "type": "string", + "description": "Name of the bucket in which to store the new object. Overrides the provided object metadata's bucket value, if any.", + "required": true, + "location": "path" + }, + "contentEncoding": { + "type": "string", + "description": "If set, sets the contentEncoding property of the final object to this value. Setting this parameter is equivalent to setting the contentEncoding metadata property. This can be useful when uploading an object with uploadType=media to indicate the encoding of the content being uploaded.", + "location": "query" + }, + "ifGenerationMatch": { + "type": "string", + "description": "Makes the operation conditional on whether the object's current generation matches the given value.", + "format": "int64", + "location": "query" + }, + "ifGenerationNotMatch": { + "type": "string", + "description": "Makes the operation conditional on whether the object's current generation does not match the given value.", + "format": "int64", + "location": "query" + }, + "ifMetagenerationMatch": { + "type": "string", + "description": "Makes the operation conditional on whether the object's current metageneration matches the given value.", + "format": "int64", + "location": "query" + }, + "ifMetagenerationNotMatch": { + "type": "string", + "description": "Makes the operation conditional on whether the object's current metageneration does not match the given value.", + "format": "int64", + "location": "query" + }, + "name": { + "type": "string", + "description": "Name of the object. Required when the object metadata is not otherwise provided. Overrides the object metadata's name value, if any. 
For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + "location": "query" + }, + "predefinedAcl": { + "type": "string", + "description": "Apply a predefined set of access controls to this object.", + "enum": [ + "authenticatedRead", + "bucketOwnerFullControl", + "bucketOwnerRead", + "private", + "projectPrivate", + "publicRead" + ], + "enumDescriptions": [ + "Object owner gets OWNER access, and allAuthenticatedUsers get READER access.", + "Object owner gets OWNER access, and project team owners get OWNER access.", + "Object owner gets OWNER access, and project team owners get READER access.", + "Object owner gets OWNER access.", + "Object owner gets OWNER access, and project team members get access according to their roles.", + "Object owner gets OWNER access, and allUsers get READER access." + ], + "location": "query" + }, + "projection": { + "type": "string", + "description": "Set of properties to return. Defaults to noAcl, unless the object resource specifies the acl property, when it defaults to full.", + "enum": [ + "full", + "noAcl" + ], + "enumDescriptions": [ + "Include all properties.", + "Omit the acl property." + ], + "location": "query" + } + }, + "parameterOrder": [ + "bucket" + ], + "request": { + "$ref": "Object" + }, + "response": { + "$ref": "Object" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/devstorage.full_control", + "https://www.googleapis.com/auth/devstorage.read_write" + ], + "supportsMediaDownload": true, + "useMediaDownloadService": true, + "supportsMediaUpload": true, + "mediaUpload": { + "accept": [ + "*/*" + ], + "protocols": { + "simple": { + "multipart": true, + "path": "/upload/storage/v1/b/{bucket}/o" + }, + "resumable": { + "multipart": true, + "path": "/resumable/upload/storage/v1/b/{bucket}/o" + } + } + } + }, + "list": { + "id": "storage.objects.list", + "path": "b/{bucket}/o", + "httpMethod": "GET", + "description": "Retrieves a list of objects matching the criteria.", + "parameters": { + "bucket": { + "type": "string", + "description": "Name of the bucket in which to look for objects.", + "required": true, + "location": "path" + }, + "delimiter": { + "type": "string", + "description": "Returns results in a directory-like mode. items will contain only objects whose names, aside from the prefix, do not contain delimiter. Objects whose names, aside from the prefix, contain delimiter will have their name, truncated after the delimiter, returned in prefixes. Duplicate prefixes are omitted.", + "location": "query" + }, + "maxResults": { + "type": "integer", + "description": "Maximum number of items plus prefixes to return. As duplicate prefixes are omitted, fewer total results may be returned than requested. The default value of this parameter is 1,000 items.", + "format": "uint32", + "minimum": "0", + "location": "query" + }, + "pageToken": { + "type": "string", + "description": "A previously-returned page token representing part of the larger set of results to view.", + "location": "query" + }, + "prefix": { + "type": "string", + "description": "Filter results to objects whose names begin with this prefix.", + "location": "query" + }, + "projection": { + "type": "string", + "description": "Set of properties to return. Defaults to noAcl.", + "enum": [ + "full", + "noAcl" + ], + "enumDescriptions": [ + "Include all properties.", + "Omit the acl property." 
+ ], + "location": "query" + }, + "versions": { + "type": "boolean", + "description": "If true, lists all versions of an object as distinct results. The default is false. For more information, see Object Versioning.", + "location": "query" + } + }, + "parameterOrder": [ + "bucket" + ], + "response": { + "$ref": "Objects" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only", + "https://www.googleapis.com/auth/devstorage.full_control", + "https://www.googleapis.com/auth/devstorage.read_only", + "https://www.googleapis.com/auth/devstorage.read_write" + ], + "supportsSubscription": true + }, + "patch": { + "id": "storage.objects.patch", + "path": "b/{bucket}/o/{object}", + "httpMethod": "PATCH", + "description": "Updates an object's metadata. This method supports patch semantics.", + "parameters": { + "bucket": { + "type": "string", + "description": "Name of the bucket in which the object resides.", + "required": true, + "location": "path" + }, + "generation": { + "type": "string", + "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).", + "format": "int64", + "location": "query" + }, + "ifGenerationMatch": { + "type": "string", + "description": "Makes the operation conditional on whether the object's current generation matches the given value.", + "format": "int64", + "location": "query" + }, + "ifGenerationNotMatch": { + "type": "string", + "description": "Makes the operation conditional on whether the object's current generation does not match the given value.", + "format": "int64", + "location": "query" + }, + "ifMetagenerationMatch": { + "type": "string", + "description": "Makes the operation conditional on whether the object's current metageneration matches the given value.", + "format": "int64", + "location": "query" + }, + "ifMetagenerationNotMatch": { + "type": "string", + "description": "Makes the operation conditional on whether the object's current metageneration does not match the given value.", + "format": "int64", + "location": "query" + }, + "object": { + "type": "string", + "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + "required": true, + "location": "path" + }, + "predefinedAcl": { + "type": "string", + "description": "Apply a predefined set of access controls to this object.", + "enum": [ + "authenticatedRead", + "bucketOwnerFullControl", + "bucketOwnerRead", + "private", + "projectPrivate", + "publicRead" + ], + "enumDescriptions": [ + "Object owner gets OWNER access, and allAuthenticatedUsers get READER access.", + "Object owner gets OWNER access, and project team owners get OWNER access.", + "Object owner gets OWNER access, and project team owners get READER access.", + "Object owner gets OWNER access.", + "Object owner gets OWNER access, and project team members get access according to their roles.", + "Object owner gets OWNER access, and allUsers get READER access." + ], + "location": "query" + }, + "projection": { + "type": "string", + "description": "Set of properties to return. Defaults to full.", + "enum": [ + "full", + "noAcl" + ], + "enumDescriptions": [ + "Include all properties.", + "Omit the acl property." 
+ ], + "location": "query" + } + }, + "parameterOrder": [ + "bucket", + "object" + ], + "request": { + "$ref": "Object" + }, + "response": { + "$ref": "Object" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/devstorage.full_control", + "https://www.googleapis.com/auth/devstorage.read_write" + ] + }, + "rewrite": { + "id": "storage.objects.rewrite", + "path": "b/{sourceBucket}/o/{sourceObject}/rewriteTo/b/{destinationBucket}/o/{destinationObject}", + "httpMethod": "POST", + "description": "Rewrites a source object to a destination object. Optionally overrides metadata.", + "parameters": { + "destinationBucket": { + "type": "string", + "description": "Name of the bucket in which to store the new object. Overrides the provided object metadata's bucket value, if any.", + "required": true, + "location": "path" + }, + "destinationObject": { + "type": "string", + "description": "Name of the new object. Required when the object metadata is not otherwise provided. Overrides the object metadata's name value, if any. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + "required": true, + "location": "path" + }, + "destinationPredefinedAcl": { + "type": "string", + "description": "Apply a predefined set of access controls to the destination object.", + "enum": [ + "authenticatedRead", + "bucketOwnerFullControl", + "bucketOwnerRead", + "private", + "projectPrivate", + "publicRead" + ], + "enumDescriptions": [ + "Object owner gets OWNER access, and allAuthenticatedUsers get READER access.", + "Object owner gets OWNER access, and project team owners get OWNER access.", + "Object owner gets OWNER access, and project team owners get READER access.", + "Object owner gets OWNER access.", + "Object owner gets OWNER access, and project team members get access according to their roles.", + "Object owner gets OWNER access, and allUsers get READER access." 
+ ], + "location": "query" + }, + "ifGenerationMatch": { + "type": "string", + "description": "Makes the operation conditional on whether the destination object's current generation matches the given value.", + "format": "int64", + "location": "query" + }, + "ifGenerationNotMatch": { + "type": "string", + "description": "Makes the operation conditional on whether the destination object's current generation does not match the given value.", + "format": "int64", + "location": "query" + }, + "ifMetagenerationMatch": { + "type": "string", + "description": "Makes the operation conditional on whether the destination object's current metageneration matches the given value.", + "format": "int64", + "location": "query" + }, + "ifMetagenerationNotMatch": { + "type": "string", + "description": "Makes the operation conditional on whether the destination object's current metageneration does not match the given value.", + "format": "int64", + "location": "query" + }, + "ifSourceGenerationMatch": { + "type": "string", + "description": "Makes the operation conditional on whether the source object's generation matches the given value.", + "format": "int64", + "location": "query" + }, + "ifSourceGenerationNotMatch": { + "type": "string", + "description": "Makes the operation conditional on whether the source object's generation does not match the given value.", + "format": "int64", + "location": "query" + }, + "ifSourceMetagenerationMatch": { + "type": "string", + "description": "Makes the operation conditional on whether the source object's current metageneration matches the given value.", + "format": "int64", + "location": "query" + }, + "ifSourceMetagenerationNotMatch": { + "type": "string", + "description": "Makes the operation conditional on whether the source object's current metageneration does not match the given value.", + "format": "int64", + "location": "query" + }, + "maxBytesRewrittenPerCall": { + "type": "string", + "description": "The maximum number of bytes that will be rewritten per rewrite request. Most callers shouldn't need to specify this parameter - it is primarily in place to support testing. If specified the value must be an integral multiple of 1 MiB (1048576). Also, this only applies to requests where the source and destination span locations and/or storage classes. Finally, this value must not change across rewrite calls else you'll get an error that the rewriteToken is invalid.", + "format": "int64", + "location": "query" + }, + "projection": { + "type": "string", + "description": "Set of properties to return. Defaults to noAcl, unless the object resource specifies the acl property, when it defaults to full.", + "enum": [ + "full", + "noAcl" + ], + "enumDescriptions": [ + "Include all properties.", + "Omit the acl property." + ], + "location": "query" + }, + "rewriteToken": { + "type": "string", + "description": "Include this field (from the previous rewrite response) on each rewrite request after the first one, until the rewrite response 'done' flag is true. 
Calls that provide a rewriteToken can omit all other request fields, but if included those fields must match the values provided in the first rewrite request.", + "location": "query" + }, + "sourceBucket": { + "type": "string", + "description": "Name of the bucket in which to find the source object.", + "required": true, + "location": "path" + }, + "sourceGeneration": { + "type": "string", + "description": "If present, selects a specific revision of the source object (as opposed to the latest version, the default).", + "format": "int64", + "location": "query" + }, + "sourceObject": { + "type": "string", + "description": "Name of the source object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + "required": true, + "location": "path" + } + }, + "parameterOrder": [ + "sourceBucket", + "sourceObject", + "destinationBucket", + "destinationObject" + ], + "request": { + "$ref": "Object" + }, + "response": { + "$ref": "RewriteResponse" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/devstorage.full_control", + "https://www.googleapis.com/auth/devstorage.read_write" + ] + }, + "update": { + "id": "storage.objects.update", + "path": "b/{bucket}/o/{object}", + "httpMethod": "PUT", + "description": "Updates an object's metadata.", + "parameters": { + "bucket": { + "type": "string", + "description": "Name of the bucket in which the object resides.", + "required": true, + "location": "path" + }, + "generation": { + "type": "string", + "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).", + "format": "int64", + "location": "query" + }, + "ifGenerationMatch": { + "type": "string", + "description": "Makes the operation conditional on whether the object's current generation matches the given value.", + "format": "int64", + "location": "query" + }, + "ifGenerationNotMatch": { + "type": "string", + "description": "Makes the operation conditional on whether the object's current generation does not match the given value.", + "format": "int64", + "location": "query" + }, + "ifMetagenerationMatch": { + "type": "string", + "description": "Makes the operation conditional on whether the object's current metageneration matches the given value.", + "format": "int64", + "location": "query" + }, + "ifMetagenerationNotMatch": { + "type": "string", + "description": "Makes the operation conditional on whether the object's current metageneration does not match the given value.", + "format": "int64", + "location": "query" + }, + "object": { + "type": "string", + "description": "Name of the object. 
For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + "required": true, + "location": "path" + }, + "predefinedAcl": { + "type": "string", + "description": "Apply a predefined set of access controls to this object.", + "enum": [ + "authenticatedRead", + "bucketOwnerFullControl", + "bucketOwnerRead", + "private", + "projectPrivate", + "publicRead" + ], + "enumDescriptions": [ + "Object owner gets OWNER access, and allAuthenticatedUsers get READER access.", + "Object owner gets OWNER access, and project team owners get OWNER access.", + "Object owner gets OWNER access, and project team owners get READER access.", + "Object owner gets OWNER access.", + "Object owner gets OWNER access, and project team members get access according to their roles.", + "Object owner gets OWNER access, and allUsers get READER access." + ], + "location": "query" + }, + "projection": { + "type": "string", + "description": "Set of properties to return. Defaults to full.", + "enum": [ + "full", + "noAcl" + ], + "enumDescriptions": [ + "Include all properties.", + "Omit the acl property." + ], + "location": "query" + } + }, + "parameterOrder": [ + "bucket", + "object" + ], + "request": { + "$ref": "Object" + }, + "response": { + "$ref": "Object" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/devstorage.full_control", + "https://www.googleapis.com/auth/devstorage.read_write" + ], + "supportsMediaDownload": true, + "useMediaDownloadService": true + }, + "watchAll": { + "id": "storage.objects.watchAll", + "path": "b/{bucket}/o/watch", + "httpMethod": "POST", + "description": "Watch for changes on all objects in a bucket.", + "parameters": { + "bucket": { + "type": "string", + "description": "Name of the bucket in which to look for objects.", + "required": true, + "location": "path" + }, + "delimiter": { + "type": "string", + "description": "Returns results in a directory-like mode. items will contain only objects whose names, aside from the prefix, do not contain delimiter. Objects whose names, aside from the prefix, contain delimiter will have their name, truncated after the delimiter, returned in prefixes. Duplicate prefixes are omitted.", + "location": "query" + }, + "maxResults": { + "type": "integer", + "description": "Maximum number of items plus prefixes to return. As duplicate prefixes are omitted, fewer total results may be returned than requested. The default value of this parameter is 1,000 items.", + "format": "uint32", + "minimum": "0", + "location": "query" + }, + "pageToken": { + "type": "string", + "description": "A previously-returned page token representing part of the larger set of results to view.", + "location": "query" + }, + "prefix": { + "type": "string", + "description": "Filter results to objects whose names begin with this prefix.", + "location": "query" + }, + "projection": { + "type": "string", + "description": "Set of properties to return. Defaults to noAcl.", + "enum": [ + "full", + "noAcl" + ], + "enumDescriptions": [ + "Include all properties.", + "Omit the acl property." + ], + "location": "query" + }, + "versions": { + "type": "boolean", + "description": "If true, lists all versions of an object as distinct results. The default is false. 
For more information, see Object Versioning.", + "location": "query" + } + }, + "parameterOrder": [ + "bucket" + ], + "request": { + "$ref": "Channel", + "parameterName": "resource" + }, + "response": { + "$ref": "Channel" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only", + "https://www.googleapis.com/auth/devstorage.full_control", + "https://www.googleapis.com/auth/devstorage.read_only", + "https://www.googleapis.com/auth/devstorage.read_write" + ], + "supportsSubscription": true + } + } + } + } +} diff --git a/vendor/google.golang.org/api/storage/v1/storage-gen.go b/vendor/google.golang.org/api/storage/v1/storage-gen.go new file mode 100644 index 000000000..f5c5f23eb --- /dev/null +++ b/vendor/google.golang.org/api/storage/v1/storage-gen.go @@ -0,0 +1,7632 @@ +// Package storage provides access to the Cloud Storage JSON API. +// +// See https://developers.google.com/storage/docs/json_api/ +// +// Usage example: +// +// import "google.golang.org/api/storage/v1" +// ... +// storageService, err := storage.New(oauthHttpClient) +package storage + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + context "golang.org/x/net/context" + ctxhttp "golang.org/x/net/context/ctxhttp" + gensupport "google.golang.org/api/gensupport" + googleapi "google.golang.org/api/googleapi" + "io" + "net/http" + "net/url" + "strconv" + "strings" +) + +// Always reference these packages, just in case the auto-generated code +// below doesn't. +var _ = bytes.NewBuffer +var _ = strconv.Itoa +var _ = fmt.Sprintf +var _ = json.NewDecoder +var _ = io.Copy +var _ = url.Parse +var _ = gensupport.MarshalJSON +var _ = googleapi.Version +var _ = errors.New +var _ = strings.Replace +var _ = context.Canceled +var _ = ctxhttp.Do + +const apiId = "storage:v1" +const apiName = "storage" +const apiVersion = "v1" +const basePath = "https://www.googleapis.com/storage/v1/" + +// OAuth2 scopes used by this API. 
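+//
+// A minimal, hypothetical usage sketch tying the scopes below to the
+// generated client. The oauth2 setup (golang.org/x/oauth2/google's
+// DefaultClient) and the bucket/prefix names are assumptions for
+// illustration, not part of this package:
+//
+//	ctx := context.Background()
+//	client, err := google.DefaultClient(ctx, storage.DevstorageReadOnlyScope)
+//	if err != nil {
+//		// handle error
+//	}
+//	svc, err := storage.New(client)
+//	if err != nil {
+//		// handle error
+//	}
+//	// Directory-like listing: names under "logs/" up to the next "/".
+//	objs, err := svc.Objects.List("my-bucket").Prefix("logs/").Delimiter("/").Do()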
+const ( + // View and manage your data across Google Cloud Platform services + CloudPlatformScope = "https://www.googleapis.com/auth/cloud-platform" + + // View your data across Google Cloud Platform services + CloudPlatformReadOnlyScope = "https://www.googleapis.com/auth/cloud-platform.read-only" + + // Manage your data and permissions in Google Cloud Storage + DevstorageFullControlScope = "https://www.googleapis.com/auth/devstorage.full_control" + + // View your data in Google Cloud Storage + DevstorageReadOnlyScope = "https://www.googleapis.com/auth/devstorage.read_only" + + // Manage your data in Google Cloud Storage + DevstorageReadWriteScope = "https://www.googleapis.com/auth/devstorage.read_write" +) + +func New(client *http.Client) (*Service, error) { + if client == nil { + return nil, errors.New("client is nil") + } + s := &Service{client: client, BasePath: basePath} + s.BucketAccessControls = NewBucketAccessControlsService(s) + s.Buckets = NewBucketsService(s) + s.Channels = NewChannelsService(s) + s.DefaultObjectAccessControls = NewDefaultObjectAccessControlsService(s) + s.ObjectAccessControls = NewObjectAccessControlsService(s) + s.Objects = NewObjectsService(s) + return s, nil +} + +type Service struct { + client *http.Client + BasePath string // API endpoint base URL + UserAgent string // optional additional User-Agent fragment + + BucketAccessControls *BucketAccessControlsService + + Buckets *BucketsService + + Channels *ChannelsService + + DefaultObjectAccessControls *DefaultObjectAccessControlsService + + ObjectAccessControls *ObjectAccessControlsService + + Objects *ObjectsService +} + +func (s *Service) userAgent() string { + if s.UserAgent == "" { + return googleapi.UserAgent + } + return googleapi.UserAgent + " " + s.UserAgent +} + +func NewBucketAccessControlsService(s *Service) *BucketAccessControlsService { + rs := &BucketAccessControlsService{s: s} + return rs +} + +type BucketAccessControlsService struct { + s *Service +} + +func NewBucketsService(s *Service) *BucketsService { + rs := &BucketsService{s: s} + return rs +} + +type BucketsService struct { + s *Service +} + +func NewChannelsService(s *Service) *ChannelsService { + rs := &ChannelsService{s: s} + return rs +} + +type ChannelsService struct { + s *Service +} + +func NewDefaultObjectAccessControlsService(s *Service) *DefaultObjectAccessControlsService { + rs := &DefaultObjectAccessControlsService{s: s} + return rs +} + +type DefaultObjectAccessControlsService struct { + s *Service +} + +func NewObjectAccessControlsService(s *Service) *ObjectAccessControlsService { + rs := &ObjectAccessControlsService{s: s} + return rs +} + +type ObjectAccessControlsService struct { + s *Service +} + +func NewObjectsService(s *Service) *ObjectsService { + rs := &ObjectsService{s: s} + return rs +} + +type ObjectsService struct { + s *Service +} + +// Bucket: A bucket. +type Bucket struct { + // Acl: Access controls on the bucket. + Acl []*BucketAccessControl `json:"acl,omitempty"` + + // Cors: The bucket's Cross-Origin Resource Sharing (CORS) + // configuration. + Cors []*BucketCors `json:"cors,omitempty"` + + // DefaultObjectAcl: Default access controls to apply to new objects + // when no ACL is provided. + DefaultObjectAcl []*ObjectAccessControl `json:"defaultObjectAcl,omitempty"` + + // Etag: HTTP 1.1 Entity tag for the bucket. + Etag string `json:"etag,omitempty"` + + // Id: The ID of the bucket. + Id string `json:"id,omitempty"` + + // Kind: The kind of item this is. For buckets, this is always + // storage#bucket. 
+ Kind string `json:"kind,omitempty"` + + // Lifecycle: The bucket's lifecycle configuration. See lifecycle + // management for more information. + Lifecycle *BucketLifecycle `json:"lifecycle,omitempty"` + + // Location: The location of the bucket. Object data for objects in the + // bucket resides in physical storage within this region. Defaults to + // US. See the developer's guide for the authoritative list. + Location string `json:"location,omitempty"` + + // Logging: The bucket's logging configuration, which defines the + // destination bucket and optional name prefix for the current bucket's + // logs. + Logging *BucketLogging `json:"logging,omitempty"` + + // Metageneration: The metadata generation of this bucket. + Metageneration int64 `json:"metageneration,omitempty,string"` + + // Name: The name of the bucket. + Name string `json:"name,omitempty"` + + // Owner: The owner of the bucket. This is always the project team's + // owner group. + Owner *BucketOwner `json:"owner,omitempty"` + + // ProjectNumber: The project number of the project the bucket belongs + // to. + ProjectNumber uint64 `json:"projectNumber,omitempty,string"` + + // SelfLink: The URI of this bucket. + SelfLink string `json:"selfLink,omitempty"` + + // StorageClass: The bucket's storage class. This defines how objects in + // the bucket are stored and determines the SLA and the cost of storage. + // Values include STANDARD, NEARLINE and DURABLE_REDUCED_AVAILABILITY. + // Defaults to STANDARD. For more information, see storage classes. + StorageClass string `json:"storageClass,omitempty"` + + // TimeCreated: The creation time of the bucket in RFC 3339 format. + TimeCreated string `json:"timeCreated,omitempty"` + + // Updated: The modification time of the bucket in RFC 3339 format. + Updated string `json:"updated,omitempty"` + + // Versioning: The bucket's versioning configuration. + Versioning *BucketVersioning `json:"versioning,omitempty"` + + // Website: The bucket's website configuration. + Website *BucketWebsite `json:"website,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Acl") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *Bucket) MarshalJSON() ([]byte, error) { + type noMethod Bucket + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +type BucketCors struct { + // MaxAgeSeconds: The value, in seconds, to return in the + // Access-Control-Max-Age header used in preflight responses. + MaxAgeSeconds int64 `json:"maxAgeSeconds,omitempty"` + + // Method: The list of HTTP methods on which to include CORS response + // headers, (GET, OPTIONS, POST, etc) Note: "*" is permitted in the list + // of methods, and means "any method". + Method []string `json:"method,omitempty"` + + // Origin: The list of Origins eligible to receive CORS response + // headers. Note: "*" is permitted in the list of origins, and means + // "any Origin". 
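+ //
+ // An illustrative sketch of a permissive rule built from the fields of
+ // this struct (all values are placeholders):
+ //
+ //	cors := &BucketCors{
+ //		MaxAgeSeconds:  3600,
+ //		Method:         []string{"GET", "HEAD"},
+ //		Origin:         []string{"*"},
+ //		ResponseHeader: []string{"Content-Type"},
+ //	}
+ //	bucket.Cors = []*BucketCors{cors}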
+ Origin []string `json:"origin,omitempty"` + + // ResponseHeader: The list of HTTP headers other than the simple + // response headers to give permission for the user-agent to share + // across domains. + ResponseHeader []string `json:"responseHeader,omitempty"` + + // ForceSendFields is a list of field names (e.g. "MaxAgeSeconds") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *BucketCors) MarshalJSON() ([]byte, error) { + type noMethod BucketCors + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +// BucketLifecycle: The bucket's lifecycle configuration. See lifecycle +// management for more information. +type BucketLifecycle struct { + // Rule: A lifecycle management rule, which is made of an action to take + // and the condition(s) under which the action will be taken. + Rule []*BucketLifecycleRule `json:"rule,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Rule") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *BucketLifecycle) MarshalJSON() ([]byte, error) { + type noMethod BucketLifecycle + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +type BucketLifecycleRule struct { + // Action: The action to take. + Action *BucketLifecycleRuleAction `json:"action,omitempty"` + + // Condition: The condition(s) under which the action will be taken. + Condition *BucketLifecycleRuleCondition `json:"condition,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Action") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *BucketLifecycleRule) MarshalJSON() ([]byte, error) { + type noMethod BucketLifecycleRule + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +// BucketLifecycleRuleAction: The action to take. +type BucketLifecycleRuleAction struct { + // Type: Type of the action. Currently, only Delete is supported. + Type string `json:"type,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Type") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. 
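+ //
+ // For example, an illustrative sketch (bucket and object names are
+ // placeholders, and the Objects.Patch(bucket, object, *Object) call is
+ // assumed from the generated methods elsewhere in this file) that clears
+ // an object's Cache-Control by forcing the empty string to be sent:
+ //
+ //	o := &Object{CacheControl: "", ForceSendFields: []string{"CacheControl"}}
+ //	updated, err := svc.Objects.Patch("my-bucket", "my-object", o).Do()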
+ ForceSendFields []string `json:"-"` +} + +func (s *BucketLifecycleRuleAction) MarshalJSON() ([]byte, error) { + type noMethod BucketLifecycleRuleAction + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +// BucketLifecycleRuleCondition: The condition(s) under which the action +// will be taken. +type BucketLifecycleRuleCondition struct { + // Age: Age of an object (in days). This condition is satisfied when an + // object reaches the specified age. + Age int64 `json:"age,omitempty"` + + // CreatedBefore: A date in RFC 3339 format with only the date part (for + // instance, "2013-01-15"). This condition is satisfied when an object + // is created before midnight of the specified date in UTC. + CreatedBefore string `json:"createdBefore,omitempty"` + + // IsLive: Relevant only for versioned objects. If the value is true, + // this condition matches live objects; if the value is false, it + // matches archived objects. + IsLive bool `json:"isLive,omitempty"` + + // NumNewerVersions: Relevant only for versioned objects. If the value + // is N, this condition is satisfied when there are at least N versions + // (including the live version) newer than this version of the object. + NumNewerVersions int64 `json:"numNewerVersions,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Age") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *BucketLifecycleRuleCondition) MarshalJSON() ([]byte, error) { + type noMethod BucketLifecycleRuleCondition + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +// BucketLogging: The bucket's logging configuration, which defines the +// destination bucket and optional name prefix for the current bucket's +// logs. +type BucketLogging struct { + // LogBucket: The destination bucket where the current bucket's logs + // should be placed. + LogBucket string `json:"logBucket,omitempty"` + + // LogObjectPrefix: A prefix for log object names. + LogObjectPrefix string `json:"logObjectPrefix,omitempty"` + + // ForceSendFields is a list of field names (e.g. "LogBucket") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *BucketLogging) MarshalJSON() ([]byte, error) { + type noMethod BucketLogging + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +// BucketOwner: The owner of the bucket. This is always the project +// team's owner group. +type BucketOwner struct { + // Entity: The entity, in the form project-owner-projectId. + Entity string `json:"entity,omitempty"` + + // EntityId: The ID for the entity. + EntityId string `json:"entityId,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Entity") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. 
However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *BucketOwner) MarshalJSON() ([]byte, error) { + type noMethod BucketOwner + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +// BucketVersioning: The bucket's versioning configuration. +type BucketVersioning struct { + // Enabled: While set to true, versioning is fully enabled for this + // bucket. + Enabled bool `json:"enabled,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Enabled") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *BucketVersioning) MarshalJSON() ([]byte, error) { + type noMethod BucketVersioning + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +// BucketWebsite: The bucket's website configuration. +type BucketWebsite struct { + // MainPageSuffix: Behaves as the bucket's directory index where missing + // objects are treated as potential directories. + MainPageSuffix string `json:"mainPageSuffix,omitempty"` + + // NotFoundPage: The custom object to return when a requested resource + // is not found. + NotFoundPage string `json:"notFoundPage,omitempty"` + + // ForceSendFields is a list of field names (e.g. "MainPageSuffix") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *BucketWebsite) MarshalJSON() ([]byte, error) { + type noMethod BucketWebsite + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +// BucketAccessControl: An access-control entry. +type BucketAccessControl struct { + // Bucket: The name of the bucket. + Bucket string `json:"bucket,omitempty"` + + // Domain: The domain associated with the entity, if any. + Domain string `json:"domain,omitempty"` + + // Email: The email address associated with the entity, if any. + Email string `json:"email,omitempty"` + + // Entity: The entity holding the permission, in one of the following + // forms: + // - user-userId + // - user-email + // - group-groupId + // - group-email + // - domain-domain + // - project-team-projectId + // - allUsers + // - allAuthenticatedUsers Examples: + // - The user liz@example.com would be user-liz@example.com. + // - The group example@googlegroups.com would be + // group-example@googlegroups.com. + // - To refer to all members of the Google Apps for Business domain + // example.com, the entity would be domain-example.com. + Entity string `json:"entity,omitempty"` + + // EntityId: The ID for the entity, if any. + EntityId string `json:"entityId,omitempty"` + + // Etag: HTTP 1.1 Entity tag for the access-control entry. + Etag string `json:"etag,omitempty"` + + // Id: The ID of the access-control entry. 
+ Id string `json:"id,omitempty"` + + // Kind: The kind of item this is. For bucket access control entries, + // this is always storage#bucketAccessControl. + Kind string `json:"kind,omitempty"` + + // ProjectTeam: The project team associated with the entity, if any. + ProjectTeam *BucketAccessControlProjectTeam `json:"projectTeam,omitempty"` + + // Role: The access permission for the entity. Can be READER, WRITER, or + // OWNER. + Role string `json:"role,omitempty"` + + // SelfLink: The link to this access-control entry. + SelfLink string `json:"selfLink,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Bucket") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *BucketAccessControl) MarshalJSON() ([]byte, error) { + type noMethod BucketAccessControl + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +// BucketAccessControlProjectTeam: The project team associated with the +// entity, if any. +type BucketAccessControlProjectTeam struct { + // ProjectNumber: The project number. + ProjectNumber string `json:"projectNumber,omitempty"` + + // Team: The team. Can be owners, editors, or viewers. + Team string `json:"team,omitempty"` + + // ForceSendFields is a list of field names (e.g. "ProjectNumber") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *BucketAccessControlProjectTeam) MarshalJSON() ([]byte, error) { + type noMethod BucketAccessControlProjectTeam + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +// BucketAccessControls: An access-control list. +type BucketAccessControls struct { + // Items: The list of items. + Items []*BucketAccessControl `json:"items,omitempty"` + + // Kind: The kind of item this is. For lists of bucket access control + // entries, this is always storage#bucketAccessControls. + Kind string `json:"kind,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Items") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *BucketAccessControls) MarshalJSON() ([]byte, error) { + type noMethod BucketAccessControls + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +// Buckets: A list of buckets. +type Buckets struct { + // Items: The list of items. 
+ Items []*Bucket `json:"items,omitempty"`
+
+ // Kind: The kind of item this is. For lists of buckets, this is always
+ // storage#buckets.
+ Kind string `json:"kind,omitempty"`
+
+ // NextPageToken: The continuation token, used to page through large
+ // result sets. Provide this value in a subsequent request to return the
+ // next page of results.
+ NextPageToken string `json:"nextPageToken,omitempty"`
+
+ // ServerResponse contains the HTTP response code and headers from the
+ // server.
+ googleapi.ServerResponse `json:"-"`
+
+ // ForceSendFields is a list of field names (e.g. "Items") to
+ // unconditionally include in API requests. By default, fields with
+ // empty values are omitted from API requests. However, any non-pointer,
+ // non-interface field appearing in ForceSendFields will be sent to the
+ // server regardless of whether the field is empty or not. This may be
+ // used to include empty fields in Patch requests.
+ ForceSendFields []string `json:"-"`
+}
+
+func (s *Buckets) MarshalJSON() ([]byte, error) {
+ type noMethod Buckets
+ raw := noMethod(*s)
+ return gensupport.MarshalJSON(raw, s.ForceSendFields)
+}
+
+// Channel: A notification channel used to watch for resource changes.
+type Channel struct {
+ // Address: The address where notifications are delivered for this
+ // channel.
+ Address string `json:"address,omitempty"`
+
+ // Expiration: Date and time of notification channel expiration,
+ // expressed as a Unix timestamp, in milliseconds. Optional.
+ Expiration int64 `json:"expiration,omitempty,string"`
+
+ // Id: A UUID or similar unique string that identifies this channel.
+ Id string `json:"id,omitempty"`
+
+ // Kind: Identifies this as a notification channel used to watch for
+ // changes to a resource. Value: the fixed string "api#channel".
+ Kind string `json:"kind,omitempty"`
+
+ // Params: Additional parameters controlling delivery channel behavior.
+ // Optional.
+ Params map[string]string `json:"params,omitempty"`
+
+ // Payload: A Boolean value to indicate whether payload is wanted.
+ // Optional.
+ Payload bool `json:"payload,omitempty"`
+
+ // ResourceId: An opaque ID that identifies the resource being watched
+ // on this channel. Stable across different API versions.
+ ResourceId string `json:"resourceId,omitempty"`
+
+ // ResourceUri: A version-specific identifier for the watched resource.
+ ResourceUri string `json:"resourceUri,omitempty"`
+
+ // Token: An arbitrary string delivered to the target address with each
+ // notification delivered over this channel. Optional.
+ Token string `json:"token,omitempty"`
+
+ // Type: The type of delivery mechanism used for this channel.
+ Type string `json:"type,omitempty"`
+
+ // ServerResponse contains the HTTP response code and headers from the
+ // server.
+ googleapi.ServerResponse `json:"-"`
+
+ // ForceSendFields is a list of field names (e.g. "Address") to
+ // unconditionally include in API requests. By default, fields with
+ // empty values are omitted from API requests. However, any non-pointer,
+ // non-interface field appearing in ForceSendFields will be sent to the
+ // server regardless of whether the field is empty or not. This may be
+ // used to include empty fields in Patch requests.
+ ForceSendFields []string `json:"-"`
+}
+
+func (s *Channel) MarshalJSON() ([]byte, error) {
+ type noMethod Channel
+ raw := noMethod(*s)
+ return gensupport.MarshalJSON(raw, s.ForceSendFields)
+}
+
+// ComposeRequest: A Compose request.
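+//
+// An illustrative sketch (names are placeholders; the Objects.Compose call
+// is assumed from the generated methods elsewhere in this file) that
+// concatenates two source objects into one destination object:
+//
+//	req := &ComposeRequest{
+//		Destination: &Object{ContentType: "text/plain"},
+//		SourceObjects: []*ComposeRequestSourceObjects{
+//			{Name: "part-1"},
+//			{Name: "part-2"},
+//		},
+//	}
+//	combined, err := svc.Objects.Compose("my-bucket", "combined.txt", req).Do()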
+type ComposeRequest struct { + // Destination: Properties of the resulting object. + Destination *Object `json:"destination,omitempty"` + + // Kind: The kind of item this is. + Kind string `json:"kind,omitempty"` + + // SourceObjects: The list of source objects that will be concatenated + // into a single object. + SourceObjects []*ComposeRequestSourceObjects `json:"sourceObjects,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Destination") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *ComposeRequest) MarshalJSON() ([]byte, error) { + type noMethod ComposeRequest + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +type ComposeRequestSourceObjects struct { + // Generation: The generation of this object to use as the source. + Generation int64 `json:"generation,omitempty,string"` + + // Name: The source object's name. The source object's bucket is + // implicitly the destination bucket. + Name string `json:"name,omitempty"` + + // ObjectPreconditions: Conditions that must be met for this operation + // to execute. + ObjectPreconditions *ComposeRequestSourceObjectsObjectPreconditions `json:"objectPreconditions,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Generation") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *ComposeRequestSourceObjects) MarshalJSON() ([]byte, error) { + type noMethod ComposeRequestSourceObjects + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +// ComposeRequestSourceObjectsObjectPreconditions: Conditions that must +// be met for this operation to execute. +type ComposeRequestSourceObjectsObjectPreconditions struct { + // IfGenerationMatch: Only perform the composition if the generation of + // the source object that would be used matches this value. If this + // value and a generation are both specified, they must be the same + // value or the call will fail. + IfGenerationMatch int64 `json:"ifGenerationMatch,omitempty,string"` + + // ForceSendFields is a list of field names (e.g. "IfGenerationMatch") + // to unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *ComposeRequestSourceObjectsObjectPreconditions) MarshalJSON() ([]byte, error) { + type noMethod ComposeRequestSourceObjectsObjectPreconditions + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +// Object: An object. +type Object struct { + // Acl: Access controls on the object. 
+ Acl []*ObjectAccessControl `json:"acl,omitempty"` + + // Bucket: The name of the bucket containing this object. + Bucket string `json:"bucket,omitempty"` + + // CacheControl: Cache-Control directive for the object data. + CacheControl string `json:"cacheControl,omitempty"` + + // ComponentCount: Number of underlying components that make up this + // object. Components are accumulated by compose operations. + ComponentCount int64 `json:"componentCount,omitempty"` + + // ContentDisposition: Content-Disposition of the object data. + ContentDisposition string `json:"contentDisposition,omitempty"` + + // ContentEncoding: Content-Encoding of the object data. + ContentEncoding string `json:"contentEncoding,omitempty"` + + // ContentLanguage: Content-Language of the object data. + ContentLanguage string `json:"contentLanguage,omitempty"` + + // ContentType: Content-Type of the object data. + ContentType string `json:"contentType,omitempty"` + + // Crc32c: CRC32c checksum, as described in RFC 4960, Appendix B; + // encoded using base64 in big-endian byte order. For more information + // about using the CRC32c checksum, see Hashes and ETags: Best + // Practices. + Crc32c string `json:"crc32c,omitempty"` + + // CustomerEncryption: Metadata of customer-supplied encryption key, if + // the object is encrypted by such a key. + CustomerEncryption *ObjectCustomerEncryption `json:"customerEncryption,omitempty"` + + // Etag: HTTP 1.1 Entity tag for the object. + Etag string `json:"etag,omitempty"` + + // Generation: The content generation of this object. Used for object + // versioning. + Generation int64 `json:"generation,omitempty,string"` + + // Id: The ID of the object. + Id string `json:"id,omitempty"` + + // Kind: The kind of item this is. For objects, this is always + // storage#object. + Kind string `json:"kind,omitempty"` + + // Md5Hash: MD5 hash of the data; encoded using base64. For more + // information about using the MD5 hash, see Hashes and ETags: Best + // Practices. + Md5Hash string `json:"md5Hash,omitempty"` + + // MediaLink: Media download link. + MediaLink string `json:"mediaLink,omitempty"` + + // Metadata: User-provided metadata, in key/value pairs. + Metadata map[string]string `json:"metadata,omitempty"` + + // Metageneration: The version of the metadata for this object at this + // generation. Used for preconditions and for detecting changes in + // metadata. A metageneration number is only meaningful in the context + // of a particular generation of a particular object. + Metageneration int64 `json:"metageneration,omitempty,string"` + + // Name: The name of this object. Required if not specified by URL + // parameter. + Name string `json:"name,omitempty"` + + // Owner: The owner of the object. This will always be the uploader of + // the object. + Owner *ObjectOwner `json:"owner,omitempty"` + + // SelfLink: The link to this object. + SelfLink string `json:"selfLink,omitempty"` + + // Size: Content-Length of the data in bytes. + Size uint64 `json:"size,omitempty,string"` + + // StorageClass: Storage class of the object. + StorageClass string `json:"storageClass,omitempty"` + + // TimeCreated: The creation time of the object in RFC 3339 format. + TimeCreated string `json:"timeCreated,omitempty"` + + // TimeDeleted: The deletion time of the object in RFC 3339 format. Will + // be returned if and only if this version of the object has been + // deleted. 
+ TimeDeleted string `json:"timeDeleted,omitempty"` + + // Updated: The modification time of the object metadata in RFC 3339 + // format. + Updated string `json:"updated,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Acl") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *Object) MarshalJSON() ([]byte, error) { + type noMethod Object + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +// ObjectCustomerEncryption: Metadata of customer-supplied encryption +// key, if the object is encrypted by such a key. +type ObjectCustomerEncryption struct { + // EncryptionAlgorithm: The encryption algorithm. + EncryptionAlgorithm string `json:"encryptionAlgorithm,omitempty"` + + // KeySha256: SHA256 hash value of the encryption key. + KeySha256 string `json:"keySha256,omitempty"` + + // ForceSendFields is a list of field names (e.g. "EncryptionAlgorithm") + // to unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *ObjectCustomerEncryption) MarshalJSON() ([]byte, error) { + type noMethod ObjectCustomerEncryption + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +// ObjectOwner: The owner of the object. This will always be the +// uploader of the object. +type ObjectOwner struct { + // Entity: The entity, in the form user-userId. + Entity string `json:"entity,omitempty"` + + // EntityId: The ID for the entity. + EntityId string `json:"entityId,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Entity") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *ObjectOwner) MarshalJSON() ([]byte, error) { + type noMethod ObjectOwner + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +// ObjectAccessControl: An access-control entry. +type ObjectAccessControl struct { + // Bucket: The name of the bucket. + Bucket string `json:"bucket,omitempty"` + + // Domain: The domain associated with the entity, if any. + Domain string `json:"domain,omitempty"` + + // Email: The email address associated with the entity, if any. 
+ Email string `json:"email,omitempty"` + + // Entity: The entity holding the permission, in one of the following + // forms: + // - user-userId + // - user-email + // - group-groupId + // - group-email + // - domain-domain + // - project-team-projectId + // - allUsers + // - allAuthenticatedUsers Examples: + // - The user liz@example.com would be user-liz@example.com. + // - The group example@googlegroups.com would be + // group-example@googlegroups.com. + // - To refer to all members of the Google Apps for Business domain + // example.com, the entity would be domain-example.com. + Entity string `json:"entity,omitempty"` + + // EntityId: The ID for the entity, if any. + EntityId string `json:"entityId,omitempty"` + + // Etag: HTTP 1.1 Entity tag for the access-control entry. + Etag string `json:"etag,omitempty"` + + // Generation: The content generation of the object. + Generation int64 `json:"generation,omitempty,string"` + + // Id: The ID of the access-control entry. + Id string `json:"id,omitempty"` + + // Kind: The kind of item this is. For object access control entries, + // this is always storage#objectAccessControl. + Kind string `json:"kind,omitempty"` + + // Object: The name of the object. + Object string `json:"object,omitempty"` + + // ProjectTeam: The project team associated with the entity, if any. + ProjectTeam *ObjectAccessControlProjectTeam `json:"projectTeam,omitempty"` + + // Role: The access permission for the entity. Can be READER or OWNER. + Role string `json:"role,omitempty"` + + // SelfLink: The link to this access-control entry. + SelfLink string `json:"selfLink,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Bucket") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *ObjectAccessControl) MarshalJSON() ([]byte, error) { + type noMethod ObjectAccessControl + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +// ObjectAccessControlProjectTeam: The project team associated with the +// entity, if any. +type ObjectAccessControlProjectTeam struct { + // ProjectNumber: The project number. + ProjectNumber string `json:"projectNumber,omitempty"` + + // Team: The team. Can be owners, editors, or viewers. + Team string `json:"team,omitempty"` + + // ForceSendFields is a list of field names (e.g. "ProjectNumber") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *ObjectAccessControlProjectTeam) MarshalJSON() ([]byte, error) { + type noMethod ObjectAccessControlProjectTeam + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +// ObjectAccessControls: An access-control list. +type ObjectAccessControls struct { + // Items: The list of items. 
+ Items []interface{} `json:"items,omitempty"` + + // Kind: The kind of item this is. For lists of object access control + // entries, this is always storage#objectAccessControls. + Kind string `json:"kind,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Items") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *ObjectAccessControls) MarshalJSON() ([]byte, error) { + type noMethod ObjectAccessControls + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +// Objects: A list of objects. +type Objects struct { + // Items: The list of items. + Items []*Object `json:"items,omitempty"` + + // Kind: The kind of item this is. For lists of objects, this is always + // storage#objects. + Kind string `json:"kind,omitempty"` + + // NextPageToken: The continuation token, used to page through large + // result sets. Provide this value in a subsequent request to return the + // next page of results. + NextPageToken string `json:"nextPageToken,omitempty"` + + // Prefixes: The list of prefixes of objects matching-but-not-listed up + // to and including the requested delimiter. + Prefixes []string `json:"prefixes,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Items") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *Objects) MarshalJSON() ([]byte, error) { + type noMethod Objects + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +// RewriteResponse: A rewrite response. +type RewriteResponse struct { + // Done: true if the copy is finished; otherwise, false if the copy is + // in progress. This property is always present in the response. + Done bool `json:"done,omitempty"` + + // Kind: The kind of item this is. + Kind string `json:"kind,omitempty"` + + // ObjectSize: The total size of the object being copied in bytes. This + // property is always present in the response. + ObjectSize uint64 `json:"objectSize,omitempty,string"` + + // Resource: A resource containing the metadata for the copied-to + // object. This property is present in the response only when copying + // completes. + Resource *Object `json:"resource,omitempty"` + + // RewriteToken: A token to use in subsequent requests to continue + // copying data. This token is present in the response only when there + // is more data to copy. + RewriteToken string `json:"rewriteToken,omitempty"` + + // TotalBytesRewritten: The total bytes written so far, which can be + // used to provide a waiting user with a progress indicator. This + // property is always present in the response. 
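+ //
+ // Together with Done and RewriteToken this supports a polling loop; a
+ // hedged sketch (bucket and object names are placeholders):
+ //
+ //	call := svc.Objects.Rewrite("src-bucket", "src-obj", "dst-bucket", "dst-obj", &Object{})
+ //	for {
+ //		resp, err := call.Do()
+ //		if err != nil {
+ //			// handle error
+ //		}
+ //		if resp.Done {
+ //			break
+ //		}
+ //		call.RewriteToken(resp.RewriteToken)
+ //	}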
+ TotalBytesRewritten uint64 `json:"totalBytesRewritten,omitempty,string"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Done") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *RewriteResponse) MarshalJSON() ([]byte, error) { + type noMethod RewriteResponse + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +// method id "storage.bucketAccessControls.delete": + +type BucketAccessControlsDeleteCall struct { + s *Service + bucket string + entity string + urlParams_ gensupport.URLParams + ctx_ context.Context +} + +// Delete: Permanently deletes the ACL entry for the specified entity on +// the specified bucket. +func (r *BucketAccessControlsService) Delete(bucket string, entity string) *BucketAccessControlsDeleteCall { + c := &BucketAccessControlsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.bucket = bucket + c.entity = entity + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *BucketAccessControlsDeleteCall) Fields(s ...googleapi.Field) *BucketAccessControlsDeleteCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *BucketAccessControlsDeleteCall) Context(ctx context.Context) *BucketAccessControlsDeleteCall { + c.ctx_ = ctx + return c +} + +func (c *BucketAccessControlsDeleteCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/acl/{entity}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("DELETE", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "bucket": c.bucket, + "entity": c.entity, + }) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "storage.bucketAccessControls.delete" call. +func (c *BucketAccessControlsDeleteCall) Do(opts ...googleapi.CallOption) error { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if err != nil { + return err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return err + } + return nil + // { + // "description": "Permanently deletes the ACL entry for the specified entity on the specified bucket.", + // "httpMethod": "DELETE", + // "id": "storage.bucketAccessControls.delete", + // "parameterOrder": [ + // "bucket", + // "entity" + // ], + // "parameters": { + // "bucket": { + // "description": "Name of a bucket.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "entity": { + // "description": "The entity holding the permission. 
Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.",
+ // "location": "path",
+ // "required": true,
+ // "type": "string"
+ // }
+ // },
+ // "path": "b/{bucket}/acl/{entity}",
+ // "scopes": [
+ // "https://www.googleapis.com/auth/cloud-platform",
+ // "https://www.googleapis.com/auth/devstorage.full_control"
+ // ]
+ // }
+
+}
+
+// method id "storage.bucketAccessControls.get":
+
+type BucketAccessControlsGetCall struct {
+ s *Service
+ bucket string
+ entity string
+ urlParams_ gensupport.URLParams
+ ifNoneMatch_ string
+ ctx_ context.Context
+}
+
+// Get: Returns the ACL entry for the specified entity on the specified
+// bucket.
+func (r *BucketAccessControlsService) Get(bucket string, entity string) *BucketAccessControlsGetCall {
+ c := &BucketAccessControlsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)}
+ c.bucket = bucket
+ c.entity = entity
+ return c
+}
+
+// Fields allows partial responses to be retrieved. See
+// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *BucketAccessControlsGetCall) Fields(s ...googleapi.Field) *BucketAccessControlsGetCall {
+ c.urlParams_.Set("fields", googleapi.CombineFields(s))
+ return c
+}
+
+// IfNoneMatch sets the optional parameter which makes the operation
+// fail if the object's ETag matches the given value. This is useful for
+// getting updates only after the object has changed since the last
+// request. Use googleapi.IsNotModified to check whether the response
+// error from Do is the result of If-None-Match.
+func (c *BucketAccessControlsGetCall) IfNoneMatch(entityTag string) *BucketAccessControlsGetCall {
+ c.ifNoneMatch_ = entityTag
+ return c
+}
+
+// Context sets the context to be used in this call's Do method. Any
+// pending HTTP request will be aborted if the provided context is
+// canceled.
+func (c *BucketAccessControlsGetCall) Context(ctx context.Context) *BucketAccessControlsGetCall {
+ c.ctx_ = ctx
+ return c
+}
+
+func (c *BucketAccessControlsGetCall) doRequest(alt string) (*http.Response, error) {
+ var body io.Reader = nil
+ c.urlParams_.Set("alt", alt)
+ urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/acl/{entity}")
+ urls += "?" + c.urlParams_.Encode()
+ req, _ := http.NewRequest("GET", urls, body)
+ googleapi.Expand(req.URL, map[string]string{
+ "bucket": c.bucket,
+ "entity": c.entity,
+ })
+ req.Header.Set("User-Agent", c.s.userAgent())
+ if c.ifNoneMatch_ != "" {
+ req.Header.Set("If-None-Match", c.ifNoneMatch_)
+ }
+ if c.ctx_ != nil {
+ return ctxhttp.Do(c.ctx_, c.s.client, req)
+ }
+ return c.s.client.Do(req)
+}
+
+// Do executes the "storage.bucketAccessControls.get" call.
+// Exactly one of *BucketAccessControl or error will be non-nil. Any
+// non-2xx status code is an error. Response headers are in either
+// *BucketAccessControl.ServerResponse.Header or (if a response was
+// returned at all) in error.(*googleapi.Error).Header. Use
+// googleapi.IsNotModified to check whether the returned error was
+// because http.StatusNotModified was returned.
+func (c *BucketAccessControlsGetCall) Do(opts ...googleapi.CallOption) (*BucketAccessControl, error) {
+ gensupport.SetOptions(c.urlParams_, opts...)
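+ // Note: the request/decode flow below maps a 304 Not Modified response
+ // (possible when IfNoneMatch was set) to a *googleapi.Error instead of
+ // decoding a body, so callers can test it with googleapi.IsNotModified.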
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &BucketAccessControl{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Returns the ACL entry for the specified entity on the specified bucket.", + // "httpMethod": "GET", + // "id": "storage.bucketAccessControls.get", + // "parameterOrder": [ + // "bucket", + // "entity" + // ], + // "parameters": { + // "bucket": { + // "description": "Name of a bucket.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "entity": { + // "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "b/{bucket}/acl/{entity}", + // "response": { + // "$ref": "BucketAccessControl" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/devstorage.full_control" + // ] + // } + +} + +// method id "storage.bucketAccessControls.insert": + +type BucketAccessControlsInsertCall struct { + s *Service + bucket string + bucketaccesscontrol *BucketAccessControl + urlParams_ gensupport.URLParams + ctx_ context.Context +} + +// Insert: Creates a new ACL entry on the specified bucket. +func (r *BucketAccessControlsService) Insert(bucket string, bucketaccesscontrol *BucketAccessControl) *BucketAccessControlsInsertCall { + c := &BucketAccessControlsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.bucket = bucket + c.bucketaccesscontrol = bucketaccesscontrol + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *BucketAccessControlsInsertCall) Fields(s ...googleapi.Field) *BucketAccessControlsInsertCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *BucketAccessControlsInsertCall) Context(ctx context.Context) *BucketAccessControlsInsertCall { + c.ctx_ = ctx + return c +} + +func (c *BucketAccessControlsInsertCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.bucketaccesscontrol) + if err != nil { + return nil, err + } + ctype := "application/json" + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/acl") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "bucket": c.bucket, + }) + req.Header.Set("Content-Type", ctype) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "storage.bucketAccessControls.insert" call. 
+// Exactly one of *BucketAccessControl or error will be non-nil. Any
+// non-2xx status code is an error. Response headers are in either
+// *BucketAccessControl.ServerResponse.Header or (if a response was
+// returned at all) in error.(*googleapi.Error).Header. Use
+// googleapi.IsNotModified to check whether the returned error was
+// because http.StatusNotModified was returned.
+func (c *BucketAccessControlsInsertCall) Do(opts ...googleapi.CallOption) (*BucketAccessControl, error) {
+ gensupport.SetOptions(c.urlParams_, opts...)
+ res, err := c.doRequest("json")
+ if res != nil && res.StatusCode == http.StatusNotModified {
+ if res.Body != nil {
+ res.Body.Close()
+ }
+ return nil, &googleapi.Error{
+ Code: res.StatusCode,
+ Header: res.Header,
+ }
+ }
+ if err != nil {
+ return nil, err
+ }
+ defer googleapi.CloseBody(res)
+ if err := googleapi.CheckResponse(res); err != nil {
+ return nil, err
+ }
+ ret := &BucketAccessControl{
+ ServerResponse: googleapi.ServerResponse{
+ Header: res.Header,
+ HTTPStatusCode: res.StatusCode,
+ },
+ }
+ if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
+ return nil, err
+ }
+ return ret, nil
+ // {
+ // "description": "Creates a new ACL entry on the specified bucket.",
+ // "httpMethod": "POST",
+ // "id": "storage.bucketAccessControls.insert",
+ // "parameterOrder": [
+ // "bucket"
+ // ],
+ // "parameters": {
+ // "bucket": {
+ // "description": "Name of a bucket.",
+ // "location": "path",
+ // "required": true,
+ // "type": "string"
+ // }
+ // },
+ // "path": "b/{bucket}/acl",
+ // "request": {
+ // "$ref": "BucketAccessControl"
+ // },
+ // "response": {
+ // "$ref": "BucketAccessControl"
+ // },
+ // "scopes": [
+ // "https://www.googleapis.com/auth/cloud-platform",
+ // "https://www.googleapis.com/auth/devstorage.full_control"
+ // ]
+ // }
+
+}
+
+// method id "storage.bucketAccessControls.list":
+
+type BucketAccessControlsListCall struct {
+ s *Service
+ bucket string
+ urlParams_ gensupport.URLParams
+ ifNoneMatch_ string
+ ctx_ context.Context
+}
+
+// List: Retrieves ACL entries on the specified bucket.
+func (r *BucketAccessControlsService) List(bucket string) *BucketAccessControlsListCall {
+ c := &BucketAccessControlsListCall{s: r.s, urlParams_: make(gensupport.URLParams)}
+ c.bucket = bucket
+ return c
+}
+
+// Fields allows partial responses to be retrieved. See
+// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *BucketAccessControlsListCall) Fields(s ...googleapi.Field) *BucketAccessControlsListCall {
+ c.urlParams_.Set("fields", googleapi.CombineFields(s))
+ return c
+}
+
+// IfNoneMatch sets the optional parameter which makes the operation
+// fail if the object's ETag matches the given value. This is useful for
+// getting updates only after the object has changed since the last
+// request. Use googleapi.IsNotModified to check whether the response
+// error from Do is the result of If-None-Match.
+func (c *BucketAccessControlsListCall) IfNoneMatch(entityTag string) *BucketAccessControlsListCall {
+ c.ifNoneMatch_ = entityTag
+ return c
+}
+
+// Context sets the context to be used in this call's Do method. Any
+// pending HTTP request will be aborted if the provided context is
+// canceled.
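+//
+// A minimal usage sketch, assuming svc is a *Service obtained from New
+// (the bucket name and timeout here are illustrative only):
+//
+//    ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
+//    defer cancel()
+//    acls, err := svc.BucketAccessControls.List("my-bucket").Context(ctx).Do()
+//    if err != nil {
+//        // a canceled or expired ctx surfaces here as an error
+//    }
+//    _ = acls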
+func (c *BucketAccessControlsListCall) Context(ctx context.Context) *BucketAccessControlsListCall { + c.ctx_ = ctx + return c +} + +func (c *BucketAccessControlsListCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/acl") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "bucket": c.bucket, + }) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + req.Header.Set("If-None-Match", c.ifNoneMatch_) + } + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "storage.bucketAccessControls.list" call. +// Exactly one of *BucketAccessControls or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *BucketAccessControls.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *BucketAccessControlsListCall) Do(opts ...googleapi.CallOption) (*BucketAccessControls, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &BucketAccessControls{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Retrieves ACL entries on the specified bucket.", + // "httpMethod": "GET", + // "id": "storage.bucketAccessControls.list", + // "parameterOrder": [ + // "bucket" + // ], + // "parameters": { + // "bucket": { + // "description": "Name of a bucket.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "b/{bucket}/acl", + // "response": { + // "$ref": "BucketAccessControls" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/devstorage.full_control" + // ] + // } + +} + +// method id "storage.bucketAccessControls.patch": + +type BucketAccessControlsPatchCall struct { + s *Service + bucket string + entity string + bucketaccesscontrol *BucketAccessControl + urlParams_ gensupport.URLParams + ctx_ context.Context +} + +// Patch: Updates an ACL entry on the specified bucket. This method +// supports patch semantics. +func (r *BucketAccessControlsService) Patch(bucket string, entity string, bucketaccesscontrol *BucketAccessControl) *BucketAccessControlsPatchCall { + c := &BucketAccessControlsPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.bucket = bucket + c.entity = entity + c.bucketaccesscontrol = bucketaccesscontrol + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. 
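+//
+// For example, a hedged sketch of requesting a partial response while
+// patching an entry (field names follow the resource's JSON tags; the
+// bucket, entity, and role values are illustrative):
+//
+//    acl, err := svc.BucketAccessControls.
+//        Patch("my-bucket", "allUsers", &BucketAccessControl{Role: "READER"}).
+//        Fields("entity", "role").
+//        Do()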
+func (c *BucketAccessControlsPatchCall) Fields(s ...googleapi.Field) *BucketAccessControlsPatchCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *BucketAccessControlsPatchCall) Context(ctx context.Context) *BucketAccessControlsPatchCall { + c.ctx_ = ctx + return c +} + +func (c *BucketAccessControlsPatchCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.bucketaccesscontrol) + if err != nil { + return nil, err + } + ctype := "application/json" + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/acl/{entity}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("PATCH", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "bucket": c.bucket, + "entity": c.entity, + }) + req.Header.Set("Content-Type", ctype) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "storage.bucketAccessControls.patch" call. +// Exactly one of *BucketAccessControl or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *BucketAccessControl.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *BucketAccessControlsPatchCall) Do(opts ...googleapi.CallOption) (*BucketAccessControl, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &BucketAccessControl{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Updates an ACL entry on the specified bucket. This method supports patch semantics.", + // "httpMethod": "PATCH", + // "id": "storage.bucketAccessControls.patch", + // "parameterOrder": [ + // "bucket", + // "entity" + // ], + // "parameters": { + // "bucket": { + // "description": "Name of a bucket.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "entity": { + // "description": "The entity holding the permission. 
Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "b/{bucket}/acl/{entity}", + // "request": { + // "$ref": "BucketAccessControl" + // }, + // "response": { + // "$ref": "BucketAccessControl" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/devstorage.full_control" + // ] + // } + +} + +// method id "storage.bucketAccessControls.update": + +type BucketAccessControlsUpdateCall struct { + s *Service + bucket string + entity string + bucketaccesscontrol *BucketAccessControl + urlParams_ gensupport.URLParams + ctx_ context.Context +} + +// Update: Updates an ACL entry on the specified bucket. +func (r *BucketAccessControlsService) Update(bucket string, entity string, bucketaccesscontrol *BucketAccessControl) *BucketAccessControlsUpdateCall { + c := &BucketAccessControlsUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.bucket = bucket + c.entity = entity + c.bucketaccesscontrol = bucketaccesscontrol + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *BucketAccessControlsUpdateCall) Fields(s ...googleapi.Field) *BucketAccessControlsUpdateCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *BucketAccessControlsUpdateCall) Context(ctx context.Context) *BucketAccessControlsUpdateCall { + c.ctx_ = ctx + return c +} + +func (c *BucketAccessControlsUpdateCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.bucketaccesscontrol) + if err != nil { + return nil, err + } + ctype := "application/json" + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/acl/{entity}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("PUT", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "bucket": c.bucket, + "entity": c.entity, + }) + req.Header.Set("Content-Type", ctype) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "storage.bucketAccessControls.update" call. +// Exactly one of *BucketAccessControl or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *BucketAccessControl.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *BucketAccessControlsUpdateCall) Do(opts ...googleapi.CallOption) (*BucketAccessControl, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
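+ // The generated pattern below mirrors the other typed calls in this
+ // file: a 304 status short-circuits to *googleapi.Error, any other
+ // non-2xx status is rejected by CheckResponse, and a success decodes
+ // into a BucketAccessControl carrying the response headers.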
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &BucketAccessControl{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Updates an ACL entry on the specified bucket.", + // "httpMethod": "PUT", + // "id": "storage.bucketAccessControls.update", + // "parameterOrder": [ + // "bucket", + // "entity" + // ], + // "parameters": { + // "bucket": { + // "description": "Name of a bucket.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "entity": { + // "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "b/{bucket}/acl/{entity}", + // "request": { + // "$ref": "BucketAccessControl" + // }, + // "response": { + // "$ref": "BucketAccessControl" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/devstorage.full_control" + // ] + // } + +} + +// method id "storage.buckets.delete": + +type BucketsDeleteCall struct { + s *Service + bucket string + urlParams_ gensupport.URLParams + ctx_ context.Context +} + +// Delete: Permanently deletes an empty bucket. +func (r *BucketsService) Delete(bucket string) *BucketsDeleteCall { + c := &BucketsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.bucket = bucket + return c +} + +// IfMetagenerationMatch sets the optional parameter +// "ifMetagenerationMatch": If set, only deletes the bucket if its +// metageneration matches this value. +func (c *BucketsDeleteCall) IfMetagenerationMatch(ifMetagenerationMatch int64) *BucketsDeleteCall { + c.urlParams_.Set("ifMetagenerationMatch", fmt.Sprint(ifMetagenerationMatch)) + return c +} + +// IfMetagenerationNotMatch sets the optional parameter +// "ifMetagenerationNotMatch": If set, only deletes the bucket if its +// metageneration does not match this value. +func (c *BucketsDeleteCall) IfMetagenerationNotMatch(ifMetagenerationNotMatch int64) *BucketsDeleteCall { + c.urlParams_.Set("ifMetagenerationNotMatch", fmt.Sprint(ifMetagenerationNotMatch)) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *BucketsDeleteCall) Fields(s ...googleapi.Field) *BucketsDeleteCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *BucketsDeleteCall) Context(ctx context.Context) *BucketsDeleteCall { + c.ctx_ = ctx + return c +} + +func (c *BucketsDeleteCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}") + urls += "?" 
+ c.urlParams_.Encode() + req, _ := http.NewRequest("DELETE", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "bucket": c.bucket, + }) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "storage.buckets.delete" call. +func (c *BucketsDeleteCall) Do(opts ...googleapi.CallOption) error { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if err != nil { + return err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return err + } + return nil + // { + // "description": "Permanently deletes an empty bucket.", + // "httpMethod": "DELETE", + // "id": "storage.buckets.delete", + // "parameterOrder": [ + // "bucket" + // ], + // "parameters": { + // "bucket": { + // "description": "Name of a bucket.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "ifMetagenerationMatch": { + // "description": "If set, only deletes the bucket if its metageneration matches this value.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "ifMetagenerationNotMatch": { + // "description": "If set, only deletes the bucket if its metageneration does not match this value.", + // "format": "int64", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "b/{bucket}", + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/devstorage.full_control", + // "https://www.googleapis.com/auth/devstorage.read_write" + // ] + // } + +} + +// method id "storage.buckets.get": + +type BucketsGetCall struct { + s *Service + bucket string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context +} + +// Get: Returns metadata for the specified bucket. +func (r *BucketsService) Get(bucket string) *BucketsGetCall { + c := &BucketsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.bucket = bucket + return c +} + +// IfMetagenerationMatch sets the optional parameter +// "ifMetagenerationMatch": Makes the return of the bucket metadata +// conditional on whether the bucket's current metageneration matches +// the given value. +func (c *BucketsGetCall) IfMetagenerationMatch(ifMetagenerationMatch int64) *BucketsGetCall { + c.urlParams_.Set("ifMetagenerationMatch", fmt.Sprint(ifMetagenerationMatch)) + return c +} + +// IfMetagenerationNotMatch sets the optional parameter +// "ifMetagenerationNotMatch": Makes the return of the bucket metadata +// conditional on whether the bucket's current metageneration does not +// match the given value. +func (c *BucketsGetCall) IfMetagenerationNotMatch(ifMetagenerationNotMatch int64) *BucketsGetCall { + c.urlParams_.Set("ifMetagenerationNotMatch", fmt.Sprint(ifMetagenerationNotMatch)) + return c +} + +// Projection sets the optional parameter "projection": Set of +// properties to return. Defaults to noAcl. +// +// Possible values: +// "full" - Include all properties. +// "noAcl" - Omit acl and defaultObjectAcl properties. +func (c *BucketsGetCall) Projection(projection string) *BucketsGetCall { + c.urlParams_.Set("projection", projection) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. 
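+//
+// A brief sketch of a partial bucket fetch (assuming "name" and
+// "location" are valid JSON field names on the Bucket resource, as
+// elsewhere in this package):
+//
+//    b, err := svc.Buckets.Get("my-bucket").Fields("name", "location").Do()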
+func (c *BucketsGetCall) Fields(s ...googleapi.Field) *BucketsGetCall {
+ c.urlParams_.Set("fields", googleapi.CombineFields(s))
+ return c
+}
+
+// IfNoneMatch sets the optional parameter which makes the operation
+// fail if the object's ETag matches the given value. This is useful for
+// getting updates only after the object has changed since the last
+// request. Use googleapi.IsNotModified to check whether the response
+// error from Do is the result of If-None-Match.
+func (c *BucketsGetCall) IfNoneMatch(entityTag string) *BucketsGetCall {
+ c.ifNoneMatch_ = entityTag
+ return c
+}
+
+// Context sets the context to be used in this call's Do method. Any
+// pending HTTP request will be aborted if the provided context is
+// canceled.
+func (c *BucketsGetCall) Context(ctx context.Context) *BucketsGetCall {
+ c.ctx_ = ctx
+ return c
+}
+
+func (c *BucketsGetCall) doRequest(alt string) (*http.Response, error) {
+ var body io.Reader = nil
+ c.urlParams_.Set("alt", alt)
+ urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}")
+ urls += "?" + c.urlParams_.Encode()
+ req, _ := http.NewRequest("GET", urls, body)
+ googleapi.Expand(req.URL, map[string]string{
+ "bucket": c.bucket,
+ })
+ req.Header.Set("User-Agent", c.s.userAgent())
+ if c.ifNoneMatch_ != "" {
+ req.Header.Set("If-None-Match", c.ifNoneMatch_)
+ }
+ if c.ctx_ != nil {
+ return ctxhttp.Do(c.ctx_, c.s.client, req)
+ }
+ return c.s.client.Do(req)
+}
+
+// Do executes the "storage.buckets.get" call.
+// Exactly one of *Bucket or error will be non-nil. Any non-2xx status
+// code is an error. Response headers are in either
+// *Bucket.ServerResponse.Header or (if a response was returned at all)
+// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to
+// check whether the returned error was because http.StatusNotModified
+// was returned.
+func (c *BucketsGetCall) Do(opts ...googleapi.CallOption) (*Bucket, error) {
+ gensupport.SetOptions(c.urlParams_, opts...)
+ res, err := c.doRequest("json")
+ if res != nil && res.StatusCode == http.StatusNotModified {
+ if res.Body != nil {
+ res.Body.Close()
+ }
+ return nil, &googleapi.Error{
+ Code: res.StatusCode,
+ Header: res.Header,
+ }
+ }
+ if err != nil {
+ return nil, err
+ }
+ defer googleapi.CloseBody(res)
+ if err := googleapi.CheckResponse(res); err != nil {
+ return nil, err
+ }
+ ret := &Bucket{
+ ServerResponse: googleapi.ServerResponse{
+ Header: res.Header,
+ HTTPStatusCode: res.StatusCode,
+ },
+ }
+ if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
+ return nil, err
+ }
+ return ret, nil
+ // {
+ // "description": "Returns metadata for the specified bucket.",
+ // "httpMethod": "GET",
+ // "id": "storage.buckets.get",
+ // "parameterOrder": [
+ // "bucket"
+ // ],
+ // "parameters": {
+ // "bucket": {
+ // "description": "Name of a bucket.",
+ // "location": "path",
+ // "required": true,
+ // "type": "string"
+ // },
+ // "ifMetagenerationMatch": {
+ // "description": "Makes the return of the bucket metadata conditional on whether the bucket's current metageneration matches the given value.",
+ // "format": "int64",
+ // "location": "query",
+ // "type": "string"
+ // },
+ // "ifMetagenerationNotMatch": {
+ // "description": "Makes the return of the bucket metadata conditional on whether the bucket's current metageneration does not match the given value.",
+ // "format": "int64",
+ // "location": "query",
+ // "type": "string"
+ // },
+ // "projection": {
+ // "description": "Set of properties to return. 
Defaults to noAcl.", + // "enum": [ + // "full", + // "noAcl" + // ], + // "enumDescriptions": [ + // "Include all properties.", + // "Omit acl and defaultObjectAcl properties." + // ], + // "location": "query", + // "type": "string" + // } + // }, + // "path": "b/{bucket}", + // "response": { + // "$ref": "Bucket" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/cloud-platform.read-only", + // "https://www.googleapis.com/auth/devstorage.full_control", + // "https://www.googleapis.com/auth/devstorage.read_only", + // "https://www.googleapis.com/auth/devstorage.read_write" + // ] + // } + +} + +// method id "storage.buckets.insert": + +type BucketsInsertCall struct { + s *Service + bucket *Bucket + urlParams_ gensupport.URLParams + ctx_ context.Context +} + +// Insert: Creates a new bucket. +func (r *BucketsService) Insert(projectid string, bucket *Bucket) *BucketsInsertCall { + c := &BucketsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.urlParams_.Set("project", projectid) + c.bucket = bucket + return c +} + +// PredefinedAcl sets the optional parameter "predefinedAcl": Apply a +// predefined set of access controls to this bucket. +// +// Possible values: +// "authenticatedRead" - Project team owners get OWNER access, and +// allAuthenticatedUsers get READER access. +// "private" - Project team owners get OWNER access. +// "projectPrivate" - Project team members get access according to +// their roles. +// "publicRead" - Project team owners get OWNER access, and allUsers +// get READER access. +// "publicReadWrite" - Project team owners get OWNER access, and +// allUsers get WRITER access. +func (c *BucketsInsertCall) PredefinedAcl(predefinedAcl string) *BucketsInsertCall { + c.urlParams_.Set("predefinedAcl", predefinedAcl) + return c +} + +// PredefinedDefaultObjectAcl sets the optional parameter +// "predefinedDefaultObjectAcl": Apply a predefined set of default +// object access controls to this bucket. +// +// Possible values: +// "authenticatedRead" - Object owner gets OWNER access, and +// allAuthenticatedUsers get READER access. +// "bucketOwnerFullControl" - Object owner gets OWNER access, and +// project team owners get OWNER access. +// "bucketOwnerRead" - Object owner gets OWNER access, and project +// team owners get READER access. +// "private" - Object owner gets OWNER access. +// "projectPrivate" - Object owner gets OWNER access, and project team +// members get access according to their roles. +// "publicRead" - Object owner gets OWNER access, and allUsers get +// READER access. +func (c *BucketsInsertCall) PredefinedDefaultObjectAcl(predefinedDefaultObjectAcl string) *BucketsInsertCall { + c.urlParams_.Set("predefinedDefaultObjectAcl", predefinedDefaultObjectAcl) + return c +} + +// Projection sets the optional parameter "projection": Set of +// properties to return. Defaults to noAcl, unless the bucket resource +// specifies acl or defaultObjectAcl properties, when it defaults to +// full. +// +// Possible values: +// "full" - Include all properties. +// "noAcl" - Omit acl and defaultObjectAcl properties. +func (c *BucketsInsertCall) Projection(projection string) *BucketsInsertCall { + c.urlParams_.Set("projection", projection) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. 
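+//
+// A minimal creation sketch (assuming Bucket's Name field maps to the
+// JSON "name" property, as elsewhere in this package; the project and
+// bucket names are illustrative):
+//
+//    b, err := svc.Buckets.Insert("my-project", &Bucket{Name: "my-bucket"}).Do()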
+func (c *BucketsInsertCall) Fields(s ...googleapi.Field) *BucketsInsertCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *BucketsInsertCall) Context(ctx context.Context) *BucketsInsertCall { + c.ctx_ = ctx + return c +} + +func (c *BucketsInsertCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.bucket) + if err != nil { + return nil, err + } + ctype := "application/json" + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "b") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + googleapi.SetOpaque(req.URL) + req.Header.Set("Content-Type", ctype) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "storage.buckets.insert" call. +// Exactly one of *Bucket or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Bucket.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *BucketsInsertCall) Do(opts ...googleapi.CallOption) (*Bucket, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Bucket{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Creates a new bucket.", + // "httpMethod": "POST", + // "id": "storage.buckets.insert", + // "parameterOrder": [ + // "project" + // ], + // "parameters": { + // "predefinedAcl": { + // "description": "Apply a predefined set of access controls to this bucket.", + // "enum": [ + // "authenticatedRead", + // "private", + // "projectPrivate", + // "publicRead", + // "publicReadWrite" + // ], + // "enumDescriptions": [ + // "Project team owners get OWNER access, and allAuthenticatedUsers get READER access.", + // "Project team owners get OWNER access.", + // "Project team members get access according to their roles.", + // "Project team owners get OWNER access, and allUsers get READER access.", + // "Project team owners get OWNER access, and allUsers get WRITER access." 
+ // ], + // "location": "query", + // "type": "string" + // }, + // "predefinedDefaultObjectAcl": { + // "description": "Apply a predefined set of default object access controls to this bucket.", + // "enum": [ + // "authenticatedRead", + // "bucketOwnerFullControl", + // "bucketOwnerRead", + // "private", + // "projectPrivate", + // "publicRead" + // ], + // "enumDescriptions": [ + // "Object owner gets OWNER access, and allAuthenticatedUsers get READER access.", + // "Object owner gets OWNER access, and project team owners get OWNER access.", + // "Object owner gets OWNER access, and project team owners get READER access.", + // "Object owner gets OWNER access.", + // "Object owner gets OWNER access, and project team members get access according to their roles.", + // "Object owner gets OWNER access, and allUsers get READER access." + // ], + // "location": "query", + // "type": "string" + // }, + // "project": { + // "description": "A valid API project identifier.", + // "location": "query", + // "required": true, + // "type": "string" + // }, + // "projection": { + // "description": "Set of properties to return. Defaults to noAcl, unless the bucket resource specifies acl or defaultObjectAcl properties, when it defaults to full.", + // "enum": [ + // "full", + // "noAcl" + // ], + // "enumDescriptions": [ + // "Include all properties.", + // "Omit acl and defaultObjectAcl properties." + // ], + // "location": "query", + // "type": "string" + // } + // }, + // "path": "b", + // "request": { + // "$ref": "Bucket" + // }, + // "response": { + // "$ref": "Bucket" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/devstorage.full_control", + // "https://www.googleapis.com/auth/devstorage.read_write" + // ] + // } + +} + +// method id "storage.buckets.list": + +type BucketsListCall struct { + s *Service + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context +} + +// List: Retrieves a list of buckets for a given project. +func (r *BucketsService) List(projectid string) *BucketsListCall { + c := &BucketsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.urlParams_.Set("project", projectid) + return c +} + +// MaxResults sets the optional parameter "maxResults": Maximum number +// of buckets to return. +func (c *BucketsListCall) MaxResults(maxResults int64) *BucketsListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// PageToken sets the optional parameter "pageToken": A +// previously-returned page token representing part of the larger set of +// results to view. +func (c *BucketsListCall) PageToken(pageToken string) *BucketsListCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// Prefix sets the optional parameter "prefix": Filter results to +// buckets whose names begin with this prefix. +func (c *BucketsListCall) Prefix(prefix string) *BucketsListCall { + c.urlParams_.Set("prefix", prefix) + return c +} + +// Projection sets the optional parameter "projection": Set of +// properties to return. Defaults to noAcl. +// +// Possible values: +// "full" - Include all properties. +// "noAcl" - Omit acl and defaultObjectAcl properties. +func (c *BucketsListCall) Projection(projection string) *BucketsListCall { + c.urlParams_.Set("projection", projection) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. 
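+//
+// Results are paginated; a rough sketch of walking every page with
+// PageToken and NextPageToken, assuming the Buckets list type carries
+// Items and NextPageToken like the Objects type above (error handling
+// abbreviated, names illustrative):
+//
+//    call := svc.Buckets.List("my-project").MaxResults(100)
+//    for {
+//        bs, err := call.Do()
+//        if err != nil {
+//            break
+//        }
+//        for _, b := range bs.Items {
+//            fmt.Println(b.Name)
+//        }
+//        if bs.NextPageToken == "" {
+//            break
+//        }
+//        call.PageToken(bs.NextPageToken)
+//    }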
+func (c *BucketsListCall) Fields(s ...googleapi.Field) *BucketsListCall {
+ c.urlParams_.Set("fields", googleapi.CombineFields(s))
+ return c
+}
+
+// IfNoneMatch sets the optional parameter which makes the operation
+// fail if the object's ETag matches the given value. This is useful for
+// getting updates only after the object has changed since the last
+// request. Use googleapi.IsNotModified to check whether the response
+// error from Do is the result of If-None-Match.
+func (c *BucketsListCall) IfNoneMatch(entityTag string) *BucketsListCall {
+ c.ifNoneMatch_ = entityTag
+ return c
+}
+
+// Context sets the context to be used in this call's Do method. Any
+// pending HTTP request will be aborted if the provided context is
+// canceled.
+func (c *BucketsListCall) Context(ctx context.Context) *BucketsListCall {
+ c.ctx_ = ctx
+ return c
+}
+
+func (c *BucketsListCall) doRequest(alt string) (*http.Response, error) {
+ var body io.Reader = nil
+ c.urlParams_.Set("alt", alt)
+ urls := googleapi.ResolveRelative(c.s.BasePath, "b")
+ urls += "?" + c.urlParams_.Encode()
+ req, _ := http.NewRequest("GET", urls, body)
+ googleapi.SetOpaque(req.URL)
+ req.Header.Set("User-Agent", c.s.userAgent())
+ if c.ifNoneMatch_ != "" {
+ req.Header.Set("If-None-Match", c.ifNoneMatch_)
+ }
+ if c.ctx_ != nil {
+ return ctxhttp.Do(c.ctx_, c.s.client, req)
+ }
+ return c.s.client.Do(req)
+}
+
+// Do executes the "storage.buckets.list" call.
+// Exactly one of *Buckets or error will be non-nil. Any non-2xx status
+// code is an error. Response headers are in either
+// *Buckets.ServerResponse.Header or (if a response was returned at all)
+// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to
+// check whether the returned error was because http.StatusNotModified
+// was returned.
+func (c *BucketsListCall) Do(opts ...googleapi.CallOption) (*Buckets, error) {
+ gensupport.SetOptions(c.urlParams_, opts...)
+ res, err := c.doRequest("json")
+ if res != nil && res.StatusCode == http.StatusNotModified {
+ if res.Body != nil {
+ res.Body.Close()
+ }
+ return nil, &googleapi.Error{
+ Code: res.StatusCode,
+ Header: res.Header,
+ }
+ }
+ if err != nil {
+ return nil, err
+ }
+ defer googleapi.CloseBody(res)
+ if err := googleapi.CheckResponse(res); err != nil {
+ return nil, err
+ }
+ ret := &Buckets{
+ ServerResponse: googleapi.ServerResponse{
+ Header: res.Header,
+ HTTPStatusCode: res.StatusCode,
+ },
+ }
+ if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
+ return nil, err
+ }
+ return ret, nil
+ // {
+ // "description": "Retrieves a list of buckets for a given project.",
+ // "httpMethod": "GET",
+ // "id": "storage.buckets.list",
+ // "parameterOrder": [
+ // "project"
+ // ],
+ // "parameters": {
+ // "maxResults": {
+ // "description": "Maximum number of buckets to return.",
+ // "format": "uint32",
+ // "location": "query",
+ // "minimum": "0",
+ // "type": "integer"
+ // },
+ // "pageToken": {
+ // "description": "A previously-returned page token representing part of the larger set of results to view.",
+ // "location": "query",
+ // "type": "string"
+ // },
+ // "prefix": {
+ // "description": "Filter results to buckets whose names begin with this prefix.",
+ // "location": "query",
+ // "type": "string"
+ // },
+ // "project": {
+ // "description": "A valid API project identifier.",
+ // "location": "query",
+ // "required": true,
+ // "type": "string"
+ // },
+ // "projection": {
+ // "description": "Set of properties to return. 
Defaults to noAcl.", + // "enum": [ + // "full", + // "noAcl" + // ], + // "enumDescriptions": [ + // "Include all properties.", + // "Omit acl and defaultObjectAcl properties." + // ], + // "location": "query", + // "type": "string" + // } + // }, + // "path": "b", + // "response": { + // "$ref": "Buckets" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/cloud-platform.read-only", + // "https://www.googleapis.com/auth/devstorage.full_control", + // "https://www.googleapis.com/auth/devstorage.read_only", + // "https://www.googleapis.com/auth/devstorage.read_write" + // ] + // } + +} + +// method id "storage.buckets.patch": + +type BucketsPatchCall struct { + s *Service + bucket string + bucket2 *Bucket + urlParams_ gensupport.URLParams + ctx_ context.Context +} + +// Patch: Updates a bucket. This method supports patch semantics. +func (r *BucketsService) Patch(bucket string, bucket2 *Bucket) *BucketsPatchCall { + c := &BucketsPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.bucket = bucket + c.bucket2 = bucket2 + return c +} + +// IfMetagenerationMatch sets the optional parameter +// "ifMetagenerationMatch": Makes the return of the bucket metadata +// conditional on whether the bucket's current metageneration matches +// the given value. +func (c *BucketsPatchCall) IfMetagenerationMatch(ifMetagenerationMatch int64) *BucketsPatchCall { + c.urlParams_.Set("ifMetagenerationMatch", fmt.Sprint(ifMetagenerationMatch)) + return c +} + +// IfMetagenerationNotMatch sets the optional parameter +// "ifMetagenerationNotMatch": Makes the return of the bucket metadata +// conditional on whether the bucket's current metageneration does not +// match the given value. +func (c *BucketsPatchCall) IfMetagenerationNotMatch(ifMetagenerationNotMatch int64) *BucketsPatchCall { + c.urlParams_.Set("ifMetagenerationNotMatch", fmt.Sprint(ifMetagenerationNotMatch)) + return c +} + +// PredefinedAcl sets the optional parameter "predefinedAcl": Apply a +// predefined set of access controls to this bucket. +// +// Possible values: +// "authenticatedRead" - Project team owners get OWNER access, and +// allAuthenticatedUsers get READER access. +// "private" - Project team owners get OWNER access. +// "projectPrivate" - Project team members get access according to +// their roles. +// "publicRead" - Project team owners get OWNER access, and allUsers +// get READER access. +// "publicReadWrite" - Project team owners get OWNER access, and +// allUsers get WRITER access. +func (c *BucketsPatchCall) PredefinedAcl(predefinedAcl string) *BucketsPatchCall { + c.urlParams_.Set("predefinedAcl", predefinedAcl) + return c +} + +// PredefinedDefaultObjectAcl sets the optional parameter +// "predefinedDefaultObjectAcl": Apply a predefined set of default +// object access controls to this bucket. +// +// Possible values: +// "authenticatedRead" - Object owner gets OWNER access, and +// allAuthenticatedUsers get READER access. +// "bucketOwnerFullControl" - Object owner gets OWNER access, and +// project team owners get OWNER access. +// "bucketOwnerRead" - Object owner gets OWNER access, and project +// team owners get READER access. +// "private" - Object owner gets OWNER access. +// "projectPrivate" - Object owner gets OWNER access, and project team +// members get access according to their roles. +// "publicRead" - Object owner gets OWNER access, and allUsers get +// READER access. 
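+//
+// For instance, a sketch that patches a bucket so newly created objects
+// default to project-private ACLs (the bucket name is illustrative):
+//
+//    b, err := svc.Buckets.
+//        Patch("my-bucket", &Bucket{}).
+//        PredefinedDefaultObjectAcl("projectPrivate").
+//        Do()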
+func (c *BucketsPatchCall) PredefinedDefaultObjectAcl(predefinedDefaultObjectAcl string) *BucketsPatchCall { + c.urlParams_.Set("predefinedDefaultObjectAcl", predefinedDefaultObjectAcl) + return c +} + +// Projection sets the optional parameter "projection": Set of +// properties to return. Defaults to full. +// +// Possible values: +// "full" - Include all properties. +// "noAcl" - Omit acl and defaultObjectAcl properties. +func (c *BucketsPatchCall) Projection(projection string) *BucketsPatchCall { + c.urlParams_.Set("projection", projection) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *BucketsPatchCall) Fields(s ...googleapi.Field) *BucketsPatchCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *BucketsPatchCall) Context(ctx context.Context) *BucketsPatchCall { + c.ctx_ = ctx + return c +} + +func (c *BucketsPatchCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.bucket2) + if err != nil { + return nil, err + } + ctype := "application/json" + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("PATCH", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "bucket": c.bucket, + }) + req.Header.Set("Content-Type", ctype) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "storage.buckets.patch" call. +// Exactly one of *Bucket or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Bucket.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *BucketsPatchCall) Do(opts ...googleapi.CallOption) (*Bucket, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Bucket{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Updates a bucket. 
This method supports patch semantics.", + // "httpMethod": "PATCH", + // "id": "storage.buckets.patch", + // "parameterOrder": [ + // "bucket" + // ], + // "parameters": { + // "bucket": { + // "description": "Name of a bucket.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "ifMetagenerationMatch": { + // "description": "Makes the return of the bucket metadata conditional on whether the bucket's current metageneration matches the given value.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "ifMetagenerationNotMatch": { + // "description": "Makes the return of the bucket metadata conditional on whether the bucket's current metageneration does not match the given value.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "predefinedAcl": { + // "description": "Apply a predefined set of access controls to this bucket.", + // "enum": [ + // "authenticatedRead", + // "private", + // "projectPrivate", + // "publicRead", + // "publicReadWrite" + // ], + // "enumDescriptions": [ + // "Project team owners get OWNER access, and allAuthenticatedUsers get READER access.", + // "Project team owners get OWNER access.", + // "Project team members get access according to their roles.", + // "Project team owners get OWNER access, and allUsers get READER access.", + // "Project team owners get OWNER access, and allUsers get WRITER access." + // ], + // "location": "query", + // "type": "string" + // }, + // "predefinedDefaultObjectAcl": { + // "description": "Apply a predefined set of default object access controls to this bucket.", + // "enum": [ + // "authenticatedRead", + // "bucketOwnerFullControl", + // "bucketOwnerRead", + // "private", + // "projectPrivate", + // "publicRead" + // ], + // "enumDescriptions": [ + // "Object owner gets OWNER access, and allAuthenticatedUsers get READER access.", + // "Object owner gets OWNER access, and project team owners get OWNER access.", + // "Object owner gets OWNER access, and project team owners get READER access.", + // "Object owner gets OWNER access.", + // "Object owner gets OWNER access, and project team members get access according to their roles.", + // "Object owner gets OWNER access, and allUsers get READER access." + // ], + // "location": "query", + // "type": "string" + // }, + // "projection": { + // "description": "Set of properties to return. Defaults to full.", + // "enum": [ + // "full", + // "noAcl" + // ], + // "enumDescriptions": [ + // "Include all properties.", + // "Omit acl and defaultObjectAcl properties." + // ], + // "location": "query", + // "type": "string" + // } + // }, + // "path": "b/{bucket}", + // "request": { + // "$ref": "Bucket" + // }, + // "response": { + // "$ref": "Bucket" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/devstorage.full_control", + // "https://www.googleapis.com/auth/devstorage.read_write" + // ] + // } + +} + +// method id "storage.buckets.update": + +type BucketsUpdateCall struct { + s *Service + bucket string + bucket2 *Bucket + urlParams_ gensupport.URLParams + ctx_ context.Context +} + +// Update: Updates a bucket. 
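+// Because this issues a PUT, the supplied Bucket replaces the stored
+// resource; a typical read-modify-write sketch (assuming Bucket carries
+// Metageneration and Versioning fields, as elsewhere in this package):
+//
+//    b, err := svc.Buckets.Get("my-bucket").Do()
+//    if err == nil {
+//        b.Versioning = &BucketVersioning{Enabled: true}
+//        b, err = svc.Buckets.Update("my-bucket", b).
+//            IfMetagenerationMatch(b.Metageneration).Do()
+//    }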
+func (r *BucketsService) Update(bucket string, bucket2 *Bucket) *BucketsUpdateCall { + c := &BucketsUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.bucket = bucket + c.bucket2 = bucket2 + return c +} + +// IfMetagenerationMatch sets the optional parameter +// "ifMetagenerationMatch": Makes the return of the bucket metadata +// conditional on whether the bucket's current metageneration matches +// the given value. +func (c *BucketsUpdateCall) IfMetagenerationMatch(ifMetagenerationMatch int64) *BucketsUpdateCall { + c.urlParams_.Set("ifMetagenerationMatch", fmt.Sprint(ifMetagenerationMatch)) + return c +} + +// IfMetagenerationNotMatch sets the optional parameter +// "ifMetagenerationNotMatch": Makes the return of the bucket metadata +// conditional on whether the bucket's current metageneration does not +// match the given value. +func (c *BucketsUpdateCall) IfMetagenerationNotMatch(ifMetagenerationNotMatch int64) *BucketsUpdateCall { + c.urlParams_.Set("ifMetagenerationNotMatch", fmt.Sprint(ifMetagenerationNotMatch)) + return c +} + +// PredefinedAcl sets the optional parameter "predefinedAcl": Apply a +// predefined set of access controls to this bucket. +// +// Possible values: +// "authenticatedRead" - Project team owners get OWNER access, and +// allAuthenticatedUsers get READER access. +// "private" - Project team owners get OWNER access. +// "projectPrivate" - Project team members get access according to +// their roles. +// "publicRead" - Project team owners get OWNER access, and allUsers +// get READER access. +// "publicReadWrite" - Project team owners get OWNER access, and +// allUsers get WRITER access. +func (c *BucketsUpdateCall) PredefinedAcl(predefinedAcl string) *BucketsUpdateCall { + c.urlParams_.Set("predefinedAcl", predefinedAcl) + return c +} + +// PredefinedDefaultObjectAcl sets the optional parameter +// "predefinedDefaultObjectAcl": Apply a predefined set of default +// object access controls to this bucket. +// +// Possible values: +// "authenticatedRead" - Object owner gets OWNER access, and +// allAuthenticatedUsers get READER access. +// "bucketOwnerFullControl" - Object owner gets OWNER access, and +// project team owners get OWNER access. +// "bucketOwnerRead" - Object owner gets OWNER access, and project +// team owners get READER access. +// "private" - Object owner gets OWNER access. +// "projectPrivate" - Object owner gets OWNER access, and project team +// members get access according to their roles. +// "publicRead" - Object owner gets OWNER access, and allUsers get +// READER access. +func (c *BucketsUpdateCall) PredefinedDefaultObjectAcl(predefinedDefaultObjectAcl string) *BucketsUpdateCall { + c.urlParams_.Set("predefinedDefaultObjectAcl", predefinedDefaultObjectAcl) + return c +} + +// Projection sets the optional parameter "projection": Set of +// properties to return. Defaults to full. +// +// Possible values: +// "full" - Include all properties. +// "noAcl" - Omit acl and defaultObjectAcl properties. +func (c *BucketsUpdateCall) Projection(projection string) *BucketsUpdateCall { + c.urlParams_.Set("projection", projection) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *BucketsUpdateCall) Fields(s ...googleapi.Field) *BucketsUpdateCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. 
Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *BucketsUpdateCall) Context(ctx context.Context) *BucketsUpdateCall { + c.ctx_ = ctx + return c +} + +func (c *BucketsUpdateCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.bucket2) + if err != nil { + return nil, err + } + ctype := "application/json" + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("PUT", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "bucket": c.bucket, + }) + req.Header.Set("Content-Type", ctype) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "storage.buckets.update" call. +// Exactly one of *Bucket or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Bucket.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *BucketsUpdateCall) Do(opts ...googleapi.CallOption) (*Bucket, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Bucket{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Updates a bucket.", + // "httpMethod": "PUT", + // "id": "storage.buckets.update", + // "parameterOrder": [ + // "bucket" + // ], + // "parameters": { + // "bucket": { + // "description": "Name of a bucket.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "ifMetagenerationMatch": { + // "description": "Makes the return of the bucket metadata conditional on whether the bucket's current metageneration matches the given value.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "ifMetagenerationNotMatch": { + // "description": "Makes the return of the bucket metadata conditional on whether the bucket's current metageneration does not match the given value.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "predefinedAcl": { + // "description": "Apply a predefined set of access controls to this bucket.", + // "enum": [ + // "authenticatedRead", + // "private", + // "projectPrivate", + // "publicRead", + // "publicReadWrite" + // ], + // "enumDescriptions": [ + // "Project team owners get OWNER access, and allAuthenticatedUsers get READER access.", + // "Project team owners get OWNER access.", + // "Project team members get access according to their roles.", + // "Project team owners get OWNER access, and allUsers get READER access.", + // "Project team owners get OWNER access, and allUsers get WRITER access." 
+ // ], + // "location": "query", + // "type": "string" + // }, + // "predefinedDefaultObjectAcl": { + // "description": "Apply a predefined set of default object access controls to this bucket.", + // "enum": [ + // "authenticatedRead", + // "bucketOwnerFullControl", + // "bucketOwnerRead", + // "private", + // "projectPrivate", + // "publicRead" + // ], + // "enumDescriptions": [ + // "Object owner gets OWNER access, and allAuthenticatedUsers get READER access.", + // "Object owner gets OWNER access, and project team owners get OWNER access.", + // "Object owner gets OWNER access, and project team owners get READER access.", + // "Object owner gets OWNER access.", + // "Object owner gets OWNER access, and project team members get access according to their roles.", + // "Object owner gets OWNER access, and allUsers get READER access." + // ], + // "location": "query", + // "type": "string" + // }, + // "projection": { + // "description": "Set of properties to return. Defaults to full.", + // "enum": [ + // "full", + // "noAcl" + // ], + // "enumDescriptions": [ + // "Include all properties.", + // "Omit acl and defaultObjectAcl properties." + // ], + // "location": "query", + // "type": "string" + // } + // }, + // "path": "b/{bucket}", + // "request": { + // "$ref": "Bucket" + // }, + // "response": { + // "$ref": "Bucket" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/devstorage.full_control", + // "https://www.googleapis.com/auth/devstorage.read_write" + // ] + // } + +} + +// method id "storage.channels.stop": + +type ChannelsStopCall struct { + s *Service + channel *Channel + urlParams_ gensupport.URLParams + ctx_ context.Context +} + +// Stop: Stop watching resources through this channel +func (r *ChannelsService) Stop(channel *Channel) *ChannelsStopCall { + c := &ChannelsStopCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.channel = channel + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ChannelsStopCall) Fields(s ...googleapi.Field) *ChannelsStopCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ChannelsStopCall) Context(ctx context.Context) *ChannelsStopCall { + c.ctx_ = ctx + return c +} + +func (c *ChannelsStopCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.channel) + if err != nil { + return nil, err + } + ctype := "application/json" + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "channels/stop") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + googleapi.SetOpaque(req.URL) + req.Header.Set("Content-Type", ctype) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "storage.channels.stop" call. +func (c *ChannelsStopCall) Do(opts ...googleapi.CallOption) error { + gensupport.SetOptions(c.urlParams_, opts...) 
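+ // channels.stop returns no response payload, so Do reports only an
+ // error: nil means the channel was stopped. An illustrative sketch
+ // (svc is an assumed, already-authenticated *Service and ch is a
+ // *Channel previously returned by a watch call; both are
+ // placeholders, not part of this package):
+ //
+ //	if err := svc.Channels.Stop(ch).Do(); err != nil {
+ //		// handle or log the failure
+ //	}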
+ res, err := c.doRequest("json") + if err != nil { + return err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return err + } + return nil + // { + // "description": "Stop watching resources through this channel", + // "httpMethod": "POST", + // "id": "storage.channels.stop", + // "path": "channels/stop", + // "request": { + // "$ref": "Channel", + // "parameterName": "resource" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/cloud-platform.read-only", + // "https://www.googleapis.com/auth/devstorage.full_control", + // "https://www.googleapis.com/auth/devstorage.read_only", + // "https://www.googleapis.com/auth/devstorage.read_write" + // ] + // } + +} + +// method id "storage.defaultObjectAccessControls.delete": + +type DefaultObjectAccessControlsDeleteCall struct { + s *Service + bucket string + entity string + urlParams_ gensupport.URLParams + ctx_ context.Context +} + +// Delete: Permanently deletes the default object ACL entry for the +// specified entity on the specified bucket. +func (r *DefaultObjectAccessControlsService) Delete(bucket string, entity string) *DefaultObjectAccessControlsDeleteCall { + c := &DefaultObjectAccessControlsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.bucket = bucket + c.entity = entity + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *DefaultObjectAccessControlsDeleteCall) Fields(s ...googleapi.Field) *DefaultObjectAccessControlsDeleteCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *DefaultObjectAccessControlsDeleteCall) Context(ctx context.Context) *DefaultObjectAccessControlsDeleteCall { + c.ctx_ = ctx + return c +} + +func (c *DefaultObjectAccessControlsDeleteCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/defaultObjectAcl/{entity}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("DELETE", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "bucket": c.bucket, + "entity": c.entity, + }) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "storage.defaultObjectAccessControls.delete" call. +func (c *DefaultObjectAccessControlsDeleteCall) Do(opts ...googleapi.CallOption) error { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if err != nil { + return err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return err + } + return nil + // { + // "description": "Permanently deletes the default object ACL entry for the specified entity on the specified bucket.", + // "httpMethod": "DELETE", + // "id": "storage.defaultObjectAccessControls.delete", + // "parameterOrder": [ + // "bucket", + // "entity" + // ], + // "parameters": { + // "bucket": { + // "description": "Name of a bucket.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "entity": { + // "description": "The entity holding the permission. 
Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "b/{bucket}/defaultObjectAcl/{entity}", + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/devstorage.full_control" + // ] + // } + +} + +// method id "storage.defaultObjectAccessControls.get": + +type DefaultObjectAccessControlsGetCall struct { + s *Service + bucket string + entity string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context +} + +// Get: Returns the default object ACL entry for the specified entity on +// the specified bucket. +func (r *DefaultObjectAccessControlsService) Get(bucket string, entity string) *DefaultObjectAccessControlsGetCall { + c := &DefaultObjectAccessControlsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.bucket = bucket + c.entity = entity + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *DefaultObjectAccessControlsGetCall) Fields(s ...googleapi.Field) *DefaultObjectAccessControlsGetCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *DefaultObjectAccessControlsGetCall) IfNoneMatch(entityTag string) *DefaultObjectAccessControlsGetCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *DefaultObjectAccessControlsGetCall) Context(ctx context.Context) *DefaultObjectAccessControlsGetCall { + c.ctx_ = ctx + return c +} + +func (c *DefaultObjectAccessControlsGetCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/defaultObjectAcl/{entity}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "bucket": c.bucket, + "entity": c.entity, + }) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + req.Header.Set("If-None-Match", c.ifNoneMatch_) + } + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "storage.defaultObjectAccessControls.get" call. +// Exactly one of *ObjectAccessControl or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *ObjectAccessControl.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *DefaultObjectAccessControlsGetCall) Do(opts ...googleapi.CallOption) (*ObjectAccessControl, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
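+ // The generated 304 handling below converts http.StatusNotModified
+ // (reachable when IfNoneMatch was set) into a *googleapi.Error, so
+ // callers can test it with googleapi.IsNotModified. An illustrative
+ // sketch, assuming an authenticated *Service named svc and a cached
+ // ETag value tag (all names here are placeholders):
+ //
+ //	acl, err := svc.DefaultObjectAccessControls.Get("my-bucket", "allUsers").
+ //		IfNoneMatch(tag).
+ //		Do()
+ //	if googleapi.IsNotModified(err) {
+ //		// the cached ACL entry is still current
+ //	}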
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &ObjectAccessControl{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Returns the default object ACL entry for the specified entity on the specified bucket.", + // "httpMethod": "GET", + // "id": "storage.defaultObjectAccessControls.get", + // "parameterOrder": [ + // "bucket", + // "entity" + // ], + // "parameters": { + // "bucket": { + // "description": "Name of a bucket.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "entity": { + // "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "b/{bucket}/defaultObjectAcl/{entity}", + // "response": { + // "$ref": "ObjectAccessControl" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/devstorage.full_control" + // ] + // } + +} + +// method id "storage.defaultObjectAccessControls.insert": + +type DefaultObjectAccessControlsInsertCall struct { + s *Service + bucket string + objectaccesscontrol *ObjectAccessControl + urlParams_ gensupport.URLParams + ctx_ context.Context +} + +// Insert: Creates a new default object ACL entry on the specified +// bucket. +func (r *DefaultObjectAccessControlsService) Insert(bucket string, objectaccesscontrol *ObjectAccessControl) *DefaultObjectAccessControlsInsertCall { + c := &DefaultObjectAccessControlsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.bucket = bucket + c.objectaccesscontrol = objectaccesscontrol + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *DefaultObjectAccessControlsInsertCall) Fields(s ...googleapi.Field) *DefaultObjectAccessControlsInsertCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *DefaultObjectAccessControlsInsertCall) Context(ctx context.Context) *DefaultObjectAccessControlsInsertCall { + c.ctx_ = ctx + return c +} + +func (c *DefaultObjectAccessControlsInsertCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.objectaccesscontrol) + if err != nil { + return nil, err + } + ctype := "application/json" + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/defaultObjectAcl") + urls += "?" 
+ c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "bucket": c.bucket, + }) + req.Header.Set("Content-Type", ctype) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "storage.defaultObjectAccessControls.insert" call. +// Exactly one of *ObjectAccessControl or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *ObjectAccessControl.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *DefaultObjectAccessControlsInsertCall) Do(opts ...googleapi.CallOption) (*ObjectAccessControl, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &ObjectAccessControl{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Creates a new default object ACL entry on the specified bucket.", + // "httpMethod": "POST", + // "id": "storage.defaultObjectAccessControls.insert", + // "parameterOrder": [ + // "bucket" + // ], + // "parameters": { + // "bucket": { + // "description": "Name of a bucket.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "b/{bucket}/defaultObjectAcl", + // "request": { + // "$ref": "ObjectAccessControl" + // }, + // "response": { + // "$ref": "ObjectAccessControl" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/devstorage.full_control" + // ] + // } + +} + +// method id "storage.defaultObjectAccessControls.list": + +type DefaultObjectAccessControlsListCall struct { + s *Service + bucket string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context +} + +// List: Retrieves default object ACL entries on the specified bucket. +func (r *DefaultObjectAccessControlsService) List(bucket string) *DefaultObjectAccessControlsListCall { + c := &DefaultObjectAccessControlsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.bucket = bucket + return c +} + +// IfMetagenerationMatch sets the optional parameter +// "ifMetagenerationMatch": If present, only return default ACL listing +// if the bucket's current metageneration matches this value. +func (c *DefaultObjectAccessControlsListCall) IfMetagenerationMatch(ifMetagenerationMatch int64) *DefaultObjectAccessControlsListCall { + c.urlParams_.Set("ifMetagenerationMatch", fmt.Sprint(ifMetagenerationMatch)) + return c +} + +// IfMetagenerationNotMatch sets the optional parameter +// "ifMetagenerationNotMatch": If present, only return default ACL +// listing if the bucket's current metageneration does not match the +// given value. 
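+//
+// For example (an illustrative sketch; svc and the literal values are
+// placeholders, not part of this package):
+//
+//	acls, err := svc.DefaultObjectAccessControls.List("my-bucket").
+//		IfMetagenerationNotMatch(3).
+//		Do()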
+func (c *DefaultObjectAccessControlsListCall) IfMetagenerationNotMatch(ifMetagenerationNotMatch int64) *DefaultObjectAccessControlsListCall { + c.urlParams_.Set("ifMetagenerationNotMatch", fmt.Sprint(ifMetagenerationNotMatch)) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *DefaultObjectAccessControlsListCall) Fields(s ...googleapi.Field) *DefaultObjectAccessControlsListCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *DefaultObjectAccessControlsListCall) IfNoneMatch(entityTag string) *DefaultObjectAccessControlsListCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *DefaultObjectAccessControlsListCall) Context(ctx context.Context) *DefaultObjectAccessControlsListCall { + c.ctx_ = ctx + return c +} + +func (c *DefaultObjectAccessControlsListCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/defaultObjectAcl") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "bucket": c.bucket, + }) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + req.Header.Set("If-None-Match", c.ifNoneMatch_) + } + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "storage.defaultObjectAccessControls.list" call. +// Exactly one of *ObjectAccessControls or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *ObjectAccessControls.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *DefaultObjectAccessControlsListCall) Do(opts ...googleapi.CallOption) (*ObjectAccessControls, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
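+ // On success the response body below is decoded into
+ // *ObjectAccessControls, whose Items field holds the individual
+ // default ACL entries; the HTTP header and status are preserved in
+ // ret.ServerResponse for callers that need them.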
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &ObjectAccessControls{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Retrieves default object ACL entries on the specified bucket.", + // "httpMethod": "GET", + // "id": "storage.defaultObjectAccessControls.list", + // "parameterOrder": [ + // "bucket" + // ], + // "parameters": { + // "bucket": { + // "description": "Name of a bucket.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "ifMetagenerationMatch": { + // "description": "If present, only return default ACL listing if the bucket's current metageneration matches this value.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "ifMetagenerationNotMatch": { + // "description": "If present, only return default ACL listing if the bucket's current metageneration does not match the given value.", + // "format": "int64", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "b/{bucket}/defaultObjectAcl", + // "response": { + // "$ref": "ObjectAccessControls" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/devstorage.full_control" + // ] + // } + +} + +// method id "storage.defaultObjectAccessControls.patch": + +type DefaultObjectAccessControlsPatchCall struct { + s *Service + bucket string + entity string + objectaccesscontrol *ObjectAccessControl + urlParams_ gensupport.URLParams + ctx_ context.Context +} + +// Patch: Updates a default object ACL entry on the specified bucket. +// This method supports patch semantics. +func (r *DefaultObjectAccessControlsService) Patch(bucket string, entity string, objectaccesscontrol *ObjectAccessControl) *DefaultObjectAccessControlsPatchCall { + c := &DefaultObjectAccessControlsPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.bucket = bucket + c.entity = entity + c.objectaccesscontrol = objectaccesscontrol + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *DefaultObjectAccessControlsPatchCall) Fields(s ...googleapi.Field) *DefaultObjectAccessControlsPatchCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *DefaultObjectAccessControlsPatchCall) Context(ctx context.Context) *DefaultObjectAccessControlsPatchCall { + c.ctx_ = ctx + return c +} + +func (c *DefaultObjectAccessControlsPatchCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.objectaccesscontrol) + if err != nil { + return nil, err + } + ctype := "application/json" + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/defaultObjectAcl/{entity}") + urls += "?" 
+ c.urlParams_.Encode() + req, _ := http.NewRequest("PATCH", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "bucket": c.bucket, + "entity": c.entity, + }) + req.Header.Set("Content-Type", ctype) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "storage.defaultObjectAccessControls.patch" call. +// Exactly one of *ObjectAccessControl or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *ObjectAccessControl.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *DefaultObjectAccessControlsPatchCall) Do(opts ...googleapi.CallOption) (*ObjectAccessControl, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &ObjectAccessControl{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Updates a default object ACL entry on the specified bucket. This method supports patch semantics.", + // "httpMethod": "PATCH", + // "id": "storage.defaultObjectAccessControls.patch", + // "parameterOrder": [ + // "bucket", + // "entity" + // ], + // "parameters": { + // "bucket": { + // "description": "Name of a bucket.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "entity": { + // "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "b/{bucket}/defaultObjectAcl/{entity}", + // "request": { + // "$ref": "ObjectAccessControl" + // }, + // "response": { + // "$ref": "ObjectAccessControl" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/devstorage.full_control" + // ] + // } + +} + +// method id "storage.defaultObjectAccessControls.update": + +type DefaultObjectAccessControlsUpdateCall struct { + s *Service + bucket string + entity string + objectaccesscontrol *ObjectAccessControl + urlParams_ gensupport.URLParams + ctx_ context.Context +} + +// Update: Updates a default object ACL entry on the specified bucket. +func (r *DefaultObjectAccessControlsService) Update(bucket string, entity string, objectaccesscontrol *ObjectAccessControl) *DefaultObjectAccessControlsUpdateCall { + c := &DefaultObjectAccessControlsUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.bucket = bucket + c.entity = entity + c.objectaccesscontrol = objectaccesscontrol + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. 
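+//
+// For example (illustrative only; svc, acl, and the chosen field names
+// are placeholder assumptions):
+//
+//	updated, err := svc.DefaultObjectAccessControls.Update("my-bucket", "allUsers", acl).
+//		Fields("entity", "role").
+//		Do()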
+func (c *DefaultObjectAccessControlsUpdateCall) Fields(s ...googleapi.Field) *DefaultObjectAccessControlsUpdateCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *DefaultObjectAccessControlsUpdateCall) Context(ctx context.Context) *DefaultObjectAccessControlsUpdateCall { + c.ctx_ = ctx + return c +} + +func (c *DefaultObjectAccessControlsUpdateCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.objectaccesscontrol) + if err != nil { + return nil, err + } + ctype := "application/json" + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/defaultObjectAcl/{entity}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("PUT", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "bucket": c.bucket, + "entity": c.entity, + }) + req.Header.Set("Content-Type", ctype) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "storage.defaultObjectAccessControls.update" call. +// Exactly one of *ObjectAccessControl or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *ObjectAccessControl.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *DefaultObjectAccessControlsUpdateCall) Do(opts ...googleapi.CallOption) (*ObjectAccessControl, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &ObjectAccessControl{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Updates a default object ACL entry on the specified bucket.", + // "httpMethod": "PUT", + // "id": "storage.defaultObjectAccessControls.update", + // "parameterOrder": [ + // "bucket", + // "entity" + // ], + // "parameters": { + // "bucket": { + // "description": "Name of a bucket.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "entity": { + // "description": "The entity holding the permission. 
Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "b/{bucket}/defaultObjectAcl/{entity}", + // "request": { + // "$ref": "ObjectAccessControl" + // }, + // "response": { + // "$ref": "ObjectAccessControl" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/devstorage.full_control" + // ] + // } + +} + +// method id "storage.objectAccessControls.delete": + +type ObjectAccessControlsDeleteCall struct { + s *Service + bucket string + object string + entity string + urlParams_ gensupport.URLParams + ctx_ context.Context +} + +// Delete: Permanently deletes the ACL entry for the specified entity on +// the specified object. +func (r *ObjectAccessControlsService) Delete(bucket string, object string, entity string) *ObjectAccessControlsDeleteCall { + c := &ObjectAccessControlsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.bucket = bucket + c.object = object + c.entity = entity + return c +} + +// Generation sets the optional parameter "generation": If present, +// selects a specific revision of this object (as opposed to the latest +// version, the default). +func (c *ObjectAccessControlsDeleteCall) Generation(generation int64) *ObjectAccessControlsDeleteCall { + c.urlParams_.Set("generation", fmt.Sprint(generation)) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ObjectAccessControlsDeleteCall) Fields(s ...googleapi.Field) *ObjectAccessControlsDeleteCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ObjectAccessControlsDeleteCall) Context(ctx context.Context) *ObjectAccessControlsDeleteCall { + c.ctx_ = ctx + return c +} + +func (c *ObjectAccessControlsDeleteCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/{object}/acl/{entity}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("DELETE", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "bucket": c.bucket, + "object": c.object, + "entity": c.entity, + }) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "storage.objectAccessControls.delete" call. +func (c *ObjectAccessControlsDeleteCall) Do(opts ...googleapi.CallOption) error { + gensupport.SetOptions(c.urlParams_, opts...) 
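+ // Delete returns no payload: a nil error from the checks below means
+ // the ACL entry was removed. An illustrative sketch (svc and all
+ // argument values are placeholders; the Generation call is optional):
+ //
+ //	err := svc.ObjectAccessControls.Delete("my-bucket", "notes.txt", "user-jane@example.com").
+ //		Generation(1234567890).
+ //		Do()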
+ res, err := c.doRequest("json") + if err != nil { + return err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return err + } + return nil + // { + // "description": "Permanently deletes the ACL entry for the specified entity on the specified object.", + // "httpMethod": "DELETE", + // "id": "storage.objectAccessControls.delete", + // "parameterOrder": [ + // "bucket", + // "object", + // "entity" + // ], + // "parameters": { + // "bucket": { + // "description": "Name of a bucket.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "entity": { + // "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "generation": { + // "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "object": { + // "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "b/{bucket}/o/{object}/acl/{entity}", + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/devstorage.full_control" + // ] + // } + +} + +// method id "storage.objectAccessControls.get": + +type ObjectAccessControlsGetCall struct { + s *Service + bucket string + object string + entity string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context +} + +// Get: Returns the ACL entry for the specified entity on the specified +// object. +func (r *ObjectAccessControlsService) Get(bucket string, object string, entity string) *ObjectAccessControlsGetCall { + c := &ObjectAccessControlsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.bucket = bucket + c.object = object + c.entity = entity + return c +} + +// Generation sets the optional parameter "generation": If present, +// selects a specific revision of this object (as opposed to the latest +// version, the default). +func (c *ObjectAccessControlsGetCall) Generation(generation int64) *ObjectAccessControlsGetCall { + c.urlParams_.Set("generation", fmt.Sprint(generation)) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ObjectAccessControlsGetCall) Fields(s ...googleapi.Field) *ObjectAccessControlsGetCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *ObjectAccessControlsGetCall) IfNoneMatch(entityTag string) *ObjectAccessControlsGetCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. 
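+//
+// For example (an illustrative sketch; svc and the literals are
+// placeholders), to bound the request with a timeout:
+//
+//	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
+//	defer cancel()
+//	acl, err := svc.ObjectAccessControls.Get("my-bucket", "notes.txt", "allUsers").
+//		Context(ctx).
+//		Do()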
+func (c *ObjectAccessControlsGetCall) Context(ctx context.Context) *ObjectAccessControlsGetCall { + c.ctx_ = ctx + return c +} + +func (c *ObjectAccessControlsGetCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/{object}/acl/{entity}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "bucket": c.bucket, + "object": c.object, + "entity": c.entity, + }) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + req.Header.Set("If-None-Match", c.ifNoneMatch_) + } + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "storage.objectAccessControls.get" call. +// Exactly one of *ObjectAccessControl or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *ObjectAccessControl.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *ObjectAccessControlsGetCall) Do(opts ...googleapi.CallOption) (*ObjectAccessControl, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &ObjectAccessControl{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Returns the ACL entry for the specified entity on the specified object.", + // "httpMethod": "GET", + // "id": "storage.objectAccessControls.get", + // "parameterOrder": [ + // "bucket", + // "object", + // "entity" + // ], + // "parameters": { + // "bucket": { + // "description": "Name of a bucket.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "entity": { + // "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "generation": { + // "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "object": { + // "description": "Name of the object. 
For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "b/{bucket}/o/{object}/acl/{entity}", + // "response": { + // "$ref": "ObjectAccessControl" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/devstorage.full_control" + // ] + // } + +} + +// method id "storage.objectAccessControls.insert": + +type ObjectAccessControlsInsertCall struct { + s *Service + bucket string + object string + objectaccesscontrol *ObjectAccessControl + urlParams_ gensupport.URLParams + ctx_ context.Context +} + +// Insert: Creates a new ACL entry on the specified object. +func (r *ObjectAccessControlsService) Insert(bucket string, object string, objectaccesscontrol *ObjectAccessControl) *ObjectAccessControlsInsertCall { + c := &ObjectAccessControlsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.bucket = bucket + c.object = object + c.objectaccesscontrol = objectaccesscontrol + return c +} + +// Generation sets the optional parameter "generation": If present, +// selects a specific revision of this object (as opposed to the latest +// version, the default). +func (c *ObjectAccessControlsInsertCall) Generation(generation int64) *ObjectAccessControlsInsertCall { + c.urlParams_.Set("generation", fmt.Sprint(generation)) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ObjectAccessControlsInsertCall) Fields(s ...googleapi.Field) *ObjectAccessControlsInsertCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ObjectAccessControlsInsertCall) Context(ctx context.Context) *ObjectAccessControlsInsertCall { + c.ctx_ = ctx + return c +} + +func (c *ObjectAccessControlsInsertCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.objectaccesscontrol) + if err != nil { + return nil, err + } + ctype := "application/json" + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/{object}/acl") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "bucket": c.bucket, + "object": c.object, + }) + req.Header.Set("Content-Type", ctype) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "storage.objectAccessControls.insert" call. +// Exactly one of *ObjectAccessControl or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *ObjectAccessControl.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *ObjectAccessControlsInsertCall) Do(opts ...googleapi.CallOption) (*ObjectAccessControl, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
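+ // The 304 branch below is boilerplate shared by all generated calls;
+ // for an insert it is not normally reachable, and the usual path
+ // decodes the newly created entry from the response body. An
+ // illustrative sketch (svc and the literal values are placeholder
+ // assumptions):
+ //
+ //	acl := &ObjectAccessControl{Entity: "user-jane@example.com", Role: "READER"}
+ //	created, err := svc.ObjectAccessControls.Insert("my-bucket", "notes.txt", acl).Do()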
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &ObjectAccessControl{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Creates a new ACL entry on the specified object.", + // "httpMethod": "POST", + // "id": "storage.objectAccessControls.insert", + // "parameterOrder": [ + // "bucket", + // "object" + // ], + // "parameters": { + // "bucket": { + // "description": "Name of a bucket.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "generation": { + // "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "object": { + // "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "b/{bucket}/o/{object}/acl", + // "request": { + // "$ref": "ObjectAccessControl" + // }, + // "response": { + // "$ref": "ObjectAccessControl" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/devstorage.full_control" + // ] + // } + +} + +// method id "storage.objectAccessControls.list": + +type ObjectAccessControlsListCall struct { + s *Service + bucket string + object string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context +} + +// List: Retrieves ACL entries on the specified object. +func (r *ObjectAccessControlsService) List(bucket string, object string) *ObjectAccessControlsListCall { + c := &ObjectAccessControlsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.bucket = bucket + c.object = object + return c +} + +// Generation sets the optional parameter "generation": If present, +// selects a specific revision of this object (as opposed to the latest +// version, the default). +func (c *ObjectAccessControlsListCall) Generation(generation int64) *ObjectAccessControlsListCall { + c.urlParams_.Set("generation", fmt.Sprint(generation)) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ObjectAccessControlsListCall) Fields(s ...googleapi.Field) *ObjectAccessControlsListCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *ObjectAccessControlsListCall) IfNoneMatch(entityTag string) *ObjectAccessControlsListCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. 
Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ObjectAccessControlsListCall) Context(ctx context.Context) *ObjectAccessControlsListCall { + c.ctx_ = ctx + return c +} + +func (c *ObjectAccessControlsListCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/{object}/acl") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "bucket": c.bucket, + "object": c.object, + }) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + req.Header.Set("If-None-Match", c.ifNoneMatch_) + } + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "storage.objectAccessControls.list" call. +// Exactly one of *ObjectAccessControls or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *ObjectAccessControls.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *ObjectAccessControlsListCall) Do(opts ...googleapi.CallOption) (*ObjectAccessControls, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &ObjectAccessControls{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Retrieves ACL entries on the specified object.", + // "httpMethod": "GET", + // "id": "storage.objectAccessControls.list", + // "parameterOrder": [ + // "bucket", + // "object" + // ], + // "parameters": { + // "bucket": { + // "description": "Name of a bucket.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "generation": { + // "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "object": { + // "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "b/{bucket}/o/{object}/acl", + // "response": { + // "$ref": "ObjectAccessControls" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/devstorage.full_control" + // ] + // } + +} + +// method id "storage.objectAccessControls.patch": + +type ObjectAccessControlsPatchCall struct { + s *Service + bucket string + object string + entity string + objectaccesscontrol *ObjectAccessControl + urlParams_ gensupport.URLParams + ctx_ context.Context +} + +// Patch: Updates an ACL entry on the specified object. This method +// supports patch semantics. 
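+//
+// For example (illustrative; svc and the literals are placeholders),
+// changing only the role of an existing entry:
+//
+//	patch := &ObjectAccessControl{Role: "OWNER"}
+//	updated, err := svc.ObjectAccessControls.Patch("my-bucket", "notes.txt", "user-jane@example.com", patch).Do()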
+func (r *ObjectAccessControlsService) Patch(bucket string, object string, entity string, objectaccesscontrol *ObjectAccessControl) *ObjectAccessControlsPatchCall { + c := &ObjectAccessControlsPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.bucket = bucket + c.object = object + c.entity = entity + c.objectaccesscontrol = objectaccesscontrol + return c +} + +// Generation sets the optional parameter "generation": If present, +// selects a specific revision of this object (as opposed to the latest +// version, the default). +func (c *ObjectAccessControlsPatchCall) Generation(generation int64) *ObjectAccessControlsPatchCall { + c.urlParams_.Set("generation", fmt.Sprint(generation)) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ObjectAccessControlsPatchCall) Fields(s ...googleapi.Field) *ObjectAccessControlsPatchCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ObjectAccessControlsPatchCall) Context(ctx context.Context) *ObjectAccessControlsPatchCall { + c.ctx_ = ctx + return c +} + +func (c *ObjectAccessControlsPatchCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.objectaccesscontrol) + if err != nil { + return nil, err + } + ctype := "application/json" + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/{object}/acl/{entity}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("PATCH", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "bucket": c.bucket, + "object": c.object, + "entity": c.entity, + }) + req.Header.Set("Content-Type", ctype) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "storage.objectAccessControls.patch" call. +// Exactly one of *ObjectAccessControl or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *ObjectAccessControl.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *ObjectAccessControlsPatchCall) Do(opts ...googleapi.CallOption) (*ObjectAccessControl, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &ObjectAccessControl{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Updates an ACL entry on the specified object. 
This method supports patch semantics.", + // "httpMethod": "PATCH", + // "id": "storage.objectAccessControls.patch", + // "parameterOrder": [ + // "bucket", + // "object", + // "entity" + // ], + // "parameters": { + // "bucket": { + // "description": "Name of a bucket.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "entity": { + // "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "generation": { + // "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "object": { + // "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "b/{bucket}/o/{object}/acl/{entity}", + // "request": { + // "$ref": "ObjectAccessControl" + // }, + // "response": { + // "$ref": "ObjectAccessControl" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/devstorage.full_control" + // ] + // } + +} + +// method id "storage.objectAccessControls.update": + +type ObjectAccessControlsUpdateCall struct { + s *Service + bucket string + object string + entity string + objectaccesscontrol *ObjectAccessControl + urlParams_ gensupport.URLParams + ctx_ context.Context +} + +// Update: Updates an ACL entry on the specified object. +func (r *ObjectAccessControlsService) Update(bucket string, object string, entity string, objectaccesscontrol *ObjectAccessControl) *ObjectAccessControlsUpdateCall { + c := &ObjectAccessControlsUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.bucket = bucket + c.object = object + c.entity = entity + c.objectaccesscontrol = objectaccesscontrol + return c +} + +// Generation sets the optional parameter "generation": If present, +// selects a specific revision of this object (as opposed to the latest +// version, the default). +func (c *ObjectAccessControlsUpdateCall) Generation(generation int64) *ObjectAccessControlsUpdateCall { + c.urlParams_.Set("generation", fmt.Sprint(generation)) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ObjectAccessControlsUpdateCall) Fields(s ...googleapi.Field) *ObjectAccessControlsUpdateCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ObjectAccessControlsUpdateCall) Context(ctx context.Context) *ObjectAccessControlsUpdateCall { + c.ctx_ = ctx + return c +} + +func (c *ObjectAccessControlsUpdateCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.objectaccesscontrol) + if err != nil { + return nil, err + } + ctype := "application/json" + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/{object}/acl/{entity}") + urls += "?" 
+ c.urlParams_.Encode() + req, _ := http.NewRequest("PUT", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "bucket": c.bucket, + "object": c.object, + "entity": c.entity, + }) + req.Header.Set("Content-Type", ctype) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "storage.objectAccessControls.update" call. +// Exactly one of *ObjectAccessControl or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *ObjectAccessControl.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *ObjectAccessControlsUpdateCall) Do(opts ...googleapi.CallOption) (*ObjectAccessControl, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &ObjectAccessControl{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Updates an ACL entry on the specified object.", + // "httpMethod": "PUT", + // "id": "storage.objectAccessControls.update", + // "parameterOrder": [ + // "bucket", + // "object", + // "entity" + // ], + // "parameters": { + // "bucket": { + // "description": "Name of a bucket.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "entity": { + // "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "generation": { + // "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "object": { + // "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "b/{bucket}/o/{object}/acl/{entity}", + // "request": { + // "$ref": "ObjectAccessControl" + // }, + // "response": { + // "$ref": "ObjectAccessControl" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/devstorage.full_control" + // ] + // } + +} + +// method id "storage.objects.compose": + +type ObjectsComposeCall struct { + s *Service + destinationBucket string + destinationObject string + composerequest *ComposeRequest + urlParams_ gensupport.URLParams + ctx_ context.Context +} + +// Compose: Concatenates a list of existing objects into a new object in +// the same bucket. 
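+//
+// For example (an illustrative sketch; svc, the bucket, and the object
+// names are placeholder assumptions):
+//
+//	req := &ComposeRequest{
+//		Destination: &Object{ContentType: "text/plain"},
+//		SourceObjects: []*ComposeRequestSourceObjects{
+//			{Name: "part-1"},
+//			{Name: "part-2"},
+//		},
+//	}
+//	combined, err := svc.Objects.Compose("my-bucket", "combined.txt", req).Do()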
+func (r *ObjectsService) Compose(destinationBucket string, destinationObject string, composerequest *ComposeRequest) *ObjectsComposeCall { + c := &ObjectsComposeCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.destinationBucket = destinationBucket + c.destinationObject = destinationObject + c.composerequest = composerequest + return c +} + +// DestinationPredefinedAcl sets the optional parameter +// "destinationPredefinedAcl": Apply a predefined set of access controls +// to the destination object. +// +// Possible values: +// "authenticatedRead" - Object owner gets OWNER access, and +// allAuthenticatedUsers get READER access. +// "bucketOwnerFullControl" - Object owner gets OWNER access, and +// project team owners get OWNER access. +// "bucketOwnerRead" - Object owner gets OWNER access, and project +// team owners get READER access. +// "private" - Object owner gets OWNER access. +// "projectPrivate" - Object owner gets OWNER access, and project team +// members get access according to their roles. +// "publicRead" - Object owner gets OWNER access, and allUsers get +// READER access. +func (c *ObjectsComposeCall) DestinationPredefinedAcl(destinationPredefinedAcl string) *ObjectsComposeCall { + c.urlParams_.Set("destinationPredefinedAcl", destinationPredefinedAcl) + return c +} + +// IfGenerationMatch sets the optional parameter "ifGenerationMatch": +// Makes the operation conditional on whether the object's current +// generation matches the given value. +func (c *ObjectsComposeCall) IfGenerationMatch(ifGenerationMatch int64) *ObjectsComposeCall { + c.urlParams_.Set("ifGenerationMatch", fmt.Sprint(ifGenerationMatch)) + return c +} + +// IfMetagenerationMatch sets the optional parameter +// "ifMetagenerationMatch": Makes the operation conditional on whether +// the object's current metageneration matches the given value. +func (c *ObjectsComposeCall) IfMetagenerationMatch(ifMetagenerationMatch int64) *ObjectsComposeCall { + c.urlParams_.Set("ifMetagenerationMatch", fmt.Sprint(ifMetagenerationMatch)) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ObjectsComposeCall) Fields(s ...googleapi.Field) *ObjectsComposeCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do and Download +// methods. Any pending HTTP request will be aborted if the provided +// context is canceled. +func (c *ObjectsComposeCall) Context(ctx context.Context) *ObjectsComposeCall { + c.ctx_ = ctx + return c +} + +func (c *ObjectsComposeCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.composerequest) + if err != nil { + return nil, err + } + ctype := "application/json" + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{destinationBucket}/o/{destinationObject}/compose") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "destinationBucket": c.destinationBucket, + "destinationObject": c.destinationObject, + }) + req.Header.Set("Content-Type", ctype) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Download fetches the API endpoint's "media" value, instead of the normal +// API response value. 
If the returned error is nil, the Response is guaranteed to +// have a 2xx status code. Callers must close the Response.Body as usual. +func (c *ObjectsComposeCall) Download(opts ...googleapi.CallOption) (*http.Response, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("media") + if err != nil { + return nil, err + } + if err := googleapi.CheckMediaResponse(res); err != nil { + res.Body.Close() + return nil, err + } + return res, nil +} + +// Do executes the "storage.objects.compose" call. +// Exactly one of *Object or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Object.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *ObjectsComposeCall) Do(opts ...googleapi.CallOption) (*Object, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Object{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Concatenates a list of existing objects into a new object in the same bucket.", + // "httpMethod": "POST", + // "id": "storage.objects.compose", + // "parameterOrder": [ + // "destinationBucket", + // "destinationObject" + // ], + // "parameters": { + // "destinationBucket": { + // "description": "Name of the bucket in which to store the new object.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "destinationObject": { + // "description": "Name of the new object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "destinationPredefinedAcl": { + // "description": "Apply a predefined set of access controls to the destination object.", + // "enum": [ + // "authenticatedRead", + // "bucketOwnerFullControl", + // "bucketOwnerRead", + // "private", + // "projectPrivate", + // "publicRead" + // ], + // "enumDescriptions": [ + // "Object owner gets OWNER access, and allAuthenticatedUsers get READER access.", + // "Object owner gets OWNER access, and project team owners get OWNER access.", + // "Object owner gets OWNER access, and project team owners get READER access.", + // "Object owner gets OWNER access.", + // "Object owner gets OWNER access, and project team members get access according to their roles.", + // "Object owner gets OWNER access, and allUsers get READER access." 
+ // ], + // "location": "query", + // "type": "string" + // }, + // "ifGenerationMatch": { + // "description": "Makes the operation conditional on whether the object's current generation matches the given value.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "ifMetagenerationMatch": { + // "description": "Makes the operation conditional on whether the object's current metageneration matches the given value.", + // "format": "int64", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "b/{destinationBucket}/o/{destinationObject}/compose", + // "request": { + // "$ref": "ComposeRequest" + // }, + // "response": { + // "$ref": "Object" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/devstorage.full_control", + // "https://www.googleapis.com/auth/devstorage.read_write" + // ], + // "supportsMediaDownload": true, + // "useMediaDownloadService": true + // } + +} + +// method id "storage.objects.copy": + +type ObjectsCopyCall struct { + s *Service + sourceBucket string + sourceObject string + destinationBucket string + destinationObject string + object *Object + urlParams_ gensupport.URLParams + ctx_ context.Context +} + +// Copy: Copies a source object to a destination object. Optionally +// overrides metadata. +func (r *ObjectsService) Copy(sourceBucket string, sourceObject string, destinationBucket string, destinationObject string, object *Object) *ObjectsCopyCall { + c := &ObjectsCopyCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.sourceBucket = sourceBucket + c.sourceObject = sourceObject + c.destinationBucket = destinationBucket + c.destinationObject = destinationObject + c.object = object + return c +} + +// DestinationPredefinedAcl sets the optional parameter +// "destinationPredefinedAcl": Apply a predefined set of access controls +// to the destination object. +// +// Possible values: +// "authenticatedRead" - Object owner gets OWNER access, and +// allAuthenticatedUsers get READER access. +// "bucketOwnerFullControl" - Object owner gets OWNER access, and +// project team owners get OWNER access. +// "bucketOwnerRead" - Object owner gets OWNER access, and project +// team owners get READER access. +// "private" - Object owner gets OWNER access. +// "projectPrivate" - Object owner gets OWNER access, and project team +// members get access according to their roles. +// "publicRead" - Object owner gets OWNER access, and allUsers get +// READER access. +func (c *ObjectsCopyCall) DestinationPredefinedAcl(destinationPredefinedAcl string) *ObjectsCopyCall { + c.urlParams_.Set("destinationPredefinedAcl", destinationPredefinedAcl) + return c +} + +// IfGenerationMatch sets the optional parameter "ifGenerationMatch": +// Makes the operation conditional on whether the destination object's +// current generation matches the given value. +func (c *ObjectsCopyCall) IfGenerationMatch(ifGenerationMatch int64) *ObjectsCopyCall { + c.urlParams_.Set("ifGenerationMatch", fmt.Sprint(ifGenerationMatch)) + return c +} + +// IfGenerationNotMatch sets the optional parameter +// "ifGenerationNotMatch": Makes the operation conditional on whether +// the destination object's current generation does not match the given +// value. 
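+//
+// A minimal sketch (editorial addition, not generator output): re-copy
+// the source only if the destination is no longer at a generation
+// recorded earlier. svc, ctx and lastGen are assumed to exist:
+//
+//	obj, err := svc.Objects.Copy("src-bucket", "a.txt", "dst-bucket", "a.txt", &Object{}).
+//		IfGenerationNotMatch(lastGen).
+//		Context(ctx).
+//		Do()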
+func (c *ObjectsCopyCall) IfGenerationNotMatch(ifGenerationNotMatch int64) *ObjectsCopyCall { + c.urlParams_.Set("ifGenerationNotMatch", fmt.Sprint(ifGenerationNotMatch)) + return c +} + +// IfMetagenerationMatch sets the optional parameter +// "ifMetagenerationMatch": Makes the operation conditional on whether +// the destination object's current metageneration matches the given +// value. +func (c *ObjectsCopyCall) IfMetagenerationMatch(ifMetagenerationMatch int64) *ObjectsCopyCall { + c.urlParams_.Set("ifMetagenerationMatch", fmt.Sprint(ifMetagenerationMatch)) + return c +} + +// IfMetagenerationNotMatch sets the optional parameter +// "ifMetagenerationNotMatch": Makes the operation conditional on +// whether the destination object's current metageneration does not +// match the given value. +func (c *ObjectsCopyCall) IfMetagenerationNotMatch(ifMetagenerationNotMatch int64) *ObjectsCopyCall { + c.urlParams_.Set("ifMetagenerationNotMatch", fmt.Sprint(ifMetagenerationNotMatch)) + return c +} + +// IfSourceGenerationMatch sets the optional parameter +// "ifSourceGenerationMatch": Makes the operation conditional on whether +// the source object's generation matches the given value. +func (c *ObjectsCopyCall) IfSourceGenerationMatch(ifSourceGenerationMatch int64) *ObjectsCopyCall { + c.urlParams_.Set("ifSourceGenerationMatch", fmt.Sprint(ifSourceGenerationMatch)) + return c +} + +// IfSourceGenerationNotMatch sets the optional parameter +// "ifSourceGenerationNotMatch": Makes the operation conditional on +// whether the source object's generation does not match the given +// value. +func (c *ObjectsCopyCall) IfSourceGenerationNotMatch(ifSourceGenerationNotMatch int64) *ObjectsCopyCall { + c.urlParams_.Set("ifSourceGenerationNotMatch", fmt.Sprint(ifSourceGenerationNotMatch)) + return c +} + +// IfSourceMetagenerationMatch sets the optional parameter +// "ifSourceMetagenerationMatch": Makes the operation conditional on +// whether the source object's current metageneration matches the given +// value. +func (c *ObjectsCopyCall) IfSourceMetagenerationMatch(ifSourceMetagenerationMatch int64) *ObjectsCopyCall { + c.urlParams_.Set("ifSourceMetagenerationMatch", fmt.Sprint(ifSourceMetagenerationMatch)) + return c +} + +// IfSourceMetagenerationNotMatch sets the optional parameter +// "ifSourceMetagenerationNotMatch": Makes the operation conditional on +// whether the source object's current metageneration does not match the +// given value. +func (c *ObjectsCopyCall) IfSourceMetagenerationNotMatch(ifSourceMetagenerationNotMatch int64) *ObjectsCopyCall { + c.urlParams_.Set("ifSourceMetagenerationNotMatch", fmt.Sprint(ifSourceMetagenerationNotMatch)) + return c +} + +// Projection sets the optional parameter "projection": Set of +// properties to return. Defaults to noAcl, unless the object resource +// specifies the acl property, when it defaults to full. +// +// Possible values: +// "full" - Include all properties. +// "noAcl" - Omit the acl property. +func (c *ObjectsCopyCall) Projection(projection string) *ObjectsCopyCall { + c.urlParams_.Set("projection", projection) + return c +} + +// SourceGeneration sets the optional parameter "sourceGeneration": If +// present, selects a specific revision of the source object (as opposed +// to the latest version, the default). +func (c *ObjectsCopyCall) SourceGeneration(sourceGeneration int64) *ObjectsCopyCall { + c.urlParams_.Set("sourceGeneration", fmt.Sprint(sourceGeneration)) + return c +} + +// Fields allows partial responses to be retrieved. 
See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ObjectsCopyCall) Fields(s ...googleapi.Field) *ObjectsCopyCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do and Download +// methods. Any pending HTTP request will be aborted if the provided +// context is canceled. +func (c *ObjectsCopyCall) Context(ctx context.Context) *ObjectsCopyCall { + c.ctx_ = ctx + return c +} + +func (c *ObjectsCopyCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.object) + if err != nil { + return nil, err + } + ctype := "application/json" + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{sourceBucket}/o/{sourceObject}/copyTo/b/{destinationBucket}/o/{destinationObject}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "sourceBucket": c.sourceBucket, + "sourceObject": c.sourceObject, + "destinationBucket": c.destinationBucket, + "destinationObject": c.destinationObject, + }) + req.Header.Set("Content-Type", ctype) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Download fetches the API endpoint's "media" value, instead of the normal +// API response value. If the returned error is nil, the Response is guaranteed to +// have a 2xx status code. Callers must close the Response.Body as usual. +func (c *ObjectsCopyCall) Download(opts ...googleapi.CallOption) (*http.Response, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("media") + if err != nil { + return nil, err + } + if err := googleapi.CheckMediaResponse(res); err != nil { + res.Body.Close() + return nil, err + } + return res, nil +} + +// Do executes the "storage.objects.copy" call. +// Exactly one of *Object or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Object.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *ObjectsCopyCall) Do(opts ...googleapi.CallOption) (*Object, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Object{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Copies a source object to a destination object. Optionally overrides metadata.", + // "httpMethod": "POST", + // "id": "storage.objects.copy", + // "parameterOrder": [ + // "sourceBucket", + // "sourceObject", + // "destinationBucket", + // "destinationObject" + // ], + // "parameters": { + // "destinationBucket": { + // "description": "Name of the bucket in which to store the new object. 
Overrides the provided object metadata's bucket value, if any.For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "destinationObject": { + // "description": "Name of the new object. Required when the object metadata is not otherwise provided. Overrides the object metadata's name value, if any.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "destinationPredefinedAcl": { + // "description": "Apply a predefined set of access controls to the destination object.", + // "enum": [ + // "authenticatedRead", + // "bucketOwnerFullControl", + // "bucketOwnerRead", + // "private", + // "projectPrivate", + // "publicRead" + // ], + // "enumDescriptions": [ + // "Object owner gets OWNER access, and allAuthenticatedUsers get READER access.", + // "Object owner gets OWNER access, and project team owners get OWNER access.", + // "Object owner gets OWNER access, and project team owners get READER access.", + // "Object owner gets OWNER access.", + // "Object owner gets OWNER access, and project team members get access according to their roles.", + // "Object owner gets OWNER access, and allUsers get READER access." + // ], + // "location": "query", + // "type": "string" + // }, + // "ifGenerationMatch": { + // "description": "Makes the operation conditional on whether the destination object's current generation matches the given value.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "ifGenerationNotMatch": { + // "description": "Makes the operation conditional on whether the destination object's current generation does not match the given value.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "ifMetagenerationMatch": { + // "description": "Makes the operation conditional on whether the destination object's current metageneration matches the given value.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "ifMetagenerationNotMatch": { + // "description": "Makes the operation conditional on whether the destination object's current metageneration does not match the given value.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "ifSourceGenerationMatch": { + // "description": "Makes the operation conditional on whether the source object's generation matches the given value.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "ifSourceGenerationNotMatch": { + // "description": "Makes the operation conditional on whether the source object's generation does not match the given value.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "ifSourceMetagenerationMatch": { + // "description": "Makes the operation conditional on whether the source object's current metageneration matches the given value.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "ifSourceMetagenerationNotMatch": { + // "description": "Makes the operation conditional on whether the source object's current metageneration does not match the given value.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "projection": { + // "description": "Set of properties to return. 
Defaults to noAcl, unless the object resource specifies the acl property, when it defaults to full.", + // "enum": [ + // "full", + // "noAcl" + // ], + // "enumDescriptions": [ + // "Include all properties.", + // "Omit the acl property." + // ], + // "location": "query", + // "type": "string" + // }, + // "sourceBucket": { + // "description": "Name of the bucket in which to find the source object.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "sourceGeneration": { + // "description": "If present, selects a specific revision of the source object (as opposed to the latest version, the default).", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "sourceObject": { + // "description": "Name of the source object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "b/{sourceBucket}/o/{sourceObject}/copyTo/b/{destinationBucket}/o/{destinationObject}", + // "request": { + // "$ref": "Object" + // }, + // "response": { + // "$ref": "Object" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/devstorage.full_control", + // "https://www.googleapis.com/auth/devstorage.read_write" + // ], + // "supportsMediaDownload": true, + // "useMediaDownloadService": true + // } + +} + +// method id "storage.objects.delete": + +type ObjectsDeleteCall struct { + s *Service + bucket string + object string + urlParams_ gensupport.URLParams + ctx_ context.Context +} + +// Delete: Deletes an object and its metadata. Deletions are permanent +// if versioning is not enabled for the bucket, or if the generation +// parameter is used. +func (r *ObjectsService) Delete(bucket string, object string) *ObjectsDeleteCall { + c := &ObjectsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.bucket = bucket + c.object = object + return c +} + +// Generation sets the optional parameter "generation": If present, +// permanently deletes a specific revision of this object (as opposed to +// the latest version, the default). +func (c *ObjectsDeleteCall) Generation(generation int64) *ObjectsDeleteCall { + c.urlParams_.Set("generation", fmt.Sprint(generation)) + return c +} + +// IfGenerationMatch sets the optional parameter "ifGenerationMatch": +// Makes the operation conditional on whether the object's current +// generation matches the given value. +func (c *ObjectsDeleteCall) IfGenerationMatch(ifGenerationMatch int64) *ObjectsDeleteCall { + c.urlParams_.Set("ifGenerationMatch", fmt.Sprint(ifGenerationMatch)) + return c +} + +// IfGenerationNotMatch sets the optional parameter +// "ifGenerationNotMatch": Makes the operation conditional on whether +// the object's current generation does not match the given value. +func (c *ObjectsDeleteCall) IfGenerationNotMatch(ifGenerationNotMatch int64) *ObjectsDeleteCall { + c.urlParams_.Set("ifGenerationNotMatch", fmt.Sprint(ifGenerationNotMatch)) + return c +} + +// IfMetagenerationMatch sets the optional parameter +// "ifMetagenerationMatch": Makes the operation conditional on whether +// the object's current metageneration matches the given value. 
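+//
+// A minimal sketch (editorial addition, not generator output): delete
+// only if the object's metadata is unchanged since it was last read.
+// svc, ctx and a previously fetched meta *Object are assumed:
+//
+//	err := svc.Objects.Delete("my-bucket", "old.txt").
+//		IfMetagenerationMatch(meta.Metageneration).
+//		Context(ctx).
+//		Do()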
+func (c *ObjectsDeleteCall) IfMetagenerationMatch(ifMetagenerationMatch int64) *ObjectsDeleteCall { + c.urlParams_.Set("ifMetagenerationMatch", fmt.Sprint(ifMetagenerationMatch)) + return c +} + +// IfMetagenerationNotMatch sets the optional parameter +// "ifMetagenerationNotMatch": Makes the operation conditional on +// whether the object's current metageneration does not match the given +// value. +func (c *ObjectsDeleteCall) IfMetagenerationNotMatch(ifMetagenerationNotMatch int64) *ObjectsDeleteCall { + c.urlParams_.Set("ifMetagenerationNotMatch", fmt.Sprint(ifMetagenerationNotMatch)) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ObjectsDeleteCall) Fields(s ...googleapi.Field) *ObjectsDeleteCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ObjectsDeleteCall) Context(ctx context.Context) *ObjectsDeleteCall { + c.ctx_ = ctx + return c +} + +func (c *ObjectsDeleteCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/{object}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("DELETE", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "bucket": c.bucket, + "object": c.object, + }) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "storage.objects.delete" call. +func (c *ObjectsDeleteCall) Do(opts ...googleapi.CallOption) error { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if err != nil { + return err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return err + } + return nil + // { + // "description": "Deletes an object and its metadata. 
Deletions are permanent if versioning is not enabled for the bucket, or if the generation parameter is used.", + // "httpMethod": "DELETE", + // "id": "storage.objects.delete", + // "parameterOrder": [ + // "bucket", + // "object" + // ], + // "parameters": { + // "bucket": { + // "description": "Name of the bucket in which the object resides.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "generation": { + // "description": "If present, permanently deletes a specific revision of this object (as opposed to the latest version, the default).", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "ifGenerationMatch": { + // "description": "Makes the operation conditional on whether the object's current generation matches the given value.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "ifGenerationNotMatch": { + // "description": "Makes the operation conditional on whether the object's current generation does not match the given value.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "ifMetagenerationMatch": { + // "description": "Makes the operation conditional on whether the object's current metageneration matches the given value.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "ifMetagenerationNotMatch": { + // "description": "Makes the operation conditional on whether the object's current metageneration does not match the given value.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "object": { + // "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "b/{bucket}/o/{object}", + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/devstorage.full_control", + // "https://www.googleapis.com/auth/devstorage.read_write" + // ] + // } + +} + +// method id "storage.objects.get": + +type ObjectsGetCall struct { + s *Service + bucket string + object string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context +} + +// Get: Retrieves an object or its metadata. +func (r *ObjectsService) Get(bucket string, object string) *ObjectsGetCall { + c := &ObjectsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.bucket = bucket + c.object = object + return c +} + +// Generation sets the optional parameter "generation": If present, +// selects a specific revision of this object (as opposed to the latest +// version, the default). +func (c *ObjectsGetCall) Generation(generation int64) *ObjectsGetCall { + c.urlParams_.Set("generation", fmt.Sprint(generation)) + return c +} + +// IfGenerationMatch sets the optional parameter "ifGenerationMatch": +// Makes the operation conditional on whether the object's generation +// matches the given value. +func (c *ObjectsGetCall) IfGenerationMatch(ifGenerationMatch int64) *ObjectsGetCall { + c.urlParams_.Set("ifGenerationMatch", fmt.Sprint(ifGenerationMatch)) + return c +} + +// IfGenerationNotMatch sets the optional parameter +// "ifGenerationNotMatch": Makes the operation conditional on whether +// the object's generation does not match the given value. 
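+//
+// A minimal sketch (editorial addition, not generator output): fetch
+// metadata only when the object has moved past a known generation;
+// svc, ctx and knownGen are assumed to exist:
+//
+//	obj, err := svc.Objects.Get("my-bucket", "data.bin").
+//		IfGenerationNotMatch(knownGen).
+//		Context(ctx).
+//		Do()
+//	if googleapi.IsNotModified(err) {
+//		// still at knownGen; nothing to refresh
+//	}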
+func (c *ObjectsGetCall) IfGenerationNotMatch(ifGenerationNotMatch int64) *ObjectsGetCall {
+	c.urlParams_.Set("ifGenerationNotMatch", fmt.Sprint(ifGenerationNotMatch))
+	return c
+}
+
+// IfMetagenerationMatch sets the optional parameter
+// "ifMetagenerationMatch": Makes the operation conditional on whether
+// the object's current metageneration matches the given value.
+func (c *ObjectsGetCall) IfMetagenerationMatch(ifMetagenerationMatch int64) *ObjectsGetCall {
+	c.urlParams_.Set("ifMetagenerationMatch", fmt.Sprint(ifMetagenerationMatch))
+	return c
+}
+
+// IfMetagenerationNotMatch sets the optional parameter
+// "ifMetagenerationNotMatch": Makes the operation conditional on
+// whether the object's current metageneration does not match the given
+// value.
+func (c *ObjectsGetCall) IfMetagenerationNotMatch(ifMetagenerationNotMatch int64) *ObjectsGetCall {
+	c.urlParams_.Set("ifMetagenerationNotMatch", fmt.Sprint(ifMetagenerationNotMatch))
+	return c
+}
+
+// Projection sets the optional parameter "projection": Set of
+// properties to return. Defaults to noAcl.
+//
+// Possible values:
+//   "full" - Include all properties.
+//   "noAcl" - Omit the acl property.
+func (c *ObjectsGetCall) Projection(projection string) *ObjectsGetCall {
+	c.urlParams_.Set("projection", projection)
+	return c
+}
+
+// Fields allows partial responses to be retrieved. See
+// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *ObjectsGetCall) Fields(s ...googleapi.Field) *ObjectsGetCall {
+	c.urlParams_.Set("fields", googleapi.CombineFields(s))
+	return c
+}
+
+// IfNoneMatch sets the optional parameter which makes the operation
+// fail if the object's ETag matches the given value. This is useful for
+// getting updates only after the object has changed since the last
+// request. Use googleapi.IsNotModified to check whether the response
+// error from Do is the result of If-None-Match.
+func (c *ObjectsGetCall) IfNoneMatch(entityTag string) *ObjectsGetCall {
+	c.ifNoneMatch_ = entityTag
+	return c
+}
+
+// Context sets the context to be used in this call's Do and Download
+// methods. Any pending HTTP request will be aborted if the provided
+// context is canceled.
+func (c *ObjectsGetCall) Context(ctx context.Context) *ObjectsGetCall {
+	c.ctx_ = ctx
+	return c
+}
+
+func (c *ObjectsGetCall) doRequest(alt string) (*http.Response, error) {
+	var body io.Reader = nil
+	c.urlParams_.Set("alt", alt)
+	urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/{object}")
+	urls += "?" + c.urlParams_.Encode()
+	req, _ := http.NewRequest("GET", urls, body)
+	googleapi.Expand(req.URL, map[string]string{
+		"bucket": c.bucket,
+		"object": c.object,
+	})
+	req.Header.Set("User-Agent", c.s.userAgent())
+	if c.ifNoneMatch_ != "" {
+		req.Header.Set("If-None-Match", c.ifNoneMatch_)
+	}
+	if c.ctx_ != nil {
+		return ctxhttp.Do(c.ctx_, c.s.client, req)
+	}
+	return c.s.client.Do(req)
+}
+
+// Download fetches the API endpoint's "media" value, instead of the normal
+// API response value. If the returned error is nil, the Response is guaranteed to
+// have a 2xx status code. Callers must close the Response.Body as usual.
+func (c *ObjectsGetCall) Download(opts ...googleapi.CallOption) (*http.Response, error) {
+	gensupport.SetOptions(c.urlParams_, opts...)
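+	// With alt=media the server returns the raw object content rather
+	// than its JSON metadata; the body is handed back unread below.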
+ res, err := c.doRequest("media") + if err != nil { + return nil, err + } + if err := googleapi.CheckMediaResponse(res); err != nil { + res.Body.Close() + return nil, err + } + return res, nil +} + +// Do executes the "storage.objects.get" call. +// Exactly one of *Object or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Object.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *ObjectsGetCall) Do(opts ...googleapi.CallOption) (*Object, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Object{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Retrieves an object or its metadata.", + // "httpMethod": "GET", + // "id": "storage.objects.get", + // "parameterOrder": [ + // "bucket", + // "object" + // ], + // "parameters": { + // "bucket": { + // "description": "Name of the bucket in which the object resides.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "generation": { + // "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "ifGenerationMatch": { + // "description": "Makes the operation conditional on whether the object's generation matches the given value.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "ifGenerationNotMatch": { + // "description": "Makes the operation conditional on whether the object's generation does not match the given value.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "ifMetagenerationMatch": { + // "description": "Makes the operation conditional on whether the object's current metageneration matches the given value.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "ifMetagenerationNotMatch": { + // "description": "Makes the operation conditional on whether the object's current metageneration does not match the given value.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "object": { + // "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "projection": { + // "description": "Set of properties to return. Defaults to noAcl.", + // "enum": [ + // "full", + // "noAcl" + // ], + // "enumDescriptions": [ + // "Include all properties.", + // "Omit the acl property." 
+ // ], + // "location": "query", + // "type": "string" + // } + // }, + // "path": "b/{bucket}/o/{object}", + // "response": { + // "$ref": "Object" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/cloud-platform.read-only", + // "https://www.googleapis.com/auth/devstorage.full_control", + // "https://www.googleapis.com/auth/devstorage.read_only", + // "https://www.googleapis.com/auth/devstorage.read_write" + // ], + // "supportsMediaDownload": true, + // "useMediaDownloadService": true + // } + +} + +// method id "storage.objects.insert": + +type ObjectsInsertCall struct { + s *Service + bucket string + object *Object + urlParams_ gensupport.URLParams + media_ io.Reader + resumableBuffer_ *gensupport.ResumableBuffer + mediaType_ string + mediaSize_ int64 // mediaSize, if known. Used only for calls to progressUpdater_. + progressUpdater_ googleapi.ProgressUpdater + ctx_ context.Context +} + +// Insert: Stores a new object and metadata. +func (r *ObjectsService) Insert(bucket string, object *Object) *ObjectsInsertCall { + c := &ObjectsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.bucket = bucket + c.object = object + return c +} + +// ContentEncoding sets the optional parameter "contentEncoding": If +// set, sets the contentEncoding property of the final object to this +// value. Setting this parameter is equivalent to setting the +// contentEncoding metadata property. This can be useful when uploading +// an object with uploadType=media to indicate the encoding of the +// content being uploaded. +func (c *ObjectsInsertCall) ContentEncoding(contentEncoding string) *ObjectsInsertCall { + c.urlParams_.Set("contentEncoding", contentEncoding) + return c +} + +// IfGenerationMatch sets the optional parameter "ifGenerationMatch": +// Makes the operation conditional on whether the object's current +// generation matches the given value. +func (c *ObjectsInsertCall) IfGenerationMatch(ifGenerationMatch int64) *ObjectsInsertCall { + c.urlParams_.Set("ifGenerationMatch", fmt.Sprint(ifGenerationMatch)) + return c +} + +// IfGenerationNotMatch sets the optional parameter +// "ifGenerationNotMatch": Makes the operation conditional on whether +// the object's current generation does not match the given value. +func (c *ObjectsInsertCall) IfGenerationNotMatch(ifGenerationNotMatch int64) *ObjectsInsertCall { + c.urlParams_.Set("ifGenerationNotMatch", fmt.Sprint(ifGenerationNotMatch)) + return c +} + +// IfMetagenerationMatch sets the optional parameter +// "ifMetagenerationMatch": Makes the operation conditional on whether +// the object's current metageneration matches the given value. +func (c *ObjectsInsertCall) IfMetagenerationMatch(ifMetagenerationMatch int64) *ObjectsInsertCall { + c.urlParams_.Set("ifMetagenerationMatch", fmt.Sprint(ifMetagenerationMatch)) + return c +} + +// IfMetagenerationNotMatch sets the optional parameter +// "ifMetagenerationNotMatch": Makes the operation conditional on +// whether the object's current metageneration does not match the given +// value. +func (c *ObjectsInsertCall) IfMetagenerationNotMatch(ifMetagenerationNotMatch int64) *ObjectsInsertCall { + c.urlParams_.Set("ifMetagenerationNotMatch", fmt.Sprint(ifMetagenerationNotMatch)) + return c +} + +// Name sets the optional parameter "name": Name of the object. Required +// when the object metadata is not otherwise provided. Overrides the +// object metadata's name value, if any. 
For information about how to +// URL encode object names to be path safe, see Encoding URI Path Parts. +func (c *ObjectsInsertCall) Name(name string) *ObjectsInsertCall { + c.urlParams_.Set("name", name) + return c +} + +// PredefinedAcl sets the optional parameter "predefinedAcl": Apply a +// predefined set of access controls to this object. +// +// Possible values: +// "authenticatedRead" - Object owner gets OWNER access, and +// allAuthenticatedUsers get READER access. +// "bucketOwnerFullControl" - Object owner gets OWNER access, and +// project team owners get OWNER access. +// "bucketOwnerRead" - Object owner gets OWNER access, and project +// team owners get READER access. +// "private" - Object owner gets OWNER access. +// "projectPrivate" - Object owner gets OWNER access, and project team +// members get access according to their roles. +// "publicRead" - Object owner gets OWNER access, and allUsers get +// READER access. +func (c *ObjectsInsertCall) PredefinedAcl(predefinedAcl string) *ObjectsInsertCall { + c.urlParams_.Set("predefinedAcl", predefinedAcl) + return c +} + +// Projection sets the optional parameter "projection": Set of +// properties to return. Defaults to noAcl, unless the object resource +// specifies the acl property, when it defaults to full. +// +// Possible values: +// "full" - Include all properties. +// "noAcl" - Omit the acl property. +func (c *ObjectsInsertCall) Projection(projection string) *ObjectsInsertCall { + c.urlParams_.Set("projection", projection) + return c +} + +// Media specifies the media to upload in a single chunk. At most one of +// Media and ResumableMedia may be set. +func (c *ObjectsInsertCall) Media(r io.Reader, options ...googleapi.MediaOption) *ObjectsInsertCall { + opts := googleapi.ProcessMediaOptions(options) + chunkSize := opts.ChunkSize + r, c.mediaType_ = gensupport.DetermineContentType(r, opts.ContentType) + c.media_, c.resumableBuffer_ = gensupport.PrepareUpload(r, chunkSize) + return c +} + +// ResumableMedia specifies the media to upload in chunks and can be +// canceled with ctx. ResumableMedia is deprecated in favour of Media. +// At most one of Media and ResumableMedia may be set. mediaType +// identifies the MIME media type of the upload, such as "image/png". If +// mediaType is "", it will be auto-detected. The provided ctx will +// supersede any context previously provided to the Context method. +func (c *ObjectsInsertCall) ResumableMedia(ctx context.Context, r io.ReaderAt, size int64, mediaType string) *ObjectsInsertCall { + c.ctx_ = ctx + rdr := gensupport.ReaderAtToReader(r, size) + rdr, c.mediaType_ = gensupport.DetermineContentType(rdr, mediaType) + c.resumableBuffer_ = gensupport.NewResumableBuffer(rdr, googleapi.DefaultUploadChunkSize) + c.media_ = nil + c.mediaSize_ = size + return c +} + +// ProgressUpdater provides a callback function that will be called +// after every chunk. It should be a low-latency function in order to +// not slow down the upload operation. This should only be called when +// using ResumableMedia (as opposed to Media). +func (c *ObjectsInsertCall) ProgressUpdater(pu googleapi.ProgressUpdater) *ObjectsInsertCall { + c.progressUpdater_ = pu + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. 
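+//
+// A minimal sketch (editorial addition, not generator output): upload a
+// file in a single chunk and ask for only a few properties of the
+// stored object back. svc, ctx and an open *os.File f are assumed:
+//
+//	obj, err := svc.Objects.Insert("my-bucket", &Object{Name: "report.csv"}).
+//		Media(f).
+//		Fields("name", "size", "md5Hash").
+//		Context(ctx).
+//		Do()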
+func (c *ObjectsInsertCall) Fields(s ...googleapi.Field) *ObjectsInsertCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +// This context will supersede any context previously provided to the +// ResumableMedia method. +func (c *ObjectsInsertCall) Context(ctx context.Context) *ObjectsInsertCall { + c.ctx_ = ctx + return c +} + +func (c *ObjectsInsertCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.object) + if err != nil { + return nil, err + } + ctype := "application/json" + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o") + if c.media_ != nil || c.resumableBuffer_ != nil { + urls = strings.Replace(urls, "https://www.googleapis.com/", "https://www.googleapis.com/upload/", 1) + protocol := "multipart" + if c.resumableBuffer_ != nil { + protocol = "resumable" + } + c.urlParams_.Set("uploadType", protocol) + } + urls += "?" + c.urlParams_.Encode() + if c.media_ != nil { + var combined io.ReadCloser + combined, ctype = gensupport.CombineBodyMedia(body, ctype, c.media_, c.mediaType_) + defer combined.Close() + body = combined + } + req, _ := http.NewRequest("POST", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "bucket": c.bucket, + }) + if c.resumableBuffer_ != nil { + req.Header.Set("X-Upload-Content-Type", c.mediaType_) + } + req.Header.Set("Content-Type", ctype) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "storage.objects.insert" call. +// Exactly one of *Object or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Object.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *ObjectsInsertCall) Do(opts ...googleapi.CallOption) (*Object, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
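+	// The initial request carries the JSON metadata (and, for multipart
+	// uploads, the media itself); resumable uploads send the media in
+	// chunks afterwards via rx.Upload below.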
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + if c.resumableBuffer_ != nil { + loc := res.Header.Get("Location") + rx := &gensupport.ResumableUpload{ + Client: c.s.client, + UserAgent: c.s.userAgent(), + URI: loc, + Media: c.resumableBuffer_, + MediaType: c.mediaType_, + Callback: func(curr int64) { + if c.progressUpdater_ != nil { + c.progressUpdater_(curr, c.mediaSize_) + } + }, + } + ctx := c.ctx_ + if ctx == nil { + ctx = context.TODO() + } + res, err = rx.Upload(ctx) + if err != nil { + return nil, err + } + defer res.Body.Close() + } + ret := &Object{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Stores a new object and metadata.", + // "httpMethod": "POST", + // "id": "storage.objects.insert", + // "mediaUpload": { + // "accept": [ + // "*/*" + // ], + // "protocols": { + // "resumable": { + // "multipart": true, + // "path": "/resumable/upload/storage/v1/b/{bucket}/o" + // }, + // "simple": { + // "multipart": true, + // "path": "/upload/storage/v1/b/{bucket}/o" + // } + // } + // }, + // "parameterOrder": [ + // "bucket" + // ], + // "parameters": { + // "bucket": { + // "description": "Name of the bucket in which to store the new object. Overrides the provided object metadata's bucket value, if any.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "contentEncoding": { + // "description": "If set, sets the contentEncoding property of the final object to this value. Setting this parameter is equivalent to setting the contentEncoding metadata property. This can be useful when uploading an object with uploadType=media to indicate the encoding of the content being uploaded.", + // "location": "query", + // "type": "string" + // }, + // "ifGenerationMatch": { + // "description": "Makes the operation conditional on whether the object's current generation matches the given value.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "ifGenerationNotMatch": { + // "description": "Makes the operation conditional on whether the object's current generation does not match the given value.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "ifMetagenerationMatch": { + // "description": "Makes the operation conditional on whether the object's current metageneration matches the given value.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "ifMetagenerationNotMatch": { + // "description": "Makes the operation conditional on whether the object's current metageneration does not match the given value.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "name": { + // "description": "Name of the object. Required when the object metadata is not otherwise provided. Overrides the object metadata's name value, if any. 
For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + // "location": "query", + // "type": "string" + // }, + // "predefinedAcl": { + // "description": "Apply a predefined set of access controls to this object.", + // "enum": [ + // "authenticatedRead", + // "bucketOwnerFullControl", + // "bucketOwnerRead", + // "private", + // "projectPrivate", + // "publicRead" + // ], + // "enumDescriptions": [ + // "Object owner gets OWNER access, and allAuthenticatedUsers get READER access.", + // "Object owner gets OWNER access, and project team owners get OWNER access.", + // "Object owner gets OWNER access, and project team owners get READER access.", + // "Object owner gets OWNER access.", + // "Object owner gets OWNER access, and project team members get access according to their roles.", + // "Object owner gets OWNER access, and allUsers get READER access." + // ], + // "location": "query", + // "type": "string" + // }, + // "projection": { + // "description": "Set of properties to return. Defaults to noAcl, unless the object resource specifies the acl property, when it defaults to full.", + // "enum": [ + // "full", + // "noAcl" + // ], + // "enumDescriptions": [ + // "Include all properties.", + // "Omit the acl property." + // ], + // "location": "query", + // "type": "string" + // } + // }, + // "path": "b/{bucket}/o", + // "request": { + // "$ref": "Object" + // }, + // "response": { + // "$ref": "Object" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/devstorage.full_control", + // "https://www.googleapis.com/auth/devstorage.read_write" + // ], + // "supportsMediaDownload": true, + // "supportsMediaUpload": true, + // "useMediaDownloadService": true + // } + +} + +// method id "storage.objects.list": + +type ObjectsListCall struct { + s *Service + bucket string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context +} + +// List: Retrieves a list of objects matching the criteria. +func (r *ObjectsService) List(bucket string) *ObjectsListCall { + c := &ObjectsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.bucket = bucket + return c +} + +// Delimiter sets the optional parameter "delimiter": Returns results in +// a directory-like mode. items will contain only objects whose names, +// aside from the prefix, do not contain delimiter. Objects whose names, +// aside from the prefix, contain delimiter will have their name, +// truncated after the delimiter, returned in prefixes. Duplicate +// prefixes are omitted. +func (c *ObjectsListCall) Delimiter(delimiter string) *ObjectsListCall { + c.urlParams_.Set("delimiter", delimiter) + return c +} + +// MaxResults sets the optional parameter "maxResults": Maximum number +// of items plus prefixes to return. As duplicate prefixes are omitted, +// fewer total results may be returned than requested. The default value +// of this parameter is 1,000 items. +func (c *ObjectsListCall) MaxResults(maxResults int64) *ObjectsListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// PageToken sets the optional parameter "pageToken": A +// previously-returned page token representing part of the larger set of +// results to view. +func (c *ObjectsListCall) PageToken(pageToken string) *ObjectsListCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// Prefix sets the optional parameter "prefix": Filter results to +// objects whose names begin with this prefix. 
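+//
+// A minimal sketch (editorial addition, not generator output): a
+// directory-style listing under "photos/" that follows page tokens to
+// exhaustion. svc and ctx are assumed to exist:
+//
+//	call := svc.Objects.List("my-bucket").Prefix("photos/").Delimiter("/")
+//	for {
+//		objs, err := call.Context(ctx).Do()
+//		if err != nil {
+//			break // handle error
+//		}
+//		// use objs.Items (objects) and objs.Prefixes ("subdirectories")
+//		if objs.NextPageToken == "" {
+//			break
+//		}
+//		call.PageToken(objs.NextPageToken)
+//	}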
+func (c *ObjectsListCall) Prefix(prefix string) *ObjectsListCall {
+	c.urlParams_.Set("prefix", prefix)
+	return c
+}
+
+// Projection sets the optional parameter "projection": Set of
+// properties to return. Defaults to noAcl.
+//
+// Possible values:
+//   "full" - Include all properties.
+//   "noAcl" - Omit the acl property.
+func (c *ObjectsListCall) Projection(projection string) *ObjectsListCall {
+	c.urlParams_.Set("projection", projection)
+	return c
+}
+
+// Versions sets the optional parameter "versions": If true, lists all
+// versions of an object as distinct results. The default is false. For
+// more information, see Object Versioning.
+func (c *ObjectsListCall) Versions(versions bool) *ObjectsListCall {
+	c.urlParams_.Set("versions", fmt.Sprint(versions))
+	return c
+}
+
+// Fields allows partial responses to be retrieved. See
+// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *ObjectsListCall) Fields(s ...googleapi.Field) *ObjectsListCall {
+	c.urlParams_.Set("fields", googleapi.CombineFields(s))
+	return c
+}
+
+// IfNoneMatch sets the optional parameter which makes the operation
+// fail if the object's ETag matches the given value. This is useful for
+// getting updates only after the object has changed since the last
+// request. Use googleapi.IsNotModified to check whether the response
+// error from Do is the result of If-None-Match.
+func (c *ObjectsListCall) IfNoneMatch(entityTag string) *ObjectsListCall {
+	c.ifNoneMatch_ = entityTag
+	return c
+}
+
+// Context sets the context to be used in this call's Do method. Any
+// pending HTTP request will be aborted if the provided context is
+// canceled.
+func (c *ObjectsListCall) Context(ctx context.Context) *ObjectsListCall {
+	c.ctx_ = ctx
+	return c
+}
+
+func (c *ObjectsListCall) doRequest(alt string) (*http.Response, error) {
+	var body io.Reader = nil
+	c.urlParams_.Set("alt", alt)
+	urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o")
+	urls += "?" + c.urlParams_.Encode()
+	req, _ := http.NewRequest("GET", urls, body)
+	googleapi.Expand(req.URL, map[string]string{
+		"bucket": c.bucket,
+	})
+	req.Header.Set("User-Agent", c.s.userAgent())
+	if c.ifNoneMatch_ != "" {
+		req.Header.Set("If-None-Match", c.ifNoneMatch_)
+	}
+	if c.ctx_ != nil {
+		return ctxhttp.Do(c.ctx_, c.s.client, req)
+	}
+	return c.s.client.Do(req)
+}
+
+// Do executes the "storage.objects.list" call.
+// Exactly one of *Objects or error will be non-nil. Any non-2xx status
+// code is an error. Response headers are in either
+// *Objects.ServerResponse.Header or (if a response was returned at all)
+// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to
+// check whether the returned error was because http.StatusNotModified
+// was returned.
+func (c *ObjectsListCall) Do(opts ...googleapi.CallOption) (*Objects, error) {
+	gensupport.SetOptions(c.urlParams_, opts...)
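+	// If IfNoneMatch was set and the listing is unchanged, the server
+	// replies 304, which the checks below surface as a *googleapi.Error.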
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Objects{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Retrieves a list of objects matching the criteria.", + // "httpMethod": "GET", + // "id": "storage.objects.list", + // "parameterOrder": [ + // "bucket" + // ], + // "parameters": { + // "bucket": { + // "description": "Name of the bucket in which to look for objects.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "delimiter": { + // "description": "Returns results in a directory-like mode. items will contain only objects whose names, aside from the prefix, do not contain delimiter. Objects whose names, aside from the prefix, contain delimiter will have their name, truncated after the delimiter, returned in prefixes. Duplicate prefixes are omitted.", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "description": "Maximum number of items plus prefixes to return. As duplicate prefixes are omitted, fewer total results may be returned than requested. The default value of this parameter is 1,000 items.", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "pageToken": { + // "description": "A previously-returned page token representing part of the larger set of results to view.", + // "location": "query", + // "type": "string" + // }, + // "prefix": { + // "description": "Filter results to objects whose names begin with this prefix.", + // "location": "query", + // "type": "string" + // }, + // "projection": { + // "description": "Set of properties to return. Defaults to noAcl.", + // "enum": [ + // "full", + // "noAcl" + // ], + // "enumDescriptions": [ + // "Include all properties.", + // "Omit the acl property." + // ], + // "location": "query", + // "type": "string" + // }, + // "versions": { + // "description": "If true, lists all versions of an object as distinct results. The default is false. For more information, see Object Versioning.", + // "location": "query", + // "type": "boolean" + // } + // }, + // "path": "b/{bucket}/o", + // "response": { + // "$ref": "Objects" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/cloud-platform.read-only", + // "https://www.googleapis.com/auth/devstorage.full_control", + // "https://www.googleapis.com/auth/devstorage.read_only", + // "https://www.googleapis.com/auth/devstorage.read_write" + // ], + // "supportsSubscription": true + // } + +} + +// method id "storage.objects.patch": + +type ObjectsPatchCall struct { + s *Service + bucket string + object string + object2 *Object + urlParams_ gensupport.URLParams + ctx_ context.Context +} + +// Patch: Updates an object's metadata. This method supports patch +// semantics. 
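+//
+// A minimal sketch (editorial addition, not generator output): patch
+// just the content type of an existing object, leaving its other
+// metadata untouched. svc and ctx are assumed to exist:
+//
+//	obj, err := svc.Objects.Patch("my-bucket", "report.csv", &Object{
+//		ContentType: "text/csv",
+//	}).Context(ctx).Do()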
+func (r *ObjectsService) Patch(bucket string, object string, object2 *Object) *ObjectsPatchCall { + c := &ObjectsPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.bucket = bucket + c.object = object + c.object2 = object2 + return c +} + +// Generation sets the optional parameter "generation": If present, +// selects a specific revision of this object (as opposed to the latest +// version, the default). +func (c *ObjectsPatchCall) Generation(generation int64) *ObjectsPatchCall { + c.urlParams_.Set("generation", fmt.Sprint(generation)) + return c +} + +// IfGenerationMatch sets the optional parameter "ifGenerationMatch": +// Makes the operation conditional on whether the object's current +// generation matches the given value. +func (c *ObjectsPatchCall) IfGenerationMatch(ifGenerationMatch int64) *ObjectsPatchCall { + c.urlParams_.Set("ifGenerationMatch", fmt.Sprint(ifGenerationMatch)) + return c +} + +// IfGenerationNotMatch sets the optional parameter +// "ifGenerationNotMatch": Makes the operation conditional on whether +// the object's current generation does not match the given value. +func (c *ObjectsPatchCall) IfGenerationNotMatch(ifGenerationNotMatch int64) *ObjectsPatchCall { + c.urlParams_.Set("ifGenerationNotMatch", fmt.Sprint(ifGenerationNotMatch)) + return c +} + +// IfMetagenerationMatch sets the optional parameter +// "ifMetagenerationMatch": Makes the operation conditional on whether +// the object's current metageneration matches the given value. +func (c *ObjectsPatchCall) IfMetagenerationMatch(ifMetagenerationMatch int64) *ObjectsPatchCall { + c.urlParams_.Set("ifMetagenerationMatch", fmt.Sprint(ifMetagenerationMatch)) + return c +} + +// IfMetagenerationNotMatch sets the optional parameter +// "ifMetagenerationNotMatch": Makes the operation conditional on +// whether the object's current metageneration does not match the given +// value. +func (c *ObjectsPatchCall) IfMetagenerationNotMatch(ifMetagenerationNotMatch int64) *ObjectsPatchCall { + c.urlParams_.Set("ifMetagenerationNotMatch", fmt.Sprint(ifMetagenerationNotMatch)) + return c +} + +// PredefinedAcl sets the optional parameter "predefinedAcl": Apply a +// predefined set of access controls to this object. +// +// Possible values: +// "authenticatedRead" - Object owner gets OWNER access, and +// allAuthenticatedUsers get READER access. +// "bucketOwnerFullControl" - Object owner gets OWNER access, and +// project team owners get OWNER access. +// "bucketOwnerRead" - Object owner gets OWNER access, and project +// team owners get READER access. +// "private" - Object owner gets OWNER access. +// "projectPrivate" - Object owner gets OWNER access, and project team +// members get access according to their roles. +// "publicRead" - Object owner gets OWNER access, and allUsers get +// READER access. +func (c *ObjectsPatchCall) PredefinedAcl(predefinedAcl string) *ObjectsPatchCall { + c.urlParams_.Set("predefinedAcl", predefinedAcl) + return c +} + +// Projection sets the optional parameter "projection": Set of +// properties to return. Defaults to full. +// +// Possible values: +// "full" - Include all properties. +// "noAcl" - Omit the acl property. +func (c *ObjectsPatchCall) Projection(projection string) *ObjectsPatchCall { + c.urlParams_.Set("projection", projection) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. 
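+//
+// Editorial sketch (not generated code): restricting the response to a
+// few fields; the field names follow the JSON names on Object, and svc
+// is a placeholder *Service:
+//
+//	obj, err := svc.Objects.Patch("my-bucket", "notes.txt", &Object{}).
+//		Fields("name", "contentType").Do()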
+func (c *ObjectsPatchCall) Fields(s ...googleapi.Field) *ObjectsPatchCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ObjectsPatchCall) Context(ctx context.Context) *ObjectsPatchCall { + c.ctx_ = ctx + return c +} + +func (c *ObjectsPatchCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.object2) + if err != nil { + return nil, err + } + ctype := "application/json" + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/{object}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("PATCH", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "bucket": c.bucket, + "object": c.object, + }) + req.Header.Set("Content-Type", ctype) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "storage.objects.patch" call. +// Exactly one of *Object or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Object.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *ObjectsPatchCall) Do(opts ...googleapi.CallOption) (*Object, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Object{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Updates an object's metadata. 
This method supports patch semantics.", + // "httpMethod": "PATCH", + // "id": "storage.objects.patch", + // "parameterOrder": [ + // "bucket", + // "object" + // ], + // "parameters": { + // "bucket": { + // "description": "Name of the bucket in which the object resides.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "generation": { + // "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "ifGenerationMatch": { + // "description": "Makes the operation conditional on whether the object's current generation matches the given value.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "ifGenerationNotMatch": { + // "description": "Makes the operation conditional on whether the object's current generation does not match the given value.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "ifMetagenerationMatch": { + // "description": "Makes the operation conditional on whether the object's current metageneration matches the given value.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "ifMetagenerationNotMatch": { + // "description": "Makes the operation conditional on whether the object's current metageneration does not match the given value.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "object": { + // "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "predefinedAcl": { + // "description": "Apply a predefined set of access controls to this object.", + // "enum": [ + // "authenticatedRead", + // "bucketOwnerFullControl", + // "bucketOwnerRead", + // "private", + // "projectPrivate", + // "publicRead" + // ], + // "enumDescriptions": [ + // "Object owner gets OWNER access, and allAuthenticatedUsers get READER access.", + // "Object owner gets OWNER access, and project team owners get OWNER access.", + // "Object owner gets OWNER access, and project team owners get READER access.", + // "Object owner gets OWNER access.", + // "Object owner gets OWNER access, and project team members get access according to their roles.", + // "Object owner gets OWNER access, and allUsers get READER access." + // ], + // "location": "query", + // "type": "string" + // }, + // "projection": { + // "description": "Set of properties to return. Defaults to full.", + // "enum": [ + // "full", + // "noAcl" + // ], + // "enumDescriptions": [ + // "Include all properties.", + // "Omit the acl property." 
+ // ], + // "location": "query", + // "type": "string" + // } + // }, + // "path": "b/{bucket}/o/{object}", + // "request": { + // "$ref": "Object" + // }, + // "response": { + // "$ref": "Object" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/devstorage.full_control", + // "https://www.googleapis.com/auth/devstorage.read_write" + // ] + // } + +} + +// method id "storage.objects.rewrite": + +type ObjectsRewriteCall struct { + s *Service + sourceBucket string + sourceObject string + destinationBucket string + destinationObject string + object *Object + urlParams_ gensupport.URLParams + ctx_ context.Context +} + +// Rewrite: Rewrites a source object to a destination object. Optionally +// overrides metadata. +func (r *ObjectsService) Rewrite(sourceBucket string, sourceObject string, destinationBucket string, destinationObject string, object *Object) *ObjectsRewriteCall { + c := &ObjectsRewriteCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.sourceBucket = sourceBucket + c.sourceObject = sourceObject + c.destinationBucket = destinationBucket + c.destinationObject = destinationObject + c.object = object + return c +} + +// DestinationPredefinedAcl sets the optional parameter +// "destinationPredefinedAcl": Apply a predefined set of access controls +// to the destination object. +// +// Possible values: +// "authenticatedRead" - Object owner gets OWNER access, and +// allAuthenticatedUsers get READER access. +// "bucketOwnerFullControl" - Object owner gets OWNER access, and +// project team owners get OWNER access. +// "bucketOwnerRead" - Object owner gets OWNER access, and project +// team owners get READER access. +// "private" - Object owner gets OWNER access. +// "projectPrivate" - Object owner gets OWNER access, and project team +// members get access according to their roles. +// "publicRead" - Object owner gets OWNER access, and allUsers get +// READER access. +func (c *ObjectsRewriteCall) DestinationPredefinedAcl(destinationPredefinedAcl string) *ObjectsRewriteCall { + c.urlParams_.Set("destinationPredefinedAcl", destinationPredefinedAcl) + return c +} + +// IfGenerationMatch sets the optional parameter "ifGenerationMatch": +// Makes the operation conditional on whether the destination object's +// current generation matches the given value. +func (c *ObjectsRewriteCall) IfGenerationMatch(ifGenerationMatch int64) *ObjectsRewriteCall { + c.urlParams_.Set("ifGenerationMatch", fmt.Sprint(ifGenerationMatch)) + return c +} + +// IfGenerationNotMatch sets the optional parameter +// "ifGenerationNotMatch": Makes the operation conditional on whether +// the destination object's current generation does not match the given +// value. +func (c *ObjectsRewriteCall) IfGenerationNotMatch(ifGenerationNotMatch int64) *ObjectsRewriteCall { + c.urlParams_.Set("ifGenerationNotMatch", fmt.Sprint(ifGenerationNotMatch)) + return c +} + +// IfMetagenerationMatch sets the optional parameter +// "ifMetagenerationMatch": Makes the operation conditional on whether +// the destination object's current metageneration matches the given +// value. 
+func (c *ObjectsRewriteCall) IfMetagenerationMatch(ifMetagenerationMatch int64) *ObjectsRewriteCall { + c.urlParams_.Set("ifMetagenerationMatch", fmt.Sprint(ifMetagenerationMatch)) + return c +} + +// IfMetagenerationNotMatch sets the optional parameter +// "ifMetagenerationNotMatch": Makes the operation conditional on +// whether the destination object's current metageneration does not +// match the given value. +func (c *ObjectsRewriteCall) IfMetagenerationNotMatch(ifMetagenerationNotMatch int64) *ObjectsRewriteCall { + c.urlParams_.Set("ifMetagenerationNotMatch", fmt.Sprint(ifMetagenerationNotMatch)) + return c +} + +// IfSourceGenerationMatch sets the optional parameter +// "ifSourceGenerationMatch": Makes the operation conditional on whether +// the source object's generation matches the given value. +func (c *ObjectsRewriteCall) IfSourceGenerationMatch(ifSourceGenerationMatch int64) *ObjectsRewriteCall { + c.urlParams_.Set("ifSourceGenerationMatch", fmt.Sprint(ifSourceGenerationMatch)) + return c +} + +// IfSourceGenerationNotMatch sets the optional parameter +// "ifSourceGenerationNotMatch": Makes the operation conditional on +// whether the source object's generation does not match the given +// value. +func (c *ObjectsRewriteCall) IfSourceGenerationNotMatch(ifSourceGenerationNotMatch int64) *ObjectsRewriteCall { + c.urlParams_.Set("ifSourceGenerationNotMatch", fmt.Sprint(ifSourceGenerationNotMatch)) + return c +} + +// IfSourceMetagenerationMatch sets the optional parameter +// "ifSourceMetagenerationMatch": Makes the operation conditional on +// whether the source object's current metageneration matches the given +// value. +func (c *ObjectsRewriteCall) IfSourceMetagenerationMatch(ifSourceMetagenerationMatch int64) *ObjectsRewriteCall { + c.urlParams_.Set("ifSourceMetagenerationMatch", fmt.Sprint(ifSourceMetagenerationMatch)) + return c +} + +// IfSourceMetagenerationNotMatch sets the optional parameter +// "ifSourceMetagenerationNotMatch": Makes the operation conditional on +// whether the source object's current metageneration does not match the +// given value. +func (c *ObjectsRewriteCall) IfSourceMetagenerationNotMatch(ifSourceMetagenerationNotMatch int64) *ObjectsRewriteCall { + c.urlParams_.Set("ifSourceMetagenerationNotMatch", fmt.Sprint(ifSourceMetagenerationNotMatch)) + return c +} + +// MaxBytesRewrittenPerCall sets the optional parameter +// "maxBytesRewrittenPerCall": The maximum number of bytes that will be +// rewritten per rewrite request. Most callers shouldn't need to specify +// this parameter - it is primarily in place to support testing. If +// specified the value must be an integral multiple of 1 MiB (1048576). +// Also, this only applies to requests where the source and destination +// span locations and/or storage classes. Finally, this value must not +// change across rewrite calls else you'll get an error that the +// rewriteToken is invalid. +func (c *ObjectsRewriteCall) MaxBytesRewrittenPerCall(maxBytesRewrittenPerCall int64) *ObjectsRewriteCall { + c.urlParams_.Set("maxBytesRewrittenPerCall", fmt.Sprint(maxBytesRewrittenPerCall)) + return c +} + +// Projection sets the optional parameter "projection": Set of +// properties to return. Defaults to noAcl, unless the object resource +// specifies the acl property, when it defaults to full. +// +// Possible values: +// "full" - Include all properties. +// "noAcl" - Omit the acl property. 
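+//
+// Editorial sketch (not generated code): whatever projection is chosen,
+// a rewrite is typically driven in a loop, feeding each response's
+// RewriteToken back into the next request until Done is reported; svc
+// and the bucket/object names are placeholders, and reusing the same
+// call value across Do invocations is an assumption of this sketch:
+//
+//	call := svc.Objects.Rewrite("src-bucket", "big.bin", "dst-bucket", "big.bin", &Object{})
+//	for {
+//		res, err := call.Do()
+//		if err != nil {
+//			return err
+//		}
+//		if res.Done {
+//			break
+//		}
+//		call.RewriteToken(res.RewriteToken)
+//	}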
+func (c *ObjectsRewriteCall) Projection(projection string) *ObjectsRewriteCall { + c.urlParams_.Set("projection", projection) + return c +} + +// RewriteToken sets the optional parameter "rewriteToken": Include this +// field (from the previous rewrite response) on each rewrite request +// after the first one, until the rewrite response 'done' flag is true. +// Calls that provide a rewriteToken can omit all other request fields, +// but if included those fields must match the values provided in the +// first rewrite request. +func (c *ObjectsRewriteCall) RewriteToken(rewriteToken string) *ObjectsRewriteCall { + c.urlParams_.Set("rewriteToken", rewriteToken) + return c +} + +// SourceGeneration sets the optional parameter "sourceGeneration": If +// present, selects a specific revision of the source object (as opposed +// to the latest version, the default). +func (c *ObjectsRewriteCall) SourceGeneration(sourceGeneration int64) *ObjectsRewriteCall { + c.urlParams_.Set("sourceGeneration", fmt.Sprint(sourceGeneration)) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ObjectsRewriteCall) Fields(s ...googleapi.Field) *ObjectsRewriteCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ObjectsRewriteCall) Context(ctx context.Context) *ObjectsRewriteCall { + c.ctx_ = ctx + return c +} + +func (c *ObjectsRewriteCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.object) + if err != nil { + return nil, err + } + ctype := "application/json" + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{sourceBucket}/o/{sourceObject}/rewriteTo/b/{destinationBucket}/o/{destinationObject}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "sourceBucket": c.sourceBucket, + "sourceObject": c.sourceObject, + "destinationBucket": c.destinationBucket, + "destinationObject": c.destinationObject, + }) + req.Header.Set("Content-Type", ctype) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "storage.objects.rewrite" call. +// Exactly one of *RewriteResponse or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *RewriteResponse.ServerResponse.Header or (if a response was returned +// at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *ObjectsRewriteCall) Do(opts ...googleapi.CallOption) (*RewriteResponse, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &RewriteResponse{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Rewrites a source object to a destination object. Optionally overrides metadata.", + // "httpMethod": "POST", + // "id": "storage.objects.rewrite", + // "parameterOrder": [ + // "sourceBucket", + // "sourceObject", + // "destinationBucket", + // "destinationObject" + // ], + // "parameters": { + // "destinationBucket": { + // "description": "Name of the bucket in which to store the new object. Overrides the provided object metadata's bucket value, if any.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "destinationObject": { + // "description": "Name of the new object. Required when the object metadata is not otherwise provided. Overrides the object metadata's name value, if any. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "destinationPredefinedAcl": { + // "description": "Apply a predefined set of access controls to the destination object.", + // "enum": [ + // "authenticatedRead", + // "bucketOwnerFullControl", + // "bucketOwnerRead", + // "private", + // "projectPrivate", + // "publicRead" + // ], + // "enumDescriptions": [ + // "Object owner gets OWNER access, and allAuthenticatedUsers get READER access.", + // "Object owner gets OWNER access, and project team owners get OWNER access.", + // "Object owner gets OWNER access, and project team owners get READER access.", + // "Object owner gets OWNER access.", + // "Object owner gets OWNER access, and project team members get access according to their roles.", + // "Object owner gets OWNER access, and allUsers get READER access." 
+ // ], + // "location": "query", + // "type": "string" + // }, + // "ifGenerationMatch": { + // "description": "Makes the operation conditional on whether the destination object's current generation matches the given value.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "ifGenerationNotMatch": { + // "description": "Makes the operation conditional on whether the destination object's current generation does not match the given value.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "ifMetagenerationMatch": { + // "description": "Makes the operation conditional on whether the destination object's current metageneration matches the given value.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "ifMetagenerationNotMatch": { + // "description": "Makes the operation conditional on whether the destination object's current metageneration does not match the given value.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "ifSourceGenerationMatch": { + // "description": "Makes the operation conditional on whether the source object's generation matches the given value.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "ifSourceGenerationNotMatch": { + // "description": "Makes the operation conditional on whether the source object's generation does not match the given value.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "ifSourceMetagenerationMatch": { + // "description": "Makes the operation conditional on whether the source object's current metageneration matches the given value.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "ifSourceMetagenerationNotMatch": { + // "description": "Makes the operation conditional on whether the source object's current metageneration does not match the given value.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "maxBytesRewrittenPerCall": { + // "description": "The maximum number of bytes that will be rewritten per rewrite request. Most callers shouldn't need to specify this parameter - it is primarily in place to support testing. If specified the value must be an integral multiple of 1 MiB (1048576). Also, this only applies to requests where the source and destination span locations and/or storage classes. Finally, this value must not change across rewrite calls else you'll get an error that the rewriteToken is invalid.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "projection": { + // "description": "Set of properties to return. Defaults to noAcl, unless the object resource specifies the acl property, when it defaults to full.", + // "enum": [ + // "full", + // "noAcl" + // ], + // "enumDescriptions": [ + // "Include all properties.", + // "Omit the acl property." + // ], + // "location": "query", + // "type": "string" + // }, + // "rewriteToken": { + // "description": "Include this field (from the previous rewrite response) on each rewrite request after the first one, until the rewrite response 'done' flag is true. 
Calls that provide a rewriteToken can omit all other request fields, but if included those fields must match the values provided in the first rewrite request.", + // "location": "query", + // "type": "string" + // }, + // "sourceBucket": { + // "description": "Name of the bucket in which to find the source object.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "sourceGeneration": { + // "description": "If present, selects a specific revision of the source object (as opposed to the latest version, the default).", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "sourceObject": { + // "description": "Name of the source object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "b/{sourceBucket}/o/{sourceObject}/rewriteTo/b/{destinationBucket}/o/{destinationObject}", + // "request": { + // "$ref": "Object" + // }, + // "response": { + // "$ref": "RewriteResponse" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/devstorage.full_control", + // "https://www.googleapis.com/auth/devstorage.read_write" + // ] + // } + +} + +// method id "storage.objects.update": + +type ObjectsUpdateCall struct { + s *Service + bucket string + object string + object2 *Object + urlParams_ gensupport.URLParams + ctx_ context.Context +} + +// Update: Updates an object's metadata. +func (r *ObjectsService) Update(bucket string, object string, object2 *Object) *ObjectsUpdateCall { + c := &ObjectsUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.bucket = bucket + c.object = object + c.object2 = object2 + return c +} + +// Generation sets the optional parameter "generation": If present, +// selects a specific revision of this object (as opposed to the latest +// version, the default). +func (c *ObjectsUpdateCall) Generation(generation int64) *ObjectsUpdateCall { + c.urlParams_.Set("generation", fmt.Sprint(generation)) + return c +} + +// IfGenerationMatch sets the optional parameter "ifGenerationMatch": +// Makes the operation conditional on whether the object's current +// generation matches the given value. +func (c *ObjectsUpdateCall) IfGenerationMatch(ifGenerationMatch int64) *ObjectsUpdateCall { + c.urlParams_.Set("ifGenerationMatch", fmt.Sprint(ifGenerationMatch)) + return c +} + +// IfGenerationNotMatch sets the optional parameter +// "ifGenerationNotMatch": Makes the operation conditional on whether +// the object's current generation does not match the given value. +func (c *ObjectsUpdateCall) IfGenerationNotMatch(ifGenerationNotMatch int64) *ObjectsUpdateCall { + c.urlParams_.Set("ifGenerationNotMatch", fmt.Sprint(ifGenerationNotMatch)) + return c +} + +// IfMetagenerationMatch sets the optional parameter +// "ifMetagenerationMatch": Makes the operation conditional on whether +// the object's current metageneration matches the given value. +func (c *ObjectsUpdateCall) IfMetagenerationMatch(ifMetagenerationMatch int64) *ObjectsUpdateCall { + c.urlParams_.Set("ifMetagenerationMatch", fmt.Sprint(ifMetagenerationMatch)) + return c +} + +// IfMetagenerationNotMatch sets the optional parameter +// "ifMetagenerationNotMatch": Makes the operation conditional on +// whether the object's current metageneration does not match the given +// value. 
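+//
+// Editorial sketch (not generated code): the metageneration setters are
+// commonly used for a guarded read-modify-write; svc and the names are
+// placeholders, and Objects.Get is assumed from elsewhere in this API:
+//
+//	obj, err := svc.Objects.Get("my-bucket", "notes.txt").Do()
+//	if err != nil {
+//		return err
+//	}
+//	obj.ContentLanguage = "en"
+//	_, err = svc.Objects.Update("my-bucket", "notes.txt", obj).
+//		IfMetagenerationMatch(obj.Metageneration).Do()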
+func (c *ObjectsUpdateCall) IfMetagenerationNotMatch(ifMetagenerationNotMatch int64) *ObjectsUpdateCall { + c.urlParams_.Set("ifMetagenerationNotMatch", fmt.Sprint(ifMetagenerationNotMatch)) + return c +} + +// PredefinedAcl sets the optional parameter "predefinedAcl": Apply a +// predefined set of access controls to this object. +// +// Possible values: +// "authenticatedRead" - Object owner gets OWNER access, and +// allAuthenticatedUsers get READER access. +// "bucketOwnerFullControl" - Object owner gets OWNER access, and +// project team owners get OWNER access. +// "bucketOwnerRead" - Object owner gets OWNER access, and project +// team owners get READER access. +// "private" - Object owner gets OWNER access. +// "projectPrivate" - Object owner gets OWNER access, and project team +// members get access according to their roles. +// "publicRead" - Object owner gets OWNER access, and allUsers get +// READER access. +func (c *ObjectsUpdateCall) PredefinedAcl(predefinedAcl string) *ObjectsUpdateCall { + c.urlParams_.Set("predefinedAcl", predefinedAcl) + return c +} + +// Projection sets the optional parameter "projection": Set of +// properties to return. Defaults to full. +// +// Possible values: +// "full" - Include all properties. +// "noAcl" - Omit the acl property. +func (c *ObjectsUpdateCall) Projection(projection string) *ObjectsUpdateCall { + c.urlParams_.Set("projection", projection) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ObjectsUpdateCall) Fields(s ...googleapi.Field) *ObjectsUpdateCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do and Download +// methods. Any pending HTTP request will be aborted if the provided +// context is canceled. +func (c *ObjectsUpdateCall) Context(ctx context.Context) *ObjectsUpdateCall { + c.ctx_ = ctx + return c +} + +func (c *ObjectsUpdateCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.object2) + if err != nil { + return nil, err + } + ctype := "application/json" + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/{object}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("PUT", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "bucket": c.bucket, + "object": c.object, + }) + req.Header.Set("Content-Type", ctype) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Download fetches the API endpoint's "media" value, instead of the normal +// API response value. If the returned error is nil, the Response is guaranteed to +// have a 2xx status code. Callers must close the Response.Body as usual. +func (c *ObjectsUpdateCall) Download(opts ...googleapi.CallOption) (*http.Response, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("media") + if err != nil { + return nil, err + } + if err := googleapi.CheckMediaResponse(res); err != nil { + res.Body.Close() + return nil, err + } + return res, nil +} + +// Do executes the "storage.objects.update" call. +// Exactly one of *Object or error will be non-nil. Any non-2xx status +// code is an error. 
Response headers are in either +// *Object.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *ObjectsUpdateCall) Do(opts ...googleapi.CallOption) (*Object, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Object{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Updates an object's metadata.", + // "httpMethod": "PUT", + // "id": "storage.objects.update", + // "parameterOrder": [ + // "bucket", + // "object" + // ], + // "parameters": { + // "bucket": { + // "description": "Name of the bucket in which the object resides.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "generation": { + // "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "ifGenerationMatch": { + // "description": "Makes the operation conditional on whether the object's current generation matches the given value.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "ifGenerationNotMatch": { + // "description": "Makes the operation conditional on whether the object's current generation does not match the given value.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "ifMetagenerationMatch": { + // "description": "Makes the operation conditional on whether the object's current metageneration matches the given value.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "ifMetagenerationNotMatch": { + // "description": "Makes the operation conditional on whether the object's current metageneration does not match the given value.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "object": { + // "description": "Name of the object. 
For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "predefinedAcl": { + // "description": "Apply a predefined set of access controls to this object.", + // "enum": [ + // "authenticatedRead", + // "bucketOwnerFullControl", + // "bucketOwnerRead", + // "private", + // "projectPrivate", + // "publicRead" + // ], + // "enumDescriptions": [ + // "Object owner gets OWNER access, and allAuthenticatedUsers get READER access.", + // "Object owner gets OWNER access, and project team owners get OWNER access.", + // "Object owner gets OWNER access, and project team owners get READER access.", + // "Object owner gets OWNER access.", + // "Object owner gets OWNER access, and project team members get access according to their roles.", + // "Object owner gets OWNER access, and allUsers get READER access." + // ], + // "location": "query", + // "type": "string" + // }, + // "projection": { + // "description": "Set of properties to return. Defaults to full.", + // "enum": [ + // "full", + // "noAcl" + // ], + // "enumDescriptions": [ + // "Include all properties.", + // "Omit the acl property." + // ], + // "location": "query", + // "type": "string" + // } + // }, + // "path": "b/{bucket}/o/{object}", + // "request": { + // "$ref": "Object" + // }, + // "response": { + // "$ref": "Object" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/devstorage.full_control", + // "https://www.googleapis.com/auth/devstorage.read_write" + // ], + // "supportsMediaDownload": true, + // "useMediaDownloadService": true + // } + +} + +// method id "storage.objects.watchAll": + +type ObjectsWatchAllCall struct { + s *Service + bucket string + channel *Channel + urlParams_ gensupport.URLParams + ctx_ context.Context +} + +// WatchAll: Watch for changes on all objects in a bucket. +func (r *ObjectsService) WatchAll(bucket string, channel *Channel) *ObjectsWatchAllCall { + c := &ObjectsWatchAllCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.bucket = bucket + c.channel = channel + return c +} + +// Delimiter sets the optional parameter "delimiter": Returns results in +// a directory-like mode. items will contain only objects whose names, +// aside from the prefix, do not contain delimiter. Objects whose names, +// aside from the prefix, contain delimiter will have their name, +// truncated after the delimiter, returned in prefixes. Duplicate +// prefixes are omitted. +func (c *ObjectsWatchAllCall) Delimiter(delimiter string) *ObjectsWatchAllCall { + c.urlParams_.Set("delimiter", delimiter) + return c +} + +// MaxResults sets the optional parameter "maxResults": Maximum number +// of items plus prefixes to return. As duplicate prefixes are omitted, +// fewer total results may be returned than requested. The default value +// of this parameter is 1,000 items. +func (c *ObjectsWatchAllCall) MaxResults(maxResults int64) *ObjectsWatchAllCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// PageToken sets the optional parameter "pageToken": A +// previously-returned page token representing part of the larger set of +// results to view. +func (c *ObjectsWatchAllCall) PageToken(pageToken string) *ObjectsWatchAllCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// Prefix sets the optional parameter "prefix": Filter results to +// objects whose names begin with this prefix. 
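+//
+// Editorial sketch (not generated code): a watch request scoped to a
+// name prefix; the channel id and callback address are placeholders:
+//
+//	result, err := svc.Objects.WatchAll("my-bucket", &Channel{
+//		Id:      "my-channel-id",
+//		Type:    "web_hook",
+//		Address: "https://example.com/notify",
+//	}).Prefix("logs/").Do()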
+func (c *ObjectsWatchAllCall) Prefix(prefix string) *ObjectsWatchAllCall { + c.urlParams_.Set("prefix", prefix) + return c +} + +// Projection sets the optional parameter "projection": Set of +// properties to return. Defaults to noAcl. +// +// Possible values: +// "full" - Include all properties. +// "noAcl" - Omit the acl property. +func (c *ObjectsWatchAllCall) Projection(projection string) *ObjectsWatchAllCall { + c.urlParams_.Set("projection", projection) + return c +} + +// Versions sets the optional parameter "versions": If true, lists all +// versions of an object as distinct results. The default is false. For +// more information, see Object Versioning. +func (c *ObjectsWatchAllCall) Versions(versions bool) *ObjectsWatchAllCall { + c.urlParams_.Set("versions", fmt.Sprint(versions)) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ObjectsWatchAllCall) Fields(s ...googleapi.Field) *ObjectsWatchAllCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ObjectsWatchAllCall) Context(ctx context.Context) *ObjectsWatchAllCall { + c.ctx_ = ctx + return c +} + +func (c *ObjectsWatchAllCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.channel) + if err != nil { + return nil, err + } + ctype := "application/json" + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/watch") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "bucket": c.bucket, + }) + req.Header.Set("Content-Type", ctype) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "storage.objects.watchAll" call. +// Exactly one of *Channel or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Channel.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *ObjectsWatchAllCall) Do(opts ...googleapi.CallOption) (*Channel, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Channel{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Watch for changes on all objects in a bucket.", + // "httpMethod": "POST", + // "id": "storage.objects.watchAll", + // "parameterOrder": [ + // "bucket" + // ], + // "parameters": { + // "bucket": { + // "description": "Name of the bucket in which to look for objects.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "delimiter": { + // "description": "Returns results in a directory-like mode. items will contain only objects whose names, aside from the prefix, do not contain delimiter. Objects whose names, aside from the prefix, contain delimiter will have their name, truncated after the delimiter, returned in prefixes. Duplicate prefixes are omitted.", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "description": "Maximum number of items plus prefixes to return. As duplicate prefixes are omitted, fewer total results may be returned than requested. The default value of this parameter is 1,000 items.", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "pageToken": { + // "description": "A previously-returned page token representing part of the larger set of results to view.", + // "location": "query", + // "type": "string" + // }, + // "prefix": { + // "description": "Filter results to objects whose names begin with this prefix.", + // "location": "query", + // "type": "string" + // }, + // "projection": { + // "description": "Set of properties to return. Defaults to noAcl.", + // "enum": [ + // "full", + // "noAcl" + // ], + // "enumDescriptions": [ + // "Include all properties.", + // "Omit the acl property." + // ], + // "location": "query", + // "type": "string" + // }, + // "versions": { + // "description": "If true, lists all versions of an object as distinct results. The default is false. For more information, see Object Versioning.", + // "location": "query", + // "type": "boolean" + // } + // }, + // "path": "b/{bucket}/o/watch", + // "request": { + // "$ref": "Channel", + // "parameterName": "resource" + // }, + // "response": { + // "$ref": "Channel" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/cloud-platform.read-only", + // "https://www.googleapis.com/auth/devstorage.full_control", + // "https://www.googleapis.com/auth/devstorage.read_only", + // "https://www.googleapis.com/auth/devstorage.read_write" + // ], + // "supportsSubscription": true + // } + +} diff --git a/vendor/google.golang.org/cloud/compute/metadata/metadata.go b/vendor/google.golang.org/cloud/compute/metadata/metadata.go new file mode 100644 index 000000000..3dd684e08 --- /dev/null +++ b/vendor/google.golang.org/cloud/compute/metadata/metadata.go @@ -0,0 +1,327 @@ +// Copyright 2014 Google Inc. All Rights Reserved. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package metadata provides access to Google Compute Engine (GCE) +// metadata and API service accounts. +// +// This package is a wrapper around the GCE metadata service, +// as documented at https://developers.google.com/compute/docs/metadata. +package metadata + +import ( + "encoding/json" + "fmt" + "io/ioutil" + "net" + "net/http" + "net/url" + "os" + "strings" + "sync" + "time" + + "google.golang.org/cloud/internal" +) + +type cachedValue struct { + k string + trim bool + mu sync.Mutex + v string +} + +var ( + projID = &cachedValue{k: "project/project-id", trim: true} + projNum = &cachedValue{k: "project/numeric-project-id", trim: true} + instID = &cachedValue{k: "instance/id", trim: true} +) + +var metaClient = &http.Client{ + Transport: &internal.Transport{ + Base: &http.Transport{ + Dial: (&net.Dialer{ + Timeout: 750 * time.Millisecond, + KeepAlive: 30 * time.Second, + }).Dial, + ResponseHeaderTimeout: 750 * time.Millisecond, + }, + }, +} + +// NotDefinedError is returned when requested metadata is not defined. +// +// The underlying string is the suffix after "/computeMetadata/v1/". +// +// This error is not returned if the value is defined to be the empty +// string. +type NotDefinedError string + +func (suffix NotDefinedError) Error() string { + return fmt.Sprintf("metadata: GCE metadata %q not defined", string(suffix)) +} + +// Get returns a value from the metadata service. +// The suffix is appended to "http://${GCE_METADATA_HOST}/computeMetadata/v1/". +// +// If the GCE_METADATA_HOST environment variable is not defined, a default of +// 169.254.169.254 will be used instead. +// +// If the requested metadata is not defined, the returned error will +// be of type NotDefinedError. +func Get(suffix string) (string, error) { + val, _, err := getETag(suffix) + return val, err +} + +// getETag returns a value from the metadata service as well as the associated +// ETag. This func is otherwise equivalent to Get. +func getETag(suffix string) (value, etag string, err error) { + // Using a fixed IP makes it very difficult to spoof the metadata service in + // a container, which is an important use-case for local testing of cloud + // deployments. To enable spoofing of the metadata service, the environment + // variable GCE_METADATA_HOST is first inspected to decide where metadata + // requests shall go. + host := os.Getenv("GCE_METADATA_HOST") + if host == "" { + // Using 169.254.169.254 instead of "metadata" here because Go + // binaries built with the "netgo" tag and without cgo won't + // know the search suffix for "metadata" is + // ".google.internal", and this IP address is documented as + // being stable anyway. 
+		host = "169.254.169.254"
+	}
+	url := "http://" + host + "/computeMetadata/v1/" + suffix
+	req, _ := http.NewRequest("GET", url, nil)
+	req.Header.Set("Metadata-Flavor", "Google")
+	res, err := metaClient.Do(req)
+	if err != nil {
+		return "", "", err
+	}
+	defer res.Body.Close()
+	if res.StatusCode == http.StatusNotFound {
+		return "", "", NotDefinedError(suffix)
+	}
+	if res.StatusCode != 200 {
+		return "", "", fmt.Errorf("status code %d trying to fetch %s", res.StatusCode, url)
+	}
+	all, err := ioutil.ReadAll(res.Body)
+	if err != nil {
+		return "", "", err
+	}
+	return string(all), res.Header.Get("Etag"), nil
+}
+
+func getTrimmed(suffix string) (s string, err error) {
+	s, err = Get(suffix)
+	s = strings.TrimSpace(s)
+	return
+}
+
+func (c *cachedValue) get() (v string, err error) {
+	defer c.mu.Unlock()
+	c.mu.Lock()
+	if c.v != "" {
+		return c.v, nil
+	}
+	if c.trim {
+		v, err = getTrimmed(c.k)
+	} else {
+		v, err = Get(c.k)
+	}
+	if err == nil {
+		c.v = v
+	}
+	return
+}
+
+var onGCE struct {
+	sync.Mutex
+	set bool
+	v   bool
+}
+
+// OnGCE reports whether this process is running on Google Compute Engine.
+func OnGCE() bool {
+	defer onGCE.Unlock()
+	onGCE.Lock()
+	if onGCE.set {
+		return onGCE.v
+	}
+	onGCE.set = true
+
+	// We use the DNS name of the metadata service here instead of the IP address
+	// because we expect that to fail faster in the not-on-GCE case.
+	res, err := metaClient.Get("http://metadata.google.internal")
+	if err != nil {
+		return false
+	}
+	onGCE.v = res.Header.Get("Metadata-Flavor") == "Google"
+	return onGCE.v
+}
+
+// Subscribe subscribes to a value from the metadata service.
+// The suffix is appended to "http://${GCE_METADATA_HOST}/computeMetadata/v1/".
+//
+// Subscribe calls fn with the latest metadata value indicated by the provided
+// suffix. If the metadata value is deleted, fn is called with the empty string
+// and ok false. Subscribe blocks until fn returns a non-nil error or the value
+// is deleted. Subscribe returns the error value returned from the last call to
+// fn, which may be nil when ok == false.
+func Subscribe(suffix string, fn func(v string, ok bool) error) error {
+	const failedSubscribeSleep = time.Second * 5
+
+	// First check to see if the metadata value exists at all.
+	val, lastETag, err := getETag(suffix)
+	if err != nil {
+		return err
+	}
+
+	if err := fn(val, true); err != nil {
+		return err
+	}
+
+	ok := true
+	suffix += "?wait_for_change=true&last_etag="
+	for {
+		val, etag, err := getETag(suffix + url.QueryEscape(lastETag))
+		if err != nil {
+			if _, deleted := err.(NotDefinedError); !deleted {
+				time.Sleep(failedSubscribeSleep)
+				continue // Retry on other errors.
+			}
+			ok = false
+		}
+		lastETag = etag
+
+		if err := fn(val, ok); err != nil || !ok {
+			return err
+		}
+	}
+}
+
+// ProjectID returns the current instance's project ID string.
+func ProjectID() (string, error) { return projID.get() }
+
+// NumericProjectID returns the current instance's numeric project ID.
+func NumericProjectID() (string, error) { return projNum.get() }
+
+// InternalIP returns the instance's primary internal IP address.
+func InternalIP() (string, error) {
+	return getTrimmed("instance/network-interfaces/0/ip")
+}
+
+// ExternalIP returns the instance's primary external (public) IP address.
+func ExternalIP() (string, error) {
+	return getTrimmed("instance/network-interfaces/0/access-configs/0/external-ip")
+}
+
+// Hostname returns the instance's hostname. This will be of the form
+// "<instanceID>.c.<projID>.internal".
+func Hostname() (string, error) {
+	return getTrimmed("instance/hostname")
+}
+
+// InstanceTags returns the list of user-defined instance tags,
+// assigned when initially creating a GCE instance.
+func InstanceTags() ([]string, error) {
+	var s []string
+	j, err := Get("instance/tags")
+	if err != nil {
+		return nil, err
+	}
+	if err := json.NewDecoder(strings.NewReader(j)).Decode(&s); err != nil {
+		return nil, err
+	}
+	return s, nil
+}
+
+// InstanceID returns the current VM's numeric instance ID.
+func InstanceID() (string, error) {
+	return instID.get()
+}
+
+// InstanceName returns the current VM's instance ID string.
+func InstanceName() (string, error) {
+	host, err := Hostname()
+	if err != nil {
+		return "", err
+	}
+	return strings.Split(host, ".")[0], nil
+}
+
+// Zone returns the current VM's zone, such as "us-central1-b".
+func Zone() (string, error) {
+	zone, err := getTrimmed("instance/zone")
+	// zone is of the form "projects/<projNum>/zones/<zoneName>".
+	if err != nil {
+		return "", err
+	}
+	return zone[strings.LastIndex(zone, "/")+1:], nil
+}
+
+// InstanceAttributes returns the list of user-defined attributes,
+// assigned when initially creating a GCE VM instance. The value of an
+// attribute can be obtained with InstanceAttributeValue.
+func InstanceAttributes() ([]string, error) { return lines("instance/attributes/") }
+
+// ProjectAttributes returns the list of user-defined attributes
+// applying to the project as a whole, not just this VM. The value of
+// an attribute can be obtained with ProjectAttributeValue.
+func ProjectAttributes() ([]string, error) { return lines("project/attributes/") }
+
+func lines(suffix string) ([]string, error) {
+	j, err := Get(suffix)
+	if err != nil {
+		return nil, err
+	}
+	s := strings.Split(strings.TrimSpace(j), "\n")
+	for i := range s {
+		s[i] = strings.TrimSpace(s[i])
+	}
+	return s, nil
+}
+
+// InstanceAttributeValue returns the value of the provided VM
+// instance attribute.
+//
+// If the requested attribute is not defined, the returned error will
+// be of type NotDefinedError.
+//
+// InstanceAttributeValue may return ("", nil) if the attribute was
+// defined to be the empty string.
+func InstanceAttributeValue(attr string) (string, error) {
+	return Get("instance/attributes/" + attr)
+}
+
+// ProjectAttributeValue returns the value of the provided
+// project attribute.
+//
+// If the requested attribute is not defined, the returned error will
+// be of type NotDefinedError.
+//
+// ProjectAttributeValue may return ("", nil) if the attribute was
+// defined to be the empty string.
+func ProjectAttributeValue(attr string) (string, error) {
+	return Get("project/attributes/" + attr)
+}
+
+// Scopes returns the service account scopes for the given account.
+// The account may be empty or the string "default" to use the instance's
+// main account.
+func Scopes(serviceAccount string) ([]string, error) {
+	if serviceAccount == "" {
+		serviceAccount = "default"
+	}
+	return lines("instance/service-accounts/" + serviceAccount + "/scopes")
+}
diff --git a/vendor/google.golang.org/cloud/internal/cloud.go b/vendor/google.golang.org/cloud/internal/cloud.go
new file mode 100644
index 000000000..59428803d
--- /dev/null
+++ b/vendor/google.golang.org/cloud/internal/cloud.go
@@ -0,0 +1,128 @@
+// Copyright 2014 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package internal provides support for the cloud packages. +// +// Users should not import this package directly. +package internal + +import ( + "fmt" + "net/http" + "sync" + + "golang.org/x/net/context" +) + +type contextKey struct{} + +func WithContext(parent context.Context, projID string, c *http.Client) context.Context { + if c == nil { + panic("nil *http.Client passed to WithContext") + } + if projID == "" { + panic("empty project ID passed to WithContext") + } + return context.WithValue(parent, contextKey{}, &cloudContext{ + ProjectID: projID, + HTTPClient: c, + }) +} + +const userAgent = "gcloud-golang/0.1" + +type cloudContext struct { + ProjectID string + HTTPClient *http.Client + + mu sync.Mutex // guards svc + svc map[string]interface{} // e.g. "storage" => *rawStorage.Service +} + +// Service returns the result of the fill function if it's never been +// called before for the given name (which is assumed to be an API +// service name, like "datastore"). If it has already been cached, the fill +// func is not run. +// It's safe for concurrent use by multiple goroutines. +func Service(ctx context.Context, name string, fill func(*http.Client) interface{}) interface{} { + return cc(ctx).service(name, fill) +} + +func (c *cloudContext) service(name string, fill func(*http.Client) interface{}) interface{} { + c.mu.Lock() + defer c.mu.Unlock() + + if c.svc == nil { + c.svc = make(map[string]interface{}) + } else if v, ok := c.svc[name]; ok { + return v + } + v := fill(c.HTTPClient) + c.svc[name] = v + return v +} + +// Transport is an http.RoundTripper that appends +// Google Cloud client's user-agent to the original +// request's user-agent header. +type Transport struct { + // Base is the actual http.RoundTripper + // requests will use. It must not be nil. + Base http.RoundTripper +} + +// RoundTrip appends a user-agent to the existing user-agent +// header and delegates the request to the base http.RoundTripper. +func (t *Transport) RoundTrip(req *http.Request) (*http.Response, error) { + req = cloneRequest(req) + ua := req.Header.Get("User-Agent") + if ua == "" { + ua = userAgent + } else { + ua = fmt.Sprintf("%s %s", ua, userAgent) + } + req.Header.Set("User-Agent", ua) + return t.Base.RoundTrip(req) +} + +// cloneRequest returns a clone of the provided *http.Request. +// The clone is a shallow copy of the struct and its Header map. +func cloneRequest(r *http.Request) *http.Request { + // shallow copy of the struct + r2 := new(http.Request) + *r2 = *r + // deep copy of the Header + r2.Header = make(http.Header) + for k, s := range r.Header { + r2.Header[k] = s + } + return r2 +} + +func ProjID(ctx context.Context) string { + return cc(ctx).ProjectID +} + +func HTTPClient(ctx context.Context) *http.Client { + return cc(ctx).HTTPClient +} + +// cc returns the internal *cloudContext (cc) state for a context.Context. +// It panics if the user did it wrong. 
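+//
+// Editorial sketch (not generated code): the expected construction path
+// uses the WithContext helper defined above (in the public cloud
+// package this is wrapped by cloud.NewContext); the project ID and
+// httpClient are placeholders:
+//
+//	ctx := WithContext(context.Background(), "my-project", httpClient)
+//	id := ProjID(ctx) // "my-project"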
+func cc(ctx context.Context) *cloudContext { + if c, ok := ctx.Value(contextKey{}).(*cloudContext); ok { + return c + } + panic("invalid context.Context type; it should be created with cloud.NewContext") +} diff --git a/vendor/google.golang.org/cloud/internal/datastore/datastore_v1.pb.go b/vendor/google.golang.org/cloud/internal/datastore/datastore_v1.pb.go new file mode 100644 index 000000000..9cb9be528 --- /dev/null +++ b/vendor/google.golang.org/cloud/internal/datastore/datastore_v1.pb.go @@ -0,0 +1,1633 @@ +// Code generated by protoc-gen-go. +// source: datastore_v1.proto +// DO NOT EDIT! + +/* +Package datastore is a generated protocol buffer package. + +It is generated from these files: + datastore_v1.proto + +It has these top-level messages: + PartitionId + Key + Value + Property + Entity + EntityResult + Query + KindExpression + PropertyReference + PropertyExpression + PropertyOrder + Filter + CompositeFilter + PropertyFilter + GqlQuery + GqlQueryArg + QueryResultBatch + Mutation + MutationResult + ReadOptions + LookupRequest + LookupResponse + RunQueryRequest + RunQueryResponse + BeginTransactionRequest + BeginTransactionResponse + RollbackRequest + RollbackResponse + CommitRequest + CommitResponse + AllocateIdsRequest + AllocateIdsResponse +*/ +package datastore + +import proto "github.com/golang/protobuf/proto" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = math.Inf + +// Specifies what data the 'entity' field contains. +// A ResultType is either implied (for example, in LookupResponse.found it +// is always FULL) or specified by context (for example, in message +// QueryResultBatch, field 'entity_result_type' specifies a ResultType +// for all the values in field 'entity_result'). +type EntityResult_ResultType int32 + +const ( + EntityResult_FULL EntityResult_ResultType = 1 + EntityResult_PROJECTION EntityResult_ResultType = 2 + // The entity may have no key. + // A property value may have meaning 18. 
+ EntityResult_KEY_ONLY EntityResult_ResultType = 3 +) + +var EntityResult_ResultType_name = map[int32]string{ + 1: "FULL", + 2: "PROJECTION", + 3: "KEY_ONLY", +} +var EntityResult_ResultType_value = map[string]int32{ + "FULL": 1, + "PROJECTION": 2, + "KEY_ONLY": 3, +} + +func (x EntityResult_ResultType) Enum() *EntityResult_ResultType { + p := new(EntityResult_ResultType) + *p = x + return p +} +func (x EntityResult_ResultType) String() string { + return proto.EnumName(EntityResult_ResultType_name, int32(x)) +} +func (x *EntityResult_ResultType) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(EntityResult_ResultType_value, data, "EntityResult_ResultType") + if err != nil { + return err + } + *x = EntityResult_ResultType(value) + return nil +} + +type PropertyExpression_AggregationFunction int32 + +const ( + PropertyExpression_FIRST PropertyExpression_AggregationFunction = 1 +) + +var PropertyExpression_AggregationFunction_name = map[int32]string{ + 1: "FIRST", +} +var PropertyExpression_AggregationFunction_value = map[string]int32{ + "FIRST": 1, +} + +func (x PropertyExpression_AggregationFunction) Enum() *PropertyExpression_AggregationFunction { + p := new(PropertyExpression_AggregationFunction) + *p = x + return p +} +func (x PropertyExpression_AggregationFunction) String() string { + return proto.EnumName(PropertyExpression_AggregationFunction_name, int32(x)) +} +func (x *PropertyExpression_AggregationFunction) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(PropertyExpression_AggregationFunction_value, data, "PropertyExpression_AggregationFunction") + if err != nil { + return err + } + *x = PropertyExpression_AggregationFunction(value) + return nil +} + +type PropertyOrder_Direction int32 + +const ( + PropertyOrder_ASCENDING PropertyOrder_Direction = 1 + PropertyOrder_DESCENDING PropertyOrder_Direction = 2 +) + +var PropertyOrder_Direction_name = map[int32]string{ + 1: "ASCENDING", + 2: "DESCENDING", +} +var PropertyOrder_Direction_value = map[string]int32{ + "ASCENDING": 1, + "DESCENDING": 2, +} + +func (x PropertyOrder_Direction) Enum() *PropertyOrder_Direction { + p := new(PropertyOrder_Direction) + *p = x + return p +} +func (x PropertyOrder_Direction) String() string { + return proto.EnumName(PropertyOrder_Direction_name, int32(x)) +} +func (x *PropertyOrder_Direction) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(PropertyOrder_Direction_value, data, "PropertyOrder_Direction") + if err != nil { + return err + } + *x = PropertyOrder_Direction(value) + return nil +} + +type CompositeFilter_Operator int32 + +const ( + CompositeFilter_AND CompositeFilter_Operator = 1 +) + +var CompositeFilter_Operator_name = map[int32]string{ + 1: "AND", +} +var CompositeFilter_Operator_value = map[string]int32{ + "AND": 1, +} + +func (x CompositeFilter_Operator) Enum() *CompositeFilter_Operator { + p := new(CompositeFilter_Operator) + *p = x + return p +} +func (x CompositeFilter_Operator) String() string { + return proto.EnumName(CompositeFilter_Operator_name, int32(x)) +} +func (x *CompositeFilter_Operator) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(CompositeFilter_Operator_value, data, "CompositeFilter_Operator") + if err != nil { + return err + } + *x = CompositeFilter_Operator(value) + return nil +} + +type PropertyFilter_Operator int32 + +const ( + PropertyFilter_LESS_THAN PropertyFilter_Operator = 1 + PropertyFilter_LESS_THAN_OR_EQUAL PropertyFilter_Operator = 2 + 
PropertyFilter_GREATER_THAN PropertyFilter_Operator = 3 + PropertyFilter_GREATER_THAN_OR_EQUAL PropertyFilter_Operator = 4 + PropertyFilter_EQUAL PropertyFilter_Operator = 5 + PropertyFilter_HAS_ANCESTOR PropertyFilter_Operator = 11 +) + +var PropertyFilter_Operator_name = map[int32]string{ + 1: "LESS_THAN", + 2: "LESS_THAN_OR_EQUAL", + 3: "GREATER_THAN", + 4: "GREATER_THAN_OR_EQUAL", + 5: "EQUAL", + 11: "HAS_ANCESTOR", +} +var PropertyFilter_Operator_value = map[string]int32{ + "LESS_THAN": 1, + "LESS_THAN_OR_EQUAL": 2, + "GREATER_THAN": 3, + "GREATER_THAN_OR_EQUAL": 4, + "EQUAL": 5, + "HAS_ANCESTOR": 11, +} + +func (x PropertyFilter_Operator) Enum() *PropertyFilter_Operator { + p := new(PropertyFilter_Operator) + *p = x + return p +} +func (x PropertyFilter_Operator) String() string { + return proto.EnumName(PropertyFilter_Operator_name, int32(x)) +} +func (x *PropertyFilter_Operator) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(PropertyFilter_Operator_value, data, "PropertyFilter_Operator") + if err != nil { + return err + } + *x = PropertyFilter_Operator(value) + return nil +} + +// The possible values for the 'more_results' field. +type QueryResultBatch_MoreResultsType int32 + +const ( + QueryResultBatch_NOT_FINISHED QueryResultBatch_MoreResultsType = 1 + QueryResultBatch_MORE_RESULTS_AFTER_LIMIT QueryResultBatch_MoreResultsType = 2 + // results after the limit. + QueryResultBatch_NO_MORE_RESULTS QueryResultBatch_MoreResultsType = 3 +) + +var QueryResultBatch_MoreResultsType_name = map[int32]string{ + 1: "NOT_FINISHED", + 2: "MORE_RESULTS_AFTER_LIMIT", + 3: "NO_MORE_RESULTS", +} +var QueryResultBatch_MoreResultsType_value = map[string]int32{ + "NOT_FINISHED": 1, + "MORE_RESULTS_AFTER_LIMIT": 2, + "NO_MORE_RESULTS": 3, +} + +func (x QueryResultBatch_MoreResultsType) Enum() *QueryResultBatch_MoreResultsType { + p := new(QueryResultBatch_MoreResultsType) + *p = x + return p +} +func (x QueryResultBatch_MoreResultsType) String() string { + return proto.EnumName(QueryResultBatch_MoreResultsType_name, int32(x)) +} +func (x *QueryResultBatch_MoreResultsType) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(QueryResultBatch_MoreResultsType_value, data, "QueryResultBatch_MoreResultsType") + if err != nil { + return err + } + *x = QueryResultBatch_MoreResultsType(value) + return nil +} + +type ReadOptions_ReadConsistency int32 + +const ( + ReadOptions_DEFAULT ReadOptions_ReadConsistency = 0 + ReadOptions_STRONG ReadOptions_ReadConsistency = 1 + ReadOptions_EVENTUAL ReadOptions_ReadConsistency = 2 +) + +var ReadOptions_ReadConsistency_name = map[int32]string{ + 0: "DEFAULT", + 1: "STRONG", + 2: "EVENTUAL", +} +var ReadOptions_ReadConsistency_value = map[string]int32{ + "DEFAULT": 0, + "STRONG": 1, + "EVENTUAL": 2, +} + +func (x ReadOptions_ReadConsistency) Enum() *ReadOptions_ReadConsistency { + p := new(ReadOptions_ReadConsistency) + *p = x + return p +} +func (x ReadOptions_ReadConsistency) String() string { + return proto.EnumName(ReadOptions_ReadConsistency_name, int32(x)) +} +func (x *ReadOptions_ReadConsistency) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(ReadOptions_ReadConsistency_value, data, "ReadOptions_ReadConsistency") + if err != nil { + return err + } + *x = ReadOptions_ReadConsistency(value) + return nil +} + +type BeginTransactionRequest_IsolationLevel int32 + +const ( + BeginTransactionRequest_SNAPSHOT BeginTransactionRequest_IsolationLevel = 0 + // conflict if their mutations conflict. 
For example: + // Read(A),Write(B) may not conflict with Read(B),Write(A), + // but Read(B),Write(B) does conflict with Read(B),Write(B). + BeginTransactionRequest_SERIALIZABLE BeginTransactionRequest_IsolationLevel = 1 +) + +var BeginTransactionRequest_IsolationLevel_name = map[int32]string{ + 0: "SNAPSHOT", + 1: "SERIALIZABLE", +} +var BeginTransactionRequest_IsolationLevel_value = map[string]int32{ + "SNAPSHOT": 0, + "SERIALIZABLE": 1, +} + +func (x BeginTransactionRequest_IsolationLevel) Enum() *BeginTransactionRequest_IsolationLevel { + p := new(BeginTransactionRequest_IsolationLevel) + *p = x + return p +} +func (x BeginTransactionRequest_IsolationLevel) String() string { + return proto.EnumName(BeginTransactionRequest_IsolationLevel_name, int32(x)) +} +func (x *BeginTransactionRequest_IsolationLevel) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(BeginTransactionRequest_IsolationLevel_value, data, "BeginTransactionRequest_IsolationLevel") + if err != nil { + return err + } + *x = BeginTransactionRequest_IsolationLevel(value) + return nil +} + +type CommitRequest_Mode int32 + +const ( + CommitRequest_TRANSACTIONAL CommitRequest_Mode = 1 + CommitRequest_NON_TRANSACTIONAL CommitRequest_Mode = 2 +) + +var CommitRequest_Mode_name = map[int32]string{ + 1: "TRANSACTIONAL", + 2: "NON_TRANSACTIONAL", +} +var CommitRequest_Mode_value = map[string]int32{ + "TRANSACTIONAL": 1, + "NON_TRANSACTIONAL": 2, +} + +func (x CommitRequest_Mode) Enum() *CommitRequest_Mode { + p := new(CommitRequest_Mode) + *p = x + return p +} +func (x CommitRequest_Mode) String() string { + return proto.EnumName(CommitRequest_Mode_name, int32(x)) +} +func (x *CommitRequest_Mode) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(CommitRequest_Mode_value, data, "CommitRequest_Mode") + if err != nil { + return err + } + *x = CommitRequest_Mode(value) + return nil +} + +// An identifier for a particular subset of entities. +// +// Entities are partitioned into various subsets, each used by different +// datasets and different namespaces within a dataset and so forth. +// +// All input partition IDs are normalized before use. +// A partition ID is normalized as follows: +// If the partition ID is unset or is set to an empty partition ID, replace it +// with the context partition ID. +// Otherwise, if the partition ID has no dataset ID, assign it the context +// partition ID's dataset ID. +// Unless otherwise documented, the context partition ID has the dataset ID set +// to the context dataset ID and no other partition dimension set. +// +// A partition ID is empty if all of its fields are unset. +// +// Partition dimension: +// A dimension may be unset. +// A dimension's value must never be "". +// A dimension's value must match [A-Za-z\d\.\-_]{1,100} +// If the value of any dimension matches regex "__.*__", +// the partition is reserved/read-only. +// A reserved/read-only partition ID is forbidden in certain documented contexts. +// +// Dataset ID: +// A dataset id's value must never be "". +// A dataset id's value must match +// ([a-z\d\-]{1,100}~)?([a-z\d][a-z\d\-\.]{0,99}:)?([a-z\d][a-z\d\-]{0,99} +type PartitionId struct { + // The dataset ID. + DatasetId *string `protobuf:"bytes,3,opt,name=dataset_id" json:"dataset_id,omitempty"` + // The namespace. 
+ Namespace *string `protobuf:"bytes,4,opt,name=namespace" json:"namespace,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *PartitionId) Reset() { *m = PartitionId{} } +func (m *PartitionId) String() string { return proto.CompactTextString(m) } +func (*PartitionId) ProtoMessage() {} + +func (m *PartitionId) GetDatasetId() string { + if m != nil && m.DatasetId != nil { + return *m.DatasetId + } + return "" +} + +func (m *PartitionId) GetNamespace() string { + if m != nil && m.Namespace != nil { + return *m.Namespace + } + return "" +} + +// A unique identifier for an entity. +// If a key's partition id or any of its path kinds or names are +// reserved/read-only, the key is reserved/read-only. +// A reserved/read-only key is forbidden in certain documented contexts. +type Key struct { + // Entities are partitioned into subsets, currently identified by a dataset + // (usually implicitly specified by the project) and namespace ID. + // Queries are scoped to a single partition. + PartitionId *PartitionId `protobuf:"bytes,1,opt,name=partition_id" json:"partition_id,omitempty"` + // The entity path. + // An entity path consists of one or more elements composed of a kind and a + // string or numerical identifier, which identify entities. The first + // element identifies a root entity, the second element identifies + // a child of the root entity, the third element a child of the + // second entity, and so forth. The entities identified by all prefixes of + // the path are called the element's ancestors. + // An entity path is always fully complete: ALL of the entity's ancestors + // are required to be in the path along with the entity identifier itself. + // The only exception is that in some documented cases, the identifier in the + // last path element (for the entity) itself may be omitted. A path can never + // be empty. + PathElement []*Key_PathElement `protobuf:"bytes,2,rep,name=path_element" json:"path_element,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Key) Reset() { *m = Key{} } +func (m *Key) String() string { return proto.CompactTextString(m) } +func (*Key) ProtoMessage() {} + +func (m *Key) GetPartitionId() *PartitionId { + if m != nil { + return m.PartitionId + } + return nil +} + +func (m *Key) GetPathElement() []*Key_PathElement { + if m != nil { + return m.PathElement + } + return nil +} + +// A (kind, ID/name) pair used to construct a key path. +// +// At most one of name or ID may be set. +// If either is set, the element is complete. +// If neither is set, the element is incomplete. +type Key_PathElement struct { + // The kind of the entity. + // A kind matching regex "__.*__" is reserved/read-only. + // A kind must not contain more than 500 characters. + // Cannot be "". + Kind *string `protobuf:"bytes,1,req,name=kind" json:"kind,omitempty"` + // The ID of the entity. + // Never equal to zero. Values less than zero are discouraged and will not + // be supported in the future. + Id *int64 `protobuf:"varint,2,opt,name=id" json:"id,omitempty"` + // The name of the entity. + // A name matching regex "__.*__" is reserved/read-only. + // A name must not be more than 500 characters. + // Cannot be "". 
+ Name *string `protobuf:"bytes,3,opt,name=name" json:"name,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Key_PathElement) Reset() { *m = Key_PathElement{} } +func (m *Key_PathElement) String() string { return proto.CompactTextString(m) } +func (*Key_PathElement) ProtoMessage() {} + +func (m *Key_PathElement) GetKind() string { + if m != nil && m.Kind != nil { + return *m.Kind + } + return "" +} + +func (m *Key_PathElement) GetId() int64 { + if m != nil && m.Id != nil { + return *m.Id + } + return 0 +} + +func (m *Key_PathElement) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +// A message that can hold any of the supported value types and associated +// metadata. +// +// At most one of the Value fields may be set. +// If none are set the value is "null". +// +type Value struct { + // A boolean value. + BooleanValue *bool `protobuf:"varint,1,opt,name=boolean_value" json:"boolean_value,omitempty"` + // An integer value. + IntegerValue *int64 `protobuf:"varint,2,opt,name=integer_value" json:"integer_value,omitempty"` + // A double value. + DoubleValue *float64 `protobuf:"fixed64,3,opt,name=double_value" json:"double_value,omitempty"` + // A timestamp value. + TimestampMicrosecondsValue *int64 `protobuf:"varint,4,opt,name=timestamp_microseconds_value" json:"timestamp_microseconds_value,omitempty"` + // A key value. + KeyValue *Key `protobuf:"bytes,5,opt,name=key_value" json:"key_value,omitempty"` + // A blob key value. + BlobKeyValue *string `protobuf:"bytes,16,opt,name=blob_key_value" json:"blob_key_value,omitempty"` + // A UTF-8 encoded string value. + StringValue *string `protobuf:"bytes,17,opt,name=string_value" json:"string_value,omitempty"` + // A blob value. + BlobValue []byte `protobuf:"bytes,18,opt,name=blob_value" json:"blob_value,omitempty"` + // An entity value. + // May have no key. + // May have a key with an incomplete key path. + // May have a reserved/read-only key. + EntityValue *Entity `protobuf:"bytes,6,opt,name=entity_value" json:"entity_value,omitempty"` + // A list value. + // Cannot contain another list value. + // Cannot also have a meaning and indexing set. + ListValue []*Value `protobuf:"bytes,7,rep,name=list_value" json:"list_value,omitempty"` + // The meaning field is reserved and should not be used. + Meaning *int32 `protobuf:"varint,14,opt,name=meaning" json:"meaning,omitempty"` + // If the value should be indexed. + // + // The indexed property may be set for a + // null value. + // When indexed is true, stringValue + // is limited to 500 characters and the blob value is limited to 500 bytes. + // Exception: If meaning is set to 2, string_value is limited to 2038 + // characters regardless of indexed. + // When indexed is true, meaning 15 and 22 are not allowed, and meaning 16 + // will be ignored on input (and will never be set on output). + // Input values by default have indexed set to + // true; however, you can explicitly set indexed to + // true if you want. (An output value never has + // indexed explicitly set to true.) If a value is + // itself an entity, it cannot have indexed set to + // true. + // Exception: An entity value with meaning 9, 20 or 21 may be indexed. 
+ Indexed *bool `protobuf:"varint,15,opt,name=indexed,def=1" json:"indexed,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Value) Reset() { *m = Value{} } +func (m *Value) String() string { return proto.CompactTextString(m) } +func (*Value) ProtoMessage() {} + +const Default_Value_Indexed bool = true + +func (m *Value) GetBooleanValue() bool { + if m != nil && m.BooleanValue != nil { + return *m.BooleanValue + } + return false +} + +func (m *Value) GetIntegerValue() int64 { + if m != nil && m.IntegerValue != nil { + return *m.IntegerValue + } + return 0 +} + +func (m *Value) GetDoubleValue() float64 { + if m != nil && m.DoubleValue != nil { + return *m.DoubleValue + } + return 0 +} + +func (m *Value) GetTimestampMicrosecondsValue() int64 { + if m != nil && m.TimestampMicrosecondsValue != nil { + return *m.TimestampMicrosecondsValue + } + return 0 +} + +func (m *Value) GetKeyValue() *Key { + if m != nil { + return m.KeyValue + } + return nil +} + +func (m *Value) GetBlobKeyValue() string { + if m != nil && m.BlobKeyValue != nil { + return *m.BlobKeyValue + } + return "" +} + +func (m *Value) GetStringValue() string { + if m != nil && m.StringValue != nil { + return *m.StringValue + } + return "" +} + +func (m *Value) GetBlobValue() []byte { + if m != nil { + return m.BlobValue + } + return nil +} + +func (m *Value) GetEntityValue() *Entity { + if m != nil { + return m.EntityValue + } + return nil +} + +func (m *Value) GetListValue() []*Value { + if m != nil { + return m.ListValue + } + return nil +} + +func (m *Value) GetMeaning() int32 { + if m != nil && m.Meaning != nil { + return *m.Meaning + } + return 0 +} + +func (m *Value) GetIndexed() bool { + if m != nil && m.Indexed != nil { + return *m.Indexed + } + return Default_Value_Indexed +} + +// An entity property. +type Property struct { + // The name of the property. + // A property name matching regex "__.*__" is reserved. + // A reserved property name is forbidden in certain documented contexts. + // The name must not contain more than 500 characters. + // Cannot be "". + Name *string `protobuf:"bytes,1,req,name=name" json:"name,omitempty"` + // The value(s) of the property. + // Each value can have only one value property populated. For example, + // you cannot have a values list of { value: { integerValue: 22, + // stringValue: "a" } }, but you can have { value: { listValue: + // [ { integerValue: 22 }, { stringValue: "a" } ] }. + Value *Value `protobuf:"bytes,4,req,name=value" json:"value,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Property) Reset() { *m = Property{} } +func (m *Property) String() string { return proto.CompactTextString(m) } +func (*Property) ProtoMessage() {} + +func (m *Property) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *Property) GetValue() *Value { + if m != nil { + return m.Value + } + return nil +} + +// An entity. +// +// An entity is limited to 1 megabyte when stored. That roughly +// corresponds to a limit of 1 megabyte for the serialized form of this +// message. +type Entity struct { + // The entity's key. + // + // An entity must have a key, unless otherwise documented (for example, + // an entity in Value.entityValue may have no key). + // An entity's kind is its key's path's last element's kind, + // or null if it has no key. + Key *Key `protobuf:"bytes,1,opt,name=key" json:"key,omitempty"` + // The entity's properties. + // Each property's name must be unique for its entity. 
+ Property []*Property `protobuf:"bytes,2,rep,name=property" json:"property,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Entity) Reset() { *m = Entity{} } +func (m *Entity) String() string { return proto.CompactTextString(m) } +func (*Entity) ProtoMessage() {} + +func (m *Entity) GetKey() *Key { + if m != nil { + return m.Key + } + return nil +} + +func (m *Entity) GetProperty() []*Property { + if m != nil { + return m.Property + } + return nil +} + +// The result of fetching an entity from the datastore. +type EntityResult struct { + // The resulting entity. + Entity *Entity `protobuf:"bytes,1,req,name=entity" json:"entity,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *EntityResult) Reset() { *m = EntityResult{} } +func (m *EntityResult) String() string { return proto.CompactTextString(m) } +func (*EntityResult) ProtoMessage() {} + +func (m *EntityResult) GetEntity() *Entity { + if m != nil { + return m.Entity + } + return nil +} + +// A query. +type Query struct { + // The projection to return. If not set the entire entity is returned. + Projection []*PropertyExpression `protobuf:"bytes,2,rep,name=projection" json:"projection,omitempty"` + // The kinds to query (if empty, returns entities from all kinds). + Kind []*KindExpression `protobuf:"bytes,3,rep,name=kind" json:"kind,omitempty"` + // The filter to apply (optional). + Filter *Filter `protobuf:"bytes,4,opt,name=filter" json:"filter,omitempty"` + // The order to apply to the query results (if empty, order is unspecified). + Order []*PropertyOrder `protobuf:"bytes,5,rep,name=order" json:"order,omitempty"` + // The properties to group by (if empty, no grouping is applied to the + // result set). + GroupBy []*PropertyReference `protobuf:"bytes,6,rep,name=group_by" json:"group_by,omitempty"` + // A starting point for the query results. Optional. Query cursors are + // returned in query result batches. + StartCursor []byte `protobuf:"bytes,7,opt,name=start_cursor" json:"start_cursor,omitempty"` + // An ending point for the query results. Optional. Query cursors are + // returned in query result batches. + EndCursor []byte `protobuf:"bytes,8,opt,name=end_cursor" json:"end_cursor,omitempty"` + // The number of results to skip. Applies before limit, but after all other + // constraints (optional, defaults to 0). + Offset *int32 `protobuf:"varint,10,opt,name=offset,def=0" json:"offset,omitempty"` + // The maximum number of results to return. Applies after all other + // constraints. Optional. 
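[Editor's note: a sketch of how these pointer-heavy proto2 types fit together, placed alongside the generated code in the same package; the kind, namespace, and property names below are invented.]

package datastore

import proto "github.com/golang/protobuf/proto"

// newTaskEntity builds an Entity with a complete (kind, name) key path.
func newTaskEntity() *Entity {
	key := &Key{
		PartitionId: &PartitionId{Namespace: proto.String("demo")},
		// One path element whose kind and name are both set, so the
		// element, and therefore the key, is complete.
		PathElement: []*Key_PathElement{{
			Kind: proto.String("Task"),
			Name: proto.String("sampleTask"),
		}},
	}
	return &Entity{
		Key: key,
		Property: []*Property{{
			Name: proto.String("done"),
			// Indexed stays nil here, so GetIndexed() reports the
			// proto2 default of true (Default_Value_Indexed).
			Value: &Value{BooleanValue: proto.Bool(false)},
		}},
	}
}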
+ Limit *int32 `protobuf:"varint,11,opt,name=limit" json:"limit,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Query) Reset() { *m = Query{} } +func (m *Query) String() string { return proto.CompactTextString(m) } +func (*Query) ProtoMessage() {} + +const Default_Query_Offset int32 = 0 + +func (m *Query) GetProjection() []*PropertyExpression { + if m != nil { + return m.Projection + } + return nil +} + +func (m *Query) GetKind() []*KindExpression { + if m != nil { + return m.Kind + } + return nil +} + +func (m *Query) GetFilter() *Filter { + if m != nil { + return m.Filter + } + return nil +} + +func (m *Query) GetOrder() []*PropertyOrder { + if m != nil { + return m.Order + } + return nil +} + +func (m *Query) GetGroupBy() []*PropertyReference { + if m != nil { + return m.GroupBy + } + return nil +} + +func (m *Query) GetStartCursor() []byte { + if m != nil { + return m.StartCursor + } + return nil +} + +func (m *Query) GetEndCursor() []byte { + if m != nil { + return m.EndCursor + } + return nil +} + +func (m *Query) GetOffset() int32 { + if m != nil && m.Offset != nil { + return *m.Offset + } + return Default_Query_Offset +} + +func (m *Query) GetLimit() int32 { + if m != nil && m.Limit != nil { + return *m.Limit + } + return 0 +} + +// A representation of a kind. +type KindExpression struct { + // The name of the kind. + Name *string `protobuf:"bytes,1,req,name=name" json:"name,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *KindExpression) Reset() { *m = KindExpression{} } +func (m *KindExpression) String() string { return proto.CompactTextString(m) } +func (*KindExpression) ProtoMessage() {} + +func (m *KindExpression) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +// A reference to a property relative to the kind expressions. +// exactly. +type PropertyReference struct { + // The name of the property. + Name *string `protobuf:"bytes,2,req,name=name" json:"name,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *PropertyReference) Reset() { *m = PropertyReference{} } +func (m *PropertyReference) String() string { return proto.CompactTextString(m) } +func (*PropertyReference) ProtoMessage() {} + +func (m *PropertyReference) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +// A representation of a property in a projection. +type PropertyExpression struct { + // The property to project. + Property *PropertyReference `protobuf:"bytes,1,req,name=property" json:"property,omitempty"` + // The aggregation function to apply to the property. Optional. + // Can only be used when grouping by at least one property. Must + // then be set on all properties in the projection that are not + // being grouped by. 
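[Editor's note: a small sketch of the Query message in use; the kind name is invented. Note how the generated getter substitutes the proto2 default when offset is left unset.]

package datastore

import proto "github.com/golang/protobuf/proto"

// tasksQuery asks for up to ten entities of a single kind.
func tasksQuery() *Query {
	q := &Query{
		Kind:  []*KindExpression{{Name: proto.String("Task")}},
		Limit: proto.Int32(10),
	}
	// Offset was never set, so the getter falls back to
	// Default_Query_Offset (0) instead of dereferencing nil.
	_ = q.GetOffset()
	return q
}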
+ AggregationFunction *PropertyExpression_AggregationFunction `protobuf:"varint,2,opt,name=aggregation_function,enum=datastore.PropertyExpression_AggregationFunction" json:"aggregation_function,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *PropertyExpression) Reset() { *m = PropertyExpression{} } +func (m *PropertyExpression) String() string { return proto.CompactTextString(m) } +func (*PropertyExpression) ProtoMessage() {} + +func (m *PropertyExpression) GetProperty() *PropertyReference { + if m != nil { + return m.Property + } + return nil +} + +func (m *PropertyExpression) GetAggregationFunction() PropertyExpression_AggregationFunction { + if m != nil && m.AggregationFunction != nil { + return *m.AggregationFunction + } + return PropertyExpression_FIRST +} + +// The desired order for a specific property. +type PropertyOrder struct { + // The property to order by. + Property *PropertyReference `protobuf:"bytes,1,req,name=property" json:"property,omitempty"` + // The direction to order by. + Direction *PropertyOrder_Direction `protobuf:"varint,2,opt,name=direction,enum=datastore.PropertyOrder_Direction,def=1" json:"direction,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *PropertyOrder) Reset() { *m = PropertyOrder{} } +func (m *PropertyOrder) String() string { return proto.CompactTextString(m) } +func (*PropertyOrder) ProtoMessage() {} + +const Default_PropertyOrder_Direction PropertyOrder_Direction = PropertyOrder_ASCENDING + +func (m *PropertyOrder) GetProperty() *PropertyReference { + if m != nil { + return m.Property + } + return nil +} + +func (m *PropertyOrder) GetDirection() PropertyOrder_Direction { + if m != nil && m.Direction != nil { + return *m.Direction + } + return Default_PropertyOrder_Direction +} + +// A holder for any type of filter. Exactly one field should be specified. +type Filter struct { + // A composite filter. + CompositeFilter *CompositeFilter `protobuf:"bytes,1,opt,name=composite_filter" json:"composite_filter,omitempty"` + // A filter on a property. + PropertyFilter *PropertyFilter `protobuf:"bytes,2,opt,name=property_filter" json:"property_filter,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Filter) Reset() { *m = Filter{} } +func (m *Filter) String() string { return proto.CompactTextString(m) } +func (*Filter) ProtoMessage() {} + +func (m *Filter) GetCompositeFilter() *CompositeFilter { + if m != nil { + return m.CompositeFilter + } + return nil +} + +func (m *Filter) GetPropertyFilter() *PropertyFilter { + if m != nil { + return m.PropertyFilter + } + return nil +} + +// A filter that merges the multiple other filters using the given operation. +type CompositeFilter struct { + // The operator for combining multiple filters. + Operator *CompositeFilter_Operator `protobuf:"varint,1,req,name=operator,enum=datastore.CompositeFilter_Operator" json:"operator,omitempty"` + // The list of filters to combine. + // Must contain at least one filter. 
+ Filter []*Filter `protobuf:"bytes,2,rep,name=filter" json:"filter,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *CompositeFilter) Reset() { *m = CompositeFilter{} } +func (m *CompositeFilter) String() string { return proto.CompactTextString(m) } +func (*CompositeFilter) ProtoMessage() {} + +func (m *CompositeFilter) GetOperator() CompositeFilter_Operator { + if m != nil && m.Operator != nil { + return *m.Operator + } + return CompositeFilter_AND +} + +func (m *CompositeFilter) GetFilter() []*Filter { + if m != nil { + return m.Filter + } + return nil +} + +// A filter on a specific property. +type PropertyFilter struct { + // The property to filter by. + Property *PropertyReference `protobuf:"bytes,1,req,name=property" json:"property,omitempty"` + // The operator to filter by. + Operator *PropertyFilter_Operator `protobuf:"varint,2,req,name=operator,enum=datastore.PropertyFilter_Operator" json:"operator,omitempty"` + // The value to compare the property to. + Value *Value `protobuf:"bytes,3,req,name=value" json:"value,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *PropertyFilter) Reset() { *m = PropertyFilter{} } +func (m *PropertyFilter) String() string { return proto.CompactTextString(m) } +func (*PropertyFilter) ProtoMessage() {} + +func (m *PropertyFilter) GetProperty() *PropertyReference { + if m != nil { + return m.Property + } + return nil +} + +func (m *PropertyFilter) GetOperator() PropertyFilter_Operator { + if m != nil && m.Operator != nil { + return *m.Operator + } + return PropertyFilter_LESS_THAN +} + +func (m *PropertyFilter) GetValue() *Value { + if m != nil { + return m.Value + } + return nil +} + +// A GQL query. +type GqlQuery struct { + QueryString *string `protobuf:"bytes,1,req,name=query_string" json:"query_string,omitempty"` + // When false, the query string must not contain a literal. + AllowLiteral *bool `protobuf:"varint,2,opt,name=allow_literal,def=0" json:"allow_literal,omitempty"` + // A named argument must set field GqlQueryArg.name. + // No two named arguments may have the same name. + // For each non-reserved named binding site in the query string, + // there must be a named argument with that name, + // but not necessarily the inverse. + NameArg []*GqlQueryArg `protobuf:"bytes,3,rep,name=name_arg" json:"name_arg,omitempty"` + // Numbered binding site @1 references the first numbered argument, + // effectively using 1-based indexing, rather than the usual 0. + // A numbered argument must NOT set field GqlQueryArg.name. + // For each binding site numbered i in query_string, + // there must be an ith numbered argument. + // The inverse must also be true. 
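[Editor's note: a sketch of the two-level filter shape described above, with an invented property name: a single PropertyFilter does the comparison, and a CompositeFilter, whose only operator is AND, combines one or more of them.]

package datastore

import proto "github.com/golang/protobuf/proto"

// doneEqualsFalse wraps one property comparison in an AND composite.
// Each Filter sets exactly one of its two fields, per the comment above.
func doneEqualsFalse() *Filter {
	done := &Filter{
		PropertyFilter: &PropertyFilter{
			Property: &PropertyReference{Name: proto.String("done")},
			Operator: PropertyFilter_EQUAL.Enum(),
			Value:    &Value{BooleanValue: proto.Bool(false)},
		},
	}
	return &Filter{
		CompositeFilter: &CompositeFilter{
			Operator: CompositeFilter_AND.Enum(),
			Filter:   []*Filter{done}, // must contain at least one filter
		},
	}
}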
+ NumberArg []*GqlQueryArg `protobuf:"bytes,4,rep,name=number_arg" json:"number_arg,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *GqlQuery) Reset() { *m = GqlQuery{} } +func (m *GqlQuery) String() string { return proto.CompactTextString(m) } +func (*GqlQuery) ProtoMessage() {} + +const Default_GqlQuery_AllowLiteral bool = false + +func (m *GqlQuery) GetQueryString() string { + if m != nil && m.QueryString != nil { + return *m.QueryString + } + return "" +} + +func (m *GqlQuery) GetAllowLiteral() bool { + if m != nil && m.AllowLiteral != nil { + return *m.AllowLiteral + } + return Default_GqlQuery_AllowLiteral +} + +func (m *GqlQuery) GetNameArg() []*GqlQueryArg { + if m != nil { + return m.NameArg + } + return nil +} + +func (m *GqlQuery) GetNumberArg() []*GqlQueryArg { + if m != nil { + return m.NumberArg + } + return nil +} + +// A binding argument for a GQL query. +// Exactly one of fields value and cursor must be set. +type GqlQueryArg struct { + // Must match regex "[A-Za-z_$][A-Za-z_$0-9]*". + // Must not match regex "__.*__". + // Must not be "". + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Value *Value `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"` + Cursor []byte `protobuf:"bytes,3,opt,name=cursor" json:"cursor,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *GqlQueryArg) Reset() { *m = GqlQueryArg{} } +func (m *GqlQueryArg) String() string { return proto.CompactTextString(m) } +func (*GqlQueryArg) ProtoMessage() {} + +func (m *GqlQueryArg) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *GqlQueryArg) GetValue() *Value { + if m != nil { + return m.Value + } + return nil +} + +func (m *GqlQueryArg) GetCursor() []byte { + if m != nil { + return m.Cursor + } + return nil +} + +// A batch of results produced by a query. +type QueryResultBatch struct { + // The result type for every entity in entityResults. + EntityResultType *EntityResult_ResultType `protobuf:"varint,1,req,name=entity_result_type,enum=datastore.EntityResult_ResultType" json:"entity_result_type,omitempty"` + // The results for this batch. + EntityResult []*EntityResult `protobuf:"bytes,2,rep,name=entity_result" json:"entity_result,omitempty"` + // A cursor that points to the position after the last result in the batch. + // May be absent. + EndCursor []byte `protobuf:"bytes,4,opt,name=end_cursor" json:"end_cursor,omitempty"` + // The state of the query after the current batch. + MoreResults *QueryResultBatch_MoreResultsType `protobuf:"varint,5,req,name=more_results,enum=datastore.QueryResultBatch_MoreResultsType" json:"more_results,omitempty"` + // The number of results skipped because of Query.offset. 
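[Editor's note: a sketch of a GQL query with one named binding argument; the query text is invented. The string contains no literals, so allow_literal can stay at its default of false.]

package datastore

import proto "github.com/golang/protobuf/proto"

// gqlByDone binds the @done site to a boolean value. Per the comments
// above, each argument sets exactly one of value and cursor, and a
// named argument must set Name.
func gqlByDone() *GqlQuery {
	return &GqlQuery{
		QueryString: proto.String("SELECT * FROM Task WHERE done = @done"),
		NameArg: []*GqlQueryArg{{
			Name:  proto.String("done"),
			Value: &Value{BooleanValue: proto.Bool(false)},
		}},
	}
}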
+ SkippedResults *int32 `protobuf:"varint,6,opt,name=skipped_results" json:"skipped_results,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *QueryResultBatch) Reset() { *m = QueryResultBatch{} } +func (m *QueryResultBatch) String() string { return proto.CompactTextString(m) } +func (*QueryResultBatch) ProtoMessage() {} + +func (m *QueryResultBatch) GetEntityResultType() EntityResult_ResultType { + if m != nil && m.EntityResultType != nil { + return *m.EntityResultType + } + return EntityResult_FULL +} + +func (m *QueryResultBatch) GetEntityResult() []*EntityResult { + if m != nil { + return m.EntityResult + } + return nil +} + +func (m *QueryResultBatch) GetEndCursor() []byte { + if m != nil { + return m.EndCursor + } + return nil +} + +func (m *QueryResultBatch) GetMoreResults() QueryResultBatch_MoreResultsType { + if m != nil && m.MoreResults != nil { + return *m.MoreResults + } + return QueryResultBatch_NOT_FINISHED +} + +func (m *QueryResultBatch) GetSkippedResults() int32 { + if m != nil && m.SkippedResults != nil { + return *m.SkippedResults + } + return 0 +} + +// A set of changes to apply. +// +// No entity in this message may have a reserved property name, +// not even a property in an entity in a value. +// No value in this message may have meaning 18, +// not even a value in an entity in another value. +// +// If entities with duplicate keys are present, an arbitrary choice will +// be made as to which is written. +type Mutation struct { + // Entities to upsert. + // Each upserted entity's key must have a complete path and + // must not be reserved/read-only. + Upsert []*Entity `protobuf:"bytes,1,rep,name=upsert" json:"upsert,omitempty"` + // Entities to update. + // Each updated entity's key must have a complete path and + // must not be reserved/read-only. + Update []*Entity `protobuf:"bytes,2,rep,name=update" json:"update,omitempty"` + // Entities to insert. + // Each inserted entity's key must have a complete path and + // must not be reserved/read-only. + Insert []*Entity `protobuf:"bytes,3,rep,name=insert" json:"insert,omitempty"` + // Insert entities with a newly allocated ID. + // Each inserted entity's key must omit the final identifier in its path and + // must not be reserved/read-only. + InsertAutoId []*Entity `protobuf:"bytes,4,rep,name=insert_auto_id" json:"insert_auto_id,omitempty"` + // Keys of entities to delete. + // Each key must have a complete key path and must not be reserved/read-only. + Delete []*Key `protobuf:"bytes,5,rep,name=delete" json:"delete,omitempty"` + // Ignore a user specified read-only period. Optional. + Force *bool `protobuf:"varint,6,opt,name=force" json:"force,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Mutation) Reset() { *m = Mutation{} } +func (m *Mutation) String() string { return proto.CompactTextString(m) } +func (*Mutation) ProtoMessage() {} + +func (m *Mutation) GetUpsert() []*Entity { + if m != nil { + return m.Upsert + } + return nil +} + +func (m *Mutation) GetUpdate() []*Entity { + if m != nil { + return m.Update + } + return nil +} + +func (m *Mutation) GetInsert() []*Entity { + if m != nil { + return m.Insert + } + return nil +} + +func (m *Mutation) GetInsertAutoId() []*Entity { + if m != nil { + return m.InsertAutoId + } + return nil +} + +func (m *Mutation) GetDelete() []*Key { + if m != nil { + return m.Delete + } + return nil +} + +func (m *Mutation) GetForce() bool { + if m != nil && m.Force != nil { + return *m.Force + } + return false +} + +// The result of applying a mutation. 
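[Editor's note: the batch fields above imply a paging loop: while more_results reports NOT_FINISHED, the batch's end cursor becomes the next request's start cursor. A sketch follows, with the RPC abstracted behind a callback since this file defines only the messages.]

package datastore

// drainBatches keeps issuing the query until the service reports that
// no further batches remain, accumulating every EntityResult.
func drainBatches(q *Query, runQuery func(*Query) *QueryResultBatch) []*EntityResult {
	var out []*EntityResult
	for {
		batch := runQuery(q)
		out = append(out, batch.GetEntityResult()...)
		if batch.GetMoreResults() != QueryResultBatch_NOT_FINISHED {
			return out
		}
		// Resume after the last result we have already seen.
		q.StartCursor = batch.GetEndCursor()
	}
}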
+type MutationResult struct { + // Number of index writes. + IndexUpdates *int32 `protobuf:"varint,1,req,name=index_updates" json:"index_updates,omitempty"` + // Keys for insertAutoId entities. One per entity from the + // request, in the same order. + InsertAutoIdKey []*Key `protobuf:"bytes,2,rep,name=insert_auto_id_key" json:"insert_auto_id_key,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *MutationResult) Reset() { *m = MutationResult{} } +func (m *MutationResult) String() string { return proto.CompactTextString(m) } +func (*MutationResult) ProtoMessage() {} + +func (m *MutationResult) GetIndexUpdates() int32 { + if m != nil && m.IndexUpdates != nil { + return *m.IndexUpdates + } + return 0 +} + +func (m *MutationResult) GetInsertAutoIdKey() []*Key { + if m != nil { + return m.InsertAutoIdKey + } + return nil +} + +// Options shared by read requests. +type ReadOptions struct { + // The read consistency to use. + // Cannot be set when transaction is set. + // Lookup and ancestor queries default to STRONG, global queries default to + // EVENTUAL and cannot be set to STRONG. + ReadConsistency *ReadOptions_ReadConsistency `protobuf:"varint,1,opt,name=read_consistency,enum=datastore.ReadOptions_ReadConsistency,def=0" json:"read_consistency,omitempty"` + // The transaction to use. Optional. + Transaction []byte `protobuf:"bytes,2,opt,name=transaction" json:"transaction,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *ReadOptions) Reset() { *m = ReadOptions{} } +func (m *ReadOptions) String() string { return proto.CompactTextString(m) } +func (*ReadOptions) ProtoMessage() {} + +const Default_ReadOptions_ReadConsistency ReadOptions_ReadConsistency = ReadOptions_DEFAULT + +func (m *ReadOptions) GetReadConsistency() ReadOptions_ReadConsistency { + if m != nil && m.ReadConsistency != nil { + return *m.ReadConsistency + } + return Default_ReadOptions_ReadConsistency +} + +func (m *ReadOptions) GetTransaction() []byte { + if m != nil { + return m.Transaction + } + return nil +} + +// The request for Lookup. +type LookupRequest struct { + // Options for this lookup request. Optional. + ReadOptions *ReadOptions `protobuf:"bytes,1,opt,name=read_options" json:"read_options,omitempty"` + // Keys of entities to look up from the datastore. + Key []*Key `protobuf:"bytes,3,rep,name=key" json:"key,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *LookupRequest) Reset() { *m = LookupRequest{} } +func (m *LookupRequest) String() string { return proto.CompactTextString(m) } +func (*LookupRequest) ProtoMessage() {} + +func (m *LookupRequest) GetReadOptions() *ReadOptions { + if m != nil { + return m.ReadOptions + } + return nil +} + +func (m *LookupRequest) GetKey() []*Key { + if m != nil { + return m.Key + } + return nil +} + +// The response for Lookup. +type LookupResponse struct { + // Entities found as ResultType.FULL entities. + Found []*EntityResult `protobuf:"bytes,1,rep,name=found" json:"found,omitempty"` + // Entities not found as ResultType.KEY_ONLY entities. + Missing []*EntityResult `protobuf:"bytes,2,rep,name=missing" json:"missing,omitempty"` + // A list of keys that were not looked up due to resource constraints. 
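[Editor's note: a sketch of a LookupRequest that opts into eventual consistency. As the comments above note, read_consistency and transaction are mutually exclusive, so only one is set.]

package datastore

// eventualLookup fetches the given keys with EVENTUAL consistency.
func eventualLookup(keys []*Key) *LookupRequest {
	return &LookupRequest{
		ReadOptions: &ReadOptions{
			ReadConsistency: ReadOptions_EVENTUAL.Enum(),
		},
		Key: keys,
	}
}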
+ Deferred []*Key `protobuf:"bytes,3,rep,name=deferred" json:"deferred,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *LookupResponse) Reset() { *m = LookupResponse{} } +func (m *LookupResponse) String() string { return proto.CompactTextString(m) } +func (*LookupResponse) ProtoMessage() {} + +func (m *LookupResponse) GetFound() []*EntityResult { + if m != nil { + return m.Found + } + return nil +} + +func (m *LookupResponse) GetMissing() []*EntityResult { + if m != nil { + return m.Missing + } + return nil +} + +func (m *LookupResponse) GetDeferred() []*Key { + if m != nil { + return m.Deferred + } + return nil +} + +// The request for RunQuery. +type RunQueryRequest struct { + // The options for this query. + ReadOptions *ReadOptions `protobuf:"bytes,1,opt,name=read_options" json:"read_options,omitempty"` + // Entities are partitioned into subsets, identified by a dataset (usually + // implicitly specified by the project) and namespace ID. Queries are scoped + // to a single partition. + // This partition ID is normalized with the standard default context + // partition ID, but all other partition IDs in RunQueryRequest are + // normalized with this partition ID as the context partition ID. + PartitionId *PartitionId `protobuf:"bytes,2,opt,name=partition_id" json:"partition_id,omitempty"` + // The query to run. + // Either this field or field gql_query must be set, but not both. + Query *Query `protobuf:"bytes,3,opt,name=query" json:"query,omitempty"` + // The GQL query to run. + // Either this field or field query must be set, but not both. + GqlQuery *GqlQuery `protobuf:"bytes,7,opt,name=gql_query" json:"gql_query,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *RunQueryRequest) Reset() { *m = RunQueryRequest{} } +func (m *RunQueryRequest) String() string { return proto.CompactTextString(m) } +func (*RunQueryRequest) ProtoMessage() {} + +func (m *RunQueryRequest) GetReadOptions() *ReadOptions { + if m != nil { + return m.ReadOptions + } + return nil +} + +func (m *RunQueryRequest) GetPartitionId() *PartitionId { + if m != nil { + return m.PartitionId + } + return nil +} + +func (m *RunQueryRequest) GetQuery() *Query { + if m != nil { + return m.Query + } + return nil +} + +func (m *RunQueryRequest) GetGqlQuery() *GqlQuery { + if m != nil { + return m.GqlQuery + } + return nil +} + +// The response for RunQuery. +type RunQueryResponse struct { + // A batch of query results (always present). + Batch *QueryResultBatch `protobuf:"bytes,1,opt,name=batch" json:"batch,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *RunQueryResponse) Reset() { *m = RunQueryResponse{} } +func (m *RunQueryResponse) String() string { return proto.CompactTextString(m) } +func (*RunQueryResponse) ProtoMessage() {} + +func (m *RunQueryResponse) GetBatch() *QueryResultBatch { + if m != nil { + return m.Batch + } + return nil +} + +// The request for BeginTransaction. +type BeginTransactionRequest struct { + // The transaction isolation level. 
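[Editor's note: since query and gql_query are mutually exclusive on RunQueryRequest, a request carries exactly one of them; a sketch with the GQL form, using an invented namespace.]

package datastore

import proto "github.com/golang/protobuf/proto"

// runGql builds a RunQueryRequest around a prepared GQL query,
// deliberately leaving the plain Query field nil.
func runGql(gql *GqlQuery) *RunQueryRequest {
	return &RunQueryRequest{
		PartitionId: &PartitionId{Namespace: proto.String("demo")},
		GqlQuery:    gql,
	}
}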
+ IsolationLevel *BeginTransactionRequest_IsolationLevel `protobuf:"varint,1,opt,name=isolation_level,enum=datastore.BeginTransactionRequest_IsolationLevel,def=0" json:"isolation_level,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *BeginTransactionRequest) Reset() { *m = BeginTransactionRequest{} } +func (m *BeginTransactionRequest) String() string { return proto.CompactTextString(m) } +func (*BeginTransactionRequest) ProtoMessage() {} + +const Default_BeginTransactionRequest_IsolationLevel BeginTransactionRequest_IsolationLevel = BeginTransactionRequest_SNAPSHOT + +func (m *BeginTransactionRequest) GetIsolationLevel() BeginTransactionRequest_IsolationLevel { + if m != nil && m.IsolationLevel != nil { + return *m.IsolationLevel + } + return Default_BeginTransactionRequest_IsolationLevel +} + +// The response for BeginTransaction. +type BeginTransactionResponse struct { + // The transaction identifier (always present). + Transaction []byte `protobuf:"bytes,1,opt,name=transaction" json:"transaction,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *BeginTransactionResponse) Reset() { *m = BeginTransactionResponse{} } +func (m *BeginTransactionResponse) String() string { return proto.CompactTextString(m) } +func (*BeginTransactionResponse) ProtoMessage() {} + +func (m *BeginTransactionResponse) GetTransaction() []byte { + if m != nil { + return m.Transaction + } + return nil +} + +// The request for Rollback. +type RollbackRequest struct { + // The transaction identifier, returned by a call to + // beginTransaction. + Transaction []byte `protobuf:"bytes,1,req,name=transaction" json:"transaction,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *RollbackRequest) Reset() { *m = RollbackRequest{} } +func (m *RollbackRequest) String() string { return proto.CompactTextString(m) } +func (*RollbackRequest) ProtoMessage() {} + +func (m *RollbackRequest) GetTransaction() []byte { + if m != nil { + return m.Transaction + } + return nil +} + +// The response for Rollback. +type RollbackResponse struct { + XXX_unrecognized []byte `json:"-"` +} + +func (m *RollbackResponse) Reset() { *m = RollbackResponse{} } +func (m *RollbackResponse) String() string { return proto.CompactTextString(m) } +func (*RollbackResponse) ProtoMessage() {} + +// The request for Commit. +type CommitRequest struct { + // The transaction identifier, returned by a call to + // beginTransaction. Must be set when mode is TRANSACTIONAL. + Transaction []byte `protobuf:"bytes,1,opt,name=transaction" json:"transaction,omitempty"` + // The mutation to perform. Optional. + Mutation *Mutation `protobuf:"bytes,2,opt,name=mutation" json:"mutation,omitempty"` + // The type of commit to perform. Either TRANSACTIONAL or NON_TRANSACTIONAL. 
+ Mode *CommitRequest_Mode `protobuf:"varint,5,opt,name=mode,enum=datastore.CommitRequest_Mode,def=1" json:"mode,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *CommitRequest) Reset() { *m = CommitRequest{} } +func (m *CommitRequest) String() string { return proto.CompactTextString(m) } +func (*CommitRequest) ProtoMessage() {} + +const Default_CommitRequest_Mode CommitRequest_Mode = CommitRequest_TRANSACTIONAL + +func (m *CommitRequest) GetTransaction() []byte { + if m != nil { + return m.Transaction + } + return nil +} + +func (m *CommitRequest) GetMutation() *Mutation { + if m != nil { + return m.Mutation + } + return nil +} + +func (m *CommitRequest) GetMode() CommitRequest_Mode { + if m != nil && m.Mode != nil { + return *m.Mode + } + return Default_CommitRequest_Mode +} + +// The response for Commit. +type CommitResponse struct { + // The result of performing the mutation (if any). + MutationResult *MutationResult `protobuf:"bytes,1,opt,name=mutation_result" json:"mutation_result,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *CommitResponse) Reset() { *m = CommitResponse{} } +func (m *CommitResponse) String() string { return proto.CompactTextString(m) } +func (*CommitResponse) ProtoMessage() {} + +func (m *CommitResponse) GetMutationResult() *MutationResult { + if m != nil { + return m.MutationResult + } + return nil +} + +// The request for AllocateIds. +type AllocateIdsRequest struct { + // A list of keys with incomplete key paths to allocate IDs for. + // No key may be reserved/read-only. + Key []*Key `protobuf:"bytes,1,rep,name=key" json:"key,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *AllocateIdsRequest) Reset() { *m = AllocateIdsRequest{} } +func (m *AllocateIdsRequest) String() string { return proto.CompactTextString(m) } +func (*AllocateIdsRequest) ProtoMessage() {} + +func (m *AllocateIdsRequest) GetKey() []*Key { + if m != nil { + return m.Key + } + return nil +} + +// The response for AllocateIds. +type AllocateIdsResponse struct { + // The keys specified in the request (in the same order), each with + // its key path completed with a newly allocated ID. 
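[Editor's note: a sketch of the commit half of a transaction: the transaction bytes returned by BeginTransaction go into CommitRequest, and Mode can be omitted because TRANSACTIONAL is the proto2 default (Default_CommitRequest_Mode).]

package datastore

// transactionalUpsert commits one upserted entity inside a transaction
// previously opened with BeginTransaction.
func transactionalUpsert(txn []byte, e *Entity) *CommitRequest {
	return &CommitRequest{
		Transaction: txn,
		Mutation:    &Mutation{Upsert: []*Entity{e}},
	}
}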
+ Key []*Key `protobuf:"bytes,1,rep,name=key" json:"key,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *AllocateIdsResponse) Reset() { *m = AllocateIdsResponse{} } +func (m *AllocateIdsResponse) String() string { return proto.CompactTextString(m) } +func (*AllocateIdsResponse) ProtoMessage() {} + +func (m *AllocateIdsResponse) GetKey() []*Key { + if m != nil { + return m.Key + } + return nil +} + +func init() { + proto.RegisterEnum("datastore.EntityResult_ResultType", EntityResult_ResultType_name, EntityResult_ResultType_value) + proto.RegisterEnum("datastore.PropertyExpression_AggregationFunction", PropertyExpression_AggregationFunction_name, PropertyExpression_AggregationFunction_value) + proto.RegisterEnum("datastore.PropertyOrder_Direction", PropertyOrder_Direction_name, PropertyOrder_Direction_value) + proto.RegisterEnum("datastore.CompositeFilter_Operator", CompositeFilter_Operator_name, CompositeFilter_Operator_value) + proto.RegisterEnum("datastore.PropertyFilter_Operator", PropertyFilter_Operator_name, PropertyFilter_Operator_value) + proto.RegisterEnum("datastore.QueryResultBatch_MoreResultsType", QueryResultBatch_MoreResultsType_name, QueryResultBatch_MoreResultsType_value) + proto.RegisterEnum("datastore.ReadOptions_ReadConsistency", ReadOptions_ReadConsistency_name, ReadOptions_ReadConsistency_value) + proto.RegisterEnum("datastore.BeginTransactionRequest_IsolationLevel", BeginTransactionRequest_IsolationLevel_name, BeginTransactionRequest_IsolationLevel_value) + proto.RegisterEnum("datastore.CommitRequest_Mode", CommitRequest_Mode_name, CommitRequest_Mode_value) +} diff --git a/vendor/google.golang.org/cloud/internal/datastore/datastore_v1.proto b/vendor/google.golang.org/cloud/internal/datastore/datastore_v1.proto new file mode 100644 index 000000000..d752beaa5 --- /dev/null +++ b/vendor/google.golang.org/cloud/internal/datastore/datastore_v1.proto @@ -0,0 +1,606 @@ +// Copyright 2013 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// The datastore v1 service proto definitions + +syntax = "proto2"; + +package datastore; +option java_package = "com.google.api.services.datastore"; + + +// An identifier for a particular subset of entities. +// +// Entities are partitioned into various subsets, each used by different +// datasets and different namespaces within a dataset and so forth. +// +// All input partition IDs are normalized before use. +// A partition ID is normalized as follows: +// If the partition ID is unset or is set to an empty partition ID, replace it +// with the context partition ID. +// Otherwise, if the partition ID has no dataset ID, assign it the context +// partition ID's dataset ID. +// Unless otherwise documented, the context partition ID has the dataset ID set +// to the context dataset ID and no other partition dimension set. +// +// A partition ID is empty if all of its fields are unset. +// +// Partition dimension: +// A dimension may be unset. +// A dimension's value must never be "". 
+// A dimension's value must match [A-Za-z\d\.\-_]{1,100} +// If the value of any dimension matches regex "__.*__", +// the partition is reserved/read-only. +// A reserved/read-only partition ID is forbidden in certain documented contexts. +// +// Dataset ID: +// A dataset id's value must never be "". +// A dataset id's value must match +// ([a-z\d\-]{1,100}~)?([a-z\d][a-z\d\-\.]{0,99}:)?([a-z\d][a-z\d\-]{0,99} +message PartitionId { + // The dataset ID. + optional string dataset_id = 3; + // The namespace. + optional string namespace = 4; +} + +// A unique identifier for an entity. +// If a key's partition id or any of its path kinds or names are +// reserved/read-only, the key is reserved/read-only. +// A reserved/read-only key is forbidden in certain documented contexts. +message Key { + // Entities are partitioned into subsets, currently identified by a dataset + // (usually implicitly specified by the project) and namespace ID. + // Queries are scoped to a single partition. + optional PartitionId partition_id = 1; + + // A (kind, ID/name) pair used to construct a key path. + // + // At most one of name or ID may be set. + // If either is set, the element is complete. + // If neither is set, the element is incomplete. + message PathElement { + // The kind of the entity. + // A kind matching regex "__.*__" is reserved/read-only. + // A kind must not contain more than 500 characters. + // Cannot be "". + required string kind = 1; + // The ID of the entity. + // Never equal to zero. Values less than zero are discouraged and will not + // be supported in the future. + optional int64 id = 2; + // The name of the entity. + // A name matching regex "__.*__" is reserved/read-only. + // A name must not be more than 500 characters. + // Cannot be "". + optional string name = 3; + } + + // The entity path. + // An entity path consists of one or more elements composed of a kind and a + // string or numerical identifier, which identify entities. The first + // element identifies a root entity, the second element identifies + // a child of the root entity, the third element a child of the + // second entity, and so forth. The entities identified by all prefixes of + // the path are called the element's ancestors. + // An entity path is always fully complete: ALL of the entity's ancestors + // are required to be in the path along with the entity identifier itself. + // The only exception is that in some documented cases, the identifier in the + // last path element (for the entity) itself may be omitted. A path can never + // be empty. + repeated PathElement path_element = 2; +} + +// A message that can hold any of the supported value types and associated +// metadata. +// +// At most one of the Value fields may be set. +// If none are set the value is "null". +// +message Value { + // A boolean value. + optional bool boolean_value = 1; + // An integer value. + optional int64 integer_value = 2; + // A double value. + optional double double_value = 3; + // A timestamp value. + optional int64 timestamp_microseconds_value = 4; + // A key value. + optional Key key_value = 5; + // A blob key value. + optional string blob_key_value = 16; + // A UTF-8 encoded string value. + optional string string_value = 17; + // A blob value. + optional bytes blob_value = 18; + // An entity value. + // May have no key. + // May have a key with an incomplete key path. + // May have a reserved/read-only key. + optional Entity entity_value = 6; + // A list value. + // Cannot contain another list value. 
+ // Cannot also have a meaning and indexing set. + repeated Value list_value = 7; + + // The meaning field is reserved and should not be used. + optional int32 meaning = 14; + + // If the value should be indexed. + // + // The indexed property may be set for a + // null value. + // When indexed is true, stringValue + // is limited to 500 characters and the blob value is limited to 500 bytes. + // Exception: If meaning is set to 2, string_value is limited to 2038 + // characters regardless of indexed. + // When indexed is true, meaning 15 and 22 are not allowed, and meaning 16 + // will be ignored on input (and will never be set on output). + // Input values by default have indexed set to + // true; however, you can explicitly set indexed to + // true if you want. (An output value never has + // indexed explicitly set to true.) If a value is + // itself an entity, it cannot have indexed set to + // true. + // Exception: An entity value with meaning 9, 20 or 21 may be indexed. + optional bool indexed = 15 [default = true]; +} + +// An entity property. +message Property { + // The name of the property. + // A property name matching regex "__.*__" is reserved. + // A reserved property name is forbidden in certain documented contexts. + // The name must not contain more than 500 characters. + // Cannot be "". + required string name = 1; + + // The value(s) of the property. + // Each value can have only one value property populated. For example, + // you cannot have a values list of { value: { integerValue: 22, + // stringValue: "a" } }, but you can have { value: { listValue: + // [ { integerValue: 22 }, { stringValue: "a" } ] }. + required Value value = 4; +} + +// An entity. +// +// An entity is limited to 1 megabyte when stored. That roughly +// corresponds to a limit of 1 megabyte for the serialized form of this +// message. +message Entity { + // The entity's key. + // + // An entity must have a key, unless otherwise documented (for example, + // an entity in Value.entityValue may have no key). + // An entity's kind is its key's path's last element's kind, + // or null if it has no key. + optional Key key = 1; + // The entity's properties. + // Each property's name must be unique for its entity. + repeated Property property = 2; +} + +// The result of fetching an entity from the datastore. +message EntityResult { + // Specifies what data the 'entity' field contains. + // A ResultType is either implied (for example, in LookupResponse.found it + // is always FULL) or specified by context (for example, in message + // QueryResultBatch, field 'entity_result_type' specifies a ResultType + // for all the values in field 'entity_result'). + enum ResultType { + FULL = 1; // The entire entity. + PROJECTION = 2; // A projected subset of properties. + // The entity may have no key. + // A property value may have meaning 18. + KEY_ONLY = 3; // Only the key. + } + + // The resulting entity. + required Entity entity = 1; +} + +// A query. +message Query { + // The projection to return. If not set the entire entity is returned. + repeated PropertyExpression projection = 2; + + // The kinds to query (if empty, returns entities from all kinds). + repeated KindExpression kind = 3; + + // The filter to apply (optional). + optional Filter filter = 4; + + // The order to apply to the query results (if empty, order is unspecified). + repeated PropertyOrder order = 5; + + // The properties to group by (if empty, no grouping is applied to the + // result set). 
+  repeated PropertyReference group_by = 6;
+
+  // A starting point for the query results. Optional. Query cursors are
+  // returned in query result batches.
+  optional bytes /* serialized QueryCursor */ start_cursor = 7;
+
+  // An ending point for the query results. Optional. Query cursors are
+  // returned in query result batches.
+  optional bytes /* serialized QueryCursor */ end_cursor = 8;
+
+  // The number of results to skip. Applies before limit, but after all other
+  // constraints (optional, defaults to 0).
+  optional int32 offset = 10 [default=0];
+
+  // The maximum number of results to return. Applies after all other
+  // constraints. Optional.
+  optional int32 limit = 11;
+}
+
+// A representation of a kind.
+message KindExpression {
+  // The name of the kind.
+  required string name = 1;
+}
+
+// A reference to a property relative to the kind expressions.
+message PropertyReference {
+  // The name of the property.
+  required string name = 2;
+}
+
+// A representation of a property in a projection.
+message PropertyExpression {
+  enum AggregationFunction {
+    FIRST = 1;
+  }
+  // The property to project.
+  required PropertyReference property = 1;
+  // The aggregation function to apply to the property. Optional.
+  // Can only be used when grouping by at least one property. Must
+  // then be set on all properties in the projection that are not
+  // being grouped by.
+  optional AggregationFunction aggregation_function = 2;
+}
+
+// The desired order for a specific property.
+message PropertyOrder {
+  enum Direction {
+    ASCENDING = 1;
+    DESCENDING = 2;
+  }
+  // The property to order by.
+  required PropertyReference property = 1;
+  // The direction to order by.
+  optional Direction direction = 2 [default=ASCENDING];
+}
+
+// A holder for any type of filter. Exactly one field should be specified.
+message Filter {
+  // A composite filter.
+  optional CompositeFilter composite_filter = 1;
+  // A filter on a property.
+  optional PropertyFilter property_filter = 2;
+}
+
+// A filter that merges multiple other filters using the given operator.
+message CompositeFilter {
+  enum Operator {
+    AND = 1;
+  }
+
+  // The operator for combining multiple filters.
+  required Operator operator = 1;
+  // The list of filters to combine.
+  // Must contain at least one filter.
+  repeated Filter filter = 2;
+}
+
+// A filter on a specific property.
+message PropertyFilter {
+  enum Operator {
+    LESS_THAN = 1;
+    LESS_THAN_OR_EQUAL = 2;
+    GREATER_THAN = 3;
+    GREATER_THAN_OR_EQUAL = 4;
+    EQUAL = 5;
+
+    HAS_ANCESTOR = 11;
+  }
+
+  // The property to filter by.
+  required PropertyReference property = 1;
+  // The operator to filter by.
+  required Operator operator = 2;
+  // The value to compare the property to.
+  required Value value = 3;
+}
+
+// A GQL query.
+message GqlQuery {
+  required string query_string = 1;
+  // When false, the query string must not contain a literal.
+  optional bool allow_literal = 2 [default = false];
+  // A named argument must set field GqlQueryArg.name.
+  // No two named arguments may have the same name.
+  // For each non-reserved named binding site in the query string,
+  // there must be a named argument with that name,
+  // but not necessarily the inverse.
+  repeated GqlQueryArg name_arg = 3;
+  // Numbered binding site @1 references the first numbered argument,
+  // effectively using 1-based indexing, rather than the usual 0.
+  // A numbered argument must NOT set field GqlQueryArg.name.
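+  // For example, the query string "SELECT * FROM Foo WHERE bar = @1"
+  // (with hypothetical kind Foo and property bar) contains the single
+  // numbered binding site @1.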
+ // For each binding site numbered i in query_string, + // there must be an ith numbered argument. + // The inverse must also be true. + repeated GqlQueryArg number_arg = 4; +} + +// A binding argument for a GQL query. +// Exactly one of fields value and cursor must be set. +message GqlQueryArg { + // Must match regex "[A-Za-z_$][A-Za-z_$0-9]*". + // Must not match regex "__.*__". + // Must not be "". + optional string name = 1; + optional Value value = 2; + optional bytes cursor = 3; +} + +// A batch of results produced by a query. +message QueryResultBatch { + // The possible values for the 'more_results' field. + enum MoreResultsType { + NOT_FINISHED = 1; // There are additional batches to fetch from this query. + MORE_RESULTS_AFTER_LIMIT = 2; // The query is finished, but there are more + // results after the limit. + NO_MORE_RESULTS = 3; // The query has been exhausted. + } + + // The result type for every entity in entityResults. + required EntityResult.ResultType entity_result_type = 1; + // The results for this batch. + repeated EntityResult entity_result = 2; + + // A cursor that points to the position after the last result in the batch. + // May be absent. + optional bytes /* serialized QueryCursor */ end_cursor = 4; + + // The state of the query after the current batch. + required MoreResultsType more_results = 5; + + // The number of results skipped because of Query.offset. + optional int32 skipped_results = 6; +} + +// A set of changes to apply. +// +// No entity in this message may have a reserved property name, +// not even a property in an entity in a value. +// No value in this message may have meaning 18, +// not even a value in an entity in another value. +// +// If entities with duplicate keys are present, an arbitrary choice will +// be made as to which is written. +message Mutation { + // Entities to upsert. + // Each upserted entity's key must have a complete path and + // must not be reserved/read-only. + repeated Entity upsert = 1; + // Entities to update. + // Each updated entity's key must have a complete path and + // must not be reserved/read-only. + repeated Entity update = 2; + // Entities to insert. + // Each inserted entity's key must have a complete path and + // must not be reserved/read-only. + repeated Entity insert = 3; + // Insert entities with a newly allocated ID. + // Each inserted entity's key must omit the final identifier in its path and + // must not be reserved/read-only. + repeated Entity insert_auto_id = 4; + // Keys of entities to delete. + // Each key must have a complete key path and must not be reserved/read-only. + repeated Key delete = 5; + // Ignore a user specified read-only period. Optional. + optional bool force = 6; +} + +// The result of applying a mutation. +message MutationResult { + // Number of index writes. + required int32 index_updates = 1; + // Keys for insertAutoId entities. One per entity from the + // request, in the same order. + repeated Key insert_auto_id_key = 2; +} + +// Options shared by read requests. +message ReadOptions { + enum ReadConsistency { + DEFAULT = 0; + STRONG = 1; + EVENTUAL = 2; + } + + // The read consistency to use. + // Cannot be set when transaction is set. + // Lookup and ancestor queries default to STRONG, global queries default to + // EVENTUAL and cannot be set to STRONG. + optional ReadConsistency read_consistency = 1 [default=DEFAULT]; + + // The transaction to use. Optional. + optional bytes /* serialized Transaction */ transaction = 2; +} + +// The request for Lookup. 
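+// Each requested key is returned in exactly one of the found, missing,
+// or deferred fields of the response (see LookupResponse below).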
+message LookupRequest { + + // Options for this lookup request. Optional. + optional ReadOptions read_options = 1; + // Keys of entities to look up from the datastore. + repeated Key key = 3; +} + +// The response for Lookup. +message LookupResponse { + + // The order of results in these fields is undefined and has no relation to + // the order of the keys in the input. + + // Entities found as ResultType.FULL entities. + repeated EntityResult found = 1; + + // Entities not found as ResultType.KEY_ONLY entities. + repeated EntityResult missing = 2; + + // A list of keys that were not looked up due to resource constraints. + repeated Key deferred = 3; +} + + +// The request for RunQuery. +message RunQueryRequest { + + // The options for this query. + optional ReadOptions read_options = 1; + + // Entities are partitioned into subsets, identified by a dataset (usually + // implicitly specified by the project) and namespace ID. Queries are scoped + // to a single partition. + // This partition ID is normalized with the standard default context + // partition ID, but all other partition IDs in RunQueryRequest are + // normalized with this partition ID as the context partition ID. + optional PartitionId partition_id = 2; + + // The query to run. + // Either this field or field gql_query must be set, but not both. + optional Query query = 3; + // The GQL query to run. + // Either this field or field query must be set, but not both. + optional GqlQuery gql_query = 7; +} + +// The response for RunQuery. +message RunQueryResponse { + + // A batch of query results (always present). + optional QueryResultBatch batch = 1; + +} + +// The request for BeginTransaction. +message BeginTransactionRequest { + + enum IsolationLevel { + SNAPSHOT = 0; // Read from a consistent snapshot. Concurrent transactions + // conflict if their mutations conflict. For example: + // Read(A),Write(B) may not conflict with Read(B),Write(A), + // but Read(B),Write(B) does conflict with Read(B),Write(B). + SERIALIZABLE = 1; // Read from a consistent snapshot. Concurrent + // transactions conflict if they cannot be serialized. + // For example Read(A),Write(B) does conflict with + // Read(B),Write(A) but Read(A) may not conflict with + // Write(A). + } + + // The transaction isolation level. + optional IsolationLevel isolation_level = 1 [default=SNAPSHOT]; +} + +// The response for BeginTransaction. +message BeginTransactionResponse { + + // The transaction identifier (always present). + optional bytes /* serialized Transaction */ transaction = 1; +} + +// The request for Rollback. +message RollbackRequest { + + // The transaction identifier, returned by a call to + // beginTransaction. + required bytes /* serialized Transaction */ transaction = 1; +} + +// The response for Rollback. +message RollbackResponse { +// Empty +} + +// The request for Commit. +message CommitRequest { + + enum Mode { + TRANSACTIONAL = 1; + NON_TRANSACTIONAL = 2; + } + + // The transaction identifier, returned by a call to + // beginTransaction. Must be set when mode is TRANSACTIONAL. + optional bytes /* serialized Transaction */ transaction = 1; + // The mutation to perform. Optional. + optional Mutation mutation = 2; + // The type of commit to perform. Either TRANSACTIONAL or NON_TRANSACTIONAL. + optional Mode mode = 5 [default=TRANSACTIONAL]; +} + +// The response for Commit. +message CommitResponse { + + // The result of performing the mutation (if any). + optional MutationResult mutation_result = 1; +} + +// The request for AllocateIds. 
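+// Allocating IDs is useful for referencing an entity before it is
+// inserted (see the AllocateIds rpc in the service definition below).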
+message AllocateIdsRequest {
+
+  // A list of keys with incomplete key paths to allocate IDs for.
+  // No key may be reserved/read-only.
+  repeated Key key = 1;
+}
+
+// The response for AllocateIds.
+message AllocateIdsResponse {
+
+  // The keys specified in the request (in the same order), each with
+  // its key path completed with a newly allocated ID.
+  repeated Key key = 1;
+}
+
+// Each rpc normalizes the partition IDs of the keys in its input entities,
+// and always returns entities with keys with normalized partition IDs.
+// (Note that this applies to all entities, including entities in values.)
+service DatastoreService {
+  // Look up some entities by key.
+  rpc Lookup(LookupRequest) returns (LookupResponse) {
+  };
+  // Query for entities.
+  rpc RunQuery(RunQueryRequest) returns (RunQueryResponse) {
+  };
+  // Begin a new transaction.
+  rpc BeginTransaction(BeginTransactionRequest) returns (BeginTransactionResponse) {
+  };
+  // Commit a transaction, optionally creating, deleting or modifying some
+  // entities.
+  rpc Commit(CommitRequest) returns (CommitResponse) {
+  };
+  // Roll back a transaction.
+  rpc Rollback(RollbackRequest) returns (RollbackResponse) {
+  };
+  // Allocate IDs for incomplete keys (useful for referencing an entity before
+  // it is inserted).
+  rpc AllocateIds(AllocateIdsRequest) returns (AllocateIdsResponse) {
+  };
+}
diff --git a/vendor/google.golang.org/cloud/internal/opts/option.go b/vendor/google.golang.org/cloud/internal/opts/option.go
new file mode 100644
index 000000000..844d31044
--- /dev/null
+++ b/vendor/google.golang.org/cloud/internal/opts/option.go
@@ -0,0 +1,25 @@
+// Package opts holds the DialOpt struct, configurable by
+// cloud.ClientOptions to set up transports for cloud packages.
+//
+// This is a separate package to prevent cycles between the core
+// cloud packages.
+package opts
+
+import (
+	"net/http"
+
+	"golang.org/x/oauth2"
+	"google.golang.org/grpc"
+)
+
+type DialOpt struct {
+	Endpoint  string
+	Scopes    []string
+	UserAgent string
+
+	TokenSource oauth2.TokenSource
+
+	HTTPClient   *http.Client
+	GRPCClient   *grpc.ClientConn
+	GRPCDialOpts []grpc.DialOption
+}
diff --git a/vendor/google.golang.org/cloud/internal/testutil/context.go b/vendor/google.golang.org/cloud/internal/testutil/context.go
new file mode 100644
index 000000000..c10142757
--- /dev/null
+++ b/vendor/google.golang.org/cloud/internal/testutil/context.go
@@ -0,0 +1,71 @@
+// Copyright 2014 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package testutil contains helper functions for writing tests.
+package testutil
+
+import (
+	"io/ioutil"
+	"log"
+	"os"
+
+	"golang.org/x/net/context"
+	"golang.org/x/oauth2"
+	"golang.org/x/oauth2/google"
+	"google.golang.org/cloud"
+)
+
+const (
+	envProjID     = "GCLOUD_TESTS_GOLANG_PROJECT_ID"
+	envPrivateKey = "GCLOUD_TESTS_GOLANG_KEY"
+)
+
+// ProjID returns the project ID to use in integration tests, or the empty
+// string if none is configured.
+func ProjID() string { + projID := os.Getenv(envProjID) + if projID == "" { + return "" + } + return projID +} + +// TokenSource returns the OAuth2 token source to use in integration tests, +// or nil if none is configured. TokenSource will log.Fatal if the token +// source is specified but missing or invalid. +func TokenSource(ctx context.Context, scopes ...string) oauth2.TokenSource { + key := os.Getenv(envPrivateKey) + if key == "" { + return nil + } + jsonKey, err := ioutil.ReadFile(key) + if err != nil { + log.Fatalf("Cannot read the JSON key file, err: %v", err) + } + conf, err := google.JWTConfigFromJSON(jsonKey, scopes...) + if err != nil { + log.Fatalf("google.JWTConfigFromJSON: %v", err) + } + return conf.TokenSource(ctx) +} + +// TODO(djd): Delete this function when it's no longer used. +func Context(scopes ...string) context.Context { + ctx := oauth2.NoContext + ts := TokenSource(ctx, scopes...) + if ts == nil { + return nil + } + return cloud.NewContext(ProjID(), oauth2.NewClient(ctx, ts)) +} diff --git a/vendor/google.golang.org/cloud/internal/transport/cancelreq.go b/vendor/google.golang.org/cloud/internal/transport/cancelreq.go new file mode 100644 index 000000000..ddae71cce --- /dev/null +++ b/vendor/google.golang.org/cloud/internal/transport/cancelreq.go @@ -0,0 +1,29 @@ +// Copyright 2015 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build go1.5 + +package transport + +import "net/http" + +// makeReqCancel returns a closure that cancels the given http.Request +// when called. +func makeReqCancel(req *http.Request) func(http.RoundTripper) { + c := make(chan struct{}) + req.Cancel = c + return func(http.RoundTripper) { + close(c) + } +} diff --git a/vendor/google.golang.org/cloud/internal/transport/cancelreq_legacy.go b/vendor/google.golang.org/cloud/internal/transport/cancelreq_legacy.go new file mode 100644 index 000000000..c11a4ddeb --- /dev/null +++ b/vendor/google.golang.org/cloud/internal/transport/cancelreq_legacy.go @@ -0,0 +1,31 @@ +// Copyright 2015 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build !go1.5 + +package transport + +import "net/http" + +// makeReqCancel returns a closure that cancels the given http.Request +// when called. +func makeReqCancel(req *http.Request) func(http.RoundTripper) { + // Go 1.4 and prior do not have a reliable way of cancelling a request. + // Transport.CancelRequest will only work if the request is already in-flight. 
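+	// Calling the returned closure before the request is in flight is
+	// therefore a best-effort no-op on those versions.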
+	return func(r http.RoundTripper) {
+		if t, ok := r.(*http.Transport); ok {
+			t.CancelRequest(req)
+		}
+	}
+}
diff --git a/vendor/google.golang.org/cloud/internal/transport/dial.go b/vendor/google.golang.org/cloud/internal/transport/dial.go
new file mode 100644
index 000000000..ae2baf9fd
--- /dev/null
+++ b/vendor/google.golang.org/cloud/internal/transport/dial.go
@@ -0,0 +1,135 @@
+// Copyright 2015 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package transport
+
+import (
+	"errors"
+	"fmt"
+	"net/http"
+
+	"golang.org/x/net/context"
+	"golang.org/x/oauth2"
+	"golang.org/x/oauth2/google"
+	"google.golang.org/cloud"
+	"google.golang.org/cloud/internal/opts"
+	"google.golang.org/grpc"
+	"google.golang.org/grpc/credentials"
+	"google.golang.org/grpc/credentials/oauth"
+)
+
+// ErrHTTP is returned on a non-200 HTTP response.
+type ErrHTTP struct {
+	StatusCode int
+	Body       []byte
+	err        error
+}
+
+func (e *ErrHTTP) Error() string {
+	if e.err == nil {
+		return fmt.Sprintf("error during call, http status code: %v %s", e.StatusCode, e.Body)
+	}
+	return e.err.Error()
+}
+
+// NewHTTPClient returns an HTTP client for use communicating with a Google cloud
+// service, configured with the given ClientOptions. It also returns the endpoint
+// for the service as specified in the options.
+func NewHTTPClient(ctx context.Context, opt ...cloud.ClientOption) (*http.Client, string, error) {
+	var o opts.DialOpt
+	for _, opt := range opt {
+		opt.Resolve(&o)
+	}
+	if o.GRPCClient != nil {
+		return nil, "", errors.New("unsupported GRPC base transport specified")
+	}
+	// TODO(djd): Wrap all http.Clients with appropriate internal version to add
+	// UserAgent header and prepend correct endpoint.
+	if o.HTTPClient != nil {
+		return o.HTTPClient, o.Endpoint, nil
+	}
+	if o.TokenSource == nil {
+		var err error
+		o.TokenSource, err = google.DefaultTokenSource(ctx, o.Scopes...)
+		if err != nil {
+			return nil, "", fmt.Errorf("google.DefaultTokenSource: %v", err)
+		}
+	}
+	return oauth2.NewClient(ctx, o.TokenSource), o.Endpoint, nil
+}
+
+// NewProtoClient returns a ProtoClient for communicating with a Google cloud service,
+// configured with the given ClientOptions.
+func NewProtoClient(ctx context.Context, opt ...cloud.ClientOption) (*ProtoClient, error) {
+	var o opts.DialOpt
+	for _, opt := range opt {
+		opt.Resolve(&o)
+	}
+	if o.GRPCClient != nil {
+		return nil, errors.New("unsupported GRPC base transport specified")
+	}
+	var client *http.Client
+	switch {
+	case o.HTTPClient != nil:
+		if o.TokenSource != nil {
+			return nil, errors.New("at most one of WithTokenSource or WithBaseHTTP may be provided")
+		}
+		client = o.HTTPClient
+	case o.TokenSource != nil:
+		client = oauth2.NewClient(ctx, o.TokenSource)
+	default:
+		var err error
+		client, err = google.DefaultClient(ctx, o.Scopes...)
+ if err != nil { + return nil, err + } + } + + return &ProtoClient{ + client: client, + endpoint: o.Endpoint, + userAgent: o.UserAgent, + }, nil +} + +// DialGRPC returns a GRPC connection for use communicating with a Google cloud +// service, configured with the given ClientOptions. +func DialGRPC(ctx context.Context, opt ...cloud.ClientOption) (*grpc.ClientConn, error) { + var o opts.DialOpt + for _, opt := range opt { + opt.Resolve(&o) + } + if o.HTTPClient != nil { + return nil, errors.New("unsupported HTTP base transport specified") + } + if o.GRPCClient != nil { + return o.GRPCClient, nil + } + if o.TokenSource == nil { + var err error + o.TokenSource, err = google.DefaultTokenSource(ctx, o.Scopes...) + if err != nil { + return nil, fmt.Errorf("google.DefaultTokenSource: %v", err) + } + } + grpcOpts := []grpc.DialOption{ + grpc.WithPerRPCCredentials(oauth.TokenSource{o.TokenSource}), + grpc.WithTransportCredentials(credentials.NewClientTLSFromCert(nil, "")), + } + grpcOpts = append(grpcOpts, o.GRPCDialOpts...) + if o.UserAgent != "" { + grpcOpts = append(grpcOpts, grpc.WithUserAgent(o.UserAgent)) + } + return grpc.Dial(o.Endpoint, grpcOpts...) +} diff --git a/vendor/google.golang.org/cloud/internal/transport/proto.go b/vendor/google.golang.org/cloud/internal/transport/proto.go new file mode 100644 index 000000000..05b11cde1 --- /dev/null +++ b/vendor/google.golang.org/cloud/internal/transport/proto.go @@ -0,0 +1,80 @@ +// Copyright 2015 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package transport + +import ( + "bytes" + "io/ioutil" + "net/http" + + "github.com/golang/protobuf/proto" + "golang.org/x/net/context" +) + +type ProtoClient struct { + client *http.Client + endpoint string + userAgent string +} + +func (c *ProtoClient) Call(ctx context.Context, method string, req, resp proto.Message) error { + payload, err := proto.Marshal(req) + if err != nil { + return err + } + + httpReq, err := http.NewRequest("POST", c.endpoint+method, bytes.NewReader(payload)) + if err != nil { + return err + } + httpReq.Header.Set("Content-Type", "application/x-protobuf") + if ua := c.userAgent; ua != "" { + httpReq.Header.Set("User-Agent", ua) + } + + errc := make(chan error, 1) + cancel := makeReqCancel(httpReq) + + go func() { + r, err := c.client.Do(httpReq) + if err != nil { + errc <- err + return + } + defer r.Body.Close() + + body, err := ioutil.ReadAll(r.Body) + if r.StatusCode != http.StatusOK { + err = &ErrHTTP{ + StatusCode: r.StatusCode, + Body: body, + err: err, + } + } + if err != nil { + errc <- err + return + } + errc <- proto.Unmarshal(body, resp) + }() + + select { + case <-ctx.Done(): + cancel(c.client.Transport) // Cancel the HTTP request. + return ctx.Err() + case err := <-errc: + return err + } +}
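+
+// A minimal usage sketch, for illustration only. NewProtoClient is defined
+// in this package and cloud.WithTokenSource in package cloud; the "lookup"
+// method suffix and the pb message types are assumptions, not APIs defined
+// here:
+//
+//	client, err := NewProtoClient(ctx, cloud.WithTokenSource(ts))
+//	if err != nil {
+//		// handle the error
+//	}
+//	req, resp := new(pb.LookupRequest), new(pb.LookupResponse)
+//	// Call POSTs the marshaled request to endpoint+"lookup" and
+//	// unmarshals the protobuf response body into resp.
+//	if err := client.Call(ctx, "lookup", req, resp); err != nil {
+//		// handle the error
+//	}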